diff --git a/.clang-format b/.clang-format index 252820d9c80a15..fe1aa1a30d4026 100644 --- a/.clang-format +++ b/.clang-format @@ -141,11 +141,13 @@ ForEachMacros: - 'damon_for_each_target_safe' - 'damos_for_each_filter' - 'damos_for_each_filter_safe' + - 'damos_for_each_quota_goal' + - 'damos_for_each_quota_goal_safe' - 'data__for_each_file' - 'data__for_each_file_new' - 'data__for_each_file_start' - 'device_for_each_child_node' - - 'displayid_iter_for_each' + - 'device_for_each_child_node_scoped' - 'dma_fence_array_for_each' - 'dma_fence_chain_for_each' - 'dma_fence_unwrap_for_each' @@ -172,11 +174,14 @@ ForEachMacros: - 'drm_for_each_plane' - 'drm_for_each_plane_mask' - 'drm_for_each_privobj' - - 'drm_gem_for_each_gpuva' - - 'drm_gem_for_each_gpuva_safe' + - 'drm_gem_for_each_gpuvm_bo' + - 'drm_gem_for_each_gpuvm_bo_safe' - 'drm_gpuva_for_each_op' - 'drm_gpuva_for_each_op_from_reverse' + - 'drm_gpuva_for_each_op_reverse' - 'drm_gpuva_for_each_op_safe' + - 'drm_gpuvm_bo_for_each_va' + - 'drm_gpuvm_bo_for_each_va_safe' - 'drm_gpuvm_for_each_va' - 'drm_gpuvm_for_each_va_range' - 'drm_gpuvm_for_each_va_range_safe' @@ -192,11 +197,11 @@ ForEachMacros: - 'dsa_switch_for_each_port_continue_reverse' - 'dsa_switch_for_each_port_safe' - 'dsa_switch_for_each_user_port' + - 'dsa_switch_for_each_user_port_continue_reverse' - 'dsa_tree_for_each_cpu_port' - 'dsa_tree_for_each_user_port' - 'dsa_tree_for_each_user_port_continue_reverse' - 'dso__for_each_symbol' - - 'dsos__for_each_with_build_id' - 'elf_hash_for_each_possible' - 'elf_symtab__for_each_symbol' - 'evlist__for_each_cpu' @@ -216,6 +221,7 @@ ForEachMacros: - 'for_each_and_bit' - 'for_each_andnot_bit' - 'for_each_available_child_of_node' + - 'for_each_available_child_of_node_scoped' - 'for_each_bench' - 'for_each_bio' - 'for_each_board_func_rsrc' @@ -234,6 +240,7 @@ ForEachMacros: - 'for_each_card_widgets_safe' - 'for_each_cgroup_storage_type' - 'for_each_child_of_node' + - 'for_each_child_of_node_scoped' - 'for_each_clear_bit' - 'for_each_clear_bit_from' - 'for_each_clear_bitrange' @@ -251,6 +258,7 @@ ForEachMacros: - 'for_each_cpu' - 'for_each_cpu_and' - 'for_each_cpu_andnot' + - 'for_each_cpu_from' - 'for_each_cpu_or' - 'for_each_cpu_wrap' - 'for_each_dapm_widgets' @@ -269,13 +277,14 @@ ForEachMacros: - 'for_each_element' - 'for_each_element_extid' - 'for_each_element_id' + - 'for_each_enabled_cpu' - 'for_each_endpoint_of_node' - 'for_each_event' - 'for_each_event_tps' - 'for_each_evictable_lru' - 'for_each_fib6_node_rt_rcu' - 'for_each_fib6_walker_rt' - - 'for_each_free_mem_pfn_range_in_zone' + - 'for_each_file_lock' - 'for_each_free_mem_pfn_range_in_zone_from' - 'for_each_free_mem_range' - 'for_each_free_mem_range_reverse' @@ -286,15 +295,18 @@ ForEachMacros: - 'for_each_group_member' - 'for_each_group_member_head' - 'for_each_hstate' + - 'for_each_hwgpio' - 'for_each_if' - 'for_each_inject_fn' - 'for_each_insn' + - 'for_each_insn_op_loc' - 'for_each_insn_prefix' - 'for_each_intid' - 'for_each_iommu' - 'for_each_ip_tunnel_rcu' - 'for_each_irq_nr' - 'for_each_lang' + - 'for_each_link_ch_maps' - 'for_each_link_codecs' - 'for_each_link_cpus' - 'for_each_link_platforms' @@ -332,6 +344,9 @@ ForEachMacros: - 'for_each_new_plane_in_state_reverse' - 'for_each_new_private_obj_in_state' - 'for_each_new_reg' + - 'for_each_nhlt_endpoint' + - 'for_each_nhlt_endpoint_fmtcfg' + - 'for_each_nhlt_fmtcfg' - 'for_each_node' - 'for_each_node_by_name' - 'for_each_node_by_type' @@ -387,12 +402,15 @@ ForEachMacros: - 'for_each_reloc_from' - 
'for_each_requested_gpio' - 'for_each_requested_gpio_in_range' + - 'for_each_reserved_child_of_node' - 'for_each_reserved_mem_range' - 'for_each_reserved_mem_region' + - 'for_each_rtd_ch_maps' - 'for_each_rtd_codec_dais' - 'for_each_rtd_components' - 'for_each_rtd_cpu_dais' - 'for_each_rtd_dais' + - 'for_each_rtd_dais_reverse' - 'for_each_sband_iftype_data' - 'for_each_script' - 'for_each_sec' @@ -533,8 +551,6 @@ ForEachMacros: - 'lwq_for_each_safe' - 'map__for_each_symbol' - 'map__for_each_symbol_by_name' - - 'maps__for_each_entry' - - 'maps__for_each_entry_safe' - 'mas_for_each' - 'mci_for_each_dimm' - 'media_device_for_each_entity' @@ -560,7 +576,9 @@ ForEachMacros: - 'netdev_hw_addr_list_for_each' - 'nft_rule_for_each_expr' - 'nla_for_each_attr' + - 'nla_for_each_attr_type' - 'nla_for_each_nested' + - 'nla_for_each_nested_type' - 'nlmsg_for_each_attr' - 'nlmsg_for_each_msg' - 'nr_neigh_for_each' @@ -579,6 +597,7 @@ ForEachMacros: - 'perf_config_sections__for_each_entry' - 'perf_config_set__for_each_entry' - 'perf_cpu_map__for_each_cpu' + - 'perf_cpu_map__for_each_cpu_skip_any' - 'perf_cpu_map__for_each_idx' - 'perf_evlist__for_each_entry' - 'perf_evlist__for_each_entry_reverse' @@ -639,7 +658,6 @@ ForEachMacros: - 'shost_for_each_device' - 'sk_for_each' - 'sk_for_each_bound' - - 'sk_for_each_bound_bhash2' - 'sk_for_each_entry_offset_rcu' - 'sk_for_each_from' - 'sk_for_each_rcu' @@ -653,6 +671,7 @@ ForEachMacros: - 'snd_soc_dapm_widget_for_each_path_safe' - 'snd_soc_dapm_widget_for_each_sink_path' - 'snd_soc_dapm_widget_for_each_source_path' + - 'sparsebit_for_each_set_range' - 'strlist__for_each_entry' - 'strlist__for_each_entry_safe' - 'sym_for_each_insn' @@ -662,7 +681,6 @@ ForEachMacros: - 'tcf_act_for_each_action' - 'tcf_exts_for_each_action' - 'ttm_resource_manager_for_each_res' - - 'twsk_for_each_bound_bhash2' - 'udp_portaddr_for_each_entry' - 'udp_portaddr_for_each_entry_rcu' - 'usb_hub_for_each_child' @@ -686,6 +704,9 @@ ForEachMacros: - 'xbc_node_for_each_child' - 'xbc_node_for_each_key_value' - 'xbc_node_for_each_subkey' + - 'ynl_attr_for_each' + - 'ynl_attr_for_each_nested' + - 'ynl_attr_for_each_payload' - 'zorro_for_each_dev' IncludeBlocks: Preserve diff --git a/.gitignore b/.gitignore index 7902adf4f7f1a5..56972adb5031af 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ *.dwo *.elf *.gcno +*.gcda *.gz *.i *.ko @@ -46,7 +47,6 @@ *.so.dbg *.su *.symtypes -*.symversions *.tab.[ch] *.tar *.xz @@ -70,6 +70,7 @@ modules.order /Module.markers /modules.builtin /modules.builtin.modinfo +/modules.builtin.ranges /modules.nsdeps # @@ -142,7 +143,6 @@ GTAGS # id-utils files ID -*.orig *~ \#*# diff --git a/.mailmap b/.mailmap index 7c7f171d0e554d..0374777cc6628e 100644 --- a/.mailmap +++ b/.mailmap @@ -154,6 +154,9 @@ Christian Brauner Christian Marangi Christophe Ricard Christoph Hellwig +Chuck Lever +Chuck Lever +Chuck Lever Claudiu Beznea Colin Ian King Corey Minyard @@ -313,6 +316,7 @@ Jiri Slaby Jisheng Zhang Jisheng Zhang Jishnu Prakash +Joel Granados Johan Hovold Johan Hovold John Crispin @@ -613,6 +617,10 @@ Shuah Khan Sibi Sankar Sid Manning Simon Arlott +Simona Vetter +Simona Vetter +Simona Vetter +Simona Vetter Simon Horman Simon Horman Simon Kelley diff --git a/CREDITS b/CREDITS index 053e5a5003eb46..d439f5a1bc00d0 100644 --- a/CREDITS +++ b/CREDITS @@ -378,6 +378,9 @@ S: 1549 Hiironen Rd. 
S: Brimson, MN 55602 S: USA +N: Arnd Bergmann +D: Maintainer of Cell Broadband Engine Architecture + N: Hennus Bergman P: 1024/77D50909 76 99 FD 31 91 E1 96 1C 90 BB 22 80 62 F6 BD 63 D: Author and maintainer of the QIC-02 tape driver @@ -1869,6 +1872,9 @@ S: K osmidomkum 723 S: 160 00 Praha 6 S: Czech Republic +N: Jeremy Kerr +D: Maintainer of SPU File System + N: Michael Kerrisk E: mtk.manpages@gmail.com W: https://man7.org/ diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem index aa89adf18bc55b..0ae8cb074acf36 100644 --- a/Documentation/ABI/stable/sysfs-bus-nvmem +++ b/Documentation/ABI/stable/sysfs-bus-nvmem @@ -11,7 +11,7 @@ Description: Read returns '0' or '1' for read-write or read-only modes respectively. Write parses one of 'YyTt1NnFf0', or [oO][NnFf] for "on" - and "off", i.e. what kstrbool() supports. + and "off", i.e. what kstrtobool() supports. Note: This file is only present if CONFIG_NVMEM_SYSFS is enabled. diff --git a/Documentation/ABI/stable/vdso b/Documentation/ABI/stable/vdso index 951838d4278114..85dbb6a160df8f 100644 --- a/Documentation/ABI/stable/vdso +++ b/Documentation/ABI/stable/vdso @@ -9,9 +9,11 @@ maps an ELF DSO into that program's address space. This DSO is called the vDSO and it often contains useful and highly-optimized alternatives to real syscalls. -These functions are called just like ordinary C function according to -your platform's ABI. Call them from a sensible context. (For example, -if you set CS on x86 to something strange, the vDSO functions are +These functions are called according to your platform's ABI. On many +platforms they are called just like ordinary C function. On other platforms +(ex: powerpc) they are called with the same convention as system calls which +is different from ordinary C functions. Call them from a sensible context. +(For example, if you set CS on x86 to something strange, the vDSO functions are within their rights to crash.) In addition, if you pass a bad pointer to a vDSO function, you might get SIGSEGV instead of -EFAULT. diff --git a/Documentation/ABI/testing/configfs-usb-gadget-acm b/Documentation/ABI/testing/configfs-usb-gadget-acm index d21092d75a0587..25e68be9eb6612 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget-acm +++ b/Documentation/ABI/testing/configfs-usb-gadget-acm @@ -6,3 +6,10 @@ Description: This item contains just one readonly attribute: port_num. It contains the port number of the /dev/ttyGS device associated with acm function's instance "name". + +What: /config/usb-gadget/gadget/functions/acm.name/protocol +Date: Aug 2024 +KernelVersion: 6.13 +Description: + Reported bInterfaceProtocol for the ACM device. For legacy + reasons, this defaults to 1 (USB_CDC_ACM_PROTO_AT_V25TER). 
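A minimal usage sketch for the new protocol attribute (the gadget directory "gadget" and function instance "acm.name" follow the placeholder path above; which bInterfaceProtocol value is appropriate is host- and device-specific, so the written value is purely illustrative):

    cd /config/usb-gadget/gadget/functions/acm.name
    cat port_num       # ttyGS port number associated with this instance
    cat protocol       # defaults to 1 (USB_CDC_ACM_PROTO_AT_V25TER)
    echo 2 > protocol  # report a different bInterfaceProtocol to the host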
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac1 b/Documentation/ABI/testing/configfs-usb-gadget-uac1 index c4ba92f004c313..64188a85592b5d 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget-uac1 +++ b/Documentation/ABI/testing/configfs-usb-gadget-uac1 @@ -30,4 +30,12 @@ Description: req_number the number of pre-allocated requests for both capture and playback function_name name of the interface + p_it_name playback input terminal name + p_it_ch_name playback channels name + p_ot_name playback output terminal name + p_fu_vol_name playback mute/volume functional unit name + c_it_name capture input terminal name + c_it_ch_name capture channels name + c_ot_name capture output terminal name + c_fu_vol_name capture mute/volume functional unit name ===================== ======================================= diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac2 b/Documentation/ABI/testing/configfs-usb-gadget-uac2 index a2bf4fd82a5b7c..133e995c3e9280 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget-uac2 +++ b/Documentation/ABI/testing/configfs-usb-gadget-uac2 @@ -35,6 +35,17 @@ Description: req_number the number of pre-allocated requests for both capture and playback function_name name of the interface + if_ctrl_name topology control name + clksrc_in_name input clock name + clksrc_out_name output clock name + p_it_name playback input terminal name + p_it_ch_name playback input first channel name + p_ot_name playback output terminal name + p_fu_vol_name playback mute/volume function unit name + c_it_name capture input terminal name + c_it_ch_name capture input first channel name + c_ot_name capture output terminal name + c_fu_vol_name capture mute/volume functional unit name c_terminal_type code of the capture terminal type p_terminal_type code of the playback terminal type ===================== ======================================= diff --git a/Documentation/ABI/testing/debugfs-iio-ad9467 b/Documentation/ABI/testing/debugfs-iio-ad9467 new file mode 100644 index 00000000000000..0352fca1f7f2ee --- /dev/null +++ b/Documentation/ABI/testing/debugfs-iio-ad9467 @@ -0,0 +1,39 @@ +What: /sys/kernel/debug/iio/iio:deviceX/calibration_table_dump +KernelVersion: 6.11 +Contact: linux-iio@vger.kernel.org +Description: + This dumps the calibration table that was filled during the + digital interface tuning process. + +What: /sys/kernel/debug/iio/iio:deviceX/in_voltage_test_mode_available +KernelVersion: 6.11 +Contact: linux-iio@vger.kernel.org +Description: + List all the available test tones: + - off + - midscale_short + - pos_fullscale + - neg_fullscale + - checkerboard + - prbs23 + - prbs9 + - one_zero_toggle + - user + - bit_toggle + - sync + - one_bit_high + - mixed_bit_frequency + - ramp + + Note that depending on the actual device being used, some of the + above might not be available (and they won't be listed when + reading the file). + +What: /sys/kernel/debug/iio/iio:deviceX/in_voltageY_test_mode +KernelVersion: 6.11 +Contact: linux-iio@vger.kernel.org +Description: + Writing to this file will initiate one of available test tone on + channel Y. Reading it, shows which test is running. In cases + where an IIO backend is available and supports the test tone, + additional information about the data correctness is given. 
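A usage sketch for the test-tone attributes documented above (the device and channel numbers are illustrative; the set of available tones depends on the actual converter):

    cd /sys/kernel/debug/iio/iio:device0
    cat in_voltage_test_mode_available   # list the tones this device supports
    echo prbs9 > in_voltage0_test_mode   # start the PRBS9 tone on channel 0
    cat in_voltage0_test_mode            # shows which test is currently running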
diff --git a/Documentation/ABI/testing/debugfs-iio-backend b/Documentation/ABI/testing/debugfs-iio-backend new file mode 100644 index 00000000000000..01ab94469432c8 --- /dev/null +++ b/Documentation/ABI/testing/debugfs-iio-backend @@ -0,0 +1,20 @@ +What: /sys/kernel/debug/iio/iio:deviceX/backendY/name +KernelVersion: 6.11 +Contact: linux-iio@vger.kernel.org +Description: + Name of Backend Y connected to device X. + +What: /sys/kernel/debug/iio/iio:deviceX/backendY/direct_reg_access +KernelVersion: 6.11 +Contact: linux-iio@vger.kernel.org +Description: + Directly access the registers of backend Y. Typical usage is: + + Reading address 0x50 + echo 0x50 > direct_reg_access + cat direct_reg_access + + Writing address 0x50 + echo 0x50 0x3 > direct_reg_access + //readback address 0x50 + cat direct_reg_access diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index 628a00fb20a951..1ef69e0271f91d 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -151,3 +151,10 @@ Contact: Sergey Senozhatsky Description: The recompress file is write-only and triggers re-compression with secondary compression algorithms. + +What: /sys/block/zram/algorithm_params +Date: August 2024 +Contact: Sergey Senozhatsky +Description: + The algorithm_params file is write-only and is used to setup + compression algorithm parameters. diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 7cee78ad410856..89943c2d54e8a8 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -523,13 +523,27 @@ Description: What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibbias What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibbias What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_i_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_q_calibbias What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibbias What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibbias What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_capacitance_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibbias What: /sys/bus/iio/devices/iio:deviceX/in_illuminance0_calibbias -What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibbias -What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_intensityY_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_magn_z_calibbias What: /sys/bus/iio/devices/iio:deviceX/in_pressure_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_proximity_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_resistance_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_temp_calibbias +What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibbias +What: /sys/bus/iio/devices/iio:deviceX/out_currentY_calibbias +What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibbias KernelVersion: 2.6.35 Contact: linux-iio@vger.kernel.org Description: @@ -541,6 +555,10 @@ Description: What: /sys/bus/iio/devices/iio:deviceX/in_accel_calibbias_available What: 
/sys/bus/iio/devices/iio:deviceX/in_anglvel_calibbias_available +What: /sys/bus/iio/devices/iio:deviceX/in_temp_calibbias_available +What: /sys/bus/iio/devices/iio:deviceX/in_proximity_calibbias_available +What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibbias_available +What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibbias_available KernelVersion: 5.8 Contact: linux-iio@vger.kernel.org Description: @@ -549,25 +567,34 @@ Description: - a small discrete set of values like "0 2 4 6 8" - a range specified as "[min step max]" -What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_i_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_q_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_voltage_i_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_voltage_q_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_voltage_calibscale What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibscale What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibscale What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage_calibscale What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibscale What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibscale What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_capacitance_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibscale What: /sys/bus/iio/devices/iio:deviceX/in_illuminance0_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_intensity_both_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_intensity_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_intensity_ir_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_magn_z_calibscale What: /sys/bus/iio/devices/iio:deviceX/in_pressure_calibscale -What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_voltage_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_voltage_i_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_voltage_q_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_i_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_q_calibscale +What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_calibscale +What: /sys/bus/iio/devices/iio:deviceX/out_currentY_calibscale +What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibscale KernelVersion: 2.6.35 Contact: linux-iio@vger.kernel.org Description: @@ -575,6 +602,20 @@ Description: production inaccuracies). If shared across all channels, _calibscale is used. 
+What: /sys/bus/iio/devices/iio:deviceX/in_illuminanceY_calibscale_available +What: /sys/bus/iio/devices/iio:deviceX/in_intensityY_calibscale_available +What: /sys/bus/iio/devices/iio:deviceX/in_proximityY_calibscale_available +What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale_available +KernelVersion: 4.8 +Contact: linux-iio@vger.kernel.org +Description: + Available values of calibscale. Maybe expressed as either of: + + - a small discrete set of values like "1 8 16" + - a range specified as "[min step max]" + + If shared across all channels, _calibscale_available is used. + What: /sys/bus/iio/devices/iio:deviceX/in_activity_calibgender What: /sys/bus/iio/devices/iio:deviceX/in_energy_calibgender What: /sys/bus/iio/devices/iio:deviceX/in_distance_calibgender @@ -708,6 +749,7 @@ Description: 2.5kohm_to_gnd: connected to ground via a 2.5kOhm resistor, 6kohm_to_gnd: connected to ground via a 6kOhm resistor, 20kohm_to_gnd: connected to ground via a 20kOhm resistor, + 42kohm_to_gnd: connected to ground via a 42kOhm resistor, 90kohm_to_gnd: connected to ground via a 90kOhm resistor, 100kohm_to_gnd: connected to ground via an 100kOhm resistor, 125kohm_to_gnd: connected to ground via an 125kOhm resistor, @@ -2289,3 +2331,11 @@ KernelVersion: 6.7 Contact: linux-iio@vger.kernel.org Description: List of available timeout value for tap gesture confirmation. + +What: /sys/.../iio:deviceX/in_shunt_resistor +What: /sys/.../iio:deviceX/in_current_shunt_resistor +What: /sys/.../iio:deviceX/in_power_shunt_resistor +KernelVersion: 6.10 +Contact: linux-iio@vger.kernel.org +Description: + The value of current sense resistor in Ohms. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611 b/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611 deleted file mode 100644 index 6d2d2b094941d3..00000000000000 --- a/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611 +++ /dev/null @@ -1,17 +0,0 @@ -What: /sys/bus/iio/devices/iio:deviceX/in_power_shunt_resistor -Date: March 2017 -KernelVersion: 4.12 -Contact: linux-iio@vger.kernel.org -Description: The value of the shunt resistor used to compute power drain on - common input voltage pin (RS+). In Ohms. - -What: /sys/bus/iio/devices/iio:deviceX/in_current_shunt_resistor -Date: March 2017 -KernelVersion: 4.12 -Contact: linux-iio@vger.kernel.org -Description: The value of the shunt resistor used to compute current flowing - between RS+ and RS- voltage sense inputs. In Ohms. - -These attributes describe a single physical component, exposed as two distinct -attributes as it is used to calculate two different values: power load and -current flowing between RS+ and RS- inputs. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40 b/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40 index 469a7c00fad412..a95547e874f155 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40 +++ b/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40 @@ -15,17 +15,3 @@ Description: Set the relative humidity. This value is sent to the sensor for humidity compensation. 
Default value: 50000 (50 % relative humidity) - -What: /sys/bus/iio/devices/iio:deviceX/in_resistance_calibbias -Date: August 2021 -KernelVersion: 5.15 -Contact: Andreas Klinger -Description: - Set the bias value for the resistance which is used for - calculation of in_concentration_input as follows: - - x = (in_resistance_raw - in_resistance_calibbias) * 0.65 - - in_concentration_input = 500 / (1 + e^x) - - Default value: 30000 diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac b/Documentation/ABI/testing/sysfs-bus-iio-dac new file mode 100644 index 00000000000000..810eaac5533c7b --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-dac @@ -0,0 +1,61 @@ +What: /sys/bus/iio/devices/iio:deviceX/out_currentY_toggle_en +KernelVersion: 5.18 +Contact: linux-iio@vger.kernel.org +Description: + Toggle enable. Write 1 to enable toggle or 0 to disable it. This + is useful when one wants to change the DAC output codes. For + autonomous toggling, the way it should be done is: + + - disable toggle operation; + - change out_currentY_rawN, where N is the integer value of the symbol; + - enable toggle operation. + +What: /sys/bus/iio/devices/iio:deviceX/out_currentY_rawN +KernelVersion: 5.18 +Contact: linux-iio@vger.kernel.org +Description: + This attribute has the same meaning as out_currentY_raw. It is + specific to toggle enabled channels and refers to the DAC output + code in INPUT_N (_rawN), where N is the integer value of the symbol. + The same scale and offset as in out_currentY_raw applies. + +What: /sys/bus/iio/devices/iio:deviceX/out_currentY_symbol +KernelVersion: 5.18 +Contact: linux-iio@vger.kernel.org +Description: + Performs a SW switch to a predefined output symbol. This attribute + is specific to toggle enabled channels and allows switching between + multiple predefined symbols. Each symbol corresponds to a different + output, denoted as out_currentY_rawN, where N is the integer value + of the symbol. Writing an integer value N will select out_currentY_rawN. + +What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_toggle_en +KernelVersion: 5.18 +Contact: linux-iio@vger.kernel.org +Description: + Toggle enable. Write 1 to enable toggle or 0 to disable it. This + is useful when one wants to change the DAC output codes. For + autonomous toggling, the way it should be done is: + + - disable toggle operation; + - change out_voltageY_rawN, where N is the integer value of the symbol; + - enable toggle operation. + +What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_rawN +KernelVersion: 5.18 +Contact: linux-iio@vger.kernel.org +Description: + This attribute has the same meaning as out_currentY_raw. It is + specific to toggle enabled channels and refers to the DAC output + code in INPUT_N (_rawN), where N is the integer value of the symbol. + The same scale and offset as in out_currentY_raw applies. + +What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_symbol +KernelVersion: 5.18 +Contact: linux-iio@vger.kernel.org +Description: + Performs a SW switch to a predefined output symbol. This attribute + is specific to toggle enabled channels and allows switching between + multiple predefined symbols. Each symbol corresponds to a different + output, denoted as out_voltageY_rawN, where N is the integer value + of the symbol. Writing an integer value N will select out_voltageY_rawN. 
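A shell sketch of the autonomous-toggling sequence described above (the channel number and output codes are illustrative):

    echo 0 > out_voltage0_toggle_en   # disable toggle operation
    echo 100 > out_voltage0_raw0      # program the DAC code for symbol 0
    echo 200 > out_voltage0_raw1      # program the DAC code for symbol 1
    echo 1 > out_voltage0_toggle_en   # re-enable toggle operation
    echo 1 > out_voltage0_symbol      # switch the output to out_voltage0_raw1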
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688 b/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688 index 1c35971277ba27..ae95a5477382f2 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688 +++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688 @@ -53,34 +53,3 @@ KernelVersion: 5.18 Contact: linux-iio@vger.kernel.org Description: Returns the available values for the dither phase. - -What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_toggle_en -KernelVersion: 5.18 -Contact: linux-iio@vger.kernel.org -Description: - Toggle enable. Write 1 to enable toggle or 0 to disable it. This is - useful when one wants to change the DAC output codes. The way it should - be done is: - - - disable toggle operation; - - change out_voltageY_raw0 and out_voltageY_raw1; - - enable toggle operation. - -What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw0 -What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw1 -KernelVersion: 5.18 -Contact: linux-iio@vger.kernel.org -Description: - It has the same meaning as out_voltageY_raw. This attribute is - specific to toggle enabled channels and refers to the DAC output - code in INPUT_A (_raw0) and INPUT_B (_raw1). The same scale and offset - as in out_voltageY_raw applies. - -What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_symbol -KernelVersion: 5.18 -Contact: linux-iio@vger.kernel.org -Description: - Performs a SW toggle. This attribute is specific to toggle - enabled channels and allows to toggle between out_voltageY_raw0 - and out_voltageY_raw1 through software. Writing 0 will select - out_voltageY_raw0 while 1 selects out_voltageY_raw1. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 index 31dbb390573ff2..c431f0a13cf502 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 +++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 @@ -3,7 +3,7 @@ KernelVersion: Contact: linux-iio@vger.kernel.org Description: Reading this returns the valid values that can be written to the - on_altvoltage0_mode attribute: + filter_mode attribute: - auto -> Adjust bandpass filter to track changes in input clock rate. - manual -> disable/unregister the clock rate notifier / input clock tracking. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc b/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc index 8916f7ec650740..8dbca113112d84 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc +++ b/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc @@ -13,12 +13,3 @@ Description: available for reading data. However, samples can be occasionally skipped or repeated, depending on the beat between the capture and conversion rates. - -What: /sys/bus/iio/devices/iio:deviceX/in_shunt_resistor -Date: December 2015 -KernelVersion: 4.4 -Contact: linux-iio@vger.kernel.org -Description: - The value of the shunt resistor may be known only at runtime fom an - eeprom content read by a client application. This attribute allows to - set its value in ohms. diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci index ecf47559f495be..7f63c7e9777358 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci +++ b/Documentation/ABI/testing/sysfs-bus-pci @@ -500,3 +500,75 @@ Description: console drivers from the device. Raw users of pci-sysfs resourceN attributes must be terminated prior to resizing. Success of the resizing operation is not guaranteed. 
+ +What: /sys/bus/pci/devices/.../leds/*:enclosure:*/brightness +What: /sys/class/leds/*:enclosure:*/brightness +Date: August 2024 +KernelVersion: 6.12 +Description: + LED indications on PCIe storage enclosures which are controlled + through the NPEM interface (Native PCIe Enclosure Management, + PCIe r6.1 sec 6.28) are accessible as led class devices, both + below /sys/class/leds and below NPEM-capable PCI devices. + + Although these led class devices could be manipulated manually, + in practice they are typically manipulated automatically by an + application such as ledmon(8). + + The name of a led class device is as follows: + :enclosure: + where: + + - is the domain, bus, device and function number + (e.g. 10000:02:05.0) + - is a short description of the LED indication + + Valid indications per PCIe r6.1 table 6-27 are: + + - ok (drive is functioning normally) + - locate (drive is being identified by an admin) + - fail (drive is not functioning properly) + - rebuild (drive is part of an array that is rebuilding) + - pfa (drive is predicted to fail soon) + - hotspare (drive is marked to be used as a replacement) + - ica (drive is part of an array that is degraded) + - ifa (drive is part of an array that is failed) + - idt (drive is not the right type for the connector) + - disabled (drive is disabled, removal is safe) + - specific0 to specific7 (enclosure-specific indications) + + Broadly, the indications fall into one of these categories: + + - to signify drive state (ok, locate, fail, idt, disabled) + - to signify drive role or state in a software RAID array + (rebuild, pfa, hotspare, ica, ifa) + - to signify any other role or state (specific0 to specific7) + + Mandatory indications per PCIe r6.1 sec 7.9.19.2 comprise: + ok, locate, fail, rebuild. All others are optional. + A led class device is only visible if the corresponding + indication is supported by the device. + + To manipulate the indications, write 0 (LED_OFF) or 1 (LED_ON) + to the "brightness" file. Note that manipulating an indication + may implicitly manipulate other indications at the vendor's + discretion. E.g. when the user lights up the "ok" indication, + the vendor may choose to automatically turn off the "fail" + indication. The current state of an indication can be + retrieved by reading its "brightness" file. + + The PCIe Base Specification allows vendors leeway to choose + different colors or blinking patterns for the indications, + but they typically follow the IBPI standard. E.g. the "locate" + indication is usually presented as one or two LEDs blinking at + 4 Hz frequency: + https://en.wikipedia.org/wiki/International_Blinking_Pattern_Interpretation + + PCI Firmware Specification r3.3 sec 4.7 defines a DSM interface + to facilitate shared access by operating system and platform + firmware to a device's NPEM registers. The kernel will use + this DSM interface where available, instead of accessing NPEM + registers directly. The DSM interface does not support the + enclosure-specific indications "specific0" to "specific7", + hence the corresponding led class devices are unavailable if + the DSM interface is used. 
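A usage sketch based on the example device address above (whether a given indication is present depends on what the enclosure supports):

    # identify a drive slot to an admin by lighting the "locate" indication
    echo 1 > /sys/class/leds/10000:02:05.0:enclosure:locate/brightness
    # check whether the "fail" indication is currently lit
    cat /sys/class/leds/10000:02:05.0:enclosure:fail/brightness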
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power index 7c81f0a25a4872..45180b62d42686 100644 --- a/Documentation/ABI/testing/sysfs-class-power +++ b/Documentation/ABI/testing/sysfs-class-power @@ -377,17 +377,33 @@ What: /sys/class/power_supply//charge_type Date: July 2009 Contact: linux-pm@vger.kernel.org Description: - Represents the type of charging currently being applied to the - battery. "Trickle", "Fast", and "Standard" all mean different - charging speeds. "Adaptive" means that the charger uses some - algorithm to adjust the charge rate dynamically, without - any user configuration required. "Custom" means that the charger - uses the charge_control_* properties as configuration for some - different algorithm. "Long Life" means the charger reduces its - charging rate in order to prolong the battery health. "Bypass" - means the charger bypasses the charging path around the - integrated converter allowing for a "smart" wall adaptor to - perform the power conversion externally. + Select the charging algorithm to use for a battery. + + Standard: + Fully charge the battery at a moderate rate. + Fast: + Quickly charge the battery using fast-charge + technology. This is typically harder on the battery + than standard charging and may lower its lifespan. + Trickle: + Users who primarily operate the system while + plugged into an external power source can extend + battery life with this mode. Vendor tooling may + call this "Primarily AC Use". + Adaptive: + Automatically optimize battery charge rate based + on typical usage pattern. + Custom: + Use the charge_control_* properties to determine + when to start and stop charging. Advanced users + can use this to drastically extend battery life. + Long Life: + The charger reduces its charging rate in order to + prolong the battery health. + Bypass: + The charger bypasses the charging path around the + integrated converter allowing for a "smart" wall + adaptor to perform the power conversion externally. Access: Read, Write @@ -592,7 +608,12 @@ Description: the supply, for example it can show if USB-PD capable source is attached. - Access: Read-Only + Access: For power-supplies which consume USB power such + as battery charger chips, this indicates the type of + the connected USB power source and is Read-Only. + + For power-supplies which act as a USB power-source such as + e.g. the UCS1002 USB Port Power Controller this is writable. Valid values: "Unknown", "SDP", "DCP", "CDP", "ACA", "C", "PD", diff --git a/Documentation/ABI/testing/sysfs-class-tee b/Documentation/ABI/testing/sysfs-class-tee new file mode 100644 index 00000000000000..c9144d16003e6f --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-tee @@ -0,0 +1,15 @@ +What: /sys/class/tee/tee{,priv}X/rpmb_routing_model +Date: May 2024 +KernelVersion: 6.10 +Contact: op-tee@lists.trustedfirmware.org +Description: + RPMB frames can be routed to the RPMB device via the + user-space daemon tee-supplicant or the RPMB subsystem + in the kernel. The value "user" means that the driver + will route the RPMB frames via user space. Conversely, + "kernel" means that the frames are routed via the RPMB + subsystem without assistance from tee-supplicant. It + should be assumed that RPMB frames are routed via user + space if the variable is absent. The primary purpose + of this variable is to let systemd know whether + tee-supplicant is needed in the early boot with initramfs. 
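A shell sketch of selecting the "Custom" algorithm described above (the supply name "BAT0" and the threshold values are illustrative; the charge_control_start_threshold and charge_control_end_threshold attributes are documented elsewhere in this file):

    cat /sys/class/power_supply/BAT0/charge_type
    echo Custom > /sys/class/power_supply/BAT0/charge_type
    echo 60 > /sys/class/power_supply/BAT0/charge_control_start_threshold
    echo 80 > /sys/class/power_supply/BAT0/charge_control_end_threshold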
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory index a95e0f17c35ade..cec65827e6020d 100644 --- a/Documentation/ABI/testing/sysfs-devices-memory +++ b/Documentation/ABI/testing/sysfs-devices-memory @@ -115,6 +115,6 @@ What: /sys/devices/system/memory/crash_hotplug Date: Aug 2023 Contact: Linux kernel mailing list Description: - (RO) indicates whether or not the kernel directly supports - modifying the crash elfcorehdr for memory hot un/plug and/or - on/offline changes. + (RO) indicates whether or not the kernel updates relevant kexec + segments on memory hot un/plug and/or on/offline events, avoiding the + need to reload kdump kernel. diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index de725ca3be825b..206079d3bd5b12 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -704,9 +704,9 @@ What: /sys/devices/system/cpu/crash_hotplug Date: Aug 2023 Contact: Linux kernel mailing list Description: - (RO) indicates whether or not the kernel directly supports - modifying the crash elfcorehdr for CPU hot un/plug and/or - on/offline changes. + (RO) indicates whether or not the kernel updates relevant kexec + segments on memory hot un/plug and/or on/offline events, avoiding the + need to reload kdump kernel. What: /sys/devices/system/cpu/enabled Date: Nov 2022 diff --git a/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon index 92fe7c5c5ac1d1..be4141a7522f6b 100644 --- a/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon +++ b/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon @@ -75,3 +75,11 @@ Description: RO. Energy input of device or gt in microjoules. for the gt. Only supported for particular Intel i915 graphics platforms. + +What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon/fan1_input +Date: November 2024 +KernelVersion: 6.12 +Contact: intel-gfx@lists.freedesktop.org +Description: RO. Fan speed of device in RPM. + + Only supported for particular Intel i915 graphics platforms. diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs index fe943ce76c60e4..5fa6655aee8409 100644 --- a/Documentation/ABI/testing/sysfs-driver-ufs +++ b/Documentation/ABI/testing/sysfs-driver-ufs @@ -1532,3 +1532,30 @@ Contact: Bean Huo Description: rtc_update_ms indicates how often the host should synchronize or update the UFS RTC. If set to 0, this will disable UFS RTC periodic update. + +What: /sys/devices/platform/.../ufshci_capabilities/version +Date: August 2024 +Contact: Avri Altman +Description: + Host Capabilities register group: UFS version register. + Symbol - VER. This file shows the UFSHCD version. + Example: Version 3.12 would be represented as 0000_0312h. + The file is read only. + +What: /sys/devices/platform/.../ufshci_capabilities/product_id +Date: August 2024 +Contact: Avri Altman +Description: + Host Capabilities register group: product ID register. + Symbol - HCPID. This file shows the UFSHCD product id. + The content of this register is vendor specific. + The file is read only. + +What: /sys/devices/platform/.../ufshci_capabilities/man_id +Date: August 2024 +Contact: Avri Altman +Description: + Host Capabilities register group: manufacturer ID register. + Symbol - HCMID. This file shows the UFSHCD manufacturer id. + The Manufacturer ID is defined by JEDEC in JEDEC-JEP106. 
+ The file is read only. diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index cad6c3dc1f9c1f..fdedf1ea944ba8 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -579,6 +579,12 @@ Description: When ATGC is on, it controls age threshold to bypass GCing young candidates whose age is not beyond the threshold, by default it was initialized as 604800 seconds (equals to 7 days). +What: /sys/fs/f2fs//atgc_enabled +Date: Feb 2024 +Contact: "Jinbao Liu" +Description: It represents whether ATGC is on or off. The value is 1 which + indicates that ATGC is on, and 0 indicates that it is off. + What: /sys/fs/f2fs//gc_reclaimed_segments Date: July 2021 Contact: "Daeho Jeong" @@ -763,3 +769,53 @@ Date: November 2023 Contact: "Chao Yu" Description: It controls to enable/disable IO aware feature for background discard. By default, the value is 1 which indicates IO aware is on. + +What: /sys/fs/f2fs//blkzone_alloc_policy +Date: July 2024 +Contact: "Yuanhong Liao" +Description: The zone UFS we are currently using consists of two parts: + conventional zones and sequential zones. It can be used to control which part + to prioritize for writes, with a default value of 0. + + ======================== ========================================= + value description + blkzone_alloc_policy = 0 Prioritize writing to sequential zones + blkzone_alloc_policy = 1 Only allow writing to sequential zones + blkzone_alloc_policy = 2 Prioritize writing to conventional zones + ======================== ========================================= + +What: /sys/fs/f2fs//migration_window_granularity +Date: September 2024 +Contact: "Daeho Jeong" +Description: Controls migration window granularity of garbage collection on large + section. it can control the scanning window granularity for GC migration + in a unit of segment, while migration_granularity controls the number + of segments which can be migrated at the same turn. + +What: /sys/fs/f2fs//reserved_segments +Date: September 2024 +Contact: "Daeho Jeong" +Description: In order to fine tune GC behavior, we can control the number of + reserved segments. + +What: /sys/fs/f2fs//gc_no_zoned_gc_percent +Date: September 2024 +Contact: "Daeho Jeong" +Description: If the percentage of free sections over total sections is above this + number, F2FS do not garbage collection for zoned devices through the + background GC thread. the default number is "60". + +What: /sys/fs/f2fs//gc_boost_zoned_gc_percent +Date: September 2024 +Contact: "Daeho Jeong" +Description: If the percentage of free sections over total sections is under this + number, F2FS boosts garbage collection for zoned devices through the + background GC thread. the default number is "25". + +What: /sys/fs/f2fs//gc_valid_thresh_ratio +Date: September 2024 +Contact: "Daeho Jeong" +Description: It controls the valid block ratio threshold not to trigger excessive GC + for zoned deivces. The initial value of it is 95(%). F2FS will stop the + background GC thread from intiating GC for sections having valid blocks + exceeding the ratio. 
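A shell sketch of tuning the new zoned-device knobs described above (the mount source name "dm-0" is illustrative):

    cat /sys/fs/f2fs/dm-0/atgc_enabled                     # 1 if ATGC is on
    echo 1  > /sys/fs/f2fs/dm-0/blkzone_alloc_policy       # write to sequential zones only
    echo 25 > /sys/fs/f2fs/dm-0/gc_boost_zoned_gc_percent  # boost background GC earlier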
diff --git a/Documentation/PCI/pci.rst b/Documentation/PCI/pci.rst index dd7b1c0c21da02..f4d2662871ab1c 100644 --- a/Documentation/PCI/pci.rst +++ b/Documentation/PCI/pci.rst @@ -52,7 +52,7 @@ driver generally needs to perform the following initialization: - Enable DMA/processing engines When done using the device, and perhaps the module needs to be unloaded, -the driver needs to take the follow steps: +the driver needs to take the following steps: - Disable the device from generating IRQs - Release the IRQ (free_irq()) diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.rst b/Documentation/RCU/Design/Data-Structures/Data-Structures.rst index b34990c7c3778d..04e16775c752ba 100644 --- a/Documentation/RCU/Design/Data-Structures/Data-Structures.rst +++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.rst @@ -921,10 +921,10 @@ This portion of the ``rcu_data`` structure is declared as follows: :: - 1 int dynticks_snap; + 1 int watching_snap; 2 unsigned long dynticks_fqs; -The ``->dynticks_snap`` field is used to take a snapshot of the +The ``->watching_snap`` field is used to take a snapshot of the corresponding CPU's dyntick-idle state when forcing quiescent states, and is therefore accessed from other CPUs. Finally, the ``->dynticks_fqs`` field is used to count the number of times this CPU @@ -935,8 +935,8 @@ This portion of the rcu_data structure is declared as follows: :: - 1 long dynticks_nesting; - 2 long dynticks_nmi_nesting; + 1 long nesting; + 2 long nmi_nesting; 3 atomic_t dynticks; 4 bool rcu_need_heavy_qs; 5 bool rcu_urgent_qs; @@ -945,14 +945,14 @@ These fields in the rcu_data structure maintain the per-CPU dyntick-idle state for the corresponding CPU. The fields may be accessed only from the corresponding CPU (and from tracing) unless otherwise stated. -The ``->dynticks_nesting`` field counts the nesting depth of process +The ``->nesting`` field counts the nesting depth of process execution, so that in normal circumstances this counter has value zero or one. NMIs, irqs, and tracers are counted by the -``->dynticks_nmi_nesting`` field. Because NMIs cannot be masked, changes +``->nmi_nesting`` field. Because NMIs cannot be masked, changes to this variable have to be undertaken carefully using an algorithm provided by Andy Lutomirski. The initial transition from idle adds one, and nested transitions add two, so that a nesting level of five is -represented by a ``->dynticks_nmi_nesting`` value of nine. This counter +represented by a ``->nmi_nesting`` value of nine. This counter can therefore be thought of as counting the number of reasons why this CPU cannot be permitted to enter dyntick-idle mode, aside from process-level transitions. @@ -960,12 +960,12 @@ process-level transitions. However, it turns out that when running in non-idle kernel context, the Linux kernel is fully capable of entering interrupt handlers that never exit and perhaps also vice versa. Therefore, whenever the -``->dynticks_nesting`` field is incremented up from zero, the -``->dynticks_nmi_nesting`` field is set to a large positive number, and -whenever the ``->dynticks_nesting`` field is decremented down to zero, -the ``->dynticks_nmi_nesting`` field is set to zero. Assuming that +``->nesting`` field is incremented up from zero, the +``->nmi_nesting`` field is set to a large positive number, and +whenever the ``->nesting`` field is decremented down to zero, +the ``->nmi_nesting`` field is set to zero. 
Assuming that the number of misnested interrupts is not sufficient to overflow the -counter, this approach corrects the ``->dynticks_nmi_nesting`` field +counter, this approach corrects the ``->nmi_nesting`` field every time the corresponding CPU enters the idle loop from process context. @@ -992,8 +992,8 @@ code. +-----------------------------------------------------------------------+ | **Quick Quiz**: | +-----------------------------------------------------------------------+ -| Why not simply combine the ``->dynticks_nesting`` and | -| ``->dynticks_nmi_nesting`` counters into a single counter that just | +| Why not simply combine the ``->nesting`` and | +| ``->nmi_nesting`` counters into a single counter that just | | counts the number of reasons that the corresponding CPU is non-idle? | +-----------------------------------------------------------------------+ | **Answer**: | diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst index 728b1e690c6460..1a5ff1a9f02e35 100644 --- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst +++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst @@ -147,10 +147,10 @@ RCU read-side critical sections preceding and following the current idle sojourn. This case is handled by calls to the strongly ordered ``atomic_add_return()`` read-modify-write atomic operation that -is invoked within ``rcu_dynticks_eqs_enter()`` at idle-entry -time and within ``rcu_dynticks_eqs_exit()`` at idle-exit time. -The grace-period kthread invokes first ``ct_dynticks_cpu_acquire()`` -(preceded by a full memory barrier) and ``rcu_dynticks_in_eqs_since()`` +is invoked within ``ct_kernel_exit_state()`` at idle-entry +time and within ``ct_kernel_enter_state()`` at idle-exit time. +The grace-period kthread invokes first ``ct_rcu_watching_cpu_acquire()`` +(preceded by a full memory barrier) and ``rcu_watching_snap_stopped_since()`` (both of which rely on acquire semantics) to detect idle CPUs. 
+-----------------------------------------------------------------------+ diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg index 423df00c4df9d5..3fbc19c48a584a 100644 --- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg +++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg @@ -528,7 +528,7 @@ font-style="normal" y="-8652.5312" x="2466.7822" - xml:space="preserve">dyntick_save_progress_counter() + xml:space="preserve">rcu_watching_snap_save() rcu_implicit_dynticks_qs() + xml:space="preserve">rcu_watching_snap_recheck() rcu_dynticks_eqs_enter() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_exit_state() rcu_dynticks_eqs_exit() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_enter_state() dyntick_save_progress_counter() + xml:space="preserve">rcu_watching_snap_save() rcu_implicit_dynticks_qs() + xml:space="preserve">rcu_watching_snap_recheck() rcu_dynticks_eqs_enter() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_exit_state() rcu_dynticks_eqs_exit() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_enter_state() dyntick_save_progress_counter() + xml:space="preserve">rcu_watching_snap_save() rcu_implicit_dynticks_qs() + xml:space="preserve">rcu_watching_snap_recheck() rcu_dynticks_eqs_enter() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_exit_state() rcu_dynticks_eqs_exit() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">ct_kernel_enter_state() dyntick_save_progress_counter() + xml:space="preserve">rcu_watching_snap_save() rcu_implicit_dynticks_qs() + xml:space="preserve">rcu_watching_snap_recheck() rcu_barrier() - call_srcu() -> srcu_barrier() - call_rcu_tasks() -> rcu_barrier_tasks() - - call_rcu_tasks_rude() -> rcu_barrier_tasks_rude() - call_rcu_tasks_trace() -> rcu_barrier_tasks_trace() However, these barrier functions are absolutely *not* guaranteed @@ -539,7 +535,6 @@ over a rather long period of time, but improvements are always welcome! - Either synchronize_srcu() or synchronize_srcu_expedited(), together with and srcu_barrier() - synchronize_rcu_tasks() and rcu_barrier_tasks() - - synchronize_tasks_rude() and rcu_barrier_tasks_rude() - synchronize_tasks_trace() and rcu_barrier_tasks_trace() If necessary, you can use something like workqueues to execute diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst index d585a5490aeec7..1ef5784c1b84e2 100644 --- a/Documentation/RCU/whatisRCU.rst +++ b/Documentation/RCU/whatisRCU.rst @@ -1103,7 +1103,7 @@ RCU-Tasks-Rude:: Critical sections Grace period Barrier - N/A call_rcu_tasks_rude rcu_barrier_tasks_rude + N/A N/A synchronize_rcu_tasks_rude diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst index efb7771273bbc3..018d6cc173d7e9 100644 --- a/Documentation/accel/qaic/qaic.rst +++ b/Documentation/accel/qaic/qaic.rst @@ -93,7 +93,7 @@ commands (does not impact QAIC). 
uAPI ==== -QAIC creates an accel device per phsyical PCIe device. This accel device exists +QAIC creates an accel device per physical PCIe device. This accel device exists for as long as the PCIe device is known to Linux. The PCIe device may not be in the state to accept requests from userspace at @@ -147,12 +147,6 @@ DRM_IOCTL_QAIC_PERF_STATS_BO recent execution of a BO. This allows userspace to construct an end to end timeline of the BO processing for a performance analysis. -DRM_IOCTL_QAIC_PART_DEV - This IOCTL allows userspace to request a duplicate "shadow device". This extra - accelN device is associated with a specific partition of resources on the - AIC100 device and can be used for limiting a process to some subset of - resources. - DRM_IOCTL_QAIC_DETACH_SLICE_BO This IOCTL allows userspace to remove the slicing information from a BO that was originally provided by a call to DRM_IOCTL_QAIC_ATTACH_SLICE_BO. This diff --git a/Documentation/admin-guide/LSM/index.rst b/Documentation/admin-guide/LSM/index.rst index a6ba95fbaa9f10..ce63be6d64ad63 100644 --- a/Documentation/admin-guide/LSM/index.rst +++ b/Documentation/admin-guide/LSM/index.rst @@ -47,3 +47,4 @@ subdirectories. tomoyo Yama SafeSetID + ipe diff --git a/Documentation/admin-guide/LSM/ipe.rst b/Documentation/admin-guide/LSM/ipe.rst new file mode 100644 index 00000000000000..f38e641df0e97f --- /dev/null +++ b/Documentation/admin-guide/LSM/ipe.rst @@ -0,0 +1,790 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Integrity Policy Enforcement (IPE) +================================== + +.. NOTE:: + + This is the documentation for admins, system builders, or individuals + attempting to use IPE. If you're looking for more developer-focused + documentation about IPE please see :doc:`the design docs `. + +Overview +-------- + +Integrity Policy Enforcement (IPE) is a Linux Security Module that takes a +complementary approach to access control. Unlike traditional access control +mechanisms that rely on labels and paths for decision-making, IPE focuses +on the immutable security properties inherent to system components. These +properties are fundamental attributes or features of a system component +that cannot be altered, ensuring a consistent and reliable basis for +security decisions. + +To elaborate, in the context of IPE, system components primarily refer to +files or the devices these files reside on. However, this is just a +starting point. The concept of system components is flexible and can be +extended to include new elements as the system evolves. The immutable +properties include the origin of a file, which remains constant and +unchangeable over time. For example, IPE policies can be crafted to trust +files originating from the initramfs. Since initramfs is typically verified +by the bootloader, its files are deemed trustworthy; "file is from +initramfs" becomes an immutable property under IPE's consideration. + +The immutable property concept extends to the security features enabled on +a file's origin, such as dm-verity or fs-verity, which provide a layer of +integrity and trust. For example, IPE allows the definition of policies +that trust files from a dm-verity protected device. dm-verity ensures the +integrity of an entire device by providing a verifiable and immutable state +of its contents. Similarly, fs-verity offers filesystem-level integrity +checks, allowing IPE to enforce policies that trust files protected by +fs-verity. 
These two features cannot be turned off once established, so +they are considered immutable properties. These examples demonstrate how +IPE leverages immutable properties, such as a file's origin and its +integrity protection mechanisms, to make access control decisions. + +For the IPE policy, specifically, it grants the ability to enforce +stringent access controls by assessing security properties against +reference values defined within the policy. This assessment can be based on +the existence of a security property (e.g., verifying if a file originates +from initramfs) or evaluating the internal state of an immutable security +property. The latter includes checking the roothash of a dm-verity +protected device, determining whether dm-verity possesses a valid +signature, assessing the digest of a fs-verity protected file, or +determining whether fs-verity possesses a valid built-in signature. This +nuanced approach to policy enforcement enables a highly secure and +customizable system defense mechanism, tailored to specific security +requirements and trust models. + +To enable IPE, ensure that ``CONFIG_SECURITY_IPE`` (under +:menuselection:`Security -> Integrity Policy Enforcement (IPE)`) config +option is enabled. + +Use Cases +--------- + +IPE works best in fixed-function devices: devices in which their purpose +is clearly defined and not supposed to be changed (e.g. network firewall +device in a data center, an IoT device, etcetera), where all software and +configuration is built and provisioned by the system owner. + +IPE is a long-way off for use in general-purpose computing: the Linux +community as a whole tends to follow a decentralized trust model (known as +the web of trust), which IPE has no support for it yet. Instead, IPE +supports PKI (public key infrastructure), which generally designates a +set of trusted entities that provide a measure of absolute trust. + +Additionally, while most packages are signed today, the files inside +the packages (for instance, the executables), tend to be unsigned. This +makes it difficult to utilize IPE in systems where a package manager is +expected to be functional, without major changes to the package manager +and ecosystem behind it. + +The digest_cache LSM [#digest_cache_lsm]_ is a system that when combined with IPE, +could be used to enable and support general-purpose computing use cases. + +Known Limitations +----------------- + +IPE cannot verify the integrity of anonymous executable memory, such as +the trampolines created by gcc closures and libffi (<3.4.2), or JIT'd code. +Unfortunately, as this is dynamically generated code, there is no way +for IPE to ensure the integrity of this code to form a trust basis. + +IPE cannot verify the integrity of programs written in interpreted +languages when these scripts are invoked by passing these program files +to the interpreter. This is because the way interpreters execute these +files; the scripts themselves are not evaluated as executable code +through one of IPE's hooks, but they are merely text files that are read +(as opposed to compiled executables) [#interpreters]_. + +Threat Model +------------ + +IPE specifically targets the risk of tampering with user-space executable +code after the kernel has initially booted, including the kernel modules +loaded from userspace via ``modprobe`` or ``insmod``. + +To illustrate, consider a scenario where an untrusted binary, possibly +malicious, is downloaded along with all necessary dependencies, including a +loader and libc. 
The primary function of IPE in this context is to prevent +the execution of such binaries and their dependencies. + +IPE achieves this by verifying the integrity and authenticity of all +executable code before allowing them to run. It conducts a thorough +check to ensure that the code's integrity is intact and that they match an +authorized reference value (digest, signature, etc) as per the defined +policy. If a binary does not pass this verification process, either +because its integrity has been compromised or it does not meet the +authorization criteria, IPE will deny its execution. Additionally, IPE +generates audit logs which may be utilized to detect and analyze failures +resulting from policy violation. + +Tampering threat scenarios include modification or replacement of +executable code by a range of actors including: + +- Actors with physical access to the hardware +- Actors with local network access to the system +- Actors with access to the deployment system +- Compromised internal systems under external control +- Malicious end users of the system +- Compromised end users of the system +- Remote (external) compromise of the system + +IPE does not mitigate threats arising from malicious but authorized +developers (with access to a signing certificate), or compromised +developer tools used by them (i.e. return-oriented programming attacks). +Additionally, IPE draws hard security boundary between userspace and +kernelspace. As a result, kernel-level exploits are considered outside +the scope of IPE and mitigation is left to other mechanisms. + +Policy +------ + +IPE policy is a plain-text [#devdoc]_ policy composed of multiple statements +over several lines. There is one required line, at the top of the +policy, indicating the policy name, and the policy version, for +instance:: + + policy_name=Ex_Policy policy_version=0.0.0 + +The policy name is a unique key identifying this policy in a human +readable name. This is used to create nodes under securityfs as well as +uniquely identify policies to deploy new policies vs update existing +policies. + +The policy version indicates the current version of the policy (NOT the +policy syntax version). This is used to prevent rollback of policy to +potentially insecure previous versions of the policy. + +The next portion of IPE policy are rules. Rules are formed by key=value +pairs, known as properties. IPE rules require two properties: ``action``, +which determines what IPE does when it encounters a match against the +rule, and ``op``, which determines when the rule should be evaluated. +The ordering is significant, a rule must start with ``op``, and end with +``action``. Thus, a minimal rule is:: + + op=EXECUTE action=ALLOW + +This example will allow any execution. Additional properties are used to +assess immutable security properties about the files being evaluated. +These properties are intended to be descriptions of systems within the +kernel that can provide a measure of integrity verification, such that IPE +can determine the trust of the resource based on the value of the property. + +Rules are evaluated top-to-bottom. As a result, any revocation rules, +or denies should be placed early in the file to ensure that these rules +are evaluated before a rule with ``action=ALLOW``. + +IPE policy supports comments. The character '#' will function as a +comment, ignoring all characters to the right of '#' until the newline. + +The default behavior of IPE evaluations can also be expressed in policy, +through the ``DEFAULT`` statement. 
This can be done at a global level, +or a per-operation level:: + + # Global + DEFAULT action=ALLOW + + # Operation Specific + DEFAULT op=EXECUTE action=ALLOW + +A default must be set for all known operations in IPE. If you want to +preserve older policies being compatible with newer kernels that can introduce +new operations, set a global default of ``ALLOW``, then override the +defaults on a per-operation basis (as above). + +With configurable policy-based LSMs, there's several issues with +enforcing the configurable policies at startup, around reading and +parsing the policy: + +1. The kernel *should* not read files from userspace, so directly reading + the policy file is prohibited. +2. The kernel command line has a character limit, and one kernel module + should not reserve the entire character limit for its own + configuration. +3. There are various boot loaders in the kernel ecosystem, so handing + off a memory block would be costly to maintain. + +As a result, IPE has addressed this problem through a concept of a "boot +policy". A boot policy is a minimal policy which is compiled into the +kernel. This policy is intended to get the system to a state where +userspace is set up and ready to receive commands, at which point a more +complex policy can be deployed via securityfs. The boot policy can be +specified via ``SECURITY_IPE_BOOT_POLICY`` config option, which accepts +a path to a plain-text version of the IPE policy to apply. This policy +will be compiled into the kernel. If not specified, IPE will be disabled +until a policy is deployed and activated through securityfs. + +Deploying Policies +~~~~~~~~~~~~~~~~~~ + +Policies can be deployed from userspace through securityfs. These policies +are signed through the PKCS#7 message format to enforce some level of +authorization of the policies (prohibiting an attacker from gaining +unconstrained root, and deploying an "allow all" policy). These +policies must be signed by a certificate that chains to the +``SYSTEM_TRUSTED_KEYRING``. With openssl, the policy can be signed by:: + + openssl smime -sign \ + -in "$MY_POLICY" \ + -signer "$MY_CERTIFICATE" \ + -inkey "$MY_PRIVATE_KEY" \ + -noattr \ + -nodetach \ + -nosmimecap \ + -outform der \ + -out "$MY_POLICY.p7b" + +Deploying the policies is done through securityfs, through the +``new_policy`` node. To deploy a policy, simply cat the file into the +securityfs node:: + + cat "$MY_POLICY.p7b" > /sys/kernel/security/ipe/new_policy + +Upon success, this will create one subdirectory under +``/sys/kernel/security/ipe/policies/``. The subdirectory will be the +``policy_name`` field of the policy deployed, so for the example above, +the directory will be ``/sys/kernel/security/ipe/policies/Ex_Policy``. +Within this directory, there will be seven files: ``pkcs7``, ``policy``, +``name``, ``version``, ``active``, ``update``, and ``delete``. + +The ``pkcs7`` file is read-only. Reading it returns the raw PKCS#7 data +that was provided to the kernel, representing the policy. If the policy being +read is the boot policy, this will return ``ENOENT``, as it is not signed. + +The ``policy`` file is read only. Reading it returns the PKCS#7 inner +content of the policy, which will be the plain text policy. + +The ``active`` file is used to set a policy as the currently active policy. +This file is rw, and accepts a value of ``"1"`` to set the policy as active. +Since only a single policy can be active at one time, all other policies +will be marked inactive. 
+The policy being marked active must have a policy version greater than
+or equal to the currently-running version.
+
+The ``update`` file is used to update a policy that is already present
+in the kernel. This file is write-only and accepts a PKCS#7 signed
+policy. Two checks will always be performed on this policy: First, the
+``policy_name`` of the updated policy must match that of the existing
+policy. Second, the updated policy must have a policy version greater
+than or equal to the currently-running version. This is to prevent
+rollback attacks.
+
+The ``delete`` file is used to remove a policy that is no longer needed.
+This file is write-only and accepts a value of ``1`` to delete the policy.
+On deletion, the securityfs node representing the policy will be removed.
+However, deleting the currently active policy is not allowed and will
+return an operation-not-permitted error.
+
+Similarly, writing to both ``update`` and ``new_policy`` could result in a
+bad-message error (policy syntax error) or a file-exists error. The latter
+error happens when trying to deploy a policy with a ``policy_name`` while
+the kernel already has a deployed policy with the same ``policy_name``.
+
+Deploying a policy will *not* cause IPE to start enforcing the policy. IPE
+will only enforce the policy marked active. Note that only one policy can
+be active at a time.
+
+Once deployment is successful, the policy can be activated by writing to
+the file ``/sys/kernel/security/ipe/policies/$policy_name/active``.
+For example, the ``Ex_Policy`` can be activated by::
+
+   echo 1 > "/sys/kernel/security/ipe/policies/Ex_Policy/active"
+
+From this point on, ``Ex_Policy`` is the enforced policy on the
+system.
+
+IPE also provides a way to delete policies. This can be done via the
+``delete`` securityfs node,
+``/sys/kernel/security/ipe/policies/$policy_name/delete``.
+Writing ``1`` to that file deletes the policy::
+
+   echo 1 > "/sys/kernel/security/ipe/policies/$policy_name/delete"
+
+There is only one requirement to delete a policy: the policy being deleted
+must be inactive.
+
+.. NOTE::
+
+   If a traditional MAC system is enabled (SELinux, apparmor, smack), all
+   writes to ipe's securityfs nodes require ``CAP_MAC_ADMIN``.
+
+Modes
+~~~~~
+
+IPE supports two modes of operation: permissive (similar to SELinux's
+permissive mode) and enforced. In permissive mode, all events are
+checked and policy violations are logged, but the policy is not actually
+enforced. This allows users to test policies before enforcing them.
+
+The default mode is enforced, and can be changed via the kernel command
+line parameter ``ipe.enforce=(0|1)``, or the securityfs node
+``/sys/kernel/security/ipe/enforce``.
+
+.. NOTE::
+
+   If a traditional MAC system is enabled (SELinux, apparmor, smack, etcetera),
+   all writes to ipe's securityfs nodes require ``CAP_MAC_ADMIN``.
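+For example, a new policy is often vetted in permissive mode first, with
+enforcement re-enabled once the audit log looks clean. A minimal sketch,
+using only the ``enforce`` node described above (``run-my-workload`` is a
+hypothetical test workload, not part of IPE)::
+
+   # violations are only logged while enforcement is off
+   echo 0 > /sys/kernel/security/ipe/enforce
+   run-my-workload
+   echo 1 > /sys/kernel/security/ipe/enforce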
+ +Audit Events +~~~~~~~~~~~~ + +1420 AUDIT_IPE_ACCESS +^^^^^^^^^^^^^^^^^^^^^ +Event Examples:: + + type=1420 audit(1653364370.067:61): ipe_op=EXECUTE ipe_hook=MMAP enforcing=1 pid=2241 comm="ld-linux.so" path="/deny/lib/libc.so.6" dev="sda2" ino=14549020 rule="DEFAULT action=DENY" + type=1300 audit(1653364370.067:61): SYSCALL arch=c000003e syscall=9 success=no exit=-13 a0=7f1105a28000 a1=195000 a2=5 a3=812 items=0 ppid=2219 pid=2241 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=2 comm="ld-linux.so" exe="/tmp/ipe-test/lib/ld-linux.so" subj=unconfined key=(null) + type=1327 audit(1653364370.067:61): 707974686F6E3300746573742F6D61696E2E7079002D6E00 + + type=1420 audit(1653364735.161:64): ipe_op=EXECUTE ipe_hook=MMAP enforcing=1 pid=2472 comm="mmap_test" path=? dev=? ino=? rule="DEFAULT action=DENY" + type=1300 audit(1653364735.161:64): SYSCALL arch=c000003e syscall=9 success=no exit=-13 a0=0 a1=1000 a2=4 a3=21 items=0 ppid=2219 pid=2472 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=2 comm="mmap_test" exe="/root/overlake_test/upstream_test/vol_fsverity/bin/mmap_test" subj=unconfined key=(null) + type=1327 audit(1653364735.161:64): 707974686F6E3300746573742F6D61696E2E7079002D6E00 + +This event indicates that IPE made an access control decision; the IPE +specific record (1420) is always emitted in conjunction with a +``AUDITSYSCALL`` record. + +Determining whether IPE is in permissive or enforced mode can be derived +from ``success`` property and exit code of the ``AUDITSYSCALL`` record. + + +Field descriptions: + ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| Field | Value Type | Optional? | Description of Value | ++===========+============+===========+=================================================================================+ +| ipe_op | string | No | The IPE operation name associated with the log | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| ipe_hook | string | No | The name of the LSM hook that triggered the IPE event | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| enforcing | integer | No | The current IPE enforcing state 1 is in enforcing mode, 0 is in permissive mode | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| pid | integer | No | The pid of the process that triggered the IPE event. | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| comm | string | No | The command line program name of the process that triggered the IPE event | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| path | string | Yes | The absolute path to the evaluated file | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| ino | integer | Yes | The inode number of the evaluated file | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| dev | string | Yes | The device name of the evaluated file, e.g. 
vda | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ +| rule | string | No | The matched policy rule | ++-----------+------------+-----------+---------------------------------------------------------------------------------+ + +1421 AUDIT_IPE_CONFIG_CHANGE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Event Example:: + + type=1421 audit(1653425583.136:54): old_active_pol_name="Allow_All" old_active_pol_version=0.0.0 old_policy_digest=sha256:E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855 new_active_pol_name="boot_verified" new_active_pol_version=0.0.0 new_policy_digest=sha256:820EEA5B40CA42B51F68962354BA083122A20BB846F26765076DD8EED7B8F4DB auid=4294967295 ses=4294967295 lsm=ipe res=1 + type=1300 audit(1653425583.136:54): SYSCALL arch=c000003e syscall=1 success=yes exit=2 a0=3 a1=5596fcae1fb0 a2=2 a3=2 items=0 ppid=184 pid=229 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=4294967295 comm="python3" exe="/usr/bin/python3.10" key=(null) + type=1327 audit(1653425583.136:54): PROCTITLE proctitle=707974686F6E3300746573742F6D61696E2E7079002D66002E2 + +This event indicates that IPE switched the active poliy from one to another +along with the version and the hash digest of the two policies. +Note IPE can only have one policy active at a time, all access decision +evaluation is based on the current active policy. +The normal procedure to deploy a new policy is loading the policy to deploy +into the kernel first, then switch the active policy to it. + +This record will always be emitted in conjunction with a ``AUDITSYSCALL`` record for the ``write`` syscall. + +Field descriptions: + ++------------------------+------------+-----------+---------------------------------------------------+ +| Field | Value Type | Optional? 
| Description of Value | ++========================+============+===========+===================================================+ +| old_active_pol_name | string | Yes | The name of previous active policy | ++------------------------+------------+-----------+---------------------------------------------------+ +| old_active_pol_version | string | Yes | The version of previous active policy | ++------------------------+------------+-----------+---------------------------------------------------+ +| old_policy_digest | string | Yes | The hash of previous active policy | ++------------------------+------------+-----------+---------------------------------------------------+ +| new_active_pol_name | string | No | The name of current active policy | ++------------------------+------------+-----------+---------------------------------------------------+ +| new_active_pol_version | string | No | The version of current active policy | ++------------------------+------------+-----------+---------------------------------------------------+ +| new_policy_digest | string | No | The hash of current active policy | ++------------------------+------------+-----------+---------------------------------------------------+ +| auid | integer | No | The login user ID | ++------------------------+------------+-----------+---------------------------------------------------+ +| ses | integer | No | The login session ID | ++------------------------+------------+-----------+---------------------------------------------------+ +| lsm | string | No | The lsm name associated with the event | ++------------------------+------------+-----------+---------------------------------------------------+ +| res | integer | No | The result of the audited operation(success/fail) | ++------------------------+------------+-----------+---------------------------------------------------+ + +1422 AUDIT_IPE_POLICY_LOAD +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Event Example:: + + type=1422 audit(1653425529.927:53): policy_name="boot_verified" policy_version=0.0.0 policy_digest=sha256:820EEA5B40CA42B51F68962354BA083122A20BB846F26765076DD8EED7B8F4DB auid=4294967295 ses=4294967295 lsm=ipe res=1 + type=1300 audit(1653425529.927:53): arch=c000003e syscall=1 success=yes exit=2567 a0=3 a1=5596fcae1fb0 a2=a07 a3=2 items=0 ppid=184 pid=229 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=pts0 ses=4294967295 comm="python3" exe="/usr/bin/python3.10" key=(null) + type=1327 audit(1653425529.927:53): PROCTITLE proctitle=707974686F6E3300746573742F6D61696E2E7079002D66002E2E + +This record indicates a new policy has been loaded into the kernel with the policy name, policy version and policy hash. + +This record will always be emitted in conjunction with a ``AUDITSYSCALL`` record for the ``write`` syscall. + +Field descriptions: + ++----------------+------------+-----------+---------------------------------------------------+ +| Field | Value Type | Optional? 
| Description of Value | ++================+============+===========+===================================================+ +| policy_name | string | No | The policy_name | ++----------------+------------+-----------+---------------------------------------------------+ +| policy_version | string | No | The policy_version | ++----------------+------------+-----------+---------------------------------------------------+ +| policy_digest | string | No | The policy hash | ++----------------+------------+-----------+---------------------------------------------------+ +| auid | integer | No | The login user ID | ++----------------+------------+-----------+---------------------------------------------------+ +| ses | integer | No | The login session ID | ++----------------+------------+-----------+---------------------------------------------------+ +| lsm | string | No | The lsm name associated with the event | ++----------------+------------+-----------+---------------------------------------------------+ +| res | integer | No | The result of the audited operation(success/fail) | ++----------------+------------+-----------+---------------------------------------------------+ + + +1404 AUDIT_MAC_STATUS +^^^^^^^^^^^^^^^^^^^^^ + +Event Examples:: + + type=1404 audit(1653425689.008:55): enforcing=0 old_enforcing=1 auid=4294967295 ses=4294967295 enabled=1 old-enabled=1 lsm=ipe res=1 + type=1300 audit(1653425689.008:55): arch=c000003e syscall=1 success=yes exit=2 a0=1 a1=55c1065e5c60 a2=2 a3=0 items=0 ppid=405 pid=441 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=) + type=1327 audit(1653425689.008:55): proctitle="-bash" + + type=1404 audit(1653425689.008:55): enforcing=1 old_enforcing=0 auid=4294967295 ses=4294967295 enabled=1 old-enabled=1 lsm=ipe res=1 + type=1300 audit(1653425689.008:55): arch=c000003e syscall=1 success=yes exit=2 a0=1 a1=55c1065e5c60 a2=2 a3=0 items=0 ppid=405 pid=441 auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=) + type=1327 audit(1653425689.008:55): proctitle="-bash" + +This record will always be emitted in conjunction with a ``AUDITSYSCALL`` record for the ``write`` syscall. + +Field descriptions: + ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ +| Field | Value Type | Optional? 
| Description of Value | ++===============+============+===========+=================================================================================================+ +| enforcing | integer | No | The enforcing state IPE is being switched to, 1 is in enforcing mode, 0 is in permissive mode | ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ +| old_enforcing | integer | No | The enforcing state IPE is being switched from, 1 is in enforcing mode, 0 is in permissive mode | ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ +| auid | integer | No | The login user ID | ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ +| ses | integer | No | The login session ID | ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ +| enabled | integer | No | The new TTY audit enabled setting | ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ +| old-enabled | integer | No | The old TTY audit enabled setting | ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ +| lsm | string | No | The lsm name associated with the event | ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ +| res | integer | No | The result of the audited operation(success/fail) | ++---------------+------------+-----------+-------------------------------------------------------------------------------------------------+ + + +Success Auditing +^^^^^^^^^^^^^^^^ + +IPE supports success auditing. When enabled, all events that pass IPE +policy and are not blocked will emit an audit event. This is disabled by +default, and can be enabled via the kernel command line +``ipe.success_audit=(0|1)`` or +``/sys/kernel/security/ipe/success_audit`` securityfs file. + +This is *very* noisy, as IPE will check every userspace binary on the +system, but is useful for debugging policies. + +.. NOTE:: + + If a traditional MAC system is enabled (SELinux, apparmor, smack, etcetera), + all writes to ipe's securityfs nodes require ``CAP_MAC_ADMIN``. + +Properties +---------- + +As explained above, IPE properties are ``key=value`` pairs expressed in IPE +policy. Two properties are built-into the policy parser: 'op' and 'action'. +The other properties are used to restrict immutable security properties +about the files being evaluated. Currently those properties are: +'``boot_verified``', '``dmverity_signature``', '``dmverity_roothash``', +'``fsverity_signature``', '``fsverity_digest``'. A description of all +properties supported by IPE are listed below: + +op +~~ + +Indicates the operation for a rule to apply to. Must be in every rule, +as the first token. IPE supports the following operations: + + ``EXECUTE`` + + Pertains to any file attempting to be executed, or loaded as an + executable. + + ``FIRMWARE``: + + Pertains to firmware being loaded via the firmware_class interface. + This covers both the preallocated buffer and the firmware file + itself. + + ``KMODULE``: + + Pertains to loading kernel modules via ``modprobe`` or ``insmod``. 
+ + ``KEXEC_IMAGE``: + + Pertains to kernel images loading via ``kexec``. + + ``KEXEC_INITRAMFS`` + + Pertains to initrd images loading via ``kexec --initrd``. + + ``POLICY``: + + Controls loading policies via reading a kernel-space initiated read. + + An example of such is loading IMA policies by writing the path + to the policy file to ``$securityfs/ima/policy`` + + ``X509_CERT``: + + Controls loading IMA certificates through the Kconfigs, + ``CONFIG_IMA_X509_PATH`` and ``CONFIG_EVM_X509_PATH``. + +action +~~~~~~ + + Determines what IPE should do when a rule matches. Must be in every + rule, as the final clause. Can be one of: + + ``ALLOW``: + + If the rule matches, explicitly allow access to the resource to proceed + without executing any more rules. + + ``DENY``: + + If the rule matches, explicitly prohibit access to the resource to + proceed without executing any more rules. + +boot_verified +~~~~~~~~~~~~~ + + This property can be utilized for authorization of files from initramfs. + The format of this property is:: + + boot_verified=(TRUE|FALSE) + + + .. WARNING:: + + This property will trust files from initramfs(rootfs). It should + only be used during early booting stage. Before mounting the real + rootfs on top of the initramfs, initramfs script will recursively + remove all files and directories on the initramfs. This is typically + implemented by using switch_root(8) [#switch_root]_. Therefore the + initramfs will be empty and not accessible after the real + rootfs takes over. It is advised to switch to a different policy + that doesn't rely on the property after this point. + This ensures that the trust policies remain relevant and effective + throughout the system's operation. + +dmverity_roothash +~~~~~~~~~~~~~~~~~ + + This property can be utilized for authorization or revocation of + specific dm-verity volumes, identified via their root hashes. It has a + dependency on the DM_VERITY module. This property is controlled by + the ``IPE_PROP_DM_VERITY`` config option, it will be automatically + selected when ``SECURITY_IPE`` and ``DM_VERITY`` are all enabled. + The format of this property is:: + + dmverity_roothash=DigestName:HexadecimalString + + The supported DigestNames for dmverity_roothash are [#dmveritydigests]_ + + + blake2b-512 + + blake2s-256 + + sha256 + + sha384 + + sha512 + + sha3-224 + + sha3-256 + + sha3-384 + + sha3-512 + + sm3 + + rmd160 + +dmverity_signature +~~~~~~~~~~~~~~~~~~ + + This property can be utilized for authorization of all dm-verity + volumes that have a signed roothash that validated by a keyring + specified by dm-verity's configuration, either the system trusted + keyring, or the secondary keyring. It depends on + ``DM_VERITY_VERIFY_ROOTHASH_SIG`` config option and is controlled by + the ``IPE_PROP_DM_VERITY_SIGNATURE`` config option, it will be automatically + selected when ``SECURITY_IPE``, ``DM_VERITY`` and + ``DM_VERITY_VERIFY_ROOTHASH_SIG`` are all enabled. + The format of this property is:: + + dmverity_signature=(TRUE|FALSE) + +fsverity_digest +~~~~~~~~~~~~~~~ + + This property can be utilized for authorization of specific fsverity + enabled files, identified via their fsverity digests. + It depends on ``FS_VERITY`` config option and is controlled by + the ``IPE_PROP_FS_VERITY`` config option, it will be automatically + selected when ``SECURITY_IPE`` and ``FS_VERITY`` are all enabled. 
+ The format of this property is:: + + fsverity_digest=DigestName:HexadecimalString + + The supported DigestNames for fsverity_digest are [#fsveritydigest]_ + + + sha256 + + sha512 + +fsverity_signature +~~~~~~~~~~~~~~~~~~ + + This property is used to authorize all fs-verity enabled files that have + been verified by fs-verity's built-in signature mechanism. The signature + verification relies on a key stored within the ".fs-verity" keyring. It + depends on ``FS_VERITY_BUILTIN_SIGNATURES`` config option and + it is controlled by the ``IPE_PROP_FS_VERITY`` config option, + it will be automatically selected when ``SECURITY_IPE``, ``FS_VERITY`` + and ``FS_VERITY_BUILTIN_SIGNATURES`` are all enabled. + The format of this property is:: + + fsverity_signature=(TRUE|FALSE) + +Policy Examples +--------------- + +Allow all +~~~~~~~~~ + +:: + + policy_name=Allow_All policy_version=0.0.0 + DEFAULT action=ALLOW + +Allow only initramfs +~~~~~~~~~~~~~~~~~~~~ + +:: + + policy_name=Allow_Initramfs policy_version=0.0.0 + DEFAULT action=DENY + + op=EXECUTE boot_verified=TRUE action=ALLOW + +Allow any signed and validated dm-verity volume and the initramfs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + policy_name=Allow_Signed_DMV_And_Initramfs policy_version=0.0.0 + DEFAULT action=DENY + + op=EXECUTE boot_verified=TRUE action=ALLOW + op=EXECUTE dmverity_signature=TRUE action=ALLOW + +Prohibit execution from a specific dm-verity volume +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + policy_name=Deny_DMV_By_Roothash policy_version=0.0.0 + DEFAULT action=DENY + + op=EXECUTE dmverity_roothash=sha256:cd2c5bae7c6c579edaae4353049d58eb5f2e8be0244bf05345bc8e5ed257baff action=DENY + + op=EXECUTE boot_verified=TRUE action=ALLOW + op=EXECUTE dmverity_signature=TRUE action=ALLOW + +Allow only a specific dm-verity volume +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + policy_name=Allow_DMV_By_Roothash policy_version=0.0.0 + DEFAULT action=DENY + + op=EXECUTE dmverity_roothash=sha256:401fcec5944823ae12f62726e8184407a5fa9599783f030dec146938 action=ALLOW + +Allow any fs-verity file with a valid built-in signature +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + policy_name=Allow_Signed_And_Validated_FSVerity policy_version=0.0.0 + DEFAULT action=DENY + + op=EXECUTE fsverity_signature=TRUE action=ALLOW + +Allow execution of a specific fs-verity file +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + policy_name=ALLOW_FSV_By_Digest policy_version=0.0.0 + DEFAULT action=DENY + + op=EXECUTE fsverity_digest=sha256:fd88f2b8824e197f850bf4c5109bea5cf0ee38104f710843bb72da796ba5af9e action=ALLOW + +Additional Information +---------------------- + +- `Github Repository `_ +- :doc:`Developer and design docs for IPE ` + +FAQ +--- + +Q: + What's the difference between other LSMs which provide a measure of + trust-based access control? + +A: + + In general, there's two other LSMs that can provide similar functionality: + IMA, and Loadpin. + + IMA and IPE are functionally very similar. The significant difference between + the two is the policy. [#devdoc]_ + + Loadpin and IPE differ fairly dramatically, as Loadpin only covers the IPE's + kernel read operations, whereas IPE is capable of controlling execution + on top of kernel read. The trust model is also different; Loadpin roots its + trust in the initial super-block, whereas trust in IPE is stemmed from kernel + itself (via ``SYSTEM_TRUSTED_KEYS``). + +----------- + +.. 
[#digest_cache_lsm] https://lore.kernel.org/lkml/20240415142436.2545003-1-roberto.sassu@huaweicloud.com/ + +.. [#interpreters] There is `some interest in solving this issue `_. + +.. [#devdoc] Please see :doc:`the design docs ` for more on + this topic. + +.. [#switch_root] https://man7.org/linux/man-pages/man8/switch_root.8.html + +.. [#dmveritydigests] These hash algorithms are based on values accepted by + the Linux crypto API; IPE does not impose any + restrictions on the digest algorithm itself; + thus, this list may be out of date. + +.. [#fsveritydigest] These hash algorithms are based on values accepted by the + kernel's fsverity support; IPE does not impose any + restrictions on the digest algorithm itself; + thus, this list may be out of date. diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst index 091e8bb388875d..678d70d6e1c3ac 100644 --- a/Documentation/admin-guide/blockdev/zram.rst +++ b/Documentation/admin-guide/blockdev/zram.rst @@ -102,17 +102,41 @@ Examples:: #select lzo compression algorithm echo lzo > /sys/block/zram0/comp_algorithm -For the time being, the `comp_algorithm` content does not necessarily -show every compression algorithm supported by the kernel. We keep this -list primarily to simplify device configuration and one can configure -a new device with a compression algorithm that is not listed in -`comp_algorithm`. The thing is that, internally, ZRAM uses Crypto API -and, if some of the algorithms were built as modules, it's impossible -to list all of them using, for instance, /proc/crypto or any other -method. This, however, has an advantage of permitting the usage of -custom crypto compression modules (implementing S/W or H/W compression). - -4) Set Disksize +For the time being, the `comp_algorithm` content shows only compression +algorithms that are supported by zram. + +4) Set compression algorithm parameters: Optional +================================================= + +Compression algorithms may support specific parameters which can be +tweaked for particular dataset. ZRAM has an `algorithm_params` device +attribute which provides a per-algorithm params configuration. + +For example, several compression algorithms support `level` parameter. +In addition, certain compression algorithms support pre-trained dictionaries, +which significantly change algorithms' characteristics. In order to configure +compression algorithm to use external pre-trained dictionary, pass full +path to the `dict` along with other parameters:: + + #pass path to pre-trained zstd dictionary + echo "algo=zstd dict=/etc/dictioary" > /sys/block/zram0/algorithm_params + + #same, but using algorithm priority + echo "priority=1 dict=/etc/dictioary" > \ + /sys/block/zram0/algorithm_params + + #pass path to pre-trained zstd dictionary and compression level + echo "algo=zstd level=8 dict=/etc/dictioary" > \ + /sys/block/zram0/algorithm_params + +Parameters are algorithm specific: not all algorithms support pre-trained +dictionaries, not all algorithms support `level`. Furthermore, for certain +algorithms `level` controls the compression level (the higher the value the +better the compression ratio, it even can take negatives values for some +algorithms), for other algorithms `level` is acceleration level (the higher +the value the lower the compression ratio). + +5) Set Disksize =============== Set disk size by writing the value to sysfs node 'disksize'. 
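+For instance, the algorithm selection, parameter tuning and disksize steps
+can be chained into one short setup sequence. A minimal sketch (assuming
+zram0 already exists and the zstd algorithm is available on the system)::
+
+	#select the algorithm, tune its compression level, then size the device
+	echo zstd > /sys/block/zram0/comp_algorithm
+	echo "algo=zstd level=8" > /sys/block/zram0/algorithm_params
+	echo 1G > /sys/block/zram0/disksize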
@@ -132,7 +156,7 @@ There is little point creating a zram of greater than twice the size of memory since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the size of the disk when not in use so a huge zram is wasteful. -5) Set memory limit: Optional +6) Set memory limit: Optional ============================= Set memory limit by writing the value to sysfs node 'mem_limit'. @@ -151,7 +175,7 @@ Examples:: # To disable memory limit echo 0 > /sys/block/zram0/mem_limit -6) Activate +7) Activate =========== :: @@ -162,7 +186,7 @@ Examples:: mkfs.ext4 /dev/zram1 mount /dev/zram1 /tmp -7) Add/remove zram devices +8) Add/remove zram devices ========================== zram provides a control interface, which enables dynamic (on-demand) device @@ -182,7 +206,7 @@ execute:: echo X > /sys/class/zram-control/hot_remove -8) Stats +9) Stats ======== Per-device statistics are exported as various nodes under /sys/block/zram/ @@ -205,6 +229,7 @@ writeback_limit_enable RW show and set writeback_limit feature max_comp_streams RW the number of possible concurrent compress operations comp_algorithm RW show and change the compression algorithm +algorithm_params WO setup compression algorithm parameters compact WO trigger memory compaction debug_stat RO this file is used for zram debugging purposes backing_dev RW set up backend storage for zram to write out @@ -283,15 +308,15 @@ a single line of text and contains the following stats separated by whitespace: Unit: 4K bytes ============== ============================================================= -9) Deactivate -============= +10) Deactivate +============== :: swapoff /dev/zram0 umount /dev/zram1 -10) Reset +11) Reset ========= Write any positive value to 'reset' sysfs node:: @@ -487,11 +512,14 @@ registered compression algorithms, increases our chances of finding the algorithm that successfully compresses a particular page. Sometimes, however, it is convenient (and sometimes even necessary) to limit recompression to only one particular algorithm so that it will not try any other algorithms. -This can be achieved by providing a algo=NAME parameter::: +This can be achieved by providing a `algo` or `priority` parameter::: #use zstd algorithm only (if registered) echo "type=huge algo=zstd" > /sys/block/zramX/recompress + #use zstd algorithm only (if zstd was registered under priority 1) + echo "type=huge priority=1" > /sys/block/zramX/recompress + memory tracking =============== diff --git a/Documentation/admin-guide/bug-bisect.rst b/Documentation/admin-guide/bug-bisect.rst index 325c5d0ed34a0a..585630d14581c7 100644 --- a/Documentation/admin-guide/bug-bisect.rst +++ b/Documentation/admin-guide/bug-bisect.rst @@ -1,76 +1,144 @@ -Bisecting a bug -+++++++++++++++ +.. SPDX-License-Identifier: (GPL-2.0+ OR CC-BY-4.0) +.. [see the bottom of this file for redistribution information] -Last updated: 28 October 2016 +====================== +Bisecting a regression +====================== -Introduction -============ +This document describes how to use a ``git bisect`` to find the source code +change that broke something -- for example when some functionality stopped +working after upgrading from Linux 6.0 to 6.1. -Always try the latest kernel from kernel.org and build from source. If you are -not confident in doing that please report the bug to your distribution vendor -instead of to a kernel developer. +The text focuses on the gist of the process. 
If you are new to bisecting the +kernel, better follow Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst +instead: it depicts everything from start to finish while covering multiple +aspects even kernel developers occasionally forget. This includes detecting +situations early where a bisection would be a waste of time, as nobody would +care about the result -- for example, because the problem happens after the +kernel marked itself as 'tainted', occurs in an abandoned version, was already +fixed, or is caused by a .config change you or your Linux distributor performed. -Finding bugs is not always easy. Have a go though. If you can't find it don't -give up. Report as much as you have found to the relevant maintainer. See -MAINTAINERS for who that is for the subsystem you have worked on. +Finding the change causing a kernel issue using a bisection +=========================================================== -Before you submit a bug report read -'Documentation/admin-guide/reporting-issues.rst'. +*Note: the following process assumes you prepared everything for a bisection. +This includes having a Git clone with the appropriate sources, installing the +software required to build and install kernels, as well as a .config file stored +in a safe place (the following example assumes '~/prepared_kernel_.config') to +use as pristine base at each bisection step; ideally, you have also worked out +a fully reliable and straight-forward way to reproduce the regression, too.* -Devices not appearing -===================== - -Often this is caused by udev/systemd. Check that first before blaming it -on the kernel. - -Finding patch that caused a bug -=============================== - -Using the provided tools with ``git`` makes finding bugs easy provided the bug -is reproducible. - -Steps to do it: - -- build the Kernel from its git source -- start bisect with [#f1]_:: - - $ git bisect start - -- mark the broken changeset with:: - - $ git bisect bad [commit] - -- mark a changeset where the code is known to work with:: - - $ git bisect good [commit] - -- rebuild the Kernel and test -- interact with git bisect by using either:: - - $ git bisect good - - or:: - - $ git bisect bad - - depending if the bug happened on the changeset you're testing -- After some interactions, git bisect will give you the changeset that - likely caused the bug. - -- For example, if you know that the current version is bad, and version - 4.8 is good, you could do:: - - $ git bisect start - $ git bisect bad # Current version is bad - $ git bisect good v4.8 - - -.. [#f1] You can, optionally, provide both good and bad arguments at git - start with ``git bisect start [BAD] [GOOD]`` - -For further references, please read: - -- The man page for ``git-bisect`` -- `Fighting regressions with git bisect `_ -- `Fully automated bisecting with "git bisect run" `_ -- `Using Git bisect to figure out when brokenness was introduced `_ +* Preparation: start the bisection and tell Git about the points in the history + you consider to be working and broken, which Git calls 'good' and 'bad':: + + git bisect start + git bisect good v6.0 + git bisect bad v6.1 + + Instead of Git tags like 'v6.0' and 'v6.1' you can specify commit-ids, too. + +1. Copy your prepared .config into the build directory and adjust it to the + needs of the codebase Git checked out for testing:: + + cp ~/prepared_kernel_.config .config + make olddefconfig + +2. Now build, install, and boot a kernel. 
This might fail for unrelated reasons, + for example, when a compile error happens at the current stage of the + bisection a later change resolves. In such cases run ``git bisect skip`` and + go back to step 1. + +3. Check if the functionality that regressed works in the kernel you just built. + + If it works, execute:: + + git bisect good + + If it is broken, run:: + + git bisect bad + + Note, getting this wrong just once will send the rest of the bisection + totally off course. To prevent having to start anew later you thus want to + ensure what you tell Git is correct; it is thus often wise to spend a few + minutes more on testing in case your reproducer is unreliable. + + After issuing one of these two commands, Git will usually check out another + bisection point and print something like 'Bisecting: 675 revisions left to + test after this (roughly 10 steps)'. In that case go back to step 1. + + If Git instead prints something like 'cafecaca0c0dacafecaca0c0dacafecaca0c0da + is the first bad commit', then you have finished the bisection. In that case + move to the next point below. Note, right after displaying that line Git will + show some details about the culprit including its patch description; this can + easily fill your terminal, so you might need to scroll up to see the message + mentioning the culprit's commit-id. + + In case you missed Git's output, you can always run ``git bisect log`` to + print the status: it will show how many steps remain or mention the result of + the bisection. + +* Recommended complementary task: put the bisection log and the current .config + file aside for the bug report; furthermore tell Git to reset the sources to + the state before the bisection:: + + git bisect log > ~/bisection-log + cp .config ~/bisection-config-culprit + git bisect reset + +* Recommended optional task: try reverting the culprit on top of the latest + codebase and check if that fixes your bug; if that is the case, it validates + the bisection and enables developers to resolve the regression through a + revert. + + To try this, update your clone and check out latest mainline. Then tell Git + to revert the change by specifying its commit-id:: + + git revert --no-edit cafec0cacaca0 + + Git might reject this, for example when the bisection landed on a merge + commit. In that case, abandon the attempt. Do the same, if Git fails to revert + the culprit on its own because later changes depend on it -- at least unless + you bisected a stable or longterm kernel series, in which case you want to + check out its latest codebase and try a revert there. + + If a revert succeeds, build and test another kernel to check if reverting + resolved your regression. + +With that the process is complete. Now report the regression as described by +Documentation/admin-guide/reporting-issues.rst. + + +Additional reading material +--------------------------- + +* The `man page for 'git bisect' `_ and + `fighting regressions with 'git bisect' `_ + in the Git documentation. +* `Working with git bisect `_ + from kernel developer Nathan Chancellor. +* `Using Git bisect to figure out when brokenness was introduced `_. +* `Fully automated bisecting with 'git bisect run' `_. + +.. + end-of-content +.. + This document is maintained by Thorsten Leemhuis . If + you spot a typo or small mistake, feel free to let him know directly and + he'll fix it. 
You are free to do the same in a mostly informal way if you + want to contribute changes to the text -- but for copyright reasons please CC + linux-doc@vger.kernel.org and 'sign-off' your contribution as + Documentation/process/submitting-patches.rst explains in the section 'Sign + your work - the Developer's Certificate of Origin'. +.. + This text is available under GPL-2.0+ or CC-BY-4.0, as stated at the top + of the file. If you want to distribute this text under CC-BY-4.0 only, + please use 'The Linux kernel development community' for author attribution + and link this as source: + https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/Documentation/admin-guide/bug-bisect.rst + +.. + Note: Only the content of this RST file as found in the Linux kernel sources + is available under CC-BY-4.0, as versions of this text that were processed + (for example by the kernel's build system) might contain content taken from + files which use a more restrictive license. diff --git a/Documentation/admin-guide/bug-hunting.rst b/Documentation/admin-guide/bug-hunting.rst index 95299b08c405af..1d0f8ceb30754e 100644 --- a/Documentation/admin-guide/bug-hunting.rst +++ b/Documentation/admin-guide/bug-hunting.rst @@ -244,14 +244,14 @@ Reporting the bug Once you find where the bug happened, by inspecting its location, you could either try to fix it yourself or report it upstream. -In order to report it upstream, you should identify the mailing list -used for the development of the affected code. This can be done by using -the ``get_maintainer.pl`` script. +In order to report it upstream, you should identify the bug tracker, if any, or +mailing list used for the development of the affected code. This can be done by +using the ``get_maintainer.pl`` script. For example, if you find a bug at the gspca's sonixj.c file, you can get its maintainers with:: - $ ./scripts/get_maintainer.pl -f drivers/media/usb/gspca/sonixj.c + $ ./scripts/get_maintainer.pl --bug -f drivers/media/usb/gspca/sonixj.c Hans Verkuil (odd fixer:GSPCA USB WEBCAM DRIVER,commit_signer:1/1=100%) Mauro Carvalho Chehab (maintainer:MEDIA INPUT INFRASTRUCTURE (V4L/DVB),commit_signer:1/1=100%) Tejun Heo (commit_signer:1/1=100%) @@ -267,11 +267,12 @@ Please notice that it will point to: - The driver maintainer (Hans Verkuil); - The subsystem maintainer (Mauro Carvalho Chehab); - The driver and/or subsystem mailing list (linux-media@vger.kernel.org); -- the Linux Kernel mailing list (linux-kernel@vger.kernel.org). +- The Linux Kernel mailing list (linux-kernel@vger.kernel.org); +- The bug reporting URIs for the driver/subsystem (none in the above example). -Usually, the fastest way to have your bug fixed is to report it to mailing -list used for the development of the code (linux-media ML) copying the -driver maintainer (Hans). +If the listing contains bug reporting URIs at the end, please prefer them over +email. Otherwise, please report bugs to the mailing list used for the +development of the code (linux-media ML) copying the driver maintainer (Hans). If you are totally stumped as to whom to send the report, and ``get_maintainer.pl`` didn't provide you anything useful, send it to diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index 9cde26d3384355..270501db9f4e85 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -78,18 +78,24 @@ Brief summary of control files. 
memory.memsw.max_usage_in_bytes show max memory+Swap usage recorded memory.soft_limit_in_bytes set/show soft limit of memory usage This knob is not available on CONFIG_PREEMPT_RT systems. + This knob is deprecated and shouldn't be + used. memory.stat show various statistics memory.use_hierarchy set/show hierarchical account enabled This knob is deprecated and shouldn't be used. memory.force_empty trigger forced page reclaim memory.pressure_level set memory pressure notifications + This knob is deprecated and shouldn't be + used. memory.swappiness set/show swappiness parameter of vmscan (See sysctl's vm.swappiness) memory.move_charge_at_immigrate set/show controls of moving charges This knob is deprecated and shouldn't be used. memory.oom_control set/show oom controls. + This knob is deprecated and shouldn't be + used. memory.numa_stat show the number of memory usage per numa node memory.kmem.limit_in_bytes Deprecated knob to set and read the kernel @@ -105,10 +111,18 @@ Brief summary of control files. memory.kmem.max_usage_in_bytes show max kernel memory usage recorded memory.kmem.tcp.limit_in_bytes set/show hard limit for tcp buf memory + This knob is deprecated and shouldn't be + used. memory.kmem.tcp.usage_in_bytes show current tcp buf memory allocation + This knob is deprecated and shouldn't be + used. memory.kmem.tcp.failcnt show the number of tcp buf memory usage hits limits + This knob is deprecated and shouldn't be + used. memory.kmem.tcp.max_usage_in_bytes show max tcp buf memory usage recorded + This knob is deprecated and shouldn't be + used. ==================================== ========================================== 1. History @@ -693,8 +707,10 @@ For compatibility reasons writing 1 to memory.use_hierarchy will always pass:: # echo 1 > memory.use_hierarchy -7. Soft limits -============== +7. Soft limits (DEPRECATED) +=========================== + +THIS IS DEPRECATED! Soft limits allow for greater sharing of memory. The idea behind soft limits is to allow control groups to use as much of the memory as needed, provided @@ -834,8 +850,10 @@ It's applicable for root and non-root cgroup. .. _cgroup-v1-memory-oom-control: -10. OOM Control -=============== +10. OOM Control (DEPRECATED) +============================ + +THIS IS DEPRECATED! memory.oom_control file is for OOM notification and other controls. @@ -882,8 +900,10 @@ At reading, current status of OOM is shown. The number of processes belonging to this cgroup killed by any kind of OOM killer. -11. Memory Pressure -=================== +11. Memory Pressure (DEPRECATED) +================================ + +THIS IS DEPRECATED! The pressure level notifications can be used to monitor the memory allocation cost; based on the pressure, applications can implement diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 95c18bc1708345..69af2173555fb6 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -533,10 +533,12 @@ cgroup namespace on namespace creation. Because the resource control interface files in a given directory control the distribution of the parent's resources, the delegatee shouldn't be allowed to write to them. For the first method, this is -achieved by not granting access to these files. For the second, the -kernel rejects writes to all files other than "cgroup.procs" and -"cgroup.subtree_control" on a namespace root from inside the -namespace. +achieved by not granting access to these files. 
For the second, files +outside the namespace should be hidden from the delegatee by the means +of at least mount namespacing, and the kernel rejects writes to all +files on a namespace root from inside the cgroup namespace, except for +those files listed in "/sys/kernel/cgroup/delegate" (including +"cgroup.procs", "cgroup.threads", "cgroup.subtree_control", etc.). The end results are equivalent for both delegation types. Once delegated, the user can build sub-hierarchy under the directory, @@ -981,6 +983,14 @@ All cgroup core files are prefixed with "cgroup." A dying cgroup can consume system resources not exceeding limits, which were active at the moment of cgroup deletion. + nr_subsys_ + Total number of live cgroup subsystems (e.g memory + cgroup) at and beneath the current cgroup. + + nr_dying_subsys_ + Total number of dying cgroup subsystems (e.g. memory + cgroup) at and beneath the current cgroup. + cgroup.freeze A read-write single value file which exists on non-root cgroups. Allowed values are "0" and "1". The default is "0". @@ -1333,11 +1343,14 @@ The following nested keys are defined. all the existing limitations and potential future extensions. memory.peak - A read-only single value file which exists on non-root - cgroups. + A read-write single value file which exists on non-root cgroups. + + The max memory usage recorded for the cgroup and its descendants since + either the creation of the cgroup or the most recent reset for that FD. - The max memory usage recorded for the cgroup and its - descendants since the creation of the cgroup. + A write of any non-empty string to this file resets it to the + current memory usage for subsequent reads through the same + file descriptor. memory.oom.group A read-write single value file which exists on non-root @@ -1614,6 +1627,25 @@ The following nested keys are defined. Usually because failed to allocate some continuous swap space for the huge page. + numa_pages_migrated (npn) + Number of pages migrated by NUMA balancing. + + numa_pte_updates (npn) + Number of pages whose page table entries are modified by + NUMA balancing to produce NUMA hinting faults on access. + + numa_hint_faults (npn) + Number of NUMA hinting faults. + + pgdemote_kswapd + Number of pages demoted by kswapd. + + pgdemote_direct + Number of pages demoted directly. + + pgdemote_khugepaged + Number of pages demoted by khugepaged. + memory.numa_stat A read-only nested-keyed file which exists on non-root cgroups. @@ -1663,11 +1695,14 @@ The following nested keys are defined. Healthy workloads are not expected to reach this limit. memory.swap.peak - A read-only single value file which exists on non-root - cgroups. + A read-write single value file which exists on non-root cgroups. + + The max swap usage recorded for the cgroup and its descendants since + the creation of the cgroup or the most recent reset for that FD. - The max swap usage recorded for the cgroup and its - descendants since the creation of the cgroup. + A write of any non-empty string to this file resets it to the + current memory usage for subsequent reads through the same + file descriptor. memory.swap.max A read-write single value file which exists on non-root @@ -1731,6 +1766,8 @@ The following nested keys are defined. Note that this is subtly different from setting memory.swap.max to 0, as it still allows for pages to be written to the zswap pool. + This setting has no effect if zswap is disabled, and swapping + is allowed unless memory.swap.max is set to 0. memory.pressure A read-only nested-keyed file. 
@@ -2940,8 +2977,8 @@ Deprecated v1 Core Features - "cgroup.clone_children" is removed. -- /proc/cgroups is meaningless for v2. Use "cgroup.controllers" file - at the root instead. +- /proc/cgroups is meaningless for v2. Use "cgroup.controllers" or + "cgroup.stat" files at the root instead. Issues with v1 and Rationales for v2 diff --git a/Documentation/admin-guide/device-mapper/delay.rst b/Documentation/admin-guide/device-mapper/delay.rst index 917ba8c333591b..4d667228e74426 100644 --- a/Documentation/admin-guide/device-mapper/delay.rst +++ b/Documentation/admin-guide/device-mapper/delay.rst @@ -3,29 +3,52 @@ dm-delay ======== Device-Mapper's "delay" target delays reads and/or writes -and maps them to different devices. +and/or flushs and optionally maps them to different devices. -Parameters:: +Arguments:: [ [ ]] -With separate write parameters, the first set is only used for reads. +Table line has to either have 3, 6 or 9 arguments: + +3: apply offset and delay to read, write and flush operations on device + +6: apply offset and delay to device, also apply write_offset and write_delay + to write and flush operations on optionally different write_device with + optionally different sector offset + +9: same as 6 arguments plus define flush_offset and flush_delay explicitely + on/with optionally different flush_device/flush_offset. + Offsets are specified in sectors. + Delays are specified in milliseconds. + Example scripts =============== :: - #!/bin/sh - # Create device delaying rw operation for 500ms - echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed + # + # Create mapped device named "delayed" delaying read, write and flush operations for 500ms. + # + dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 0 500" :: + #!/bin/sh + # + # Create mapped device delaying write and flush operations for 400ms and + # splitting reads to device $1 but writes and flushs to different device $2 + # to different offsets of 2048 and 4096 sectors respectively. + # + dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 2048 0 $2 4096 400" +:: #!/bin/sh - # Create device delaying only write operation for 500ms and - # splitting reads and writes to different devices $1 $2 - echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed + # + # Create mapped device delaying reads for 50ms, writes for 100ms and flushs for 333ms + # onto the same backing device at offset 0 sectors. + # + dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 0 50 $2 0 100 $1 0 333" diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst index 552c9155165d7c..9f8139ff97d656 100644 --- a/Documentation/admin-guide/device-mapper/dm-crypt.rst +++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst @@ -160,15 +160,24 @@ iv_large_sectors The must be multiple of (in 512 bytes units) if this flag is specified. +integrity_key_size: + Use an integrity key of size instead of using an integrity key size + of the digest size of the used HMAC algorithm. -Module parameters:: +Module parameters:: max_read_size + Maximum size of read requests. When a request larger than this size + is received, dm-crypt will split the request. The splitting improves + concurrency (the split requests could be encrypted in parallel by multiple + cores), but it also causes overhead. The user should tune this parameters to + fit the actual workload. + max_write_size - Maximum size of read or write requests. 
When a request larger than this size + Maximum size of write requests. When a request larger than this size is received, dm-crypt will split the request. The splitting improves concurrency (the split requests could be encrypted in parallel by multiple - cores), but it also causes overhead. The user should tune these parameters to + cores), but it also causes overhead. The user should tune this parameter to fit the actual workload. diff --git a/Documentation/admin-guide/device-mapper/vdo.rst b/Documentation/admin-guide/device-mapper/vdo.rst index c69ac186863a31..a14e6d3e787c91 100644 --- a/Documentation/admin-guide/device-mapper/vdo.rst +++ b/Documentation/admin-guide/device-mapper/vdo.rst @@ -251,7 +251,12 @@ The messages are: by the vdostats userspace program to interpret the output buffer. - dump: + config: + Outputs useful vdo configuration information. Mostly used + by users who want to recreate a similar VDO volume and + want to know the creation configuration used. + + dump: Dumps many internal structures to the system log. This is not always safe to run, so it should only be used to debug a hung vdo. Optional parameters to specify structures to diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst index 5740d85439ff50..2418b0c2d3dfcc 100644 --- a/Documentation/admin-guide/ext4.rst +++ b/Documentation/admin-guide/ext4.rst @@ -212,16 +212,6 @@ When mounting an ext4 filesystem, the following option are accepted: that ext4's inode table readahead algorithm will pre-read into the buffer cache. The default value is 32 blocks. - nouser_xattr - Disables Extended User Attributes. See the attr(5) manual page for - more information about extended attributes. - - noacl - This option disables POSIX Access Control List support. If ACL support - is enabled in the kernel configuration (CONFIG_EXT4_FS_POSIX_ACL), ACL - is enabled by default on mount. See the acl(5) manual page for more - information about acl. - bsddf (*) Make 'df' act like BSD. diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst index 4bd3ce3ba171f2..2ad1c05b8c8839 100644 --- a/Documentation/admin-guide/hw-vuln/srso.rst +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -158,3 +158,72 @@ poisoned BTB entry and using that safe one for all function returns. In older Zen1 and Zen2, this is accomplished using a reinterpretation technique similar to Retbleed one: srso_untrain_ret() and srso_safe_ret(). + +Checking the safe RET mitigation actually works +----------------------------------------------- + +In case one wants to validate whether the SRSO safe RET mitigation works +on a kernel, one could use two performance counters + +* PMC_0xc8 - Count of RET/RET lw retired +* PMC_0xc9 - Count of RET/RET lw retired mispredicted + +and compare the number of RETs retired properly vs those retired +mispredicted, in kernel mode. Another way of specifying those events +is:: + + # perf list ex_ret_near_ret + + List of pre-defined events (to be used in -e or -M): + + core: + ex_ret_near_ret + [Retired Near Returns] + ex_ret_near_ret_mispred + [Retired Near Returns Mispredicted] + +Either the command using the event mnemonics:: + + # perf stat -e ex_ret_near_ret:k -e ex_ret_near_ret_mispred:k sleep 10s + +or using the raw PMC numbers:: + + # perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s + +should give the same amount.
I.e., every RET retired should be +mispredicted:: + + [root@brent: ~/kernel/linux/tools/perf> ./perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s + + Performance counter stats for 'sleep 10s': + + 137,167 cpu/event=0xc8,umask=0/k + 137,173 cpu/event=0xc9,umask=0/k + + 10.004110303 seconds time elapsed + + 0.000000000 seconds user + 0.004462000 seconds sys + +vs the case when the mitigation is disabled (spec_rstack_overflow=off) +or not functioning properly, showing usually a lot smaller number of +mispredicted retired RETs vs the overall count of retired RETs during +a workload:: + + [root@brent: ~/kernel/linux/tools/perf> ./perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s + + Performance counter stats for 'sleep 10s': + + 201,627 cpu/event=0xc8,umask=0/k + 4,074 cpu/event=0xc9,umask=0/k + + 10.003267252 seconds time elapsed + + 0.002729000 seconds user + 0.000000000 seconds sys + +Also, there is a selftest which performs the above, go to +tools/testing/selftests/x86/ and do:: + + make srso + ./srso diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 09126bb8cc9ffb..1518343bbe2237 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -333,12 +333,17 @@ allowed anymore to lift isolation requirements as needed. This option does not override iommu=pt - force_enable - Force enable the IOMMU on platforms known - to be buggy with IOMMU enabled. Use this - option with care. - pgtbl_v1 - Use v1 page table for DMA-API (Default). - pgtbl_v2 - Use v2 page table for DMA-API. - irtcachedis - Disable Interrupt Remapping Table (IRT) caching. + force_enable - Force enable the IOMMU on platforms known + to be buggy with IOMMU enabled. Use this + option with care. + pgtbl_v1 - Use v1 page table for DMA-API (Default). + pgtbl_v2 - Use v2 page table for DMA-API. + irtcachedis - Disable Interrupt Remapping Table (IRT) caching. + nohugepages - Limit page-sizes used for v1 page-tables + to 4 KiB. + v2_pgsizes_only - Limit page-sizes used for v1 page-tables + to 4KiB/2Mib/1GiB. + amd_iommu_dump= [HW,X86-64] Enable AMD IOMMU driver option to dump the ACPI table @@ -517,6 +522,18 @@ Format: ,, See header of drivers/net/hamradio/baycom_ser_hdx.c. + bdev_allow_write_mounted= + Format: + Control the ability to open a mounted block device + for writing, i.e., allow / disallow writes that bypass + the FS. This was implemented as a means to prevent + fuzzers from crashing the kernel by overwriting the + metadata underneath a mounted FS without its awareness. + This also prevents destructive formatting of mounted + filesystems by naive storage tooling that don't use + O_EXCL. Default is Y and can be changed through the + Kconfig option CONFIG_BLK_DEV_WRITE_MOUNTED. + bert_disable [ACPI] Disable BERT OS support on buggy BIOSes. @@ -2350,6 +2367,18 @@ ipcmni_extend [KNL,EARLY] Extend the maximum number of unique System V IPC identifiers from 32,768 to 16,777,216. + ipe.enforce= [IPE] + Format: + Determine whether IPE starts in permissive (0) or + enforce (1) mode. The default is enforce. + + ipe.success_audit= + [IPE] + Format: + Start IPE with success auditing enabled, emitting + an audit event when a binary is allowed. The default + is 0. + irqaffinity= [SMP] Set the default irq affinity mask The argument is a cpu list, as described above. @@ -2648,6 +2677,23 @@ Default is Y (on). 
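As a hedged illustration of the ipe.* parameters described above, the sketch below starts IPE in permissive mode with success auditing enabled; the bootloader workflow and file paths are distribution-specific assumptions, not part of the kernel documentation::

    # e.g. in /etc/default/grub, append to the existing command line:
    GRUB_CMDLINE_LINUX="... ipe.enforce=0 ipe.success_audit=1"
    # then regenerate the bootloader configuration (update-grub or
    # grub2-mkconfig, depending on the distribution) and reboot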
+ kvm.enable_virt_at_load=[KVM,ARM64,LOONGARCH,MIPS,RISCV,X86] + If enabled, KVM will enable virtualization in hardware + when KVM is loaded, and disable virtualization when KVM + is unloaded (if KVM is built as a module). + + If disabled, KVM will dynamically enable and disable + virtualization on-demand when creating and destroying + VMs, i.e. on the 0=>1 and 1=>0 transitions of the + number of VMs. + + Enabling virtualization at module load avoids potential + latency for creation of the 0=>1 VM, as KVM serializes + virtualization enabling across all online CPUs. The + "cost" of enabling virtualization when KVM is loaded + is that doing so may interfere with using out-of-tree + hypervisors that want to "own" virtualization hardware. + kvm.enable_vmware_backdoor=[KVM] Support VMware backdoor PV interface. Default is false (don't support). @@ -4123,6 +4169,21 @@ Disable NUMA, Only set up a single NUMA node spanning all memory. + numa=fake=[MG] + [KNL, ARM64, RISCV, X86, EARLY] + If given as a memory unit, fills all system RAM with + nodes of size interleaved over physical nodes. + + numa=fake= + [KNL, ARM64, RISCV, X86, EARLY] + If given as an integer, fills all system RAM with N + fake nodes interleaved over physical nodes. + + numa=fake=U + [KNL, ARM64, RISCV, X86, EARLY] + If given as an integer followed by 'U', it will + divide each physical node into N emulated nodes. + numa_balancing= [KNL,ARM64,PPC,RISCV,S390,X86] Enable or disable automatic NUMA balancing. Allowed values are enable and disable @@ -4788,6 +4849,16 @@ printk.time= Show timing data prefixed to each printk message line Format: (1/Y/y=enable, 0/N/n=disable) + proc_mem.force_override= [KNL] + Format: {always | ptrace | never} + Traditionally /proc/pid/mem allows memory permissions to be + overridden without restrictions. This option may be set to + restrict that. Can be one of: + - 'always': traditional behavior always allows mem overrides. + - 'ptrace': only allow mem overrides for active ptracers. + - 'never': never allow mem overrides. + If not specified, default is the CONFIG_PROC_MEM_* choice. + processor.max_cstate= [HW,ACPI] Limit processor to maximum C-state max_cstate=9 overrides any DMI blacklist limit. @@ -4935,6 +5006,10 @@ Set maximum number of finished RCU callbacks to process in one batch. + rcutree.csd_lock_suppress_rcu_stall= [KNL] + Do only a one-line RCU CPU stall warning when + there is an ongoing too-long CSD-lock wait. + rcutree.do_rcu_barrier= [KNL] Request a call to rcu_barrier(). This is throttled so that userspace tests can safely @@ -5382,7 +5457,13 @@ Time to wait (s) after boot before inducing stall. rcutorture.stall_cpu_irqsoff= [KNL] - Disable interrupts while stalling if set. + Disable interrupts while stalling if set, but only + on the first stall in the set. + + rcutorture.stall_cpu_repeat= [KNL] + Number of times to repeat the stall sequence, + so that rcutorture.stall_cpu_repeat=3 will result + in four stall sequences. rcutorture.stall_gp_kthread= [KNL] Duration (s) of forced sleep within RCU @@ -5570,14 +5651,6 @@ of zero will disable batching. Batching is always disabled for synchronize_rcu_tasks(). - rcupdate.rcu_tasks_rude_lazy_ms= [KNL] - Set timeout in milliseconds RCU Tasks - Rude asynchronous callback batching for - call_rcu_tasks_rude(). A negative value - will take the default. A value of zero will - disable batching. Batching is always disabled - for synchronize_rcu_tasks_rude().
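To illustrate the numa=fake variants documented above, a small sketch; it assumes the numactl utility is installed, and the node layout reported will of course depend on the machine::

    # boot with one of, for example:
    #   numa=fake=4     four emulated nodes interleaved over the physical nodes
    #   numa=fake=2G    emulated nodes of 2G each
    #   numa=fake=2U    split each physical node into two emulated nodes
    # then inspect the resulting topology:
    numactl --hardware
    cat /sys/devices/system/node/online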
- rcupdate.rcu_tasks_trace_lazy_ms= [KNL] Set timeout in milliseconds RCU Tasks Trace asynchronous callback batching for @@ -6614,6 +6687,15 @@ : poll all this frequency 0: no polling (default) + thp_anon= [KNL] + Format: ,[KMG]:;-[KMG]: + state is one of "always", "madvise", "never" or "inherit". + Control the default behavior of the system with respect + to anonymous transparent hugepages. + Can be used multiple times for multiple anon THP sizes. + See Documentation/admin-guide/mm/transhuge.rst for more + details. + threadirqs [KNL,EARLY] Force threading of all interrupt handlers except those marked explicitly IRQF_NO_THREAD. @@ -6743,6 +6825,51 @@ the same thing would happen if it was left off). The irq_handler_entry event, and all events under the "initcall" system. + Flags can be added to the instance to modify its behavior when it is + created. The flags are separated by '^'. + + The available flags are: + + traceoff - Have the tracing instance tracing disabled after it is created. + traceprintk - Have trace_printk() write into this trace instance + (note, "printk" and "trace_printk" can also be used) + + trace_instance=foo^traceoff^traceprintk,sched,irq + + The flags must come before the defined events. + + If memory has been reserved (see memmap for x86), the instance + can use that memory: + + memmap=12M$0x284500000 trace_instance=boot_map@0x284500000:12M + + The above will create a "boot_map" instance that uses the physical + memory at 0x284500000, which is 12 megabytes in size. The per CPU buffers of that + instance will be split up accordingly. + + Alternatively, the memory can be reserved by the reserve_mem option: + + reserve_mem=12M:4096:trace trace_instance=boot_map@trace + + This will reserve 12 megabytes at boot up with a 4096 byte alignment + and place the ring buffer in this memory. Note that due to KASLR, the + memory may not be the same location each time, which will not preserve + the buffer content. + + Also note that the layout of the ring buffer data may change between + kernel versions, in which case the validator will fail and reset the ring buffer + if the layout is not the same as the previous kernel. + + If the ring buffer is used for persistent bootups and has events enabled, + it is recommended to disable tracing so that events from a previous boot do not + mix with events of the current boot (unless you are debugging a random crash + at boot up). + + reserve_mem=12M:4096:trace trace_instance=boot_map^traceoff^traceprintk@trace,sched,irq + + See also Documentation/trace/debugging.rst + + trace_options=[option-list] [FTRACE] Enable or disable tracer options at boot. The option-list is a comma delimited list of options @@ -7352,6 +7479,13 @@ it can be updated at runtime by writing to the corresponding sysfs file. + workqueue.panic_on_stall= + Panic when workqueue stall is detected by + CONFIG_WQ_WATCHDOG. It sets the number of stall + detections required to trigger the panic. + + The default is 0, which disables the panic on stall.
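A brief, hedged example of combining the workqueue.panic_on_stall parameter described above with the stall watchdog; the threshold value is only an illustration and CONFIG_WQ_WATCHDOG must be enabled::

    # check for stalls every 30 seconds and panic once the same stall
    # has been detected twice
    workqueue.watchdog_thresh=30 workqueue.panic_on_stall=2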
+ workqueue.cpu_intensive_thresh_us= Per-cpu work items which run for longer than this threshold are automatically considered CPU intensive diff --git a/Documentation/admin-guide/media/cec.rst b/Documentation/admin-guide/media/cec.rst index 6b30e355cf23d8..92690e1f2183dc 100644 --- a/Documentation/admin-guide/media/cec.rst +++ b/Documentation/admin-guide/media/cec.rst @@ -42,10 +42,14 @@ dongles): ``persistent_config``: by default this is off, but when set to 1 the driver will store the current settings to the device's internal eeprom and restore it the next time the device is connected to the USB port. + - RainShadow Tech. Note: this driver does not support the persistent_config module option of the Pulse-Eight driver. The hardware supports it, but I have no plans to add this feature. But I accept patches :-) +- Extron DA HD 4K PLUS HDMI Distribution Amplifier. See + :ref:`extron_da_hd_4k_plus` for more information. + Miscellaneous: - vivid: emulates a CEC receiver and CEC transmitter. @@ -378,3 +382,86 @@ it later using ``--analyze-pin``. You can also use this as a full-fledged CEC device by configuring it using ``cec-ctl --tv -p0.0.0.0`` or ``cec-ctl --playback -p1.0.0.0``. + +.. _extron_da_hd_4k_plus: + +Extron DA HD 4K PLUS CEC Adapter driver +======================================= + +This driver is for the Extron DA HD 4K PLUS series of HDMI Distribution +Amplifiers: https://www.extron.com/product/dahd4kplusseries + +The 2, 4 and 6 port models are supported. + +Firmware version 1.02.0001 or higher is required. + +Note that older Extron hardware revisions have a problem with the CEC voltage, +which may mean that CEC will not work. This is fixed in hardware revisions +E34814 and up. + +The CEC support has two modes: the first is a manual mode where userspace has +to manually control CEC for the HDMI Input and all HDMI Outputs. While this gives +full control, it is also complicated. + +The second mode is an automatic mode, which is selected if the module option +``vendor_id`` is set. In that case the driver controls CEC and CEC messages +received in the input will be distributed to the outputs. It is still possible +to use the /dev/cecX devices to talk to the connected devices directly, but it is +the driver that configures everything and deals with things like Hotplug Detect +changes. + +The driver also takes care of the EDIDs: /dev/videoX devices are created to +read the EDIDs and (for the HDMI Input port) to set the EDID. + +By default userspace is responsible to set the EDID for the HDMI Input +according to the EDIDs of the connected displays. But if the ``manufacturer_name`` +module option is set, then the driver will take care of setting the EDID +of the HDMI Input based on the supported resolutions of the connected displays. +Currently the driver only supports resolutions 1080p60 and 4kp60: if all connected +displays support 4kp60, then it will advertise 4kp60 on the HDMI input, otherwise +it will fall back to an EDID that just reports 1080p60. + +The status of the Extron is reported in ``/sys/kernel/debug/cec/cecX/status``. + +The extron-da-hd-4k-plus driver implements the following module options: + +``debug`` +--------- + +If set to 1, then all serial port traffic is shown. + +``vendor_id`` +------------- + +The CEC Vendor ID to report to connected displays. + +If set, then the driver will take care of distributing CEC messages received +on the input to the HDMI outputs. 
This is done for the following CEC messages: + +- +- and +- +- +- + +If not set, then userspace is responsible for this, and it will have to +configure the CEC devices for HDMI Input and the HDMI Outputs manually. + +``manufacturer_name`` +--------------------- + +A three-character manufacturer name that is used in the EDID for the HDMI +Input. If not set, then userspace is responsible for configuring an EDID. +If set, then the driver will update the EDID automatically based on the +resolutions supported by the connected displays, and it will no longer be possible +to manually set the EDID for the HDMI Input. + +``hpd_never_low`` +----------------- + +If set, then the Hotplug Detect pin of the HDMI Input will always be high, +even if nothing is connected to the HDMI Outputs. If not set (the default) +then the Hotplug Detect pin of the HDMI input will go low if all the detected +Hotplug Detect pins of the HDMI Outputs are also low. + +This option may be changed dynamically. diff --git a/Documentation/admin-guide/media/mgb4.rst b/Documentation/admin-guide/media/mgb4.rst index e434d4a9eeb375..b9da127c074d14 100644 --- a/Documentation/admin-guide/media/mgb4.rst +++ b/Documentation/admin-guide/media/mgb4.rst @@ -227,8 +227,13 @@ Common FPDL3/GMSL output parameters open.* **frame_rate** (RW): - Output video frame rate in frames per second. The default frame rate is - 60Hz. + Output video signal frame rate limit in frames per second. Due to + the limited output pixel clock steps, the card cannot always generate + a frame rate perfectly matching the value required by the connected display. + Using this parameter one can limit the frame rate by "crippling" the signal + so that the lines are not equal (the porches of the last line differ) but + the signal appears to the connected display as having the exact frame rate. + The default frame rate limit is 60Hz. **hsync_polarity** (RW): HSYNC signal polarity. @@ -253,33 +258,33 @@ Common FPDL3/GMSL output parameters and there is a non-linear stepping between two consecutive allowed frequencies. The driver finds the nearest allowed frequency to the given value and sets it. When reading this property, you get the exact - frequency set by the driver. The default frequency is 70000kHz. + frequency set by the driver. The default frequency is 61150kHz. *Note: This parameter can not be changed while the output v4l2 device is open.* **hsync_width** (RW): - Width of the HSYNC signal in pixels. The default value is 16. + Width of the HSYNC signal in pixels. The default value is 40. **vsync_width** (RW): - Width of the VSYNC signal in video lines. The default value is 2. + Width of the VSYNC signal in video lines. The default value is 20. **hback_porch** (RW): Number of PCLK pulses between deassertion of the HSYNC signal and the first - valid pixel in the video line (marked by DE=1). The default value is 32. + valid pixel in the video line (marked by DE=1). The default value is 50. **hfront_porch** (RW): Number of PCLK pulses between the end of the last valid pixel in the video line (marked by DE=1) and assertion of the HSYNC signal. The default value - is 32. + is 50. **vback_porch** (RW): Number of video lines between deassertion of the VSYNC signal and the video - line with the first valid pixel (marked by DE=1). The default value is 2. + line with the first valid pixel (marked by DE=1). The default value is 31. **vfront_porch** (RW): Number of video lines between the end of the last valid pixel line (marked - by DE=1) and assertion of the VSYNC signal.
The default value is 2. + by DE=1) and assertion of the VSYNC signal. The default value is 30. FPDL3 specific input parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/admin-guide/media/rkisp1.rst b/Documentation/admin-guide/media/rkisp1.rst index 6f14d9561fa5b7..6c878c71442fd1 100644 --- a/Documentation/admin-guide/media/rkisp1.rst +++ b/Documentation/admin-guide/media/rkisp1.rst @@ -114,11 +114,18 @@ to be applied to the hardware during a video stream, allowing userspace to dynamically modify values such as black level, cross talk corrections and others. -The buffer format is defined by struct :c:type:`rkisp1_params_cfg`, and -userspace should set +The ISP driver supports two different parameters configuration methods, the +`fixed parameters format` or the `extensible parameters format`. + +When using the `fixed parameters` method the buffer format is defined by struct +:c:type:`rkisp1_params_cfg`, and userspace should set :ref:`V4L2_META_FMT_RK_ISP1_PARAMS ` as the dataformat. +When using the `extensible parameters` method the buffer format is defined by +struct :c:type:`rkisp1_ext_params_cfg`, and userspace should set +:ref:`V4L2_META_FMT_RK_ISP1_EXT_PARAMS ` as +the dataformat. Capturing Video Frames Example ============================== diff --git a/Documentation/admin-guide/media/vivid.rst b/Documentation/admin-guide/media/vivid.rst index 1306f19ecb5acc..034ca7c77fb973 100644 --- a/Documentation/admin-guide/media/vivid.rst +++ b/Documentation/admin-guide/media/vivid.rst @@ -328,7 +328,7 @@ and an HDMI input, one input for each input type. Those are described in more detail below. Special attention has been given to the rate at which new frames become -available. The jitter will be around 1 jiffie (that depends on the HZ +available. The jitter will be around 1 jiffy (that depends on the HZ configuration of your kernel, so usually 1/100, 1/250 or 1/1000 of a second), but the long-term behavior is exactly following the framerate. So a framerate of 59.94 Hz is really different from 60 Hz. If the framerate @@ -1343,7 +1343,7 @@ Some Future Improvements Just as a reminder and in no particular order: - Add a virtual alsa driver to test audio -- Add virtual sub-devices and media controller support +- Add virtual sub-devices - Some support for testing compressed video - Add support to loop raw VBI output to raw VBI input - Add support to loop teletext sliced VBI output to VBI input @@ -1358,4 +1358,4 @@ Just as a reminder and in no particular order: - Make a thread for the RDS generation, that would help in particular for the "Controls" RDS Rx I/O Mode as the read-only RDS controls could be updated in real-time. -- Changing the EDID should cause hotplug detect emulation to happen. +- Changing the EDID doesn't wait 100 ms before setting the HPD signal. diff --git a/Documentation/admin-guide/mm/damon/start.rst b/Documentation/admin-guide/mm/damon/start.rst index 054010a7f3d81a..c4dddf6733cda5 100644 --- a/Documentation/admin-guide/mm/damon/start.rst +++ b/Documentation/admin-guide/mm/damon/start.rst @@ -7,7 +7,7 @@ Getting Started This document briefly describes how you can use DAMON by demonstrating its default user space tool. Please note that this document describes only a part of its features for brevity. Please refer to the usage `doc -`_ of the tool for more +`_ of the tool for more details. @@ -26,7 +26,7 @@ User Space Tool For the demonstration, we will use the default user space tool for DAMON, called DAMON Operator (DAMO). 
It is available at -https://github.com/awslabs/damo. The examples below assume that ``damo`` is on +https://github.com/damonitor/damo. The examples below assume that ``damo`` is on your ``$PATH``. It's not mandatory, though. Because DAMO is using the sysfs interface (refer to :doc:`usage` for the diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index 26df6cfa44411c..d9be9f7caa7db1 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -7,19 +7,19 @@ Detailed Usages DAMON provides below interfaces for different users. - *DAMON user space tool.* - `This `_ is for privileged people such as + `This `_ is for privileged people such as system administrators who want a just-working human-friendly interface. Using this, users can use the DAMON’s major features in a human-friendly way. It may not be highly tuned for special cases, though. For more detail, please refer to its `usage document - `_. + `_. - *sysfs interface.* :ref:`This ` is for privileged user space programmers who want more optimized use of DAMON. Using this, users can use DAMON’s major features by reading from and writing to special sysfs files. Therefore, you can write and use your personalized DAMON sysfs wrapper programs that reads/writes the sysfs files instead of you. The `DAMON user space tool - `_ is one example of such programs. + `_ is one example of such programs. - *Kernel Space Programming Interface.* :doc:`This ` is for kernel space programmers. Using this, users can utilize every feature of DAMON most flexibly and efficiently by @@ -543,7 +543,7 @@ memory rate becomes larger than 60%, or lower than 30%". :: # echo 300 > watermarks/low Please note that it's highly recommended to use user space tools like `damo -`_ rather than manually reading and writing +`_ rather than manually reading and writing the files as above. Above is only for an example. .. _tracepoint: diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst index 098f14d83e991e..cb2c080f400ce4 100644 --- a/Documentation/admin-guide/mm/memory-hotplug.rst +++ b/Documentation/admin-guide/mm/memory-hotplug.rst @@ -294,8 +294,9 @@ The following files are currently defined: ``crash_hotplug`` read-only: when changes to the system memory map occur due to hot un/plug of memory, this file contains '1' if the kernel updates the kdump capture kernel memory - map itself (via elfcorehdr), or '0' if userspace must update - the kdump capture kernel memory map. + map itself (via elfcorehdr and other relevant kexec + segments), or '0' if userspace must update the kdump + capture kernel memory map. Availability depends on the CONFIG_MEMORY_HOTPLUG kernel configuration option. diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 058485daf1860d..cfdd16a52e39fd 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -202,6 +202,16 @@ PMD-mappable transparent hugepage:: cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size +All THPs at fault and collapse time will be added to _deferred_list, +and will therefore be split under memory pressure if they are considered +"underused". A THP is underused if the number of zero-filled pages in +the THP is above max_ptes_none (see below).
It is possible to disable +this behaviour by writing 0 to shrink_underused, and enable it by writing +1 to it:: + + echo 0 > /sys/kernel/mm/transparent_hugepage/shrink_underused + echo 1 > /sys/kernel/mm/transparent_hugepage/shrink_underused + khugepaged will be automatically started when PMD-sized THP is enabled (either of the per-size anon control or the top-level control are set to "always" or "madvise"), and it'll be automatically shutdown when @@ -284,13 +294,37 @@ that THP is shared. Exceeding the number would block the collapse:: A higher value may increase memory footprint for some workloads. -Boot parameter -============== +Boot parameters +=============== + +You can change the sysfs boot time default for the top-level "enabled" +control by passing the parameter ``transparent_hugepage=always`` or +``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` to the +kernel command line. + +Alternatively, each supported anonymous THP size can be controlled by +passing ``thp_anon=,[KMG]:;-[KMG]:``, +where ```` is the THP size (must be a power of 2 of PAGE_SIZE and +supported anonymous THP) and ```` is one of ``always``, ``madvise``, +``never`` or ``inherit``. + +For example, the following will set 16K, 32K, 64K THP to ``always``, +set 128K, 512K to ``inherit``, set 256K to ``madvise`` and 1M, 2M +to ``never``:: -You can change the sysfs boot time defaults of Transparent Hugepage -Support by passing the parameter ``transparent_hugepage=always`` or -``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` -to the kernel command line. + thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never + +``thp_anon=`` may be specified multiple times to configure all THP sizes as +required. If ``thp_anon=`` is specified at least once, any anon THP sizes +not explicitly configured on the command line are implicitly set to +``never``. + +``transparent_hugepage`` setting only affects the global toggle. If +``thp_anon`` is not specified, PMD_ORDER THP will default to ``inherit``. +However, if a valid ``thp_anon`` setting is provided by the user, the +PMD_ORDER THP policy will be overridden. If the policy for PMD_ORDER +is not defined within a valid ``thp_anon``, its policy will default to +``never``. Hugepages in tmpfs/shmem ======================== @@ -447,6 +481,12 @@ thp_deferred_split_page splitting it would free up some memory. Pages on split queue are going to be split under memory pressure. +thp_underused_split_page + is incremented when a huge page on the split queue was split + because it was underused. A THP is underused if the number of + zero pages in the THP is above a certain threshold + (/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none). + thp_split_pmd is incremented every time a PMD split into table of PTEs. This can happen, for instance, when application calls mprotect() or @@ -527,6 +567,18 @@ split_deferred it would free up some memory. Pages on split queue are going to be split under memory pressure, if splitting is possible. +nr_anon + the number of anonymous THP we have in the whole system. These THPs + might be currently entirely mapped or have partially unmapped/unused + subpages. + +nr_anon_partially_mapped + the number of anonymous THP which are likely partially mapped, possibly + wasting memory, and have been queued for deferred memory reclamation. 
+ Note that in some corner cases (e.g., failed migration), we might detect + an anonymous THP as "partially mapped" and count it here, even though it + is not actually partially mapped anymore. + As the system ages, allocating huge pages may be expensive as the system uses memory compaction to copy data around memory to free a huge page for use. There are some counters in ``/proc/vmstat`` to help diff --git a/Documentation/admin-guide/perf/arm-ni.rst b/Documentation/admin-guide/perf/arm-ni.rst new file mode 100644 index 00000000000000..d26a8f697c366f --- /dev/null +++ b/Documentation/admin-guide/perf/arm-ni.rst @@ -0,0 +1,17 @@ +==================================== +Arm Network-on Chip Interconnect PMU +==================================== + +NI-700 and friends implement a distinct PMU for each clock domain within the +interconnect. Correspondingly, the driver exposes multiple PMU devices named +arm_ni__cd_, where is an (arbitrary) instance identifier and is +the clock domain ID within that particular instance. If multiple NI instances +exist within a system, the PMU devices can be correlated with the underlying +hardware instance via sysfs parentage. + +Each PMU exposes base event aliases for the interface types present in its clock +domain. These require qualifying with the "eventid" and "nodeid" parameters +to specify the event code to count and the interface at which to count it +(per the configured hardware ID as reflected in the xxNI_NODE_INFO register). +The exception is the "cycles" alias for the PMU cycle counter, which is encoded +with the PMU node type and needs no further qualification. diff --git a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst index d47cd229d7106f..39b8e1fdd0cd07 100644 --- a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst +++ b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst @@ -46,16 +46,16 @@ Some of the events only exist for specific configurations. DesignWare Cores (DWC) PCIe PMU Driver ======================================= -This driver adds PMU devices for each PCIe Root Port named based on the BDF of +This driver adds PMU devices for each PCIe Root Port named based on the SBDF of the Root Port. For example, - 30:03.0 PCI bridge: Device 1ded:8000 (rev 01) + 0001:30:03.0 PCI bridge: Device 1ded:8000 (rev 01) -the PMU device name for this Root Port is dwc_rootport_3018. +the PMU device name for this Root Port is dwc_rootport_13018. The DWC PCIe PMU driver registers a perf PMU driver, which provides description of available events and configuration options in sysfs, see -/sys/bus/event_source/devices/dwc_rootport_{bdf}. +/sys/bus/event_source/devices/dwc_rootport_{sbdf}. The "format" directory describes format of the config fields of the perf_event_attr structure. The "events" directory provides configuration
The "events" directory provides configuration @@ -66,16 +66,16 @@ The "perf list" command shall list the available events from sysfs, e.g.:: $# perf list | grep dwc_rootport <...> - dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event] + dwc_rootport_13018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event] <...> - dwc_rootport_3018/rx_memory_read,lane=?/ [Kernel PMU event] + dwc_rootport_13018/rx_memory_read,lane=?/ [Kernel PMU event] Time Based Analysis Event Usage ------------------------------- Example usage of counting PCIe RX TLP data payload (Units of bytes):: - $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ + $# perf stat -a -e dwc_rootport_13018/Rx_PCIe_TLP_Data_Payload/ The average RX/TX bandwidth can be calculated using the following formula: @@ -88,7 +88,7 @@ Lane Event Usage Each lane has the same event set and to avoid generating a list of hundreds of events, the user need to specify the lane ID explicitly, e.g.:: - $# perf stat -a -e dwc_rootport_3018/rx_memory_read,lane=4/ + $# perf stat -a -e dwc_rootport_13018/rx_memory_read,lane=4/ The driver does not support sampling, therefore "perf record" will not work. Per-task (without "-a") perf sessions are not supported. diff --git a/Documentation/admin-guide/perf/hisi-pcie-pmu.rst b/Documentation/admin-guide/perf/hisi-pcie-pmu.rst index 5541ff40e06a88..083ca50de896be 100644 --- a/Documentation/admin-guide/perf/hisi-pcie-pmu.rst +++ b/Documentation/admin-guide/perf/hisi-pcie-pmu.rst @@ -28,7 +28,9 @@ The "identifier" sysfs file allows users to identify the version of the PMU hardware device. The "bus" sysfs file allows users to get the bus number of Root Ports -monitored by PMU. +monitored by PMU. Furthermore users can get the Root Ports range in +[bdf_min, bdf_max] from "bdf_min" and "bdf_max" sysfs attributes +respectively. Example usage of perf:: diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst index 7eb3dcd6f4da67..8502bc174640a4 100644 --- a/Documentation/admin-guide/perf/index.rst +++ b/Documentation/admin-guide/perf/index.rst @@ -16,6 +16,7 @@ Performance monitor support starfive_starlink_pmu arm-ccn arm-cmn + arm-ni xgene-pmu arm_dsu_pmu thunderx2-pmu diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst index d0324d44f5482d..210a808b74ec2c 100644 --- a/Documentation/admin-guide/pm/amd-pstate.rst +++ b/Documentation/admin-guide/pm/amd-pstate.rst @@ -251,7 +251,9 @@ performance supported in `AMD CPPC Performance Capability `_). In some ASICs, the highest CPPC performance is not the one in the ``_CPC`` table, so we need to expose it to sysfs. If boost is not active, but still supported, this maximum frequency will be larger than the one in -``cpuinfo``. +``cpuinfo``. On systems that support preferred core, the driver will have +different values for some cores than others and this will reflect the values +advertised by the platform at bootup. This attribute is read-only. ``amd_pstate_lowest_nonlinear_freq`` @@ -262,6 +264,17 @@ lowest non-linear performance in `AMD CPPC Performance Capability `_.) This attribute is read-only. +``amd_pstate_hw_prefcore`` + +Whether the platform supports the preferred core feature and it has been +enabled. This attribute is read-only. + +``amd_pstate_prefcore_ranking`` + +The performance ranking of the core. This number doesn't have any unit, but +larger numbers are preferred at the time of reading. This can change at +runtime based on platform conditions. 
This attribute is read-only. + ``energy_performance_available_preferences`` A list of all the supported EPP preferences that could be used for diff --git a/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst b/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst index 5ab3440e6cee06..5151ec312dc041 100644 --- a/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst +++ b/Documentation/admin-guide/pm/intel_uncore_frequency_scaling.rst @@ -113,3 +113,62 @@ to apply at each uncore* level. Support for "current_freq_khz" is available only at each fabric cluster level (i.e., in uncore* directory). + +Efficiency vs. Latency Tradeoff +------------------------------- + +The Efficiency Latency Control (ELC) feature improves performance +per watt. With this feature hardware power management algorithms +optimize trade-off between latency and power consumption. For some +latency sensitive workloads further tuning can be done by SW to +get desired performance. + +The hardware monitors the average CPU utilization across all cores +in a power domain at regular intervals and decides an uncore frequency. +While this may result in the best performance per watt, workload may be +expecting higher performance at the expense of power. Consider an +application that intermittently wakes up to perform memory reads on an +otherwise idle system. In such cases, if hardware lowers uncore +frequency, then there may be delay in ramp up of frequency to meet +target performance. + +The ELC control defines some parameters which can be changed from SW. +If the average CPU utilization is below a user-defined threshold +(elc_low_threshold_percent attribute below), the user-defined uncore +floor frequency will be used (elc_floor_freq_khz attribute below) +instead of hardware calculated minimum. + +Similarly in high load scenario where the CPU utilization goes above +the high threshold value (elc_high_threshold_percent attribute below) +instead of jumping to maximum uncore frequency, frequency is increased +in 100MHz steps. This avoids consuming unnecessarily high power +immediately with CPU utilization spikes. + +Attributes for efficiency latency control: + +``elc_floor_freq_khz`` + This attribute is used to get/set the efficiency latency floor frequency. + If this variable is lower than the 'min_freq_khz', it is ignored by + the firmware. + +``elc_low_threshold_percent`` + This attribute is used to get/set the efficiency latency control low + threshold. This attribute is in percentages of CPU utilization. + +``elc_high_threshold_percent`` + This attribute is used to get/set the efficiency latency control high + threshold. This attribute is in percentages of CPU utilization. + +``elc_high_threshold_enable`` + This attribute is used to enable/disable the efficiency latency control + high threshold. Write '1' to enable, '0' to disable. 
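As a rough sketch of driving these attributes from the shell, mirroring the example configuration shown next (the directory name package_00_die_00 is only an assumption; the actual uncore*/package_*_die_* entries depend on the platform)::

    cd /sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00
    echo 800000 > elc_floor_freq_khz          # floor used below the low threshold
    echo 10 > elc_low_threshold_percent       # low CPU utilization threshold
    echo 95 > elc_high_threshold_percent      # high CPU utilization threshold
    echo 1 > elc_high_threshold_enable        # step up frequency above the high threshold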
+ +Example system configuration below, which does following: + * when CPU utilization is less than 10%: sets uncore frequency to 800MHz + * when CPU utilization is higher than 95%: increases uncore frequency in + 100MHz steps, until power limit is reached + + elc_floor_freq_khz:800000 + elc_high_threshold_percent:95 + elc_high_threshold_enable:1 + elc_low_threshold_percent:10 diff --git a/Documentation/admin-guide/ramoops.rst b/Documentation/admin-guide/ramoops.rst index 6f534a707b2a46..2eabef31220dcb 100644 --- a/Documentation/admin-guide/ramoops.rst +++ b/Documentation/admin-guide/ramoops.rst @@ -129,7 +129,7 @@ Setting the ramoops parameters can be done in several different manners: takes a size, alignment and name as arguments. The name is used to map the memory to a label that can be retrieved by ramoops. - reserver_mem=2M:4096:oops ramoops.mem_name=oops + reserve_mem=2M:4096:oops ramoops.mem_name=oops You can specify either RAM memory or peripheral devices' memory. However, when specifying RAM, be sure to reserve the memory by issuing memblock_reserve() diff --git a/Documentation/admin-guide/tainted-kernels.rst b/Documentation/admin-guide/tainted-kernels.rst index f92551539e8a66..700aa72eecb169 100644 --- a/Documentation/admin-guide/tainted-kernels.rst +++ b/Documentation/admin-guide/tainted-kernels.rst @@ -182,3 +182,5 @@ More detailed explanation for tainting produce extremely unusual kernel structure layouts (even performance pathological ones), which is important to know when debugging. Set at build time. + + 18) ``N`` if an in-kernel test, such as a KUnit test, has been run. diff --git a/Documentation/arch/arm/mem_alignment.rst b/Documentation/arch/arm/mem_alignment.rst index aa22893b62bc34..64bd779593003b 100644 --- a/Documentation/arch/arm/mem_alignment.rst +++ b/Documentation/arch/arm/mem_alignment.rst @@ -12,7 +12,7 @@ ones. Of course this is a bad idea to rely on the alignment trap to perform unaligned memory access in general. If those access are predictable, you -are better to use the macros provided by include/asm/unaligned.h. The +are better to use the macros provided by include/linux/unaligned.h. The alignment trap can fixup misaligned access for the exception cases, but at a high performance cost. It better be rare. diff --git a/Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst b/Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst index 2945e0e331047a..301aa30890aebf 100644 --- a/Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst +++ b/Documentation/arch/arm/stm32/stm32-dma-mdma-chaining.rst @@ -359,7 +359,7 @@ Driver updates for STM32 DMA-MDMA chaining support in foo driver descriptor you want a callback to be called at the end of the transfer (dmaengine_prep_slave_sg()) or the period (dmaengine_prep_dma_cyclic()). Depending on the direction, set the callback on the descriptor that finishes - the overal transfer: + the overall transfer: * DMA_DEV_TO_MEM: set the callback on the "MDMA" descriptor * DMA_MEM_TO_DEV: set the callback on the "DMA" descriptor @@ -371,7 +371,7 @@ Driver updates for STM32 DMA-MDMA chaining support in foo driver As STM32 MDMA channel transfer is triggered by STM32 DMA, you must issue STM32 MDMA channel before STM32 DMA channel. - If any, your callback will be called to warn you about the end of the overal + If any, your callback will be called to warn you about the end of the overall transfer or the period completion. Don't forget to terminate both channels. 
STM32 DMA channel is configured in diff --git a/Documentation/arch/arm64/cpu-hotplug.rst b/Documentation/arch/arm64/cpu-hotplug.rst index 76ba8d932c7226..8fb438bf778162 100644 --- a/Documentation/arch/arm64/cpu-hotplug.rst +++ b/Documentation/arch/arm64/cpu-hotplug.rst @@ -26,7 +26,7 @@ There are no systems that support the physical addition (or removal) of CPUs while the system is running, and ACPI is not able to sufficiently describe them. -e.g. New CPUs come with new caches, but the platform's cache toplogy is +e.g. New CPUs come with new caches, but the platform's cache topology is described in a static table, the PPTT. How caches are shared between CPUs is not discoverable, and must be described by firmware. diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst index 448c1664879bc1..694f67fa07d196 100644 --- a/Documentation/arch/arm64/elf_hwcaps.rst +++ b/Documentation/arch/arm64/elf_hwcaps.rst @@ -365,6 +365,8 @@ HWCAP2_SME_SF8DP2 HWCAP2_SME_SF8DP4 Functionality implied by ID_AA64SMFR0_EL1.SF8DP4 == 0b1. +HWCAP2_POE + Functionality implied by ID_AA64MMFR3_EL1.S1POE == 0b0001. 4. Unused AT_HWCAP bits ----------------------- diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst index 50327c05be8d1b..65bfab1b186146 100644 --- a/Documentation/arch/arm64/silicon-errata.rst +++ b/Documentation/arch/arm64/silicon-errata.rst @@ -55,6 +55,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 | +----------------+-----------------+-----------------+-----------------------------+ +| Ampere | AmpereOne AC04 | AC04_CPU_10 | AMPERE_ERRATUM_AC03_CPU_38 | ++----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | +----------------+-----------------+-----------------+-----------------------------+ @@ -144,6 +146,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A715 | #3456084 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 | @@ -184,6 +188,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-N3 | #3456111 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-V1 | #1619801 | N/A | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 | @@ -249,8 +255,8 @@ stable kernels. 
+----------------+-----------------+-----------------+-----------------------------+ | Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A | +----------------+-----------------+-----------------+-----------------------------+ -| Hisilicon | Hip08 SMMU PMCG | #162001900 | N/A | -| | Hip09 SMMU PMCG | | | +| Hisilicon | Hip{08,09,10,10C| #162001900 | N/A | +| | ,11} SMMU PMCG | | | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | @@ -287,3 +293,5 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Microsoft | Azure Cobalt 100| #2253138 | ARM64_ERRATUM_2253138 | +----------------+-----------------+-----------------+-----------------------------+ +| Microsoft | Azure Cobalt 100| #3324339 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ diff --git a/Documentation/arch/loongarch/irq-chip-model.rst b/Documentation/arch/loongarch/irq-chip-model.rst index 7988f41923639d..6dd48256e39f74 100644 --- a/Documentation/arch/loongarch/irq-chip-model.rst +++ b/Documentation/arch/loongarch/irq-chip-model.rst @@ -85,6 +85,38 @@ to CPUINTC directly:: | Devices | +---------+ +Advanced Extended IRQ model +=========================== + +In this model, IPI (Inter-Processor Interrupt) and CPU Local Timer interrupt go +to CPUINTC directly, CPU UARTS interrupts go to LIOINTC, PCH-MSI interrupts go +to AVECINTC, and then go to CPUINTC directly, while all other devices interrupts +go to PCH-PIC/PCH-LPC and gathered by EIOINTC, and then go to CPUINTC directly:: + + +-----+ +-----------------------+ +-------+ + | IPI | --> | CPUINTC | <-- | Timer | + +-----+ +-----------------------+ +-------+ + ^ ^ ^ + | | | + +---------+ +----------+ +---------+ +-------+ + | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | + +---------+ +----------+ +---------+ +-------+ + ^ ^ + | | + +---------+ +---------+ + | PCH-PIC | | PCH-MSI | + +---------+ +---------+ + ^ ^ ^ + | | | + +---------+ +---------+ +---------+ + | Devices | | PCH-LPC | | Devices | + +---------+ +---------+ +---------+ + ^ + | + +---------+ + | Devices | + +---------+ + ACPI-related definitions ======================== diff --git a/Documentation/arch/powerpc/ultravisor.rst b/Documentation/arch/powerpc/ultravisor.rst index ba6b1bf1cc4458..6d0407b2f5a1f1 100644 --- a/Documentation/arch/powerpc/ultravisor.rst +++ b/Documentation/arch/powerpc/ultravisor.rst @@ -134,7 +134,7 @@ Hardware * PTCR and partition table entries (partition table is in secure memory). An attempt to write to PTCR will cause a Hypervisor - Emulation Assitance interrupt. + Emulation Assistance interrupt. * LDBAR (LD Base Address Register) and IMC (In-Memory Collection) non-architected registers. An attempt to write to them will cause a diff --git a/Documentation/arch/riscv/vector.rst b/Documentation/arch/riscv/vector.rst index 75dd88a62e1d84..3987f5f76a9deb 100644 --- a/Documentation/arch/riscv/vector.rst +++ b/Documentation/arch/riscv/vector.rst @@ -15,7 +15,7 @@ status for the use of Vector in userspace. The intended usage guideline for these interfaces is to give init systems a way to modify the availability of V for processes running under its domain. Calling these interfaces is not recommended in libraries routines because libraries should not override policies -configured from the parant process. 
Also, users must noted that these interfaces +configured from the parent process. Also, users must note that these interfaces are not portable to non-Linux, nor non-RISC-V environments, so it is discourage to use in a portable code. To get the availability of V in an ELF program, please read :c:macro:`COMPAT_HWCAP_ISA_V` bit of :c:macro:`ELF_HWCAP` in the diff --git a/Documentation/arch/s390/vfio-ap.rst b/Documentation/arch/s390/vfio-ap.rst index ea744cbc8687e4..eba1991fbdba5f 100644 --- a/Documentation/arch/s390/vfio-ap.rst +++ b/Documentation/arch/s390/vfio-ap.rst @@ -999,6 +999,36 @@ the vfio_ap mediated device to which it is assigned as long as each new APQN resulting from plugging it in references a queue device bound to the vfio_ap device driver. +Driver Features +=============== +The vfio_ap driver exposes a sysfs file containing supported features. +This exists so third party tools (like Libvirt and mdevctl) can query the +availability of specific features. + +The features list can be found here: /sys/bus/matrix/devices/matrix/features + +Entries are space delimited. Each entry consists of a combination of +alphanumeric and underscore characters. + +Example: +cat /sys/bus/matrix/devices/matrix/features +guest_matrix dyn ap_config + +the following features are advertised: + +---------------+---------------------------------------------------------------+ +| Flag | Description | ++==============+===============================================================+ +| guest_matrix | guest_matrix attribute exists. It reports the matrix of | +| | adapters and domains that are or will be passed through to a | +| | guest when the mdev is attached to it. | ++--------------+---------------------------------------------------------------+ +| dyn | Indicates hot plug/unplug of AP adapters, domains and control | +| | domains for a guest to which the mdev is attached. | ++------------+-----------------------------------------------------------------+ +| ap_config | ap_config interface for one-shot modifications to mdev config | ++--------------+---------------------------------------------------------------+ + Limitations =========== Live guest migration is not supported for guests using AP devices without diff --git a/Documentation/arch/x86/mds.rst b/Documentation/arch/x86/mds.rst index c58c72362911cd..5a2e6c0ef04a53 100644 --- a/Documentation/arch/x86/mds.rst +++ b/Documentation/arch/x86/mds.rst @@ -162,7 +162,7 @@ Mitigation points 3. It would take a large number of these precisely-timed NMIs to mount an actual attack. There's presumably not enough bandwidth. 4. The NMI in question occurs after a VERW, i.e. when user state is - restored and most interesting data is already scrubbed. Whats left + restored and most interesting data is already scrubbed. What's left is only the data that NMI touches, and that may or may not be of any interest. diff --git a/Documentation/arch/x86/x86_64/boot-options.rst b/Documentation/arch/x86/x86_64/boot-options.rst index 137432d3410998..98d4805f0823a1 100644 --- a/Documentation/arch/x86/x86_64/boot-options.rst +++ b/Documentation/arch/x86/x86_64/boot-options.rst @@ -170,18 +170,6 @@ NUMA Don't parse the HMAT table for NUMA setup, or soft-reserved memory partitioning. - numa=fake=[MG] - If given as a memory unit, fills all system RAM with nodes of - size interleaved over physical nodes. - - numa=fake= - If given as an integer, fills all system RAM with N fake nodes - interleaved over physical nodes. 
- - numa=fake=U - If given as an integer followed by 'U', it will divide each - physical node into N emulated nodes. - ACPI ==== diff --git a/Documentation/arch/x86/x86_64/fsgs.rst b/Documentation/arch/x86/x86_64/fsgs.rst index 50960e09e1f664..d07e445dac5cf6 100644 --- a/Documentation/arch/x86/x86_64/fsgs.rst +++ b/Documentation/arch/x86/x86_64/fsgs.rst @@ -125,7 +125,7 @@ FSGSBASE instructions enablement FSGSBASE instructions compiler support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -GCC version 4.6.4 and newer provide instrinsics for the FSGSBASE +GCC version 4.6.4 and newer provide intrinsics for the FSGSBASE instructions. Clang 5 supports them as well. =================== =========================== @@ -135,7 +135,7 @@ instructions. Clang 5 supports them as well. _writegsbase_u64() Write the GS base register =================== =========================== -To utilize these instrinsics must be included in the source +To utilize these intrinsics must be included in the source code and the compiler option -mfsgsbase has to be added. Compiler support for FS/GS based addressing diff --git a/Documentation/block/bfq-iosched.rst b/Documentation/block/bfq-iosched.rst index df3a8a47f58c0b..a0ff0eb11e7f18 100644 --- a/Documentation/block/bfq-iosched.rst +++ b/Documentation/block/bfq-iosched.rst @@ -9,7 +9,7 @@ controllers), BFQ's main features are: - BFQ guarantees a high system and application responsiveness, and a low latency for time-sensitive applications, such as audio or video players; -- BFQ distributes bandwidth, and not just time, among processes or +- BFQ distributes bandwidth, not just time, among processes or groups (switching back to time distribution when needed to keep throughput high). @@ -111,7 +111,7 @@ Higher speed for code-development tasks If some additional workload happens to be executed in parallel, then BFQ executes the I/O-related components of typical code-development -tasks (compilation, checkout, merge, ...) much more quickly than CFQ, +tasks (compilation, checkout, merge, etc.) much more quickly than CFQ, NOOP or DEADLINE. High throughput @@ -127,9 +127,9 @@ Strong fairness, bandwidth and delay guarantees ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ BFQ distributes the device throughput, and not just the device time, -among I/O-bound applications in proportion their weights, with any +among I/O-bound applications in proportion to their weights, with any workload and regardless of the device parameters. From these bandwidth -guarantees, it is possible to compute tight per-I/O-request delay +guarantees, it is possible to compute a tight per-I/O-request delay guarantees by a simple formula. If not configured for strict service guarantees, BFQ switches to time-based resource sharing (only) for applications that would otherwise cause a throughput loss. @@ -199,7 +199,7 @@ plus a lot of code, are borrowed from CFQ. - On flash-based storage with internal queueing of commands (typically NCQ), device idling happens to be always detrimental - for throughput. So, with these devices, BFQ performs idling + to throughput. So, with these devices, BFQ performs idling only when strictly needed for service guarantees, i.e., for guaranteeing low latency or fairness. In these cases, overall throughput may be sub-optimal. No solution currently exists to @@ -212,7 +212,7 @@ plus a lot of code, are borrowed from CFQ. and to reduce their latency. 
The most important action taken to achieve this goal is to give to the queues associated with these applications more than their fair share of the device - throughput. For brevity, we call just "weight-raising" the whole + throughput. For brevity, we call it just "weight-raising" the whole sets of actions taken by BFQ to privilege these queues. In particular, BFQ provides a milder form of weight-raising for interactive applications, and a stronger form for soft real-time @@ -231,7 +231,7 @@ plus a lot of code, are borrowed from CFQ. responsive in detecting interleaved I/O (cooperating processes), that it enables BFQ to achieve a high throughput, by queue merging, even for queues for which CFQ needs a different - mechanism, preemption, to get a high throughput. As such EQM is a + mechanism, preemption, to get a high throughput. As such, EQM is a unified mechanism to achieve a high throughput with interleaved I/O. @@ -254,7 +254,7 @@ plus a lot of code, are borrowed from CFQ. - First, with any proportional-share scheduler, the maximum deviation with respect to an ideal service is proportional to the maximum budget (slice) assigned to queues. As a consequence, - BFQ can keep this deviation tight not only because of the + BFQ can keep this deviation tight, not only because of the accurate service of B-WF2Q+, but also because BFQ *does not* need to assign a larger budget to a queue to let the queue receive a higher fraction of the device throughput. @@ -327,7 +327,7 @@ applications. Unset this tunable if you need/want to control weights. slice_idle ---------- -This parameter specifies how long BFQ should idle for next I/O +This parameter specifies how long BFQ should idle for the next I/O request, when certain sync BFQ queues become empty. By default slice_idle is a non-zero value. Idling has a double purpose: boosting throughput and making sure that the desired throughput distribution is @@ -365,7 +365,7 @@ terms of I/O-request dispatches. To guarantee that the actual service order then corresponds to the dispatch order, the strict_guarantees tunable must be set too. -There is an important flipside for idling: apart from the above cases +There is an important flip side to idling: apart from the above cases where it is beneficial also for throughput, idling can severely impact throughput. One important case is random workload. Because of this issue, BFQ tends to avoid idling as much as possible, when it is not @@ -475,7 +475,7 @@ max_budget Maximum amount of service, measured in sectors, that can be provided to a BFQ queue once it is set in service (of course within the limits -of the above timeout). According to what said in the description of +of the above timeout). According to what was said in the description of the algorithm, larger values increase the throughput in proportion to the percentage of sequential I/O requests issued. The price of larger values is that they coarsen the granularity of short-term bandwidth diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 257a7e1cdf5d0d..93060283b6fda2 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -368,7 +368,7 @@ No additional type data follow ``btf_type``. * ``info.kind_flag``: 0 * ``info.kind``: BTF_KIND_FUNC * ``info.vlen``: linkage information (BTF_FUNC_STATIC, BTF_FUNC_GLOBAL - or BTF_FUNC_EXTERN) + or BTF_FUNC_EXTERN - see :ref:`BTF_Function_Linkage_Constants`) * ``type``: a BTF_KIND_FUNC_PROTO type No additional type data follow ``btf_type``. 
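As an illustrative aside, a minimal C sketch (the function names are hypothetical) of the three linkage classes that end up in the ``info.vlen`` field of a ``BTF_KIND_FUNC`` entry when a BPF-targeting compiler such as clang is run with ``-g``::

    /* BTF_FUNC_STATIC: definition is not visible outside this compilation unit. */
    static int scale(int x)
    {
            return x * 2;
    }

    /* BTF_FUNC_GLOBAL: definition is visible outside this compilation unit. */
    int compute(int x)
    {
            return scale(x) + 1;
    }

    /* BTF_FUNC_EXTERN: declaration only; the definition lives in another unit. */
    extern int external_helper(int x);

Only the storage class of each function matters here; the same three-way split applies to the variable linkage constants (``BTF_VAR_STATIC``, ``BTF_VAR_GLOBAL_ALLOCATED``, ``BTF_VAR_GLOBAL_EXTERN``) covered by the following hunk.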
@@ -424,9 +424,8 @@ following data:: __u32 linkage; }; -``struct btf_var`` encoding: - * ``linkage``: currently only static variable 0, or globally allocated - variable in ELF sections 1 +``btf_var.linkage`` may take the values: BTF_VAR_STATIC, BTF_VAR_GLOBAL_ALLOCATED or BTF_VAR_GLOBAL_EXTERN - +see :ref:`BTF_Var_Linkage_Constants`. Not all type of global variables are supported by LLVM at this point. The following is currently available: @@ -549,6 +548,38 @@ The ``btf_enum64`` encoding: If the original enum value is signed and the size is less than 8, that value will be sign extended into 8 bytes. +2.3 Constant Values +------------------- + +.. _BTF_Function_Linkage_Constants: + +2.3.1 Function Linkage Constant Values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. table:: Function Linkage Values and Meanings + + =================== ===== =========== + kind value description + =================== ===== =========== + ``BTF_FUNC_STATIC`` 0x0 definition of subprogram not visible outside containing compilation unit + ``BTF_FUNC_GLOBAL`` 0x1 definition of subprogram visible outside containing compilation unit + ``BTF_FUNC_EXTERN`` 0x2 declaration of a subprogram whose definition is outside the containing compilation unit + =================== ===== =========== + + +.. _BTF_Var_Linkage_Constants: + +2.3.2 Variable Linkage Constant Values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. table:: Variable Linkage Values and Meanings + + ============================ ===== =========== + kind value description + ============================ ===== =========== + ``BTF_VAR_STATIC`` 0x0 definition of global variable not visible outside containing compilation unit + ``BTF_VAR_GLOBAL_ALLOCATED`` 0x1 definition of global variable visible outside containing compilation unit + ``BTF_VAR_GLOBAL_EXTERN`` 0x2 declaration of global variable whose definition is outside the containing compilation unit + ============================ ===== =========== + 3. BTF Kernel API ================= diff --git a/Documentation/bpf/libbpf/program_types.rst b/Documentation/bpf/libbpf/program_types.rst index 63bb88846e5084..218b020a2f81ff 100644 --- a/Documentation/bpf/libbpf/program_types.rst +++ b/Documentation/bpf/libbpf/program_types.rst @@ -121,6 +121,8 @@ described in more detail in the footnotes. +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ +| ``BPF_PROG_TYPE_NETFILTER`` | | ``netfilter`` | | ++-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | | @@ -131,11 +133,23 @@ described in more detail in the footnotes. 
+ + +----------------------------------+-----------+ | | | ``raw_tracepoint+`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ -| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` | | +| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` [#tc_legacy]_ | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ -| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` | | +| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` [#tc_legacy]_ | | + + +----------------------------------+-----------+ -| | | ``tc`` | | +| | | ``tc`` [#tc_legacy]_ | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_NETKIT_PRIMARY`` | ``netkit/primary`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_NETKIT_PEER`` | ``netkit/peer`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_TCX_INGRESS`` | ``tc/ingress`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_TCX_EGRESS`` | ``tc/egress`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_TCX_INGRESS`` | ``tcx/ingress`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_TCX_EGRESS`` | ``tcx/egress`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ @@ -155,7 +169,9 @@ described in more detail in the footnotes. +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ -| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` | | +| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` [#struct_ops]_ | | ++ + +----------------------------------+-----------+ +| | | ``struct_ops.s+`` [#struct_ops]_ | Yes | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ @@ -209,5 +225,11 @@ described in more detail in the footnotes. ``a-zA-Z0-9_.*?``. .. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/``. .. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/``. +.. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated, use + ``tcx/*`` instead. +.. [#struct_ops] The ``struct_ops`` attach format supports ``struct_ops[.s]/`` convention, + but ``name`` is ignored and it is recommended to just use plain + ``SEC("struct_ops[.s]")``. The attachments are defined in a struct initializer + that is tagged with ``SEC(".struct_ops[.link]")``. .. [#tp] The ``tracepoint`` attach format is ``tracepoint//``. .. [#iter] The ``iter`` attach format is ``iter[.s]/``. 
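As an illustrative aside, a minimal libbpf-style sketch of the ``tcx/*`` section names listed above, assuming the usual ``vmlinux.h`` / ``bpf_helpers.h`` setup generated for the target kernel (the program name is hypothetical)::

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    char LICENSE[] SEC("license") = "GPL";

    /* A BPF_PROG_TYPE_SCHED_CLS program attached with the BPF_TCX_INGRESS
     * attach type; "tcx/ingress" is the preferred spelling now that the
     * legacy "tc" / "classifier" / "action" section names are deprecated.
     */
    SEC("tcx/ingress")
    int ingress_pass(struct __sk_buff *skb)
    {
            return 0; /* 0 == TCX_PASS: let the packet continue up the stack. */
    }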
diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst index 356894399fbf83..d2376154000227 100644 --- a/Documentation/bpf/verifier.rst +++ b/Documentation/bpf/verifier.rst @@ -418,7 +418,7 @@ The rules for correspondence between registers / stack slots are as follows: linked to the registers and stack slots of the parent state with the same indices. -* For the outer stack frames, only caller saved registers (r6-r9) and stack +* For the outer stack frames, only callee saved registers (r6-r9) and stack slots are linked to the registers and stack slots of the parent state with the same indices. diff --git a/Documentation/core-api/cleanup.rst b/Documentation/core-api/cleanup.rst new file mode 100644 index 00000000000000..527eb2f8ec6efd --- /dev/null +++ b/Documentation/core-api/cleanup.rst @@ -0,0 +1,8 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========================== +Scope-based Cleanup Helpers +=========================== + +.. kernel-doc:: include/linux/cleanup.h + :doc: scope-based cleanup helpers diff --git a/Documentation/core-api/cpu_hotplug.rst b/Documentation/core-api/cpu_hotplug.rst index dcb0e379e5e8fb..a21dbf261be7f5 100644 --- a/Documentation/core-api/cpu_hotplug.rst +++ b/Documentation/core-api/cpu_hotplug.rst @@ -737,8 +737,9 @@ can process the event further. When changes to the CPUs in the system occur, the sysfs file /sys/devices/system/cpu/crash_hotplug contains '1' if the kernel -updates the kdump capture kernel list of CPUs itself (via elfcorehdr), -or '0' if userspace must update the kdump capture kernel list of CPUs. +updates the kdump capture kernel list of CPUs itself (via elfcorehdr and +other relevant kexec segment), or '0' if userspace must update the kdump +capture kernel list of CPUs. The availability depends on the CONFIG_HOTPLUG_CPU kernel configuration option. @@ -750,8 +751,9 @@ file can be used in a udev rule as follows: SUBSYSTEM=="cpu", ATTRS{crash_hotplug}=="1", GOTO="kdump_reload_end" For a CPU hot un/plug event, if the architecture supports kernel updates -of the elfcorehdr (which contains the list of CPUs), then the rule skips -the unload-then-reload of the kdump capture kernel. +of the elfcorehdr (which contains the list of CPUs) and other relevant +kexec segments, then the rule skips the unload-then-reload of the kdump +capture kernel. Kernel Inline Documentations Reference ====================================== diff --git a/Documentation/core-api/folio_queue.rst b/Documentation/core-api/folio_queue.rst new file mode 100644 index 00000000000000..1fe7a9bc4b8dbe --- /dev/null +++ b/Documentation/core-api/folio_queue.rst @@ -0,0 +1,212 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +=========== +Folio Queue +=========== + +:Author: David Howells + +.. Contents: + + * Overview + * Initialisation + * Adding and removing folios + * Querying information about a folio + * Querying information about a folio_queue + * Folio queue iteration + * Folio marks + * Lockless simultaneous production/consumption issues + + +Overview +======== + +The folio_queue struct forms a single segment in a segmented list of folios +that can be used to form an I/O buffer. As such, the list can be iterated over +using the ITER_FOLIOQ iov_iter type. + +The publicly accessible members of the structure are:: + + struct folio_queue { + struct folio_queue *next; + struct folio_queue *prev; + ... + }; + +A pair of pointers are provided, ``next`` and ``prev``, that point to the +segments on either side of the segment being accessed. 
Whilst this is a +doubly-linked list, it is intentionally not a circular list; the outward +sibling pointers in terminal segments should be NULL. + +Each segment in the list also stores: + + * an ordered sequence of folio pointers, + * the size of each folio and + * three 1-bit marks per folio, + +but these should not be accessed directly as the underlying data structure may +change, but rather the access functions outlined below should be used. + +The facility can be made accessible by:: + + #include + +and to use the iterator:: + + #include + + +Initialisation +============== + +A segment should be initialised by calling:: + + void folioq_init(struct folio_queue *folioq); + +with a pointer to the segment to be initialised. Note that this will not +necessarily initialise all the folio pointers, so care must be taken to check +the number of folios added. + + +Adding and removing folios +========================== + +Folios can be set in the next unused slot in a segment struct by calling one +of:: + + unsigned int folioq_append(struct folio_queue *folioq, + struct folio *folio); + + unsigned int folioq_append_mark(struct folio_queue *folioq, + struct folio *folio); + +Both functions update the stored folio count, store the folio and note its +size. The second function also sets the first mark for the folio added. Both +functions return the number of the slot used. [!] Note that no attempt is made +to check that the capacity wasn't overrun and the list will not be extended +automatically. + +A folio can be excised by calling:: + + void folioq_clear(struct folio_queue *folioq, unsigned int slot); + +This clears the slot in the array and also clears all the marks for that folio, +but doesn't change the folio count - so future accesses of that slot must check +if the slot is occupied. + + +Querying information about a folio +================================== + +Information about the folio in a particular slot may be queried by the +following function:: + + struct folio *folioq_folio(const struct folio_queue *folioq, + unsigned int slot); + +If a folio has not yet been set in that slot, this may yield an undefined +pointer. The size of the folio in a slot may be queried with either of:: + + unsigned int folioq_folio_order(const struct folio_queue *folioq, + unsigned int slot); + + size_t folioq_folio_size(const struct folio_queue *folioq, + unsigned int slot); + +The first function returns the size as an order and the second as a number of +bytes. + + +Querying information about a folio_queue +======================================== + +Information may be retrieved about a particular segment with the following +functions:: + + unsigned int folioq_nr_slots(const struct folio_queue *folioq); + + unsigned int folioq_count(struct folio_queue *folioq); + + bool folioq_full(struct folio_queue *folioq); + +The first function returns the maximum capacity of a segment. It must not be +assumed that this won't vary between segments. The second returns the number +of folios added to a segment and the third is a shorthand to indicate if the +segment has been filled to capacity. + +Note that the count and fullness are not affected by clearing folios from the +segment. These are more about indicating how many slots in the array have been +initialised, and it is assumed that slots won't get reused, but rather the segment +will get discarded as the queue is consumed. + + +Folio marks +=========== + +Folios within a queue can also have marks assigned to them.
These marks can be +used to note information such as if a folio needs folio_put() calling upon it. +There are three marks available to be set for each folio. + +The marks can be set by:: + + void folioq_mark(struct folio_queue *folioq, unsigned int slot); + void folioq_mark2(struct folio_queue *folioq, unsigned int slot); + void folioq_mark3(struct folio_queue *folioq, unsigned int slot); + +Cleared by:: + + void folioq_unmark(struct folio_queue *folioq, unsigned int slot); + void folioq_unmark2(struct folio_queue *folioq, unsigned int slot); + void folioq_unmark3(struct folio_queue *folioq, unsigned int slot); + +And the marks can be queried by:: + + bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot); + bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot); + bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot); + +The marks can be used for any purpose and are not interpreted by this API. + + +Folio queue iteration +===================== + +A list of segments may be iterated over using the I/O iterator facility using +an ``iov_iter`` iterator of ``ITER_FOLIOQ`` type. The iterator may be +initialised with:: + + void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction, + const struct folio_queue *folioq, + unsigned int first_slot, unsigned int offset, + size_t count); + +This may be told to start at a particular segment, slot and offset within a +queue. The iov iterator functions will follow the next pointers when advancing +and prev pointers when reverting when needed. + + +Lockless simultaneous production/consumption issues +=================================================== + +If properly managed, the list can be extended by the producer at the head end +and shortened by the consumer at the tail end simultaneously without the need +to take locks. The ITER_FOLIOQ iterator inserts appropriate barriers to aid +with this. + +Care must be taken when simultaneously producing and consuming a list. If the +last segment is reached and the folios it refers to are entirely consumed by +the IOV iterators, an iov_iter struct will be left pointing to the last segment +with a slot number equal to the capacity of that segment. The iterator will +try to continue on from this if there's another segment available when it is +used again, but care must be taken lest the segment got removed and freed by +the consumer before the iterator was advanced. + +It is recommended that the queue always contain at least one segment, even if +that segment has never been filled or is entirely spent. This prevents the +head and tail pointers from collapsing. + + +API Function Reference +====================== + +.. kernel-doc:: include/linux/folio_queue.h diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst index f147854700e442..6a875743dd4b7f 100644 --- a/Documentation/core-api/index.rst +++ b/Documentation/core-api/index.rst @@ -35,7 +35,9 @@ Library functionality that is used throughout the kernel. kobject kref + cleanup assoc_array + folio_queue xarray maple_tree idr @@ -49,6 +51,7 @@ Library functionality that is used throughout the kernel. 
wrappers/atomic_t wrappers/atomic_bitops floating-point + union_find Low level entry and exit ======================== diff --git a/Documentation/core-api/memory-allocation.rst b/Documentation/core-api/memory-allocation.rst index 8b84eb4bdae7fb..0f19dd52432394 100644 --- a/Documentation/core-api/memory-allocation.rst +++ b/Documentation/core-api/memory-allocation.rst @@ -45,8 +45,9 @@ here we briefly outline their recommended usage: * If the allocation is performed from an atomic context, e.g interrupt handler, use ``GFP_NOWAIT``. This flag prevents direct reclaim and IO or filesystem operations. Consequently, under memory pressure - ``GFP_NOWAIT`` allocation is likely to fail. Allocations which - have a reasonable fallback should be using ``GFP_NOWARN``. + ``GFP_NOWAIT`` allocation is likely to fail. Users of this flag need + to provide a suitable fallback to cope with such failures where + appropriate. * If you think that accessing memory reserves is justified and the kernel will be stressed unless allocation succeeds, you may use ``GFP_ATOMIC``. * Untrusted allocations triggered from userspace should be a subject diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst index 4451ef50193613..14e093da3ccd93 100644 --- a/Documentation/core-api/printk-formats.rst +++ b/Documentation/core-api/printk-formats.rst @@ -576,13 +576,12 @@ The field width is passed by value, the bitmap is passed by reference. Helper macros cpumask_pr_args() and nodemask_pr_args() are available to ease printing cpumask and nodemask. -Flags bitfields such as page flags, page_type, gfp_flags +Flags bitfields such as page flags and gfp_flags -------------------------------------------------------- :: %pGp 0x17ffffc0002036(referenced|uptodate|lru|active|private|node=0|zone=2|lastcpupid=0x1fffff) - %pGt 0xffffff7f(buddy) %pGg GFP_USER|GFP_DMA32|GFP_NOWARN %pGv read|exec|mayread|maywrite|mayexec|denywrite @@ -591,7 +590,6 @@ would construct the value. The type of flags is given by the third character. Currently supported are: - p - [p]age flags, expects value of type (``unsigned long *``) - - t - page [t]ype, expects value of type (``unsigned int *``) - v - [v]ma_flags, expects value of type (``unsigned long *``) - g - [g]fp_flags, expects value of type (``gfp_t *``) diff --git a/Documentation/core-api/unaligned-memory-access.rst b/Documentation/core-api/unaligned-memory-access.rst index 1ee82419d8aa66..5ceeb80eb539cf 100644 --- a/Documentation/core-api/unaligned-memory-access.rst +++ b/Documentation/core-api/unaligned-memory-access.rst @@ -203,7 +203,7 @@ Avoiding unaligned accesses =========================== The easiest way to avoid unaligned access is to use the get_unaligned() and -put_unaligned() macros provided by the header file. +put_unaligned() macros provided by the header file. Going back to an earlier example of code that potentially causes unaligned access:: diff --git a/Documentation/core-api/union_find.rst b/Documentation/core-api/union_find.rst new file mode 100644 index 00000000000000..6df8b94fdb5a0b --- /dev/null +++ b/Documentation/core-api/union_find.rst @@ -0,0 +1,106 @@ +.. SPDX-License-Identifier: GPL-2.0 + +==================== +Union-Find in Linux +==================== + + +:Date: June 21, 2024 +:Author: Xavier + +What is union-find, and what is it used for? +------------------------------------------------ + +Union-find is a data structure used to handle the merging and querying +of disjoint sets. 
The primary operations supported by union-find are: + + Initialization: Resetting each element as an individual set, with + each set's initial parent node pointing to itself. + + Find: Determine which set a particular element belongs to, usually by + returning a “representative element” of that set. This operation + is used to check if two elements are in the same set. + + Union: Merge two sets into one. + +As a data structure used to maintain sets (groups), union-find is commonly +utilized to solve problems related to offline queries, dynamic connectivity, +and graph theory. It is also a key component in Kruskal's algorithm for +computing the minimum spanning tree, which is crucial in scenarios like +network routing. Consequently, union-find is widely referenced. Additionally, +union-find has applications in symbolic computation, register allocation, +and more. + +Space Complexity: O(n), where n is the number of nodes. + +Time Complexity: Using path compression can reduce the time complexity of +the find operation, and using union by rank can reduce the time complexity +of the union operation. These optimizations reduce the average time +complexity of each find and union operation to O(α(n)), where α(n) is the +inverse Ackermann function. This can be roughly considered a constant time +complexity for practical purposes. + +This document covers use of the Linux union-find implementation. For more +information on the nature and implementation of union-find, see: + + Wikipedia entry on union-find + https://en.wikipedia.org/wiki/Disjoint-set_data_structure + +Linux implementation of union-find +----------------------------------- + +Linux's union-find implementation resides in the file "lib/union_find.c". +To use it, "#include ". + +The union-find data structure is defined as follows:: + + struct uf_node { + struct uf_node *parent; + unsigned int rank; + }; + +In this structure, parent points to the parent node of the current node. +The rank field represents the height of the current tree. During a union +operation, the tree with the smaller rank is attached under the tree with the +larger rank to maintain balance. + +Initializing union-find +----------------------- + +You can complete the initialization using either static or initialization +interface. Initialize the parent pointer to point to itself and set the rank +to 0. +Example:: + + struct uf_node my_node = UF_INIT_NODE(my_node); + +or + + uf_node_init(&my_node); + +Find the Root Node of union-find +-------------------------------- + +This operation is mainly used to determine whether two nodes belong to the same +set in the union-find. If they have the same root, they are in the same set. +During the find operation, path compression is performed to improve the +efficiency of subsequent find operations. +Example:: + + int connected; + struct uf_node *root1 = uf_find(&node_1); + struct uf_node *root2 = uf_find(&node_2); + if (root1 == root2) + connected = 1; + else + connected = 0; + +Union Two Sets in union-find +---------------------------- + +To union two sets in the union-find, you first find their respective root nodes +and then link the smaller node to the larger node based on the rank of the root +nodes. 
+Example:: + + uf_union(&node_1, &node_2); diff --git a/Documentation/dev-tools/gcov.rst b/Documentation/dev-tools/gcov.rst index 5fce2b06f22954..dbd26b02ff3c94 100644 --- a/Documentation/dev-tools/gcov.rst +++ b/Documentation/dev-tools/gcov.rst @@ -75,6 +75,17 @@ Only files which are linked to the main kernel image or are compiled as kernel modules are supported by this mechanism. +Module specific configs +----------------------- + +Gcov kernel configs for specific modules are described below: + +CONFIG_GCOV_PROFILE_RDS: + Enables GCOV profiling on RDS for checking which functions or + lines are executed. This config is used by the rds selftest to + generate coverage reports. If left unset the report is omitted. + + Files ----- diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index 02143f060b22f0..d81c42d1063eab 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -361,7 +361,8 @@ Alternatives Considered ----------------------- An alternative data race detection approach for the kernel can be found in the -`Kernel Thread Sanitizer (KTSAN) `_. +`Kernel Thread Sanitizer (KTSAN) +`_. KTSAN is a happens-before data race detector, which explicitly establishes the happens-before order between memory operations, which can then be used to determine data races as defined in `Data Races`_. diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst index 936f6aaa75c857..5418993538653c 100644 --- a/Documentation/dev-tools/kfence.rst +++ b/Documentation/dev-tools/kfence.rst @@ -53,6 +53,13 @@ configurable via the Kconfig option ``CONFIG_KFENCE_DEFERRABLE``. The KUnit test suite is very likely to fail when using a deferrable timer since it currently causes very unpredictable sample intervals. +By default KFENCE will only sample 1 heap allocation within each sample +interval. *Burst mode* allows to sample successive heap allocations, where the +kernel boot parameter ``kfence.burst`` can be set to a non-zero value which +denotes the *additional* successive allocations within a sample interval; +setting ``kfence.burst=N`` means that ``1 + N`` successive allocations are +attempted through KFENCE for each sample interval. + The KFENCE memory pool is of fixed size, and if the pool is exhausted, no further KFENCE allocations occur. With ``CONFIG_KFENCE_NUM_OBJECTS`` (default 255), the number of available guarded objects can be controlled. Each object diff --git a/Documentation/dev-tools/kunit/api/clk.rst b/Documentation/dev-tools/kunit/api/clk.rst new file mode 100644 index 00000000000000..eeaa500894536f --- /dev/null +++ b/Documentation/dev-tools/kunit/api/clk.rst @@ -0,0 +1,10 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======== +Clk API +======== + +The KUnit clk API is used to test clk providers and clk consumers. + +.. kernel-doc:: drivers/clk/clk_kunit_helpers.c + :export: diff --git a/Documentation/dev-tools/kunit/api/index.rst b/Documentation/dev-tools/kunit/api/index.rst index 2d8f756aab56c8..5cdb552a0808f2 100644 --- a/Documentation/dev-tools/kunit/api/index.rst +++ b/Documentation/dev-tools/kunit/api/index.rst @@ -9,11 +9,17 @@ API Reference test resource functionredirection + clk + of + platformdevice This page documents the KUnit kernel testing API. 
It is divided into the following sections: +Core KUnit API +============== + Documentation/dev-tools/kunit/api/test.rst - Documents all of the standard testing API @@ -25,3 +31,18 @@ Documentation/dev-tools/kunit/api/resource.rst Documentation/dev-tools/kunit/api/functionredirection.rst - Documents the KUnit Function Redirection API + +Driver KUnit API +================ + +Documentation/dev-tools/kunit/api/clk.rst + + - Documents the KUnit clk API + +Documentation/dev-tools/kunit/api/of.rst + + - Documents the KUnit device tree (OF) API + +Documentation/dev-tools/kunit/api/platformdevice.rst + + - Documents the KUnit platform device API diff --git a/Documentation/dev-tools/kunit/api/of.rst b/Documentation/dev-tools/kunit/api/of.rst new file mode 100644 index 00000000000000..cb4193dcddbb85 --- /dev/null +++ b/Documentation/dev-tools/kunit/api/of.rst @@ -0,0 +1,13 @@ +.. SPDX-License-Identifier: GPL-2.0 + +==================== +Device Tree (OF) API +==================== + +The KUnit device tree API is used to test device tree (of_*) dependent code. + +.. kernel-doc:: include/kunit/of.h + :internal: + +.. kernel-doc:: drivers/of/of_kunit_helpers.c + :export: diff --git a/Documentation/dev-tools/kunit/api/platformdevice.rst b/Documentation/dev-tools/kunit/api/platformdevice.rst new file mode 100644 index 00000000000000..49ddd572900313 --- /dev/null +++ b/Documentation/dev-tools/kunit/api/platformdevice.rst @@ -0,0 +1,10 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================== +Platform Device API +=================== + +The KUnit platform device API is used to test platform devices. + +.. kernel-doc:: lib/kunit/platform.c + :export: diff --git a/Documentation/dev-tools/kunit/style.rst b/Documentation/dev-tools/kunit/style.rst index b6d0d7359f006a..eac81a714a2900 100644 --- a/Documentation/dev-tools/kunit/style.rst +++ b/Documentation/dev-tools/kunit/style.rst @@ -188,15 +188,26 @@ For example, a Kconfig entry might look like: Test File and Module Names ========================== -KUnit tests can often be compiled as a module. These modules should be named -after the test suite, followed by ``_test``. If this is likely to conflict with -non-KUnit tests, the suffix ``_kunit`` can also be used. +KUnit tests are often compiled as a separate module. To avoid conflicting +with regular modules, KUnit modules should be named after the test suite, +followed by ``_kunit`` (e.g. if "foobar" is the core module, then +"foobar_kunit" is the KUnit test module). -The easiest way of achieving this is to name the file containing the test suite -``_test.c`` (or, as above, ``_kunit.c``). This file should be -placed next to the code under test. +Test source files, whether compiled as a separate module or an +``#include`` in another source file, are best kept in a ``tests/`` +subdirectory to not conflict with other source files (e.g. for +tab-completion). + +Note that the ``_test`` suffix has also been used in some existing +tests. The ``_kunit`` suffix is preferred, as it makes the distinction +between KUnit and non-KUnit tests clearer. + +So for the common case, name the file containing the test suite +``tests/_kunit.c``. The ``tests`` directory should be placed at +the same level as the code under test. For example, tests for +``lib/string.c`` live in ``lib/tests/string_kunit.c``. If the suite name contains some or all of the name of the test's parent -directory, it may make sense to modify the source filename to reduce redundancy. 
-For example, a ``foo_firmware`` suite could be in the ``foo/firmware_test.c`` -file. +directory, it may make sense to modify the source filename to reduce +redundancy. For example, a ``foo_firmware`` suite could be in the +``foo/tests/firmware_kunit.c`` file. diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt deleted file mode 100644 index e4b9dcee6d41a1..00000000000000 --- a/Documentation/devicetree/bindings/arc/archs-pct.txt +++ /dev/null @@ -1,17 +0,0 @@ -* ARC HS Performance Counters - -The ARC HS can be configured with a pipeline performance monitor for counting -CPU and cache events like cache misses and hits. Like conventional PCT there -are 100+ hardware conditions dynamically mapped to up to 32 counters. -It also supports overflow interrupts. - -Required properties: - -- compatible : should contain - "snps,archs-pct" - -Example: - -pmu { - compatible = "snps,archs-pct"; -}; diff --git a/Documentation/devicetree/bindings/arc/snps,archs-pct.yaml b/Documentation/devicetree/bindings/arc/snps,archs-pct.yaml new file mode 100644 index 00000000000000..532f7584f59ffa --- /dev/null +++ b/Documentation/devicetree/bindings/arc/snps,archs-pct.yaml @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arc/snps,archs-pct.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ARC HS Performance Counters + +maintainers: + - Aryabhatta Dey + +description: + The ARC HS can be configured with a pipeline performance monitor for counting + CPU and cache events like cache misses and hits. Like conventional PCT there + are 100+ hardware conditions dynamically mapped to up to 32 counters. + It also supports overflow interrupts. + +properties: + compatible: + const: snps,archs-pct + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + +required: + - compatible + - reg + - clocks + +additionalProperties: false diff --git a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml index 7dff32f373cb93..b4f6695a601528 100644 --- a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml +++ b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml @@ -25,10 +25,18 @@ select: properties: compatible: - items: - - const: amlogic,meson-gx-ao-secure - - const: syscon - + oneOf: + - items: + - const: amlogic,meson-gx-ao-secure + - const: syscon + - items: + - enum: + - amlogic,a4-ao-secure + - amlogic,c3-ao-secure + - amlogic,s4-ao-secure + - amlogic,t7-ao-secure + - const: amlogic,meson-gx-ao-secure + - const: syscon reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml index d50a60368e279a..04a8c37b4aff55 100644 --- a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml +++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml @@ -17,7 +17,7 @@ description: | The Coresight dummy source component is for the specific coresight source devices kernel don't have permission to access or configure. For some SOCs, there would be Coresight source trace components on sub-processor which - are conneted to AP processor via debug bus. For these devices, a dummy driver + are connected to AP processor via debug bus. 
For these devices, a dummy driver is needed to register them as Coresight source devices, so that paths can be created in the driver. It provides Coresight API for operations on dummy source devices, such as enabling and disabling them. It also provides the diff --git a/Documentation/devicetree/bindings/arm/arm,corstone1000.yaml b/Documentation/devicetree/bindings/arm/arm,corstone1000.yaml index 693f3fe7be60ff..cff1cdaadb13cc 100644 --- a/Documentation/devicetree/bindings/arm/arm,corstone1000.yaml +++ b/Documentation/devicetree/bindings/arm/arm,corstone1000.yaml @@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: ARM Corstone1000 maintainers: - - Vishnu Banavath - - Rui Miguel Silva + - Abdellatif El Khlifi + - Hugues Kamba Mpiana description: |+ ARM's Corstone1000 includes pre-verified Corstone SSE-710 subsystem that diff --git a/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml b/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml index 95113df178cc55..2f92b8ab08fa2c 100644 --- a/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml +++ b/Documentation/devicetree/bindings/arm/aspeed/aspeed.yaml @@ -79,6 +79,7 @@ properties: - aspeed,ast2600-evb-a1 - asus,x4tf-bmc - facebook,bletchley-bmc + - facebook,catalina-bmc - facebook,cloudripper-bmc - facebook,elbert-bmc - facebook,fuji-bmc @@ -86,7 +87,9 @@ properties: - facebook,harma-bmc - facebook,minerva-cmc - facebook,yosemite4-bmc + - ibm,blueridge-bmc - ibm,everest-bmc + - ibm,fuji-bmc - ibm,rainier-bmc - ibm,system1-bmc - ibm,tacoma-bmc diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt index 7374beb5a61321..76e2b79782502e 100644 --- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt +++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt @@ -11,7 +11,8 @@ PIT Timer required properties: shared across all System Controller members. PIT64B Timer required properties: -- compatible: Should be "microchip,sam9x60-pit64b" +- compatible: Should be "microchip,sam9x60-pit64b" or + "microchip,sam9x7-pit64b", "microchip,sam9x60-pit64b" - reg: Should contain registers location and length - interrupts: Should contain interrupt for PIT64B timer - clocks: Should contain the available clock sources for PIT64B timer. @@ -31,7 +32,8 @@ RAMC SDRAM/DDR Controller required properties: "atmel,at91sam9g45-ddramc", "atmel,sama5d3-ddramc", "microchip,sam9x60-ddramc", - "microchip,sama7g5-uddrc" + "microchip,sama7g5-uddrc", + "microchip,sam9x7-ddramc", "atmel,sama5d3-ddramc". - reg: Should contain registers location and length Examples: diff --git a/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml b/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml new file mode 100644 index 00000000000000..170aad5dd7ed8a --- /dev/null +++ b/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/cirrus/cirrus,ep9301.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus Logic EP93xx platforms + +description: + The EP93xx SoC is a ARMv4T-based with 200 MHz ARM9 CPU. 
+ +maintainers: + - Alexander Sverdlin + - Nikita Shubin + +properties: + $nodename: + const: '/' + compatible: + oneOf: + - description: The TS-7250 is a compact, full-featured Single Board + Computer (SBC) based upon the Cirrus EP9302 ARM9 CPU + items: + - const: technologic,ts7250 + - const: cirrus,ep9301 + + - description: The Liebherr BK3 is a derivate from ts7250 board + items: + - const: liebherr,bk3 + - const: cirrus,ep9301 + + - description: EDB302 is an evaluation board by Cirrus Logic, + based on a Cirrus Logic EP9302 CPU + items: + - const: cirrus,edb9302 + - const: cirrus,ep9301 + +additionalProperties: true diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml index 80747d79418a00..b39a7e031177e6 100644 --- a/Documentation/devicetree/bindings/arm/fsl.yaml +++ b/Documentation/devicetree/bindings/arm/fsl.yaml @@ -809,19 +809,19 @@ properties: - const: kontron,sl-imx6ull # Kontron SL i.MX6ULL SoM - const: fsl,imx6ull - - description: TQ Systems TQMa6ULLx SoM on MBa6ULx board + - description: TQ-Systems TQMa6ULLx SoM on MBa6ULx board items: - enum: - - tq,imx6ull-tqma6ull2-mba6ulx - - const: tq,imx6ull-tqma6ull2 # MCIMX6Y2 + - tq,imx6ull-tqma6ull2-mba6ulx # TQMa6ULL socketable SoM with MCIMX6Y2 on MBa6ULx EVK + - const: tq,imx6ull-tqma6ull2 # TQMa6ULL socketable SoM with MCIMX6Y2 - const: fsl,imx6ull - - description: TQ Systems TQMa6ULLxL SoM on MBa6ULx[L] board + - description: TQ-Systems TQMa6ULLxL SoM on MBa6ULx[L] board items: - enum: - - tq,imx6ull-tqma6ull2l-mba6ulx # using LGA adapter - - tq,imx6ull-tqma6ull2l-mba6ulxl - - const: tq,imx6ull-tqma6ull2l # MCIMX6Y2, LGA SoM variant + - tq,imx6ull-tqma6ull2l-mba6ulx # TQMa6ULLxL LGA SoM with socketable Adapter on MBa6ULx EVK + - tq,imx6ull-tqma6ull2l-mba6ulxl # TQMa6ULLxL LGA SoM on MBa6ULxL gateway board + - const: tq,imx6ull-tqma6ull2l # TQMa6ULLxL LGA SoM with MCIMX6Y2 - const: fsl,imx6ull - description: Seeed Stuido i.MX6ULL SoM on dev boards @@ -939,8 +939,8 @@ properties: - fsl,imx8mm-ddr4-evk # i.MX8MM DDR4 EVK Board - fsl,imx8mm-evk # i.MX8MM EVK Board - fsl,imx8mm-evkb # i.MX8MM EVKB Board + - gateworks,imx8mm-gw75xx-0x # i.MX8MM Gateworks Board - gateworks,imx8mm-gw7904 - - gateworks,imx8mm-gw7905-0x # i.MX8MM Gateworks Board - gw,imx8mm-gw71xx-0x # i.MX8MM Gateworks Development Kit - gw,imx8mm-gw72xx-0x # i.MX8MM Gateworks Development Kit - gw,imx8mm-gw73xx-0x # i.MX8MM Gateworks Development Kit @@ -953,7 +953,6 @@ properties: - toradex,verdin-imx8mm # Verdin iMX8M Mini Modules - toradex,verdin-imx8mm-nonwifi # Verdin iMX8M Mini Modules without Wi-Fi / BT - toradex,verdin-imx8mm-wifi # Verdin iMX8M Mini Wi-Fi / BT Modules - - variscite,var-som-mx8mm # i.MX8MM Variscite VAR-SOM-MX8MM module - prt,prt8mm # i.MX8MM Protonic PRT8MM Board - const: fsl,imx8mm @@ -1082,7 +1081,7 @@ properties: - gateworks,imx8mp-gw72xx-2x # i.MX8MP Gateworks Board - gateworks,imx8mp-gw73xx-2x # i.MX8MP Gateworks Board - gateworks,imx8mp-gw74xx # i.MX8MP Gateworks Board - - gateworks,imx8mp-gw7905-2x # i.MX8MP Gateworks Board + - gateworks,imx8mp-gw75xx-2x # i.MX8MP Gateworks Board - skov,imx8mp-skov-revb-hdmi # SKOV i.MX8MP climate control without panel - skov,imx8mp-skov-revb-lt6 # SKOV i.MX8MP climate control with 7” panel - skov,imx8mp-skov-revb-mi1010ait-1cp1 # SKOV i.MX8MP climate control with 10.1" panel @@ -1168,6 +1167,12 @@ properties: - const: tq,imx8mp-tqma8mpql # TQ-Systems GmbH i.MX8MP TQMa8MPQL SOM - const: fsl,imx8mp + - description: Variscite VAR-SOM-MX8M Plus based 
boards + items: + - const: variscite,var-som-mx8mp-symphony + - const: variscite,var-som-mx8mp + - const: fsl,imx8mp + - description: i.MX8MQ based Boards items: - enum: @@ -1293,6 +1298,7 @@ properties: - enum: - fsl,imx93-9x9-qsb # i.MX93 9x9 QSB Board - fsl,imx93-11x11-evk # i.MX93 11x11 EVK Board + - fsl,imx93-14x14-evk # i.MX93 14x14 EVK Board - const: fsl,imx93 - description: i.MX95 based Boards @@ -1344,6 +1350,12 @@ properties: - const: variscite,var-som-mx93 - const: fsl,imx93 + - description: Kontron OSM-S i.MX93 SoM based boards + items: + - const: kontron,imx93-bl-osm-s # Kontron BL i.MX93 OSM-S board + - const: kontron,imx93-osm-s # Kontron OSM-S i.MX93 SoM + - const: fsl,imx93 + - description: Freescale Vybrid Platform Device Tree Bindings @@ -1523,6 +1535,12 @@ properties: - fsl,ls2080a-rdb - const: fsl,ls2080a + - description: LS2081A based Boards + items: + - enum: + - fsl,ls2081a-rdb + - const: fsl,ls2081a + - description: LS2088A based Boards items: - enum: diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt deleted file mode 100644 index 149567a3821507..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt +++ /dev/null @@ -1,24 +0,0 @@ -Mediatek bdpsys controller -============================ - -The Mediatek bdpsys controller provides various clocks to the system. - -Required Properties: - -- compatible: Should be: - - "mediatek,mt2701-bdpsys", "syscon" - - "mediatek,mt2712-bdpsys", "syscon" - - "mediatek,mt7623-bdpsys", "mediatek,mt2701-bdpsys", "syscon" -- #clock-cells: Must be 1 - -The bdpsys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -bdpsys: clock-controller@1c000000 { - compatible = "mediatek,mt2701-bdpsys", "syscon"; - reg = <0 0x1c000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt deleted file mode 100644 index a0ce82085ad0c2..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt +++ /dev/null @@ -1,24 +0,0 @@ -MediaTek CAMSYS controller -============================ - -The MediaTek camsys controller provides various clocks to the system. - -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt6765-camsys", "syscon" - - "mediatek,mt6779-camsys", "syscon" - - "mediatek,mt8183-camsys", "syscon" -- #clock-cells: Must be 1 - -The camsys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -camsys: camsys@1a000000 { - compatible = "mediatek,mt8183-camsys", "syscon"; - reg = <0 0x1a000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt deleted file mode 100644 index dce4c924193250..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt +++ /dev/null @@ -1,30 +0,0 @@ -Mediatek imgsys controller -============================ - -The Mediatek imgsys controller provides various clocks to the system. 
- -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt2701-imgsys", "syscon" - - "mediatek,mt2712-imgsys", "syscon" - - "mediatek,mt6765-imgsys", "syscon" - - "mediatek,mt6779-imgsys", "syscon" - - "mediatek,mt6797-imgsys", "syscon" - - "mediatek,mt7623-imgsys", "mediatek,mt2701-imgsys", "syscon" - - "mediatek,mt8167-imgsys", "syscon" - - "mediatek,mt8173-imgsys", "syscon" - - "mediatek,mt8183-imgsys", "syscon" -- #clock-cells: Must be 1 - -The imgsys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -imgsys: clock-controller@15000000 { - compatible = "mediatek,mt8173-imgsys", "syscon"; - reg = <0 0x15000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.yaml deleted file mode 100644 index 230b5188a88dbe..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.yaml +++ /dev/null @@ -1,85 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/arm/mediatek/mediatek,infracfg.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek Infrastructure System Configuration Controller - -maintainers: - - Matthias Brugger - -description: - The Mediatek infracfg controller provides various clocks and reset outputs - to the system. The clock values can be found in , - and reset values in and - . - -properties: - compatible: - oneOf: - - items: - - enum: - - mediatek,mt2701-infracfg - - mediatek,mt2712-infracfg - - mediatek,mt6765-infracfg - - mediatek,mt6795-infracfg - - mediatek,mt6779-infracfg_ao - - mediatek,mt6797-infracfg - - mediatek,mt7622-infracfg - - mediatek,mt7629-infracfg - - mediatek,mt7981-infracfg - - mediatek,mt7986-infracfg - - mediatek,mt7988-infracfg - - mediatek,mt8135-infracfg - - mediatek,mt8167-infracfg - - mediatek,mt8173-infracfg - - mediatek,mt8183-infracfg - - mediatek,mt8516-infracfg - - const: syscon - - items: - - const: mediatek,mt7623-infracfg - - const: mediatek,mt2701-infracfg - - const: syscon - - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - - '#reset-cells': - const: 1 - -required: - - compatible - - reg - - '#clock-cells' - -if: - properties: - compatible: - contains: - enum: - - mediatek,mt2701-infracfg - - mediatek,mt2712-infracfg - - mediatek,mt6795-infracfg - - mediatek,mt7622-infracfg - - mediatek,mt7986-infracfg - - mediatek,mt8135-infracfg - - mediatek,mt8173-infracfg - - mediatek,mt8183-infracfg -then: - required: - - '#reset-cells' - -additionalProperties: false - -examples: - - | - infracfg: clock-controller@10001000 { - compatible = "mediatek,mt8173-infracfg", "syscon"; - reg = <0x10001000 0x1000>; - #clock-cells = <1>; - #reset-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt deleted file mode 100644 index 2ce889b023d9b4..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt +++ /dev/null @@ -1,22 +0,0 @@ -Mediatek ipesys controller -============================ - -The Mediatek ipesys controller provides various clocks to the system. 
- -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt6779-ipesys", "syscon" -- #clock-cells: Must be 1 - -The ipesys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -ipesys: clock-controller@1b000000 { - compatible = "mediatek,mt6779-ipesys", "syscon"; - reg = <0 0x1b000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt deleted file mode 100644 index aabc8c5c8ed2c9..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt +++ /dev/null @@ -1,43 +0,0 @@ -Mediatek IPU controller -============================ - -The Mediatek ipu controller provides various clocks to the system. - -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt8183-ipu_conn", "syscon" - - "mediatek,mt8183-ipu_adl", "syscon" - - "mediatek,mt8183-ipu_core0", "syscon" - - "mediatek,mt8183-ipu_core1", "syscon" -- #clock-cells: Must be 1 - -The ipu controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -ipu_conn: syscon@19000000 { - compatible = "mediatek,mt8183-ipu_conn", "syscon"; - reg = <0 0x19000000 0 0x1000>; - #clock-cells = <1>; -}; - -ipu_adl: syscon@19010000 { - compatible = "mediatek,mt8183-ipu_adl", "syscon"; - reg = <0 0x19010000 0 0x1000>; - #clock-cells = <1>; -}; - -ipu_core0: syscon@19180000 { - compatible = "mediatek,mt8183-ipu_core0", "syscon"; - reg = <0 0x19180000 0 0x1000>; - #clock-cells = <1>; -}; - -ipu_core1: syscon@19280000 { - compatible = "mediatek,mt8183-ipu_core1", "syscon"; - reg = <0 0x19280000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt deleted file mode 100644 index 2df799cd06a742..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt +++ /dev/null @@ -1,22 +0,0 @@ -Mediatek jpgdecsys controller -============================ - -The Mediatek jpgdecsys controller provides various clocks to the system. - -Required Properties: - -- compatible: Should be: - - "mediatek,mt2712-jpgdecsys", "syscon" -- #clock-cells: Must be 1 - -The jpgdecsys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -jpgdecsys: syscon@19000000 { - compatible = "mediatek,mt2712-jpgdecsys", "syscon"; - reg = <0 0x19000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt deleted file mode 100644 index 2b882b7ca72ed2..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt +++ /dev/null @@ -1,23 +0,0 @@ -Mediatek mcucfg controller -============================ - -The Mediatek mcucfg controller provides various clocks to the system. 
- -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt2712-mcucfg", "syscon" - - "mediatek,mt8183-mcucfg", "syscon" -- #clock-cells: Must be 1 - -The mcucfg controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -mcucfg: syscon@10220000 { - compatible = "mediatek,mt2712-mcucfg", "syscon"; - reg = <0 0x10220000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt deleted file mode 100644 index 054424fb64b4ed..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt +++ /dev/null @@ -1,25 +0,0 @@ -Mediatek mfgcfg controller -============================ - -The Mediatek mfgcfg controller provides various clocks to the system. - -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt2712-mfgcfg", "syscon" - - "mediatek,mt6779-mfgcfg", "syscon" - - "mediatek,mt8167-mfgcfg", "syscon" - - "mediatek,mt8183-mfgcfg", "syscon" -- #clock-cells: Must be 1 - -The mfgcfg controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -mfgcfg: syscon@13000000 { - compatible = "mediatek,mt2712-mfgcfg", "syscon"; - reg = <0 0x13000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt deleted file mode 100644 index 1c671943ce4d62..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt +++ /dev/null @@ -1,28 +0,0 @@ -Mediatek mipi0a (mipi_rx_ana_csi0a) controller -============================ - -The Mediatek mipi0a controller provides various clocks -to the system. - -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt6765-mipi0a", "syscon" -- #clock-cells: Must be 1 - -The mipi0a controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -The mipi0a controller also uses the common power domain from -Documentation/devicetree/bindings/soc/mediatek/scpsys.txt -The available power domains are defined in dt-bindings/power/mt*-power.h. - -Example: - -mipi0a: clock-controller@11c10000 { - compatible = "mediatek,mt6765-mipi0a", "syscon"; - reg = <0 0x11c10000 0 0x1000>; - power-domains = <&scpsys MT6765_POWER_DOMAIN_CAM>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml deleted file mode 100644 index 7cd14b163abe35..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8186-clock.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek Functional Clock Controller for MT8186 - -maintainers: - - Chun-Jie Chen - -description: | - The clock architecture in MediaTek like below - PLLs --> - dividers --> - muxes - --> - clock gate - - The devices provide clock gate control in different IP blocks. 
- -properties: - compatible: - items: - - enum: - - mediatek,mt8186-imp_iic_wrap - - mediatek,mt8186-mfgsys - - mediatek,mt8186-wpesys - - mediatek,mt8186-imgsys1 - - mediatek,mt8186-imgsys2 - - mediatek,mt8186-vdecsys - - mediatek,mt8186-vencsys - - mediatek,mt8186-camsys - - mediatek,mt8186-camsys_rawa - - mediatek,mt8186-camsys_rawb - - mediatek,mt8186-mdpsys - - mediatek,mt8186-ipesys - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - -required: - - compatible - - reg - -additionalProperties: false - -examples: - - | - imp_iic_wrap: clock-controller@11017000 { - compatible = "mediatek,mt8186-imp_iic_wrap"; - reg = <0x11017000 0x1000>; - #clock-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml deleted file mode 100644 index 64c76941669077..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml +++ /dev/null @@ -1,57 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8186-sys-clock.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek System Clock Controller for MT8186 - -maintainers: - - Chun-Jie Chen - -description: | - The clock architecture in MediaTek like below - PLLs --> - dividers --> - muxes - --> - clock gate - - The apmixedsys provides most of PLLs which generated from SoC 26m. - The topckgen provides dividers and muxes which provide the clock source to other IP blocks. - The infracfg_ao provides clock gate in peripheral and infrastructure IP blocks. - The mcusys provides mux control to select the clock source in AP MCU. - The device nodes also provide the system control capacity for configuration. - -properties: - compatible: - items: - - enum: - - mediatek,mt8186-mcusys - - mediatek,mt8186-topckgen - - mediatek,mt8186-infracfg_ao - - mediatek,mt8186-apmixedsys - - const: syscon - - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - - '#reset-cells': - const: 1 - -required: - - compatible - - reg - -additionalProperties: false - -examples: - - | - topckgen: syscon@10000000 { - compatible = "mediatek,mt8186-topckgen", "syscon"; - reg = <0x10000000 0x1000>; - #clock-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml deleted file mode 100644 index dff4c8e8fd4b0f..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml +++ /dev/null @@ -1,191 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8192-clock.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek Functional Clock Controller for MT8192 - -maintainers: - - Chun-Jie Chen - -description: - The Mediatek functional clock controller provides various clocks on MT8192. 
- -properties: - compatible: - items: - - enum: - - mediatek,mt8192-scp_adsp - - mediatek,mt8192-imp_iic_wrap_c - - mediatek,mt8192-imp_iic_wrap_e - - mediatek,mt8192-imp_iic_wrap_s - - mediatek,mt8192-imp_iic_wrap_ws - - mediatek,mt8192-imp_iic_wrap_w - - mediatek,mt8192-imp_iic_wrap_n - - mediatek,mt8192-msdc_top - - mediatek,mt8192-mfgcfg - - mediatek,mt8192-imgsys - - mediatek,mt8192-imgsys2 - - mediatek,mt8192-vdecsys_soc - - mediatek,mt8192-vdecsys - - mediatek,mt8192-vencsys - - mediatek,mt8192-camsys - - mediatek,mt8192-camsys_rawa - - mediatek,mt8192-camsys_rawb - - mediatek,mt8192-camsys_rawc - - mediatek,mt8192-ipesys - - mediatek,mt8192-mdpsys - - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - -required: - - compatible - - reg - -additionalProperties: false - -examples: - - | - scp_adsp: clock-controller@10720000 { - compatible = "mediatek,mt8192-scp_adsp"; - reg = <0x10720000 0x1000>; - #clock-cells = <1>; - }; - - - | - imp_iic_wrap_c: clock-controller@11007000 { - compatible = "mediatek,mt8192-imp_iic_wrap_c"; - reg = <0x11007000 0x1000>; - #clock-cells = <1>; - }; - - - | - imp_iic_wrap_e: clock-controller@11cb1000 { - compatible = "mediatek,mt8192-imp_iic_wrap_e"; - reg = <0x11cb1000 0x1000>; - #clock-cells = <1>; - }; - - - | - imp_iic_wrap_s: clock-controller@11d03000 { - compatible = "mediatek,mt8192-imp_iic_wrap_s"; - reg = <0x11d03000 0x1000>; - #clock-cells = <1>; - }; - - - | - imp_iic_wrap_ws: clock-controller@11d23000 { - compatible = "mediatek,mt8192-imp_iic_wrap_ws"; - reg = <0x11d23000 0x1000>; - #clock-cells = <1>; - }; - - - | - imp_iic_wrap_w: clock-controller@11e01000 { - compatible = "mediatek,mt8192-imp_iic_wrap_w"; - reg = <0x11e01000 0x1000>; - #clock-cells = <1>; - }; - - - | - imp_iic_wrap_n: clock-controller@11f02000 { - compatible = "mediatek,mt8192-imp_iic_wrap_n"; - reg = <0x11f02000 0x1000>; - #clock-cells = <1>; - }; - - - | - msdc_top: clock-controller@11f10000 { - compatible = "mediatek,mt8192-msdc_top"; - reg = <0x11f10000 0x1000>; - #clock-cells = <1>; - }; - - - | - mfgcfg: clock-controller@13fbf000 { - compatible = "mediatek,mt8192-mfgcfg"; - reg = <0x13fbf000 0x1000>; - #clock-cells = <1>; - }; - - - | - imgsys: clock-controller@15020000 { - compatible = "mediatek,mt8192-imgsys"; - reg = <0x15020000 0x1000>; - #clock-cells = <1>; - }; - - - | - imgsys2: clock-controller@15820000 { - compatible = "mediatek,mt8192-imgsys2"; - reg = <0x15820000 0x1000>; - #clock-cells = <1>; - }; - - - | - vdecsys_soc: clock-controller@1600f000 { - compatible = "mediatek,mt8192-vdecsys_soc"; - reg = <0x1600f000 0x1000>; - #clock-cells = <1>; - }; - - - | - vdecsys: clock-controller@1602f000 { - compatible = "mediatek,mt8192-vdecsys"; - reg = <0x1602f000 0x1000>; - #clock-cells = <1>; - }; - - - | - vencsys: clock-controller@17000000 { - compatible = "mediatek,mt8192-vencsys"; - reg = <0x17000000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys: clock-controller@1a000000 { - compatible = "mediatek,mt8192-camsys"; - reg = <0x1a000000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys_rawa: clock-controller@1a04f000 { - compatible = "mediatek,mt8192-camsys_rawa"; - reg = <0x1a04f000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys_rawb: clock-controller@1a06f000 { - compatible = "mediatek,mt8192-camsys_rawb"; - reg = <0x1a06f000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys_rawc: clock-controller@1a08f000 { - compatible = "mediatek,mt8192-camsys_rawc"; - reg = <0x1a08f000 0x1000>; - #clock-cells = <1>; - }; - - - | - ipesys: 
clock-controller@1b000000 { - compatible = "mediatek,mt8192-ipesys"; - reg = <0x1b000000 0x1000>; - #clock-cells = <1>; - }; - - - | - mdpsys: clock-controller@1f000000 { - compatible = "mediatek,mt8192-mdpsys"; - reg = <0x1f000000 0x1000>; - #clock-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml deleted file mode 100644 index 8d608fddf3f9ee..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml +++ /dev/null @@ -1,68 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8192-sys-clock.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek System Clock Controller for MT8192 - -maintainers: - - Chun-Jie Chen - -description: - The Mediatek system clock controller provides various clocks and system configuration - like reset and bus protection on MT8192. - -properties: - compatible: - items: - - enum: - - mediatek,mt8192-topckgen - - mediatek,mt8192-infracfg - - mediatek,mt8192-pericfg - - mediatek,mt8192-apmixedsys - - const: syscon - - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - - '#reset-cells': - const: 1 - -required: - - compatible - - reg - -additionalProperties: false - -examples: - - | - topckgen: syscon@10000000 { - compatible = "mediatek,mt8192-topckgen", "syscon"; - reg = <0x10000000 0x1000>; - #clock-cells = <1>; - }; - - - | - infracfg: syscon@10001000 { - compatible = "mediatek,mt8192-infracfg", "syscon"; - reg = <0x10001000 0x1000>; - #clock-cells = <1>; - }; - - - | - pericfg: syscon@10003000 { - compatible = "mediatek,mt8192-pericfg", "syscon"; - reg = <0x10003000 0x1000>; - #clock-cells = <1>; - }; - - - | - apmixedsys: syscon@1000c000 { - compatible = "mediatek,mt8192-apmixedsys", "syscon"; - reg = <0x1000c000 0x1000>; - #clock-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-clock.yaml deleted file mode 100644 index d17164b0b13ef2..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-clock.yaml +++ /dev/null @@ -1,238 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8195-clock.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek Functional Clock Controller for MT8195 - -maintainers: - - Chun-Jie Chen - -description: - The clock architecture in Mediatek like below - PLLs --> - dividers --> - muxes - --> - clock gate - - The devices except apusys_pll provide clock gate control in different IP blocks. - The apusys_pll provides Plls which generated from SoC 26m for AI Processing Unit. 
- -properties: - compatible: - items: - - enum: - - mediatek,mt8195-scp_adsp - - mediatek,mt8195-imp_iic_wrap_s - - mediatek,mt8195-imp_iic_wrap_w - - mediatek,mt8195-mfgcfg - - mediatek,mt8195-wpesys - - mediatek,mt8195-wpesys_vpp0 - - mediatek,mt8195-wpesys_vpp1 - - mediatek,mt8195-imgsys - - mediatek,mt8195-imgsys1_dip_top - - mediatek,mt8195-imgsys1_dip_nr - - mediatek,mt8195-imgsys1_wpe - - mediatek,mt8195-ipesys - - mediatek,mt8195-camsys - - mediatek,mt8195-camsys_rawa - - mediatek,mt8195-camsys_yuva - - mediatek,mt8195-camsys_rawb - - mediatek,mt8195-camsys_yuvb - - mediatek,mt8195-camsys_mraw - - mediatek,mt8195-ccusys - - mediatek,mt8195-vdecsys_soc - - mediatek,mt8195-vdecsys - - mediatek,mt8195-vdecsys_core1 - - mediatek,mt8195-vencsys - - mediatek,mt8195-vencsys_core1 - - mediatek,mt8195-apusys_pll - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - -required: - - compatible - - reg - -additionalProperties: false - -examples: - - | - scp_adsp: clock-controller@10720000 { - compatible = "mediatek,mt8195-scp_adsp"; - reg = <0x10720000 0x1000>; - #clock-cells = <1>; - }; - - - | - imp_iic_wrap_s: clock-controller@11d03000 { - compatible = "mediatek,mt8195-imp_iic_wrap_s"; - reg = <0x11d03000 0x1000>; - #clock-cells = <1>; - }; - - - | - imp_iic_wrap_w: clock-controller@11e05000 { - compatible = "mediatek,mt8195-imp_iic_wrap_w"; - reg = <0x11e05000 0x1000>; - #clock-cells = <1>; - }; - - - | - mfgcfg: clock-controller@13fbf000 { - compatible = "mediatek,mt8195-mfgcfg"; - reg = <0x13fbf000 0x1000>; - #clock-cells = <1>; - }; - - - | - wpesys: clock-controller@14e00000 { - compatible = "mediatek,mt8195-wpesys"; - reg = <0x14e00000 0x1000>; - #clock-cells = <1>; - }; - - - | - wpesys_vpp0: clock-controller@14e02000 { - compatible = "mediatek,mt8195-wpesys_vpp0"; - reg = <0x14e02000 0x1000>; - #clock-cells = <1>; - }; - - - | - wpesys_vpp1: clock-controller@14e03000 { - compatible = "mediatek,mt8195-wpesys_vpp1"; - reg = <0x14e03000 0x1000>; - #clock-cells = <1>; - }; - - - | - imgsys: clock-controller@15000000 { - compatible = "mediatek,mt8195-imgsys"; - reg = <0x15000000 0x1000>; - #clock-cells = <1>; - }; - - - | - imgsys1_dip_top: clock-controller@15110000 { - compatible = "mediatek,mt8195-imgsys1_dip_top"; - reg = <0x15110000 0x1000>; - #clock-cells = <1>; - }; - - - | - imgsys1_dip_nr: clock-controller@15130000 { - compatible = "mediatek,mt8195-imgsys1_dip_nr"; - reg = <0x15130000 0x1000>; - #clock-cells = <1>; - }; - - - | - imgsys1_wpe: clock-controller@15220000 { - compatible = "mediatek,mt8195-imgsys1_wpe"; - reg = <0x15220000 0x1000>; - #clock-cells = <1>; - }; - - - | - ipesys: clock-controller@15330000 { - compatible = "mediatek,mt8195-ipesys"; - reg = <0x15330000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys: clock-controller@16000000 { - compatible = "mediatek,mt8195-camsys"; - reg = <0x16000000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys_rawa: clock-controller@1604f000 { - compatible = "mediatek,mt8195-camsys_rawa"; - reg = <0x1604f000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys_yuva: clock-controller@1606f000 { - compatible = "mediatek,mt8195-camsys_yuva"; - reg = <0x1606f000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys_rawb: clock-controller@1608f000 { - compatible = "mediatek,mt8195-camsys_rawb"; - reg = <0x1608f000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys_yuvb: clock-controller@160af000 { - compatible = "mediatek,mt8195-camsys_yuvb"; - reg = <0x160af000 0x1000>; - #clock-cells = <1>; - }; - - - | - camsys_mraw: 
clock-controller@16140000 { - compatible = "mediatek,mt8195-camsys_mraw"; - reg = <0x16140000 0x1000>; - #clock-cells = <1>; - }; - - - | - ccusys: clock-controller@17200000 { - compatible = "mediatek,mt8195-ccusys"; - reg = <0x17200000 0x1000>; - #clock-cells = <1>; - }; - - - | - vdecsys_soc: clock-controller@1800f000 { - compatible = "mediatek,mt8195-vdecsys_soc"; - reg = <0x1800f000 0x1000>; - #clock-cells = <1>; - }; - - - | - vdecsys: clock-controller@1802f000 { - compatible = "mediatek,mt8195-vdecsys"; - reg = <0x1802f000 0x1000>; - #clock-cells = <1>; - }; - - - | - vdecsys_core1: clock-controller@1803f000 { - compatible = "mediatek,mt8195-vdecsys_core1"; - reg = <0x1803f000 0x1000>; - #clock-cells = <1>; - }; - - - | - vencsys: clock-controller@1a000000 { - compatible = "mediatek,mt8195-vencsys"; - reg = <0x1a000000 0x1000>; - #clock-cells = <1>; - }; - - - | - vencsys_core1: clock-controller@1b000000 { - compatible = "mediatek,mt8195-vencsys_core1"; - reg = <0x1b000000 0x1000>; - #clock-cells = <1>; - }; - - - | - apusys_pll: clock-controller@190f3000 { - compatible = "mediatek,mt8195-apusys_pll"; - reg = <0x190f3000 0x1000>; - #clock-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml deleted file mode 100644 index 066c9b3d6ac978..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8195-sys-clock.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek System Clock Controller for MT8195 - -maintainers: - - Chun-Jie Chen - -description: - The clock architecture in Mediatek like below - PLLs --> - dividers --> - muxes - --> - clock gate - - The apmixedsys provides most of PLLs which generated from SoC 26m. - The topckgen provides dividers and muxes which provide the clock source to other IP blocks. - The infracfg_ao and pericfg_ao provides clock gate in peripheral and infrastructure IP blocks. 
- -properties: - compatible: - items: - - enum: - - mediatek,mt8195-topckgen - - mediatek,mt8195-infracfg_ao - - mediatek,mt8195-apmixedsys - - mediatek,mt8195-pericfg_ao - - const: syscon - - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - - '#reset-cells': - const: 1 - -required: - - compatible - - reg - -additionalProperties: false - -examples: - - | - topckgen: syscon@10000000 { - compatible = "mediatek,mt8195-topckgen", "syscon"; - reg = <0x10000000 0x1000>; - #clock-cells = <1>; - }; - - - | - infracfg_ao: syscon@10001000 { - compatible = "mediatek,mt8195-infracfg_ao", "syscon"; - reg = <0x10001000 0x1000>; - #clock-cells = <1>; - }; - - - | - apmixedsys: syscon@1000c000 { - compatible = "mediatek,mt8195-apmixedsys", "syscon"; - reg = <0x1000c000 0x1000>; - #clock-cells = <1>; - }; - - - | - pericfg_ao: syscon@11003000 { - compatible = "mediatek,mt8195-pericfg_ao", "syscon"; - reg = <0x11003000 0x1000>; - #clock-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml deleted file mode 100644 index 33c94c491828e2..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/arm/mediatek/mediatek,pericfg.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek Peripheral Configuration Controller - -maintainers: - - Bartosz Golaszewski - -description: - The Mediatek pericfg controller provides various clocks and reset outputs - to the system. - -properties: - compatible: - oneOf: - - items: - - enum: - - mediatek,mt2701-pericfg - - mediatek,mt2712-pericfg - - mediatek,mt6765-pericfg - - mediatek,mt6795-pericfg - - mediatek,mt7622-pericfg - - mediatek,mt7629-pericfg - - mediatek,mt8135-pericfg - - mediatek,mt8173-pericfg - - mediatek,mt8183-pericfg - - mediatek,mt8186-pericfg - - mediatek,mt8188-pericfg - - mediatek,mt8195-pericfg - - mediatek,mt8516-pericfg - - const: syscon - - items: - # Special case for mt7623 for backward compatibility - - const: mediatek,mt7623-pericfg - - const: mediatek,mt2701-pericfg - - const: syscon - - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - - '#reset-cells': - const: 1 - -required: - - compatible - - reg - -additionalProperties: false - -examples: - - | - pericfg@10003000 { - compatible = "mediatek,mt8173-pericfg", "syscon"; - reg = <0x10003000 0x1000>; - #clock-cells = <1>; - #reset-cells = <1>; - }; - - - | - pericfg@10003000 { - compatible = "mediatek,mt7623-pericfg", "mediatek,mt2701-pericfg", "syscon"; - reg = <0x10003000 0x1000>; - #clock-cells = <1>; - #reset-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt deleted file mode 100644 index f090147b7f1ee3..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt +++ /dev/null @@ -1,27 +0,0 @@ -Mediatek vcodecsys controller -============================ - -The Mediatek vcodecsys controller provides various clocks to the system. 
- -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt6765-vcodecsys", "syscon" -- #clock-cells: Must be 1 - -The vcodecsys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -The vcodecsys controller also uses the common power domain from -Documentation/devicetree/bindings/soc/mediatek/scpsys.txt -The available power domains are defined in dt-bindings/power/mt*-power.h. - -Example: - -venc_gcon: clock-controller@17000000 { - compatible = "mediatek,mt6765-vcodecsys", "syscon"; - reg = <0 0x17000000 0 0x10000>; - power-domains = <&scpsys MT6765_POWER_DOMAIN_VCODEC>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt deleted file mode 100644 index 98195169176a0c..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt +++ /dev/null @@ -1,29 +0,0 @@ -Mediatek vdecsys controller -============================ - -The Mediatek vdecsys controller provides various clocks to the system. - -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt2701-vdecsys", "syscon" - - "mediatek,mt2712-vdecsys", "syscon" - - "mediatek,mt6779-vdecsys", "syscon" - - "mediatek,mt6797-vdecsys", "syscon" - - "mediatek,mt7623-vdecsys", "mediatek,mt2701-vdecsys", "syscon" - - "mediatek,mt8167-vdecsys", "syscon" - - "mediatek,mt8173-vdecsys", "syscon" - - "mediatek,mt8183-vdecsys", "syscon" -- #clock-cells: Must be 1 - -The vdecsys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -vdecsys: clock-controller@16000000 { - compatible = "mediatek,mt8173-vdecsys", "syscon"; - reg = <0 0x16000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt deleted file mode 100644 index 3cc299fd7857f0..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt +++ /dev/null @@ -1,22 +0,0 @@ -Mediatek vencltsys controller -============================ - -The Mediatek vencltsys controller provides various clocks to the system. - -Required Properties: - -- compatible: Should be: - - "mediatek,mt8173-vencltsys", "syscon" -- #clock-cells: Must be 1 - -The vencltsys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -vencltsys: clock-controller@19000000 { - compatible = "mediatek,mt8173-vencltsys", "syscon"; - reg = <0 0x19000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt deleted file mode 100644 index 6a6a14e15cd7ff..00000000000000 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt +++ /dev/null @@ -1,26 +0,0 @@ -Mediatek vencsys controller -============================ - -The Mediatek vencsys controller provides various clocks to the system. 
- -Required Properties: - -- compatible: Should be one of: - - "mediatek,mt2712-vencsys", "syscon" - - "mediatek,mt6779-vencsys", "syscon" - - "mediatek,mt6797-vencsys", "syscon" - - "mediatek,mt8173-vencsys", "syscon" - - "mediatek,mt8183-vencsys", "syscon" -- #clock-cells: Must be 1 - -The vencsys controller uses the common clk binding from -Documentation/devicetree/bindings/clock/clock-bindings.txt -The available clocks are defined in dt-bindings/clock/mt*-clk.h. - -Example: - -vencsys: clock-controller@18000000 { - compatible = "mediatek,mt8173-vencsys", "syscon"; - reg = <0 0x18000000 0 0x1000>; - #clock-cells = <1>; -}; diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml index f08e13b611728f..5cb54d69af0b72 100644 --- a/Documentation/devicetree/bindings/arm/qcom.yaml +++ b/Documentation/devicetree/bindings/arm/qcom.yaml @@ -155,6 +155,11 @@ properties: - const: qcom,msm8926 - const: qcom,msm8226 + - items: + - enum: + - wingtech,wt82918hd + - const: qcom,msm8929 + - items: - enum: - huawei,kiwi @@ -162,6 +167,8 @@ properties: - samsung,a7 - sony,kanuti-tulip - square,apq8039-t2 + - wingtech,wt82918 + - wingtech,wt82918hdhw39 - const: qcom,msm8939 - items: @@ -228,12 +235,15 @@ properties: - samsung,grandprimelte - samsung,gt510 - samsung,gt58 + - samsung,j3ltetw - samsung,j5 - samsung,j5x - samsung,rossa - samsung,serranove - thwc,uf896 - thwc,ufi001c + - wingtech,wt86518 + - wingtech,wt86528 - wingtech,wt88047 - yiming,uz801-v3 - const: qcom,msm8916 @@ -250,6 +260,7 @@ properties: - items: - enum: - lg,bullhead + - lg,h815 - microsoft,talkman - xiaomi,libra - const: qcom,msm8992 @@ -1038,10 +1049,18 @@ properties: - qcom,sm8650-qrd - const: qcom,sm8650 + - items: + - enum: + - lenovo,thinkpad-t14s + - const: qcom,x1e78100 + - const: qcom,x1e80100 + - items: - enum: - asus,vivobook-s15 - lenovo,yoga-slim7x + - microsoft,romulus13 + - microsoft,romulus15 - qcom,x1e80100-crd - qcom,x1e80100-qcp - const: qcom,x1e80100 diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml index 1ef09fbfdfaf50..687823e58c2257 100644 --- a/Documentation/devicetree/bindings/arm/rockchip.yaml +++ b/Documentation/devicetree/bindings/arm/rockchip.yaml @@ -96,6 +96,13 @@ properties: - const: coolpi,pi-cm5 - const: rockchip,rk3588 + - description: Cool Pi CM5 GenBook + items: + - enum: + - coolpi,pi-cm5-genbook + - const: coolpi,pi-cm5 + - const: rockchip,rk3588 + - description: Cool Pi 4 Model B items: - const: coolpi,pi-4b @@ -148,6 +155,12 @@ properties: - const: engicam,px30-core - const: rockchip,px30 + - description: Firefly Core-PX30-JD4 on MB-JD4-PX30 baseboard + items: + - const: firefly,px30-jd4-core-mb + - const: firefly,px30-jd4-core + - const: rockchip,px30 + - description: Firefly Firefly-RK3288 items: - enum: @@ -216,6 +229,7 @@ properties: - friendlyarm,nanopi-r2c - friendlyarm,nanopi-r2c-plus - friendlyarm,nanopi-r2s + - friendlyarm,nanopi-r2s-plus - const: rockchip,rk3328 - description: FriendlyElec NanoPi4 series boards @@ -243,9 +257,11 @@ properties: - friendlyarm,nanopi-r6s - const: rockchip,rk3588s - - description: FriendlyElec NanoPC T6 + - description: FriendlyElec NanoPC T6 series boards items: - - const: friendlyarm,nanopc-t6 + - enum: + - friendlyarm,nanopc-t6 + - friendlyarm,nanopc-t6-lts - const: rockchip,rk3588 - description: FriendlyElec CM3588-based boards @@ -255,6 +271,11 @@ properties: - const: friendlyarm,cm3588 - const: rockchip,rk3588 + - description: 
GameForce Ace + items: + - const: gameforce,ace + - const: rockchip,rk3588s + - description: GameForce Chi items: - const: gameforce,chi @@ -581,9 +602,19 @@ properties: - description: Hardkernel Odroid M1 items: - - const: rockchip,rk3568-odroid-m1 + - const: hardkernel,odroid-m1 - const: rockchip,rk3568 + - description: Hardkernel Odroid M1S + items: + - const: hardkernel,odroid-m1s + - const: rockchip,rk3566 + + - description: Hardkernel Odroid M2 + items: + - const: hardkernel,odroid-m2 + - const: rockchip,rk3588s + - description: Hugsun X99 TV Box items: - const: hugsun,x99 @@ -622,6 +653,11 @@ properties: - const: leez,p710 - const: rockchip,rk3399 + - description: LCKFB Taishan Pi RK3566 + items: + - const: lckfb,tspi-rk3566 + - const: rockchip,rk3566 + - description: Lunzn FastRhino R66S / R68S items: - enum: diff --git a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml index b79c81cd9f0e65..932f981265ccbf 100644 --- a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml +++ b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml @@ -26,6 +26,7 @@ select: - rockchip,rk3368-pmu - rockchip,rk3399-pmu - rockchip,rk3568-pmu + - rockchip,rk3576-pmu - rockchip,rk3588-pmu - rockchip,rv1126-pmu @@ -43,6 +44,7 @@ properties: - rockchip,rk3368-pmu - rockchip,rk3399-pmu - rockchip,rk3568-pmu + - rockchip,rk3576-pmu - rockchip,rk3588-pmu - rockchip,rv1126-pmu - const: syscon diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml index 58099949e8f3aa..703d4b574398d2 100644 --- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml +++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml @@ -54,6 +54,8 @@ properties: - description: ST STM32MP151 based Boards items: - enum: + - prt,mecio1r0 # Protonic MECIO1r0 + - prt,mect1s # Protonic MECT1S - prt,prtt1a # Protonic PRTT1A - prt,prtt1c # Protonic PRTT1C - prt,prtt1s # Protonic PRTT1S @@ -71,6 +73,12 @@ properties: - const: dh,stm32mp151a-dhcor-som - const: st,stm32mp151 + - description: ST STM32MP153 based Boards + items: + - enum: + - prt,mecio1r1 # Protonic MECIO1r1 + - const: st,stm32mp153 + - description: DH STM32MP153 DHCOM SoM based Boards items: - const: dh,stm32mp153c-dhcom-drc02 diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml index 09dc6f4249866a..4aa15f3668e037 100644 --- a/Documentation/devicetree/bindings/arm/sunxi.yaml +++ b/Documentation/devicetree/bindings/arm/sunxi.yaml @@ -61,14 +61,19 @@ properties: - const: anbernic,rg35xx-2024 - const: allwinner,sun50i-h700 + - description: Anbernic RG35XX H + items: + - const: anbernic,rg35xx-h + - const: allwinner,sun50i-h700 + - description: Anbernic RG35XX Plus items: - const: anbernic,rg35xx-plus - const: allwinner,sun50i-h700 - - description: Anbernic RG35XX H + - description: Anbernic RG35XX SP items: - - const: anbernic,rg35xx-h + - const: anbernic,rg35xx-sp - const: allwinner,sun50i-h700 - description: Amarula A64 Relic diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml index 8fb4923517d00c..2889fd0e65921d 100644 --- a/Documentation/devicetree/bindings/arm/tegra.yaml +++ b/Documentation/devicetree/bindings/arm/tegra.yaml @@ -127,6 +127,48 @@ properties: - nvidia,norrin - const: nvidia,tegra132 - const: nvidia,tegra124 + - items: + - const: google,nyan-blaze-rev10 + - const: google,nyan-blaze-rev9 + - const: 
google,nyan-blaze-rev8 + - const: google,nyan-blaze-rev7 + - const: google,nyan-blaze-rev6 + - const: google,nyan-blaze-rev5 + - const: google,nyan-blaze-rev4 + - const: google,nyan-blaze-rev3 + - const: google,nyan-blaze-rev2 + - const: google,nyan-blaze-rev1 + - const: google,nyan-blaze-rev0 + - const: google,nyan-blaze + - const: google,nyan + - const: nvidia,tegra124 + - items: + - const: google,nyan-big-rev10 + - const: google,nyan-big-rev9 + - const: google,nyan-big-rev8 + - const: google,nyan-big-rev7 + - const: google,nyan-big-rev6 + - const: google,nyan-big-rev5 + - const: google,nyan-big-rev4 + - const: google,nyan-big-rev3 + - const: google,nyan-big-rev2 + - const: google,nyan-big-rev1 + - const: google,nyan-big-rev0 + - const: google,nyan-big + - const: google,nyan + - const: nvidia,tegra124 + - items: + - const: google,nyan-big-rev7 + - const: google,nyan-big-rev6 + - const: google,nyan-big-rev5 + - const: google,nyan-big-rev4 + - const: google,nyan-big-rev3 + - const: google,nyan-big-rev2 + - const: google,nyan-big-rev1 + - const: google,nyan-big-rev0 + - const: google,nyan-big + - const: google,nyan + - const: nvidia,tegra124 - items: - enum: - nvidia,darcy diff --git a/Documentation/devicetree/bindings/arm/ti/k3.yaml b/Documentation/devicetree/bindings/arm/ti/k3.yaml index 4d9c5fbb4c265e..5df99e361c2158 100644 --- a/Documentation/devicetree/bindings/arm/ti/k3.yaml +++ b/Documentation/devicetree/bindings/arm/ti/k3.yaml @@ -140,6 +140,7 @@ properties: - description: K3 J722S SoC and Boards items: - enum: + - beagle,am67a-beagley-ai - ti,j722s-evm - const: ti,j722s diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.yaml b/Documentation/devicetree/bindings/ata/ahci-platform.yaml index 358617115bb8ec..ef19468e302251 100644 --- a/Documentation/devicetree/bindings/ata/ahci-platform.yaml +++ b/Documentation/devicetree/bindings/ata/ahci-platform.yaml @@ -30,6 +30,8 @@ select: - marvell,armada-3700-ahci - marvell,armada-8k-ahci - marvell,berlin2q-ahci + - qcom,apq8064-ahci + - qcom,ipq806x-ahci - socionext,uniphier-pro4-ahci - socionext,uniphier-pxs2-ahci - socionext,uniphier-pxs3-ahci @@ -45,6 +47,8 @@ properties: - marvell,armada-8k-ahci - marvell,berlin2-ahci - marvell,berlin2q-ahci + - qcom,apq8064-ahci + - qcom,ipq806x-ahci - socionext,uniphier-pro4-ahci - socionext,uniphier-pxs2-ahci - socionext,uniphier-pxs3-ahci @@ -64,11 +68,11 @@ properties: clocks: minItems: 1 - maxItems: 3 + maxItems: 5 clock-names: minItems: 1 - maxItems: 3 + maxItems: 5 interrupts: maxItems: 1 @@ -97,6 +101,31 @@ required: allOf: - $ref: ahci-common.yaml# + + - if: + properties: + compatible: + contains: + enum: + - qcom,apq8064-ahci + - qcom,ipq806x-ahci + then: + properties: + clocks: + minItems: 5 + clock-names: + items: + - const: slave_iface + - const: iface + - const: core + - const: rxoob + - const: pmalive + required: + - phys + - phy-names + - clocks + - clock-names + - if: properties: compatible: diff --git a/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml b/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml new file mode 100644 index 00000000000000..8130923fdc728d --- /dev/null +++ b/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/ata/cirrus,ep9312-pata.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus Logic EP9312 PATA controller + +maintainers: + - Damien Le Moal + +properties: + 
compatible: + oneOf: + - const: cirrus,ep9312-pata + - items: + - const: cirrus,ep9315-pata + - const: cirrus,ep9312-pata + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + ide@800a0000 { + compatible = "cirrus,ep9312-pata"; + reg = <0x800a0000 0x38>; + interrupt-parent = <&vic1>; + interrupts = <8>; + pinctrl-names = "default"; + pinctrl-0 = <&ide_default_pins>; + }; diff --git a/Documentation/devicetree/bindings/ata/imx-sata.yaml b/Documentation/devicetree/bindings/ata/imx-sata.yaml index 68ffb97ddc9b2f..f4eb3550a0960c 100644 --- a/Documentation/devicetree/bindings/ata/imx-sata.yaml +++ b/Documentation/devicetree/bindings/ata/imx-sata.yaml @@ -19,6 +19,7 @@ properties: - fsl,imx53-ahci - fsl,imx6q-ahci - fsl,imx6qp-ahci + - fsl,imx8qm-ahci reg: maxItems: 1 @@ -27,12 +28,14 @@ properties: maxItems: 1 clocks: + minItems: 2 items: - description: sata clock - description: sata reference clock - description: ahb clock clock-names: + minItems: 2 items: - const: sata - const: sata_ref @@ -58,6 +61,25 @@ properties: $ref: /schemas/types.yaml#/definitions/flag description: if present, disable spread-spectrum clocking on the SATA link. + phys: + items: + - description: phandle to SATA PHY. + Since "REXT" pin is only present for first lane of i.MX8QM PHY, it's + calibration result will be stored, passed through second lane, and + shared with all three lanes PHY. The first two lanes PHY are used as + calibration PHYs, although only the third lane PHY is used by SATA. + - description: phandle to the first lane PHY of i.MX8QM. + - description: phandle to the second lane PHY of i.MX8QM. + + phy-names: + items: + - const: sata-phy + - const: cali-phy0 + - const: cali-phy1 + + power-domains: + maxItems: 1 + required: - compatible - reg @@ -65,6 +87,31 @@ required: - clocks - clock-names +allOf: + - if: + properties: + compatible: + contains: + enum: + - fsl,imx53-ahci + - fsl,imx6q-ahci + - fsl,imx6qp-ahci + then: + properties: + clock-names: + minItems: 3 + + - if: + properties: + compatible: + contains: + enum: + - fsl,imx8qm-ahci + then: + properties: + clock-names: + minItems: 2 + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/ata/qcom-sata.txt b/Documentation/devicetree/bindings/ata/qcom-sata.txt deleted file mode 100644 index 094de91cd9fd2e..00000000000000 --- a/Documentation/devicetree/bindings/ata/qcom-sata.txt +++ /dev/null @@ -1,48 +0,0 @@ -* Qualcomm AHCI SATA Controller - -SATA nodes are defined to describe on-chip Serial ATA controllers. -Each SATA controller should have its own node. - -Required properties: -- compatible : compatible list, must contain "generic-ahci" -- interrupts : -- reg : -- phys : Must contain exactly one entry as specified - in phy-bindings.txt -- phy-names : Must be "sata-phy" - -Required properties for "qcom,ipq806x-ahci" compatible: -- clocks : Must contain an entry for each entry in clock-names. 
-- clock-names : Shall be: - "slave_iface" - Fabric port AHB clock for SATA - "iface" - AHB clock - "core" - core clock - "rxoob" - RX out-of-band clock - "pmalive" - Power Module Alive clock -- assigned-clocks : Shall be: - SATA_RXOOB_CLK - SATA_PMALIVE_CLK -- assigned-clock-rates : Shall be: - 100Mhz (100000000) for SATA_RXOOB_CLK - 100Mhz (100000000) for SATA_PMALIVE_CLK - -Example: - sata@29000000 { - compatible = "qcom,ipq806x-ahci", "generic-ahci"; - reg = <0x29000000 0x180>; - - interrupts = <0 209 0x0>; - - clocks = <&gcc SFAB_SATA_S_H_CLK>, - <&gcc SATA_H_CLK>, - <&gcc SATA_A_CLK>, - <&gcc SATA_RXOOB_CLK>, - <&gcc SATA_PMALIVE_CLK>; - clock-names = "slave_iface", "iface", "core", - "rxoob", "pmalive"; - assigned-clocks = <&gcc SATA_RXOOB_CLK>, <&gcc SATA_PMALIVE_CLK>; - assigned-clock-rates = <100000000>, <100000000>; - - phys = <&sata_phy>; - phy-names = "sata-phy"; - }; diff --git a/Documentation/devicetree/bindings/board/fsl,bcsr.yaml b/Documentation/devicetree/bindings/board/fsl,bcsr.yaml new file mode 100644 index 00000000000000..df3dd8399671fc --- /dev/null +++ b/Documentation/devicetree/bindings/board/fsl,bcsr.yaml @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/board/fsl,bcsr.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Board Control and Status + +maintainers: + - Frank Li + +properties: + compatible: + enum: + - fsl,mpc8360mds-bcsr + + reg: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + board@f8000000 { + compatible = "fsl,mpc8360mds-bcsr"; + reg = <0xf8000000 0x8000>; + }; + diff --git a/Documentation/devicetree/bindings/board/fsl,fpga-qixis-i2c.yaml b/Documentation/devicetree/bindings/board/fsl,fpga-qixis-i2c.yaml new file mode 100644 index 00000000000000..28b37772fb656e --- /dev/null +++ b/Documentation/devicetree/bindings/board/fsl,fpga-qixis-i2c.yaml @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/board/fsl,fpga-qixis-i2c.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale on-board FPGA connected on I2C bus + +maintainers: + - Frank Li + +properties: + compatible: + oneOf: + - items: + - enum: + - fsl,bsc9132qds-fpga + - const: fsl,fpga-qixis-i2c + - items: + - enum: + - fsl,ls1028aqds-fpga + - fsl,lx2160aqds-fpga + - const: fsl,fpga-qixis-i2c + - const: simple-mfd + + interrupts: + maxItems: 1 + + reg: + maxItems: 1 + + mux-controller: + $ref: /schemas/mux/reg-mux.yaml + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + board-control@66 { + compatible = "fsl,bsc9132qds-fpga", "fsl,fpga-qixis-i2c"; + reg = <0x66>; + }; + }; + + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + board-control@66 { + compatible = "fsl,ls1028aqds-fpga", "fsl,fpga-qixis-i2c", + "simple-mfd"; + reg = <0x66>; + + mux-controller { + compatible = "reg-mux"; + #mux-control-cells = <1>; + mux-reg-masks = <0x54 0xf0>; /* 0: reg 0x54, bits 7:4 */ + }; + }; + }; + diff --git a/Documentation/devicetree/bindings/board/fsl,fpga-qixis.yaml b/Documentation/devicetree/bindings/board/fsl,fpga-qixis.yaml new file mode 100644 index 00000000000000..5a3cd431ef6e60 --- /dev/null +++ b/Documentation/devicetree/bindings/board/fsl,fpga-qixis.yaml @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: 
http://devicetree.org/schemas/board/fsl,fpga-qixis.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale on-board FPGA/CPLD + +maintainers: + - Frank Li + +properties: + compatible: + oneOf: + - items: + - const: fsl,p1022ds-fpga + - const: fsl,fpga-ngpixis + - items: + - enum: + - fsl,ls1088aqds-fpga + - fsl,ls1088ardb-fpga + - fsl,ls2080aqds-fpga + - fsl,ls2080ardb-fpga + - const: fsl,fpga-qixis + - items: + - enum: + - fsl,ls1043aqds-fpga + - fsl,ls1043ardb-fpga + - fsl,ls1046aqds-fpga + - fsl,ls1046ardb-fpga + - fsl,ls208xaqds-fpga + - const: fsl,fpga-qixis + - const: simple-mfd + - enum: + - fsl,ls1043ardb-cpld + - fsl,ls1046ardb-cpld + - fsl,t1040rdb-cpld + - fsl,t1042rdb-cpld + - fsl,t1042rdb_pi-cpld + + interrupts: + maxItems: 1 + + reg: + maxItems: 1 + + "#address-cells": + const: 1 + + "#size-cells": + const: 1 + + ranges: + maxItems: 1 + +patternProperties: + '^mdio-mux@[a-f0-9,]+$': + $ref: /schemas/net/mdio-mux-mmioreg.yaml + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + #include + board-control@3 { + compatible = "fsl,p1022ds-fpga", "fsl,fpga-ngpixis"; + reg = <3 0x30>; + interrupt-parent = <&mpic>; + interrupts = <8 IRQ_TYPE_LEVEL_LOW 0 0>; + }; + + - | + board-control@3 { + compatible = "fsl,ls2080ardb-fpga", "fsl,fpga-qixis"; + reg = <0x3 0x10000>; + }; + diff --git a/Documentation/devicetree/bindings/board/fsl-board.txt b/Documentation/devicetree/bindings/board/fsl-board.txt deleted file mode 100644 index 9cde5701592193..00000000000000 --- a/Documentation/devicetree/bindings/board/fsl-board.txt +++ /dev/null @@ -1,81 +0,0 @@ -Freescale Reference Board Bindings - -This document describes device tree bindings for various devices that -exist on some Freescale reference boards. - -* Board Control and Status (BCSR) - -Required properties: - - - compatible : Should be "fsl,-bcsr" - - reg : Offset and length of the register set for the device - -Example: - - bcsr@f8000000 { - compatible = "fsl,mpc8360mds-bcsr"; - reg = ; - }; - -* Freescale on-board FPGA - -This is the memory-mapped registers for on board FPGA. - -Required properties: -- compatible: should be a board-specific string followed by a string - indicating the type of FPGA. Example: - "fsl,-fpga", "fsl,fpga-pixis", or - "fsl,-fpga", "fsl,fpga-qixis" -- reg: should contain the address and the length of the FPGA register set. - -Optional properties: -- interrupts: should specify event (wakeup) IRQ. - -Example (P1022DS): - - board-control@3,0 { - compatible = "fsl,p1022ds-fpga", "fsl,fpga-ngpixis"; - reg = <3 0 0x30>; - interrupt-parent = <&mpic>; - interrupts = <8 8 0 0>; - }; - -Example (LS2080A-RDB): - - cpld@3,0 { - compatible = "fsl,ls2080ardb-fpga", "fsl,fpga-qixis"; - reg = <0x3 0 0x10000>; - }; - -* Freescale on-board FPGA connected on I2C bus - -Some Freescale boards like BSC9132QDS have on board FPGA connected on -the i2c bus. - -Required properties: -- compatible: Should be a board-specific string followed by a string - indicating the type of FPGA. Example: - "fsl,-fpga", "fsl,fpga-qixis-i2c" -- reg: Should contain the address of the FPGA - -Example: - fpga: fpga@66 { - compatible = "fsl,bsc9132qds-fpga", "fsl,fpga-qixis-i2c"; - reg = <0x66>; - }; - -* Freescale on-board CPLD - -Some Freescale boards like T1040RDB have an on board CPLD connected. 
- -Required properties: -- compatible: Should be a board-specific string like "fsl,-cpld" - Example: - "fsl,t1040rdb-cpld", "fsl,t1042rdb-cpld", "fsl,t1042rdb_pi-cpld" -- reg: should describe CPLD registers - -Example: - cpld@3,0 { - compatible = "fsl,t1040rdb-cpld"; - reg = <3 0 0x300>; - }; diff --git a/Documentation/devicetree/bindings/bus/qcom,ebi2.txt b/Documentation/devicetree/bindings/bus/qcom,ebi2.txt deleted file mode 100644 index 5058aa2c63b284..00000000000000 --- a/Documentation/devicetree/bindings/bus/qcom,ebi2.txt +++ /dev/null @@ -1,138 +0,0 @@ -Qualcomm External Bus Interface 2 (EBI2) - -The EBI2 contains two peripheral blocks: XMEM and LCDC. The XMEM handles any -external memory (such as NAND or other memory-mapped peripherals) whereas -LCDC handles LCD displays. - -As it says it connects devices to an external bus interface, meaning address -lines (up to 9 address lines so can only address 1KiB external memory space), -data lines (16 bits), OE (output enable), ADV (address valid, used on some -NOR flash memories), WE (write enable). This on top of 6 different chip selects -(CS0 thru CS5) so that in theory 6 different devices can be connected. - -Apparently this bus is clocked at 64MHz. It has dedicated pins on the package -and the bus can only come out on these pins, however if some of the pins are -unused they can be left unconnected or remuxed to be used as GPIO or in some -cases other orthogonal functions as well. - -Also CS1 and CS2 has -A and -B signals. Why they have that is unclear to me. - -The chip selects have the following memory range assignments. This region of -memory is referred to as "Chip Peripheral SS FPB0" and is 168MB big. - -Chip Select Physical address base -CS0 GPIO134 0x1a800000-0x1b000000 (8MB) -CS1 GPIO39 (A) / GPIO123 (B) 0x1b000000-0x1b800000 (8MB) -CS2 GPIO40 (A) / GPIO124 (B) 0x1b800000-0x1c000000 (8MB) -CS3 GPIO133 0x1d000000-0x25000000 (128 MB) -CS4 GPIO132 0x1c800000-0x1d000000 (8MB) -CS5 GPIO131 0x1c000000-0x1c800000 (8MB) - -The APQ8060 Qualcomm Application Processor User Guide, 80-N7150-14 Rev. A, -August 6, 2012 contains some incomplete documentation of the EBI2. - -FIXME: the manual mentions "write precharge cycles" and "precharge cycles". -We have not been able to figure out which bit fields these correspond to -in the hardware, or what valid values exist. The current hypothesis is that -this is something just used on the FAST chip selects and that the SLOW -chip selects are understood fully. There is also a "byte device enable" -flag somewhere for 8bit memories. - -FIXME: The chipselects have SLOW and FAST configuration registers. It's a bit -unclear what this means, if they are mutually exclusive or can be used -together, or if some chip selects are hardwired to be FAST and others are SLOW -by design. 
- -The XMEM registers are totally undocumented but could be partially decoded -because the Cypress AN49576 Antioch Westbridge apparently has suspiciously -similar register layout, see: http://www.cypress.com/file/105771/download - -Required properties: -- compatible: should be one of: - "qcom,msm8660-ebi2" - "qcom,apq8060-ebi2" -- #address-cells: should be <2>: the first cell is the chipselect, - the second cell is the offset inside the memory range -- #size-cells: should be <1> -- ranges: should be set to: - ranges = <0 0x0 0x1a800000 0x00800000>, - <1 0x0 0x1b000000 0x00800000>, - <2 0x0 0x1b800000 0x00800000>, - <3 0x0 0x1d000000 0x08000000>, - <4 0x0 0x1c800000 0x00800000>, - <5 0x0 0x1c000000 0x00800000>; -- reg: two ranges of registers: EBI2 config and XMEM config areas -- reg-names: should be "ebi2", "xmem" -- clocks: two clocks, EBI_2X and EBI -- clock-names: should be "ebi2x", "ebi2" - -Optional subnodes: -- Nodes inside the EBI2 will be considered device nodes. - -The following optional properties are properties that can be tagged onto -any device subnode. We are assuming that there can be only ONE device per -chipselect subnode, else the properties will become ambiguous. - -Optional properties arrays for SLOW chip selects: -- qcom,xmem-recovery-cycles: recovery cycles is the time the memory continues to - drive the data bus after OE is de-asserted, in order to avoid contention on - the data bus. They are inserted when reading one CS and switching to another - CS or read followed by write on the same CS. Valid values 0 thru 15. Minimum - value is actually 1, so a value of 0 will still yield 1 recovery cycle. -- qcom,xmem-write-hold-cycles: write hold cycles, these are extra cycles - inserted after every write minimum 1. The data out is driven from the time - WE is asserted until CS is asserted. With a hold of 1 (value = 0), the CS - stays active for 1 extra cycle etc. Valid values 0 thru 15. -- qcom,xmem-write-delta-cycles: initial latency for write cycles inserted for - the first write to a page or burst memory. Valid values 0 thru 255. -- qcom,xmem-read-delta-cycles: initial latency for read cycles inserted for the - first read to a page or burst memory. Valid values 0 thru 255. -- qcom,xmem-write-wait-cycles: number of wait cycles for every write access, 0=1 - cycle. Valid values 0 thru 15. -- qcom,xmem-read-wait-cycles: number of wait cycles for every read access, 0=1 - cycle. Valid values 0 thru 15. - -Optional properties arrays for FAST chip selects: -- qcom,xmem-address-hold-enable: this is a boolean property stating that we - shall hold the address for an extra cycle to meet hold time requirements - with ADV assertion. -- qcom,xmem-adv-to-oe-recovery-cycles: the number of cycles elapsed before an OE - assertion, with respect to the cycle where ADV (address valid) is asserted. - 2 means 2 cycles between ADV and OE. Valid values 0, 1, 2 or 3. -- qcom,xmem-read-hold-cycles: the length in cycles of the first segment of a - read transfer. For a single read transfer this will be the time from CS - assertion to OE assertion. Valid values 0 thru 15. 
- - -Example: - -ebi2@1a100000 { - compatible = "qcom,apq8060-ebi2"; - #address-cells = <2>; - #size-cells = <1>; - ranges = <0 0x0 0x1a800000 0x00800000>, - <1 0x0 0x1b000000 0x00800000>, - <2 0x0 0x1b800000 0x00800000>, - <3 0x0 0x1d000000 0x08000000>, - <4 0x0 0x1c800000 0x00800000>, - <5 0x0 0x1c000000 0x00800000>; - reg = <0x1a100000 0x1000>, <0x1a110000 0x1000>; - reg-names = "ebi2", "xmem"; - clocks = <&gcc EBI2_2X_CLK>, <&gcc EBI2_CLK>; - clock-names = "ebi2x", "ebi2"; - /* Make sure to set up the pin control for the EBI2 */ - pinctrl-names = "default"; - pinctrl-0 = <&foo_ebi2_pins>; - - foo-ebi2@2,0 { - compatible = "foo"; - reg = <2 0x0 0x100>; - (...) - qcom,xmem-recovery-cycles = <0>; - qcom,xmem-write-hold-cycles = <3>; - qcom,xmem-write-delta-cycles = <31>; - qcom,xmem-read-delta-cycles = <28>; - qcom,xmem-write-wait-cycles = <9>; - qcom,xmem-read-wait-cycles = <9>; - }; -}; diff --git a/Documentation/devicetree/bindings/bus/qcom,ebi2.yaml b/Documentation/devicetree/bindings/bus/qcom,ebi2.yaml new file mode 100644 index 00000000000000..1b1fb3538e6eed --- /dev/null +++ b/Documentation/devicetree/bindings/bus/qcom,ebi2.yaml @@ -0,0 +1,239 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/bus/qcom,ebi2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm External Bus Interface 2 (EBI2) + +description: | + The EBI2 contains two peripheral blocks: XMEM and LCDC. The XMEM handles any + external memory (such as NAND or other memory-mapped peripherals) whereas + LCDC handles LCD displays. + + As it says it connects devices to an external bus interface, meaning address + lines (up to 9 address lines so can only address 1KiB external memory space), + data lines (16 bits), OE (output enable), ADV (address valid, used on some + NOR flash memories), WE (write enable). This on top of 6 different chip selects + (CS0 thru CS5) so that in theory 6 different devices can be connected. + + Apparently this bus is clocked at 64MHz. It has dedicated pins on the package + and the bus can only come out on these pins, however if some of the pins are + unused they can be left unconnected or remuxed to be used as GPIO or in some + cases other orthogonal functions as well. + + Also CS1 and CS2 has -A and -B signals. Why they have that is unclear to me. + + The chip selects have the following memory range assignments. This region of + memory is referred to as "Chip Peripheral SS FPB0" and is 168MB big. + + Chip Select Physical address base + CS0 GPIO134 0x1a800000-0x1b000000 (8MB) + CS1 GPIO39 (A) / GPIO123 (B) 0x1b000000-0x1b800000 (8MB) + CS2 GPIO40 (A) / GPIO124 (B) 0x1b800000-0x1c000000 (8MB) + CS3 GPIO133 0x1d000000-0x25000000 (128 MB) + CS4 GPIO132 0x1c800000-0x1d000000 (8MB) + CS5 GPIO131 0x1c000000-0x1c800000 (8MB) + + The APQ8060 Qualcomm Application Processor User Guide, 80-N7150-14 Rev. A, + August 6, 2012 contains some incomplete documentation of the EBI2. + + FIXME: the manual mentions "write precharge cycles" and "precharge cycles". + We have not been able to figure out which bit fields these correspond to + in the hardware, or what valid values exist. The current hypothesis is that + this is something just used on the FAST chip selects and that the SLOW + chip selects are understood fully. There is also a "byte device enable" + flag somewhere for 8bit memories. + + FIXME: The chipselects have SLOW and FAST configuration registers. 
It's a bit + unclear what this means, if they are mutually exclusive or can be used + together, or if some chip selects are hardwired to be FAST and others are SLOW + by design. + + The XMEM registers are totally undocumented but could be partially decoded + because the Cypress AN49576 Antioch Westbridge apparently has suspiciously + similar register layout, see: http://www.cypress.com/file/105771/download + +maintainers: + - Bjorn Andersson + +properties: + compatible: + enum: + - qcom,apq8060-ebi2 + - qcom,msm8660-ebi2 + + reg: + items: + - description: EBI2 config region + - description: XMEM config region + + reg-names: + items: + - const: ebi2 + - const: xmem + + ranges: true + + clocks: + items: + - description: EBI_2X clock + - description: EBI clock + + clock-names: + items: + - const: ebi2x + - const: ebi2 + + '#address-cells': + const: 2 + + '#size-cells': + const: 1 + +required: + - compatible + - reg + - reg-names + - ranges + - clocks + - clock-names + - '#address-cells' + - '#size-cells' + +patternProperties: + "^.*@[0-5],[0-9a-f]+$": + type: object + additionalProperties: true + properties: + reg: + maxItems: 1 + + # SLOW chip selects + qcom,xmem-recovery-cycles: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + The time the memory continues to drive the data bus after OE + is de-asserted, in order to avoid contention on the data bus. + They are inserted when reading one CS and switching to another + CS or read followed by write on the same CS. Minimum value is + actually 1, so a value of 0 will still yield 1 recovery cycle. + minimum: 0 + maximum: 15 + + qcom,xmem-write-hold-cycles: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + The extra cycles inserted after every write minimum 1. The + data out is driven from the time WE is asserted until CS is + asserted. With a hold of 1 (value = 0), the CS stays active + for 1 extra cycle, etc. + minimum: 0 + maximum: 15 + + qcom,xmem-write-delta-cycles: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + The initial latency for write cycles inserted for the first + write to a page or burst memory. + minimum: 0 + maximum: 255 + + qcom,xmem-read-delta-cycles: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + The initial latency for read cycles inserted for the first + read to a page or burst memory. + minimum: 0 + maximum: 255 + + qcom,xmem-write-wait-cycles: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + The number of wait cycles for every write access. + minimum: 0 + maximum: 15 + + qcom,xmem-read-wait-cycles: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + The number of wait cycles for every read access. + minimum: 0 + maximum: 15 + + + # FAST chip selects + qcom,xmem-address-hold-enable: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + Holds the address for an extra cycle to meet hold time + requirements with ADV assertion, when set to 1. + enum: [ 0, 1 ] + + qcom,xmem-adv-to-oe-recovery-cycles: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + The number of cycles elapsed before an OE assertion, with + respect to the cycle where ADV (address valid) is asserted. + minimum: 0 + maximum: 3 + + qcom,xmem-read-hold-cycles: + $ref: /schemas/types.yaml#/definitions/uint32 + description: > + The length in cycles of the first segment of a read transfer. + For a single read transfer this will be the time from CS + assertion to OE assertion. 
+ minimum: 0 + maximum: 15 + + required: + - reg + +additionalProperties: false + +examples: + - | + #include + #include + #include + + external-bus@1a100000 { + compatible = "qcom,msm8660-ebi2"; + reg = <0x1a100000 0x1000>, <0x1a110000 0x1000>; + reg-names = "ebi2", "xmem"; + ranges = <0 0x0 0x1a800000 0x00800000>, + <1 0x0 0x1b000000 0x00800000>, + <2 0x0 0x1b800000 0x00800000>, + <3 0x0 0x1d000000 0x08000000>, + <4 0x0 0x1c800000 0x00800000>, + <5 0x0 0x1c000000 0x00800000>; + + clocks = <&gcc EBI2_2X_CLK>, <&gcc EBI2_CLK>; + clock-names = "ebi2x", "ebi2"; + + #address-cells = <2>; + #size-cells = <1>; + + ethernet@2,0 { + compatible = "smsc,lan9221", "smsc,lan9115"; + reg = <2 0x0 0x100>; + + interrupts-extended = <&pm8058_gpio 7 IRQ_TYPE_EDGE_FALLING>, + <&tlmm 29 IRQ_TYPE_EDGE_RISING>; + reset-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>; + + phy-mode = "mii"; + reg-io-width = <2>; + smsc,force-external-phy; + smsc,irq-push-pull; + + /* SLOW chipselect config */ + qcom,xmem-recovery-cycles = <0>; + qcom,xmem-write-hold-cycles = <3>; + qcom,xmem-write-delta-cycles = <31>; + qcom,xmem-read-delta-cycles = <28>; + qcom,xmem-write-wait-cycles = <9>; + qcom,xmem-read-wait-cycles = <9>; + }; + }; diff --git a/Documentation/devicetree/bindings/clock/amlogic,c3-pll-clkc.yaml b/Documentation/devicetree/bindings/clock/amlogic,c3-pll-clkc.yaml index 43de3c6fc1cffe..700865cc9792a0 100644 --- a/Documentation/devicetree/bindings/clock/amlogic,c3-pll-clkc.yaml +++ b/Documentation/devicetree/bindings/clock/amlogic,c3-pll-clkc.yaml @@ -24,11 +24,13 @@ properties: items: - description: input top pll - description: input mclk pll + - description: input fix pll clock-names: items: - const: top - const: mclk + - const: fix "#clock-cells": const: 1 @@ -52,8 +54,9 @@ examples: compatible = "amlogic,c3-pll-clkc"; reg = <0x0 0x8000 0x0 0x1a4>; clocks = <&scmi_clk 2>, - <&scmi_clk 5>; - clock-names = "top", "mclk"; + <&scmi_clk 5>, + <&scmi_clk 12>; + clock-names = "top", "mclk", "fix"; #clock-cells = <1>; }; }; diff --git a/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml b/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml index c1bdcd9058edf0..c9eb60776b4d4c 100644 --- a/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml +++ b/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml @@ -42,6 +42,7 @@ properties: - atmel,sama5d3-pmc - atmel,sama5d4-pmc - microchip,sam9x60-pmc + - microchip,sam9x7-pmc - microchip,sama7g5-pmc - const: syscon @@ -88,6 +89,7 @@ allOf: contains: enum: - microchip,sam9x60-pmc + - microchip,sam9x7-pmc - microchip,sama7g5-pmc then: properties: diff --git a/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml b/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml index 7be29877e6d27f..c2283cd07f0543 100644 --- a/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml +++ b/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml @@ -18,7 +18,9 @@ properties: - atmel,sama5d4-sckc - microchip,sam9x60-sckc - items: - - const: microchip,sama7g5-sckc + - enum: + - microchip,sam9x7-sckc + - microchip,sama7g5-sckc - const: microchip,sam9x60-sckc reg: diff --git a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml index bd4cefbb1244d4..30252c95700c39 100644 --- a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml +++ b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml @@ -134,9 +134,13 @@ 
properties: "#reset-cells": const: 1 - clocks: true + clocks: + minItems: 3 + maxItems: 4 - clock-names: true + clock-names: + minItems: 3 + maxItems: 4 additionalProperties: false diff --git a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml index 59de125647ecb5..ccff74eda9fb93 100644 --- a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml +++ b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml @@ -67,9 +67,9 @@ properties: minItems: 1 maxItems: 19 - clocks: true - assigned-clocks: true - assigned-clock-parents: true + clocks: + minItems: 1 + maxItems: 19 additionalProperties: false diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml index a2c6eea9871d53..8b400da05fbefe 100644 --- a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml +++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml @@ -126,8 +126,6 @@ required: - compatible - reg - '#clock-cells' - - idt,shutdown - - idt,output-enable-active allOf: - if: diff --git a/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml b/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml index 0a6dc1a6e122f7..6588a17a7d9a5d 100644 --- a/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml +++ b/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml @@ -44,6 +44,9 @@ properties: ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mp-clock.h for the full list of i.MX8MP IMX8MP_CLK_AUDIOMIX_ clock IDs. + '#reset-cells': + const: 1 + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml b/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml index 685535846cbb7f..db5f48e4dd157f 100644 --- a/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml +++ b/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml @@ -35,7 +35,7 @@ properties: - mediatek,mt2701-apmixedsys - mediatek,mt2712-apmixedsys - mediatek,mt6765-apmixedsys - - mediatek,mt6779-apmixedsys + - mediatek,mt6779-apmixed - mediatek,mt6795-apmixedsys - mediatek,mt7629-apmixedsys - mediatek,mt8167-apmixedsys diff --git a/Documentation/devicetree/bindings/clock/mediatek,infracfg.yaml b/Documentation/devicetree/bindings/clock/mediatek,infracfg.yaml new file mode 100644 index 00000000000000..252c46d316ee55 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,infracfg.yaml @@ -0,0 +1,85 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,infracfg.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Infrastructure System Configuration Controller + +maintainers: + - Matthias Brugger + +description: + The Mediatek infracfg controller provides various clocks and reset outputs + to the system. The clock values can be found in , + and reset values in and + . 
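Consumer nodes reference this controller through the standard clocks and resets phandles, with one specifier cell each as defined below. A minimal, purely illustrative sketch — the consumer compatible and the CLK_INFRA_EXAMPLE / INFRA_EXAMPLE_RST identifiers are placeholders standing in for whatever the real dt-binding headers define, not values taken from this binding:

    /* Hypothetical consumer of an infracfg clock gate and reset line. */
    example-uart@11002000 {
        compatible = "vendor,example-uart";          /* placeholder */
        reg = <0x11002000 0x400>;
        clocks = <&infracfg CLK_INFRA_EXAMPLE>;      /* placeholder macro */
        clock-names = "bus";
        resets = <&infracfg INFRA_EXAMPLE_RST>;      /* placeholder macro */
        reset-names = "uart";
    };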
+ +properties: + compatible: + oneOf: + - items: + - enum: + - mediatek,mt2701-infracfg + - mediatek,mt2712-infracfg + - mediatek,mt6765-infracfg + - mediatek,mt6795-infracfg + - mediatek,mt6779-infracfg_ao + - mediatek,mt6797-infracfg + - mediatek,mt7622-infracfg + - mediatek,mt7629-infracfg + - mediatek,mt7981-infracfg + - mediatek,mt7986-infracfg + - mediatek,mt7988-infracfg + - mediatek,mt8135-infracfg + - mediatek,mt8167-infracfg + - mediatek,mt8173-infracfg + - mediatek,mt8183-infracfg + - mediatek,mt8516-infracfg + - const: syscon + - items: + - const: mediatek,mt7623-infracfg + - const: mediatek,mt2701-infracfg + - const: syscon + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + +required: + - compatible + - reg + - '#clock-cells' + +if: + properties: + compatible: + contains: + enum: + - mediatek,mt2701-infracfg + - mediatek,mt2712-infracfg + - mediatek,mt6795-infracfg + - mediatek,mt7622-infracfg + - mediatek,mt7986-infracfg + - mediatek,mt8135-infracfg + - mediatek,mt8173-infracfg + - mediatek,mt8183-infracfg +then: + required: + - '#reset-cells' + +additionalProperties: false + +examples: + - | + infracfg: clock-controller@10001000 { + compatible = "mediatek,mt8173-infracfg", "syscon"; + reg = <0x10001000 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt6795-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt6795-sys-clock.yaml deleted file mode 100644 index 378b761237d3a2..00000000000000 --- a/Documentation/devicetree/bindings/clock/mediatek,mt6795-sys-clock.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/clock/mediatek,mt6795-sys-clock.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MediaTek System Clock Controller for MT6795 - -maintainers: - - AngeloGioacchino Del Regno - - Chun-Jie Chen - -description: - The Mediatek system clock controller provides various clocks and system - configuration like reset and bus protection on MT6795. - -properties: - compatible: - items: - - enum: - - mediatek,mt6795-apmixedsys - - mediatek,mt6795-infracfg - - mediatek,mt6795-pericfg - - mediatek,mt6795-topckgen - - const: syscon - - reg: - maxItems: 1 - - '#clock-cells': - const: 1 - - '#reset-cells': - const: 1 - -required: - - compatible - - reg - - '#clock-cells' - -additionalProperties: false - -examples: - - | - soc { - #address-cells = <2>; - #size-cells = <2>; - - topckgen: clock-controller@10000000 { - compatible = "mediatek,mt6795-topckgen", "syscon"; - reg = <0 0x10000000 0 0x1000>; - #clock-cells = <1>; - }; - }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt8186-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8186-clock.yaml new file mode 100644 index 00000000000000..f4e58bfa504fb2 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,mt8186-clock.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,mt8186-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Functional Clock Controller for MT8186 + +maintainers: + - Chun-Jie Chen + +description: | + The clock architecture in MediaTek like below + PLLs --> + dividers --> + muxes + --> + clock gate + + The devices provide clock gate control in different IP blocks. 
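Each of these functional controllers is a leaf provider: devices inside the subsystem take their gate clocks from the local controller rather than from topckgen directly. A rough sketch of the pattern, reusing the imp_iic_wrap controller from the example below together with a hypothetical consumer (the index 0 stands in for a CLK_IMP_IIC_WRAP_* macro from the mt8186 clock header, and the consumer compatible is a placeholder):

    imp_iic_wrap: clock-controller@11017000 {
        compatible = "mediatek,mt8186-imp_iic_wrap";
        reg = <0x11017000 0x1000>;
        #clock-cells = <1>;
    };

    /* Hypothetical consumer inside the I2C wrapper subsystem. */
    i2c@11007000 {
        compatible = "vendor,example-i2c";       /* placeholder */
        reg = <0x11007000 0x1000>;
        clocks = <&imp_iic_wrap 0>;              /* gate index, placeholder */
        clock-names = "main";
        #address-cells = <1>;
        #size-cells = <0>;
    };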
+ +properties: + compatible: + items: + - enum: + - mediatek,mt8186-imp_iic_wrap + - mediatek,mt8186-mfgsys + - mediatek,mt8186-wpesys + - mediatek,mt8186-imgsys1 + - mediatek,mt8186-imgsys2 + - mediatek,mt8186-vdecsys + - mediatek,mt8186-vencsys + - mediatek,mt8186-camsys + - mediatek,mt8186-camsys_rawa + - mediatek,mt8186-camsys_rawb + - mediatek,mt8186-mdpsys + - mediatek,mt8186-ipesys + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + imp_iic_wrap: clock-controller@11017000 { + compatible = "mediatek,mt8186-imp_iic_wrap"; + reg = <0x11017000 0x1000>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt8186-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8186-sys-clock.yaml new file mode 100644 index 00000000000000..1c446fbc5108c1 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,mt8186-sys-clock.yaml @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,mt8186-sys-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek System Clock Controller for MT8186 + +maintainers: + - Chun-Jie Chen + +description: | + The clock architecture in MediaTek like below + PLLs --> + dividers --> + muxes + --> + clock gate + + The apmixedsys provides most of PLLs which generated from SoC 26m. + The topckgen provides dividers and muxes which provide the clock source to other IP blocks. + The infracfg_ao provides clock gate in peripheral and infrastructure IP blocks. + The mcusys provides mux control to select the clock source in AP MCU. + The device nodes also provide the system control capacity for configuration. + +properties: + compatible: + items: + - enum: + - mediatek,mt8186-mcusys + - mediatek,mt8186-topckgen + - mediatek,mt8186-infracfg_ao + - mediatek,mt8186-apmixedsys + - const: syscon + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + topckgen: syscon@10000000 { + compatible = "mediatek,mt8186-topckgen", "syscon"; + reg = <0x10000000 0x1000>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt8192-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8192-clock.yaml new file mode 100644 index 00000000000000..b8d690e28bdc9c --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,mt8192-clock.yaml @@ -0,0 +1,191 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,mt8192-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Functional Clock Controller for MT8192 + +maintainers: + - Chun-Jie Chen + +description: + The Mediatek functional clock controller provides various clocks on MT8192. 
+ +properties: + compatible: + items: + - enum: + - mediatek,mt8192-scp_adsp + - mediatek,mt8192-imp_iic_wrap_c + - mediatek,mt8192-imp_iic_wrap_e + - mediatek,mt8192-imp_iic_wrap_s + - mediatek,mt8192-imp_iic_wrap_ws + - mediatek,mt8192-imp_iic_wrap_w + - mediatek,mt8192-imp_iic_wrap_n + - mediatek,mt8192-msdc_top + - mediatek,mt8192-mfgcfg + - mediatek,mt8192-imgsys + - mediatek,mt8192-imgsys2 + - mediatek,mt8192-vdecsys_soc + - mediatek,mt8192-vdecsys + - mediatek,mt8192-vencsys + - mediatek,mt8192-camsys + - mediatek,mt8192-camsys_rawa + - mediatek,mt8192-camsys_rawb + - mediatek,mt8192-camsys_rawc + - mediatek,mt8192-ipesys + - mediatek,mt8192-mdpsys + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + scp_adsp: clock-controller@10720000 { + compatible = "mediatek,mt8192-scp_adsp"; + reg = <0x10720000 0x1000>; + #clock-cells = <1>; + }; + + - | + imp_iic_wrap_c: clock-controller@11007000 { + compatible = "mediatek,mt8192-imp_iic_wrap_c"; + reg = <0x11007000 0x1000>; + #clock-cells = <1>; + }; + + - | + imp_iic_wrap_e: clock-controller@11cb1000 { + compatible = "mediatek,mt8192-imp_iic_wrap_e"; + reg = <0x11cb1000 0x1000>; + #clock-cells = <1>; + }; + + - | + imp_iic_wrap_s: clock-controller@11d03000 { + compatible = "mediatek,mt8192-imp_iic_wrap_s"; + reg = <0x11d03000 0x1000>; + #clock-cells = <1>; + }; + + - | + imp_iic_wrap_ws: clock-controller@11d23000 { + compatible = "mediatek,mt8192-imp_iic_wrap_ws"; + reg = <0x11d23000 0x1000>; + #clock-cells = <1>; + }; + + - | + imp_iic_wrap_w: clock-controller@11e01000 { + compatible = "mediatek,mt8192-imp_iic_wrap_w"; + reg = <0x11e01000 0x1000>; + #clock-cells = <1>; + }; + + - | + imp_iic_wrap_n: clock-controller@11f02000 { + compatible = "mediatek,mt8192-imp_iic_wrap_n"; + reg = <0x11f02000 0x1000>; + #clock-cells = <1>; + }; + + - | + msdc_top: clock-controller@11f10000 { + compatible = "mediatek,mt8192-msdc_top"; + reg = <0x11f10000 0x1000>; + #clock-cells = <1>; + }; + + - | + mfgcfg: clock-controller@13fbf000 { + compatible = "mediatek,mt8192-mfgcfg"; + reg = <0x13fbf000 0x1000>; + #clock-cells = <1>; + }; + + - | + imgsys: clock-controller@15020000 { + compatible = "mediatek,mt8192-imgsys"; + reg = <0x15020000 0x1000>; + #clock-cells = <1>; + }; + + - | + imgsys2: clock-controller@15820000 { + compatible = "mediatek,mt8192-imgsys2"; + reg = <0x15820000 0x1000>; + #clock-cells = <1>; + }; + + - | + vdecsys_soc: clock-controller@1600f000 { + compatible = "mediatek,mt8192-vdecsys_soc"; + reg = <0x1600f000 0x1000>; + #clock-cells = <1>; + }; + + - | + vdecsys: clock-controller@1602f000 { + compatible = "mediatek,mt8192-vdecsys"; + reg = <0x1602f000 0x1000>; + #clock-cells = <1>; + }; + + - | + vencsys: clock-controller@17000000 { + compatible = "mediatek,mt8192-vencsys"; + reg = <0x17000000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys: clock-controller@1a000000 { + compatible = "mediatek,mt8192-camsys"; + reg = <0x1a000000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys_rawa: clock-controller@1a04f000 { + compatible = "mediatek,mt8192-camsys_rawa"; + reg = <0x1a04f000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys_rawb: clock-controller@1a06f000 { + compatible = "mediatek,mt8192-camsys_rawb"; + reg = <0x1a06f000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys_rawc: clock-controller@1a08f000 { + compatible = "mediatek,mt8192-camsys_rawc"; + reg = <0x1a08f000 0x1000>; + #clock-cells = <1>; + }; + + - | + ipesys: 
clock-controller@1b000000 { + compatible = "mediatek,mt8192-ipesys"; + reg = <0x1b000000 0x1000>; + #clock-cells = <1>; + }; + + - | + mdpsys: clock-controller@1f000000 { + compatible = "mediatek,mt8192-mdpsys"; + reg = <0x1f000000 0x1000>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt8192-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8192-sys-clock.yaml new file mode 100644 index 00000000000000..bf8c9aacdf1e73 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,mt8192-sys-clock.yaml @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,mt8192-sys-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek System Clock Controller for MT8192 + +maintainers: + - Chun-Jie Chen + +description: + The Mediatek system clock controller provides various clocks and system configuration + like reset and bus protection on MT8192. + +properties: + compatible: + items: + - enum: + - mediatek,mt8192-topckgen + - mediatek,mt8192-infracfg + - mediatek,mt8192-pericfg + - mediatek,mt8192-apmixedsys + - const: syscon + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + topckgen: syscon@10000000 { + compatible = "mediatek,mt8192-topckgen", "syscon"; + reg = <0x10000000 0x1000>; + #clock-cells = <1>; + }; + + - | + infracfg: syscon@10001000 { + compatible = "mediatek,mt8192-infracfg", "syscon"; + reg = <0x10001000 0x1000>; + #clock-cells = <1>; + }; + + - | + pericfg: syscon@10003000 { + compatible = "mediatek,mt8192-pericfg", "syscon"; + reg = <0x10003000 0x1000>; + #clock-cells = <1>; + }; + + - | + apmixedsys: syscon@1000c000 { + compatible = "mediatek,mt8192-apmixedsys", "syscon"; + reg = <0x1000c000 0x1000>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt8195-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8195-clock.yaml new file mode 100644 index 00000000000000..fcc963aff08763 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,mt8195-clock.yaml @@ -0,0 +1,238 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,mt8195-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Functional Clock Controller for MT8195 + +maintainers: + - Chun-Jie Chen + +description: + The clock architecture in Mediatek like below + PLLs --> + dividers --> + muxes + --> + clock gate + + The devices except apusys_pll provide clock gate control in different IP blocks. + The apusys_pll provides Plls which generated from SoC 26m for AI Processing Unit. 
+ +properties: + compatible: + items: + - enum: + - mediatek,mt8195-scp_adsp + - mediatek,mt8195-imp_iic_wrap_s + - mediatek,mt8195-imp_iic_wrap_w + - mediatek,mt8195-mfgcfg + - mediatek,mt8195-wpesys + - mediatek,mt8195-wpesys_vpp0 + - mediatek,mt8195-wpesys_vpp1 + - mediatek,mt8195-imgsys + - mediatek,mt8195-imgsys1_dip_top + - mediatek,mt8195-imgsys1_dip_nr + - mediatek,mt8195-imgsys1_wpe + - mediatek,mt8195-ipesys + - mediatek,mt8195-camsys + - mediatek,mt8195-camsys_rawa + - mediatek,mt8195-camsys_yuva + - mediatek,mt8195-camsys_rawb + - mediatek,mt8195-camsys_yuvb + - mediatek,mt8195-camsys_mraw + - mediatek,mt8195-ccusys + - mediatek,mt8195-vdecsys_soc + - mediatek,mt8195-vdecsys + - mediatek,mt8195-vdecsys_core1 + - mediatek,mt8195-vencsys + - mediatek,mt8195-vencsys_core1 + - mediatek,mt8195-apusys_pll + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + scp_adsp: clock-controller@10720000 { + compatible = "mediatek,mt8195-scp_adsp"; + reg = <0x10720000 0x1000>; + #clock-cells = <1>; + }; + + - | + imp_iic_wrap_s: clock-controller@11d03000 { + compatible = "mediatek,mt8195-imp_iic_wrap_s"; + reg = <0x11d03000 0x1000>; + #clock-cells = <1>; + }; + + - | + imp_iic_wrap_w: clock-controller@11e05000 { + compatible = "mediatek,mt8195-imp_iic_wrap_w"; + reg = <0x11e05000 0x1000>; + #clock-cells = <1>; + }; + + - | + mfgcfg: clock-controller@13fbf000 { + compatible = "mediatek,mt8195-mfgcfg"; + reg = <0x13fbf000 0x1000>; + #clock-cells = <1>; + }; + + - | + wpesys: clock-controller@14e00000 { + compatible = "mediatek,mt8195-wpesys"; + reg = <0x14e00000 0x1000>; + #clock-cells = <1>; + }; + + - | + wpesys_vpp0: clock-controller@14e02000 { + compatible = "mediatek,mt8195-wpesys_vpp0"; + reg = <0x14e02000 0x1000>; + #clock-cells = <1>; + }; + + - | + wpesys_vpp1: clock-controller@14e03000 { + compatible = "mediatek,mt8195-wpesys_vpp1"; + reg = <0x14e03000 0x1000>; + #clock-cells = <1>; + }; + + - | + imgsys: clock-controller@15000000 { + compatible = "mediatek,mt8195-imgsys"; + reg = <0x15000000 0x1000>; + #clock-cells = <1>; + }; + + - | + imgsys1_dip_top: clock-controller@15110000 { + compatible = "mediatek,mt8195-imgsys1_dip_top"; + reg = <0x15110000 0x1000>; + #clock-cells = <1>; + }; + + - | + imgsys1_dip_nr: clock-controller@15130000 { + compatible = "mediatek,mt8195-imgsys1_dip_nr"; + reg = <0x15130000 0x1000>; + #clock-cells = <1>; + }; + + - | + imgsys1_wpe: clock-controller@15220000 { + compatible = "mediatek,mt8195-imgsys1_wpe"; + reg = <0x15220000 0x1000>; + #clock-cells = <1>; + }; + + - | + ipesys: clock-controller@15330000 { + compatible = "mediatek,mt8195-ipesys"; + reg = <0x15330000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys: clock-controller@16000000 { + compatible = "mediatek,mt8195-camsys"; + reg = <0x16000000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys_rawa: clock-controller@1604f000 { + compatible = "mediatek,mt8195-camsys_rawa"; + reg = <0x1604f000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys_yuva: clock-controller@1606f000 { + compatible = "mediatek,mt8195-camsys_yuva"; + reg = <0x1606f000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys_rawb: clock-controller@1608f000 { + compatible = "mediatek,mt8195-camsys_rawb"; + reg = <0x1608f000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys_yuvb: clock-controller@160af000 { + compatible = "mediatek,mt8195-camsys_yuvb"; + reg = <0x160af000 0x1000>; + #clock-cells = <1>; + }; + + - | + camsys_mraw: 
clock-controller@16140000 { + compatible = "mediatek,mt8195-camsys_mraw"; + reg = <0x16140000 0x1000>; + #clock-cells = <1>; + }; + + - | + ccusys: clock-controller@17200000 { + compatible = "mediatek,mt8195-ccusys"; + reg = <0x17200000 0x1000>; + #clock-cells = <1>; + }; + + - | + vdecsys_soc: clock-controller@1800f000 { + compatible = "mediatek,mt8195-vdecsys_soc"; + reg = <0x1800f000 0x1000>; + #clock-cells = <1>; + }; + + - | + vdecsys: clock-controller@1802f000 { + compatible = "mediatek,mt8195-vdecsys"; + reg = <0x1802f000 0x1000>; + #clock-cells = <1>; + }; + + - | + vdecsys_core1: clock-controller@1803f000 { + compatible = "mediatek,mt8195-vdecsys_core1"; + reg = <0x1803f000 0x1000>; + #clock-cells = <1>; + }; + + - | + vencsys: clock-controller@1a000000 { + compatible = "mediatek,mt8195-vencsys"; + reg = <0x1a000000 0x1000>; + #clock-cells = <1>; + }; + + - | + vencsys_core1: clock-controller@1b000000 { + compatible = "mediatek,mt8195-vencsys_core1"; + reg = <0x1b000000 0x1000>; + #clock-cells = <1>; + }; + + - | + apusys_pll: clock-controller@190f3000 { + compatible = "mediatek,mt8195-apusys_pll"; + reg = <0x190f3000 0x1000>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt8195-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8195-sys-clock.yaml new file mode 100644 index 00000000000000..69f096eb168d19 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,mt8195-sys-clock.yaml @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,mt8195-sys-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek System Clock Controller for MT8195 + +maintainers: + - Chun-Jie Chen + +description: + The clock architecture in Mediatek like below + PLLs --> + dividers --> + muxes + --> + clock gate + + The apmixedsys provides most of PLLs which generated from SoC 26m. + The topckgen provides dividers and muxes which provide the clock source to other IP blocks. + The infracfg_ao and pericfg_ao provides clock gate in peripheral and infrastructure IP blocks. 
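Put together, a leaf device normally takes its bus gate from infracfg_ao or pericfg_ao, while a board file can use assigned-clocks on topckgen to pin one of its muxes onto a particular apmixedsys PLL. The sketch below is illustrative only; CLK_TOP_EXAMPLE, CLK_APMIXED_EXAMPLE and CLK_PERI_EXAMPLE are placeholder names, not identifiers from the mt8195 clock header:

    &topckgen {
        /* Reparent one of the topckgen muxes onto an apmixedsys PLL. */
        assigned-clocks = <&topckgen CLK_TOP_EXAMPLE>;
        assigned-clock-parents = <&apmixedsys CLK_APMIXED_EXAMPLE>;
    };

    example-device@1100a000 {
        compatible = "vendor,example";                 /* placeholder */
        reg = <0x1100a000 0x1000>;
        clocks = <&topckgen CLK_TOP_EXAMPLE>,          /* core clock via mux/divider */
                 <&pericfg_ao CLK_PERI_EXAMPLE>;       /* bus gate */
        clock-names = "core", "bus";
    };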
+ +properties: + compatible: + items: + - enum: + - mediatek,mt8195-topckgen + - mediatek,mt8195-infracfg_ao + - mediatek,mt8195-apmixedsys + - mediatek,mt8195-pericfg_ao + - const: syscon + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + topckgen: syscon@10000000 { + compatible = "mediatek,mt8195-topckgen", "syscon"; + reg = <0x10000000 0x1000>; + #clock-cells = <1>; + }; + + - | + infracfg_ao: syscon@10001000 { + compatible = "mediatek,mt8195-infracfg_ao", "syscon"; + reg = <0x10001000 0x1000>; + #clock-cells = <1>; + }; + + - | + apmixedsys: syscon@1000c000 { + compatible = "mediatek,mt8195-apmixedsys", "syscon"; + reg = <0x1000c000 0x1000>; + #clock-cells = <1>; + }; + + - | + pericfg_ao: syscon@11003000 { + compatible = "mediatek,mt8195-pericfg_ao", "syscon"; + reg = <0x11003000 0x1000>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,pericfg.yaml b/Documentation/devicetree/bindings/clock/mediatek,pericfg.yaml new file mode 100644 index 00000000000000..2f06baecfd2334 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,pericfg.yaml @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,pericfg.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Peripheral Configuration Controller + +maintainers: + - Bartosz Golaszewski + +description: + The Mediatek pericfg controller provides various clocks and reset outputs + to the system. + +properties: + compatible: + oneOf: + - items: + - enum: + - mediatek,mt2701-pericfg + - mediatek,mt2712-pericfg + - mediatek,mt6765-pericfg + - mediatek,mt6795-pericfg + - mediatek,mt7622-pericfg + - mediatek,mt7629-pericfg + - mediatek,mt8135-pericfg + - mediatek,mt8173-pericfg + - mediatek,mt8183-pericfg + - mediatek,mt8186-pericfg + - mediatek,mt8188-pericfg + - mediatek,mt8195-pericfg + - mediatek,mt8516-pericfg + - const: syscon + - items: + # Special case for mt7623 for backward compatibility + - const: mediatek,mt7623-pericfg + - const: mediatek,mt2701-pericfg + - const: syscon + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + pericfg@10003000 { + compatible = "mediatek,mt8173-pericfg", "syscon"; + reg = <0x10003000 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + - | + pericfg@10003000 { + compatible = "mediatek,mt7623-pericfg", "mediatek,mt2701-pericfg", "syscon"; + reg = <0x10003000 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml b/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml new file mode 100644 index 00000000000000..10483e26878fb4 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/mediatek,syscon.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Clock controller syscon's + +maintainers: + - Matthias Brugger + - AngeloGioacchino Del Regno + +description: + The MediaTek clock controller syscon's provide various clocks to the system. 
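As the compatible list that follows spells out, the MT7623 blocks do not get their own driver entries; they fall back to the corresponding MT2701 compatible, with syscon appended last. For example, a BDP system controller node on MT7623 would look like:

    clock-controller@11220000 {
        compatible = "mediatek,mt7623-bdpsys", "mediatek,mt2701-bdpsys", "syscon";
        reg = <0x11220000 0x2000>;
        #clock-cells = <1>;
    };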
+ +properties: + compatible: + oneOf: + - items: + - enum: + - mediatek,mt2701-bdpsys + - mediatek,mt2701-imgsys + - mediatek,mt2701-vdecsys + - mediatek,mt2712-bdpsys + - mediatek,mt2712-imgsys + - mediatek,mt2712-jpgdecsys + - mediatek,mt2712-mcucfg + - mediatek,mt2712-mfgcfg + - mediatek,mt2712-vdecsys + - mediatek,mt2712-vencsys + - mediatek,mt6765-camsys + - mediatek,mt6765-imgsys + - mediatek,mt6765-mipi0a + - mediatek,mt6765-vcodecsys + - mediatek,mt6779-camsys + - mediatek,mt6779-imgsys + - mediatek,mt6779-ipesys + - mediatek,mt6779-mfgcfg + - mediatek,mt6779-vdecsys + - mediatek,mt6779-vencsys + - mediatek,mt6797-imgsys + - mediatek,mt6797-vdecsys + - mediatek,mt6797-vencsys + - mediatek,mt8167-imgsys + - mediatek,mt8167-mfgcfg + - mediatek,mt8167-vdecsys + - mediatek,mt8173-imgsys + - mediatek,mt8173-vdecsys + - mediatek,mt8173-vencltsys + - mediatek,mt8173-vencsys + - mediatek,mt8183-camsys + - mediatek,mt8183-imgsys + - mediatek,mt8183-ipu_conn + - mediatek,mt8183-ipu_adl + - mediatek,mt8183-ipu_core0 + - mediatek,mt8183-ipu_core1 + - mediatek,mt8183-mcucfg + - mediatek,mt8183-mfgcfg + - mediatek,mt8183-vdecsys + - mediatek,mt8183-vencsys + - const: syscon + - items: + - const: mediatek,mt7623-bdpsys + - const: mediatek,mt2701-bdpsys + - const: syscon + - items: + - const: mediatek,mt7623-imgsys + - const: mediatek,mt2701-imgsys + - const: syscon + - items: + - const: mediatek,mt7623-vdecsys + - const: mediatek,mt2701-vdecsys + - const: syscon + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + +required: + - compatible + - '#clock-cells' + +additionalProperties: false + +examples: + - | + clock-controller@11220000 { + compatible = "mediatek,mt2701-bdpsys", "syscon"; + reg = <0x11220000 0x2000>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml index 2dffc02dcd8b5e..5dc360b2ea4b76 100644 --- a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml +++ b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml @@ -16,6 +16,7 @@ properties: - nxp,imx95-lvds-csr - nxp,imx95-display-csr - nxp,imx95-camera-csr + - nxp,imx95-netcmix-blk-ctrl - nxp,imx95-vpu-csr - const: syscon diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt deleted file mode 100644 index 20cbca3f41d8b5..00000000000000 --- a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt +++ /dev/null @@ -1,30 +0,0 @@ -NXP LPC32xx Clock Controller - -Required properties: -- compatible: should be "nxp,lpc3220-clk" -- reg: should contain clock controller registers location and length -- #clock-cells: must be 1, the cell holds id of a clock provided by the - clock controller -- clocks: phandles of external oscillators, the list must contain one - 32768 Hz oscillator and may have one optional high frequency oscillator -- clock-names: list of external oscillator clock names, must contain - "xtal_32k" and may have optional "xtal" - -Examples: - - /* System Control Block */ - scb { - compatible = "simple-bus"; - ranges = <0x0 0x040004000 0x00001000>; - #address-cells = <1>; - #size-cells = <1>; - - clk: clock-controller@0 { - compatible = "nxp,lpc3220-clk"; - reg = <0x00 0x114>; - #clock-cells = <1>; - - clocks = <&xtal_32k>, <&xtal>; - clock-names = "xtal_32k", "xtal"; - }; - }; diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml 
b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml new file mode 100644 index 00000000000000..16f79616d18a68 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/nxp,lpc3220-clk.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP LPC32xx Clock Controller + +maintainers: + - Animesh Agarwal + +properties: + compatible: + const: nxp,lpc3220-clk + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + + clocks: + minItems: 1 + items: + - description: External 32768 Hz oscillator. + - description: Optional high frequency oscillator. + + clock-names: + minItems: 1 + items: + - const: xtal_32k + - const: xtal + +required: + - compatible + - reg + - '#clock-cells' + - clocks + - clock-names + +additionalProperties: false + +examples: + - | + clock-controller@0 { + compatible = "nxp,lpc3220-clk"; + reg = <0x00 0x114>; + #clock-cells = <1>; + clocks = <&xtal_32k>, <&xtal>; + clock-names = "xtal_32k", "xtal"; + }; diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt deleted file mode 100644 index 0aa249409b511c..00000000000000 --- a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt +++ /dev/null @@ -1,22 +0,0 @@ -NXP LPC32xx USB Clock Controller - -Required properties: -- compatible: should be "nxp,lpc3220-usb-clk" -- reg: should contain clock controller registers location and length -- #clock-cells: must be 1, the cell holds id of a clock provided by the - USB clock controller - -Examples: - - usb { - #address-cells = <1>; - #size-cells = <1>; - compatible = "simple-bus"; - ranges = <0x0 0x31020000 0x00001000>; - - usbclk: clock-controller@f00 { - compatible = "nxp,lpc3220-usb-clk"; - reg = <0xf00 0x100>; - #clock-cells = <1>; - }; - }; diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml new file mode 100644 index 00000000000000..10361d2292fb5a --- /dev/null +++ b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/nxp,lpc3220-usb-clk.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP LPC32xx USB Clock Controller + +maintainers: + - Animesh Agarwal + +properties: + compatible: + const: nxp,lpc3220-usb-clk + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + +required: + - compatible + - reg + - '#clock-cells' + +additionalProperties: false + +examples: + - | + clock-controller@f00 { + compatible = "nxp,lpc3220-usb-clk"; + reg = <0xf00 0x100>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml index 5ca927a8b1d538..47ceab641a4c7c 100644 --- a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml @@ -21,6 +21,7 @@ properties: - qcom,ipq6018-a53pll - qcom,ipq8074-a53pll - qcom,ipq9574-a73pll + - qcom,msm8226-a7pll - qcom,msm8916-a53pll - qcom,msm8939-a53pll @@ -40,6 +41,9 @@ properties: operating-points-v2: true + opp-table: + type: object + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq5332-gcc.yaml 
b/Documentation/devicetree/bindings/clock/qcom,ipq5332-gcc.yaml index adc30d84fa8f0b..9193de681de2e7 100644 --- a/Documentation/devicetree/bindings/clock/qcom,ipq5332-gcc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,ipq5332-gcc.yaml @@ -31,6 +31,8 @@ properties: - description: USB PCIE wrapper pipe clock source '#power-domain-cells': false + '#interconnect-cells': + const: 1 required: - compatible diff --git a/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml new file mode 100644 index 00000000000000..033e010754a26b --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/qcom,qcs404-turingcc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Turing Clock & Reset Controller on QCS404 + +maintainers: + - Bjorn Andersson + +properties: + compatible: + const: qcom,qcs404-turingcc + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + +required: + - compatible + - reg + - clocks + - '#clock-cells' + - '#reset-cells' + +additionalProperties: false + +examples: + - | + #include + clock-controller@800000 { + compatible = "qcom,qcs404-turingcc"; + reg = <0x00800000 0x30000>; + clocks = <&gcc GCC_CDSP_CFG_AHB_CLK>; + + #clock-cells = <1>; + #reset-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml index 3665dd30604a2f..02fcffe93f1aa8 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml @@ -139,7 +139,7 @@ examples: - | rpm { rpm-requests { - compatible = "qcom,rpm-msm8916"; + compatible = "qcom,rpm-msm8916", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; clock-controller { diff --git a/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml index 3326dcd6766c34..273d66e245c5fd 100644 --- a/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml @@ -18,9 +18,16 @@ description: | properties: compatible: - enum: - - qcom,sc8280xp-lpassaudiocc - - qcom,sc8280xp-lpasscc + oneOf: + - enum: + - qcom,sc8280xp-lpassaudiocc + - qcom,sc8280xp-lpasscc + - items: + - const: qcom,x1e80100-lpassaudiocc + - const: qcom,sc8280xp-lpassaudiocc + - items: + - const: qcom,x1e80100-lpasscc + - const: qcom,sc8280xp-lpasscc reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml new file mode 100644 index 00000000000000..f54ce865880ded --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,sm4450-camcc.yaml @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/qcom,sm4450-camcc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Camera Clock & Reset Controller on SM4450 + +maintainers: + - Ajit Pandey + - Taniya Das + +description: | + Qualcomm camera clock control module provides the clocks, resets and power + domains on SM4450 + + See also:: include/dt-bindings/clock/qcom,sm4450-camcc.h + +properties: + compatible: + const: 
qcom,sm4450-camcc + + reg: + maxItems: 1 + + clocks: + items: + - description: Board XO source + - description: Camera AHB clock source from GCC + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + + '#power-domain-cells': + const: 1 + +required: + - compatible + - reg + - clocks + - '#clock-cells' + - '#reset-cells' + - '#power-domain-cells' + +additionalProperties: false + +examples: + - | + #include + #include + clock-controller@ade0000 { + compatible = "qcom,sm4450-camcc"; + reg = <0x0ade0000 0x20000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_CAMERA_AHB_CLK>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; +... diff --git a/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml new file mode 100644 index 00000000000000..2aa05353eff173 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,sm4450-dispcc.yaml @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/qcom,sm4450-dispcc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Display Clock & Reset Controller on SM4450 + +maintainers: + - Ajit Pandey + - Taniya Das + +description: | + Qualcomm display clock control module provides the clocks, resets and power + domains on SM4450 + + See also:: include/dt-bindings/clock/qcom,sm4450-dispcc.h + +properties: + compatible: + const: qcom,sm4450-dispcc + + reg: + maxItems: 1 + + clocks: + items: + - description: Board XO source + - description: Board active XO source + - description: Display AHB clock source from GCC + - description: sleep clock source + - description: Byte clock from DSI PHY0 + - description: Pixel clock from DSI PHY0 + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + + '#power-domain-cells': + const: 1 + +required: + - compatible + - reg + - clocks + - '#clock-cells' + - '#reset-cells' + - '#power-domain-cells' + +additionalProperties: false + +examples: + - | + #include + #include + clock-controller@af00000 { + compatible = "qcom,sm4450-dispcc"; + reg = <0x0af00000 0x20000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&rpmhcc RPMH_CXO_CLK_A>, + <&gcc GCC_DISP_AHB_CLK>, + <&sleep_clk>, + <&dsi0_phy_pll_out_byteclk>, + <&dsi0_phy_pll_out_dsiclk>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; +... diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml new file mode 100644 index 00000000000000..5e9f62d7866cfc --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,sm8150-camcc.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/qcom,sm8150-camcc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Camera Clock & Reset Controller on SM8150 + +maintainers: + - Satya Priya Kakitapalli + +description: | + Qualcomm camera clock control module provides the clocks, resets and + power domains on SM8150. + + See also:: include/dt-bindings/clock/qcom,sm8150-camcc.h + +properties: + compatible: + const: qcom,sm8150-camcc + + reg: + maxItems: 1 + + clocks: + items: + - description: Board XO source + - description: Camera AHB clock from GCC + + power-domains: + maxItems: 1 + description: + A phandle and PM domain specifier for the MMCX power domain. 
+ + required-opps: + maxItems: 1 + description: + A phandle to an OPP node describing required MMCX performance point. + + '#clock-cells': + const: 1 + + '#reset-cells': + const: 1 + + '#power-domain-cells': + const: 1 + +required: + - compatible + - reg + - clocks + - power-domains + - required-opps + - '#clock-cells' + - '#reset-cells' + - '#power-domain-cells' + +additionalProperties: false + +examples: + - | + #include + #include + #include + clock-controller@ad00000 { + compatible = "qcom,sm8150-camcc"; + reg = <0x0ad00000 0x10000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_CAMERA_AHB_CLK>; + power-domains = <&rpmhpd SM8150_MMCX>; + required-opps = <&rpmhpd_opp_low_svs>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; +... diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml index f58edfc10f4c76..26afbbe655112f 100644 --- a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml @@ -21,9 +21,6 @@ description: | include/dt-bindings/clock/qcom,sm8650-camcc.h include/dt-bindings/clock/qcom,x1e80100-camcc.h -allOf: - - $ref: qcom,gcc.yaml# - properties: compatible: enum: @@ -57,7 +54,21 @@ required: - compatible - clocks - power-domains - - required-opps + +allOf: + - $ref: qcom,gcc.yaml# + - if: + properties: + compatible: + contains: + enum: + - qcom,sc8280xp-camcc + - qcom,sm8450-camcc + - qcom,sm8550-camcc + - qcom,x1e80100-camcc + then: + required: + - required-opps unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml index d10bb002906e9f..2d2c59aa8c6b09 100644 --- a/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-gpucc.yaml @@ -14,6 +14,7 @@ description: | domains on Qualcomm SoCs. See also:: + include/dt-bindings/clock/qcom,sm4450-gpucc.h include/dt-bindings/clock/qcom,sm8450-gpucc.h include/dt-bindings/clock/qcom,sm8550-gpucc.h include/dt-bindings/reset/qcom,sm8450-gpucc.h @@ -23,6 +24,7 @@ description: | properties: compatible: enum: + - qcom,sm4450-gpucc - qcom,sm8450-gpucc - qcom,sm8550-gpucc - qcom,sm8650-gpucc diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml index b2792b4bb554d6..9829ba28fe0ed3 100644 --- a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml @@ -44,11 +44,20 @@ required: - compatible - clocks - power-domains - - required-opps - '#power-domain-cells' allOf: - $ref: qcom,gcc.yaml# + - if: + properties: + compatible: + contains: + enum: + - qcom,sm8450-videocc + - qcom,sm8550-videocc + then: + required: + - required-opps unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/clock/qcom,turingcc.txt b/Documentation/devicetree/bindings/clock/qcom,turingcc.txt deleted file mode 100644 index 126517de5f9aea..00000000000000 --- a/Documentation/devicetree/bindings/clock/qcom,turingcc.txt +++ /dev/null @@ -1,19 +0,0 @@ -Qualcomm Turing Clock & Reset Controller Binding ------------------------------------------------- - -Required properties : -- compatible: shall contain "qcom,qcs404-turingcc". -- reg: shall contain base register location and length. 
-- clocks: ahb clock for the TuringCC -- #clock-cells: from common clock binding, shall contain 1. -- #reset-cells: from common reset binding, shall contain 1. - -Example: - turingcc: clock-controller@800000 { - compatible = "qcom,qcs404-turingcc"; - reg = <0x00800000 0x30000>; - clocks = <&gcc GCC_CDSP_CFG_AHB_CLK>; - - #clock-cells = <1>; - #reset-cells = <1>; - }; diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml index 9185d101737e4d..a0e09b7002f071 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml @@ -32,12 +32,16 @@ properties: reg: maxItems: 1 - clocks: true + clocks: + minItems: 1 + maxItems: 3 '#clock-cells': const: 1 - clock-output-names: true + clock-output-names: + minItems: 3 + maxItems: 17 renesas,mode: description: Board-specific settings of the MD_CK* bits on R-Mobile A1 diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml index 084259d30232aa..77ce3615c65ace 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml @@ -31,6 +31,7 @@ properties: - renesas,r8a7745-cpg-mssr # RZ/G1E - renesas,r8a77470-cpg-mssr # RZ/G1C - renesas,r8a774a1-cpg-mssr # RZ/G2M + - renesas,r8a774a3-cpg-mssr # RZ/G2M v3.0 - renesas,r8a774b1-cpg-mssr # RZ/G2N - renesas,r8a774c0-cpg-mssr # RZ/G2E - renesas,r8a774e1-cpg-mssr # RZ/G2H diff --git a/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml b/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml new file mode 100644 index 00000000000000..926c503bed1f49 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/renesas,rzv2h-cpg.yaml @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/renesas,rzv2h-cpg.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas RZ/V2H(P) Clock Pulse Generator (CPG) + +maintainers: + - Lad Prabhakar + +description: + On Renesas RZ/V2H(P) SoCs, the CPG (Clock Pulse Generator) handles generation + and control of clock signals for the IP modules, generation and control of resets, + and control over booting, low power consumption and power supply domains. + +properties: + compatible: + const: renesas,r9a09g057-cpg + + reg: + maxItems: 1 + + clocks: + items: + - description: AUDIO_EXTAL clock input + - description: RTXIN clock input + - description: QEXTAL clock input + + clock-names: + items: + - const: audio_extal + - const: rtxin + - const: qextal + + '#clock-cells': + description: | + - For CPG core clocks, the two clock specifier cells must be "CPG_CORE" + and a core clock reference, as defined in + , + - For module clocks, the two clock specifier cells must be "CPG_MOD" and + a module number. The module number is calculated as the CLKON register + offset index multiplied by 16, plus the actual bit in the register + used to turn the CLK ON. For example, for CGC_GIC_0_GICCLK, the + calculation is (1 * 16 + 3) = 0x13. + const: 2 + + '#power-domain-cells': + const: 0 + + '#reset-cells': + description: + The single reset specifier cell must be the reset number. The reset number + is calculated as the reset register offset index multiplied by 16, plus the + actual bit in the register used to reset the specific IP block. 
For example, + for SYS_0_PRESETN, the calculation is (3 * 16 + 0) = 0x30. + const: 1 + +required: + - compatible + - reg + - clocks + - clock-names + - '#clock-cells' + - '#power-domain-cells' + - '#reset-cells' + +additionalProperties: false + +examples: + - | + clock-controller@10420000 { + compatible = "renesas,r9a09g057-cpg"; + reg = <0x10420000 0x10000>; + clocks = <&audio_extal_clk>, <&rtxin_clk>, <&qextal_clk>; + clock-names = "audio_extal", "rtxin", "qextal"; + #clock-cells = <2>; + #power-domain-cells = <0>; + #reset-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml new file mode 100644 index 00000000000000..9c9b36049c716a --- /dev/null +++ b/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/rockchip,rk3576-cru.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip rk3576 Family Clock and Reset Control Module + +maintainers: + - Elaine Zhang + - Heiko Stuebner + - Detlev Casanova + +description: + The RK3576 clock controller generates the clock and also implements a reset + controller for SoC peripherals. For example it provides SCLK_UART2 and + PCLK_UART2, as well as SRST_P_UART2 and SRST_S_UART2 for the second UART + module. + +properties: + compatible: + const: rockchip,rk3576-cru + + reg: + maxItems: 1 + + "#clock-cells": + const: 1 + + "#reset-cells": + const: 1 + + clocks: + maxItems: 2 + + clock-names: + items: + - const: xin24m + - const: xin32k + +required: + - compatible + - reg + - "#clock-cells" + - "#reset-cells" + +additionalProperties: false + +examples: + - | + clock-controller@27200000 { + compatible = "rockchip,rk3576-cru"; + reg = <0xfd7c0000 0x5c000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml index 74cd3f3f229ab4..4ff175c4992b68 100644 --- a/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml +++ b/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml @@ -42,10 +42,6 @@ properties: - const: xin24m - const: xin32k - assigned-clocks: true - - assigned-clock-rates: true - rockchip,grf: $ref: /schemas/types.yaml#/definitions/phandle description: > diff --git a/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml index 55c4f94a14d182..32f39e543b3620 100644 --- a/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml +++ b/Documentation/devicetree/bindings/clock/samsung,exynosautov9-clock.yaml @@ -35,6 +35,7 @@ properties: - samsung,exynosautov9-cmu-top - samsung,exynosautov9-cmu-busmc - samsung,exynosautov9-cmu-core + - samsung,exynosautov9-cmu-dpum - samsung,exynosautov9-cmu-fsys0 - samsung,exynosautov9-cmu-fsys1 - samsung,exynosautov9-cmu-fsys2 @@ -109,6 +110,24 @@ allOf: - const: oscclk - const: dout_clkcmu_core_bus + - if: + properties: + compatible: + contains: + const: samsung,exynosautov9-cmu-dpum + + then: + properties: + clocks: + items: + - description: External reference clock (26 MHz) + - description: DPU Main bus clock (from CMU_TOP) + + clock-names: + items: + - const: oscclk + - const: bus + - if: properties: compatible: diff --git 
a/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml new file mode 100644 index 00000000000000..3330b272747475 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/samsung,exynosautov920-clock.yaml @@ -0,0 +1,162 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/samsung,exynosautov920-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Samsung ExynosAuto v920 SoC clock controller + +maintainers: + - Sunyeal Hong + - Chanwoo Choi + - Krzysztof Kozlowski + - Sylwester Nawrocki + +description: | + ExynosAuto v920 clock controller is comprised of several CMU units, generating + clocks for different domains. Those CMU units are modeled as separate device + tree nodes, and might depend on each other. Root clocks in that clock tree are + two external clocks:: OSCCLK/XTCXO (38.4 MHz) and RTCCLK/XrtcXTI (32768 Hz). + The external OSCCLK must be defined as fixed-rate clock in dts. + + CMU_TOP is a top-level CMU, where all base clocks are prepared using PLLs and + dividers; all other clocks of function blocks (other CMUs) are usually + derived from CMU_TOP. + + Each clock is assigned an identifier and client nodes can use this identifier + to specify the clock which they consume. All clocks available for usage + in clock consumer nodes are defined as preprocessor macros in + 'include/dt-bindings/clock/samsung,exynosautov920.h' header. + +properties: + compatible: + enum: + - samsung,exynosautov920-cmu-top + - samsung,exynosautov920-cmu-peric0 + - samsung,exynosautov920-cmu-peric1 + - samsung,exynosautov920-cmu-misc + - samsung,exynosautov920-cmu-hsi0 + - samsung,exynosautov920-cmu-hsi1 + + clocks: + minItems: 1 + maxItems: 4 + + clock-names: + minItems: 1 + maxItems: 4 + + "#clock-cells": + const: 1 + + reg: + maxItems: 1 + +allOf: + - if: + properties: + compatible: + contains: + const: samsung,exynosautov920-cmu-top + + then: + properties: + clocks: + items: + - description: External reference clock (38.4 MHz) + + clock-names: + items: + - const: oscclk + + - if: + properties: + compatible: + contains: + enum: + - samsung,exynosautov920-cmu-peric0 + - samsung,exynosautov920-cmu-peric1 + + then: + properties: + clocks: + items: + - description: External reference clock (38.4 MHz) + - description: CMU_PERICn NOC clock (from CMU_TOP) + - description: CMU_PERICn IP clock (from CMU_TOP) + + clock-names: + items: + - const: oscclk + - const: noc + - const: ip + + - if: + properties: + compatible: + enum: + - samsung,exynosautov920-cmu-misc + - samsung,exynosautov920-cmu-hsi0 + + then: + properties: + clocks: + items: + - description: External reference clock (38.4 MHz) + - description: CMU_MISC/CMU_HSI0 NOC clock (from CMU_TOP) + + clock-names: + items: + - const: oscclk + - const: noc + + - if: + properties: + compatible: + contains: + const: samsung,exynosautov920-cmu-hsi1 + + then: + properties: + clocks: + items: + - description: External reference clock (38.4 MHz) + - description: CMU_HSI1 NOC clock (from CMU_TOP) + - description: CMU_HSI1 USBDRD clock (from CMU_TOP) + - description: CMU_HSI1 MMC_CARD clock (from CMU_TOP) + + clock-names: + items: + - const: oscclk + - const: noc + - const: usbdrd + - const: mmc_card + +required: + - compatible + - "#clock-cells" + - clocks + - clock-names + - reg + +additionalProperties: false + +examples: + # Clock controller node for CMU_PERIC0 + - | + #include + + 
cmu_peric0: clock-controller@10800000 { + compatible = "samsung,exynosautov920-cmu-peric0"; + reg = <0x10800000 0x8000>; + #clock-cells = <1>; + + clocks = <&xtcxo>, + <&cmu_top DOUT_CLKCMU_PERIC0_NOC>, + <&cmu_top DOUT_CLKCMU_PERIC0_IP>; + clock-names = "oscclk", + "noc", + "ip"; + }; + +... diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml index 5194be0b410e42..9b3aaae546cb08 100644 --- a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml +++ b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml @@ -60,8 +60,14 @@ properties: - st,stm32mp1-rcc - st,stm32mp13-rcc - const: syscon - clocks: true - clock-names: true + + clocks: + minItems: 1 + maxItems: 5 + + clock-names: + minItems: 1 + maxItems: 5 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/cpu/idle-states.yaml b/Documentation/devicetree/bindings/cpu/idle-states.yaml index 239480ef7c30d3..385b0a511652c5 100644 --- a/Documentation/devicetree/bindings/cpu/idle-states.yaml +++ b/Documentation/devicetree/bindings/cpu/idle-states.yaml @@ -385,7 +385,7 @@ patternProperties: This property is required in idle state nodes of device tree meant for RISC-V systems. For more details on the suspend_type parameter - refer the SBI specifiation v0.3 (or higher) [7]. + refer the SBI specification v0.3 (or higher) [7]. local-timer-stop: description: diff --git a/Documentation/devicetree/bindings/cpu/nvidia,tegra186-ccplex-cluster.yaml b/Documentation/devicetree/bindings/cpu/nvidia,tegra186-ccplex-cluster.yaml deleted file mode 100644 index 16a44897456103..00000000000000 --- a/Documentation/devicetree/bindings/cpu/nvidia,tegra186-ccplex-cluster.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/cpu/nvidia,tegra186-ccplex-cluster.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: NVIDIA Tegra186 CCPLEX Cluster - -maintainers: - - Thierry Reding - - Jon Hunter - -properties: - compatible: - const: nvidia,tegra186-ccplex-cluster - - reg: - maxItems: 1 - - nvidia,bpmp: - description: phandle to the BPMP used to query CPU frequency tables - $ref: /schemas/types.yaml#/definitions/phandle - -additionalProperties: false - -required: - - compatible - - reg - - nvidia,bpmp - -examples: - - | - ccplex@e000000 { - compatible = "nvidia,tegra186-ccplex-cluster"; - reg = <0x0e000000 0x400000>; - nvidia,bpmp = <&bpmp>; - }; diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml index 0a9ed2848b7c63..9c8c9991f29ad6 100644 --- a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml +++ b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml @@ -137,7 +137,10 @@ patternProperties: - const: fsl,sec-v4.0-rtic reg: - maxItems: 1 + items: + - description: RTIC control and status register space. + - description: RTIC recoverable error indication register space. 
+ minItems: 1 ranges: maxItems: 1 diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml index 89c88004b41bf5..048b769a73c028 100644 --- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml @@ -17,6 +17,7 @@ properties: - qcom,prng-ee # 8996 and later using EE - items: - enum: + - qcom,sa8255p-trng - qcom,sa8775p-trng - qcom,sc7280-trng - qcom,sm8450-trng diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml index 2ad0cd6dd49e06..b78f64c9c5f44e 100644 --- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml +++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml @@ -92,12 +92,31 @@ properties: reference to a valid DPI output or input endpoint node. port@2: - $ref: /schemas/graph.yaml#/properties/port + $ref: /schemas/graph.yaml#/$defs/port-base + unevaluatedProperties: false description: | eDP/DP output port. The remote endpoint phandle should be a reference to a valid eDP panel input endpoint node. This port is optional, treated as DP panel if not defined + properties: + endpoint: + $ref: /schemas/media/video-interfaces.yaml# + unevaluatedProperties: false + + properties: + toshiba,pre-emphasis: + description: + Display port output Pre-Emphasis settings for both DP lanes. + $ref: /schemas/types.yaml#/definitions/uint8-array + minItems: 2 + maxItems: 2 + items: + enum: + - 0 # No pre-emphasis + - 1 # 3.5dB pre-emphasis + - 2 # 6dB pre-emphasis + oneOf: - required: - port@0 diff --git a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml index 0681fc49aa1b08..8e3a98aeec32dd 100644 --- a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml +++ b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml @@ -50,6 +50,14 @@ properties: - const: disp_axi minItems: 1 + dmas: + items: + - description: DMA specifier for the RX DMA channel. + + dma-names: + items: + - const: rx + interrupts: items: - description: LCDIF DMA interrupt @@ -156,6 +164,18 @@ allOf: interrupts: maxItems: 1 + - if: + not: + properties: + compatible: + contains: + enum: + - fsl,imx28-lcdif + then: + properties: + dmas: false + dma-names: false + examples: - | #include diff --git a/Documentation/devicetree/bindings/display/lvds.yaml b/Documentation/devicetree/bindings/display/lvds.yaml index 224db4932011d7..b74efbea3be291 100644 --- a/Documentation/devicetree/bindings/display/lvds.yaml +++ b/Documentation/devicetree/bindings/display/lvds.yaml @@ -16,7 +16,7 @@ maintainers: description: This binding extends the data mapping defined in lvds-data-mapping.yaml. It supports reversing the bit order on the formats defined there in order - to accomodate for even more specialized data formats, since a variety of + to accommodate for even more specialized data formats, since a variety of data formats and layouts is used to drive LVDS displays. 
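In a device tree this usually shows up as a data-mapping string on the panel node (from lvds-data-mapping.yaml), optionally combined with the bit-reversal flag this schema describes. A minimal sketch in the generic panel-lvds style; the compatible, the timing values and the data-mirror property name are written from memory of the LVDS bindings rather than taken from this hunk, so treat them as assumptions:

    panel {
        compatible = "panel-lvds";          /* a real panel would use its own specific compatible */
        width-mm = <210>;
        height-mm = <158>;
        data-mapping = "jeida-24";
        data-mirror;                        /* reverse the per-lane bit order, as described above */

        panel-timing {
            clock-frequency = <71000000>;
            hactive = <1280>;
            vactive = <800>;
            hfront-porch = <48>;
            hback-porch = <80>;
            hsync-len = <32>;
            vfront-porch = <3>;
            vback-porch = <14>;
            vsync-len = <6>;
        };

        port {
            panel_in: endpoint {
                remote-endpoint = <&lvds_encoder_out>;
            };
        };
    };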
properties: diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml index 5ca7679d542764..3a82aec9021c7c 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml @@ -62,6 +62,9 @@ properties: - const: default - const: sleep + power-domains: + maxItems: 1 + port: $ref: /schemas/graph.yaml#/properties/port description: @@ -76,6 +79,20 @@ required: - clock-names - port +allOf: + - if: + not: + properties: + compatible: + contains: + enum: + - mediatek,mt6795-dpi + - mediatek,mt8173-dpi + - mediatek,mt8186-dpi + then: + properties: + power-domains: false + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.yaml b/Documentation/devicetree/bindings/display/msm/hdmi.yaml index 47e97669821c35..d4a2033afea8d4 100644 --- a/Documentation/devicetree/bindings/display/msm/hdmi.yaml +++ b/Documentation/devicetree/bindings/display/msm/hdmi.yaml @@ -19,14 +19,15 @@ properties: - qcom,hdmi-tx-8974 - qcom,hdmi-tx-8994 - qcom,hdmi-tx-8996 + - qcom,hdmi-tx-8998 clocks: minItems: 1 - maxItems: 5 + maxItems: 8 clock-names: minItems: 1 - maxItems: 5 + maxItems: 8 reg: minItems: 1 @@ -142,6 +143,7 @@ allOf: properties: clocks: minItems: 5 + maxItems: 5 clock-names: items: - const: mdp_core @@ -151,6 +153,28 @@ allOf: - const: extp hdmi-mux-supplies: false + - if: + properties: + compatible: + contains: + enum: + - qcom,hdmi-tx-8998 + then: + properties: + clocks: + minItems: 8 + maxItems: 8 + clock-names: + items: + - const: mdp_core + - const: iface + - const: core + - const: alt_iface + - const: extp + - const: bus + - const: mnoc + - const: iface_mmss + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml index 5eaccce13c216f..6a82bd1ec763c2 100644 --- a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml +++ b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml @@ -9,20 +9,20 @@ title: BOE TH101MB31IG002-28A WXGA DSI Display Panel maintainers: - Manuel Traut -allOf: - - $ref: panel-common.yaml# - properties: compatible: enum: # BOE TH101MB31IG002-28A 10.1" WXGA TFT LCD panel - boe,th101mb31ig002-28a + # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel + - starry,er88577 reg: maxItems: 1 backlight: true enable-gpios: true + reset-gpios: true power-supply: true port: true rotation: true @@ -33,6 +33,20 @@ required: - enable-gpios - power-supply +allOf: + - $ref: panel-common.yaml# + - if: + properties: + compatible: + # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel + const: starry,er88577 + then: + properties: + reset-gpios: false + else: + required: + - reset-gpios + additionalProperties: false examples: @@ -47,6 +61,7 @@ examples: reg = <0>; backlight = <&backlight_lcd0>; enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 55 GPIO_ACTIVE_LOW>; rotation = <90>; power-supply = <&vcc_3v3>; port { diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml new file mode 100644 index 00000000000000..dced98e1c69a35 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR 
BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/boe,tv101wum-ll2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: BOE TV101WUM-LL2 DSI Display Panel + +maintainers: + - Neil Armstrong + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: boe,tv101wum-ll2 + + reg: + maxItems: 1 + description: DSI virtual channel + + backlight: true + reset-gpios: true + vsp-supply: true + vsn-supply: true + port: true + rotation: true + +required: + - compatible + - reg + - reset-gpios + - vsp-supply + - vsn-supply + - port + +additionalProperties: false + +examples: + - | + #include + dsi { + #address-cells = <1>; + #size-cells = <0>; + panel@0 { + compatible = "boe,tv101wum-ll2"; + reg = <0>; + + vsn-supply = <&vsn_lcd>; + vsp-supply = <&vsp_lcd>; + + reset-gpios = <&pio 45 GPIO_ACTIVE_LOW>; + + port { + panel_in: endpoint { + remote-endpoint = <&dsi_out>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml index 644387e4fb6f9f..75ccabff308bfa 100644 --- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml +++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml @@ -15,14 +15,12 @@ description: such as the HannStar HSD060BHW4 720x1440 TFT LCD panel connected with a MIPI-DSI video interface. -allOf: - - $ref: panel-common.yaml# - properties: compatible: items: - enum: - hannstar,hsd060bhw4 + - microchip,ac40t08a-mipi-panel - powkiddy,x55-panel - const: himax,hx8394 @@ -46,7 +44,6 @@ properties: required: - compatible - reg - - reset-gpios - backlight - port - vcc-supply @@ -54,6 +51,18 @@ required: additionalProperties: false +allOf: + - $ref: panel-common.yaml# + - if: + not: + properties: + compatible: + enum: + - microchip,ac40t08a-mipi-panel + then: + required: + - reset-gpios + examples: - | #include diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml index cfd7cc9c872574..f8030757948544 100644 --- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml @@ -16,6 +16,7 @@ properties: compatible: items: - enum: + - densitron,dmt028vghmcmi-1d - ortustech,com35h3p70ulc - const: ilitek,ili9806e diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml index 3d5bede98cf1d9..b8783eba3ddc62 100644 --- a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml +++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml @@ -18,6 +18,7 @@ properties: - enum: - chongzhou,cz101b4001 - kingdisplay,kd101ne3-40ti + - melfas,lmfbx101117480 - radxa,display-10hd-ad001 - radxa,display-8hd-ad002 - const: jadard,jd9365da-h3 diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml index e78160d1aa24c4..10ed4b57232b9b 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple-lvds-dual-ports.yaml @@ -84,11 +84,7 @@ properties: - port@0 - port@1 - backlight: true - enable-gpios: true - power-supply: true - -additionalProperties: false +unevaluatedProperties: 
false required: - compatible diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index 8a87e0100dcb21..b89e3979057911 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -158,6 +158,8 @@ properties: - innolux,at070tn92 # Innolux G070ACE-L01 7" WVGA (800x480) TFT LCD panel - innolux,g070ace-l01 + # Innolux G070ACE-LH3 7" WVGA (800x480) TFT LCD panel with WLED backlight + - innolux,g070ace-lh3 # Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel - innolux,g070y2-l01 # Innolux G070Y2-T02 7" WVGA (800x480) TFT LCD TTL panel @@ -222,6 +224,8 @@ properties: - okaya,rs800480t-7x0gp # Olimex 4.3" TFT LCD panel - olimex,lcd-olinuxino-43-ts + # On Tat Industrial Company 5" DPI TFT panel. + - ontat,kd50g21-40nt-a1 # On Tat Industrial Company 7" DPI TFT panel. - ontat,yx700wv03 # OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml index b348f5bf0a9809..b07f3eca669bff 100644 --- a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml +++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml @@ -20,21 +20,19 @@ description: | Densitron DMT028VGHMCMI-1A is 480x640, 2-lane MIPI DSI LCD panel which has built-in ST7701 chip. -allOf: - - $ref: panel-common.yaml# - properties: compatible: items: - enum: - anbernic,rg-arc-panel + - anbernic,rg28xx-panel - densitron,dmt028vghmcmi-1a - elida,kd50t048a - techstar,ts8550b - const: sitronix,st7701 reg: - description: DSI virtual channel used by that screen + description: DSI / SPI channel used by that screen maxItems: 1 VCC-supply: @@ -43,6 +41,13 @@ properties: IOVCC-supply: description: I/O system regulator + dc-gpios: + maxItems: 1 + description: + Controller data/command selection (D/CX) in 4-line SPI mode. + If not set, the controller is in 3-line SPI mode. + Disallowed for DSI. 
+ port: true reset-gpios: true rotation: true @@ -57,7 +62,38 @@ required: - port - reset-gpios -additionalProperties: false +allOf: + - $ref: panel-common.yaml# + - if: + properties: + compatible: + contains: + # SPI connected panels + enum: + - anbernic,rg28xx-panel + then: + $ref: /schemas/spi/spi-peripheral-props.yaml# + + - if: + properties: + compatible: + not: + contains: + # DSI or SPI without D/CX pin + enum: + - anbernic,rg-arc-panel + - anbernic,rg28xx-panel + - densitron,dmt028vghmcmi-1a + - elida,kd50t048a + - techstar,ts8550b + then: + required: + - dc-gpios + else: + properties: + dc-gpios: false + +unevaluatedProperties: false examples: - | @@ -82,3 +118,26 @@ examples: }; }; }; + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "anbernic,rg28xx-panel", "sitronix,st7701"; + reg = <0>; + spi-max-frequency = <3125000>; + VCC-supply = <®_lcd>; + IOVCC-supply = <®_lcd>; + reset-gpios = <&pio 8 14 GPIO_ACTIVE_HIGH>; /* LCD-RST: PI14 */ + backlight = <&backlight>; + + port { + panel_in_rgb: endpoint { + remote-endpoint = <&tcon_lcd0_out_lcd>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml index 08e5b9478051a3..95e3d5e74b8761 100644 --- a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml +++ b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml @@ -18,6 +18,7 @@ properties: compatible: oneOf: - enum: + - renesas,r9a07g043u-du # RZ/G2UL - renesas,r9a07g044-du # RZ/G2{L,LC} - items: - enum: @@ -60,9 +61,6 @@ properties: $ref: /schemas/graph.yaml#/properties/port unevaluatedProperties: false - required: - - port@0 - unevaluatedProperties: false renesas,vsps: @@ -88,6 +86,34 @@ required: additionalProperties: false +allOf: + - if: + properties: + compatible: + contains: + const: renesas,r9a07g043u-du + then: + properties: + ports: + properties: + port@0: + description: DPI + + required: + - port@0 + else: + properties: + ports: + properties: + port@0: + description: DSI + port@1: + description: DPI + + required: + - port@0 + - port@1 + examples: # RZ/G2L DU - | diff --git a/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml new file mode 100644 index 00000000000000..871b76ddf90f02 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/cirrus,ep9301-dma-m2m.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus Logic ep93xx SoC DMA controller + +maintainers: + - Alexander Sverdlin + - Nikita Shubin + +allOf: + - $ref: dma-controller.yaml# + +properties: + compatible: + oneOf: + - const: cirrus,ep9301-dma-m2m + - items: + - enum: + - cirrus,ep9302-dma-m2m + - cirrus,ep9307-dma-m2m + - cirrus,ep9312-dma-m2m + - cirrus,ep9315-dma-m2m + - const: cirrus,ep9301-dma-m2m + + reg: + items: + - description: m2m0 channel registers + - description: m2m1 channel registers + + clocks: + items: + - description: m2m0 channel gate clock + - description: m2m1 channel gate clock + + clock-names: + items: + - const: m2m0 + - const: m2m1 + + interrupts: + items: + - description: m2m0 channel interrupt + - description: m2m1 channel interrupt + + '#dma-cells': + const: 2 + description: | + The first cell is the unique device channel number as indicated by 
this + table for ep93xx: + + 10: SPI controller + 11: IDE controller + + The second cell is the DMA direction line number: + + 1: Memory to device + 2: Device to memory + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + +additionalProperties: false + +examples: + - | + #include + dma-controller@80000100 { + compatible = "cirrus,ep9301-dma-m2m"; + reg = <0x80000100 0x0040>, + <0x80000140 0x0040>; + clocks = <&syscon EP93XX_CLK_M2M0>, + <&syscon EP93XX_CLK_M2M1>; + clock-names = "m2m0", "m2m1"; + interrupt-parent = <&vic0>; + interrupts = <17>, <18>; + #dma-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml new file mode 100644 index 00000000000000..d14c3155354365 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/cirrus,ep9301-dma-m2p.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus Logic ep93xx SoC M2P DMA controller + +maintainers: + - Alexander Sverdlin + - Nikita Shubin + +allOf: + - $ref: dma-controller.yaml# + +properties: + compatible: + oneOf: + - const: cirrus,ep9301-dma-m2p + - items: + - enum: + - cirrus,ep9302-dma-m2p + - cirrus,ep9307-dma-m2p + - cirrus,ep9312-dma-m2p + - cirrus,ep9315-dma-m2p + - const: cirrus,ep9301-dma-m2p + + reg: + items: + - description: m2p0 channel registers + - description: m2p1 channel registers + - description: m2p2 channel registers + - description: m2p3 channel registers + - description: m2p4 channel registers + - description: m2p5 channel registers + - description: m2p6 channel registers + - description: m2p7 channel registers + - description: m2p8 channel registers + - description: m2p9 channel registers + + clocks: + items: + - description: m2p0 channel gate clock + - description: m2p1 channel gate clock + - description: m2p2 channel gate clock + - description: m2p3 channel gate clock + - description: m2p4 channel gate clock + - description: m2p5 channel gate clock + - description: m2p6 channel gate clock + - description: m2p7 channel gate clock + - description: m2p8 channel gate clock + - description: m2p9 channel gate clock + + clock-names: + items: + - const: m2p0 + - const: m2p1 + - const: m2p2 + - const: m2p3 + - const: m2p4 + - const: m2p5 + - const: m2p6 + - const: m2p7 + - const: m2p8 + - const: m2p9 + + interrupts: + items: + - description: m2p0 channel interrupt + - description: m2p1 channel interrupt + - description: m2p2 channel interrupt + - description: m2p3 channel interrupt + - description: m2p4 channel interrupt + - description: m2p5 channel interrupt + - description: m2p6 channel interrupt + - description: m2p7 channel interrupt + - description: m2p8 channel interrupt + - description: m2p9 channel interrupt + + '#dma-cells': + const: 2 + description: | + The first cell is the unique device channel number as indicated by this + table for ep93xx: + + 0: I2S channel 1 + 1: I2S channel 2 (unused) + 2: AC97 channel 1 (unused) + 3: AC97 channel 2 (unused) + 4: AC97 channel 3 (unused) + 5: I2S channel 3 (unused) + 6: UART1 (unused) + 7: UART2 (unused) + 8: UART3 (unused) + 9: IRDA (unused) + + The second cell is the DMA direction line number: + + 1: Memory to device + 2: Device to memory + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + +additionalProperties: false + 
+examples: + - | + #include + dma-controller@80000000 { + compatible = "cirrus,ep9301-dma-m2p"; + reg = <0x80000000 0x0040>, + <0x80000040 0x0040>, + <0x80000080 0x0040>, + <0x800000c0 0x0040>, + <0x80000240 0x0040>, + <0x80000200 0x0040>, + <0x800002c0 0x0040>, + <0x80000280 0x0040>, + <0x80000340 0x0040>, + <0x80000300 0x0040>; + clocks = <&syscon EP93XX_CLK_M2P0>, + <&syscon EP93XX_CLK_M2P1>, + <&syscon EP93XX_CLK_M2P2>, + <&syscon EP93XX_CLK_M2P3>, + <&syscon EP93XX_CLK_M2P4>, + <&syscon EP93XX_CLK_M2P5>, + <&syscon EP93XX_CLK_M2P6>, + <&syscon EP93XX_CLK_M2P7>, + <&syscon EP93XX_CLK_M2P8>, + <&syscon EP93XX_CLK_M2P9>; + clock-names = "m2p0", "m2p1", + "m2p2", "m2p3", + "m2p4", "m2p5", + "m2p6", "m2p7", + "m2p8", "m2p9"; + interrupt-parent = <&vic0>; + interrupts = <7>, <8>, <9>, <10>, <11>, <12>, <13>, <14>, <15>, <16>; + #dma-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml index 902a11f65be22c..75957f9fb58b57 100644 --- a/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml +++ b/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml @@ -28,6 +28,14 @@ properties: - description: DMA Error interrupt minItems: 1 + clocks: + maxItems: 2 + + clock-names: + items: + - const: ipg + - const: ahb + "#dma-cells": const: 1 @@ -42,15 +50,21 @@ required: - reg - interrupts - "#dma-cells" + - clocks + - clock-names additionalProperties: false examples: - | + #include + dma-controller@10001000 { compatible = "fsl,imx27-dma"; reg = <0x10001000 0x1000>; interrupts = <32 33>; #dma-cells = <1>; dma-channels = <16>; + clocks = <&clks IMX27_CLK_DMA_IPG_GATE>, <&clks IMX27_CLK_DMA_AHB_GATE>; + clock-names = "ipg", "ahb"; }; diff --git a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml index add9c77e8b52af..a17cf2360dd4ac 100644 --- a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml +++ b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml @@ -11,6 +11,17 @@ maintainers: allOf: - $ref: dma-controller.yaml# + - if: + properties: + compatible: + contains: + const: fsl,imx8qxp-dma-apbh + then: + required: + - power-domains + else: + properties: + power-domains: false properties: compatible: @@ -20,6 +31,7 @@ properties: - fsl,imx6q-dma-apbh - fsl,imx6sx-dma-apbh - fsl,imx7d-dma-apbh + - fsl,imx8qxp-dma-apbh - const: fsl,imx28-dma-apbh - enum: - fsl,imx23-dma-apbh @@ -42,6 +54,9 @@ properties: dma-channels: enum: [4, 8, 16] + power-domains: + maxItems: 1 + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.yaml b/Documentation/devicetree/bindings/dma/fsl-qdma.yaml index 1b9ebdbe528a1b..9401b1f6300d44 100644 --- a/Documentation/devicetree/bindings/dma/fsl-qdma.yaml +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.yaml @@ -11,11 +11,14 @@ maintainers: properties: compatible: - enum: - - fsl,ls1021a-qdma - - fsl,ls1028a-qdma - - fsl,ls1043a-qdma - - fsl,ls1046a-qdma + oneOf: + - const: fsl,ls1021a-qdma + - items: + - enum: + - fsl,ls1028a-qdma + - fsl,ls1043a-qdma + - fsl,ls1046a-qdma + - const: fsl,ls1021a-qdma reg: items: diff --git a/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml b/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml new file mode 100644 index 00000000000000..4c7d2fb7b2925c --- /dev/null +++ b/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: 
http://devicetree.org/schemas/dma/loongson,ls1b-apbdma.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Loongson-1 APB DMA Controller + +maintainers: + - Keguang Zhang + +description: + Loongson-1 APB DMA controller provides 3 independent channels for + peripherals such as NAND, audio playback and capture. + +properties: + compatible: + oneOf: + - const: loongson,ls1b-apbdma + - items: + - enum: + - loongson,ls1a-apbdma + - loongson,ls1c-apbdma + - const: loongson,ls1b-apbdma + + reg: + maxItems: 1 + + interrupts: + items: + - description: NAND interrupt + - description: Audio playback interrupt + - description: Audio capture interrupt + + interrupt-names: + items: + - const: ch0 + - const: ch1 + - const: ch2 + + '#dma-cells': + const: 1 + +required: + - compatible + - reg + - interrupts + - interrupt-names + - '#dma-cells' + +additionalProperties: false + +examples: + - | + #include + dma-controller@1fd01160 { + compatible = "loongson,ls1b-apbdma"; + reg = <0x1fd01160 0x4>; + interrupt-parent = <&intc0>; + interrupts = <13 IRQ_TYPE_EDGE_RISING>, + <14 IRQ_TYPE_EDGE_RISING>, + <15 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "ch0", "ch1", "ch2"; + #dma-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml b/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml new file mode 100644 index 00000000000000..646b4e779d8a70 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/marvell,xor-v2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Marvell XOR v2 engines + +maintainers: + - Andrew Lunn + +properties: + compatible: + oneOf: + - const: marvell,xor-v2 + - items: + - enum: + - marvell,armada-7k-xor + - const: marvell,xor-v2 + + reg: + items: + - description: DMA registers + - description: global registers + + clocks: + minItems: 1 + maxItems: 2 + + clock-names: + minItems: 1 + items: + - const: core + - const: reg + + msi-parent: + description: + Phandle to the MSI-capable interrupt controller used for + interrupts. + maxItems: 1 + + dma-coherent: true + +required: + - compatible + - reg + - msi-parent + - dma-coherent + +additionalProperties: false + +examples: + - | + xor0@6a0000 { + compatible = "marvell,armada-7k-xor", "marvell,xor-v2"; + reg = <0x6a0000 0x1000>, <0x6b0000 0x1000>; + clocks = <&ap_clk 0>, <&ap_clk 1>; + clock-names = "core", "reg"; + msi-parent = <&gic_v2m0>; + dma-coherent; + }; diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt deleted file mode 100644 index 9c38bbe7e6d7d8..00000000000000 --- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt +++ /dev/null @@ -1,28 +0,0 @@ -* Marvell XOR v2 engines - -Required properties: -- compatible: one of the following values: - "marvell,armada-7k-xor" - "marvell,xor-v2" -- reg: Should contain registers location and length (two sets) - the first set is the DMA registers - the second set is the global registers -- msi-parent: Phandle to the MSI-capable interrupt controller used for - interrupts. - -Optional properties: -- clocks: Optional reference to the clocks used by the XOR engine. 
-- clock-names: mandatory if there is a second clock, in this case the - name must be "core" for the first clock and "reg" for the second - one - - -Example: - - xor0@400000 { - compatible = "marvell,xor-v2"; - reg = <0x400000 0x1000>, - <0x410000 0x1000>; - msi-parent = <&gic_v2m0>; - dma-coherent; - }; diff --git a/Documentation/devicetree/bindings/dma/nxp,lpc3220-dmamux.yaml b/Documentation/devicetree/bindings/dma/nxp,lpc3220-dmamux.yaml new file mode 100644 index 00000000000000..32f20874415444 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/nxp,lpc3220-dmamux.yaml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/nxp,lpc3220-dmamux.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: DMA multiplexer for LPC32XX SoC (DMA request router) + +maintainers: + - J.M.B. Downing + - Piotr Wojtaszczyk + +allOf: + - $ref: dma-router.yaml# + +properties: + compatible: + const: nxp,lpc3220-dmamux + + reg: + maxItems: 1 + + dma-masters: + description: phandle to a dma node compatible with arm,pl080 + maxItems: 1 + + "#dma-cells": + const: 3 + description: | + First two cells same as for device pointed in dma-masters. + Third cell represents mux value for the request. + +required: + - compatible + - reg + - dma-masters + +additionalProperties: false + +examples: + - | + dma-router@7c { + compatible = "nxp,lpc3220-dmamux"; + reg = <0x7c 0x8>; + dma-masters = <&dma>; + #dma-cells = <3>; + }; + +... diff --git a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml index a42b6a26a6d3f2..ca24cf48769f6d 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml +++ b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml @@ -19,6 +19,7 @@ properties: - renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five - renesas,r9a07g044-dmac # RZ/G2{L,LC} - renesas,r9a07g054-dmac # RZ/V2L + - renesas,r9a08g045-dmac # RZ/G3S - const: renesas,rz-dmac reg: diff --git a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt index 47e477cce6d24d..1f9831540c9791 100644 --- a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt +++ b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt @@ -20,7 +20,7 @@ Optional properties: memcpy channels in eDMA. Notes: -When requesting channel via ti,dra7-dma-crossbar, the DMA clinet must request +When requesting channel via ti,dra7-dma-crossbar, the DMA client must request the DMA event number as crossbar ID (input to the DMA crossbar). 
For ti,am335x-edma-crossbar: the meaning of parameters of dmas for clients: diff --git a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml index 769ce23aaac275..ac3198953b8e59 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml +++ b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml @@ -24,7 +24,9 @@ properties: const: 1 compatible: - const: xlnx,zynqmp-dma-1.0 + enum: + - amd,versal2-dma-1.0 + - xlnx,zynqmp-dma-1.0 reg: description: memory map for gdma/adma module access diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml index e396e47b2f13cb..b6239ec3512b34 100644 --- a/Documentation/devicetree/bindings/eeprom/at24.yaml +++ b/Documentation/devicetree/bindings/eeprom/at24.yaml @@ -116,6 +116,7 @@ properties: - const: atmel,24c02 - items: - enum: + - giantec,gt24c04a - onnn,cat24c04 - onnn,cat24c05 - rohm,br24g04 diff --git a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml index d5cfa32ea52dd5..072b3c0c5fd037 100644 --- a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml +++ b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml @@ -37,6 +37,11 @@ properties: GPIO pin (output) used to control VBUS. If skipped, no such control takes place. + port: + $ref: /schemas/graph.yaml#/properties/port + description: + A port node to link the usb controller for the dual role switch. + required: - compatible - interrupts @@ -58,5 +63,11 @@ examples: interrupt-parent = <&msmgpio>; interrupts = <78 IRQ_TYPE_LEVEL_HIGH>; vbus-gpios = <&msmgpio 148 GPIO_ACTIVE_HIGH>; + + port { + endpoint { + remote-endpoint = <&usb1_drd_sw>; + }; + }; }; }; diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt deleted file mode 100644 index dfc14f71e81fb3..00000000000000 --- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt +++ /dev/null @@ -1,21 +0,0 @@ -USB GPIO Extcon device - -This is a virtual device used to generate USB cable states from the USB ID pin -connected to a GPIO pin. - -Required properties: -- compatible: Should be "linux,extcon-usb-gpio" - -Either one of id-gpio or vbus-gpio must be present. Both can be present as well. -- id-gpio: gpio for USB ID pin. See gpio binding. -- vbus-gpio: gpio for USB VBUS pin. - -Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below: - extcon_usb1 { - compatible = "linux,extcon-usb-gpio"; - id-gpio = <&gpio6 1 GPIO_ACTIVE_HIGH>; - } - - &omap_dwc3_1 { - extcon = <&extcon_usb1>; - }; diff --git a/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml b/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml new file mode 100644 index 00000000000000..8856107bdd33b8 --- /dev/null +++ b/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/extcon/linux,extcon-usb-gpio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: USB GPIO Extcon device + +maintainers: + - Frank Li + +description: + This is a virtual device used to generate USB cable states from the USB ID pin + connected to a GPIO pin. 
+ +properties: + compatible: + const: linux,extcon-usb-gpio + + id-gpios: + description: gpio for USB ID pin. See gpio binding. + vbus-gpios: + description: gpio for USB VBUS pin. + +required: + - compatible + +additionalProperties: false + +examples: + - | + #include + + extcon_usb1 { + compatible = "linux,extcon-usb-gpio"; + id-gpios = <&gpio6 1 GPIO_ACTIVE_HIGH>; + }; diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml index 4d823f3b1f0e90..54d7d11bfed4d5 100644 --- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml +++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml @@ -22,6 +22,9 @@ description: | [0] https://developer.arm.com/documentation/den0056/latest +anyOf: + - $ref: /schemas/firmware/nxp,imx95-scmi.yaml + properties: $nodename: const: scmi @@ -121,6 +124,13 @@ properties: atomic mode of operation, even if requested. default: 0 + max-rx-timeout-ms: + description: + An optional time value, expressed in milliseconds, representing the + transport maximum timeout value for the receive channel. The value should + be a non-zero value if set. + minimum: 1 + arm,smc-id: $ref: /schemas/types.yaml#/definitions/uint32 description: @@ -145,6 +155,14 @@ properties: required: - '#power-domain-cells' + protocol@12: + $ref: '#/$defs/protocol-node' + unevaluatedProperties: false + + properties: + reg: + const: 0x12 + protocol@13: $ref: '#/$defs/protocol-node' unevaluatedProperties: false @@ -284,7 +302,7 @@ properties: required: - reg -additionalProperties: false +unevaluatedProperties: false $defs: protocol-node: diff --git a/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml b/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml new file mode 100644 index 00000000000000..1a95010a546b14 --- /dev/null +++ b/Documentation/devicetree/bindings/firmware/nxp,imx95-scmi.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2024 NXP +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/firmware/nxp,imx95-scmi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: i.MX95 System Control and Management Interface(SCMI) Vendor Protocols Extension + +maintainers: + - Peng Fan + +properties: + protocol@81: + $ref: '/schemas/firmware/arm,scmi.yaml#/$defs/protocol-node' + unevaluatedProperties: false + + properties: + reg: + const: 0x81 + + protocol@84: + $ref: '/schemas/firmware/arm,scmi.yaml#/$defs/protocol-node' + unevaluatedProperties: false + + properties: + reg: + const: 0x84 + + nxp,ctrl-ids: + description: + Each entry consists of 2 integers, represents the ctrl id and the value + items: + items: + - description: the ctrl id index + enum: [0, 1, 2, 3, 4, 5, 6, 7, 0x8000, 0x8001, 0x8002, 0x8003, + 0x8004, 0x8005, 0x8006, 0x8007] + - description: the value assigned to the ctrl id + minItems: 1 + maxItems: 16 + $ref: /schemas/types.yaml#/definitions/uint32-matrix + +additionalProperties: true diff --git a/Documentation/devicetree/bindings/gnss/brcm,bcm4751.yaml b/Documentation/devicetree/bindings/gnss/brcm,bcm4751.yaml index c21549e0fba642..089166089498d1 100644 --- a/Documentation/devicetree/bindings/gnss/brcm,bcm4751.yaml +++ b/Documentation/devicetree/bindings/gnss/brcm,bcm4751.yaml @@ -18,6 +18,7 @@ description: allOf: - $ref: gnss-common.yaml# + - $ref: /schemas/serial/serial-peripheral-props.yaml# properties: compatible: diff --git a/Documentation/devicetree/bindings/gnss/gnss-common.yaml 
b/Documentation/devicetree/bindings/gnss/gnss-common.yaml index 963b926e30a704..d4430d2d6855c9 100644 --- a/Documentation/devicetree/bindings/gnss/gnss-common.yaml +++ b/Documentation/devicetree/bindings/gnss/gnss-common.yaml @@ -35,11 +35,6 @@ properties: GPIO line, this is used. maxItems: 1 - current-speed: - description: The baudrate in bits per second of the device as it comes - online, current active speed. - $ref: /schemas/types.yaml#/definitions/uint32 - additionalProperties: true examples: diff --git a/Documentation/devicetree/bindings/gnss/mediatek.yaml b/Documentation/devicetree/bindings/gnss/mediatek.yaml index c0eb35beb2ef39..2b9e5be4ebf302 100644 --- a/Documentation/devicetree/bindings/gnss/mediatek.yaml +++ b/Documentation/devicetree/bindings/gnss/mediatek.yaml @@ -15,6 +15,7 @@ description: allOf: - $ref: gnss-common.yaml# + - $ref: /schemas/serial/serial-peripheral-props.yaml# properties: compatible: diff --git a/Documentation/devicetree/bindings/gnss/sirfstar.yaml b/Documentation/devicetree/bindings/gnss/sirfstar.yaml index 0bbe684d82e105..7e5da89a5ad718 100644 --- a/Documentation/devicetree/bindings/gnss/sirfstar.yaml +++ b/Documentation/devicetree/bindings/gnss/sirfstar.yaml @@ -21,6 +21,7 @@ description: allOf: - $ref: gnss-common.yaml# + - $ref: /schemas/serial/serial-peripheral-props.yaml# properties: compatible: diff --git a/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml b/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml index cd80668182b608..7d4b6d49e5eea2 100644 --- a/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml +++ b/Documentation/devicetree/bindings/gnss/u-blox,neo-6m.yaml @@ -8,6 +8,7 @@ title: U-blox GNSS Receiver allOf: - $ref: gnss-common.yaml# + - $ref: /schemas/serial/serial-peripheral-props.yaml# maintainers: - Johan Hovold diff --git a/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml b/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml index c0ad70e66f760c..e8bc9f018edb12 100644 --- a/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml +++ b/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml @@ -36,19 +36,8 @@ properties: patternProperties: "^(hog-[0-9]+|.+-hog(-[0-9]+)?)$": type: object - - properties: - gpio-hog: true - gpios: true - output-high: true - output-low: true - line-name: true - required: - gpio-hog - - gpios - - additionalProperties: false required: - compatible diff --git a/Documentation/devicetree/bindings/gpio/fcs,fxl6408.yaml b/Documentation/devicetree/bindings/gpio/fcs,fxl6408.yaml index 65b6970e42fb15..b74fa81e7d05bb 100644 --- a/Documentation/devicetree/bindings/gpio/fcs,fxl6408.yaml +++ b/Documentation/devicetree/bindings/gpio/fcs,fxl6408.yaml @@ -28,6 +28,7 @@ properties: patternProperties: "^(hog-[0-9]+|.+-hog(-[0-9]+)?)$": + type: object required: - gpio-hog diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml index e1fc8bb6d379ab..6b06609c649ed8 100644 --- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml +++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml @@ -85,19 +85,8 @@ properties: patternProperties: "^(hog-[0-9]+|.+-hog(-[0-9]+)?)$": type: object - properties: - gpio-hog: true - gpios: true - input: true - output-high: true - output-low: true - line-name: true - required: - gpio-hog - - gpios - - additionalProperties: false required: - compatible diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml 
b/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml index 10e56cf306dbab..1434d08f8b741f 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml +++ b/Documentation/devicetree/bindings/gpio/gpio-davinci.yaml @@ -32,6 +32,8 @@ properties: gpio-ranges: true + gpio-reserved-ranges: true + gpio-line-names: description: strings describing the names of each gpio line. minItems: 1 diff --git a/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml b/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml index daadfb4926c319..3a1079d6ee200c 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml +++ b/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml @@ -73,9 +73,10 @@ examples: reg-names = "data", "dir", "intr"; gpio-controller; #gpio-cells = <2>; - interrupt-controller; - interrupt-parent = <&vic1>; - interrupts = <27>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&vic1>; + interrupts = <27>; }; gpio@80840004 { @@ -87,6 +88,7 @@ examples: gpio-controller; #gpio-cells = <2>; interrupt-controller; + #interrupt-cells = <2>; interrupt-parent = <&vic1>; interrupts = <27>; }; @@ -127,6 +129,7 @@ examples: gpio-controller; #gpio-cells = <2>; interrupt-controller; + #interrupt-cells = <2>; interrupts-extended = <&vic0 19>, <&vic0 20>, <&vic0 21>, <&vic0 22>, <&vic1 15>, <&vic1 16>, diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml index 51e8390d6b32b3..7b1eb08fa055cc 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml +++ b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml @@ -107,19 +107,8 @@ properties: patternProperties: "^(hog-[0-9]+|.+-hog(-[0-9]+)?)$": type: object - properties: - gpio-hog: true - gpios: true - input: true - output-high: true - output-low: true - line-name: true - required: - gpio-hog - - gpios - - additionalProperties: false required: - compatible diff --git a/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt b/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt deleted file mode 100644 index 49819367a011f4..00000000000000 --- a/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt +++ /dev/null @@ -1,43 +0,0 @@ -NXP LPC32xx SoC GPIO controller - -Required properties: -- compatible: must be "nxp,lpc3220-gpio" -- reg: Physical base address and length of the controller's registers. -- gpio-controller: Marks the device node as a GPIO controller. 
-- #gpio-cells: Should be 3: - 1) bank: - 0: GPIO P0 - 1: GPIO P1 - 2: GPIO P2 - 3: GPIO P3 - 4: GPI P3 - 5: GPO P3 - 2) pin number - 3) optional parameters: - - bit 0 specifies polarity (0 for normal, 1 for inverted) -- reg: Index of the GPIO group - -Example: - - gpio: gpio@40028000 { - compatible = "nxp,lpc3220-gpio"; - reg = <0x40028000 0x1000>; - gpio-controller; - #gpio-cells = <3>; /* bank, pin, flags */ - }; - - leds { - compatible = "gpio-leds"; - - led0 { - gpios = <&gpio 5 1 1>; /* GPO_P3 1, active low */ - linux,default-trigger = "heartbeat"; - default-state = "off"; - }; - - led1 { - gpios = <&gpio 5 14 1>; /* GPO_P3 14, active low */ - linux,default-trigger = "timer"; - default-state = "off"; - }; - }; diff --git a/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml b/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml index d61569b3f15b2a..d78da7dd2a5668 100644 --- a/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml +++ b/Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml @@ -49,20 +49,8 @@ properties: patternProperties: "^.+-hog(-[0-9]+)?$": type: object - - additionalProperties: false - - properties: - gpio-hog: true - gpios: true - input: true - output-high: true - output-low: true - line-name: true - required: - gpio-hog - - gpios allOf: - if: diff --git a/Documentation/devicetree/bindings/gpio/nxp,lpc3220-gpio.yaml b/Documentation/devicetree/bindings/gpio/nxp,lpc3220-gpio.yaml new file mode 100644 index 00000000000000..25b5494393ccbb --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/nxp,lpc3220-gpio.yaml @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpio/nxp,lpc3220-gpio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP LPC3220 SoC GPIO controller + +maintainers: + - Animesh Agarwal + +properties: + compatible: + const: nxp,lpc3220-gpio + + reg: + maxItems: 1 + + gpio-controller: true + + '#gpio-cells': + const: 3 + description: | + 1) bank: + 0: GPIO P0 + 1: GPIO P1 + 2: GPIO P2 + 3: GPIO P3 + 4: GPI P3 + 5: GPO P3 + 2) pin number + 3) flags: + - bit 0 specifies polarity (0 for normal, 1 for inverted) + +required: + - compatible + - reg + - gpio-controller + - '#gpio-cells' + +additionalProperties: false + +examples: + - | + gpio@40028000 { + compatible = "nxp,lpc3220-gpio"; + reg = <0x40028000 0x1000>; + gpio-controller; + #gpio-cells = <3>; /* bank, pin, flags */ + }; diff --git a/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml b/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml index 228fa27ffdc33d..36f5a06104713d 100644 --- a/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml +++ b/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml @@ -55,19 +55,8 @@ properties: patternProperties: "^.+-hog(-[0-9]+)?$": type: object - properties: - gpio-hog: true - gpios: true - input: true - output-high: true - output-low: true - line-name: true - required: - gpio-hog - - gpios - - additionalProperties: false required: - compatible diff --git a/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml b/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml new file mode 100644 index 00000000000000..abe11df25761aa --- /dev/null +++ b/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: 
http://devicetree.org/schemas/hwlock/sprd,hwspinlock-r3p0.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Spreadtrum hardware spinlock + +maintainers: + - Orson Zhai + - Baolin Wang + - Chunyan Zhang + +properties: + compatible: + const: sprd,hwspinlock-r3p0 + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: enable + + '#hwlock-cells': + const: 1 + +required: + - compatible + - reg + - clocks + - clock-names + - '#hwlock-cells' + +additionalProperties: false + +examples: + - | + #include + + hwlock@40500000 { + compatible = "sprd,hwspinlock-r3p0"; + reg = <0x40500000 0x1000>; + clocks = <&aon_gate CLK_SPLK_EB>; + clock-names = "enable"; + #hwlock-cells = <1>; + }; +... diff --git a/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt deleted file mode 100644 index 581db9d941bad5..00000000000000 --- a/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt +++ /dev/null @@ -1,23 +0,0 @@ -SPRD Hardware Spinlock Device Binding -------------------------------------- - -Required properties : -- compatible : should be "sprd,hwspinlock-r3p0". -- reg : the register address of hwspinlock. -- #hwlock-cells : hwlock users only use the hwlock id to represent a specific - hwlock, so the number of cells should be <1> here. -- clock-names : Must contain "enable". -- clocks : Must contain a phandle entry for the clock in clock-names, see the - common clock bindings. - -Please look at the generic hwlock binding for usage information for consumers, -"Documentation/devicetree/bindings/hwlock/hwlock.txt" - -Example of hwlock provider: - hwspinlock@40500000 { - compatible = "sprd,hwspinlock-r3p0"; - reg = <0 0x40500000 0 0x1000>; - #hwlock-cells = <1>; - clock-names = "enable"; - clocks = <&clk_aon_apb_gates0 22>; - }; diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml index 051c976ab711f6..79e8d62fa3b3dd 100644 --- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml +++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml @@ -45,12 +45,31 @@ properties: the pwm uses a logic low output for 100% duty cycle. If set to 1 the pwm uses a logic high output for 100% duty cycle. $ref: /schemas/types.yaml#/definitions/uint32-array + deprecated: true minItems: 3 maxItems: 3 items: enum: [0, 1] default: 1 + "#pwm-cells": + const: 4 + description: | + Number of cells in a PWM specifier. 
+ - 0: The PWM channel + - 1: The PWM period in nanoseconds + - 90909091 (11 Hz) + - 71428571 (14 Hz) + - 45454545 (22 Hz) + - 34482759 (29 Hz) + - 28571429 (35 Hz) + - 22727273 (44 Hz) + - 17241379 (58 Hz) + - 11363636 (88 Hz) + - 44444 (22 kHz) + - 2: PWM flags 0 or PWM_POLARITY_INVERTED + - 3: The default PWM duty cycle in nanoseconds + patternProperties: "^adi,bypass-attenuator-in[0-4]$": description: | @@ -81,6 +100,10 @@ patternProperties: - smbalert# - gpio + "^fan-[0-9]+$": + $ref: fan-common.yaml# + unevaluatedProperties: false + required: - compatible - reg @@ -89,17 +112,27 @@ additionalProperties: false examples: - | + #include i2c { #address-cells = <1>; #size-cells = <0>; - hwmon@2e { + pwm: hwmon@2e { compatible = "adi,adt7476"; reg = <0x2e>; adi,bypass-attenuator-in0 = <1>; adi,bypass-attenuator-in1 = <0>; - adi,pwm-active-state = <1 0 1>; adi,pin10-function = "smbalert#"; adi,pin14-function = "tach4"; + #pwm-cells = <4>; + + /* PWMs at 22.5 kHz frequency, 50% duty*/ + fan-0 { + pwms = <&pwm 0 44444 0 22222>; + }; + + fan-1 { + pwms = <&pwm 2 44444 0 22222>; + }; }; }; diff --git a/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml b/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml new file mode 100644 index 00000000000000..1f98da32f3feb9 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml @@ -0,0 +1,94 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/hwmon/lltc,ltc2978.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Octal Digital Power-supply monitor/supervisor/sequencer/margin controller. + +maintainers: + - Frank Li + +properties: + compatible: + enum: + - lltc,ltc2972 + - lltc,ltc2974 + - lltc,ltc2975 + - lltc,ltc2977 + - lltc,ltc2978 + - lltc,ltc2979 + - lltc,ltc2980 + - lltc,ltc3880 + - lltc,ltc3882 + - lltc,ltc3883 + - lltc,ltc3884 + - lltc,ltc3886 + - lltc,ltc3887 + - lltc,ltc3889 + - lltc,ltc7880 + - lltc,ltm2987 + - lltc,ltm4664 + - lltc,ltm4675 + - lltc,ltm4676 + - lltc,ltm4677 + - lltc,ltm4678 + - lltc,ltm4680 + - lltc,ltm4686 + - lltc,ltm4700 + + reg: + maxItems: 1 + + regulators: + type: object + description: | + list of regulators provided by this controller. 
+ Valid names of regulators depend on number of supplies supported per device: + * ltc2972 vout0 - vout1 + * ltc2974, ltc2975 : vout0 - vout3 + * ltc2977, ltc2979, ltc2980, ltm2987 : vout0 - vout7 + * ltc2978 : vout0 - vout7 + * ltc3880, ltc3882, ltc3884, ltc3886, ltc3887, ltc3889 : vout0 - vout1 + * ltc7880 : vout0 - vout1 + * ltc3883 : vout0 + * ltm4664 : vout0 - vout1 + * ltm4675, ltm4676, ltm4677, ltm4678 : vout0 - vout1 + * ltm4680, ltm4686 : vout0 - vout1 + * ltm4700 : vout0 - vout1 + + patternProperties: + "^vout[0-7]$": + $ref: /schemas/regulator/regulator.yaml# + type: object + unevaluatedProperties: false + + additionalProperties: false + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + regulator@5e { + compatible = "lltc,ltc2978"; + reg = <0x5e>; + + regulators { + vout0 { + regulator-name = "FPGA-2.5V"; + }; + vout2 { + regulator-name = "FPGA-1.5V"; + }; + }; + }; + }; + diff --git a/Documentation/devicetree/bindings/hwmon/ltc2978.txt b/Documentation/devicetree/bindings/hwmon/ltc2978.txt deleted file mode 100644 index 4e7f6215a4533f..00000000000000 --- a/Documentation/devicetree/bindings/hwmon/ltc2978.txt +++ /dev/null @@ -1,62 +0,0 @@ -ltc2978 - -Required properties: -- compatible: should contain one of: - * "lltc,ltc2972" - * "lltc,ltc2974" - * "lltc,ltc2975" - * "lltc,ltc2977" - * "lltc,ltc2978" - * "lltc,ltc2979" - * "lltc,ltc2980" - * "lltc,ltc3880" - * "lltc,ltc3882" - * "lltc,ltc3883" - * "lltc,ltc3884" - * "lltc,ltc3886" - * "lltc,ltc3887" - * "lltc,ltc3889" - * "lltc,ltc7880" - * "lltc,ltm2987" - * "lltc,ltm4664" - * "lltc,ltm4675" - * "lltc,ltm4676" - * "lltc,ltm4677" - * "lltc,ltm4678" - * "lltc,ltm4680" - * "lltc,ltm4686" - * "lltc,ltm4700" -- reg: I2C slave address - -Optional properties: -- regulators: A node that houses a sub-node for each regulator controlled by - the device. Each sub-node is identified using the node's name, with valid - values listed below. The content of each sub-node is defined by the - standard binding for regulators; see regulator.txt. - -Valid names of regulators depend on number of supplies supported per device: - * ltc2972 vout0 - vout1 - * ltc2974, ltc2975 : vout0 - vout3 - * ltc2977, ltc2979, ltc2980, ltm2987 : vout0 - vout7 - * ltc2978 : vout0 - vout7 - * ltc3880, ltc3882, ltc3884, ltc3886, ltc3887, ltc3889 : vout0 - vout1 - * ltc7880 : vout0 - vout1 - * ltc3883 : vout0 - * ltm4664 : vout0 - vout1 - * ltm4675, ltm4676, ltm4677, ltm4678 : vout0 - vout1 - * ltm4680, ltm4686 : vout0 - vout1 - * ltm4700 : vout0 - vout1 - -Example: -ltc2978@5e { - compatible = "lltc,ltc2978"; - reg = <0x5e>; - regulators { - vout0 { - regulator-name = "FPGA-2.5V"; - }; - vout2 { - regulator-name = "FPGA-1.5V"; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/hwmon/maxim,max31790.yaml b/Documentation/devicetree/bindings/hwmon/maxim,max31790.yaml new file mode 100644 index 00000000000000..b1ff496f87f958 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/maxim,max31790.yaml @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/hwmon/maxim,max31790.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: The Maxim MAX31790 Fan Controller + +maintainers: + - Guenter Roeck + - Chanh Nguyen + +description: > + The MAX31790 controls the speeds of up to six fans using six + independent PWM outputs. 
The desired fan speeds (or PWM duty cycles) + are written through the I2C interface. + + Datasheets: + https://datasheets.maximintegrated.com/en/ds/MAX31790.pdf + +properties: + compatible: + const: maxim,max31790 + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + resets: + maxItems: 1 + + "#pwm-cells": + const: 1 + +patternProperties: + "^fan-[0-9]+$": + $ref: fan-common.yaml# + unevaluatedProperties: false + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + pwm_provider: fan-controller@20 { + compatible = "maxim,max31790"; + reg = <0x20>; + clocks = <&sys_clk>; + resets = <&reset 0>; + #pwm-cells = <1>; + + fan-0 { + pwms = <&pwm_provider 1>; + }; + + fan-1 { + pwms = <&pwm_provider 2>; + }; + }; + }; + diff --git a/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml b/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml new file mode 100644 index 00000000000000..f0667ac41d75c4 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/sophgo,sg2042-hwmon-mcu.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/hwmon/sophgo,sg2042-hwmon-mcu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Sophgo SG2042 onboard MCU support + +maintainers: + - Inochi Amaoto + +properties: + compatible: + const: sophgo,sg2042-hwmon-mcu + + reg: + maxItems: 1 + + "#thermal-sensor-cells": + const: 1 + +required: + - compatible + - reg + - "#thermal-sensor-cells" + +allOf: + - $ref: /schemas/thermal/thermal-sensor.yaml# + +unevaluatedProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + hwmon@17 { + compatible = "sophgo,sg2042-hwmon-mcu"; + reg = <0x17>; + #thermal-sensor-cells = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml index 6df27b47b922c5..5b9bd2feda3b73 100644 --- a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml @@ -44,11 +44,6 @@ properties: description: frequency of the bus clock in Hz defaults to 100 kHz when not specified - multi-master: - type: boolean - description: - states that there is another master active on this bus - required: - reg - compatible diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml index 82b9d6682297b9..a9dae5b52f2865 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml @@ -38,6 +38,7 @@ properties: - rockchip,rk3308-i2c - rockchip,rk3328-i2c - rockchip,rk3568-i2c + - rockchip,rk3576-i2c - rockchip,rk3588-i2c - rockchip,rv1126-i2c - const: rockchip,rk3399-i2c diff --git a/Documentation/devicetree/bindings/i2c/i2c-sprd.txt b/Documentation/devicetree/bindings/i2c/i2c-sprd.txt deleted file mode 100644 index 7b6b3b8d0d116c..00000000000000 --- a/Documentation/devicetree/bindings/i2c/i2c-sprd.txt +++ /dev/null @@ -1,31 +0,0 @@ -I2C for Spreadtrum platforms - -Required properties: -- compatible: Should be "sprd,sc9860-i2c". -- reg: Specify the physical base address of the controller and length - of memory mapped region. -- interrupts: Should contain I2C interrupt. -- clock-names: Should contain following entries: - "i2c" for I2C clock, - "source" for I2C source (parent) clock, - "enable" for I2C module enable clock. 
-- clocks: Should contain a clock specifier for each entry in clock-names. -- clock-frequency: Contains desired I2C bus clock frequency in Hz. -- #address-cells: Should be 1 to describe address cells for I2C device address. -- #size-cells: Should be 0 means no size cell for I2C device address. - -Optional properties: -- Child nodes conforming to I2C bus binding - -Examples: -i2c0: i2c@70500000 { - compatible = "sprd,sc9860-i2c"; - reg = <0 0x70500000 0 0x1000>; - interrupts = ; - clock-names = "i2c", "source", "enable"; - clocks = <&clk_i2c3>, <&ext_26m>, <&clk_ap_apb_gates 11>; - clock-frequency = <400000>; - #address-cells = <1>; - #size-cells = <0>; -}; - diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml index 92fbc1a2671ad7..b57ae6963e6298 100644 --- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml @@ -103,6 +103,9 @@ properties: items: - const: i2c + power-domains: + maxItems: 1 + dmas: items: - description: DMA channel for the reception FIFO @@ -124,6 +127,8 @@ allOf: - nvidia,tegra30-i2c then: properties: + clocks: + minItems: 2 clock-names: items: - const: div-clk @@ -133,20 +138,13 @@ allOf: properties: compatible: contains: - const: nvidia,tegra114-i2c - then: - properties: - clock-names: - items: - - const: div-clk - - - if: - properties: - compatible: - contains: - const: nvidia,tegra210-i2c + enum: + - nvidia,tegra114-i2c + - nvidia,tegra210-i2c then: properties: + clocks: + maxItems: 1 clock-names: items: - const: div-clk @@ -158,6 +156,8 @@ allOf: const: nvidia,tegra210-i2c-vi then: properties: + clocks: + minItems: 2 clock-names: items: - const: div-clk @@ -165,6 +165,9 @@ allOf: power-domains: items: - description: phandle to the VENC power domain + else: + properties: + power-domains: false unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml index c33ae7b63b84f2..7dab3852c7f87f 100644 --- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml +++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml @@ -130,6 +130,7 @@ allOf: then: properties: clocks: + minItems: 4 maxItems: 4 clock-names: items: diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml index 7993fe463c4c3f..505a8ec922668e 100644 --- a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml +++ b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml @@ -25,6 +25,10 @@ properties: - renesas,riic-r9a07g054 # RZ/V2L - const: renesas,riic-rz # RZ/A or RZ/G2L + - items: + - const: renesas,riic-r9a08g045 # RZ/G3S + - const: renesas,riic-r9a09g057 # RZ/V2H(P) + - const: renesas,riic-r9a09g057 # RZ/V2H(P) reg: diff --git a/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml b/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml new file mode 100644 index 00000000000000..ec0d39e73d267d --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/i2c/sprd,sc9860-i2c.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Spreadtrum SC9860 I2C controller + +maintainers: + - Orson Zhai + - Baolin Wang + - Chunyan Zhang + +allOf: + - $ref: /schemas/i2c/i2c-controller.yaml# + +properties: + compatible: 
+ const: sprd,sc9860-i2c + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + items: + - description: I2C clock + - description: I2C source (parent) clock + - description: I2C module enable clock + + clock-names: + items: + - const: i2c + - const: source + - const: enable + + clock-frequency: true + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - clock-frequency + +unevaluatedProperties: false + +examples: + - | + #include + #include + + i2c@70500000 { + compatible = "sprd,sc9860-i2c"; + reg = <0x70500000 0x1000>; + interrupts = ; + clocks = <&clk_i2c3>, <&ext_26m>, <&clk_ap_apb_gates 11>; + clock-names = "i2c", "source", "enable"; + clock-frequency = <400000>; + #address-cells = <1>; + #size-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml b/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml new file mode 100644 index 00000000000000..28139b67666105 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/i2c/tsd,mule-i2c-mux.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Theobroma Systems Mule I2C multiplexer + +maintainers: + - Farouk Bouabid + - Quentin Schulz + +description: | + Theobroma Systems Mule is an MCU that emulates a set of I2C devices, among + which devices that are reachable through an I2C-mux. The devices on the mux + can be selected by writing the appropriate device number to an I2C config + register. + + + +--------------------------------------------------+ + | Mule | + 0x18| +---------------+ | + -------->|Config register|----+ | + | +---------------+ | | + | V_ | + | | \ +--------+ | + | | \-------->| dev #0 | | + | | | +--------+ | + 0x6f| | M |-------->| dev #1 | | + ---------------------------->| U | +--------+ | + | | X |-------->| dev #2 | | + | | | +--------+ | + | | /-------->| dev #3 | | + | |__/ +--------+ | + +--------------------------------------------------+ + + +allOf: + - $ref: /schemas/i2c/i2c-mux.yaml# + +properties: + compatible: + const: tsd,mule-i2c-mux + +required: + - compatible + +unevaluatedProperties: false + +examples: + - | + i2c-mux { + compatible = "tsd,mule-i2c-mux"; + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + rtc@6f { + compatible = "isil,isl1208"; + reg = <0x6f>; + }; + }; + }; +... + diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml new file mode 100644 index 00000000000000..f1ff5ff4f478dc --- /dev/null +++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/accel/adi,adxl380.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices ADXL380/382 3-Axis Digital Accelerometer + +maintainers: + - Ramona Gradinariu + - Antoniu Miclaus + +description: | + The ADXL380/ADXL382 is a low noise density, low power, 3-axis + accelerometer with selectable measurement ranges. The ADXL380 + supports the ±4 g, ±8 g, and ±16 g ranges, and the ADXL382 supports + ±15 g, ±30 g, and ±60 g ranges. 
+ + https://www.analog.com/en/products/adxl380.html + +properties: + compatible: + enum: + - adi,adxl380 + - adi,adxl382 + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + maxItems: 2 + + interrupt-names: + minItems: 1 + items: + - enum: [INT0, INT1] + - const: INT1 + + vddio-supply: true + + vsupply-supply: true + +required: + - compatible + - reg + - interrupts + - interrupt-names + - vddio-supply + - vsupply-supply + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + accelerometer@54 { + compatible = "adi,adxl380"; + reg = <0x54>; + vddio-supply = <&vddio>; + vsupply-supply = <&vsupply>; + interrupt-parent = <&gpio>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "INT0"; + }; + }; + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + accelerometer@0 { + compatible = "adi,adxl380"; + reg = <0>; + spi-max-frequency = <8000000>; + vddio-supply = <&vddio>; + vsupply-supply = <&vsupply>; + interrupt-parent = <&gpio>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "INT0"; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml index 6ddb03f61bd9c1..951a3a2ba8fc6e 100644 --- a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml +++ b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml @@ -16,6 +16,7 @@ properties: - kionix,kxcj91008 - kionix,kxtj21009 - kionix,kxtf9 + - kionix,kx022-1020 - kionix,kx023-1025 reg: diff --git a/Documentation/devicetree/bindings/iio/accel/lis302.txt b/Documentation/devicetree/bindings/iio/accel/lis302.txt index 764e28ec1a0a8f..457539647f36ff 100644 --- a/Documentation/devicetree/bindings/iio/accel/lis302.txt +++ b/Documentation/devicetree/bindings/iio/accel/lis302.txt @@ -36,7 +36,7 @@ Optional properties for all bus drivers: - st,irq{1,2}-disable: disable IRQ 1/2 - st,irq{1,2}-ff-wu-1: raise IRQ 1/2 on FF_WU_1 condition - st,irq{1,2}-ff-wu-2: raise IRQ 1/2 on FF_WU_2 condition - - st,irq{1,2}-data-ready: raise IRQ 1/2 on data ready contition + - st,irq{1,2}-data-ready: raise IRQ 1/2 on data ready condition - st,irq{1,2}-click: raise IRQ 1/2 on click condition - st,irq-open-drain: consider IRQ lines open-drain - st,irq-active-low: make IRQ lines active low diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml new file mode 100644 index 00000000000000..e413a9d8d2a222 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/adc/adi,ad4000.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices AD4000 and similar Analog to Digital Converters + +maintainers: + - Marcelo Schmitt + +description: | + Analog Devices AD4000 family of Analog to Digital Converters with SPI support. 
+ Specifications can be found at: + https://www.analog.com/media/en/technical-documentation/data-sheets/ad4000-4004-4008.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/ad4001-4005.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/ad4002-4006-4010.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/ad4003-4007-4011.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/ad4020-4021-4022.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4001.pdf + https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4003.pdf + +$ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + oneOf: + - const: adi,ad4000 + - items: + - enum: + - adi,ad4004 + - adi,ad4008 + - const: adi,ad4000 + + - const: adi,ad4001 + - items: + - enum: + - adi,ad4005 + - const: adi,ad4001 + + - const: adi,ad4002 + - items: + - enum: + - adi,ad4006 + - adi,ad4010 + - const: adi,ad4002 + + - const: adi,ad4003 + - items: + - enum: + - adi,ad4007 + - adi,ad4011 + - const: adi,ad4003 + + - const: adi,ad4020 + - items: + - enum: + - adi,ad4021 + - adi,ad4022 + - const: adi,ad4020 + + - const: adi,adaq4001 + + - const: adi,adaq4003 + + reg: + maxItems: 1 + + spi-max-frequency: + maximum: 102040816 # for VIO > 2.7 V, 81300813 for VIO > 1.7 V + + adi,sdi-pin: + $ref: /schemas/types.yaml#/definitions/string + enum: [ high, low, cs, sdi ] + default: sdi + description: + Describes how the ADC SDI pin is wired. A value of "sdi" indicates that + the ADC SDI is connected to host SDO. "high" indicates that the ADC SDI + pin is hard-wired to logic high (VIO). "low" indicates that it is + hard-wired low (GND). "cs" indicates that the ADC SDI pin is connected to + the host CS line. + + '#daisy-chained-devices': true + + vdd-supply: + description: A 1.8V supply that powers the chip (VDD). + + vio-supply: + description: + A 1.8V to 5.5V supply for the digital inputs and outputs (VIO). + + ref-supply: + description: + A 2.5 to 5V supply for the external reference voltage (REF). + + cnv-gpios: + description: + When provided, this property indicates the GPIO that is connected to the + CNV pin. + maxItems: 1 + + adi,high-z-input: + type: boolean + description: + High-Z mode allows the amplifier and RC filter in front of the ADC to be + chosen based on the signal bandwidth of interest, rather than the settling + requirements of the switched capacitor SAR ADC inputs. + + adi,gain-milli: + description: | + The hardware gain applied to the ADC input (in milli units). + The gain provided by the ADC input scaler is defined by the hardware + connections between chip pins OUT+, R1K-, R1K1-, R1K+, R1K1+, and OUT-. + If not present, default to 1000 (no actual gain applied). + $ref: /schemas/types.yaml#/definitions/uint16 + enum: [454, 909, 1000, 1900] + default: 1000 + + interrupts: + description: + The SDO pin can also function as a busy indicator. 
This node should be + connected to an interrupt that is triggered when the SDO line goes low + while the SDI line is high and the CNV line is low ("3-wire" mode) or the + SDI line is low and the CNV line is high ("4-wire" mode); or when the SDO + line goes high while the SDI and CNV lines are high (chain mode), + maxItems: 1 + +required: + - compatible + - reg + - vdd-supply + - vio-supply + - ref-supply + +allOf: + # The configuration register can only be accessed if SDI is connected to MOSI + - if: + required: + - adi,sdi-pin + then: + properties: + adi,high-z-input: false + # chain mode has lower SCLK max rate + - if: + required: + - '#daisy-chained-devices' + then: + properties: + spi-max-frequency: + maximum: 50000000 # for VIO > 2.7 V, 40000000 for VIO > 1.7 V + # Gain property only applies to ADAQ devices + - if: + properties: + compatible: + not: + contains: + enum: + - adi,adaq4001 + - adi,adaq4003 + then: + properties: + adi,gain-milli: false + +unevaluatedProperties: false + +examples: + - | + #include + spi { + #address-cells = <1>; + #size-cells = <0>; + adc@0 { + compatible = "adi,ad4020"; + reg = <0>; + spi-max-frequency = <71000000>; + vdd-supply = <&supply_1_8V>; + vio-supply = <&supply_1_8V>; + ref-supply = <&supply_5V>; + adi,sdi-pin = "cs"; + cnv-gpios = <&gpio0 88 GPIO_ACTIVE_HIGH>; + }; + }; + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + adc@0 { + compatible = "adi,adaq4003"; + reg = <0>; + spi-max-frequency = <80000000>; + vdd-supply = <&supply_1_8V>; + vio-supply = <&supply_1_8V>; + ref-supply = <&supply_5V>; + adi,high-z-input; + adi,gain-milli = /bits/ 16 <454>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml new file mode 100644 index 00000000000000..310f046e139f99 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml @@ -0,0 +1,254 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/adc/adi,ad4695.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices Easy Drive Multiplexed SAR Analog to Digital Converters + +maintainers: + - Michael Hennerich + - Nuno Sá + +description: | + A family of similar multi-channel analog to digital converters with SPI bus. + + * https://www.analog.com/en/products/ad4695.html + * https://www.analog.com/en/products/ad4696.html + * https://www.analog.com/en/products/ad4697.html + * https://www.analog.com/en/products/ad4698.html + +$ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + enum: + - adi,ad4695 + - adi,ad4696 + - adi,ad4697 + - adi,ad4698 + + reg: + maxItems: 1 + + spi-max-frequency: + maximum: 80000000 + + spi-cpol: true + spi-cpha: true + + spi-rx-bus-width: + minimum: 1 + maximum: 4 + + avdd-supply: + description: Analog power supply. + + vio-supply: + description: I/O pin power supply. + + ldo-in-supply: + description: Internal LDO Input. Mutually exclusive with vdd-supply. + + vdd-supply: + description: Core power supply. Mutually exclusive with ldo-in-supply. + + ref-supply: + description: + External reference voltage. Mutually exclusive with refin-supply. + + refin-supply: + description: + Internal reference buffer input. Mutually exclusive with ref-supply. + + com-supply: + description: Common voltage supply for pseudo-differential analog inputs. 
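For orientation, the supply inputs above pair off: a board provides either ldo-in-supply or vdd-supply, and either ref-supply or refin-supply (the oneOf constraints later in this schema enforce exactly that). A rough, hypothetical sketch of one valid combination, using the internal LDO and the internal reference buffer (phandles and the surrounding SPI controller are placeholders, includes omitted):

    adc@0 {
        compatible = "adi,ad4696";
        reg = <0>;
        avdd-supply = <&supply_avdd>;
        vio-supply = <&supply_vio>;
        ldo-in-supply = <&supply_avdd>;  /* internal LDO input; vdd-supply must then be absent */
        refin-supply = <&supply_ref>;    /* internal reference buffer; ref-supply must then be absent */
    };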
+ + adi,no-ref-current-limit: + $ref: /schemas/types.yaml#/definitions/flag + description: + When this flag is present, the REF Overvoltage Reduced Current protection + is disabled. + + adi,no-ref-high-z: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable this flag if the ref-supply requires Reference Input High-Z Mode + to be disabled for proper operation. + + cnv-gpios: + description: The Convert Input (CNV). If omitted, CNV is tied to SPI CS. + maxItems: 1 + + reset-gpios: + description: The Reset Input (RESET). Should be configured GPIO_ACTIVE_LOW. + maxItems: 1 + + interrupts: + minItems: 1 + items: + - description: Signal coming from the BSY_ALT_GP0 pin (ALERT or BUSY). + - description: Signal coming from the GP2 pin (ALERT). + - description: Signal coming from the GP3 pin (BUSY). + + interrupt-names: + minItems: 1 + items: + - const: gp0 + - const: gp2 + - const: gp3 + + gpio-controller: true + + "#gpio-cells": + const: 2 + description: | + The first cell is the GPn number: 0 to 3. + The second cell takes standard GPIO flags. + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + +patternProperties: + "^in(?:[13579]|1[135])-supply$": + description: + Optional voltage supply for odd numbered channels when they are used as + the negative input for a pseudo-differential channel. + + "^channel@[0-9a-f]$": + type: object + $ref: adc.yaml + unevaluatedProperties: false + description: + Describes each individual channel. In addition to the properties defined + below, bipolar from adc.yaml is also supported. + + properties: + reg: + maximum: 15 + + common-mode-channel: + description: + Describes the common mode channel for single channels. 0xFF is REFGND + and 0xFE is COM. Macros are available for these values in + dt-bindings/iio/adi,ad4695.h. Values 1 to 15 correspond to INx inputs. + Only odd numbered INx inputs can be used as common mode channels. + enum: [1, 3, 5, 7, 9, 11, 13, 15, 0xFE, 0xFF] + default: 0xFF + + adi,no-high-z: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable this flag if the input pin requires the Analog Input High-Z + Mode to be disabled for proper operation. 
+ + required: + - reg + + allOf: + # bipolar mode can't be used with REFGND + - if: + properties: + common-mode-channel: + const: 0xFF + then: + properties: + bipolar: false + +required: + - compatible + - reg + - avdd-supply + - vio-supply + +allOf: + - oneOf: + - required: + - ldo-in-supply + - required: + - vdd-supply + + - oneOf: + - required: + - ref-supply + - required: + - refin-supply + + # the internal reference buffer always requires high-z mode + - if: + required: + - refin-supply + then: + properties: + adi,no-ref-high-z: false + + # limit channels for 8-channel chips + - if: + properties: + compatible: + contains: + enum: + - adi,ad4697 + - adi,ad4698 + then: + patternProperties: + "^in(?:9|1[135])-supply$": false + "^channel@[0-7]$": + properties: + reg: + maximum: 7 + common-mode-channel: + enum: [1, 3, 5, 7, 0xFE, 0xFF] + "^channel@[8-9a-f]$": false + +unevaluatedProperties: false + +examples: + - | + #include + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + adc@0 { + compatible = "adi,ad4695"; + reg = <0>; + spi-cpol; + spi-cpha; + spi-max-frequency = <80000000>; + avdd-supply = <&power_supply>; + ldo-in-supply = <&power_supply>; + vio-supply = <&io_supply>; + refin-supply = <&supply_5V>; + com-supply = <&supply_2V5>; + in3-supply = <&supply_2V5>; + reset-gpios = <&gpio 1 GPIO_ACTIVE_LOW>; + + #address-cells = <1>; + #size-cells = <0>; + + /* Pseudo-differential channel between IN0 and REFGND. */ + channel@0 { + reg = <0>; + }; + + /* Pseudo-differential channel between IN1 and COM. */ + channel@1 { + reg = <1>; + common-mode-channel = ; + bipolar; + }; + + /* Pseudo-differential channel between IN2 and IN3. */ + channel@2 { + reg = <2>; + common-mode-channel = <3>; + bipolar; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml index 190889c7b62ae2..66dd1c549bd3c0 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml @@ -39,11 +39,21 @@ properties: clocks: maxItems: 1 - description: phandle to the master clock (mclk) + description: + Optionally, either a crystal can be attached externally between MCLK1 and + MCLK2 pins, or an external CMOS-compatible clock can drive the MCLK2 + pin. If absent, internal 4.92MHz clock is used, which can be made + available on MCLK2 pin. clock-names: - items: - - const: mclk + enum: + - xtal + - mclk + + "#clock-cells": + const: 0 + description: + If present when internal clock is used, configured as clock provider. 
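To make the clock options above concrete: with an external CMOS clock driving MCLK2 the node keeps the clocks/clock-names pair and, per the new if/then rule in this hunk, must omit "#clock-cells"; with the internal clock only "#clock-cells" is given, as in the updated example further down. A hypothetical sketch of the external-clock case (labels are placeholders, other required properties left out for brevity):

    adc@0 {
        compatible = "adi,ad7192";
        reg = <0>;
        /* external clock on MCLK2; "xtal" would be the name for a crystal
           wired across MCLK1/MCLK2 instead */
        clocks = <&ext_mclk>;
        clock-names = "mclk";
        /* interrupts, supplies and SPI-mode properties omitted here */
    };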
interrupts: maxItems: 1 @@ -134,8 +144,6 @@ patternProperties: required: - compatible - reg - - clocks - - clock-names - interrupts - dvdd-supply - avdd-supply @@ -156,6 +164,18 @@ allOf: then: patternProperties: "^channel@[0-9a-f]+$": false + - if: + anyOf: + - required: + - clocks + - required: + - clock-names + then: + properties: + "#clock-cells": false + required: + - clocks + - clock-names unevaluatedProperties: false @@ -201,8 +221,7 @@ examples: spi-max-frequency = <1000000>; spi-cpol; spi-cpha; - clocks = <&ad7192_mclk>; - clock-names = "mclk"; + #clock-cells = <0>; interrupts = <25 0x2>; interrupt-parent = <&gpio>; aincom-supply = <&aincom>; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml index 899b777017ce3b..bd19abb867d98d 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml @@ -15,10 +15,17 @@ description: | * https://www.analog.com/en/products/ad7381.html * https://www.analog.com/en/products/ad7383.html * https://www.analog.com/en/products/ad7384.html + * https://www.analog.com/en/products/ad7386.html + * https://www.analog.com/en/products/ad7387.html + * https://www.analog.com/en/products/ad7388.html * https://www.analog.com/en/products/ad7380-4.html * https://www.analog.com/en/products/ad7381-4.html * https://www.analog.com/en/products/ad7383-4.html * https://www.analog.com/en/products/ad7384-4.html + * https://www.analog.com/en/products/ad7386-4.html + * https://www.analog.com/en/products/ad7387-4.html + * https://www.analog.com/en/products/ad7388-4.html + $ref: /schemas/spi/spi-peripheral-props.yaml# @@ -29,10 +36,16 @@ properties: - adi,ad7381 - adi,ad7383 - adi,ad7384 + - adi,ad7386 + - adi,ad7387 + - adi,ad7388 - adi,ad7380-4 - adi,ad7381-4 - adi,ad7383-4 - adi,ad7384-4 + - adi,ad7386-4 + - adi,ad7387-4 + - adi,ad7388-4 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml index 00fdaed11cbd18..69408cae3db960 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml @@ -35,65 +35,83 @@ properties: avcc-supply: true + vdrive-supply: + description: + Determines the voltage level at which the interface logic pins will + operate. + + refin-supply: + description: + The voltage supply for optional external reference voltage. + interrupts: + description: + The BUSY pin falling edge indicates that the conversion is over, and thus + new data is available. maxItems: 1 adi,conversion-start-gpios: description: - Must be the device tree identifier of the CONVST pin. - This logic input is used to initiate conversions on the analog - input channels. As the line is active high, it should be marked - GPIO_ACTIVE_HIGH. - maxItems: 1 + Must be the device tree identifier of the CONVST pin(s). This logic input + is used to initiate conversions on the analog input channels. As the line + is active high, it should be marked GPIO_ACTIVE_HIGH. + minItems: 1 + maxItems: 2 reset-gpios: description: - Must be the device tree identifier of the RESET pin. If specified, - it will be asserted during driver probe. As the line is active high, - it should be marked GPIO_ACTIVE_HIGH. + Must be the device tree identifier of the RESET pin. If specified, it will + be asserted during driver probe. 
On the AD7606x, as the line is active + high, it should be marked GPIO_ACTIVE_HIGH. On the AD7616, as the line is + active low, it should be marked GPIO_ACTIVE_LOW. maxItems: 1 standby-gpios: description: - Must be the device tree identifier of the STBY pin. This pin is used - to place the AD7606 into one of two power-down modes, Standby mode or + Must be the device tree identifier of the STBY pin. This pin is used to + place the AD7606 into one of two power-down modes, Standby mode or Shutdown mode. As the line is active low, it should be marked GPIO_ACTIVE_LOW. maxItems: 1 adi,first-data-gpios: description: - Must be the device tree identifier of the FRSTDATA pin. - The FRSTDATA output indicates when the first channel, V1, is - being read back on either the parallel, byte or serial interface. - As the line is active high, it should be marked GPIO_ACTIVE_HIGH. + Must be the device tree identifier of the FRSTDATA pin. The FRSTDATA + output indicates when the first channel, V1, is being read back on either + the parallel, byte or serial interface. As the line is active high, it + should be marked GPIO_ACTIVE_HIGH. maxItems: 1 adi,range-gpios: description: - Must be the device tree identifier of the RANGE pin. The polarity on - this pin determines the input range of the analog input channels. If - this pin is tied to a logic high, the analog input range is ±10V for - all channels. If this pin is tied to a logic low, the analog input range + Must be the device tree identifier of the RANGE pin. The state on this + pin determines the input range of the analog input channels. If this pin + is tied to a logic high, the analog input range is ±10V for all channels. + On the AD760X, if this pin is tied to a logic low, the analog input range is ±5V for all channels. As the line is active high, it should be marked - GPIO_ACTIVE_HIGH. - maxItems: 1 + GPIO_ACTIVE_HIGH. On the AD7616, there are 2 pins, and if the 2 pins are + tied to a logic high, software mode is enabled, otherwise one of the 3 + possible range values is selected. + minItems: 1 + maxItems: 2 adi,oversampling-ratio-gpios: description: - Must be the device tree identifier of the over-sampling - mode pins. As the line is active high, it should be marked - GPIO_ACTIVE_HIGH. + Must be the device tree identifier of the over-sampling mode pins. As the + line is active high, it should be marked GPIO_ACTIVE_HIGH. On the AD7606X + parts that support it, if all 3 pins are tied to a logic high, software + mode is enabled. maxItems: 3 adi,sw-mode: description: - Software mode of operation, so far available only for ad7616 and ad7606b. - It is enabled when all three oversampling mode pins are connected to - high level. The device is configured by the corresponding registers. If the - adi,oversampling-ratio-gpios property is defined, then the driver will set the - oversampling gpios to high. Otherwise, it is assumed that the pins are hardwired - to VDD. + Software mode of operation, so far available only for AD7616 and AD7606B. + It is enabled when all three oversampling mode pins are connected to high + level for the AD7606B, or both the range selection are connected to high + level for the AD7616. The device is configured by the corresponding + registers. If the adi,oversampling-ratio-gpios property is defined, then + the driver will set the oversampling gpios to high. Otherwise, it is + assumed that the pins are hardwired to VDD. 
type: boolean required: @@ -101,12 +119,57 @@ required: - reg - spi-cpha - avcc-supply + - vdrive-supply - interrupts - adi,conversion-start-gpios allOf: - $ref: /schemas/spi/spi-peripheral-props.yaml# + - if: + properties: + compatible: + contains: + const: adi,ad7616 + then: + properties: + adi,first-data-gpios: false + standby-gpios: false + adi,range-gpios: + maxItems: 2 + else: + properties: + adi,range-gpios: + maxItems: 1 + + - if: + properties: + compatible: + contains: + enum: + - adi,ad7605-4 + - adi,ad7616 + then: + properties: + adi,oversampling-ratio-gpios: false + + - if: + properties: + compatible: + contains: + enum: + - adi,ad7605-4 + - adi,ad7606-4 + - adi,ad7606-6 + - adi,ad7606-8 + then: + properties: + adi,sw-mode: false + else: + properties: + adi,conversion-start-gpios: + maxItems: 1 + unevaluatedProperties: false examples: @@ -125,6 +188,7 @@ examples: spi-cpha; avcc-supply = <&adc_vref>; + vdrive-supply = <&vdd_supply>; interrupts = <25 IRQ_TYPE_EDGE_FALLING>; interrupt-parent = <&gpio>; @@ -136,7 +200,6 @@ examples: <&gpio 23 GPIO_ACTIVE_HIGH>, <&gpio 26 GPIO_ACTIVE_HIGH>; standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>; - adi,sw-mode; }; }; ... diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml index eecd5fbab69581..2606c0c5dfc61f 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml @@ -28,6 +28,9 @@ properties: - adi,ad9265 - adi,ad9434 - adi,ad9467 + - adi,ad9643 + - adi,ad9649 + - adi,ad9652 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml new file mode 100644 index 00000000000000..12e56b1b3d3fd8 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/adc/microchip,pac1921.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip PAC1921 High-Side Power/Current Monitor with Analog Output + +maintainers: + - Matteo Martelli + +description: | + The PAC1921 is a power/current monitoring device with an analog output + and I2C/SMBus interface. + + Datasheet can be found here: + https://ww1.microchip.com/downloads/en/DeviceDoc/PAC1921-Data-Sheet-DS20005293E.pdf + +properties: + compatible: + const: microchip,pac1921 + + reg: + maxItems: 1 + + vdd-supply: true + + "#io-channel-cells": + const: 1 + + shunt-resistor-micro-ohms: + description: + Value in micro Ohms of the shunt resistor connected between + the SENSE+ and SENSE- inputs, across which the current is measured. + Value is needed to compute the scaling of the measured current. + + label: + description: Unique name to identify which device this is. + + read-integrate-gpios: + description: + READ/INT input pin to control the current state of the device, either in + the INTEGRATE state when driven high, or in the READ state when driven low. + When not connected the pin is floating and it can be overridden by the + INT_EN register bit after asserting the READ/INT_OVR register bit. 
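Tying the two PAC1921 properties above together: the shunt value is what lets the driver convert the voltage measured across SENSE+/SENSE- into a current, I = Vsense / Rshunt, so a 10000 micro-ohm (10 milliohm) shunt with 50 mV across it reads back as 5 A. A hypothetical node carrying both properties (address, labels and GPIO polarity are placeholders):

    adc@4c {
        compatible = "microchip,pac1921";
        reg = <0x4c>;
        vdd-supply = <&vdd>;
        /* 10 mOhm shunt: 50 mV of sense voltage corresponds to 5 A */
        shunt-resistor-micro-ohms = <10000>;
        /* optionally drive READ/INT from a host GPIO instead of leaving it floating */
        read-integrate-gpios = <&gpio 4 GPIO_ACTIVE_HIGH>;
    };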
+ maxItems: 1 + +required: + - compatible + - reg + - vdd-supply + - shunt-resistor-micro-ohms + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + adc@4c { + compatible = "microchip,pac1921"; + reg = <0x4c>; + vdd-supply = <&vdd>; + #io-channel-cells = <1>; + label = "vbat"; + shunt-resistor-micro-ohms = <10000>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml index aa24b841393c02..fd93ed3991e059 100644 --- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml @@ -16,6 +16,9 @@ properties: - const: rockchip,rk3066-tsadc - const: rockchip,rk3399-saradc - const: rockchip,rk3588-saradc + - items: + - const: rockchip,rk3576-saradc + - const: rockchip,rk3588-saradc - items: - enum: - rockchip,px30-saradc diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml index cab0d425eaa426..c3a116427dc3be 100644 --- a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml +++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml @@ -18,18 +18,39 @@ properties: - sd-modulator - ads1201 + '#io-backend-cells': + const: 0 + '#io-channel-cells': const: 0 + vref-supply: + description: Phandle to the vref input analog reference voltage. + +dependencies: + vref-supply: [ '#io-backend-cells' ] + required: - compatible - - '#io-channel-cells' + +anyOf: + - required: ['#io-backend-cells'] + - required: ['#io-channel-cells'] additionalProperties: false examples: - | - ads1202: adc { + // Backend binding example. SD modulator configured as an IIO backend device + ads1201_0: adc { + compatible = "sd-modulator"; + vref-supply = <&vdd_adc>; + #io-backend-cells = <0>; + }; + + - | + // Legacy binding example. 
SD modulator configured as an IIO channel provider + ads1201_1: adc { compatible = "sd-modulator"; #io-channel-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml new file mode 100644 index 00000000000000..f652b98615f738 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/adc/sophgo,cv1800b-saradc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: + Sophgo CV1800B SoC 3 channels Successive Approximation Analog to + Digital Converters + +maintainers: + - Thomas Bonnefille + +description: + Datasheet at https://github.com/sophgo/sophgo-doc/releases + +properties: + compatible: + const: sophgo,cv1800b-saradc + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + "^channel@[0-2]$": + $ref: adc.yaml + + properties: + reg: + items: + - minimum: 0 + maximum: 2 + + required: + - reg + + additionalProperties: false + +required: + - compatible + - reg + - clocks + - '#address-cells' + - '#size-cells' + +additionalProperties: false + +examples: + - | + #include + #include + adc@30f0000 { + compatible = "sophgo,cv1800b-saradc"; + reg = <0x030f0000 0x1000>; + clocks = <&clk CLK_SARADC>; + interrupts = <100 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { + reg = <0>; + }; + + channel@1 { + reg = <1>; + }; + + channel@2 { + reg = <2>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml index ec34c48d48782b..ef9dcc365eab56 100644 --- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml @@ -54,7 +54,9 @@ properties: It's not present on stm32f4. It's required on stm32h7 and stm32mp1. - clock-names: true + clock-names: + minItems: 1 + maxItems: 2 st,max-clk-rate-hz: description: diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml index 2722edab1d9a19..c24ac98bbb3d49 100644 --- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml @@ -102,9 +102,11 @@ patternProperties: items: minimum: 0 maximum: 7 + deprecated: true st,adc-channel-names: description: List of single-ended channel names. + deprecated: true st,filter-order: description: | @@ -118,6 +120,12 @@ patternProperties: "#io-channel-cells": const: 1 + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + st,adc-channel-types: description: | Single-ended channel input type. @@ -128,6 +136,7 @@ patternProperties: items: enum: [ SPI_R, SPI_F, MANCH_R, MANCH_F ] $ref: /schemas/types.yaml#/definitions/non-unique-string-array + deprecated: true st,adc-channel-clk-src: description: | @@ -139,6 +148,7 @@ patternProperties: items: enum: [ CLKIN, CLKOUT, CLKOUT_F, CLKOUT_R ] $ref: /schemas/types.yaml#/definitions/non-unique-string-array + deprecated: true st,adc-alt-channel: description: @@ -147,6 +157,7 @@ patternProperties: If not set, channel n is connected to SPI input n. If set, channel n is connected to SPI input n + 1. 
type: boolean + deprecated: true st,filter0-sync: description: @@ -165,11 +176,60 @@ patternProperties: - compatible - reg - interrupts - - st,adc-channels - - st,adc-channel-names - st,filter-order - "#io-channel-cells" + patternProperties: + "^channel@[0-7]$": + type: object + $ref: adc.yaml + unevaluatedProperties: false + description: Represents the external channels which are connected to the DFSDM. + + properties: + reg: + maximum: 7 + + label: + description: + Unique name to identify which channel this is. + + st,adc-channel-type: + description: | + Single-ended channel input type. + - "SPI_R": SPI with data on rising edge (default) + - "SPI_F": SPI with data on falling edge + - "MANCH_R": manchester codec, rising edge = logic 0, falling edge = logic 1 + - "MANCH_F": manchester codec, rising edge = logic 1, falling edge = logic 0 + $ref: /schemas/types.yaml#/definitions/string + enum: [ SPI_R, SPI_F, MANCH_R, MANCH_F ] + + st,adc-channel-clk-src: + description: | + Conversion clock source. + - "CLKIN": external SPI clock (CLKIN x) + - "CLKOUT": internal SPI clock (CLKOUT) (default) + - "CLKOUT_F": internal SPI clock divided by 2 (falling edge). + - "CLKOUT_R": internal SPI clock divided by 2 (rising edge). + $ref: /schemas/types.yaml#/definitions/string + enum: [ CLKIN, CLKOUT, CLKOUT_F, CLKOUT_R ] + + st,adc-alt-channel: + description: + Must be defined if two sigma delta modulators are + connected on same SPI input. + If not set, channel n is connected to SPI input n. + If set, channel n is connected to SPI input n + 1. + type: boolean + + io-backends: + description: + Used to pipe external sigma delta modulator or internal ADC backend to DFSDM channel. + maxItems: 1 + + required: + - reg + allOf: - if: properties: @@ -199,9 +259,19 @@ patternProperties: description: From common IIO binding. Used to pipe external sigma delta modulator or internal ADC output to DFSDM channel. 
+ deprecated: true - required: - - io-channels + if: + required: + - st,adc-channels + then: + required: + - io-channels + + patternProperties: + "^channel@[0-7]$": + required: + - io-backends - if: properties: @@ -298,6 +368,7 @@ examples: #address-cells = <1>; #size-cells = <0>; + // Example 1: Audio use case with generic binding dfsdm0: filter@0 { compatible = "st,stm32-dfsdm-dmic"; reg = <0>; @@ -305,12 +376,18 @@ examples: dmas = <&dmamux1 101 0x400 0x01>; dma-names = "rx"; #io-channel-cells = <1>; - st,adc-channels = <1>; - st,adc-channel-names = "dmic0"; - st,adc-channel-types = "SPI_R"; - st,adc-channel-clk-src = "CLKOUT"; + #address-cells = <1>; + #size-cells = <0>; st,filter-order = <5>; + channel@1 { + reg = <1>; + label = "dmic0"; + st,adc-channel-type = "SPI_R"; + st,adc-channel-clk-src = "CLKOUT"; + st,adc-alt-channel; + }; + asoc_pdm0: dfsdm-dai { compatible = "st,stm32h7-dfsdm-dai"; #sound-dai-cells = <0>; @@ -318,19 +395,34 @@ examples: }; }; - dfsdm_pdm1: filter@1 { + // Example 2: Analog use case with generic binding + dfsdm1: filter@1 { compatible = "st,stm32-dfsdm-adc"; reg = <1>; interrupts = ; dmas = <&dmamux1 102 0x400 0x01>; dma-names = "rx"; - #io-channel-cells = <1>; - st,adc-channels = <2 3>; - st,adc-channel-names = "in2", "in3"; - st,adc-channel-types = "SPI_R", "SPI_R"; - st,adc-channel-clk-src = "CLKOUT_F", "CLKOUT_F"; - io-channels = <&sd_adc2 &sd_adc3>; st,filter-order = <1>; + #io-channel-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; + + channel@2 { + reg = <2>; + label = "in2"; + st,adc-channel-type = "SPI_F"; + st,adc-channel-clk-src = "CLKOUT"; + st,adc-alt-channel; + io-backends = <&sd_adc2>; + }; + + channel@3 { + reg = <3>; + label = "in3"; + st,adc-channel-type = "SPI_R"; + st,adc-channel-clk-src = "CLKOUT"; + io-backends = <&sd_adc3>; + }; }; }; diff --git a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml index d40689f233f220..1caa896fce821a 100644 --- a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml @@ -37,6 +37,17 @@ description: | 3 | batt_dischrg_i 4 | ts_v + AXP717 + ------ + 0 | batt_v + 1 | ts_v + 2 | vbus_v + 3 | vsys_v + 4 | pmic_temp + 5 | batt_chrg_i + 6 | vmid_v + 7 | bkup_batt_v + AXP813 ------ 0 | pmic_temp @@ -52,6 +63,7 @@ properties: oneOf: - const: x-powers,axp209-adc - const: x-powers,axp221-adc + - const: x-powers,axp717-adc - const: x-powers,axp813-adc - items: diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml new file mode 100644 index 00000000000000..33490853497b85 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml @@ -0,0 +1,181 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/dac/adi,ltc2664.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices LTC2664 DAC + +maintainers: + - Michael Hennerich + - Kim Seer Paller + +description: | + Analog Devices LTC2664 4 channel, 12-/16-Bit, +-10V DAC + https://www.analog.com/media/en/technical-documentation/data-sheets/2664fa.pdf + +properties: + compatible: + enum: + - adi,ltc2664 + + reg: + maxItems: 1 + + spi-max-frequency: + maximum: 50000000 + + vcc-supply: + description: Analog Supply Voltage Input. + + v-pos-supply: + description: Positive Supply Voltage Input. 
+ + v-neg-supply: + description: Negative Supply Voltage Input. + + iovcc-supply: + description: Digital Input/Output Supply Voltage. + + ref-supply: + description: + Reference Input/Output. The voltage at the REF pin sets the full-scale + range of all channels. If not provided the internal reference is used and + also provided on the VREF pin. + + reset-gpios: + description: + Active-low Asynchronous Clear Input. A logic low at this level-triggered + input clears the part to the reset code and range determined by the + hardwired option chosen using the MSPAN pins. The control registers are + cleared to zero. + maxItems: 1 + + adi,manual-span-operation-config: + description: + This property must mimic the MSPAN pin configurations. By tying the MSPAN + pins (MSP2, MSP1 and MSP0) to GND and/or VCC, any output range can be + hardware-configured with different mid-scale or zero-scale reset options. + The hardware configuration is latched during power on reset for proper + operation. + 0 - MPS2=GND, MPS1=GND, MSP0=GND (+-10V, reset to 0V) + 1 - MPS2=GND, MPS1=GND, MSP0=VCC (+-5V, reset to 0V) + 2 - MPS2=GND, MPS1=VCC, MSP0=GND (+-2.5V, reset to 0V) + 3 - MPS2=GND, MPS1=VCC, MSP0=VCC (0V to 10, reset to 0V) + 4 - MPS2=VCC, MPS1=GND, MSP0=GND (0V to 10V, reset to 5V) + 5 - MPS2=VCC, MPS1=GND, MSP0=VCC (0V to 5V, reset to 0V) + 6 - MPS2=VCC, MPS1=VCC, MSP0=GND (0V to 5V, reset to 2.5V) + 7 - MPS2=VCC, MPS1=VCC, MSP0=VCC (0V to 5V, reset to 0V, enables SoftSpan) + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1, 2, 3, 4, 5, 6, 7] + default: 7 + + io-channels: + description: + ADC channel to monitor voltages and temperature at the MUXOUT pin. + maxItems: 1 + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + "^channel@[0-3]$": + $ref: dac.yaml + type: object + additionalProperties: false + + properties: + reg: + description: The channel number representing the DAC output channel. + maximum: 3 + + adi,toggle-mode: + description: + Set the channel as a toggle enabled channel. Toggle operation enables + fast switching of a DAC output between two different DAC codes without + any SPI transaction. + type: boolean + + output-range-microvolt: + description: + This property is only allowed when SoftSpan is enabled. If not present, + [0, 5000000] is the default output range. + oneOf: + - items: + - const: 0 + - enum: [5000000, 10000000] + - items: + - const: -5000000 + - const: 5000000 + - items: + - const: -10000000 + - const: 10000000 + - items: + - const: -2500000 + - const: 2500000 + + required: + - reg + + allOf: + - if: + not: + properties: + adi,manual-span-operation-config: + const: 7 + then: + patternProperties: + "^channel@[0-3]$": + properties: + output-range-microvolt: false + +required: + - compatible + - reg + - spi-max-frequency + - vcc-supply + - iovcc-supply + - v-pos-supply + - v-neg-supply + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +additionalProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + dac@0 { + compatible = "adi,ltc2664"; + reg = <0>; + spi-max-frequency = <10000000>; + + vcc-supply = <&vcc>; + iovcc-supply = <&vcc>; + ref-supply = <&vref>; + v-pos-supply = <&vpos>; + v-neg-supply = <&vneg>; + + io-channels = <&adc 0>; + + #address-cells = <1>; + #size-cells = <0>; + channel@0 { + reg = <0>; + adi,toggle-mode; + output-range-microvolt = <(-10000000) 10000000>; + }; + + channel@1 { + reg = <1>; + output-range-microvolt= <0 10000000>; + }; + }; + }; +... 
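As an illustration of the adi,manual-span-operation-config table above: a board that hard-wires MSP2 to VCC and MSP1/MSP0 to GND is option 4 (fixed 0 V to 10 V range, reset to mid-scale), and because that is not the SoftSpan option (7), the per-channel output-range-microvolt property is not allowed, per the allOf rule in this file. A rough sketch under those assumptions (supplies and the SPI controller context are placeholders):

    dac@0 {
        compatible = "adi,ltc2664";
        reg = <0>;
        spi-max-frequency = <10000000>;
        vcc-supply = <&vcc>;
        iovcc-supply = <&vcc>;
        v-pos-supply = <&vpos>;
        v-neg-supply = <&vneg>;
        /* MSP2=VCC, MSP1=GND, MSP0=GND: fixed 0V..10V, reset to 5V;
           SoftSpan disabled, so channels may not set output-range-microvolt */
        adi,manual-span-operation-config = <4>;

        #address-cells = <1>;
        #size-cells = <0>;

        channel@0 {
            reg = <0>;
        };
    };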
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml new file mode 100644 index 00000000000000..c8c434c1064348 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml @@ -0,0 +1,160 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/dac/adi,ltc2672.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices LTC2672 DAC + +maintainers: + - Michael Hennerich + - Kim Seer Paller + +description: | + Analog Devices LTC2672 5 channel, 12-/16-Bit, 300mA DAC + https://www.analog.com/media/en/technical-documentation/data-sheets/ltc2672.pdf + +properties: + compatible: + enum: + - adi,ltc2672 + + reg: + maxItems: 1 + + spi-max-frequency: + maximum: 50000000 + + vcc-supply: + description: Analog Supply Voltage Input. + + v-neg-supply: + description: Negative Supply Voltage Input. + + vdd0-supply: + description: Positive Supply Voltage Input for DAC OUT0. + + vdd1-supply: + description: Positive Supply Voltage Input for DAC OUT1. + + vdd2-supply: + description: Positive Supply Voltage Input for DAC OUT2. + + vdd3-supply: + description: Positive Supply Voltage Input for DAC OUT3. + + vdd4-supply: + description: Positive Supply Voltage Input for DAC OUT4. + + iovcc-supply: + description: Digital Input/Output Supply Voltage. + + ref-supply: + description: + Reference Input/Output. The voltage at the REF pin sets the full-scale + range of all channels. If not provided the internal reference is used and + also provided on the VREF pin. + + reset-gpios: + description: + Active Low Asynchronous Clear Input. A logic low at this level triggered + input clears the device to the default reset code and output range, which + is zero-scale with the outputs off. The control registers are cleared to + zero. + maxItems: 1 + + adi,rfsadj-ohms: + description: + If FSADJ is tied to VCC, an internal RFSADJ (20 kΩ) is selected, which + results in nominal output ranges. When an external resistor of 19 kΩ to + 41 kΩ can be used instead by connecting the resistor between FSADJ and GND + it controls the scaling of the ranges, and the internal resistor is + automatically disconnected. + minimum: 19000 + maximum: 41000 + default: 20000 + + io-channels: + description: + ADC channel to monitor voltages and currents at the MUX pin. + maxItems: 1 + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + "^channel@[0-4]$": + $ref: dac.yaml + type: object + additionalProperties: false + + properties: + reg: + description: The channel number representing the DAC output channel. + maximum: 4 + + adi,toggle-mode: + description: + Set the channel as a toggle enabled channel. Toggle operation enables + fast switching of a DAC output between two different DAC codes without + any SPI transaction. 
+ type: boolean + + output-range-microamp: + items: + - const: 0 + - enum: [3125000, 6250000, 12500000, 25000000, 50000000, 100000000, + 200000000, 300000000] + + required: + - reg + - output-range-microamp + +required: + - compatible + - reg + - spi-max-frequency + - vcc-supply + - iovcc-supply + - v-neg-supply + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +additionalProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + dac@0 { + compatible = "adi,ltc2672"; + reg = <0>; + spi-max-frequency = <10000000>; + + vcc-supply = <&vcc>; + iovcc-supply = <&vcc>; + ref-supply = <&vref>; + v-neg-supply = <&vneg>; + + io-channels = <&adc 0>; + + #address-cells = <1>; + #size-cells = <0>; + channel@0 { + reg = <0>; + adi,toggle-mode; + output-range-microamp = <0 3125000>; + }; + + channel@1 { + reg = <1>; + output-range-microamp = <0 6250000>; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/iio/dac/dac.yaml b/Documentation/devicetree/bindings/iio/dac/dac.yaml new file mode 100644 index 00000000000000..daa40724e1cf58 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/dac/dac.yaml @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/dac/dac.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: IIO Common Properties for DAC Channels + +maintainers: + - Jonathan Cameron + +description: + A few properties are defined in a common way for DAC channels. + +properties: + $nodename: + pattern: "^channel(@[0-9a-f]+)?$" + description: + A channel index should match reg. + + reg: + maxItems: 1 + + label: + description: Unique name to identify which channel this is. + + output-range-microamp: + maxItems: 2 + minItems: 2 + description: + Specify the channel output full scale range in microamperes. + + output-range-microvolt: + maxItems: 2 + minItems: 2 + description: + Specify the channel output full scale range in microvolts. + +anyOf: + - oneOf: + - required: + - reg + - output-range-microamp + - required: + - reg + - output-range-microvolt + - required: + - reg + +additionalProperties: true diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml index aa6a3193b4e035..5f950ee9aec761 100644 --- a/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml +++ b/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml @@ -17,6 +17,7 @@ description: | applications. 
https://www.analog.com/en/products/adf4377.html + https://www.analog.com/en/products/adf4378.html properties: compatible: @@ -73,6 +74,15 @@ required: allOf: - $ref: /schemas/spi/spi-peripheral-props.yaml# + - if: + properties: + compatible: + contains: + enum: + - adi,adf4378 + then: + properties: + clk2-enable-gpios: false unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml b/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml new file mode 100644 index 00000000000000..ed0ea938f7f855 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/humidity/sciosense,ens210.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ScioSense ENS210 temperature and humidity sensor + +maintainers: + - Joshua Felmeden + +description: | + Temperature and Humidity sensor. + + Datasheet: + https://www.sciosense.com/wp-content/uploads/2024/04/ENS21x-Datasheet.pdf + https://www.sciosense.com/wp-content/uploads/2023/12/ENS210-Datasheet.pdf + +properties: + compatible: + oneOf: + - items: + - enum: + - sciosense,ens210a + - sciosense,ens211 + - sciosense,ens212 + - sciosense,ens213a + - sciosense,ens215 + - const: sciosense,ens210 + - const: sciosense,ens210 + + reg: + maxItems: 1 + + vdd-supply: true + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + temperature-sensor@43 { + compatible = "sciosense,ens210"; + reg = <0x43>; + }; + }; +... + diff --git a/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml b/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml index 7de1b0e721ca29..877e955d4ebd1f 100644 --- a/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml +++ b/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml @@ -14,7 +14,9 @@ description: properties: compatible: - const: liteon,ltrf216a + enum: + - liteon,ltr308 + - liteon,ltrf216a reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml new file mode 100644 index 00000000000000..44896795c67ef3 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/light/rohm,bh1745.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ROHM BH1745 colour sensor + +maintainers: + - Mudit Sharma + +description: + BH1745 is an I2C colour sensor with red, green, blue and clear + channels. It has a programmable active low interrupt pin. + Interrupt occurs when the signal from the selected interrupt + source channel crosses set interrupt threshold high/low level. + +properties: + compatible: + const: rohm,bh1745 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + vdd-supply: true + +required: + - compatible + - reg + - vdd-supply + +additionalProperties: false + +examples: + - | + #include + i2c { + #address-cells = <1>; + #size-cells = <0>; + + colour-sensor@38 { + compatible = "rohm,bh1745"; + reg = <0x38>; + interrupt-parent = <&gpio>; + interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + vdd-supply = <&vdd>; + }; + }; + +... 
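One detail of the sciosense,ens210 schema above worth spelling out: the newer parts (ens210a, ens211, ens212, ens213a, ens215) are expected to list sciosense,ens210 as a fallback compatible. A hypothetical node for an ENS213A would therefore carry both strings (address and supply phandle are placeholders):

    temperature-sensor@43 {
        compatible = "sciosense,ens213a", "sciosense,ens210";
        reg = <0x43>;
        vdd-supply = <&vdd_1v8>;   /* optional supply from the binding */
    };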
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml deleted file mode 100644 index 30a109a1bf3b47..00000000000000 --- a/Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/iio/light/rohm,bu27034.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: ROHM BU27034 ambient light sensor - -maintainers: - - Matti Vaittinen - -description: | - ROHM BU27034 is an ambient light sesnor with 3 channels and 3 photo diodes - capable of detecting a very wide range of illuminance. Typical application - is adjusting LCD and backlight power of TVs and mobile phones. - https://fscdn.rohm.com/en/products/databook/datasheet/ic/sensor/light/bu27034nuc-e.pdf - -properties: - compatible: - const: rohm,bu27034 - - reg: - maxItems: 1 - - vdd-supply: true - -required: - - compatible - - reg - -additionalProperties: false - -examples: - - | - i2c { - #address-cells = <1>; - #size-cells = <0>; - - light-sensor@38 { - compatible = "rohm,bu27034"; - reg = <0x38>; - vdd-supply = <&vdd>; - }; - }; - -... diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml new file mode 100644 index 00000000000000..29c90ca5b25836 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/light/rohm,bu27034anuc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ROHM BU27034ANUC ambient light sensor + +maintainers: + - Matti Vaittinen + +description: | + ROHM BU27034ANUC is an ambient light sensor with 2 channels and 2 photo diodes + capable of detecting a very wide range of illuminance. Typical application + is adjusting LCD and backlight power of TVs and mobile phones. + +properties: + compatible: + const: rohm,bu27034anuc + + reg: + maxItems: 1 + + vdd-supply: true + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + light-sensor@38 { + compatible = "rohm,bu27034anuc"; + reg = <0x38>; + vdd-supply = <&vdd>; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml index f6e22dc9814a25..e4341fdced98ca 100644 --- a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml +++ b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml @@ -18,10 +18,15 @@ allOf: properties: compatible: - enum: - - sensortek,stk3310 - - sensortek,stk3311 - - sensortek,stk3335 + oneOf: + - enum: + - sensortek,stk3310 + - sensortek,stk3311 + - sensortek,stk3335 + - items: + - enum: + - sensortek,stk3013 + - const: sensortek,stk3310 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml index 9790f75fc669ef..e8ca9a2340275c 100644 --- a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml +++ b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml @@ -18,12 +18,15 @@ properties: - asahi-kasei,ak09911 - asahi-kasei,ak09912 - asahi-kasei,ak09916 + - items: + # ak09918 is register compatible with ak09912. 
+ - const: asahi-kasei,ak09918 + - const: asahi-kasei,ak09912 - enum: - ak8975 - ak8963 - ak09911 - ak09912 - - ak09916 deprecated: true reg: diff --git a/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml b/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml index 2867ab6bf9b081..a3838ab0c524ac 100644 --- a/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml +++ b/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml @@ -36,6 +36,9 @@ properties: interrupts: maxItems: 1 + mount-matrix: + description: an optional 3x3 mounting rotation matrix. + additionalProperties: false required: diff --git a/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml b/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml new file mode 100644 index 00000000000000..813239f6879af3 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/pressure/sensirion,sdp500.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: sdp500/sdp510 pressure sensor with I2C bus interface + +maintainers: + - Petar Stoykov + +description: | + Pressure sensor from Sensirion with I2C bus interface. + There is no software difference between sdp500 and sdp510. + +properties: + compatible: + oneOf: + - items: + - const: sensirion,sdp510 + - const: sensirion,sdp500 + - const: sensirion,sdp500 + + reg: + maxItems: 1 + + vdd-supply: true + +required: + - compatible + - reg + - vdd-supply + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + pressure@40 { + compatible = "sensirion,sdp500"; + reg = <0x40>; + vdd-supply = <&foo>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml b/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml new file mode 100644 index 00000000000000..7a83ceced11c4b --- /dev/null +++ b/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/proximity/awinic,aw96103.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Awinic's AW96103 capacitive proximity sensor and similar + +maintainers: + - Wang Shuaijie + +description: | + Awinic's AW96103/AW96105 proximity sensor. + The specific absorption rate (SAR) is a metric that measures + the degree of absorption of electromagnetic radiation emitted by + wireless devices, such as mobile phones and tablets, by human tissue. + In mobile phone applications, the proximity sensor is primarily + used to detect the proximity of the human body to the phone. When the + phone approaches the human body, it will actively reduce the transmit + power of the antenna to keep the SAR within a safe range. Therefore, + we also refer to the proximity sensor as a SAR sensor. + +properties: + compatible: + enum: + - awinic,aw96103 + - awinic,aw96105 + + reg: + maxItems: 1 + + interrupts: + description: + Generated by the device to announce that a close/far + proximity event has happened. 
+ maxItems: 1 + + vcc-supply: true + +required: + - compatible + - reg + - interrupts + - vcc-supply + +additionalProperties: false + +examples: + - | + #include + i2c { + #address-cells = <1>; + #size-cells = <0>; + proximity@12 { + compatible = "awinic,aw96103"; + reg = <0x12>; + interrupt-parent = <&gpio>; + interrupts = <23 IRQ_TYPE_EDGE_FALLING>; + vcc-supply = <&pp1800_prox>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml b/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml new file mode 100644 index 00000000000000..64ce8bc8bd365a --- /dev/null +++ b/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/proximity/tyhx,hx9023s.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TYHX HX9023S capacitive proximity sensor + +maintainers: + - Yasin Lee + +description: | + TYHX HX9023S proximity sensor. Datasheet can be found here: + http://www.tianyihexin.com/ueditor/php/upload/file/20240614/1718336303992081.pdf + +properties: + compatible: + const: tyhx,hx9023s + + reg: + maxItems: 1 + + interrupts: + description: + Generated by device to announce preceding read request has finished + and data is available or that a close/far proximity event has happened. + maxItems: 1 + + vdd-supply: true + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + +patternProperties: + "^channel@[0-4]$": + $ref: /schemas/iio/adc/adc.yaml + type: object + unevaluatedProperties: false + + properties: + reg: + minimum: 0 + maximum: 4 + description: The channel number. + +required: + - compatible + - reg + - vdd-supply + +unevaluatedProperties: false + +examples: + - | + #include + i2c { + #address-cells = <1>; + #size-cells = <0>; + proximity@2a { + compatible = "tyhx,hx9023s"; + reg = <0x2a>; + interrupt-parent = <&pio>; + interrupts = <16 IRQ_TYPE_EDGE_FALLING>; + vdd-supply = <&pp1800_prox>; + + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { + reg = <0>; + single-channel = <0>; + }; + channel@1 { + reg = <1>; + single-channel = <1>; + }; + channel@2 { + reg = <2>; + single-channel = <2>; + }; + channel@3 { + reg = <3>; + diff-channels = <1 0>; + }; + channel@4 { + reg = <4>; + diff-channels = <2 0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/incomplete-devices.yaml b/Documentation/devicetree/bindings/incomplete-devices.yaml index cfc1d39441b160..4bb6c0141e9f73 100644 --- a/Documentation/devicetree/bindings/incomplete-devices.yaml +++ b/Documentation/devicetree/bindings/incomplete-devices.yaml @@ -35,40 +35,184 @@ properties: - description: Legacy compatibles used on Macintosh devices enum: + - AAPL,3500 + - AAPL,7500 + - AAPL,8500 + - AAPL,9500 + - AAPL,accelerometer_1 + - AAPL,e411 + - AAPL,Gossamer + - AAPL,PowerBook1998 + - AAPL,ShinerESB - adm1030 + - amd-0137 + - B5221 - bmac+ + - burgundy + - cobalt + - cy28508 + - daca + - fcu + - gatwick + - gmac + - heathrow + - heathrow-ata - heathrow-media-bay + - i2sbus + - i2s-modem + - iMac + - K2-GMAC + - k2-i2c + - K2-Keylargo + - K2-UATA + - kauai-ata + - Keylargo + - keylargo-ata - keylargo-media-bay - lm87cimt - MAC,adm1030 - MAC,ds1775 + - MacRISC + - MacRISC2 + - MacRISC3 + - MacRISC4 - max6690 + - ohare - ohare-media-bay - ohare-swim3 + - PowerBook1,1 + - PowerBook2,1 + - PowerBook2,2 + - PowerBook3,1 + - PowerBook3,2 + - PowerBook3,3 + - PowerBook3,4 + - PowerBook3,5 + - PowerBook4,1 + - 
PowerBook4,2 + - PowerBook4,3 + - PowerBook5,1 + - PowerBook5,2 + - PowerBook5,3 + - PowerBook5,4 + - PowerBook5,5 + - PowerBook5,6 + - PowerBook5,7 + - PowerBook5,8 + - PowerBook5,9 + - PowerBook6,3 + - PowerBook6,5 + - PowerBook6,7 + - PowerMac10,1 + - PowerMac10,2 + - PowerMac1,1 + - PowerMac11,2 + - PowerMac12,1 + - PowerMac2,1 + - PowerMac2,2 + - PowerMac3,1 + - PowerMac3,4 + - PowerMac3,5 + - PowerMac3,6 + - PowerMac4,1 + - PowerMac4,2 + - PowerMac4,4 + - PowerMac4,5 + - PowerMac7,2 + - PowerMac7,3 + - PowerMac8,1 + - PowerMac8,2 + - PowerMac9,1 + - paddington + - RackMac1,1 + - RackMac1,2 + - RackMac3,1 + - screamer + - shasta-ata + - sms + - smu-rpm-fans - smu-sat + - smu-sensors + - snapper - swim3 + - tumbler + - u3-agp + - u3-dart + - u3-ht + - u4-dart + - u4-pcie + - U4-pcie + - uni-n-i2c + - uni-north - description: Legacy compatibles used on other PowerPC devices enum: + - 1682m-gizmo + - 1682m-gpio - 1682m-rng + - 1682m-sdc + - amcc,ppc440epx-rng + - amcc,ppc460ex-bcsr + - amcc,ppc460ex-crypto + - amcc,ppc460ex-rng + - amcc,ppc460sx-crypto + - amcc,ppc4xx-crypto + - amcc,sata-460ex + - CBEA,platform-open-pic + - CBEA,platform-spider-pic + - direct-mapped + - display + - gpio-mdio + - hawk-bridge + - hawk-pci + - IBM,CBEA - IBM,lhca - IBM,lhea - IBM,lhea-ethernet + - ibm,axon-msic + - Momentum,Apache + - Momentum,Maple + - mai-logic,articia-s + - mpc10x-pci - mpc5200b-fec-phy - mpc5200-serial - mpc5200-sram + - nintendo,flipper + - nintendo,flipper-exi + - nintendo,flipper-pi + - nintendo,flipper-pic + - nintendo,hollywood + - nintendo,hollywood-pic + - nintendo,latte-exi + - nintendo,latte-srnprot - ohci-be - ohci-bigendian - ohci-le + - PA6T-1682M + - pasemi,1682m-iob + - pasemi,localbus + - pasemi,localbus-nand + - pasemi,nemo + - pasemi,pwrficient + - pasemi,pwrficient-rng + - pasemi,rootbus + - pasemi,sdc + - soc + - sony,ps3 + - sti,platform-spider-pic - description: Legacy compatibles used on SPARC devices enum: - bq4802 - ds1287 + - i2cpcf,8584 - isa-m5819p - isa-m5823p - m5819 + - qcn - sab82532 + - su + - sun4v - SUNW,bbc-beep - SUNW,bbc-i2c - SUNW,CS4231 @@ -96,9 +240,13 @@ properties: - compat1 - compat2 - compat3 + - gpio-mockup + - gpio-simulator + - gpio-virtuser - linux,spi-loopback-test - mailbox-test - regulator-virtual-consumer + - test-device - description: Devices on MIPS platform, without any DTS users. These are diff --git a/Documentation/devicetree/bindings/input/adi,adp5588.yaml b/Documentation/devicetree/bindings/input/adi,adp5588.yaml index 26ea66834ae249..336bc352579a27 100644 --- a/Documentation/devicetree/bindings/input/adi,adp5588.yaml +++ b/Documentation/devicetree/bindings/input/adi,adp5588.yaml @@ -49,7 +49,10 @@ properties: interrupt-controller: description: This property applies if either keypad,num-rows lower than 8 or - keypad,num-columns lower than 10. + keypad,num-columns lower than 10. This property is optional if + keypad,num-rows or keypad,num-columns are not specified as the + device is then configured to be used purely for gpio during which + interrupts may or may not be utilized. 
'#interrupt-cells': const: 2 @@ -65,13 +68,23 @@ properties: minItems: 1 maxItems: 2 +dependencies: + keypad,num-rows: + - linux,keymap + - keypad,num-columns + keypad,num-columns: + - linux,keymap + - keypad,num-rows + linux,keymap: + - keypad,num-rows + - keypad,num-columns + - interrupts + interrupt-controller: + - interrupts + required: - compatible - reg - - interrupts - - keypad,num-rows - - keypad,num-columns - - linux,keymap unevaluatedProperties: false @@ -108,4 +121,19 @@ examples: >; }; }; + + - | + #include + i2c { + #address-cells = <1>; + #size-cells = <0>; + gpio@34 { + compatible = "adi,adp5588"; + reg = <0x34>; + + #gpio-cells = <2>; + gpio-controller; + }; + }; + ... diff --git a/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml b/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml new file mode 100644 index 00000000000000..a0d2460c55ab6f --- /dev/null +++ b/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/cirrus,ep9307-keypad.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus ep93xx keypad + +maintainers: + - Alexander Sverdlin + +allOf: + - $ref: /schemas/input/matrix-keymap.yaml# + +description: + The KPP is designed to interface with a keypad matrix with 2-point contact + or 3-point contact keys. The KPP is designed to simplify the software task + of scanning a keypad matrix. The KPP is capable of detecting, debouncing, + and decoding one or multiple keys pressed simultaneously on a keypad. + +properties: + compatible: + oneOf: + - const: cirrus,ep9307-keypad + - items: + - enum: + - cirrus,ep9312-keypad + - cirrus,ep9315-keypad + - const: cirrus,ep9307-keypad + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + + debounce-delay-ms: + description: | + Time in microseconds that key must be pressed or + released for state change interrupt to trigger. 
+ + cirrus,prescale: + description: row/column counter pre-scaler load value + $ref: /schemas/types.yaml#/definitions/uint16 + maximum: 1023 + +required: + - compatible + - reg + - interrupts + - clocks + - linux,keymap + +unevaluatedProperties: false + +examples: + - | + #include + #include + keypad@800f0000 { + compatible = "cirrus,ep9307-keypad"; + reg = <0x800f0000 0x0c>; + interrupt-parent = <&vic0>; + interrupts = <29>; + clocks = <&eclk EP93XX_CLK_KEYPAD>; + pinctrl-names = "default"; + pinctrl-0 = <&keypad_default_pins>; + linux,keymap = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + }; diff --git a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml index a62916d07a08c7..cb3e1801b0d3f5 100644 --- a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml +++ b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml @@ -23,7 +23,9 @@ properties: - enum: - elan,ekth5015m - const: elan,ekth6915 - - const: elan,ekth6915 + - enum: + - elan,ekth6915 + - elan,ekth6a12nay reg: const: 0x10 diff --git a/Documentation/devicetree/bindings/input/qcom,pm8xxx-vib.yaml b/Documentation/devicetree/bindings/input/qcom,pm8xxx-vib.yaml index 2025d6a5423e28..76a286ec595947 100644 --- a/Documentation/devicetree/bindings/input/qcom,pm8xxx-vib.yaml +++ b/Documentation/devicetree/bindings/input/qcom,pm8xxx-vib.yaml @@ -19,6 +19,7 @@ properties: - qcom,pmi632-vib - items: - enum: + - qcom,pm6150-vib - qcom,pm7250b-vib - qcom,pm7325b-vib - qcom,pm7550ba-vib diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.txt b/Documentation/devicetree/bindings/input/rotary-encoder.txt deleted file mode 100644 index a644408b33b8f1..00000000000000 --- a/Documentation/devicetree/bindings/input/rotary-encoder.txt +++ /dev/null @@ -1,50 +0,0 @@ -Rotary encoder DT bindings - -Required properties: -- gpios: a spec for at least two GPIOs to be used, most significant first - -Optional properties: -- linux,axis: the input subsystem axis to map to this rotary encoder. - Defaults to 0 (ABS_X / REL_X) -- rotary-encoder,steps: Number of steps in a full turnaround of the - encoder. Only relevant for absolute axis. Defaults to 24 which is a - typical value for such devices. -- rotary-encoder,relative-axis: register a relative axis rather than an - absolute one. Relative axis will only generate +1/-1 events on the input - device, hence no steps need to be passed. -- rotary-encoder,rollover: Automatic rollover when the rotary value becomes - greater than the specified steps or smaller than 0. For absolute axis only. -- rotary-encoder,steps-per-period: Number of steps (stable states) per period. - The values have the following meaning: - 1: Full-period mode (default) - 2: Half-period mode - 4: Quarter-period mode -- wakeup-source: Boolean, rotary encoder can wake up the system. -- rotary-encoder,encoding: String, the method used to encode steps. - Supported are "gray" (the default and more common) and "binary". - -Deprecated properties: -- rotary-encoder,half-period: Makes the driver work on half-period mode. - This property is deprecated. Instead, a 'steps-per-period ' value should - be used, such as "rotary-encoder,steps-per-period = <2>". - -See Documentation/input/devices/rotary-encoder.rst for more information. 
- -Example: - - rotary@0 { - compatible = "rotary-encoder"; - gpios = <&gpio 19 1>, <&gpio 20 0>; /* GPIO19 is inverted */ - linux,axis = <0>; /* REL_X */ - rotary-encoder,encoding = "gray"; - rotary-encoder,relative-axis; - }; - - rotary@1 { - compatible = "rotary-encoder"; - gpios = <&gpio 21 0>, <&gpio 22 0>; - linux,axis = <1>; /* ABS_Y */ - rotary-encoder,steps = <24>; - rotary-encoder,encoding = "binary"; - rotary-encoder,rollover; - }; diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.yaml b/Documentation/devicetree/bindings/input/rotary-encoder.yaml new file mode 100644 index 00000000000000..e315aab7f584f0 --- /dev/null +++ b/Documentation/devicetree/bindings/input/rotary-encoder.yaml @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/rotary-encoder.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rotary encoder + +maintainers: + - Frank Li + +description: + See Documentation/input/devices/rotary-encoder.rst for more information. + +properties: + compatible: + const: rotary-encoder + + gpios: + minItems: 2 + + linux,axis: + default: 0 + description: + the input subsystem axis to map to this rotary encoder. + Defaults to 0 (ABS_X / REL_X) + + rotary-encoder,steps: + $ref: /schemas/types.yaml#/definitions/uint32 + default: 24 + description: + Number of steps in a full turnaround of the + encoder. Only relevant for absolute axis. Defaults to 24 which is a + typical value for such devices. + + rotary-encoder,relative-axis: + $ref: /schemas/types.yaml#/definitions/flag + description: + register a relative axis rather than an + absolute one. Relative axis will only generate +1/-1 events on the input + device, hence no steps need to be passed. + + rotary-encoder,rollover: + $ref: /schemas/types.yaml#/definitions/int32 + description: + Automatic rollover when the rotary value becomes + greater than the specified steps or smaller than 0. For absolute axis only. + + rotary-encoder,steps-per-period: + $ref: /schemas/types.yaml#/definitions/uint32 + default: 1 + enum: [1, 2, 4] + description: | + Number of steps (stable states) per period. + The values have the following meaning: + 1: Full-period mode (default) + 2: Half-period mode + 4: Quarter-period mode + + wakeup-source: true + + rotary-encoder,encoding: + $ref: /schemas/types.yaml#/definitions/string + description: the method used to encode steps. + enum: [gray, binary] + + rotary-encoder,half-period: + $ref: /schemas/types.yaml#/definitions/flag + deprecated: true + description: + Makes the driver work on half-period mode. + This property is deprecated. Instead, a 'steps-per-period ' value should + be used, such as "rotary-encoder,steps-per-period = <2>". 
+ +required: + - compatible + - gpios + +additionalProperties: false + +examples: + - | + rotary { + compatible = "rotary-encoder"; + gpios = <&gpio 19 1>, <&gpio 20 0>; /* GPIO19 is inverted */ + linux,axis = <0>; /* REL_X */ + rotary-encoder,encoding = "gray"; + rotary-encoder,relative-axis; + }; + diff --git a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt deleted file mode 100644 index afa38dc069f018..00000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt +++ /dev/null @@ -1,71 +0,0 @@ -* Analog Devices AD7879(-1)/AD7889(-1) touchscreen interface (SPI/I2C) - -Required properties: -- compatible : for SPI slave, use "adi,ad7879" - for I2C slave, use "adi,ad7879-1" -- reg : SPI chipselect/I2C slave address - See spi-bus.txt for more SPI slave properties -- interrupts : touch controller interrupt -- touchscreen-max-pressure : maximum reported pressure -- adi,resistance-plate-x : total resistance of X-plate (for pressure - calculation) -Optional properties: -- touchscreen-swapped-x-y : X and Y axis are swapped (boolean) -- adi,first-conversion-delay : 0-12: In 128us steps (starting with 128us) - 13 : 2.560ms - 14 : 3.584ms - 15 : 4.096ms - This property has to be a '/bits/ 8' value -- adi,acquisition-time : 0: 2us - 1: 4us - 2: 8us - 3: 16us - This property has to be a '/bits/ 8' value -- adi,median-filter-size : 0: disabled - 1: 4 measurements - 2: 8 measurements - 3: 16 measurements - This property has to be a '/bits/ 8' value -- adi,averaging : 0: 2 middle values (1 if median disabled) - 1: 4 middle values - 2: 8 middle values - 3: 16 values - This property has to be a '/bits/ 8' value -- adi,conversion-interval: : 0 : convert one time only - 1-255: 515us + val * 35us (up to 9.440ms) - This property has to be a '/bits/ 8' value -- gpio-controller : Switch AUX/VBAT/GPIO pin to GPIO mode - -Example: - - touchscreen0@2c { - compatible = "adi,ad7879-1"; - reg = <0x2c>; - interrupt-parent = <&gpio1>; - interrupts = <13 IRQ_TYPE_EDGE_FALLING>; - touchscreen-max-pressure = <4096>; - adi,resistance-plate-x = <120>; - adi,first-conversion-delay = /bits/ 8 <3>; - adi,acquisition-time = /bits/ 8 <1>; - adi,median-filter-size = /bits/ 8 <2>; - adi,averaging = /bits/ 8 <1>; - adi,conversion-interval = /bits/ 8 <255>; - }; - - touchscreen1@1 { - compatible = "adi,ad7879"; - spi-max-frequency = <5000000>; - reg = <1>; - spi-cpol; - spi-cpha; - gpio-controller; - interrupt-parent = <&gpio1>; - interrupts = <13 IRQ_TYPE_EDGE_FALLING>; - touchscreen-max-pressure = <4096>; - adi,resistance-plate-x = <120>; - adi,first-conversion-delay = /bits/ 8 <3>; - adi,acquisition-time = /bits/ 8 <1>; - adi,median-filter-size = /bits/ 8 <2>; - adi,averaging = /bits/ 8 <1>; - adi,conversion-interval = /bits/ 8 <255>; - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml b/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml new file mode 100644 index 00000000000000..caa5fa3cc3f1fb --- /dev/null +++ b/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/touchscreen/adi,ad7879.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices AD7879(-1)/AD7889(-1) touchscreen interface (SPI/I2C) + +maintainers: + - Frank Li + +properties: + compatible: + description: | + for SPI slave, use 
"adi,ad7879" + for I2C slave, use "adi,ad7879-1" + enum: + - adi,ad7879 + - adi,ad7879-1 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + touchscreen-max-pressure: + $ref: /schemas/types.yaml#/definitions/uint32 + description: maximum reported pressure + + adi,resistance-plate-x: + $ref: /schemas/types.yaml#/definitions/uint32 + description: total resistance of X-plate (for pressure calculation) + + touchscreen-swapped-x-y: + $ref: /schemas/types.yaml#/definitions/flag + description: X and Y axis are swapped (boolean) + + adi,first-conversion-delay: + $ref: /schemas/types.yaml#/definitions/uint8 + default: 0 + minimum: 0 + maximum: 15 + description: | + 0-12: In 128us steps (starting with 128us) + 13 : 2.560ms + 14 : 3.584ms + 15 : 4.096ms + This property has to be a '/bits/ 8' value + + adi,acquisition-time: + $ref: /schemas/types.yaml#/definitions/uint8 + default: 0 + enum: [0, 1, 2, 3] + description: | + 0: 2us + 1: 4us + 2: 8us + 3: 16us + This property has to be a '/bits/ 8' value + + adi,median-filter-size: + $ref: /schemas/types.yaml#/definitions/uint8 + default: 0 + enum: [0, 1, 2, 3] + description: | + 0: disabled + 1: 4 measurements + 2: 8 measurements + 3: 16 measurements + This property has to be a '/bits/ 8' value + + adi,averaging: + $ref: /schemas/types.yaml#/definitions/uint8 + default: 0 + enum: [0, 1, 2, 3] + description: | + 0: 2 middle values (1 if median disabled) + 1: 4 middle values + 2: 8 middle values + 3: 16 values + This property has to be a '/bits/ 8' value + + adi,conversion-interval: + $ref: /schemas/types.yaml#/definitions/uint8 + default: 0 + description: | + 0 : convert one time only + 1-255: 515us + val * 35us (up to 9.440ms) + This property has to be a '/bits/ 8' value + + gpio-controller: true + + "#gpio-cells": + const: 1 + +required: + - compatible + - reg + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml + +unevaluatedProperties: false + +examples: + - | + #include + i2c { + #address-cells = <1>; + #size-cells = <0>; + + touchscreen0@2c { + compatible = "adi,ad7879-1"; + reg = <0x2c>; + interrupt-parent = <&gpio1>; + interrupts = <13 IRQ_TYPE_EDGE_FALLING>; + touchscreen-max-pressure = <4096>; + adi,resistance-plate-x = <120>; + adi,first-conversion-delay = /bits/ 8 <3>; + adi,acquisition-time = /bits/ 8 <1>; + adi,median-filter-size = /bits/ 8 <2>; + adi,averaging = /bits/ 8 <1>; + adi,conversion-interval = /bits/ 8 <255>; + }; + }; + + - | + #include + spi { + #address-cells = <1>; + #size-cells = <0>; + + touchscreen1@1 { + compatible = "adi,ad7879"; + reg = <1>; + spi-max-frequency = <5000000>; + gpio-controller; + #gpio-cells = <1>; + interrupt-parent = <&gpio1>; + interrupts = <13 IRQ_TYPE_EDGE_FALLING>; + touchscreen-max-pressure = <4096>; + adi,resistance-plate-x = <120>; + adi,first-conversion-delay = /bits/ 8 <3>; + adi,acquisition-time = /bits/ 8 <1>; + adi,median-filter-size = /bits/ 8 <2>; + adi,averaging = /bits/ 8 <1>; + adi,conversion-interval = /bits/ 8 <255>; + }; + }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt b/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt deleted file mode 100644 index 399c87782935c0..00000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt +++ /dev/null @@ -1,107 +0,0 @@ -Device tree bindings for TI's ADS7843, ADS7845, ADS7846, ADS7873, TSC2046 -SPI driven touch screen controllers. 
- -The node for this driver must be a child node of a SPI controller, hence -all mandatory properties described in - - Documentation/devicetree/bindings/spi/spi-bus.txt - -must be specified. - -Additional required properties: - - compatible Must be one of the following, depending on the - model: - "ti,tsc2046" - "ti,ads7843" - "ti,ads7845" - "ti,ads7846" - "ti,ads7873" - - interrupts An interrupt node describing the IRQ line the chip's - !PENIRQ pin is connected to. - vcc-supply A regulator node for the supply voltage. - - -Optional properties: - - ti,vref-delay-usecs vref supply delay in usecs, 0 for - external vref (u16). - ti,vref-mv The VREF voltage, in millivolts (u16). - Set to 0 to use internal references - (ADS7846). - ti,keep-vref-on set to keep vref on for differential - measurements as well - ti,settle-delay-usec Settling time of the analog signals; - a function of Vcc and the capacitance - on the X/Y drivers. If set to non-zero, - two samples are taken with settle_delay - us apart, and the second one is used. - ~150 uSec with 0.01uF caps (u16). - ti,penirq-recheck-delay-usecs If set to non-zero, after samples are - taken this delay is applied and penirq - is rechecked, to help avoid false - events. This value is affected by the - material used to build the touch layer - (u16). - ti,x-plate-ohms Resistance of the X-plate, - in Ohms (u16). - ti,y-plate-ohms Resistance of the Y-plate, - in Ohms (u16). - ti,x-min Minimum value on the X axis (u16). - ti,y-min Minimum value on the Y axis (u16). - ti,debounce-tol Tolerance used for filtering (u16). - ti,debounce-rep Additional consecutive good readings - required after the first two (u16). - ti,pendown-gpio-debounce Platform specific debounce time for the - pendown-gpio (u32). - pendown-gpio GPIO handle describing the pin the !PENIRQ - line is connected to. - ti,hsync-gpios GPIO line to poll for hsync - wakeup-source use any event on touchscreen as wakeup event. - (Legacy property support: "linux,wakeup") - touchscreen-size-x General touchscreen binding, see [1]. - touchscreen-size-y General touchscreen binding, see [1]. - touchscreen-max-pressure General touchscreen binding, see [1]. - touchscreen-min-pressure General touchscreen binding, see [1]. - touchscreen-average-samples General touchscreen binding, see [1]. - touchscreen-inverted-x General touchscreen binding, see [1]. - touchscreen-inverted-y General touchscreen binding, see [1]. - touchscreen-swapped-x-y General touchscreen binding, see [1]. - -[1] All general touchscreen properties are described in - Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt. - -Deprecated properties: - - ti,swap-xy swap x and y axis - ti,x-max Maximum value on the X axis (u16). - ti,y-max Maximum value on the Y axis (u16). - ti,pressure-min Minimum reported pressure value - (threshold) - u16. - ti,pressure-max Maximum reported pressure value (u16). - ti,debounce-max Max number of additional readings per - sample (u16). 
- -Example for a TSC2046 chip connected to an McSPI controller of an OMAP SoC:: - - spi_controller { - tsc2046@0 { - reg = <0>; /* CS0 */ - compatible = "ti,tsc2046"; - interrupt-parent = <&gpio1>; - interrupts = <8 0>; /* BOOT6 / GPIO 8 */ - spi-max-frequency = <1000000>; - pendown-gpio = <&gpio1 8 0>; - vcc-supply = <®_vcc3>; - - ti,x-min = /bits/ 16 <0>; - ti,x-max = /bits/ 16 <8000>; - ti,y-min = /bits/ 16 <0>; - ti,y-max = /bits/ 16 <4800>; - ti,x-plate-ohms = /bits/ 16 <40>; - ti,pressure-max = /bits/ 16 <255>; - - wakeup-source; - }; - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml index 8cf371b99f19cd..e4dbbafb377990 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml @@ -666,7 +666,7 @@ examples: #address-cells = <1>; #size-cells = <0>; - touch@56 { + touchscreen@56 { compatible = "azoteq,iqs7210a"; reg = <0x56>; irq-gpios = <&gpio 4 GPIO_ACTIVE_LOW>; @@ -704,7 +704,7 @@ examples: #address-cells = <1>; #size-cells = <0>; - touch@56 { + touchscreen@56 { compatible = "azoteq,iqs7211e"; reg = <0x56>; irq-gpios = <&gpio 4 (GPIO_ACTIVE_LOW | diff --git a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt deleted file mode 100644 index ca304357c374ad..00000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt +++ /dev/null @@ -1,34 +0,0 @@ -* Toradex Colibri VF50 Touchscreen driver - -Required Properties: -- compatible must be toradex,vf50-touchscreen -- io-channels: adc channels being used by the Colibri VF50 module - IIO ADC for Y-, X-, Y+, X+ connections -- xp-gpios: FET gate driver for input of X+ -- xm-gpios: FET gate driver for input of X- -- yp-gpios: FET gate driver for input of Y+ -- ym-gpios: FET gate driver for input of Y- -- interrupts: pen irq interrupt for touch detection, signal from X plate -- pinctrl-names: "idle", "default" -- pinctrl-0: pinctrl node for pen/touch detection, pinctrl must provide - pull-up resistor on X+, X-. 
-- pinctrl-1: pinctrl node for X/Y and pressure measurement (ADC) state pinmux -- vf50-ts-min-pressure: pressure level at which to stop measuring X/Y values - -Example: - - touchctrl: vf50_touchctrl { - compatible = "toradex,vf50-touchscreen"; - io-channels = <&adc1 0>,<&adc0 0>, - <&adc0 1>,<&adc1 2>; - xp-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>; - xm-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>; - yp-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>; - ym-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>; - interrupt-parent = <&gpio0>; - interrupts = <8 IRQ_TYPE_LEVEL_LOW>; - pinctrl-names = "idle","default"; - pinctrl-0 = <&pinctrl_touchctrl_idle>, <&pinctrl_touchctrl_gpios>; - pinctrl-1 = <&pinctrl_touchctrl_default>, <&pinctrl_touchctrl_gpios>; - vf50-ts-min-pressure = <200>; - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml index 51d48d4130d380..70a922e213f2a6 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml @@ -126,7 +126,7 @@ examples: i2c { #address-cells = <1>; #size-cells = <0>; - edt-ft5x06@38 { + touchscreen@38 { compatible = "edt,edt-ft5406"; reg = <0x38>; interrupt-parent = <&gpio2>; diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml index 2a2d86cfd10487..eb4992f708b70f 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml @@ -69,7 +69,7 @@ examples: i2c { #address-cells = <1>; #size-cells = <0>; - gt928@5d { + touchscreen@5d { compatible = "goodix,gt928"; reg = <0x5d>; interrupt-parent = <&gpio>; diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml b/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml new file mode 100644 index 00000000000000..604921733d2c71 --- /dev/null +++ b/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml @@ -0,0 +1,183 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/touchscreen/ti,ads7843.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI's SPI driven touch screen controllers + +maintainers: + - Alexander Stein + - Dmitry Torokhov + - Marek Vasut + +description: + TI's ADS7843, ADS7845, ADS7846, ADS7873, TSC2046 SPI driven touch screen + controllers. + +properties: + compatible: + enum: + - ti,ads7843 + - ti,ads7845 + - ti,ads7846 + - ti,ads7873 + - ti,tsc2046 + + interrupts: + maxItems: 1 + + pendown-gpio: + maxItems: 1 + description: + GPIO handle describing the pin the !PENIRQ line is connected to. + + vcc-supply: + description: + A regulator node for the supply voltage. + + wakeup-source: true + + ti,debounce-max: + deprecated: true + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Max number of additional readings per sample. + + ti,debounce-rep: + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Additional consecutive good readings required after the first two. + + ti,debounce-tol: + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Tolerance used for filtering. + + ti,hsync-gpios: + maxItems: 1 + description: + GPIO line to poll for hsync. + + ti,keep-vref-on: + $ref: /schemas/types.yaml#/definitions/flag + description: + Set to keep Vref on for differential measurements as well. 
+ + ti,pendown-gpio-debounce: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Platform specific debounce time for the pendown-gpio. + + ti,penirq-recheck-delay-usecs: + $ref: /schemas/types.yaml#/definitions/uint16 + description: + If set to non-zero, after samples are taken this delay is applied and + penirq is rechecked, to help avoid false events. This value is + affected by the material used to build the touch layer. + + ti,pressure-max: + deprecated: true + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Maximum reported pressure value. + + ti,pressure-min: + deprecated: true + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Minimum reported pressure value (threshold). + + ti,settle-delay-usec: + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Settling time of the analog signals; a function of Vcc and the + capacitance on the X/Y drivers. If set to non-zero, two samples are + taken with settle_delay us apart, and the second one is used. ~150 + uSec with 0.01uF caps. + + ti,swap-xy: + deprecated: true + $ref: /schemas/types.yaml#/definitions/flag + description: + Swap x and y axis. + + ti,vref-delay-usecs: + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Vref supply delay in usecs, 0 for external Vref. + + ti,vref-mv: + $ref: /schemas/types.yaml#/definitions/uint16 + description: + The VREF voltage, in millivolts. + Set to 0 to use internal references (ADS7846). + + ti,x-plate-ohms: + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Resistance of the X-plate, in Ohms. + + ti,x-max: + deprecated: true + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Maximum value on the X axis. + + ti,x-min: + deprecated: true + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Minimum value on the X axis. + + ti,y-plate-ohms: + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Resistance of the Y-plate, in Ohms. + + ti,y-max: + deprecated: true + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Maximum value on the Y axis. + + ti,y-min: + deprecated: true + $ref: /schemas/types.yaml#/definitions/uint16 + description: + Minimum value on the Y axis. 
+ +required: + - compatible + - reg + +allOf: + - $ref: touchscreen.yaml# + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false + +examples: + - | + spi{ + #address-cells = <1>; + #size-cells = <0>; + + touchscreen@0 { + compatible = "ti,tsc2046"; + reg = <0>; /* CS0 */ + interrupt-parent = <&gpio1>; + interrupts = <8 0>; /* BOOT6 / GPIO 8 */ + pendown-gpio = <&gpio1 8 0>; + spi-max-frequency = <1000000>; + vcc-supply = <®_vcc3>; + wakeup-source; + + ti,pressure-max = /bits/ 16 <255>; + ti,x-max = /bits/ 16 <8000>; + ti,x-min = /bits/ 16 <0>; + ti,x-plate-ohms = /bits/ 16 <40>; + ti,y-max = /bits/ 16 <4800>; + ti,y-min = /bits/ 16 <0>; + }; + }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml new file mode 100644 index 00000000000000..5094c5183c7496 --- /dev/null +++ b/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/touchscreen/toradex,vf50-touchscreen.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Toradex Colibri VF50 Touchscreen + +maintainers: + - Dmitry Torokhov + - Sanchayan Maity + +properties: + compatible: + const: toradex,vf50-touchscreen + + interrupts: + maxItems: 1 + + io-channels: + maxItems: 4 + description: + adc channels being used by the Colibri VF50 module + IIO ADC for Y-, X-, Y+, X+ connections + + xp-gpios: + description: FET gate driver for input of X+ + + xm-gpios: + description: FET gate driver for input of X- + + yp-gpios: + description: FET gate driver for input of Y+ + + ym-gpios: + description: FET gate driver for input of Y- + + vf50-ts-min-pressure: + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 50 + maximum: 2000 + description: pressure level at which to stop measuring X/Y values + +required: + - compatible + - io-channels + - xp-gpios + - xm-gpios + - yp-gpios + - ym-gpios + - interrupts + - vf50-ts-min-pressure + +allOf: + - $ref: touchscreen.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + #include + + touchscreen { + compatible = "toradex,vf50-touchscreen"; + interrupt-parent = <&gpio0>; + interrupts = <8 IRQ_TYPE_LEVEL_LOW>; + io-channels = <&adc1 0>, <&adc0 0>, <&adc0 1>, <&adc1 2>; + xp-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>; + xm-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>; + yp-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>; + ym-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>; + pinctrl-names = "idle", "default"; + pinctrl-0 = <&pinctrl_touchctrl_idle>, <&pinctrl_touchctrl_gpios>; + pinctrl-1 = <&pinctrl_touchctrl_default>, <&pinctrl_touchctrl_gpios>; + vf50-ts-min-pressure = <200>; + }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml index b1507463a03e99..3f663ce3e44ece 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml @@ -16,6 +16,7 @@ maintainers: allOf: - $ref: touchscreen.yaml# + - $ref: ../input.yaml# properties: $nodename: @@ -79,6 +80,15 @@ properties: $ref: /schemas/types.yaml#/definitions/uint32 enum: [1, 2] + linux,keycodes: + description: + This property specifies an array of keycodes assigned to the + touch-keys that can be present in some touchscreen configurations. 
+ If the touch-keys are enabled, controller firmware will assign some + touch sense lines to those keys. + minItems: 1 + maxItems: 8 + touchscreen-size-x: true touchscreen-size-y: true touchscreen-fuzz-x: true diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml index fd15ab5014fb58..4b08be72bbd7da 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml @@ -4,14 +4,14 @@ $id: http://devicetree.org/schemas/interconnect/qcom,msm8939.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm MSM8939 Network-On-Chip interconnect +title: Qualcomm MSM8937/MSM8939/MSM8976 Network-On-Chip interconnect maintainers: - Konrad Dybcio -description: | - The Qualcomm MSM8939 interconnect providers support adjusting the - bandwidth requirements between the various NoC fabrics. +description: + The Qualcomm MSM8937/MSM8939/MSM8976 interconnect providers support + adjusting the bandwidth requirements between the various NoC fabrics. allOf: - $ref: qcom,rpm-common.yaml# @@ -19,9 +19,15 @@ allOf: properties: compatible: enum: + - qcom,msm8937-bimc + - qcom,msm8937-pcnoc + - qcom,msm8937-snoc - qcom,msm8939-bimc - qcom,msm8939-pcnoc - qcom,msm8939-snoc + - qcom,msm8976-bimc + - qcom,msm8976-pcnoc + - qcom,msm8976-snoc reg: maxItems: 1 @@ -39,7 +45,10 @@ patternProperties: properties: compatible: - const: qcom,msm8939-snoc-mm + enum: + - qcom,msm8937-snoc-mm + - qcom,msm8939-snoc-mm + - qcom,msm8976-snoc-mm required: - compatible @@ -60,12 +69,6 @@ examples: compatible = "qcom,msm8939-snoc"; reg = <0x00580000 0x14000>; #interconnect-cells = <1>; - }; - - bimc: interconnect@400000 { - compatible = "qcom,msm8939-bimc"; - reg = <0x00400000 0x62000>; - #interconnect-cells = <1>; snoc_mm: interconnect-snoc { compatible = "qcom,msm8939-snoc-mm"; diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml index 732e9fa001a4c7..343ff62d7b65be 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml @@ -13,8 +13,7 @@ description: | The Qualcomm MSM8953 interconnect providers support adjusting the bandwidth requirements between the various NoC fabrics. 
- See also: - - dt-bindings/interconnect/qcom,msm8953.h + See also: include/dt-bindings/interconnect/qcom,msm8953.h properties: compatible: diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml index 2cd1f5590fd9bb..189f5900ee50d6 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml @@ -26,6 +26,7 @@ properties: - items: - enum: - qcom,qcm2290-cpu-bwmon + - qcom,sa8775p-cpu-bwmon - qcom,sc7180-cpu-bwmon - qcom,sc7280-cpu-bwmon - qcom,sc8280xp-cpu-bwmon @@ -39,6 +40,7 @@ properties: - const: qcom,sdm845-bwmon # BWMON v4, unified register space - items: - enum: + - qcom,sa8775p-llcc-bwmon - qcom,sc7180-llcc-bwmon - qcom,sc8280xp-llcc-bwmon - qcom,sm6350-cpu-bwmon diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml index 9318b845ec359b..1b9164dc162f35 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml @@ -71,7 +71,7 @@ properties: - qcom,sdx65-system-noc - qcom,sm8150-aggre1-noc - qcom,sm8150-aggre2-noc - - qcom,sm8150-camnoc-noc + - qcom,sm8150-camnoc-virt - qcom,sm8150-compute-noc - qcom,sm8150-config-noc - qcom,sm8150-dc-noc @@ -113,6 +113,9 @@ allOf: properties: compatible: enum: + - qcom,sc8180x-camnoc-virt + - qcom,sc8180x-mc-virt + - qcom,sc8180x-qup-virt - qcom,sdx65-mc-virt - qcom,sm8250-qup-virt then: diff --git a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml index 698588e9aa869c..4be9b596a7904b 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml @@ -31,13 +31,25 @@ description: | This device also represents the FIQ interrupt sources on platforms using AIC, which do not go through a discrete interrupt controller. + IPIs may be performed via MMIO registers on all variants of AIC. Starting + from A11, system registers may also be used for "fast" IPIs. Starting from + M1, even faster IPIs within the same cluster may be achieved by writing to + a "local" fast IPI register as opposed to using the "global" fast IPI + register. + allOf: - $ref: /schemas/interrupt-controller.yaml# properties: compatible: items: - - const: apple,t8103-aic + - enum: + - apple,s5l8960x-aic + - apple,t7000-aic + - apple,s8000-aic + - apple,t8010-aic + - apple,t8015-aic + - apple,t8103-aic - const: apple,aic interrupt-controller: true diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml index 0f4a062c9d6fe3..5f051c666cbe5f 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml @@ -60,7 +60,7 @@ properties: The 4th cell is a phandle to a node describing a set of CPUs this interrupt is affine to. The interrupt must be a PPI, and the node pointed must be a subnode of the "ppi-partitions" subnode. For - interrupt types other than PPI or PPIs that are not partitionned, + interrupt types other than PPI or PPIs that are not partitioned, this cell must be zero. See the "ppi-partitions" node description below. 
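Note on the arm,gic-v3 hunk above: the fourth interrupt cell it describes is easiest to see in a consumer sketch. The fragment below is illustrative only (labels, addresses and the PPI number are assumptions, and the usual GIC/IRQ dt-bindings headers are presumed included); a partitioned PPI passes a phandle to a child of the "ppi-partitions" subnode, every other interrupt passes zero:

    gic: interrupt-controller@2c010000 {
            compatible = "arm,gic-v3";
            #interrupt-cells = <4>;
            interrupt-controller;
            /* reg, redistributor ranges etc. omitted for brevity */

            ppi-partitions {
                    part0: interrupt-partition-0 {
                            affinity = <&cpu0 &cpu2>;
                    };
            };
    };

    pmu {
            compatible = "arm,armv8-pmuv3";
            interrupt-parent = <&gic>;
            /* PPI affine only to the CPUs listed in part0 */
            interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH &part0>;
    };

    serial@f0000000 {
            compatible = "ns16550a";
            reg = <0xf0000000 0x1000>;
            interrupt-parent = <&gic>;
            /* SPI, not partitioned: the fourth cell must be zero */
            interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH 0>;
    };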
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt deleted file mode 100644 index e3fea0758d2535..00000000000000 --- a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt +++ /dev/null @@ -1,23 +0,0 @@ -Aspeed Vectored Interrupt Controller - -These bindings are for the Aspeed interrupt controller. The AST2400 and -AST2500 SoC families include a legacy register layout before a re-designed -layout, but the bindings do not prescribe the use of one or the other. - -Required properties: - -- compatible : "aspeed,ast2400-vic" - "aspeed,ast2500-vic" - -- interrupt-controller : Identifies the node as an interrupt controller -- #interrupt-cells : Specifies the number of cells needed to encode an - interrupt source. The value shall be 1. - -Example: - - vic: interrupt-controller@1e6c0080 { - compatible = "aspeed,ast2400-vic"; - interrupt-controller; - #interrupt-cells = <1>; - reg = <0x1e6c0080 0x80>; - }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml new file mode 100644 index 00000000000000..73e8b9a39bd726 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.yaml @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/interrupt-controller/aspeed,ast2400-vic.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Aspeed Vectored Interrupt Controller + +maintainers: + - Andrew Jeffery + +description: + The AST2400 and AST2500 SoC families include a legacy register layout before + a redesigned layout, but the bindings do not prescribe the use of one or the + other. + +properties: + compatible: + enum: + - aspeed,ast2400-vic + - aspeed,ast2500-vic + + reg: + maxItems: 1 + + interrupt-controller: true + + "#interrupt-cells": + const: 1 + description: + Specifies the number of cells needed to encode an interrupt source. It + must be 1 as the VIC has no configuration options for interrupt sources. + The single cell defines the interrupt number. + + valid-sources: + $ref: /schemas/types.yaml#/definitions/uint32-array + maxItems: 2 + description: + A bitmap of supported sources for the implementation. + +required: + - compatible + - reg + - interrupt-controller + - "#interrupt-cells" + +allOf: + - $ref: /schemas/interrupt-controller.yaml + +additionalProperties: false + +examples: + - | + interrupt-controller@1e6c0080 { + compatible = "aspeed,ast2400-vic"; + reg = <0x1e6c0080 0x80>; + interrupt-controller; + #interrupt-cells = <1>; + valid-sources = <0xffffffff 0x0007ffff>; + }; + +... diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt deleted file mode 100644 index 8ced1696c325bb..00000000000000 --- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt +++ /dev/null @@ -1,37 +0,0 @@ -BCM2836 per-CPU interrupt controller - -The BCM2836 has a per-cpu interrupt controller for the timer, PMU -events, and SMP IPIs. One of the CPUs may receive interrupts for the -peripheral (GPU) events, which chain to the BCM2835-style interrupt -controller. 
- -Required properties: - -- compatible: Should be "brcm,bcm2836-l1-intc" -- reg: Specifies base physical address and size of the - registers -- interrupt-controller: Identifies the node as an interrupt controller -- #interrupt-cells: Specifies the number of cells needed to encode an - interrupt source. The value shall be 2 - -Please refer to interrupts.txt in this directory for details of the common -Interrupt Controllers bindings used by client devices. - -The interrupt sources are as follows: - -0: CNTPSIRQ -1: CNTPNSIRQ -2: CNTHPIRQ -3: CNTVIRQ -8: GPU_FAST -9: PMU_FAST - -Example: - -local_intc: local_intc { - compatible = "brcm,bcm2836-l1-intc"; - reg = <0x40000000 0x100>; - interrupt-controller; - #interrupt-cells = <2>; - interrupt-parent = <&local_intc>; -}; diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.yaml new file mode 100644 index 00000000000000..5fda626c80ce39 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.yaml @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/interrupt-controller/brcm,bcm2836-l1-intc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: BCM2836 per-CPU interrupt controller + +maintainers: + - Stefan Wahren + - Raspberry Pi Kernel Maintenance + +description: + The BCM2836 has a per-cpu interrupt controller for the timer, PMU + events, and SMP IPIs. One of the CPUs may receive interrupts for the + peripheral (GPU) events, which chain to the BCM2835-style interrupt + controller. + +allOf: + - $ref: /schemas/interrupt-controller.yaml# + +properties: + compatible: + const: brcm,bcm2836-l1-intc + + reg: + maxItems: 1 + + interrupt-controller: true + + '#interrupt-cells': + const: 2 + +required: + - compatible + - reg + - interrupt-controller + - '#interrupt-cells' + +additionalProperties: false + +examples: + - | + local_intc: interrupt-controller@40000000 { + compatible = "brcm,bcm2836-l1-intc"; + reg = <0x40000000 0x100>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&local_intc>; + }; +... 
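Usage note for the brcm,bcm2836-l1-intc conversion above: the first interrupt cell selects the per-CPU source (0-3 for the architected timer events, 8 for GPU_FAST, 9 for PMU_FAST, as enumerated in the removed text binding) and the second carries the trigger flags. A consumer sketch, loosely modelled on bcm2836.dtsi and assuming the usual IRQ type macros; the exact source ordering expected by the timer binding should be checked against the SoC dts:

    timer {
            compatible = "arm,armv7-timer";
            interrupt-parent = <&local_intc>;
            /* secure, non-secure, virtual, hypervisor timer events */
            interrupts = <0 IRQ_TYPE_LEVEL_HIGH>,  /* CNTPSIRQ */
                         <1 IRQ_TYPE_LEVEL_HIGH>,  /* CNTPNSIRQ */
                         <3 IRQ_TYPE_LEVEL_HIGH>,  /* CNTVIRQ */
                         <2 IRQ_TYPE_LEVEL_HIGH>;  /* CNTHPIRQ */
            always-on;
    };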
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml index aae676ba30edc5..6076ddf56bb5af 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml @@ -17,6 +17,7 @@ properties: - enum: - fsl,imx8m-irqsteer - fsl,imx8mp-irqsteer + - fsl,imx8qm-irqsteer - fsl,imx8qxp-irqsteer - const: fsl,imx-irqsteer @@ -83,6 +84,7 @@ allOf: contains: enum: - fsl,imx8mp-irqsteer + - fsl,imx8qm-irqsteer - fsl,imx8qxp-irqsteer then: required: diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml index 985fa10abb9949..b1ea08a41bb0c3 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml @@ -27,6 +27,7 @@ properties: items: - enum: - qcom,qdu1000-pdc + - qcom,sa8255p-pdc - qcom,sa8775p-pdc - qcom,sc7180-pdc - qcom,sc7280-pdc diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml index 709b2211276bd9..7e1451f9786a8b 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml @@ -67,6 +67,7 @@ properties: - allwinner,sun20i-d1-plic - sophgo,cv1800b-plic - sophgo,cv1812h-plic + - sophgo,sg2002-plic - sophgo,sg2042-plic - thead,th1520-plic - const: thead,c900-plic diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml index 280b4e49f21919..92d350b8e01a83 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml @@ -36,7 +36,9 @@ properties: items: - enum: - qcom,qcm2290-smmu-500 + - qcom,qcs8300-smmu-500 - qcom,qdu1000-smmu-500 + - qcom,sa8255p-smmu-500 - qcom,sa8775p-smmu-500 - qcom,sc7180-smmu-500 - qcom,sc7280-smmu-500 @@ -84,6 +86,7 @@ properties: items: - enum: - qcom,qcm2290-smmu-500 + - qcom,sa8255p-smmu-500 - qcom,sa8775p-smmu-500 - qcom,sc7280-smmu-500 - qcom,sc8180x-smmu-500 @@ -552,7 +555,9 @@ allOf: - cavium,smmu-v2 - marvell,ap806-smmu-500 - nvidia,smmu-500 + - qcom,qcs8300-smmu-500 - qcom,qdu1000-smmu-500 + - qcom,sa8255p-smmu-500 - qcom,sc7180-smmu-500 - qcom,sdm670-smmu-500 - qcom,sdm845-smmu-500 diff --git a/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml b/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml index 54d6d1f08e2489..17e971903ee911 100644 --- a/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml +++ b/Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml @@ -66,7 +66,7 @@ patternProperties: IMAXled = 160000 * (592 / 600.5) * (1 / max-current-switch-number) And the minimum output current formula: IMINled = 3300 * (592 / 600.5) * (1 / max-current-switch-number) - where max-current-switch-number is determinated by led configuration + where max-current-switch-number is determined by led configuration and depends on how leds are physically connected to the led driver. 
allOf: diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml index 8a3c2398b10ce0..bf9a101e4d4206 100644 --- a/Documentation/devicetree/bindings/leds/common.yaml +++ b/Documentation/devicetree/bindings/leds/common.yaml @@ -113,6 +113,8 @@ properties: # LED indicates NAND memory activity (deprecated), # in new implementations use "mtd" - nand-disk + # LED indicates network activity + - netdev # No trigger assigned to the LED. This is the default mode # if trigger is absent - none diff --git a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt deleted file mode 100644 index b1103d961d6caa..00000000000000 --- a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt +++ /dev/null @@ -1,65 +0,0 @@ -* Texas Instruments - LM3692x Highly Efficient White LED Driver - -The LM3692x is an ultra-compact, highly efficient, -white-LED driver designed for LCD display backlighting. - -The main difference between the LM36922 and LM36923 is the number of -LED strings it supports. The LM36922 supports two strings while the LM36923 -supports three strings. - -Required properties: - - compatible: - "ti,lm36922" - "ti,lm36923" - - reg : I2C slave address - - #address-cells : 1 - - #size-cells : 0 - -Optional properties: - - enable-gpios : gpio pin to enable/disable the device. - - vled-supply : LED supply - - ti,ovp-microvolt: Overvoltage protection in - micro-volt, can be 17000000, 21000000, 25000000 or - 29000000. If ti,ovp-microvolt is not specified it - defaults to 29000000. - -Required child properties: - - reg : 0 - Will enable all LED sync paths - 1 - Will enable the LED1 sync - 2 - Will enable the LED2 sync - 3 - Will enable the LED3 sync (LM36923 only) - -Optional child properties: - - function : see Documentation/devicetree/bindings/leds/common.txt - - color : see Documentation/devicetree/bindings/leds/common.txt - - label : see Documentation/devicetree/bindings/leds/common.txt (deprecated) - - linux,default-trigger : - see Documentation/devicetree/bindings/leds/common.txt - - led-max-microamp : - see Documentation/devicetree/bindings/leds/common.txt - -Example: - -#include - -led-controller@36 { - compatible = "ti,lm3692x"; - reg = <0x36>; - #address-cells = <1>; - #size-cells = <0>; - - enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>; - vled-supply = <&vbatt>; - ti,ovp-microvolt = <29000000>; - - led@0 { - reg = <0>; - function = LED_FUNCTION_BACKLIGHT; - color = ; - linux,default-trigger = "backlight"; - led-max-microamp = <20000>; - }; -} - -For more product information please see the link below: -https://www.ti.com/lit/ds/snvsa29/snvsa29.pdf diff --git a/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt b/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt deleted file mode 100644 index df2b4e1c492be0..00000000000000 --- a/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt +++ /dev/null @@ -1,43 +0,0 @@ -LEDs connected to Spreadtrum SC27XX PMIC breathing light controller - -The SC27xx breathing light controller supports to 3 outputs: -red LED, green LED and blue LED. Each LED can work at normal -PWM mode or breath light mode. - -Required properties: -- compatible: Should be "sprd,sc2731-bltc". -- #address-cells: Must be 1. -- #size-cells: Must be 0. -- reg: Specify the controller address. - -Required child properties: -- reg: Port this LED is connected to. 
- -Optional child properties: -- function: See Documentation/devicetree/bindings/leds/common.txt. -- color: See Documentation/devicetree/bindings/leds/common.txt. -- label: See Documentation/devicetree/bindings/leds/common.txt (deprecated). - -Examples: - -led-controller@200 { - compatible = "sprd,sc2731-bltc"; - #address-cells = <1>; - #size-cells = <0>; - reg = <0x200>; - - led@0 { - color = ; - reg = <0x0>; - }; - - led@1 { - color = ; - reg = <0x1>; - }; - - led@2 { - color = ; - reg = <0x2>; - }; -}; diff --git a/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml b/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml index 654915c1f687f3..ab8c90cbadb5ca 100644 --- a/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml +++ b/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml @@ -11,19 +11,21 @@ maintainers: - Marek Vasut description: - The NXP PCA9952/PCA9955B are programmable LED controllers connected via I2C - that can drive 16 separate lines. Each of them can be individually switched + The NXP PCA995x family are programmable LED controllers connected via I2C + that can drive separate lines. Each of them can be individually switched on and off, and brightness can be controlled via individual PWM. Datasheets are available at https://www.nxp.com/docs/en/data-sheet/PCA9952_PCA9955.pdf https://www.nxp.com/docs/en/data-sheet/PCA9955B.pdf + https://www.nxp.com/docs/en/data-sheet/PCA9956B.pdf properties: compatible: enum: - nxp,pca9952 - nxp,pca9955b + - nxp,pca9956b reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml b/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml new file mode 100644 index 00000000000000..5853410c7a4597 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/leds/sprd,sc2731-bltc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Spreadtrum SC2731 PMIC breathing light controller + +maintainers: + - Orson Zhai + - Baolin Wang + - Chunyan Zhang + +description: | + The SC2731 breathing light controller supports up to 3 outputs: + red LED, green LED and blue LED. Each LED can work at normal PWM mode + or breath light mode. + +properties: + compatible: + const: sprd,sc2731-bltc + + reg: + maxItems: 1 + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + "^led@[0-2]$": + type: object + $ref: common.yaml# + unevaluatedProperties: false + + properties: + reg: + minimum: 0 + maximum: 2 + + required: + - reg + +required: + - compatible + - reg + - '#address-cells' + - '#size-cells' + +additionalProperties: false + +examples: + - | + #include + + pmic { + #address-cells = <1>; + #size-cells = <0>; + + led-controller@200 { + compatible = "sprd,sc2731-bltc"; + reg = <0x200>; + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0x0>; + color = ; + }; + + led@1 { + reg = <0x1>; + color = ; + }; + + led@2 { + reg = <0x2>; + color = ; + }; + }; + }; +... 
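The "netdev" value added to the linux,default-trigger enumeration in the leds/common.yaml hunk above can be requested directly from a board dts. A minimal gpio-leds sketch (GPIO line, colour and function are purely illustrative; dt-bindings/gpio/gpio.h and dt-bindings/leds/common.h are assumed to be included):

    leds {
            compatible = "gpio-leds";

            led-wan {
                    gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
                    function = LED_FUNCTION_WAN;
                    color = <LED_COLOR_ID_GREEN>;
                    /* blink on activity of the network device bound to this trigger */
                    linux,default-trigger = "netdev";
            };
    };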
diff --git a/Documentation/devicetree/bindings/leds/ti.lm36922.yaml b/Documentation/devicetree/bindings/leds/ti.lm36922.yaml new file mode 100644 index 00000000000000..8ffbc6b785a3e5 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/ti.lm36922.yaml @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/leds/ti.lm36922.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments - LM3692x Highly Efficient White LED Driver + +maintainers: + - Dan Murphy + +description: | + The LM3692x is an ultra-compact, highly efficient, + white-LED driver designed for LCD display backlighting. + + The main difference between the LM36922 and LM36923 is the number of + LED strings it supports. The LM36922 supports two strings while the LM36923 + supports three strings. + + For more product information please see the link below: + https://www.ti.com/lit/ds/snvsa29/snvsa29.pdf + +properties: + compatible: + enum: + - ti,lm36922 + - ti,lm36923 + + reg: + maxItems: 1 + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + enable-gpios: + description: gpio pin to enable/disable the device. + + vled-supply: + description: LED supply + + ti,ovp-microvolt: + description: Overvoltage protection. + default: 29000000 + enum: [17000000, 21000000, 25000000, 29000000] + +patternProperties: + '^led@[0-3]$': + type: object + $ref: common.yaml + properties: + reg: + enum: [0, 1, 2, 3] + description: | + 0 - Will enable all LED sync paths + 1 - Will enable the LED1 sync + 2 - Will enable the LED2 sync + 3 - Will enable the LED3 sync (LM36923 only) + + unevaluatedProperties: false + +required: + - compatible + - reg + - "#address-cells" + - "#size-cells" + +allOf: + - if: + properties: + compatible: + contains: + const: ti,lm36922 + then: + properties: + led@3: false + +additionalProperties: false + +examples: + - | + #include + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + led-controller@36 { + compatible = "ti,lm36922"; + reg = <0x36>; + #address-cells = <1>; + #size-cells = <0>; + + enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>; + vled-supply = <&vbatt>; + ti,ovp-microvolt = <29000000>; + + led@0 { + reg = <0>; + function = LED_FUNCTION_BACKLIGHT; + color = ; + linux,default-trigger = "backlight"; + led-max-microamp = <20000>; + }; + }; + }; + diff --git a/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt index c80065a1eb9786..bf0c998b860302 100644 --- a/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt +++ b/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt @@ -24,7 +24,7 @@ Required properties: number of completion messages for which FlexRM will inject one MSI interrupt to CPU. - The 3nd cell contains MSI timer value representing time for + The 3rd cell contains MSI timer value representing time for which FlexRM will wait to accumulate N completion messages where N is the value specified by 2nd cell above. 
If FlexRM does not get required number of completion messages in time diff --git a/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml b/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml index 72c1d9e82c897c..8a1369df4ecb22 100644 --- a/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml +++ b/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml @@ -17,9 +17,15 @@ description: | properties: compatible: - enum: - - mediatek,mt8195-adsp-mbox - - mediatek,mt8186-adsp-mbox + oneOf: + - enum: + - mediatek,mt8186-adsp-mbox + - mediatek,mt8195-adsp-mbox + - items: + - enum: + - mediatek,mt8188-adsp-mbox + - const: mediatek,mt8186-adsp-mbox + "#mbox-cells": const: 0 diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml index 05e4e1d51713a6..2d66770ed3612f 100644 --- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml +++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml @@ -24,7 +24,9 @@ properties: compatible: items: - enum: + - qcom,qcs8300-ipcc - qcom,qdu1000-ipcc + - qcom,sa8255p-ipcc - qcom,sa8775p-ipcc - qcom,sc7280-ipcc - qcom,sc8280xp-ipcc diff --git a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml index 55930f6107c9c5..47dce75aeae65a 100644 --- a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml +++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml @@ -31,7 +31,8 @@ properties: - items: - enum: - amlogic,gxbb-vdec # GXBB (S905) - - amlogic,gxl-vdec # GXL (S905X, S905D) + - amlogic,gxl-vdec # GXL (S905D, S905W, S905X, S905Y) + - amlogic,gxlx-vdec # GXLX (S905L) - amlogic,gxm-vdec # GXM (S912) - const: amlogic,gx-vdec - enum: diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml new file mode 100644 index 00000000000000..ca57c01739d2b9 --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright (c) 2023-2024 Linaro Ltd. +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/media/i2c/ovti,og01a1b.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: OmniVision OG01A1B Image Sensor + +maintainers: + - Vladimir Zapolskiy + +description: + The OmniVision OG01A1B is black and white CMOS 1.3 Megapixel (1280x1024) + image sensor controlled over an I2C-compatible SCCB bus. + The sensor transmits images on a MIPI CSI-2 output interface with one or + two data lanes. + +allOf: + - $ref: /schemas/media/video-interface-devices.yaml# + +properties: + compatible: + const: ovti,og01a1b + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + reset-gpios: + description: Active low GPIO connected to XSHUTDOWN pad of the sensor. + maxItems: 1 + + strobe-gpios: + description: Input GPIO connected to strobe pad of the sensor. + maxItems: 1 + + avdd-supply: + description: Analogue circuit voltage supply. + + dovdd-supply: + description: I/O circuit voltage supply. + + dvdd-supply: + description: Digital circuit voltage supply. + + port: + $ref: /schemas/graph.yaml#/$defs/port-base + additionalProperties: false + description: + Output port node, single endpoint describing the CSI-2 transmitter. 
+ + properties: + endpoint: + $ref: /schemas/media/video-interfaces.yaml# + unevaluatedProperties: false + + properties: + data-lanes: + minItems: 1 + maxItems: 2 + items: + enum: [1, 2] + + link-frequencies: true + + required: + - data-lanes + - link-frequencies + +required: + - compatible + - reg + - clocks + - port + +unevaluatedProperties: false + +examples: + - | + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + sensor@60 { + compatible = "ovti,og01a1b"; + reg = <0x60>; + clocks = <&clk 0>; + reset-gpios = <&gpio 117 GPIO_ACTIVE_LOW>; + avdd-supply = <&vreg_3v3>; + dovdd-supply = <&vreg_1p8>; + dvdd-supply = <&vreg_1p2>; + + port { + og01a1b_ep: endpoint { + remote-endpoint = <&csiphy_ep>; + data-lanes = <1 2>; + link-frequencies = /bits/ 64 <500000000>; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml index 106c36ee966db0..77bf3a4ee89db3 100644 --- a/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml +++ b/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml @@ -75,6 +75,8 @@ additionalProperties: false examples: - | + #include + i2c { #address-cells = <1>; #size-cells = <0>; @@ -92,6 +94,8 @@ examples: ovdd-supply = <&camera_vddo_1v8>; dvdd-supply = <&camera_vddd_1v2>; + reset-gpios = <&gpio 50 GPIO_ACTIVE_LOW>; + port { imx335: endpoint { remote-endpoint = <&cam>; diff --git a/Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml b/Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml index 1978fbb77a6ce5..535acf2b88a908 100644 --- a/Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml +++ b/Documentation/devicetree/bindings/media/i2c/thine,thp7312.yaml @@ -16,7 +16,7 @@ description: can be connected to CMOS image sensors from various vendors, supporting both MIPI CSI-2 and parallel interfaces. It can also output on either MIPI CSI-2 or parallel. The hardware is capable of transmitting and receiving MIPI - interlaved data strams with data types or multiple virtual channel + interleaved data streams with data types or multiple virtual channel identifiers. 
allOf: diff --git a/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml index 8f9b6433aeb8c1..10c334e6b3dcf2 100644 --- a/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml +++ b/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml @@ -43,6 +43,7 @@ properties: - const: vcodec_bus iommus: + minItems: 1 maxItems: 2 interconnects: diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.yaml b/Documentation/devicetree/bindings/media/renesas,fcp.yaml index c6abe719881b85..f94dacd9627822 100644 --- a/Documentation/devicetree/bindings/media/renesas,fcp.yaml +++ b/Documentation/devicetree/bindings/media/renesas,fcp.yaml @@ -27,6 +27,7 @@ properties: - renesas,fcpf # FCP for FDP - items: - enum: + - renesas,r9a07g043u-fcpvd # RZ/G2UL - renesas,r9a07g044-fcpvd # RZ/G2{L,LC} - renesas,r9a07g054-fcpvd # RZ/V2L - const: renesas,fcpv # Generic FCP for VSP fallback @@ -62,6 +63,7 @@ allOf: compatible: contains: enum: + - renesas,r9a07g043u-fcpvd - renesas,r9a07g044-fcpvd - renesas,r9a07g054-fcpvd then: diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml index 5539d0f8e74d2e..cf54176f4fbd6f 100644 --- a/Documentation/devicetree/bindings/media/renesas,vin.yaml +++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml @@ -52,8 +52,12 @@ properties: - renesas,vin-r8a77980 # R-Car V3H - renesas,vin-r8a77990 # R-Car E3 - renesas,vin-r8a77995 # R-Car D3 + - items: + - enum: - renesas,vin-r8a779a0 # R-Car V3U - renesas,vin-r8a779g0 # R-Car V4H + - renesas,vin-r8a779h0 # R-Car V4M + - const: renesas,rcar-gen4-vin # Generic R-Car Gen4 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml index 3265e922647c76..1a03e67462a4fe 100644 --- a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml +++ b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml @@ -23,6 +23,7 @@ properties: - renesas,vsp2 # R-Car Gen3 and RZ/G2 - items: - enum: + - renesas,r9a07g043u-vsp2 # RZ/G2UL - renesas,r9a07g054-vsp2 # RZ/V2L - const: renesas,r9a07g044-vsp2 # RZ/G2L fallback diff --git a/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml b/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml index 9d90d8d0565a15..947ad699cc5e7f 100644 --- a/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml +++ b/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml @@ -17,6 +17,7 @@ properties: compatible: enum: - rockchip,rk3568-vepu + - rockchip,rk3588-vepu121 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml index c57e1f488895b3..719aeb2dc59342 100644 --- a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml +++ b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml @@ -26,11 +26,16 @@ properties: - rockchip,rk3568-vpu - rockchip,rk3588-av1-vpu - items: - - const: rockchip,rk3188-vpu + - enum: + - rockchip,rk3128-vpu + - rockchip,rk3188-vpu - const: rockchip,rk3066-vpu - items: - const: rockchip,rk3228-vpu - const: rockchip,rk3399-vpu + - items: + - const: rockchip,rk3588-vpu121 + - const: rockchip,rk3568-vpu reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/media/s5p-mfc.txt b/Documentation/devicetree/bindings/media/s5p-mfc.txt deleted file mode 100644 index e69de29bb2d1d6..00000000000000 
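Editorial note: the mtk,adsp-mbox hunk above converts the compatible schema to oneOf so that "mediatek,mt8188-adsp-mbox" is only accepted together with the "mediatek,mt8186-adsp-mbox" fallback. A minimal sketch of the resulting MT8188 board node follows; the unit address, register size and interrupt specifier are illustrative placeholders, not values defined by this patch:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    adsp_mailbox0: mailbox@10816000 {
        compatible = "mediatek,mt8188-adsp-mbox", "mediatek,mt8186-adsp-mbox";
        #mbox-cells = <0>;
        reg = <0x10816000 0x1000>;
        interrupts = <GIC_SPI 702 IRQ_TYPE_LEVEL_HIGH 0>;
    };

With the fallback string present, a driver that only knows the mt8186 compatible can still bind to the mt8188 mailbox, which is the usual motivation for the items/const fallback pattern in devicetree schemas.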
diff --git a/Documentation/devicetree/bindings/media/samsung,exynos4210-fimc.yaml b/Documentation/devicetree/bindings/media/samsung,exynos4210-fimc.yaml index 271d0577a83c71..2ba27b230559b4 100644 --- a/Documentation/devicetree/bindings/media/samsung,exynos4210-fimc.yaml +++ b/Documentation/devicetree/bindings/media/samsung,exynos4210-fimc.yaml @@ -77,7 +77,7 @@ properties: $ref: /schemas/types.yaml#/definitions/uint32-array maxItems: 2 description: | - An array specyfing minimum image size in pixels at the FIMC input and + An array specifying minimum image size in pixels at the FIMC input and output DMA, in the first and second cell respectively. Default value is <16 16>. diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml b/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml index 3f40ca5b13f660..ce4ec94a561cef 100644 --- a/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml @@ -134,9 +134,8 @@ allOf: properties: fsl,weim-cs-timing: items: - items: - - description: CSxU - - description: CSxL + - description: CSxU + - description: CSxL - if: properties: compatible: @@ -151,10 +150,9 @@ allOf: properties: fsl,weim-cs-timing: items: - items: - - description: CSCRxU - - description: CSCRxL - - description: CSCRxA + - description: CSCRxU + - description: CSCRxL + - description: CSCRxA - if: properties: compatible: @@ -171,13 +169,12 @@ allOf: properties: fsl,weim-cs-timing: items: - items: - - description: CSxGCR1 - - description: CSxGCR2 - - description: CSxRCR1 - - description: CSxRCR2 - - description: CSxWCR1 - - description: CSxWCR2 + - description: CSxGCR1 + - description: CSxGCR2 + - description: CSxRCR1 + - description: CSxRCR2 + - description: CSxWCR1 + - description: CSxWCR2 additionalProperties: false diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml b/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml index d7745dd53b51ce..4f4bc953e31a05 100644 --- a/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml @@ -67,7 +67,9 @@ properties: - const: dirmap - const: wbuf - clocks: true + clocks: + minItems: 1 + maxItems: 2 interrupts: maxItems: 1 diff --git a/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml b/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml new file mode 100644 index 00000000000000..ee2272f754a339 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mfd/adi,adp5585.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices ADP5585 Keypad Decoder and I/O Expansion + +maintainers: + - Laurent Pinchart + +description: + The ADP5585 is a 10/11 input/output port expander with a built in keypad + matrix decoder, programmable logic, reset generator, and PWM generator. 
+ +properties: + compatible: + items: + - enum: + - adi,adp5585-00 # Default + - adi,adp5585-01 # 11 GPIOs + - adi,adp5585-02 # No pull-up resistors by default on special pins + - adi,adp5585-03 # Alternate I2C address + - adi,adp5585-04 # Pull-down resistors on all pins by default + - const: adi,adp5585 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + vdd-supply: true + + gpio-controller: true + + '#gpio-cells': + const: 2 + + gpio-reserved-ranges: true + + "#pwm-cells": + const: 3 + +patternProperties: + "-hog(-[0-9]+)?$": + type: object + + required: + - gpio-hog + +required: + - compatible + - reg + - gpio-controller + - "#gpio-cells" + - "#pwm-cells" + +allOf: + - if: + properties: + compatible: + contains: + const: adi,adp5585-01 + then: + properties: + gpio-reserved-ranges: false + else: + properties: + gpio-reserved-ranges: + maxItems: 1 + items: + items: + - const: 5 + - const: 1 + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + io-expander@34 { + compatible = "adi,adp5585-00", "adi,adp5585"; + reg = <0x34>; + + vdd-supply = <®_3v3>; + + gpio-controller; + #gpio-cells = <2>; + gpio-reserved-ranges = <5 1>; + + #pwm-cells = <3>; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml b/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml index 37423c2e0fdfa3..b67fbe0e7a63dd 100644 --- a/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml +++ b/Documentation/devicetree/bindings/mfd/mediatek,mt6357.yaml @@ -37,6 +37,24 @@ properties: "#interrupt-cells": const: 2 + mediatek,hp-pull-down: + description: + Earphone driver positive output stage short to + the audio reference ground. + type: boolean + + mediatek,micbias0-microvolt: + description: Selects MIC Bias 0 output voltage. + enum: [1700000, 1800000, 1900000, 2000000, + 2100000, 2500000, 2600000, 2700000] + default: 1700000 + + mediatek,micbias1-microvolt: + description: Selects MIC Bias 1 output voltage. + enum: [1700000, 1800000, 1900000, 2000000, + 2100000, 2500000, 2600000, 2700000] + default: 1700000 + regulators: type: object $ref: /schemas/regulator/mediatek,mt6357-regulator.yaml @@ -83,6 +101,9 @@ examples: interrupt-controller; #interrupt-cells = <2>; + mediatek,micbias0-microvolt = <1700000>; + mediatek,micbias1-microvolt = <1700000>; + regulators { mt6357_vproc_reg: buck-vproc { regulator-name = "vproc"; diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml index c6bd14ec5aa0fc..7d0b0b40315051 100644 --- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml +++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml @@ -21,6 +21,7 @@ properties: - qcom,msm8998-tcsr - qcom,qcm2290-tcsr - qcom,qcs404-tcsr + - qcom,sa8775p-tcsr - qcom,sc7180-tcsr - qcom,sc7280-tcsr - qcom,sc8280xp-tcsr diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml index d381125a0a152a..efee3de0d9ad29 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml +++ b/Documentation/devicetree/bindings/mfd/rohm,bd96801-pmic.yaml @@ -25,7 +25,7 @@ properties: description: The PMIC provides intb and errb IRQ lines. The errb IRQ line is used for fatal IRQs which will cause the PMIC to shut down power outputs. 
- In many systems this will shut down the SoC contolling the PMIC and + In many systems this will shut down the SoC controlling the PMIC and connecting/handling the errb can be omitted. However, there are cases where the SoC is not powered by the PMIC or has a short time backup energy to handle shutdown of critical hardware. In that case it may be diff --git a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml index bc8b5940b1c5c4..a4be642de33ce6 100644 --- a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml +++ b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml @@ -53,7 +53,7 @@ properties: samsung,s2mps11-wrstbi-ground: description: | Indicates that WRSTBI pin of PMIC is pulled down. When the system is - suspended it will always go down thus triggerring unwanted buck warm + suspended it will always go down thus triggering unwanted buck warm reset (setting buck voltages to default values). type: boolean diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml index 9dc594ea365452..cc9b17ad69f23d 100644 --- a/Documentation/devicetree/bindings/mfd/syscon.yaml +++ b/Documentation/devicetree/bindings/mfd/syscon.yaml @@ -103,6 +103,7 @@ select: - rockchip,rk3368-qos - rockchip,rk3399-qos - rockchip,rk3568-qos + - rockchip,rk3576-qos - rockchip,rk3588-qos - rockchip,rv1126-qos - st,spear1340-misc @@ -113,6 +114,7 @@ select: - ti,am625-dss-oldi-io-ctrl - ti,am62p-cpsw-mac-efuse - ti,am654-dss-oldi-io-ctrl + - ti,j784s4-acspcie-proxy-ctrl - ti,j784s4-pcie-ctrl - ti,keystone-pllctrl required: @@ -198,6 +200,7 @@ properties: - rockchip,rk3368-qos - rockchip,rk3399-qos - rockchip,rk3568-qos + - rockchip,rk3576-qos - rockchip,rk3588-qos - rockchip,rv1126-qos - st,spear1340-misc diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt index 06e9dd7a0d96aa..dfd8683ede0cf9 100644 --- a/Documentation/devicetree/bindings/mfd/twl6040.txt +++ b/Documentation/devicetree/bindings/mfd/twl6040.txt @@ -2,7 +2,7 @@ Texas Instruments TWL6040 family The TWL6040s are 8-channel high quality low-power audio codecs providing audio, vibra and GPO functionality on OMAP4+ platforms. -They are connected ot the host processor via i2c for commands, McPDM for audio +They are connected to the host processor via i2c for commands, McPDM for audio data and commands. Required properties: diff --git a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml index b8e8db0d58e9c3..14ab367fc88715 100644 --- a/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml +++ b/Documentation/devicetree/bindings/mfd/x-powers,axp152.yaml @@ -274,7 +274,7 @@ properties: Defines the work frequency of DC-DC in kHz. 
patternProperties: - "^(([a-f])?ldo[0-9]|dcdc[0-7a-e]|ldo(_|-)io(0|1)|(dc1)?sw|rtc(_|-)ldo|cpusldo|drivevbus|dc5ldo)$": + "^(([a-f])?ldo[0-9]|dcdc[0-7a-e]|ldo(_|-)io(0|1)|(dc1)?sw|rtc(_|-)ldo|cpusldo|drivevbus|dc5ldo|boost)$": $ref: /schemas/regulator/regulator.yaml# type: object unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/misc/aspeed,ast2400-cvic.yaml b/Documentation/devicetree/bindings/misc/aspeed,ast2400-cvic.yaml new file mode 100644 index 00000000000000..accf1a7ecf1213 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/aspeed,ast2400-cvic.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/misc/aspeed,ast2400-cvic.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Aspeed Coprocessor Vectored Interrupt Controller + +maintainers: + - Andrew Jeffery + +description: + The Aspeed AST2400 and AST2500 SoCs have a controller that provides interrupts + to the ColdFire coprocessor. It's not a normal interrupt controller and it + would be rather inconvenient to create an interrupt tree for it, as it + somewhat shares some of the same sources as the main ARM interrupt controller + but with different numbers. + + The AST2500 also supports a software generated interrupt. + +properties: + compatible: + items: + - enum: + - aspeed,ast2400-cvic + - aspeed,ast2500-cvic + - const: aspeed,cvic + + reg: + maxItems: 1 + + valid-sources: + $ref: /schemas/types.yaml#/definitions/uint32-array + maxItems: 1 + description: + A bitmap of supported sources for the implementation. + + copro-sw-interrupts: + $ref: /schemas/types.yaml#/definitions/uint32-array + minItems: 1 + maxItems: 32 + description: + A list of interrupt numbers that can be used as software interrupts from + the ARM to the coprocessor. + +required: + - compatible + - reg + - valid-sources + +additionalProperties: false + +examples: + - | + interrupt-controller@1e6c2000 { + compatible = "aspeed,ast2500-cvic", "aspeed,cvic"; + reg = <0x1e6c2000 0x80>; + valid-sources = <0xffffffff>; + copro-sw-interrupts = <1>; + }; diff --git a/Documentation/devicetree/bindings/misc/aspeed,cvic.txt b/Documentation/devicetree/bindings/misc/aspeed,cvic.txt deleted file mode 100644 index d62c783d1d5ea2..00000000000000 --- a/Documentation/devicetree/bindings/misc/aspeed,cvic.txt +++ /dev/null @@ -1,35 +0,0 @@ -* ASPEED AST2400 and AST2500 coprocessor interrupt controller - -This file describes the bindings for the interrupt controller present -in the AST2400 and AST2500 BMC SoCs which provides interrupt to the -ColdFire coprocessor. - -It is not a normal interrupt controller and it would be rather -inconvenient to create an interrupt tree for it as it somewhat shares -some of the same sources as the main ARM interrupt controller but with -different numbers. - -The AST2500 supports a SW generated interrupt - -Required properties: -- reg: address and length of the register for the device. -- compatible: "aspeed,cvic" and one of: - "aspeed,ast2400-cvic" - or - "aspeed,ast2500-cvic" - -- valid-sources: One cell, bitmap of supported sources for the implementation - -Optional properties; -- copro-sw-interrupts: List of interrupt numbers that can be used as - SW interrupts from the ARM to the coprocessor. 
- (AST2500 only) - -Example: - - cvic: copro-interrupt-controller@1e6c2000 { - compatible = "aspeed,ast2500-cvic"; - valid-sources = <0xffffffff>; - copro-sw-interrupts = <1>; - reg = <0x1e6c2000 0x80>; - }; diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml index c27a8f33d8d769..0840a3d925130b 100644 --- a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml +++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml @@ -26,6 +26,7 @@ properties: - mdsp - sdsp - cdsp + - cdsp1 memory-region: maxItems: 1 @@ -81,7 +82,7 @@ patternProperties: iommus: minItems: 1 - maxItems: 3 + maxItems: 10 qcom,nsessions: $ref: /schemas/types.yaml#/definitions/uint32 diff --git a/Documentation/devicetree/bindings/mmc/atmel,sama5d2-sdhci.yaml b/Documentation/devicetree/bindings/mmc/atmel,sama5d2-sdhci.yaml new file mode 100644 index 00000000000000..8c8ade88e8fe81 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/atmel,sama5d2-sdhci.yaml @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mmc/atmel,sama5d2-sdhci.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Atmel SDHCI controller + +maintainers: + - Aubin Constans + - Nicolas Ferre + +description: + Bindings for the SDHCI controller found in Atmel/Microchip SoCs. + +properties: + compatible: + oneOf: + - enum: + - atmel,sama5d2-sdhci + - microchip,sam9x60-sdhci + - items: + - enum: + - microchip,sam9x7-sdhci + - microchip,sama7g5-sdhci + - const: microchip,sam9x60-sdhci + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + items: + - description: hclock + - description: multclk + - description: baseclk + minItems: 2 + + clock-names: + items: + - const: hclock + - const: multclk + - const: baseclk + minItems: 2 + + microchip,sdcal-inverted: + type: boolean + description: + When present, polarity on the SDCAL SoC pin is inverted. The default + polarity for this signal is described in the datasheet. For instance on + SAMA5D2, the pin is usually tied to the GND with a resistor and a + capacitor (see "SDMMC I/O Calibration" chapter). 
+ +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + +allOf: + - $ref: sdhci-common.yaml# + - if: + properties: + compatible: + contains: + enum: + - atmel,sama5d2-sdhci + then: + properties: + clocks: + minItems: 3 + clock-names: + minItems: 3 + +unevaluatedProperties: false + +examples: + - | + #include + #include + mmc@a0000000 { + compatible = "atmel,sama5d2-sdhci"; + reg = <0xa0000000 0x300>; + interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>; + clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>; + clock-names = "hclock", "multclk", "baseclk"; + assigned-clocks = <&sdmmc0_gclk>; + assigned-clock-rates = <480000000>; + }; diff --git a/Documentation/devicetree/bindings/mmc/nuvoton,ma35d1-sdhci.yaml b/Documentation/devicetree/bindings/mmc/nuvoton,ma35d1-sdhci.yaml new file mode 100644 index 00000000000000..4d787147c300c8 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/nuvoton,ma35d1-sdhci.yaml @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mmc/nuvoton,ma35d1-sdhci.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Nuvoton MA35D1 SD/SDIO/MMC Controller + +maintainers: + - Shan-Chun Hung + +allOf: + - $ref: sdhci-common.yaml# + +properties: + compatible: + enum: + - nuvoton,ma35d1-sdhci + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + + pinctrl-names: + minItems: 1 + items: + - const: default + - const: state_uhs + + pinctrl-0: + description: + Should contain default/high speed pin ctrl. + maxItems: 1 + + pinctrl-1: + description: + Should contain uhs mode pin ctrl. + maxItems: 1 + + resets: + maxItems: 1 + + nuvoton,sys: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to access GCR (Global Control Register) registers. 
+ +required: + - compatible + - reg + - interrupts + - clocks + - pinctrl-names + - pinctrl-0 + - resets + - nuvoton,sys + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + + soc { + #address-cells = <2>; + #size-cells = <2>; + mmc@40190000 { + compatible = "nuvoton,ma35d1-sdhci"; + reg = <0x0 0x40190000 0x0 0x2000>; + interrupts = ; + clocks = <&clk SDH1_GATE>; + pinctrl-names = "default", "state_uhs"; + pinctrl-0 = <&pinctrl_sdhci1>; + pinctrl-1 = <&pinctrl_sdhci1_uhs>; + resets = <&sys MA35D1_RESET_SDH1>; + nuvoton,sys = <&sys>; + vqmmc-supply = <&sdhci1_vqmmc_regulator>; + bus-width = <8>; + max-frequency = <200000000>; + }; + }; diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml index 3d0e61e59856b9..af378b9ff3f426 100644 --- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml +++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml @@ -18,6 +18,7 @@ properties: - renesas,sdhi-r7s9210 # SH-Mobile AG5 - renesas,sdhi-r8a73a4 # R-Mobile APE6 - renesas,sdhi-r8a7740 # R-Mobile A1 + - renesas,sdhi-r9a09g057 # RZ/V2H(P) - renesas,sdhi-sh73a0 # R-Mobile APE6 - items: - enum: @@ -75,9 +76,13 @@ properties: minItems: 1 maxItems: 3 - clocks: true + clocks: + minItems: 1 + maxItems: 4 - clock-names: true + clock-names: + minItems: 1 + maxItems: 4 dmas: minItems: 4 @@ -118,7 +123,9 @@ allOf: properties: compatible: contains: - const: renesas,rzg2l-sdhi + enum: + - renesas,sdhi-r9a09g057 + - renesas,rzg2l-sdhi then: properties: clocks: diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml index 211cd0b0bc5f34..06df1269f2476e 100644 --- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml +++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml @@ -43,6 +43,8 @@ properties: - rockchip,rv1108-dw-mshc - rockchip,rv1126-dw-mshc - const: rockchip,rk3288-dw-mshc + # for Rockchip RK3576 with phase tuning inside the controller + - const: rockchip,rk3576-dw-mshc reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt deleted file mode 100644 index a9fb0a91245feb..00000000000000 --- a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt +++ /dev/null @@ -1,35 +0,0 @@ -* Atmel SDHCI controller - -This file documents the differences between the core properties in -Documentation/devicetree/bindings/mmc/mmc.txt and the properties used by the -sdhci-of-at91 driver. - -Required properties: -- compatible: Must be "atmel,sama5d2-sdhci" or "microchip,sam9x60-sdhci" - or "microchip,sam9x7-sdhci", "microchip,sam9x60-sdhci". -- clocks: Phandlers to the clocks. -- clock-names: Must be "hclock", "multclk", "baseclk" for - "atmel,sama5d2-sdhci". - Must be "hclock", "multclk" for "microchip,sam9x60-sdhci". - Must be "hclock", "multclk" for "microchip,sam9x7-sdhci". - -Optional properties: -- assigned-clocks: The same with "multclk". -- assigned-clock-rates The rate of "multclk" in order to not rely on the - gck configuration set by previous components. -- microchip,sdcal-inverted: when present, polarity on the SDCAL SoC pin is - inverted. The default polarity for this signal is described in the datasheet. - For instance on SAMA5D2, the pin is usually tied to the GND with a resistor - and a capacitor (see "SDMMC I/O Calibration" chapter). 
- -Example: - -mmc0: sdio-host@a0000000 { - compatible = "atmel,sama5d2-sdhci"; - reg = <0xa0000000 0x300>; - interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>; - clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>; - clock-names = "hclock", "multclk", "baseclk"; - assigned-clocks = <&sdmmc0_gclk>; - assigned-clock-rates = <480000000>; -}; diff --git a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml index 4d3031d9965f33..c3d5e0230af1a6 100644 --- a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml +++ b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml @@ -10,18 +10,20 @@ maintainers: - Ulf Hansson - Jisheng Zhang -allOf: - - $ref: mmc-controller.yaml# - properties: compatible: - enum: - - rockchip,rk3568-dwcmshc - - rockchip,rk3588-dwcmshc - - snps,dwcmshc-sdhci - - sophgo,cv1800b-dwcmshc - - sophgo,sg2002-dwcmshc - - thead,th1520-dwcmshc + oneOf: + - items: + - const: rockchip,rk3576-dwcmshc + - const: rockchip,rk3588-dwcmshc + - enum: + - rockchip,rk3568-dwcmshc + - rockchip,rk3588-dwcmshc + - snps,dwcmshc-sdhci + - sophgo,cv1800b-dwcmshc + - sophgo,sg2002-dwcmshc + - sophgo,sg2042-dwcmshc + - thead,th1520-dwcmshc reg: maxItems: 1 @@ -31,22 +33,14 @@ properties: clocks: minItems: 1 - items: - - description: core clock - - description: bus clock for optional - - description: axi clock for rockchip specified - - description: block clock for rockchip specified - - description: timer clock for rockchip specified - + maxItems: 5 clock-names: minItems: 1 - items: - - const: core - - const: bus - - const: axi - - const: block - - const: timer + maxItems: 5 + + power-domains: + maxItems: 1 resets: maxItems: 5 @@ -63,7 +57,6 @@ properties: description: Specify the number of delay for tx sampling. 
$ref: /schemas/types.yaml#/definitions/uint8 - required: - compatible - reg @@ -71,6 +64,60 @@ required: - clocks - clock-names +allOf: + - $ref: mmc-controller.yaml# + + - if: + properties: + compatible: + contains: + const: sophgo,sg2042-dwcmshc + + then: + properties: + clocks: + items: + - description: core clock + - description: bus clock + - description: timer clock + clock-names: + items: + - const: core + - const: bus + - const: timer + else: + properties: + clocks: + minItems: 1 + items: + - description: core clock + - description: bus clock for optional + - description: axi clock for rockchip specified + - description: block clock for rockchip specified + - description: timer clock for rockchip specified + clock-names: + minItems: 1 + items: + - const: core + - const: bus + - const: axi + - const: block + - const: timer + + - if: + properties: + compatible: + contains: + const: rockchip,rk3576-dwcmshc + + then: + required: + - power-domains + + else: + properties: + power-domains: false + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/mtd/technologic,nand.yaml b/Documentation/devicetree/bindings/mtd/technologic,nand.yaml new file mode 100644 index 00000000000000..f9d87c46094b8e --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/technologic,nand.yaml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/technologic,nand.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Technologic Systems NAND controller + +maintainers: + - Nikita Shubin + +allOf: + - $ref: nand-controller.yaml + +properties: + compatible: + oneOf: + - const: technologic,ts7200-nand + - items: + - enum: + - technologic,ts7300-nand + - technologic,ts7260-nand + - technologic,ts7250-nand + - const: technologic,ts7200-nand + + reg: + maxItems: 1 + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + nand-controller@60000000 { + compatible = "technologic,ts7200-nand"; + reg = <0x60000000 0x8000000>; + #address-cells = <1>; + #size-cells = <0>; + nand@0 { + reg = <0>; + }; + }; diff --git a/Documentation/devicetree/bindings/mtd/ti,gpmc-nand.yaml b/Documentation/devicetree/bindings/mtd/ti,gpmc-nand.yaml index 115682fa81b7b9..00540302bcae6a 100644 --- a/Documentation/devicetree/bindings/mtd/ti,gpmc-nand.yaml +++ b/Documentation/devicetree/bindings/mtd/ti,gpmc-nand.yaml @@ -61,12 +61,9 @@ properties: GPIO connection to R/B signal from NAND chip maxItems: 1 -patternProperties: - "@[0-9a-f]+$": - $ref: /schemas/mtd/partitions/partition.yaml - allOf: - $ref: /schemas/memory-controllers/ti,gpmc-child.yaml + - $ref: mtd.yaml# required: - compatible diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml index ee7a65b528cdac..d1e2bca3c50343 100644 --- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml @@ -58,18 +58,18 @@ allOf: - const: timing-adjustment amlogic,tx-delay-ns: - $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 2, 4, 6] + default: 2 description: - The internal RGMII TX clock delay (provided by this driver) in - nanoseconds. Allowed values are 0ns, 2ns, 4ns, 6ns. - When phy-mode is set to "rgmii" then the TX delay should be - explicitly configured. When not configured a fallback of 2ns is - used. 
When the phy-mode is set to either "rgmii-id" or "rgmii-txid" - the TX clock delay is already provided by the PHY. In that case - this property should be set to 0ns (which disables the TX clock - delay in the MAC to prevent the clock from going off because both - PHY and MAC are adding a delay). - Any configuration is ignored when the phy-mode is set to "rmii". + The internal RGMII TX clock delay (provided by this driver) + in nanoseconds. When phy-mode is set to "rgmii" then the TX + delay should be explicitly configured. When the phy-mode is + set to either "rgmii-id" or "rgmii-txid" the TX clock delay + is already provided by the PHY. In that case this property + should be set to 0ns (which disables the TX clock delay in + the MAC to prevent the clock from going off because both + PHY and MAC are adding a delay). Any configuration is + ignored when the phy-mode is set to "rmii". amlogic,rx-delay-ns: deprecated: true diff --git a/Documentation/devicetree/bindings/net/bluetooth/amlogic,w155s2-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/amlogic,w155s2-bt.yaml new file mode 100644 index 00000000000000..6fd7557039d22e --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/amlogic,w155s2-bt.yaml @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright (C) 2024 Amlogic, Inc. All rights reserved +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/bluetooth/amlogic,w155s2-bt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Amlogic Bluetooth chips + +description: + The W155S2 is an Amlogic Bluetooth and Wi-Fi combo chip. It works on + the standard H4 protocol via a 4-wire UART interface, with baud rates + up to 4 Mbps. + +maintainers: + - Yang Li + +properties: + compatible: + oneOf: + - items: + - enum: + - amlogic,w265s1-bt + - amlogic,w265p1-bt + - const: amlogic,w155s2-bt + - enum: + - amlogic,w155s2-bt + - amlogic,w265s2-bt + + clocks: + maxItems: 1 + description: clock provided to the controller (32.768KHz) + + enable-gpios: + maxItems: 1 + + vddio-supply: + description: VDD_IO supply regulator handle + + firmware-name: + maxItems: 1 + description: specify the path of firmware bin to load + +required: + - compatible + - clocks + - enable-gpios + - vddio-supply + - firmware-name + +additionalProperties: false + +examples: + - | + #include + bluetooth { + compatible = "amlogic,w155s2-bt"; + clocks = <&extclk>; + enable-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>; + vddio-supply = <&wcn_3v3>; + firmware-name = "amlogic/aml_w155s2_bt_uart.bin"; + }; + diff --git a/Documentation/devicetree/bindings/net/bluetooth/brcm,bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/brcm,bluetooth.yaml new file mode 100644 index 00000000000000..3c410cadff2304 --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/brcm,bluetooth.yaml @@ -0,0 +1,163 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/bluetooth/brcm,bluetooth.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Broadcom Bluetooth Chips + +maintainers: + - Linus Walleij + +description: + This binding describes Broadcom UART-attached bluetooth chips. 
+ +properties: + compatible: + oneOf: + - items: + - enum: + - infineon,cyw43439-bt + - const: brcm,bcm4329-bt + - enum: + - brcm,bcm20702a1 + - brcm,bcm4329-bt + - brcm,bcm4330-bt + - brcm,bcm4334-bt + - brcm,bcm43430a0-bt + - brcm,bcm43430a1-bt + - brcm,bcm43438-bt + - brcm,bcm4345c5 + - brcm,bcm43540-bt + - brcm,bcm4335a0 + - brcm,bcm4349-bt + - cypress,cyw4373a0-bt + - infineon,cyw55572-bt + + shutdown-gpios: + maxItems: 1 + description: GPIO specifier for the line BT_REG_ON used to + power on the BT module + + reset-gpios: + maxItems: 1 + description: GPIO specifier for the line BT_RST_N used to + reset the BT module. This should be marked as + GPIO_ACTIVE_LOW. + + device-wakeup-gpios: + maxItems: 1 + description: GPIO specifier for the line BT_WAKE used to + wakeup the controller. This is using the BT_GPIO_0 + pin on the chip when in use. + + host-wakeup-gpios: + maxItems: 1 + deprecated: true + description: GPIO specifier for the line HOST_WAKE used + to wakeup the host processor. This is using he BT_GPIO_1 + pin on the chip when in use. This is deprecated and replaced + by interrupts and "host-wakeup" interrupt-names + + clocks: + minItems: 1 + maxItems: 2 + description: 1 or 2 clocks as defined in clock-names below, + in that order + + clock-names: + description: Names of the 1 to 2 supplied clocks + oneOf: + - const: extclk + deprecated: true + description: Deprecated in favor of txco + + - const: txco + description: > + external reference clock (not a standalone crystal) + + - const: lpo + description: > + external low power 32.768 kHz clock + + - items: + - const: txco + - const: lpo + + vbat-supply: + description: phandle to regulator supply for VBAT + + vddio-supply: + description: phandle to regulator supply for VDDIO + + brcm,bt-pcm-int-params: + $ref: /schemas/types.yaml#/definitions/uint8-array + minItems: 5 + maxItems: 5 + description: |- + configure PCM parameters via a 5-byte array: + sco-routing: 0 = PCM, 1 = Transport, 2 = Codec, 3 = I2S + pcm-interface-rate: 128KBps, 256KBps, 512KBps, 1024KBps, 2048KBps + pcm-frame-type: short, long + pcm-sync-mode: slave, master + pcm-clock-mode: slave, master + + brcm,requires-autobaud-mode: + type: boolean + description: + Set this property if autobaud mode is required. Autobaud mode is required + if the device's initial baud rate in normal mode is not supported by the + host or if the device requires autobaud mode startup before loading FW. + + interrupts: + items: + - description: Handle to the line HOST_WAKE used to wake + up the host processor. This uses the BT_GPIO_1 pin on + the chip when in use. 
+ + interrupt-names: + items: + - const: host-wakeup + +required: + - compatible + +dependencies: + brcm,requires-autobaud-mode: [ shutdown-gpios ] + +allOf: + - $ref: /schemas/serial/serial-peripheral-props.yaml# + - if: + not: + properties: + compatible: + contains: + enum: + - brcm,bcm20702a1 + - brcm,bcm4329-bt + - brcm,bcm4330-bt + then: + properties: + reset-gpios: false + +unevaluatedProperties: false + +examples: + - | + #include + #include + + uart { + uart-has-rtscts; + + bluetooth { + compatible = "brcm,bcm4330-bt"; + max-speed = <921600>; + brcm,bt-pcm-int-params = [01 02 00 01 01]; + shutdown-gpios = <&gpio 30 GPIO_ACTIVE_HIGH>; + device-wakeup-gpios = <&gpio 7 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 9 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio>; + interrupts = <8 IRQ_TYPE_EDGE_FALLING>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/bluetooth/marvell,88w8897.yaml b/Documentation/devicetree/bindings/net/bluetooth/marvell,88w8897.yaml new file mode 100644 index 00000000000000..2fc36874deb74b --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/marvell,88w8897.yaml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/bluetooth/marvell,88w8897.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Marvell Bluetooth chips + +description: | + This documents the binding structure and common properties for serial + attached Marvell Bluetooth devices. + +maintainers: + - Rob Herring + +properties: + compatible: + enum: + - mrvl,88w8897 + - mrvl,88w8997 + + max-speed: true + +required: + - compatible + +allOf: + - $ref: /schemas/serial/serial-peripheral-props.yaml# + - if: + properties: + compatible: + contains: + const: mrvl,88w8997 + then: + properties: + max-speed: true + else: + properties: + max-speed: false + +additionalProperties: false + +examples: + - | + serial { + bluetooth { + compatible = "mrvl,88w8897"; + }; + }; diff --git a/Documentation/devicetree/bindings/net/bluetooth/mediatek,bluetooth.txt b/Documentation/devicetree/bindings/net/bluetooth/mediatek,bluetooth.txt new file mode 100644 index 00000000000000..988c72685cbfd4 --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/mediatek,bluetooth.txt @@ -0,0 +1,80 @@ +MediaTek UART based Bluetooth Devices +================================== + +This device is a serial attached device to UART device and thus it must be a +child node of the serial node with UART. + +Please refer to the following documents for generic properties: + + Documentation/devicetree/bindings/serial/serial.yaml + +Required properties: + +- compatible: Must be + "mediatek,mt7663u-bluetooth": for MT7663U device + "mediatek,mt7668u-bluetooth": for MT7668U device +- vcc-supply: Main voltage regulator + +If the pin controller on the platform can support both pinmux and GPIO +control such as the most of MediaTek platform. Please use below properties. + +- pinctrl-names: Should be "default", "runtime" +- pinctrl-0: Should contain UART RXD low when the device is powered up to + enter proper bootstrap mode. +- pinctrl-1: Should contain UART mode pin ctrl + +Else, the pin controller on the platform only can support pinmux control and +the GPIO control still has to rely on the dedicated GPIO controller such as +a legacy MediaTek SoC, MT7621. Please use the below properties. 
+ +- boot-gpios: GPIO same to the pin as UART RXD and used to keep LOW when + the device is powered up to enter proper bootstrap mode when +- pinctrl-names: Should be "default" +- pinctrl-0: Should contain UART mode pin ctrl + +Optional properties: + +- reset-gpios: GPIO used to reset the device whose initial state keeps low, + if the GPIO is missing, then board-level design should be + guaranteed. +- clocks: Should be the clock specifiers corresponding to the entry in + clock-names property. If the clock is missing, then board-level + design should be guaranteed. +- clock-names: Should contain "osc" entry for the external oscillator. +- current-speed: Current baud rate of the device whose defaults to 921600 + +Example: + + uart1_pins_boot: uart1-default { + pins-dat { + pinmux = ; + output-low; + }; + }; + + uart1_pins_runtime: uart1-runtime { + pins-dat { + pinmux = , + ; + }; + }; + + uart1: serial@11003000 { + compatible = "mediatek,mt7623-uart", + "mediatek,mt6577-uart"; + reg = <0 0x11003000 0 0x400>; + interrupts = ; + clocks = <&pericfg CLK_PERI_UART1_SEL>, + <&pericfg CLK_PERI_UART1>; + clock-names = "baud", "bus"; + + bluetooth { + compatible = "mediatek,mt7663u-bluetooth"; + vcc-supply = <®_5v>; + reset-gpios = <&pio 24 GPIO_ACTIVE_LOW>; + pinctrl-names = "default", "runtime"; + pinctrl-0 = <&uart1_pins_boot>; + pinctrl-1 = <&uart1_pins_runtime>; + current-speed = <921600>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/bluetooth/nokia,h4p-bluetooth.txt b/Documentation/devicetree/bindings/net/bluetooth/nokia,h4p-bluetooth.txt new file mode 100644 index 00000000000000..42be7dc9a70ba7 --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/nokia,h4p-bluetooth.txt @@ -0,0 +1,51 @@ +Nokia Bluetooth Chips +--------------------- + +Nokia phones often come with UART connected bluetooth chips from different +vendors and modified device API. Those devices speak a protocol named H4+ +(also known as h4p) by Nokia, which is similar to the H4 protocol from the +Bluetooth standard. In addition to the H4 protocol it specifies two more +UART status lines for wakeup of UART transceivers to improve power management +and a few new packet types used to negotiate uart speed. 
+ +Required properties: + + - compatible: should contain "nokia,h4p-bluetooth" as well as one of the following: + * "brcm,bcm2048-nokia" + * "ti,wl1271-bluetooth-nokia" + - reset-gpios: GPIO specifier, used to reset the BT module (active low) + - bluetooth-wakeup-gpios: GPIO specifier, used to wakeup the BT module (active high) + - host-wakeup-gpios: GPIO specifier, used to wakeup the host processor (active high) + - clock-names: should be "sysclk" + - clocks: should contain a clock specifier for every name in clock-names + +Optional properties: + + - None + +Example: + +/ { + /* controlled (enabled/disabled) directly by BT module */ + bluetooth_clk: vctcxo { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <38400000>; + }; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&uart2_pins>; + + bluetooth { + compatible = "ti,wl1271-bluetooth-nokia", "nokia,h4p-bluetooth"; + + reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>; /* gpio26 */ + host-wakeup-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* gpio101 */ + bluetooth-wakeup-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>; /* gpio37 */ + + clocks = <&bluetooth_clk>; + clock-names = "sysclk"; + }; +}; diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml index 68c5ed1114178c..7bb68311c609d2 100644 --- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml @@ -72,7 +72,7 @@ properties: description: VDD_RFA_CMN supply regulator handle vddrfa0p8-supply: - description: VDD_RFA_0P8 suppply regulator handle + description: VDD_RFA_0P8 supply regulator handle vddrfa1p7-supply: description: VDD_RFA_1P7 supply regulator handle @@ -98,8 +98,7 @@ properties: vddwlmx-supply: description: VDD_WLMX supply regulator handle - max-speed: - description: see Documentation/devicetree/bindings/serial/serial.yaml + max-speed: true firmware-name: description: specify the name of nvm firmware to load @@ -118,6 +117,7 @@ additionalProperties: false allOf: - $ref: bluetooth-controller.yaml# + - $ref: /schemas/serial/serial-peripheral-props.yaml# - if: properties: compatible: @@ -172,14 +172,14 @@ allOf: - qcom,wcn6855-bt then: required: - - enable-gpios - - swctrl-gpios - - vddio-supply - - vddbtcxmx-supply - vddrfacmn-supply + - vddaon-supply + - vddwlcx-supply + - vddwlmx-supply + - vddbtcmx-supply - vddrfa0p8-supply - vddrfa1p2-supply - - vddrfa1p7-supply + - vddrfa1p8-supply - if: properties: compatible: diff --git a/Documentation/devicetree/bindings/net/bluetooth/realtek,bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/realtek,bluetooth.yaml new file mode 100644 index 00000000000000..7d567122bac9bb --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/realtek,bluetooth.yaml @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/bluetooth/realtek,bluetooth.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: RTL8723BS/RTL8723CS/RTL8821CS/RTL8822CS Bluetooth + +maintainers: + - Vasily Khoruzhick + - Alistair Francis + +description: + RTL8723BS/RTL8723CS/RTL8821CS/RTL8822CS is a WiFi + BT chip. WiFi part + is connected over SDIO, while BT is connected over serial. It speaks + H5 protocol with few extra commands to upload firmware and change + module speed. 
+ +properties: + compatible: + oneOf: + - enum: + - realtek,rtl8723bs-bt + - realtek,rtl8723cs-bt + - realtek,rtl8723ds-bt + - realtek,rtl8822cs-bt + - items: + - enum: + - realtek,rtl8821cs-bt + - const: realtek,rtl8723bs-bt + + device-wake-gpios: + maxItems: 1 + description: GPIO specifier, used to wakeup the BT module + + enable-gpios: + maxItems: 1 + description: GPIO specifier, used to enable the BT module + + host-wake-gpios: + maxItems: 1 + description: GPIO specifier, used to wakeup the host processor + + max-speed: true + +required: + - compatible + +allOf: + - $ref: /schemas/serial/serial-peripheral-props.yaml# + +additionalProperties: false + +examples: + - | + #include + + uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>; + uart-has-rtscts; + + bluetooth { + compatible = "realtek,rtl8723bs-bt"; + device-wake-gpios = <&r_pio 0 5 GPIO_ACTIVE_HIGH>; /* PL5 */ + host-wake-gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */ + }; + }; diff --git a/Documentation/devicetree/bindings/net/bluetooth/ti,bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/ti,bluetooth.yaml new file mode 100644 index 00000000000000..290abc22e18aa1 --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/ti,bluetooth.yaml @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/bluetooth/ti,bluetooth.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments Bluetooth Chips + +maintainers: + - David Lechner + +description: | + This documents the binding structure and common properties for serial + attached TI Bluetooth devices. The following chips are included in this + binding: + + * TI CC256x Bluetooth devices + * TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices + + TI WiLink devices have a UART interface for providing Bluetooth, FM radio, + and GPS over what's called "shared transport". The shared transport is + standard BT HCI protocol with additional channels for the other functions. + + TI WiLink devices also have a separate WiFi interface as described in + wireless/ti,wlcore.yaml. + + This bindings follows the UART slave device binding in ../serial/serial.yaml. + +properties: + compatible: + enum: + - ti,cc2560 + - ti,wl1271-st + - ti,wl1273-st + - ti,wl1281-st + - ti,wl1283-st + - ti,wl1285-st + - ti,wl1801-st + - ti,wl1805-st + - ti,wl1807-st + - ti,wl1831-st + - ti,wl1835-st + - ti,wl1837-st + + enable-gpios: + maxItems: 1 + + vio-supply: + description: Vio input supply (1.8V) + + vbat-supply: + description: Vbat input supply (2.9-4.8V) + + clocks: + maxItems: 1 + + clock-names: + items: + - const: ext_clock + + max-speed: + default: 3000000 + + nvmem-cells: + maxItems: 1 + description: + Nvmem data cell that contains a 6 byte BD address with the most + significant byte first (big-endian). 
+ + nvmem-cell-names: + items: + - const: bd-address + +required: + - compatible + +allOf: + - $ref: /schemas/serial/serial-peripheral-props.yaml# + +additionalProperties: false + +examples: + - | + #include + + serial { + bluetooth { + compatible = "ti,wl1835-st"; + enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; + clocks = <&clk32k_wl18xx>; + clock-names = "ext_clock"; + nvmem-cells = <&bd_address>; + nvmem-cell-names = "bd-address"; + }; + }; diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml b/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml deleted file mode 100644 index 4a1bfc2b35849c..00000000000000 --- a/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml +++ /dev/null @@ -1,164 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/net/broadcom-bluetooth.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Broadcom Bluetooth Chips - -maintainers: - - Linus Walleij - -description: - This binding describes Broadcom UART-attached bluetooth chips. - -properties: - compatible: - oneOf: - - items: - - enum: - - infineon,cyw43439-bt - - const: brcm,bcm4329-bt - - enum: - - brcm,bcm20702a1 - - brcm,bcm4329-bt - - brcm,bcm4330-bt - - brcm,bcm4334-bt - - brcm,bcm43430a0-bt - - brcm,bcm43430a1-bt - - brcm,bcm43438-bt - - brcm,bcm4345c5 - - brcm,bcm43540-bt - - brcm,bcm4335a0 - - brcm,bcm4349-bt - - cypress,cyw4373a0-bt - - infineon,cyw55572-bt - - shutdown-gpios: - maxItems: 1 - description: GPIO specifier for the line BT_REG_ON used to - power on the BT module - - reset-gpios: - maxItems: 1 - description: GPIO specifier for the line BT_RST_N used to - reset the BT module. This should be marked as - GPIO_ACTIVE_LOW. - - device-wakeup-gpios: - maxItems: 1 - description: GPIO specifier for the line BT_WAKE used to - wakeup the controller. This is using the BT_GPIO_0 - pin on the chip when in use. - - host-wakeup-gpios: - maxItems: 1 - deprecated: true - description: GPIO specifier for the line HOST_WAKE used - to wakeup the host processor. This is using he BT_GPIO_1 - pin on the chip when in use. This is deprecated and replaced - by interrupts and "host-wakeup" interrupt-names - - clocks: - minItems: 1 - maxItems: 2 - description: 1 or 2 clocks as defined in clock-names below, - in that order - - clock-names: - description: Names of the 1 to 2 supplied clocks - oneOf: - - const: extclk - deprecated: true - description: Deprecated in favor of txco - - - const: txco - description: > - external reference clock (not a standalone crystal) - - - const: lpo - description: > - external low power 32.768 kHz clock - - - items: - - const: txco - - const: lpo - - vbat-supply: - description: phandle to regulator supply for VBAT - - vddio-supply: - description: phandle to regulator supply for VDDIO - - brcm,bt-pcm-int-params: - $ref: /schemas/types.yaml#/definitions/uint8-array - minItems: 5 - maxItems: 5 - description: |- - configure PCM parameters via a 5-byte array: - sco-routing: 0 = PCM, 1 = Transport, 2 = Codec, 3 = I2S - pcm-interface-rate: 128KBps, 256KBps, 512KBps, 1024KBps, 2048KBps - pcm-frame-type: short, long - pcm-sync-mode: slave, master - pcm-clock-mode: slave, master - - brcm,requires-autobaud-mode: - type: boolean - description: - Set this property if autobaud mode is required. Autobaud mode is required - if the device's initial baud rate in normal mode is not supported by the - host or if the device requires autobaud mode startup before loading FW. 
- - interrupts: - items: - - description: Handle to the line HOST_WAKE used to wake - up the host processor. This uses the BT_GPIO_1 pin on - the chip when in use. - - interrupt-names: - items: - - const: host-wakeup - - max-speed: true - current-speed: true - -required: - - compatible - -dependencies: - brcm,requires-autobaud-mode: [ shutdown-gpios ] - -if: - not: - properties: - compatible: - contains: - enum: - - brcm,bcm20702a1 - - brcm,bcm4329-bt - - brcm,bcm4330-bt -then: - properties: - reset-gpios: false - -additionalProperties: false - -examples: - - | - #include - #include - - uart { - uart-has-rtscts; - - bluetooth { - compatible = "brcm,bcm4330-bt"; - max-speed = <921600>; - brcm,bt-pcm-int-params = [01 02 00 01 01]; - shutdown-gpios = <&gpio 30 GPIO_ACTIVE_HIGH>; - device-wakeup-gpios = <&gpio 7 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio 9 GPIO_ACTIVE_LOW>; - interrupt-parent = <&gpio>; - interrupts = <8 IRQ_TYPE_EDGE_FALLING>; - }; - }; diff --git a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml index f197d9b516bb2a..97dd1a7c5ed26b 100644 --- a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml +++ b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml @@ -17,6 +17,7 @@ properties: compatible: oneOf: - enum: + - fsl,imx95-flexcan - fsl,imx93-flexcan - fsl,imx8qm-flexcan - fsl,imx8mp-flexcan @@ -38,9 +39,6 @@ properties: - fsl,imx6ul-flexcan - fsl,imx6sx-flexcan - const: fsl,imx6q-flexcan - - items: - - const: fsl,imx95-flexcan - - const: fsl,imx93-flexcan - items: - enum: - fsl,ls1028ar1-flexcan @@ -80,6 +78,10 @@ properties: node then controller is assumed to be little endian. If this property is present then controller is assumed to be big endian. + can-transceiver: + $ref: can-transceiver.yaml# + unevaluatedProperties: false + fsl,stop-mode: description: | Register bits of stop mode control. diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml new file mode 100644 index 00000000000000..db446dde684204 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/can/microchip,mcp2510.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip MCP251X stand-alone CAN controller + +maintainers: + - Marc Kleine-Budde + +properties: + compatible: + enum: + - microchip,mcp2510 + - microchip,mcp2515 + - microchip,mcp25625 + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + interrupts: + maxItems: 1 + + vdd-supply: + description: Regulator that powers the CAN controller. + + xceiver-supply: + description: Regulator that powers the CAN transceiver. 
+ + gpio-controller: true + + "#gpio-cells": + const: 2 + +required: + - compatible + - reg + - clocks + - interrupts + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + can@1 { + compatible = "microchip,mcp2515"; + reg = <1>; + clocks = <&clk24m>; + interrupt-parent = <&gpio4>; + interrupts = <13 IRQ_TYPE_LEVEL_LOW>; + vdd-supply = <®5v0>; + xceiver-supply = <®5v0>; + gpio-controller; + #gpio-cells = <2>; + }; + }; + diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt deleted file mode 100644 index 381f8fb3e865a0..00000000000000 --- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt +++ /dev/null @@ -1,30 +0,0 @@ -* Microchip MCP251X stand-alone CAN controller device tree bindings - -Required properties: - - compatible: Should be one of the following: - - "microchip,mcp2510" for MCP2510. - - "microchip,mcp2515" for MCP2515. - - "microchip,mcp25625" for MCP25625. - - reg: SPI chip select. - - clocks: The clock feeding the CAN controller. - - interrupts: Should contain IRQ line for the CAN controller. - -Optional properties: - - vdd-supply: Regulator that powers the CAN controller. - - xceiver-supply: Regulator that powers the CAN transceiver. - - gpio-controller: Indicates this device is a GPIO controller. - - #gpio-cells: Should be two. The first cell is the pin number and - the second cell is used to specify the gpio polarity. - -Example: - can0: can@1 { - compatible = "microchip,mcp2515"; - reg = <1>; - clocks = <&clk24m>; - interrupt-parent = <&gpio4>; - interrupts = <13 IRQ_TYPE_LEVEL_LOW>; - vdd-supply = <®5v0>; - xceiver-supply = <®5v0>; - gpio-controller; - #gpio-cells = <2>; - }; diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml index d3f45d29fa0a55..7c5ac5d2e880bb 100644 --- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml +++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml @@ -32,6 +32,7 @@ properties: - enum: - renesas,r8a779a0-canfd # R-Car V3U - renesas,r8a779g0-canfd # R-Car V4H + - renesas,r8a779h0-canfd # R-Car V4M - const: renesas,rcar-gen4-canfd # R-Car Gen4 - items: @@ -163,14 +164,23 @@ allOf: maxItems: 1 - if: - not: - properties: - compatible: - contains: - const: renesas,rcar-gen4-canfd + properties: + compatible: + contains: + const: renesas,r8a779h0-canfd then: patternProperties: - "^channel[2-7]$": false + "^channel[5-7]$": false + else: + if: + not: + properties: + compatible: + contains: + const: renesas,rcar-gen4-canfd + then: + patternProperties: + "^channel[2-7]$": false unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/net/can/rockchip,rk3568v2-canfd.yaml b/Documentation/devicetree/bindings/net/can/rockchip,rk3568v2-canfd.yaml new file mode 100644 index 00000000000000..a077c033001378 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/rockchip,rk3568v2-canfd.yaml @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/can/rockchip,rk3568v2-canfd.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: + Rockchip CAN-FD controller + +maintainers: + - Marc Kleine-Budde + +allOf: + - $ref: can-controller.yaml# + +properties: + compatible: + oneOf: + - const: 
rockchip,rk3568v2-canfd + - items: + - const: rockchip,rk3568v3-canfd + - const: rockchip,rk3568v2-canfd + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 2 + + clock-names: + items: + - const: baud + - const: pclk + + resets: + maxItems: 2 + + reset-names: + items: + - const: core + - const: apb + +required: + - compatible + - reg + - interrupts + - clocks + - resets + +additionalProperties: false + +examples: + - | + #include + #include + #include + + soc { + #address-cells = <2>; + #size-cells = <2>; + + can@fe570000 { + compatible = "rockchip,rk3568v2-canfd"; + reg = <0x0 0xfe570000 0x0 0x1000>; + interrupts = ; + clocks = <&cru CLK_CAN0>, <&cru PCLK_CAN0>; + clock-names = "baud", "pclk"; + resets = <&cru SRST_CAN0>, <&cru SRST_P_CAN0>; + reset-names = "core", "apb"; + }; + }; diff --git a/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml b/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml new file mode 100644 index 00000000000000..ad0915307095e3 --- /dev/null +++ b/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/cirrus,ep9301-eth.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: EP93xx SoC Ethernet Controller + +maintainers: + - Alexander Sverdlin + - Nikita Shubin + +allOf: + - $ref: ethernet-controller.yaml# + +properties: + compatible: + oneOf: + - const: cirrus,ep9301-eth + - items: + - enum: + - cirrus,ep9302-eth + - cirrus,ep9307-eth + - cirrus,ep9312-eth + - cirrus,ep9315-eth + - const: cirrus,ep9301-eth + + reg: + items: + - description: The physical base address and size of IO range + + interrupts: + items: + - description: Combined signal for various interrupt events + + phy-handle: true + + mdio: + $ref: mdio.yaml# + unevaluatedProperties: false + description: optional node for embedded MDIO controller + +required: + - compatible + - reg + - interrupts + - phy-handle + +additionalProperties: false + +examples: + - | + ethernet@80010000 { + compatible = "cirrus,ep9301-eth"; + reg = <0x80010000 0x10000>; + interrupt-parent = <&vic1>; + interrupts = <7>; + phy-handle = <&phy0>; + }; diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml index 7e405ad96eb27c..ea979bcae1d6ea 100644 --- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml +++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml @@ -92,6 +92,10 @@ properties: Built-in switch of the MT7988 SoC const: mediatek,mt7988-switch + - description: + Built-in switch of the Airoha EN7581 SoC + const: airoha,en7581-switch + reg: maxItems: 1 @@ -284,7 +288,9 @@ allOf: - if: properties: compatible: - const: mediatek,mt7988-switch + enum: + - mediatek,mt7988-switch + - airoha,en7581-switch then: $ref: "#/$defs/mt7530-dsa-port" properties: diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml index 52acc15ebcbf36..30c0c3e6f37a4a 100644 --- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml +++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml @@ -22,7 +22,9 @@ properties: - microchip,ksz8794 - microchip,ksz8795 - microchip,ksz8863 + - microchip,ksz8864 # 4-port version of KSZ8895 family switch - microchip,ksz8873 + - microchip,ksz8895 # 5-port version of KSZ8895 family switch - microchip,ksz9477 - 
microchip,ksz9897 - microchip,ksz9896 @@ -51,6 +53,11 @@ properties: Set if the output SYNCLKO clock should be disabled. Do not mix with microchip,synclko-125. + microchip,pme-active-high: + $ref: /schemas/types.yaml#/definitions/flag + description: + Indicates if the PME pin polarity is active-high. + microchip,io-drive-strength-microamp: description: IO Pad Drive Strength diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml index b99d7a694b70ed..51cf574249becd 100644 --- a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml +++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml @@ -52,6 +52,25 @@ properties: allOf: - $ref: dsa.yaml#/$defs/ethernet-ports +patternProperties: + "^(ethernet-)?ports$": + additionalProperties: true + patternProperties: + "^(ethernet-)?port@6$": + allOf: + - if: + properties: + phy-mode: + contains: + enum: + - rgmii + then: + properties: + rx-internal-delay-ps: + $ref: "#/$defs/internal-delay-ps" + tx-internal-delay-ps: + $ref: "#/$defs/internal-delay-ps" + # This checks if reg is a chipselect so the device is on an SPI # bus, the if-clause will fail if reg is a tuple such as for a # platform device. @@ -67,6 +86,15 @@ required: - compatible - reg +$defs: + internal-delay-ps: + description: + Disable tunable delay lines using 0 ps, or enable them and select + the phase between 1400 ps and 2000 ps in increments of 300 ps. + default: 2000 + enum: + [0, 1400, 1700, 2000] + unevaluatedProperties: false examples: @@ -108,6 +136,8 @@ examples: reg = <6>; ethernet = <&gmac1>; phy-mode = "rgmii"; + rx-internal-delay-ps = <0>; + tx-internal-delay-ps = <0>; fixed-link { speed = <1000>; full-duplex; @@ -150,6 +180,8 @@ examples: ethernet-port@6 { reg = <6>; ethernet = <&enet0>; + rx-internal-delay-ps = <0>; + tx-internal-delay-ps = <0>; phy-mode = "rgmii"; fixed-link { speed = <1000>; diff --git a/Documentation/devicetree/bindings/net/fsl,cpm-enet.yaml b/Documentation/devicetree/bindings/net/fsl,cpm-enet.yaml new file mode 100644 index 00000000000000..da836477e8bad7 --- /dev/null +++ b/Documentation/devicetree/bindings/net/fsl,cpm-enet.yaml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/fsl,cpm-enet.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Network for cpm enet + +maintainers: + - Frank Li + +properties: + compatible: + oneOf: + - enum: + - fsl,cpm1-scc-enet + - fsl,cpm2-scc-enet + - fsl,cpm1-fec-enet + - fsl,cpm2-fcc-enet + - fsl,qe-enet + - items: + - enum: + - fsl,mpc8272-fcc-enet + - const: fsl,cpm2-fcc-enet + + reg: + minItems: 1 + maxItems: 3 + + interrupts: + maxItems: 1 + + fsl,cpm-command: + $ref: /schemas/types.yaml#/definitions/uint32 + description: cpm command + +required: + - compatible + - reg + - interrupts + +allOf: + - $ref: ethernet-controller.yaml + +unevaluatedProperties: false + +examples: + - | + ethernet@11300 { + compatible = "fsl,mpc8272-fcc-enet", + "fsl,cpm2-fcc-enet"; + reg = <0x11300 0x20 0x8400 0x100 0x11390 1>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <20 8>; + interrupt-parent = <&pic>; + phy-handle = <&phy0>; + fsl,cpm-command = <0x12000300>; + }; + diff --git a/Documentation/devicetree/bindings/net/fsl,cpm-mdio.yaml b/Documentation/devicetree/bindings/net/fsl,cpm-mdio.yaml new file mode 100644 index 00000000000000..b1791a3c490e2f --- /dev/null +++ 
b/Documentation/devicetree/bindings/net/fsl,cpm-mdio.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/fsl,cpm-mdio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale CPM MDIO Device + +maintainers: + - Frank Li + +properties: + compatible: + oneOf: + - enum: + - fsl,pq1-fec-mdio + - fsl,cpm2-mdio-bitbang + - items: + - const: fsl,mpc8272ads-mdio-bitbang + - const: fsl,mpc8272-mdio-bitbang + - const: fsl,cpm2-mdio-bitbang + + reg: + maxItems: 1 + + fsl,mdio-pin: + $ref: /schemas/types.yaml#/definitions/uint32 + description: pin of port C controlling mdio data + + fsl,mdc-pin: + $ref: /schemas/types.yaml#/definitions/uint32 + description: pin of port C controlling mdio clock + +required: + - compatible + - reg + +allOf: + - $ref: mdio.yaml# + +unevaluatedProperties: false + +examples: + - | + mdio@10d40 { + compatible = "fsl,mpc8272ads-mdio-bitbang", + "fsl,mpc8272-mdio-bitbang", + "fsl,cpm2-mdio-bitbang"; + reg = <0x10d40 0x14>; + #address-cells = <1>; + #size-cells = <0>; + fsl,mdio-pin = <12>; + fsl,mdc-pin = <13>; + }; + diff --git a/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml b/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml index 42f9843d1868ac..be8a2163b73edb 100644 --- a/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml +++ b/Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml @@ -24,20 +24,12 @@ properties: maxItems: 1 description: The DPMAC number - phy-handle: true - - phy-connection-type: true - - phy-mode: true - pcs-handle: maxItems: 1 description: A reference to a node representing a PCS PHY device found on the internal MDIO bus. - managed: true - phys: description: A reference to the SerDes lane(s) maxItems: 1 @@ -45,7 +37,7 @@ properties: required: - reg -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml b/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml deleted file mode 100644 index 188a42ca6cebea..00000000000000 --- a/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/net/marvell-bluetooth.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Marvell Bluetooth chips - -description: | - This documents the binding structure and common properties for serial - attached Marvell Bluetooth devices. - -maintainers: - - Rob Herring - -properties: - compatible: - enum: - - mrvl,88w8897 - - mrvl,88w8997 - - max-speed: - description: see Documentation/devicetree/bindings/serial/serial.yaml - -required: - - compatible - -allOf: - - if: - properties: - compatible: - contains: - const: mrvl,88w8997 - then: - properties: - max-speed: true - else: - properties: - max-speed: false - -additionalProperties: false - -examples: - - | - serial { - bluetooth { - compatible = "mrvl,88w8897"; - }; - }; diff --git a/Documentation/devicetree/bindings/net/maxim,ds26522.txt b/Documentation/devicetree/bindings/net/maxim,ds26522.txt deleted file mode 100644 index ee8bb725f245a6..00000000000000 --- a/Documentation/devicetree/bindings/net/maxim,ds26522.txt +++ /dev/null @@ -1,13 +0,0 @@ -* Maxim (Dallas) DS26522 Dual T1/E1/J1 Transceiver - -Required properties: -- compatible: Should contain "maxim,ds26522". -- reg: SPI CS. -- spi-max-frequency: SPI clock. 
- -Example: - slic@1 { - compatible = "maxim,ds26522"; - reg = <1>; - spi-max-frequency = <2000000>; /* input clock */ - }; diff --git a/Documentation/devicetree/bindings/net/maxim,ds26522.yaml b/Documentation/devicetree/bindings/net/maxim,ds26522.yaml new file mode 100644 index 00000000000000..6c97eda217e83b --- /dev/null +++ b/Documentation/devicetree/bindings/net/maxim,ds26522.yaml @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/maxim,ds26522.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Maxim (Dallas) DS26522 Dual T1/E1/J1 Transceiver + +maintainers: + - Frank Li + +properties: + compatible: + items: + - const: maxim,ds26522 + + reg: + maxItems: 1 + +required: + - compatible + - reg + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml + +unevaluatedProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + transceiver@1 { + compatible = "maxim,ds26522"; + reg = <1>; + spi-max-frequency = <2000000>; /* input clock */ + }; + }; diff --git a/Documentation/devicetree/bindings/net/mdio.yaml b/Documentation/devicetree/bindings/net/mdio.yaml index a266ade918ca7c..bed3987a8fbf61 100644 --- a/Documentation/devicetree/bindings/net/mdio.yaml +++ b/Documentation/devicetree/bindings/net/mdio.yaml @@ -19,7 +19,7 @@ description: properties: $nodename: - pattern: "^mdio(@.*)?" + pattern: '^mdio(-(bus|external))?(@.+|-([0-9]+))?$' "#address-cells": const: 1 diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml index 686b5c2fae402c..9e02fd80af835f 100644 --- a/Documentation/devicetree/bindings/net/mediatek,net.yaml +++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml @@ -30,8 +30,13 @@ properties: reg: maxItems: 1 - clocks: true - clock-names: true + clocks: + minItems: 2 + maxItems: 24 + + clock-names: + minItems: 2 + maxItems: 24 interrupts: minItems: 1 @@ -127,6 +132,7 @@ allOf: then: properties: interrupts: + minItems: 3 maxItems: 3 clocks: @@ -183,6 +189,7 @@ allOf: then: properties: interrupts: + minItems: 3 maxItems: 3 clocks: @@ -222,6 +229,7 @@ allOf: then: properties: interrupts: + minItems: 3 maxItems: 3 clocks: diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt deleted file mode 100644 index 988c72685cbfd4..00000000000000 --- a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt +++ /dev/null @@ -1,80 +0,0 @@ -MediaTek UART based Bluetooth Devices -================================== - -This device is a serial attached device to UART device and thus it must be a -child node of the serial node with UART. - -Please refer to the following documents for generic properties: - - Documentation/devicetree/bindings/serial/serial.yaml - -Required properties: - -- compatible: Must be - "mediatek,mt7663u-bluetooth": for MT7663U device - "mediatek,mt7668u-bluetooth": for MT7668U device -- vcc-supply: Main voltage regulator - -If the pin controller on the platform can support both pinmux and GPIO -control such as the most of MediaTek platform. Please use below properties. - -- pinctrl-names: Should be "default", "runtime" -- pinctrl-0: Should contain UART RXD low when the device is powered up to - enter proper bootstrap mode. 
-- pinctrl-1: Should contain UART mode pin ctrl - -Else, the pin controller on the platform only can support pinmux control and -the GPIO control still has to rely on the dedicated GPIO controller such as -a legacy MediaTek SoC, MT7621. Please use the below properties. - -- boot-gpios: GPIO same to the pin as UART RXD and used to keep LOW when - the device is powered up to enter proper bootstrap mode when -- pinctrl-names: Should be "default" -- pinctrl-0: Should contain UART mode pin ctrl - -Optional properties: - -- reset-gpios: GPIO used to reset the device whose initial state keeps low, - if the GPIO is missing, then board-level design should be - guaranteed. -- clocks: Should be the clock specifiers corresponding to the entry in - clock-names property. If the clock is missing, then board-level - design should be guaranteed. -- clock-names: Should contain "osc" entry for the external oscillator. -- current-speed: Current baud rate of the device whose defaults to 921600 - -Example: - - uart1_pins_boot: uart1-default { - pins-dat { - pinmux = ; - output-low; - }; - }; - - uart1_pins_runtime: uart1-runtime { - pins-dat { - pinmux = , - ; - }; - }; - - uart1: serial@11003000 { - compatible = "mediatek,mt7623-uart", - "mediatek,mt6577-uart"; - reg = <0 0x11003000 0 0x400>; - interrupts = ; - clocks = <&pericfg CLK_PERI_UART1_SEL>, - <&pericfg CLK_PERI_UART1>; - clock-names = "baud", "bus"; - - bluetooth { - compatible = "mediatek,mt7663u-bluetooth"; - vcc-supply = <®_5v>; - reset-gpios = <&pio 24 GPIO_ACTIVE_LOW>; - pinctrl-names = "default", "runtime"; - pinctrl-0 = <&uart1_pins_boot>; - pinctrl-1 = <&uart1_pins_runtime>; - current-speed = <921600>; - }; - }; diff --git a/Documentation/devicetree/bindings/net/microchip,lan8650.yaml b/Documentation/devicetree/bindings/net/microchip,lan8650.yaml new file mode 100644 index 00000000000000..61e11d4a07c407 --- /dev/null +++ b/Documentation/devicetree/bindings/net/microchip,lan8650.yaml @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/microchip,lan8650.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip LAN8650/1 10BASE-T1S MACPHY Ethernet Controllers + +maintainers: + - Parthiban Veerasooran + +description: + The LAN8650/1 combines a Media Access Controller (MAC) and an Ethernet + PHY to enable 10BASE‑T1S networks. The Ethernet Media Access Controller + (MAC) module implements a 10 Mbps half duplex Ethernet MAC, compatible + with the IEEE 802.3 standard and a 10BASE-T1S physical layer transceiver + integrated into the LAN8650/1. The communication between the Host and + the MAC-PHY is specified in the OPEN Alliance 10BASE-T1x MACPHY Serial + Interface (TC6). + +allOf: + - $ref: /schemas/net/ethernet-controller.yaml# + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + oneOf: + - const: microchip,lan8650 + - items: + - const: microchip,lan8651 + - const: microchip,lan8650 + + reg: + maxItems: 1 + + interrupts: + description: + Interrupt from MAC-PHY asserted in the event of Receive Chunks + Available, Transmit Chunk Credits Available and Extended Status + Event. 
+ maxItems: 1 + + spi-max-frequency: + minimum: 15000000 + maximum: 25000000 + +required: + - compatible + - reg + - interrupts + - spi-max-frequency + +unevaluatedProperties: false + +examples: + - | + #include + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + ethernet@0 { + compatible = "microchip,lan8651", "microchip,lan8650"; + reg = <0>; + pinctrl-names = "default"; + pinctrl-0 = <ð0_pins>; + interrupt-parent = <&gpio>; + interrupts = <6 IRQ_TYPE_EDGE_FALLING>; + local-mac-address = [04 05 06 01 02 03]; + spi-max-frequency = <15000000>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/nokia-bluetooth.txt b/Documentation/devicetree/bindings/net/nokia-bluetooth.txt deleted file mode 100644 index 42be7dc9a70ba7..00000000000000 --- a/Documentation/devicetree/bindings/net/nokia-bluetooth.txt +++ /dev/null @@ -1,51 +0,0 @@ -Nokia Bluetooth Chips ---------------------- - -Nokia phones often come with UART connected bluetooth chips from different -vendors and modified device API. Those devices speak a protocol named H4+ -(also known as h4p) by Nokia, which is similar to the H4 protocol from the -Bluetooth standard. In addition to the H4 protocol it specifies two more -UART status lines for wakeup of UART transceivers to improve power management -and a few new packet types used to negotiate uart speed. - -Required properties: - - - compatible: should contain "nokia,h4p-bluetooth" as well as one of the following: - * "brcm,bcm2048-nokia" - * "ti,wl1271-bluetooth-nokia" - - reset-gpios: GPIO specifier, used to reset the BT module (active low) - - bluetooth-wakeup-gpios: GPIO specifier, used to wakeup the BT module (active high) - - host-wakeup-gpios: GPIO specifier, used to wakeup the host processor (active high) - - clock-names: should be "sysclk" - - clocks: should contain a clock specifier for every name in clock-names - -Optional properties: - - - None - -Example: - -/ { - /* controlled (enabled/disabled) directly by BT module */ - bluetooth_clk: vctcxo { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <38400000>; - }; -}; - -&uart2 { - pinctrl-names = "default"; - pinctrl-0 = <&uart2_pins>; - - bluetooth { - compatible = "ti,wl1271-bluetooth-nokia", "nokia,h4p-bluetooth"; - - reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>; /* gpio26 */ - host-wakeup-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* gpio101 */ - bluetooth-wakeup-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>; /* gpio37 */ - - clocks = <&bluetooth_clk>; - clock-names = "sysclk"; - }; -}; diff --git a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml index 6992d56832bf95..d08abcb012113a 100644 --- a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml +++ b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml @@ -23,6 +23,9 @@ properties: '#pse-cells': const: 1 + reset-gpios: + maxItems: 1 + channels: description: each set of 8 ports can be assigned to one physical channels or two for PoE4. 
This parameter describes the configuration diff --git a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml b/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml deleted file mode 100644 index 043e118c605c83..00000000000000 --- a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/net/realtek-bluetooth.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: RTL8723BS/RTL8723CS/RTL8821CS/RTL8822CS Bluetooth - -maintainers: - - Vasily Khoruzhick - - Alistair Francis - -description: - RTL8723BS/RTL8723CS/RTL8821CS/RTL8822CS is a WiFi + BT chip. WiFi part - is connected over SDIO, while BT is connected over serial. It speaks - H5 protocol with few extra commands to upload firmware and change - module speed. - -properties: - compatible: - oneOf: - - enum: - - realtek,rtl8723bs-bt - - realtek,rtl8723cs-bt - - realtek,rtl8723ds-bt - - realtek,rtl8822cs-bt - - items: - - enum: - - realtek,rtl8821cs-bt - - const: realtek,rtl8723bs-bt - - device-wake-gpios: - maxItems: 1 - description: GPIO specifier, used to wakeup the BT module - - enable-gpios: - maxItems: 1 - description: GPIO specifier, used to enable the BT module - - host-wake-gpios: - maxItems: 1 - description: GPIO specifier, used to wakeup the host processor - - max-speed: true - -required: - - compatible - -additionalProperties: false - -examples: - - | - #include - - uart1 { - pinctrl-names = "default"; - pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>; - uart-has-rtscts; - - bluetooth { - compatible = "realtek,rtl8723bs-bt"; - device-wake-gpios = <&r_pio 0 5 GPIO_ACTIVE_HIGH>; /* PL5 */ - host-wake-gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */ - }; - }; diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml index 21a92f179093d9..1e00ef5b3acda5 100644 --- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml +++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml @@ -62,15 +62,27 @@ properties: - renesas,r9a08g045-gbeth # RZ/G3S - const: renesas,rzg2l-gbeth # RZ/{G2L,G2UL,V2L} family - reg: true + reg: + minItems: 1 + items: + - description: MAC register block + - description: Stream buffer - interrupts: true + interrupts: + minItems: 1 + maxItems: 29 - interrupt-names: true + interrupt-names: + minItems: 1 + maxItems: 29 - clocks: true + clocks: + minItems: 1 + maxItems: 3 - clock-names: true + clock-names: + minItems: 1 + maxItems: 3 iommus: maxItems: 1 @@ -150,14 +162,11 @@ allOf: then: properties: reg: - items: - - description: MAC register block - - description: Stream buffer + minItems: 2 else: properties: reg: - items: - - description: MAC register block + maxItems: 1 - if: properties: diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml index 6bbe96e3525094..f8a576611d6c10 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml @@ -25,6 +25,7 @@ select: - rockchip,rk3368-gmac - rockchip,rk3399-gmac - rockchip,rk3568-gmac + - rockchip,rk3576-gmac - rockchip,rk3588-gmac - rockchip,rv1108-gmac - rockchip,rv1126-gmac @@ -52,6 +53,7 @@ properties: - items: - enum: - rockchip,rk3568-gmac + - rockchip,rk3576-gmac - rockchip,rk3588-gmac - rockchip,rv1126-gmac - const: snps,dwmac-4.20a diff --git 
a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml index 3eb65e63fdaec9..4e2ba1bf788c98 100644 --- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml +++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml @@ -80,6 +80,7 @@ properties: - rockchip,rk3328-gmac - rockchip,rk3366-gmac - rockchip,rk3368-gmac + - rockchip,rk3576-gmac - rockchip,rk3588-gmac - rockchip,rk3399-gmac - rockchip,rv1108-gmac diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml index b0ebcef6801ce5..4eb63b303cffc3 100644 --- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml +++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml @@ -41,13 +41,17 @@ properties: minItems: 1 maxItems: 4 - clock-names: true + clock-names: + minItems: 1 + maxItems: 4 resets: minItems: 1 maxItems: 2 - reset-names: true + reset-names: + minItems: 1 + maxItems: 2 socionext,syscon-phy-mode: $ref: /schemas/types.yaml#/definitions/phandle-array diff --git a/Documentation/devicetree/bindings/net/ti,bluetooth.yaml b/Documentation/devicetree/bindings/net/ti,bluetooth.yaml deleted file mode 100644 index 81616f9fb4935f..00000000000000 --- a/Documentation/devicetree/bindings/net/ti,bluetooth.yaml +++ /dev/null @@ -1,92 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/net/ti,bluetooth.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Texas Instruments Bluetooth Chips - -maintainers: - - David Lechner - -description: | - This documents the binding structure and common properties for serial - attached TI Bluetooth devices. The following chips are included in this - binding: - - * TI CC256x Bluetooth devices - * TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices - - TI WiLink devices have a UART interface for providing Bluetooth, FM radio, - and GPS over what's called "shared transport". The shared transport is - standard BT HCI protocol with additional channels for the other functions. - - TI WiLink devices also have a separate WiFi interface as described in - wireless/ti,wlcore.yaml. - - This bindings follows the UART slave device binding in ../serial/serial.yaml. - -properties: - compatible: - enum: - - ti,cc2560 - - ti,wl1271-st - - ti,wl1273-st - - ti,wl1281-st - - ti,wl1283-st - - ti,wl1285-st - - ti,wl1801-st - - ti,wl1805-st - - ti,wl1807-st - - ti,wl1831-st - - ti,wl1835-st - - ti,wl1837-st - - enable-gpios: - maxItems: 1 - - vio-supply: - description: Vio input supply (1.8V) - - vbat-supply: - description: Vbat input supply (2.9-4.8V) - - clocks: - maxItems: 1 - - clock-names: - items: - - const: ext_clock - - max-speed: - default: 3000000 - - nvmem-cells: - maxItems: 1 - description: - Nvmem data cell that contains a 6 byte BD address with the most - significant byte first (big-endian). 
- - nvmem-cell-names: - items: - - const: bd-address - -required: - - compatible - -additionalProperties: false - -examples: - - | - #include - - serial { - bluetooth { - compatible = "ti,wl1835-st"; - enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; - clocks = <&clk32k_wl18xx>; - clock-names = "ext_clock"; - nvmem-cells = <&bd_address>; - nvmem-cell-names = "bd-address"; - }; - }; diff --git a/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml b/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml index 3dde10de4630e6..4f425344154771 100644 --- a/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml +++ b/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml @@ -29,6 +29,12 @@ properties: reset-gpios: maxItems: 1 + bootloader-backdoor-gpios: + maxItems: 1 + description: | + gpios to enable bootloader backdoor in cc1352p7 bootloader to allow + flashing new firmware. + vdds-supply: true required: @@ -46,6 +52,7 @@ examples: clocks = <&sclk_hf 0>, <&sclk_lf 25>; clock-names = "sclk_hf", "sclk_lf"; reset-gpios = <&pio 35 GPIO_ACTIVE_LOW>; + bootloader-backdoor-gpios = <&pio 36 GPIO_ACTIVE_LOW>; vdds-supply = <&vdds>; }; }; diff --git a/Documentation/devicetree/bindings/net/wireless/marvell,sd8787.yaml b/Documentation/devicetree/bindings/net/wireless/marvell,sd8787.yaml new file mode 100644 index 00000000000000..1715b22e0dcf89 --- /dev/null +++ b/Documentation/devicetree/bindings/net/wireless/marvell,sd8787.yaml @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/wireless/marvell,sd8787.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Marvell 8787/8897/8978/8997 (sd8787/sd8897/sd8978/sd8997/pcie8997) SDIO/PCIE devices + +maintainers: + - Brian Norris + - Frank Li + +description: + This node provides properties for describing the Marvell SDIO/PCIE wireless device. + The node is expected to be specified as a child node to the SDIO/PCIE controller that + connects the device to the system. + +properties: + compatible: + enum: + - marvell,sd8787 + - marvell,sd8897 + - marvell,sd8978 + - marvell,sd8997 + - nxp,iw416 + - pci11ab,2b42 + - pci1b4b,2b42 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + wakeup-source: true + + marvell,caldata-txpwrlimit-2g: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: Calibration data for the 2GHz band. + maxItems: 566 + + marvell,caldata-txpwrlimit-5g-sub0: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: Calibration data for sub-band 0 in the 5GHz band. + maxItems: 502 + + marvell,caldata-txpwrlimit-5g-sub1: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: Calibration data for sub-band 1 in the 5GHz band. + maxItems: 688 + + marvell,caldata-txpwrlimit-5g-sub2: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: Calibration data for sub-band 2 in the 5GHz band. + maxItems: 750 + + marvell,caldata-txpwrlimit-5g-sub3: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: Calibration data for sub-band 3 in the 5GHz band. + maxItems: 502 + + marvell,wakeup-pin: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Provides the pin number for the wakeup pin from the device's point of + view. The wakeup pin is used for the device to wake the host system + from sleep. This property is only necessary if the wakeup pin is + wired in a non-standard way, such that the default pin assignments + are invalid. 
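As a rough sketch of how the uint8-array calibration properties above are encoded in a node (the byte values are the truncated sample carried over from the old text binding, not real calibration data; a real table follows the lengths given above, up to 566 bytes for the 2 GHz table):

    wifi@1 {
        compatible = "marvell,sd8897";
        reg = <1>;
        /* truncated illustrative blob only */
        marvell,caldata-txpwrlimit-2g = /bits/ 8 <0x01 0x00 0x06 0x00 0x08 0x02 0x89 0x01>;
    };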
+ +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + #include + + mmc { + #address-cells = <1>; + #size-cells = <0>; + + wifi@1 { + compatible = "marvell,sd8897"; + reg = <1>; + interrupt-parent = <&pio>; + interrupts = <38 IRQ_TYPE_LEVEL_LOW>; + marvell,wakeup-pin = <3>; + }; + }; + diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt deleted file mode 100644 index cdc303caf5f45c..00000000000000 --- a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt +++ /dev/null @@ -1,70 +0,0 @@ -Marvell 8787/8897/8978/8997 (sd8787/sd8897/sd8978/sd8997/pcie8997) SDIO/PCIE devices ------- - -This node provides properties for controlling the Marvell SDIO/PCIE wireless device. -The node is expected to be specified as a child node to the SDIO/PCIE controller that -connects the device to the system. - -Required properties: - - - compatible : should be one of the following: - * "marvell,sd8787" - * "marvell,sd8897" - * "marvell,sd8978" - * "marvell,sd8997" - * "nxp,iw416" - * "pci11ab,2b42" - * "pci1b4b,2b42" - -Optional properties: - - - marvell,caldata* : A series of properties with marvell,caldata prefix, - represent calibration data downloaded to the device during - initialization. This is an array of unsigned 8-bit values. - the properties should follow below property name and - corresponding array length: - "marvell,caldata-txpwrlimit-2g" (length = 566). - "marvell,caldata-txpwrlimit-5g-sub0" (length = 502). - "marvell,caldata-txpwrlimit-5g-sub1" (length = 688). - "marvell,caldata-txpwrlimit-5g-sub2" (length = 750). - "marvell,caldata-txpwrlimit-5g-sub3" (length = 502). - - marvell,wakeup-pin : a wakeup pin number of wifi chip which will be configured - to firmware. Firmware will wakeup the host using this pin - during suspend/resume. - - interrupts : interrupt pin number to the cpu. driver will request an irq based on - this interrupt number. during system suspend, the irq will be enabled - so that the wifi chip can wakeup host platform under certain condition. - during system resume, the irq will be disabled to make sure - unnecessary interrupt is not received. - - vmmc-supply: a phandle of a regulator, supplying VCC to the card - - mmc-pwrseq: phandle to the MMC power sequence node. See "mmc-pwrseq-*" - for documentation of MMC power sequence bindings. - -Example: - -Tx power limit calibration data is configured in below example. -The calibration data is an array of unsigned values, the length -can vary between hw versions. -IRQ pin 38 is used as system wakeup source interrupt. wakeup pin 3 is configured -so that firmware can wakeup host using this device side pin. 
- -&mmc3 { - vmmc-supply = <&wlan_en_reg>; - mmc-pwrseq = <&wifi_pwrseq>; - bus-width = <4>; - cap-power-off-card; - keep-power-in-suspend; - - #address-cells = <1>; - #size-cells = <0>; - mwifiex: wifi@1 { - compatible = "marvell,sd8897"; - reg = <1>; - interrupt-parent = <&pio>; - interrupts = <38 IRQ_TYPE_LEVEL_LOW>; - - marvell,caldata_00_txpwrlimit_2g_cfg_set = /bits/ 8 < - 0x01 0x00 0x06 0x00 0x08 0x02 0x89 0x01>; - marvell,wakeup-pin = <3>; - }; -}; diff --git a/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml b/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml index bbe89ea9590ceb..e95c216282818e 100644 --- a/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml +++ b/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml @@ -34,6 +34,7 @@ properties: and length of the AXI DMA controller IO space, unless axistream-connected is specified, in which case the reg attribute of the node referenced by it is used. + minItems: 1 maxItems: 2 interrupts: @@ -181,7 +182,7 @@ examples: clock-names = "s_axi_lite_clk", "axis_clk", "ref_clk", "mgt_clk"; clocks = <&axi_clk>, <&axi_clk>, <&pl_enet_ref_clk>, <&mgt_clk>; phy-mode = "mii"; - reg = <0x00 0x40000000 0x00 0x40000>; + reg = <0x40000000 0x40000>; xlnx,rxcsum = <0x2>; xlnx,rxmem = <0x800>; xlnx,txcsum = <0x2>; diff --git a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml index 70fb2ad251037a..1b20b49eee7923 100644 --- a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml +++ b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml @@ -15,6 +15,7 @@ description: | allOf: - $ref: nvmem.yaml# + - $ref: nvmem-deprecated-cells.yaml properties: compatible: diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml index e21c06e9a741c8..b2cb76cf9053a8 100644 --- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml +++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml @@ -14,7 +14,7 @@ maintainers: description: | This binding represents the on-chip eFuse OTP controller found on i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL, - i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN i.MX8MP and i.MX93 SoCs. + i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN i.MX8MP and i.MX93/5 SoCs. 
allOf: - $ref: nvmem.yaml# @@ -36,6 +36,7 @@ properties: - fsl,imx8mq-ocotp - fsl,imx8mm-ocotp - fsl,imx93-ocotp + - fsl,imx95-ocotp - const: syscon - items: - enum: diff --git a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml index 3b40f788077469..38250706065112 100644 --- a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml +++ b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml @@ -21,6 +21,7 @@ oneOf: - $ref: fixed-layout.yaml - $ref: kontron,sl28-vpd.yaml - $ref: onie,tlv-layout.yaml + - $ref: u-boot,env.yaml properties: compatible: true diff --git a/Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml b/Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml new file mode 100644 index 00000000000000..56a8f55d4a096c --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml @@ -0,0 +1,132 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/nvmem/layouts/u-boot,env.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: U-Boot environment variables layout + +description: | + U-Boot uses environment variables to store device parameters and + configuration. They may be used for booting process, setup or keeping end user + info. + + Data is stored using U-Boot specific formats (variant specific header and NUL + separated key-value pairs). + + Environment data can be stored on various storage entities, e.g.: + 1. Raw flash partition + 2. UBI volume + + This binding allows marking storage device (as containing env data) and + specifying used format. + + Variables can be defined as NVMEM device subnodes. + +maintainers: + - Rafał Miłecki + +properties: + compatible: + oneOf: + - description: A standalone env data block + const: u-boot,env + - description: Two redundant blocks with active one flagged + const: u-boot,env-redundant-bool + - description: Two redundant blocks with active having higher counter + const: u-boot,env-redundant-count + - description: Broadcom's variant with custom header + const: brcm,env + + reg: + description: Partition offset and size for env on top of MTD + maxItems: 1 + + bootcmd: + type: object + description: Command to use for automatic booting + + ethaddr: + type: object + description: Ethernet interfaces base MAC address. + additionalProperties: false + + properties: + "#nvmem-cell-cells": + description: The first argument is a MAC address offset. 
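For reference, a consumer-side sketch of the one-cell MAC address offset described above, assuming the `mac` nvmem cell label from the first example below; the ethernet node itself is a placeholder with controller-specific properties omitted:

    ethernet {
        /* derive this port's address from the U-Boot env base MAC plus offset 1 */
        nvmem-cells = <&mac 1>;
        nvmem-cell-names = "mac-address";
    };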
+ const: 1 + +allOf: + - if: + properties: + $nodename: + not: + contains: + pattern: "^partition@[0-9a-f]+$" + then: + properties: + reg: false + +additionalProperties: false + +examples: + - | + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + reg = <0x0 0x40000>; + label = "u-boot"; + read-only; + }; + + env: partition@40000 { + compatible = "u-boot,env"; + reg = <0x40000 0x10000>; + + mac: ethaddr { + #nvmem-cell-cells = <1>; + }; + }; + }; + - | + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + reg = <0x0 0x100000>; + compatible = "brcm,u-boot"; + label = "u-boot"; + + partition-u-boot-env { + compatible = "brcm,env"; + + ethaddr { + }; + }; + }; + }; + - | + partition@0 { + reg = <0x0 0x100000>; + label = "ubi"; + compatible = "linux,ubi"; + + volumes { + ubi-volume-u-boot-env { + volname = "env"; + + nvmem-layout { + compatible = "u-boot,env"; + + ethaddr { + #nvmem-cell-cells = <1>; + }; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml index 92bfe25f0571eb..3b2aa605a5515c 100644 --- a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml +++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml @@ -17,6 +17,7 @@ maintainers: allOf: - $ref: nvmem.yaml# + - $ref: nvmem-deprecated-cells.yaml# properties: compatible: @@ -32,6 +33,8 @@ properties: patternProperties: "^.*@[0-9a-f]+$": type: object + $ref: layouts/fixed-cell.yaml + unevaluatedProperties: false properties: st,non-secure-otp: diff --git a/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml b/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml deleted file mode 100644 index 9c36afc7084b02..00000000000000 --- a/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml +++ /dev/null @@ -1,103 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/nvmem/u-boot,env.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: U-Boot environment variables - -description: | - U-Boot uses environment variables to store device parameters and - configuration. They may be used for booting process, setup or keeping end user - info. - - Data is stored using U-Boot specific formats (variant specific header and NUL - separated key-value pairs). - - Environment data can be stored on various storage entities, e.g.: - 1. Raw flash partition - 2. UBI volume - - This binding allows marking storage device (as containing env data) and - specifying used format. - - Right now only flash partition case is covered but it may be extended to e.g. - UBI volumes in the future. - - Variables can be defined as NVMEM device subnodes. - -maintainers: - - Rafał Miłecki - -properties: - compatible: - oneOf: - - description: A standalone env data block - const: u-boot,env - - description: Two redundant blocks with active one flagged - const: u-boot,env-redundant-bool - - description: Two redundant blocks with active having higher counter - const: u-boot,env-redundant-count - - description: Broadcom's variant with custom header - const: brcm,env - - reg: - maxItems: 1 - - bootcmd: - type: object - description: Command to use for automatic booting - - ethaddr: - type: object - description: Ethernet interfaces base MAC address. 
- additionalProperties: false - - properties: - "#nvmem-cell-cells": - description: The first argument is a MAC address offset. - const: 1 - -additionalProperties: false - -examples: - - | - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - reg = <0x0 0x40000>; - label = "u-boot"; - read-only; - }; - - env: partition@40000 { - compatible = "u-boot,env"; - reg = <0x40000 0x10000>; - - mac: ethaddr { - #nvmem-cell-cells = <1>; - }; - }; - }; - - | - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - reg = <0x0 0x100000>; - compatible = "brcm,u-boot"; - label = "u-boot"; - - partition-u-boot-env { - compatible = "brcm,env"; - - ethaddr { - }; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml b/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml index 02d1d2c17129ee..fd0c8d5c5f3e7e 100644 --- a/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml +++ b/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml @@ -19,7 +19,7 @@ description: the hardware description for the scheme mentioned above. maintainers: - - Nishanth Menon + - Dhruva Gole allOf: - $ref: opp-v2-base.yaml# diff --git a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt b/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt deleted file mode 100644 index 9514c327d31b7b..00000000000000 --- a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt +++ /dev/null @@ -1,27 +0,0 @@ -* Altera PCIe MSI controller - -Required properties: -- compatible: should contain "altr,msi-1.0" -- reg: specifies the physical base address of the controller and - the length of the memory mapped region. -- reg-names: must include the following entries: - "csr": CSR registers - "vector_slave": vectors slave port region -- interrupts: specifies the interrupt source of the parent interrupt - controller. The format of the interrupt specifier depends on the - parent interrupt controller. -- num-vectors: number of vectors, range 1 to 32. -- msi-controller: indicates that this is MSI controller node - - -Example -msi0: msi@0xFF200000 { - compatible = "altr,msi-1.0"; - reg = <0xFF200000 0x00000010 - 0xFF200010 0x00000080>; - reg-names = "csr", "vector_slave"; - interrupt-parent = <&hps_0_arm_gic_0>; - interrupts = <0 42 4>; - msi-controller; - num-vectors = <32>; -}; diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt deleted file mode 100644 index 816b244a221e6c..00000000000000 --- a/Documentation/devicetree/bindings/pci/altera-pcie.txt +++ /dev/null @@ -1,50 +0,0 @@ -* Altera PCIe controller - -Required properties: -- compatible : should contain "altr,pcie-root-port-1.0" or "altr,pcie-root-port-2.0" -- reg: a list of physical base address and length for TXS and CRA. - For "altr,pcie-root-port-2.0", additional HIP base address and length. -- reg-names: must include the following entries: - "Txs": TX slave port region - "Cra": Control register access region - "Hip": Hard IP region (if "altr,pcie-root-port-2.0") -- interrupts: specifies the interrupt source of the parent interrupt - controller. The format of the interrupt specifier depends - on the parent interrupt controller. 
-- device_type: must be "pci" -- #address-cells: set to <3> -- #size-cells: set to <2> -- #interrupt-cells: set to <1> -- ranges: describes the translation of addresses for root ports and - standard PCI regions. -- interrupt-map-mask and interrupt-map: standard PCI properties to define the - mapping of the PCIe interface to interrupt numbers. - -Optional properties: -- msi-parent: Link to the hardware entity that serves as the MSI controller - for this PCIe controller. -- bus-range: PCI bus numbers covered - -Example - pcie_0: pcie@c00000000 { - compatible = "altr,pcie-root-port-1.0"; - reg = <0xc0000000 0x20000000>, - <0xff220000 0x00004000>; - reg-names = "Txs", "Cra"; - interrupt-parent = <&hps_0_arm_gic_0>; - interrupts = <0 40 4>; - interrupt-controller; - #interrupt-cells = <1>; - bus-range = <0x0 0xFF>; - device_type = "pci"; - msi-parent = <&msi_to_gic_gen_0>; - #address-cells = <3>; - #size-cells = <2>; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0 0 0 1 &pcie_0 1>, - <0 0 0 2 &pcie_0 2>, - <0 0 0 3 &pcie_0 3>, - <0 0 0 4 &pcie_0 4>; - ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000 - 0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>; - }; diff --git a/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml b/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml new file mode 100644 index 00000000000000..98814862d00647 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright (C) 2015, 2024, Intel Corporation +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/altr,msi-controller.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Altera PCIe MSI controller + +maintainers: + - Matthew Gerlach + +properties: + compatible: + enum: + - altr,msi-1.0 + + reg: + items: + - description: CSR registers + - description: Vectors slave port region + + reg-names: + items: + - const: csr + - const: vector_slave + + interrupts: + maxItems: 1 + + msi-controller: true + + num-vectors: + description: number of vectors + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 1 + maximum: 32 + +required: + - compatible + - reg + - reg-names + - interrupts + - msi-controller + - num-vectors + +allOf: + - $ref: /schemas/interrupt-controller/msi-controller.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + #include + msi@ff200000 { + compatible = "altr,msi-1.0"; + reg = <0xff200000 0x00000010>, + <0xff200010 0x00000080>; + reg-names = "csr", "vector_slave"; + interrupt-parent = <&hps_0_arm_gic_0>; + interrupts = ; + msi-controller; + num-vectors = <32>; + }; diff --git a/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml b/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml new file mode 100644 index 00000000000000..52533fccc134a8 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml @@ -0,0 +1,114 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright (C) 2015, 2019, 2024, Intel Corporation +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/altr,pcie-root-port.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Altera PCIe Root Port + +maintainers: + - Matthew Gerlach + +properties: + compatible: + enum: + - altr,pcie-root-port-1.0 + - altr,pcie-root-port-2.0 + + reg: + items: + - description: TX slave port region + - description: Control register access region + - description: Hard IP region + 
minItems: 2 + + reg-names: + items: + - const: Txs + - const: Cra + - const: Hip + minItems: 2 + + interrupts: + maxItems: 1 + + interrupt-controller: true + + interrupt-map-mask: + items: + - const: 0 + - const: 0 + - const: 0 + - const: 7 + + interrupt-map: + maxItems: 4 + + "#interrupt-cells": + const: 1 + + msi-parent: true + +required: + - compatible + - reg + - reg-names + - interrupts + - "#interrupt-cells" + - interrupt-controller + - interrupt-map + - interrupt-map-mask + +allOf: + - $ref: /schemas/pci/pci-host-bridge.yaml# + - if: + properties: + compatible: + enum: + - altr,pcie-root-port-1.0 + then: + properties: + reg: + maxItems: 2 + + reg-names: + maxItems: 2 + + else: + properties: + reg: + minItems: 3 + + reg-names: + minItems: 3 + + +unevaluatedProperties: false + +examples: + - | + #include + #include + pcie_0: pcie@c00000000 { + compatible = "altr,pcie-root-port-1.0"; + reg = <0xc0000000 0x20000000>, + <0xff220000 0x00004000>; + reg-names = "Txs", "Cra"; + interrupt-parent = <&hps_0_arm_gic_0>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <1>; + bus-range = <0x0 0xff>; + device_type = "pci"; + msi-parent = <&msi_to_gic_gen_0>; + #address-cells = <3>; + #size-cells = <2>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_0 0 0 0 1>, + <0 0 0 2 &pcie_0 0 0 0 2>, + <0 0 0 3 &pcie_0 0 0 0 3>, + <0 0 0 4 &pcie_0 0 0 0 4>; + ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000>, + <0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>; + }; diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml index 11f8ea33240cfd..0925c520195ae3 100644 --- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Brcmstb PCIe Host Controller maintainers: - - Nicolas Saenz Julienne + - Jim Quinlan properties: compatible: @@ -16,11 +16,12 @@ properties: - brcm,bcm2711-pcie # The Raspberry Pi 4 - brcm,bcm4908-pcie - brcm,bcm7211-pcie # Broadcom STB version of RPi4 - - brcm,bcm7278-pcie # Broadcom 7278 Arm - brcm,bcm7216-pcie # Broadcom 7216 Arm - - brcm,bcm7445-pcie # Broadcom 7445 Arm + - brcm,bcm7278-pcie # Broadcom 7278 Arm - brcm,bcm7425-pcie # Broadcom 7425 MIPs - brcm,bcm7435-pcie # Broadcom 7435 MIPs + - brcm,bcm7445-pcie # Broadcom 7445 Arm + - brcm,bcm7712-pcie # Broadcom STB sibling of Rpi 5 reg: maxItems: 1 @@ -95,6 +96,14 @@ properties: minItems: 1 maxItems: 3 + resets: + minItems: 1 + maxItems: 3 + + reset-names: + minItems: 1 + maxItems: 3 + required: - compatible - reg @@ -118,8 +127,7 @@ allOf: then: properties: resets: - items: - - description: reset controller handling the PERST# signal + maxItems: 1 reset-names: items: @@ -136,12 +144,32 @@ allOf: then: properties: resets: + maxItems: 1 + + reset-names: items: - - description: phandle pointing to the RESCAL reset controller + - const: rescal + + required: + - resets + - reset-names + + - if: + properties: + compatible: + contains: + const: brcm,bcm7712-pcie + then: + properties: + resets: + minItems: 3 + maxItems: 3 reset-names: items: - const: rescal + - const: bridge + - const: swinit required: - resets diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml index a06f75df8458ae..84ca12e8b25be5 100644 --- 
a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml @@ -65,12 +65,14 @@ allOf: then: properties: reg: - minItems: 2 - maxItems: 2 + minItems: 4 + maxItems: 4 reg-names: items: - const: dbi - const: addr_space + - const: dbi2 + - const: atu - if: properties: @@ -129,8 +131,11 @@ examples: pcie_ep: pcie-ep@33800000 { compatible = "fsl,imx8mp-pcie-ep"; - reg = <0x33800000 0x000400000>, <0x18000000 0x08000000>; - reg-names = "dbi", "addr_space"; + reg = <0x33800000 0x100000>, + <0x18000000 0x8000000>, + <0x33900000 0x100000>, + <0x33b00000 0x100000>; + reg-names = "dbi", "addr_space", "dbi2", "atu"; clocks = <&clk IMX8MP_CLK_HSIO_ROOT>, <&clk IMX8MP_CLK_HSIO_AXI>, <&clk IMX8MP_CLK_PCIE_ROOT>; diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml index 8b8d77b1154b5c..1e05c560d79750 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml @@ -30,6 +30,7 @@ properties: - fsl,imx8mm-pcie - fsl,imx8mp-pcie - fsl,imx95-pcie + - fsl,imx8q-pcie clocks: minItems: 3 @@ -184,6 +185,21 @@ allOf: - const: pcie_bus - const: pcie_aux + - if: + properties: + compatible: + enum: + - fsl,imx8q-pcie + then: + properties: + clocks: + maxItems: 3 + clock-names: + items: + - const: dbi + - const: mstr + - const: slv + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml index 793986c5af7ff3..be79712836c444 100644 --- a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml @@ -22,18 +22,20 @@ description: properties: compatible: - enum: - - fsl,ls1021a-pcie - - fsl,ls2080a-pcie - - fsl,ls2085a-pcie - - fsl,ls2088a-pcie - - fsl,ls1088a-pcie - - fsl,ls1046a-pcie - - fsl,ls1043a-pcie - - fsl,ls1012a-pcie - - fsl,ls1028a-pcie - - fsl,lx2160a-pcie - + oneOf: + - enum: + - fsl,ls1012a-pcie + - fsl,ls1021a-pcie + - fsl,ls1028a-pcie + - fsl,ls1043a-pcie + - fsl,ls1046a-pcie + - fsl,ls1088a-pcie + - fsl,ls2080a-pcie + - fsl,ls2085a-pcie + - fsl,ls2088a-pcie + - items: + - const: fsl,lx2160ar2-pcie + - const: fsl,ls2088a-pcie reg: maxItems: 2 @@ -43,10 +45,15 @@ properties: - const: config fsl,pcie-scfg: - $ref: /schemas/types.yaml#/definitions/phandle + $ref: /schemas/types.yaml#/definitions/phandle-array description: A phandle to the SCFG device node. The second entry is the physical PCIe controller index starting from '0'. This is used to get SCFG PEXN registers. + items: + items: + - description: A phandle to the SCFG device node + - description: PCIe controller index starting from '0' + maxItems: 1 big-endian: $ref: /schemas/types.yaml#/definitions/flag @@ -67,6 +74,14 @@ properties: minItems: 1 maxItems: 2 + num-viewport: + $ref: /schemas/types.yaml#/definitions/uint32 + deprecated: true + description: + Number of outbound view ports configured in hardware. It's the same as + the number of outbound AT windows. 
+ maximum: 256 + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml b/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml index c9f04999c9cf7d..e863519f31617e 100644 --- a/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml @@ -37,7 +37,8 @@ properties: minItems: 3 maxItems: 4 - clocks: true + clocks: + maxItems: 5 clock-names: items: diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml index bcfbaf5582cc9c..420d551e9af978 100644 --- a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml +++ b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml @@ -102,8 +102,6 @@ properties: As described in IEEE Std 1275-1994, but must provide at least a definition of non-prefetchable memory. One or both of prefetchable Memory and IO Space may also be provided. - minItems: 1 - maxItems: 3 dma-coherent: true iommu-map: true diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml index 76d742051f7346..898c1be2d6a435 100644 --- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml +++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml @@ -53,6 +53,7 @@ properties: - mediatek,mt8195-pcie - const: mediatek,mt8192-pcie - const: mediatek,mt8192-pcie + - const: airoha,en7581-pcie reg: maxItems: 1 @@ -76,20 +77,20 @@ properties: resets: minItems: 1 - maxItems: 2 + maxItems: 3 reset-names: minItems: 1 - maxItems: 2 + maxItems: 3 items: - enum: [ phy, mac ] + enum: [ phy, mac, phy-lane0, phy-lane1, phy-lane2 ] clocks: - minItems: 4 + minItems: 1 maxItems: 6 clock-names: - minItems: 4 + minItems: 1 maxItems: 6 assigned-clocks: @@ -147,6 +148,9 @@ allOf: const: mediatek,mt8192-pcie then: properties: + clocks: + minItems: 4 + clock-names: items: - const: pl_250m @@ -155,6 +159,15 @@ allOf: - const: tl_32k - const: peri_26m - const: top_133m + + resets: + minItems: 1 + maxItems: 2 + + reset-names: + minItems: 1 + maxItems: 2 + - if: properties: compatible: @@ -164,6 +177,9 @@ allOf: - mediatek,mt8195-pcie then: properties: + clocks: + minItems: 4 + clock-names: items: - const: pl_250m @@ -172,6 +188,15 @@ allOf: - const: tl_32k - const: peri_26m - const: peri_mem + + resets: + minItems: 1 + maxItems: 2 + + reset-names: + minItems: 1 + maxItems: 2 + - if: properties: compatible: @@ -180,6 +205,9 @@ allOf: - mediatek,mt7986-pcie then: properties: + clocks: + minItems: 4 + clock-names: items: - const: pl_250m @@ -187,6 +215,36 @@ allOf: - const: peri_26m - const: top_133m + resets: + minItems: 1 + maxItems: 2 + + reset-names: + minItems: 1 + maxItems: 2 + + - if: + properties: + compatible: + const: airoha,en7581-pcie + then: + properties: + clocks: + maxItems: 1 + + clock-names: + items: + - const: sys-ck + + resets: + minItems: 3 + + reset-names: + items: + - const: phy-lane0 + - const: phy-lane1 + - const: phy-lane2 + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml index d1eef48252072a..f75000e3093db8 100644 --- a/Documentation/devicetree/bindings/pci/pci-ep.yaml +++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml @@ -10,7 +10,8 @@ description: | Common properties for PCI Endpoint Controller Nodes. 
maintainers: - - Kishon Vijay Abraham I + - Kishon Vijay Abraham I + - Manivannan Sadhasivam properties: $nodename: @@ -41,6 +42,17 @@ properties: default: 1 maximum: 16 + linux,pci-domain: + description: + If present this property assigns a fixed PCI domain number to a PCI + Endpoint Controller, otherwise an unstable (across boots) unique number + will be assigned. It is required to either not set this property at all + or set it for all PCI endpoint controllers in the system, otherwise + potentially conflicting domain numbers may be assigned to endpoint + controllers. The domain number for each endpoint controller in the system + must be unique. + $ref: /schemas/types.yaml#/definitions/uint32 + required: - compatible diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml index 0a39bbfcb28b08..e18900c4157616 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml @@ -21,11 +21,11 @@ properties: interrupts: minItems: 1 - maxItems: 8 + maxItems: 9 interrupt-names: minItems: 1 - maxItems: 8 + maxItems: 9 iommu-map: minItems: 1 @@ -78,6 +78,9 @@ properties: description: GPIO controlled connection to WAKE# signal maxItems: 1 + vddpe-3v3-supply: + description: PCIe endpoint power supply + required: - reg - reg-names diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml index 46802f7d94826d..1226ee5d08d1ae 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml @@ -280,4 +280,5 @@ examples: phy-names = "pciephy"; max-link-speed = <3>; num-lanes = <2>; + linux,pci-domain = <0>; }; diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml index 634da24ec3ed09..76cb9fbfd476fb 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml @@ -53,11 +53,19 @@ properties: - const: aggre1 # Aggre NoC PCIe1 AXI clock interrupts: - maxItems: 1 + minItems: 8 + maxItems: 8 interrupt-names: items: - - const: msi + - const: msi0 + - const: msi1 + - const: msi2 + - const: msi3 + - const: msi4 + - const: msi5 + - const: msi6 + - const: msi7 resets: maxItems: 1 @@ -66,9 +74,6 @@ properties: items: - const: pci - vddpe-3v3-supply: - description: PCIe endpoint power supply - allOf: - $ref: qcom,pcie-common.yaml# @@ -137,8 +142,16 @@ examples: dma-coherent; - interrupts = ; - interrupt-names = "msi"; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "msi0", "msi1", "msi2", "msi3", + "msi4", "msi5", "msi6", "msi7"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>, diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml index 25c9f13ae97723..15ba2385eb73c4 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml @@ -58,9 +58,6 @@ properties: items: - const: pci - vddpe-3v3-supply: - description: A phandle to the PCIe endpoint power supply - required: - interconnects - interconnect-names diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml 
b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml index d8c0afaa4b1922..46bd59eefadba1 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml @@ -55,8 +55,8 @@ properties: - const: aggre1 # Aggre NoC PCIe1 AXI clock interrupts: - minItems: 8 - maxItems: 8 + minItems: 9 + maxItems: 9 interrupt-names: items: @@ -68,6 +68,7 @@ properties: - const: msi5 - const: msi6 - const: msi7 + - const: global operating-points-v2: true opp-table: @@ -149,9 +150,10 @@ examples: , , , - ; + , + ; interrupt-names = "msi0", "msi1", "msi2", "msi3", - "msi4", "msi5", "msi6", "msi7"; + "msi4", "msi5", "msi6", "msi7", "global"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */ diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml index f867746b1ae594..ffabbac57fc167 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml @@ -91,6 +91,9 @@ properties: vdda_refclk-supply: description: A phandle to the core analog power supply for IC which generates reference clock + vddpe-3v3-supply: + description: A phandle to the PCIe endpoint power supply + phys: maxItems: 1 diff --git a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml index 91b81ac75592c4..b23293314a6d05 100644 --- a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml +++ b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml @@ -19,6 +19,7 @@ properties: - enum: - renesas,r8a779f0-pcie-ep # R-Car S4-8 - renesas,r8a779g0-pcie-ep # R-Car V4H + - renesas,r8a779h0-pcie-ep # R-Car V4M - const: renesas,rcar-gen4-pcie-ep # R-Car Gen4 reg: diff --git a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml index 955c664f1fbb2b..bb3f843c59d910 100644 --- a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml +++ b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml @@ -19,6 +19,7 @@ properties: - enum: - renesas,r8a779f0-pcie # R-Car S4-8 - renesas,r8a779g0-pcie # R-Car V4H + - renesas,r8a779h0-pcie # R-Car V4M - const: renesas,rcar-gen4-pcie # R-Car Gen4 reg: diff --git a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml index b288cdb1ec70a5..065b7508d2888f 100644 --- a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml +++ b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml @@ -42,9 +42,13 @@ properties: interrupts: maxItems: 1 - clocks: true + clocks: + minItems: 1 + maxItems: 3 - clock-names: true + clock-names: + minItems: 1 + maxItems: 3 resets: maxItems: 1 diff --git a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml index f0d8e486a07da7..93f3d0f4bb9423 100644 --- a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml +++ b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml @@ -38,13 +38,17 @@ properties: minItems: 1 maxItems: 2 - clock-names: true + clock-names: + minItems: 1 + maxItems: 2 resets: minItems: 1 maxItems: 2 - reset-names: true + reset-names: + minItems: 1 + maxItems: 2 num-ib-windows: const: 16 diff --git 
a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml index 15a2658ceeeff2..69b499c96c716d 100644 --- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml +++ b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml @@ -38,6 +38,16 @@ properties: - const: reg - const: cfg + ti,syscon-acspcie-proxy-ctrl: + $ref: /schemas/types.yaml#/definitions/phandle-array + items: + - items: + - description: Phandle to the ACSPCIE Proxy Control Register + - description: Bitmask corresponding to the PAD IO Buffer + output enable fields (Active Low). + description: Specifier for enabling the ACSPCIE PAD outputs to drive + the reference clock to the Endpoint device. + ti,syscon-pcie-ctrl: $ref: /schemas/types.yaml#/definitions/phandle-array items: diff --git a/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml b/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml index 9cad860c51a335..9de3c09efb6ea6 100644 --- a/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml @@ -61,6 +61,11 @@ properties: interrupt-map: maxItems: 4 + phys: + minItems: 1 + maxItems: 4 + description: One phy per logical lane, in order + power-domains: maxItems: 1 @@ -110,6 +115,7 @@ examples: - | #include #include + #include #include soc { #address-cells = <2>; @@ -138,6 +144,7 @@ examples: <0x0 0x0 0x0 0x3 &pcie_intc 0x3>, <0x0 0x0 0x0 0x4 &pcie_intc 0x4>; msi-parent = <&nwl_pcie>; + phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>; power-domains = <&zynqmp_firmware PD_PCIE>; iommus = <&smmu 0x4d0>; pcie_intc: legacy-interrupt-controller { diff --git a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml index 2f59b3a73dd27f..f1efd919c3514f 100644 --- a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml +++ b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml @@ -14,10 +14,21 @@ allOf: properties: compatible: - const: xlnx,xdma-host-3.00 + enum: + - xlnx,xdma-host-3.00 + - xlnx,qdma-host-3.00 reg: - maxItems: 1 + items: + - description: configuration region and XDMA bridge register. + - description: QDMA bridge register. 
+ minItems: 1 + + reg-names: + items: + - const: cfg + - const: breg + minItems: 1 ranges: maxItems: 2 @@ -76,6 +87,27 @@ required: - "#interrupt-cells" - interrupt-controller +if: + properties: + compatible: + contains: + enum: + - xlnx,qdma-host-3.00 +then: + properties: + reg: + minItems: 2 + reg-names: + minItems: 2 + required: + - reg-names +else: + properties: + reg: + maxItems: 1 + reg-names: + maxItems: 1 + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/perf/arm,cmn.yaml b/Documentation/devicetree/bindings/perf/arm,cmn.yaml index 2e51072e794a67..0e9d665584e65f 100644 --- a/Documentation/devicetree/bindings/perf/arm,cmn.yaml +++ b/Documentation/devicetree/bindings/perf/arm,cmn.yaml @@ -16,6 +16,7 @@ properties: - arm,cmn-600 - arm,cmn-650 - arm,cmn-700 + - arm,cmn-s3 - arm,ci-700 reg: diff --git a/Documentation/devicetree/bindings/perf/arm,ni.yaml b/Documentation/devicetree/bindings/perf/arm,ni.yaml new file mode 100644 index 00000000000000..d66fffa256d5ff --- /dev/null +++ b/Documentation/devicetree/bindings/perf/arm,ni.yaml @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/perf/arm,ni.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arm NI (Network-on-Chip Interconnect) Performance Monitors + +maintainers: + - Robin Murphy + +properties: + compatible: + const: arm,ni-700 + + reg: + items: + - description: Complete configuration register space + + interrupts: + minItems: 1 + maxItems: 32 + description: Overflow interrupts, one per clock domain, in order of domain ID + +required: + - compatible + - reg + - interrupts + +additionalProperties: false diff --git a/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt b/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt index e1bb12711fbf82..602cf952b92bf5 100644 --- a/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt +++ b/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt @@ -36,7 +36,7 @@ Optional properties: 3-tuple setting for each (up to 3) supported link speed on the host. Range is 0 to 273000 in unit of uV. Default is 0. -- apm,tx-pre-cursor2 : 2st pre-cursor emphasis taps control. Two set of +- apm,tx-pre-cursor2 : 2nd pre-cursor emphasis taps control. Two set of 3-tuple setting for each (up to 3) supported link speed on the host. Range is 0 to 127400 in unit uV. Default is 0x0. diff --git a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml index f4b1ca2fb56253..ce665a2779b796 100644 --- a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml +++ b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml @@ -87,6 +87,12 @@ properties: maximum: 119 default: 100 + nxp,sim: + description: + The system integration module (SIM) provides system control and chip + configuration registers. 
+ $ref: /schemas/types.yaml#/definitions/phandle + required: - compatible - reg @@ -110,6 +116,17 @@ allOf: required: - fsl,anatop + - if: + properties: + compatible: + const: fsl,imx7ulp-usbphy + then: + required: + - nxp,sim + else: + properties: + nxp,sim: false + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml b/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml new file mode 100644 index 00000000000000..81001966f65796 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/hisilicon,hi3798cv200-combphy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HiSilicon STB PCIE/SATA/USB3 PHY + +maintainers: + - Shawn Guo + +properties: + compatible: + const: hisilicon,hi3798cv200-combphy + + reg: + maxItems: 1 + + '#phy-cells': + description: The cell contains the PHY mode + const: 1 + + clocks: + maxItems: 1 + + resets: + maxItems: 1 + + hisilicon,fixed-mode: + description: If the phy device doesn't support mode select but a fixed mode + setting, the property should be present to specify the particular mode. + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [ 1, 2, 4] # SATA, PCIE, USB3 + + hisilicon,mode-select-bits: + description: If the phy device support mode select, this property should be + present to specify the register bits in peripheral controller. + items: + - description: register_offset + - description: bit shift + - description: bit mask + +required: + - compatible + - reg + - '#phy-cells' + - clocks + - resets + +oneOf: + - required: ['hisilicon,fixed-mode'] + - required: ['hisilicon,mode-select-bits'] + +additionalProperties: false + +... diff --git a/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml index cfb3ca97f87c68..cc9d0d4eeeeb8f 100644 --- a/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml +++ b/Documentation/devicetree/bindings/phy/mediatek,mt7988-xfi-tphy.yaml @@ -41,7 +41,7 @@ properties: description: One instance of the T-PHY on MT7988 suffers from a performance problem in 10GBase-R mode which needs a work-around in the driver. - This flag enables a work-around ajusting an analog phy setting and + This flag enables a work-around adjusting an analog phy setting and is required for XFI Port0 of the MT7988 SoC to be in compliance with the SFP specification. diff --git a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml index acba0720125ddd..423b7c4e62f2a9 100644 --- a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml +++ b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml @@ -240,7 +240,7 @@ patternProperties: The force mode is used to manually switch the shared phy mode between USB3 and PCIe, when USB3 phy type is selected by the consumer, and force-mode is set, will cause phy's power and pipe toggled and force - phy as USB3 mode which switched from default PCIe mode. But perfer to + phy as USB3 mode which switched from default PCIe mode. But prefer to use the property "mediatek,syscon-type" for newer SoCs that support it. 
type: boolean diff --git a/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml new file mode 100644 index 00000000000000..fff858c909a06f --- /dev/null +++ b/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/nuvoton,ma35d1-usb2-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Nuvoton MA35D1 USB2 phy + +maintainers: + - Hui-Ping Chen + +properties: + compatible: + enum: + - nuvoton,ma35d1-usb2-phy + + "#phy-cells": + const: 0 + + clocks: + maxItems: 1 + + nuvoton,sys: + $ref: /schemas/types.yaml#/definitions/phandle + description: + phandle to syscon for checking the PHY clock status. + +required: + - compatible + - "#phy-cells" + - clocks + - nuvoton,sys + +additionalProperties: false + +examples: + - | + #include + + usb_phy: usb-phy { + compatible = "nuvoton,ma35d1-usb2-phy"; + clocks = <&clk USBD_GATE>; + nuvoton,sys = <&sys>; + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt b/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt deleted file mode 100644 index 17b0c761370a4a..00000000000000 --- a/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt +++ /dev/null @@ -1,59 +0,0 @@ -HiSilicon STB PCIE/SATA/USB3 PHY - -Required properties: -- compatible: Should be "hisilicon,hi3798cv200-combphy" -- reg: Should be the address space for COMBPHY configuration and state - registers in peripheral controller, e.g. PERI_COMBPHY0_CFG and - PERI_COMBPHY0_STATE for COMBPHY0 Hi3798CV200 SoC. -- #phy-cells: Should be 1. The cell number is used to select the phy mode - as defined in . -- clocks: The phandle to clock provider and clock specifier pair. -- resets: The phandle to reset controller and reset specifier pair. - -Refer to phy/phy-bindings.txt for the generic PHY binding properties. - -Optional properties: -- hisilicon,fixed-mode: If the phy device doesn't support mode select - but a fixed mode setting, the property should be present to specify - the particular mode. -- hisilicon,mode-select-bits: If the phy device support mode select, - this property should be present to specify the register bits in - peripheral controller, as a 3 integers tuple: - . - -Notes: -- Between hisilicon,fixed-mode and hisilicon,mode-select-bits, one and only - one of them should be present. -- The device node should be a child of peripheral controller that contains - COMBPHY configuration/state and PERI_CTRL register used to select PHY mode. - Refer to arm/hisilicon/hisilicon.txt for the parent peripheral controller - bindings. 
- -Examples: - -perictrl: peripheral-controller@8a20000 { - compatible = "hisilicon,hi3798cv200-perictrl", "syscon", - "simple-mfd"; - reg = <0x8a20000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x8a20000 0x1000>; - - combphy0: phy@850 { - compatible = "hisilicon,hi3798cv200-combphy"; - reg = <0x850 0x8>; - #phy-cells = <1>; - clocks = <&crg HISTB_COMBPHY0_CLK>; - resets = <&crg 0x188 4>; - hisilicon,fixed-mode = ; - }; - - combphy1: phy@858 { - compatible = "hisilicon,hi3798cv200-combphy"; - reg = <0x858 0x8>; - #phy-cells = <1>; - clocks = <&crg HISTB_COMBPHY1_CLK>; - resets = <&crg 0x188 12>; - hisilicon,mode-select-bits = <0x0008 11 (0x3 << 11)>; - }; -}; diff --git a/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml b/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml index 83fe4b39b56f45..78607ee3e2e84f 100644 --- a/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml @@ -14,6 +14,7 @@ properties: compatible: enum: - qcom,hdmi-phy-8996 + - qcom,hdmi-phy-8998 reg: maxItems: 6 diff --git a/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml new file mode 100644 index 00000000000000..0bf18d32c1331a --- /dev/null +++ b/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/qcom,sata-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SATA PHY Controller + +maintainers: + - Bjorn Andersson + - Konrad Dybcio + +description: + The Qualcomm SATA PHY describes on-chip SATA Physical layer controllers. + +properties: + compatible: + enum: + - qcom,ipq806x-sata-phy + - qcom,apq8064-sata-phy + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: cfg + + '#phy-cells': + const: 0 + +required: + - compatible + - reg + - clocks + - clock-names + - '#phy-cells' + +additionalProperties: false + +examples: + - | + #include + sata_phy: sata-phy@1b400000 { + compatible = "qcom,ipq806x-sata-phy"; + reg = <0x1b400000 0x200>; + + clocks = <&gcc SATA_PHY_CFG_CLK>; + clock-names = "cfg"; + + #phy-cells = <0>; + }; + diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml index 03dbd02cf9e79f..dcf4fa55fbba58 100644 --- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml @@ -40,6 +40,7 @@ properties: - qcom,sm8650-qmp-gen4x2-pcie-phy - qcom,x1e80100-qmp-gen3x2-pcie-phy - qcom,x1e80100-qmp-gen4x2-pcie-phy + - qcom,x1e80100-qmp-gen4x4-pcie-phy reg: minItems: 1 @@ -118,6 +119,7 @@ allOf: contains: enum: - qcom,sc8280xp-qmp-gen3x4-pcie-phy + - qcom,x1e80100-qmp-gen4x4-pcie-phy then: properties: reg: @@ -169,6 +171,7 @@ allOf: - qcom,sc8280xp-qmp-gen3x1-pcie-phy - qcom,sc8280xp-qmp-gen3x2-pcie-phy - qcom,sc8280xp-qmp-gen3x4-pcie-phy + - qcom,x1e80100-qmp-gen4x4-pcie-phy then: properties: clocks: diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml index 90d79491e28154..d16a543a784887 100644 --- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml @@ -43,7 +43,7 @@ properties: 
qcom,tune-usb2-amplitude: $ref: /schemas/types.yaml#/definitions/uint8 - description: High-Speed trasmit amplitude + description: High-Speed transmit amplitude minimum: 0 maximum: 15 default: 8 diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt b/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt deleted file mode 100644 index 2cb2168cef411b..00000000000000 --- a/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt +++ /dev/null @@ -1,76 +0,0 @@ -Qualcomm's APQ8016/MSM8916 USB transceiver controller - -- compatible: - Usage: required - Value type: - Definition: Should contain "qcom,usb-8x16-phy". - -- reg: - Usage: required - Value type: - Definition: USB PHY base address and length of the register map - -- clocks: - Usage: required - Value type: - Definition: See clock-bindings.txt section "consumers". List of - two clock specifiers for interface and core controller - clocks. - -- clock-names: - Usage: required - Value type: - Definition: Must contain "iface" and "core" strings. - -- vddcx-supply: - Usage: required - Value type: - Definition: phandle to the regulator VDCCX supply node. - -- v1p8-supply: - Usage: required - Value type: - Definition: phandle to the regulator 1.8V supply node. - -- v3p3-supply: - Usage: required - Value type: - Definition: phandle to the regulator 3.3V supply node. - -- resets: - Usage: required - Value type: - Definition: See reset.txt section "consumers". PHY reset specifier. - -- reset-names: - Usage: required - Value type: - Definition: Must contain "phy" string. - -- switch-gpio: - Usage: optional - Value type: - Definition: Some boards are using Dual SPDT USB Switch, witch is - controlled by GPIO to de/multiplex D+/D- USB lines - between connectors. - -Example: - usb_phy: phy@78d9000 { - compatible = "qcom,usb-8x16-phy"; - reg = <0x78d9000 0x400>; - - vddcx-supply = <&pm8916_s1_corner>; - v1p8-supply = <&pm8916_l7>; - v3p3-supply = <&pm8916_l13>; - - clocks = <&gcc GCC_USB_HS_AHB_CLK>, - <&gcc GCC_USB_HS_SYSTEM_CLK>; - clock-names = "iface", "core"; - - resets = <&gcc GCC_USB2A_PHY_BCR>; - reset-names = "phy"; - - // D+/D- lines: 1 - Routed to HUB, 0 - Device connector - switch-gpio = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>; - }; - diff --git a/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt b/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt deleted file mode 100644 index 952f6c96bab903..00000000000000 --- a/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt +++ /dev/null @@ -1,24 +0,0 @@ -Qualcomm APQ8064 SATA PHY Controller ------------------------------------- - -SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers. -Each SATA PHY controller should have its own node. - -Required properties: -- compatible: compatible list, contains "qcom,apq8064-sata-phy". -- reg: offset and length of the SATA PHY register set; -- #phy-cells: must be zero -- clocks: a list of phandles and clock-specifier pairs, one for each entry in - clock-names. -- clock-names: must be "cfg" for phy config clock. 
- -Example: - sata_phy: sata-phy@1b400000 { - compatible = "qcom,apq8064-sata-phy"; - reg = <0x1b400000 0x200>; - - clocks = <&gcc SATA_PHY_CFG_CLK>; - clock-names = "cfg"; - - #phy-cells = <0>; - }; diff --git a/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt b/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt deleted file mode 100644 index 76bfbd056202e4..00000000000000 --- a/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt +++ /dev/null @@ -1,23 +0,0 @@ -Qualcomm IPQ806x SATA PHY Controller ------------------------------------- - -SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers. -Each SATA PHY controller should have its own node. - -Required properties: -- compatible: compatible list, contains "qcom,ipq806x-sata-phy" -- reg: offset and length of the SATA PHY register set; -- #phy-cells: must be zero -- clocks: must be exactly one entry -- clock-names: must be "cfg" - -Example: - sata_phy: sata-phy@1b400000 { - compatible = "qcom,ipq806x-sata-phy"; - reg = <0x1b400000 0x200>; - - clocks = <&gcc SATA_PHY_CFG_CLK>; - clock-names = "cfg"; - - #phy-cells = <0>; - }; diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml index f82649a55e91b9..af275cea3456be 100644 --- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml +++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml @@ -13,7 +13,9 @@ properties: compatible: oneOf: - items: - - const: renesas,usb2-phy-r8a77470 # RZ/G1C + - enum: + - renesas,usb2-phy-r8a77470 # RZ/G1C + - renesas,usb2-phy-r9a08g045 # RZ/G3S - items: - enum: diff --git a/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml index 54e822c715f3f4..84fe59dbcf487c 100644 --- a/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml +++ b/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml @@ -27,6 +27,9 @@ properties: - const: ref - const: apb + "#clock-cells": + const: 0 + "#phy-cells": const: 0 diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml index de3cffc850bc96..e34b875a1bb8ed 100644 --- a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml +++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml @@ -30,13 +30,17 @@ properties: minItems: 1 maxItems: 2 - clock-names: true + clock-names: + minItems: 1 + maxItems: 6 resets: minItems: 2 maxItems: 6 - reset-names: true + reset-names: + minItems: 2 + maxItems: 6 allOf: - if: diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml index b3ed2f74a4145f..9fc0e87c508e50 100644 --- a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml +++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml @@ -31,13 +31,17 @@ properties: minItems: 1 maxItems: 2 - clock-names: true + clock-names: + minItems: 1 + maxItems: 2 resets: minItems: 1 maxItems: 2 - reset-names: true + reset-names: + minItems: 1 + maxItems: 2 socionext,syscon: $ref: /schemas/types.yaml#/definitions/phandle diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml index 2107d98ace1562..25c4159f86e486 
100644 --- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml +++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml @@ -34,12 +34,15 @@ properties: minItems: 2 maxItems: 3 - clock-names: true + clock-names: + minItems: 2 + maxItems: 3 resets: maxItems: 2 - reset-names: true + reset-names: + maxItems: 2 vbus-supply: description: A phandle to the regulator for USB VBUS diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml index 8f5aa6238bf301..1f663e9901da75 100644 --- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml +++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml @@ -35,12 +35,15 @@ properties: minItems: 2 maxItems: 3 - clock-names: true + clock-names: + minItems: 2 + maxItems: 3 resets: maxItems: 2 - reset-names: true + reset-names: + maxItems: 2 vbus-supply: description: A phandle to the regulator for USB VBUS, only for USB host diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt deleted file mode 100644 index 0aa1a53012d6c0..00000000000000 --- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt +++ /dev/null @@ -1,178 +0,0 @@ -* Atmel AT91 Pinmux Controller - -The AT91 Pinmux Controller, enables the IC -to share one PAD to several functional blocks. The sharing is done by -multiplexing the PAD input/output signals. For each PAD there are up to -8 muxing options (called periph modes). Since different modules require -different PAD settings (like pull up, keeper, etc) the controller controls -also the PAD settings parameters. - -Please refer to pinctrl-bindings.txt in this directory for details of the -common pinctrl bindings used by client devices, including the meaning of the -phrase "pin configuration node". - -Atmel AT91 pin configuration node is a node of a group of pins which can be -used for a specific device or function. This node represents both mux and config -of the pins in that group. The 'pins' selects the function mode(also named pin -mode) this pin can work on and the 'config' configures various pad settings -such as pull-up, multi drive, etc. - -Required properties for iomux controller: -- compatible: "atmel,at91rm9200-pinctrl" or "atmel,at91sam9x5-pinctrl" - or "atmel,sama5d3-pinctrl" or "microchip,sam9x60-pinctrl" - or "microchip,sam9x7-pinctrl", "microchip,sam9x60-pinctrl" -- atmel,mux-mask: array of mask (periph per bank) to describe if a pin can be - configured in this periph mode. All the periph and bank need to be describe. - -How to create such array: - -Each column will represent the possible peripheral of the pinctrl -Each line will represent a pio bank - -Take an example on the 9260 -Peripheral: 2 ( A and B) -Bank: 3 (A, B and C) -=> - - /* A B */ - 0xffffffff 0xffc00c3b /* pioA */ - 0xffffffff 0x7fff3ccf /* pioB */ - 0xffffffff 0x007fffff /* pioC */ - -For each peripheral/bank we will describe in a u32 if a pin can be -configured in it by putting 1 to the pin bit (1 << pin) - -Let's take the pioA on peripheral B -From the datasheet Table 10-2. 
-Peripheral B -PA0 MCDB0 -PA1 MCCDB -PA2 -PA3 MCDB3 -PA4 MCDB2 -PA5 MCDB1 -PA6 -PA7 -PA8 -PA9 -PA10 ETX2 -PA11 ETX3 -PA12 -PA13 -PA14 -PA15 -PA16 -PA17 -PA18 -PA19 -PA20 -PA21 -PA22 ETXER -PA23 ETX2 -PA24 ETX3 -PA25 ERX2 -PA26 ERX3 -PA27 ERXCK -PA28 ECRS -PA29 ECOL -PA30 RXD4 -PA31 TXD4 - -=> 0xffc00c3b - -Required properties for pin configuration node: -- atmel,pins: 4 integers array, represents a group of pins mux and config - setting. The format is atmel,pins = . - The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B... - PIN_BANK 0 is pioA, PIN_BANK 1 is pioB... - -Bits used for CONFIG: -PULL_UP (1 << 0): indicate this pin needs a pull up. -MULTIDRIVE (1 << 1): indicate this pin needs to be configured as multi-drive. - Multi-drive is equivalent to open-drain type output. -DEGLITCH (1 << 2): indicate this pin needs deglitch. -PULL_DOWN (1 << 3): indicate this pin needs a pull down. -DIS_SCHMIT (1 << 4): indicate this pin needs to the disable schmitt trigger. -DRIVE_STRENGTH (3 << 5): indicate the drive strength of the pin using the - following values: - 00 - No change (reset state value kept) - 01 - Low - 10 - Medium - 11 - High -OUTPUT (1 << 7): indicate this pin need to be configured as an output. -OUTPUT_VAL (1 << 8): output val (1 = high, 0 = low) -SLEWRATE (1 << 9): slew rate of the pin: 0 = disable, 1 = enable -DEBOUNCE (1 << 16): indicate this pin needs debounce. -DEBOUNCE_VAL (0x3fff << 17): debounce value. - -NOTE: -Some requirements for using atmel,at91rm9200-pinctrl binding: -1. We have pin function node defined under at91 controller node to represent - what pinmux functions this SoC supports. -2. The driver can use the function node's name and pin configuration node's - name describe the pin function and group hierarchy. - For example, Linux at91 pinctrl driver takes the function node's name - as the function name and pin configuration node's name as group name to - create the map table. -3. Each pin configuration node should have a phandle, devices can set pins - configurations by referring to the phandle of that pin configuration node. -4. The gpio controller must be describe in the pinctrl simple-bus. - -For each bank the required properties are: -- compatible: "atmel,at91sam9x5-gpio" or "atmel,at91rm9200-gpio" or - "microchip,sam9x60-gpio" - or "microchip,sam9x7-gpio", "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio" -- reg: physical base address and length of the controller's registers -- interrupts: interrupt outputs from the controller -- interrupt-controller: marks the device node as an interrupt controller -- #interrupt-cells: should be 2; refer to ../interrupt-controller/interrupts.txt - for more details. -- gpio-controller -- #gpio-cells: should be 2; the first cell is the GPIO number and the second - cell specifies GPIO flags as defined in . 
-- clocks: bank clock - -Examples: - -pinctrl@fffff400 { - #address-cells = <1>; - #size-cells = <1>; - ranges; - compatible = "atmel,at91rm9200-pinctrl", "simple-bus"; - reg = <0xfffff400 0x600>; - - pioA: gpio@fffff400 { - compatible = "atmel,at91sam9x5-gpio"; - reg = <0xfffff400 0x200>; - interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>; - #gpio-cells = <2>; - gpio-controller; - interrupt-controller; - #interrupt-cells = <2>; - clocks = <&pmc PMC_TYPE_PERIPHERAL 2>; - }; - - atmel,mux-mask = < - /* A B */ - 0xffffffff 0xffc00c3b /* pioA */ - 0xffffffff 0x7fff3ccf /* pioB */ - 0xffffffff 0x007fffff /* pioC */ - >; - - /* shared pinctrl settings */ - dbgu { - pinctrl_dbgu: dbgu-0 { - atmel,pins = - <1 14 0x1 0x0 /* PB14 periph A */ - 1 15 0x1 0x1>; /* PB15 periph A with pullup */ - }; - }; -}; - -dbgu: serial@fffff200 { - compatible = "atmel,at91sam9260-usart"; - reg = <0xfffff200 0x200>; - interrupts = <1 4 7>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_dbgu>; -}; diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml new file mode 100644 index 00000000000000..1bb386b42039cb --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91rm9200-pinctrl.yaml @@ -0,0 +1,184 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pinctrl/atmel,at91rm9200-pinctrl.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip PIO3 Pinmux Controller + +maintainers: + - Manikandan Muralidharan + +description: + The AT91 Pinmux Controller, enables the IC to share one PAD to several + functional blocks. The sharing is done by multiplexing the PAD input/output + signals. For each PAD there are up to 8 muxing options (called periph modes). + Since different modules require different PAD settings (like pull up, keeper, + etc) the controller controls also the PAD settings parameters. + +properties: + compatible: + oneOf: + - items: + - enum: + - atmel,at91rm9200-pinctrl + - atmel,at91sam9x5-pinctrl + - atmel,sama5d3-pinctrl + - microchip,sam9x60-pinctrl + - const: simple-mfd + - items: + - enum: + - microchip,sam9x7-pinctrl + - const: microchip,sam9x60-pinctrl + - const: simple-mfd + + '#address-cells': + const: 1 + + '#size-cells': + const: 1 + + ranges: true + + atmel,mux-mask: + $ref: /schemas/types.yaml#/definitions/uint32-matrix + description: | + Array of mask (periph per bank) to describe if a pin can be + configured in this periph mode. All the periph and bank need to + be described. + + #How to create such array: + + Each column will represent the possible peripheral of the pinctrl + Each line will represent a pio bank + + #Example: + + In at91sam9260.dtsi, + Peripheral: 2 ( A and B) + Bank: 3 (A, B and C) + + # A B + 0xffffffff 0xffc00c3b # pioA + 0xffffffff 0x7fff3ccf # pioB + 0xffffffff 0x007fffff # pioC + + For each peripheral/bank we will describe in a u32 if a pin can be + configured in it by putting 1 to the pin bit (1 << pin) + + Let's take the pioA on peripheral B whose value is 0xffc00c3b + From the datasheet Table 10-2. 
+ Peripheral B + PA0 MCDB0 + PA1 MCCDB + PA2 + PA3 MCDB3 + PA4 MCDB2 + PA5 MCDB1 + PA6 + PA7 + PA8 + PA9 + PA10 ETX2 + PA11 ETX3 + PA12 + PA13 + PA14 + PA15 + PA16 + PA17 + PA18 + PA19 + PA20 + PA21 + PA22 ETXER + PA23 ETX2 + PA24 ETX3 + PA25 ERX2 + PA26 ERX3 + PA27 ERXCK + PA28 ECRS + PA29 ECOL + PA30 RXD4 + PA31 TXD4 + +allOf: + - $ref: pinctrl.yaml# + +required: + - compatible + - ranges + - "#address-cells" + - "#size-cells" + - atmel,mux-mask + +patternProperties: + 'gpio@[0-9a-f]+$': + $ref: /schemas/gpio/atmel,at91rm9200-gpio.yaml + unevaluatedProperties: false + +additionalProperties: + type: object + additionalProperties: + type: object + additionalProperties: false + + properties: + atmel,pins: + $ref: /schemas/types.yaml#/definitions/uint32-matrix + description: | + Each entry consists of 4 integers and represents the pins + mux and config setting.The format is + atmel,pins = . + Supported pin number and mux varies for different SoCs, and + are defined in . + items: + items: + - description: + Pin bank + - description: + Pin bank index + - description: + Peripheral function + - description: + Pad configuration + +examples: + - | + #include + #include + #include + + pinctrl@fffff400 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "atmel,at91rm9200-pinctrl", "simple-mfd"; + ranges = <0xfffff400 0xfffff400 0x600>; + + atmel,mux-mask = < + /* A B */ + 0xffffffff 0xffc00c3b /* pioA */ + 0xffffffff 0x7fff3ccf /* pioB */ + 0xffffffff 0x007fffff /* pioC */ + >; + + dbgu { + pinctrl_dbgu: dbgu-0 { + atmel,pins = + ; + }; + }; + + pioA: gpio@fffff400 { + compatible = "atmel,at91rm9200-gpio"; + reg = <0xfffff400 0x200>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>; + #gpio-cells = <2>; + gpio-controller; + interrupt-controller; + #interrupt-cells = <2>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 2>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml deleted file mode 100644 index 5f00604bf48c42..00000000000000 --- a/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml +++ /dev/null @@ -1,242 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/pinctrl/mobileye,eyeq5-pinctrl.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Mobileye EyeQ5 pin controller - -description: > - The EyeQ5 pin controller handles the two pin banks of the system. It belongs - to a system-controller block called OLB. - - Pin control is about bias (pull-down, pull-up), drive strength and muxing. Pin - muxing supports two functions for each pin: first is GPIO, second is - pin-dependent. - - Pins and groups are bijective. - -maintainers: - - Grégory Clement - - Théo Lebrun - - Vladimir Kondratiev - -$ref: pinctrl.yaml# - -properties: - compatible: - enum: - - mobileye,eyeq5-pinctrl - - reg: - maxItems: 1 - -patternProperties: - "-pins?$": - type: object - description: Pin muxing configuration. 
- $ref: pinmux-node.yaml# - additionalProperties: false - properties: - pins: true - function: - enum: [gpio, - # Bank A - timer0, timer1, timer2, timer5, uart0, uart1, can0, can1, spi0, - spi1, refclk0, - # Bank B - timer3, timer4, timer6, uart2, can2, spi2, spi3, mclk0] - bias-disable: true - bias-pull-down: true - bias-pull-up: true - drive-strength: true - required: - - pins - - function - allOf: - - if: - properties: - function: - const: gpio - then: - properties: - pins: - items: # PA0 - PA28, PB0 - PB22 - pattern: '^(P(A|B)1?[0-9]|PA2[0-8]|PB2[0-2])$' - - if: - properties: - function: - const: timer0 - then: - properties: - pins: - items: - enum: [PA0, PA1] - - if: - properties: - function: - const: timer1 - then: - properties: - pins: - items: - enum: [PA2, PA3] - - if: - properties: - function: - const: timer2 - then: - properties: - pins: - items: - enum: [PA4, PA5] - - if: - properties: - function: - const: timer5 - then: - properties: - pins: - items: - enum: [PA6, PA7, PA8, PA9] - - if: - properties: - function: - const: uart0 - then: - properties: - pins: - items: - enum: [PA10, PA11] - - if: - properties: - function: - const: uart1 - then: - properties: - pins: - items: - enum: [PA12, PA13] - - if: - properties: - function: - const: can0 - then: - properties: - pins: - items: - enum: [PA14, PA15] - - if: - properties: - function: - const: can1 - then: - properties: - pins: - items: - enum: [PA16, PA17] - - if: - properties: - function: - const: spi0 - then: - properties: - pins: - items: - enum: [PA18, PA19, PA20, PA21, PA22] - - if: - properties: - function: - const: spi1 - then: - properties: - pins: - items: - enum: [PA23, PA24, PA25, PA26, PA27] - - if: - properties: - function: - const: refclk0 - then: - properties: - pins: - items: - enum: [PA28] - - if: - properties: - function: - const: timer3 - then: - properties: - pins: - items: - enum: [PB0, PB1] - - if: - properties: - function: - const: timer4 - then: - properties: - pins: - items: - enum: [PB2, PB3] - - if: - properties: - function: - const: timer6 - then: - properties: - pins: - items: - enum: [PB4, PB5, PB6, PB7] - - if: - properties: - function: - const: uart2 - then: - properties: - pins: - items: - enum: [PB8, PB9] - - if: - properties: - function: - const: can2 - then: - properties: - pins: - items: - enum: [PB10, PB11] - - if: - properties: - function: - const: spi2 - then: - properties: - pins: - items: - enum: [PB12, PB13, PB14, PB15, PB16] - - if: - properties: - function: - const: spi3 - then: - properties: - pins: - items: - enum: [PB17, PB18, PB19, PB20, PB21] - - if: - properties: - function: - const: mclk0 - then: - properties: - pins: - items: - enum: [PB22] - -required: - - compatible - - reg - -additionalProperties: false diff --git a/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml index 814b9598edd183..8cd1f442240e68 100644 --- a/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml @@ -71,51 +71,49 @@ patternProperties: One or more groups of pins to mux to a certain function items: enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi, - smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4den, smb4b, - smb4c, smb15, smb16, smb17, smb18, smb19, smb20, smb21, - smb22, smb23, smb23b, smb4d, smb14, smb5, smb4, smb3, - spi0cs1, spi0cs2, spi0cs3, spi1cs0, spi1cs1, spi1cs2, - spi1cs3, spi1cs23, smb3c, smb3b, 
bmcuart0a, uart1, jtag2, - bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, r1oen, - r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, fanin3, - fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10, - fanin11, fanin12, fanin13, fanin14, fanin15, pwm0, pwm1, pwm2, - pwm3, r2, r2err, r2md, r3rxer, ga20kbc, smb5d, lpc, espi, rg2, - ddr, i3c0, i3c1, i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2, - smb2c, smb2b, smb1c, smb1b, smb8, smb9, smb10, smb11, sd1, - sd1pwr, pwm4, pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11, - mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, lpcclk, - scipme, smi, smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c, - smb7d, spi1, faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2, - spi3cs3, nprd_smi, smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio, - wdog1, wdog2, smb12, smb13, spix, spixcs1, clkreq, hgpio0, - hgpio1, hgpio2, hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4, - bu4b, bu5, bu5b, bu6, gpo187 ] + smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4b, smb4c, smb15, + smb16, smb17, smb18, smb19, smb20, smb21, smb22, smb23, + smb23b, smb4d, smb14, smb5, smb4, smb3, spi0cs1, spi0cs2, + spi0cs3, spi1cs0, spi1cs1, spi1cs2, spi1cs3, spi1cs23, smb3c, + smb3b, bmcuart0a, uart1, jtag2, bmcuart1, uart2, sg1mdio, + bmcuart0b, r1err, r1md, r1oen, r2oen, rmii3, r3oen, smb3d, + fanin0, fanin1, fanin2, fanin3, fanin4, fanin5, fanin6, + fanin7, fanin8, fanin9, fanin10, fanin11, fanin12, fanin13, + fanin14, fanin15, pwm0, pwm1, pwm2, pwm3, r2, r2err, r2md, + r3rxer, ga20kbc, smb5d, lpc, espi, rg2, ddr, i3c0, i3c1, + i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2, smb2c, smb2b, smb1c, + smb1b, smb8, smb9, smb10, smb11, sd1, sd1pwr, pwm4, pwm5, + pwm6, pwm7, pwm8, pwm9, pwm10, pwm11, mmc8, mmc, mmcwp, mmccd, + mmcrst, clkout, serirq, scipme, smi, smb6, smb6b, smb6c, + smb6d, smb7, smb7b, smb7c, smb7d, spi1, faninx, r1, spi3, + spi3cs1, spi3quad, spi3cs2, spi3cs3, nprd_smi, smb0b, smb0c, + smb0den, smb0d, ddc, rg2mdio, wdog1, wdog2, smb12, smb13, + spix, spixcs1, clkreq, hgpio0, hgpio1, hgpio2, hgpio3, hgpio4, + hgpio5, hgpio6, hgpio7, bu4, bu4b, bu5, bu5b, bu6, gpo187 ] function: description: The function that a group of pins is muxed to - enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi, - smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4den, smb4b, - smb4c, smb15, smb16, smb17, smb18, smb19, smb20, smb21, - smb22, smb23, smb23b, smb4d, smb14, smb5, smb4, smb3, - spi0cs1, spi0cs2, spi0cs3, spi1cs0, spi1cs1, spi1cs2, - spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a, uart1, jtag2, - bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, r1oen, - r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, fanin3, - fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10, + enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi, smb5b, + smb5c, lkgpo0, pspi, jm1, jm2, smb4b, smb4c, smb15, smb16, + smb17, smb18, smb19, smb20, smb21, smb22, smb23, smb23b, smb4d, + smb14, smb5, smb4, smb3, spi0cs1, spi0cs2, spi0cs3, spi1cs0, + spi1cs1, spi1cs2, spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a, + uart1, jtag2, bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, + r1oen, r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, + fanin3, fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10, fanin11, fanin12, fanin13, fanin14, fanin15, pwm0, pwm1, pwm2, pwm3, r2, r2err, r2md, r3rxer, ga20kbc, smb5d, lpc, espi, rg2, ddr, i3c0, i3c1, i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2, smb2c, smb2b, smb1c, smb1b, smb8, smb9, smb10, smb11, sd1, sd1pwr, pwm4, pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11, - mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, 
lpcclk, - scipme, smi, smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c, - smb7d, spi1, faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2, - spi3cs3, nprd_smi, smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio, - wdog1, wdog2, smb12, smb13, spix, spixcs1, clkreq, hgpio0, - hgpio1, hgpio2, hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4, - bu4b, bu5, bu5b, bu6, gpo187 ] + mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, scipme, smi, + smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c, smb7d, spi1, + faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2, spi3cs3, nprd_smi, + smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio, wdog1, wdog2, + smb12, smb13, spix, spixcs1, clkreq, hgpio0, hgpio1, hgpio2, + hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4, bu4b, bu5, bu5b, + bu6, gpo187 ] dependencies: groups: [ function ] diff --git a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml index d0af21a564b44f..cbfcf215e571d9 100644 --- a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml +++ b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml @@ -96,6 +96,9 @@ properties: type: boolean description: disable schmitt-trigger mode + input-schmitt-microvolt: + description: threshold strength for schmitt-trigger + input-debounce: $ref: /schemas/types.yaml#/definitions/uint32-array description: Takes the debounce time in usec as argument or 0 to disable diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt deleted file mode 100644 index 4e90ddd777846e..00000000000000 --- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt +++ /dev/null @@ -1,95 +0,0 @@ -Qualcomm APQ8064 TLMM block - -Required properties: -- compatible: "qcom,apq8064-pinctrl" -- reg: Should be the base address and length of the TLMM block. -- interrupts: Should be the parent IRQ of the TLMM block. -- interrupt-controller: Marks the device node as an interrupt controller. -- #interrupt-cells: Should be two. -- gpio-controller: Marks the device node as a GPIO controller. -- #gpio-cells : Should be two. - The first cell is the gpio pin number and the - second cell is used for optional parameters. -- gpio-ranges: see ../gpio/gpio.txt - -Optional properties: - -- gpio-reserved-ranges: see ../gpio/gpio.txt - -Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for -a general description of GPIO and interrupt bindings. - -Please refer to pinctrl-bindings.txt in this directory for details of the -common pinctrl bindings used by client devices, including the meaning of the -phrase "pin configuration node". - -Qualcomm's pin configuration nodes act as a container for an arbitrary number of -subnodes. Each of these subnodes represents some desired configuration for a -pin, a group, or a list of pins or groups. This configuration can include the -mux function to select on those pin(s)/group(s), and various pin configuration -parameters, such as pull-up, drive strength, etc. - -The name of each subnode is not important; all subnodes should be enumerated -and processed purely based on their content. - -Each subnode only affects those parameters that are explicitly listed. In -other words, a subnode that lists a mux function but no pin configuration -parameters implies no information about any pin configuration parameters. -Similarly, a pin subnode that describes a pullup parameter implies no -information about e.g. the mux function. 
- - -The following generic properties as defined in pinctrl-bindings.txt are valid -to specify in a pin configuration subnode: - - pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength, - output-low, output-high. - -Non-empty subnodes must specify the 'pins' property. - -Valid values for pins are: - gpio0-gpio89 - -Valid values for function are: - cam_mclk, codec_mic_i2s, codec_spkr_i2s, gp_clk_0a, gp_clk_0b, gp_clk_1a, - gp_clk_1b, gp_clk_2a, gp_clk_2b, gpio, gsbi1, gsbi2, gsbi3, gsbi4, - gsbi4_cam_i2c, gsbi5, gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6, - gsbi6_spi_cs1, gsbi6_spi_cs2, gsbi6_spi_cs3, gsbi7, gsbi7_spi_cs1, - gsbi7_spi_cs2, gsbi7_spi_cs3, gsbi_cam_i2c, hdmi, mi2s, riva_bt, riva_fm, - riva_wlan, sdc2, sdc4, slimbus, spkr_i2s, tsif1, tsif2, usb2_hsic, ps_hold - -Example: - - msmgpio: pinctrl@800000 { - compatible = "qcom,apq8064-pinctrl"; - reg = <0x800000 0x4000>; - - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - interrupts = <0 16 0x4>; - - pinctrl-names = "default"; - pinctrl-0 = <&gsbi5_uart_default>; - gpio-ranges = <&msmgpio 0 0 90>; - - gsbi5_uart_default: gsbi5_uart_default { - mux { - pins = "gpio51", "gpio52"; - function = "gsbi5"; - }; - - tx { - pins = "gpio51"; - drive-strength = <4>; - bias-disable; - }; - - rx { - pins = "gpio52"; - drive-strength = <2>; - bias-pull-up; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml new file mode 100644 index 00000000000000..f251dcd4bb7f76 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pinctrl/qcom,apq8064-pinctrl.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Technologies, Inc. APQ8064 TLMM block + +maintainers: + - Bjorn Andersson + +description: | + Top Level Mode Multiplexer pin controller in Qualcomm APQ8064 SoC. + +allOf: + - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml# + +properties: + compatible: + const: qcom,apq8064-pinctrl + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + gpio-reserved-ranges: true + +patternProperties: + "-state$": + oneOf: + - $ref: "#/$defs/qcom-apq8064-tlmm-state" + - patternProperties: + "-pins$": + $ref: "#/$defs/qcom-apq8064-tlmm-state" + additionalProperties: false + +$defs: + qcom-apq8064-tlmm-state: + type: object + description: + Pinctrl node's client devices use subnodes for desired pin configuration. + Client device subnodes use below standard properties. + $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state + unevaluatedProperties: false + + properties: + pins: + description: + List of gpio pins affected by the properties specified in this + subnode. + items: + oneOf: + - pattern: "^gpio([0-9]|[1-8][0-9])$" + - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc3_clk, sdc3_cmd, sdc3_data ] + minItems: 1 + maxItems: 36 + + function: + description: + Specify the alternative function to be configured for the specified + pins. 
+ enum: [ cam_mclk, codec_mic_i2s, codec_spkr_i2s, gp_clk_0a, + gp_clk_0b, gp_clk_1a, gp_clk_1b, gp_clk_2a, gp_clk_2b, + gpio, gsbi1, gsbi2, gsbi3, gsbi4, gsbi4_cam_i2c, + gsbi5, gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, + gsbi6, gsbi6_spi_cs1, gsbi6_spi_cs2, gsbi6_spi_cs3, + gsbi7, gsbi7_spi_cs1, gsbi7_spi_cs2, gsbi7_spi_cs3, + gsbi_cam_i2c, hdmi, mi2s, riva_bt, riva_fm, riva_wlan, + sdc2, sdc4, slimbus, spkr_i2s, tsif1, tsif2, usb2_hsic, + ps_hold ] + + required: + - pins + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include + tlmm: pinctrl@800000 { + compatible = "qcom,apq8064-pinctrl"; + reg = <0x800000 0x4000>; + + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&tlmm 0 0 90>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = ; + + uart-state { + rx-pins { + pins = "gpio52"; + function = "gsbi5"; + bias-pull-up; + }; + + tx-pins { + pins = "gpio51"; + function = "gsbi5"; + bias-disable; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt deleted file mode 100644 index c9782397ff1483..00000000000000 --- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt +++ /dev/null @@ -1,188 +0,0 @@ -Qualcomm APQ8084 TLMM block - -This binding describes the Top Level Mode Multiplexer block found in the -MSM8960 platform. - -- compatible: - Usage: required - Value type: - Definition: must be "qcom,apq8084-pinctrl" - -- reg: - Usage: required - Value type: - Definition: the base address and size of the TLMM register space. - -- interrupts: - Usage: required - Value type: - Definition: should specify the TLMM summary IRQ. - -- interrupt-controller: - Usage: required - Value type: - Definition: identifies this node as an interrupt controller - -- #interrupt-cells: - Usage: required - Value type: - Definition: must be 2. Specifying the pin number and flags, as defined - in - -- gpio-controller: - Usage: required - Value type: - Definition: identifies this node as a gpio controller - -- #gpio-cells: - Usage: required - Value type: - Definition: must be 2. Specifying the pin number and flags, as defined - in - -- gpio-ranges: - Usage: required - Definition: see ../gpio/gpio.txt - -- gpio-reserved-ranges: - Usage: optional - Definition: see ../gpio/gpio.txt - -Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for -a general description of GPIO and interrupt bindings. - -Please refer to pinctrl-bindings.txt in this directory for details of the -common pinctrl bindings used by client devices, including the meaning of the -phrase "pin configuration node". - -The pin configuration nodes act as a container for an arbitrary number of -subnodes. Each of these subnodes represents some desired configuration for a -pin, a group, or a list of pins or groups. This configuration can include the -mux function to select on those pin(s)/group(s), and various pin configuration -parameters, such as pull-up, drive strength, etc. - - -PIN CONFIGURATION NODES: - -The name of each subnode is not important; all subnodes should be enumerated -and processed purely based on their content. - -Each subnode only affects those parameters that are explicitly listed. In -other words, a subnode that lists a mux function but no pin configuration -parameters implies no information about any pin configuration parameters. -Similarly, a pin subnode that describes a pullup parameter implies no -information about e.g. 
the mux function. - - -The following generic properties as defined in pinctrl-bindings.txt are valid -to specify in a pin configuration subnode: - -- pins: - Usage: required - Value type: - Definition: List of gpio pins affected by the properties specified in - this subnode. Valid pins are: - gpio0-gpio146, - sdc1_clk, - sdc1_cmd, - sdc1_data - sdc2_clk, - sdc2_cmd, - sdc2_data - -- function: - Usage: required - Value type: - Definition: Specify the alternative function to be configured for the - specified pins. Functions are only valid for gpio pins. - Valid values are: - adsp_ext, audio_ref, blsp_i2c1, blsp_i2c2, blsp_i2c3, - blsp_i2c4, blsp_i2c5, blsp_i2c6, blsp_i2c7, blsp_i2c8, - blsp_i2c9, blsp_i2c10, blsp_i2c11, blsp_i2c12, - blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, blsp_spi5, - blsp_spi6, blsp_spi7, blsp_spi8, blsp_spi9, blsp_spi10, - blsp_spi11, blsp_spi12, blsp_uart1, blsp_uart2, blsp_uart3, - blsp_uart4, blsp_uart5, blsp_uart6, blsp_uart7, blsp_uart8, - blsp_uart9, blsp_uart10, blsp_uart11, blsp_uart12, - blsp_uim1, blsp_uim2, blsp_uim3, blsp_uim4, blsp_uim5, - blsp_uim6, blsp_uim7, blsp_uim8, blsp_uim9, blsp_uim10, - blsp_uim11, blsp_uim12, cam_mclk0, cam_mclk1, cam_mclk2, - cam_mclk3, cci_async, cci_async_in0, cci_i2c0, cci_i2c1, - cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4, - edp_hpd, gcc_gp1, gcc_gp2, gcc_gp3, gcc_obt, gcc_vtt,i - gp_mn, gp_pdm0, gp_pdm1, gp_pdm2, gp0_clk, gp1_clk, gpio, - hdmi_cec, hdmi_ddc, hdmi_dtest, hdmi_hpd, hdmi_rcv, hsic, - ldo_en, ldo_update, mdp_vsync, pci_e0, pci_e0_n, pci_e0_rst, - pci_e1, pci_e1_rst, pci_e1_rst_n, pci_e1_clkreq_n, pri_mi2s, - qua_mi2s, sata_act, sata_devsleep, sata_devsleep_n, - sd_write, sdc_emmc_mode, sdc3, sdc4, sec_mi2s, slimbus, - spdif_tx, spkr_i2s, spkr_i2s_ws, spss_geni, ter_mi2s, tsif1, - tsif2, uim, uim_batt_alarm - -- bias-disable: - Usage: optional - Value type: - Definition: The specified pins should be configured as no pull. - -- bias-pull-down: - Usage: optional - Value type: - Definition: The specified pins should be configured as pull down. - -- bias-pull-up: - Usage: optional - Value type: - Definition: The specified pins should be configured as pull up. - -- output-high: - Usage: optional - Value type: - Definition: The specified pins are configured in output mode, driven - high. - Not valid for sdc pins. - -- output-low: - Usage: optional - Value type: - Definition: The specified pins are configured in output mode, driven - low. - Not valid for sdc pins. - -- drive-strength: - Usage: optional - Value type: - Definition: Selects the drive strength for the specified pins, in mA. 
- Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16 - -Example: - - tlmm: pinctrl@fd510000 { - compatible = "qcom,apq8084-pinctrl"; - reg = <0xfd510000 0x4000>; - - gpio-controller; - #gpio-cells = <2>; - gpio-ranges = <&tlmm 0 0 147>; - interrupt-controller; - #interrupt-cells = <2>; - interrupts = <0 208 0>; - - uart2: uart2-default { - mux { - pins = "gpio4", "gpio5"; - function = "blsp_uart2"; - }; - - tx { - pins = "gpio4"; - drive-strength = <4>; - bias-disable; - }; - - rx { - pins = "gpio5"; - drive-strength = <2>; - bias-pull-up; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml new file mode 100644 index 00000000000000..38877d8b97ffe3 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml @@ -0,0 +1,129 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pinctrl/qcom,apq8084-pinctrl.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Technologies, Inc. APQ8084 TLMM block + +maintainers: + - Bjorn Andersson + +description: | + Top Level Mode Multiplexer pin controller in Qualcomm APQ8084 SoC. + +allOf: + - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml# + +properties: + compatible: + const: qcom,apq8084-pinctrl + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + gpio-reserved-ranges: true + +patternProperties: + "-state$": + oneOf: + - $ref: "#/$defs/qcom-apq8084-tlmm-state" + - patternProperties: + "-pins$": + $ref: "#/$defs/qcom-apq8084-tlmm-state" + additionalProperties: false + +$defs: + qcom-apq8084-tlmm-state: + type: object + description: + Pinctrl node's client devices use subnodes for desired pin configuration. + Client device subnodes use below standard properties. + $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state + unevaluatedProperties: false + + properties: + pins: + description: + List of gpio pins affected by the properties specified in this + subnode. + items: + oneOf: + - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-3][0-9]|14[0-6])$" + - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd, + sdc2_data ] + minItems: 1 + maxItems: 36 + + function: + description: + Specify the alternative function to be configured for the specified + pins. 
+ enum: [ adsp_ext, audio_ref, blsp_i2c1, blsp_i2c2, blsp_i2c3, + blsp_i2c4, blsp_i2c5, blsp_i2c6, blsp_i2c7, blsp_i2c8, + blsp_i2c9, blsp_i2c10, blsp_i2c11, blsp_i2c12, + blsp_spi1, blsp_spi1_cs1, blsp_spi1_cs2, blsp_spi1_cs3, + blsp_spi2, blsp_spi3, blsp_spi3_cs1, blsp_spi3_cs2, + blsp_spi3_cs3, blsp_spi4, blsp_spi5, blsp_spi6, + blsp_spi7, blsp_spi8, blsp_spi9, blsp_spi10, + blsp_spi10_cs1, blsp_spi10_cs2, blsp_spi10_cs3, + blsp_spi11, blsp_spi12, blsp_uart1, blsp_uart2, + blsp_uart3, blsp_uart4, blsp_uart5, blsp_uart6, + blsp_uart7, blsp_uart8, blsp_uart9, blsp_uart10, + blsp_uart11, blsp_uart12, blsp_uim1, blsp_uim2, + blsp_uim3, blsp_uim4, blsp_uim5, blsp_uim6, blsp_uim7, + blsp_uim8, blsp_uim9, blsp_uim10, blsp_uim11, + blsp_uim12, cam_mclk0, cam_mclk1, cam_mclk2, cam_mclk3, + cci_async, cci_async_in0, cci_i2c0, cci_i2c1, + cci_timer0, cci_timer1, cci_timer2, cci_timer3, + cci_timer4, edp_hpd, gcc_gp1, gcc_gp2, gcc_gp3, + gcc_obt, gcc_vtt, gp_mn, gp_pdm0, gp_pdm1, gp_pdm2, + gp0_clk, gp1_clk, gpio, hdmi_cec, hdmi_ddc, hdmi_dtest, + hdmi_hpd, hdmi_rcv, hsic, ldo_en, ldo_update, + mdp_vsync, pci_e0, pci_e0_n, pci_e0_rst, pci_e1, + pci_e1_rst, pci_e1_rst_n, pci_e1_clkreq_n, pri_mi2s, + qua_mi2s, sata_act, sata_devsleep, sata_devsleep_n, + sd_write, sdc_emmc_mode, sdc3, sdc4, sec_mi2s, slimbus, + spdif_tx, spkr_i2s, spkr_i2s_ws, spss_geni, ter_mi2s, + tsif1, tsif2, uim, uim_batt_alarm ] + + required: + - pins + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include + tlmm: pinctrl@fd510000 { + compatible = "qcom,apq8084-pinctrl"; + reg = <0xfd510000 0x4000>; + + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&tlmm 0 0 147>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = ; + + uart-state { + rx-pins { + pins = "gpio5"; + function = "blsp_uart2"; + bias-pull-up; + }; + + tx-pins { + pins = "gpio4"; + function = "blsp_uart2"; + bias-disable; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt deleted file mode 100644 index 97858a7c07a274..00000000000000 --- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt +++ /dev/null @@ -1,85 +0,0 @@ -Qualcomm Atheros IPQ4019 TLMM block - -This is the Top Level Mode Multiplexor block found on the Qualcomm IPQ8019 -platform, it provides pinctrl, pinmux, pinconf, and gpiolib facilities. - -Required properties: -- compatible: "qcom,ipq4019-pinctrl" -- reg: Should be the base address and length of the TLMM block. -- interrupts: Should be the parent IRQ of the TLMM block. -- interrupt-controller: Marks the device node as an interrupt controller. -- #interrupt-cells: Should be two. -- gpio-controller: Marks the device node as a GPIO controller. -- #gpio-cells : Should be two. - The first cell is the gpio pin number and the - second cell is used for optional parameters. -- gpio-ranges: see ../gpio/gpio.txt - -Optional properties: - -- gpio-reserved-ranges: see ../gpio/gpio.txt - -Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for -a general description of GPIO and interrupt bindings. - -Please refer to pinctrl-bindings.txt in this directory for details of the -common pinctrl bindings used by client devices, including the meaning of the -phrase "pin configuration node". - -The pin configuration nodes act as a container for an arbitrary number of -subnodes. 
Each of these subnodes represents some desired configuration for a -pin, a group, or a list of pins or groups. This configuration can include the -mux function to select on those pin(s)/group(s), and various pin configuration -parameters, such as pull-up, drive strength, etc. - -The name of each subnode is not important; all subnodes should be enumerated -and processed purely based on their content. - -Each subnode only affects those parameters that are explicitly listed. In -other words, a subnode that lists a mux function but no pin configuration -parameters implies no information about any pin configuration parameters. -Similarly, a pin subnode that describes a pullup parameter implies no -information about e.g. the mux function. - - -The following generic properties as defined in pinctrl-bindings.txt are valid -to specify in a pin configuration subnode: - pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-open-drain, - drive-strength. - -Non-empty subnodes must specify the 'pins' property. -Note that not all properties are valid for all pins. - - -Valid values for qcom,pins are: - gpio0-gpio99 - Supports mux, bias and drive-strength - -Valid values for qcom,function are: -aud_pin, audio_pwm, blsp_i2c0, blsp_i2c1, blsp_spi0, blsp_spi1, blsp_uart0, -blsp_uart1, chip_rst, gpio, i2s_rx, i2s_spdif_in, i2s_spdif_out, i2s_td, i2s_tx, -jtag, led0, led1, led2, led3, led4, led5, led6, led7, led8, led9, led10, led11, -mdc, mdio, pcie, pmu, prng_rosc, qpic, rgmii, rmii, sdio, smart0, smart1, -smart2, smart3, tm, wifi0, wifi1 - -Example: - - tlmm: pinctrl@1000000 { - compatible = "qcom,ipq4019-pinctrl"; - reg = <0x1000000 0x300000>; - - gpio-controller; - #gpio-cells = <2>; - gpio-ranges = <&tlmm 0 0 100>; - interrupt-controller; - #interrupt-cells = <2>; - interrupts = <0 208 0>; - - serial_pins: serial_pinmux { - mux { - pins = "gpio60", "gpio61"; - function = "blsp_uart0"; - bias-disable; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml new file mode 100644 index 00000000000000..cc5de9f7768014 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pinctrl/qcom,ipq4019-pinctrl.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Technologies, Inc. IPQ4019 TLMM block + +maintainers: + - Bjorn Andersson + +description: | + Top Level Mode Multiplexer pin controller in Qualcomm IPQ4019 SoC. + +allOf: + - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml# + +properties: + compatible: + const: qcom,ipq4019-pinctrl + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + gpio-reserved-ranges: true + +patternProperties: + "-state$": + oneOf: + - $ref: "#/$defs/qcom-ipq4019-tlmm-state" + - patternProperties: + "-pins$": + $ref: "#/$defs/qcom-ipq4019-tlmm-state" + additionalProperties: false + + "-hog(-[0-9]+)?$": + type: object + required: + - gpio-hog + +$defs: + qcom-ipq4019-tlmm-state: + type: object + description: + Pinctrl node's client devices use subnodes for desired pin configuration. + Client device subnodes use below standard properties. + $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state + unevaluatedProperties: false + + properties: + pins: + description: + List of gpio pins affected by the properties specified in this + subnode. 
+ items: + pattern: "^gpio([0-9]|[1-9][0-9])$" + minItems: 1 + maxItems: 36 + + function: + description: + Specify the alternative function to be configured for the specified + pins. + enum: [ aud_pin, audio_pwm, blsp_i2c0, blsp_i2c1, blsp_spi0, + blsp_spi1, blsp_uart0, blsp_uart1, chip_rst, gpio, + i2s_rx, i2s_spdif_in, i2s_spdif_out, i2s_td, i2s_tx, + jtag, led0, led1, led2, led3, led4, led5, led6, led7, + led8, led9, led10, led11, mdc, mdio, pcie, pmu, + prng_rosc, qpic, rgmii, rmii, sdio, smart0, smart1, + smart2, smart3, tm, wifi0, wifi1 ] + + required: + - pins + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include + tlmm: pinctrl@1000000 { + compatible = "qcom,ipq4019-pinctrl"; + reg = <0x01000000 0x300000>; + + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&tlmm 0 0 100>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = ; + + uart-state { + pins = "gpio16", "gpio17"; + function = "blsp_uart0"; + bias-disable; + }; + }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt deleted file mode 100644 index a7aaaa7db83b44..00000000000000 --- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt +++ /dev/null @@ -1,101 +0,0 @@ -Qualcomm IPQ8064 TLMM block - -Required properties: -- compatible: "qcom,ipq8064-pinctrl" -- reg: Should be the base address and length of the TLMM block. -- interrupts: Should be the parent IRQ of the TLMM block. -- interrupt-controller: Marks the device node as an interrupt controller. -- #interrupt-cells: Should be two. -- gpio-controller: Marks the device node as a GPIO controller. -- #gpio-cells : Should be two. - The first cell is the gpio pin number and the - second cell is used for optional parameters. -- gpio-ranges: see ../gpio/gpio.txt - -Optional properties: - -- gpio-reserved-ranges: see ../gpio/gpio.txt - -Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for -a general description of GPIO and interrupt bindings. - -Please refer to pinctrl-bindings.txt in this directory for details of the -common pinctrl bindings used by client devices, including the meaning of the -phrase "pin configuration node". - -Qualcomm's pin configuration nodes act as a container for an arbitrary number of -subnodes. Each of these subnodes represents some desired configuration for a -pin, a group, or a list of pins or groups. This configuration can include the -mux function to select on those pin(s)/group(s), and various pin configuration -parameters, such as pull-up, drive strength, etc. - -The name of each subnode is not important; all subnodes should be enumerated -and processed purely based on their content. - -Each subnode only affects those parameters that are explicitly listed. In -other words, a subnode that lists a mux function but no pin configuration -parameters implies no information about any pin configuration parameters. -Similarly, a pin subnode that describes a pullup parameter implies no -information about e.g. the mux function. - - -The following generic properties as defined in pinctrl-bindings.txt are valid -to specify in a pin configuration subnode: - - pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength, - output-low, output-high. - -Non-empty subnodes must specify the 'pins' property. 
- -Valid values for qcom,pins are: - gpio0-gpio68 - Supports mux, bias, and drive-strength - - sdc3_clk, sdc3_cmd, sdc3_data - Supports bias and drive-strength - - -Valid values for function are: - mdio, mi2s, pdm, ssbi, spmi, audio_pcm, gpio, gsbi1, gsbi2, gsbi4, gsbi5, - gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6, gsbi7, nss_spi, sdc1, - spdif, nand, tsif1, tsif2, usb_fs_n, usb_fs, usb2_hsic, rgmii2, sata, - pcie1_rst, pcie1_prsnt, pcie1_pwren_n, pcie1_pwren, pcie1_pwrflt, - pcie1_clk_req, pcie2_rst, pcie2_prsnt, pcie2_pwren_n, pcie2_pwren, - pcie2_pwrflt, pcie2_clk_req, pcie3_rst, pcie3_prsnt, pcie3_pwren_n, - pcie3_pwren, pcie3_pwrflt, pcie3_clk_req, ps_hold - -Example: - - pinmux: pinctrl@800000 { - compatible = "qcom,ipq8064-pinctrl"; - reg = <0x800000 0x4000>; - - gpio-controller; - #gpio-cells = <2>; - gpio-ranges = <&pinmux 0 0 69>; - interrupt-controller; - #interrupt-cells = <2>; - interrupts = <0 32 0x4>; - - pinctrl-names = "default"; - pinctrl-0 = <&gsbi5_uart_default>; - - gsbi5_uart_default: gsbi5_uart_default { - mux { - pins = "gpio18", "gpio19"; - function = "gsbi5"; - }; - - tx { - pins = "gpio18"; - drive-strength = <4>; - bias-disable; - }; - - rx { - pins = "gpio19"; - drive-strength = <2>; - bias-pull-up; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml new file mode 100644 index 00000000000000..58f11e1bdd4fe8 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pinctrl/qcom,ipq8064-pinctrl.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Technologies, Inc. IPQ8064 TLMM block + +maintainers: + - Bjorn Andersson + +description: | + Top Level Mode Multiplexer pin controller in Qualcomm IPQ8064 SoC. + +allOf: + - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml# + +properties: + compatible: + const: qcom,ipq8064-pinctrl + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + gpio-reserved-ranges: true + +patternProperties: + "-state$": + oneOf: + - $ref: "#/$defs/qcom-ipq8064-tlmm-state" + - patternProperties: + "-pins$": + $ref: "#/$defs/qcom-ipq8064-tlmm-state" + additionalProperties: false + +$defs: + qcom-ipq8064-tlmm-state: + type: object + description: + Pinctrl node's client devices use subnodes for desired pin configuration. + Client device subnodes use below standard properties. + $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state + unevaluatedProperties: false + + properties: + pins: + description: + List of gpio pins affected by the properties specified in this + subnode. + items: + oneOf: + - pattern: "^gpio([0-9]|[1-5][0-9]|6[0-8])$" + - enum: [ sdc3_clk, sdc3_cmd, sdc3_data ] + minItems: 1 + maxItems: 36 + + function: + description: + Specify the alternative function to be configured for the specified + pins. 
+ enum: [ mdio, mi2s, pdm, ssbi, spmi, audio_pcm, gpio, gsbi1, gsbi2, gsbi4, gsbi5, + gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6, gsbi7, nss_spi, sdc1, + spdif, nand, tsif1, tsif2, usb_fs_n, usb_fs, usb2_hsic, rgmii2, sata, + pcie1_rst, pcie1_prsnt, pcie1_pwren_n, pcie1_pwren, pcie1_pwrflt, + pcie1_clk_req, pcie2_rst, pcie2_prsnt, pcie2_pwren_n, pcie2_pwren, + pcie2_pwrflt, pcie2_clk_req, pcie3_rst, pcie3_prsnt, pcie3_pwren_n, + pcie3_pwren, pcie3_pwrflt, pcie3_clk_req, ps_hold ] + + required: + - pins + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include + tlmm: pinctrl@800000 { + compatible = "qcom,ipq8064-pinctrl"; + reg = <0x00800000 0x4000>; + + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&tlmm 0 0 69>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = ; + + uart-state { + rx-pins { + pins = "gpio19"; + function = "gsbi5"; + bias-pull-up; + }; + + tx-pins { + pins = "gpio18"; + function = "gsbi5"; + bias-disable; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml index 2784d32fdde237..c1b799167d81b0 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml +++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml @@ -425,6 +425,7 @@ patternProperties: additionalProperties: false "-hog(-[0-9]+)?$": + type: object required: - gpio-hog diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml index dfe5616b9b858f..0f331844608c69 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml @@ -43,6 +43,7 @@ patternProperties: additionalProperties: false "-hog(-[0-9]+)?$": + type: object required: - gpio-hog diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml index 5d84364d13589c..cfe00457336612 100644 --- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml +++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml @@ -25,6 +25,7 @@ properties: - renesas,pfc-r8a7745 # RZ/G1E - renesas,pfc-r8a77470 # RZ/G1C - renesas,pfc-r8a774a1 # RZ/G2M + - renesas,pfc-r8a774a3 # RZ/G2M v3.0 - renesas,pfc-r8a774b1 # RZ/G2N - renesas,pfc-r8a774c0 # RZ/G2E - renesas,pfc-r8a774e1 # RZ/G2H diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml index 20e806dce1ecb8..6a23d845f1f2c5 100644 --- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml @@ -45,6 +45,7 @@ properties: - rockchip,rk3368-pinctrl - rockchip,rk3399-pinctrl - rockchip,rk3568-pinctrl + - rockchip,rk3576-pinctrl - rockchip,rk3588-pinctrl - rockchip,rv1108-pinctrl - rockchip,rv1126-pinctrl diff --git a/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml new file mode 100644 index 00000000000000..1e6a55afe26a7a --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml @@ -0,0 +1,122 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pinctrl/sophgo,cv1800-pinctrl.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + 
+title: Sophgo CV1800 Pin Controller + +maintainers: + - Inochi Amaoto + +properties: + compatible: + enum: + - sophgo,cv1800b-pinctrl + - sophgo,cv1812h-pinctrl + - sophgo,sg2000-pinctrl + - sophgo,sg2002-pinctrl + + reg: + items: + - description: pinctrl for system domain + - description: pinctrl for rtc domain + + reg-names: + items: + - const: sys + - const: rtc + + resets: + maxItems: 1 + +patternProperties: + '-cfg$': + type: object + description: + A pinctrl node should contain at least one subnode representing the + pinctrl groups available on the machine. + + additionalProperties: false + + patternProperties: + '-pins$': + type: object + description: | + Each subnode will list the pins it needs, and how they should + be configured, with regard to muxer configuration, bias, input + enable/disable, input schmitt trigger, slew-rate, drive strength + and bus hold state. In addition, all pins in the same subnode + should have the same power domain. For configuration detail, + refer to https://github.com/sophgo/sophgo-doc/. + + allOf: + - $ref: pincfg-node.yaml# + - $ref: pinmux-node.yaml# + + properties: + pinmux: + description: | + The list of GPIOs and their mux settings that properties in the + node apply to. This should be set using the GPIOMUX or GPIOMUX2 + macro. + + bias-pull-up: + type: boolean + + bias-pull-down: + type: boolean + + drive-strength-microamp: + description: typical current when output high level. + + input-schmitt-microvolt: + description: typical threshold for schmitt trigger. + + power-source: + description: power supplies at X mV. + enum: [ 1800, 3300 ] + + slew-rate: + description: slew rate for output buffer (0 is fast, 1 is slow) + enum: [ 0, 1 ] + + bias-bus-hold: true + + required: + - pinmux + - power-source + + additionalProperties: false + +required: + - compatible + - reg + - reg-names + +additionalProperties: false + +examples: + - | + #include + + pinctrl@3001000 { + compatible = "sophgo,cv1800b-pinctrl"; + reg = <0x03001000 0x1000>, + <0x05027000 0x1000>; + reg-names = "sys", "rtc"; + + uart0_cfg: uart0-cfg { + uart0-pins { + pinmux = , + ; + bias-pull-up; + drive-strength-microamp = <10800>; + input-schmitt-microvolt = <0>; + power-source = <3300>; + slew-rate = <0>; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml index e1eb45a9eda44f..a28d77748095ac 100644 --- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml @@ -11,7 +11,7 @@ maintainers: - Alexandre TORGUE description: | - STMicroelectronics's STM32 MCUs intregrate a GPIO and Pin mux/config hardware + STMicroelectronics's STM32 MCUs integrate a GPIO and Pin mux/config hardware controller. It controls the input/output settings on the available pins and also provides ability to multiplex and configure the output of various on-chip controllers onto these pads. @@ -164,7 +164,7 @@ patternProperties: This macro is available here: - include/dt-bindings/pinctrl/stm32-pinfunc.h Some examples of using macro: - /* GPIO A9 set as alernate function 2 */ + /* GPIO A9 set as alternate function 2 */ ... 
{ pinmux = ; }; diff --git a/Documentation/devicetree/bindings/platform/microsoft,surface-sam.yaml b/Documentation/devicetree/bindings/platform/microsoft,surface-sam.yaml new file mode 100644 index 00000000000000..b33d26f15b2afa --- /dev/null +++ b/Documentation/devicetree/bindings/platform/microsoft,surface-sam.yaml @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/platform/microsoft,surface-sam.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Surface System Aggregator Module (SAM, SSAM) + +maintainers: + - Konrad Dybcio + +description: | + Surface devices use a standardized embedded controller to let the + operating system interface with various hardware functions. The + specific functionalities are modeled as subdevices and matched on + five levels: domain, category, target, instance and function. + +properties: + compatible: + const: microsoft,surface-sam + + interrupts: + maxItems: 1 + + current-speed: true + +required: + - compatible + - interrupts + +additionalProperties: false + +examples: + - | + #include + uart { + embedded-controller { + compatible = "microsoft,surface-sam"; + + interrupts-extended = <&tlmm 91 IRQ_TYPE_EDGE_RISING>; + + pinctrl-0 = <&ssam_state>; + pinctrl-names = "default"; + + current-speed = <4000000>; + }; + }; diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml index e76fb273490ff5..347571e2545a8e 100644 --- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml +++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml @@ -25,6 +25,7 @@ properties: - renesas,r8a7745-sysc # RZ/G1E - renesas,r8a77470-sysc # RZ/G1C - renesas,r8a774a1-sysc # RZ/G2M + - renesas,r8a774a3-sysc # RZ/G2M v3.0 - renesas,r8a774b1-sysc # RZ/G2N - renesas,r8a774c0-sysc # RZ/G2E - renesas,r8a774e1-sysc # RZ/G2H diff --git a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml index 0d5e999a58f1b0..650dc0aae6f518 100644 --- a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml +++ b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml @@ -41,6 +41,7 @@ properties: - rockchip,rk3368-power-controller - rockchip,rk3399-power-controller - rockchip,rk3568-power-controller + - rockchip,rk3576-power-controller - rockchip,rk3588-power-controller - rockchip,rv1126-power-controller diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.yaml b/Documentation/devicetree/bindings/power/rockchip-io-domain.yaml index d71fc72d446444..c434277218eadc 100644 --- a/Documentation/devicetree/bindings/power/rockchip-io-domain.yaml +++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.yaml @@ -50,6 +50,7 @@ properties: - rockchip,rk3188-io-voltage-domain - rockchip,rk3228-io-voltage-domain - rockchip,rk3288-io-voltage-domain + - rockchip,rk3308-io-voltage-domain - rockchip,rk3328-io-voltage-domain - rockchip,rk3368-io-voltage-domain - rockchip,rk3368-pmu-io-voltage-domain @@ -71,6 +72,7 @@ allOf: - $ref: "#/$defs/rk3188" - $ref: "#/$defs/rk3228" - $ref: "#/$defs/rk3288" + - $ref: "#/$defs/rk3308" - $ref: "#/$defs/rk3328" - $ref: "#/$defs/rk3368" - $ref: "#/$defs/rk3368-pmu" @@ -194,6 +196,28 @@ $defs: wifi-supply: description: The supply connected to APIO3_VDD. Also known as SDIO0. 
+ rk3308: + if: + properties: + compatible: + contains: + const: rockchip,rk3308-io-voltage-domain + + then: + properties: + vccio0-supply: + description: The supply connected to VCCIO0. + vccio1-supply: + description: The supply connected to VCCIO1. + vccio2-supply: + description: The supply connected to VCCIO2. + vccio3-supply: + description: The supply connected to VCCIO3. + vccio4-supply: + description: The supply connected to VCCIO4. + vccio5-supply: + description: The supply connected to VCCIO5. + rk3328: if: properties: diff --git a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml index de43e45a43b7c3..9108a2841caf66 100644 --- a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml +++ b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml @@ -27,6 +27,9 @@ properties: battery-detect-gpios: maxItems: 1 + interrupts: + maxItems: 1 + io-channels: items: - description: Battery Temperature ADC @@ -53,6 +56,7 @@ required: - compatible - reg - battery-detect-gpios + - interrupts - io-channels - io-channel-names - nvmem-cells @@ -88,6 +92,8 @@ examples: compatible = "sprd,sc2731-fgu"; reg = <0xa00>; battery-detect-gpios = <&pmic_eic 9 GPIO_ACTIVE_HIGH>; + interrupt-parent = <&sc2731_pmic>; + interrupts = <4>; io-channels = <&pmic_adc 5>, <&pmic_adc 14>; io-channel-names = "bat-temp", "charge-vol"; nvmem-cells = <&fgu_calib>; diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml index e0b95ecbbebd46..5ccd375eb2941b 100644 --- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml +++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml @@ -23,11 +23,18 @@ properties: - const: x-powers,axp202-battery-power-supply - const: x-powers,axp209-battery-power-supply - const: x-powers,axp221-battery-power-supply + - const: x-powers,axp717-battery-power-supply - items: - const: x-powers,axp803-battery-power-supply - const: x-powers,axp813-battery-power-supply - const: x-powers,axp813-battery-power-supply + monitored-battery: + description: + Specifies the phandle of an optional simple-battery connected to + this gauge. + $ref: /schemas/types.yaml#/definitions/phandle + required: - compatible diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml index 34b7959d6772f0..2ec036405ae470 100644 --- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml +++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml @@ -15,9 +15,6 @@ maintainers: - Chen-Yu Tsai - Sebastian Reichel -allOf: - - $ref: power-supply.yaml# - properties: compatible: oneOf: @@ -26,13 +23,82 @@ properties: - x-powers,axp202-usb-power-supply - x-powers,axp221-usb-power-supply - x-powers,axp223-usb-power-supply + - x-powers,axp717-usb-power-supply - x-powers,axp813-usb-power-supply - items: - const: x-powers,axp803-usb-power-supply - const: x-powers,axp813-usb-power-supply + input-current-limit-microamp: + description: + Optional value to clamp the maximum input current limit to for + the device. If omitted, the programmed value from the EFUSE will + be used. 
+ minimum: 100000 + maximum: 4000000 required: - compatible +allOf: + - $ref: power-supply.yaml# + - if: + properties: + compatible: + contains: + enum: + - x-powers,axp192-usb-power-supply + then: + properties: + input-current-limit-microamp: + enum: [100000, 500000] + + - if: + properties: + compatible: + contains: + enum: + - x-powers,axp202-usb-power-supply + - x-powers,axp223-usb-power-supply + then: + properties: + input-current-limit-microamp: + enum: [100000, 500000, 900000] + + - if: + properties: + compatible: + contains: + enum: + - x-powers,axp221-usb-power-supply + then: + properties: + input-current-limit-microamp: + enum: [500000, 900000] + + - if: + properties: + compatible: + contains: + enum: + - x-powers,axp717-usb-power-supply + then: + properties: + input-current-limit-microamp: + description: Maximum input current in increments of 50000 uA. + minimum: 100000 + maximum: 3250000 + + - if: + properties: + compatible: + contains: + enum: + - x-powers,axp813-usb-power-supply + then: + properties: + input-current-limit-microamp: + enum: [100000, 500000, 900000, 1500000, 2000000, 2500000, + 3000000, 3500000, 4000000] + additionalProperties: false diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt index a6c8978964aa16..27f1797be96375 100644 --- a/Documentation/devicetree/bindings/power/wakeup-source.txt +++ b/Documentation/devicetree/bindings/power/wakeup-source.txt @@ -25,8 +25,8 @@ List of legacy properties and respective binding document 2. "has-tpo" Documentation/devicetree/bindings/rtc/rtc-opal.txt 3. "linux,wakeup" Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt Documentation/devicetree/bindings/mfd/tc3589x.txt - Documentation/devicetree/bindings/input/touchscreen/ads7846.txt -4. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt + Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml +4. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8921-keypad.yaml 5. "linux,input-wakeup" Documentation/devicetree/bindings/input/samsung,s3c6410-keypad.yaml 6. 
"nvidia,wakeup-source" Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt diff --git a/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml b/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml index 3bb8615e3e919f..42ca895f3c4eb6 100644 --- a/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml +++ b/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml @@ -11,11 +11,14 @@ maintainers: properties: compatible: - enum: - - fsl,etsec-ptp - - fsl,fman-ptp-timer - - fsl,dpaa2-ptp - - fsl,enetc-ptp + oneOf: + - enum: + - fsl,etsec-ptp + - fsl,fman-ptp-timer + - fsl,dpaa2-ptp + - items: + - const: pci1957,ee02 + - const: fsl,enetc-ptp reg: maxItems: 1 @@ -123,6 +126,15 @@ required: - compatible - reg +allOf: + - if: + properties: + compatible: + contains: + const: fsl,enetc-ptp + then: + $ref: /schemas/pci/pci-device.yaml + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml index 66e400f2a3a4f0..1b192e197b114f 100644 --- a/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml +++ b/Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml @@ -46,10 +46,11 @@ properties: - description: Module Clock - description: Bus Clock - # Even though it only applies to subschemas under the conditionals, - # not listing them here will trigger a warning because of the - # additionalsProperties set to false. - clock-names: true + clock-names: + minItems: 1 + items: + - const: mod + - const: bus resets: maxItems: 1 diff --git a/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml b/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml new file mode 100644 index 00000000000000..903210ef9c31a8 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pwm/cirrus,ep9301-pwm.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus Logic ep93xx PWM controller + +maintainers: + - Alexander Sverdlin + - Nikita Shubin + +allOf: + - $ref: pwm.yaml# + +properties: + compatible: + oneOf: + - const: cirrus,ep9301-pwm + - items: + - enum: + - cirrus,ep9302-pwm + - cirrus,ep9307-pwm + - cirrus,ep9312-pwm + - cirrus,ep9315-pwm + - const: cirrus,ep9301-pwm + + reg: + maxItems: 1 + + clocks: + items: + - description: SoC PWM clock + + "#pwm-cells": + const: 3 + +required: + - compatible + - reg + - clocks + +unevaluatedProperties: false + +examples: + - | + #include + pwm@80910000 { + compatible = "cirrus,ep9301-pwm"; + reg = <0x80910000 0x10>; + clocks = <&syscon EP93XX_CLK_PWM>; + #pwm-cells = <3>; + }; diff --git a/Documentation/devicetree/bindings/pwm/pwm-amlogic.yaml b/Documentation/devicetree/bindings/pwm/pwm-amlogic.yaml index 1d71d4f8f32878..e021cf59421a6e 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-amlogic.yaml +++ b/Documentation/devicetree/bindings/pwm/pwm-amlogic.yaml @@ -37,6 +37,10 @@ properties: - enum: - amlogic,meson8-pwm-v2 - amlogic,meson-s4-pwm + - items: + - enum: + - amlogic,meson-a1-pwm + - const: amlogic,meson-s4-pwm - items: - enum: - amlogic,meson8b-pwm-v2 @@ -56,6 +60,9 @@ properties: minItems: 1 maxItems: 2 + power-domains: + maxItems: 1 + "#pwm-cells": const: 3 @@ -136,6 +143,16 @@ allOf: required: - clocks + - if: + properties: + compatible: + contains: + enum: + - amlogic,meson-a1-pwm + then: + required: + - power-domains + additionalProperties: 
false examples: diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml index 6b6a302a175ceb..2fe1992e29088c 100644 --- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml +++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml @@ -37,6 +37,7 @@ properties: - renesas,pwm-r8a77995 # R-Car D3 - renesas,pwm-r8a779a0 # R-Car V3U - renesas,pwm-r8a779g0 # R-Car V4H + - renesas,pwm-r8a779h0 # R-Car V4M - const: renesas,pwm-rcar reg: diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml index a3e52b22dd1804..a4dfa09344dd72 100644 --- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml +++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml @@ -41,6 +41,7 @@ properties: - renesas,tpu-r8a77980 # R-Car V3H - renesas,tpu-r8a779a0 # R-Car V3U - renesas,tpu-r8a779g0 # R-Car V4H + - renesas,tpu-r8a779h0 # R-Car V4M - const: renesas,tpu reg: diff --git a/Documentation/devicetree/bindings/regulator/mediatek,mt6397-regulator.yaml b/Documentation/devicetree/bindings/regulator/mediatek,mt6397-regulator.yaml new file mode 100644 index 00000000000000..50db6782a09000 --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/mediatek,mt6397-regulator.yaml @@ -0,0 +1,238 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/regulator/mediatek,mt6397-regulator.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek MT6397 Regulator + +maintainers: + - Sen Chu + - Macpaul Lin + +description: + Regulator node of the PMIC. This node should be under the PMIC's device node. + All voltage regulators provided by the PMIC are described as sub-nodes of + this node.
+ +properties: + compatible: + items: + - const: mediatek,mt6397-regulator + +patternProperties: + "^(buck_)?v(core|drm|gpu|io18|pca(7|15)|sramca(7|15))$": + description: Buck regulators + type: object + $ref: regulator.yaml# + properties: + regulator-allowed-modes: + description: | + BUCK regulators can set regulator-initial-mode and regulator-allowed-modes to + values specified in dt-bindings/regulator/mediatek,mt6397-regulator.h + items: + enum: [0, 1] + unevaluatedProperties: false + + "^(ldo_)?v(tcxo|(a|io)28)$": + description: LDOs with fixed 2.8V output and 0~100/10mV tuning + type: object + $ref: regulator.yaml# + properties: + regulator-allowed-modes: false + unevaluatedProperties: false + + "^(ldo_)?vusb$": + description: LDOs with fixed 3.0V output and 0~100/10mV tuning + type: object + $ref: regulator.yaml# + properties: + regulator-allowed-modes: false + unevaluatedProperties: false + + "^(ldo_)?v(cama|emc3v3|gp[123456]|ibr|mc|mch)$": + description: LDOs with variable output and 0~100/10mV tuning + type: object + $ref: regulator.yaml# + properties: + regulator-allowed-modes: false + unevaluatedProperties: false + +required: + - compatible + +additionalProperties: false + +examples: + - | + #include + + mt6397_regulators: regulators { + compatible = "mediatek,mt6397-regulator"; + + mt6397_vpca15_reg: buck_vpca15 { + regulator-name = "vpca15"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <200>; + }; + + mt6397_vpca7_reg: buck_vpca7 { + regulator-name = "vpca7"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + }; + + mt6397_vsramca15_reg: buck_vsramca15 { + regulator-name = "vsramca15"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + }; + + mt6397_vsramca7_reg: buck_vsramca7 { + regulator-name = "vsramca7"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + }; + + mt6397_vcore_reg: buck_vcore { + regulator-name = "vcore"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + }; + + mt6397_vgpu_reg: buck_vgpu { + regulator-name = "vgpu"; + regulator-min-microvolt = < 700000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + }; + + mt6397_vdrm_reg: buck_vdrm { + regulator-name = "vdrm"; + regulator-min-microvolt = < 800000>; + regulator-max-microvolt = <1400000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <500>; + }; + + mt6397_vio18_reg: buck_vio18 { + regulator-name = "vio18"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <2120000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <500>; + }; + + mt6397_vtcxo_reg: ldo_vtcxo { + regulator-name = "vtcxo"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-enable-ramp-delay = <90>; + }; + + mt6397_va28_reg: ldo_va28 { + regulator-name = "va28"; + /* fixed output 2.8 V */ + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vcama_reg: ldo_vcama { + regulator-name = "vcama"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <2800000>; + 
regulator-enable-ramp-delay = <218>; + }; + + mt6397_vio28_reg: ldo_vio28 { + regulator-name = "vio28"; + /* fixed output 2.8 V */ + regulator-enable-ramp-delay = <240>; + }; + + mt6397_usb_reg: ldo_vusb { + regulator-name = "vusb"; + /* fixed output 3.3 V */ + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vmc_reg: ldo_vmc { + regulator-name = "vmc"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vmch_reg: ldo_vmch { + regulator-name = "vmch"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vemc_3v3_reg: ldo_vemc3v3 { + regulator-name = "vemc_3v3"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp1_reg: ldo_vgp1 { + regulator-name = "vcamd"; + regulator-min-microvolt = <1220000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <240>; + }; + + mt6397_vgp2_reg: ldo_vgp2 { + regulator-name = "vcamio"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp3_reg: ldo_vgp3 { + regulator-name = "vcamaf"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp4_reg: ldo_vgp4 { + regulator-name = "vgp4"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp5_reg: ldo_vgp5 { + regulator-name = "vgp5"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3000000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp6_reg: ldo_vgp6 { + regulator-name = "vgp6"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vibr_reg: ldo_vibr { + regulator-name = "vibr"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + }; diff --git a/Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml b/Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml index 1aca3646789eff..c3e1fc6e260e5d 100644 --- a/Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml +++ b/Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml @@ -28,6 +28,21 @@ properties: reg: maxItems: 1 + lvin-supply: + description: Input supply phandle for LDO1 and LDO2 + + pvin1-supply: + description: Input supply phandle for VDD_IO (BUCK1) + + pvin2-supply: + description: Input supply phandle for VDD_DDR (BUCK2) + + pvin3-supply: + description: Input supply phandle for VDD_CORE (BUCK3) + + pvin4-supply: + description: Input supply phandle for VDD_OTHER (BUCK4) + regulators: type: object additionalProperties: false @@ -68,6 +83,11 @@ examples: pmic@5b { compatible = "microchip,mcp16502"; reg = <0x5b>; + lvin-supply = <®_5v>; + pvin1-supply = <®_5v>; + pvin2-supply = <®_5v>; + pvin3-supply = <®_5v>; + pvin4-supply = <®_5v>; regulators { VDD_IO { diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt deleted file mode 100644 index c080086d3e629e..00000000000000 --- a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt +++ /dev/null @@ -1,220 +0,0 @@ -Mediatek MT6397 Regulator - -Required properties: -- compatible: 
"mediatek,mt6397-regulator" -- mt6397regulator: List of regulators provided by this controller. It is named - according to its regulator type, buck_ and ldo_. - The definition for each of these nodes is defined using the standard binding - for regulators at Documentation/devicetree/bindings/regulator/regulator.txt. - -The valid names for regulators are:: -BUCK: - buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu, - buck_vdrm, buck_vio18 -LDO: - ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch, - ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6, - ldo_vibr - -BUCK regulators can set regulator-initial-mode and regulator-allowed-modes to -values specified in dt-bindings/regulator/mediatek,mt6397-regulator.h - -Example: - pmic { - compatible = "mediatek,mt6397"; - - mt6397regulator: mt6397regulator { - compatible = "mediatek,mt6397-regulator"; - - mt6397_vpca15_reg: buck_vpca15 { - regulator-compatible = "buck_vpca15"; - regulator-name = "vpca15"; - regulator-min-microvolt = < 850000>; - regulator-max-microvolt = <1350000>; - regulator-ramp-delay = <12500>; - regulator-enable-ramp-delay = <200>; - }; - - mt6397_vpca7_reg: buck_vpca7 { - regulator-compatible = "buck_vpca7"; - regulator-name = "vpca7"; - regulator-min-microvolt = < 850000>; - regulator-max-microvolt = <1350000>; - regulator-ramp-delay = <12500>; - regulator-enable-ramp-delay = <115>; - }; - - mt6397_vsramca15_reg: buck_vsramca15 { - regulator-compatible = "buck_vsramca15"; - regulator-name = "vsramca15"; - regulator-min-microvolt = < 850000>; - regulator-max-microvolt = <1350000>; - regulator-ramp-delay = <12500>; - regulator-enable-ramp-delay = <115>; - - }; - - mt6397_vsramca7_reg: buck_vsramca7 { - regulator-compatible = "buck_vsramca7"; - regulator-name = "vsramca7"; - regulator-min-microvolt = < 850000>; - regulator-max-microvolt = <1350000>; - regulator-ramp-delay = <12500>; - regulator-enable-ramp-delay = <115>; - - }; - - mt6397_vcore_reg: buck_vcore { - regulator-compatible = "buck_vcore"; - regulator-name = "vcore"; - regulator-min-microvolt = < 850000>; - regulator-max-microvolt = <1350000>; - regulator-ramp-delay = <12500>; - regulator-enable-ramp-delay = <115>; - }; - - mt6397_vgpu_reg: buck_vgpu { - regulator-compatible = "buck_vgpu"; - regulator-name = "vgpu"; - regulator-min-microvolt = < 700000>; - regulator-max-microvolt = <1350000>; - regulator-ramp-delay = <12500>; - regulator-enable-ramp-delay = <115>; - }; - - mt6397_vdrm_reg: buck_vdrm { - regulator-compatible = "buck_vdrm"; - regulator-name = "vdrm"; - regulator-min-microvolt = < 800000>; - regulator-max-microvolt = <1400000>; - regulator-ramp-delay = <12500>; - regulator-enable-ramp-delay = <500>; - }; - - mt6397_vio18_reg: buck_vio18 { - regulator-compatible = "buck_vio18"; - regulator-name = "vio18"; - regulator-min-microvolt = <1500000>; - regulator-max-microvolt = <2120000>; - regulator-ramp-delay = <12500>; - regulator-enable-ramp-delay = <500>; - }; - - mt6397_vtcxo_reg: ldo_vtcxo { - regulator-compatible = "ldo_vtcxo"; - regulator-name = "vtcxo"; - regulator-min-microvolt = <2800000>; - regulator-max-microvolt = <2800000>; - regulator-enable-ramp-delay = <90>; - }; - - mt6397_va28_reg: ldo_va28 { - regulator-compatible = "ldo_va28"; - regulator-name = "va28"; - /* fixed output 2.8 V */ - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vcama_reg: ldo_vcama { - regulator-compatible = "ldo_vcama"; - regulator-name = "vcama"; - regulator-min-microvolt = <1500000>; - 
regulator-max-microvolt = <2800000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vio28_reg: ldo_vio28 { - regulator-compatible = "ldo_vio28"; - regulator-name = "vio28"; - /* fixed output 2.8 V */ - regulator-enable-ramp-delay = <240>; - }; - - mt6397_usb_reg: ldo_vusb { - regulator-compatible = "ldo_vusb"; - regulator-name = "vusb"; - /* fixed output 3.3 V */ - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vmc_reg: ldo_vmc { - regulator-compatible = "ldo_vmc"; - regulator-name = "vmc"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vmch_reg: ldo_vmch { - regulator-compatible = "ldo_vmch"; - regulator-name = "vmch"; - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vemc_3v3_reg: ldo_vemc3v3 { - regulator-compatible = "ldo_vemc3v3"; - regulator-name = "vemc_3v3"; - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vgp1_reg: ldo_vgp1 { - regulator-compatible = "ldo_vgp1"; - regulator-name = "vcamd"; - regulator-min-microvolt = <1220000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <240>; - }; - - mt6397_vgp2_reg: ldo_vgp2 { - egulator-compatible = "ldo_vgp2"; - regulator-name = "vcamio"; - regulator-min-microvolt = <1000000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vgp3_reg: ldo_vgp3 { - regulator-compatible = "ldo_vgp3"; - regulator-name = "vcamaf"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vgp4_reg: ldo_vgp4 { - regulator-compatible = "ldo_vgp4"; - regulator-name = "vgp4"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vgp5_reg: ldo_vgp5 { - regulator-compatible = "ldo_vgp5"; - regulator-name = "vgp5"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3000000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vgp6_reg: ldo_vgp6 { - regulator-compatible = "ldo_vgp6"; - regulator-name = "vgp6"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <218>; - }; - - mt6397_vibr_reg: ldo_vibr { - regulator-compatible = "ldo_vibr"; - regulator-name = "vibr"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3300000>; - regulator-enable-ramp-delay = <218>; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml b/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml index 3aaa9653419a42..11ed04c9554299 100644 --- a/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml +++ b/Documentation/devicetree/bindings/regulator/qcom,qca6390-pmu.yaml @@ -18,6 +18,7 @@ properties: compatible: enum: - qcom,qca6390-pmu + - qcom,wcn6855-pmu - qcom,wcn7850-pmu vdd-supply: @@ -65,7 +66,11 @@ properties: bt-enable-gpios: maxItems: 1 - description: GPIO line enabling the ATH11K Bluetooth module supplied by the PMU + description: GPIO line enabling the Bluetooth module supplied by the PMU + + swctrl-gpios: + maxItems: 1 + description: GPIO line indicating the state of the clock supply to the BT module clocks: maxItems: 1 @@ -104,6 +109,21 @@ allOf: - vddpcie1p3-supply - vddpcie1p9-supply - vddio-supply + - if: + properties: + compatible: + 
contains: + const: qcom,wcn6855-pmu + then: + required: + - vddio-supply + - vddaon-supply + - vddpmu-supply + - vddrfa0p95-supply + - vddrfa1p3-supply + - vddrfa1p9-supply + - vddpcie1p3-supply + - vddpcie1p9-supply - if: properties: compatible: diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml index c5dc3c2820d71c..adc6b3f36fde49 100644 --- a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml +++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml @@ -93,7 +93,7 @@ patternProperties: Each SCP core has own cache memory. The SRAM and L1TCM are shared by cores. The power of cache, SRAM and L1TCM power should be enabled before booting SCP cores. The size of cache, SRAM, and L1TCM are varied - on differnt SoCs. + on different SoCs. The SCP cores do not use an MMU, but has a set of registers to control the translations between 32-bit CPU addresses into system bus diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,glink-rpm-edge.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,glink-rpm-edge.yaml index 3766d4513b3790..c54234247ab3bb 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,glink-rpm-edge.yaml +++ b/Documentation/devicetree/bindings/remoteproc/qcom,glink-rpm-edge.yaml @@ -90,7 +90,7 @@ examples: qcom,rpm-msg-ram = <&rpm_msg_ram>; rpm-requests { - compatible = "qcom,rpm-msm8996"; + compatible = "qcom,rpm-msm8996", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; /* ... */ diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,rpm-proc.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,rpm-proc.yaml index 61cf4fe19ca53e..540bdfca53d97d 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,rpm-proc.yaml +++ b/Documentation/devicetree/bindings/remoteproc/qcom,rpm-proc.yaml @@ -142,7 +142,7 @@ examples: qcom,smd-edge = <15>; rpm-requests { - compatible = "qcom,rpm-msm8916"; + compatible = "qcom,rpm-msm8916", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; /* ... */ }; @@ -163,7 +163,7 @@ examples: mboxes = <&apcs_glb 0>; rpm-requests { - compatible = "qcom,rpm-qcm2290"; + compatible = "qcom,rpm-qcm2290", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; /* ... 
*/ }; diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml index 73fda7565cd12f..d7fad7b3c2c687 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml +++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml @@ -16,6 +16,7 @@ description: properties: compatible: enum: + - qcom,sdx75-mpss-pas - qcom,sm8550-adsp-pas - qcom,sm8550-cdsp-pas - qcom,sm8550-mpss-pas @@ -113,6 +114,7 @@ allOf: properties: compatible: enum: + - qcom,sdx75-mpss-pas - qcom,sm8650-mpss-pas then: properties: @@ -146,6 +148,7 @@ allOf: properties: compatible: enum: + - qcom,sdx75-mpss-pas - qcom,sm8550-mpss-pas - qcom,sm8650-mpss-pas then: diff --git a/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml new file mode 100644 index 00000000000000..2bd0752b6ba9e8 --- /dev/null +++ b/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml @@ -0,0 +1,125 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/remoteproc/ti,k3-m4f-rproc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI K3 M4F processor subsystems + +maintainers: + - Hari Nagalla + - Mathieu Poirier + +description: | + Some K3 family SoCs have Arm Cortex M4F cores. AM64x is a SoC in K3 + family with an M4F core. Typically, safety-oriented applications may use + the M4F core in isolation without an IPC, whereas some industrial and + home automation applications may use the M4F core as a remote processor + with IPC communications. + +$ref: /schemas/arm/keystone/ti,k3-sci-common.yaml# + +properties: + compatible: + enum: + - ti,am64-m4fss + + power-domains: + maxItems: 1 + + "#address-cells": + const: 2 + + "#size-cells": + const: 2 + + reg: + items: + - description: IRAM internal memory region + - description: DRAM internal memory region + + reg-names: + items: + - const: iram + - const: dram + + resets: + maxItems: 1 + + firmware-name: + maxItems: 1 + description: Name of firmware to load for the M4F core + + mboxes: + description: + OMAP Mailbox specifier denoting the sub-mailbox, to be used for + communication with the remote processor. This property should match + with the sub-mailbox node used in the firmware image. + maxItems: 1 + + memory-region: + description: + phandle to the reserved memory nodes to be associated with the + remoteproc device. Optional memory regions available for firmware + specific purposes. + (see reserved-memory/reserved-memory.yaml in dtschema project) + maxItems: 8 + items: + - description: regions used for DMA allocations like vrings, vring buffers + and memory dedicated to firmware's specific purposes.
+ additionalItems: true + +required: + - compatible + - reg + - reg-names + - ti,sci + - ti,sci-dev-id + - ti,sci-proc-ids + - resets + - firmware-name + +unevaluatedProperties: false + +examples: + - | + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + + mcu_m4fss_dma_memory_region: m4f-dma-memory@9cb00000 { + compatible = "shared-dma-pool"; + reg = <0x00 0x9cb00000 0x00 0x100000>; + no-map; + }; + + mcu_m4fss_memory_region: m4f-memory@9cc00000 { + compatible = "shared-dma-pool"; + reg = <0x00 0x9cc00000 0x00 0xe00000>; + no-map; + }; + }; + + soc { + #address-cells = <2>; + #size-cells = <2>; + + mailbox0_cluster0: mailbox-0 { + #mbox-cells = <1>; + }; + + remoteproc@5000000 { + compatible = "ti,am64-m4fss"; + reg = <0x00 0x5000000 0x00 0x30000>, + <0x00 0x5040000 0x00 0x10000>; + reg-names = "iram", "dram"; + resets = <&k3_reset 9 1>; + firmware-name = "am62-mcu-m4f0_0-fw"; + mboxes = <&mailbox0_cluster0>, <&mbox_m4_0>; + memory-region = <&mcu_m4fss_dma_memory_region>, + <&mcu_m4fss_memory_region>; + ti,sci = <&dmsc>; + ti,sci-dev-id = <9>; + ti,sci-proc-ids = <0x18 0xff>; + }; + }; diff --git a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml index 6f13da11f59396..ee63c03949c93f 100644 --- a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml +++ b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml @@ -62,6 +62,7 @@ properties: patternProperties: "^r(.*)@[0-9a-f]+$": type: object + additionalProperties: false description: | The RPU is located in the Low Power Domain of the Processor Subsystem. Each processor includes separate L1 instruction and data caches and diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml index f0c6c0df0ce3f3..695ef38a7bb346 100644 --- a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml +++ b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml @@ -19,6 +19,7 @@ properties: - amlogic,meson-a1-reset # Reset Controller on A1 and compatible SoCs - amlogic,meson-s4-reset # Reset Controller on S4 and compatible SoCs - amlogic,c3-reset # Reset Controller on C3 and compatible SoCs + - amlogic,t7-reset reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/reset/mobileye,eyeq5-reset.yaml b/Documentation/devicetree/bindings/reset/mobileye,eyeq5-reset.yaml deleted file mode 100644 index 062b4518347bd0..00000000000000 --- a/Documentation/devicetree/bindings/reset/mobileye,eyeq5-reset.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/reset/mobileye,eyeq5-reset.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Mobileye EyeQ5 reset controller - -description: - The EyeQ5 reset driver handles three reset domains. Its registers live in a - shared region called OLB. - -maintainers: - - Grégory Clement - - Théo Lebrun - - Vladimir Kondratiev - -properties: - compatible: - const: mobileye,eyeq5-reset - - reg: - maxItems: 3 - - reg-names: - items: - - const: d0 - - const: d1 - - const: d2 - - "#reset-cells": - const: 2 - description: - The first cell is the domain (0 to 2 inclusive) and the second one is the - reset index inside that domain. 
- -required: - - compatible - - reg - - reg-names - - "#reset-cells" - -additionalProperties: false diff --git a/Documentation/devicetree/bindings/reset/renesas,rst.yaml b/Documentation/devicetree/bindings/reset/renesas,rst.yaml index 58b4a45d338006..7a81491379b05b 100644 --- a/Documentation/devicetree/bindings/reset/renesas,rst.yaml +++ b/Documentation/devicetree/bindings/reset/renesas,rst.yaml @@ -29,6 +29,7 @@ properties: - renesas,r8a7745-rst # RZ/G1E - renesas,r8a77470-rst # RZ/G1C - renesas,r8a774a1-rst # RZ/G2M + - renesas,r8a774a3-rst # RZ/G2M v3.0 - renesas,r8a774b1-rst # RZ/G2N - renesas,r8a774c0-rst # RZ/G2E - renesas,r8a774e1-rst # RZ/G2H diff --git a/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml b/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml index fa253c518d799f..babc563ae61ef0 100644 --- a/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml +++ b/Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml @@ -38,13 +38,17 @@ properties: minItems: 1 maxItems: 2 - clock-names: true + clock-names: + minItems: 1 + maxItems: 2 resets: minItems: 1 maxItems: 2 - reset-names: true + reset-names: + minItems: 1 + maxItems: 2 allOf: - if: diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml index a06dbc6b492895..2cf2026cff574d 100644 --- a/Documentation/devicetree/bindings/riscv/extensions.yaml +++ b/Documentation/devicetree/bindings/riscv/extensions.yaml @@ -171,6 +171,13 @@ properties: memory types as ratified in the 20191213 version of the privileged ISA specification. + - const: svvptc + description: + The standard Svvptc supervisor-level extension for + address-translation cache behaviour with respect to invalid entries + as ratified at commit 4a69197e5617 ("Update to ratified state") of + riscv-svvptc. 
+ - const: zacas description: | The Zacas extension for Atomic Compare-and-Swap (CAS) instructions diff --git a/Documentation/devicetree/bindings/riscv/sophgo.yaml b/Documentation/devicetree/bindings/riscv/sophgo.yaml index 9bc813dad0987a..a14cb10ff3f074 100644 --- a/Documentation/devicetree/bindings/riscv/sophgo.yaml +++ b/Documentation/devicetree/bindings/riscv/sophgo.yaml @@ -26,6 +26,11 @@ properties: - enum: - sophgo,huashan-pi - const: sophgo,cv1812h + - items: + - enum: + - sipeed,licheerv-nano-b + - const: sipeed,licheerv-nano + - const: sophgo,sg2002 - items: - enum: - milkv,pioneer diff --git a/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml b/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml new file mode 100644 index 00000000000000..e0595814a6d986 --- /dev/null +++ b/Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/rockchip,rk3568-rng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip RK3568 TRNG + +description: True Random Number Generator on Rockchip RK3568 SoC + +maintainers: + - Aurelien Jarno + - Daniel Golle + +properties: + compatible: + enum: + - rockchip,rk3568-rng + + reg: + maxItems: 1 + + clocks: + items: + - description: TRNG clock + - description: TRNG AHB clock + + clock-names: + items: + - const: core + - const: ahb + + resets: + maxItems: 1 + +required: + - compatible + - reg + - clocks + - clock-names + - resets + +additionalProperties: false + +examples: + - | + #include + bus { + #address-cells = <2>; + #size-cells = <2>; + + rng@fe388000 { + compatible = "rockchip,rk3568-rng"; + reg = <0x0 0xfe388000 0x0 0x4000>; + clocks = <&cru CLK_TRNG_NS>, <&cru HCLK_TRNG_NS>; + clock-names = "core", "ahb"; + resets = <&cru SRST_TRNG_NS>; + }; + }; + +... 
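For context only, a minimal cpu-node sketch (not part of this patch) showing where the newly documented "svvptc" string from the riscv/extensions.yaml change above would typically be declared; the surrounding properties follow the existing binding and the rest of the extension list is purely illustrative:

    cpu@0 {
        device_type = "cpu";
        compatible = "riscv";
        reg = <0>;
        riscv,isa-base = "rv64i";
        /* "svvptc" is the newly documented entry; the other strings are examples */
        riscv,isa-extensions = "i", "m", "a", "f", "d", "zicsr", "svvptc";
    };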
diff --git a/Documentation/devicetree/bindings/rtc/fsl,ls-ftm-alarm.yaml b/Documentation/devicetree/bindings/rtc/fsl,ls-ftm-alarm.yaml index 388102ae30cd83..3ec111f2fdc40f 100644 --- a/Documentation/devicetree/bindings/rtc/fsl,ls-ftm-alarm.yaml +++ b/Documentation/devicetree/bindings/rtc/fsl,ls-ftm-alarm.yaml @@ -42,7 +42,7 @@ properties: minItems: 1 description: phandle to rcpm node, Please refer - Documentation/devicetree/bindings/soc/fsl/rcpm.txt + Documentation/devicetree/bindings/soc/fsl/fsl,rcpm.yaml big-endian: $ref: /schemas/types.yaml#/definitions/flag diff --git a/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml b/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml index 5ade5dfad048ab..cda8ad7c120375 100644 --- a/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml +++ b/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml @@ -22,6 +22,9 @@ properties: interrupts: maxItems: 1 + "#clock-cells": + const: 0 + trickle-resistor-ohms: enum: - 3000 diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml b/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml new file mode 100644 index 00000000000000..f3d20e976965a2 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rtc/sprd,sc2731-rtc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Spreadtrum SC2731 Real Time Clock + +maintainers: + - Orson Zhai + - Baolin Wang + - Chunyan Zhang + +properties: + compatible: + const: sprd,sc2731-rtc + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +allOf: + - $ref: rtc.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + + pmic { + #address-cells = <1>; + #size-cells = <0>; + + rtc@280 { + compatible = "sprd,sc2731-rtc"; + reg = <0x280>; + interrupt-parent = <&sc2731_pmic>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt deleted file mode 100644 index 1f5754299d31e4..00000000000000 --- a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt +++ /dev/null @@ -1,26 +0,0 @@ -Spreadtrum SC27xx Real Time Clock - -Required properties: -- compatible: should be "sprd,sc2731-rtc". -- reg: address offset of rtc register. -- interrupts: rtc alarm interrupt. - -Example: - - sc2731_pmic: pmic@0 { - compatible = "sprd,sc2731"; - reg = <0>; - spi-max-frequency = <26000000>; - interrupts = ; - interrupt-controller; - #interrupt-cells = <2>; - #address-cells = <1>; - #size-cells = <0>; - - rtc@280 { - compatible = "sprd,sc2731-rtc"; - reg = <0x280>; - interrupt-parent = <&sc2731_pmic>; - interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; - }; - }; diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml index 7a0fab721cf1da..aae06e570c2214 100644 --- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml @@ -53,6 +53,28 @@ properties: override default rtc_ck parent clock phandle of the new parent clock of rtc_ck maxItems: 1 +patternProperties: + "^rtc-[a-z]+-[0-9]+$": + type: object + $ref: /schemas/pinctrl/pinmux-node.yaml + description: | + Configuration of STM32 RTC pins description. 
STM32 RTC is able to output + some signals on specific pins: + - LSCO (Low Speed Clock Output) that allow to output LSE clock on a pin. + - Alarm out that allow to send a pulse on a pin when alarm A of the RTC + expires. + additionalProperties: false + properties: + function: + enum: + - lsco + - alarm-a + pins: + enum: + - out1 + - out2 + - out2_rmp + allOf: - if: properties: @@ -68,6 +90,9 @@ allOf: clock-names: false + patternProperties: + "^rtc-[a-z]+-[0-9]+$": false + required: - st,syscfg @@ -83,6 +108,9 @@ allOf: minItems: 2 maxItems: 2 + patternProperties: + "^rtc-[a-z]+-[0-9]+$": false + required: - clock-names - st,syscfg diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml index fffd759c603f84..7330a720083121 100644 --- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml +++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml @@ -38,12 +38,13 @@ properties: - dallas,ds1672 # Extremely Accurate I²C RTC with Integrated Crystal and SRAM - dallas,ds3232 + # SD2405AL Real-Time Clock + - dfrobot,sd2405al # EM Microelectronic EM3027 RTC - emmicro,em3027 # I2C-BUS INTERFACE REAL TIME CLOCK MODULE - epson,rx8010 # I2C-BUS INTERFACE REAL TIME CLOCK MODULE - - epson,rx8025 - epson,rx8035 # I2C-BUS INTERFACE REAL TIME CLOCK MODULE with Battery Backed RAM - epson,rx8111 @@ -52,10 +53,6 @@ properties: - epson,rx8581 # Android Goldfish Real-time Clock - google,goldfish-rtc - # Intersil ISL1208 Low Power RTC with Battery Backed SRAM - - isil,isl1208 - # Intersil ISL1218 Low Power RTC with Battery Backed SRAM - - isil,isl1218 # Mvebu Real-time Clock - marvell,orion-rtc # Maxim DS1742/DS1743 Real-time Clock @@ -68,8 +65,6 @@ properties: - microcrystal,rv8523 # NXP LPC32xx SoC Real-time Clock - nxp,lpc3220-rtc - # Real-time Clock Module - - pericom,pt7c4338 # I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC - ricoh,r2025sd # I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC diff --git a/Documentation/devicetree/bindings/serial/8250_omap.yaml b/Documentation/devicetree/bindings/serial/8250_omap.yaml index 6a7be42da523c6..4b78de6b46a207 100644 --- a/Documentation/devicetree/bindings/serial/8250_omap.yaml +++ b/Documentation/devicetree/bindings/serial/8250_omap.yaml @@ -76,6 +76,7 @@ properties: clock-frequency: true current-speed: true overrun-throttle-ms: true + wakeup-source: true required: - compatible diff --git a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml index eb2992a447d79c..f466c38518c417 100644 --- a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml +++ b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml @@ -23,13 +23,20 @@ properties: - const: atmel,at91sam9260-dbgu - const: atmel,at91sam9260-usart - items: - - const: microchip,sam9x60-usart + - enum: + - microchip,sam9x60-usart + - microchip,sam9x7-usart - const: atmel,at91sam9260-usart - items: - const: microchip,sam9x60-dbgu - const: microchip,sam9x60-usart - const: atmel,at91sam9260-dbgu - const: atmel,at91sam9260-usart + - items: + - const: microchip,sam9x7-dbgu + - const: atmel,at91sam9260-dbgu + - const: microchip,sam9x7-usart + - const: atmel,at91sam9260-usart reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/serial/mediatek,uart.yaml b/Documentation/devicetree/bindings/serial/mediatek,uart.yaml index ff61ffdcad1dbd..1b02f0b197ff46 100644 --- a/Documentation/devicetree/bindings/serial/mediatek,uart.yaml +++ 
b/Documentation/devicetree/bindings/serial/mediatek,uart.yaml @@ -36,6 +36,7 @@ properties: - mediatek,mt7622-uart - mediatek,mt7623-uart - mediatek,mt7629-uart + - mediatek,mt7981-uart - mediatek,mt7986-uart - mediatek,mt7988-uart - mediatek,mt8127-uart diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml index a5d67563cd53a6..29d48da81531ff 100644 --- a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml +++ b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.yaml @@ -78,7 +78,7 @@ properties: we use nvidia,adjust-baud-rates. As an example, consider there is deviation observed in TX for baud rates as listed below. 0 - to 9600 has 1% deviation 9600 to 115200 2% deviation. This slight deviation is expcted and + to 9600 has 1% deviation 9600 to 115200 2% deviation. This slight deviation is expected and Tegra UART is expected to handle it. Due to the issue stated above, baud rate on Tegra UART should be set equal to or above deviation observed for avoiding frame errors. Property should be set like this: diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml index afc7c05898a18f..51d9fb0f476330 100644 --- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml +++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml @@ -46,6 +46,7 @@ properties: - items: - enum: - renesas,scif-r8a774a1 # RZ/G2M + - renesas,scif-r8a774a3 # RZ/G2M v3.0 - renesas,scif-r8a774b1 # RZ/G2N - renesas,scif-r8a774c0 # RZ/G2E - renesas,scif-r8a774e1 # RZ/G2H diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml index 0f0131026911cd..788c80e47831cb 100644 --- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml +++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml @@ -56,14 +56,8 @@ properties: maxItems: 5 clock-names: - description: N = 0 is allowed for SoCs without internal baud clock mux. 
minItems: 2 - items: - - const: uart - - pattern: '^clk_uart_baud[0-3]$' - - pattern: '^clk_uart_baud[0-3]$' - - pattern: '^clk_uart_baud[0-3]$' - - pattern: '^clk_uart_baud[0-3]$' + maxItems: 5 dmas: items: @@ -103,18 +97,45 @@ allOf: compatible: contains: enum: - - samsung,s5pv210-uart + - samsung,s3c6400-uart then: properties: clocks: - minItems: 2 + minItems: 3 maxItems: 3 + + clock-names: + items: + - const: uart + - const: clk_uart_baud2 + - const: clk_uart_baud3 + + else: + properties: clock-names: minItems: 2 items: - const: uart - - pattern: '^clk_uart_baud[0-1]$' - - pattern: '^clk_uart_baud[0-1]$' + - const: clk_uart_baud0 + - const: clk_uart_baud1 + - const: clk_uart_baud2 + - const: clk_uart_baud3 + + - if: + properties: + compatible: + contains: + enum: + - samsung,s5pv210-uart + then: + properties: + clocks: + minItems: 3 + maxItems: 3 + + clock-names: + minItems: 3 + maxItems: 3 - if: properties: @@ -129,10 +150,9 @@ allOf: properties: clocks: maxItems: 2 + clock-names: - items: - - const: uart - - const: clk_uart_baud0 + maxItems: 2 - if: properties: @@ -146,6 +166,12 @@ allOf: properties: reg-io-width: false + clocks: + maxItems: 2 + + clock-names: + maxItems: 2 + unevaluatedProperties: false examples: @@ -163,3 +189,19 @@ examples: <&clocks SCLK_UART>; samsung,uart-fifosize = <16>; }; + - | + #include + #include + #include + + serial_0: serial@10a00000 { + compatible = "google,gs101-uart"; + reg = <0x10a00000 0xc0>; + clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0>, + <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0>; + clock-names = "uart", "clk_uart_baud0"; + interrupts = ; + pinctrl-0 = <&uart0_bus>; + pinctrl-names = "default"; + samsung,uart-fifosize = <256>; + }; diff --git a/Documentation/devicetree/bindings/serial/serial-peripheral-props.yaml b/Documentation/devicetree/bindings/serial/serial-peripheral-props.yaml new file mode 100644 index 00000000000000..b4a73214d20d7b --- /dev/null +++ b/Documentation/devicetree/bindings/serial/serial-peripheral-props.yaml @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/serial/serial-peripheral-props.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Common Properties for Serial-attached Devices + +maintainers: + - Rob Herring + - Greg Kroah-Hartman + +description: + Devices connected over serial/UART, expressed as children of a serial + controller, might need similar properties, e.g. for configuring the baud + rate. + +properties: + max-speed: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + The maximum baud rate the device operates at. + This should only be present if the maximum is less than the slave + device can support. For example, a particular board has some + signal quality issue or the host processor can't support higher + baud rates. + + current-speed: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + The current baud rate the device operates at. + This should only be present in case a driver has no chance to know + the baud rate of the slave device. 
+ Examples: + * device supports auto-baud + * the rate is setup by a bootloader and there is no way to reset + the device + * device baud rate is configured by its firmware but there is no + way to request the actual settings + +additionalProperties: true diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml index ffc9198ae21469..6aa9cfae417b89 100644 --- a/Documentation/devicetree/bindings/serial/serial.yaml +++ b/Documentation/devicetree/bindings/serial/serial.yaml @@ -88,10 +88,12 @@ properties: TX FIFO threshold configuration (in bytes). patternProperties: - "^(bluetooth|bluetooth-gnss|gnss|gps|mcu|onewire)$": + "^(bluetooth|bluetooth-gnss|embedded-controller|gnss|gps|mcu|onewire)$": if: type: object then: + additionalProperties: true + $ref: serial-peripheral-props.yaml# description: Serial attached devices shall be a child node of the host UART device the slave device is attached to. It is expected that the attached @@ -103,28 +105,6 @@ patternProperties: description: Compatible of the device connected to the serial port. - max-speed: - $ref: /schemas/types.yaml#/definitions/uint32 - description: - The maximum baud rate the device operates at. - This should only be present if the maximum is less than the slave - device can support. For example, a particular board has some - signal quality issue or the host processor can't support higher - baud rates. - - current-speed: - $ref: /schemas/types.yaml#/definitions/uint32 - description: | - The current baud rate the device operates at. - This should only be present in case a driver has no chance to know - the baud rate of the slave device. - Examples: - * device supports auto-baud - * the rate is setup by a bootloader and there is no way to reset - the device - * device baud rate is configured by its firmware but there is no - way to request the actual settings - required: - compatible diff --git a/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2711-avs-monitor.yaml b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2711-avs-monitor.yaml new file mode 100644 index 00000000000000..e02d9d7e7d9a2e --- /dev/null +++ b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2711-avs-monitor.yaml @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soc/bcm/brcm,bcm2711-avs-monitor.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Broadcom AVS Monitor + +maintainers: + - Stefan Wahren + +properties: + compatible: + items: + - const: brcm,bcm2711-avs-monitor + - const: syscon + - const: simple-mfd + + reg: + maxItems: 1 + + thermal: + $ref: /schemas/thermal/brcm,avs-ro-thermal.yaml + description: Broadcom AVS ring oscillator thermal + +required: + - compatible + - reg + - thermal + +additionalProperties: false + +examples: + - | + avs-monitor@7d5d2000 { + compatible = "brcm,bcm2711-avs-monitor", "syscon", "simple-mfd"; + reg = <0x7d5d2000 0xf00>; + + thermal: thermal { + compatible = "brcm,bcm2711-thermal"; + #thermal-sensor-cells = <0>; + }; + }; +... 
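For context only, a minimal sketch (not part of this patch) of how a serial-attached device would use the properties moved into the new serial-peripheral-props.yaml above; the UART label and the Bluetooth compatible are placeholders chosen for illustration:

    &uart1 {
        bluetooth {
            compatible = "brcm,bcm43438-bt"; /* example serial-attached device */
            max-speed = <3000000>; /* cap the baud rate below the device maximum */
        };
    };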
diff --git a/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml b/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml new file mode 100644 index 00000000000000..7cb1b411498503 --- /dev/null +++ b/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml @@ -0,0 +1,94 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soc/cirrus/cirrus,ep9301-syscon.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus Logic EP93xx Platforms System Controller + +maintainers: + - Alexander Sverdlin + - Nikita Shubin + +description: | + Central resources are controlled by a set of software-locked registers, + which can be used to prevent accidental accesses. Syscon generates + the various bus and peripheral clocks and controls the system startup + configuration. + + The System Controller (Syscon) provides: + - Clock control + - Power management + - System configuration management + + Syscon registers are common for all EP93xx SoC's, through some actual peripheral + may be missing depending on actual SoC model. + +properties: + compatible: + oneOf: + - items: + - enum: + - cirrus,ep9302-syscon + - cirrus,ep9307-syscon + - cirrus,ep9312-syscon + - cirrus,ep9315-syscon + - const: cirrus,ep9301-syscon + - const: syscon + - items: + - const: cirrus,ep9301-syscon + - const: syscon + + reg: + maxItems: 1 + + "#clock-cells": + const: 1 + + clocks: + items: + - description: reference clock + +patternProperties: + '^pins-': + type: object + description: pin node + $ref: /schemas/pinctrl/pinmux-node.yaml + + properties: + function: + enum: [ spi, ac97, i2s, pwm, keypad, pata, lcd, gpio ] + + groups: + enum: [ ssp, ac97, i2s_on_ssp, i2s_on_ac97, pwm1, gpio1agrp, + gpio2agrp, gpio3agrp, gpio4agrp, gpio6agrp, gpio7agrp, + rasteronsdram0grp, rasteronsdram3grp, keypadgrp, idegrp ] + + required: + - function + - groups + + unevaluatedProperties: false + +required: + - compatible + - reg + - "#clock-cells" + - clocks + +additionalProperties: false + +examples: + - | + syscon@80930000 { + compatible = "cirrus,ep9301-syscon", "syscon"; + reg = <0x80930000 0x1000>; + + #clock-cells = <1>; + clocks = <&xtali>; + + spi_default_pins: pins-spi { + function = "spi"; + groups = "ssp"; + }; + }; diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml new file mode 100644 index 00000000000000..3b50e0a003cac1 --- /dev/null +++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml @@ -0,0 +1,210 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soc/fsl/cpm_qe/fsl,qe-tsa.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: PowerQUICC QE Time-slot assigner (TSA) controller + +maintainers: + - Herve Codina + +description: + The TSA is the time-slot assigner that can be found on some PowerQUICC SoC. + Its purpose is to route some TDM time-slots to other internal serial + controllers. 
+ +properties: + compatible: + items: + - enum: + - fsl,mpc8321-tsa + - const: fsl,qe-tsa + + reg: + items: + - description: SI (Serial Interface) register base + - description: SI RAM base + + reg-names: + items: + - const: si_regs + - const: si_ram + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + '^tdm@[0-3]$': + description: + The TDM managed by this controller + type: object + + additionalProperties: false + + properties: + reg: + minimum: 0 + maximum: 3 + description: + The TDM number for this TDM, 0 for TDMa, 1 for TDMb, 2 for TDMc and 3 + for TDMd. + + fsl,common-rxtx-pins: + $ref: /schemas/types.yaml#/definitions/flag + description: + The hardware can use four dedicated pins for Tx clock, Tx sync, Rx + clock and Rx sync or use only two pins, Tx/Rx clock and Tx/Rx sync. + Without the 'fsl,common-rxtx-pins' property, the four pins are used. + With the 'fsl,common-rxtx-pins' property, two pins are used. + + clocks: + minItems: 2 + items: + - description: Receive sync clock + - description: Receive data clock + - description: Transmit sync clock + - description: Transmit data clock + + clock-names: + minItems: 2 + items: + - const: rsync + - const: rclk + - const: tsync + - const: tclk + + fsl,rx-frame-sync-delay-bits: + enum: [0, 1, 2, 3] + default: 0 + description: | + Receive frame sync delay in number of bits. + Indicates the delay between the Rx sync and the first bit of the Rx + frame. + + fsl,tx-frame-sync-delay-bits: + enum: [0, 1, 2, 3] + default: 0 + description: | + Transmit frame sync delay in number of bits. + Indicates the delay between the Tx sync and the first bit of the Tx + frame. + + fsl,clock-falling-edge: + $ref: /schemas/types.yaml#/definitions/flag + description: + Data is sent on falling edge of the clock (and received on the rising + edge). If not present, data is sent on the rising edge (and received + on the falling edge). + + fsl,fsync-rising-edge: + $ref: /schemas/types.yaml#/definitions/flag + description: + Frame sync pulses are sampled with the rising edge of the channel + clock. If not present, pulses are sampled with the falling edge. + + fsl,fsync-active-low: + $ref: /schemas/types.yaml#/definitions/flag + description: + Frame sync signals are active on low logic level. + If not present, sync signals are active on high level. + + fsl,double-speed-clock: + $ref: /schemas/types.yaml#/definitions/flag + description: + The channel clock is twice the data rate. + + patternProperties: + '^fsl,[rt]x-ts-routes$': + $ref: /schemas/types.yaml#/definitions/uint32-matrix + description: | + A list of tuple that indicates the Tx or Rx time-slots routes. + items: + items: + - description: + The number of time-slots + minimum: 1 + maximum: 64 + - description: | + The source (Tx) or destination (Rx) serial interface + (dt-bindings/soc/qe-fsl,tsa.h defines these values) + - 0: No destination + - 1: UCC1 + - 2: UCC2 + - 3: UCC3 + - 4: UCC4 + - 5: UCC5 + enum: [0, 1, 2, 3, 4, 5] + minItems: 1 + maxItems: 64 + + allOf: + # If fsl,common-rxtx-pins is present, only 2 clocks are needed. + # Else, the 4 clocks must be present. 
+ - if: + required: + - fsl,common-rxtx-pins + then: + properties: + clocks: + maxItems: 2 + clock-names: + maxItems: 2 + else: + properties: + clocks: + minItems: 4 + clock-names: + minItems: 4 + + required: + - reg + - clocks + - clock-names + +required: + - compatible + - reg + - reg-names + - '#address-cells' + - '#size-cells' + +additionalProperties: false + +examples: + - | + #include + + tsa@ae0 { + compatible = "fsl,mpc8321-tsa", "fsl,qe-tsa"; + reg = <0xae0 0x10>, + <0xc00 0x200>; + reg-names = "si_regs", "si_ram"; + + #address-cells = <1>; + #size-cells = <0>; + + tdm@0 { + /* TDMa */ + reg = <0>; + + clocks = <&clk_l1rsynca>, <&clk_l1rclka>; + clock-names = "rsync", "rclk"; + + fsl,common-rxtx-pins; + fsl,fsync-rising-edge; + + fsl,tx-ts-routes = <2 0>, /* TS 0..1 */ + <24 FSL_QE_TSA_UCC4>, /* TS 2..25 */ + <1 0>, /* TS 26 */ + <5 FSL_QE_TSA_UCC3>; /* TS 27..31 */ + + fsl,rx-ts-routes = <2 0>, /* TS 0..1 */ + <24 FSL_QE_TSA_UCC4>, /* 2..25 */ + <1 0>, /* TS 26 */ + <5 FSL_QE_TSA_UCC3>; /* TS 27..31 */ + }; + }; diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml new file mode 100644 index 00000000000000..71ae64cb8a4f14 --- /dev/null +++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: PowerQUICC QE QUICC Multichannel Controller (QMC) + +maintainers: + - Herve Codina + +description: + The QMC (QUICC Multichannel Controller) emulates up to 64 channels within one + serial controller using the same TDM physical interface routed from TSA. + +properties: + compatible: + items: + - enum: + - fsl,mpc8321-ucc-qmc + - const: fsl,qe-ucc-qmc + + reg: + items: + - description: UCC (Unified communication controller) register base + - description: Dual port ram base + + reg-names: + items: + - const: ucc_regs + - const: dpram + + interrupts: + maxItems: 1 + description: UCC interrupt line in the QE interrupt controller + + fsl,tsa-serial: + $ref: /schemas/types.yaml#/definitions/phandle-array + items: + - items: + - description: phandle to TSA node + - enum: [1, 2, 3, 4, 5] + description: | + TSA serial interface (dt-bindings/soc/qe-fsl,tsa.h defines these + values) + - 1: UCC1 + - 2: UCC2 + - 3: UCC3 + - 4: UCC4 + - 5: UCC5 + description: + Should be a phandle/number pair. The phandle to TSA node and the TSA + serial interface to use. + + fsl,soft-qmc: + $ref: /schemas/types.yaml#/definitions/string + description: + Soft QMC firmware name to load. If this property is omitted, no firmware + are used. 
+ + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + '^channel@([0-9]|[1-5][0-9]|6[0-3])$': + description: + A channel managed by this controller + type: object + additionalProperties: false + + properties: + compatible: + items: + - enum: + - fsl,mpc8321-ucc-qmc-hdlc + - const: fsl,qe-ucc-qmc-hdlc + - const: fsl,qmc-hdlc + + reg: + minimum: 0 + maximum: 63 + description: + The channel number + + fsl,operational-mode: + $ref: /schemas/types.yaml#/definitions/string + enum: [transparent, hdlc] + default: transparent + description: | + The channel operational mode + - hdlc: The channel handles HDLC frames + - transparent: The channel handles raw data without any processing + + fsl,reverse-data: + $ref: /schemas/types.yaml#/definitions/flag + description: + The bit order as seen on the channels is reversed, + transmitting/receiving the MSB of each octet first. + This flag is used only in 'transparent' mode. + + fsl,tx-ts-mask: + $ref: /schemas/types.yaml#/definitions/uint64 + description: + Channel assigned Tx time-slots within the Tx time-slots routed by the + TSA to this cell. + + fsl,rx-ts-mask: + $ref: /schemas/types.yaml#/definitions/uint64 + description: + Channel assigned Rx time-slots within the Rx time-slots routed by the + TSA to this cell. + + fsl,framer: + $ref: /schemas/types.yaml#/definitions/phandle + description: + phandle to the framer node. The framer is in charge of an E1/T1 line + interface connected to the TDM bus. It can be used to get the E1/T1 line + status such as link up/down. + + allOf: + - if: + properties: + compatible: + not: + contains: + const: fsl,qmc-hdlc + then: + properties: + fsl,framer: false + + required: + - reg + - fsl,tx-ts-mask + - fsl,rx-ts-mask + +required: + - compatible + - reg + - reg-names + - interrupts + - fsl,tsa-serial + - '#address-cells' + - '#size-cells' + +additionalProperties: false + +examples: + - | + #include + + qmc@a60 { + compatible = "fsl,mpc8321-ucc-qmc", "fsl,qe-ucc-qmc"; + reg = <0x3200 0x200>, + <0x10000 0x1000>; + reg-names = "ucc_regs", "dpram"; + interrupts = <35>; + interrupt-parent = <&qeic>; + fsl,soft-qmc = "fsl_qe_ucode_qmc_8321_11.bin"; + + #address-cells = <1>; + #size-cells = <0>; + + fsl,tsa-serial = <&tsa FSL_QE_TSA_UCC4>; + + channel@16 { + /* Ch16 : First 4 even TS from all routed from TSA */ + reg = <16>; + fsl,operational-mode = "transparent"; + fsl,reverse-data; + fsl,tx-ts-mask = <0x00000000 0x000000aa>; + fsl,rx-ts-mask = <0x00000000 0x000000aa>; + }; + + channel@17 { + /* Ch17 : First 4 odd TS from all routed from TSA */ + reg = <17>; + fsl,operational-mode = "transparent"; + fsl,reverse-data; + fsl,tx-ts-mask = <0x00000000 0x00000055>; + fsl,rx-ts-mask = <0x00000000 0x00000055>; + }; + + channel@19 { + /* Ch19 : 8 TS (TS 8..15) from all routed from TSA */ + compatible = "fsl,mpc8321-ucc-qmc-hdlc", + "fsl,qe-ucc-qmc-hdlc", + "fsl,qmc-hdlc"; + reg = <19>; + fsl,operational-mode = "hdlc"; + fsl,tx-ts-mask = <0x00000000 0x0000ff00>; + fsl,rx-ts-mask = <0x00000000 0x0000ff00>; + fsl,framer = <&framer>; + }; + }; diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,ucc-hdlc.yaml b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,ucc-hdlc.yaml new file mode 100644 index 00000000000000..64ffbf75dd9d29 --- /dev/null +++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,ucc-hdlc.yaml @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soc/fsl/cpm_qe/fsl,ucc-hdlc.yaml# +$schema: 
http://devicetree.org/meta-schemas/core.yaml# + +title: High-Level Data Link Control(HDLC) + +description: HDLC part in Universal communication controllers (UCCs) + +maintainers: + - Frank Li + +properties: + compatible: + const: fsl,ucc-hdlc + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + cell-index: + $ref: /schemas/types.yaml#/definitions/uint32 + + rx-clock-name: + $ref: /schemas/types.yaml#/definitions/string + oneOf: + - pattern: "^brg([0-9]|1[0-6])$" + - pattern: "^clk([0-9]|1[0-9]|2[0-4])$" + + tx-clock-name: + $ref: /schemas/types.yaml#/definitions/string + oneOf: + - pattern: "^brg([0-9]|1[0-6])$" + - pattern: "^clk([0-9]|1[0-9]|2[0-4])$" + + fsl,tdm-interface: + $ref: /schemas/types.yaml#/definitions/flag + description: Specify that hdlc is based on tdm-interface + + fsl,rx-sync-clock: + $ref: /schemas/types.yaml#/definitions/string + description: rx-sync + enum: + - none + - rsync_pin + - brg9 + - brg10 + - brg11 + - brg13 + - brg14 + - brg15 + + fsl,tx-sync-clock: + $ref: /schemas/types.yaml#/definitions/string + description: tx-sync + enum: + - none + - tsync_pin + - brg9 + - brg10 + - brg11 + - brg13 + - brg14 + - brg15 + + fsl,tdm-framer-type: + $ref: /schemas/types.yaml#/definitions/string + description: required for tdm interface + enum: [e1, t1] + + fsl,tdm-id: + $ref: /schemas/types.yaml#/definitions/uint32 + description: number of TDM ID + + fsl,tx-timeslot-mask: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + required for tdm interface. + time slot mask for TDM operation. Indicates which time + slots used for transmitting and receiving. + + fsl,rx-timeslot-mask: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + required for tdm interface. + time slot mask for TDM operation. Indicates which time + slots used for transmitting and receiving. + + fsl,siram-entry-id: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + required for tdm interface + Must be 0,2,4...64. the number of TDM entry. + + fsl,tdm-internal-loopback: + $ref: /schemas/types.yaml#/definitions/flag + description: + optional for tdm interface + Internal loopback connecting on TDM layer. + + fsl,hmask: + $ref: /schemas/types.yaml#/definitions/uint16 + description: | + HDLC address recognition. 
Set to zero to disable + address filtering of packets: + fsl,hmask = /bits/ 16 <0x0000>; + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + communication@2000 { + compatible = "fsl,ucc-hdlc"; + reg = <0x2000 0x200>; + rx-clock-name = "clk8"; + tx-clock-name = "clk9"; + fsl,rx-sync-clock = "rsync_pin"; + fsl,tx-sync-clock = "tsync_pin"; + fsl,tx-timeslot-mask = <0xfffffffe>; + fsl,rx-timeslot-mask = <0xfffffffe>; + fsl,tdm-framer-type = "e1"; + fsl,tdm-id = <0>; + fsl,siram-entry-id = <0>; + fsl,tdm-interface; + }; + + - | + communication@2000 { + compatible = "fsl,ucc-hdlc"; + reg = <0x2000 0x200>; + rx-clock-name = "brg1"; + tx-clock-name = "brg1"; + }; diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt deleted file mode 100644 index 6d2dd8a31482a4..00000000000000 --- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt +++ /dev/null @@ -1,130 +0,0 @@ -* Network - -Currently defined compatibles: -- fsl,cpm1-scc-enet -- fsl,cpm2-scc-enet -- fsl,cpm1-fec-enet -- fsl,cpm2-fcc-enet (third resource is GFEMR) -- fsl,qe-enet - -Example: - - ethernet@11300 { - compatible = "fsl,mpc8272-fcc-enet", - "fsl,cpm2-fcc-enet"; - reg = <11300 20 8400 100 11390 1>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <20 8>; - interrupt-parent = <&PIC>; - phy-handle = <&PHY0>; - fsl,cpm-command = <12000300>; - }; - -* MDIO - -Currently defined compatibles: -fsl,pq1-fec-mdio (reg is same as first resource of FEC device) -fsl,cpm2-mdio-bitbang (reg is port C registers) - -Properties for fsl,cpm2-mdio-bitbang: -fsl,mdio-pin : pin of port C controlling mdio data -fsl,mdc-pin : pin of port C controlling mdio clock - -Example: - mdio@10d40 { - compatible = "fsl,mpc8272ads-mdio-bitbang", - "fsl,mpc8272-mdio-bitbang", - "fsl,cpm2-mdio-bitbang"; - reg = <10d40 14>; - #address-cells = <1>; - #size-cells = <0>; - fsl,mdio-pin = <12>; - fsl,mdc-pin = <13>; - }; - -* HDLC - -Currently defined compatibles: -- fsl,ucc-hdlc - -Properties for fsl,ucc-hdlc: -- rx-clock-name -- tx-clock-name - Usage: required - Value type: - Definition : Must be "brg1"-"brg16" for internal clock source, - Must be "clk1"-"clk24" for external clock source. - -- fsl,tdm-interface - Usage: optional - Value type: - Definition : Specify that hdlc is based on tdm-interface - -The property below is dependent on fsl,tdm-interface: -- fsl,rx-sync-clock - Usage: required - Value type: - Definition : Must be "none", "rsync_pin", "brg9-11" and "brg13-15". - -- fsl,tx-sync-clock - Usage: required - Value type: - Definition : Must be "none", "tsync_pin", "brg9-11" and "brg13-15". - -- fsl,tdm-framer-type - Usage: required for tdm interface - Value type: - Definition : "e1" or "t1".Now e1 and t1 are used, other framer types - are not supported. - -- fsl,tdm-id - Usage: required for tdm interface - Value type: - Definition : number of TDM ID - -- fsl,tx-timeslot-mask -- fsl,rx-timeslot-mask - Usage: required for tdm interface - Value type: - Definition : time slot mask for TDM operation. Indicates which time - slots used for transmitting and receiving. - -- fsl,siram-entry-id - Usage: required for tdm interface - Value type: - Definition : Must be 0,2,4...64. the number of TDM entry. - -- fsl,tdm-internal-loopback - usage: optional for tdm interface - value type: - Definition : Internal loopback connecting on TDM layer. -- fsl,hmask - usage: optional - Value type: - Definition: HDLC address recognition. 
Set to zero to disable - address filtering of packets: - fsl,hmask = /bits/ 16 <0x0000>; - -Example for tdm interface: - - ucc@2000 { - compatible = "fsl,ucc-hdlc"; - rx-clock-name = "clk8"; - tx-clock-name = "clk9"; - fsl,rx-sync-clock = "rsync_pin"; - fsl,tx-sync-clock = "tsync_pin"; - fsl,tx-timeslot-mask = <0xfffffffe>; - fsl,rx-timeslot-mask = <0xfffffffe>; - fsl,tdm-framer-type = "e1"; - fsl,tdm-id = <0>; - fsl,siram-entry-id = <0>; - fsl,tdm-interface; - }; - -Example for hdlc without tdm interface: - - ucc@2000 { - compatible = "fsl,ucc-hdlc"; - rx-clock-name = "brg1"; - tx-clock-name = "brg1"; - }; diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml index 2a456c8af992e0..2958ef45b3e9b0 100644 --- a/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml +++ b/Documentation/devicetree/bindings/soc/fsl/fsl,layerscape-scfg.yaml @@ -23,6 +23,9 @@ properties: - fsl,ls1028a-scfg - fsl,ls1043a-scfg - fsl,ls1046a-scfg + - fsl,ls1088a-isc + - fsl,ls2080a-isc + - fsl,lx2160a-isc - const: syscon reg: diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,rcpm.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,rcpm.yaml new file mode 100644 index 00000000000000..03d71ab930d791 --- /dev/null +++ b/Documentation/devicetree/bindings/soc/fsl/fsl,rcpm.yaml @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soc/fsl/fsl,rcpm.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Run Control and Power Management + +description: + The RCPM performs all device-level tasks associated with device run control + and power management. + +maintainers: + - Frank Li + +properties: + compatible: + oneOf: + - items: + - enum: + - fsl,p2041-rcpm + - fsl,p3041-rcpm + - fsl,p4080-rcpm + - fsl,p5020-rcpm + - fsl,p5040-rcpm + - const: fsl,qoriq-rcpm-1.0 + - items: + - enum: + - fsl,b4420-rcpm + - fsl,b4860-rcpm + - fsl,t4240-rcpm + - const: fsl,qoriq-rcpm-2.0 + - items: + - enum: + - fsl,t1040-rcpm + - const: fsl,qoriq-rcpm-2.1 + - items: + - enum: + - fsl,ls1012a-rcpm + - fsl,ls1021a-rcpm + - fsl,ls1028a-rcpm + - fsl,ls1043a-rcpm + - fsl,ls1046a-rcpm + - fsl,ls1088a-rcpm + - fsl,ls208xa-rcpm + - fsl,lx2160a-rcpm + - const: fsl,qoriq-rcpm-2.1+ + + reg: + maxItems: 1 + + "#fsl,rcpm-wakeup-cells": + description: | + The number of IPPDEXPCR register cells in the + fsl,rcpm-wakeup property. + + Freescale RCPM Wakeup Source Device Tree Bindings + + Required fsl,rcpm-wakeup property should be added to a device node if + the device can be used as a wakeup source. + + fsl,rcpm-wakeup: Consists of a phandle to the rcpm node and the IPPDEXPCR + register cells. The number of IPPDEXPCR register cells is defined in + "#fsl,rcpm-wakeup-cells" in the rcpm node. The first register cell is + the bit mask that should be set in IPPDEXPCR0, and the second register + cell is for IPPDEXPCR1, and so on. + + Note: IPPDEXPCR(IP Powerdown Exception Control Register) provides a + mechanism for keeping certain blocks awake during STANDBY and MEM, in + order to use them as wake-up sources. + + little-endian: + $ref: /schemas/types.yaml#/definitions/flag + description: + RCPM register block is Little Endian. Without it RCPM + will be Big Endian (default case). 
+ +additionalProperties: false + +examples: + - | + #include + global-utilities@e2000 { + compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0"; + reg = <0xe2000 0x1000>; + #fsl,rcpm-wakeup-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/soc/fsl/rcpm.txt b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt deleted file mode 100644 index 5a33619d881d05..00000000000000 --- a/Documentation/devicetree/bindings/soc/fsl/rcpm.txt +++ /dev/null @@ -1,69 +0,0 @@ -* Run Control and Power Management -------------------------------------------- -The RCPM performs all device-level tasks associated with device run control -and power management. - -Required properites: - - reg : Offset and length of the register set of the RCPM block. - - #fsl,rcpm-wakeup-cells : The number of IPPDEXPCR register cells in the - fsl,rcpm-wakeup property. - - compatible : Must contain a chip-specific RCPM block compatible string - and (if applicable) may contain a chassis-version RCPM compatible - string. Chip-specific strings are of the form "fsl,-rcpm", - such as: - * "fsl,p2041-rcpm" - * "fsl,p5020-rcpm" - * "fsl,t4240-rcpm" - - Chassis-version strings are of the form "fsl,qoriq-rcpm-", - such as: - * "fsl,qoriq-rcpm-1.0": for chassis 1.0 rcpm - * "fsl,qoriq-rcpm-2.0": for chassis 2.0 rcpm - * "fsl,qoriq-rcpm-2.1": for chassis 2.1 rcpm - * "fsl,qoriq-rcpm-2.1+": for chassis 2.1+ rcpm - -All references to "1.0" and "2.0" refer to the QorIQ chassis version to -which the chip complies. -Chassis Version Example Chips ---------------- ------------------------------- -1.0 p4080, p5020, p5040, p2041, p3041 -2.0 t4240, b4860, b4420 -2.1 t1040, -2.1+ ls1021a, ls1012a, ls1043a, ls1046a - -Optional properties: - - little-endian : RCPM register block is Little Endian. Without it RCPM - will be Big Endian (default case). - -Example: -The RCPM node for T4240: - rcpm: global-utilities@e2000 { - compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0"; - reg = <0xe2000 0x1000>; - #fsl,rcpm-wakeup-cells = <2>; - }; - -* Freescale RCPM Wakeup Source Device Tree Bindings -------------------------------------------- -Required fsl,rcpm-wakeup property should be added to a device node if the device -can be used as a wakeup source. - - - fsl,rcpm-wakeup: Consists of a phandle to the rcpm node and the IPPDEXPCR - register cells. The number of IPPDEXPCR register cells is defined in - "#fsl,rcpm-wakeup-cells" in the rcpm node. The first register cell is - the bit mask that should be set in IPPDEXPCR0, and the second register - cell is for IPPDEXPCR1, and so on. - - Note: IPPDEXPCR(IP Powerdown Exception Control Register) provides a - mechanism for keeping certain blocks awake during STANDBY and MEM, in - order to use them as wake-up sources. 
- -Example: - lpuart0: serial@2950000 { - compatible = "fsl,ls1021a-lpuart"; - reg = <0x0 0x2950000 0x0 0x1000>; - interrupts = ; - clocks = <&sysclk>; - clock-names = "ipg"; - fsl,rcpm-wakeup = <&rcpm 0x0 0x40000000>; - }; diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml index 4512390f90f043..2d3fe0b54243e8 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml @@ -30,6 +30,11 @@ properties: - qcom,sm8450-pmic-glink - qcom,sm8550-pmic-glink - const: qcom,pmic-glink + - items: + - enum: + - qcom,sm7325-pmic-glink + - const: qcom,qcm6490-pmic-glink + - const: qcom,pmic-glink - items: - enum: - qcom,sm8650-pmic-glink diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml index 2fa725b8af5db2..270bcd079f8861 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml @@ -30,31 +30,37 @@ maintainers: properties: compatible: - enum: - - qcom,rpm-apq8084 - - qcom,rpm-ipq6018 - - qcom,rpm-ipq9574 - - qcom,rpm-mdm9607 - - qcom,rpm-msm8226 - - qcom,rpm-msm8610 - - qcom,rpm-msm8909 - - qcom,rpm-msm8916 - - qcom,rpm-msm8917 - - qcom,rpm-msm8936 - - qcom,rpm-msm8937 - - qcom,rpm-msm8952 - - qcom,rpm-msm8953 - - qcom,rpm-msm8974 - - qcom,rpm-msm8976 - - qcom,rpm-msm8994 - - qcom,rpm-msm8996 - - qcom,rpm-msm8998 - - qcom,rpm-qcm2290 - - qcom,rpm-qcs404 - - qcom,rpm-sdm660 - - qcom,rpm-sm6115 - - qcom,rpm-sm6125 - - qcom,rpm-sm6375 + oneOf: + - items: + - enum: + - qcom,rpm-apq8084 + - qcom,rpm-mdm9607 + - qcom,rpm-msm8226 + - qcom,rpm-msm8610 + - qcom,rpm-msm8909 + - qcom,rpm-msm8916 + - qcom,rpm-msm8917 + - qcom,rpm-msm8936 + - qcom,rpm-msm8937 + - qcom,rpm-msm8952 + - qcom,rpm-msm8953 + - qcom,rpm-msm8974 + - qcom,rpm-msm8976 + - qcom,rpm-msm8994 + - const: qcom,smd-rpm + - items: + - enum: + - qcom,rpm-ipq6018 + - qcom,rpm-ipq9574 + - qcom,rpm-msm8996 + - qcom,rpm-msm8998 + - qcom,rpm-qcm2290 + - qcom,rpm-qcs404 + - qcom,rpm-sdm660 + - qcom,rpm-sm6115 + - qcom,rpm-sm6125 + - qcom,rpm-sm6375 + - const: qcom,glink-smd-rpm clock-controller: $ref: /schemas/clock/qcom,rpmcc.yaml# @@ -84,21 +90,7 @@ if: properties: compatible: contains: - enum: - - qcom,rpm-apq8084 - - qcom,rpm-mdm9607 - - qcom,rpm-msm8226 - - qcom,rpm-msm8610 - - qcom,rpm-msm8909 - - qcom,rpm-msm8916 - - qcom,rpm-msm8917 - - qcom,rpm-msm8936 - - qcom,rpm-msm8937 - - qcom,rpm-msm8952 - - qcom,rpm-msm8953 - - qcom,rpm-msm8974 - - qcom,rpm-msm8976 - - qcom,rpm-msm8994 + const: qcom,smd-rpm then: properties: qcom,glink-channels: false @@ -129,7 +121,7 @@ examples: qcom,smd-edge = <15>; rpm-requests { - compatible = "qcom,rpm-msm8916"; + compatible = "qcom,rpm-msm8916", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; clock-controller { diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml index 4819ce90d20641..d9fabefc8147b2 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml @@ -56,7 +56,7 @@ examples: qcom,smd-edge = <15>; rpm-requests { - compatible = "qcom,rpm-msm8974"; + compatible = "qcom,rpm-msm8974", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; clock-controller { diff --git 
a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml index 09d3ce97efa2a3..b7acb65bdecd2a 100644 --- a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml +++ b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml @@ -127,6 +127,18 @@ properties: - const: hoperun,hihope-rzg2m - const: renesas,r8a774a1 + - description: RZ/G2M v3.0 (R8A774A3) + items: + - enum: + - hoperun,hihope-rzg2m # HopeRun HiHope RZ/G2M platform + - const: renesas,r8a774a3 + + - items: + - enum: + - hoperun,hihope-rzg2-ex # HopeRun expansion board for HiHope RZ/G2 platforms + - const: hoperun,hihope-rzg2m + - const: renesas,r8a774a3 + - description: RZ/G2N (R8A774B1) items: - enum: @@ -515,6 +527,8 @@ properties: - description: RZ/V2H(P) (R9A09G057) items: + - enum: + - renesas,rzv2h-evk # RZ/V2H EVK - enum: - renesas,r9a09g057h41 # RZ/V2H - renesas,r9a09g057h42 # RZ/V2H with Mali-G31 support diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml index 35b20e53b51328..50d727f4b76c62 100644 --- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml +++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml @@ -20,6 +20,20 @@ properties: - rockchip,rk3568-pipe-grf - rockchip,rk3568-pipe-phy-grf - rockchip,rk3568-usb2phy-grf + - rockchip,rk3576-bigcore-grf + - rockchip,rk3576-cci-grf + - rockchip,rk3576-gpu-grf + - rockchip,rk3576-litcore-grf + - rockchip,rk3576-npu-grf + - rockchip,rk3576-php-grf + - rockchip,rk3576-pipe-phy-grf + - rockchip,rk3576-pmu1-grf + - rockchip,rk3576-sdgmac-grf + - rockchip,rk3576-sys-grf + - rockchip,rk3576-usb-grf + - rockchip,rk3576-usbdpphy-grf + - rockchip,rk3576-vo0-grf + - rockchip,rk3576-vop-grf - rockchip,rk3588-bigcore0-grf - rockchip,rk3588-bigcore1-grf - rockchip,rk3588-hdptxphy-grf @@ -64,6 +78,8 @@ properties: - rockchip,rk3399-pmugrf - rockchip,rk3568-grf - rockchip,rk3568-pmugrf + - rockchip,rk3576-ioc-grf + - rockchip,rk3576-pmu0-grf - rockchip,rk3588-usb2phy-grf - rockchip,rv1108-grf - rockchip,rv1108-pmugrf diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml index 8b478d6cdc303c..f80fcbc3128bfb 100644 --- a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml +++ b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml @@ -32,11 +32,16 @@ properties: - enum: - samsung,exynos850-usi - reg: true + reg: + maxItems: 1 - clocks: true + clocks: + maxItems: 2 - clock-names: true + clock-names: + items: + - const: pclk + - const: ipclk ranges: true @@ -113,9 +118,7 @@ then: - description: Operating clock for UART/SPI/I2C protocol clock-names: - items: - - const: pclk - - const: ipclk + maxItems: 2 required: - reg diff --git a/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml b/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml index a10a3b89ae05e8..94b36943a50ffa 100644 --- a/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml +++ b/Documentation/devicetree/bindings/soc/ti/ti,am654-serdes-ctrl.yaml @@ -14,6 +14,7 @@ properties: items: - const: ti,am654-serdes-ctrl - const: syscon + - const: simple-mfd reg: maxItems: 1 @@ -31,7 +32,7 @@ additionalProperties: false examples: - | clock@4080 { - compatible = "ti,am654-serdes-ctrl", "syscon"; + compatible = "ti,am654-serdes-ctrl", "syscon", "simple-mfd"; reg = <0x4080 0x4>; mux-controller { diff --git 
a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml index c402cb2928e893..3cb1471cc6b62e 100644 --- a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml +++ b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml @@ -278,6 +278,26 @@ patternProperties: additionalProperties: false + ^pa-stats@[a-f0-9]+$: + description: | + PA-STATS sub-module represented as a SysCon. PA_STATS is a set of + registers where different statistics related to ICSSG, are dumped by + ICSSG firmware. This syscon sub-module will help the device to + access/read/write those statistics. + + type: object + + additionalProperties: false + + properties: + compatible: + items: + - const: ti,pruss-pa-st + - const: syscon + + reg: + maxItems: 1 + interrupt-controller@[a-f0-9]+$: description: | PRUSS INTC Node. Each PRUSS has a single interrupt controller instance diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.yaml index 5db718e4d0e7ab..4f13e8ab50b2a4 100644 --- a/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.yaml +++ b/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.yaml @@ -26,6 +26,13 @@ properties: A list off component DAPM widget. Each entry is a pair of strings, the first being the widget type, the second being the widget name + clocks: + minItems: 1 + maxItems: 3 + description: + Base PLL clocks of audio susbsytem, used to configure base clock + frequencies for different audio use-cases. + patternProperties: "^dai-link-[0-9]+$": type: object diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml index 0ecdaf7190e9f1..413b4777818187 100644 --- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml +++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml @@ -27,6 +27,13 @@ properties: A list off component DAPM widget. Each entry is a pair of strings, the first being the widget type, the second being the widget name + clocks: + minItems: 1 + maxItems: 3 + description: + Base PLL clocks of audio susbsytem, used to configure base clock + frequencies for different audio use-cases. + patternProperties: "^dai-link-[0-9]+$": type: object diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs4271.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs4271.yaml new file mode 100644 index 00000000000000..68fbf5cc208f5b --- /dev/null +++ b/Documentation/devicetree/bindings/sound/cirrus,cs4271.yaml @@ -0,0 +1,101 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/cirrus,cs4271.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus Logic CS4271 audio CODEC + +maintainers: + - Alexander Sverdlin + - Nikita Shubin + +description: + The CS4271 is a stereo audio codec. This device supports both the I2C + and the SPI bus. + +allOf: + - $ref: dai-common.yaml# + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + const: cirrus,cs4271 + + reg: + maxItems: 1 + + spi-cpha: true + + spi-cpol: true + + '#sound-dai-cells': + const: 0 + + reset-gpios: + description: + This pin will be deasserted before communication to the codec starts. + maxItems: 1 + + va-supply: + description: Analog power supply. + + vd-supply: + description: Digital power supply. 
+ + vl-supply: + description: Serial Control Port power supply. + + port: + $ref: audio-graph-port.yaml# + unevaluatedProperties: false + + cirrus,amuteb-eq-bmutec: + description: + When given, the Codec's AMUTEB=BMUTEC flag is enabled. + type: boolean + + cirrus,enable-soft-reset: + description: | + The CS4271 requires its LRCLK and MCLK to be stable before its RESET + line is de-asserted. That also means that clocks cannot be changed + without putting the chip back into hardware reset, which also requires + a complete re-initialization of all registers. + + One (undocumented) workaround is to assert and de-assert the PDN bit + in the MODE2 register. This workaround can be enabled with this DT + property. + + Note that this is not needed in case the clocks are stable + throughout the entire runtime of the codec. + type: boolean + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include + spi { + #address-cells = <1>; + #size-cells = <0>; + codec@0 { + compatible = "cirrus,cs4271"; + reg = <0>; + #sound-dai-cells = <0>; + spi-max-frequency = <6000000>; + spi-cpol; + spi-cpha; + reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>; + port { + endpoint { + remote-endpoint = <&i2s_ep>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml b/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml index 453d493c941f6d..4693e85aed37fc 100644 --- a/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml +++ b/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml @@ -40,6 +40,20 @@ properties: - const: sclk - const: lrclk + dmas: + items: + - description: out DMA channel + - description: in DMA channel + + dma-names: + items: + - const: tx + - const: rx + + port: + $ref: audio-graph-port.yaml# + unevaluatedProperties: false + required: - compatible - '#sound-dai-cells' @@ -61,6 +75,8 @@ examples: <&syscon 30>, <&syscon 31>; clock-names = "mclk", "sclk", "lrclk"; + dmas = <&dma0 0 1>, <&dma0 0 2>; + dma-names = "tx", "rx"; }; ... diff --git a/Documentation/devicetree/bindings/sound/cs4271.txt b/Documentation/devicetree/bindings/sound/cs4271.txt deleted file mode 100644 index 6e699ceabacded..00000000000000 --- a/Documentation/devicetree/bindings/sound/cs4271.txt +++ /dev/null @@ -1,57 +0,0 @@ -Cirrus Logic CS4271 DT bindings - -This driver supports both the I2C and the SPI bus. - -Required properties: - - - compatible: "cirrus,cs4271" - -For required properties on SPI, please consult -Documentation/devicetree/bindings/spi/spi-bus.txt - -Required properties on I2C: - - - reg: the i2c address - - -Optional properties: - - - reset-gpio: a GPIO spec to define which pin is connected to the chip's - !RESET pin - - cirrus,amuteb-eq-bmutec: When given, the Codec's AMUTEB=BMUTEC flag - is enabled. - - cirrus,enable-soft-reset: - The CS4271 requires its LRCLK and MCLK to be stable before its RESET - line is de-asserted. That also means that clocks cannot be changed - without putting the chip back into hardware reset, which also requires - a complete re-initialization of all registers. - - One (undocumented) workaround is to assert and de-assert the PDN bit - in the MODE2 register. This workaround can be enabled with this DT - property. - - Note that this is not needed in case the clocks are stable - throughout the entire runtime of the codec. 
- - - vd-supply: Digital power - - vl-supply: Logic power - - va-supply: Analog Power - -Examples: - - codec_i2c: cs4271@10 { - compatible = "cirrus,cs4271"; - reg = <0x10>; - reset-gpio = <&gpio 23 0>; - vd-supply = <&vdd_3v3_reg>; - vl-supply = <&vdd_3v3_reg>; - va-supply = <&vdd_3v3_reg>; - }; - - codec_spi: cs4271@0 { - compatible = "cirrus,cs4271"; - reg = <0x0>; - reset-gpio = <&gpio 23 0>; - spi-max-frequency = <6000000>; - }; - diff --git a/Documentation/devicetree/bindings/sound/da7213.txt b/Documentation/devicetree/bindings/sound/da7213.txt deleted file mode 100644 index 94584c96c4ae22..00000000000000 --- a/Documentation/devicetree/bindings/sound/da7213.txt +++ /dev/null @@ -1,45 +0,0 @@ -Dialog Semiconductor DA7212/DA7213 Audio Codec bindings - -====== - -Required properties: -- compatible : Should be "dlg,da7212" or "dlg,da7213" -- reg: Specifies the I2C slave address - -Optional properties: -- clocks : phandle and clock specifier for codec MCLK. -- clock-names : Clock name string for 'clocks' attribute, should be "mclk". - -- dlg,micbias1-lvl : Voltage (mV) for Mic Bias 1 - [<1600>, <2200>, <2500>, <3000>] -- dlg,micbias2-lvl : Voltage (mV) for Mic Bias 2 - [<1600>, <2200>, <2500>, <3000>] -- dlg,dmic-data-sel : DMIC channel select based on clock edge. - ["lrise_rfall", "lfall_rrise"] -- dlg,dmic-samplephase : When to sample audio from DMIC. - ["on_clkedge", "between_clkedge"] -- dlg,dmic-clkrate : DMIC clock frequency (Hz). - [<1500000>, <3000000>] - - - VDDA-supply : Regulator phandle for Analogue power supply - - VDDMIC-supply : Regulator phandle for Mic Bias - - VDDIO-supply : Regulator phandle for I/O power supply - -====== - -Example: - - codec_i2c: da7213@1a { - compatible = "dlg,da7213"; - reg = <0x1a>; - - clocks = <&clks 201>; - clock-names = "mclk"; - - dlg,micbias1-lvl = <2500>; - dlg,micbias2-lvl = <2500>; - - dlg,dmic-data-sel = "lrise_rfall"; - dlg,dmic-samplephase = "between_clkedge"; - dlg,dmic-clkrate = <3000000>; - }; diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml index 7735e08d35ba14..ab3206ffa4af88 100644 --- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml +++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml @@ -102,7 +102,7 @@ properties: default: 2 interrupts: - anyOf: + oneOf: - minItems: 1 items: - description: TX interrupt diff --git a/Documentation/devicetree/bindings/sound/dlg,da7213.yaml b/Documentation/devicetree/bindings/sound/dlg,da7213.yaml new file mode 100644 index 00000000000000..c2dede1e82ffa4 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/dlg,da7213.yaml @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/dlg,da7213.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Dialog Semiconductor DA7212/DA7213 Audio Codec + +maintainers: + - Support Opensource + +allOf: + - $ref: dai-common.yaml# + +properties: + compatible: + enum: + - dlg,da7212 + - dlg,da7213 + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: mclk + + "#sound-dai-cells": + const: 0 + + dlg,micbias1-lvl: + description: Voltage (mV) for Mic Bias 1 + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [ 1600, 2200, 2500, 3000 ] + + dlg,micbias2-lvl: + description: Voltage (mV) for Mic Bias 2 + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [ 1600, 2200, 2500, 3000 ] + + dlg,dmic-data-sel: + description: 
DMIC channel select based on clock edge + enum: [ lrise_rfall, lfall_rrise ] + + dlg,dmic-samplephase: + description: When to sample audio from DMIC + enum: [ on_clkedge, between_clkedge ] + + dlg,dmic-clkrate: + description: DMIC clock frequency (Hz) + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [ 1500000, 3000000 ] + + VDDA-supply: + description: Analogue power supply + + VDDIO-supply: + description: I/O power supply + + VDDMIC-supply: + description: Mic Bias + + VDDSP-supply: + description: Speaker supply + + ports: + $ref: audio-graph-port.yaml#/definitions/ports + + port: + $ref: audio-graph-port.yaml# + unevaluatedProperties: false + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + codec@1a { + compatible = "dlg,da7213"; + reg = <0x1a>; + + clocks = <&clks 201>; + clock-names = "mclk"; + + #sound-dai-cells = <0>; + + dlg,micbias1-lvl = <2500>; + dlg,micbias2-lvl = <2500>; + + dlg,dmic-data-sel = "lrise_rfall"; + dlg,dmic-samplephase = "between_clkedge"; + dlg,dmic-clkrate = <3000000>; + }; + }; diff --git a/Documentation/devicetree/bindings/sound/everest,es8326.yaml b/Documentation/devicetree/bindings/sound/everest,es8326.yaml index 8c82d47375ec76..d51431df7acf9c 100644 --- a/Documentation/devicetree/bindings/sound/everest,es8326.yaml +++ b/Documentation/devicetree/bindings/sound/everest,es8326.yaml @@ -32,7 +32,7 @@ properties: description: | just the value of reg 57. Bit(3) decides whether the jack polarity is inverted. Bit(2) decides whether the button on the headset is inverted. - Bit(1)/(0) decides the mic properity to be OMTP/CTIA or auto. + Bit(1)/(0) decides the mic property to be OMTP/CTIA or auto. minimum: 0x00 maximum: 0x0f default: 0x0f diff --git a/Documentation/devicetree/bindings/sound/fsl,imx-audio-es8328.yaml b/Documentation/devicetree/bindings/sound/fsl,imx-audio-es8328.yaml new file mode 100644 index 00000000000000..5eb6f5812cf2a5 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/fsl,imx-audio-es8328.yaml @@ -0,0 +1,111 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/fsl,imx-audio-es8328.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale i.MX audio complex with ES8328 codec + +maintainers: + - Shawn Guo + - Sascha Hauer + +allOf: + - $ref: sound-card-common.yaml# + +properties: + compatible: + const: fsl,imx-audio-es8328 + + model: + $ref: /schemas/types.yaml#/definitions/string + description: The user-visible name of this sound complex + + ssi-controller: + $ref: /schemas/types.yaml#/definitions/phandle + description: The phandle of the i.MX SSI controller + + jack-gpio: + description: Optional GPIO for headphone jack + maxItems: 1 + + audio-amp-supply: + description: Power regulator for speaker amps + + audio-codec: + $ref: /schemas/types.yaml#/definitions/phandle + description: The phandle to the ES8328 audio codec + + audio-routing: + $ref: /schemas/types.yaml#/definitions/non-unique-string-array + description: | + A list of the connections between audio components. Each entry + is a pair of strings, the first being the connection's sink, the second + being the connection's source. 
Valid names could be power supplies, + ES8328 pins, and the jacks on the board: + + Power supplies: + * audio-amp + + ES8328 pins: + * LOUT1 + * LOUT2 + * ROUT1 + * ROUT2 + * LINPUT1 + * LINPUT2 + * RINPUT1 + * RINPUT2 + * Mic PGA + + Board connectors: + * Headphone + * Speaker + * Mic Jack + + mux-int-port: + $ref: /schemas/types.yaml#/definitions/uint32 + description: The internal port of the i.MX audio muxer (AUDMUX) + enum: [1, 2, 7] + default: 1 + + mux-ext-port: + $ref: /schemas/types.yaml#/definitions/uint32 + description: The external port of the i.MX audio muxer (AUDMIX) + enum: [3, 4, 5, 6] + default: 3 + +required: + - compatible + - model + - ssi-controller + - jack-gpio + - audio-amp-supply + - audio-codec + - audio-routing + - mux-int-port + - mux-ext-port + +unevaluatedProperties: false + +examples: + - | + sound { + compatible = "fsl,imx-audio-es8328"; + model = "imx-audio-es8328"; + ssi-controller = <&ssi1>; + audio-codec = <&codec>; + jack-gpio = <&gpio5 15 0>; + audio-amp-supply = <®_audio_amp>; + audio-routing = + "Speaker", "LOUT2", + "Speaker", "ROUT2", + "Speaker", "audio-amp", + "Headphone", "ROUT1", + "Headphone", "LOUT1", + "LINPUT1", "Mic Jack", + "RINPUT1", "Mic Jack", + "Mic Jack", "Mic Bias"; + mux-int-port = <1>; + mux-ext-port = <3>; + }; diff --git a/Documentation/devicetree/bindings/sound/fsl,saif.yaml b/Documentation/devicetree/bindings/sound/fsl,saif.yaml new file mode 100644 index 00000000000000..0b5db6bb1b7ca6 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/fsl,saif.yaml @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/fsl,saif.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale MXS Serial Audio Interface (SAIF) + +maintainers: + - Lukasz Majewski + +allOf: + - $ref: dai-common.yaml# + +description: + The SAIF is based on I2S module that is used to communicate with audio codecs, + but only with half-duplex manner (i.e. it can either transmit or receive PCM + audio). + +properties: + compatible: + const: fsl,imx28-saif + + reg: + maxItems: 1 + + "#sound-dai-cells": + const: 0 + + interrupts: + maxItems: 1 + + dmas: + maxItems: 1 + + dma-names: + const: rx-tx + + "#clock-cells": + description: Configure the I2S device as MCLK clock provider. 
+ const: 0 + + clocks: + maxItems: 1 + + fsl,saif-master: + description: Indicate that saif is a slave and its phandle points to master + $ref: /schemas/types.yaml#/definitions/phandle + +required: + - compatible + - reg + - "#sound-dai-cells" + - interrupts + - dmas + - dma-names + - clocks + +unevaluatedProperties: false + +examples: + - | + saif0: saif@80042000 { + compatible = "fsl,imx28-saif"; + reg = <0x80042000 2000>; + #sound-dai-cells = <0>; + interrupts = <59>; + dmas = <&dma_apbx 4>; + dma-names = "rx-tx"; + #clock-cells = <0>; + clocks = <&clks 53>; + }; + - | + saif1: saif@80046000 { + compatible = "fsl,imx28-saif"; + reg = <0x80046000 2000>; + #sound-dai-cells = <0>; + interrupts = <58>; + dmas = <&dma_apbx 5>; + dma-names = "rx-tx"; + clocks = <&clks 53>; + fsl,saif-master = <&saif0>; + }; diff --git a/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt b/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt deleted file mode 100644 index 07b68ab206fbcf..00000000000000 --- a/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt +++ /dev/null @@ -1,60 +0,0 @@ -Freescale i.MX audio complex with ES8328 codec - -Required properties: -- compatible : "fsl,imx-audio-es8328" -- model : The user-visible name of this sound complex -- ssi-controller : The phandle of the i.MX SSI controller -- jack-gpio : Optional GPIO for headphone jack -- audio-amp-supply : Power regulator for speaker amps -- audio-codec : The phandle of the ES8328 audio codec -- audio-routing : A list of the connections between audio components. - Each entry is a pair of strings, the first being the - connection's sink, the second being the connection's - source. Valid names could be power supplies, ES8328 - pins, and the jacks on the board: - - Power supplies: - * audio-amp - - ES8328 pins: - * LOUT1 - * LOUT2 - * ROUT1 - * ROUT2 - * LINPUT1 - * LINPUT2 - * RINPUT1 - * RINPUT2 - * Mic PGA - - Board connectors: - * Headphone - * Speaker - * Mic Jack -- mux-int-port : The internal port of the i.MX audio muxer (AUDMUX) -- mux-ext-port : The external port of the i.MX audio muxer (AUDMIX) - -Note: The AUDMUX port numbering should start at 1, which is consistent with -hardware manual. 
- -Example: - -sound { - compatible = "fsl,imx-audio-es8328"; - model = "imx-audio-es8328"; - ssi-controller = <&ssi1>; - audio-codec = <&codec>; - jack-gpio = <&gpio5 15 0>; - audio-amp-supply = <®_audio_amp>; - audio-routing = - "Speaker", "LOUT2", - "Speaker", "ROUT2", - "Speaker", "audio-amp", - "Headphone", "ROUT1", - "Headphone", "LOUT1", - "LINPUT1", "Mic Jack", - "RINPUT1", "Mic Jack", - "Mic Jack", "Mic Bias"; - mux-int-port = <1>; - mux-ext-port = <3>; -}; diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8365-afe.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8365-afe.yaml new file mode 100644 index 00000000000000..45ad56d3723435 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mediatek,mt8365-afe.yaml @@ -0,0 +1,130 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/mediatek,mt8365-afe.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek Audio Front End PCM controller for MT8365 + +maintainers: + - Alexandre Mergnat + +properties: + compatible: + const: mediatek,mt8365-afe-pcm + + reg: + maxItems: 1 + + "#sound-dai-cells": + const: 0 + + clocks: + items: + - description: 26M clock + - description: mux for audio clock + - description: audio i2s0 mck + - description: audio i2s1 mck + - description: audio i2s2 mck + - description: audio i2s3 mck + - description: engen 1 clock + - description: engen 2 clock + - description: audio 1 clock + - description: audio 2 clock + - description: mux for i2s0 + - description: mux for i2s1 + - description: mux for i2s2 + - description: mux for i2s3 + + clock-names: + items: + - const: top_clk26m_clk + - const: top_audio_sel + - const: audio_i2s0_m + - const: audio_i2s1_m + - const: audio_i2s2_m + - const: audio_i2s3_m + - const: engen1 + - const: engen2 + - const: aud1 + - const: aud2 + - const: i2s0_m_sel + - const: i2s1_m_sel + - const: i2s2_m_sel + - const: i2s3_m_sel + + interrupts: + maxItems: 1 + + power-domains: + maxItems: 1 + + mediatek,dmic-mode: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Indicates how many data pins are used to transmit two channels of PDM + signal. 1 means two wires, 0 means one wire. Default value is 0. + enum: + - 0 # one wire + - 1 # two wires + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + - power-domains + +additionalProperties: false + +examples: + - | + #include + #include + #include + #include + + soc { + #address-cells = <2>; + #size-cells = <2>; + + audio-controller@11220000 { + compatible = "mediatek,mt8365-afe-pcm"; + reg = <0 0x11220000 0 0x1000>; + #sound-dai-cells = <0>; + clocks = <&clk26m>, + <&topckgen CLK_TOP_AUDIO_SEL>, + <&topckgen CLK_TOP_AUD_I2S0_M>, + <&topckgen CLK_TOP_AUD_I2S1_M>, + <&topckgen CLK_TOP_AUD_I2S2_M>, + <&topckgen CLK_TOP_AUD_I2S3_M>, + <&topckgen CLK_TOP_AUD_ENGEN1_SEL>, + <&topckgen CLK_TOP_AUD_ENGEN2_SEL>, + <&topckgen CLK_TOP_AUD_1_SEL>, + <&topckgen CLK_TOP_AUD_2_SEL>, + <&topckgen CLK_TOP_APLL_I2S0_SEL>, + <&topckgen CLK_TOP_APLL_I2S1_SEL>, + <&topckgen CLK_TOP_APLL_I2S2_SEL>, + <&topckgen CLK_TOP_APLL_I2S3_SEL>; + clock-names = "top_clk26m_clk", + "top_audio_sel", + "audio_i2s0_m", + "audio_i2s1_m", + "audio_i2s2_m", + "audio_i2s3_m", + "engen1", + "engen2", + "aud1", + "aud2", + "i2s0_m_sel", + "i2s1_m_sel", + "i2s2_m_sel", + "i2s3_m_sel"; + interrupts = ; + power-domains = <&spm MT8365_POWER_DOMAIN_AUDIO>; + mediatek,dmic-mode = <1>; + }; + }; + +... 
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8365-mt6357.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8365-mt6357.yaml new file mode 100644 index 00000000000000..ff9ebb63a05ff8 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mediatek,mt8365-mt6357.yaml @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/mediatek,mt8365-mt6357.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MediaTek MT8365 ASoC sound card + +maintainers: + - Alexandre Mergnat + +properties: + compatible: + const: mediatek,mt8365-mt6357 + + pinctrl-names: + minItems: 1 + items: + - const: default + - const: dmic + - const: miso_off + - const: miso_on + - const: mosi_off + - const: mosi_on + + mediatek,platform: + $ref: /schemas/types.yaml#/definitions/phandle + description: The phandle of MT8365 ASoC platform. + +patternProperties: + "^dai-link-[0-9]+$": + type: object + description: + Container for dai-link level properties and CODEC sub-nodes. + + properties: + codec: + type: object + description: Holds subnode which indicates codec dai. + + properties: + sound-dai: + maxItems: 1 + description: phandle of the codec DAI + + additionalProperties: false + + link-name: + description: Indicates dai-link name and PCM stream name + enum: + - I2S_IN_BE + - I2S_OUT_BE + - PCM1_BE + - PDM1_BE + - PDM2_BE + - PDM3_BE + - PDM4_BE + - SPDIF_IN_BE + - SPDIF_OUT_BE + - TDM_IN_BE + - TDM_OUT_BE + + sound-dai: + maxItems: 1 + description: phandle of the CPU DAI + + required: + - link-name + - sound-dai + + additionalProperties: false + +required: + - compatible + - pinctrl-names + - mediatek,platform + +additionalProperties: false + +examples: + - | + sound { + compatible = "mediatek,mt8365-mt6357"; + pinctrl-names = "default", + "dmic", + "miso_off", + "miso_on", + "mosi_off", + "mosi_on"; + pinctrl-0 = <&aud_default_pins>; + pinctrl-1 = <&aud_dmic_pins>; + pinctrl-2 = <&aud_miso_off_pins>; + pinctrl-3 = <&aud_miso_on_pins>; + pinctrl-4 = <&aud_mosi_off_pins>; + pinctrl-5 = <&aud_mosi_on_pins>; + mediatek,platform = <&afe>; + + /* hdmi interface */ + dai-link-0 { + link-name = "I2S_OUT_BE"; + sound-dai = <&afe>; + + codec { + sound-dai = <&it66121hdmitx>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/sound/microchip,sama7g5-spdifrx.yaml b/Documentation/devicetree/bindings/sound/microchip,sama7g5-spdifrx.yaml index 2f43c684ab88d9..7fbab5871be4a7 100644 --- a/Documentation/devicetree/bindings/sound/microchip,sama7g5-spdifrx.yaml +++ b/Documentation/devicetree/bindings/sound/microchip,sama7g5-spdifrx.yaml @@ -13,6 +13,9 @@ description: The Microchip Sony/Philips Digital Interface Receiver is a serial port compliant with the IEC-60958 standard. 
+allOf: + - $ref: dai-common.yaml# + properties: "#sound-dai-cells": const: 0 @@ -53,7 +56,7 @@ required: - dmas - dma-names -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/sound/mxs-saif.txt b/Documentation/devicetree/bindings/sound/mxs-saif.txt deleted file mode 100644 index 7ba07a118e370d..00000000000000 --- a/Documentation/devicetree/bindings/sound/mxs-saif.txt +++ /dev/null @@ -1,41 +0,0 @@ -* Freescale MXS Serial Audio Interface (SAIF) - -Required properties: -- compatible: Should be "fsl,-saif" -- reg: Should contain registers location and length -- interrupts: Should contain ERROR interrupt number -- dmas: DMA specifier, consisting of a phandle to DMA controller node - and SAIF DMA channel ID. - Refer to dma.txt and fsl-mxs-dma.txt for details. -- dma-names: Must be "rx-tx". - -Optional properties: -- fsl,saif-master: phandle to the master SAIF. It's only required for - the slave SAIF. - -Note: Each SAIF controller should have an alias correctly numbered -in "aliases" node. - -Example: - -aliases { - saif0 = &saif0; - saif1 = &saif1; -}; - -saif0: saif@80042000 { - compatible = "fsl,imx28-saif"; - reg = <0x80042000 2000>; - interrupts = <59>; - dmas = <&dma_apbx 4>; - dma-names = "rx-tx"; -}; - -saif1: saif@80046000 { - compatible = "fsl,imx28-saif"; - reg = <0x80046000 2000>; - interrupts = <58>; - dmas = <&dma_apbx 5>; - dma-names = "rx-tx"; - fsl,saif-master = <&saif0>; -}; diff --git a/Documentation/devicetree/bindings/sound/pcm512x.txt b/Documentation/devicetree/bindings/sound/pcm512x.txt deleted file mode 100644 index 47878a6df60872..00000000000000 --- a/Documentation/devicetree/bindings/sound/pcm512x.txt +++ /dev/null @@ -1,53 +0,0 @@ -PCM512x and TAS575x audio CODECs/amplifiers - -These devices support both I2C and SPI (configured with pin strapping -on the board). The TAS575x devices only support I2C. - -Required properties: - - - compatible : One of "ti,pcm5121", "ti,pcm5122", "ti,pcm5141", - "ti,pcm5142", "ti,pcm5242", "ti,tas5754" or "ti,tas5756" - - - reg : the I2C address of the device for I2C, the chip select - number for SPI. - - - AVDD-supply, DVDD-supply, and CPVDD-supply : power supplies for the - device, as covered in bindings/regulator/regulator.txt - -Optional properties: - - - clocks : A clock specifier for the clock connected as SCLK. If this - is absent the device will be configured to clock from BCLK. If pll-in - and pll-out are specified in addition to a clock, the device is - configured to accept clock input on a specified gpio pin. - - - pll-in, pll-out : gpio pins used to connect the pll using <1> - through <6>. The device will be configured for clock input on the - given pll-in pin and PLL output on the given pll-out pin. An - external connection from the pll-out pin to the SCLK pin is assumed. 
- Caution: the TAS-desvices only support gpios 1,2 and 3 - -Examples: - - pcm5122: pcm5122@4c { - compatible = "ti,pcm5122"; - reg = <0x4c>; - - AVDD-supply = <®_3v3_analog>; - DVDD-supply = <®_1v8>; - CPVDD-supply = <®_3v3>; - }; - - - pcm5142: pcm5142@4c { - compatible = "ti,pcm5142"; - reg = <0x4c>; - - AVDD-supply = <®_3v3_analog>; - DVDD-supply = <®_1v8>; - CPVDD-supply = <®_3v3>; - - clocks = <&sck>; - pll-in = <3>; - pll-out = <6>; - }; diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc-sndcard.yaml b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc-sndcard.yaml new file mode 100644 index 00000000000000..6ad451549036f8 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc-sndcard.yaml @@ -0,0 +1,205 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/qcom,apq8016-sbc-sndcard.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm APQ8016 and similar sound cards + +maintainers: + - Srinivas Kandagatla + - Stephan Gerhold + +properties: + compatible: + enum: + - qcom,apq8016-sbc-sndcard + - qcom,msm8916-qdsp6-sndcard + + reg: + items: + - description: Microphone I/O mux register address + - description: Speaker I/O mux register address + + reg-names: + items: + - const: mic-iomux + - const: spkr-iomux + + audio-routing: + $ref: /schemas/types.yaml#/definitions/non-unique-string-array + description: + A list of the connections between audio components. Each entry is a + pair of strings, the first being the connection's sink, the second + being the connection's source. Valid names could be power supplies, + MicBias of codec and the jacks on the board. + + aux-devs: + $ref: /schemas/types.yaml#/definitions/phandle-array + description: | + List of phandles pointing to auxiliary devices, such + as amplifiers, to be added to the sound card. + + model: + $ref: /schemas/types.yaml#/definitions/string + description: User visible long sound card name + + pin-switches: + description: List of widget names for which pin switches should be created. + $ref: /schemas/types.yaml#/definitions/string-array + + widgets: + description: User specified audio sound widgets. + $ref: /schemas/types.yaml#/definitions/non-unique-string-array + +patternProperties: + ".*-dai-link$": + description: + Each subnode represents a dai link. Subnodes of each dai links would be + cpu/codec dais. + + type: object + + properties: + link-name: + description: Indicates dai-link name and PCM stream name. + $ref: /schemas/types.yaml#/definitions/string + maxItems: 1 + + cpu: + description: Holds subnode which indicates cpu dai. + type: object + additionalProperties: false + + properties: + sound-dai: + maxItems: 1 + + platform: + description: Holds subnode which indicates platform dai. + type: object + additionalProperties: false + + properties: + sound-dai: + maxItems: 1 + + codec: + description: Holds subnode which indicates codec dai. 
+ type: object + additionalProperties: false + + properties: + sound-dai: + minItems: 1 + maxItems: 8 + + required: + - link-name + - cpu + + additionalProperties: false + +required: + - compatible + - reg + - reg-names + - model + +additionalProperties: false + +examples: + - | + #include + sound@7702000 { + compatible = "qcom,apq8016-sbc-sndcard"; + reg = <0x07702000 0x4>, <0x07702004 0x4>; + reg-names = "mic-iomux", "spkr-iomux"; + + model = "DB410c"; + audio-routing = + "AMIC2", "MIC BIAS Internal2", + "AMIC3", "MIC BIAS External1"; + + pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>; + pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>; + pinctrl-names = "default", "sleep"; + + quaternary-dai-link { + link-name = "ADV7533"; + cpu { + sound-dai = <&lpass MI2S_QUATERNARY>; + }; + codec { + sound-dai = <&adv_bridge 0>; + }; + }; + + primary-dai-link { + link-name = "WCD"; + cpu { + sound-dai = <&lpass MI2S_PRIMARY>; + }; + codec { + sound-dai = <&lpass_codec 0>, <&wcd_codec 0>; + }; + }; + + tertiary-dai-link { + link-name = "WCD-Capture"; + cpu { + sound-dai = <&lpass MI2S_TERTIARY>; + }; + codec { + sound-dai = <&lpass_codec 1>, <&wcd_codec 1>; + }; + }; + }; + + - | + #include + #include + sound@7702000 { + compatible = "qcom,msm8916-qdsp6-sndcard"; + reg = <0x07702000 0x4>, <0x07702004 0x4>; + reg-names = "mic-iomux", "spkr-iomux"; + + model = "msm8916"; + widgets = + "Speaker", "Speaker", + "Headphone", "Headphones"; + pin-switches = "Speaker"; + audio-routing = + "Speaker", "Speaker Amp OUT", + "Speaker Amp IN", "HPH_R", + "Headphones", "HPH_L", + "Headphones", "HPH_R", + "AMIC1", "MIC BIAS Internal1", + "AMIC2", "MIC BIAS Internal2", + "AMIC3", "MIC BIAS Internal3"; + aux-devs = <&speaker_amp>; + + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&cdc_pdm_lines_act>; + pinctrl-1 = <&cdc_pdm_lines_sus>; + + mm1-dai-link { + link-name = "MultiMedia1"; + cpu { + sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>; + }; + }; + + primary-dai-link { + link-name = "Primary MI2S"; + cpu { + sound-dai = <&q6afedai PRIMARY_MI2S_RX>; + }; + platform { + sound-dai = <&q6routing>; + }; + codec { + sound-dai = <&lpass_codec 0>, <&wcd_codec 0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml index 06b5f7be360829..6f5644a89febbd 100644 --- a/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml @@ -64,6 +64,7 @@ allOf: compatible: enum: - qcom,sc7280-lpass-wsa-macro + - qcom,sm8250-lpass-wsa-macro - qcom,sm8450-lpass-wsa-macro - qcom,sc8280xp-lpass-wsa-macro then: @@ -79,24 +80,6 @@ allOf: - const: dcodec - const: fsgen - - if: - properties: - compatible: - enum: - - qcom,sm8250-lpass-wsa-macro - then: - properties: - clocks: - minItems: 6 - clock-names: - items: - - const: mclk - - const: npl - - const: macro - - const: dcodec - - const: va - - const: fsgen - - if: properties: compatible: @@ -130,8 +113,7 @@ examples: <&audiocc 0>, <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, - <&aoncc LPASS_CDC_VA_MCLK>, <&vamacro>; - clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen"; + clock-names = "mclk", "npl", "macro", "dcodec", "fsgen"; clock-output-names = "mclk"; }; diff --git a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml 
b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml index c9076dcd44c116..2e2e01493a5f4f 100644 --- a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml @@ -27,11 +27,10 @@ properties: - qcom,sm8650-sndcard - const: qcom,sm8450-sndcard - enum: - - qcom,apq8016-sbc-sndcard - qcom,apq8096-sndcard - - qcom,msm8916-qdsp6-sndcard - qcom,qcm6490-idp-sndcard - qcom,qcs6490-rb3gen2-sndcard + - qcom,qrb4210-rb2-sndcard - qcom,qrb5165-rb5-sndcard - qcom,sc7180-qdsp6-sndcard - qcom,sc8280xp-sndcard @@ -58,18 +57,6 @@ properties: $ref: /schemas/types.yaml#/definitions/string description: User visible long sound card name - pin-switches: - description: List of widget names for which pin switches should be created. - $ref: /schemas/types.yaml#/definitions/string-array - - widgets: - description: User specified audio sound widgets. - $ref: /schemas/types.yaml#/definitions/non-unique-string-array - - # Only valid for some compatibles (see allOf if below) - reg: true - reg-names: true - patternProperties: ".*-dai-link$": description: @@ -122,34 +109,6 @@ required: - compatible - model -allOf: - - if: - properties: - compatible: - contains: - enum: - - qcom,apq8016-sbc-sndcard - - qcom,msm8916-qdsp6-sndcard - then: - properties: - reg: - items: - - description: Microphone I/O mux register address - - description: Speaker I/O mux register address - reg-names: - items: - - const: mic-iomux - - const: spkr-iomux - required: - - compatible - - model - - reg - - reg-names - else: - properties: - reg: false - reg-names: false - additionalProperties: false examples: @@ -231,98 +190,3 @@ examples: }; }; }; - - - | - #include - sound@7702000 { - compatible = "qcom,apq8016-sbc-sndcard"; - reg = <0x07702000 0x4>, <0x07702004 0x4>; - reg-names = "mic-iomux", "spkr-iomux"; - - model = "DB410c"; - audio-routing = - "AMIC2", "MIC BIAS Internal2", - "AMIC3", "MIC BIAS External1"; - - pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>; - pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>; - pinctrl-names = "default", "sleep"; - - quaternary-dai-link { - link-name = "ADV7533"; - cpu { - sound-dai = <&lpass MI2S_QUATERNARY>; - }; - codec { - sound-dai = <&adv_bridge 0>; - }; - }; - - primary-dai-link { - link-name = "WCD"; - cpu { - sound-dai = <&lpass MI2S_PRIMARY>; - }; - codec { - sound-dai = <&lpass_codec 0>, <&wcd_codec 0>; - }; - }; - - tertiary-dai-link { - link-name = "WCD-Capture"; - cpu { - sound-dai = <&lpass MI2S_TERTIARY>; - }; - codec { - sound-dai = <&lpass_codec 1>, <&wcd_codec 1>; - }; - }; - }; - - - | - #include - #include - sound@7702000 { - compatible = "qcom,msm8916-qdsp6-sndcard"; - reg = <0x07702000 0x4>, <0x07702004 0x4>; - reg-names = "mic-iomux", "spkr-iomux"; - - model = "msm8916"; - widgets = - "Speaker", "Speaker", - "Headphone", "Headphones"; - pin-switches = "Speaker"; - audio-routing = - "Speaker", "Speaker Amp OUT", - "Speaker Amp IN", "HPH_R", - "Headphones", "HPH_L", - "Headphones", "HPH_R", - "AMIC1", "MIC BIAS Internal1", - "AMIC2", "MIC BIAS Internal2", - "AMIC3", "MIC BIAS Internal3"; - aux-devs = <&speaker_amp>; - - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&cdc_pdm_lines_act>; - pinctrl-1 = <&cdc_pdm_lines_sus>; - - mm1-dai-link { - link-name = "MultiMedia1"; - cpu { - sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>; - }; - }; - - primary-dai-link { - link-name = "Primary MI2S"; - cpu { - sound-dai = <&q6afedai PRIMARY_MI2S_RX>; - }; - 
platform { - sound-dai = <&q6routing>; - }; - codec { - sound-dai = <&lpass_codec 0>, <&wcd_codec 0>; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5616.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5616.yaml index 248320804e5fcf..29071044c66e98 100644 --- a/Documentation/devicetree/bindings/sound/realtek,rt5616.yaml +++ b/Documentation/devicetree/bindings/sound/realtek,rt5616.yaml @@ -30,6 +30,18 @@ properties: reg: maxItems: 1 + clocks: + items: + - description: Master clock to the CODEC + + clock-names: + items: + - const: mclk + + port: + $ref: audio-graph-port.yaml# + unevaluatedProperties: false + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml index 07ec6247d9defc..6d0d1514cd4211 100644 --- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml +++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml @@ -112,6 +112,12 @@ properties: description: List of necessary clock names. # details are defined below + post-init-providers: + description: At least if rsnd is using DPCM connection on Audio-Graph-Card2, + fw_devlink might doesn't have enough information to break the cycle. rsnd + driver will not be probed in such case. Same problem might occur with + Multi-CPU/Codec or Codec2Codec. + # ports is below port: $ref: audio-graph-port.yaml#/definitions/port-base @@ -296,7 +302,7 @@ allOf: reg-names: items: enum: - - scu + - sru - ssi - adg # for Gen2/Gen3 diff --git a/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml b/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml index 8b9695f5deccb9..f4610eaed1e188 100644 --- a/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml +++ b/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml @@ -87,6 +87,10 @@ properties: '#sound-dai-cells': const: 0 + port: + $ref: audio-graph-port.yaml#/definitions/port-base + description: Connection to controller providing I2S signals + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.yaml b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml index b77284e3e26aa7..c3dea852cc8d34 100644 --- a/Documentation/devicetree/bindings/sound/samsung,odroid.yaml +++ b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml @@ -27,11 +27,6 @@ properties: - const: samsung,odroid-xu4-audio deprecated: true - assigned-clock-parents: true - assigned-clock-rates: true - assigned-clocks: true - clocks: true - cpu: type: object additionalProperties: false diff --git a/Documentation/devicetree/bindings/sound/serial-midi.yaml b/Documentation/devicetree/bindings/sound/serial-midi.yaml index f6a807329a5acb..3b2f6dd5bffa02 100644 --- a/Documentation/devicetree/bindings/sound/serial-midi.yaml +++ b/Documentation/devicetree/bindings/sound/serial-midi.yaml @@ -22,6 +22,9 @@ description: configure the clocks of the parent serial device so that a requested baud of 38.4 kBaud results in the standard MIDI baud rate, and set the 'current-speed' property to 38400 (default) +allOf: + - $ref: /schemas/serial/serial-peripheral-props.yaml# + properties: compatible: const: serial-midi diff --git a/Documentation/devicetree/bindings/sound/st,sta350.txt b/Documentation/devicetree/bindings/sound/st,sta350.txt index 307398ef2317d4..e3d84864e0e425 100644 --- a/Documentation/devicetree/bindings/sound/st,sta350.txt +++ b/Documentation/devicetree/bindings/sound/st,sta350.txt @@ -77,7 +77,7 @@ Optional 
properties: - st,odd-pwm-speed-mode: If present, PWM speed mode run on odd speed mode (341.3 kHz) on all - channels. If not present, normal PWM spped mode (384 kHz) will be used. + channels. If not present, normal PWM speed mode (384 kHz) will be used. - st,distortion-compensation: If present, distortion compensation variable uses DCC coefficient. diff --git a/Documentation/devicetree/bindings/sound/ti,pcm512x.yaml b/Documentation/devicetree/bindings/sound/ti,pcm512x.yaml new file mode 100644 index 00000000000000..21ea9ff5a2bb79 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/ti,pcm512x.yaml @@ -0,0 +1,101 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/ti,pcm512x.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: PCM512x and TAS575x audio CODECs/amplifiers + +maintainers: + - Animesh Agarwal + +allOf: + - $ref: dai-common.yaml# + +properties: + compatible: + enum: + - ti,pcm5121 + - ti,pcm5122 + - ti,pcm5141 + - ti,pcm5142 + - ti,pcm5242 + - ti,tas5754 + - ti,tas5756 + + reg: + maxItems: 1 + + AVDD-supply: true + + DVDD-supply: true + + CPVDD-supply: true + + clocks: + maxItems: 1 + description: A clock specifier for the clock connected as SCLK. If this is + absent the device will be configured to clock from BCLK. If pll-in and + pll-out are specified in addition to a clock, the device is configured to + accept clock input on a specified gpio pin. + + '#sound-dai-cells': + const: 0 + + pll-in: + description: GPIO pin used to connect the pll using <1> through <6>. The + device will be configured for clock input on the given pll-in pin. + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 1 + maximum: 6 + + pll-out: + description: GPIO pin used to connect the pll using <1> through <6>. The + device will be configured for PLL output on the given pll-out pin. An + external connection from the pll-out pin to the SCLK pin is assumed. 
+ $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 1 + maximum: 6 + +required: + - compatible + - reg + - AVDD-supply + - DVDD-supply + - CPVDD-supply + +if: + properties: + compatible: + contains: + enum: + - ti,tas5754 + - ti,tas5756 + +then: + properties: + pll-in: + maximum: 3 + + pll-out: + maximum: 3 + +unevaluatedProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + codec@4c { + compatible = "ti,pcm5142"; + reg = <0x4c>; + AVDD-supply = <®_3v3_analog>; + DVDD-supply = <®_1v8>; + CPVDD-supply = <®_3v3>; + #sound-dai-cells = <0>; + clocks = <&sck>; + pll-in = <3>; + pll-out = <6>; + }; + }; diff --git a/Documentation/devicetree/bindings/sound/ti,tlv320dac3100.yaml b/Documentation/devicetree/bindings/sound/ti,tlv320dac3100.yaml new file mode 100644 index 00000000000000..85e937e34962da --- /dev/null +++ b/Documentation/devicetree/bindings/sound/ti,tlv320dac3100.yaml @@ -0,0 +1,127 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/ti,tlv320dac3100.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments - tlv320aic31xx Codec module + +maintainers: + - Shenghao Ding + +description: | + CODEC output pins: + * HPL + * HPR + * SPL, devices with stereo speaker amp + * SPR, devices with stereo speaker amp + * SPK, devices with mono speaker amp + * MICBIAS + + CODEC input pins: + * MIC1LP, devices with ADC + * MIC1RP, devices with ADC + * MIC1LM, devices with ADC + * AIN1, devices without ADC + * AIN2, devices without ADC + + The pins can be used in referring sound node's audio-routing property. + +properties: + compatible: + enum: + - ti,tlv320aic310x # - Generic TLV320AIC31xx with mono speaker amp + - ti,tlv320aic311x # - Generic TLV320AIC31xx with stereo speaker amp + - ti,tlv320aic3100 # - TLV320AIC3100 (mono speaker amp, no MiniDSP) + - ti,tlv320aic3110 # - TLV320AIC3110 (stereo speaker amp, no MiniDSP) + - ti,tlv320aic3120 # - TLV320AIC3120 (mono speaker amp, MiniDSP) + - ti,tlv320aic3111 # - TLV320AIC3111 (stereo speaker amp, MiniDSP) + - ti,tlv320dac3100 # - TLV320DAC3100 (no ADC, mono speaker amp, no MiniDSP) + - ti,tlv320dac3101 # - TLV320DAC3101 (no ADC, stereo speaker amp, no MiniDSP) + + reg: + maxItems: 1 + + '#sound-dai-cells': + const: 0 + + HPVDD-supply: true + + SPRVDD-supply: true + + SPLVDD-supply: true + + AVDD-supply: true + + IOVDD-supply: true + + DVDD-supply: true + + reset-gpios: + description: GPIO specification for the active low RESET input. 
+ + ai31xx-micbias-vg: + $ref: /schemas/types.yaml#/definitions/uint32 + default: 1 + enum: [1, 2, 3] + description: | + MicBias Voltage setting + 1 or MICBIAS_2_0V - MICBIAS output is powered to 2.0V + 2 or MICBIAS_2_5V - MICBIAS output is powered to 2.5V + 3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD + + ai31xx-ocmv: + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1, 2, 3] + description: | + output common-mode voltage setting + 0 - 1.35V, + 1 - 1.5V, + 2 - 1.65V, + 3 - 1.8V + + gpio-reset: + description: gpio pin number used for codec reset + deprecated: true + + +required: + - compatible + - reg + - HPVDD-supply + - SPRVDD-supply + - SPLVDD-supply + - AVDD-supply + - IOVDD-supply + - DVDD-supply + +allOf: + - $ref: dai-common.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + sound@18 { + compatible = "ti,tlv320aic311x"; + reg = <0x18>; + + ai31xx-micbias-vg = ; + reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>; + + HPVDD-supply = <®ulator>; + SPRVDD-supply = <®ulator>; + SPLVDD-supply = <®ulator>; + AVDD-supply = <®ulator>; + IOVDD-supply = <®ulator>; + DVDD-supply = <®ulator>; + }; + }; + diff --git a/Documentation/devicetree/bindings/sound/ti,tpa6130a2.yaml b/Documentation/devicetree/bindings/sound/ti,tpa6130a2.yaml new file mode 100644 index 00000000000000..a42bf9bde69400 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/ti,tpa6130a2.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/ti,tpa6130a2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments - tpa6130a2 Codec module + +maintainers: + - Sebastian Reichel + +description: + Stereo, analog input headphone amplifier + +properties: + compatible: + enum: + - ti,tpa6130a2 + - ti,tpa6140a2 + + reg: + maxItems: 1 + + Vdd-supply: + description: power supply regulator + + power-gpio: + description: gpio pin to power the device + +required: + - compatible + - reg + - Vdd-supply + +allOf: + - $ref: dai-common.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + amplifier@60 { + compatible = "ti,tpa6130a2"; + reg = <0x60>; + Vdd-supply = <&vmmc2>; + power-gpio = <&gpio4 2 GPIO_ACTIVE_HIGH>; + }; + }; + diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt deleted file mode 100644 index bbad98d5b98622..00000000000000 --- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt +++ /dev/null @@ -1,77 +0,0 @@ -Texas Instruments - tlv320aic31xx Codec module - -The tlv320aic31xx serial control bus communicates through I2C protocols - -Required properties: - -- compatible - "string" - One of: - "ti,tlv320aic310x" - Generic TLV320AIC31xx with mono speaker amp - "ti,tlv320aic311x" - Generic TLV320AIC31xx with stereo speaker amp - "ti,tlv320aic3100" - TLV320AIC3100 (mono speaker amp, no MiniDSP) - "ti,tlv320aic3110" - TLV320AIC3110 (stereo speaker amp, no MiniDSP) - "ti,tlv320aic3120" - TLV320AIC3120 (mono speaker amp, MiniDSP) - "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP) - "ti,tlv320dac3100" - TLV320DAC3100 (no ADC, mono speaker amp, no MiniDSP) - "ti,tlv320dac3101" - TLV320DAC3101 (no ADC, stereo speaker amp, no MiniDSP) - -- reg - - I2C slave address -- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply, - DVDD-supply : 
power supplies for the device as covered in - Documentation/devicetree/bindings/regulator/regulator.txt - - -Optional properties: - -- reset-gpios - GPIO specification for the active low RESET input. -- ai31xx-micbias-vg - MicBias Voltage setting - 1 or MICBIAS_2_0V - MICBIAS output is powered to 2.0V - 2 or MICBIAS_2_5V - MICBIAS output is powered to 2.5V - 3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD - If this node is not mentioned or if the value is unknown, then - micbias is set to 2.0V. -- ai31xx-ocmv - output common-mode voltage setting - 0 - 1.35V, - 1 - 1.5V, - 2 - 1.65V, - 3 - 1.8V - -Deprecated properties: - -- gpio-reset - gpio pin number used for codec reset - -CODEC output pins: - * HPL - * HPR - * SPL, devices with stereo speaker amp - * SPR, devices with stereo speaker amp - * SPK, devices with mono speaker amp - * MICBIAS - -CODEC input pins: - * MIC1LP, devices with ADC - * MIC1RP, devices with ADC - * MIC1LM, devices with ADC - * AIN1, devices without ADC - * AIN2, devices without ADC - -The pins can be used in referring sound node's audio-routing property. - -Example: -#include -#include - -tlv320aic31xx: tlv320aic31xx@18 { - compatible = "ti,tlv320aic311x"; - reg = <0x18>; - - ai31xx-micbias-vg = ; - - reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>; - - HPVDD-supply = <®ulator>; - SPRVDD-supply = <®ulator>; - SPLVDD-supply = <®ulator>; - AVDD-supply = <®ulator>; - IOVDD-supply = <®ulator>; - DVDD-supply = <®ulator>; -}; diff --git a/Documentation/devicetree/bindings/sound/tpa6130a2.txt b/Documentation/devicetree/bindings/sound/tpa6130a2.txt deleted file mode 100644 index 6dfa740e4b2d87..00000000000000 --- a/Documentation/devicetree/bindings/sound/tpa6130a2.txt +++ /dev/null @@ -1,27 +0,0 @@ -Texas Instruments - tpa6130a2 Codec module - -The tpa6130a2 serial control bus communicates through I2C protocols - -Required properties: - -- compatible - "string" - One of: - "ti,tpa6130a2" - TPA6130A2 - "ti,tpa6140a2" - TPA6140A2 - - -- reg - - I2C slave address - -- Vdd-supply - - power supply regulator - -Optional properties: - -- power-gpio - gpio pin to power the device - -Example: - -tpa6130a2: tpa6130a2@60 { - compatible = "ti,tpa6130a2"; - reg = <0x60>; - Vdd-supply = <&vmmc2>; - power-gpio = <&gpio4 2 GPIO_ACTIVE_HIGH>; -}; diff --git a/Documentation/devicetree/bindings/spi/cdns,xspi.yaml b/Documentation/devicetree/bindings/spi/cdns,xspi.yaml index eb0f9246818506..38a5795589de25 100644 --- a/Documentation/devicetree/bindings/spi/cdns,xspi.yaml +++ b/Documentation/devicetree/bindings/spi/cdns,xspi.yaml @@ -15,24 +15,27 @@ description: | single, dual, quad or octal wire transmission modes for read/write access to slaves such as SPI-NOR flash. 
-allOf: - - $ref: spi-controller.yaml# - properties: compatible: - const: cdns,xspi-nor + enum: + - cdns,xspi-nor + - marvell,cn10-xspi-nor reg: items: - description: address and length of the controller register set - description: address and length of the Slave DMA data port - description: address and length of the auxiliary registers + - description: address and length of the xfer registers + minItems: 3 reg-names: items: - const: io - const: sdma - const: aux + - const: xfer + minItems: 3 interrupts: maxItems: 1 @@ -42,6 +45,27 @@ required: - reg - interrupts +allOf: + - $ref: spi-controller.yaml# + - if: + properties: + compatible: + contains: + enum: + - marvell,cn10-xspi-nor + then: + properties: + reg: + minItems: 4 + reg-names: + minItems: 4 + else: + properties: + reg: + maxItems: 3 + reg-names: + maxItems: 3 + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml b/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml new file mode 100644 index 00000000000000..73980a27dc0093 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/spi/cirrus,ep9301-spi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: EP93xx SoC SPI controller + +maintainers: + - Alexander Sverdlin + - Nikita Shubin + +allOf: + - $ref: spi-controller.yaml# + +properties: + compatible: + oneOf: + - const: cirrus,ep9301-spi + - items: + - enum: + - cirrus,ep9302-spi + - cirrus,ep9307-spi + - cirrus,ep9312-spi + - cirrus,ep9315-spi + - const: cirrus,ep9301-spi + + reg: + items: + - description: SPI registers region + + interrupts: + maxItems: 1 + + clocks: + items: + - description: SPI Controller reference clock source + + dmas: + items: + - description: rx DMA channel + - description: tx DMA channel + + dma-names: + items: + - const: rx + - const: tx + +required: + - compatible + - reg + - interrupts + - clocks + +unevaluatedProperties: false + +examples: + - | + #include + #include + spi@808a0000 { + compatible = "cirrus,ep9301-spi"; + reg = <0x808a0000 0x18>; + interrupt-parent = <&vic1>; + interrupts = <21>; + clocks = <&syscon EP93XX_CLK_SPI>; + dmas = <&dma1 10 2>, <&dma1 10 1>; + dma-names = "rx", "tx"; + cs-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>; + }; diff --git a/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml b/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml index b6249880c3f927..e1f5bfa4433cfd 100644 --- a/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml +++ b/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml @@ -33,6 +33,7 @@ properties: - const: mediatek,mt6765-spi - items: - enum: + - mediatek,mt7981-spi-ipm - mediatek,mt7986-spi-ipm - mediatek,mt8188-spi-ipm - const: mediatek,spi-ipm diff --git a/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml b/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml index ffa8d1b48f8bd1..62a568bdbfa05f 100644 --- a/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml +++ b/Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml @@ -17,9 +17,14 @@ properties: compatible: oneOf: - items: - - const: microchip,mpfs-qspi + - enum: + - microchip,mpfs-qspi + - microchip,pic64gx-qspi - const: microchip,coreqspi-rtl-v2 - const: microchip,coreqspi-rtl-v2 # FPGA QSPI + - items: + - const: microchip,pic64gx-spi + - const: microchip,mpfs-spi - const: 
microchip,mpfs-spi reg: diff --git a/Documentation/devicetree/bindings/spi/nxp,sc18is.yaml b/Documentation/devicetree/bindings/spi/nxp,sc18is.yaml new file mode 100644 index 00000000000000..43753a94837c42 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/nxp,sc18is.yaml @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/spi/nxp,sc18is.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP SC18IS602/SC18IS603 I2C to SPI bridge + +maintainers: + - Frank Li + +properties: + compatible: + enum: + - nxp,sc18is602 + - nxp,sc18is602b + - nxp,sc18is603 + + reg: + maxItems: 1 + + clock-frequency: + $ref: /schemas/types.yaml#/definitions/uint32 + default: 7372000 + description: + external oscillator clock frequency. The clock-frequency property is + relevant and needed only if the chip has an external oscillator + (SC18IS603). + +allOf: + - $ref: spi-controller.yaml# + +unevaluatedProperties: false + +required: + - compatible + - reg + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + spi@28 { + compatible = "nxp,sc18is603"; + reg = <0x28>; + clock-frequency = <14744000>; + }; + }; + diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml index 4a5f41bde00f3c..902db92da83207 100644 --- a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml +++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml @@ -21,6 +21,7 @@ properties: - nxp,imx8mm-fspi - nxp,imx8mp-fspi - nxp,imx8qxp-fspi + - nxp,imx8ulp-fspi - nxp,lx2160a-fspi - items: - enum: diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml index e4941e9212d139..46d9d6ee092348 100644 --- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml +++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml @@ -35,6 +35,7 @@ properties: - rockchip,rk3368-spi - rockchip,rk3399-spi - rockchip,rk3568-spi + - rockchip,rk3576-spi - rockchip,rk3588-spi - rockchip,rv1126-spi - const: rockchip,rk3066-spi diff --git a/Documentation/devicetree/bindings/spi/spi-sc18is602.txt b/Documentation/devicetree/bindings/spi/spi-sc18is602.txt deleted file mode 100644 index 02f9033270a246..00000000000000 --- a/Documentation/devicetree/bindings/spi/spi-sc18is602.txt +++ /dev/null @@ -1,23 +0,0 @@ -NXP SC18IS602/SCIS603 - -Required properties: - - compatible : Should be one of - "nxp,sc18is602" - "nxp,sc18is602b" - "nxp,sc18is603" - - reg: I2C bus address - -Optional properties: - - clock-frequency : external oscillator clock frequency. If not - specified, the SC18IS602 default frequency (7372000) will be used. - -The clock-frequency property is relevant and needed only if the chip has an -external oscillator (SC18IS603). 
- -Example: - - sc18is603@28 { - compatible = "nxp,sc18is603"; - reg = <0x28>; - clock-frequency = <14744000>; - } diff --git a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml index 725303e1a364c4..70b273271754b9 100644 --- a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml @@ -32,6 +32,9 @@ properties: clocks: maxItems: 1 + power-domains: + maxItems: 1 + amlogic,ao-secure: description: phandle to the ao-secure syscon $ref: /schemas/types.yaml#/definitions/phandle diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml index 72048c5a0412e3..a12fddc8195500 100644 --- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml +++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml @@ -51,6 +51,7 @@ properties: - qcom,msm8996-tsens - qcom,msm8998-tsens - qcom,qcm2290-tsens + - qcom,sa8255p-tsens - qcom,sa8775p-tsens - qcom,sc7180-tsens - qcom,sc7280-tsens @@ -310,7 +311,7 @@ examples: - | #include - // Example 1 (new calbiration data: for pre v1 IP): + // Example 1 (new calibration data: for pre v1 IP): thermal-sensor@4a9000 { compatible = "qcom,msm8916-tsens", "qcom,tsens-v0_1"; reg = <0x4a9000 0x1000>, /* TM */ diff --git a/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.txt b/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.txt deleted file mode 100644 index 844bd5fbd04c13..00000000000000 --- a/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.txt +++ /dev/null @@ -1,22 +0,0 @@ -BCM2835 System Timer - -The System Timer peripheral provides four 32-bit timer channels and a -single 64-bit free running counter. Each channel has an output compare -register, which is compared against the 32 least significant bits of the -free running counter values, and generates an interrupt. - -Required properties: - -- compatible : should be "brcm,bcm2835-system-timer" -- reg : Specifies base physical address and size of the registers. -- interrupts : A list of 4 interrupt sinks; one per timer channel. -- clock-frequency : The frequency of the clock that drives the counter, in Hz. - -Example: - -timer { - compatible = "brcm,bcm2835-system-timer"; - reg = <0x7e003000 0x1000>; - interrupts = <1 0>, <1 1>, <1 2>, <1 3>; - clock-frequency = <1000000>; -}; diff --git a/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.yaml b/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.yaml new file mode 100644 index 00000000000000..f5804b5b0e63a6 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/brcm,bcm2835-system-timer.yaml @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/timer/brcm,bcm2835-system-timer.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: BCM2835 System Timer + +maintainers: + - Stefan Wahren + - Raspberry Pi Kernel Maintenance + +description: + The System Timer peripheral provides four 32-bit timer channels and a + single 64-bit free running counter. Each channel has an output compare + register, which is compared against the 32 least significant bits of the + free running counter values, and generates an interrupt. 
+ +properties: + compatible: + const: brcm,bcm2835-system-timer + + reg: + maxItems: 1 + + interrupts: + items: + - description: System Timer Compare 0 match (used by VideoCore GPU) + - description: System Timer Compare 1 match (usable for ARM core) + - description: System Timer Compare 2 match (used by VideoCore GPU) + - description: System Timer Compare 3 match (usable for ARM core) + + clock-frequency: true + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + timer@7e003000 { + compatible = "brcm,bcm2835-system-timer"; + reg = <0x7e003000 0x1000>; + interrupts = <1 0>, <1 1>, <1 2>, <1 3>; + clock-frequency = <1000000>; + }; +... diff --git a/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt b/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt deleted file mode 100644 index aa8c40230e5e08..00000000000000 --- a/Documentation/devicetree/bindings/timer/fsl,ftm-timer.txt +++ /dev/null @@ -1,31 +0,0 @@ -Freescale FlexTimer Module (FTM) Timer - -Required properties: - -- compatible : should be "fsl,ftm-timer" -- reg : Specifies base physical address and size of the register sets for the - clock event device and clock source device. -- interrupts : Should be the clock event device interrupt. -- clocks : The clocks provided by the SoC to drive the timer, must contain an - entry for each entry in clock-names. -- clock-names : Must include the following entries: - o "ftm-evt" - o "ftm-src" - o "ftm-evt-counter-en" - o "ftm-src-counter-en" -- big-endian: One boolean property, the big endian mode will be in use if it is - present, or the little endian mode will be in use for all the device registers. - -Example: -ftm: ftm@400b8000 { - compatible = "fsl,ftm-timer"; - reg = <0x400b8000 0x1000 0x400b9000 0x1000>; - interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>; - clock-names = "ftm-evt", "ftm-src", - "ftm-evt-counter-en", "ftm-src-counter-en"; - clocks = <&clks VF610_CLK_FTM2>, - <&clks VF610_CLK_FTM3>, - <&clks VF610_CLK_FTM2_EXT_FIX_EN>, - <&clks VF610_CLK_FTM3_EXT_FIX_EN>; - big-endian; -}; diff --git a/Documentation/devicetree/bindings/timer/fsl,ftm-timer.yaml b/Documentation/devicetree/bindings/timer/fsl,ftm-timer.yaml new file mode 100644 index 00000000000000..0e4a8ddc3de327 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/fsl,ftm-timer.yaml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/timer/fsl,ftm-timer.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale FlexTimer Module (FTM) Timer + +maintainers: + - Animesh Agarwal + +properties: + compatible: + const: fsl,ftm-timer + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + description: The clocks provided by the SoC to drive the timer, must + contain an entry for each entry in clock-names. 
+ minItems: 4 + maxItems: 4 + + clock-names: + items: + - const: ftm-evt + - const: ftm-src + - const: ftm-evt-counter-en + - const: ftm-src-counter-en + + big-endian: true + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + +additionalProperties: false + +examples: + - | + #include + #include + + ftm@400b8000 { + compatible = "fsl,ftm-timer"; + reg = <0x400b8000 0x1000>; + interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>; + clock-names = "ftm-evt", "ftm-src", "ftm-evt-counter-en", "ftm-src-counter-en"; + clocks = <&clks VF610_CLK_FTM2>, <&clks VF610_CLK_FTM3>, + <&clks VF610_CLK_FTM2_EXT_FIX_EN>, <&clks VF610_CLK_FTM3_EXT_FIX_EN>; + big-endian; + }; diff --git a/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt b/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt deleted file mode 100644 index 51b05a0e70d191..00000000000000 --- a/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt +++ /dev/null @@ -1,26 +0,0 @@ -* NXP LPC3220 timer - -The NXP LPC3220 timer is used on a wide range of NXP SoCs. This -includes LPC32xx, LPC178x, LPC18xx and LPC43xx parts. - -Required properties: -- compatible: - Should be "nxp,lpc3220-timer". -- reg: - Address and length of the register set. -- interrupts: - Reference to the timer interrupt -- clocks: - Should contain a reference to timer clock. -- clock-names: - Should contain "timerclk". - -Example: - -timer1: timer@40085000 { - compatible = "nxp,lpc3220-timer"; - reg = <0x40085000 0x1000>; - interrupts = <13>; - clocks = <&ccu1 CLK_CPU_TIMER1>; - clock-names = "timerclk"; -}; diff --git a/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.yaml b/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.yaml new file mode 100644 index 00000000000000..3ae2eb0625da56 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/timer/nxp,lpc3220-timer.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP LPC3220 timer + +maintainers: + - Animesh Agarwal + +description: | + The NXP LPC3220 timer is used on a wide range of NXP SoCs. This includes + LPC32xx, LPC178x, LPC18xx and LPC43xx parts. 
+ +properties: + compatible: + const: nxp,lpc3220-timer + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: timerclk + + resets: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + +additionalProperties: false + +examples: + - | + #include + #include + + timer@4004c000 { + compatible = "nxp,lpc3220-timer"; + reg = <0x4004c000 0x1000>; + interrupts = <17 IRQ_TYPE_LEVEL_LOW>; + clocks = <&clk LPC32XX_CLK_TIMER1>; + clock-names = "timerclk"; + }; diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml index 19e56b7577a08b..6d0eb0014eee75 100644 --- a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml +++ b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml @@ -24,6 +24,7 @@ properties: - rockchip,rk3228-timer - rockchip,rk3229-timer - rockchip,rk3368-timer + - rockchip,rk3576-timer - rockchip,rk3588-timer - rockchip,px30-timer - const: rockchip,rk3288-timer diff --git a/Documentation/devicetree/bindings/timer/ti,da830-timer.yaml b/Documentation/devicetree/bindings/timer/ti,da830-timer.yaml new file mode 100644 index 00000000000000..e9646f4e86cc91 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/ti,da830-timer.yaml @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/timer/ti,da830-timer.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI DaVinci Timer + +maintainers: + - Kousik Sanagavarapu + +description: | + This is a 64-bit timer found on TI's DaVinci architecture devices. The timer + can be configured as a general-purpose 64-bit timer, dual general-purpose + 32-bit timers. When configured as dual 32-bit timers, each half can operate + in conjunction (chain mode) or independently (unchained mode) of each other. + + The timer is a free running up-counter and can generate interrupts when the + counter reaches preset counter values. + +properties: + compatible: + const: ti,da830-timer + + reg: + maxItems: 1 + + interrupts: + minItems: 2 + maxItems: 10 + + interrupt-names: + minItems: 2 + items: + - const: tint12 + - const: tint34 + - const: cmpint0 + - const: cmpint1 + - const: cmpint2 + - const: cmpint3 + - const: cmpint4 + - const: cmpint5 + - const: cmpint6 + - const: cmpint7 + + clocks: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - interrupt-names + - clocks + +additionalProperties: false + +examples: + - | + timer@20000 { + compatible = "ti,da830-timer"; + reg = <0x20000 0x1000>; + interrupts = <21>, <22>; + interrupt-names = "tint12", "tint34"; + clocks = <&pll0_auxclk>; + }; + +... diff --git a/Documentation/devicetree/bindings/timer/ti,davinci-timer.txt b/Documentation/devicetree/bindings/timer/ti,davinci-timer.txt deleted file mode 100644 index 29bf91ccf5b754..00000000000000 --- a/Documentation/devicetree/bindings/timer/ti,davinci-timer.txt +++ /dev/null @@ -1,37 +0,0 @@ -* Device tree bindings for Texas Instruments DaVinci timer - -This document provides bindings for the 64-bit timer in the DaVinci -architecture devices. The timer can be configured as a general-purpose 64-bit -timer, dual general-purpose 32-bit timers. When configured as dual 32-bit -timers, each half can operate in conjunction (chain mode) or independently -(unchained mode) of each other. 
- -The timer is a free running up-counter and can generate interrupts when the -counter reaches preset counter values. - -Also see ../watchdog/davinci-wdt.txt for timers that are configurable as -watchdog timers. - -Required properties: - -- compatible : should be "ti,da830-timer". -- reg : specifies base physical address and count of the registers. -- interrupts : interrupts generated by the timer. -- interrupt-names: should be "tint12", "tint34", "cmpint0", "cmpint1", - "cmpint2", "cmpint3", "cmpint4", "cmpint5", "cmpint6", - "cmpint7" ("cmpintX" may be omitted if not present in the - hardware). -- clocks : the clock feeding the timer clock. - -Example: - - clocksource: timer@20000 { - compatible = "ti,da830-timer"; - reg = <0x20000 0x1000>; - interrupts = <21>, <22>, <74>, <75>, <76>, <77>, <78>, <79>, - <80>, <81>; - interrupt-names = "tint12", "tint34", "cmpint0", "cmpint1", - "cmpint2", "cmpint3", "cmpint4", "cmpint5", - "cmpint6", "cmpint7"; - clocks = <&pll0_auxclk>; - }; diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml index 7913ca9b6b5402..0108d750721519 100644 --- a/Documentation/devicetree/bindings/trivial-devices.yaml +++ b/Documentation/devicetree/bindings/trivial-devices.yaml @@ -33,15 +33,12 @@ properties: # Acbel fsg032 power supply - acbel,fsg032 # SMBus/I2C Digital Temperature Sensor in 6-Pin SOT with SMBus Alert and Over Temperature Pin - - ad,ad7414 + - ad,ad7414 # Deprecated, use adi,ad7414 + - adi,ad7414 # ADM9240: Complete System Hardware Monitor for uProcessor-Based Systems - ad,adm9240 # AD5110 - Nonvolatile Digital Potentiometer - adi,ad5110 - # Analog Devices ADP5585 Keypad Decoder and I/O Expansion - - adi,adp5585 - # Analog Devices ADP5585 Keypad Decoder and I/O Expansion with support for Row5 - - adi,adp5585-02 # Analog Devices ADP5589 Keypad Decoder and I/O Expansion - adi,adp5589 # Analog Devices LT7182S Dual Channel 6A, 20V PolyPhase Step-Down Silent Switcher @@ -50,8 +47,6 @@ properties: - ams,iaq-core # Temperature monitoring of Astera Labs PT5161L PCIe retimer - asteralabs,pt5161l - # i2c serial eeprom (24cxx) - - at,24c08 # i2c h/w elliptic curve crypto module - atmel,atecc508a # ATSHA204 - i2c h/w symmetric crypto module @@ -74,14 +69,10 @@ properties: - dallas,ds1631 # Total-Elapsed-Time Recorder with Alarm - dallas,ds1682 - # Tiny Digital Thermometer and Thermostat - - dallas,ds1775 # CPU Peripheral Monitor - dallas,ds1780 # CPU Supervisor with Nonvolatile Memory and Programmable I/O - dallas,ds4510 - # Digital Thermometer and Thermostat - - dallas,ds75 # Delta AHE-50DC Open19 power shelf fan control module - delta,ahe50dc-fan # Delta Electronics DPS-650-AB power supply @@ -110,6 +101,8 @@ properties: - domintech,dmard09 # DMARD10: 3-axis Accelerometer - domintech,dmard10 + # Elgin SPI-controlled LCD + - elgin,jg10309-01 # MMA7660FC: 3-Axis Orientation/Motion Detection Sensor - fsl,mma7660 # MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer @@ -164,6 +157,8 @@ properties: - isil,isl29030 # Intersil ISL68137 Digital Output Configurable PWM Controller - isil,isl68137 + # Intersil ISL69260 PMBus Voltage Regulator + - isil,isl69260 # Intersil ISL69269 PMBus Voltage Regulator - isil,isl69269 # Intersil ISL76682 Ambient Light Sensor @@ -182,8 +177,6 @@ properties: - maxim,ds1803-100 # 10 kOhm digital potentiometer with I2C interface - maxim,ds3502 - # Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs - - maxim,max1237 # Temperature Sensor, I2C interface - 
maxim,max1619 # 3-Channel Remote Temperature Sensor @@ -198,8 +191,6 @@ properties: - maxim,max5484 # PECI-to-I2C translator for PECI-to-SMBus/I2C protocol conversion - maxim,max6621 - # 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface - - maxim,max6625 # mCube 3-axis 8-bit digital accelerometer - mcube,mc3230 # Measurement Specialities I2C temperature and humidity sensor @@ -364,8 +355,6 @@ properties: - skyworks,sky81452 # SparkFun Qwiic Joystick (COM-15168) with i2c interface - sparkfun,qwiic-joystick - # i2c serial eeprom (24cxx) - - st,24c256 # Sierra Wireless mangOH Green SPI IoT interface - swir,mangoh-iotport-spi # Ambient Light Sensor with SMBUS/Two Wire Serial Interface @@ -397,8 +386,6 @@ properties: - ti,tmp121 - ti,tmp122 - ti,tmp125 - # Digital Temperature Sensor - - ti,tmp275 # TI DC-DC converter on PMBus - ti,tps40400 # TI Dual channel DCAP+ multiphase controller TPS53676 with AVSBus @@ -412,6 +399,7 @@ properties: - ti,tps544b25 - ti,tps544c20 - ti,tps544c25 + - ti,tps546d24 # I2C Touch-Screen Controller - ti,tsc2003 # Vicor Corporation Digital Supervisor diff --git a/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml b/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml new file mode 100644 index 00000000000000..a44bdf391887f9 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/fsl,ls1028a.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale layerscape SuperSpeed DWC3 USB SoC controller + +maintainers: + - Frank Li + +select: + properties: + compatible: + contains: + enum: + - fsl,ls1028a-dwc3 + required: + - compatible + +properties: + compatible: + items: + - enum: + - fsl,ls1028a-dwc3 + - const: snps,dwc3 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + +unevaluatedProperties: false + +required: + - compatible + - reg + - interrupts + +allOf: + - $ref: snps,dwc3.yaml# + +examples: + - | + #include + + usb@fe800000 { + compatible = "fsl,ls1028a-dwc3", "snps,dwc3"; + reg = <0xfe800000 0x100000>; + interrupts = ; + }; diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt deleted file mode 100644 index afc30e98b1237a..00000000000000 --- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt +++ /dev/null @@ -1,110 +0,0 @@ -MSM SoC HSUSB controllers - -EHCI - -Required properties: -- compatible: Should contain "qcom,ehci-host" -- regs: offset and length of the register set in the memory map -- usb-phy: phandle for the PHY device - -Example EHCI controller device node: - - ehci: ehci@f9a55000 { - compatible = "qcom,ehci-host"; - reg = <0xf9a55000 0x400>; - usb-phy = <&usb_otg>; - }; - -USB PHY with optional OTG: - -Required properties: -- compatible: Should contain: - "qcom,usb-otg-ci" for chipsets with ChipIdea 45nm PHY - "qcom,usb-otg-snps" for chipsets with Synopsys 28nm PHY - -- regs: Offset and length of the register set in the memory map -- interrupts: interrupt-specifier for the OTG interrupt. - -- clocks: A list of phandle + clock-specifier pairs for the - clocks listed in clock-names -- clock-names: Should contain the following: - "phy" USB PHY reference clock - "core" Protocol engine clock - "iface" Interface bus clock - "alt_core" Protocol engine clock for targets with asynchronous - reset methodology. 
(optional) - -- vdccx-supply: phandle to the regulator for the vdd supply for - digital circuit operation. -- v1p8-supply: phandle to the regulator for the 1.8V supply -- v3p3-supply: phandle to the regulator for the 3.3V supply - -- resets: A list of phandle + reset-specifier pairs for the - resets listed in reset-names -- reset-names: Should contain the following: - "phy" USB PHY controller reset - "link" USB LINK controller reset - -- qcom,otg-control: OTG control (VBUS and ID notifications) can be one of - 1 - PHY control - 2 - PMIC control - -Optional properties: -- dr_mode: One of "host", "peripheral" or "otg". Defaults to "otg" - -- switch-gpio: A phandle + gpio-specifier pair. Some boards are using Dual - SPDT USB Switch, witch is controlled by GPIO to de/multiplex - D+/D- USB lines between connectors. - -- qcom,phy-init-sequence: PHY configuration sequence values. This is related to Device - Mode Eye Diagram test. Start address at which these values will be - written is ULPI_EXT_VENDOR_SPECIFIC. Value of -1 is reserved as - "do not overwrite default value at this address". - For example: qcom,phy-init-sequence = < -1 0x63 >; - Will update only value at address ULPI_EXT_VENDOR_SPECIFIC + 1. - -- qcom,phy-num: Select number of pyco-phy to use, can be one of - 0 - PHY one, default - 1 - Second PHY - Some platforms may have configuration to allow USB - controller work with any of the two HSPHYs present. - -- qcom,vdd-levels: This property must be a list of three integer values - (no, min, max) where each value represents either a voltage - in microvolts or a value corresponding to voltage corner. - -- qcom,manual-pullup: If present, vbus is not routed to USB controller/phy - and controller driver therefore enables pull-up explicitly - before starting controller using usbcmd run/stop bit. - -- extcon: phandles to external connector devices. First phandle - should point to external connector, which provide "USB" - cable events, the second should point to external connector - device, which provide "USB-HOST" cable events. If one of - the external connector devices is not required empty <0> - phandle should be specified. 
- -Example HSUSB OTG controller device node: - - usb@f9a55000 { - compatible = "qcom,usb-otg-snps"; - reg = <0xf9a55000 0x400>; - interrupts = <0 134 0>; - dr_mode = "peripheral"; - - clocks = <&gcc GCC_XO_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>, - <&gcc GCC_USB_HS_AHB_CLK>; - - clock-names = "phy", "core", "iface"; - - vddcx-supply = <&pm8841_s2_corner>; - v1p8-supply = <&pm8941_l6>; - v3p3-supply = <&pm8941_l24>; - - resets = <&gcc GCC_USB2A_PHY_BCR>, <&gcc GCC_USB_HS_BCR>; - reset-names = "phy", "link"; - - qcom,otg-control = <1>; - qcom,phy-init-sequence = < -1 0x63 >; - qcom,vdd-levels = <1 5 7>; - }; diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml index efde47a5b14556..18758efb8d2966 100644 --- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml @@ -52,6 +52,7 @@ properties: - qcom,sm8550-dwc3 - qcom,sm8650-dwc3 - qcom,x1e80100-dwc3 + - qcom,x1e80100-dwc3-mp - const: qcom,dwc3 reg: @@ -164,6 +165,7 @@ allOf: contains: enum: - qcom,ipq4019-dwc3 + - qcom,ipq5332-dwc3 then: properties: clocks: @@ -267,7 +269,6 @@ allOf: contains: enum: - qcom,ipq5018-dwc3 - - qcom,ipq5332-dwc3 - qcom,msm8994-dwc3 - qcom,qcs404-dwc3 then: @@ -289,6 +290,7 @@ allOf: - qcom,sc8280xp-dwc3 - qcom,sc8280xp-dwc3-mp - qcom,x1e80100-dwc3 + - qcom,x1e80100-dwc3-mp then: properties: clocks: @@ -428,6 +430,21 @@ allOf: contains: enum: - qcom,ipq5332-dwc3 + then: + properties: + interrupts: + maxItems: 3 + interrupt-names: + items: + - const: pwr_event + - const: dp_hs_phy_irq + - const: dm_hs_phy_irq + + - if: + properties: + compatible: + contains: + enum: - qcom,x1e80100-dwc3 then: properties: @@ -486,6 +503,7 @@ allOf: contains: enum: - qcom,sc8180x-dwc3-mp + - qcom,x1e80100-dwc3-mp then: properties: interrupts: diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml index 95ff9791baea09..653a89586f4e9b 100644 --- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml +++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml @@ -13,10 +13,9 @@ properties: compatible: oneOf: - const: ti,j721e-usb - - const: ti,am64-usb - items: - - const: ti,j721e-usb - const: ti,am64-usb + - const: ti,j721e-usb reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index a70ce43b3dc032..b320a39de7fe40 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -368,6 +368,8 @@ patternProperties: description: Devantech, Ltd. "^dfi,.*": description: DFI Inc. + "^dfrobot,.*": + description: DFRobot Corporation "^dh,.*": description: DH electronics GmbH "^difrnce,.*": @@ -804,6 +806,8 @@ patternProperties: description: Lantiq Semiconductor "^lattice,.*": description: Lattice Semiconductor + "^lckfb,.*": + description: Shenzhen JLC Technology Group Co., Ltd. "^lctech,.*": description: Shenzen LC Technology Co., Ltd. "^leadtek,.*": @@ -1476,6 +1480,8 @@ patternProperties: description: Terasic Inc. "^tesla,.*": description: Tesla, Inc. + "^test,.*": + description: Reserved for use by tests. For example, KUnit. "^tfc,.*": description: Three Five Corp "^thead,.*": @@ -1535,6 +1541,8 @@ patternProperties: description: Turing Machines, Inc. "^tyan,.*": description: Tyan Computer Corporation + "^tyhx,.*": + description: NanjingTianyihexin Electronics Ltd. 
"^u-blox,.*": description: u-blox "^u-boot,.*": diff --git a/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml b/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml new file mode 100644 index 00000000000000..5dbe891c70c6fa --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/cirrus,ep9301-wdt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cirrus Logic EP93xx Watchdog Timer + +maintainers: + - Nikita Shubin + - Alexander Sverdlin + +allOf: + - $ref: watchdog.yaml# + +properties: + compatible: + oneOf: + - const: cirrus,ep9301-wdt + - items: + - enum: + - cirrus,ep9302-wdt + - cirrus,ep9307-wdt + - cirrus,ep9312-wdt + - cirrus,ep9315-wdt + - const: cirrus,ep9301-wdt + + reg: + maxItems: 1 + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + watchdog@80940000 { + compatible = "cirrus,ep9301-wdt"; + reg = <0x80940000 0x08>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt b/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt deleted file mode 100644 index aa10b8ec36e259..00000000000000 --- a/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt +++ /dev/null @@ -1,24 +0,0 @@ -Texas Instruments DaVinci/Keystone Watchdog Timer (WDT) Controller - -Required properties: -- compatible : Should be "ti,davinci-wdt", "ti,keystone-wdt" -- reg : Should contain WDT registers location and length - -Optional properties: -- timeout-sec : Contains the watchdog timeout in seconds -- clocks : the clock feeding the watchdog timer. - Needed if platform uses clocks. - See clock-bindings.txt - -Documentation: -Davinci DM646x - https://www.ti.com/lit/ug/spruer5b/spruer5b.pdf -Keystone - https://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf - -Examples: - -wdt: wdt@2320000 { - compatible = "ti,davinci-wdt"; - reg = <0x02320000 0x80>; - timeout-sec = <30>; - clocks = <&clkwdtimer0>; -}; diff --git a/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt b/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt deleted file mode 100644 index 09f6b24969e0fd..00000000000000 --- a/Documentation/devicetree/bindings/watchdog/lpc18xx-wdt.txt +++ /dev/null @@ -1,19 +0,0 @@ -* NXP LPC18xx Watchdog Timer (WDT) - -Required properties: -- compatible: Should be "nxp,lpc1850-wwdt" -- reg: Should contain WDT registers location and length -- clocks: Must contain an entry for each entry in clock-names. -- clock-names: Should contain "wdtclk" and "reg"; the watchdog counter - clock and register interface clock respectively. 
-- interrupts: Should contain WDT interrupt - -Examples: - -watchdog@40080000 { - compatible = "nxp,lpc1850-wwdt"; - reg = <0x40080000 0x24>; - clocks = <&cgu BASE_SAFE_CLK>, <&ccu1 CLK_CPU_WWDT>; - clock-names = "wdtclk", "reg"; - interrupts = <49>; -}; diff --git a/Documentation/devicetree/bindings/watchdog/nxp,lpc1850-wwdt.yaml b/Documentation/devicetree/bindings/watchdog/nxp,lpc1850-wwdt.yaml new file mode 100644 index 00000000000000..52878fdbe3ad1c --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/nxp,lpc1850-wwdt.yaml @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/nxp,lpc1850-wwdt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP LPC18xx Watchdog Timer (WDT) + +maintainers: + - Animesh Agarwal + +properties: + compatible: + const: nxp,lpc1850-wwdt + + reg: + maxItems: 1 + + clocks: + items: + - description: Watchdog counter clock + - description: Register interface clock + + clock-names: + items: + - const: wdtclk + - const: reg + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + +additionalProperties: false + +examples: + - | + #include + #include + + watchdog@40080000 { + compatible = "nxp,lpc1850-wwdt"; + reg = <0x40080000 0x24>; + clocks = <&cgu BASE_SAFE_CLK>, <&ccu1 CLK_CPU_WWDT>; + clock-names = "wdtclk", "reg"; + interrupts = <49>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml index 47587971fb0b7c..932393f8c649af 100644 --- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml @@ -26,6 +26,7 @@ properties: - qcom,apss-wdt-msm8994 - qcom,apss-wdt-qcm2290 - qcom,apss-wdt-qcs404 + - qcom,apss-wdt-sa8255p - qcom,apss-wdt-sa8775p - qcom,apss-wdt-sc7180 - qcom,apss-wdt-sc7280 diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml index eba454d1680f15..29ada89fdcdc5a 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml @@ -75,6 +75,8 @@ properties: - renesas,r8a779h0-wdt # R-Car V4M - const: renesas,rcar-gen4-wdt # R-Car Gen4 + - const: renesas,r9a09g057-wdt # RZ/V2H(P) + reg: maxItems: 1 @@ -113,7 +115,6 @@ properties: required: - compatible - reg - - interrupts - clocks allOf: @@ -137,6 +138,7 @@ allOf: compatible: contains: enum: + - renesas,r9a09g057-wdt - renesas,rzg2l-wdt - renesas,rzv2m-wdt then: @@ -171,6 +173,19 @@ allOf: interrupts: maxItems: 1 + - if: + properties: + compatible: + contains: + const: renesas,r9a09g057-wdt + then: + properties: + interrupts: false + interrupt-names: false + else: + required: + - interrupts + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml index c7aab0418a3202..b5a3dc37707069 100644 --- a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml @@ -29,6 +29,7 @@ properties: - rockchip,rk3368-wdt - rockchip,rk3399-wdt - rockchip,rk3568-wdt + - rockchip,rk3576-wdt - rockchip,rk3588-wdt - rockchip,rv1108-wdt - const: snps,dw-wdt diff --git a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml 
b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml index 6b13bfc11e114d..86bd39d5085002 100644 --- a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml +++ b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml @@ -36,6 +36,12 @@ properties: minItems: 1 maxItems: 2 + interrupts: + maxItems: 1 + description: Pre-timeout interrupt from the watchdog. + + wakeup-source: true + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/watchdog/ti,davinci-wdt.yaml b/Documentation/devicetree/bindings/watchdog/ti,davinci-wdt.yaml new file mode 100644 index 00000000000000..3c78f60f5f48c9 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/ti,davinci-wdt.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/ti,davinci-wdt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI DaVinci/Keystone Watchdog Timer Controller + +maintainers: + - Kousik Sanagavarapu + +description: | + TI's Watchdog Timer Controller for DaVinci and Keystone Processors. + + Datasheets + + Davinci DM646x - https://www.ti.com/lit/ug/spruer5b/spruer5b.pdf + Keystone - https://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf + +allOf: + - $ref: watchdog.yaml# + +properties: + compatible: + oneOf: + - items: + - const: ti,keystone-wdt + - const: ti,davinci-wdt + - items: + - const: ti,davinci-wdt + + reg: + maxItems: 1 + + power-domains: + maxItems: 1 + + clocks: + maxItems: 1 + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + wdt: watchdog@22f0080 { + compatible = "ti,keystone-wdt", "ti,davinci-wdt"; + reg = <0x022f0080 0x80>; + clocks = <&clkwdtimer0>; + }; + +... diff --git a/Documentation/devicetree/bindings/watchdog/zii,rave-wdt.yaml b/Documentation/devicetree/bindings/watchdog/zii,rave-wdt.yaml new file mode 100644 index 00000000000000..9dbaa941538e32 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/zii,rave-wdt.yaml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/zii,rave-wdt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Zodiac RAVE Watchdog Timer + +maintainers: + - Martyn Welch + - Guenter Roeck + - Wim Van Sebroeck + +properties: + compatible: + const: zii,rave-wdt + + reg: + maxItems: 1 + description: i2c slave address of device, usually 0x38 + + reset-duration-ms: + description: + Duration of the pulse generated when the watchdog times + out. + +required: + - compatible + - reg + +allOf: + - $ref: watchdog.yaml# + +unevaluatedProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + watchdog@38 { + compatible = "zii,rave-wdt"; + reg = <0x38>; + timeout-sec = <30>; + reset-duration-ms = <30>; + }; + }; + diff --git a/Documentation/devicetree/bindings/watchdog/ziirave-wdt.txt b/Documentation/devicetree/bindings/watchdog/ziirave-wdt.txt deleted file mode 100644 index 3d878184ec3ff7..00000000000000 --- a/Documentation/devicetree/bindings/watchdog/ziirave-wdt.txt +++ /dev/null @@ -1,19 +0,0 @@ -Zodiac RAVE Watchdog Timer - -Required properties: -- compatible: must be "zii,rave-wdt" -- reg: i2c slave address of device, usually 0x38 - -Optional Properties: -- timeout-sec: Watchdog timeout value in seconds. -- reset-duration-ms: Duration of the pulse generated when the watchdog times - out. Value in milliseconds. 
- -Example: - - watchdog@38 { - compatible = "zii,rave-wdt"; - reg = <0x38>; - timeout-sec = <30>; - reset-duration-ms = <30>; - }; diff --git a/Documentation/doc-guide/checktransupdate.rst b/Documentation/doc-guide/checktransupdate.rst new file mode 100644 index 00000000000000..dfaf9d37374768 --- /dev/null +++ b/Documentation/doc-guide/checktransupdate.rst @@ -0,0 +1,54 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Checking for needed translation updates +======================================= + +This script helps track the translation status of the documentation in +different locales, i.e., whether the documentation is up-to-date with +the English counterpart. + +How it works +------------ + +It uses the ``git log`` command to track the latest English commit from the +translation commit (ordered by author date) and the latest English commits +from HEAD. If any differences occur, the file is considered out-of-date, +and the commits that need to be updated will be collected and reported. + +Features implemented + +- check all files in a certain locale +- check a single file or a set of files +- provide options to change output format +- track the translation status of files that have no translation + +Usage +----- + +:: + + ./scripts/checktransupdate.py --help + +Please refer to the output of argument parser for usage details. + +Samples + +- ``./scripts/checktransupdate.py -l zh_CN`` + This will print all the files that need to be updated in the zh_CN locale. + +- ``./scripts/checktransupdate.py Documentation/translations/zh_CN/dev-tools/testing-overview.rst`` + This will only print the status of the specified file. + +Then the output is something like: + +:: + + Documentation/dev-tools/kfence.rst + No translation in the locale of zh_CN + + Documentation/translations/zh_CN/dev-tools/testing-overview.rst + commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs") + 1 commits needs resolving in total + +Features to be implemented + +- files can be a folder instead of only a file diff --git a/Documentation/doc-guide/index.rst b/Documentation/doc-guide/index.rst index 7c7d9778462661..24d058faa75c2c 100644 --- a/Documentation/doc-guide/index.rst +++ b/Documentation/doc-guide/index.rst @@ -12,6 +12,7 @@ How to write kernel documentation parse-headers contributing maintainer-profile + checktransupdate .. only:: subproject and html diff --git a/Documentation/dontdiff b/Documentation/dontdiff index 3c399f132e2db0..de2cb8de6112e3 100644 --- a/Documentation/dontdiff +++ b/Documentation/dontdiff @@ -180,6 +180,7 @@ modpost modules-only.symvers modules.builtin modules.builtin.modinfo +modules.builtin.ranges modules.nsdeps modules.order modversions.h* @@ -262,7 +263,7 @@ vsyscall_32.lds wanxlfw.inc uImage unifdef -utf8data.h +utf8data.c wakeup.bin wakeup.elf wakeup.lds diff --git a/Documentation/driver-api/cxl/access-coordinates.rst b/Documentation/driver-api/cxl/access-coordinates.rst new file mode 100644 index 00000000000000..b07950ea30c906 --- /dev/null +++ b/Documentation/driver-api/cxl/access-coordinates.rst @@ -0,0 +1,91 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: + +================================== +CXL Access Coordinates Computation +================================== + +Shared Upstream Link Calculation +================================ +For certain CXL region construction with endpoints behind CXL switches (SW) or +Root Ports (RP), there is the possibility of the total bandwidth for all +the endpoints behind a switch being more than the switch upstream link.
+A similar situation can occur within the host, upstream of the root ports. +The CXL driver performs an additional pass after all the targets have +arrived for a region in order to recalculate the bandwidths with possible +upstream link being a limiting factor in mind. + +The algorithm assumes the configuration is a symmetric topology as that +maximizes performance. When asymmetric topology is detected, the calculation +is aborted. An asymmetric topology is detected during topology walk where the +number of RPs detected as a grandparent is not equal to the number of devices +iterated in the same iteration loop. The assumption is made that subtle +asymmetry in properties does not happen and all paths to EPs are equal. + +There can be multiple switches under an RP. There can be multiple RPs under +a CXL Host Bridge (HB). There can be multiple HBs under a CXL Fixed Memory +Window Structure (CFMWS). + +An example hierarchy: + +> CFMWS 0 +> | +> _________|_________ +> | | +> ACPI0017-0 ACPI0017-1 +> GP0/HB0/ACPI0016-0 GP1/HB1/ACPI0016-1 +> | | | | +> RP0 RP1 RP2 RP3 +> | | | | +> SW 0 SW 1 SW 2 SW 3 +> | | | | | | | | +> EP0 EP1 EP2 EP3 EP4 EP5 EP6 EP7 + +Computation for the example hierarchy: + +Min (GP0 to CPU BW, + Min(SW 0 Upstream Link to RP0 BW, + Min(SW0SSLBIS for SW0DSP0 (EP0), EP0 DSLBIS, EP0 Upstream Link) + + Min(SW0SSLBIS for SW0DSP1 (EP1), EP1 DSLBIS, EP1 Upstream link)) + + Min(SW 1 Upstream Link to RP1 BW, + Min(SW1SSLBIS for SW1DSP0 (EP2), EP2 DSLBIS, EP2 Upstream Link) + + Min(SW1SSLBIS for SW1DSP1 (EP3), EP3 DSLBIS, EP3 Upstream link))) + +Min (GP1 to CPU BW, + Min(SW 2 Upstream Link to RP2 BW, + Min(SW2SSLBIS for SW2DSP0 (EP4), EP4 DSLBIS, EP4 Upstream Link) + + Min(SW2SSLBIS for SW2DSP1 (EP5), EP5 DSLBIS, EP5 Upstream link)) + + Min(SW 3 Upstream Link to RP3 BW, + Min(SW3SSLBIS for SW3DSP0 (EP6), EP6 DSLBIS, EP6 Upstream Link) + + Min(SW3SSLBIS for SW3DSP1 (EP7), EP7 DSLBIS, EP7 Upstream link)))) + +The calculation starts at cxl_region_shared_upstream_perf_update(). A xarray +is created to collect all the endpoint bandwidths via the +cxl_endpoint_gather_bandwidth() function. The min() of bandwidth from the +endpoint CDAT and the upstream link bandwidth is calculated. If the endpoint +has a CXL switch as a parent, then min() of calculated bandwidth and the +bandwidth from the SSLBIS for the switch downstream port that is associated +with the endpoint is calculated. The final bandwidth is stored in a +'struct cxl_perf_ctx' in the xarray indexed by a device pointer. If the +endpoint is direct attached to a root port (RP), the device pointer would be an +RP device. If the endpoint is behind a switch, the device pointer would be the +upstream device of the parent switch. + +At the next stage, the code walks through one or more switches if they exist +in the topology. For endpoints directly attached to RPs, this step is skipped. +If there is another switch upstream, the code takes the min() of the current +gathered bandwidth and the upstream link bandwidth. If there's a switch +upstream, then the SSLBIS of the upstream switch. + +Once the topology walk reaches the RP, whether it's direct attached endpoints +or walking through the switch(es), cxl_rp_gather_bandwidth() is called. At +this point all the bandwidths are aggregated per each host bridge, which is +also the index for the resulting xarray. + +The next step is to take the min() of the per host bridge bandwidth and the +bandwidth from the Generic Port (GP). The bandwidths for the GP is retrieved +via ACPI tables SRAT/HMAT. 
The minimum bandwidths are aggregated under the same +ACPI0017 device to form a new xarray. + +Finally, the cxl_region_update_bandwidth() is called and the aggregated +bandwidth from all the members of the last xarray is updated for the +access coordinates residing in the cxl region (cxlr) context. diff --git a/Documentation/driver-api/cxl/index.rst b/Documentation/driver-api/cxl/index.rst index 12b82725d32294..965ba90e8fb7d7 100644 --- a/Documentation/driver-api/cxl/index.rst +++ b/Documentation/driver-api/cxl/index.rst @@ -8,6 +8,7 @@ Compute Express Link :maxdepth: 1 memory-devices + access-coordinates maturity-map diff --git a/Documentation/driver-api/dpll.rst b/Documentation/driver-api/dpll.rst index ea8d16600e16a8..e6855cd37e8525 100644 --- a/Documentation/driver-api/dpll.rst +++ b/Documentation/driver-api/dpll.rst @@ -214,6 +214,27 @@ offset values are fractional with 3-digit decimal places and shell be divided with ``DPLL_PIN_PHASE_OFFSET_DIVIDER`` to get integer part and modulo divided to get fractional part. +Embedded SYNC +============= + +A device may provide the ability to use the Embedded SYNC feature. It allows +embedding an additional SYNC signal into the base frequency of a pin - one +special pulse of the base frequency signal every time a SYNC signal pulse +happens. The user can configure the frequency of Embedded SYNC. +The Embedded SYNC capability is always related to a given base frequency +and HW capabilities. The user is provided a range of Embedded SYNC +frequencies supported, depending on the current base frequency configured for +the pin. + + ========================================= ================================= + ``DPLL_A_PIN_ESYNC_FREQUENCY`` current Embedded SYNC frequency + ``DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED`` nest available Embedded SYNC + frequency ranges + ``DPLL_A_PIN_FREQUENCY_MIN`` attr minimum value of frequency + ``DPLL_A_PIN_FREQUENCY_MAX`` attr maximum value of frequency + ``DPLL_A_PIN_ESYNC_PULSE`` pulse type of Embedded SYNC + ========================================= ================================= + Configuration commands group ============================ diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index ac9ee7441887dd..5f2ee8d717b1de 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -391,7 +391,7 @@ PCI devm_pci_remap_cfgspace() : ioremap PCI configuration space devm_pci_remap_cfg_resource() : ioremap PCI configuration space resource - pcim_enable_device() : after success, all PCI ops become managed + pcim_enable_device() : after success, some PCI ops become managed pcim_iomap() : do iomap() on a single BAR pcim_iomap_regions() : do request_region() and iomap() on multiple BARs pcim_iomap_regions_request_all() : do request_region() on all and iomap() on multiple BARs diff --git a/Documentation/driver-api/firewire.rst b/Documentation/driver-api/firewire.rst index d3cfa73cbb2b47..28a32410f7d23c 100644 --- a/Documentation/driver-api/firewire.rst +++ b/Documentation/driver-api/firewire.rst @@ -43,6 +43,8 @@ Firewire core transaction interfaces Firewire Isochronous I/O interfaces =================================== +.. kernel-doc:: include/linux/firewire.h + :functions: fw_iso_context_schedule_flush_completions ..
kernel-doc:: drivers/firewire/core-iso.c :export: diff --git a/Documentation/driver-api/iio/buffers.rst b/Documentation/driver-api/iio/buffers.rst index e83026aebe9758..63f364e862d1c9 100644 --- a/Documentation/driver-api/iio/buffers.rst +++ b/Documentation/driver-api/iio/buffers.rst @@ -15,8 +15,8 @@ trigger source. Multiple data channels can be read at once from IIO buffer sysfs interface ========================== An IIO buffer has an associated attributes directory under -:file:`/sys/bus/iio/iio:device{X}/buffer/*`. Here are some of the existing -attributes: +:file:`/sys/bus/iio/devices/iio:device{X}/buffer/*`. Here are some of the +existing attributes: * :file:`length`, the total number of data samples (capacity) that can be stored by the buffer. @@ -28,8 +28,8 @@ IIO buffer setup The meta information associated with a channel reading placed in a buffer is called a scan element. The important bits configuring scan elements are exposed to userspace applications via the -:file:`/sys/bus/iio/iio:device{X}/scan_elements/` directory. This directory contains -attributes of the following form: +:file:`/sys/bus/iio/devices/iio:device{X}/scan_elements/` directory. This +directory contains attributes of the following form: * :file:`enable`, used for enabling a channel. If and only if its attribute is non *zero*, then a triggered capture will contain data samples for this diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst index 715cf29482a146..dfe438dc91a781 100644 --- a/Documentation/driver-api/iio/core.rst +++ b/Documentation/driver-api/iio/core.rst @@ -24,7 +24,7 @@ then we will show how a device driver makes use of an IIO device. There are two ways for a user space application to interact with an IIO driver. -1. :file:`/sys/bus/iio/iio:device{X}/`, this represents a hardware sensor +1. :file:`/sys/bus/iio/devices/iio:device{X}/`, this represents a hardware sensor and groups together the data channels of the same chip. 2. :file:`/dev/iio:device{X}`, character device node interface used for buffered data transfer and for events information retrieval. @@ -51,8 +51,8 @@ IIO device sysfs interface Attributes are sysfs files used to expose chip info and also allowing applications to set various configuration parameters. For device with -index X, attributes can be found under /sys/bus/iio/iio:deviceX/ directory. -Common attributes are: +index X, attributes can be found under /sys/bus/iio/devices/iio:deviceX/ +directory. Common attributes are: * :file:`name`, description of the physical chip. * :file:`dev`, shows the major:minor pair associated with @@ -140,16 +140,16 @@ Here is how we can make use of the channel's modifiers:: This channel's definition will generate two separate sysfs files for raw data retrieval: -* :file:`/sys/bus/iio/iio:device{X}/in_intensity_ir_raw` -* :file:`/sys/bus/iio/iio:device{X}/in_intensity_both_raw` +* :file:`/sys/bus/iio/devices/iio:device{X}/in_intensity_ir_raw` +* :file:`/sys/bus/iio/devices/iio:device{X}/in_intensity_both_raw` one file for processed data: -* :file:`/sys/bus/iio/iio:device{X}/in_illuminance_input` +* :file:`/sys/bus/iio/devices/iio:device{X}/in_illuminance_input` and one shared sysfs file for sampling frequency: -* :file:`/sys/bus/iio/iio:device{X}/sampling_frequency`. +* :file:`/sys/bus/iio/devices/iio:device{X}/sampling_frequency`. 
Here is how we can make use of the channel's indexing:: diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index f10decc2c14b65..7f83e05769b4a4 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -124,6 +124,7 @@ Subsystem-specific APIs pps ptp pwm + pwrseq regulator reset rfkill diff --git a/Documentation/driver-api/ipmi.rst b/Documentation/driver-api/ipmi.rst index e224e47b6b0944..dfa021eacd63c4 100644 --- a/Documentation/driver-api/ipmi.rst +++ b/Documentation/driver-api/ipmi.rst @@ -540,7 +540,7 @@ at module load time (for a module) with:: alerts_broken The addresses are normal I2C addresses. The adapter is the string -name of the adapter, as shown in /sys/class/i2c-adapter/i2c-/name. +name of the adapter, as shown in /sys/bus/i2c/devices/i2c-/name. It is *NOT* i2c- itself. Also, the comparison is done ignoring spaces, so if the name is "This is an I2C chip" you can say adapter_name=ThisisanI2cchip. This is because it's hard to pass in diff --git a/Documentation/driver-api/media/mc-core.rst b/Documentation/driver-api/media/mc-core.rst index 2456950ce8ffb7..1d010bd7ec49be 100644 --- a/Documentation/driver-api/media/mc-core.rst +++ b/Documentation/driver-api/media/mc-core.rst @@ -144,7 +144,8 @@ valid values are described at :c:func:`media_create_pad_link()` and Graph traversal ^^^^^^^^^^^^^^^ -The media framework provides APIs to iterate over entities in a graph. +The media framework provides APIs to traverse media graphs, locating connected +entities and links. To iterate over all entities belonging to a media device, drivers can use the media_device_for_each_entity macro, defined in @@ -159,31 +160,6 @@ the media_device_for_each_entity macro, defined in ... } -Drivers might also need to iterate over all entities in a graph that can be -reached only through enabled links starting at a given entity. The media -framework provides a depth-first graph traversal API for that purpose. - -.. note:: - - Graphs with cycles (whether directed or undirected) are **NOT** - supported by the graph traversal API. To prevent infinite loops, the graph - traversal code limits the maximum depth to ``MEDIA_ENTITY_ENUM_MAX_DEPTH``, - currently defined as 16. - -Drivers initiate a graph traversal by calling -:c:func:`media_graph_walk_start()` - -The graph structure, provided by the caller, is initialized to start graph -traversal at the given entity. - -Drivers can then retrieve the next entity by calling -:c:func:`media_graph_walk_next()` - -When the graph traversal is complete the function will return ``NULL``. - -Graph traversal can be interrupted at any moment. No cleanup function call -is required and the graph structure can be freed normally. - Helper functions can be used to find a link between two given pads, or a pad connected to another pad through an enabled link (:c:func:`media_entity_find_link()`, :c:func:`media_pad_remote_pad_first()`, @@ -276,6 +252,45 @@ Subsystems should facilitate link validation by providing subsystem specific helper functions to provide easy access for commonly needed information, and in the end provide a way to use driver-specific callbacks. +Pipeline traversal +^^^^^^^^^^^^^^^^^^ + +Once a pipeline has been constructed with :c:func:`media_pipeline_start()`, +drivers can iterate over entities or pads in the pipeline with the +:c:macro:`media_pipeline_for_each_entity` and +:c:macro:`media_pipeline_for_each_pad` macros. Iterating over pads is +straightforward: + +..
code-block:: c + + media_pipeline_pad_iter iter; + struct media_pad *pad; + + media_pipeline_for_each_pad(pipe, &iter, pad) { + /* 'pad' will point to each pad in turn */ + ... + } + +To iterate over entities, the iterator needs to be initialized and cleaned up +as an additional steps: + +.. code-block:: c + + media_pipeline_entity_iter iter; + struct media_entity *entity; + int ret; + + ret = media_pipeline_entity_iter_init(pipe, &iter); + if (ret) + ...; + + media_pipeline_for_each_entity(pipe, &iter, entity) { + /* 'entity' will point to each entity in turn */ + ... + } + + media_pipeline_entity_iter_cleanup(&iter); + Media Controller Device Allocator API ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/Documentation/driver-api/mmc/index.rst b/Documentation/driver-api/mmc/index.rst index 7339736ac77454..8188863e595964 100644 --- a/Documentation/driver-api/mmc/index.rst +++ b/Documentation/driver-api/mmc/index.rst @@ -10,4 +10,5 @@ MMC/SD/SDIO card support mmc-dev-attrs mmc-dev-parts mmc-async-req + mmc-test mmc-tools diff --git a/Documentation/driver-api/mmc/mmc-test.rst b/Documentation/driver-api/mmc/mmc-test.rst new file mode 100644 index 00000000000000..1fe33eb43742f0 --- /dev/null +++ b/Documentation/driver-api/mmc/mmc-test.rst @@ -0,0 +1,299 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======================== +MMC Test Framework +======================== + +Overview +======== + +The `mmc_test` framework is designed to test the performance and reliability of host controller drivers and all devices handled by the MMC subsystem. This includes not only MMC devices but also SD cards and other devices supported by the subsystem. + +The framework provides a variety of tests to evaluate different aspects of the host controller and device interactions, such as read and write performance, data integrity, and error handling. These tests help ensure that the host controller drivers and devices operate correctly under various conditions. + +The `mmc_test` framework is particularly useful for: + +- Verifying the functionality and performance of MMC and SD host controller drivers. +- Ensuring compatibility and reliability of MMC and SD devices. +- Identifying and diagnosing issues in the MMC subsystem. + +The results of the tests are logged in the kernel log, providing detailed information about the test outcomes and any encountered issues. + +Note: whatever is on your card will be overwritten by these tests. + +Initialization +============== + +To use the ``mmc_test`` framework, follow these steps: + +1. **Enable the MMC Test Framework**: + + Ensure that the ``CONFIG_MMC_TEST`` kernel configuration option is enabled. This can be done by configuring the kernel: + + .. code-block:: none + + make menuconfig + + Navigate to: + + Device Drivers ---> + <*> MMC/SD/SDIO card support ---> + [*] MMC host test driver + + Alternatively, you can enable it directly in the kernel configuration file: + + .. code-block:: none + + echo "CONFIG_MMC_TEST=y" >> .config + + Rebuild and install the kernel if necessary. + +2. **Load the MMC Test Module**: + + If the ``mmc_test`` framework is built as a module, you need to load it using ``modprobe``: + + .. code-block:: none + + modprobe mmc_test + +Binding the MMC Card for Testing +================================ + +To enable MMC testing, you need to unbind the MMC card from the ``mmcblk`` driver and bind it to the ``mmc_test`` driver. This allows the ``mmc_test`` framework to take control of the MMC card for testing purposes. + +1. Identify the MMC card: + + .. 
code-block:: sh + + ls /sys/bus/mmc/devices/ + + This will list the MMC devices, such as ``mmc0:0001``. + +2. Unbind the MMC card from the ``mmcblk`` driver: + + .. code-block:: sh + + echo 'mmc0:0001' > /sys/bus/mmc/drivers/mmcblk/unbind + +3. Bind the MMC card to the ``mmc_test`` driver: + + .. code-block:: sh + + echo 'mmc0:0001' > /sys/bus/mmc/drivers/mmc_test/bind + +After binding, you should see a line in the kernel log indicating that the card has been claimed for testing: + +.. code-block:: none + + mmc_test mmc0:0001: Card claimed for testing. + + +Usage - Debugfs Entries +======================= + +Once the ``mmc_test`` framework is enabled, you can interact with the following debugfs entries located in ``/sys/kernel/debug/mmc0/mmc0:0001``: + +1. **test**: + + This file is used to run specific tests. Write the test number to this file to execute a test. + + .. code-block:: sh + + echo > /sys/kernel/debug/mmc0/mmc0:0001/test + + The test result is indicated in the kernel log info. You can view the kernel log using the `dmesg` command or by checking the log file in `/var/log/`. + + .. code-block:: sh + + dmesg | grep mmc0 + + Example: + + To run test number 4 (Basic read with data verification): + + .. code-block:: sh + + echo 4 > /sys/kernel/debug/mmc0/mmc0:0001/test + + Check the kernel log for the result: + + .. code-block:: sh + + dmesg | grep mmc0 + +2. **testlist**: + + This file lists all available tests. You can read this file to see the list of tests and their corresponding numbers. + + .. code-block:: sh + + cat /sys/kernel/debug/mmc0/mmc0:0001/testlist + + The available tests are listed in the table below: + ++------+--------------------------------+---------------------------------------------+ +| Test | Test Name | Test Description | ++======+================================+=============================================+ +| 0 | Run all tests | Runs all available tests | ++------+--------------------------------+---------------------------------------------+ +| 1 | Basic write | Performs a basic write operation of a | +| | | single 512-Byte block to the MMC card | +| | | without data verification. | ++------+--------------------------------+---------------------------------------------+ +| 2 | Basic read | Same for read | ++------+--------------------------------+---------------------------------------------+ +| 3 | Basic write | Performs a basic write operation of a | +| | (with data verification) | single 512-Byte block to the MMC card | +| | | with data verification by reading back | +| | | the written data and comparing it. | ++------+--------------------------------+---------------------------------------------+ +| 4 | Basic read | Same for read | +| | (with data verification) | | ++------+--------------------------------+---------------------------------------------+ +| 5 | Multi-block write | Performs a multi-block write operation of | +| | | 8 blocks (each 512 bytes) to the MMC card. | ++------+--------------------------------+---------------------------------------------+ +| 6 | Multi-block read | Same for read | ++------+--------------------------------+---------------------------------------------+ +| 7 | Power of two block writes | Performs write operations with block sizes | +| | | that are powers of two, starting from 1 | +| | | byte up to 256 bytes, to the MMC card. 
| ++------+--------------------------------+---------------------------------------------+ +| 8 | Power of two block reads | Same for read | ++------+--------------------------------+---------------------------------------------+ +| 9 | Weird sized block writes | Performs write operations with varying | +| | | block sizes starting from 3 bytes and | +| | | increasing by 7 bytes each iteration, up | +| | | to 511 bytes, to the MMC card. | ++------+--------------------------------+---------------------------------------------+ +| 10 | Weird sized block reads | same for read | ++------+--------------------------------+---------------------------------------------+ +| 11 | Badly aligned write | Performs write operations with buffers | +| | | starting at different alignments (0 to 7 | +| | | bytes offset) to test how the MMC card | +| | | handles unaligned data transfers. | ++------+--------------------------------+---------------------------------------------+ +| 12 | Badly aligned read | same for read | ++------+--------------------------------+---------------------------------------------+ +| 13 | Badly aligned multi-block write| same for multi-write | ++------+--------------------------------+---------------------------------------------+ +| 14 | Badly aligned multi-block read | same for multi-read | ++------+--------------------------------+---------------------------------------------+ +| 15 | Proper xfer_size at write | intentionally create a broken transfer by | +| | (Start failure) | modifying the MMC request in a way that it | +| | | will not perform as expected, e.g. use | +| | | MMC_WRITE_BLOCK for a multi-block transfer | ++------+--------------------------------+---------------------------------------------+ +| 16 | Proper xfer_size at read | same for read | +| | (Start failure) | | ++------+--------------------------------+---------------------------------------------+ +| 17 | Proper xfer_size at write | same for 2 blocks | +| | (Midway failure) | | ++------+--------------------------------+---------------------------------------------+ +| 18 | Proper xfer_size at read | same for read | +| | (Midway failure) | | ++------+--------------------------------+---------------------------------------------+ +| 19 | Highmem write | use a high memory page | ++------+--------------------------------+---------------------------------------------+ +| 20 | Highmem read | same for read | ++------+--------------------------------+---------------------------------------------+ +| 21 | Multi-block highmem write | same for multi-write | ++------+--------------------------------+---------------------------------------------+ +| 22 | Multi-block highmem read | same for mult-read | ++------+--------------------------------+---------------------------------------------+ +| 23 | Best-case read performance | Performs 512K sequential read (non sg) | ++------+--------------------------------+---------------------------------------------+ +| 24 | Best-case write performance | same for write | ++------+--------------------------------+---------------------------------------------+ +| 25 | Best-case read performance | Same using sg | +| | (Into scattered pages) | | ++------+--------------------------------+---------------------------------------------+ +| 26 | Best-case write performance | same for write | +| | (From scattered pages) | | ++------+--------------------------------+---------------------------------------------+ +| 27 | Single read performance | By transfer size | 
++------+--------------------------------+---------------------------------------------+ +| 28 | Single write performance | By transfer size | ++------+--------------------------------+---------------------------------------------+ +| 29 | Single trim performance | By transfer size | ++------+--------------------------------+---------------------------------------------+ +| 30 | Consecutive read performance | By transfer size | ++------+--------------------------------+---------------------------------------------+ +| 31 | Consecutive write performance | By transfer size | ++------+--------------------------------+---------------------------------------------+ +| 32 | Consecutive trim performance | By transfer size | ++------+--------------------------------+---------------------------------------------+ +| 33 | Random read performance | By transfer size | ++------+--------------------------------+---------------------------------------------+ +| 34 | Random write performance | By transfer size | ++------+--------------------------------+---------------------------------------------+ +| 35 | Large sequential read | Into scattered pages | ++------+--------------------------------+---------------------------------------------+ +| 36 | Large sequential write | From scattered pages | ++------+--------------------------------+---------------------------------------------+ +| 37 | Write performance | With blocking req 4k to 4MB | ++------+--------------------------------+---------------------------------------------+ +| 38 | Write performance | With non-blocking req 4k to 4MB | ++------+--------------------------------+---------------------------------------------+ +| 39 | Read performance | With blocking req 4k to 4MB | ++------+--------------------------------+---------------------------------------------+ +| 40 | Read performance | With non-blocking req 4k to 4MB | ++------+--------------------------------+---------------------------------------------+ +| 41 | Write performance | Blocking req 1 to 512 sg elems | ++------+--------------------------------+---------------------------------------------+ +| 42 | Write performance | Non-blocking req 1 to 512 sg elems | ++------+--------------------------------+---------------------------------------------+ +| 43 | Read performance | Blocking req 1 to 512 sg elems | ++------+--------------------------------+---------------------------------------------+ +| 44 | Read performance | Non-blocking req 1 to 512 sg elems | ++------+--------------------------------+---------------------------------------------+ +| 45 | Reset test | | ++------+--------------------------------+---------------------------------------------+ +| 46 | Commands during read | No Set Block Count (CMD23) | ++------+--------------------------------+---------------------------------------------+ +| 47 | Commands during write | No Set Block Count (CMD23) | ++------+--------------------------------+---------------------------------------------+ +| 48 | Commands during read | Use Set Block Count (CMD23) | ++------+--------------------------------+---------------------------------------------+ +| 49 | Commands during write | Use Set Block Count (CMD23) | ++------+--------------------------------+---------------------------------------------+ +| 50 | Commands during non-blocking | Read - use Set Block Count (CMD23) | ++------+--------------------------------+---------------------------------------------+ +| 51 | Commands during non-blocking | Write - use Set Block Count (CMD23) | 
++------+--------------------------------+---------------------------------------------+ + +Test Results +============ + +The results of the tests are logged in the kernel log. Each test logs the start, end, and result of the test. The possible results are: + +- **OK**: The test completed successfully. +- **FAILED**: The test failed. +- **UNSUPPORTED (by host)**: The test is unsupported by the host. +- **UNSUPPORTED (by card)**: The test is unsupported by the card. +- **ERROR**: An error occurred during the test. + +Example Kernel Log Output +========================= + +When running a test, you will see log entries similar to the following in the kernel log: + +.. code-block:: none + + [ 1234.567890] mmc0: Starting tests of card mmc0:0001... + [ 1234.567891] mmc0: Test case 4. Basic read (with data verification)... + [ 1234.567892] mmc0: Result: OK + [ 1234.567893] mmc0: Tests completed. + +In this example, test case 4 (Basic read with data verification) was executed, and the result was OK. + + +Contributing +============ + +Contributions to the `mmc_test` framework are welcome. Please follow the standard Linux kernel contribution guidelines and submit patches to the appropriate maintainers. + +Contact +======= + +For more information or to report issues, please contact the MMC subsystem maintainers. diff --git a/Documentation/driver-api/pwrseq.rst b/Documentation/driver-api/pwrseq.rst new file mode 100644 index 00000000000000..a644084ded17a9 --- /dev/null +++ b/Documentation/driver-api/pwrseq.rst @@ -0,0 +1,95 @@ +.. SPDX-License-Identifier: GPL-2.0-only +.. Copyright 2024 Linaro Ltd. + +==================== +Power Sequencing API +==================== + +:Author: Bartosz Golaszewski + +Introduction +============ + +This framework is designed to abstract complex power-up sequences that are +shared between multiple logical devices in the linux kernel. + +The intention is to allow consumers to obtain a power sequencing handle +exposed by the power sequence provider and delegate the actual requesting and +control of the underlying resources as well as to allow the provider to +mitigate any potential conflicts between multiple users behind the scenes. + +Glossary +-------- + +The power sequencing API uses a number of terms specific to the subsystem: + +Unit + + A unit is a discreet chunk of a power sequence. For instance one unit may + enable a set of regulators, another may enable a specific GPIO. Units can + define dependencies in the form of other units that must be enabled before + it itself can be. + +Target + + A target is a set of units (composed of the "final" unit and its + dependencies) that a consumer selects by its name when requesting a handle + to the power sequencer. Via the dependency system, multiple targets may + share the same parts of a power sequence but ignore parts that are + irrelevant. + +Descriptor + + A handle passed by the pwrseq core to every consumer that serves as the + entry point to the provider layer. It ensures coherence between different + users and keeps reference counting consistent. + +Consumer interface +================== + +The consumer API is aimed to be as simple as possible. The driver interested in +getting a descriptor from the power sequencer should call pwrseq_get() and +specify the name of the target it wants to reach in the sequence after calling +pwrseq_power_up(). The descriptor can be released by calling pwrseq_put() and +the consumer can request the powering down of its target with +pwrseq_power_off(). 
Note that there is no guarantee that pwrseq_power_off() +will have any effect as there may be multiple users of the underlying resources +who may keep them active. + +Provider interface +================== + +The provider API is admittedly not nearly as straightforward as the one for +consumers but it makes up for it in flexibility. + +Each provider can logically split the power-up sequence into discrete chunks +(units) and define their dependencies. They can then expose named targets that +consumers may use as the final point in the sequence that they wish to reach. + +To that end the providers fill out a set of configuration structures and +register with the pwrseq subsystem by calling pwrseq_device_register(). + +Dynamic consumer matching +------------------------- + +The main difference between pwrseq and other linux kernel providers is the +mechanism for dynamic matching of consumers and providers. Every power sequence +provider driver must implement the `match()` callback and pass it to the pwrseq +core when registering with the subsystem. + +When a client requests a sequencer handle, the core will call this callback for +every registered provider and let it flexibly figure out whether the proposed +client device is indeed its consumer. For example: if the provider binds to the +device-tree node representing a power management unit of a chipset and the +consumer driver controls one of its modules, the provider driver may parse the +relevant regulator supply properties in device tree and see if they lead from +the PMU to the consumer. + +API reference +============= + +.. kernel-doc:: include/linux/pwrseq/provider.h + :internal: + +.. kernel-doc:: drivers/power/sequencing/core.c + :export: diff --git a/Documentation/driver-api/thermal/sysfs-api.rst b/Documentation/driver-api/thermal/sysfs-api.rst index 978198f8a18bed..c803b89b7248f9 100644 --- a/Documentation/driver-api/thermal/sysfs-api.rst +++ b/Documentation/driver-api/thermal/sysfs-api.rst @@ -58,10 +58,9 @@ temperature) and throttle appropriate devices. ops: thermal zone device call-backs. - .bind: - bind the thermal zone device with a thermal cooling device. - .unbind: - unbind the thermal zone device with a thermal cooling device. + .should_bind: + check whether or not a given cooling device should be bound to + a given trip point in this thermal zone. .get_temp: get the current temperature of the thermal zone. .set_trips: @@ -246,56 +245,6 @@ temperature) and throttle appropriate devices. It deletes the corresponding entry from /sys/class/thermal folder and unbinds itself from all the thermal zone devices using it. -1.3 interface for binding a thermal zone device with a thermal cooling device ------------------------------------------------------------------------------ - - :: - - int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, - int trip, struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower, unsigned int weight); - - This interface function binds a thermal cooling device to a particular trip - point of a thermal zone device. - - This function is usually called in the thermal zone device .bind callback. - - tz: - the thermal zone device - cdev: - thermal cooling device - trip: - indicates which trip point in this thermal zone the cooling device - is associated with. - upper: - the Maximum cooling state for this trip point. - THERMAL_NO_LIMIT means no upper limit, - and the cooling device can be in max_state. - lower: - the Minimum cooling state can be used for this trip point. 
- THERMAL_NO_LIMIT means no lower limit, - and the cooling device can be in cooling state 0. - weight: - the influence of this cooling device in this thermal - zone. See 1.4.1 below for more information. - - :: - - int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, - int trip, struct thermal_cooling_device *cdev); - - This interface function unbinds a thermal cooling device from a particular - trip point of a thermal zone device. This function is usually called in - the thermal zone device .unbind callback. - - tz: - the thermal zone device - cdev: - thermal cooling device - trip: - indicates which trip point in this thermal zone the cooling device - is associated with. - 1.4 Thermal Zone Parameters --------------------------- @@ -366,8 +315,6 @@ Thermal cooling device sys I/F, created once it's registered:: Then next two dynamic attributes are created/removed in pairs. They represent the relationship between a thermal zone and its associated cooling device. -They are created/removed for each successful execution of -thermal_zone_bind_cooling_device/thermal_zone_unbind_cooling_device. :: @@ -459,14 +406,7 @@ are supposed to implement the callback. If they don't, the thermal framework calculated the trend by comparing the previous and the current temperature values. -4.2. get_thermal_instance ------------------------- - -This function returns the thermal_instance corresponding to a given -{thermal_zone, cooling_device, trip_point} combination. Returns NULL -if such an instance does not exist. - -4.3. thermal_cdev_update +4.2. thermal_cdev_update ------------------------ This function serves as an arbitrator to set the state of a cooling diff --git a/Documentation/driver-api/wmi.rst b/Documentation/driver-api/wmi.rst index 6ca58c8249e548..4e8dbdb1fc6780 100644 --- a/Documentation/driver-api/wmi.rst +++ b/Documentation/driver-api/wmi.rst @@ -7,12 +7,11 @@ WMI Driver API The WMI driver core supports a more modern bus-based interface for interacting with WMI devices, and an older GUID-based interface. The latter interface is considered to be deprecated, so new WMI drivers should generally avoid it since -it has some issues with multiple WMI devices and events sharing the same GUIDs -and/or notification IDs. The modern bus-based interface instead maps each -WMI device to a :c:type:`struct wmi_device <wmi_device>`, so it supports -WMI devices sharing GUIDs and/or notification IDs. Drivers can then register -a :c:type:`struct wmi_driver <wmi_driver>`, which will be bound to compatible -WMI devices by the driver core. +it has some issues with multiple WMI devices sharing the same GUID. +The modern bus-based interface instead maps each WMI device to a +:c:type:`struct wmi_device <wmi_device>`, so it supports WMI devices sharing the +same GUID. Drivers can then register a :c:type:`struct wmi_driver <wmi_driver>` +which will be bound to compatible WMI devices by the driver core. .. kernel-doc:: include/linux/wmi.h :internal: diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst index 70380a2a01b452..8b8aeea71c685b 100644 --- a/Documentation/fault-injection/fault-injection.rst +++ b/Documentation/fault-injection/fault-injection.rst @@ -141,6 +141,14 @@ configuration of fault-injection capabilities. default is 'Y', setting it to 'N' will also inject failures into highmem/user allocations (__GFP_HIGHMEM allocations). 
+- /sys/kernel/debug/failslab/cache-filter + Format: { 'Y' | 'N' } + + default is 'N', setting it to 'Y' will only inject failures when + objects are requested from certain caches. + + Select the cache by writing '1' to /sys/kernel/slab/<cache>/failslab: + - /sys/kernel/debug/failslab/ignore-gfp-wait: - /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait: @@ -283,7 +291,7 @@ kernel may crash because it may not be able to handle the error. There are 4 types of errors defined in include/asm-generic/error-injection.h EI_ETYPE_NULL - This function will return `NULL` if it fails. e.g. return an allocateed + This function will return `NULL` if it fails. e.g. return an allocated object address. EI_ETYPE_ERRNO @@ -459,6 +467,18 @@ Application Examples losetup -d $DEVICE rm testfile.img +------------------------------------------------------------------------------ + +- Inject only skbuff allocation failures :: + + # mark skbuff_head_cache as faulty + echo 1 > /sys/kernel/slab/skbuff_head_cache/failslab + # Turn on cache filter (off by default) + echo 1 > /sys/kernel/debug/failslab/cache-filter + # Turn on fault injection + echo 1 > /sys/kernel/debug/failslab/times + echo 1 > /sys/kernel/debug/failslab/probability + Tool to run command with failslab or fail_page_alloc ---------------------------------------------------- diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt deleted file mode 100644 index 5a7508b8c967e2..00000000000000 --- a/Documentation/features/vm/PG_uncached/arch-support.txt +++ /dev/null @@ -1,30 +0,0 @@ -# -# Feature name: PG_uncached -# Kconfig: ARCH_USES_PG_UNCACHED -# description: arch supports the PG_uncached page flag -# - ----------------------- - | arch |status| - ----------------------- - | alpha: | TODO | - | arc: | TODO | - | arm: | TODO | - | arm64: | TODO | - | csky: | TODO | - | hexagon: | TODO | - | loongarch: | TODO | - | m68k: | TODO | - | microblaze: | TODO | - | mips: | TODO | - | nios2: | TODO | - | openrisc: | TODO | - | parisc: | TODO | - | powerpc: | TODO | - | riscv: | TODO | - | s390: | TODO | - | sh: | TODO | - | sparc: | TODO | - | um: | TODO | - | x86: | ok | - | xtensa: | TODO | - ----------------------- diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst index 1e0e0bb6fdf912..2bbf68b56b0d71 100644 --- a/Documentation/filesystems/9p.rst +++ b/Documentation/filesystems/9p.rst @@ -31,7 +31,7 @@ Other applications are described in the following papers: * PROSE I/O: Using 9p to enable Application Partitions http://plan9.escet.urjc.es/iwp9/cready/PROSE_iwp9_2006.pdf * VirtFS: A Virtualization Aware File System pass-through - http://goo.gl/3WPDg + https://kernel.org/doc/ols/2010/ols2010-pages-109-120.pdf Usage ===== @@ -48,11 +48,66 @@ For server running on QEMU host with virtio transport:: mount -t 9p -o trans=virtio <mount_tag> /mnt/9 -where mount_tag is the tag associated by the server to each of the exported +where mount_tag is the tag generated by the server to each of the exported mount points. Each 9P export is seen by the client as a virtio device with an associated "mount_tag" property. Available mount tags can be seen by reading /sys/bus/virtio/drivers/9pnet_virtio/virtio<n>/mount_tag files. 
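+
+For example, assuming the server exported a share under the tag ``host0`` (an
+illustrative name only; the actual tag depends on how the server was
+configured), the available tags can be listed and the share mounted with::
+
+    cat /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
+    mount -t 9p -o trans=virtio host0 /mnt/9
+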
+USBG Usage +========== + +To mount a 9p FS on a USB Host accessible via the gadget at runtime:: + + mount -t 9p -o trans=usbg,aname=/path/to/fs <device> /mnt/9 + +To mount a 9p FS on a USB Host accessible via the gadget as root filesystem:: + + root=<device> rootfstype=9p rootflags=trans=usbg,cache=loose,uname=root,access=0,dfltuid=0,dfltgid=0,aname=/path/to/rootfs + +where <device> is the tag associated by the usb gadget transport. +It is defined by the configfs instance name. + +USBG Example +============ + +The USB host exports a filesystem, while the gadget on the USB device +side makes it mountable. + +Diod (9pfs server) and the forwarder are on the development host, where +the root filesystem is actually stored. The gadget is initialized during +boot (or later) on the embedded board. Then the forwarder will find it +on the USB bus and start forwarding requests. + +In this case the 9p requests come from the device and are handled by the +host. The reason is that USB device ports are normally not available on +PCs, so a connection in the other direction would not work. + +When using the usbg transport, for now there is no native usb host +service capable of handling the requests from the gadget driver. For +this we have to use the extra python tool p9_fwd.py from tools/usb. + +Just start a 9pfs-capable network server like diod/nfs-ganesha, e.g.:: + + $ diod -f -n -d 0 -S -l 0.0.0.0:9999 -e $PWD + +Optionally scan your bus if there is more than one usbg gadget to find their path:: + + $ python $kernel_dir/tools/usb/p9_fwd.py list + + Bus | Addr | Manufacturer | Product | ID | Path + --- | ---- | ---------------- | ---------------- | --------- | ---- + 2 | 67 | unknown | unknown | 1d6b:0109 | 2-1.1.2 + 2 | 68 | unknown | unknown | 1d6b:0109 | 2-1.1.3 + +Then start the python transport:: + + $ python $kernel_dir/tools/usb/p9_fwd.py --path 2-1.1.2 connect -p 9999 + +After that the gadget driver can be used as described above. + +One use-case is to use it as an alternative to NFS root booting during +the development of embedded Linux devices. + Options ======= @@ -68,6 +123,7 @@ Options virtio connect to the next virtio channel available (from QEMU with trans_virtio module) rdma connect to a specified RDMA channel + usbg connect to a specified usb gadget channel ======== ============================================ uname=name user name to attempt mount as on the remote server. The diff --git a/Documentation/filesystems/autofs.rst b/Documentation/filesystems/autofs.rst index 3b6e38e646cd8c..1ac576458c69a8 100644 --- a/Documentation/filesystems/autofs.rst +++ b/Documentation/filesystems/autofs.rst @@ -18,7 +18,7 @@ key advantages: 2. The names and locations of filesystems can be stored in a remote database and can change at any time. The content - in that data base at the time of access will be used to provide + in that database at the time of access will be used to provide a target for the access. The interpretation of names in the filesystem can even be programmatic rather than database-backed, allowing wildcards for example, and can vary based on the user who @@ -423,7 +423,7 @@ The available ioctl commands are: and objects are expired if the are not in use. **AUTOFS_EXP_FORCED** causes the in use status to be ignored - and objects are expired ieven if they are in use. This assumes + and objects are expired even if they are in use. This assumes that the daemon has requested this because it is capable of performing the umount. 
diff --git a/Documentation/filesystems/bcachefs/CodingStyle.rst b/Documentation/filesystems/bcachefs/CodingStyle.rst index 0c45829a4899af..01de555e21d857 100644 --- a/Documentation/filesystems/bcachefs/CodingStyle.rst +++ b/Documentation/filesystems/bcachefs/CodingStyle.rst @@ -175,7 +175,7 @@ errors in our thinking by running our code and seeing what happens. If your time is being wasted because your tools are bad or too slow - don't accept it, fix it. -Put effort into your documentation, commmit messages, and code comments - but +Put effort into your documentation, commit messages, and code comments - but don't go overboard. A good commit message is wonderful - but if the information was important enough to go in a commit message, ask yourself if it would be even better as a code comment. diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst index 13e4b18e5dbbb9..0e2fac7a16da1a 100644 --- a/Documentation/filesystems/fsverity.rst +++ b/Documentation/filesystems/fsverity.rst @@ -86,6 +86,16 @@ authenticating fs-verity file hashes include: signature in their "security.ima" extended attribute, as controlled by the IMA policy. For more information, see the IMA documentation. +- Integrity Policy Enforcement (IPE). IPE supports enforcing access + control decisions based on immutable security properties of files, + including those protected by fs-verity's built-in signatures. + "IPE policy" specifically allows for the authorization of fs-verity + files using properties ``fsverity_digest`` for identifying + files by their verity digest, and ``fsverity_signature`` to authorize + files with a verified fs-verity's built-in signature. For + details on configuring IPE policies and understanding its operational + modes, please refer to :doc:`IPE admin guide `. + - Trusted userspace code in combination with `Built-in signature verification`_. This approach should be used only with great care. @@ -457,7 +467,11 @@ Enabling this option adds the following: On success, the ioctl persists the signature alongside the Merkle tree. Then, any time the file is opened, the kernel verifies the file's actual digest against this signature, using the certificates - in the ".fs-verity" keyring. + in the ".fs-verity" keyring. This verification happens as long as the + file's signature exists, regardless of the state of the sysctl variable + "fs.verity.require_signatures" described in the next item. The IPE LSM + relies on this behavior to recognize and label fsverity files + that contain a verified built-in fsverity signature. 3. A new sysctl "fs.verity.require_signatures" is made available. When set to 1, the kernel requires that all verity files have a @@ -481,7 +495,7 @@ be carefully considered before using them: - Builtin signature verification does *not* make the kernel enforce that any files actually have fs-verity enabled. Thus, it is not a - complete authentication policy. Currently, if it is used, the only + complete authentication policy. Currently, if it is used, one way to complete the authentication policy is for trusted userspace code to explicitly check whether files have fs-verity enabled with a signature before they are accessed. (With @@ -490,6 +504,15 @@ be carefully considered before using them: could just store the signature alongside the file and verify it itself using a cryptographic library, instead of using this feature. 
+- Another approach is to utilize fs-verity builtin signature + verification in conjunction with the IPE LSM, which supports defining + a kernel-enforced, system-wide authentication policy that allows only + files with a verified fs-verity builtin signature to perform certain + operations, such as execution. Note that IPE doesn't require + fs.verity.require_signatures=1. + Please refer to :doc:`IPE admin guide ` for + more details. + - A file's builtin signature can only be set at the same time that fs-verity is being enabled on the file. Changing or deleting the builtin signature later requires re-creating the file. diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst index ac0af679e61e5c..77930c77fcfe6e 100644 --- a/Documentation/filesystems/idmappings.rst +++ b/Documentation/filesystems/idmappings.rst @@ -821,7 +821,7 @@ the same idmapping to the mount. We now perform three steps: /* Map the userspace id down into a kernel id in the filesystem's idmapping. */ make_kuid(u0:k20000:r10000, u1000) = k21000 -2. Verify that the caller's kernel ids can be mapped to userspace ids in the +3. Verify that the caller's kernel ids can be mapped to userspace ids in the filesystem's idmapping:: from_kuid(u0:k20000:r10000, k21000) = u1000 @@ -854,10 +854,10 @@ The same translation algorithm works with the third example. /* Map the userspace id down into a kernel id in the filesystem's idmapping. */ make_kuid(u0:k0:r4294967295, u1000) = k1000 -2. Verify that the caller's kernel ids can be mapped to userspace ids in the +3. Verify that the caller's kernel ids can be mapped to userspace ids in the filesystem's idmapping:: - from_kuid(u0:k0:r4294967295, k21000) = u1000 + from_kuid(u0:k0:r4294967295, k1000) = u1000 So the ownership that lands on disk will be ``u1000``. @@ -994,7 +994,7 @@ from above::: /* Map the userspace id down into a kernel id in the filesystem's idmapping. */ make_kuid(u0:k0:r4294967295, u1000) = k1000 -2. Verify that the caller's filesystem ids can be mapped to userspace ids in the +3. Verify that the caller's filesystem ids can be mapped to userspace ids in the filesystem's idmapping:: from_kuid(u0:k0:r4294967295, k1000) = u1000 diff --git a/Documentation/filesystems/iomap/design.rst b/Documentation/filesystems/iomap/design.rst index f8ee3427bc1a5f..b0d0188a095e55 100644 --- a/Documentation/filesystems/iomap/design.rst +++ b/Documentation/filesystems/iomap/design.rst @@ -142,9 +142,9 @@ Definitions * **pure overwrite**: A write operation that does not require any metadata or zeroing operations to perform during either submission or completion. - This implies that the fileystem must have already allocated space + This implies that the filesystem must have already allocated space on disk as ``IOMAP_MAPPED`` and the filesystem must not place any - constaints on IO alignment or size. + constraints on IO alignment or size. The only constraints on I/O alignment are device level (minimum I/O size and alignment, typically sector size). @@ -165,7 +165,7 @@ structure below: u16 flags; struct block_device *bdev; struct dax_device *dax_dev; - voidw *inline_data; + void *inline_data; void *private; const struct iomap_folio_ops *folio_ops; u64 validity_cookie; @@ -394,7 +394,7 @@ iomap is concerned: * The **upper** level primitive is provided by the filesystem to coordinate access to different iomap operations. 
- The exact primitive is specifc to the filesystem and operation, + The exact primitive is specific to the filesystem and operation, but is often a VFS inode, pagecache invalidation, or folio lock. For example, a filesystem might take ``i_rwsem`` before calling ``iomap_file_buffered_write`` and ``iomap_file_unshare`` to prevent @@ -426,7 +426,7 @@ iomap is concerned: The exact locking requirements are specific to the filesystem; for certain operations, some of these locks can be elided. -All further mention of locking are *recommendations*, not mandates. +All further mentions of locking are *recommendations*, not mandates. Each filesystem author must figure out the locking for themself. Bugs and Limitations diff --git a/Documentation/filesystems/journalling.rst b/Documentation/filesystems/journalling.rst index e18f90ffc6fd22..0254f7d5742993 100644 --- a/Documentation/filesystems/journalling.rst +++ b/Documentation/filesystems/journalling.rst @@ -137,7 +137,7 @@ Fast commits JBD2 to also allows you to perform file-system specific delta commits known as fast commits. In order to use fast commits, you will need to set following -callbacks that perform correspodning work: +callbacks that perform corresponding work: `journal->j_fc_cleanup_cb`: Cleanup function called after every full commit and fast commit. @@ -149,7 +149,7 @@ File system is free to perform fast commits as and when it wants as long as it gets permission from JBD2 to do so by calling the function :c:func:`jbd2_fc_begin_commit()`. Once a fast commit is done, the client file system should tell JBD2 about it by calling -:c:func:`jbd2_fc_end_commit()`. If file system wants JBD2 to perform a full +:c:func:`jbd2_fc_end_commit()`. If the file system wants JBD2 to perform a full commit immediately after stopping the fast commit it can do so by calling :c:func:`jbd2_fc_end_commit_fallback()`. This is useful if fast commit operation fails for some reason and the only way to guarantee consistency is for JBD2 to @@ -199,7 +199,7 @@ Journal Level .. kernel-doc:: fs/jbd2/recovery.c :internal: -Transasction Level +Transaction Level ~~~~~~~~~~~~~~~~~~ .. 
kernel-doc:: fs/jbd2/transaction.c diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index e664061ed55dc1..f5e3676db954b5 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -251,10 +251,10 @@ prototypes:: void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, - struct page **pagep, void **fsdata); + struct folio **foliop, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata); + struct folio *folio, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t start, size_t len); bool (*release_folio)(struct folio *, gfp_t); @@ -280,7 +280,7 @@ read_folio: yes, unlocks shared writepages: dirty_folio: maybe readahead: yes, unlocks shared -write_begin: locks the page exclusive +write_begin: locks the folio exclusive write_end: yes, unlocks exclusive bmap: invalidate_folio: yes exclusive diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 4cc657d743f7f3..f0d2cb257bb8d9 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -116,7 +116,7 @@ The following services are provided: * Handle local caching, allowing cached data and server-read data to be interleaved for a single request. - * Handle clearing of bufferage that aren't on the server. + * Handle clearing of bufferage that isn't on the server. * Handle retrying of reads that failed, switching reads from the cache to the server as necessary. diff --git a/Documentation/filesystems/nfs/index.rst b/Documentation/filesystems/nfs/index.rst index 8536134f31fdd6..95c2c009874cef 100644 --- a/Documentation/filesystems/nfs/index.rst +++ b/Documentation/filesystems/nfs/index.rst @@ -8,6 +8,7 @@ NFS client-identifier exporting + localio pnfs rpc-cache rpc-server-gss diff --git a/Documentation/filesystems/nfs/localio.rst b/Documentation/filesystems/nfs/localio.rst new file mode 100644 index 00000000000000..bd1967e2eab329 --- /dev/null +++ b/Documentation/filesystems/nfs/localio.rst @@ -0,0 +1,357 @@ +=========== +NFS LOCALIO +=========== + +Overview +======== + +The LOCALIO auxiliary RPC protocol allows the Linux NFS client and +server to reliably handshake to determine if they are on the same +host. Select "NFS client and server support for LOCALIO auxiliary +protocol" in menuconfig to enable CONFIG_NFS_LOCALIO in the kernel +config (both CONFIG_NFS_FS and CONFIG_NFSD must also be enabled). + +Once an NFS client and server handshake as "local", the client will +bypass the network RPC protocol for read, write and commit operations. +Due to this XDR and RPC bypass, these operations will operate faster. + +The LOCALIO auxiliary protocol's implementation, which uses the same +connection as NFS traffic, follows the pattern established by the NFS +ACL protocol extension. + +The LOCALIO auxiliary protocol is needed to allow robust discovery of +clients local to their servers. In a private implementation that +preceded use of this LOCALIO protocol, a fragile sockaddr network +address based match against all local network interfaces was attempted. +But unlike the LOCALIO protocol, the sockaddr-based matching didn't +handle use of iptables or containers. 
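+
+As a minimal sketch (assuming a kernel source tree with the ``scripts/config``
+helper; adapt the fragment to your base configuration), the configuration
+options mentioned above can be enabled with::
+
+    ./scripts/config -e NFS_FS -e NFSD -e NFS_LOCALIO
+    make olddefconfig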
+ +The robust handshake between local client and server is just the +beginning, the ultimate use case this locality makes possible is the +client is able to open files and issue reads, writes and commits +directly to the server without having to go over the network. The +requirement is to perform these loopback NFS operations as efficiently +as possible, this is particularly useful for container use cases +(e.g. kubernetes) where it is possible to run an IO job local to the +server. + +The performance advantage realized from LOCALIO's ability to bypass +using XDR and RPC for reads, writes and commits can be extreme, e.g.: + +fio for 20 secs with directio, qd of 8, 16 libaio threads: + - With LOCALIO: + 4K read: IOPS=979k, BW=3825MiB/s (4011MB/s)(74.7GiB/20002msec) + 4K write: IOPS=165k, BW=646MiB/s (678MB/s)(12.6GiB/20002msec) + 128K read: IOPS=402k, BW=49.1GiB/s (52.7GB/s)(982GiB/20002msec) + 128K write: IOPS=11.5k, BW=1433MiB/s (1503MB/s)(28.0GiB/20004msec) + + - Without LOCALIO: + 4K read: IOPS=79.2k, BW=309MiB/s (324MB/s)(6188MiB/20003msec) + 4K write: IOPS=59.8k, BW=234MiB/s (245MB/s)(4671MiB/20002msec) + 128K read: IOPS=33.9k, BW=4234MiB/s (4440MB/s)(82.7GiB/20004msec) + 128K write: IOPS=11.5k, BW=1434MiB/s (1504MB/s)(28.0GiB/20011msec) + +fio for 20 secs with directio, qd of 8, 1 libaio thread: + - With LOCALIO: + 4K read: IOPS=230k, BW=898MiB/s (941MB/s)(17.5GiB/20001msec) + 4K write: IOPS=22.6k, BW=88.3MiB/s (92.6MB/s)(1766MiB/20001msec) + 128K read: IOPS=38.8k, BW=4855MiB/s (5091MB/s)(94.8GiB/20001msec) + 128K write: IOPS=11.4k, BW=1428MiB/s (1497MB/s)(27.9GiB/20001msec) + + - Without LOCALIO: + 4K read: IOPS=77.1k, BW=301MiB/s (316MB/s)(6022MiB/20001msec) + 4K write: IOPS=32.8k, BW=128MiB/s (135MB/s)(2566MiB/20001msec) + 128K read: IOPS=24.4k, BW=3050MiB/s (3198MB/s)(59.6GiB/20001msec) + 128K write: IOPS=11.4k, BW=1430MiB/s (1500MB/s)(27.9GiB/20001msec) + +FAQ +=== + +1. What are the use cases for LOCALIO? + + a. Workloads where the NFS client and server are on the same host + realize improved IO performance. In particular, it is common when + running containerised workloads for jobs to find themselves + running on the same host as the knfsd server being used for + storage. + +2. What are the requirements for LOCALIO? + + a. Bypass use of the network RPC protocol as much as possible. This + includes bypassing XDR and RPC for open, read, write and commit + operations. + b. Allow client and server to autonomously discover if they are + running local to each other without making any assumptions about + the local network topology. + c. Support the use of containers by being compatible with relevant + namespaces (e.g. network, user, mount). + d. Support all versions of NFS. NFSv3 is of particular importance + because it has wide enterprise usage and pNFS flexfiles makes use + of it for the data path. + +3. Why doesn’t LOCALIO just compare IP addresses or hostnames when + deciding if the NFS client and server are co-located on the same + host? + + Since one of the main use cases is containerised workloads, we cannot + assume that IP addresses will be shared between the client and + server. This sets up a requirement for a handshake protocol that + needs to go over the same connection as the NFS traffic in order to + identify that the client and the server really are running on the + same host. The handshake uses a secret that is sent over the wire, + and can be verified by both parties by comparing with a value stored + in shared kernel memory if they are truly co-located. + +4. 
Does LOCALIO improve pNFS flexfiles? + + Yes, LOCALIO complements pNFS flexfiles by allowing it to take + advantage of NFS client and server locality. Policy that initiates + client IO as closely to the server where the data is stored naturally + benefits from the data path optimization LOCALIO provides. + +5. Why not develop a new pNFS layout to enable LOCALIO? + + A new pNFS layout could be developed, but doing so would put the + onus on the server to somehow discover that the client is co-located + when deciding to hand out the layout. + There is value in a simpler approach (as provided by LOCALIO) that + allows the NFS client to negotiate and leverage locality without + requiring more elaborate modeling and discovery of such locality in a + more centralized manner. + +6. Why is having the client perform a server-side file OPEN, without + using RPC, beneficial? Is the benefit pNFS specific? + + Avoiding the use of XDR and RPC for file opens is beneficial to + performance regardless of whether pNFS is used. Especially when + dealing with small files its best to avoid going over the wire + whenever possible, otherwise it could reduce or even negate the + benefits of avoiding the wire for doing the small file I/O itself. + Given LOCALIO's requirements the current approach of having the + client perform a server-side file open, without using RPC, is ideal. + If in the future requirements change then we can adapt accordingly. + +7. Why is LOCALIO only supported with UNIX Authentication (AUTH_UNIX)? + + Strong authentication is usually tied to the connection itself. It + works by establishing a context that is cached by the server, and + that acts as the key for discovering the authorisation token, which + can then be passed to rpc.mountd to complete the authentication + process. On the other hand, in the case of AUTH_UNIX, the credential + that was passed over the wire is used directly as the key in the + upcall to rpc.mountd. This simplifies the authentication process, and + so makes AUTH_UNIX easier to support. + +8. How do export options that translate RPC user IDs behave for LOCALIO + operations (eg. root_squash, all_squash)? + + Export options that translate user IDs are managed by nfsd_setuser() + which is called by nfsd_setuser_and_check_port() which is called by + __fh_verify(). So they get handled exactly the same way for LOCALIO + as they do for non-LOCALIO. + +9. How does LOCALIO make certain that object lifetimes are managed + properly given NFSD and NFS operate in different contexts? + + See the detailed "NFS Client and Server Interlock" section below. + +RPC +=== + +The LOCALIO auxiliary RPC protocol consists of a single "UUID_IS_LOCAL" +RPC method that allows the Linux NFS client to verify the local Linux +NFS server can see the nonce (single-use UUID) the client generated and +made available in nfs_common. This protocol isn't part of an IETF +standard, nor does it need to be considering it is Linux-to-Linux +auxiliary RPC protocol that amounts to an implementation detail. + +The UUID_IS_LOCAL method encodes the client generated uuid_t in terms of +the fixed UUID_SIZE (16 bytes). The fixed size opaque encode and decode +XDR methods are used instead of the less efficient variable sized +methods. 
+ +The RPC program number for the NFS_LOCALIO_PROGRAM is 400122 (as assigned +by IANA, see https://www.iana.org/assignments/rpc-program-numbers/ ): +Linux Kernel Organization 400122 nfslocalio + +The LOCALIO protocol spec in rpcgen syntax is:: + + /* raw RFC 9562 UUID */ + #define UUID_SIZE 16 + typedef u8 uuid_t; + + program NFS_LOCALIO_PROGRAM { + version LOCALIO_V1 { + void + NULL(void) = 0; + + void + UUID_IS_LOCAL(uuid_t) = 1; + } = 1; + } = 400122; + +LOCALIO uses the same transport connection as NFS traffic. As such, +LOCALIO is not registered with rpcbind. + +NFS Common and Client/Server Handshake +====================================== + +fs/nfs_common/nfslocalio.c provides interfaces that enable an NFS client +to generate a nonce (single-use UUID) and associated short-lived +nfs_uuid_t struct, register it with nfs_common for subsequent lookup and +verification by the NFS server and if matched the NFS server populates +members in the nfs_uuid_t struct. The NFS client then uses nfs_common to +transfer the nfs_uuid_t from its nfs_uuids to the nn->nfsd_serv +clients_list from the nfs_common's uuids_list. See: +fs/nfs/localio.c:nfs_local_probe() + +nfs_common's nfs_uuids list is the basis for LOCALIO enablement, as such +it has members that point to nfsd memory for direct use by the client +(e.g. 'net' is the server's network namespace, through it the client can +access nn->nfsd_serv with proper rcu read access). It is this client +and server synchronization that enables advanced usage and lifetime of +objects to span from the host kernel's nfsd to per-container knfsd +instances that are connected to nfs client's running on the same local +host. + +NFS Client and Server Interlock +=============================== + +LOCALIO provides the nfs_uuid_t object and associated interfaces to +allow proper network namespace (net-ns) and NFSD object refcounting: + + We don't want to keep a long-term counted reference on each NFSD's + net-ns in the client because that prevents a server container from + completely shutting down. + + So we avoid taking a reference at all and rely on the per-cpu + reference to the server (detailed below) being sufficient to keep + the net-ns active. This involves allowing the NFSD's net-ns exit + code to iterate all active clients and clear their ->net pointers + (which are needed to find the per-cpu-refcount for the nfsd_serv). + + Details: + + - Embed nfs_uuid_t in nfs_client. nfs_uuid_t provides a list_head + that can be used to find the client. It does add the 16-byte + uuid_t to nfs_client so it is bigger than needed (given that + uuid_t is only used during the initial NFS client and server + LOCALIO handshake to determine if they are local to each other). + If that is really a problem we can find a fix. + + - When the nfs server confirms that the uuid_t is local, it moves + the nfs_uuid_t onto a per-net-ns list in NFSD's nfsd_net. + + - When each server's net-ns is shutting down - in a "pre_exit" + handler, all these nfs_uuid_t have their ->net cleared. There is + an rcu_synchronize() call between pre_exit() handlers and exit() + handlers so any caller that sees nfs_uuid_t ->net as not NULL can + safely manage the per-cpu-refcount for nfsd_serv. + + - The client's nfs_uuid_t is passed to nfsd_open_local_fh() so it + can safely dereference ->net in a private rcu_read_lock() section + to allow safe access to the associated nfsd_net and nfsd_serv. 
+ +So LOCALIO required the introduction and use of NFSD's percpu_ref to +interlock nfsd_destroy_serv() and nfsd_open_local_fh(), to ensure each +nn->nfsd_serv is not destroyed while in use by nfsd_open_local_fh(), and +warrants a more detailed explanation: + + nfsd_open_local_fh() uses nfsd_serv_try_get() before opening its + nfsd_file handle and then the caller (NFS client) must drop the + reference for the nfsd_file and associated nn->nfsd_serv using + nfs_file_put_local() once it has completed its IO. + + This interlock working relies heavily on nfsd_open_local_fh() being + afforded the ability to safely deal with the possibility that the + NFSD's net-ns (and nfsd_net by association) may have been destroyed + by nfsd_destroy_serv() via nfsd_shutdown_net() -- which is only + possible given the nfs_uuid_t ->net pointer managemenet detailed + above. + +All told, this elaborate interlock of the NFS client and server has been +verified to fix an easy to hit crash that would occur if an NFSD +instance running in a container, with a LOCALIO client mounted, is +shutdown. Upon restart of the container and associated NFSD the client +would go on to crash due to NULL pointer dereference that occurred due +to the LOCALIO client's attempting to nfsd_open_local_fh(), using +nn->nfsd_serv, without having a proper reference on nn->nfsd_serv. + +NFS Client issues IO instead of Server +====================================== + +Because LOCALIO is focused on protocol bypass to achieve improved IO +performance, alternatives to the traditional NFS wire protocol (SUNRPC +with XDR) must be provided to access the backing filesystem. + +See fs/nfs/localio.c:nfs_local_open_fh() and +fs/nfsd/localio.c:nfsd_open_local_fh() for the interface that makes +focused use of select nfs server objects to allow a client local to a +server to open a file pointer without needing to go over the network. + +The client's fs/nfs/localio.c:nfs_local_open_fh() will call into the +server's fs/nfsd/localio.c:nfsd_open_local_fh() and carefully access +both the associated nfsd network namespace and nn->nfsd_serv in terms of +RCU. If nfsd_open_local_fh() finds that the client no longer sees valid +nfsd objects (be it struct net or nn->nfsd_serv) it returns -ENXIO +to nfs_local_open_fh() and the client will try to reestablish the +LOCALIO resources needed by calling nfs_local_probe() again. This +recovery is needed if/when an nfsd instance running in a container were +to reboot while a LOCALIO client is connected to it. + +Once the client has an open nfsd_file pointer it will issue reads, +writes and commits directly to the underlying local filesystem (normally +done by the nfs server). As such, for these operations, the NFS client +is issuing IO to the underlying local filesystem that it is sharing with +the NFS server. See: fs/nfs/localio.c:nfs_local_doio() and +fs/nfs/localio.c:nfs_local_commit(). + +Security +======== + +Localio is only supported when UNIX-style authentication (AUTH_UNIX, aka +AUTH_SYS) is used. + +Care is taken to ensure the same NFS security mechanisms are used +(authentication, etc) regardless of whether LOCALIO or regular NFS +access is used. The auth_domain established as part of the traditional +NFS client access to the NFS server is also used for LOCALIO. + +Relative to containers, LOCALIO gives the client access to the network +namespace the server has. This is required to allow the client to access +the server's per-namespace nfsd_net struct. 
With traditional NFS, the +client is afforded this same level of access (albeit in terms of the NFS +protocol via SUNRPC). No other namespaces (user, mount, etc) have been +altered or purposely extended from the server to the client. + +Testing +======= + +The LOCALIO auxiliary protocol and associated NFS LOCALIO read, write +and commit access have proven stable against various test scenarios: + +- Client and server both on the same host. + +- All permutations of client and server support enablement for both + local and remote client and server. + +- Testing against NFS storage products that don't support the LOCALIO + protocol was also performed. + +- Client on host, server within a container (for both v3 and v4.2). + The container testing was in terms of podman managed containers and + includes successful container stop/restart scenario. + +- Formalizing these test scenarios in terms of existing test + infrastructure is on-going. Initial regular coverage is provided in + terms of ktest running xfstests against a LOCALIO-enabled NFS loopback + mount configuration, and includes lockdep and KASAN coverage, see: + https://evilpiepirate.org/~testdashboard/ci?user=snitzer&branch=snitm-nfs-next + https://github.com/koverstreet/ktest + +- Various kdevops testing (in terms of "Chuck's BuildBot") has been + performed to regularly verify the LOCALIO changes haven't caused any + regressions to non-LOCALIO NFS use cases. + +- All of Hammerspace's various sanity tests pass with LOCALIO enabled + (this includes numerous pNFS and flexfiles tests). diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst index 16551440144183..34364471234097 100644 --- a/Documentation/filesystems/overlayfs.rst +++ b/Documentation/filesystems/overlayfs.rst @@ -367,8 +367,11 @@ Metadata only copy up When the "metacopy" feature is enabled, overlayfs will only copy up metadata (as opposed to whole file), when a metadata specific operation -like chown/chmod is performed. Full file will be copied up later when -file is opened for WRITE operation. +like chown/chmod is performed. An upper file in this state is marked with +"trusted.overlayfs.metacopy" xattr which indicates that the upper file +contains no data. The data will be copied up later when file is opened for +WRITE operation. After the lower file's data is copied up, +the "trusted.overlayfs.metacopy" xattr is removed from the upper file. In other words, this is delayed data copy up operation and data is copied up when there is a need to actually modify data. diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 6e903a903f8f69..0b18af3f954eb7 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -810,7 +810,7 @@ cache in your filesystem. The following members are defined: struct page **pagep, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata); + struct folio *folio, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t start, size_t len); bool (*release_folio)(struct folio *, gfp_t); @@ -913,8 +913,7 @@ cache in your filesystem. The following members are defined: stop attempting I/O, it can simply return. The caller will remove the remaining pages from the address space, unlock them and decrement the page refcount. Set PageUptodate if the I/O - completes successfully. 
Setting PageError on any page will be - ignored; simply unlock the page if an I/O error occurs. + completes successfully. ``write_begin`` Called by the generic buffered write code to ask the filesystem @@ -926,12 +925,12 @@ cache in your filesystem. The following members are defined: (if they haven't been read already) so that the updated blocks can be written out properly. - The filesystem must return the locked pagecache page for the - specified offset, in ``*pagep``, for the caller to write into. + The filesystem must return the locked pagecache folio for the + specified offset, in ``*foliop``, for the caller to write into. It must be able to cope with short writes (where the length passed to write_begin is greater than the number of bytes copied - into the page). + into the folio). A void * may be returned in fsdata, which then gets passed into write_end. @@ -944,8 +943,8 @@ cache in your filesystem. The following members are defined: called. len is the original len passed to write_begin, and copied is the amount that was able to be copied. - The filesystem must take care of unlocking the page and - releasing it refcount, and updating i_size. + The filesystem must take care of unlocking the folio, + decrementing its refcount, and updating i_size. Returns < 0 on failure, otherwise the number of bytes (<= 'copied') that were able to be copied into pagecache. diff --git a/Documentation/gpu/amdgpu/driver-core.rst b/Documentation/gpu/amdgpu/driver-core.rst index 467e6843aef633..32723a925377ee 100644 --- a/Documentation/gpu/amdgpu/driver-core.rst +++ b/Documentation/gpu/amdgpu/driver-core.rst @@ -179,4 +179,4 @@ IP Blocks :doc: IP Blocks .. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h - :identifiers: amd_ip_block_type amd_ip_funcs + :identifiers: amd_ip_block_type amd_ip_funcs DC_DEBUG_MASK diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index 8435e8621cc087..c3e58856f75b36 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -181,7 +181,7 @@ Bridge Operations Bridge Connector Helper ----------------------- -.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c +.. kernel-doc:: drivers/gpu/drm/display/drm_bridge_connector.c :doc: overview @@ -204,7 +204,7 @@ MIPI-DSI bridge operation Bridge Connector Helper Reference --------------------------------- -.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c +.. 
kernel-doc:: drivers/gpu/drm/display/drm_bridge_connector.c :export: Panel-Bridge Helper Reference diff --git a/Documentation/gpu/introduction.rst b/Documentation/gpu/introduction.rst index b7c0baf97dbee2..3cd0c8860b9494 100644 --- a/Documentation/gpu/introduction.rst +++ b/Documentation/gpu/introduction.rst @@ -154,11 +154,11 @@ Conference talks * `An Overview of the Linux and Userspace Graphics Stack `_ - Paul Kocialkowski (2020) * `Getting pixels on screen on Linux: introduction to Kernel Mode Setting `_ - Simon Ser (2020) -* `Everything Great about Upstream Graphics `_ - Daniel Vetter (2019) +* `Everything Great about Upstream Graphics `_ - Simona Vetter (2019) * `An introduction to the Linux DRM subsystem `_ - Maxime Ripard (2017) -* `Embrace the Atomic (Display) Age `_ - Daniel Vetter (2016) +* `Embrace the Atomic (Display) Age `_ - Simona Vetter (2016) * `Anatomy of an Atomic KMS Driver `_ - Laurent Pinchart (2015) -* `Atomic Modesetting for Drivers `_ - Daniel Vetter (2015) +* `Atomic Modesetting for Drivers `_ - Simona Vetter (2015) * `Anatomy of an Embedded KMS Driver `_ - Laurent Pinchart (2013) Slides and articles @@ -169,8 +169,8 @@ Slides and articles * `Understanding the Linux Graphics Stack `_ - Bootlin (2022) * `DRM KMS overview `_ - STMicroelectronics (2021) * `Linux graphic stack `_ - Nathan Gauër (2017) -* `Atomic mode setting design overview, part 1 `_ - Daniel Vetter (2015) -* `Atomic mode setting design overview, part 2 `_ - Daniel Vetter (2015) +* `Atomic mode setting design overview, part 1 `_ - Simona Vetter (2015) +* `Atomic mode setting design overview, part 2 `_ - Simona Vetter (2015) * `The DRM/KMS subsystem from a newbie’s point of view `_ - Boris Brezillon (2014) * `A brief introduction to the Linux graphics stack `_ - Iago Toral (2014) * `The Linux Graphics Stack `_ - Jasper St. Pierre (2012) diff --git a/Documentation/gpu/komeda-kms.rst b/Documentation/gpu/komeda-kms.rst index 633a016563ae4a..eaea40eb725b75 100644 --- a/Documentation/gpu/komeda-kms.rst +++ b/Documentation/gpu/komeda-kms.rst @@ -86,7 +86,7 @@ types of working mode: - Single display mode Two pipelines work together to drive only one display output. - On this mode, pipeline_B doesn't work indenpendently, but outputs its + On this mode, pipeline_B doesn't work independently, but outputs its composition result into pipeline_A, and its pixel timing also derived from pipeline_A.timing_ctrlr. The pipeline_B works just like a "slave" of pipeline_A(master) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 2ea6ffc9b22bae..2b281e3c75a4b2 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -37,7 +37,7 @@ Audit each individual driver, make sure it'll work with the generic implementation (there's lots of outdated locking leftovers in various implementations), and then remove it. -Contact: Daniel Vetter, respective driver maintainers +Contact: Simona Vetter, respective driver maintainers Level: Intermediate @@ -61,7 +61,7 @@ do by directly using the new atomic helper driver callbacks. .. [2] https://lwn.net/Articles/653071/ .. [3] https://lwn.net/Articles/653466/ -Contact: Daniel Vetter, respective driver maintainers +Contact: Simona Vetter, respective driver maintainers Level: Advanced @@ -75,7 +75,7 @@ helper should also be moved from drm_plane_helper.c to the atomic helpers, to avoid confusion - the other helpers in that file are all deprecated legacy helpers. 
-Contact: Ville Syrjälä, Daniel Vetter, driver maintainers +Contact: Ville Syrjälä, Simona Vetter, driver maintainers Level: Advanced @@ -97,7 +97,7 @@ with the current helpers: - Then we could go through all the drivers and remove the more-or-less confused checks for plane_state->fb and plane_state->crtc. -Contact: Daniel Vetter +Contact: Simona Vetter Level: Advanced @@ -116,7 +116,7 @@ Somewhat related is the legacy_cursor_update hack, which should be replaced with the new atomic_async_check/commit functionality in the helpers in drivers that still look at that flag. -Contact: Daniel Vetter, respective driver maintainers +Contact: Simona Vetter, respective driver maintainers Level: Advanced @@ -169,7 +169,7 @@ interfaces to fix these issues: ``_helper_funcs`` since they are not part of the core ABI. There's a ``FIXME`` comment in the kerneldoc for each such case in ``drm_crtc.h``. -Contact: Daniel Vetter +Contact: Simona Vetter Level: Intermediate @@ -194,7 +194,7 @@ performance-critical drivers it might also be better to go with a more fine-grained per-buffer object and per-context lockings scheme. Currently only the ``msm`` and `i915` drivers use ``struct_mutex``. -Contact: Daniel Vetter, respective driver maintainers +Contact: Simona Vetter, respective driver maintainers Level: Advanced @@ -251,7 +251,7 @@ being rewritten without dependencies on the fbdev module. Some of the helpers could further benefit from using struct iosys_map instead of raw pointers. -Contact: Thomas Zimmermann , Daniel Vetter +Contact: Thomas Zimmermann , Simona Vetter Level: Advanced @@ -297,7 +297,7 @@ Various hold-ups: version of the varios drm_gem_fb_create functions. Maybe called drm_gem_fb_create/_with_dirty/_with_funcs as needed. -Contact: Daniel Vetter +Contact: Simona Vetter Level: Intermediate @@ -329,7 +329,7 @@ everything after it has done the write-protect/mkwrite trickery: Might be good to also have some igt testcases for this. -Contact: Daniel Vetter, Noralf Tronnes +Contact: Simona Vetter, Noralf Tronnes Level: Advanced @@ -359,7 +359,7 @@ between setting up the &drm_driver structure and calling drm_dev_register(). - Once all drivers are converted, remove the load/unload callbacks. -Contact: Daniel Vetter +Contact: Simona Vetter Level: Intermediate @@ -422,7 +422,7 @@ The task is to use struct iosys_map where it makes sense. * TTM might benefit from using struct iosys_map internally. * Framebuffer copying and blitting helpers should operate on struct iosys_map. -Contact: Thomas Zimmermann , Christian König, Daniel Vetter +Contact: Thomas Zimmermann , Christian König, Simona Vetter Level: Intermediate @@ -475,25 +475,22 @@ Remove disable/unprepare in remove/shutdown in panel-simple and panel-edp As of commit d2aacaf07395 ("drm/panel: Check for already prepared/enabled in drm_panel"), we have a check in the drm_panel core to make sure nobody double-calls prepare/enable/disable/unprepare. Eventually that should probably -be turned into a WARN_ON() or somehow made louder, but right now we actually -expect it to trigger and so we don't want it to be too loud. - -Specifically, that warning will trigger for panel-edp and panel-simple at -shutdown time because those panels hardcode a call to drm_panel_disable() -and drm_panel_unprepare() at shutdown and remove time that they call regardless -of panel state. On systems with a properly coded DRM modeset driver that -calls drm_atomic_helper_shutdown() this is pretty much guaranteed to cause -the warning to fire. 
- -Unfortunately we can't safely remove the calls in panel-edp and panel-simple -until we're sure that all DRM modeset drivers that are used with those panels -properly call drm_atomic_helper_shutdown(). This TODO item is to validate -that all DRM modeset drivers used with panel-edp and panel-simple properly -call drm_atomic_helper_shutdown() and then remove the calls to -disable/unprepare from those panels. Alternatively, this TODO item could be -removed by convincing stakeholders that those calls are fine and downgrading -the error message in drm_panel_disable() / drm_panel_unprepare() to a -debug-level message. +be turned into a WARN_ON() or somehow made louder. + +At the moment, we expect that we may still encounter the warnings in the +drm_panel core when using panel-simple and panel-edp. Since those panel +drivers are used with a lot of different DRM modeset drivers they still +make an extra effort to disable/unprepare the panel themsevles at shutdown +time. Specifically we could still encounter those warnings if the panel +driver gets shutdown() _before_ the DRM modeset driver and the DRM modeset +driver properly calls drm_atomic_helper_shutdown() in its own shutdown() +callback. Warnings could be avoided in such a case by using something like +device links to ensure that the panel gets shutdown() after the DRM modeset +driver. + +Once all DRM modeset drivers are known to shutdown properly, the extra +calls to disable/unprepare in remove/shutdown in panel-simple and panel-edp +should be removed and this TODO item marked complete. Contact: Douglas Anderson @@ -561,7 +558,7 @@ This is a really varied tasks with lots of little bits and pieces: `_ for some example code that could be reused. -Contact: Daniel Vetter +Contact: Simona Vetter Level: Advanced @@ -590,7 +587,7 @@ There's a bunch of issues with it: this (together with the drm_minor->drm_device move) would allow us to remove debugfs_init. -Contact: Daniel Vetter +Contact: Simona Vetter Level: Intermediate @@ -611,7 +608,7 @@ Both these problems can be solved by switching over to drmm_kzalloc(), and the various convenience wrappers provided, e.g. drmm_crtc_alloc_with_planes(), drmm_universal_plane_alloc(), ... and so on. -Contact: Daniel Vetter +Contact: Simona Vetter Level: Intermediate @@ -631,7 +628,7 @@ cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over this problem for USB devices by fishing out the USB host controller device, as long as that supports DMA. Otherwise importing can still needlessly fail. -Contact: Thomas Zimmermann , Daniel Vetter +Contact: Thomas Zimmermann , Simona Vetter Level: Advanced @@ -712,7 +709,7 @@ Plan to fix this: 2. In all, only look at one of the three status bits set by the above helpers. 3. Remove the other two status bits. -Contact: Daniel Vetter +Contact: Simona Vetter Level: Intermediate diff --git a/Documentation/gpu/xe/xe_mm.rst b/Documentation/gpu/xe/xe_mm.rst index 6c8fd8b4a4667a..95864a4502dd94 100644 --- a/Documentation/gpu/xe/xe_mm.rst +++ b/Documentation/gpu/xe/xe_mm.rst @@ -7,6 +7,21 @@ Memory Management .. kernel-doc:: drivers/gpu/drm/xe/xe_bo_doc.h :doc: Buffer Objects (BO) +GGTT +==== + +.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt.c + :doc: Global Graphics Translation Table (GGTT) + +GGTT Internal API +----------------- + +.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt_types.h + :internal: + +.. 
kernel-doc:: drivers/gpu/drm/xe/xe_ggtt.c + :internal: + Pagetable building ================== diff --git a/Documentation/hid/intel-ish-hid.rst b/Documentation/hid/intel-ish-hid.rst index 55cbaa719a7942..2adc174fb576cd 100644 --- a/Documentation/hid/intel-ish-hid.rst +++ b/Documentation/hid/intel-ish-hid.rst @@ -404,6 +404,35 @@ For more detailed information, please refer to the flow descriptions provided be | ISHTP Driver | | ISH Bootloader | +---------------+ +-----------------+ +Vendor Custom Firmware Loading +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The firmware running inside ISH can be provided by Intel or developed by vendors using the Firmware Development Kit (FDK) provided by Intel. +Intel will upstream the Intel-built firmware to the ``linux-firmware.git`` repository, located under the path ``intel/ish/``. For the Lunar Lake platform, the Intel-built ISH firmware will be named ``ish_lnlm.bin``. +Vendors who wish to upstream their custom firmware should follow these guidelines for naming their firmware files: + +- The firmware filename should use one of the following patterns: + + - ``ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}_${PRODUCT_SKU_CRC32}.bin`` + - ``ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_SKU_CRC32}.bin`` + - ``ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}.bin`` + - ``ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}.bin`` + +- ``${intel_plat_gen}`` indicates the Intel platform generation (e.g., ``lnlm`` for Lunar Lake) and must not exceed 8 characters in length. +- ``${SYS_VENDOR_CRC32}`` is the CRC32 checksum of the ``sys_vendor`` value from the DMI field ``DMI_SYS_VENDOR``. +- ``${PRODUCT_NAME_CRC32}`` is the CRC32 checksum of the ``product_name`` value from the DMI field ``DMI_PRODUCT_NAME``. +- ``${PRODUCT_SKU_CRC32}`` is the CRC32 checksum of the ``product_sku`` value from the DMI field ``DMI_PRODUCT_SKU``. + +During system boot, the ISH Linux driver will attempt to load the firmware in the following order, prioritizing custom firmware with more precise matching patterns: + +1. ``intel/ish/ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}_${PRODUCT_SKU_CRC32}.bin`` +2. ``intel/ish/ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_SKU_CRC32}.bin`` +3. ``intel/ish/ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}_${PRODUCT_NAME_CRC32}.bin`` +4. ``intel/ish/ish_${intel_plat_gen}_${SYS_VENDOR_CRC32}.bin`` +5. ``intel/ish/ish_${intel_plat_gen}.bin`` + +The driver will load the first matching firmware and skip the rest. If no matching firmware is found, it will proceed to the next pattern in the specified order. If all searches fail, the default Intel firmware, listed last in the order above, will be loaded. + ISH Debugging ------------- diff --git a/Documentation/hwmon/hwmon-kernel-api.rst b/Documentation/hwmon/hwmon-kernel-api.rst index 6cacf7daf25c04..8297acfa3a2d82 100644 --- a/Documentation/hwmon/hwmon-kernel-api.rst +++ b/Documentation/hwmon/hwmon-kernel-api.rst @@ -38,8 +38,6 @@ register/unregister functions:: void hwmon_device_unregister(struct device *dev); - void devm_hwmon_device_unregister(struct device *dev); - char *hwmon_sanitize_name(const char *name); char *devm_hwmon_sanitize_name(struct device *dev, const char *name); @@ -64,11 +62,6 @@ monitoring device structure. This function must be called from the driver remove function if the hardware monitoring device was registered with hwmon_device_register_with_info. -devm_hwmon_device_unregister does not normally have to be called. 
It is only -needed for error handling, and only needed if the driver probe fails after -the call to devm_hwmon_device_register_with_info and if the automatic (device -managed) removal would be too late. - All supported hwmon device registration functions only accept valid device names. Device names including invalid characters (whitespace, '*', or '-') will be rejected. The 'name' parameter is mandatory. diff --git a/Documentation/hwmon/ina2xx.rst b/Documentation/hwmon/ina2xx.rst index 27d2e39bc8ac71..7f1939b40f74fe 100644 --- a/Documentation/hwmon/ina2xx.rst +++ b/Documentation/hwmon/ina2xx.rst @@ -99,6 +99,10 @@ Sysfs entries for ina226, ina230 and ina231 only ------------------------------------------------ ======================= ==================================================== +curr1_lcrit Critical low current +curr1_crit Critical high current +curr1_lcrit_alarm Current critical low alarm +curr1_crit_alarm Current critical high alarm in0_lcrit Critical low shunt voltage in0_crit Critical high shunt voltage in0_lcrit_alarm Shunt voltage critical low alarm diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst index 913c11390a457c..ea3b5be8fe4f7a 100644 --- a/Documentation/hwmon/index.rst +++ b/Documentation/hwmon/index.rst @@ -206,6 +206,7 @@ Hardware Monitoring Kernel Drivers sch5636 scpi-hwmon sfctemp + sg2042-mcu sht15 sht21 sht3x diff --git a/Documentation/hwmon/lm92.rst b/Documentation/hwmon/lm92.rst index c131b923ed36e2..d71cdb2af339fd 100644 --- a/Documentation/hwmon/lm92.rst +++ b/Documentation/hwmon/lm92.rst @@ -3,29 +3,29 @@ Kernel driver lm92 Supported chips: - * National Semiconductor LM92 + * National Semiconductor / Texas Instruments LM92 Prefix: 'lm92' Addresses scanned: I2C 0x48 - 0x4b - Datasheet: http://www.national.com/pf/LM/LM92.html + Datasheet: https://www.ti.com/lit/gpn/LM92 - * National Semiconductor LM76 + * National Semiconductor / Texas Instruments LM76 Prefix: 'lm92' - Addresses scanned: none, force parameter needed + Addresses scanned: none, must be instantiated explicitly - Datasheet: http://www.national.com/pf/LM/LM76.html + Datasheet: https://www.ti.com/lit/gpn/LM76 - * Maxim MAX6633/MAX6634/MAX6635 + * Maxim /Analog Devices MAX6633/MAX6634/MAX6635 Prefix: 'max6635' - Addresses scanned: none, force parameter needed + Addresses scanned: none, must be instantiated explicitly - Datasheet: http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074 + Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/max6633-max6635.pdf Authors: @@ -36,13 +36,13 @@ Authors: Description ----------- -This driver implements support for the National Semiconductor LM92 -temperature sensor. +This driver implements support for the National Semiconductor / Texas +Instruments LM92 temperature sensor. Each LM92 temperature sensor supports a single temperature sensor. There are alarms for high, low, and critical thresholds. There's also an hysteresis to control the thresholds for resetting alarms. -Support was added later for the LM76 and Maxim MAX6633/MAX6634/MAX6635, -which are mostly compatible. They have not all been tested, so you -may need to use the force parameter. +The driver also supports LM76 and Maxim MAX6633/MAX6634/MAX6635, which are +mostly compatible but do not have a vendor ID register and therefore must be +instantiated explicitly. 
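As a quick illustration of explicit instantiation (a sketch only; the bus
number and slave address below are examples and must match the actual
wiring), a sensor can be created from userspace via the i2c sysfs interface
described in Documentation/i2c/instantiating-devices.rst::

  # echo lm92 0x48 > /sys/bus/i2c/devices/i2c-0/new_device

The 'max6635' prefix is instantiated the same way, and a device created by
mistake can be removed again by writing its address to the corresponding
delete_device file.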
diff --git a/Documentation/hwmon/max1619.rst b/Documentation/hwmon/max1619.rst index e25956e70f7322..b5fc175ae18d41 100644 --- a/Documentation/hwmon/max1619.rst +++ b/Documentation/hwmon/max1619.rst @@ -27,7 +27,3 @@ All temperature values are given in degrees Celsius. Resolution is 1.0 degree for the local temperature and for the remote temperature. Only the external sensor has high and low limits. - -The max1619 driver will not update its values more frequently than every -other second; reading them more often will do no harm, but will return -'old' values. diff --git a/Documentation/hwmon/oxp-sensors.rst b/Documentation/hwmon/oxp-sensors.rst index 55b1ef61625ebc..581c4dafbfa13e 100644 --- a/Documentation/hwmon/oxp-sensors.rst +++ b/Documentation/hwmon/oxp-sensors.rst @@ -10,41 +10,59 @@ Authors: Description: ------------ -Handheld devices from One Netbook and Aya Neo provide fan readings and fan -control through their embedded controllers. +Handheld devices from OneNetbook, AOKZOE, AYANEO, And OrangePi provide fan +readings and fan control through their embedded controllers. -Currently only supports AMD boards from One X Player, AOK ZOE, and some Aya -Neo devices. One X Player Intel boards could be supported if we could figure -out the EC registers and values to write to since the EC layout and model is -different. Aya Neo devices preceding the AIR may not be supportable as the EC -model is different and do not appear to have manual control capabilities. +Currently supports OneXPlayer devices, AOKZOE, AYANEO, and OrangePi +handheld devices. AYANEO devices preceding the AIR and OneXPlayer devices +preceding the Mini A07 are not supportable as the EC model is different +and do not have manual control capabilities. -Some models have a toggle for changing the behaviour of the "Turbo/Silent" -button of the device. It will change the key event that it triggers with -a flip of the `tt_toggle` attribute. See below for boards that support this -function. +Some OneXPlayer and AOKZOE models have a toggle for changing the behaviour +of the "Turbo/Silent" button of the device. It will change the key event +that it triggers with a flip of the `tt_toggle` attribute. See below for +boards that support this function. Supported devices ----------------- Currently the driver supports the following handhelds: - - AOK ZOE A1 - - AOK ZOE A1 PRO - - Aya Neo 2 - - Aya Neo AIR - - Aya Neo AIR Plus (Mendocino) - - Aya Neo AIR Pro - - Aya Neo Geek + - AOKZOE A1 + - AOKZOE A1 PRO + - AYANEO 2 + - AYANEO 2S + - AYANEO AIR + - AYANEO AIR 1S + - AYANEO AIR Plus (Mendocino) + - AYANEO AIR Pro + - AYANEO Flip DS + - AYANEO Flip KB + - AYANEO Geek + - AYANEO Geek 1S + - AYANEO KUN + - OneXPlayer 2 + - OneXPlayer 2 Pro - OneXPlayer AMD - OneXPlayer mini AMD - OneXPlayer mini AMD PRO + - OneXPlayer OneXFly + - OneXPlayer X1 A + - OneXPlayer X1 i + - OneXPlayer X1 mini + - OrangePi NEO-01 "Turbo/Silent" button behaviour toggle is only supported on: - AOK ZOE A1 - AOK ZOE A1 PRO + - OneXPlayer 2 + - OneXPlayer 2 Pro - OneXPlayer mini AMD (only with updated alpha BIOS) - OneXPlayer mini AMD PRO + - OneXPlayer OneXFly + - OneXPlayer X1 A + - OneXPlayer X1 i + - OneXPlayer X1 mini Sysfs entries ------------- @@ -52,7 +70,7 @@ Sysfs entries The following attributes are supported: fan1_input - Read Only. Reads current fan RMP. + Read Only. Reads current fan RPM. pwm1_enable Read Write. Enable manual fan control. 
Write "1" to set to manual, write "0" diff --git a/Documentation/hwmon/sg2042-mcu.rst b/Documentation/hwmon/sg2042-mcu.rst new file mode 100644 index 00000000000000..077e79841d2e90 --- /dev/null +++ b/Documentation/hwmon/sg2042-mcu.rst @@ -0,0 +1,78 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Kernel driver sg2042-mcu +======================== + +Supported chips: + + * Onboard MCU for sg2042 + + Addresses scanned: - + + Prefix: 'sg2042-mcu' + +Authors: + + - Inochi Amaoto + +Description +----------- + +This driver supprts hardware monitoring for onboard MCU with +i2c interface. + +Usage Notes +----------- + +This driver does not auto-detect devices. You will have to instantiate +the devices explicitly. +Please see Documentation/i2c/instantiating-devices.rst for details. + +Sysfs Attributes +---------------- + +The following table shows the standard entries support by the driver: + +================= ===================================================== +Name Description +================= ===================================================== +temp1_input Measured temperature of SoC +temp1_crit Critical high temperature +temp1_crit_hyst hysteresis temperature restore from Critical +temp2_input Measured temperature of the base board +================= ===================================================== + +The following table shows the extra entries support by the driver +(the MCU device is in i2c subsystem): + +================= ======= ============================================= +Name Perm Description +================= ======= ============================================= +reset_count RO Reset count of the SoC +uptime RO Seconds after the MCU is powered +reset_reason RO Reset reason for the last reset +repower_policy RW Execution policy when triggering repower +================= ======= ============================================= + +``repower_policy`` + The repower is triggered when the temperature of the SoC falls below + the hysteresis temperature after triggering a shutdown due to + reaching the critical temperature. + The valid values for this entry are "repower" and "keep". "keep" will + leave the SoC down when the triggering repower, and "repower" will + boot the SoC. + +Debugfs Interfaces +------------------ + +If debugfs is available, this driver exposes some hardware specific +data in ``/sys/kernel/debug/sg2042-mcu/*/``. + +================= ======= ============================================= +Name Format Description +================= ======= ============================================= +firmware_version 0x%02x firmware version of the MCU +pcb_version 0x%02x version number of the base board +board_type 0x%02x identifiers for the base board +mcu_type %d type of the MCU: 0 is STM32, 1 is GD32 +================= ======= ============================================= diff --git a/Documentation/i2c/slave-testunit-backend.rst b/Documentation/i2c/slave-testunit-backend.rst index 37142a48ab35cf..d752f433be0779 100644 --- a/Documentation/i2c/slave-testunit-backend.rst +++ b/Documentation/i2c/slave-testunit-backend.rst @@ -20,11 +20,25 @@ Instantiating the device is regular. Example for bus 0, address 0x30:: # echo "slave-testunit 0x1030" > /sys/bus/i2c/devices/i2c-0/new_device -After that, you will have a write-only device listening. Reads will just return -an 8-bit version number of the testunit. When writing, the device consists of 4 -8-bit registers and, except for some "partial" commands, all registers must be -written to start a testcase, i.e. 
you usually write 4 bytes to the device. The -registers are: +Or using firmware nodes. Here is a devicetree example (note this is only a +debug device, so there are no official DT bindings):: + + &i2c0 { + ... + + testunit@30 { + compatible = "slave-testunit"; + reg = <(0x30 | I2C_OWN_SLAVE_ADDRESS)>; + }; + }; + +After that, you will have the device listening. Reading will return a single +byte. Its value is 0 if the testunit is idle, otherwise the command number of +the currently running command. + +When writing, the device consists of 4 8-bit registers and, except for some +"partial" commands, all registers must be written to start a testcase, i.e. you +usually write 4 bytes to the device. The registers are: .. csv-table:: :header: "Offset", "Name", "Description" @@ -75,7 +89,7 @@ from another device on the bus. If the bus master under test also wants to access the bus at the same time, the bus will be busy. Example to read 128 bytes from device 0x50 after 50ms of delay:: - # i2cset -y 0 0x30 0x01 0x50 0x80 0x05 i + # i2cset -y 0 0x30 1 0x50 0x80 5 i 0x02 SMBUS_HOST_NOTIFY ~~~~~~~~~~~~~~~~~~~~~~ @@ -95,9 +109,9 @@ bytes from device 0x50 after 50ms of delay:: Also needs master mode. This test will send an SMBUS_HOST_NOTIFY message to the host. Note that the status word is currently ignored in the Linux Kernel. -Example to send a notification after 10ms:: +Example to send a notification with status word 0x6442 after 10ms:: - # i2cset -y 0 0x30 0x02 0x42 0x64 0x01 i + # i2cset -y 0 0x30 2 0x42 0x64 1 i If the host controller supports HostNotify, this message with debug level should appear (Linux 6.11 and later):: @@ -116,7 +130,7 @@ should appear (Linux 6.11 and later):: - DELAY * - 0x03 - - must be '1', i.e. one further byte will be written + - 0x01 (i.e. one further byte will be written) - number of bytes to be sent back - leave out, partial command! @@ -131,5 +145,91 @@ from length-1 to 0. Here is an example which emulates i2c_smbus_block_process_call() using i2ctransfer (you need i2c-tools v4.2 or later):: - # i2ctransfer -y 0 w3@0x30 0x03 0x01 0x10 r? + # i2ctransfer -y 0 w3@0x30 3 1 0x10 r? 0x10 0x0f 0x0e 0x0d 0x0c 0x0b 0x0a 0x09 0x08 0x07 0x06 0x05 0x04 0x03 0x02 0x01 0x00 + +0x04 GET_VERSION_WITH_REP_START +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. list-table:: + :header-rows: 1 + + * - CMD + - DATAL + - DATAH + - DELAY + + * - 0x04 + - currently unused + - currently unused + - leave out, partial command! + +Partial command. After sending this command, the testunit will reply to a read +message with a NUL terminated version string based on UTS_RELEASE. The first +character is always a 'v' and the length of the version string is at maximum +128 bytes. However, it will only respond if the read message is connected to +the write message via repeated start. If your controller driver handles +repeated start correctly, this will work:: + + # i2ctransfer -y 0 w3@0x30 4 0 0 r128 + 0x76 0x36 0x2e 0x31 0x31 0x2e 0x30 0x2d 0x72 0x63 0x31 0x2d 0x30 0x30 0x30 0x30 ... + +If you have i2c-tools 4.4 or later, you can print out the data right away:: + + # i2ctransfer -y -b 0 w3@0x30 4 0 0 r128 + v6.11.0-rc1-00009-gd37a1b4d3fd0 + +STOP/START combinations between the two messages will *not* work because they +are not equivalent to a REPEATED START. As an example, this returns just the +default response:: + + # i2cset -y 0 0x30 4 0 0 i; i2cget -y 0 0x30 + 0x00 + +0x05 SMBUS_ALERT_REQUEST +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
list-table:: + :header-rows: 1 + + * - CMD + - DATAL + - DATAH + - DELAY + + * - 0x05 + - response value (7 MSBs interpreted as I2C address) + - currently unused + - n * 10ms + +This test raises an interrupt via the SMBAlert pin which the host controller +must handle. The pin must be connected to the testunit as a GPIO. GPIO access +is not allowed to sleep. Currently, this can only be described using firmware +nodes. So, for devicetree, you would add something like this to the testunit +node:: + + gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; + +The following command will trigger the alert with a response of 0xc9 after 1 +second of delay:: + + # i2cset -y 0 0x30 5 0xc9 0x00 100 i + +If the host controller supports SMBusAlert, this message with debug level +should appear:: + + smbus_alert 0-000c: SMBALERT# from dev 0x64, flag 1 + +This message may appear more than once because the testunit is software not +hardware and, thus, may not be able to react to the response of the host fast +enough. The interrupt count should increase only by one, though:: + + # cat /proc/interrupts | grep smbus_alert + 93: 1 gpio-rcar 26 Edge smbus_alert + +If the host does not respond to the alert within 1 second, the test will be +aborted and the testunit will report an error. + +For this test, the testunit will shortly drop its assigned address and listen +on the SMBus Alert Response Address (0x0c). It will reassign its original +address afterwards. diff --git a/Documentation/iio/ad4000.rst b/Documentation/iio/ad4000.rst new file mode 100644 index 00000000000000..de8fd3ae6e6205 --- /dev/null +++ b/Documentation/iio/ad4000.rst @@ -0,0 +1,131 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +============= +AD4000 driver +============= + +Device driver for Analog Devices Inc. AD4000 series of ADCs. + +Supported devices +================= + +* `AD4000 `_ +* `AD4001 `_ +* `AD4002 `_ +* `AD4003 `_ +* `AD4004 `_ +* `AD4005 `_ +* `AD4006 `_ +* `AD4007 `_ +* `AD4008 `_ +* `AD4010 `_ +* `AD4011 `_ +* `AD4020 `_ +* `AD4021 `_ +* `AD4022 `_ +* `ADAQ4001 `_ +* `ADAQ4003 `_ + +Wiring connections +------------------ + +Devices of the AD4000 series can be connected to the SPI host controller in a +few different modes. + +CS mode, 3-wire turbo mode +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Datasheet "3-wire" mode is what most resembles standard SPI connection which, +for these devices, comprises of connecting the controller CS line to device CNV +pin and other SPI lines as usual. This configuration is (misleadingly) called +"CS Mode, 3-Wire Turbo Mode" connection in datasheets. +NOTE: The datasheet definition of 3-wire mode for the AD4000 series is NOT the +same of standard spi-3wire mode. +This is the only connection mode that allows configuration register access but +it requires the SPI controller to support the ``SPI_MOSI_IDLE_HIGH`` feature. + +Omit the ``adi,sdi-pin`` property in device tree to select this mode. + +:: + + +-------------+ + + ----------------------------------| SDO | + | | | + | +-------------------| CS | + | v | | + | +--------------------+ | HOST | + | | CNV | | | + +--->| SDI AD4000 SDO |-------->| SDI | + | SCK | | | + +--------------------+ | | + ^ | | + +--------------------| SCLK | + +-------------+ + +CS mode, 3-wire, without busy indicator +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Another wiring configuration supported as "3-wire" mode has the SDI pin +hard-wired to digital input/output interface supply (VIO). 
In this setup, the +controller is not required to support ``SPI_MOSI_IDLE_HIGH`` but register access +is not possible. This connection mode saves one wire and works with any SPI +controller. + +Set the ``adi,sdi-pin`` device tree property to ``"high"`` to select this mode. + +:: + + +-------------+ + +--------------------| CS | + v | | + VIO +--------------------+ | HOST | + | | CNV | | | + +--->| SDI AD4000 SDO |-------->| SDI | + | SCK | | | + +--------------------+ | | + ^ | | + +--------------------| SCLK | + +-------------+ + +Alternatively, a GPIO may be connected to the device CNV pin. This is similar to +the previous wiring configuration but saves the use of a CS line. + +:: + + +-------------+ + +--------------------| GPIO | + v | | + VIO +--------------------+ | HOST | + | | CNV | | | + +--->| SDI AD4000 SDO |-------->| SDI | + | SCK | | | + +--------------------+ | | + ^ | | + +--------------------| SCLK | + +-------------+ + +CS mode, 4-wire without busy indicator +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In datasheet "4-wire" mode, the controller CS line is connected to the ADC SDI +pin and a GPIO is connected to the ADC CNV pin. This connection mode may better +suit scenarios where multiple ADCs can share one CNV trigger. + +Set ``adi,sdi-pin`` to ``"cs"`` to select this mode. + + +:: + + +-------------+ + + ----------------------------------| CS | + | | | + | +-------------------| GPIO | + | v | | + | +--------------------+ | HOST | + | | CNV | | | + +--->| SDI AD4000 SDO |-------->| SDI | + | SCK | | | + +--------------------+ | | + ^ | | + +--------------------| SCLK | + +-------------+ diff --git a/Documentation/iio/ad4695.rst b/Documentation/iio/ad4695.rst new file mode 100644 index 00000000000000..33ed29b7c98a63 --- /dev/null +++ b/Documentation/iio/ad4695.rst @@ -0,0 +1,167 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +============= +AD4695 driver +============= + +ADC driver for Analog Devices Inc. AD4695 and similar devices. The module name +is ``ad4695``. + + +Supported devices +================= + +The following chips are supported by this driver: + +* `AD4695 `_ +* `AD4696 `_ +* `AD4697 `_ +* `AD4698 `_ + + +Supported features +================== + +SPI wiring modes +---------------- + +The driver currently supports the following SPI wiring configuration: + +4-wire mode +^^^^^^^^^^^ + +In this mode, CNV and CS are tied together and there is a single SDO line. + +.. code-block:: + + +-------------+ +-------------+ + | CS |<-+------| CS | + | CNV |<-+ | | + | ADC | | HOST | + | | | | + | SDI |<--------| SDO | + | SDO |-------->| SDI | + | SCLK |<--------| SCLK | + +-------------+ +-------------+ + +To use this mode, in the device tree, omit the ``cnv-gpios`` and +``spi-rx-bus-width`` properties. + +Channel configuration +--------------------- + +Since the chip supports multiple ways to configure each channel, this must be +described in the device tree based on what is actually wired up to the inputs. + +There are three typical configurations: + +An ``INx`` pin is used as the positive input with the ``REFGND``, ``COM`` or +the next ``INx`` pin as the negative input. + +Pairing with REFGND +^^^^^^^^^^^^^^^^^^^ + +Each ``INx`` pin can be used as a pseudo-differential input in conjunction with +the ``REFGND`` pin. The device tree will look like this: + +.. code-block:: + + channel@0 { + reg = <0>; /* IN0 */ + }; + +If no other channel properties are needed (e.g. ``adi,no-high-z``), the channel +node can be omitted entirely. 
+ +This will appear on the IIO bus as the ``voltage0`` channel. The processed value +(*raw × scale*) will be the voltage present on the ``IN0`` pin relative to +``REFGND``. (Offset is always 0 when pairing with ``REFGND``.) + +Pairing with COM +^^^^^^^^^^^^^^^^ + +Each ``INx`` pin can be used as a pseudo-differential input in conjunction with +the ``COM`` pin. The device tree will look like this: + +.. code-block:: + + com-supply = <&vref_div_2>; + + channel@1 { + reg = <1>; /* IN1 */ + common-mode-channel = ; + bipolar; + }; + +This will appear on the IIO bus as the ``voltage1`` channel. The processed value +(*(raw + offset) × scale*) will be the voltage measured on the ``IN1`` pin +relative to ``REFGND``. (The offset is determined by the ``com-supply`` voltage.) + +The macro comes from: + +.. code-block:: + + #include + +Pairing two INx pins +^^^^^^^^^^^^^^^^^^^^ + +An even-numbered ``INx`` pin and the following odd-numbered ``INx`` pin can be +used as a pseudo-differential input. The device tree for using ``IN2`` as the +positive input and ``IN3`` as the negative input will look like this: + +.. code-block:: + + in3-supply = <&vref_div_2>; + + channel@2 { + reg = <2>; /* IN2 */ + common-mode-channel = <3>; /* IN3 */ + bipolar; + }; + +This will appear on the IIO bus as the ``voltage2`` channel. The processed value +(*(raw + offset) × scale*) will be the voltage measured on the ``IN1`` pin +relative to ``REFGND``. (Offset is determined by the ``in3-supply`` voltage.) + +VCC supply +---------- + +The chip supports being powered by an external LDO via the ``VCC`` input or an +internal LDO via the ``LDO_IN`` input. The driver looks at the device tree to +determine which is being used. If ``ldo-supply`` is present, then the internal +LDO is used. If ``vcc-supply`` is present, then the external LDO is used and +the internal LDO is disabled. + +Reference voltage +----------------- + +The chip supports an external reference voltage via the ``REF`` input or an +internal buffered reference voltage via the ``REFIN`` input. The driver looks +at the device tree to determine which is being used. If ``ref-supply`` is +present, then the external reference voltage is used and the internal buffer is +disabled. If ``refin-supply`` is present, then the internal buffered reference +voltage is used. + +Gain/offset calibration +----------------------- + +System calibration is supported using the channel gain and offset registers via +the ``calibscale`` and ``calibbias`` attributes respectively. + +Unimplemented features +---------------------- + +- Additional wiring modes +- Threshold events +- Oversampling +- GPIO support +- CRC support + +Device buffers +============== + +This driver supports hardware triggered buffers. This uses the "advanced +sequencer" feature of the chip to trigger a burst of conversions. + +Also see :doc:`iio_devbuf` for more general information. diff --git a/Documentation/iio/ad7380.rst b/Documentation/iio/ad7380.rst new file mode 100644 index 00000000000000..9c784c1e652e9a --- /dev/null +++ b/Documentation/iio/ad7380.rst @@ -0,0 +1,130 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +============= +AD7380 driver +============= + +ADC driver for Analog Devices Inc. AD7380 and similar devices. The module name +is ``ad7380``. 
+ + +Supported devices +================= + +The following chips are supported by this driver: + +* `AD7380 `_ +* `AD7381 `_ +* `AD7383 `_ +* `AD7384 `_ +* `AD7386 `_ +* `AD7387 `_ +* `AD7388 `_ +* `AD7380-4 `_ +* `AD7381-4 `_ +* `AD7383-4 `_ +* `AD7384-4 `_ +* `AD7386-4 `_ +* `AD7387-4 `_ +* `AD7388-4 `_ + + +Supported features +================== + +SPI wiring modes +---------------- + +ad738x ADCs can output data on several SDO lines (1/2/4). The driver currently +supports only 1 SDO line. + +Reference voltage +----------------- + +2 possible reference voltage sources are supported: + +- Internal reference (2.5V) +- External reference (2.5V to 3.3V) + +The source is determined by the device tree. If ``refio-supply`` is present, +then the external reference is used, else the internal reference is used. + +Oversampling and resolution boost +--------------------------------- + +This family supports 2 types of oversampling: normal average and rolling +average. Only normal average is supported by the driver, as rolling average can +be achieved by processing a captured data buffer. The following ratios are +available: 1 (oversampling disabled)/2/4/8/16/32. + +When the on-chip oversampling function is enabled the performance of the ADC can +exceed the default resolution. To accommodate the performance boost achievable, +it is possible to enable an additional two bits of resolution. Because the +resolution boost feature can only be enabled when oversampling is enabled and +oversampling is not as useful without the resolution boost, the driver +automatically enables the resolution boost if and only if oversampling is +enabled. + +Since the resolution boost feature causes 16-bit chips to now have 18-bit data +which means the storagebits has to change from 16 to 32 bits, we use the new +ext_scan_type feature to allow changing the scan_type at runtime. Unfortunately +libiio does not support it. So when enabling or disabling oversampling, user +must restart iiod using the following command: + +.. code-block:: bash + + root:~# systemctl restart iiod + +Channel selection and sequencer (single-end chips only) +------------------------------------------------------- + +Single-ended chips of this family (ad7386/7/8(-4)) have a 2:1 multiplexer in +front of each ADC. They also include additional configuration registers that +allow for either manual selection or automatic switching (sequencer mode), of +the multiplexer inputs. + +From an IIO point of view, all inputs are exported, i.e ad7386/7/8 +export 4 channels and ad7386-4/7-4/8-4 export 8 channels. + +Inputs ``AinX0`` of multiplexers correspond to the first half of IIO channels (i.e +0-1 or 0-3) and inputs ``AinX1`` correspond to second half (i.e 2-3 or 4-7). +Example for AD7386/7/8 (2 channels parts): + +.. code-block:: + + IIO | AD7386/7/8 + | +---------------------------- + | | _____ ______ + | | | | | | + voltage0 | AinA0 --|--->| | | | + | | | mux |----->| ADCA |--- + voltage2 | AinA1 --|--->| | | | + | | |_____| |_____ | + | | _____ ______ + | | | | | | + voltage1 | AinB0 --|--->| | | | + | | | mux |----->| ADCB |--- + voltage3 | AinB1 --|--->| | | | + | | |_____| |______| + | | + | +---------------------------- + + +When enabling sequencer mode, the effective sampling rate is divided by two. + +Unimplemented features +---------------------- + +- 2/4 SDO lines +- Rolling average oversampling +- Power down mode +- CRC indication +- Alert + + +Device buffers +============== + +This driver supports IIO triggered buffers. 
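Before starting a buffered capture with oversampling, the ratio has to be
selected and iiod restarted as described above. A rough sysfs session is
sketched below; the device index is illustrative and the attribute names
follow the standard IIO ABI (check the device directory for the exact
names on your system):

.. code-block:: bash

    root:~# cat /sys/bus/iio/devices/iio:device0/oversampling_ratio_available
    1 2 4 8 16 32
    root:~# echo 16 > /sys/bus/iio/devices/iio:device0/oversampling_ratio
    root:~# systemctl restart iiod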
+ +See :doc:`iio_devbuf` for more information. diff --git a/Documentation/iio/adxl380.rst b/Documentation/iio/adxl380.rst new file mode 100644 index 00000000000000..376dee5fe1dda4 --- /dev/null +++ b/Documentation/iio/adxl380.rst @@ -0,0 +1,233 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=============== +ADXL380 driver +=============== + +This driver supports Analog Device's ADXL380/382 on SPI/I2C bus. + +1. Supported devices +==================== + +* `ADXL380 `_ +* `ADXL382 `_ + +The ADXL380/ADXL382 is a low noise density, low power, 3-axis accelerometer with +selectable measurement ranges. The ADXL380 supports the ±4 g, ±8 g, and ±16 g +ranges, and the ADXL382 supports ±15 g, ±30 g, and ±60 g ranges. + +2. Device attributes +==================== + +Accelerometer measurements are always provided. + +Temperature data are also provided. This data can be used to monitor the +internal system temperature or to improve the temperature stability of the +device via calibration. + +Each IIO device, has a device folder under ``/sys/bus/iio/devices/iio:deviceX``, +where X is the IIO index of the device. Under these folders reside a set of +device files, depending on the characteristics and features of the hardware +device in questions. These files are consistently generalized and documented in +the IIO ABI documentation. + +The following tables show the adxl380 related device files, found in the +specific device folder path ``/sys/bus/iio/devices/iio:deviceX``. + ++---------------------------------------------------+----------------------------------------------------------+ +| 3-Axis Accelerometer related device files | Description | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_scale | Scale for the accelerometer channels. | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_filter_high_pass_3db_frequency | Low pass filter bandwidth. | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_filter_high_pass_3db_frequency_available | Available low pass filter bandwidth configurations. | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_filter_low_pass_3db_frequency | High pass filter bandwidth. | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_filter_low_pass_3db_frequency_available | Available high pass filter bandwidth configurations. | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_x_calibbias | Calibration offset for the X-axis accelerometer channel. | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_x_raw | Raw X-axis accelerometer channel value. | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_y_calibbias | y-axis acceleration offset correction | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_y_raw | Raw Y-axis accelerometer channel value. 
| ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_z_calibbias | Calibration offset for the Z-axis accelerometer channel. | ++---------------------------------------------------+----------------------------------------------------------+ +| in_accel_z_raw | Raw Z-axis accelerometer channel value. | ++---------------------------------------------------+----------------------------------------------------------+ + ++----------------------------------+--------------------------------------------+ +| Temperature sensor related files | Description | ++----------------------------------+--------------------------------------------+ +| in_temp_raw | Raw temperature channel value. | ++----------------------------------+--------------------------------------------+ +| in_temp_offset | Offset for the temperature sensor channel. | ++----------------------------------+--------------------------------------------+ +| in_temp_scale | Scale for the temperature sensor channel. | ++----------------------------------+--------------------------------------------+ + ++------------------------------+----------------------------------------------+ +| Miscellaneous device files | Description | ++------------------------------+----------------------------------------------+ +| name | Name of the IIO device. | ++------------------------------+----------------------------------------------+ +| sampling_frequency | Currently selected sample rate. | ++------------------------------+----------------------------------------------+ +| sampling_frequency_available | Available sampling frequency configurations. | ++------------------------------+----------------------------------------------+ + +Channels processed values +------------------------- + +A channel value can be read from its _raw attribute. The value returned is the +raw value as reported by the devices. To get the processed value of the channel, +apply the following formula: + +.. code-block:: bash + + processed value = (_raw + _offset) * _scale + +Where _offset and _scale are device attributes. If no _offset attribute is +present, simply assume its value is 0. + +The adis16475 driver offers data for 2 types of channels, the table below shows +the measurement units for the processed value, which are defined by the IIO +framework: + ++-------------------------------------+---------------------------+ +| Channel type | Measurement unit | ++-------------------------------------+---------------------------+ +| Acceleration on X, Y, and Z axis | Meters per Second squared | ++-------------------------------------+---------------------------+ +| Temperature | Millidegrees Celsius | ++-------------------------------------+---------------------------+ + +Usage examples +-------------- + +Show device name: + +.. code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> cat name + adxl382 + +Show accelerometer channels value: + +.. 
code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_raw + -1771 + root:/sys/bus/iio/devices/iio:device0> cat in_accel_y_raw + 282 + root:/sys/bus/iio/devices/iio:device0> cat in_accel_z_raw + -1523 + root:/sys/bus/iio/devices/iio:device0> cat in_accel_scale + 0.004903325 + +- X-axis acceleration = in_accel_x_raw * in_accel_scale = −8.683788575 m/s^2 +- Y-axis acceleration = in_accel_y_raw * in_accel_scale = 1.38273765 m/s^2 +- Z-axis acceleration = in_accel_z_raw * in_accel_scale = -7.467763975 m/s^2 + +Set calibration offset for accelerometer channels: + +.. code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias + 0 + + root:/sys/bus/iio/devices/iio:device0> echo 50 > in_accel_x_calibbias + root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias + 50 + +Set sampling frequency: + +.. code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency + 16000 + root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency_available + 16000 32000 64000 + + root:/sys/bus/iio/devices/iio:device0> echo 32000 > sampling_frequency + root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency + 32000 + +Set low pass filter bandwidth for accelerometer channels: + +.. code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency + 32000 + root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency_available + 32000 8000 4000 2000 + + root:/sys/bus/iio/devices/iio:device0> echo 2000 > in_accel_filter_low_pass_3db_frequency + root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency + 2000 + +3. Device buffers +================= + +This driver supports IIO buffers. + +All devices support retrieving the raw acceleration and temperature measurements +using buffers. + +Usage examples +-------------- + +Select channels for buffer read: + +.. code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_x_en + root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_y_en + root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_z_en + root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_temp_en + +Set the number of samples to be stored in the buffer: + +.. code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> echo 10 > buffer/length + +Enable buffer readings: + +.. code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> echo 1 > buffer/enable + +Obtain buffered data: + +.. code-block:: bash + + root:/sys/bus/iio/devices/iio:device0> hexdump -C /dev/iio\:device0 + ... 
+ 002bc300 f7 e7 00 a8 fb c5 24 80 f7 e7 01 04 fb d6 24 80 |......$.......$.| + 002bc310 f7 f9 00 ab fb dc 24 80 f7 c3 00 b8 fb e2 24 80 |......$.......$.| + 002bc320 f7 fb 00 bb fb d1 24 80 f7 b1 00 5f fb d1 24 80 |......$...._..$.| + 002bc330 f7 c4 00 c6 fb a6 24 80 f7 a6 00 68 fb f1 24 80 |......$....h..$.| + 002bc340 f7 b8 00 a3 fb e7 24 80 f7 9a 00 b1 fb af 24 80 |......$.......$.| + 002bc350 f7 b1 00 67 fb ee 24 80 f7 96 00 be fb 92 24 80 |...g..$.......$.| + 002bc360 f7 ab 00 7a fc 1b 24 80 f7 b6 00 ae fb 76 24 80 |...z..$......v$.| + 002bc370 f7 ce 00 a3 fc 02 24 80 f7 c0 00 be fb 8b 24 80 |......$.......$.| + 002bc380 f7 c3 00 93 fb d0 24 80 f7 ce 00 d8 fb c8 24 80 |......$.......$.| + 002bc390 f7 bd 00 c0 fb 82 24 80 f8 00 00 e8 fb db 24 80 |......$.......$.| + 002bc3a0 f7 d8 00 d3 fb b4 24 80 f8 0b 00 e5 fb c3 24 80 |......$.......$.| + 002bc3b0 f7 eb 00 c8 fb 92 24 80 f7 e7 00 ea fb cb 24 80 |......$.......$.| + 002bc3c0 f7 fd 00 cb fb 94 24 80 f7 e3 00 f2 fb b8 24 80 |......$.......$.| + ... + +See ``Documentation/iio/iio_devbuf.rst`` for more information about how buffered +data is structured. + +4. IIO Interfacing Tools +======================== + +See ``Documentation/iio/iio_tools.rst`` for the description of the available IIO +interfacing tools. diff --git a/Documentation/iio/index.rst b/Documentation/iio/index.rst index 9cb4c50cb20d6a..dfcf9618568a44 100644 --- a/Documentation/iio/index.rst +++ b/Documentation/iio/index.rst @@ -18,8 +18,12 @@ Industrial I/O Kernel Drivers .. toctree:: :maxdepth: 1 + ad4000 + ad4695 + ad7380 ad7944 adis16475 adis16480 + adxl380 bno055 ep93xx_adc diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst index 9c8d1d046ea564..1796b3eba37bd1 100644 --- a/Documentation/kbuild/kbuild.rst +++ b/Documentation/kbuild/kbuild.rst @@ -22,6 +22,11 @@ modules.builtin.modinfo This file contains modinfo from all modules that are built into the kernel. Unlike modinfo of a separate module, all fields are prefixed with module name. +modules.builtin.ranges +---------------------- +This file contains address offset ranges (per ELF section) for all modules +that are built into the kernel. Together with System.map, it can be used +to associate module names with symbols. Environment variables ===================== @@ -129,6 +134,11 @@ KBUILD_OUTPUT ------------- Specify the output directory when building the kernel. +This variable can also be used to point to the kernel output directory when +building external modules against a pre-built kernel in a separate build +directory. Please note that this does NOT specify the output directory for the +external modules themselves. + The output directory can also be specified using "O=...". Setting "O=..." takes precedence over KBUILD_OUTPUT. diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst index 71b38a7670f37d..43037be96a16f1 100644 --- a/Documentation/kbuild/kconfig-language.rst +++ b/Documentation/kbuild/kconfig-language.rst @@ -70,7 +70,11 @@ applicable everywhere (see syntax). Every menu entry can have at most one prompt, which is used to display to the user. Optionally dependencies only for this prompt can be added - with "if". + with "if". If a prompt is not present, the config option is a non-visible + symbol, meaning its value cannot be directly changed by the user (such as + altering the value in ``.config``) and the option will not appear in any + config menus. Its value can only be set via "default" and "select" (see + below). 
- default value: "default" ["if" ] diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst index be43990f1e7fd8..7964e0c245aebe 100644 --- a/Documentation/kbuild/makefiles.rst +++ b/Documentation/kbuild/makefiles.rst @@ -1665,6 +1665,5 @@ Credits TODO ==== -- Describe how kbuild supports shipped files with _shipped. - Generating offset header files. - Add more variables to chapters 7 or 9? diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst index 131863142cbb35..cd5a54d91e6d29 100644 --- a/Documentation/kbuild/modules.rst +++ b/Documentation/kbuild/modules.rst @@ -4,41 +4,12 @@ Building External Modules This document describes how to build an out-of-tree kernel module. -.. Table of Contents - - === 1 Introduction - === 2 How to Build External Modules - --- 2.1 Command Syntax - --- 2.2 Options - --- 2.3 Targets - --- 2.4 Building Separate Files - === 3. Creating a Kbuild File for an External Module - --- 3.1 Shared Makefile - --- 3.2 Separate Kbuild file and Makefile - --- 3.3 Binary Blobs - --- 3.4 Building Multiple Modules - === 4. Include Files - --- 4.1 Kernel Includes - --- 4.2 Single Subdirectory - --- 4.3 Several Subdirectories - === 5. Module Installation - --- 5.1 INSTALL_MOD_PATH - --- 5.2 INSTALL_MOD_DIR - === 6. Module Versioning - --- 6.1 Symbols From the Kernel (vmlinux + modules) - --- 6.2 Symbols and External Modules - --- 6.3 Symbols From Another External Module - === 7. Tips & Tricks - --- 7.1 Testing for CONFIG_FOO_BAR - - - -1. Introduction -=============== +Introduction +============ "kbuild" is the build system used by the Linux kernel. Modules must use kbuild to stay compatible with changes in the build infrastructure and -to pick up the right flags to "gcc." Functionality for building modules +to pick up the right flags to the compiler. Functionality for building modules both in-tree and out-of-tree is provided. The method for building either is similar, and all modules are initially developed and built out-of-tree. @@ -48,11 +19,11 @@ in building out-of-tree (or "external") modules. The author of an external module should supply a makefile that hides most of the complexity, so one only has to type "make" to build the module. This is easily accomplished, and a complete example will be presented in -section 3. +section `Creating a Kbuild File for an External Module`_. -2. How to Build External Modules -================================ +How to Build External Modules +============================= To build external modules, you must have a prebuilt kernel available that contains the configuration and header files used in the build. @@ -69,12 +40,12 @@ NOTE: "modules_prepare" will not build Module.symvers even if CONFIG_MODVERSIONS is set; therefore, a full kernel build needs to be executed to make module versioning work. -2.1 Command Syntax -================== +Command Syntax +-------------- The command to build an external module is:: - $ make -C M=$PWD + $ make -C M=$PWD The kbuild system knows that an external module is being built due to the "M=" option given in the command. @@ -88,15 +59,18 @@ executed to make module versioning work. $ make -C /lib/modules/`uname -r`/build M=$PWD modules_install -2.2 Options -=========== +Options +------- - ($KDIR refers to the path of the kernel source directory.) + ($KDIR refers to the path of the kernel source directory, or the path + of the kernel output directory if the kernel was built in a separate + build directory.) 
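For example, if the kernel was configured and built with "O=" pointing at
a separate output directory, an external module is built by pointing "-C"
at that output directory instead of the source tree (the path below is
purely illustrative)::

    $ make -C /path/to/kernel/output M=$PWD modules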
make -C $KDIR M=$PWD -C $KDIR - The directory where the kernel source is located. + The directory that contains the kernel and relevant build + artifacts used for building an external module. "make" will actually change to the specified directory when executing and will change back when finished. @@ -106,8 +80,8 @@ executed to make module versioning work. directory where the external module (kbuild file) is located. -2.3 Targets -=========== +Targets +------- When building an external module, only a subset of the "make" targets are available. @@ -129,7 +103,8 @@ executed to make module versioning work. modules_install Install the external module(s). The default location is /lib/modules//updates/, but a prefix may - be added with INSTALL_MOD_PATH (discussed in section 5). + be added with INSTALL_MOD_PATH (discussed in section + `Module Installation`_). clean Remove all generated files in the module directory only. @@ -137,8 +112,8 @@ executed to make module versioning work. help List the available targets for external modules. -2.4 Building Separate Files -=========================== +Building Separate Files +----------------------- It is possible to build single files that are part of a module. This works equally well for the kernel, a module, and even for @@ -152,8 +127,8 @@ executed to make module versioning work. make -C $KDIR M=$PWD ./ -3. Creating a Kbuild File for an External Module -================================================ +Creating a Kbuild File for an External Module +============================================= In the last section we saw the command to build a module for the running kernel. The module is not actually built, however, because a @@ -180,10 +155,9 @@ module 8123.ko, which is built from the following files:: 8123_if.c 8123_if.h 8123_pci.c - 8123_bin.o_shipped <= Binary blob -3.1 Shared Makefile -------------------- +Shared Makefile +--------------- An external module always includes a wrapper makefile that supports building the module using "make" with no arguments. @@ -198,7 +172,7 @@ module 8123.ko, which is built from the following files:: ifneq ($(KERNELRELEASE),) # kbuild part of makefile obj-m := 8123.o - 8123-y := 8123_if.o 8123_pci.o 8123_bin.o + 8123-y := 8123_if.o 8123_pci.o else # normal makefile @@ -207,10 +181,6 @@ module 8123.ko, which is built from the following files:: default: $(MAKE) -C $(KDIR) M=$$PWD - # Module specific targets - genbin: - echo "X" > 8123_bin.o_shipped - endif The check for KERNELRELEASE is used to separate the two parts @@ -221,19 +191,18 @@ module 8123.ko, which is built from the following files:: line; the second pass is by the kbuild system, which is initiated by the parameterized "make" in the default target. -3.2 Separate Kbuild File and Makefile -------------------------------------- +Separate Kbuild File and Makefile +--------------------------------- - In newer versions of the kernel, kbuild will first look for a - file named "Kbuild," and only if that is not found, will it - then look for a makefile. Utilizing a "Kbuild" file allows us - to split up the makefile from example 1 into two files: + Kbuild will first look for a file named "Kbuild", and if it is not + found, it will then look for "Makefile". 
Utilizing a "Kbuild" file + allows us to split up the "Makefile" from example 1 into two files: Example 2:: --> filename: Kbuild obj-m := 8123.o - 8123-y := 8123_if.o 8123_pci.o 8123_bin.o + 8123-y := 8123_if.o 8123_pci.o --> filename: Makefile KDIR ?= /lib/modules/`uname -r`/build @@ -241,68 +210,13 @@ module 8123.ko, which is built from the following files:: default: $(MAKE) -C $(KDIR) M=$$PWD - # Module specific targets - genbin: - echo "X" > 8123_bin.o_shipped - The split in example 2 is questionable due to the simplicity of each file; however, some external modules use makefiles consisting of several hundred lines, and here it really pays off to separate the kbuild part from the rest. - The next example shows a backward compatible version. - - Example 3:: - - --> filename: Kbuild - obj-m := 8123.o - 8123-y := 8123_if.o 8123_pci.o 8123_bin.o - - --> filename: Makefile - ifneq ($(KERNELRELEASE),) - # kbuild part of makefile - include Kbuild - - else - # normal makefile - KDIR ?= /lib/modules/`uname -r`/build - - default: - $(MAKE) -C $(KDIR) M=$$PWD - - # Module specific targets - genbin: - echo "X" > 8123_bin.o_shipped - - endif - - Here the "Kbuild" file is included from the makefile. This - allows an older version of kbuild, which only knows of - makefiles, to be used when the "make" and kbuild parts are - split into separate files. - -3.3 Binary Blobs ----------------- - - Some external modules need to include an object file as a blob. - kbuild has support for this, but requires the blob file to be - named _shipped. When the kbuild rules kick in, a copy - of _shipped is created with _shipped stripped off, - giving us . This shortened filename can be used in - the assignment to the module. - - Throughout this section, 8123_bin.o_shipped has been used to - build the kernel module 8123.ko; it has been included as - 8123_bin.o:: - - 8123-y := 8123_if.o 8123_pci.o 8123_bin.o - - Although there is no distinction between the ordinary source - files and the binary file, kbuild will pick up different rules - when creating the object file for the module. - -3.4 Building Multiple Modules -============================= +Building Multiple Modules +------------------------- kbuild supports building multiple modules with a single build file. For example, if you wanted to build two modules, foo.ko @@ -315,8 +229,8 @@ module 8123.ko, which is built from the following files:: It is that simple! -4. Include Files -================ +Include Files +============= Within the kernel, header files are kept in standard locations according to the following rule: @@ -334,19 +248,19 @@ according to the following rule: include/scsi; and architecture specific headers are located under arch/$(SRCARCH)/include/. -4.1 Kernel Includes -------------------- +Kernel Includes +--------------- To include a header file located under include/linux/, simply use:: #include - kbuild will add options to "gcc" so the relevant directories + kbuild will add options to the compiler so the relevant directories are searched. -4.2 Single Subdirectory ------------------------ +Single Subdirectory +------------------- External modules tend to place header files in a separate include/ directory where their source is located, although this @@ -360,15 +274,11 @@ according to the following rule: --> filename: Kbuild obj-m := 8123.o - ccflags-y := -Iinclude - 8123-y := 8123_if.o 8123_pci.o 8123_bin.o - - Note that in the assignment there is no space between -I and - the path. This is a limitation of kbuild: there must be no - space present. 
+ ccflags-y := -I $(src)/include + 8123-y := 8123_if.o 8123_pci.o -4.3 Several Subdirectories --------------------------- +Several Subdirectories +---------------------- kbuild can handle files that are spread over several directories. Consider the following example:: @@ -407,8 +317,8 @@ according to the following rule: file is located. -5. Module Installation -====================== +Module Installation +=================== Modules which are included in the kernel are installed in the directory: @@ -419,8 +329,8 @@ And external modules are installed in: /lib/modules/$(KERNELRELEASE)/updates/ -5.1 INSTALL_MOD_PATH --------------------- +INSTALL_MOD_PATH +---------------- Above are the default directories but as always some level of customization is possible. A prefix can be added to the @@ -434,8 +344,8 @@ And external modules are installed in: calling "make." This has effect when installing both in-tree and out-of-tree modules. -5.2 INSTALL_MOD_DIR -------------------- +INSTALL_MOD_DIR +--------------- External modules are by default installed to a directory under /lib/modules/$(KERNELRELEASE)/updates/, but you may wish to @@ -448,8 +358,8 @@ And external modules are installed in: => Install dir: /lib/modules/$(KERNELRELEASE)/gandalf/ -6. Module Versioning -==================== +Module Versioning +================= Module versioning is enabled by the CONFIG_MODVERSIONS tag, and is used as a simple ABI consistency check. A CRC value of the full prototype @@ -461,8 +371,8 @@ module. Module.symvers contains a list of all exported symbols from a kernel build. -6.1 Symbols From the Kernel (vmlinux + modules) ------------------------------------------------ +Symbols From the Kernel (vmlinux + modules) +------------------------------------------- During a kernel build, a file named Module.symvers will be generated. Module.symvers contains all exported symbols from @@ -486,8 +396,8 @@ build. 1) It lists all exported symbols from vmlinux and all modules. 2) It lists the CRC if CONFIG_MODVERSIONS is enabled. -6.2 Symbols and External Modules --------------------------------- +Symbols and External Modules +---------------------------- When building an external module, the build system needs access to the symbols from the kernel to check if all external symbols @@ -496,8 +406,8 @@ build. tree. During the MODPOST step, a new Module.symvers file will be written containing all exported symbols from that external module. -6.3 Symbols From Another External Module ----------------------------------------- +Symbols From Another External Module +------------------------------------ Sometimes, an external module uses exported symbols from another external module. Kbuild needs to have full knowledge of @@ -537,11 +447,11 @@ build. initialization of its symbol tables. -7. Tips & Tricks -================ +Tips & Tricks +============= -7.1 Testing for CONFIG_FOO_BAR ------------------------------- +Testing for CONFIG_FOO_BAR +-------------------------- Modules often need to check for certain `CONFIG_` options to decide if a specific feature is included in the module. In @@ -553,9 +463,3 @@ build. ext2-y := balloc.o bitmap.o dir.o ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o - - External modules have traditionally used "grep" to check for - specific `CONFIG_` settings directly in .config. This usage is - broken. As introduced before, external modules should use - kbuild for building and can therefore use the same methods as - in-tree modules when testing for `CONFIG_` definitions. 
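   The same technique works in an external module's Kbuild file; a minimal
   sketch reusing the 8123 example from above (the extra 8123_msi.o object is
   hypothetical)::

	obj-m := 8123.o
	8123-y := 8123_if.o 8123_pci.o
	8123-$(CONFIG_PCI_MSI) += 8123_msi.o

   If the prebuilt kernel was configured with CONFIG_PCI_MSI=y, the MSI
   support object is compiled into the module; otherwise it is silently
   left out.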
diff --git a/Documentation/leds/leds-blinkm.rst b/Documentation/leds/leds-blinkm.rst index 2d3c226a371ac9..647be1c6c55274 100644 --- a/Documentation/leds/leds-blinkm.rst +++ b/Documentation/leds/leds-blinkm.rst @@ -13,9 +13,31 @@ The device accepts RGB and HSB color values through separate commands. Also you can store blinking sequences as "scripts" in the controller and run them. Also fading is an option. -The interface this driver provides is 2-fold: +The interface this driver provides is 3-fold: -a) LED class interface for use with triggers +a) LED multicolor class interface for use with triggers +####################################################### + +The registration follows the scheme:: + + blinkm--:rgb:indicator + + $ ls -h /sys/class/leds/blinkm-1-9:rgb:indicator + brightness device max_brightness multi_index multi_intensity power subsystem trigger uevent + +Hue is controlled by the multi_intensity file and lightness is controlled by +the brightness file. + +The order in which to write the intensity values can be found in multi_index. +Exactly three values between 0 and 255 must be written to multi_intensity to +change the color:: + + $ echo 255 100 50 > multi_intensity + +The overall lightness be changed by writing a value between 0 and 255 to the +brightness file. + +b) LED class interface for use with triggers ############################################ The registration follows the scheme:: @@ -79,6 +101,7 @@ E.g.:: -as of 6/2012 +as of 07/2024 dl9pf gmx de +jstrauss mailbox org diff --git a/Documentation/leds/leds-mlxcpld.rst b/Documentation/leds/leds-mlxcpld.rst index 528582429e0bb8..c520a134d91e17 100644 --- a/Documentation/leds/leds-mlxcpld.rst +++ b/Documentation/leds/leds-mlxcpld.rst @@ -115,4 +115,4 @@ Driver provides the following LEDs for the system "msn2100": - [1,1,1,1] = Blue blink 6Hz Driver supports HW blinking at 3Hz and 6Hz frequency (50% duty cycle). -For 3Hz duty cylce is about 167 msec, for 6Hz is about 83 msec. +For 3Hz duty cycle is about 167 msec, for 6Hz is about 83 msec. diff --git a/Documentation/leds/well-known-leds.txt b/Documentation/leds/well-known-leds.txt index 67b44704801f23..17ef78faf1f309 100644 --- a/Documentation/leds/well-known-leds.txt +++ b/Documentation/leds/well-known-leds.txt @@ -72,6 +72,14 @@ Good: "platform:*:charging" (allwinner sun50i, leds-cht-wcove) Good: ":backlight" (Motorola Droid 4) +* Indicators + +Good: ":indicator" (Blinkm) + +* RGB + +Good: ":rgb" (Blinkm) + * Ethernet LEDs Currently two types of Network LEDs are support, those controlled by diff --git a/Documentation/livepatch/livepatch.rst b/Documentation/livepatch/livepatch.rst index 68e3651e8af925..acb90164929e32 100644 --- a/Documentation/livepatch/livepatch.rst +++ b/Documentation/livepatch/livepatch.rst @@ -50,7 +50,7 @@ some limitations, see below. 3. Consistency model ==================== -Functions are there for a reason. They take some input parameters, get or +Functions are there for a reason. They take some input parameters, acquire or release locks, read, process, and even write some data in a defined way, have return values. In other words, each function has a defined semantic. diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 4202174a6262c5..93d58d9a428b87 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -88,7 +88,6 @@ CONTENTS (*) The effects of the cpu cache. - - Cache coherency. - Cache coherency vs DMA. - Cache coherency vs MMIO. 
@@ -677,8 +676,6 @@ include/linux/rcupdate.h. This permits the current target of an RCU'd pointer to be replaced with a new modified target, without the replacement target appearing to be incompletely initialised. -See also the subsection on "Cache Coherency" for a more thorough example. - CONTROL DEPENDENCIES -------------------- diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 8730c246ceaa26..f9c50525bdbf5e 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -586,7 +586,7 @@ API, and return the results to the user-space. The ABIs are designed to be used for user space applications development, rather than human beings' fingers. Human users are recommended to use such user space tools. One such Python-written user space tool is available at -Github (https://github.com/awslabs/damo), Pypi +Github (https://github.com/damonitor/damo), Pypi (https://pypistats.org/packages/damo), and Fedora (https://packages.fedoraproject.org/pkgs/python-damo/damo/). diff --git a/Documentation/mm/damon/maintainer-profile.rst b/Documentation/mm/damon/maintainer-profile.rst index feccf6a0f6c3b0..2365c9a3c1f08f 100644 --- a/Documentation/mm/damon/maintainer-profile.rst +++ b/Documentation/mm/damon/maintainer-profile.rst @@ -7,23 +7,27 @@ The DAMON subsystem covers the files that are listed in 'DATA ACCESS MONITOR' section of 'MAINTAINERS' file. The mailing lists for the subsystem are damon@lists.linux.dev and -linux-mm@kvack.org. Patches should be made against the mm-unstable tree [1]_ -whenever possible and posted to the mailing lists. +linux-mm@kvack.org. Patches should be made against the mm-unstable `tree +` whenever possible and posted to +the mailing lists. SCM Trees --------- There are multiple Linux trees for DAMON development. Patches under -development or testing are queued in damon/next [2]_ by the DAMON maintainer. -Sufficiently reviewed patches will be queued in mm-unstable [1]_ by the memory -management subsystem maintainer. After more sufficient tests, the patches will -be queued in mm-stable [3]_ , and finally pull-requested to the mainline by the -memory management subsystem maintainer. - -Note again the patches for mm-unstable tree [1]_ are queued by the memory +development or testing are queued in `damon/next +` by the DAMON maintainer. +Sufficiently reviewed patches will be queued in `mm-unstable +` by the memory management +subsystem maintainer. After more sufficient tests, the patches will be queued +in `mm-stable ` , and finally +pull-requested to the mainline by the memory management subsystem maintainer. + +Note again the patches for mm-unstable `tree +` are queued by the memory management subsystem maintainer. If the patches requires some patches in -damon/next tree [2]_ which not yet merged in mm-unstable, please make sure the -requirement is clearly specified. +damon/next `tree ` which not yet merged +in mm-unstable, please make sure the requirement is clearly specified. Submit checklist addendum ------------------------- @@ -32,18 +36,27 @@ When making DAMON changes, you should do below. - Build changes related outputs including kernel and documents. - Ensure the builds introduce no new errors or warnings. -- Run and ensure no new failures for DAMON selftests [4]_ and kunittests [5]_ . +- Run and ensure no new failures for DAMON `selftests + ` and + `kunittests + `. Further doing below and putting the results will be helpful. -- Run damon-tests/corr [6]_ for normal changes. 
-- Run damon-tests/perf [7]_ for performance changes. +- Run `damon-tests/corr + ` for normal + changes. +- Run `damon-tests/perf + ` for performance + changes. Key cycle dates --------------- -Patches can be sent anytime. Key cycle dates of the mm-unstable [1]_ and -mm-stable [3]_ trees depend on the memory management subsystem maintainer. +Patches can be sent anytime. Key cycle dates of the `mm-unstable +` and `mm-stable +` trees depend on the memory +management subsystem maintainer. Review cadence -------------- @@ -58,16 +71,17 @@ Mailing tool Like many other Linux kernel subsystems, DAMON uses the mailing lists (damon@lists.linux.dev and linux-mm@kvack.org) as the major communication -channel. There is a simple tool called HacKerMaiL (``hkml``) [8]_ , which is -for people who are not very familiar with the mailing lists based -communication. The tool could be particularly helpful for DAMON community -members since it is developed and maintained by DAMON maintainer. The tool is -also officially announced to support DAMON and general Linux kernel development -workflow. - -In other words, ``hkml`` [8]_ is a mailing tool for DAMON community, which -DAMON maintainer is committed to support. Please feel free to try and report -issues or feature requests for the tool to the maintainer. +channel. There is a simple tool called `HacKerMaiL +` (``hkml``), which is for people who +are not very familiar with the mailing lists based communication. The tool +could be particularly helpful for DAMON community members since it is developed +and maintained by DAMON maintainer. The tool is also officially announced to +support DAMON and general Linux kernel development workflow. + +In other words, `hkml ` is a mailing +tool for DAMON community, which DAMON maintainer is committed to support. +Please feel free to try and report issues or feature requests for the tool to +the maintainer. Community meetup ---------------- @@ -83,17 +97,9 @@ members including the maintainer. The maintainer shares the available time slots, and attendees should reserve one of those at least 24 hours before the time slot, by reaching out to the maintainer. -Schedules and available reservation time slots are available at the Google doc -[9]_ . DAMON maintainer will also provide periodic reminder to the mailing -list (damon@lists.linux.dev). - - -.. [1] https://git.kernel.org/akpm/mm/h/mm-unstable -.. [2] https://git.kernel.org/sj/h/damon/next -.. [3] https://git.kernel.org/akpm/mm/h/mm-stable -.. [4] https://github.com/awslabs/damon-tests/blob/master/corr/run.sh#L49 -.. [5] https://github.com/awslabs/damon-tests/blob/master/corr/tests/kunit.sh -.. [6] https://github.com/awslabs/damon-tests/tree/master/corr -.. [7] https://github.com/awslabs/damon-tests/tree/master/perf -.. [8] https://github.com/damonitor/hackermail -.. [9] https://docs.google.com/document/d/1v43Kcj3ly4CYqmAkMaZzLiM2GEnWfgdGbZAH3mi2vpM/edit?usp=sharing +Schedules and available reservation time slots are available at the Google `doc +`. +There is also a public Google `calendar +` +that has the events. Anyone can subscribe it. DAMON maintainer will also +provide periodic reminder to the mailing list (damon@lists.linux.dev). diff --git a/Documentation/mm/hmm.rst b/Documentation/mm/hmm.rst index 0595098a74d9db..f6d53c37a2ca8e 100644 --- a/Documentation/mm/hmm.rst +++ b/Documentation/mm/hmm.rst @@ -66,7 +66,7 @@ combinatorial explosion in the library entry points. 
Finally, with the advance of high level language constructs (in C++ but in other languages too) it is now possible for the compiler to leverage GPUs and other devices without programmer knowledge. Some compiler identified patterns -are only do-able with a shared address space. It is also more reasonable to use +are only doable with a shared address space. It is also more reasonable to use a shared address space for all other patterns. @@ -267,7 +267,7 @@ functions are designed to make drivers easier to write and to centralize common code across drivers. Before migrating pages to device private memory, special device private -``struct page`` need to be created. These will be used as special "swap" +``struct page`` needs to be created. These will be used as special "swap" page table entries so that a CPU process will fault if it tries to access a page that has been migrated to device private memory. @@ -322,7 +322,7 @@ between device driver specific code and shared common code: The ``invalidate_range_start()`` callback is passed a ``struct mmu_notifier_range`` with the ``event`` field set to ``MMU_NOTIFY_MIGRATE`` and the ``owner`` field set to - the ``args->pgmap_owner`` field passed to migrate_vma_setup(). This is + the ``args->pgmap_owner`` field passed to migrate_vma_setup(). This allows the device driver to skip the invalidation callback and only invalidate device private MMU mappings that are actually migrating. This is explained more in the next section. @@ -405,7 +405,7 @@ can be used to make a memory range inaccessible from userspace. This replaces all mappings for pages in the given range with special swap entries. Any attempt to access the swap entry results in a fault which is -resovled by replacing the entry with the original mapping. A driver gets +resolved by replacing the entry with the original mapping. A driver gets notified that the mapping has been changed by MMU notifiers, after which point it will no longer have exclusive access to the page. Exclusive access is guaranteed to last until the driver drops the page lock and page reference, at @@ -431,7 +431,7 @@ Same decision was made for memory cgroup. Device memory pages are accounted against same memory cgroup a regular page would be accounted to. This does simplify migration to and from device memory. This also means that migration back from device memory to regular memory cannot fail because it would -go above memory cgroup limit. We might revisit this choice latter on once we +go above memory cgroup limit. We might revisit this choice later on once we get more experience in how device memory is used and its impact on memory resource control. diff --git a/Documentation/mm/page_migration.rst b/Documentation/mm/page_migration.rst index f1ce67a2661528..519b35a4caf5b2 100644 --- a/Documentation/mm/page_migration.rst +++ b/Documentation/mm/page_migration.rst @@ -63,15 +63,15 @@ and then a low level description of how the low level details work. In kernel use of migrate_pages() ================================ -1. Remove pages from the LRU. +1. Remove folios from the LRU. - Lists of pages to be migrated are generated by scanning over - pages and moving them into lists. This is done by - calling isolate_lru_page(). - Calling isolate_lru_page() increases the references to the page - so that it cannot vanish while the page migration occurs. + Lists of folios to be migrated are generated by scanning over + folios and moving them into lists. This is done by + calling folio_isolate_lru(). 
+ Calling folio_isolate_lru() increases the references to the folio + so that it cannot vanish while the folio migration occurs. It also prevents the swapper or other scans from encountering - the page. + the folio. 2. We need to have a function of type new_folio_t that can be passed to migrate_pages(). This function should figure out @@ -84,10 +84,10 @@ In kernel use of migrate_pages() How migrate_pages() works ========================= -migrate_pages() does several passes over its list of pages. A page is moved -if all references to a page are removable at the time. The page has -already been removed from the LRU via isolate_lru_page() and the refcount -is increased so that the page cannot be freed while page migration occurs. +migrate_pages() does several passes over its list of folios. A folio is moved +if all references to a folio are removable at the time. The folio has +already been removed from the LRU via folio_isolate_lru() and the refcount +is increased so that the folio cannot be freed while folio migration occurs. Steps: diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst index 1ba0ad63246c53..a2cd8800d52799 100644 --- a/Documentation/mm/transhuge.rst +++ b/Documentation/mm/transhuge.rst @@ -31,10 +31,10 @@ Design principles feature that applies to all dynamic high order allocations in the kernel) -get_user_pages and follow_page -============================== +get_user_pages and pin_user_pages +================================= -get_user_pages and follow_page if run on a hugepage, will return the +get_user_pages and pin_user_pages if run on a hugepage, will return the head or tail pages as usual (exactly as they would do on hugetlbfs). Most GUP users will only care about the actual physical address of the page and its temporary pinning to release after the I/O diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst index 2feb2ed51ae27e..8d11fe6a085431 100644 --- a/Documentation/mm/unevictable-lru.rst +++ b/Documentation/mm/unevictable-lru.rst @@ -80,7 +80,7 @@ on an additional LRU list for a few reasons: (2) We want to be able to migrate unevictable folios between nodes for memory defragmentation, workload management and memory hotplug. The Linux kernel can only migrate folios that it can successfully isolate from the LRU - lists (or "Movable" pages: outside of consideration here). If we were to + lists (or "Movable" folios: outside of consideration here). If we were to maintain folios elsewhere than on an LRU-like list, where they can be detected by folio_isolate_lru(), we would prevent their migration. @@ -230,7 +230,7 @@ In Nick's patch, he used one of the struct page LRU list link fields as a count of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years earlier). But this use of the link field for a count prevented the management of the pages on an LRU list, and thus mlocked pages were not migratable as -isolate_lru_page() could not detect them, and the LRU list link field was not +folio_isolate_lru() could not detect them, and the LRU list link field was not available to the migration subsystem. Nick resolved this by putting mlocked pages back on the LRU list before @@ -253,8 +253,8 @@ Basic Management mlocked pages - pages mapped into a VM_LOCKED VMA - are a class of unevictable pages. When such a page has been "noticed" by the memory management subsystem, -the page is marked with the PG_mlocked flag. This can be manipulated using the -PageMlocked() functions. 
+the folio is marked with the PG_mlocked flag. This can be manipulated using +folio_set_mlocked() and folio_clear_mlocked() functions. A PG_mlocked page will be placed on the unevictable list when it is added to the LRU. Such pages can be "noticed" by memory management in several places: diff --git a/Documentation/mm/vmalloced-kernel-stacks.rst b/Documentation/mm/vmalloced-kernel-stacks.rst index 4edca515bfd772..5bc0f7ceea8948 100644 --- a/Documentation/mm/vmalloced-kernel-stacks.rst +++ b/Documentation/mm/vmalloced-kernel-stacks.rst @@ -110,7 +110,7 @@ Bulk of the code is in: `kernel/fork.c `. stack_vm_area pointer in task_struct keeps track of the virtually allocated -stack and a non-null stack_vm_area pointer serves as a indication that the +stack and a non-null stack_vm_area pointer serves as an indication that the virtually mapped kernel stacks are enabled. :: @@ -120,8 +120,8 @@ virtually mapped kernel stacks are enabled. Stack overflow handling ----------------------- -Leading and trailing guard pages help detect stack overflows. When stack -overflows into the guard pages, handlers have to be careful not overflow +Leading and trailing guard pages help detect stack overflows. When the stack +overflows into the guard pages, handlers have to be careful not to overflow the stack again. When handlers are called, it is likely that very little stack space is left. @@ -148,6 +148,6 @@ Conclusions - THREAD_INFO_IN_TASK gets rid of arch-specific thread_info entirely and simply embed the thread_info (containing only flags) and 'int cpu' into task_struct. -- The thread stack can be free'ed as soon as the task is dead (without +- The thread stack can be freed as soon as the task is dead (without waiting for RCU) and then, if vmapped stacks are in use, cache the entire stack for reuse on the same cpu. diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml index 94132d30e0e035..f2894ca35de840 100644 --- a/Documentation/netlink/specs/dpll.yaml +++ b/Documentation/netlink/specs/dpll.yaml @@ -345,6 +345,26 @@ attribute-sets: Value is in PPM (parts per million). This may be implemented for example for pin of type PIN_TYPE_SYNCE_ETH_PORT. + - + name: esync-frequency + type: u64 + doc: | + Frequency of Embedded SYNC signal. If provided, the pin is configured + with a SYNC signal embedded into its base clock frequency. + - + name: esync-frequency-supported + type: nest + multi-attr: true + nested-attributes: frequency-range + doc: | + If provided a pin is capable of embedding a SYNC signal (within given + range) into its base frequency signal. + - + name: esync-pulse + type: u32 + doc: | + A ratio of high to low state of a SYNC signal pulse embedded + into base clock frequency. Value is in percents. 
- name: pin-parent-device subset-of: pin @@ -510,6 +530,9 @@ operations: - phase-adjust-max - phase-adjust - fractional-frequency-offset + - esync-frequency + - esync-frequency-supported + - esync-pulse dump: request: @@ -536,6 +559,7 @@ operations: - parent-device - parent-pin - phase-adjust + - esync-frequency - name: pin-create-ntf doc: Notification about pin appearing diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index ea21fe135b97eb..6a050d755b9cb4 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -39,6 +39,11 @@ definitions: - ovld-detected - power-not-available - short-detected + - + name: phy-upstream-type + enum-name: + type: enum + entries: [ mac, phy ] attribute-sets: - @@ -54,6 +59,9 @@ attribute-sets: name: flags type: u32 enum: header-flags + - + name: phy-index + type: u32 - name: bitset-bit @@ -659,6 +667,9 @@ attribute-sets: - name: code type: u8 + - + name: src + type: u32 - name: cable-fault-length attributes: @@ -668,6 +679,9 @@ attribute-sets: - name: cm type: u32 + - + name: src + type: u32 - name: cable-nest attributes: @@ -1022,12 +1036,16 @@ attribute-sets: - name: indir type: binary + sub-type: u32 - name: hkey type: binary - name: input_xfrm type: u32 + - + name: start-context + type: u32 - name: plca attributes: @@ -1085,6 +1103,35 @@ attribute-sets: - name: total type: uint + - + name: phy + attributes: + - + name: header + type: nest + nested-attributes: header + - + name: index + type: u32 + - + name: drvname + type: string + - + name: name + type: string + - + name: upstream-type + type: u32 + enum: phy-upstream-type + - + name: upstream-index + type: u32 + - + name: upstream-sfp-name + type: string + - + name: downstream-sfp-name + type: string operations: enum-model: directional @@ -1749,12 +1796,12 @@ operations: attribute-set: rss - do: &rss-get-op + do: request: attributes: - header - context - reply: + reply: &rss-reply attributes: - header - context @@ -1762,6 +1809,12 @@ operations: - indir - hkey - input_xfrm + dump: + request: + attributes: + - header + - start-context + reply: *rss-reply - name: plca-get-cfg doc: Get PLCA params. @@ -1877,3 +1930,24 @@ operations: - status-msg - done - total + - + name: phy-get + doc: Get PHY devices attached to an interface + + attribute-set: phy + + do: &phy-get-op + request: + attributes: + - header + reply: + attributes: + - header + - index + - drvname + - name + - upstream-type + - upstream-index + - upstream-sfp-name + - downstream-sfp-name + dump: *phy-get-op diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml index 959755be4d7f9c..08412c279297bf 100644 --- a/Documentation/netlink/specs/netdev.yaml +++ b/Documentation/netlink/specs/netdev.yaml @@ -167,6 +167,10 @@ attribute-sets: "re-attached", they are just waiting to disappear. Attribute is absent if Page Pool has not been detached, and can still be used to allocate new memory. + - + name: dmabuf + doc: ID of the dmabuf this page-pool is attached to. + type: u32 - name: page-pool-info subset-of: page-pool @@ -268,6 +272,10 @@ attribute-sets: name: napi-id doc: ID of the NAPI instance which services this queue. type: u32 + - + name: dmabuf + doc: ID of the dmabuf attached to this queue, if any. + type: u32 - name: qstats @@ -457,6 +465,39 @@ attribute-sets: Number of times driver re-started accepting send requests to this queue from the stack. 
type: uint + - + name: queue-id + subset-of: queue + attributes: + - + name: id + - + name: type + - + name: dmabuf + attributes: + - + name: ifindex + doc: netdev ifindex to bind the dmabuf to. + type: u32 + checks: + min: 1 + - + name: queues + doc: receive queues to bind the dmabuf to. + type: nest + nested-attributes: queue-id + multi-attr: true + - + name: fd + doc: dmabuf file descriptor to bind. + type: u32 + - + name: id + doc: id of the dmabuf binding + type: u32 + checks: + min: 1 operations: list: @@ -510,6 +551,7 @@ operations: - inflight - inflight-mem - detach-time + - dmabuf dump: reply: *pp-reply config-cond: page-pool @@ -574,6 +616,7 @@ operations: - type - napi-id - ifindex + - dmabuf dump: request: attributes: @@ -619,6 +662,24 @@ operations: - rx-bytes - tx-packets - tx-bytes + - + name: bind-rx + doc: Bind dmabuf to netdev + attribute-set: dmabuf + flags: [ admin-perm ] + do: + request: + attributes: + - ifindex + - fd + - queues + reply: + attributes: + - id + +kernel-family: + headers: [ "linux/list.h"] + sock-priv: struct list_head mcast-groups: list: diff --git a/Documentation/netlink/specs/nftables.yaml b/Documentation/netlink/specs/nftables.yaml index dff2a18f3d9094..bd938bd01b6bf2 100644 --- a/Documentation/netlink/specs/nftables.yaml +++ b/Documentation/netlink/specs/nftables.yaml @@ -62,6 +62,13 @@ definitions: - sdif - sdifname - bri-broute + - + name: bitwise-ops + type: enum + entries: + - bool + - lshift + - rshift - name: cmp-ops type: enum @@ -125,6 +132,99 @@ definitions: - object - concat - expr + - + name: lookup-flags + type: flags + entries: + - invert + - + name: ct-keys + type: enum + entries: + - state + - direction + - status + - mark + - secmark + - expiration + - helper + - l3protocol + - src + - dst + - protocol + - proto-src + - proto-dst + - labels + - pkts + - bytes + - avgpkt + - zone + - eventmask + - src-ip + - dst-ip + - src-ip6 + - dst-ip6 + - ct-id + - + name: ct-direction + type: enum + entries: + - original + - reply + - + name: quota-flags + type: flags + entries: + - invert + - depleted + - + name: verdict-code + type: enum + entries: + - name: continue + value: 0xffffffff + - name: break + value: 0xfffffffe + - name: jump + value: 0xfffffffd + - name: goto + value: 0xfffffffc + - name: return + value: 0xfffffffb + - name: drop + value: 0 + - name: accept + value: 1 + - name: stolen + value: 2 + - name: queue + value: 3 + - name: repeat + value: 4 + - + name: fib-result + type: enum + entries: + - oif + - oifname + - addrtype + - + name: fib-flags + type: flags + entries: + - saddr + - daddr + - mark + - iif + - oif + - present + - + name: reject-types + type: enum + entries: + - icmp-unreach + - tcp-rst + - icmpx-unreach attribute-sets: - @@ -611,9 +711,10 @@ attribute-sets: type: u64 byte-order: big-endian - - name: flags # TODO + name: flags type: u32 byte-order: big-endian + enum: quota-flags - name: pad type: pad @@ -664,6 +765,38 @@ attribute-sets: name: devs type: nest nested-attributes: hook-dev-attrs + - + name: expr-bitwise-attrs + attributes: + - + name: sreg + type: u32 + byte-order: big-endian + - + name: dreg + type: u32 + byte-order: big-endian + - + name: len + type: u32 + byte-order: big-endian + - + name: mask + type: nest + nested-attributes: data-attrs + - + name: xor + type: nest + nested-attributes: data-attrs + - + name: op + type: u32 + byte-order: big-endian + enum: bitwise-ops + - + name: data + type: nest + nested-attributes: data-attrs - name: expr-cmp-attrs attributes: @@ -698,6 +831,7 @@ 
attribute-sets: name: code type: u32 byte-order: big-endian + enum: verdict-code - name: chain type: string @@ -718,6 +852,43 @@ attribute-sets: - name: pad type: pad + - + name: expr-fib-attrs + attributes: + - + name: dreg + type: u32 + byte-order: big-endian + - + name: result + type: u32 + byte-order: big-endian + enum: fib-result + - + name: flags + type: u32 + byte-order: big-endian + enum: fib-flags + - + name: expr-ct-attrs + attributes: + - + name: dreg + type: u32 + byte-order: big-endian + - + name: key + type: u32 + byte-order: big-endian + enum: ct-keys + - + name: direction + type: u8 + enum: ct-direction + - + name: sreg + type: u32 + byte-order: big-endian - name: expr-flow-offload-attrs attributes: @@ -736,6 +907,31 @@ attribute-sets: name: data type: nest nested-attributes: data-attrs + - + name: expr-lookup-attrs + attributes: + - + name: set + type: string + doc: Name of set to use + - + name: set id + type: u32 + byte-order: big-endian + doc: ID of set to use + - + name: sreg + type: u32 + byte-order: big-endian + - + name: dreg + type: u32 + byte-order: big-endian + - + name: flags + type: u32 + byte-order: big-endian + enum: lookup-flags - name: expr-meta-attrs attributes: @@ -820,6 +1016,30 @@ attribute-sets: name: csum-flags type: u32 byte-order: big-endian + - + name: expr-reject-attrs + attributes: + - + name: type + type: u32 + byte-order: big-endian + enum: reject-types + - + name: icmp-code + type: u8 + - + name: expr-target-attrs + attributes: + - + name: name + type: string + - + name: rev + type: u32 + byte-order: big-endian + - + name: info + type: binary - name: expr-tproxy-attrs attributes: @@ -835,13 +1055,38 @@ attribute-sets: name: reg-port type: u32 byte-order: big-endian + - + name: expr-objref-attrs + attributes: + - + name: imm-type + type: u32 + byte-order: big-endian + - + name: imm-name + type: string + doc: object name + - + name: set-sreg + type: u32 + byte-order: big-endian + - + name: set-name + type: string + doc: name of object map + - + name: set-id + type: u32 + byte-order: big-endian + doc: id of object map sub-messages: - name: expr-ops formats: - - value: bitwise # TODO + value: bitwise + attribute-set: expr-bitwise-attrs - value: cmp attribute-set: expr-cmp-attrs @@ -849,7 +1094,11 @@ sub-messages: value: counter attribute-set: expr-counter-attrs - - value: ct # TODO + value: ct + attribute-set: expr-ct-attrs + - + value: fib + attribute-set: expr-fib-attrs - value: flow_offload attribute-set: expr-flow-offload-attrs @@ -857,16 +1106,29 @@ sub-messages: value: immediate attribute-set: expr-immediate-attrs - - value: lookup # TODO + value: lookup + attribute-set: expr-lookup-attrs - value: meta attribute-set: expr-meta-attrs - value: nat attribute-set: expr-nat-attrs + - + value: objref + attribute-set: expr-objref-attrs - value: payload attribute-set: expr-payload-attrs + - + value: quota + attribute-set: quota-attrs + - + value: reject + attribute-set: expr-reject-attrs + - + value: target + attribute-set: expr-target-attrs - value: tproxy attribute-set: expr-tproxy-attrs diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml index de08c12fd56f85..0c4d5d40cae905 100644 --- a/Documentation/netlink/specs/rt_link.yaml +++ b/Documentation/netlink/specs/rt_link.yaml @@ -903,6 +903,22 @@ definitions: - cfm-config - cfm-status - mst + - + name: netkit-policy + type: enum + entries: + - + name: forward + value: 0 + - + name: blackhole + value: 2 + - + name: netkit-mode + type: enum + entries: + - 
name: l2 + - name: l3 attribute-sets: - @@ -2109,6 +2125,28 @@ attribute-sets: - name: id type: u32 + - + name: linkinfo-netkit-attrs + name-prefix: ifla-netkit- + attributes: + - + name: peer-info + type: binary + - + name: primary + type: u8 + - + name: policy + type: u32 + enum: netkit-policy + - + name: peer-policy + type: u32 + enum: netkit-policy + - + name: mode + type: u32 + enum: netkit-mode sub-messages: - @@ -2147,6 +2185,9 @@ sub-messages: - value: vrf attribute-set: linkinfo-vrf-attrs + - + value: netkit + attribute-set: linkinfo-netkit-attrs - name: linkinfo-member-data-msg formats: diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst index a4c7d0c65fd7e5..4561e8ab9e085a 100644 --- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst +++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst @@ -230,6 +230,11 @@ per-queue stats) from the device. In addition the driver logs the stats to syslog upon device reset. +On supported instance types, the statistics will also include the +ENA Express data (fields prefixed with `ena_srd`). For a complete +documentation of ENA Express data refer to +https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ena-express.html#ena-express-monitor + MTU === diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst index 6932d8c043c2c1..6fc1961492b772 100644 --- a/Documentation/networking/device_drivers/ethernet/index.rst +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -44,6 +44,7 @@ Contents: marvell/octeon_ep marvell/octeon_ep_vf mellanox/mlx5/index + meta/fbnic microsoft/netvsc neterion/s2io netronome/nfp diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst index 3bd72577af9ae1..99d95be4d159fe 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst @@ -218,6 +218,22 @@ the software port. [#accel]_. - Informative + * - `rx[i]_hds_nosplit_packets` + - Number of packets that were not split in header/data split mode. A + packet will not get split when the hardware does not support its + protocol splitting. An example such a protocol is ICMPv4/v6. Currently + TCP and UDP with IPv4/IPv6 are supported for header/data split + [#accel]_. + - Informative + + * - `rx[i]_hds_nosplit_bytes` + - Number of bytes for packets that were not split in header/data split + mode. A packet will not get split when the hardware does not support its + protocol splitting. An example such a protocol is ICMPv4/v6. Currently + TCP and UDP with IPv4/IPv6 are supported for header/data split + [#accel]_. + - Informative + * - `rx[i]_lro_packets` - The number of LRO packets received on ring i [#accel]_. - Acceleration diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst index 20d3b7e87049a7..34e9114801085f 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst @@ -130,6 +130,9 @@ Enabling the driver and kconfig options | Build support for software-managed steering in the NIC. 
+**CONFIG_MLX5_HW_STEERING=(y/n)** + +| Build support for hardware-managed steering in the NIC. **CONFIG_MLX5_TC_CT=(y/n)** diff --git a/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst new file mode 100644 index 00000000000000..32ff114f5c26a8 --- /dev/null +++ b/Documentation/networking/device_drivers/ethernet/meta/fbnic.rst @@ -0,0 +1,29 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +===================================== +Meta Platforms Host Network Interface +===================================== + +Firmware Versions +----------------- + +fbnic has three components stored on the flash which are provided in one PLDM +image: + +1. fw - The control firmware used to view and modify firmware settings, request + firmware actions, and retrieve firmware counters outside of the data path. + This is the firmware which fbnic_fw.c interacts with. +2. bootloader - The firmware which validate firmware security and control basic + operations including loading and updating the firmware. This is also known + as the cmrt firmware. +3. undi - This is the UEFI driver which is based on the Linux driver. + +fbnic stores two copies of these three components on flash. This allows fbnic +to fall back to an older version of firmware automatically in case firmware +fails to boot. Version information for both is provided as running and stored. +The undi is only provided in stored as it is not actively running once the Linux +driver takes over. + +devlink dev info provides version information for all three components. In +addition to the version the hg commit hash of the build is included as a +separate entry. diff --git a/Documentation/networking/devmem.rst b/Documentation/networking/devmem.rst new file mode 100644 index 00000000000000..a55bf21f671c3c --- /dev/null +++ b/Documentation/networking/devmem.rst @@ -0,0 +1,269 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================= +Device Memory TCP +================= + + +Intro +===== + +Device memory TCP (devmem TCP) enables receiving data directly into device +memory (dmabuf). The feature is currently implemented for TCP sockets. + + +Opportunity +----------- + +A large number of data transfers have device memory as the source and/or +destination. Accelerators drastically increased the prevalence of such +transfers. Some examples include: + +- Distributed training, where ML accelerators, such as GPUs on different hosts, + exchange data. + +- Distributed raw block storage applications transfer large amounts of data with + remote SSDs. Much of this data does not require host processing. + +Typically the Device-to-Device data transfers in the network are implemented as +the following low-level operations: Device-to-Host copy, Host-to-Host network +transfer, and Host-to-Device copy. + +The flow involving host copies is suboptimal, especially for bulk data transfers, +and can put significant strains on system resources such as host memory +bandwidth and PCIe bandwidth. + +Devmem TCP optimizes this use case by implementing socket APIs that enable +the user to receive incoming network packets directly into device memory. + +Packet payloads go directly from the NIC to device memory. + +Packet headers go to host memory and are processed by the TCP/IP stack +normally. The NIC must support header split to achieve this. + +Advantages: + +- Alleviate host memory bandwidth pressure, compared to existing + network-transfer + device-copy semantics. 
+ +- Alleviate PCIe bandwidth pressure, by limiting data transfer to the lowest + level of the PCIe tree, compared to the traditional path which sends data + through the root complex. + + +More Info +--------- + + slides, video + https://netdevconf.org/0x17/sessions/talk/device-memory-tcp.html + + patchset + [PATCH net-next v24 00/13] Device Memory TCP + https://lore.kernel.org/netdev/20240831004313.3713467-1-almasrymina@google.com/ + + +Interface +========= + + +Example +------- + +tools/testing/selftests/net/ncdevmem.c:do_server shows an example of setting up +the RX path of this API. + + +NIC Setup +--------- + +Header split, flow steering, & RSS are required features for devmem TCP. + +Header split is used to split incoming packets into a header buffer in host +memory, and a payload buffer in device memory. + +Flow steering & RSS are used to ensure that only flows targeting devmem land on +an RX queue bound to devmem. + +Enable header split & flow steering:: + + # enable header split + ethtool -G eth1 tcp-data-split on + + + # enable flow steering + ethtool -K eth1 ntuple on + +Configure RSS to steer all traffic away from the target RX queue (queue 15 in +this example):: + + ethtool --set-rxfh-indir eth1 equal 15 + + +The user must bind a dmabuf to any number of RX queues on a given NIC using +the netlink API:: + + /* Bind dmabuf to NIC RX queue 15 */ + struct netdev_queue *queues; + queues = malloc(sizeof(*queues) * 1); + + queues[0]._present.type = 1; + queues[0]._present.idx = 1; + queues[0].type = NETDEV_RX_QUEUE_TYPE_RX; + queues[0].idx = 15; + + *ys = ynl_sock_create(&ynl_netdev_family, &yerr); + + req = netdev_bind_rx_req_alloc(); + netdev_bind_rx_req_set_ifindex(req, 1 /* ifindex */); + netdev_bind_rx_req_set_dmabuf_fd(req, dmabuf_fd); + __netdev_bind_rx_req_set_queues(req, queues, n_queue_index); + + rsp = netdev_bind_rx(*ys, req); + + dmabuf_id = rsp->dmabuf_id; + + +The netlink API returns a dmabuf_id: a unique ID that refers to this dmabuf +that has been bound. + +The user can unbind the dmabuf from the netdevice by closing the netlink socket +that established the binding. We do this so that the binding is automatically +unbound even if the userspace process crashes. + +Note that any reasonably well-behaved dmabuf from any exporter should work with +devmem TCP, even if the dmabuf is not actually backed by devmem. An example of +this is udmabuf, which wraps user memory (non-devmem) in a dmabuf. + + +Socket Setup +------------ + +The socket must be flow steered to the dmabuf bound RX queue:: + + ethtool -N eth1 flow-type tcp4 ... queue 15 + + +Receiving data +-------------- + +The user application must signal to the kernel that it is capable of receiving +devmem data by passing the MSG_SOCK_DEVMEM flag to recvmsg:: + + ret = recvmsg(fd, &msg, MSG_SOCK_DEVMEM); + +Applications that do not specify the MSG_SOCK_DEVMEM flag will receive an EFAULT +on devmem data. + +Devmem data is received directly into the dmabuf bound to the NIC in 'NIC +Setup', and the kernel signals such to the user via the SCM_DEVMEM_* cmsgs:: + + for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) { + if (cm->cmsg_level != SOL_SOCKET || + (cm->cmsg_type != SCM_DEVMEM_DMABUF && + cm->cmsg_type != SCM_DEVMEM_LINEAR)) + continue; + + dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm); + + if (cm->cmsg_type == SCM_DEVMEM_DMABUF) { + /* Frag landed in dmabuf. + * + * dmabuf_cmsg->dmabuf_id is the dmabuf the + * frag landed on. 
+ * + * dmabuf_cmsg->frag_offset is the offset into + * the dmabuf where the frag starts. + * + * dmabuf_cmsg->frag_size is the size of the + * frag. + * + * dmabuf_cmsg->frag_token is a token used to + * refer to this frag for later freeing. + */ + + struct dmabuf_token token; + token.token_start = dmabuf_cmsg->frag_token; + token.token_count = 1; + continue; + } + + if (cm->cmsg_type == SCM_DEVMEM_LINEAR) + /* Frag landed in linear buffer. + * + * dmabuf_cmsg->frag_size is the size of the + * frag. + */ + continue; + + } + +Applications may receive 2 cmsgs: + +- SCM_DEVMEM_DMABUF: this indicates the fragment landed in the dmabuf indicated + by dmabuf_id. + +- SCM_DEVMEM_LINEAR: this indicates the fragment landed in the linear buffer. + This typically happens when the NIC is unable to split the packet at the + header boundary, such that part (or all) of the payload landed in host + memory. + +Applications may receive no SO_DEVMEM_* cmsgs. That indicates non-devmem, +regular TCP data that landed on an RX queue not bound to a dmabuf. + + +Freeing frags +------------- + +Frags received via SCM_DEVMEM_DMABUF are pinned by the kernel while the user +processes the frag. The user must return the frag to the kernel via +SO_DEVMEM_DONTNEED:: + + ret = setsockopt(client_fd, SOL_SOCKET, SO_DEVMEM_DONTNEED, &token, + sizeof(token)); + +The user must ensure the tokens are returned to the kernel in a timely manner. +Failure to do so will exhaust the limited dmabuf that is bound to the RX queue +and will lead to packet drops. + + +Implementation & Caveats +======================== + +Unreadable skbs +--------------- + +Devmem payloads are inaccessible to the kernel processing the packets. This +results in a few quirks for payloads of devmem skbs: + +- Loopback is not functional. Loopback relies on copying the payload, which is + not possible with devmem skbs. + +- Software checksum calculation fails. + +- TCP Dump and bpf can't access devmem packet payloads. + + +Testing +======= + +More realistic example code can be found in the kernel source under +``tools/testing/selftests/net/ncdevmem.c`` + +ncdevmem is a devmem TCP netcat. It works very similarly to netcat, but +receives data directly into a udmabuf. + +To run ncdevmem, you need to run it on a server on the machine under test, and +you need to run netcat on a peer to provide the TX data. + +ncdevmem has a validation mode as well that expects a repeating pattern of +incoming data and validates it as such. For example, you can launch +ncdevmem on the server by:: + + ncdevmem -s -c -f eth1 -d 3 -n 0000:06:00.0 -l \ + -p 5201 -v 7 + +On client side, use regular netcat to send TX data to ncdevmem process +on the server:: + + yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \ + tr \\n \\0 | head -c 5G | nc 5201 -p 5201 diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index d5f246aceb9f48..295563e9108247 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -57,6 +57,7 @@ Structure of this header is ``ETHTOOL_A_HEADER_DEV_INDEX`` u32 device ifindex ``ETHTOOL_A_HEADER_DEV_NAME`` string device name ``ETHTOOL_A_HEADER_FLAGS`` u32 flags common for all requests + ``ETHTOOL_A_HEADER_PHY_INDEX`` u32 phy device index ============================== ====== ============================= ``ETHTOOL_A_HEADER_DEV_INDEX`` and ``ETHTOOL_A_HEADER_DEV_NAME`` identify the @@ -81,6 +82,12 @@ the behaviour is backward compatible, i.e. 
requests from old clients not aware of the flag should be interpreted the way the client expects. A client must not set flags it does not understand. +``ETHTOOL_A_HEADER_PHY_INDEX`` identifies the Ethernet PHY the message relates to. +As there are numerous commands that are related to PHY configuration, and because +there may be more than one PHY on the link, the PHY index can be passed in the +request for the commands that needs it. It is, however, not mandatory, and if it +is not passed for commands that target a PHY, the net_device.phydev pointer +is used. Bit sets ======== @@ -934,18 +941,18 @@ Request contents: ==================================== ====== =========================== Kernel checks that requested ring sizes do not exceed limits reported by -driver. Driver may impose additional constraints and may not suspport all +driver. Driver may impose additional constraints and may not support all attributes. ``ETHTOOL_A_RINGS_CQE_SIZE`` specifies the completion queue event size. -Completion queue events(CQE) are the events posted by NIC to indicate the -completion status of a packet when the packet is sent(like send success or -error) or received(like pointers to packet fragments). The CQE size parameter +Completion queue events (CQE) are the events posted by NIC to indicate the +completion status of a packet when the packet is sent (like send success or +error) or received (like pointers to packet fragments). The CQE size parameter enables to modify the CQE size other than default size if NIC supports it. -A bigger CQE can have more receive buffer pointers inturn NIC can transfer -a bigger frame from wire. Based on the NIC hardware, the overall completion -queue size can be adjusted in the driver if CQE size is modified. +A bigger CQE can have more receive buffer pointers, and in turn the NIC can +transfer a bigger frame from wire. Based on the NIC hardware, the overall +completion queue size can be adjusted in the driver if CQE size is modified. CHANNELS_GET ============ @@ -989,7 +996,7 @@ Request contents: ===================================== ====== ========================== Kernel checks that requested channel counts do not exceed limits reported by -driver. Driver may impose additional constraints and may not suspport all +driver. Driver may impose additional constraints and may not support all attributes. @@ -1307,12 +1314,17 @@ information. 
+-+-+-----------------------------------------+--------+---------------------+ | | | ``ETHTOOL_A_CABLE_RESULTS_CODE`` | u8 | result code | +-+-+-----------------------------------------+--------+---------------------+ + | | | ``ETHTOOL_A_CABLE_RESULT_SRC`` | u32 | information source | + +-+-+-----------------------------------------+--------+---------------------+ | | ``ETHTOOL_A_CABLE_NEST_FAULT_LENGTH`` | nested | cable length | +-+-+-----------------------------------------+--------+---------------------+ | | | ``ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR`` | u8 | pair number | +-+-+-----------------------------------------+--------+---------------------+ | | | ``ETHTOOL_A_CABLE_FAULT_LENGTH_CM`` | u32 | length in cm | +-+-+-----------------------------------------+--------+---------------------+ + | | | ``ETHTOOL_A_CABLE_FAULT_LENGTH_SRC`` | u32 | information source | + +-+-+-----------------------------------------+--------+---------------------+ + CABLE_TEST TDR ============== @@ -1756,7 +1768,7 @@ Kernel response contents: When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` attribute identifies the operational state of the PoDL PSE functions. The operational state of the PSE function can be changed using the ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` -action. This option is corresponding to ``IEEE 802.3-2018`` 30.15.1.1.2 +action. This attribute corresponds to ``IEEE 802.3-2018`` 30.15.1.1.2 aPoDLPSEAdminState. Possible values are: .. kernel-doc:: include/uapi/linux/ethtool.h @@ -1770,8 +1782,8 @@ The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_STATE`` implementing When set, the optional ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` attribute identifies the power detection status of the PoDL PSE. The status depend on internal PSE -state machine and automatic PD classification support. This option is -corresponding to ``IEEE 802.3-2018`` 30.15.1.1.3 aPoDLPSEPowerDetectionStatus. +state machine and automatic PD classification support. This attribute +corresponds to ``IEEE 802.3-2018`` 30.15.1.1.3 aPoDLPSEPowerDetectionStatus. Possible values are: .. kernel-doc:: include/uapi/linux/ethtool.h @@ -1785,12 +1797,13 @@ The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_PW_D_STATUS`` implementing When set, the optional ``ETHTOOL_A_C33_PSE_PW_CLASS`` attribute identifies the power class of the C33 PSE. It depends on the class negotiated between -the PSE and the PD. This option is corresponding to ``IEEE 802.3-2022`` +the PSE and the PD. This attribute corresponds to ``IEEE 802.3-2022`` 30.9.1.1.8 aPSEPowerClassification. When set, the optional ``ETHTOOL_A_C33_PSE_ACTUAL_PW`` attribute identifies -This option is corresponding to ``IEEE 802.3-2022`` 30.9.1.1.23 aPSEActualPower. -Actual power is reported in mW. +the actual power drawn by the C33 PSE. This attribute corresponds to +``IEEE 802.3-2022`` 30.9.1.1.23 aPSEActualPower. Actual power is reported +in mW. When set, the optional ``ETHTOOL_A_C33_PSE_EXT_STATE`` attribute identifies the extended error state of the C33 PSE. Possible values are: @@ -1839,7 +1852,7 @@ Request contents: ====================================== ====== ============================= When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` attribute is used -to control PoDL PSE Admin functions. This option is implementing +to control PoDL PSE Admin functions. This option implements ``IEEE 802.3-2018`` 30.15.1.2.1 acPoDLPSEAdminControl. See ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` for supported values. @@ -1866,10 +1879,18 @@ RSS context of an interface similar to ``ETHTOOL_GRSSH`` ioctl request. 
Request contents: -===================================== ====== ========================== +===================================== ====== ============================ ``ETHTOOL_A_RSS_HEADER`` nested request header ``ETHTOOL_A_RSS_CONTEXT`` u32 context number -===================================== ====== ========================== + ``ETHTOOL_A_RSS_START_CONTEXT`` u32 start context number (dumps) +===================================== ====== ============================ + +``ETHTOOL_A_RSS_CONTEXT`` specifies which RSS context number to query, +if not set context 0 (the main context) is queried. Dumps can be filtered +by device (only listing contexts of a given netdev). Filtering single +context number is not supported but ``ETHTOOL_A_RSS_START_CONTEXT`` +can be used to start dumping context from the given number (primarily +used to ignore context 0s and only dump additional contexts). Kernel response contents: @@ -1927,7 +1948,7 @@ When set, the optional ``ETHTOOL_A_PLCA_VERSION`` attribute indicates which standard and version the PLCA management interface complies to. When not set, the interface is vendor-specific and (possibly) supplied by the driver. The OPEN Alliance SIG specifies a standard register map for 10BASE-T1S PHYs -embedding the PLCA Reconcialiation Sublayer. See "10BASE-T1S PLCA Management +embedding the PLCA Reconciliation Sublayer. See "10BASE-T1S PLCA Management Registers" at https://www.opensig.org/about/specifications/. When set, the optional ``ETHTOOL_A_PLCA_ENABLED`` attribute indicates the @@ -1989,7 +2010,7 @@ Request contents: ``ETHTOOL_A_PLCA_ENABLED`` u8 PLCA Admin State ``ETHTOOL_A_PLCA_NODE_ID`` u8 PLCA unique local node ID ``ETHTOOL_A_PLCA_NODE_CNT`` u8 Number of PLCA nodes on the - netkork, including the + network, including the coordinator ``ETHTOOL_A_PLCA_TO_TMR`` u8 Transmit Opportunity Timer value in bit-times (BT) @@ -2176,6 +2197,49 @@ string. The ``ETHTOOL_A_MODULE_FW_FLASH_DONE`` and ``ETHTOOL_A_MODULE_FW_FLASH_TOTAL`` attributes encode the completed and total amount of work, respectively. +PHY_GET +======= + +Retrieve information about a given Ethernet PHY sitting on the link. The DO +operation returns all available information about dev->phydev. User can also +specify a PHY_INDEX, in which case the DO request returns information about that +specific PHY. + +As there can be more than one PHY, the DUMP operation can be used to list the PHYs +present on a given interface, by passing an interface index or name in +the dump request. 
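+
+For example (the device name, PHY index and layout below are purely
+illustrative), a DO request targeting the PHY with index 2 on ``eth0`` would
+carry, inside its request header, attributes along the lines of::
+
+  ETHTOOL_A_PHY_HEADER (nested)
+    ETHTOOL_A_HEADER_DEV_NAME  = "eth0"
+    ETHTOOL_A_HEADER_PHY_INDEX = 2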
+ +For more information, refer to :ref:`phy_link_topology` + +Request contents: + + ==================================== ====== ========================== + ``ETHTOOL_A_PHY_HEADER`` nested request header + ==================================== ====== ========================== + +Kernel response contents: + + ===================================== ====== =============================== + ``ETHTOOL_A_PHY_HEADER`` nested request header + ``ETHTOOL_A_PHY_INDEX`` u32 the phy's unique index, that can + be used for phy-specific + requests + ``ETHTOOL_A_PHY_DRVNAME`` string the phy driver name + ``ETHTOOL_A_PHY_NAME`` string the phy device name + ``ETHTOOL_A_PHY_UPSTREAM_TYPE`` u32 the type of device this phy is + connected to + ``ETHTOOL_A_PHY_UPSTREAM_INDEX`` u32 the PHY index of the upstream + PHY + ``ETHTOOL_A_PHY_UPSTREAM_SFP_NAME`` string if this PHY is connected to + its parent PHY through an SFP + bus, the name of this sfp bus + ``ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME`` string if the phy controls an sfp bus, + the name of the sfp bus + ===================================== ====== =============================== + +When ``ETHTOOL_A_PHY_UPSTREAM_TYPE`` is PHY_UPSTREAM_PHY, the PHY's parent is +another PHY. + Request translation =================== @@ -2283,4 +2347,5 @@ are netlink only. n/a ``ETHTOOL_MSG_MM_GET`` n/a ``ETHTOOL_MSG_MM_SET`` n/a ``ETHTOOL_MSG_MODULE_FW_FLASH_ACT`` + n/a ``ETHTOOL_MSG_PHY_GET`` =================================== ===================================== diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index d1af04b952f813..803dfc1efb7518 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -49,6 +49,7 @@ Contents: cdc_mbim dccp dctcp + devmem dns_resolver driver eql @@ -87,10 +88,12 @@ Contents: nexthop-group-resilient nf_conntrack-sysctl nf_flowtable + oa-tc6-framework openvswitch operstates packet_mmap phonet + phy-link-topology pktgen plip ppp_generic diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index 3616389c8c2df2..eacf8983e23074 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -2362,6 +2362,20 @@ ra_honor_pio_life - BOOLEAN Default: 0 (disabled) +ra_honor_pio_pflag - BOOLEAN + The Prefix Information Option P-flag indicates the network can + allocate a unique IPv6 prefix per client using DHCPv6-PD. + This sysctl can be enabled when a userspace DHCPv6-PD client + is running to cause the P-flag to take effect: i.e. the + P-flag suppresses any effects of the A-flag within the same + PIO. For a given PIO, P=1 and A=1 is treated as A=0. + + - If disabled, the P-flag is ignored. + - If enabled, the P-flag will disable SLAAC autoconfiguration + for the given Prefix Information Option. + + Default: 0 (disabled) + accept_ra_rt_info_min_plen - INTEGER Minimum prefix length of Route Information in RA. diff --git a/Documentation/networking/l2tp.rst b/Documentation/networking/l2tp.rst index 8496b467dea492..e8cf8b3e60ac96 100644 --- a/Documentation/networking/l2tp.rst +++ b/Documentation/networking/l2tp.rst @@ -638,9 +638,8 @@ Tunnels are identified by a unique tunnel id. The id is 16-bit for L2TPv2 and 32-bit for L2TPv3. Internally, the id is stored as a 32-bit value. -Tunnels are kept in a per-net list, indexed by tunnel id. The tunnel -id namespace is shared by L2TPv2 and L2TPv3. The tunnel context can be -derived from the socket's sk_user_data. +Tunnels are kept in a per-net list, indexed by tunnel id. 
The +tunnel id namespace is shared by L2TPv2 and L2TPv3. Handling tunnel socket close is perhaps the most tricky part of the L2TP implementation. If userspace closes a tunnel socket, the L2TP @@ -652,9 +651,7 @@ socket's encap_destroy handler is invoked, which L2TP uses to initiate its tunnel close actions. For L2TPIP sockets, the socket's close handler initiates the same tunnel close actions. All sessions are first closed. Each session drops its tunnel ref. When the tunnel ref -reaches zero, the tunnel puts its socket ref. When the socket is -eventually destroyed, its sk_destruct finally frees the L2TP tunnel -context. +reaches zero, the tunnel drops its socket ref. Sessions -------- @@ -667,10 +664,7 @@ pseudowire) or other data types such as PPP, ATM, HDLC or Frame Relay. Linux currently implements only Ethernet and PPP session types. Some L2TP session types also have a socket (PPP pseudowires) while -others do not (Ethernet pseudowires). We can't therefore use the -socket reference count as the reference count for session -contexts. The L2TP implementation therefore has its own internal -reference counts on the session contexts. +others do not (Ethernet pseudowires). Like tunnels, L2TP sessions are identified by a unique session id. Just as with tunnel ids, the session id is 16-bit for @@ -680,21 +674,19 @@ value. Sessions hold a ref on their parent tunnel to ensure that the tunnel stays extant while one or more sessions references it. -Sessions are kept in a per-tunnel list, indexed by session id. L2TPv3 -sessions are also kept in a per-net list indexed by session id, -because L2TPv3 session ids are unique across all tunnels and L2TPv3 -data packets do not contain a tunnel id in the header. This list is -therefore needed to find the session context associated with a -received data packet when the tunnel context cannot be derived from -the tunnel socket. +Sessions are kept in a per-net list. L2TPv2 sessions and L2TPv3 +sessions are stored in separate lists. L2TPv2 sessions are keyed +by a 32-bit key made up of the 16-bit tunnel ID and 16-bit +session ID. L2TPv3 sessions are keyed by the 32-bit session ID, since +L2TPv3 session ids are unique across all tunnels. Although the L2TPv3 RFC specifies that L2TPv3 session ids are not -scoped by the tunnel, the kernel does not police this for L2TPv3 UDP -tunnels and does not add sessions of L2TPv3 UDP tunnels into the -per-net session list. In the UDP receive code, we must trust that the -tunnel can be identified using the tunnel socket's sk_user_data and -lookup the session in the tunnel's session list instead of the per-net -session list. +scoped by the tunnel, the Linux implementation has historically +allowed this. Such session id collisions are supported using a per-net +hash table keyed by sk and session ID. When looking up L2TPv3 +sessions, the list entry may link to multiple sessions with that +session ID, in which case the session matching the given sk (tunnel) +is used. PPP --- @@ -714,10 +706,9 @@ The L2TP PPP implementation handles the closing of a PPPoL2TP socket by closing its corresponding L2TP session. This is complicated because it must consider racing with netlink session create/destroy requests and pppol2tp_connect trying to reconnect with a session that is in the -process of being closed. Unlike tunnels, PPP sessions do not hold a -ref on their associated socket, so code must be careful to sock_hold -the socket where necessary. For all the details, see commit -3d609342cc04129ff7568e19316ce3d7451a27e8. 
+process of being closed. PPP sessions hold a ref on their associated
+socket in order that the socket remains extant while the session
+references it.

 Ethernet
 --------
@@ -761,15 +752,10 @@ Limitations
 The current implementation has a number of limitations:

-  1) Multiple UDP sockets with the same 5-tuple address cannot be
-     used. The kernel's tunnel context is identified using private
-     data associated with the socket so it is important that each
-     socket is uniquely identified by its address.
-
-  2) Interfacing with openvswitch is not yet implemented. It may be
+  1) Interfacing with openvswitch is not yet implemented. It may be
      useful to map OVS Ethernet and VLAN ports into L2TPv3 tunnels.

-  3) VLAN pseudowires are implemented using an ``l2tpethN`` interface
+  2) VLAN pseudowires are implemented using an ``l2tpethN`` interface
      configured with a VLAN sub-interface. Since L2TPv3 VLAN
      pseudowires carry one and only one VLAN, it may be better to use
      a single netdevice rather than an ``l2tpethN`` and ``l2tpethN``:M
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index fd514bba8c431c..95598c21fc8e87 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -34,6 +34,17 @@ available_schedulers - STRING
 	Shows the available schedulers choices that are registered. More packet
 	schedulers may be available, but not loaded.

+blackhole_timeout - INTEGER (seconds)
+	Initial time period in seconds to disable MPTCP on active MPTCP sockets
+	when an MPTCP firewall blackhole issue happens. This time period will
+	grow exponentially when more blackhole issues get detected right after
+	MPTCP is re-enabled and will reset to the initial value when the
+	blackhole issue goes away.
+
+	0 to disable the blackhole detection.
+
+	Default: 3600
+
 checksum_enabled - BOOLEAN
 	Control whether DSS checksum can be enabled.

diff --git a/Documentation/networking/multi-pf-netdev.rst b/Documentation/networking/multi-pf-netdev.rst
index 268819225866cb..2cd25d81aaa7a0 100644
--- a/Documentation/networking/multi-pf-netdev.rst
+++ b/Documentation/networking/multi-pf-netdev.rst
@@ -111,11 +111,11 @@ The relation between PF, irq, napi, and queue can be observed via netlink spec::
 Here you can clearly observe our channels distribution policy::

   $ ls /proc/irq/{36,39,40,41,42}/mlx5* -d -1
-  /proc/irq/36/mlx5_comp1@pci:0000:08:00.0
-  /proc/irq/39/mlx5_comp1@pci:0000:09:00.0
-  /proc/irq/40/mlx5_comp2@pci:0000:08:00.0
-  /proc/irq/41/mlx5_comp2@pci:0000:09:00.0
-  /proc/irq/42/mlx5_comp3@pci:0000:08:00.0
+  /proc/irq/36/mlx5_comp0@pci:0000:08:00.0
+  /proc/irq/39/mlx5_comp0@pci:0000:09:00.0
+  /proc/irq/40/mlx5_comp1@pci:0000:08:00.0
+  /proc/irq/41/mlx5_comp1@pci:0000:09:00.0
+  /proc/irq/42/mlx5_comp2@pci:0000:08:00.0

 Steering
 ========
diff --git a/Documentation/networking/napi.rst b/Documentation/networking/napi.rst
index 7bf7b95c4f7af3..dfa5d549be9c03 100644
--- a/Documentation/networking/napi.rst
+++ b/Documentation/networking/napi.rst
@@ -144,9 +144,8 @@ IRQ should only be unmasked after a successful call to napi_complete_done():

 napi_schedule_irqoff() is a variant of napi_schedule() which takes advantage
 of guarantees given by being invoked in IRQ context (no need to
-mask interrupts). Note that PREEMPT_RT forces all interrupts
-to be threaded so the interrupt may need to be marked ``IRQF_NO_THREAD``
-to avoid issues on real-time kernel configurations.
+mask interrupts).
napi_schedule_irqoff() will fall back to napi_schedule() if +IRQs are threaded (such as if ``PREEMPT_RT`` is enabled). Instance to queue mapping ------------------------- diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst index 70c4fb9d4e5ce0..22b07c814f4a45 100644 --- a/Documentation/networking/net_cachelines/net_device.rst +++ b/Documentation/networking/net_cachelines/net_device.rst @@ -7,6 +7,8 @@ net_device struct fast path usage breakdown Type Name fastpath_tx_access fastpath_rx_access Comments ..struct ..net_device +unsigned_long:32 priv_flags read_mostly - __dev_queue_xmit(tx) +unsigned_long:1 lltx read_mostly - HARD_TX_LOCK,HARD_TX_TRYLOCK,HARD_TX_UNLOCK(tx) char name[16] - - struct_netdev_name_node* name_node struct_dev_ifalias* ifalias @@ -23,7 +25,6 @@ struct_list_head ptype_specific struct adj_list unsigned_int flags read_mostly read_mostly __dev_queue_xmit,__dev_xmit_skb,ip6_output,__ip6_finish_output(tx);ip6_rcv_core(rx) xdp_features_t xdp_features -unsigned_long_long priv_flags read_mostly - __dev_queue_xmit(tx) struct_net_device_ops* netdev_ops read_mostly - netdev_core_pick_tx,netdev_start_xmit(tx) struct_xdp_metadata_ops* xdp_metadata_ops int ifindex - read_mostly ip6_rcv_core @@ -98,7 +99,7 @@ unsigned_int num_rx_queues unsigned_int real_num_rx_queues - read_mostly get_rps_cpu struct_bpf_prog* xdp_prog - read_mostly netif_elide_gro() unsigned_long gro_flush_timeout - read_mostly napi_complete_done -int napi_defer_hard_irqs - read_mostly napi_complete_done +u32 napi_defer_hard_irqs - read_mostly napi_complete_done unsigned_int gro_max_size - read_mostly skb_gro_receive unsigned_int gro_ipv4_max_size - read_mostly skb_gro_receive rx_handler_func_t* rx_handler read_mostly - __netif_receive_skb_core @@ -163,6 +164,10 @@ struct_lock_class_key* qdisc_tx_busylock bool proto_down unsigned:1 wol_enabled unsigned:1 threaded - - napi_poll(napi_enable,dev_set_threaded) +unsigned_long:1 see_all_hwtstamp_requests +unsigned_long:1 change_proto_down +unsigned_long:1 netns_local +unsigned_long:1 fcoe_mtu struct_list_head net_notifier_list struct_macsec_ops* macsec_ops struct_udp_tunnel_nic_info* udp_tunnel_nic_info @@ -176,3 +181,5 @@ netdevice_tracker dev_registered_tracker struct_rtnl_hw_stats64* offload_xstats_l3 struct_devlink_port* devlink_port struct_dpll_pin* dpll_pin +struct hlist_head page_pools +struct dim_irq_moder* irq_moder diff --git a/Documentation/networking/netdev-features.rst b/Documentation/networking/netdev-features.rst index d7b15bb64deb63..5014f7cc1398ba 100644 --- a/Documentation/networking/netdev-features.rst +++ b/Documentation/networking/netdev-features.rst @@ -139,21 +139,6 @@ chained skbs (skb->next/prev list). Features contained in NETIF_F_SOFT_FEATURES are features of networking stack. Driver should not change behaviour based on them. - * LLTX driver (deprecated for hardware drivers) - -NETIF_F_LLTX is meant to be used by drivers that don't need locking at all, -e.g. software tunnels. - -This is also used in a few legacy drivers that implement their -own locking, don't use it for new (hardware) drivers. - - * netns-local device - -NETIF_F_NETNS_LOCAL is set for devices that are not allowed to move between -network namespaces (e.g. loopback). - -Don't use it in drivers. 
- * VLAN challenged NETIF_F_VLAN_CHALLENGED should be set for devices which can't cope with VLAN diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst index c2476917a6c37d..857c9784f87efc 100644 --- a/Documentation/networking/netdevices.rst +++ b/Documentation/networking/netdevices.rst @@ -258,11 +258,11 @@ ndo_get_stats: ndo_start_xmit: Synchronization: __netif_tx_lock spinlock. - When the driver sets NETIF_F_LLTX in dev->features this will be + When the driver sets dev->lltx this will be called without holding netif_tx_lock. In this case the driver has to lock by itself when needed. The locking there should also properly protect against - set_rx_mode. WARNING: use of NETIF_F_LLTX is deprecated. + set_rx_mode. WARNING: use of dev->lltx is deprecated. Don't use it for new drivers. Context: Process with BHs disabled or BH (timer), diff --git a/Documentation/networking/oa-tc6-framework.rst b/Documentation/networking/oa-tc6-framework.rst new file mode 100644 index 00000000000000..fe2aabde923a57 --- /dev/null +++ b/Documentation/networking/oa-tc6-framework.rst @@ -0,0 +1,497 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +========================================================================= +OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface (TC6) Framework Support +========================================================================= + +Introduction +------------ + +The IEEE 802.3cg project defines two 10 Mbit/s PHYs operating over a +single pair of conductors. The 10BASE-T1L (Clause 146) is a long reach +PHY supporting full duplex point-to-point operation over 1 km of single +balanced pair of conductors. The 10BASE-T1S (Clause 147) is a short reach +PHY supporting full / half duplex point-to-point operation over 15 m of +single balanced pair of conductors, or half duplex multidrop bus +operation over 25 m of single balanced pair of conductors. + +Furthermore, the IEEE 802.3cg project defines the new Physical Layer +Collision Avoidance (PLCA) Reconciliation Sublayer (Clause 148) meant to +provide improved determinism to the CSMA/CD media access method. PLCA +works in conjunction with the 10BASE-T1S PHY operating in multidrop mode. + +The aforementioned PHYs are intended to cover the low-speed / low-cost +applications in industrial and automotive environment. The large number +of pins (16) required by the MII interface, which is specified by the +IEEE 802.3 in Clause 22, is one of the major cost factors that need to be +addressed to fulfil this objective. + +The MAC-PHY solution integrates an IEEE Clause 4 MAC and a 10BASE-T1x PHY +exposing a low pin count Serial Peripheral Interface (SPI) to the host +microcontroller. This also enables the addition of Ethernet functionality +to existing low-end microcontrollers which do not integrate a MAC +controller. + +Overview +-------- + +The MAC-PHY is specified to carry both data (Ethernet frames) and control +(register access) transactions over a single full-duplex serial peripheral +interface. + +Protocol Overview +----------------- + +Two types of transactions are defined in the protocol: data transactions +for Ethernet frame transfers and control transactions for register +read/write transfers. A chunk is the basic element of data transactions +and is composed of 4 bytes of overhead plus 64 bytes of payload size for +each chunk. Ethernet frames are transferred over one or more data chunks. +Control transactions consist of one or more register read/write control +commands. 
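+
+To make the per-frame overhead concrete, the following sketch (not part of the
+framework API; the macro and function names are made up for this example)
+computes how many data chunks, and how many SPI bytes, one Ethernet frame
+needs in a single direction:
+
+.. code-block:: c
+
+  #include <linux/math.h>		/* DIV_ROUND_UP() */
+
+  #define CHUNK_PAYLOAD_SIZE	64	/* payload bytes per data chunk */
+  #define CHUNK_OVERHEAD_SIZE	4	/* header or footer bytes per chunk */
+
+  /* Number of data chunks needed to carry one Ethernet frame. */
+  static unsigned int frame_to_chunks(unsigned int frame_len)
+  {
+          return DIV_ROUND_UP(frame_len, CHUNK_PAYLOAD_SIZE);
+  }
+
+  /*
+   * Example: a 1518-byte frame needs DIV_ROUND_UP(1518, 64) = 24 chunks,
+   * i.e. 24 * (64 + 4) = 1632 bytes on the SPI bus for that direction.
+   */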
+ +SPI transactions are initiated by the SPI host with the assertion of CSn +low to the MAC-PHY and ends with the deassertion of CSn high. In between +each SPI transaction, the SPI host may need time for additional +processing and to setup the next SPI data or control transaction. + +SPI data transactions consist of an equal number of transmit (TX) and +receive (RX) chunks. Chunks in both transmit and receive directions may +or may not contain valid frame data independent from each other, allowing +for the simultaneous transmission and reception of different length +frames. + +Each transmit data chunk begins with a 32-bit data header followed by a +data chunk payload on MOSI. The data header indicates whether transmit +frame data is present and provides the information to determine which +bytes of the payload contain valid frame data. + +In parallel, receive data chunks are received on MISO. Each receive data +chunk consists of a data chunk payload ending with a 32-bit data footer. +The data footer indicates if there is receive frame data present within +the payload or not and provides the information to determine which bytes +of the payload contain valid frame data. + +Reference +--------- + +10BASE-T1x MAC-PHY Serial Interface Specification, + +Link: https://opensig.org/download/document/OPEN_Alliance_10BASET1x_MAC-PHY_Serial_Interface_V1.1.pdf + +Hardware Architecture +--------------------- + +.. code-block:: none + + +----------+ +-------------------------------------+ + | | | MAC-PHY | + | |<---->| +-----------+ +-------+ +-------+ | + | SPI Host | | | SPI Slave | | MAC | | PHY | | + | | | +-----------+ +-------+ +-------+ | + +----------+ +-------------------------------------+ + +Software Architecture +--------------------- + +.. code-block:: none + + +----------------------------------------------------------+ + | Networking Subsystem | + +----------------------------------------------------------+ + / \ / \ + | | + | | + \ / | + +----------------------+ +-----------------------------+ + | MAC Driver |<--->| OPEN Alliance TC6 Framework | + +----------------------+ +-----------------------------+ + / \ / \ + | | + | | + | \ / + +----------------------------------------------------------+ + | SPI Subsystem | + +----------------------------------------------------------+ + / \ + | + | + \ / + +----------------------------------------------------------+ + | 10BASE-T1x MAC-PHY Device | + +----------------------------------------------------------+ + +Implementation +-------------- + +MAC Driver +~~~~~~~~~~ + +- Probed by SPI subsystem. + +- Initializes OA TC6 framework for the MAC-PHY. + +- Registers and configures the network device. + +- Sends the tx ethernet frames from n/w subsystem to OA TC6 framework. + +OPEN Alliance TC6 Framework +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Initializes PHYLIB interface. + +- Registers mac-phy interrupt. + +- Performs mac-phy register read/write operation using the control + transaction protocol specified in the OPEN Alliance 10BASE-T1x MAC-PHY + Serial Interface specification. + +- Performs Ethernet frames transaction using the data transaction protocol + for Ethernet frames specified in the OPEN Alliance 10BASE-T1x MAC-PHY + Serial Interface specification. + +- Forwards the received Ethernet frame from 10Base-T1x MAC-PHY to n/w + subsystem. + +Data Transaction +~~~~~~~~~~~~~~~~ + +The Ethernet frames that are typically transferred from the SPI host to +the MAC-PHY will be converted into multiple transmit data chunks. 
Each +transmit data chunk will have a 4 bytes header which contains the +information needed to determine the validity and the location of the +transmit frame data within the 64 bytes data chunk payload. + +.. code-block:: none + + +---------------------------------------------------+ + | Tx Chunk | + | +---------------------------+ +----------------+ | MOSI + | | 64 bytes chunk payload | | 4 bytes header | |------------> + | +---------------------------+ +----------------+ | + +---------------------------------------------------+ + +4 bytes header contains the below fields, + +DNC (Bit 31) - Data-Not-Control flag. This flag specifies the type of SPI + transaction. For TX data chunks, this bit shall be ’1’. + 0 - Control command + 1 - Data chunk + +SEQ (Bit 30) - Data Chunk Sequence. This bit is used to indicate an + even/odd transmit data chunk sequence to the MAC-PHY. + +NORX (Bit 29) - No Receive flag. The SPI host may set this bit to prevent + the MAC-PHY from conveying RX data on the MISO for the + current chunk (DV = 0 in the footer), indicating that the + host would not process it. Typically, the SPI host should + set NORX = 0 indicating that it will accept and process + any receive frame data within the current chunk. + +RSVD (Bit 28..24) - Reserved: All reserved bits shall be ‘0’. + +VS (Bit 23..22) - Vendor Specific. These bits are implementation specific. + If the MAC-PHY does not implement these bits, the host + shall set them to ‘0’. + +DV (Bit 21) - Data Valid flag. The SPI host uses this bit to indicate + whether the current chunk contains valid transmit frame data + (DV = 1) or not (DV = 0). When ‘0’, the MAC-PHY ignores the + chunk payload. Note that the receive path is unaffected by + the setting of the DV bit in the data header. + +SV (Bit 20) - Start Valid flag. The SPI host shall set this bit when the + beginning of an Ethernet frame is present in the current + transmit data chunk payload. Otherwise, this bit shall be + zero. This bit is not to be confused with the Start-of-Frame + Delimiter (SFD) byte described in IEEE 802.3 [2]. + +SWO (Bit 19..16) - Start Word Offset. When SV = 1, this field shall + contain the 32-bit word offset into the transmit data + chunk payload that points to the start of a new + Ethernet frame to be transmitted. The host shall write + this field as zero when SV = 0. + +RSVD (Bit 15) - Reserved: All reserved bits shall be ‘0’. + +EV (Bit 14) - End Valid flag. The SPI host shall set this bit when the end + of an Ethernet frame is present in the current transmit data + chunk payload. Otherwise, this bit shall be zero. + +EBO (Bit 13..8) - End Byte Offset. When EV = 1, this field shall contain + the byte offset into the transmit data chunk payload + that points to the last byte of the Ethernet frame to + transmit. This field shall be zero when EV = 0. + +TSC (Bit 7..6) - Timestamp Capture. Request a timestamp capture when the + frame is transmitted onto the network. + 00 - Do not capture a timestamp + 01 - Capture timestamp into timestamp capture register A + 10 - Capture timestamp into timestamp capture register B + 11 - Capture timestamp into timestamp capture register C + +RSVD (Bit 5..1) - Reserved: All reserved bits shall be ‘0’. + +P (Bit 0) - Parity. Parity bit calculated over the transmit data header. + Method used is odd parity. + +The number of buffers available in the MAC-PHY to store the incoming +transmit data chunk payloads is represented as transmit credits. 
The +available transmit credits in the MAC-PHY can be read either from the +Buffer Status Register or footer (Refer below for the footer info) +received from the MAC-PHY. The SPI host should not write more data chunks +than the available transmit credits as this will lead to transmit buffer +overflow error. + +In case the previous data footer had no transmit credits available and +once the transmit credits become available for transmitting transmit data +chunks, the MAC-PHY interrupt is asserted to SPI host. On reception of the +first data header this interrupt will be deasserted and the received +footer for the first data chunk will have the transmit credits available +information. + +The Ethernet frames that are typically transferred from MAC-PHY to SPI +host will be sent as multiple receive data chunks. Each receive data +chunk will have 64 bytes of data chunk payload followed by 4 bytes footer +which contains the information needed to determine the validity and the +location of the receive frame data within the 64 bytes data chunk payload. + +.. code-block:: none + + +---------------------------------------------------+ + | Rx Chunk | + | +----------------+ +---------------------------+ | MISO + | | 4 bytes footer | | 64 bytes chunk payload | |------------> + | +----------------+ +---------------------------+ | + +---------------------------------------------------+ + +4 bytes footer contains the below fields, + +EXST (Bit 31) - Extended Status. This bit is set when any bit in the + STATUS0 or STATUS1 registers are set and not masked. + +HDRB (Bit 30) - Received Header Bad. When set, indicates that the MAC-PHY + received a control or data header with a parity error. + +SYNC (Bit 29) - Configuration Synchronized flag. This bit reflects the + state of the SYNC bit in the CONFIG0 configuration + register (see Table 12). A zero indicates that the MAC-PHY + configuration may not be as expected by the SPI host. + Following configuration, the SPI host sets the + corresponding bitin the configuration register which is + reflected in this field. + +RCA (Bit 28..24) - Receive Chunks Available. The RCA field indicates to + the SPI host the minimum number of additional receive + data chunks of frame data that are available for + reading beyond the current receive data chunk. This + field is zero when there is no receive frame data + pending in the MAC-PHY’s buffer for reading. + +VS (Bit 23..22) - Vendor Specific. These bits are implementation specific. + If not implemented, the MAC-PHY shall set these bits to + ‘0’. + +DV (Bit 21) - Data Valid flag. The MAC-PHY uses this bit to indicate + whether the current receive data chunk contains valid + receive frame data (DV = 1) or not (DV = 0). When ‘0’, the + SPI host shall ignore the chunk payload. + +SV (Bit 20) - Start Valid flag. The MAC-PHY sets this bit when the current + chunk payload contains the start of an Ethernet frame. + Otherwise, this bit is zero. The SV bit is not to be + confused with the Start-of-Frame Delimiter (SFD) byte + described in IEEE 802.3 [2]. + +SWO (Bit 19..16) - Start Word Offset. When SV = 1, this field contains the + 32-bit word offset into the receive data chunk payload + containing the first byte of a new received Ethernet + frame. When a receive timestamp has been added to the + beginning of the received Ethernet frame (RTSA = 1) + then SWO points to the most significant byte of the + timestamp. This field will be zero when SV = 0. + +FD (Bit 15) - Frame Drop. 
When set, this bit indicates that the MAC has + detected a condition for which the SPI host should drop the + received Ethernet frame. This bit is only valid at the end + of a received Ethernet frame (EV = 1) and shall be zero at + all other times. + +EV (Bit 14) - End Valid flag. The MAC-PHY sets this bit when the end of a + received Ethernet frame is present in this receive data + chunk payload. + +EBO (Bit 13..8) - End Byte Offset: When EV = 1, this field contains the + byte offset into the receive data chunk payload that + locates the last byte of the received Ethernet frame. + This field is zero when EV = 0. + +RTSA (Bit 7) - Receive Timestamp Added. This bit is set when a 32-bit or + 64-bit timestamp has been added to the beginning of the + received Ethernet frame. The MAC-PHY shall set this bit to + zero when SV = 0. + +RTSP (Bit 6) - Receive Timestamp Parity. Parity bit calculated over the + 32-bit/64-bit timestamp added to the beginning of the + received Ethernet frame. Method used is odd parity. The + MAC-PHY shall set this bit to zero when RTSA = 0. + +TXC (Bit 5..1) - Transmit Credits. This field contains the minimum number + of transmit data chunks of frame data that the SPI host + can write in a single transaction without incurring a + transmit buffer overflow error. + +P (Bit 0) - Parity. Parity bit calculated over the receive data footer. + Method used is odd parity. + +SPI host will initiate the data receive transaction based on the receive +chunks available in the MAC-PHY which is provided in the receive chunk +footer (RCA - Receive Chunks Available). SPI host will create data invalid +transmit data chunks (empty chunks) or data valid transmit data chunks in +case there are valid Ethernet frames to transmit to the MAC-PHY. The +receive chunks available in MAC-PHY can be read either from the Buffer +Status Register or footer. + +In case the previous data footer had no receive data chunks available and +once the receive data chunks become available again for reading, the +MAC-PHY interrupt is asserted to SPI host. On reception of the first data +header this interrupt will be deasserted and the received footer for the +first data chunk will have the receive chunks available information. + +MAC-PHY Interrupt +~~~~~~~~~~~~~~~~~ + +The MAC-PHY interrupt is asserted when the following conditions are met. + +Receive chunks available - This interrupt is asserted when the previous +data footer had no receive data chunks available and once the receive +data chunks become available for reading. On reception of the first data +header this interrupt will be deasserted. + +Transmit chunk credits available - This interrupt is asserted when the +previous data footer indicated no transmit credits available and once the +transmit credits become available for transmitting transmit data chunks. +On reception of the first data header this interrupt will be deasserted. + +Extended status event - This interrupt is asserted when the previous data +footer indicated no extended status and once the extended event become +available. In this case the host should read status #0 register to know +the corresponding error/event. On reception of the first data header this +interrupt will be deasserted. + +Control Transaction +~~~~~~~~~~~~~~~~~~~ + +4 bytes control header contains the below fields, + +DNC (Bit 31) - Data-Not-Control flag. This flag specifies the type of SPI + transaction. For control commands, this bit shall be ‘0’. 
+                  0 - Control command
+                  1 - Data chunk
+
+HDRB (Bit 30) - Received Header Bad. When set by the MAC-PHY, indicates
+                that a header was received with a parity error. The SPI
+                host should always clear this bit. The MAC-PHY ignores the
+                HDRB value sent by the SPI host on MOSI.
+
+WNR (Bit 29) - Write-Not-Read. This bit indicates if data is to be written
+               to registers (when set) or read from registers
+               (when clear).
+
+AID (Bit 28) - Address Increment Disable. When clear, the address will be
+               automatically post-incremented by one following each
+               register read or write. When set, address auto increment is
+               disabled allowing successive reads and writes to occur at
+               the same register address.
+
+MMS (Bit 27..24) - Memory Map Selector. This field selects the specific
+                   register memory map to access.
+
+ADDR (Bit 23..8) - Address. Address of the first register within the
+                   selected memory map to access.
+
+LEN (Bit 7..1) - Length. Specifies the number of registers to read/write.
+                 This field is interpreted as the number of registers
+                 minus 1 allowing for up to 128 consecutive registers read
+                 or written starting at the address specified in ADDR. A
+                 length of zero shall read or write a single register.
+
+P (Bit 0) - Parity. Parity bit calculated over the control command header.
+            Method used is odd parity.
+
+Control transactions consist of one or more control commands. Control
+commands are used by the SPI host to read and write registers within the
+MAC-PHY. Each control command is composed of a 4 bytes control command
+header, followed by register write data in the case of a control write
+command.
+
+The MAC-PHY ignores the final 4 bytes of data from the SPI host at the end
+of the control write command. The control write command is also echoed
+from the MAC-PHY back to the SPI host to identify which register write
+failed in case of any bus errors. The echoed control write command starts
+with 4 bytes of unused data, which the SPI host should ignore, followed by
+the 4 bytes echoed control header and the echoed register write data.
+Control write commands can write either a single register or
+multiple consecutive registers. When multiple consecutive registers are
+written, the address is automatically post-incremented by the MAC-PHY.
+Writing to any unimplemented or undefined registers shall be ignored and
+yield no effect.
+
+The MAC-PHY ignores all data from the SPI host following the control
+header for the remainder of the control read command. The control read
+command is also echoed from the MAC-PHY back to the SPI host to identify
+which register read failed in case of any bus errors. The echoed control
+read command starts with 4 bytes of unused data, which the SPI host should
+ignore, followed by the 4 bytes echoed control header and the register
+read data. Control read commands can read either a single
+register or multiple consecutive registers. When multiple consecutive
+registers are read, the address is automatically post-incremented by the
+MAC-PHY. Reading any unimplemented or undefined registers shall return
+zero.
+
+Device drivers API
+==================
+
+The include/linux/oa_tc6.h defines the following functions:
+
+.. c:function:: struct oa_tc6 *oa_tc6_init(struct spi_device *spi, \
+		struct net_device *netdev)
+
+Initialize OA TC6 lib.
+
+.. c:function:: void oa_tc6_exit(struct oa_tc6 *tc6)
+
+Free allocated OA TC6 lib.
+
+.. c:function:: int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, \
+		u32 value)
+
+Write a single register in the MAC-PHY.
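+
+As a usage sketch only (this is not taken from an in-tree driver; the
+``my_macphy_*`` names and register address are placeholders, and
+``oa_tc6_init()`` is assumed to return ``NULL`` on failure), a MAC driver's
+SPI probe path could hook itself up to the framework and program one device
+register as follows:
+
+.. code-block:: c
+
+  static int my_macphy_probe(struct spi_device *spi)
+  {
+          struct net_device *netdev;
+          struct oa_tc6 *tc6;
+          int ret;
+
+          /* No driver-private data in this sketch. */
+          netdev = devm_alloc_etherdev(&spi->dev, 0);
+          if (!netdev)
+                  return -ENOMEM;
+
+          /* Set up the OA TC6 framework for this MAC-PHY. */
+          tc6 = oa_tc6_init(spi, netdev);
+          if (!tc6)
+                  return -ENODEV;
+
+          /* Program a (driver-defined) configuration register. */
+          ret = oa_tc6_write_register(tc6, MY_MACPHY_CONFIG_REG,
+                                      MY_MACPHY_CONFIG_VAL);
+          if (ret)
+                  goto err_exit;
+
+          ret = register_netdev(netdev);
+          if (ret)
+                  goto err_exit;
+
+          return 0;
+
+  err_exit:
+          oa_tc6_exit(tc6);
+          return ret;
+  }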
+
+.. c:function:: int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, \
+		u32 value[], u8 length)
+
+Write multiple consecutive registers starting from @address in the MAC-PHY.
+A maximum of 128 consecutive registers can be written starting at @address.
+
+.. c:function:: int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, \
+		u32 *value)
+
+Read a single register in the MAC-PHY.
+
+.. c:function:: int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, \
+		u32 value[], u8 length)
+
+Read multiple consecutive registers starting from @address in the MAC-PHY.
+A maximum of 128 consecutive registers can be read starting at @address.
+
+.. c:function:: netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, \
+		struct sk_buff *skb);
+
+The Ethernet frame in the skb is transmitted, or queued to be transmitted,
+through the MAC-PHY.
+
+.. c:function:: int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6);
+
+The zero align receive frame feature can be enabled to align the data of all
+received Ethernet frames to start at the beginning of any receive data chunk
+payload with a start word offset (SWO) of zero.
diff --git a/Documentation/networking/phy-link-topology.rst b/Documentation/networking/phy-link-topology.rst
new file mode 100644
index 00000000000000..4dec5d7d6513d9
--- /dev/null
+++ b/Documentation/networking/phy-link-topology.rst
@@ -0,0 +1,121 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _phy_link_topology:
+
+=================
+PHY link topology
+=================
+
+Overview
+========
+
+The PHY link topology representation in the networking stack aims at representing
+the hardware layout for any given Ethernet link.
+
+An Ethernet interface from userspace's point of view is nothing but a
+:c:type:`struct net_device <net_device>`, which exposes configuration options
+through the legacy ioctls and the ethtool netlink commands. The base assumption
+when designing these configuration APIs was that the link looks something like ::
+
+  +-----------------------+        +----------+      +--------------+
+  | Ethernet Controller / |        | Ethernet |      | Connector /  |
+  |          MAC          | ------ |   PHY    | ---- |     Port     | ---... to LP
+  +-----------------------+        +----------+      +--------------+
+  struct net_device                struct phy_device
+
+Commands that need to configure the PHY will go through the net_device.phydev
+field to reach the PHY and perform the relevant configuration.
+
+This assumption falls apart in more complex topologies that can arise when,
+for example, using SFP transceivers (although that's not the only specific case).
+
+Here, we have 2 basic scenarios. Either the MAC is able to output a serialized
+interface, that can directly be fed to an SFP cage, such as SGMII, 1000BaseX,
+10GBaseR, etc.
+
+The link topology then looks like this (when an SFP module is inserted) ::
+
+  +-----+  SGMII  +------------+
+  | MAC | ------- | SFP Module |
+  +-----+         +------------+
+
+Knowing that some modules embed a PHY, the actual link is more like ::
+
+  +-----+  SGMII  +--------------+
+  | MAC | ------- | PHY (on SFP) |
+  +-----+         +--------------+
+
+In this case, the SFP PHY is handled by phylib, and registered by phylink through
+its SFP upstream ops.
+
+Now some Ethernet controllers aren't able to output a serialized interface, so
+we can't directly connect them to an SFP cage. However, some PHYs can be used
+as media-converters, to translate the non-serialized MAC MII interface to a
+serialized MII interface fed to the SFP ::
+
+  +-----+  RGMII  +-----------------------+  SGMII  +--------------+
+  | MAC | ------- | PHY (media converter) | ------- | PHY (on SFP) |
+  +-----+         +-----------------------+         +--------------+
+
+This is where the model of having a single net_device.phydev pointer shows its
+limitations, as we now have 2 PHYs on the link.
+
+The phy_link topology framework aims at providing a way to keep track of every
+PHY on the link, for use by both kernel drivers and subsystems, but also to
+report the topology to userspace, allowing individual PHYs to be targeted in
+configuration commands.
+
+API
+===
+
+The :c:type:`struct phy_link_topology <phy_link_topology>` is a per-netdevice
+resource, that gets initialized at netdevice creation. Once it's initialized,
+it is then possible to register PHYs to the topology through :
+
+:c:func:`phy_link_topo_add_phy`
+
+Besides registering the PHY to the topology, this call will also assign a unique
+index to the PHY, which can then be reported to userspace to refer to this PHY
+(akin to the ifindex). This index is a u32, ranging from 1 to U32_MAX. The value
+0 is reserved to indicate the PHY doesn't belong to any topology yet.
+
+The PHY can then be removed from the topology through
+
+:c:func:`phy_link_topo_del_phy`
+
+These functions are already hooked into the phylib subsystem, so all PHYs that
+are linked to a net_device through :c:func:`phy_attach_direct` will automatically
+join the netdev's topology.
+
+PHYs that are on an SFP module will also be automatically registered IF the SFP
+upstream is phylink (so, no media-converter).
+
+PHY drivers that can be used as SFP upstream need to call :c:func:`phy_sfp_attach_phy`
+and :c:func:`phy_sfp_detach_phy`, which can be used as a
+.attach_phy / .detach_phy implementation for the
+:c:type:`struct sfp_upstream_ops <sfp_upstream_ops>`.
+
+UAPI
+====
+
+There exists a set of netlink commands to query the link topology from userspace,
+see ``Documentation/networking/ethtool-netlink.rst``.
+
+The whole point of having a topology representation is to assign the phyindex
+field in :c:type:`struct phy_device <phy_device>`. This index is reported to
+userspace using the ``ETHTOOL_MSG_PHY_GET`` ethnl command. Performing a DUMP operation
+will result in all PHYs from all net_device being listed. The DUMP command
+accepts either a ``ETHTOOL_A_HEADER_DEV_INDEX`` or ``ETHTOOL_A_HEADER_DEV_NAME``
+to be passed in the request to filter the DUMP to a single net_device.
+
+The retrieved index can then be passed as a request parameter using the
+``ETHTOOL_A_HEADER_PHY_INDEX`` field in the following ethnl commands :
+
+* ``ETHTOOL_MSG_STRSET_GET`` to get the stats string set from a given PHY
+* ``ETHTOOL_MSG_CABLE_TEST_ACT`` and ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``, to perform
+  cable testing on a given PHY on the link (most likely the outermost PHY)
+* ``ETHTOOL_MSG_PSE_SET`` and ``ETHTOOL_MSG_PSE_GET`` for PHY-controlled PoE and PSE settings
+* ``ETHTOOL_MSG_PLCA_GET_CFG``, ``ETHTOOL_MSG_PLCA_SET_CFG`` and ``ETHTOOL_MSG_PLCA_GET_STATUS``
+  to set the PLCA (Physical Layer Collision Avoidance) parameters
+
+Note that the PHY index can be passed to other requests, which will silently
diff --git a/Documentation/networking/switchdev.rst b/Documentation/networking/switchdev.rst index 758f1dae3fce20..f355f0166f1b4f 100644 --- a/Documentation/networking/switchdev.rst +++ b/Documentation/networking/switchdev.rst @@ -137,10 +137,10 @@ would be sub-port 0 on port 1 on switch 1. Port Features ^^^^^^^^^^^^^ -NETIF_F_NETNS_LOCAL +dev->netns_local If the switchdev driver (and device) only supports offloading of the default -network namespace (netns), the driver should set this feature flag to prevent +network namespace (netns), the driver should set this private flag to prevent the port netdev from being moved out of the default netns. A netns-aware driver/device would not set this flag and be responsible for partitioning hardware to preserve netns containment. This means hardware cannot forward diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst index 5e93cd71f99f1b..8199e691767189 100644 --- a/Documentation/networking/timestamping.rst +++ b/Documentation/networking/timestamping.rst @@ -158,7 +158,8 @@ SOF_TIMESTAMPING_SYS_HARDWARE: SOF_TIMESTAMPING_RAW_HARDWARE: Report hardware timestamps as generated by - SOF_TIMESTAMPING_TX_HARDWARE when available. + SOF_TIMESTAMPING_TX_HARDWARE or SOF_TIMESTAMPING_RX_HARDWARE + when available. 1.3.3 Timestamp Options @@ -266,6 +267,23 @@ SOF_TIMESTAMPING_OPT_TX_SWHW: two separate messages will be looped to the socket's error queue, each containing just one timestamp. +SOF_TIMESTAMPING_OPT_RX_FILTER: + Filter out spurious receive timestamps: report a receive timestamp + only if the matching timestamp generation flag is enabled. + + Receive timestamps are generated early in the ingress path, before a + packet's destination socket is known. If any socket enables receive + timestamps, packets for all socket will receive timestamped packets. + Including those that request timestamp reporting with + SOF_TIMESTAMPING_SOFTWARE and/or SOF_TIMESTAMPING_RAW_HARDWARE, but + do not request receive timestamp generation. This can happen when + requesting transmit timestamps only. + + Receiving spurious timestamps is generally benign. A process can + ignore the unexpected non-zero value. But it makes behavior subtly + dependent on other sockets. This flag isolates the socket for more + deterministic behavior. + New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate regardless of the setting of sysctl net.core.tstamp_allow_data. diff --git a/Documentation/networking/tproxy.rst b/Documentation/networking/tproxy.rst index 00dc3a1a66b484..7f7c1ff6f159ed 100644 --- a/Documentation/networking/tproxy.rst +++ b/Documentation/networking/tproxy.rst @@ -17,7 +17,7 @@ The idea is that you identify packets with destination address matching a local socket on your box, set the packet mark to a certain value:: # iptables -t mangle -N DIVERT - # iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT + # iptables -t mangle -A PREROUTING -p tcp -m socket --transparent -j DIVERT # iptables -t mangle -A DIVERT -j MARK --set-mark 1 # iptables -t mangle -A DIVERT -j ACCEPT diff --git a/Documentation/nvme/feature-and-quirk-policy.rst b/Documentation/nvme/feature-and-quirk-policy.rst index c01d836d8e4151..e21966bf20a812 100644 --- a/Documentation/nvme/feature-and-quirk-policy.rst +++ b/Documentation/nvme/feature-and-quirk-policy.rst @@ -1,8 +1,8 @@ .. 
SPDX-License-Identifier: GPL-2.0 -======================================= -Linux NVMe feature and and quirk policy -======================================= +=================================== +Linux NVMe feature and quirk policy +=================================== This file explains the policy used to decide what is supported by the Linux NVMe driver and what is not. diff --git a/Documentation/power/pci.rst b/Documentation/power/pci.rst index e2c1fb8a569a8c..9ebecb7b00b244 100644 --- a/Documentation/power/pci.rst +++ b/Documentation/power/pci.rst @@ -979,18 +979,17 @@ subsections can be defined as a separate function, it often is convenient to point two or more members of struct dev_pm_ops to the same routine. There are a few convenience macros that can be used for this purpose. -The SIMPLE_DEV_PM_OPS macro declares a struct dev_pm_ops object with one +The DEFINE_SIMPLE_DEV_PM_OPS() declares a struct dev_pm_ops object with one suspend routine pointed to by the .suspend(), .freeze(), and .poweroff() members and one resume routine pointed to by the .resume(), .thaw(), and .restore() members. The other function pointers in this struct dev_pm_ops are unset. -The UNIVERSAL_DEV_PM_OPS macro is similar to SIMPLE_DEV_PM_OPS, but it -additionally sets the .runtime_resume() pointer to the same value as -.resume() (and .thaw(), and .restore()) and the .runtime_suspend() pointer to -the same value as .suspend() (and .freeze() and .poweroff()). +The DEFINE_RUNTIME_DEV_PM_OPS() is similar to DEFINE_SIMPLE_DEV_PM_OPS(), but it +additionally sets the .runtime_resume() pointer to pm_runtime_force_resume() +and the .runtime_suspend() pointer to pm_runtime_force_suspend(). -The SET_SYSTEM_SLEEP_PM_OPS can be used inside of a declaration of struct +The SYSTEM_SLEEP_PM_OPS() can be used inside of a declaration of struct dev_pm_ops to indicate that one suspend routine is to be pointed to by the .suspend(), .freeze(), and .poweroff() members and one resume routine is to be pointed to by the .resume(), .thaw(), and .restore() members. diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst index 5c4e730f38d05e..53d1996460abfc 100644 --- a/Documentation/power/runtime_pm.rst +++ b/Documentation/power/runtime_pm.rst @@ -811,8 +811,8 @@ subsystem-level dev_pm_ops structure. Device drivers that wish to use the same function as a system suspend, freeze, poweroff and runtime suspend callback, and similarly for system resume, thaw, -restore, and runtime resume, can achieve this with the help of the -UNIVERSAL_DEV_PM_OPS macro defined in include/linux/pm.h (possibly setting its +restore, and runtime resume, can achieve similar behaviour with the help of the +DEFINE_RUNTIME_DEV_PM_OPS() defined in include/linux/pm_runtime.h (possibly setting its last argument to NULL). 8. "No-Callback" Devices diff --git a/Documentation/process/backporting.rst b/Documentation/process/backporting.rst index e1a6ea0a1e8a17..a71480fcf3b42c 100644 --- a/Documentation/process/backporting.rst +++ b/Documentation/process/backporting.rst @@ -73,7 +73,7 @@ Once you have the patch in git, you can go ahead and cherry-pick it into your source tree. Don't forget to cherry-pick with ``-x`` if you want a written record of where the patch came from! -Note that if you are submiting a patch for stable, the format is +Note that if you are submitting a patch for stable, the format is slightly different; the first line after the subject line needs tobe either:: @@ -147,7 +147,7 @@ divergence. 
It's important to always identify the commit or commits that caused the conflict, as otherwise you cannot be confident in the correctness of your resolution. As an added bonus, especially if the patch is in an -area you're not that famliar with, the changelogs of these commits will +area you're not that familiar with, the changelogs of these commits will often give you the context to understand the code and potential problems or pitfalls with your conflict resolution. @@ -197,7 +197,7 @@ git blame Another way to find prerequisite commits (albeit only the most recent one for a given conflict) is to run ``git blame``. In this case, you need to run it against the parent commit of the patch you are -cherry-picking and the file where the conflict appared, i.e.:: +cherry-picking and the file where the conflict appeared, i.e.:: git blame ^ -- diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 3fc63f27c226d8..00f1ed7c59c321 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -64,6 +64,7 @@ GNU tar 1.28 tar --version gtags (optional) 6.6.5 gtags --version mkimage (optional) 2017.01 mkimage --version Python (optional) 3.5.x python3 --version +GNU AWK (optional) 5.1.0 gawk --version ====================== =============== ======================================== .. [#f1] Sphinx is needed only to build the Kernel documentation @@ -192,6 +193,12 @@ platforms. The tool is available via the ``u-boot-tools`` package or can be built from the U-Boot source code. See the instructions at https://docs.u-boot.org/en/latest/build/tools.html#building-tools-for-linux +GNU AWK +------- + +GNU AWK is needed if you want kernel builds to generate address range data for +builtin modules (CONFIG_BUILTIN_MODULE_RANGES). + System utilities **************** diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst index 8e30c8f7697d5e..19d2ed47ff7966 100644 --- a/Documentation/process/coding-style.rst +++ b/Documentation/process/coding-style.rst @@ -986,7 +986,7 @@ that can go into these 5 milliseconds. A reasonable rule of thumb is to not put inline at functions that have more than 3 lines of code in them. An exception to this rule are the cases where -a parameter is known to be a compiletime constant, and as a result of this +a parameter is known to be a compile time constant, and as a result of this constantness you *know* the compiler will be able to optimize most of your function away at compile time. For a good example of this later case, see the kmalloc() inline function. diff --git a/Documentation/process/email-clients.rst b/Documentation/process/email-clients.rst index dd22c46d1d021b..e6b9173a1845c2 100644 --- a/Documentation/process/email-clients.rst +++ b/Documentation/process/email-clients.rst @@ -216,7 +216,7 @@ Mutt is highly customizable. 
Here is a minimum configuration to start using Mutt to send patches through Gmail:: # .muttrc - # ================ IMAP ==================== + # ================ IMAP ==================== set imap_user = 'yourusername@gmail.com' set imap_pass = 'yourpassword' set spoolfile = imaps://imap.gmail.com/INBOX diff --git a/Documentation/process/maintainer-tip.rst b/Documentation/process/maintainer-tip.rst index ba312345d03069..349a27a5334359 100644 --- a/Documentation/process/maintainer-tip.rst +++ b/Documentation/process/maintainer-tip.rst @@ -154,7 +154,7 @@ Examples for illustration: We modify the hot cpu handling to cancel the delayed work on the dying cpu and run the worker immediately on a different cpu in same domain. We - donot flush the worker because the MBM overflow worker reschedules the + do not flush the worker because the MBM overflow worker reschedules the worker on same CPU and scans the domain->cpu_mask to get the domain pointer. diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index f310f2f36666f2..1518bd57adab50 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -842,6 +842,14 @@ Make sure that base commit is in an official maintainer/mainline tree and not in some internal, accessible only to you tree - otherwise it would be worthless. +Tooling +------- + +Many of the technical aspects of this process can be automated using +b4, documented at . This can +help with things like tracking dependencies, running checkpatch and +with formatting and sending mails. + References ---------- diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst index e3f388ef4ee423..6146b49b6a9881 100644 --- a/Documentation/rust/general-information.rst +++ b/Documentation/rust/general-information.rst @@ -15,6 +15,8 @@ but not `std `_. Crates for use in the kernel must opt into this behavior using the ``#![no_std]`` attribute. +.. _rust_code_documentation: + Code documentation ------------------ @@ -22,10 +24,17 @@ Rust kernel code is documented using ``rustdoc``, its built-in documentation generator. The generated HTML docs include integrated search, linked items (e.g. types, -functions, constants), source code, etc. They may be read at (TODO: link when -in mainline and generated alongside the rest of the documentation): +functions, constants), source code, etc. They may be read at: + + https://rust.docs.kernel.org + +For linux-next, please see: + + https://rust.docs.kernel.org/next/ - http://kernel.org/ +There are also tags for each main release, e.g.: + + https://rust.docs.kernel.org/6.10/ The docs can also be easily generated and read locally. This is quite fast (same order as compiling the code itself) and no special tools or environment @@ -75,7 +84,7 @@ should provide as-safe-as-possible abstractions as needed. .. code-block:: rust/bindings/ - (rust/helpers.c) + (rust/helpers/) include/ -----+ <-+ | | @@ -112,7 +121,7 @@ output files in the ``rust/bindings/`` directory. For parts of the C header that ``bindgen`` does not auto generate, e.g. C ``inline`` functions or non-trivial macros, it is acceptable to add a small -wrapper function to ``rust/helpers.c`` to make it available for the Rust side as +wrapper function to ``rust/helpers/`` to make it available for the Rust side as well. 
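As a concrete illustration of such a wrapper, consider a locking primitive that is implemented as a C macro or ``inline`` function and therefore gets no callable symbol from ``bindgen``. A small out-of-line helper on the C side gives the Rust bindings a real symbol to call. The sketch below is illustrative only: the file name is hypothetical and the exact set of helpers under ``rust/helpers/`` depends on the kernel version, but it follows the ``rust_helper_*`` naming convention used there.

.. code-block:: c

    /* rust/helpers/example.c -- hypothetical helper file */
    #include <linux/spinlock.h>

    /*
     * spin_lock() is a macro, so bindgen cannot generate a binding for
     * it directly; wrap it in a function with external linkage that the
     * Rust side can call through the generated bindings.
     */
    void rust_helper_spin_lock(spinlock_t *lock)
    {
            spin_lock(lock);
    }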
Abstractions @@ -142,3 +151,11 @@ configuration: #[cfg(CONFIG_X="y")] // Enabled as a built-in (`y`) #[cfg(CONFIG_X="m")] // Enabled as a module (`m`) #[cfg(not(CONFIG_X))] // Disabled + +For other predicates that Rust's ``cfg`` does not support, e.g. expressions with +numerical comparisons, one may define a new Kconfig symbol: + +.. code-block:: kconfig + + config RUSTC_VERSION_MIN_107900 + def_bool y if RUSTC_VERSION >= 107900 diff --git a/Documentation/rust/index.rst b/Documentation/rust/index.rst index 46d35bd395cf5c..55dcde9e9e7e9e 100644 --- a/Documentation/rust/index.rst +++ b/Documentation/rust/index.rst @@ -25,13 +25,27 @@ support is still in development/experimental, especially for certain kernel configurations. +Code documentation +------------------ + +Given a kernel configuration, the kernel may generate Rust code documentation, +i.e. HTML rendered by the ``rustdoc`` tool. + .. only:: rustdoc and html - You can also browse `rustdoc documentation `_. + This kernel documentation was built with `Rust code documentation + `_. .. only:: not rustdoc and html - This documentation does not include rustdoc generated information. + This kernel documentation was not built with Rust code documentation. + +A pregenerated version is provided at: + + https://rust.docs.kernel.org + +Please see the :ref:`Code documentation ` section for +more details. .. toctree:: :maxdepth: 1 diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst index 8e3ad96787195f..2d107982c87bbe 100644 --- a/Documentation/rust/quick-start.rst +++ b/Documentation/rust/quick-start.rst @@ -39,8 +39,8 @@ of the box, e.g.:: Debian ****** -Debian Unstable (Sid), outside of the freeze period, provides recent Rust -releases and thus it should generally work out of the box, e.g.:: +Debian Testing and Debian Unstable (Sid), outside of the freeze period, provide +recent Rust releases and thus they should generally work out of the box, e.g.:: apt install rustc rust-src bindgen rustfmt rust-clippy diff --git a/Documentation/scheduler/completion.rst b/Documentation/scheduler/completion.rst index f19aca2062bd1f..adf0c0a56d0200 100644 --- a/Documentation/scheduler/completion.rst +++ b/Documentation/scheduler/completion.rst @@ -51,7 +51,7 @@ which has only two fields:: struct completion { unsigned int done; - wait_queue_head_t wait; + struct swait_queue_head wait; }; This provides the ->wait waitqueue to place tasks on for waiting (if any), and diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst index 43bd8a145b7a9b..5dd53e47bc0c26 100644 --- a/Documentation/scheduler/index.rst +++ b/Documentation/scheduler/index.rst @@ -12,6 +12,7 @@ Scheduler sched-bwc sched-deadline sched-design-CFS + sched-eevdf sched-domains sched-capacity sched-energy @@ -20,6 +21,7 @@ Scheduler sched-nice-design sched-rt-group sched-stats + sched-ext sched-debug text_files diff --git a/Documentation/scheduler/sched-deadline.rst b/Documentation/scheduler/sched-deadline.rst index 9fe4846079bbe0..22838ed8e13aa9 100644 --- a/Documentation/scheduler/sched-deadline.rst +++ b/Documentation/scheduler/sched-deadline.rst @@ -749,21 +749,19 @@ Appendix A. Test suite of the command line options. Please refer to rt-app documentation for more details (`/doc/*.json`). - The second testing application is a modification of schedtool, called - schedtool-dl, which can be used to setup SCHED_DEADLINE parameters for a - certain pid/application. 
schedtool-dl is available at: - https://github.com/scheduler-tools/schedtool-dl.git. + The second testing application is done using chrt which has support + for SCHED_DEADLINE. The usage is straightforward:: - # schedtool -E -t 10000000:100000000 -e ./my_cpuhog_app + # chrt -d -T 10000000 -D 100000000 0 ./my_cpuhog_app With this, my_cpuhog_app is put to run inside a SCHED_DEADLINE reservation - of 10ms every 100ms (note that parameters are expressed in microseconds). - You can also use schedtool to create a reservation for an already running + of 10ms every 100ms (note that parameters are expressed in nanoseconds). + You can also use chrt to create a reservation for an already running application, given that you know its pid:: - # schedtool -E -t 10000000:100000000 my_app_pid + # chrt -d -T 10000000 -D 100000000 -p 0 my_app_pid Appendix B. Minimal main() ========================== diff --git a/Documentation/scheduler/sched-design-CFS.rst b/Documentation/scheduler/sched-design-CFS.rst index bc1e507269c67b..8786f219fc7381 100644 --- a/Documentation/scheduler/sched-design-CFS.rst +++ b/Documentation/scheduler/sched-design-CFS.rst @@ -8,10 +8,12 @@ CFS Scheduler 1. OVERVIEW ============ -CFS stands for "Completely Fair Scheduler," and is the new "desktop" process -scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. It is the -replacement for the previous vanilla scheduler's SCHED_OTHER interactivity -code. +CFS stands for "Completely Fair Scheduler," and is the "desktop" process +scheduler implemented by Ingo Molnar and merged in Linux 2.6.23. When +originally merged, it was the replacement for the previous vanilla +scheduler's SCHED_OTHER interactivity code. Nowadays, CFS is making room +for EEVDF, for which documentation can be found in +Documentation/scheduler/sched-eevdf.rst. 80% of CFS's design can be summed up in a single sentence: CFS basically models an "ideal, precise multi-tasking CPU" on real hardware. diff --git a/Documentation/scheduler/sched-eevdf.rst b/Documentation/scheduler/sched-eevdf.rst new file mode 100644 index 00000000000000..83efe7c0a30dba --- /dev/null +++ b/Documentation/scheduler/sched-eevdf.rst @@ -0,0 +1,43 @@ +=============== +EEVDF Scheduler +=============== + +The "Earliest Eligible Virtual Deadline First" (EEVDF) was first introduced +in a scientific publication in 1995 [1]. The Linux kernel began +transitioning to EEVDF in version 6.6 (as a new option in 2024), moving +away from the earlier Completely Fair Scheduler (CFS) in favor of a version +of EEVDF proposed by Peter Zijlstra in 2023 [2-4]. More information +regarding CFS can be found in +Documentation/scheduler/sched-design-CFS.rst. + +Similarly to CFS, EEVDF aims to distribute CPU time equally among all +runnable tasks with the same priority. To do so, it assigns a virtual run +time to each task, creating a "lag" value that can be used to determine +whether a task has received its fair share of CPU time. In this way, a task +with a positive lag is owed CPU time, while a negative lag means the task +has exceeded its portion. EEVDF picks tasks with lag greater or equal to +zero and calculates a virtual deadline (VD) for each, selecting the task +with the earliest VD to execute next. It's important to note that this +allows latency-sensitive tasks with shorter time slices to be prioritized, +which helps with their responsiveness. 
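To make the selection rule above concrete, here is a deliberately simplified sketch of the pick step. It is illustrative only: the structure and field names are invented for this example and do not match the actual ``sched_entity`` bookkeeping in kernel/sched/fair.c, which walks an augmented rbtree rather than a flat array.

.. code-block:: c

    /*
     * Illustrative sketch: among runnable entities, consider only the
     * eligible ones (lag >= 0, i.e. still owed CPU time) and pick the
     * one with the earliest virtual deadline.
     */
    struct demo_entity {
            long long lag;                  /* + : owed CPU time, - : ran ahead */
            unsigned long long vdeadline;   /* virtual deadline */
    };

    static struct demo_entity *pick_eevdf_demo(struct demo_entity *e, int nr)
    {
            struct demo_entity *best = NULL;
            int i;

            for (i = 0; i < nr; i++) {
                    if (e[i].lag < 0)       /* not eligible yet */
                            continue;
                    if (!best || e[i].vdeadline < best->vdeadline)
                            best = &e[i];
            }
            return best;    /* NULL means nothing is currently eligible */
    }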
+ +There are ongoing discussions on how to manage lag, especially for sleeping +tasks; but at the time of writing EEVDF uses a "decaying" mechanism based +on virtual run time (VRT). This prevents tasks from exploiting the system +by sleeping briefly to reset their negative lag: when a task sleeps, it +remains on the run queue but marked for "deferred dequeue," allowing its +lag to decay over VRT. Hence, long-sleeping tasks eventually have their lag +reset. Finally, tasks can preempt others if their VD is earlier, and tasks +can request specific time slices using the new sched_setattr() system call, +which further facilitates the job of latency-sensitive applications. + +REFERENCES +========== + +[1] https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=805acf7726282721504c8f00575d91ebfd750564 + +[2] https://lore.kernel.org/lkml/a79014e6-ea83-b316-1e12-2ae056bda6fa@linux.vnet.ibm.com/ + +[3] https://lwn.net/Articles/969062/ + +[4] https://lwn.net/Articles/925371/ diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst new file mode 100644 index 00000000000000..6c0d70e2e27df5 --- /dev/null +++ b/Documentation/scheduler/sched-ext.rst @@ -0,0 +1,326 @@ +========================== +Extensible Scheduler Class +========================== + +sched_ext is a scheduler class whose behavior can be defined by a set of BPF +programs - the BPF scheduler. + +* sched_ext exports a full scheduling interface so that any scheduling + algorithm can be implemented on top. + +* The BPF scheduler can group CPUs however it sees fit and schedule them + together, as tasks aren't tied to specific CPUs at the time of wakeup. + +* The BPF scheduler can be turned on and off dynamically anytime. + +* The system integrity is maintained no matter what the BPF scheduler does. + The default scheduling behavior is restored anytime an error is detected, + a runnable task stalls, or on invoking the SysRq key sequence + :kbd:`SysRq-S`. + +* When the BPF scheduler triggers an error, debug information is dumped to + aid debugging. The debug dump is passed to and printed out by the + scheduler binary. The debug dump can also be accessed through the + `sched_ext_dump` tracepoint. The SysRq key sequence :kbd:`SysRq-D` + triggers a debug dump. This doesn't terminate the BPF scheduler and can + only be read through the tracepoint. + +Switching to and from sched_ext +=============================== + +``CONFIG_SCHED_CLASS_EXT`` is the config option to enable sched_ext and +``tools/sched_ext`` contains the example schedulers. The following config +options should be enabled to use sched_ext: + +.. code-block:: none + + CONFIG_BPF=y + CONFIG_SCHED_CLASS_EXT=y + CONFIG_BPF_SYSCALL=y + CONFIG_BPF_JIT=y + CONFIG_DEBUG_INFO_BTF=y + CONFIG_BPF_JIT_ALWAYS_ON=y + CONFIG_BPF_JIT_DEFAULT_ON=y + CONFIG_PAHOLE_HAS_SPLIT_BTF=y + CONFIG_PAHOLE_HAS_BTF_TAG=y + +sched_ext is used only when the BPF scheduler is loaded and running. + +If a task explicitly sets its scheduling policy to ``SCHED_EXT``, it will be +treated as ``SCHED_NORMAL`` and scheduled by CFS until the BPF scheduler is +loaded. + +When the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is not set +in ``ops->flags``, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE``, and +``SCHED_EXT`` tasks are scheduled by sched_ext. 
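For reference, a task opts into the ``SCHED_EXT`` policy through the ordinary scheduler system calls. The userspace sketch below is hedged: the numeric fallback for ``SCHED_EXT`` is an assumption taken from the uapi headers of sched_ext-capable kernels, so prefer the definition from your installed ``<linux/sched.h>``.

.. code-block:: c

    #include <sched.h>
    #include <stdio.h>

    #ifndef SCHED_EXT
    #define SCHED_EXT 7   /* assumed uapi value; use your kernel headers */
    #endif

    int main(void)
    {
            const struct sched_param param = { .sched_priority = 0 };

            /*
             * Until a BPF scheduler is loaded, the task keeps being
             * scheduled by CFS as described above.
             */
            if (sched_setscheduler(0, SCHED_EXT, &param))
                    perror("sched_setscheduler");
            return 0;
    }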
+ +However, when the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is +set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled +by sched_ext, while tasks with ``SCHED_NORMAL``, ``SCHED_BATCH`` and +``SCHED_IDLE`` policies are scheduled by CFS. + +Terminating the sched_ext scheduler program, triggering :kbd:`SysRq-S`, or +detection of any internal error including stalled runnable tasks aborts the +BPF scheduler and reverts all tasks back to CFS. + +.. code-block:: none + + # make -j16 -C tools/sched_ext + # tools/sched_ext/scx_simple + local=0 global=3 + local=5 global=24 + local=9 global=44 + local=13 global=56 + local=17 global=72 + ^CEXIT: BPF scheduler unregistered + +The current status of the BPF scheduler can be determined as follows: + +.. code-block:: none + + # cat /sys/kernel/sched_ext/state + enabled + # cat /sys/kernel/sched_ext/root/ops + simple + +You can check if any BPF scheduler has ever been loaded since boot by examining +this monotonically incrementing counter (a value of zero indicates that no BPF +scheduler has been loaded): + +.. code-block:: none + + # cat /sys/kernel/sched_ext/enable_seq + 1 + +``tools/sched_ext/scx_show_state.py`` is a drgn script which shows more +detailed information: + +.. code-block:: none + + # tools/sched_ext/scx_show_state.py + ops : simple + enabled : 1 + switching_all : 1 + switched_all : 1 + enable_state : enabled (2) + bypass_depth : 0 + nr_rejected : 0 + enable_seq : 1 + +If ``CONFIG_SCHED_DEBUG`` is set, whether a given task is on sched_ext can +be determined as follows: + +.. code-block:: none + + # grep ext /proc/self/sched + ext.enabled : 1 + +The Basics +========== + +Userspace can implement an arbitrary BPF scheduler by loading a set of BPF +programs that implement ``struct sched_ext_ops``. The only mandatory field +is ``ops.name`` which must be a valid BPF object name. All operations are +optional. The following modified excerpt is from +``tools/sched_ext/scx_simple.bpf.c`` showing a minimal global FIFO scheduler. + +.. code-block:: c + + /* + * Decide which CPU a task should be migrated to before being + * enqueued (either at wakeup, fork time, or exec time). If an + * idle core is found by the default ops.select_cpu() implementation, + * then dispatch the task directly to SCX_DSQ_LOCAL and skip the + * ops.enqueue() callback. + * + * Note that this implementation has exactly the same behavior as the + * default ops.select_cpu implementation. The behavior of the scheduler + * would be exactly same if the implementation just didn't define the + * simple_select_cpu() struct_ops prog. + */ + s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, + s32 prev_cpu, u64 wake_flags) + { + s32 cpu; + /* Need to initialize or the BPF verifier will reject the program */ + bool direct = false; + + cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &direct); + + if (direct) + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0); + + return cpu; + } + + /* + * Do a direct dispatch of a task to the global DSQ. This ops.enqueue() + * callback will only be invoked if we failed to find a core to dispatch + * to in ops.select_cpu() above. + * + * Note that this implementation has exactly the same behavior as the + * default ops.enqueue implementation, which just dispatches the task + * to SCX_DSQ_GLOBAL. The behavior of the scheduler would be exactly same + * if the implementation just didn't define the simple_enqueue struct_ops + * prog. 
+ */ + void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) + { + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + } + + s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init) + { + /* + * By default, all SCHED_EXT, SCHED_OTHER, SCHED_IDLE, and + * SCHED_BATCH tasks should use sched_ext. + */ + return 0; + } + + void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei) + { + exit_type = ei->type; + } + + SEC(".struct_ops") + struct sched_ext_ops simple_ops = { + .select_cpu = (void *)simple_select_cpu, + .enqueue = (void *)simple_enqueue, + .init = (void *)simple_init, + .exit = (void *)simple_exit, + .name = "simple", + }; + +Dispatch Queues +--------------- + +To match the impedance between the scheduler core and the BPF scheduler, +sched_ext uses DSQs (dispatch queues) which can operate as both a FIFO and a +priority queue. By default, there is one global FIFO (``SCX_DSQ_GLOBAL``), +and one local dsq per CPU (``SCX_DSQ_LOCAL``). The BPF scheduler can manage +an arbitrary number of dsq's using ``scx_bpf_create_dsq()`` and +``scx_bpf_destroy_dsq()``. + +A CPU always executes a task from its local DSQ. A task is "dispatched" to a +DSQ. A non-local DSQ is "consumed" to transfer a task to the consuming CPU's +local DSQ. + +When a CPU is looking for the next task to run, if the local DSQ is not +empty, the first task is picked. Otherwise, the CPU tries to consume the +global DSQ. If that doesn't yield a runnable task either, ``ops.dispatch()`` +is invoked. + +Scheduling Cycle +---------------- + +The following briefly shows how a waking task is scheduled and executed. + +1. When a task is waking up, ``ops.select_cpu()`` is the first operation + invoked. This serves two purposes. First, CPU selection optimization + hint. Second, waking up the selected CPU if idle. + + The CPU selected by ``ops.select_cpu()`` is an optimization hint and not + binding. The actual decision is made at the last step of scheduling. + However, there is a small performance gain if the CPU + ``ops.select_cpu()`` returns matches the CPU the task eventually runs on. + + A side-effect of selecting a CPU is waking it up from idle. While a BPF + scheduler can wake up any cpu using the ``scx_bpf_kick_cpu()`` helper, + using ``ops.select_cpu()`` judiciously can be simpler and more efficient. + + A task can be immediately dispatched to a DSQ from ``ops.select_cpu()`` by + calling ``scx_bpf_dispatch()``. If the task is dispatched to + ``SCX_DSQ_LOCAL`` from ``ops.select_cpu()``, it will be dispatched to the + local DSQ of whichever CPU is returned from ``ops.select_cpu()``. + Additionally, dispatching directly from ``ops.select_cpu()`` will cause the + ``ops.enqueue()`` callback to be skipped. + + Note that the scheduler core will ignore an invalid CPU selection, for + example, if it's outside the allowed cpumask of the task. + +2. Once the target CPU is selected, ``ops.enqueue()`` is invoked (unless the + task was dispatched directly from ``ops.select_cpu()``). ``ops.enqueue()`` + can make one of the following decisions: + + * Immediately dispatch the task to either the global or local DSQ by + calling ``scx_bpf_dispatch()`` with ``SCX_DSQ_GLOBAL`` or + ``SCX_DSQ_LOCAL``, respectively. + + * Immediately dispatch the task to a custom DSQ by calling + ``scx_bpf_dispatch()`` with a DSQ ID which is smaller than 2^63. + + * Queue the task on the BPF side. + +3. When a CPU is ready to schedule, it first looks at its local DSQ. If + empty, it then looks at the global DSQ. 
If there still isn't a task to + run, ``ops.dispatch()`` is invoked which can use the following two + functions to populate the local DSQ. + + * ``scx_bpf_dispatch()`` dispatches a task to a DSQ. Any target DSQ can + be used - ``SCX_DSQ_LOCAL``, ``SCX_DSQ_LOCAL_ON | cpu``, + ``SCX_DSQ_GLOBAL`` or a custom DSQ. While ``scx_bpf_dispatch()`` + currently can't be called with BPF locks held, this is being worked on + and will be supported. ``scx_bpf_dispatch()`` schedules dispatching + rather than performing them immediately. There can be up to + ``ops.dispatch_max_batch`` pending tasks. + + * ``scx_bpf_consume()`` tranfers a task from the specified non-local DSQ + to the dispatching DSQ. This function cannot be called with any BPF + locks held. ``scx_bpf_consume()`` flushes the pending dispatched tasks + before trying to consume the specified DSQ. + +4. After ``ops.dispatch()`` returns, if there are tasks in the local DSQ, + the CPU runs the first one. If empty, the following steps are taken: + + * Try to consume the global DSQ. If successful, run the task. + + * If ``ops.dispatch()`` has dispatched any tasks, retry #3. + + * If the previous task is an SCX task and still runnable, keep executing + it (see ``SCX_OPS_ENQ_LAST``). + + * Go idle. + +Note that the BPF scheduler can always choose to dispatch tasks immediately +in ``ops.enqueue()`` as illustrated in the above simple example. If only the +built-in DSQs are used, there is no need to implement ``ops.dispatch()`` as +a task is never queued on the BPF scheduler and both the local and global +DSQs are consumed automatically. + +``scx_bpf_dispatch()`` queues the task on the FIFO of the target DSQ. Use +``scx_bpf_dispatch_vtime()`` for the priority queue. Internal DSQs such as +``SCX_DSQ_LOCAL`` and ``SCX_DSQ_GLOBAL`` do not support priority-queue +dispatching, and must be dispatched to with ``scx_bpf_dispatch()``. See the +function documentation and usage in ``tools/sched_ext/scx_simple.bpf.c`` for +more information. + +Where to Look +============= + +* ``include/linux/sched/ext.h`` defines the core data structures, ops table + and constants. + +* ``kernel/sched/ext.c`` contains sched_ext core implementation and helpers. + The functions prefixed with ``scx_bpf_`` can be called from the BPF + scheduler. + +* ``tools/sched_ext/`` hosts example BPF scheduler implementations. + + * ``scx_simple[.bpf].c``: Minimal global FIFO scheduler example using a + custom DSQ. + + * ``scx_qmap[.bpf].c``: A multi-level FIFO scheduler supporting five + levels of priority implemented with ``BPF_MAP_TYPE_QUEUE``. + +ABI Instability +=============== + +The APIs provided by sched_ext to BPF schedulers programs have no stability +guarantees. This includes the ops table callbacks and constants defined in +``include/linux/sched/ext.h``, as well as the ``scx_bpf_`` kfuncs defined in +``kernel/sched/ext.c``. + +While we will attempt to provide a relatively stable API surface when +possible, they are subject to change without warning between kernel +versions. diff --git a/Documentation/security/index.rst b/Documentation/security/index.rst index 59f8fc106cb068..3e0a7114a86222 100644 --- a/Documentation/security/index.rst +++ b/Documentation/security/index.rst @@ -19,3 +19,4 @@ Security Documentation digsig landlock secrets/index + ipe diff --git a/Documentation/security/ipe.rst b/Documentation/security/ipe.rst new file mode 100644 index 00000000000000..4a7d953abcdc31 --- /dev/null +++ b/Documentation/security/ipe.rst @@ -0,0 +1,446 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +Integrity Policy Enforcement (IPE) - Kernel Documentation +========================================================= + +.. NOTE:: + + This is documentation targeted at developers, instead of administrators. + If you're looking for documentation on the usage of IPE, please see + :doc:`IPE admin guide `. + +Historical Motivation +--------------------- + +The original issue that prompted IPE's implementation was the creation +of a locked-down system. This system would be born-secure, and have +strong integrity guarantees over both the executable code, and specific +*data files* on the system, that were critical to its function. These +specific data files would not be readable unless they passed integrity +policy. A mandatory access control system would be present, and +as a result, xattrs would have to be protected. This lead to a selection +of what would provide the integrity claims. At the time, there were two +main mechanisms considered that could guarantee integrity for the system +with these requirements: + + 1. IMA + EVM Signatures + 2. DM-Verity + +Both options were carefully considered, however the choice to use DM-Verity +over IMA+EVM as the *integrity mechanism* in the original use case of IPE +was due to three main reasons: + + 1. Protection of additional attack vectors: + + * With IMA+EVM, without an encryption solution, the system is vulnerable + to offline attack against the aforementioned specific data files. + + Unlike executables, read operations (like those on the protected data + files), cannot be enforced to be globally integrity verified. This means + there must be some form of selector to determine whether a read should + enforce the integrity policy, or it should not. + + At the time, this was done with mandatory access control labels. An IMA + policy would indicate what labels required integrity verification, which + presented an issue: EVM would protect the label, but if an attacker could + modify filesystem offline, the attacker could wipe all the xattrs - + including the SELinux labels that would be used to determine whether the + file should be subject to integrity policy. + + With DM-Verity, as the xattrs are saved as part of the Merkel tree, if + offline mount occurs against the filesystem protected by dm-verity, the + checksum no longer matches and the file fails to be read. + + * As userspace binaries are paged in Linux, dm-verity also offers the + additional protection against a hostile block device. In such an attack, + the block device reports the appropriate content for the IMA hash + initially, passing the required integrity check. Then, on the page fault + that accesses the real data, will report the attacker's payload. Since + dm-verity will check the data when the page fault occurs (and the disk + access), this attack is mitigated. + + 2. Performance: + + * dm-verity provides integrity verification on demand as blocks are + read versus requiring the entire file being read into memory for + validation. + + 3. Simplicity of signing: + + * No need for two signatures (IMA, then EVM): one signature covers + an entire block device. + * Signatures can be stored externally to the filesystem metadata. + * The signature supports an x.509-based signing infrastructure. + +The next step was to choose a *policy* to enforce the integrity mechanism. +The minimum requirements for the policy were: + + 1. The policy itself must be integrity verified (preventing trivial + attack against it). + 2. 
The policy itself must be resistant to rollback attacks. + 3. The policy enforcement must have a permissive-like mode. + 4. The policy must be able to be updated, in its entirety, without + a reboot. + 5. Policy updates must be atomic. + 6. The policy must support *revocations* of previously authored + components. + 7. The policy must be auditable, at any point-of-time. + +IMA, as the only integrity policy mechanism at the time, was +considered against these list of requirements, and did not fulfill +all of the minimum requirements. Extending IMA to cover these +requirements was considered, but ultimately discarded for a +two reasons: + + 1. Regression risk; many of these changes would result in + dramatic code changes to IMA, which is already present in the + kernel, and therefore might impact users. + + 2. IMA was used in the system for measurement and attestation; + separation of measurement policy from local integrity policy + enforcement was considered favorable. + +Due to these reasons, it was decided that a new LSM should be created, +whose responsibility would be only the local integrity policy enforcement. + +Role and Scope +-------------- + +IPE, as its name implies, is fundamentally an integrity policy enforcement +solution; IPE does not mandate how integrity is provided, but instead +leaves that decision to the system administrator to set the security bar, +via the mechanisms that they select that suit their individual needs. +There are several different integrity solutions that provide a different +level of security guarantees; and IPE allows sysadmins to express policy for +theoretically all of them. + +IPE does not have an inherent mechanism to ensure integrity on its own. +Instead, there are more effective layers available for building systems that +can guarantee integrity. It's important to note that the mechanism for proving +integrity is independent of the policy for enforcing that integrity claim. + +Therefore, IPE was designed around: + + 1. Easy integrations with integrity providers. + 2. Ease of use for platform administrators/sysadmins. + +Design Rationale: +----------------- + +IPE was designed after evaluating existing integrity policy solutions +in other operating systems and environments. In this survey of other +implementations, there were a few pitfalls identified: + + 1. Policies were not readable by humans, usually requiring a binary + intermediary format. + 2. A single, non-customizable action was implicitly taken as a default. + 3. Debugging the policy required manual steps to determine what rule was violated. + 4. Authoring a policy required an in-depth knowledge of the larger system, + or operating system. + +IPE attempts to avoid all of these pitfalls. + +Policy +~~~~~~ + +Plain Text +^^^^^^^^^^ + +IPE's policy is plain-text. This introduces slightly larger policy files than +other LSMs, but solves two major problems that occurs with some integrity policy +solutions on other platforms. + +The first issue is one of code maintenance and duplication. To author policies, +the policy has to be some form of string representation (be it structured, +through XML, JSON, YAML, etcetera), to allow the policy author to understand +what is being written. In a hypothetical binary policy design, a serializer +is necessary to write the policy from the human readable form, to the binary +form, and a deserializer is needed to interpret the binary form into a data +structure in the kernel. 
+ +Eventually, another deserializer will be needed to transform the binary from +back into the human-readable form with as much information preserved. This is because a +user of this access control system will have to keep a lookup table of a checksum +and the original file itself to try to understand what policies have been deployed +on this system and what policies have not. For a single user, this may be alright, +as old policies can be discarded almost immediately after the update takes hold. +For users that manage computer fleets in the thousands, if not hundreds of thousands, +with multiple different operating systems, and multiple different operational needs, +this quickly becomes an issue, as stale policies from years ago may be present, +quickly resulting in the need to recover the policy or fund extensive infrastructure +to track what each policy contains. + +With now three separate serializer/deserializers, maintenance becomes costly. If the +policy avoids the binary format, there is only one required serializer: from the +human-readable form to the data structure in kernel, saving on code maintenance, +and retaining operability. + +The second issue with a binary format is one of transparency. As IPE controls +access based on the trust of the system's resources, it's policy must also be +trusted to be changed. This is done through signatures, resulting in needing +signing as a process. Signing, as a process, is typically done with a +high security bar, as anything signed can be used to attack integrity +enforcement systems. It is also important that, when signing something, that +the signer is aware of what they are signing. A binary policy can cause +obfuscation of that fact; what signers see is an opaque binary blob. A +plain-text policy, on the other hand, the signers see the actual policy +submitted for signing. + +Boot Policy +~~~~~~~~~~~ + +IPE, if configured appropriately, is able to enforce a policy as soon as a +kernel is booted and usermode starts. That implies some level of storage +of the policy to apply the minute usermode starts. Generally, that storage +can be handled in one of three ways: + + 1. The policy file(s) live on disk and the kernel loads the policy prior + to an code path that would result in an enforcement decision. + 2. The policy file(s) are passed by the bootloader to the kernel, who + parses the policy. + 3. There is a policy file that is compiled into the kernel that is + parsed and enforced on initialization. + +The first option has problems: the kernel reading files from userspace +is typically discouraged and very uncommon in the kernel. + +The second option also has problems: Linux supports a variety of bootloaders +across its entire ecosystem - every bootloader would have to support this +new methodology or there must be an independent source. It would likely +result in more drastic changes to the kernel startup than necessary. + +The third option is the best but it's important to be aware that the policy +will take disk space against the kernel it's compiled in. It's important to +keep this policy generalized enough that userspace can load a new, more +complicated policy, but restrictive enough that it will not overauthorize +and cause security issues. + +The initramfs provides a way that this bootup path can be established. The +kernel starts with a minimal policy, that trusts the initramfs only. 
Inside +the initramfs, when the real rootfs is mounted, but not yet transferred to, +it deploys and activates a policy that trusts the new root filesystem. +This prevents overauthorization at any step, and keeps the kernel policy +to a minimal size. + +Startup +^^^^^^^ + +Not every system, however starts with an initramfs, so the startup policy +compiled into the kernel will need some flexibility to express how trust +is established for the next phase of the bootup. To this end, if we just +make the compiled-in policy a full IPE policy, it allows system builders +to express the first stage bootup requirements appropriately. + +Updatable, Rebootless Policy +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As requirements change over time (vulnerabilities are found in previously +trusted applications, keys roll, etcetera). Updating a kernel to change the +meet those security goals is not always a suitable option, as updates are not +always risk-free, and blocking a security update leaves systems vulnerable. +This means IPE requires a policy that can be completely updated (allowing +revocations of existing policy) from a source external to the kernel (allowing +policies to be updated without updating the kernel). + +Additionally, since the kernel is stateless between invocations, and reading +policy files off the disk from kernel space is a bad idea(tm), then the +policy updates have to be done rebootlessly. + +To allow an update from an external source, it could be potentially malicious, +so this policy needs to have a way to be identified as trusted. This is +done via a signature chained to a trust source in the kernel. Arbitrarily, +this is the ``SYSTEM_TRUSTED_KEYRING``, a keyring that is initially +populated at kernel compile-time, as this matches the expectation that the +author of the compiled-in policy described above is the same entity that can +deploy policy updates. + +Anti-Rollback / Anti-Replay +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Over time, vulnerabilities are found and trusted resources may not be +trusted anymore. IPE's policy has no exception to this. There can be +instances where a mistaken policy author deploys an insecure policy, +before correcting it with a secure policy. + +Assuming that as soon as the insecure policy is signed, and an attacker +acquires the insecure policy, IPE needs a way to prevent rollback +from the secure policy update to the insecure policy update. + +Initially, IPE's policy can have a policy_version that states the +minimum required version across all policies that can be active on +the system. This will prevent rollback while the system is live. + +.. WARNING:: + + However, since the kernel is stateless across boots, this policy + version will be reset to 0.0.0 on the next boot. System builders + need to be aware of this, and ensure the new secure policies are + deployed ASAP after a boot to ensure that the window of + opportunity is minimal for an attacker to deploy the insecure policy. + +Implicit Actions: +~~~~~~~~~~~~~~~~~ + +The issue of implicit actions only becomes visible when you consider +a mixed level of security bars across multiple operations in a system. +For example, consider a system that has strong integrity guarantees +over both the executable code, and specific *data files* on the system, +that were critical to its function. In this system, three types of policies +are possible: + + 1. A policy in which failure to match any rules in the policy results + in the action being denied. + 2. 
A policy in which failure to match any rules in the policy results + in the action being allowed. + 3. A policy in which the action taken when no rules are matched is + specified by the policy author. + +The first option could make a policy like this:: + + op=EXECUTE integrity_verified=YES action=ALLOW + +In the example system, this works well for the executables, as all +executables should have integrity guarantees, without exception. The +issue becomes with the second requirement about specific data files. +This would result in a policy like this (assuming each line is +evaluated in order):: + + op=EXECUTE integrity_verified=YES action=ALLOW + + op=READ integrity_verified=NO label=critical_t action=DENY + op=READ action=ALLOW + +This is somewhat clear if you read the docs, understand the policy +is executed in order and that the default is a denial; however, the +last line effectively changes that default to an ALLOW. This is +required, because in a realistic system, there are some unverified +reads (imagine appending to a log file). + +The second option, matching no rules results in an allow, is clearer +for the specific data files:: + + op=READ integrity_verified=NO label=critical_t action=DENY + +And, like the first option, falls short with the execution scenario, +effectively needing to override the default:: + + op=EXECUTE integrity_verified=YES action=ALLOW + op=EXECUTE action=DENY + + op=READ integrity_verified=NO label=critical_t action=DENY + +This leaves the third option. Instead of making users be clever +and override the default with an empty rule, force the end-user +to consider what the appropriate default should be for their +scenario and explicitly state it:: + + DEFAULT op=EXECUTE action=DENY + op=EXECUTE integrity_verified=YES action=ALLOW + + DEFAULT op=READ action=ALLOW + op=READ integrity_verified=NO label=critical_t action=DENY + +Policy Debugging: +~~~~~~~~~~~~~~~~~ + +When developing a policy, it is useful to know what line of the policy +is being violated to reduce debugging costs; narrowing the scope of the +investigation to the exact line that resulted in the action. Some integrity +policy systems do not provide this information, instead providing the +information that was used in the evaluation. This then requires a correlation +with the policy to evaluate what went wrong. + +Instead, IPE just emits the rule that was matched. This limits the scope +of the investigation to the exact policy line (in the case of a specific +rule), or the section (in the case of a DEFAULT). This decreases iteration +and investigation times when policy failures are observed while evaluating +policies. + +IPE's policy engine is also designed in a way that it makes it obvious to +a human of how to investigate a policy failure. Each line is evaluated in +the sequence that is written, so the algorithm is very simple to follow +for humans to recreate the steps and could have caused the failure. In other +surveyed systems, optimizations occur (sorting rules, for instance) when loading +the policy. In those systems, it requires multiple steps to debug, and the +algorithm may not always be clear to the end-user without reading the code first. + +Simplified Policy: +~~~~~~~~~~~~~~~~~~ + +Finally, IPE's policy is designed for sysadmins, not kernel developers. Instead +of covering individual LSM hooks (or syscalls), IPE covers operations. 
This means +instead of sysadmins needing to know that the syscalls ``mmap``, ``mprotect``, +``execve``, and ``uselib`` must have rules protecting them, they must simple know +that they want to restrict code execution. This limits the amount of bypasses that +could occur due to a lack of knowledge of the underlying system; whereas the +maintainers of IPE, being kernel developers can make the correct choice to determine +whether something maps to these operations, and under what conditions. + +Implementation Notes +-------------------- + +Anonymous Memory +~~~~~~~~~~~~~~~~ + +Anonymous memory isn't treated any differently from any other access in IPE. +When anonymous memory is mapped with ``+X``, it still comes into the ``file_mmap`` +or ``file_mprotect`` hook, but with a ``NULL`` file object. This is submitted to +the evaluation, like any other file. However, all current trust properties will +evaluate to false, as they are all file-based and the operation is not +associated with a file. + +.. WARNING:: + + This also occurs with the ``kernel_load_data`` hook, when the kernel is + loading data from a userspace buffer that is not backed by a file. In this + scenario all current trust properties will also evaluate to false. + +Securityfs Interface +~~~~~~~~~~~~~~~~~~~~ + +The per-policy securityfs tree is somewhat unique. For example, for +a standard securityfs policy tree:: + + MyPolicy + |- active + |- delete + |- name + |- pkcs7 + |- policy + |- update + |- version + +The policy is stored in the ``->i_private`` data of the MyPolicy inode. + +Tests +----- + +IPE has KUnit Tests for the policy parser. Recommended kunitconfig:: + + CONFIG_KUNIT=y + CONFIG_SECURITY=y + CONFIG_SECURITYFS=y + CONFIG_PKCS7_MESSAGE_PARSER=y + CONFIG_SYSTEM_DATA_VERIFICATION=y + CONFIG_FS_VERITY=y + CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y + CONFIG_BLOCK=y + CONFIG_MD=y + CONFIG_BLK_DEV_DM=y + CONFIG_DM_VERITY=y + CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y + CONFIG_NET=y + CONFIG_AUDIT=y + CONFIG_AUDITSYSCALL=y + CONFIG_BLK_DEV_INITRD=y + + CONFIG_SECURITY_IPE=y + CONFIG_IPE_PROP_DM_VERITY=y + CONFIG_IPE_PROP_DM_VERITY_SIGNATURE=y + CONFIG_IPE_PROP_FS_VERITY=y + CONFIG_IPE_PROP_FS_VERITY_BUILTIN_SIG=y + CONFIG_SECURITY_IPE_KUNIT_TEST=y + +In addition, IPE has a python based integration +`test suite `_ that +can test both user interfaces and enforcement functionalities. diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst index 829c672d9fe668..04254474fa048f 100644 --- a/Documentation/sound/alsa-configuration.rst +++ b/Documentation/sound/alsa-configuration.rst @@ -1059,6 +1059,9 @@ power_save Automatic power-saving timeout (in second, 0 = disable) power_save_controller Reset HD-audio controller in power-saving mode (default = on) +pm_blacklist + Enable / disable power-management deny-list (default = look up PM + deny-list, 0 = skip PM deny-list, 1 = force to turn off runtime PM) align_buffer_size Force rounding of buffer/period sizes to multiples of 128 bytes. This is more efficient in terms of memory access but isn't diff --git a/Documentation/sound/hd-audio/notes.rst b/Documentation/sound/hd-audio/notes.rst index ef6a4513cce77a..e199131bf5abbb 100644 --- a/Documentation/sound/hd-audio/notes.rst +++ b/Documentation/sound/hd-audio/notes.rst @@ -321,12 +321,6 @@ Kernel Configuration -------------------- In general, I recommend you to enable the sound debug option, ``CONFIG_SND_DEBUG=y``, no matter whether you are debugging or not. 
-This enables snd_printd() macro and others, and you'll get additional -kernel messages at probing. - -In addition, you can enable ``CONFIG_SND_DEBUG_VERBOSE=y``. But this -will give you far more messages. Thus turn this on only when you are -sure to want it. Don't forget to turn on the appropriate ``CONFIG_SND_HDA_CODEC_*`` options. Note that each of them corresponds to the codec chip, not diff --git a/Documentation/sound/index.rst b/Documentation/sound/index.rst index 7e67e12730d3b4..c437f2a4bc85f0 100644 --- a/Documentation/sound/index.rst +++ b/Documentation/sound/index.rst @@ -13,6 +13,7 @@ Sound Subsystem Documentation alsa-configuration hd-audio/index cards/index + utimers .. only:: subproject and html diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst index 801b0bb57e9740..895752cbcedd6b 100644 --- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst +++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst @@ -4030,31 +4030,6 @@ located in the new subdirectory, sound/pci/xyz. Useful Functions ================ -:c:func:`snd_printk()` and friends ----------------------------------- - -.. note:: This subsection describes a few helper functions for - decorating a bit more on the standard :c:func:`printk()` & co. - However, in general, the use of such helpers is no longer recommended. - If possible, try to stick with the standard functions like - :c:func:`dev_err()` or :c:func:`pr_err()`. - -ALSA provides a verbose version of the :c:func:`printk()` function. -If a kernel config ``CONFIG_SND_VERBOSE_PRINTK`` is set, this function -prints the given message together with the file name and the line of the -caller. The ``KERN_XXX`` prefix is processed as well as the original -:c:func:`printk()` does, so it's recommended to add this prefix, -e.g. snd_printk(KERN_ERR "Oh my, sorry, it's extremely bad!\\n"); - -There are also :c:func:`printk()`'s for debugging. -:c:func:`snd_printd()` can be used for general debugging purposes. -If ``CONFIG_SND_DEBUG`` is set, this function is compiled, and works -just like :c:func:`snd_printk()`. If the ALSA is compiled without -the debugging flag, it's ignored. - -:c:func:`snd_printdd()` is compiled in only when -``CONFIG_SND_DEBUG_VERBOSE`` is set. - :c:func:`snd_BUG()` ------------------- diff --git a/Documentation/sound/utimers.rst b/Documentation/sound/utimers.rst new file mode 100644 index 00000000000000..ec21567d3f723d --- /dev/null +++ b/Documentation/sound/utimers.rst @@ -0,0 +1,126 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======================= +Userspace-driven timers +======================= + +:Author: Ivan Orlov + +Preface +======= + +This document describes the userspace-driven timers: virtual ALSA timers +which could be created and controlled by userspace applications using +IOCTL calls. Such timers could be useful when synchronizing audio +stream with timer sources which we don't have ALSA timers exported for +(e.g. PTP clocks), and when synchronizing the audio stream going through +two virtual sound devices using ``snd-aloop`` (for instance, when +we have a network application sending frames to one snd-aloop device, +and another sound application listening on the other end of snd-aloop). + +Enabling userspace-driven timers +================================ + +The userspace-driven timers could be enabled in the kernel using the +``CONFIG_SND_UTIMER`` configuration option. It depends on the +``CONFIG_SND_TIMER`` option, so it also should be enabled. 
+ +Userspace-driven timers API +=========================== + +Userspace application can create a userspace-driven ALSA timer by +executing the ``SNDRV_TIMER_IOCTL_CREATE`` ioctl call on the +``/dev/snd/timer`` device file descriptor. The ``snd_timer_uinfo`` +structure should be passed as an ioctl argument: + +:: + + struct snd_timer_uinfo { + __u64 resolution; + int fd; + unsigned int id; + unsigned char reserved[16]; + } + +The ``resolution`` field sets the desired resolution in nanoseconds for +the virtual timer. ``resolution`` field simply provides an information +about the virtual timer, but does not affect the timing itself. ``id`` +field gets overwritten by the ioctl, and the identifier you get in this +field after the call can be used as a timer subdevice number when +passing the timer to ``snd-aloop`` kernel module or other userspace +applications. There could be up to 128 userspace-driven timers in the +system at one moment of time, thus the id value ranges from 0 to 127. + +Besides from overwriting the ``snd_timer_uinfo`` struct, ioctl stores +a timer file descriptor, which can be used to trigger the timer, in the +``fd`` field of the ``snd_timer_uinfo`` struct. Allocation of a file +descriptor for the timer guarantees that the timer can only be triggered +by the process which created it. The timer then can be triggered with +``SNDRV_TIMER_IOCTL_TRIGGER`` ioctl call on the timer file descriptor. + +So, the example code for creating and triggering the timer would be: + +:: + + static struct snd_timer_uinfo utimer_info = { + /* Timer is going to tick (presumably) every 1000000 ns */ + .resolution = 1000000ULL, + .id = -1, + }; + + int timer_device_fd = open("/dev/snd/timer", O_RDWR | O_CLOEXEC); + + if (ioctl(timer_device_fd, SNDRV_TIMER_IOCTL_CREATE, &utimer_info)) { + perror("Failed to create the timer"); + return -1; + } + + ... + + /* + * Now we want to trigger the timer. Callbacks of all of the + * timer instances binded to this timer will be executed after + * this call. + */ + ioctl(utimer_info.fd, SNDRV_TIMER_IOCTL_TRIGGER, NULL); + + ... + + /* Now, destroy the timer */ + close(timer_info.fd); + + +More detailed example of creating and ticking the timer could be found +in the utimer ALSA selftest. + +Userspace-driven timers and snd-aloop +------------------------------------- + +Userspace-driven timers could be easily used with ``snd-aloop`` module +when synchronizing two sound applications on both ends of the virtual +sound loopback. For instance, if one of the applications receives sound +frames from network and sends them to snd-aloop pcm device, and another +application listens for frames on the other snd-aloop pcm device, it +makes sense that the ALSA middle layer should initiate a data +transaction when the new period of data is received through network, but +not when the certain amount of jiffies elapses. Userspace-driven ALSA +timers could be used to achieve this. + +To use userspace-driven ALSA timer as a timer source of snd-aloop, pass +the following string as the snd-aloop ``timer_source`` parameter: + +:: + + # modprobe snd-aloop timer_source="-1.4." + +Where ``utimer_id`` is the id of the timer you created with +``SNDRV_TIMER_IOCTL_CREATE``, and ``4`` is the number of +userspace-driven timers device (``SNDRV_TIMER_GLOBAL_UDRIVEN``). + +``resolution`` for the userspace-driven ALSA timer used with snd-aloop +should be calculated as ``1000000000ULL / frame_rate * period_size`` as +the timer is going to tick every time a new period of frames is ready. 
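As a worked example of that calculation (the stream parameters below are made up for illustration): a 48000 Hz stream with 1024-frame periods has a period duration of 1024/48000 s, roughly 21.33 ms, so the ``resolution`` passed at timer creation would be about 21333333 ns. Evaluated exactly as written, with the integer division performed first, the formula yields 20833 * 1024 = 21332992 ns; the small truncation is harmless because ``resolution`` is informational and does not affect the timing itself.

.. code-block:: c

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative numbers only: 48 kHz stream, 1024-frame periods. */
            unsigned long long frame_rate = 48000;
            unsigned long long period_size = 1024;

            /* Evaluated as written above, i.e. the division happens first. */
            unsigned long long resolution = 1000000000ULL / frame_rate * period_size;

            /* Prints 21332992, i.e. roughly the 21.33 ms period duration. */
            printf("%llu\n", resolution);
            return 0;
    }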
+ +After that, each time you trigger the timer with +``SNDRV_TIMER_IOCTL_TRIGGER`` the new period of data will be transferred +from one snd-aloop device to another. diff --git a/Documentation/sphinx/kerneldoc-preamble.sty b/Documentation/sphinx/kerneldoc-preamble.sty index d479cfa73658ad..5d68395539fe92 100644 --- a/Documentation/sphinx/kerneldoc-preamble.sty +++ b/Documentation/sphinx/kerneldoc-preamble.sty @@ -199,6 +199,8 @@ % Inactivate CJK after tableofcontents \apptocmd{\sphinxtableofcontents}{\kerneldocCJKoff}{}{} \xeCJKsetup{CJKspace = true}% For inter-phrase space of Korean TOC + % Suppress extra white space at latin .. non-latin in literal blocks + \AtBeginEnvironment{sphinxVerbatim}{\CJKsetecglue{}} }{ % Don't enable CJK % Custom macros to on/off CJK and switch CJK fonts (Dummy) \newcommand{\kerneldocCJKon}{} diff --git a/Documentation/spi/spi-summary.rst b/Documentation/spi/spi-summary.rst index 7f8accfae6f98f..6e21e6f86912ba 100644 --- a/Documentation/spi/spi-summary.rst +++ b/Documentation/spi/spi-summary.rst @@ -614,6 +614,89 @@ queue, and then start some asynchronous transfer engine (unless it's already running). +Extensions to the SPI protocol +------------------------------ +The fact that SPI doesn't have a formal specification or standard permits chip +manufacturers to implement the SPI protocol in slightly different ways. In most +cases, SPI protocol implementations from different vendors are compatible among +each other. For example, in SPI mode 0 (CPOL=0, CPHA=0) the bus lines may behave +like the following: + +:: + + nCSx ___ ___ + \_________________________________________________________________/ + • • + • • + SCLK ___ ___ ___ ___ ___ ___ ___ ___ + _______/ \___/ \___/ \___/ \___/ \___/ \___/ \___/ \_____ + • : ; : ; : ; : ; : ; : ; : ; : ; • + • : ; : ; : ; : ; : ; : ; : ; : ; • + MOSI XXX__________ _______ _______ ________XXX + 0xA5 XXX__/ 1 \_0_____/ 1 \_0_______0_____/ 1 \_0_____/ 1 \_XXX + • ; ; ; ; ; ; ; ; • + • ; ; ; ; ; ; ; ; • + MISO XXX__________ _______________________ _______ XXX + 0xBA XXX__/ 1 \_____0_/ 1 1 1 \_____0__/ 1 \____0__XXX + +Legend:: + + • marks the start/end of transmission; + : marks when data is clocked into the peripheral; + ; marks when data is clocked into the controller; + X marks when line states are not specified. + +In some few cases, chips extend the SPI protocol by specifying line behaviors +that other SPI protocols don't (e.g. data line state for when CS is not +asserted). Those distinct SPI protocols, modes, and configurations are supported +by different SPI mode flags. + +MOSI idle state configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Common SPI protocol implementations don't specify any state or behavior for the +MOSI line when the controller is not clocking out data. However, there do exist +peripherals that require specific MOSI line state when data is not being clocked +out. 
For example, if the peripheral expects the MOSI line to be high when the +controller is not clocking out data (``SPI_MOSI_IDLE_HIGH``), then a transfer in +SPI mode 0 would look like the following: + +:: + + nCSx ___ ___ + \_________________________________________________________________/ + • • + • • + SCLK ___ ___ ___ ___ ___ ___ ___ ___ + _______/ \___/ \___/ \___/ \___/ \___/ \___/ \___/ \_____ + • : ; : ; : ; : ; : ; : ; : ; : ; • + • : ; : ; : ; : ; : ; : ; : ; : ; • + MOSI _____ _______ _______ _______________ ___ + 0x56 \_0_____/ 1 \_0_____/ 1 \_0_____/ 1 1 \_0_____/ + • ; ; ; ; ; ; ; ; • + • ; ; ; ; ; ; ; ; • + MISO XXX__________ _______________________ _______ XXX + 0xBA XXX__/ 1 \_____0_/ 1 1 1 \_____0__/ 1 \____0__XXX + +Legend:: + + • marks the start/end of transmission; + : marks when data is clocked into the peripheral; + ; marks when data is clocked into the controller; + X marks when line states are not specified. + +In this extension to the usual SPI protocol, the MOSI line state is specified to +be kept high when CS is asserted but the controller is not clocking out data to +the peripheral and also when CS is not asserted. + +Peripherals that require this extension must request it by setting the +``SPI_MOSI_IDLE_HIGH`` bit into the mode attribute of their ``struct +spi_device`` and call spi_setup(). Controllers that support this extension +should indicate it by setting ``SPI_MOSI_IDLE_HIGH`` in the mode_bits attribute +of their ``struct spi_controller``. The configuration to idle MOSI low is +analogous but uses the ``SPI_MOSI_IDLE_LOW`` mode bit. + + THANKS TO --------- Contributors to Linux-SPI discussions include (in alphabetical order, diff --git a/Documentation/staging/xz.rst b/Documentation/staging/xz.rst index b2f5ff12a161f5..6953a189e5f269 100644 --- a/Documentation/staging/xz.rst +++ b/Documentation/staging/xz.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: 0BSD + ============================ XZ data compression in Linux ============================ @@ -6,62 +8,55 @@ Introduction ============ XZ is a general purpose data compression format with high compression -ratio and relatively fast decompression. The primary compression -algorithm (filter) is LZMA2. Additional filters can be used to improve -compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters -improve compression ratio of executable data. - -The XZ decompressor in Linux is called XZ Embedded. It supports -the LZMA2 filter and optionally also BCJ filters. CRC32 is supported -for integrity checking. The home page of XZ Embedded is at -, where you can find the -latest version and also information about using the code outside -the Linux kernel. - -For userspace, XZ Utils provide a zlib-like compression library -and a gzip-like command line tool. XZ Utils can be downloaded from -. +ratio. The XZ decompressor in Linux is called XZ Embedded. It supports +the LZMA2 filter and optionally also Branch/Call/Jump (BCJ) filters +for executable code. CRC32 is supported for integrity checking. + +See the `XZ Embedded`_ home page for the latest version which includes +a few optional extra features that aren't required in the Linux kernel +and information about using the code outside the Linux kernel. + +For userspace, `XZ Utils`_ provide a zlib-like compression library +and a gzip-like command line tool. + +.. _XZ Embedded: https://tukaani.org/xz/embedded.html +.. 
_XZ Utils: https://tukaani.org/xz/ XZ related components in the kernel =================================== The xz_dec module provides XZ decompressor with single-call (buffer -to buffer) and multi-call (stateful) APIs. The usage of the xz_dec -module is documented in include/linux/xz.h. - -The xz_dec_test module is for testing xz_dec. xz_dec_test is not -useful unless you are hacking the XZ decompressor. xz_dec_test -allocates a char device major dynamically to which one can write -.xz files from userspace. The decompressed output is thrown away. -Keep an eye on dmesg to see diagnostics printed by xz_dec_test. -See the xz_dec_test source code for the details. +to buffer) and multi-call (stateful) APIs in include/linux/xz.h. For decompressing the kernel image, initramfs, and initrd, there is a wrapper function in lib/decompress_unxz.c. Its API is the same as in other decompress_*.c files, which is defined in include/linux/decompress/generic.h. -scripts/xz_wrap.sh is a wrapper for the xz command line tool found -from XZ Utils. The wrapper sets compression options to values suitable -for compressing the kernel image. +For kernel makefiles, three commands are provided for use with +``$(call if_changed)``. They require the xz tool from XZ Utils. + +- ``$(call if_changed,xzkern)`` is for compressing the kernel image. + It runs the script scripts/xz_wrap.sh which uses arch-optimized + options and a big LZMA2 dictionary. + +- ``$(call if_changed,xzkern_with_size)`` is like ``xzkern`` above but + this also appends a four-byte trailer containing the uncompressed size + of the file. The trailer is needed by the boot code on some archs. -For kernel makefiles, two commands are provided for use with -$(call if_needed). The kernel image should be compressed with -$(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2 -dictionary. It will also append a four-byte trailer containing the -uncompressed size of the file, which is needed by the boot code. -Other things should be compressed with $(call if_needed,xzmisc) -which will use no BCJ filter and 1 MiB LZMA2 dictionary. +- Other things can be compressed with ``$(call if_needed,xzmisc)`` + which will use no BCJ filter and 1 MiB LZMA2 dictionary. Notes on compression options ============================ -Since the XZ Embedded supports only streams with no integrity check or -CRC32, make sure that you don't use some other integrity check type -when encoding files that are supposed to be decoded by the kernel. With -liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32 -when encoding. With the xz command line tool, use --check=none or ---check=crc32. +Since the XZ Embedded supports only streams with CRC32 or no integrity +check, make sure that you don't use some other integrity check type +when encoding files that are supposed to be decoded by the kernel. +With liblzma from XZ Utils, you need to use either ``LZMA_CHECK_CRC32`` +or ``LZMA_CHECK_NONE`` when encoding. With the ``xz`` command line tool, +use ``--check=crc32`` or ``--check=none`` to override the default +``--check=crc64``. Using CRC32 is strongly recommended unless there is some other layer which will verify the integrity of the uncompressed data anyway. @@ -71,57 +66,33 @@ by the decoder; you can only change the integrity check type (or disable it) for the actual uncompressed data. In userspace, LZMA2 is typically used with dictionary sizes of several -megabytes. 
The decoder needs to have the dictionary in RAM, thus big -dictionaries cannot be used for files that are intended to be decoded -by the kernel. 1 MiB is probably the maximum reasonable dictionary -size for in-kernel use (maybe more is OK for initramfs). The presets -in XZ Utils may not be optimal when creating files for the kernel, -so don't hesitate to use custom settings. Example:: - - xz --check=crc32 --lzma2=dict=512KiB inputfile - -An exception to above dictionary size limitation is when the decoder -is used in single-call mode. Decompressing the kernel itself is an -example of this situation. In single-call mode, the memory usage -doesn't depend on the dictionary size, and it is perfectly fine to -use a big dictionary: for maximum compression, the dictionary should -be at least as big as the uncompressed data itself. - -Future plans -============ +megabytes. The decoder needs to have the dictionary in RAM: + +- In multi-call mode the dictionary is allocated as part of the + decoder state. The reasonable maximum dictionary size for in-kernel + use will depend on the target hardware: a few megabytes is fine for + desktop systems while 64 KiB to 1 MiB might be more appropriate on + some embedded systems. + +- In single-call mode the output buffer is used as the dictionary + buffer. That is, the size of the dictionary doesn't affect the + decompressor memory usage at all. Only the base data structures + are allocated which take a little less than 30 KiB of memory. + For the best compression, the dictionary should be at least + as big as the uncompressed data. A notable example of single-call + mode is decompressing the kernel itself (except on PowerPC). + +The compression presets in XZ Utils may not be optimal when creating +files for the kernel, so don't hesitate to use custom settings to, +for example, set the dictionary size. Also, xz may produce a smaller +file in single-threaded mode so setting that explicitly is recommended. +Example:: + + xz --threads=1 --check=crc32 --lzma2=dict=512KiB inputfile + +xz_dec API +========== + +This is available with ``#include ``. -Creating a limited XZ encoder may be considered if people think it is -useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at -the fastest settings, so it isn't clear if LZMA2 encoder is wanted -into the kernel. - -Support for limited random-access reading is planned for the -decompression code. I don't know if it could have any use in the -kernel, but I know that it would be useful in some embedded projects -outside the Linux kernel. - -Conformance to the .xz file format specification -================================================ - -There are a couple of corner cases where things have been simplified -at expense of detecting errors as early as possible. These should not -matter in practice all, since they don't cause security issues. But -it is good to know this if testing the code e.g. with the test files -from XZ Utils. - -Reporting bugs -============== - -Before reporting a bug, please check that it's not fixed already -at upstream. See to get the -latest code. - -Report bugs to or visit #tukaani on -Freenode and talk to Larhzu. I don't actively read LKML or other -kernel-related mailing lists, so if there's something I should know, -you should email to me personally or use IRC. - -Don't bother Igor Pavlov with questions about the XZ implementation -in the kernel or about XZ Utils. 
While these two implementations -include essential code that is directly based on Igor Pavlov's code, -these implementations aren't maintained nor supported by him. +.. kernel-doc:: include/linux/xz.h diff --git a/Documentation/timers/timers-howto.rst b/Documentation/timers/timers-howto.rst index 5c169e3d29a8e5..ef7a4652ccc9dc 100644 --- a/Documentation/timers/timers-howto.rst +++ b/Documentation/timers/timers-howto.rst @@ -19,7 +19,7 @@ it really need to delay in atomic context?" If so... ATOMIC CONTEXT: You must use the `*delay` family of functions. These - functions use the jiffie estimation of clock speed + functions use the jiffy estimation of clock speed and will busy wait for enough loop cycles to achieve the desired delay: diff --git a/Documentation/trace/debugging.rst b/Documentation/trace/debugging.rst new file mode 100644 index 00000000000000..54fb16239d7033 --- /dev/null +++ b/Documentation/trace/debugging.rst @@ -0,0 +1,159 @@ +============================== +Using the tracer for debugging +============================== + +Copyright 2024 Google LLC. + +:Author: Steven Rostedt +:License: The GNU Free Documentation License, Version 1.2 + (dual licensed under the GPL v2) + +- Written for: 6.12 + +Introduction +------------ +The tracing infrastructure can be very useful for debugging the Linux +kernel. This document is a place to add various methods of using the tracer +for debugging. + +First, make sure that the tracefs file system is mounted:: + + $ sudo mount -t tracefs tracefs /sys/kernel/tracing + + +Using trace_printk() +-------------------- + +trace_printk() is a very lightweight utility that can be used in any context +inside the kernel, with the exception of "noinstr" sections. It can be used +in normal, softirq, interrupt and even NMI context. The trace data is +written to the tracing ring buffer in a lockless way. To make it even +lighter weight, when possible, it will only record the pointer to the format +string, and save the raw arguments into the buffer. The format and the +arguments will be post processed when the ring buffer is read. This way the +trace_printk() format conversions are not done during the hot path, where +the trace is being recorded. + +trace_printk() is meant only for debugging, and should never be added into +a subsystem of the kernel. If you need debugging traces, add trace events +instead. If a trace_printk() is found in the kernel, the following will +appear in the dmesg:: + + ********************************************************** + ** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE ** + ** ** + ** trace_printk() being used. Allocating extra memory. ** + ** ** + ** This means that this is a DEBUG kernel and it is ** + ** unsafe for production use. ** + ** ** + ** If you see this message and you are not debugging ** + ** the kernel, report this immediately to your vendor! ** + ** ** + ** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE ** + ********************************************************** + +Debugging kernel crashes +------------------------ +There is various methods of acquiring the state of the system when a kernel +crash occurs. This could be from the oops message in printk, or one could +use kexec/kdump. But these just show what happened at the time of the crash. +It can be very useful in knowing what happened up to the point of the crash. +The tracing ring buffer, by default, is a circular buffer than will +overwrite older events with newer ones. 
When a crash happens, the content of
+the ring buffer will be all the events that lead up to the crash.
+
+There are several kernel command line parameters that can be used to help in
+this. The first is "ftrace_dump_on_oops". This will dump the tracing ring
+buffer to the console when an oops occurs. This can be useful if the console
+is being logged somewhere. If a serial console is used, it may be prudent to
+make sure the ring buffer is relatively small, otherwise the dumping of the
+ring buffer may take several minutes to hours to finish. Here's an example
+of the kernel command line::
+
+  ftrace_dump_on_oops trace_buf_size=50K
+
+Note, the tracing buffer is made up of per CPU buffers where each of these
+buffers is broken up into sub-buffers that are by default PAGE_SIZE. The
+trace_buf_size option above sets each of the per CPU buffers to 50K, so on
+a machine with 8 CPUs, that's actually 400K total.
+
+Persistent buffers across boots
+-------------------------------
+If the system memory allows it, the tracing ring buffer can be specified at
+a specific location in memory. If the location is the same across boots and
+the memory is not modified, the tracing buffer can be retrieved from the
+following boot. There are two ways to reserve memory for the use of the ring
+buffer.
+
+The more reliable way (on x86) is to reserve memory with the "memmap" kernel
+command line option and then use that memory for the trace_instance. This
+requires a bit of knowledge of the physical memory layout of the system. The
+advantage of using this method is that the memory for the ring buffer will
+always be the same::
+
+  memmap=12M$0x284500000 trace_instance=boot_map@0x284500000:12M
+
+The memmap above reserves 12 megabytes of memory at the physical memory
+location 0x284500000. Then the trace_instance option will create a trace
+instance "boot_map" at that same location with the same amount of memory
+reserved. As the ring buffer is broken up into per CPU buffers, the 12
+megabytes will be broken up evenly between those CPUs. If you have 8 CPUs,
+each per CPU ring buffer will be 1.5 megabytes in size. Note, that also
+includes meta data, so the amount of memory actually used by the ring buffer
+will be slightly smaller.
+
+Another more generic but less robust way to allocate a ring buffer mapping
+at boot is with the "reserve_mem" option::
+
+  reserve_mem=12M:4096:trace trace_instance=boot_map@trace
+
+The reserve_mem option above will find 12 megabytes of available memory at
+boot up and align it to 4096 bytes. It will label this memory as "trace" so
+that it can be used by later command line options.
+
+The trace_instance option creates a "boot_map" instance and will use the
+memory reserved by reserve_mem that was labeled as "trace". This method is
+more generic but may not be as reliable. Due to KASLR, the memory reserved
+by reserve_mem may not end up at the same location across boots. If this
+happens, then the ring buffer will not be from the previous boot and will
+be reset.
+
+Sometimes a larger alignment can keep KASLR from moving things around in a
+way that changes the location of the reserve_mem area. With a larger
+alignment, you may find that the buffer is placed more consistently from
+boot to boot::
+
+  reserve_mem=12M:0x2000000:trace trace_instance=boot_map@trace
+
+On boot up, the memory reserved for the ring buffer is validated. It will go
+through a series of tests to make sure that the ring buffer contains valid
+data.
If it is, it will then set it up to be available to read from the +instance. If it fails any of the tests, it will clear the entire ring buffer +and initialize it as new. + +The layout of this mapped memory may not be consistent from kernel to +kernel, so only the same kernel is guaranteed to work if the mapping is +preserved. Switching to a different kernel version may find a different +layout and mark the buffer as invalid. + +Using trace_printk() in the boot instance +----------------------------------------- +By default, the content of trace_printk() goes into the top level tracing +instance. But this instance is never preserved across boots. To have the +trace_printk() content, and some other internal tracing go to the preserved +buffer (like dump stacks), either set the instance to be the trace_printk() +destination from the kernel command line, or set it after boot up via the +trace_printk_dest option. + +After boot up:: + + echo 1 > /sys/kernel/tracing/instances/boot_map/options/trace_printk_dest + +From the kernel command line:: + + reserve_mem=12M:4096:trace trace_instance=boot_map^traceprintk^traceoff@trace + +If setting it from the kernel command line, it is recommended to also +disable tracing with the "traceoff" flag, and enable tracing after boot up. +Otherwise the trace from the most recent boot will be mixed with the trace +from the previous boot, and may make it confusing to read. diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst index 5aba74872ba77d..4073ca48af4ad2 100644 --- a/Documentation/trace/ftrace.rst +++ b/Documentation/trace/ftrace.rst @@ -1186,6 +1186,18 @@ Here are the available options: trace_printk Can disable trace_printk() from writing into the buffer. + trace_printk_dest + Set to have trace_printk() and similar internal tracing functions + write into this instance. Note, only one trace instance can have + this set. By setting this flag, it clears the trace_printk_dest flag + of the instance that had it set previously. By default, the top + level trace has this set, and will get it set again if another + instance has it set then clears it. + + This flag cannot be cleared by the top level instance, as it is the + default instance. The only way the top level instance has this flag + cleared, is by it being set in another instance. + annotate It is sometimes confusing when the CPU buffers are full and one CPU buffer had a lot of events recently, thus diff --git a/Documentation/translations/ko_KR/core-api/wrappers/memory-barriers.rst b/Documentation/translations/ko_KR/core-api/wrappers/memory-barriers.rst new file mode 100644 index 00000000000000..526ae534dd861e --- /dev/null +++ b/Documentation/translations/ko_KR/core-api/wrappers/memory-barriers.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: GPL-2.0 + This is a simple wrapper to bring memory-barriers.txt into the RST world + until such a time as that file can be converted directly. + +========================= +리눅스 커널 메모리 배리어 +========================= + +.. raw:: latex + + \footnotesize + +.. include:: ../../memory-barriers.txt + :literal: + +.. raw:: latex + + \normalsize diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst deleted file mode 100644 index 34f14899c1559d..00000000000000 --- a/Documentation/translations/ko_KR/howto.rst +++ /dev/null @@ -1,619 +0,0 @@ -.. 
raw:: latex - - \kerneldocCJKoff - -NOTE: -This is a version of Documentation/process/howto.rst translated into korean -This document is maintained by Minchan Kim -If you find any difference between this document and the original file or -a problem with the translation, please contact the maintainer of this file. - -Please also note that the purpose of this file is to be easier to -read for non English (read: korean) speakers and is not intended as -a fork. So if you have any comments or updates for this file please -try to update the original English file first. - ----------------------------------- - -.. raw:: latex - - \kerneldocCJKon - -이 문서는 -Documentation/process/howto.rst -의 한글 번역입니다. - -역자: 김민찬 -감수: 이제이미 - ----------------------------------- - - -어떻게 리눅스 커널 개발을 하는가 -================================ - -이 문서는 커널 개발에 있어 가장 중요한 문서이다. 이 문서는 -리눅스 커널 개발자가 되는 법과 리눅스 커널 개발 커뮤니티와 일하는 -법을 담고있다. 커널 프로그래밍의 기술적인 측면과 관련된 내용들은 -포함하지 않으려고 하였지만 올바른 길로 여러분을 안내하는 데는 도움이 -될 것이다. - -이 문서에서 오래된 것을 발견하면 문서의 아래쪽에 나열된 메인테이너에게 -패치를 보내달라. - - -소개 ----- - -자, 여러분은 리눅스 커널 개발자가 되는 법을 배우고 싶은가? 아니면 -상사로부터"이 장치를 위한 리눅스 드라이버를 작성하시오"라는 말을 -들었는가? 이 문서의 목적은 여러분이 겪게 될 과정과 커뮤니티와 협력하는 -법을 조언하여 여러분의 목적을 달성하기 위해 필요한 것 모두를 알려주기 -위함이다. - -커널은 대부분은 C로 작성되어 있고 몇몇 아키텍쳐의 의존적인 부분은 -어셈블리로 작성되어 있다. 커널 개발을 위해 C를 잘 이해하고 있어야 한다. -여러분이 특정 아키텍쳐의 low-level 개발을 할 것이 아니라면 -어셈블리(특정 아키텍쳐)는 잘 알아야 할 필요는 없다. -다음의 참고서적들은 기본에 충실한 C 교육이나 수년간의 경험에 견주지는 -못하지만 적어도 참고 용도로는 좋을 것이다 - - - "The C Programming Language" by Kernighan and Ritchie [Prentice Hall] - - "Practical C Programming" by Steve Oualline [O'Reilly] - - "C: A Reference Manual" by Harbison and Steele [Prentice Hall] - -커널은 GNU C와 GNU 툴체인을 사용하여 작성되었다. 이 툴들은 ISO C11 표준을 -따르는 반면 표준에 있지 않은 많은 확장기능도 가지고 있다. 커널은 표준 C -라이브러리와는 관계없이 freestanding C 환경이어서 C 표준의 일부는 -지원되지 않는다. 임의의 long long 나누기나 floating point는 지원되지 않는다. -때론 이런 이유로 커널이 그런 확장 기능을 가진 툴체인을 가지고 만들어졌다는 -것이 이해하기 어려울 수도 있고 게다가 불행하게도 그런 것을 정확하게 설명하는 -어떤 참고문서도 있지 않다. 정보를 얻기 위해서는 gcc info (`info gcc`)페이지를 -살펴보라. - -여러분은 기존의 개발 커뮤니티와 협력하는 법을 배우려고 하고 있다는 것을 -기억하라. 코딩, 스타일, 함수에 관한 훌륭한 표준을 가진 사람들이 모인 -다양한 그룹이 있다. 이 표준들은 오랜동안 크고 지역적으로 분산된 팀들에 -의해 가장 좋은 방법으로 일하기 위하여 찾은 것을 기초로 만들어져 왔다. -그 표준들은 문서화가 잘 되어있기 때문에 가능한한 미리 많은 표준들에 -관하여 배우려고 시도하라. 다른 사람들은 여러분이나 여러분의 회사가 -일하는 방식에 적응하는 것을 원하지는 않는다. - - -법적 문제 ---------- - -리눅스 커널 소스 코드는 GPL로 배포(release)되었다. 소스트리의 메인 -디렉토리에 있는 라이센스에 관하여 상세하게 쓰여 있는 COPYING이라는 -파일을 봐라. 리눅스 커널 라이센싱 규칙과 소스 코드 안의 `SPDX -`_ 식별자 사용법은 -:ref:`Documentation/process/license-rules.rst ` 에 설명되어 -있다. 여러분이 라이센스에 관한 더 깊은 문제를 가지고 있다면 리눅스 커널 메일링 -리스트에 묻지말고 변호사와 연락하라. 메일링 리스트들에 있는 사람들은 변호사가 -아니기 때문에 법적 문제에 관하여 그들의 말에 의지해서는 안된다. - -GPL에 관한 잦은 질문들과 답변들은 다음을 참조하라. - - https://www.gnu.org/licenses/gpl-faq.html - - -문서 ----- - -리눅스 커널 소스 트리는 커널 커뮤니티와 협력하는 법을 배우기위해 훌륭한 -다양한 문서들을 가지고 있다. 새로운 기능들이 커널에 들어가게 될 때, -그 기능을 어떻게 사용하는지에 관한 설명을 위하여 새로운 문서 파일을 -추가하는 것을 권장한다. 커널이 유저스페이스로 노출하는 인터페이스를 -변경하게 되면 변경을 설명하는 메뉴얼 페이지들에 대한 패치나 정보를 -mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다. - -다음은 커널 소스 트리에 있는 읽어야 할 파일들의 리스트이다. - - :ref:`Documentation/admin-guide/README.rst ` - 이 파일은 리눅스 커널에 관하여 간단한 배경 설명과 커널을 설정하고 - 빌드하기 위해 필요한 것을 설명한다. 커널에 입문하는 사람들은 여기서 - 시작해야 한다. - - :ref:`Documentation/process/changes.rst ` - 이 파일은 커널을 성공적으로 빌드하고 실행시키기 위해 필요한 다양한 - 소프트웨어 패키지들의 최소 버젼을 나열한다. - - :ref:`Documentation/process/coding-style.rst ` - 이 문서는 리눅스 커널 코딩 스타일과 그렇게 한 몇몇 이유를 설명한다. - 모든 새로운 코드는 이 문서에 가이드라인들을 따라야 한다. 대부분의 - 메인테이너들은 이 규칙을 따르는 패치들만을 받아들일 것이고 많은 사람들이 - 그 패치가 올바른 스타일일 경우만 코드를 검토할 것이다. 
- - :ref:`Documentation/process/submitting-patches.rst ` - 이 파일들은 성공적으로 패치를 만들고 보내는 법을 다음의 내용들로 - 굉장히 상세히 설명하고 있다(그러나 다음으로 한정되진 않는다). - - - Email 내용들 - - Email 양식 - - 그것을 누구에게 보낼지 - - 이러한 규칙들을 따르는 것이 성공(역자주: 패치가 받아들여 지는 것)을 - 보장하진 않는다(왜냐하면 모든 패치들은 내용과 스타일에 관하여 - 면밀히 검토되기 때문이다). 그러나 규칙을 따르지 않는다면 거의 - 성공하지도 못할 것이다. - - 올바른 패치들을 만드는 법에 관한 훌륭한 다른 문서들이 있다. - - "The Perfect Patch" - https://www.ozlabs.org/~akpm/stuff/tpp.txt - - "Linux kernel patch submission format" - https://web.archive.org/web/20180829112450/http://linux.yyz.us/patch-format.html - - :ref:`Documentation/process/stable-api-nonsense.rst ` - 이 문서는 의도적으로 커널이 불변하는 API를 갖지 않도록 결정한 - 이유를 설명하며 다음과 같은 것들을 포함한다. - - - 서브시스템 shim-layer(호환성을 위해?) - - 운영체제들간의 드라이버 이식성 - - 커널 소스 트리내에 빠른 변화를 늦추는 것(또는 빠른 변화를 막는 것) - - 이 문서는 리눅스 개발 철학을 이해하는데 필수적이며 다른 운영체제에서 - 리눅스로 전향하는 사람들에게는 매우 중요하다. - - - :ref:`Documentation/process/security-bugs.rst ` - 여러분들이 리눅스 커널의 보안 문제를 발견했다고 생각한다면 이 문서에 - 나온 단계에 따라서 커널 개발자들에게 알리고 그 문제를 해결할 수 있도록 - 도와 달라. - - :ref:`Documentation/process/management-style.rst ` - 이 문서는 리눅스 커널 메인테이너들이 그들의 방법론에 녹아 있는 - 정신을 어떻게 공유하고 운영하는지를 설명한다. 이것은 커널 개발에 입문하는 - 모든 사람들(또는 커널 개발에 작은 호기심이라도 있는 사람들)이 - 읽어야 할 중요한 문서이다. 왜냐하면 이 문서는 커널 메인테이너들의 - 독특한 행동에 관하여 흔히 있는 오해들과 혼란들을 해소하고 있기 - 때문이다. - - :ref:`Documentation/process/stable-kernel-rules.rst ` - 이 문서는 안정적인 커널 배포가 이루어지는 규칙을 설명하고 있으며 - 여러분들이 이러한 배포들 중 하나에 변경을 하길 원한다면 - 무엇을 해야 하는지를 설명한다. - - :ref:`Documentation/process/kernel-docs.rst ` - 커널 개발에 관계된 외부 문서의 리스트이다. 커널 내의 포함된 문서들 - 중에 여러분이 찾고 싶은 문서를 발견하지 못할 경우 이 리스트를 - 살펴보라. - - :ref:`Documentation/process/applying-patches.rst ` - 패치가 무엇이며 그것을 커널의 다른 개발 브랜치들에 어떻게 - 적용하는지에 관하여 자세히 설명하고 있는 좋은 입문서이다. - -커널은 소스 코드 그 자체에서 또는 이것과 같은 ReStructuredText 마크업 (ReST) 을 -통해 자동적으로 만들어질 수 있는 많은 문서들을 가지고 있다. 이것은 커널 내의 -API에 대한 모든 설명, 그리고 락킹을 올바르게 처리하는 법에 관한 규칙을 포함하고 -있다. - -모든 그런 문서들은 커널 소스 디렉토리에서 다음 커맨드를 실행하는 것을 통해 PDF -나 HTML 의 형태로 만들어질 수 있다:: - - make pdfdocs - make htmldocs - -ReST 마크업을 사용하는 문서들은 Documentation/output 에 생성된다. 해당 -문서들은 다음의 커맨드를 사용하면 LaTeX 이나 ePub 로도 만들어질 수 있다:: - - make latexdocs - make epubdocs - -커널 개발자가 되는 것 ---------------------- - -여러분이 리눅스 커널 개발에 관하여 아무것도 모른다면 Linux KernelNewbies -프로젝트를 봐야 한다. - - https://kernelnewbies.org - -그곳은 거의 모든 종류의 기본적인 커널 개발 질문들(질문하기 전에 먼저 -아카이브를 찾아봐라. 과거에 이미 답변되었을 수도 있다)을 할 수 있는 도움이 -될만한 메일링 리스트가 있다. 또한 실시간으로 질문 할 수 있는 IRC 채널도 -가지고 있으며 리눅스 커널 개발을 배우는 데 유용한 문서들을 보유하고 있다. - -웹사이트는 코드구성, 서브시스템들, 그리고 현재 프로젝트들 -(트리 내, 외부에 존재하는)에 관한 기본적인 정보들을 가지고 있다. 또한 -그곳은 커널 컴파일이나 패치를 하는 법과 같은 기본적인 것들을 설명한다. - -여러분이 어디서 시작해야 할진 모르지만 커널 개발 커뮤니티에 참여할 수 -있는 일들을 찾길 원한다면 리눅스 커널 Janitor 프로젝트를 살펴봐라. - - https://kernelnewbies.org/KernelJanitors - -그곳은 시작하기에 훌륭한 장소이다. 그곳은 리눅스 커널 소스 트리내에 -간단히 정리되고 수정될 수 있는 문제들에 관하여 설명한다. 여러분은 이 -프로젝트를 대표하는 개발자들과 일하면서 자신의 패치를 리눅스 커널 트리에 -반영하기 위한 기본적인 것들을 배우게 될것이며 여러분이 아직 아이디어를 -가지고 있지 않다면 다음에 무엇을 해야할지에 관한 방향을 배울 수 있을 -것이다. - -리눅스 커널 코드에 실제 변경을 하기 전에 반드시 그 코드가 어떻게 -동작하는지 이해하고 있어야 한다. 코드를 분석하기 위하여 특정한 툴의 -도움을 빌려서라도 코드를 직접 읽는 것보다 좋은 것은 없다(대부분의 -자잘한 부분들은 잘 코멘트되어 있다). 그런 툴들 중에 특히 추천할만한 -것은 Linux Cross-Reference project이며 그것은 자기 참조 방식이며 -소스코드를 인덱스된 웹 페이지들의 형태로 보여준다. 최신의 멋진 커널 -코드 저장소는 다음을 통하여 참조할 수 있다. - - https://elixir.bootlin.com/ - - -개발 프로세스 -------------- - -리눅스 커널 개발 프로세스는 현재 몇몇 다른 메인 커널 "브랜치들"과 -서브시스템에 특화된 커널 브랜치들로 구성된다. 몇몇 다른 메인 -브랜치들은 다음과 같다. - - - 리누스의 메인라인 트리 - - 여러 메이저 넘버를 갖는 다양한 안정된 커널 트리들 - - 서브시스템을 위한 커널 트리들 - - 통합 테스트를 위한 linux-next 커널 트리 - -메인라인 트리 -~~~~~~~~~~~~~ - -메인라인 트리는 Linus Torvalds가 관리하며 https://kernel.org 또는 소스 -저장소에서 참조될 수 있다.개발 프로세스는 다음과 같다. - - - 새로운 커널이 배포되자마자 2주의 시간이 주어진다. 
이 기간동은 - 메인테이너들은 큰 diff들을 Linus에게 제출할 수 있다. 대개 이 패치들은 - 몇 주 동안 linux-next 커널내에 이미 있었던 것들이다. 큰 변경들을 제출하는 - 데 선호되는 방법은 git(커널의 소스 관리 툴, 더 많은 정보들은 - https://git-scm.com/ 에서 참조할 수 있다)를 사용하는 것이지만 순수한 - 패치파일의 형식으로 보내는 것도 무관하다. - - 2주 후에 -rc1 커널이 릴리즈되며 여기서부터의 주안점은 새로운 커널을 - 가능한한 안정되게 하는 것이다. 이 시점에서의 대부분의 패치들은 - 회귀(역자주: 이전에는 존재하지 않았지만 새로운 기능추가나 변경으로 인해 - 생겨난 버그)를 고쳐야 한다. 이전부터 존재한 버그는 회귀가 아니므로, 그런 - 버그에 대한 수정사항은 중요한 경우에만 보내져야 한다. 완전히 새로운 - 드라이버(혹은 파일시스템)는 -rc1 이후에만 받아들여진다는 것을 기억해라. - 왜냐하면 변경이 자체내에서만 발생하고 추가된 코드가 드라이버 외부의 다른 - 부분에는 영향을 주지 않으므로 그런 변경은 회귀를 일으킬 만한 위험을 가지고 - 있지 않기 때문이다. -rc1이 배포된 이후에 git를 사용하여 패치들을 Linus에게 - 보낼수 있지만 패치들은 공식적인 메일링 리스트로 보내서 검토를 받을 필요가 - 있다. - - 새로운 -rc는 Linus가 현재 git tree가 테스트 하기에 충분히 안정된 상태에 - 있다고 판단될 때마다 배포된다. 목표는 새로운 -rc 커널을 매주 배포하는 - 것이다. - - 이러한 프로세스는 커널이 "준비(ready)"되었다고 여겨질때까지 계속된다. - 프로세스는 대체로 6주간 지속된다. - -커널 배포에 있어서 언급할만한 가치가 있는 리눅스 커널 메일링 리스트의 -Andrew Morton의 글이 있다. - - *"커널이 언제 배포될지는 아무도 모른다. 왜냐하면 배포는 알려진 - 버그의 상황에 따라 배포되는 것이지 미리정해 놓은 시간에 따라 - 배포되는 것은 아니기 때문이다."* - -여러 메이저 넘버를 갖는 다양한 안정된 커널 트리들 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -세개의 버젼 넘버로 이루어진 버젼의 커널들은 -stable 커널들이다. 그것들은 해당 -메이저 메인라인 릴리즈에서 발견된 큰 회귀들이나 보안 문제들 중 비교적 작고 -중요한 수정들을 포함한다. 주요 stable 시리즈 릴리즈는 세번째 버젼 넘버를 -증가시키며 앞의 두 버젼 넘버는 그대로 유지한다. - -이것은 가장 최근의 안정적인 커널을 원하는 사용자에게 추천되는 브랜치이며, -개발/실험적 버젼을 테스트하는 것을 돕고자 하는 사용자들과는 별로 관련이 없다. - --stable 트리들은 "stable" 팀에 의해 관리되며 거의 매번 -격주로 배포된다. - -커널 트리 문서들 내의 :ref:`Documentation/process/stable-kernel-rules.rst ` -파일은 어떤 종류의 변경들이 -stable 트리로 들어왔는지와 -배포 프로세스가 어떻게 진행되는지를 설명한다. - -서브시스템 커널 트리들 -~~~~~~~~~~~~~~~~~~~~~~ - -다양한 커널 서브시스템의 메인테이너들 --- 그리고 많은 커널 서브시스템 개발자들 ---- 은 그들의 현재 개발 상태를 소스 저장소로 노출한다. 이를 통해 다른 사람들도 -커널의 다른 영역에 어떤 변화가 이루어지고 있는지 알 수 있다. 급속히 개발이 -진행되는 영역이 있고 그렇지 않은 영역이 있으므로, 개발자는 다른 개발자가 제출한 -수정 사항과 자신의 수정사항의 충돌이나 동일한 일을 동시에 두사람이 따로 -진행하는 사태를 방지하기 위해 급속히 개발이 진행되고 있는 영역에 작업의 -베이스를 맞춰줄 것이 요구된다. - -대부분의 이러한 저장소는 git 트리지만, git이 아닌 SCM으로 관리되거나, quilt -시리즈로 제공되는 패치들도 존재한다. 이러한 서브시스템 저장소들은 MAINTAINERS -파일에 나열되어 있다. 대부분은 https://git.kernel.org 에서 볼 수 있다. - -제안된 패치는 서브시스템 트리에 커밋되기 전에 메일링 리스트를 통해 -리뷰된다(아래의 관련 섹션을 참고하기 바란다). 일부 커널 서브시스템의 경우, 이 -리뷰 프로세스는 patchwork라는 도구를 통해 추적된다. patchwork은 등록된 패치와 -패치에 대한 코멘트, 패치의 버젼을 볼 수 있는 웹 인터페이스를 제공하고, -메인테이너는 패치를 리뷰 중, 리뷰 통과, 또는 반려됨으로 표시할 수 있다. -대부분의 이러한 patchwork 사이트는 https://patchwork.kernel.org/ 에 나열되어 -있다. - -통합 테스트를 위한 linux-next 커널 트리 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -서브시스템 트리들의 변경사항들은 mainline 트리로 들어오기 전에 통합 테스트를 -거쳐야 한다. 이런 목적으로, 모든 서브시스템 트리의 변경사항을 거의 매일 -받아가는 특수한 테스트 저장소가 존재한다: - - https://git.kernel.org/?p=linux/kernel/git/next/linux-next.git - -이런 식으로, linux-next 커널을 통해 다음 머지 기간에 메인라인 커널에 어떤 -변경이 가해질 것인지 간략히 알 수 있다. 모험심 강한 테스터라면 linux-next -커널에서 테스트를 수행하는 것도 좋을 것이다. - - -버그 보고 ---------- - -메인 커널 소스 디렉토리에 있는 'Documentation/admin-guide/reporting-issues.rst' -파일은 커널 버그라고 생각되는 것을 어떻게 보고하면 되는지, 그리고 문제를 -추적하기 위해서 커널 개발자들이 필요로 하는 정보가 무엇들인지를 상세히 설명하고 -있다. - - -버그 리포트들의 관리 --------------------- - -여러분의 해킹 기술을 연습하는 가장 좋은 방법 중의 하는 다른 사람들이 -보고한 버그들을 수정하는 것이다. 여러분은 커널을 더욱 안정화시키는데 -도움을 줄 뿐만이 아니라 실제있는 문제들을 수정하는 법을 배우게 되고 -그와 함께 여러분들의 기술은 향상될 것이며 다른 개발자들이 여러분의 -존재에 대해 알게 될 것이다. 버그를 수정하는 것은 개발자들 사이에서 -점수를 얻을 수 있는 가장 좋은 방법중의 하나이다. 왜냐하면 많은 사람들은 -다른 사람들의 버그들을 수정하기 위하여 시간을 낭비하지 않기 때문이다. - -이미 보고된 버그 리포트들을 가지고 작업하기 위해서는 여러분이 관심있는 -서브시스템을 찾아라. 해당 서브시스템의 버그들이 어디로 리포트 되는지 -MAINTAINERS 파일을 체크하라; 그건 대부분 메일링 리스트이고, 가끔은 버그 추적 -시스템이다. 그 장소에 있는 최근 버그 리포트 기록들을 검색하고 여러분이 보기에 -적합하다 싶은 것을 도와라. 여러분은 버그 리포트를 위해 -https://bugzilla.kernel.org 를 체크하고자 할 수도 있다; 소수의 커널 -서브시스템들만이 버그 신고와 추적을 위해 해당 시스템을 실제로 사용하고 있지만, -전체 커널의 버그들이 그곳에 정리된다. 
- - -메일링 리스트들 ---------------- - -위의 몇몇 문서들이 설명하였지만 핵심 커널 개발자들의 대다수는 -리눅스 커널 메일링 리스트에 참여하고 있다. 리스트에 등록하고 해지하는 -방법에 관한 자세한 사항은 다음에서 참조할 수 있다. - - http://vger.kernel.org/vger-lists.html#linux-kernel - -웹상의 많은 다른 곳에도 메일링 리스트의 아카이브들이 있다. -이러한 아카이브들을 찾으려면 검색 엔진을 사용하라. 예를 들어: - - https://lore.kernel.org/lkml/ - -여러분이 새로운 문제에 관해 리스트에 올리기 전에 말하고 싶은 주제에 관한 -것을 아카이브에서 먼저 찾아보기를 강력히 권장한다. 이미 상세하게 토론된 많은 -것들이 메일링 리스트의 아카이브에 기록되어 있다. - -각각의 커널 서브시스템들의 대부분은 자신들의 개발에 관한 노력들로 이루어진 -분리된 메일링 리스트를 따로 가지고 있다. 다른 그룹들이 무슨 리스트를 가지고 -있는지는 MAINTAINERS 파일을 참조하라. - -많은 리스트들은 kernel.org에서 호스트되고 있다. 그 정보들은 다음에서 참조될 수 있다. - - http://vger.kernel.org/vger-lists.html - -리스트들을 사용할 때는 올바른 예절을 따를 것을 유념해라. -대단하진 않지만 다음 URL은 리스트(혹은 모든 리스트)와 대화하는 몇몇 간단한 -가이드라인을 가지고 있다. - - http://www.albion.com/netiquette/ - -여러 사람들이 여러분의 메일에 응답한다면 CC: 즉 수신 리스트는 꽤 커지게 -될 것이다. 아무 이유없이 CC에서 어떤 사람도 제거하거나 리스트 주소로만 -회신하지 마라. 메일을 보낸 사람으로서 하나를 받고 리스트로부터 또 -하나를 받아 두번 받는 것에 익숙하여 있으니 mail-header를 조작하려고 하지 -말아라. 사람들은 그런 것을 좋아하지 않을 것이다. - -여러분의 회신의 문맥을 원래대로 유지해야 한다. 여러분들의 회신의 윗부분에 -"John 커널해커는 작성했다...."를 유지하며 여러분들의 의견을 그 메일의 윗부분에 -작성하지 말고 각 인용한 단락들 사이에 넣어라. - -여러분들이 패치들을 메일에 넣는다면 그것들은 -:ref:`Documentation/process/submitting-patches.rst ` 에 -나와있는데로 명백히(plain) 읽을 수 있는 텍스트여야 한다. 커널 개발자들은 -첨부파일이나 압축된 패치들을 원하지 않는다. 그들은 여러분들의 패치의 -각 라인 단위로 코멘트를 하길 원하며 압축하거나 첨부하지 않고 보내는 것이 -그렇게 할 수 있는 유일한 방법이다. 여러분들이 사용하는 메일 프로그램이 -스페이스나 탭 문자들을 조작하지 않는지 확인하라. 가장 좋은 첫 테스트는 -메일을 자신에게 보내보고 스스로 그 패치를 적용해보라. 그것이 동작하지 -않는다면 여러분의 메일 프로그램을 고치던가 제대로 동작하는 프로그램으로 -바꾸어라. - -무엇보다도 메일링 리스트의 다른 구독자들에게 보여주려 한다는 것을 기억하라. - - -커뮤니티와 협력하는 법 ----------------------- - -커널 커뮤니티의 목적은 가능한한 가장 좋은 커널을 제공하는 것이다. 여러분이 -받아들여질 패치를 제출하게 되면 그 패치의 기술적인 이점으로 검토될 것이다. -그럼 여러분들은 무엇을 기대하고 있어야 하는가? - - - 비판 - - 의견 - - 변경을 위한 요구 - - 당위성을 위한 요구 - - 침묵 - -기억하라. 이것들은 여러분의 패치가 커널로 들어가기 위한 과정이다. 여러분의 -패치들은 비판과 다른 의견을 받을 수 있고 그것들을 기술적인 레벨로 평가하고 -재작업하거나 또는 왜 수정하면 안되는지에 관하여 명료하고 간결한 이유를 -말할 수 있어야 한다. 여러분이 제출한 것에 어떤 응답도 있지 않다면 몇 일을 -기다려보고 다시 시도해라. 때론 너무 많은 메일들 속에 묻혀버리기도 한다. - -여러분은 무엇을 해서는 안되는가? - - - 여러분의 패치가 아무 질문 없이 받아들여지기를 기대하는 것 - - 방어적이 되는 것 - - 의견을 무시하는 것 - - 요청된 변경을 하지 않고 패치를 다시 제출하는 것 - -가능한한 가장 좋은 기술적인 해답을 찾고 있는 커뮤니티에서는 항상 -어떤 패치가 얼마나 좋은지에 관하여 다른 의견들이 있을 수 있다. 여러분은 -협조적이어야 하고 기꺼이 여러분의 생각을 커널 내에 맞추어야 한다. 아니면 -적어도 여러분의 것이 가치있다는 것을 증명하여야 한다. 잘못된 것도 여러분이 -올바른 방향의 해결책으로 이끌어갈 의지가 있다면 받아들여질 것이라는 점을 -기억하라. - -여러분의 첫 패치에 여러분이 수정해야하는 십여개 정도의 회신이 오는 -경우도 흔하다. 이것은 여러분의 패치가 받아들여지지 않을 것이라는 것을 -의미하는 것이 아니고 개인적으로 여러분에게 감정이 있어서 그러는 것도 -아니다. 간단히 여러분의 패치에 제기된 문제들을 수정하고 그것을 다시 -보내라. - - -커널 커뮤니티와 기업 조직간의 차이점 ------------------------------------- -커널 커뮤니티는 가장 전통적인 회사의 개발 환경과는 다르다. 여기에 여러분들의 -문제를 피하기 위한 목록이 있다. - - 여러분들이 제안한 변경들에 관하여 말할 때 좋은 것들 : - - - "이것은 여러 문제들을 해결합니다." - - "이것은 2000 라인의 코드를 줄입니다." - - "이것은 내가 말하려는 것에 관해 설명하는 패치입니다." - - "나는 5개의 다른 아키텍쳐에서 그것을 테스트 했으므로..." - - "여기에 일련의 작은 패치들이 있으므로..." - - "이것은 일반적인 머신에서 성능을 향상함으로..." - - 여러분들이 말할 때 피해야 할 좋지 않은 것들 : - - - "우리는 그것을 AIX/ptx/Solaris에서 이러한 방법으로 했다. 그러므로 그것은 좋은 것임에 틀림없다..." - - "나는 20년동안 이것을 해왔다. 그러므로..." - - "이것은 돈을 벌기위해 나의 회사가 필요로 하는 것이다." - - "이것은 우리의 엔터프라이즈 상품 라인을 위한 것이다." - - "여기에 나의 생각을 말하고 있는 1000 페이지 설계 문서가 있다." - - "나는 6달동안 이것을 했으니..." - - "여기에 5000 라인 짜리 패치가 있으니..." - - "나는 현재 뒤죽박죽인 것을 재작성했다. 그리고 여기에..." - - "나는 마감시한을 가지고 있으므로 이 패치는 지금 적용될 필요가 있다." - -커널 커뮤니티가 전통적인 소프트웨어 엔지니어링 개발 환경들과 -또 다른 점은 얼굴을 보지 않고 일한다는 점이다. 이메일과 irc를 대화의 -주요수단으로 사용하는 것의 한가지 장점은 성별이나 인종의 차별이 -없다는 것이다. 리눅스 커널의 작업 환경에서는 단지 이메일 주소만 -알수 있기 때문에 여성과 소수 민족들도 모두 받아들여진다. 국제적으로 -일하게 되는 측면은 사람의 이름에 근거하여 성별을 추측할 수 없게 -하기때문에 차별을 없애는 데 도움을 준다. Andrea라는 이름을 가진 남자와 -Pat이라는 이름을 가진 여자가 있을 수도 있는 것이다. 
리눅스 커널에서 -작업하며 생각을 표현해왔던 대부분의 여성들은 긍정적인 경험을 가지고 -있다. - -언어 장벽은 영어에 익숙하지 않은 몇몇 사람들에게 문제가 될 수도 있다. -언어의 훌륭한 구사는 메일링 리스트에서 올바르게 자신의 생각을 -표현하기 위하여 필요하다. 그래서 여러분은 이메일을 보내기 전에 -영어를 올바르게 사용하고 있는지를 체크하는 것이 바람직하다. - - -여러분의 변경을 나누어라 ------------------------- - -리눅스 커널 커뮤니티는 한꺼번에 굉장히 큰 코드의 묶음(chunk)을 쉽게 -받아들이지 않는다. 변경은 적절하게 소개되고, 검토되고, 각각의 -부분으로 작게 나누어져야 한다. 이것은 회사에서 하는 것과는 정확히 -반대되는 것이다. 여러분들의 제안은 개발 초기에 일찍이 소개되야 한다. -그래서 여러분들은 자신이 하고 있는 것에 관하여 피드백을 받을 수 있게 -된다. 커뮤니티가 여러분들이 커뮤니티와 함께 일하고 있다는 것을 -느끼도록 만들고 커뮤니티가 여러분의 기능을 위한 쓰레기 장으로써 -사용되지 않고 있다는 것을 느끼게 하자. 그러나 메일링 리스트에 한번에 -50개의 이메일을 보내지는 말아라. 여러분들의 일련의 패치들은 항상 -더 작아야 한다. - -패치를 나누는 이유는 다음과 같다. - -1) 작은 패치들은 여러분의 패치들이 적용될 수 있는 확률을 높여준다. - 왜냐하면 다른 사람들은 정확성을 검증하기 위하여 많은 시간과 노력을 - 들이기를 원하지 않는다. 5줄의 패치는 메인테이너가 거의 몇 초간 힐끗 - 보면 적용될 수 있다. 그러나 500 줄의 패치는 정확성을 검토하기 위하여 - 몇시간이 걸릴 수도 있다(걸리는 시간은 패치의 크기 혹은 다른 것에 - 비례하여 기하급수적으로 늘어난다). - - 패치를 작게 만드는 것은 무엇인가 잘못되었을 때 디버그하는 것을 - 쉽게 만든다. 즉, 그렇게 만드는 것은 매우 큰 패치를 적용한 후에 - 조사하는 것 보다 작은 패치를 적용한 후에 (그리고 몇몇의 것이 - 깨졌을 때) 하나씩 패치들을 제거해가며 디버그 하기 쉽도록 만들어 준다. - -2) 작은 패치들을 보내는 것뿐만 아니라 패치들을 제출하기전에 재작성하고 - 간단하게(혹은 간단한게 재배치하여) 하는 것도 중요하다. - -여기에 커널 개발자 Al Viro의 이야기가 있다. - - *"학생의 수학 숙제를 채점하는 선생님을 생각해보라. 선생님은 학생들이 - 답을 얻을때까지 겪은 시행착오를 보길 원하지 않는다. 선생님들은 - 간결하고 가장 뛰어난 답을 보길 원한다. 훌륭한 학생은 이것을 알고 - 마지막으로 답을 얻기 전 중간 과정들을 제출하진 않는다.* - - *커널 개발도 마찬가지이다. 메인테이너들과 검토하는 사람들은 문제를 - 풀어나가는 과정속에 숨겨진 과정을 보길 원하진 않는다. 그들은 - 간결하고 멋진 답을 보길 원한다."* - -커뮤니티와 협력하며 뛰어난 답을 찾는 것과 여러분들의 끝마치지 못한 작업들 -사이에 균형을 유지해야 하는 것은 어려울지도 모른다. 그러므로 프로세스의 -초반에 여러분의 작업을 향상시키기위한 피드백을 얻는 것 뿐만 아니라 -여러분들의 변경들을 작은 묶음으로 유지해서 심지어는 여러분의 작업의 -모든 부분이 지금은 포함될 준비가 되어있지 않지만 작은 부분은 벌써 -받아들여질 수 있도록 유지하는 것이 바람직하다. - -또한 완성되지 않았고 "나중에 수정될 것이다." 와 같은 것들을 포함하는 -패치들은 받아들여지지 않을 것이라는 점을 유념하라. - - -변경을 정당화해라 ------------------ - -여러분들의 나누어진 패치들을 리눅스 커뮤니티가 왜 반영해야 하는지를 -알도록 하는 것은 매우 중요하다. 새로운 기능들이 필요하고 유용하다는 -것은 반드시 그에 합당한 이유가 있어야 한다. - - -변경을 문서화해라 ------------------ - -여러분이 패치를 보내려 할때는 여러분이 무엇을 말하려고 하는지를 충분히 -생각하여 이메일을 작성해야 한다. 이 정보는 패치를 위한 ChangeLog가 될 -것이다. 그리고 항상 그 내용을 보길 원하는 모든 사람들을 위해 보존될 -것이다. 패치는 완벽하게 다음과 같은 내용들을 포함하여 설명해야 한다. - - - 변경이 왜 필요한지 - - 패치에 관한 전체 설계 접근(approach) - - 구현 상세들 - - 테스트 결과들 - -이것이 무엇인지 더 자세한 것을 알고 싶다면 다음 문서의 ChageLog 항을 봐라. - - "The Perfect Patch" - - https://www.ozlabs.org/~akpm/stuff/tpp.txt - - -이 모든 것을 하는 것은 매우 어려운 일이다. 완벽히 소화하는 데는 적어도 몇년이 -걸릴 수도 있다. 많은 인내와 결심이 필요한 계속되는 개선의 과정이다. 그러나 -가능한한 포기하지 말라. 많은 사람들은 이전부터 해왔던 것이고 그 사람들도 -정확하게 여러분들이 지금 서 있는 그 곳부터 시작했었다. - - - - ----------- - -"개발 프로세스"(https://lwn.net/Articles/94386/) 섹션을 -작성하는데 있어 참고할 문서를 사용하도록 허락해준 Paolo Ciarrocchi에게 -감사한다. 여러분들이 말해야 할 것과 말해서는 안되는 것의 목록 중 일부를 제공해준 -Randy Dunlap과 Gerrit Huizenga에게 감사한다. 또한 검토와 의견 그리고 -공헌을 아끼지 않은 Pat Mochel, Hanna Linder, Randy Dunlap, Kay Sievers, -Vojtech Pavlik, Jan Kara, Josh Boyer, Kees Cook, Andrew Morton, Andi Kleen, -Vadim Lobanov, Jesper Juhl, Adrian Bunk, Keri Harris, Frans Pop, -David A. Wheeler, Junio Hamano, Michael Kerrisk, and Alex Shepard에게도 감사를 전한다. -그들의 도움이 없었다면 이 문서는 존재하지 않았을 것이다. - - - -메인테이너: Greg Kroah-Hartman diff --git a/Documentation/translations/ko_KR/index.rst b/Documentation/translations/ko_KR/index.rst index 4add6b2fe1f216..a20772f9d61c1f 100644 --- a/Documentation/translations/ko_KR/index.rst +++ b/Documentation/translations/ko_KR/index.rst @@ -11,19 +11,9 @@ .. toctree:: :maxdepth: 1 - howto - - -리눅스 커널 메모리 배리어 -------------------------- - -.. raw:: latex - - \footnotesize - -.. include:: ./memory-barriers.txt - :literal: + process/howto + core-api/wrappers/memory-barriers.rst .. 
raw:: latex - }\kerneldocEndKR + }\kerneldocEndKR diff --git a/Documentation/translations/ko_KR/process/howto.rst b/Documentation/translations/ko_KR/process/howto.rst new file mode 100644 index 00000000000000..34f14899c1559d --- /dev/null +++ b/Documentation/translations/ko_KR/process/howto.rst @@ -0,0 +1,619 @@ +.. raw:: latex + + \kerneldocCJKoff + +NOTE: +This is a version of Documentation/process/howto.rst translated into korean +This document is maintained by Minchan Kim +If you find any difference between this document and the original file or +a problem with the translation, please contact the maintainer of this file. + +Please also note that the purpose of this file is to be easier to +read for non English (read: korean) speakers and is not intended as +a fork. So if you have any comments or updates for this file please +try to update the original English file first. + +---------------------------------- + +.. raw:: latex + + \kerneldocCJKon + +이 문서는 +Documentation/process/howto.rst +의 한글 번역입니다. + +역자: 김민찬 +감수: 이제이미 + +---------------------------------- + + +어떻게 리눅스 커널 개발을 하는가 +================================ + +이 문서는 커널 개발에 있어 가장 중요한 문서이다. 이 문서는 +리눅스 커널 개발자가 되는 법과 리눅스 커널 개발 커뮤니티와 일하는 +법을 담고있다. 커널 프로그래밍의 기술적인 측면과 관련된 내용들은 +포함하지 않으려고 하였지만 올바른 길로 여러분을 안내하는 데는 도움이 +될 것이다. + +이 문서에서 오래된 것을 발견하면 문서의 아래쪽에 나열된 메인테이너에게 +패치를 보내달라. + + +소개 +---- + +자, 여러분은 리눅스 커널 개발자가 되는 법을 배우고 싶은가? 아니면 +상사로부터"이 장치를 위한 리눅스 드라이버를 작성하시오"라는 말을 +들었는가? 이 문서의 목적은 여러분이 겪게 될 과정과 커뮤니티와 협력하는 +법을 조언하여 여러분의 목적을 달성하기 위해 필요한 것 모두를 알려주기 +위함이다. + +커널은 대부분은 C로 작성되어 있고 몇몇 아키텍쳐의 의존적인 부분은 +어셈블리로 작성되어 있다. 커널 개발을 위해 C를 잘 이해하고 있어야 한다. +여러분이 특정 아키텍쳐의 low-level 개발을 할 것이 아니라면 +어셈블리(특정 아키텍쳐)는 잘 알아야 할 필요는 없다. +다음의 참고서적들은 기본에 충실한 C 교육이나 수년간의 경험에 견주지는 +못하지만 적어도 참고 용도로는 좋을 것이다 + + - "The C Programming Language" by Kernighan and Ritchie [Prentice Hall] + - "Practical C Programming" by Steve Oualline [O'Reilly] + - "C: A Reference Manual" by Harbison and Steele [Prentice Hall] + +커널은 GNU C와 GNU 툴체인을 사용하여 작성되었다. 이 툴들은 ISO C11 표준을 +따르는 반면 표준에 있지 않은 많은 확장기능도 가지고 있다. 커널은 표준 C +라이브러리와는 관계없이 freestanding C 환경이어서 C 표준의 일부는 +지원되지 않는다. 임의의 long long 나누기나 floating point는 지원되지 않는다. +때론 이런 이유로 커널이 그런 확장 기능을 가진 툴체인을 가지고 만들어졌다는 +것이 이해하기 어려울 수도 있고 게다가 불행하게도 그런 것을 정확하게 설명하는 +어떤 참고문서도 있지 않다. 정보를 얻기 위해서는 gcc info (`info gcc`)페이지를 +살펴보라. + +여러분은 기존의 개발 커뮤니티와 협력하는 법을 배우려고 하고 있다는 것을 +기억하라. 코딩, 스타일, 함수에 관한 훌륭한 표준을 가진 사람들이 모인 +다양한 그룹이 있다. 이 표준들은 오랜동안 크고 지역적으로 분산된 팀들에 +의해 가장 좋은 방법으로 일하기 위하여 찾은 것을 기초로 만들어져 왔다. +그 표준들은 문서화가 잘 되어있기 때문에 가능한한 미리 많은 표준들에 +관하여 배우려고 시도하라. 다른 사람들은 여러분이나 여러분의 회사가 +일하는 방식에 적응하는 것을 원하지는 않는다. + + +법적 문제 +--------- + +리눅스 커널 소스 코드는 GPL로 배포(release)되었다. 소스트리의 메인 +디렉토리에 있는 라이센스에 관하여 상세하게 쓰여 있는 COPYING이라는 +파일을 봐라. 리눅스 커널 라이센싱 규칙과 소스 코드 안의 `SPDX +`_ 식별자 사용법은 +:ref:`Documentation/process/license-rules.rst ` 에 설명되어 +있다. 여러분이 라이센스에 관한 더 깊은 문제를 가지고 있다면 리눅스 커널 메일링 +리스트에 묻지말고 변호사와 연락하라. 메일링 리스트들에 있는 사람들은 변호사가 +아니기 때문에 법적 문제에 관하여 그들의 말에 의지해서는 안된다. + +GPL에 관한 잦은 질문들과 답변들은 다음을 참조하라. + + https://www.gnu.org/licenses/gpl-faq.html + + +문서 +---- + +리눅스 커널 소스 트리는 커널 커뮤니티와 협력하는 법을 배우기위해 훌륭한 +다양한 문서들을 가지고 있다. 새로운 기능들이 커널에 들어가게 될 때, +그 기능을 어떻게 사용하는지에 관한 설명을 위하여 새로운 문서 파일을 +추가하는 것을 권장한다. 커널이 유저스페이스로 노출하는 인터페이스를 +변경하게 되면 변경을 설명하는 메뉴얼 페이지들에 대한 패치나 정보를 +mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다. + +다음은 커널 소스 트리에 있는 읽어야 할 파일들의 리스트이다. + + :ref:`Documentation/admin-guide/README.rst ` + 이 파일은 리눅스 커널에 관하여 간단한 배경 설명과 커널을 설정하고 + 빌드하기 위해 필요한 것을 설명한다. 커널에 입문하는 사람들은 여기서 + 시작해야 한다. 
+ + :ref:`Documentation/process/changes.rst ` + 이 파일은 커널을 성공적으로 빌드하고 실행시키기 위해 필요한 다양한 + 소프트웨어 패키지들의 최소 버젼을 나열한다. + + :ref:`Documentation/process/coding-style.rst ` + 이 문서는 리눅스 커널 코딩 스타일과 그렇게 한 몇몇 이유를 설명한다. + 모든 새로운 코드는 이 문서에 가이드라인들을 따라야 한다. 대부분의 + 메인테이너들은 이 규칙을 따르는 패치들만을 받아들일 것이고 많은 사람들이 + 그 패치가 올바른 스타일일 경우만 코드를 검토할 것이다. + + :ref:`Documentation/process/submitting-patches.rst ` + 이 파일들은 성공적으로 패치를 만들고 보내는 법을 다음의 내용들로 + 굉장히 상세히 설명하고 있다(그러나 다음으로 한정되진 않는다). + + - Email 내용들 + - Email 양식 + - 그것을 누구에게 보낼지 + + 이러한 규칙들을 따르는 것이 성공(역자주: 패치가 받아들여 지는 것)을 + 보장하진 않는다(왜냐하면 모든 패치들은 내용과 스타일에 관하여 + 면밀히 검토되기 때문이다). 그러나 규칙을 따르지 않는다면 거의 + 성공하지도 못할 것이다. + + 올바른 패치들을 만드는 법에 관한 훌륭한 다른 문서들이 있다. + + "The Perfect Patch" + https://www.ozlabs.org/~akpm/stuff/tpp.txt + + "Linux kernel patch submission format" + https://web.archive.org/web/20180829112450/http://linux.yyz.us/patch-format.html + + :ref:`Documentation/process/stable-api-nonsense.rst ` + 이 문서는 의도적으로 커널이 불변하는 API를 갖지 않도록 결정한 + 이유를 설명하며 다음과 같은 것들을 포함한다. + + - 서브시스템 shim-layer(호환성을 위해?) + - 운영체제들간의 드라이버 이식성 + - 커널 소스 트리내에 빠른 변화를 늦추는 것(또는 빠른 변화를 막는 것) + + 이 문서는 리눅스 개발 철학을 이해하는데 필수적이며 다른 운영체제에서 + 리눅스로 전향하는 사람들에게는 매우 중요하다. + + + :ref:`Documentation/process/security-bugs.rst ` + 여러분들이 리눅스 커널의 보안 문제를 발견했다고 생각한다면 이 문서에 + 나온 단계에 따라서 커널 개발자들에게 알리고 그 문제를 해결할 수 있도록 + 도와 달라. + + :ref:`Documentation/process/management-style.rst ` + 이 문서는 리눅스 커널 메인테이너들이 그들의 방법론에 녹아 있는 + 정신을 어떻게 공유하고 운영하는지를 설명한다. 이것은 커널 개발에 입문하는 + 모든 사람들(또는 커널 개발에 작은 호기심이라도 있는 사람들)이 + 읽어야 할 중요한 문서이다. 왜냐하면 이 문서는 커널 메인테이너들의 + 독특한 행동에 관하여 흔히 있는 오해들과 혼란들을 해소하고 있기 + 때문이다. + + :ref:`Documentation/process/stable-kernel-rules.rst ` + 이 문서는 안정적인 커널 배포가 이루어지는 규칙을 설명하고 있으며 + 여러분들이 이러한 배포들 중 하나에 변경을 하길 원한다면 + 무엇을 해야 하는지를 설명한다. + + :ref:`Documentation/process/kernel-docs.rst ` + 커널 개발에 관계된 외부 문서의 리스트이다. 커널 내의 포함된 문서들 + 중에 여러분이 찾고 싶은 문서를 발견하지 못할 경우 이 리스트를 + 살펴보라. + + :ref:`Documentation/process/applying-patches.rst ` + 패치가 무엇이며 그것을 커널의 다른 개발 브랜치들에 어떻게 + 적용하는지에 관하여 자세히 설명하고 있는 좋은 입문서이다. + +커널은 소스 코드 그 자체에서 또는 이것과 같은 ReStructuredText 마크업 (ReST) 을 +통해 자동적으로 만들어질 수 있는 많은 문서들을 가지고 있다. 이것은 커널 내의 +API에 대한 모든 설명, 그리고 락킹을 올바르게 처리하는 법에 관한 규칙을 포함하고 +있다. + +모든 그런 문서들은 커널 소스 디렉토리에서 다음 커맨드를 실행하는 것을 통해 PDF +나 HTML 의 형태로 만들어질 수 있다:: + + make pdfdocs + make htmldocs + +ReST 마크업을 사용하는 문서들은 Documentation/output 에 생성된다. 해당 +문서들은 다음의 커맨드를 사용하면 LaTeX 이나 ePub 로도 만들어질 수 있다:: + + make latexdocs + make epubdocs + +커널 개발자가 되는 것 +--------------------- + +여러분이 리눅스 커널 개발에 관하여 아무것도 모른다면 Linux KernelNewbies +프로젝트를 봐야 한다. + + https://kernelnewbies.org + +그곳은 거의 모든 종류의 기본적인 커널 개발 질문들(질문하기 전에 먼저 +아카이브를 찾아봐라. 과거에 이미 답변되었을 수도 있다)을 할 수 있는 도움이 +될만한 메일링 리스트가 있다. 또한 실시간으로 질문 할 수 있는 IRC 채널도 +가지고 있으며 리눅스 커널 개발을 배우는 데 유용한 문서들을 보유하고 있다. + +웹사이트는 코드구성, 서브시스템들, 그리고 현재 프로젝트들 +(트리 내, 외부에 존재하는)에 관한 기본적인 정보들을 가지고 있다. 또한 +그곳은 커널 컴파일이나 패치를 하는 법과 같은 기본적인 것들을 설명한다. + +여러분이 어디서 시작해야 할진 모르지만 커널 개발 커뮤니티에 참여할 수 +있는 일들을 찾길 원한다면 리눅스 커널 Janitor 프로젝트를 살펴봐라. + + https://kernelnewbies.org/KernelJanitors + +그곳은 시작하기에 훌륭한 장소이다. 그곳은 리눅스 커널 소스 트리내에 +간단히 정리되고 수정될 수 있는 문제들에 관하여 설명한다. 여러분은 이 +프로젝트를 대표하는 개발자들과 일하면서 자신의 패치를 리눅스 커널 트리에 +반영하기 위한 기본적인 것들을 배우게 될것이며 여러분이 아직 아이디어를 +가지고 있지 않다면 다음에 무엇을 해야할지에 관한 방향을 배울 수 있을 +것이다. + +리눅스 커널 코드에 실제 변경을 하기 전에 반드시 그 코드가 어떻게 +동작하는지 이해하고 있어야 한다. 코드를 분석하기 위하여 특정한 툴의 +도움을 빌려서라도 코드를 직접 읽는 것보다 좋은 것은 없다(대부분의 +자잘한 부분들은 잘 코멘트되어 있다). 그런 툴들 중에 특히 추천할만한 +것은 Linux Cross-Reference project이며 그것은 자기 참조 방식이며 +소스코드를 인덱스된 웹 페이지들의 형태로 보여준다. 최신의 멋진 커널 +코드 저장소는 다음을 통하여 참조할 수 있다. 
+ + https://elixir.bootlin.com/ + + +개발 프로세스 +------------- + +리눅스 커널 개발 프로세스는 현재 몇몇 다른 메인 커널 "브랜치들"과 +서브시스템에 특화된 커널 브랜치들로 구성된다. 몇몇 다른 메인 +브랜치들은 다음과 같다. + + - 리누스의 메인라인 트리 + - 여러 메이저 넘버를 갖는 다양한 안정된 커널 트리들 + - 서브시스템을 위한 커널 트리들 + - 통합 테스트를 위한 linux-next 커널 트리 + +메인라인 트리 +~~~~~~~~~~~~~ + +메인라인 트리는 Linus Torvalds가 관리하며 https://kernel.org 또는 소스 +저장소에서 참조될 수 있다.개발 프로세스는 다음과 같다. + + - 새로운 커널이 배포되자마자 2주의 시간이 주어진다. 이 기간동은 + 메인테이너들은 큰 diff들을 Linus에게 제출할 수 있다. 대개 이 패치들은 + 몇 주 동안 linux-next 커널내에 이미 있었던 것들이다. 큰 변경들을 제출하는 + 데 선호되는 방법은 git(커널의 소스 관리 툴, 더 많은 정보들은 + https://git-scm.com/ 에서 참조할 수 있다)를 사용하는 것이지만 순수한 + 패치파일의 형식으로 보내는 것도 무관하다. + - 2주 후에 -rc1 커널이 릴리즈되며 여기서부터의 주안점은 새로운 커널을 + 가능한한 안정되게 하는 것이다. 이 시점에서의 대부분의 패치들은 + 회귀(역자주: 이전에는 존재하지 않았지만 새로운 기능추가나 변경으로 인해 + 생겨난 버그)를 고쳐야 한다. 이전부터 존재한 버그는 회귀가 아니므로, 그런 + 버그에 대한 수정사항은 중요한 경우에만 보내져야 한다. 완전히 새로운 + 드라이버(혹은 파일시스템)는 -rc1 이후에만 받아들여진다는 것을 기억해라. + 왜냐하면 변경이 자체내에서만 발생하고 추가된 코드가 드라이버 외부의 다른 + 부분에는 영향을 주지 않으므로 그런 변경은 회귀를 일으킬 만한 위험을 가지고 + 있지 않기 때문이다. -rc1이 배포된 이후에 git를 사용하여 패치들을 Linus에게 + 보낼수 있지만 패치들은 공식적인 메일링 리스트로 보내서 검토를 받을 필요가 + 있다. + - 새로운 -rc는 Linus가 현재 git tree가 테스트 하기에 충분히 안정된 상태에 + 있다고 판단될 때마다 배포된다. 목표는 새로운 -rc 커널을 매주 배포하는 + 것이다. + - 이러한 프로세스는 커널이 "준비(ready)"되었다고 여겨질때까지 계속된다. + 프로세스는 대체로 6주간 지속된다. + +커널 배포에 있어서 언급할만한 가치가 있는 리눅스 커널 메일링 리스트의 +Andrew Morton의 글이 있다. + + *"커널이 언제 배포될지는 아무도 모른다. 왜냐하면 배포는 알려진 + 버그의 상황에 따라 배포되는 것이지 미리정해 놓은 시간에 따라 + 배포되는 것은 아니기 때문이다."* + +여러 메이저 넘버를 갖는 다양한 안정된 커널 트리들 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +세개의 버젼 넘버로 이루어진 버젼의 커널들은 -stable 커널들이다. 그것들은 해당 +메이저 메인라인 릴리즈에서 발견된 큰 회귀들이나 보안 문제들 중 비교적 작고 +중요한 수정들을 포함한다. 주요 stable 시리즈 릴리즈는 세번째 버젼 넘버를 +증가시키며 앞의 두 버젼 넘버는 그대로 유지한다. + +이것은 가장 최근의 안정적인 커널을 원하는 사용자에게 추천되는 브랜치이며, +개발/실험적 버젼을 테스트하는 것을 돕고자 하는 사용자들과는 별로 관련이 없다. + +-stable 트리들은 "stable" 팀에 의해 관리되며 거의 매번 +격주로 배포된다. + +커널 트리 문서들 내의 :ref:`Documentation/process/stable-kernel-rules.rst ` +파일은 어떤 종류의 변경들이 -stable 트리로 들어왔는지와 +배포 프로세스가 어떻게 진행되는지를 설명한다. + +서브시스템 커널 트리들 +~~~~~~~~~~~~~~~~~~~~~~ + +다양한 커널 서브시스템의 메인테이너들 --- 그리고 많은 커널 서브시스템 개발자들 +--- 은 그들의 현재 개발 상태를 소스 저장소로 노출한다. 이를 통해 다른 사람들도 +커널의 다른 영역에 어떤 변화가 이루어지고 있는지 알 수 있다. 급속히 개발이 +진행되는 영역이 있고 그렇지 않은 영역이 있으므로, 개발자는 다른 개발자가 제출한 +수정 사항과 자신의 수정사항의 충돌이나 동일한 일을 동시에 두사람이 따로 +진행하는 사태를 방지하기 위해 급속히 개발이 진행되고 있는 영역에 작업의 +베이스를 맞춰줄 것이 요구된다. + +대부분의 이러한 저장소는 git 트리지만, git이 아닌 SCM으로 관리되거나, quilt +시리즈로 제공되는 패치들도 존재한다. 이러한 서브시스템 저장소들은 MAINTAINERS +파일에 나열되어 있다. 대부분은 https://git.kernel.org 에서 볼 수 있다. + +제안된 패치는 서브시스템 트리에 커밋되기 전에 메일링 리스트를 통해 +리뷰된다(아래의 관련 섹션을 참고하기 바란다). 일부 커널 서브시스템의 경우, 이 +리뷰 프로세스는 patchwork라는 도구를 통해 추적된다. patchwork은 등록된 패치와 +패치에 대한 코멘트, 패치의 버젼을 볼 수 있는 웹 인터페이스를 제공하고, +메인테이너는 패치를 리뷰 중, 리뷰 통과, 또는 반려됨으로 표시할 수 있다. +대부분의 이러한 patchwork 사이트는 https://patchwork.kernel.org/ 에 나열되어 +있다. + +통합 테스트를 위한 linux-next 커널 트리 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +서브시스템 트리들의 변경사항들은 mainline 트리로 들어오기 전에 통합 테스트를 +거쳐야 한다. 이런 목적으로, 모든 서브시스템 트리의 변경사항을 거의 매일 +받아가는 특수한 테스트 저장소가 존재한다: + + https://git.kernel.org/?p=linux/kernel/git/next/linux-next.git + +이런 식으로, linux-next 커널을 통해 다음 머지 기간에 메인라인 커널에 어떤 +변경이 가해질 것인지 간략히 알 수 있다. 모험심 강한 테스터라면 linux-next +커널에서 테스트를 수행하는 것도 좋을 것이다. + + +버그 보고 +--------- + +메인 커널 소스 디렉토리에 있는 'Documentation/admin-guide/reporting-issues.rst' +파일은 커널 버그라고 생각되는 것을 어떻게 보고하면 되는지, 그리고 문제를 +추적하기 위해서 커널 개발자들이 필요로 하는 정보가 무엇들인지를 상세히 설명하고 +있다. + + +버그 리포트들의 관리 +-------------------- + +여러분의 해킹 기술을 연습하는 가장 좋은 방법 중의 하는 다른 사람들이 +보고한 버그들을 수정하는 것이다. 여러분은 커널을 더욱 안정화시키는데 +도움을 줄 뿐만이 아니라 실제있는 문제들을 수정하는 법을 배우게 되고 +그와 함께 여러분들의 기술은 향상될 것이며 다른 개발자들이 여러분의 +존재에 대해 알게 될 것이다. 
버그를 수정하는 것은 개발자들 사이에서 +점수를 얻을 수 있는 가장 좋은 방법중의 하나이다. 왜냐하면 많은 사람들은 +다른 사람들의 버그들을 수정하기 위하여 시간을 낭비하지 않기 때문이다. + +이미 보고된 버그 리포트들을 가지고 작업하기 위해서는 여러분이 관심있는 +서브시스템을 찾아라. 해당 서브시스템의 버그들이 어디로 리포트 되는지 +MAINTAINERS 파일을 체크하라; 그건 대부분 메일링 리스트이고, 가끔은 버그 추적 +시스템이다. 그 장소에 있는 최근 버그 리포트 기록들을 검색하고 여러분이 보기에 +적합하다 싶은 것을 도와라. 여러분은 버그 리포트를 위해 +https://bugzilla.kernel.org 를 체크하고자 할 수도 있다; 소수의 커널 +서브시스템들만이 버그 신고와 추적을 위해 해당 시스템을 실제로 사용하고 있지만, +전체 커널의 버그들이 그곳에 정리된다. + + +메일링 리스트들 +--------------- + +위의 몇몇 문서들이 설명하였지만 핵심 커널 개발자들의 대다수는 +리눅스 커널 메일링 리스트에 참여하고 있다. 리스트에 등록하고 해지하는 +방법에 관한 자세한 사항은 다음에서 참조할 수 있다. + + http://vger.kernel.org/vger-lists.html#linux-kernel + +웹상의 많은 다른 곳에도 메일링 리스트의 아카이브들이 있다. +이러한 아카이브들을 찾으려면 검색 엔진을 사용하라. 예를 들어: + + https://lore.kernel.org/lkml/ + +여러분이 새로운 문제에 관해 리스트에 올리기 전에 말하고 싶은 주제에 관한 +것을 아카이브에서 먼저 찾아보기를 강력히 권장한다. 이미 상세하게 토론된 많은 +것들이 메일링 리스트의 아카이브에 기록되어 있다. + +각각의 커널 서브시스템들의 대부분은 자신들의 개발에 관한 노력들로 이루어진 +분리된 메일링 리스트를 따로 가지고 있다. 다른 그룹들이 무슨 리스트를 가지고 +있는지는 MAINTAINERS 파일을 참조하라. + +많은 리스트들은 kernel.org에서 호스트되고 있다. 그 정보들은 다음에서 참조될 수 있다. + + http://vger.kernel.org/vger-lists.html + +리스트들을 사용할 때는 올바른 예절을 따를 것을 유념해라. +대단하진 않지만 다음 URL은 리스트(혹은 모든 리스트)와 대화하는 몇몇 간단한 +가이드라인을 가지고 있다. + + http://www.albion.com/netiquette/ + +여러 사람들이 여러분의 메일에 응답한다면 CC: 즉 수신 리스트는 꽤 커지게 +될 것이다. 아무 이유없이 CC에서 어떤 사람도 제거하거나 리스트 주소로만 +회신하지 마라. 메일을 보낸 사람으로서 하나를 받고 리스트로부터 또 +하나를 받아 두번 받는 것에 익숙하여 있으니 mail-header를 조작하려고 하지 +말아라. 사람들은 그런 것을 좋아하지 않을 것이다. + +여러분의 회신의 문맥을 원래대로 유지해야 한다. 여러분들의 회신의 윗부분에 +"John 커널해커는 작성했다...."를 유지하며 여러분들의 의견을 그 메일의 윗부분에 +작성하지 말고 각 인용한 단락들 사이에 넣어라. + +여러분들이 패치들을 메일에 넣는다면 그것들은 +:ref:`Documentation/process/submitting-patches.rst ` 에 +나와있는데로 명백히(plain) 읽을 수 있는 텍스트여야 한다. 커널 개발자들은 +첨부파일이나 압축된 패치들을 원하지 않는다. 그들은 여러분들의 패치의 +각 라인 단위로 코멘트를 하길 원하며 압축하거나 첨부하지 않고 보내는 것이 +그렇게 할 수 있는 유일한 방법이다. 여러분들이 사용하는 메일 프로그램이 +스페이스나 탭 문자들을 조작하지 않는지 확인하라. 가장 좋은 첫 테스트는 +메일을 자신에게 보내보고 스스로 그 패치를 적용해보라. 그것이 동작하지 +않는다면 여러분의 메일 프로그램을 고치던가 제대로 동작하는 프로그램으로 +바꾸어라. + +무엇보다도 메일링 리스트의 다른 구독자들에게 보여주려 한다는 것을 기억하라. + + +커뮤니티와 협력하는 법 +---------------------- + +커널 커뮤니티의 목적은 가능한한 가장 좋은 커널을 제공하는 것이다. 여러분이 +받아들여질 패치를 제출하게 되면 그 패치의 기술적인 이점으로 검토될 것이다. +그럼 여러분들은 무엇을 기대하고 있어야 하는가? + + - 비판 + - 의견 + - 변경을 위한 요구 + - 당위성을 위한 요구 + - 침묵 + +기억하라. 이것들은 여러분의 패치가 커널로 들어가기 위한 과정이다. 여러분의 +패치들은 비판과 다른 의견을 받을 수 있고 그것들을 기술적인 레벨로 평가하고 +재작업하거나 또는 왜 수정하면 안되는지에 관하여 명료하고 간결한 이유를 +말할 수 있어야 한다. 여러분이 제출한 것에 어떤 응답도 있지 않다면 몇 일을 +기다려보고 다시 시도해라. 때론 너무 많은 메일들 속에 묻혀버리기도 한다. + +여러분은 무엇을 해서는 안되는가? + + - 여러분의 패치가 아무 질문 없이 받아들여지기를 기대하는 것 + - 방어적이 되는 것 + - 의견을 무시하는 것 + - 요청된 변경을 하지 않고 패치를 다시 제출하는 것 + +가능한한 가장 좋은 기술적인 해답을 찾고 있는 커뮤니티에서는 항상 +어떤 패치가 얼마나 좋은지에 관하여 다른 의견들이 있을 수 있다. 여러분은 +협조적이어야 하고 기꺼이 여러분의 생각을 커널 내에 맞추어야 한다. 아니면 +적어도 여러분의 것이 가치있다는 것을 증명하여야 한다. 잘못된 것도 여러분이 +올바른 방향의 해결책으로 이끌어갈 의지가 있다면 받아들여질 것이라는 점을 +기억하라. + +여러분의 첫 패치에 여러분이 수정해야하는 십여개 정도의 회신이 오는 +경우도 흔하다. 이것은 여러분의 패치가 받아들여지지 않을 것이라는 것을 +의미하는 것이 아니고 개인적으로 여러분에게 감정이 있어서 그러는 것도 +아니다. 간단히 여러분의 패치에 제기된 문제들을 수정하고 그것을 다시 +보내라. + + +커널 커뮤니티와 기업 조직간의 차이점 +------------------------------------ +커널 커뮤니티는 가장 전통적인 회사의 개발 환경과는 다르다. 여기에 여러분들의 +문제를 피하기 위한 목록이 있다. + + 여러분들이 제안한 변경들에 관하여 말할 때 좋은 것들 : + + - "이것은 여러 문제들을 해결합니다." + - "이것은 2000 라인의 코드를 줄입니다." + - "이것은 내가 말하려는 것에 관해 설명하는 패치입니다." + - "나는 5개의 다른 아키텍쳐에서 그것을 테스트 했으므로..." + - "여기에 일련의 작은 패치들이 있으므로..." + - "이것은 일반적인 머신에서 성능을 향상함으로..." + + 여러분들이 말할 때 피해야 할 좋지 않은 것들 : + + - "우리는 그것을 AIX/ptx/Solaris에서 이러한 방법으로 했다. 그러므로 그것은 좋은 것임에 틀림없다..." + - "나는 20년동안 이것을 해왔다. 그러므로..." + - "이것은 돈을 벌기위해 나의 회사가 필요로 하는 것이다." + - "이것은 우리의 엔터프라이즈 상품 라인을 위한 것이다." + - "여기에 나의 생각을 말하고 있는 1000 페이지 설계 문서가 있다." 
+ - "나는 6달동안 이것을 했으니..." + - "여기에 5000 라인 짜리 패치가 있으니..." + - "나는 현재 뒤죽박죽인 것을 재작성했다. 그리고 여기에..." + - "나는 마감시한을 가지고 있으므로 이 패치는 지금 적용될 필요가 있다." + +커널 커뮤니티가 전통적인 소프트웨어 엔지니어링 개발 환경들과 +또 다른 점은 얼굴을 보지 않고 일한다는 점이다. 이메일과 irc를 대화의 +주요수단으로 사용하는 것의 한가지 장점은 성별이나 인종의 차별이 +없다는 것이다. 리눅스 커널의 작업 환경에서는 단지 이메일 주소만 +알수 있기 때문에 여성과 소수 민족들도 모두 받아들여진다. 국제적으로 +일하게 되는 측면은 사람의 이름에 근거하여 성별을 추측할 수 없게 +하기때문에 차별을 없애는 데 도움을 준다. Andrea라는 이름을 가진 남자와 +Pat이라는 이름을 가진 여자가 있을 수도 있는 것이다. 리눅스 커널에서 +작업하며 생각을 표현해왔던 대부분의 여성들은 긍정적인 경험을 가지고 +있다. + +언어 장벽은 영어에 익숙하지 않은 몇몇 사람들에게 문제가 될 수도 있다. +언어의 훌륭한 구사는 메일링 리스트에서 올바르게 자신의 생각을 +표현하기 위하여 필요하다. 그래서 여러분은 이메일을 보내기 전에 +영어를 올바르게 사용하고 있는지를 체크하는 것이 바람직하다. + + +여러분의 변경을 나누어라 +------------------------ + +리눅스 커널 커뮤니티는 한꺼번에 굉장히 큰 코드의 묶음(chunk)을 쉽게 +받아들이지 않는다. 변경은 적절하게 소개되고, 검토되고, 각각의 +부분으로 작게 나누어져야 한다. 이것은 회사에서 하는 것과는 정확히 +반대되는 것이다. 여러분들의 제안은 개발 초기에 일찍이 소개되야 한다. +그래서 여러분들은 자신이 하고 있는 것에 관하여 피드백을 받을 수 있게 +된다. 커뮤니티가 여러분들이 커뮤니티와 함께 일하고 있다는 것을 +느끼도록 만들고 커뮤니티가 여러분의 기능을 위한 쓰레기 장으로써 +사용되지 않고 있다는 것을 느끼게 하자. 그러나 메일링 리스트에 한번에 +50개의 이메일을 보내지는 말아라. 여러분들의 일련의 패치들은 항상 +더 작아야 한다. + +패치를 나누는 이유는 다음과 같다. + +1) 작은 패치들은 여러분의 패치들이 적용될 수 있는 확률을 높여준다. + 왜냐하면 다른 사람들은 정확성을 검증하기 위하여 많은 시간과 노력을 + 들이기를 원하지 않는다. 5줄의 패치는 메인테이너가 거의 몇 초간 힐끗 + 보면 적용될 수 있다. 그러나 500 줄의 패치는 정확성을 검토하기 위하여 + 몇시간이 걸릴 수도 있다(걸리는 시간은 패치의 크기 혹은 다른 것에 + 비례하여 기하급수적으로 늘어난다). + + 패치를 작게 만드는 것은 무엇인가 잘못되었을 때 디버그하는 것을 + 쉽게 만든다. 즉, 그렇게 만드는 것은 매우 큰 패치를 적용한 후에 + 조사하는 것 보다 작은 패치를 적용한 후에 (그리고 몇몇의 것이 + 깨졌을 때) 하나씩 패치들을 제거해가며 디버그 하기 쉽도록 만들어 준다. + +2) 작은 패치들을 보내는 것뿐만 아니라 패치들을 제출하기전에 재작성하고 + 간단하게(혹은 간단한게 재배치하여) 하는 것도 중요하다. + +여기에 커널 개발자 Al Viro의 이야기가 있다. + + *"학생의 수학 숙제를 채점하는 선생님을 생각해보라. 선생님은 학생들이 + 답을 얻을때까지 겪은 시행착오를 보길 원하지 않는다. 선생님들은 + 간결하고 가장 뛰어난 답을 보길 원한다. 훌륭한 학생은 이것을 알고 + 마지막으로 답을 얻기 전 중간 과정들을 제출하진 않는다.* + + *커널 개발도 마찬가지이다. 메인테이너들과 검토하는 사람들은 문제를 + 풀어나가는 과정속에 숨겨진 과정을 보길 원하진 않는다. 그들은 + 간결하고 멋진 답을 보길 원한다."* + +커뮤니티와 협력하며 뛰어난 답을 찾는 것과 여러분들의 끝마치지 못한 작업들 +사이에 균형을 유지해야 하는 것은 어려울지도 모른다. 그러므로 프로세스의 +초반에 여러분의 작업을 향상시키기위한 피드백을 얻는 것 뿐만 아니라 +여러분들의 변경들을 작은 묶음으로 유지해서 심지어는 여러분의 작업의 +모든 부분이 지금은 포함될 준비가 되어있지 않지만 작은 부분은 벌써 +받아들여질 수 있도록 유지하는 것이 바람직하다. + +또한 완성되지 않았고 "나중에 수정될 것이다." 와 같은 것들을 포함하는 +패치들은 받아들여지지 않을 것이라는 점을 유념하라. + + +변경을 정당화해라 +----------------- + +여러분들의 나누어진 패치들을 리눅스 커뮤니티가 왜 반영해야 하는지를 +알도록 하는 것은 매우 중요하다. 새로운 기능들이 필요하고 유용하다는 +것은 반드시 그에 합당한 이유가 있어야 한다. + + +변경을 문서화해라 +----------------- + +여러분이 패치를 보내려 할때는 여러분이 무엇을 말하려고 하는지를 충분히 +생각하여 이메일을 작성해야 한다. 이 정보는 패치를 위한 ChangeLog가 될 +것이다. 그리고 항상 그 내용을 보길 원하는 모든 사람들을 위해 보존될 +것이다. 패치는 완벽하게 다음과 같은 내용들을 포함하여 설명해야 한다. + + - 변경이 왜 필요한지 + - 패치에 관한 전체 설계 접근(approach) + - 구현 상세들 + - 테스트 결과들 + +이것이 무엇인지 더 자세한 것을 알고 싶다면 다음 문서의 ChageLog 항을 봐라. + + "The Perfect Patch" + + https://www.ozlabs.org/~akpm/stuff/tpp.txt + + +이 모든 것을 하는 것은 매우 어려운 일이다. 완벽히 소화하는 데는 적어도 몇년이 +걸릴 수도 있다. 많은 인내와 결심이 필요한 계속되는 개선의 과정이다. 그러나 +가능한한 포기하지 말라. 많은 사람들은 이전부터 해왔던 것이고 그 사람들도 +정확하게 여러분들이 지금 서 있는 그 곳부터 시작했었다. + + + + +---------- + +"개발 프로세스"(https://lwn.net/Articles/94386/) 섹션을 +작성하는데 있어 참고할 문서를 사용하도록 허락해준 Paolo Ciarrocchi에게 +감사한다. 여러분들이 말해야 할 것과 말해서는 안되는 것의 목록 중 일부를 제공해준 +Randy Dunlap과 Gerrit Huizenga에게 감사한다. 또한 검토와 의견 그리고 +공헌을 아끼지 않은 Pat Mochel, Hanna Linder, Randy Dunlap, Kay Sievers, +Vojtech Pavlik, Jan Kara, Josh Boyer, Kees Cook, Andrew Morton, Andi Kleen, +Vadim Lobanov, Jesper Juhl, Adrian Bunk, Keri Harris, Frans Pop, +David A. Wheeler, Junio Hamano, Michael Kerrisk, and Alex Shepard에게도 감사를 전한다. +그들의 도움이 없었다면 이 문서는 존재하지 않았을 것이다. 
+ + + +메인테이너: Greg Kroah-Hartman diff --git a/Documentation/translations/sp_SP/scheduler/index.rst b/Documentation/translations/sp_SP/scheduler/index.rst index 768488d6f001e2..32f9fd7517b278 100644 --- a/Documentation/translations/sp_SP/scheduler/index.rst +++ b/Documentation/translations/sp_SP/scheduler/index.rst @@ -6,3 +6,4 @@ :maxdepth: 1 sched-design-CFS + sched-eevdf diff --git a/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst b/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst index 90a153cad4e865..dc728c739e28d2 100644 --- a/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst +++ b/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst @@ -14,10 +14,10 @@ Gestor de tareas CFS CFS viene de las siglas en inglés de "Gestor de tareas totalmente justo" ("Completely Fair Scheduler"), y es el nuevo gestor de tareas de escritorio -implementado por Ingo Molnar e integrado en Linux 2.6.23. Es el sustituto de -el previo gestor de tareas SCHED_OTHER. - -Nota: El planificador EEVDF fue incorporado más recientemente al kernel. +implementado por Ingo Molnar e integrado en Linux 2.6.23. Es el sustituto +del previo gestor de tareas SCHED_OTHER. Hoy en día se está abriendo camino +para el gestor de tareas EEVDF, cuya documentación se puede ver en +Documentation/scheduler/sched-eevdf.rst El 80% del diseño de CFS puede ser resumido en una única frase: CFS básicamente modela una "CPU ideal, precisa y multi-tarea" sobre hardware @@ -109,7 +109,7 @@ para que se ejecute, y la tarea en ejecución es interrumpida. ================================== CFS usa una granularidad de nanosegundos y no depende de ningún -jiffie o detalles como HZ. De este modo, el gestor de tareas CFS no tiene +jiffy o detalles como HZ. De este modo, el gestor de tareas CFS no tiene noción de "ventanas de tiempo" de la forma en que tenía el gestor de tareas previo, y tampoco tiene heurísticos. Únicamente hay un parámetro central ajustable (se ha de cambiar en CONFIG_SCHED_DEBUG): diff --git a/Documentation/translations/sp_SP/scheduler/sched-eevdf.rst b/Documentation/translations/sp_SP/scheduler/sched-eevdf.rst new file mode 100644 index 00000000000000..d54736f297b835 --- /dev/null +++ b/Documentation/translations/sp_SP/scheduler/sched-eevdf.rst @@ -0,0 +1,58 @@ + +.. include:: ../disclaimer-sp.rst + +:Original: Documentation/scheduler/sched-eevdf.rst +:Translator: Sergio González Collado + +====================== +Gestor de tareas EEVDF +====================== + +El gestor de tareas EEVDF, del inglés: "Earliest Eligible Virtual Deadline +First", fue presentado por primera vez en una publicación científica en +1995 [1]. El kernel de Linux comenzó a transicionar hacia EEVPF en la +versión 6.6 (y como una nueva opción en 2024), alejándose del gestor +de tareas CFS, en favor de una versión de EEVDF propuesta por Peter +Zijlstra en 2023 [2-4]. Más información relativa a CFS puede encontrarse +en Documentation/scheduler/sched-design-CFS.rst. + +De forma parecida a CFS, EEVDF intenta distribuir el tiempo de ejecución +de la CPU de forma equitativa entre todas las tareas que tengan la misma +prioridad y puedan ser ejecutables. Para eso, asigna un tiempo de +ejecución virtual a cada tarea, creando un "retraso" que puede ser usado +para determinar si una tarea ha recibido su cantidad justa de tiempo +de ejecución en la CPU. 
De esta manera, una tarea con un "retraso" +positivo, es porque se le debe tiempo de ejecución, mientras que una +con "retraso" negativo implica que la tarea ha excedido su cuota de +tiempo. EEVDF elige las tareas con un "retraso" mayor igual a cero y +calcula un tiempo límite de ejecución virtual (VD, del inglés: virtual +deadline) para cada una, eligiendo la tarea con la VD más próxima para +ser ejecutada a continuación. Es importante darse cuenta que esto permite +que la tareas que sean sensibles a la latencia que tengan porciones de +tiempos de ejecución de CPU más cortos ser priorizadas, lo cual ayuda con +su menor tiempo de respuesta. + +Ahora mismo se está discutiendo cómo gestionar esos "retrasos", especialmente +en tareas que estén en un estado durmiente; pero en el momento en el que +se escribe este texto EEVDF usa un mecanismo de "decaimiento" basado en el +tiempo virtual de ejecución (VRT, del inglés: virtual run time). Esto previene +a las tareas de abusar del sistema simplemente durmiendo brevemente para +reajustar su retraso negativo: cuando una tarea duerme, esta permanece en +la cola de ejecución pero marcada para "desencolado diferido", permitiendo +a su retraso decaer a lo largo de VRT. Por tanto, las tareas que duerman +por más tiempo eventualmente eliminarán su retraso. Finalmente, las tareas +pueden adelantarse a otras si su VD es más próximo en el tiempo, y las +tareas podrán pedir porciones de tiempo específicas con la nueva llamada +del sistema sched_setattr(), todo esto facilitara el trabajo de las aplicaciones +que sean sensibles a las latencias. + +REFERENCIAS +=========== + +[1] https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=805acf7726282721504c8f00575d91ebfd750564 + +[2] https://lore.kernel.org/lkml/a79014e6-ea83-b316-1e12-2ae056bda6fa@linux.vnet.ibm.com/ + +[3] https://lwn.net/Articles/969062/ + +[4] https://lwn.net/Articles/925371/ diff --git a/Documentation/translations/zh_CN/admin-guide/index.rst b/Documentation/translations/zh_CN/admin-guide/index.rst index 0db80ab830a049..15d9ab5993a790 100644 --- a/Documentation/translations/zh_CN/admin-guide/index.rst +++ b/Documentation/translations/zh_CN/admin-guide/index.rst @@ -37,7 +37,6 @@ Todolist: reporting-issues reporting-regressions - security-bugs bug-hunting bug-bisect tainted-kernels diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst index bf21ff84f396cd..cff7b6f98c5941 100644 --- a/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst +++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst @@ -15,7 +15,7 @@ 本文通过演示DAMON的默认用户空间工具,简要地介绍了如何使用DAMON。请注意,为了简洁 起见,本文档只描述了它的部分功能。更多细节请参考该工具的使用文档。 -`doc `_ . +`doc `_ . 
前提条件 @@ -31,7 +31,7 @@ ------------ 在演示中,我们将使用DAMON的默认用户空间工具,称为DAMON Operator(DAMO)。它可以在 -https://github.com/awslabs/damo找到。下面的例子假设DAMO在你的$PATH上。当然,但 +https://github.com/damonitor/damo找到。下面的例子假设DAMO在你的$PATH上。当然,但 这并不是强制性的。 因为DAMO使用了DAMON的sysfs接口(详情请参考:doc:`usage`),你应该确保 diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst index da2745464ece45..50f6f0b6bf11ab 100644 --- a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst +++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst @@ -16,16 +16,16 @@ DAMON 为不同的用户提供了下面这些接口。 - *DAMON用户空间工具。* - `这 `_ 为有这特权的人, 如系统管理员,希望有一个刚好 + `这 `_ 为有这特权的人, 如系统管理员,希望有一个刚好 可以工作的人性化界面。 使用它,用户可以以人性化的方式使用DAMON的主要功能。不过,它可能不会为特殊情况进行高度调整。 它同时支持虚拟和物理地址空间的监测。更多细节,请参考它的 `使用文档 - `_。 + `_。 - *sysfs接口。* :ref:`这 ` 是为那些希望更高级的使用DAMON的特权用户空间程序员准备的。 使用它,用户可以通过读取和写入特殊的sysfs文件来使用DAMON的主要功能。因此,你可以编写和使 用你个性化的DAMON sysfs包装程序,代替你读/写sysfs文件。 `DAMON用户空间工具 - `_ 就是这种程序的一个例子 它同时支持虚拟和物理地址 + `_ 就是这种程序的一个例子 它同时支持虚拟和物理地址 空间的监测。注意,这个界面只提供简单的监测结果 :ref:`统计 `。对于详细的监测 结果,DAMON提供了一个:ref:`跟踪点 `。 - *debugfs interface.* @@ -332,7 +332,7 @@ tried_regions// # echo 500 > watermarks/mid # echo 300 > watermarks/low -请注意,我们强烈建议使用用户空间的工具,如 `damo `_ , +请注意,我们强烈建议使用用户空间的工具,如 `damo `_ , 而不是像上面那样手动读写文件。以上只是一个例子。 debugfs接口 diff --git a/Documentation/translations/zh_CN/admin-guide/reporting-issues.rst b/Documentation/translations/zh_CN/admin-guide/reporting-issues.rst index 59e51e3539b43b..9ff4ba94391d87 100644 --- a/Documentation/translations/zh_CN/admin-guide/reporting-issues.rst +++ b/Documentation/translations/zh_CN/admin-guide/reporting-issues.rst @@ -300,7 +300,7 @@ Documentation/admin-guide/reporting-regressions.rst 对此进行了更详细的 添加到回归跟踪列表中,以确保它不会被忽略。 什么是安全问题留给您自己判断。在继续之前,请考虑阅读 -Documentation/translations/zh_CN/admin-guide/security-bugs.rst , +Documentation/translations/zh_CN/process/security-bugs.rst , 因为它提供了如何最恰当地处理安全问题的额外细节。 当发生了完全无法接受的糟糕事情时,此问题就是一个“非常严重的问题”。例如, @@ -983,7 +983,7 @@ Documentation/admin-guide/reporting-regressions.rst ;它还提供了大量其 报告,请将报告的文本转发到这些地址;但请在报告的顶部加上注释,表明您提交了 报告,并附上工单链接。 -更多信息请参见 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。 +更多信息请参见 Documentation/translations/zh_CN/process/security-bugs.rst 。 发布报告后的责任 diff --git a/Documentation/translations/zh_CN/admin-guide/security-bugs.rst b/Documentation/translations/zh_CN/admin-guide/security-bugs.rst deleted file mode 100644 index d6b8f8a4e7f63d..00000000000000 --- a/Documentation/translations/zh_CN/admin-guide/security-bugs.rst +++ /dev/null @@ -1,74 +0,0 @@ -.. 
include:: ../disclaimer-zh_CN.rst - -:Original: :doc:`../../../process/security-bugs` - -:译者: - - 吴想成 Wu XiangCheng - -安全缺陷 -========= - -Linux内核开发人员非常重视安全性。因此我们想知道何时发现了安全漏洞,以便尽快 -修复和披露。请向Linux内核安全团队报告安全漏洞。 - -联络 ------ - -可以通过电子邮件联系Linux内核安全团队。这是一个安全人员 -的私有列表,他们将帮助验证错误报告并开发和发布修复程序。如果您已经有了一个 -修复,请将其包含在您的报告中,这样可以大大加快进程。安全团队可能会从区域维护 -人员那里获得额外的帮助,以理解和修复安全漏洞。 - -与任何缺陷一样,提供的信息越多,诊断和修复就越容易。如果您不清楚哪些信息有用, -请查看“Documentation/translations/zh_CN/admin-guide/reporting-issues.rst”中 -概述的步骤。任何利用漏洞的攻击代码都非常有用,未经报告者同意不会对外发布,除 -非已经公开。 - -请尽可能发送无附件的纯文本电子邮件。如果所有的细节都藏在附件里,那么就很难对 -一个复杂的问题进行上下文引用的讨论。把它想象成一个 -:doc:`常规的补丁提交 <../process/submitting-patches>` (即使你还没有补丁): -描述问题和影响,列出复现步骤,然后给出一个建议的解决方案,所有这些都是纯文本的。 - -披露和限制信息 ---------------- - -安全列表不是公开渠道。为此,请参见下面的协作。 - -一旦开发出了健壮的补丁,发布过程就开始了。对公开的缺陷的修复会立即发布。 - -尽管我们倾向于在未公开缺陷的修复可用时即发布补丁,但应报告者或受影响方的请求, -这可能会被推迟到发布过程开始后的7日内,如果根据缺陷的严重性需要更多的时间, -则可额外延长到14天。推迟发布修复的唯一有效原因是为了适应QA的逻辑和需要发布 -协调的大规模部署。 - -虽然可能与受信任的个人共享受限信息以开发修复,但未经报告者许可,此类信息不会 -与修复程序一起发布或发布在任何其他披露渠道上。这包括但不限于原始错误报告和 -后续讨论(如有)、漏洞、CVE信息或报告者的身份。 - -换句话说,我们唯一感兴趣的是修复缺陷。提交给安全列表的所有其他资料以及对报告 -的任何后续讨论,即使在解除限制之后,也将永久保密。 - -协调 ------- - -对敏感缺陷(例如那些可能导致权限提升的缺陷)的修复可能需要与私有邮件列表 -进行协调,以便分发供应商做好准备,在公开披露 -上游补丁时发布一个已修复的内核。发行版将需要一些时间来测试建议的补丁,通常 -会要求至少几天的限制,而供应商更新发布更倾向于周二至周四。若合适,安全团队 -可以协助这种协调,或者报告者可以从一开始就包括linux发行版。在这种情况下,请 -记住在电子邮件主题行前面加上“[vs]”,如linux发行版wiki中所述: -。 - -CVE分配 --------- - -安全团队通常不分配CVE,我们也不需要它们来进行报告或修复,因为这会使过程不必 -要的复杂化,并可能耽误缺陷处理。如果报告者希望在公开披露之前分配一个CVE编号, -他们需要联系上述的私有linux-distros列表。当在提供补丁之前已有这样的CVE编号时, -如报告者愿意,最好在提交消息中提及它。 - -保密协议 ---------- - -Linux内核安全团队不是一个正式的机构实体,因此无法签订任何保密协议。 diff --git a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst index f1e9ab18206c33..472761938682c0 100644 --- a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst +++ b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst @@ -87,6 +87,38 @@ PCH-LPC/PCH-MSI,然后被EIOINTC统一收集,再直接到达CPUINTC:: | Devices | +---------+ +高级扩展IRQ模型 +=============== + +在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC, +CPU串口(UARTs)中断发送到LIOINTC,PCH-MSI中断发送到AVECINTC,而后通过AVECINTC直接 +送达CPUINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/PCH-LPC,然后由EIOINTC +统一收集,再直接到达CPUINTC:: + + +-----+ +-----------------------+ +-------+ + | IPI | --> | CPUINTC | <-- | Timer | + +-----+ +-----------------------+ +-------+ + ^ ^ ^ + | | | + +---------+ +----------+ +---------+ +-------+ + | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | + +---------+ +----------+ +---------+ +-------+ + ^ ^ + | | + +---------+ +---------+ + | PCH-PIC | | PCH-MSI | + +---------+ +---------+ + ^ ^ ^ + | | | + +---------+ +---------+ +---------+ + | Devices | | PCH-LPC | | Devices | + +---------+ +---------+ +---------+ + ^ + | + +---------+ + | Devices | + +---------+ + ACPI相关的定义 ============== diff --git a/Documentation/translations/zh_CN/core-api/index.rst b/Documentation/translations/zh_CN/core-api/index.rst index 922cabf7b5dd5a..453a02cd1f4486 100644 --- a/Documentation/translations/zh_CN/core-api/index.rst +++ b/Documentation/translations/zh_CN/core-api/index.rst @@ -49,6 +49,7 @@ generic-radix-tree packing this_cpu_ops + union_find ======= diff --git a/Documentation/translations/zh_CN/core-api/unaligned-memory-access.rst b/Documentation/translations/zh_CN/core-api/unaligned-memory-access.rst index 29c33e7e085596..fbe0989a8ce57f 100644 --- a/Documentation/translations/zh_CN/core-api/unaligned-memory-access.rst +++ b/Documentation/translations/zh_CN/core-api/unaligned-memory-access.rst @@ -175,7 
+175,7 @@ field2会导致非对齐访问,这并不是不合理的。你会期望field2 避免非对齐访问 ============== -避免非对齐访问的最简单方法是使用头文件提供的get_unaligned()和 +避免非对齐访问的最简单方法是使用头文件提供的get_unaligned()和 put_unaligned()宏。 回到前面的一个可能导致非对齐访问的代码例子:: diff --git a/Documentation/translations/zh_CN/core-api/union_find.rst b/Documentation/translations/zh_CN/core-api/union_find.rst new file mode 100644 index 00000000000000..bb93fa8c653325 --- /dev/null +++ b/Documentation/translations/zh_CN/core-api/union_find.rst @@ -0,0 +1,92 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/core-api/union_find.rst + +============================= +Linux中的并查集(Union-Find) +============================= + + +:日期: 2024年6月21日 +:作者: Xavier + +何为并查集,它有什么用? +------------------------ + +并查集是一种数据结构,用于处理一些不交集的合并及查询问题。并查集支持的主要操作: + 初始化:将每个元素初始化为单独的集合,每个集合的初始父节点指向自身。 + + 查询:查询某个元素属于哪个集合,通常是返回集合中的一个“代表元素”。这个操作是为 + 了判断两个元素是否在同一个集合之中。 + + 合并:将两个集合合并为一个。 + +并查集作为一种用于维护集合(组)的数据结构,它通常用于解决一些离线查询、动态连通性和 +图论等相关问题,同时也是用于计算最小生成树的克鲁斯克尔算法中的关键,由于最小生成树在 +网络路由等场景下十分重要,并查集也得到了广泛的引用。此外,并查集在符号计算,寄存器分 +配等方面也有应用。 + +空间复杂度: O(n),n为节点数。 + +时间复杂度:使用路径压缩可以减少查找操作的时间复杂度,使用按秩合并可以减少合并操作的 +时间复杂度,使得并查集每个查询和合并操作的平均时间复杂度仅为O(α(n)),其中α(n)是反阿 +克曼函数,可以粗略地认为并查集的操作有常数的时间复杂度。 + +本文档涵盖了对Linux并查集实现的使用方法。更多关于并查集的性质和实现的信息,参见: + + 维基百科并查集词条 + https://en.wikipedia.org/wiki/Disjoint-set_data_structure + +并查集的Linux实现 +------------------ + +Linux的并查集实现在文件“lib/union_find.c”中。要使用它,需要 +“#include ”。 + +并查集的数据结构定义如下:: + + struct uf_node { + struct uf_node *parent; + unsigned int rank; + }; + +其中parent为当前节点的父节点,rank为当前树的高度,在合并时将rank小的节点接到rank大 +的节点下面以增加平衡性。 + +初始化并查集 +------------- + +可以采用静态或初始化接口完成初始化操作。初始化时,parent 指针指向自身,rank 设置 +为 0。 +示例:: + + struct uf_node my_node = UF_INIT_NODE(my_node); + +或 + + uf_node_init(&my_node); + +查找并查集的根节点 +------------------ + +主要用于判断两个并查集是否属于一个集合,如果根相同,那么他们就是一个集合。在查找过程中 +会对路径进行压缩,提高后续查找效率。 +示例:: + + int connected; + struct uf_node *root1 = uf_find(&node_1); + struct uf_node *root2 = uf_find(&node_2); + if (root1 == root2) + connected = 1; + else + connected = 0; + +合并两个并查集 +-------------- + +对于两个相交的并查集进行合并,会首先查找它们各自的根节点,然后根据根节点秩大小,将小的 +节点连接到大的节点下面。 +示例:: + + uf_union(&node_1, &node_2); diff --git a/Documentation/translations/zh_CN/dev-tools/index.rst b/Documentation/translations/zh_CN/dev-tools/index.rst index c540e4a7d5db99..6a8c637c0be147 100644 --- a/Documentation/translations/zh_CN/dev-tools/index.rst +++ b/Documentation/translations/zh_CN/dev-tools/index.rst @@ -21,6 +21,7 @@ Documentation/translations/zh_CN/dev-tools/testing-overview.rst testing-overview sparse kcov + kcsan gcov kasan ubsan @@ -32,7 +33,6 @@ Todolist: - checkpatch - coccinelle - kmsan - - kcsan - kfence - kgdb - kselftest diff --git a/Documentation/translations/zh_CN/dev-tools/kcsan.rst b/Documentation/translations/zh_CN/dev-tools/kcsan.rst new file mode 100644 index 00000000000000..8c495c17f1090b --- /dev/null +++ b/Documentation/translations/zh_CN/dev-tools/kcsan.rst @@ -0,0 +1,320 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. 
include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/dev-tools/kcsan.rst +:Translator: 刘浩阳 Haoyang Liu + +内核并发消毒剂(KCSAN) +===================== + +内核并发消毒剂(KCSAN)是一个动态竞争检测器,依赖编译时插桩,并且使用基于观察 +点的采样方法来检测竞争。KCSAN 的主要目的是检测 `数据竞争`_。 + +使用 +---- + +KCSAN 受 GCC 和 Clang 支持。使用 GCC 需要版本 11 或更高,使用 Clang 也需要 +版本 11 或更高。 + +为了启用 KCSAN,用如下参数配置内核:: + + CONFIG_KCSAN = y + +KCSAN 提供了几个其他的配置选项来自定义行为(见 ``lib/Kconfig.kcsan`` 中的各自的 +帮助文档以获取更多信息)。 + +错误报告 +~~~~~~~~ + +一个典型数据竞争的报告如下所示:: + + ================================================================== + BUG: KCSAN: data-race in test_kernel_read / test_kernel_write + + write to 0xffffffffc009a628 of 8 bytes by task 487 on cpu 0: + test_kernel_write+0x1d/0x30 + access_thread+0x89/0xd0 + kthread+0x23e/0x260 + ret_from_fork+0x22/0x30 + + read to 0xffffffffc009a628 of 8 bytes by task 488 on cpu 6: + test_kernel_read+0x10/0x20 + access_thread+0x89/0xd0 + kthread+0x23e/0x260 + ret_from_fork+0x22/0x30 + + value changed: 0x00000000000009a6 -> 0x00000000000009b2 + + Reported by Kernel Concurrency Sanitizer on: + CPU: 6 PID: 488 Comm: access_thread Not tainted 5.12.0-rc2+ #1 + Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 + ================================================================== + +报告的头部提供了一个关于竞争中涉及到的函数的简短总结。随后是竞争中的两个线程的 +访问类型和堆栈信息。如果 KCSAN 发现了一个值的变化,那么那个值的旧值和新值会在 +“value changed”这一行单独显示。 + +另一个不太常见的数据竞争类型的报告如下所示:: + + ================================================================== + BUG: KCSAN: data-race in test_kernel_rmw_array+0x71/0xd0 + + race at unknown origin, with read to 0xffffffffc009bdb0 of 8 bytes by task 515 on cpu 2: + test_kernel_rmw_array+0x71/0xd0 + access_thread+0x89/0xd0 + kthread+0x23e/0x260 + ret_from_fork+0x22/0x30 + + value changed: 0x0000000000002328 -> 0x0000000000002329 + + Reported by Kernel Concurrency Sanitizer on: + CPU: 2 PID: 515 Comm: access_thread Not tainted 5.12.0-rc2+ #1 + Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 + ================================================================== + +这个报告是当另一个竞争线程不可能被发现,但是可以从观测的内存地址的值改变而推断 +出来的时候生成的。这类报告总是会带有“value changed”行。这类报告的出现通常是因 +为在竞争线程中缺少插桩,也可能是因为其他原因,比如 DMA 访问。这类报告只会在 +设置了内核参数 ``CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=y`` 时才会出现,而这 +个参数是默认启用的。 + +选择性分析 +~~~~~~~~~~ + +对于一些特定的访问,函数,编译单元或者整个子系统,可能需要禁用数据竞争检测。 +对于静态黑名单,有如下可用的参数: + +* KCSAN 支持使用 ``data_race(expr)`` 注解,这个注解告诉 KCSAN 任何由访问 + ``expr`` 所引起的数据竞争都应该被忽略,其产生的行为后果被认为是安全的。请查阅 + `在 LKMM 中 "标记共享内存访问"`_ 获得更多信息。 + +* 与 ``data_race(...)`` 相似,可以使用类型限定符 ``__data_racy`` 来标记一个变量 + ,所有访问该变量而导致的数据竞争都是故意为之并且应该被 KCSAN 忽略:: + + struct foo { + ... + int __data_racy stats_counter; + ... + }; + +* 使用函数属性 ``__no_kcsan`` 可以对整个函数禁用数据竞争检测:: + + __no_kcsan + void foo(void) { + ... + + 为了动态限制该为哪些函数生成报告,查阅 `Debug 文件系统接口`_ 黑名单/白名单特性。 + +* 为特定的编译单元禁用数据竞争检测,将下列参数加入到 ``Makefile`` 中:: + + KCSAN_SANITIZE_file.o := n + +* 为 ``Makefile`` 中的所有编译单元禁用数据竞争检测,将下列参数添加到相应的 + ``Makefile`` 中:: + + KCSAN_SANITIZE := n + +.. 
_在 LKMM 中 "标记共享内存访问": https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/access-marking.txt + +此外,KCSAN 可以根据偏好设置显示或隐藏整个类别的数据竞争。可以使用如下 +Kconfig 参数进行更改: + +* ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY``: 如果启用了该参数并且通过观测点 + (watchpoint) 观测到一个有冲突的写操作,但是对应的内存地址中存储的值没有改变, + 则不会报告这起数据竞争。 + +* ``CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC``: 假设默认情况下,不超过字大小的简 + 单对齐写入操作是原子的。假设这些写入操作不会受到不安全的编译器优化影响,从而导 + 致数据竞争。该选项使 KCSAN 不报告仅由不超过字大小的简单对齐写入操作引起 + 的冲突所导致的数据竞争。 + +* ``CONFIG_KCSAN_PERMISSIVE``: 启用额外的宽松规则来忽略某些常见类型的数据竞争。 + 与上面的规则不同,这条规则更加复杂,涉及到值改变模式,访问类型和地址。这个 + 选项依赖编译选项 ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=y``。请查看 + ``kernel/kcsan/permissive.h`` 获取更多细节。对于只侧重于特定子系统而不是整个 + 内核报告的测试者和维护者,建议禁用该选项。 + +要使用尽可能严格的规则,选择 ``CONFIG_KCSAN_STRICT=y``,这将配置 KCSAN 尽可 +能紧密地遵循 Linux 内核内存一致性模型(LKMM)。 + +Debug 文件系统接口 +~~~~~~~~~~~~~~~~~~ + +文件 ``/sys/kernel/debug/kcsan`` 提供了如下接口: + +* 读 ``/sys/kernel/debug/kcsan`` 返回不同的运行时统计数据。 + +* 将 ``on`` 或 ``off`` 写入 ``/sys/kernel/debug/kcsan`` 允许打开或关闭 KCSAN。 + +* 将 ``!some_func_name`` 写入 ``/sys/kernel/debug/kcsan`` 会将 + ``some_func_name`` 添加到报告过滤列表中,该列表(默认)会将数据竞争报告中的顶 + 层堆栈帧是列表中函数的情况列入黑名单。 + +* 将 ``blacklist`` 或 ``whitelist`` 写入 ``/sys/kernel/debug/kcsan`` 会改变报告 + 过滤行为。例如,黑名单的特性可以用来过滤掉经常发生的数据竞争。白名单特性可以帮 + 助复现和修复测试。 + +性能调优 +~~~~~~~~ + +影响 KCSAN 整体的性能和 bug 检测能力的核心参数是作为内核命令行参数公开的,其默认 +值也可以通过相应的 Kconfig 选项更改。 + +* ``kcsan.skip_watch`` (``CONFIG_KCSAN_SKIP_WATCH``): 在另一个观测点设置之前每 + 个 CPU 要跳过的内存操作次数。更加频繁的设置观测点将增加观察到竞争情况的可能性 + 。这个参数对系统整体的性能和竞争检测能力影响最显著。 + +* ``kcsan.udelay_task`` (``CONFIG_KCSAN_UDELAY_TASK``): 对于任务,观测点设置之 + 后暂停执行的微秒延迟。值越大,检测到竞争情况的可能性越高。 + +* ``kcsan.udelay_interrupt`` (``CONFIG_KCSAN_UDELAY_INTERRUPT``): 对于中断, + 观测点设置之后暂停执行的微秒延迟。中断对于延迟的要求更加严格,其延迟通常应该小 + 于为任务选择的延迟。 + +它们可以通过 ``/sys/module/kcsan/parameters/`` 在运行时进行调整。 + +数据竞争 +-------- + +在一次执行中,如果两个内存访问存在 *冲突*,在不同的线程中并发执行,并且至少 +有一个访问是 *简单访问*,则它们就形成了 *数据竞争*。如果它们访问了同一个内存地址并且 +至少有一个是写操作,则称它们存在 *冲突*。有关更详细的讨论和定义,见 +`LKMM 中的 "简单访问和数据竞争"`_。 + +.. _LKMM 中的 "简单访问和数据竞争": https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt#n1922 + +与 Linux 内核内存一致性模型(LKMM)的关系 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +LKMM 定义了各种内存操作的传播和排序规则,让开发者可以推理并发代码。最终这允许确 +定并发代码可能的执行情况并判断这些代码是否存在数据竞争。 + +KCSAN 可以识别 *被标记的原子操作* ( ``READ_ONCE``, ``WRITE_ONCE`` , ``atomic_*`` +等),以及内存屏障所隐含的一部分顺序保证。启用 ``CONFIG_KCSAN_WEAK_MEMORY=y`` +配置,KCSAN 会对加载或存储缓冲区进行建模,并可以检测遗漏的 +``smp_mb()``, ``smp_wmb()``, ``smp_rmb()``, ``smp_store_release()``,以及所有的 +具有等效隐含内存屏障的 ``atomic_*`` 操作。 + +请注意,KCSAN 不会报告所有由于缺失内存顺序而导致的数据竞争,特别是在需要内存屏障 +来禁止后续内存操作在屏障之前重新排序的情况下。因此,开发人员应该仔细考虑那些未 +被检查的内存顺序要求。 + +数据竞争以外的竞争检测 +--------------------------- + +对于有着复杂并发设计的代码,竞争状况不总是表现为数据竞争。如果并发操作引起了意 +料之外的系统行为,则认为发生了竞争状况。另一方面,数据竞争是在 C 语言层面定义 +的。内核定义了一些宏定义用来检测非数据竞争的漏洞并发代码的属性。 + +.. note:: + 为了不引入新的文档编译警告,这里不展示宏定义的具体内容,如果想查看具体 + 宏定义可以结合原文(Documentation/dev-tools/kcsan.rst)阅读。 + +实现细节 +-------- + +KCSAN 需要观测两个并发访问。特别重要的是,我们想要(a)增加观测到竞争的机会(尤 +其是很少发生的竞争),以及(b)能够实际观测到这些竞争。我们可以通过(a)注入 +不同的延迟,以及(b)使用地址观测点(或断点)来实现。 + +如果我们在设置了地址观察点的情况下故意延迟一个内存访问,然后观察到观察点被触发 +,那么两个对同一地址的访问就发生了竞争。使用硬件观察点,这是 `DataCollider +`_ 中采用 +的方法。与 DataCollider 不同,KCSAN 不使用硬件观察点,而是依赖于编译器插桩和“软 +观测点”。 + +在 KCSAN 中,观察点是通过一种高效的编码实现的,该编码将访问类型、大小和地址存储 +在一个长整型变量中;使用“软观察点”的好处是具有可移植性和更大的灵活性。然后, +KCSAN依赖于编译器对普通访问的插桩。对于每个插桩的普通访问: + +1. 检测是否存在一个符合的观测点,如果存在,并且至少有一个操作是写操作,则我们发 + 现了一个竞争访问。 + +2. 如果不存在匹配的观察点,则定期的设置一个观测点并随机延迟一小段时间。 + +3. 
在延迟前检查数据值,并在延迟后重新检查数据值;如果值不匹配,我们推测存在一个 + 未知来源的竞争状况。 + +为了检测普通访问和标记访问之间的数据竞争,KCSAN 也对标记访问进行标记,但仅用于 +检查是否存在观察点;即 KCSAN 不会在标记访问上设置观察点。通过不在标记操作上设 +置观察点,如果对一个变量的所有并发访问都被正确标记,KCSAN 将永远不会触发观察点 +,因此也不会报告这些访问。 + +弱内存建模 +~~~~~~~~~~ + +KCSAN 通过建模访问重新排序(使用 ``CONFIG_KCSAN_WEAK_MEMORY=y``)来检测由于缺少 +内存屏障而导致的数据竞争。每个设置了观察点的普通内存访问也会被选择在其函数范围 +内进行模拟重新排序(最多一个正在进行的访问)。 + +一旦某个访问被选择用于重新排序,它将在函数范围内与每个其他访问进行检查。如果遇 +到适当的内存屏障,该访问将不再被考虑进行模拟重新排序。 + +当内存操作的结果应该由屏障排序时,KCSAN 可以检测到仅由于缺失屏障而导致的冲突的 +数据竞争。考虑下面的例子:: + + int x, flag; + void T1(void) + { + x = 1; // data race! + WRITE_ONCE(flag, 1); // correct: smp_store_release(&flag, 1) + } + void T2(void) + { + while (!READ_ONCE(flag)); // correct: smp_load_acquire(&flag) + ... = x; // data race! + } + +当启用了弱内存建模,KCSAN 将考虑对 ``T1`` 中的 ``x`` 进行模拟重新排序。在写入 +``flag`` 之后,x再次被检查是否有并发访问:因为 ``T2`` 可以在写入 +``flag`` 之后继续进行,因此检测到数据竞争。如果遇到了正确的屏障, ``x`` 在正确 +释放 ``flag`` 后将不会被考虑重新排序,因此不会检测到数据竞争。 + +在复杂性上的权衡以及实际的限制意味着只能检测到一部分由于缺失内存屏障而导致的数 +据竞争。由于当前可用的编译器支持,KCSAN 的实现仅限于建模“缓冲”(延迟访问)的 +效果,因为运行时不能“预取”访问。同时要注意,观测点只设置在普通访问上,这是唯 +一一个 KCSAN 会模拟重新排序的访问类型。这意味着标记访问的重新排序不会被建模。 + +上述情况的一个后果是获取 (acquire) 操作不需要屏障插桩(不需要预取)。此外,引 +入地址或控制依赖的标记访问不需要特殊处理(标记访问不能重新排序,后续依赖的访问 +不能被预取)。 + +关键属性 +~~~~~~~~ + +1. **内存开销**:整体的内存开销只有几 MiB,取决于配置。当前的实现是使用一个小长 + 整型数组来编码观测点信息,几乎可以忽略不计。 + +2. **性能开销**:KCSAN 的运行时旨在性能开销最小化,使用一个高效的观测点编码,在 + 快速路径中不需要获取任何锁。在拥有 8 个 CPU 的系统上的内核启动来说: + + - 使用默认 KCSAN 配置时,性能下降 5 倍; + - 仅因运行时快速路径开销导致性能下降 2.8 倍(设置非常大的 + ``KCSAN_SKIP_WATCH`` 并取消设置 ``KCSAN_SKIP_WATCH_RANDOMIZE``)。 + +3. **注解开销**:KCSAN 运行时之外需要的注释很少。因此,随着内核的发展维护的开 + 销也很小。 + +4. **检测设备的竞争写入**:由于设置观测点时会检查数据值,设备的竞争写入也可以 + 被检测到。 + +5. **内存排序**:KCSAN 只了解一部分 LKMM 排序规则;这可能会导致漏报数据竞争( + 假阴性)。 + +6. **分析准确率**: 对于观察到的执行,由于使用采样策略,分析是 *不健全* 的 + (可能有假阴性),但期望得到完整的分析(没有假阳性)。 + +考虑的替代方案 +-------------- + +一个内核数据竞争检测的替代方法是 `Kernel Thread Sanitizer (KTSAN) +`_。KTSAN 是一 +个基于先行发生关系(happens-before)的数据竞争检测器,它显式建立内存操作之间的先 +后发生顺序,这可以用来确定 `数据竞争`_ 中定义的数据竞争。 + +为了建立正确的先行发生关系,KTSAN 必须了解 LKMM 的所有排序规则和同步原语。不幸 +的是,任何遗漏都会导致大量的假阳性,这在包含众多自定义同步机制的内核上下文中特 +别有害。为了跟踪前因后果关系,KTSAN 的实现需要为每个内存位置提供元数据(影子内 +存),这意味着每页内存对应 4 页影子内存,在大型系统上可能会带来数十 GiB 的开销 +。 diff --git a/Documentation/translations/zh_CN/doc-guide/checktransupdate.rst b/Documentation/translations/zh_CN/doc-guide/checktransupdate.rst new file mode 100644 index 00000000000000..d20b4ce66b9fd0 --- /dev/null +++ b/Documentation/translations/zh_CN/doc-guide/checktransupdate.rst @@ -0,0 +1,55 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. 
include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/doc-guide/checktransupdate.rst + +:译者: 慕冬亮 Dongliang Mu + +检查翻译更新 + +这个脚本帮助跟踪不同语言的文档翻译状态,即文档是否与对应的英文版本保持更新。 + +工作原理 +------------ + +它使用 ``git log`` 命令来跟踪翻译提交的最新英文提交(按作者日期排序)和英文文档的 +最新提交。如果有任何差异,则该文件被认为是过期的,然后需要更新的提交将被收集并报告。 + +实现的功能 + +- 检查特定语言中的所有文件 +- 检查单个文件或一组文件 +- 提供更改输出格式的选项 +- 跟踪没有翻译过的文件的翻译状态 + +用法 +----- + +:: + + ./scripts/checktransupdate.py --help + +具体用法请参考参数解析器的输出 + +示例 + +- ``./scripts/checktransupdate.py -l zh_CN`` + 这将打印 zh_CN 语言中需要更新的所有文件。 +- ``./scripts/checktransupdate.py Documentation/translations/zh_CN/dev-tools/testing-overview.rst`` + 这将只打印指定文件的状态。 + +然后输出类似如下的内容: + +:: + + Documentation/dev-tools/kfence.rst + No translation in the locale of zh_CN + + Documentation/translations/zh_CN/dev-tools/testing-overview.rst + commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs") + 1 commits needs resolving in total + +待实现的功能 + +- 文件参数可以是文件夹而不仅仅是单个文件 diff --git a/Documentation/translations/zh_CN/doc-guide/index.rst b/Documentation/translations/zh_CN/doc-guide/index.rst index 78c2e9a1697fa9..0ac1fc9315ea35 100644 --- a/Documentation/translations/zh_CN/doc-guide/index.rst +++ b/Documentation/translations/zh_CN/doc-guide/index.rst @@ -18,6 +18,7 @@ parse-headers contributing maintainer-profile + checktransupdate .. only:: subproject and html diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst index 20b9d4270d1f83..7574e167318091 100644 --- a/Documentation/translations/zh_CN/index.rst +++ b/Documentation/translations/zh_CN/index.rst @@ -89,10 +89,10 @@ TODOList: admin-guide/index admin-guide/reporting-issues.rst userspace-api/index + 内核构建系统 TODOList: -* 内核构建系统 * 用户空间工具 也可参考独立于内核文档的 `Linux 手册页 `_ 。 diff --git a/Documentation/translations/zh_CN/kbuild/gcc-plugins.rst b/Documentation/translations/zh_CN/kbuild/gcc-plugins.rst new file mode 100644 index 00000000000000..67a8abbf5887ab --- /dev/null +++ b/Documentation/translations/zh_CN/kbuild/gcc-plugins.rst @@ -0,0 +1,126 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/kbuild/gcc-plugins.rst +:Translator: 慕冬亮 Dongliang Mu + +================ +GCC 插件基础设施 +================ + + +介绍 +==== + +GCC 插件是为编译器提供额外功能的可加载模块 [1]_。它们对于运行时插装和静态分析非常有用。 +我们可以在编译过程中通过回调 [2]_,GIMPLE [3]_,IPA [4]_ 和 RTL Passes [5]_ +(译者注:Pass 是编译器所采用的一种结构化技术,用于完成编译对象的分析、优化或转换等功能) +来分析、修改和添加更多的代码。 + +内核的 GCC 插件基础设施支持构建树外模块、交叉编译和在单独的目录中构建。插件源文件必须由 +C++ 编译器编译。 + +目前 GCC 插件基础设施只支持一些架构。搜索 "select HAVE_GCC_PLUGINS" 来查找支持 +GCC 插件的架构。 + +这个基础设施是从 grsecurity [6]_ 和 PaX [7]_ 移植过来的。 + +-- + +.. [1] https://gcc.gnu.org/onlinedocs/gccint/Plugins.html +.. [2] https://gcc.gnu.org/onlinedocs/gccint/Plugin-API.html#Plugin-API +.. [3] https://gcc.gnu.org/onlinedocs/gccint/GIMPLE.html +.. [4] https://gcc.gnu.org/onlinedocs/gccint/IPA.html +.. [5] https://gcc.gnu.org/onlinedocs/gccint/RTL.html +.. [6] https://grsecurity.net/ +.. 
[7] https://pax.grsecurity.net/ + + +目的 +==== + +GCC 插件的设计目的是提供一个用于试验 GCC 或 Clang 上游没有的潜在编译器功能的场所。 +一旦它们的实用性得到验证,这些功能将被添加到 GCC(和 Clang)的上游。随后,在所有 +支持的 GCC 版本都支持这些功能后,它们会被从内核中移除。 + +具体来说,新插件应该只实现上游编译器(GCC 和 Clang)不支持的功能。 + +当 Clang 中存在 GCC 中不存在的某项功能时,应努力将该功能做到 GCC 上游(而不仅仅 +是作为内核专用的 GCC 插件),以使整个生态都能从中受益。 + +类似的,如果 GCC 插件提供的功能在 Clang 中 **不** 存在,但该功能被证明是有用的,也应 +努力将该功能上传到 GCC(和 Clang)。 + +在上游 GCC 提供了某项功能后,该插件将无法在相应的 GCC 版本(以及更高版本)下编译。 +一旦所有内核支持的 GCC 版本都提供了该功能,该插件将从内核中移除。 + + +文件 +==== + +**$(src)/scripts/gcc-plugins** + + 这是 GCC 插件的目录。 + +**$(src)/scripts/gcc-plugins/gcc-common.h** + + 这是 GCC 插件的兼容性头文件。 + 应始终包含它,而不是单独的 GCC 头文件。 + +**$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h, +$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h, +$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h, +$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h** + + 这些头文件可以自动生成 GIMPLE、SIMPLE_IPA、IPA 和 RTL passes 的注册结构。 + 与手动创建结构相比,它们更受欢迎。 + + +用法 +==== + +你必须为你的 GCC 版本安装 GCC 插件头文件,以 Ubuntu 上的 gcc-10 为例:: + + apt-get install gcc-10-plugin-dev + +或者在 Fedora 上:: + + dnf install gcc-plugin-devel libmpc-devel + +或者在 Fedora 上使用包含插件的交叉编译器时:: + + dnf install libmpc-devel + +在内核配置中启用 GCC 插件基础设施与一些你想使用的插件:: + + CONFIG_GCC_PLUGINS=y + CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y + ... + +运行 gcc(本地或交叉编译器),确保能够检测到插件头文件:: + + gcc -print-file-name=plugin + CROSS_COMPILE=arm-linux-gnu- ${CROSS_COMPILE}gcc -print-file-name=plugin + +"plugin" 这个词意味着它们没有被检测到:: + + plugin + +完整的路径则表示插件已经被检测到:: + + /usr/lib/gcc/x86_64-redhat-linux/12/plugin + +编译包括插件在内的最小工具集:: + + make scripts + +或者直接在内核中运行 make,使用循环复杂性 GCC 插件编译整个内核。 + + +4. 如何添加新的 GCC 插件 +======================== + +GCC 插件位于 scripts/gcc-plugins/。你需要将插件源文件放在 scripts/gcc-plugins/ 目录下。 +子目录创建并不支持,你必须添加在 scripts/gcc-plugins/Makefile、scripts/Makefile.gcc-plugins +和相关的 Kconfig 文件中。 diff --git a/Documentation/translations/zh_CN/kbuild/headers_install.rst b/Documentation/translations/zh_CN/kbuild/headers_install.rst new file mode 100644 index 00000000000000..02cb8896e555a0 --- /dev/null +++ b/Documentation/translations/zh_CN/kbuild/headers_install.rst @@ -0,0 +1,39 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/kbuild/headers_install.rst +:Translator: 慕冬亮 Dongliang Mu + +============================ +导出内核头文件供用户空间使用 +============================ + +"make headers_install" 命令以适合于用户空间程序的形式导出内核头文件。 + +Linux 内核导出的头文件描述了用户空间程序尝试使用内核服务的 API。这些内核 +头文件被系统的 C 库(例如 glibc 和 uClibc)用于定义可用的系统调用,以及 +与这些系统调用一起使用的常量和结构。C 库的头文件包括来自 linux 子目录的 +内核头文件。系统的 libc 头文件通常被安装在默认位置 /usr/include,而内核 +头文件在该位置的子目录中(主要是 /usr/include/linux 和 /usr/include/asm)。 + +内核头文件向后兼容,但不向前兼容。这意味着使用旧内核头文件的 C 库构建的程序 +可以在新内核上运行(尽管它可能无法访问新特性),但使用新内核头文件构建的程序 +可能无法在旧内核上运行。 + +"make headers_install" 命令可以在内核源代码的顶层目录中运行(或使用标准 +的树外构建)。它接受两个可选参数:: + + make headers_install ARCH=i386 INSTALL_HDR_PATH=/usr + +ARCH 表明为其生成头文件的架构,默认为当前架构。导出内核头文件的 linux/asm +目录是基于特定平台的,要查看支持架构的完整列表,使用以下命令:: + + ls -d include/asm-* | sed 's/.*-//' + +INSTALL_HDR_PATH 表明头文件的安装位置,默认为 "./usr"。 + +该命令会在 INSTALL_HDR_PATH 中自动创建创建一个 'include' 目录,而头文件 +会被安装在 INSTALL_HDR_PATH/include 中。 + +内核头文件导出的基础设施由 David Woodhouse 维护。 diff --git a/Documentation/translations/zh_CN/kbuild/index.rst b/Documentation/translations/zh_CN/kbuild/index.rst new file mode 100644 index 00000000000000..b51655d981f692 --- /dev/null +++ b/Documentation/translations/zh_CN/kbuild/index.rst @@ -0,0 +1,35 @@ +.. SPDX-License-Identifier: GPL-2.0 + +.. 
include:: ../disclaimer-zh_CN.rst + +:Original: Documentation/kbuild/index.rst +:Translator: 慕冬亮 Dongliang Mu + +============ +内核编译系统 +============ + +.. toctree:: + :maxdepth: 1 + + headers_install + gcc-plugins + +TODO: + +- kconfig-language +- kconfig-macro-language +- kbuild +- kconfig +- makefiles +- modules +- issues +- reproducible-builds +- llvm + +.. only:: subproject and html + + 目录 + ===== + + * :ref:`genindex` diff --git a/Documentation/translations/zh_CN/mm/page_migration.rst b/Documentation/translations/zh_CN/mm/page_migration.rst index f95063826a150d..8c8461c6cb9fb3 100644 --- a/Documentation/translations/zh_CN/mm/page_migration.rst +++ b/Documentation/translations/zh_CN/mm/page_migration.rst @@ -50,8 +50,8 @@ mbind()设置一个新的内存策略。一个进程的页面也可以通过sys_ 1. 从LRU中移除页面。 - 要迁移的页面列表是通过扫描页面并把它们移到列表中来生成的。这是通过调用 isolate_lru_page() - 来完成的。调用isolate_lru_page()增加了对该页的引用,这样在页面迁移发生时它就不会 + 要迁移的页面列表是通过扫描页面并把它们移到列表中来生成的。这是通过调用 folio_isolate_lru() + 来完成的。调用folio_isolate_lru()增加了对该页的引用,这样在页面迁移发生时它就不会 消失。它还可以防止交换器或其他扫描器遇到该页。 @@ -65,7 +65,7 @@ migrate_pages()如何工作 ======================= migrate_pages()对它的页面列表进行了多次处理。如果当时对一个页面的所有引用都可以被移除, -那么这个页面就会被移动。该页已经通过isolate_lru_page()从LRU中移除,并且refcount被 +那么这个页面就会被移动。该页已经通过folio_isolate_lru()从LRU中移除,并且refcount被 增加,以便在页面迁移发生时不释放该页。 步骤: diff --git a/Documentation/translations/zh_CN/process/index.rst b/Documentation/translations/zh_CN/process/index.rst index 5a5cd7c01c620b..3bcb3bdaf5332d 100644 --- a/Documentation/translations/zh_CN/process/index.rst +++ b/Documentation/translations/zh_CN/process/index.rst @@ -49,10 +49,11 @@ TODOLIST: embargoed-hardware-issues cve + security-bugs TODOLIST: -* security-bugs +* handling-regressions 其它大多数开发人员感兴趣的社区指南: diff --git a/Documentation/translations/zh_CN/process/security-bugs.rst b/Documentation/translations/zh_CN/process/security-bugs.rst new file mode 100644 index 00000000000000..a8f5fcbfadc9a6 --- /dev/null +++ b/Documentation/translations/zh_CN/process/security-bugs.rst @@ -0,0 +1,84 @@ +.. SPDX-License-Identifier: GPL-2.0-or-later + +.. 
include:: ../disclaimer-zh_CN.rst + +:Original: :doc:`../../../process/security-bugs` + +:译者: + + 吴想成 Wu XiangCheng + 慕冬亮 Dongliang Mu + +安全缺陷 +========= + +Linux内核开发人员非常重视安全性。因此我们想知道何时发现了安全漏洞,以便尽快 +修复和披露。请向Linux内核安全团队报告安全漏洞。 + +联络 +----- + +可以通过电子邮件联系Linux内核安全团队。这是一个安全人员 +的私有列表,他们将帮助验证错误报告并开发和发布修复程序。如果您已经有了一个 +修复,请将其包含在您的报告中,这样可以大大加快处理进程。安全团队可能会从区域维护 +人员那里获得额外的帮助,以理解和修复安全漏洞。 + +与任何缺陷一样,提供的信息越多,诊断和修复就越容易。如果您不清楚哪些信息有用, +请查看“Documentation/translations/zh_CN/admin-guide/reporting-issues.rst”中 +概述的步骤。任何利用漏洞的攻击代码都非常有用,未经报告者同意不会对外发布, +除非已经公开。 + +请尽可能发送无附件的纯文本电子邮件。如果所有的细节都藏在附件里,那么就很难对 +一个复杂的问题进行上下文引用的讨论。把它想象成一个 +:doc:`常规的补丁提交 <../process/submitting-patches>` (即使你还没有补丁): +描述问题和影响,列出复现步骤,然后给出一个建议的解决方案,所有这些都是纯文本的。 + +披露和限制信息 +--------------- + +安全列表不是公开渠道。为此,请参见下面的协作。 + +一旦开发出了健壮的补丁,发布过程就开始了。对公开的缺陷的修复会立即发布。 + +尽管我们倾向于在未公开缺陷的修复可用时即发布补丁,但应报告者或受影响方的请求, +这可能会被推迟到发布过程开始后的7日内,如果根据缺陷的严重性需要更多的时间, +则可额外延长到14天。推迟发布修复的唯一有效原因是为了适应QA的逻辑和需要发布 +协调的大规模部署。 + +虽然可能与受信任的个人共享受限信息以开发修复,但未经报告者许可,此类信息不会 +与修复程序一起发布或发布在任何其他披露渠道上。这包括但不限于原始错误报告和 +后续讨论(如有)、漏洞、CVE信息或报告者的身份。 + +换句话说,我们唯一感兴趣的是修复缺陷。提交给安全列表的所有其他资料以及对报告 +的任何后续讨论,即使在解除限制之后,也将永久保密。 + +与其他团队协调 +-------------- + +虽然内核安全团队仅关注修复漏洞,但还有其他组织关注修复发行版上的安全问题以及协调 +操作系统厂商的漏洞披露。协调通常由 "linux-distros" 邮件列表处理,而披露则由 +公共 "oss-security" 邮件列表进行。两者紧密关联且被展示在 linux-distros 维基: + + +请注意,这三个列表的各自政策和规则是不同的,因为它们追求不同的目标。内核安全团队 +与其他团队之间的协调很困难,因为对于内核安全团队,保密期(即最大允许天数)是从补丁 +可用时开始,而 "linux-distros" 则从首次发布到列表时开始计算,无论是否存在补丁。 + +因此,内核安全团队强烈建议,作为一位潜在安全问题的报告者,在受影响代码的维护者 +接受补丁之前,且在您阅读上述发行版维基页面并完全理解联系 "linux-distros" +邮件列表会对您和内核社区施加的要求之前,不要联系 "linux-distros" 邮件列表。 +这也意味着通常情况下不要同时抄送两个邮件列表,除非在协调时有已接受但尚未合并的补丁。 +换句话说,在补丁被接受之前,不要抄送 "linux-distros";在修复程序被合并之后, +不要抄送内核安全团队。 + +CVE分配 +-------- + +安全团队不分配 CVE,同时我们也不需要 CVE 来报告或修复漏洞,因为这会使过程不必要 +的复杂化,并可能延误漏洞处理。如果报告者希望为确认的问题分配一个 CVE 编号, +可以联系 :doc:`内核 CVE 分配团队 <../process/cve>` 获取。 + +保密协议 +--------- + +Linux内核安全团队不是一个正式的机构实体,因此无法签订任何保密协议。 diff --git a/Documentation/translations/zh_CN/process/submitting-patches.rst b/Documentation/translations/zh_CN/process/submitting-patches.rst index 7864107e60a85c..7ca16bda37092a 100644 --- a/Documentation/translations/zh_CN/process/submitting-patches.rst +++ b/Documentation/translations/zh_CN/process/submitting-patches.rst @@ -208,7 +208,7 @@ torvalds@linux-foundation.org 。他收到的邮件很多,所以一般来说 如果您有修复可利用安全漏洞的补丁,请将该补丁发送到 security@kernel.org 。对于 严重的bug,可以考虑短期禁令以允许分销商(有时间)向用户发布补丁;在这种情况下, 显然不应将补丁发送到任何公共列表。 -参见 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。 +参见 Documentation/translations/zh_CN/process/security-bugs.rst 。 修复已发布内核中严重错误的补丁程序应该抄送给稳定版维护人员,方法是把以下列行 放进补丁的签准区(注意,不是电子邮件收件人):: diff --git a/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst b/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst index 1822956be0e093..57d36bfbb1b355 100644 --- a/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst +++ b/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst @@ -15,7 +15,7 @@ 本文通過演示DAMON的默認用戶空間工具,簡要地介紹瞭如何使用DAMON。請注意,爲了簡潔 起見,本文檔只描述了它的部分功能。更多細節請參考該工具的使用文檔。 -`doc `_ . +`doc `_ . 
前提條件 @@ -31,7 +31,7 @@ ------------ 在演示中,我們將使用DAMON的默認用戶空間工具,稱爲DAMON Operator(DAMO)。它可以在 -https://github.com/awslabs/damo找到。下面的例子假設DAMO在你的$PATH上。當然,但 +https://github.com/damonitor/damo找到。下面的例子假設DAMO在你的$PATH上。當然,但 這並不是強制性的。 因爲DAMO使用了DAMON的sysfs接口(詳情請參考:doc:`usage`),你應該確保 diff --git a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst index 7464279f9b7de0..fbbbbad59ee472 100644 --- a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst +++ b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst @@ -16,16 +16,16 @@ DAMON 爲不同的用戶提供了下面這些接口。 - *DAMON用戶空間工具。* - `這 `_ 爲有這特權的人, 如系統管理員,希望有一個剛好 + `這 `_ 爲有這特權的人, 如系統管理員,希望有一個剛好 可以工作的人性化界面。 使用它,用戶可以以人性化的方式使用DAMON的主要功能。不過,它可能不會爲特殊情況進行高度調整。 它同時支持虛擬和物理地址空間的監測。更多細節,請參考它的 `使用文檔 - `_。 + `_。 - *sysfs接口。* :ref:`這 ` 是爲那些希望更高級的使用DAMON的特權用戶空間程序員準備的。 使用它,用戶可以通過讀取和寫入特殊的sysfs文件來使用DAMON的主要功能。因此,你可以編寫和使 用你個性化的DAMON sysfs包裝程序,代替你讀/寫sysfs文件。 `DAMON用戶空間工具 - `_ 就是這種程序的一個例子 它同時支持虛擬和物理地址 + `_ 就是這種程序的一個例子 它同時支持虛擬和物理地址 空間的監測。注意,這個界面只提供簡單的監測結果 :ref:`統計 `。對於詳細的監測 結果,DAMON提供了一個:ref:`跟蹤點 `。 - *debugfs interface.* @@ -332,7 +332,7 @@ tried_regions// # echo 500 > watermarks/mid # echo 300 > watermarks/low -請注意,我們強烈建議使用用戶空間的工具,如 `damo `_ , +請注意,我們強烈建議使用用戶空間的工具,如 `damo `_ , 而不是像上面那樣手動讀寫文件。以上只是一個例子。 debugfs接口 diff --git a/Documentation/translations/zh_TW/admin-guide/reporting-issues.rst b/Documentation/translations/zh_TW/admin-guide/reporting-issues.rst index bc132b25f2aed8..1d4e4c7a675051 100644 --- a/Documentation/translations/zh_TW/admin-guide/reporting-issues.rst +++ b/Documentation/translations/zh_TW/admin-guide/reporting-issues.rst @@ -301,7 +301,7 @@ Documentation/admin-guide/reporting-regressions.rst 對此進行了更詳細的 添加到迴歸跟蹤列表中,以確保它不會被忽略。 什麼是安全問題留給您自己判斷。在繼續之前,請考慮閱讀 -Documentation/translations/zh_CN/admin-guide/security-bugs.rst , +Documentation/translations/zh_CN/process/security-bugs.rst , 因爲它提供瞭如何最恰當地處理安全問題的額外細節。 當發生了完全無法接受的糟糕事情時,此問題就是一個“非常嚴重的問題”。例如, @@ -984,7 +984,7 @@ Documentation/admin-guide/reporting-regressions.rst ;它還提供了大量其 報告,請將報告的文本轉發到這些地址;但請在報告的頂部加上註釋,表明您提交了 報告,並附上工單鏈接。 -更多信息請參見 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。 +更多信息請參見 Documentation/translations/zh_CN/process/security-bugs.rst 。 發佈報告後的責任 diff --git a/Documentation/translations/zh_TW/process/submitting-patches.rst b/Documentation/translations/zh_TW/process/submitting-patches.rst index f12f2f193f855c..64de92c07906af 100644 --- a/Documentation/translations/zh_TW/process/submitting-patches.rst +++ b/Documentation/translations/zh_TW/process/submitting-patches.rst @@ -209,7 +209,7 @@ torvalds@linux-foundation.org 。他收到的郵件很多,所以一般來說 如果您有修復可利用安全漏洞的補丁,請將該補丁發送到 security@kernel.org 。對於 嚴重的bug,可以考慮短期禁令以允許分銷商(有時間)向用戶發佈補丁;在這種情況下, 顯然不應將補丁發送到任何公共列表。 -參見 Documentation/translations/zh_CN/admin-guide/security-bugs.rst 。 +參見 Documentation/translations/zh_CN/process/security-bugs.rst 。 修復已發佈內核中嚴重錯誤的補丁程序應該抄送給穩定版維護人員,方法是把以下列行 放進補丁的籤準區(注意,不是電子郵件收件人):: diff --git a/Documentation/usb/functionfs-desc.rst b/Documentation/usb/functionfs-desc.rst new file mode 100644 index 00000000000000..39649774da5472 --- /dev/null +++ b/Documentation/usb/functionfs-desc.rst @@ -0,0 +1,39 @@ +====================== +FunctionFS Descriptors +====================== + +Some of the descriptors that can be written to the FFS gadget are +described below. Device and configuration descriptors are handled +by the composite gadget and are not written by the user to the +FFS gadget. 
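As a hedged illustration (not part of the patch itself), the C sketch below shows how userspace might hand a descriptor block to the gadget through the FunctionFS ``ep0`` file, using the v2 header defined in include/uapi/linux/usb/functionfs.h. The helper name, the flag selection and the pre-built ``descs`` buffer are assumptions for the example; ``descs`` is presumed to already hold the per-speed descriptor counts and the descriptors themselves in the layout that header documents.

.. code-block:: c

    /* Minimal illustrative sketch (not from the patch): push a FunctionFS
     * v2 descriptor block to the "ep0" file.  ep0_fd is the FunctionFS
     * "ep0" file already opened by the caller; "descs"/"len" hold the
     * per-speed counts plus descriptors as laid out in
     * include/uapi/linux/usb/functionfs.h.
     */
    #include <endian.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/usb/functionfs.h>

    static int ffs_send_descriptors(int ep0_fd, const void *descs, size_t len)
    {
            struct usb_functionfs_descs_head_v2 hdr = {
                    .magic  = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
                    .length = htole32(sizeof(hdr) + len),   /* total, header included */
                    .flags  = htole32(FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC),
            };
            char *buf = malloc(sizeof(hdr) + len);
            ssize_t ret;

            if (!buf)
                    return -1;
            memcpy(buf, &hdr, sizeof(hdr));
            memcpy(buf + sizeof(hdr), descs, len);
            /* FunctionFS parses each write() to ep0 as one complete block. */
            ret = write(ep0_fd, buf, sizeof(hdr) + len);
            free(buf);
            return ret < 0 ? -1 : 0;
    }

The strings block goes to the same file in a later write(); only after that do the per-endpoint ``ep#`` files described in Documentation/usb/functionfs.rst appear.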
+ +Descriptors are written to the "ep0" file in the FFS gadget +following the descriptor header. + +.. kernel-doc:: include/uapi/linux/usb/functionfs.h + :doc: descriptors + +Interface Descriptors +--------------------- + +Standard USB interface descriptors may be written. The class/subclass of the +most recent interface descriptor determines what type of class-specific +descriptors are accepted. + +Class-Specific Descriptors +-------------------------- + +Class-specific descriptors are accepted only for the class/subclass of the +most recent interface descriptor. The following are some of the +class-specific descriptors that are supported. + +DFU Functional Descriptor +~~~~~~~~~~~~~~~~~~~~~~~~~ + +When the interface class is USB_CLASS_APP_SPEC and the interface subclass +is USB_SUBCLASS_DFU, a DFU functional descriptor can be provided. +The DFU functional descriptor is a described in the USB specification for +Device Firmware Upgrade (DFU), version 1.1 as of this writing. + +.. kernel-doc:: include/uapi/linux/usb/functionfs.h + :doc: usb_dfu_functional_descriptor diff --git a/Documentation/usb/functionfs.rst b/Documentation/usb/functionfs.rst index d05a775bc45bc0..f7487b0d805784 100644 --- a/Documentation/usb/functionfs.rst +++ b/Documentation/usb/functionfs.rst @@ -25,6 +25,8 @@ interface numbers starting from zero). The FunctionFS changes them as needed also handling situation when numbers differ in different configurations. +For more information about FunctionFS descriptors see :doc:`functionfs-desc` + When descriptors and strings are written "ep#" files appear (one for each declared endpoint) which handle communication on a single endpoint. Again, FunctionFS takes care of the real diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst index b086c7ab72f065..bf555c2270f5c0 100644 --- a/Documentation/usb/gadget-testing.rst +++ b/Documentation/usb/gadget-testing.rst @@ -765,6 +765,17 @@ The uac2 function provides these attributes in its function directory: req_number the number of pre-allocated request for both capture and playback function_name name of the interface + if_ctrl_name topology control name + clksrc_in_name input clock name + clksrc_out_name output clock name + p_it_name playback input terminal name + p_it_ch_name playback input first channel name + p_ot_name playback output terminal name + p_fu_vol_name playback function unit name + c_it_name capture input terminal name + c_it_ch_name capture input first channel name + c_ot_name capture output terminal name + c_fu_vol_name capture functional unit name c_terminal_type code of the capture terminal type p_terminal_type code of the playback terminal type ================ ==================================================== @@ -957,6 +968,14 @@ The uac1 function provides these attributes in its function directory: req_number the number of pre-allocated requests for both capture and playback function_name name of the interface + p_it_name playback input terminal name + p_it_ch_name playback channels name + p_ot_name playback output terminal name + p_fu_vol_name playback mute/volume functional unit name + c_it_name capture input terminal name + c_it_ch_name capture channels name + c_ot_name capture output terminal name + c_fu_vol_name capture mute/volume functional unit name ================ ==================================================== The attributes have sane default values. 
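Like the other attributes in these function directories, the new UAC1/UAC2 name attributes are plain configfs files, so configuring them only means writing a string into the function directory, typically before the gadget is bound to a UDC. The sketch below is an illustration rather than part of the patch: the gadget name ``g1`` and the function instance ``uac2.0`` are assumptions, and the same pattern applies to every attribute listed above.

.. code-block:: c

    /* Minimal illustrative sketch (not from the patch): set one of the new
     * UAC2 name attributes through configfs.  "g1" and "uac2.0" are example
     * names only; any attribute listed above (p_it_name, c_fu_vol_name, ...)
     * is written the same way.
     */
    #include <stdio.h>

    static int set_uac2_attr(const char *attr, const char *value)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/kernel/config/usb_gadget/g1/functions/uac2.0/%s", attr);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs(value, f);
            return fclose(f) ? -1 : 0;
    }

    /* e.g.: set_uac2_attr("c_it_name", "Headset Microphone"); */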
diff --git a/Documentation/usb/index.rst b/Documentation/usb/index.rst index 27955dad95e124..826492c813acd6 100644 --- a/Documentation/usb/index.rst +++ b/Documentation/usb/index.rst @@ -11,6 +11,7 @@ USB support dwc3 ehci functionfs + functionfs-desc gadget_configfs gadget_hid gadget_multi diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index e91c0376ee5934..e4be1378ba26dc 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -78,6 +78,7 @@ Code Seq# Include File Comments 0x03 all linux/hdreg.h 0x04 D2-DC linux/umsdos_fs.h Dead since 2.6.11, but don't reuse these. 0x06 all linux/lp.h +0x07 9F-D0 linux/vmw_vmci_defs.h, uapi/linux/vm_sockets.h 0x09 all linux/raid/md_u.h 0x10 00-0F drivers/char/s390/vmcp.h 0x10 10-1F arch/s390/include/uapi/sclp_ctl.h @@ -292,6 +293,7 @@ Code Seq# Include File Comments 't' 80-8F linux/isdn_ppp.h 't' 90-91 linux/toshiba.h toshiba and toshiba_acpi SMM 'u' 00-1F linux/smb_fs.h gone +'u' 00-2F linux/ublk_cmd.h conflict! 'u' 20-3F linux/uvcvideo.h USB video class host driver 'u' 40-4f linux/udmabuf.h userspace dma-buf misc device 'v' 00-1F linux/ext2_fs.h conflict! diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst index 37dafce8038bbb..c8d3e46badc5bc 100644 --- a/Documentation/userspace-api/landlock.rst +++ b/Documentation/userspace-api/landlock.rst @@ -8,7 +8,7 @@ Landlock: unprivileged access control ===================================== :Author: Mickaël Salaün -:Date: July 2024 +:Date: September 2024 The goal of Landlock is to enable to restrict ambient rights (e.g. global filesystem or network access) for a set of processes. Because Landlock @@ -81,6 +81,9 @@ to be explicit about the denied-by-default access rights. .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | LANDLOCK_ACCESS_NET_CONNECT_TCP, + .scoped = + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET | + LANDLOCK_SCOPE_SIGNAL, }; Because we may not know on which kernel version an application will be @@ -119,6 +122,11 @@ version, and only use the available subset of access rights: case 4: /* Removes LANDLOCK_ACCESS_FS_IOCTL_DEV for ABI < 5 */ ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_IOCTL_DEV; + __attribute__((fallthrough)); + case 5: + /* Removes LANDLOCK_SCOPE_* for ABI < 6 */ + ruleset_attr.scoped &= ~(LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET | + LANDLOCK_SCOPE_SIGNAL); } This enables to create an inclusive ruleset that will contain our rules. @@ -306,6 +314,38 @@ To be allowed to use :manpage:`ptrace(2)` and related syscalls on a target process, a sandboxed process should have a subset of the target process rules, which means the tracee must be in a sub-domain of the tracer. +IPC scoping +----------- + +Similar to the implicit `Ptrace restrictions`_, we may want to further restrict +interactions between sandboxes. Each Landlock domain can be explicitly scoped +for a set of actions by specifying it on a ruleset. For example, if a +sandboxed process should not be able to :manpage:`connect(2)` to a +non-sandboxed process through abstract :manpage:`unix(7)` sockets, we can +specify such restriction with ``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET``. +Moreover, if a sandboxed process should not be able to send a signal to a +non-sandboxed process, we can specify this restriction with +``LANDLOCK_SCOPE_SIGNAL``. + +A sandboxed process can connect to a non-sandboxed process when its domain is +not scoped. 
If a process's domain is scoped, it can only connect to sockets +created by processes in the same scope. +Moreover, If a process is scoped to send signal to a non-scoped process, it can +only send signals to processes in the same scope. + +A connected datagram socket behaves like a stream socket when its domain is +scoped, meaning if the domain is scoped after the socket is connected , it can +still :manpage:`send(2)` data just like a stream socket. However, in the same +scenario, a non-connected datagram socket cannot send data (with +:manpage:`sendto(2)`) outside its scope. + +A process with a scoped domain can inherit a socket created by a non-scoped +process. The process cannot connect to this socket since it has a scoped +domain. + +IPC scoping does not support exceptions, so if a domain is scoped, no rules can +be added to allow access to resources or processes outside of the scope. + Truncating files ---------------- @@ -404,7 +444,7 @@ Access rights ------------- .. kernel-doc:: include/uapi/linux/landlock.h - :identifiers: fs_access net_access + :identifiers: fs_access net_access scope Creating a new ruleset ---------------------- @@ -541,6 +581,20 @@ earlier ABI. Starting with the Landlock ABI version 5, it is possible to restrict the use of :manpage:`ioctl(2)` using the new ``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right. +Abstract UNIX socket scoping (ABI < 6) +-------------------------------------- + +Starting with the Landlock ABI version 6, it is possible to restrict +connections to an abstract :manpage:`unix(7)` socket by setting +``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET`` to the ``scoped`` ruleset attribute. + +Signal scoping (ABI < 6) +------------------------ + +Starting with the Landlock ABI version 6, it is possible to restrict +:manpage:`signal(7)` sending by setting ``LANDLOCK_SCOPE_SIGNAL`` to the +``scoped`` ruleset attribute. + .. _kernel_support: Kernel support diff --git a/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst b/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst index d5e014ce19b581..1d5248979a6dab 100644 --- a/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst +++ b/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst @@ -137,6 +137,12 @@ returns the information to the application. The ioctl never fails. - 0x00000100 - If this capability is set, then :ref:`CEC_ADAP_G_CONNECTOR_INFO` can be used. + * .. _`CEC-CAP-REPLY-VENDOR-ID`: + + - ``CEC_CAP_REPLY_VENDOR_ID`` + - 0x00000200 + - If this capability is set, then + :ref:`CEC_MSG_FL_REPLY_VENDOR_ID ` can be used. Return Value ============ diff --git a/Documentation/userspace-api/media/cec/cec-ioc-receive.rst b/Documentation/userspace-api/media/cec/cec-ioc-receive.rst index 364938ad34df91..3e6c511e054f40 100644 --- a/Documentation/userspace-api/media/cec/cec-ioc-receive.rst +++ b/Documentation/userspace-api/media/cec/cec-ioc-receive.rst @@ -232,6 +232,21 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV'). capability. If that is not set, then the ``EPERM`` error code is returned. + * .. _`CEC-MSG-FL-REPLY-VENDOR-ID`: + + - ``CEC_MSG_FL_REPLY_VENDOR_ID`` + - 4 + - This flag is only available if the ``CEC_CAP_REPLY_VENDOR_ID`` capability + is set. If this flag is set, then the reply is expected to consist of + the ``CEC_MSG_VENDOR_COMMAND_WITH_ID`` opcode followed by the Vendor ID + (in bytes 1-4 of the message), followed by the ``struct cec_msg`` + ``reply`` field. 
+ + Note that this assumes that the byte after the Vendor ID is a + vendor-specific opcode. + + This flag makes it easier to wait for replies to vendor commands. + .. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{10.8cm}| .. _cec-tx-status: diff --git a/Documentation/userspace-api/media/v4l/biblio.rst b/Documentation/userspace-api/media/v4l/biblio.rst index 72aef1759b60bb..35674eeae20d7c 100644 --- a/Documentation/userspace-api/media/v4l/biblio.rst +++ b/Documentation/userspace-api/media/v4l/biblio.rst @@ -334,6 +334,17 @@ VESA DMT :author: Video Electronics Standards Association (http://www.vesa.org) +.. _vesaeddc: + +E-DDC +===== + + +:title: VESA Enhanced Display Data Channel (E-DDC) Standard +:subtitle: Version 1.3 + +:author: Video Electronics Standards Association (http://www.vesa.org) + .. _vesaedid: EDID diff --git a/Documentation/userspace-api/media/v4l/buffer.rst b/Documentation/userspace-api/media/v4l/buffer.rst index 52bbee81c08078..856874341882fb 100644 --- a/Documentation/userspace-api/media/v4l/buffer.rst +++ b/Documentation/userspace-api/media/v4l/buffer.rst @@ -694,41 +694,6 @@ enum v4l2_memory - 4 - The buffer is used for :ref:`DMA shared buffer ` I/O. -.. _memory-flags: - -Memory Consistency Flags ------------------------- - -.. raw:: latex - - \small - -.. tabularcolumns:: |p{7.0cm}|p{2.1cm}|p{8.4cm}| - -.. cssclass:: longtable - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - :widths: 3 1 4 - - * .. _`V4L2-MEMORY-FLAG-NON-COHERENT`: - - - ``V4L2_MEMORY_FLAG_NON_COHERENT`` - - 0x00000001 - - A buffer is allocated either in coherent (it will be automatically - coherent between the CPU and the bus) or non-coherent memory. The - latter can provide performance gains, for instance the CPU cache - sync/flush operations can be avoided if the buffer is accessed by the - corresponding device only and the CPU does not read/write to/from that - buffer. However, this requires extra care from the driver -- it must - guarantee memory consistency by issuing a cache flush/sync when - consistency is needed. If this flag is set V4L2 will attempt to - allocate the buffer in non-coherent memory. The flag takes effect - only if the buffer is used for :ref:`memory mapping ` I/O and the - queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS - ` capability. - .. 
raw:: latex \normalsize diff --git a/Documentation/userspace-api/media/v4l/capture.c.rst b/Documentation/userspace-api/media/v4l/capture.c.rst index eef6772967a1a6..349541b1dac03e 100644 --- a/Documentation/userspace-api/media/v4l/capture.c.rst +++ b/Documentation/userspace-api/media/v4l/capture.c.rst @@ -333,7 +333,7 @@ file: media/v4l/capture.c if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) { if (EINVAL == errno) { fprintf(stderr, "%s does not support " - "memory mappingn", dev_name); + "memory mapping\n", dev_name); exit(EXIT_FAILURE); } else { errno_exit("VIDIOC_REQBUFS"); @@ -391,7 +391,7 @@ file: media/v4l/capture.c if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) { if (EINVAL == errno) { fprintf(stderr, "%s does not support " - "user pointer i/on", dev_name); + "user pointer i/o\n", dev_name); exit(EXIT_FAILURE); } else { errno_exit("VIDIOC_REQBUFS"); @@ -547,7 +547,7 @@ file: media/v4l/capture.c } if (!S_ISCHR(st.st_mode)) { - fprintf(stderr, "%s is no devicen", dev_name); + fprintf(stderr, "%s is no device\n", dev_name); exit(EXIT_FAILURE); } diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst index 22bde00d42df55..0da635691fdcbd 100644 --- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst +++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst @@ -2993,7 +2993,11 @@ This structure contains all loop filter related parameters. See sections - Applications and drivers must set this to zero. * - __u16 - ``max_frame_width_minus_1`` - - specifies the maximum frame width minus 1 for the frames represented by + - Specifies the maximum frame width minus 1 for the frames represented by + this sequence header. + * - __u16 + - ``max_frame_height_minus_1`` + - Specifies the maximum frame height minus 1 for the frames represented by this sequence header. .. _av1_sequence_flags: @@ -3374,7 +3378,7 @@ semantics" of :ref:`av1`. - ``uv_pri_strength[V4L2_AV1_CDEF_MAX]`` - Specifies the strength of the primary filter. * - __u8 - - ``uv_secondary_strength[V4L2_AV1_CDEF_MAX]`` + - ``uv_sec_strength[V4L2_AV1_CDEF_MAX]`` - Specifies the strength of the secondary filter. .. c:type:: v4l2_av1_segment_feature @@ -3439,7 +3443,7 @@ semantics" of :ref:`av1`. - Bitmask defining which features are enabled in each segment. Use V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask. * - __u16 - - `feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX]`` + - ``feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX]`` - Data attached to each feature. Data entry is only valid if the feature is enabled. @@ -3490,7 +3494,7 @@ AV1 Loop filter params as defined in section 6.8.10 "Loop filter semantics" of .. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}| -.. flat-table:: struct v4l2_av1_global_motion +.. flat-table:: struct v4l2_av1_loop_filter :header-rows: 0 :stub-columns: 0 :widths: 1 1 2 @@ -3806,12 +3810,12 @@ AV1 Tx mode as described in section 6.8.21 "TX mode semantics" of :ref:`av1`. * - struct :c:type:`v4l2_av1_quantization` - ``quantization`` - Quantization parameters. - * - struct :c:type:`v4l2_av1_segmentation` - - ``segmentation`` - - Segmentation parameters. * - __u8 - ``superres_denom`` - The denominator for the upscaling ratio. + * - struct :c:type:`v4l2_av1_segmentation` + - ``segmentation`` + - Segmentation parameters. 
* - struct :c:type:`v4l2_av1_loop_filter` - ``loop_filter`` - Loop filter params @@ -3829,7 +3833,7 @@ AV1 Tx mode as described in section 6.8.21 "TX mode semantics" of :ref:`av1`. * - struct :c:type:`v4l2_av1_loop_restoration` - ``loop_restoration`` - Loop restoration parameters. - * - struct :c:type:`v4l2_av1_loop_global_motion` + * - struct :c:type:`v4l2_av1_global_motion` - ``global_motion`` - Global motion parameters. * - __u32 diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst index b1c2ab2854af1d..27803dca8d3e13 100644 --- a/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst +++ b/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst @@ -31,7 +31,7 @@ Image Process Control IDs Pixel sampling rate in the device's pixel array. This control is read-only and its unit is pixels / second. - Some devices use horizontal and vertical balanking to configure the frame + Some devices use horizontal and vertical blanking to configure the frame rate. The frame rate can be calculated from the pixel rate, analogue crop rectangle as well as horizontal and vertical blanking. The pixel rate control may be present in a different sub-device than the blanking controls diff --git a/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst b/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst index fa04f00bcd2ed0..959f6bde8695a3 100644 --- a/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst +++ b/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst @@ -1,28 +1,67 @@ .. SPDX-License-Identifier: GPL-2.0 -.. _v4l2-meta-fmt-rk-isp1-params: - .. _v4l2-meta-fmt-rk-isp1-stat-3a: -***************************************************************************** -V4L2_META_FMT_RK_ISP1_PARAMS ('rk1p'), V4L2_META_FMT_RK_ISP1_STAT_3A ('rk1s') -***************************************************************************** +************************************************************************************************************************ +V4L2_META_FMT_RK_ISP1_PARAMS ('rk1p'), V4L2_META_FMT_RK_ISP1_STAT_3A ('rk1s'), V4L2_META_FMT_RK_ISP1_EXT_PARAMS ('rk1e') +************************************************************************************************************************ +======================== Configuration parameters ======================== -The configuration parameters are passed to the +The configuration of the RkISP1 ISP is performed by userspace by providing +parameters for the ISP to the driver using the :c:type:`v4l2_meta_format` +interface. + +There are two methods that allow to configure the ISP, the `fixed parameters` +configuration format and the `extensible parameters` configuration +format. + +.. _v4l2-meta-fmt-rk-isp1-params: + +Fixed parameters configuration format +===================================== + +When using the fixed configuration format, parameters are passed to the :ref:`rkisp1_params ` metadata output video node, using -the :c:type:`v4l2_meta_format` interface. The buffer contains -a single instance of the C structure :c:type:`rkisp1_params_cfg` defined in -``rkisp1-config.h``. So the structure can be obtained from the buffer by: +the `V4L2_META_FMT_RK_ISP1_PARAMS` meta format. + +The buffer contains a single instance of the C structure +:c:type:`rkisp1_params_cfg` defined in ``rkisp1-config.h``. So the structure can +be obtained from the buffer by: .. 
code-block:: c struct rkisp1_params_cfg *params = (struct rkisp1_params_cfg*) buffer; +This method supports a subset of the ISP features only, new applications should +use the extensible parameters method. + +.. _v4l2-meta-fmt-rk-isp1-ext-params: + +Extensible parameters configuration format +========================================== + +When using the extensible configuration format, parameters are passed to the +:ref:`rkisp1_params ` metadata output video node, using +the `V4L2_META_FMT_RK_ISP1_EXT_PARAMS` meta format. + +The buffer contains a single instance of the C structure +:c:type:`rkisp1_ext_params_cfg` defined in ``rkisp1-config.h``. The +:c:type:`rkisp1_ext_params_cfg` structure is designed to allow userspace to +populate the data buffer with only the configuration data for the ISP blocks it +intends to configure. The extensible parameters format design allows developers +to define new block types to support new configuration parameters, and defines a +versioning scheme so that it can be extended and versioned without breaking +compatibility with existing applications. + +For these reasons, this configuration method is preferred over the `fixed +parameters` format alternative. + .. rkisp1_stat_buffer +=========================== 3A and histogram statistics =========================== diff --git a/Documentation/userspace-api/media/v4l/mt2110t.svg b/Documentation/userspace-api/media/v4l/mt2110t.svg new file mode 100644 index 00000000000000..a6e82f2c73dff6 --- /dev/null +++ b/Documentation/userspace-api/media/v4l/mt2110t.svg @@ -0,0 +1,315 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 16 px + + + + + + + + + + + 32 px + + + + + + + + 4 rows of upper 8 bits + + + + + + + 4 rows of lower 2bits + + + + + + + + + + + + + + + + + + + + 16 bytes + + + + + + + + 4 rows of upper 8 bits + + + + + + + 4 rows of lower 2 bits + + + + + + + + + + + + + + + 4 rows of upper 8 bits + + + + + + + 4 rows of lower 2 bits + + + + + + + + + + + + + + + 4 rows of upper 8 bits + + + + + + + 4 rows of lower 2 bits + + + + + + + + + + + + + + + 4 rows of upper 8 bits + + + + + + + 4 rows of lower 2 bits + + + + + + + + + + + + + + + 4 rows of upper 8 bits + + + + + + + 4 rows of lower 2 bits + + + + + + + + + + + + + + + 4 rows of upper 8 bits + + + + + + + 4 rows of lower 2 bits + + + + + + + + + + + + + + + 4 rows of upper 8 bits + + + + + + + 4 rows of lower 2 bits + + + + + + + + + + + + + + + + + + + + 64 bytes + + + + + + + + diff --git a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst index 886ba7b08d6bfc..ac52485252d9df 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst @@ -275,19 +275,6 @@ please make a proposal on the linux-media mailing list. Decoder's implementation can be found here, `aspeed_codec `__ - * .. _V4L2-PIX-FMT-MT2110T: - - - ``V4L2_PIX_FMT_MT2110T`` - - 'MT2110T' - - This format is two-planar 10-Bit tile mode and having similitude with - ``V4L2_PIX_FMT_MM21`` in term of alignment and tiling. Used for VP9, AV1 - and HEVC. - * .. _V4L2-PIX-FMT-MT2110R: - - - ``V4L2_PIX_FMT_MT2110R`` - - 'MT2110R' - - This format is two-planar 10-Bit raster mode and having similitude with - ``V4L2_PIX_FMT_MM21`` in term of alignment and tiling. Used for AVC. * .. 
_V4L2-PIX-FMT-HEXTILE: - ``V4L2_PIX_FMT_HEXTILE`` diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst index 1840224faa4126..b788f693385541 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst @@ -144,6 +144,20 @@ All components are stored with the same number of bits per component. - Cb, Cr - Yes - 4x4 tiles + * - V4L2_PIX_FMT_MT2110T + - 'MT2T' + - 15 + - 4:2:0 + - Cb, Cr + - No + - 16x32 / 16x16 tiles tiled low bits + * - V4L2_PIX_FMT_MT2110R + - 'MT2R' + - 15 + - 4:2:0 + - Cb, Cr + - No + - 16x32 / 16x16 tiles raster low bits * - V4L2_PIX_FMT_NV16 - 'NV16' - 8 @@ -295,8 +309,6 @@ of the luma plane. .. _V4L2-PIX-FMT-NV12-32L32: .. _V4L2-PIX-FMT-NV12M-8L128: .. _V4L2-PIX-FMT-NV12-8L128: -.. _V4L2-PIX-FMT-NV12M-10BE-8L128: -.. _V4L2-PIX-FMT-NV12-10BE-8L128: .. _V4L2-PIX-FMT-MM21: Tiled NV12 @@ -322,6 +334,22 @@ If the vertical resolution is an odd number of tiles, the last row of tiles is stored in linear order. The layouts of the luma and chroma planes are identical. +.. _nv12mt: + +.. kernel-figure:: nv12mt.svg + :alt: nv12mt.svg + :align: center + + V4L2_PIX_FMT_NV12MT macroblock Z shape memory layout + +.. _nv12mt_ex: + +.. kernel-figure:: nv12mt_example.svg + :alt: nv12mt_example.svg + :align: center + + Example V4L2_PIX_FMT_NV12MT memory layout of tiles + ``V4L2_PIX_FMT_NV12_4L4`` stores pixels in 4x4 tiles, and stores tiles linearly in memory. The line stride and image height must be aligned to a multiple of 4. The layouts of the luma and chroma planes are @@ -345,6 +373,27 @@ The layouts of the luma and chroma planes are identical. ``V4L2_PIX_FMT_NV12_8L128`` is similar to ``V4L2_PIX_FMT_NV12M_8L128`` but stores two planes in one memory. +``V4L2_PIX_FMT_MM21`` store luma pixel in 16x32 tiles, and chroma pixels +in 16x16 tiles. The line stride must be aligned to a multiple of 16 and the +image height must be aligned to a multiple of 32. The number of luma and chroma +tiles are identical, even though the tile size differ. The image is formed of +two non-contiguous planes. + + +.. _V4L2-PIX-FMT-NV15-4L4: +.. _V4L2-PIX-FMT-NV12M-10BE-8L128: +.. _V4L2-PIX-FMT-NV12-10BE-8L128: +.. _V4L2-PIX-FMT-MT2110T: +.. _V4L2-PIX-FMT-MT2110R: + +Tiled NV15 +---------- + +``V4L2_PIX_FMT_NV15_4L4`` Semi-planar 10-bit YUV 4:2:0 formats, using 4x4 tiling. +All components are packed without any padding between each other. +As a side-effect, each group of 4 components are stored over 5 bytes +(YYYY or UVUV = 4 * 10 bits = 40 bits = 5 bytes). + ``V4L2_PIX_FMT_NV12M_10BE_8L128`` is similar to ``V4L2_PIX_FMT_NV12M`` but stores 10 bits pixels in 2D 8x128 tiles, and stores tiles linearly in memory. the data is arranged in big endian order. @@ -363,37 +412,119 @@ byte 4: Y3(bits 7-0) ``V4L2_PIX_FMT_NV12_10BE_8L128`` is similar to ``V4L2_PIX_FMT_NV12M_10BE_8L128`` but stores two planes in one memory. -``V4L2_PIX_FMT_MM21`` store luma pixel in 16x32 tiles, and chroma pixels -in 16x16 tiles. The line stride must be aligned to a multiple of 16 and the -image height must be aligned to a multiple of 32. The number of luma and chroma -tiles are identical, even though the tile size differ. The image is formed of -two non-contiguous planes. - -.. _nv12mt: +``V4L2_PIX_FMT_MT2110T`` is one of Mediatek packed 10bit YUV 4:2:0 formats. 
+It is fully packed 10bit 4:2:0 format like NV15 (15 bits per pixel), except +that the lower two bits data is stored in separate partitions. The format is +composed of 16x32 luma tiles, and 16x16 chroma tiles. Each tiles is 640 bytes +long, divided into 8 partitions of 80 bytes. The first 16 bytes of the +partition represent the 2 least significant bits of pixel data. The remaining +64 bytes represent the 8 most significant bits of pixel data. -.. kernel-figure:: nv12mt.svg - :alt: nv12mt.svg +.. kernel-figure:: mt2110t.svg + :alt: mt2110t.svg :align: center - V4L2_PIX_FMT_NV12MT macroblock Z shape memory layout - -.. _nv12mt_ex: + Layout of MT2110T Chroma Tile -.. kernel-figure:: nv12mt_example.svg - :alt: nv12mt_example.svg - :align: center +Filtering out the upper part of each partitions results in a valid +``V4L2_PIX_FMT_MM21`` frame. A partition is a sub-tile of size 16 x 4. The +lower two bits is said to be tiled since each bytes contains the lower two +bits of the column of for pixel matching the same index. The chroma tiles +only have 4 partitions. - Example V4L2_PIX_FMT_NV12MT memory layout of tiles +.. flat-table:: MT2110T LSB bits layout + :header-rows: 1 + :stub-columns: 1 -.. _V4L2-PIX-FMT-NV15-4L4: + * - + - start + 0: + - start + 1: + - . . . + - start\ +\ 15: + * - Bits 1:0 + - Y'\ :sub:`0:0` + - Y'\ :sub:`0:1` + - . . . + - Y'\ :sub:`0:15` + * - Bit 3:2 + - Y'\ :sub:`1:0` + - Y'\ :sub:`1:1` + - . . . + - Y'\ :sub:`1:15` + * - Bits 5:4 + - Y'\ :sub:`2:0` + - Y'\ :sub:`2:1` + - . . . + - Y'\ :sub:`2:15` + * - Bits 7:6 + - Y'\ :sub:`3:0` + - Y'\ :sub:`3:1` + - . . . + - Y'\ :sub:`3:15` + +``V4L2_PIX_FMT_MT2110R`` is identical to ``V4L2_PIX_FMT_MT2110T`` except that +the least significant two bits layout is in raster order. This means the first byte +contains 4 pixels of the first row, with 4 bytes per line. + +.. flat-table:: MT2110R LSB bits layout + :header-rows: 2 + :stub-columns: 1 -Tiled NV15 ----------- + * - + - :cspan:`3` Byte 0 + - ... + - :cspan:`3` Byte 3 + * - + - 7:6 + - 5:4 + - 3:2 + - 1:0 + - ... + - 7:6 + - 5:4 + - 3:2 + - 1:0 + * - start + 0: + - Y'\ :sub:`0:3` + - Y'\ :sub:`0:2` + - Y'\ :sub:`0:1` + - Y'\ :sub:`0:0` + - ... + - Y'\ :sub:`0:15` + - Y'\ :sub:`0:14` + - Y'\ :sub:`0:13` + - Y'\ :sub:`0:12` + * - start + 4: + - Y'\ :sub:`1:3` + - Y'\ :sub:`1:2` + - Y'\ :sub:`1:1` + - Y'\ :sub:`1:0` + - ... + - Y'\ :sub:`1:15` + - Y'\ :sub:`1:14` + - Y'\ :sub:`1:13` + - Y'\ :sub:`1:12` + * - start + 8: + - Y'\ :sub:`2:3` + - Y'\ :sub:`2:2` + - Y'\ :sub:`2:1` + - Y'\ :sub:`2:0` + - ... + - Y'\ :sub:`2:15` + - Y'\ :sub:`2:14` + - Y'\ :sub:`2:13` + - Y'\ :sub:`2:12` + * - start\ +\ 12: + - Y'\ :sub:`3:3` + - Y'\ :sub:`3:2` + - Y'\ :sub:`3:1` + - Y'\ :sub:`3:0` + - ... + - Y'\ :sub:`3:15` + - Y'\ :sub:`3:14` + - Y'\ :sub:`3:13` + - Y'\ :sub:`3:12` -Semi-planar 10-bit YUV 4:2:0 formats, using 4x4 tiling. -All components are packed without any padding between each other. -As a side-effect, each group of 4 components are stored over 5 bytes -(YYYY or UVUV = 4 * 10 bits = 40 bits = 5 bytes). .. _V4L2-PIX-FMT-NV16: .. _V4L2-PIX-FMT-NV61: diff --git a/Documentation/userspace-api/media/v4l/vidioc-querycap.rst b/Documentation/userspace-api/media/v4l/vidioc-querycap.rst index 6c57b84283566f..3d11d86d9cbfa7 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-querycap.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-querycap.rst @@ -244,6 +244,17 @@ specification the ioctl returns an ``EINVAL`` error code. 
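A minimal sketch of unpacking one 10-bit luma sample from a single 80-byte MT2110T partition (one 16x4 sub-tile), assuming the 64 most-significant-bit bytes are laid out row-major within the partition; both that assumption and the helper are illustrative only:

.. code-block:: c

    #include <stdint.h>

    /* Illustrative only: pixel (row 0..3, col 0..15) of one partition.
     * Bytes 0..15 hold the tiled 2 LSBs (one byte per column, two bits
     * per row); bytes 16..79 are assumed to hold the 8 MSBs row-major. */
    static uint16_t mt2110t_sample(const uint8_t part[80],
                                   unsigned int row, unsigned int col)
    {
            uint16_t lsb = (part[col] >> (2 * row)) & 0x3;
            uint16_t msb = part[16 + row * 16 + col];

            return (uint16_t)((msb << 2) | lsb);
    }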
- 0x01000000 - The device supports the :c:func:`read()` and/or :c:func:`write()` I/O methods. + * - ``V4L2_CAP_EDID`` + - 0x02000000 + - The device stores the EDID for a video input, or retrieves the EDID for a video + output. It is a standalone EDID device, so no video streaming etc. will take place. + + For a video input this is typically an eeprom that supports the + :ref:`VESA Enhanced Display Data Channel Standard `. It can be something + else as well, for example a micro controller. + + For a video output this is typically read from an external device such as an + HDMI splitter accessed by a serial port. * - ``V4L2_CAP_STREAMING`` - 0x04000000 - The device supports the :ref:`streaming ` I/O method. diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst index bbc22dd760325d..daf9a6621b50c5 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst @@ -73,6 +73,8 @@ aborting or finishing any DMA in progress, an implicit .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.5cm}| +.. cssclass:: longtable + .. flat-table:: struct v4l2_requestbuffers :header-rows: 0 :stub-columns: 0 @@ -123,14 +125,6 @@ aborting or finishing any DMA in progress, an implicit .. _V4L2-BUF-CAP-SUPPORTS-MAX-NUM-BUFFERS: .. _V4L2-BUF-CAP-SUPPORTS-REMOVE-BUFS: -.. raw:: latex - - \footnotesize - -.. tabularcolumns:: |p{8.1cm}|p{2.2cm}|p{7.0cm}| - -.. cssclass:: longtable - .. flat-table:: V4L2 Buffer Capabilities Flags :header-rows: 0 :stub-columns: 0 @@ -166,6 +160,36 @@ aborting or finishing any DMA in progress, an implicit :ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE `, :ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN ` and :ref:`V4L2_MEMORY_FLAG_NON_COHERENT `. + * - ``V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS`` + - 0x00000080 + - If set, then the ``max_num_buffers`` field in ``struct v4l2_create_buffers`` + is valid. If not set, then the maximum is ``VIDEO_MAX_FRAME`` buffers. + * - ``V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS`` + - 0x00000100 + - If set, then ``VIDIOC_REMOVE_BUFS`` is supported. + +.. _memory-flags: +.. _V4L2-MEMORY-FLAG-NON-COHERENT: + +.. flat-table:: Memory Consistency Flags + :header-rows: 0 + :stub-columns: 0 + :widths: 3 1 4 + + * - ``V4L2_MEMORY_FLAG_NON_COHERENT`` + - 0x00000001 + - A buffer is allocated either in coherent (it will be automatically + coherent between the CPU and the bus) or non-coherent memory. The + latter can provide performance gains, for instance the CPU cache + sync/flush operations can be avoided if the buffer is accessed by the + corresponding device only and the CPU does not read/write to/from that + buffer. However, this requires extra care from the driver -- it must + guarantee memory consistency by issuing a cache flush/sync when + consistency is needed. If this flag is set V4L2 will attempt to + allocate the buffer in non-coherent memory. The flag takes effect + only if the buffer is used for :ref:`memory mapping ` I/O and the + queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS + ` capability. .. 
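A minimal sketch of requesting non-coherent MMAP buffers with the flag described above, assuming a capture queue and keeping error handling to the ioctl return value:

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Sketch only: ask for MMAP buffers in non-coherent memory. */
    static int request_noncoherent_buffers(int fd)
    {
            struct v4l2_requestbuffers req;

            memset(&req, 0, sizeof(req));
            req.count = 4;
            req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            req.memory = V4L2_MEMORY_MMAP;
            req.flags = V4L2_MEMORY_FLAG_NON_COHERENT;

            if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
                    return -1;

            /* Without the cache-hints capability the flag is ignored and
             * the buffers are coherent. */
            if (!(req.capabilities & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS))
                    return 0;

            return req.count;
    }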
raw:: latex diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions index bdc628e8c1d694..d67fd4038d22a4 100644 --- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions +++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions @@ -197,6 +197,7 @@ replace define V4L2_CAP_META_OUTPUT device-capabilities replace define V4L2_CAP_DEVICE_CAPS device-capabilities replace define V4L2_CAP_TOUCH device-capabilities replace define V4L2_CAP_IO_MC device-capabilities +replace define V4L2_CAP_EDID device-capabilities # V4L2 pix flags replace define V4L2_PIX_FMT_PRIV_MAGIC :c:type:`v4l2_pix_format` diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index b3be87489108e8..e32471977d0a23 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -4214,7 +4214,9 @@ whether or not KVM_CAP_X86_USER_SPACE_MSR's KVM_MSR_EXIT_REASON_FILTER is enabled. If KVM_MSR_EXIT_REASON_FILTER is enabled, KVM will exit to userspace on denied accesses, i.e. userspace effectively intercepts the MSR access. If KVM_MSR_EXIT_REASON_FILTER is not enabled, KVM will inject a #GP into the guest -on denied accesses. +on denied accesses. Note, if an MSR access is denied during emulation of MSR +load/stores during VMX transitions, KVM ignores KVM_MSR_EXIT_REASON_FILTER. +See the below warning for full details. If an MSR access is allowed by userspace, KVM will emulate and/or virtualize the access in accordance with the vCPU model. Note, KVM may still ultimately @@ -4229,9 +4231,22 @@ filtering. In that mode, ``KVM_MSR_FILTER_DEFAULT_DENY`` is invalid and causes an error. .. warning:: - MSR accesses as part of nested VM-Enter/VM-Exit are not filtered. - This includes both writes to individual VMCS fields and reads/writes - through the MSR lists pointed to by the VMCS. + MSR accesses that are side effects of instruction execution (emulated or + native) are not filtered as hardware does not honor MSR bitmaps outside of + RDMSR and WRMSR, and KVM mimics that behavior when emulating instructions + to avoid pointless divergence from hardware. E.g. RDPID reads MSR_TSC_AUX, + SYSENTER reads the SYSENTER MSRs, etc. + + MSRs that are loaded/stored via dedicated VMCS fields are not filtered as + part of VM-Enter/VM-Exit emulation. + + MSRs that are loaded/store via VMX's load/store lists _are_ filtered as part + of VM-Enter/VM-Exit emulation. If an MSR access is denied on VM-Enter, KVM + synthesizes a consistency check VM-Exit(EXIT_REASON_MSR_LOAD_FAIL). If an + MSR access is denied on VM-Exit, KVM synthesizes a VM-Abort. In short, KVM + extends Intel's architectural list of MSRs that cannot be loaded/saved via + the VM-Enter/VM-Exit MSR list. It is platform owner's responsibility to + to communicate any such restrictions to their end users. x2APIC MSR accesses cannot be filtered (KVM silently ignores filters that cover any x2APIC MSRs). @@ -8082,6 +8097,14 @@ KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if guest CPUID on writes to MISC_ENABLE if KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is disabled. + +KVM_X86_QUIRK_SLOT_ZAP_ALL By default, KVM invalidates all SPTEs in + fast way for memslot deletion when VM type + is KVM_X86_DEFAULT_VM. + When this quirk is disabled or when VM type + is other than KVM_X86_DEFAULT_VM, KVM zaps + only leaf SPTEs that are within the range of + the memslot being deleted. 
=================================== ============================================ 7.32 KVM_CAP_MAX_VCPU_ID diff --git a/Documentation/virt/kvm/arm/hypercalls.rst b/Documentation/virt/kvm/arm/hypercalls.rst index 17be111f493f56..af7bc2c2e0cb41 100644 --- a/Documentation/virt/kvm/arm/hypercalls.rst +++ b/Documentation/virt/kvm/arm/hypercalls.rst @@ -44,3 +44,101 @@ Provides a discovery mechanism for other KVM/arm64 hypercalls. ---------------------------------------- See ptp_kvm.rst + +``ARM_SMCCC_KVM_FUNC_HYP_MEMINFO`` +---------------------------------- + +Query the memory protection parameters for a pKVM protected virtual machine. + ++---------------------+-------------------------------------------------------------+ +| Presence: | Optional; pKVM protected guests only. | ++---------------------+-------------------------------------------------------------+ +| Calling convention: | HVC64 | ++---------------------+----------+--------------------------------------------------+ +| Function ID: | (uint32) | 0xC6000002 | ++---------------------+----------+----+---------------------------------------------+ +| Arguments: | (uint64) | R1 | Reserved / Must be zero | +| +----------+----+---------------------------------------------+ +| | (uint64) | R2 | Reserved / Must be zero | +| +----------+----+---------------------------------------------+ +| | (uint64) | R3 | Reserved / Must be zero | ++---------------------+----------+----+---------------------------------------------+ +| Return Values: | (int64) | R0 | ``INVALID_PARAMETER (-3)`` on error, else | +| | | | memory protection granule in bytes | ++---------------------+----------+----+---------------------------------------------+ + +``ARM_SMCCC_KVM_FUNC_MEM_SHARE`` +-------------------------------- + +Share a region of memory with the KVM host, granting it read, write and execute +permissions. The size of the region is equal to the memory protection granule +advertised by ``ARM_SMCCC_KVM_FUNC_HYP_MEMINFO``. + ++---------------------+-------------------------------------------------------------+ +| Presence: | Optional; pKVM protected guests only. | ++---------------------+-------------------------------------------------------------+ +| Calling convention: | HVC64 | ++---------------------+----------+--------------------------------------------------+ +| Function ID: | (uint32) | 0xC6000003 | ++---------------------+----------+----+---------------------------------------------+ +| Arguments: | (uint64) | R1 | Base IPA of memory region to share | +| +----------+----+---------------------------------------------+ +| | (uint64) | R2 | Reserved / Must be zero | +| +----------+----+---------------------------------------------+ +| | (uint64) | R3 | Reserved / Must be zero | ++---------------------+----------+----+---------------------------------------------+ +| Return Values: | (int64) | R0 | ``SUCCESS (0)`` | +| | | +---------------------------------------------+ +| | | | ``INVALID_PARAMETER (-3)`` | ++---------------------+----------+----+---------------------------------------------+ + +``ARM_SMCCC_KVM_FUNC_MEM_UNSHARE`` +---------------------------------- + +Revoke access permission from the KVM host to a memory region previously shared +with ``ARM_SMCCC_KVM_FUNC_MEM_SHARE``. The size of the region is equal to the +memory protection granule advertised by ``ARM_SMCCC_KVM_FUNC_HYP_MEMINFO``. + ++---------------------+-------------------------------------------------------------+ +| Presence: | Optional; pKVM protected guests only. 
| ++---------------------+-------------------------------------------------------------+ +| Calling convention: | HVC64 | ++---------------------+----------+--------------------------------------------------+ +| Function ID: | (uint32) | 0xC6000004 | ++---------------------+----------+----+---------------------------------------------+ +| Arguments: | (uint64) | R1 | Base IPA of memory region to unshare | +| +----------+----+---------------------------------------------+ +| | (uint64) | R2 | Reserved / Must be zero | +| +----------+----+---------------------------------------------+ +| | (uint64) | R3 | Reserved / Must be zero | ++---------------------+----------+----+---------------------------------------------+ +| Return Values: | (int64) | R0 | ``SUCCESS (0)`` | +| | | +---------------------------------------------+ +| | | | ``INVALID_PARAMETER (-3)`` | ++---------------------+----------+----+---------------------------------------------+ + +``ARM_SMCCC_KVM_FUNC_MMIO_GUARD`` +---------------------------------- + +Request that a given memory region is handled as MMIO by the hypervisor, +allowing accesses to this region to be emulated by the KVM host. The size of the +region is equal to the memory protection granule advertised by +``ARM_SMCCC_KVM_FUNC_HYP_MEMINFO``. + ++---------------------+-------------------------------------------------------------+ +| Presence: | Optional; pKVM protected guests only. | ++---------------------+-------------------------------------------------------------+ +| Calling convention: | HVC64 | ++---------------------+----------+--------------------------------------------------+ +| Function ID: | (uint32) | 0xC6000007 | ++---------------------+----------+----+---------------------------------------------+ +| Arguments: | (uint64) | R1 | Base IPA of MMIO memory region | +| +----------+----+---------------------------------------------+ +| | (uint64) | R2 | Reserved / Must be zero | +| +----------+----+---------------------------------------------+ +| | (uint64) | R3 | Reserved / Must be zero | ++---------------------+----------+----+---------------------------------------------+ +| Return Values: | (int64) | R0 | ``SUCCESS (0)`` | +| | | +---------------------------------------------+ +| | | | ``INVALID_PARAMETER (-3)`` | ++---------------------+----------+----+---------------------------------------------+ diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst index ad13ec55ddfe51..9ca5a45c2140a9 100644 --- a/Documentation/virt/kvm/index.rst +++ b/Documentation/virt/kvm/index.rst @@ -14,6 +14,7 @@ KVM s390/index ppc-pv x86/index + loongarch/index locking vcpu-requests diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst index 02880d5552d5fa..20a9a37d1cdd23 100644 --- a/Documentation/virt/kvm/locking.rst +++ b/Documentation/virt/kvm/locking.rst @@ -11,6 +11,8 @@ The acquisition orders for mutexes are as follows: - cpus_read_lock() is taken outside kvm_lock +- kvm_usage_lock is taken outside cpus_read_lock() + - kvm->lock is taken outside vcpu->mutex - kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock @@ -24,6 +26,13 @@ The acquisition orders for mutexes are as follows: are taken on the waiting side when modifying memslots, so MMU notifiers must not take either kvm->slots_lock or kvm->slots_arch_lock. 
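A minimal sketch of nesting that respects the acquisition order listed above (kvm_usage_lock outside cpus_read_lock(), which is in turn outside kvm_lock); the fragment is illustrative only and is not lifted from the KVM sources:

.. code-block:: c

    /* Illustrative only: the documented acquisition order. */
    mutex_lock(&kvm_usage_lock);
    cpus_read_lock();
    mutex_lock(&kvm_lock);

    /* ... critical section, e.g. walking vm_list ... */

    mutex_unlock(&kvm_lock);
    cpus_read_unlock();
    mutex_unlock(&kvm_usage_lock);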
+cpus_read_lock() vs kvm_lock: + +- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that + being the official ordering, as it is quite easy to unknowingly trigger + cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list, + e.g. avoid complex operations when possible. + For SRCU: - ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections @@ -227,10 +236,16 @@ time it will be set using the Dirty tracking mechanism described above. :Type: mutex :Arch: any :Protects: - vm_list - - kvm_usage_count + +``kvm_usage_lock`` +^^^^^^^^^^^^^^^^^^ + +:Type: mutex +:Arch: any +:Protects: - kvm_usage_count - hardware virtualization enable/disable -:Comment: KVM also disables CPU hotplug via cpus_read_lock() during - enable/disable. +:Comment: Exists to allow taking cpus_read_lock() while kvm_usage_count is + protected, which simplifies the virtualization enabling logic. ``kvm->mn_invalidate_lock`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -290,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above. wakeup. ``vendor_module_lock`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^ :Type: mutex :Arch: x86 :Protects: loading a vendor module (kvm_amd or kvm_intel) -:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is - taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and - many operations need to take cpu_hotplug_lock when loading a vendor module, - e.g. updating static calls. +:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken + in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while + cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many + operations need to take cpu_hotplug_lock when loading a vendor module, e.g. + updating static calls. diff --git a/Documentation/virt/kvm/loongarch/hypercalls.rst b/Documentation/virt/kvm/loongarch/hypercalls.rst new file mode 100644 index 00000000000000..2d6b94031f1b76 --- /dev/null +++ b/Documentation/virt/kvm/loongarch/hypercalls.rst @@ -0,0 +1,89 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================================== +The LoongArch paravirtual interface +=================================== + +KVM hypercalls use the HVCL instruction with code 0x100 and the hypercall +number is put in a0. Up to five arguments may be placed in registers a1 - a5. +The return value is placed in v0 (an alias of a0). + +Source code for this interface can be found in arch/loongarch/kvm*. + +Querying for existence +====================== + +To determine if the host is running on KVM, we can utilize the cpucfg() +function at index CPUCFG_KVM_BASE (0x40000000). + +The CPUCFG_KVM_BASE range, spanning from 0x40000000 to 0x400000FF, The +CPUCFG_KVM_BASE range between 0x40000000 - 0x400000FF is marked as reserved. +Consequently, all current and future processors will not implement any +feature within this range. + +On a KVM-virtualized Linux system, a read operation on cpucfg() at index +CPUCFG_KVM_BASE (0x40000000) returns the magic string 'KVM\0'. + +Once you have determined that your host is running on a paravirtualization- +capable KVM, you may now use hypercalls as described below. + +KVM hypercall ABI +================= + +The KVM hypercall ABI is simple, with one scratch register a0 (v0) and at most +five generic registers (a1 - a5) used as input parameters. The FP (Floating- +point) and vector registers are not utilized as input registers and must +remain unmodified during a hypercall. 
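A minimal guest-side sketch of a one-argument hypercall following this ABI; the helper name is hypothetical and the in-tree code may spell the HVCL immediate differently:

.. code-block:: c

    /* Sketch only: function number in a0, one parameter in a1, and the
     * return code comes back in a0 (v0). */
    static inline long loongarch_hypercall1(unsigned long func,
                                            unsigned long arg)
    {
            register unsigned long a0 __asm__("a0") = func;
            register unsigned long a1 __asm__("a1") = arg;

            __asm__ __volatile__("hvcl 0x100"
                                 : "+r"(a0)
                                 : "r"(a1)
                                 : "memory");

            return (long)a0;
    }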
+ +Hypercall functions can be inlined as it only uses one scratch register. + +The parameters are as follows: + + ======== ================= ================ + Register IN OUT + ======== ================= ================ + a0 function number Return code + a1 1st parameter - + a2 2nd parameter - + a3 3rd parameter - + a4 4th parameter - + a5 5th parameter - + ======== ================= ================ + +The return codes may be one of the following: + + ==== ========================= + Code Meaning + ==== ========================= + 0 Success + -1 Hypercall not implemented + -2 Bad Hypercall parameter + ==== ========================= + +KVM Hypercalls Documentation +============================ + +The template for each hypercall is as follows: + +1. Hypercall name +2. Purpose + +1. KVM_HCALL_FUNC_IPI +------------------------ + +:Purpose: Send IPIs to multiple vCPUs. + +- a0: KVM_HCALL_FUNC_IPI +- a1: Lower part of the bitmap for destination physical CPUIDs +- a2: Higher part of the bitmap for destination physical CPUIDs +- a3: The lowest physical CPUID in the bitmap + +The hypercall lets a guest send multiple IPIs (Inter-Process Interrupts) with +at most 128 destinations per hypercall. The destinations are represented in a +bitmap contained in the first two input registers (a1 and a2). + +Bit 0 of a1 corresponds to the physical CPUID in the third input register (a3) +and bit 1 corresponds to the physical CPUID in a3+1, and so on. + +PV IPI on LoongArch includes both PV IPI multicast sending and PV IPI receiving, +and SWI is used for PV IPI inject since there is no VM-exits accessing SWI registers. diff --git a/Documentation/virt/kvm/loongarch/index.rst b/Documentation/virt/kvm/loongarch/index.rst new file mode 100644 index 00000000000000..83387b4c534550 --- /dev/null +++ b/Documentation/virt/kvm/loongarch/index.rst @@ -0,0 +1,10 @@ +.. SPDX-License-Identifier: GPL-2.0 + +========================= +KVM for LoongArch systems +========================= + +.. toctree:: + :maxdepth: 2 + + hypercalls.rst diff --git a/Documentation/virt/uml/user_mode_linux_howto_v2.rst b/Documentation/virt/uml/user_mode_linux_howto_v2.rst index 27942446f406c0..584000b743f381 100644 --- a/Documentation/virt/uml/user_mode_linux_howto_v2.rst +++ b/Documentation/virt/uml/user_mode_linux_howto_v2.rst @@ -217,6 +217,8 @@ remote UML and other VM instances. +-----------+--------+------------------------------------+------------+ | fd | vector | dependent on fd type | varies | +-----------+--------+------------------------------------+------------+ +| vde | vector | dep. on VDE VPN: Virt.Net Locator | varies | ++-----------+--------+------------------------------------+------------+ | tuntap | legacy | none | ~ 500Mbit | +-----------+--------+------------------------------------+------------+ | daemon | legacy | none | ~ 450Mbit | @@ -573,6 +575,41 @@ https://github.com/NetSys/bess/wiki/Built-In-Modules-and-Ports BESS transport does not require any special privileges. +VDE vector transport +-------------------- + +Virtual Distributed Ethernet (VDE) is a project whose main goal is to provide a +highly flexible support for virtual networking. + +http://wiki.virtualsquare.org/#/tutorials/vdebasics + +Common usages of VDE include fast prototyping and teaching. 
+ +Examples: + + ``vecX:transport=vde,vnl=tap://tap0`` + +use tap0 + + ``vecX:transport=vde,vnl=slirp://`` + +use slirp + + ``vec0:transport=vde,vnl=vde:///tmp/switch`` + +connect to a vde switch + + ``vecX:transport=\"vde,vnl=cmd://ssh remote.host //tmp/sshlirp\"`` + +connect to a remote slirp (instant VPN: convert ssh to VPN, it uses sshlirp) +https://github.com/virtualsquare/sshlirp + + ``vec0:transport=vde,vnl=vxvde://234.0.0.1`` + +connect to a local area cloud (all the UML nodes using the same +multicast address running on hosts in the same multicast domain (LAN) +will be automagically connected together to a virtual LAN. + Configuring Legacy transports ============================= diff --git a/Documentation/watchdog/convert_drivers_to_kernel_api.rst b/Documentation/watchdog/convert_drivers_to_kernel_api.rst index a1c3f038ce0e7c..e83609a5d0071d 100644 --- a/Documentation/watchdog/convert_drivers_to_kernel_api.rst +++ b/Documentation/watchdog/convert_drivers_to_kernel_api.rst @@ -75,7 +75,6 @@ Example conversion:: -static const struct file_operations s3c2410wdt_fops = { - .owner = THIS_MODULE, - - .llseek = no_llseek, - .write = s3c2410wdt_write, - .unlocked_ioctl = s3c2410wdt_ioctl, - .open = s3c2410wdt_open, diff --git a/Documentation/watchdog/watchdog-api.rst b/Documentation/watchdog/watchdog-api.rst index 800dcd7586f2db..78e228c272cf8f 100644 --- a/Documentation/watchdog/watchdog-api.rst +++ b/Documentation/watchdog/watchdog-api.rst @@ -249,7 +249,7 @@ Note that not all devices support these two calls, and some only support the GETBOOTSTATUS call. Some drivers can measure the temperature using the GETTEMP ioctl. The -returned value is the temperature in degrees fahrenheit:: +returned value is the temperature in degrees Fahrenheit:: int temperature; ioctl(fd, WDIOC_GETTEMP, &temperature); diff --git a/Documentation/wmi/devices/dell-wmi-ddv.rst b/Documentation/wmi/devices/dell-wmi-ddv.rst index 2fcdfcf0332708..e0c20af309481a 100644 --- a/Documentation/wmi/devices/dell-wmi-ddv.rst +++ b/Documentation/wmi/devices/dell-wmi-ddv.rst @@ -8,7 +8,7 @@ Introduction ============ Many Dell notebooks made after ~2020 support a WMI-based interface for -retrieving various system data like battery temperature, ePPID, diagostic data +retrieving various system data like battery temperature, ePPID, diagnostic data and fan/thermal sensor data. This interface is likely used by the `Dell Data Vault` software on Windows, @@ -277,7 +277,7 @@ Reverse-Engineering the DDV WMI interface 4. Try to deduce the meaning of a certain WMI method by comparing the control flow with other ACPI methods (_BIX or _BIF for battery related methods for example). -5. Use the built-in UEFI diagostics to view sensor types/values for fan/thermal +5. Use the built-in UEFI diagnostics to view sensor types/values for fan/thermal related methods (sometimes overwriting static ACPI data fields can be used to test different sensor type values, since on some machines this data is not reinitialized upon a warm reset). 
diff --git a/LICENSES/deprecated/0BSD b/LICENSES/deprecated/0BSD new file mode 100644 index 00000000000000..e4b95b7499665f --- /dev/null +++ b/LICENSES/deprecated/0BSD @@ -0,0 +1,23 @@ +Valid-License-Identifier: 0BSD +SPDX-URL: https://spdx.org/licenses/0BSD.html +Usage-Guide: + To use the BSD Zero Clause License put the following SPDX tag/value + pair into a comment according to the placement guidelines in the + licensing rules documentation: + SPDX-License-Identifier: 0BSD +License-Text: + +BSD Zero Clause License + +Copyright (c) + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION +OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/MAINTAINERS b/MAINTAINERS index cc40a9d9b8cd10..a097afd76ded46 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -334,6 +334,7 @@ L: linux-acpi@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/acpi/arm64 +F: include/linux/acpi_iort.h ACPI FOR RISC-V (ACPI/riscv) M: Sunil V L @@ -448,6 +449,7 @@ S: Supported W: https://wiki.analog.com/resources/tools-software/linux-drivers/iio-adc/ad738x W: https://ez.analog.com/linux-software-drivers F: Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml +F: Documentation/iio/ad7380.rst F: drivers/iio/adc/ad7380.c AD7877 TOUCHSCREEN DRIVER @@ -537,6 +539,17 @@ F: drivers/leds/leds-adp5520.c F: drivers/mfd/adp5520.c F: drivers/video/backlight/adp5520_bl.c +ADP5585 GPIO EXPANDER, PWM AND KEYPAD CONTROLLER DRIVER +M: Laurent Pinchart +L: linux-gpio@vger.kernel.org +L: linux-pwm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/*/adi,adp5585*.yaml +F: drivers/gpio/gpio-adp5585.c +F: drivers/mfd/adp5585.c +F: drivers/pwm/pwm-adp5585.c +F: include/linux/mfd/adp5585.h + ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587) M: Michael Hennerich S: Supported @@ -619,6 +632,17 @@ F: drivers/iio/accel/adxl372.c F: drivers/iio/accel/adxl372_i2c.c F: drivers/iio/accel/adxl372_spi.c +ADXL380 THREE-AXIS DIGITAL ACCELEROMETER DRIVER +M: Ramona Gradinariu +M: Antoniu Miclaus +S: Supported +W: https://ez.analog.com/linux-software-drivers +F: Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml +F: drivers/iio/accel/adxl380.c +F: drivers/iio/accel/adxl380.h +F: drivers/iio/accel/adxl380_i2c.c +F: drivers/iio/accel/adxl380_spi.c + AF8133J THREE-AXIS MAGNETOMETER DRIVER M: Ondřej Jirman S: Maintained @@ -836,7 +860,7 @@ F: drivers/crypto/allwinner/ ALLWINNER DMIC DRIVERS M: Ban Tao -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/allwinner,sun50i-h6-dmic.yaml F: sound/soc/sunxi/sun50i-dmic.c @@ -1013,6 +1037,13 @@ S: Supported T: git https://gitlab.freedesktop.org/agd5f/linux.git F: drivers/gpu/drm/amd/display/ +AMD DISPLAY CORE - DML +M: Chaitanya Dhere +M: Jun Lei +S: Supported +F: drivers/gpu/drm/amd/display/dc/dml/ +F: drivers/gpu/drm/amd/display/dc/dml2/ + AMD FAM15H PROCESSOR POWER MONITORING DRIVER M: Huang Rui L: 
linux-hwmon@vger.kernel.org @@ -1128,6 +1159,14 @@ L: dmaengine@vger.kernel.org S: Maintained F: drivers/dma/ptdma/ +AMD QDMA DRIVER +M: Nishad Saraf +M: Lizhi Hou +L: dmaengine@vger.kernel.org +S: Supported +F: drivers/dma/amd/qdma/ +F: include/linux/platform_data/amd_qdma.h + AMD SEATTLE DEVICE TREE SUPPORT M: Suravee Suthikulpanit M: Tom Lendacky @@ -1153,6 +1192,13 @@ S: Supported F: arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi F: drivers/net/ethernet/amd/xgbe/ +AMLOGIC BLUETOOTH DRIVER +M: Yang Li +L: linux-bluetooth@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/net/bluetooth/amlogic,w155s2-bt.yaml +F: drivers/bluetooth/hci_aml.c + AMLOGIC DDR PMU DRIVER M: Jiucheng Xu L: linux-amlogic@lists.infradead.org @@ -1202,6 +1248,15 @@ W: https://ez.analog.com/linux-software-drivers F: Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml F: drivers/iio/dac/ad3552r.c +ANALOG DEVICES INC AD4000 DRIVER +M: Marcelo Schmitt +L: linux-iio@vger.kernel.org +S: Supported +W: https://ez.analog.com/linux-software-drivers +F: Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml +F: Documentation/iio/ad4000.rst +F: drivers/iio/adc/ad4000.c + ANALOG DEVICES INC AD4130 DRIVER M: Cosmin Tanislav L: linux-iio@vger.kernel.org @@ -1211,6 +1266,18 @@ F: Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130 F: Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml F: drivers/iio/adc/ad4130.c +ANALOG DEVICES INC AD4695 DRIVER +M: Michael Hennerich +M: Nuno Sá +R: David Lechner +L: linux-iio@vger.kernel.org +S: Supported +W: https://ez.analog.com/linux-software-drivers +F: Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml +F: Documentation/iio/ad4695.rst +F: drivers/iio/adc/ad4695.c +F: include/dt-bindings/iio/adi,ad4695.h + ANALOG DEVICES INC AD7091R DRIVER M: Marcelo Schmitt L: linux-iio@vger.kernel.org @@ -1277,6 +1344,16 @@ W: https://ez.analog.com/linux-software-drivers F: Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml F: drivers/iio/adc/ad7780.c +ANALOG DEVICES INC AD9467 DRIVER +M: Michael Hennerich +M: Nuno Sa +L: linux-iio@vger.kernel.org +S: Supported +W: https://ez.analog.com/linux-software-drivers +F: Documentation/ABI/testing/debugfs-iio-ad9467 +F: Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml +F: drivers/iio/adc/ad9467.c + ANALOG DEVICES INC AD9739a DRIVER M: Nuno Sa M: Dragos Bogdan @@ -1440,7 +1517,7 @@ F: drivers/iio/gyro/adxrs290.c ANALOG DEVICES INC ASOC CODEC DRIVERS M: Lars-Peter Clausen M: Nuno Sá -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Supported W: http://wiki.analog.com/ W: https://ez.analog.com/linux-software-drivers @@ -1517,7 +1594,7 @@ F: drivers/rtc/rtc-goldfish.c AOA (Apple Onboard Audio) ALSA DRIVER M: Johannes Berg L: linuxppc-dev@lists.ozlabs.org -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: sound/aoa/ @@ -1609,6 +1686,14 @@ F: Documentation/admin-guide/perf/xgene-pmu.rst F: Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt F: drivers/perf/xgene_pmu.c +APPLIED MICRO QT2025 PHY DRIVER +M: FUJITA Tomonori +R: Trevor Gross +L: netdev@vger.kernel.org +L: rust-for-linux@vger.kernel.org +S: Maintained +F: drivers/net/phy/qt2025.rs + APTINA CAMERA SENSOR PLL M: Laurent Pinchart L: linux-media@vger.kernel.org @@ -1737,6 +1822,17 @@ F: drivers/mtd/maps/physmap-versatile.* F: drivers/power/reset/arm-versatile-reboot.c F: drivers/soc/versatile/ +ARM INTERCONNECT PMU DRIVERS +M: Robin Murphy +S: 
Supported +F: Documentation/admin-guide/perf/arm-cmn.rst +F: Documentation/admin-guide/perf/arm-ni.rst +F: Documentation/devicetree/bindings/perf/arm,cmn.yaml +F: Documentation/devicetree/bindings/perf/arm,ni.yaml +F: drivers/perf/arm-cmn.c +F: drivers/perf/arm-ni.c +F: tools/perf/pmu-events/arch/arm64/arm/cmn/ + ARM KOMEDA DRM-KMS DRIVER M: Liviu Dudau S: Supported @@ -1754,6 +1850,7 @@ L: dri-devel@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: Documentation/gpu/panfrost.rst +F: drivers/gpu/drm/ci/xfails/panfrost* F: drivers/gpu/drm/panfrost/ F: include/uapi/drm/panfrost_drm.h @@ -1994,7 +2091,7 @@ F: drivers/crypto/amlogic/ ARM/Amlogic Meson SoC Sound Drivers M: Jerome Brunet -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/amlogic* F: sound/soc/meson/ @@ -2032,7 +2129,7 @@ F: drivers/*/*alpine* ARM/APPLE MACHINE SOUND DRIVERS M: Martin Povišer L: asahi@lists.linux.dev -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/adi,ssm3515.yaml F: Documentation/devicetree/bindings/sound/apple,* @@ -2175,6 +2272,7 @@ N: clps711x ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE M: Hartley Sweeten M: Alexander Sverdlin +M: Nikita Shubin L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/iio/adc/cirrus,ep9301-adc.yaml @@ -2432,18 +2530,28 @@ N: lpc18xx ARM/LPC32XX SOC SUPPORT M: Vladimir Zapolskiy +M: Piotr Wojtaszczyk L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://github.com/vzapolskiy/linux-lpc32xx.git F: Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml F: arch/arm/boot/dts/nxp/lpc/lpc32* F: arch/arm/mach-lpc32xx/ +F: drivers/dma/lpc32xx-dmamux.c F: drivers/i2c/busses/i2c-pnx.c F: drivers/net/ethernet/nxp/lpc_eth.c F: drivers/usb/host/ohci-nxp.c F: drivers/watchdog/pnx4008_wdt.c N: lpc32xx +LPC32XX DMAMUX SUPPORT +M: J.M.B. 
Downing +M: Piotr Wojtaszczyk +R: Vladimir Zapolskiy +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: Documentation/devicetree/bindings/dma/nxp,lpc3220-dmamux.yaml + ARM/Marvell Dove/MV78xx0/Orion SOC support M: Andrew Lunn M: Sebastian Hesselbarth @@ -2731,7 +2839,7 @@ F: drivers/iommu/msm* F: drivers/mfd/ssbi.c F: drivers/mmc/host/mmci_qcom* F: drivers/mmc/host/sdhci-msm.c -F: drivers/pci/controller/dwc/pcie-qcom.c +F: drivers/pci/controller/dwc/pcie-qcom* F: drivers/phy/qualcomm/ F: drivers/power/*/msm* F: drivers/reset/reset-qcom-* @@ -3624,7 +3732,7 @@ F: arch/arm/boot/dts/microchip/at91-tse850-3.dts AXENTIA ASOC DRIVERS M: Peter Rosin -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/axentia,* F: sound/soc/atmel/tse850-pcm5142.c @@ -3786,10 +3894,9 @@ F: Documentation/filesystems/befs.rst F: fs/befs/ BFQ I/O SCHEDULER -M: Paolo Valente -M: Jens Axboe +M: Yu Kuai L: linux-block@vger.kernel.org -S: Maintained +S: Odd Fixes F: Documentation/block/bfq-iosched.rst F: block/bfq-* @@ -3936,7 +4043,7 @@ F: Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml F: drivers/iio/imu/bmi323/ BPF JIT for ARC -M: Shahab Vahedi +M: Shahab Vahedi L: bpf@vger.kernel.org S: Maintained F: arch/arc/net/ @@ -4103,6 +4210,7 @@ F: include/uapi/linux/btf* F: include/uapi/linux/filter.h F: kernel/bpf/ F: kernel/trace/bpf_trace.c +F: lib/buildid.c F: lib/test_bpf.c F: net/bpf/ F: net/core/filter.c @@ -4223,6 +4331,7 @@ L: bpf@vger.kernel.org S: Maintained F: kernel/bpf/stackmap.c F: kernel/trace/bpf_trace.c +F: lib/buildid.c BROADCOM ASP 2.0 ETHERNET DRIVER M: Justin Chen @@ -4742,7 +4851,7 @@ F: include/uapi/linux/bsg.h BT87X AUDIO DRIVER M: Clemens Ladisch -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git F: Documentation/sound/cards/bt87x.rst @@ -4804,7 +4913,7 @@ F: drivers/net/can/bxcan.c C-MEDIA CMI8788 DRIVER M: Clemens Ladisch -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git F: sound/pci/oxygen/ @@ -5101,10 +5210,8 @@ F: Documentation/devicetree/bindings/media/cec/cec-gpio.yaml F: drivers/media/cec/platform/cec-gpio/ CELL BROADBAND ENGINE ARCHITECTURE -M: Arnd Bergmann L: linuxppc-dev@lists.ozlabs.org -S: Supported -W: http://www.ibm.com/developerworks/power/cell/ +S: Orphan F: arch/powerpc/include/asm/cell*.h F: arch/powerpc/include/asm/spu*.h F: arch/powerpc/include/uapi/asm/spu*.h @@ -5145,6 +5252,7 @@ S: Maintained F: Documentation/admin-guide/module-signing.rst F: certs/ F: scripts/sign-file.c +F: scripts/ssl-common.h F: tools/certs/ CFAG12864B LCD DRIVER @@ -5620,8 +5728,7 @@ L: linux-cxl@vger.kernel.org S: Maintained F: Documentation/driver-api/cxl F: drivers/cxl/ -F: include/linux/einj-cxl.h -F: include/linux/cxl-event.h +F: include/cxl/ F: include/uapi/linux/cxl_mem.h F: tools/testing/cxl/ @@ -5703,9 +5810,12 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git F: Documentation/admin-guide/cgroup-v1/cpusets.rst F: include/linux/cpuset.h +F: kernel/cgroup/cpuset-internal.h +F: kernel/cgroup/cpuset-v1.c F: kernel/cgroup/cpuset.c F: tools/testing/selftests/cgroup/test_cpuset.c F: tools/testing/selftests/cgroup/test_cpuset_prs.sh +F: 
tools/testing/selftests/cgroup/test_cpuset_v1_base.sh CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG) M: Johannes Weiner @@ -5824,6 +5934,9 @@ CPU POWER MONITORING SUBSYSTEM M: Thomas Renninger M: Shuah Khan M: Shuah Khan +M: John B. Wyatt IV +M: John B. Wyatt IV +M: John Kacur L: linux-pm@vger.kernel.org S: Maintained F: tools/power/cpupower/ @@ -6480,6 +6593,12 @@ F: include/net/devlink.h F: include/uapi/linux/devlink.h F: net/devlink/ +DFROBOT SD2405AL RTC DRIVER +M: Tóth János +L: linux-rtc@vger.kernel.org +S: Maintained +F: drivers/rtc/rtc-sd2405al.c + DH ELECTRONICS IMX6 DHCOM/DHCOR BOARD SUPPORT M: Christoph Niedermaier L: kernel@dh-electronics.com @@ -6506,6 +6625,7 @@ F: Documentation/devicetree/bindings/regulator/da92*.txt F: Documentation/devicetree/bindings/regulator/dlg,da9*.yaml F: Documentation/devicetree/bindings/regulator/dlg,slg51000.yaml F: Documentation/devicetree/bindings/sound/da[79]*.txt +F: Documentation/devicetree/bindings/sound/dlg,da7213.yaml F: Documentation/devicetree/bindings/thermal/dlg,da9062-thermal.yaml F: Documentation/devicetree/bindings/watchdog/dlg,da9062-watchdog.yaml F: Documentation/hwmon/da90??.rst @@ -6666,6 +6786,7 @@ F: drivers/dma-buf/dma-heap.c F: drivers/dma-buf/heaps/* F: include/linux/dma-heap.h F: include/uapi/linux/dma-heap.h +F: tools/testing/selftests/dmabuf-heaps/ DMC FREQUENCY DRIVER FOR SAMSUNG EXYNOS5422 M: Lukasz Luba @@ -6717,6 +6838,7 @@ DOCUMENTATION PROCESS M: Jonathan Corbet L: workflows@vger.kernel.org S: Maintained +F: Documentation/dev-tools/ F: Documentation/maintainer/ F: Documentation/process/ @@ -6724,6 +6846,7 @@ DOCUMENTATION REPORTING ISSUES M: Thorsten Leemhuis L: linux-doc@vger.kernel.org S: Maintained +F: Documentation/admin-guide/bug-bisect.rst F: Documentation/admin-guide/quickly-build-trimmed-linux.rst F: Documentation/admin-guide/reporting-issues.rst F: Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst @@ -7338,10 +7461,10 @@ F: drivers/gpu/drm/udl/ DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS) M: Rodrigo Siqueira -M: Melissa Wen M: Maíra Canal R: Haneen Mohammed -R: Daniel Vetter +R: Simona Vetter +R: Melissa Wen L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -7374,7 +7497,7 @@ F: drivers/gpu/drm/panel/panel-widechips-ws2401.c DRM DRIVERS M: David Airlie -M: Daniel Vetter +M: Simona Vetter L: dri-devel@lists.freedesktop.org S: Maintained B: https://gitlab.freedesktop.org/drm @@ -7470,7 +7593,6 @@ M: Kyungmin Park L: dri-devel@lists.freedesktop.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git -F: Documentation/devicetree/bindings/display/exynos/ F: Documentation/devicetree/bindings/display/samsung/ F: drivers/gpu/drm/exynos/ F: include/uapi/drm/exynos_drm.h @@ -7710,6 +7832,8 @@ F: drivers/gpu/drm/xlnx/ DRM GPU SCHEDULER M: Luben Tuikov M: Matthew Brost +M: Danilo Krummrich +M: Philipp Stanner L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -8130,7 +8254,7 @@ F: drivers/edac/ti_edac.c EDIROL UA-101/UA-1000 DRIVER M: Clemens Ladisch -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git F: sound/usb/misc/ua101.c @@ -8345,6 +8469,7 @@ F: include/linux/mii.h F: include/linux/of_net.h F: include/linux/phy.h F: include/linux/phy_fixed.h +F: include/linux/phy_link_topology.h F: include/linux/phylib_stubs.h 
F: include/linux/platform_data/mdio-bcm-unimac.h F: include/linux/platform_data/mdio-gpio.h @@ -8360,6 +8485,7 @@ L: netdev@vger.kernel.org L: rust-for-linux@vger.kernel.org S: Maintained F: rust/kernel/net/phy.rs +F: rust/kernel/net/phy/reg.rs EXEC & BINFMT API, ELF R: Eric Biederman @@ -8384,6 +8510,7 @@ N: binfmt EXFAT FILE SYSTEM M: Namjae Jeon M: Sungjong Seo +R: Yuezhang Mo L: linux-fsdevel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/exfat.git @@ -8467,6 +8594,13 @@ F: lib/bootconfig.c F: tools/bootconfig/* F: tools/bootconfig/scripts/* +EXTRON DA HD 4K PLUS CEC DRIVER +M: Hans Verkuil +L: linux-media@vger.kernel.org +S: Maintained +T: git git://linuxtv.org/media_tree.git +F: drivers/media/cec/usb/extron-da-hd-4k-plus/ + EXYNOS DP DRIVER M: Jingoo Han L: dri-devel@lists.freedesktop.org @@ -8542,6 +8676,7 @@ M: Akinobu Mita S: Supported F: Documentation/fault-injection/ F: lib/fault-inject.c +F: tools/testing/fault-injection/ FBTFT Framebuffer drivers L: dri-devel@lists.freedesktop.org @@ -8603,6 +8738,7 @@ M: Christian Brauner R: Jan Kara L: linux-fsdevel@vger.kernel.org S: Maintained +T: git https://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs.git F: fs/* F: include/linux/fs.h F: include/linux/fs_types.h @@ -8680,7 +8816,7 @@ F: drivers/net/can/usb/f81604.c FIREWIRE AUDIO DRIVERS and IEC 61883-1/6 PACKET STREAMING ENGINE M: Clemens Ladisch M: Takashi Sakamoto -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git F: include/uapi/sound/firewire.h @@ -8754,7 +8890,7 @@ F: drivers/input/joystick/fsia6b.c FOCUSRITE SCARLETT2 MIXER DRIVER (Scarlett Gen 2+ and Clarett) M: Geoffrey D. Bennett -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained W: https://github.com/geoffreybennett/scarlett-gen2 B: https://github.com/geoffreybennett/scarlett-gen2/issues @@ -8778,7 +8914,7 @@ F: include/linux/fortify-string.h F: lib/fortify_kunit.c F: lib/memcpy_kunit.c F: lib/test_fortify/* -F: scripts/test_fortify.sh +K: \bunsafe_memcpy\b K: \b__NO_FORTIFY\b FPGA DFL DRIVERS @@ -8815,7 +8951,7 @@ W: https://floatingpoint.billm.au/ F: arch/x86/math-emu/ FRAMEBUFFER CORE -M: Daniel Vetter +M: Simona Vetter S: Odd Fixes T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: drivers/video/fbdev/core/ @@ -9012,6 +9148,7 @@ M: Herve Codina L: linuxppc-dev@lists.ozlabs.org S: Maintained F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,cpm1-scc-qmc.yaml +F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml F: drivers/soc/fsl/qe/qmc.c F: include/soc/fsl/qe/qmc.h @@ -9027,9 +9164,11 @@ M: Herve Codina L: linuxppc-dev@lists.ozlabs.org S: Maintained F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,cpm1-tsa.yaml +F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml F: drivers/soc/fsl/qe/tsa.c F: drivers/soc/fsl/qe/tsa.h F: include/dt-bindings/soc/cpm1-fsl,tsa.h +F: include/dt-bindings/soc/qe-fsl,tsa.h FREESCALE QUICC ENGINE UCC ETHERNET DRIVER L: netdev@vger.kernel.org @@ -9073,7 +9212,7 @@ M: Shengjiu Wang M: Xiubo Li R: Fabio Estevam R: Nicolin Chen -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org S: Maintained F: sound/soc/fsl/fsl* @@ -9083,7 +9222,7 @@ FREESCALE SOC LPC32XX SOUND DRIVERS M: J.M.B. 
Downing M: Piotr Wojtaszczyk R: Vladimir Zapolskiy -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org S: Maintained F: Documentation/devicetree/bindings/sound/nxp,lpc3220-i2s.yaml @@ -9091,7 +9230,7 @@ F: sound/soc/fsl/lpc3xxx-* FREESCALE SOC SOUND QMC DRIVER M: Herve Codina -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org S: Maintained F: Documentation/devicetree/bindings/sound/fsl,qmc-audio.yaml @@ -10877,6 +11016,7 @@ M: Nuno Sa R: Olivier Moysan L: linux-iio@vger.kernel.org S: Maintained +F: Documentation/ABI/testing/debugfs-iio-backend F: drivers/iio/industrialio-backend.c F: include/linux/iio/backend.h @@ -10979,6 +11119,7 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml F: Documentation/devicetree/bindings/gpu/img,powervr-sgx.yaml F: Documentation/gpu/imagination/ +F: drivers/gpu/drm/ci/xfails/powervr* F: drivers/gpu/drm/imagination/ F: include/uapi/drm/pvr_drm.h @@ -11016,7 +11157,7 @@ F: drivers/iio/pressure/dps310.c INFINEON PEB2466 ASoC CODEC M: Herve Codina -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/infineon,peb2466.yaml F: sound/soc/codecs/peb2466.c @@ -11104,10 +11245,17 @@ F: Documentation/devicetree/bindings/serio/ F: Documentation/input/ F: drivers/input/ F: include/dt-bindings/input/ +F: include/linux/gameport.h +F: include/linux/i8042.h F: include/linux/input.h F: include/linux/input/ +F: include/linux/libps2.h +F: include/linux/serio.h +F: include/uapi/linux/gameport.h F: include/uapi/linux/input-event-codes.h F: include/uapi/linux/input.h +F: include/uapi/linux/serio.h +F: include/uapi/linux/uinput.h INPUT MULTITOUCH (MT) PROTOCOL M: Henrik Rydberg @@ -11134,6 +11282,16 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git F: security/integrity/ F: security/integrity/ima/ +INTEGRITY POLICY ENFORCEMENT (IPE) +M: Fan Wu +L: linux-security-module@vger.kernel.org +S: Supported +T: git https://github.com/microsoft/ipe.git +F: Documentation/admin-guide/LSM/ipe.rst +F: Documentation/security/ipe.rst +F: scripts/ipe/ +F: security/ipe/ + INTEL 810/815 FRAMEBUFFER DRIVER M: Antonino Daplas L: linux-fbdev@vger.kernel.org @@ -11162,7 +11320,7 @@ M: Bard Liao M: Ranjani Sridharan M: Kai Vehmanen R: Pierre-Louis Bossart -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Supported F: sound/soc/intel/ @@ -11341,7 +11499,7 @@ F: include/uapi/linux/idxd.h INTEL IN FIELD SCAN (IFS) DEVICE M: Jithu Joseph -R: Ashok Raj +R: Ashok Raj R: Tony Luck S: Maintained F: drivers/platform/x86/intel/ifs @@ -11477,6 +11635,24 @@ S: Maintained F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update F: drivers/fpga/intel-m10-bmc-sec-update.c +INTEL MID (Mobile Internet Device) PLATFORM +M: Andy Shevchenko +L: linux-kernel@vger.kernel.org +S: Supported +F: arch/x86/include/asm/intel-mid.h +F: arch/x86/pci/intel_mid_pci.c +F: arch/x86/platform/intel-mid/ +F: drivers/extcon/extcon-intel-mrfld.c +F: drivers/iio/adc/intel_mrfld_adc.c +F: drivers/mfd/intel_soc_pmic_mrfld.c +F: drivers/platform/x86/intel/mrfld_pwrbtn.c +F: drivers/platform/x86/intel_scu_* +F: drivers/staging/media/atomisp/ +F: drivers/watchdog/intel-mid_wdt.c +F: include/linux/mfd/intel_soc_pmic_mrfld.h +F: 
include/linux/platform_data/x86/intel-mid_wdt.h +F: include/linux/platform_data/x86/intel_scu_ipc.h + INTEL P-Unit IPC DRIVER M: Zha Qipeng L: platform-driver-x86@vger.kernel.org @@ -11539,8 +11715,8 @@ F: drivers/counter/intel-qep.c INTEL SCU DRIVERS M: Mika Westerberg S: Maintained -F: arch/x86/include/asm/intel_scu_ipc.h F: drivers/platform/x86/intel_scu_* +F: include/linux/platform_data/x86/intel_scu_ipc.h INTEL SDSI DRIVER M: David E. Box @@ -11615,7 +11791,8 @@ F: drivers/platform/x86/intel/uncore-frequency/ INTEL VENDOR SPECIFIC EXTENDED CAPABILITIES DRIVER M: David E. Box S: Supported -F: drivers/platform/x86/intel/vsec.* +F: drivers/platform/x86/intel/vsec.c +F: include/linux/intel_vsec.h INTEL VIRTUAL BUTTON DRIVER M: AceLan Kao @@ -11738,6 +11915,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git F: drivers/iommu/dma-iommu.c F: drivers/iommu/dma-iommu.h F: drivers/iommu/iova.c +F: include/linux/iommu-dma.h F: include/linux/iova.h IOMMU SUBSYSTEM @@ -11826,7 +12004,7 @@ F: drivers/tty/ipwireless/ IRON DEVICE AUDIO CODEC DRIVERS M: Kiseok Jo -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/irondevice,* F: sound/soc/codecs/sma* @@ -12168,6 +12346,7 @@ F: include/linux/randomize_kstack.h F: kernel/configs/hardening.config F: lib/usercopy_kunit.c F: mm/usercopy.c +F: security/Kconfig.hardening K: \b(add|choose)_random_kstack_offset\b K: \b__check_(object_size|heap_object)\b K: \b__counted_by\b @@ -12284,7 +12463,7 @@ F: virt/kvm/* KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) M: Marc Zyngier M: Oliver Upton -R: James Morse +R: Joey Gouly R: Suzuki K Poulose R: Zenghui Yu L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -12308,6 +12487,7 @@ L: kvm@vger.kernel.org L: loongarch@lists.linux.dev S: Maintained T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +F: Documentation/virt/kvm/loongarch/ F: arch/loongarch/include/asm/kvm* F: arch/loongarch/include/uapi/asm/kvm* F: arch/loongarch/kvm/ @@ -12926,6 +13106,7 @@ M: Michael Ellerman R: Nicholas Piggin R: Christophe Leroy R: Naveen N Rao +R: Madhavan Srinivasan L: linuxppc-dev@lists.ozlabs.org S: Supported W: https://github.com/linuxppc/wiki/wiki @@ -13009,6 +13190,7 @@ R: Daniel Lustig R: Joel Fernandes L: linux-kernel@vger.kernel.org L: linux-arch@vger.kernel.org +L: lkmm@lists.linux.dev S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: Documentation/atomic_bitops.txt @@ -13277,6 +13459,16 @@ S: Maintained F: Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml F: drivers/iio/dac/ltc1660.c +LTC2664 IIO DAC DRIVER +M: Michael Hennerich +M: Kim Seer Paller +L: linux-iio@vger.kernel.org +S: Supported +W: https://ez.analog.com/linux-software-drivers +F: Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml +F: Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml +F: drivers/iio/dac/ltc2664.c + LTC2688 IIO DAC DRIVER M: Nuno Sá L: linux-iio@vger.kernel.org @@ -13517,7 +13709,7 @@ S: Maintained F: Documentation/devicetree/bindings/mfd/marvell,88pm886-a1.yaml F: drivers/input/misc/88pm886-onkey.c F: drivers/mfd/88pm886.c -F: drivers/regulators/88pm886-regulator.c +F: drivers/regulator/88pm886-regulator.c F: include/linux/mfd/88pm886.h MARVELL ARMADA 3700 PHY DRIVERS @@ -13576,7 +13768,6 @@ M: Sebastian Hesselbarth L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/marvell/mv643xx_eth.* -F: include/linux/mv643xx.h 
MARVELL MV88X3310 PHY DRIVER M: Russell King @@ -13765,7 +13956,7 @@ F: drivers/media/i2c/max96717.c MAX9860 MONO AUDIO VOICE CODEC DRIVER M: Peter Rosin -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/max9860.txt F: sound/soc/codecs/max9860.* @@ -14228,8 +14419,8 @@ M: Sean Wang L: linux-bluetooth@vger.kernel.org L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) S: Maintained +F: Documentation/devicetree/bindings/net/bluetooth/mediatek,bluetooth.txt F: Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7921s-bluetooth.yaml -F: Documentation/devicetree/bindings/net/mediatek-bluetooth.txt F: drivers/bluetooth/btmtkuart.c MEDIATEK BOARD LEVEL SHUTDOWN DRIVERS @@ -14507,7 +14698,7 @@ MELLANOX ETHERNET DRIVER (mlx4_en) M: Tariq Toukan L: netdev@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: https://patchwork.kernel.org/project/netdevbpf/list/ F: drivers/net/ethernet/mellanox/mlx4/en_* @@ -14516,7 +14707,7 @@ M: Saeed Mahameed M: Tariq Toukan L: netdev@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: https://patchwork.kernel.org/project/netdevbpf/list/ F: drivers/net/ethernet/mellanox/mlx5/core/en_* @@ -14524,7 +14715,7 @@ MELLANOX ETHERNET INNOVA DRIVERS R: Boris Pismenny L: netdev@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: https://patchwork.kernel.org/project/netdevbpf/list/ F: drivers/net/ethernet/mellanox/mlx5/core/en_accel/* F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* @@ -14535,7 +14726,7 @@ M: Ido Schimmel M: Petr Machata L: netdev@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: https://patchwork.kernel.org/project/netdevbpf/list/ F: drivers/net/ethernet/mellanox/mlxsw/ F: tools/testing/selftests/drivers/net/mlxsw/ @@ -14544,7 +14735,7 @@ MELLANOX FIRMWARE FLASH LIBRARY (mlxfw) M: mlxsw@nvidia.com L: netdev@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: https://patchwork.kernel.org/project/netdevbpf/list/ F: drivers/net/ethernet/mellanox/mlxfw/ @@ -14563,7 +14754,7 @@ M: Tariq Toukan L: netdev@vger.kernel.org L: linux-rdma@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: https://patchwork.kernel.org/project/netdevbpf/list/ F: drivers/net/ethernet/mellanox/mlx4/ F: include/linux/mlx4/ @@ -14572,7 +14763,7 @@ MELLANOX MLX4 IB driver M: Yishai Hadas L: linux-rdma@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: http://patchwork.kernel.org/project/linux-rdma/list/ F: drivers/infiniband/hw/mlx4/ F: include/linux/mlx4/ @@ -14585,7 +14776,7 @@ M: Tariq Toukan L: netdev@vger.kernel.org L: linux-rdma@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: https://patchwork.kernel.org/project/netdevbpf/list/ F: Documentation/networking/device_drivers/ethernet/mellanox/ F: drivers/net/ethernet/mellanox/mlx5/core/ @@ -14595,7 +14786,7 @@ MELLANOX MLX5 IB driver M: Leon Romanovsky L: linux-rdma@vger.kernel.org S: Supported -W: http://www.mellanox.com +W: https://www.nvidia.com/networking/ Q: http://patchwork.kernel.org/project/linux-rdma/list/ F: drivers/infiniband/hw/mlx5/ F: include/linux/mlx5/ @@ -14823,6 +15014,7 @@ M: Alexander Duyck M: Jakub Kicinski R: 
kernel-team@meta.com S: Supported +F: Documentation/networking/device_drivers/ethernet/meta/ F: drivers/net/ethernet/meta/ METHODE UDPU SUPPORT @@ -14897,7 +15089,7 @@ F: drivers/spi/spi-at91-usart.c MICROCHIP AUDIO ASOC DRIVERS M: Claudiu Beznea -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/sound/atmel* F: Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt @@ -14969,6 +15161,13 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/microchip/lan743x_* +MICROCHIP LAN8650/1 10BASE-T1S MACPHY ETHERNET DRIVER +M: Parthiban Veerasooran +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/net/microchip,lan8650.yaml +F: drivers/net/ethernet/microchip/lan865x/lan865x.c + MICROCHIP LAN87xx/LAN937x T1 PHY DRIVER M: Arun Ramadoss R: UNGLinuxDriver@microchip.com @@ -15038,6 +15237,13 @@ F: Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml F: drivers/nvmem/microchip-otpc.c F: include/dt-bindings/nvmem/microchip,sama7g5-otpc.h +MICROCHIP PAC1921 POWER/CURRENT MONITOR DRIVER +M: Matteo Martelli +L: linux-iio@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml +F: drivers/iio/adc/pac1921.c + MICROCHIP PAC1934 POWER/ENERGY MONITOR DRIVER M: Marius Cristea L: linux-iio@vger.kernel.org @@ -15218,6 +15424,12 @@ S: Maintained F: Documentation/hwmon/surface_fan.rst F: drivers/hwmon/surface_fan.c +MICROSOFT SURFACE SENSOR THERMAL DRIVER +M: Maximilian Luz +L: linux-hwmon@vger.kernel.org +S: Maintained +F: drivers/hwmon/surface_temp.c + MICROSOFT SURFACE GPE LID SUPPORT DRIVER M: Maximilian Luz L: platform-driver-x86@vger.kernel.org @@ -15470,6 +15682,9 @@ F: include/dt-bindings/clock/mobileye,eyeq5-clk.h MODULE SUPPORT M: Luis Chamberlain +R: Petr Pavlu +R: Sami Tolvanen +R: Daniel Gomez L: linux-modules@vger.kernel.org L: linux-kernel@vger.kernel.org S: Maintained @@ -15746,7 +15961,7 @@ F: include/linux/mtd/*nand*.h NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER M: Daniel Mack -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained W: http://www.native-instruments.com F: sound/usb/caiaq/ @@ -15788,6 +16003,7 @@ M: Breno Leitao S: Maintained F: Documentation/networking/netconsole.rst F: drivers/net/netconsole.c +F: tools/testing/selftests/drivers/net/netcons_basic.sh NETDEVSIM M: Jakub Kicinski @@ -16516,7 +16732,7 @@ F: drivers/extcon/extcon-ptn5150.c NXP SGTL5000 DRIVER M: Fabio Estevam -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/fsl,sgtl5000.yaml F: sound/soc/codecs/sgtl5000* @@ -16540,7 +16756,7 @@ K: "nxp,tda998x" NXP TFA9879 DRIVER M: Peter Rosin -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/nxp,tfa9879.yaml F: sound/soc/codecs/tfa9879* @@ -16552,7 +16768,7 @@ F: drivers/nfc/nxp-nci NXP/Goodix TFA989X (TFA1) DRIVER M: Stephan Gerhold -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml F: sound/soc/codecs/tfa989x.c @@ -16638,7 +16854,7 @@ F: include/uapi/misc/ocxl.h OMAP AUDIO SUPPORT M: Peter Ujfalusi M: Jarkko Nikula -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: 
linux-sound@vger.kernel.org L: linux-omap@vger.kernel.org S: Maintained F: sound/soc/ti/n810.c @@ -16831,6 +17047,7 @@ OMNIVISION OG01A1B SENSOR DRIVER M: Sakari Ailus L: linux-media@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml F: drivers/media/i2c/og01a1b.c OMNIVISION OV01A10 SENSOR DRIVER @@ -17101,6 +17318,14 @@ L: linux-rdma@vger.kernel.org S: Supported F: drivers/infiniband/ulp/opa_vnic +OPEN ALLIANCE 10BASE-T1S MACPHY SERIAL INTERFACE FRAMEWORK +M: Parthiban Veerasooran +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/networking/oa-tc6-framework.rst +F: drivers/net/ethernet/oa_tc6.c +F: include/linux/oa_tc6.h + OPEN FIRMWARE AND FLATTENED DEVICE TREE M: Rob Herring M: Saravana Kannan @@ -17186,7 +17411,7 @@ F: include/linux/pm_opp.h OPL4 DRIVER M: Clemens Ladisch -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git F: sound/drivers/opl4/ @@ -17412,7 +17637,7 @@ PCI DRIVER FOR ALTERA PCIE IP M: Joyce Ooi L: linux-pci@vger.kernel.org S: Supported -F: Documentation/devicetree/bindings/pci/altera-pcie.txt +F: Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml F: drivers/pci/controller/pcie-altera.c PCI DRIVER FOR APPLIEDMICRO XGENE @@ -17644,7 +17869,7 @@ PCI MSI DRIVER FOR ALTERA MSI IP M: Joyce Ooi L: linux-pci@vger.kernel.org S: Supported -F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt +F: Documentation/devicetree/bindings/pci/altr,msi-controller.yaml F: drivers/pci/controller/pcie-altera-msi.c PCI MSI DRIVER FOR APPLIEDMICRO XGENE @@ -17797,6 +18022,7 @@ M: Manivannan Sadhasivam L: linux-pci@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained +F: drivers/pci/controller/dwc/pcie-qcom-common.c F: drivers/pci/controller/dwc/pcie-qcom.c PCIE DRIVER FOR ROCKCHIP @@ -17833,6 +18059,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml +F: drivers/pci/controller/dwc/pcie-qcom-common.c F: drivers/pci/controller/dwc/pcie-qcom-ep.c PCMCIA SUBSYSTEM @@ -18244,6 +18471,7 @@ M: Bartosz Golaszewski L: linux-pm@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git +F: Documentation/driver-api/pwrseq.rst F: drivers/power/sequencing/ F: include/linux/pwrseq/ @@ -18354,7 +18582,7 @@ F: tools/testing/selftests/proc/ PROC SYSCTL M: Luis Chamberlain M: Kees Cook -M: Joel Granados +M: Joel Granados L: linux-kernel@vger.kernel.org L: linux-fsdevel@vger.kernel.org S: Maintained @@ -18566,7 +18794,7 @@ F: drivers/crypto/intel/qat/ QCOM AUDIO (ASoC) DRIVERS M: Srinivas Kandagatla -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/soc/qcom/qcom,apr* @@ -18788,7 +19016,7 @@ M: Bryan O'Donoghue L: linux-media@vger.kernel.org S: Maintained F: Documentation/admin-guide/media/qcom_camss.rst -F: Documentation/devicetree/bindings/media/*camss* +F: Documentation/devicetree/bindings/media/qcom,*camss* F: drivers/media/platform/qcom/camss/ QUALCOMM CLOCK DRIVERS @@ -18803,7 +19031,6 @@ F: include/dt-bindings/clock/qcom,* QUALCOMM CLOUD AI (QAIC) DRIVER M: Jeffrey Hugo R: Carl Vanderlip -R: Pranjal Ramajor Asha Kanojiya L: linux-arm-msm@vger.kernel.org L: dri-devel@lists.freedesktop.org S: Supported @@ -18898,6 +19125,7 @@ L: 
linux-arm-msm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml F: drivers/soc/qcom/icc-bwmon.c +F: drivers/soc/qcom/trace_icc-bwmon.h QUALCOMM IOMMU M: Rob Clark @@ -19123,10 +19351,7 @@ F: drivers/char/random.c F: include/linux/random.h F: include/uapi/linux/random.h F: drivers/virt/vmgenid.c -F: include/vdso/getrandom.h -F: lib/vdso/getrandom.c -F: arch/x86/entry/vdso/vgetrandom* -F: arch/x86/include/asm/vdso/getrandom* +N: ^.*/vdso/[^/]*getrandom[^/]+$ RAPIDIO SUBSYSTEM M: Matt Porter @@ -19237,6 +19462,7 @@ S: Supported W: https://oss.oracle.com/projects/rds/ F: Documentation/networking/rds.rst F: net/rds/ +F: tools/testing/selftests/net/rds/ RDT - RESOURCE ALLOCATION M: Fenghua Yu @@ -19430,7 +19656,7 @@ F: drivers/net/ethernet/renesas/rtsn.* RENESAS IDT821034 ASoC CODEC M: Herve Codina -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/renesas,idt821034.yaml F: sound/soc/codecs/idt821034.c @@ -19739,6 +19965,14 @@ F: Documentation/ABI/*/sysfs-driver-hid-roccat* F: drivers/hid/hid-roccat* F: include/linux/hid-roccat* +ROCKCHIP CAN-FD DRIVER +M: Marc Kleine-Budde +R: kernel@pengutronix.de +L: linux-can@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/net/can/rockchip,rk3568v2-canfd.yaml +F: drivers/net/can/rockchip/ + ROCKCHIP CRYPTO DRIVERS M: Corentin Labbe L: linux-crypto@vger.kernel.org @@ -19765,6 +19999,13 @@ F: Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst F: drivers/media/platform/rockchip/rkisp1 F: include/uapi/linux/rkisp1-config.h +ROCKCHIP RK3568 RANDOM NUMBER GENERATOR SUPPORT +M: Daniel Golle +M: Aurelien Jarno +S: Maintained +F: Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml +F: drivers/char/hw_random/rockchip-rng.c + ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER M: Jacob Chen M: Ezequiel Garcia @@ -19807,6 +20048,12 @@ S: Supported F: drivers/power/supply/bd99954-charger.c F: drivers/power/supply/bd99954-charger.h +ROHM BH1745 COLOUR SENSOR +M: Mudit Sharma +L: linux-iio@vger.kernel.org +S: Maintained +F: drivers/iio/light/bh1745.c + ROHM BH1750 AMBIENT LIGHT SENSOR DRIVER M: Tomasz Duszynski S: Maintained @@ -19881,12 +20128,26 @@ T: git git://linuxtv.org/media_tree.git F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml F: drivers/media/platform/sunxi/sun8i-rotate/ +RPMB SUBSYSTEM +M: Jens Wiklander +L: linux-kernel@vger.kernel.org +S: Supported +F: drivers/misc/rpmb-core.c +F: include/linux/rpmb.h + RPMSG TTY DRIVER M: Arnaud Pouliquen L: linux-remoteproc@vger.kernel.org S: Maintained F: drivers/tty/rpmsg_tty.c +RTASE ETHERNET DRIVER +M: Justin Lai +M: Larry Chiu +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/realtek/rtase/ + RTL2830 MEDIA DRIVER L: linux-media@vger.kernel.org S: Orphan @@ -19953,6 +20214,7 @@ R: Björn Roy Baron R: Benno Lossin R: Andreas Hindborg R: Alice Ryhl +R: Trevor Gross L: rust-for-linux@vger.kernel.org S: Supported W: https://rust-for-linux.com @@ -20145,12 +20407,22 @@ F: security/safesetid/ SAMSUNG AUDIO (ASoC) DRIVERS M: Sylwester Nawrocki -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained B: mailto:linux-samsung-soc@vger.kernel.org F: Documentation/devicetree/bindings/sound/samsung* F: sound/soc/samsung/ +SAMSUNG EXYNOS850 SoC SUPPORT +M: Sam Protsenko +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 
+L: linux-samsung-soc@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml +F: arch/arm64/boot/dts/exynos/exynos850* +F: drivers/clk/samsung/clk-exynos850.c +F: include/dt-bindings/clock/exynos850.h + SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER M: Krzysztof Kozlowski L: linux-crypto@vger.kernel.org @@ -20338,6 +20610,19 @@ F: include/linux/wait.h F: include/uapi/linux/sched.h F: kernel/sched/ +SCHEDULER - SCHED_EXT +R: Tejun Heo +R: David Vernet +L: linux-kernel@vger.kernel.org +S: Maintained +W: https://github.com/sched-ext/scx +T: git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git +F: include/linux/sched/ext.h +F: kernel/sched/ext.h +F: kernel/sched/ext.c +F: tools/sched_ext/ +F: tools/testing/selftests/sched_ext + SCIOSENSE ENS160 MULTI-GAS SENSOR DRIVER M: Gustavo Silva S: Maintained @@ -20622,6 +20907,12 @@ S: Maintained F: Documentation/devicetree/bindings/iio/chemical/sensirion,scd4x.yaml F: drivers/iio/chemical/scd4x.c +SENSIRION SDP500 DIFFERENTIAL PRESSURE SENSOR DRIVER +M: Petar Stoykov +S: Maintained +F: Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml +F: drivers/iio/pressure/sdp500.c + SENSIRION SGP40 GAS SENSOR DRIVER M: Andreas Klinger S: Maintained @@ -20652,7 +20943,7 @@ F: drivers/media/rc/serial_ir.c SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus) M: Srinivas Kandagatla -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/slimbus/ F: drivers/slimbus/ @@ -21086,7 +21377,7 @@ F: Documentation/devicetree/bindings/i2c/socionext,synquacer-i2c.yaml F: drivers/i2c/busses/i2c-synquacer.c SOCIONEXT UNIPHIER SOUND DRIVER -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Orphan F: sound/soc/uniphier/ @@ -21345,7 +21636,7 @@ F: tools/testing/selftests/alsa SOUND - COMPRESSED AUDIO M: Vinod Koul -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git F: Documentation/sound/designs/compress-offload.rst @@ -21408,7 +21699,7 @@ M: Vinod Koul M: Bard Liao R: Pierre-Louis Bossart R: Sanyog Kale -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/soundwire.git F: Documentation/driver-api/soundwire/ @@ -21538,10 +21829,8 @@ F: include/linux/spmi.h F: include/trace/events/spmi.h SPU FILE SYSTEM -M: Jeremy Kerr L: linuxppc-dev@lists.ozlabs.org -S: Supported -W: http://www.ibm.com/developerworks/power/cell/ +S: Orphan F: Documentation/filesystems/spufs/spufs.rst F: arch/powerpc/platforms/cell/spufs/ @@ -21883,7 +22172,7 @@ F: kernel/static_call.c STI AUDIO (ASoC) DRIVERS M: Arnaud Pouliquen -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt F: sound/soc/sti/ @@ -21904,7 +22193,7 @@ F: drivers/media/usb/stk1160/ STM32 AUDIO (ASoC) DRIVERS M: Olivier Moysan M: Arnaud Pouliquen -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml F: Documentation/devicetree/bindings/sound/st,stm32-*.yaml @@ -22470,6 +22759,7 @@ M: Jens Wiklander R: Sumit Garg L: op-tee@lists.trustedfirmware.org S: 
Maintained +F: Documentation/ABI/testing/sysfs-class-tee F: Documentation/driver-api/tee.rst F: Documentation/tee/ F: Documentation/userspace-api/tee.rst @@ -22515,6 +22805,7 @@ M: Thierry Reding R: Krishna Reddy L: linux-tegra@vger.kernel.org S: Supported +F: drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c F: drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c F: drivers/iommu/tegra* @@ -22605,7 +22896,7 @@ F: drivers/irqchip/irq-xtensa-* TEXAS INSTRUMENTS ASoC DRIVERS M: Peter Ujfalusi -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml F: sound/soc/ti/ @@ -22614,18 +22905,17 @@ TEXAS INSTRUMENTS AUDIO (ASoC/HDA) DRIVERS M: Shenghao Ding M: Kevin Lu M: Baojun Xu -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/tas2552.txt F: Documentation/devicetree/bindings/sound/ti,tas2562.yaml F: Documentation/devicetree/bindings/sound/ti,tas2770.yaml F: Documentation/devicetree/bindings/sound/ti,tas27xx.yaml +F: Documentation/devicetree/bindings/sound/ti,tpa6130a2.yaml F: Documentation/devicetree/bindings/sound/ti,pcm1681.yaml F: Documentation/devicetree/bindings/sound/ti,pcm3168a.yaml F: Documentation/devicetree/bindings/sound/ti,tlv320*.yaml F: Documentation/devicetree/bindings/sound/ti,tlv320adcx140.yaml -F: Documentation/devicetree/bindings/sound/tlv320aic31xx.txt -F: Documentation/devicetree/bindings/sound/tpa6130a2.txt F: include/sound/tas2*.h F: include/sound/tlv320*.h F: include/sound/tpa6130a2-plat.h @@ -22983,7 +23273,7 @@ F: drivers/soc/ti/* TI LM49xxx FAMILY ASoC CODEC DRIVERS M: M R Swami Reddy M: Vishwas A Deshpande -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: sound/soc/codecs/isabelle* F: sound/soc/codecs/lm49453* @@ -22998,14 +23288,14 @@ F: drivers/iio/adc/ti-lmp92064.c TI PCM3060 ASoC CODEC DRIVER M: Kirill Marinushkin -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/pcm3060.txt F: sound/soc/codecs/pcm3060* TI TAS571X FAMILY ASoC CODEC DRIVER M: Kevin Cernekee -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Odd Fixes F: sound/soc/codecs/tas571x* @@ -23033,7 +23323,7 @@ F: drivers/iio/adc/ti-tsc2046.c TI TWL4030 SERIES SOC CODEC DRIVER M: Peter Ujfalusi -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: sound/soc/codecs/twl4030* @@ -23203,6 +23493,7 @@ Q: https://patchwork.kernel.org/project/linux-integrity/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git F: Documentation/devicetree/bindings/tpm/ F: drivers/char/tpm/ +F: tools/testing/selftests/tpm2/ TPS546D24 DRIVER M: Duke Du @@ -23215,9 +23506,8 @@ TQ SYSTEMS BOARD & DRIVER SUPPORT L: linux@ew.tq-group.com S: Supported W: https://www.tq-group.com/en/products/tq-embedded/ -F: arch/arm/boot/dts/imx*mba*.dts* -F: arch/arm/boot/dts/imx*tqma*.dts* -F: arch/arm/boot/dts/mba*.dtsi +F: arch/arm/boot/dts/nxp/imx/*mba*.dts* +F: arch/arm/boot/dts/nxp/imx/*tqma*.dts* F: arch/arm64/boot/dts/freescale/fsl-*tqml*.dts* F: arch/arm64/boot/dts/freescale/imx*mba*.dts* F: arch/arm64/boot/dts/freescale/imx*tqma*.dts* @@ -23385,7 +23675,8 @@ F: drivers/media/pci/tw686x/ U-BOOT ENVIRONMENT VARIABLES M: Rafał Miłecki S: 
Maintained -F: Documentation/devicetree/bindings/nvmem/u-boot,env.yaml +F: Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml +F: drivers/nvmem/layouts/u-boot-env.c F: drivers/nvmem/u-boot-env.c UACCE ACCELERATOR FRAMEWORK @@ -23502,6 +23793,15 @@ F: drivers/cdrom/cdrom.c F: include/linux/cdrom.h F: include/uapi/linux/cdrom.h +UNION-FIND +M: Xavier +L: linux-kernel@vger.kernel.org +S: Maintained +F: Documentation/core-api/union_find.rst +F: Documentation/translations/zh_CN/core-api/union_find.rst +F: include/linux/union_find.h +F: lib/union_find.c + UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER R: Alim Akhtar R: Avri Altman @@ -23699,7 +23999,7 @@ F: drivers/usb/storage/ USB MIDI DRIVER M: Clemens Ladisch -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git F: sound/usb/midi.* @@ -24235,6 +24535,7 @@ F: include/linux/vdpa.h F: include/linux/virtio*.h F: include/linux/vringh.h F: include/uapi/linux/virtio_*.h +F: net/vmw_vsock/virtio* F: tools/virtio/ F: tools/testing/selftests/drivers/net/virtio_net/ @@ -24358,7 +24659,7 @@ VIRTIO SOUND DRIVER M: Anton Yakovlev M: "Michael S. Tsirkin" L: virtualization@lists.linux.dev -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Maintained F: include/uapi/linux/virtio_snd.h F: sound/virtio/* @@ -24425,6 +24726,20 @@ F: include/uapi/linux/vsockmon.h F: net/vmw_vsock/ F: tools/testing/vsock/ +VMA +M: Andrew Morton +R: Liam R. Howlett +R: Vlastimil Babka +R: Lorenzo Stoakes +L: linux-mm@kvack.org +S: Maintained +W: https://www.linux-mm.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +F: mm/vma.c +F: mm/vma.h +F: mm/vma_internal.h +F: tools/testing/vma/ + VMALLOC M: Andrew Morton R: Uladzislau Rezki @@ -24814,6 +25129,17 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core F: Documentation/arch/x86/ F: Documentation/devicetree/bindings/x86/ F: arch/x86/ +F: tools/testing/selftests/x86 + +X86 CPUID DATABASE +M: Borislav Petkov +M: Thomas Gleixner +M: x86@kernel.org +R: Ahmed S. 
Darwish +L: x86-cpuid@lists.linux.dev +S: Maintained +W: https://x86-cpuid.org +F: tools/arch/x86/kcpuid/cpuid.csv X86 ENTRY CODE M: Andy Lutomirski @@ -25062,7 +25388,7 @@ F: include/xen/interface/io/usbif.h XEN SOUND FRONTEND DRIVER M: Oleksandr Andrushchenko L: xen-devel@lists.xenproject.org (moderated for non-subscribers) -L: alsa-devel@alsa-project.org (moderated for non-subscribers) +L: linux-sound@vger.kernel.org S: Supported F: sound/xen/* @@ -25256,6 +25582,19 @@ S: Maintained F: drivers/spi/spi-xtensa-xtfpga.c F: sound/soc/xtensa/xtfpga-i2s.c +XZ EMBEDDED +M: Lasse Collin +S: Maintained +W: https://tukaani.org/xz/embedded.html +B: https://github.com/tukaani-project/xz-embedded/issues +C: irc://irc.libera.chat/tukaani +F: Documentation/staging/xz.rst +F: include/linux/decompress/unxz.h +F: include/linux/xz.h +F: lib/decompress_unxz.c +F: lib/xz/ +F: scripts/xz_wrap.sh + YAM DRIVER FOR AX.25 M: Jean-Paul Roubelat L: linux-hams@vger.kernel.org @@ -25280,7 +25619,6 @@ F: tools/net/ynl/ YEALINK PHONE DRIVER M: Henk Vergonet -L: usbb2k-api-dev@nongnu.org S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* diff --git a/Makefile b/Makefile index 34bd1d5f967201..c5493c0c0ca128 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 -PATCHLEVEL = 11 +PATCHLEVEL = 12 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc2 NAME = Baby Opossum Posse # *DOCUMENTATION* @@ -579,10 +579,6 @@ else RUSTC_OR_CLIPPY = $(RUSTC) endif -ifdef RUST_LIB_SRC - export RUST_LIB_SRC -endif - # Allows the usage of unstable features in stable compilers. export RUSTC_BOOTSTRAP := 1 @@ -649,9 +645,11 @@ endif # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. -# CC_VERSION_TEXT is referenced from Kconfig (so it needs export), -# and from include/config/auto.conf.cmd to detect the compiler upgrade. +# CC_VERSION_TEXT and RUSTC_VERSION_TEXT are referenced from Kconfig (so they +# need export), and from include/config/auto.conf.cmd to detect the compiler +# upgrade. CC_VERSION_TEXT = $(subst $(pound),,$(shell LC_ALL=C $(CC) --version 2>/dev/null | head -n 1)) +RUSTC_VERSION_TEXT = $(subst $(pound),,$(shell $(RUSTC) --version 2>/dev/null)) ifneq ($(findstring clang,$(CC_VERSION_TEXT)),) include $(srctree)/scripts/Makefile.clang @@ -672,7 +670,7 @@ ifdef config-build # KBUILD_DEFCONFIG may point out an alternative default configuration # used for 'make defconfig' include $(srctree)/arch/$(SRCARCH)/Makefile -export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT +export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT RUSTC_VERSION_TEXT config: outputmakefile scripts_basic FORCE $(Q)$(MAKE) $(build)=scripts/kconfig $@ @@ -928,6 +926,7 @@ ifdef CONFIG_SHADOW_CALL_STACK ifndef CONFIG_DYNAMIC_SCS CC_FLAGS_SCS := -fsanitize=shadow-call-stack KBUILD_CFLAGS += $(CC_FLAGS_SCS) +KBUILD_RUSTFLAGS += -Zsanitizer=shadow-call-stack endif export CC_FLAGS_SCS endif @@ -952,6 +951,16 @@ endif ifdef CONFIG_CFI_CLANG CC_FLAGS_CFI := -fsanitize=kcfi +ifdef CONFIG_CFI_ICALL_NORMALIZE_INTEGERS + CC_FLAGS_CFI += -fsanitize-cfi-icall-experimental-normalize-integers +endif +ifdef CONFIG_RUST + # Always pass -Zsanitizer-cfi-normalize-integers as CONFIG_RUST selects + # CONFIG_CFI_ICALL_NORMALIZE_INTEGERS. 
+ RUSTC_FLAGS_CFI := -Zsanitizer=kcfi -Zsanitizer-cfi-normalize-integers + KBUILD_RUSTFLAGS += $(RUSTC_FLAGS_CFI) + export RUSTC_FLAGS_CFI +endif KBUILD_CFLAGS += $(CC_FLAGS_CFI) export CC_FLAGS_CFI endif @@ -1483,6 +1492,7 @@ endif # CONFIG_MODULES # Directories & files removed with 'make clean' CLEAN_FILES += vmlinux.symvers modules-only.symvers \ modules.builtin modules.builtin.modinfo modules.nsdeps \ + modules.builtin.ranges vmlinux.o.map \ compile_commands.json rust/test \ rust-project.json .vmlinux.objs .vmlinux.export.c @@ -1635,7 +1645,7 @@ help: echo '* dtbs - Build device tree blobs for enabled boards'; \ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'; \ echo ' dt_binding_check - Validate device tree binding documents and examples'; \ - echo ' dt_binding_schema - Build processed device tree binding schemas'; \ + echo ' dt_binding_schemas - Build processed device tree binding schemas'; \ echo ' dtbs_check - Validate device tree source files';\ echo '') @@ -1947,7 +1957,7 @@ clean: $(clean-dirs) -o -name '*.c.[012]*.*' \ -o -name '*.ll' \ -o -name '*.gcno' \ - -o -name '*.*.symversions' \) -type f -print \ + \) -type f -print \ -o -name '.tmp_*' -print \ | xargs rm -rf diff --git a/arch/Kconfig b/arch/Kconfig index 975dd22a2dbd22..8af374ea1adc24 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -17,6 +17,15 @@ config CPU_MITIGATIONS def_bool y endif +# +# Selected by architectures that need custom DMA operations for e.g. legacy +# IOMMUs not handled by dma-iommu. Drivers must never select this symbol. +# +config ARCH_HAS_DMA_OPS + depends on HAS_DMA + select DMA_OPS_HELPERS + bool + menu "General architecture-dependent options" config ARCH_HAS_SUBPAGE_FAULTS @@ -826,6 +835,38 @@ config CFI_CLANG https://clang.llvm.org/docs/ControlFlowIntegrity.html +config CFI_ICALL_NORMALIZE_INTEGERS + bool "Normalize CFI tags for integers" + depends on CFI_CLANG + depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS + help + This option normalizes the CFI tags for integer types so that all + integer types of the same size and signedness receive the same CFI + tag. + + The option is separate from CONFIG_RUST because it affects the ABI. + When working with build systems that care about the ABI, it is + convenient to be able to turn on this flag first, before Rust is + turned on. + + This option is necessary for using CFI with Rust. If unsure, say N. + +config HAVE_CFI_ICALL_NORMALIZE_INTEGERS + def_bool !GCOV_KERNEL && !KASAN + depends on CFI_CLANG + depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers) + help + Is CFI_ICALL_NORMALIZE_INTEGERS supported with the set of compilers + currently in use? + + This option defaults to false if GCOV or KASAN is enabled, as there is + an LLVM bug that makes normalized integers tags incompatible with + KASAN and GCOV. Kconfig currently does not have the infrastructure to + detect whether your rustc compiler contains the fix for this bug, so + it is assumed that it doesn't. If your compiler has the fix, you can + explicitly enable this option in your config file. The Kconfig logic + needed to detect this will be added in a future kernel release. + config CFI_PERMISSIVE bool "Use CFI in permissive mode" depends on CFI_CLANG @@ -862,7 +903,7 @@ config HAVE_CONTEXT_TRACKING_USER_OFFSTACK Architecture neither relies on exception_enter()/exception_exit() nor on schedule_user(). Also preempt_schedule_notrace() and preempt_schedule_irq() can't be called in a preemptible section - while context tracking is CONTEXT_USER. 
This feature reflects a sane + while context tracking is CT_STATE_USER. This feature reflects a sane entry implementation where the following requirements are met on critical entry code, ie: before user_exit() or after user_enter(): diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 50ff06d5b799c9..109a4cddcd1389 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -4,12 +4,12 @@ config ALPHA default y select ARCH_32BIT_USTAT_F_TINODE select ARCH_HAS_CURRENT_STACK_POINTER + select ARCH_HAS_DMA_OPS if PCI select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_NO_PREEMPT select ARCH_NO_SG_CHAIN select ARCH_USE_CMPXCHG_LOCKREF - select DMA_OPS if PCI select FORCE_PCI select PCI_DOMAINS if PCI select PCI_SYSCALL if PCI diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h index 91d4a4d9258cd2..ae1b96479d0cc2 100644 --- a/arch/alpha/include/asm/cmpxchg.h +++ b/arch/alpha/include/asm/cmpxchg.h @@ -3,17 +3,232 @@ #define _ALPHA_CMPXCHG_H /* - * Atomic exchange routines. + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). */ -#define ____xchg(type, args...) __arch_xchg ## type ## _local(args) -#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) -#include +static inline unsigned long +____xchg_u8(volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " insbl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extbl %2,%4,%0\n" + " mskbl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg_u16(volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " inswl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extwl %2,%4,%0\n" + " mskwl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg_u32(volatile int *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldl_l %0,%4\n" + " bis $31,%3,%1\n" + " stl_c %1,%2\n" + " beq %1,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg_u64(volatile long *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldq_l %0,%4\n" + " bis $31,%3,%1\n" + " stq_c %1,%2\n" + " beq %1,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid xchg(). */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____xchg(volatile void *ptr, unsigned long x, int size) +{ + return + size == 1 ? ____xchg_u8(ptr, x) : + size == 2 ? ____xchg_u16(ptr, x) : + size == 4 ? ____xchg_u32(ptr, x) : + size == 8 ? ____xchg_u64(ptr, x) : + (__xchg_called_with_bad_pointer(), x); +} + +/* + * Atomic compare and exchange. 
Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + */ + +static inline unsigned long +____cmpxchg_u8(volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " insbl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extbl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskbl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg_u16(volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " inswl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extwl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskwl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg_u32(volatile int *m, int old, int new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldl_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stl_c %1,%2\n" + " beq %1,3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldq_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stq_c %1,%2\n" + " beq %1,3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, + int size) +{ + return + size == 1 ? ____cmpxchg_u8(ptr, old, new) : + size == 2 ? ____cmpxchg_u16(ptr, old, new) : + size == 4 ? ____cmpxchg_u32(ptr, old, new) : + size == 8 ? ____cmpxchg_u64(ptr, old, new) : + (__cmpxchg_called_with_bad_pointer(), old); +} #define xchg_local(ptr, x) \ ({ \ __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_,\ + (__typeof__(*(ptr))) ____xchg((ptr), (unsigned long)_x_, \ sizeof(*(ptr))); \ }) @@ -21,7 +236,7 @@ ({ \ __typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (__typeof__(*(ptr))) ____cmpxchg((ptr), (unsigned long)_o_, \ (unsigned long)_n_, \ sizeof(*(ptr))); \ }) @@ -32,12 +247,6 @@ cmpxchg_local((ptr), (o), (n)); \ }) -#undef ____xchg -#undef ____cmpxchg -#define ____xchg(type, args...) __arch_xchg ##type(args) -#define ____cmpxchg(type, args...) __cmpxchg ##type(args) -#include - /* * The leading and the trailing memory barriers guarantee that these * operations are fully ordered. 
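For reference, the ____xchg()/____cmpxchg() helpers above feed both the barrier-free *_local() forms and the fully ordered arch_xchg()/arch_cmpxchg() wrappers defined just below, which issue smp_mb() on either side. A minimal usage sketch follows, assuming the generic cmpxchg()/cmpxchg_local() macros layered on top of these definitions; the example_claim*() names are illustrative only and not part of this patch.

#include <linux/atomic.h>
#include <linux/types.h>

/*
 * With a compile-time constant sizeof(*ptr), the ternary chain in
 * ____cmpxchg() collapses to a single ____cmpxchg_uNN() call, so the
 * size dispatch costs nothing at run time.
 */
static bool example_claim(int *slot)
{
	/* fully ordered: a barrier is issued before and after the ll/sc loop */
	return cmpxchg(slot, 0, 1) == 0;
}

static bool example_claim_cpu_local(int *slot)
{
	/* no barriers: only safe for data that never leaves this CPU */
	return cmpxchg_local(slot, 0, 1) == 0;
}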
@@ -48,7 +257,7 @@ __typeof__(*(ptr)) _x_ = (x); \ smp_mb(); \ __ret = (__typeof__(*(ptr))) \ - __arch_xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ + ____xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ smp_mb(); \ __ret; \ }) @@ -59,7 +268,7 @@ __typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _n_ = (n); \ smp_mb(); \ - __ret = (__typeof__(*(ptr))) __cmpxchg((ptr), \ + __ret = (__typeof__(*(ptr))) ____cmpxchg((ptr), \ (unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr)));\ smp_mb(); \ __ret; \ @@ -71,6 +280,4 @@ arch_cmpxchg((ptr), (o), (n)); \ }) -#undef ____cmpxchg - #endif /* _ALPHA_CMPXCHG_H */ diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h deleted file mode 100644 index 7adb80c6746ac3..00000000000000 --- a/arch/alpha/include/asm/xchg.h +++ /dev/null @@ -1,246 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ALPHA_CMPXCHG_H -#error Do not include xchg.h directly! -#else -/* - * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code - * except that local version do not have the expensive memory barrier. - * So this file is included twice from asm/cmpxchg.h. - */ - -/* - * Atomic exchange. - * Since it can be used to implement critical sections - * it must clobber "memory" (also for interrupts in UP). - */ - -static inline unsigned long -____xchg(_u8, volatile char *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " insbl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extbl %2,%4,%0\n" - " mskbl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -____xchg(_u16, volatile short *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " inswl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extwl %2,%4,%0\n" - " mskwl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -____xchg(_u32, volatile int *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldl_l %0,%4\n" - " bis $31,%3,%1\n" - " stl_c %1,%2\n" - " beq %1,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -static inline unsigned long -____xchg(_u64, volatile long *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldq_l %0,%4\n" - " bis $31,%3,%1\n" - " stq_c %1,%2\n" - " beq %1,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid xchg(). */ -extern void __xchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -____xchg(, volatile void *ptr, unsigned long x, int size) -{ - switch (size) { - case 1: - return ____xchg(_u8, ptr, x); - case 2: - return ____xchg(_u16, ptr, x); - case 4: - return ____xchg(_u32, ptr, x); - case 8: - return ____xchg(_u64, ptr, x); - } - __xchg_called_with_bad_pointer(); - return x; -} - -/* - * Atomic compare and exchange. 
Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. Success is - * indicated by comparing RETURN with OLD. - */ - -static inline unsigned long -____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " insbl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extbl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskbl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " inswl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extwl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskwl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -____cmpxchg(_u32, volatile int *m, int old, int new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldl_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stl_c %1,%2\n" - " beq %1,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -static inline unsigned long -____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldq_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stq_c %1,%2\n" - " beq %1,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). 
*/ -extern void __cmpxchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new, - int size) -{ - switch (size) { - case 1: - return ____cmpxchg(_u8, ptr, old, new); - case 2: - return ____cmpxchg(_u16, ptr, old, new); - case 4: - return ____cmpxchg(_u32, ptr, old, new); - case 8: - return ____cmpxchg(_u64, ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#endif diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index e94f621903feed..251b73c5481eaa 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -140,6 +140,12 @@ #define SO_PASSPIDFD 76 #define SO_PEERPIDFD 77 +#define SO_DEVMEM_LINEAR 78 +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 79 +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 80 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index e5f881bc82881c..c0424de9e7cda2 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -160,10 +160,10 @@ SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, .count = count }; - if (!arg.file) + if (!fd_file(arg)) return -EBADF; - error = iterate_dir(arg.file, &buf.ctx); + error = iterate_dir(fd_file(arg), &buf.ctx); if (error >= 0) error = buf.error; if (count != buf.count) @@ -1229,7 +1229,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len, unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { unsigned long limit; diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 6afae65e9a8b32..a9a38c80c4a7af 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index fd0b0a0d4686ac..5b248814204147 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -13,6 +13,7 @@ config ARC select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_NEED_CMPXCHG_1_EMU select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select ARCH_32BIT_OFF_T select BUILDTIME_TABLE_SORT @@ -553,7 +554,7 @@ config ARC_BUILTIN_DTB_NAME string "Built in DTB" help Set the name of the DTB to embed in the vmlinux binary - Leaving it blank selects the minimal "skeleton" dtb + Leaving it blank selects the "nsim_700" dtb. 
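Stepping back to the ARCH_NEED_CMPXCHG_1_EMU select added earlier in this arch/arc/Kconfig hunk: ARC only provides a native compare-and-exchange for 32-bit operands (the existing "case 4" dispatch), so the byte-wide case is routed to the generic cmpxchg_emu_u8() helper in the cmpxchg.h hunk further down. The sketch below shows what that helper gives callers, assuming the generic lib/cmpxchg-emu.c implementation; example_set_flag() is an illustrative name only.

#include <linux/cmpxchg-emu.h>
#include <linux/types.h>

/*
 * cmpxchg_emu_u8() emulates a one-byte compare-and-exchange with a
 * 32-bit cmpxchg on the aligned word containing *flag, so byte-sized
 * cmpxchg() callers work even though the native primitive is word-sized.
 */
static bool example_set_flag(volatile u8 *flag)
{
	/* returns the previous value of *flag; 0 means we won the race */
	return cmpxchg_emu_u8(flag, 0, 1) == 0;
}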
endmenu # "ARC Architecture Configuration" diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 89720d6d7e0dba..319bbe2703223d 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set CONFIG_I2C=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y # CONFIG_HWMON is not set CONFIG_DRM=m diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 73ec01ed0492cf..8c1f1a111a1753 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set CONFIG_I2C=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y # CONFIG_HWMON is not set CONFIG_FB=y diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 4da0f626fa9db6..75cab9f25b5bb3 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set CONFIG_I2C=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y # CONFIG_HWMON is not set CONFIG_DRM=m diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 1a68e4beebca28..5aba3d850fa2fa 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -60,6 +60,7 @@ CONFIG_SERIAL_8250_DW=y # CONFIG_HW_RANDOM is not set CONFIG_I2C=y # CONFIG_I2C_COMPAT is not set +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index e138fde067dea5..58045c89834045 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -46,6 +47,9 @@ __typeof__(*(ptr)) _prev_; \ \ switch(sizeof((_p_))) { \ + case 1: \ + _prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \ + break; \ case 4: \ _prev_ = __cmpxchg(_p_, _o_, _n_); \ break; \ @@ -65,8 +69,6 @@ __typeof__(*(ptr)) _prev_; \ unsigned long __flags; \ \ - BUILD_BUG_ON(sizeof(_p_) != 4); \ - \ /* \ * spin lock/unlock provide the needed smp_mb() before/after \ */ \ diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index 4fdb7350636c32..f57cb5a6b62403 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h @@ -9,7 +9,7 @@ #include #include #include -#include +#include #ifdef CONFIG_ISA_ARCV2 #include diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h index 9febf5bc3de6d0..4ae2db59d494cb 100644 --- a/arch/arc/include/asm/mmu.h +++ b/arch/arc/include/asm/mmu.h @@ -14,6 +14,7 @@ typedef struct { unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */ } mm_context_t; +struct pt_regs; extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *); #endif diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h deleted file mode 100644 index cf5a02382e0e09..00000000000000 --- a/arch/arc/include/asm/unaligned.h +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. 
(www.synopsys.com) - */ - -#ifndef _ASM_ARC_UNALIGNED_H -#define _ASM_ARC_UNALIGNED_H - -/* ARC700 can't handle unaligned Data accesses. */ - -#include -#include - -#ifdef CONFIG_ARC_EMUL_UNALIGNED -int misaligned_fixup(unsigned long address, struct pt_regs *regs, - struct callee_regs *cregs); -#else -static inline int -misaligned_fixup(unsigned long address, struct pt_regs *regs, - struct callee_regs *cregs) -{ - /* Not fixed */ - return 1; -} -#endif - -#endif /* _ASM_ARC_UNALIGNED_H */ diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index a19751e824fb4c..8d2ea2cbd98b0f 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -18,8 +18,9 @@ #include #include #include -#include +#include #include +#include "unaligned.h" void die(const char *str, struct pt_regs *regs, unsigned long address) { diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index 99a9b92ed98d62..d2f5ceaaed1b4a 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c @@ -12,6 +12,7 @@ #include #include #include +#include "unaligned.h" #ifdef CONFIG_CPU_BIG_ENDIAN #define BE 1 diff --git a/arch/arc/kernel/unaligned.h b/arch/arc/kernel/unaligned.h new file mode 100644 index 00000000000000..5244453bb85f7f --- /dev/null +++ b/arch/arc/kernel/unaligned.h @@ -0,0 +1,16 @@ +struct pt_regs; +struct callee_regs; + +#ifdef CONFIG_ARC_EMUL_UNALIGNED +int misaligned_fixup(unsigned long address, struct pt_regs *regs, + struct callee_regs *cregs); +#else +static inline int +misaligned_fixup(unsigned long address, struct pt_regs *regs, + struct callee_regs *cregs) +{ + /* Not fixed */ + return 1; +} +#endif + diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 9270d0a713c316..d8969dab12d42d 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include extern char __start_unwind[], __end_unwind[]; diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 69a91529715599..2185afe8d59f7a 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -23,7 +23,8 @@ */ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 173159e93c99c4..749179a1d16297 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -10,6 +10,7 @@ config ARM select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DMA_ALLOC if MMU + select ARCH_HAS_DMA_OPS select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE @@ -54,7 +55,6 @@ config ARM select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS select DMA_DECLARE_COHERENT select DMA_GLOBAL_POOL if !MMU - select DMA_OPS select DMA_NONCOHERENT_MMAP if MMU select EDAC_SUPPORT select EDAC_ATOMIC_SCRUB @@ -64,6 +64,7 @@ config ARM select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_IRQ_IPI if SMP select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_DEVICES select GENERIC_EARLY_IOREMAP select GENERIC_IDLE_POLL_SETUP select GENERIC_IRQ_MULTI_HANDLER diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 71afdd98ddf27f..aafebf145738ab 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -183,7 +183,6 @@ machine-$(CONFIG_ARCH_CLPS711X) += clps711x 
machine-$(CONFIG_ARCH_DAVINCI) += davinci machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor machine-$(CONFIG_ARCH_DOVE) += dove -machine-$(CONFIG_ARCH_EP93XX) += ep93xx machine-$(CONFIG_ARCH_EXYNOS) += exynos machine-$(CONFIG_ARCH_FOOTBRIDGE) += footbridge machine-$(CONFIG_ARCH_GEMINI) += gemini diff --git a/arch/arm/boot/dts/amlogic/meson8b-ec100.dts b/arch/arm/boot/dts/amlogic/meson8b-ec100.dts index 3da47349eaaf75..49890eb12781f4 100644 --- a/arch/arm/boot/dts/amlogic/meson8b-ec100.dts +++ b/arch/arm/boot/dts/amlogic/meson8b-ec100.dts @@ -34,8 +34,6 @@ emmc_pwrseq: emmc-pwrseq { gpio-keys { compatible = "gpio-keys-polled"; - #address-cells = <1>; - #size-cells = <0>; poll-interval = <100>; pal-switch { diff --git a/arch/arm/boot/dts/arm/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm/arm-realview-eb-mp.dtsi index 26783d053ac7fd..40f7515aa068db 100644 --- a/arch/arm/boot/dts/arm/arm-realview-eb-mp.dtsi +++ b/arch/arm/boot/dts/arm/arm-realview-eb-mp.dtsi @@ -103,7 +103,7 @@ twd_wdog: watchdog@1f000620 { }; /* PMU with one IRQ line per core */ - pmu: pmu@0 { + pmu: pmu { compatible = "arm,arm11mpcore-pmu"; interrupt-parent = <&intc>; interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts index ce35748f3d25d0..db1b6793cd2c62 100644 --- a/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts +++ b/arch/arm/boot/dts/arm/arm-realview-pb11mp.dts @@ -92,7 +92,7 @@ intc_tc11mp: interrupt-controller@1f000100 { <0x1f000100 0x100>; }; - L2: cache-controller { + L2: cache-controller@1f002000 { compatible = "arm,l220-cache"; reg = <0x1f002000 0x1000>; interrupt-parent = <&intc_tc11mp>; diff --git a/arch/arm/boot/dts/arm/arm-realview-pba8.dts b/arch/arm/boot/dts/arm/arm-realview-pba8.dts index d3238c252b5977..d2e0082245f9b0 100644 --- a/arch/arm/boot/dts/arm/arm-realview-pba8.dts +++ b/arch/arm/boot/dts/arm/arm-realview-pba8.dts @@ -40,7 +40,7 @@ cpu0: cpu@0 { }; }; - pmu: pmu@0 { + pmu: pmu { compatible = "arm,cortex-a8-pmu"; interrupt-parent = <&intc>; interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/arm/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm/arm-realview-pbx-a9.dts index 85d3968fbb9171..507ad7ac497416 100644 --- a/arch/arm/boot/dts/arm/arm-realview-pbx-a9.dts +++ b/arch/arm/boot/dts/arm/arm-realview-pbx-a9.dts @@ -97,7 +97,7 @@ twd_wdog: watchdog@1f000620 { interrupts = <1 14 0xf04>; }; - pmu: pmu@0 { + pmu: pmu { compatible = "arm,cortex-a9-pmu"; interrupt-parent = <&intc>; interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm/boot/dts/aspeed/Makefile b/arch/arm/boot/dts/aspeed/Makefile index e51c6d2037255a..c4f064e4b0738e 100644 --- a/arch/arm/boot/dts/aspeed/Makefile +++ b/arch/arm/boot/dts/aspeed/Makefile @@ -17,6 +17,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \ aspeed-bmc-bytedance-g220a.dtb \ aspeed-bmc-delta-ahe50dc.dtb \ aspeed-bmc-facebook-bletchley.dtb \ + aspeed-bmc-facebook-catalina.dtb \ aspeed-bmc-facebook-cmm.dtb \ aspeed-bmc-facebook-elbert.dtb \ aspeed-bmc-facebook-fuji.dtb \ @@ -32,8 +33,10 @@ dtb-$(CONFIG_ARCH_ASPEED) += \ aspeed-bmc-facebook-yamp.dtb \ aspeed-bmc-facebook-yosemitev2.dtb \ aspeed-bmc-facebook-yosemite4.dtb \ + aspeed-bmc-ibm-blueridge.dtb \ aspeed-bmc-ibm-bonnell.dtb \ aspeed-bmc-ibm-everest.dtb \ + aspeed-bmc-ibm-fuji.dtb \ aspeed-bmc-ibm-rainier.dtb \ aspeed-bmc-ibm-rainier-1s4u.dtb \ aspeed-bmc-ibm-rainier-4u.dtb \ diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjade.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjade.dts index 
8ab5f301f92680..31c5d319aa0a9f 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjade.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtjade.dts @@ -49,6 +49,11 @@ aliases { */ i2c80 = &nvme_m2_0; i2c81 = &nvme_m2_1; + + /* + * i2c bus 82 assigned to OCP slot + */ + i2c82 = &ocpslot; }; chosen { @@ -420,6 +425,17 @@ i2c-mux@70 { reg = <0x70>; i2c-mux-idle-disconnect; + ocpslot: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + + ocpslot_temp: temperature-sensor@1f { + compatible = "ti,tmp421"; + reg = <0x1f>; + }; + }; + nvmeslot_0_7: i2c@3 { #address-cells = <1>; #size-cells = <0>; @@ -672,10 +688,6 @@ &gfx { memory-region = <&gfx_memory>; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &pwm_tacho { status = "okay"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts index 3c8925034a8c4d..0295f5adcfbc84 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ampere-mtmitchell.dts @@ -15,6 +15,32 @@ aliases { serial7 = &uart8; serial8 = &uart9; + /* + * I2C temperature alias port + */ + i2c20 = &i2c4_bus70_chn0; + i2c21 = &i2c4_bus70_chn1; + i2c22 = &i2c4_bus70_chn2; + i2c23 = &i2c4_bus70_chn3; + + /* + * i2c bus 30-31 assigned to OCP slot 0-1 + */ + i2c30 = &ocpslot_0; + i2c31 = &ocpslot_1; + + /* + * i2c bus 32-33 assigned to Riser slot 0-1 + */ + i2c32 = &i2c_riser0; + i2c33 = &i2c_riser1; + + /* + * i2c bus 38-39 assigned to FRU on Riser slot 0-1 + */ + i2c38 = &i2c_riser0_chn_0; + i2c39 = &i2c_riser1_chn_0; + /* * I2C NVMe alias port */ @@ -87,6 +113,37 @@ vga_memory: region@bf000000 { }; }; + leds { + compatible = "gpio-leds"; + /* + * Use gpio-leds to configure GPIOW5 (bmc-ready) pin to be reseted when + * watchdog timeout. 
+ */ + led-bmc-ready { + gpios = <&gpio0 ASPEED_GPIO(W, 5) (GPIO_ACTIVE_HIGH | GPIO_TRANSITORY)>; + }; + + led-sw-heartbeat { + gpios = <&gpio0 ASPEED_GPIO(N, 3) GPIO_ACTIVE_HIGH>; + }; + + led-identify { + gpios = <&gpio0 ASPEED_GPIO(S, 3) GPIO_ACTIVE_HIGH>; + }; + + led-fault { + gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_HIGH>; + }; + + led-fan-fault { + gpios = <&gpio_expander1 0 GPIO_ACTIVE_HIGH>; + }; + + led-psu-fault { + gpios = <&gpio_expander1 1 GPIO_ACTIVE_HIGH>; + }; + }; + voltage_mon_reg: voltage-mon-regulator { compatible = "regulator-fixed"; regulator-name = "ltc2497_reg"; @@ -515,6 +572,80 @@ i2c-mux@70 { #size-cells = <0>; reg = <0x70>; i2c-mux-idle-disconnect; + + ocpslot_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + + ocpslot_0_temp: temperature-sensor@1f { + compatible = "ti,tmp421"; + reg = <0x1f>; + }; + }; + + ocpslot_1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x1>; + + ocpslot_1_temp: temperature-sensor@1f { + compatible = "ti,tmp421"; + reg = <0x1f>; + }; + }; + + i2c_riser0: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x2>; + + i2c-mux@72 { + compatible = "nxp,pca9546"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x72>; + i2c-mux-idle-disconnect; + + i2c_riser0_chn_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + + eeprom@50 { + compatible = "atmel,24c02"; + reg = <0x50>; + pagesize = <16>; + }; + }; + }; + }; + + i2c_riser1: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x3>; + + i2c-mux@72 { + compatible = "nxp,pca9546"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x72>; + i2c-mux-idle-disconnect; + + i2c_riser1_chn_0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + + eeprom@50 { + compatible = "atmel,24c02"; + reg = <0x50>; + pagesize = <16>; + }; + }; + }; + }; }; }; @@ -790,6 +921,10 @@ nvme_m2_1: i2c@1 { }; }; +&i2c10 { + status = "okay"; +}; + &i2c11 { status = "okay"; ssif-bmc@10 { @@ -812,6 +947,25 @@ bmc_ast2600_cpu: temperature-sensor@35 { }; }; +&i2c15 { + status = "okay"; + gpio_expander1: gpio-expander@22 { + compatible = "nxp,pca9535"; + reg = <0x22>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = + "fan-fault","psu-fault", + "","", + "","", + "","", + "","", + "","", + "","", + "",""; + }; +}; + &adc0 { status = "okay"; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-arm-stardragon4800-rep2.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-arm-stardragon4800-rep2.dts index 7c6af7f226e75c..29c68c37e7f5aa 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-arm-stardragon4800-rep2.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-arm-stardragon4800-rep2.dts @@ -200,10 +200,6 @@ &gfx { status = "okay"; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &gpio { pin_gpio_c7 { gpio-hog; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts index 555485871e7a7d..c4097e4f2ca4ee 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-spc621d8hm3.dts @@ -110,11 +110,15 @@ eeprom@50 { compatible = "st,24c128", "atmel,24c128"; reg = <0x50>; pagesize = <16>; - #address-cells = <1>; - #size-cells = <1>; - eth0_macaddress: macaddress@3f80 { - reg = <0x3f80 6>; + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + eth0_macaddress: macaddress@3f80 { + reg = <0x3f80 6>; + }; }; }; diff --git 
a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts index 8dee4faa9e07dc..0943e0bf1305ae 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-asrock-x570d4u.dts @@ -254,10 +254,6 @@ &gfx { status = "okay"; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &vhub { status = "okay"; }; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts new file mode 100644 index 00000000000000..82835e96317d19 --- /dev/null +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-catalina.dts @@ -0,0 +1,1110 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2021 Facebook Inc. +/dts-v1/; + +#include "aspeed-g6.dtsi" +#include +#include +#include +#include +#include + +/ { + model = "Facebook Catalina BMC"; + compatible = "facebook,catalina-bmc", "aspeed,ast2600"; + + aliases { + serial0 = &uart1; + serial2 = &uart3; + serial3 = &uart4; + serial4 = &uart5; + i2c16 = &i2c1mux0ch0; + i2c17 = &i2c1mux0ch1; + i2c18 = &i2c1mux0ch2; + i2c19 = &i2c1mux0ch3; + i2c20 = &i2c1mux0ch4; + i2c21 = &i2c1mux0ch5; + i2c22 = &i2c1mux0ch6; + i2c23 = &i2c1mux0ch7; + i2c24 = &i2c0mux0ch0; + i2c25 = &i2c0mux0ch1; + i2c26 = &i2c0mux0ch2; + i2c27 = &i2c0mux0ch3; + i2c28 = &i2c0mux1ch0; + i2c29 = &i2c0mux1ch1; + i2c30 = &i2c0mux1ch2; + i2c31 = &i2c0mux1ch3; + i2c32 = &i2c0mux2ch0; + i2c33 = &i2c0mux2ch1; + i2c34 = &i2c0mux2ch2; + i2c35 = &i2c0mux2ch3; + i2c36 = &i2c0mux3ch0; + i2c37 = &i2c0mux3ch1; + i2c38 = &i2c0mux3ch2; + i2c39 = &i2c0mux3ch3; + i2c40 = &i2c0mux4ch0; + i2c41 = &i2c0mux4ch1; + i2c42 = &i2c0mux4ch2; + i2c43 = &i2c0mux4ch3; + i2c44 = &i2c0mux5ch0; + i2c45 = &i2c0mux5ch1; + i2c46 = &i2c0mux5ch2; + i2c47 = &i2c0mux5ch3; + i2c48 = &i2c30mux0ch0; + i2c49 = &i2c30mux0ch1; + i2c50 = &i2c30mux0ch2; + i2c51 = &i2c30mux0ch3; + i2c52 = &i2c30mux0ch4; + i2c53 = &i2c30mux0ch5; + i2c54 = &i2c30mux0ch6; + i2c55 = &i2c30mux0ch7; + }; + + chosen { + stdout-path = "serial4:57600n8"; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x80000000>; + }; + + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>, + <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>, + <&adc1 2>; + }; + + spi1_gpio: spi { + compatible = "spi-gpio"; + #address-cells = <1>; + #size-cells = <0>; + + sck-gpios = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>; + mosi-gpios = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>; + miso-gpios = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>; + cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>; + num-chipselects = <1>; + + tpm@0 { + compatible = "infineon,slb9670", "tcg,tpm_tis-spi"; + spi-max-frequency = <33000000>; + reg = <0>; + }; + }; + + leds { + compatible = "gpio-leds"; + + led-0 { + label = "bmc_heartbeat_amber"; + gpios = <&gpio0 ASPEED_GPIO(P, 7) GPIO_ACTIVE_LOW>; + linux,default-trigger = "heartbeat"; + }; + + led-1 { + label = "fp_id_amber"; + default-state = "off"; + gpios = <&gpio0 ASPEED_GPIO(B, 5) GPIO_ACTIVE_HIGH>; + }; + + led-2 { + label = "bmc_ready_noled"; + gpios = <&gpio0 ASPEED_GPIO(B, 3) (GPIO_ACTIVE_HIGH|GPIO_TRANSITORY)>; + }; + + led-3 { + label = "bmc_ready_cpld_noled"; + gpios = <&gpio0 ASPEED_GPIO(P, 5) (GPIO_ACTIVE_HIGH|GPIO_TRANSITORY)>; + }; + }; + + p1v8_bmc_aux: regulator-p1v8-bmc-aux { + compatible = "regulator-fixed"; + regulator-name = "p1v8_bmc_aux"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = 
<1800000>; + regulator-always-on; + }; + + p2v5_bmc_aux: regulator-p2v5-bmc-aux { + compatible = "regulator-fixed"; + regulator-name = "p2v5_bmc_aux"; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + regulator-always-on; + }; +}; + +&uart1 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + +&uart4 { + status = "okay"; +}; + +&uart5 { + status = "okay"; +}; + +&mac3 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ncsi4_default>; + use-ncsi; +}; + +&fmc { + status = "okay"; + flash@0 { + status = "okay"; + m25p,fast-read; + label = "bmc"; + spi-max-frequency = <50000000>; +#include "openbmc-flash-layout-128.dtsi" + }; + flash@1 { + status = "okay"; + m25p,fast-read; + label = "alt-bmc"; + spi-max-frequency = <50000000>; + }; +}; + +&i2c0 { + status = "okay"; + + i2c-mux@71 { + compatible = "nxp,pca9546"; + reg = <0x71>; + #address-cells = <1>; + #size-cells = <0>; + + i2c0mux0ch0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + i2c0mux0ch1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + i2c0mux0ch2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + i2c0mux0ch3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; + + i2c-mux@72 { + compatible = "nxp,pca9546"; + reg = <0x72>; + #address-cells = <1>; + #size-cells = <0>; + + i2c0mux1ch0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + i2c0mux1ch1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + // IO Mezz 0 IOEXP + io_expander7: gpio@20 { + compatible = "nxp,pca9535"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + }; + + // IO Mezz 0 FRU EEPROM + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + i2c0mux1ch2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + i2c-mux@70 { + compatible = "nxp,pca9548"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + + i2c30mux0ch0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + i2c30mux0ch1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + i2c30mux0ch2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + i2c30mux0ch3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + i2c30mux0ch4: i2c@4 { + #address-cells = <1>; + #size-cells = <0>; + reg = <4>; + }; + i2c30mux0ch5: i2c@5 { + #address-cells = <1>; + #size-cells = <0>; + reg = <5>; + }; + i2c30mux0ch6: i2c@6 { + #address-cells = <1>; + #size-cells = <0>; + reg = <6>; + // HDD FRU EEPROM + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + }; + i2c30mux0ch7: i2c@7 { + #address-cells = <1>; + #size-cells = <0>; + reg = <7>; + + power-sensor@40 { + compatible = "ti,ina230"; + reg = <0x40>; + shunt-resistor = <2000>; + }; + power-sensor@41 { + compatible = "ti,ina230"; + reg = <0x41>; + shunt-resistor = <2000>; + }; + power-sensor@44 { + compatible = "ti,ina230"; + reg = <0x44>; + shunt-resistor = <2000>; + }; + power-sensor@45 { + compatible = "ti,ina230"; + reg = <0x45>; + shunt-resistor = <2000>; + }; + }; + }; + }; + i2c0mux1ch3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; + + i2c-mux@73 { + compatible = "nxp,pca9546"; + reg = <0x73>; + #address-cells = <1>; + #size-cells = <0>; + + i2c0mux2ch0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + i2c0mux2ch1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + 
i2c0mux2ch2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + i2c0mux2ch3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; + + i2c-mux@75 { + compatible = "nxp,pca9546"; + reg = <0x75>; + #address-cells = <1>; + #size-cells = <0>; + + i2c0mux3ch0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + i2c0mux3ch1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + i2c0mux3ch2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + i2c0mux3ch3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; + + i2c-mux@76 { + compatible = "nxp,pca9546"; + reg = <0x76>; + #address-cells = <1>; + #size-cells = <0>; + + i2c0mux4ch0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + i2c0mux4ch1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + // IO Mezz 1 IOEXP + io_expander8: gpio@21 { + compatible = "nxp,pca9535"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + }; + + // IO Mezz 1 FRU EEPROM + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + i2c0mux4ch2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + i2c0mux4ch3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; + + i2c-mux@77 { + compatible = "nxp,pca9546"; + reg = <0x77>; + #address-cells = <1>; + #size-cells = <0>; + + i2c0mux5ch0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + }; + i2c0mux5ch1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + i2c0mux5ch2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + }; + i2c0mux5ch3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + }; + }; +}; + +&i2c1 { + status = "okay"; + i2c-mux@70 { + compatible = "nxp,pca9548"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x70>; + i2c-mux-idle-disconnect; + + i2c1mux0ch0: i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0>; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <500>; + }; + power-sensor@42 { + compatible = "ti,ina238"; + reg = <0x42>; + shunt-resistor = <500>; + }; + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <500>; + }; + }; + i2c1mux0ch1: i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x1>; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + }; + power-sensor@43 { + compatible = "ti,ina238"; + reg = <0x43>; + }; + }; + i2c1mux0ch2: i2c@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x2>; + }; + i2c1mux0ch3: i2c@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x3>; + }; + i2c1mux0ch4: i2c@4 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x4>; + + power-monitor@42 { + compatible = "lltc,ltc4287"; + reg = <0x42>; + shunt-resistor-micro-ohms = <200>; + }; + power-monitor@43 { + compatible = "lltc,ltc4287"; + reg = <0x43>; + shunt-resistor-micro-ohms = <200>; + }; + }; + i2c1mux0ch5: i2c@5 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x5>; + + // PDB FRU EEPROM + eeprom@54 { + compatible = "atmel,24c64"; + reg = <0x54>; + }; + + // PDB TEMP SENSOR + temperature-sensor@4f { + compatible = "ti,tmp75"; + reg = <0x4f>; + }; + }; + i2c1mux0ch6: i2c@6 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x6>; + + // PDB IOEXP + io_expander5: gpio@27 { + compatible = "nxp,pca9554"; + reg = <0x27>; + gpio-controller; + #gpio-cells = <2>; + }; + + // OSFP IOEXP + 
io_expander6: gpio@25 { + compatible = "nxp,pca9555"; + reg = <0x25>; + gpio-controller; + #gpio-cells = <2>; + }; + + // OSFP FRU EEPROM + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + i2c1mux0ch7: i2c@7 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0x7>; + + // FIO FRU EEPROM + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + + // FIO TEMP SENSOR + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; + }; + }; +}; + +&i2c2 { + status = "okay"; + + // Module 0 IOEXP + io_expander0: gpio@20 { + compatible = "nxp,pca9555"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&gpio0>; + interrupts = ; + }; + + // Module 1 IOEXP + io_expander1: gpio@21 { + compatible = "nxp,pca9555"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&gpio0>; + interrupts = ; + }; + + // HMC IOEXP + io_expander2: gpio@27 { + compatible = "nxp,pca9555"; + reg = <0x27>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&gpio0>; + interrupts = ; + }; + + // Module 0 EEPROM + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + // Module 1 EEPROM + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; +}; + +&i2c3 { + status = "okay"; +}; + +&i2c4 { + status = "okay"; +}; + +&i2c5 { + status = "okay"; +}; + +&i2c6 { + status = "okay"; + + // BMC IOEXP on Module 0 + io_expander3: gpio@21 { + compatible = "nxp,pca9555"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + }; + + rtc@6f { + compatible = "nuvoton,nct3018y"; + reg = <0x6f>; + }; +}; + +&i2c7 { + status = "okay"; +}; + +&i2c8 { + status = "okay"; +}; + +&i2c9 { + status = "okay"; + + // SCM CPLD IOEXP + io_expander4: gpio@4f { + compatible = "nxp,pca9555"; + reg = <0x4f>; + gpio-controller; + #gpio-cells = <2>; + }; + + // SCM TEMP SENSOR + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; + + // SCM FRU EEPROM + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + // BSM FRU EEPROM + eeprom@56 { + compatible = "atmel,24c64"; + reg = <0x56>; + }; +}; + +&i2c10 { + status = "okay"; + + // OCP NIC0 TEMP + temperature-sensor@1f { + compatible = "ti,tmp421"; + reg = <0x1f>; + }; + + // OCP NIC0 FRU EEPROM + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; +}; + +&i2c11 { + status = "okay"; + + ssif-bmc@10 { + compatible = "ssif-bmc"; + reg = <0x10>; + }; +}; + +&i2c12 { + status = "okay"; + + // Module 1 FRU EEPROM + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; +}; + +&i2c13 { + status = "okay"; + + // Module 0 FRU EEPROM + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + // Left CBC FRU EEPROM + eeprom@54 { + compatible = "atmel,24c02"; + reg = <0x54>; + }; + + // Right CBC FRU EEPROM + eeprom@55 { + compatible = "atmel,24c02"; + reg = <0x55>; + }; + + // HMC FRU EEPROM + eeprom@57 { + compatible = "atmel,24c02"; + reg = <0x57>; + }; +}; + +&i2c14 { + status = "okay"; + + // PDB CPLD IOEXP 0x10 + io_expander9: gpio@10 { + compatible = "nxp,pca9555"; + interrupt-parent = <&gpio0>; + interrupts = ; + reg = <0x10>; + gpio-controller; + #gpio-cells = <2>; + }; + + // PDB CPLD IOEXP 0x11 + io_expander10: gpio@11 { + compatible = "nxp,pca9555"; + interrupt-parent = <&gpio0>; + interrupts = ; + reg = <0x11>; + gpio-controller; + #gpio-cells = <2>; + }; + + // PDB CPLD IOEXP 0x12 + io_expander11: gpio@12 { + compatible = "nxp,pca9555"; + interrupt-parent = <&gpio0>; + interrupts = ; + reg = <0x12>; + gpio-controller; + 
#gpio-cells = <2>; + }; + + // PDB CPLD IOEXP 0x13 + io_expander12: gpio@13 { + compatible = "nxp,pca9555"; + interrupt-parent = <&gpio0>; + interrupts = ; + reg = <0x13>; + gpio-controller; + #gpio-cells = <2>; + }; + + // PDB CPLD IOEXP 0x14 + io_expander13: gpio@14 { + compatible = "nxp,pca9555"; + interrupt-parent = <&gpio0>; + interrupts = ; + reg = <0x14>; + gpio-controller; + #gpio-cells = <2>; + }; + + // PDB CPLD IOEXP 0x15 + io_expander14: gpio@15 { + compatible = "nxp,pca9555"; + interrupt-parent = <&gpio0>; + interrupts = ; + reg = <0x15>; + gpio-controller; + #gpio-cells = <2>; + }; +}; + +&i2c15 { + status = "okay"; + + // OCP NIC1 TEMP + temperature-sensor@1f { + compatible = "ti,tmp421"; + reg = <0x1f>; + }; + + // OCP NIC1 FRU EEPROM + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; +}; + +&adc0 { + vref-supply = <&p1v8_bmc_aux>; + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default + &pinctrl_adc2_default &pinctrl_adc3_default + &pinctrl_adc4_default &pinctrl_adc5_default + &pinctrl_adc6_default &pinctrl_adc7_default>; +}; + +&adc1 { + vref-supply = <&p2v5_bmc_aux>; + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adc10_default>; +}; + +&ehci0 { + status = "okay"; +}; + +&wdt1 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdtrst1_default>; + aspeed,reset-type = "soc"; + aspeed,external-signal; + aspeed,ext-push-pull; + aspeed,ext-active-high; + aspeed,ext-pulse-duration = <256>; +}; + +&pinctrl { + pinctrl_ncsi3_default: ncsi3_default { + function = "RMII3"; + groups = "NCSI3"; + }; + + pinctrl_ncsi4_default: ncsi4_default { + function = "RMII4"; + groups = "NCSI4"; + }; +}; + +&gpio0 { + gpio-line-names = + /*A0-A7*/ "","","","","","","","", + /*B0-B7*/ "BATTERY_DETECT","PRSNT1_HPM_SCM_N", + "BMC_I2C1_FPGA_ALERT_L","BMC_READY", + "IOEXP_INT_L","FM_ID_LED", + "","", + /*C0-C7*/ "","","","", + "PMBUS_REQ_N","PSU_FW_UPDATE_REQ_N", + "","BMC_I2C_SSIF_ALERT_L", + /*D0-D7*/ "","","","","","","","", + /*E0-E7*/ "","","","","","","","", + /*F0-F7*/ "","","","","","","","", + /*G0-G7*/ "","","","","","", + "FM_DEBUG_PORT_PRSNT_N","FM_BMC_DBP_PRESENT_N", + /*H0-H7*/ "PWR_BRAKE_L","RUN_POWER_EN", + "SHDN_FORCE_L","SHDN_REQ_L", + "","","","", + /*I0-I7*/ "","","","", + "","FLASH_WP_STATUS", + "FM_PDB_HEALTH_N","RUN_POWER_PG", + /*J0-J7*/ "","","","","","","","", + /*K0-K7*/ "","","","","","","","", + /*L0-L7*/ "","","","","","","","", + /*M0-M7*/ "PCIE_EP_RST_EN","BMC_FRU_WP", + "SCM_HPM_STBY_RST_N","SCM_HPM_STBY_EN", + "STBY_POWER_PG_3V3","TH500_SHDN_OK_L","","", + /*N0-N7*/ "LED_POSTCODE_0","LED_POSTCODE_1", + "LED_POSTCODE_2","LED_POSTCODE_3", + "LED_POSTCODE_4","LED_POSTCODE_5", + "LED_POSTCODE_6","LED_POSTCODE_7", + /*O0-O7*/ "HMC_I2C3_FPGA_ALERT_L","FPGA_READY_HMC", + "CHASSIS_AC_LOSS_L","BSM_PRSNT_R_N", + "PSU_SMB_ALERT_L","FM_TPM_PRSNT_0_N", + "","USBDBG_IPMI_EN_L", + /*P0-P7*/ "PWR_BTN_BMC_N","IPEX_CABLE_PRSNT_L", + "ID_RST_BTN_BMC_N","RST_BMC_RSTBTN_OUT_N", + "host0-ready","BMC_READY_CPLD","","BMC_HEARTBEAT_N", + /*Q0-Q7*/ "IRQ_PCH_TPM_SPI_N","USB_OC0_REAR_R_N", + "UART_MUX_SEL","I2C_MUX_RESET_L", + "RSVD_NV_PLT_DETECT","SPI_TPM_INT_L", + "CPU_JTAG_MUX_SELECT","THERM_BB_OVERT_L", + /*R0-R7*/ "THERM_BB_WARN_L","SPI_BMC_FPGA_INT_L", + "CPU_BOOT_DONE","PMBUS_GNT_L", + "CHASSIS_PWR_BRK_L","PCIE_WAKE_L", + "PDB_THERM_OVERT_L","HMC_I2C2_FPGA_ALERT_L", + /*S0-S7*/ "","","SYS_BMC_PWRBTN_R_N","FM_TPM_PRSNT_1_N", + "FM_BMC_DEBUG_SW_N","UID_LED_N", + 
"SYS_FAULT_LED_N","RUN_POWER_FAULT_L", + /*T0-T7*/ "","","","","","","","", + /*U0-U7*/ "","","","","","","","", + /*V0-V7*/ "L2_RST_REQ_OUT_L","L0L1_RST_REQ_OUT_L", + "BMC_ID_BEEP_SEL","BMC_I2C0_FPGA_ALERT_L", + "SMB_BMC_TMP_ALERT","PWR_LED_N", + "SYS_RST_OUT_L","IRQ_TPM_SPI_N", + /*W0-W7*/ "","","","","","","","", + /*X0-X7*/ "","","","","","","","", + /*Y0-Y7*/ "","RST_BMC_SELF_HW", + "FM_FLASH_LATCH_N","BMC_EMMC_RST_N", + "","","","", + /*Z0-Z7*/ "","","","","","","",""; +}; + +&io_expander0 { + gpio-line-names = + "FPGA_THERM_OVERT_L","FPGA_READY_BMC", + "HMC_BMC_DETECT","HMC_PGOOD", + "","BMC_SELF_PWR_CYCLE", + "FPGA_EROT_FATAL_ERROR_L","WP_HW_EXT_CTRL_L", + "EROT_FPGA_RST_L","FPGA_EROT_RECOVERY_L", + "BMC_EROT_FPGA_SPI_MUX_SEL","USB2_HUB_RESET_L", + "NCSI_CS1_SEL","SGPIO_EN_L", + "B2B_IOEXP_INT_L","I2C_BUS_MUX_RESET_L"; +}; + +&io_expander1 { + gpio-line-names = + "SEC_FPGA_THERM_OVERT_L","SEC_FPGA_READY_BMC", + "","", + "","", + "SEC_FPGA_EROT_FATAL_ERROR_L","SEC_WP_HW_EXT_CTRL_L", + "SEC_EROT_FPGA_RST_L","SEC_FPGA_EROT_RECOVERY_L", + "SEC_BMC_EROT_FPGA_SPI_MUX_SEL","", + "","", + "","SEC_I2C_BUS_MUX_RESET_L"; +}; + +&io_expander2 { + gpio-line-names = + "HMC_PRSNT_L","HMC_READY", + "HMC_EROT_FATAL_ERROR_L","I2C_MUX_SEL", + "HMC_EROT_SPI_MUX_SEL","HMC_EROT_RECOVERY_L", + "HMC_EROT_RST_L","GLOBAL_WP_HMC", + "FPGA_RST_L","USB2_HUB_RST", + "CPU_UART_MUX_SEL","", + "","","",""; +}; + +&io_expander3 { + gpio-line-names = + "RTC_MUX_SEL","PCI_MUX_SEL","TPM_MUX_SEL","FAN_MUX-SEL", + "SGMII_MUX_SEL","DP_MUX_SEL","UPHY3_USB_SEL","NCSI_MUX_SEL", + "BMC_PHY_RST","RTC_CLR_L","BMC_12V_CTRL","PS_RUN_IO0_PG", + "","","",""; +}; + +&io_expander4 { + gpio-line-names = + "stby_power_en_cpld","stby_power_gd_cpld","","", + "","","","", + "","","","", + "","","",""; +}; + +&io_expander5 { + gpio-line-names = + "JTAG_MUX_SEL","IOX_BMC_RESET","","", + "","","",""; +}; + +&io_expander6 { + gpio-line-names = + "OSFP_PHASE_ID0","OSFP_PHASE_ID1", + "OSFP_PHASE_ID2","OSFP_PHASE_ID3", + "","","","", + "OSFP_BOARD_ID0","OSFP_BOARD_ID1", + "OSFP_BOARD_ID2","PWRGD_P3V3_N1", + "PWRGD_P3V3_N2","","",""; +}; + +&io_expander7 { + gpio-line-names = + "RST_CX7_0","RST_CX7_1", + "CX0_SSD0_PRSNT_L","CX1_SSD1_PRSNT_L", + "CX_BOOT_CMPLT_CX0","CX_BOOT_CMPLT_CX1", + "CX_TWARN_CX0_L","CX_TWARN_CX1_L", + "CX_OVT_SHDN_CX0","CX_OVT_SHDN_CX1", + "FNP_L_CX0","FNP_L_CX1", + "","MCU_GPIO","MCU_RST_N","MCU_RECOVERY_N"; +}; + +&io_expander8 { + gpio-line-names = + "SEC_RST_CX7_0","SEC_RST_CX7_1", + "SEC_CX0_SSD0_PRSNT_L","SEC_CX1_SSD1_PRSNT_L", + "SEC_CX_BOOT_CMPLT_CX0","SEC_CX_BOOT_CMPLT_CX1", + "SEC_CX_TWARN_CX0_L","SEC_CX_TWARN_CX1_L", + "SEC_CX_OVT_SHDN_CX0","SEC_CX_OVT_SHDN_CX1", + "SEC_FNP_L_CX0","SEC_FNP_L_CX1", + "","SEC_MCU_GPIO","SEC_MCU_RST_N","SEC_MCU_RECOVERY_N"; +}; + +&io_expander9 { + gpio-line-names = + "LEAK3_DETECT_R","LEAK1_DETECT_R", + "LEAK2_DETECT_R","LEAK0_DETECT_R", + "CHASSIS3_LEAK_Q_N_PLD","CHASSIS1_LEAK_Q_N_PLD", + "CHASSIS2_LEAK_Q_N_PLD","CHASSIS0_LEAK_Q_N_PLD", + "P12V_AUX_FAN_ALERT_PLD_N","P12V_AUX_FAN_OC_PLD_N", + "P12V_AUX_FAN_FAULT_PLD_N","LEAK_DETECT_RMC_N_R", + "RSVD_RMC_GPIO3_R","SMB_RJ45_FIO_TMP_ALERT", + "",""; +}; + +&io_expander10 { + gpio-line-names = + "FM_P12V_NIC1_FLTB_R_N","FM_P3V3_NIC1_FAULT_R_N", + "OCP_V3_2_PWRBRK_FROM_HOST_ISO_PLD_N", + "P12V_AUX_NIC1_SENSE_ALERT_R_N", + "FM_P12V_NIC0_FLTB_R_N","FM_P3V3_NIC0_FAULT_R_N", + "OCP_SFF_PWRBRK_FROM_HOST_ISO_PLD_N", + "P12V_AUX_NIC0_SENSE_ALERT_R_N", + "P12V_AUX_PSU_SMB_ALERT_R_L","P12V_SCM_SENSE_ALERT_R_N", + 
"NODEB_PSU_SMB_ALERT_R_L","NODEA_PSU_SMB_ALERT_R_L", + "P52V_SENSE_ALERT_PLD_N","P48V_HS2_FAULT_N_PLD", + "P48V_HS1_FAULT_N_PLD",""; +}; + +&io_expander11 { + gpio-line-names = + "FAN_7_PRESENT_N","FAN_6_PRESENT_N", + "FAN_5_PRESENT_N","FAN_4_PRESENT_N", + "FAN_3_PRESENT_N","FAN_2_PRESENT_N", + "FAN_1_PRESENT_N","FAN_0_PRESENT_N", + "PRSNT_CHASSIS3_LEAK_CABLE_R_N","PRSNT_CHASSIS1_LEAK_CABLE_R_N", + "PRSNT_CHASSIS2_LEAK_CABLE_R_N","PRSNT_CHASSIS0_LEAK_CABLE_R_N", + "PRSNT_RJ45_FIO_N_R","PRSNT_HDDBD_POWER_CABLE_N", + "PRSNT_OSFP_POWER_CABLE_N",""; +}; + +&io_expander12 { + gpio-line-names = + "RST_OCP_V3_1_R_N","NIC0_PERST_N", + "OCP_SFF_PERST_FROM_HOST_ISO_PLD_N","OCP_SFF_MAIN_PWR_EN", + "FM_OCP_SFF_PWR_GOOD_PLD","OCP_SFF_AUX_PWR_PLD_EN_R", + "HP_LVC3_OCP_V3_1_PWRGD_PLD","HP_OCP_V3_1_HSC_PWRGD_PLD_R", + "RST_OCP_V3_2_R_N","NIC1_PERST_N", + "OCP_V3_2_PERST_FROM_HOST_ISO_PLD_N","OCP_V3_2_MAIN_PWR_EN", + "FM_OCP_V3_2_PWR_GOOD_PLD","OCP_V3_2_AUX_PWR_PLD_EN_R", + "HP_LVC3_OCP_V3_2_PWRGD_PLD","HP_OCP_V3_2_HSC_PWRGD_PLD_R"; +}; + +&io_expander13 { + gpio-line-names = + "NODEA_NODEB_PWOK_PLD_ISO_R","PWR_EN_NICS", + "PWRGD_P12V_AUX_FAN_PLD","P12V_AUX_FAN_EN_PLD", + "PWRGD_P3V3_AUX_PLD","PWRGD_P12V_AUX_PLD_ISO_R", + "FM_MAIN_PWREN_FROM_RMC_R","FM_MAIN_PWREN_RMC_EN_ISO_R", + "PWRGD_RMC_R","PWRGD_P12V_AUX_FAN_PLD", + "P12V_AUX_FAN_EN_PLD","FM_SYS_THROTTLE_N", + "HP_LVC3_OCP_V3_2_PRSNT2_PLD_N","HP_LVC3_OCP_V3_1_PRSNT2_PLD_N", + "",""; +}; + +&io_expander14 { + gpio-line-names = + "","","","","","","","", + "FM_BOARD_BMC_SKU_ID3","FM_BOARD_BMC_SKU_ID2", + "FM_BOARD_BMC_SKU_ID1","FM_BOARD_BMC_SKU_ID0", + "FAB_BMC_REV_ID2","FAB_BMC_REV_ID1", + "FAB_BMC_REV_ID0",""; +}; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts index 998598c15fd084..49914a4a179fa2 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-greatlakes.dts @@ -201,7 +201,7 @@ eeprom@54 { &i2c12 { status = "okay"; temperature-sensor@4f { - compatible = "lm75"; + compatible = "national,lm75"; reg = <0x4f>; }; }; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts index c118d473a76fb4..cf3f807a38fe26 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-harma.dts @@ -20,10 +20,6 @@ aliases { i2c21 = &imux21; i2c22 = &imux22; i2c23 = &imux23; - i2c24 = &imux24; - i2c25 = &imux25; - i2c26 = &imux26; - i2c27 = &imux27; i2c28 = &imux28; i2c29 = &imux29; i2c30 = &imux30; @@ -70,19 +66,19 @@ led-2 { }; }; - spi_gpio: spi-gpio { + spi_gpio: spi { status = "okay"; compatible = "spi-gpio"; #address-cells = <1>; #size-cells = <0>; - gpio-sck = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>; - gpio-mosi = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>; - gpio-miso = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>; + sck-gpios = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>; + mosi-gpios = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>; + miso-gpios = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>; num-chipselects = <1>; cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>; - tpmdev@0 { + tpm@0 { compatible = "infineon,slb9670", "tcg,tpm_tis-spi"; spi-max-frequency = <33000000>; reg = <0>; @@ -137,7 +133,6 @@ &mac3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_rmii4_default>; use-ncsi; - mellanox,multi-host; }; &rtc { @@ -198,6 +193,35 @@ pwm@5e{ #address-cells = <1>; #size-cells 
= <0>; }; + + power-sensor@40 { + compatible = "ti,ina238"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + + power-sensor@45 { + compatible = "ti,ina238"; + reg = <0x45>; + shunt-resistor = <1000>; + }; + + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; }; &i2c1 { @@ -224,6 +248,35 @@ pwm@5e{ #address-cells = <1>; #size-cells = <0>; }; + + power-sensor@40 { + compatible = "ti,ina238"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + + power-sensor@45 { + compatible = "ti,ina238"; + reg = <0x45>; + shunt-resistor = <1000>; + }; + + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; }; &i2c3 { @@ -276,11 +329,15 @@ temperature-sensor@49 { reg = <0x49>; }; - power-monitor@22 { - compatible = "lltc,ltc4286"; - reg = <0x22>; - adi,vrange-low-enable; - shunt-resistor-micro-ohms = <500>; + power-monitor@44 { + compatible = "lltc,ltc4287"; + reg = <0x44>; + shunt-resistor-micro-ohms = <250>; + }; + + power-monitor@40 { + compatible = "infineon,xdp710"; + reg = <0x40>; }; }; @@ -321,6 +378,14 @@ &i2c8 { &i2c9 { status = "okay"; + mctp-controller; + multi-master; + + mctp@10 { + compatible = "mctp-i2c-controller"; + reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; + }; + gpio@30 { compatible = "nxp,pca9555"; reg = <0x30>; @@ -340,33 +405,6 @@ gpio@31 { "","","",""; }; - i2c-mux@71 { - compatible = "nxp,pca9546"; - reg = <0x71>; - #address-cells = <1>; - #size-cells = <0>; - - imux24: i2c@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - }; - imux25: i2c@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - }; - imux26: i2c@2 { - #address-cells = <1>; - #size-cells = <0>; - reg = <2>; - }; - imux27: i2c@3 { - #address-cells = <1>; - #size-cells = <0>; - reg = <3>; - }; - }; // PTTV FRU eeprom@52 { compatible = "atmel,24c64"; @@ -376,6 +414,31 @@ eeprom@52 { &i2c11 { status = "okay"; + + gpio@30 { + compatible = "nxp,pca9555"; + reg = <0x30>; + gpio-controller; + #gpio-cells = <2>; + }; + gpio@31 { + compatible = "nxp,pca9555"; + reg = <0x31>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "","","","", + "","","presence-cmm","", + "","","","", + "","","",""; + }; + + // Aegis FRU + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; }; &i2c12 { @@ -399,6 +462,30 @@ imux28: i2c@0 { #address-cells = <1>; #size-cells = <0>; reg = <0>; + power-monitor@61 { + compatible = "isil,isl69260"; + reg = <0x61>; + }; + power-monitor@62 { + compatible = "isil,isl69260"; + reg = <0x62>; + }; + power-monitor@63 { + compatible = "isil,isl69260"; + reg = <0x63>; + }; + power-monitor@64 { + compatible = "infineon,xdpe152c4"; + reg = <0x64>; + }; + power-monitor@66 { + compatible = "infineon,xdpe152c4"; + reg = <0x66>; + }; + power-monitor@68 { + compatible = "infineon,xdpe152c4"; + reg = <0x68>; + }; }; imux29: i2c@1 { #address-cells = <1>; @@ -497,13 +584,14 @@ &gpio0 { /*O0-O7*/ "","","","","","","","", /*P0-P7*/ "power-button","power-host-control", "reset-button","","led-power","","","", - /*Q0-Q7*/ "","","","","","","","", + /*Q0-Q7*/ "","","","","","power-chassis-control","","", /*R0-R7*/ "","","","","","","","", /*S0-S7*/ 
"","","","","","","","", /*T0-T7*/ "","","","","","","","", /*U0-U7*/ "","","","","","","led-identify-gate","", /*V0-V7*/ "","","","", - "rtc-battery-voltage-read-enable","","","", + "rtc-battery-voltage-read-enable","", + "power-chassis-good","", /*W0-W7*/ "","","","","","","","", /*X0-X7*/ "","","","","","","","", /*Y0-Y7*/ "","","","","","","","", @@ -521,7 +609,6 @@ &gpio1 { &sgpiom0 { status = "okay"; - max-ngpios = <128>; ngpios = <128>; bus-frequency = <2000000>; gpio-line-names = diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts index 942e53d5c71409..41e2246cfbd1ca 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-minerva.dts @@ -11,7 +11,8 @@ / { compatible = "facebook,minerva-cmc", "aspeed,ast2600"; aliases { - serial5 = &uart5; + serial4 = &uart5; + serial5 = &uart6; /* * PCA9548 (2-0077) provides 8 channels connecting to * 6 pcs of FCB (Fan Controller Board). @@ -22,6 +23,8 @@ aliases { i2c19 = &imux19; i2c20 = &imux20; i2c21 = &imux21; + + spi1 = &spi_gpio; }; chosen { @@ -43,11 +46,54 @@ iio-hwmon { leds { compatible = "gpio-leds"; - led-fan-fault { - label = "led-fan-fault"; + led-0 { + label = "bmc_heartbeat_amber"; + gpios = <&gpio0 ASPEED_GPIO(P, 7) GPIO_ACTIVE_LOW>; + linux,default-trigger = "heartbeat"; + }; + + led-1 { + label = "fp_id_amber"; + default-state = "off"; + gpios = <&gpio0 ASPEED_GPIO(B, 5) GPIO_ACTIVE_HIGH>; + }; + + led-2 { + label = "power_blue"; + default-state = "off"; + gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_HIGH>; + }; + + led-3 { + label = "fan_status_led"; gpios = <&leds_gpio 9 GPIO_ACTIVE_HIGH>; default-state = "off"; }; + + led-4 { + label = "fan_fault_led_n"; + gpios = <&leds_gpio 10 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + }; + + spi_gpio: spi { + status = "okay"; + compatible = "spi-gpio"; + #address-cells = <1>; + #size-cells = <0>; + + sck-gpios = <&gpio0 ASPEED_GPIO(Z, 3) GPIO_ACTIVE_HIGH>; + mosi-gpios = <&gpio0 ASPEED_GPIO(Z, 4) GPIO_ACTIVE_HIGH>; + miso-gpios = <&gpio0 ASPEED_GPIO(Z, 5) GPIO_ACTIVE_HIGH>; + num-chipselects = <1>; + cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>; + + tpm@0 { + compatible = "infineon,slb9670", "tcg,tpm_tis-spi"; + spi-max-frequency = <33000000>; + reg = <0>; + }; }; }; @@ -77,6 +123,10 @@ fixed-link { }; }; +&mdio3 { + status = "okay"; +}; + &fmc { status = "okay"; flash@0 { @@ -94,10 +144,6 @@ flash@1 { }; }; -&rtc { - status = "okay"; -}; - &sgpiom0 { status = "okay"; ngpios = <128>; @@ -119,14 +165,15 @@ power-monitor@41 { shunt-resistor = <1000>; }; - power-monitor@67 { - compatible = "adi,ltc2945"; - reg = <0x67>; + power-monitor@44 { + compatible = "lltc,ltc4287"; + reg = <0x44>; + shunt-resistor-micro-ohms = <2000>; }; - power-monitor@68 { - compatible = "adi,ltc2945"; - reg = <0x68>; + power-monitor@43 { + compatible = "infineon,xdp710"; + reg = <0x43>; }; leds_gpio: gpio@19 { @@ -145,9 +192,9 @@ temperature-sensor@4b { reg = <0x4b>; }; - temperature-sensor@48 { + temperature-sensor@4f { compatible = "ti,tmp75"; - reg = <0x48>; + reg = <0x4f>; }; eeprom@54 { @@ -182,6 +229,35 @@ pwm@5e{ #address-cells = <1>; #size-cells = <0>; }; + + power-sensor@40 { + compatible = "ti,ina238"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + + 
power-sensor@45 { + compatible = "ti,ina238"; + reg = <0x45>; + shunt-resistor = <1000>; + }; + + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; }; imux17: i2c@1 { @@ -200,6 +276,35 @@ pwm@5e{ #address-cells = <1>; #size-cells = <0>; }; + + power-sensor@40 { + compatible = "ti,ina238"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + + power-sensor@45 { + compatible = "ti,ina238"; + reg = <0x45>; + shunt-resistor = <1000>; + }; + + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; }; imux18: i2c@2 { @@ -218,6 +323,35 @@ pwm@5e{ #address-cells = <1>; #size-cells = <0>; }; + + power-sensor@40 { + compatible = "ti,ina238"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + + power-sensor@45 { + compatible = "ti,ina238"; + reg = <0x45>; + shunt-resistor = <1000>; + }; + + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; }; imux19: i2c@3 { @@ -236,9 +370,38 @@ pwm@5e{ #address-cells = <1>; #size-cells = <0>; }; + + power-sensor@40 { + compatible = "ti,ina238"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + + power-sensor@45 { + compatible = "ti,ina238"; + reg = <0x45>; + shunt-resistor = <1000>; + }; + + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; }; - imux20: i2c@4 { + imux20: i2c@5 { #address-cells = <1>; #size-cells = <0>; reg = <4>; @@ -254,9 +417,37 @@ pwm@5e{ #address-cells = <1>; #size-cells = <0>; }; + + power-sensor@40 { + compatible = "ti,ina238"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + + power-sensor@45 { + compatible = "ti,ina238"; + reg = <0x45>; + shunt-resistor = <1000>; + }; + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; }; - imux21: i2c@5 { + imux21: i2c@4 { #address-cells = <1>; #size-cells = <0>; reg = <5>; @@ -272,6 +463,34 @@ pwm@5e{ #address-cells = <1>; #size-cells = <0>; }; + + power-sensor@40 { + compatible = "ti,ina238"; + reg = <0x40>; + shunt-resistor = <1000>; + }; + + power-sensor@41 { + compatible = "ti,ina238"; + reg = <0x41>; + shunt-resistor = <1000>; + }; + + power-sensor@44 { + compatible = "ti,ina238"; + reg = <0x44>; + shunt-resistor = <1000>; + }; + + power-sensor@45 { + compatible = "ti,ina238"; + reg = <0x45>; + shunt-resistor = <1000>; + }; + temperature-sensor@4b { + compatible = "ti,tmp75"; + reg = <0x4b>; + }; }; }; }; @@ -302,14 +521,16 @@ &i2c8 { &i2c9 { status = "okay"; -}; -&i2c10 { - status = "okay"; -}; + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; -&i2c11 { - status = "okay"; + rtc@51 { + compatible = "nxp,pcf8563"; + reg = <0x51>; + }; }; &i2c12 { @@ -338,6 +559,11 @@ eeprom@50 { compatible = "atmel,24c128"; reg = <0x50>; }; + + eeprom@56 { + compatible = "atmel,24c64"; + reg = <0x56>; + }; }; &adc0 { @@ -355,6 +581,10 
@@ &adc1 { pinctrl-0 = <&pinctrl_adc10_default>; }; +&ehci0 { + status = "okay"; +}; + &ehci1 { status = "okay"; }; @@ -381,12 +611,12 @@ &gpio0 { /*N0-N7*/ "","","","","","","","", /*O0-O7*/ "","","","","","","","", /*P0-P7*/ "","","","","","","","", - /*Q0-Q7*/ "","","","","","","","", + /*Q0-Q7*/ "","","","","","power-chassis-control","","", /*R0-R7*/ "","","","","","","","", - /*S0-S7*/ "","","","","","","","", + /*S0-S7*/ "","","","","","","","host0-ready", /*T0-T7*/ "","","","","","","","", /*U0-U7*/ "","","","","","","","", - /*V0-V7*/ "","","","","BAT_DETECT","","","", + /*V0-V7*/ "","","","","BAT_DETECT","","power-chassis-good","", /*W0-W7*/ "","","","","","","","", /*X0-X7*/ "","","BLADE_UART_SEL3","","","","","", /*Y0-Y7*/ "","","","","","","","", @@ -397,118 +627,118 @@ &sgpiom0 { gpio-line-names = /*"input pin","output pin"*/ /*A0 - A7*/ - "PRSNT_MTIA_BLADE0_N","PWREN_MTIA_BLADE0_EN", - "PRSNT_MTIA_BLADE1_N","PWREN_MTIA_BLADE1_EN", - "PRSNT_MTIA_BLADE2_N","PWREN_MTIA_BLADE2_EN", - "PRSNT_MTIA_BLADE3_N","PWREN_MTIA_BLADE3_EN", - "PRSNT_MTIA_BLADE4_N","PWREN_MTIA_BLADE4_EN", - "PRSNT_MTIA_BLADE5_N","PWREN_MTIA_BLADE5_EN", - "PRSNT_MTIA_BLADE6_N","PWREN_MTIA_BLADE6_EN", - "PRSNT_MTIA_BLADE7_N","PWREN_MTIA_BLADE7_EN", + "PRSNT_MTIA_BLADE0_N","PWREN_MTIA_BLADE0_EN_N", + "PRSNT_MTIA_BLADE1_N","PWREN_MTIA_BLADE1_EN_N", + "PRSNT_MTIA_BLADE2_N","PWREN_MTIA_BLADE2_EN_N", + "PRSNT_MTIA_BLADE3_N","PWREN_MTIA_BLADE3_EN_N", + "PRSNT_MTIA_BLADE4_N","PWREN_MTIA_BLADE4_EN_N", + "PRSNT_MTIA_BLADE5_N","PWREN_MTIA_BLADE5_EN_N", + "PRSNT_MTIA_BLADE6_N","PWREN_MTIA_BLADE6_EN_N", + "PRSNT_MTIA_BLADE7_N","PWREN_MTIA_BLADE7_EN_N", /*B0 - B7*/ - "PRSNT_MTIA_BLADE8_N","PWREN_MTIA_BLADE8_EN", - "PRSNT_MTIA_BLADE9_N","PWREN_MTIA_BLADE9_EN", - "PRSNT_MTIA_BLADE10_N","PWREN_MTIA_BLADE10_EN", - "PRSNT_MTIA_BLADE11_N","PWREN_MTIA_BLADE11_EN", - "PRSNT_MTIA_BLADE12_N","PWREN_MTIA_BLADE12_EN", - "PRSNT_MTIA_BLADE13_N","PWREN_MTIA_BLADE13_EN", - "PRSNT_MTIA_BLADE14_N","PWREN_MTIA_BLADE14_EN", - "PRSNT_MTIA_BLADE15_N","PWREN_MTIA_BLADE15_EN", + "PRSNT_MTIA_BLADE8_N","PWREN_MTIA_BLADE8_EN_N", + "PRSNT_MTIA_BLADE9_N","PWREN_MTIA_BLADE9_EN_N", + "PRSNT_MTIA_BLADE10_N","PWREN_MTIA_BLADE10_EN_N", + "PRSNT_MTIA_BLADE11_N","PWREN_MTIA_BLADE11_EN_N", + "PRSNT_MTIA_BLADE12_N","PWREN_MTIA_BLADE12_EN_N", + "PRSNT_MTIA_BLADE13_N","PWREN_MTIA_BLADE13_EN_N", + "PRSNT_MTIA_BLADE14_N","PWREN_MTIA_BLADE14_EN_N", + "PRSNT_MTIA_BLADE15_N","PWREN_MTIA_BLADE15_EN_N", /*C0 - C7*/ - "PRSNT_NW_BLADE0_N","PWREN_NW_BLADE0_EN", - "PRSNT_NW_BLADE1_N","PWREN_NW_BLADE1_EN", - "PRSNT_NW_BLADE2_N","PWREN_NW_BLADE2_EN", - "PRSNT_NW_BLADE3_N","PWREN_NW_BLADE3_EN", - "PRSNT_NW_BLADE4_N","PWREN_NW_BLADE4_EN", - "PRSNT_NW_BLADE5_N","PWREN_NW_BLADE5_EN", - "PRSNT_FCB_TOP_0_N","PWREN_MTIA_BLADE0_HSC_EN", - "PRSNT_FCB_TOP_1_N","PWREN_MTIA_BLADE1_HSC_EN", + "PRSNT_NW_BLADE0_N","PWREN_NW_BLADE0_EN_N", + "PRSNT_NW_BLADE1_N","PWREN_NW_BLADE1_EN_N", + "PRSNT_NW_BLADE2_N","PWREN_NW_BLADE2_EN_N", + "PRSNT_NW_BLADE3_N","PWREN_NW_BLADE3_EN_N", + "PRSNT_NW_BLADE4_N","PWREN_NW_BLADE4_EN_N", + "PRSNT_NW_BLADE5_N","PWREN_NW_BLADE5_EN_N", + "PRSNT_FCB_TOP_0_N","PWREN_MTIA_BLADE0_HSC_EN_N", + "PRSNT_FCB_TOP_1_N","PWREN_MTIA_BLADE1_HSC_EN_N", /*D0 - D7*/ - "PRSNT_FCB_MIDDLE_0_N","PWREN_MTIA_BLADE2_HSC_EN", - "PRSNT_FCB_MIDDLE_1_N","PWREN_MTIA_BLADE3_HSC_EN", - "PRSNT_FCB_BOTTOM_0_N","PWREN_MTIA_BLADE4_HSC_EN", - "PRSNT_FCB_BOTTOM_1_N","PWREN_MTIA_BLADE5_HSC_EN", - "PWRGD_MTIA_BLADE0_PWROK_L_BUF","PWREN_MTIA_BLADE6_HSC_EN", - 
"PWRGD_MTIA_BLADE1_PWROK_L_BUF","PWREN_MTIA_BLADE7_HSC_EN", - "PWRGD_MTIA_BLADE2_PWROK_L_BUF","PWREN_MTIA_BLADE8_HSC_EN", - "PWRGD_MTIA_BLADE3_PWROK_L_BUF","PWREN_MTIA_BLADE9_HSC_EN", + "PRSNT_FCB_MIDDLE_0_N","PWREN_MTIA_BLADE2_HSC_EN_N", + "PRSNT_FCB_MIDDLE_1_N","PWREN_MTIA_BLADE3_HSC_EN_N", + "PRSNT_FCB_BOTTOM_1_N","PWREN_MTIA_BLADE4_HSC_EN_N", + "PRSNT_FCB_BOTTOM_0_N","PWREN_MTIA_BLADE5_HSC_EN_N", + "PWRGD_MTIA_BLADE0_PWROK_N","PWREN_MTIA_BLADE6_HSC_EN_N", + "PWRGD_MTIA_BLADE1_PWROK_N","PWREN_MTIA_BLADE7_HSC_EN_N", + "PWRGD_MTIA_BLADE2_PWROK_N","PWREN_MTIA_BLADE8_HSC_EN_N", + "PWRGD_MTIA_BLADE3_PWROK_N","PWREN_MTIA_BLADE9_HSC_EN_N", /*E0 - E7*/ - "PWRGD_MTIA_BLADE4_PWROK_L_BUF","PWREN_MTIA_BLADE10_HSC_EN", - "PWRGD_MTIA_BLADE5_PWROK_L_BUF","PWREN_MTIA_BLADE11_HSC_EN", - "PWRGD_MTIA_BLADE6_PWROK_L_BUF","PWREN_MTIA_BLADE12_HSC_EN", - "PWRGD_MTIA_BLADE7_PWROK_L_BUF","PWREN_MTIA_BLADE13_HSC_EN", - "PWRGD_MTIA_BLADE8_PWROK_L_BUF","PWREN_MTIA_BLADE14_HSC_EN", - "PWRGD_MTIA_BLADE9_PWROK_L_BUF","PWREN_MTIA_BLADE15_HSC_EN", - "PWRGD_MTIA_BLADE10_PWROK_L_BUF","PWREN_NW_BLADE0_HSC_EN", - "PWRGD_MTIA_BLADE11_PWROK_L_BUF","PWREN_NW_BLADE1_HSC_EN", + "PWRGD_MTIA_BLADE4_PWROK_N","PWREN_MTIA_BLADE10_HSC_EN_N", + "PWRGD_MTIA_BLADE5_PWROK_N","PWREN_MTIA_BLADE11_HSC_EN_N", + "PWRGD_MTIA_BLADE6_PWROK_N","PWREN_MTIA_BLADE12_HSC_EN_N", + "PWRGD_MTIA_BLADE7_PWROK_N","PWREN_MTIA_BLADE13_HSC_EN_N", + "PWRGD_MTIA_BLADE8_PWROK_N","PWREN_MTIA_BLADE14_HSC_EN_N", + "PWRGD_MTIA_BLADE9_PWROK_N","PWREN_MTIA_BLADE15_HSC_EN_N", + "PWRGD_MTIA_BLADE10_PWROK_N","PWREN_NW_BLADE0_HSC_EN_N", + "PWRGD_MTIA_BLADE11_PWROK_N","PWREN_NW_BLADE1_HSC_EN_N", /*F0 - F7*/ - "PWRGD_MTIA_BLADE12_PWROK_L_BUF","PWREN_NW_BLADE2_HSC_EN", - "PWRGD_MTIA_BLADE13_PWROK_L_BUF","PWREN_NW_BLADE3_HSC_EN", - "PWRGD_MTIA_BLADE14_PWROK_L_BUF","PWREN_NW_BLADE4_HSC_EN", - "PWRGD_MTIA_BLADE15_PWROK_L_BUF","PWREN_NW_BLADE5_HSC_EN", - "PWRGD_NW_BLADE0_PWROK_L_BUF","PWREN_FCB_TOP_L_EN", - "PWRGD_NW_BLADE1_PWROK_L_BUF","PWREN_FCB_TOP_R_EN", - "PWRGD_NW_BLADE2_PWROK_L_BUF","PWREN_FCB_MIDDLE_L_EN", - "PWRGD_NW_BLADE3_PWROK_L_BUF","PWREN_FCB_MIDDLE_R_EN", + "PWRGD_MTIA_BLADE12_PWROK_N","PWREN_NW_BLADE2_HSC_EN_N", + "PWRGD_MTIA_BLADE13_PWROK_N","PWREN_NW_BLADE3_HSC_EN_N", + "PWRGD_MTIA_BLADE14_PWROK_N","PWREN_NW_BLADE4_HSC_EN_N", + "PWRGD_MTIA_BLADE15_PWROK_N","PWREN_NW_BLADE5_HSC_EN_N", + "PWRGD_NW_BLADE0_PWROK_N","PWREN_FCB_TOP_0_EN_N", + "PWRGD_NW_BLADE1_PWROK_N","PWREN_FCB_TOP_1_EN_N", + "PWRGD_NW_BLADE2_PWROK_N","PWREN_FCB_MIDDLE_0_EN_N", + "PWRGD_NW_BLADE3_PWROK_N","PWREN_FCB_MIDDLE_1_EN_N", /*G0 - G7*/ - "PWRGD_NW_BLADE4_PWROK_L_BUF","PWREN_FCB_BOTTOM_L_EN", - "PWRGD_NW_BLADE5_PWROK_L_BUF","PWREN_FCB_BOTTOM_R_EN", - "PWRGD_FCB_TOP_0_PWROK_L_BUF","FM_CMM_AC_CYCLE_N", - "PWRGD_FCB_TOP_1_PWROK_L_BUF","MGMT_SFP_TX_DIS", - "PWRGD_FCB_MIDDLE_0_PWROK_L_BUF","", - "PWRGD_FCB_MIDDLE_1_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE0_1_N", - "PWRGD_FCB_BOTTOM_0_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE2_3_N", - "PWRGD_FCB_BOTTOM_1_PWROK_L_BUF","RST_I2CRST_MTIA_BLADE4_5_N", + "PWRGD_NW_BLADE4_PWROK_N","PWREN_FCB_BOTTOM_1_EN_N", + "PWRGD_NW_BLADE5_PWROK_N","PWREN_FCB_BOTTOM_0_EN_N", + "PWRGD_FCB_TOP_0_PWROK_N","FM_CMM_AC_CYCLE_N", + "PWRGD_FCB_TOP_1_PWROK_N","MGMT_SFP_TX_DIS", + "PWRGD_FCB_MIDDLE_0_PWROK_N","FM_MDIO_SW_SEL", + "PWRGD_FCB_MIDDLE_1_PWROK_N","FM_P24V_SMPWR_EN", + "PWRGD_FCB_BOTTOM_1_PWROK_N","", + "PWRGD_FCB_BOTTOM_0_PWROK_N","", /*H0 - H7*/ - "LEAK_DETECT_MTIA_BLADE0_N_BUF","RST_I2CRST_MTIA_BLADE6_7_N", - "LEAK_DETECT_MTIA_BLADE1_N_BUF","RST_I2CRST_MTIA_BLADE8_9_N", - 
"LEAK_DETECT_MTIA_BLADE2_N_BUF","RST_I2CRST_MTIA_BLADE10_11_N", - "LEAK_DETECT_MTIA_BLADE3_N_BUF","RST_I2CRST_MTIA_BLADE12_13_N", - "LEAK_DETECT_MTIA_BLADE4_N_BUF","RST_I2CRST_MTIA_BLADE14_15_N", - "LEAK_DETECT_MTIA_BLADE5_N_BUF","RST_I2CRST_NW_BLADE0_1_2_N", - "LEAK_DETECT_MTIA_BLADE6_N_BUF","RST_I2CRST_NW_BLADE3_4_5_N", - "LEAK_DETECT_MTIA_BLADE7_N_BUF","RST_I2CRST_FCB_N", + "LEAK_DETECT_MTIA_BLADE0_N","", + "LEAK_DETECT_MTIA_BLADE1_N","", + "LEAK_DETECT_MTIA_BLADE2_N","", + "LEAK_DETECT_MTIA_BLADE3_N","", + "LEAK_DETECT_MTIA_BLADE4_N","", + "LEAK_DETECT_MTIA_BLADE5_N","", + "LEAK_DETECT_MTIA_BLADE6_N","", + "LEAK_DETECT_MTIA_BLADE7_N","", /*I0 - I7*/ - "LEAK_DETECT_MTIA_BLADE8_N_BUF","RST_I2CRST_FCB_B_L_N", - "LEAK_DETECT_MTIA_BLADE9_N_BUF","RST_I2CRST_FCB_B_R_N", - "LEAK_DETECT_MTIA_BLADE10_N_BUF","RST_I2CRST_FCB_M_L_N", - "LEAK_DETECT_MTIA_BLADE11_N_BUF","RST_I2CRST_FCB_M_R_N", - "LEAK_DETECT_MTIA_BLADE12_N_BUF","RST_I2CRST_FCB_T_L_N", - "LEAK_DETECT_MTIA_BLADE13_N_BUF","RST_I2CRST_FCB_T_R_N", - "LEAK_DETECT_MTIA_BLADE14_N_BUF","BMC_READY", - "LEAK_DETECT_MTIA_BLADE15_N_BUF","wFM_88E6393X_BIN_UPDATE_EN_N", + "LEAK_DETECT_MTIA_BLADE8_N","RST_I2CRST_FCB_BOTTOM_1_N", + "LEAK_DETECT_MTIA_BLADE9_N","RST_I2CRST_FCB_BOTTOM_0_N", + "LEAK_DETECT_MTIA_BLADE10_N","RST_I2CRST_FCB_MIDDLE_0_N", + "LEAK_DETECT_MTIA_BLADE11_N","RST_I2CRST_FCB_MIDDLE_1_N", + "LEAK_DETECT_MTIA_BLADE12_N","RST_I2CRST_FCB_TOP_0_N", + "LEAK_DETECT_MTIA_BLADE13_N","RST_I2CRST_FCB_TOP_1_N", + "LEAK_DETECT_MTIA_BLADE14_N","BMC_READY", + "LEAK_DETECT_MTIA_BLADE15_N","FM_88E6393X_BIN_UPDATE_EN_N", /*J0 - J7*/ - "LEAK_DETECT_NW_BLADE0_N_BUF","WATER_VALVE_CLOSED_N", - "LEAK_DETECT_NW_BLADE1_N_BUF","", - "LEAK_DETECT_NW_BLADE2_N_BUF","", - "LEAK_DETECT_NW_BLADE3_N_BUF","", - "LEAK_DETECT_NW_BLADE4_N_BUF","", - "LEAK_DETECT_NW_BLADE5_N_BUF","", - "MTIA_BLADE0_STATUS_LED","", - "MTIA_BLADE1_STATUS_LED","", + "LEAK_DETECT_NW_BLADE0_N","WATER_VALVE_CLOSED_N", + "LEAK_DETECT_NW_BLADE1_N","", + "LEAK_DETECT_NW_BLADE2_N","", + "LEAK_DETECT_NW_BLADE3_N","", + "LEAK_DETECT_NW_BLADE4_N","", + "LEAK_DETECT_NW_BLADE5_N","", + "PWRGD_MTIA_BLADE0_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE1_HSC_PWROK_N","", /*K0 - K7*/ - "MTIA_BLADE2_STATUS_LED","", - "MTIA_BLADE3_STATUS_LED","", - "MTIA_BLADE4_STATUS_LED","", - "MTIA_BLADE5_STATUS_LED","", - "MTIA_BLADE6_STATUS_LED","", - "MTIA_BLADE7_STATUS_LED","", - "MTIA_BLADE8_STATUS_LED","", - "MTIA_BLADE9_STATUS_LED","", + "PWRGD_MTIA_BLADE2_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE3_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE4_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE5_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE6_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE7_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE8_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE9_HSC_PWROK_N","", /*L0 - L7*/ - "MTIA_BLADE10_STATUS_LED","", - "MTIA_BLADE11_STATUS_LED","", - "MTIA_BLADE12_STATUS_LED","", - "MTIA_BLADE13_STATUS_LED","", - "MTIA_BLADE14_STATUS_LED","", - "MTIA_BLADE15_STATUS_LED","", - "NW_BLADE0_STATUS_LED","", - "NW_BLADE1_STATUS_LED","", + "PWRGD_MTIA_BLADE10_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE11_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE12_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE13_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE14_HSC_PWROK_N","", + "PWRGD_MTIA_BLADE15_HSC_PWROK_N","", + "PWRGD_NW_BLADE0_HSC_PWROK_N","", + "PWRGD_NW_BLADE1_HSC_PWROK_N","", /*M0 - M7*/ - "NW_BLADE2_STATUS_LED","", - "NW_BLADE3_STATUS_LED","", - "NW_BLADE4_STATUS_LED","", - "NW_BLADE5_STATUS_LED","", + "PWRGD_NW_BLADE2_HSC_PWROK_N","", + "PWRGD_NW_BLADE3_HSC_PWROK_N","", + "PWRGD_NW_BLADE4_HSC_PWROK_N","", + 
"PWRGD_NW_BLADE5_HSC_PWROK_N","", "RPU_READY","", "IT_GEAR_RPU_LINK_N","", "IT_GEAR_LEAK","", @@ -516,28 +746,28 @@ &sgpiom0 { /*N0 - N7*/ "VALVE_STS0","", "VALVE_STS1","", - "VALVE_STS2","", - "VALVE_STS3","", - "CR_TOGGLE_BOOT_BUF_N","", - "CMM_LC_RDY_LED_N","", - "CMM_LC_UNRDY_LED_N","", + "PCA9555_IRQ0_N","", + "PCA9555_IRQ1_N","", + "CR_TOGGLE_BOOT_N","", + "IRQ_FCB_TOP0_N","", + "IRQ_FCB_TOP1_N","", "CMM_CABLE_CARTRIDGE_PRSNT_BOT_N","", /*O0 - O7*/ "CMM_CABLE_CARTRIDGE_PRSNT_TOP_N","", "BOT_BCB_CABLE_PRSNT_N","", "TOP_BCB_CABLE_PRSNT_N","", - "CHASSIS0_LEAK_Q_N","", - "CHASSIS1_LEAK_Q_N","", - "LEAK0_DETECT","", - "LEAK1_DETECT","", - "MGMT_SFP_PRSNT_N","", + "IRQ_FCB_MID0_N","", + "IRQ_FCB_MID1_N","", + "CHASSIS_LEAK0_DETECT_N","", + "CHASSIS_LEAK1_DETECT_N","", + "VALVE_RMON_A_1","", /*P0 - P7*/ - "MGMT_SFP_TX_FAULT","", - "MGMT_SFP_RX_LOS","", - "","", - "","", - "","", - "","", - "","", - "",""; + "VALVE_RMON_A_2","", + "VALVE_RMON_B_1","", + "VALVE_RMON_B_2","", + "RPU_READY_SPARE","", + "IT_GEAR_LEAK_SPARE","", + "IT_GEAR_RPU_LINK_SPARE_N","", + "IRQ_FCB_BOT0_N","", + "IRQ_FCB_BOT0_N",""; }; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge-4u.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge-4u.dts new file mode 100644 index 00000000000000..839aad4ddd9171 --- /dev/null +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge-4u.dts @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2024 IBM Corp. +/dts-v1/; + +#include "aspeed-bmc-ibm-blueridge.dts" + +/ { + model = "Blueridge 4U"; +}; + +&i2c3 { + power-supply@6a { + compatible = "ibm,cffps"; + reg = <0x6a>; + }; + + power-supply@6b { + compatible = "ibm,cffps"; + reg = <0x6b>; + }; +}; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts new file mode 100644 index 00000000000000..dfe5cc3edb5238 --- /dev/null +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-blueridge.dts @@ -0,0 +1,1686 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2024 IBM Corp. 
+/dts-v1/; + +#include +#include +#include +#include "aspeed-g6.dtsi" +#include "ibm-power11-quad.dtsi" + +/ { + model = "Blueridge 2U"; + compatible = "ibm,blueridge-bmc", "aspeed,ast2600"; + + aliases { + serial4 = &uart5; + i2c16 = &i2c2mux0; + i2c17 = &i2c2mux1; + i2c18 = &i2c2mux2; + i2c19 = &i2c2mux3; + i2c20 = &i2c4mux0chn0; + i2c21 = &i2c4mux0chn1; + i2c22 = &i2c4mux0chn2; + i2c23 = &i2c5mux0chn0; + i2c24 = &i2c5mux0chn1; + i2c25 = &i2c6mux0chn0; + i2c26 = &i2c6mux0chn1; + i2c27 = &i2c6mux0chn2; + i2c28 = &i2c6mux0chn3; + i2c29 = &i2c11mux0chn0; + i2c30 = &i2c11mux0chn1; + }; + + chosen { + stdout-path = &uart5; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x40000000>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + event_log: region@b3d00000 { + reg = <0xb3d00000 0x100000>; + no-map; + }; + + ramoops@b3e00000 { + compatible = "ramoops"; + reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */ + record-size = <0x8000>; + console-size = <0x8000>; + ftrace-size = <0x8000>; + pmsg-size = <0x8000>; + max-reason = <3>; /* KMSG_DUMP_EMERG */ + }; + + /* LPC FW cycle bridge region requires natural alignment */ + flash_memory: region@b4000000 { + reg = <0xb4000000 0x04000000>; /* 64M */ + no-map; + }; + + /* VGA region is dictated by hardware strapping */ + vga_memory: region@bf000000 { + compatible = "shared-dma-pool"; + reg = <0xbf000000 0x01000000>; /* 16M */ + no-map; + }; + }; + + i2c-mux { + compatible = "i2c-mux-gpio"; + #address-cells = <1>; + #size-cells = <0>; + i2c-parent = <&i2c2>; + idle-state = <0>; + mux-gpios = <&gpio0 ASPEED_GPIO(G, 4) GPIO_ACTIVE_HIGH>, + <&gpio0 ASPEED_GPIO(G, 5) GPIO_ACTIVE_HIGH>; + + i2c2mux0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c2mux1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c2mux2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c2mux3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + leds { + compatible = "gpio-leds"; + + /* BMC Card fault LED at the back */ + led-bmc-ingraham0 { + gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>; + }; + + /* Enclosure ID LED at the back */ + led-rear-enc-id0 { + gpios = <&gpio0 ASPEED_GPIO(H, 2) GPIO_ACTIVE_LOW>; + }; + + /* Enclosure fault LED at the back */ + led-rear-enc-fault0 { + gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>; + }; + + /* PCIE slot power LED */ + led-pcieslot-power { + gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + poll-interval = <1000>; + + event-fan0-presence { + gpios = <&pca0 6 GPIO_ACTIVE_LOW>; + label = "fan0-presence"; + linux,code = <6>; + }; + + event-fan1-presence { + gpios = <&pca0 7 GPIO_ACTIVE_LOW>; + label = "fan1-presence"; + linux,code = <7>; + }; + + event-fan2-presence { + gpios = <&pca0 8 GPIO_ACTIVE_LOW>; + label = "fan2-presence"; + linux,code = <8>; + }; + + event-fan3-presence { + gpios = <&pca0 9 GPIO_ACTIVE_LOW>; + label = "fan3-presence"; + linux,code = <9>; + }; + + event-fan4-presence { + gpios = <&pca0 10 GPIO_ACTIVE_LOW>; + label = "fan4-presence"; + linux,code = <10>; + }; + + event-fan5-presence { + gpios = <&pca0 11 GPIO_ACTIVE_LOW>; + label = "fan5-presence"; + linux,code = <11>; + }; + }; + + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&adc1 7>; + }; +}; + +&adc1 { + status = "okay"; + aspeed,int-vref-microvolt = <2500000>; + pinctrl-names = "default"; + pinctrl-0 = 
<&pinctrl_adc8_default &pinctrl_adc9_default + &pinctrl_adc10_default &pinctrl_adc11_default + &pinctrl_adc12_default &pinctrl_adc13_default + &pinctrl_adc14_default &pinctrl_adc15_default>; +}; + +&ehci1 { + status = "okay"; +}; + +&uhci { + status = "okay"; +}; + +&gpio0 { + gpio-line-names = + /*A0-A7*/ "","","","","","","","", + /*B0-B7*/ "bmc-management-ready","","","","","","checkstop","", + /*C0-C7*/ "","","","","","","","", + /*D0-D7*/ "","","","","","","","", + /*E0-E7*/ "","","","","","","","", + /*F0-F7*/ "","","rtc-battery-voltage-read-enable","reset-cause-pinhole","","", + "factory-reset-toggle","", + /*G0-G7*/ "","","","","","","","", + /*H0-H7*/ "","bmc-ingraham0","rear-enc-id0","rear-enc-fault0","","","","", + /*I0-I7*/ "","","","","","","bmc-secure-boot","", + /*J0-J7*/ "","","","","","","","", + /*K0-K7*/ "","","","","","","","", + /*L0-L7*/ "","","","","","","","", + /*M0-M7*/ "","","","","","","","", + /*N0-N7*/ "","","","","","","","", + /*O0-O7*/ "","","","usb-power","","","","", + /*P0-P7*/ "","","","","pcieslot-power","","","", + /*Q0-Q7*/ "cfam-reset","","regulator-standby-faulted","","","","","", + /*R0-R7*/ "bmc-tpm-reset","power-chassis-control","power-chassis-good","","","","", + "", + /*S0-S7*/ "presence-ps0","presence-ps1","presence-ps2","presence-ps3", + "power-ffs-sync-history","","","", + /*T0-T7*/ "","","","","","","","", + /*U0-U7*/ "","","","","","","","", + /*V0-V7*/ "","","","","","","","", + /*W0-W7*/ "","","","","","","","", + /*X0-X7*/ "","","","","","","","", + /*Y0-Y7*/ "","","","","","","","", + /*Z0-Z7*/ "","","","","","","",""; + + i2c3-mux-oe-n-hog { + gpio-hog; + gpios = ; + line-name = "I2C3_MUX_OE_N"; + output-high; + }; + + usb-power-hog { + gpio-hog; + gpios = ; + output-high; + }; +}; + +&emmc_controller { + status = "okay"; +}; + +&pinctrl_emmc_default { + bias-disable; +}; + +&emmc { + status = "okay"; + clk-phase-mmc-hs200 = <180>, <180>; +}; + +&ibt { + status = "okay"; +}; + +&i2c0 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + gpio@20 { + compatible = "ti,tca9554"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = "", + "RUSSEL_FW_I2C_ENABLE_N", + "RUSSEL_OPPANEL_PRESENCE_N", + "BLYTH_OPPANEL_PRESENCE_N", + "CPU_TPM_CARD_PRESENT_N", + "DASD_BP2_PRESENT_N", + "DASD_BP1_PRESENT_N", + "DASD_BP0_PRESENT_N"; + }; +}; + +&i2c1 { + status = "okay"; +}; + +&i2c2 { + status = "okay"; +}; + +&i2c3 { + status = "okay"; + + power-supply@68 { + compatible = "ibm,cffps"; + reg = <0x68>; + }; + + power-supply@69 { + compatible = "ibm,cffps"; + reg = <0x69>; + }; + + led-controller@61 { + compatible = "nxp,pca9552"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "SLOT0_PRSNT_EN_RSVD", "SLOT1_PRSNT_EN_RSVD", + "SLOT2_PRSNT_EN_RSVD", "SLOT3_PRSNT_EN_RSVD", + "SLOT4_PRSNT_EN_RSVD", "SLOT0_EXPANDER_PRSNT_N", + "SLOT1_EXPANDER_PRSNT_N", "SLOT2_EXPANDER_PRSNT_N", + "SLOT3_EXPANDER_PRSNT_N", "SLOT4_EXPANDER_PRSNT_N", + "", "", "", "", "", ""; + }; +}; + +&i2c4 { + status = "okay"; + + temperature-sensor@48 { + compatible = "ti,tmp275"; + reg = <0x48>; + }; + + temperature-sensor@49 { + compatible = "ti,tmp275"; + reg = <0x49>; + }; + + temperature-sensor@4a { + compatible = "ti,tmp275"; + reg = <0x4a>; + }; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c4mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + 
#size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9551"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard0-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard0-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c4mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + + i2c4mux0chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + }; + }; +}; + +&i2c5 { + status = "okay"; + + temperature-sensor@48 { + compatible = "ti,tmp275"; + reg = <0x48>; + }; + + temperature-sensor@49 { + compatible = "ti,tmp275"; + reg = <0x49>; + }; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c5mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9551"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard3-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard3-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c5mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + led-controller@61 { + compatible = "nxp,pca9551"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard4-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard4-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + }; +}; + +&i2c6 { + status = "okay"; + + temperature-sensor@48 { + compatible = "ti,tmp275"; + reg = <0x48>; + }; + + temperature-sensor@4a { + compatible = "ti,tmp275"; + reg = <0x4a>; + }; + + temperature-sensor@4b { + compatible = "ti,tmp275"; + reg = <0x4b>; + }; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c6mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c6mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + }; + + i2c6mux0chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c6mux0chn3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + }; +}; + +&i2c7 { + multi-master; + status = "okay"; + + led-controller@30 { + compatible = "ibm,pca9552"; + reg = <0x30>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = 
"keep"; + label = "pcieslot0"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "pcieslot1"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "pcieslot2"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "pcieslot3"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "pcieslot4"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "cpu1"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "cpu-vrm1"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "lcd-russel"; + retain-state-shutdown; + type = ; + }; + }; + + led-controller@31 { + compatible = "ibm,pca9552"; + reg = <0x31>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "ddimm0"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "ddimm1"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "ddimm2"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "ddimm3"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "ddimm4"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "ddimm5"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "ddimm6"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "ddimm7"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "ddimm8"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "ddimm9"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "ddimm10"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "ddimm11"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "ddimm12"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "ddimm13"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "ddimm14"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "ddimm15"; + retain-state-shutdown; + type = ; + }; + }; + + led-controller@32 { + compatible = "ibm,pca9552"; + reg = <0x32>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "ddimm16"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "ddimm17"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "ddimm18"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "ddimm19"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "ddimm20"; + retain-state-shutdown; + 
type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "ddimm21"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "ddimm22"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "ddimm23"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "ddimm24"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "ddimm25"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "ddimm26"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "ddimm27"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "ddimm28"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "ddimm29"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "ddimm30"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "ddimm31"; + retain-state-shutdown; + type = ; + }; + }; + + led-controller@33 { + compatible = "ibm,pca9552"; + reg = <0x33>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "planar"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cpu0"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "dasd-pyramid0"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "dasd-pyramid1"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "dasd-pyramid2"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "cpu0-vrm0"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "rtc-battery"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "base-blyth"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "pcieslot6"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "pcieslot7"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "pcieslot8"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "pcieslot9"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "pcieslot10"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "pcieslot11"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "tpm-wilson"; + retain-state-shutdown; + type = ; + }; + }; + + humidity-sensor@40 { + compatible = "silabs,si7020"; + reg = <0x40>; + }; + + temperature-sensor@48 { + compatible = "ti,tmp275"; + reg = <0x48>; + }; + + pwm@52 { + compatible = "maxim,max31785a"; + reg = <0x52>; + }; + + led-controller@60 { + compatible = "nxp,pca9551"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = 
<2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "front-sys-id0"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "front-check-log0"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "front-enc-fault1"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "front-sys-pwron0"; + retain-state-shutdown; + type = ; + }; + }; + + pca0: led-controller@61 { + compatible = "nxp,pca9552"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "fan0"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "fan1"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "fan2"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "fan3"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "fan4"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "fan5"; + retain-state-shutdown; + type = ; + }; + }; + + lcd-controller@62 { + compatible = "ibm,op-panel"; + reg = <(0x62 | I2C_OWN_SLAVE_ADDRESS)>; + }; + + pressure-sensor@76 { + compatible = "infineon,dps310"; + reg = <0x76>; + #io-channel-cells = <0>; + }; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; +}; + +&i2c8 { + status = "okay"; + + pmic@11 { + compatible = "ti,ucd90320"; + reg = <0x11>; + }; + + rtc@32 { + compatible = "epson,rx8900"; + reg = <0x32>; + }; + + temperature-sensor@48 { + compatible = "ti,tmp275"; + reg = <0x48>; + }; + + temperature-sensor@4a { + compatible = "ti,tmp275"; + reg = <0x4a>; + }; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + led-controller@60 { + compatible = "nxp,pca9552"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "", "", "", "", "", "", "", "", + "", "", "", "", "", "", "power-config-full-load", ""; + }; + + led-controller@61 { + compatible = "nxp,pca9552"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "SLOT6_PRSNT_EN_RSVD", "SLOT7_PRSNT_EN_RSVD", + "SLOT8_PRSNT_EN_RSVD", "SLOT9_PRSNT_EN_RSVD", + "SLOT10_PRSNT_EN_RSVD", "SLOT11_PRSNT_EN_RSVD", + "SLOT6_EXPANDER_PRSNT_N", "SLOT7_EXPANDER_PRSNT_N", + "SLOT8_EXPANDER_PRSNT_N", "SLOT9_EXPANDER_PRSNT_N", + "SLOT10_EXPANDER_PRSNT_N", "SLOT11_EXPANDER_PRSNT_N", + "", "", "", ""; + }; + +}; + +&i2c9 { + status = "okay"; + + temperature-sensor@4c { + compatible = "ti,tmp423"; + reg = <0x4c>; + }; + + temperature-sensor@4d { + compatible = "ti,tmp423"; + reg = <0x4d>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; +}; + +&i2c10 { + status = "okay"; + + temperature-sensor@4c { + compatible = "ti,tmp423"; + reg = <0x4c>; + }; + + temperature-sensor@4d { + compatible = "ti,tmp423"; + reg = <0x4d>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; +}; + +&i2c11 { + status = "okay"; + + temperature-sensor@48 { + compatible = "ti,tmp275"; + reg = <0x48>; + }; + + temperature-sensor@49 { + 
compatible = "ti,tmp275"; + reg = <0x49>; + }; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c11mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9551"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard10-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard10-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c11mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + }; + }; +}; + +&i2c12 { + status = "okay"; + + tpm@2e { + compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c"; + reg = <0x2e>; + memory-region = <&event_log>; + }; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; +}; + +&i2c13 { + status = "okay"; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9552"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "nvme0"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "nvme1"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "nvme2"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "nvme3"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "nvme4"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "nvme5"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "nvme6"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "nvme7"; + retain-state-shutdown; + type = ; + }; + }; +}; + +&i2c14 { + status = "okay"; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9552"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "nvme8"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "nvme9"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "nvme10"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "nvme11"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "nvme12"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "nvme13"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "nvme14"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "nvme15"; + retain-state-shutdown; + type = ; + }; + }; +}; + +&i2c15 { + status = "okay"; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + 
+	led-controller@60 {
+		compatible = "nxp,pca9552";
+		reg = <0x60>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		led@0 {
+			reg = <0>;
+			default-state = "keep";
+			label = "nvme16";
+			retain-state-shutdown;
+			type = ;
+		};
+
+		led@1 {
+			reg = <1>;
+			default-state = "keep";
+			label = "nvme17";
+			retain-state-shutdown;
+			type = ;
+		};
+
+		led@2 {
+			reg = <2>;
+			default-state = "keep";
+			label = "nvme18";
+			retain-state-shutdown;
+			type = ;
+		};
+
+		led@3 {
+			reg = <3>;
+			default-state = "keep";
+			label = "nvme19";
+			retain-state-shutdown;
+			type = ;
+		};
+
+		led@4 {
+			reg = <4>;
+			default-state = "keep";
+			label = "nvme20";
+			retain-state-shutdown;
+			type = ;
+		};
+
+		led@5 {
+			reg = <5>;
+			default-state = "keep";
+			label = "nvme21";
+			retain-state-shutdown;
+			type = ;
+		};
+
+		led@6 {
+			reg = <6>;
+			default-state = "keep";
+			label = "nvme22";
+			retain-state-shutdown;
+			type = ;
+		};
+
+		led@7 {
+			reg = <7>;
+			default-state = "keep";
+			label = "nvme23";
+			retain-state-shutdown;
+			type = ;
+		};
+	};
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&vuart1 {
+	status = "okay";
+};
+
+&vuart2 {
+	status = "okay";
+};
+
+&lpc_ctrl {
+	status = "okay";
+	memory-region = <&flash_memory>;
+};
+
+&mac2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_rmii3_default>;
+	clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>,
+		 <&syscon ASPEED_CLK_MAC3RCLK>;
+	clock-names = "MACCLK", "RCLK";
+	use-ncsi;
+};
+
+&mac3 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_rmii4_default>;
+	clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>,
+		 <&syscon ASPEED_CLK_MAC4RCLK>;
+	clock-names = "MACCLK", "RCLK";
+	use-ncsi;
+};
+
+&wdt1 {
+	aspeed,reset-type = "none";
+	aspeed,external-signal;
+	aspeed,ext-push-pull;
+	aspeed,ext-active-high;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdtrst1_default>;
+};
+
+&wdt2 {
+	status = "okay";
+};
+
+&kcs2 {
+	status = "okay";
+	aspeed,lpc-io-reg = <0xca8 0xcac>;
+};
+
+&kcs3 {
+	status = "okay";
+	aspeed,lpc-io-reg = <0xca2>;
+	aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
index 6fdda42575df59..7364adc6b80d60 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-bonnell.dts
@@ -570,11 +570,6 @@ &wdt2 {
 	status = "okay";
 };
 
-&xdma {
-	status = "okay";
-	memory-region = <&vga_memory>;
-};
-
 &kcs2 {
 	status = "okay";
 	aspeed,lpc-io-reg = <0xca8 0xcac>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
index 214b2e6a4c6df6..513077a1f4be69 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-everest.dts
@@ -2486,11 +2486,6 @@ &wdt2 {
 	status = "okay";
 };
 
-&xdma {
-	status = "okay";
-	memory-region = <&vga_memory>;
-};
-
 &kcs2 {
 	status = "okay";
 	aspeed,lpc-io-reg = <0xca8 0xcac>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts
new file mode 100644
index 00000000000000..c24e464e5faa17
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-fuji.dts
@@ -0,0 +1,3876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2024 IBM Corp.
+/dts-v1/; + +#include +#include +#include +#include "aspeed-g6.dtsi" +#include "ibm-power11-quad.dtsi" + +/ { + model = "Fuji"; + compatible = "ibm,fuji-bmc", "aspeed,ast2600"; + + aliases { + i2c500 = &cfam4_i2c0; + i2c501 = &cfam4_i2c1; + i2c510 = &cfam4_i2c10; + i2c511 = &cfam4_i2c11; + i2c512 = &cfam4_i2c12; + i2c513 = &cfam4_i2c13; + i2c514 = &cfam4_i2c14; + i2c515 = &cfam4_i2c15; + i2c602 = &cfam5_i2c2; + i2c603 = &cfam5_i2c3; + i2c610 = &cfam5_i2c10; + i2c611 = &cfam5_i2c11; + i2c614 = &cfam5_i2c14; + i2c615 = &cfam5_i2c15; + i2c616 = &cfam5_i2c16; + i2c617 = &cfam5_i2c17; + i2c700 = &cfam6_i2c0; + i2c701 = &cfam6_i2c1; + i2c710 = &cfam6_i2c10; + i2c711 = &cfam6_i2c11; + i2c712 = &cfam6_i2c12; + i2c713 = &cfam6_i2c13; + i2c714 = &cfam6_i2c14; + i2c715 = &cfam6_i2c15; + i2c802 = &cfam7_i2c2; + i2c803 = &cfam7_i2c3; + i2c810 = &cfam7_i2c10; + i2c811 = &cfam7_i2c11; + i2c814 = &cfam7_i2c14; + i2c815 = &cfam7_i2c15; + i2c816 = &cfam7_i2c16; + i2c817 = &cfam7_i2c17; + + i2c16 = &i2c4mux0chn0; + i2c17 = &i2c4mux0chn1; + i2c18 = &i2c4mux0chn2; + i2c19 = &i2c5mux0chn0; + i2c20 = &i2c5mux0chn1; + i2c21 = &i2c5mux0chn2; + i2c22 = &i2c5mux0chn3; + i2c23 = &i2c6mux0chn0; + i2c24 = &i2c6mux0chn1; + i2c25 = &i2c6mux0chn2; + i2c26 = &i2c6mux0chn3; + i2c27 = &i2c14mux0chn0; + i2c28 = &i2c14mux0chn1; + i2c29 = &i2c14mux0chn2; + i2c30 = &i2c14mux0chn3; + i2c31 = &i2c14mux1chn0; + i2c32 = &i2c14mux1chn1; + i2c33 = &i2c14mux1chn2; + i2c34 = &i2c14mux1chn3; + i2c35 = &i2c15mux0chn0; + i2c36 = &i2c15mux0chn1; + i2c37 = &i2c15mux0chn2; + i2c38 = &i2c15mux0chn3; + i2c39 = &i2c15mux1chn0; + i2c40 = &i2c15mux1chn1; + i2c41 = &i2c15mux1chn2; + i2c42 = &i2c15mux1chn3; + i2c43 = &i2c15mux2chn0; + i2c44 = &i2c15mux2chn1; + i2c45 = &i2c15mux2chn2; + i2c46 = &i2c15mux2chn3; + i2c47 = &i2c8mux0chn0; + i2c48 = &i2c8mux0chn1; + + serial4 = &uart5; + + sbefifo500 = &sbefifo500; + sbefifo501 = &sbefifo501; + sbefifo510 = &sbefifo510; + sbefifo511 = &sbefifo511; + sbefifo512 = &sbefifo512; + sbefifo513 = &sbefifo513; + sbefifo514 = &sbefifo514; + sbefifo515 = &sbefifo515; + sbefifo602 = &sbefifo602; + sbefifo603 = &sbefifo603; + sbefifo610 = &sbefifo610; + sbefifo611 = &sbefifo611; + sbefifo614 = &sbefifo614; + sbefifo615 = &sbefifo615; + sbefifo616 = &sbefifo616; + sbefifo617 = &sbefifo617; + sbefifo700 = &sbefifo700; + sbefifo701 = &sbefifo701; + sbefifo710 = &sbefifo710; + sbefifo711 = &sbefifo711; + sbefifo712 = &sbefifo712; + sbefifo713 = &sbefifo713; + sbefifo714 = &sbefifo714; + sbefifo715 = &sbefifo715; + sbefifo802 = &sbefifo802; + sbefifo803 = &sbefifo803; + sbefifo810 = &sbefifo810; + sbefifo811 = &sbefifo811; + sbefifo814 = &sbefifo814; + sbefifo815 = &sbefifo815; + sbefifo816 = &sbefifo816; + sbefifo817 = &sbefifo817; + + scom500 = &scom500; + scom501 = &scom501; + scom510 = &scom510; + scom511 = &scom511; + scom512 = &scom512; + scom513 = &scom513; + scom514 = &scom514; + scom515 = &scom515; + scom602 = &scom602; + scom603 = &scom603; + scom610 = &scom610; + scom611 = &scom611; + scom614 = &scom614; + scom615 = &scom615; + scom616 = &scom616; + scom617 = &scom617; + scom700 = &scom700; + scom701 = &scom701; + scom710 = &scom710; + scom711 = &scom711; + scom712 = &scom712; + scom713 = &scom713; + scom714 = &scom714; + scom715 = &scom715; + scom802 = &scom802; + scom803 = &scom803; + scom810 = &scom810; + scom811 = &scom811; + scom814 = &scom814; + scom815 = &scom815; + scom816 = &scom816; + scom817 = &scom817; + + spi50 = &cfam4_spi0; + spi51 = &cfam4_spi1; + spi52 = &cfam4_spi2; + spi53 = &cfam4_spi3; + 
spi60 = &cfam5_spi0; + spi61 = &cfam5_spi1; + spi62 = &cfam5_spi2; + spi63 = &cfam5_spi3; + spi70 = &cfam6_spi0; + spi71 = &cfam6_spi1; + spi72 = &cfam6_spi2; + spi73 = &cfam6_spi3; + spi80 = &cfam7_spi0; + spi81 = &cfam7_spi1; + spi82 = &cfam7_spi2; + spi83 = &cfam7_spi3; + }; + + chosen { + stdout-path = &uart5; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x40000000>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + event_log: region@b3d00000 { + reg = <0xb3d00000 0x100000>; + no-map; + }; + + ramoops@b3e00000 { + compatible = "ramoops"; + reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */ + record-size = <0x8000>; + console-size = <0x8000>; + ftrace-size = <0x8000>; + pmsg-size = <0x8000>; + max-reason = <3>; /* KMSG_DUMP_EMERG */ + }; + + /* LPC FW cycle bridge region requires natural alignment */ + flash_memory: region@b4000000 { + reg = <0xb4000000 0x04000000>; /* 64M */ + no-map; + }; + + /* VGA region is dictated by hardware strapping */ + vga_memory: region@bf000000 { + compatible = "shared-dma-pool"; + reg = <0xbf000000 0x01000000>; /* 16M */ + no-map; + }; + }; + + gpio-keys-polled { + compatible = "gpio-keys-polled"; + poll-interval = <1000>; + + event-fan0-presence { + gpios = <&pca0 15 GPIO_ACTIVE_LOW>; + label = "fan0-presence"; + linux,code = <15>; + }; + + event-fan1-presence { + gpios = <&pca0 14 GPIO_ACTIVE_LOW>; + label = "fan1-presence"; + linux,code = <14>; + }; + + event-fan2-presence { + gpios = <&pca0 13 GPIO_ACTIVE_LOW>; + label = "fan2-presence"; + linux,code = <13>; + }; + + event-fan3-presence { + gpios = <&pca0 12 GPIO_ACTIVE_LOW>; + label = "fan3-presence"; + linux,code = <12>; + }; + }; + + leds { + compatible = "gpio-leds"; + + /* RTC battery fault LED at the back */ + led-rtc-battery { + gpios = <&gpio0 ASPEED_GPIO(H, 0) GPIO_ACTIVE_LOW>; + }; + + /* BMC Card fault LED at the back */ + led-bmc { + gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>; + }; + + /* Enclosure Identify LED at the back */ + led-rear-enc-id0 { + gpios = <&gpio0 ASPEED_GPIO(H, 2) GPIO_ACTIVE_LOW>; + }; + + /* Enclosure fault LED at the back */ + led-rear-enc-fault0 { + gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>; + }; + + /* PCIE slot power LED */ + led-pcieslot-power { + gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>; + }; + }; + + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&adc1 7>; + }; +}; + +&adc1 { + status = "okay"; + aspeed,int-vref-microvolt = <2500000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default + &pinctrl_adc10_default &pinctrl_adc11_default + &pinctrl_adc12_default &pinctrl_adc13_default + &pinctrl_adc14_default &pinctrl_adc15_default>; +}; + +&gpio0 { + gpio-line-names = + /*A0-A7*/ "","","","","","","","", + /*B0-B7*/ "bmc-management-ready","","","","","","checkstop","", + /*C0-C7*/ "","","","","","","","", + /*D0-D7*/ "","","","","","","","", + /*E0-E7*/ "","","","","","","","", + /*F0-F7*/ "","","rtc-battery-voltage-read-enable","reset-cause-pinhole","","", + "factory-reset-toggle","", + /*G0-G7*/ "","","","","","","","", + /*H0-H7*/ "led-rtc-battery","led-bmc","led-rear-enc-id0","led-rear-enc-fault0","","", + "","", + /*I0-I7*/ "","","","","","","bmc-secure-boot","", + /*J0-J7*/ "","","","","","","","", + /*K0-K7*/ "","","","","","","","", + /*L0-L7*/ "","","","","","","","", + /*M0-M7*/ "","","","","","","","", + /*N0-N7*/ "","","","","","","","", + /*O0-O7*/ "","","","usb-power","","","","", + /*P0-P7*/ 
"","","","","led-pcieslot-power","","","", + /*Q0-Q7*/ "","","regulator-standby-faulted","","","","","", + /*R0-R7*/ "bmc-tpm-reset","power-chassis-control","power-chassis-good","","", + "I2C_FLASH_MICRO_N","","", + /*S0-S7*/ "","","","","power-ffs-sync-history","","","", + /*T0-T7*/ "","","","","","","","", + /*U0-U7*/ "","","","","","","","", + /*V0-V7*/ "","BMC_3RESTART_ATTEMPT_P","","","","","","", + /*W0-W7*/ "","","","","","","","", + /*X0-X7*/ "","","","","","","","", + /*Y0-Y7*/ "","","","","","","","", + /*Z0-Z7*/ "","","","","","","",""; + + usb-power-hog { + gpio-hog; + gpios = ; + output-high; + }; +}; + +&i2c0 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + led-controller@62 { + compatible = "nxp,pca9552"; + reg = <0x62>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "presence-ps0", + "presence-ps1", + "presence-ps2", + "presence-ps3", + "presence-pdb", + "presence-tpm", + "", "", + "presence-cp0", + "presence-cp1", + "presence-cp2", + "presence-cp3", + "presence-dasd", + "presence-lcd-op", + "presence-base-op", + ""; + }; +}; + +&i2c1 { + status = "okay"; +}; + +&i2c2 { + status = "okay"; +}; + +&i2c3 { + status = "okay"; + + eeprom@54 { + compatible = "atmel,24c128"; + reg = <0x54>; + }; + + power-supply@68 { + compatible = "ibm,cffps"; + reg = <0x68>; + }; + + power-supply@69 { + compatible = "ibm,cffps"; + reg = <0x69>; + }; + + power-supply@6b { + compatible = "ibm,cffps"; + reg = <0x6b>; + }; + + power-supply@6d { + compatible = "ibm,cffps"; + reg = <0x6d>; + }; +}; + +&i2c4 { + status = "okay"; + + led-controller@65 { + compatible = "nxp,pca9552"; + reg = <0x65>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "presence-cable-card1", + "presence-cable-card2", + "presence-cable-card3", + "presence-cable-card4", + "presence-cable-card5", + "expander-cable-card1", + "expander-cable-card2", + "expander-cable-card3", + "expander-cable-card4", + "expander-cable-card5"; + }; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c4mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + + led-controller@62 { + compatible = "nxp,pca9551"; + reg = <0x62>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c01-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c01-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c4mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9551"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c02-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c02-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c4mux0chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + led-controller@61 { + compatible = 
"nxp,pca9551"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c03-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c03-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + }; +}; + +&i2c5 { + status = "okay"; + + led-controller@66 { + compatible = "nxp,pca9552"; + reg = <0x66>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "presence-cable-card6", + "presence-cable-card7", + "presence-cable-card8", + "presence-cable-card9", + "presence-cable-card10", + "presence-cable-card11", + "expander-cable-card6", + "expander-cable-card7", + "expander-cable-card8", + "expander-cable-card9", + "expander-cable-card10", + "expander-cable-card11"; + }; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c5mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9551"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c04-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c04-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c5mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + led-controller@61 { + compatible = "nxp,pca9551"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c05-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c05-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c5mux0chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + + led-controller@62 { + compatible = "nxp,pca9551"; + reg = <0x62>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c06-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c06-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c5mux0chn3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + + led-controller@63 { + compatible = "nxp,pca9551"; + reg = <0x63>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c07-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c07-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + }; +}; + +&i2c6 { + status = "okay"; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c6mux0chn0: i2c@0 { + reg = <0>; + 
#address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9551"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c08-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c08-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c6mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@52 { + compatible = "atmel,24c64"; + reg = <0x52>; + }; + + led-controller@62 { + compatible = "nxp,pca9551"; + reg = <0x62>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c09-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c09-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c6mux0chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + + led-controller@63 { + compatible = "nxp,pca9551"; + reg = <0x63>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c10-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c10-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c6mux0chn3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + led-controller@61 { + compatible = "nxp,pca9551"; + reg = <0x61>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "cablecard-c11-cxp-top"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "cablecard-c11-cxp-bot"; + retain-state-shutdown; + type = ; + }; + }; + }; + }; + + led-controller@65 { + compatible = "nxp,pca9552"; + reg = <0x65>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "pcieslot-c01"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "pcieslot-c02"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "pcieslot-c03"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "pcieslot-c04"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "pcieslot-c05"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "pcieslot-c06"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "pcieslot-c07"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "pcieslot-c08"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "pcieslot-c09"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "pcieslot-c10"; + 
retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "pcieslot-c11"; + retain-state-shutdown; + type = ; + }; + }; +}; + +&i2c7 { + status = "okay"; + + led-controller@31 { + compatible = "ibm,pca9552"; + reg = <0x31>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "ddimm0"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "ddimm1"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "ddimm2"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "ddimm3"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "ddimm4"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "ddimm5"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "ddimm6"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "ddimm7"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "ddimm8"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "ddimm9"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "ddimm10"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "ddimm11"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "ddimm12"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "ddimm13"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "ddimm14"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "ddimm15"; + retain-state-shutdown; + type = ; + }; + }; + + led-controller@32 { + compatible = "ibm,pca9552"; + reg = <0x32>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "ddimm16"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "ddimm17"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "ddimm18"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "ddimm19"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "ddimm20"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "ddimm21"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "ddimm22"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "ddimm23"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "ddimm24"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "ddimm25"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "ddimm26"; + retain-state-shutdown; + 
type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "ddimm27"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "ddimm28"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "ddimm29"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "ddimm30"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "ddimm31"; + retain-state-shutdown; + type = ; + }; + }; + + led-controller@33 { + compatible = "ibm,pca9552"; + reg = <0x33>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "ddimm32"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "ddimm33"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "ddimm34"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "ddimm35"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "ddimm36"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "ddimm37"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "ddimm38"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "ddimm39"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "ddimm40"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "ddimm41"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "ddimm42"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "ddimm43"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "ddimm44"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "ddimm45"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "ddimm46"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "ddimm47"; + retain-state-shutdown; + type = ; + }; + }; + + led-controller@30 { + compatible = "ibm,pca9552"; + reg = <0x30>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "ddimm48"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "ddimm49"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "ddimm50"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "ddimm51"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "ddimm52"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "ddimm53"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "ddimm54"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + 
default-state = "keep"; + label = "ddimm55"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "ddimm56"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "ddimm57"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "ddimm58"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "ddimm59"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "ddimm60"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "ddimm61"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "ddimm62"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "ddimm63"; + retain-state-shutdown; + type = ; + }; + }; + + led-controller@34 { + compatible = "ibm,pca9552"; + reg = <0x34>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "planar"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "tpm"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "cpu3-c61"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "cpu0-c14"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "opencapi-connector3"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "opencapi-connector4"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "opencapi-connector5"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "vrm4"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "vrm5"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "vrm6"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "vrm7"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "vrm12"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "vrm13"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "vrm14"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "vrm15"; + retain-state-shutdown; + type = ; + }; + }; + + led-controller@35 { + compatible = "ibm,pca9552"; + reg = <0x35>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "dasd-backplane"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "power-distribution"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "cpu1-c19"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "cpu2-c56"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state 
= "keep"; + label = "opencapi-connector0"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "opencapi-connector1"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "opencapi-connector2"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "vrm0"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "vrm1"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "vrm2"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "vrm3"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "vrm8"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "vrm9"; + retain-state-shutdown; + type = ; + }; + + led@14 { + reg = <14>; + default-state = "keep"; + label = "vrm10"; + retain-state-shutdown; + type = ; + }; + + led@15 { + reg = <15>; + default-state = "keep"; + label = "vrm11"; + retain-state-shutdown; + type = ; + }; + }; +}; + +&i2c8 { + status = "okay"; + + pmic@11 { + compatible = "ti,ucd90320"; + reg = <0x11>; + }; + + rtc@32 { + compatible = "epson,rx8900"; + reg = <0x32>; + }; + + eeprom@51 { + compatible = "atmel,24c64"; + reg = <0x51>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + reset-gpio = <&gpio0 ASPEED_GPIO(S, 5) GPIO_ACTIVE_LOW>; + + i2c8mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c8mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; +}; + +&i2c9 { + status = "okay"; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + eeprom@51 { + compatible = "atmel,24c128"; + reg = <0x51>; + }; + + eeprom@53 { + compatible = "atmel,24c128"; + reg = <0x53>; + }; + + eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + }; +}; + +&i2c10 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c128"; + reg = <0x51>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + eeprom@53 { + compatible = "atmel,24c128"; + reg = <0x53>; + }; + + eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + }; +}; + +&i2c11 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c128"; + reg = <0x51>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + eeprom@53 { + compatible = "atmel,24c128"; + reg = <0x53>; + }; + + eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + }; +}; + +&i2c12 { + status = "okay"; + + tpm@2e { + compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c"; + reg = <0x2e>; + memory-region = <&event_log>; + }; +}; + +&i2c13 { + status = "okay"; + + eeprom@51 { + compatible = "atmel,24c128"; + reg = <0x51>; + }; + + eeprom@50 { + compatible = "atmel,24c128"; + reg = <0x50>; + }; + + eeprom@53 { + compatible = "atmel,24c128"; + reg = <0x53>; + }; + + eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + }; +}; + +&i2c14 { + multi-master; + status = "okay"; + + lcd-controller@62 { + compatible = "ibm,op-panel"; + reg = <(0x62 | I2C_OWN_SLAVE_ADDRESS)>; + }; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + 
idle-state = <1>; + + i2c14mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + }; + + i2c14mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@51 { + compatible = "atmel,24c32"; + reg = <0x51>; + }; + }; + + i2c14mux0chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + + led-controller@60 { + compatible = "nxp,pca9551"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "front-sys-id0"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "front-check-log0"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "front-enc-fault1"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "front-sys-pwron0"; + retain-state-shutdown; + type = ; + }; + }; + }; + + i2c14mux0chn3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + pwm@52 { + compatible = "maxim,max31785a"; + reg = <0x52>; + #address-cells = <1>; + #size-cells = <0>; + }; + + led-controller@60 { + compatible = "nxp,pca9552"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + gpio-controller; + #gpio-cells = <2>; + + led@0 { + reg = <0>; + default-state = "keep"; + label = "nvme0"; + retain-state-shutdown; + type = ; + }; + + led@1 { + reg = <1>; + default-state = "keep"; + label = "nvme1"; + retain-state-shutdown; + type = ; + }; + + led@2 { + reg = <2>; + default-state = "keep"; + label = "nvme2"; + retain-state-shutdown; + type = ; + }; + + led@3 { + reg = <3>; + default-state = "keep"; + label = "nvme3"; + retain-state-shutdown; + type = ; + }; + + led@4 { + reg = <4>; + default-state = "keep"; + label = "nvme4"; + retain-state-shutdown; + type = ; + }; + + led@5 { + reg = <5>; + default-state = "keep"; + label = "nvme5"; + retain-state-shutdown; + type = ; + }; + + led@6 { + reg = <6>; + default-state = "keep"; + label = "nvme6"; + retain-state-shutdown; + type = ; + }; + + led@7 { + reg = <7>; + default-state = "keep"; + label = "nvme7"; + retain-state-shutdown; + type = ; + }; + + led@8 { + reg = <8>; + default-state = "keep"; + label = "nvme8"; + retain-state-shutdown; + type = ; + }; + + led@9 { + reg = <9>; + default-state = "keep"; + label = "nvme9"; + retain-state-shutdown; + type = ; + }; + + led@10 { + reg = <10>; + default-state = "keep"; + label = "fan0"; + retain-state-shutdown; + type = ; + }; + + led@11 { + reg = <11>; + default-state = "keep"; + label = "fan1"; + retain-state-shutdown; + type = ; + }; + + led@12 { + reg = <12>; + default-state = "keep"; + label = "fan2"; + retain-state-shutdown; + type = ; + }; + + led@13 { + reg = <13>; + default-state = "keep"; + label = "fan3"; + retain-state-shutdown; + type = ; + }; + }; + + pca0: led-controller@61 { + compatible = "nxp,pca9552"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x61>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "","","","", + "","","","", + "","","","", + "presence-fan3", + "presence-fan2", + "presence-fan1", + "presence-fan0"; + }; + }; + }; + + i2c-mux@71 { + compatible = "nxp,pca9546"; + reg = <0x71>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c14mux1chn0: i2c@0 { + reg = <0>; + 
#address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + + i2c14mux1chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + + i2c14mux1chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + + i2c14mux1chn3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + }; + }; +}; + +&i2c15 { + status = "okay"; + + i2c-mux@70 { + compatible = "nxp,pca9546"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c15mux0chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c15mux0chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c15mux0chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c15mux0chn3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + }; + + i2c-mux@71 { + compatible = "nxp,pca9546"; + reg = <0x71>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c15mux1chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c15mux1chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c15mux1chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c15mux1chn3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + }; + + i2c-mux@72 { + compatible = "nxp,pca9546"; + reg = <0x72>; + #address-cells = <1>; + #size-cells = <0>; + i2c-mux-idle-disconnect; + + i2c15mux2chn0: i2c@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c15mux2chn1: i2c@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@53 { + compatible = "atmel,24c64"; + reg = <0x53>; + }; + }; + + i2c15mux2chn2: i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c15mux2chn3: i2c@3 { + reg = <3>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; +}; + +&ehci0 { + status = "okay"; +}; + +&ehci1 { + status = "okay"; +}; + +&uhci { + status = "okay"; +}; + +&emmc_controller { + status = "okay"; +}; + +&pinctrl_emmc_default { + bias-disable; +}; + +&emmc { + status = "okay"; + clk-phase-mmc-hs200 = <210>, <228>; +}; + +&ibt { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&vuart1 { + status = "okay"; +}; + +&vuart2 { + status = "okay"; +}; + +&lpc_ctrl { + status = "okay"; + memory-region = <&flash_memory>; +}; + +&mac2 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_rmii3_default>; + clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>, + <&syscon ASPEED_CLK_MAC3RCLK>; + clock-names = "MACCLK", "RCLK"; + use-ncsi; +}; + +&mac3 { + status = "okay"; + pinctrl-names 
= "default"; + pinctrl-0 = <&pinctrl_rmii4_default>; + clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>, + <&syscon ASPEED_CLK_MAC4RCLK>; + clock-names = "MACCLK", "RCLK"; + use-ncsi; +}; + +&wdt1 { + aspeed,reset-type = "none"; + aspeed,external-signal; + aspeed,ext-push-pull; + aspeed,ext-active-high; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdtrst1_default>; +}; + +&wdt2 { + status = "okay"; +}; + +&kcs2 { + status = "okay"; + aspeed,lpc-io-reg = <0xca8 0xcac>; +}; + +&kcs3 { + status = "okay"; + aspeed,lpc-io-reg = <0xca2>; + aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>; +}; + +&fsi_hub0 { + cfam@4,0 { /* DCM2_C0 */ + reg = <4 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <4>; + + scom@1000 { + compatible = "ibm,p9-scom"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,i2c-fsi"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam4_i2c0: i2c-bus@0 { + reg = <0>; /* OM01 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom500: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo500: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam4_i2c1: i2c-bus@1 { + reg = <1>; /* OM23 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom501: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo501: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam4_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom510: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo510: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam4_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom511: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo511: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam4_i2c12: i2c-bus@c { + reg = <12>; /* OP4A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom512: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo512: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam4_i2c13: i2c-bus@d { + reg = <13>; /* OP4B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = 
"ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom513: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo513: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam4_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom514: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo514: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam4_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom515: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo515: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam4_spi0: spi@0 { + compatible = "ibm,spi-fsi"; + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam4_spi1: spi@20 { + compatible = "ibm,spi-fsi"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam4_spi2: spi@40 { + compatible = "ibm,spi-fsi"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam4_spi3: spi@60 { + compatible = "ibm,spi-fsi"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + + occ { + compatible = "ibm,p10-occ"; + + hwmon { + compatible = "ibm,p10-occ-hwmon"; + ibm,no-poll-on-init; + }; + }; + }; + + fsi@3400 { + compatible = "ibm,p9-fsi-controller"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + no-scan-on-init; + }; + }; + + cfam@5,0 { /* DCM2_C1 */ + reg = <5 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <5>; + + scom@1000 { + compatible = "ibm,p9-scom"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,i2c-fsi"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam5_i2c2: i2c-bus@2 { + reg = <2>; /* OM45 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + 
#size-cells = <1>; + chip-id = <0>; + + scom602: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo602: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam5_i2c3: i2c-bus@3 { + reg = <3>; /* OM67 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom603: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo603: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam5_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom610: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo610: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam5_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom611: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo611: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam5_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom614: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo614: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam5_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom615: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo615: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam5_i2c16: i2c-bus@10 { + reg = <16>; /* OP6A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom616: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo616: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam5_i2c17: i2c-bus@11 { + reg = <17>; /* OP6B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + 
scom617: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo617: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam5_spi0: spi@0 { + compatible = "ibm,spi-fsi"; + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam5_spi1: spi@20 { + compatible = "ibm,spi-fsi"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam5_spi2: spi@40 { + compatible = "ibm,spi-fsi"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam5_spi3: spi@60 { + compatible = "ibm,spi-fsi"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + + occ { + compatible = "ibm,p10-occ"; + + hwmon { + compatible = "ibm,p10-occ-hwmon"; + ibm,no-poll-on-init; + }; + }; + }; + + fsi@3400 { + compatible = "ibm,p9-fsi-controller"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + no-scan-on-init; + }; + }; + + cfam@6,0 { /* DCM3_C0 */ + reg = <6 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <6>; + + scom@1000 { + compatible = "ibm,p9-scom"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,i2c-fsi"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam6_i2c0: i2c-bus@0 { + reg = <0>; /* OM01 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom700: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo700: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam6_i2c1: i2c-bus@1 { + reg = <1>; /* OM23 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom701: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo701: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam6_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom710: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo710: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + 
reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam6_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom711: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo711: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam6_i2c12: i2c-bus@c { + reg = <12>; /* OP4A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom712: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo712: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam6_i2c13: i2c-bus@d { + reg = <13>; /* OP4B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom713: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo713: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam6_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom714: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo714: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam6_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom715: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo715: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam6_spi0: spi@0 { + compatible = "ibm,spi-fsi"; + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam6_spi1: spi@20 { + compatible = "ibm,spi-fsi"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam6_spi2: spi@40 { + compatible = "ibm,spi-fsi"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam6_spi3: spi@60 { + compatible 
= "ibm,spi-fsi"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + + occ { + compatible = "ibm,p10-occ"; + + hwmon { + compatible = "ibm,p10-occ-hwmon"; + ibm,no-poll-on-init; + }; + }; + }; + + fsi@3400 { + compatible = "ibm,p9-fsi-controller"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + no-scan-on-init; + }; + }; + + cfam@7,0 { /* DCM3_C1 */ + reg = <7 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <7>; + + scom@1000 { + compatible = "ibm,p9-scom"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,i2c-fsi"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam7_i2c2: i2c-bus@2 { + reg = <2>; /* OM45 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom802: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo802: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam7_i2c3: i2c-bus@3 { + reg = <3>; /* OM67 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom803: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo803: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam7_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom810: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo810: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam7_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom811: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo811: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam7_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom814: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo814: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam7_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; 
+ reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom815: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo815: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam7_i2c16: i2c-bus@10 { + reg = <16>; /* OP6A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom816: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo816: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam7_i2c17: i2c-bus@11 { + reg = <17>; /* OP6B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom817: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo817: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam7_spi0: spi@0 { + compatible = "ibm,spi-fsi"; + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam7_spi1: spi@20 { + compatible = "ibm,spi-fsi"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam7_spi2: spi@40 { + compatible = "ibm,spi-fsi"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam7_spi3: spi@60 { + compatible = "ibm,spi-fsi"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + + occ { + compatible = "ibm,p10-occ"; + + hwmon { + compatible = "ibm,p10-occ-hwmon"; + ibm,no-poll-on-init; + }; + }; + }; + + fsi@3400 { + compatible = "ibm,p9-fsi-controller"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + no-scan-on-init; + }; + }; +}; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts index 5cb0094e21e0b0..0776b72c2199c7 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-rainier.dts @@ -1722,11 +1722,6 @@ &wdt2 { status = "okay"; }; -&xdma { - status = "okay"; - memory-region = <&vga_memory>; -}; - &kcs2 { status = "okay"; aspeed,lpc-io-reg = <0xca8 0xcac>; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts index 
dcbc16308ab50b..f3efecc7eb8d04 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-ibm-system1.dts @@ -1138,7 +1138,7 @@ i2c8mux0chn6: i2c@6 { reg = <6>; temperature-sensor@4c { - compatible = "ti,tmp423"; + compatible = "ti,tmp432"; reg = <0x4c>; }; }; @@ -1599,7 +1599,7 @@ i2c15mux0chn6: i2c@6 { reg = <6>; temperature-sensor@4c { - compatible = "ti,tmp423"; + compatible = "ti,tmp432"; reg = <0x4c>; }; }; @@ -1615,7 +1615,7 @@ regulator@40 { }; temperature-sensor@4c { - compatible = "ti,tmp423"; + compatible = "ti,tmp432"; reg = <0x4c>; }; }; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-fp5280g2.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-fp5280g2.dts index 0dea014e4f30f3..78a5656ef75d9c 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-fp5280g2.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-fp5280g2.dts @@ -814,10 +814,6 @@ &gfx { memory-region = <&gfx_memory>; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &wdt1 { aspeed,reset-type = "none"; aspeed,external-signal; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-on5263m5.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-on5263m5.dts index 5a98a19f445e03..7a78c34cff407e 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-on5263m5.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-inspur-on5263m5.dts @@ -123,10 +123,6 @@ &gfx { status = "okay"; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &pwm_tacho { status = "okay"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-intel-s2600wf.dts index d5b7d28cda887c..da55e7b29fac5a 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-intel-s2600wf.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-intel-s2600wf.dts @@ -118,10 +118,6 @@ &gfx { status = "okay"; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &pwm_tacho { status = "okay"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-lanyang.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-lanyang.dts index c0847636f20b90..370738572a55b2 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-lanyang.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-lanyang.dts @@ -263,10 +263,6 @@ &gfx { status = "okay"; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &gpio { pin_gpio_b0 { gpio-hog; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-nicole.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-nicole.dts index ac0d666ca10ef4..b1d0ff85d397ad 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-nicole.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-nicole.dts @@ -284,10 +284,6 @@ &gfx { memory-region = <&gfx_memory>; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &ibt { status = "okay"; }; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-romulus.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-romulus.dts index 893e621ecab117..24df24ad9c80b1 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-romulus.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-romulus.dts @@ -289,10 +289,6 @@ &gfx { memory-region = <&gfx_memory>; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &pwm_tacho { status = "okay"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-swift.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-swift.dts index bbf864f84d37f2..a0e8c97e944ac9 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-swift.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-swift.dts @@ 
-938,10 +938,6 @@ &gfx { memory-region = <&gfx_memory>; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &wdt1 { aspeed,reset-type = "none"; aspeed,external-signal; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts index 213023bc5aec41..b31eb8e58c6b47 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts @@ -870,11 +870,6 @@ &pinctrl { <&pinctrl_lsirq_default>; }; -&xdma { - status = "okay"; - memory-region = <&vga_memory>; -}; - &kcs2 { status = "okay"; aspeed,lpc-io-reg = <0xca8 0xcac>; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-witherspoon.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-witherspoon.dts index a20a532fc2805f..8b1e82c8cdfed4 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-witherspoon.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-witherspoon.dts @@ -661,10 +661,6 @@ &gfx { memory-region = <&gfx_memory>; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &wdt1 { aspeed,reset-type = "none"; aspeed,external-signal; @@ -696,9 +692,4 @@ &video { memory-region = <&video_engine_memory>; }; -&xdma { - status = "okay"; - memory-region = <&vga_memory>; -}; - #include "ibm-power9-dual.dtsi" diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-zaius.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-zaius.dts index 3d2d8db73ca660..9904f0a58cfac6 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-zaius.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-zaius.dts @@ -466,8 +466,6 @@ &i2c13 { }; &pinctrl { - aspeed,external-nodes = <&gfx &lhc>; - pinctrl_gpioh_unbiased: gpioi_unbiased { pins = "A8", "C7", "B7", "A7", "D7", "B6", "A6", "E7"; bias-disable; diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-supermicro-x11spi.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-supermicro-x11spi.dts index 50f3c6a5c0c8df..b961dff388d107 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-bmc-supermicro-x11spi.dts +++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-supermicro-x11spi.dts @@ -123,10 +123,6 @@ &gfx { status = "okay"; }; -&pinctrl { - aspeed,external-nodes = <&gfx &lhc>; -}; - &pwm_tacho { status = "okay"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi index c669ec20208592..78c96781249289 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi +++ b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi @@ -122,8 +122,8 @@ vic: interrupt-controller@1e6c0080 { reg = <0x1e6c0080 0x80>; }; - cvic: copro-interrupt-controller@1e6c2000 { - compatible = "aspeed,ast2400-cvic", "aspeed-cvic"; + cvic: interrupt-controller@1e6c2000 { + compatible = "aspeed,ast2400-cvic", "aspeed,cvic"; valid-sources = <0x7fffffff>; reg = <0x1e6c2000 0x80>; }; @@ -230,6 +230,9 @@ adc: adc@1e6e9000 { sram: sram@1e720000 { compatible = "mmio-sram"; reg = <0x1e720000 0x8000>; // 32K + ranges; + #address-cells = <1>; + #size-cells = <1>; }; video: video@1e700000 { diff --git a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi index 6e05cbcce49cac..57a699a7c1499e 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi +++ b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi @@ -139,8 +139,8 @@ vic: interrupt-controller@1e6c0080 { reg = <0x1e6c0080 0x80>; }; - cvic: copro-interrupt-controller@1e6c2000 { - compatible = "aspeed,ast2500-cvic", "aspeed-cvic"; + cvic: interrupt-controller@1e6c2000 { + compatible = "aspeed,ast2500-cvic", "aspeed,cvic"; valid-sources = <0xffffffff>; copro-sw-interrupts = <1>; reg = 
<0x1e6c2000 0x80>; @@ -281,17 +281,6 @@ gfx: display@1e6e6000 { interrupts = <0x19>; }; - xdma: xdma@1e6e7000 { - compatible = "aspeed,ast2500-xdma"; - reg = <0x1e6e7000 0x100>; - clocks = <&syscon ASPEED_CLK_GATE_BCLK>; - resets = <&syscon ASPEED_RESET_XDMA>; - interrupts-extended = <&vic 6>, <&scu_ic ASPEED_AST2500_SCU_IC_PCIE_RESET_LO_TO_HI>; - aspeed,pcie-device = "bmc"; - aspeed,scu = <&syscon>; - status = "disabled"; - }; - adc: adc@1e6e9000 { compatible = "aspeed,ast2500-adc"; reg = <0x1e6e9000 0xb0>; @@ -314,6 +303,9 @@ video: video@1e700000 { sram: sram@1e720000 { compatible = "mmio-sram"; reg = <0x1e720000 0x9000>; // 36K + ranges; + #address-cells = <1>; + #size-cells = <1>; }; sdmmc: sd-controller@1e740000 { diff --git a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi index 0c00882f111aa3..8ed715bd53aab2 100644 --- a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi +++ b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi @@ -231,41 +231,33 @@ mdio3: mdio@1e650018 { resets = <&syscon ASPEED_RESET_MII>; }; - mac0: ftgmac@1e660000 { + mac0: ethernet@1e660000 { compatible = "aspeed,ast2600-mac", "faraday,ftgmac100"; reg = <0x1e660000 0x180>; - #address-cells = <1>; - #size-cells = <0>; interrupts = ; clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>; status = "disabled"; }; - mac1: ftgmac@1e680000 { + mac1: ethernet@1e680000 { compatible = "aspeed,ast2600-mac", "faraday,ftgmac100"; reg = <0x1e680000 0x180>; - #address-cells = <1>; - #size-cells = <0>; interrupts = ; clocks = <&syscon ASPEED_CLK_GATE_MAC2CLK>; status = "disabled"; }; - mac2: ftgmac@1e670000 { + mac2: ethernet@1e670000 { compatible = "aspeed,ast2600-mac", "faraday,ftgmac100"; reg = <0x1e670000 0x180>; - #address-cells = <1>; - #size-cells = <0>; interrupts = ; clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>; status = "disabled"; }; - mac3: ftgmac@1e690000 { + mac3: ethernet@1e690000 { compatible = "aspeed,ast2600-mac", "faraday,ftgmac100"; reg = <0x1e690000 0x180>; - #address-cells = <1>; - #size-cells = <0>; interrupts = ; clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>; status = "disabled"; @@ -398,19 +390,6 @@ gfx: display@1e6e6000 { interrupts = ; }; - xdma: xdma@1e6e7000 { - compatible = "aspeed,ast2600-xdma"; - reg = <0x1e6e7000 0x100>; - clocks = <&syscon ASPEED_CLK_GATE_BCLK>; - resets = <&syscon ASPEED_RESET_DEV_XDMA>, <&syscon ASPEED_RESET_RC_XDMA>; - reset-names = "device", "root-complex"; - interrupts-extended = <&gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, - <&scu_ic0 ASPEED_AST2600_SCU_IC0_PCIE_PERST_LO_TO_HI>; - aspeed,pcie-device = "bmc"; - aspeed,scu = <&syscon>; - status = "disabled"; - }; - adc0: adc@1e6e9000 { compatible = "aspeed,ast2600-adc0"; reg = <0x1e6e9000 0x100>; diff --git a/arch/arm/boot/dts/aspeed/ibm-power11-quad.dtsi b/arch/arm/boot/dts/aspeed/ibm-power11-quad.dtsi new file mode 100644 index 00000000000000..68c941a194b643 --- /dev/null +++ b/arch/arm/boot/dts/aspeed/ibm-power11-quad.dtsi @@ -0,0 +1,1539 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2024 IBM Corp. 
+ +/ { + aliases { + i2c100 = &cfam0_i2c0; + i2c101 = &cfam0_i2c1; + i2c110 = &cfam0_i2c10; + i2c111 = &cfam0_i2c11; + i2c112 = &cfam0_i2c12; + i2c113 = &cfam0_i2c13; + i2c114 = &cfam0_i2c14; + i2c115 = &cfam0_i2c15; + i2c202 = &cfam1_i2c2; + i2c203 = &cfam1_i2c3; + i2c210 = &cfam1_i2c10; + i2c211 = &cfam1_i2c11; + i2c214 = &cfam1_i2c14; + i2c215 = &cfam1_i2c15; + i2c216 = &cfam1_i2c16; + i2c217 = &cfam1_i2c17; + i2c300 = &cfam2_i2c0; + i2c301 = &cfam2_i2c1; + i2c310 = &cfam2_i2c10; + i2c311 = &cfam2_i2c11; + i2c312 = &cfam2_i2c12; + i2c313 = &cfam2_i2c13; + i2c314 = &cfam2_i2c14; + i2c315 = &cfam2_i2c15; + i2c402 = &cfam3_i2c2; + i2c403 = &cfam3_i2c3; + i2c410 = &cfam3_i2c10; + i2c411 = &cfam3_i2c11; + i2c414 = &cfam3_i2c14; + i2c415 = &cfam3_i2c15; + i2c416 = &cfam3_i2c16; + i2c417 = &cfam3_i2c17; + + sbefifo100 = &sbefifo100; + sbefifo101 = &sbefifo101; + sbefifo110 = &sbefifo110; + sbefifo111 = &sbefifo111; + sbefifo112 = &sbefifo112; + sbefifo113 = &sbefifo113; + sbefifo114 = &sbefifo114; + sbefifo115 = &sbefifo115; + sbefifo202 = &sbefifo202; + sbefifo203 = &sbefifo203; + sbefifo210 = &sbefifo210; + sbefifo211 = &sbefifo211; + sbefifo214 = &sbefifo214; + sbefifo215 = &sbefifo215; + sbefifo216 = &sbefifo216; + sbefifo217 = &sbefifo217; + sbefifo300 = &sbefifo300; + sbefifo301 = &sbefifo301; + sbefifo310 = &sbefifo310; + sbefifo311 = &sbefifo311; + sbefifo312 = &sbefifo312; + sbefifo313 = &sbefifo313; + sbefifo314 = &sbefifo314; + sbefifo315 = &sbefifo315; + sbefifo402 = &sbefifo402; + sbefifo403 = &sbefifo403; + sbefifo410 = &sbefifo410; + sbefifo411 = &sbefifo411; + sbefifo414 = &sbefifo414; + sbefifo415 = &sbefifo415; + sbefifo416 = &sbefifo416; + sbefifo417 = &sbefifo417; + + scom100 = &scom100; + scom101 = &scom101; + scom110 = &scom110; + scom111 = &scom111; + scom112 = &scom112; + scom113 = &scom113; + scom114 = &scom114; + scom115 = &scom115; + scom202 = &scom202; + scom203 = &scom203; + scom210 = &scom210; + scom211 = &scom211; + scom214 = &scom214; + scom215 = &scom215; + scom216 = &scom216; + scom217 = &scom217; + scom300 = &scom300; + scom301 = &scom301; + scom310 = &scom310; + scom311 = &scom311; + scom312 = &scom312; + scom313 = &scom313; + scom314 = &scom314; + scom315 = &scom315; + scom402 = &scom402; + scom403 = &scom403; + scom410 = &scom410; + scom411 = &scom411; + scom414 = &scom414; + scom415 = &scom415; + scom416 = &scom416; + scom417 = &scom417; + + spi10 = &cfam0_spi0; + spi11 = &cfam0_spi1; + spi12 = &cfam0_spi2; + spi13 = &cfam0_spi3; + spi20 = &cfam1_spi0; + spi21 = &cfam1_spi1; + spi22 = &cfam1_spi2; + spi23 = &cfam1_spi3; + spi30 = &cfam2_spi0; + spi31 = &cfam2_spi1; + spi32 = &cfam2_spi2; + spi33 = &cfam2_spi3; + spi40 = &cfam3_spi0; + spi41 = &cfam3_spi1; + spi42 = &cfam3_spi2; + spi43 = &cfam3_spi3; + }; +}; + +&fsim0 { + #address-cells = <2>; + #size-cells = <0>; + status = "okay"; + bus-frequency = <100000000>; + cfam-reset-gpios = <&gpio0 ASPEED_GPIO(Q, 0) GPIO_ACTIVE_HIGH>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom@1000 { + compatible = "ibm,p9-scom"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,i2c-fsi"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam0_i2c0: i2c-bus@0 { + reg = <0>; /* OMI01 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + 
scom100: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo100: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam0_i2c1: i2c-bus@1 { + reg = <1>; /* OMI23 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom101: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo101: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam0_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom110: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo110: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam0_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom111: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo111: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam0_i2c12: i2c-bus@c { + reg = <12>; /* OP4A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom112: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo112: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam0_i2c13: i2c-bus@d { + reg = <13>; /* OP4B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom113: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo113: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam0_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom114: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo114: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam0_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom115: scom@1000 { + compatible = 
"ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo115: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam0_spi0: spi@0 { + compatible = "ibm,spi-fsi"; + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam0_spi1: spi@20 { + compatible = "ibm,spi-fsi"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam0_spi2: spi@40 { + compatible = "ibm,spi-fsi"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam0_spi3: spi@60 { + compatible = "ibm,spi-fsi"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + + occ { + compatible = "ibm,p10-occ"; + + hwmon { + compatible = "ibm,p10-occ-hwmon"; + ibm,no-poll-on-init; + }; + }; + }; + + fsi_hub0: fsi@3400 { + compatible = "ibm,p9-fsi-controller"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + }; + }; +}; + +&fsi_hub0 { + cfam@1,0 { + reg = <1 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <1>; + + scom@1000 { + compatible = "ibm,p9-scom"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,i2c-fsi"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam1_i2c2: i2c-bus@2 { + reg = <2>; /* OMI45 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom202: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo202: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam1_i2c3: i2c-bus@3 { + reg = <3>; /* OMI67 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom203: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo203: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam1_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom210: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo210: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + 
}; + + cfam1_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom211: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo211: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam1_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom214: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo214: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam1_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom215: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo215: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam1_i2c16: i2c-bus@10 { + reg = <16>; /* OP6A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom216: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo216: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam1_i2c17: i2c-bus@11 { + reg = <17>; /* OP6B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom217: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo217: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam1_spi0: spi@0 { + compatible = "ibm,spi-fsi"; + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam1_spi1: spi@20 { + compatible = "ibm,spi-fsi"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam1_spi2: spi@40 { + compatible = "ibm,spi-fsi"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam1_spi3: spi@60 { + compatible = "ibm,spi-fsi"; + reg = <0x60>; + 
#address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + + occ { + compatible = "ibm,p10-occ"; + + hwmon { + compatible = "ibm,p10-occ-hwmon"; + ibm,no-poll-on-init; + }; + }; + }; + + fsi@3400 { + compatible = "ibm,p9-fsi-controller"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + no-scan-on-init; + }; + }; + + cfam@2,0 { + reg = <2 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <2>; + + scom@1000 { + compatible = "ibm,p9-scom"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,i2c-fsi"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam2_i2c0: i2c-bus@0 { + reg = <0>; /* OM01 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom300: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo300: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam2_i2c1: i2c-bus@1 { + reg = <1>; /* OM23 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom301: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo301: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam2_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom310: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo310: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam2_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom311: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo311: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam2_i2c12: i2c-bus@c { + reg = <12>; /* OP4A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom312: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo312: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam2_i2c13: i2c-bus@d { + reg = <13>; /* OP4B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + 
#size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom313: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo313: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam2_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom314: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo314: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam2_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom315: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo315: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam2_spi0: spi@0 { + compatible = "ibm,spi-fsi"; + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam2_spi1: spi@20 { + compatible = "ibm,spi-fsi"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam2_spi2: spi@40 { + compatible = "ibm,spi-fsi"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam2_spi3: spi@60 { + compatible = "ibm,spi-fsi"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + + occ { + compatible = "ibm,p10-occ"; + + hwmon { + compatible = "ibm,p10-occ-hwmon"; + ibm,no-poll-on-init; + }; + }; + }; + + fsi@3400 { + compatible = "ibm,p9-fsi-controller"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + no-scan-on-init; + }; + }; + + cfam@3,0 { + reg = <3 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <3>; + + scom@1000 { + compatible = "ibm,p9-scom"; + reg = <0x1000 0x400>; + }; + + i2c@1800 { + compatible = "ibm,i2c-fsi"; + reg = <0x1800 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam3_i2c2: i2c-bus@2 { + reg = <2>; /* OM45 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom402: scom@1000 { + compatible = 
"ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo402: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam3_i2c3: i2c-bus@3 { + reg = <3>; /* OM67 */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom403: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo403: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam3_i2c10: i2c-bus@a { + reg = <10>; /* OP3A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom410: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo410: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam3_i2c11: i2c-bus@b { + reg = <11>; /* OP3B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom411: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo411: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam3_i2c14: i2c-bus@e { + reg = <14>; /* OP5A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom414: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo414: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam3_i2c15: i2c-bus@f { + reg = <15>; /* OP5B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom415: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo415: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam3_i2c16: i2c-bus@10 { + reg = <16>; /* OP6A */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom416: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; + }; + + sbefifo416: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + + cfam3_i2c17: i2c-bus@11 { + reg = <17>; /* OP6B */ + #address-cells = <1>; + #size-cells = <0>; + + fsi@20 { + compatible = "ibm,i2cr-fsi-master"; + reg = <0x20>; + #address-cells = <2>; + #size-cells = <0>; + + cfam@0,0 { + reg = <0 0>; + #address-cells = <1>; + #size-cells = <1>; + chip-id = <0>; + + scom417: scom@1000 { + compatible = "ibm,i2cr-scom"; + reg = <0x1000 0x400>; 
+ }; + + sbefifo417: sbefifo@2400 { + compatible = "ibm,odyssey-sbefifo"; + reg = <0x2400 0x400>; + }; + }; + }; + }; + }; + + fsi2spi@1c00 { + compatible = "ibm,fsi2spi"; + reg = <0x1c00 0x400>; + #address-cells = <1>; + #size-cells = <0>; + + cfam3_spi0: spi@0 { + compatible = "ibm,spi-fsi"; + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam3_spi1: spi@20 { + compatible = "ibm,spi-fsi"; + reg = <0x20>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam3_spi2: spi@40 { + compatible = "ibm,spi-fsi"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + + cfam3_spi3: spi@60 { + compatible = "ibm,spi-fsi"; + reg = <0x60>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@0 { + compatible = "atmel,at25"; + reg = <0>; + address-width = <24>; + pagesize = <256>; + size = <0x80000>; + spi-max-frequency = <10000000>; + }; + }; + }; + + sbefifo@2400 { + compatible = "ibm,p9-sbefifo"; + reg = <0x2400 0x400>; + + occ { + compatible = "ibm,p10-occ"; + + hwmon { + compatible = "ibm,p10-occ-hwmon"; + ibm,no-poll-on-init; + }; + }; + }; + + fsi@3400 { + compatible = "ibm,p9-fsi-controller"; + reg = <0x3400 0x400>; + #address-cells = <2>; + #size-cells = <0>; + no-scan-on-init; + }; + }; +}; diff --git a/arch/arm/boot/dts/broadcom/bcm21664-garnet.dts b/arch/arm/boot/dts/broadcom/bcm21664-garnet.dts index 8789fae178bf80..4f8ddc1b3ab7b4 100644 --- a/arch/arm/boot/dts/broadcom/bcm21664-garnet.dts +++ b/arch/arm/boot/dts/broadcom/bcm21664-garnet.dts @@ -11,6 +11,10 @@ / { model = "BCM21664 Garnet board"; compatible = "brcm,bcm21664-garnet", "brcm,bcm21664"; + chosen { + bootargs = "console=ttyS0,115200n8"; + }; + memory@80000000 { device_type = "memory"; reg = <0x80000000 0x40000000>; /* 1 GB */ diff --git a/arch/arm/boot/dts/broadcom/bcm21664.dtsi b/arch/arm/boot/dts/broadcom/bcm21664.dtsi index fa73600e883e70..f0d0300079b60d 100644 --- a/arch/arm/boot/dts/broadcom/bcm21664.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm21664.dtsi @@ -1,21 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only // Copyright (C) 2014 Broadcom Corporation -#include -#include -#include +#include "bcm2166x-common.dtsi" / { - #address-cells = <1>; - #size-cells = <1>; - model = "BCM21664 SoC"; - compatible = "brcm,bcm21664"; interrupt-parent = <&gic>; - chosen { - bootargs = "console=ttyS0,115200n8"; - }; - cpus { #address-cells = <1>; #size-cells = <0>; @@ -34,312 +24,46 @@ cpu1: cpu@1 { reg = <1>; }; }; +}; - gic: interrupt-controller@3ff00100 { - compatible = "arm,cortex-a9-gic"; - #interrupt-cells = <3>; - #address-cells = <0>; - interrupt-controller; - reg = <0x3ff01000 0x1000>, - <0x3ff00100 0x100>; - }; - - smc@3404e000 { - compatible = "brcm,bcm21664-smc", "brcm,kona-smc"; - reg = <0x3404e000 0x400>; /* 1 KiB in SRAM */ - }; - - uartb: serial@3e000000 { - compatible = "brcm,bcm21664-dw-apb-uart", "snps,dw-apb-uart"; - reg = <0x3e000000 0x118>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB>; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - uartb2: serial@3e001000 { - 
compatible = "brcm,bcm21664-dw-apb-uart", "snps,dw-apb-uart"; - reg = <0x3e001000 0x118>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB2>; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - uartb3: serial@3e002000 { - compatible = "brcm,bcm21664-dw-apb-uart", "snps,dw-apb-uart"; - reg = <0x3e002000 0x118>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB3>; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - L2: cache-controller@3ff20000 { - compatible = "arm,pl310-cache"; - reg = <0x3ff20000 0x1000>; - cache-unified; - cache-level = <2>; - }; - - brcm,resetmgr@35001f00 { - compatible = "brcm,bcm21664-resetmgr"; - reg = <0x35001f00 0x24>; - }; - - timer@35006000 { - compatible = "brcm,kona-timer"; - reg = <0x35006000 0x1c>; - interrupts = ; - clocks = <&aon_ccu BCM21664_AON_CCU_HUB_TIMER>; - }; - - gpio: gpio@35003000 { - compatible = "brcm,bcm21664-gpio", "brcm,kona-gpio"; - reg = <0x35003000 0x524>; - interrupts = , - , - , - ; - #gpio-cells = <2>; - #interrupt-cells = <2>; - gpio-controller; - interrupt-controller; - }; - - sdio1: mmc@3f180000 { - compatible = "brcm,kona-sdhci"; - reg = <0x3f180000 0x801c>; - interrupts = ; - clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO1>; - status = "disabled"; - }; - - sdio2: mmc@3f190000 { - compatible = "brcm,kona-sdhci"; - reg = <0x3f190000 0x801c>; - interrupts = ; - clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO2>; - status = "disabled"; - }; - - sdio3: mmc@3f1a0000 { - compatible = "brcm,kona-sdhci"; - reg = <0x3f1a0000 0x801c>; - interrupts = ; - clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO3>; - status = "disabled"; - }; - - sdio4: mmc@3f1b0000 { - compatible = "brcm,kona-sdhci"; - reg = <0x3f1b0000 0x801c>; - interrupts = ; - clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO4>; - status = "disabled"; - }; - - bsc1: i2c@3e016000 { - compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c"; - reg = <0x3e016000 0x70>; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC1>; - status = "disabled"; - }; - - bsc2: i2c@3e017000 { - compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c"; - reg = <0x3e017000 0x70>; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC2>; - status = "disabled"; - }; - - bsc3: i2c@3e018000 { - compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c"; - reg = <0x3e018000 0x70>; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC3>; - status = "disabled"; - }; - - bsc4: i2c@3e01c000 { - compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c"; - reg = <0x3e01c000 0x70>; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC4>; - status = "disabled"; - }; - - clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - /* - * Fixed clocks are defined before CCUs whose - * clocks may depend on them. 
- */ - - ref_32k_clk: ref_32k { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <32768>; - }; - - bbl_32k_clk: bbl_32k { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <32768>; - }; - - ref_13m_clk: ref_13m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <13000000>; - }; - - var_13m_clk: var_13m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <13000000>; - }; - - dft_19_5m_clk: dft_19_5m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <19500000>; - }; - - ref_crystal_clk: ref_crystal { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <26000000>; - }; - - ref_52m_clk: ref_52m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <52000000>; - }; - - var_52m_clk: var_52m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <52000000>; - }; - - usb_otg_ahb_clk: usb_otg_ahb { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <52000000>; - }; - - ref_96m_clk: ref_96m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <96000000>; - }; - - var_96m_clk: var_96m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <96000000>; - }; - - ref_104m_clk: ref_104m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <104000000>; - }; - - var_104m_clk: var_104m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <104000000>; - }; - - ref_156m_clk: ref_156m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <156000000>; +&apps { + gic: interrupt-controller@1c01000 { + compatible = "arm,cortex-a9-gic"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0x01c01000 0x1000>, + <0x01c00100 0x100>; }; - var_156m_clk: var_156m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <156000000>; + L2: cache-controller@1c20000 { + compatible = "arm,pl310-cache"; + reg = <0x01c20000 0x1000>; + cache-unified; + cache-level = <2>; }; +}; - root_ccu: root_ccu@35001000 { - compatible = "brcm,bcm21664-root-ccu"; - reg = <0x35001000 0x0f00>; - #clock-cells = <1>; - clock-output-names = "frac_1m"; - }; +&bsc1 { + compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c"; +}; - aon_ccu: aon_ccu@35002000 { - compatible = "brcm,bcm21664-aon-ccu"; - reg = <0x35002000 0x0f00>; - #clock-cells = <1>; - clock-output-names = "hub_timer"; - }; +&bsc2 { + compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c"; +}; - master_ccu: master_ccu@3f001000 { - compatible = "brcm,bcm21664-master-ccu"; - reg = <0x3f001000 0x0f00>; - #clock-cells = <1>; - clock-output-names = "sdio1", - "sdio2", - "sdio3", - "sdio4", - "sdio1_sleep", - "sdio2_sleep", - "sdio3_sleep", - "sdio4_sleep"; - }; +&bsc3 { + compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c"; +}; - slave_ccu: slave_ccu@3e011000 { - compatible = "brcm,bcm21664-slave-ccu"; - reg = <0x3e011000 0x0f00>; - #clock-cells = <1>; - clock-output-names = "uartb", - "uartb2", - "uartb3", - "bsc1", - "bsc2", - "bsc3", - "bsc4"; - }; - }; +&bsc4 { + compatible = "brcm,bcm21664-i2c", "brcm,kona-i2c"; +}; - usbotg: usb@3f120000 { - compatible = "snps,dwc2"; - reg = <0x3f120000 0x10000>; - interrupts = ; - clocks = <&usb_otg_ahb_clk>; - clock-names = "otg"; - phys = <&usbphy>; - phy-names = "usb2-phy"; - status = "disabled"; - }; +&gpio { + compatible = "brcm,bcm21664-gpio", "brcm,kona-gpio"; +}; - usbphy: usb-phy@3f130000 { - compatible = 
"brcm,kona-usb2-phy"; - reg = <0x3f130000 0x28>; - #phy-cells = <0>; - status = "disabled"; - }; +&smc { + compatible = "brcm,bcm21664-smc", "brcm,kona-smc"; }; diff --git a/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi b/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi new file mode 100644 index 00000000000000..87180b7fd695e6 --- /dev/null +++ b/arch/arm/boot/dts/broadcom/bcm2166x-common.dtsi @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Common device tree for components shared between the BCM21664 and BCM23550 + * SoCs. + * + * Copyright (C) 2016 Broadcom + */ + +/dts-v1/; + +#include +#include +#include + +/ { + #address-cells = <1>; + #size-cells = <1>; + + /* Hub bus */ + hub: hub-bus@34000000 { + compatible = "simple-bus"; + ranges = <0 0x34000000 0x102f83ac>; + #address-cells = <1>; + #size-cells = <1>; + + smc: smc@4e000 { + /* Compatible filled by SoC DTSI */ + reg = <0x0004e000 0x400>; /* 1 KiB in SRAM */ + }; + + resetmgr: reset-controller@1001f00 { + compatible = "brcm,bcm21664-resetmgr"; + reg = <0x01001f00 0x24>; + }; + + gpio: gpio@1003000 { + /* Compatible filled by SoC DTSI */ + reg = <0x01003000 0x524>; + interrupts = , + , + , + ; + #gpio-cells = <2>; + #interrupt-cells = <2>; + gpio-controller; + interrupt-controller; + }; + + timer@1006000 { + compatible = "brcm,kona-timer"; + reg = <0x01006000 0x1c>; + interrupts = ; + clocks = <&aon_ccu BCM21664_AON_CCU_HUB_TIMER>; + }; + }; + + /* Slaves bus */ + slaves: slaves-bus@3e000000 { + compatible = "simple-bus"; + ranges = <0 0x3e000000 0x0001c070>; + #address-cells = <1>; + #size-cells = <1>; + + uartb: serial@0 { + compatible = "snps,dw-apb-uart"; + reg = <0x00000000 0x118>; + clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uartb2: serial@1000 { + compatible = "snps,dw-apb-uart"; + reg = <0x00001000 0x118>; + clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB2>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uartb3: serial@2000 { + compatible = "snps,dw-apb-uart"; + reg = <0x00002000 0x118>; + clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB3>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + bsc1: i2c@16000 { + /* Compatible filled by SoC DTSI */ + reg = <0x00016000 0x70>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC1>; + status = "disabled"; + }; + + bsc2: i2c@17000 { + /* Compatible filled by SoC DTSI */ + reg = <0x00017000 0x70>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC2>; + status = "disabled"; + }; + + bsc3: i2c@18000 { + /* Compatible filled by SoC DTSI */ + reg = <0x00018000 0x70>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC3>; + status = "disabled"; + }; + + bsc4: i2c@1c000 { + /* Compatible filled by SoC DTSI */ + reg = <0x0001c000 0x70>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC4>; + status = "disabled"; + }; + }; + + /* Apps bus */ + apps: apps-bus@3e300000 { + compatible = "simple-bus"; + ranges = <0 0x3e300000 0x01c02000>; + #address-cells = <1>; + #size-cells = <1>; + + usbotg: usb@e20000 { + compatible = "snps,dwc2"; + reg = <0x00e20000 0x10000>; + interrupts = ; + clocks = <&usb_otg_ahb_clk>; + clock-names = "otg"; + phys = <&usbphy>; + phy-names = "usb2-phy"; + 
status = "disabled"; + }; + + usbphy: usb-phy@e30000 { + compatible = "brcm,kona-usb2-phy"; + reg = <0x00e30000 0x28>; + #phy-cells = <0>; + status = "disabled"; + }; + + sdio1: mmc@e80000 { + compatible = "brcm,kona-sdhci"; + reg = <0x00e80000 0x801c>; + interrupts = ; + clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO1>; + status = "disabled"; + }; + + sdio2: mmc@e90000 { + compatible = "brcm,kona-sdhci"; + reg = <0x00e90000 0x801c>; + interrupts = ; + clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO2>; + status = "disabled"; + }; + + sdio3: mmc@ea0000 { + compatible = "brcm,kona-sdhci"; + reg = <0x00ea0000 0x801c>; + interrupts = ; + clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO3>; + status = "disabled"; + }; + + sdio4: mmc@eb0000 { + compatible = "brcm,kona-sdhci"; + reg = <0x00eb0000 0x801c>; + interrupts = ; + clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO4>; + status = "disabled"; + }; + }; + + clocks { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + /* + * Fixed clocks are defined before CCUs whose + * clocks may depend on them. + */ + + ref_32k_clk: ref_32k { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <32768>; + }; + + bbl_32k_clk: bbl_32k { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <32768>; + }; + + ref_13m_clk: ref_13m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <13000000>; + }; + + var_13m_clk: var_13m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <13000000>; + }; + + dft_19_5m_clk: dft_19_5m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <19500000>; + }; + + ref_crystal_clk: ref_crystal { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <26000000>; + }; + + ref_52m_clk: ref_52m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <52000000>; + }; + + var_52m_clk: var_52m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <52000000>; + }; + + usb_otg_ahb_clk: usb_otg_ahb { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <52000000>; + }; + + ref_96m_clk: ref_96m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <96000000>; + }; + + var_96m_clk: var_96m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <96000000>; + }; + + ref_104m_clk: ref_104m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <104000000>; + }; + + var_104m_clk: var_104m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <104000000>; + }; + + ref_156m_clk: ref_156m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <156000000>; + }; + + var_156m_clk: var_156m { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <156000000>; + }; + + root_ccu: root_ccu@35001000 { + compatible = "brcm,bcm21664-root-ccu"; + reg = <0x35001000 0x0f00>; + #clock-cells = <1>; + clock-output-names = "frac_1m"; + }; + + aon_ccu: aon_ccu@35002000 { + compatible = "brcm,bcm21664-aon-ccu"; + reg = <0x35002000 0x0f00>; + #clock-cells = <1>; + clock-output-names = "hub_timer"; + }; + + slave_ccu: slave_ccu@3e011000 { + compatible = "brcm,bcm21664-slave-ccu"; + reg = <0x3e011000 0x0f00>; + #clock-cells = <1>; + clock-output-names = "uartb", + "uartb2", + "uartb3", + "bsc1", + "bsc2", + "bsc3", + "bsc4"; + }; + + master_ccu: master_ccu@3f001000 { + compatible = "brcm,bcm21664-master-ccu"; + reg = <0x3f001000 0x0f00>; + #clock-cells = <1>; + clock-output-names = "sdio1", 
+ "sdio2", + "sdio3", + "sdio4", + "sdio1_sleep", + "sdio2_sleep", + "sdio3_sleep", + "sdio4_sleep"; + }; + }; +}; diff --git a/arch/arm/boot/dts/broadcom/bcm23550.dtsi b/arch/arm/boot/dts/broadcom/bcm23550.dtsi index 50ebe93d6bd067..c1c69381286ba2 100644 --- a/arch/arm/boot/dts/broadcom/bcm23550.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm23550.dtsi @@ -1,45 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause /* - * BSD LICENSE + * Device tree for the BCM23550 SoC. * - * Copyright(c) 2016 Broadcom. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * Copyright (C) 2016 Broadcom */ -/* BCM23550 and BCM21664 have almost identical clocks */ -#include -#include -#include +#include "bcm2166x-common.dtsi" / { - #address-cells = <1>; - #size-cells = <1>; - model = "BCM23550 SoC"; - compatible = "brcm,bcm23550"; interrupt-parent = <&gic>; cpus { @@ -80,180 +48,9 @@ cpu3: cpu@3 { clock-frequency = <1000000000>; }; }; +}; - /* Hub bus */ - hub@34000000 { - compatible = "simple-bus"; - ranges = <0 0x34000000 0x102f83ac>; - #address-cells = <1>; - #size-cells = <1>; - - smc@4e000 { - compatible = "brcm,bcm23550-smc", "brcm,kona-smc"; - reg = <0x0004e000 0x400>; /* 1 KiB in SRAM */ - }; - - resetmgr: reset-controller@1001f00 { - compatible = "brcm,bcm21664-resetmgr"; - reg = <0x01001f00 0x24>; - }; - - gpio: gpio@1003000 { - compatible = "brcm,bcm23550-gpio", "brcm,kona-gpio"; - reg = <0x01003000 0x524>; - interrupts = , - , - , - ; - #gpio-cells = <2>; - #interrupt-cells = <2>; - gpio-controller; - interrupt-controller; - }; - - timer@1006000 { - compatible = "brcm,kona-timer"; - reg = <0x01006000 0x1c>; - interrupts = ; - clocks = <&aon_ccu BCM21664_AON_CCU_HUB_TIMER>; - }; - }; - - /* Slaves bus */ - slaves@3e000000 { - compatible = "simple-bus"; - ranges = <0 0x3e000000 0x0001c070>; - #address-cells = <1>; - #size-cells = <1>; - - uartb: serial@0 { - compatible = "snps,dw-apb-uart"; - reg = <0x00000000 0x118>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB>; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - uartb2: serial@1000 { - compatible = "snps,dw-apb-uart"; - reg = <0x00001000 0x118>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB2>; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - uartb3: serial@2000 { - compatible = "snps,dw-apb-uart"; - reg = <0x00002000 0x118>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB3>; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - bsc1: i2c@16000 { - compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c"; - reg = <0x00016000 0x70>; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC1>; - status = "disabled"; - }; - - bsc2: i2c@17000 { - compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c"; - reg = <0x00017000 0x70>; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC2>; - status = "disabled"; - }; - - bsc3: i2c@18000 { - compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c"; - reg = <0x00018000 0x70>; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC3>; - status = "disabled"; - }; - - bsc4: i2c@1c000 { - compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c"; - reg = <0x0001c000 0x70>; - interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC4>; - status = "disabled"; - }; - }; - - /* Apps bus */ - apps@3e300000 { - compatible = "simple-bus"; - ranges = <0 0x3e300000 0x01b77000>; - #address-cells = <1>; - #size-cells = <1>; - - usbotg: usb@e20000 { - compatible = "snps,dwc2"; - reg = <0x00e20000 0x10000>; - interrupts = ; - clocks = <&usb_otg_ahb_clk>; - clock-names = "otg"; - phys = <&usbphy>; - phy-names = "usb2-phy"; - status = "disabled"; - }; - - usbphy: usb-phy@e30000 { - compatible = "brcm,kona-usb2-phy"; - reg = <0x00e30000 0x28>; - #phy-cells = <0>; - status = "disabled"; - }; - - sdio1: mmc@e80000 { - compatible = "brcm,kona-sdhci"; - reg = <0x00e80000 0x801c>; - 
interrupts = ; - clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO1>; - status = "disabled"; - }; - - sdio2: mmc@e90000 { - compatible = "brcm,kona-sdhci"; - reg = <0x00e90000 0x801c>; - interrupts = ; - clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO2>; - status = "disabled"; - }; - - sdio3: mmc@ea0000 { - compatible = "brcm,kona-sdhci"; - reg = <0x00ea0000 0x801c>; - interrupts = ; - clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO3>; - status = "disabled"; - }; - - sdio4: mmc@eb0000 { - compatible = "brcm,kona-sdhci"; - reg = <0x00eb0000 0x801c>; - interrupts = ; - clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO4>; - status = "disabled"; - }; - +&apps { cdc: cdc@1b0e000 { compatible = "brcm,bcm23550-cdc"; reg = <0x01b0e000 0x78>; @@ -267,147 +64,28 @@ gic: interrupt-controller@1b21000 { reg = <0x01b21000 0x1000>, <0x01b22000 0x1000>; }; - }; - - clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - /* - * Fixed clocks are defined before CCUs whose - * clocks may depend on them. - */ - - ref_32k_clk: ref_32k { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <32768>; - }; - - bbl_32k_clk: bbl_32k { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <32768>; - }; - - ref_13m_clk: ref_13m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <13000000>; - }; - - var_13m_clk: var_13m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <13000000>; - }; - - dft_19_5m_clk: dft_19_5m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <19500000>; - }; - - ref_crystal_clk: ref_crystal { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <26000000>; - }; - - ref_52m_clk: ref_52m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <52000000>; - }; - - var_52m_clk: var_52m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <52000000>; - }; - - usb_otg_ahb_clk: usb_otg_ahb { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <52000000>; - }; - - ref_96m_clk: ref_96m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <96000000>; - }; - - var_96m_clk: var_96m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <96000000>; - }; - - ref_104m_clk: ref_104m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <104000000>; - }; - - var_104m_clk: var_104m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <104000000>; - }; +}; - ref_156m_clk: ref_156m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <156000000>; - }; +&bsc1 { + compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c"; +}; - var_156m_clk: var_156m { - #clock-cells = <0>; - compatible = "fixed-clock"; - clock-frequency = <156000000>; - }; +&bsc2 { + compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c"; +}; - root_ccu: root_ccu@35001000 { - compatible = "brcm,bcm21664-root-ccu"; - reg = <0x35001000 0x0f00>; - #clock-cells = <1>; - clock-output-names = "frac_1m"; - }; +&bsc3 { + compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c"; +}; - aon_ccu: aon_ccu@35002000 { - compatible = "brcm,bcm21664-aon-ccu"; - reg = <0x35002000 0x0f00>; - #clock-cells = <1>; - clock-output-names = "hub_timer"; - }; +&bsc4 { + compatible = "brcm,bcm23550-i2c", "brcm,kona-i2c"; +}; - slave_ccu: slave_ccu@3e011000 { - compatible = "brcm,bcm21664-slave-ccu"; - reg = <0x3e011000 0x0f00>; - #clock-cells = <1>; - clock-output-names = "uartb", - "uartb2", - "uartb3", - 
"bsc1", - "bsc2", - "bsc3", - "bsc4"; - }; +&gpio { + compatible = "brcm,bcm23550-gpio", "brcm,kona-gpio"; +}; - master_ccu: master_ccu@3f001000 { - compatible = "brcm,bcm21664-master-ccu"; - reg = <0x3f001000 0x0f00>; - #clock-cells = <1>; - clock-output-names = "sdio1", - "sdio2", - "sdio3", - "sdio4", - "sdio1_sleep", - "sdio2_sleep", - "sdio3_sleep", - "sdio4_sleep"; - }; - }; +&smc { + compatible = "brcm,bcm23550-smc", "brcm,kona-smc"; }; diff --git a/arch/arm/boot/dts/broadcom/bcm2837.dtsi b/arch/arm/boot/dts/broadcom/bcm2837.dtsi index 84c08b46519da0..c281697142b1a7 100644 --- a/arch/arm/boot/dts/broadcom/bcm2837.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm2837.dtsi @@ -9,7 +9,7 @@ soc { <0x40000000 0x40000000 0x00001000>; dma-ranges = <0xc0000000 0x00000000 0x3f000000>; - local_intc: local_intc@40000000 { + local_intc: interrupt-controller@40000000 { compatible = "brcm,bcm2836-l1-intc"; reg = <0x40000000 0x100>; interrupt-controller; diff --git a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts index 25eeacf6a2484c..45bd27906f2959 100644 --- a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts +++ b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts @@ -215,11 +215,15 @@ eeprom: eeprom@50 { reg = <0x50>; pagesize = <32>; read-only; - #address-cells = <1>; - #size-cells = <1>; - mac_address: mac-address@66 { - reg = <0x66 0x6>; + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + mac_address: mac-address@66 { + reg = <0x66 0x6>; + }; }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm958625-meraki-mx6x-common.dtsi b/arch/arm/boot/dts/broadcom/bcm958625-meraki-mx6x-common.dtsi index b0854d881ac680..71a8b77b46f452 100644 --- a/arch/arm/boot/dts/broadcom/bcm958625-meraki-mx6x-common.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm958625-meraki-mx6x-common.dtsi @@ -55,11 +55,15 @@ eeprom@50 { reg = <0x50>; pagesize = <32>; read-only; - #address-cells = <1>; - #size-cells = <1>; - mac_address: mac-address@66 { - reg = <0x66 0x6>; + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + mac_address: mac-address@66 { + reg = <0x66 0x6>; + }; }; }; }; diff --git a/arch/arm/boot/dts/cirrus/Makefile b/arch/arm/boot/dts/cirrus/Makefile index e944d3e2129db4..e6015983e46402 100644 --- a/arch/arm/boot/dts/cirrus/Makefile +++ b/arch/arm/boot/dts/cirrus/Makefile @@ -3,3 +3,7 @@ dtb-$(CONFIG_ARCH_CLPS711X) += \ ep7211-edb7211.dtb dtb-$(CONFIG_ARCH_CLPS711X) += \ ep7211-edb7211.dtb +dtb-$(CONFIG_ARCH_EP93XX) += \ + ep93xx-edb9302.dtb \ + ep93xx-bk3.dtb \ + ep93xx-ts7250.dtb diff --git a/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts b/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts new file mode 100644 index 00000000000000..40bc9b2a6ba8be --- /dev/null +++ b/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree file for Liebherr controller BK3.1 based on Cirrus EP9302 SoC + */ +/dts-v1/; +#include "ep93xx.dtsi" + +/ { + model = "Liebherr controller BK3.1"; + compatible = "liebherr,bk3", "cirrus,ep9301"; + #address-cells = <1>; + #size-cells = <1>; + + chosen { + }; + + memory@0 { + device_type = "memory"; + /* should be set from ATAGS */ + reg = <0x00000000 0x02000000>, + <0x000530c0 0x01fdd000>; + }; + + leds { + compatible = "gpio-leds"; + led-0 { + label = "grled"; + gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + function = LED_FUNCTION_HEARTBEAT; + }; + + led-1 { + label = "rdled"; + gpios 
= <&gpio4 1 GPIO_ACTIVE_HIGH>; + function = LED_FUNCTION_FAULT; + }; + }; +}; + +&ebi { + nand-controller@60000000 { + compatible = "technologic,ts7200-nand"; + reg = <0x60000000 0x8000000>; + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + reg = <0>; + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "System"; + reg = <0x00000000 0x01e00000>; + read-only; + }; + + partition@1e00000 { + label = "Data"; + reg = <0x01e00000 0x05f20000>; + }; + + partition@7d20000 { + label = "RedBoot"; + reg = <0x07d20000 0x002e0000>; + read-only; + }; + }; + }; + }; +}; + +ð0 { + phy-handle = <&phy0>; +}; + +&i2s { + dmas = <&dma0 0 1>, <&dma0 0 2>; + dma-names = "tx", "rx"; + pinctrl-names = "default"; + pinctrl-0 = <&i2s_on_ac97_pins>; + status = "okay"; +}; + +&gpio1 { + /* PWM */ + gpio-ranges = <&syscon 6 163 1>; +}; + +&gpio4 { + gpio-ranges = <&syscon 0 97 2>; + status = "okay"; +}; + +&gpio6 { + gpio-ranges = <&syscon 0 87 2>; + status = "okay"; +}; + +&gpio7 { + gpio-ranges = <&syscon 2 199 4>; + status = "okay"; +}; + +&mdio0 { + phy0: ethernet-phy@1 { + reg = <1>; + device_type = "ethernet-phy"; + }; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts b/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts new file mode 100644 index 00000000000000..312b2be1c63855 --- /dev/null +++ b/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +/* + * Device Tree file for Cirrus Logic EDB9302 board based on EP9302 SoC + */ +/dts-v1/; +#include "ep93xx.dtsi" + +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cirrus,edb9302", "cirrus,ep9301"; + model = "cirrus,edb9302"; + + chosen { + }; + + memory@0 { + device_type = "memory"; + /* should be set from ATAGS */ + reg = <0x0000000 0x800000>, + <0x1000000 0x800000>, + <0x4000000 0x800000>, + <0x5000000 0x800000>; + }; + + sound { + compatible = "audio-graph-card2"; + label = "EDB93XX"; + links = <&i2s_port>; + }; + + leds { + compatible = "gpio-leds"; + led-0 { + label = "grled"; + gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + function = LED_FUNCTION_HEARTBEAT; + }; + + led-1 { + label = "rdled"; + gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>; + function = LED_FUNCTION_FAULT; + }; + }; +}; + +&adc { + status = "okay"; +}; + +&ebi { + flash@60000000 { + compatible = "cfi-flash"; + reg = <0x60000000 0x1000000>; + bank-width = <2>; + }; +}; + +ð0 { + phy-handle = <&phy0>; +}; + +&gpio0 { + gpio-ranges = <&syscon 0 153 1>, + <&syscon 1 152 1>, + <&syscon 2 151 1>, + <&syscon 3 148 1>, + <&syscon 4 147 1>, + <&syscon 5 146 1>, + <&syscon 6 145 1>, + <&syscon 7 144 1>; +}; + +&gpio1 { + gpio-ranges = <&syscon 0 143 1>, + <&syscon 1 142 1>, + <&syscon 2 141 1>, + <&syscon 3 140 1>, + <&syscon 4 165 1>, + <&syscon 5 164 1>, + <&syscon 6 163 1>, + <&syscon 7 160 1>; +}; + +&gpio2 { + gpio-ranges = <&syscon 0 115 1>; +}; + +/* edb9302 doesn't have GPIO Port D present */ +&gpio3 { + status = "disabled"; +}; + +&gpio4 { + gpio-ranges = <&syscon 0 97 2>; +}; + +&gpio5 { + gpio-ranges = <&syscon 1 170 1>, + <&syscon 2 169 1>, + <&syscon 3 168 1>; +}; + +&gpio6 { + gpio-ranges = <&syscon 0 87 2>; +}; + +&gpio7 { + gpio-ranges = <&syscon 2 199 4>; +}; + +&i2s { + pinctrl-names = "default"; + pinctrl-0 = <&i2s_on_ac97_pins>; + status = "okay"; + i2s_port: port { + i2s_ep: endpoint { + 
system-clock-direction-out; + frame-master; + bitclock-master; + mclk-fs = <256>; + dai-format = "i2s"; + convert-channels = <2>; + convert-sample-format = "s32_le"; + remote-endpoint = <&codec_ep>; + }; + }; +}; + +&mdio0 { + phy0: ethernet-phy@1 { + reg = <1>; + device_type = "ethernet-phy"; + }; +}; + +&spi0 { + cs-gpios = <&gpio0 6 GPIO_ACTIVE_LOW + &gpio0 7 GPIO_ACTIVE_LOW>; + dmas = <&dma1 10 2>, <&dma1 10 1>; + dma-names = "rx", "tx"; + status = "okay"; + + cs4271: codec@0 { + compatible = "cirrus,cs4271"; + reg = <0>; + #sound-dai-cells = <0>; + spi-max-frequency = <6000000>; + spi-cpol; + spi-cpha; + reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>; + port { + codec_ep: endpoint { + remote-endpoint = <&i2s_ep>; + }; + }; + }; + + at25f1024: eeprom@1 { + compatible = "atmel,at25"; + reg = <1>; + address-width = <8>; + size = <0x20000>; + pagesize = <256>; + spi-max-frequency = <20000000>; + }; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts b/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts new file mode 100644 index 00000000000000..9e03f93d9fc852 --- /dev/null +++ b/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree file for Technologic Systems ts7250 board based on Cirrus EP9302 SoC + */ +/dts-v1/; +#include "ep93xx.dtsi" + +/ { + compatible = "technologic,ts7250", "cirrus,ep9301"; + model = "TS-7250 SBC"; + #address-cells = <1>; + #size-cells = <1>; + + chosen { + }; + + memory@0 { + device_type = "memory"; + /* should be set from ATAGS */ + reg = <0x00000000 0x02000000>, + <0x000530c0 0x01fdd000>; + }; + + leds { + compatible = "gpio-leds"; + led-0 { + label = "grled"; + gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + function = LED_FUNCTION_HEARTBEAT; + }; + + led-1 { + label = "rdled"; + gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>; + function = LED_FUNCTION_FAULT; + }; + }; +}; + +&ebi { + nand-controller@60000000 { + compatible = "technologic,ts7200-nand"; + reg = <0x60000000 0x8000000>; + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + reg = <0>; + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "TS-BOOTROM"; + reg = <0x00000000 0x00020000>; + read-only; + }; + + partition@20000 { + label = "Linux"; + reg = <0x00020000 0x07d00000>; + }; + + partition@7d20000 { + label = "RedBoot"; + reg = <0x07d20000 0x002e0000>; + read-only; + }; + }; + }; + }; + + rtc@10800000 { + compatible = "st,m48t86"; + reg = <0x10800000 0x1>, + <0x11700000 0x1>; + }; + + watchdog@23800000 { + compatible = "technologic,ts7200-wdt"; + reg = <0x23800000 0x01>, + <0x23c00000 0x01>; + timeout-sec = <30>; + }; +}; + +ð0 { + phy-handle = <&phy0>; +}; + +&gpio1 { + /* PWM */ + gpio-ranges = <&syscon 6 163 1>; +}; + +/* ts7250 doesn't have GPIO Port D present */ +&gpio3 { + status = "disabled"; +}; + +&gpio4 { + gpio-ranges = <&syscon 0 97 2>; +}; + +&gpio6 { + gpio-ranges = <&syscon 0 87 2>; +}; + +&gpio7 { + gpio-ranges = <&syscon 2 199 4>; +}; + +&spi0 { + cs-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>; + dmas = <&dma1 10 2>, <&dma1 10 1>; + dma-names = "rx", "tx"; + status = "okay"; + + tmp122: temperature-sensor@0 { + compatible = "ti,tmp122"; + reg = <0>; + spi-max-frequency = <2000000>; + }; +}; + +&mdio0 { + phy0: ethernet-phy@1 { + reg = <1>; + device_type = "ethernet-phy"; + }; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status 
= "okay"; +}; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/cirrus/ep93xx.dtsi b/arch/arm/boot/dts/cirrus/ep93xx.dtsi new file mode 100644 index 00000000000000..0dd1eee346ca78 --- /dev/null +++ b/arch/arm/boot/dts/cirrus/ep93xx.dtsi @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree file for Cirrus Logic systems EP93XX SoC + */ +#include +#include +#include +#include +/ { + soc: soc { + compatible = "simple-bus"; + ranges; + #address-cells = <1>; + #size-cells = <1>; + + syscon: syscon@80930000 { + compatible = "cirrus,ep9301-syscon", "syscon"; + reg = <0x80930000 0x1000>; + + #clock-cells = <1>; + clocks = <&xtali>; + + spi_default_pins: pins-spi { + function = "spi"; + groups = "ssp"; + }; + + ac97_default_pins: pins-ac97 { + function = "ac97"; + groups = "ac97"; + }; + + i2s_on_ssp_pins: pins-i2sonssp { + function = "i2s"; + groups = "i2s_on_ssp"; + }; + + i2s_on_ac97_pins: pins-i2sonac97 { + function = "i2s"; + groups = "i2s_on_ac97"; + }; + + gpio1_default_pins: pins-gpio1 { + function = "gpio"; + groups = "gpio1agrp"; + }; + + pwm1_default_pins: pins-pwm1 { + function = "pwm"; + groups = "pwm1"; + }; + + gpio2_default_pins: pins-gpio2 { + function = "gpio"; + groups = "gpio2agrp"; + }; + + gpio3_default_pins: pins-gpio3 { + function = "gpio"; + groups = "gpio3agrp"; + }; + + keypad_default_pins: pins-keypad { + function = "keypad"; + groups = "keypadgrp"; + }; + + gpio4_default_pins: pins-gpio4 { + function = "gpio"; + groups = "gpio4agrp"; + }; + + gpio6_default_pins: pins-gpio6 { + function = "gpio"; + groups = "gpio6agrp"; + }; + + gpio7_default_pins: pins-gpio7 { + function = "gpio"; + groups = "gpio7agrp"; + }; + + ide_default_pins: pins-ide { + function = "pata"; + groups = "idegrp"; + }; + + lcd_on_dram0_pins: pins-rasteronsdram0 { + function = "lcd"; + groups = "rasteronsdram0grp"; + }; + + lcd_on_dram3_pins: pins-rasteronsdram3 { + function = "lcd"; + groups = "rasteronsdram3grp"; + }; + }; + + adc: adc@80900000 { + compatible = "cirrus,ep9301-adc"; + reg = <0x80900000 0x28>; + clocks = <&syscon EP93XX_CLK_ADC>; + interrupt-parent = <&vic0>; + interrupts = <30>; + status = "disabled"; + }; + + /* + * The EP93XX expansion bus is a set of up to 7 each up to 16MB + * windows in the 256MB space from 0x50000000 to 0x5fffffff. + * But since we don't require to setup it in any way, we can + * represent it as a simple-bus. 
+ */ + ebi: bus@80080000 { + compatible = "simple-bus"; + reg = <0x80080000 0x20>; + native-endian; + #address-cells = <1>; + #size-cells = <1>; + ranges; + }; + + dma0: dma-controller@80000000 { + compatible = "cirrus,ep9301-dma-m2p"; + reg = <0x80000000 0x0040>, + <0x80000040 0x0040>, + <0x80000080 0x0040>, + <0x800000c0 0x0040>, + <0x80000240 0x0040>, + <0x80000200 0x0040>, + <0x800002c0 0x0040>, + <0x80000280 0x0040>, + <0x80000340 0x0040>, + <0x80000300 0x0040>; + clocks = <&syscon EP93XX_CLK_M2P0>, + <&syscon EP93XX_CLK_M2P1>, + <&syscon EP93XX_CLK_M2P2>, + <&syscon EP93XX_CLK_M2P3>, + <&syscon EP93XX_CLK_M2P4>, + <&syscon EP93XX_CLK_M2P5>, + <&syscon EP93XX_CLK_M2P6>, + <&syscon EP93XX_CLK_M2P7>, + <&syscon EP93XX_CLK_M2P8>, + <&syscon EP93XX_CLK_M2P9>; + clock-names = "m2p0", "m2p1", + "m2p2", "m2p3", + "m2p4", "m2p5", + "m2p6", "m2p7", + "m2p8", "m2p9"; + interrupt-parent = <&vic0>; + interrupts = <7>, <8>, <9>, <10>, <11>, + <12>, <13>, <14>, <15>, <16>; + #dma-cells = <2>; + }; + + dma1: dma-controller@80000100 { + compatible = "cirrus,ep9301-dma-m2m"; + reg = <0x80000100 0x0040>, + <0x80000140 0x0040>; + clocks = <&syscon EP93XX_CLK_M2M0>, + <&syscon EP93XX_CLK_M2M1>; + clock-names = "m2m0", "m2m1"; + interrupt-parent = <&vic0>; + interrupts = <17>, <18>; + #dma-cells = <2>; + }; + + eth0: ethernet@80010000 { + compatible = "cirrus,ep9301-eth"; + reg = <0x80010000 0x10000>; + interrupt-parent = <&vic1>; + interrupts = <7>; + mdio0: mdio { + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + gpio0: gpio@80840000 { + compatible = "cirrus,ep9301-gpio"; + reg = <0x80840000 0x04>, + <0x80840010 0x04>, + <0x80840090 0x1c>; + reg-names = "data", "dir", "intr"; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&vic1>; + interrupts = <27>; + }; + + gpio1: gpio@80840004 { + compatible = "cirrus,ep9301-gpio"; + reg = <0x80840004 0x04>, + <0x80840014 0x04>, + <0x808400ac 0x1c>; + reg-names = "data", "dir", "intr"; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&vic1>; + interrupts = <27>; + }; + + gpio2: gpio@80840008 { + compatible = "cirrus,ep9301-gpio"; + reg = <0x80840008 0x04>, + <0x80840018 0x04>; + reg-names = "data", "dir"; + gpio-controller; + #gpio-cells = <2>; + pinctrl-names = "default"; + pinctrl-0 = <&gpio2_default_pins>; + }; + + gpio3: gpio@8084000c { + compatible = "cirrus,ep9301-gpio"; + reg = <0x8084000c 0x04>, + <0x8084001c 0x04>; + reg-names = "data", "dir"; + gpio-controller; + #gpio-cells = <2>; + pinctrl-names = "default"; + pinctrl-0 = <&gpio3_default_pins>; + }; + + gpio4: gpio@80840020 { + compatible = "cirrus,ep9301-gpio"; + reg = <0x80840020 0x04>, + <0x80840024 0x04>; + reg-names = "data", "dir"; + gpio-controller; + #gpio-cells = <2>; + pinctrl-names = "default"; + pinctrl-0 = <&gpio4_default_pins>; + }; + + gpio5: gpio@80840030 { + compatible = "cirrus,ep9301-gpio"; + reg = <0x80840030 0x04>, + <0x80840034 0x04>, + <0x8084004c 0x1c>; + reg-names = "data", "dir", "intr"; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts-extended = <&vic0 19>, <&vic0 20>, + <&vic0 21>, <&vic0 22>, + <&vic1 15>, <&vic1 16>, + <&vic1 17>, <&vic1 18>; + }; + + gpio6: gpio@80840038 { + compatible = "cirrus,ep9301-gpio"; + reg = <0x80840038 0x04>, + <0x8084003c 0x04>; + reg-names = "data", "dir"; + gpio-controller; + #gpio-cells = <2>; + pinctrl-names = "default"; + pinctrl-0 = 
<&gpio6_default_pins>; + }; + + gpio7: gpio@80840040 { + compatible = "cirrus,ep9301-gpio"; + reg = <0x80840040 0x04>, + <0x80840044 0x04>; + reg-names = "data", "dir"; + gpio-controller; + #gpio-cells = <2>; + pinctrl-names = "default"; + pinctrl-0 = <&gpio7_default_pins>; + }; + + i2s: i2s@80820000 { + compatible = "cirrus,ep9301-i2s"; + reg = <0x80820000 0x100>; + #sound-dai-cells = <0>; + interrupt-parent = <&vic1>; + interrupts = <28>; + clocks = <&syscon EP93XX_CLK_I2S_MCLK>, + <&syscon EP93XX_CLK_I2S_SCLK>, + <&syscon EP93XX_CLK_I2S_LRCLK>; + clock-names = "mclk", "sclk", "lrclk"; + dmas = <&dma0 0 1>, <&dma0 0 2>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + ide: ide@800a0000 { + compatible = "cirrus,ep9312-pata"; + reg = <0x800a0000 0x38>; + interrupt-parent = <&vic1>; + interrupts = <8>; + pinctrl-names = "default"; + pinctrl-0 = <&ide_default_pins>; + status = "disabled"; + }; + + vic0: interrupt-controller@800b0000 { + compatible = "arm,pl192-vic"; + reg = <0x800b0000 0x1000>; + interrupt-controller; + #interrupt-cells = <1>; + valid-mask = <0x7ffffffc>; + valid-wakeup-mask = <0x0>; + }; + + vic1: interrupt-controller@800c0000 { + compatible = "arm,pl192-vic"; + reg = <0x800c0000 0x1000>; + interrupt-controller; + #interrupt-cells = <1>; + valid-mask = <0x1fffffff>; + valid-wakeup-mask = <0x0>; + }; + + keypad: keypad@800f0000 { + compatible = "cirrus,ep9307-keypad"; + reg = <0x800f0000 0x0c>; + interrupt-parent = <&vic0>; + interrupts = <29>; + clocks = <&syscon EP93XX_CLK_KEYPAD>; + pinctrl-names = "default"; + pinctrl-0 = <&keypad_default_pins>; + linux,keymap = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + }; + + pwm0: pwm@80910000 { + compatible = "cirrus,ep9301-pwm"; + reg = <0x80910000 0x10>; + clocks = <&syscon EP93XX_CLK_PWM>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm1: pwm@80910020 { + compatible = "cirrus,ep9301-pwm"; + reg = <0x80910020 0x10>; + clocks = <&syscon EP93XX_CLK_PWM>; + #pwm-cells = <3>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm1_default_pins>; + status = "disabled"; + }; + + rtc0: rtc@80920000 { + compatible = "cirrus,ep9301-rtc"; + reg = <0x80920000 0x100>; + }; + + spi0: spi@808a0000 { + compatible = "cirrus,ep9301-spi"; + reg = <0x808a0000 0x18>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&vic1>; + interrupts = <21>; + clocks = <&syscon EP93XX_CLK_SPI>; + pinctrl-names = "default"; + pinctrl-0 = <&spi_default_pins>; + status = "disabled"; + }; + + timer: timer@80810000 { + compatible = "cirrus,ep9301-timer"; + reg = <0x80810000 0x100>; + interrupt-parent = <&vic1>; + interrupts = <19>; + }; + + uart0: serial@808c0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x808c0000 0x1000>; + arm,primecell-periphid = <0x00041010>; + clocks = <&syscon EP93XX_CLK_UART1>, <&syscon EP93XX_CLK_UART>; + clock-names = "uartclk", "apb_pclk"; + interrupt-parent = <&vic1>; + interrupts = <20>; + status = "disabled"; + }; + + uart1: uart@808d0000 { + compatible = "arm,primecell"; + reg = <0x808d0000 0x1000>; + arm,primecell-periphid = <0x00041010>; + clocks = <&syscon EP93XX_CLK_UART2>, <&syscon EP93XX_CLK_UART>; + clock-names = "apb:uart2", "apb_pclk"; + interrupt-parent = <&vic1>; + interrupts = <22>; + status = "disabled"; + }; + + uart2: uart@808b0000 { + compatible = "arm,primecell"; + reg = <0x808b0000 0x1000>; + arm,primecell-periphid = <0x00041010>; + clocks = <&syscon EP93XX_CLK_UART3>, <&syscon EP93XX_CLK_UART>; + clock-names = "apb:uart3", "apb_pclk"; + interrupt-parent = 
<&vic1>; + interrupts = <23>; + status = "disabled"; + }; + + usb0: usb@80020000 { + compatible = "generic-ohci"; + reg = <0x80020000 0x10000>; + interrupt-parent = <&vic1>; + interrupts = <24>; + clocks = <&syscon EP93XX_CLK_USB>; + status = "disabled"; + }; + + watchdog0: watchdog@80940000 { + compatible = "cirrus,ep9301-wdt"; + reg = <0x80940000 0x08>; + }; + }; + + xtali: oscillator { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <14745600>; + clock-output-names = "xtali"; + }; +}; diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_vining_fpga.dts index 65f390bf897534..84f39dec3c4250 100644 --- a/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_vining_fpga.dts +++ b/arch/arm/boot/dts/intel/socfpga/socfpga_cyclone5_vining_fpga.dts @@ -130,8 +130,8 @@ gpio: pca9557@1f { #gpio-cells = <2>; }; - temp: lm75@48 { - compatible = "lm75"; + temp: temperature-sensor@48 { + compatible = "national,lm75"; reg = <0x48>; }; diff --git a/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi b/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi index f3a3cb6ac31148..8208c6a9627a8c 100644 --- a/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi +++ b/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi @@ -423,14 +423,14 @@ &i2c0 { status = "okay"; /* U26 temperature sensor placed near SoC */ - temp1: nct75@4c { - compatible = "lm75"; + temp1: temperature-sensor@4c { + compatible = "ti,tmp75c"; reg = <0x4c>; }; /* U27 temperature sensor placed near RTC battery */ - temp2: nct75@4d { - compatible = "lm75"; + temp2: temperature-sensor@4d { + compatible = "ti,tmp75c"; reg = <0x4d>; }; diff --git a/arch/arm/boot/dts/microchip/at91-sam9x60_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sam9x60_curiosity.dts index c6fbdd29019f39..b9ffd9e5faacc3 100644 --- a/arch/arm/boot/dts/microchip/at91-sam9x60_curiosity.dts +++ b/arch/arm/boot/dts/microchip/at91-sam9x60_curiosity.dts @@ -198,8 +198,6 @@ i2c0: i2c@600 { dmas = <0>, <0>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_flx0_default>; - #address-cells = <1>; - #size-cells = <0>; i2c-analog-filter; i2c-digital-filter; i2c-digital-filter-width-ns = <35>; diff --git a/arch/arm/boot/dts/microchip/at91-sam9x60ek.dts b/arch/arm/boot/dts/microchip/at91-sam9x60ek.dts index f3cbb675cea4ae..3b38707d736eed 100644 --- a/arch/arm/boot/dts/microchip/at91-sam9x60ek.dts +++ b/arch/arm/boot/dts/microchip/at91-sam9x60ek.dts @@ -207,8 +207,6 @@ &flx0 { status = "okay"; i2c0: i2c@600 { - #address-cells = <1>; - #size-cells = <0>; dmas = <0>, <0>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_flx0_default>; @@ -254,8 +252,6 @@ &flx6 { status = "okay"; i2c6: i2c@600 { - #address-cells = <1>; - #size-cells = <0>; dmas = <0>, <0>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_flx6_default>; diff --git a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1.dtsi index 4617805c774882..c173f49cb91074 100644 --- a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1.dtsi +++ b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1.dtsi @@ -31,6 +31,14 @@ main_xtal { }; }; + reg_5v: regulator-5v { + compatible = "regulator-fixed"; + regulator-name = "VDD_MAIN"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; + wifi_pwrseq: wifi_pwrseq { compatible = "mmc-pwrseq-wilc1000"; reset-gpios = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>; @@ -70,6 +78,11 @@ &i2c1 { mcp16502@5b { 
compatible = "microchip,mcp16502"; reg = <0x5b>; + lvin-supply = <®_5v>; + pvin1-supply = <®_5v>; + pvin2-supply = <®_5v>; + pvin3-supply = <®_5v>; + pvin4-supply = <®_5v>; status = "okay"; lpm-gpios = <&pioBU 0 GPIO_ACTIVE_LOW>; diff --git a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts index 6b02b7bcfd4978..951a0c97d3c6bb 100644 --- a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts +++ b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts @@ -84,6 +84,14 @@ memory@20000000 { device_type = "memory"; reg = <0x20000000 0x20000000>; }; + + reg_5v: regulator-5v { + compatible = "regulator-fixed"; + regulator-name = "5V_MAIN"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; }; &adc { @@ -144,6 +152,11 @@ &i2c0 { mcp16502@5b { compatible = "microchip,mcp16502"; reg = <0x5b>; + lvin-supply = <®_5v>; + pvin1-supply = <®_5v>; + pvin2-supply = <®_5v>; + pvin3-supply = <®_5v>; + pvin4-supply = <®_5v>; status = "okay"; lpm-gpios = <&pioBU 0 GPIO_ACTIVE_LOW>; diff --git a/arch/arm/boot/dts/microchip/at91-sama5d2_icp.dts b/arch/arm/boot/dts/microchip/at91-sama5d2_icp.dts index 999adeca6f3360..5e2bb517a4809e 100644 --- a/arch/arm/boot/dts/microchip/at91-sama5d2_icp.dts +++ b/arch/arm/boot/dts/microchip/at91-sama5d2_icp.dts @@ -78,6 +78,14 @@ led-blue { linux,default-trigger = "heartbeat"; }; }; + + reg_5v: regulator-5v { + compatible = "regulator-fixed"; + regulator-name = "VDD_MAIN_5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; }; &adc { @@ -190,6 +198,11 @@ i2c6: i2c@600 { mcp16502@5b { compatible = "microchip,mcp16502"; reg = <0x5b>; + lvin-supply = <®_5v>; + pvin1-supply = <®_5v>; + pvin2-supply = <®_5v>; + pvin3-supply = <®_5v>; + pvin4-supply = <®_5v>; status = "okay"; lpm-gpios = <&pioBU 7 GPIO_ACTIVE_LOW>; diff --git a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts index 009d2c83242102..645e49fdb7fe79 100644 --- a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts +++ b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts @@ -72,6 +72,14 @@ memory@60000000 { device_type = "memory"; reg = <0x60000000 0x10000000>; /* 256 MiB DDR3L-1066 16-bit */ }; + + reg_5v: regulator-5v { + compatible = "regulator-fixed"; + regulator-name = "5V_MAIN"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; }; &adc { @@ -189,6 +197,11 @@ eeprom@51 { pmic@5b { compatible = "microchip,mcp16502"; reg = <0x5b>; + lvin-supply = <®_5v>; + pvin1-supply = <®_5v>; + pvin2-supply = <®_5v>; + pvin3-supply = <®_5v>; + pvin4-supply = <®_5v>; regulators { vdd_3v3: VDD_IO { diff --git a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts index 20b2497657ae48..ed75d491a24603 100644 --- a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts +++ b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts @@ -88,6 +88,14 @@ memory@60000000 { reg = <0x60000000 0x20000000>; }; + reg_5v: regulator-5v { + compatible = "regulator-fixed"; + regulator-name = "5V_MAIN"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; + sound: sound { compatible = "simple-audio-card"; simple-audio-card,name = "sama7g5ek audio"; @@ -239,6 +247,11 @@ i2c1: i2c@600 { mcp16502@5b { compatible = "microchip,mcp16502"; reg = <0x5b>; + lvin-supply = <®_5v>; + 
pvin1-supply = <®_5v>; + pvin2-supply = <®_5v>; + pvin3-supply = <®_5v>; + pvin4-supply = <®_5v>; status = "okay"; regulators { @@ -403,6 +416,42 @@ i2c8: i2c@600 { i2c-digital-filter; i2c-digital-filter-width-ns = <35>; status = "okay"; + + eeprom0: eeprom@52 { + compatible = "microchip,24aa025e48"; + reg = <0x52>; + size = <256>; + pagesize = <16>; + vcc-supply = <&vdd_3v3>; + + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + eeprom0_eui48: eui48@fa { + reg = <0xfa 0x6>; + }; + }; + }; + + eeprom1: eeprom@53 { + compatible = "microchip,24aa025e48"; + reg = <0x53>; + size = <256>; + pagesize = <16>; + vcc-supply = <&vdd_3v3>; + + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + eeprom1_eui48: eui48@fa { + reg = <0xfa 0x6>; + }; + }; + }; }; }; @@ -440,6 +489,8 @@ &pinctrl_gmac0_mdio_default &pinctrl_gmac0_txck_default &pinctrl_gmac0_phy_irq>; phy-mode = "rgmii-id"; + nvmem-cells = <&eeprom0_eui48>; + nvmem-cell-names = "mac-address"; status = "okay"; ethernet-phy@7 { @@ -457,6 +508,8 @@ &gmac1 { &pinctrl_gmac1_mdio_default &pinctrl_gmac1_phy_irq>; phy-mode = "rmii"; + nvmem-cells = <&eeprom1_eui48>; + nvmem-cell-names = "mac-address"; status = "okay"; /* Conflict with pdmc0. */ ethernet-phy@0 { diff --git a/arch/arm/boot/dts/microchip/at91rm9200.dtsi b/arch/arm/boot/dts/microchip/at91rm9200.dtsi index 16c675e3a89030..02a838541dc35f 100644 --- a/arch/arm/boot/dts/microchip/at91rm9200.dtsi +++ b/arch/arm/boot/dts/microchip/at91rm9200.dtsi @@ -225,7 +225,7 @@ macb0: ethernet@fffbc000 { pinctrl@fffff400 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,at91rm9200-pinctrl", "simple-mfd"; ranges = <0xfffff400 0xfffff400 0x800>; atmel,mux-mask = < diff --git a/arch/arm/boot/dts/microchip/at91sam9260.dtsi b/arch/arm/boot/dts/microchip/at91sam9260.dtsi index e56d5546554c3c..0038183e9a531f 100644 --- a/arch/arm/boot/dts/microchip/at91sam9260.dtsi +++ b/arch/arm/boot/dts/microchip/at91sam9260.dtsi @@ -170,7 +170,7 @@ tcb1: timer@fffdc000 { pinctrl: pinctrl@fffff400 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,at91rm9200-pinctrl", "simple-mfd"; ranges = <0xfffff400 0xfffff400 0x600>; atmel,mux-mask = < diff --git a/arch/arm/boot/dts/microchip/at91sam9261.dtsi b/arch/arm/boot/dts/microchip/at91sam9261.dtsi index 307b60658014c0..b57a7fd67197f9 100644 --- a/arch/arm/boot/dts/microchip/at91sam9261.dtsi +++ b/arch/arm/boot/dts/microchip/at91sam9261.dtsi @@ -317,7 +317,7 @@ dbgu: serial@fffff200 { pinctrl@fffff400 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,at91rm9200-pinctrl", "simple-mfd"; ranges = <0xfffff400 0xfffff400 0x600>; atmel,mux-mask = diff --git a/arch/arm/boot/dts/microchip/at91sam9263.dtsi b/arch/arm/boot/dts/microchip/at91sam9263.dtsi index 75d8ff2d12c8aa..b95d4016ae9f6f 100644 --- a/arch/arm/boot/dts/microchip/at91sam9263.dtsi +++ b/arch/arm/boot/dts/microchip/at91sam9263.dtsi @@ -167,7 +167,7 @@ poweroff@fffffd10 { pinctrl@fffff200 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,at91rm9200-pinctrl", "simple-mfd"; ranges = <0xfffff200 0xfffff200 0xa00>; atmel,mux-mask = < diff --git a/arch/arm/boot/dts/microchip/at91sam9g20ek_2mmc.dts b/arch/arm/boot/dts/microchip/at91sam9g20ek_2mmc.dts index 
172af6ff4b1894..3e5eab57d1a5ad 100644 --- a/arch/arm/boot/dts/microchip/at91sam9g20ek_2mmc.dts +++ b/arch/arm/boot/dts/microchip/at91sam9g20ek_2mmc.dts @@ -40,13 +40,13 @@ pinctrl_board_mmc0_slot0: mmc0_slot0-board { leds { compatible = "gpio-leds"; - ds1 { + led-ds1 { label = "ds1"; gpios = <&pioB 9 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; }; - ds5 { + led-ds5 { label = "ds5"; gpios = <&pioB 8 GPIO_ACTIVE_LOW>; }; diff --git a/arch/arm/boot/dts/microchip/at91sam9g25-gardena-smart-gateway.dts b/arch/arm/boot/dts/microchip/at91sam9g25-gardena-smart-gateway.dts index af70eb8a3a0212..e0c1e8df81b10f 100644 --- a/arch/arm/boot/dts/microchip/at91sam9g25-gardena-smart-gateway.dts +++ b/arch/arm/boot/dts/microchip/at91sam9g25-gardena-smart-gateway.dts @@ -37,71 +37,71 @@ button { leds { compatible = "gpio-leds"; - power_blue { + led-power-blue { label = "smartgw:power:blue"; gpios = <&pioC 21 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - power_green { + led-power-green { label = "smartgw:power:green"; gpios = <&pioC 20 GPIO_ACTIVE_HIGH>; default-state = "on"; }; - power_red { + led-power-red { label = "smartgw:power:red"; gpios = <&pioC 19 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - radio_blue { + led-radio-blue { label = "smartgw:radio:blue"; gpios = <&pioC 18 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - radio_green { + led-radio-green { label = "smartgw:radio:green"; gpios = <&pioC 17 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - radio_red { + led-radio-red { label = "smartgw:radio:red"; gpios = <&pioC 16 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - internet_blue { + led-internet-blue { label = "smartgw:internet:blue"; gpios = <&pioC 15 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - internet_green { + led-internet-green { label = "smartgw:internet:green"; gpios = <&pioC 14 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - internet_red { + led-internet-red { label = "smartgw:internet:red"; gpios = <&pioC 13 GPIO_ACTIVE_HIGH>; default-state = "off"; }; - heartbeat { + led-heartbeat { label = "smartgw:heartbeat"; gpios = <&pioB 8 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; }; - pb18 { + led-pb18 { status = "disabled"; }; - pd21 { + led-pd21 { status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/microchip/at91sam9g45.dtsi b/arch/arm/boot/dts/microchip/at91sam9g45.dtsi index 325c63a531189f..c54eb21d5cba82 100644 --- a/arch/arm/boot/dts/microchip/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/microchip/at91sam9g45.dtsi @@ -190,7 +190,7 @@ dma: dma-controller@ffffec00 { pinctrl@fffff200 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,at91rm9200-pinctrl", "simple-mfd"; ranges = <0xfffff200 0xfffff200 0xa00>; atmel,mux-mask = < diff --git a/arch/arm/boot/dts/microchip/at91sam9n12.dtsi b/arch/arm/boot/dts/microchip/at91sam9n12.dtsi index 8dc04e9031a6c3..844bd50943fcf3 100644 --- a/arch/arm/boot/dts/microchip/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/microchip/at91sam9n12.dtsi @@ -226,7 +226,7 @@ dma: dma-controller@ffffec00 { pinctrl@fffff400 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,at91sam9x5-pinctrl", "simple-mfd"; ranges = <0xfffff400 0xfffff400 0x800>; atmel,mux-mask = < diff --git a/arch/arm/boot/dts/microchip/at91sam9n12ek.dts b/arch/arm/boot/dts/microchip/at91sam9n12ek.dts index 4c644d4c6be727..643c3b2ab97e52 100644 --- a/arch/arm/boot/dts/microchip/at91sam9n12ek.dts +++ 
b/arch/arm/boot/dts/microchip/at91sam9n12ek.dts @@ -207,19 +207,19 @@ bl_reg: backlight_regulator { leds { compatible = "gpio-leds"; - d8 { + led-d8 { label = "d8"; gpios = <&pioB 4 GPIO_ACTIVE_LOW>; linux,default-trigger = "mmc0"; }; - d9 { + led-d9 { label = "d9"; gpios = <&pioB 5 GPIO_ACTIVE_LOW>; linux,default-trigger = "nand-disk"; }; - d10 { + led-d10 { label = "d10"; gpios = <&pioB 6 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; diff --git a/arch/arm/boot/dts/microchip/at91sam9rl.dtsi b/arch/arm/boot/dts/microchip/at91sam9rl.dtsi index 7436b5c862b10b..1fec9fcc7cd184 100644 --- a/arch/arm/boot/dts/microchip/at91sam9rl.dtsi +++ b/arch/arm/boot/dts/microchip/at91sam9rl.dtsi @@ -339,7 +339,7 @@ dbgu: serial@fffff200 { pinctrl@fffff400 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,at91rm9200-pinctrl", "simple-mfd"; ranges = <0xfffff400 0xfffff400 0x800>; atmel,mux-mask = diff --git a/arch/arm/boot/dts/microchip/at91sam9x5.dtsi b/arch/arm/boot/dts/microchip/at91sam9x5.dtsi index a7456c2191fa1c..27c1f2861cc34d 100644 --- a/arch/arm/boot/dts/microchip/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/microchip/at91sam9x5.dtsi @@ -202,7 +202,7 @@ dma1: dma-controller@ffffee00 { pinctrl: pinctrl@fffff400 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,at91sam9x5-pinctrl", "simple-mfd"; ranges = <0xfffff400 0xfffff400 0x800>; /* shared pinctrl settings */ diff --git a/arch/arm/boot/dts/microchip/at91sam9x5cm.dtsi b/arch/arm/boot/dts/microchip/at91sam9x5cm.dtsi index cdd37f67280bfe..fb3c19bdfcb66c 100644 --- a/arch/arm/boot/dts/microchip/at91sam9x5cm.dtsi +++ b/arch/arm/boot/dts/microchip/at91sam9x5cm.dtsi @@ -120,13 +120,13 @@ rootfs@800000 { leds { compatible = "gpio-leds"; - pb18 { + led-pb18 { label = "pb18"; gpios = <&pioB 18 GPIO_ACTIVE_LOW>; linux,default-trigger = "heartbeat"; }; - pd21 { + led-pd21 { label = "pd21"; gpios = <&pioD 21 GPIO_ACTIVE_HIGH>; }; diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi index 291540e5d81e76..04a6d716ecaf8a 100644 --- a/arch/arm/boot/dts/microchip/sam9x60.dtsi +++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi @@ -215,6 +215,8 @@ i2c4: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <13 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 13>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -284,6 +286,8 @@ i2c5: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <14 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 14>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -394,6 +398,8 @@ i2c11: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <32 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 32>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -443,6 +449,8 @@ i2c12: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <33 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 33>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -600,6 +608,8 @@ i2c6: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <9 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc 
PMC_TYPE_PERIPHERAL 9>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -649,6 +659,8 @@ i2c7: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <10 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 10>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -698,6 +710,8 @@ i2c8: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <11 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 11>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -766,6 +780,8 @@ i2c0: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <5 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 5>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -834,6 +850,8 @@ i2c1: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <6 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 6>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -902,6 +920,8 @@ i2c2: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <7 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 7>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -970,6 +990,8 @@ i2c3: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <8 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 8>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -1074,6 +1096,8 @@ i2c9: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <15 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 15>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -1123,6 +1147,8 @@ i2c10: i2c@600 { compatible = "microchip,sam9x60-i2c"; reg = <0x600 0x200>; interrupts = <16 IRQ_TYPE_LEVEL_HIGH 7>; + #address-cells = <1>; + #size-cells = <0>; clocks = <&pmc PMC_TYPE_PERIPHERAL 16>; dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | @@ -1223,7 +1249,7 @@ AT91_XDMAC_DT_PERID(28))>, pinctrl: pinctrl@fffff400 { #address-cells = <1>; #size-cells = <1>; - compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "microchip,sam9x60-pinctrl", "simple-mfd"; ranges = <0xfffff400 0xfffff400 0x800>; /* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */ @@ -1236,7 +1262,7 @@ pinctrl: pinctrl@fffff400 { >; pioA: gpio@fffff400 { - compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; + compatible = "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffff400 0x200>; interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>; #gpio-cells = <2>; @@ -1247,7 +1273,7 @@ pioA: gpio@fffff400 { }; pioB: gpio@fffff600 { - compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; + compatible = "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffff600 0x200>; interrupts = <3 IRQ_TYPE_LEVEL_HIGH 1>; #gpio-cells = <2>; @@ -1259,7 +1285,7 @@ pioB: gpio@fffff600 { }; pioC: gpio@fffff800 { - compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; + compatible = "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffff800 0x200>; interrupts = <4 IRQ_TYPE_LEVEL_HIGH 1>; #gpio-cells = <2>; @@ -1270,7 +1296,7 @@ pioC: gpio@fffff800 { }; pioD: 
gpio@fffffa00 { - compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; + compatible = "microchip,sam9x60-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffffa00 0x200>; interrupts = <44 IRQ_TYPE_LEVEL_HIGH 1>; #gpio-cells = <2>; @@ -1312,7 +1338,7 @@ rtt: rtc@fffffe20 { compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; reg = <0xfffffe20 0x20>; interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; }; pit: timer@fffffe40 { @@ -1338,7 +1364,7 @@ rtc: rtc@fffffea8 { compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc"; reg = <0xfffffea8 0x100>; interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; }; watchdog: watchdog@ffffff80 { diff --git a/arch/arm/boot/dts/microchip/sama5d3.dtsi b/arch/arm/boot/dts/microchip/sama5d3.dtsi index d4fc0c1dfc10a9..39865133aa5670 100644 --- a/arch/arm/boot/dts/microchip/sama5d3.dtsi +++ b/arch/arm/boot/dts/microchip/sama5d3.dtsi @@ -493,7 +493,7 @@ aic: interrupt-controller@fffff000 { pinctrl: pinctrl@fffff200 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus"; + compatible = "atmel,sama5d3-pinctrl", "simple-mfd"; ranges = <0xfffff200 0xfffff200 0xa00>; atmel,mux-mask = < /* A B C */ diff --git a/arch/arm/boot/dts/microchip/sama5d4.dtsi b/arch/arm/boot/dts/microchip/sama5d4.dtsi index 58ceed997889bf..b253ba33fc3850 100644 --- a/arch/arm/boot/dts/microchip/sama5d4.dtsi +++ b/arch/arm/boot/dts/microchip/sama5d4.dtsi @@ -791,7 +791,7 @@ dbgu: serial@fc069000 { pinctrl: pinctrl@fc06a000 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus"; + compatible = "atmel,sama5d3-pinctrl", "simple-mfd"; ranges = <0xfc068000 0xfc068000 0x100 0xfc06a000 0xfc06a000 0x4000>; /* WARNING: revisit as pin spec has changed */ diff --git a/arch/arm/boot/dts/microchip/sama7g5.dtsi b/arch/arm/boot/dts/microchip/sama7g5.dtsi index 75778be126a3d9..17bcdcf0cf4a05 100644 --- a/arch/arm/boot/dts/microchip/sama7g5.dtsi +++ b/arch/arm/boot/dts/microchip/sama7g5.dtsi @@ -272,7 +272,7 @@ rtt: rtc@e001d020 { compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; reg = <0xe001d020 0x30>; interrupts = ; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; }; clk32k: clock-controller@e001d050 { diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts index 1f07ba38291087..886a87dfcd0d83 100644 --- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts +++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm730-kudo.dts @@ -531,8 +531,8 @@ i2c@4 { reg = <4>; // INLET1_T - lm75@5c { - compatible = "ti,lm75"; + temperature-sensor@5c { + compatible = "national,lm75"; reg = <0x5c>; }; }; @@ -543,8 +543,8 @@ i2c@5 { reg = <5>; // OUTLET1_T - lm75@5c { - compatible = "ti,lm75"; + temperature-sensor@5c { + compatible = "national,lm75"; reg = <0x5c>; }; }; @@ -555,8 +555,8 @@ i2c@6 { reg = <6>; // OUTLET2_T - lm75@5c { - compatible = "ti,lm75"; + temperature-sensor@5c { + compatible = "national,lm75"; reg = <0x5c>; }; }; @@ -567,8 +567,8 @@ i2c@7 { reg = <7>; // OUTLET3_T - lm75@5c { - compatible = "ti,lm75"; + temperature-sensor@5c { + compatible = "national,lm75"; reg = <0x5c>; }; }; @@ -697,8 +697,8 @@ i2c@3 { reg = <3>; // M2_ZONE_T - lm75@28 { - compatible = "ti,lm75"; + temperature-sensor@28 { + compatible = "national,lm75"; reg = <0x28>; }; }; @@ -709,8 +709,8 @@ i2c@4 { 
reg = <4>; // BATT_ZONE_T - lm75@29 { - compatible = "ti,lm75"; + temperature-sensor@29 { + compatible = "national,lm75"; reg = <0x29>; }; }; @@ -721,8 +721,8 @@ i2c@5 { reg = <5>; // NBM1_ZONE_T - lm75@28 { - compatible = "ti,lm75"; + temperature-sensor@28 { + compatible = "national,lm75"; reg = <0x28>; }; }; @@ -732,8 +732,8 @@ i2c@6 { reg = <6>; // NBM2_ZONE_T - lm75@29 { - compatible = "ti,lm75"; + temperature-sensor@29 { + compatible = "national,lm75"; reg = <0x29>; }; }; diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-evb.dts index f53d45fa1de878..bcdcb30c7bf6e5 100644 --- a/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-evb.dts +++ b/arch/arm/boot/dts/nuvoton/nuvoton-npcm750-evb.dts @@ -198,7 +198,7 @@ &i2c0 { clock-frequency = <100000>; status = "okay"; lm75@48 { - compatible = "lm75"; + compatible = "national,lm75"; reg = <0x48>; status = "okay"; }; @@ -208,8 +208,8 @@ lm75@48 { &i2c1 { clock-frequency = <100000>; status = "okay"; - lm75@48 { - compatible = "lm75"; + temperature-sensor@48 { + compatible = "national,lm75"; reg = <0x48>; status = "okay"; }; diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts index b78c116cbc18ee..edb907f740bffc 100644 --- a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts +++ b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts @@ -34,7 +34,7 @@ gpio-keys { pinctrl-names = "default"; pinctrl-0 = <&key_pins>; - uid { + button-uid { label = "UID button"; linux,code = ; gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>; @@ -46,12 +46,12 @@ gpio-leds { pinctrl-names = "default"; pinctrl-0 = <&led_pins>; - uid { + led-uid { label = "UID"; gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; }; - heartbeat { + led-heartbeat { label = "heartbeat"; gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; }; diff --git a/arch/arm/boot/dts/nvidia/tegra114-asus-tf701t.dts b/arch/arm/boot/dts/nvidia/tegra114-asus-tf701t.dts index 763ab812eb87f7..f02e2cf65fe82d 100644 --- a/arch/arm/boot/dts/nvidia/tegra114-asus-tf701t.dts +++ b/arch/arm/boot/dts/nvidia/tegra114-asus-tf701t.dts @@ -57,10 +57,24 @@ trustzone@bfe00000 { }; host1x@50000000 { + hdmi@54280000 { + status = "okay"; + + hdmi-supply = <&hdmi_5v0_sys>; + pll-supply = <&avdd_hdmi_pll>; + vdd-supply = <&avdd_hdmi>; + + port { + hdmi_out: endpoint { + remote-endpoint = <&connector_in>; + }; + }; + }; + dsi@54300000 { status = "okay"; - avdd-dsi-csi-supply = <&tps65913_ldo2>; + avdd-dsi-csi-supply = <&avdd_dsi_csi>; nvidia,ganged-mode = <&dsib>; @@ -70,7 +84,7 @@ panel_primary: panel@0 { link2 = <&panel_secondary>; - power-supply = <&vdd_lcd>; + power-supply = <&dvdd_1v8_lcd>; backlight = <&backlight>; }; }; @@ -78,7 +92,7 @@ panel_primary: panel@0 { dsi@54400000 { status = "okay"; - avdd-dsi-csi-supply = <&tps65913_ldo2>; + avdd-dsi-csi-supply = <&avdd_dsi_csi>; panel_secondary: panel@0 { compatible = "sharp,lq101r1sx01"; @@ -87,177 +101,1099 @@ panel_secondary: panel@0 { }; }; + vde@6001a000 { + assigned-clocks = <&tegra_car TEGRA114_CLK_VDE>; + assigned-clock-parents = <&tegra_car TEGRA114_CLK_PLL_P>; + assigned-clock-rates = <408000000>; + }; + pinmux@70000868 { - asus_pad_ec_default: pinmux-asus-pad-ec-default { - ec-interrupt { - nvidia,pins = "kb_col5_pq5"; + pinctrl-names = "default"; + pinctrl-0 = <&state_default>; + + state_default: pinmux { + /* WLAN SDIO pinmux */ + sdmmc1-clk { + nvidia,pins = "sdmmc1_clk_pz0"; + nvidia,function = "sdmmc1"; + nvidia,pull = ; + 
nvidia,tristate = ; + nvidia,enable-input = ; + }; + + sdmmc1-cmd { + nvidia,pins = "sdmmc1_cmd_pz1", + "sdmmc1_dat0_py7", + "sdmmc1_dat1_py6", + "sdmmc1_dat2_py5", + "sdmmc1_dat3_py4"; + nvidia,function = "sdmmc1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + wlan-power { + nvidia,pins = "clk2_req_pcc5"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + wlan-reset { + nvidia,pins = "gpio_x7_aud_px7"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + wlan-host-wake { + nvidia,pins = "pu5"; + nvidia,function = "pwm2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + wlan-3v3-com { + nvidia,pins = "pu1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* UART-A pinmux */ + uarta-cts { + nvidia,pins = "kb_row10_ps2"; + nvidia,function = "uarta"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + uarta-rts { + nvidia,pins = "kb_row9_ps1"; + nvidia,function = "uarta"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* GNSS UART-B pinmux */ + uartb-cts { + nvidia,pins = "uart2_cts_n_pj5"; + nvidia,function = "uartb"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + uartb-rts { + nvidia,pins = "uart2_rts_n_pj6"; + nvidia,function = "uartb"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + uartb-rxd { + nvidia,pins = "uart2_rxd_pc3"; + nvidia,function = "irda"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + uartb-txd { + nvidia,pins = "uart2_txd_pc2"; + nvidia,function = "irda"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* Bluetooth UART-C pinmux */ + uartc-cts-rxd { + nvidia,pins = "uart3_cts_n_pa1", + "uart3_rxd_pw7"; + nvidia,function = "uartc"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + uartc-rts-txd { + nvidia,pins = "uart3_rts_n_pc0", + "uart3_txd_pw6"; + nvidia,function = "uartc"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + bt-shutdown { + nvidia,pins = "kb_col6_pq6", + "kb_col7_pq7"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + bt-dev-wake { + nvidia,pins = "clk3_req_pee1"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + bt-host-wake { + nvidia,pins = "pu6"; + nvidia,function = "pwm3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + bt-pcm-dap4-out { + nvidia,pins = "dap4_fs_pp4", + "dap4_dout_pp6", + "dap4_sclk_pp7"; + nvidia,function = "i2s3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + bt-pcm-dap4-in { + nvidia,pins = "dap4_din_pp5"; + nvidia,function = "i2s3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* UART-D pinmux */ + uartd-cts { + nvidia,pins = "gmi_a17_pb0"; + nvidia,function = "uartd"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + uartd-rts { + nvidia,pins = "gmi_a16_pj7", + "gmi_a19_pk7"; + nvidia,function = "uartd"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* MicroSD pinmux */ + sdmmc3-clk { + nvidia,pins = "sdmmc3_clk_pa6"; + nvidia,function = "sdmmc3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + sdmmc3-data { + nvidia,pins = 
"sdmmc3_cmd_pa7", + "sdmmc3_dat0_pb7", + "sdmmc3_dat1_pb6", + "sdmmc3_dat2_pb5", + "sdmmc3_dat3_pb4", + "kb_col4_pq4", + "sdmmc3_cd_n_pv2", + "sdmmc3_clk_lb_out_pee4", + "sdmmc3_clk_lb_in_pee5"; + nvidia,function = "sdmmc3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + microsd-pwr { + nvidia,pins = "gmi_clk_pk1"; + nvidia,function = "gmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* EMMC pinmux */ + sdmmc4-clk-cmd { + nvidia,pins = "sdmmc4_clk_pcc4"; + nvidia,function = "sdmmc4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + sdmmc4-data { + nvidia,pins = "sdmmc4_cmd_pt7", + "sdmmc4_dat0_paa0", + "sdmmc4_dat1_paa1", + "sdmmc4_dat2_paa2", + "sdmmc4_dat3_paa3", + "sdmmc4_dat4_paa4", + "sdmmc4_dat5_paa5", + "sdmmc4_dat6_paa6", + "sdmmc4_dat7_paa7"; + nvidia,function = "sdmmc4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* I2C pinmux */ + gen1-i2c { + nvidia,pins = "gen1_i2c_scl_pc4", + "gen1_i2c_sda_pc5"; + nvidia,function = "i2c1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,lock = ; + }; + + gen2-i2c { + nvidia,pins = "gen2_i2c_scl_pt5", + "gen2_i2c_sda_pt6"; + nvidia,function = "i2c2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,lock = ; + }; + + cam-i2c { + nvidia,pins = "cam_i2c_scl_pbb1", + "cam_i2c_sda_pbb2"; + nvidia,function = "i2c3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,lock = ; + }; + + ddc-i2c { + nvidia,pins = "ddc_scl_pv4", + "ddc_sda_pv5"; + nvidia,function = "i2c4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,lock = ; + }; + + pwr-i2c { + nvidia,pins = "pwr_i2c_scl_pz6", + "pwr_i2c_sda_pz7"; + nvidia,function = "i2cpwr"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,lock = ; + }; + + /* SPI pinmux */ + spi1-out { + nvidia,pins = "ulpi_clk_py0", + "ulpi_nxt_py2", + "ulpi_stp_py3"; + nvidia,function = "spi1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + spi1-in { + nvidia,pins = "ulpi_dir_py1"; + nvidia,function = "spi1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + spi2 { + nvidia,pins = "ulpi_data4_po5", + "ulpi_data7_po0"; + nvidia,function = "spi2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + spi4-out { + nvidia,pins = "gmi_ad6_pg6", + "gmi_wr_n_pi0"; + nvidia,function = "spi4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + spi4-in { + nvidia,pins = "gmi_ad5_pg5", + "gmi_ad7_pg7"; + nvidia,function = "spi4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* GPIO keys pinmux */ + hall-switch { + nvidia,pins = "ulpi_data4_po5"; + nvidia,function = "spi2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + lineout-switch { + nvidia,pins = "gpio_x5_aud_px5"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + power-key { + nvidia,pins = "kb_col0_pq0"; nvidia,function = "kbc"; nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + volume-keys { + nvidia,pins = "kb_row1_pr1", + "kb_row2_pr2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* Sensors pinmux */ + nct-irq { + nvidia,pins = "ulpi_data3_po4"; + 
nvidia,function = "ulpi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + mpu-irq { + nvidia,pins = "kb_row3_pr3"; + nvidia,function = "rsvd3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* HDMI pinmux */ + hdmi-hpd { + nvidia,pins = "hdmi_int_pn7"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + hdmi-en { + nvidia,pins = "dap3_dout_pp2"; + nvidia,function = "i2s2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + hdmi-cec { + nvidia,pins = "hdmi_cec_pee3"; + nvidia,function = "cec"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* LED pinmux */ + backlight-pwm { + nvidia,pins = "gmi_ad9_ph1"; + nvidia,function = "pwm1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + backlight-en { + nvidia,pins = "gmi_ad10_ph2"; + nvidia,function = "gmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* Touchscreen pinmux */ + touch-irq { + nvidia,pins = "gmi_cs4_n_pk2"; + nvidia,function = "gmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + touch-rst { + nvidia,pins = "gmi_cs3_n_pk4"; + nvidia,function = "gmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + touch-pwr { + nvidia,pins = "gmi_ad8_ph0"; + nvidia,function = "gmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + touch-vio { + nvidia,pins = "gmi_ad12_ph4"; + nvidia,function = "rsvd4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* AUDIO pinmux */ + audio-ldo1 { + nvidia,pins = "sdmmc1_wp_n_pv3"; + nvidia,function = "sdmmc1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + hp-detect { + nvidia,pins = "kb_row7_pr7"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + dap-i2s0-in { + nvidia,pins = "dap1_din_pn1"; + nvidia,function = "i2s0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + dap-i2s0-out { + nvidia,pins = "dap1_dout_pn2", + "dap1_fs_pn0", + "dap1_sclk_pn3"; + nvidia,function = "i2s0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + dap-i2s1-in { + nvidia,pins = "dap2_din_pa4"; + nvidia,function = "i2s1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + dap-i2s1-out { + nvidia,pins = "dap2_dout_pa5", + "dap2_fs_pa2", + "dap2_sclk_pa3"; + nvidia,function = "i2s1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + dap-i2s2-in { + nvidia,pins = "dap3_fs_pp0", + "dap3_sclk_pp3"; + nvidia,function = "i2s2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + dap-i2s2-out { + nvidia,pins = "dap3_din_pp1"; + nvidia,function = "i2s2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + spdif-in { + nvidia,pins = "spdif_in_pk6"; + nvidia,function = "rsvd3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + spdif-out { + nvidia,pins = "spdif_out_pk5"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* AsusEC pinmux */ + ec-irq { + nvidia,pins = "kb_col5_pq5"; + nvidia,function = "kbc"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + ec-req { + nvidia,pins = "kb_col2_pq2"; + nvidia,function = "kbc"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + 
hotplug-i2c { + nvidia,pins = "ulpi_data7_po0"; + nvidia,function = "spi2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + ps2-irq { + nvidia,pins = "gpio_w2_aud_pw2"; + nvidia,function = "spi6"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + kbd-irq { + nvidia,pins = "gmi_cs0_n_pj0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + dvfs-pin { + nvidia,pins = "dvfs_pwm_px0", + "dvfs_clk_px2"; + nvidia,function = "cldvfs"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* Core pinmux */ + clk-32k-out { + nvidia,pins = "clk_32k_out_pa0"; + nvidia,function = "soc"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + sys-clk-req { + nvidia,pins = "sys_clk_req_pz5"; + nvidia,function = "sysclk"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + core-pwr-req { + nvidia,pins = "core_pwr_req"; + nvidia,function = "pwron"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + cpu-pwr-req { + nvidia,pins = "cpu_pwr_req"; + nvidia,function = "cpu"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + pwr-int-n { + nvidia,pins = "pwr_int_n"; + nvidia,function = "pmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + clk-32k-in { + nvidia,pins = "clk_32k_in"; + nvidia,function = "clk"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + owr { + nvidia,pins = "owr"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + reset-out-n { + nvidia,pins = "reset_out_n"; + nvidia,function = "reset_out_n"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* ULPI pinmux */ + ulpi-data0-6 { + nvidia,pins = "ulpi_data0_po1", + "ulpi_data6_po7"; + nvidia,function = "ulpi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + ulpi-data1-5 { + nvidia,pins = "ulpi_data1_po2", + "ulpi_data5_po6"; + nvidia,function = "ulpi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + ulpi-data2-3 { + nvidia,pins = "ulpi_data2_po3", + "ulpi_data3_po4"; + nvidia,function = "ulpi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* PORT V */ + pv0-gpio { + nvidia,pins = "pv0"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + pv1-gpio { + nvidia,pins = "pv1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* PORT U */ + pu0-gpio { + nvidia,pins = "pu0"; + nvidia,function = "rsvd3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + pu2-gpio { + nvidia,pins = "pu2"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* PWM pinmux */ + pwm0 { + nvidia,pins = "pu3"; + nvidia,function = "pwm0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + pwm1 { + nvidia,pins = "pu4"; + nvidia,function = "pwm1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* EXTPERIPH pinmux */ + clk1-out { + nvidia,pins = "clk1_out_pw4"; + nvidia,function = "extperiph1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + clk2-out { + nvidia,pins = "clk2_out_pw5"; + nvidia,function = "extperiph2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + clk3-out { 
+ nvidia,pins = "clk3_out_pee0"; + nvidia,function = "extperiph3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + clk1-req { + nvidia,pins = "clk1_req_pee2"; + nvidia,function = "rsvd3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* GMI pinmux */ + gmi-wp-n { + nvidia,pins = "gmi_wp_n_pc7"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-adv { + nvidia,pins = "gmi_adv_n_pk0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-ad0-ad1 { + nvidia,pins = "gmi_ad0_pg0", + "gmi_ad1_pg1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-ad2-ad3 { + nvidia,pins = "gmi_ad2_pg2", + "gmi_ad3_pg3"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-iordy { + nvidia,pins = "gmi_iordy_pi5"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-a18 { + nvidia,pins = "gmi_a18_pb1"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-wait { + nvidia,pins = "gmi_wait_pi7"; + nvidia,function = "nand"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-cs6-n { + nvidia,pins = "gmi_cs6_n_pi3"; + nvidia,function = "nand"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-cs7-n { + nvidia,pins = "gmi_cs7_n_pi6"; + nvidia,function = "nand"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-dqs-p { + nvidia,pins = "gmi_dqs_p_pj3"; + nvidia,function = "nand"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-cs2-ad { + nvidia,pins = "gmi_cs2_n_pk3", + "gmi_ad14_ph6", + "gmi_ad15_ph7"; + nvidia,function = "gmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-cs4-clk { + nvidia,pins = "gmi_cs4_n_pk2", + "gmi_clk_lb"; + nvidia,function = "gmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-ad11 { + nvidia,pins = "gmi_ad11_ph3"; + nvidia,function = "gmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-cs1-oe { + nvidia,pins = "gmi_cs1_n_pj2", + "gmi_oe_n_pi1"; + nvidia,function = "soc"; + nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - ec-request { - nvidia,pins = "kb_col2_pq2"; - nvidia,function = "kbc"; + gmi-ad4 { + nvidia,pins = "gmi_ad4_pg4"; + nvidia,function = "rsvd4"; nvidia,pull = ; nvidia,tristate = ; - nvidia,enable-input = ; + nvidia,enable-input = ; }; - }; - backlight_default: pinmux-backlight-default { - backlight-enable { - nvidia,pins = "gmi_ad10_ph2"; - nvidia,function = "gmi"; + gmi-ad13 { + nvidia,pins = "gmi_ad13_ph5"; + nvidia,function = "rsvd4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + gmi-rst-n { + nvidia,pins = "gmi_rst_n_pi4"; + nvidia,function = "rsvd4"; nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - codec_default: pinmux-codec-default { - interrupt { - nvidia,pins = "gpio_w2_aud_pw2", - "gpio_w3_aud_pw3"; - nvidia,function = "spi6"; - nvidia,pull = ; + /* PORT CC */ + pcc-gpio { + nvidia,pins = "pcc1", "pcc2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - ldo1-en { - nvidia,pins = "sdmmc1_wp_n_pv3"; - nvidia,function = "sdmmc1"; + /* PORT BB */ + pbb3-gpio { + 
nvidia,pins = "pbb3"; + nvidia,function = "rsvd4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + pbb4-5-6-gpio { + nvidia,pins = "pbb4", "pbb5", "pbb6"; + nvidia,function = "rsvd4"; nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - gpio_hall_sensor_default: pinmux-gpio-hall-sensor-default { - ulpi_data4_po5 { - nvidia,pins = "ulpi_data4_po5"; - nvidia,function = "spi2"; + pbb7-gpio { + nvidia,pins = "pbb7"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + }; + + /* KBC pinmux */ + kb-r0-c1 { + nvidia,pins = "kb_row0_pr0", + "kb_col1_pq1"; + nvidia,function = "rsvd2"; nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - gpio_keys_default: pinmux-gpio-keys-default { - power { - nvidia,pins = "kb_col0_pq0"; + kb-row4 { + nvidia,pins = "kb_row4_pr4"; nvidia,function = "kbc"; - nvidia,pull = ; + nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - volume { - nvidia,pins = "kb_row1_pr1", - "kb_row2_pr2"; - nvidia,function = "rsvd2"; + kb-row5 { + nvidia,pins = "kb_row5_pr5"; + nvidia,function = "kbc"; nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - hp_det_default: pinmux-hp-det-default { - gmi_iordy_pi5 { - nvidia,pins = "kb_row7_pr7"; - nvidia,function = "rsvd2"; - nvidia,pull = ; + kb-row6 { + nvidia,pins = "kb_row6_pr6"; + nvidia,function = "kbc"; + nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - imu_default: pinmux-imu-default { - kb_row3_pr3 { - nvidia,pins = "kb_row3_pr3"; - nvidia,function = "rsvd3"; - nvidia,pull = ; + kb-r8-c3 { + nvidia,pins = "kb_row8_ps0", + "kb_col3_pq3"; + nvidia,function = "kbc"; + nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - pwm_default: pinmux-pwm-default { - gmi_ad9_ph1 { - nvidia,pins = "gmi_ad9_ph1"; - nvidia,function = "pwm1"; + /* VI pinmux */ + cam-mclk { + nvidia,pins = "cam_mclk_pcc0", + "pbb0"; + nvidia,function = "vi_alt3"; nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - /* XXX make this something more sensible */ - pwm_sleep: pinmux-pwm-sleep { - gmi_ad9_ph1 { - nvidia,pins = "gmi_ad9_ph1"; - nvidia,function = "pwm1"; + /* AUD pinmux */ + gpio-x4-aud { + nvidia,pins = "gpio_x4_aud_px4"; + nvidia,function = "rsvd1"; nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - sdmmc3_default: pinmux-sdmmc3-default { - drive_sdio3 { - nvidia,pins = "drive_sdio3"; - nvidia,high-speed-mode = ; - nvidia,schmitt = ; - nvidia,pull-down-strength = <22>; - nvidia,pull-up-strength = <36>; - nvidia,slew-rate-rising = ; - nvidia,slew-rate-falling = ; + gpio-x1-aud { + nvidia,pins = "gpio_x1_aud_px1"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; }; - sdmmc3_clk_pa6 { - nvidia,pins = "sdmmc3_clk_pa6"; - nvidia,function = "sdmmc3"; - nvidia,pull = ; + gpio-x3-aud { + nvidia,pins = "gpio_x3_aud_px3"; + nvidia,function = "rsvd3"; + nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - sdmmc3_cmd_pa7 { - nvidia,pins = "sdmmc3_cmd_pa7", - "sdmmc3_dat0_pb7", - "sdmmc3_dat1_pb6", - "sdmmc3_dat2_pb5", - "sdmmc3_dat3_pb4", - "kb_col4_pq4", - "sdmmc3_clk_lb_out_pee4", - "sdmmc3_clk_lb_in_pee5", - "sdmmc3_cd_n_pv2"; - nvidia,function = "sdmmc3"; + gpio-x6-aud { + nvidia,pins = "gpio_x6_aud_px6"; + nvidia,function = "rsvd4"; nvidia,pull = ; nvidia,tristate = ; nvidia,enable-input = ; }; - }; - sdmmc3_vdd_default: pinmux-sdmmc3-vdd-default { - gmi_clk_pk1 { - nvidia,pins = "gmi_clk_pk1"; - 
nvidia,function = "gmi"; - nvidia,pull = ; - nvidia,tristate = ; + usb-vbus { + nvidia,pins = "usb_vbus_en0_pn4", + "usb_vbus_en1_pn5"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; nvidia,enable-input = ; }; - }; - vdd_lcd_default: pinmux-vdd-lcd-default { - sdmmc4_clk_pcc4 { - nvidia,pins = "sdmmc4_clk_pcc4"; - nvidia,function = "sdmmc4"; - nvidia,pull = ; - nvidia,tristate = ; - nvidia,enable-input = ; + /* GPIO power/drive control */ + drive-sdio1 { + nvidia,pins = "drive_sdio1"; + nvidia,high-speed-mode = ; + nvidia,schmitt = ; + nvidia,pull-down-strength = <36>; + nvidia,pull-up-strength = <20>; + nvidia,slew-rate-rising = ; + nvidia,slew-rate-falling = ; + }; + + drive-sdio3 { + nvidia,pins = "drive_sdio3"; + nvidia,high-speed-mode = ; + nvidia,schmitt = ; + nvidia,pull-down-strength = <22>; + nvidia,pull-up-strength = <36>; + nvidia,slew-rate-rising = ; + nvidia,slew-rate-falling = ; + }; + + drive-gma { + nvidia,pins = "drive_gma"; + nvidia,high-speed-mode = ; + nvidia,schmitt = ; + nvidia,pull-down-strength = <2>; + nvidia,pull-up-strength = <2>; + nvidia,slew-rate-rising = ; + nvidia,slew-rate-falling = ; }; }; }; @@ -267,7 +1203,33 @@ serial@70006040 { }; serial@70006200 { - /* Bluetooth */ + compatible = "nvidia,tegra114-hsuart", "nvidia,tegra30-hsuart"; + reset-names = "serial"; + /delete-property/ reg-shift; + status = "okay"; + + nvidia,adjust-baud-rates = <0 9600 100>, + <9600 115200 200>, + <1000000 4000000 136>; + + bluetooth { + compatible = "brcm,bcm4334-bt"; + max-speed = <4000000>; + + clocks = <&tegra_pmc TEGRA_PMC_CLK_BLINK>; + clock-names = "txco"; + + interrupt-parent = <&gpio>; + interrupts = ; + interrupt-names = "host-wakeup"; + + device-wakeup-gpios = <&gpio TEGRA_GPIO(EE, 1) GPIO_ACTIVE_HIGH>; + shutdown-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio TEGRA_GPIO(Q, 6) GPIO_ACTIVE_LOW>; + + vbat-supply = <&vdd_3v3_com>; + vddio-supply = <&vdd_1v8_vio>; + }; }; serial@70006300 { @@ -278,10 +1240,6 @@ serial@70006300 { pwm@7000a000 { status = "okay"; - - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&pwm_default>; - pinctrl-1 = <&pwm_sleep>; }; i2c@7000c000 { @@ -292,27 +1250,35 @@ magnetometer@c { compatible = "asahi-kasei,ak09911"; reg = <0xc>; - vdd-supply = <&vdd_3v3_sys>; + /* no DRDY (polling) */ + + vdd-supply = <&vdd_2v85_sen>; + vid-supply = <&vdd_1v8_vio>; + + mount-matrix = "0", "1", "0", + "1", "0", "0", + "0", "0","-1"; }; rt5639: audio-codec@1c { compatible = "realtek,rt5639"; reg = <0x1c>; - interrupt-parent = <&gpio>; - interrupts = ; - - realtek,ldo1-en-gpios = <&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>; + realtek,ldo1-en-gpios = + <&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&codec_default>; + clocks = <&tegra_pmc TEGRA_PMC_CLK_OUT_1>; + clock-names = "mclk"; }; temp_sensor: temperature-sensor@4c { compatible = "onnn,nct1008"; reg = <0x4c>; - vcc-supply = <&vdd_3v3_sys>; + interrupt-parent = <&gpio>; + interrupts = ; + + vcc-supply = <&vdd_1v8_vio>; #thermal-sensor-cells = <1>; }; @@ -323,12 +1289,12 @@ motion-tracker@68 { interrupt-parent = <&gpio>; interrupts = ; + vdd-supply = <&vdd_2v85_sen>; + vddio-supply = <&vdd_1v8_vio>; + mount-matrix = "0", "-1", "0", "1", "0", "0", "0", "0", "1"; - - pinctrl-names = "default"; - pinctrl-0 = <&imu_default>; }; }; @@ -339,6 +1305,8 @@ i2c@7000c400 { power-sensor@44 { compatible = "ti,ina230"; reg = <0x44>; + + shunt-resistor = <5000>; }; }; @@ -350,12 +1318,13 @@ light-sensor@1c { compatible = 
"dynaimage,al3320a"; reg = <0x1c>; - vdd-supply = <&vdd_3v3_sys>; + vdd-supply = <&vdd_1v8_vio>; }; }; - i2c@7000c700 { - /* HDMI DDC */ + hdmi_ddc: i2c@7000c700 { + status = "okay"; + clock-frequency = <10000>; }; i2c@7000d000 { @@ -372,12 +1341,36 @@ palmas: pmic@58 { ti,system-power-controller; + palmas_gpadc: adc { + compatible = "ti,palmas-gpadc"; + interrupts = <18 IRQ_TYPE_NONE>, + <16 IRQ_TYPE_NONE>, + <17 IRQ_TYPE_NONE>; + + ti,channel0-current-microamp = <5>; + ti,channel3-current-microamp = <400>; + ti,enable-extended-delay; + + #io-channel-cells = <1>; + }; + + palmas_extcon: extcon { + compatible = "ti,palmas-usb-vid"; + ti,enable-vbus-detection; + ti,enable-id-detection; + }; + palmas_gpio: gpio { compatible = "ti,palmas-gpio"; gpio-controller; #gpio-cells = <2>; }; + palmas_clk32kg@0 { + compatible = "ti,palmas-clk32kg"; + #clock-cells = <0>; + }; + pinmux { compatible = "ti,tps65913-pinctrl"; ti,palmas-enable-dvfs1; @@ -441,17 +1434,18 @@ pin_vac { pmic { compatible = "ti,tps65913-pmic", "ti,palmas-pmic"; - ldo1-in-supply = <&tps65913_smps7>; - ldo2-in-supply = <&tps65913_smps7>; - ldo4-in-supply = <&tps65913_smps8>; - ldo5-in-supply = <&tps65913_smps9>; - ldo6-in-supply = <&tps65913_smps9>; - ldo7-in-supply = <&tps65913_smps9>; - ldo9-in-supply = <&tps65913_smps9>; + ldo1-in-supply = <&vddio_ddr>; + ldo2-in-supply = <&vddio_ddr>; + ldo4-in-supply = <&vdd_1v8_vio>; + ldo5-in-supply = <&vcore_emmc>; + ldo6-in-supply = <&vcore_emmc>; + ldo7-in-supply = <&vcore_emmc>; + ldo9-in-supply = <&vcore_emmc>; + ldoln-in-supply = <&vdd_smps10_out2>; regulators { - tps65913_smps123: smps123 { - regulator-name = "vdd-cpu"; + vdd_cpu: smps123 { + regulator-name = "vdd_cpu"; regulator-min-microvolt = <900000>; regulator-max-microvolt = <1350000>; regulator-always-on; @@ -460,8 +1454,8 @@ tps65913_smps123: smps123 { ti,mode-sleep = <3>; }; - tps65913_smps45: smps45 { - regulator-name = "vdd-core"; + vdd_core: smps45 { + regulator-name = "vdd_core"; regulator-min-microvolt = <900000>; regulator-max-microvolt = <1400000>; regulator-always-on; @@ -469,101 +1463,95 @@ tps65913_smps45: smps45 { ti,roof-floor = <3>; }; - smps6 { - regulator-name = "va-lcd-hv"; - regulator-min-microvolt = <1000000>; - regulator-max-microvolt = <1000000>; - regulator-always-on; - regulator-boot-on; - }; + /* smps6 disabled */ - tps65913_smps7: smps7 { - regulator-name = "vdd-ddr"; + vddio_ddr: smps7 { + regulator-name = "vddio_ddr"; regulator-min-microvolt = <1350000>; regulator-max-microvolt = <1350000>; regulator-always-on; regulator-boot-on; }; - tps65913_smps8: smps8 { - regulator-name = "vdd-1v8"; + vdd_1v8_vio: smps8 { + regulator-name = "vdd_1v8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-always-on; regulator-boot-on; }; - tps65913_smps9: smps9 { - regulator-name = "vdd-sd"; + vcore_emmc: smps9 { + regulator-name = "vdd_emmc"; regulator-min-microvolt = <2900000>; regulator-max-microvolt = <2900000>; - regulator-always-on; + regulator-boot-on; }; - tps65913_smps10_out1: smps10_out1 { - regulator-name = "vd-smps10-out1"; + smps10_out1 { + regulator-name = "vd_smps10_out1"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-always-on; regulator-boot-on; }; - tps65913_smps10_out2: smps10_out2 { - regulator-name = "vd-smps10-out2"; + vdd_smps10_out2: smps10_out2 { + regulator-name = "vd_smps10_out2"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-always-on; regulator-boot-on; }; - tps65913_ldo1: ldo1 { 
- regulator-name = "vdd-hdmi-pll"; + avdd_hdmi_pll: ldo1 { + regulator-name = "avdd_hdmi_pll"; regulator-min-microvolt = <1050000>; regulator-max-microvolt = <1050000>; regulator-always-on; + regulator-boot-on; ti,roof-floor = <3>; }; - tps65913_ldo2: ldo2 { - regulator-name = "vdd-2v8-dsi-csi"; + avdd_dsi_csi: ldo2 { + regulator-name = "avdd_dsi_csi"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-boot-on; }; ldo3 { - regulator-name = "vpp-fuse"; + regulator-name = "vpp_fuse"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; - ldo4 { - regulator-name = "vdd-1v2-cam"; + vdd_1v2_cam: ldo4 { + regulator-name = "vdd_1v2_cam"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; }; - ldo5 { - regulator-name = "vdd-cam"; + avdd_2v8_cam: ldo5 { + regulator-name = "avdd_cam2"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; }; - ldo6 { - regulator-name = "vdd-dev"; + vdd_2v85_sen: ldo6 { + regulator-name = "vdd_dev"; regulator-min-microvolt = <2850000>; regulator-max-microvolt = <2850000>; - regulator-boot-on; }; - ldo7 { - regulator-name = "vdd-2v8-cam"; + avdd_2v8_af: ldo7 { + regulator-name = "avdd_2v8_cam"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; }; - tps65913_ldo8: ldo8 { - regulator-name = "vdd-rtc"; + ldo8 { + regulator-name = "vdd_rtc"; regulator-min-microvolt = <950000>; regulator-max-microvolt = <950000>; regulator-always-on; @@ -571,23 +1559,24 @@ tps65913_ldo8: ldo8 { ti,enable-ldo8-tracking; }; - tps65913_ldo9: ldo9 { - regulator-name = "vdd-sdmmc"; - regulator-min-microvolt = <1800000>; + vddio_usd: ldo9 { + regulator-name = "vddio_usd"; + /* min voltage of 1.8v is not stable */ + regulator-min-microvolt = <2900000>; regulator-max-microvolt = <2900000>; }; - tps65913_ldoln: ldoln { - regulator-name = "vdd-hdmi"; + avdd_hdmi: ldoln { + regulator-name = "avdd_hdmi"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; + regulator-boot-on; }; - ldousb { - regulator-name = "vdd-usb"; + avdd_usb: ldousb { + regulator-name = "avdd_usb"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - regulator-always-on; regulator-boot-on; }; }; @@ -596,19 +1585,89 @@ ldousb { rtc { compatible = "ti,palmas-rtc"; interrupt-parent = <&palmas>; - interrupts = <8 0>; + interrupts = <8 IRQ_TYPE_NONE>; }; }; }; + pmc@7000e400 { + status = "okay"; + nvidia,suspend-mode = <2>; + nvidia,cpu-pwr-good-time = <300>; + nvidia,cpu-pwr-off-time = <300>; + nvidia,core-pwr-good-time = <641 3845>; + nvidia,core-pwr-off-time = <2000>; + nvidia,core-power-req-active-high; + nvidia,sys-clock-req-active-high; + + /* Clear DEV_ON bit in DEV_CTRL register of TPS65913 PMIC */ + i2c-thermtrip { + nvidia,i2c-controller-id = <4>; + nvidia,bus-addr = <0x58>; + nvidia,reg-addr = <0xA0>; + nvidia,reg-data = <0x00>; + }; + }; + ahub@70080000 { - i2s@70080300 { + /* HIFI CODEC (i2s1) */ + i2s@70080400 { + status = "okay"; + }; + + /* BT SCO (i2s3) */ + i2s@70080600 { status = "okay"; }; }; + brcm_wifi_pwrseq: pwrseq-wifi { + compatible = "mmc-pwrseq-simple"; + + clocks = <&tegra_pmc TEGRA_PMC_CLK_BLINK>; + clock-names = "ext_clock"; + + reset-gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>; + post-power-on-delay-ms = <300>; + power-off-delay-us = <300>; + }; + + /* WiFi */ mmc@78000000 { - /* WiFi */ + status = "okay"; + + #address-cells = <1>; + #size-cells = <0>; + + assigned-clocks = <&tegra_car TEGRA114_CLK_SDMMC1>; + assigned-clock-parents 
= <&tegra_car TEGRA114_CLK_PLL_P>; + assigned-clock-rates = <82000000>; + + max-frequency = <82000000>; + keep-power-in-suspend; + bus-width = <4>; + non-removable; + + sd-uhs-ddr50; + mmc-ddr-1_8v; + + power-gpios = <&gpio TEGRA_GPIO(CC, 5) GPIO_ACTIVE_HIGH>; + + nvidia,default-tap = <0x2>; + nvidia,default-trim = <0x2>; + + mmc-pwrseq = <&brcm_wifi_pwrseq>; + vmmc-supply = <&vdd_3v3_com>; + vqmmc-supply = <&vdd_1v8_vio>; + + wifi@1 { + compatible = "brcm,bcm4329-fmac"; + reg = <1>; + + interrupt-parent = <&gpio>; + interrupts = ; + interrupt-names = "host-wake"; + }; }; /* MicroSD card */ @@ -621,33 +1680,38 @@ mmc@78000400 { nvidia,default-tap = <0x3>; nvidia,default-trim = <0x3>; - vmmc-supply = <&vdd_usd>; - vqmmc-supply = <&tps65913_ldo9>; - - pinctrl-names = "default"; - pinctrl-0 = <&sdmmc3_default>; + vmmc-supply = <&vdd_2v9_usd>; + vqmmc-supply = <&vddio_usd>; }; + /* eMMC */ mmc@78000600 { - /* eMMC */ + status = "okay"; + bus-width = <8>; + + non-removable; + mmc-ddr-1_8v; + + vmmc-supply = <&vcore_emmc>; + vqmmc-supply = <&vdd_1v8_vio>; }; + /* Peripheral USB via ASUS connector */ usb@7d000000 { compatible = "nvidia,tegra114-udc"; status = "okay"; dr_mode = "peripheral"; - - /* Peripheral USB via ASUS connector */ }; usb-phy@7d000000 { status = "okay"; + dr_mode = "peripheral"; + vbus-supply = <&avdd_usb>; }; + /* Host USB via dock */ usb@7d008000 { status = "okay"; - - /* Host USB via dock */ }; usb-phy@7d008000 { @@ -658,16 +1722,12 @@ usb-phy@7d008000 { backlight: backlight { compatible = "pwm-backlight"; - enable-gpios = <&gpio TEGRA_GPIO(H, 2) GPIO_ACTIVE_HIGH>; - power-supply = <&vdd_5v0_sys>; + power-supply = <&vdd_3v7_bl>; pwms = <&pwm 1 1000000>; brightness-levels = <1 255>; num-interpolated-steps = <254>; default-brightness-level = <224>; - - pinctrl-names = "default"; - pinctrl-0 = <&backlight_default>; }; /* PMIC has a built-in 32KHz oscillator which is used by PMC */ @@ -678,13 +1738,22 @@ clk32k_in: clock-32k { clock-output-names = "pmic-oscillator"; }; - gpio-hall-sensor { - compatible = "gpio-keys"; + connector { + compatible = "hdmi-connector"; + type = "d"; - label = "GPIO Hall Effect Sensor"; + hpd-gpios = <&gpio TEGRA_GPIO(N, 7) GPIO_ACTIVE_HIGH>; + ddc-i2c-bus = <&hdmi_ddc>; - pinctrl-names = "default"; - pinctrl-0 = <&gpio_hall_sensor_default>; + port { + connector_in: endpoint { + remote-endpoint = <&hdmi_out>; + }; + }; + }; + + extcon-keys { + compatible = "gpio-keys"; switch-hall-sensor { label = "Hall Effect Sensor"; @@ -694,17 +1763,20 @@ switch-hall-sensor { linux,can-disable; wakeup-source; }; + + switch-lineout-detect { + label = "Audio dock line-out detect"; + gpios = <&gpio TEGRA_GPIO(X, 5) GPIO_ACTIVE_LOW>; + linux,input-type = ; + linux,code = ; + debounce-interval = <10>; + }; }; gpio-keys { compatible = "gpio-keys"; - label = "GPIO Buttons"; - - pinctrl-names = "default"; - pinctrl-0 = <&gpio_keys_default>; - - button-power { + key-power { label = "Power"; gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>; linux,code = ; @@ -712,14 +1784,14 @@ button-power { wakeup-source; }; - button-volume-down { + key-volume-down { label = "Volume Down"; gpios = <&gpio TEGRA_GPIO(R, 1) GPIO_ACTIVE_LOW>; linux,code = ; debounce-interval = <10>; }; - button-volume-up { + key-volume-up { label = "Volume Up"; gpios = <&gpio TEGRA_GPIO(R, 2) GPIO_ACTIVE_LOW>; linux,code = ; @@ -739,13 +1811,16 @@ sound { "Speakers", "SPORN", "Speakers", "SPOLP", "Speakers", "SPOLN", - "Mic Jack", "MICBIAS1", - "IN2P", "Mic Jack"; + "IN1P", "Mic Jack", + "IN1N", "Mic Jack", + 
"DMIC1", "Int Mic", + "DMIC2", "Int Mic"; - nvidia,i2s-controller = <&tegra_i2s0>; + nvidia,i2s-controller = <&tegra_i2s1>; nvidia,audio-codec = <&rt5639>; nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(R, 7) GPIO_ACTIVE_LOW>; + nvidia,int-mic-en-gpios = <&gpio TEGRA_GPIO(K, 3) GPIO_ACTIVE_HIGH>; clocks = <&tegra_car TEGRA114_CLK_PLL_A>, <&tegra_car TEGRA114_CLK_PLL_A_OUT0>, @@ -757,14 +1832,11 @@ sound { assigned-clock-parents = <&tegra_car TEGRA114_CLK_PLL_A_OUT0>, <&tegra_car TEGRA114_CLK_EXTERN1>; - - pinctrl-names = "default"; - pinctrl-0 = <&hp_det_default>; }; vdd_5v0_sys: regulator-5v0-sys { compatible = "regulator-fixed"; - regulator-name = "vdd_5v0"; + regulator-name = "vdd_5v0_sys"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-always-on; @@ -773,37 +1845,119 @@ vdd_5v0_sys: regulator-5v0-sys { vdd_3v3_sys: regulator-3v3-sys { compatible = "regulator-fixed"; - regulator-name = "vdd_3v3"; + regulator-name = "vdd_3v3_sys"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; regulator-always-on; regulator-boot-on; }; - vdd_lcd: regulator-vdd-lcd { + dvdd_1v8_lcd: regulator-vdd-lcd { compatible = "regulator-fixed"; - regulator-name = "vdd_lcd_1v8"; + regulator-name = "dvdd_1v8_lcd"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; - vin-supply = <&tps65913_smps8>; + regulator-boot-on; + gpio = <&palmas_gpio 4 GPIO_ACTIVE_HIGH>; enable-active-high; - gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>; + vin-supply = <&vdd_1v8_vio>; + }; + + vdd_3v7_bl: regulator-bl-en { + compatible = "regulator-fixed"; + regulator-name = "vdd_3v7_bl"; + regulator-min-microvolt = <3700000>; + regulator-max-microvolt = <3700000>; regulator-boot-on; + gpio = <&gpio TEGRA_GPIO(H, 2) GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <&vdd_5v0_sys>; + }; - pinctrl-names = "default"; - pinctrl-0 = <&vdd_lcd_default>; + hdmi_5v0_sys: regulator-hdmi { + compatible = "regulator-fixed"; + regulator-name = "vdd_5v0_hdmi"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-boot-on; + gpio = <&gpio TEGRA_GPIO(P, 2) GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <&vdd_smps10_out2>; }; - vdd_usd: regulator-vdd-usd { + vdd_2v9_usd: regulator-vdd-usd { compatible = "regulator-fixed"; regulator-name = "vdd_sd_slot"; regulator-min-microvolt = <2900000>; regulator-max-microvolt = <2900000>; - vin-supply = <&tps65913_smps9>; - enable-active-high; + regulator-boot-on; gpio = <&gpio TEGRA_GPIO(K, 1) GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <&vcore_emmc>; + }; - pinctrl-names = "default"; - pinctrl-0 = <&sdmmc3_vdd_default>; + vdd_1v8_cam: regulator-cam-vio { + compatible = "regulator-fixed"; + regulator-name = "vdd_1v8_cam"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + gpio = <&palmas_gpio 6 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <&vdd_1v8_vio>; + }; + + vdd_1v2_xusb: regulator-xusb-vio { + compatible = "regulator-fixed"; + regulator-name = "avddio_1v2_xusb"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-boot-on; + gpio = <&palmas_gpio 3 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + vdd_3v3_xusb: regulator-xusb-vdd { + compatible = "regulator-fixed"; + regulator-name = "hvdd_3v3_xusb"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + gpio = <&palmas_gpio 1 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + 
vdd_3v3_com: regulator-com { + compatible = "regulator-fixed"; + regulator-name = "vdd_3v3_com"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + gpio = <&gpio TEGRA_GPIO(U, 1) GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <&vdd_3v3_sys>; + }; + + vdd_3v3_touch: regulator-touch-pwr { + compatible = "regulator-fixed"; + regulator-name = "vdd_3v3_touch"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + gpio = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <&vdd_3v3_sys>; + }; + + vdd_1v8_touch: regulator-touch-vio { + compatible = "regulator-fixed"; + regulator-name = "vdd_1v8_touch"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + gpio = <&gpio TEGRA_GPIO(H, 4) GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <&vdd_3v3_sys>; }; }; diff --git a/arch/arm/boot/dts/nvidia/tegra20-trimslice.dts b/arch/arm/boot/dts/nvidia/tegra20-trimslice.dts index 7cae6ad5754469..4caeeb9f1e1d8a 100644 --- a/arch/arm/boot/dts/nvidia/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/nvidia/tegra20-trimslice.dts @@ -2,6 +2,7 @@ /dts-v1/; #include +#include #include "tegra20.dtsi" #include "tegra20-cpu-opp.dtsi" @@ -201,16 +202,17 @@ uca { conf_ata { nvidia,pins = "ata", "atc", "atd", "ate", "crtp", "dap2", "dap3", "dap4", "dta", - "dtb", "dtc", "dtd", "dte", "gmb", - "gme", "i2cp", "pta", "slxc", "slxd", - "spdi", "spdo", "uda"; + "dtb", "dtc", "dtd", "gmb", "gme", + "i2cp", "pta", "slxc", "slxd", "spdi", + "spdo", "uda"; nvidia,pull = ; nvidia,tristate = ; }; conf_atb { nvidia,pins = "atb", "cdev1", "cdev2", "dap1", - "gma", "gmc", "gmd", "gpu", "gpu7", - "gpv", "sdio1", "slxa", "slxk", "uac"; + "dte", "gma", "gmc", "gmd", "gpu", + "gpu7", "gpv", "sdio1", "slxa", "slxk", + "uac"; nvidia,pull = ; nvidia,tristate = ; }; @@ -408,6 +410,24 @@ key-power { }; }; + leds { + compatible = "gpio-leds"; + + led-ds2 { + color = ; + function = LED_FUNCTION_INDICATOR; + function-enumerator = <2>; + gpios = <&gpio TEGRA_GPIO(D, 2) GPIO_ACTIVE_LOW>; + }; + + led-ds3 { + color = ; + function = LED_FUNCTION_INDICATOR; + function-enumerator = <3>; + gpios = <&gpio TEGRA_GPIO(BB, 5) GPIO_ACTIVE_LOW>; + }; + }; + poweroff { compatible = "gpio-poweroff"; gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>; diff --git a/arch/arm/boot/dts/nxp/imx/imx1.dtsi b/arch/arm/boot/dts/nxp/imx/imx1.dtsi index 389ecb1ebf8f1b..a1a89ccacf0558 100644 --- a/arch/arm/boot/dts/nxp/imx/imx1.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx1.dtsi @@ -134,7 +134,7 @@ pwm: pwm@208000 { clock-names = "ipg", "per"; }; - dma: dma@209000 { + dma: dma-controller@209000 { compatible = "fsl,imx1-dma"; reg = <0x00209000 0x1000>; interrupts = <61 60>; diff --git a/arch/arm/boot/dts/nxp/imx/imx27.dtsi b/arch/arm/boot/dts/nxp/imx/imx27.dtsi index ec3ccc8f4095ff..989b7659b6692a 100644 --- a/arch/arm/boot/dts/nxp/imx/imx27.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx27.dtsi @@ -88,7 +88,7 @@ aipi1: bus@10000000 { /* AIPI1 */ reg = <0x10000000 0x20000>; ranges; - dma: dma@10001000 { + dma: dma-controller@10001000 { compatible = "fsl,imx27-dma"; reg = <0x10001000 0x1000>; interrupts = <32>; diff --git a/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts b/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts index 2117de872703bf..0d336cbdb4513e 100644 --- a/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts +++ b/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts @@ -175,8 +175,8 @@ expander: pca9554@20 
{ gpio-controller; }; - sensor2: lm75@49 { - compatible = "lm75"; + sensor2: temperature-sensor@49 { + compatible = "national,lm75b"; reg = <0x49>; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso index 151e9cee3c87ee..2527bfe1314595 100644 --- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso +++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso @@ -34,9 +34,7 @@ reg_1p2v: regulator-1p2v { &display0 { status = "okay"; -}; -&display0 { port@1 { display0_out: endpoint { remote-endpoint = <&sii9022_in>; @@ -83,7 +81,3 @@ sii9022_out: endpoint { &panel_dpi { status = "disabled"; }; - -&tve { - status = "disabled"; -}; diff --git a/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi b/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi index b2d7271d1d24cc..c34ee84bd71675 100644 --- a/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx53-tqma53.dtsi @@ -254,8 +254,8 @@ pmic: mc34708@8 { interrupts = <6 4>; /* PATA_DATA6, active high */ }; - sensor1: lm75@48 { - compatible = "lm75"; + sensor1: temperature-sensor@48 { + compatible = "national,lm75b"; reg = <0x48>; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-cm-fx6.dts b/arch/arm/boot/dts/nxp/imx/imx6q-cm-fx6.dts index 95b49fc83f7be9..299106fbe51c4a 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6q-cm-fx6.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6q-cm-fx6.dts @@ -127,12 +127,21 @@ simple-audio-card,codec { }; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + + spdif_in: spdif-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif>; - spdif-out; - spdif-in; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>, <&spdif_in>; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts b/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts index a7d5693c5ab752..8d2b608e0b90fc 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6q-prti6q.dts @@ -111,12 +111,21 @@ simple-audio-card,codec { }; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + + spdif_in: spdif-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif>; - spdif-in; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>, <&spdif_in>; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-tbs2910.dts b/arch/arm/boot/dts/nxp/imx/imx6q-tbs2910.dts index 7c298d9aa21e10..5353a0c2442050 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6q-tbs2910.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6q-tbs2910.dts @@ -90,11 +90,16 @@ sound-sgtl5000 { ssi-controller = <&ssi1>; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "On-board SPDIF"; - spdif-controller = <&spdif>; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi index ea40623d12e5fd..edf55760a5c1a2 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi @@ -197,11 +197,20 @@ sound { ssi-controller = <&ssi1>; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + + spdif_in: spdif-in { + compatible = 
"linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + sound_spdif: sound-spdif { compatible = "fsl,imx-audio-spdif"; - spdif-controller = <&spdif>; - spdif-in; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>, <&spdif_in>; model = "imx-spdif"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi index 3a46ade3b6bd93..9e97ef5e43f273 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apf6dev.dtsi @@ -121,11 +121,16 @@ sound { mux-ext-port = <3>; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif>; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi index 758eaf9d93d2a4..f7fac86f0a6bc1 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-aristainetos2.dtsi @@ -506,7 +506,7 @@ MX6QDL_PAD_EIM_A22__GPIO2_IO16 0x1b0b0 /* PCIe reset */ >; }; - pinctrl_gpmi_nand: gpmi-nand { + pinctrl_gpmi_nand: gpminandgrp { fsl,pins = < MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1 MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi index d3a7a6eeb8e09e..b01670cdd52c38 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi @@ -142,12 +142,21 @@ sound { ssi-controller = <&ssi1>; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + + spdif_in: spdif-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + /* Optional S/PDIF in on SODIMM 88 and out on SODIMM 90, 137 or 168 */ sound_spdif: sound-spdif { compatible = "fsl,imx-audio-spdif"; - spdif-controller = <&spdif>; - spdif-in; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>, <&spdif_in>; model = "imx-spdif"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi index 761566ae3cf5c0..bd66430c1d78d0 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-cubox-i.dtsi @@ -100,12 +100,17 @@ v_usb1: regulator-v-usb1 { vin-supply = <&v_5v0>; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "Integrated SPDIF"; /* IMX6 doesn't implement this yet */ - spdif-controller = <&spdif>; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>; }; gpio-keys { diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi index 082a2e3a391fe9..b57f4073f881e3 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw52xx.dtsi @@ -761,7 +761,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x170b9 @@ -774,7 +774,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 
MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi index 8ec442038ea01a..090c0057d1179e 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw53xx.dtsi @@ -750,7 +750,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -763,7 +763,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi index 9df9f79affae7e..0ed6d25024a24c 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw54xx.dtsi @@ -833,7 +833,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -846,7 +846,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw553x.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw553x.dtsi index 7f16c602cc0757..c6e231de674aa3 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw553x.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw553x.dtsi @@ -704,7 +704,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -717,7 +717,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi index 7693f92195d50f..d0f648938cae78 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw560x.dtsi @@ -896,7 +896,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -909,7 +909,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi index 9d0836df0fed41..71911df881ccca 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5903.dtsi @@ -680,7 +680,7 @@ MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x1b0b0 /* OC */ >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x4001b0b0 /* EMMY_EN */ MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x4001b0b0 /* EMMY_CFG1# */ @@ -710,7 +710,7 @@ 
MX6QDL_PAD_KEY_ROW1__SD2_VSELECT 0x17059 >; }; - pinctrl_usdhc2_100mhz: usdhc2grp100mhz { + pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9 MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9 @@ -723,7 +723,7 @@ MX6QDL_PAD_KEY_ROW1__SD2_VSELECT 0x170b9 >; }; - pinctrl_usdhc2_200mhz: usdhc2grp200mhz { + pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9 MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9 @@ -752,7 +752,7 @@ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -768,7 +768,7 @@ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi index f4cb9e1d34a9ac..716c324a745809 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5904.dtsi @@ -817,7 +817,7 @@ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -833,7 +833,7 @@ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5910.dtsi index 424dc7fcd53352..453dee4d9227f0 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5910.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5910.dtsi @@ -629,7 +629,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x170b9 @@ -642,7 +642,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5912.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5912.dtsi index 49ea25c7196766..add700bc11cc19 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5912.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-gw5912.dtsi @@ -569,7 +569,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -582,7 +582,7 @@ MX6QDL_PAD_NANDF_CS1__SD3_VSELECT 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-hummingboard.dtsi index a955c77cd4998a..d1ad65ab6b72ed 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-hummingboard.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-hummingboard.dtsi @@ -140,12 +140,17 @@ sound_codec: simple-audio-card,codec { }; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + 
#sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "On-board SPDIF"; /* IMX6 doesn't implement this yet */ - spdif-controller = <&spdif>; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-icore-rqs.dtsi index d339957cc09730..dff184a119f372 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-icore-rqs.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-icore-rqs.dtsi @@ -397,7 +397,7 @@ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1f059 /* PWR */ >; }; - pinctrl_usdhc3_100mhz: usdhc3grp_100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170B1 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100B1 @@ -408,7 +408,7 @@ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170B1 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp_200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170F9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100F9 @@ -434,7 +434,7 @@ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17070 >; }; - pinctrl_usdhc4_100mhz: usdhc4grp_100mhz { + pinctrl_usdhc4_100mhz: usdhc4-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD4_CMD__SD4_CMD 0x170B1 MX6QDL_PAD_SD4_CLK__SD4_CLK 0x100B1 @@ -449,7 +449,7 @@ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x170B1 >; }; - pinctrl_usdhc4_200mhz: usdhc4grp_200mhz { + pinctrl_usdhc4_200mhz: usdhc4-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD4_CMD__SD4_CMD 0x170F9 MX6QDL_PAD_SD4_CLK__SD4_CLK 0x100F9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi index 807f3c95e3ce9b..aca320ee8f476c 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6a.dtsi @@ -13,7 +13,7 @@ &fec { &i2c1 { lm75: temperature-sensor@49 { - compatible = "national,lm75"; + compatible = "national,lm75a"; reg = <0x49>; vs-supply = <®_mba6_3p3v>; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi index 789733a45b9590..c7bbd6195fef1b 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6b.dtsi @@ -23,7 +23,7 @@ &i2c1 { &i2c3 { lm75: temperature-sensor@49 { - compatible = "national,lm75"; + compatible = "national,lm75a"; reg = <0x49>; vs-supply = <®_mba6_3p3v>; }; @@ -50,12 +50,3 @@ rtc0: rtc@68 { reg = <0x68>; }; }; - -&iomuxc { - pinctrl_i2c1: i2c1grp { - fsl,pins = < - MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b899 - MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b899 - >; - }; -}; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi index 0a3deaf92eeab9..35b6bec7a3fab6 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-sabreauto.dtsi @@ -143,12 +143,17 @@ sound-cs42888 { "AIN2R", "Line In Jack"; }; + spdif_in: spdif-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-sabreauto-spdif", "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif>; - spdif-in; + audio-cpu = <&spdif>; + audio-codec = <&spdif_in>; }; backlight { @@ -690,7 +695,7 @@ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -705,7 +710,7 @@ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: 
usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi index 344ea935c7dac7..6152a9ed476822 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi @@ -59,20 +59,6 @@ MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x4001b899 >; }; - pinctrl_i2c3: i2c3grp { - fsl,pins = < - MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b899 - MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b899 - >; - }; - - pinctrl_i2c3_recovery: i2c3recoverygrp { - fsl,pins = < - MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x4001b899 - MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x4001b899 - >; - }; - pinctrl_pmic: pmicgrp { fsl,pins = < MX6QDL_PAD_NANDF_RB0__GPIO6_IO10 0x1b099 /* PMIC irq */ diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi index 68525f0205d3a6..828996382f2469 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi @@ -27,8 +27,8 @@ pmic: pmic@8 { reg = <0x08>; }; - sensor@48 { - compatible = "national,lm75"; + temperature-sensor@48 { + compatible = "national,lm75a"; reg = <0x48>; vs-supply = <®_3p3v>; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi index aeba0a2736002a..1d0966b8d99e2c 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi @@ -20,8 +20,8 @@ pmic: pmic@8 { reg = <0x08>; }; - sensor@48 { - compatible = "national,lm75"; + temperature-sensor@48 { + compatible = "national,lm75a"; reg = <0x48>; vs-supply = <®_3p3v>; }; @@ -33,3 +33,19 @@ eeprom@50 { vcc-supply = <®_3p3v>; }; }; + +&iomuxc { + pinctrl_i2c3: i2c3grp { + fsl,pins = < + MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b899 + MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b899 + >; + }; + + pinctrl_i2c3_recovery: i2c3recoverygrp { + fsl,pins = < + MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x4001b899 + MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x4001b899 + >; + }; +}; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi index e2fe337f7d9ed6..5a194f4c0cb9b3 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi @@ -373,7 +373,7 @@ MX6QDL_PAD_KEY_COL1__AUD5_TXFS 0x130b0 /* SSI1_FS */ >; }; - pinctrl_disp0_1: disp0grp-1 { + pinctrl_disp0_1: disp0-1-grp { fsl,pins = < MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10 MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10 @@ -406,7 +406,7 @@ MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10 >; }; - pinctrl_disp0_2: disp0grp-2 { + pinctrl_disp0_2: disp0-2-grp { fsl,pins = < MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10 MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-var-dart.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-var-dart.dtsi index 200559d7158dc0..d8283eade43e7c 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-var-dart.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-var-dart.dtsi @@ -346,7 +346,7 @@ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x17071 >; }; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170B9 MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100B9 @@ -357,7 +357,7 @@ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170B9 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170F9 MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100F9 diff --git 
a/arch/arm/boot/dts/nxp/imx/imx6qdl-var-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-var-som.dtsi index a1ea33c4eeb75c..59833e8d11d862 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-var-som.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-var-som.dtsi @@ -436,7 +436,7 @@ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x13059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhzgrp { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170B9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100B9 @@ -451,7 +451,7 @@ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x130B9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhzgrp { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170F9 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100F9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-wandboard.dtsi index 38abb6b50f6cbc..7130b9c3b3aa05 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-wandboard.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-wandboard.dtsi @@ -26,11 +26,16 @@ sound { mux-ext-port = <3>; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif>; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>; }; reg_1p5v: regulator-1p5v { diff --git a/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts b/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts index 31eee0419af71c..7c899291ab0dad 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6sl-evk.dts @@ -457,7 +457,7 @@ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x17059 >; }; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6SL_PAD_SD1_CMD__SD1_CMD 0x170b9 MX6SL_PAD_SD1_CLK__SD1_CLK 0x100b9 @@ -472,7 +472,7 @@ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170b9 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6SL_PAD_SD1_CMD__SD1_CMD 0x170f9 MX6SL_PAD_SD1_CLK__SD1_CLK 0x100f9 @@ -498,7 +498,7 @@ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x17059 >; }; - pinctrl_usdhc2_100mhz: usdhc2grp100mhz { + pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp { fsl,pins = < MX6SL_PAD_SD2_CMD__SD2_CMD 0x170b9 MX6SL_PAD_SD2_CLK__SD2_CLK 0x100b9 @@ -509,7 +509,7 @@ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170b9 >; }; - pinctrl_usdhc2_200mhz: usdhc2grp200mhz { + pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp { fsl,pins = < MX6SL_PAD_SD2_CMD__SD2_CMD 0x170f9 MX6SL_PAD_SD2_CLK__SD2_CLK 0x100f9 @@ -531,7 +531,7 @@ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x17059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6SL_PAD_SD3_CMD__SD3_CMD 0x170b9 MX6SL_PAD_SD3_CLK__SD3_CLK 0x100b9 @@ -542,7 +542,7 @@ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6SL_PAD_SD3_CMD__SD3_CMD 0x170f9 MX6SL_PAD_SD3_CLK__SD3_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6sl-warp.dts b/arch/arm/boot/dts/nxp/imx/imx6sl-warp.dts index 9d7c8884892a9b..2545c0fe47c828 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6sl-warp.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6sl-warp.dts @@ -166,7 +166,7 @@ MX6SL_PAD_SD2_RST__SD2_RESET 0x417059 >; }; - pinctrl_usdhc2_100mhz: usdhc2grp100mhz { + pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp { fsl,pins = < MX6SL_PAD_SD2_CMD__SD2_CMD 0x4170b9 MX6SL_PAD_SD2_CLK__SD2_CLK 0x4100b9 @@ -182,7 +182,7 @@ MX6SL_PAD_SD2_RST__SD2_RESET 0x4170b9 >; }; - pinctrl_usdhc2_200mhz: 
usdhc2grp200mhz { + pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp { fsl,pins = < MX6SL_PAD_SD2_CMD__SD2_CMD 0x4170f9 MX6SL_PAD_SD2_CLK__SD2_CLK 0x4100f9 @@ -209,7 +209,7 @@ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x417059 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX6SL_PAD_SD3_CMD__SD3_CMD 0x4170b9 MX6SL_PAD_SD3_CLK__SD3_CLK 0x4100b9 @@ -220,7 +220,7 @@ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x4170b9 >; }; - pinctrl_usdhc3_200mhz: usdhc3grp200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX6SL_PAD_SD3_CMD__SD3_CMD 0x4170f9 MX6SL_PAD_SD3_CLK__SD3_CLK 0x4100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-sabreauto.dts b/arch/arm/boot/dts/nxp/imx/imx6sx-sabreauto.dts index b0c27b9b02446b..dfbfb8119bf3b6 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6sx-sabreauto.dts @@ -97,11 +97,16 @@ sound-cs42888 { "AIN2R", "Line In Jack"; }; + spdif_in: spdif-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif>; - spdif-in; + audio-cpu = <&spdif>; + audio-codec = <&spdif_in>; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi index 7d4170c2773284..277a6e039045b5 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6sx-sdb.dtsi @@ -183,12 +183,17 @@ panel_in: endpoint { }; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx6sx-sdb-spdif", "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif>; - spdif-out; + audio-cpu = <&spdif>; + audio-codec = <&spdif_out>; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx-udoo-neo.dtsi index 725d0b5cb55f6a..bbf792ac4896a7 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6sx-udoo-neo.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6sx-udoo-neo.dtsi @@ -72,6 +72,11 @@ reg_wlan: regulator-wlan { }; }; +&clks { + assigned-clocks = <&clks IMX6SX_CLK_ENET_REF>; + assigned-clock-rates = <50000000>; +}; + &fec1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet1>; diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi index 9cfb99ac9e9daa..b74ee8948a7817 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi @@ -608,7 +608,7 @@ MX6UL_PAD_GPIO1_IO09__GPIO1_IO09 0x17059 /* SD1 RESET */ >; }; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9 @@ -620,7 +620,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcexpress.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcexpress.dts index ad7f63ca521a9d..0d3b1ab82eabad 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcexpress.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcexpress.dts @@ -112,7 +112,7 @@ MX6UL_PAD_GPIO1_IO04__GPIO1_IO04 0xb0 >; }; - pinctrl_ecspi3_master: ecspi3grp1 { + pinctrl_ecspi3_master: ecspi3-1-grp { fsl,pins = < MX6UL_PAD_UART2_RX_DATA__ECSPI3_SCLK 0x10b0 
MX6UL_PAD_UART2_CTS_B__ECSPI3_MOSI 0x10b0 @@ -121,7 +121,7 @@ MX6UL_PAD_UART2_TX_DATA__GPIO1_IO20 0x10b0 /* Chip Select */ >; }; - pinctrl_ecspi3_slave: ecspi3grp2 { + pinctrl_ecspi3_slave: ecspi3-2-grp { fsl,pins = < MX6UL_PAD_UART2_RX_DATA__ECSPI3_SCLK 0x10b0 MX6UL_PAD_UART2_CTS_B__ECSPI3_MOSI 0x10b0 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts index ed61ae8524fa2a..8aea8c99e2af59 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsbcpro.dts @@ -248,7 +248,7 @@ MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0 >; }; - pinctrl_ecspi1_master: ecspi1grp1 { + pinctrl_ecspi1_master: ecspi1-1-grp { fsl,pins = < MX6UL_PAD_LCD_DATA20__ECSPI1_SCLK 0x10b0 MX6UL_PAD_LCD_DATA22__ECSPI1_MOSI 0x10b0 @@ -309,7 +309,7 @@ MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02 0x1020 >; }; - pinctrl_lcdif_dat0_17: lcdifdatgrp0-17 { + pinctrl_lcdif_dat0_17: lcdifdat0-17-grp { fsl,pins = < MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x79 MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x79 @@ -332,14 +332,14 @@ MX6UL_PAD_LCD_DATA17__LCDIF_DATA17 0x79 >; }; - pinctrl_lcdif_clken: lcdifctrlgrp1 { + pinctrl_lcdif_clken: lcdifctrl-1-grp { fsl,pins = < MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x17050 MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x79 >; }; - pinctrl_lcdif_hvsync: lcdifctrlgrp2 { + pinctrl_lcdif_hvsync: lcdifctrl-2-grp { fsl,pins = < MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC 0x79 MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC 0x79 @@ -370,7 +370,7 @@ MX6UL_PAD_SNVS_TAMPER7__GPIO5_IO07 0x10b0 >; }; - pinctrl_sai2_sleep: sai2grp-sleep { + pinctrl_sai2_sleep: sai2-sleep-grp { fsl,pins = < MX6UL_PAD_JTAG_TRST_B__GPIO1_IO15 0x3000 MX6UL_PAD_JTAG_TCK__GPIO1_IO14 0x3000 @@ -381,7 +381,7 @@ MX6UL_PAD_SNVS_TAMPER7__GPIO5_IO07 0x3000 >; }; - pinctrl_uart2_4wires: uart2grp-4wires { + pinctrl_uart2_4wires: uart2-4wires-grp { fsl,pins = < MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x1b0b1 MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x1b0b1 @@ -390,7 +390,7 @@ MX6UL_PAD_UART2_RTS_B__UART2_DCE_RTS 0x1b0b1 >; }; - pinctrl_uart3_2wires: uart3grp-2wires { + pinctrl_uart3_2wires: uart3-2wires-grp { fsl,pins = < MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX 0x1b0b1 MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX 0x1b0b1 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsom.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsom.dtsi index 4a03ea6d24dc6f..9cc3eebb6b05e5 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsom.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-ccimx6ulsom.dtsi @@ -232,7 +232,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059 >; }; - pinctrl_usdhc1_sleep: usdhc1grp-sleep { + pinctrl_usdhc1_sleep: usdhc1-sleep-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__GPIO2_IO16 0x3000 MX6UL_PAD_SD1_CLK__GPIO2_IO17 0x3000 @@ -250,7 +250,7 @@ MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x08a0 >; }; - pinctrl_wifibt_ctrl_sleep: wifibt-ctrl-grp-sleep { + pinctrl_wifibt_ctrl_sleep: wifibt-ctrl-sleep-grp { fsl,pins = < MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00 0x3000 MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x3000 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts index cdbb8c435cd6aa..2a6bb5ff808add 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts @@ -365,7 +365,7 @@ MX6UL_PAD_ENET1_RX_ER__PWM8_OUT 0x110b0 }; pinctrl_tsc: tscgrp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0 MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0 MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0 @@ -410,7 +410,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059 >; 
}; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9 @@ -421,7 +421,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi index ee86c36205f955..118df2a457c952 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi @@ -346,7 +346,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059 >; }; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9 @@ -357,7 +357,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi index d8f7877349c98f..29d2f86d5e34a7 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi @@ -351,7 +351,7 @@ MX6UL_PAD_UART4_RX_DATA__UART4_DCE_RX 0x1b0b1 >; }; - pinctrl_usbotg1: usbotg1 { + pinctrl_usbotg1: usbotg1grp { fsl,pins = < MX6UL_PAD_GPIO1_IO04__GPIO1_IO04 0x1b0b0 >; diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-liteboard.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-liteboard.dts index 1d863a16bcf09c..5e62272acfba89 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-liteboard.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-liteboard.dts @@ -100,7 +100,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059 >; }; - pinctrl_usb_otg1_vbus: usb-otg1-vbus { + pinctrl_usb_otg1_vbus: usb-otg1-vbus-grp { fsl,pins = < MX6UL_PAD_ENET2_RX_DATA0__GPIO2_IO08 0x79 >; diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin-peb-wlbt-05.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin-peb-wlbt-05.dtsi index 04477fd4b9a98d..4a45fb784ff770 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin-peb-wlbt-05.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin-peb-wlbt-05.dtsi @@ -31,7 +31,7 @@ MX6UL_PAD_JTAG_MOD__GPIO1_IO10 0x3031 /* DEV WAKEUP */ >; }; - pinctrl_uart2_bt: uart2grp-bt { + pinctrl_uart2_bt: uart2-bt-grp { fsl,pins = < MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x17059 MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x17059 @@ -40,7 +40,7 @@ MX6UL_PAD_UART2_RTS_B__UART2_DCE_RTS 0x17059 >; }; - pinctrl_usdhc2_wl: usdhc2grp-wl { + pinctrl_usdhc2_wl: usdhc2-wl-grp { fsl,pins = < MX6UL_PAD_LCD_DATA18__USDHC2_CMD 0x10051 MX6UL_PAD_LCD_DATA19__USDHC2_CLK 0x10061 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin.dtsi index 38ea4dcfa2281d..bef5eb38a90d3d 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-phytec-segin.dtsi @@ -219,7 +219,7 @@ MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x4001b010 >; }; - pinctrl_flexcan1: flexcan1 { + pinctrl_flexcan1: flexcan1grp { fsl,pins = < MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX 0x0b0b0 MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX 0x0b0b0 @@ -275,7 +275,7 @@ MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x17059 >; }; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz 
{ + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9 @@ -286,7 +286,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi index 57e647fc323742..c9c0794f01a2b8 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi @@ -202,7 +202,7 @@ MX6UL_PAD_UART2_RX_DATA__GPIO1_IO21 0x4001b8b0 >; }; - pinctrl_pmic: pmic { + pinctrl_pmic: pmicgrp { fsl,pins = < /* PMIC irq */ MX6UL_PAD_CSI_DATA03__GPIO4_IO24 0x1b099 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul-mainboard.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul-mainboard.dts index ef76ece21010b1..20c810a81403a3 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul-mainboard.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul-mainboard.dts @@ -198,7 +198,7 @@ MX6UL_PAD_CSI_DATA01__GPIO4_IO22 0x0b0b0 /* WLAN_RESET */ >; }; - pinctrl_disp0_3: disp0grp-3 { + pinctrl_disp0_3: disp0-3-grp { fsl,pins = < MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x10 /* LSCLK */ MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x10 /* OE_ACD */ diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi index 864173e307097e..278120404d3175 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi @@ -578,19 +578,13 @@ &usdhc1 { }; &iomuxc { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_hog>; - - pinctrl_hog: hoggrp { - }; - pinctrl_led: ledgrp { fsl,pins = < MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x0b0b0 /* LED */ >; }; - pinctrl_disp0_1: disp0grp-1 { + pinctrl_disp0_1: disp0-1-grp { fsl,pins = < MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x10 /* LSCLK */ MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x10 /* OE_ACD */ @@ -623,7 +617,7 @@ MX6UL_PAD_LCD_DATA23__LCDIF_DATA23 0x10 >; }; - pinctrl_disp0_2: disp0grp-2 { + pinctrl_disp0_2: disp0-2-grp { fsl,pins = < MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x10 /* LSCLK */ MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x10 /* OE_ACD */ @@ -713,25 +707,25 @@ MX6UL_PAD_SNVS_TAMPER7__GPIO5_IO07 0x0b0b0 /* ETN PHY POWER */ >; }; - pinctrl_etnphy0_int: etnphy-intgrp-0 { + pinctrl_etnphy0_int: etnphy-int-0-grp { fsl,pins = < MX6UL_PAD_SNVS_TAMPER5__GPIO5_IO05 0x0b0b0 /* ETN PHY INT */ >; }; - pinctrl_etnphy0_rst: etnphy-rstgrp-0 { + pinctrl_etnphy0_rst: etnphy-rst-0-grp { fsl,pins = < MX6UL_PAD_SNVS_TAMPER6__GPIO5_IO06 0x0b0b0 /* ETN PHY RESET */ >; }; - pinctrl_etnphy1_int: etnphy-intgrp-1 { + pinctrl_etnphy1_int: etnphy-int-1-grp { fsl,pins = < MX6UL_PAD_CSI_DATA06__GPIO4_IO27 0x0b0b0 /* ETN PHY INT */ >; }; - pinctrl_etnphy1_rst: etnphy-rstgrp-1 { + pinctrl_etnphy1_rst: etnphy-rst-1-grp { fsl,pins = < MX6UL_PAD_CSI_DATA07__GPIO4_IO28 0x0b0b0 /* ETN PHY RESET */ >; diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi index d03694feaf5c4a..83b9de17cee2de 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi @@ -169,7 +169,7 @@ MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x17059 >; }; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9 
MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9 @@ -180,7 +180,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9 @@ -206,7 +206,7 @@ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x17059 >; }; - pinctrl_usdhc2_100mhz: usdhc2grp100mhz { + pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp { fsl,pins = < MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100b9 MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170b9 @@ -221,7 +221,7 @@ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x170b9 >; }; - pinctrl_usdhc2_200mhz: usdhc2grp200mhz { + pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp { fsl,pins = < MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100f9 MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi index 6bb12e0bbc7ec6..28fddbcdc55e70 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi @@ -323,7 +323,7 @@ MX6UL_PAD_GPIO1_IO08__GPIO1_IO08 0x79 >; }; - pinctrl_reg_vmmc: usdhc1regvmmc { + pinctrl_reg_vmmc: usdhc1regvmmc-grp { fsl,pins = < MX6UL_PAD_GPIO1_IO09__GPIO1_IO09 0x17059 >; @@ -339,14 +339,14 @@ MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x120b0 }; pinctrl_uart1: uart1grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1 MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1 >; }; pinctrl_uart2: uart2grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x1b0b1 MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x1b0b1 MX6UL_PAD_UART2_CTS_B__UART2_DCE_CTS 0x1b0b1 @@ -355,7 +355,7 @@ MX6UL_PAD_UART2_RTS_B__UART2_DCE_RTS 0x1b0b1 }; pinctrl_uart3: uart3grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX 0x1b0b1 MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX 0x1b0b1 MX6UL_PAD_UART3_CTS_B__UART3_DCE_CTS 0x1b0b1 @@ -364,21 +364,21 @@ MX6UL_PAD_UART3_RTS_B__UART3_DCE_RTS 0x1b0b1 }; pinctrl_uart4: uart4grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART4_TX_DATA__UART4_DCE_TX 0x1b0b1 MX6UL_PAD_UART4_RX_DATA__UART4_DCE_RX 0x1b0b1 >; }; pinctrl_uart5: uart5grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART5_TX_DATA__UART5_DCE_TX 0x1b0b1 MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x1b0b1 >; }; pinctrl_usb_otg1_id: usbotg1idgrp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_GPIO1_IO00__ANATOP_OTG1_ID 0x17059 >; }; @@ -394,7 +394,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059 >; }; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9 @@ -405,7 +405,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9 MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9 @@ -416,7 +416,7 @@ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170f9 >; }; - pinctrl_usdhc1_cd: usdhc1cd { + pinctrl_usdhc1_cd: usdhc1cd-grp { fsl,pins = < MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x17059 >; diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi index f5ad6b5c1ad01c..278152875f8e6c 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi.dtsi @@ -102,7 +102,7 @@ MX6UL_PAD_NAND_DATA07__RAWNAND_DATA07 0x0b0b1 >; }; - pinctrl_reg_vqmmc: usdhc1regvqmmc { + pinctrl_reg_vqmmc: usdhc1regvqmmcgrp 
{ fsl,pins = < MX6UL_PAD_GPIO1_IO05__GPIO1_IO05 0x17059 >; @@ -123,7 +123,7 @@ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x17059 >; }; - pinctrl_usdhc2_100mhz: usdhc2grp100mhz { + pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp { fsl,pins = < MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100b9 MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170b9 @@ -138,7 +138,7 @@ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x170b9 >; }; - pinctrl_usdhc2_200mhz: usdhc2grp200mhz { + pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp { fsl,pins = < MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100f9 MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170f9 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts b/arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts index c92e4e2f6ab9ca..6159ed70d96616 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts @@ -94,7 +94,7 @@ &wdog1 { }; &iomuxc { - pinctrl_gpmi_nand: gpmi-nand { + pinctrl_gpmi_nand: gpminandgrp { fsl,pins = < MX6UL_PAD_NAND_CLE__RAWNAND_CLE 0xb0b1 MX6UL_PAD_NAND_ALE__RAWNAND_ALE 0xb0b1 diff --git a/arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi index 9fe51884af79f5..62e41edcaf1d74 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi @@ -903,7 +903,7 @@ MX7D_PAD_LCD_VSYNC__LCD_VSYNC 0x79 /* SODIMM 82 */ >; }; - pinctrl_lvds_transceiver: lvdstx { + pinctrl_lvds_transceiver: lvdstxgrp { fsl,pins = < MX7D_PAD_ENET1_RGMII_RD2__GPIO7_IO2 0x14 /* SODIMM 63 */ MX7D_PAD_ENET1_RGMII_RD3__GPIO7_IO3 0x74 /* SODIMM 55 */ diff --git a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi index 52869e68f833c4..e1c401f468e16f 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi @@ -81,6 +81,12 @@ led2 { }; }; + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>, + <&adc2 0>, <&adc2 1>, <&adc2 2>, <&adc2 3>; + }; + reg_sd1_vmmc: regulator-sd1-vmmc { compatible = "regulator-fixed"; regulator-name = "VCC3V3_SD1"; @@ -310,7 +316,7 @@ &flexcan2 { &i2c1 { lm75: temperature-sensor@49 { - compatible = "national,lm75"; + compatible = "national,lm75a"; reg = <0x49>; vs-supply = <®_vcc_3v3>; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-nitrogen7.dts b/arch/arm/boot/dts/nxp/imx/imx7d-nitrogen7.dts index 9c6476bda4a020..7ee66be8bccbe6 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7d-nitrogen7.dts +++ b/arch/arm/boot/dts/nxp/imx/imx7d-nitrogen7.dts @@ -419,7 +419,7 @@ &iomuxc { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hog_1 &pinctrl_j2>; - pinctrl_hog_1: hoggrp-1 { + pinctrl_hog_1: hoggrp { fsl,pins = < MX7D_PAD_SD3_RESET_B__GPIO6_IO11 0x5d MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x7d @@ -665,7 +665,7 @@ &iomuxc_lpsr { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hog_2>; - pinctrl_hog_2: hoggrp-2 { + pinctrl_hog_2: hoggrp { fsl,pins = < MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x7d MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2 0x7d diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-pico.dtsi b/arch/arm/boot/dts/nxp/imx/imx7d-pico.dtsi index 8d5037ac03c7d1..a1574ccec89cb1 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7d-pico.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx7d-pico.dtsi @@ -444,14 +444,14 @@ MX7D_PAD_SD3_RESET_B__GPIO6_IO11 0x1 /* Ethernet reset */ >; }; - pinctrl_can1: can1frp { + pinctrl_can1: can1frpgrp { fsl,pins = < MX7D_PAD_SAI1_RX_DATA__FLEXCAN1_RX 0x59 MX7D_PAD_SAI1_TX_BCLK__FLEXCAN1_TX 0x59 >; }; - pinctrl_can2: can2frp { + pinctrl_can2: can2frpgrp { fsl,pins = < 
MX7D_PAD_SAI1_TX_SYNC__FLEXCAN2_RX 0x59 MX7D_PAD_SAI1_TX_DATA__FLEXCAN2_TX 0x59 @@ -499,19 +499,19 @@ MX7D_PAD_LCD_RESET__GPIO3_IO4 0x14 >; }; - pinctrl_pwm1: pwm1 { + pinctrl_pwm1: pwm1grp { fsl,pins = < MX7D_PAD_GPIO1_IO08__PWM1_OUT 0x7f >; }; - pinctrl_pwm2: pwm2 { + pinctrl_pwm2: pwm2grp { fsl,pins = < MX7D_PAD_GPIO1_IO09__PWM2_OUT 0x7f >; }; - pinctrl_pwm3: pwm3 { + pinctrl_pwm3: pwm3grp { fsl,pins = < MX7D_PAD_GPIO1_IO10__PWM3_OUT 0x7f >; @@ -563,7 +563,7 @@ MX7D_PAD_ECSPI2_MISO__UART7_DCE_RTS 0x79 >; }; - pinctrl_usbotg1_pwr: usbotg_pwr { + pinctrl_usbotg1_pwr: usbotgpwrgrp { fsl,pins = < MX7D_PAD_UART3_TX_DATA__GPIO4_IO5 0x14 >; @@ -581,7 +581,7 @@ MX7D_PAD_SD1_CD_B__GPIO5_IO0 0x15 >; }; - pinctrl_usdhc1_100mhz: usdhc1grp_100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX7D_PAD_SD1_CMD__SD1_CMD 0x5a MX7D_PAD_SD1_CLK__SD1_CLK 0x1a @@ -593,7 +593,7 @@ MX7D_PAD_SD1_CD_B__GPIO5_IO0 0x15 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp_200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX7D_PAD_SD1_CMD__SD1_CMD 0x5b MX7D_PAD_SD1_CLK__SD1_CLK 0x1b @@ -631,7 +631,7 @@ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp_100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX7D_PAD_SD3_CMD__SD3_CMD 0x5a MX7D_PAD_SD3_CLK__SD3_CLK 0x1a @@ -646,7 +646,7 @@ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a >; }; - pinctrl_usdhc3_200mhz: usdhc3grp_200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX7D_PAD_SD3_CMD__SD3_CMD 0x5b MX7D_PAD_SD3_CLK__SD3_CLK 0x1b diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts index 92cb45dacda616..eec526a9631118 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts +++ b/arch/arm/boot/dts/nxp/imx/imx7d-remarkable2.dts @@ -508,7 +508,7 @@ MX7D_PAD_SD2_DATA3__SD2_DATA3 0x59 >; }; - pinctrl_usdhc2_100mhz: usdhc2grp_100mhz { + pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp { fsl,pins = < MX7D_PAD_SD2_CMD__SD2_CMD 0x5a MX7D_PAD_SD2_CLK__SD2_CLK 0x1a @@ -519,7 +519,7 @@ MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5a >; }; - pinctrl_usdhc2_200mhz: usdhc2grp_200mhz { + pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp { fsl,pins = < MX7D_PAD_SD2_CMD__SD2_CMD 0x5b MX7D_PAD_SD2_CLK__SD2_CLK 0x1b @@ -546,7 +546,7 @@ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x19 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp_100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX7D_PAD_SD3_CMD__SD3_CMD 0x5a MX7D_PAD_SD3_CLK__SD3_CLK 0x1a @@ -562,7 +562,7 @@ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1a >; }; - pinctrl_usdhc3_200mhz: usdhc3grp_200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX7D_PAD_SD3_CMD__SD3_CMD 0x5b MX7D_PAD_SD3_CLK__SD3_CLK 0x1b diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-sdb-reva.dts b/arch/arm/boot/dts/nxp/imx/imx7d-sdb-reva.dts index cabdaa6dc518df..40156cd9195f82 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7d-sdb-reva.dts +++ b/arch/arm/boot/dts/nxp/imx/imx7d-sdb-reva.dts @@ -21,23 +21,21 @@ &fec2 { }; &iomuxc { - imx7d-sdb { - pinctrl_tsc2046_pendown: tsc2046_pendown { - fsl,pins = < - MX7D_PAD_EPDC_DATA13__GPIO2_IO13 0x59 - >; - }; - - pinctrl_hog: hoggrp { - fsl,pins = < - MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x34 /* bt reg on */ - >; - }; - - pinctrl_usb_otg2_vbus_reg_reva: usbotg2vbusregrevagrp { - fsl,pins = < - MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14 - >; - }; + pinctrl_tsc2046_pendown: tsc2046-pendowngrp { + fsl,pins = < + MX7D_PAD_EPDC_DATA13__GPIO2_IO13 0x59 + >; + }; + + pinctrl_hog: hoggrp { + fsl,pins = < + MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x34 /* bt 
reg on */ + >; + }; + + pinctrl_usb_otg2_vbus_reg_reva: usbotg2vbusregrevagrp { + fsl,pins = < + MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14 + >; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts b/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts index 0462e43ec09be5..f712537fca161a 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts +++ b/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts @@ -537,342 +537,340 @@ &iomuxc { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hog>; - imx7d-sdb { - pinctrl_brcm_reg: brcmreggrp { - fsl,pins = < - MX7D_PAD_ECSPI2_MOSI__GPIO4_IO21 0x14 - >; - }; + pinctrl_brcm_reg: brcmreggrp { + fsl,pins = < + MX7D_PAD_ECSPI2_MOSI__GPIO4_IO21 0x14 + >; + }; - pinctrl_ecspi3: ecspi3grp { - fsl,pins = < - MX7D_PAD_SAI2_TX_SYNC__ECSPI3_MISO 0x2 - MX7D_PAD_SAI2_TX_BCLK__ECSPI3_MOSI 0x2 - MX7D_PAD_SAI2_RX_DATA__ECSPI3_SCLK 0x2 - MX7D_PAD_SD2_CD_B__GPIO5_IO9 0x59 - >; - }; + pinctrl_ecspi3: ecspi3grp { + fsl,pins = < + MX7D_PAD_SAI2_TX_SYNC__ECSPI3_MISO 0x2 + MX7D_PAD_SAI2_TX_BCLK__ECSPI3_MOSI 0x2 + MX7D_PAD_SAI2_RX_DATA__ECSPI3_SCLK 0x2 + MX7D_PAD_SD2_CD_B__GPIO5_IO9 0x59 + >; + }; - pinctrl_enet1: enet1grp { - fsl,pins = < - MX7D_PAD_GPIO1_IO10__ENET1_MDIO 0x3 - MX7D_PAD_GPIO1_IO11__ENET1_MDC 0x3 - MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1 - MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1 - MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1 - MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1 - MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1 - MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1 - MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1 - MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1 - MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1 - MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1 - MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1 - MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1 - >; - }; + pinctrl_enet1: enet1grp { + fsl,pins = < + MX7D_PAD_GPIO1_IO10__ENET1_MDIO 0x3 + MX7D_PAD_GPIO1_IO11__ENET1_MDC 0x3 + MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1 + MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1 + MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1 + MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1 + MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1 + MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1 + MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1 + MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1 + MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1 + MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1 + MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1 + MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1 + >; + }; - pinctrl_enet2: enet2grp { - fsl,pins = < - MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1 - MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1 - MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1 - MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1 - MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1 - MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1 - MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1 - MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1 - MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1 - MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1 - MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1 - MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1 - >; - }; + pinctrl_enet2: enet2grp { + fsl,pins = < + MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1 + MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1 + MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1 + MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1 + MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1 + MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1 + MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1 + MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1 + MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1 + 
MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1 + MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1 + MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1 + >; + }; - pinctrl_enet2_reg: enet2reggrp { - fsl,pins = < - MX7D_PAD_LPSR_GPIO1_IO04__GPIO1_IO4 0x14 - >; - }; + pinctrl_enet2_reg: enet2reggrp { + fsl,pins = < + MX7D_PAD_LPSR_GPIO1_IO04__GPIO1_IO4 0x14 + >; + }; - pinctrl_flexcan2: flexcan2grp { - fsl,pins = < - MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX 0x59 - MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX 0x59 - >; - }; + pinctrl_flexcan2: flexcan2grp { + fsl,pins = < + MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX 0x59 + MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX 0x59 + >; + }; - pinctrl_flexcan2_reg: flexcan2reggrp { - fsl,pins = < - MX7D_PAD_EPDC_DATA14__GPIO2_IO14 0x59 /* CAN_STBY */ - >; - }; + pinctrl_flexcan2_reg: flexcan2reggrp { + fsl,pins = < + MX7D_PAD_EPDC_DATA14__GPIO2_IO14 0x59 /* CAN_STBY */ + >; + }; - pinctrl_gpio_keys: gpio_keysgrp { - fsl,pins = < - MX7D_PAD_SD2_RESET_B__GPIO5_IO11 0x59 - MX7D_PAD_SD2_WP__GPIO5_IO10 0x59 - >; - }; + pinctrl_gpio_keys: gpio-keysgrp { + fsl,pins = < + MX7D_PAD_SD2_RESET_B__GPIO5_IO11 0x59 + MX7D_PAD_SD2_WP__GPIO5_IO10 0x59 + >; + }; - pinctrl_hog: hoggrp { - fsl,pins = < - MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x34 /* bt reg on */ - MX7D_PAD_EPDC_BDR0__GPIO2_IO28 0x59 /* headphone detect */ - >; - }; + pinctrl_hog: hoggrp { + fsl,pins = < + MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x34 /* bt reg on */ + MX7D_PAD_EPDC_BDR0__GPIO2_IO28 0x59 /* headphone detect */ + >; + }; - pinctrl_i2c1: i2c1grp { - fsl,pins = < - MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f - MX7D_PAD_I2C1_SCL__I2C1_SCL 0x4000007f - >; - }; + pinctrl_i2c1: i2c1grp { + fsl,pins = < + MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f + MX7D_PAD_I2C1_SCL__I2C1_SCL 0x4000007f + >; + }; - pinctrl_i2c2: i2c2grp { - fsl,pins = < - MX7D_PAD_I2C2_SDA__I2C2_SDA 0x4000007f - MX7D_PAD_I2C2_SCL__I2C2_SCL 0x4000007f - >; - }; + pinctrl_i2c2: i2c2grp { + fsl,pins = < + MX7D_PAD_I2C2_SDA__I2C2_SDA 0x4000007f + MX7D_PAD_I2C2_SCL__I2C2_SCL 0x4000007f + >; + }; - pinctrl_i2c3: i2c3grp { - fsl,pins = < - MX7D_PAD_I2C3_SDA__I2C3_SDA 0x4000007f - MX7D_PAD_I2C3_SCL__I2C3_SCL 0x4000007f - >; - }; + pinctrl_i2c3: i2c3grp { + fsl,pins = < + MX7D_PAD_I2C3_SDA__I2C3_SDA 0x4000007f + MX7D_PAD_I2C3_SCL__I2C3_SCL 0x4000007f + >; + }; - pinctrl_i2c4: i2c4grp { - fsl,pins = < - MX7D_PAD_SAI1_RX_BCLK__I2C4_SDA 0x4000007f - MX7D_PAD_SAI1_RX_SYNC__I2C4_SCL 0x4000007f - >; - }; + pinctrl_i2c4: i2c4grp { + fsl,pins = < + MX7D_PAD_SAI1_RX_BCLK__I2C4_SDA 0x4000007f + MX7D_PAD_SAI1_RX_SYNC__I2C4_SCL 0x4000007f + >; + }; - pinctrl_lcdif: lcdifgrp { - fsl,pins = < - MX7D_PAD_LCD_DATA00__LCD_DATA0 0x79 - MX7D_PAD_LCD_DATA01__LCD_DATA1 0x79 - MX7D_PAD_LCD_DATA02__LCD_DATA2 0x79 - MX7D_PAD_LCD_DATA03__LCD_DATA3 0x79 - MX7D_PAD_LCD_DATA04__LCD_DATA4 0x79 - MX7D_PAD_LCD_DATA05__LCD_DATA5 0x79 - MX7D_PAD_LCD_DATA06__LCD_DATA6 0x79 - MX7D_PAD_LCD_DATA07__LCD_DATA7 0x79 - MX7D_PAD_LCD_DATA08__LCD_DATA8 0x79 - MX7D_PAD_LCD_DATA09__LCD_DATA9 0x79 - MX7D_PAD_LCD_DATA10__LCD_DATA10 0x79 - MX7D_PAD_LCD_DATA11__LCD_DATA11 0x79 - MX7D_PAD_LCD_DATA12__LCD_DATA12 0x79 - MX7D_PAD_LCD_DATA13__LCD_DATA13 0x79 - MX7D_PAD_LCD_DATA14__LCD_DATA14 0x79 - MX7D_PAD_LCD_DATA15__LCD_DATA15 0x79 - MX7D_PAD_LCD_DATA16__LCD_DATA16 0x79 - MX7D_PAD_LCD_DATA17__LCD_DATA17 0x79 - MX7D_PAD_LCD_DATA18__LCD_DATA18 0x79 - MX7D_PAD_LCD_DATA19__LCD_DATA19 0x79 - MX7D_PAD_LCD_DATA20__LCD_DATA20 0x79 - MX7D_PAD_LCD_DATA21__LCD_DATA21 0x79 - MX7D_PAD_LCD_DATA22__LCD_DATA22 0x79 - MX7D_PAD_LCD_DATA23__LCD_DATA23 0x79 - MX7D_PAD_LCD_CLK__LCD_CLK 0x79 - 
MX7D_PAD_LCD_ENABLE__LCD_ENABLE 0x79 - MX7D_PAD_LCD_VSYNC__LCD_VSYNC 0x79 - MX7D_PAD_LCD_HSYNC__LCD_HSYNC 0x79 - MX7D_PAD_LCD_RESET__LCD_RESET 0x79 - >; - }; + pinctrl_lcdif: lcdifgrp { + fsl,pins = < + MX7D_PAD_LCD_DATA00__LCD_DATA0 0x79 + MX7D_PAD_LCD_DATA01__LCD_DATA1 0x79 + MX7D_PAD_LCD_DATA02__LCD_DATA2 0x79 + MX7D_PAD_LCD_DATA03__LCD_DATA3 0x79 + MX7D_PAD_LCD_DATA04__LCD_DATA4 0x79 + MX7D_PAD_LCD_DATA05__LCD_DATA5 0x79 + MX7D_PAD_LCD_DATA06__LCD_DATA6 0x79 + MX7D_PAD_LCD_DATA07__LCD_DATA7 0x79 + MX7D_PAD_LCD_DATA08__LCD_DATA8 0x79 + MX7D_PAD_LCD_DATA09__LCD_DATA9 0x79 + MX7D_PAD_LCD_DATA10__LCD_DATA10 0x79 + MX7D_PAD_LCD_DATA11__LCD_DATA11 0x79 + MX7D_PAD_LCD_DATA12__LCD_DATA12 0x79 + MX7D_PAD_LCD_DATA13__LCD_DATA13 0x79 + MX7D_PAD_LCD_DATA14__LCD_DATA14 0x79 + MX7D_PAD_LCD_DATA15__LCD_DATA15 0x79 + MX7D_PAD_LCD_DATA16__LCD_DATA16 0x79 + MX7D_PAD_LCD_DATA17__LCD_DATA17 0x79 + MX7D_PAD_LCD_DATA18__LCD_DATA18 0x79 + MX7D_PAD_LCD_DATA19__LCD_DATA19 0x79 + MX7D_PAD_LCD_DATA20__LCD_DATA20 0x79 + MX7D_PAD_LCD_DATA21__LCD_DATA21 0x79 + MX7D_PAD_LCD_DATA22__LCD_DATA22 0x79 + MX7D_PAD_LCD_DATA23__LCD_DATA23 0x79 + MX7D_PAD_LCD_CLK__LCD_CLK 0x79 + MX7D_PAD_LCD_ENABLE__LCD_ENABLE 0x79 + MX7D_PAD_LCD_VSYNC__LCD_VSYNC 0x79 + MX7D_PAD_LCD_HSYNC__LCD_HSYNC 0x79 + MX7D_PAD_LCD_RESET__LCD_RESET 0x79 + >; + }; - pinctrl_sai1: sai1grp { - fsl,pins = < - MX7D_PAD_SAI1_MCLK__SAI1_MCLK 0x1f - MX7D_PAD_ENET1_RX_CLK__SAI1_TX_BCLK 0x1f - MX7D_PAD_ENET1_CRS__SAI1_TX_SYNC 0x1f - MX7D_PAD_ENET1_COL__SAI1_TX_DATA0 0x30 - MX7D_PAD_ENET1_TX_CLK__SAI1_RX_DATA0 0x1f - >; - }; + pinctrl_sai1: sai1grp { + fsl,pins = < + MX7D_PAD_SAI1_MCLK__SAI1_MCLK 0x1f + MX7D_PAD_ENET1_RX_CLK__SAI1_TX_BCLK 0x1f + MX7D_PAD_ENET1_CRS__SAI1_TX_SYNC 0x1f + MX7D_PAD_ENET1_COL__SAI1_TX_DATA0 0x30 + MX7D_PAD_ENET1_TX_CLK__SAI1_RX_DATA0 0x1f + >; + }; - pinctrl_sai2: sai2grp { - fsl,pins = < - MX7D_PAD_SAI2_TX_BCLK__SAI2_TX_BCLK 0x1f - MX7D_PAD_SAI2_TX_SYNC__SAI2_TX_SYNC 0x1f - MX7D_PAD_SAI2_TX_DATA__SAI2_TX_DATA0 0x30 - MX7D_PAD_SAI2_RX_DATA__SAI2_RX_DATA0 0x1f - >; - }; + pinctrl_sai2: sai2grp { + fsl,pins = < + MX7D_PAD_SAI2_TX_BCLK__SAI2_TX_BCLK 0x1f + MX7D_PAD_SAI2_TX_SYNC__SAI2_TX_SYNC 0x1f + MX7D_PAD_SAI2_TX_DATA__SAI2_TX_DATA0 0x30 + MX7D_PAD_SAI2_RX_DATA__SAI2_RX_DATA0 0x1f + >; + }; - pinctrl_sai3: sai3grp { - fsl,pins = < - MX7D_PAD_UART3_TX_DATA__SAI3_TX_BCLK 0x1f - MX7D_PAD_UART3_CTS_B__SAI3_TX_SYNC 0x1f - MX7D_PAD_UART3_RTS_B__SAI3_TX_DATA0 0x30 - >; - }; + pinctrl_sai3: sai3grp { + fsl,pins = < + MX7D_PAD_UART3_TX_DATA__SAI3_TX_BCLK 0x1f + MX7D_PAD_UART3_CTS_B__SAI3_TX_SYNC 0x1f + MX7D_PAD_UART3_RTS_B__SAI3_TX_DATA0 0x30 + >; + }; - pinctrl_spi4: spi4grp { - fsl,pins = < - MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59 - MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59 - MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59 - >; - }; + pinctrl_spi4: spi4grp { + fsl,pins = < + MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59 + MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59 + MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59 + >; + }; - pinctrl_tsc2046_pendown: tsc2046_pendown { - fsl,pins = < - MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59 - >; - }; + pinctrl_tsc2046_pendown: tsc2046-pendowngrp { + fsl,pins = < + MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59 + >; + }; - pinctrl_uart1: uart1grp { - fsl,pins = < - MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX 0x79 - MX7D_PAD_UART1_RX_DATA__UART1_DCE_RX 0x79 - >; - }; + pinctrl_uart1: uart1grp { + fsl,pins = < + MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX 0x79 + MX7D_PAD_UART1_RX_DATA__UART1_DCE_RX 0x79 + >; + }; - pinctrl_uart5: uart5grp { - fsl,pins = < - 
MX7D_PAD_SAI1_TX_BCLK__UART5_DCE_TX 0x79 - MX7D_PAD_SAI1_RX_DATA__UART5_DCE_RX 0x79 - MX7D_PAD_SAI1_TX_SYNC__UART5_DCE_CTS 0x79 - MX7D_PAD_SAI1_TX_DATA__UART5_DCE_RTS 0x79 - >; - }; + pinctrl_uart5: uart5grp { + fsl,pins = < + MX7D_PAD_SAI1_TX_BCLK__UART5_DCE_TX 0x79 + MX7D_PAD_SAI1_RX_DATA__UART5_DCE_RX 0x79 + MX7D_PAD_SAI1_TX_SYNC__UART5_DCE_CTS 0x79 + MX7D_PAD_SAI1_TX_DATA__UART5_DCE_RTS 0x79 + >; + }; - pinctrl_uart6: uart6grp { - fsl,pins = < - MX7D_PAD_ECSPI1_MOSI__UART6_DCE_TX 0x79 - MX7D_PAD_ECSPI1_SCLK__UART6_DCE_RX 0x79 - MX7D_PAD_ECSPI1_SS0__UART6_DCE_CTS 0x79 - MX7D_PAD_ECSPI1_MISO__UART6_DCE_RTS 0x79 - >; - }; + pinctrl_uart6: uart6grp { + fsl,pins = < + MX7D_PAD_ECSPI1_MOSI__UART6_DCE_TX 0x79 + MX7D_PAD_ECSPI1_SCLK__UART6_DCE_RX 0x79 + MX7D_PAD_ECSPI1_SS0__UART6_DCE_CTS 0x79 + MX7D_PAD_ECSPI1_MISO__UART6_DCE_RTS 0x79 + >; + }; - pinctrl_usdhc1_gpio: usdhc1_gpiogrp { - fsl,pins = < - MX7D_PAD_SD1_CD_B__GPIO5_IO0 0x59 /* CD */ - MX7D_PAD_SD1_WP__GPIO5_IO1 0x59 /* WP */ - MX7D_PAD_SD1_RESET_B__GPIO5_IO2 0x59 /* vmmc */ - MX7D_PAD_GPIO1_IO08__SD1_VSELECT 0x59 /* VSELECT */ - >; - }; + pinctrl_usdhc1_gpio: usdhc1-gpiogrp { + fsl,pins = < + MX7D_PAD_SD1_CD_B__GPIO5_IO0 0x59 /* CD */ + MX7D_PAD_SD1_WP__GPIO5_IO1 0x59 /* WP */ + MX7D_PAD_SD1_RESET_B__GPIO5_IO2 0x59 /* vmmc */ + MX7D_PAD_GPIO1_IO08__SD1_VSELECT 0x59 /* VSELECT */ + >; + }; - pinctrl_usdhc1: usdhc1grp { - fsl,pins = < - MX7D_PAD_SD1_CMD__SD1_CMD 0x59 - MX7D_PAD_SD1_CLK__SD1_CLK 0x19 - MX7D_PAD_SD1_DATA0__SD1_DATA0 0x59 - MX7D_PAD_SD1_DATA1__SD1_DATA1 0x59 - MX7D_PAD_SD1_DATA2__SD1_DATA2 0x59 - MX7D_PAD_SD1_DATA3__SD1_DATA3 0x59 - >; - }; + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX7D_PAD_SD1_CMD__SD1_CMD 0x59 + MX7D_PAD_SD1_CLK__SD1_CLK 0x19 + MX7D_PAD_SD1_DATA0__SD1_DATA0 0x59 + MX7D_PAD_SD1_DATA1__SD1_DATA1 0x59 + MX7D_PAD_SD1_DATA2__SD1_DATA2 0x59 + MX7D_PAD_SD1_DATA3__SD1_DATA3 0x59 + >; + }; - pinctrl_usdhc1_100mhz: usdhc1grp_100mhz { - fsl,pins = < - MX7D_PAD_SD1_CMD__SD1_CMD 0x5a - MX7D_PAD_SD1_CLK__SD1_CLK 0x1a - MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5a - MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5a - MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5a - MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5a - >; - }; + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { + fsl,pins = < + MX7D_PAD_SD1_CMD__SD1_CMD 0x5a + MX7D_PAD_SD1_CLK__SD1_CLK 0x1a + MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5a + MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5a + MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5a + MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5a + >; + }; - pinctrl_usdhc1_200mhz: usdhc1grp_200mhz { - fsl,pins = < - MX7D_PAD_SD1_CMD__SD1_CMD 0x5b - MX7D_PAD_SD1_CLK__SD1_CLK 0x1b - MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5b - MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5b - MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5b - MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5b - >; - }; + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { + fsl,pins = < + MX7D_PAD_SD1_CMD__SD1_CMD 0x5b + MX7D_PAD_SD1_CLK__SD1_CLK 0x1b + MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5b + MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5b + MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5b + MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5b + >; + }; - pinctrl_usdhc2: usdhc2grp { - fsl,pins = < - MX7D_PAD_SD2_CMD__SD2_CMD 0x59 - MX7D_PAD_SD2_CLK__SD2_CLK 0x19 - MX7D_PAD_SD2_DATA0__SD2_DATA0 0x59 - MX7D_PAD_SD2_DATA1__SD2_DATA1 0x59 - MX7D_PAD_SD2_DATA2__SD2_DATA2 0x59 - MX7D_PAD_SD2_DATA3__SD2_DATA3 0x59 - >; - }; + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX7D_PAD_SD2_CMD__SD2_CMD 0x59 + MX7D_PAD_SD2_CLK__SD2_CLK 0x19 + MX7D_PAD_SD2_DATA0__SD2_DATA0 0x59 + MX7D_PAD_SD2_DATA1__SD2_DATA1 0x59 + MX7D_PAD_SD2_DATA2__SD2_DATA2 0x59 + 
MX7D_PAD_SD2_DATA3__SD2_DATA3 0x59 + >; + }; - pinctrl_usdhc2_100mhz: usdhc2grp_100mhz { - fsl,pins = < - MX7D_PAD_SD2_CMD__SD2_CMD 0x5a - MX7D_PAD_SD2_CLK__SD2_CLK 0x1a - MX7D_PAD_SD2_DATA0__SD2_DATA0 0x5a - MX7D_PAD_SD2_DATA1__SD2_DATA1 0x5a - MX7D_PAD_SD2_DATA2__SD2_DATA2 0x5a - MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5a - >; - }; + pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp { + fsl,pins = < + MX7D_PAD_SD2_CMD__SD2_CMD 0x5a + MX7D_PAD_SD2_CLK__SD2_CLK 0x1a + MX7D_PAD_SD2_DATA0__SD2_DATA0 0x5a + MX7D_PAD_SD2_DATA1__SD2_DATA1 0x5a + MX7D_PAD_SD2_DATA2__SD2_DATA2 0x5a + MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5a + >; + }; - pinctrl_usdhc2_200mhz: usdhc2grp_200mhz { - fsl,pins = < - MX7D_PAD_SD2_CMD__SD2_CMD 0x5b - MX7D_PAD_SD2_CLK__SD2_CLK 0x1b - MX7D_PAD_SD2_DATA0__SD2_DATA0 0x5b - MX7D_PAD_SD2_DATA1__SD2_DATA1 0x5b - MX7D_PAD_SD2_DATA2__SD2_DATA2 0x5b - MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5b - >; - }; + pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp { + fsl,pins = < + MX7D_PAD_SD2_CMD__SD2_CMD 0x5b + MX7D_PAD_SD2_CLK__SD2_CLK 0x1b + MX7D_PAD_SD2_DATA0__SD2_DATA0 0x5b + MX7D_PAD_SD2_DATA1__SD2_DATA1 0x5b + MX7D_PAD_SD2_DATA2__SD2_DATA2 0x5b + MX7D_PAD_SD2_DATA3__SD2_DATA3 0x5b + >; + }; - pinctrl_usdhc3: usdhc3grp { - fsl,pins = < - MX7D_PAD_SD3_CMD__SD3_CMD 0x59 - MX7D_PAD_SD3_CLK__SD3_CLK 0x19 - MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59 - MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59 - MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59 - MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59 - MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59 - MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59 - MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59 - MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59 - MX7D_PAD_SD3_STROBE__SD3_STROBE 0x19 - >; - }; + pinctrl_usdhc3: usdhc3grp { + fsl,pins = < + MX7D_PAD_SD3_CMD__SD3_CMD 0x59 + MX7D_PAD_SD3_CLK__SD3_CLK 0x19 + MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59 + MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59 + MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59 + MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59 + MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59 + MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59 + MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59 + MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59 + MX7D_PAD_SD3_STROBE__SD3_STROBE 0x19 + >; + }; - pinctrl_usdhc3_100mhz: usdhc3grp_100mhz { - fsl,pins = < - MX7D_PAD_SD3_CMD__SD3_CMD 0x5a - MX7D_PAD_SD3_CLK__SD3_CLK 0x1a - MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5a - MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5a - MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5a - MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5a - MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5a - MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5a - MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5a - MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a - MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1a - >; - }; + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { + fsl,pins = < + MX7D_PAD_SD3_CMD__SD3_CMD 0x5a + MX7D_PAD_SD3_CLK__SD3_CLK 0x1a + MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5a + MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5a + MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5a + MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5a + MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5a + MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5a + MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5a + MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a + MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1a + >; + }; - pinctrl_usdhc3_200mhz: usdhc3grp_200mhz { - fsl,pins = < - MX7D_PAD_SD3_CMD__SD3_CMD 0x5b - MX7D_PAD_SD3_CLK__SD3_CLK 0x1b - MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5b - MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5b - MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5b - MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5b - MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5b - MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5b - MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5b - MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5b - MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1b - >; - }; + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { + 
fsl,pins = < + MX7D_PAD_SD3_CMD__SD3_CMD 0x5b + MX7D_PAD_SD3_CLK__SD3_CLK 0x1b + MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5b + MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5b + MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5b + MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5b + MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5b + MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5b + MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5b + MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5b + MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1b + >; }; }; @@ -901,7 +899,7 @@ MX7D_PAD_LPSR_GPIO1_IO07__GPIO1_IO7 0x14 >; }; - pinctrl_sai3_mclk: sai3grp_mclk { + pinctrl_sai3_mclk: sai3-mclk-grp { fsl,pins = < MX7D_PAD_LPSR_GPIO1_IO03__SAI3_MCLK 0x1f >; diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts index 521493342fe972..8f5566027c25a2 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts +++ b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts @@ -350,7 +350,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x59 &iomuxc_lpsr { pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp { - fsl,phy = < + fsl,pins = < MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08 >; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts index 7bab113ca6da79..af4acc311572c1 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts +++ b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts @@ -459,7 +459,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x19 >; }; - pinctrl_usdhc3_100mhz: usdhc3grp_100mhz { + pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp { fsl,pins = < MX7D_PAD_SD3_CMD__SD3_CMD 0x5a MX7D_PAD_SD3_CLK__SD3_CLK 0x1a @@ -475,7 +475,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x1a >; }; - pinctrl_usdhc3_200mhz: usdhc3grp_200mhz { + pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp { fsl,pins = < MX7D_PAD_SD3_CMD__SD3_CMD 0x5b MX7D_PAD_SD3_CLK__SD3_CLK 0x1b diff --git a/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi b/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi index e78d0a7d8cd28c..941d9860218e96 100644 --- a/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi +++ b/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi @@ -505,7 +505,7 @@ MX6UL_PAD_CSI_HSYNC__UART6_DCE_CTS 0x1b0b1 >; }; - pinctrl_uart6dte: uart6dte { + pinctrl_uart6dte: uart6dtegrp { fsl,pins = < MX6UL_PAD_CSI_PIXCLK__UART6_DTE_TX 0x1b0b1 MX6UL_PAD_CSI_MCLK__UART6_DTE_RX 0x1b0b1 @@ -537,7 +537,7 @@ MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x0001b099 >; }; - pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x00017069 MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x000170b9 @@ -552,7 +552,7 @@ MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x0001b099 >; }; - pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp { fsl,pins = < MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x00017069 MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x000170f9 diff --git a/arch/arm/boot/dts/nxp/lpc/lpc4357-ea4357-devkit.dts b/arch/arm/boot/dts/nxp/lpc/lpc4357-ea4357-devkit.dts index 224f80a4a31d80..4aefbc01dfc0fe 100644 --- a/arch/arm/boot/dts/nxp/lpc/lpc4357-ea4357-devkit.dts +++ b/arch/arm/boot/dts/nxp/lpc/lpc4357-ea4357-devkit.dts @@ -482,8 +482,8 @@ mma7455@1d { reg = <0x1d>; }; - lm75@48 { - compatible = "nxp,lm75"; + temperature-sensor@48 { + compatible = "national,lm75b"; reg = <0x48>; }; diff --git a/arch/arm/boot/dts/nxp/lpc/lpc4357-myd-lpc4357.dts b/arch/arm/boot/dts/nxp/lpc/lpc4357-myd-lpc4357.dts index 1f84654df50c38..846afb8ccbf1de 100644 --- a/arch/arm/boot/dts/nxp/lpc/lpc4357-myd-lpc4357.dts +++ b/arch/arm/boot/dts/nxp/lpc/lpc4357-myd-lpc4357.dts @@ -511,7 +511,7 @@ &i2c1 { clock-frequency = <400000>; sensor@49 { - compatible = "lm75"; + compatible = 
"national,lm75"; reg = <0x49>; }; diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-evk.dts b/arch/arm/boot/dts/nxp/mxs/imx23-evk.dts index 7365fe4581a3a2..33b36af1656f05 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx23-evk.dts +++ b/arch/arm/boot/dts/nxp/mxs/imx23-evk.dts @@ -52,7 +52,7 @@ panel_in: endpoint { }; apb@80000000 { - apbh@80000000 { + apbh-bus@80000000 { nand-controller@8000c000 { pinctrl-names = "default"; pinctrl-0 = <&gpmi_pins_a &gpmi_pins_fixup>; @@ -99,7 +99,7 @@ display_out: endpoint { }; }; - apbx@80040000 { + apbx-bus@80040000 { lradc@80050000 { status = "okay"; fsl,lradc-touchscreen-wires = <4>; diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-olinuxino.dts b/arch/arm/boot/dts/nxp/mxs/imx23-olinuxino.dts index 229e727b222ecf..e372e9327a475e 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx23-olinuxino.dts +++ b/arch/arm/boot/dts/nxp/mxs/imx23-olinuxino.dts @@ -19,7 +19,7 @@ memory@40000000 { }; apb@80000000 { - apbh@80000000 { + apbh-bus@80000000 { ssp0: spi@80010000 { compatible = "fsl,imx23-mmc"; pinctrl-names = "default"; @@ -64,7 +64,7 @@ ssp1: spi@80034000 { }; }; - apbx@80040000 { + apbx-bus@80040000 { lradc@80050000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts b/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts index b23e7ada9c804f..cb661bf2d1578f 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts +++ b/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts @@ -55,7 +55,7 @@ memory@40000000 { }; apb@80000000 { - apbh@80000000 { + apbh-bus@80000000 { ssp0: spi@80010000 { compatible = "fsl,imx23-mmc"; pinctrl-names = "default"; @@ -100,7 +100,7 @@ MX23_PAD_PWM3__GPIO_1_29 }; }; - apbx@80040000 { + apbx-bus@80040000 { pwm: pwm@80064000 { pinctrl-names = "default"; pinctrl-0 = <&pwm2_pins_a>; diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-stmp378x_devb.dts b/arch/arm/boot/dts/nxp/mxs/imx23-stmp378x_devb.dts index 69124ba6a6667c..b2b6f851499954 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx23-stmp378x_devb.dts +++ b/arch/arm/boot/dts/nxp/mxs/imx23-stmp378x_devb.dts @@ -16,7 +16,7 @@ memory@40000000 { }; apb@80000000 { - apbh@80000000 { + apbh-bus@80000000 { ssp0: spi@80010000 { compatible = "fsl,imx23-mmc"; pinctrl-names = "default"; @@ -44,7 +44,7 @@ MX23_PAD_PWM4__GPIO_1_30 }; }; - apbx@80040000 { + apbx-bus@80040000 { auart0: serial@8006c000 { pinctrl-names = "default"; pinctrl-0 = <&auart0_pins_a>; diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts b/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts index 28341d8315c25b..0b088c8ab6b646 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts +++ b/arch/arm/boot/dts/nxp/mxs/imx23-xfi3.dts @@ -54,7 +54,7 @@ memory@40000000 { }; apb@80000000 { - apbh@80000000 { + apbh-bus@80000000 { ssp0: spi@80010000 { compatible = "fsl,imx23-mmc"; pinctrl-names = "default"; @@ -101,7 +101,7 @@ MX23_PAD_ROTARYB__GPIO_2_8 }; }; - apbx@80040000 { + apbx-bus@80040000 { i2c: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c_pins_a>; diff --git a/arch/arm/boot/dts/nxp/mxs/imx23.dtsi b/arch/arm/boot/dts/nxp/mxs/imx23.dtsi index 0309592af1e1eb..5e21252fb7c969 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx23.dtsi +++ b/arch/arm/boot/dts/nxp/mxs/imx23.dtsi @@ -45,7 +45,7 @@ apb@80000000 { reg = <0x80000000 0x80000>; ranges; - apbh@80000000 { + apbh-bus@80000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; @@ -476,7 +476,7 @@ tvenc@80038000 { }; }; - apbx@80040000 { + apbx-bus@80040000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-apx4devkit.dts 
b/arch/arm/boot/dts/nxp/mxs/imx28-apx4devkit.dts index f9bf40d9656810..4c4ea91c286f95 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx28-apx4devkit.dts +++ b/arch/arm/boot/dts/nxp/mxs/imx28-apx4devkit.dts @@ -11,19 +11,13 @@ memory@40000000 { reg = <0x40000000 0x04000000>; }; - regulators { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <0>; - - reg_3p3v: regulator@0 { - compatible = "regulator-fixed"; - reg = <0>; - regulator-name = "3P3V"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-always-on; - }; + + reg_3p3v: regulator-3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; }; sound { diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-cfa10037.dts b/arch/arm/boot/dts/nxp/mxs/imx28-cfa10037.dts index c72fe2d392f100..fd177daa63853f 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx28-cfa10037.dts +++ b/arch/arm/boot/dts/nxp/mxs/imx28-cfa10037.dts @@ -14,7 +14,7 @@ / { compatible = "crystalfontz,cfa10037", "crystalfontz,cfa10036", "fsl,imx28"; apb@80000000 { - apbh@80000000 { + apbh-bus@80000000 { pinctrl@80018000 { usb_pins_cfa10037: usb-10037@0 { reg = <0>; @@ -38,7 +38,7 @@ MX28_PAD_SSP2_SS2__GPIO_2_21 }; }; - apbx@80040000 { + apbx-bus@80040000 { usbphy1: usbphy@8007e000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-lwe.dtsi b/arch/arm/boot/dts/nxp/mxs/imx28-lwe.dtsi index 69fcb0dde94026..410dfe17f8ca62 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx28-lwe.dtsi +++ b/arch/arm/boot/dts/nxp/mxs/imx28-lwe.dtsi @@ -55,23 +55,6 @@ &i2c0 { status = "okay"; }; -&saif0 { - pinctrl-names = "default"; - pinctrl-0 = <&saif0_pins_a>; - #sound-dai-cells = <0>; - assigned-clocks = <&clks 53>; - assigned-clock-rates = <12000000>; - status = "okay"; -}; - -&saif1 { - pinctrl-names = "default"; - pinctrl-0 = <&saif1_pins_a>; - fsl,saif-master = <&saif0>; - #sound-dai-cells = <0>; - status = "okay"; -}; - &spi3_pins_a { fsl,pinmux-ids = < MX28_PAD_AUART2_RX__SSP3_D4 @@ -109,7 +92,7 @@ &ssp3 { flash@0 { compatible = "jedec,spi-nor"; - spi-max-frequency = <40000000>; + spi-max-frequency = <20000000>; reg = <0>; partitions { @@ -133,14 +116,21 @@ partition@90000 { reg = <0x90000 0x10000>; }; - partition@100000 { - label = "kernel"; - reg = <0x100000 0x400000>; + partition@a0000 { + label = "rescue"; + reg = <0xa0000 0xf40000>; + }; + + partition@fe0000 { + /* 1st sector for SPL boot img source data */ + label = "spl-boot-data1"; + reg = <0xfe0000 0x10000>; }; - partition@500000 { - label = "swupdate"; - reg = <0x500000 0x800000>; + partition@ff0000 { + /* 2nd sector for SPL boot img source data */ + label = "spl-boot-data2"; + reg = <0xff0000 0x10000>; }; }; }; diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts b/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts index d38183edf0fd52..9290635352f12c 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts +++ b/arch/arm/boot/dts/nxp/mxs/imx28-tx28.dts @@ -615,13 +615,13 @@ MX28_PAD_JTAG_RTCK__GPIO_4_20 /* USBH_OC */ &saif0 { pinctrl-names = "default"; pinctrl-0 = <&saif0_pins_b>; - fsl,saif-master; status = "okay"; }; &saif1 { pinctrl-names = "default"; pinctrl-0 = <&saif1_pins_a>; + fsl,saif-master = <&saif0>; status = "okay"; }; diff --git a/arch/arm/boot/dts/nxp/mxs/imx28.dtsi b/arch/arm/boot/dts/nxp/mxs/imx28.dtsi index 4817fba2d938b9..bbea8b77386f11 100644 --- a/arch/arm/boot/dts/nxp/mxs/imx28.dtsi +++ b/arch/arm/boot/dts/nxp/mxs/imx28.dtsi @@ -56,7 +56,7 @@ apb@80000000 { reg = 
<0x80000000 0x80000>; ranges; - apbh@80000000 { + apbh-bus@80000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; @@ -1092,7 +1092,7 @@ armjtag: armjtag@8003c800 { }; }; - apbx@80040000 { + apbx-bus@80040000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/qcom/pma8084.dtsi b/arch/arm/boot/dts/qcom/pma8084.dtsi index 2985f4805b93ee..309f5256754bb9 100644 --- a/arch/arm/boot/dts/qcom/pma8084.dtsi +++ b/arch/arm/boot/dts/qcom/pma8084.dtsi @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include @@ -19,12 +20,17 @@ rtc@6000 { interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>; }; - pwrkey@800 { - compatible = "qcom,pm8941-pwrkey"; + pon@800 { + compatible = "qcom,pm8941-pon"; reg = <0x800>; - interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>; - debounce = <15625>; - bias-pull-up; + + pwrkey { + compatible = "qcom,pm8941-pwrkey"; + interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>; + debounce = <15625>; + bias-pull-up; + linux,code = ; + }; }; pma8084_gpios: gpio@c000 { diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-asus-nexus7-flo.dts index d460743fbb9401..94718399285056 100644 --- a/arch/arm/boot/dts/qcom/qcom-apq8064-asus-nexus7-flo.dts +++ b/arch/arm/boot/dts/qcom/qcom-apq8064-asus-nexus7-flo.dts @@ -125,8 +125,6 @@ &gsbi1 { &gsbi1_i2c { status = "okay"; clock-frequency = <200000>; - pinctrl-0 = <&i2c1_pins>; - pinctrl-names = "default"; eeprom@52 { compatible = "atmel,24c128"; @@ -148,8 +146,6 @@ &gsbi3 { &gsbi3_i2c { clock-frequency = <200000>; - pinctrl-0 = <&i2c3_pins>; - pinctrl-names = "default"; status = "okay"; trackpad@10 { diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-cm-qs600.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-cm-qs600.dts index 671d58cc2741b0..178c55c1efeb51 100644 --- a/arch/arm/boot/dts/qcom/qcom-apq8064-cm-qs600.dts +++ b/arch/arm/boot/dts/qcom/qcom-apq8064-cm-qs600.dts @@ -188,24 +188,17 @@ &sdcc4 { }; &tlmm_pinmux { - card_detect: card_detect { - mux { - pins = "gpio26"; - function = "gpio"; - bias-disable; - }; + card_detect: card-detect-state { + pins = "gpio26"; + function = "gpio"; + bias-disable; }; - pcie_pins: pcie_pinmux { - mux { - pins = "gpio27"; - function = "gpio"; - }; - conf { - pins = "gpio27"; - drive-strength = <12>; - bias-disable; - }; + pcie_pins: pcie-state { + pins = "gpio27"; + function = "gpio"; + drive-strength = <12>; + bias-disable; }; }; diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts index ed86b24119c97f..b3ff8010b14985 100644 --- a/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts +++ b/arch/arm/boot/dts/qcom/qcom-apq8064-ifc6410.dts @@ -321,24 +321,17 @@ &sdcc4 { }; &tlmm_pinmux { - card_detect: card_detect { - mux { - pins = "gpio26"; - function = "gpio"; - bias-disable; - }; + card_detect: card-detect-state { + pins = "gpio26"; + function = "gpio"; + bias-disable; }; - pcie_pins: pcie_pinmux { - mux { - pins = "gpio27"; - function = "gpio"; - }; - conf { - pins = "gpio27"; - drive-strength = <12>; - bias-disable; - }; + pcie_pins: pcie-state { + pins = "gpio27"; + function = "gpio"; + drive-strength = <12>; + bias-disable; }; }; diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-pins.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064-pins.dtsi index 7c545c50847b6a..e53de709e9d108 100644 --- a/arch/arm/boot/dts/qcom/qcom-apq8064-pins.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-apq8064-pins.dtsi @@ -1,318 +1,218 @@ // 
SPDX-License-Identifier: GPL-2.0 &tlmm_pinmux { - sdc4_gpios: sdc4-gpios { - pios { - pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; - function = "sdc4"; - }; - }; - - sdcc1_pins: sdcc1-pin-active { - clk { + sdcc1_default_state: sdcc1-default-state { + clk-pins { pins = "sdc1_clk"; - drive-strengh = <16>; + drive-strength = <16>; bias-disable; }; - cmd { + cmd-pins { pins = "sdc1_cmd"; - drive-strengh = <10>; + drive-strength = <10>; bias-pull-up; }; - data { + data-pins { pins = "sdc1_data"; - drive-strengh = <10>; + drive-strength = <10>; bias-pull-up; }; }; - sdcc3_pins: sdcc3-pin-active { - clk { + sdcc3_default_state: sdcc3-default-state { + clk-pins { pins = "sdc3_clk"; - drive-strengh = <8>; + drive-strength = <8>; bias-disable; }; - cmd { + cmd-pins { pins = "sdc3_cmd"; - drive-strengh = <8>; + drive-strength = <8>; bias-pull-up; }; - data { + data-pins { pins = "sdc3_data"; - drive-strengh = <8>; + drive-strength = <8>; bias-pull-up; }; }; - ps_hold: ps_hold { - mux { - pins = "gpio78"; - function = "ps_hold"; - }; + sdc4_default_state: sdc4-default-state { + pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; + function = "sdc4"; }; - i2c1_pins: i2c1 { - mux { - pins = "gpio20", "gpio21"; - function = "gsbi1"; - }; + gsbi1_uart_2pins: gsbi1-uart-2pins-state { + pins = "gpio18", "gpio19"; + function = "gsbi1"; + }; - pinconf { - pins = "gpio20", "gpio21"; - drive-strength = <16>; - bias-disable; - }; + gsbi1_uart_4pins: gsbi1-uart-4pins-state { + pins = "gpio18", "gpio19", "gpio20", "gpio21"; + function = "gsbi1"; }; - i2c1_pins_sleep: i2c1_pins_sleep { - mux { - pins = "gpio20", "gpio21"; - function = "gpio"; - }; - pinconf { - pins = "gpio20", "gpio21"; + gsbi4_uart_pin_a: gsbi4-uart-pin-active-state { + rx-pins { + pins = "gpio11"; + function = "gsbi4"; drive-strength = <2>; bias-disable; }; - }; - gsbi1_uart_2pins: gsbi1_uart_2pins { - mux { - pins = "gpio18", "gpio19"; - function = "gsbi1"; + tx-pins { + pins = "gpio10"; + function = "gsbi4"; + drive-strength = <4>; + bias-disable; }; }; - gsbi1_uart_4pins: gsbi1_uart_4pins { - mux { - pins = "gpio18", "gpio19", "gpio20", "gpio21"; - function = "gsbi1"; - }; + gsbi6_uart_2pins: gsbi6-uart-2pins-state { + pins = "gpio14", "gpio15"; + function = "gsbi6"; }; - i2c2_pins: i2c2 { - mux { - pins = "gpio24", "gpio25"; - function = "gsbi2"; - }; - - pinconf { - pins = "gpio24", "gpio25"; - drive-strength = <16>; - bias-disable; - }; + gsbi6_uart_4pins: gsbi6-uart-4pins-state { + pins = "gpio14", "gpio15", "gpio16", "gpio17"; + function = "gsbi6"; }; - i2c2_pins_sleep: i2c2_pins_sleep { - mux { - pins = "gpio24", "gpio25"; - function = "gpio"; - }; - - pinconf { - pins = "gpio24", "gpio25"; - drive-strength = <2>; - bias-disable; - }; + gsbi7_uart_2pins: gsbi7-uart-2pins-state { + pins = "gpio82", "gpio83"; + function = "gsbi7"; }; - i2c3_pins: i2c3 { - mux { - pins = "gpio8", "gpio9"; - function = "gsbi3"; - }; - - pinconf { - pins = "gpio8", "gpio9"; - drive-strength = <16>; - bias-disable; - }; + gsbi7_uart_4pins: gsbi7_uart_4pins-state { + pins = "gpio82", "gpio83", "gpio84", "gpio85"; + function = "gsbi7"; }; - i2c3_pins_sleep: i2c3_pins_sleep { - mux { - pins = "gpio8", "gpio9"; - function = "gpio"; - }; - pinconf { - pins = "gpio8", "gpio9"; - drive-strength = <2>; - bias-disable; - }; + i2c1_default_state: i2c1-default-state { + pins = "gpio20", "gpio21"; + function = "gsbi1"; + drive-strength = <16>; + bias-disable; }; - i2c4_pins: i2c4 { - mux { - pins = "gpio12", "gpio13"; - function = 
"gsbi4"; - }; - - pinconf { - pins = "gpio12", "gpio13"; - drive-strength = <16>; - bias-disable; - }; + i2c1_sleep_state: i2c1-sleep-state { + pins = "gpio20", "gpio21"; + function = "gpio"; + drive-strength = <2>; + bias-disable; }; - i2c4_pins_sleep: i2c4_pins_sleep { - mux { - pins = "gpio12", "gpio13"; - function = "gpio"; - }; - pinconf { - pins = "gpio12", "gpio13"; - drive-strength = <2>; - bias-disable; - }; + i2c2_default_state: i2c2-default-state { + pins = "gpio24", "gpio25"; + function = "gsbi2"; + drive-strength = <16>; + bias-disable; }; - spi5_default: spi5_default { - pinmux { - pins = "gpio51", "gpio52", "gpio54"; - function = "gsbi5"; - }; - - pinmux_cs { - function = "gpio"; - pins = "gpio53"; - }; - - pinconf { - pins = "gpio51", "gpio52", "gpio54"; - drive-strength = <16>; - bias-disable; - }; - - pinconf_cs { - pins = "gpio53"; - drive-strength = <16>; - bias-disable; - output-high; - }; + i2c2_sleep_state: i2c2-sleep-state { + pins = "gpio24", "gpio25"; + function = "gpio"; + drive-strength = <2>; + bias-disable; }; - spi5_sleep: spi5_sleep { - pinmux { - function = "gpio"; - pins = "gpio51", "gpio52", "gpio53", "gpio54"; - }; - - pinconf { - pins = "gpio51", "gpio52", "gpio53", "gpio54"; - drive-strength = <2>; - bias-pull-down; - }; + i2c3_default_state: i2c3-default-state { + pins = "gpio8", "gpio9"; + function = "gsbi3"; + drive-strength = <16>; + bias-disable; }; - i2c6_pins: i2c6 { - mux { - pins = "gpio16", "gpio17"; - function = "gsbi6"; - }; - - pinconf { - pins = "gpio16", "gpio17"; - drive-strength = <16>; - bias-disable; - }; + i2c3_sleep_state: i2c3-sleep-state { + pins = "gpio8", "gpio9"; + function = "gpio"; + drive-strength = <2>; + bias-disable; }; - i2c6_pins_sleep: i2c6_pins_sleep { - mux { - pins = "gpio16", "gpio17"; - function = "gpio"; - }; - pinconf { - pins = "gpio16", "gpio17"; - drive-strength = <2>; - bias-disable; - }; + i2c4_default_state: i2c4-default-state { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <16>; + bias-disable; }; - gsbi4_uart_pin_a: gsbi4-uart-pin-active-state { - rx-pins { - pins = "gpio11"; - function = "gsbi4"; - drive-strength = <2>; - bias-disable; - }; - - tx-pins { - pins = "gpio10"; - function = "gsbi4"; - drive-strength = <4>; - bias-disable; - }; + i2c4_sleep_state: i2c4-sleep-state { + pins = "gpio12", "gpio13"; + function = "gpio"; + drive-strength = <2>; + bias-disable; }; - gsbi6_uart_2pins: gsbi6_uart_2pins { - mux { - pins = "gpio14", "gpio15"; - function = "gsbi6"; - }; + i2c6_default_state: i2c6-default-state { + pins = "gpio16", "gpio17"; + function = "gsbi6"; + drive-strength = <16>; + bias-disable; }; - gsbi6_uart_4pins: gsbi6_uart_4pins { - mux { - pins = "gpio14", "gpio15", "gpio16", "gpio17"; - function = "gsbi6"; - }; + i2c6_sleep_state: i2c6-sleep-state { + pins = "gpio16", "gpio17"; + function = "gpio"; + drive-strength = <2>; + bias-disable; }; - gsbi7_uart_2pins: gsbi7_uart_2pins { - mux { - pins = "gpio82", "gpio83"; - function = "gsbi7"; - }; + i2c7_default_state: i2c7-default-state { + pins = "gpio84", "gpio85"; + function = "gsbi7"; + drive-strength = <16>; + bias-disable; }; - gsbi7_uart_4pins: gsbi7_uart_4pins { - mux { - pins = "gpio82", "gpio83", "gpio84", "gpio85"; - function = "gsbi7"; - }; + i2c7_sleep_state: i2c7-sleep-state { + pins = "gpio84", "gpio85"; + function = "gpio"; + drive-strength = <2>; + bias-disable; }; - i2c7_pins: i2c7 { - mux { - pins = "gpio84", "gpio85"; - function = "gsbi7"; + spi5_default_state: spi5-default-state { + spi5-pins { + 
pins = "gpio51", "gpio52", "gpio54"; + function = "gsbi5"; + drive-strength = <16>; + bias-disable; }; - pinconf { - pins = "gpio84", "gpio85"; + spi5-cs-pins { + pins = "gpio53"; + function = "gpio"; drive-strength = <16>; bias-disable; + output-high; }; }; - i2c7_pins_sleep: i2c7_pins_sleep { - mux { - pins = "gpio84", "gpio85"; + spi5_sleep_state: spi5-sleep-state { + spi5-pins { + pins = "gpio51", "gpio52", "gpio53", "gpio54"; function = "gpio"; - }; - pinconf { - pins = "gpio84", "gpio85"; drive-strength = <2>; - bias-disable; + bias-pull-down; }; }; - riva_fm_pin_a: riva-fm-active { + riva_fm_pin_a: riva-fm-active-state { pins = "gpio14", "gpio15"; function = "riva_fm"; }; - riva_bt_pin_a: riva-bt-active { + riva_bt_pin_a: riva-bt-active-state { pins = "gpio16", "gpio17"; function = "riva_bt"; }; - riva_wlan_pin_a: riva-wlan-active { + riva_wlan_pin_a: riva-wlan-active-state { pins = "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; function = "riva_wlan"; @@ -320,22 +220,24 @@ riva_wlan_pin_a: riva-wlan-active { bias-pull-down; }; - hdmi_pinctrl: hdmi-pinctrl { - mux { - pins = "gpio70", "gpio71", "gpio72"; - function = "hdmi"; - }; - - pinconf_ddc { + hdmi_pinctrl: hdmi-pinctrl-state { + ddc-pins { pins = "gpio70", "gpio71"; + function = "hdmi"; bias-pull-up; drive-strength = <2>; }; - pinconf_hpd { + hpd-pins { pins = "gpio72"; + function = "hdmi"; bias-pull-down; drive-strength = <16>; }; }; + + ps_hold_default_state: ps-hold-default-state { + pins = "gpio78"; + function = "ps_hold"; + }; }; diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064-sony-xperia-lagan-yuga.dts b/arch/arm/boot/dts/qcom/qcom-apq8064-sony-xperia-lagan-yuga.dts index 2412aa3e3e8dba..7752f07973f9ea 100644 --- a/arch/arm/boot/dts/qcom/qcom-apq8064-sony-xperia-lagan-yuga.dts +++ b/arch/arm/boot/dts/qcom/qcom-apq8064-sony-xperia-lagan-yuga.dts @@ -373,21 +373,21 @@ &sdcc3 { cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; - pinctrl-0 = <&sdcc3_pins>, <&sdcc3_cd_pin_a>; + pinctrl-0 = <&sdcc3_default_state>, <&sdcc3_cd_pin_a>; status = "okay"; }; &tlmm_pinmux { - gsbi5_uart_pin_a: gsbi5-uart-pin-active { - rx { + gsbi5_uart_pin_a: gsbi5-uart-pin-active-state { + rx-pins { pins = "gpio52"; function = "gsbi5"; drive-strength = <2>; bias-pull-up; }; - tx { + tx-pins { pins = "gpio51"; function = "gsbi5"; drive-strength = <4>; @@ -396,7 +396,7 @@ tx { }; - sdcc3_cd_pin_a: sdcc3-cd-pin-active { + sdcc3_cd_pin_a: sdcc3-cd-pin-active-state { pins = "gpio26"; function = "gpio"; diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi index 769e151747c354..ac7494ed633e1b 100644 --- a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi @@ -302,7 +302,7 @@ tlmm_pinmux: pinctrl@800000 { interrupts = ; pinctrl-names = "default"; - pinctrl-0 = <&ps_hold>; + pinctrl-0 = <&ps_hold_default_state>; }; sfpb_wrapper_mutex: syscon@1200000 { @@ -435,8 +435,8 @@ gsbi1_serial: serial@12450000 { gsbi1_i2c: i2c@12460000 { compatible = "qcom,i2c-qup-v1.1.1"; - pinctrl-0 = <&i2c1_pins>; - pinctrl-1 = <&i2c1_pins_sleep>; + pinctrl-0 = <&i2c1_default_state>; + pinctrl-1 = <&i2c1_sleep_state>; pinctrl-names = "default", "sleep"; reg = <0x12460000 0x1000>; interrupts = ; @@ -465,8 +465,8 @@ gsbi2: gsbi@12480000 { gsbi2_i2c: i2c@124a0000 { compatible = "qcom,i2c-qup-v1.1.1"; reg = <0x124a0000 0x1000>; - pinctrl-0 = <&i2c2_pins>; - pinctrl-1 = <&i2c2_pins_sleep>; + pinctrl-0 = <&i2c2_default_state>; + pinctrl-1 = <&i2c2_sleep_state>; pinctrl-names = 
"default", "sleep"; interrupts = ; clocks = <&gcc GSBI2_QUP_CLK>, <&gcc GSBI2_H_CLK>; @@ -489,8 +489,8 @@ gsbi3: gsbi@16200000 { ranges; gsbi3_i2c: i2c@16280000 { compatible = "qcom,i2c-qup-v1.1.1"; - pinctrl-0 = <&i2c3_pins>; - pinctrl-1 = <&i2c3_pins_sleep>; + pinctrl-0 = <&i2c3_default_state>; + pinctrl-1 = <&i2c3_sleep_state>; pinctrl-names = "default", "sleep"; reg = <0x16280000 0x1000>; interrupts = ; @@ -528,8 +528,8 @@ gsbi4_serial: serial@16340000 { gsbi4_i2c: i2c@16380000 { compatible = "qcom,i2c-qup-v1.1.1"; - pinctrl-0 = <&i2c4_pins>; - pinctrl-1 = <&i2c4_pins_sleep>; + pinctrl-0 = <&i2c4_default_state>; + pinctrl-1 = <&i2c4_sleep_state>; pinctrl-names = "default", "sleep"; reg = <0x16380000 0x1000>; interrupts = ; @@ -565,8 +565,8 @@ gsbi5_spi: spi@1a280000 { compatible = "qcom,spi-qup-v1.1.1"; reg = <0x1a280000 0x1000>; interrupts = ; - pinctrl-0 = <&spi5_default>; - pinctrl-1 = <&spi5_sleep>; + pinctrl-0 = <&spi5_default_state>; + pinctrl-1 = <&spi5_sleep_state>; pinctrl-names = "default", "sleep"; clocks = <&gcc GSBI5_QUP_CLK>, <&gcc GSBI5_H_CLK>; clock-names = "core", "iface"; @@ -599,8 +599,8 @@ gsbi6_serial: serial@16540000 { gsbi6_i2c: i2c@16580000 { compatible = "qcom,i2c-qup-v1.1.1"; - pinctrl-0 = <&i2c6_pins>; - pinctrl-1 = <&i2c6_pins_sleep>; + pinctrl-0 = <&i2c6_default_state>; + pinctrl-1 = <&i2c6_sleep_state>; pinctrl-names = "default", "sleep"; reg = <0x16580000 0x1000>; interrupts = ; @@ -635,8 +635,8 @@ gsbi7_serial: serial@16640000 { gsbi7_i2c: i2c@16680000 { compatible = "qcom,i2c-qup-v1.1.1"; - pinctrl-0 = <&i2c7_pins>; - pinctrl-1 = <&i2c7_pins_sleep>; + pinctrl-0 = <&i2c7_default_state>; + pinctrl-1 = <&i2c7_sleep_state>; pinctrl-names = "default", "sleep"; reg = <0x16680000 0x1000>; interrupts = ; @@ -871,7 +871,6 @@ sata_phy0: phy@1b400000 { compatible = "qcom,apq8064-sata-phy"; status = "disabled"; reg = <0x1b400000 0x200>; - reg-names = "phy_mem"; clocks = <&gcc SATA_PHY_CFG_CLK>; clock-names = "cfg"; #phy-cells = <0>; @@ -890,9 +889,9 @@ sata0: sata@29000000 { <&gcc SATA_PMALIVE_CLK>; clock-names = "slave_iface", "iface", - "bus", + "core", "rxoob", - "core_pmalive"; + "pmalive"; assigned-clocks = <&gcc SATA_RXOOB_CLK>, <&gcc SATA_PMALIVE_CLK>; @@ -945,7 +944,7 @@ sdcc4: mmc@121c0000 { dmas = <&sdcc4bam 2>, <&sdcc4bam 1>; dma-names = "tx", "rx"; pinctrl-names = "default"; - pinctrl-0 = <&sdc4_gpios>; + pinctrl-0 = <&sdc4_default_state>; }; sdcc4bam: dma-controller@121c2000 { @@ -962,7 +961,7 @@ sdcc1: mmc@12400000 { status = "disabled"; compatible = "arm,pl18x", "arm,primecell"; pinctrl-names = "default"; - pinctrl-0 = <&sdcc1_pins>; + pinctrl-0 = <&sdcc1_default_state>; arm,primecell-periphid = <0x00051180>; reg = <0x12400000 0x2000>; interrupts = ; diff --git a/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi index 2b52e5d5eb5170..014e6c5ee88984 100644 --- a/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-apq8084.dtsi @@ -792,7 +792,7 @@ smd-edge { qcom,smd-edge = <15>; rpm-requests { - compatible = "qcom,rpm-apq8084"; + compatible = "qcom,rpm-apq8084", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; regulators-0 { diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi index da67d55fa55730..0d23c03fae33f1 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-ipq4018-ap120c-ac.dtsi @@ -28,46 +28,42 @@ key-reset { }; &tlmm { - i2c0_pins: i2c0_pinmux { - mux_i2c { - function = 
"blsp_i2c0"; - pins = "gpio58", "gpio59"; - drive-strength = <16>; - bias-disable; - }; + i2c0_pins: i2c0-state { + function = "blsp_i2c0"; + pins = "gpio58", "gpio59"; + drive-strength = <16>; + bias-disable; }; - mdio_pins: mdio_pinmux { - mux_mdio { + mdio_pins: mdio-state { + mdio-pins { pins = "gpio53"; function = "mdio"; bias-pull-up; }; - mux_mdc { + mdc-pins { pins = "gpio52"; function = "mdc"; bias-pull-up; }; }; - serial0_pins: serial0_pinmux { - mux_uart { - pins = "gpio60", "gpio61"; - function = "blsp_uart0"; - bias-disable; - }; + serial0_pins: serial0-state { + pins = "gpio60", "gpio61"; + function = "blsp_uart0"; + bias-disable; }; - spi0_pins: spi0_pinmux { - mux_spi { + spi0_pins: spi0-state { + spi0-pins { function = "blsp_spi0"; pins = "gpio55", "gpio56", "gpio57"; drive-strength = <12>; bias-disable; }; - mux_cs { + spi0-cs-pins { function = "gpio"; pins = "gpio54", "gpio4"; drive-strength = <2>; diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts b/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts index 365fbac417fd13..ac3b30072a2271 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts +++ b/arch/arm/boot/dts/qcom/qcom-ipq4018-jalapeno.dts @@ -11,40 +11,35 @@ / { }; &tlmm { - mdio_pins: mdio_pinmux { - pinmux_1 { + mdio_pins: mdio-state { + mdio-pins { pins = "gpio53"; function = "mdio"; + bias-pull-up; }; - pinmux_2 { + mdc-pins { pins = "gpio52"; function = "mdc"; - }; - - pinconf { - pins = "gpio52", "gpio53"; bias-pull-up; }; }; - serial_pins: serial_pinmux { - mux { - pins = "gpio60", "gpio61"; - function = "blsp_uart0"; - bias-disable; - }; + serial_pins: serial-state{ + pins = "gpio60", "gpio61"; + function = "blsp_uart0"; + bias-disable; }; - spi_0_pins: spi_0_pinmux { - pin { + spi_0_pins: spi-0-state { + spi0-pins { function = "blsp_spi0"; pins = "gpio55", "gpio56", "gpio57"; drive-strength = <2>; bias-disable; }; - pin_cs { + spi0-cs-pins { function = "gpio"; pins = "gpio54", "gpio59"; drive-strength = <2>; diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk01.1.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk01.1.dtsi index f7ac8f9d0b6fc0..efbe89dd47937f 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk01.1.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk01.1.dtsi @@ -34,30 +34,22 @@ &prng { }; &tlmm { - serial_pins: serial_pinmux { - mux { - pins = "gpio60", "gpio61"; - function = "blsp_uart0"; - bias-disable; - }; + serial_pins: serial-state { + pins = "gpio60", "gpio61"; + function = "blsp_uart0"; + bias-disable; }; - spi_0_pins: spi_0_pinmux { - pinmux { - function = "blsp_spi0"; - pins = "gpio55", "gpio56", "gpio57"; - }; - pinmux_cs { - function = "gpio"; - pins = "gpio54"; - }; - pinconf { + spi_0_pins: spi-0-state { + spi0-pins { pins = "gpio55", "gpio56", "gpio57"; + function = "blsp_spi0"; drive-strength = <12>; bias-disable; }; - pinconf_cs { + spi0-cs-pins { pins = "gpio54"; + function = "gpio"; drive-strength = <2>; bias-disable; output-high; diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk04.1.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk04.1.dtsi index 374af6dd360a72..91e296d2ea827a 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk04.1.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk04.1.dtsi @@ -24,26 +24,26 @@ memory { soc { pinctrl@1000000 { - serial_0_pins: serial0-pinmux { + serial_0_pins: serial0-state { pins = "gpio16", "gpio17"; function = "blsp_uart0"; bias-disable; }; - serial_1_pins: serial1-pinmux { + serial_1_pins: serial1-state { pins = "gpio8", "gpio9", "gpio10", "gpio11"; 
function = "blsp_uart1"; bias-disable; }; - spi_0_pins: spi-0-pinmux { - pinmux { + spi_0_pins: spi-0-state { + spi0-pins { function = "blsp_spi0"; pins = "gpio13", "gpio14", "gpio15"; bias-disable; }; - pinmux_cs { + spi0-cs-pins { function = "gpio"; pins = "gpio12"; bias-disable; @@ -51,13 +51,13 @@ pinmux_cs { }; }; - i2c_0_pins: i2c-0-pinmux { + i2c_0_pins: i2c-0-state { pins = "gpio20", "gpio21"; function = "blsp_i2c0"; bias-disable; }; - nand_pins: nand-pins { + nand_pins: nand-state { pins = "gpio53", "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio62", "gpio63", diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c1.dts b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c1.dts index ea2987fcbff8d3..41c5874f6f976b 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c1.dts +++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c1.dts @@ -19,20 +19,20 @@ spi@78b6000 { }; pinctrl@1000000 { - serial_1_pins: serial1-pinmux { + serial_1_pins: serial1-state { pins = "gpio8", "gpio9", "gpio10", "gpio11"; function = "blsp_uart1"; bias-disable; }; - spi_0_pins: spi-0-pinmux { - pinmux { + spi_0_pins: spi-0-state { + spi0-pins { function = "blsp_spi0"; pins = "gpio13", "gpio14", "gpio15"; bias-disable; }; - pinmux_cs { + spio-cs-pins { function = "gpio"; pins = "gpio12"; bias-disable; diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c2.dts b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c2.dts index bd3553dd2070bf..67ee99d6975793 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c2.dts +++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1-c2.dts @@ -9,7 +9,7 @@ / { soc { pinctrl@1000000 { - serial_1_pins: serial1-pinmux { + serial_1_pins: serial1-state { pins = "gpio8", "gpio9"; function = "blsp_uart1"; bias-disable; diff --git a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi index 7ef635997efa45..cc88cf5f0d9bae 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-ipq4019-ap.dk07.1.dtsi @@ -24,19 +24,19 @@ chosen { soc { pinctrl@1000000 { - serial_0_pins: serial0-pinmux { + serial_0_pins: serial0-state { pins = "gpio16", "gpio17"; function = "blsp_uart0"; bias-disable; }; - i2c_0_pins: i2c-0-pinmux { + i2c_0_pins: i2c-0-state { pins = "gpio20", "gpio21"; function = "blsp_i2c0"; bias-disable; }; - nand_pins: nand-pins { + nand_pins: nand-state { pins = "gpio53", "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio62", "gpio63", diff --git a/arch/arm/boot/dts/qcom/qcom-ipq8064-ap148.dts b/arch/arm/boot/dts/qcom/qcom-ipq8064-ap148.dts index a654d3c22c4f55..5a8bf1a6f5597b 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq8064-ap148.dts +++ b/arch/arm/boot/dts/qcom/qcom-ipq8064-ap148.dts @@ -7,12 +7,11 @@ / { soc { pinmux@800000 { - buttons_pins: buttons_pins { - mux { - pins = "gpio54", "gpio65"; - drive-strength = <2>; - bias-pull-up; - }; + buttons_pins: buttons-state { + pins = "gpio54", "gpio65"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; }; }; diff --git a/arch/arm/boot/dts/qcom/qcom-ipq8064-rb3011.dts b/arch/arm/boot/dts/qcom/qcom-ipq8064-rb3011.dts index 12e806adcda803..f09da9460c86af 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq8064-rb3011.dts +++ b/arch/arm/boot/dts/qcom/qcom-ipq8064-rb3011.dts @@ -404,59 +404,49 @@ main@800000 { }; &qcom_pinmux { - buttons_pins: buttons_pins { - mux { - pins = "gpio66"; - drive-strength = <16>; - bias-disable; - }; + buttons_pins: buttons-state { + pins = "gpio66"; + function = "gpio"; + 
drive-strength = <16>; + bias-disable; }; - leds_pins: leds_pins { - mux { - pins = "gpio33"; - drive-strength = <16>; - bias-disable; - }; + leds_pins: leds-state { + pins = "gpio33"; + function = "gpio"; + drive-strength = <16>; + bias-disable; }; - mdio1_pins: mdio1_pins { - mux { - pins = "gpio10", "gpio11"; - function = "gpio"; - drive-strength = <8>; - bias-disable; - }; + mdio1_pins: mdio1-state { + pins = "gpio10", "gpio11"; + function = "gpio"; + drive-strength = <8>; + bias-disable; }; - sw0_reset_pin: sw0_reset_pin { - mux { - pins = "gpio16"; - drive-strength = <16>; - function = "gpio"; - bias-disable; - input-disable; - }; + sw0_reset_pin: sw0-reset-state { + pins = "gpio16"; + drive-strength = <16>; + function = "gpio"; + bias-disable; + input-disable; }; - sw1_reset_pin: sw1_reset_pin { - mux { - pins = "gpio17"; - drive-strength = <16>; - function = "gpio"; - bias-disable; - input-disable; - }; + sw1_reset_pin: sw1-reset-state { + pins = "gpio17"; + drive-strength = <16>; + function = "gpio"; + bias-disable; + input-disable; }; - usb1_pwr_en_pins: usb1_pwr_en_pins { - mux { - pins = "gpio4"; - function = "gpio"; - drive-strength = <16>; - bias-disable; - output-high; - }; + usb1_pwr_en_pins: usb1-pwr-en-state { + pins = "gpio4"; + function = "gpio"; + drive-strength = <16>; + bias-disable; + output-high; }; }; diff --git a/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi index da0fd75f471111..759a59c2bdbcfa 100644 --- a/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-ipq8064.dtsi @@ -399,70 +399,58 @@ qcom_pinmux: pinmux@800000 { #interrupt-cells = <2>; interrupts = ; - pcie0_pins: pcie0_pinmux { - mux { - pins = "gpio3"; - function = "pcie1_rst"; - drive-strength = <12>; - bias-disable; - }; + pcie0_pins: pcie0-state { + pins = "gpio3"; + function = "pcie1_rst"; + drive-strength = <12>; + bias-disable; }; - pcie1_pins: pcie1_pinmux { - mux { - pins = "gpio48"; - function = "pcie2_rst"; - drive-strength = <12>; - bias-disable; - }; + pcie1_pins: pcie1-state { + pins = "gpio48"; + function = "pcie2_rst"; + drive-strength = <12>; + bias-disable; }; - pcie2_pins: pcie2_pinmux { - mux { - pins = "gpio63"; - function = "pcie3_rst"; - drive-strength = <12>; - bias-disable; - }; + pcie2_pins: pcie2-state { + pins = "gpio63"; + function = "pcie3_rst"; + drive-strength = <12>; + bias-disable; }; - i2c4_pins: i2c4-default { + i2c4_pins: i2c4-state { pins = "gpio12", "gpio13"; function = "gsbi4"; drive-strength = <12>; bias-disable; }; - spi_pins: spi_pins { - mux { - pins = "gpio18", "gpio19", "gpio21"; - function = "gsbi5"; - drive-strength = <10>; - bias-none; - }; + spi_pins: spi-state { + pins = "gpio18", "gpio19", "gpio21"; + function = "gsbi5"; + drive-strength = <10>; + bias-disable; }; - leds_pins: leds_pins { - mux { - pins = "gpio7", "gpio8", "gpio9", - "gpio26", "gpio53"; - function = "gpio"; - drive-strength = <2>; - bias-pull-down; - output-low; - }; + leds_pins: leds-state { + pins = "gpio7", "gpio8", "gpio9", + "gpio26", "gpio53"; + function = "gpio"; + drive-strength = <2>; + bias-pull-down; + output-low; }; - buttons_pins: buttons_pins { - mux { - pins = "gpio54"; - drive-strength = <2>; - bias-pull-up; - }; + buttons_pins: buttons-state { + pins = "gpio54"; + drive-strength = <2>; + bias-pull-up; }; - nand_pins: nand_pins { - mux { + nand_pins: nand-state { + nand-pins { pins = "gpio34", "gpio35", "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", @@ -473,14 +461,14 @@ mux { 
bias-disable; }; - pullups { + nand-pullup-pins { pins = "gpio39"; function = "nand"; drive-strength = <10>; bias-pull-up; }; - hold { + nand-hold-pins { pins = "gpio40", "gpio41", "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47"; @@ -490,25 +478,21 @@ hold { }; }; - mdio0_pins: mdio0-pins { - mux { - pins = "gpio0", "gpio1"; - function = "mdio"; - drive-strength = <8>; - bias-disable; - }; + mdio0_pins: mdio0-state { + pins = "gpio0", "gpio1"; + function = "mdio"; + drive-strength = <8>; + bias-disable; }; - rgmii2_pins: rgmii2-pins { - mux { - pins = "gpio27", "gpio28", "gpio29", - "gpio30", "gpio31", "gpio32", - "gpio51", "gpio52", "gpio59", - "gpio60", "gpio61", "gpio62"; - function = "rgmii2"; - drive-strength = <8>; - bias-disable; - }; + rgmii2_pins: rgmii2-state { + pins = "gpio27", "gpio28", "gpio29", + "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", + "gpio60", "gpio61", "gpio62"; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; }; }; @@ -1292,7 +1276,7 @@ sata: sata@29000000 { <&gcc SATA_A_CLK>, <&gcc SATA_RXOOB_CLK>, <&gcc SATA_PMALIVE_CLK>; - clock-names = "slave_face", "iface", "core", + clock-names = "slave_iface", "iface", "core", "rxoob", "pmalive"; assigned-clocks = <&gcc SATA_RXOOB_CLK>, <&gcc SATA_PMALIVE_CLK>; diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi index 8839b23fc69364..ca76bf8af75e46 100644 --- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi @@ -84,6 +84,32 @@ smem_region: smem@fa00000 { }; }; +&blsp1_i2c2 { + status = "okay"; + + magnetometer: magnetometer@c { + compatible = "asahi-kasei,ak09911"; + reg = <0x0c>; + + vdd-supply = <&pm8226_l15>; + vid-supply = <&pm8226_l6>; + }; + + accelerometer: accelerometer@1e { + compatible = "kionix,kx022-1020"; + reg = <0x1e>; + + interrupts-extended = <&tlmm 63 IRQ_TYPE_EDGE_RISING>; + + vdd-supply = <&pm8226_l15>; + vddio-supply = <&pm8226_l6>; + + mount-matrix = "1", "0", "0", + "0", "-1", "0", + "0", "0", "1"; + }; +}; + &blsp1_i2c5 { status = "okay"; diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts index 992b7115b5f8f5..a28a83cb534055 100644 --- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts +++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-moneypenny.dts @@ -10,6 +10,9 @@ #include "qcom-msm8226-microsoft-common.dtsi" +/* This device has no magnetometer */ +/delete-node/ &magnetometer; + / { model = "Nokia Lumia 630"; compatible = "microsoft,moneypenny", "qcom,msm8226"; diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi index b2f92ad6499acd..3a685ff7e8ccf5 100644 --- a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi @@ -12,6 +12,7 @@ #include #include #include +#include / { #address-cells = <1>; @@ -44,8 +45,11 @@ CPU0: cpu@0 { device_type = "cpu"; reg = <0>; next-level-cache = <&L2>; + clocks = <&apcs>; + operating-points-v2 = <&cpu_opp_table>; qcom,acc = <&acc0>; qcom,saw = <&saw0>; + #cooling-cells = <2>; }; CPU1: cpu@1 { @@ -54,8 +58,11 @@ CPU1: cpu@1 { device_type = "cpu"; reg = <1>; next-level-cache = <&L2>; + clocks = <&apcs>; + operating-points-v2 = <&cpu_opp_table>; qcom,acc = <&acc1>; qcom,saw = <&saw1>; + #cooling-cells = <2>; }; CPU2: cpu@2 { @@ -64,8 +71,11 @@ CPU2: cpu@2 { device_type = "cpu"; reg = <2>; 
next-level-cache = <&L2>; + clocks = <&apcs>; + operating-points-v2 = <&cpu_opp_table>; qcom,acc = <&acc2>; qcom,saw = <&saw2>; + #cooling-cells = <2>; }; CPU3: cpu@3 { @@ -74,8 +84,11 @@ CPU3: cpu@3 { device_type = "cpu"; reg = <3>; next-level-cache = <&L2>; + clocks = <&apcs>; + operating-points-v2 = <&cpu_opp_table>; qcom,acc = <&acc3>; qcom,saw = <&saw3>; + #cooling-cells = <2>; }; L2: l2-cache { @@ -98,6 +111,29 @@ memory@0 { reg = <0x0 0x0>; }; + cpu_opp_table: opp-table-cpu { + compatible = "operating-points-v2"; + opp-shared; + + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + }; + + opp-384000000 { + opp-hz = /bits/ 64 <384000000>; + }; + + opp-600000000 { + opp-hz = /bits/ 64 <600000000>; + }; + + opp-787200000 { + opp-hz = /bits/ 64 <787200000>; + }; + + /* Higher CPU frequencies need speedbin support */ + }; + pmu { compatible = "arm,cortex-a7-pmu"; interrupts = ; - qcom,ipc = <&apcs 8 0>; + mboxes = <&apcs 0>; qcom,smd-edge = <15>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8226"; + compatible = "qcom,rpm-msm8226", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; rpmcc: clock-controller { @@ -199,7 +235,7 @@ smp2p-adsp { interrupt-parent = <&intc>; interrupts = ; - qcom,ipc = <&apcs 8 10>; + mboxes = <&apcs 10>; qcom,local-pid = <0>; qcom,remote-pid = <2>; @@ -231,9 +267,75 @@ intc: interrupt-controller@f9000000 { #interrupt-cells = <3>; }; - apcs: syscon@f9011000 { - compatible = "syscon"; + apcs: mailbox@f9011000 { + compatible = "qcom,msm8226-apcs-kpss-global", + "qcom,msm8916-apcs-kpss-global", "syscon"; reg = <0xf9011000 0x1000>; + #mbox-cells = <1>; + clocks = <&a7pll>, <&gcc GPLL0_VOTE>; + clock-names = "pll", "aux"; + #clock-cells = <0>; + }; + + a7pll: clock@f9016000 { + compatible = "qcom,msm8226-a7pll"; + reg = <0xf9016000 0x40>; + #clock-cells = <0>; + clocks = <&xo_board>; + clock-names = "xo"; + operating-points-v2 = <&a7pll_opp_table>; + + a7pll_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-768000000 { + opp-hz = /bits/ 64 <768000000>; + }; + + opp-787200000 { + opp-hz = /bits/ 64 <787200000>; + }; + + opp-998400000 { + opp-hz = /bits/ 64 <998400000>; + }; + + opp-1094400000 { + opp-hz = /bits/ 64 <1094400000>; + }; + + opp-1190400000 { + opp-hz = /bits/ 64 <1190400000>; + }; + + opp-1305600000 { + opp-hz = /bits/ 64 <1305600000>; + }; + + opp-1344000000 { + opp-hz = /bits/ 64 <1344000000>; + }; + + opp-1401600000 { + opp-hz = /bits/ 64 <1401600000>; + }; + + opp-1497600000 { + opp-hz = /bits/ 64 <1497600000>; + }; + + opp-1593600000 { + opp-hz = /bits/ 64 <1593600000>; + }; + + opp-1689600000 { + opp-hz = /bits/ 64 <1689600000>; + }; + + opp-1785600000 { + opp-hz = /bits/ 64 <1785600000>; + }; + }; }; saw_l2: power-manager@f9012000 { @@ -571,7 +673,7 @@ gcc: clock-controller@fc400000 { #reset-cells = <1>; #power-domain-cells = <1>; - clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, + clocks = <&xo_board>, <&sleep_clk>; clock-names = "xo", "sleep_clk"; @@ -1130,7 +1232,7 @@ adsp: remoteproc@fe200000 { smd-edge { interrupts = ; - qcom,ipc = <&apcs 8 8>; + mboxes = <&apcs 8>; qcom,smd-edge = <1>; label = "lpass"; @@ -1159,6 +1261,16 @@ cpu0-thermal { thermal-sensors = <&tsens 5>; + cooling-maps { + map0 { + trip = <&cpu_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + trips { cpu_alert0: trip0 { temperature = <75000>; @@ -1180,6 +1292,16 @@ cpu1-thermal { 
thermal-sensors = <&tsens 2>; + cooling-maps { + map0 { + trip = <&cpu_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + trips { cpu_alert1: trip0 { temperature = <75000>; diff --git a/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts b/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts index 53a6d4e85959fb..55077a5f2e34f8 100644 --- a/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts +++ b/arch/arm/boot/dts/qcom/qcom-msm8926-microsoft-tesla.dts @@ -13,6 +13,9 @@ /* This device has touchscreen on i2c1 instead */ /delete-node/ &touchscreen; +/* The magnetometer used on this device is currently unknown */ +/delete-node/ &magnetometer; + / { model = "Nokia Lumia 830"; compatible = "microsoft,tesla", "qcom,msm8926", "qcom,msm8226"; diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi index 15568579459ab3..1bd87170252df7 100644 --- a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi @@ -136,7 +136,7 @@ smd-edge { qcom,smd-edge = <15>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8974"; + compatible = "qcom,rpm-msm8974", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; rpmcc: clock-controller { @@ -149,7 +149,7 @@ rpmcc: clock-controller { }; }; - reserved-memory { + reserved_memory: reserved-memory { #address-cells = <1>; #size-cells = <1>; ranges; diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi index b5443fd5b425e0..d3959741d2ea9e 100644 --- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte-common.dtsi @@ -438,6 +438,19 @@ fuelgauge_pin: fuelgauge-int-state { }; }; +&reserved_memory { + ramoops@3e8e0000 { + compatible = "ramoops"; + reg = <0x3e8e0000 0x200000>; + + console-size = <0x100000>; + record-size = <0x10000>; + ftrace-size = <0x10000>; + pmsg-size = <0x80000>; + ecc-size = <8>; + }; +}; + &remoteproc_adsp { status = "okay"; cx-supply = <&pma8084_s2>; diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi index 23e633387c24dd..d4572146d135d9 100644 --- a/arch/arm/boot/dts/rockchip/rk3128.dtsi +++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi @@ -254,6 +254,30 @@ power-domain@RK3128_PD_GPU { }; }; + vpu: video-codec@10106000 { + compatible = "rockchip,rk3128-vpu", "rockchip,rk3066-vpu"; + reg = <0x10106000 0x800>; + interrupts = , + ; + interrupt-names = "vepu", "vdpu"; + clocks = <&cru ACLK_VDPU>, <&cru HCLK_VDPU>, + <&cru ACLK_VEPU>, <&cru HCLK_VEPU>; + clock-names = "aclk_vdpu", "hclk_vdpu", + "aclk_vepu", "hclk_vepu"; + iommus = <&vpu_mmu>; + power-domains = <&power RK3128_PD_VIDEO>; + }; + + vpu_mmu: iommu@10106800 { + compatible = "rockchip,iommu"; + reg = <0x10106800 0x100>; + interrupts = ; + clocks = <&cru ACLK_VEPU>, <&cru HCLK_VDPU>; + clock-names = "aclk", "iface"; + power-domains = <&power RK3128_PD_VIDEO>; + #iommu-cells = <0>; + }; + vop: vop@1010e000 { compatible = "rockchip,rk3126-vop"; reg = <0x1010e000 0x300>; @@ -429,7 +453,7 @@ sfc: spi@1020c000 { compatible = "rockchip,sfc"; reg = <0x1020c000 0x8000>; interrupts = ; - clocks = <&cru SCLK_SFC>, <&cru 479>; + clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>; clock-names = "clk_sfc", "hclk_sfc"; status = "disabled"; }; diff --git 
a/arch/arm/boot/dts/rockchip/rv1108-elgin-r1.dts b/arch/arm/boot/dts/rockchip/rv1108-elgin-r1.dts index 2d9994379eb235..89ca2f8d38098d 100644 --- a/arch/arm/boot/dts/rockchip/rv1108-elgin-r1.dts +++ b/arch/arm/boot/dts/rockchip/rv1108-elgin-r1.dts @@ -168,8 +168,8 @@ &spi { pinctrl-0 = <&spim1_clk &spim1_cs0 &spim1_tx &spim1_rx>; status = "okay"; - dh2228fv: dac@0 { - compatible = "rohm,dh2228fv"; + display: display@0 { + compatible = "elgin,jg10309-01"; reg = <0>; spi-max-frequency = <24000000>; spi-cpha; diff --git a/arch/arm/boot/dts/rockchip/rv1126-pinctrl.dtsi b/arch/arm/boot/dts/rockchip/rv1126-pinctrl.dtsi index 06b1d7f2d8585a..35ef6732281fd9 100644 --- a/arch/arm/boot/dts/rockchip/rv1126-pinctrl.dtsi +++ b/arch/arm/boot/dts/rockchip/rv1126-pinctrl.dtsi @@ -97,6 +97,156 @@ i2c2_xfer: i2c2-xfer { <0 RK_PC3 1 &pcfg_pull_none_drv_level_0_smt>; }; }; + i2c3 { + /omit-if-no-ref/ + i2c3m0_xfer: i2c3m0-xfer { + rockchip,pins = + /* i2c3_scl_m0 */ + <3 RK_PA4 5 &pcfg_pull_none>, + /* i2c3_sda_m0 */ + <3 RK_PA5 5 &pcfg_pull_none>; + }; + /omit-if-no-ref/ + i2c3m1_xfer: i2c3m1-xfer { + rockchip,pins = + /* i2c3_scl_m1 */ + <2 RK_PD4 7 &pcfg_pull_none>, + /* i2c3_sda_m1 */ + <2 RK_PD5 7 &pcfg_pull_none>; + }; + /omit-if-no-ref/ + i2c3m2_xfer: i2c3m2-xfer { + rockchip,pins = + /* i2c3_scl_m2 */ + <1 RK_PD6 3 &pcfg_pull_none>, + /* i2c3_sda_m2 */ + <1 RK_PD7 3 &pcfg_pull_none>; + }; + }; + i2s0 { + i2s0m0_lrck_tx: i2s0m0-lrck-tx { + rockchip,pins = + /* i2s0_lrck_tx_m0 */ + <3 RK_PD3 1 &pcfg_pull_none>; + }; + i2s0m0_lrck_rx: i2s0m0-lrck-rx { + rockchip,pins = + /* i2s0_lrck_rx_m0 */ + <3 RK_PD4 1 &pcfg_pull_none>; + }; + i2s0m0_mclk: i2s0m0-mclk { + rockchip,pins = + /* i2s0_mclk_m0 */ + <3 RK_PD2 1 &pcfg_pull_none>; + }; + i2s0m0_sclk_rx: i2s0m0-sclk-rx { + rockchip,pins = + /* i2s0_sclk_rx_m0 */ + <3 RK_PD1 1 &pcfg_pull_none>; + }; + i2s0m0_sclk_tx: i2s0m0-sclk-tx { + rockchip,pins = + /* i2s0_sclk_tx_m0 */ + <3 RK_PD0 1 &pcfg_pull_none>; + }; + i2s0m0_sdi0: i2s0m0-sdi0 { + rockchip,pins = + /* i2s0_sdi0_m0 */ + <3 RK_PD6 1 &pcfg_pull_none>; + }; + i2s0m0_sdo0: i2s0m0-sdo0 { + rockchip,pins = + /* i2s0_sdo0_m0 */ + <3 RK_PD5 1 &pcfg_pull_none>; + }; + i2s0m0_sdo1_sdi3: i2s0m0-sdo1-sdi3 { + rockchip,pins = + /* i2s0_sdo1_sdi3_m0 */ + <3 RK_PD7 1 &pcfg_pull_none>; + }; + i2s0m0_sdo2_sdi2: i2s0m0-sdo2-sdi2 { + rockchip,pins = + /* i2s0_sdo2_sdi2_m0 */ + <4 RK_PA0 1 &pcfg_pull_none>; + }; + i2s0m0_sdo3_sdi1: i2s0m0-sdo3-sdi1 { + rockchip,pins = + /* i2s0_sdo3_sdi1_m0 */ + <4 RK_PA1 1 &pcfg_pull_none>; + }; + i2s0m1_lrck_tx: i2s0m1-lrck-tx { + rockchip,pins = + /* i2s0_lrck_tx_m1 */ + <3 RK_PA5 3 &pcfg_pull_none>; + }; + i2s0m1_lrck_rx: i2s0m1-lrck-rx { + rockchip,pins = + /* i2s0_lrck_rx_m1 */ + <3 RK_PB2 3 &pcfg_pull_none>; + }; + i2s0m1_mclk: i2s0m1-mclk { + rockchip,pins = + /* i2s0_mclk_m1 */ + <3 RK_PB0 3 &pcfg_pull_none>; + }; + i2s0m1_sclk_rx: i2s0m1-sclk-rx { + rockchip,pins = + /* i2s0_sclk_rx_m1 */ + <3 RK_PB1 3 &pcfg_pull_none>; + }; + i2s0m1_sclk_tx: i2s0m1-sclk-tx { + rockchip,pins = + /* i2s0_sclk_tx_m1 */ + <3 RK_PA4 3 &pcfg_pull_none>; + }; + i2s0m1_sdi0: i2s0m1-sdi0 { + rockchip,pins = + /* i2s0_sdi0_m1 */ + <3 RK_PA7 3 &pcfg_pull_none>; + }; + i2s0m1_sdo0: i2s0m1-sdo0 { + rockchip,pins = + /* i2s0_sdo0_m1 */ + <3 RK_PA6 3 &pcfg_pull_none>; + }; + i2s0m1_sdo1_sdi3: i2s0m1-sdo1-sdi3 { + rockchip,pins = + /* i2s0_sdo1_sdi3_m1 */ + <3 RK_PB3 3 &pcfg_pull_none>; + }; + i2s0m1_sdo2_sdi2: i2s0m1-sdo2-sdi2 { + rockchip,pins = + /* i2s0_sdo2_sdi2_m1 */ + <3 RK_PB4 3 &pcfg_pull_none>; + 
}; + i2s0m1_sdo3_sdi1: i2s0m1-sdo3-sdi1 { + rockchip,pins = + /* i2s0_sdo3_sdi1_m1 */ + <3 RK_PB5 3 &pcfg_pull_none>; + }; + }; + pwm0 { + /omit-if-no-ref/ + pwm0m0_pins: pwm0m0-pins { + rockchip,pins = + /* pwm0_pin_m0 */ + <0 RK_PB6 3 &pcfg_pull_none>; + }; + /omit-if-no-ref/ + pwm0m1_pins: pwm0m1-pins { + rockchip,pins = + /* pwm0_pin_m1 */ + <2 RK_PB3 5 &pcfg_pull_none>; + }; + }; + pwm1 { + /omit-if-no-ref/ + pwm1m0_pins: pwm1m0-pins { + rockchip,pins = + /* pwm1_pin_m0 */ + <0 RK_PB7 3 &pcfg_pull_none>; + }; + }; pwm2 { /omit-if-no-ref/ pwm2m0_pins: pwm2m0-pins { @@ -104,6 +254,106 @@ pwm2m0_pins: pwm2m0-pins { /* pwm2_pin_m0 */ <0 RK_PC0 3 &pcfg_pull_none>; }; + /omit-if-no-ref/ + pwm2m1_pins: pwm2m1-pins { + rockchip,pins = + /* pwm2_pin_m1 */ + <2 RK_PB1 5 &pcfg_pull_none>; + }; + }; + pwm3 { + /omit-if-no-ref/ + pwm3m0_pins: pwm3m0-pins { + rockchip,pins = + /* pwm3_pin_m0 */ + <0 RK_PC1 3 &pcfg_pull_none>; + }; + }; + pwm4 { + /omit-if-no-ref/ + pwm4m0_pins: pwm4m0-pins { + rockchip,pins = + /* pwm4_pin_m0 */ + <0 RK_PC2 3 &pcfg_pull_none>; + }; + }; + pwm5 { + /omit-if-no-ref/ + pwm5m0_pins: pwm5m0-pins { + rockchip,pins = + /* pwm5_pin_m0 */ + <0 RK_PC3 3 &pcfg_pull_none>; + }; + }; + pwm6 { + /omit-if-no-ref/ + pwm6m0_pins: pwm6m0-pins { + rockchip,pins = + /* pwm6_pin_m0 */ + <0 RK_PB2 3 &pcfg_pull_none>; + }; + /omit-if-no-ref/ + pwm6m1_pins: pwm6m1-pins { + rockchip,pins = + /* pwm6_pin_m1 */ + <2 RK_PD4 5 &pcfg_pull_none>; + }; + }; + pwm7 { + /omit-if-no-ref/ + pwm7m0_pins: pwm7m0-pins { + rockchip,pins = + /* pwm7_pin_m0 */ + <0 RK_PB1 3 &pcfg_pull_none>; + }; + /omit-if-no-ref/ + pwm7m1_pins: pwm7m1-pins { + rockchip,pins = + /* pwm7_pin_m1 */ + <3 RK_PA0 5 &pcfg_pull_none>; + }; + }; + pwm8 { + /omit-if-no-ref/ + pwm8m0_pins: pwm8m0-pins { + rockchip,pins = + /* pwm8_pin_m0 */ + <3 RK_PA4 6 &pcfg_pull_none>; + }; + /omit-if-no-ref/ + pwm8m1_pins: pwm8m1-pins { + rockchip,pins = + /* pwm8_pin_m1 */ + <2 RK_PD7 5 &pcfg_pull_none>; + }; + }; + pwm9 { + /omit-if-no-ref/ + pwm9m0_pins: pwm9m0-pins { + rockchip,pins = + /* pwm9_pin_m0 */ + <3 RK_PA5 6 &pcfg_pull_none>; + }; + /omit-if-no-ref/ + pwm9m1_pins: pwm9m1-pins { + rockchip,pins = + /* pwm9_pin_m1 */ + <2 RK_PD6 5 &pcfg_pull_none>; + }; + }; + pwm10 { + /omit-if-no-ref/ + pwm10m0_pins: pwm10m0-pins { + rockchip,pins = + /* pwm10_pin_m0 */ + <3 RK_PA6 6 &pcfg_pull_none>; + }; + /omit-if-no-ref/ + pwm10m1_pins: pwm10m1-pins { + rockchip,pins = + /* pwm10_pin_m1 */ + <2 RK_PD5 5 &pcfg_pull_none>; + }; }; pwm11 { /omit-if-no-ref/ @@ -112,6 +362,12 @@ pwm11m0_pins: pwm11m0-pins { /* pwm11_pin_m0 */ <3 RK_PA7 6 &pcfg_pull_none>; }; + /omit-if-no-ref/ + pwm11m1_pins: pwm11m1-pins { + rockchip,pins = + /* pwm11_pin_m1 */ + <3 RK_PA1 5 &pcfg_pull_none>; + }; }; rgmii { /omit-if-no-ref/ diff --git a/arch/arm/boot/dts/rockchip/rv1126.dtsi b/arch/arm/boot/dts/rockchip/rv1126.dtsi index bb603cae13dfc7..434846b85c957f 100644 --- a/arch/arm/boot/dts/rockchip/rv1126.dtsi +++ b/arch/arm/boot/dts/rockchip/rv1126.dtsi @@ -22,6 +22,7 @@ / { aliases { i2c0 = &i2c0; i2c2 = &i2c2; + i2c3 = &i2c3; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; @@ -268,6 +269,28 @@ uart1: serial@ff410000 { status = "disabled"; }; + pwm0: pwm@ff430000 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff430000 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&pmucru CLK_PWM0>, <&pmucru PCLK_PWM0>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm0m0_pins>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm1: pwm@ff430010 { 
+ compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff430010 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&pmucru CLK_PWM0>, <&pmucru PCLK_PWM0>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm1m0_pins>; + #pwm-cells = <3>; + status = "disabled"; + }; + pwm2: pwm@ff430020 { compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; reg = <0xff430020 0x10>; @@ -279,6 +302,61 @@ pwm2: pwm@ff430020 { status = "disabled"; }; + pwm3: pwm@ff430030 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff430030 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&pmucru CLK_PWM0>, <&pmucru PCLK_PWM0>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm3m0_pins>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm4: pwm@ff440000 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff440000 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&pmucru CLK_PWM1>, <&pmucru PCLK_PWM1>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm4m0_pins>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm5: pwm@ff440010 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff440010 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&pmucru CLK_PWM1>, <&pmucru PCLK_PWM1>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm5m0_pins>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm6: pwm@ff440020 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff440020 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&pmucru CLK_PWM1>, <&pmucru PCLK_PWM1>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm6m0_pins>; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm7: pwm@ff440030 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff440030 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&pmucru CLK_PWM1>, <&pmucru PCLK_PWM1>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm7m0_pins>; + #pwm-cells = <3>; + status = "disabled"; + }; + pmucru: clock-controller@ff480000 { compatible = "rockchip,rv1126-pmucru"; reg = <0xff480000 0x1000>; @@ -308,6 +386,53 @@ dmac: dma-controller@ff4e0000 { clock-names = "apb_pclk"; }; + i2c3: i2c@ff520000 { + compatible = "rockchip,rv1126-i2c", "rockchip,rk3399-i2c"; + reg = <0xff520000 0x1000>; + interrupts = ; + clocks = <&cru CLK_I2C3>, <&cru PCLK_I2C3>; + clock-names = "i2c", "pclk"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c3m0_xfer>; + rockchip,grf = <&pmugrf>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + pwm8: pwm@ff550000 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff550000 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>; + pinctrl-0 = <&pwm8m0_pins>; + pinctrl-names = "default"; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm9: pwm@ff550010 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff550010 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>; + pinctrl-0 = <&pwm9m0_pins>; + pinctrl-names = "default"; + #pwm-cells = <3>; + status = "disabled"; + }; + + pwm10: pwm@ff550020 { + compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; + reg = <0xff550020 0x10>; + clock-names = "pwm", "pclk"; + clocks = <&cru CLK_PWM2>, <&cru PCLK_PWM2>; + pinctrl-0 = <&pwm10m0_pins>; + pinctrl-names = "default"; + #pwm-cells = <3>; + status = "disabled"; + }; + pwm11: pwm@ff550030 { compatible = "rockchip,rv1126-pwm", "rockchip,rk3328-pwm"; reg = <0xff550030 0x10>; @@ -419,6 +544,32 @@ timer0: 
timer@ff660000 { clock-names = "pclk", "timer"; }; + i2s0: i2s@ff800000 { + compatible = "rockchip,rv1126-i2s-tdm"; + reg = <0xff800000 0x1000>; + interrupts = ; + clocks = <&cru MCLK_I2S0_TX>, <&cru MCLK_I2S0_RX>, <&cru HCLK_I2S0>; + clock-names = "mclk_tx", "mclk_rx", "hclk"; + dmas = <&dmac 20>, <&dmac 19>; + dma-names = "tx", "rx"; + pinctrl-names = "default"; + pinctrl-0 = <&i2s0m0_sclk_tx>, + <&i2s0m0_sclk_rx>, + <&i2s0m0_mclk>, + <&i2s0m0_lrck_tx>, + <&i2s0m0_lrck_rx>, + <&i2s0m0_sdi0>, + <&i2s0m0_sdo0>, + <&i2s0m0_sdo1_sdi3>, + <&i2s0m0_sdo2_sdi2>, + <&i2s0m0_sdo3_sdi1>; + resets = <&cru SRST_I2S0_TX_M>, <&cru SRST_I2S0_RX_M>; + reset-names = "tx-m", "rx-m"; + rockchip,grf = <&grf>; + #sound-dai-cells = <0>; + status = "disabled"; + }; + vop: vop@ffb00000 { compatible = "rockchip,rv1126-vop"; reg = <0xffb00000 0x200>, <0xffb00a00 0x400>; diff --git a/arch/arm/boot/dts/st/Makefile b/arch/arm/boot/dts/st/Makefile index 015903d09323f8..eab3a9bd435f5f 100644 --- a/arch/arm/boot/dts/st/Makefile +++ b/arch/arm/boot/dts/st/Makefile @@ -35,8 +35,11 @@ dtb-$(CONFIG_ARCH_STM32) += \ stm32mp151a-prtt1c.dtb \ stm32mp151a-prtt1s.dtb \ stm32mp151a-dhcor-testbench.dtb \ + stm32mp151c-mecio1r0.dtb \ + stm32mp151c-mect1s.dtb \ stm32mp153c-dhcom-drc02.dtb \ stm32mp153c-dhcor-drc-compact.dtb \ + stm32mp153c-mecio1r1.dtb \ stm32mp157a-avenger96.dtb \ stm32mp157a-dhcor-avenger96.dtb \ stm32mp157a-dk1.dtb \ diff --git a/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi index c9f588a6509445..8db1ec4a3b2686 100644 --- a/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi +++ b/arch/arm/boot/dts/st/stm32mp13-pinctrl.dtsi @@ -94,14 +94,20 @@ pins2 { /omit-if-no-ref/ eth1_rgmii_sleep_pins_a: eth1-rgmii-sleep-0 { pins1 { + pinmux = , /* ETH_MDIO */ + ; /* ETH_MDC */ + bias-disable; + drive-push-pull; + slew-rate = <2>; + }; + + pins2 { pinmux = , /* ETH_RGMII_TXD0 */ , /* ETH_RGMII_TXD1 */ , /* ETH_RGMII_TXD2 */ , /* ETH_RGMII_TXD3 */ , /* ETH_RGMII_TX_CTL */ , /* ETH_RGMII_GTX_CLK */ - , /* ETH_MDIO */ - , /* ETH_MDC */ , /* ETH_RGMII_RXD0 */ , /* ETH_RGMII_RXD1 */ , /* ETH_RGMII_RXD1 */ @@ -178,14 +184,20 @@ pins2 { /omit-if-no-ref/ eth2_rgmii_sleep_pins_a: eth2-rgmii-sleep-0 { pins1 { + pinmux = , /* ETH_MDIO */ + ; /* ETH_MDC */ + bias-disable; + drive-push-pull; + slew-rate = <2>; + }; + + pins2 { pinmux = , /* ETH_RGMII_TXD0 */ , /* ETH_RGMII_TXD1 */ , /* ETH_RGMII_TXD2 */ , /* ETH_RGMII_TXD3 */ , /* ETH_RGMII_TX_CTL */ , /* ETH_RGMII_GTX_CLK */ - , /* ETH_MDIO */ - , /* ETH_MDC */ , /* ETH_RGMII_RXD0 */ , /* ETH_RGMII_RXD1 */ , /* ETH_RGMII_RXD2 */ diff --git a/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts b/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts index bacb70b4256bc3..853dc21449d99a 100644 --- a/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts +++ b/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts @@ -75,6 +75,8 @@ channel@12 { }; ðernet1 { + nvmem-cell-names = "mac-address"; + nvmem-cells = <ðernet_mac1_address>; phy-handle = <ðphy1>; phy-mode = "rgmii-id"; pinctrl-0 = <ð1_rgmii_pins_a>; @@ -94,14 +96,36 @@ ethphy1: ethernet-phy@1 { interrupt-parent = <&gpiog>; interrupts = <12 IRQ_TYPE_LEVEL_LOW>; reg = <1>; + realtek,clkout-disable; reset-assert-us = <15000>; reset-deassert-us = <55000>; reset-gpios = <&gpioa 11 GPIO_ACTIVE_LOW>; + + leds { + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + color = ; + function = LED_FUNCTION_WAN; + linux,default-trigger = "netdev"; + }; + + led@1 { + reg = <1>; + color = ; + function = 
LED_FUNCTION_WAN; + linux,default-trigger = "netdev"; + }; + }; }; }; }; ðernet2 { + nvmem-cell-names = "mac-address"; + nvmem-cells = <ðernet_mac2_address>; phy-handle = <ðphy2>; phy-mode = "rgmii-id"; pinctrl-0 = <ð2_rgmii_pins_a>; @@ -121,9 +145,29 @@ ethphy2: ethernet-phy@1 { interrupt-parent = <&gpiog>; interrupts = <15 IRQ_TYPE_LEVEL_LOW>; reg = <1>; + realtek,clkout-disable; reset-assert-us = <15000>; reset-deassert-us = <55000>; reset-gpios = <&gpiog 8 GPIO_ACTIVE_LOW>; + + leds { + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + color = ; + function = LED_FUNCTION_LAN; + linux,default-trigger = "netdev"; + }; + + led@1 { + reg = <1>; + color = ; + function = LED_FUNCTION_LAN; + linux,default-trigger = "netdev"; + }; + }; }; }; }; diff --git a/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi index ae83e7b1023231..70e132dc6147f5 100644 --- a/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi +++ b/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi @@ -2229,6 +2229,9 @@ pins { , /* SDMMC2_D5 */ , /* SDMMC2_D6 */ ; /* SDMMC2_D7 */ + slew-rate = <1>; + drive-push-pull; + bias-pull-up; }; }; diff --git a/arch/arm/boot/dts/st/stm32mp151a-prtt1a.dts b/arch/arm/boot/dts/st/stm32mp151a-prtt1a.dts index 75874eafde11ef..8e1dd84e0c0a4f 100644 --- a/arch/arm/boot/dts/st/stm32mp151a-prtt1a.dts +++ b/arch/arm/boot/dts/st/stm32mp151a-prtt1a.dts @@ -28,16 +28,12 @@ phy0: ethernet-phy@0 { }; }; -&pwm5_pins_a { - pins { - pinmux = ; /* TIM5_CH1 */ - }; +&{pwm5_pins_a/pins} { + pinmux = ; /* TIM5_CH1 */ }; -&pwm5_sleep_pins_a { - pins { - pinmux = ; /* TIM5_CH1 */ - }; +&{pwm5_sleep_pins_a/pins} { + pinmux = ; /* TIM5_CH1 */ }; &timers5 { diff --git a/arch/arm/boot/dts/st/stm32mp151a-prtt1c.dts b/arch/arm/boot/dts/st/stm32mp151a-prtt1c.dts index c90d815f906b8f..3b33b7093b6868 100644 --- a/arch/arm/boot/dts/st/stm32mp151a-prtt1c.dts +++ b/arch/arm/boot/dts/st/stm32mp151a-prtt1c.dts @@ -168,52 +168,42 @@ &sdmmc2 { status = "okay"; }; -&sdmmc2_b4_od_pins_a { - pins1 { - pinmux = , /* SDMMC2_D0 */ - , /* SDMMC2_D1 */ - , /* SDMMC2_D2 */ - ; /* SDMMC2_D3 */ - }; +&{sdmmc2_b4_od_pins_a/pins1} { + pinmux = , /* SDMMC2_D0 */ + , /* SDMMC2_D1 */ + , /* SDMMC2_D2 */ + ; /* SDMMC2_D3 */ }; -&sdmmc2_b4_pins_a { - pins1 { - pinmux = , /* SDMMC2_D0 */ - , /* SDMMC2_D1 */ - , /* SDMMC2_D2 */ - , /* SDMMC2_D3 */ - ; /* SDMMC2_CMD */ - }; +&{sdmmc2_b4_pins_a/pins1} { + pinmux = , /* SDMMC2_D0 */ + , /* SDMMC2_D1 */ + , /* SDMMC2_D2 */ + , /* SDMMC2_D3 */ + ; /* SDMMC2_CMD */ }; -&sdmmc2_b4_sleep_pins_a { - pins { - pinmux = , /* SDMMC2_D0 */ - , /* SDMMC2_D1 */ - , /* SDMMC2_D2 */ - , /* SDMMC2_D3 */ - , /* SDMMC2_CK */ - ; /* SDMMC2_CMD */ - }; +&{sdmmc2_b4_sleep_pins_a/pins} { + pinmux = , /* SDMMC2_D0 */ + , /* SDMMC2_D1 */ + , /* SDMMC2_D2 */ + , /* SDMMC2_D3 */ + , /* SDMMC2_CK */ + ; /* SDMMC2_CMD */ }; -&sdmmc2_d47_pins_a { - pins { - pinmux = , /* SDMMC2_D4 */ - , /* SDMMC2_D5 */ - , /* SDMMC2_D6 */ - ; /* SDMMC2_D7 */ - }; +&{sdmmc2_d47_pins_a/pins} { + pinmux = , /* SDMMC2_D4 */ + , /* SDMMC2_D5 */ + , /* SDMMC2_D6 */ + ; /* SDMMC2_D7 */ }; -&sdmmc2_d47_sleep_pins_a { - pins { - pinmux = , /* SDMMC2_D4 */ - , /* SDMMC2_D5 */ - , /* SDMMC2_D6 */ - ; /* SDMMC2_D7 */ - }; +&{sdmmc2_d47_sleep_pins_a/pins} { + pinmux = , /* SDMMC2_D4 */ + , /* SDMMC2_D5 */ + , /* SDMMC2_D6 */ + ; /* SDMMC2_D7 */ }; &sdmmc3 { @@ -238,34 +228,28 @@ mmc@1 { }; }; -&sdmmc3_b4_od_pins_b { - pins1 { - pinmux = , /* SDMMC3_D0 */ - , /* SDMMC3_D1 */ - , /* SDMMC3_D2 */ - ; /* SDMMC3_D3 */ 
- }; +&{sdmmc3_b4_od_pins_b/pins1} { + pinmux = , /* SDMMC3_D0 */ + , /* SDMMC3_D1 */ + , /* SDMMC3_D2 */ + ; /* SDMMC3_D3 */ }; -&sdmmc3_b4_pins_b { - pins1 { - pinmux = , /* SDMMC3_D0 */ - , /* SDMMC3_D1 */ - , /* SDMMC3_D2 */ - , /* SDMMC3_D3 */ - ; /* SDMMC3_CMD */ - }; +&{sdmmc3_b4_pins_b/pins1} { + pinmux = , /* SDMMC3_D0 */ + , /* SDMMC3_D1 */ + , /* SDMMC3_D2 */ + , /* SDMMC3_D3 */ + ; /* SDMMC3_CMD */ }; -&sdmmc3_b4_sleep_pins_b { - pins { - pinmux = , /* SDMMC3_D0 */ - , /* SDMMC3_D1 */ - , /* SDMMC3_D2 */ - , /* SDMMC3_D3 */ - , /* SDMMC3_CK */ - ; /* SDMMC3_CMD */ - }; +&{sdmmc3_b4_sleep_pins_b/pins} { + pinmux = , /* SDMMC3_D0 */ + , /* SDMMC3_D1 */ + , /* SDMMC3_D2 */ + , /* SDMMC3_D3 */ + , /* SDMMC3_CK */ + ; /* SDMMC3_CMD */ }; &spi1 { diff --git a/arch/arm/boot/dts/st/stm32mp151a-prtt1l.dtsi b/arch/arm/boot/dts/st/stm32mp151a-prtt1l.dtsi index 3938d357e198f4..98a31c2b5d456a 100644 --- a/arch/arm/boot/dts/st/stm32mp151a-prtt1l.dtsi +++ b/arch/arm/boot/dts/st/stm32mp151a-prtt1l.dtsi @@ -69,30 +69,27 @@ ðernet0 { status = "okay"; }; -ðernet0_rmii_pins_a { - pins1 { - pinmux = , /* ETH1_RMII_TXD0 */ - , /* ETH1_RMII_TXD1 */ - ; /* ETH1_RMII_TX_EN */ - }; - pins2 { - pinmux = , /* ETH1_RMII_RXD0 */ - , /* ETH1_RMII_RXD1 */ - , /* ETH1_RMII_REF_CLK input */ - ; /* ETH1_RMII_CRS_DV */ - }; +&{ethernet0_rmii_pins_a/pins1} { + pinmux = , /* ETH1_RMII_TXD0 */ + , /* ETH1_RMII_TXD1 */ + ; /* ETH1_RMII_TX_EN */ }; -ðernet0_rmii_sleep_pins_a { - pins1 { - pinmux = , /* ETH1_RMII_TXD0 */ - , /* ETH1_RMII_TXD1 */ - , /* ETH1_RMII_TX_EN */ - , /* ETH1_RMII_RXD0 */ - , /* ETH1_RMII_RXD1 */ - , /* ETH1_RMII_REF_CLK */ - ; /* ETH1_RMII_CRS_DV */ - }; +&{ethernet0_rmii_pins_a/pins2} { + pinmux = , /* ETH1_RMII_RXD0 */ + , /* ETH1_RMII_RXD1 */ + , /* ETH1_RMII_REF_CLK input */ + ; /* ETH1_RMII_CRS_DV */ +}; + +&{ethernet0_rmii_sleep_pins_a/pins1} { + pinmux = , /* ETH1_RMII_TXD0 */ + , /* ETH1_RMII_TXD1 */ + , /* ETH1_RMII_TX_EN */ + , /* ETH1_RMII_RXD0 */ + , /* ETH1_RMII_RXD1 */ + , /* ETH1_RMII_REF_CLK */ + ; /* ETH1_RMII_CRS_DV */ }; &iwdg2 { @@ -122,12 +119,11 @@ flash@0 { }; }; -&qspi_bk1_pins_a { - pins1 { - bias-pull-up; - drive-push-pull; - slew-rate = <1>; - }; +&{qspi_bk1_pins_a/pins} { + /delete-property/ bias-disable; + bias-pull-up; + drive-push-pull; + slew-rate = <1>; }; &rng1 { @@ -147,22 +143,24 @@ &sdmmc1 { status = "okay"; }; -&sdmmc1_b4_od_pins_a { - pins1 { - bias-pull-up; - }; - pins2 { - bias-pull-up; - }; +&{sdmmc1_b4_od_pins_a/pins1} { + /delete-property/ bias-disable; + bias-pull-up; }; -&sdmmc1_b4_pins_a { - pins1 { - bias-pull-up; - }; - pins2 { - bias-pull-up; - }; +&{sdmmc1_b4_od_pins_a/pins2} { + /delete-property/ bias-disable; + bias-pull-up; +}; + +&{sdmmc1_b4_pins_a/pins1} { + /delete-property/ bias-disable; + bias-pull-up; +}; + +&{sdmmc1_b4_pins_a/pins2} { + /delete-property/ bias-disable; + bias-pull-up; }; &uart4 { @@ -175,34 +173,30 @@ &uart4 { status = "okay"; }; -&uart4_idle_pins_a { - pins1 { - pinmux = ; /* UART4_TX */ - }; - pins2 { - pinmux = ; /* UART4_RX */ - bias-pull-up; - }; +&{uart4_idle_pins_a/pins1} { + pinmux = ; /* UART4_TX */ }; -&uart4_pins_a { - pins1 { - pinmux = ; /* UART4_TX */ - bias-disable; - drive-push-pull; - slew-rate = <0>; - }; - pins2 { - pinmux = ; /* UART4_RX */ - bias-pull-up; - }; +&{uart4_idle_pins_a/pins2} { + pinmux = ; /* UART4_RX */ + /delete-property/ bias-disable; + bias-pull-up; }; -&uart4_sleep_pins_a { - pins { - pinmux = , /* UART4_TX */ - ; /* UART4_RX */ - }; +&{uart4_pins_a/pins1} { + pinmux = ; /* 
UART4_TX */ + slew-rate = <0>; +}; + +&{uart4_pins_a/pins2} { + pinmux = ; /* UART4_RX */ + /delete-property/ bias-disable; + bias-pull-up; +}; + +&{uart4_sleep_pins_a/pins} { + pinmux = , /* UART4_TX */ + ; /* UART4_RX */ }; &usbh_ehci { diff --git a/arch/arm/boot/dts/st/stm32mp151a-prtt1s.dts b/arch/arm/boot/dts/st/stm32mp151a-prtt1s.dts index ad25929e64e6e0..b6be61b159e72b 100644 --- a/arch/arm/boot/dts/st/stm32mp151a-prtt1s.dts +++ b/arch/arm/boot/dts/st/stm32mp151a-prtt1s.dts @@ -36,18 +36,14 @@ co2-sensor@62 { }; }; -&i2c1_pins_a { - pins { - pinmux = , /* I2C1_SCL */ - ; /* I2C1_SDA */ - }; +&{i2c1_pins_a/pins} { + pinmux = , /* I2C1_SCL */ + ; /* I2C1_SDA */ }; -&i2c1_sleep_pins_a { - pins { - pinmux = , /* I2C1_SCL */ - ; /* I2C1_SDA */ - }; +&{i2c1_sleep_pins_a/pins} { + pinmux = , /* I2C1_SCL */ + ; /* I2C1_SDA */ }; &mdio0 { diff --git a/arch/arm/boot/dts/st/stm32mp151c-mecio1r0.dts b/arch/arm/boot/dts/st/stm32mp151c-mecio1r0.dts new file mode 100644 index 00000000000000..a5ea1431c3991c --- /dev/null +++ b/arch/arm/boot/dts/st/stm32mp151c-mecio1r0.dts @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (C) Protonic Holland + * Author: David Jander + */ +/dts-v1/; + +#include "stm32mp151.dtsi" +#include "stm32mp15xc.dtsi" +#include "stm32mp15-pinctrl.dtsi" +#include "stm32mp15xxaa-pinctrl.dtsi" +#include "stm32mp15x-mecio1-io.dtsi" +#include +#include +#include + +/ { + model = "Protonic MECIO1r0"; + compatible = "prt,mecio1r0", "st,stm32mp151"; + + led { + compatible = "gpio-leds"; + + led-0 { + color = ; + function = LED_FUNCTION_DEBUG; + gpios = <&gpioa 13 GPIO_ACTIVE_HIGH>; + }; + + led-1 { + color = ; + function = LED_FUNCTION_DEBUG; + gpios = <&gpioa 14 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + }; +}; + +&clk_hse { + clock-frequency = <25000000>; +}; + +ðernet0 { + assigned-clocks = <&rcc ETHCK_K>, <&rcc PLL3_Q>; + assigned-clock-parents = <&rcc PLL3_Q>; + assigned-clock-rates = <125000000>; /* Clock PLL3 to 625Mhz in tf-a. 
*/ + st,eth-clk-sel; +}; diff --git a/arch/arm/boot/dts/st/stm32mp151c-mect1s.dts b/arch/arm/boot/dts/st/stm32mp151c-mect1s.dts new file mode 100644 index 00000000000000..a1b8c3646e98bb --- /dev/null +++ b/arch/arm/boot/dts/st/stm32mp151c-mect1s.dts @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (C) Protonic Holland + * Author: David Jander + */ +/dts-v1/; + +#include "stm32mp151.dtsi" +#include "stm32mp15xc.dtsi" +#include "stm32mp15-pinctrl.dtsi" +#include "stm32mp15xxaa-pinctrl.dtsi" +#include +#include +#include + +/ { + model = "Protonic MECT1S"; + compatible = "prt,mect1s", "st,stm32mp151"; + + chosen { + stdout-path = "serial0:1500000n8"; + }; + + aliases { + serial0 = &uart4; + ethernet0 = ðernet0; + ethernet1 = ðernet1; + ethernet2 = ðernet2; + ethernet3 = ðernet3; + ethernet4 = ðernet4; + }; + + v3v3: regulator-v3v3 { + compatible = "regulator-fixed"; + regulator-name = "v3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + v5v: regulator-v5v { + compatible = "regulator-fixed"; + regulator-name = "v5v"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; + + led { + compatible = "gpio-leds"; + + led-0 { + color = ; + function = LED_FUNCTION_DEBUG; + gpios = <&gpioa 13 GPIO_ACTIVE_LOW>; + }; + + led-1 { + color = ; + function = LED_FUNCTION_DEBUG; + gpios = <&gpioa 14 GPIO_ACTIVE_LOW>; + linux,default-trigger = "heartbeat"; + }; + }; +}; + +&clk_hse { + clock-frequency = <24000000>; +}; + +&clk_lse { + status = "disabled"; +}; + +ðernet0 { + status = "okay"; + pinctrl-0 = <ðernet0_rmii_pins_a>; + pinctrl-1 = <ðernet0_rmii_sleep_pins_a>; + pinctrl-names = "default", "sleep"; + phy-mode = "rmii"; + max-speed = <100>; + st,eth-clk-sel; + + fixed-link { + speed = <100>; + full-duplex; + }; + + mdio0: mdio { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + }; +}; + +&{ethernet0_rmii_pins_a/pins1} { + pinmux = , /* ETH1_RMII_TXD0 */ + , /* ETH1_RMII_TXD1 */ + , /* ETH1_RMII_TX_EN */ + , /* ETH1_MDIO */ + ; /* ETH1_MDC */ +}; + +&{ethernet0_rmii_pins_a/pins2} { + pinmux = , /* ETH1_RMII_RXD0 */ + , /* ETH1_RMII_RXD1 */ + , /* ETH1_RMII_REF_CLK input */ + ; /* ETH1_RMII_CRS_DV */ +}; + +&{ethernet0_rmii_sleep_pins_a/pins1} { + pinmux = , /* ETH1_RMII_TXD0 */ + , /* ETH1_RMII_TXD1 */ + , /* ETH1_RMII_TX_EN */ + , /* ETH1_RMII_RXD0 */ + , /* ETH1_RMII_RXD1 */ + , /* ETH1_RMII_REF_CLK */ + ; /* ETH1_RMII_CRS_DV */ +}; + +&mdio0 { + /* All this DP83TG720R PHYs can't be probed before switch@0 is + * probed so we need to use compatible with PHYid + */ + /* TI DP83TG720R */ + t1_phy0: ethernet-phy@8 { + compatible = "ethernet-phy-id2000.a284"; + reg = <8>; + interrupts-extended = <&gpioi 5 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpioh 13 GPIO_ACTIVE_LOW>; + reset-assert-us = <10>; + reset-deassert-us = <35>; + }; + + /* TI DP83TG720R */ + t1_phy1: ethernet-phy@c { + compatible = "ethernet-phy-id2000.a284"; + reg = <12>; + interrupts-extended = <&gpioj 0 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpioh 14 GPIO_ACTIVE_LOW>; + reset-assert-us = <10>; + reset-deassert-us = <35>; + }; + + /* TI DP83TG720R */ + t1_phy2: ethernet-phy@4 { + compatible = "ethernet-phy-id2000.a284"; + reg = <4>; + interrupts-extended = <&gpioi 7 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>; + reset-assert-us = <10>; + reset-deassert-us = <35>; + }; + + /* TI DP83TG720R */ + t1_phy3: ethernet-phy@d { + compatible = 
"ethernet-phy-id2000.a284"; + reg = <13>; + interrupts-extended = <&gpioi 15 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpioi 13 GPIO_ACTIVE_LOW>; + reset-assert-us = <10000>; + reset-deassert-us = <1000>; + }; +}; + +&qspi { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qspi_clk_pins_a + &qspi_bk1_pins_a + &qspi_cs1_pins_a>; + pinctrl-1 = <&qspi_clk_sleep_pins_a + &qspi_bk1_sleep_pins_a + &qspi_cs1_sleep_pins_a>; + status = "okay"; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-rx-bus-width = <4>; + spi-max-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <1>; + }; +}; + +&{qspi_bk1_pins_a/pins} { + /delete-property/ bias-disable; + bias-pull-up; + drive-push-pull; + slew-rate = <1>; +}; + +&spi2 { + pinctrl-0 = <&spi2_pins_b>; + pinctrl-names = "default"; + cs-gpios = <&gpioj 3 GPIO_ACTIVE_LOW>; + /delete-property/dmas; + /delete-property/dma-names; + status = "okay"; + + switch@0 { + compatible = "nxp,sja1105q"; + reg = <0>; + spi-max-frequency = <1000000>; + spi-rx-delay-us = <1>; + spi-tx-delay-us = <1>; + spi-cpha; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + ethernet1: port@0 { + reg = <0>; + label = "t10"; + phy-mode = "rgmii-id"; + phy-handle = <&t1_phy0>; + }; + + ethernet2: port@1 { + reg = <1>; + label = "t11"; + phy-mode = "rgmii-id"; + phy-handle = <&t1_phy1>; + }; + + ethernet3: port@2 { + reg = <2>; + label = "t12"; + phy-mode = "rgmii-id"; + phy-handle = <&t1_phy2>; + }; + + ethernet4: port@3 { + reg = <3>; + label = "t13"; + phy-mode = "rgmii-id"; + phy-handle = <&t1_phy3>; + }; + + port@4 { + reg = <4>; + label = "cpu"; + ethernet = <ðernet0>; + phy-mode = "rmii"; + + /* RGMII mode is not working properly, using RMII instead. */ + fixed-link { + speed = <100>; + full-duplex; + }; + }; + }; + }; +}; + +&uart4 { + pinctrl-names = "default", "sleep", "idle"; + pinctrl-0 = <&uart4_pins_a>; + pinctrl-1 = <&uart4_sleep_pins_a>; + pinctrl-2 = <&uart4_idle_pins_a>; + /delete-property/dmas; + /delete-property/dma-names; + status = "okay"; +}; + +&usbh_ehci { + status = "okay"; +}; + +&usbotg_hs { + dr_mode = "host"; + pinctrl-0 = <&usbotg_hs_pins_a>; + pinctrl-names = "default"; + phys = <&usbphyc_port1 0>; + phy-names = "usb2-phy"; + vbus-supply = <&v5v>; + status = "okay"; +}; + +&usbphyc { + status = "okay"; +}; + +&usbphyc_port0 { + phy-supply = <&v3v3>; +}; + +&usbphyc_port1 { + phy-supply = <&v3v3>; +}; diff --git a/arch/arm/boot/dts/st/stm32mp153c-mecio1r1.dts b/arch/arm/boot/dts/st/stm32mp153c-mecio1r1.dts new file mode 100644 index 00000000000000..16b814c19350c7 --- /dev/null +++ b/arch/arm/boot/dts/st/stm32mp153c-mecio1r1.dts @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (C) Protonic Holland + * Author: David Jander + */ +/dts-v1/; + +#include "stm32mp153.dtsi" +#include "stm32mp15xc.dtsi" +#include "stm32mp15-pinctrl.dtsi" +#include "stm32mp15xxaa-pinctrl.dtsi" +#include "stm32mp15x-mecio1-io.dtsi" +#include +#include +#include + +/ { + model = "Protonic MECIO1r1"; + compatible = "prt,mecio1r1", "st,stm32mp153"; + + led { + compatible = "gpio-leds"; + + led-0 { + color = ; + function = LED_FUNCTION_DEBUG; + gpios = <&gpioa 13 GPIO_ACTIVE_LOW>; + }; + + led-1 { + color = ; + function = LED_FUNCTION_DEBUG; + gpios = <&gpioa 14 GPIO_ACTIVE_LOW>; + linux,default-trigger = "heartbeat"; + }; + }; +}; + +&clk_hse { + clock-frequency = <24000000>; +}; + +&m_can1 { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&m_can1_pins_b>; + pinctrl-1 = <&m_can1_sleep_pins_b>; + status = 
"okay"; +}; diff --git a/arch/arm/boot/dts/st/stm32mp15x-mecio1-io.dtsi b/arch/arm/boot/dts/st/stm32mp15x-mecio1-io.dtsi new file mode 100644 index 00000000000000..915ba2526f4515 --- /dev/null +++ b/arch/arm/boot/dts/st/stm32mp15x-mecio1-io.dtsi @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright (C) Protonic Holland + * Author: David Jander + */ + +#include "stm32mp15xc.dtsi" +#include "stm32mp15-pinctrl.dtsi" +#include "stm32mp15xxaa-pinctrl.dtsi" +#include +#include + +/ { + chosen { + stdout-path = "serial0:1500000n8"; + }; + + aliases { + serial0 = &uart4; + ethernet0 = ðernet0; + spi1 = &spi1; + spi2 = &spi2; + spi3 = &spi3; + spi4 = &spi4; + spi5 = &spi5; + spi6 = &spi6; + }; + + memory@c0000000 { + device_type = "memory"; + reg = <0xC0000000 0x10000000>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + mcuram2: mcuram2@10000000 { + compatible = "shared-dma-pool"; + reg = <0x10000000 0x40000>; + no-map; + }; + + vdev0vring0: vdev0vring0@10040000 { + compatible = "shared-dma-pool"; + reg = <0x10040000 0x1000>; + no-map; + }; + + vdev0vring1: vdev0vring1@10041000 { + compatible = "shared-dma-pool"; + reg = <0x10041000 0x1000>; + no-map; + }; + + vdev0buffer: vdev0buffer@10042000 { + compatible = "shared-dma-pool"; + reg = <0x10042000 0x4000>; + no-map; + }; + + mcuram: mcuram@30000000 { + compatible = "shared-dma-pool"; + reg = <0x30000000 0x40000>; + no-map; + }; + + retram: retram@38000000 { + compatible = "shared-dma-pool"; + reg = <0x38000000 0x10000>; + no-map; + }; + }; + + v3v3: regulator-v3v3 { + compatible = "regulator-fixed"; + regulator-name = "v3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + v5v: regulator-v5v { + compatible = "regulator-fixed"; + regulator-name = "v5v"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; +}; + +&adc { + /* ANA0, ANA1 are dedicated pins and don't need pinctrl: only in6. 
*/ + pinctrl-0 = <&adc12_pins_mecsbc>; + pinctrl-names = "default"; + vdd-supply = <&v3v3>; + vdda-supply = <&v3v3>; + vref-supply = <&v3v3>; + status = "okay"; +}; + +&adc1 { + status = "okay"; + + channel@0 { + reg = <0>; + /* 16.5 ck_cycles sampling time */ + st,min-sample-time-ns = <5000>; + label = "p24v_stp"; + }; + + channel@1 { + reg = <1>; + st,min-sample-time-ns = <5000>; + label = "p24v_hpdcm"; + }; + + channel@2 { + reg = <2>; + st,min-sample-time-ns = <5000>; + label = "ain0"; + }; + + channel@3 { + reg = <3>; + st,min-sample-time-ns = <5000>; + label = "hpdcm1_i2"; + }; + + channel@5 { + reg = <5>; + st,min-sample-time-ns = <5000>; + label = "hpout1_i"; + }; + + channel@6 { + reg = <6>; + st,min-sample-time-ns = <5000>; + label = "ain1"; + }; + + channel@9 { + reg = <9>; + st,min-sample-time-ns = <5000>; + label = "hpout0_i"; + }; + + channel@10 { + reg = <10>; + st,min-sample-time-ns = <5000>; + label = "phint0_ain"; + }; + + channel@13 { + reg = <13>; + st,min-sample-time-ns = <5000>; + label = "phint1_ain"; + }; + + channel@15 { + reg = <15>; + st,min-sample-time-ns = <5000>; + label = "hpdcm0_i1"; + }; + + channel@16 { + reg = <16>; + st,min-sample-time-ns = <5000>; + label = "lsin"; + }; + + channel@18 { + reg = <18>; + st,min-sample-time-ns = <5000>; + label = "hpdcm0_i2"; + }; + + channel@19 { + reg = <19>; + st,min-sample-time-ns = <5000>; + label = "hpdcm1_i1"; + }; +}; + +&adc2 { + status = "okay"; + + channel@2 { + reg = <2>; + /* 16.5 ck_cycles sampling time */ + st,min-sample-time-ns = <5000>; + label = "ain2"; + }; + + channel@6 { + reg = <6>; + st,min-sample-time-ns = <5000>; + label = "ain3"; + }; +}; + +ðernet0 { + status = "okay"; + pinctrl-0 = <ðernet0_rgmii_pins_x>; + pinctrl-1 = <ðernet0_rgmii_sleep_pins_x>; + pinctrl-names = "default", "sleep"; + phy-mode = "rgmii-id"; + max-speed = <1000>; + phy-handle = <&phy0>; + st,eth-clk-sel; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + phy0: ethernet-phy@8 { + reg = <8>; + interrupts-extended = <&gpiog 7 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpiog 10 GPIO_ACTIVE_LOW>; + reset-assert-us = <10>; + reset-deassert-us = <35>; + }; + }; +}; + +&gpiod { + gpio-line-names = "", "", "", "", + "", "", "", "", + "", "", "", "", + "STP_RESETN", "STP_ENABLEN", "HPOUT0", "HPOUT0_ALERTN"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hog_d_mecsbc>; +}; + +&gpioe { + gpio-line-names = "HPOUT0_RESETN", "HPOUT1", "HPOUT1_ALERTN", "", + "", "", "HPOUT1_RESETN", + "LPOUT0", "LPOUT0_ALERTN", "GPOUT0_RESETN", + "LPOUT1", "LPOUT1_ALERTN", "GPOUT1_RESETN", + "LPOUT2", "LPOUT2_ALERTN", "GPOUT2_RESETN"; +}; + +&gpiof { + gpio-line-names = "LPOUT3", "LPOUT3_ALERTN", "GPOUT3_RESETN", + "LPOUT4", "LPOUT4_ALERTN", "GPOUT4_RESETN", + "", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpiog { + gpio-line-names = "LPOUT5", "LPOUT5_ALERTN", "", "LPOUT5_RESETN", + "", "", "", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpioh { + gpio-line-names = "", "", "", "", + "", "", "", "", + "GPIO0_RESETN", "", "", "", + "", "", "", ""; +}; + +&gpioi { + gpio-line-names = "", "", "", "", + "", "", "", "", + "HPDCM0_SLEEPN", "HPDCM1_SLEEPN", "GPIO1_RESETN", "", + "", "", "", ""; +}; + +&gpioj { + gpio-line-names = "HSIN10", "HSIN11", "HSIN12", "HSIN13", + "HSIN14", "HSIN15", "", "", + "", "", "", "", + "", "RTD_RESETN", "", ""; +}; + +&gpiok { + gpio-line-names = "", "", "HSIN0", "HSIN1", + "HSIN2", "HSIN3", "HSIN4", "HSIN5"; +}; + +&gpioz { + gpio-line-names = "", "", "", "HSIN6", + "HSIN7", 
"HSIN8", "HSIN9", ""; +}; + +&i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins_a>; + pinctrl-1 = <&i2c2_sleep_pins_a>; + status = "okay"; + + gpio0: gpio@20 { + compatible = "ti,tca6416"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "HSIN0_BIAS", "HSIN1_BIAS", "HSIN2_BIAS", "HSIN3_BIAS", + "", "", "HSIN_VREF0_LVL", "HSIN_VREF1_LVL", + "HSIN4_BIAS", "HSIN5_BIAS", "HSIN6_BIAS", "HSIN9_BIAS", + "", "", "", ""; + }; + + gpio1: gpio@21 { + compatible = "ti,tca6416"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "HSIN8_BIAS", "HSIN9_BIAS", "HSIN10_BIAS", "HSIN11_BIAS", + "", "", "HSIN_VREF2_LVL", "HSIN_VREF3_LVL", + "HSIN12_BIAS", "HSIN13_BIAS", "HSIN14_BIAS", "HSIN15_BIAS", + "", "", "LSIN8_BIAS", "LSIN9_BIAS"; + }; +}; + +&qspi { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qspi_clk_pins_a + &qspi_bk1_pins_a + &qspi_cs1_pins_a>; + pinctrl-1 = <&qspi_clk_sleep_pins_a + &qspi_bk1_sleep_pins_a + &qspi_cs1_sleep_pins_a>; + status = "okay"; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-rx-bus-width = <4>; + spi-max-frequency = <104000000>; + #address-cells = <1>; + #size-cells = <1>; + }; +}; + +&{qspi_bk1_pins_a/pins} { + pinmux = , /* QSPI_BK1_IO0 */ + , /* QSPI_BK1_IO1 */ + , /* QSPI_BK1_IO2 */ + ; /* QSPI_BK1_IO3 */ + /delete-property/ bias-disable; + bias-pull-up; +}; + +&timers1 { + /delete-property/dmas; + /delete-property/dma-names; + status = "okay"; + + hpdcm0_pwm: pwm { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&pwm1_pins_mecio1>; + pinctrl-1 = <&pwm1_sleep_pins_mecio1>; + status = "okay"; + }; +}; + +&timers8 { + /delete-property/dmas; + /delete-property/dma-names; + status = "okay"; + + hpdcm1_pwm: pwm { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&pwm8_pins_mecio1>; + pinctrl-1 = <&pwm8_sleep_pins_mecio1>; + status = "okay"; + }; +}; + +&uart4 { + pinctrl-names = "default", "sleep", "idle"; + pinctrl-0 = <&uart4_pins_a>; + pinctrl-1 = <&uart4_sleep_pins_a>; + pinctrl-2 = <&uart4_idle_pins_a>; + /delete-property/dmas; + /delete-property/dma-names; + status = "okay"; +}; + +&{uart4_pins_a/pins1} { + pinmux = ; /* UART4_TX */ +}; + +&{uart4_pins_a/pins2} { + pinmux = ; /* UART4_RX */ + /delete-property/ bias-disable; + bias-pull-up; +}; + +&usbotg_hs { + dr_mode = "host"; + pinctrl-0 = <&usbotg_hs_pins_a>; + pinctrl-names = "default"; + phys = <&usbphyc_port1 0>; + phy-names = "usb2-phy"; + vbus-supply = <&v5v>; + status = "okay"; +}; + +&usbphyc { + status = "okay"; +}; + +&usbphyc_port0 { + phy-supply = <&v3v3>; +}; + +&usbphyc_port1 { + phy-supply = <&v3v3>; +}; + +&pinctrl { + adc12_pins_mecsbc: adc12-ain-mecsbc-0 { + pins { + pinmux = , /* ADC1_INP2 */ + , /* ADC1_INP6 */ + , /* ADC2_INP2 */ + , /* ADC2_INP6 */ + , /* ADC1_INP16 */ + , /* ADC1_INP15 */ + , /* ADC1_INP18 */ + , /* ADC1_INP19 */ + , /* ADC1_INP3 */ + , /* ADC1_INP9 */ + , /* ADC1_INP5 */ + , /* ADC1_INP10 */ + ; /* ADC1_INP13 */ + }; + }; + + pinctrl_hog_d_mecsbc: hog-d-0 { + pins { + pinmux = ; /* STP_RESETn */ + bias-pull-up; + drive-push-pull; + slew-rate = <0>; + }; + }; + + pwm1_pins_mecio1: pwm1-mecio1-0 { + pins { + pinmux = , /* TIM1_CH1 */ + ; /* TIM1_CH2 */ + bias-pull-down; + drive-push-pull; + slew-rate = <0>; + }; + }; + + pwm1_sleep_pins_mecio1: pwm1-sleep-mecio1-0 { + pins { + pinmux = , /* TIM1_CH1 */ + ; /* TIM1_CH2 */ + }; + }; + + pwm8_pins_mecio1: pwm8-mecio1-0 { + pins { + pinmux = , /* TIM8_CH1 */ + ; /* TIM8_CH2 */ + bias-pull-down; + drive-push-pull; + slew-rate = <0>; + }; 
+ }; + + pwm8_sleep_pins_mecio1: pwm8-sleep-mecio1-0 { + pins { + pinmux = , /* TIM8_CH1 */ + ; /* TIM8_CH2 */ + }; + }; + + ethernet0_rgmii_pins_x: rgmii-0 { + pins1 { + pinmux = , /* ETH_RGMII_CLK125 */ + , /* ETH_RGMII_GTX_CLK */ + , /* ETH_RGMII_TXD0 */ + , /* ETH_RGMII_TXD1 */ + , /* ETH_RGMII_TXD2 */ + , /* ETH_RGMII_TXD3 */ + , /* ETH_RGMII_TX_CTL */ + ; /* ETH_MDC */ + bias-disable; + drive-push-pull; + slew-rate = <3>; + }; + pins2 { + pinmux = ; /* ETH_MDIO */ + bias-disable; + drive-push-pull; + slew-rate = <0>; + }; + pins3 { + pinmux = , /* ETH_RGMII_RXD0 */ + , /* ETH_RGMII_RXD1 */ + , /* ETH_RGMII_RXD2 */ + , /* ETH_RGMII_RXD3 */ + , /* ETH_RGMII_RX_CLK */ + ; /* ETH_RGMII_RX_CTL */ + bias-disable; + }; + }; + + ethernet0_rgmii_sleep_pins_x: rgmii-sleep-0 { + pins1 { + pinmux = , /* ETH_RGMII_CLK125 */ + , /* ETH_RGMII_GTX_CLK */ + , /* ETH_RGMII_TXD0 */ + , /* ETH_RGMII_TXD1 */ + , /* ETH_RGMII_TXD2 */ + , /* ETH_RGMII_TXD3 */ + , /* ETH_RGMII_TX_CTL */ + , /* ETH_MDIO */ + , /* ETH_MDC */ + , /* ETH_RGMII_RXD0 */ + , /* ETH_RGMII_RXD1 */ + , /* ETH_RGMII_RXD2 */ + , /* ETH_RGMII_RXD3 */ + , /* ETH_RGMII_RX_CLK */ + ; /* ETH_RGMII_RX_CTL */ + }; + }; +}; diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi index 466d9701add0f6..171d7c7658fa86 100644 --- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi +++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi @@ -192,15 +192,11 @@ sgtl5000_port: port { sgtl5000_tx_endpoint: endpoint@0 { reg = <0>; remote-endpoint = <&sai2a_endpoint>; - frame-master = <&sgtl5000_tx_endpoint>; - bitclock-master = <&sgtl5000_tx_endpoint>; }; sgtl5000_rx_endpoint: endpoint@1 { reg = <1>; remote-endpoint = <&sai2b_endpoint>; - frame-master = <&sgtl5000_rx_endpoint>; - bitclock-master = <&sgtl5000_rx_endpoint>; }; }; @@ -245,10 +241,12 @@ sai2a: audio-controller@4400b004 { sai2a_port: port { sai2a_endpoint: endpoint { remote-endpoint = <&sgtl5000_tx_endpoint>; + bitclock-master; dai-format = "i2s"; - mclk-fs = <512>; dai-tdm-slot-num = <2>; dai-tdm-slot-width = <16>; + frame-master; + mclk-fs = <256>; }; }; }; @@ -263,10 +261,12 @@ sai2b: audio-controller@4400b024 { sai2b_port: port { sai2b_endpoint: endpoint { remote-endpoint = <&sgtl5000_rx_endpoint>; + bitclock-master; dai-format = "i2s"; - mclk-fs = <512>; dai-tdm-slot-num = <2>; dai-tdm-slot-width = <16>; + frame-master; + mclk-fs = <256>; }; }; }; diff --git a/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi b/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi index 2d0216840ff5b2..a0fb431aec8411 100644 --- a/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi @@ -221,10 +221,14 @@ baseboard_eeprom: baseboard_eeprom@50 { reg = <0x50>; vcc-supply = <&ldo4_reg>; - #address-cells = <1>; - #size-cells = <1>; - baseboard_data: baseboard_data@0 { - reg = <0 0x100>; + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + baseboard_data: baseboard_data@0 { + reg = <0 0x100>; + }; }; }; }; @@ -239,40 +243,60 @@ &i2c2 { cape_eeprom0: cape_eeprom0@54 { compatible = "atmel,24c256"; reg = <0x54>; - #address-cells = <1>; - #size-cells = <1>; - cape0_data: cape_data@0 { - reg = <0 0x100>; + + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + cape0_data: cape_data@0 { + reg = <0 0x100>; + }; }; }; cape_eeprom1: cape_eeprom1@55 { compatible = "atmel,24c256"; reg = <0x55>; - #address-cells = <1>; - #size-cells = 
<1>; - cape1_data: cape_data@0 { - reg = <0 0x100>; + + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + cape1_data: cape_data@0 { + reg = <0 0x100>; + }; }; }; cape_eeprom2: cape_eeprom2@56 { compatible = "atmel,24c256"; reg = <0x56>; - #address-cells = <1>; - #size-cells = <1>; - cape2_data: cape_data@0 { - reg = <0 0x100>; + + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + cape2_data: cape_data@0 { + reg = <0 0x100>; + }; }; }; cape_eeprom3: cape_eeprom3@57 { compatible = "atmel,24c256"; reg = <0x57>; - #address-cells = <1>; - #size-cells = <1>; - cape3_data: cape_data@0 { - reg = <0 0x100>; + + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + cape3_data: cape_data@0 { + reg = <0 0x100>; + }; }; }; }; @@ -385,7 +409,7 @@ ethphy0: ethernet-phy@0 { /* Support GPIO reset on revision C3 boards */ reset-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; reset-assert-us = <300>; - reset-deassert-us = <6500>; + reset-deassert-us = <13000>; }; }; diff --git a/arch/arm/boot/dts/ti/omap/am335x-boneblue.dts b/arch/arm/boot/dts/ti/omap/am335x-boneblue.dts index 801399702547d0..8878da773d6790 100644 --- a/arch/arm/boot/dts/ti/omap/am335x-boneblue.dts +++ b/arch/arm/boot/dts/ti/omap/am335x-boneblue.dts @@ -317,10 +317,14 @@ baseboard_eeprom: baseboard_eeprom@50 { compatible = "atmel,24c256"; reg = <0x50>; - #address-cells = <1>; - #size-cells = <1>; - baseboard_data: baseboard_data@0 { - reg = <0 0x100>; + nvmem-layout { + compatible = "fixed-layout"; + #address-cells = <1>; + #size-cells = <1>; + + baseboard_data: baseboard_data@0 { + reg = <0 0x100>; + }; }; }; }; diff --git a/arch/arm/boot/dts/ti/omap/am335x-nano.dts b/arch/arm/boot/dts/ti/omap/am335x-nano.dts index 26b5510cb3d166..56929059f5af09 100644 --- a/arch/arm/boot/dts/ti/omap/am335x-nano.dts +++ b/arch/arm/boot/dts/ti/omap/am335x-nano.dts @@ -231,7 +231,7 @@ tps: tps@24 { }; temperature-sensor@48 { - compatible = "lm75"; + compatible = "national,lm75"; reg = <0x48>; }; diff --git a/arch/arm/boot/dts/ti/omap/am335x-regor.dtsi b/arch/arm/boot/dts/ti/omap/am335x-regor.dtsi index 625db3bcd3656d..287d209a0ea92b 100644 --- a/arch/arm/boot/dts/ti/omap/am335x-regor.dtsi +++ b/arch/arm/boot/dts/ti/omap/am335x-regor.dtsi @@ -5,6 +5,9 @@ * */ +#include +#include + / { model = "Phytec AM335x phyBOARD-REGOR"; compatible = "phytec,am335x-regor", "phytec,am335x-phycore-som", "ti,am33xx"; @@ -188,7 +191,7 @@ uart1_rs485_pins: pinmux-uart1-rs485-pins { pinctrl-single,pins = < AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0) - AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLUP, MUX_MODE0) + AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0) >; }; }; @@ -198,4 +201,9 @@ &uart1 { pinctrl-0 = <&uart1_rs485_pins>; status = "okay"; linux,rs485-enabled-at-boot-time; + /* + * un-intuitively, yet with the default (active-high), + * am335x RTS is high on idle and gets low on active ! 
+ */ + rs485-rts-active-low; }; diff --git a/arch/arm/boot/dts/ti/omap/am335x-wega.dtsi b/arch/arm/boot/dts/ti/omap/am335x-wega.dtsi index cb27ff464dbeb4..d0c290d7d06262 100644 --- a/arch/arm/boot/dts/ti/omap/am335x-wega.dtsi +++ b/arch/arm/boot/dts/ti/omap/am335x-wega.dtsi @@ -14,7 +14,7 @@ sound: sound { simple-audio-card,format = "i2s"; simple-audio-card,bitclock-master = <&sound_iface_main>; simple-audio-card,frame-master = <&sound_iface_main>; - simple-audio-card,mclk-fs = <32>; + simple-audio-card,mclk-fs = <512>; simple-audio-card,widgets = "Line", "Line In", "Line", "Line Out", @@ -27,13 +27,12 @@ sound: sound { "LINE1L", "Line In", "LINE1R", "Line In"; - simple-audio-card,cpu { + sound_iface_main: simple-audio-card,cpu { sound-dai = <&mcasp0>; }; - sound_iface_main: simple-audio-card,codec { + simple-audio-card,codec { sound-dai = <&tlv320aic3007>; - clocks = <&mcasp0_fck>; }; }; diff --git a/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi b/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi index dfb1fbafe3aa4e..33b02e05ce8273 100644 --- a/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi +++ b/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi @@ -97,9 +97,9 @@ &i2c0 { status = "okay"; clock-frequency = <400000>; - stlm75@49 { + temperature-sensor@49 { status = "okay"; - compatible = "lm75"; + compatible = "st,stlm75"; reg = <0x49>; }; diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index 6eabe2313c9a87..2022a7fca0f981 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig @@ -16,6 +16,7 @@ CONFIG_ARCH_AT91=y CONFIG_SOC_AT91RM9200=y CONFIG_SOC_AT91SAM9=y CONFIG_SOC_SAM9X60=y +CONFIG_SOC_SAM9X7=y # CONFIG_ATMEL_CLOCKSOURCE_PIT is not set CONFIG_AEABI=y CONFIG_UACCESS_WITH_MEMCPY=y diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig index 0376a65e8bc1a3..e19c1039fb9384 100644 --- a/arch/arm/configs/hisi_defconfig +++ b/arch/arm/configs/hisi_defconfig @@ -43,6 +43,7 @@ CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y CONFIG_SPI_PL022=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 62734530a3d6ba..9a5f5c439b8791 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -413,6 +413,7 @@ CONFIG_I2C_AT91=m CONFIG_I2C_BCM2835=y CONFIG_I2C_CADENCE=y CONFIG_I2C_DAVINCI=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_DIGICOLOR=m CONFIG_I2C_EMEV2=m diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index f2ca5c9131b52f..e1cb170c2bf061 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -277,6 +277,7 @@ CONFIG_HW_RANDOM=y CONFIG_I2C_CHARDEV=m CONFIG_I2C_MUX_PCA954x=m CONFIG_I2C_MUX_PINCTRL=m +CONFIG_I2C_DESIGNWARE_CORE=m CONFIG_I2C_DESIGNWARE_PLATFORM=m CONFIG_I2C_GPIO=y CONFIG_I2C_PXA_SLAVE=y diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 56925adfe84229..0e380e450a620e 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -24,6 +24,7 @@ CONFIG_CPUFREQ_DT=y CONFIG_VFP=y CONFIG_NEON=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_SLAB_FREELIST_HARDENED=y CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index 
e82c3866b810a0..294906c8f16e8f 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -83,6 +83,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2 CONFIG_SERIAL_8250_DW=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y CONFIG_SPI_CADENCE_QUADSPI=y diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig index c8128a6180e788..a8f992fdb30d1c 100644 --- a/arch/arm/configs/spear13xx_defconfig +++ b/arch/arm/configs/spear13xx_defconfig @@ -62,6 +62,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y # CONFIG_HW_RANDOM is not set CONFIG_RAW_DRIVER=y CONFIG_I2C=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y CONFIG_SPI_PL022=y diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig index 97ea2e9a6f075a..8dc5a388759c85 100644 --- a/arch/arm/configs/spear3xx_defconfig +++ b/arch/arm/configs/spear3xx_defconfig @@ -42,6 +42,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y # CONFIG_HW_RANDOM is not set CONFIG_RAW_DRIVER=y CONFIG_I2C=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y CONFIG_SPI_PL022=y diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig index a7a3413ac96834..4e9e1a6ff3817d 100644 --- a/arch/arm/configs/spear6xx_defconfig +++ b/arch/arm/configs/spear6xx_defconfig @@ -33,6 +33,7 @@ CONFIG_STMMAC_ETH=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_I2C=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y CONFIG_SPI_PL022=y diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 847b7a0033569c..5ff49a5e9afc9d 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -166,10 +166,9 @@ config CRYPTO_AES_ARM config CRYPTO_AES_ARM_BS tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)" depends on KERNEL_MODE_NEON + select CRYPTO_AES_ARM select CRYPTO_SKCIPHER select CRYPTO_LIB_AES - select CRYPTO_AES - select CRYPTO_CBC select CRYPTO_SIMD help Length-preserving ciphers: AES cipher algorithms (FIPS-197) @@ -183,8 +182,15 @@ config CRYPTO_AES_ARM_BS Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode and for XTS mode encryption, CBC and XTS mode decryption speedup is around 25%. (CBC encryption speed is not affected by this driver.) - This implementation does not rely on any lookup tables so it is - believed to be invulnerable to cache timing attacks. + + The bit sliced AES code does not use lookup tables, so it is believed + to be invulnerable to cache timing attacks. However, since the bit + sliced AES code cannot process single blocks efficiently, in certain + cases table-based code with some countermeasures against cache timing + attacks will still be used as a fallback method; specifically CBC + encryption (not CBC decryption), the encryption of XTS tweaks, XTS + ciphertext stealing when the message isn't a multiple of 16 bytes, and + CTR when invoked in a context in which NEON instructions are unusable. 
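To illustrate the point the help text above is making — CBC encryption cannot be parallelised across blocks, because each ciphertext block is an input to the next one, so the bit-sliced (eight-blocks-at-a-time) NEON code needs a scalar single-block fallback — here is a minimal, stand-alone C sketch. It is not kernel code and the names cbc_encrypt_serial, block_fn and BLOCK are invented purely for illustration; in the patch itself the equivalent serial loop is the new cbc_encrypt() in aes-neonbs-glue.c, which calls __aes_arm_encrypt() from the new aes-cipher.h header.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK 16

/* Any single-block cipher primitive supplied by the caller. */
typedef void (*block_fn)(const void *key, uint8_t dst[BLOCK],
			 const uint8_t src[BLOCK]);

/* In-place CBC encryption: C_i = E_k(P_i ^ C_{i-1}), with C_{-1} = IV. */
static void cbc_encrypt_serial(block_fn encrypt, const void *key,
			       uint8_t *buf, size_t nblocks,
			       uint8_t iv[BLOCK])
{
	const uint8_t *prev = iv;
	size_t i, j;

	for (i = 0; i < nblocks; i++, buf += BLOCK) {
		for (j = 0; j < BLOCK; j++)	/* P_i ^= C_{i-1} */
			buf[j] ^= prev[j];
		encrypt(key, buf, buf);		/* C_i = E_k(...) */
		prev = buf;			/* chains into the next block */
	}
	if (prev != iv)				/* keep IV for a follow-on call */
		memcpy(iv, prev, BLOCK);
}

CBC decryption, by contrast, can run the block cipher on all ciphertext blocks in parallel and only then XOR in the previous ciphertext blocks, which is why the help text singles out CBC encryption (not decryption) as needing the table-based fallback.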
config CRYPTO_AES_ARM_CE tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)" diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index b668c97663ec0c..21df5e7f51f9c9 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include @@ -711,7 +711,7 @@ static int __init aes_init(void) algname = aes_algs[i].base.cra_name + 2; drvname = aes_algs[i].base.cra_driver_name + 2; basename = aes_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c index 6dfaef2d8f913c..29efb783396031 100644 --- a/arch/arm/crypto/aes-cipher-glue.c +++ b/arch/arm/crypto/aes-cipher-glue.c @@ -9,9 +9,10 @@ #include #include #include +#include "aes-cipher.h" -asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out); -asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out); +EXPORT_SYMBOL_GPL(__aes_arm_encrypt); +EXPORT_SYMBOL_GPL(__aes_arm_decrypt); static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { diff --git a/arch/arm/crypto/aes-cipher.h b/arch/arm/crypto/aes-cipher.h new file mode 100644 index 00000000000000..d5db2b87eb69b3 --- /dev/null +++ b/arch/arm/crypto/aes-cipher.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef ARM_CRYPTO_AES_CIPHER_H +#define ARM_CRYPTO_AES_CIPHER_H + +#include +#include + +asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds, + const u8 *in, u8 *out); +asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds, + const u8 *in, u8 *out); + +#endif /* ARM_CRYPTO_AES_CIPHER_H */ diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index 201eb35dde37e0..f6be80b5938b14 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -9,24 +9,22 @@ #include #include #include -#include #include #include #include #include #include +#include "aes-cipher.h" MODULE_AUTHOR("Ard Biesheuvel "); MODULE_DESCRIPTION("Bit sliced AES using NEON instructions"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("ecb(aes)"); -MODULE_ALIAS_CRYPTO("cbc(aes)-all"); +MODULE_ALIAS_CRYPTO("cbc(aes)"); MODULE_ALIAS_CRYPTO("ctr(aes)"); MODULE_ALIAS_CRYPTO("xts(aes)"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); - asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds); asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], @@ -52,13 +50,13 @@ struct aesbs_ctx { struct aesbs_cbc_ctx { struct aesbs_ctx key; - struct crypto_skcipher *enc_tfm; + struct crypto_aes_ctx fallback; }; struct aesbs_xts_ctx { struct aesbs_ctx key; - struct crypto_cipher *cts_tfm; - struct crypto_cipher *tweak_tfm; + struct crypto_aes_ctx fallback; + struct crypto_aes_ctx tweak_key; }; struct aesbs_ctr_ctx { @@ -129,37 +127,49 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_aes_ctx rk; int err; - err = aes_expandkey(&rk, in_key, key_len); + err = aes_expandkey(&ctx->fallback, in_key, key_len); if (err) return err; ctx->key.rounds = 6 + key_len / 4; kernel_neon_begin(); - aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds); + 
aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds); kernel_neon_end(); - memzero_explicit(&rk, sizeof(rk)); - return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len); + return 0; } static int cbc_encrypt(struct skcipher_request *req) { - struct skcipher_request *subreq = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); + const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; - skcipher_request_set_tfm(subreq, ctx->enc_tfm); - skcipher_request_set_callback(subreq, - skcipher_request_flags(req), - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); + err = skcipher_walk_virt(&walk, req, false); - return crypto_skcipher_encrypt(subreq); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + u8 *prev = walk.iv; + + do { + crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE); + __aes_arm_encrypt(ctx->fallback.key_enc, + ctx->key.rounds, dst, dst); + prev = dst; + src += AES_BLOCK_SIZE; + dst += AES_BLOCK_SIZE; + nbytes -= AES_BLOCK_SIZE; + } while (nbytes >= AES_BLOCK_SIZE); + memcpy(walk.iv, prev, AES_BLOCK_SIZE); + err = skcipher_walk_done(&walk, nbytes); + } + return err; } static int cbc_decrypt(struct skcipher_request *req) @@ -190,30 +200,6 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static int cbc_init(struct crypto_skcipher *tfm) -{ - struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - unsigned int reqsize; - - ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->enc_tfm)) - return PTR_ERR(ctx->enc_tfm); - - reqsize = sizeof(struct skcipher_request); - reqsize += crypto_skcipher_reqsize(ctx->enc_tfm); - crypto_skcipher_set_reqsize(tfm, reqsize); - - return 0; -} - -static void cbc_exit(struct crypto_skcipher *tfm) -{ - struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_skcipher(ctx->enc_tfm); -} - static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { @@ -271,16 +257,8 @@ static int ctr_encrypt(struct skcipher_request *req) static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) { struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); - unsigned long flags; - - /* - * Temporarily disable interrupts to avoid races where - * cachelines are evicted when the CPU is interrupted - * to do something else. 
- */ - local_irq_save(flags); - aes_encrypt(&ctx->fallback, dst, src); - local_irq_restore(flags); + + __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst); } static int ctr_encrypt_sync(struct skcipher_request *req) @@ -302,45 +280,23 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, return err; key_len /= 2; - err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len); + err = aes_expandkey(&ctx->fallback, in_key, key_len); if (err) return err; - err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len); + err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len); if (err) return err; return aesbs_setkey(tfm, in_key, key_len); } -static int xts_init(struct crypto_skcipher *tfm) -{ - struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(ctx->cts_tfm)) - return PTR_ERR(ctx->cts_tfm); - - ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(ctx->tweak_tfm)) - crypto_free_cipher(ctx->cts_tfm); - - return PTR_ERR_OR_ZERO(ctx->tweak_tfm); -} - -static void xts_exit(struct crypto_skcipher *tfm) -{ - struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_cipher(ctx->tweak_tfm); - crypto_free_cipher(ctx->cts_tfm); -} - static int __xts_crypt(struct skcipher_request *req, bool encrypt, void (*fn)(u8 out[], u8 const in[], u8 const rk[], int rounds, int blocks, u8 iv[], int)) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + const int rounds = ctx->key.rounds; int tail = req->cryptlen % AES_BLOCK_SIZE; struct skcipher_request subreq; u8 buf[2 * AES_BLOCK_SIZE]; @@ -364,7 +320,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, if (err) return err; - crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv); + __aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv); while (walk.nbytes >= AES_BLOCK_SIZE) { unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; @@ -378,7 +334,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, kernel_neon_begin(); fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, - ctx->key.rounds, blocks, walk.iv, reorder_last_tweak); + rounds, blocks, walk.iv, reorder_last_tweak); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes - blocks * AES_BLOCK_SIZE); @@ -396,9 +352,9 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, crypto_xor(buf, req->iv, AES_BLOCK_SIZE); if (encrypt) - crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf); + __aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf); else - crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf); + __aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf); crypto_xor(buf, req->iv, AES_BLOCK_SIZE); @@ -439,8 +395,7 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx), .base.cra_module = THIS_MODULE, - .base.cra_flags = CRYPTO_ALG_INTERNAL | - CRYPTO_ALG_NEED_FALLBACK, + .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -449,8 +404,6 @@ static struct skcipher_alg aes_algs[] = { { .setkey = aesbs_cbc_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, - .init = cbc_init, - .exit = cbc_exit, }, { .base.cra_name = "__ctr(aes)", .base.cra_driver_name = "__ctr-aes-neonbs", @@ -500,8 +453,6 @@ static struct skcipher_alg aes_algs[] = { { .setkey = aesbs_xts_setkey, .encrypt = xts_encrypt, .decrypt 
= xts_decrypt, - .init = xts_init, - .exit = xts_exit, } }; static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; @@ -540,7 +491,7 @@ static int __init aes_init(void) algname = aes_algs[i].base.cra_name + 2; drvname = aes_algs[i].base.cra_driver_name + 2; basename = aes_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c index 4ff18044af070b..20b4dff13e3a66 100644 --- a/arch/arm/crypto/crc32-ce-glue.c +++ b/arch/arm/crypto/crc32-ce-glue.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #define PMULL_MIN_LEN 64L /* minimum size of buffer * for crc32_pmull_le_16 */ diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index 3ddf05b4234d8e..3af9970825340d 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c index 8482e302c45a44..4464ffbf8fd168 100644 --- a/arch/arm/crypto/poly1305-glue.c +++ b/arch/arm/crypto/poly1305-glue.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c index c62ce89dd3e0d8..aeac45bfbf9f1f 100644 --- a/arch/arm/crypto/sha2-ce-glue.c +++ b/arch/arm/crypto/sha2-ce-glue.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include "sha256_glue.h" diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h index a41b503b7dcde0..f63ba8986b2488 100644 --- a/arch/arm/include/asm/arm_pmuv3.h +++ b/arch/arm/include/asm/arm_pmuv3.h @@ -127,6 +127,12 @@ static inline u32 read_pmuver(void) return (dfr0 >> 24) & 0xf; } +static inline bool pmuv3_has_icntr(void) +{ + /* FEAT_PMUv3_ICNTR not accessible for 32-bit */ + return false; +} + static inline void write_pmcr(u32 val) { write_sysreg(val, PMCR); @@ -152,6 +158,13 @@ static inline u64 read_pmccntr(void) return read_sysreg(PMCCNTR); } +static inline void write_pmicntr(u64 val) {} + +static inline u64 read_pmicntr(void) +{ + return 0; +} + static inline void write_pmcntenset(u32 val) { write_sysreg(val, PMCNTENSET); @@ -177,6 +190,13 @@ static inline void write_pmccfiltr(u32 val) write_sysreg(val, PMCCFILTR); } +static inline void write_pmicfiltr(u64 val) {} + +static inline u64 read_pmicfiltr(void) +{ + return 0; +} + static inline void write_pmovsclr(u32 val) { write_sysreg(val, PMOVSR); diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h index bd6fdb4b922d49..9d8863537aa5a8 100644 --- a/arch/arm/include/asm/cpu.h +++ b/arch/arm/include/asm/cpu.h @@ -11,7 +11,6 @@ #include struct cpuinfo_arm { - struct cpu cpu; u32 cpuid; #ifdef CONFIG_SMP unsigned int loops_per_jiffy; diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 82ec1ccf1fee4c..2ce4c5683e6d3c 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -24,7 +24,7 @@ struct dma_iommu_mapping { }; struct dma_iommu_mapping * -arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size); +arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size); void arm_iommu_release_mapping(struct dma_iommu_mapping 
*mapping); diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h index bd61502b97153c..8a648e506540fe 100644 --- a/arch/arm/include/asm/hypervisor.h +++ b/arch/arm/include/asm/hypervisor.h @@ -7,4 +7,6 @@ void kvm_init_hyp_services(void); bool kvm_arm_hyp_service_available(u32 func_id); +static inline void kvm_arch_init_hyp_services(void) { }; + #endif diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index dfab3e982cbf65..944fc995552853 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -106,6 +106,11 @@ /* * TTBCR register bits. + * + * The ORGN0 and IRGN0 bits enables different forms of caching when + * walking the translation table. Clearing these bits (which is claimed + * to be the reset default) means "normal memory, [outer|inner] + * non-cacheable" */ #define TTBCR_EAE (1 << 31) #define TTBCR_IMP (1 << 30) diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 6c9c16d767cfd5..f90be312418e87 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 7b33b157fca0dc..e6a857bf0ce69e 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -1201,20 +1201,10 @@ void __init setup_arch(char **cmdline_p) mdesc->init_early(); } - -static int __init topology_init(void) +bool arch_cpu_is_hotpluggable(int num) { - int cpu; - - for_each_possible_cpu(cpu) { - struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); - cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu); - register_cpu(&cpuinfo->cpu, cpu); - } - - return 0; + return platform_can_hotplug_cpu(num); } -subsys_initcall(topology_init); #ifdef CONFIG_HAVE_PROC_CPU static int __init proc_cpu_init(void) diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index d00f4040a9f593..f5781ff54a5ca3 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -239,19 +239,19 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd, struct flock64 flock; long err = -EBADF; - if (!f.file) + if (!fd_file(f)) goto out; switch (cmd) { case F_GETLK64: case F_OFD_GETLK: - err = security_file_fcntl(f.file, cmd, arg); + err = security_file_fcntl(fd_file(f), cmd, arg); if (err) break; err = get_oabi_flock(&flock, argp); if (err) break; - err = fcntl_getlk64(f.file, cmd, &flock); + err = fcntl_getlk64(fd_file(f), cmd, &flock); if (!err) err = put_oabi_flock(&flock, argp); break; @@ -259,13 +259,13 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd, case F_SETLKW64: case F_OFD_SETLK: case F_OFD_SETLKW: - err = security_file_fcntl(f.file, cmd, arg); + err = security_file_fcntl(fd_file(f), cmd, arg); if (err) break; err = get_oabi_flock(&flock, argp); if (err) break; - err = fcntl_setlk64(fd, f.file, cmd, &flock); + err = fcntl_setlk64(fd, fd_file(f), cmd, &flock); break; default: err = sys_fcntl64(fd, cmd, arg); diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c index 522510baed4902..cf57fca979089a 100644 --- a/arch/arm/lib/xor-neon.c +++ b/arch/arm/lib/xor-neon.c @@ -8,6 +8,7 @@ #include #include +MODULE_DESCRIPTION("NEON accelerated XOR implementation"); MODULE_LICENSE("GPL"); #ifndef __ARM_NEON__ diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index 
a8c022b4c0535a..344f5305f69afc 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -141,11 +141,27 @@ config SOC_SAM9X60 help Select this if you are using Microchip's SAM9X60 SoC +config SOC_SAM9X7 + bool "SAM9X7" + depends on ARCH_MULTI_V5 + select ATMEL_AIC5_IRQ + select ATMEL_PM if PM + select CPU_ARM926T + select HAVE_AT91_USB_CLK + select HAVE_AT91_GENERATED_CLK + select HAVE_AT91_SAM9X60_PLL + select MEMORY + select PINCTRL_AT91 + select SOC_SAM_V4_V5 + select SRAM if PM + help + Select this if you are using Microchip's SAM9X7 SoC + comment "Clocksource driver selection" config ATMEL_CLOCKSOURCE_PIT bool "Periodic Interval Timer (PIT) support" - depends on SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5 + depends on SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA5 default SOC_AT91SAM9 || SOC_SAMA5 select ATMEL_PIT help @@ -155,7 +171,7 @@ config ATMEL_CLOCKSOURCE_PIT config ATMEL_CLOCKSOURCE_TCB bool "Timer Counter Blocks (TCB) support" - default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5 + default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA5 select ATMEL_TCB_CLKSRC help Select this to get a high precision clocksource based on a @@ -166,7 +182,7 @@ config ATMEL_CLOCKSOURCE_TCB config MICROCHIP_CLOCKSOURCE_PIT64B bool "64-bit Periodic Interval Timer (PIT64B) support" - default SOC_SAM9X60 || SOC_SAMA7 + default SOC_SAM9X60 || SOC_SAM9X7 || SOC_SAMA7 select MICROCHIP_PIT64B help Select this to get a high resolution clockevent (SAM9X60) or diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile index 794bd12ab0a8e2..7d8a7bc44e6549 100644 --- a/arch/arm/mach-at91/Makefile +++ b/arch/arm/mach-at91/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o obj-$(CONFIG_SOC_AT91SAM9) += at91sam9.o obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o +obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o obj-$(CONFIG_SOC_SAMA5) += sama5.o sam_secure.o obj-$(CONFIG_SOC_SAMA7) += sama7.o obj-$(CONFIG_SOC_SAMV7) += samv7.o diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h index 0c3960a8b3ebb3..acf0b3c82a30d8 100644 --- a/arch/arm/mach-at91/generic.h +++ b/arch/arm/mach-at91/generic.h @@ -12,6 +12,7 @@ extern void __init at91rm9200_pm_init(void); extern void __init at91sam9_pm_init(void); extern void __init sam9x60_pm_init(void); +extern void __init sam9x7_pm_init(void); extern void __init sama5_pm_init(void); extern void __init sama5d2_pm_init(void); extern void __init sama7_pm_init(void); @@ -19,6 +20,7 @@ extern void __init sama7_pm_init(void); static inline void __init at91rm9200_pm_init(void) { } static inline void __init at91sam9_pm_init(void) { } static inline void __init sam9x60_pm_init(void) { } +static inline void __init sam9x7_pm_init(void) { } static inline void __init sama5_pm_init(void) { } static inline void __init sama5d2_pm_init(void) { } static inline void __init sama7_pm_init(void) { } diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 345b91dc6627b4..b9b995f8a36e14 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -233,6 +233,13 @@ static const struct of_device_id sama7g5_ws_ids[] = { { /* sentinel */ } }; +static const struct of_device_id sam9x7_ws_ids[] = { + { .compatible = "microchip,sam9x7-rtc", .data = &ws_info[1] }, + { .compatible = "microchip,sam9x7-rtt", .data = &ws_info[4] }, + { .compatible = "microchip,sam9x7-gem", .data = &ws_info[5] }, + { /* sentinel */ } +}; + static int at91_pm_config_ws(unsigned int pm_mode, bool set) { const struct 
wakeup_source_info *wsi; @@ -1361,6 +1368,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = { { .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] }, { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] }, { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] }, + { .compatible = "microchip,sam9x7-pmc", .data = &pmc_infos[4] }, { .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] }, { /* sentinel */ }, }; @@ -1499,6 +1507,27 @@ void __init sam9x60_pm_init(void) soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws; } +void __init sam9x7_pm_init(void) +{ + static const int modes[] __initconst = { + AT91_PM_STANDBY, AT91_PM_ULP0, + }; + int ret; + + if (!IS_ENABLED(CONFIG_SOC_SAM9X7)) + return; + + at91_pm_modes_validate(modes, ARRAY_SIZE(modes)); + ret = at91_dt_ramc(false); + if (ret) + return; + + at91_pm_init(NULL); + + soc_pm.ws_ids = sam9x7_ws_ids; + soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws; +} + void __init at91sam9_pm_init(void) { int ret; diff --git a/arch/arm/mach-at91/sam9x7.c b/arch/arm/mach-at91/sam9x7.c new file mode 100644 index 00000000000000..e1ff30b5b09b20 --- /dev/null +++ b/arch/arm/mach-at91/sam9x7.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Setup code for SAM9X7. + * + * Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries + * + * Author: Varshini Rajendran + */ + +#include +#include + +#include + +#include "generic.h" + +static void __init sam9x7_init(void) +{ + of_platform_default_populate(NULL, NULL, NULL); + + sam9x7_pm_init(); +} + +static const char * const sam9x7_dt_board_compat[] __initconst = { + "microchip,sam9x7", + NULL +}; + +DT_MACHINE_START(sam9x7_dt, "Microchip SAM9X7") + /* Maintainer: Microchip */ + .init_machine = sam9x7_init, + .dt_compat = sam9x7_dt_board_compat, +MACHINE_END diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index 7318d8789e2486..24bc6e18d80674 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -185,6 +185,7 @@ config ARCH_BRCMSTB select ARCH_HAS_RESET_CONTROLLER select ARM_AMBA select ARM_GIC + select ARM_GIC_V3 select ARM_ERRATA_798181 if SMP select HAVE_ARM_ARCH_TIMER select ZONE_DMA if ARM_LPAE diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile index 450883ea0e7364..31d22a5d8e1e11 100644 --- a/arch/arm/mach-davinci/Makefile +++ b/arch/arm/mach-davinci/Makefile @@ -16,7 +16,6 @@ obj-$(CONFIG_ARCH_DAVINCI_DA850) += da850.o pdata-quirks.o obj-y += da8xx-dt.o # Power Management -obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_HAVE_CLK) += pm_domain.o ifeq ($(CONFIG_SUSPEND),y) obj-$(CONFIG_ARCH_DAVINCI_DA850) += pm.o sleep.o diff --git a/arch/arm/mach-davinci/common.h b/arch/arm/mach-davinci/common.h index 8aa6d4fc3f6f41..81a2b06b46e981 100644 --- a/arch/arm/mach-davinci/common.h +++ b/arch/arm/mach-davinci/common.h @@ -52,7 +52,6 @@ struct davinci_soc_info { extern struct davinci_soc_info davinci_soc_info; extern void davinci_common_init(const struct davinci_soc_info *soc_info); -extern void davinci_init_ide(void); void davinci_init_late(void); #ifdef CONFIG_SUSPEND diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c deleted file mode 100644 index 78a1575c387d9f..00000000000000 --- a/arch/arm/mach-davinci/cpuidle.c +++ /dev/null @@ -1,99 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * CPU idle for DaVinci SoCs - * - * Copyright (C) 2009 Texas Instruments Incorporated. 
https://www.ti.com/ - * - * Derived from Marvell Kirkwood CPU idle code - * (arch/arm/mach-kirkwood/cpuidle.c) - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "cpuidle.h" -#include "ddr2.h" - -#define DAVINCI_CPUIDLE_MAX_STATES 2 - -static void __iomem *ddr2_reg_base; -static bool ddr2_pdown; - -static void davinci_save_ddr_power(int enter, bool pdown) -{ - u32 val; - - val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET); - - if (enter) { - if (pdown) - val |= DDR2_SRPD_BIT; - else - val &= ~DDR2_SRPD_BIT; - val |= DDR2_LPMODEN_BIT; - } else { - val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT); - } - - __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET); -} - -/* Actual code that puts the SoC in different idle states */ -static __cpuidle int davinci_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) -{ - davinci_save_ddr_power(1, ddr2_pdown); - cpu_do_idle(); - davinci_save_ddr_power(0, ddr2_pdown); - - return index; -} - -static struct cpuidle_driver davinci_idle_driver = { - .name = "cpuidle-davinci", - .owner = THIS_MODULE, - .states[0] = ARM_CPUIDLE_WFI_STATE, - .states[1] = { - .enter = davinci_enter_idle, - .exit_latency = 10, - .target_residency = 10000, - .name = "DDR SR", - .desc = "WFI and DDR Self Refresh", - }, - .state_count = DAVINCI_CPUIDLE_MAX_STATES, -}; - -static int __init davinci_cpuidle_probe(struct platform_device *pdev) -{ - struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; - - if (!pdata) { - dev_err(&pdev->dev, "cannot get platform data\n"); - return -ENOENT; - } - - ddr2_reg_base = pdata->ddr2_ctlr_base; - - ddr2_pdown = pdata->ddr2_pdown; - - return cpuidle_register(&davinci_idle_driver, NULL); -} - -static struct platform_driver davinci_cpuidle_driver = { - .driver = { - .name = "cpuidle-davinci", - }, -}; - -static int __init davinci_cpuidle_init(void) -{ - return platform_driver_probe(&davinci_cpuidle_driver, - davinci_cpuidle_probe); -} -device_initcall(davinci_cpuidle_init); - diff --git a/arch/arm/mach-davinci/cpuidle.h b/arch/arm/mach-davinci/cpuidle.h deleted file mode 100644 index 976d43073597e0..00000000000000 --- a/arch/arm/mach-davinci/cpuidle.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * TI DaVinci cpuidle platform support - * - * 2009 (C) Texas Instruments, Inc. 
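
For illustration only, and not part of this patch: the cpuidle driver removed above took its DDR2 controller details via platform data, using the davinci_cpuidle_config structure from the header deleted next. A minimal sketch of the board-side hookup it implied is shown below; the device instance, pdata values and function name are invented for the example.

#include <linux/init.h>
#include <linux/platform_device.h>

#include "cpuidle.h"	/* struct davinci_cpuidle_config, from the header removed below */

/* Hypothetical board-side hookup for the removed "cpuidle-davinci" driver. */
static struct davinci_cpuidle_config example_cpuidle_pdata = {
	.ddr2_pdown = 1,	/* ask for DDR2 self-refresh power-down while idle */
	/* .ddr2_ctlr_base would be set to the ioremapped DDR2 controller base */
};

static struct platform_device example_cpuidle_device = {
	.name = "cpuidle-davinci",
	.id = -1,
	.dev = {
		.platform_data = &example_cpuidle_pdata,
	},
};

/* Called from the board's init_machine(), before the driver's initcall runs. */
static void __init example_register_cpuidle(void)
{
	platform_device_register(&example_cpuidle_device);
}
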
https://www.ti.com/ - */ -#ifndef _MACH_DAVINCI_CPUIDLE_H -#define _MACH_DAVINCI_CPUIDLE_H - -struct davinci_cpuidle_config { - u32 ddr2_pdown; - void __iomem *ddr2_ctlr_base; -}; - -#endif diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 6939166c33c298..5e73a725d5daf5 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -21,7 +21,6 @@ #include "common.h" #include "cputype.h" #include "da8xx.h" -#include "cpuidle.h" #include "irqs.h" #include "sram.h" diff --git a/arch/arm/mach-davinci/mux.h b/arch/arm/mach-davinci/mux.h index 38f0e427291eda..05fd3902df658b 100644 --- a/arch/arm/mach-davinci/mux.h +++ b/arch/arm/mach-davinci/mux.h @@ -654,14 +654,9 @@ enum davinci_da850_index { #ifdef CONFIG_DAVINCI_MUX /* setup pin muxing */ extern int davinci_cfg_reg(unsigned long reg_cfg); -extern int davinci_cfg_reg_list(const short pins[]); #else /* boot loader does it all (no warnings from CONFIG_DAVINCI_MUX_WARNINGS) */ static inline int davinci_cfg_reg(unsigned long reg_cfg) { return 0; } -static inline int davinci_cfg_reg_list(const short pins[]) -{ - return 0; -} #endif diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c index 3aea90bbb41aa1..35e97851459102 100644 --- a/arch/arm/mach-dove/common.c +++ b/arch/arm/mach-dove/common.c @@ -82,7 +82,7 @@ static void __init dove_clk_init(void) { struct clk *usb0, *usb1, *sata, *pex0, *pex1, *sdio0, *sdio1; struct clk *nand, *camera, *i2s0, *i2s1, *crypto, *ac97, *pdma; - struct clk *xor0, *xor1, *ge, *gephy; + struct clk *xor0, *xor1, *ge; tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 0, dove_tclk); @@ -102,7 +102,7 @@ static void __init dove_clk_init(void) pdma = dove_register_gate("pdma", "tclk", CLOCK_GATING_BIT_PDMA); xor0 = dove_register_gate("xor0", "tclk", CLOCK_GATING_BIT_XOR0); xor1 = dove_register_gate("xor1", "tclk", CLOCK_GATING_BIT_XOR1); - gephy = dove_register_gate("gephy", "tclk", CLOCK_GATING_BIT_GIGA_PHY); + dove_register_gate("gephy", "tclk", CLOCK_GATING_BIT_GIGA_PHY); ge = dove_register_gate("ge", "gephy", CLOCK_GATING_BIT_GBE); orion_clkdev_add(NULL, "orion_spi.0", tclk); diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig index 703f3d232a60db..812b71dcf60e01 100644 --- a/arch/arm/mach-ep93xx/Kconfig +++ b/arch/arm/mach-ep93xx/Kconfig @@ -3,27 +3,27 @@ menuconfig ARCH_EP93XX bool "EP93xx-based" depends on ATAGS depends on ARCH_MULTI_V4T + # CONFIG_ARCH_MULTI_V7 is not set depends on CPU_LITTLE_ENDIAN + select ARCH_HAS_RESET_CONTROLLER select ARCH_SPARSEMEM_ENABLE select ARM_AMBA select ARM_VIC + select ARM_APPENDED_DTB # Old Redboot bootloaders deployed + select ARM_ATAG_DTB_COMPAT # we need this to update dt memory node + select COMMON_CLK_EP93XX + select EP93XX_TIMER select CLKSRC_MMIO select CPU_ARM920T select GPIOLIB + select PINCTRL + select PINCTRL_EP93XX help This enables support for the Cirrus EP93xx series of CPUs. if ARCH_EP93XX -menu "Cirrus EP93xx Implementation Options" - -config EP93XX_SOC_COMMON - bool - default y - select SOC_BUS - select LEDS_GPIO_REGISTER - -comment "EP93xx Platforms" +# menu "EP93xx Platforms" config MACH_BK3 bool "Support Liebherr BK3.1" @@ -103,6 +103,6 @@ config MACH_VISION_EP9307 Say 'Y' here if you want your kernel to support the Vision Engraving Systems EP9307 SoM. 
-endmenu +# endmenu endif diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile deleted file mode 100644 index 62e37403df14db..00000000000000 --- a/arch/arm/mach-ep93xx/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for the linux kernel. -# -obj-y := core.o clock.o timer-ep93xx.o - -obj-$(CONFIG_EP93XX_DMA) += dma.o - -obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o -obj-$(CONFIG_MACH_TS72XX) += ts72xx.o -obj-$(CONFIG_MACH_VISION_EP9307)+= vision_ep9307.o diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c deleted file mode 100644 index 85a496ddc6197e..00000000000000 --- a/arch/arm/mach-ep93xx/clock.c +++ /dev/null @@ -1,733 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * arch/arm/mach-ep93xx/clock.c - * Clock control for Cirrus EP93xx chips. - * - * Copyright (C) 2006 Lennert Buytenhek - */ - -#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hardware.h" - -#include - -#include "soc.h" - -static DEFINE_SPINLOCK(clk_lock); - -static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 }; -static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 }; -static char pclk_divisors[] = { 1, 2, 4, 8 }; - -static char adc_divisors[] = { 16, 4 }; -static char sclk_divisors[] = { 2, 4 }; -static char lrclk_divisors[] = { 32, 64, 128 }; - -static const char * const mux_parents[] = { - "xtali", - "pll1", - "pll2" -}; - -/* - * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS - */ -static unsigned long calc_pll_rate(unsigned long long rate, u32 config_word) -{ - int i; - - rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */ - rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */ - do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */ - for (i = 0; i < ((config_word >> 16) & 3); i++) /* PS */ - rate >>= 1; - - return (unsigned long)rate; -} - -struct clk_psc { - struct clk_hw hw; - void __iomem *reg; - u8 bit_idx; - u32 mask; - u8 shift; - u8 width; - char *div; - u8 num_div; - spinlock_t *lock; -}; - -#define to_clk_psc(_hw) container_of(_hw, struct clk_psc, hw) - -static int ep93xx_clk_is_enabled(struct clk_hw *hw) -{ - struct clk_psc *psc = to_clk_psc(hw); - u32 val = readl(psc->reg); - - return (val & BIT(psc->bit_idx)) ? 
1 : 0; -} - -static int ep93xx_clk_enable(struct clk_hw *hw) -{ - struct clk_psc *psc = to_clk_psc(hw); - unsigned long flags = 0; - u32 val; - - if (psc->lock) - spin_lock_irqsave(psc->lock, flags); - - val = __raw_readl(psc->reg); - val |= BIT(psc->bit_idx); - - ep93xx_syscon_swlocked_write(val, psc->reg); - - if (psc->lock) - spin_unlock_irqrestore(psc->lock, flags); - - return 0; -} - -static void ep93xx_clk_disable(struct clk_hw *hw) -{ - struct clk_psc *psc = to_clk_psc(hw); - unsigned long flags = 0; - u32 val; - - if (psc->lock) - spin_lock_irqsave(psc->lock, flags); - - val = __raw_readl(psc->reg); - val &= ~BIT(psc->bit_idx); - - ep93xx_syscon_swlocked_write(val, psc->reg); - - if (psc->lock) - spin_unlock_irqrestore(psc->lock, flags); -} - -static const struct clk_ops clk_ep93xx_gate_ops = { - .enable = ep93xx_clk_enable, - .disable = ep93xx_clk_disable, - .is_enabled = ep93xx_clk_is_enabled, -}; - -static struct clk_hw *ep93xx_clk_register_gate(const char *name, - const char *parent_name, - void __iomem *reg, - u8 bit_idx) -{ - struct clk_init_data init; - struct clk_psc *psc; - struct clk *clk; - - psc = kzalloc(sizeof(*psc), GFP_KERNEL); - if (!psc) - return ERR_PTR(-ENOMEM); - - init.name = name; - init.ops = &clk_ep93xx_gate_ops; - init.flags = CLK_SET_RATE_PARENT; - init.parent_names = (parent_name ? &parent_name : NULL); - init.num_parents = (parent_name ? 1 : 0); - - psc->reg = reg; - psc->bit_idx = bit_idx; - psc->hw.init = &init; - psc->lock = &clk_lock; - - clk = clk_register(NULL, &psc->hw); - if (IS_ERR(clk)) { - kfree(psc); - return ERR_CAST(clk); - } - - return &psc->hw; -} - -static u8 ep93xx_mux_get_parent(struct clk_hw *hw) -{ - struct clk_psc *psc = to_clk_psc(hw); - u32 val = __raw_readl(psc->reg); - - if (!(val & EP93XX_SYSCON_CLKDIV_ESEL)) - return 0; - - if (!(val & EP93XX_SYSCON_CLKDIV_PSEL)) - return 1; - - return 2; -} - -static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index) -{ - struct clk_psc *psc = to_clk_psc(hw); - unsigned long flags = 0; - u32 val; - - if (index >= ARRAY_SIZE(mux_parents)) - return -EINVAL; - - if (psc->lock) - spin_lock_irqsave(psc->lock, flags); - - val = __raw_readl(psc->reg); - val &= ~(EP93XX_SYSCON_CLKDIV_ESEL | EP93XX_SYSCON_CLKDIV_PSEL); - - - if (index != 0) { - val |= EP93XX_SYSCON_CLKDIV_ESEL; - val |= (index - 1) ? EP93XX_SYSCON_CLKDIV_PSEL : 0; - } - - ep93xx_syscon_swlocked_write(val, psc->reg); - - if (psc->lock) - spin_unlock_irqrestore(psc->lock, flags); - - return 0; -} - -static bool is_best(unsigned long rate, unsigned long now, - unsigned long best) -{ - return abs(rate - now) < abs(rate - best); -} - -static int ep93xx_mux_determine_rate(struct clk_hw *hw, - struct clk_rate_request *req) -{ - unsigned long rate = req->rate; - struct clk *best_parent = NULL; - unsigned long __parent_rate; - unsigned long best_rate = 0, actual_rate, mclk_rate; - unsigned long best_parent_rate; - int __div = 0, __pdiv = 0; - int i; - - /* - * Try the two pll's and the external clock - * Because the valid predividers are 2, 2.5 and 3, we multiply - * all the clocks by 2 to avoid floating point math. 
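
The integer-only approach described here pairs with the PLL formula quoted at the top of this file (PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS). Below is a small, standalone user-space sketch of that decode, reusing the field layout from calc_pll_rate() above; the example ClkSet word is made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define EP93XX_EXT_CLK_RATE	14745600UL	/* 14.7456 MHz crystal */

/* Same arithmetic as calc_pll_rate() above, in plain user-space C. */
static unsigned long pll_rate_from_clkset(uint32_t w)
{
	unsigned long long rate = EP93XX_EXT_CLK_RATE;

	rate *= ((w >> 11) & 0x1f) + 1;		/* X1FBD */
	rate *= ((w >> 5) & 0x3f) + 1;		/* X2FBD */
	rate /= (w & 0x1f) + 1;			/* X2IPD */
	rate >>= (w >> 16) & 3;			/* PS */

	return (unsigned long)rate;
}

int main(void)
{
	/* Made-up example: X1FBD=1, X2FBD=47, X2IPD=2, PS=1
	 * -> 14.7456 MHz * 2 * 48 / 3 / 2 = 235.9296 MHz */
	uint32_t w = (1 << 11) | (47 << 5) | 2 | (1 << 16);

	printf("PLL rate: %lu Hz\n", pll_rate_from_clkset(w));
	return 0;
}
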
- * - * This is based on the algorithm in the ep93xx raster guide: - * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf - * - */ - for (i = 0; i < ARRAY_SIZE(mux_parents); i++) { - struct clk *parent = clk_get_sys(mux_parents[i], NULL); - - __parent_rate = clk_get_rate(parent); - mclk_rate = __parent_rate * 2; - - /* Try each predivider value */ - for (__pdiv = 4; __pdiv <= 6; __pdiv++) { - __div = mclk_rate / (rate * __pdiv); - if (__div < 2 || __div > 127) - continue; - - actual_rate = mclk_rate / (__pdiv * __div); - if (is_best(rate, actual_rate, best_rate)) { - best_rate = actual_rate; - best_parent_rate = __parent_rate; - best_parent = parent; - } - } - } - - if (!best_parent) - return -EINVAL; - - req->best_parent_rate = best_parent_rate; - req->best_parent_hw = __clk_get_hw(best_parent); - req->rate = best_rate; - - return 0; -} - -static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct clk_psc *psc = to_clk_psc(hw); - unsigned long rate = 0; - u32 val = __raw_readl(psc->reg); - int __pdiv = ((val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & 0x03); - int __div = val & 0x7f; - - if (__div > 0) - rate = (parent_rate * 2) / ((__pdiv + 3) * __div); - - return rate; -} - -static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct clk_psc *psc = to_clk_psc(hw); - int pdiv = 0, div = 0; - unsigned long best_rate = 0, actual_rate, mclk_rate; - int __div = 0, __pdiv = 0; - u32 val; - - mclk_rate = parent_rate * 2; - - for (__pdiv = 4; __pdiv <= 6; __pdiv++) { - __div = mclk_rate / (rate * __pdiv); - if (__div < 2 || __div > 127) - continue; - - actual_rate = mclk_rate / (__pdiv * __div); - if (is_best(rate, actual_rate, best_rate)) { - pdiv = __pdiv - 3; - div = __div; - best_rate = actual_rate; - } - } - - if (!best_rate) - return -EINVAL; - - val = __raw_readl(psc->reg); - - /* Clear old dividers */ - val &= ~0x37f; - - /* Set the new pdiv and div bits for the new clock rate */ - val |= (pdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | div; - ep93xx_syscon_swlocked_write(val, psc->reg); - - return 0; -} - -static const struct clk_ops clk_ddiv_ops = { - .enable = ep93xx_clk_enable, - .disable = ep93xx_clk_disable, - .is_enabled = ep93xx_clk_is_enabled, - .get_parent = ep93xx_mux_get_parent, - .set_parent = ep93xx_mux_set_parent_lock, - .determine_rate = ep93xx_mux_determine_rate, - .recalc_rate = ep93xx_ddiv_recalc_rate, - .set_rate = ep93xx_ddiv_set_rate, -}; - -static struct clk_hw *clk_hw_register_ddiv(const char *name, - void __iomem *reg, - u8 bit_idx) -{ - struct clk_init_data init; - struct clk_psc *psc; - struct clk *clk; - - psc = kzalloc(sizeof(*psc), GFP_KERNEL); - if (!psc) - return ERR_PTR(-ENOMEM); - - init.name = name; - init.ops = &clk_ddiv_ops; - init.flags = 0; - init.parent_names = mux_parents; - init.num_parents = ARRAY_SIZE(mux_parents); - - psc->reg = reg; - psc->bit_idx = bit_idx; - psc->lock = &clk_lock; - psc->hw.init = &init; - - clk = clk_register(NULL, &psc->hw); - if (IS_ERR(clk)) { - kfree(psc); - return ERR_CAST(clk); - } - return &psc->hw; -} - -static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct clk_psc *psc = to_clk_psc(hw); - u32 val = __raw_readl(psc->reg); - u8 index = (val & psc->mask) >> psc->shift; - - if (index > psc->num_div) - return 0; - - return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]); -} - -static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ 
- struct clk_psc *psc = to_clk_psc(hw); - unsigned long best = 0, now, maxdiv; - int i; - - maxdiv = psc->div[psc->num_div - 1]; - - for (i = 0; i < psc->num_div; i++) { - if ((rate * psc->div[i]) == *parent_rate) - return DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]); - - now = DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]); - - if (is_best(rate, now, best)) - best = now; - } - - if (!best) - best = DIV_ROUND_UP_ULL(*parent_rate, maxdiv); - - return best; -} - -static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct clk_psc *psc = to_clk_psc(hw); - u32 val = __raw_readl(psc->reg) & ~psc->mask; - int i; - - for (i = 0; i < psc->num_div; i++) - if (rate == parent_rate / psc->div[i]) { - val |= i << psc->shift; - break; - } - - if (i == psc->num_div) - return -EINVAL; - - ep93xx_syscon_swlocked_write(val, psc->reg); - - return 0; -} - -static const struct clk_ops ep93xx_div_ops = { - .enable = ep93xx_clk_enable, - .disable = ep93xx_clk_disable, - .is_enabled = ep93xx_clk_is_enabled, - .recalc_rate = ep93xx_div_recalc_rate, - .round_rate = ep93xx_div_round_rate, - .set_rate = ep93xx_div_set_rate, -}; - -static struct clk_hw *clk_hw_register_div(const char *name, - const char *parent_name, - void __iomem *reg, - u8 enable_bit, - u8 shift, - u8 width, - char *clk_divisors, - u8 num_div) -{ - struct clk_init_data init; - struct clk_psc *psc; - struct clk *clk; - - psc = kzalloc(sizeof(*psc), GFP_KERNEL); - if (!psc) - return ERR_PTR(-ENOMEM); - - init.name = name; - init.ops = &ep93xx_div_ops; - init.flags = 0; - init.parent_names = (parent_name ? &parent_name : NULL); - init.num_parents = 1; - - psc->reg = reg; - psc->bit_idx = enable_bit; - psc->mask = GENMASK(shift + width - 1, shift); - psc->shift = shift; - psc->div = clk_divisors; - psc->num_div = num_div; - psc->lock = &clk_lock; - psc->hw.init = &init; - - clk = clk_register(NULL, &psc->hw); - if (IS_ERR(clk)) { - kfree(psc); - return ERR_CAST(clk); - } - return &psc->hw; -} - -struct ep93xx_gate { - unsigned int bit; - const char *dev_id; - const char *con_id; -}; - -static struct ep93xx_gate ep93xx_uarts[] = { - {EP93XX_SYSCON_DEVCFG_U1EN, "apb:uart1", NULL}, - {EP93XX_SYSCON_DEVCFG_U2EN, "apb:uart2", NULL}, - {EP93XX_SYSCON_DEVCFG_U3EN, "apb:uart3", NULL}, -}; - -static void __init ep93xx_uart_clock_init(void) -{ - unsigned int i; - struct clk_hw *hw; - u32 value; - unsigned int clk_uart_div; - - value = __raw_readl(EP93XX_SYSCON_PWRCNT); - if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD) - clk_uart_div = 1; - else - clk_uart_div = 2; - - hw = clk_hw_register_fixed_factor(NULL, "uart", "xtali", 0, 1, clk_uart_div); - - /* parenting uart gate clocks to uart clock */ - for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) { - hw = ep93xx_clk_register_gate(ep93xx_uarts[i].dev_id, - "uart", - EP93XX_SYSCON_DEVCFG, - ep93xx_uarts[i].bit); - - clk_hw_register_clkdev(hw, NULL, ep93xx_uarts[i].dev_id); - } -} - -static struct ep93xx_gate ep93xx_dmas[] = { - {EP93XX_SYSCON_PWRCNT_DMA_M2P0, NULL, "m2p0"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P1, NULL, "m2p1"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P2, NULL, "m2p2"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P3, NULL, "m2p3"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P4, NULL, "m2p4"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P5, NULL, "m2p5"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P6, NULL, "m2p6"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P7, NULL, "m2p7"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P8, NULL, "m2p8"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2P9, NULL, "m2p9"}, - {EP93XX_SYSCON_PWRCNT_DMA_M2M0, NULL, "m2m0"}, - 
{EP93XX_SYSCON_PWRCNT_DMA_M2M1, NULL, "m2m1"}, -}; - -static void __init ep93xx_dma_clock_init(void) -{ - unsigned int i; - struct clk_hw *hw; - int ret; - - for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) { - hw = clk_hw_register_gate(NULL, ep93xx_dmas[i].con_id, - "hclk", 0, - EP93XX_SYSCON_PWRCNT, - ep93xx_dmas[i].bit, - 0, - &clk_lock); - - ret = clk_hw_register_clkdev(hw, ep93xx_dmas[i].con_id, NULL); - if (ret) - pr_err("%s: failed to register lookup %s\n", - __func__, ep93xx_dmas[i].con_id); - } -} - -static int __init ep93xx_clock_init(void) -{ - u32 value; - struct clk_hw *hw; - unsigned long clk_pll1_rate; - unsigned long clk_f_rate; - unsigned long clk_h_rate; - unsigned long clk_p_rate; - unsigned long clk_pll2_rate; - unsigned int clk_f_div; - unsigned int clk_h_div; - unsigned int clk_p_div; - unsigned int clk_usb_div; - unsigned long clk_spi_div; - - hw = clk_hw_register_fixed_rate(NULL, "xtali", NULL, 0, EP93XX_EXT_CLK_RATE); - clk_hw_register_clkdev(hw, NULL, "xtali"); - - /* Determine the bootloader configured pll1 rate */ - value = __raw_readl(EP93XX_SYSCON_CLKSET1); - if (!(value & EP93XX_SYSCON_CLKSET1_NBYP1)) - clk_pll1_rate = EP93XX_EXT_CLK_RATE; - else - clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value); - - hw = clk_hw_register_fixed_rate(NULL, "pll1", "xtali", 0, clk_pll1_rate); - clk_hw_register_clkdev(hw, NULL, "pll1"); - - /* Initialize the pll1 derived clocks */ - clk_f_div = fclk_divisors[(value >> 25) & 0x7]; - clk_h_div = hclk_divisors[(value >> 20) & 0x7]; - clk_p_div = pclk_divisors[(value >> 18) & 0x3]; - - hw = clk_hw_register_fixed_factor(NULL, "fclk", "pll1", 0, 1, clk_f_div); - clk_f_rate = clk_get_rate(hw->clk); - hw = clk_hw_register_fixed_factor(NULL, "hclk", "pll1", 0, 1, clk_h_div); - clk_h_rate = clk_get_rate(hw->clk); - hw = clk_hw_register_fixed_factor(NULL, "pclk", "hclk", 0, 1, clk_p_div); - clk_p_rate = clk_get_rate(hw->clk); - - clk_hw_register_clkdev(hw, "apb_pclk", NULL); - - ep93xx_dma_clock_init(); - - /* Determine the bootloader configured pll2 rate */ - value = __raw_readl(EP93XX_SYSCON_CLKSET2); - if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2)) - clk_pll2_rate = EP93XX_EXT_CLK_RATE; - else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN) - clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value); - else - clk_pll2_rate = 0; - - hw = clk_hw_register_fixed_rate(NULL, "pll2", "xtali", 0, clk_pll2_rate); - clk_hw_register_clkdev(hw, NULL, "pll2"); - - /* Initialize the pll2 derived clocks */ - /* - * These four bits set the divide ratio between the PLL2 - * output and the USB clock. - * 0000 - Divide by 1 - * 0001 - Divide by 2 - * 0010 - Divide by 3 - * 0011 - Divide by 4 - * 0100 - Divide by 5 - * 0101 - Divide by 6 - * 0110 - Divide by 7 - * 0111 - Divide by 8 - * 1000 - Divide by 9 - * 1001 - Divide by 10 - * 1010 - Divide by 11 - * 1011 - Divide by 12 - * 1100 - Divide by 13 - * 1101 - Divide by 14 - * 1110 - Divide by 15 - * 1111 - Divide by 1 - * On power-on-reset these bits are reset to 0000b. - */ - clk_usb_div = (((value >> 28) & 0xf) + 1); - hw = clk_hw_register_fixed_factor(NULL, "usb_clk", "pll2", 0, 1, clk_usb_div); - hw = clk_hw_register_gate(NULL, "ohci-platform", - "usb_clk", 0, - EP93XX_SYSCON_PWRCNT, - EP93XX_SYSCON_PWRCNT_USH_EN, - 0, - &clk_lock); - clk_hw_register_clkdev(hw, NULL, "ohci-platform"); - - /* - * EP93xx SSP clock rate was doubled in version E2. 
For more information - * see: - * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf - */ - clk_spi_div = 1; - if (ep93xx_chip_revision() < EP93XX_CHIP_REV_E2) - clk_spi_div = 2; - hw = clk_hw_register_fixed_factor(NULL, "ep93xx-spi.0", "xtali", 0, 1, clk_spi_div); - clk_hw_register_clkdev(hw, NULL, "ep93xx-spi.0"); - - /* pwm clock */ - hw = clk_hw_register_fixed_factor(NULL, "pwm_clk", "xtali", 0, 1, 1); - clk_hw_register_clkdev(hw, "pwm_clk", NULL); - - pr_info("PLL1 running at %ld MHz, PLL2 at %ld MHz\n", - clk_pll1_rate / 1000000, clk_pll2_rate / 1000000); - pr_info("FCLK %ld MHz, HCLK %ld MHz, PCLK %ld MHz\n", - clk_f_rate / 1000000, clk_h_rate / 1000000, - clk_p_rate / 1000000); - - ep93xx_uart_clock_init(); - - /* touchscreen/adc clock */ - hw = clk_hw_register_div("ep93xx-adc", - "xtali", - EP93XX_SYSCON_KEYTCHCLKDIV, - EP93XX_SYSCON_KEYTCHCLKDIV_TSEN, - EP93XX_SYSCON_KEYTCHCLKDIV_ADIV, - 1, - adc_divisors, - ARRAY_SIZE(adc_divisors)); - - clk_hw_register_clkdev(hw, NULL, "ep93xx-adc"); - - /* keypad clock */ - hw = clk_hw_register_div("ep93xx-keypad", - "xtali", - EP93XX_SYSCON_KEYTCHCLKDIV, - EP93XX_SYSCON_KEYTCHCLKDIV_KEN, - EP93XX_SYSCON_KEYTCHCLKDIV_KDIV, - 1, - adc_divisors, - ARRAY_SIZE(adc_divisors)); - - clk_hw_register_clkdev(hw, NULL, "ep93xx-keypad"); - - /* On reset PDIV and VDIV is set to zero, while PDIV zero - * means clock disable, VDIV shouldn't be zero. - * So i set both dividers to minimum. - */ - /* ENA - Enable CLK divider. */ - /* PDIV - 00 - Disable clock */ - /* VDIV - at least 2 */ - /* Check and enable video clk registers */ - value = __raw_readl(EP93XX_SYSCON_VIDCLKDIV); - value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2; - ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_VIDCLKDIV); - - /* check and enable i2s clk registers */ - value = __raw_readl(EP93XX_SYSCON_I2SCLKDIV); - value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2; - ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_I2SCLKDIV); - - /* video clk */ - hw = clk_hw_register_ddiv("ep93xx-fb", - EP93XX_SYSCON_VIDCLKDIV, - EP93XX_SYSCON_CLKDIV_ENABLE); - - clk_hw_register_clkdev(hw, NULL, "ep93xx-fb"); - - /* i2s clk */ - hw = clk_hw_register_ddiv("mclk", - EP93XX_SYSCON_I2SCLKDIV, - EP93XX_SYSCON_CLKDIV_ENABLE); - - clk_hw_register_clkdev(hw, "mclk", "ep93xx-i2s"); - - /* i2s sclk */ -#define EP93XX_I2SCLKDIV_SDIV_SHIFT 16 -#define EP93XX_I2SCLKDIV_SDIV_WIDTH 1 - hw = clk_hw_register_div("sclk", - "mclk", - EP93XX_SYSCON_I2SCLKDIV, - EP93XX_SYSCON_I2SCLKDIV_SENA, - EP93XX_I2SCLKDIV_SDIV_SHIFT, - EP93XX_I2SCLKDIV_SDIV_WIDTH, - sclk_divisors, - ARRAY_SIZE(sclk_divisors)); - - clk_hw_register_clkdev(hw, "sclk", "ep93xx-i2s"); - - /* i2s lrclk */ -#define EP93XX_I2SCLKDIV_LRDIV32_SHIFT 17 -#define EP93XX_I2SCLKDIV_LRDIV32_WIDTH 3 - hw = clk_hw_register_div("lrclk", - "sclk", - EP93XX_SYSCON_I2SCLKDIV, - EP93XX_SYSCON_I2SCLKDIV_SENA, - EP93XX_I2SCLKDIV_LRDIV32_SHIFT, - EP93XX_I2SCLKDIV_LRDIV32_WIDTH, - lrclk_divisors, - ARRAY_SIZE(lrclk_divisors)); - - clk_hw_register_clkdev(hw, "lrclk", "ep93xx-i2s"); - - return 0; -} -postcore_initcall(ep93xx_clock_init); diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c deleted file mode 100644 index 8b1ec60a9a467a..00000000000000 --- a/arch/arm/mach-ep93xx/core.c +++ /dev/null @@ -1,1018 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * arch/arm/mach-ep93xx/core.c - * Core routines for Cirrus EP93xx chips. 
- * - * Copyright (C) 2006 Lennert Buytenhek - * Copyright (C) 2007 Herbert Valerio Riedel - * - * Thanks go to Michael Burian and Ray Lehtiniemi for their key - * role in the ep93xx linux community. - */ - -#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hardware.h" -#include -#include -#include -#include - -#include "gpio-ep93xx.h" - -#include -#include - -#include "soc.h" -#include "irqs.h" - -/************************************************************************* - * Static I/O mappings that are needed for all EP93xx platforms - *************************************************************************/ -static struct map_desc ep93xx_io_desc[] __initdata = { - { - .virtual = EP93XX_AHB_VIRT_BASE, - .pfn = __phys_to_pfn(EP93XX_AHB_PHYS_BASE), - .length = EP93XX_AHB_SIZE, - .type = MT_DEVICE, - }, { - .virtual = EP93XX_APB_VIRT_BASE, - .pfn = __phys_to_pfn(EP93XX_APB_PHYS_BASE), - .length = EP93XX_APB_SIZE, - .type = MT_DEVICE, - }, -}; - -void __init ep93xx_map_io(void) -{ - iotable_init(ep93xx_io_desc, ARRAY_SIZE(ep93xx_io_desc)); -} - -/************************************************************************* - * EP93xx IRQ handling - *************************************************************************/ -void __init ep93xx_init_irq(void) -{ - vic_init(EP93XX_VIC1_BASE, IRQ_EP93XX_VIC0, EP93XX_VIC1_VALID_IRQ_MASK, 0); - vic_init(EP93XX_VIC2_BASE, IRQ_EP93XX_VIC1, EP93XX_VIC2_VALID_IRQ_MASK, 0); -} - - -/************************************************************************* - * EP93xx System Controller Software Locked register handling - *************************************************************************/ - -/* - * syscon_swlock prevents anything else from writing to the syscon - * block while a software locked register is being written. - */ -static DEFINE_SPINLOCK(syscon_swlock); - -void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg) -{ - unsigned long flags; - - spin_lock_irqsave(&syscon_swlock, flags); - - __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); - __raw_writel(val, reg); - - spin_unlock_irqrestore(&syscon_swlock, flags); -} - -void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits) -{ - unsigned long flags; - unsigned int val; - - spin_lock_irqsave(&syscon_swlock, flags); - - val = __raw_readl(EP93XX_SYSCON_DEVCFG); - val &= ~clear_bits; - val |= set_bits; - __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); - __raw_writel(val, EP93XX_SYSCON_DEVCFG); - - spin_unlock_irqrestore(&syscon_swlock, flags); -} - -/** - * ep93xx_chip_revision() - returns the EP93xx chip revision - * - * See "platform.h" for more information. 
- */ -unsigned int ep93xx_chip_revision(void) -{ - unsigned int v; - - v = __raw_readl(EP93XX_SYSCON_SYSCFG); - v &= EP93XX_SYSCON_SYSCFG_REV_MASK; - v >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT; - return v; -} -EXPORT_SYMBOL_GPL(ep93xx_chip_revision); - -/************************************************************************* - * EP93xx GPIO - *************************************************************************/ -static struct resource ep93xx_gpio_resource[] = { - DEFINE_RES_MEM(EP93XX_GPIO_PHYS_BASE, 0xcc), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO_AB), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO0MUX), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO1MUX), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO2MUX), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO3MUX), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO4MUX), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO5MUX), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO6MUX), - DEFINE_RES_IRQ(IRQ_EP93XX_GPIO7MUX), -}; - -static struct platform_device ep93xx_gpio_device = { - .name = "gpio-ep93xx", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_gpio_resource), - .resource = ep93xx_gpio_resource, -}; - -/************************************************************************* - * EP93xx peripheral handling - *************************************************************************/ -#define EP93XX_UART_MCR_OFFSET (0x0100) - -static void ep93xx_uart_set_mctrl(struct amba_device *dev, - void __iomem *base, unsigned int mctrl) -{ - unsigned int mcr; - - mcr = 0; - if (mctrl & TIOCM_RTS) - mcr |= 2; - if (mctrl & TIOCM_DTR) - mcr |= 1; - - __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); -} - -static struct amba_pl010_data ep93xx_uart_data = { - .set_mctrl = ep93xx_uart_set_mctrl, -}; - -static AMBA_APB_DEVICE(uart1, "apb:uart1", 0x00041010, EP93XX_UART1_PHYS_BASE, - { IRQ_EP93XX_UART1 }, &ep93xx_uart_data); - -static AMBA_APB_DEVICE(uart2, "apb:uart2", 0x00041010, EP93XX_UART2_PHYS_BASE, - { IRQ_EP93XX_UART2 }, NULL); - -static AMBA_APB_DEVICE(uart3, "apb:uart3", 0x00041010, EP93XX_UART3_PHYS_BASE, - { IRQ_EP93XX_UART3 }, &ep93xx_uart_data); - -static struct resource ep93xx_rtc_resource[] = { - DEFINE_RES_MEM(EP93XX_RTC_PHYS_BASE, 0x10c), -}; - -static struct platform_device ep93xx_rtc_device = { - .name = "ep93xx-rtc", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_rtc_resource), - .resource = ep93xx_rtc_resource, -}; - -/************************************************************************* - * EP93xx OHCI USB Host - *************************************************************************/ - -static struct clk *ep93xx_ohci_host_clock; - -static int ep93xx_ohci_power_on(struct platform_device *pdev) -{ - if (!ep93xx_ohci_host_clock) { - ep93xx_ohci_host_clock = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(ep93xx_ohci_host_clock)) - return PTR_ERR(ep93xx_ohci_host_clock); - } - - return clk_prepare_enable(ep93xx_ohci_host_clock); -} - -static void ep93xx_ohci_power_off(struct platform_device *pdev) -{ - clk_disable(ep93xx_ohci_host_clock); -} - -static struct usb_ohci_pdata ep93xx_ohci_pdata = { - .power_on = ep93xx_ohci_power_on, - .power_off = ep93xx_ohci_power_off, - .power_suspend = ep93xx_ohci_power_off, -}; - -static struct resource ep93xx_ohci_resources[] = { - DEFINE_RES_MEM(EP93XX_USB_PHYS_BASE, 0x1000), - DEFINE_RES_IRQ(IRQ_EP93XX_USB), -}; - -static u64 ep93xx_ohci_dma_mask = DMA_BIT_MASK(32); - -static struct platform_device ep93xx_ohci_device = { - .name = "ohci-platform", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_ohci_resources), - .resource = ep93xx_ohci_resources, - .dev = { - .dma_mask = &ep93xx_ohci_dma_mask, - 
.coherent_dma_mask = DMA_BIT_MASK(32), - .platform_data = &ep93xx_ohci_pdata, - }, -}; - -/************************************************************************* - * EP93xx physmap'ed flash - *************************************************************************/ -static struct physmap_flash_data ep93xx_flash_data; - -static struct resource ep93xx_flash_resource = { - .flags = IORESOURCE_MEM, -}; - -static struct platform_device ep93xx_flash = { - .name = "physmap-flash", - .id = 0, - .dev = { - .platform_data = &ep93xx_flash_data, - }, - .num_resources = 1, - .resource = &ep93xx_flash_resource, -}; - -/** - * ep93xx_register_flash() - Register the external flash device. - * @width: bank width in octets - * @start: resource start address - * @size: resource size - */ -void __init ep93xx_register_flash(unsigned int width, - resource_size_t start, resource_size_t size) -{ - ep93xx_flash_data.width = width; - - ep93xx_flash_resource.start = start; - ep93xx_flash_resource.end = start + size - 1; - - platform_device_register(&ep93xx_flash); -} - - -/************************************************************************* - * EP93xx ethernet peripheral handling - *************************************************************************/ -static struct ep93xx_eth_data ep93xx_eth_data; - -static struct resource ep93xx_eth_resource[] = { - DEFINE_RES_MEM(EP93XX_ETHERNET_PHYS_BASE, 0x10000), - DEFINE_RES_IRQ(IRQ_EP93XX_ETHERNET), -}; - -static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32); - -static struct platform_device ep93xx_eth_device = { - .name = "ep93xx-eth", - .id = -1, - .dev = { - .platform_data = &ep93xx_eth_data, - .coherent_dma_mask = DMA_BIT_MASK(32), - .dma_mask = &ep93xx_eth_dma_mask, - }, - .num_resources = ARRAY_SIZE(ep93xx_eth_resource), - .resource = ep93xx_eth_resource, -}; - -/** - * ep93xx_register_eth - Register the built-in ethernet platform device. - * @data: platform specific ethernet configuration (__initdata) - * @copy_addr: flag indicating that the MAC address should be copied - * from the IndAd registers (as programmed by the bootloader) - */ -void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr) -{ - if (copy_addr) - memcpy_fromio(data->dev_addr, EP93XX_ETHERNET_BASE + 0x50, 6); - - ep93xx_eth_data = *data; - platform_device_register(&ep93xx_eth_device); -} - - -/************************************************************************* - * EP93xx i2c peripheral handling - *************************************************************************/ - -/* All EP93xx devices use the same two GPIO pins for I2C bit-banging */ -static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = { - .dev_id = "i2c-gpio.0", - .table = { - /* Use local offsets on gpiochip/port "G" */ - GPIO_LOOKUP_IDX("G", 1, NULL, 0, - GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - GPIO_LOOKUP_IDX("G", 0, NULL, 1, - GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - { } - }, -}; - -static struct platform_device ep93xx_i2c_device = { - .name = "i2c-gpio", - .id = 0, - .dev = { - .platform_data = NULL, - }, -}; - -/** - * ep93xx_register_i2c - Register the i2c platform device. - * @devices: platform specific i2c bus device information (__initdata) - * @num: the number of devices on the i2c bus - */ -void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num) -{ - /* - * FIXME: this just sets the two pins as non-opendrain, as no - * platforms tries to do that anyway. 
Flag the applicable lines - * as open drain in the GPIO_LOOKUP above and the driver or - * gpiolib will handle open drain/open drain emulation as need - * be. Right now i2c-gpio emulates open drain which is not - * optimal. - */ - __raw_writel((0 << 1) | (0 << 0), - EP93XX_GPIO_EEDRIVE); - - i2c_register_board_info(0, devices, num); - gpiod_add_lookup_table(&ep93xx_i2c_gpiod_table); - platform_device_register(&ep93xx_i2c_device); -} - -/************************************************************************* - * EP93xx SPI peripheral handling - *************************************************************************/ -static struct ep93xx_spi_info ep93xx_spi_master_data; - -static struct resource ep93xx_spi_resources[] = { - DEFINE_RES_MEM(EP93XX_SPI_PHYS_BASE, 0x18), - DEFINE_RES_IRQ(IRQ_EP93XX_SSP), -}; - -static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32); - -static struct platform_device ep93xx_spi_device = { - .name = "ep93xx-spi", - .id = 0, - .dev = { - .platform_data = &ep93xx_spi_master_data, - .coherent_dma_mask = DMA_BIT_MASK(32), - .dma_mask = &ep93xx_spi_dma_mask, - }, - .num_resources = ARRAY_SIZE(ep93xx_spi_resources), - .resource = ep93xx_spi_resources, -}; - -/** - * ep93xx_register_spi() - registers spi platform device - * @info: ep93xx board specific spi master info (__initdata) - * @devices: SPI devices to register (__initdata) - * @num: number of SPI devices to register - * - * This function registers platform device for the EP93xx SPI controller and - * also makes sure that SPI pins are muxed so that I2S is not using those pins. - */ -void __init ep93xx_register_spi(struct ep93xx_spi_info *info, - struct spi_board_info *devices, int num) -{ - /* - * When SPI is used, we need to make sure that I2S is muxed off from - * SPI pins. 
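
For illustration only, and not part of this patch: the registration helpers being removed here were meant to be called from board files, as their kerneldoc above describes. A sketch of such a caller follows; the device names, I2C address and SPI clock rate are placeholders rather than values from any real EP93xx board.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/spi-ep93xx.h>

#include "platform.h"	/* ep93xx_register_i2c()/ep93xx_register_spi() prototypes */

static struct ep93xx_spi_info example_spi_info __initdata = {
	/* left at driver defaults on purpose */
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "spidev",
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 5 * 1000 * 1000,	/* placeholder */
	},
};

static struct i2c_board_info example_i2c_devices[] __initdata = {
	{ I2C_BOARD_INFO("ds1337", 0x68) },	/* placeholder RTC */
};

/* Hypothetical board init path registering both buses with the helpers above. */
static void __init example_board_register_buses(void)
{
	ep93xx_register_i2c(example_i2c_devices,
			    ARRAY_SIZE(example_i2c_devices));
	ep93xx_register_spi(&example_spi_info, example_spi_devices,
			    ARRAY_SIZE(example_spi_devices));
}
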
- */ - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONSSP); - - ep93xx_spi_master_data = *info; - spi_register_board_info(devices, num); - platform_device_register(&ep93xx_spi_device); -} - -/************************************************************************* - * EP93xx LEDs - *************************************************************************/ -static const struct gpio_led ep93xx_led_pins[] __initconst = { - { - .name = "platform:grled", - }, { - .name = "platform:rdled", - }, -}; - -static const struct gpio_led_platform_data ep93xx_led_data __initconst = { - .num_leds = ARRAY_SIZE(ep93xx_led_pins), - .leds = ep93xx_led_pins, -}; - -static struct gpiod_lookup_table ep93xx_leds_gpio_table = { - .dev_id = "leds-gpio", - .table = { - /* Use local offsets on gpiochip/port "E" */ - GPIO_LOOKUP_IDX("E", 0, NULL, 0, GPIO_ACTIVE_HIGH), - GPIO_LOOKUP_IDX("E", 1, NULL, 1, GPIO_ACTIVE_HIGH), - { } - }, -}; - -/************************************************************************* - * EP93xx pwm peripheral handling - *************************************************************************/ -static struct resource ep93xx_pwm0_resource[] = { - DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE, 0x10), -}; - -static struct platform_device ep93xx_pwm0_device = { - .name = "ep93xx-pwm", - .id = 0, - .num_resources = ARRAY_SIZE(ep93xx_pwm0_resource), - .resource = ep93xx_pwm0_resource, -}; - -static struct resource ep93xx_pwm1_resource[] = { - DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE + 0x20, 0x10), -}; - -static struct platform_device ep93xx_pwm1_device = { - .name = "ep93xx-pwm", - .id = 1, - .num_resources = ARRAY_SIZE(ep93xx_pwm1_resource), - .resource = ep93xx_pwm1_resource, -}; - -void __init ep93xx_register_pwm(int pwm0, int pwm1) -{ - if (pwm0) - platform_device_register(&ep93xx_pwm0_device); - - /* NOTE: EP9307 does not have PWMOUT1 (pin EGPIO14) */ - if (pwm1) - platform_device_register(&ep93xx_pwm1_device); -} - -int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) -{ - int err; - - if (pdev->id == 0) { - err = 0; - } else if (pdev->id == 1) { - err = gpio_request(EP93XX_GPIO_LINE_EGPIO14, - dev_name(&pdev->dev)); - if (err) - return err; - err = gpio_direction_output(EP93XX_GPIO_LINE_EGPIO14, 0); - if (err) - goto fail; - - /* PWM 1 output on EGPIO[14] */ - ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_PONG); - } else { - err = -ENODEV; - } - - return err; - -fail: - gpio_free(EP93XX_GPIO_LINE_EGPIO14); - return err; -} -EXPORT_SYMBOL(ep93xx_pwm_acquire_gpio); - -void ep93xx_pwm_release_gpio(struct platform_device *pdev) -{ - if (pdev->id == 1) { - gpio_direction_input(EP93XX_GPIO_LINE_EGPIO14); - gpio_free(EP93XX_GPIO_LINE_EGPIO14); - - /* EGPIO[14] used for GPIO */ - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_PONG); - } -} -EXPORT_SYMBOL(ep93xx_pwm_release_gpio); - - -/************************************************************************* - * EP93xx video peripheral handling - *************************************************************************/ -static struct ep93xxfb_mach_info ep93xxfb_data; - -static struct resource ep93xx_fb_resource[] = { - DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE, 0x800), -}; - -static struct platform_device ep93xx_fb_device = { - .name = "ep93xx-fb", - .id = -1, - .dev = { - .platform_data = &ep93xxfb_data, - .coherent_dma_mask = DMA_BIT_MASK(32), - .dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask, - }, - .num_resources = ARRAY_SIZE(ep93xx_fb_resource), - .resource = ep93xx_fb_resource, -}; - -/* The backlight use a single register in the framebuffer's 
register space */ -#define EP93XX_RASTER_REG_BRIGHTNESS 0x20 - -static struct resource ep93xx_bl_resources[] = { - DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE + - EP93XX_RASTER_REG_BRIGHTNESS, 0x04), -}; - -static struct platform_device ep93xx_bl_device = { - .name = "ep93xx-bl", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_bl_resources), - .resource = ep93xx_bl_resources, -}; - -/** - * ep93xx_register_fb - Register the framebuffer platform device. - * @data: platform specific framebuffer configuration (__initdata) - */ -void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data) -{ - ep93xxfb_data = *data; - platform_device_register(&ep93xx_fb_device); - platform_device_register(&ep93xx_bl_device); -} - - -/************************************************************************* - * EP93xx matrix keypad peripheral handling - *************************************************************************/ -static struct ep93xx_keypad_platform_data ep93xx_keypad_data; - -static struct resource ep93xx_keypad_resource[] = { - DEFINE_RES_MEM(EP93XX_KEY_MATRIX_PHYS_BASE, 0x0c), - DEFINE_RES_IRQ(IRQ_EP93XX_KEY), -}; - -static struct platform_device ep93xx_keypad_device = { - .name = "ep93xx-keypad", - .id = -1, - .dev = { - .platform_data = &ep93xx_keypad_data, - }, - .num_resources = ARRAY_SIZE(ep93xx_keypad_resource), - .resource = ep93xx_keypad_resource, -}; - -/** - * ep93xx_register_keypad - Register the keypad platform device. - * @data: platform specific keypad configuration (__initdata) - */ -void __init ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data) -{ - ep93xx_keypad_data = *data; - platform_device_register(&ep93xx_keypad_device); -} - -int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) -{ - int err; - int i; - - for (i = 0; i < 8; i++) { - err = gpio_request(EP93XX_GPIO_LINE_C(i), dev_name(&pdev->dev)); - if (err) - goto fail_gpio_c; - err = gpio_request(EP93XX_GPIO_LINE_D(i), dev_name(&pdev->dev)); - if (err) - goto fail_gpio_d; - } - - /* Enable the keypad controller; GPIO ports C and D used for keypad */ - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_KEYS | - EP93XX_SYSCON_DEVCFG_GONK); - - return 0; - -fail_gpio_d: - gpio_free(EP93XX_GPIO_LINE_C(i)); -fail_gpio_c: - for (--i; i >= 0; --i) { - gpio_free(EP93XX_GPIO_LINE_C(i)); - gpio_free(EP93XX_GPIO_LINE_D(i)); - } - return err; -} -EXPORT_SYMBOL(ep93xx_keypad_acquire_gpio); - -void ep93xx_keypad_release_gpio(struct platform_device *pdev) -{ - int i; - - for (i = 0; i < 8; i++) { - gpio_free(EP93XX_GPIO_LINE_C(i)); - gpio_free(EP93XX_GPIO_LINE_D(i)); - } - - /* Disable the keypad controller; GPIO ports C and D used for GPIO */ - ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | - EP93XX_SYSCON_DEVCFG_GONK); -} -EXPORT_SYMBOL(ep93xx_keypad_release_gpio); - -/************************************************************************* - * EP93xx I2S audio peripheral handling - *************************************************************************/ -static struct resource ep93xx_i2s_resource[] = { - DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100), - DEFINE_RES_IRQ(IRQ_EP93XX_SAI), -}; - -static struct platform_device ep93xx_i2s_device = { - .name = "ep93xx-i2s", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_i2s_resource), - .resource = ep93xx_i2s_resource, -}; - -static struct platform_device ep93xx_pcm_device = { - .name = "ep93xx-pcm-audio", - .id = -1, -}; - -void __init ep93xx_register_i2s(void) -{ - platform_device_register(&ep93xx_i2s_device); - platform_device_register(&ep93xx_pcm_device); -} - 
-#define EP93XX_SYSCON_DEVCFG_I2S_MASK (EP93XX_SYSCON_DEVCFG_I2SONSSP | \ - EP93XX_SYSCON_DEVCFG_I2SONAC97) - -#define EP93XX_I2SCLKDIV_MASK (EP93XX_SYSCON_I2SCLKDIV_ORIDE | \ - EP93XX_SYSCON_I2SCLKDIV_SPOL) - -int ep93xx_i2s_acquire(void) -{ - unsigned val; - - ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_I2SONAC97, - EP93XX_SYSCON_DEVCFG_I2S_MASK); - - /* - * This is potentially racy with the clock api for i2s_mclk, sclk and - * lrclk. Since the i2s driver is the only user of those clocks we - * rely on it to prevent parallel use of this function and the - * clock api for the i2s clocks. - */ - val = __raw_readl(EP93XX_SYSCON_I2SCLKDIV); - val &= ~EP93XX_I2SCLKDIV_MASK; - val |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL; - ep93xx_syscon_swlocked_write(val, EP93XX_SYSCON_I2SCLKDIV); - - return 0; -} -EXPORT_SYMBOL(ep93xx_i2s_acquire); - -void ep93xx_i2s_release(void) -{ - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2S_MASK); -} -EXPORT_SYMBOL(ep93xx_i2s_release); - -/************************************************************************* - * EP93xx AC97 audio peripheral handling - *************************************************************************/ -static struct resource ep93xx_ac97_resources[] = { - DEFINE_RES_MEM(EP93XX_AAC_PHYS_BASE, 0xac), - DEFINE_RES_IRQ(IRQ_EP93XX_AACINTR), -}; - -static struct platform_device ep93xx_ac97_device = { - .name = "ep93xx-ac97", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_ac97_resources), - .resource = ep93xx_ac97_resources, -}; - -void __init ep93xx_register_ac97(void) -{ - /* - * Make sure that the AC97 pins are not used by I2S. - */ - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97); - - platform_device_register(&ep93xx_ac97_device); - platform_device_register(&ep93xx_pcm_device); -} - -/************************************************************************* - * EP93xx Watchdog - *************************************************************************/ -static struct resource ep93xx_wdt_resources[] = { - DEFINE_RES_MEM(EP93XX_WATCHDOG_PHYS_BASE, 0x08), -}; - -static struct platform_device ep93xx_wdt_device = { - .name = "ep93xx-wdt", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_wdt_resources), - .resource = ep93xx_wdt_resources, -}; - -/************************************************************************* - * EP93xx IDE - *************************************************************************/ -static struct resource ep93xx_ide_resources[] = { - DEFINE_RES_MEM(EP93XX_IDE_PHYS_BASE, 0x38), - DEFINE_RES_IRQ(IRQ_EP93XX_EXT3), -}; - -static struct platform_device ep93xx_ide_device = { - .name = "ep93xx-ide", - .id = -1, - .dev = { - .dma_mask = &ep93xx_ide_device.dev.coherent_dma_mask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, - .num_resources = ARRAY_SIZE(ep93xx_ide_resources), - .resource = ep93xx_ide_resources, -}; - -void __init ep93xx_register_ide(void) -{ - platform_device_register(&ep93xx_ide_device); -} - -int ep93xx_ide_acquire_gpio(struct platform_device *pdev) -{ - int err; - int i; - - err = gpio_request(EP93XX_GPIO_LINE_EGPIO2, dev_name(&pdev->dev)); - if (err) - return err; - err = gpio_request(EP93XX_GPIO_LINE_EGPIO15, dev_name(&pdev->dev)); - if (err) - goto fail_egpio15; - for (i = 2; i < 8; i++) { - err = gpio_request(EP93XX_GPIO_LINE_E(i), dev_name(&pdev->dev)); - if (err) - goto fail_gpio_e; - } - for (i = 4; i < 8; i++) { - err = gpio_request(EP93XX_GPIO_LINE_G(i), dev_name(&pdev->dev)); - if (err) - goto fail_gpio_g; - } - for (i = 0; i < 8; i++) { - err = 
gpio_request(EP93XX_GPIO_LINE_H(i), dev_name(&pdev->dev)); - if (err) - goto fail_gpio_h; - } - - /* GPIO ports E[7:2], G[7:4] and H used by IDE */ - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_EONIDE | - EP93XX_SYSCON_DEVCFG_GONIDE | - EP93XX_SYSCON_DEVCFG_HONIDE); - return 0; - -fail_gpio_h: - for (--i; i >= 0; --i) - gpio_free(EP93XX_GPIO_LINE_H(i)); - i = 8; -fail_gpio_g: - for (--i; i >= 4; --i) - gpio_free(EP93XX_GPIO_LINE_G(i)); - i = 8; -fail_gpio_e: - for (--i; i >= 2; --i) - gpio_free(EP93XX_GPIO_LINE_E(i)); - gpio_free(EP93XX_GPIO_LINE_EGPIO15); -fail_egpio15: - gpio_free(EP93XX_GPIO_LINE_EGPIO2); - return err; -} -EXPORT_SYMBOL(ep93xx_ide_acquire_gpio); - -void ep93xx_ide_release_gpio(struct platform_device *pdev) -{ - int i; - - for (i = 2; i < 8; i++) - gpio_free(EP93XX_GPIO_LINE_E(i)); - for (i = 4; i < 8; i++) - gpio_free(EP93XX_GPIO_LINE_G(i)); - for (i = 0; i < 8; i++) - gpio_free(EP93XX_GPIO_LINE_H(i)); - gpio_free(EP93XX_GPIO_LINE_EGPIO15); - gpio_free(EP93XX_GPIO_LINE_EGPIO2); - - - /* GPIO ports E[7:2], G[7:4] and H used by GPIO */ - ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_EONIDE | - EP93XX_SYSCON_DEVCFG_GONIDE | - EP93XX_SYSCON_DEVCFG_HONIDE); -} -EXPORT_SYMBOL(ep93xx_ide_release_gpio); - -/************************************************************************* - * EP93xx ADC - *************************************************************************/ -static struct resource ep93xx_adc_resources[] = { - DEFINE_RES_MEM(EP93XX_ADC_PHYS_BASE, 0x28), - DEFINE_RES_IRQ(IRQ_EP93XX_TOUCH), -}; - -static struct platform_device ep93xx_adc_device = { - .name = "ep93xx-adc", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_adc_resources), - .resource = ep93xx_adc_resources, -}; - -void __init ep93xx_register_adc(void) -{ - /* Power up ADC, deactivate Touch Screen Controller */ - ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_TIN, - EP93XX_SYSCON_DEVCFG_ADCPD); - - platform_device_register(&ep93xx_adc_device); -} - -/************************************************************************* - * EP93xx Security peripheral - *************************************************************************/ - -/* - * The Maverick Key is 256 bits of micro fuses blown at the factory during - * manufacturing to uniquely identify a part. 
- * - * See: http://arm.cirrus.com/forum/viewtopic.php?t=486&highlight=maverick+key - */ -#define EP93XX_SECURITY_REG(x) (EP93XX_SECURITY_BASE + (x)) -#define EP93XX_SECURITY_SECFLG EP93XX_SECURITY_REG(0x2400) -#define EP93XX_SECURITY_FUSEFLG EP93XX_SECURITY_REG(0x2410) -#define EP93XX_SECURITY_UNIQID EP93XX_SECURITY_REG(0x2440) -#define EP93XX_SECURITY_UNIQCHK EP93XX_SECURITY_REG(0x2450) -#define EP93XX_SECURITY_UNIQVAL EP93XX_SECURITY_REG(0x2460) -#define EP93XX_SECURITY_SECID1 EP93XX_SECURITY_REG(0x2500) -#define EP93XX_SECURITY_SECID2 EP93XX_SECURITY_REG(0x2504) -#define EP93XX_SECURITY_SECCHK1 EP93XX_SECURITY_REG(0x2520) -#define EP93XX_SECURITY_SECCHK2 EP93XX_SECURITY_REG(0x2524) -#define EP93XX_SECURITY_UNIQID2 EP93XX_SECURITY_REG(0x2700) -#define EP93XX_SECURITY_UNIQID3 EP93XX_SECURITY_REG(0x2704) -#define EP93XX_SECURITY_UNIQID4 EP93XX_SECURITY_REG(0x2708) -#define EP93XX_SECURITY_UNIQID5 EP93XX_SECURITY_REG(0x270c) - -static char ep93xx_soc_id[33]; - -static const char __init *ep93xx_get_soc_id(void) -{ - unsigned int id, id2, id3, id4, id5; - - if (__raw_readl(EP93XX_SECURITY_UNIQVAL) != 1) - return "bad Hamming code"; - - id = __raw_readl(EP93XX_SECURITY_UNIQID); - id2 = __raw_readl(EP93XX_SECURITY_UNIQID2); - id3 = __raw_readl(EP93XX_SECURITY_UNIQID3); - id4 = __raw_readl(EP93XX_SECURITY_UNIQID4); - id5 = __raw_readl(EP93XX_SECURITY_UNIQID5); - - if (id != id2) - return "invalid"; - - /* Toss the unique ID into the entropy pool */ - add_device_randomness(&id2, 4); - add_device_randomness(&id3, 4); - add_device_randomness(&id4, 4); - add_device_randomness(&id5, 4); - - snprintf(ep93xx_soc_id, sizeof(ep93xx_soc_id), - "%08x%08x%08x%08x", id2, id3, id4, id5); - - return ep93xx_soc_id; -} - -static const char __init *ep93xx_get_soc_rev(void) -{ - int rev = ep93xx_chip_revision(); - - switch (rev) { - case EP93XX_CHIP_REV_D0: - return "D0"; - case EP93XX_CHIP_REV_D1: - return "D1"; - case EP93XX_CHIP_REV_E0: - return "E0"; - case EP93XX_CHIP_REV_E1: - return "E1"; - case EP93XX_CHIP_REV_E2: - return "E2"; - default: - return "unknown"; - } -} - -static const char __init *ep93xx_get_machine_name(void) -{ - return kasprintf(GFP_KERNEL,"%s", machine_desc->name); -} - -static struct device __init *ep93xx_init_soc(void) -{ - struct soc_device_attribute *soc_dev_attr; - struct soc_device *soc_dev; - - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); - if (!soc_dev_attr) - return NULL; - - soc_dev_attr->machine = ep93xx_get_machine_name(); - soc_dev_attr->family = "Cirrus Logic EP93xx"; - soc_dev_attr->revision = ep93xx_get_soc_rev(); - soc_dev_attr->soc_id = ep93xx_get_soc_id(); - - soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) { - kfree(soc_dev_attr->machine); - kfree(soc_dev_attr); - return NULL; - } - - return soc_device_to_device(soc_dev); -} - -struct device __init *ep93xx_init_devices(void) -{ - struct device *parent; - - /* Disallow access to MaverickCrunch initially */ - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA); - - /* Default all ports to GPIO */ - ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | - EP93XX_SYSCON_DEVCFG_GONK | - EP93XX_SYSCON_DEVCFG_EONIDE | - EP93XX_SYSCON_DEVCFG_GONIDE | - EP93XX_SYSCON_DEVCFG_HONIDE); - - parent = ep93xx_init_soc(); - - /* Get the GPIO working early, other devices need it */ - platform_device_register(&ep93xx_gpio_device); - - amba_device_register(&uart1_device, &iomem_resource); - amba_device_register(&uart2_device, &iomem_resource); - amba_device_register(&uart3_device, &iomem_resource); - - 
platform_device_register(&ep93xx_rtc_device); - platform_device_register(&ep93xx_ohci_device); - platform_device_register(&ep93xx_wdt_device); - - gpiod_add_lookup_table(&ep93xx_leds_gpio_table); - gpio_led_register_device(-1, &ep93xx_led_data); - - return parent; -} - -void ep93xx_restart(enum reboot_mode mode, const char *cmd) -{ - /* - * Set then clear the SWRST bit to initiate a software reset - */ - ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST); - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST); - - while (1) - ; -} diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c deleted file mode 100644 index 74515acab8efdb..00000000000000 --- a/arch/arm/mach-ep93xx/dma.c +++ /dev/null @@ -1,114 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * arch/arm/mach-ep93xx/dma.c - * - * Platform support code for the EP93xx dmaengine driver. - * - * Copyright (C) 2011 Mika Westerberg - * - * This work is based on the original dma-m2p implementation with - * following copyrights: - * - * Copyright (C) 2006 Lennert Buytenhek - * Copyright (C) 2006 Applied Data Systems - * Copyright (C) 2009 Ryan Mallon - */ - -#include -#include -#include -#include -#include -#include - -#include -#include "hardware.h" - -#include "soc.h" - -#define DMA_CHANNEL(_name, _base, _irq) \ - { .name = (_name), .base = (_base), .irq = (_irq) } - -/* - * DMA M2P channels. - * - * On the EP93xx chip the following peripherals my be allocated to the 10 - * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). - * - * I2S contains 3 Tx and 3 Rx DMA Channels - * AAC contains 3 Tx and 3 Rx DMA Channels - * UART1 contains 1 Tx and 1 Rx DMA Channels - * UART2 contains 1 Tx and 1 Rx DMA Channels - * UART3 contains 1 Tx and 1 Rx DMA Channels - * IrDA contains 1 Tx and 1 Rx DMA Channels - * - * Registers are mapped statically in ep93xx_map_io(). - */ -static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = { - DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0), - DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1), - DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2), - DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3), - DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4), - DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5), - DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6), - DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7), - DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8), - DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9), -}; - -static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = { - .channels = ep93xx_dma_m2p_channels, - .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels), -}; - -static u64 ep93xx_dma_m2p_mask = DMA_BIT_MASK(32); - -static struct platform_device ep93xx_dma_m2p_device = { - .name = "ep93xx-dma-m2p", - .id = -1, - .dev = { - .platform_data = &ep93xx_dma_m2p_data, - .dma_mask = &ep93xx_dma_m2p_mask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, -}; - -/* - * DMA M2M channels. - * - * There are 2 M2M channels which support memcpy/memset and in addition simple - * hardware requests from/to SSP and IDE. We do not implement an external - * hardware requests. - * - * Registers are mapped statically in ep93xx_map_io(). 
- */ -static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = { - DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0), - DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1), -}; - -static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = { - .channels = ep93xx_dma_m2m_channels, - .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels), -}; - -static u64 ep93xx_dma_m2m_mask = DMA_BIT_MASK(32); - -static struct platform_device ep93xx_dma_m2m_device = { - .name = "ep93xx-dma-m2m", - .id = -1, - .dev = { - .platform_data = &ep93xx_dma_m2m_data, - .dma_mask = &ep93xx_dma_m2m_mask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, -}; - -static int __init ep93xx_dma_init(void) -{ - platform_device_register(&ep93xx_dma_m2p_device); - platform_device_register(&ep93xx_dma_m2m_device); - return 0; -} -arch_initcall(ep93xx_dma_init); diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c deleted file mode 100644 index dbdb822a0100df..00000000000000 --- a/arch/arm/mach-ep93xx/edb93xx.c +++ /dev/null @@ -1,368 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * arch/arm/mach-ep93xx/edb93xx.c - * Cirrus Logic EDB93xx Development Board support. - * - * EDB93XX, EDB9301, EDB9307A - * Copyright (C) 2008-2009 H Hartley Sweeten - * - * EDB9302 - * Copyright (C) 2006 George Kashperko - * - * EDB9302A, EDB9315, EDB9315A - * Copyright (C) 2006 Lennert Buytenhek - * - * EDB9307 - * Copyright (C) 2007 Herbert Valerio Riedel - * - * EDB9312 - * Copyright (C) 2006 Infosys Technologies Limited - * Toufeeq Hussain - */ - -#include -#include -#include -#include -#include -#include - -#include - -#include "hardware.h" -#include -#include -#include "gpio-ep93xx.h" - -#include -#include - -#include "soc.h" - -static void __init edb93xx_register_flash(void) -{ - if (machine_is_edb9307() || machine_is_edb9312() || - machine_is_edb9315()) { - ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_32M); - } else { - ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M); - } -} - -static struct ep93xx_eth_data __initdata edb93xx_eth_data = { - .phy_id = 1, -}; - - -/************************************************************************* - * EDB93xx i2c peripheral handling - *************************************************************************/ - -static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = { - { - I2C_BOARD_INFO("isl1208", 0x6f), - }, -}; - -static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = { - { - I2C_BOARD_INFO("ds1337", 0x68), - }, -}; - -static void __init edb93xx_register_i2c(void) -{ - if (machine_is_edb9302a() || machine_is_edb9307a() || - machine_is_edb9315a()) { - ep93xx_register_i2c(edb93xxa_i2c_board_info, - ARRAY_SIZE(edb93xxa_i2c_board_info)); - } else if (machine_is_edb9302() || machine_is_edb9307() - || machine_is_edb9312() || machine_is_edb9315()) { - ep93xx_register_i2c(edb93xx_i2c_board_info, - ARRAY_SIZE(edb93xx_i2c_board_info)); - } -} - - -/************************************************************************* - * EDB93xx SPI peripheral handling - *************************************************************************/ -static struct cs4271_platform_data edb93xx_cs4271_data = { - /* Intentionally left blank */ -}; - -static struct spi_board_info edb93xx_spi_board_info[] __initdata = { - { - .modalias = "cs4271", - .platform_data = &edb93xx_cs4271_data, - .max_speed_hz = 6000000, - .bus_num = 0, - .chip_select = 0, - .mode = SPI_MODE_3, - }, -}; - -static struct gpiod_lookup_table 
edb93xx_spi_cs_gpio_table = { - .dev_id = "spi0", - .table = { - GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW), - { }, - }, -}; - -static struct ep93xx_spi_info edb93xx_spi_info __initdata = { - /* Intentionally left blank */ -}; - -static struct gpiod_lookup_table edb93xx_cs4272_edb9301_gpio_table = { - .dev_id = "spi0.0", /* CS0 on SPI0 */ - .table = { - GPIO_LOOKUP("A", 1, "reset", GPIO_ACTIVE_LOW), - { }, - }, -}; - -static struct gpiod_lookup_table edb93xx_cs4272_edb9302_gpio_table = { - .dev_id = "spi0.0", /* CS0 on SPI0 */ - .table = { - GPIO_LOOKUP("H", 2, "reset", GPIO_ACTIVE_LOW), - { }, - }, -}; - -static struct gpiod_lookup_table edb93xx_cs4272_edb9315_gpio_table = { - .dev_id = "spi0.0", /* CS0 on SPI0 */ - .table = { - GPIO_LOOKUP("B", 6, "reset", GPIO_ACTIVE_LOW), - { }, - }, -}; - -static void __init edb93xx_register_spi(void) -{ - if (machine_is_edb9301() || machine_is_edb9302()) - gpiod_add_lookup_table(&edb93xx_cs4272_edb9301_gpio_table); - else if (machine_is_edb9302a() || machine_is_edb9307a()) - gpiod_add_lookup_table(&edb93xx_cs4272_edb9302_gpio_table); - else if (machine_is_edb9315a()) - gpiod_add_lookup_table(&edb93xx_cs4272_edb9315_gpio_table); - - gpiod_add_lookup_table(&edb93xx_spi_cs_gpio_table); - ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info, - ARRAY_SIZE(edb93xx_spi_board_info)); -} - - -/************************************************************************* - * EDB93xx I2S - *************************************************************************/ -static struct platform_device edb93xx_audio_device = { - .name = "edb93xx-audio", - .id = -1, -}; - -static int __init edb93xx_has_audio(void) -{ - return (machine_is_edb9301() || machine_is_edb9302() || - machine_is_edb9302a() || machine_is_edb9307a() || - machine_is_edb9315a()); -} - -static void __init edb93xx_register_i2s(void) -{ - if (edb93xx_has_audio()) { - ep93xx_register_i2s(); - platform_device_register(&edb93xx_audio_device); - } -} - - -/************************************************************************* - * EDB93xx pwm - *************************************************************************/ -static void __init edb93xx_register_pwm(void) -{ - if (machine_is_edb9301() || - machine_is_edb9302() || machine_is_edb9302a()) { - /* EP9301 and EP9302 only have pwm.1 (EGPIO14) */ - ep93xx_register_pwm(0, 1); - } else if (machine_is_edb9307() || machine_is_edb9307a()) { - /* EP9307 only has pwm.0 (PWMOUT) */ - ep93xx_register_pwm(1, 0); - } else { - /* EP9312 and EP9315 have both */ - ep93xx_register_pwm(1, 1); - } -} - - -/************************************************************************* - * EDB93xx framebuffer - *************************************************************************/ -static struct ep93xxfb_mach_info __initdata edb93xxfb_info = { - .flags = 0, -}; - -static int __init edb93xx_has_fb(void) -{ - /* These platforms have an ep93xx with video capability */ - return machine_is_edb9307() || machine_is_edb9307a() || - machine_is_edb9312() || machine_is_edb9315() || - machine_is_edb9315a(); -} - -static void __init edb93xx_register_fb(void) -{ - if (!edb93xx_has_fb()) - return; - - if (machine_is_edb9307a() || machine_is_edb9315a()) - edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0; - else - edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3; - - ep93xx_register_fb(&edb93xxfb_info); -} - - -/************************************************************************* - * EDB93xx IDE - *************************************************************************/ -static int __init 
edb93xx_has_ide(void) -{ - /* - * Although EDB9312 and EDB9315 do have IDE capability, they have - * INTRQ line wired as pull-up, which makes using IDE interface - * problematic. - */ - return machine_is_edb9312() || machine_is_edb9315() || - machine_is_edb9315a(); -} - -static void __init edb93xx_register_ide(void) -{ - if (!edb93xx_has_ide()) - return; - - ep93xx_register_ide(); -} - - -static void __init edb93xx_init_machine(void) -{ - ep93xx_init_devices(); - edb93xx_register_flash(); - ep93xx_register_eth(&edb93xx_eth_data, 1); - edb93xx_register_i2c(); - edb93xx_register_spi(); - edb93xx_register_i2s(); - edb93xx_register_pwm(); - edb93xx_register_fb(); - edb93xx_register_ide(); - ep93xx_register_adc(); -} - - -#ifdef CONFIG_MACH_EDB9301 -MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board") - /* Maintainer: H Hartley Sweeten */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ep93xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = edb93xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_EDB9302 -MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board") - /* Maintainer: George Kashperko */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ep93xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = edb93xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_EDB9302A -MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ep93xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = edb93xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_EDB9307 -MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board") - /* Maintainer: Herbert Valerio Riedel */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ep93xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = edb93xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_EDB9307A -MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board") - /* Maintainer: H Hartley Sweeten */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ep93xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = edb93xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_EDB9312 -MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board") - /* Maintainer: Toufeeq Hussain */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ep93xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = edb93xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_EDB9315 -MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ep93xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = edb93xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_EDB9315A -MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ep93xx_map_io, - .init_irq = ep93xx_init_irq, 
- .init_time = ep93xx_timer_init, - .init_machine = edb93xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END -#endif diff --git a/arch/arm/mach-ep93xx/ep93xx-regs.h b/arch/arm/mach-ep93xx/ep93xx-regs.h deleted file mode 100644 index 8fa3646de0a4d7..00000000000000 --- a/arch/arm/mach-ep93xx/ep93xx-regs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_ARCH_EP93XX_REGS_H -#define __ASM_ARCH_EP93XX_REGS_H - -/* - * EP93xx linux memory map: - * - * virt phys size - * fe800000 5M per-platform mappings - * fed00000 80800000 2M APB - * fef00000 80000000 1M AHB - */ - -#define EP93XX_AHB_PHYS_BASE 0x80000000 -#define EP93XX_AHB_VIRT_BASE 0xfef00000 -#define EP93XX_AHB_SIZE 0x00100000 - -#define EP93XX_AHB_PHYS(x) (EP93XX_AHB_PHYS_BASE + (x)) -#define EP93XX_AHB_IOMEM(x) IOMEM(EP93XX_AHB_VIRT_BASE + (x)) - -#define EP93XX_APB_PHYS_BASE 0x80800000 -#define EP93XX_APB_VIRT_BASE 0xfed00000 -#define EP93XX_APB_SIZE 0x00200000 - -#define EP93XX_APB_PHYS(x) (EP93XX_APB_PHYS_BASE + (x)) -#define EP93XX_APB_IOMEM(x) IOMEM(EP93XX_APB_VIRT_BASE + (x)) - -/* APB UARTs */ -#define EP93XX_UART1_PHYS_BASE EP93XX_APB_PHYS(0x000c0000) -#define EP93XX_UART1_BASE EP93XX_APB_IOMEM(0x000c0000) - -#define EP93XX_UART2_PHYS_BASE EP93XX_APB_PHYS(0x000d0000) -#define EP93XX_UART2_BASE EP93XX_APB_IOMEM(0x000d0000) - -#define EP93XX_UART3_PHYS_BASE EP93XX_APB_PHYS(0x000e0000) -#define EP93XX_UART3_BASE EP93XX_APB_IOMEM(0x000e0000) - -#endif diff --git a/arch/arm/mach-ep93xx/gpio-ep93xx.h b/arch/arm/mach-ep93xx/gpio-ep93xx.h deleted file mode 100644 index 7b46eb7e550727..00000000000000 --- a/arch/arm/mach-ep93xx/gpio-ep93xx.h +++ /dev/null @@ -1,111 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Include file for the EP93XX GPIO controller machine specifics */ - -#ifndef __GPIO_EP93XX_H -#define __GPIO_EP93XX_H - -#include "ep93xx-regs.h" - -#define EP93XX_GPIO_PHYS_BASE EP93XX_APB_PHYS(0x00040000) -#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000) -#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x)) -#define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c) -#define EP93XX_GPIO_A_INT_STATUS EP93XX_GPIO_REG(0xa0) -#define EP93XX_GPIO_B_INT_STATUS EP93XX_GPIO_REG(0xbc) -#define EP93XX_GPIO_EEDRIVE EP93XX_GPIO_REG(0xc8) - -/* GPIO port A. */ -#define EP93XX_GPIO_LINE_A(x) ((x) + 0) -#define EP93XX_GPIO_LINE_EGPIO0 EP93XX_GPIO_LINE_A(0) -#define EP93XX_GPIO_LINE_EGPIO1 EP93XX_GPIO_LINE_A(1) -#define EP93XX_GPIO_LINE_EGPIO2 EP93XX_GPIO_LINE_A(2) -#define EP93XX_GPIO_LINE_EGPIO3 EP93XX_GPIO_LINE_A(3) -#define EP93XX_GPIO_LINE_EGPIO4 EP93XX_GPIO_LINE_A(4) -#define EP93XX_GPIO_LINE_EGPIO5 EP93XX_GPIO_LINE_A(5) -#define EP93XX_GPIO_LINE_EGPIO6 EP93XX_GPIO_LINE_A(6) -#define EP93XX_GPIO_LINE_EGPIO7 EP93XX_GPIO_LINE_A(7) - -/* GPIO port B. */ -#define EP93XX_GPIO_LINE_B(x) ((x) + 8) -#define EP93XX_GPIO_LINE_EGPIO8 EP93XX_GPIO_LINE_B(0) -#define EP93XX_GPIO_LINE_EGPIO9 EP93XX_GPIO_LINE_B(1) -#define EP93XX_GPIO_LINE_EGPIO10 EP93XX_GPIO_LINE_B(2) -#define EP93XX_GPIO_LINE_EGPIO11 EP93XX_GPIO_LINE_B(3) -#define EP93XX_GPIO_LINE_EGPIO12 EP93XX_GPIO_LINE_B(4) -#define EP93XX_GPIO_LINE_EGPIO13 EP93XX_GPIO_LINE_B(5) -#define EP93XX_GPIO_LINE_EGPIO14 EP93XX_GPIO_LINE_B(6) -#define EP93XX_GPIO_LINE_EGPIO15 EP93XX_GPIO_LINE_B(7) - -/* GPIO port C. 
*/ -#define EP93XX_GPIO_LINE_C(x) ((x) + 40) -#define EP93XX_GPIO_LINE_ROW0 EP93XX_GPIO_LINE_C(0) -#define EP93XX_GPIO_LINE_ROW1 EP93XX_GPIO_LINE_C(1) -#define EP93XX_GPIO_LINE_ROW2 EP93XX_GPIO_LINE_C(2) -#define EP93XX_GPIO_LINE_ROW3 EP93XX_GPIO_LINE_C(3) -#define EP93XX_GPIO_LINE_ROW4 EP93XX_GPIO_LINE_C(4) -#define EP93XX_GPIO_LINE_ROW5 EP93XX_GPIO_LINE_C(5) -#define EP93XX_GPIO_LINE_ROW6 EP93XX_GPIO_LINE_C(6) -#define EP93XX_GPIO_LINE_ROW7 EP93XX_GPIO_LINE_C(7) - -/* GPIO port D. */ -#define EP93XX_GPIO_LINE_D(x) ((x) + 24) -#define EP93XX_GPIO_LINE_COL0 EP93XX_GPIO_LINE_D(0) -#define EP93XX_GPIO_LINE_COL1 EP93XX_GPIO_LINE_D(1) -#define EP93XX_GPIO_LINE_COL2 EP93XX_GPIO_LINE_D(2) -#define EP93XX_GPIO_LINE_COL3 EP93XX_GPIO_LINE_D(3) -#define EP93XX_GPIO_LINE_COL4 EP93XX_GPIO_LINE_D(4) -#define EP93XX_GPIO_LINE_COL5 EP93XX_GPIO_LINE_D(5) -#define EP93XX_GPIO_LINE_COL6 EP93XX_GPIO_LINE_D(6) -#define EP93XX_GPIO_LINE_COL7 EP93XX_GPIO_LINE_D(7) - -/* GPIO port E. */ -#define EP93XX_GPIO_LINE_E(x) ((x) + 32) -#define EP93XX_GPIO_LINE_GRLED EP93XX_GPIO_LINE_E(0) -#define EP93XX_GPIO_LINE_RDLED EP93XX_GPIO_LINE_E(1) -#define EP93XX_GPIO_LINE_DIORn EP93XX_GPIO_LINE_E(2) -#define EP93XX_GPIO_LINE_IDECS1n EP93XX_GPIO_LINE_E(3) -#define EP93XX_GPIO_LINE_IDECS2n EP93XX_GPIO_LINE_E(4) -#define EP93XX_GPIO_LINE_IDEDA0 EP93XX_GPIO_LINE_E(5) -#define EP93XX_GPIO_LINE_IDEDA1 EP93XX_GPIO_LINE_E(6) -#define EP93XX_GPIO_LINE_IDEDA2 EP93XX_GPIO_LINE_E(7) - -/* GPIO port F. */ -#define EP93XX_GPIO_LINE_F(x) ((x) + 16) -#define EP93XX_GPIO_LINE_WP EP93XX_GPIO_LINE_F(0) -#define EP93XX_GPIO_LINE_MCCD1 EP93XX_GPIO_LINE_F(1) -#define EP93XX_GPIO_LINE_MCCD2 EP93XX_GPIO_LINE_F(2) -#define EP93XX_GPIO_LINE_MCBVD1 EP93XX_GPIO_LINE_F(3) -#define EP93XX_GPIO_LINE_MCBVD2 EP93XX_GPIO_LINE_F(4) -#define EP93XX_GPIO_LINE_VS1 EP93XX_GPIO_LINE_F(5) -#define EP93XX_GPIO_LINE_READY EP93XX_GPIO_LINE_F(6) -#define EP93XX_GPIO_LINE_VS2 EP93XX_GPIO_LINE_F(7) - -/* GPIO port G. */ -#define EP93XX_GPIO_LINE_G(x) ((x) + 48) -#define EP93XX_GPIO_LINE_EECLK EP93XX_GPIO_LINE_G(0) -#define EP93XX_GPIO_LINE_EEDAT EP93XX_GPIO_LINE_G(1) -#define EP93XX_GPIO_LINE_SLA0 EP93XX_GPIO_LINE_G(2) -#define EP93XX_GPIO_LINE_SLA1 EP93XX_GPIO_LINE_G(3) -#define EP93XX_GPIO_LINE_DD12 EP93XX_GPIO_LINE_G(4) -#define EP93XX_GPIO_LINE_DD13 EP93XX_GPIO_LINE_G(5) -#define EP93XX_GPIO_LINE_DD14 EP93XX_GPIO_LINE_G(6) -#define EP93XX_GPIO_LINE_DD15 EP93XX_GPIO_LINE_G(7) - -/* GPIO port H. 
*/ -#define EP93XX_GPIO_LINE_H(x) ((x) + 56) -#define EP93XX_GPIO_LINE_DD0 EP93XX_GPIO_LINE_H(0) -#define EP93XX_GPIO_LINE_DD1 EP93XX_GPIO_LINE_H(1) -#define EP93XX_GPIO_LINE_DD2 EP93XX_GPIO_LINE_H(2) -#define EP93XX_GPIO_LINE_DD3 EP93XX_GPIO_LINE_H(3) -#define EP93XX_GPIO_LINE_DD4 EP93XX_GPIO_LINE_H(4) -#define EP93XX_GPIO_LINE_DD5 EP93XX_GPIO_LINE_H(5) -#define EP93XX_GPIO_LINE_DD6 EP93XX_GPIO_LINE_H(6) -#define EP93XX_GPIO_LINE_DD7 EP93XX_GPIO_LINE_H(7) - -/* maximum value for gpio line identifiers */ -#define EP93XX_GPIO_LINE_MAX EP93XX_GPIO_LINE_H(7) - -/* maximum value for irq capable line identifiers */ -#define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7) - -#endif /* __GPIO_EP93XX_H */ diff --git a/arch/arm/mach-ep93xx/hardware.h b/arch/arm/mach-ep93xx/hardware.h deleted file mode 100644 index e7d850e04782df..00000000000000 --- a/arch/arm/mach-ep93xx/hardware.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * arch/arm/mach-ep93xx/include/mach/hardware.h - */ - -#ifndef __ASM_ARCH_HARDWARE_H -#define __ASM_ARCH_HARDWARE_H - -#include "platform.h" - -/* - * The EP93xx has two external crystal oscillators. To generate the - * required high-frequency clocks, the processor uses two phase-locked- - * loops (PLLs) to multiply the incoming external clock signal to much - * higher frequencies that are then divided down by programmable dividers - * to produce the needed clocks. The PLLs operate independently of one - * another. - */ -#define EP93XX_EXT_CLK_RATE 14745600 -#define EP93XX_EXT_RTC_RATE 32768 - -#define EP93XX_KEYTCHCLK_DIV4 (EP93XX_EXT_CLK_RATE / 4) -#define EP93XX_KEYTCHCLK_DIV16 (EP93XX_EXT_CLK_RATE / 16) - -#endif diff --git a/arch/arm/mach-ep93xx/irqs.h b/arch/arm/mach-ep93xx/irqs.h deleted file mode 100644 index 353201b90c6658..00000000000000 --- a/arch/arm/mach-ep93xx/irqs.h +++ /dev/null @@ -1,76 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_ARCH_IRQS_H -#define __ASM_ARCH_IRQS_H - -#define IRQ_EP93XX_VIC0 1 - -#define IRQ_EP93XX_COMMRX (IRQ_EP93XX_VIC0 + 2) -#define IRQ_EP93XX_COMMTX (IRQ_EP93XX_VIC0 + 3) -#define IRQ_EP93XX_TIMER1 (IRQ_EP93XX_VIC0 + 4) -#define IRQ_EP93XX_TIMER2 (IRQ_EP93XX_VIC0 + 5) -#define IRQ_EP93XX_AACINTR (IRQ_EP93XX_VIC0 + 6) -#define IRQ_EP93XX_DMAM2P0 (IRQ_EP93XX_VIC0 + 7) -#define IRQ_EP93XX_DMAM2P1 (IRQ_EP93XX_VIC0 + 8) -#define IRQ_EP93XX_DMAM2P2 (IRQ_EP93XX_VIC0 + 9) -#define IRQ_EP93XX_DMAM2P3 (IRQ_EP93XX_VIC0 + 10) -#define IRQ_EP93XX_DMAM2P4 (IRQ_EP93XX_VIC0 + 11) -#define IRQ_EP93XX_DMAM2P5 (IRQ_EP93XX_VIC0 + 12) -#define IRQ_EP93XX_DMAM2P6 (IRQ_EP93XX_VIC0 + 13) -#define IRQ_EP93XX_DMAM2P7 (IRQ_EP93XX_VIC0 + 14) -#define IRQ_EP93XX_DMAM2P8 (IRQ_EP93XX_VIC0 + 15) -#define IRQ_EP93XX_DMAM2P9 (IRQ_EP93XX_VIC0 + 16) -#define IRQ_EP93XX_DMAM2M0 (IRQ_EP93XX_VIC0 + 17) -#define IRQ_EP93XX_DMAM2M1 (IRQ_EP93XX_VIC0 + 18) -#define IRQ_EP93XX_GPIO0MUX (IRQ_EP93XX_VIC0 + 19) -#define IRQ_EP93XX_GPIO1MUX (IRQ_EP93XX_VIC0 + 20) -#define IRQ_EP93XX_GPIO2MUX (IRQ_EP93XX_VIC0 + 21) -#define IRQ_EP93XX_GPIO3MUX (IRQ_EP93XX_VIC0 + 22) -#define IRQ_EP93XX_UART1RX (IRQ_EP93XX_VIC0 + 23) -#define IRQ_EP93XX_UART1TX (IRQ_EP93XX_VIC0 + 24) -#define IRQ_EP93XX_UART2RX (IRQ_EP93XX_VIC0 + 25) -#define IRQ_EP93XX_UART2TX (IRQ_EP93XX_VIC0 + 26) -#define IRQ_EP93XX_UART3RX (IRQ_EP93XX_VIC0 + 27) -#define IRQ_EP93XX_UART3TX (IRQ_EP93XX_VIC0 + 28) -#define IRQ_EP93XX_KEY (IRQ_EP93XX_VIC0 + 29) -#define IRQ_EP93XX_TOUCH (IRQ_EP93XX_VIC0 + 30) -#define EP93XX_VIC1_VALID_IRQ_MASK 0x7ffffffc - -#define IRQ_EP93XX_VIC1 
(IRQ_EP93XX_VIC0 + 32) - -#define IRQ_EP93XX_EXT0 (IRQ_EP93XX_VIC1 + 0) -#define IRQ_EP93XX_EXT1 (IRQ_EP93XX_VIC1 + 1) -#define IRQ_EP93XX_EXT2 (IRQ_EP93XX_VIC1 + 2) -#define IRQ_EP93XX_64HZ (IRQ_EP93XX_VIC1 + 3) -#define IRQ_EP93XX_WATCHDOG (IRQ_EP93XX_VIC1 + 4) -#define IRQ_EP93XX_RTC (IRQ_EP93XX_VIC1 + 5) -#define IRQ_EP93XX_IRDA (IRQ_EP93XX_VIC1 + 6) -#define IRQ_EP93XX_ETHERNET (IRQ_EP93XX_VIC1 + 7) -#define IRQ_EP93XX_EXT3 (IRQ_EP93XX_VIC1 + 8) -#define IRQ_EP93XX_PROG (IRQ_EP93XX_VIC1 + 9) -#define IRQ_EP93XX_1HZ (IRQ_EP93XX_VIC1 + 10) -#define IRQ_EP93XX_VSYNC (IRQ_EP93XX_VIC1 + 11) -#define IRQ_EP93XX_VIDEO_FIFO (IRQ_EP93XX_VIC1 + 12) -#define IRQ_EP93XX_SSP1RX (IRQ_EP93XX_VIC1 + 13) -#define IRQ_EP93XX_SSP1TX (IRQ_EP93XX_VIC1 + 14) -#define IRQ_EP93XX_GPIO4MUX (IRQ_EP93XX_VIC1 + 15) -#define IRQ_EP93XX_GPIO5MUX (IRQ_EP93XX_VIC1 + 16) -#define IRQ_EP93XX_GPIO6MUX (IRQ_EP93XX_VIC1 + 17) -#define IRQ_EP93XX_GPIO7MUX (IRQ_EP93XX_VIC1 + 18) -#define IRQ_EP93XX_TIMER3 (IRQ_EP93XX_VIC1 + 19) -#define IRQ_EP93XX_UART1 (IRQ_EP93XX_VIC1 + 20) -#define IRQ_EP93XX_SSP (IRQ_EP93XX_VIC1 + 21) -#define IRQ_EP93XX_UART2 (IRQ_EP93XX_VIC1 + 22) -#define IRQ_EP93XX_UART3 (IRQ_EP93XX_VIC1 + 23) -#define IRQ_EP93XX_USB (IRQ_EP93XX_VIC1 + 24) -#define IRQ_EP93XX_ETHERNET_PME (IRQ_EP93XX_VIC1 + 25) -#define IRQ_EP93XX_DSP (IRQ_EP93XX_VIC1 + 26) -#define IRQ_EP93XX_GPIO_AB (IRQ_EP93XX_VIC1 + 27) -#define IRQ_EP93XX_SAI (IRQ_EP93XX_VIC1 + 28) -#define EP93XX_VIC2_VALID_IRQ_MASK 0x1fffffff - -#define NR_EP93XX_IRQS (IRQ_EP93XX_VIC1 + 32 + 24) - -#define EP93XX_BOARD_IRQ(x) (NR_EP93XX_IRQS + (x)) -#define EP93XX_BOARD_IRQS 32 - -#endif diff --git a/arch/arm/mach-ep93xx/platform.h b/arch/arm/mach-ep93xx/platform.h deleted file mode 100644 index 5fb1b919133f9b..00000000000000 --- a/arch/arm/mach-ep93xx/platform.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * arch/arm/mach-ep93xx/include/mach/platform.h - */ - -#ifndef __ASSEMBLY__ - -#include -#include - -struct device; -struct i2c_board_info; -struct spi_board_info; -struct platform_device; -struct ep93xxfb_mach_info; -struct ep93xx_keypad_platform_data; -struct ep93xx_spi_info; - -void ep93xx_map_io(void); -void ep93xx_init_irq(void); - -void ep93xx_register_flash(unsigned int width, - resource_size_t start, resource_size_t size); - -void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr); -void ep93xx_register_i2c(struct i2c_board_info *devices, int num); -void ep93xx_register_spi(struct ep93xx_spi_info *info, - struct spi_board_info *devices, int num); -void ep93xx_register_fb(struct ep93xxfb_mach_info *data); -void ep93xx_register_pwm(int pwm0, int pwm1); -void ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data); -void ep93xx_register_i2s(void); -void ep93xx_register_ac97(void); -void ep93xx_register_ide(void); -void ep93xx_register_adc(void); - -struct device *ep93xx_init_devices(void); -extern void ep93xx_timer_init(void); - -void ep93xx_restart(enum reboot_mode, const char *); - -#endif diff --git a/arch/arm/mach-ep93xx/soc.h b/arch/arm/mach-ep93xx/soc.h deleted file mode 100644 index 3245ebbd506984..00000000000000 --- a/arch/arm/mach-ep93xx/soc.h +++ /dev/null @@ -1,212 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * arch/arm/mach-ep93xx/soc.h - * - * Copyright (C) 2012 Open Kernel Labs - * Copyright (C) 2012 Ryan Mallon - */ - -#ifndef _EP93XX_SOC_H -#define _EP93XX_SOC_H - -#include "ep93xx-regs.h" -#include "irqs.h" - -/* - * EP93xx Physical Memory Map: - * - * The ASDO 
pin is sampled at system reset to select a synchronous or - * asynchronous boot configuration. When ASDO is "1" (i.e. pulled-up) - * the synchronous boot mode is selected. When ASDO is "0" (i.e - * pulled-down) the asynchronous boot mode is selected. - * - * In synchronous boot mode nSDCE3 is decoded starting at physical address - * 0x00000000 and nCS0 is decoded starting at 0xf0000000. For asynchronous - * boot mode they are swapped with nCS0 decoded at 0x00000000 ann nSDCE3 - * decoded at 0xf0000000. - * - * There is known errata for the EP93xx dealing with External Memory - * Configurations. Please refer to "AN273: EP93xx Silicon Rev E Design - * Guidelines" for more information. This document can be found at: - * - * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf - */ - -#define EP93XX_CS0_PHYS_BASE_ASYNC 0x00000000 /* ASDO Pin = 0 */ -#define EP93XX_SDCE3_PHYS_BASE_SYNC 0x00000000 /* ASDO Pin = 1 */ -#define EP93XX_CS1_PHYS_BASE 0x10000000 -#define EP93XX_CS2_PHYS_BASE 0x20000000 -#define EP93XX_CS3_PHYS_BASE 0x30000000 -#define EP93XX_PCMCIA_PHYS_BASE 0x40000000 -#define EP93XX_CS6_PHYS_BASE 0x60000000 -#define EP93XX_CS7_PHYS_BASE 0x70000000 -#define EP93XX_SDCE0_PHYS_BASE 0xc0000000 -#define EP93XX_SDCE1_PHYS_BASE 0xd0000000 -#define EP93XX_SDCE2_PHYS_BASE 0xe0000000 -#define EP93XX_SDCE3_PHYS_BASE_ASYNC 0xf0000000 /* ASDO Pin = 0 */ -#define EP93XX_CS0_PHYS_BASE_SYNC 0xf0000000 /* ASDO Pin = 1 */ - -/* AHB peripherals */ -#define EP93XX_DMA_BASE EP93XX_AHB_IOMEM(0x00000000) - -#define EP93XX_ETHERNET_PHYS_BASE EP93XX_AHB_PHYS(0x00010000) -#define EP93XX_ETHERNET_BASE EP93XX_AHB_IOMEM(0x00010000) - -#define EP93XX_USB_PHYS_BASE EP93XX_AHB_PHYS(0x00020000) -#define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000) - -#define EP93XX_RASTER_PHYS_BASE EP93XX_AHB_PHYS(0x00030000) -#define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000) - -#define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000) - -#define EP93XX_SDRAM_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00060000) - -#define EP93XX_PCMCIA_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00080000) - -#define EP93XX_BOOT_ROM_BASE EP93XX_AHB_IOMEM(0x00090000) - -#define EP93XX_IDE_PHYS_BASE EP93XX_AHB_PHYS(0x000a0000) -#define EP93XX_IDE_BASE EP93XX_AHB_IOMEM(0x000a0000) - -#define EP93XX_VIC1_BASE EP93XX_AHB_IOMEM(0x000b0000) - -#define EP93XX_VIC2_BASE EP93XX_AHB_IOMEM(0x000c0000) - -/* APB peripherals */ -#define EP93XX_TIMER_BASE EP93XX_APB_IOMEM(0x00010000) - -#define EP93XX_I2S_PHYS_BASE EP93XX_APB_PHYS(0x00020000) -#define EP93XX_I2S_BASE EP93XX_APB_IOMEM(0x00020000) - -#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000) - -#define EP93XX_AAC_PHYS_BASE EP93XX_APB_PHYS(0x00080000) -#define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000) - -#define EP93XX_SPI_PHYS_BASE EP93XX_APB_PHYS(0x000a0000) -#define EP93XX_SPI_BASE EP93XX_APB_IOMEM(0x000a0000) - -#define EP93XX_IRDA_BASE EP93XX_APB_IOMEM(0x000b0000) - -#define EP93XX_KEY_MATRIX_PHYS_BASE EP93XX_APB_PHYS(0x000f0000) -#define EP93XX_KEY_MATRIX_BASE EP93XX_APB_IOMEM(0x000f0000) - -#define EP93XX_ADC_PHYS_BASE EP93XX_APB_PHYS(0x00100000) -#define EP93XX_ADC_BASE EP93XX_APB_IOMEM(0x00100000) -#define EP93XX_TOUCHSCREEN_BASE EP93XX_APB_IOMEM(0x00100000) - -#define EP93XX_PWM_PHYS_BASE EP93XX_APB_PHYS(0x00110000) -#define EP93XX_PWM_BASE EP93XX_APB_IOMEM(0x00110000) - -#define EP93XX_RTC_PHYS_BASE EP93XX_APB_PHYS(0x00120000) -#define EP93XX_RTC_BASE EP93XX_APB_IOMEM(0x00120000) - -#define EP93XX_WATCHDOG_PHYS_BASE EP93XX_APB_PHYS(0x00140000) -#define EP93XX_WATCHDOG_BASE 
EP93XX_APB_IOMEM(0x00140000) - -/* System controller */ -#define EP93XX_SYSCON_BASE EP93XX_APB_IOMEM(0x00130000) -#define EP93XX_SYSCON_REG(x) (EP93XX_SYSCON_BASE + (x)) -#define EP93XX_SYSCON_POWER_STATE EP93XX_SYSCON_REG(0x00) -#define EP93XX_SYSCON_PWRCNT EP93XX_SYSCON_REG(0x04) -#define EP93XX_SYSCON_PWRCNT_FIR_EN (1<<31) -#define EP93XX_SYSCON_PWRCNT_UARTBAUD (1<<29) -#define EP93XX_SYSCON_PWRCNT_USH_EN 28 -#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 27 -#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 26 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 25 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 24 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 23 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 22 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 21 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 20 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 19 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 18 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 17 -#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 16 -#define EP93XX_SYSCON_HALT EP93XX_SYSCON_REG(0x08) -#define EP93XX_SYSCON_STANDBY EP93XX_SYSCON_REG(0x0c) -#define EP93XX_SYSCON_CLKSET1 EP93XX_SYSCON_REG(0x20) -#define EP93XX_SYSCON_CLKSET1_NBYP1 (1<<23) -#define EP93XX_SYSCON_CLKSET2 EP93XX_SYSCON_REG(0x24) -#define EP93XX_SYSCON_CLKSET2_NBYP2 (1<<19) -#define EP93XX_SYSCON_CLKSET2_PLL2_EN (1<<18) -#define EP93XX_SYSCON_DEVCFG EP93XX_SYSCON_REG(0x80) -#define EP93XX_SYSCON_DEVCFG_SWRST (1<<31) -#define EP93XX_SYSCON_DEVCFG_D1ONG (1<<30) -#define EP93XX_SYSCON_DEVCFG_D0ONG (1<<29) -#define EP93XX_SYSCON_DEVCFG_IONU2 (1<<28) -#define EP93XX_SYSCON_DEVCFG_GONK (1<<27) -#define EP93XX_SYSCON_DEVCFG_TONG (1<<26) -#define EP93XX_SYSCON_DEVCFG_MONG (1<<25) -#define EP93XX_SYSCON_DEVCFG_U3EN 24 -#define EP93XX_SYSCON_DEVCFG_CPENA (1<<23) -#define EP93XX_SYSCON_DEVCFG_A2ONG (1<<22) -#define EP93XX_SYSCON_DEVCFG_A1ONG (1<<21) -#define EP93XX_SYSCON_DEVCFG_U2EN 20 -#define EP93XX_SYSCON_DEVCFG_EXVC (1<<19) -#define EP93XX_SYSCON_DEVCFG_U1EN 18 -#define EP93XX_SYSCON_DEVCFG_TIN (1<<17) -#define EP93XX_SYSCON_DEVCFG_HC3IN (1<<15) -#define EP93XX_SYSCON_DEVCFG_HC3EN (1<<14) -#define EP93XX_SYSCON_DEVCFG_HC1IN (1<<13) -#define EP93XX_SYSCON_DEVCFG_HC1EN (1<<12) -#define EP93XX_SYSCON_DEVCFG_HONIDE (1<<11) -#define EP93XX_SYSCON_DEVCFG_GONIDE (1<<10) -#define EP93XX_SYSCON_DEVCFG_PONG (1<<9) -#define EP93XX_SYSCON_DEVCFG_EONIDE (1<<8) -#define EP93XX_SYSCON_DEVCFG_I2SONSSP (1<<7) -#define EP93XX_SYSCON_DEVCFG_I2SONAC97 (1<<6) -#define EP93XX_SYSCON_DEVCFG_RASONP3 (1<<4) -#define EP93XX_SYSCON_DEVCFG_RAS (1<<3) -#define EP93XX_SYSCON_DEVCFG_ADCPD (1<<2) -#define EP93XX_SYSCON_DEVCFG_KEYS (1<<1) -#define EP93XX_SYSCON_DEVCFG_SHENA (1<<0) -#define EP93XX_SYSCON_VIDCLKDIV EP93XX_SYSCON_REG(0x84) -#define EP93XX_SYSCON_CLKDIV_ENABLE 15 -#define EP93XX_SYSCON_CLKDIV_ESEL (1<<14) -#define EP93XX_SYSCON_CLKDIV_PSEL (1<<13) -#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8 -#define EP93XX_SYSCON_I2SCLKDIV EP93XX_SYSCON_REG(0x8c) -#define EP93XX_SYSCON_I2SCLKDIV_SENA 31 -#define EP93XX_SYSCON_I2SCLKDIV_ORIDE (1<<29) -#define EP93XX_SYSCON_I2SCLKDIV_SPOL (1<<19) -#define EP93XX_I2SCLKDIV_SDIV (1 << 16) -#define EP93XX_I2SCLKDIV_LRDIV32 (0 << 17) -#define EP93XX_I2SCLKDIV_LRDIV64 (1 << 17) -#define EP93XX_I2SCLKDIV_LRDIV128 (2 << 17) -#define EP93XX_I2SCLKDIV_LRDIV_MASK (3 << 17) -#define EP93XX_SYSCON_KEYTCHCLKDIV EP93XX_SYSCON_REG(0x90) -#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN 31 -#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV 16 -#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN 15 -#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV (1<<0) -#define EP93XX_SYSCON_SYSCFG EP93XX_SYSCON_REG(0x9c) 
-#define EP93XX_SYSCON_SYSCFG_REV_MASK (0xf0000000) -#define EP93XX_SYSCON_SYSCFG_REV_SHIFT (28) -#define EP93XX_SYSCON_SYSCFG_SBOOT (1<<8) -#define EP93XX_SYSCON_SYSCFG_LCSN7 (1<<7) -#define EP93XX_SYSCON_SYSCFG_LCSN6 (1<<6) -#define EP93XX_SYSCON_SYSCFG_LASDO (1<<5) -#define EP93XX_SYSCON_SYSCFG_LEEDA (1<<4) -#define EP93XX_SYSCON_SYSCFG_LEECLK (1<<3) -#define EP93XX_SYSCON_SYSCFG_LCSN2 (1<<1) -#define EP93XX_SYSCON_SYSCFG_LCSN1 (1<<0) -#define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0) - -/* EP93xx System Controller software locked register write */ -void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg); -void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits); - -static inline void ep93xx_devcfg_set_bits(unsigned int bits) -{ - ep93xx_devcfg_set_clear(bits, 0x00); -} - -static inline void ep93xx_devcfg_clear_bits(unsigned int bits) -{ - ep93xx_devcfg_set_clear(0x00, bits); -} - -#endif /* _EP93XX_SOC_H */ diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c deleted file mode 100644 index a9efa7bc2fa127..00000000000000 --- a/arch/arm/mach-ep93xx/timer-ep93xx.c +++ /dev/null @@ -1,143 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "soc.h" -#include "platform.h" - -/************************************************************************* - * Timer handling for EP93xx - ************************************************************************* - * The ep93xx has four internal timers. Timers 1, 2 (both 16 bit) and - * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate - * an interrupt on underflow. Timer 4 (40 bit) counts down at 983.04 kHz, - * is free-running, and can't generate interrupts. - * - * The 508 kHz timers are ideal for use for the timer interrupt, as the - * most common values of HZ divide 508 kHz nicely. We pick the 32 bit - * timer (timer 3) to get as long sleep intervals as possible when using - * CONFIG_NO_HZ. - * - * The higher clock rate of timer 4 makes it a better choice than the - * other timers for use as clock source and for sched_clock(), providing - * a stable 40 bit time base. 
- ************************************************************************* - */ -#define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x)) -#define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00) -#define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04) -#define EP93XX_TIMER1_CONTROL EP93XX_TIMER_REG(0x08) -#define EP93XX_TIMER123_CONTROL_ENABLE (1 << 7) -#define EP93XX_TIMER123_CONTROL_MODE (1 << 6) -#define EP93XX_TIMER123_CONTROL_CLKSEL (1 << 3) -#define EP93XX_TIMER1_CLEAR EP93XX_TIMER_REG(0x0c) -#define EP93XX_TIMER2_LOAD EP93XX_TIMER_REG(0x20) -#define EP93XX_TIMER2_VALUE EP93XX_TIMER_REG(0x24) -#define EP93XX_TIMER2_CONTROL EP93XX_TIMER_REG(0x28) -#define EP93XX_TIMER2_CLEAR EP93XX_TIMER_REG(0x2c) -#define EP93XX_TIMER4_VALUE_LOW EP93XX_TIMER_REG(0x60) -#define EP93XX_TIMER4_VALUE_HIGH EP93XX_TIMER_REG(0x64) -#define EP93XX_TIMER4_VALUE_HIGH_ENABLE (1 << 8) -#define EP93XX_TIMER3_LOAD EP93XX_TIMER_REG(0x80) -#define EP93XX_TIMER3_VALUE EP93XX_TIMER_REG(0x84) -#define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88) -#define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c) - -#define EP93XX_TIMER123_RATE 508469 -#define EP93XX_TIMER4_RATE 983040 - -static u64 notrace ep93xx_read_sched_clock(void) -{ - u64 ret; - - ret = readl(EP93XX_TIMER4_VALUE_LOW); - ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32); - return ret; -} - -static u64 ep93xx_clocksource_read(struct clocksource *c) -{ - u64 ret; - - ret = readl(EP93XX_TIMER4_VALUE_LOW); - ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32); - return (u64) ret; -} - -static int ep93xx_clkevt_set_next_event(unsigned long next, - struct clock_event_device *evt) -{ - /* Default mode: periodic, off, 508 kHz */ - u32 tmode = EP93XX_TIMER123_CONTROL_MODE | - EP93XX_TIMER123_CONTROL_CLKSEL; - - /* Clear timer */ - writel(tmode, EP93XX_TIMER3_CONTROL); - - /* Set next event */ - writel(next, EP93XX_TIMER3_LOAD); - writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE, - EP93XX_TIMER3_CONTROL); - return 0; -} - - -static int ep93xx_clkevt_shutdown(struct clock_event_device *evt) -{ - /* Disable timer */ - writel(0, EP93XX_TIMER3_CONTROL); - - return 0; -} - -static struct clock_event_device ep93xx_clockevent = { - .name = "timer1", - .features = CLOCK_EVT_FEAT_ONESHOT, - .set_state_shutdown = ep93xx_clkevt_shutdown, - .set_state_oneshot = ep93xx_clkevt_shutdown, - .tick_resume = ep93xx_clkevt_shutdown, - .set_next_event = ep93xx_clkevt_set_next_event, - .rating = 300, -}; - -static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; - - /* Writing any value clears the timer interrupt */ - writel(1, EP93XX_TIMER3_CLEAR); - - evt->event_handler(evt); - - return IRQ_HANDLED; -} - -void __init ep93xx_timer_init(void) -{ - int irq = IRQ_EP93XX_TIMER3; - unsigned long flags = IRQF_TIMER | IRQF_IRQPOLL; - - /* Enable and register clocksource and sched_clock on timer 4 */ - writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE, - EP93XX_TIMER4_VALUE_HIGH); - clocksource_mmio_init(NULL, "timer4", - EP93XX_TIMER4_RATE, 200, 40, - ep93xx_clocksource_read); - sched_clock_register(ep93xx_read_sched_clock, 40, - EP93XX_TIMER4_RATE); - - /* Set up clockevent on timer 3 */ - if (request_irq(irq, ep93xx_timer_interrupt, flags, "ep93xx timer", - &ep93xx_clockevent)) - pr_err("Failed to request irq %d (ep93xx timer)\n", irq); - clockevents_config_and_register(&ep93xx_clockevent, - EP93XX_TIMER123_RATE, - 1, - 0xffffffffU); -} diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c deleted file mode 100644 index 
d3de7283ecb3f6..00000000000000 --- a/arch/arm/mach-ep93xx/ts72xx.c +++ /dev/null @@ -1,422 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * arch/arm/mach-ep93xx/ts72xx.c - * Technologic Systems TS72xx SBC support. - * - * Copyright (C) 2006 Lennert Buytenhek - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "gpio-ep93xx.h" -#include "hardware.h" - -#include -#include -#include - -#include "soc.h" -#include "ts72xx.h" - -/************************************************************************* - * IO map - *************************************************************************/ -static struct map_desc ts72xx_io_desc[] __initdata = { - { - .virtual = (unsigned long)TS72XX_MODEL_VIRT_BASE, - .pfn = __phys_to_pfn(TS72XX_MODEL_PHYS_BASE), - .length = TS72XX_MODEL_SIZE, - .type = MT_DEVICE, - }, { - .virtual = (unsigned long)TS72XX_OPTIONS_VIRT_BASE, - .pfn = __phys_to_pfn(TS72XX_OPTIONS_PHYS_BASE), - .length = TS72XX_OPTIONS_SIZE, - .type = MT_DEVICE, - }, { - .virtual = (unsigned long)TS72XX_OPTIONS2_VIRT_BASE, - .pfn = __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE), - .length = TS72XX_OPTIONS2_SIZE, - .type = MT_DEVICE, - }, { - .virtual = (unsigned long)TS72XX_CPLDVER_VIRT_BASE, - .pfn = __phys_to_pfn(TS72XX_CPLDVER_PHYS_BASE), - .length = TS72XX_CPLDVER_SIZE, - .type = MT_DEVICE, - } -}; - -static void __init ts72xx_map_io(void) -{ - ep93xx_map_io(); - iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc)); -} - - -/************************************************************************* - * NAND flash - *************************************************************************/ -#define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */ -#define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */ - -static void ts72xx_nand_hwcontrol(struct nand_chip *chip, - int cmd, unsigned int ctrl) -{ - if (ctrl & NAND_CTRL_CHANGE) { - void __iomem *addr = chip->legacy.IO_ADDR_R; - unsigned char bits; - - addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE); - - bits = __raw_readb(addr) & ~0x07; - bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */ - bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */ - bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */ - - __raw_writeb(bits, addr); - } - - if (cmd != NAND_CMD_NONE) - __raw_writeb(cmd, chip->legacy.IO_ADDR_W); -} - -static int ts72xx_nand_device_ready(struct nand_chip *chip) -{ - void __iomem *addr = chip->legacy.IO_ADDR_R; - - addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE); - - return !!(__raw_readb(addr) & 0x20); -} - -#define TS72XX_BOOTROM_PART_SIZE (SZ_16K) -#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M) - -static struct mtd_partition ts72xx_nand_parts[] = { - { - .name = "TS-BOOTROM", - .offset = 0, - .size = TS72XX_BOOTROM_PART_SIZE, - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, { - .name = "Linux", - .offset = MTDPART_OFS_RETAIN, - .size = TS72XX_REDBOOT_PART_SIZE, - /* leave so much for last partition */ - }, { - .name = "RedBoot", - .offset = MTDPART_OFS_APPEND, - .size = MTDPART_SIZ_FULL, - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, -}; - -static struct platform_nand_data ts72xx_nand_data = { - .chip = { - .nr_chips = 1, - .chip_offset = 0, - .chip_delay = 15, - }, - .ctrl = { - .cmd_ctrl = ts72xx_nand_hwcontrol, - .dev_ready = ts72xx_nand_device_ready, - }, -}; - -static struct resource ts72xx_nand_resource[] = { - { - .start = 0, /* filled in later */ - .end = 0, /* filled in later */ - .flags = 
IORESOURCE_MEM, - }, -}; - -static struct platform_device ts72xx_nand_flash = { - .name = "gen_nand", - .id = -1, - .dev.platform_data = &ts72xx_nand_data, - .resource = ts72xx_nand_resource, - .num_resources = ARRAY_SIZE(ts72xx_nand_resource), -}; - -static void __init ts72xx_register_flash(struct mtd_partition *parts, int n, - resource_size_t start) -{ - /* - * TS7200 has NOR flash all other TS72xx board have NAND flash. - */ - if (board_is_ts7200()) { - ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M); - } else { - ts72xx_nand_resource[0].start = start; - ts72xx_nand_resource[0].end = start + SZ_16M - 1; - - ts72xx_nand_data.chip.partitions = parts; - ts72xx_nand_data.chip.nr_partitions = n; - - platform_device_register(&ts72xx_nand_flash); - } -} - -/************************************************************************* - * RTC M48T86 - *************************************************************************/ -#define TS72XX_RTC_INDEX_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x00800000) -#define TS72XX_RTC_DATA_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x01700000) - -static struct resource ts72xx_rtc_resources[] = { - DEFINE_RES_MEM(TS72XX_RTC_INDEX_PHYS_BASE, 0x01), - DEFINE_RES_MEM(TS72XX_RTC_DATA_PHYS_BASE, 0x01), -}; - -static struct platform_device ts72xx_rtc_device = { - .name = "rtc-m48t86", - .id = -1, - .resource = ts72xx_rtc_resources, - .num_resources = ARRAY_SIZE(ts72xx_rtc_resources), -}; - -/************************************************************************* - * Watchdog (in CPLD) - *************************************************************************/ -#define TS72XX_WDT_CONTROL_PHYS_BASE (EP93XX_CS2_PHYS_BASE + 0x03800000) -#define TS72XX_WDT_FEED_PHYS_BASE (EP93XX_CS2_PHYS_BASE + 0x03c00000) - -static struct resource ts72xx_wdt_resources[] = { - DEFINE_RES_MEM(TS72XX_WDT_CONTROL_PHYS_BASE, 0x01), - DEFINE_RES_MEM(TS72XX_WDT_FEED_PHYS_BASE, 0x01), -}; - -static struct platform_device ts72xx_wdt_device = { - .name = "ts72xx-wdt", - .id = -1, - .resource = ts72xx_wdt_resources, - .num_resources = ARRAY_SIZE(ts72xx_wdt_resources), -}; - -/************************************************************************* - * ETH - *************************************************************************/ -static struct ep93xx_eth_data __initdata ts72xx_eth_data = { - .phy_id = 1, -}; - -/************************************************************************* - * SPI SD/MMC host - *************************************************************************/ -#define BK3_EN_SDCARD_PHYS_BASE 0x12400000 -#define BK3_EN_SDCARD_PWR 0x0 -#define BK3_DIS_SDCARD_PWR 0x0C -static void bk3_mmc_spi_setpower(struct device *dev, unsigned int vdd) -{ - void __iomem *pwr_sd = ioremap(BK3_EN_SDCARD_PHYS_BASE, SZ_4K); - - if (!pwr_sd) { - pr_err("Failed to enable SD card power!"); - return; - } - - pr_debug("%s: SD card pwr %s VDD:0x%x\n", __func__, - !!vdd ? 
"ON" : "OFF", vdd); - - if (!!vdd) - __raw_writeb(BK3_EN_SDCARD_PWR, pwr_sd); - else - __raw_writeb(BK3_DIS_SDCARD_PWR, pwr_sd); - - iounmap(pwr_sd); -} - -static struct mmc_spi_platform_data bk3_spi_mmc_data = { - .detect_delay = 500, - .powerup_msecs = 100, - .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, - .caps = MMC_CAP_NONREMOVABLE, - .setpower = bk3_mmc_spi_setpower, -}; - -/************************************************************************* - * SPI Bus - SD card access - *************************************************************************/ -static struct spi_board_info bk3_spi_board_info[] __initdata = { - { - .modalias = "mmc_spi", - .platform_data = &bk3_spi_mmc_data, - .max_speed_hz = 7.4E6, - .bus_num = 0, - .chip_select = 0, - .mode = SPI_MODE_0, - }, -}; - -/* - * This is a stub -> the FGPIO[3] pin is not connected on the schematic - * The all work is performed automatically by !SPI_FRAME (SFRM1) and - * goes through CPLD - */ -static struct gpiod_lookup_table bk3_spi_cs_gpio_table = { - .dev_id = "spi0", - .table = { - GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW), - { }, - }, -}; - -static struct ep93xx_spi_info bk3_spi_master __initdata = { - .use_dma = 1, -}; - -/************************************************************************* - * TS72XX support code - *************************************************************************/ -#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX) - -/* Relative to EP93XX_CS1_PHYS_BASE */ -#define TS73XX_FPGA_LOADER_BASE 0x03c00000 - -static struct resource ts73xx_fpga_resources[] = { - { - .start = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE, - .end = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE + 1, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device ts73xx_fpga_device = { - .name = "ts73xx-fpga-mgr", - .id = -1, - .resource = ts73xx_fpga_resources, - .num_resources = ARRAY_SIZE(ts73xx_fpga_resources), -}; - -#endif - -/************************************************************************* - * SPI Bus - *************************************************************************/ -static struct spi_board_info ts72xx_spi_devices[] __initdata = { - { - .modalias = "tmp122", - .max_speed_hz = 2 * 1000 * 1000, - .bus_num = 0, - .chip_select = 0, - }, -}; - -static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = { - .dev_id = "spi0", - .table = { - /* DIO_17 */ - GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW), - { }, - }, -}; - -static struct ep93xx_spi_info ts72xx_spi_info __initdata = { - /* Intentionally left blank */ -}; - -static void __init ts72xx_init_machine(void) -{ - ep93xx_init_devices(); - ts72xx_register_flash(ts72xx_nand_parts, ARRAY_SIZE(ts72xx_nand_parts), - is_ts9420_installed() ? 
- EP93XX_CS7_PHYS_BASE : EP93XX_CS6_PHYS_BASE); - platform_device_register(&ts72xx_rtc_device); - platform_device_register(&ts72xx_wdt_device); - - ep93xx_register_eth(&ts72xx_eth_data, 1); -#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX) - if (board_is_ts7300()) - platform_device_register(&ts73xx_fpga_device); -#endif - gpiod_add_lookup_table(&ts72xx_spi_cs_gpio_table); - ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices, - ARRAY_SIZE(ts72xx_spi_devices)); -} - -MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC") - /* Maintainer: Lennert Buytenhek */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ts72xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = ts72xx_init_machine, - .restart = ep93xx_restart, -MACHINE_END - -/************************************************************************* - * EP93xx I2S audio peripheral handling - *************************************************************************/ -static struct resource ep93xx_i2s_resource[] = { - DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100), - DEFINE_RES_IRQ_NAMED(IRQ_EP93XX_SAI, "spilink i2s slave"), -}; - -static struct platform_device ep93xx_i2s_device = { - .name = "ep93xx-spilink-i2s", - .id = -1, - .num_resources = ARRAY_SIZE(ep93xx_i2s_resource), - .resource = ep93xx_i2s_resource, -}; - -/************************************************************************* - * BK3 support code - *************************************************************************/ -static struct mtd_partition bk3_nand_parts[] = { - { - .name = "System", - .offset = 0x00000000, - .size = 0x01e00000, - }, { - .name = "Data", - .offset = 0x01e00000, - .size = 0x05f20000 - }, { - .name = "RedBoot", - .offset = 0x07d20000, - .size = 0x002e0000, - .mask_flags = MTD_WRITEABLE, /* force RO */ - }, -}; - -static void __init bk3_init_machine(void) -{ - ep93xx_init_devices(); - - ts72xx_register_flash(bk3_nand_parts, ARRAY_SIZE(bk3_nand_parts), - EP93XX_CS6_PHYS_BASE); - - ep93xx_register_eth(&ts72xx_eth_data, 1); - - gpiod_add_lookup_table(&bk3_spi_cs_gpio_table); - ep93xx_register_spi(&bk3_spi_master, bk3_spi_board_info, - ARRAY_SIZE(bk3_spi_board_info)); - - /* Configure ep93xx's I2S to use AC97 pins */ - ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97); - platform_device_register(&ep93xx_i2s_device); -} - -MACHINE_START(BK3, "Liebherr controller BK3.1") - /* Maintainer: Lukasz Majewski */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS, - .map_io = ts72xx_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = bk3_init_machine, - .restart = ep93xx_restart, -MACHINE_END diff --git a/arch/arm/mach-ep93xx/ts72xx.h b/arch/arm/mach-ep93xx/ts72xx.h deleted file mode 100644 index 00b4941d29c950..00000000000000 --- a/arch/arm/mach-ep93xx/ts72xx.h +++ /dev/null @@ -1,94 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * arch/arm/mach-ep93xx/include/mach/ts72xx.h - */ - -/* - * TS72xx memory map: - * - * virt phys size - * febff000 22000000 4K model number register (bits 0-2) - * febfe000 22400000 4K options register - * febfd000 22800000 4K options register #2 - * febfc000 23400000 4K CPLD version register - */ - -#ifndef __TS72XX_H_ -#define __TS72XX_H_ - -#define TS72XX_MODEL_PHYS_BASE 0x22000000 -#define TS72XX_MODEL_VIRT_BASE IOMEM(0xfebff000) -#define TS72XX_MODEL_SIZE 0x00001000 - -#define TS72XX_MODEL_TS7200 0x00 -#define TS72XX_MODEL_TS7250 0x01 -#define TS72XX_MODEL_TS7260 0x02 -#define TS72XX_MODEL_TS7300 0x03 -#define TS72XX_MODEL_TS7400 
0x04 -#define TS72XX_MODEL_MASK 0x07 - - -#define TS72XX_OPTIONS_PHYS_BASE 0x22400000 -#define TS72XX_OPTIONS_VIRT_BASE IOMEM(0xfebfe000) -#define TS72XX_OPTIONS_SIZE 0x00001000 - -#define TS72XX_OPTIONS_COM2_RS485 0x02 -#define TS72XX_OPTIONS_MAX197 0x01 - - -#define TS72XX_OPTIONS2_PHYS_BASE 0x22800000 -#define TS72XX_OPTIONS2_VIRT_BASE IOMEM(0xfebfd000) -#define TS72XX_OPTIONS2_SIZE 0x00001000 - -#define TS72XX_OPTIONS2_TS9420 0x04 -#define TS72XX_OPTIONS2_TS9420_BOOT 0x02 - -#define TS72XX_CPLDVER_PHYS_BASE 0x23400000 -#define TS72XX_CPLDVER_VIRT_BASE IOMEM(0xfebfc000) -#define TS72XX_CPLDVER_SIZE 0x00001000 - -#ifndef __ASSEMBLY__ - -static inline int ts72xx_model(void) -{ - return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK; -} - -static inline int board_is_ts7200(void) -{ - return ts72xx_model() == TS72XX_MODEL_TS7200; -} - -static inline int board_is_ts7250(void) -{ - return ts72xx_model() == TS72XX_MODEL_TS7250; -} - -static inline int board_is_ts7260(void) -{ - return ts72xx_model() == TS72XX_MODEL_TS7260; -} - -static inline int board_is_ts7300(void) -{ - return ts72xx_model() == TS72XX_MODEL_TS7300; -} - -static inline int board_is_ts7400(void) -{ - return ts72xx_model() == TS72XX_MODEL_TS7400; -} - -static inline int is_max197_installed(void) -{ - return !!(__raw_readb(TS72XX_OPTIONS_VIRT_BASE) & - TS72XX_OPTIONS_MAX197); -} - -static inline int is_ts9420_installed(void) -{ - return !!(__raw_readb(TS72XX_OPTIONS2_VIRT_BASE) & - TS72XX_OPTIONS2_TS9420); -} -#endif -#endif /* __TS72XX_H_ */ diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c deleted file mode 100644 index 9471938df64c86..00000000000000 --- a/arch/arm/mach-ep93xx/vision_ep9307.c +++ /dev/null @@ -1,321 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * arch/arm/mach-ep93xx/vision_ep9307.c - * Vision Engraving Systems EP9307 SoM support. 
- * - * Copyright (C) 2008-2011 Vision Engraving Systems - * H Hartley Sweeten - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "hardware.h" -#include -#include -#include "gpio-ep93xx.h" - -#include -#include -#include - -#include "soc.h" - -/************************************************************************* - * Static I/O mappings for the FPGA - *************************************************************************/ -#define VISION_PHYS_BASE EP93XX_CS7_PHYS_BASE -#define VISION_VIRT_BASE 0xfebff000 - -static struct map_desc vision_io_desc[] __initdata = { - { - .virtual = VISION_VIRT_BASE, - .pfn = __phys_to_pfn(VISION_PHYS_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, -}; - -static void __init vision_map_io(void) -{ - ep93xx_map_io(); - - iotable_init(vision_io_desc, ARRAY_SIZE(vision_io_desc)); -} - -/************************************************************************* - * Ethernet - *************************************************************************/ -static struct ep93xx_eth_data vision_eth_data __initdata = { - .phy_id = 1, -}; - -/************************************************************************* - * Framebuffer - *************************************************************************/ -#define VISION_LCD_ENABLE EP93XX_GPIO_LINE_EGPIO1 - -static int vision_lcd_setup(struct platform_device *pdev) -{ - int err; - - err = gpio_request_one(VISION_LCD_ENABLE, GPIOF_INIT_HIGH, - dev_name(&pdev->dev)); - if (err) - return err; - - ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_RAS | - EP93XX_SYSCON_DEVCFG_RASONP3 | - EP93XX_SYSCON_DEVCFG_EXVC); - - return 0; -} - -static void vision_lcd_teardown(struct platform_device *pdev) -{ - gpio_free(VISION_LCD_ENABLE); -} - -static void vision_lcd_blank(int blank_mode, struct fb_info *info) -{ - if (blank_mode) - gpio_set_value(VISION_LCD_ENABLE, 0); - else - gpio_set_value(VISION_LCD_ENABLE, 1); -} - -static struct ep93xxfb_mach_info ep93xxfb_info __initdata = { - .flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING, - .setup = vision_lcd_setup, - .teardown = vision_lcd_teardown, - .blank = vision_lcd_blank, -}; - - -/************************************************************************* - * GPIO Expanders - *************************************************************************/ -#define PCA9539_74_GPIO_BASE (EP93XX_GPIO_LINE_MAX + 1) -#define PCA9539_75_GPIO_BASE (PCA9539_74_GPIO_BASE + 16) -#define PCA9539_76_GPIO_BASE (PCA9539_75_GPIO_BASE + 16) -#define PCA9539_77_GPIO_BASE (PCA9539_76_GPIO_BASE + 16) - -static struct pca953x_platform_data pca953x_74_gpio_data = { - .gpio_base = PCA9539_74_GPIO_BASE, - .irq_base = EP93XX_BOARD_IRQ(0), -}; - -static struct pca953x_platform_data pca953x_75_gpio_data = { - .gpio_base = PCA9539_75_GPIO_BASE, - .irq_base = -1, -}; - -static struct pca953x_platform_data pca953x_76_gpio_data = { - .gpio_base = PCA9539_76_GPIO_BASE, - .irq_base = -1, -}; - -static struct pca953x_platform_data pca953x_77_gpio_data = { - .gpio_base = PCA9539_77_GPIO_BASE, - .irq_base = -1, -}; - -/************************************************************************* - * I2C Bus - *************************************************************************/ - -static struct i2c_board_info vision_i2c_info[] __initdata = { - { - I2C_BOARD_INFO("isl1208", 0x6f), - .irq = IRQ_EP93XX_EXT1, - }, { - I2C_BOARD_INFO("pca9539", 
0x74), - .platform_data = &pca953x_74_gpio_data, - }, { - I2C_BOARD_INFO("pca9539", 0x75), - .platform_data = &pca953x_75_gpio_data, - }, { - I2C_BOARD_INFO("pca9539", 0x76), - .platform_data = &pca953x_76_gpio_data, - }, { - I2C_BOARD_INFO("pca9539", 0x77), - .platform_data = &pca953x_77_gpio_data, - }, -}; - -/************************************************************************* - * SPI CS4271 Audio Codec - *************************************************************************/ -static struct cs4271_platform_data vision_cs4271_data = { - /* Intentionally left blank */ -}; - -/************************************************************************* - * SPI Flash - *************************************************************************/ -static struct mtd_partition vision_spi_flash_partitions[] = { - { - .name = "SPI bootstrap", - .offset = 0, - .size = SZ_4K, - }, { - .name = "Bootstrap config", - .offset = MTDPART_OFS_APPEND, - .size = SZ_4K, - }, { - .name = "System config", - .offset = MTDPART_OFS_APPEND, - .size = MTDPART_SIZ_FULL, - }, -}; - -static struct flash_platform_data vision_spi_flash_data = { - .name = "SPI Flash", - .parts = vision_spi_flash_partitions, - .nr_parts = ARRAY_SIZE(vision_spi_flash_partitions), -}; - -/************************************************************************* - * SPI SD/MMC host - *************************************************************************/ -static struct mmc_spi_platform_data vision_spi_mmc_data = { - .detect_delay = 100, - .powerup_msecs = 100, - .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, - .caps2 = MMC_CAP2_RO_ACTIVE_HIGH, -}; - -static struct gpiod_lookup_table vision_spi_mmc_gpio_table = { - .dev_id = "mmc_spi.2", /* "mmc_spi @ CS2 */ - .table = { - /* Card detect */ - GPIO_LOOKUP_IDX("B", 7, NULL, 0, GPIO_ACTIVE_LOW), - /* Write protect */ - GPIO_LOOKUP_IDX("F", 0, NULL, 1, GPIO_ACTIVE_HIGH), - { }, - }, -}; - -/************************************************************************* - * SPI Bus - *************************************************************************/ -static struct spi_board_info vision_spi_board_info[] __initdata = { - { - .modalias = "cs4271", - .platform_data = &vision_cs4271_data, - .max_speed_hz = 6000000, - .bus_num = 0, - .chip_select = 0, - .mode = SPI_MODE_3, - }, { - .modalias = "sst25l", - .platform_data = &vision_spi_flash_data, - .max_speed_hz = 20000000, - .bus_num = 0, - .chip_select = 1, - .mode = SPI_MODE_3, - }, { - .modalias = "mmc_spi", - .platform_data = &vision_spi_mmc_data, - .max_speed_hz = 20000000, - .bus_num = 0, - .chip_select = 2, - .mode = SPI_MODE_3, - }, -}; - -static struct gpiod_lookup_table vision_spi_cs4271_gpio_table = { - .dev_id = "spi0.0", /* cs4271 @ CS0 */ - .table = { - /* RESET */ - GPIO_LOOKUP_IDX("H", 2, NULL, 0, GPIO_ACTIVE_LOW), - { }, - }, -}; - -static struct gpiod_lookup_table vision_spi_cs_gpio_table = { - .dev_id = "spi0", - .table = { - GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("G", 2, "cs", 2, GPIO_ACTIVE_LOW), - { }, - }, -}; - -static struct ep93xx_spi_info vision_spi_master __initdata = { - .use_dma = 1, -}; - -/************************************************************************* - * I2S Audio - *************************************************************************/ -static struct platform_device vision_audio_device = { - .name = "edb93xx-audio", - .id = -1, -}; - -static void __init vision_register_i2s(void) -{ - ep93xx_register_i2s(); - 
platform_device_register(&vision_audio_device); -} - -/************************************************************************* - * Machine Initialization - *************************************************************************/ -static void __init vision_init_machine(void) -{ - ep93xx_init_devices(); - ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_64M); - ep93xx_register_eth(&vision_eth_data, 1); - ep93xx_register_fb(&ep93xxfb_info); - ep93xx_register_pwm(1, 0); - - /* - * Request the gpio expander's interrupt gpio line now to prevent - * the kernel from doing a WARN in gpiolib:gpio_ensure_requested(). - */ - if (gpio_request_one(EP93XX_GPIO_LINE_F(7), GPIOF_DIR_IN, - "pca9539:74")) - pr_warn("cannot request interrupt gpio for pca9539:74\n"); - - vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7)); - - ep93xx_register_i2c(vision_i2c_info, - ARRAY_SIZE(vision_i2c_info)); - gpiod_add_lookup_table(&vision_spi_cs4271_gpio_table); - gpiod_add_lookup_table(&vision_spi_mmc_gpio_table); - gpiod_add_lookup_table(&vision_spi_cs_gpio_table); - ep93xx_register_spi(&vision_spi_master, vision_spi_board_info, - ARRAY_SIZE(vision_spi_board_info)); - vision_register_i2s(); -} - -MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307") - /* Maintainer: H Hartley Sweeten */ - .atag_offset = 0x100, - .nr_irqs = NR_EP93XX_IRQS + EP93XX_BOARD_IRQS, - .map_io = vision_map_io, - .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, - .init_machine = vision_init_machine, - .restart = ep93xx_restart, -MACHINE_END diff --git a/arch/arm/mach-imx/mach-imx6sx.c b/arch/arm/mach-imx/mach-imx6sx.c index 9ababf4ac210b9..3feb31ab556e12 100644 --- a/arch/arm/mach-imx/mach-imx6sx.c +++ b/arch/arm/mach-imx/mach-imx6sx.c @@ -7,37 +7,15 @@ #include #include #include -#include #include #include "common.h" #include "cpuidle.h" -static void __init imx6sx_enet_clk_sel(void) -{ - struct regmap *gpr; - - gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sx-iomuxc-gpr"); - if (!IS_ERR(gpr)) { - regmap_update_bits(gpr, IOMUXC_GPR1, - IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK, 0); - regmap_update_bits(gpr, IOMUXC_GPR1, - IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK, 0); - } else { - pr_err("failed to find fsl,imx6sx-iomux-gpr regmap\n"); - } -} - -static inline void imx6sx_enet_init(void) -{ - imx6sx_enet_clk_sel(); -} - static void __init imx6sx_init_machine(void) { of_platform_default_populate(NULL, NULL, NULL); - imx6sx_enet_init(); imx_anatop_init(); imx6sx_pm_init(); } diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c index 9587885fb1ac8b..87632ae0201c86 100644 --- a/arch/arm/mach-imx/mach-imx7d.c +++ b/arch/arm/mach-imx/mach-imx7d.c @@ -48,7 +48,7 @@ static void __init imx7d_enet_clk_sel(void) } } -static inline void imx7d_enet_init(void) +static void __init imx7d_enet_init(void) { imx7d_enet_phy_init(); imx7d_enet_clk_sel(); diff --git a/arch/arm/mach-lpc32xx/Kconfig b/arch/arm/mach-lpc32xx/Kconfig index 35730d3696d029..138599545c24c6 100644 --- a/arch/arm/mach-lpc32xx/Kconfig +++ b/arch/arm/mach-lpc32xx/Kconfig @@ -8,5 +8,6 @@ config ARCH_LPC32XX select CLKSRC_LPC32XX select CPU_ARM926T select GPIOLIB + select LPC32XX_DMAMUX if AMBA_PL08X help Support for the NXP LPC32XX family of processors diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c index fd5d0c8ff69586..04ad651d13a016 100644 --- a/arch/arm/mach-mvebu/board-v7.c +++ b/arch/arm/mach-mvebu/board-v7.c @@ -86,6 +86,9 @@ static int __init mvebu_scan_mem(unsigned long node, const char *uname, base = 
dt_mem_next_cell(dt_root_addr_cells, ®); size = dt_mem_next_cell(dt_root_size_cells, ®); + if (size < MVEBU_DDR_TRAINING_AREA_SZ) + pr_warn("Too little memory to reserve for DDR training\n"); + memblock_reserve(base, MVEBU_DDR_TRAINING_AREA_SZ); } diff --git a/arch/arm/mach-omap1/omap-dma.c b/arch/arm/mach-omap1/omap-dma.c index 9ee472f8ead12f..f091f78631d098 100644 --- a/arch/arm/mach-omap1/omap-dma.c +++ b/arch/arm/mach-omap1/omap-dma.c @@ -59,19 +59,6 @@ static struct omap_dma_dev_attr *d; static int enable_1510_mode; static u32 errata; -struct dma_link_info { - int *linked_dmach_q; - int no_of_lchs_linked; - - int q_count; - int q_tail; - int q_head; - - int chain_state; - int chain_mode; - -}; - static int dma_lch_count; static int dma_chan_count; static int omap_dma_reserve_channels; diff --git a/arch/arm/mach-omap1/pm.h b/arch/arm/mach-omap1/pm.h index d4373a5c469700..b2763fb097ea98 100644 --- a/arch/arm/mach-omap1/pm.h +++ b/arch/arm/mach-omap1/pm.h @@ -114,13 +114,9 @@ extern void omap1_pm_suspend(void); extern void omap1510_cpu_suspend(unsigned long, unsigned long); extern void omap1610_cpu_suspend(unsigned long, unsigned long); -extern void omap1510_idle_loop_suspend(void); -extern void omap1610_idle_loop_suspend(void); extern unsigned int omap1510_cpu_suspend_sz; extern unsigned int omap1610_cpu_suspend_sz; -extern unsigned int omap1510_idle_loop_suspend_sz; -extern unsigned int omap1610_idle_loop_suspend_sz; #ifdef CONFIG_OMAP_SERIAL_WAKE extern void omap_serial_wake_trigger(int enable); diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 7ad74db951f66c..f18ef45e2fe102 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -333,7 +333,7 @@ int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) omap_pm_ops.scu_prepare(cpu, power_state); /* - * CPU never retuns back if targeted power state is OFF mode. + * CPU never returns back if targeted power state is OFF mode. * CPU ONLINE follows normal CPU ONLINE ptah via * omap4_secondary_startup(). 
*/ diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index fca7869c8075a3..800980057373b1 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -315,7 +315,7 @@ static struct omap_device *omap_device_alloc(struct platform_device *pdev, od->hwmods_cnt = oh_cnt; - hwmods = kmemdup(ohs, sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL); + hwmods = kmemdup_array(ohs, oh_cnt, sizeof(*hwmods), GFP_KERNEL); if (!hwmods) goto oda_exit2; diff --git a/arch/arm/mach-orion5x/board-mss2.c b/arch/arm/mach-orion5x/board-mss2.c index b0f16d223adf58..9e3d69891d2f6a 100644 --- a/arch/arm/mach-orion5x/board-mss2.c +++ b/arch/arm/mach-orion5x/board-mss2.c @@ -82,5 +82,5 @@ static void mss2_power_off(void) void __init mss2_init(void) { /* register mss2 specific power-off method */ - pm_power_off = mss2_power_off; + register_platform_power_off(mss2_power_off); } diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index 062109efa0ecc6..fcd38ff7ca459c 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c @@ -700,7 +700,7 @@ static void __init dns323_init(void) if (gpio_request(DNS323_GPIO_POWER_OFF, "POWEROFF") != 0 || gpio_direction_output(DNS323_GPIO_POWER_OFF, 0) != 0) pr_err("DNS-323: failed to setup power-off GPIO\n"); - pm_power_off = dns323a_power_off; + register_platform_power_off(dns323a_power_off); break; case DNS323_REV_B1: /* 5182 built-in SATA init */ @@ -717,7 +717,7 @@ static void __init dns323_init(void) if (gpio_request(DNS323_GPIO_POWER_OFF, "POWEROFF") != 0 || gpio_direction_output(DNS323_GPIO_POWER_OFF, 0) != 0) pr_err("DNS-323: failed to setup power-off GPIO\n"); - pm_power_off = dns323b_power_off; + register_platform_power_off(dns323b_power_off); break; case DNS323_REV_C1: /* 5182 built-in SATA init */ @@ -727,7 +727,7 @@ static void __init dns323_init(void) if (gpio_request(DNS323C_GPIO_POWER_OFF, "POWEROFF") != 0 || gpio_direction_output(DNS323C_GPIO_POWER_OFF, 0) != 0) pr_err("DNS-323: failed to setup power-off GPIO\n"); - pm_power_off = dns323c_power_off; + register_platform_power_off(dns323c_power_off); /* Now, -this- should theoretically be done by the sata_mv driver * once I figure out what's going on there. 
Maybe the behaviour diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c index acba0661808010..339b10891808fa 100644 --- a/arch/arm/mach-orion5x/kurobox_pro-setup.c +++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c @@ -373,7 +373,7 @@ static void __init kurobox_pro_init(void) i2c_register_board_info(0, &kurobox_pro_i2c_rtc, 1); /* register Kurobox Pro specific power-off method */ - pm_power_off = kurobox_pro_power_off; + register_platform_power_off(kurobox_pro_power_off); } #ifdef CONFIG_MACH_KUROBOX_PRO diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c index b7327a61283531..5b0249f109cde8 100644 --- a/arch/arm/mach-orion5x/mv2120-setup.c +++ b/arch/arm/mach-orion5x/mv2120-setup.c @@ -238,7 +238,7 @@ static void __init mv2120_init(void) if (gpio_request(MV2120_GPIO_POWER_OFF, "POWEROFF") != 0 || gpio_direction_output(MV2120_GPIO_POWER_OFF, 1) != 0) pr_err("mv2120: failed to setup power-off GPIO\n"); - pm_power_off = mv2120_power_off; + register_platform_power_off(mv2120_power_off); } /* Warning: HP uses a wrong mach-type (=526) in their bootloader */ diff --git a/arch/arm/mach-orion5x/net2big-setup.c b/arch/arm/mach-orion5x/net2big-setup.c index 6ad9740b426b63..4afd9b4c71a949 100644 --- a/arch/arm/mach-orion5x/net2big-setup.c +++ b/arch/arm/mach-orion5x/net2big-setup.c @@ -423,7 +423,7 @@ static void __init net2big_init(void) if (gpio_request(NET2BIG_GPIO_POWER_OFF, "power-off") == 0 && gpio_direction_output(NET2BIG_GPIO_POWER_OFF, 0) == 0) - pm_power_off = net2big_power_off; + register_platform_power_off(net2big_power_off); else pr_err("net2big: failed to configure power-off GPIO\n"); diff --git a/arch/arm/mach-orion5x/terastation_pro2-setup.c b/arch/arm/mach-orion5x/terastation_pro2-setup.c index 23a5521c683368..a9f01859d10121 100644 --- a/arch/arm/mach-orion5x/terastation_pro2-setup.c +++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c @@ -349,7 +349,7 @@ static void __init tsp2_init(void) i2c_register_board_info(0, &tsp2_i2c_rtc, 1); /* register Terastation Pro II specific power-off method */ - pm_power_off = tsp2_power_off; + register_platform_power_off(tsp2_power_off); } MACHINE_START(TERASTATION_PRO2, "Buffalo Terastation Pro II/Live") diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c index bab8ba0e01ab96..de9092e992c564 100644 --- a/arch/arm/mach-orion5x/ts209-setup.c +++ b/arch/arm/mach-orion5x/ts209-setup.c @@ -314,7 +314,7 @@ static void __init qnap_ts209_init(void) i2c_register_board_info(0, &qnap_ts209_i2c_rtc, 1); /* register tsx09 specific power-off method */ - pm_power_off = qnap_tsx09_power_off; + register_platform_power_off(qnap_tsx09_power_off); } MACHINE_START(TS209, "QNAP TS-109/TS-209") diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c index 8131982c10d97e..725688aa5cba7e 100644 --- a/arch/arm/mach-orion5x/ts409-setup.c +++ b/arch/arm/mach-orion5x/ts409-setup.c @@ -312,7 +312,7 @@ static void __init qnap_ts409_init(void) platform_device_register(&ts409_leds); /* register tsx09 specific power-off method */ - pm_power_off = qnap_tsx09_power_off; + register_platform_power_off(qnap_tsx09_power_off); } MACHINE_START(TS409, "QNAP TS-409") diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 452bf7aac1fadf..33533e35720f88 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -378,38 +378,56 @@ static const uint32_t spitz_keymap[] = { KEY(6, 8, KEY_RIGHT), }; -static const struct 
matrix_keymap_data spitz_keymap_data = { - .keymap = spitz_keymap, - .keymap_size = ARRAY_SIZE(spitz_keymap), +static const struct software_node_ref_args spitz_mkp_row_gpios[] = { + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 12, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 17, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 91, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 34, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 36, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 38, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 39, GPIO_ACTIVE_HIGH), }; -static const uint32_t spitz_row_gpios[] = - { 12, 17, 91, 34, 36, 38, 39 }; -static const uint32_t spitz_col_gpios[] = - { 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 }; - -static struct matrix_keypad_platform_data spitz_mkp_pdata = { - .keymap_data = &spitz_keymap_data, - .row_gpios = spitz_row_gpios, - .col_gpios = spitz_col_gpios, - .num_row_gpios = ARRAY_SIZE(spitz_row_gpios), - .num_col_gpios = ARRAY_SIZE(spitz_col_gpios), - .col_scan_delay_us = 10, - .debounce_ms = 10, - .wakeup = 1, +static const struct software_node_ref_args spitz_mkp_col_gpios[] = { + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 88, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 23, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 24, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 25, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 26, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 27, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 52, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 103, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 107, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 108, GPIO_ACTIVE_HIGH), + SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 114, GPIO_ACTIVE_HIGH), }; -static struct platform_device spitz_mkp_device = { +static const struct property_entry spitz_mkp_properties[] = { + PROPERTY_ENTRY_U32_ARRAY("linux,keymap", spitz_keymap), + PROPERTY_ENTRY_REF_ARRAY("row-gpios", spitz_mkp_row_gpios), + PROPERTY_ENTRY_REF_ARRAY("col-gpios", spitz_mkp_col_gpios), + PROPERTY_ENTRY_U32("col-scan-delay-us", 10), + PROPERTY_ENTRY_U32("debounce-delay-ms", 10), + PROPERTY_ENTRY_BOOL("wakeup-source"), + { } +}; + +static const struct platform_device_info spitz_mkp_info __initconst = { .name = "matrix-keypad", - .id = -1, - .dev = { - .platform_data = &spitz_mkp_pdata, - }, + .id = PLATFORM_DEVID_NONE, + .properties = spitz_mkp_properties, }; + static void __init spitz_mkp_init(void) { - platform_device_register(&spitz_mkp_device); + struct platform_device *pd; + int err; + + pd = platform_device_register_full(&spitz_mkp_info); + err = PTR_ERR_OR_ZERO(pd); + if (err) + pr_err("failed to create keypad device: %d\n", err); } #else static inline void spitz_mkp_init(void) {} @@ -419,45 +437,82 @@ static inline void spitz_mkp_init(void) {} * GPIO keys ******************************************************************************/ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) -static struct gpio_keys_button spitz_gpio_keys[] = { - { - .type = EV_PWR, - .code = KEY_SUSPEND, - .gpio = SPITZ_GPIO_ON_KEY, - .desc = "On Off", - .wakeup = 1, - }, - /* Two buttons detecting the lid state */ - { - .type = EV_SW, - .code = 0, - .gpio = 
SPITZ_GPIO_SWA, - .desc = "Display Down", - }, - { - .type = EV_SW, - .code = 1, - .gpio = SPITZ_GPIO_SWB, - .desc = "Lid Closed", - }, +static const struct software_node spitz_gpio_keys_node = { + .name = "spitz-gpio-keys", }; -static struct gpio_keys_platform_data spitz_gpio_keys_platform_data = { - .buttons = spitz_gpio_keys, - .nbuttons = ARRAY_SIZE(spitz_gpio_keys), +static const struct property_entry spitz_suspend_key_props[] = { + PROPERTY_ENTRY_U32("linux,input-type", EV_PWR), + PROPERTY_ENTRY_U32("linux,code", KEY_SUSPEND), + PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node, + SPITZ_GPIO_ON_KEY, GPIO_ACTIVE_HIGH), + PROPERTY_ENTRY_STRING("label", "On Off"), + PROPERTY_ENTRY_BOOL("wakeup-source"), + { } }; -static struct platform_device spitz_gpio_keys_device = { - .name = "gpio-keys", - .id = -1, - .dev = { - .platform_data = &spitz_gpio_keys_platform_data, - }, +static const struct software_node spitz_suspend_key_node = { + .parent = &spitz_gpio_keys_node, + .properties = spitz_suspend_key_props, +}; + +static const struct property_entry spitz_sw1_props[] = { + PROPERTY_ENTRY_U32("linux,input-type", EV_SW), + PROPERTY_ENTRY_U32("linux,code", 0), + PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node, + SPITZ_GPIO_SWA, GPIO_ACTIVE_HIGH), + PROPERTY_ENTRY_STRING("label", "Display Down"), + { } +}; + +static const struct software_node spitz_sw1_node = { + .parent = &spitz_gpio_keys_node, + .properties = spitz_sw1_props, +}; + +static const struct property_entry spitz_sw2_props[] = { + PROPERTY_ENTRY_U32("linux,input-type", EV_SW), + PROPERTY_ENTRY_U32("linux,code", 1), + PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node, + SPITZ_GPIO_SWB, GPIO_ACTIVE_HIGH), + PROPERTY_ENTRY_STRING("label", "Lid Closed"), + { } +}; + +static const struct software_node spitz_sw2_node = { + .parent = &spitz_gpio_keys_node, + .properties = spitz_sw2_props, +}; + +static const struct software_node *spitz_gpio_keys_swnodes[] = { + &spitz_gpio_keys_node, + &spitz_suspend_key_node, + &spitz_sw1_node, + &spitz_sw2_node, + NULL }; static void __init spitz_keys_init(void) { - platform_device_register(&spitz_gpio_keys_device); + struct platform_device_info keys_info = { + .name = "gpio-keys", + .id = PLATFORM_DEVID_NONE, + }; + struct platform_device *pd; + int err; + + err = software_node_register_node_group(spitz_gpio_keys_swnodes); + if (err) { + pr_err("failed to register gpio-keys software nodes: %d\n", err); + return; + } + + keys_info.fwnode = software_node_fwnode(&spitz_gpio_keys_node); + + pd = platform_device_register_full(&keys_info); + err = PTR_ERR_OR_ZERO(pd); + if (err) + pr_err("failed to create gpio-keys device: %d\n", err); } #else static inline void spitz_keys_init(void) {} diff --git a/arch/arm/mach-s3c/irq-uart-s3c64xx.h b/arch/arm/mach-s3c/irq-uart-s3c64xx.h index 78eccdce95a719..e754b0359c8a98 100644 --- a/arch/arm/mach-s3c/irq-uart-s3c64xx.h +++ b/arch/arm/mach-s3c/irq-uart-s3c64xx.h @@ -12,5 +12,3 @@ struct s3c_uart_irq { unsigned int parent_irq; }; -extern void s3c_init_uart_irqs(struct s3c_uart_irq *irq, unsigned int nr_irqs); - diff --git a/arch/arm/mach-s3c/mach-crag6410-module.c b/arch/arm/mach-s3c/mach-crag6410-module.c index 2de1a89f6e9976..4ffcf024b09df0 100644 --- a/arch/arm/mach-s3c/mach-crag6410-module.c +++ b/arch/arm/mach-s3c/mach-crag6410-module.c @@ -446,7 +446,7 @@ static int wlf_gf_module_probe(struct i2c_client *i2c) } static const struct i2c_device_id wlf_gf_module_id[] = { - { "wlf-gf-module", 0 }, + { "wlf-gf-module" }, { } }; diff --git a/arch/arm/mach-s3c/pm.h 
b/arch/arm/mach-s3c/pm.h index 35d266ab69583c..d48d1508852526 100644 --- a/arch/arm/mach-s3c/pm.h +++ b/arch/arm/mach-s3c/pm.h @@ -48,8 +48,6 @@ extern unsigned long s3c_pm_flags; /* from sleep.S */ -extern int s3c2410_cpu_suspend(unsigned long); - #ifdef CONFIG_PM_SLEEP extern int s3c_irq_wake(struct irq_data *data, unsigned int state); extern void s3c_cpu_resume(void); diff --git a/arch/arm/mach-s3c/s3c64xx.h b/arch/arm/mach-s3c/s3c64xx.h index 92258e4f60f609..0505728f3f7b31 100644 --- a/arch/arm/mach-s3c/s3c64xx.h +++ b/arch/arm/mach-s3c/s3c64xx.h @@ -23,17 +23,6 @@ struct device_node; void s3c64xx_set_xtal_freq(unsigned long freq); void s3c64xx_set_xusbxti_freq(unsigned long freq); -#ifdef CONFIG_CPU_S3C6400 - -extern int s3c6400_init(void); -extern void s3c6400_init_irq(void); -extern void s3c6400_map_io(void); - -#else -#define s3c6400_map_io NULL -#define s3c6400_init NULL -#endif - #ifdef CONFIG_CPU_S3C6410 extern int s3c6410_init(void); diff --git a/arch/arm/mach-versatile/platsmp-realview.c b/arch/arm/mach-versatile/platsmp-realview.c index 6965a1de727b07..d38b2e174257e8 100644 --- a/arch/arm/mach-versatile/platsmp-realview.c +++ b/arch/arm/mach-versatile/platsmp-realview.c @@ -70,6 +70,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus) return; } map = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(map)) { pr_err("PLATSMP: No syscon regmap\n"); return; diff --git a/arch/arm/mach-versatile/spc.c b/arch/arm/mach-versatile/spc.c index 5e44170e1a9afb..790092734cf615 100644 --- a/arch/arm/mach-versatile/spc.c +++ b/arch/arm/mach-versatile/spc.c @@ -73,7 +73,7 @@ /* * Even though the SPC takes max 3-5 ms to complete any OPP/COMMS - * operation, the operation could start just before jiffie is about + * operation, the operation could start just before jiffy is about * to be incremented. So setting timeout value of 20ms = 2jiffies@100Hz */ #define TIMEOUT_US 20000 diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index f8dd0b3cc8e040..3c6ddb1afdc463 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include #include "fault.h" diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 5adf1769eee424..88c2d68a69c9ee 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1532,7 +1532,7 @@ static const struct dma_map_ops iommu_ops = { /** * arm_iommu_create_mapping - * @bus: pointer to the bus holding the client device (for IOMMU calls) + * @dev: pointer to the client device (for IOMMU calls) * @base: start address of the valid IO address space * @size: maximum size of the valid IO address space * @@ -1544,7 +1544,7 @@ static const struct dma_map_ops iommu_ops = { * arm_iommu_attach_device function. 
*/ struct dma_iommu_mapping * -arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size) +arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size) { unsigned int bits = size >> PAGE_SHIFT; unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); @@ -1585,9 +1585,11 @@ arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size) spin_lock_init(&mapping->lock); - mapping->domain = iommu_domain_alloc(bus); - if (!mapping->domain) + mapping->domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(mapping->domain)) { + err = PTR_ERR(mapping->domain); goto err4; + } kref_init(&mapping->kref); return mapping; @@ -1718,7 +1720,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev) dma_base = dma_range_map_min(dev->dma_range_map); size = dma_range_map_max(dev->dma_range_map) - dma_base; } - mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); + mapping = arm_iommu_create_mapping(dev, dma_base, size); if (IS_ERR(mapping)) { pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", size, dev_name(dev)); diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 2286c2ea60ec48..831793cd6ff944 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -61,7 +61,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, return ret; } -#if USE_SPLIT_PTE_PTLOCKS +#if defined(CONFIG_SPLIT_PTE_PTLOCKS) /* * If we are using split PTE locks, then we need to take the page * lock here. Otherwise we are using shared mm->page_table_lock @@ -80,10 +80,10 @@ static inline void do_pte_unlock(spinlock_t *ptl) { spin_unlock(ptl); } -#else /* !USE_SPLIT_PTE_PTLOCKS */ +#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */ static inline void do_pte_lock(spinlock_t *ptl) {} static inline void do_pte_unlock(spinlock_t *ptl) {} -#endif /* USE_SPLIT_PTE_PTLOCKS */ +#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index d65d0e6ed10ab4..3dbb383c26d543 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -28,7 +28,8 @@ */ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -78,8 +79,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) + const unsigned long len, const unsigned long pgoff, + const unsigned long flags, vm_flags_t vm_flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 3f774856ca6763..f85c177cdf8d2c 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1638,7 +1638,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc) { pgtables_remap *lpae_pgtables_remap; unsigned long pa_pgd; - unsigned int cr, ttbcr; + u32 cr, ttbcr, tmp; long long offset; if (!mdesc->pv_fixup) @@ -1688,7 +1688,9 @@ static void __init early_paging_init(const struct machine_desc *mdesc) cr = get_cr(); set_cr(cr & ~(CR_I | CR_C)); ttbcr = cpu_get_ttbcr(); - cpu_set_ttbcr(ttbcr & ~(3 << 8 | 3 << 10)); + /* Disable all kind of caching of the translation 
table */ + tmp = ttbcr & ~(TTBCR_ORGN0_MASK | TTBCR_IRGN0_MASK); + cpu_set_ttbcr(tmp); flush_cache_all(); /* diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h index 3c7938fd40aad6..32090b0fb250b8 100644 --- a/arch/arm/vfp/vfpinstr.h +++ b/arch/arm/vfp/vfpinstr.h @@ -64,33 +64,37 @@ #ifdef CONFIG_AS_VFP_VMRS_FPINST -#define fmrx(_vfp_) ({ \ - u32 __v; \ - asm(".fpu vfpv2\n" \ - "vmrs %0, " #_vfp_ \ - : "=r" (__v) : : "cc"); \ - __v; \ - }) - -#define fmxr(_vfp_,_var_) \ - asm(".fpu vfpv2\n" \ - "vmsr " #_vfp_ ", %0" \ - : : "r" (_var_) : "cc") +#define fmrx(_vfp_) ({ \ + u32 __v; \ + asm volatile (".fpu vfpv2\n" \ + "vmrs %0, " #_vfp_ \ + : "=r" (__v) : : "cc"); \ + __v; \ +}) + +#define fmxr(_vfp_, _var_) ({ \ + asm volatile (".fpu vfpv2\n" \ + "vmsr " #_vfp_ ", %0" \ + : : "r" (_var_) : "cc"); \ +}) #else #define vfpreg(_vfp_) #_vfp_ -#define fmrx(_vfp_) ({ \ - u32 __v; \ - asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \ - : "=r" (__v) : : "cc"); \ - __v; \ - }) - -#define fmxr(_vfp_,_var_) \ - asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \ - : : "r" (_var_) : "cc") +#define fmrx(_vfp_) ({ \ + u32 __v; \ + asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \ + "cr0, 0 @ fmrx %0, " #_vfp_ \ + : "=r" (__v) : : "cc"); \ + __v; \ +}) + +#define fmxr(_vfp_, _var_) ({ \ + asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \ + "cr0, 0 @ fmxr " #_vfp_ ", %0" \ + : : "r" (_var_) : "cc"); \ +}) #endif diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a2f8ff354ca670..fd9df6dcc59372 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -24,6 +24,7 @@ config ARM64 select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE + select ARCH_HAS_DMA_OPS if XEN select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_FAST_MULTIPLIER @@ -34,6 +35,7 @@ config ARM64 select ARCH_HAS_KERNEL_FPU_SUPPORT if KERNEL_MODE_NEON select ARCH_HAS_KEEPINITRD select ARCH_HAS_MEMBARRIER_SYNC_CORE + select ARCH_HAS_MEM_ENCRYPT select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PTE_DEVMAP @@ -99,6 +101,8 @@ config ARM64 select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_SUPPORTS_PAGE_TABLE_CHECK select ARCH_SUPPORTS_PER_VMA_LOCK + select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE + select ARCH_SUPPORTS_RT select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT select ARCH_WANT_DEFAULT_BPF_JIT @@ -196,7 +200,8 @@ config ARM64 select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_ARGS \ - if $(cc-option,-fpatchable-function-entry=2) + if (GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS || \ + CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS) select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \ if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \ @@ -231,7 +236,7 @@ config ARM64 select HAVE_FUNCTION_ARG_ACCESS_API select MMU_GATHER_RCU_TABLE_FREE select HAVE_RSEQ - select HAVE_RUST if CPU_LITTLE_ENDIAN + select HAVE_RUST if RUSTC_SUPPORTS_ARM64 select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_KPROBES @@ -262,19 +267,30 @@ config ARM64 select TRACE_IRQFLAGS_NMI_SUPPORT select HAVE_SOFTIRQ_ON_OWN_STACK select USER_STACKTRACE_SUPPORT + select VDSO_GETRANDOM help ARM 64-bit (AArch64) Linux support. 
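The vfpinstr.h hunk above changes fmrx()/fmxr() from plain asm to asm volatile. As an illustrative aside (not part of any patch in this series), the sketch below shows what the qualifier buys for a register-read macro: without volatile, two expansions with identical operands may be merged, hoisted, or dropped by the compiler, whereas volatile forces every read to be emitted in program order, which is what reads of hardware status registers such as FPEXC/FPSCR need.

/*
 * Hypothetical stand-alone demonstration (AArch64 user space), not kernel
 * code: read the virtual counter twice and print the delta.
 */
#include <stdio.h>

/*
 * Without "volatile" the compiler may treat the asm as a pure function of
 * its (non-existent) inputs and fold the two reads into one; with
 * "volatile" both reads are emitted exactly where they appear.
 */
#define read_cntvct() ({						\
	unsigned long __v;						\
	asm volatile("mrs %0, cntvct_el0" : "=r" (__v));		\
	__v;								\
})

int main(void)
{
	unsigned long a = read_cntvct();
	unsigned long b = read_cntvct();	/* a genuine second read */

	printf("counter delta: %lu\n", b - a);
	return 0;
}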
+config RUSTC_SUPPORTS_ARM64 + def_bool y + depends on CPU_LITTLE_ENDIAN + # Shadow call stack is only supported on certain rustc versions. + # + # When using the UNWIND_PATCH_PAC_INTO_SCS option, rustc version 1.80+ is + # required due to use of the -Zfixed-x18 flag. + # + # Otherwise, rustc version 1.82+ is required due to use of the + # -Zsanitizer=shadow-call-stack flag. + depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200 || RUSTC_VERSION >= 108000 && UNWIND_PATCH_PAC_INTO_SCS + config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS def_bool CC_IS_CLANG # https://github.com/ClangBuiltLinux/linux/issues/1507 depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600)) - select HAVE_DYNAMIC_FTRACE_WITH_ARGS config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS def_bool CC_IS_GCC depends on $(cc-option,-fpatchable-function-entry=2) - select HAVE_DYNAMIC_FTRACE_WITH_ARGS config 64BIT def_bool y @@ -423,7 +439,7 @@ config AMPERE_ERRATUM_AC03_CPU_38 default y help This option adds an alternative code sequence to work around Ampere - erratum AC03_CPU_38 on AmpereOne. + errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne. The affected design reports FEAT_HAFDBS as not implemented in ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0 @@ -1080,6 +1096,7 @@ config ARM64_ERRATUM_3194386 * ARM Cortex-A78C erratum 3324346 * ARM Cortex-A78C erratum 3324347 * ARM Cortex-A710 erratam 3324338 + * ARM Cortex-A715 errartum 3456084 * ARM Cortex-A720 erratum 3456091 * ARM Cortex-A725 erratum 3456106 * ARM Cortex-X1 erratum 3324344 @@ -1090,6 +1107,7 @@ config ARM64_ERRATUM_3194386 * ARM Cortex-X925 erratum 3324334 * ARM Neoverse-N1 erratum 3324349 * ARM Neoverse N2 erratum 3324339 + * ARM Neoverse-N3 erratum 3456111 * ARM Neoverse-V1 erratum 3324341 * ARM Neoverse V2 erratum 3324336 * ARM Neoverse-V3 erratum 3312417 @@ -2100,7 +2118,8 @@ config ARM64_MTE depends on ARM64_PAN select ARCH_HAS_SUBPAGE_FAULTS select ARCH_USES_HIGH_VMA_FLAGS - select ARCH_USES_PG_ARCH_X + select ARCH_USES_PG_ARCH_2 + select ARCH_USES_PG_ARCH_3 help Memory Tagging (part of the ARMv8.5 Extensions) provides architectural support for run-time, always-on detection of @@ -2137,6 +2156,29 @@ config ARM64_EPAN if the cpu does not implement the feature. endmenu # "ARMv8.7 architectural features" +menu "ARMv8.9 architectural features" + +config ARM64_POE + prompt "Permission Overlay Extension" + def_bool y + select ARCH_USES_HIGH_VMA_FLAGS + select ARCH_HAS_PKEYS + help + The Permission Overlay Extension is used to implement Memory + Protection Keys. Memory Protection Keys provides a mechanism for + enforcing page-based protections, but without requiring modification + of the page tables when an application changes protection domains. + + For details, see Documentation/core-api/protection-keys.rst + + If unsure, say y. 
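For context, the Memory Protection Keys interface that the ARM64_POE help text refers to is the one documented in Documentation/core-api/protection-keys.rst. A minimal user-space sketch, assuming the glibc pkey_alloc()/pkey_mprotect()/pkey_set() wrappers described in pkeys(7) are available, looks roughly like this; it is illustrative only and not part of this series:

/* Illustrative pkeys usage sketch; assumes glibc wrappers from pkeys(7). */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	int pkey = pkey_alloc(0, 0);		/* no initial restrictions */

	if (buf == MAP_FAILED || pkey < 0) {
		perror("pkeys setup");
		return 1;
	}

	/*
	 * Associate the key with the mapping once; later changes to the
	 * key's access rights do not modify the page tables, which is the
	 * point the help text makes.
	 */
	if (pkey_mprotect(buf, len, PROT_READ | PROT_WRITE, pkey)) {
		perror("pkey_mprotect");
		return 1;
	}

	buf[0] = 1;				/* write allowed */
	pkey_set(pkey, PKEY_DISABLE_WRITE);	/* revoke write for this thread */
	/* buf[0] = 2; would now raise SIGSEGV with si_code SEGV_PKUERR */
	pkey_set(pkey, 0);			/* restore full access */

	pkey_free(pkey);
	return 0;
}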
+ +config ARCH_PKEY_BITS + int + default 3 + +endmenu # "ARMv8.9 architectural features" + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f6bc3da1ef110e..9efd3f37c2fd9d 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -10,7 +10,7 @@ # # Copyright (C) 1995-2001 by Russell King -LDFLAGS_vmlinux :=--no-undefined -X +LDFLAGS_vmlinux :=--no-undefined -X --pic-veneer ifeq ($(CONFIG_RELOCATABLE), y) # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour @@ -57,9 +57,11 @@ KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) ifneq ($(CONFIG_UNWIND_TABLES),y) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables +KBUILD_RUSTFLAGS += -Cforce-unwind-tables=n else KBUILD_CFLAGS += -fasynchronous-unwind-tables KBUILD_AFLAGS += -fasynchronous-unwind-tables +KBUILD_RUSTFLAGS += -Cforce-unwind-tables=y -Zuse-sync-unwind=n endif ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) @@ -114,6 +116,7 @@ endif ifeq ($(CONFIG_SHADOW_CALL_STACK), y) KBUILD_CFLAGS += -ffixed-x18 +KBUILD_RUSTFLAGS += -Zfixed-x18 endif ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile index 607a67a649c463..b5a08333bc57b5 100644 --- a/arch/arm64/boot/Makefile +++ b/arch/arm64/boot/Makefile @@ -17,7 +17,7 @@ OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo \ - Image.zst image.fit + Image.zst Image.xz image.fit $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) @@ -40,6 +40,9 @@ $(obj)/Image.lzo: $(obj)/Image FORCE $(obj)/Image.zst: $(obj)/Image FORCE $(call if_changed,zstd) +$(obj)/Image.xz: $(obj)/Image FORCE + $(call if_changed,xzkern) + $(obj)/image.fit: $(obj)/Image $(obj)/dts/dtbs-list FORCE $(call if_changed,fit) diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile index 0db7b60b49a196..00bed412ee31c5 100644 --- a/arch/arm64/boot/dts/allwinner/Makefile +++ b/arch/arm64/boot/dts/allwinner/Makefile @@ -49,5 +49,6 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero2w.dtb dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero3.dtb dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-transpeed-8k618-t.dtb dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-2024.dtb -dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-plus.dtb dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-h.dtb +dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-plus.dtb +dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h700-anbernic-rg35xx-sp.dtb diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index e868ca5ae753a9..a5c3920e0f048e 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -263,6 +263,14 @@ gpu0_thermal: gpu0-thermal { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&ths 1>; + + trips { + gpu0_crit: gpu0-crit { + temperature = <110000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; gpu1_thermal: gpu1-thermal { @@ -270,6 +278,14 @@ gpu1_thermal: gpu1-thermal { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&ths 2>; + + trips { + gpu1_crit: gpu1-crit { + temperature = <110000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts 
b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts index b69032c4455754..526443bb736c36 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts @@ -45,16 +45,40 @@ reg_gmac_3v3: gmac-3v3 { startup-delay-us = <100000>; enable-active-high; gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>; + vin-supply = <®_vcc3v3>; + }; + + reg_gmac_2v5: gmac-2v5 { + /* 2V5 supply for GMAC PHY IO */ + compatible = "regulator-fixed"; + regulator-name = "gmac-2v5"; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + regulator-always-on; + vin-supply = <®_vcc3v3>; + }; + + reg_vcc5v: regulator-vcc5v { + /* board 5V supply from micro USB or pin headers */ + compatible = "regulator-fixed"; + regulator-name = "vcc-5v"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; }; reg_vcc3v3: vcc3v3 { + /* board 3V3 supply by SY8089A */ compatible = "regulator-fixed"; regulator-name = "vcc3v3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; + regulator-always-on; + vin-supply = <®_vcc5v>; }; vdd_cpux: gpio-regulator { + /* cpu voltage regulator MP2143DJ */ compatible = "regulator-gpio"; regulator-name = "vdd-cpux"; regulator-type = "voltage"; @@ -66,6 +90,7 @@ vdd_cpux: gpio-regulator { gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; gpios-states = <0x1>; states = <1100000 0>, <1300000 1>; + vin-supply = <®_vcc5v>; }; wifi_pwrseq: pwrseq { @@ -146,6 +171,18 @@ &ohci3 { status = "okay"; }; +&pio { + vcc-pa-supply = <®_vcc3v3>; + vcc-pc-supply = <®_vcc3v3>; + vcc-pd-supply = <®_gmac_2v5>; + vcc-pf-supply = <®_vcc3v3>; + vcc-pg-supply = <®_vcc3v3>; +}; + +&r_pio { + vcc-pl-supply = <®_vcc3v3>; +}; + &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pa_pins>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi index b29ce7321317b6..e88c1fbac6acc8 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h616.dtsi @@ -914,6 +914,8 @@ r_i2c: i2c@7081400 { dmas = <&dma 48>, <&dma 48>; dma-names = "rx", "tx"; resets = <&r_ccu RST_R_APB2_I2C>; + pinctrl-names = "default"; + pinctrl-0 = <&r_i2c_pins>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts index afb49e65859f98..80ccab7b5ba750 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts @@ -21,6 +21,12 @@ aliases { serial0 = &uart0; }; + battery: battery { + compatible = "simple-battery"; + constant-charge-current-max-microamp = <1024000>; + voltage-max-design-microvolt = <4200000>; + }; + chosen { stdout-path = "serial0:115200n8"; }; @@ -201,12 +207,12 @@ &pio { vcc-pi-supply = <®_cldo3>; }; -&r_rsb { +&r_i2c { status = "okay"; - axp717: pmic@3a3 { + axp717: pmic@34 { compatible = "x-powers,axp717"; - reg = <0x3a3>; + reg = <0x34>; interrupt-controller; #interrupt-cells = <1>; interrupt-parent = <&nmi_intc>; @@ -217,6 +223,16 @@ axp717: pmic@3a3 { vin3-supply = <®_vcc5v>; vin4-supply = <®_vcc5v>; + axp_adc: adc { + compatible = "x-powers,axp717-adc"; + #io-channel-cells = <1>; + }; + + battery_power: battery-power { + compatible = "x-powers,axp717-battery-power-supply"; + monitored-battery = <&battery>; + }; + regulators { reg_dcdc1: dcdc1 { 
regulator-always-on; @@ -307,6 +323,11 @@ reg_cpusldo: cpusldo { /* unused */ }; }; + + usb_power: usb-power { + compatible = "x-powers,axp717-usb-power-supply"; + input-current-limit-microamp = <1500000>; + }; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-sp.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-sp.dts new file mode 100644 index 00000000000000..0cf16dc903cd6a --- /dev/null +++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-sp.dts @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * Copyright (C) 2024 Ryan Walklin . + * Copyright (C) 2024 Chris Morgan . + */ + +#include +#include "sun50i-h700-anbernic-rg35xx-plus.dts" + +/ { + model = "Anbernic RG35XX SP"; + compatible = "anbernic,rg35xx-sp", "allwinner,sun50i-h700"; + + gpio-keys-lid { + compatible = "gpio-keys"; + + lid-switch { + label = "Lid Switch"; + gpios = <&pio 4 7 GPIO_ACTIVE_LOW>; /* PE7 */ + linux,can-disable; + linux,code = ; + linux,input-type = ; + wakeup-event-action = ; + wakeup-source; + }; + }; +}; + +&r_i2c { + rtc_ext: rtc@51 { + compatible = "nxp,pcf8563"; + reg = <0x51>; + }; +}; diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile index 29417f04f88668..2fbda8419c65a3 100644 --- a/arch/arm64/boot/dts/amlogic/Makefile +++ b/arch/arm64/boot/dts/amlogic/Makefile @@ -2,6 +2,7 @@ dtb-$(CONFIG_ARCH_MESON) += amlogic-a4-a113l2-ba400.dtb dtb-$(CONFIG_ARCH_MESON) += amlogic-a5-a113x2-av400.dtb dtb-$(CONFIG_ARCH_MESON) += amlogic-c3-c302x-aw409.dtb +dtb-$(CONFIG_ARCH_MESON) += amlogic-c3-c308l-aw419.dtb dtb-$(CONFIG_ARCH_MESON) += amlogic-t7-a311d2-an400.dtb dtb-$(CONFIG_ARCH_MESON) += amlogic-t7-a311d2-khadas-vim4.dtb dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi index b6106ad4a07261..54d7a2d56ef649 100644 --- a/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/amlogic-a4-common.dtsi @@ -52,6 +52,12 @@ apb: bus@fe000000 { #size-cells = <2>; ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>; + watchdog@2100 { + compatible = "amlogic,a4-wdt", "amlogic,t7-wdt"; + reg = <0x0 0x2100 0x0 0x10>; + clocks = <&xtal>; + }; + uart_b: serial@7a000 { compatible = "amlogic,a4-uart", "amlogic,meson-s4-uart"; @@ -61,6 +67,14 @@ uart_b: serial@7a000 { clock-names = "xtal", "pclk", "baud"; status = "disabled"; }; + + sec_ao: ao-secure@10220 { + compatible = "amlogic,a4-ao-secure", + "amlogic,meson-gx-ao-secure", + "syscon"; + reg = <0x0 0x10220 0x0 0x140>; + amlogic,has-chip-id; + }; }; }; }; diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi index 43f68a7da2f742..17a6316de8918f 100644 --- a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi +++ b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi @@ -4,6 +4,7 @@ */ #include "amlogic-a4-common.dtsi" +#include / { cpus { #address-cells = <2>; @@ -37,4 +38,13 @@ cpu3: cpu@300 { enable-method = "psci"; }; }; + + sm: secure-monitor { + compatible = "amlogic,meson-gxbb-sm"; + + pwrc: power-controller { + compatible = "amlogic,a5-pwrc"; + #power-domain-cells = <1>; + }; + }; }; diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts b/arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts index edce8850b33860..a6736ad2a648e9 100644 --- a/arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts +++ b/arch/arm64/boot/dts/amlogic/amlogic-c3-c302x-aw409.dts @@ -16,14 
+16,245 @@ / { aliases { serial0 = &uart_b; + spi0 = &spifc; }; memory@0 { device_type = "memory"; reg = <0x0 0x0 0x0 0x10000000>; }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* 9 MiB reserved for ARM Trusted Firmware */ + secmon_reserved: secmon@7f00000 { + compatible = "shared-dma-pool"; + reg = <0x0 0x07f00000 0x0 0x900000>; + no-map; + }; + }; + + main_12v: regulator-main-12v { + compatible = "regulator-fixed"; + regulator-name = "12V"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-boot-on; + regulator-always-on; + }; + + vcc_5v: regulator-vcc-5v { + compatible = "regulator-fixed"; + regulator-name = "VCC5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&main_12v>; + regulator-boot-on; + regulator-always-on; + }; + + vddq: regulator-vddq { + compatible = "regulator-fixed"; + regulator-name = "VDDQ"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + vin-supply = <&main_12v>; + regulator-boot-on; + regulator-always-on; + }; + + vddao_3v3: regulator-vddao-3v3 { + compatible = "regulator-fixed"; + regulator-name = "VDDAO3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&main_12v>; + regulator-boot-on; + regulator-always-on; + }; + + vddao_1v8: regulator-vddao-1v8 { + compatible = "regulator-fixed"; + regulator-name = "VDDAO1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vddao_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + ddr4_2v5: regulator-ddr4-2v5 { + compatible = "regulator-fixed"; + regulator-name = "DDR4_2V5"; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + vin-supply = <&vddao_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + vcc_3v3: regulator-vcc-3v3 { + compatible = "regulator-fixed"; + regulator-name = "VCC3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vddao_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + vcc_1v8: regulator-vcc-1v8 { + compatible = "regulator-fixed"; + regulator-name = "VCC1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vcc_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + vdd_1v8: regulator-vdd-1v8 { + compatible = "regulator-fixed"; + regulator-name = "VDD1V8_BOOT"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vcc_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + vddio_b: regulator-vddio-3v3-b { + compatible = "regulator-fixed"; + regulator-name = "VDDIO_B"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + sdcard: regulator-sdcard { + compatible = "regulator-fixed"; + regulator-name = "SDCARD_POWER"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vddao_3v3>; + gpio = <&gpio GPIOA_4 GPIO_ACTIVE_LOW>; + regulator-boot-on; + regulator-always-on; + }; }; &uart_b { status = "okay"; }; + +&nand { + status = "okay"; + #address-cells = <1>; + #size-cells = <0>; + + pinctrl-0 = <&nand_pins>; + pinctrl-names = "default"; + + nand@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + nand-on-flash-bbt; + + partition@0 { + label = "boot"; + reg = <0x0 0x00200000>; + }; + partition@200000 { + label = "env"; + 
reg = <0x00200000 0x00400000>; + }; + partition@600000 { + label = "system"; + reg = <0x00600000 0x00a00000>; + }; + partition@1000000 { + label = "rootfs"; + reg = <0x01000000 0x03000000>; + }; + partition@4000000 { + label = "media"; + reg = <0x04000000 0x8000000>; + }; + }; +}; + +ðmac { + status = "okay"; + phy-handle = <&internal_ephy>; + phy-mode = "rmii"; +}; + +&spifc { + #address-cells = <1>; + #size-cells = <0>; + + pinctrl-0 = <&spifc_pins>; + pinctrl-names = "default"; + + nand@0 { + compatible = "spi-nand"; + reg = <0>; + spi-max-frequency = <83000000>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + status = "disabled"; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "boot"; + reg = <0 0x200000>; + }; + + partition@200000 { + label = "env"; + reg = <0x200000 0x400000>; + }; + + partition@600000 { + label = "system"; + reg = <0x600000 0xa00000>; + }; + + partition@1000000 { + label = "rootfs"; + reg = <0x1000000 0x3000000>; + }; + + partition@4000000 { + label = "data"; + reg = <0x4000000 0x8000000>; + }; + }; + }; +}; + +&sd { + status = "okay"; + pinctrl-0 = <&sdcard_pins>; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default","clk-gate"; + + bus-width = <4>; + cap-sd-highspeed; + max-frequency = <50000000>; + disable-wp; + + cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>; + vmmc-supply = <&sdcard>; + vqmmc-supply = <&sdcard>; +}; diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3-c308l-aw419.dts b/arch/arm64/boot/dts/amlogic/amlogic-c3-c308l-aw419.dts new file mode 100644 index 00000000000000..45f8631f9feb6e --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/amlogic-c3-c308l-aw419.dts @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2024 Amlogic, Inc. All rights reserved. 
+ */ + +/dts-v1/; + +#include "amlogic-c3.dtsi" + +/ { + model = "Amlogic C308l aw419 Development Board"; + compatible = "amlogic,aw419", "amlogic,c3"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + serial0 = &uart_b; + spi0 = &spifc; + }; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x0 0x0 0x80000000>; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* 9 MiB reserved for ARM Trusted Firmware */ + secmon_reserved: secmon@7f00000 { + compatible = "shared-dma-pool"; + reg = <0x0 0x07f00000 0x0 0x900000>; + no-map; + }; + }; + + main_12v: regulator-main-12v { + compatible = "regulator-fixed"; + regulator-name = "12V"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-boot-on; + regulator-always-on; + }; + + vcc_5v: regulator-vcc-5v { + compatible = "regulator-fixed"; + regulator-name = "VCC5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&main_12v>; + regulator-boot-on; + regulator-always-on; + }; + + vddq: regulator-vddq { + compatible = "regulator-fixed"; + regulator-name = "VDDQ"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + vin-supply = <&main_12v>; + regulator-boot-on; + regulator-always-on; + }; + + vddao_3v3: regulator-vddao-3v3 { + compatible = "regulator-fixed"; + regulator-name = "VDDAO3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&main_12v>; + regulator-boot-on; + regulator-always-on; + }; + + vddao_1v8: regulator-vddao-1v8 { + compatible = "regulator-fixed"; + regulator-name = "VDDAO1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vddao_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + ddr4_2v5: regulator-ddr4-2v5 { + compatible = "regulator-fixed"; + regulator-name = "DDR4_2V5"; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + vin-supply = <&vddao_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + vcc_3v3: regulator-vcc-3v3 { + compatible = "regulator-fixed"; + regulator-name = "VCC3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vddao_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + vcc_1v8: regulator-vcc-1v8 { + compatible = "regulator-fixed"; + regulator-name = "VCC1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vcc_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + vdd_1v8: regulator-vdd-1v8 { + compatible = "regulator-fixed"; + regulator-name = "VDD1V8_BOOT"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vcc_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + vddio_b: regulator-vddio-3v3-b { + compatible = "regulator-fixed"; + regulator-name = "VDDIO_B"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_3v3>; + regulator-boot-on; + regulator-always-on; + }; + + sdcard: regulator-sdcard { + compatible = "regulator-fixed"; + regulator-name = "SDCARD_POWER"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vddao_3v3>; + gpio = <&gpio GPIOA_4 GPIO_ACTIVE_LOW>; + regulator-boot-on; + regulator-always-on; + }; +}; + +&uart_b { + status = "okay"; +}; + +&nand { + status = "okay"; + #address-cells = <1>; + #size-cells = <0>; + + pinctrl-0 = 
<&nand_pins>; + pinctrl-names = "default"; + + nand@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + nand-on-flash-bbt; + + partition@0 { + label = "boot"; + reg = <0x0 0x00200000>; + }; + partition@200000 { + label = "env"; + reg = <0x00200000 0x00400000>; + }; + partition@600000 { + label = "system"; + reg = <0x00600000 0x00a00000>; + }; + partition@1000000 { + label = "rootfs"; + reg = <0x01000000 0x03000000>; + }; + partition@4000000 { + label = "media"; + reg = <0x04000000 0x8000000>; + }; + }; +}; + +ðmac { + status = "okay"; + phy-handle = <&internal_ephy>; + phy-mode = "rmii"; +}; + +&spifc { + #address-cells = <1>; + #size-cells = <0>; + + pinctrl-0 = <&spifc_pins>; + pinctrl-names = "default"; + + nand@0 { + compatible = "spi-nand"; + reg = <0>; + spi-max-frequency = <83000000>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + status = "disabled"; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "boot"; + reg = <0 0x200000>; + }; + + partition@200000 { + label = "env"; + reg = <0x200000 0x400000>; + }; + + partition@600000 { + label = "system"; + reg = <0x600000 0xa00000>; + }; + + partition@1000000 { + label = "rootfs"; + reg = <0x1000000 0x3000000>; + }; + + partition@4000000 { + label = "data"; + reg = <0x4000000 0x8000000>; + }; + }; + }; +}; + +&sd { + status = "okay"; + pinctrl-0 = <&sdcard_pins>; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default","clk-gate"; + + bus-width = <4>; + cap-sd-highspeed; + max-frequency = <50000000>; + disable-wp; + + cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>; + vmmc-supply = <&sdcard>; + vqmmc-supply = <&sdcard>; +}; diff --git a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi index f8fb060c49aebb..d0cda759c25d07 100644 --- a/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi +++ b/arch/arm64/boot/dts/amlogic/amlogic-c3.dtsi @@ -7,6 +7,11 @@ #include #include #include +#include +#include +#include +#include +#include / { cpus { @@ -57,6 +62,34 @@ pwrc: power-controller { }; }; + sram@7f50e00 { + compatible = "mmio-sram"; + reg = <0x0 0x07f50e00 0x0 0x100>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0x07f50e00 0x100>; + + scmi_shmem: sram@0 { + compatible = "arm,scmi-shmem"; + reg = <0x0 0x100>; + }; + }; + + firmware { + scmi: scmi { + compatible = "arm,scmi-smc"; + arm,smc-id = <0x820000C1>; + shmem = <&scmi_shmem>; + #address-cells = <1>; + #size-cells = <0>; + + scmi_clk: protocol@14 { + reg = <0x14>; + #clock-cells = <1>; + }; + }; + }; + soc { compatible = "simple-bus"; #address-cells = <2>; @@ -82,6 +115,44 @@ apb4: bus@fe000000 { #size-cells = <2>; ranges = <0x0 0x0 0x0 0xfe000000 0x0 0x480000>; + clkc_periphs: clock-controller@0 { + compatible = "amlogic,c3-peripherals-clkc"; + reg = <0x0 0x0 0x0 0x49c>; + #clock-cells = <1>; + clocks = <&xtal>, + <&scmi_clk CLKID_OSC>, + <&scmi_clk CLKID_FIXED_PLL_OSC>, + <&clkc_pll CLKID_FCLK_DIV2>, + <&clkc_pll CLKID_FCLK_DIV2P5>, + <&clkc_pll CLKID_FCLK_DIV3>, + <&clkc_pll CLKID_FCLK_DIV4>, + <&clkc_pll CLKID_FCLK_DIV5>, + <&clkc_pll CLKID_FCLK_DIV7>, + <&clkc_pll CLKID_GP0_PLL>, + <&scmi_clk CLKID_GP1_PLL_OSC>, + <&clkc_pll CLKID_HIFI_PLL>, + <&scmi_clk CLKID_SYS_CLK>, + <&scmi_clk CLKID_AXI_CLK>, + <&scmi_clk CLKID_SYS_PLL_DIV16>, + <&scmi_clk CLKID_CPU_CLK_DIV16>; + clock-names = "xtal_24m", + "oscin", + "fix", + "fdiv2", + "fdiv2p5", + "fdiv3", + "fdiv4", + "fdiv5", + "fdiv7", + "gp0", + "gp1", + "hifi", + "sysclk", + "axiclk", + 
"sysplldiv16", + "cpudiv16"; + }; + reset: reset-controller@2000 { compatible = "amlogic,c3-reset"; reg = <0x0 0x2000 0x0 0x98>; @@ -98,16 +169,247 @@ periphs_pinctrl: pinctrl@4000 { compatible = "amlogic,c3-periphs-pinctrl"; #address-cells = <2>; #size-cells = <2>; - ranges; + ranges = <0x0 0x0 0x0 0x4000 0x0 0x02de>; - gpio: bank@4000 { - reg = <0x0 0x4000 0x0 0x004c>, - <0x0 0x4100 0x0 0x01de>; + gpio: bank@0 { + reg = <0x0 0x0 0x0 0x004c>, + <0x0 0x100 0x0 0x01de>; reg-names = "mux", "gpio"; gpio-controller; #gpio-cells = <2>; gpio-ranges = <&periphs_pinctrl 0 0 55>; }; + + i2c0_pins1: i2c0-pins1 { + mux { + groups = "i2c0_sda_e", + "i2c0_scl_e"; + function = "i2c0"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c0_pins2: i2c0-pins2 { + mux { + groups = "i2c0_sda_d", + "i2c0_scl_d"; + function = "i2c0"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c1_pins1: i2c1-pins1 { + mux { + groups = "i2c1_sda_x", + "i2c1_scl_x"; + function = "i2c1"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c1_pins2: i2c1-pins2 { + mux { + groups = "i2c1_sda_d", + "i2c1_scl_d"; + function = "i2c1"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c1_pins3: i2c1-pins3 { + mux { + groups = "i2c1_sda_a", + "i2c1_scl_a"; + function = "i2c1"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c1_pins4: i2c1-pins4 { + mux { + groups = "i2c1_sda_b", + "i2c1_scl_b"; + function = "i2c1"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c2_pins1: i2c2-pins1 { + mux { + groups = "i2c2_sda", + "i2c2_scl"; + function = "i2c2"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c3_pins1: i2c3-pins1 { + mux { + groups = "i2c3_sda_c", + "i2c3_scl_c"; + function = "i2c3"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c3_pins2: i2c3-pins2 { + mux { + groups = "i2c3_sda_x", + "i2c3_scl_x"; + function = "i2c3"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + i2c3_pins3: i2c3-pins3 { + mux { + groups = "i2c3_sda_d", + "i2c3_scl_d"; + function = "i2c3"; + bias-disable; + drive-strength-microamp = <3000>; + }; + }; + + nand_pins: nand-pins { + mux { + groups = "emmc_nand_d0", + "emmc_nand_d1", + "emmc_nand_d2", + "emmc_nand_d3", + "emmc_nand_d4", + "emmc_nand_d5", + "emmc_nand_d6", + "emmc_nand_d7", + "nand_ce0", + "nand_ale", + "nand_cle", + "nand_wen_clk", + "nand_ren_wr"; + function = "nand"; + input-enable; + }; + }; + + sdcard_pins: sdcard-pins { + mux { + groups = "sdcard_d0", + "sdcard_d1", + "sdcard_d2", + "sdcard_d3", + "sdcard_clk", + "sdcard_cmd"; + function = "sdcard"; + bias-pull-up; + drive-strength-microamp = <4000>; + }; + }; + + sdcard_clk_gate_pins: sdcard-clk-cmd-pins { + mux { + groups = "GPIOC_4"; + function = "gpio_periphs"; + bias-pull-down; + drive-strength-microamp = <4000>; + }; + }; + + sdio_m_clk_gate_pins: sdio-m-clk-cmd-pins { + mux { + groups = "sdio_clk"; + function = "sdio"; + bias-pull-down; + drive-strength-microamp = <4000>; + }; + }; + + sdio_m_pins: sdio-m-all-pins { + mux { + groups = "sdio_d0", + "sdio_d1", + "sdio_d2", + "sdio_d3", + "sdio_clk", + "sdio_cmd"; + function = "sdio"; + input-enable; + bias-pull-up; + drive-strength-microamp = <4000>; + }; + }; + + spicc0_pins1: spicc0-pins1 { + mux { + groups = "spi_a_mosi_b", + "spi_a_miso_b", + "spi_a_clk_b"; + function = "spi_a"; + drive-strength-microamp = <3000>; + }; + }; + + spicc0_pins2: spicc0-pins2 { + mux { + groups = "spi_a_mosi_c", + "spi_a_miso_c", + 
"spi_a_clk_c"; + function = "spi_a"; + drive-strength-microamp = <3000>; + }; + }; + + spicc0_pins3: spicc0-pins3 { + mux { + groups = "spi_a_mosi_x", + "spi_a_miso_x", + "spi_a_clk_x"; + function = "spi_a"; + drive-strength-microamp = <3000>; + }; + }; + + spicc1_pins1: spicc1-pins1 { + mux { + groups = "spi_b_mosi_d", + "spi_b_miso_d", + "spi_b_clk_d"; + function = "spi_b"; + drive-strength-microamp = <3000>; + }; + }; + + spicc1_pins2: spicc1-pins2 { + mux { + groups = "spi_b_mosi_x", + "spi_b_miso_x", + "spi_b_clk_x"; + function = "spi_b"; + drive-strength-microamp = <3000>; + }; + }; + + spifc_pins: spifc-pins { + mux { + groups = "spif_mo", + "spif_mi", + "spif_clk", + "spif_cs", + "spif_hold", + "spif_wp", + "spif_clk_loop"; + function = "spif"; + drive-strength-microamp = <4000>; + }; + }; }; gpio_intc: interrupt-controller@4080 { @@ -119,16 +421,207 @@ gpio_intc: interrupt-controller@4080 { <10 11 12 13 14 15 16 17 18 19 20 21>; }; + clkc_pll: clock-controller@8000 { + compatible = "amlogic,c3-pll-clkc"; + reg = <0x0 0x8000 0x0 0x1a4>; + #clock-cells = <1>; + clocks = <&scmi_clk CLKID_TOP_PLL_OSC>, + <&scmi_clk CLKID_MCLK_PLL_OSC>, + <&scmi_clk CLKID_FIXED_PLL_OSC>; + clock-names = "top", + "mclk", + "fix"; + }; + + eth_phy: mdio-multiplexer@28000 { + compatible = "amlogic,g12a-mdio-mux"; + reg = <0x0 0x28000 0x0 0xa4>; + + clocks = <&clkc_periphs CLKID_SYS_ETH_PHY>, + <&xtal>, + <&clkc_pll CLKID_FCLK_50M>; + clock-names = "pclk", "clkin0", "clkin1"; + mdio-parent-bus = <&mdio0>; + #address-cells = <1>; + #size-cells = <0>; + + ext_mdio: mdio@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + }; + + int_mdio: mdio@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + internal_ephy: ethernet_phy@8 { + compatible = "ethernet-phy-id0180.3301", + "ethernet-phy-ieee802.3-c22"; + interrupts = ; + reg = <8>; + max-speed = <100>; + }; + }; + }; + + spicc0: spi@50000 { + compatible = "amlogic,meson-g12a-spicc"; + reg = <0x0 0x50000 0x0 0x44>; + interrupts = ; + clocks = <&clkc_periphs CLKID_SYS_SPICC_0>, + <&clkc_periphs CLKID_SPICC_A>; + clock-names = "core", "pclk"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spicc1: spi@52000 { + compatible = "amlogic,meson-g12a-spicc"; + reg = <0x0 0x52000 0x0 0x44>; + interrupts = ; + clocks = <&clkc_periphs CLKID_SYS_SPICC_1>, + <&clkc_periphs CLKID_SPICC_B>; + clock-names = "core", "pclk"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spifc: spi@56000 { + compatible = "amlogic,a1-spifc"; + reg = <0x0 0x56000 0x0 0x290>; + interrupts = ; + clocks = <&clkc_periphs CLKID_SPIFC>; + clock-names = "core"; + status = "disabled"; + }; + + i2c0: i2c@66000 { + compatible = "amlogic,meson-axg-i2c"; + reg = <0x0 0x66000 0x0 0x24>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkc_periphs CLKID_SYS_I2C_M_A>; + status = "disabled"; + }; + + i2c1: i2c@68000 { + compatible = "amlogic,meson-axg-i2c"; + reg = <0x0 0x68000 0x0 0x24>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkc_periphs CLKID_SYS_I2C_M_B>; + status = "disabled"; + }; + + i2c2: i2c@6a000 { + compatible = "amlogic,meson-axg-i2c"; + reg = <0x0 0x6a000 0x0 0x24>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkc_periphs CLKID_SYS_I2C_M_C>; + status = "disabled"; + }; + + i2c3: i2c@6c000 { + compatible = "amlogic,meson-axg-i2c"; + reg = <0x0 0x6c000 0x0 0x24>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + 
clocks = <&clkc_periphs CLKID_SYS_I2C_M_D>; + status = "disabled"; + }; + uart_b: serial@7a000 { compatible = "amlogic,meson-s4-uart", "amlogic,meson-ao-uart"; reg = <0x0 0x7a000 0x0 0x18>; interrupts = ; status = "disabled"; - clocks = <&xtal>, <&xtal>, <&xtal>; + clocks = <&xtal>, <&clkc_periphs CLKID_SYS_UART_B>, <&xtal>; clock-names = "xtal", "pclk", "baud"; }; + sec_ao: ao-secure@10220 { + compatible = "amlogic,c3-ao-secure", + "amlogic,meson-gx-ao-secure", + "syscon"; + reg = <0x0 0x10220 0x0 0x140>; + amlogic,has-chip-id; + }; + + sdio: mmc@88000 { + compatible = "amlogic,meson-axg-mmc"; + reg = <0x0 0x88000 0x0 0x800>; + interrupts = ; + power-domains = <&pwrc PWRC_C3_SDIOA_ID>; + clocks = <&clkc_periphs CLKID_SYS_SD_EMMC_A>, + <&clkc_periphs CLKID_SD_EMMC_A>, + <&clkc_pll CLKID_FCLK_DIV2>; + clock-names = "core","clkin0", "clkin1"; + no-mmc; + no-sd; + resets = <&reset RESET_SD_EMMC_A>; + status = "disabled"; + }; + + sd: mmc@8a000 { + compatible = "amlogic,meson-axg-mmc"; + reg = <0x0 0x8a000 0x0 0x800>; + interrupts = ; + power-domains = <&pwrc PWRC_C3_SDCARD_ID>; + clocks = <&clkc_periphs CLKID_SYS_SD_EMMC_B>, + <&clkc_periphs CLKID_SD_EMMC_B>, + <&clkc_pll CLKID_FCLK_DIV2>; + clock-names = "core", "clkin0", "clkin1"; + no-mmc; + no-sdio; + resets = <&reset RESET_SD_EMMC_B>; + status = "disabled"; + }; + + nand: nand-controller@8d000 { + compatible = "amlogic,meson-axg-nfc"; + reg = <0x0 0x8d000 0x0 0x200>, + <0x0 0x8C000 0x0 0x4>; + reg-names = "nfc", "emmc"; + interrupts = ; + clocks = <&clkc_periphs CLKID_SYS_SD_EMMC_C>, + <&clkc_pll CLKID_FCLK_DIV2>; + clock-names = "core", "device"; + status = "disabled"; + }; + }; + + ethmac: ethernet@fdc00000 { + compatible = "amlogic,meson-g12a-dwmac", + "snps,dwmac-3.70a", + "snps,dwmac"; + reg = <0x0 0xfdc00000 0x0 0x10000>, + <0x0 0xfe024000 0x0 0x8>; + interrupts = ; + interrupt-names = "macirq"; + power-domains = <&pwrc PWRC_C3_ETH_ID>; + clocks = <&clkc_periphs CLKID_SYS_ETH_MAC>, + <&clkc_pll CLKID_FCLK_DIV2>, + <&clkc_pll CLKID_FCLK_50M>; + clock-names = "stmmaceth", "clkin0", "clkin1"; + rx-fifo-depth = <4096>; + tx-fifo-depth = <2048>; + status = "disabled"; + + mdio0: mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + }; }; }; }; diff --git a/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi index c23efc6c7ac027..ec743cad57dbf9 100644 --- a/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi +++ b/arch/arm64/boot/dts/amlogic/amlogic-t7.dtsi @@ -194,6 +194,14 @@ uart_a: serial@78000 { interrupts = ; status = "disabled"; }; + + sec_ao: ao-secure@10220 { + compatible = "amlogic,t7-ao-secure", + "amlogic,meson-gx-ao-secure", + "syscon"; + reg = <0x0 0x10220 0x0 0x140>; + amlogic,has-chip-id; + }; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts index 7ed526f45175f6..9611775b81eee3 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts +++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts @@ -268,6 +268,10 @@ sound { "Speaker1 Right", "SPK1 OUT_D", "Linein AINL", "Linein", "Linein AINR", "Linein"; + clocks = <&clkc CLKID_HIFI_PLL>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_HIFI_PLL>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts index af211d8f39520d..a457b3f4397b33 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts +++ 
b/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts @@ -176,6 +176,10 @@ sound { "SPDIFOUT_A IN 1", "FRDDR_B OUT 3", "SPDIFOUT_A IN 2", "FRDDR_C OUT 3"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts index 15b9bc28070617..c779a5da7d1ea0 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts @@ -138,6 +138,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts index 61cb8135a39255..ea51341f031b5c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts @@ -201,6 +201,10 @@ sound { "TODDR_B IN 1", "TDMIN_B OUT", "TODDR_C IN 1", "TDMIN_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts index 0e239939ade6cc..f70a46967e2b45 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts @@ -238,6 +238,10 @@ sound { "Lineout", "10U2 OUTL", "Lineout", "10U2 OUTR"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts index 05c7a1e3f1b71a..32f98a1924942f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts @@ -158,6 +158,10 @@ sound { "SPDIFOUT_A IN 1", "FRDDR_B OUT 3", "SPDIFOUT_A IN 2", "FRDDR_C OUT 3"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts index 13d478f9c89146..2d74456e685df9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-cm4io.dts @@ -70,6 +70,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts index 003efed529ba3b..0f48c32bec976c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4-mnt-reform2.dts @@ -79,6 +79,10 @@ sound { "LINPUT1", "Mic Jack", "Mic Jack", "MICB"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc 
CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi index 6a346cb86a5394..d4e1990b5f26b8 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi @@ -194,6 +194,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi index 3a24c24115522b..de35fa2d7a6de3 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-dreambox.dtsi @@ -38,6 +38,12 @@ sound { "SPDIFOUT_A IN 0", "FRDDR_A OUT 3", "SPDIFOUT_A IN 1", "FRDDR_B OUT 3", "SPDIFOUT_A IN 2", "FRDDR_C OUT 3"; + + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts index bb73e10b5e741e..369c5cf889b6c8 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts @@ -48,6 +48,10 @@ sound { "TDMOUT_A IN 2", "FRDDR_C OUT 1", "TDM_A Playback", "TDMOUT_A OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts index 6eeedd54ab9176..654449afd3a4cb 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts @@ -49,6 +49,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts index 0da386cabe1a5a..e203113867451e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts @@ -37,6 +37,10 @@ sound { "SPDIFOUT_A IN 1", "FRDDR_B OUT 3", "SPDIFOUT_A IN 2", "FRDDR_C OUT 3"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts index eed2a23047caf0..e21831dfceeea7 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts @@ -234,6 +234,10 @@ sound { "Internal Speakers", "Speaker Amplifier OUTL", "Internal Speakers", "Speaker Amplifier OUTR"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi index 86eb8111223213..3bca8023638d4b 100644 --- 
a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi @@ -95,6 +95,10 @@ sound { "Lineout", "U19 OUTL", "Lineout", "U19 OUTR"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts index e26f3e3258e1f8..1b9097a302518b 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2l.dts @@ -39,6 +39,10 @@ sound { "TODDR_B IN 6", "TDMIN_LB OUT", "TODDR_C IN 6", "TDMIN_LB OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts index 8445701100d0e4..39feba7f2d0830 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts @@ -176,6 +176,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts index 6396f190d703ed..4c1a75b926ee34 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts @@ -32,6 +32,10 @@ sound { "SPDIFOUT_A IN 1", "FRDDR_B OUT 3", "SPDIFOUT_A IN 2", "FRDDR_C OUT 3"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi index efd662a452e881..d38c3a224fbed4 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi @@ -194,6 +194,10 @@ sound { "AU2 INR", "ACODEC LORN", "7J4-14 LEFT", "AU2 OUTL", "7J4-11 RIGHT", "AU2 OUTR"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index 08d6b69ba46918..45ccddd1aaf054 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi @@ -129,6 +129,10 @@ sound { "AU2 INR", "ACODEC LORN", "Lineout", "AU2 OUTL", "Lineout", "AU2 OUTR"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts index f28452b9f00fdb..073b47ce8c3c4f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts @@ -45,6 +45,10 @@ button-reset { sound { compatible = "amlogic,gx-sound-card"; model = "KII-PRO"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks 
= <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index 1fd2e56e6b085d..cf2e2ef8168077 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts @@ -135,6 +135,10 @@ hdmi_connector_in: endpoint { sound { compatible = "amlogic,gx-sound-card"; model = "NANOPI-K2"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index cca129ce2c5834..7d7dde93fff3f9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts @@ -142,6 +142,10 @@ hdmi_connector_in: endpoint { sound { compatible = "amlogic,gx-sound-card"; model = "NEXBOX-A95X"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index c37cc6b036cd71..959bd8d77a82eb 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -177,6 +177,10 @@ hdmi_connector_in: endpoint { sound { compatible = "amlogic,gx-sound-card"; model = "ODROID-C2"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts index 7f94716876d39f..bfac00e76ba3e4 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts @@ -68,6 +68,10 @@ button-menu { sound { compatible = "amlogic,gx-sound-card"; model = "P200"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts index 6f81eed83becc3..c10f66031ecd4b 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts @@ -17,6 +17,10 @@ / { sound { compatible = "amlogic,gx-sound-card"; model = "P201"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 255e93a0b36d9e..3807a184810b86 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi @@ -108,6 +108,10 @@ sdio_pwrseq: sdio-pwrseq { sound { compatible = "amlogic,gx-sound-card"; model = "VEGA-S95"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts index af9ea32a2876b7..ec281a9e9e7768 100644 --- 
a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts @@ -16,6 +16,10 @@ / { sound { compatible = "amlogic,gx-sound-card"; model = "WETEK-HUB"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts index 376760d8676615..924414861b72f1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts @@ -48,6 +48,10 @@ button { sound { compatible = "amlogic,gx-sound-card"; model = "WETEK-PLAY2"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts index 90ef9c17d80bac..c6132fb71dfc48 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts @@ -123,6 +123,10 @@ sound { "Speaker", "9J5-2 RIGHT"; audio-routing = "9J5-3 LEFT", "ACODEC LOLN", "9J5-2 RIGHT", "ACODEC LORN"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts index 08a4718219b108..c5e2306ad7a4b5 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts @@ -128,6 +128,10 @@ sound { "AU2 INR", "ACODEC LORN", "Lineout", "AU2 OUTL", "Lineout", "AU2 OUTR"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts index fea65f20523aa4..a80f0ea2773be6 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts @@ -67,6 +67,10 @@ hdmi_connector_in: endpoint { sound { compatible = "amlogic,gx-sound-card"; model = "KHADAS-VIM"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts index 63b20860067c09..6cbdfde00e12a4 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts @@ -160,6 +160,10 @@ vcc_1v8: regulator-vcc-1v8 { sound { compatible = "amlogic,gx-sound-card"; model = "LIBRETECH-CC-V2"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 8b26c9661be1f6..401064b0428de9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ 
b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -142,6 +142,10 @@ sound { "AU2 INR", "ACODEC LORN", "Lineout", "AU2 OUTL", "Lineout", "AU2 OUTR"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts index 9b4ea6a4939888..8b41e340f91997 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts @@ -50,6 +50,10 @@ sound { "AU2 INR", "ACODEC LORN", "Lineout", "AU2 OUTL", "Lineout", "AU2 OUTR"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts index de996e930b824f..a9c5881c9783bd 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-vero4k.dts @@ -90,6 +90,11 @@ sound { "AU2 INR", "ACODEC LORN", "Lineout", "AU2 OUTL", "Lineout", "AU2 OUTR"; + + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts b/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts index 1221f454513089..942df754a0ed29 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxlx-s905l-p271.dts @@ -38,10 +38,6 @@ mali: gpu@c0000 { }; }; -&saradc { - compatible = "amlogic,meson-gxlx-saradc", "amlogic,meson-saradc"; -}; - &usb { dr_mode = "host"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 07e7c3bedea008..96a3dd2d8a99dd 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -150,6 +150,10 @@ wifi32k: wifi32k { sound { compatible = "amlogic,gx-sound-card"; model = "KHADAS-VIM2"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index ad2dd4ad0a313f..773107cc47ddb5 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts @@ -86,6 +86,10 @@ hdmi_connector_in: endpoint { sound { compatible = "amlogic,gx-sound-card"; model = "NEXBOX-A1"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index d05dde8da5c566..7356d3b628b162 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts @@ -101,6 +101,10 @@ sdio_pwrseq: sdio-pwrseq { sound { compatible = "amlogic,gx-sound-card"; model = "RBOX-PRO"; + clocks = <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>, + <&clkc CLKID_MPLL2>; + assigned-clocks = <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>, <&clkc CLKID_MPLL2>; diff --git 
a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi index e78cc9b577a055..7daa9b122d5c05 100644 --- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi @@ -182,6 +182,10 @@ sound { "TODDR_B IN 0", "TDMIN_A OUT", "TODDR_C IN 0", "TDMIN_A OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi b/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi index 082b72703cdf95..929e4720ae7683 100644 --- a/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi @@ -200,6 +200,10 @@ sound { <&tdmin_a>, <&tdmin_b>, <&tdmin_c>, <&dioo2133>; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts b/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts index 983caddc409c35..6730c44642d291 100644 --- a/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts +++ b/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts @@ -34,6 +34,111 @@ secmon_reserved: secmon@5000000 { no-map; }; }; + + sdio_32k: sdio-32k { + compatible = "pwm-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */ + }; + + sdio_pwrseq: sdio-pwrseq { + compatible = "mmc-pwrseq-simple"; + reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>; + clocks = <&sdio_32k>; + clock-names = "ext_clock"; + }; + + main_12v: regulator-main-12v { + compatible = "regulator-fixed"; + regulator-name = "12V"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-always-on; + }; + + vddao_3v3: regulator-vddao-3v3 { + compatible = "regulator-fixed"; + regulator-name = "VDDAO_3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&main_12v>; + regulator-always-on; + }; + + vddio_ao1v8: regulator-vddio-ao1v8 { + compatible = "regulator-fixed"; + regulator-name = "VDDIO_AO1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vddao_3v3>; + regulator-always-on; + }; + + /* SY8120B1ABC DC/DC Regulator. 
*/ + vddcpu: regulator-vddcpu { + compatible = "pwm-regulator"; + + regulator-name = "VDDCPU"; + regulator-min-microvolt = <689000>; + regulator-max-microvolt = <1049000>; + + vin-supply = <&main_12v>; + + pwms = <&pwm_ij 1 1500 0>; + pwm-dutycycle-range = <100 0>; + + regulator-boot-on; + regulator-always-on; + /* Voltage Duty-Cycle */ + voltage-table = <1049000 0>, + <1039000 3>, + <1029000 6>, + <1019000 9>, + <1009000 12>, + <999000 14>, + <989000 17>, + <979000 20>, + <969000 23>, + <959000 26>, + <949000 29>, + <939000 31>, + <929000 34>, + <919000 37>, + <909000 40>, + <899000 43>, + <889000 45>, + <879000 48>, + <869000 51>, + <859000 54>, + <849000 56>, + <839000 59>, + <829000 62>, + <819000 65>, + <809000 68>, + <799000 70>, + <789000 73>, + <779000 76>, + <769000 79>, + <759000 81>, + <749000 84>, + <739000 87>, + <729000 89>, + <719000 92>, + <709000 95>, + <699000 98>, + <689000 100>; + }; +}; + +&pwm_ef { + status = "okay"; + pinctrl-0 = <&pwm_e_pins1>; + pinctrl-names = "default"; +}; + +&pwm_ij { + status = "okay"; }; &uart_b { @@ -46,6 +151,40 @@ &ir { pinctrl-names = "default"; }; +&sdio { + pinctrl-0 = <&sdio_pins>; + pinctrl-1 = <&sdio_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; + #address-cells = <1>; + #size-cells = <0>; + bus-width = <4>; + cap-sd-highspeed; + sd-uhs-sdr50; + sd-uhs-sdr104; + max-frequency = <200000000>; + non-removable; + disable-wp; + no-sd; + no-mmc; + vmmc-supply = <&vddao_3v3>; + vqmmc-supply = <&vddio_ao1v8>; +}; + +&sd { + status = "okay"; + pinctrl-0 = <&sdcard_pins>; + pinctrl-1 = <&sdcard_clk_gate_pins>; + pinctrl-names = "default", "clk-gate"; + bus-width = <4>; + cap-sd-highspeed; + max-frequency = <200000000>; + disable-wp; + + cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>; + vmmc-supply = <&vddao_3v3>; + vqmmc-supply = <&vddao_3v3>; +}; + &nand { status = "okay"; #address-cells = <1>; @@ -90,3 +229,9 @@ &spicc0 { pinctrl-0 = <&spicc0_pins_x>; cs-gpios = <&gpio GPIOX_10 GPIO_ACTIVE_LOW>; }; + +ðmac { + status = "okay"; + phy-handle = <&internal_ephy>; + phy-mode = "rmii"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi index b686eacb966207..957577d986c067 100644 --- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi @@ -10,6 +10,7 @@ #include #include #include +#include / { cpus { @@ -466,6 +467,93 @@ mux { }; }; + sdcard_pins: sdcard-pins { + mux { + groups = "sdcard_d0_c", + "sdcard_d1_c", + "sdcard_d2_c", + "sdcard_d3_c", + "sdcard_clk_c", + "sdcard_cmd_c"; + function = "sdcard"; + bias-pull-up; + drive-strength-microamp = <4000>; + }; + }; + + sdcard_clk_gate_pins: sdcard-clk-gate-pins { + mux { + groups = "GPIOC_4"; + function = "gpio_periphs"; + bias-pull-down; + drive-strength-microamp = <4000>; + }; + }; + + emmc_pins: emmc-pins { + mux-0 { + groups = "emmc_nand_d0", + "emmc_nand_d1", + "emmc_nand_d2", + "emmc_nand_d3", + "emmc_nand_d4", + "emmc_nand_d5", + "emmc_nand_d6", + "emmc_nand_d7", + "emmc_cmd"; + function = "emmc"; + bias-pull-up; + drive-strength-microamp = <4000>; + }; + mux-1 { + groups = "emmc_clk"; + function = "emmc"; + bias-pull-up; + drive-strength-microamp = <4000>; + }; + }; + + emmc_ds_pins: emmc-ds-pins { + mux { + groups = "emmc_nand_ds"; + function = "emmc"; + bias-pull-down; + drive-strength-microamp = <4000>; + }; + }; + + emmc_clk_gate_pins: emmc-clk-gate-pins { + mux { + groups = "GPIOB_8"; + function = "gpio_periphs"; + bias-pull-down; + drive-strength-microamp = <4000>; + }; + }; + + sdio_pins: 
sdio-pins { + mux { + groups = "sdio_d0", + "sdio_d1", + "sdio_d2", + "sdio_d3", + "sdio_clk", + "sdio_cmd"; + function = "sdio"; + bias-pull-up; + drive-strength-microamp = <4000>; + }; + }; + + sdio_clk_gate_pins: sdio-clk-gate-pins { + mux { + groups = "GPIOX_4"; + function = "gpio_periphs"; + bias-pull-down; + drive-strength-microamp = <4000>; + }; + }; + spicc0_pins_x: spicc0-pins_x { mux { groups = "spi_a_mosi_x", @@ -675,6 +763,14 @@ reset: reset-controller@2000 { #reset-cells = <1>; }; + sec_ao: ao-secure@10220 { + compatible = "amlogic,s4-ao-secure", + "amlogic,meson-gx-ao-secure", + "syscon"; + reg = <0x0 0x10220 0x0 0x140>; + amlogic,has-chip-id; + }; + ir: ir@84040 { compatible = "amlogic,meson-s4-ir"; reg = <0x0 0x84040 0x0 0x30>; @@ -712,5 +808,45 @@ mdio0: mdio { compatible = "snps,dwmac-mdio"; }; }; + + sdio: mmc@fe088000 { + compatible = "amlogic,meson-axg-mmc"; + reg = <0x0 0xfe088000 0x0 0x800>; + interrupts = ; + clocks = <&clkc_periphs CLKID_SDEMMC_A>, + <&xtal>, + <&clkc_pll CLKID_FCLK_DIV2>; + clock-names = "core", "clkin0", "clkin1"; + resets = <&reset RESET_SD_EMMC_A>; + cap-sdio-irq; + keep-power-in-suspend; + status = "disabled"; + }; + + sd: mmc@fe08a000 { + compatible = "amlogic,meson-axg-mmc"; + reg = <0x0 0xfe08a000 0x0 0x800>; + interrupts = ; + clocks = <&clkc_periphs CLKID_SDEMMC_B>, + <&clkc_periphs CLKID_SD_EMMC_B>, + <&clkc_pll CLKID_FCLK_DIV2>; + clock-names = "core", "clkin0", "clkin1"; + resets = <&reset RESET_SD_EMMC_B>; + status = "disabled"; + }; + + emmc: mmc@fe08c000 { + compatible = "amlogic,meson-axg-mmc"; + reg = <0x0 0xfe08c000 0x0 0x800>; + interrupts = ; + clocks = <&clkc_periphs CLKID_NAND>, + <&xtal>, + <&clkc_pll CLKID_FCLK_DIV2>; + clock-names = "core", "clkin0", "clkin1"; + resets = <&reset RESET_NAND_EMMC>; + no-sdio; + no-sd; + status = "disabled"; + }; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts index 9b2eb6e4265170..3c43d3490e14a5 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air-gbit.dts @@ -22,6 +22,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts index 6e34fd80ed717b..445c1671ede78c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-a95xf3-air.dts @@ -22,6 +22,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts index 586034316ec3a6..eeaff22edade8f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m2-pro.dts @@ -22,6 +22,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git 
a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts index f045bf8516387e..697855fec4760f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts @@ -57,6 +57,10 @@ sound { "Lineout", "ACODEC LOLP", "Lineout", "ACODEC LORP"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts index e6e9410d40cb27..7b3a014d4cde2e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-h96-max.dts @@ -22,6 +22,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi index 951eb8e3f0c0c9..7b0e9817a615dd 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi @@ -174,6 +174,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts index 3581e14cbf18db..2e3397e55da2b4 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts @@ -239,6 +239,10 @@ sound { "TODDR_B IN 1", "TDMIN_B OUT", "TODDR_C IN 1", "TDMIN_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts index fc9b961133cdb1..e4a3a2a8ad0647 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air-gbit.dts @@ -22,6 +22,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts index 9ea969255b4f8a..fff92e0d6dd51d 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-x96-air.dts @@ -22,6 +22,10 @@ sound { "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT"; + clocks = <&clkc CLKID_MPLL2>, + <&clkc CLKID_MPLL0>, + <&clkc CLKID_MPLL1>; + assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 532401bc9c6607..6ad4703925dc5f 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -997,7 +997,7 @@ mdio { compatible = "apm,xgene-mdio"; #address-cells = <1>; #size-cells = <0>; - menetphy: menetphy@3 { + menetphy: ethernet-phy@3 { 
compatible = "ethernet-phy-id001c.c915"; reg = <0x3>; }; diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi index 93f1e7c026b8c3..083be35495b395 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi @@ -18,7 +18,9 @@ / { #address-cells = <2>; #size-cells = <2>; - chosen { }; + chosen { + stdout-path = "serial0:115200n8"; + }; aliases { serial0 = &v2m_serial0; diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts index 85f1c15cc65d06..19973ab4ea6b53 100644 --- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts +++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts @@ -24,7 +24,9 @@ / { #address-cells = <2>; #size-cells = <2>; - chosen { }; + chosen { + stdout-path = "serial0:115200n8"; + }; aliases { serial0 = &v2m_serial0; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts index afdf954206f1d1..7f7226711d4bb8 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts +++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts @@ -23,7 +23,9 @@ / { #address-cells = <2>; #size-cells = <2>; - chosen { }; + chosen { + stdout-path = "serial0:115200n8"; + }; aliases { serial0 = &v2m_serial0; diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile index 8b4591ddd27ccc..92565e9781ad3c 100644 --- a/arch/arm64/boot/dts/broadcom/Makefile +++ b/arch/arm64/boot/dts/broadcom/Makefile @@ -6,6 +6,7 @@ DTC_FLAGS := -@ dtb-$(CONFIG_ARCH_BCM2835) += bcm2711-rpi-400.dtb \ bcm2711-rpi-4-b.dtb \ bcm2711-rpi-cm4-io.dtb \ + bcm2712-rpi-5-b.dtb \ bcm2837-rpi-3-a-plus.dtb \ bcm2837-rpi-3-b.dtb \ bcm2837-rpi-3-b-plus.dtb \ diff --git a/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts new file mode 100644 index 00000000000000..2bdbb6780242a1 --- /dev/null +++ b/arch/arm64/boot/dts/broadcom/bcm2712-rpi-5-b.dts @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/dts-v1/; + +#include +#include "bcm2712.dtsi" + +/ { + compatible = "raspberrypi,5-model-b", "brcm,bcm2712"; + model = "Raspberry Pi 5"; + + aliases { + serial10 = &uart10; + }; + + chosen: chosen { + stdout-path = "serial10:115200n8"; + }; + + /* Will be filled by the bootloader */ + memory@0 { + device_type = "memory"; + reg = <0 0 0 0x28000000>; + }; + + sd_io_1v8_reg: sd-io-1v8-reg { + compatible = "regulator-gpio"; + regulator-name = "vdd-sd-io"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + regulator-settling-time-us = <5000>; + gpios = <&gio_aon 3 GPIO_ACTIVE_HIGH>; + states = <1800000 1>, + <3300000 0>; + }; + + sd_vcc_reg: sd-vcc-reg { + compatible = "regulator-fixed"; + regulator-name = "vcc-sd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + enable-active-high; + gpios = <&gio_aon 4 GPIO_ACTIVE_HIGH>; + }; +}; + +/* The Debug UART, on Rpi5 it's on JST-SH 1.0mm 3-pin connector + * labeled "UART", i.e. the interface with the system console. 
+ */ +&uart10 { + status = "okay"; +}; + +/* SDIO1 is used to drive the SD card */ +&sdio1 { + vqmmc-supply = <&sd_io_1v8_reg>; + vmmc-supply = <&sd_vcc_reg>; + bus-width = <4>; + sd-uhs-sdr50; + sd-uhs-ddr50; + sd-uhs-sdr104; +}; diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi new file mode 100644 index 00000000000000..6e5a984c1d4ea1 --- /dev/null +++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +#include + +/ { + compatible = "brcm,bcm2712"; + + #address-cells = <2>; + #size-cells = <2>; + + interrupt-parent = <&gicv2>; + + clocks { + /* The oscillator is the root of the clock tree. */ + clk_osc: clk-osc { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-output-names = "osc"; + clock-frequency = <54000000>; + }; + + clk_vpu: clk-vpu { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <750000000>; + clock-output-names = "vpu-clock"; + }; + + clk_uart: clk-uart { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <9216000>; + clock-output-names = "uart-clock"; + }; + + clk_emmc2: clk-emmc2 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <200000000>; + clock-output-names = "emmc2-clock"; + }; + }; + + cpus: cpus { + #address-cells = <1>; + #size-cells = <0>; + + /* Source for L1 d/i cache-line-size, cache-sets, cache-size + * https://developer.arm.com/documentation/100798/0401/L1-memory-system/About-the-L1-memory-system?lang=en + * Source for L2 cache-line-size and cache-sets: + * https://developer.arm.com/documentation/100798/0401/L2-memory-system/About-the-L2-memory-system?lang=en + * and for cache-size: + * https://www.raspberrypi.com/documentation/computers/processors.html#bcm2712 + */ + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a76"; + reg = <0x000>; + enable-method = "psci"; + d-cache-size = <0x10000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set + i-cache-size = <0x10000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set + next-level-cache = <&l2_cache_l0>; + + l2_cache_l0: l2-cache-l0 { + compatible = "cache"; + cache-size = <0x80000>; + cache-line-size = <128>; + cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set + cache-level = <2>; + cache-unified; + next-level-cache = <&l3_cache>; + }; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a76"; + reg = <0x100>; + enable-method = "psci"; + d-cache-size = <0x10000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set + i-cache-size = <0x10000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set + next-level-cache = <&l2_cache_l1>; + + l2_cache_l1: l2-cache-l1 { + compatible = "cache"; + cache-size = <0x80000>; + cache-line-size = <128>; + cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set + cache-level = <2>; + cache-unified; + next-level-cache = <&l3_cache>; + }; + }; + + cpu2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a76"; + reg = <0x200>; + enable-method = "psci"; + d-cache-size = <0x10000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set + i-cache-size = <0x10000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set + next-level-cache = 
<&l2_cache_l2>; + + l2_cache_l2: l2-cache-l2 { + compatible = "cache"; + cache-size = <0x80000>; + cache-line-size = <128>; + cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set + cache-level = <2>; + cache-unified; + next-level-cache = <&l3_cache>; + }; + }; + + cpu3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a76"; + reg = <0x300>; + enable-method = "psci"; + d-cache-size = <0x10000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set + i-cache-size = <0x10000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set + next-level-cache = <&l2_cache_l3>; + + l2_cache_l3: l2-cache-l3 { + compatible = "cache"; + cache-size = <0x80000>; + cache-line-size = <128>; + cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set + cache-level = <2>; + cache-unified; + next-level-cache = <&l3_cache>; + }; + }; + + /* Source for cache-line-size and cache-sets: + * https://developer.arm.com/documentation/100453/0401/L3-cache?lang=en + * Source for cache-size: + * https://www.raspberrypi.com/documentation/computers/processors.html#bcm2712 + */ + l3_cache: l3-cache { + compatible = "cache"; + cache-size = <0x200000>; + cache-line-size = <64>; + cache-sets = <2048>; // 2MiB(size)/64(line-size)=32768ways/16-way set + cache-level = <3>; + cache-unified; + }; + }; + + psci { + method = "smc"; + compatible = "arm,psci-1.0", "arm,psci-0.2"; + }; + + rmem: reserved-memory { + ranges; + #address-cells = <2>; + #size-cells = <2>; + + atf@0 { + reg = <0x0 0x0 0x0 0x80000>; + no-map; + }; + + cma: linux,cma { + compatible = "shared-dma-pool"; + size = <0x0 0x4000000>; /* 64MB */ + reusable; + linux,cma-default; + alloc-ranges = <0x0 0x00000000 0x0 0x40000000>; + }; + }; + + soc: soc@107c000000 { + compatible = "simple-bus"; + ranges = <0x00000000 0x10 0x00000000 0x80000000>; + #address-cells = <1>; + #size-cells = <1>; + + sdio1: mmc@fff000 { + compatible = "brcm,bcm2712-sdhci", + "brcm,sdhci-brcmstb"; + reg = <0x00fff000 0x260>, + <0x00fff400 0x200>; + reg-names = "host", "cfg"; + interrupts = ; + clocks = <&clk_emmc2>; + clock-names = "sw_sdio"; + mmc-ddr-3_3v; + }; + + system_timer: timer@7c003000 { + compatible = "brcm,bcm2835-system-timer"; + reg = <0x7c003000 0x1000>; + interrupts = , + , + , + ; + clock-frequency = <1000000>; + }; + + mailbox: mailbox@7c013880 { + compatible = "brcm,bcm2835-mbox"; + reg = <0x7c013880 0x40>; + interrupts = ; + #mbox-cells = <0>; + }; + + local_intc: interrupt-controller@7cd00000 { + compatible = "brcm,bcm2836-l1-intc"; + reg = <0x7cd00000 0x100>; + }; + + uart10: serial@7d001000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x7d001000 0x200>; + interrupts = ; + clocks = <&clk_uart>, <&clk_vpu>; + clock-names = "uartclk", "apb_pclk"; + arm,primecell-periphid = <0x00241011>; + status = "disabled"; + }; + + interrupt-controller@7d517000 { + compatible = "brcm,bcm7271-l2-intc"; + reg = <0x7d517000 0x10>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <1>; + }; + + gio_aon: gpio@7d517c00 { + compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio"; + reg = <0x7d517c00 0x40>; + gpio-controller; + #gpio-cells = <2>; + brcm,gpio-bank-widths = <17 6>; + /* The lack of 'interrupt-controller' property here is intended: + * don't use GIO_AON as an interrupt controller because it will + * clash with the firmware monitoring the PMIC interrupt via the VPU. 
+ */ + }; + + gicv2: interrupt-controller@7fff9000 { + compatible = "arm,gic-400"; + reg = <0x7fff9000 0x1000>, + <0x7fffa000 0x2000>, + <0x7fffc000 0x2000>, + <0x7fffe000 0x2000>; + interrupt-controller; + #interrupt-cells = <3>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + , + ; + }; +}; diff --git a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts index 47a389d9ff7d71..9d74fa6bfed9fb 100644 --- a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts +++ b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts @@ -32,7 +32,7 @@ memory@80000000 { device_type = "memory"; reg = <0x0 0x80000000 0x3da00000>, <0x0 0xc0000000 0x40000000>, - <0x8 0x80000000 0x40000000>; + <0x8 0x80000000 0x80000000>; }; gpio-keys { diff --git a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi index 0248329da49a63..b36292a7db6408 100644 --- a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi +++ b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi @@ -251,6 +251,52 @@ cmu_fsys2: clock-controller@17c00000 { "dout_fsys2_clkcmu_ethernet"; }; + cmu_dpum: clock-controller@18c00000 { + compatible = "samsung,exynosautov9-cmu-dpum"; + reg = <0x18c00000 0x8000>; + #clock-cells = <1>; + + clocks = <&xtcxo>, + <&cmu_top DOUT_CLKCMU_DPUM_BUS>; + clock-names = "oscclk", "bus"; + }; + + sysmmu_dpum_0: sysmmu@18c80000 { + compatible = "samsung,exynos-sysmmu"; + reg = <0x18c80000 0x10000>; + interrupts = ; + clocks = <&cmu_dpum CLK_GOUT_DPUM_SYSMMU_D0_CLK>; + clock-names = "sysmmu"; + #iommu-cells = <0>; + }; + + sysmmu_dpum_1: sysmmu@18c90000 { + compatible = "samsung,exynos-sysmmu"; + reg = <0x18c90000 0x10000>; + interrupts = ; + clocks = <&cmu_dpum CLK_GOUT_DPUM_SYSMMU_D1_CLK>; + clock-names = "sysmmu"; + #iommu-cells = <0>; + }; + + sysmmu_dpum_2: sysmmu@18ca0000 { + compatible = "samsung,exynos-sysmmu"; + reg = <0x18ca0000 0x10000>; + interrupts = ; + clocks = <&cmu_dpum CLK_GOUT_DPUM_SYSMMU_D2_CLK>; + clock-names = "sysmmu"; + #iommu-cells = <0>; + }; + + sysmmu_dpum_3: sysmmu@18cb0000 { + compatible = "samsung,exynos-sysmmu"; + reg = <0x18cb0000 0x10000>; + interrupts = ; + clocks = <&cmu_dpum CLK_GOUT_DPUM_SYSMMU_D3_CLK>; + clock-names = "sysmmu"; + #iommu-cells = <0>; + }; + cmu_core: clock-controller@1b030000 { compatible = "samsung,exynosautov9-cmu-core"; reg = <0x1b030000 0x8000>; diff --git a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi index c1c8566d74f597..91882b37fdb32b 100644 --- a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi +++ b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi @@ -6,6 +6,7 @@ * */ +#include #include #include @@ -38,17 +39,6 @@ xtcxo: clock { clock-output-names = "oscclk"; }; - /* - * FIXME: Keep the stub clock for serial driver, until proper clock - * driver is implemented. 
- */ - clock_usi: clock-usi { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <200000000>; - clock-output-names = "usi"; - }; - cpus: cpus { #address-cells = <2>; #size-cells = <0>; @@ -192,6 +182,19 @@ gic: interrupt-controller@10400000 { interrupts = ; }; + cmu_peric0: clock-controller@10800000 { + compatible = "samsung,exynosautov920-cmu-peric0"; + reg = <0x10800000 0x8000>; + #clock-cells = <1>; + + clocks = <&xtcxo>, + <&cmu_top DOUT_CLKCMU_PERIC0_NOC>, + <&cmu_top DOUT_CLKCMU_PERIC0_IP>; + clock-names = "oscclk", + "noc", + "ip"; + }; + syscon_peric0: syscon@10820000 { compatible = "samsung,exynosautov920-peric0-sysreg", "syscon"; @@ -213,7 +216,8 @@ usi_0: usi@108800c0 { #address-cells = <1>; #size-cells = <1>; ranges; - clocks = <&clock_usi>, <&clock_usi>; + clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>, + <&cmu_peric0 CLK_DOUT_PERIC0_USI00_USI>; clock-names = "pclk", "ipclk"; status = "disabled"; @@ -224,7 +228,8 @@ serial_0: serial@10880000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&uart0_bus>; - clocks = <&clock_usi>, <&clock_usi>; + clocks = <&cmu_peric0 CLK_MOUT_PERIC0_NOC_USER>, + <&cmu_peric0 CLK_DOUT_PERIC0_USI00_USI>; clock-names = "uart", "clk_uart_baud0"; samsung,uart-fifosize = <256>; status = "disabled"; @@ -254,6 +259,15 @@ pinctrl_peric1: pinctrl@10c30000 { interrupts = ; }; + cmu_top: clock-controller@11000000 { + compatible = "samsung,exynosautov920-cmu-top"; + reg = <0x11000000 0x8000>; + #clock-cells = <1>; + + clocks = <&xtcxo>; + clock-names = "oscclk"; + }; + pinctrl_alive: pinctrl@11850000 { compatible = "samsung,exynosautov920-pinctrl"; reg = <0x11850000 0x10000>; diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi index eadb8822e6d4fd..302c5beb224aa4 100644 --- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi +++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi @@ -1394,6 +1394,21 @@ sysreg_apm: syscon@174204e0 { pmu_system_controller: system-controller@17460000 { compatible = "google,gs101-pmu", "syscon"; reg = <0x17460000 0x10000>; + + poweroff: syscon-poweroff { + compatible = "syscon-poweroff"; + regmap = <&pmu_system_controller>; + offset = <0x3e9c>; /* PAD_CTRL_PWR_HOLD */ + mask = <0x100>; /* reset value */ + }; + + reboot: syscon-reboot { + compatible = "syscon-reboot"; + regmap = <&pmu_system_controller>; + offset = <0x3a00>; /* SYSTEM_CONFIGURATION */ + mask = <0x2>; /* SWRESET_SYSTEM */ + value = <0x2>; /* reset value */ + }; }; pinctrl_gpio_alive: pinctrl@174d0000 { diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile index f04c22b7de72e3..9d3df8b218a2e4 100644 --- a/arch/arm64/boot/dts/freescale/Makefile +++ b/arch/arm64/boot/dts/freescale/Makefile @@ -129,11 +129,11 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw71xx-0x.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw72xx-0x.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw73xx-0x.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw75xx-0x.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7901.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7902.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7903.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7904.dtb -dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw7905-0x.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-verdin-nonwifi-dahlia.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-verdin-nonwifi-dev.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-verdin-nonwifi-mallow.dtb @@ -174,16 +174,19 @@ dtb-$(CONFIG_ARCH_MXC) += 
imx8mp-icore-mx8mp-edimm2.2.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-msc-sm2s-ep1.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-navqp.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk.dtb +imx8mp-phyboard-pollux-rdk-no-eth-dtbs += imx8mp-phyboard-pollux-rdk.dtb imx8mp-phycore-no-eth.dtbo +dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk-no-eth.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-hdmi.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-lt6.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-mi1010ait-1cp1.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mpxl.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mp-ras314.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8mp-var-som-symphony.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw71xx-2x.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw72xx-2x.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw73xx-2x.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw74xx.dtb -dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw7905-2x.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8mp-venice-gw75xx-2x.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-nonwifi-dahlia.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-nonwifi-dev.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mp-verdin-nonwifi-mallow.dtb @@ -238,6 +241,8 @@ dtb-$(CONFIG_ARCH_MXC) += imx8qxp-tqma8xqp-mba8xx.dtb dtb-$(CONFIG_ARCH_MXC) += imx8ulp-evk.dtb dtb-$(CONFIG_ARCH_MXC) += imx93-9x9-qsb.dtb dtb-$(CONFIG_ARCH_MXC) += imx93-11x11-evk.dtb +dtb-$(CONFIG_ARCH_MXC) += imx93-14x14-evk.dtb +dtb-$(CONFIG_ARCH_MXC) += imx93-kontron-bl-osm-s.dtb dtb-$(CONFIG_ARCH_MXC) += imx93-phyboard-segin.dtb dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxca.dtb dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxla.dtb diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts index 2517528f684fe0..75081ce3e9a6f0 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts @@ -20,6 +20,12 @@ sys_mclk: clock-mclk { clock-frequency = <25000000>; }; + sc16is7xx_clk: clock-sc16is7xx { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + }; + reg_1p8v: regulator-1p8v { compatible = "regulator-fixed"; regulator-name = "1P8V"; @@ -69,12 +75,6 @@ serial@0 { clocks = <&sc16is7xx_clk>; interrupt-parent = <&gpio1>; interrupts = <13 IRQ_TYPE_EDGE_FALLING>; - - sc16is7xx_clk: clock-sc16is7xx { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <24000000>; - }; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi index e61ea7e0737e42..dd479889658d45 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi @@ -164,7 +164,6 @@ esdhc0: mmc@1560000 { QORIQ_CLK_PLL_DIV(1)>; voltage-ranges = <1800 1800 3300 3300>; sdhci,auto-cmd12; - big-endian; bus-width = <4>; status = "disabled"; }; @@ -183,7 +182,6 @@ esdhc1: mmc@1580000 { QORIQ_CLK_PLL_DIV(1)>; voltage-ranges = <1800 1800 3300 3300>; sdhci,auto-cmd12; - big-endian; broken-cd; bus-width = <4>; status = "disabled"; @@ -541,7 +539,6 @@ pcie1: pcie@3400000 { #address-cells = <3>; #size-cells = <2>; device_type = "pci"; - num-viewport = <2>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -556,7 +553,7 @@ pcie1: pcie@3400000 { status = "disabled"; }; - rcpm: power-controller@1ee2140 { + rcpm: wakeup-controller@1ee2140 
{ compatible = "fsl,ls1012a-rcpm", "fsl,qoriq-rcpm-2.1+"; reg = <0x0 0x1ee2140 0x0 0x4>; #fsl,rcpm-wakeup-cells = <1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts index 195bdbafdf7c9e..d9fac647f4327d 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts @@ -26,6 +26,13 @@ pwm-fan { cooling-levels = <1 128 192 255>; }; + reg_3p3v: regulator-3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + sound { #address-cells = <1>; #size-cells = <0>; @@ -107,6 +114,11 @@ wm8904: audio-codec@1a { clock-names = "mclk"; assigned-clocks = <&mclk>; assigned-clock-rates = <1250000>; + AVDD-supply = <®_3p3v>; + CPVDD-supply = <®_3p3v>; + DBVDD-supply = <®_3p3v>; + DCVDD-supply = <®_3p3v>; + MICVDD-supply = <®_3p3v>; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index acf293310f7a09..7d172d7e5737c4 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -112,13 +112,6 @@ optee: optee { }; }; - reboot { - compatible = "syscon-reboot"; - regmap = <&rst>; - offset = <0>; - mask = <0x02>; - }; - timer { compatible = "arm,armv8-timer"; interrupts = ; reg = <0x0 0x06020000 0 0x20000>;/* GIC Translater */ }; }; @@ -235,10 +229,16 @@ fspi_clk: clock-controller@900 { }; }; - rst: syscon@1e60000 { - compatible = "syscon"; + syscon@1e60000 { + compatible = "fsl,ls1028a-reset", "syscon", "simple-mfd"; reg = <0x0 0x1e60000 0x0 0x10000>; little-endian; + + reboot { + compatible = "syscon-reboot"; + offset = <0>; + mask = <0x02>; + }; }; sfp: efuse@1e80000 { @@ -381,7 +381,6 @@ dspi0: spi@2100000 { dmas = <&edma0 0 62>, <&edma0 0 60>; dma-names = "tx", "rx"; spi-num-chipselects = <4>; - little-endian; status = "disabled"; }; @@ -397,7 +396,6 @@ dspi1: spi@2110000 { dmas = <&edma0 0 58>, <&edma0 0 56>; dma-names = "tx", "rx"; spi-num-chipselects = <4>; - little-endian; status = "disabled"; }; @@ -413,7 +411,6 @@ dspi2: spi@2120000 { dmas = <&edma0 0 54>, <&edma0 0 2>; dma-names = "tx", "rx"; spi-num-chipselects = <3>; - little-endian; status = "disabled"; }; @@ -662,7 +659,7 @@ pcie1: pcie@3400000 { bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x80 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, @@ -701,7 +698,7 @@ pcie2: pcie@3500000 { bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x88 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x88 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>, @@ -1080,7 +1077,7 @@ pcie@1f0000000 { /* Integrated Endpoint Root Complex */ reg = <0x01 0xf0000000 0x0 0x100000>; #address-cells = <3>; #size-cells = <2>; - msi-parent = <&its>; + msi-parent = <&its 0>; device_type = "pci"; bus-range = <0x0 0x0>; dma-coherent; @@ -1319,7 +1316,7 @@ pwm7: pwm@2870000 { status = 
"disabled"; }; - rcpm: power-controller@1e34040 { + rcpm: wakeup-controller@1e34040 { compatible = "fsl,ls1028a-rcpm", "fsl,qoriq-rcpm-2.1+"; reg = <0x0 0x1e34040 0x0 0x1c>; #fsl,rcpm-wakeup-cells = <7>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi index 5c4d7eef8b615a..ca7cd7a33c017e 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi @@ -29,6 +29,7 @@ enet0: ethernet@e0000 { enet1: ethernet@e2000 { pcsphy-handle = <&pcsphy1>, <&qsgmiib_pcs1>; + pcs-handle = <&pcsphy1>, <&qsgmiib_pcs1>; pcs-handle-names = "sgmii", "qsgmii"; }; @@ -40,11 +41,13 @@ enet3: ethernet@e6000 { enet4: ethernet@e8000 { pcsphy-handle = <&pcsphy4>, <&qsgmiib_pcs2>; + pcs-handle = <&pcsphy4>, <&qsgmiib_pcs2>; pcs-handle-names = "sgmii", "qsgmii"; }; enet5: ethernet@ea000 { pcsphy-handle = <&pcsphy5>, <&qsgmiib_pcs3>; + pcs-handle = <&pcsphy5>, <&qsgmiib_pcs3>; pcs-handle-names = "sgmii", "qsgmii"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts index 11b1356e95d5bf..e850551b16acb5 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts @@ -211,7 +211,7 @@ ethernet@f0000 { /* DTSEC9/10GEC1 */ }; &fpga { - mdio-mux-emi1@54 { + mdio-mux@54 { compatible = "mdio-mux-mmioreg", "mdio-mux"; mdio-parent-bus = <&mdio0>; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi index ab4c919e3e1659..c0e3e8fa1e7947 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi @@ -431,7 +431,6 @@ esdhc: mmc@1560000 { clock-frequency = <0>; voltage-ranges = <1800 1800 3300 3300>; sdhci,auto-cmd12; - big-endian; bus-width = <4>; }; @@ -439,7 +438,6 @@ ddr: memory-controller@1080000 { compatible = "fsl,qoriq-memory-controller"; reg = <0x0 0x1080000 0x0 0x1000>; interrupts = ; - big-endian; }; tmu: tmu@1f00000 { @@ -653,7 +651,7 @@ gpio4: gpio@2330000 { #interrupt-cells = <2>; }; - uqe: uqe@2400000 { + uqe: uqe-bus@2400000 { #address-cells = <1>; #size-cells = <1>; compatible = "fsl,qe", "simple-bus"; @@ -667,7 +665,6 @@ uqe: uqe@2400000 { qeic: qeic@80 { compatible = "fsl,qe-ic"; reg = <0x80 0x80>; - #address-cells = <0>; interrupt-controller; #interrupt-cells = <1>; interrupts = , @@ -675,16 +672,12 @@ qeic: qeic@80 { }; si1: si@700 { - #address-cells = <1>; - #size-cells = <0>; compatible = "fsl,ls1043-qe-si", "fsl,t1040-qe-si"; reg = <0x700 0x80>; }; siram1: siram@1000 { - #address-cells = <1>; - #size-cells = <1>; compatible = "fsl,ls1043-qe-siram", "fsl,t1040-qe-siram"; reg = <0x1000 0x800>; @@ -804,7 +797,7 @@ QORIQ_CLK_PLL_DIV(1)>, QORIQ_CLK_PLL_DIV(1)>; }; - aux_bus: aux-bus { + aux_bus: bus { #address-cells = <2>; #size-cells = <2>; compatible = "simple-bus"; @@ -962,7 +955,7 @@ pcie3: pcie@3600000 { }; qdma: dma-controller@8380000 { - compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma"; + compatible = "fsl,ls1043a-qdma", "fsl,ls1021a-qdma"; reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */ <0x0 0x8390000 0x0 0x10000>, /* Status regs */ <0x0 0x83a0000 0x0 0x40000>; /* Block regs */ @@ -983,7 +976,7 @@ qdma: dma-controller@8380000 { big-endian; }; - rcpm: power-controller@1ee2140 { + rcpm: wakeup-controller@1ee2140 { compatible = "fsl,ls1043a-rcpm", "fsl,qoriq-rcpm-2.1+"; reg = <0x0 0x1ee2140 0x0 0x4>; #fsl,rcpm-wakeup-cells = 
<1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi index 4e33450939436f..15ff7c569d288f 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi @@ -24,6 +24,7 @@ &fman0 { /* these aliases provide the FMan ports mapping */ enet0: ethernet@e0000 { pcsphy-handle = <&qsgmiib_pcs3>; + pcs-handle = <&qsgmiib_pcs3>; pcs-handle-names = "qsgmii"; }; @@ -38,11 +39,13 @@ enet3: ethernet@e6000 { enet4: ethernet@e8000 { pcsphy-handle = <&pcsphy4>, <&qsgmiib_pcs1>; + pcs-handle = <&pcsphy4>, <&qsgmiib_pcs1>; pcs-handle-names = "sgmii", "qsgmii"; }; enet5: ethernet@ea000 { pcsphy-handle = <&pcsphy5>, <&pcsphy5>; + pcs-handle = <&pcsphy5>, <&pcsphy5>; pcs-handle-names = "sgmii", "qsgmii"; }; @@ -51,6 +54,7 @@ enet6: ethernet@f0000 { enet7: ethernet@f2000 { pcsphy-handle = <&pcsphy7>, <&qsgmiib_pcs2>, <&pcsphy7>; + pcs-handle = <&pcsphy7>, <&qsgmiib_pcs2>, <&pcsphy7>; pcs-handle-names = "sgmii", "qsgmii", "xfi"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts index e5296e51f656fe..a1d9102ff32be9 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts @@ -237,7 +237,7 @@ &fpga { #address-cells = <1>; #size-cells = <1>; - mdio-mux-emi1 { + mdio-mux@54 { compatible = "mdio-mux-mmioreg", "mdio-mux"; mdio-parent-bus = <&mdio0>; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi index 55019866d6a25b..0baf256b44003f 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi @@ -282,7 +282,6 @@ ddr: memory-controller@1080000 { compatible = "fsl,qoriq-memory-controller"; reg = <0x0 0x1080000 0x0 0x1000>; interrupts = ; - big-endian; }; ifc: memory-controller@1530000 { @@ -315,7 +314,6 @@ esdhc: mmc@1560000 { clocks = <&clockgen QORIQ_CLK_HWACCEL 1>; voltage-ranges = <1800 1800 3300 3300>; sdhci,auto-cmd12; - big-endian; bus-width = <4>; }; @@ -694,7 +692,6 @@ wdog0: watchdog@2ad0000 { interrupts = ; clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(2)>; - big-endian; }; edma0: dma-controller@2c00000 { @@ -715,7 +712,7 @@ QORIQ_CLK_PLL_DIV(2)>, QORIQ_CLK_PLL_DIV(2)>; }; - aux_bus: aux-bus { + aux_bus: bus { #address-cells = <2>; #size-cells = <2>; compatible = "simple-bus"; @@ -823,7 +820,7 @@ pcie1: pcie@3400000 { }; pcie_ep1: pcie_ep@3400000 { - compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep"; + compatible = "fsl,ls1046a-pcie-ep"; reg = <0x00 0x03400000 0x0 0x00100000>, <0x40 0x00000000 0x8 0x00000000>; reg-names = "regs", "addr_space"; @@ -862,7 +859,7 @@ pcie2: pcie@3500000 { }; pcie_ep2: pcie_ep@3500000 { - compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep"; + compatible = "fsl,ls1046a-pcie-ep"; reg = <0x00 0x03500000 0x0 0x00100000>, <0x48 0x00000000 0x8 0x00000000>; reg-names = "regs", "addr_space"; @@ -901,7 +898,7 @@ pcie3: pcie@3600000 { }; pcie_ep3: pcie_ep@3600000 { - compatible = "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"; + compatible = "fsl,ls1046a-pcie-ep"; reg = <0x00 0x03600000 0x0 0x00100000>, <0x50 0x00000000 0x8 0x00000000>; reg-names = "regs", "addr_space"; @@ -935,7 +932,7 @@ qdma: dma-controller@8380000 { big-endian; }; - rcpm: power-controller@1ee2140 { + rcpm: wakeup-controller@1ee2140 { compatible = "fsl,ls1046a-rcpm", "fsl,qoriq-rcpm-2.1+"; reg = <0x0 0x1ee2140 0x0 0x4>; 
#fsl,rcpm-wakeup-cells = <1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts index ee8e932628d17f..2df16bfb901c2e 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts @@ -170,6 +170,13 @@ rtc@51 { /* IRQ_RTC_B -> IRQ0_B(CPLD) -> IRQ00(CPU), active low */ interrupts-extended = <&extirq 0 IRQ_TYPE_LEVEL_LOW>; }; + + rtc@53 { + compatible = "nxp,pcf2131"; + reg = <0x53>; + /* IRQ_RTC_B -> IRQ0_B(CPLD) -> IRQ00(CPU), active low */ + interrupts-extended = <&extirq 0 IRQ_TYPE_LEVEL_LOW>; + }; }; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts index d4867d6cf47cdc..bc0d89427fbe5a 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts @@ -220,7 +220,7 @@ sfpgpio: gpio@76 { #gpio-cells = <2>; gpio-controller; - admin_led_lower { + admin-led-lower-hog { gpio-hog; gpios = <13 GPIO_ACTIVE_HIGH>; output-low; @@ -323,9 +323,9 @@ partition@580000 { reg = <0x580000 0x40000>; }; - partition@5C0000 { + partition@5c0000 { label = "dpc"; - reg = <0x5C0000 0x40000>; + reg = <0x5c0000 0x40000>; }; partition@600000 { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi index e3a7db21fe29a3..9d5726378aa015 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi @@ -126,6 +126,7 @@ gic: interrupt-controller@6000000 { its: msi-controller@6020000 { compatible = "arm,gic-v3-its"; msi-controller; + #msi-cells = <1>; reg = <0x0 0x6020000 0 0x20000>; }; }; @@ -575,7 +576,7 @@ pcie1: pcie@3400000 { bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 0 109 IRQ_TYPE_LEVEL_HIGH>, @@ -587,7 +588,7 @@ pcie1: pcie@3400000 { }; pcie_ep1: pcie-ep@3400000 { - compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep"; + compatible = "fsl,ls1088a-pcie-ep"; reg = <0x00 0x03400000 0x0 0x00100000>, <0x20 0x00000000 0x8 0x00000000>; reg-names = "regs", "addr_space"; @@ -614,7 +615,7 @@ pcie2: pcie@3500000 { bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 0 114 IRQ_TYPE_LEVEL_HIGH>, @@ -626,7 +627,7 @@ pcie2: pcie@3500000 { }; pcie_ep2: pcie-ep@3500000 { - compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep"; + compatible = "fsl,ls1088a-pcie-ep"; reg = <0x00 0x03500000 0x0 0x00100000>, <0x28 0x00000000 0x8 0x00000000>; reg-names = "regs", "addr_space"; @@ -652,7 +653,7 @@ pcie3: pcie@3600000 { bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 0 119 IRQ_TYPE_LEVEL_HIGH>, @@ -664,7 +665,7 @@ pcie3: 
pcie@3600000 { }; pcie_ep3: pcie-ep@3600000 { - compatible = "fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep"; + compatible = "fsl,ls1088a-pcie-ep"; reg = <0x00 0x03600000 0x0 0x00100000>, <0x30 0x00000000 0x8 0x00000000>; reg-names = "regs", "addr_space"; @@ -964,7 +965,7 @@ fsl_mc: fsl-mc@80c000000 { compatible = "fsl,qoriq-mc"; reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ - msi-parent = <&its>; + msi-parent = <&its 0>; iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */ dma-coherent; #address-cells = <3>; @@ -1033,7 +1034,7 @@ dpmac10: ethernet@a { }; }; - rcpm: power-controller@1e34040 { + rcpm: wakeup-controller@1e34040 { compatible = "fsl,ls1088a-rcpm", "fsl,qoriq-rcpm-2.1+"; reg = <0x0 0x1e34040 0x0 0x18>; #fsl,rcpm-wakeup-cells = <6>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi index 9178cd61c78691..556d8c5f3180a9 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi @@ -64,7 +64,7 @@ boardctrl: board-control@3,0 { reg = <3 0 0x1000>; ranges = <0 3 0 0x1000>; - mdio-mux-emi1@54 { + mdio-mux@54 { compatible = "mdio-mux-mmioreg", "mdio-mux"; mdio-parent-bus = <&emdio1>; reg = <0x54 1>; /* BRDCFG4 */ diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi index 1b306d6802ce3d..9421fdd7e30e35 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi @@ -63,20 +63,20 @@ gic: interrupt-controller@6000000 { its: msi-controller@6020000 { compatible = "arm,gic-v3-its"; msi-controller; + #msi-cells = <1>; reg = <0x0 0x6020000 0 0x20000>; }; }; rstcr: syscon@1e60000 { - compatible = "fsl,ls2080a-rstcr", "syscon"; + compatible = "fsl,ls1028a-reset", "syscon", "simple-mfd"; reg = <0x0 0x1e60000 0x0 0x4>; - }; - reboot { - compatible = "syscon-reboot"; - regmap = <&rstcr>; - offset = <0x0>; - mask = <0x2>; + reboot { + compatible = "syscon-reboot"; + offset = <0x0>; + mask = <0x2>; + }; }; thermal-zones { @@ -758,7 +758,7 @@ fsl_mc: fsl-mc@80c000000 { compatible = "fsl,qoriq-mc"; reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ - msi-parent = <&its>; + msi-parent = <&its 0>; iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */ dma-coherent; #address-cells = <3>; @@ -1075,7 +1075,7 @@ QORIQ_CLK_PLL_DIV(4)>, }; pcie1: pcie@3400000 { - compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie"; + compatible = "fsl,ls2080a-pcie"; reg-names = "regs", "config"; interrupts = ; interrupt-names = "intr"; @@ -1085,7 +1085,7 @@ pcie1: pcie@3400000 { dma-coherent; num-viewport = <6>; bus-range = <0x0 0xff>; - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, @@ -1097,7 +1097,7 @@ pcie1: pcie@3400000 { }; pcie2: pcie@3500000 { - compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie"; + compatible = "fsl,ls2080a-pcie"; reg-names = "regs", "config"; interrupts = ; interrupt-names = "intr"; @@ -1107,7 +1107,7 @@ pcie2: pcie@3500000 { dma-coherent; num-viewport = <6>; bus-range = <0x0 0xff>; - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, @@ -1119,7 +1119,7 @@ pcie2: pcie@3500000 { }; pcie3: pcie@3600000 { - compatible = "fsl,ls2080a-pcie", 
"fsl,ls2085a-pcie"; + compatible = "fsl,ls2080a-pcie"; reg-names = "regs", "config"; interrupts = ; interrupt-names = "intr"; @@ -1129,7 +1129,7 @@ pcie3: pcie@3600000 { dma-coherent; num-viewport = <256>; bus-range = <0x0 0xff>; - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, @@ -1141,7 +1141,7 @@ pcie3: pcie@3600000 { }; pcie4: pcie@3700000 { - compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie"; + compatible = "fsl,ls2080a-pcie"; reg-names = "regs", "config"; interrupts = ; interrupt-names = "intr"; @@ -1151,7 +1151,7 @@ pcie4: pcie@3700000 { dma-coherent; num-viewport = <6>; bus-range = <0x0 0xff>; - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, @@ -1218,7 +1218,7 @@ ccn@4000000 { interrupts = ; }; - rcpm: power-controller@1e34040 { + rcpm: wakeup-controller@1e34040 { compatible = "fsl,ls208xa-rcpm", "fsl,qoriq-rcpm-2.1+"; reg = <0x0 0x1e34040 0x0 0x18>; #fsl,rcpm-wakeup-cells = <6>; diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts index da0f58e26b9aae..f6a4f8d5430156 100644 --- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts +++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-tqmlx2160a-mblx2160a.dts @@ -320,7 +320,7 @@ hub_2_0: hub@1 { reg = <1>; peer-hub = <&hub_3_0>; reset-gpios = <&gpioex1 0 GPIO_ACTIVE_LOW>; - vcc-supply = <®_vcc3v3>; + vdd-supply = <®_vcc3v3>; }; hub_3_0: hub@2 { @@ -328,7 +328,7 @@ hub_3_0: hub@2 { reg = <2>; peer-hub = <&hub_2_0>; reset-gpios = <&gpioex1 0 GPIO_ACTIVE_LOW>; - vcc-supply = <®_vcc3v3>; + vdd-supply = <®_vcc3v3>; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi index bd75a658767ddf..927ecf66a74042 100644 --- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi @@ -398,6 +398,7 @@ gic: interrupt-controller@6000000 { its: msi-controller@6020000 { compatible = "arm,gic-v3-its"; msi-controller; + #msi-cells = <1>; reg = <0x0 0x6020000 0 0x20000>; }; }; @@ -1078,7 +1079,7 @@ watchdog@23a0000 { timeout-sec = <30>; }; - rcpm: power-controller@1e34040 { + rcpm: wakeup-controller@1e34040 { compatible = "fsl,lx2160a-rcpm", "fsl,qoriq-rcpm-2.1+"; reg = <0x0 0x1e34040 0x0 0x1c>; #fsl,rcpm-wakeup-cells = <7>; @@ -1181,7 +1182,7 @@ pcie1: pcie@3400000 { ppio-wins = <8>; bus-range = <0x0 0xff>; ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, @@ -1209,7 +1210,7 @@ pcie2: pcie@3500000 { ppio-wins = <8>; bus-range = <0x0 0xff>; ranges = <0x82000000 0x0 0x40000000 0x88 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>, @@ -1237,7 +1238,7 @@ pcie3: pcie@3600000 { ppio-wins = <24>; bus-range = <0x0 0xff>; ranges = <0x82000000 0x0 0x40000000 0x90 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 
0 0 GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>, @@ -1265,7 +1266,7 @@ pcie4: pcie@3700000 { ppio-wins = <8>; bus-range = <0x0 0xff>; ranges = <0x82000000 0x0 0x40000000 0x98 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, @@ -1293,7 +1294,7 @@ pcie5: pcie@3800000 { ppio-wins = <24>; bus-range = <0x0 0xff>; ranges = <0x82000000 0x0 0x40000000 0xa0 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>, @@ -1321,7 +1322,7 @@ pcie6: pcie@3900000 { ppio-wins = <8>; bus-range = <0x0 0xff>; ranges = <0x82000000 0x0 0x40000000 0xa8 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ - msi-parent = <&its>; + msi-parent = <&its 0>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, @@ -1777,7 +1778,7 @@ fsl_mc: fsl-mc@80c000000 { compatible = "fsl,qoriq-mc"; reg = <0x00000008 0x0c000000 0 0x40>, <0x00000000 0x08340000 0 0x40000>; - msi-parent = <&its>; + msi-parent = <&its 0>; /* iommu-map property is fixed up by u-boot */ iommu-map = <0 &smmu 0 0>; dma-coherent; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi index f7a91d43a0ffe1..575be8115e427e 100644 --- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi @@ -34,6 +34,8 @@ lpspi0: spi@5a000000 { assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>; assigned-clock-rates = <60000000>; power-domains = <&pd IMX_SC_R_SPI_0>; + dmas = <&edma2 1 0 0>, <&edma2 0 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -50,6 +52,8 @@ lpspi1: spi@5a010000 { assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>; assigned-clock-rates = <60000000>; power-domains = <&pd IMX_SC_R_SPI_1>; + dmas = <&edma2 3 0 0>, <&edma2 2 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -66,6 +70,8 @@ lpspi2: spi@5a020000 { assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>; assigned-clock-rates = <60000000>; power-domains = <&pd IMX_SC_R_SPI_2>; + dmas = <&edma2 5 0 0>, <&edma2 4 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -82,6 +88,8 @@ lpspi3: spi@5a030000 { assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>; assigned-clock-rates = <60000000>; power-domains = <&pd IMX_SC_R_SPI_3>; + dmas = <&edma2 7 0 0>, <&edma2 6 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -303,6 +311,8 @@ adma_pwm_lpcg: clock-controller@5a590000 { i2c0: i2c@5a800000 { reg = <0x5a800000 0x4000>; + #address-cells = <1>; + #size-cells = <0>; interrupts = ; clocks = <&i2c0_lpcg IMX_LPCG_CLK_0>, <&i2c0_lpcg IMX_LPCG_CLK_4>; @@ -315,6 +325,8 @@ i2c0: i2c@5a800000 { i2c1: i2c@5a810000 { reg = <0x5a810000 0x4000>; + #address-cells = <1>; + #size-cells = <0>; interrupts = ; clocks = <&i2c1_lpcg IMX_LPCG_CLK_0>, <&i2c1_lpcg IMX_LPCG_CLK_4>; @@ -327,6 +339,8 @@ i2c1: i2c@5a810000 { i2c2: i2c@5a820000 { reg = <0x5a820000 0x4000>; + #address-cells = <1>; + #size-cells = <0>; interrupts = ; clocks = <&i2c2_lpcg IMX_LPCG_CLK_0>, <&i2c2_lpcg IMX_LPCG_CLK_4>; @@ -339,6 +353,8 @@ i2c2: i2c@5a820000 { i2c3: i2c@5a830000 { reg = <0x5a830000 0x4000>; + #address-cells = <1>; + #size-cells 
= <0>; interrupts = ; clocks = <&i2c3_lpcg IMX_LPCG_CLK_0>, <&i2c3_lpcg IMX_LPCG_CLK_4>; @@ -362,7 +378,7 @@ adc0: adc@5a880000 { assigned-clock-rates = <24000000>; power-domains = <&pd IMX_SC_R_ADC_0>; status = "disabled"; - }; + }; adc1: adc@5a890000 { compatible = "nxp,imx8qxp-adc"; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi index 77d2928997b4be..d39242c1b9f795 100644 --- a/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8-ss-img.dtsi @@ -26,7 +26,6 @@ jpegdec: jpegdec@58400000 { assigned-clock-rates = <200000000>, <200000000>; power-domains = <&pd IMX_SC_R_MJPEG_DEC_MP>, <&pd IMX_SC_R_MJPEG_DEC_S0>; - slot = <0>; }; jpegenc: jpegenc@58450000 { @@ -39,7 +38,6 @@ jpegenc: jpegenc@58450000 { assigned-clock-rates = <200000000>, <200000000>; power-domains = <&pd IMX_SC_R_MJPEG_ENC_MP>, <&pd IMX_SC_R_MJPEG_ENC_S0>; - slot = <0>; }; img_jpeg_dec_lpcg: clock-controller@585d0000 { diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi new file mode 100644 index 00000000000000..d00036204a8c24 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0-only and MIT + +/* + * Copyright 2024 NXP + */ + +lvds0_subsys: bus@56240000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x56240000 0x0 0x56240000 0x10000>; + + qm_lvds0_lis_lpcg: qxp_mipi1_lis_lpcg: clock-controller@56243000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56243000 0x4>; + #clock-cells = <1>; + clock-output-names = "mipi1_lis_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1>; + }; + + qm_lvds0_pwm_lpcg: qxp_mipi1_pwm_lpcg: clock-controller@5624300c { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5624300c 0x4>; + #clock-cells = <1>; + clock-output-names = "mipi1_pwm_lpcg_clk", + "mipi1_pwm_lpcg_ipg_clk", + "mipi1_pwm_lpcg_32k_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>; + }; + + qm_lvds0_i2c0_lpcg: qxp_mipi1_i2c0_lpcg: clock-controller@56243010 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56243010 0x4>; + #clock-cells = <1>; + clock-output-names = "mipi1_i2c0_lpcg_clk", + "mipi1_i2c0_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>; + }; + + qm_pwm_lvds0: qxp_pwm_mipi_lvds1: pwm@56244000 { + compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm"; + reg = <0x56244000 0x1000>; + clock-names = "ipg", "per"; + assigned-clocks = <&clk IMX_SC_R_MIPI_1_PWM_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + #pwm-cells = <3>; + power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>; + status = "disabled"; + }; + + qm_i2c0_lvds0: qxp_i2c0_mipi_lvds1: i2c@56246000 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x56246000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <8>; + clock-names = "per", "ipg"; + assigned-clocks = <&clk IMX_SC_R_MIPI_1_I2C_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lvds1.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lvds1.dtsi new file mode 100644 index 00000000000000..12ae4f48e1e1c3 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-lvds1.dtsi @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0-only and MIT + +/* + * Copyright 2024 NXP + */ + +lvds1_subsys: bus@57240000 { + compatible = "simple-bus"; + interrupt-parent = <&irqsteer_lvds1>; + 
#address-cells = <1>; + #size-cells = <1>; + ranges = <0x57240000 0x0 0x57240000 0x10000>; + + irqsteer_lvds1: interrupt-controller@57240000 { + compatible = "fsl,imx8qm-irqsteer", "fsl,imx-irqsteer"; + reg = <0x57240000 0x1000>; + interrupts = ; + interrupt-controller; + interrupt-parent = <&gic>; + #interrupt-cells = <1>; + clocks = <&lvds1_lis_lpcg IMX_LPCG_CLK_4>; + clock-names = "ipg"; + power-domains = <&pd IMX_SC_R_LVDS_1>; + fsl,channel = <0>; + fsl,num-irqs = <32>; + }; + + lvds1_lis_lpcg: clock-controller@57243000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x57243000 0x4>; + #clock-cells = <1>; + clocks = <&lvds_ipg_clk>; + clock-indices = ; + clock-output-names = "lvds1_lis_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_LVDS_1>; + }; + + lvds1_pwm_lpcg: clock-controller@5724300c { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5724300c 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_LVDS_1_PWM_0 IMX_SC_PM_CLK_PER>, + <&lvds_ipg_clk>; + clock-indices = , ; + clock-output-names = "lvds1_pwm_lpcg_clk", + "lvds1_pwm_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_LVDS_1_PWM_0>; + }; + + lvds1_i2c0_lpcg: clock-controller@57243010 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x57243010 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_LVDS_1_I2C_0 IMX_SC_PM_CLK_PER>, + <&lvds_ipg_clk>; + clock-indices = , ; + clock-output-names = "lvds1_i2c0_lpcg_clk", + "lvds1_i2c0_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_LVDS_1_I2C_0>; + }; + + lvds1_i2c1_lpcg: clock-controller@57243014 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x57243014 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_LVDS_1_I2C_0 IMX_SC_PM_CLK_PER>, + <&lvds_ipg_clk>; + clock-indices = , ; + clock-output-names = "lvds1_i2c1_lpcg_clk", + "lvds1_i2c1_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_LVDS_1_I2C_0>; + }; + + pwm_lvds1: pwm@57244000 { + compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm"; + reg = <0x57244000 0x1000>; + clocks = <&lvds1_pwm_lpcg IMX_LPCG_CLK_4>, + <&lvds1_pwm_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "per"; + assigned-clocks = <&clk IMX_SC_R_LVDS_1_PWM_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + #pwm-cells = <3>; + power-domains = <&pd IMX_SC_R_LVDS_1_PWM_0>; + status = "disabled"; + }; + + i2c0_lvds1: i2c@57246000 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x57246000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <8>; + clocks = <&lvds1_i2c0_lpcg IMX_LPCG_CLK_0>, + <&lvds1_i2c0_lpcg IMX_LPCG_CLK_4>; + clock-names = "per", "ipg"; + assigned-clocks = <&clk IMX_SC_R_LVDS_1_I2C_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_LVDS_1_I2C_0>; + status = "disabled"; + }; + + i2c1_lvds1: i2c@57247000 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x57247000 0x1000>; + interrupts = <9>; + clocks = <&lvds1_i2c1_lpcg IMX_LPCG_CLK_0>, + <&lvds1_i2c1_lpcg IMX_LPCG_CLK_4>; + clock-names = "per", "ipg"; + assigned-clocks = <&clk IMX_SC_R_LVDS_1_I2C_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_LVDS_1_I2C_0>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-mipi0.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-mipi0.dtsi new file mode 100644 index 00000000000000..9c5b0cbdfcbdd7 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-mipi0.dtsi @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-only and MIT + +/* + * Copyright 2024 NXP + */ + +mipi0_subsys: bus@56220000 { + compatible = 
"simple-bus"; + interrupt-parent = <&irqsteer_mipi0>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x56220000 0x0 0x56220000 0x10000>; + + irqsteer_mipi0: interrupt-controller@56220000 { + compatible = "fsl,imx8qxp-irqsteer", "fsl,imx-irqsteer"; + reg = <0x56220000 0x1000>; + interrupts = ; + interrupt-controller; + interrupt-parent = <&gic>; + #interrupt-cells = <1>; + clocks = <&mipi0_lis_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg"; + power-domains = <&pd IMX_SC_R_MIPI_0>; + fsl,channel = <0>; + fsl,num-irqs = <32>; + }; + + mipi0_lis_lpcg: clock-controller@56223000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56223000 0x4>; + #clock-cells = <1>; + power-domains = <&pd IMX_SC_R_MIPI_0>; + }; + + mipi0_pwm_lpcg: clock-controller@5622300c { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5622300c 0x4>; + #clock-cells = <1>; + power-domains = <&pd IMX_SC_R_MIPI_0_PWM_0>; + }; + + mipi0_i2c0_lpcg_ipg_clk: clock-controller@56223014 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56223014 0x4>; + #clock-cells = <1>; + clocks = <&mipi0_i2c0_lpcg_ipg_s_clk IMX_LPCG_CLK_0>; + clock-indices = ; + clock-output-names = "mipi0_i2c0_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_0_I2C_0>; + }; + + mipi0_i2c0_lpcg_ipg_s_clk: clock-controller@56223018 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56223018 0x4>; + #clock-cells = <1>; + clocks = <&dsi_ipg_clk>; + clock-indices = ; + clock-output-names = "mipi0_i2c0_lpcg_ipg_s_clk"; + power-domains = <&pd IMX_SC_R_MIPI_0_I2C_0>; + }; + + mipi0_i2c0_lpcg_clk: clock-controller@5622301c { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5622301c 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_MIPI_0_I2C_0 IMX_SC_PM_CLK_MISC2>; + clock-indices = ; + clock-output-names = "mipi0_i2c0_lpcg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_0_I2C_0>; + }; + + mipi0_i2c1_lpcg_ipg_clk: clock-controller@56223024 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56223024 0x4>; + #clock-cells = <1>; + clocks = <&mipi0_i2c1_lpcg_ipg_s_clk IMX_LPCG_CLK_0>; + clock-indices = ; + clock-output-names = "mipi0_i2c1_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_0_I2C_1>; + }; + + mipi0_i2c1_lpcg_ipg_s_clk: clock-controller@56223028 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56223028 0x4>; + #clock-cells = <1>; + clocks = <&dsi_ipg_clk>; + clock-indices = ; + clock-output-names = "mipi0_i2c1_lpcg_ipg_s_clk"; + power-domains = <&pd IMX_SC_R_MIPI_0_I2C_1>; + }; + + mipi0_i2c1_lpcg_clk: clock-controller@5622302c { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5622302c 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_MIPI_0_I2C_1 IMX_SC_PM_CLK_MISC2>; + clock-indices = ; + clock-output-names = "mipi0_i2c1_lpcg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_0_I2C_1>; + }; + + pwm_mipi0: pwm@56224000 { + compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm"; + reg = <0x56224000 0x1000>; + clocks = <&mipi0_pwm_lpcg IMX_LPCG_CLK_4>, + <&mipi0_pwm_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "per"; + assigned-clocks = <&clk IMX_SC_R_MIPI_0_PWM_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + #pwm-cells = <3>; + power-domains = <&pd IMX_SC_R_MIPI_0_PWM_0>; + status = "disabled"; + }; + + i2c0_mipi0: i2c@56226000 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x56226000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <8>; + clocks = <&mipi0_i2c0_lpcg_clk IMX_LPCG_CLK_0>, + <&mipi0_i2c0_lpcg_ipg_clk IMX_LPCG_CLK_0>; + clock-names = "per", "ipg"; + assigned-clocks = <&mipi0_i2c0_lpcg_clk IMX_LPCG_CLK_0>; + 
assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_MIPI_0_I2C_0>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-mipi1.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-mipi1.dtsi new file mode 100644 index 00000000000000..5b1f08e412b24a --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8-ss-mipi1.dtsi @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0-only and MIT + +/* + * Copyright 2024 NXP + */ + +mipi1_subsys: bus@57220000 { + compatible = "simple-bus"; + interrupt-parent = <&irqsteer_mipi1>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x57220000 0x0 0x57220000 0x10000>; + + irqsteer_mipi1: interrupt-controller@57220000 { + compatible = "fsl,imx8qm-irqsteer", "fsl,imx-irqsteer"; + reg = <0x57220000 0x1000>; + interrupts = ; + interrupt-controller; + interrupt-parent = <&gic>; + #interrupt-cells = <1>; + clocks = <&mipi1_lis_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg"; + power-domains = <&pd IMX_SC_R_MIPI_1>; + fsl,channel = <0>; + fsl,num-irqs = <32>; + }; + + mipi1_lis_lpcg: clock-controller@57223000 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x57223000 0x4>; + #clock-cells = <1>; + clocks = <&dsi_ipg_clk>; + clock-indices = ; + clock-output-names = "mipi1_lis_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1>; + }; + + mipi1_pwm_lpcg: clock-controller@5722300c { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5722300c 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_MIPI_1_PWM_0 IMX_SC_PM_CLK_PER>, + <&dsi_ipg_clk>; + clock-indices = , ; + clock-output-names = "mipi1_pwm_lpcg_clk", + "mipi1_pwm_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>; + }; + + mipi1_i2c0_lpcg_clk: clock-controller@5722301c { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5722301c 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_MIPI_1_I2C_0 IMX_SC_PM_CLK_MISC2>; + clock-indices = ; + clock-output-names = "mipi1_i2c0_lpcg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>; + }; + + mipi1_i2c0_lpcg_ipg_clk: clock-controller@57223014 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x57223014 0x4>; + #clock-cells = <1>; + clocks = <&mipi1_i2c0_lpcg_ipg_s_clk IMX_LPCG_CLK_0>; + clock-indices = ; + clock-output-names = "mipi1_i2c0_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>; + }; + + mipi1_i2c0_lpcg_ipg_s_clk: clock-controller@57223018 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x57223018 0x4>; + #clock-cells = <1>; + clocks = <&dsi_ipg_clk>; + clock-indices = ; + clock-output-names = "mipi1_i2c0_lpcg_ipg_s_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>; + }; + + mipi1_i2c1_lpcg_ipg_clk: clock-controller@57223024 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x57223024 0x4>; + #clock-cells = <1>; + clocks = <&mipi1_i2c1_lpcg_ipg_s_clk IMX_LPCG_CLK_0>; + clock-indices = ; + clock-output-names = "mipi1_i2c1_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_1>; + }; + + mipi1_i2c1_lpcg_ipg_s_clk: clock-controller@57223028 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x57223028 0x4>; + #clock-cells = <1>; + clocks = <&dsi_ipg_clk>; + clock-indices = ; + clock-output-names = "mipi1_i2c1_lpcg_ipg_s_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_1>; + }; + + mipi1_i2c1_lpcg_clk: clock-controller@5722302c { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x5722302c 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_MIPI_1_I2C_1 IMX_SC_PM_CLK_MISC2>; + clock-indices = ; + clock-output-names = "mipi1_i2c1_lpcg_clk"; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_1>; + }; + + pwm_mipi1: pwm@57224000 { + 
compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm"; + reg = <0x57224000 0x1000>; + clocks = <&mipi1_pwm_lpcg IMX_LPCG_CLK_4>, + <&mipi1_pwm_lpcg IMX_LPCG_CLK_0>; + clock-names = "ipg", "per"; + assigned-clocks = <&clk IMX_SC_R_MIPI_1_PWM_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + #pwm-cells = <3>; + power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>; + status = "disabled"; + }; + + i2c0_mipi1: i2c@57226000 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x57226000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <8>; + interrupt-parent = <&irqsteer_mipi1>; + clocks = <&mipi1_i2c0_lpcg_clk IMX_LPCG_CLK_0>, + <&mipi1_i2c0_lpcg_ipg_clk IMX_LPCG_CLK_0>; + clock-names = "per", "ipg"; + assigned-clocks = <&mipi1_i2c0_lpcg_clk IMX_LPCG_CLK_0>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi index 66b0fcc6687df3..4d1ad052c5b661 100644 --- a/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8dx-colibri.dtsi @@ -9,3 +9,14 @@ / { model = "Toradex Colibri iMX8DX Module"; }; + +&thermal_zones { + pmic-thermal { + cooling-maps { + map0 { + cooling-device = <&A35_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&A35_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts index 1a74ac3ee4ee90..4caaecc1922771 100644 --- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts @@ -722,12 +722,6 @@ &lpspi3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpspi3>; status = "okay"; - - spidev0: spi@0 { - reg = <0>; - compatible = "rohm,dh2228fv"; - spi-max-frequency = <30000000>; - }; }; &iomuxc { diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi index 6086dae2e5fbef..ea1d5b9c6bae04 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi @@ -56,6 +56,20 @@ pcie0_refclk_gated: pcie0-refclk-gated { enable-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>; }; + reg_1v5: regulator-1v5 { + compatible = "regulator-fixed"; + regulator-name = "1V5"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + }; + + reg_1v8: regulator-1v8 { + compatible = "regulator-fixed"; + regulator-name = "1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + reg_audio: regulator-audio { compatible = "regulator-fixed"; regulator-name = "3v3_aud"; @@ -187,6 +201,8 @@ camera@10 { assigned-clock-parents = <&clk IMX8MM_CLK_24M>; assigned-clock-rates = <24000000>; AVDD-supply = <®_camera>; /* 2.8v */ + DVDD-supply = <®_1v5>; + DOVDD-supply = <®_1v8>; powerdown-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts index 905c98cb080d2f..97ff1ddd631888 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts @@ -62,8 +62,8 @@ adv_bridge: hdmi@3d { compatible = "adi,adv7535"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hdmi_bridge>; - reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>; - reg-names = "main", "cec", "edid", "packet"; 
+ reg = <0x3d>, <0x3e>, <0x3c>, <0x3f>; + reg-names = "main", "edid", "cec", "packet"; adi,dsi-lanes = <4>; avdd-supply = <®_hdmi>; a2vdd-supply = <®_hdmi>; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts index b1f2beb40a98f0..472c584fb3bd29 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts @@ -168,7 +168,7 @@ &fec1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_fec1>; phy-mode = "rgmii-id"; - phy-handle = <&fec1_phy>; + phy-handle = <&fec1_phy_bcm>; phy-supply = <&buck4_reg>; fsl,magic-packet; status = "okay"; @@ -178,7 +178,7 @@ mdio { #size-cells = <0>; /* Atheros AR8031 PHY */ - fec1_phy: ethernet-phy@0 { + fec1_phy_ath: ethernet-phy@0 { compatible = "ethernet-phy-ieee802.3-c22"; reg = <0>; /* @@ -191,6 +191,7 @@ fec1_phy: ethernet-phy@0 { reset-deassert-us = <10000>; qca,keep-pll-enabled; vddio-supply = <&vddio>; + status = "disabled"; vddio: vddio-regulator { regulator-name = "VDDIO"; @@ -202,6 +203,20 @@ vddh: vddh-regulator { regulator-name = "VDDH"; }; }; + + /* Broadcom BCM54213PE PHY */ + fec1_phy_bcm: ethernet-phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <1>; + /* + * Dedicated ENET_INT# and ENET_WOL# signals are + * unused, the PHY does not provide cable detect + * interrupt. + */ + reset-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; + reset-assert-us = <10000>; + reset-deassert-us = <10000>; + }; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts b/arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts index 1c4e4d175989e6..7d2cb74c64eeee 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-emtop-baseboard.dts @@ -11,5 +11,53 @@ / { model = "Emtop Embedded Solutions i.MX8M Mini Baseboard V1"; compatible = "ees,imx8mm-emtop-baseboard", "ees,imx8mm-emtop-som", "fsl,imx8mm"; +}; + +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec1>; + phy-mode = "rgmii-id"; + phy-handle = <ðphy0>; + fsl,magic-packet; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@4 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <4>; + reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>; + reset-assert-us = <10000>; + vddio-supply = <&vddio>; + + vddio: vddio-regulator { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + }; + }; +}; +&iomuxc { + pinctrl_fec1: fec1grp { + fsl,pins = < + MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3 + MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3 + MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f + MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f + MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f + MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f + MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91 + MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91 + MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91 + MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91 + MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f + MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91 + MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91 + MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f + MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x19 + >; + }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi index 930e14fec42370..5f8336217bb88b 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi @@ -180,12 +180,21 @@ cpu { }; }; + 
spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + + spdif_in: spdif-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif1>; - spdif-out; - spdif-in; + audio-cpu = <&spdif1>; + audio-codec = <&spdif_out>, <&spdif_in>; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts index 92e62fe319290b..5eacbd9611eef3 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts @@ -220,6 +220,7 @@ &pcie_phy { }; &rv3028 { + aux-voltage-chargeable = <1>; trickle-resistor-ohms = <3000>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso index 353ace3601dc8d..78f4e8d5814daa 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso @@ -14,16 +14,11 @@ /dts-v1/; /plugin/; -&{/} { - compatible = "phytec,imx8mm-phygate-tauri-l"; - -}; - &gpio3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_gpio3_hog>; - uart4_rs485_en { + uart4-rs485-en-hog { gpio-hog; gpios = <20 GPIO_ACTIVE_HIGH>; output-low; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso index 8a75d6783ad2b4..66288948bdd39d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso @@ -15,16 +15,11 @@ /dts-v1/; /plugin/; -&{/} { - compatible = "phytec,imx8mm-phygate-tauri-l"; - -}; - &gpio3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_gpio3_hog>; - uart4_rs485_en { + uart4-rs485-en-hog { gpio-hog; gpios = <20 GPIO_ACTIVE_HIGH>; output-high; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso index 107f743fbb1c30..4719f5fbad0307 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rts-cts.dtso @@ -14,12 +14,6 @@ /dts-v1/; /plugin/; - -&{/} { - compatible = "phytec,imx8mm-phygate-tauri-l"; - -}; - &uart2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart2>; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts index ba6ce3c7f4779d..c3835b2d860add 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts @@ -215,6 +215,7 @@ &pwm4 { /* RTC */ &rv3028 { + aux-voltage-chargeable = <1>; trickle-resistor-ohms = <3000>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi index ca0205b9019e6f..8f58c84e14c8eb 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi @@ -83,7 +83,6 @@ &gpu_3d { }; &i2c1 { - clock-frequency = <100000>; pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c1>; pinctrl-1 = <&pinctrl_i2c1_gpio>; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi 
index d7830df5b6f966..cdfacbc35db57b 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi @@ -8,7 +8,6 @@ / { model = "Variscite VAR-SOM-MX8MM module"; - compatible = "variscite,var-som-mx8mm", "fsl,imx8mm"; chosen { stdout-path = &uart4; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso index 4eaf8aabcbfff9..c09aa80d2ba23b 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-imx219.dtso @@ -13,6 +13,20 @@ &{/} { compatible = "gw,imx8mm-gw72xx-0x", "fsl,imx8mm"; + reg_vana: regulator-2p8v { + compatible = "regulator-fixed"; + regulator-name = "2P8V"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + }; + + reg_vddl: regulator-1p2v { + compatible = "regulator-fixed"; + regulator-name = "1P2V"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; + reg_cam: regulator-cam { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_cam>; @@ -45,6 +59,8 @@ imx219: sensor@10 { reg = <0x10>; clocks = <&cam24m>; VDIG-supply = <&reg_cam>; + VANA-supply = <&reg_vana>; + VDDL-supply = <&reg_vddl>; port { /* MIPI CSI-2 bus endpoint */ diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso index f6ad1a4b8b6659..bb2056746f8c9a 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso @@ -15,12 +15,8 @@ /dts-v1/; /plugin/; -&{/} { - compatible = "gw,imx8mm-gw72xx-0x"; -}; - &gpio4 { - rs485_en { + rs485-en-hog { gpio-hog; gpios = <0 GPIO_ACTIVE_HIGH>; output-low; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso index c3cd9f2b0db342..45ac8bdce86995 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs422.dtso @@ -18,19 +18,15 @@ /dts-v1/; /plugin/; -&{/} { - compatible = "gw,imx8mm-gw72xx-0x"; -}; - &gpio4 { - rs485_en { + rs485-en-hog { gpio-hog; gpios = <0 GPIO_ACTIVE_HIGH>; output-high; line-name = "rs485_en"; }; - rs485_hd { + rs485-hd-hog { gpio-hog; gpios = <2 GPIO_ACTIVE_HIGH>; output-low; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso index cc0a287226ab8f..30aa620d7004dd 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs485.dtso @@ -18,19 +18,15 @@ /dts-v1/; /plugin/; -&{/} { - compatible = "gw,imx8mm-gw72xx-0x"; -}; - &gpio4 { - rs485_en { + rs485-en-hog { gpio-hog; gpios = <0 GPIO_ACTIVE_HIGH>; output-high; line-name = "rs485_en"; }; - rs485_hd { + rs485-hd-hog { gpio-hog; gpios = <2 GPIO_ACTIVE_HIGH>; output-high; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso index f3ece4b7fbbded..cfc014eb038d69 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-imx219.dtso @@ -13,6 +13,20 @@ &{/} { compatible = "gw,imx8mm-gw73xx-0x", "fsl,imx8mm"; + reg_vana: regulator-2p8v { +
compatible = "regulator-fixed"; + regulator-name = "2P8V"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + }; + + reg_vddl: regulator-1p2v { + compatible = "regulator-fixed"; + regulator-name = "1P2V"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; + reg_cam: regulator-cam { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_cam>; @@ -45,6 +59,8 @@ imx219: sensor@10 { reg = <0x10>; clocks = <&cam24m>; VDIG-supply = <®_cam>; + VANA-supply = <®_vana>; + VDDL-supply = <®_vddl>; port { /* MIPI CSI-2 bus endpoint */ diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso index 1f8ea20dfafcb9..9bee7159a67b57 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso @@ -20,7 +20,7 @@ }; &gpio4 { - rs485_en { + rs485-en-hog { gpio-hog; gpios = <0 GPIO_ACTIVE_HIGH>; output-low; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso index 3e6404340d5299..e98f50bcec57fe 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs422.dtso @@ -23,14 +23,14 @@ }; &gpio4 { - rs485_en { + rs485-en-hog { gpio-hog; gpios = <0 GPIO_ACTIVE_HIGH>; output-high; line-name = "rs485_en"; }; - rs485_hd { + rs485-hd-hog { gpio-hog; gpios = <2 GPIO_ACTIVE_HIGH>; output-low; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso index 2c71ab9854cb36..e875ff4637bd51 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs485.dtso @@ -23,14 +23,14 @@ }; &gpio4 { - rs485_en { + rs485-en-hog { gpio-hog; gpios = <0 GPIO_ACTIVE_HIGH>; output-high; line-name = "rs485_en"; }; - rs485_hd { + rs485-hd-hog { gpio-hog; gpios = <2 GPIO_ACTIVE_HIGH>; output-high; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx-0x.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx-0x.dts new file mode 100644 index 00000000000000..04f06a55da5ce0 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx-0x.dts @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright 2023 Gateworks Corporation + */ + +/dts-v1/; + +#include "imx8mm.dtsi" +#include "imx8mm-venice-gw700x.dtsi" +#include "imx8mm-venice-gw75xx.dtsi" + +/ { + model = "Gateworks Venice GW75xx-0x i.MX8MM Development Kit"; + compatible = "gateworks,imx8mm-gw75xx-0x", "fsl,imx8mm"; + + chosen { + stdout-path = &uart2; + }; +}; + +/* Disable SOM interfaces not used on baseboard */ +&fec1 { + status = "disabled"; +}; + +&usdhc1 { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx.dtsi new file mode 100644 index 00000000000000..5eb92005195cfe --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw75xx.dtsi @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright 2023 Gateworks Corporation + */ + +#include +#include +#include + +/ { + led-controller { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio_leds>; + + led-0 { + function = LED_FUNCTION_STATUS; + color = ; + gpios = 
<&gpio4 0 GPIO_ACTIVE_HIGH>; + default-state = "on"; + linux,default-trigger = "heartbeat"; + }; + + led-1 { + function = LED_FUNCTION_STATUS; + color = ; + gpios = <&gpio4 2 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + pcie0_refclk: clock-pcie0 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + }; + + pps { + compatible = "pps-gpio"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pps>; + gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; + status = "okay"; + }; + + reg_usb2_vbus: regulator-usb2-vbus { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usb2_en>; + regulator-name = "usb2_vbus"; + gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; + + reg_usdhc2_vmmc: regulator-usdhc2 { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>; + regulator-name = "SD2_3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +}; + +/* off-board header */ +&ecspi2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi2>; + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +&gpio1 { + gpio-line-names = + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "gpioa", "gpiob", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpio4 { + gpio-line-names = + "", "", "", "pci_usb_sel", + "", "", "", "pci_wdis#", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpio5 { + gpio-line-names = + "", "", "", "", + "gpioc", "gpiod", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", ""; +}; + +&i2c2 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c2>; + status = "okay"; + + eeprom@52 { + compatible = "atmel,24c32"; + reg = <0x52>; + pagesize = <32>; + }; +}; + +/* off-board header */ +&i2c3 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c3>; + status = "okay"; +}; + +&pcie_phy { + fsl,refclk-pad-mode = ; + fsl,clkreq-unsupported; + clocks = <&pcie0_refclk>; + clock-names = "ref"; + status = "okay"; +}; + +&pcie0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcie0>; + reset-gpio = <&gpio4 6 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +/* GPS */ +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + status = "okay"; +}; + +/* USB1 - Type C front panel SINK port J14 */ +&usbotg1 { + dr_mode = "peripheral"; + status = "okay"; +}; + +/* USB2 4-port USB3.0 HUB: + * P1 - USBC connector (host only) + * P2 - USB2 test connector + * P3 - miniPCIe full card + * P4 - miniPCIe half card + */ +&usbotg2 { + dr_mode = "host"; + vbus-supply = <®_usb2_vbus>; + status = "okay"; +}; + +/* microSD */ +&usdhc2 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; + cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; + vmmc-supply = <®_usdhc2_vmmc>; + bus-width = <4>; + status = "okay"; +}; + +&iomuxc { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hog>; + + pinctrl_hog: hoggrp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x40000040 /* GPIOA */ 
+ MX8MM_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x40000040 /* GPIOB */ + MX8MM_IOMUXC_SAI1_RXD1_GPIO4_IO3 0x40000106 /* PCI_USBSEL */ + MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x40000106 /* PCIE_WDIS# */ + MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x40000040 /* GPIOD */ + MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x40000040 /* GPIOC */ + >; + }; + + pinctrl_gpio_leds: gpioledgrp { + fsl,pins = < + MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x6 /* LEDG */ + MX8MM_IOMUXC_SAI1_RXD0_GPIO4_IO2 0x6 /* LEDR */ + >; + }; + + pinctrl_i2c2: i2c2grp { + fsl,pins = < + MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c2 + MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c2 + >; + }; + + pinctrl_i2c3: i2c3grp { + fsl,pins = < + MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c2 + MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c2 + >; + }; + + pinctrl_pcie0: pciegrp { + fsl,pins = < + MX8MM_IOMUXC_SAI1_RXD4_GPIO4_IO6 0x106 + >; + }; + + pinctrl_pps: ppsgrp { + fsl,pins = < + MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x106 + >; + }; + + pinctrl_reg_usb2_en: regusb2grp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x6 /* USBHUB_RST# (ext p/u) */ + >; + }; + + pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { + fsl,pins = < + MX8MM_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x40 + >; + }; + + pinctrl_spi2: spi2grp { + fsl,pins = < + MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x140 + MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x140 + MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x140 + MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x140 + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140 + MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140 + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190 + MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0 + MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0 + MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0 + MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0 + MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc0 + >; + }; + + pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { + fsl,pins = < + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194 + MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4 + MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4 + MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4 + MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4 + MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc0 + >; + }; + + pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { + fsl,pins = < + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196 + MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6 + MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6 + MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6 + MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6 + MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc0 + >; + }; + + pinctrl_usdhc2_gpio: usdhc2gpiogrp { + fsl,pins = < + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x1c4 + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts index 136cb30df03a66..35ae0faa815bc5 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts @@ -364,6 +364,8 @@ gsc: gsc@20 { interrupts = <16 IRQ_TYPE_EDGE_FALLING>; interrupt-controller; #interrupt-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; adc { compatible = "gw,gsc-adc"; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts index 1d56f2a6c06a5c..c11260c26d0b43 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts +++ 
b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts @@ -314,6 +314,8 @@ gsc: gsc@20 { interrupts = <6 IRQ_TYPE_EDGE_FALLING>; interrupt-controller; #interrupt-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; adc { compatible = "gw,gsc-adc"; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts index 45470160f98f51..db1737bf637df1 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts @@ -280,6 +280,8 @@ gsc: gsc@20 { interrupts = <26 IRQ_TYPE_EDGE_FALLING>; interrupt-controller; #interrupt-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; adc { compatible = "gw,gsc-adc"; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts index ef951bc9f0dd41..05489a31e7fd8d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts @@ -330,6 +330,8 @@ gsc: gsc@20 { interrupts = <26 IRQ_TYPE_EDGE_FALLING>; interrupt-controller; #interrupt-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; adc { compatible = "gw,gsc-adc"; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905-0x.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905-0x.dts deleted file mode 100644 index 914753f062cd7f..00000000000000 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905-0x.dts +++ /dev/null @@ -1,28 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -/* - * Copyright 2023 Gateworks Corporation - */ - -/dts-v1/; - -#include "imx8mm.dtsi" -#include "imx8mm-venice-gw700x.dtsi" -#include "imx8mm-venice-gw7905.dtsi" - -/ { - model = "Gateworks Venice GW7905-0x i.MX8MM Development Kit"; - compatible = "gateworks,imx8mm-gw7905-0x", "fsl,imx8mm"; - - chosen { - stdout-path = &uart2; - }; -}; - -/* Disable SOM interfaces not used on baseboard */ -&fec1 { - status = "disabled"; -}; - -&usdhc1 { - status = "disabled"; -}; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905.dtsi deleted file mode 100644 index 5eb92005195cfe..00000000000000 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7905.dtsi +++ /dev/null @@ -1,303 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -/* - * Copyright 2023 Gateworks Corporation - */ - -#include -#include -#include - -/ { - led-controller { - compatible = "gpio-leds"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_gpio_leds>; - - led-0 { - function = LED_FUNCTION_STATUS; - color = ; - gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>; - default-state = "on"; - linux,default-trigger = "heartbeat"; - }; - - led-1 { - function = LED_FUNCTION_STATUS; - color = ; - gpios = <&gpio4 2 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - }; - - pcie0_refclk: clock-pcie0 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <100000000>; - }; - - pps { - compatible = "pps-gpio"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_pps>; - gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; - status = "okay"; - }; - - reg_usb2_vbus: regulator-usb2-vbus { - compatible = "regulator-fixed"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_reg_usb2_en>; - regulator-name = "usb2_vbus"; - gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>; - enable-active-high; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - }; - - reg_usdhc2_vmmc: regulator-usdhc2 { - compatible = "regulator-fixed"; - 
pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>; - regulator-name = "SD2_3P3V"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>; - enable-active-high; - }; -}; - -/* off-board header */ -&ecspi2 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_spi2>; - cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; - status = "okay"; -}; - -&gpio1 { - gpio-line-names = - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "gpioa", "gpiob", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", ""; -}; - -&gpio4 { - gpio-line-names = - "", "", "", "pci_usb_sel", - "", "", "", "pci_wdis#", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", ""; -}; - -&gpio5 { - gpio-line-names = - "", "", "", "", - "gpioc", "gpiod", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", ""; -}; - -&i2c2 { - clock-frequency = <400000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c2>; - status = "okay"; - - eeprom@52 { - compatible = "atmel,24c32"; - reg = <0x52>; - pagesize = <32>; - }; -}; - -/* off-board header */ -&i2c3 { - clock-frequency = <400000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c3>; - status = "okay"; -}; - -&pcie_phy { - fsl,refclk-pad-mode = ; - fsl,clkreq-unsupported; - clocks = <&pcie0_refclk>; - clock-names = "ref"; - status = "okay"; -}; - -&pcie0 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_pcie0>; - reset-gpio = <&gpio4 6 GPIO_ACTIVE_LOW>; - status = "okay"; -}; - -/* GPS */ -&uart1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_uart1>; - status = "okay"; -}; - -/* USB1 - Type C front panel SINK port J14 */ -&usbotg1 { - dr_mode = "peripheral"; - status = "okay"; -}; - -/* USB2 4-port USB3.0 HUB: - * P1 - USBC connector (host only) - * P2 - USB2 test connector - * P3 - miniPCIe full card - * P4 - miniPCIe half card - */ -&usbotg2 { - dr_mode = "host"; - vbus-supply = <®_usb2_vbus>; - status = "okay"; -}; - -/* microSD */ -&usdhc2 { - pinctrl-names = "default", "state_100mhz", "state_200mhz"; - pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>; - pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>; - pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; - cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; - vmmc-supply = <®_usdhc2_vmmc>; - bus-width = <4>; - status = "okay"; -}; - -&iomuxc { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_hog>; - - pinctrl_hog: hoggrp { - fsl,pins = < - MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x40000040 /* GPIOA */ - MX8MM_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x40000040 /* GPIOB */ - MX8MM_IOMUXC_SAI1_RXD1_GPIO4_IO3 0x40000106 /* PCI_USBSEL */ - MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x40000106 /* PCIE_WDIS# */ - MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x40000040 /* GPIOD */ - MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x40000040 /* GPIOC */ - >; - }; - - pinctrl_gpio_leds: gpioledgrp { - fsl,pins = < - MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x6 /* LEDG */ - MX8MM_IOMUXC_SAI1_RXD0_GPIO4_IO2 0x6 /* LEDR */ - >; - }; - - pinctrl_i2c2: i2c2grp { - fsl,pins = < - MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c2 - MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c2 - >; - }; - - pinctrl_i2c3: i2c3grp { - fsl,pins = < - MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c2 - MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c2 - >; - }; - - pinctrl_pcie0: pciegrp { - fsl,pins = < - MX8MM_IOMUXC_SAI1_RXD4_GPIO4_IO6 0x106 - >; - }; - - pinctrl_pps: ppsgrp { - fsl,pins = < - 
MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x106 - >; - }; - - pinctrl_reg_usb2_en: regusb2grp { - fsl,pins = < - MX8MM_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x6 /* USBHUB_RST# (ext p/u) */ - >; - }; - - pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { - fsl,pins = < - MX8MM_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x40 - >; - }; - - pinctrl_spi2: spi2grp { - fsl,pins = < - MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x140 - MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x140 - MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x140 - MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x140 - >; - }; - - pinctrl_uart1: uart1grp { - fsl,pins = < - MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140 - MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140 - >; - }; - - pinctrl_usdhc2: usdhc2grp { - fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190 - MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0 - MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0 - MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0 - MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0 - MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc0 - >; - }; - - pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { - fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194 - MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4 - MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4 - MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4 - MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4 - MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc0 - >; - }; - - pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { - fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196 - MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6 - MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6 - MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6 - MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6 - MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc0 - >; - }; - - pinctrl_usdhc2_gpio: usdhc2gpiogrp { - fsl,pins = < - MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x1c4 - >; - }; -}; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi index 20018ee2c803e3..77d14ea459e57f 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi @@ -40,6 +40,20 @@ led-3 { }; }; + reg_1v5: regulator-1v5 { + compatible = "regulator-fixed"; + regulator-name = "1V5"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + }; + + reg_1v8: regulator-1v8 { + compatible = "regulator-fixed"; + regulator-name = "1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + reg_audio: regulator-audio { compatible = "regulator-fixed"; regulator-name = "3v3_aud"; @@ -158,6 +172,8 @@ camera@10 { assigned-clock-parents = <&clk IMX8MN_CLK_24M>; assigned-clock-rates = <24000000>; AVDD-supply = <®_camera>; /* 2.8v */ + DVDD-supply = <®_1v5>; + DOVDD-supply = <®_1v8>; powerdown-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts index bbd80896db9648..1df5ceb1138793 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts @@ -62,8 +62,8 @@ adv_bridge: hdmi@3d { compatible = "adi,adv7535"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hdmi_bridge>; - reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>; - reg-names = "main", "cec", "edid", "packet"; + reg = <0x3d>, <0x3e>, <0x3c>, <0x3f>; + reg-names = "main", "edid", "cec", "packet"; adi,dsi-lanes = 
<4>; avdd-supply = <&reg_hdmi>; a2vdd-supply = <&reg_hdmi>; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi index 9e0259ddf4bca3..33d73f3dc18759 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi @@ -124,12 +124,21 @@ sound-wm8524 { "Line Out Jack", "LINEVOUTR"; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + + spdif_in: spdif-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif1>; - spdif-out; - spdif-in; + audio-cpu = <&spdif1>; + audio-codec = <&spdif_out>, <&spdif_in>; }; sound-micfil { diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts index 72004ab6bda550..0b1fa04f1d6781 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts @@ -312,6 +312,8 @@ gsc: gsc@20 { interrupts = <6 IRQ_TYPE_EDGE_FALLING>; interrupt-controller; #interrupt-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; adc { compatible = "gw,gsc-adc"; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts index cc9b81d4618868..31c33acb560c6c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts @@ -105,6 +105,17 @@ hdmi_con: endpoint { }; }; + hdmi-connector { + compatible = "hdmi-connector"; + type = "a"; + + port { + hdmi_connector: endpoint { + remote-endpoint = <&hdmi_to_connector>; + }; + }; + }; + leds { compatible = "gpio-leds"; pinctrl-names = "default"; @@ -282,6 +293,26 @@ usb-mux-hog { }; }; +&hdmi_tx { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hdmi>; + status = "okay"; + + ports { + port@1 { + reg = <1>; + + hdmi_to_connector: endpoint { + remote-endpoint = <&hdmi_connector>; + }; + }; + }; +}; + +&hdmi_tx_phy { + status = "okay"; +}; + &i2c2 { clock-frequency = <384000>; pinctrl-names = "default"; @@ -344,6 +375,10 @@ pcieclk: clock-generator@68 { }; }; +&hdmi_pvi { + status = "okay"; +}; + &i2c3 { /* Connected to USB Hub */ usb-typec@52 { @@ -464,6 +499,10 @@ &lcdif1 { status = "okay"; }; +&lcdif3 { + status = "okay"; +}; + &micfil { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pdm>; @@ -646,6 +685,15 @@ MX8MP_IOMUXC_SAI1_TXD7__GPIO4_IO19 0x140 >; }; + pinctrl_hdmi: hdmigrp { + fsl,pins = < + MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c2 + MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c2 + MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000010 + MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000010 + >; + }; + pinctrl_i2c2: i2c2grp { fsl,pins = < MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2 diff --git a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts index 7e1b58dbe23a7f..d0fc5977258fbf 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts @@ -59,6 +59,18 @@ clk_pwm4: clock-pwm4 { pwms = <&pwm4 0 83 0>; }; + hdmi-connector { + compatible = "hdmi-connector"; + label = "J17"; + type = "a"; + + port { + hdmi_connector_in: endpoint { + remote-endpoint = <&hdmi_tx_out>; + }; + }; + }; + panel: panel { /* Compatible string is filled in by panel board DT Overlay.
*/ backlight = <&backlight>; @@ -311,6 +323,33 @@ &gpio5 { "", "SPI3_CS#", "", "", "", "", "", ""; }; +&hdmi_pvi { + status = "okay"; +}; + +&hdmi_tx { + ddc-i2c-bus = <&i2c5>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hdmi>; + status = "okay"; + + ports { + port@1 { + hdmi_tx_out: endpoint { + remote-endpoint = <&hdmi_connector_in>; + }; + }; + }; +}; + +&hdmi_tx_phy { + status = "okay"; +}; + +&lcdif3 { + status = "okay"; +}; + &i2c1 { clock-frequency = <100000>; pinctrl-names = "default", "gpio"; @@ -499,7 +538,6 @@ &pwm4 { }; &sai3 { - #clock-cells = <0>; #sound-dai-cells = <0>; assigned-clocks = <&clk IMX8MP_CLK_SAI3>; assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>; @@ -682,6 +720,13 @@ MX8MP_IOMUXC_SPDIF_TX__CAN1_TX 0x154 >; }; + pinctrl_hdmi: hdmi-grp { + fsl,pins = < + MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x154 + MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x154 + >; + }; + pinctrl_hog_feature: hog-feature-grp { fsl,pins = < /* GPIO5_IO03 */ diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts index 938347704136ac..d26930f1a9e9d0 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts @@ -56,6 +56,18 @@ memory@40000000 { <0x1 0x00000000 0 0xc0000000>; }; + native-hdmi-connector { + compatible = "hdmi-connector"; + label = "HDMI OUT"; + type = "a"; + + port { + hdmi_in: endpoint { + remote-endpoint = <&hdmi_tx_out>; + }; + }; + }; + pcie0_refclk: pcie0-refclk { compatible = "fixed-clock"; #clock-cells = <0>; @@ -408,6 +420,28 @@ &flexcan2 { status = "disabled";/* can2 pin conflict with pdm */ }; +&hdmi_pvi { + status = "okay"; +}; + +&hdmi_tx { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hdmi>; + status = "okay"; + + ports { + port@1 { + hdmi_tx_out: endpoint { + remote-endpoint = <&hdmi_in>; + }; + }; + }; +}; + +&hdmi_tx_phy { + status = "okay"; +}; + &i2c1 { clock-frequency = <400000>; pinctrl-names = "default"; @@ -604,6 +638,10 @@ &lcdif1 { status = "okay"; }; +&lcdif3 { + status = "okay"; +}; + &micfil { #sound-dai-cells = <0>; pinctrl-names = "default"; @@ -858,6 +896,14 @@ MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x140 >; }; + pinctrl_hdmi: hdmigrp { + fsl,pins = < + MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x1c2 + MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x1c2 + MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x10 + >; + }; + pinctrl_hog: hoggrp { fsl,pins = < MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000010 diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts index 00a240484c254e..50debe821c4212 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts @@ -6,6 +6,7 @@ /dts-v1/; +#include #include #include #include "imx8mp-phycore-som.dtsi" @@ -43,6 +44,15 @@ panel1_in: endpoint { }; }; + reg_vcc_5v_sw: regulator-vcc-5v-sw { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <5000000>; + regulator-min-microvolt = <5000000>; + regulator-name = "VCC_5V_SW"; + }; + reg_can1_stby: regulator-can1-stby { compatible = "regulator-fixed"; pinctrl-names = "default"; @@ -103,6 +113,22 @@ reg_vcc_3v3_sw: regulator-vcc-3v3-sw { }; }; +/* TPM */ +&ecspi1 { + #address-cells = <1>; + #size-cells = <0>; + cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ecspi1>; + status = "okay"; + + tpm: tpm@0 { + 
compatible = "infineon,slb9670", "tcg,tpm_tis-spi"; + reg = <0>; + spi-max-frequency = <38000000>; + }; +}; + &eqos { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_eqos>; @@ -155,6 +181,7 @@ eeprom@51 { compatible = "atmel,24c02"; reg = <0x51>; pagesize = <16>; + vcc-supply = <®_vcc_3v3_sw>; }; leds@62 { @@ -195,6 +222,23 @@ &snvs_pwrkey { status = "okay"; }; +&pcie_phy { + clocks = <&hsio_blk_ctrl>; + clock-names = "ref"; + fsl,refclk-pad-mode = ; + fsl,clkreq-unsupported; + status = "okay"; +}; + +/* Mini PCIe */ +&pcie { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcie0>; + reset-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>; + vpcie-supply = <®_vcc_3v3_sw>; + status = "okay"; +}; + &pwm3 { status = "okay"; pinctrl-names = "default"; @@ -206,6 +250,7 @@ &rv3028 { pinctrl-0 = <&pinctrl_rtc>; interrupt-parent = <&gpio4>; interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + aux-voltage-chargeable = <1>; wakeup-source; trickle-resistor-ohms = <3000>; }; @@ -234,6 +279,7 @@ &usb_dwc3_0 { /* USB2 4-port USB3.0 HUB */ &usb3_phy1 { + vbus-supply = <®_vcc_5v_sw>; status = "okay"; }; @@ -267,7 +313,9 @@ &usdhc2 { pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_pins>; pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_pins>; cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; + disable-wp; vmmc-supply = <®_usdhc2_vmmc>; + vqmmc-supply = <&ldo5>; bus-width = <4>; status = "okay"; }; @@ -300,6 +348,15 @@ &gpio4 { }; &iomuxc { + pinctrl_ecspi1: ecspi1grp { + fsl,pins = < + MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO 0x80 + MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI 0x80 + MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK 0x80 + MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09 0x00 + >; + }; + pinctrl_eqos: eqosgrp { fsl,pins = < MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x2 @@ -366,6 +423,15 @@ MX8MP_IOMUXC_SD2_WP__GPIO2_IO20 0x12 >; }; + pinctrl_pcie0: pcie0grp { + fsl,pins = < + MX8MP_IOMUXC_GPIO1_IO08__GPIO1_IO08 0x40 + MX8MP_IOMUXC_GPIO1_IO10__GPIO1_IO10 0x60 + MX8MP_IOMUXC_GPIO1_IO11__GPIO1_IO11 0x60 /* open drain, pull up */ + MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x40 + >; + }; + pinctrl_pwm3: pwm3grp { fsl,pins = < MX8MP_IOMUXC_SPDIF_TX__PWM3_OUT 0x12 diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-no-eth.dtso b/arch/arm64/boot/dts/freescale/imx8mp-phycore-no-eth.dtso new file mode 100644 index 00000000000000..5f0278bf61ee92 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-no-eth.dtso @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 PHYTEC Messtechnik GmbH + * Author: Cem Tenruh + */ + +/dts-v1/; +/plugin/; + +ðphy1 { + status = "disabled"; +}; + +&fec { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi index e6ffa6a6b68bb4..a5ecdca8bc0ead 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi @@ -20,6 +20,15 @@ memory@40000000 { device_type = "memory"; reg = <0x0 0x40000000 0 0x80000000>; }; + + reg_vdd_io: regulator-vdd-io { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <3300000>; + regulator-name = "VDD_IO"; + }; }; &A53_0 { @@ -170,6 +179,7 @@ eeprom@51 { compatible = "atmel,24c32"; reg = <0x51>; pagesize = <32>; + vcc-supply = <®_vdd_io>; }; rv3028: rtc@52 { diff --git a/arch/arm64/boot/dts/freescale/imx8mp-var-som-symphony.dts b/arch/arm64/boot/dts/freescale/imx8mp-var-som-symphony.dts new file mode 100644 index 
00000000000000..36d3eb86520234 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mp-var-som-symphony.dts @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright 2024 Variscite Ltd. + */ + +#include "imx8mp-var-som.dtsi" + +/ { + model = "Variscite VAR-SOM-MX8M-PLUS on Symphony-Board"; + compatible = "variscite,var-som-mx8mp-symphony", "variscite,var-som-mx8mp", "fsl,imx8mp"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi new file mode 100644 index 00000000000000..b2ac2583a59292 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mp-var-som.dtsi @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright 2024 Variscite Ltd. + * + * Author: Tarang Raval + */ + +/dts-v1/; + +#include +#include +#include +#include "imx8mp.dtsi" + +/ { + model = "Variscite VAR-SOM-MX8M Plus module"; + + chosen { + stdout-path = &uart2; + }; + + gpio-leds { + compatible = "gpio-leds"; + + led-0 { + function = LED_FUNCTION_POWER; + gpios = <&pca9534 0 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + }; + + memory@40000000 { + device_type = "memory"; + reg = <0x0 0x40000000 0 0xc0000000>, + <0x1 0x00000000 0 0xc0000000>; + }; + + + reg_usdhc2_vmmc: regulator-usdhc2-vmmc { + compatible = "regulator-fixed"; + regulator-name = "VSD_3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpios = <&gpio4 22 GPIO_ACTIVE_HIGH>; + enable-active-high; + startup-delay-us = <100>; + off-on-delay-us = <12000>; + }; +}; + +&A53_0 { + cpu-supply = <&buck2>; +}; + +&A53_1 { + cpu-supply = <&buck2>; +}; + +&A53_2 { + cpu-supply = <&buck2>; +}; + +&A53_3 { + cpu-supply = <&buck2>; +}; + +&i2c1 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c1>; + status = "okay"; + + pmic@25 { + compatible = "nxp,pca9450c"; + reg = <0x25>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pmic>; + interrupt-parent = <&gpio5>; + interrupts = <4 IRQ_TYPE_LEVEL_LOW>; + + regulators { + buck1: BUCK1 { + regulator-name = "BUCK1"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <2187500>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <3125>; + }; + + buck2: BUCK2 { + regulator-name = "BUCK2"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <2187500>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <3125>; + nxp,dvs-run-voltage = <950000>; + nxp,dvs-standby-voltage = <850000>; + }; + + buck4: BUCK4 { + regulator-name = "BUCK4"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <3400000>; + regulator-boot-on; + regulator-always-on; + }; + + buck5: BUCK5 { + regulator-name = "BUCK5"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <3400000>; + regulator-boot-on; + regulator-always-on; + }; + + buck6: BUCK6 { + regulator-name = "BUCK6"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <3400000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo1: LDO1 { + regulator-name = "LDO1"; + regulator-min-microvolt = <1600000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo2: LDO2 { + regulator-name = "LDO2"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1150000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo3: LDO3 { + regulator-name = "LDO3"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; 
+ regulator-always-on; + }; + + ldo4: LDO4 { + regulator-name = "LDO4"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + ldo5: LDO5 { + regulator-name = "LDO5"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + }; + }; + }; +}; + +&i2c3 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c3>; + status = "okay"; + + /* GPIO expander */ + pca9534: gpio@20 { + compatible = "nxp,pca9534"; + reg = <0x20>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pca9534>; + gpio-controller; + #gpio-cells = <2>; + interrupt-parent = <&gpio1>; + interrupts = <15 IRQ_TYPE_EDGE_FALLING>; + wakeup-source; + + usb3-sata-sel-hog { + gpio-hog; + gpios = <4 0>; + output-low; + line-name = "usb3_sata_sel"; + }; + }; +}; + +/* Console */ +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart2>; + status = "okay"; +}; + +/* SD-card */ +&usdhc2 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; + cd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; + vmmc-supply = <®_usdhc2_vmmc>; + bus-width = <4>; + status = "okay"; +}; + +/* eMMC */ +&usdhc3 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc3>; + pinctrl-1 = <&pinctrl_usdhc3_100mhz>; + pinctrl-2 = <&pinctrl_usdhc3_200mhz>; + bus-width = <8>; + non-removable; + status = "okay"; +}; + +&wdog1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdog>; + fsl,ext-reset-output; + status = "okay"; +}; + +&iomuxc { + + pinctrl_i2c1: i2c1grp { + fsl,pins = < + MX8MP_IOMUXC_SD1_DATA4__I2C1_SCL 0x400001c2 + MX8MP_IOMUXC_SD1_DATA5__I2C1_SDA 0x400001c2 + >; + }; + + pinctrl_i2c3: i2c3grp { + fsl,pins = < + MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c2 + MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c2 + >; + }; + + pinctrl_pca9534: pca9534grp { + fsl,pins = < + MX8MP_IOMUXC_GPIO1_IO15__GPIO1_IO15 0xc0 + >; + }; + + pinctrl_pmic: pmicgrp { + fsl,pins = < + MX8MP_IOMUXC_SPDIF_RX__GPIO5_IO04 0x1c0 + >; + }; + + pinctrl_uart2: uart2grp { + fsl,pins = < + MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x40 + MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x40 + >; + }; + + pinctrl_usdhc2_gpio: usdhc2-gpiogrp { + fsl,pins = < + MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x1c4 + MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x10 + MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0xc0 + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190 + MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0 + MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0 + MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0 + MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0 + MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0 + >; + }; + + pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { + fsl,pins = < + MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194 + MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4 + MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4 + MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4 + MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4 + MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4 + >; + }; + + pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { + fsl,pins = < + MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196 + MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6 + MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6 + MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6 + MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6 + MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6 + >; + }; + + pinctrl_usdhc3: 
usdhc3grp { + fsl,pins = < + MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190 + MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d0 + MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d0 + MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d0 + MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d0 + MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d0 + MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d0 + MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d0 + MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d0 + MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d0 + MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x190 + >; + }; + + pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp { + fsl,pins = < + MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194 + MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4 + MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d4 + MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d4 + MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d4 + MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d4 + MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d4 + MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d4 + MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d4 + MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d4 + MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x194 + >; + }; + + pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp { + fsl,pins = < + MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x196 + MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d6 + MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d6 + MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d6 + MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d6 + MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d6 + MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d6 + MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d6 + MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d6 + MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d6 + MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x196 + >; + }; + + pinctrl_wdog: wdoggrp { + fsl,pins = < + MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0xc6 + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso index edf22ff549a476..7d9fcdee58a7ff 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso +++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx-imx219.dtso @@ -11,7 +11,19 @@ /plugin/; &{/} { - compatible = "gw,imx8mp-gw74xx", "fsl,imx8mp"; + reg_vana: regulator-2p8v { + compatible = "regulator-fixed"; + regulator-name = "2P8V"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + }; + + reg_vddl: regulator-1p2v { + compatible = "regulator-fixed"; + regulator-name = "1P2V"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + }; reg_cam: regulator-cam { pinctrl-names = "default"; @@ -41,6 +53,8 @@ imx219: sensor@10 { reg = <0x10>; clocks = <&cam24m>; VDIG-supply = <®_cam>; + VANA-supply = <®_vana>; + VDDL-supply = <®_vddl>; port { /* MIPI CSI-2 bus endpoint */ diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx-2x.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx-2x.dts new file mode 100644 index 00000000000000..7ca68df9e516cd --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx-2x.dts @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright 2023 Gateworks Corporation + */ + +/dts-v1/; + +#include "imx8mp.dtsi" +#include "imx8mp-venice-gw702x.dtsi" +#include "imx8mp-venice-gw75xx.dtsi" + +/ { + model = "Gateworks Venice GW75xx-2x i.MX8MP Development Kit"; + compatible = "gateworks,imx8mp-gw75xx-2x", "fsl,imx8mp"; + + chosen { + stdout-path = &uart2; + }; +}; + +/* Disable SOM interfaces not used on baseboard */ +&eqos { + status = "disabled"; +}; + +&usdhc1 { 
+ status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx.dtsi new file mode 100644 index 00000000000000..0d40cb0f05f677 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw75xx.dtsi @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright 2023 Gateworks Corporation + */ + +#include +#include +#include + +/ { + led-controller { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio_leds>; + + led-0 { + function = LED_FUNCTION_STATUS; + color = ; + gpios = <&gpio4 22 GPIO_ACTIVE_HIGH>; + default-state = "on"; + linux,default-trigger = "heartbeat"; + }; + + led-1 { + function = LED_FUNCTION_STATUS; + color = ; + gpios = <&gpio4 27 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + pcie0_refclk: pcie0-refclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + }; + + pps { + compatible = "pps-gpio"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pps>; + gpios = <&gpio4 21 GPIO_ACTIVE_HIGH>; + status = "okay"; + }; + + reg_usb2_vbus: regulator-usb2-vbus { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usb2_en>; + compatible = "regulator-fixed"; + regulator-name = "usb2_vbus"; + gpio = <&gpio4 12 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; + + reg_usdhc2_vmmc: regulator-usdhc2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>; + compatible = "regulator-fixed"; + regulator-name = "SD2_3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +}; + +/* off-board header */ +&ecspi2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi2>; + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +&gpio4 { + gpio-line-names = + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "gpioa", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", ""; +}; + +&gpio4 { + gpio-line-names = + "", "gpiod", "", "", + "gpiob", "gpioc", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "pci_usb_sel", "", + "pci_wdis#", "", "", ""; +}; + +&i2c2 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c2>; + status = "okay"; + + eeprom@52 { + compatible = "atmel,24c32"; + reg = <0x52>; + pagesize = <32>; + }; +}; + +/* off-board header */ +&i2c3 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c3>; + status = "okay"; +}; + +&pcie_phy { + fsl,refclk-pad-mode = ; + fsl,clkreq-unsupported; + clocks = <&pcie0_refclk>; + clock-names = "ref"; + status = "okay"; +}; + +&pcie { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcie0>; + reset-gpio = <&gpio4 29 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +/* GPS */ +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + status = "okay"; +}; + +/* USB1 - Type C front panel SINK port J14 */ +&usb3_0 { + status = "okay"; +}; + +&usb3_phy0 { + status = "okay"; +}; + +&usb_dwc3_0 { + dr_mode = "peripheral"; + status = "okay"; +}; + +/* USB2 4-port USB3.0 HUB: + * P1 - USBC connector (host only) + * P2 - USB2 test connector + * P3 - miniPCIe full card + * P4 - miniPCIe half card + */ +&usb3_phy1 { + vbus-supply = <®_usb2_vbus>; + status = "okay"; +}; + +&usb3_1 { + fsl,permanently-attached; + 
fsl,disable-port-power-control; + status = "okay"; +}; + +&usb_dwc3_1 { + dr_mode = "host"; + status = "okay"; +}; + +/* microSD */ +&usdhc2 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; + cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; + vmmc-supply = <®_usdhc2_vmmc>; + bus-width = <4>; + status = "okay"; +}; + +&iomuxc { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hog>; + + pinctrl_hog: hoggrp { + fsl,pins = < + MX8MP_IOMUXC_GPIO1_IO13__GPIO1_IO13 0x40000040 /* GPIOA */ + MX8MP_IOMUXC_SAI1_RXC__GPIO4_IO01 0x40000040 /* GPIOD */ + MX8MP_IOMUXC_SAI1_RXD2__GPIO4_IO04 0x40000040 /* GPIOB */ + MX8MP_IOMUXC_SAI1_RXD3__GPIO4_IO05 0x40000040 /* GPIOC */ + MX8MP_IOMUXC_SAI2_TXD0__GPIO4_IO26 0x40000106 /* PCI_USBSEL */ + MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x40000106 /* PCI_WDIS# */ + >; + }; + + pinctrl_gpio_leds: gpioledgrp { + fsl,pins = < + MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x6 /* LEDG */ + MX8MP_IOMUXC_SAI2_MCLK__GPIO4_IO27 0x6 /* LEDR */ + >; + }; + + pinctrl_i2c2: i2c2grp { + fsl,pins = < + MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2 + MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c2 + >; + }; + + pinctrl_i2c3: i2c3grp { + fsl,pins = < + MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c2 + MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c2 + >; + }; + + pinctrl_pcie0: pciegrp { + fsl,pins = < + MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29 0x106 + >; + }; + + pinctrl_pps: ppsgrp { + fsl,pins = < + MX8MP_IOMUXC_SAI2_RXFS__GPIO4_IO21 0x106 + >; + }; + + pinctrl_reg_usb2_en: regusb2grp { + fsl,pins = < + MX8MP_IOMUXC_SAI1_TXD0__GPIO4_IO12 0x6 /* USBHUB_RST# (ext p/u) */ + >; + }; + + pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { + fsl,pins = < + MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x40 + >; + }; + + pinctrl_spi2: spi2grp { + fsl,pins = < + MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x140 + MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x140 + MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x140 + MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x140 + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x140 + MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x140 + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190 + MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0 + MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0 + MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0 + MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0 + MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0 + MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 + >; + }; + + pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { + fsl,pins = < + MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194 + MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4 + MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4 + MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4 + MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4 + MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4 + MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 + >; + }; + + pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { + fsl,pins = < + MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196 + MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6 + MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6 + MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6 + MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6 + MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6 + MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 + >; + }; + + pinctrl_usdhc2_gpio: usdhc2gpiogrp { + fsl,pins = < + MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c4 + >; + }; +}; diff --git 
a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905-2x.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905-2x.dts deleted file mode 100644 index 4a1bbbbe19e66a..00000000000000 --- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905-2x.dts +++ /dev/null @@ -1,28 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -/* - * Copyright 2023 Gateworks Corporation - */ - -/dts-v1/; - -#include "imx8mp.dtsi" -#include "imx8mp-venice-gw702x.dtsi" -#include "imx8mp-venice-gw7905.dtsi" - -/ { - model = "Gateworks Venice GW7905-2x i.MX8MP Development Kit"; - compatible = "gateworks,imx8mp-gw7905-2x", "fsl,imx8mp"; - - chosen { - stdout-path = &uart2; - }; -}; - -/* Disable SOM interfaces not used on baseboard */ -&eqos { - status = "disabled"; -}; - -&usdhc1 { - status = "disabled"; -}; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905.dtsi deleted file mode 100644 index 0d40cb0f05f677..00000000000000 --- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw7905.dtsi +++ /dev/null @@ -1,309 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -/* - * Copyright 2023 Gateworks Corporation - */ - -#include -#include -#include - -/ { - led-controller { - compatible = "gpio-leds"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_gpio_leds>; - - led-0 { - function = LED_FUNCTION_STATUS; - color = ; - gpios = <&gpio4 22 GPIO_ACTIVE_HIGH>; - default-state = "on"; - linux,default-trigger = "heartbeat"; - }; - - led-1 { - function = LED_FUNCTION_STATUS; - color = ; - gpios = <&gpio4 27 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - }; - - pcie0_refclk: pcie0-refclk { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <100000000>; - }; - - pps { - compatible = "pps-gpio"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_pps>; - gpios = <&gpio4 21 GPIO_ACTIVE_HIGH>; - status = "okay"; - }; - - reg_usb2_vbus: regulator-usb2-vbus { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_reg_usb2_en>; - compatible = "regulator-fixed"; - regulator-name = "usb2_vbus"; - gpio = <&gpio4 12 GPIO_ACTIVE_HIGH>; - enable-active-high; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - }; - - reg_usdhc2_vmmc: regulator-usdhc2 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>; - compatible = "regulator-fixed"; - regulator-name = "SD2_3P3V"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>; - enable-active-high; - }; -}; - -/* off-board header */ -&ecspi2 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_spi2>; - cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; - status = "okay"; -}; - -&gpio4 { - gpio-line-names = - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "gpioa", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", ""; -}; - -&gpio4 { - gpio-line-names = - "", "gpiod", "", "", - "gpiob", "gpioc", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "pci_usb_sel", "", - "pci_wdis#", "", "", ""; -}; - -&i2c2 { - clock-frequency = <400000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c2>; - status = "okay"; - - eeprom@52 { - compatible = "atmel,24c32"; - reg = <0x52>; - pagesize = <32>; - }; -}; - -/* off-board header */ -&i2c3 { - clock-frequency = <400000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c3>; - status = "okay"; -}; - -&pcie_phy { - fsl,refclk-pad-mode = ; - fsl,clkreq-unsupported; - clocks = 
<&pcie0_refclk>; - clock-names = "ref"; - status = "okay"; -}; - -&pcie { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_pcie0>; - reset-gpio = <&gpio4 29 GPIO_ACTIVE_LOW>; - status = "okay"; -}; - -/* GPS */ -&uart1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_uart1>; - status = "okay"; -}; - -/* USB1 - Type C front panel SINK port J14 */ -&usb3_0 { - status = "okay"; -}; - -&usb3_phy0 { - status = "okay"; -}; - -&usb_dwc3_0 { - dr_mode = "peripheral"; - status = "okay"; -}; - -/* USB2 4-port USB3.0 HUB: - * P1 - USBC connector (host only) - * P2 - USB2 test connector - * P3 - miniPCIe full card - * P4 - miniPCIe half card - */ -&usb3_phy1 { - vbus-supply = <®_usb2_vbus>; - status = "okay"; -}; - -&usb3_1 { - fsl,permanently-attached; - fsl,disable-port-power-control; - status = "okay"; -}; - -&usb_dwc3_1 { - dr_mode = "host"; - status = "okay"; -}; - -/* microSD */ -&usdhc2 { - pinctrl-names = "default", "state_100mhz", "state_200mhz"; - pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>; - pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>; - pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; - cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; - vmmc-supply = <®_usdhc2_vmmc>; - bus-width = <4>; - status = "okay"; -}; - -&iomuxc { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_hog>; - - pinctrl_hog: hoggrp { - fsl,pins = < - MX8MP_IOMUXC_GPIO1_IO13__GPIO1_IO13 0x40000040 /* GPIOA */ - MX8MP_IOMUXC_SAI1_RXC__GPIO4_IO01 0x40000040 /* GPIOD */ - MX8MP_IOMUXC_SAI1_RXD2__GPIO4_IO04 0x40000040 /* GPIOB */ - MX8MP_IOMUXC_SAI1_RXD3__GPIO4_IO05 0x40000040 /* GPIOC */ - MX8MP_IOMUXC_SAI2_TXD0__GPIO4_IO26 0x40000106 /* PCI_USBSEL */ - MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x40000106 /* PCI_WDIS# */ - >; - }; - - pinctrl_gpio_leds: gpioledgrp { - fsl,pins = < - MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x6 /* LEDG */ - MX8MP_IOMUXC_SAI2_MCLK__GPIO4_IO27 0x6 /* LEDR */ - >; - }; - - pinctrl_i2c2: i2c2grp { - fsl,pins = < - MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2 - MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c2 - >; - }; - - pinctrl_i2c3: i2c3grp { - fsl,pins = < - MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c2 - MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c2 - >; - }; - - pinctrl_pcie0: pciegrp { - fsl,pins = < - MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29 0x106 - >; - }; - - pinctrl_pps: ppsgrp { - fsl,pins = < - MX8MP_IOMUXC_SAI2_RXFS__GPIO4_IO21 0x106 - >; - }; - - pinctrl_reg_usb2_en: regusb2grp { - fsl,pins = < - MX8MP_IOMUXC_SAI1_TXD0__GPIO4_IO12 0x6 /* USBHUB_RST# (ext p/u) */ - >; - }; - - pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { - fsl,pins = < - MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x40 - >; - }; - - pinctrl_spi2: spi2grp { - fsl,pins = < - MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x140 - MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x140 - MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x140 - MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x140 - >; - }; - - pinctrl_uart1: uart1grp { - fsl,pins = < - MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x140 - MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x140 - >; - }; - - pinctrl_usdhc2: usdhc2grp { - fsl,pins = < - MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190 - MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0 - MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0 - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0 - MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - - pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { - fsl,pins = < - MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194 - MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4 - 
MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4 - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4 - MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - - pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { - fsl,pins = < - MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196 - MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6 - MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6 - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6 - MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - - pinctrl_usdhc2_gpio: usdhc2gpiogrp { - fsl,pins = < - MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c4 - >; - }; -}; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi index fbcd93e33aeaa0..da8902c5f7e5b2 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi @@ -65,6 +65,11 @@ reg_pcie: regulator-pcie { }; }; +/* Verdin HDMI_1 Audio */ +&aud2htx { + status = "okay"; +}; + &backlight { power-supply = <®_3p3v>; }; @@ -219,6 +224,11 @@ &sai1 { status = "okay"; }; +/* Verdin HDMI_1 Audio */ +&sound_hdmi { + status = "okay"; +}; + /* Verdin UART_1 */ &uart1 { status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi index 09733fea036dd3..a38e7c947a421c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi @@ -64,6 +64,11 @@ simple-audio-card,cpu { }; }; +/* Verdin HDMI_1 Audio */ +&aud2htx { + status = "okay"; +}; + &backlight { power-supply = <®_3p3v>; }; @@ -215,6 +220,11 @@ &sai1 { status = "okay"; }; +/* Verdin HDMI_1 Audio */ +&sound_hdmi { + status = "okay"; +}; + /* Verdin UART_1, connector X50 through RS485 transceiver */ &uart1 { linux,rs485-enabled-at-boot-time; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi index 3a40338cf2d88f..11cf3bdc95c4ee 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-mallow.dtsi @@ -62,6 +62,11 @@ led-3 { }; }; +/* Verdin HDMI_1 Audio */ +&aud2htx { + status = "okay"; +}; + &backlight { power-supply = <®_3p3v>; }; @@ -182,6 +187,11 @@ ®_usdhc2_vmmc { vin-supply = <®_3p3v>; }; +/* Verdin HDMI_1 Audio */ +&sound_hdmi { + status = "okay"; +}; + /* Verdin UART_1 */ &uart1 { status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi index efcab00c014213..cae06cb67cd3ea 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-wifi.dtsi @@ -75,7 +75,6 @@ bluetooth { &usdhc1 { bus-width = <4>; keep-power-in-suspend; - max-frequency = <100000000>; non-removable; pinctrl-names = "default", "state_100mhz", "state_200mhz"; pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_wifi_ctrl>; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi index 533b7fe218ce66..cc389cda2af2eb 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-yavia.dtsi @@ -85,6 +85,11 @@ led-5 { }; }; +/* Verdin HDMI_1 Audio */ +&aud2htx { + status = "okay"; +}; + &backlight { power-supply = 
<®_3p3v>; }; @@ -192,6 +197,11 @@ ®_usdhc2_vmmc { vin-supply = <®_3p3v>; }; +/* Verdin HDMI_1 Audio */ +&sound_hdmi { + status = "okay"; +}; + /* Verdin UART_1 */ &uart1 { status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi index d23a3942174d8a..a19ad5ee7f792b 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi @@ -77,6 +77,14 @@ key-wakeup { }; }; + sound_hdmi: sound-hdmi { + compatible = "fsl,imx-audio-hdmi"; + model = "audio-hdmi"; + audio-cpu = <&aud2htx>; + hdmi-out; + status = "disabled"; + }; + /* Carrier Board Supplies */ reg_1p8v: regulator-1p8v { compatible = "regulator-fixed"; diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index 603dfe80216f88..f3531cfb0d7913 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -1673,6 +1673,50 @@ isi_in_1: endpoint { }; }; + isp_0: isp@32e10000 { + compatible = "fsl,imx8mp-isp"; + reg = <0x32e10000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>, + <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>, + <&clk IMX8MP_CLK_MEDIA_APB_ROOT>; + clock-names = "isp", "aclk", "hclk"; + power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_ISP>; + fsl,blk-ctrl = <&media_blk_ctrl 0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <1>; + }; + }; + }; + + isp_1: isp@32e20000 { + compatible = "fsl,imx8mp-isp"; + reg = <0x32e20000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>, + <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>, + <&clk IMX8MP_CLK_MEDIA_APB_ROOT>; + clock-names = "isp", "aclk", "hclk"; + power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_ISP>; + fsl,blk-ctrl = <&media_blk_ctrl 1>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + reg = <1>; + }; + }; + }; + dewarp: dwe@32e30000 { compatible = "nxp,imx8mp-dw100"; reg = <0x32e30000 0x10000>; @@ -1687,7 +1731,7 @@ mipi_csi_0: csi@32e40000 { compatible = "fsl,imx8mp-mipi-csi2", "fsl,imx8mm-mipi-csi2"; reg = <0x32e40000 0x10000>; interrupts = ; - clock-frequency = <266000000>; + clock-frequency = <250000000>; clocks = <&clk IMX8MP_CLK_MEDIA_APB_ROOT>, <&clk IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT>, <&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT>, @@ -1695,9 +1739,8 @@ mipi_csi_0: csi@32e40000 { clock-names = "pclk", "wrap", "phy", "axi"; assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM1_PIX>, <&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF>; - assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>, + assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_250M>, <&clk IMX8MP_CLK_24M>; - assigned-clock-rates = <266000000>; power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_MIPI_CSI2_1>; status = "disabled"; @@ -1723,7 +1766,7 @@ mipi_csi_1: csi@32e50000 { compatible = "fsl,imx8mp-mipi-csi2", "fsl,imx8mm-mipi-csi2"; reg = <0x32e50000 0x10000>; interrupts = ; - clock-frequency = <266000000>; + clock-frequency = <250000000>; clocks = <&clk IMX8MP_CLK_MEDIA_APB_ROOT>, <&clk IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT>, <&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT>, @@ -1731,9 +1774,8 @@ mipi_csi_1: csi@32e50000 { clock-names = "pclk", "wrap", "phy", "axi"; assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM2_PIX>, <&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF>; - assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>, + assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_250M>, <&clk IMX8MP_CLK_24M>; - assigned-clock-rates = 
<266000000>; power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_MIPI_CSI2_2>; status = "disabled"; @@ -1871,17 +1913,26 @@ media_blk_ctrl: blk-ctrl@32ec0000 { clock-names = "apb", "axi", "cam1", "cam2", "disp1", "disp2", "isp", "phy"; + /* + * The ISP maximum frequency is 400MHz in normal mode + * and 500MHz in overdrive mode. The 400MHz operating + * point hasn't been successfully tested yet, so set + * IMX8MP_CLK_MEDIA_ISP to 500MHz for the time being. + */ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>, <&clk IMX8MP_CLK_MEDIA_APB>, <&clk IMX8MP_CLK_MEDIA_DISP1_PIX>, <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>, + <&clk IMX8MP_CLK_MEDIA_ISP>, <&clk IMX8MP_VIDEO_PLL1>; assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>, <&clk IMX8MP_SYS_PLL1_800M>, <&clk IMX8MP_VIDEO_PLL1_OUT>, - <&clk IMX8MP_VIDEO_PLL1_OUT>; + <&clk IMX8MP_VIDEO_PLL1_OUT>, + <&clk IMX8MP_SYS_PLL2_500M>; assigned-clock-rates = <500000000>, <200000000>, - <0>, <0>, <1039500000>; + <0>, <0>, <500000000>, + <1039500000>; #power-domain-cells = <1>; lvds_bridge: bridge@5c { diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts index 7507548cdb16bb..a87d0692c3bb37 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts @@ -125,19 +125,33 @@ link_codec: simple-audio-card,codec { }; }; + spdif_out: spdif-out { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + }; + + spdif_in: spdif-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; + }; + sound-spdif { compatible = "fsl,imx-audio-spdif"; model = "imx-spdif"; - spdif-controller = <&spdif1>; - spdif-out; - spdif-in; + audio-cpu = <&spdif1>; + audio-codec = <&spdif_out>, <&spdif_in>; + }; + + hdmi_arc_in: hdmi-arc-in { + compatible = "linux,spdif-dir"; + #sound-dai-cells = <0>; }; sound-hdmi-arc { compatible = "fsl,imx-audio-spdif"; model = "imx-hdmi-arc"; - spdif-controller = <&spdif2>; - spdif-in; + audio-cpu = <&spdif2>; + audio-codec = <&hdmi_arc_in>; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts index 778741dbbb3381..62203eed6a6cb1 100644 --- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts +++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts @@ -6,6 +6,7 @@ /dts-v1/; +#include #include "imx8qm.dtsi" / { @@ -31,6 +32,99 @@ memory@80000000 { reg = <0x00000000 0x80000000 0 0x40000000>; }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + vdev0vring0: memory@90000000 { + reg = <0 0x90000000 0 0x8000>; + no-map; + }; + + vdev0vring1: memory@90008000 { + reg = <0 0x90008000 0 0x8000>; + no-map; + }; + + vdev1vring0: memory@90010000 { + reg = <0 0x90010000 0 0x8000>; + no-map; + }; + + vdev1vring1: memory@90018000 { + reg = <0 0x90018000 0 0x8000>; + no-map; + }; + + rsc_table0: memory@900ff000 { + reg = <0 0x900ff000 0 0x1000>; + no-map; + }; + + vdev2vring0: memory@90100000 { + reg = <0 0x90100000 0 0x8000>; + no-map; + }; + + vdev2vring1: memory@90108000 { + reg = <0 0x90108000 0 0x8000>; + no-map; + }; + + vdev3vring0: memory@90110000 { + reg = <0 0x90110000 0 0x8000>; + no-map; + }; + + vdev3vring1: memory@90118000 { + reg = <0 0x90118000 0 0x8000>; + no-map; + }; + + rsc_table1: memory@901ff000 { + reg = <0 0x901ff000 0 0x1000>; + no-map; + }; + + vdevbuffer: memory@90400000 { + compatible = "shared-dma-pool"; + reg = <0 0x90400000 0 0x100000>; + no-map; + }; + }; + + lvds_backlight0: backlight-lvds0 { + compatible = "pwm-backlight"; + pwms = <&qm_pwm_lvds0 0 100000 0>; 
+ brightness-levels = <0 100>; + num-interpolated-steps = <100>; + default-brightness-level = <80>; + }; + + lvds_backlight1: backlight-lvds1 { + compatible = "pwm-backlight"; + pwms = <&pwm_lvds1 0 100000 0>; + brightness-levels = <0 100>; + num-interpolated-steps = <100>; + default-brightness-level = <80>; + }; + + mux-controller { + compatible = "nxp,cbdtu02043", "gpio-sbu-mux"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_typec_mux>; + select-gpios = <&lsio_gpio4 6 GPIO_ACTIVE_LOW>; + enable-gpios = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>; + orientation-switch; + + port { + usb3_data_ss: endpoint { + remote-endpoint = <&typec_con_ss>; + }; + }; + }; + reg_usdhc2_vmmc: usdhc2-vmmc { compatible = "regulator-fixed"; regulator-name = "SD1_SPWR"; @@ -133,6 +227,37 @@ sound-wm8960 { "LINPUT1", "Mic Jack", "Mic Jack", "MICB"; }; + + imx8qm-cm4-0 { + compatible = "fsl,imx8qm-cm4"; + clocks = <&clk_dummy>; + mbox-names = "tx", "rx", "rxdb"; + mboxes = <&lsio_mu5 0 1 + &lsio_mu5 1 1 + &lsio_mu5 3 1>; + memory-region = <&vdevbuffer>, <&vdev0vring0>, <&vdev0vring1>, + <&vdev1vring0>, <&vdev1vring1>, <&rsc_table0>; + power-domains = <&pd IMX_SC_R_M4_0_PID0>, <&pd IMX_SC_R_M4_0_MU_1A>; + + fsl,resource-id = ; + fsl,entry-address = <0x34fe0000>; + }; + + imx8qm-cm4-1 { + compatible = "fsl,imx8qm-cm4"; + clocks = <&clk_dummy>; + mbox-names = "tx", "rx", "rxdb"; + mboxes = <&lsio_mu6 0 1 + &lsio_mu6 1 1 + &lsio_mu6 3 1>; + memory-region = <&vdevbuffer>, <&vdev2vring0>, <&vdev2vring1>, + <&vdev3vring0>, <&vdev3vring1>, <&rsc_table1>; + power-domains = <&pd IMX_SC_R_M4_1_PID0>, <&pd IMX_SC_R_M4_1_MU_1A>; + + fsl,resource-id = ; + fsl,entry-address = <0x38fe0000>; + }; + }; &adc0 { @@ -212,6 +337,44 @@ gyrometer@69 { compatible = "st,l3g4200d-gyro"; reg = <0x69>; }; + + ptn5110: tcpc@51 { + compatible = "nxp,ptn5110", "tcpci"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_typec>; + reg = <0x51>; + interrupt-parent = <&lsio_gpio4>; + interrupts = <26 IRQ_TYPE_LEVEL_LOW>; + status = "okay"; + + usb_con1: connector { + compatible = "usb-c-connector"; + label = "USB-C"; + power-role = "source"; + data-role = "dual"; + source-pdos = ; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + typec_dr_sw: endpoint { + remote-endpoint = <&usb3_drd_sw>; + }; + }; + + port@1 { + reg = <1>; + typec_con_ss: endpoint { + remote-endpoint = <&usb3_data_ss>; + }; + }; + }; + }; + }; }; &i2c1 { @@ -241,6 +404,34 @@ wm8960: audio-codec@1a { }; }; +&i2c1_lvds0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lvds0_lpi2c1>; + clock-frequency = <100000>; + status = "okay"; +}; + +&i2c1_lvds1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lvds1_lpi2c1>; + clock-frequency = <100000>; + status = "okay"; +}; + +&i2c0_mipi0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_mipi0_lpi2c0>; + clock-frequency = <100000>; + status = "okay"; +}; + +&i2c0_mipi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_mipi1_lpi2c0>; + clock-frequency = <100000>; + status = "okay"; +}; + &flexcan1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_flexcan1>; @@ -287,12 +478,6 @@ &lpspi2 { pinctrl-0 = <&pinctrl_lpspi2 &pinctrl_lpspi2_cs>; cs-gpios = <&lsio_gpio3 10 GPIO_ACTIVE_LOW>; status = "okay"; - - spidev0: spi@0 { - reg = <0>; - compatible = "rohm,dh2228fv"; - spi-max-frequency = <30000000>; - }; }; &lsio_mu5 { @@ -356,6 +541,18 @@ &fec2 { status = "okay"; }; +&qm_pwm_lvds0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm_lvds0>; + status = "okay"; +}; + +&pwm_lvds1 { + 
pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm_lvds1>; + status = "okay"; +}; + &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; @@ -376,6 +573,26 @@ &usdhc2 { status = "okay"; }; +&usb3_phy { + status = "okay"; +}; + +&usbotg3 { + status = "okay"; +}; + +&usbotg3_cdns3 { + dr_mode = "otg"; + usb-role-switch; + status = "okay"; + + port { + usb3_drd_sw: endpoint { + remote-endpoint = <&typec_dr_sw>; + }; + }; +}; + &sai0 { #sound-dai-cells = <0>; assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>, @@ -501,6 +718,22 @@ IMX8QM_SPI2_CS0_LSIO_GPIO3_IO10 0x21 >; }; + pinctrl_mipi0_lpi2c0: mipi0_lpi2c0grp { + fsl,pins = < + IMX8QM_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL 0xc6000020 + IMX8QM_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA 0xc6000020 + IMX8QM_MIPI_DSI0_GPIO0_01_LSIO_GPIO1_IO19 0x00000020 + >; + }; + + pinctrl_mipi1_lpi2c0: mipi1_lpi2c0grp { + fsl,pins = < + IMX8QM_MIPI_DSI1_I2C0_SCL_MIPI_DSI1_I2C0_SCL 0xc6000020 + IMX8QM_MIPI_DSI1_I2C0_SDA_MIPI_DSI1_I2C0_SDA 0xc6000020 + IMX8QM_MIPI_DSI1_GPIO0_01_LSIO_GPIO1_IO23 0x00000020 + >; + }; + pinctrl_flexspi0: flexspi0grp { fsl,pins = < IMX8QM_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 0x06000021 @@ -582,6 +815,32 @@ IMX8QM_M41_GPIO0_01_DMA_UART3_TX 0x06000020 >; }; + pinctrl_lvds0_lpi2c1: lvds0lpi2c1grp { + fsl,pins = < + IMX8QM_LVDS0_I2C1_SCL_LVDS0_I2C1_SCL 0xc600004c + IMX8QM_LVDS0_I2C1_SDA_LVDS0_I2C1_SDA 0xc600004c + >; + }; + + pinctrl_lvds1_lpi2c1: lvds1lpi2c1grp { + fsl,pins = < + IMX8QM_LVDS1_I2C1_SCL_LVDS1_I2C1_SCL 0xc600004c + IMX8QM_LVDS1_I2C1_SDA_LVDS1_I2C1_SDA 0xc600004c + >; + }; + + pinctrl_pwm_lvds0: pwmlvds0grp { + fsl,pins = < + IMX8QM_LVDS0_GPIO00_LVDS0_PWM0_OUT 0x00000020 + >; + }; + + pinctrl_pwm_lvds1: pwmlvds1grp { + fsl,pins = < + IMX8QM_LVDS1_GPIO00_LVDS1_PWM0_OUT 0x00000020 + >; + }; + pinctrl_sai0: sai0grp { fsl,pins = < IMX8QM_SPI0_CS1_AUD_SAI0_TXC 0x0600004c @@ -600,6 +859,19 @@ IMX8QM_SAI1_TXC_AUD_SAI1_TXC 0x06000040 >; }; + pinctrl_typec: typecgrp { + fsl,pins = < + IMX8QM_QSPI1A_DATA0_LSIO_GPIO4_IO26 0x00000021 + >; + }; + + pinctrl_typec_mux: typecmuxgrp { + fsl,pins = < + IMX8QM_QSPI1A_SS0_B_LSIO_GPIO4_IO19 0x60 + IMX8QM_USB_SS3_TC3_LSIO_GPIO4_IO06 0x60 + >; + }; + pinctrl_usdhc1: usdhc1grp { fsl,pins = < IMX8QM_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041 diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-lvds.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-lvds.dtsi new file mode 100644 index 00000000000000..0514d8b2af75cc --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-lvds.dtsi @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Copyright 2024 NXP + */ + +&qm_lvds0_lis_lpcg { + clocks = <&lvds_ipg_clk>; + clock-indices = ; +}; + +&qm_lvds0_pwm_lpcg { + clocks = <&clk IMX_SC_R_LVDS_0_PWM_0 IMX_SC_PM_CLK_PER>, + <&lvds_ipg_clk>; + clock-indices = , ; +}; + +&qm_lvds0_i2c0_lpcg { + clocks = <&clk IMX_SC_R_LVDS_0_I2C_0 IMX_SC_PM_CLK_PER>, + <&lvds_ipg_clk>; + clock-indices = , ; +}; + +&qm_pwm_lvds0 { + clocks = <&qm_lvds0_pwm_lpcg IMX_LPCG_CLK_4>, + <&qm_lvds0_pwm_lpcg IMX_LPCG_CLK_0>; +}; + +&qm_i2c0_lvds0 { + clocks = <&qm_lvds0_i2c0_lpcg IMX_LPCG_CLK_0>, + <&qm_lvds0_i2c0_lpcg IMX_LPCG_CLK_4>; +}; + +&lvds0_subsys { + interrupt-parent = <&irqsteer_lvds0>; + + irqsteer_lvds0: interrupt-controller@56240000 { + compatible = "fsl,imx8qm-irqsteer", "fsl,imx-irqsteer"; + reg = <0x56240000 0x1000>; + interrupts = ; + interrupt-controller; + interrupt-parent = <&gic>; + #interrupt-cells = <1>; + clocks = <&qm_lvds0_lis_lpcg IMX_LPCG_CLK_4>; + clock-names = "ipg"; + 
power-domains = <&pd IMX_SC_R_LVDS_0>; + + fsl,channel = <0>; + fsl,num-irqs = <32>; + }; + + lvds0_i2c1_lpcg: clock-controller@56243014 { + compatible = "fsl,imx8qxp-lpcg"; + reg = <0x56243014 0x4>; + #clock-cells = <1>; + clocks = <&clk IMX_SC_R_LVDS_0_I2C_0 IMX_SC_PM_CLK_PER>, + <&lvds_ipg_clk>; + clock-indices = , ; + clock-output-names = "lvds0_i2c1_lpcg_clk", + "lvds0_i2c1_lpcg_ipg_clk"; + power-domains = <&pd IMX_SC_R_LVDS_0_I2C_0>; + }; + + i2c1_lvds0: i2c@56247000 { + compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x56247000 0x1000>; + interrupts = <9>; + clocks = <&lvds0_i2c1_lpcg IMX_LPCG_CLK_0>, + <&lvds0_i2c1_lpcg IMX_LPCG_CLK_4>; + clock-names = "per", "ipg"; + assigned-clocks = <&clk IMX_SC_R_LVDS_0_I2C_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_LVDS_0_I2C_0>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-mipi.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-mipi.dtsi new file mode 100644 index 00000000000000..f4c393fe720441 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-mipi.dtsi @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Copyright 2024 NXP + */ + +&mipi0_lis_lpcg { + clocks = <&dsi_ipg_clk>; + clock-indices = ; + clock-output-names = "mipi0_lis_lpcg_ipg_clk"; +}; + +&mipi0_pwm_lpcg { + clocks = <&clk IMX_SC_R_MIPI_0_PWM_0 IMX_SC_PM_CLK_PER>, + <&dsi_ipg_clk>; + clock-indices = , ; + clock-output-names = "mipi0_pwm_lpcg_clk", + "mipi0_pwm_lpcg_ipg_clk"; +}; \ No newline at end of file diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi index 61986e0639e531..3ee6e2869e3cf5 100644 --- a/arch/arm64/boot/dts/freescale/imx8qm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi @@ -560,11 +560,36 @@ clk_spdif1_rx: clock-spdif1-rx { clock-output-names = "spdif1_rx"; }; + lvds_ipg_clk: clock-controller-lvds-ipg { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + clock-output-names = "lvds0_ipg_clk"; + }; + + dsi_ipg_clk: clock-controller-dsi-ipg { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <120000000>; + clock-output-names = "dsi_ipg_clk"; + }; + + mipi_pll_div2_clk: clock-controller-mipi-div2-pll { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <432000000>; + clock-output-names = "mipi_pll_div2_clk"; + }; + /* sorted in register address */ #include "imx8-ss-cm41.dtsi" #include "imx8-ss-audio.dtsi" #include "imx8-ss-vpu.dtsi" #include "imx8-ss-gpu0.dtsi" + #include "imx8-ss-mipi0.dtsi" + #include "imx8-ss-lvds0.dtsi" + #include "imx8-ss-mipi1.dtsi" + #include "imx8-ss-lvds1.dtsi" #include "imx8-ss-img.dtsi" #include "imx8-ss-dma.dtsi" #include "imx8-ss-conn.dtsi" @@ -576,3 +601,5 @@ clk_spdif1_rx: clock-spdif1-rx { #include "imx8qm-ss-conn.dtsi" #include "imx8qm-ss-lsio.dtsi" #include "imx8qm-ss-audio.dtsi" +#include "imx8qm-ss-lvds.dtsi" +#include "imx8qm-ss-mipi.dtsi" diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi index bc659066e19af0..f7bbb2153ae01a 100644 --- a/arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8x-colibri-aster.dtsi @@ -3,10 +3,24 @@ * Copyright 2018-2021 Toradex */ +/* Colibri Analogue Inputs */ +&adc0 { + status = "okay"; +}; + +/* Colibri PWM_A */ +&adma_pwm { + status = "okay"; +}; + &colibri_gpio_keys { status = "okay"; }; +&extcon_usbc_det { + status = "okay"; +}; + 
/* Colibri Ethernet */ &fec1 { status = "okay"; @@ -38,6 +52,28 @@ &lpuart3 { status = "okay"; }; +/* USB PHY for usbotg3 */ +&usb3_phy { + status = "okay"; +}; + +&usbotg1 { + status = "okay"; +}; + +&usbotg3 { + status = "okay"; +}; + +&usbotg3_cdns3 { + status = "okay"; +}; + +/* USB PHY for usbotg1 */ +&usbphy1 { + status = "okay"; +}; + /* Colibri SDCard */ &usdhc2 { status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi index 9af769ab8cebbd..f75499765d85dc 100644 --- a/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi @@ -19,10 +19,24 @@ clk16m: clock-16mhz { }; }; +/* Colibri Analogue Inputs */ +&adc0 { + status = "okay"; +}; + +/* Colibri PWM_A */ +&adma_pwm { + status = "okay"; +}; + &colibri_gpio_keys { status = "okay"; }; +&extcon_usbc_det { + status = "okay"; +}; + &i2c1 { status = "okay"; @@ -90,6 +104,28 @@ &fec1 { status = "okay"; }; +/* USB PHY for usbotg3 */ +&usb3_phy { + status = "okay"; +}; + +&usbotg1 { + status = "okay"; +}; + +&usbotg3 { + status = "okay"; +}; + +&usbotg3_cdns3 { + status = "okay"; +}; + +/* USB PHY for usbotg1 */ +&usbphy1 { + status = "okay"; +}; + /* Colibri SD/MMC Card */ &usdhc2 { status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi index 8d06925a8ebd05..54393a0c5cbfcd 100644 --- a/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi @@ -17,10 +17,24 @@ reg_3v3: regulator-3v3 { }; }; +/* Colibri Analogue Inputs */ +&adc0 { + status = "okay"; +}; + +/* Colibri PWM_A */ +&adma_pwm { + status = "okay"; +}; + &colibri_gpio_keys { status = "okay"; }; +&extcon_usbc_det { + status = "okay"; +}; + /* Colibri FastEthernet */ &fec1 { status = "okay"; @@ -108,6 +122,28 @@ &lsio_pwm2 { status = "okay"; }; +/* USB PHY for usbotg3 */ +&usb3_phy { + status = "okay"; +}; + +&usbotg1 { + status = "okay"; +}; + +&usbotg3 { + status = "okay"; +}; + +&usbotg3_cdns3 { + status = "okay"; +}; + +/* USB PHY for usbotg1 */ +&usbphy1 { + status = "okay"; +}; + /* Colibri SD/MMC Card */ &usdhc2 { status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi index 49d105eb476919..edba5b58241465 100644 --- a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi @@ -23,17 +23,76 @@ key-wakeup { }; }; + extcon_usbc_det: usbc-det { + compatible = "linux,extcon-usb-gpio"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbc_det>; + id-gpios = <&lsio_gpio5 9 GPIO_ACTIVE_HIGH>; + status = "disabled"; + }; + reg_module_3v3: regulator-module-3v3 { compatible = "regulator-fixed"; regulator-name = "+V3.3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; + + reg_module_3v3_avdd: regulator-module-3v3-avdd { + compatible = "regulator-fixed"; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <3300000>; + regulator-name = "+V3.3_AVDD_AUDIO"; + }; + + reg_module_vref_1v8: regulator-module-vref-1v8 { + compatible = "regulator-fixed"; + regulator-max-microvolt = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-name = "vref-1v8"; + }; + + reg_usbh_vbus: regulator-usbh-vbus { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbh1_reg>; + gpio = <&lsio_gpio4 3 
GPIO_ACTIVE_LOW>; + regulator-always-on; + regulator-max-microvolt = <5000000>; + regulator-min-microvolt = <5000000>; + regulator-name = "usbh_vbus"; + }; + + sound-card { + compatible = "simple-audio-card"; + simple-audio-card,bitclock-master = <&dailink_master>; + simple-audio-card,format = "i2s"; + simple-audio-card,frame-master = <&dailink_master>; + simple-audio-card,name = "colibri-imx8x"; + + dailink_master: simple-audio-card,codec { + clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>; + sound-dai = <&sgtl5000_a>; + }; + + simple-audio-card,cpu { + sound-dai = <&sai0>; + }; + }; }; -/* TODO Analogue Inputs */ +/* Colibri Analogue Inputs */ +&adc0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adc0>; + vref-supply = <®_module_vref_1v8>; +}; -/* TODO Cooling maps for DX */ +/* Colibri PWM_A */ +&adma_pwm { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm_a>; +}; &cpu_alert0 { hysteresis = <2000>; @@ -47,9 +106,20 @@ &cpu_crit0 { type = "critical"; }; -/* TODO flexcan1 - 3 */ - -/* TODO GPU */ +&enet0_lpcg { + clocks = <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>, + <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>, + <&conn_axi_clk>, + <&clk IMX_SC_R_ENET_0 IMX_SC_C_DISABLE_50>, + <&conn_ipg_clk>, + <&conn_ipg_clk>; + clock-output-names = "enet0_lpcg_timer_clk", + "enet0_lpcg_txc_sampling_clk", + "enet0_lpcg_ahb_clk", + "enet0_lpcg_ref_50mhz_clk", + "enet0_lpcg_ipg_clk", + "enet0_lpcg_ipg_s_clk"; +}; /* On-module I2C */ &i2c0 { @@ -60,6 +130,41 @@ &i2c0 { pinctrl-0 = <&pinctrl_i2c0>, <&pinctrl_sgtl5000_usb_clk>; status = "okay"; + /* USB HUB USB3803 */ + usb-hub@8 { + compatible = "smsc,usb3803"; + reg = <0x8>; + assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>, + <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>, + <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>, + <&mclkout0_lpcg IMX_LPCG_CLK_0>; + assigned-clock-rates = <786432000>, <49152000>, <12000000>, <12000000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usb3503a>; + bypass-gpios = <&gpio_expander_43 5 GPIO_ACTIVE_LOW>; + clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>; + clock-names = "refclk"; + disabled-ports = <2>; + initial-mode = <1>; + intn-gpios = <&lsio_gpio3 4 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio_expander_43 4 GPIO_ACTIVE_LOW>; + }; + + sgtl5000_a: audio-codec@a { + compatible = "fsl,sgtl5000"; + reg = <0xa>; + #sound-dai-cells = <0>; + assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>, + <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>, + <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>, + <&mclkout0_lpcg IMX_LPCG_CLK_0>; + assigned-clock-rates = <786432000>, <49152000>, <12000000>, <12000000>; + clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>; + VDDA-supply = <®_module_3v3_avdd>; + VDDD-supply = <®_module_vref_1v8>; + VDDIO-supply = <®_module_3v3>; + }; + /* Touch controller */ touchscreen@2c { compatible = "adi,ad7879-1"; @@ -77,6 +182,21 @@ touchscreen@2c { adi,conversion-interval = /bits/ 8 <255>; status = "disabled"; }; + + gpio_expander_43: gpio@43 { + compatible = "fcs,fxl6408"; + reg = <0x43>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "Wi-Fi_W_DISABLE", + "Wi-Fi_WKUP_WLAN", + "PWR_EN_+V3.3_WiFi_N", + "PCIe_REF_CLK_EN", + "USB_RESET_N", + "USB_BYPASS_N", + "Wi-Fi_PDn", + "Wi-Fi_WKUP_BT"; + }; }; /* TODO i2c lvds0 accessible on FFC (X2) */ @@ -321,13 +441,74 @@ &lsio_pwm2 { pinctrl-names = "default"; }; +/* VPU Mailboxes */ +&mu_m0 { + status="okay"; +}; + +&mu1_m0 { + status="okay"; +}; + /* TODO MIPI CSI */ /* TODO MIPI DSI with DSI-to-HDMI bridge lt8912 */ /* 
TODO on-module PCIe for Wi-Fi */ -/* TODO On-module i2s / Audio */ +/* On-module I2S */ +&sai0 { + #sound-dai-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sai0>; + status = "okay"; +}; + +&thermal_zones { + pmic-thermal { + polling-delay-passive = <250>; + polling-delay = <2000>; + thermal-sensors = <&tsens IMX_SC_R_PMIC_0>; + + trips { + pmic_alert0: trip0 { + temperature = <110000>; + hysteresis = <2000>; + type = "passive"; + }; + + pmic_crit0: trip1 { + temperature = <125000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + pmic_cooling_map0: map0 { + trip = <&pmic_alert0>; + cooling-device = <&A35_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&A35_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&A35_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&A35_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; +}; + +&usbotg1 { + adp-disable; + disable-over-current; + extcon = <&extcon_usbc_det &extcon_usbc_det>; + hnp-disable; + power-active-high; + srp-disable; + vbus-supply = <®_usbh_vbus>; +}; + +&usbotg3_cdns3 { + dr_mode = "host"; +}; /* On-module eMMC */ &usdhc1 { @@ -356,11 +537,24 @@ &usdhc2 { no-1-8-v; }; -/* TODO USB Client/Host */ +&vpu { + compatible = "nxp,imx8qxp-vpu"; + status = "okay"; +}; -/* TODO USB Host */ +/* VPU Decoder */ +&vpu_core0 { + reg = <0x2d040000 0x10000>; + memory-region = <&decoder_boot>, <&decoder_rpc>; + status = "okay"; +}; -/* TODO VPU Encoder/Decoder */ +/* VPU Encoder */ +&vpu_core1 { + reg = <0x2d050000 0x10000>; + memory-region = <&encoder_boot>, <&encoder_rpc>; + status = "okay"; +}; &iomuxc { /* On-module touch pen-down interrupt */ diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts index a15987f49e8d6b..8d036b3962e9d1 100644 --- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts @@ -62,6 +62,15 @@ vdevbuffer: vdevbuffer@a4020000 { }; + reg_vdd_12v: regulator-vdd-12v { + compatible = "regulator-fixed"; + regulator-name = "VDD_12V"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + gpio = <&pcal6524 14 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + reg_vref_1v8: regulator-adc-vref { compatible = "regulator-fixed"; regulator-name = "vref_1v8"; @@ -80,6 +89,68 @@ reg_usdhc2_vmmc: regulator-usdhc2 { off-on-delay-us = <12000>; enable-active-high; }; + + backlight_lvds: backlight-lvds { + compatible = "pwm-backlight"; + pwms = <&adp5585 0 100000 0>; + brightness-levels = <0 100>; + num-interpolated-steps = <100>; + default-brightness-level = <100>; + power-supply = <®_vdd_12v>; + enable-gpios = <&adp5585 9 GPIO_ACTIVE_HIGH>; + status = "disabled"; + }; + + bt_sco_codec: bt-sco-codec { + compatible = "linux,bt-sco"; + #sound-dai-cells = <1>; + }; + + sound-bt-sco { + compatible = "simple-audio-card"; + simple-audio-card,name = "bt-sco-audio"; + simple-audio-card,format = "dsp_a"; + simple-audio-card,bitclock-inversion; + simple-audio-card,frame-master = <&btcpu>; + simple-audio-card,bitclock-master = <&btcpu>; + + btcpu: simple-audio-card,cpu { + sound-dai = <&sai1>; + dai-tdm-slot-num = <2>; + dai-tdm-slot-width = <16>; + }; + + simple-audio-card,codec { + sound-dai = <&bt_sco_codec 1>; + }; + }; + + sound-micfil { + compatible = "fsl,imx-audio-card"; + model = "micfil-audio"; + + pri-dai-link { + link-name = "micfil hifi"; + format = "i2s"; + + cpu { + sound-dai = <&micfil>; + }; + }; + }; + + sound-xcvr { + compatible = "fsl,imx-audio-card"; + model = "imx-audio-xcvr"; + 
+ pri-dai-link { + link-name = "XCVR PCM"; + + cpu { + sound-dai = <&xcvr>; + }; + }; + }; }; &adc1 { @@ -145,9 +216,19 @@ ethphy2: ethernet-phy@2 { }; }; +&lpi2c1 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpi2c1>; + status = "okay"; + + inertial-meter@6a { + compatible = "st,lsm6dso"; + reg = <0x6a>; + }; +}; + &lpi2c2 { - #address-cells = <1>; - #size-cells = <0>; clock-frequency = <400000>; pinctrl-names = "default", "sleep"; pinctrl-0 = <&pinctrl_lpi2c2>; @@ -241,11 +322,19 @@ ldo5: LDO5 { }; }; }; + + adp5585: io-expander@34 { + compatible = "adi,adp5585-00", "adi,adp5585"; + reg = <0x34>; + vdd-supply = <&buck4>; + gpio-controller; + #gpio-cells = <2>; + gpio-reserved-ranges = <5 1>; + #pwm-cells = <3>; + }; }; &lpi2c3 { - #address-cells = <1>; - #size-cells = <0>; clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpi2c3>; @@ -337,6 +426,16 @@ &lpuart5 { status = "okay"; }; +&micfil { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&pinctrl_pdm>; + pinctrl-1 = <&pinctrl_pdm_sleep>; + assigned-clocks = <&clk IMX93_CLK_PDM>; + assigned-clock-parents = <&clk IMX93_CLK_AUDIO_PLL>; + assigned-clock-rates = <49152000>; + status = "okay"; +}; + &mu1 { status = "okay"; }; @@ -345,6 +444,17 @@ &mu2 { status = "okay"; }; +&sai1 { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&pinctrl_sai1>; + pinctrl-1 = <&pinctrl_sai1_sleep>; + assigned-clocks = <&clk IMX93_CLK_SAI1>; + assigned-clock-parents = <&clk IMX93_CLK_AUDIO_PLL>; + assigned-clock-rates = <12288000>; + fsl,sai-mclk-direction-output; + status = "okay"; +}; + &usbotg1 { dr_mode = "otg"; hnp-disable; @@ -408,6 +518,18 @@ &wdog3 { status = "okay"; }; +&xcvr { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&pinctrl_spdif>; + pinctrl-1 = <&pinctrl_spdif_sleep>; + assigned-clocks = <&clk IMX93_CLK_SPDIF>, + <&clk IMX93_CLK_AUDIO_XCVR>; + assigned-clock-parents = <&clk IMX93_CLK_AUDIO_PLL>, + <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>; + assigned-clock-rates = <12288000>, <200000000>; + status = "okay"; +}; + &iomuxc { pinctrl_eqos: eqosgrp { fsl,pins = < @@ -508,6 +630,13 @@ MX93_PAD_DAP_TCLK_SWCLK__LPUART5_CTS_B 0x31e >; }; + pinctrl_lpi2c1: lpi2c1grp { + fsl,pins = < + MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e + MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e + >; + }; + pinctrl_lpi2c2: lpi2c2grp { fsl,pins = < MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e @@ -528,6 +657,40 @@ MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x31e >; }; + pinctrl_pdm: pdmgrp { + fsl,pins = < + MX93_PAD_PDM_CLK__PDM_CLK 0x31e + MX93_PAD_PDM_BIT_STREAM0__PDM_BIT_STREAM00 0x31e + MX93_PAD_PDM_BIT_STREAM1__PDM_BIT_STREAM01 0x31e + >; + }; + + pinctrl_pdm_sleep: pdmsleepgrp { + fsl,pins = < + MX93_PAD_PDM_CLK__GPIO1_IO08 0x31e + MX93_PAD_PDM_BIT_STREAM0__GPIO1_IO09 0x31e + MX93_PAD_PDM_BIT_STREAM1__GPIO1_IO10 0x31e + >; + }; + + pinctrl_sai1: sai1grp { + fsl,pins = < + MX93_PAD_SAI1_TXC__SAI1_TX_BCLK 0x31e + MX93_PAD_SAI1_TXFS__SAI1_TX_SYNC 0x31e + MX93_PAD_SAI1_TXD0__SAI1_TX_DATA00 0x31e + MX93_PAD_SAI1_RXD0__SAI1_RX_DATA00 0x31e + >; + }; + + pinctrl_sai1_sleep: sai1sleepgrp { + fsl,pins = < + MX93_PAD_SAI1_TXC__GPIO1_IO12 0x51e + MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x51e + MX93_PAD_SAI1_TXD0__GPIO1_IO13 0x51e + MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x51e + >; + }; + /* need to config the SION for data and cmd pad, refer to ERR052021 */ pinctrl_usdhc1: usdhc1grp { fsl,pins = < @@ -585,6 +748,20 @@ MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x31e >; }; + pinctrl_spdif: spdifgrp { + fsl,pins = < + MX93_PAD_GPIO_IO22__SPDIF_IN 0x31e + 
MX93_PAD_GPIO_IO23__SPDIF_OUT 0x31e + >; + }; + + pinctrl_spdif_sleep: spdifsleepgrp { + fsl,pins = < + MX93_PAD_GPIO_IO22__GPIO2_IO22 0x31e + MX93_PAD_GPIO_IO23__GPIO2_IO23 0x31e + >; + }; + pinctrl_usdhc2_gpio: usdhc2gpiogrp { fsl,pins = < MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e diff --git a/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts b/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts new file mode 100644 index 00000000000000..236a44c1782ae1 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx93-14x14-evk.dts @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright 2024 NXP + */ + +/dts-v1/; + +#include +#include "imx93.dtsi" + +/ { + model = "NXP i.MX93 14X14 EVK board"; + compatible = "fsl,imx93-14x14-evk", "fsl,imx93"; + + chosen { + stdout-path = &lpuart1; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + linux,cma { + compatible = "shared-dma-pool"; + reusable; + alloc-ranges = <0 0x80000000 0 0x40000000>; + size = <0 0x10000000>; + linux,cma-default; + }; + + vdev0vring0: vdev0vring0@a4000000 { + reg = <0 0xa4000000 0 0x8000>; + no-map; + }; + + vdev0vring1: vdev0vring1@a4008000 { + reg = <0 0xa4008000 0 0x8000>; + no-map; + }; + + vdev1vring0: vdev1vring0@a4010000 { + reg = <0 0xa4010000 0 0x8000>; + no-map; + }; + + vdev1vring1: vdev1vring1@a4018000 { + reg = <0 0xa4018000 0 0x8000>; + no-map; + }; + + rsc_table: rsc-table@2021e000 { + reg = <0 0x2021e000 0 0x1000>; + no-map; + }; + + vdevbuffer: vdevbuffer@a4020000 { + compatible = "shared-dma-pool"; + reg = <0 0xa4020000 0 0x100000>; + no-map; + }; + }; + + reg_can1_stby: regulator-can1-stby { + compatible = "regulator-fixed"; + regulator-name = "can1-stby"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&pcal6524_2 10 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <®_can1_en>; + }; + + reg_can1_en: regulator-can1-en { + compatible = "regulator-fixed"; + regulator-name = "can1-en"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&pcal6524_2 12 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + reg_can2_stby: regulator-can2-stby { + compatible = "regulator-fixed"; + regulator-name = "can2-stby"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&pcal6524_2 11 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <®_can2_en>; + }; + + reg_can2_en: regulator-can2-en { + compatible = "regulator-fixed"; + regulator-name = "can2-en"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&pcal6524_2 13 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + reg_usdhc2_vmmc: regulator-usdhc2 { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>; + regulator-name = "VSD_3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>; + enable-active-high; + off-on-delay-us = <12000>; + }; + + reg_vdd_12v: regulator-vdd-12v { + compatible = "regulator-fixed"; + regulator-name = "reg_vdd_12v"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + gpio = <&pcal6524 14 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + reg_vref_1v8: regulator-adc-vref { + compatible = "regulator-fixed"; + regulator-name = "vref_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; +}; + +&adc1 { + vref-supply = <®_vref_1v8>; + status = "okay"; +}; + +&cm33 { + mbox-names 
= "tx", "rx", "rxdb"; + mboxes = <&mu1 0 1>, + <&mu1 1 1>, + <&mu1 3 1>; + memory-region = <&vdevbuffer>, <&vdev0vring0>, <&vdev0vring1>, + <&vdev1vring0>, <&vdev1vring1>, <&rsc_table>; + status = "okay"; +}; + +&fec { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec>; + phy-mode = "rgmii-id"; + phy-handle = <ðphy2>; + fsl,magic-packet; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + clock-frequency = <5000000>; + + ethphy2: ethernet-phy@2 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <2>; + eee-broken-1000t; + reset-gpios = <&pcal6524 16 GPIO_ACTIVE_LOW>; + reset-assert-us = <10000>; + reset-deassert-us = <80000>; + realtek,clkout-disable; + }; + }; +}; + +&flexcan1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexcan1>; + xceiver-supply = <®_can1_stby>; + status = "okay"; +}; + +&flexcan2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexcan2>; + xceiver-supply = <®_can2_stby>; + status = "okay"; +}; + +&lpi2c1 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpi2c1>; + status = "okay"; + + lsm6dsm@6a { + compatible = "st,lsm6dso"; + reg = <0x6a>; + }; +}; + +&lpi2c2 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpi2c2>; + status = "okay"; + + pcal6524_2: gpio@20 { + compatible = "nxp,pcal6524"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + }; + + pcal6524: gpio@22 { + compatible = "nxp,pcal6524"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcal6524>; + reg = <0x22>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&gpio3>; + interrupts = <27 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +&lpi2c3 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpi2c3>; + status = "okay"; +}; + +&lpuart1 { /* console */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + status = "okay"; +}; + +&mu1 { + status = "okay"; +}; + +&mu2 { + status = "okay"; +}; + +&usbotg1 { + dr_mode = "otg"; + hnp-disable; + srp-disable; + adp-disable; + disable-over-current; + samsung,picophy-pre-emp-curr-control = <3>; + samsung,picophy-dc-vol-level-adjust = <7>; + status = "okay"; +}; + +&usbotg2 { + dr_mode = "host"; + disable-over-current; + samsung,picophy-pre-emp-curr-control = <3>; + samsung,picophy-dc-vol-level-adjust = <7>; + status = "okay"; +}; + +&usdhc1 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc1>; + pinctrl-1 = <&pinctrl_usdhc1_100mhz>; + pinctrl-2 = <&pinctrl_usdhc1_200mhz>; + bus-width = <8>; + non-removable; + status = "okay"; +}; + +&usdhc2 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; + cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>; + vmmc-supply = <®_usdhc2_vmmc>; + bus-width = <4>; + no-mmc; + status = "okay"; +}; + +&wdog3 { + status = "okay"; +}; + +&iomuxc { + pinctrl_flexcan1: flexcan1grp { + fsl,pins = < + MX93_PAD_PDM_CLK__CAN1_TX 0x139e + MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e + >; + }; + + pinctrl_flexcan2: flexcan2grp { + fsl,pins = < + MX93_PAD_GPIO_IO25__CAN2_TX 0x139e + MX93_PAD_GPIO_IO27__CAN2_RX 0x139e + >; + }; + + pinctrl_lpi2c1: lpi2c1grp { + fsl,pins = < + MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e + MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e + >; + }; + + pinctrl_lpi2c2: lpi2c2grp { + fsl,pins 
= < + MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e + MX93_PAD_I2C2_SDA__LPI2C2_SDA 0x40000b9e + >; + }; + + pinctrl_lpi2c3: lpi2c3grp { + fsl,pins = < + MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e + MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e + >; + }; + + pinctrl_pcal6524: pcal6524grp { + fsl,pins = < + MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x31e + >; + }; + + pinctrl_fec: fecgrp { + fsl,pins = < + MX93_PAD_ENET2_MDC__ENET1_MDC 0x57e + MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x57e + MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e + MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e + MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e + MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e + MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x58e + MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e + MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x57e + MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x57e + MX93_PAD_ENET2_TD2__ENET1_RGMII_TD2 0x57e + MX93_PAD_ENET2_TD3__ENET1_RGMII_TD3 0x57e + MX93_PAD_ENET2_TXC__ENET1_RGMII_TXC 0x58e + MX93_PAD_ENET2_TX_CTL__ENET1_RGMII_TX_CTL 0x57e + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX93_PAD_UART1_RXD__LPUART1_RX 0x31e + MX93_PAD_UART1_TXD__LPUART1_TX 0x31e + >; + }; + + pinctrl_uart5: uart5grp { + fsl,pins = < + MX93_PAD_DAP_TDO_TRACESWO__LPUART5_TX 0x31e + MX93_PAD_DAP_TDI__LPUART5_RX 0x31e + MX93_PAD_DAP_TMS_SWDIO__LPUART5_RTS_B 0x31e + MX93_PAD_DAP_TCLK_SWCLK__LPUART5_CTS_B 0x31e + >; + }; + + /* need to config the SION for data and cmd pad, refer to ERR052021 */ + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX93_PAD_SD1_CLK__USDHC1_CLK 0x1582 + MX93_PAD_SD1_CMD__USDHC1_CMD 0x40001382 + MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x40001382 + MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x40001382 + MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x40001382 + MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x40001382 + MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x40001382 + MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x40001382 + MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x40001382 + MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x40001382 + MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x1582 + >; + }; + + /* need to config the SION for data and cmd pad, refer to ERR052021 */ + pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp { + fsl,pins = < + MX93_PAD_SD1_CLK__USDHC1_CLK 0x158e + MX93_PAD_SD1_CMD__USDHC1_CMD 0x4000138e + MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000138e + MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000138e + MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000138e + MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000138e + MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000138e + MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000138e + MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000138e + MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000138e + MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x158e + >; + }; + + /* need to config the SION for data and cmd pad, refer to ERR052021 */ + pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp { + fsl,pins = < + MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe + MX93_PAD_SD1_CMD__USDHC1_CMD 0x400013fe + MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x400013fe + MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x400013fe + MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x400013fe + MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x400013fe + MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x400013fe + MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x400013fe + MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x400013fe + MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x400013fe + MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe + >; + }; + + pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { + fsl,pins = < + MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x31e + >; + }; + + pinctrl_usdhc2_gpio: usdhc2gpiogrp { + fsl,pins = < + MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e + >; + }; + + /* need to config the SION for data and cmd pad, refer to 
ERR052021 */ + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX93_PAD_SD2_CLK__USDHC2_CLK 0x1582 + MX93_PAD_SD2_CMD__USDHC2_CMD 0x40001382 + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x40001382 + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x40001382 + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x40001382 + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x40001382 + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e + >; + }; + + /* need to config the SION for data and cmd pad, refer to ERR052021 */ + pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { + fsl,pins = < + MX93_PAD_SD2_CLK__USDHC2_CLK 0x158e + MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000138e + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e + >; + }; + + /* need to config the SION for data and cmd pad, refer to ERR052021 */ + pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { + fsl,pins = < + MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe + MX93_PAD_SD2_CMD__USDHC2_CMD 0x400013fe + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x400013fe + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x400013fe + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x400013fe + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x400013fe + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts b/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts index 950dece83c2466..f8a73612fa0514 100644 --- a/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts +++ b/arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts @@ -178,8 +178,6 @@ rtc@53 { }; &lpi2c2 { - #address-cells = <1>; - #size-cells = <0>; clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_lpi2c2>; diff --git a/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts new file mode 100644 index 00000000000000..89e97c604bd3e4 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx93-kontron-bl-osm-s.dts @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0+ OR MIT +/* + * Copyright (C) 2024 Kontron Electronics GmbH + */ + +/dts-v1/; + +#include "imx93-kontron-osm-s.dtsi" + +/ { + model = "Kontron BL i.MX93 OSM-S"; + compatible = "kontron,imx93-bl-osm-s", "kontron,imx93-osm-s", "fsl,imx93"; + + aliases { + ethernet0 = &fec; + ethernet1 = &eqos; + }; + + leds { + compatible = "gpio-leds"; + + led1 { + label = "led1"; + gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + }; + + pwm-beeper { + compatible = "pwm-beeper"; + pwms = <&tpm6 1 5000 0>; + }; + + reg_vcc_panel: regulator-vcc-panel { + compatible = "regulator-fixed"; + gpio = <&gpio4 3 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <3300000>; + regulator-name = "VCC_PANEL"; + }; +}; + +&eqos { /* Second ethernet (OSM-S ETH_B) */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_eqos_rgmii>; + phy-mode = "rgmii-id"; + phy-handle = <ðphy1>; + status = "okay"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + ethphy1: ethernet-phy@1 { + compatible = "ethernet-phy-id4f51.e91b"; + reg = <1>; + reset-assert-us = <10000>; + reset-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&fec { /* First ethernet (OSM-S ETH_A) */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enet_rgmii>; + phy-connection-type = "rgmii-id"; + phy-handle = <ðphy0>; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@1 { + 
compatible = "ethernet-phy-id4f51.e91b"; + reg = <1>; + reset-assert-us = <10000>; + reset-gpios = <&gpio2 18 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&flexcan1 { + status = "okay"; +}; + +&lpi2c2 { + status = "okay"; + + gpio_expander_dio: gpio@20 { + compatible = "ti,tca6408"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "DIO1_OUT","DIO1_IN", "DIO2_OUT","DIO2_IN", + "DIO3_OUT","DIO3_IN", "DIO4_OUT","DIO4_IN"; + interrupt-parent = <&gpio4>; + interrupts = <28 IRQ_TYPE_EDGE_FALLING>; + reset-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; + }; +}; + +&lpspi8 { + assigned-clocks = <&clk IMX93_CLK_LPSPI8>; + assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>; + assigned-clock-rates = <100000000>; + status = "okay"; + + eeram@0 { + compatible = "microchip,48l640"; + reg = <0>; + spi-max-frequency = <20000000>; + }; +}; + +&lpuart1 { + status = "okay"; +}; + +&lpuart7 { + uart-has-rtscts; + status = "okay"; +}; + +&lpuart6 { + linux,rs485-enabled-at-boot-time; + uart-has-rtscts; + status = "okay"; +}; + +&tpm6 { + status = "okay"; +}; + +&usbotg1 { + #address-cells = <1>; + #size-cells = <0>; + disable-over-current; + dr_mode = "host"; + status = "okay"; + + usb1@1 { + compatible = "usb424,2514"; + reg = <1>; + }; +}; + +&usbotg2 { + adp-disable; + hnp-disable; + srp-disable; + disable-over-current; + dr_mode = "otg"; + usb-role-switch; + status = "okay"; +}; + +&usdhc2 { + vmmc-supply = <®_vdd_3v3>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi new file mode 100644 index 00000000000000..47c1363a2f99ab --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0+ OR MIT +/* + * Copyright (C) 2024 Kontron Electronics GmbH + */ + +#include +#include "imx93.dtsi" + +/ { + model = "Kontron OSM-S i.MX93"; + compatible = "kontron,imx93-osm-s", "fsl,imx93"; + + aliases { + rtc0 = &rv3028; + rtc1 = &bbnsm_rtc; + }; + + memory@40000000 { + device_type = "memory"; + reg = <0x0 0x40000000 0 0x80000000>; + }; + + chosen { + stdout-path = &lpuart1; + }; + + reg_usdhc2_vcc: regulator-usdhc2-vcc { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usdhc2_vcc>; + gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "VCC_SDIO_A"; + }; + + reg_vdd_carrier: regulator-vdd-carrier { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_vdd_carrier>; + gpio = <&gpio4 29 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + regulator-boot-on; + regulator-name = "VDD_CARRIER"; + + regulator-state-standby { + regulator-on-in-suspend; + }; + + regulator-state-mem { + regulator-off-in-suspend; + }; + + regulator-state-disk { + regulator-off-in-suspend; + }; + }; +}; + +&flexcan1 { /* OSM-S CAN_A */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexcan1>; +}; + +&flexcan2 { /* OSM-S CAN_B */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexcan2>; +}; + +&gpio1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio1>; + gpio-line-names = "", "", "I2C_A_SCL", "I2C_A_SDA", + "UART_CON_RX", "UART_CON_TX", "UART_C_RX", "UART_C_TX", + "CAN_A_TX", "CAN_A_RX", "GPIO_A_0", "SPI_A_CS0", + "SPI_A_SDI", "SPI_A_SCK","SPI_A_SDO"; +}; + +&gpio2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio2>; + gpio-line-names = "I2C_B_SDA", 
"I2C_B_SCL", "GPIO_B_1", "GPIO_A_2", + "UART_B_TX", "UART_B_RX", "UART_B_RTS", "UART_B_CTS", + "UART_A_TX", "UART_A_RX", "UART_A_RTS", "UART_A_CTS", + "SPI_B_CS0", "SPI_B_SDI", "SPI_B_SDO", "SPI_B_SCK", + "I2S_BITCLK", "I2S_MCLK", "GPIO_A_1", "I2S_A_DATA_OUT", + "I2S_A_DATA_IN", "PWM_2", "GPIO_A_3", "PWM_1", + "PWM_0", "CAN_B_TX", "I2S_LRCLK", "CAN_B_RX", "GPIO_A_4", + "GPIO_A_5"; +}; + +&gpio3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio3>; + gpio-line-names = "SDIO_A_CD", "SDIO_A_CLK", "SDIO_A_CMD", "SDIO_A_D0", + "SDIO_A_D1", "SDIO_A_D2", "SDIO_A_D3", "SDIO_A_PWR_EN", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "SDIO_B_CLK", "SDIO_B_CMD", "SDIO_B_D0", "SDIO_B_D1", + "SDIO_B_D2", "SDIO_B_D3", "GPIO_A_6", "GPIO_A_7"; +}; + +&gpio4 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio4>; + gpio-line-names = "ETH_B_MDC", "ETH_B_MDIO", "ETH_B_TXD4", "ETH_B_TXD3", + "ETH_B_TXD2", "ETH_B_TXD1", "ETH_B_TX_EN", "ETH_B_TX_CLK", + "ETH_B_RX_CTL", "ETH_B_RX_CLK", "ETH_B_RXD0", "ETH_B_RXD1", + "ETH_B_RXD2", "ETH_B_RXD3", "ETH_MDC", "ETH_MDIO", + "ETH_A_TXD3", "ETH_A_TXD2", "ETH_A_TXD1", "ETH_A_TXD0", + "ETH_A_TX_EN", "ETH_A_TX_CLK", "ETH_A_RX_CTL", "ETH_A_RX_CLK", + "ETH_A_RXD0", "ETH_A_RXD1", "ETH_A_RXD2", "ETH_A_RXD3", + "GPIO_B_0", "CARRIER_PWR_EN"; +}; + +&lpi2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpi2c1>; + status = "okay"; + + pca9451: pmic@25 { + compatible = "nxp,pca9451a"; + reg = <0x25>; + nxp,i2c-lt-enable; + + regulators { + reg_vdd_soc: BUCK1 { /* dual phase with BUCK3 */ + regulator-name = "+0V8_VDD_SOC (BUCK1)"; + regulator-min-microvolt = <650000>; + regulator-max-microvolt = <950000>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <3125>; + }; + + reg_vddq_ddr: BUCK2 { + regulator-name = "+0V6_VDDQ_DDR (BUCK2)"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <600000>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <3125>; + }; + + reg_vdd_3v3: BUCK4 { + regulator-name = "+3V3 (BUCK4)"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + reg_vdd_1v8: BUCK5 { + regulator-name = "+1V8 (BUCK5)"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-always-on; + }; + + reg_nvcc_dram: BUCK6 { + regulator-name = "+1V1_NVCC_DRAM (BUCK6)"; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-boot-on; + regulator-always-on; + }; + + reg_nvcc_snvs: LDO1 { + regulator-name = "+1V8_NVCC_SNVS (LDO1)"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-always-on; + }; + + reg_vdd_ana: LDO4 { + regulator-name = "+0V8_VDD_ANA (LDO4)"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + regulator-boot-on; + regulator-always-on; + }; + + reg_nvcc_sd: LDO5 { + regulator-name = "NVCC_SD (LDO5)"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + }; + }; + }; + + eeprom@50 { + compatible = "onnn,n24s64b", "atmel,24c64"; + reg = <0x50>; + pagesize = <32>; + size = <8192>; + num-addresses = <1>; + }; + + rv3028: rtc@52 { + compatible = "microcrystal,rv3028"; + reg = <0x52>; + }; +}; + +&lpi2c2 { /* OSM-S I2C_A */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpi2c2>; +}; + +&lpi2c3 { /* OSM-S I2C_B */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpi2c3>; +}; + +&lpspi1 { /* OSM-S SPI_A */ + 
pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpspi1>; + cs-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>; +}; + +&lpspi8 { /* OSM-S SPI_B */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpspi8>; + cs-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; +}; + +&lpuart1 { /* OSM-S UART_CON */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpuart1>; +}; + +&lpuart2 { /* OSM-S UART_C */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpuart2>; +}; + +&lpuart6 { /* OSM-S UART_B */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpuart6>; +}; + +&lpuart7 { /* OSM-S UART_A */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpuart7>; +}; + +&tpm3 { /* OSM-S PWM_0 */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_tpm3>; +}; + +&tpm4 { /* OSM-S PWM_2 */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_tpm4>; +}; + +&tpm6 { /* OSM-S PWM_1 */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_tpm6>; +}; + +&usdhc1 { /* eMMC */ + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc1>; + pinctrl-1 = <&pinctrl_usdhc1_100mhz>; + pinctrl-2 = <&pinctrl_usdhc1_200mhz>; + vmmc-supply = <®_vdd_3v3>; + vqmmc-supply = <®_vdd_1v8>; + bus-width = <8>; + non-removable; + status = "okay"; +}; + +&usdhc2 { /* OSM-S SDIO_A */ + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>; + vmmc-supply = <®_usdhc2_vcc>; + cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>; +}; + +&usdhc3 { /* OSM-S SDIO_B */ + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc3>; + pinctrl-1 = <&pinctrl_usdhc3_100mhz>; + pinctrl-2 = <&pinctrl_usdhc3_200mhz>; + vqmmc-supply = <®_vdd_1v8>; +}; + +&wdog3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdog>; + fsl,ext-reset-output; + status = "okay"; +}; + +&iomuxc { + pinctrl_enet_rgmii: enetrgmiigrp { + fsl,pins = < + MX93_PAD_ENET2_MDC__ENET1_MDC 0x57e /* ETH_MDC */ + MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x57e /* ETH_MDIO */ + MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e /* ETH_A_(S)(R)(G)MII_RXD0 */ + MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e /* ETH_A_(S)(R)(G)MII_RXD1 */ + MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e /* ETH_A_(R)(G)MII_RXD2 */ + MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e /* ETH_A_(R)(G)MII_RXD3 */ + MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x5fe /* ETH_A_(R)(G)MII_RX_CLK */ + MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e /* ETH_A_(R)(G)MII_RX_DV(_ER) */ + MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x57e /* ETH_A_(S)(R)(G)MII_TXD0 */ + MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x57e /* ETH_A_(S)(R)(G)MII_TXD1 */ + MX93_PAD_ENET2_TD2__ENET1_RGMII_TD2 0x57e /* ETH_A_(S)(R)(G)MII_TXD2 */ + MX93_PAD_ENET2_TD3__ENET1_RGMII_TD3 0x57e /* ETH_A_(S)(R)(G)MII_TXD3 */ + MX93_PAD_ENET2_TXC__ENET1_RGMII_TXC 0x5fe /* ETH_A_(R)(G)MII_TX_CLK */ + MX93_PAD_ENET2_TX_CTL__ENET1_RGMII_TX_CTL 0x57e /* ETH_A_(R)(G)MII_TX_EN(_ER) */ + >; + }; + + pinctrl_eqos_rgmii: eqosrgmiigrp { + fsl,pins = < + MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x57e /* ETH_B_MDC */ + MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x57e /* ETH_B_MDIO */ + MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e /* ETH_B_(S)(R)(G)MII_RXD0 */ + MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e /* ETH_B_(S)(R)(G)MII_RXD1 */ + MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e /* ETH_B_(R)(G)MII_RXD2 */ + MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e /* ETH_B_(R)(G)MII_RXD3 */ + 
MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x57e /* ETH_B_(R)(G)MII_RX_CLK */ + MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e /* ETH_B_(R)(G)MII_RX_DV(_ER) */ + MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x57e /* ETH_B_(S)(R)(G)MII_TXD0 */ + MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x57e /* ETH_B_(S)(R)(G)MII_TXD1 */ + MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x57e /* ETH_B_(S)(R)(G)MII_TXD2 */ + MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x57e /* ETH_B_(S)(R)(G)MII_TXD3 */ + MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x57e /* ETH_B_(R)(G)MII_TX_CLK */ + MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x57e /* ETH_B_(R)(G)MII_TX_EN(_ER) */ + >; + }; + + pinctrl_flexcan1: flexcan1grp { + fsl,pins = < + MX93_PAD_PDM_CLK__CAN1_TX 0x139e /* CAN_A_TX */ + MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e /* CAN_A_RX */ + >; + }; + + pinctrl_flexcan2: flexcan2grp { + fsl,pins = < + MX93_PAD_GPIO_IO25__CAN2_TX 0x139e /* CAN_B_TX */ + MX93_PAD_GPIO_IO27__CAN2_RX 0x139e /* CAN_B_RX */ + >; + }; + + pinctrl_gpio1: gpio1grp { + fsl,pins = < + MX93_PAD_PDM_BIT_STREAM1__GPIO1_IO10 0x31e /* GPIO_A_0 */ + >; + }; + + pinctrl_gpio2: gpio2grp { + fsl,pins = < + MX93_PAD_GPIO_IO18__GPIO2_IO18 0x31e /* GPIO_A_1 */ + MX93_PAD_GPIO_IO03__GPIO2_IO03 0x31e /* GPIO_A_2 */ + MX93_PAD_GPIO_IO22__GPIO2_IO22 0x31e /* GPIO_A_3 */ + MX93_PAD_GPIO_IO28__GPIO2_IO28 0x31e /* GPIO_A_4 */ + MX93_PAD_GPIO_IO29__GPIO2_IO29 0x31e /* GPIO_A_5 */ + MX93_PAD_GPIO_IO02__GPIO2_IO02 0x31e /* GPIO_B_1 */ + >; + }; + + pinctrl_gpio3: gpio3grp { + fsl,pins = < + MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x31e /* GPIO_A_6 */ + MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x31e /* GPIO_A_7 */ + >; + }; + + pinctrl_gpio4: gpio4grp { + fsl,pins = < + MX93_PAD_CCM_CLKO3__GPIO4_IO28 0x31e /* GPIO_B_0 */ + >; + }; + + pinctrl_lpi2c1: lpi2c1grp { + fsl,pins = < + MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e + MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e + >; + }; + + pinctrl_lpi2c2: lpi2c2grp { + fsl,pins = < + MX93_PAD_I2C2_SCL__LPI2C2_SCL 0x40000b9e /* I2C_A_SCL */ + MX93_PAD_I2C2_SDA__LPI2C2_SDA 0x40000b9e /* I2C_A_SDA */ + >; + }; + + pinctrl_lpi2c3: lpi2c3grp { + fsl,pins = < + MX93_PAD_GPIO_IO01__LPI2C3_SCL 0x40000b9e /* I2C_B_SCL */ + MX93_PAD_GPIO_IO00__LPI2C3_SDA 0x40000b9e /* I2C_B_SDA */ + >; + }; + + pinctrl_lpspi1: lpspi1grp { + fsl,pins = < + MX93_PAD_SAI1_TXC__LPSPI1_SIN 0x3fe /* SPI_A_SDI_(IO0) */ + MX93_PAD_SAI1_RXD0__LPSPI1_SOUT 0x3fe /* SPI_A_SDO_(IO1) */ + MX93_PAD_SAI1_TXD0__LPSPI1_SCK 0x3fe /* SPI_A_SCK */ + MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x3fe /* SPI_A_CS0# */ + >; + }; + + pinctrl_lpspi8: lpspi8grp { + fsl,pins = < + MX93_PAD_GPIO_IO13__LPSPI8_SIN 0x3fe /* SPI_B_SDI */ + MX93_PAD_GPIO_IO14__LPSPI8_SOUT 0x3fe /* SPI_B_SDO */ + MX93_PAD_GPIO_IO15__LPSPI8_SCK 0x3fe /* SPI_B_SCK */ + MX93_PAD_GPIO_IO12__GPIO2_IO12 0x3fe /* SPI_B_CS0# */ + >; + }; + + pinctrl_lpuart1: lpuart1grp { + fsl,pins = < + MX93_PAD_UART1_RXD__LPUART1_RX 0x31e /* UART_CON_RX */ + MX93_PAD_UART1_TXD__LPUART1_TX 0x31e /* UART_CON_TX */ + >; + }; + + pinctrl_lpuart2: lpuart2grp { + fsl,pins = < + MX93_PAD_UART2_RXD__LPUART2_RX 0x31e /* UART_C_RX */ + MX93_PAD_UART2_TXD__LPUART2_TX 0x31e /* UART_C_TX */ + >; + }; + + pinctrl_lpuart6: lpuart6grp { + fsl,pins = < + MX93_PAD_GPIO_IO05__LPUART6_RX 0x31e /* UART_B_RX */ + MX93_PAD_GPIO_IO04__LPUART6_TX 0x31e /* UART_B_TX */ + MX93_PAD_GPIO_IO07__LPUART6_RTS_B 0x31e /* UART_B_CTS */ + MX93_PAD_GPIO_IO06__LPUART6_CTS_B 0x31e /* UART_B_RTS */ + >; + }; + + pinctrl_lpuart7: lpuart7grp { + fsl,pins = < + MX93_PAD_GPIO_IO09__LPUART7_RX 0x31e 
/* UART_A_RX */ + MX93_PAD_GPIO_IO08__LPUART7_TX 0x31e /* UART_A_TX */ + MX93_PAD_GPIO_IO11__LPUART7_RTS_B 0x31e /* UART_A_CTS */ + MX93_PAD_GPIO_IO10__LPUART7_CTS_B 0x31e /* UART_A_RTS */ + >; + }; + + pinctrl_reg_usdhc2_vcc: regusdhc2vccgrp { + fsl,pins = < + MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x31e /* SDIO_A_PWR_EN */ + >; + }; + + pinctrl_reg_vdd_carrier: regvddcarriergrp { + fsl,pins = < + MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x31e /* CARRIER_PWR_EN */ + >; + }; + + pinctrl_sai3: sai3grp { + fsl,pins = < + MX93_PAD_GPIO_IO20__SAI3_RX_DATA00 0x31e /* I2S_A_DATA_IN */ + MX93_PAD_GPIO_IO19__SAI3_TX_DATA00 0x31e /* I2S_A_DATA_OUT */ + MX93_PAD_GPIO_IO17__SAI3_MCLK 0x31e /* I2S_MCLK */ + MX93_PAD_GPIO_IO26__SAI3_TX_SYNC 0x31e /* I2S_LRCLK */ + MX93_PAD_GPIO_IO16__SAI3_TX_BCLK 0x31e /* I2S_BITCLK */ + >; + }; + + pinctrl_tpm3: tpm3grp { + fsl,pins = < + MX93_PAD_GPIO_IO24__TPM3_CH3 0x57e /* PWM_0 */ + >; + }; + + pinctrl_tpm4: tpm4grp { + fsl,pins = < + MX93_PAD_GPIO_IO21__TPM4_CH1 0x57e /* PWM_2 */ + >; + }; + + pinctrl_tpm6: tpm6grp { + fsl,pins = < + MX93_PAD_GPIO_IO23__TPM6_CH1 0x57e /* PWM_1 */ + >; + }; + + /* need to config the SION for data and cmd pad, refer to ERR052021 */ + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX93_PAD_SD1_CLK__USDHC1_CLK 0x1582 + MX93_PAD_SD1_CMD__USDHC1_CMD 0x40001382 + MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x40001382 + MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x40001382 + MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x40001382 + MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x40001382 + MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x40001382 + MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x40001382 + MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x40001382 + MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x40001382 + MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x1582 + >; + }; + + /* need to config the SION for data and cmd pad, refer to ERR052021 */ + pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp { + fsl,pins = < + MX93_PAD_SD1_CLK__USDHC1_CLK 0x158e + MX93_PAD_SD1_CMD__USDHC1_CMD 0x4000138e + MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000138e + MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000138e + MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000138e + MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000138e + MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000138e + MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000138e + MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000138e + MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000138e + MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x158e + >; + }; + + /* need to config the SION for data and cmd pad, refer to ERR052021 */ + pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp { + fsl,pins = < + MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe + MX93_PAD_SD1_CMD__USDHC1_CMD 0x400013fe + MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x400013fe + MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x400013fe + MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x400013fe + MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x400013fe + MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x400013fe + MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x400013fe + MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x400013fe + MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x400013fe + MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX93_PAD_SD2_CLK__USDHC2_CLK 0x1582 /* SDIO_A_CLK */ + MX93_PAD_SD2_CMD__USDHC2_CMD 0x40001382 /* SDIO_A_CMD */ + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x40001382 /* SDIO_A_D0 */ + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x40001382 /* SDIO_A_D1 */ + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x40001382 /* SDIO_A_D2 */ + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x40001382 /* SDIO_A_D3 */ + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0 + >; + }; + + pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { + fsl,pins = < + 
MX93_PAD_SD2_CLK__USDHC2_CLK 0x158e /* SDIO_A_CLK */ + MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000138e /* SDIO_A_CMD */ + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e /* SDIO_A_D0 */ + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e /* SDIO_A_D1 */ + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e /* SDIO_A_D2 */ + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e /* SDIO_A_D3 */ + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0 + >; + }; + + pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { + fsl,pins = < + MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe /* SDIO_A_CLK */ + MX93_PAD_SD2_CMD__USDHC2_CMD 0x400013fe /* SDIO_A_CMD */ + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x400013fe /* SDIO_A_D0 */ + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x400013fe /* SDIO_A_D1 */ + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x400013fe /* SDIO_A_D2 */ + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x400013fe /* SDIO_A_D3 */ + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0 + >; + }; + + pinctrl_usdhc2_gpio: usdhc2gpiogrp { + fsl,pins = < + MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e /* SDIO_A_CD# */ + >; + }; + + pinctrl_usdhc3: usdhc3grp { + fsl,pins = < + MX93_PAD_SD3_CLK__USDHC3_CLK 0x1582 /* SDIO_B_CLK */ + MX93_PAD_SD3_CMD__USDHC3_CMD 0x40001382 /* SDIO_B_CMD */ + MX93_PAD_SD3_DATA0__USDHC3_DATA0 0x40001382 /* SDIO_B_D0 */ + MX93_PAD_SD3_DATA1__USDHC3_DATA1 0x40001382 /* SDIO_B_D1 */ + MX93_PAD_SD3_DATA2__USDHC3_DATA2 0x40001382 /* SDIO_B_D2 */ + MX93_PAD_SD3_DATA3__USDHC3_DATA3 0x40001382 /* SDIO_B_D3 */ + >; + }; + + pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp { + fsl,pins = < + MX93_PAD_SD3_CLK__USDHC3_CLK 0x158e /* SDIO_B_CLK */ + MX93_PAD_SD3_CMD__USDHC3_CMD 0x4000138e /* SDIO_B_CMD */ + MX93_PAD_SD3_DATA0__USDHC3_DATA0 0x4000138e /* SDIO_B_D0 */ + MX93_PAD_SD3_DATA1__USDHC3_DATA1 0x4000138e /* SDIO_B_D1 */ + MX93_PAD_SD3_DATA2__USDHC3_DATA2 0x4000138e /* SDIO_B_D2 */ + MX93_PAD_SD3_DATA3__USDHC3_DATA3 0x4000138e /* SDIO_B_D3 */ + >; + }; + + pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp { + fsl,pins = < + MX93_PAD_SD3_CLK__USDHC3_CLK 0x15fe /* SDIO_B_CLK */ + MX93_PAD_SD3_CMD__USDHC3_CMD 0x400013fe /* SDIO_B_CMD */ + MX93_PAD_SD3_DATA0__USDHC3_DATA0 0x400013fe /* SDIO_B_D0 */ + MX93_PAD_SD3_DATA1__USDHC3_DATA1 0x400013fe /* SDIO_B_D1 */ + MX93_PAD_SD3_DATA2__USDHC3_DATA2 0x400013fe /* SDIO_B_D2 */ + MX93_PAD_SD3_DATA3__USDHC3_DATA3 0x400013fe /* SDIO_B_D3 */ + >; + }; + + pinctrl_wdog: wdoggrp { + fsl,pins = < + MX93_PAD_WDOG_ANY__WDOG1_WDOG_ANY 0xc6 + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts index 852dd3d2eac74d..599df32976e245 100644 --- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts +++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts @@ -26,6 +26,8 @@ chosen { aliases { eeprom0 = &eeprom0; + ethernet0 = &fec; + ethernet1 = &eqos; rtc0 = &pcf85063; rtc1 = &bbnsm_rtc; }; @@ -274,6 +276,16 @@ &flexcan2 { }; &gpio1 { + gpio-line-names = + /* 00 */ "", "", "USB_C_ALERT#", "PMIC_IRQ#", + /* 04 */ "", "", "", "", + /* 08 */ "", "", "", "BM2_TEMP_EVENT_MOD#", + /* 12 */ "PEX_INT#", "", "RTC_EVENT#", "", + /* 16 */ "", "", "", "", + /* 20 */ "", "", "", "", + /* 24 */ "", "", "", "", + /* 28 */ "", "", "", ""; + expander-irq-hog { gpio-hog; gpios = <12 GPIO_ACTIVE_LOW>; @@ -289,6 +301,45 @@ tcpc-irq-hog { }; }; +&gpio2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio2>; + + gpio-line-names = + /* 00 */ "SPI6_PCS0#", "", "", "", + /* 04 */ "", "", "", "", + /* 08 */ "", "FAN_RPM", "MIPI_CSI_TRIGGER", "MIPI_CSI_SYNC", + /* 12 */ "", "", "", "", + /* 16 */ "X1_11", 
"X1_21", "X1_17", "X1_13", + /* 20 */ "X1_15", "X1_9", "", "", + /* 24 */ "", "", "X1_7", "", + /* 28 */ "", "", "", ""; +}; + +&gpio3 { + gpio-line-names = + /* 00 */ "SD2_CD#", "", "", "", + /* 04 */ "", "", "", "SD2_RST#", + /* 08 */ "", "", "", "", + /* 12 */ "", "", "", "", + /* 16 */ "", "", "", "", + /* 20 */ "", "", "", "", + /* 24 */ "", "", "ENET1_INT#", "ENET2_INT#", + /* 28 */ "", "", "", ""; +}; + +&gpio4 { + gpio-line-names = + /* 00 */ "", "", "", "", + /* 04 */ "", "", "", "", + /* 08 */ "", "", "", "", + /* 12 */ "", "", "", "", + /* 16 */ "", "", "", "", + /* 20 */ "", "", "", "", + /* 24 */ "", "", "", "", + /* 28 */ "", "DP_INT", "", ""; +}; + &lpi2c3 { #address-cells = <1>; #size-cells = <0>; @@ -495,6 +546,22 @@ &lpuart8 { status = "okay"; }; +&pcf85063 { + /* RTC_EVENT# from SoM is connected on mainboard */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcf85063>; + interrupt-parent = <&gpio1>; + interrupts = <14 IRQ_TYPE_EDGE_FALLING>; +}; + +&se97_som { + /* TEMP_EVENT# from SoM is connected on mainboard */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_temp_sensor_som>; + interrupt-parent = <&gpio1>; + interrupts = <11 IRQ_TYPE_LEVEL_LOW>; +}; + &tpm5 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_tpm5>; @@ -533,7 +600,7 @@ &usbotg2 { samsung,picophy-dc-vol-level-adjust = <7>; status = "okay"; - hub_2_0: hub@1 { + hub_2_0: usb-hub@1 { compatible = "usb424,2517"; reg = <1>; reset-gpios = <&expander1 2 GPIO_ACTIVE_LOW>; @@ -559,22 +626,23 @@ &iomuxc { pinctrl_eqos: eqosgrp { fsl,pins = < /* PD | FSEL_2 | DSE X4 */ - MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e - MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000051e - /* PD | FSEL_2 | DSE X6 */ - MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e - MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e - MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e - MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e - /* PD | FSEL_3 | DSE X6 */ - MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x5fe - MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e + MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e + /* SION | HYS | FSEL_2 | DSE X4 */ + MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e + /* HYS | FSEL_0 | DSE no drive */ + MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000 + MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000 + MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x1000 + MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x1000 + MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x1000 + /* HYS | PD | FSEL_0 | DSE no drive */ + MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x1400 /* PD | FSEL_2 | DSE X4 */ - MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e - MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e - MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e - MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e - MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e + MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e + MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e + MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e + MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e + MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e /* PD | FSEL_3 | DSE X3 */ MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e >; @@ -582,7 +650,8 @@ MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e pinctrl_eqos_phy: eqosphygrp { fsl,pins = < - MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1306 + /* HYS | FSEL_0 | DSE no drive */ + MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1000 >; }; @@ -590,15 +659,16 @@ pinctrl_fec: fecgrp { fsl,pins = < /* PD | FSEL_2 | DSE X4 */ MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e - MX93_PAD_ENET2_MDIO__ENET1_MDIO 
0x4000051e - /* PD | FSEL_2 | DSE X6 */ - MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e - MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e - MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e - MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e - /* PD | FSEL_3 | DSE X6 */ - MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x5fe - MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e + /* SION | HYS | FSEL_2 | DSE X4 */ + MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e + /* HYS | FSEL_0 | DSE no drive */ + MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000 + MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000 + MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x1000 + MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x1000 + MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x1000 + /* HYS | PD | FSEL_0 | DSE no drive */ + MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x1400 /* PD | FSEL_2 | DSE X4 */ MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x51e MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x51e @@ -612,147 +682,224 @@ MX93_PAD_ENET2_TXC__ENET1_RGMII_TXC 0x58e pinctrl_fec_phy: fecphygrp { fsl,pins = < - MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1306 + /* HYS | FSEL_0 | DSE no drive */ + MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1000 >; }; pinctrl_flexcan1: flexcan1grp { fsl,pins = < - MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e - MX93_PAD_PDM_CLK__CAN1_TX 0x139e + /* HYS | PU | FSEL_0 | DSE no drive */ + MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x1200 + /* PU | FSEL_3 | DSE X4 */ + MX93_PAD_PDM_CLK__CAN1_TX 0x039e >; }; pinctrl_flexcan2: flexcan2grp { fsl,pins = < - MX93_PAD_GPIO_IO25__CAN2_TX 0x139e - MX93_PAD_GPIO_IO27__CAN2_RX 0x139e + /* HYS | PU | FSEL_0 | DSE no drive */ + MX93_PAD_GPIO_IO27__CAN2_RX 0x1200 + /* PU | FSEL_3 | DSE X4 */ + MX93_PAD_GPIO_IO25__CAN2_TX 0x039e + >; + }; + + pinctrl_gpio2: gpio2grp { + fsl,pins = < + /* HYS | PD | FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO16__GPIO2_IO16 0x151e + MX93_PAD_GPIO_IO17__GPIO2_IO17 0x151e + MX93_PAD_GPIO_IO18__GPIO2_IO18 0x151e + MX93_PAD_GPIO_IO19__GPIO2_IO19 0x151e + MX93_PAD_GPIO_IO20__GPIO2_IO20 0x151e + MX93_PAD_GPIO_IO21__GPIO2_IO21 0x151e + MX93_PAD_GPIO_IO26__GPIO2_IO26 0x151e + >; + }; + + pinctrl_jtag: jtaggrp { + fsl,pins = < + MX93_PAD_DAP_TCLK_SWCLK__JTAG_MUX_TCK 0x051e + MX93_PAD_DAP_TDI__JTAG_MUX_TDI 0x1200 + MX93_PAD_DAP_TDO_TRACESWO__JTAG_MUX_TDO 0x031e + MX93_PAD_DAP_TMS_SWDIO__JTAG_MUX_TMS 0x1200 >; }; pinctrl_lpi2c3: lpi2c3grp { fsl,pins = < - MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e - MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e + /* SION | HYS | OD | FSEL_3 | DSE X4 */ + MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x4000199e + MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x4000199e >; }; pinctrl_lpi2c5: lpi2c5grp { fsl,pins = < - MX93_PAD_GPIO_IO22__LPI2C5_SDA 0x40000b9e - MX93_PAD_GPIO_IO23__LPI2C5_SCL 0x40000b9e + /* SION | HYS | OD | FSEL_3 | DSE X4 */ + MX93_PAD_GPIO_IO22__LPI2C5_SDA 0x4000199e + MX93_PAD_GPIO_IO23__LPI2C5_SCL 0x4000199e >; }; pinctrl_lpspi6: lpspi6grp { fsl,pins = < - MX93_PAD_GPIO_IO00__LPSPI6_PCS0 0x3fe - MX93_PAD_GPIO_IO01__LPSPI6_SIN 0x3fe - MX93_PAD_GPIO_IO02__LPSPI6_SOUT 0x3fe - MX93_PAD_GPIO_IO03__LPSPI6_SCK 0x3fe + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO00__LPSPI6_PCS0 0x011e + /* HYS | PD | FSEL_0 | DSE no drive */ + MX93_PAD_GPIO_IO01__LPSPI6_SIN 0x1400 + /* PD | FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO02__LPSPI6_SOUT 0x051e + MX93_PAD_GPIO_IO03__LPSPI6_SCK 0x051e + >; + }; + + pinctrl_pcf85063: pcf85063grp { + fsl,pins = < + MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1000 + >; + }; + + pinctrl_mipi_csi: mipicsigrp { + fsl,pins = < + MX93_PAD_CCM_CLKO3__CCMSRCGPCMIX_CLKO3 0x051e /* MCLK */ + MX93_PAD_GPIO_IO10__GPIO2_IO10 0x051e /* TRIGGER */ + 
MX93_PAD_GPIO_IO11__GPIO2_IO11 0x1400 /* SYNC */ >; }; pinctrl_pexp_irq: pexpirqgrp { fsl,pins = < - MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1306 + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1000 >; }; pinctrl_pwmfan: pwmfangrp { fsl,pins = < - MX93_PAD_GPIO_IO09__GPIO2_IO09 0x1306 + /* HYS | PU | FSEL_0 | no DSE */ + MX93_PAD_GPIO_IO09__GPIO2_IO09 0x1200 + >; + }; + + pinctrl_temp_sensor_som: tempsensorsomgrp { + fsl,pins = < + /* HYS | FSEL_0 | no DSE */ + MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x1000 + >; + }; + + pinctrl_tc9595: tc9595-grp { + fsl,pins = < + /* HYS | PD | FSEL_0 | no DSE */ + MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x1400 >; }; pinctrl_tpm5: tpm5grp { fsl,pins = < - MX93_PAD_GPIO_IO06__TPM5_CH0 0x57e + MX93_PAD_GPIO_IO06__TPM5_CH0 0x57e >; }; pinctrl_tpm6: tpm6grp { fsl,pins = < - MX93_PAD_GPIO_IO08__TPM6_CH0 0x57e + MX93_PAD_GPIO_IO08__TPM6_CH0 0x57e >; }; pinctrl_typec: typecgrp { fsl,pins = < - MX93_PAD_I2C2_SCL__GPIO1_IO02 0x1306 + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_I2C2_SCL__GPIO1_IO02 0x1000 >; }; pinctrl_uart1: uart1grp { fsl,pins = < - MX93_PAD_UART1_RXD__LPUART1_RX 0x31e - MX93_PAD_UART1_TXD__LPUART1_TX 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_UART1_RXD__LPUART1_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_UART1_TXD__LPUART1_TX 0x011e >; }; pinctrl_uart2: uart2grp { fsl,pins = < - MX93_PAD_UART2_TXD__LPUART2_TX 0x31e - MX93_PAD_UART2_RXD__LPUART2_RX 0x31e - MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x51e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_UART2_RXD__LPUART2_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_UART2_TXD__LPUART2_TX 0x011e + /* FSEL_2 | DSE X4 */ + MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x011e >; }; pinctrl_uart3: uart3grp { fsl,pins = < - MX93_PAD_GPIO_IO14__LPUART3_TX 0x31e - MX93_PAD_GPIO_IO15__LPUART3_RX 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_GPIO_IO15__LPUART3_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO14__LPUART3_TX 0x011e >; }; pinctrl_uart6: uart6grp { fsl,pins = < - MX93_PAD_GPIO_IO04__LPUART6_TX 0x31e - MX93_PAD_GPIO_IO05__LPUART6_RX 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_GPIO_IO05__LPUART6_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO04__LPUART6_TX 0x011e >; }; pinctrl_uart8: uart8grp { fsl,pins = < - MX93_PAD_GPIO_IO12__LPUART8_TX 0x31e - MX93_PAD_GPIO_IO13__LPUART8_RX 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_GPIO_IO13__LPUART8_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO12__LPUART8_TX 0x011e >; }; pinctrl_usdhc2_gpio: usdhc2gpiogrp { fsl,pins = < - MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_SD2_CD_B__GPIO3_IO00 0x1000 >; }; + /* enable SION for data and cmd pad due to ERR052021 */ pinctrl_usdhc2_hs: usdhc2hsgrp { fsl,pins = < - /* HYS | PD | PU | FSEL_3 | DSE X5 */ - MX93_PAD_SD2_CLK__USDHC2_CLK 0x17be - /* HYS | PD | PU | FSEL_3 | DSE X4 */ - MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e - /* HYS | PD | PU | FSEL_3 | DSE X3 */ - MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x138e - MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x138e - MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x138e - MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x138e - /* PD | PU | FSEL_2 | DSE X3 */ - MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x50e + /* PD | FSEL_3 | DSE X5 */ + MX93_PAD_SD2_CLK__USDHC2_CLK 0x05be + /* HYS | PU | FSEL_3 | DSE X4 */ + MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e + /* HYS | PU | FSEL_3 | DSE X3 */ + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e + /* FSEL_2 | DSE X3 */ + 
MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e >; }; + /* enable SION for data and cmd pad due to ERR052021 */ pinctrl_usdhc2_uhs: usdhc2uhsgrp { fsl,pins = < - /* HYS | PD | PU | FSEL_3 | DSE X6 */ - MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe - /* HYS | PD | PU | FSEL_3 | DSE X4 */ - MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e - MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x139e - MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x139e - MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x139e - MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x139e - /* PD | PU | FSEL_2 | DSE X3 */ - MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x50e + /* PD | FSEL_3 | DSE X6 */ + MX93_PAD_SD2_CLK__USDHC2_CLK 0x05fe + /* HYS | PU | FSEL_3 | DSE X4 */ + MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000139e + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000139e + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e + /* FSEL_2 | DSE X3 */ + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e >; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts index e2ee9f5a042cb1..0b4b3bb866d064 100644 --- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts +++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts @@ -26,6 +26,8 @@ chosen { aliases { eeprom0 = &eeprom0; + ethernet0 = &fec; + ethernet1 = &eqos; rtc0 = &pcf85063; rtc1 = &bbnsm_rtc; }; @@ -207,6 +209,16 @@ &flexcan2 { }; &gpio1 { + gpio-line-names = + /* 00 */ "", "", "USB_C_ALERT#", "PMIC_IRQ#", + /* 04 */ "", "", "", "", + /* 08 */ "", "", "", "BM2_TEMP_EVENT_MOD#", + /* 12 */ "PEX_INT#", "", "RTC_EVENT#", "", + /* 16 */ "", "", "", "", + /* 20 */ "", "", "", "", + /* 24 */ "", "", "", "", + /* 28 */ "", "", "", ""; + expander-irq-hog { gpio-hog; gpios = <12 GPIO_ACTIVE_LOW>; @@ -222,19 +234,63 @@ rtc-irq-hog { }; }; +&gpio2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio2>; + + gpio-line-names = + /* 00 */ "", "", "", "", + /* 04 */ "", "", "", "AFE_RESET#", + /* 08 */ "AFE_SYNC", "AFE_DRDY", "MIPI_CSI_TRIGGER", "MIPI_CSI_SYNC", + /* 12 */ "", "", "", "", + /* 16 */ "X1_19", "X1_29", "X1_25", "X1_21", + /* 20 */ "X1_23", "X1_17", "", "", + /* 24 */ "AFE_INT#", "", "X1_15", "", + /* 28 */ "", "", "", ""; +}; + &gpio3 { + gpio-line-names = + /* 00 */ "SD2_CD#", "", "", "", + /* 04 */ "", "", "", "SD2_RST#", + /* 08 */ "", "", "", "", + /* 12 */ "", "", "", "", + /* 16 */ "", "", "", "", + /* 20 */ "", "", "", "", + /* 24 */ "", "", "ENET1_INT#", "ENET2_INT#", + /* 28 */ "", "", "", ""; + ethphy-eqos-irq-hog { gpio-hog; gpios = <26 GPIO_ACTIVE_LOW>; input; - line-name = "ENET0_IRQ#"; + line-name = "ENET1_INT#"; }; ethphy-fec-irq-hog { gpio-hog; gpios = <27 GPIO_ACTIVE_LOW>; input; - line-name = "ENET1_IRQ#"; + line-name = "ENET2_INT#"; + }; +}; + +&gpio4 { + gpio-line-names = + /* 00 */ "", "", "", "", + /* 04 */ "", "", "", "", + /* 08 */ "", "", "", "", + /* 12 */ "", "", "", "", + /* 16 */ "", "", "", "", + /* 20 */ "", "", "", "", + /* 24 */ "", "", "", "", + /* 28 */ "", "DP_INT", "", ""; + + dp-int-hog { + gpio-hog; + gpios = <29 GPIO_ACTIVE_LOW>; + input; + line-name = "DP_INT"; }; }; @@ -371,7 +427,7 @@ expander2: gpio@72 { #gpio-cells = <2>; vcc-supply = <®_3v3>; gpio-line-names = "LCD_RESET#", "LCD_PWR_EN", - "LCD_BL_EN", "DP_EN", + "LCD_BLT_EN", "DP_EN", "MIPI_CSI_EN", "MIPI_CSI_RST#", "USER_LED1", "USER_LED2"; }; @@ -414,6 +470,13 @@ dp_dsi_in: endpoint { }; }; +&lpspi6 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpspi6>, <&pinctrl_lpspi6_cs>; + 
cs-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + &lpuart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; @@ -447,13 +510,21 @@ &lpuart8 { }; &pcf85063 { - /* RTC_EVENT# is connected on MBa93xxLA */ + /* RTC_EVENT# from SoM is connected on mainboard */ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pcf85063>; interrupt-parent = <&gpio1>; interrupts = <14 IRQ_TYPE_EDGE_FALLING>; }; +&se97_som { + /* TEMP_EVENT# from SoM is connected on mainboard */ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_temp_sensor_som>; + interrupt-parent = <&gpio1>; + interrupts = <11 IRQ_TYPE_LEVEL_LOW>; +}; + &tpm5 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_tpm5>; @@ -486,7 +557,7 @@ &usbotg2 { samsung,picophy-dc-vol-level-adjust = <7>; status = "okay"; - hub_2_0: hub@1 { + hub_2_0: usb-hub@1 { compatible = "usb424,2517"; reg = <1>; reset-gpios = <&expander1 2 GPIO_ACTIVE_LOW>; @@ -509,25 +580,39 @@ &usdhc2 { }; &iomuxc { + pinctrl_afe: afegrp { + fsl,pins = < + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO07__GPIO2_IO07 0x011e + /* PD | FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO08__GPIO2_IO08 0x051e + /* HYS | PD */ + MX93_PAD_GPIO_IO09__GPIO2_IO09 0x1400 + /* HYS */ + MX93_PAD_GPIO_IO24__GPIO2_IO24 0x1000 + >; + }; + pinctrl_eqos: eqosgrp { fsl,pins = < /* PD | FSEL_2 | DSE X4 */ - MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e - MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000051e - /* PD | FSEL_2 | DSE X6 */ - MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x57e - MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x57e - MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x57e - MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x57e - /* PD | FSEL_3 | DSE X6 */ - MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x5fe - MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x57e + MX93_PAD_ENET1_MDC__ENET_QOS_MDC 0x51e + /* SION | HYS | FSEL_2 | DSE X4 */ + MX93_PAD_ENET1_MDIO__ENET_QOS_MDIO 0x4000111e + /* HYS | FSEL_0 | DSE no drive */ + MX93_PAD_ENET1_RD0__ENET_QOS_RGMII_RD0 0x1000 + MX93_PAD_ENET1_RD1__ENET_QOS_RGMII_RD1 0x1000 + MX93_PAD_ENET1_RD2__ENET_QOS_RGMII_RD2 0x1000 + MX93_PAD_ENET1_RD3__ENET_QOS_RGMII_RD3 0x1000 + MX93_PAD_ENET1_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x1000 + /* HYS | PD | FSEL_0 | DSE no drive */ + MX93_PAD_ENET1_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x1400 /* PD | FSEL_2 | DSE X4 */ - MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e - MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e - MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e - MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e - MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e + MX93_PAD_ENET1_TD0__ENET_QOS_RGMII_TD0 0x51e + MX93_PAD_ENET1_TD1__ENET_QOS_RGMII_TD1 0x51e + MX93_PAD_ENET1_TD2__ENET_QOS_RGMII_TD2 0x51e + MX93_PAD_ENET1_TD3__ENET_QOS_RGMII_TD3 0x51e + MX93_PAD_ENET1_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x51e /* PD | FSEL_3 | DSE X3 */ MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e >; @@ -535,7 +620,8 @@ MX93_PAD_ENET1_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x58e pinctrl_eqos_phy: eqosphygrp { fsl,pins = < - MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1306 + /* HYS | FSEL_0 | DSE no drive */ + MX93_PAD_CCM_CLKO1__GPIO3_IO26 0x1000 >; }; @@ -543,15 +629,16 @@ pinctrl_fec: fecgrp { fsl,pins = < /* PD | FSEL_2 | DSE X4 */ MX93_PAD_ENET2_MDC__ENET1_MDC 0x51e - MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000051e - /* PD | FSEL_2 | DSE X6 */ - MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x57e - MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x57e - MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x57e - MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x57e - /* PD | FSEL_3 | DSE X6 */ - 
MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x5fe - MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x57e + /* SION | HYS | FSEL_2 | DSE X4 */ + MX93_PAD_ENET2_MDIO__ENET1_MDIO 0x4000111e + /* HYS | FSEL_0 | DSE no drive */ + MX93_PAD_ENET2_RD0__ENET1_RGMII_RD0 0x1000 + MX93_PAD_ENET2_RD1__ENET1_RGMII_RD1 0x1000 + MX93_PAD_ENET2_RD2__ENET1_RGMII_RD2 0x1000 + MX93_PAD_ENET2_RD3__ENET1_RGMII_RD3 0x1000 + MX93_PAD_ENET2_RX_CTL__ENET1_RGMII_RX_CTL 0x1000 + /* HYS | PD | FSEL_0 | DSE no drive */ + MX93_PAD_ENET2_RXC__ENET1_RGMII_RXC 0x1400 /* PD | FSEL_2 | DSE X4 */ MX93_PAD_ENET2_TD0__ENET1_RGMII_TD0 0x51e MX93_PAD_ENET2_TD1__ENET1_RGMII_TD1 0x51e @@ -565,139 +652,216 @@ MX93_PAD_ENET2_TXC__ENET1_RGMII_TXC 0x58e pinctrl_fec_phy: fecphygrp { fsl,pins = < - MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1306 + /* HYS | FSEL_0 | DSE no drive */ + MX93_PAD_CCM_CLKO2__GPIO3_IO27 0x1000 >; }; pinctrl_flexcan1: flexcan1grp { fsl,pins = < - MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x139e - MX93_PAD_PDM_CLK__CAN1_TX 0x139e + /* HYS | PU | FSEL_0 | DSE no drive */ + MX93_PAD_PDM_BIT_STREAM0__CAN1_RX 0x1200 + /* PU | FSEL_3 | DSE X4 */ + MX93_PAD_PDM_CLK__CAN1_TX 0x039e >; }; pinctrl_flexcan2: flexcan2grp { fsl,pins = < - MX93_PAD_GPIO_IO25__CAN2_TX 0x139e - MX93_PAD_GPIO_IO27__CAN2_RX 0x139e + /* HYS | PU | FSEL_0 | DSE no drive */ + MX93_PAD_GPIO_IO27__CAN2_RX 0x1200 + /* PU | FSEL_3 | DSE X4 */ + MX93_PAD_GPIO_IO25__CAN2_TX 0x039e + >; + }; + + pinctrl_gpio2: gpio2grp { + fsl,pins = < + /* HYS | PD | FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO16__GPIO2_IO16 0x151e + MX93_PAD_GPIO_IO17__GPIO2_IO17 0x151e + MX93_PAD_GPIO_IO18__GPIO2_IO18 0x151e + MX93_PAD_GPIO_IO19__GPIO2_IO19 0x151e + MX93_PAD_GPIO_IO20__GPIO2_IO20 0x151e + MX93_PAD_GPIO_IO21__GPIO2_IO21 0x151e + MX93_PAD_GPIO_IO26__GPIO2_IO26 0x151e + >; + }; + + pinctrl_jtag: jtaggrp { + fsl,pins = < + MX93_PAD_DAP_TCLK_SWCLK__JTAG_MUX_TCK 0x051e + MX93_PAD_DAP_TDI__JTAG_MUX_TDI 0x1200 + MX93_PAD_DAP_TDO_TRACESWO__JTAG_MUX_TDO 0x031e + MX93_PAD_DAP_TMS_SWDIO__JTAG_MUX_TMS 0x1200 >; }; pinctrl_lpi2c3: lpi2c3grp { fsl,pins = < - MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e - MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e + /* SION | HYS | OD | FSEL_3 | DSE X4 */ + MX93_PAD_GPIO_IO28__LPI2C3_SDA 0x4000199e + MX93_PAD_GPIO_IO29__LPI2C3_SCL 0x4000199e >; }; pinctrl_lpi2c5: lpi2c5grp { fsl,pins = < - MX93_PAD_GPIO_IO22__LPI2C5_SDA 0x40000b9e - MX93_PAD_GPIO_IO23__LPI2C5_SCL 0x40000b9e + /* SION | HYS | OD | FSEL_3 | DSE X4 */ + MX93_PAD_GPIO_IO22__LPI2C5_SDA 0x4000199e + MX93_PAD_GPIO_IO23__LPI2C5_SCL 0x4000199e + >; + }; + + pinctrl_lpspi6: lpspi6grp { + fsl,pins = < + /* HYS | PD | FSEL_0 | DSE no drive */ + MX93_PAD_GPIO_IO01__LPSPI6_SIN 0x1400 + /* PD | FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO02__LPSPI6_SOUT 0x051e + MX93_PAD_GPIO_IO03__LPSPI6_SCK 0x051e + >; + }; + + pinctrl_lpspi6_cs: lpspi6csgrp { + fsl,pins = < + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO00__GPIO2_IO00 0x011e + >; + }; + + pinctrl_mipi_csi: mipicsigrp { + fsl,pins = < + MX93_PAD_CCM_CLKO3__CCMSRCGPCMIX_CLKO3 0x051e /* MCLK */ + MX93_PAD_GPIO_IO10__GPIO2_IO10 0x051e /* TRIGGER */ + MX93_PAD_GPIO_IO11__GPIO2_IO11 0x1400 /* SYNC */ >; }; pinctrl_pcf85063: pcf85063grp { fsl,pins = < - MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1306 + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1000 >; }; pinctrl_pexp_irq: pexpirqgrp { fsl,pins = < - MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1306 + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_SAI1_TXC__GPIO1_IO12 0x1000 >; }; pinctrl_tc9595: tc9595-grp { fsl,pins = < - /* DP_IRQ */ - 
MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x1306 + /* HYS | PD | FSEL_0 | no DSE */ + MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x1400 + >; + }; + + pinctrl_temp_sensor_som: tempsensorsomgrp { + fsl,pins = < + /* HYS | FSEL_0 | no DSE */ + MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x1000 >; }; pinctrl_tpm5: tpm5grp { fsl,pins = < - MX93_PAD_GPIO_IO06__TPM5_CH0 0x57e + MX93_PAD_GPIO_IO06__TPM5_CH0 0x57e >; }; pinctrl_typec: typecgrp { fsl,pins = < - MX93_PAD_I2C2_SCL__GPIO1_IO02 0x1306 + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_I2C2_SCL__GPIO1_IO02 0x1000 >; }; pinctrl_uart1: uart1grp { fsl,pins = < - MX93_PAD_UART1_RXD__LPUART1_RX 0x31e - MX93_PAD_UART1_TXD__LPUART1_TX 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_UART1_RXD__LPUART1_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_UART1_TXD__LPUART1_TX 0x011e >; }; pinctrl_uart2: uart2grp { fsl,pins = < - MX93_PAD_UART2_TXD__LPUART2_TX 0x31e - MX93_PAD_UART2_RXD__LPUART2_RX 0x31e - MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x51e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_UART2_RXD__LPUART2_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_UART2_TXD__LPUART2_TX 0x011e + MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x011e >; }; pinctrl_uart3: uart3grp { fsl,pins = < - MX93_PAD_GPIO_IO14__LPUART3_TX 0x31e - MX93_PAD_GPIO_IO15__LPUART3_RX 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_GPIO_IO15__LPUART3_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO14__LPUART3_TX 0x011e >; }; pinctrl_uart6: uart6grp { fsl,pins = < - MX93_PAD_GPIO_IO04__LPUART6_TX 0x31e - MX93_PAD_GPIO_IO05__LPUART6_RX 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_GPIO_IO05__LPUART6_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO04__LPUART6_TX 0x011e >; }; pinctrl_uart8: uart8grp { fsl,pins = < - MX93_PAD_GPIO_IO12__LPUART8_TX 0x31e - MX93_PAD_GPIO_IO13__LPUART8_RX 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_GPIO_IO13__LPUART8_RX 0x1000 + /* FSEL_2 | DSE X4 */ + MX93_PAD_GPIO_IO12__LPUART8_TX 0x011e >; }; pinctrl_usdhc2_gpio: usdhc2gpiogrp { fsl,pins = < - MX93_PAD_SD2_CD_B__GPIO3_IO00 0x31e + /* HYS | FSEL_0 | No DSE */ + MX93_PAD_SD2_CD_B__GPIO3_IO00 0x1000 >; }; + /* enable SION for data and cmd pad due to ERR052021 */ pinctrl_usdhc2_hs: usdhc2hsgrp { fsl,pins = < - /* HYS | PD | PU | FSEL_3 | DSE X5 */ - MX93_PAD_SD2_CLK__USDHC2_CLK 0x17be - /* HYS | PD | PU | FSEL_3 | DSE X4 */ - MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e - /* HYS | PD | PU | FSEL_3 | DSE X3 */ - MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x138e - MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x138e - MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x138e - MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x138e - /* PD | PU | FSEL_2 | DSE X3 */ - MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x50e + /* PD | FSEL_3 | DSE X5 */ + MX93_PAD_SD2_CLK__USDHC2_CLK 0x05be + /* HYS | PU | FSEL_3 | DSE X4 */ + MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e + /* HYS | PU | FSEL_3 | DSE X3 */ + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000138e + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e + /* FSEL_2 | DSE X3 */ + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e >; }; + /* enable SION for data and cmd pad due to ERR052021 */ pinctrl_usdhc2_uhs: usdhc2uhsgrp { fsl,pins = < - /* HYS | PD | PU | FSEL_3 | DSE X6 */ - MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe - /* HYS | PD | PU | FSEL_3 | DSE X4 */ - MX93_PAD_SD2_CMD__USDHC2_CMD 0x139e - MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x139e - MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x139e - MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x139e - MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x139e - /* PD | PU | FSEL_2 | DSE X3 */ - MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 
0x50e + /* PD | FSEL_3 | DSE X6 */ + MX93_PAD_SD2_CLK__USDHC2_CLK 0x05fe + /* HYS | PU | FSEL_3 | DSE X4 */ + MX93_PAD_SD2_CMD__USDHC2_CMD 0x4000139e + MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x4000139e + MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000139e + MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000139e + MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000139e + /* FSEL_2 | DSE X3 */ + MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x010e >; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi index 72a9a5d4e27a3c..2cabdae2422739 100644 --- a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi +++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi @@ -25,20 +25,6 @@ linux,cma { }; }; - reg_v1v8: regulator-v1v8 { - compatible = "regulator-fixed"; - regulator-name = "V_1V8"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - }; - - reg_v3v3: regulator-v3v3 { - compatible = "regulator-fixed"; - regulator-name = "V_3V3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - }; - /* SD2 RST# via PMIC SW_EN */ reg_usdhc2_vmmc: regulator-usdhc2 { compatible = "regulator-fixed"; @@ -47,14 +33,14 @@ reg_usdhc2_vmmc: regulator-usdhc2 { regulator-name = "VSD_3V3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - vin-supply = <®_v3v3>; + vin-supply = <&buck4>; gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>; enable-active-high; }; }; &adc1 { - vref-supply = <®_v1v8>; + vref-supply = <&buck5>; }; &flexspi1 { @@ -105,6 +91,91 @@ se97_som: temperature-sensor@1b { reg = <0x1b>; }; + pca9451a: pmic@25 { + compatible = "nxp,pca9451a"; + reg = <0x25>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pca9451>; + interrupt-parent = <&gpio1>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + + regulators { + /* V_0V8_SOC - hw developer guide: 0.75 .. 0.9 */ + buck1: BUCK1 { + regulator-name = "BUCK1"; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <900000>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <3125>; + }; + + /* V_DDRQ - 1.1 LPDDR4 or 0.6 LPDDR4X */ + buck2: BUCK2 { + regulator-name = "BUCK2"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <1100000>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <3125>; + }; + + /* V_3V3 - EEPROM, RTC, ... */ + buck4: BUCK4 { + regulator-name = "BUCK4"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + /* V_1V8 - SPI NOR, eMMC, RAM VDD1... 
*/ + buck5: BUCK5 { + regulator-name = "BUCK5"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-always-on; + }; + + /* V_1V1 - RAM VDD2*/ + buck6: BUCK6 { + regulator-name = "BUCK6"; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + regulator-boot-on; + regulator-always-on; + }; + + /* V_1V8_BBSM, fix 1.8 */ + ldo1: LDO1 { + regulator-name = "LDO1"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-always-on; + }; + + /* V_0V8_ANA */ + ldo4: LDO4 { + regulator-name = "LDO4"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + regulator-boot-on; + regulator-always-on; + }; + + /* V_SD2 - 3.3/1.8V USDHC2 io Voltage */ + ldo5: LDO5 { + regulator-name = "LDO5"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + }; + }; + pcf85063: rtc@51 { compatible = "nxp,pcf85063a"; reg = <0x51>; @@ -116,28 +187,28 @@ eeprom0: eeprom@53 { reg = <0x53>; pagesize = <16>; read-only; - vcc-supply = <®_v3v3>; + vcc-supply = <&buck4>; }; eeprom1: eeprom@57 { compatible = "atmel,24c64"; reg = <0x57>; pagesize = <32>; - vcc-supply = <®_v3v3>; + vcc-supply = <&buck4>; }; /* protectable identification memory (part of M24C64-D @57) */ eeprom@5f { compatible = "atmel,24c64d-wl"; reg = <0x5f>; - vcc-supply = <®_v3v3>; + vcc-supply = <&buck4>; }; imu@6a { compatible = "st,ism330dhcx"; reg = <0x6a>; - vdd-supply = <®_v3v3>; - vddio-supply = <®_v3v3>; + vdd-supply = <&buck4>; + vddio-supply = <&buck4>; }; }; @@ -146,6 +217,8 @@ &usdhc1 { pinctrl-0 = <&pinctrl_usdhc1>; pinctrl-1 = <&pinctrl_usdhc1>; pinctrl-2 = <&pinctrl_usdhc1>; + vmmc-supply = <&buck4>; + vqmmc-supply = <&buck5>; bus-width = <8>; non-removable; no-sdio; @@ -163,55 +236,64 @@ &wdog3 { &iomuxc { pinctrl_flexspi1: flexspi1grp { fsl,pins = < - MX93_PAD_SD3_CMD__FLEXSPI1_A_SS0_B 0x3fe - MX93_PAD_SD3_CLK__FLEXSPI1_A_SCLK 0x3fe - MX93_PAD_SD3_DATA0__FLEXSPI1_A_DATA00 0x3fe - MX93_PAD_SD3_DATA1__FLEXSPI1_A_DATA01 0x3fe - MX93_PAD_SD3_DATA2__FLEXSPI1_A_DATA02 0x3fe - MX93_PAD_SD3_DATA3__FLEXSPI1_A_DATA03 0x3fe + /* FSEL 3 | DSE X6 */ + MX93_PAD_SD3_CMD__FLEXSPI1_A_SS0_B 0x01fe + MX93_PAD_SD3_CLK__FLEXSPI1_A_SCLK 0x01fe + /* HYS | PU | FSEL 3 | DSE X6 */ + MX93_PAD_SD3_DATA0__FLEXSPI1_A_DATA00 0x13fe + MX93_PAD_SD3_DATA1__FLEXSPI1_A_DATA01 0x13fe + /* HYS | FSEL 3 | DSE X6 (external PU) */ + MX93_PAD_SD3_DATA2__FLEXSPI1_A_DATA02 0x11fe + MX93_PAD_SD3_DATA3__FLEXSPI1_A_DATA03 0x11fe >; }; pinctrl_lpi2c1: lpi2c1grp { fsl,pins = < - MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x40000b9e - MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x40000b9e + /* SION | OD | FSEL 3 | DSE X4 */ + MX93_PAD_I2C1_SCL__LPI2C1_SCL 0x4000199e + MX93_PAD_I2C1_SDA__LPI2C1_SDA 0x4000199e >; }; pinctrl_pca9451: pca9451grp { fsl,pins = < - MX93_PAD_I2C2_SDA__GPIO1_IO03 0x1306 + /* HYS | PU */ + MX93_PAD_I2C2_SDA__GPIO1_IO03 0x1200 >; }; pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { fsl,pins = < - MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x1306 + /* FSEL 2 | DSE X2 */ + MX93_PAD_SD2_RESET_B__GPIO3_IO07 0x106 >; }; + /* enable SION for data and cmd pad due to ERR052021 */ pinctrl_usdhc1: usdhc1grp { fsl,pins = < - /* HYS | PU | PD | FSEL_3 | X5 */ - MX93_PAD_SD1_CLK__USDHC1_CLK 0x17be - MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x17be - /* HYS | PU | FSEL_3 | X5 */ - MX93_PAD_SD1_CMD__USDHC1_CMD 0x13be - /* HYS | PU | FSEL_3 | X4 */ - MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x139e - 
MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x139e - MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x139e - MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x139e - MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x139e - MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x139e - MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x139e - MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x139e + /* PD | FSEL 3 | DSE X5 */ + MX93_PAD_SD1_CLK__USDHC1_CLK 0x5be + /* HYS | FSEL 0 | no drive */ + MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x1000 + /* HYS | FSEL 3 | X5 */ + MX93_PAD_SD1_CMD__USDHC1_CMD 0x400011be + /* HYS | FSEL 3 | X4 */ + MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x4000119e + MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x4000119e + MX93_PAD_SD1_DATA2__USDHC1_DATA2 0x4000119e + MX93_PAD_SD1_DATA3__USDHC1_DATA3 0x4000119e + MX93_PAD_SD1_DATA4__USDHC1_DATA4 0x4000119e + MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x4000119e + MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x4000119e + MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x4000119e >; }; pinctrl_wdog: wdoggrp { fsl,pins = < + /* PU | FSEL 1 | DSE X4 */ MX93_PAD_WDOG_ANY__WDOG1_WDOG_ANY 0x31e >; }; diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi index a0993022c102da..04b9b3d31f4faf 100644 --- a/arch/arm64/boot/dts/freescale/imx93.dtsi +++ b/arch/arm64/boot/dts/freescale/imx93.dtsi @@ -69,6 +69,13 @@ A55_0: cpu@0 { enable-method = "psci"; #cooling-cells = <2>; cpu-idle-states = <&cpu_pd_wait>; + i-cache-size = <32768>; + i-cache-line-size = <64>; + i-cache-sets = <128>; + d-cache-size = <32768>; + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&l2_cache_l0>; }; A55_1: cpu@100 { @@ -78,8 +85,43 @@ A55_1: cpu@100 { enable-method = "psci"; #cooling-cells = <2>; cpu-idle-states = <&cpu_pd_wait>; + i-cache-size = <32768>; + i-cache-line-size = <64>; + i-cache-sets = <128>; + d-cache-size = <32768>; + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&l2_cache_l1>; }; + l2_cache_l0: l2-cache-l0 { + compatible = "cache"; + cache-size = <65536>; + cache-line-size = <64>; + cache-sets = <256>; + cache-level = <2>; + cache-unified; + next-level-cache = <&l3_cache>; + }; + + l2_cache_l1: l2-cache-l1 { + compatible = "cache"; + cache-size = <65536>; + cache-line-size = <64>; + cache-sets = <256>; + cache-level = <2>; + cache-unified; + next-level-cache = <&l3_cache>; + }; + + l3_cache: l3-cache { + compatible = "cache"; + cache-size = <262144>; + cache-line-size = <64>; + cache-sets = <256>; + cache-level = <3>; + cache-unified; + }; }; osc_32k: clock-osc-32k { @@ -425,6 +467,7 @@ sai1: sai@443b0000 { clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; dmas = <&edma1 22 0 FSL_EDMA_RX>, <&edma1 21 0 0>; dma-names = "rx", "tx"; + #sound-dai-cells = <0>; status = "disabled"; }; @@ -524,6 +567,7 @@ micfil: micfil@44520000 { clock-names = "ipg_clk", "ipg_clk_app", "pll8k"; dmas = <&edma1 29 0 5>; dma-names = "rx"; + #sound-dai-cells = <0>; status = "disabled"; }; @@ -846,6 +890,7 @@ sai2: sai@42650000 { clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; dmas = <&edma2 59 0 FSL_EDMA_RX>, <&edma2 58 0 0>; dma-names = "rx", "tx"; + #sound-dai-cells = <0>; status = "disabled"; }; @@ -859,6 +904,7 @@ sai3: sai@42660000 { clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; dmas = <&edma2 61 0 FSL_EDMA_RX>, <&edma2 60 0 0>; dma-names = "rx", "tx"; + #sound-dai-cells = <0>; status = "disabled"; }; @@ -878,6 +924,7 @@ xcvr: xcvr@42680000 { clock-names = "ipg", "phy", "spba", "pll_ipg"; dmas = <&edma2 65 0 FSL_EDMA_RX>, <&edma2 66 0 0>; dma-names = "rx", "tx"; + #sound-dai-cells = <0>; status = "disabled"; 
}; diff --git a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts index d14a54ab4fd473..37a1d4ca1b2079 100644 --- a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts @@ -5,6 +5,7 @@ /dts-v1/; +#include #include "imx95.dtsi" / { @@ -17,6 +18,11 @@ aliases { serial0 = &lpuart1; }; + bt_sco_codec: audio-codec-bt-sco { + #sound-dai-cells = <1>; + compatible = "linux,bt-sco"; + }; + chosen { stdout-path = &lpuart1; }; @@ -26,6 +32,13 @@ memory@80000000 { reg = <0x0 0x80000000 0 0x80000000>; }; + fan0: pwm-fan { + compatible = "pwm-fan"; + #cooling-cells = <2>; + pwms = <&tpm6 2 4000000 PWM_POLARITY_INVERTED>; + cooling-levels = <64 128 192 255>; + }; + reserved-memory { #address-cells = <2>; #size-cells = <2>; @@ -40,6 +53,34 @@ linux_cma: linux,cma { }; }; + reg_3p3v: regulator-3p3v { + compatible = "regulator-fixed"; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <3300000>; + regulator-name = "+V3.3_SW"; + }; + + reg_audio_pwr: regulator-audio-pwr { + compatible = "regulator-fixed"; + regulator-name = "audio-pwr"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&i2c4_gpio_expander_21 1 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + }; + + reg_audio_slot: regulator-audio-slot { + compatible = "regulator-fixed"; + regulator-name = "audio-wm8962"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&i2c4_gpio_expander_21 7 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + status = "disabled"; + }; + reg_m2_pwr: regulator-m2-pwr { compatible = "regulator-fixed"; regulator-name = "M.2-power"; @@ -79,6 +120,116 @@ reg_usdhc2_vmmc: regulator-usdhc2 { enable-active-high; off-on-delay-us = <12000>; }; + + sound-bt-sco { + compatible = "simple-audio-card"; + simple-audio-card,name = "bt-sco-audio"; + simple-audio-card,format = "dsp_a"; + simple-audio-card,bitclock-inversion; + simple-audio-card,frame-master = <&btcpu>; + simple-audio-card,bitclock-master = <&btcpu>; + + btcpu: simple-audio-card,cpu { + sound-dai = <&sai1>; + dai-tdm-slot-num = <2>; + dai-tdm-slot-width = <16>; + }; + + simple-audio-card,codec { + sound-dai = <&bt_sco_codec 1>; + }; + }; + + sound-micfil { + compatible = "fsl,imx-audio-card"; + model = "micfil-audio"; + + pri-dai-link { + link-name = "micfil hifi"; + format = "i2s"; + cpu { + sound-dai = <&micfil>; + }; + }; + }; + + sound-wm8962 { + compatible = "fsl,imx-audio-wm8962"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hp>; + model = "wm8962-audio"; + audio-cpu = <&sai3>; + audio-codec = <&wm8962>; + hp-det-gpio = <&gpio2 11 GPIO_ACTIVE_HIGH>; + audio-routing = "Headphone Jack", "HPOUTL", + "Headphone Jack", "HPOUTR", + "Ext Spk", "SPKOUTL", + "Ext Spk", "SPKOUTR", + "AMIC", "MICBIAS", + "IN3R", "AMIC", + "IN1R", "AMIC"; + }; +}; + +&flexspi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexspi1>; + status = "okay"; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexspi1_reset>; + reset-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <200000000>; + spi-tx-bus-width = <8>; + spi-rx-bus-width = <8>; + }; +}; + +&lpi2c4 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpi2c4>; + status = "okay"; + + wm8962: audio-codec@1a { + compatible = "wlf,wm8962"; + reg = <0x1a>; + 
clocks = <&scmi_clk IMX95_CLK_SAI3>; + DCVDD-supply = <®_audio_pwr>; + DBVDD-supply = <®_audio_pwr>; + AVDD-supply = <®_audio_pwr>; + CPVDD-supply = <®_audio_pwr>; + MICVDD-supply = <®_audio_pwr>; + PLLVDD-supply = <®_audio_pwr>; + SPKVDD1-supply = <®_audio_pwr>; + SPKVDD2-supply = <®_audio_pwr>; + gpio-cfg = < 0x0000 /* 0:Default */ + 0x0000 /* 1:Default */ + 0x0000 /* 2:FN_DMICCLK */ + 0x0000 /* 3:Default */ + 0x0000 /* 4:FN_DMICCDAT */ + 0x0000 /* 5:Default */ + >; + }; + + i2c4_gpio_expander_21: gpio@21 { + compatible = "nxp,pcal6408"; + reg = <0x21>; + #gpio-cells = <2>; + gpio-controller; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&gpio2>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c4_pcal6408>; + vcc-supply = <®_3p3v>; + }; }; &lpi2c7 { @@ -108,6 +259,23 @@ &lpuart1 { status = "okay"; }; +&micfil { + #sound-dai-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pdm>; + assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>, + <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>, + <&scmi_clk IMX95_CLK_AUDIOPLL1>, + <&scmi_clk IMX95_CLK_AUDIOPLL2>, + <&scmi_clk IMX95_CLK_PDM>; + assigned-clock-parents = <0>, <0>, <0>, <0>, + <&scmi_clk IMX95_CLK_AUDIOPLL1>; + assigned-clock-rates = <3932160000>, + <3612672000>, <393216000>, + <361267200>, <49152000>; + status = "okay"; +}; + &mu7 { status = "okay"; }; @@ -128,6 +296,42 @@ &pcie1 { status = "okay"; }; +&sai1 { + #sound-dai-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sai1>; + assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>, + <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>, + <&scmi_clk IMX95_CLK_AUDIOPLL1>, + <&scmi_clk IMX95_CLK_AUDIOPLL2>, + <&scmi_clk IMX95_CLK_SAI1>; + assigned-clock-parents = <0>, <0>, <0>, <0>, + <&scmi_clk IMX95_CLK_AUDIOPLL1>; + assigned-clock-rates = <3932160000>, + <3612672000>, <393216000>, + <361267200>, <12288000>; + fsl,sai-mclk-direction-output; + status = "okay"; +}; + +&sai3 { + #sound-dai-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sai3>; + assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>, + <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>, + <&scmi_clk IMX95_CLK_AUDIOPLL1>, + <&scmi_clk IMX95_CLK_AUDIOPLL2>, + <&scmi_clk IMX95_CLK_SAI3>; + assigned-clock-parents = <0>, <0>, <0>, <0>, + <&scmi_clk IMX95_CLK_AUDIOPLL1>; + assigned-clock-rates = <3932160000>, + <3612672000>, <393216000>, + <361267200>, <12288000>; + fsl,sai-mclk-direction-output; + status = "okay"; +}; + &usdhc1 { pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep"; pinctrl-0 = <&pinctrl_usdhc1>; @@ -159,12 +363,53 @@ &wdog3 { }; &scmi_iomuxc { + pinctrl_flexspi1: flexspi1grp { + fsl,pins = < + IMX95_PAD_XSPI1_SS0_B__FLEXSPI1_A_SS0_B 0x3fe + IMX95_PAD_XSPI1_SCLK__FLEXSPI1_A_SCLK 0x3fe + IMX95_PAD_XSPI1_DQS__FLEXSPI1_A_DQS 0x3fe + IMX95_PAD_XSPI1_DATA0__FLEXSPI1_A_DATA_BIT0 0x3fe + IMX95_PAD_XSPI1_DATA1__FLEXSPI1_A_DATA_BIT1 0x3fe + IMX95_PAD_XSPI1_DATA2__FLEXSPI1_A_DATA_BIT2 0x3fe + IMX95_PAD_XSPI1_DATA3__FLEXSPI1_A_DATA_BIT3 0x3fe + IMX95_PAD_XSPI1_DATA4__FLEXSPI1_A_DATA_BIT4 0x3fe + IMX95_PAD_XSPI1_DATA5__FLEXSPI1_A_DATA_BIT5 0x3fe + IMX95_PAD_XSPI1_DATA6__FLEXSPI1_A_DATA_BIT6 0x3fe + IMX95_PAD_XSPI1_DATA7__FLEXSPI1_A_DATA_BIT7 0x3fe + >; + }; + + pinctrl_flexspi1_reset: flexspi1-reset-grp { + fsl,pins = < + IMX95_PAD_XSPI1_SS1_B__GPIO5_IO_BIT11 0x3fe + >; + }; + + pinctrl_hp: hpgrp { + fsl,pins = < + IMX95_PAD_GPIO_IO11__GPIO2_IO_BIT11 0x31e + >; + }; + + pinctrl_i2c4_pcal6408: i2c4pcal6498grp { + fsl,pins = < + 
IMX95_PAD_GPIO_IO18__GPIO2_IO_BIT18 0x31e + >; + }; + pinctrl_i2c7_pcal6524: i2c7pcal6524grp { fsl,pins = < IMX95_PAD_GPIO_IO36__GPIO5_IO_BIT16 0x31e >; }; + pinctrl_lpi2c4: lpi2c4grp { + fsl,pins = < + IMX95_PAD_GPIO_IO30__LPI2C4_SDA 0x40000b9e + IMX95_PAD_GPIO_IO31__LPI2C4_SCL 0x40000b9e + >; + }; + pinctrl_lpi2c7: lpi2c7grp { fsl,pins = < IMX95_PAD_GPIO_IO08__LPI2C7_SDA 0x40000b9e @@ -184,6 +429,54 @@ IMX95_PAD_GPIO_IO35__HSIOMIX_TOP_PCIE2_CLKREQ_B 0x4000031e >; }; + pinctrl_pdm: pdmgrp { + fsl,pins = < + IMX95_PAD_PDM_CLK__AONMIX_TOP_PDM_CLK 0x31e + IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_PDM_BIT_STREAM_BIT0 0x31e + >; + }; + + pinctrl_sai1: sai1grp { + fsl,pins = < + IMX95_PAD_SAI1_RXD0__AONMIX_TOP_SAI1_RX_DATA_BIT0 0x31e + IMX95_PAD_SAI1_TXC__AONMIX_TOP_SAI1_TX_BCLK 0x31e + IMX95_PAD_SAI1_TXFS__AONMIX_TOP_SAI1_TX_SYNC 0x31e + IMX95_PAD_SAI1_TXD0__AONMIX_TOP_SAI1_TX_DATA_BIT0 0x31e + >; + }; + + pinctrl_sai2: sai2grp { + fsl,pins = < + IMX95_PAD_ENET2_MDIO__NETCMIX_TOP_SAI2_RX_BCLK 0x31e + IMX95_PAD_ENET2_MDC__NETCMIX_TOP_SAI2_RX_SYNC 0x31e + IMX95_PAD_ENET2_TD3__NETCMIX_TOP_SAI2_RX_DATA_BIT0 0x31e + IMX95_PAD_ENET2_TD2__NETCMIX_TOP_SAI2_RX_DATA_BIT1 0x31e + IMX95_PAD_ENET2_TXC__NETCMIX_TOP_SAI2_TX_BCLK 0x31e + IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_SAI2_TX_SYNC 0x31e + IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_SAI2_TX_DATA_BIT0 0x31e + IMX95_PAD_ENET2_RXC__NETCMIX_TOP_SAI2_TX_DATA_BIT1 0x31e + IMX95_PAD_ENET2_RD0__NETCMIX_TOP_SAI2_TX_DATA_BIT2 0x31e + IMX95_PAD_ENET2_RD1__NETCMIX_TOP_SAI2_TX_DATA_BIT3 0x31e + IMX95_PAD_ENET2_RD2__NETCMIX_TOP_SAI2_MCLK 0x31e + >; + }; + + pinctrl_sai3: sai3grp { + fsl,pins = < + IMX95_PAD_GPIO_IO17__SAI3_MCLK 0x31e + IMX95_PAD_GPIO_IO16__SAI3_TX_BCLK 0x31e + IMX95_PAD_GPIO_IO26__SAI3_TX_SYNC 0x31e + IMX95_PAD_GPIO_IO20__SAI3_RX_DATA_BIT0 0x31e + IMX95_PAD_GPIO_IO21__SAI3_TX_DATA_BIT0 0x31e + >; + }; + + pinctrl_tpm6: tpm6grp { + fsl,pins = < + IMX95_PAD_GPIO_IO19__TPM6_CH2 0x51e + >; + }; + pinctrl_uart1: uart1grp { fsl,pins = < IMX95_PAD_UART1_RXD__AONMIX_TOP_LPUART1_RX 0x31e @@ -287,3 +580,50 @@ IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e >; }; }; + +&thermal_zones { + a55-thermal { + trips { + atrip2: trip2 { + temperature = <55000>; + hysteresis = <2000>; + type = "active"; + }; + + atrip3: trip3 { + temperature = <65000>; + hysteresis = <2000>; + type = "active"; + }; + + atrip4: trip4 { + temperature = <75000>; + hysteresis = <2000>; + type = "active"; + }; + }; + + cooling-maps { + map1 { + trip = <&atrip2>; + cooling-device = <&fan0 0 1>; + }; + + map2 { + trip = <&atrip3>; + cooling-device = <&fan0 1 2>; + }; + + map3 { + trip = <&atrip4>; + cooling-device = <&fan0 2 3>; + }; + }; + }; +}; + +&tpm6 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_tpm6>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi index 425272aa5a8160..03661e76550f4d 100644 --- a/arch/arm64/boot/dts/freescale/imx95.dtsi +++ b/arch/arm64/boot/dts/freescale/imx95.dtsi @@ -3,6 +3,7 @@ * Copyright 2024 NXP */ +#include #include #include #include @@ -221,6 +222,13 @@ core5 { }; }; + dummy: clock-dummy { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + clock-output-names = "dummy"; + }; + clk_ext1: clock-ext1 { compatible = "fixed-clock"; #clock-cells = <0>; @@ -281,7 +289,7 @@ sram1: sram@204c0000 { firmware { scmi { compatible = "arm,scmi"; - mboxes = <&mu2 5 0>, <&mu2 3 0>, <&mu2 3 1>; + mboxes = <&mu2 5 0>, <&mu2 3 0>, <&mu2 3 1>, <&mu2 5 1>; shmem = <&scmi_buf0>, <&scmi_buf1>; 
#address-cells = <1>; #size-cells = <0>; @@ -318,7 +326,7 @@ pmu { interrupts = ; }; - thermal-zones { + thermal_zones: thermal-zones { a55-thermal { polling-delay-passive = <250>; polling-delay = <2000>; @@ -405,6 +413,152 @@ aips2: bus@42000000 { #address-cells = <1>; #size-cells = <1>; + edma2: dma-controller@42000000 { + compatible = "fsl,imx95-edma5"; + reg = <0x42000000 0x210000>; + #dma-cells = <3>; + dma-channels = <64>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>; + clock-names = "dma"; + }; + + edma3: dma-controller@42210000 { + compatible = "fsl,imx95-edma5"; + reg = <0x42210000 0x210000>; + #dma-cells = <3>; + dma-channels = <64>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>; + clock-names = "dma"; + }; + mu7: mailbox@42430000 { compatible = "fsl,imx95-mu"; reg = <0x42430000 0x10000>; @@ -464,6 +618,8 @@ lpi2c3: i2c@42530000 { clock-names = "per", "ipg"; #address-cells = <1>; #size-cells = <0>; + dmas = <&edma2 8 0 0>, <&edma2 9 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -476,6 +632,8 @@ lpi2c4: i2c@42540000 { clock-names = "per", "ipg"; #address-cells = <1>; #size-cells = <0>; + dmas = <&edma2 10 0 0>, <&edma2 11 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -488,6 +646,8 @@ lpspi3: spi@42550000 { clocks = <&scmi_clk IMX95_CLK_LPSPI3>, <&scmi_clk IMX95_CLK_BUSWAKEUP>; clock-names = "per", "ipg"; + dmas = <&edma2 12 0 0>, <&edma2 13 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -500,6 +660,8 @@ lpspi4: spi@42560000 { clocks = <&scmi_clk IMX95_CLK_LPSPI4>, <&scmi_clk IMX95_CLK_BUSWAKEUP>; clock-names = "per", "ipg"; + dmas = <&edma2 14 0 0>, <&edma2 15 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -510,6 +672,8 @@ lpuart3: serial@42570000 { interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART3>; clock-names = "ipg"; + dmas = <&edma2 18 0 FSL_EDMA_RX>, <&edma2 17 0 0>; + dma-names = "rx", "tx"; status = "disabled"; }; @@ -520,6 +684,8 @@ lpuart4: serial@42580000 { interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART4>; clock-names = "ipg"; + dmas = <&edma2 20 0 FSL_EDMA_RX>, <&edma2 19 0 0>; + dma-names = "rx", "tx"; status = "disabled"; }; @@ -530,6 +696,8 @@ lpuart5: serial@42590000 { interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART5>; clock-names = "ipg"; + dmas = <&edma2 22 0 FSL_EDMA_RX>, <&edma2 21 0 0>; + dma-names = "rx", "tx"; status = "disabled"; }; @@ -540,6 +708,110 @@ lpuart6: serial@425a0000 { interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART6>; clock-names = "ipg"; + dmas = <&edma2 24 0 FSL_EDMA_RX>, <&edma2 23 0 0>; + dma-names = "rx", "tx"; + status = "disabled"; + }; + + flexcan2: can@425b0000 { + compatible = "fsl,imx95-flexcan"; + reg = <0x425b0000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, + <&scmi_clk IMX95_CLK_CAN2>; + clock-names = "ipg", "per"; + assigned-clocks = <&scmi_clk IMX95_CLK_CAN2>; + assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>; + assigned-clock-rates = <40000000>; + 
fsl,clk-source = /bits/ 8 <0>; + status = "disabled"; + }; + + flexcan3: can@42600000 { + compatible = "fsl,imx95-flexcan"; + reg = <0x42600000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, + <&scmi_clk IMX95_CLK_CAN3>; + clock-names = "ipg", "per"; + assigned-clocks = <&scmi_clk IMX95_CLK_CAN3>; + assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>; + assigned-clock-rates = <40000000>; + fsl,clk-source = /bits/ 8 <0>; + status = "disabled"; + }; + + flexspi1: spi@425e0000 { + compatible = "nxp,imx8mm-fspi"; + reg = <0x425e0000 0x10000>, <0x28000000 0x8000000>; + reg-names = "fspi_base", "fspi_mmap"; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_FLEXSPI1>, + <&scmi_clk IMX95_CLK_FLEXSPI1>; + clock-names = "fspi_en", "fspi"; + assigned-clocks = <&scmi_clk IMX95_CLK_FLEXSPI1>; + assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1>; + assigned-clock-rates = <200000000>; + status = "disabled"; + }; + + sai3: sai@42650000 { + compatible = "fsl,imx95-sai"; + reg = <0x42650000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, <&dummy>, + <&scmi_clk IMX95_CLK_SAI3>, <&dummy>, + <&dummy>; + clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; + dmas = <&edma2 61 0 FSL_EDMA_RX>, <&edma2 60 0 0>; + dma-names = "rx", "tx"; + status = "disabled"; + }; + + sai4: sai@42660000 { + compatible = "fsl,imx95-sai"; + reg = <0x42660000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, <&dummy>, + <&scmi_clk IMX95_CLK_SAI4>, <&dummy>, + <&dummy>; + clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; + dmas = <&edma2 68 0 FSL_EDMA_RX>, <&edma2 67 0 0>; + dma-names = "rx", "tx"; + status = "disabled"; + }; + + sai5: sai@42670000 { + compatible = "fsl,imx95-sai"; + reg = <0x42670000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, <&dummy>, + <&scmi_clk IMX95_CLK_SAI5>, <&dummy>, + <&dummy>; + clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; + dmas = <&edma2 70 0 FSL_EDMA_RX>, <&edma2 69 0 0>; + dma-names = "rx", "tx"; + status = "disabled"; + }; + + xcvr: xcvr@42680000 { + compatible = "fsl,imx95-xcvr"; + reg = <0x42680000 0x800>, <0x42680800 0x400>, + <0x42680c00 0x080>, <0x42680e00 0x080>; + reg-names = "ram", "regs", "rxfifo", "txfifo"; + interrupts = /* XCVR IRQ 0 */ + , + /* XCVR IRQ 1 */ + ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, + <&scmi_clk IMX95_CLK_SPDIF>, + <&dummy>, + <&scmi_clk IMX95_CLK_AUDIOXCVR>; + clock-names = "ipg", "phy", "spba", "pll_ipg"; + dmas = <&edma2 65 0 1>, <&edma2 66 0 0>; + dma-names = "rx", "tx"; status = "disabled"; }; @@ -550,6 +822,8 @@ lpuart7: serial@42690000 { interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART7>; clock-names = "ipg"; + dmas = <&edma2 26 0 FSL_EDMA_RX>, <&edma2 25 0 0>; + dma-names = "rx", "tx"; status = "disabled"; }; @@ -560,6 +834,8 @@ lpuart8: serial@426a0000 { interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART8>; clock-names = "ipg"; + dmas = <&edma2 28 0 FSL_EDMA_RX>, <&edma2 27 0 0>; + dma-names = "rx", "tx"; status = "disabled"; }; @@ -572,6 +848,8 @@ lpi2c5: i2c@426b0000 { clock-names = "per", "ipg"; #address-cells = <1>; #size-cells = <0>; + dmas = <&edma2 71 0 0>, <&edma2 72 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -584,6 +862,8 @@ lpi2c6: i2c@426c0000 { clock-names = "per", "ipg"; #address-cells = <1>; #size-cells = <0>; + dmas = <&edma2 73 0 0>, <&edma2 74 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -596,6 +876,8 @@ 
lpi2c7: i2c@426d0000 { clock-names = "per", "ipg"; #address-cells = <1>; #size-cells = <0>; + dmas = <&edma2 75 0 0>, <&edma2 76 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -608,6 +890,8 @@ lpi2c8: i2c@426e0000 { clock-names = "per", "ipg"; #address-cells = <1>; #size-cells = <0>; + dmas = <&edma2 77 0 0>, <&edma2 78 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -620,6 +904,8 @@ lpspi5: spi@426f0000 { clocks = <&scmi_clk IMX95_CLK_LPSPI5>, <&scmi_clk IMX95_CLK_BUSWAKEUP>; clock-names = "per", "ipg"; + dmas = <&edma2 79 0 0>, <&edma2 80 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -632,6 +918,8 @@ lpspi6: spi@42700000 { clocks = <&scmi_clk IMX95_CLK_LPSPI6>, <&scmi_clk IMX95_CLK_BUSWAKEUP>; clock-names = "per", "ipg"; + dmas = <&edma2 81 0 0>, <&edma2 82 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -644,6 +932,8 @@ lpspi7: spi@42710000 { clocks = <&scmi_clk IMX95_CLK_LPSPI7>, <&scmi_clk IMX95_CLK_BUSWAKEUP>; clock-names = "per", "ipg"; + dmas = <&edma2 83 0 0>, <&edma2 84 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -656,6 +946,8 @@ lpspi8: spi@42720000 { clocks = <&scmi_clk IMX95_CLK_LPSPI8>, <&scmi_clk IMX95_CLK_BUSWAKEUP>; clock-names = "per", "ipg"; + dmas = <&edma2 85 0 0>, <&edma2 86 0 FSL_EDMA_RX>; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -667,6 +959,34 @@ mu8: mailbox@42730000 { #mbox-cells = <2>; status = "disabled"; }; + + flexcan4: can@427c0000 { + compatible = "fsl,imx95-flexcan"; + reg = <0x427c0000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, + <&scmi_clk IMX95_CLK_CAN4>; + clock-names = "ipg", "per"; + assigned-clocks = <&scmi_clk IMX95_CLK_CAN4>; + assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>; + assigned-clock-rates = <40000000>; + fsl,clk-source = /bits/ 8 <0>; + status = "disabled"; + }; + + flexcan5: can@427d0000 { + compatible = "fsl,imx95-flexcan"; + reg = <0x427d0000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>, + <&scmi_clk IMX95_CLK_CAN5>; + clock-names = "ipg", "per"; + assigned-clocks = <&scmi_clk IMX95_CLK_CAN5>; + assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>; + assigned-clock-rates = <40000000>; + fsl,clk-source = /bits/ 8 <0>; + status = "disabled"; + }; }; aips3: bus@42800000 { @@ -796,6 +1116,46 @@ aips1: bus@44000000 { #address-cells = <1>; #size-cells = <1>; + edma1: dma-controller@44000000 { + compatible = "fsl,imx93-edma3"; + reg = <0x44000000 0x200000>; + #dma-cells = <3>; + dma-channels = <31>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + clocks = <&scmi_clk IMX95_CLK_BUSAON>; + clock-names = "dma"; + }; + mu1: mailbox@44220000 { compatible = "fsl,imx95-mu"; reg = <0x44220000 0x10000>; @@ -830,6 +1190,8 @@ lpi2c1: i2c@44340000 { clock-names = "per", "ipg"; #address-cells = <1>; #size-cells = <0>; + dmas = <&edma1 12 0 0>, <&edma1 13 0 FSL_EDMA_RX> ; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -842,6 +1204,8 @@ lpi2c2: i2c@44350000 { clock-names = "per", "ipg"; #address-cells = <1>; #size-cells = <0>; + dmas = <&edma1 14 0 0>, <&edma1 15 0 FSL_EDMA_RX> ; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -854,6 +1218,8 @@ lpspi1: spi@44360000 { clocks = <&scmi_clk IMX95_CLK_LPSPI1>, <&scmi_clk IMX95_CLK_BUSAON>; clock-names = "per", "ipg"; + dmas = <&edma1 16 0 FSL_EDMA_RX>, <&edma1 17 0 0> ; + dma-names = "tx", "rx"; status = 
"disabled"; }; @@ -866,6 +1232,8 @@ lpspi2: spi@44370000 { clocks = <&scmi_clk IMX95_CLK_LPSPI2>, <&scmi_clk IMX95_CLK_BUSAON>; clock-names = "per", "ipg"; + dmas = <&edma1 18 0 FSL_EDMA_RX>, <&edma1 19 0 0> ; + dma-names = "tx", "rx"; status = "disabled"; }; @@ -876,6 +1244,8 @@ lpuart1: serial@44380000 { interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART1>; clock-names = "ipg"; + dmas = <&edma1 21 0 FSL_EDMA_RX>, <&edma1 20 0 0>; + dma-names = "rx", "tx"; status = "disabled"; }; @@ -886,6 +1256,54 @@ lpuart2: serial@44390000 { interrupts = ; clocks = <&scmi_clk IMX95_CLK_LPUART2>; clock-names = "ipg"; + dmas = <&edma1 23 0 FSL_EDMA_RX>, <&edma1 22 0 0>; + dma-names = "rx", "tx"; + status = "disabled"; + }; + + flexcan1: can@443a0000 { + compatible = "fsl,imx95-flexcan"; + reg = <0x443a0000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSAON>, + <&scmi_clk IMX95_CLK_CAN1>; + clock-names = "ipg", "per"; + assigned-clocks = <&scmi_clk IMX95_CLK_CAN1>; + assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>; + assigned-clock-rates = <40000000>; + fsl,clk-source = /bits/ 8 <0>; + status = "disabled"; + }; + + sai1: sai@443b0000 { + compatible = "fsl,imx95-sai"; + reg = <0x443b0000 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSAON>, <&dummy>, + <&scmi_clk IMX95_CLK_SAI1>, <&dummy>, + <&dummy>; + clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; + dmas = <&edma1 25 0 FSL_EDMA_RX>, <&edma1 24 0 0>; + dma-names = "rx", "tx"; + status = "disabled"; + }; + + micfil: micfil@44520000 { + compatible = "fsl,imx95-micfil", "fsl,imx93-micfil"; + reg = <0x44520000 0x10000>; + interrupts = , + , + , + ; + clocks = <&scmi_clk IMX95_CLK_BUSAON>, + <&scmi_clk IMX95_CLK_PDM>, + <&scmi_clk IMX95_CLK_AUDIOPLL1>, + <&scmi_clk IMX95_CLK_AUDIOPLL2>, + <&dummy>; + clock-names = "ipg_clk", "ipg_clk_app", + "pll8k", "pll11k", "clkext3"; + dmas = <&edma1 6 0 5>; + dma-names = "rx"; status = "disabled"; }; @@ -1188,5 +1606,37 @@ pcie1_ep: pcie-ep@4c380000 { power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>; status = "disabled"; }; + + netcmix_blk_ctrl: syscon@4c810000 { + compatible = "nxp,imx95-netcmix-blk-ctrl", "syscon"; + reg = <0x0 0x4c810000 0x0 0x10000>; + #clock-cells = <1>; + clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>; + assigned-clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>; + assigned-clock-parents = <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>; + assigned-clock-rates = <133333333>; + power-domains = <&scmi_devpd IMX95_PD_NETC>; + status = "disabled"; + }; + + sai2: sai@4c880000 { + compatible = "fsl,imx95-sai"; + reg = <0x0 0x4c880000 0x0 0x10000>; + interrupts = ; + clocks = <&scmi_clk IMX95_CLK_BUSNETCMIX>, <&dummy>, + <&scmi_clk IMX95_CLK_SAI2>, <&dummy>, + <&dummy>; + clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; + power-domains = <&scmi_devpd IMX95_PD_NETC>; + dmas = <&edma2 59 0 FSL_EDMA_RX>, <&edma2 58 0 0>; + dma-names = "rx", "tx"; + status = "disabled"; + }; + + ddr-pmu@4e090dc0 { + compatible = "fsl,imx95-ddr-pmu", "fsl,imx93-ddr-pmu"; + reg = <0x0 0x4e090dc0 0x0 0x200>; + interrupts = ; + }; }; }; diff --git a/arch/arm64/boot/dts/freescale/mba8mx.dtsi b/arch/arm64/boot/dts/freescale/mba8mx.dtsi index 815241526a0d3d..c60c7a9e54aff9 100644 --- a/arch/arm64/boot/dts/freescale/mba8mx.dtsi +++ b/arch/arm64/boot/dts/freescale/mba8mx.dtsi @@ -185,6 +185,8 @@ ethphy0: ethernet-phy@e { reset-gpios = <&expander2 7 GPIO_ACTIVE_LOW>; reset-assert-us = <500000>; reset-deassert-us = <500>; + interrupt-parent = <&expander2>; + interrupts = <6 
IRQ_TYPE_EDGE_FALLING>;
 		};
 	};
 };
@@ -237,7 +239,6 @@ expander1: gpio@24 {
 };
 
 &i2c2 {
-	clock-frequency = <100000>;
 	pinctrl-names = "default", "gpio";
 	pinctrl-0 = <&pinctrl_i2c2>;
 	pinctrl-1 = <&pinctrl_i2c2_gpio>;
@@ -258,6 +259,11 @@ sensor1: temperator-sensor@1f {
 		reg = <0x1f>;
 	};
 
+	/*
+	 * TUSB8041 is at 0x41, but not connected by default
+	 * Note: TUSB8041 only supports 100 kHz!
+	 */
+
 	eeprom3: eeprom@57 {
 		compatible = "nxp,se97b", "atmel,24c02";
 		reg = <0x57>;
@@ -274,7 +280,6 @@ pcieclk: clk@68 {
 };
 
 &i2c3 {
-	clock-frequency = <100000>;
 	pinctrl-names = "default", "gpio";
 	pinctrl-0 = <&pinctrl_i2c3>;
 	pinctrl-1 = <&pinctrl_i2c3_gpio>;
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
index 65f7b5a50eb51d..1b2b20c6126dc2 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
@@ -27,6 +27,7 @@ ethernet@f0000 {
 		reg = <0xf0000 0x1000>;
 		fsl,fman-ports = <&fman0_rx_0x10 &fman0_tx_0x30>;
 		pcsphy-handle = <&pcsphy6>;
+		pcs-handle = <&pcsphy6>;
 	};
 
 	mdio@f1000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
index 3f70482c98c30e..55d78f6f7c6ca1 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
@@ -27,6 +27,7 @@ ethernet@f2000 {
 		reg = <0xf2000 0x1000>;
 		fsl,fman-ports = <&fman0_rx_0x11 &fman0_tx_0x31>;
 		pcsphy-handle = <&pcsphy7>;
+		pcs-handle = <&pcsphy7>;
 	};
 
 	mdio@f3000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
index 78841c1f32527e..18916a860c2ed9 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
@@ -26,6 +26,7 @@ ethernet@e0000 {
 		fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
 		ptp-timer = <&ptp_timer0>;
 		pcsphy-handle = <&pcsphy0>;
+		pcs-handle = <&pcsphy0>;
 	};
 
 	mdio@e1000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
index 1f43fa66622218..e90af445a293d7 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
@@ -26,6 +26,7 @@ ethernet@e2000 {
 		fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
 		ptp-timer = <&ptp_timer0>;
 		pcsphy-handle = <&pcsphy1>;
+		pcs-handle = <&pcsphy1>;
 	};
 
 	mdio@e3000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
index de0aa017701dd9..fec93905bc818e 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
@@ -26,6 +26,7 @@ ethernet@e4000 {
 		fsl,fman-ports = <&fman0_rx_0x0a &fman0_tx_0x2a>;
 		ptp-timer = <&ptp_timer0>;
 		pcsphy-handle = <&pcsphy2>;
+		pcs-handle = <&pcsphy2>;
 	};
 
 	mdio@e5000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
index 6904aa5d8e5479..2aa953faa62b11 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
@@ -26,6 +26,7 @@ ethernet@e6000 {
 		fsl,fman-ports = <&fman0_rx_0x0b &fman0_tx_0x2b>;
 		ptp-timer = <&ptp_timer0>;
 		pcsphy-handle = <&pcsphy3>;
+		pcs-handle = <&pcsphy3>;
 	};
 
 	mdio@e7000 {
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
index a3d29d470297e6..948e39411415e6 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
@@ -26,6 +26,7 @@ ethernet@e8000 {
 		fsl,fman-ports = <&fman0_rx_0x0c &fman0_tx_0x2c>;
 		ptp-timer = <&ptp_timer0>;
 		pcsphy-handle = <&pcsphy4>;
+		pcs-handle = <&pcsphy4>;
 	};
 
 	mdio@e9000 {
diff --git a/arch/arm64/boot/dts/freescale/s32g2.dtsi b/arch/arm64/boot/dts/freescale/s32g2.dtsi
index fc19ae2e8d3bc4..fa054bfe7d5c55 100644
--- a/arch/arm64/boot/dts/freescale/s32g2.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32g2.dtsi
@@ -114,6 +114,56 @@ soc@0 {
 		#size-cells = <1>;
 		ranges = <0 0 0 0x80000000>;
 
+		pinctrl: pinctrl@4009c240 {
+			compatible = "nxp,s32g2-siul2-pinctrl";
+			/* MSCR0-MSCR101 registers on siul2_0 */
+			reg = <0x4009c240 0x198>,
+			      /* MSCR112-MSCR122 registers on siul2_1 */
+			      <0x44010400 0x2c>,
+			      /* MSCR144-MSCR190 registers on siul2_1 */
+			      <0x44010480 0xbc>,
+			      /* IMCR0-IMCR83 registers on siul2_0 */
+			      <0x4009ca40 0x150>,
+			      /* IMCR119-IMCR397 registers on siul2_1 */
+			      <0x44010c1c 0x45c>,
+			      /* IMCR430-IMCR495 registers on siul2_1 */
+			      <0x440110f8 0x108>;
+
+			jtag_pins: jtag-pins {
+				jtag-grp0 {
+					pinmux = <0x0>;
+					input-enable;
+					bias-pull-up;
+					slew-rate = <166>;
+				};
+
+				jtag-grp1 {
+					pinmux = <0x11>;
+					slew-rate = <166>;
+				};
+
+				jtag-grp2 {
+					pinmux = <0x40>;
+					input-enable;
+					bias-pull-down;
+					slew-rate = <166>;
+				};
+
+				jtag-grp3 {
+					pinmux = <0x23c0>,
+						 <0x23d0>,
+						 <0x2320>;
+				};
+
+				jtag-grp4 {
+					pinmux = <0x51>;
+					input-enable;
+					bias-pull-up;
+					slew-rate = <166>;
+				};
+			};
+		};
+
 		uart0: serial@401c8000 {
 			compatible = "nxp,s32g2-linflexuart",
 				     "fsl,s32v234-linflexuart";
diff --git a/arch/arm64/boot/dts/freescale/s32g274a-evb.dts b/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
index 00070c949e2ab2..dbe498798bd912 100644
--- a/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
+++ b/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
@@ -34,5 +34,6 @@ &uart0 {
 };
 
 &usdhc0 {
+	disable-wp;
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts b/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
index b3fc12899cae52..ab1e5caaeae74e 100644
--- a/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
+++ b/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
@@ -40,5 +40,6 @@ &uart1 {
 };
 
 &usdhc0 {
+	disable-wp;
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/s32g3.dtsi b/arch/arm64/boot/dts/freescale/s32g3.dtsi
index c1b08992754b0c..b4226a9143c80e 100644
--- a/arch/arm64/boot/dts/freescale/s32g3.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32g3.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
- * Copyright 2021-2023 NXP
+ * Copyright 2021-2024 NXP
  *
  * Authors: Ghennadi Procopciuc
  *          Ciprian Costea
@@ -171,6 +171,56 @@ soc@0 {
 		#size-cells = <1>;
 		ranges = <0 0 0 0x80000000>;
 
+		pinctrl: pinctrl@4009c240 {
+			compatible = "nxp,s32g2-siul2-pinctrl";
+			/* MSCR0-MSCR101 registers on siul2_0 */
+			reg = <0x4009c240 0x198>,
+			      /* MSCR112-MSCR122 registers on siul2_1 */
+			      <0x44010400 0x2c>,
+			      /* MSCR144-MSCR190 registers on siul2_1 */
+			      <0x44010480 0xbc>,
+			      /* IMCR0-IMCR83 registers on siul2_0 */
+			      <0x4009ca40 0x150>,
+			      /* IMCR119-IMCR397 registers on siul2_1 */
+			      <0x44010c1c 0x45c>,
+			      /* IMCR430-IMCR495 registers on siul2_1 */
+			      <0x440110f8 0x108>;
+
+			jtag_pins: jtag-pins {
+				jtag-grp0 {
+					pinmux = <0x0>;
+					input-enable;
+					bias-pull-up;
+					slew-rate = <166>;
+				};
+
+				jtag-grp1 {
+					pinmux = <0x11>;
+					slew-rate = <166>;
+				};
+
+				jtag-grp2 {
pinmux = <0x40>; + input-enable; + bias-pull-down; + slew-rate = <166>; + }; + + jtag-grp3 { + pinmux = <0x23c0>, + <0x23d0>, + <0x2320>; + }; + + jtag-grp4 { + pinmux = <0x51>; + input-enable; + bias-pull-up; + slew-rate = <166>; + }; + }; + }; + uart0: serial@401c8000 { compatible = "nxp,s32g3-linflexuart", "fsl,s32v234-linflexuart"; diff --git a/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts b/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts index 9d674819876e7f..176e5af191c84a 100644 --- a/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts +++ b/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* - * Copyright 2021-2023 NXP + * Copyright 2021-2024 NXP * * NXP S32G3 Reference Design Board 3 (S32G-VNP-RDB3) */ @@ -41,5 +41,6 @@ &uart1 { &usdhc0 { bus-width = <8>; + disable-wp; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/s32v234.dtsi b/arch/arm64/boot/dts/freescale/s32v234.dtsi index 42409ec567925f..bf608ded5dda6e 100644 --- a/arch/arm64/boot/dts/freescale/s32v234.dtsi +++ b/arch/arm64/boot/dts/freescale/s32v234.dtsi @@ -89,7 +89,7 @@ IRQ_TYPE_LEVEL_LOW)>, }; gic: interrupt-controller@7d001000 { - compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic"; + compatible = "arm,cortex-a15-gic"; #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; diff --git a/arch/arm64/boot/dts/mediatek/mt6357.dtsi b/arch/arm64/boot/dts/mediatek/mt6357.dtsi index 3330a03c2f7453..5fafa842d312f3 100644 --- a/arch/arm64/boot/dts/mediatek/mt6357.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6357.dtsi @@ -10,6 +10,11 @@ &pwrap { mt6357_pmic: pmic { compatible = "mediatek,mt6357"; + pmic_adc: adc { + compatible = "mediatek,mt6357-auxadc"; + #io-channel-cells = <1>; + }; + regulators { mt6357_vproc_reg: buck-vproc { regulator-name = "vproc"; diff --git a/arch/arm64/boot/dts/mediatek/mt6358.dtsi b/arch/arm64/boot/dts/mediatek/mt6358.dtsi index a1b96013f8141a..641d452fbc0830 100644 --- a/arch/arm64/boot/dts/mediatek/mt6358.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6358.dtsi @@ -10,6 +10,11 @@ pmic: pmic { interrupt-controller; #interrupt-cells = <2>; + pmic_adc: adc { + compatible = "mediatek,mt6358-auxadc"; + #io-channel-cells = <1>; + }; + mt6358codec: mt6358codec { compatible = "mediatek,mt6358-sound"; mediatek,dmic-mode = <0>; /* two-wires */ diff --git a/arch/arm64/boot/dts/mediatek/mt6359.dtsi b/arch/arm64/boot/dts/mediatek/mt6359.dtsi index df3e822232d340..8e1b8c85c6ede9 100644 --- a/arch/arm64/boot/dts/mediatek/mt6359.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6359.dtsi @@ -9,6 +9,11 @@ pmic: pmic { interrupt-controller; #interrupt-cells = <2>; + pmic_adc: adc { + compatible = "mediatek,mt6359-auxadc"; + #io-channel-cells = <1>; + }; + mt6359codec: mt6359codec { }; diff --git a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi index 64aeeb24efac87..5cbea9cd411fb2 100644 --- a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi @@ -94,6 +94,39 @@ pwm@10048000 { #pwm-cells = <2>; }; + serial@11002000 { + compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart"; + reg = <0 0x11002000 0 0x100>; + interrupts = ; + interrupt-names = "uart", "wakeup"; + clocks = <&infracfg CLK_INFRA_UART0_SEL>, + <&infracfg CLK_INFRA_UART0_CK>; + clock-names = "baud", "bus"; + status = "disabled"; + }; + + serial@11003000 { + compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart"; + reg = <0 0x11003000 0 0x100>; + interrupts = ; + interrupt-names = "uart", 
"wakeup"; + clocks = <&infracfg CLK_INFRA_UART1_SEL>, + <&infracfg CLK_INFRA_UART1_CK>; + clock-names = "baud", "bus"; + status = "disabled"; + }; + + serial@11004000 { + compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart"; + reg = <0 0x11004000 0 0x100>; + interrupts = ; + interrupt-names = "uart", "wakeup"; + clocks = <&infracfg CLK_INFRA_UART2_SEL>, + <&infracfg CLK_INFRA_UART2_CK>; + clock-names = "baud", "bus"; + status = "disabled"; + }; + i2c@11007000 { compatible = "mediatek,mt7981-i2c"; reg = <0 0x11007000 0 0x1000>, @@ -109,6 +142,48 @@ i2c@11007000 { status = "disabled"; }; + spi@11009000 { + compatible = "mediatek,mt7981-spi-ipm", "mediatek,spi-ipm"; + reg = <0 0x11009000 0 0x1000>; + interrupts = ; + clocks = <&topckgen CLK_TOP_CB_M_D2>, + <&topckgen CLK_TOP_SPI_SEL>, + <&infracfg CLK_INFRA_SPI2_CK>, + <&infracfg CLK_INFRA_SPI2_HCK_CK>; + clock-names = "parent-clk", "sel-clk", "spi-clk", "hclk"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi@1100a000 { + compatible = "mediatek,mt7981-spi-ipm", "mediatek,spi-ipm"; + reg = <0 0x1100a000 0 0x1000>; + interrupts = ; + clocks = <&topckgen CLK_TOP_CB_M_D2>, + <&topckgen CLK_TOP_SPI_SEL>, + <&infracfg CLK_INFRA_SPI0_CK>, + <&infracfg CLK_INFRA_SPI0_HCK_CK>; + clock-names = "parent-clk", "sel-clk", "spi-clk", "hclk"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi@1100b000 { + compatible = "mediatek,mt7981-spi-ipm", "mediatek,spi-ipm"; + reg = <0 0x1100b000 0 0x1000>; + interrupts = ; + clocks = <&topckgen CLK_TOP_CB_M_D2>, + <&topckgen CLK_TOP_SPI_SEL>, + <&infracfg CLK_INFRA_SPI1_CK>, + <&infracfg CLK_INFRA_SPI1_HCK_CK>; + clock-names = "parent-clk", "sel-clk", "spi-clk", "hclk"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + pio: pinctrl@11d00000 { compatible = "mediatek,mt7981-pinctrl"; reg = <0 0x11d00000 0 0x1000>, diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi index fa4ab4d2899f9b..783c333107bcbf 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi @@ -91,16 +91,11 @@ cros_ec_pwm: pwm { &dsi0 { status = "okay"; - /delete-property/#size-cells; - /delete-property/#address-cells; /delete-node/panel@0; - ports { - port { - dsi_out: endpoint { - remote-endpoint = <&anx7625_in>; - }; - }; - }; +}; + +&dsi_out { + remote-endpoint = <&anx7625_in>; }; &i2c0 { diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi index 6345e969efae5f..22924f61ec9ed2 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi @@ -24,7 +24,7 @@ chosen { backlight_lcd0: backlight_lcd0 { compatible = "pwm-backlight"; pwms = <&pwm0 0 500000>; - power-supply = <&bl_pp5000>; + power-supply = <®_vsys>; enable-gpios = <&pio 176 0>; brightness-levels = <0 1023>; num-interpolated-steps = <1023>; @@ -47,10 +47,9 @@ clk32k: oscillator1 { it6505_pp18_reg: regulator0 { compatible = "regulator-fixed"; regulator-name = "it6505_pp18"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; gpio = <&pio 178 0>; enable-active-high; + vin-supply = <&pp1800_alw>; }; lcd_pp3300: regulator1 { @@ -62,27 +61,16 @@ lcd_pp3300: regulator1 { regulator-boot-on; }; - bl_pp5000: regulator2 { - compatible = "regulator-fixed"; - regulator-name = "bl_pp5000"; - regulator-min-microvolt = <5000000>; - 
regulator-max-microvolt = <5000000>; - regulator-always-on; - regulator-boot-on; - }; - mmc1_fixed_power: regulator3 { compatible = "regulator-fixed"; regulator-name = "mmc1_power"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; + vin-supply = <&pp3300_alw>; }; mmc1_fixed_io: regulator4 { compatible = "regulator-fixed"; regulator-name = "mmc1_io"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; + vin-supply = <&pp1800_alw>; }; pp1800_alw: regulator5 { @@ -92,6 +80,7 @@ pp1800_alw: regulator5 { regulator-boot-on; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + vin-supply = <®_vsys>; }; pp3300_alw: regulator6 { @@ -101,6 +90,7 @@ pp3300_alw: regulator6 { regulator-boot-on; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; + vin-supply = <®_vsys>; }; /* system wide semi-regulated power rail from charger */ @@ -868,10 +858,6 @@ &mfg { domain-supply = <&mt6358_vgpu_reg>; }; -&soc_data { - status = "okay"; -}; - &spi0 { pinctrl-names = "default"; pinctrl-0 = <&spi0_pins>; diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index fbf145639b8c90..266441e999f211 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -872,8 +872,6 @@ power-domain@MT8183_POWER_DOMAIN_CONN { mfg_async: power-domain@MT8183_POWER_DOMAIN_MFG_ASYNC { reg = ; - clocks = <&topckgen CLK_TOP_MUX_MFG>; - clock-names = "mfg"; #address-cells = <1>; #size-cells = <0>; #power-domain-cells = <1>; @@ -1838,6 +1836,17 @@ dsi0: dsi@14014000 { phy-names = "dphy"; }; + dpi0: dpi@14015000 { + compatible = "mediatek,mt8183-dpi"; + reg = <0 0x14015000 0 0x1000>; + interrupts = ; + power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; + clocks = <&mmsys CLK_MM_DPI_IF>, + <&mmsys CLK_MM_DPI_MM>, + <&apmixedsys CLK_APMIXED_TVDPLL>; + clock-names = "pixel", "engine", "pll"; + }; + mutex: mutex@14016000 { compatible = "mediatek,mt8183-disp-mutex"; reg = <0 0x14016000 0 0x1000>; diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi index afdab5724eaaac..682c6ad2574d00 100644 --- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi @@ -169,7 +169,7 @@ adsp_dma_mem: memory@61000000 { adsp_mem: memory@60000000 { compatible = "shared-dma-pool"; - reg = <0 0x60000000 0 0xA00000>; + reg = <0 0x60000000 0 0x1000000>; no-map; }; @@ -353,7 +353,8 @@ &dpi { pinctrl-names = "default", "sleep"; pinctrl-0 = <&dpi_pins_default>; pinctrl-1 = <&dpi_pins_sleep>; - status = "okay"; + /* TODO Re-enable after DP to Type-C port muxing can be described */ + status = "disabled"; }; &dpi_out { diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi index 4763ed5dc86cfb..148c332018b0d8 100644 --- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi @@ -13,6 +13,8 @@ #include #include #include +#include +#include / { compatible = "mediatek,mt8186"; @@ -731,7 +733,7 @@ opp-850000000 { opp-900000000-3 { opp-hz = /bits/ 64 <900000000>; opp-microvolt = <850000>; - opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; }; opp-900000000-4 { @@ -743,13 +745,13 @@ opp-900000000-4 { opp-900000000-5 { opp-hz = /bits/ 64 <900000000>; opp-microvolt = <825000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; }; opp-950000000-3 { opp-hz = /bits/ 64 <950000000>; opp-microvolt = <900000>; - 
opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; }; opp-950000000-4 { @@ -761,13 +763,13 @@ opp-950000000-4 { opp-950000000-5 { opp-hz = /bits/ 64 <950000000>; opp-microvolt = <850000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; }; opp-1000000000-3 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <950000>; - opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; }; opp-1000000000-4 { @@ -779,7 +781,7 @@ opp-1000000000-4 { opp-1000000000-5 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <875000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; }; }; @@ -1361,6 +1363,29 @@ spi0: spi@1100a000 { status = "disabled"; }; + lvts: thermal-sensor@1100b000 { + compatible = "mediatek,mt8186-lvts"; + reg = <0 0x1100b000 0 0x1000>; + interrupts = ; + clocks = <&infracfg_ao CLK_INFRA_AO_THERM>; + resets = <&infracfg_ao MT8186_INFRA_THERMAL_CTRL_RST>; + nvmem-cells = <&lvts_efuse_data1 &lvts_efuse_data2>; + nvmem-cell-names = "lvts-calib-data-1", "lvts-calib-data-2"; + #thermal-sensor-cells = <1>; + }; + + svs: svs@1100bc00 { + compatible = "mediatek,mt8186-svs"; + reg = <0 0x1100bc00 0 0x400>; + interrupts = ; + clocks = <&infracfg_ao CLK_INFRA_AO_THERM>; + clock-names = "main"; + nvmem-cells = <&svs_calibration>, <&lvts_efuse_data1>; + nvmem-cell-names = "svs-calibration-data", "t-calibration-data"; + resets = <&infracfg_ao MT8186_INFRA_PTP_CTRL_RST>; + reset-names = "svs_rst"; + }; + pwm0: pwm@1100e000 { compatible = "mediatek,mt8186-disp-pwm", "mediatek,mt8183-disp-pwm"; reg = <0 0x1100e000 0 0x1000>; @@ -1676,6 +1701,18 @@ efuse: efuse@11cb0000 { #address-cells = <1>; #size-cells = <1>; + lvts_efuse_data1: lvts1-calib@1cc { + reg = <0x1cc 0x14>; + }; + + lvts_efuse_data2: lvts2-calib@2f8 { + reg = <0x2f8 0x14>; + }; + + svs_calibration: calib@550 { + reg = <0x550 0x50>; + }; + gpu_speedbin: gpu-speedbin@59c { reg = <0x59c 0x4>; bits = <0 3>; @@ -1824,6 +1861,7 @@ dpi: dpi@1400a000 { assigned-clocks = <&topckgen CLK_TOP_DPI>; assigned-clock-parents = <&topckgen CLK_TOP_TVDPLL_D2>; interrupts = ; + power-domains = <&spm MT8186_POWER_DOMAIN_DIS>; status = "disabled"; port { @@ -2178,4 +2216,299 @@ larb19: smi@1c10f000 { power-domains = <&spm MT8186_POWER_DOMAIN_IPE>; }; }; + + thermal_zones: thermal-zones { + cpu-little0-thermal { + polling-delay = <1000>; + polling-delay-passive = <150>; + thermal-sensors = <&lvts MT8186_LITTLE_CPU0>; + + trips { + cpu_little0_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_little0_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_little0_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_little0_alert0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-little1-thermal { + polling-delay = <1000>; + polling-delay-passive = <150>; + thermal-sensors = <&lvts MT8186_LITTLE_CPU1>; + + trips { + cpu_little1_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_little1_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_little1_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = 
"critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_little1_alert0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-little2-thermal { + polling-delay = <1000>; + polling-delay-passive = <150>; + thermal-sensors = <&lvts MT8186_LITTLE_CPU2>; + + trips { + cpu_little2_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_little2_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_little2_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_little2_alert0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cam-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts MT8186_CAM>; + + trips { + cam_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cam_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cam_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + + nna-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts MT8186_NNA>; + + trips { + nna_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + nna_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + nna_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + + adsp-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts MT8186_ADSP>; + + trips { + adsp_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + adsp_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + adsp_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + + gpu-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts MT8186_GPU>; + + trips { + gpu_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + gpu_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + gpu_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&gpu_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-big0-thermal { + polling-delay = <1000>; + polling-delay-passive = <100>; + thermal-sensors = <&lvts MT8186_BIG_CPU0>; + + trips { + cpu_big0_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_big0_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_big0_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + 
cooling-maps { + map0 { + trip = <&cpu_big0_alert0>; + cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-big1-thermal { + polling-delay = <1000>; + polling-delay-passive = <100>; + thermal-sensors = <&lvts MT8186_BIG_CPU1>; + + trips { + cpu_big1_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_big1_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_big1_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_big1_alert0>; + cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi index 29d012d28edb1b..cd27966d2e3c05 100644 --- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi @@ -12,6 +12,9 @@ #include #include #include +#include +#include +#include / { compatible = "mediatek,mt8188"; @@ -417,6 +420,450 @@ psci { method = "smc"; }; + thermal_zones: thermal-zones { + cpu-little0-thermal { + polling-delay = <1000>; + polling-delay-passive = <150>; + thermal-sensors = <&lvts_mcu MT8188_MCU_LITTLE_CPU0>; + + trips { + cpu_little0_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_little0_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_little0_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_little0_alert0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-little1-thermal { + polling-delay = <1000>; + polling-delay-passive = <150>; + thermal-sensors = <&lvts_mcu MT8188_MCU_LITTLE_CPU1>; + + trips { + cpu_little1_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_little1_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_little1_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_little1_alert0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-little2-thermal { + polling-delay = <1000>; + polling-delay-passive = <150>; + thermal-sensors = <&lvts_mcu MT8188_MCU_LITTLE_CPU2>; + + trips { + cpu_little2_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_little2_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_little2_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_little2_alert0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 
THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-little3-thermal { + polling-delay = <1000>; + polling-delay-passive = <150>; + thermal-sensors = <&lvts_mcu MT8188_MCU_LITTLE_CPU3>; + + trips { + cpu_little3_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_little3_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_little3_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_little3_alert0>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-big0-thermal { + polling-delay = <1000>; + polling-delay-passive = <100>; + thermal-sensors = <&lvts_mcu MT8188_MCU_BIG_CPU0>; + + trips { + cpu_big0_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_big0_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_big0_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_big0_alert0>; + cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + cpu-big1-thermal { + polling-delay = <1000>; + polling-delay-passive = <100>; + thermal-sensors = <&lvts_mcu MT8188_MCU_BIG_CPU1>; + + trips { + cpu_big1_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_big1_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cpu_big1_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_big1_alert0>; + cooling-device = <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + apu-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts_ap MT8188_AP_APU>; + + trips { + apu_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + apu_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + apu_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + + gpu-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts_ap MT8188_AP_GPU0>; + + trips { + gpu_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + gpu_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + gpu_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&gpu_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + gpu1-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts_ap MT8188_AP_GPU1>; + + trips { + gpu1_alert0: trip-alert0 { + temperature = 
<85000>; + hysteresis = <2000>; + type = "passive"; + }; + + gpu1_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + gpu1_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&gpu1_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + adsp-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts_ap MT8188_AP_ADSP>; + + trips { + soc_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + soc_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + soc_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + + vdo-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts_ap MT8188_AP_VDO>; + + trips { + soc1_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + soc1_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + soc1_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + + infra-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts_ap MT8188_AP_INFRA>; + + trips { + soc2_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + soc2_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + soc2_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + + cam1-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts_ap MT8188_AP_CAM1>; + + trips { + cam1_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cam1_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cam1_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + + cam2-thermal { + polling-delay = <1000>; + polling-delay-passive = <250>; + thermal-sensors = <&lvts_ap MT8188_AP_CAM2>; + + trips { + cam2_alert0: trip-alert0 { + temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; + + cam2_alert1: trip-alert1 { + temperature = <95000>; + hysteresis = <2000>; + type = "hot"; + }; + + cam2_crit: trip-crit { + temperature = <100000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + }; + timer: timer { compatible = "arm,armv8-timer"; interrupt-parent = <&gic>; @@ -464,6 +911,7 @@ infracfg_ao: syscon@10001000 { compatible = "mediatek,mt8188-infracfg-ao", "syscon"; reg = <0 0x10001000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; pericfg: syscon@10003000 { @@ -937,6 +1385,17 @@ spi0: spi@1100a000 { status = "disabled"; }; + lvts_ap: thermal-sensor@1100b000 { + compatible = "mediatek,mt8188-lvts-ap"; + reg = <0 0x1100b000 0 0xc00>; + interrupts = ; + clocks = <&infracfg_ao CLK_INFRA_AO_THERM>; + resets = <&infracfg_ao MT8188_INFRA_RST1_THERMAL_CTRL_RST>; + nvmem-cells = <&lvts_efuse_data1>; + nvmem-cell-names = "lvts-calib-data-1"; + #thermal-sensor-cells = <1>; + }; + spi1: spi@11010000 { compatible = "mediatek,mt8188-spi-ipm", "mediatek,spi-ipm"; #address-cells = <1>; @@ -1050,6 +1509,17 @@ mmc1: mmc@11240000 { status = "disabled"; }; + lvts_mcu: thermal-sensor@11278000 { + 
compatible = "mediatek,mt8188-lvts-mcu"; + reg = <0 0x11278000 0 0x1000>; + interrupts = ; + clocks = <&infracfg_ao CLK_INFRA_AO_THERM>; + resets = <&infracfg_ao MT8188_INFRA_RST1_THERMAL_MCU_RST>; + nvmem-cells = <&lvts_efuse_data1>; + nvmem-cell-names = "lvts-calib-data-1"; + #thermal-sensor-cells = <1>; + }; + i2c0: i2c@11280000 { compatible = "mediatek,mt8188-i2c"; reg = <0 0x11280000 0 0x1000>, @@ -1273,6 +1743,17 @@ imp_iic_wrap_en: clock-controller@11ec2000 { #clock-cells = <1>; }; + efuse: efuse@11f20000 { + compatible = "mediatek,mt8188-efuse", "mediatek,efuse"; + reg = <0 0x11f20000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + + lvts_efuse_data1: lvts1-calib@1ac { + reg = <0x1ac 0x40>; + }; + }; + gpu: gpu@13000000 { compatible = "mediatek,mt8188-mali", "arm,mali-valhall-jm"; reg = <0 0x13000000 0 0x4000>; @@ -1287,6 +1768,7 @@ gpu: gpu@13000000 { <&spm MT8188_POWER_DOMAIN_MFG3>, <&spm MT8188_POWER_DOMAIN_MFG4>; power-domain-names = "core0", "core1", "core2"; + #cooling-cells = <2>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts index 88123842c818d3..49664de99b8827 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-dojo-r1.dts @@ -82,12 +82,17 @@ &pio_default { pins-low-power-hdmi-disable { pinmux = , , - , - , - ; + ; input-enable; bias-pull-down; }; + + pins-low-power-hdmi-rsel-disable { + pinmux = , + ; + input-enable; + bias-pull-down = <75000>; + }; }; &sound { diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts index 2fe20e0dad836d..2d6522c144b751 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts @@ -19,13 +19,18 @@ &pio_default { pins-low-power-hdmi-disable { pinmux = , , - , - , - ; + ; input-enable; bias-pull-down; }; + pins-low-power-hdmi-rsel-disable { + pinmux = , + ; + input-enable; + bias-pull-down = <75000>; + }; + pins-low-power-pcie0-disable { pinmux = , , diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts index dd294ca98194cc..9049d362a5e015 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts @@ -20,13 +20,18 @@ &pio_default { pins-low-power-hdmi-disable { pinmux = , , - , - , - ; + ; input-enable; bias-pull-down; }; + pins-low-power-hdmi-rsel-disable { + pinmux = , + ; + input-enable; + bias-pull-down = <75000>; + }; + pins-low-power-pcie0-disable { pinmux = , , diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi index fe5400e17b0f43..75d56b2d5a3d34 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi @@ -1228,10 +1228,6 @@ cros_ec: ec@0 { spi-max-frequency = <3000000>; wakeup-source; - keyboard-backlight { - compatible = "google,cros-kbd-led-backlight"; - }; - i2c_tunnel: i2c-tunnel { compatible = "google,cros-ec-i2c-tunnel"; google,remote-bus = <0>; @@ -1401,9 +1397,11 @@ &xhci0 { &xhci1 { status = "okay"; + phys = <&u2port1 PHY_TYPE_USB2>; rx-fifo-depth = <3072>; vusb33-supply = <&mt6359_vusb_ldo_reg>; vbus-supply = <&usb_vbus>; + mediatek,u3p-dis-msk = <1>; }; &xhci2 { diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi 
b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index 2ee45752583c00..e89ba384c4aafc 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -1444,7 +1444,7 @@ xhci1: usb@11290000 { <0 0x11293e00 0 0x0100>; reg-names = "mac", "ippc"; interrupts = ; - phys = <&u2port1 PHY_TYPE_USB2>; + phys = <&u2port1 PHY_TYPE_USB2>, <&u3port1 PHY_TYPE_USB3>; assigned-clocks = <&topckgen CLK_TOP_USB_TOP_1P>, <&topckgen CLK_TOP_SSUSB_XHCI_1P>; assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>, @@ -2037,6 +2037,7 @@ dma-controller@14001000 { mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x1000 0x1000>; mediatek,gce-events = , ; + mediatek,scp = <&scp>; power-domains = <&spm MT8195_POWER_DOMAIN_VPPSYS0>; iommus = <&iommu_vpp M4U_PORT_L4_MDP_RDMA>; clocks = <&vppsys0 CLK_VPP0_MDP_RDMA>; @@ -3251,10 +3252,10 @@ dp_intf0: dp-intf@1c015000 { compatible = "mediatek,mt8195-dp-intf"; reg = <0 0x1c015000 0 0x1000>; interrupts = ; - clocks = <&vdosys0 CLK_VDO0_DP_INTF0>, - <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>, + clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>, + <&vdosys0 CLK_VDO0_DP_INTF0>, <&apmixedsys CLK_APMIXED_TVDPLL1>; - clock-names = "engine", "pixel", "pll"; + clock-names = "pixel", "engine", "pll"; status = "disabled"; }; @@ -3521,10 +3522,10 @@ dp_intf1: dp-intf@1c113000 { reg = <0 0x1c113000 0 0x1000>; interrupts = ; power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>; - clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>, - <&vdosys1 CLK_VDO1_DPINTF>, + clocks = <&vdosys1 CLK_VDO1_DPINTF>, + <&vdosys1 CLK_VDO1_DP_INTF0_MM>, <&apmixedsys CLK_APMIXED_TVDPLL2>; - clock-names = "engine", "pixel", "pll"; + clock-names = "pixel", "engine", "pll"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts index 4211a992dd9db4..7d90112a7e2746 100644 --- a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts +++ b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts @@ -4,6 +4,7 @@ * Authors: * Fabien Parent * Bernhard Rosenkränzer + * Alexandre Mergnat */ /dts-v1/; @@ -86,6 +87,28 @@ optee_reserved: optee@43200000 { reg = <0 0x43200000 0 0x00c00000>; }; }; + + sound: sound { + compatible = "mediatek,mt8365-mt6357"; + pinctrl-names = "default", + "dmic", + "miso_off", + "miso_on", + "mosi_off", + "mosi_on"; + pinctrl-0 = <&aud_default_pins>; + pinctrl-1 = <&aud_dmic_pins>; + pinctrl-2 = <&aud_miso_off_pins>; + pinctrl-3 = <&aud_miso_on_pins>; + pinctrl-4 = <&aud_mosi_off_pins>; + pinctrl-5 = <&aud_mosi_on_pins>; + mediatek,platform = <&afe>; + }; +}; + +&afe { + mediatek,dmic-mode = <1>; + status = "okay"; }; &cpu0 { @@ -178,9 +201,72 @@ &mt6357_pmic { interrupts-extended = <&pio 145 IRQ_TYPE_LEVEL_HIGH>; interrupt-controller; #interrupt-cells = <2>; + mediatek,micbias0-microvolt = <1900000>; + mediatek,micbias1-microvolt = <1700000>; }; &pio { + aud_default_pins: audiodefault-pins { + clk-dat-pins { + pinmux = , + , + , + ; + }; + }; + + aud_dmic_pins: audiodmic-pins { + clk-dat-pins { + pinmux = , + , + ; + }; + }; + + aud_miso_off_pins: misooff-pins { + clk-dat-pins { + pinmux = , + , + , + ; + input-enable; + bias-pull-down; + drive-strength = <2>; + }; + }; + + aud_miso_on_pins: misoon-pins { + clk-dat-pins { + pinmux = , + , + , + ; + drive-strength = <6>; + }; + }; + + aud_mosi_off_pins: mosioff-pins { + clk-dat-pins { + pinmux = , + , + , + ; + input-enable; + bias-pull-down; + drive-strength = <2>; + }; + }; + + aud_mosi_on_pins: mosion-pins { + clk-dat-pins { + pinmux = , + , + , + ; + drive-strength = <6>; + }; + 
}; + ethernet_pins: ethernet-pins { phy_reset_pins { pinmux = ; diff --git a/arch/arm64/boot/dts/mediatek/mt8365.dtsi b/arch/arm64/boot/dts/mediatek/mt8365.dtsi index eb449bfa88030c..9c91fe8ea0f969 100644 --- a/arch/arm64/boot/dts/mediatek/mt8365.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8365.dtsi @@ -2,9 +2,11 @@ /* * (C) 2018 MediaTek Inc. * Copyright (C) 2022 BayLibre SAS - * Fabien Parent - * Bernhard Rosenkränzer + * Authors: Fabien Parent + * Bernhard Rosenkränzer + * Alexandre Mergnat */ + #include #include #include @@ -812,6 +814,43 @@ apu: syscon@19020000 { reg = <0 0x19020000 0 0x1000>; #clock-cells = <1>; }; + + afe: audio-controller@11220000 { + compatible = "mediatek,mt8365-afe-pcm"; + reg = <0 0x11220000 0 0x1000>; + #sound-dai-cells = <0>; + clocks = <&clk26m>, + <&topckgen CLK_TOP_AUDIO_SEL>, + <&topckgen CLK_TOP_AUD_I2S0_M>, + <&topckgen CLK_TOP_AUD_I2S1_M>, + <&topckgen CLK_TOP_AUD_I2S2_M>, + <&topckgen CLK_TOP_AUD_I2S3_M>, + <&topckgen CLK_TOP_AUD_ENGEN1_SEL>, + <&topckgen CLK_TOP_AUD_ENGEN2_SEL>, + <&topckgen CLK_TOP_AUD_1_SEL>, + <&topckgen CLK_TOP_AUD_2_SEL>, + <&topckgen CLK_TOP_APLL_I2S0_SEL>, + <&topckgen CLK_TOP_APLL_I2S1_SEL>, + <&topckgen CLK_TOP_APLL_I2S2_SEL>, + <&topckgen CLK_TOP_APLL_I2S3_SEL>; + clock-names = "top_clk26m_clk", + "top_audio_sel", + "audio_i2s0_m", + "audio_i2s1_m", + "audio_i2s2_m", + "audio_i2s3_m", + "engen1", + "engen2", + "aud1", + "aud2", + "i2s0_m_sel", + "i2s1_m_sel", + "i2s2_m_sel", + "i2s3_m_sel"; + interrupts = ; + power-domains = <&spm MT8365_POWER_DOMAIN_AUDIO>; + status = "disabled"; + }; }; timer { diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts index a06610fff8adef..1ef6262b65c9ac 100644 --- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts +++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts @@ -904,8 +904,6 @@ &xhci0 { }; &xhci1 { - phys = <&u2port1 PHY_TYPE_USB2>, - <&u3port1 PHY_TYPE_USB3>; vusb33-supply = <&mt6359_vusb_ldo_reg>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts index e4b2af9489a893..e2e75b8ff91880 100644 --- a/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts +++ b/arch/arm64/boot/dts/mediatek/mt8395-kontron-3-5-sbc-i1200.dts @@ -1111,6 +1111,7 @@ &xhci0 { /* USB2.0 M.2 Key-B */ &xhci1 { + phys = <&u2port1 PHY_TYPE_USB2>; vusb33-supply = <&mt6359_vusb_ldo_reg>; mediatek,u3p-dis-msk = <0x01>; status = "okay"; diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts index 4b5f6cf16f7076..14ec970c4e491f 100644 --- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts +++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts @@ -894,10 +894,12 @@ &xhci0 { }; &xhci1 { + phys = <&u2port1 PHY_TYPE_USB2>; /* MT7921's USB Bluetooth has issues with USB2 LPM */ usb2-lpm-disable; vusb33-supply = <&mt6359_vusb_ldo_reg>; vbus-supply = <&vsys>; + mediatek,u3p-dis-msk = <1>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts b/arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts index b89e2be6abaeb0..9482bec1aa5736 100644 --- a/arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts +++ b/arch/arm64/boot/dts/nuvoton/ma35d1-iot-512m.dts @@ -14,6 +14,10 @@ / { aliases { serial0 = &uart0; + serial10 = &uart10; + serial12 = &uart12; + serial13 = &uart13; + serial14 = &uart14; }; chosen { @@ -33,10 +37,6 @@ clk_hxt: clock-hxt { }; 
}; -&uart0 { - status = "okay"; -}; - &clk { assigned-clocks = <&clk CAPLL>, <&clk DDRPLL>, @@ -54,3 +54,75 @@ &clk { "integer", "integer"; }; + +&pinctrl { + uart-grp { + pinctrl_uart0: uart0-pins { + nuvoton,pins = <4 14 1>, + <4 15 1>; + bias-disable; + power-source = <1>; + }; + + pinctrl_uart10: uart10-pins { + nuvoton,pins = <7 4 2>, + <7 5 2>, + <7 6 2>, + <7 7 2>; + bias-disable; + power-source = <1>; + }; + + pinctrl_uart12: uart12-pins { + nuvoton,pins = <2 13 2>, + <2 14 2>, + <2 15 2>; + bias-disable; + power-source = <1>; + }; + + pinctrl_uart13: uart13-pins { + nuvoton,pins = <7 12 3>, + <7 13 3>; + bias-disable; + power-source = <1>; + }; + + pinctrl_uart14: uart14-pins { + nuvoton,pins = <7 14 2>, + <7 15 2>; + bias-disable; + power-source = <1>; + }; + }; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart0>; + status = "okay"; +}; + +&uart10 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart10>; + status = "okay"; +}; + +&uart12 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart12>; + status = "okay"; +}; + +&uart13 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart13>; + status = "okay"; +}; + +&uart14 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart14>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts b/arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts index a1ebddecb7f816..f6f20a17e50187 100644 --- a/arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts +++ b/arch/arm64/boot/dts/nuvoton/ma35d1-som-256m.dts @@ -14,6 +14,10 @@ / { aliases { serial0 = &uart0; + serial11 = &uart11; + serial12 = &uart12; + serial14 = &uart14; + serial16 = &uart16; }; chosen { @@ -33,10 +37,6 @@ clk_hxt: clock-hxt { }; }; -&uart0 { - status = "okay"; -}; - &clk { assigned-clocks = <&clk CAPLL>, <&clk DDRPLL>, @@ -54,3 +54,78 @@ &clk { "integer", "integer"; }; + +&pinctrl { + uart-grp { + pinctrl_uart0: uart0-pins { + nuvoton,pins = <4 14 1>, + <4 15 1>; + bias-disable; + power-source = <1>; + }; + + pinctrl_uart11: uart11-pins { + nuvoton,pins = <11 0 2>, + <11 1 2>, + <11 2 2>, + <11 3 2>; + bias-disable; + power-source = <1>; + }; + + pinctrl_uart12: uart12-pins { + nuvoton,pins = <8 1 2>, + <8 2 2>, + <8 3 2>; + bias-disable; + power-source = <1>; + }; + + pinctrl_uart14: uart14-pins { + nuvoton,pins = <8 5 2>, + <8 6 2>, + <8 7 2>; + bias-disable; + power-source = <1>; + }; + + pinctrl_uart16: uart16-pins { + nuvoton,pins = <10 0 2>, + <10 1 2>, + <10 2 2>, + <10 3 2>; + bias-disable; + power-source = <1>; + }; + }; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart0>; + status = "okay"; +}; + +&uart11 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart11>; + status = "okay"; +}; + +&uart12 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart12>; + status = "okay"; +}; + +&uart14 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart14>; + status = "okay"; +}; + +&uart16 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart16>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/nuvoton/ma35d1.dtsi b/arch/arm64/boot/dts/nuvoton/ma35d1.dtsi index 781cdae566a012..e51b98f5bdce4c 100644 --- a/arch/arm64/boot/dts/nuvoton/ma35d1.dtsi +++ b/arch/arm64/boot/dts/nuvoton/ma35d1.dtsi @@ -83,7 +83,7 @@ soc { ranges; sys: system-management@40460000 { - compatible = "nuvoton,ma35d1-reset"; + compatible = "nuvoton,ma35d1-reset", "syscon"; reg = <0x0 0x40460000 0x0 0x200>; #reset-cells = <1>; }; @@ -95,6 +95,155 @@ clk: clock-controller@40460200 { clocks = 
<&clk_hxt>; }; + pinctrl: pinctrl@40040000 { + compatible = "nuvoton,ma35d1-pinctrl"; + reg = <0x0 0x40040000 0x0 0xc00>; + #address-cells = <1>; + #size-cells = <1>; + nuvoton,sys = <&sys>; + ranges = <0x0 0x0 0x40040000 0x400>; + + gpioa: gpio@0 { + reg = <0x0 0x40>; + interrupts = ; + clocks = <&clk GPA_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpiob: gpio@40 { + reg = <0x40 0x40>; + interrupts = ; + clocks = <&clk GPB_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpioc: gpio@80 { + reg = <0x80 0x40>; + interrupts = ; + clocks = <&clk GPC_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpiod: gpio@c0 { + reg = <0xc0 0x40>; + interrupts = ; + clocks = <&clk GPD_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpioe: gpio@100 { + reg = <0x100 0x40>; + interrupts = ; + clocks = <&clk GPE_GATE>; + #gpio-cells = <2>; + gpio-controller; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpiof: gpio@140 { + reg = <0x140 0x40>; + interrupts = ; + clocks = <&clk GPF_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpiog: gpio@180 { + reg = <0x180 0x40>; + interrupts = ; + clocks = <&clk GPG_GATE>; + #gpio-cells = <2>; + gpio-controller; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpioh: gpio@1c0 { + reg = <0x1c0 0x40>; + interrupts = ; + clocks = <&clk GPH_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpioi: gpio@200 { + reg = <0x200 0x40>; + interrupts = ; + clocks = <&clk GPI_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpioj: gpio@240 { + reg = <0x240 0x40>; + interrupts = ; + clocks = <&clk GPJ_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpiok: gpio@280 { + reg = <0x280 0x40>; + interrupts = ; + clocks = <&clk GPK_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpiol: gpio@2c0 { + reg = <0x2c0 0x40>; + interrupts = ; + clocks = <&clk GPL_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpiom: gpio@300 { + reg = <0x300 0x40>; + interrupts = ; + clocks = <&clk GPM_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpion: gpio@340 { + reg = <0x340 0x40>; + interrupts = ; + clocks = <&clk GPN_GATE>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + uart0: serial@40700000 { compatible = "nuvoton,ma35d1-uart"; reg = <0x0 0x40700000 0x0 0x100>; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi index 0ae5a44f7d0700..c00db75e391057 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi @@ -33,6 +33,51 @@ serial@70006000 { status = "okay"; }; + serial@70006300 { + /delete-property/ reg-shift; + status = "okay"; + compatible = "nvidia,tegra30-hsuart"; + reset-names = "serial"; + + bluetooth { + compatible = "brcm,bcm43540-bt"; + device-wakeup-gpios = <&gpio TEGRA_GPIO(H, 3) GPIO_ACTIVE_HIGH>; + shutdown-gpios = <&gpio TEGRA_GPIO(H, 4) GPIO_ACTIVE_HIGH>; + interrupt-parent = 
<&gpio>; + interrupts = ; + interrupt-names = "host-wakeup"; + }; + }; + + i2c@7000c400 { + status = "okay"; + + power-sensor@40 { + compatible = "ti,ina3221"; + reg = <0x40>; + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + reg = <0x0>; + label = "VDD_IN"; + shunt-resistor-micro-ohms = <20000>; + }; + + input@1 { + reg = <0x1>; + label = "VDD_GPU"; + shunt-resistor-micro-ohms = <10000>; + }; + + input@2 { + reg = <0x2>; + label = "VDD_CPU"; + shunt-resistor-micro-ohms = <10000>; + }; + }; + }; + i2c@7000c500 { status = "okay"; @@ -295,6 +340,25 @@ pmc@7000e400 { nvidia,sys-clock-req-active-high; }; + mmc@700b0200 { + status = "okay"; + bus-width = <4>; + non-removable; + power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>; + vqmmc-supply = <&vdd_1v8>; + vmmc-supply = <&vdd_3v3_sys>; + #address-cells = <1>; + #size-cells = <0>; + + wifi@1 { + compatible = "brcm,bcm4354-fmac"; + reg = <1>; + interrupt-parent = <&gpio>; + interrupts = ; + interrupt-names = "host-wake"; + }; + }; + /* eMMC */ mmc@700b0600 { status = "okay"; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi index b4a1108c2dd74f..63b94a04308e86 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi @@ -1319,6 +1319,56 @@ i2c@7000c400 { status = "okay"; clock-frequency = <100000>; + power-sensor@42 { + compatible = "ti,ina3221"; + reg = <0x42>; + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + reg = <0x0>; + label = "VDD_MUX"; + shunt-resistor-micro-ohms = <20000>; + }; + + input@1 { + reg = <0x1>; + label = "VDD_5V_IO_SYS"; + shunt-resistor-micro-ohms = <5000>; + }; + + input@2 { + reg = <0x2>; + label = "VDD_3V3_SYS"; + shunt-resistor-micro-ohms = <10000>; + }; + }; + + power-sensor@43 { + compatible = "ti,ina3221"; + reg = <0x43>; + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + reg = <0x0>; + label = "VDD_3V3_IO"; + shunt-resistor-micro-ohms = <10000>; + }; + + input@1 { + reg = <0x1>; + label = "VDD_1V8_IO"; + shunt-resistor-micro-ohms = <10000>; + }; + + input@2 { + reg = <0x2>; + label = "VDD_M2_IN"; + shunt-resistor-micro-ohms = <10000>; + }; + }; + exp1: gpio@74 { compatible = "ti,tca9539"; reg = <0x74>; @@ -1517,6 +1567,7 @@ mmc@700b0000 { bus-width = <4>; cd-gpios = <&gpio TEGRA_GPIO(Z, 1) GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio TEGRA_GPIO(Z, 4) GPIO_ACTIVE_HIGH>; vqmmc-supply = <&vddio_sdmmc>; vmmc-supply = <&vdd_3v3_sd>; @@ -1603,7 +1654,7 @@ vdd_3v3_sd: regulator-vdd-3v3-sd { regulator-name = "VDD_3V3_SD"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - gpio = <&gpio TEGRA_GPIO(Z, 4) GPIO_ACTIVE_HIGH>; + gpio = <&gpio TEGRA_GPIO(Z, 3) GPIO_ACTIVE_HIGH>; enable-active-high; vin-supply = <&vdd_3v3_sys>; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi index cb792041fc621d..d977f4901c0947 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0000.dtsi @@ -1,146 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -#include "tegra234.dtsi" #include "tegra234-p3701.dtsi" / { model = "NVIDIA Jetson AGX Orin"; compatible = "nvidia,p3701-0000", "nvidia,tegra234"; - bus@0 { - i2c@3160000 { - status = "okay"; - - eeprom@50 { - compatible = "atmel,24c02"; - reg = <0x50>; - - label = "module"; - vcc-supply = <&vdd_1v8_hs>; - address-width = <8>; - pagesize = <8>; - size = <256>; - read-only; - }; - }; - - spi@3270000 { - status = 
"okay"; - - flash@0 { - compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <102000000>; - spi-tx-bus-width = <4>; - spi-rx-bus-width = <4>; - }; - }; - - mmc@3400000 { - status = "okay"; - bus-width = <4>; - cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>; - disable-wp; - }; - - mmc@3460000 { - status = "okay"; - bus-width = <8>; - non-removable; - }; - - padctl@3520000 { - vclamp-usb-supply = <&vdd_1v8_ao>; - avdd-usb-supply = <&vdd_3v3_ao>; - - ports { - usb2-0 { - vbus-supply = <&vdd_5v0_sys>; - }; - - usb2-1 { - vbus-supply = <&vdd_5v0_sys>; - }; - - usb2-2 { - vbus-supply = <&vdd_5v0_sys>; - }; - - usb2-3 { - vbus-supply = <&vdd_5v0_sys>; - }; - }; - }; - - rtc@c2a0000 { - status = "okay"; - }; - - pmc@c360000 { - nvidia,invert-interrupt; - }; - }; - - vdd_5v0_sys: regulator-vdd-5v0-sys { - compatible = "regulator-fixed"; - regulator-name = "VIN_SYS_5V0"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - regulator-always-on; - regulator-boot-on; - }; - - vdd_1v8_ls: regulator-vdd-1v8-ls { - compatible = "regulator-fixed"; - regulator-name = "VDD_1V8_LS"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; - - vdd_1v8_hs: regulator-vdd-1v8-hs { - compatible = "regulator-fixed"; - regulator-name = "VDD_1V8_HS"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; - - vdd_1v8_ao: regulator-vdd-1v8-ao { - compatible = "regulator-fixed"; - regulator-name = "VDD_1V8_AO"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; - - vdd_3v3_ao: regulator-vdd-3v3-ao { - compatible = "regulator-fixed"; - regulator-name = "VDD_3V3_AO"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-always-on; - }; - - vdd_3v3_pcie: regulator-vdd-3v3-pcie { - compatible = "regulator-fixed"; - regulator-name = "VDD_3V3_PCIE"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio TEGRA234_MAIN_GPIO(H, 4) GPIO_ACTIVE_HIGH>; - regulator-boot-on; - enable-active-high; - }; - - vdd_12v_pcie: regulator-vdd-12v-pcie { - compatible = "regulator-fixed"; - regulator-name = "VDD_12V_PCIE"; - regulator-min-microvolt = <12000000>; - regulator-max-microvolt = <12000000>; - gpio = <&gpio TEGRA234_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>; - regulator-boot-on; - }; - thermal-zones { tj-thermal { polling-delay = <1000>; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi index 553fa4ba1cd48a..0809634e57320d 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi @@ -1,145 +1,29 @@ // SPDX-License-Identifier: GPL-2.0 -#include "tegra234.dtsi" #include "tegra234-p3701.dtsi" / { compatible = "nvidia,p3701-0008", "nvidia,tegra234"; - bus@0 { - i2c@3160000 { + thermal-zones { + tj-thermal { + polling-delay = <1000>; + polling-delay-passive = <1000>; status = "okay"; - eeprom@50 { - compatible = "atmel,24c02"; - reg = <0x50>; - label = "module"; - vcc-supply = <&vdd_1v8_hs>; - address-width = <8>; - pagesize = <8>; - size = <256>; - read-only; - }; - }; - - spi@3270000 { - status = "okay"; - - flash@0 { - compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <102000000>; - spi-tx-bus-width = <4>; - spi-rx-bus-width = <4>; - }; - }; - - mmc@3460000 { - status = "okay"; - bus-width = <8>; - non-removable; - }; - - 
i2c@c240000 { - status = "okay"; - }; - - i2c@c250000 { - power-sensor@41 { - compatible = "ti,ina3221"; - reg = <0x41>; - #address-cells = <1>; - #size-cells = <0>; - - input@0 { - reg = <0x0>; - label = "CVB_ATX_12V"; - shunt-resistor-micro-ohms = <2000>; - }; - - input@1 { - reg = <0x1>; - label = "CVB_ATX_3V3"; - shunt-resistor-micro-ohms = <2000>; + trips { + tj_trip_active0: active-0 { + temperature = <85000>; + hysteresis = <4000>; + type = "active"; }; - input@2 { - reg = <0x2>; - label = "CVB_ATX_5V"; - shunt-resistor-micro-ohms = <2000>; + tj_trip_active1: active-1 { + temperature = <105000>; + hysteresis = <4000>; + type = "active"; }; }; - - power-sensor@44 { - compatible = "ti,ina219"; - reg = <0x44>; - shunt-resistor = <2000>; - }; - }; - - rtc@c2a0000 { - status = "okay"; }; - - pmc@c360000 { - nvidia,invert-interrupt; - }; - }; - - bpmp { - i2c { - status = "okay"; - - thermal-sensor@4c { - status = "okay"; - reg = <0x4c>; - vcc-supply = <&vdd_1v8_ao>; - }; - }; - - thermal { - status = "okay"; - }; - }; - - vdd_1v8_ao: regulator-vdd-1v8-ao { - compatible = "regulator-fixed"; - regulator-name = "VDD_1V8_AO"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; - - vdd_1v8_hs: regulator-vdd-1v8-hs { - compatible = "regulator-fixed"; - regulator-name = "VDD_1V8_HS"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; - - vdd_1v8_ls: regulator-vdd-1v8-ls { - compatible = "regulator-fixed"; - regulator-name = "VDD_1V8_LS"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; - - vdd_3v3_ao: regulator-vdd-3v3-ao { - compatible = "regulator-fixed"; - regulator-name = "vdd-AO-3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-always-on; - }; - - vdd_5v0_sys: regulator-vdd-5v0-sys { - compatible = "regulator-fixed"; - regulator-name = "VIN_SYS_5V0"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - regulator-always-on; - regulator-boot-on; }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi index 320c8e9b06b46d..9086a0d010e5c5 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3701.dtsi @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +#include "tegra234.dtsi" + / { compatible = "nvidia,p3701", "nvidia,tegra234"; @@ -45,6 +47,63 @@ interrupt-controller@2a40000 { }; }; + i2c@3160000 { + status = "okay"; + + eeprom@50 { + compatible = "atmel,24c02"; + reg = <0x50>; + + label = "module"; + vcc-supply = <&vdd_1v8_hs>; + address-width = <8>; + pagesize = <8>; + size = <256>; + read-only; + }; + }; + + spi@3270000 { + status = "okay"; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <102000000>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + }; + }; + + mmc@3460000 { + status = "okay"; + bus-width = <8>; + non-removable; + }; + + padctl@3520000 { + vclamp-usb-supply = <&vdd_1v8_ao>; + avdd-usb-supply = <&vdd_3v3_ao>; + + ports { + usb2-0 { + vbus-supply = <&vdd_5v0_sys>; + }; + + usb2-1 { + vbus-supply = <&vdd_5v0_sys>; + }; + + usb2-2 { + vbus-supply = <&vdd_5v0_sys>; + }; + + usb2-3 { + vbus-supply = <&vdd_5v0_sys>; + }; + }; + }; + i2c@c240000 { status = "okay"; @@ -97,5 +156,71 @@ input@2 { }; }; }; + + rtc@c2a0000 { + status = "okay"; + }; + + pmc@c360000 { + nvidia,invert-interrupt; + }; + }; + + 
bpmp { + i2c { + status = "okay"; + + thermal-sensor@4c { + compatible = "ti,tmp451"; + status = "okay"; + reg = <0x4c>; + vcc-supply = <&vdd_1v8_ao>; + }; + }; + + thermal { + status = "okay"; + }; + }; + + vdd_1v8_ao: regulator-vdd-1v8-ao { + compatible = "regulator-fixed"; + regulator-name = "VDD_1V8_AO"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + vdd_1v8_hs: regulator-vdd-1v8-hs { + compatible = "regulator-fixed"; + regulator-name = "VDD_1V8_HS"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + vdd_1v8_ls: regulator-vdd-1v8-ls { + compatible = "regulator-fixed"; + regulator-name = "VDD_1V8_LS"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + vdd_3v3_ao: regulator-vdd-3v3-ao { + compatible = "regulator-fixed"; + regulator-name = "VDD_3V3_AO"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vdd_5v0_sys: regulator-vdd-5v0-sys { + compatible = "regulator-fixed"; + regulator-name = "VIN_SYS_5V0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + regulator-boot-on; }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts index 69db584253dae8..90f12277aede8b 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts @@ -3,9 +3,9 @@ #include #include +#include #include "tegra234-p3701-0000.dtsi" -#include "tegra234-p3737-0000.dtsi" / { model = "NVIDIA Jetson AGX Orin Developer Kit"; @@ -22,23 +22,97 @@ chosen { }; bus@0 { + aconnect@2900000 { + ahub@2900800 { + i2s@2901000 { + ports { + port@1 { + endpoint { + dai-format = "i2s"; + remote-endpoint = <&rt5640_ep>; + }; + }; + }; + }; + }; + }; + serial@3100000 { compatible = "nvidia,tegra194-hsuart"; reset-names = "serial"; status = "okay"; }; + i2c@3160000 { + status = "okay"; + + eeprom@56 { + compatible = "atmel,24c02"; + reg = <0x56>; + + label = "system"; + vcc-supply = <&vdd_1v8_sys>; + address-width = <8>; + pagesize = <8>; + size = <256>; + read-only; + }; + }; + serial@31d0000 { current-speed = <115200>; status = "okay"; }; + i2c@31e0000 { + status = "okay"; + + audio-codec@1c { + compatible = "realtek,rt5640"; + reg = <0x1c>; + interrupt-parent = <&gpio>; + interrupts = ; + clocks = <&bpmp TEGRA234_CLK_AUD_MCLK>; + clock-names = "mclk"; + realtek,dmic1-data-pin = ; + realtek,dmic2-data-pin = ; + realtek,jack-detect-source = ; + sound-name-prefix = "CVB-RT"; + + port { + rt5640_ep: endpoint { + remote-endpoint = <&i2s1_dap>; + mclk-fs = <256>; + }; + }; + }; + }; + + pwm@3280000 { + status = "okay"; + }; + pwm@32a0000 { assigned-clocks = <&bpmp TEGRA234_CLK_PWM3>; assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLP_OUT0>; status = "okay"; }; + pwm@32c0000 { + status = "okay"; + }; + + pwm@32f0000 { + status = "okay"; + }; + + mmc@3400000 { + status = "okay"; + bus-width = <4>; + cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>; + disable-wp; + }; + hda@3510000 { nvidia,model = "NVIDIA Jetson AGX Orin HDA"; status = "okay"; @@ -341,8 +415,11 @@ key-suspend { }; }; - pwm-fan { + fan: pwm-fan { + compatible = "pwm-fan"; cooling-levels = <66 215 255>; + pwms = <&pwm3 0 45334>; + #cooling-cells = <2>; }; serial { @@ -444,4 +521,31 @@ map-active-1 { }; }; }; + + 
vdd_1v8_sys: regulator-vdd-1v8-sys { + compatible = "regulator-fixed"; + regulator-name = "VDD_1V8_SYS"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + vdd_3v3_pcie: regulator-vdd-3v3-pcie { + compatible = "regulator-fixed"; + regulator-name = "VDD_3V3_PCIE"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio TEGRA234_MAIN_GPIO(H, 4) GPIO_ACTIVE_HIGH>; + regulator-boot-on; + enable-active-high; + }; + + vdd_12v_pcie: regulator-vdd-12v-pcie { + compatible = "regulator-fixed"; + regulator-name = "VDD_12V_PCIE"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + gpio = <&gpio TEGRA234_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>; + regulator-boot-on; + }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000.dtsi deleted file mode 100644 index eb79e80a98521c..00000000000000 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000.dtsi +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include - -/ { - compatible = "nvidia,p3737-0000"; - - bus@0 { - aconnect@2900000 { - ahub@2900800 { - i2s@2901000 { - ports { - port@1 { - endpoint { - dai-format = "i2s"; - remote-endpoint = <&rt5640_ep>; - }; - }; - }; - }; - }; - }; - - i2c@3160000 { - status = "okay"; - - eeprom@56 { - compatible = "atmel,24c02"; - reg = <0x56>; - - label = "system"; - vcc-supply = <&vdd_1v8_sys>; - address-width = <8>; - pagesize = <8>; - size = <256>; - read-only; - }; - }; - - i2c@31e0000 { - status = "okay"; - - audio-codec@1c { - compatible = "realtek,rt5640"; - reg = <0x1c>; - interrupt-parent = <&gpio>; - interrupts = ; - clocks = <&bpmp TEGRA234_CLK_AUD_MCLK>; - clock-names = "mclk"; - realtek,dmic1-data-pin = ; - realtek,dmic2-data-pin = ; - realtek,jack-detect-source = ; - sound-name-prefix = "CVB-RT"; - - port { - rt5640_ep: endpoint { - remote-endpoint = <&i2s1_dap>; - mclk-fs = <256>; - }; - }; - }; - }; - - pwm@3280000 { - status = "okay"; - }; - - pwm@32c0000 { - status = "okay"; - }; - - pwm@32f0000 { - status = "okay"; - }; - }; - - fan: pwm-fan { - compatible = "pwm-fan"; - pwms = <&pwm3 0 45334>; - #cooling-cells = <2>; - }; - - vdd_1v8_sys: regulator-vdd-1v8-sys { - compatible = "regulator-fixed"; - regulator-name = "VDD_1V8_SYS"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; -}; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts index bac611d735c589..36e88805374606 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts @@ -3,8 +3,8 @@ #include #include +#include #include "tegra234-p3701-0008.dtsi" -#include "tegra234-p3740-0002.dtsi" / { model = "NVIDIA IGX Orin Development Kit"; @@ -20,6 +20,32 @@ chosen { }; bus@0 { + aconnect@2900000 { + ahub@2900800 { + i2s@2901300 { + ports { + port@1 { + endpoint { + dai-format = "i2s"; + remote-endpoint = <&rt5640_ep>; + }; + }; + }; + }; + + i2s@2901500 { + ports { + port@1 { + endpoint { + bitclock-master; + frame-master; + }; + }; + }; + }; + }; + }; + serial@3100000 { compatible = "nvidia,tegra194-hsuart"; reset-names = "serial"; @@ -45,6 +71,40 @@ i2c@31b0000 { i2c@31c0000 { status = "okay"; + rt5640: audio-codec@1c { + compatible = "realtek,rt5640"; + reg = <0x1c>; + interrupt-parent = <&gpio>; + interrupts = ; + clocks = 
<&bpmp TEGRA234_CLK_AUD_MCLK>; + clock-names = "mclk"; + + realtek,dmic1-data-pin = ; + realtek,dmic2-data-pin = ; + realtek,jack-detect-source = ; + + sound-name-prefix = "CVB-RT"; + + port { + rt5640_ep: endpoint { + remote-endpoint = <&i2s4_dap>; + mclk-fs = <256>; + }; + }; + }; + + /* carrier board ID EEPROM */ + eeprom@55 { + compatible = "atmel,24c02"; + reg = <0x55>; + + label = "system"; + vcc-supply = <&vdd_1v8_ls>; + address-width = <8>; + pagesize = <8>; + size = <256>; + read-only; + }; }; i2c@31e0000 { @@ -60,6 +120,115 @@ hda@3510000 { status = "okay"; }; + padctl@3520000 { + status = "okay"; + + pads { + usb2 { + lanes { + usb2-0 { + nvidia,function = "xusb"; + status = "okay"; + }; + + usb2-1 { + nvidia,function = "xusb"; + status = "okay"; + }; + + usb2-2 { + nvidia,function = "xusb"; + status = "okay"; + }; + + usb2-3 { + nvidia,function = "xusb"; + status = "okay"; + }; + }; + }; + + usb3 { + lanes { + usb3-0 { + nvidia,function = "xusb"; + status = "okay"; + }; + + usb3-1 { + nvidia,function = "xusb"; + status = "okay"; + }; + + usb3-2 { + nvidia,function = "xusb"; + status = "okay"; + }; + }; + }; + }; + + ports { + usb2-0 { + mode = "otg"; + usb-role-switch; + status = "okay"; + }; + + usb2-1 { + mode = "host"; + status = "okay"; + }; + + usb2-2 { + mode = "host"; + status = "okay"; + }; + + usb2-3 { + mode = "host"; + status = "okay"; + }; + + usb3-0 { + nvidia,usb2-companion = <2>; + status = "okay"; + }; + + usb3-1 { + nvidia,usb2-companion = <0>; + status = "okay"; + }; + + usb3-2 { + nvidia,usb2-companion = <1>; + status = "okay"; + }; + }; + }; + + usb@3550000 { + status = "okay"; + + phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>, + <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-1}>; + phy-names = "usb2-0", "usb3-0"; + }; + + usb@3610000 { + status = "okay"; + + phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>, + <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>, + <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-2}>, + <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-3}>, + <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-0}>, + <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-1}>, + <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-2}>; + phy-names = "usb2-0", "usb2-1", "usb2-2", "usb2-3", + "usb3-0", "usb3-1", "usb3-2"; + }; + fuse@3810000 { status = "okay"; }; @@ -70,6 +239,37 @@ i2c@c240000 { i2c@c250000 { status = "okay"; + + power-sensor@41 { + compatible = "ti,ina3221"; + reg = <0x41>; + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + reg = <0x0>; + label = "CVB_ATX_12V"; + shunt-resistor-micro-ohms = <2000>; + }; + + input@1 { + reg = <0x1>; + label = "CVB_ATX_3V3"; + shunt-resistor-micro-ohms = <2000>; + }; + + input@2 { + reg = <0x2>; + label = "CVB_ATX_5V"; + shunt-resistor-micro-ohms = <2000>; + }; + }; + + power-sensor@44 { + compatible = "ti,ina219"; + reg = <0x44>; + shunt-resistor = <2000>; + }; }; host1x@13e00000 { @@ -235,4 +435,32 @@ sound { "CVB-RT DMIC1", "CVB-RT MIC", "CVB-RT DMIC2", "CVB-RT MIC"; }; + + vdd_3v3_dp: regulator-vdd-3v3-dp { + compatible = "regulator-fixed"; + regulator-name = "VDD_3V3_DP"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vdd_3v3_sys>; + gpio = <&gpio TEGRA234_MAIN_GPIO(H, 6) 0>; + enable-active-high; + regulator-always-on; + }; + + vdd_3v3_sys: regulator-vdd-3v3-sys { + compatible = "regulator-fixed"; + regulator-name = "VDD_3V3_SYS"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + vdd_3v3_wifi: 
regulator-vdd-3v3-wifi { + compatible = "regulator-fixed"; + regulator-name = "VDD_3V3_WIFI"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio TEGRA234_MAIN_GPIO(G, 3) GPIO_ACTIVE_HIGH>; + regulator-boot-on; + enable-active-high; + }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi deleted file mode 100644 index 527f2f3aee3ad4..00000000000000 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi +++ /dev/null @@ -1,215 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include - -/ { - compatible = "nvidia,p3740-0002"; - - bus@0 { - aconnect@2900000 { - ahub@2900800 { - i2s@2901300 { - ports { - port@1 { - endpoint { - dai-format = "i2s"; - remote-endpoint = <&rt5640_ep>; - }; - }; - }; - }; - - i2s@2901500 { - ports { - port@1 { - endpoint { - bitclock-master; - frame-master; - }; - }; - }; - }; - }; - }; - - i2c@31c0000 { - rt5640: audio-codec@1c { - compatible = "realtek,rt5640"; - reg = <0x1c>; - interrupt-parent = <&gpio>; - interrupts = ; - clocks = <&bpmp TEGRA234_CLK_AUD_MCLK>; - clock-names = "mclk"; - - realtek,dmic1-data-pin = ; - realtek,dmic2-data-pin = ; - realtek,jack-detect-source = ; - - sound-name-prefix = "CVB-RT"; - - port { - rt5640_ep: endpoint { - remote-endpoint = <&i2s4_dap>; - mclk-fs = <256>; - }; - }; - }; - - /* carrier board ID EEPROM */ - eeprom@55 { - compatible = "atmel,24c02"; - reg = <0x55>; - - label = "system"; - vcc-supply = <&vdd_1v8_ls>; - address-width = <8>; - pagesize = <8>; - size = <256>; - read-only; - }; - }; - - padctl@3520000 { - vclamp-usb-supply = <&vdd_1v8_ao>; - avdd-usb-supply = <&vdd_3v3_ao>; - status = "okay"; - - pads { - usb2 { - lanes { - usb2-0 { - nvidia,function = "xusb"; - status = "okay"; - }; - - usb2-1 { - nvidia,function = "xusb"; - status = "okay"; - }; - - usb2-2 { - nvidia,function = "xusb"; - status = "okay"; - }; - - usb2-3 { - nvidia,function = "xusb"; - status = "okay"; - }; - }; - }; - - usb3 { - lanes { - usb3-0 { - nvidia,function = "xusb"; - status = "okay"; - }; - - usb3-1 { - nvidia,function = "xusb"; - status = "okay"; - }; - - usb3-2 { - nvidia,function = "xusb"; - status = "okay"; - }; - }; - }; - }; - - ports { - usb2-0 { - mode = "otg"; - usb-role-switch; - status = "okay"; - vbus-supply = <&vdd_5v0_sys>; - }; - - usb2-1 { - mode = "host"; - status = "okay"; - vbus-supply = <&vdd_5v0_sys>; - }; - - usb2-2 { - mode = "host"; - status = "okay"; - vbus-supply = <&vdd_5v0_sys>; - }; - - usb2-3 { - mode = "host"; - status = "okay"; - vbus-supply = <&vdd_5v0_sys>; - }; - - usb3-0 { - nvidia,usb2-companion = <2>; - status = "okay"; - }; - - usb3-1 { - nvidia,usb2-companion = <0>; - status = "okay"; - }; - - usb3-2 { - nvidia,usb2-companion = <1>; - status = "okay"; - }; - }; - }; - - usb@3550000 { - status = "okay"; - - phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>, - <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-1}>; - phy-names = "usb2-0", "usb3-0"; - }; - - usb@3610000 { - status = "okay"; - - phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>, - <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>, - <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-2}>, - <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-3}>, - <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-0}>, - <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-1}>, - <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-2}>; - phy-names = "usb2-0", "usb2-1", "usb2-2", "usb2-3", - "usb3-0", "usb3-1", "usb3-2"; - }; - }; - - vdd_3v3_dp: 
regulator-vdd-3v3-dp { - compatible = "regulator-fixed"; - regulator-name = "VDD_3V3_DP"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - vin-supply = <&vdd_3v3_sys>; - gpio = <&gpio TEGRA234_MAIN_GPIO(H, 6) 0>; - enable-active-high; - regulator-always-on; - }; - - vdd_3v3_sys: regulator-vdd-3v3-sys { - compatible = "regulator-fixed"; - regulator-name = "VDD_3V3_SYS"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - }; - - vdd_3v3_wifi: regulator-vdd-3v3-wifi { - compatible = "regulator-fixed"; - regulator-name = "VDD_3V3_WIFI"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio TEGRA234_MAIN_GPIO(G, 3) GPIO_ACTIVE_HIGH>; - regulator-boot-on; - enable-active-high; - }; -}; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts index 82a59e33c46c9b..5dc974bb360cd3 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767-0000.dts @@ -7,24 +7,7 @@ / { compatible = "nvidia,p3768-0000+p3767-0000", "nvidia,p3767-0000", "nvidia,tegra234"; model = "NVIDIA Jetson Orin NX Engineering Reference Developer Kit"; - aliases { - serial1 = &uarta; - serial2 = &uarte; - }; - bus@0 { - serial@3100000 { - compatible = "nvidia,tegra194-hsuart"; - reset-names = "serial"; - status = "okay"; - }; - - serial@3140000 { - compatible = "nvidia,tegra194-hsuart"; - reset-names = "serial"; - status = "okay"; - }; - hda@3510000 { nvidia,model = "NVIDIA Jetson Orin NX HDA"; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi index 6d64a24fa25193..19340d13f789f0 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi @@ -9,6 +9,8 @@ / { aliases { serial0 = &tcu; + serial1 = &uarta; + serial2 = &uarte; }; chosen { @@ -16,6 +18,18 @@ chosen { }; bus@0 { + serial@3100000 { + compatible = "nvidia,tegra194-hsuart"; + reset-names = "serial"; + status = "okay"; + }; + + serial@3140000 { + compatible = "nvidia,tegra194-hsuart"; + reset-names = "serial"; + status = "okay"; + }; + i2c@3160000 { status = "okay"; @@ -172,6 +186,18 @@ pcie@14160000 { phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3"; }; + pcie-ep@14160000 {/* C4 - End Point */ + phys = <&p2u_hsio_4>, <&p2u_hsio_5>, <&p2u_hsio_6>, + <&p2u_hsio_7>; + phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3"; + reset-gpios = <&gpio + TEGRA234_MAIN_GPIO(L, 1) + GPIO_ACTIVE_LOW>; + nvidia,refclk-select-gpios = <&gpio_aon + TEGRA234_AON_GPIO(AA, 4) + GPIO_ACTIVE_HIGH>; + }; + /* C7 - M.2 Key-M */ pcie@141e0000 { status = "okay"; diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi index f2e2d8d6845bf1..984c85eab41afd 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi @@ -2763,6 +2763,8 @@ uarta: serial@3100000 { interrupts = ; clocks = <&bpmp TEGRA234_CLK_UARTA>; resets = <&bpmp TEGRA234_RESET_UARTA>; + dmas = <&gpcdma 8>, <&gpcdma 8>; + dma-names = "rx", "tx"; status = "disabled"; }; @@ -4840,6 +4842,37 @@ pcie@14160000 { status = "disabled"; }; + pcie-ep@14160000 { + compatible = "nvidia,tegra234-pcie-ep"; + power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX4BB>; + reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x36040000 0x0 0x00040000 /* iATU_DMA 
reg space (256K) */ + 0x00 0x36080000 0x0 0x00040000 /* DBI space (256K) */ + 0x21 0x40000000 0x3 0x00000000>; /* Address Space (12G) */ + reg-names = "appl", "atu_dma", "dbi", "addr_space"; + num-lanes = <4>; + clocks = <&bpmp TEGRA234_CLK_PEX0_C4_CORE>; + clock-names = "core"; + resets = <&bpmp TEGRA234_RESET_PEX0_CORE_4_APB>, + <&bpmp TEGRA234_RESET_PEX0_CORE_4>; + reset-names = "apb", "core"; + + interrupts = ; /* controller interrupt */ + interrupt-names = "intr"; + nvidia,bpmp = <&bpmp 4>; + nvidia,enable-ext-refclk; + nvidia,aspm-cmrt-us = <60>; + nvidia,aspm-pwr-on-t-us = <20>; + nvidia,aspm-l0s-entrance-latency-us = <3>; + + interconnects = <&mc TEGRA234_MEMORY_CLIENT_PCIE4R &emc>, + <&mc TEGRA234_MEMORY_CLIENT_PCIE4W &emc>; + interconnect-names = "dma-mem", "write"; + iommus = <&smmu_niso0 TEGRA234_SID_PCIE4>; + dma-coherent; + status = "disabled"; + }; + pcie@14180000 { compatible = "nvidia,tegra234-pcie"; power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX4BA>; diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 0e5c810304fbef..ae002c7cf1268a 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -48,18 +48,24 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-grandmax.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-grandprimelte.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-gt510.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-gt58.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-j3ltetw.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-j5.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-j5x.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-rossa.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-serranove.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-thwc-uf896.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-thwc-ufi001c.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt86518.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt86528.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-wingtech-wt88047.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8916-yiming-uz801v3.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8929-wingtech-wt82918hd.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8939-huawei-kiwi.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8939-longcheer-l9100.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8939-samsung-a7.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8939-sony-xperia-kanuti-tulip.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8939-wingtech-wt82918.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8939-wingtech-wt82918hd.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8953-motorola-potter.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8953-xiaomi-daisy.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8953-xiaomi-mido.dtb @@ -69,6 +75,7 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8956-sony-xperia-loire-kugo.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8956-sony-xperia-loire-suzu.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8992-lg-bullhead-rev-10.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8992-lg-bullhead-rev-101.dtb +dtb-$(CONFIG_ARCH_QCOM) += msm8992-lg-h815.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8992-msft-lumia-octagon-talkman.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8992-xiaomi-libra.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8994-huawei-angler-rev-101.dtb @@ -261,7 +268,10 @@ dtb-$(CONFIG_ARCH_QCOM) += sm8650-hdk-display-card.dtb dtb-$(CONFIG_ARCH_QCOM) += sm8650-hdk.dtb dtb-$(CONFIG_ARCH_QCOM) += sm8650-mtp.dtb dtb-$(CONFIG_ARCH_QCOM) += sm8650-qrd.dtb +dtb-$(CONFIG_ARCH_QCOM) += x1e78100-lenovo-thinkpad-t14s.dtb dtb-$(CONFIG_ARCH_QCOM) += x1e80100-asus-vivobook-s15.dtb dtb-$(CONFIG_ARCH_QCOM) += x1e80100-crd.dtb dtb-$(CONFIG_ARCH_QCOM) += x1e80100-lenovo-yoga-slim7x.dtb +dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus13.dtb 
+dtb-$(CONFIG_ARCH_QCOM) += x1e80100-microsoft-romulus15.dtb dtb-$(CONFIG_ARCH_QCOM) += x1e80100-qcp.dtb diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi index 0a74ed4f72cc77..71328b22353114 100644 --- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi @@ -7,6 +7,7 @@ #include #include +#include #include / { @@ -208,6 +209,7 @@ gcc: clock-controller@1800000 { reg = <0x01800000 0x80000>; #clock-cells = <1>; #reset-cells = <1>; + #interconnect-cells = <1>; clocks = <&xo_board>, <&sleep_clk>, <0>, @@ -327,11 +329,9 @@ usb: usb@8af8800 { "dm_hs_phy_irq"; clocks = <&gcc GCC_USB0_MASTER_CLK>, - <&gcc GCC_SNOC_USB_CLK>, <&gcc GCC_USB0_SLEEP_CLK>, <&gcc GCC_USB0_MOCK_UTMI_CLK>; clock-names = "core", - "iface", "sleep", "mock_utmi"; @@ -342,6 +342,9 @@ usb: usb@8af8800 { #address-cells = <1>; #size-cells = <1>; ranges; + interconnects = <&gcc MASTER_SNOC_USB &gcc SLAVE_SNOC_USB>, + <&gcc MASTER_SNOC_USB &gcc SLAVE_SNOC_USB>; + interconnect-names = "usb-ddr", "apps-usb"; status = "disabled"; diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi index e1e45da7f787ea..8edd535a188f2d 100644 --- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi @@ -168,7 +168,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-ipq6018"; + compatible = "qcom,rpm-ipq6018", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; regulators { diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi index 48dfafea46a747..08a82a5cf66758 100644 --- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi @@ -181,7 +181,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-ipq9574"; + compatible = "qcom,rpm-ipq9574", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts index 3b7fdb6797a942..2cc54eaf72027a 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts +++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts @@ -125,6 +125,26 @@ led@2 { }; }; }; + + flash-led-controller@53 { + compatible = "silergy,sy7802"; + reg = <0x53>; + + enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>; + + pinctrl-0 = <&camera_rear_flash_default>; + pinctrl-names = "default"; + + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + function = LED_FUNCTION_FLASH; + color = ; + led-sources = <0>, <1>; + }; + }; }; &blsp_i2c3 { @@ -278,6 +298,13 @@ camera_front_flash_default: camera-front-flash-default-state { bias-disable; }; + camera_rear_flash_default: camera-rear-flash-default-state { + pins = "gpio9", "gpio16", "gpio117"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + gpio_keys_default: gpio-keys-default-state { pins = "gpio107"; function = "gpio"; diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi index 81b3e0760154f7..7a7e99b015d9bf 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-fortuna-common.dtsi @@ -262,6 +262,8 @@ touchscreen: touchscreen@20 { pinctrl-0 = <&tsp_int_default>; pinctrl-names = "default"; + + linux,keycodes = ; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts 
b/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts index 135df1739dbda1..5ddb69bf8e78a9 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts +++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-grandmax.dts @@ -47,12 +47,34 @@ &battery { constant-charge-voltage-max-microvolt = <4400000>; }; +&blsp_i2c5 { + status = "okay"; + + touchscreen@50 { + compatible = "imagis,ist3038"; + reg = <0x50>; + + interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>; + + touchscreen-size-x = <720>; + touchscreen-size-y = <1280>; + + vdd-supply = <&reg_vdd_tsp_a>; + vddio-supply = <&pm8916_l6>; + + pinctrl-0 = <&ts_int_default>; + pinctrl-names = "default"; + + linux,keycodes = ; + }; +}; + &reg_motor_vdd { gpio = <&tlmm 72 GPIO_ACTIVE_HIGH>; }; &reg_touch_key { - status = "disabled"; + status = "disabled"; /* Using Imagis touch key */ }; &sound { diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts index 579312ed53ce1a..3d6d9dd3b8a89a 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts +++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-gt58.dts @@ -75,6 +75,7 @@ touchscreen@20 { touchscreen-size-x = <768>; touchscreen-size-y = <1024>; + linux,keycodes = ; vcca-supply = <&reg_vdd_tsp>; vdd-supply = <&pm8916_l6>; diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-j3-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-j3-common.dtsi new file mode 100644 index 00000000000000..1d74cccc438a2c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-j3-common.dtsi @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "msm8916-samsung-j5-common.dtsi" + +/ { + reserved-memory { + /delete-node/ tz-apps@85500000; + + /* Additional memory used by Samsung firmware modifications */ + tz-apps@85800000 { + reg = <0x0 0x85800000 0x0 0x800000>; + no-map; + }; + }; + + reg_vdd_tsp_a: regulator-vdd-tsp-a { + compatible = "regulator-fixed"; + regulator-name = "vdd_tsp_a"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + + gpio = <&tlmm 16 GPIO_ACTIVE_HIGH>; + enable-active-high; + + pinctrl-0 = <&tsp_ldo_en_default>; + pinctrl-names = "default"; + }; +}; + +&accelerometer { + vdd-supply = <&pm8916_l5>; + vddio-supply = <&pm8916_l5>; + + mount-matrix = "0", "-1", "0", + "1", "0", "0", + "0", "0", "-1"; +}; + +&gpio_hall_sensor { + status = "disabled"; +}; + +&i2c_muic { + /* GPIO pins vary depending on model variant */ +}; + +&i2c_sensors { + /* GPIO pins vary depending on model variant */ +}; + +&touchscreen { + vdd-supply = <&reg_vdd_tsp_a>; +}; + +&tlmm { + tsp_ldo_en_default: tsp-ldo-en-default-state { + pins = "gpio16"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-j3ltetw.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-j3ltetw.dts new file mode 100644 index 00000000000000..a26d2fd13c94d3 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-j3ltetw.dts @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/dts-v1/; + +#include "msm8916-samsung-j3-common.dtsi" + +/ { + model = "Samsung Galaxy J3 (2016) (SM-J320YZ)"; + compatible = "samsung,j3ltetw", "qcom,msm8916"; + chassis-type = "handset"; +}; + +&i2c_muic { + sda-gpios = <&tlmm 0 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; + scl-gpios = <&tlmm 1 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; +}; + +&i2c_sensors { + /* I2C2 */ + sda-gpios = <&tlmm 6 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; + scl-gpios = <&tlmm 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>; +}; + +&muic_i2c_default { +
pins = "gpio0", "gpio1"; +}; + +&sensors_i2c_default { + /* I2C2 */ + pins = "gpio6", "gpio7"; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts index 1981bb71f6a928..3413b0970c4abc 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts +++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-rossa.dts @@ -16,6 +16,26 @@ &battery { constant-charge-voltage-max-microvolt = <4400000>; }; +&blsp_i2c5 { + touchscreen@50 { + compatible = "imagis,ist3038"; + reg = <0x50>; + + interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>; + + touchscreen-size-x = <480>; + touchscreen-size-y = <800>; + + vdd-supply = <®_vdd_tsp_a>; + vddio-supply = <&pm8916_l6>; + + pinctrl-0 = <&tsp_int_default>; + pinctrl-names = "default"; + + linux,keycodes = ; + }; +}; + &mpss_mem { /* Firmware for rossa needs more space */ reg = <0x0 0x86800000 0x0 0x5800000>; diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86518.dts b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86518.dts new file mode 100644 index 00000000000000..3cfa80e38a9e3e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86518.dts @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/dts-v1/; + +#include "msm8916-wingtech-wt865x8.dtsi" + +/ { + model = "Lenovo A6000 (Wingtech WT86518)"; + compatible = "wingtech,wt86518", "qcom,msm8916"; + chassis-type = "handset"; + + speaker_amp: audio-amplifier { + compatible = "awinic,aw8738"; + + pinctrl-0 = <&spk_ext_pa_default>; + pinctrl-names = "default"; + + mode-gpios = <&tlmm 119 GPIO_ACTIVE_HIGH>; + sound-name-prefix = "Speaker Amp"; + awinic,mode = <1>; + }; +}; + +&blsp_i2c2 { + accelerometer@e { + compatible = "kionix,kxcj91008"; + reg = <0xe>; + + vdd-supply = <&pm8916_l6>; + vddio-supply = <&pm8916_l6>; + + mount-matrix = "0", "-1", "0", + "-1", "0", "0", + "0", "0", "1"; + }; +}; + +&headphones_switch { + VCC-supply = <&pm8916_l17>; +}; + +&pm8916_bms { + power-supplies = <&pm8916_charger>; +}; + +&pm8916_charger { + qcom,fast-charge-safe-current = <900000>; + qcom,fast-charge-safe-voltage = <4300000>; + + monitored-battery = <&battery>; + + status = "okay"; +}; + +&sound { + model = "wt88047"; + widgets = "Speaker", "Speaker", + "Headphone", "Headphones"; + pin-switches = "Speaker", "Headphones"; + audio-routing = "Speaker", "Speaker Amp OUT", + "Speaker Amp IN", "HPH_R", + "Headphones", "Headphones Switch OUTL", + "Headphones", "Headphones Switch OUTR", + "Headphones Switch INL", "HPH_L", + "Headphones Switch INR", "HPH_R", + "AMIC1", "MIC BIAS Internal1", + "AMIC2", "MIC BIAS Internal2"; + aux-devs = <&speaker_amp>, <&headphones_switch>; +}; + +&usb { + dr_mode = "peripheral"; + extcon = <&pm8916_charger>; +}; + +&usb_hs_phy { + extcon = <&pm8916_charger>; +}; + +&tlmm { + spk_ext_pa_default: spk-ext-pa-default-state { + pins = "gpio119"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86528.dts b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86528.dts new file mode 100644 index 00000000000000..ec2c4dcd3eada9 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt86528.dts @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/dts-v1/; + +#include "msm8916-wingtech-wt865x8.dtsi" + +/ { + model = "Lenovo A6010 (Wingtech WT86528)"; + compatible = "wingtech,wt86528", "qcom,msm8916"; + chassis-type = "handset"; + + /* left AW8736 */ + speaker_amp_left: audio-amplifier-left { + compatible = "awinic,aw8738"; + + 
pinctrl-0 = <&spk_ext_pa_left_default>; + pinctrl-names = "default"; + + mode-gpios = <&tlmm 119 GPIO_ACTIVE_HIGH>; + sound-name-prefix = "Speaker Amp L"; + awinic,mode = <3>; + }; + + /* right AW8736 */ + speaker_amp_right: audio-amplifier-right { + compatible = "awinic,aw8738"; + + pinctrl-0 = <&spk_ext_pa_right_default>; + pinctrl-names = "default"; + + mode-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>; + sound-name-prefix = "Speaker Amp R"; + awinic,mode = <3>; + }; + + gpio-leds { + compatible = "gpio-leds"; + + pinctrl-0 = <&gpio_leds_default>; + pinctrl-names = "default"; + + led-0 { + gpios = <&tlmm 16 GPIO_ACTIVE_LOW>; + label = "red"; + default-state = "off"; + retain-state-suspended; + }; + + led-1 { + gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>; + label = "green"; + default-state = "off"; + retain-state-suspended; + }; + }; + + usb_id: usb-id { + compatible = "linux,extcon-usb-gpio"; + id-gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&usb_id_default>; + pinctrl-names = "default"; + }; +}; + +&blsp_i2c2 { + magnetometer@c { + compatible = "asahi-kasei,ak09911"; + reg = <0x0c>; + + vdd-supply = <&pm8916_l17>; + vid-supply = <&pm8916_l6>; + }; + + imu@68 { + compatible = "invensense,mpu6880"; + reg = <0x68>; + + interrupts-extended = <&tlmm 115 IRQ_TYPE_EDGE_RISING>; + + vdd-supply = <&pm8916_l17>; + vddio-supply = <&pm8916_l6>; + + pinctrl-0 = <&imu_default>; + pinctrl-names = "default"; + + mount-matrix = "1", "0", "0", + "0", "-1", "0", + "0", "0", "1"; + }; +}; + +&pm8916_codec { + qcom,micbias1-ext-cap; +}; + +&sound { + model = "wt86528"; + widgets = "Speaker", "Speaker", + "Headphone", "Headphones"; + pin-switches = "Speaker", "Headphones"; + audio-routing = "Speaker", "Speaker Amp L OUT", + "Speaker", "Speaker Amp R OUT", + "Speaker Amp L IN", "HPH_L", + "Speaker Amp R IN", "HPH_R", + "Headphones", "Headphones Switch OUTL", + "Headphones", "Headphones Switch OUTR", + "Headphones Switch INL", "HPH_L", + "Headphones Switch INR", "HPH_R", + "AMIC1", "MIC BIAS External1", + "AMIC2", "MIC BIAS Internal2", + "AMIC3", "MIC BIAS External1"; + aux-devs = <&speaker_amp_left>, <&speaker_amp_right>, <&headphones_switch>; +}; + +&usb { + extcon = <&usb_id>, <&usb_id>; +}; + +&usb_hs_phy { + extcon = <&usb_id>; +}; + +&tlmm { + gpio_leds_default: gpio-leds-default-state { + pins = "gpio16", "gpio17"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + imu_default: imu-default-state { + pins = "gpio115"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + spk_ext_pa_left_default: spk-ext-pa-left-default-state { + pins = "gpio119"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + spk_ext_pa_right_default: spk-ext-pa-right-default-state { + pins = "gpio121"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + usb_id_default: usb-id-default-state { + pins = "gpio110"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi new file mode 100644 index 00000000000000..1a7c347dc3f084 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt865x8.dtsi @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "msm8916-pm8916.dtsi" +#include "msm8916-modem-qdsp6.dtsi" + +#include +#include + +/ { + aliases { + mmc0 = &sdhc_1; /* eMMC */ + mmc1 = &sdhc_2; /* SD card */ + serial0 = &blsp_uart2; + }; + + chosen { + stdout-path = "serial0"; + }; + + headphones_switch: 
audio-switch { + compatible = "simple-audio-amplifier"; + + pinctrl-0 = <&headphones_switch_default>; + pinctrl-names = "default"; + + enable-gpios = <&tlmm 120 GPIO_ACTIVE_HIGH>; + sound-name-prefix = "Headphones Switch"; + }; + + backlight: backlight { + compatible = "pwm-backlight"; + pwms = <&pm8916_pwm 0 100000>; + + brightness-levels = <0 255>; + num-interpolated-steps = <255>; + default-brightness-level = <255>; + }; + + battery: battery { + compatible = "simple-battery"; + voltage-min-design-microvolt = <3400000>; + voltage-max-design-microvolt = <4350000>; + energy-full-design-microwatt-hours = <8740000>; + charge-full-design-microamp-hours = <2300000>; + + ocv-capacity-celsius = <25>; + ocv-capacity-table-0 = <4328000 100>, <4266000 95>, <4208000 90>, + <4154000 85>, <4102000 80>, <4062000 75>, <3992000 70>, + <3960000 65>, <3914000 60>, <3870000 55>, <3840000 50>, + <3818000 45>, <3800000 40>, <3784000 35>, <3770000 30>, + <3756000 25>, <3736000 20>, <3714000 16>, <3696000 13>, + <3690000 11>, <3689000 10>, <3688000 9>, <3686000 8>, + <3682000 7>, <3670000 6>, <3639000 5>, <3592000 4>, + <3530000 3>, <3448000 2>, <3320000 1>, <3000000 0>; + }; + + gpio-keys { + compatible = "gpio-keys"; + + pinctrl-0 = <&gpio_keys_default>; + pinctrl-names = "default"; + + label = "GPIO Buttons"; + + volume-up-button { + label = "Volume Up"; + gpios = <&tlmm 107 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; +}; + +&blsp_i2c5 { + status = "okay"; + + touchscreen@38 { + compatible = "edt,edt-ft5306"; + reg = <0x38>; + + interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>; + + vcc-supply = <&pm8916_l17>; + iovcc-supply = <&pm8916_l6>; + + reset-gpios = <&tlmm 12 GPIO_ACTIVE_LOW>; + + touchscreen-size-x = <720>; + touchscreen-size-y = <1280>; + + pinctrl-0 = <&touchscreen_default>; + pinctrl-names = "default"; + }; +}; + +&blsp_uart2 { + status = "okay"; +}; + +&mpss_mem { + reg = <0x0 0x86800000 0x0 0x5500000>; +}; + +&pm8916_bms { + monitored-battery = <&battery>; + status = "okay"; +}; + +&pm8916_codec { + qcom,micbias-lvl = <2800>; + qcom,mbhc-vthreshold-low = <75 150 237 450 500>; + qcom,mbhc-vthreshold-high = <75 150 237 450 500>; + qcom,hphl-jack-type-normally-open; +}; + +&pm8916_pwm { + pinctrl-0 = <&pwm_out>; + pinctrl-names = "default"; + status = "okay"; +}; + +&pm8916_resin { + linux,code = ; + status = "okay"; +}; + +&pm8916_rpm_regulators { + pm8916_l17: l17 { + regulator-min-microvolt = <2850000>; + regulator-max-microvolt = <2850000>; + }; +}; + +&pm8916_vib { + status = "okay"; +}; + +&sdhc_1 { + status = "okay"; +}; + +&sdhc_2 { + pinctrl-0 = <&sdc2_default>; + pinctrl-1 = <&sdc2_sleep>; + pinctrl-names = "default", "sleep"; + + non-removable; + + status = "okay"; +}; + +&usb { + status = "okay"; +}; + +&venus { + status = "okay"; +}; + +&venus_mem { + status = "okay"; +}; + +&wcnss { + status = "okay"; +}; + +&wcnss_iris { + compatible = "qcom,wcn3620"; +}; + +&wcnss_mem { + status = "okay"; +}; + +&tlmm { + gpio_keys_default: gpio-keys-default-state { + pins = "gpio107"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + + headphones_switch_default: headphones-switch-default-state { + pins = "gpio120"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + touchscreen_default: touchscreen-default-state { + touchscreen-pins { + pins = "gpio13"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + + reset-pins { + pins = "gpio12"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + }; +}; + +&pm8916_mpps { + pwm_out: 
mpp4-state { + pins = "mpp4"; + function = "digital"; + power-source = ; + output-low; + qcom,dtest = <1>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 7383bcc603abc2..0ee44706b70ba3 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -312,7 +312,7 @@ smd-edge { qcom,smd-edge = <15>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8916"; + compatible = "qcom,rpm-msm8916", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/msm8929-pm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8929-pm8916.dtsi new file mode 100644 index 00000000000000..c2bf25997e9bd5 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8929-pm8916.dtsi @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * msm8929-pm8916.dtsi describes common properties (e.g. regulator connections) + * that apply to most devices that make use of the MSM8929 SoC and PM8916 PMIC. + * Many regulators have a fixed purpose in the original reference design and + * were rarely re-used for different purposes. Devices that deviate from the + * typical reference design should not make use of this include and instead add + * the necessary properties in the board-specific device tree. + */ + +#include "msm8929.dtsi" +#include "pm8916.dtsi" + +&mdss_dsi0 { + vdda-supply = <&pm8916_l2>; + vddio-supply = <&pm8916_l6>; +}; + +&mdss_dsi0_phy { + vddio-supply = <&pm8916_l6>; +}; + +&mdss_dsi1 { + vdda-supply = <&pm8916_l2>; + vddio-supply = <&pm8916_l6>; +}; + +&mdss_dsi1_phy { + vddio-supply = <&pm8916_l6>; +}; + +&mpss { + pll-supply = <&pm8916_l7>; +}; + +&pm8916_codec { + vdd-cdc-io-supply = <&pm8916_l5>; + vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>; + vdd-micbias-supply = <&pm8916_l13>; +}; + +&rpm_requests { + pm8916_rpm_regulators: regulators { + compatible = "qcom,rpm-pm8916-regulators"; + vdd_l1_l2_l3-supply = <&pm8916_s3>; + vdd_l4_l5_l6-supply = <&pm8916_s4>; + vdd_l7-supply = <&pm8916_s4>; + + /* pm8916_s1 is managed by rpmpd (MSM8939_VDDMDCX) */ + /* pm8916_s2 is managed by rpmpd (MSM8939_VDDCX) */ + pm8916_s3: s3 { + regulator-min-microvolt = <1250000>; + regulator-max-microvolt = <1350000>; + regulator-always-on; /* Needed for L2 */ + }; + pm8916_s4: s4 { + regulator-min-microvolt = <1850000>; + regulator-max-microvolt = <2150000>; + regulator-always-on; /* Needed for L5/L7 */ + }; + + /* + * Some of the regulators are unused or managed by another + * processor (e.g. the modem). We should still define nodes for + * them to ensure the vote from the application processor can be + * dropped in case the regulators are already on during boot. + * + * The labels for these nodes are omitted on purpose because + * boards should configure a proper voltage before using them. 
+ */ + l1 {}; + + pm8916_l2: l2 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-always-on; /* Needed for LPDDR RAM */ + }; + + /* pm8916_l3 is managed by rpmpd (MSM8939_VDDMX) */ + + l4 {}; + + pm8916_l5: l5 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; /* Needed for most digital I/O */ + }; + + pm8916_l6: l6 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + pm8916_l7: l7 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; /* Needed for CPU PLL */ + }; + + pm8916_l8: l8 { + regulator-min-microvolt = <2900000>; + regulator-max-microvolt = <2900000>; + }; + + pm8916_l9: l9 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + l10 {}; + + pm8916_l11: l11 { + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + regulator-allow-set-load; + regulator-system-load = <200000>; + }; + + pm8916_l12: l12 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2950000>; + }; + + pm8916_l13: l13 { + regulator-min-microvolt = <3075000>; + regulator-max-microvolt = <3075000>; + }; + + l14 {}; + l15 {}; + l16 {}; + l17 {}; + l18 {}; + }; +}; + +&sdhc_1 { + vmmc-supply = <&pm8916_l8>; + vqmmc-supply = <&pm8916_l5>; +}; + +&sdhc_2 { + vmmc-supply = <&pm8916_l11>; + vqmmc-supply = <&pm8916_l12>; +}; + +&usb_hs_phy { + v1p8-supply = <&pm8916_l7>; + v3p3-supply = <&pm8916_l13>; +}; + +&wcnss { + vddpx-supply = <&pm8916_l7>; +}; + +&wcnss_iris { + vddxo-supply = <&pm8916_l7>; + vddrfa-supply = <&pm8916_s3>; + vddpa-supply = <&pm8916_l9>; + vdddig-supply = <&pm8916_l5>; +}; + diff --git a/arch/arm64/boot/dts/qcom/msm8929-wingtech-wt82918hd.dts b/arch/arm64/boot/dts/qcom/msm8929-wingtech-wt82918hd.dts new file mode 100644 index 00000000000000..8feecffb16bf70 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8929-wingtech-wt82918hd.dts @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/dts-v1/; + +#include "msm8929-pm8916.dtsi" +#include "msm8939-wingtech-wt82918.dtsi" + +/ { + model = "Lenovo Vibe K5 (HD) (Wingtech WT82918)"; + compatible = "wingtech,wt82918hd", "qcom,msm8929"; + chassis-type = "handset"; +}; + +&touchscreen { + touchscreen-size-x = <720>; + touchscreen-size-y = <1280>; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8929.dtsi b/arch/arm64/boot/dts/qcom/msm8929.dtsi new file mode 100644 index 00000000000000..ef7bb1ced95437 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8929.dtsi @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "msm8939.dtsi" + +&opp_table { + /delete-node/ opp-550000000; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts b/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts index e3404c4455cf8d..b845da4fa23e61 100644 --- a/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts +++ b/arch/arm64/boot/dts/qcom/msm8939-longcheer-l9100.dts @@ -159,6 +159,26 @@ led@2 { }; }; }; + + flash-led-controller@53 { + compatible = "silergy,sy7802"; + reg = <0x53>; + + enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>; + + pinctrl-0 = <&camera_rear_flash_default>; + pinctrl-names = "default"; + + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + function = LED_FUNCTION_FLASH; + color = ; + led-sources = <0>, <1>; + }; + }; }; &blsp_i2c3 { @@ -318,6 +338,13 @@ camera_front_flash_default: camera-front-flash-default-state { bias-disable; }; + camera_rear_flash_default: 
camera-rear-flash-default-state { + pins = "gpio9", "gpio16", "gpio51"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + gpio_hall_sensor_default: gpio-hall-sensor-default-state { pins = "gpio20"; function = "gpio"; diff --git a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts index 91acdb16022711..ceba6e73b21126 100644 --- a/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts +++ b/arch/arm64/boot/dts/qcom/msm8939-samsung-a7.dts @@ -198,7 +198,7 @@ touchkey@20 { }; }; - pwm_vibrator: pwm-vibrator { + pwm_vibrator: pwm { compatible = "clk-pwm"; #pwm-cells = <2>; diff --git a/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dts b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dts new file mode 100644 index 00000000000000..aa6b699aa2a191 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dts @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/dts-v1/; + +#include "msm8939-pm8916.dtsi" +#include "msm8939-wingtech-wt82918.dtsi" + +/ { + model = "Lenovo Vibe K5 (Wingtech WT82918)"; + compatible = "wingtech,wt82918", "qcom,msm8939"; + chassis-type = "handset"; +}; + +&touchscreen { + touchscreen-size-x = <1080>; + touchscreen-size-y = <1920>; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi new file mode 100644 index 00000000000000..800e0747a2f79f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918.dtsi @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "msm8916-modem-qdsp6.dtsi" + +#include +#include +#include + +/ { + aliases { + mmc0 = &sdhc_1; /* eMMC */ + mmc1 = &sdhc_2; /* SD card */ + serial0 = &blsp_uart2; + }; + + chosen { + stdout-path = "serial0"; + }; + + backlight: backlight { + compatible = "pwm-backlight"; + pwms = <&pm8916_pwm 0 100000>; + brightness-levels = <0 255>; + num-interpolated-steps = <255>; + default-brightness-level = <128>; + }; + + flash-led-controller { + compatible = "sgmicro,sgm3140"; + enable-gpios = <&tlmm 31 GPIO_ACTIVE_HIGH>; + flash-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>; + + pinctrl-0 = <&camera_front_flash_default>; + pinctrl-names = "default"; + + flash_led: led { + function = LED_FUNCTION_FLASH; + color = ; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + pinctrl-0 = <&gpio_keys_default>; + pinctrl-names = "default"; + + label = "GPIO Buttons"; + + button-volume-up { + label = "Volume Up"; + gpios = <&tlmm 107 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + + pinctrl-0 = <&gpio_leds_default>; + pinctrl-names = "default"; + + led-0 { + gpios = <&tlmm 69 GPIO_ACTIVE_LOW>; + function = LED_FUNCTION_CHARGING; + color = ; + default-state = "off"; + retain-state-suspended; + }; + + led-1 { + gpios = <&tlmm 36 GPIO_ACTIVE_HIGH>; + function = LED_FUNCTION_STATUS; + color = ; + default-state = "off"; + retain-state-suspended; + }; + }; + + usb_id: usb-id { + compatible = "linux,extcon-usb-gpio"; + id-gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&usb_id_default>; + pinctrl-names = "default"; + }; +}; + +&blsp_i2c2 { + status = "okay"; + + accelerometer@68 { + compatible = "invensense,icm20608"; + reg = <0x68>; + + interrupts-extended = <&tlmm 115 IRQ_TYPE_EDGE_FALLING>; + + pinctrl-0 = <&accelerometer_default>; + pinctrl-names = "default"; + + vdd-supply = <&pm8916_l17>; + vddio-supply = <&pm8916_l6>; + + mount-matrix = "-1", "0", "0", + "0", "1", "0", + "0", "0", "1"; + }; +}; + +&blsp_i2c5 { + 
status = "okay"; + + touchscreen: touchscreen@38 { + compatible = "edt,edt-ft5306"; + reg = <0x38>; + + interrupts-extended = <&tlmm 13 IRQ_TYPE_LEVEL_LOW>; + + pinctrl-0 = <&touchscreen_default>; + pinctrl-names = "default"; + + vcc-supply = <&pm8916_l17>; + iovcc-supply = <&pm8916_l6>; + + reset-gpios = <&tlmm 12 GPIO_ACTIVE_LOW>; + }; +}; + +&blsp_uart2 { + status = "okay"; +}; + +&mpss_mem { + reg = <0x0 0x86800000 0x0 0x5500000>; +}; + +&pm8916_pwm { + pinctrl-0 = <&pwm_out>; + pinctrl-names = "default"; + status = "okay"; +}; + +&pm8916_resin { + linux,code = ; + status = "okay"; +}; + +&pm8916_rpm_regulators { + pm8916_l17: l17 { + regulator-min-microvolt = <2850000>; + regulator-max-microvolt = <2850000>; + }; +}; + +&pm8916_vib { + status = "okay"; +}; + +&sdhc_1 { + status = "okay"; +}; + +&sdhc_2 { + pinctrl-0 = <&sdc2_default>; + pinctrl-1 = <&sdc2_sleep>; + pinctrl-names = "default", "sleep"; + non-removable; + status = "okay"; +}; + +&usb { + extcon = <&usb_id>, <&usb_id>; + status = "okay"; +}; + +&usb_hs_phy { + extcon = <&usb_id>; +}; + +&wcnss { + status = "okay"; +}; + +&wcnss_iris { + compatible = "qcom,wcn3620"; +}; + +&wcnss_mem { + status = "okay"; +}; + +&tlmm { + accelerometer_default: accelerometer-default-state { + pins = "gpio115"; + function = "gpio"; + drive-strength = <6>; + bias-pull-up; + }; + + camera_front_flash_default: camera-front-flash-default-state { + pins = "gpio31", "gpio32"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + gpio_keys_default: gpio-keys-default-state { + pins = "gpio107"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + + gpio_leds_default: gpio-leds-default-state { + pins = "gpio36", "gpio69"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + touchscreen_default: touchscreen-default-state { + reset-pins { + pins = "gpio12"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + touchscreen-pins { + pins = "gpio13"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + usb_id_default: usb-id-default-state { + pins = "gpio110"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; +}; + +&pm8916_mpps { + pwm_out: mpp4-state { + pins = "mpp4"; + function = "digital"; + power-source = ; + output-low; + qcom,dtest = <1>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918hd.dts b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918hd.dts new file mode 100644 index 00000000000000..59414db4250829 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8939-wingtech-wt82918hd.dts @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/dts-v1/; + +#include "msm8939-pm8916.dtsi" +#include "msm8939-wingtech-wt82918.dtsi" + +/ { + model = "Lenovo Vibe K5 (HD) (Wingtech WT82918)"; + compatible = "wingtech,wt82918hdhw39", "qcom,msm8939"; + chassis-type = "handset"; +}; + +&touchscreen { + touchscreen-size-x = <720>; + touchscreen-size-y = <1280>; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi index 46d9480cd46456..28634789a8a970 100644 --- a/arch/arm64/boot/dts/qcom/msm8939.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi @@ -252,7 +252,7 @@ smd-edge { qcom,smd-edge = <15>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8936"; + compatible = "qcom,rpm-msm8936", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi index a4bfb624fb8ada..d20fd3d7c46e4f 
100644 --- a/arch/arm64/boot/dts/qcom/msm8953.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi @@ -199,7 +199,7 @@ smd-edge { qcom,smd-edge = <15>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8953"; + compatible = "qcom,rpm-msm8953", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi index d62dcb76fa4854..06af6e5ec578ed 100644 --- a/arch/arm64/boot/dts/qcom/msm8976.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi @@ -247,7 +247,7 @@ smd-edge { qcom,smd-edge = <15>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8976"; + compatible = "qcom,rpm-msm8976", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; rpmcc: clock-controller { @@ -663,6 +663,11 @@ tsens: thermal-sensor@4a9000 { #thermal-sensor-cells = <1>; }; + restart@4ab000 { + compatible = "qcom,pshold"; + reg = <0x004ab000 0x4>; + }; + tlmm: pinctrl@1000000 { compatible = "qcom,msm8976-pinctrl"; reg = <0x01000000 0x300000>; diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts b/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts new file mode 100644 index 00000000000000..38b305816d2f76 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8992-lg-h815.dts @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * MSM8992 LG G4 (h815) device tree. + * + * Copyright (c) 2024, Alexander Reimelt + */ + +/dts-v1/; + +#include "msm8992.dtsi" +#include "pm8994.dtsi" +#include "pmi8994.dtsi" +#include + +/* different mapping */ +/delete-node/ &cont_splash_mem; + +/* disabled downstream */ +/delete-node/ &dfps_data_mem; + +/ { + model = "LG G4 (H815)"; + compatible = "lg,h815", "qcom,msm8992"; + chassis-type = "handset"; + + qcom,msm-id = <0xfb 0x0>; + qcom,pmic-id = <0x10009 0x1000a 0x0 0x0>; + qcom,board-id = <0xb64 0x0>; + + /* psci is broken */ + /delete-node/ psci; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + spin-table@6000000 { + reg = <0x0 0x06000000 0x0 0x00001000>; + no-map; + }; + + ramoops@ff00000 { + compatible = "ramoops"; + reg = <0x0 0x0ff00000 0x0 0x00100000>; + console-size = <0x20000>; + pmsg-size = <0x20000>; + record-size = <0x10000>; + ecc-size = <0x10>; + }; + + cont_splash_mem: fb@3400000 { + reg = <0x0 0x03400000 0x0 0x00c00000>; + no-map; + }; + + crash_fb_mem: crash-fb@4000000 { + reg = <0x0 0x04000000 0x0 0x00c00000>; + no-map; + }; + }; + + gpio-hall-sensor { + compatible = "gpio-keys"; + + pinctrl-0 = <&hall_sensor_default>; + pinctrl-names = "default"; + + label = "Hall Effect Sensor"; + + event-hall-sensor { + gpios = <&tlmm 75 GPIO_ACTIVE_LOW>; + label = "hall effect sensor"; + linux,input-type = ; + linux,code = ; + linux,can-disable; + wakeup-source; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + key-vol-up { + label = "volume up"; + gpios = <&pm8994_gpios 3 GPIO_ACTIVE_LOW>; + linux,code = ; + wakeup-source; + debounce-interval = <15>; + }; + }; +}; + +&CPU0 { + enable-method = "spin-table"; +}; + +&CPU1 { + enable-method = "spin-table"; +}; + +&CPU2 { + enable-method = "spin-table"; +}; + +&CPU3 { + enable-method = "spin-table"; +}; + +&CPU4 { + enable-method = "spin-table"; +}; + +&CPU5 { + enable-method = "spin-table"; +}; + +&pm8994_resin { + linux,code = ; + status = "okay"; +}; + +&rpm_requests { + regulators-0 { + compatible = "qcom,rpm-pm8994-regulators"; + + vdd_s3-supply = <&vph_pwr>; + vdd_s4-supply = <&vph_pwr>; + vdd_s5-supply = <&vph_pwr>; + vdd_s7-supply = <&vph_pwr>; + vdd_l1-supply = 
<&pmi8994_s1>; + vdd_l2_26_28-supply = <&pm8994_s3>; + vdd_l3_11-supply = <&pm8994_s3>; + vdd_l4_27_31-supply = <&pm8994_s3>; + vdd_l5_7-supply = <&pm8994_s5>; + vdd_l6_12_32-supply = <&pm8994_s5>; + vdd_l8_16_30-supply = <&vph_pwr>; + vdd_l9_10_18_22-supply = <&pmi8994_bby>; + vdd_l13_19_23_24-supply = <&pmi8994_bby>; + vdd_l14_15-supply = <&pm8994_s5>; + vdd_l17_29-supply = <&pmi8994_bby>; + vdd_l20_21-supply = <&pmi8994_bby>; + vdd_l25-supply = <&pm8994_s5>; + vdd_lvs1_2-supply = <&pm8994_s4>; + + pm8994_s3: s3 { + regulator-min-microvolt = <1300000>; + regulator-max-microvolt = <1300000>; + }; + + /* sdhc1 vqmmc and bcm */ + pm8994_s4: s4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-system-load = <325000>; + regulator-allow-set-load; + }; + + pm8994_s5: s5 { + regulator-min-microvolt = <2150000>; + regulator-max-microvolt = <2150000>; + }; + + /* sdhc2 vqmmc */ + pm8994_l13: l13 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2950000>; + regulator-system-load = <22000>; + regulator-allow-set-load; + }; + + /* sdhc1 vmmc */ + pm8994_l20: l20 { + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + regulator-system-load = <570000>; + regulator-allow-set-load; + }; + + /* sdhc2 vmmc */ + pm8994_l21: l21 { + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + regulator-system-load = <800000>; + regulator-allow-set-load; + }; + }; + + regulators-1 { + compatible = "qcom,rpm-pmi8994-regulators"; + + vdd_s1-supply = <&vph_pwr>; + vdd_bst_byp-supply = <&vph_pwr>; + + pmi8994_s1: s1 { + regulator-min-microvolt = <1025000>; + regulator-max-microvolt = <1025000>; + }; + + /* S2 & S3 - VDD_GFX */ + + pmi8994_bby: boost-bypass { + regulator-min-microvolt = <3150000>; + regulator-max-microvolt = <3600000>; + }; + }; +}; + +&sdhc1 { + mmc-hs400-1_8v; + vmmc-supply = <&pm8994_l20>; + vqmmc-supply = <&pm8994_s4>; + non-removable; + status = "okay"; +}; + +&sdhc2 { + vmmc-supply = <&pm8994_l21>; + vqmmc-supply = <&pm8994_l13>; + cd-gpios = <&pm8994_gpios 8 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +&tlmm { + hall_sensor_default: hall-sensor-default-state { + pins = "gpio75"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi index 917fa246857d7e..fc2a7f13f690ee 100644 --- a/arch/arm64/boot/dts/qcom/msm8994.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi @@ -188,7 +188,7 @@ smd-edge { qcom,remote-pid = <6>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8994"; + compatible = "qcom,rpm-msm8994", "qcom,smd-rpm"; qcom,smd-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 0fd2b1b944a5e6..e5966724f37c69 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -472,7 +472,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8996"; + compatible = "qcom,rpm-msm8996", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index 7f44807b1b9745..9aa9c5cee355b2 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -352,7 +352,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-msm8998"; + 
compatible = "qcom,rpm-msm8998", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; rpmcc: clock-controller { @@ -1586,6 +1586,33 @@ gpucc: clock-controller@5065000 { "gpll0"; }; + lpass_q6_smmu: iommu@5100000 { + compatible = "qcom,msm8998-smmu-v2", "qcom,smmu-v2"; + reg = <0x05100000 0x40000>; + clocks = <&gcc HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>; + clock-names = "bus"; + + #global-interrupts = <0>; + #iommu-cells = <1>; + interrupts = + , + , + , + , + , + , + , + , + , + , + , + , + ; + + power-domains = <&gcc LPASS_ADSP_GDSC>; + status = "disabled"; + }; + remoteproc_slpi: remoteproc@5800000 { compatible = "qcom,msm8998-slpi-pas"; reg = <0x05800000 0x4040>; diff --git a/arch/arm64/boot/dts/qcom/pm8950.dtsi b/arch/arm64/boot/dts/qcom/pm8950.dtsi index f03095779de057..ed72c61018130d 100644 --- a/arch/arm64/boot/dts/qcom/pm8950.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8950.dtsi @@ -18,7 +18,7 @@ pmic@0 { #address-cells = <1>; #size-cells = <0>; - pon@800 { + pm8950_pon: pon@800 { compatible = "qcom,pm8916-pon"; reg = <0x0800>; mode-bootloader = <0x2>; @@ -31,6 +31,14 @@ pwrkey { bias-pull-up; linux,code = ; }; + + pm8950_resin: resin { + compatible = "qcom,pm8941-resin"; + interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>; + debounce = <15625>; + bias-pull-up; + status = "disabled"; + }; }; pm8950_temp: temp-alarm@2400 { diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi index b4822cb17a377f..4aff437263a293 100644 --- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi +++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi @@ -84,9 +84,8 @@ pmic@3 { #address-cells = <1>; #size-cells = <0>; - pmi8950_pwm: pwm@b000 { + pmi8950_pwm: pwm { compatible = "qcom,pmi8950-pwm"; - reg = <0xb000 0x100>; #pwm-cells = <2>; status = "disabled"; diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi index 36d6a1fb553ac3..9ee59e6d2cdb9d 100644 --- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi +++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi @@ -57,8 +57,11 @@ pmi8994_wled: wled@d800 { interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>, <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>; interrupt-names = "ovp", "short"; + label = "backlight"; + qcom,cabc; qcom,external-pfet; + status = "disabled"; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi index 8f3be4c75db389..79bc42ffb6a1ff 100644 --- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi +++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi @@ -215,7 +215,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-qcm2290"; + compatible = "qcom,rpm-qcm2290", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts index a0668f767e4bf9..84c45419cb8d13 100644 --- a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts +++ b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts @@ -641,6 +641,21 @@ &remoteproc_wpss { status = "okay"; }; +&sdc2_clk { + bias-disable; + drive-strength = <16>; +}; + +&sdc2_cmd { + bias-pull-up; + drive-strength = <10>; +}; + +&sdc2_data { + bias-pull-up; + drive-strength = <10>; +}; + &sdhc_1 { non-removable; no-sd; @@ -652,9 +667,27 @@ &sdhc_1 { status = "okay"; }; +&sdhc_2 { + status = "okay"; + + pinctrl-0 = <&sdc2_clk>, <&sdc2_cmd>, <&sdc2_data>, <&sd_cd>; + pinctrl-1 = <&sdc2_clk_sleep>, <&sdc2_cmd_sleep>, <&sdc2_data_sleep>, <&sd_cd>; + + vmmc-supply = <&vreg_l9c_2p96>; + vqmmc-supply = <&vreg_l6c_2p96>; + + cd-gpios = <&tlmm 91 
GPIO_ACTIVE_LOW>; +}; + &tlmm { gpio-reserved-ranges = <32 2>, /* ADSP */ <48 4>; /* NFC */ + + sd_cd: sd-cd-state { + pins = "gpio91"; + function = "gpio"; + bias-pull-up; + }; }; &uart5 { diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi index c291bbed6073ea..cddc16bac0cea4 100644 --- a/arch/arm64/boot/dts/qcom/qcs404.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi @@ -177,7 +177,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-qcs404"; + compatible = "qcom,rpm-qcs404", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/sa8155p.dtsi b/arch/arm64/boot/dts/qcom/sa8155p.dtsi index 9e70effc72e105..d678ed822378b5 100644 --- a/arch/arm64/boot/dts/qcom/sa8155p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155p.dtsi @@ -9,6 +9,10 @@ #include "sm8150.dtsi" +&camcc { + power-domains = <&rpmhpd SA8155P_CX>; +}; + &dispcc { power-domains = <&rpmhpd SA8155P_CX>; }; diff --git a/arch/arm64/boot/dts/qcom/sa8295p-adp.dts b/arch/arm64/boot/dts/qcom/sa8295p-adp.dts index 78e933c42c3144..2fd1dafe63ce7a 100644 --- a/arch/arm64/boot/dts/qcom/sa8295p-adp.dts +++ b/arch/arm64/boot/dts/qcom/sa8295p-adp.dts @@ -9,6 +9,7 @@ #include #include #include +#include #include "sa8540p.dtsi" #include "sa8540p-pmics.dtsi" @@ -109,6 +110,46 @@ edp3_connector_in: endpoint { }; }; + regulator-usb2-vbus { + compatible = "regulator-fixed"; + regulator-name = "USB2_VBUS"; + gpio = <&pmm8540c_gpios 9 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&usb2_en>; + pinctrl-names = "default"; + enable-active-high; + regulator-always-on; + }; + + regulator-usb3-vbus { + compatible = "regulator-fixed"; + regulator-name = "USB3_VBUS"; + gpio = <&pmm8540e_gpios 5 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&usb3_en>; + pinctrl-names = "default"; + enable-active-high; + regulator-always-on; + }; + + regulator-usb4-vbus { + compatible = "regulator-fixed"; + regulator-name = "USB4_VBUS"; + gpio = <&pmm8540g_gpios 5 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&usb4_en>; + pinctrl-names = "default"; + enable-active-high; + regulator-always-on; + }; + + regulator-usb5-vbus { + compatible = "regulator-fixed"; + regulator-name = "USB5_VBUS"; + gpio = <&pmm8540g_gpios 9 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&usb5_en>; + pinctrl-names = "default"; + enable-active-high; + regulator-always-on; + }; + reserved-memory { gpu_mem: gpu-mem@8bf00000 { reg = <0 0x8bf00000 0 0x2000>; @@ -637,6 +678,10 @@ &usb_1_qmpphy { status = "okay"; }; +&usb_2 { + status = "okay"; +}; + &usb_2_hsphy0 { vdda-pll-supply = <&vreg_l5a>; vdda18-supply = <&vreg_l7g>; @@ -697,6 +742,44 @@ max20411_en: max20411-en-state { }; }; +&pmm8540c_gpios { + usb2_en: usb2-en-state { + pins = "gpio9"; + function = "normal"; + qcom,drive-strength = ; + output-enable; + power-source = <0>; + }; +}; + +&pmm8540e_gpios { + usb3_en: usb3-en-state { + pins = "gpio5"; + function = "normal"; + qcom,drive-strength = ; + output-enable; + power-source = <0>; + }; +}; + +&pmm8540g_gpios { + usb4_en: usb4-en-state { + pins = "gpio5"; + function = "normal"; + qcom,drive-strength = ; + output-enable; + power-source = <0>; + }; + + usb5_en: usb5-en-state { + pins = "gpio9"; + function = "normal"; + qcom,drive-strength = ; + output-enable; + power-source = <0>; + }; +}; + &tlmm { pcie2a_default: pcie2a-default-state { clkreq-n-pins { diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi index 2a6170623ea95a..0c1b21def4b62c 100644 --- 
a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi @@ -702,6 +702,31 @@ &pcie1_phy { status = "okay"; }; +&remoteproc_adsp { + firmware-name = "qcom/sa8775p/adsp.mbn"; + status = "okay"; +}; + +&remoteproc_cdsp0 { + firmware-name = "qcom/sa8775p/cdsp0.mbn"; + status = "okay"; +}; + +&remoteproc_cdsp1 { + firmware-name = "qcom/sa8775p/cdsp1.mbn"; + status = "okay"; +}; + +&remoteproc_gpdsp0 { + firmware-name = "qcom/sa8775p/gpdsp0.mbn"; + status = "okay"; +}; + +&remoteproc_gpdsp1 { + firmware-name = "qcom/sa8775p/gpdsp1.mbn"; + status = "okay"; +}; + &uart10 { compatible = "qcom,geni-debug-uart"; pinctrl-0 = <&qup_uart10_default>; diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi index 23f1b2e5e62471..e8dbc8d820a64f 100644 --- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include #include @@ -42,6 +44,8 @@ CPU0: cpu@0 { enable-method = "psci"; qcom,freq-domain = <&cpufreq_hw 0>; next-level-cache = <&L2_0>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <100>; L2_0: l2-cache { compatible = "cache"; cache-level = <2>; @@ -62,6 +66,8 @@ CPU1: cpu@100 { enable-method = "psci"; qcom,freq-domain = <&cpufreq_hw 0>; next-level-cache = <&L2_1>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <100>; L2_1: l2-cache { compatible = "cache"; cache-level = <2>; @@ -77,6 +83,8 @@ CPU2: cpu@200 { enable-method = "psci"; qcom,freq-domain = <&cpufreq_hw 0>; next-level-cache = <&L2_2>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <100>; L2_2: l2-cache { compatible = "cache"; cache-level = <2>; @@ -92,6 +100,8 @@ CPU3: cpu@300 { enable-method = "psci"; qcom,freq-domain = <&cpufreq_hw 0>; next-level-cache = <&L2_3>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <100>; L2_3: l2-cache { compatible = "cache"; cache-level = <2>; @@ -107,6 +117,8 @@ CPU4: cpu@10000 { enable-method = "psci"; qcom,freq-domain = <&cpufreq_hw 1>; next-level-cache = <&L2_4>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <100>; L2_4: l2-cache { compatible = "cache"; cache-level = <2>; @@ -128,6 +140,8 @@ CPU5: cpu@10100 { enable-method = "psci"; qcom,freq-domain = <&cpufreq_hw 1>; next-level-cache = <&L2_5>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <100>; L2_5: l2-cache { compatible = "cache"; cache-level = <2>; @@ -143,6 +157,8 @@ CPU6: cpu@10200 { enable-method = "psci"; qcom,freq-domain = <&cpufreq_hw 1>; next-level-cache = <&L2_6>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <100>; L2_6: l2-cache { compatible = "cache"; cache-level = <2>; @@ -158,6 +174,8 @@ CPU7: cpu@10300 { enable-method = "psci"; qcom,freq-domain = <&cpufreq_hw 1>; next-level-cache = <&L2_7>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <100>; L2_7: l2-cache { compatible = "cache"; cache-level = <2>; @@ -203,6 +221,48 @@ core3 { }; }; }; + + idle-states { + entry-method = "psci"; + + GOLD_CPU_SLEEP_0: cpu-sleep-0 { + compatible = "arm,idle-state"; + idle-state-name = "gold-power-collapse"; + arm,psci-suspend-param = <0x40000003>; + entry-latency-us = <549>; + exit-latency-us = <901>; + min-residency-us = <1774>; + local-timer-stop; + }; + + GOLD_RAIL_CPU_SLEEP_0: cpu-sleep-1 { + compatible = "arm,idle-state"; + idle-state-name = "gold-rail-power-collapse"; + arm,psci-suspend-param = <0x40000004>; + entry-latency-us = <702>; + exit-latency-us = <1061>; + 
min-residency-us = <4488>; + local-timer-stop; + }; + }; + + domain-idle-states { + CLUSTER_SLEEP_GOLD: cluster-sleep-0 { + compatible = "domain-idle-state"; + arm,psci-suspend-param = <0x41000044>; + entry-latency-us = <2752>; + exit-latency-us = <3048>; + min-residency-us = <6118>; + }; + + CLUSTER_SLEEP_APSS_RSC_PC: cluster-sleep-1 { + compatible = "domain-idle-state"; + arm,psci-suspend-param = <0x42000144>; + entry-latency-us = <3263>; + exit-latency-us = <6562>; + min-residency-us = <9987>; + }; + }; }; dummy-sink { @@ -332,6 +392,79 @@ pmu { psci { compatible = "arm,psci-1.0"; method = "smc"; + + CPU_PD0: power-domain-cpu0 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_0_PD>; + domain-idle-states = <&GOLD_CPU_SLEEP_0>, + <&GOLD_RAIL_CPU_SLEEP_0>; + }; + + CPU_PD1: power-domain-cpu1 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_0_PD>; + domain-idle-states = <&GOLD_CPU_SLEEP_0>, + <&GOLD_RAIL_CPU_SLEEP_0>; + }; + + CPU_PD2: power-domain-cpu2 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_0_PD>; + domain-idle-states = <&GOLD_CPU_SLEEP_0>, + <&GOLD_RAIL_CPU_SLEEP_0>; + }; + + CPU_PD3: power-domain-cpu3 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_0_PD>; + domain-idle-states = <&GOLD_CPU_SLEEP_0>, + <&GOLD_RAIL_CPU_SLEEP_0>; + }; + + CPU_PD4: power-domain-cpu4 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_1_PD>; + domain-idle-states = <&GOLD_CPU_SLEEP_0>, + <&GOLD_RAIL_CPU_SLEEP_0>; + }; + + CPU_PD5: power-domain-cpu5 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_1_PD>; + domain-idle-states = <&GOLD_CPU_SLEEP_0>, + <&GOLD_RAIL_CPU_SLEEP_0>; + }; + + CPU_PD6: power-domain-cpu6 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_1_PD>; + domain-idle-states = <&GOLD_CPU_SLEEP_0>, + <&GOLD_RAIL_CPU_SLEEP_0>; + }; + + CPU_PD7: power-domain-cpu7 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_1_PD>; + domain-idle-states = <&GOLD_CPU_SLEEP_0>, + <&GOLD_RAIL_CPU_SLEEP_0>; + }; + + CLUSTER_0_PD: power-domain-cluster0 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_2_PD>; + domain-idle-states = <&CLUSTER_SLEEP_GOLD>; + }; + + CLUSTER_1_PD: power-domain-cluster1 { + #power-domain-cells = <0>; + power-domains = <&CLUSTER_2_PD>; + domain-idle-states = <&CLUSTER_SLEEP_GOLD>; + }; + + CLUSTER_2_PD: power-domain-cluster2 { + #power-domain-cells = <0>; + domain-idle-states = <&CLUSTER_SLEEP_APSS_RSC_PC>; + }; }; reserved-memory { @@ -564,6 +697,121 @@ cpucp_fw_mem: cpucp-fw@db200000 { }; }; + smp2p-adsp { + compatible = "qcom,smp2p"; + qcom,smem = <443>, <429>; + interrupts-extended = <&ipcc IPCC_CLIENT_LPASS + IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_LPASS IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <2>; + + smp2p_adsp_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_adsp_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + smp2p-cdsp0 { + compatible = "qcom,smp2p"; + qcom,smem = <94>, <432>; + interrupts-extended = <&ipcc IPCC_CLIENT_CDSP + IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_CDSP IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <5>; + + smp2p_cdsp0_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_cdsp0_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + 
#interrupt-cells = <2>; + }; + }; + + smp2p-cdsp1 { + compatible = "qcom,smp2p"; + qcom,smem = <617>, <616>; + interrupts-extended = <&ipcc IPCC_CLIENT_NSP1 + IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_NSP1 IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <12>; + + smp2p_cdsp1_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_cdsp1_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + smp2p-gpdsp0 { + compatible = "qcom,smp2p"; + qcom,smem = <617>, <616>; + interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP0 + IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_GPDSP0 IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <17>; + + smp2p_gpdsp0_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_gpdsp0_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + smp2p-gpdsp1 { + compatible = "qcom,smp2p"; + qcom,smem = <617>, <616>; + interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP1 + IPCC_MPROC_SIGNAL_SMP2P + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_GPDSP1 IPCC_MPROC_SIGNAL_SMP2P>; + + qcom,local-pid = <0>; + qcom,remote-pid = <18>; + + smp2p_gpdsp1_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_gpdsp1_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + soc: soc@0 { compatible = "simple-bus"; #address-cells = <2>; @@ -2892,6 +3140,101 @@ serdes1: phy@8902000 { status = "disabled"; }; + pmu@9091000 { + compatible = "qcom,sa8775p-llcc-bwmon", "qcom,sc7280-llcc-bwmon"; + reg = <0x0 0x9091000 0x0 0x1000>; + interrupts = ; + interconnects = <&mc_virt MASTER_LLCC QCOM_ICC_TAG_ACTIVE_ONLY + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>; + + operating-points-v2 = <&llcc_bwmon_opp_table>; + + llcc_bwmon_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-0 { + opp-peak-kBps = <762000>; + }; + + opp-1 { + opp-peak-kBps = <1720000>; + }; + + opp-2 { + opp-peak-kBps = <2086000>; + }; + + opp-3 { + opp-peak-kBps = <2601000>; + }; + + opp-4 { + opp-peak-kBps = <2929000>; + }; + + opp-5 { + opp-peak-kBps = <5931000>; + }; + + opp-6 { + opp-peak-kBps = <6515000>; + }; + + opp-7 { + opp-peak-kBps = <7984000>; + }; + + opp-8 { + opp-peak-kBps = <10437000>; + }; + + opp-9 { + opp-peak-kBps = <12195000>; + }; + }; + }; + + pmu@90b5400 { + compatible = "qcom,sa8775p-cpu-bwmon", "qcom,sdm845-bwmon"; + reg = <0x0 0x90b5400 0x0 0x600>; + interrupts = ; + interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY + &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>; + + operating-points-v2 = <&cpu_bwmon_opp_table>; + + cpu_bwmon_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-0 { + opp-peak-kBps = <9155000>; + }; + + opp-1 { + opp-peak-kBps = <12298000>; + }; + + opp-2 { + opp-peak-kBps = <14236000>; + }; + + opp-3 { + opp-peak-kBps = <16265000>; + }; + }; + + }; + + pmu@90b6400 { + compatible = "qcom,sa8775p-cpu-bwmon", "qcom,sdm845-bwmon"; + reg = <0x0 0x90b6400 0x0 0x600>; + interrupts = ; + interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY + &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ACTIVE_ONLY>; + + operating-points-v2 = <&cpu_bwmon_opp_table>; + }; + llcc: system-cache-controller@9200000 { compatible 
= "qcom,sa8775p-llcc"; reg = <0x0 0x09200000 0x0 0x80000>, @@ -3070,6 +3413,7 @@ apps_smmu: iommu@15000000 { reg = <0x0 0x15000000 0x0 0x100000>; #iommu-cells = <2>; #global-interrupts = <2>; + dma-coherent; interrupts = , , @@ -3208,6 +3552,7 @@ pcie_smmu: iommu@15200000 { reg = <0x0 0x15200000 0x0 0x80000>; #iommu-cells = <2>; #global-interrupts = <2>; + dma-coherent; interrupts = , , @@ -3445,6 +3790,92 @@ cpufreq_hw: cpufreq@18591000 { #freq-domain-cells = <1>; }; + remoteproc_gpdsp0: remoteproc@20c00000 { + compatible = "qcom,sa8775p-gpdsp0-pas"; + reg = <0x0 0x20c00000 0x0 0x10000>; + + interrupts-extended = <&intc GIC_SPI 768 IRQ_TYPE_EDGE_RISING>, + <&smp2p_gpdsp0_in 0 0>, + <&smp2p_gpdsp0_in 2 0>, + <&smp2p_gpdsp0_in 1 0>, + <&smp2p_gpdsp0_in 3 0>; + interrupt-names = "wdog", "fatal", "ready", + "handover", "stop-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&rpmhpd RPMHPD_CX>, + <&rpmhpd RPMHPD_MXC>; + power-domain-names = "cx", "mxc"; + + interconnects = <&gpdsp_anoc MASTER_DSP0 0 + &config_noc SLAVE_CLK_CTL 0>; + + memory-region = <&pil_gdsp0_mem>; + + qcom,qmp = <&aoss_qmp>; + + qcom,smem-states = <&smp2p_gpdsp0_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP0 + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_GPDSP0 + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + label = "gpdsp0"; + qcom,remote-pid = <17>; + }; + }; + + remoteproc_gpdsp1: remoteproc@21c00000 { + compatible = "qcom,sa8775p-gpdsp1-pas"; + reg = <0x0 0x21c00000 0x0 0x10000>; + + interrupts-extended = <&intc GIC_SPI 624 IRQ_TYPE_EDGE_RISING>, + <&smp2p_gpdsp1_in 0 0>, + <&smp2p_gpdsp1_in 2 0>, + <&smp2p_gpdsp1_in 1 0>, + <&smp2p_gpdsp1_in 3 0>; + interrupt-names = "wdog", "fatal", "ready", + "handover", "stop-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&rpmhpd RPMHPD_CX>, + <&rpmhpd RPMHPD_MXC>; + power-domain-names = "cx", "mxc"; + + interconnects = <&gpdsp_anoc MASTER_DSP1 0 + &config_noc SLAVE_CLK_CTL 0>; + + memory-region = <&pil_gdsp1_mem>; + + qcom,qmp = <&aoss_qmp>; + + qcom,smem-states = <&smp2p_gpdsp1_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_GPDSP1 + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_GPDSP1 + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + label = "gpdsp1"; + qcom,remote-pid = <18>; + }; + }; + ethernet1: ethernet@23000000 { compatible = "qcom,sa8775p-ethqos"; reg = <0x0 0x23000000 0x0 0x10000>, @@ -3464,6 +3895,12 @@ ethernet1: ethernet@23000000 { "ptp_ref", "phyaux"; + interconnects = <&aggre1_noc MASTER_EMAC_1 QCOM_ICC_TAG_ALWAYS + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, + <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS + &config_noc SLAVE_EMAC1_CFG QCOM_ICC_TAG_ALWAYS>; + interconnect-names = "mac-mem", "cpu-mac"; + power-domains = <&gcc EMAC1_GDSC>; phys = <&serdes1>; @@ -3499,6 +3936,12 @@ ethernet0: ethernet@23040000 { "ptp_ref", "phyaux"; + interconnects = <&aggre1_noc MASTER_EMAC QCOM_ICC_TAG_ALWAYS + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, + <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS + &config_noc SLAVE_EMAC_CFG QCOM_ICC_TAG_ALWAYS>; + interconnect-names = "mac-mem", "cpu-mac"; + power-domains = <&gcc EMAC0_GDSC>; phys = <&serdes0>; @@ -3514,6 +3957,569 @@ ethernet0: ethernet@23040000 { status = "disabled"; }; + + remoteproc_cdsp0: remoteproc@26300000 { + compatible = 
"qcom,sa8775p-cdsp0-pas"; + reg = <0x0 0x26300000 0x0 0x10000>; + + interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp0_in 0 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp0_in 2 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp0_in 1 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp0_in 3 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", + "handover", "stop-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&rpmhpd RPMHPD_CX>, + <&rpmhpd RPMHPD_MXC>, + <&rpmhpd RPMHPD_NSP0>; + power-domain-names = "cx", "mxc", "nsp"; + + interconnects = <&nspa_noc MASTER_CDSP_PROC 0 + &mc_virt SLAVE_EBI1 0>; + + memory-region = <&pil_cdsp0_mem>; + + qcom,qmp = <&aoss_qmp>; + + qcom,smem-states = <&smp2p_cdsp0_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_CDSP + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_CDSP + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + label = "cdsp"; + qcom,remote-pid = <5>; + + fastrpc { + compatible = "qcom,fastrpc"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + label = "cdsp"; + #address-cells = <1>; + #size-cells = <0>; + + compute-cb@1 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <1>; + iommus = <&apps_smmu 0x2141 0x04a0>, + <&apps_smmu 0x2161 0x04a0>, + <&apps_smmu 0x2181 0x0400>, + <&apps_smmu 0x21c1 0x04a0>, + <&apps_smmu 0x21e1 0x04a0>, + <&apps_smmu 0x2541 0x04a0>, + <&apps_smmu 0x2561 0x04a0>, + <&apps_smmu 0x2581 0x0400>, + <&apps_smmu 0x25c1 0x04a0>, + <&apps_smmu 0x25e1 0x04a0>; + dma-coherent; + }; + + compute-cb@2 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <2>; + iommus = <&apps_smmu 0x2142 0x04a0>, + <&apps_smmu 0x2162 0x04a0>, + <&apps_smmu 0x2182 0x0400>, + <&apps_smmu 0x21c2 0x04a0>, + <&apps_smmu 0x21e2 0x04a0>, + <&apps_smmu 0x2542 0x04a0>, + <&apps_smmu 0x2562 0x04a0>, + <&apps_smmu 0x2582 0x0400>, + <&apps_smmu 0x25c2 0x04a0>, + <&apps_smmu 0x25e2 0x04a0>; + dma-coherent; + }; + + compute-cb@3 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <3>; + iommus = <&apps_smmu 0x2143 0x04a0>, + <&apps_smmu 0x2163 0x04a0>, + <&apps_smmu 0x2183 0x0400>, + <&apps_smmu 0x21c3 0x04a0>, + <&apps_smmu 0x21e3 0x04a0>, + <&apps_smmu 0x2543 0x04a0>, + <&apps_smmu 0x2563 0x04a0>, + <&apps_smmu 0x2583 0x0400>, + <&apps_smmu 0x25c3 0x04a0>, + <&apps_smmu 0x25e3 0x04a0>; + dma-coherent; + }; + + compute-cb@4 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <4>; + iommus = <&apps_smmu 0x2144 0x04a0>, + <&apps_smmu 0x2164 0x04a0>, + <&apps_smmu 0x2184 0x0400>, + <&apps_smmu 0x21c4 0x04a0>, + <&apps_smmu 0x21e4 0x04a0>, + <&apps_smmu 0x2544 0x04a0>, + <&apps_smmu 0x2564 0x04a0>, + <&apps_smmu 0x2584 0x0400>, + <&apps_smmu 0x25c4 0x04a0>, + <&apps_smmu 0x25e4 0x04a0>; + dma-coherent; + }; + + compute-cb@5 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <5>; + iommus = <&apps_smmu 0x2145 0x04a0>, + <&apps_smmu 0x2165 0x04a0>, + <&apps_smmu 0x2185 0x0400>, + <&apps_smmu 0x21c5 0x04a0>, + <&apps_smmu 0x21e5 0x04a0>, + <&apps_smmu 0x2545 0x04a0>, + <&apps_smmu 0x2565 0x04a0>, + <&apps_smmu 0x2585 0x0400>, + <&apps_smmu 0x25c5 0x04a0>, + <&apps_smmu 0x25e5 0x04a0>; + dma-coherent; + }; + + compute-cb@6 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <6>; + iommus = <&apps_smmu 0x2146 0x04a0>, + <&apps_smmu 0x2166 0x04a0>, + <&apps_smmu 0x2186 0x0400>, + <&apps_smmu 0x21c6 0x04a0>, + <&apps_smmu 0x21e6 0x04a0>, + <&apps_smmu 0x2546 0x04a0>, + <&apps_smmu 0x2566 0x04a0>, + <&apps_smmu 0x2586 0x0400>, + 
<&apps_smmu 0x25c6 0x04a0>, + <&apps_smmu 0x25e6 0x04a0>; + dma-coherent; + }; + + compute-cb@7 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <7>; + iommus = <&apps_smmu 0x2147 0x04a0>, + <&apps_smmu 0x2167 0x04a0>, + <&apps_smmu 0x2187 0x0400>, + <&apps_smmu 0x21c7 0x04a0>, + <&apps_smmu 0x21e7 0x04a0>, + <&apps_smmu 0x2547 0x04a0>, + <&apps_smmu 0x2567 0x04a0>, + <&apps_smmu 0x2587 0x0400>, + <&apps_smmu 0x25c7 0x04a0>, + <&apps_smmu 0x25e7 0x04a0>; + dma-coherent; + }; + + compute-cb@8 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <8>; + iommus = <&apps_smmu 0x2148 0x04a0>, + <&apps_smmu 0x2168 0x04a0>, + <&apps_smmu 0x2188 0x0400>, + <&apps_smmu 0x21c8 0x04a0>, + <&apps_smmu 0x21e8 0x04a0>, + <&apps_smmu 0x2548 0x04a0>, + <&apps_smmu 0x2568 0x04a0>, + <&apps_smmu 0x2588 0x0400>, + <&apps_smmu 0x25c8 0x04a0>, + <&apps_smmu 0x25e8 0x04a0>; + dma-coherent; + }; + + compute-cb@9 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <9>; + iommus = <&apps_smmu 0x2149 0x04a0>, + <&apps_smmu 0x2169 0x04a0>, + <&apps_smmu 0x2189 0x0400>, + <&apps_smmu 0x21c9 0x04a0>, + <&apps_smmu 0x21e9 0x04a0>, + <&apps_smmu 0x2549 0x04a0>, + <&apps_smmu 0x2569 0x04a0>, + <&apps_smmu 0x2589 0x0400>, + <&apps_smmu 0x25c9 0x04a0>, + <&apps_smmu 0x25e9 0x04a0>; + dma-coherent; + }; + + compute-cb@10 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <10>; + iommus = <&apps_smmu 0x214a 0x04a0>, + <&apps_smmu 0x216a 0x04a0>, + <&apps_smmu 0x218a 0x0400>, + <&apps_smmu 0x21ca 0x04a0>, + <&apps_smmu 0x21ea 0x04a0>, + <&apps_smmu 0x254a 0x04a0>, + <&apps_smmu 0x256a 0x04a0>, + <&apps_smmu 0x258a 0x0400>, + <&apps_smmu 0x25ca 0x04a0>, + <&apps_smmu 0x25ea 0x04a0>; + dma-coherent; + }; + + compute-cb@11 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <11>; + iommus = <&apps_smmu 0x214b 0x04a0>, + <&apps_smmu 0x216b 0x04a0>, + <&apps_smmu 0x218b 0x0400>, + <&apps_smmu 0x21cb 0x04a0>, + <&apps_smmu 0x21eb 0x04a0>, + <&apps_smmu 0x254b 0x04a0>, + <&apps_smmu 0x256b 0x04a0>, + <&apps_smmu 0x258b 0x0400>, + <&apps_smmu 0x25cb 0x04a0>, + <&apps_smmu 0x25eb 0x04a0>; + dma-coherent; + }; + }; + }; + }; + + remoteproc_cdsp1: remoteproc@2a300000 { + compatible = "qcom,sa8775p-cdsp1-pas"; + reg = <0x0 0x2A300000 0x0 0x10000>; + + interrupts-extended = <&intc GIC_SPI 798 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp1_in 0 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp1_in 2 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp1_in 1 IRQ_TYPE_EDGE_RISING>, + <&smp2p_cdsp1_in 3 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", + "handover", "stop-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&rpmhpd RPMHPD_CX>, + <&rpmhpd RPMHPD_MXC>, + <&rpmhpd RPMHPD_NSP1>; + power-domain-names = "cx", "mxc", "nsp"; + + interconnects = <&nspb_noc MASTER_CDSP_PROC_B 0 + &mc_virt SLAVE_EBI1 0>; + + memory-region = <&pil_cdsp1_mem>; + + qcom,qmp = <&aoss_qmp>; + + qcom,smem-states = <&smp2p_cdsp1_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_NSP1 + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_NSP1 + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + label = "cdsp"; + qcom,remote-pid = <12>; + + fastrpc { + compatible = "qcom,fastrpc"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + label = "cdsp1"; + #address-cells = <1>; + #size-cells = <0>; + + compute-cb@1 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <1>; + iommus = <&apps_smmu 0x2941 0x04a0>, + <&apps_smmu 0x2961 0x04a0>, + <&apps_smmu 0x2981 
0x0400>, + <&apps_smmu 0x29c1 0x04a0>, + <&apps_smmu 0x29e1 0x04a0>, + <&apps_smmu 0x2d41 0x04a0>, + <&apps_smmu 0x2d61 0x04a0>, + <&apps_smmu 0x2d81 0x0400>, + <&apps_smmu 0x2dc1 0x04a0>, + <&apps_smmu 0x2de1 0x04a0>; + dma-coherent; + }; + + compute-cb@2 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <2>; + iommus = <&apps_smmu 0x2942 0x04a0>, + <&apps_smmu 0x2962 0x04a0>, + <&apps_smmu 0x2982 0x0400>, + <&apps_smmu 0x29c2 0x04a0>, + <&apps_smmu 0x29e2 0x04a0>, + <&apps_smmu 0x2d42 0x04a0>, + <&apps_smmu 0x2d62 0x04a0>, + <&apps_smmu 0x2d82 0x0400>, + <&apps_smmu 0x2dc2 0x04a0>, + <&apps_smmu 0x2de2 0x04a0>; + dma-coherent; + }; + + compute-cb@3 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <3>; + iommus = <&apps_smmu 0x2943 0x04a0>, + <&apps_smmu 0x2963 0x04a0>, + <&apps_smmu 0x2983 0x0400>, + <&apps_smmu 0x29c3 0x04a0>, + <&apps_smmu 0x29e3 0x04a0>, + <&apps_smmu 0x2d43 0x04a0>, + <&apps_smmu 0x2d63 0x04a0>, + <&apps_smmu 0x2d83 0x0400>, + <&apps_smmu 0x2dc3 0x04a0>, + <&apps_smmu 0x2de3 0x04a0>; + dma-coherent; + }; + + compute-cb@4 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <4>; + iommus = <&apps_smmu 0x2944 0x04a0>, + <&apps_smmu 0x2964 0x04a0>, + <&apps_smmu 0x2984 0x0400>, + <&apps_smmu 0x29c4 0x04a0>, + <&apps_smmu 0x29e4 0x04a0>, + <&apps_smmu 0x2d44 0x04a0>, + <&apps_smmu 0x2d64 0x04a0>, + <&apps_smmu 0x2d84 0x0400>, + <&apps_smmu 0x2dc4 0x04a0>, + <&apps_smmu 0x2de4 0x04a0>; + dma-coherent; + }; + + compute-cb@5 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <5>; + iommus = <&apps_smmu 0x2945 0x04a0>, + <&apps_smmu 0x2965 0x04a0>, + <&apps_smmu 0x2985 0x0400>, + <&apps_smmu 0x29c5 0x04a0>, + <&apps_smmu 0x29e5 0x04a0>, + <&apps_smmu 0x2d45 0x04a0>, + <&apps_smmu 0x2d65 0x04a0>, + <&apps_smmu 0x2d85 0x0400>, + <&apps_smmu 0x2dc5 0x04a0>, + <&apps_smmu 0x2de5 0x04a0>; + dma-coherent; + }; + + compute-cb@6 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <6>; + iommus = <&apps_smmu 0x2946 0x04a0>, + <&apps_smmu 0x2966 0x04a0>, + <&apps_smmu 0x2986 0x0400>, + <&apps_smmu 0x29c6 0x04a0>, + <&apps_smmu 0x29e6 0x04a0>, + <&apps_smmu 0x2d46 0x04a0>, + <&apps_smmu 0x2d66 0x04a0>, + <&apps_smmu 0x2d86 0x0400>, + <&apps_smmu 0x2dc6 0x04a0>, + <&apps_smmu 0x2de6 0x04a0>; + dma-coherent; + }; + + compute-cb@7 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <7>; + iommus = <&apps_smmu 0x2947 0x04a0>, + <&apps_smmu 0x2967 0x04a0>, + <&apps_smmu 0x2987 0x0400>, + <&apps_smmu 0x29c7 0x04a0>, + <&apps_smmu 0x29e7 0x04a0>, + <&apps_smmu 0x2d47 0x04a0>, + <&apps_smmu 0x2d67 0x04a0>, + <&apps_smmu 0x2d87 0x0400>, + <&apps_smmu 0x2dc7 0x04a0>, + <&apps_smmu 0x2de7 0x04a0>; + dma-coherent; + }; + + compute-cb@8 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <8>; + iommus = <&apps_smmu 0x2948 0x04a0>, + <&apps_smmu 0x2968 0x04a0>, + <&apps_smmu 0x2988 0x0400>, + <&apps_smmu 0x29c8 0x04a0>, + <&apps_smmu 0x29e8 0x04a0>, + <&apps_smmu 0x2d48 0x04a0>, + <&apps_smmu 0x2d68 0x04a0>, + <&apps_smmu 0x2d88 0x0400>, + <&apps_smmu 0x2dc8 0x04a0>, + <&apps_smmu 0x2de8 0x04a0>; + dma-coherent; + }; + + compute-cb@9 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <9>; + iommus = <&apps_smmu 0x2949 0x04a0>, + <&apps_smmu 0x2969 0x04a0>, + <&apps_smmu 0x2989 0x0400>, + <&apps_smmu 0x29c9 0x04a0>, + <&apps_smmu 0x29e9 0x04a0>, + <&apps_smmu 0x2d49 0x04a0>, + <&apps_smmu 0x2d69 0x04a0>, + <&apps_smmu 0x2d89 0x0400>, + <&apps_smmu 0x2dc9 0x04a0>, + <&apps_smmu 0x2de9 0x04a0>; + dma-coherent; + }; + + compute-cb@10 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <10>; + 
iommus = <&apps_smmu 0x294a 0x04a0>, + <&apps_smmu 0x296a 0x04a0>, + <&apps_smmu 0x298a 0x0400>, + <&apps_smmu 0x29ca 0x04a0>, + <&apps_smmu 0x29ea 0x04a0>, + <&apps_smmu 0x2d4a 0x04a0>, + <&apps_smmu 0x2d6a 0x04a0>, + <&apps_smmu 0x2d8a 0x0400>, + <&apps_smmu 0x2dca 0x04a0>, + <&apps_smmu 0x2dea 0x04a0>; + dma-coherent; + }; + + compute-cb@11 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <11>; + iommus = <&apps_smmu 0x294b 0x04a0>, + <&apps_smmu 0x296b 0x04a0>, + <&apps_smmu 0x298b 0x0400>, + <&apps_smmu 0x29cb 0x04a0>, + <&apps_smmu 0x29eb 0x04a0>, + <&apps_smmu 0x2d4b 0x04a0>, + <&apps_smmu 0x2d6b 0x04a0>, + <&apps_smmu 0x2d8b 0x0400>, + <&apps_smmu 0x2dcb 0x04a0>, + <&apps_smmu 0x2deb 0x04a0>; + dma-coherent; + }; + + compute-cb@12 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <12>; + iommus = <&apps_smmu 0x294c 0x04a0>, + <&apps_smmu 0x296c 0x04a0>, + <&apps_smmu 0x298c 0x0400>, + <&apps_smmu 0x29cc 0x04a0>, + <&apps_smmu 0x29ec 0x04a0>, + <&apps_smmu 0x2d4c 0x04a0>, + <&apps_smmu 0x2d6c 0x04a0>, + <&apps_smmu 0x2d8c 0x0400>, + <&apps_smmu 0x2dcc 0x04a0>, + <&apps_smmu 0x2dec 0x04a0>; + dma-coherent; + }; + + compute-cb@13 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <13>; + iommus = <&apps_smmu 0x294d 0x04a0>, + <&apps_smmu 0x296d 0x04a0>, + <&apps_smmu 0x298d 0x0400>, + <&apps_smmu 0x29Cd 0x04a0>, + <&apps_smmu 0x29ed 0x04a0>, + <&apps_smmu 0x2d4d 0x04a0>, + <&apps_smmu 0x2d6d 0x04a0>, + <&apps_smmu 0x2d8d 0x0400>, + <&apps_smmu 0x2dcd 0x04a0>, + <&apps_smmu 0x2ded 0x04a0>; + dma-coherent; + }; + }; + }; + }; + + remoteproc_adsp: remoteproc@30000000 { + compatible = "qcom,sa8775p-adsp-pas"; + reg = <0x0 0x30000000 0x0 0x100>; + + interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>, + <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>, + <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>, + <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>, + <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", "handover", + "stop-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&rpmhpd RPMHPD_LCX>, + <&rpmhpd RPMHPD_LMX>; + power-domain-names = "lcx", "lmx"; + + interconnects = <&lpass_ag_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>; + + memory-region = <&pil_adsp_mem>; + + qcom,qmp = <&aoss_qmp>; + + qcom,smem-states = <&smp2p_adsp_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + remoteproc_adsp_glink: glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_LPASS + IPCC_MPROC_SIGNAL_GLINK_QMP + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_LPASS + IPCC_MPROC_SIGNAL_GLINK_QMP>; + + label = "lpass"; + qcom,remote-pid = <2>; + + fastrpc { + compatible = "qcom,fastrpc"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + label = "adsp"; + memory-region = <&adsp_rpc_remote_heap_mem>; + qcom,vmids = ; + #address-cells = <1>; + #size-cells = <0>; + + compute-cb@3 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <3>; + iommus = <&apps_smmu 0x3003 0x0>; + dma-coherent; + }; + + compute-cb@4 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <4>; + iommus = <&apps_smmu 0x3004 0x0>; + dma-coherent; + }; + + compute-cb@5 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <5>; + iommus = <&apps_smmu 0x3005 0x0>; + qcom,nsessions = <5>; + dma-coherent; + }; + }; + }; + }; }; thermal-zones { diff --git a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts index 5b226577f9d8af..62de4774c556d3 100644 --- a/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts +++ 
b/arch/arm64/boot/dts/qcom/sc8180x-lenovo-flex-5g.dts @@ -484,6 +484,10 @@ &pcie3_phy { status = "okay"; }; +&pmc8180_pwrkey { + status = "okay"; +}; + &pmc8180c_lpg { status = "okay"; }; @@ -557,6 +561,40 @@ &ufs_mem_phy { status = "okay"; }; +&usb_mp { + status = "okay"; +}; + +&usb_mp_hsphy0 { + vdda-pll-supply = <&vreg_l5e_0p88>; + vdda18-supply = <&vreg_l12a_1p8>; + vdda33-supply = <&vreg_l16e_3p0>; + + status = "okay"; +}; + +&usb_mp_hsphy1 { + vdda-pll-supply = <&vreg_l5e_0p88>; + vdda18-supply = <&vreg_l12a_1p8>; + vdda33-supply = <&vreg_l16e_3p0>; + + status = "okay"; +}; + +&usb_mp_qmpphy0 { + vdda-phy-supply = <&vreg_l3c_1p2>; + vdda-pll-supply = <&vreg_l5e_0p88>; + + status = "okay"; +}; + +&usb_mp_qmpphy1 { + vdda-phy-supply = <&vreg_l3c_1p2>; + vdda-pll-supply = <&vreg_l5e_0p88>; + + status = "okay"; +}; + &usb_prim_hsphy { vdda-pll-supply = <&vreg_l5e_0p88>; vdda18-supply = <&vreg_l12a_1p8>; diff --git a/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi index 1c6f12fafe1d40..451c9b984f1fd9 100644 --- a/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8180x-pmics.dtsi @@ -75,7 +75,7 @@ pmc8180_0: pmic@0 { pon: pon@800 { compatible = "qcom,pm8916-pon"; reg = <0x0800>; - pwrkey { + pmc8180_pwrkey: pwrkey { compatible = "qcom,pm8941-pwrkey"; interrupts = <0x0 0x8 0x0 IRQ_TYPE_EDGE_BOTH>; debounce = <15625>; @@ -139,11 +139,11 @@ rtc@6000 { interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>; }; - pmc8180_gpios: gpio@c000 { + pmc8180_1_gpios: gpio@c000 { compatible = "qcom,pmc8180-gpio", "qcom,spmi-gpio"; reg = <0xc000>; gpio-controller; - gpio-ranges = <&pmc8180_gpios 0 0 10>; + gpio-ranges = <&pmc8180_1_gpios 0 0 10>; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; @@ -198,11 +198,21 @@ pmic@6 { #size-cells = <0>; }; - pmic@8 { + pmc8180_2: pmic@8 { compatible = "qcom,pm8150", "qcom,spmi-pmic"; reg = <0x8 SPMI_USID>; #address-cells = <1>; #size-cells = <0>; + + pmc8180_2_gpios: gpio@c000 { + compatible = "qcom,pmc8180-gpio", "qcom,spmi-gpio"; + reg = <0xc000>; + gpio-controller; + gpio-ranges = <&pmc8180_2_gpios 0 0 10>; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; }; pmic@a { diff --git a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts index 65d923497a057d..79b4d293ea1e70 100644 --- a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts +++ b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts @@ -223,6 +223,32 @@ vreg_s4a_1p8: pm8150-s4 { vin-supply = <&vph_pwr>; }; + vreg_usb2_host_en: regulator-usb2-host-en { + compatible = "regulator-fixed"; + regulator-name = "usb2_host_en"; + + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + + gpio = <&pmc8180_1_gpios 9 GPIO_ACTIVE_HIGH>; + enable-active-high; + + regulator-always-on; + }; + + vreg_usb3_host_en: regulator-usb3-host-en { + compatible = "regulator-fixed"; + regulator-name = "usb3_host_en"; + + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + + gpio = <&pmc8180_2_gpios 9 GPIO_ACTIVE_HIGH>; + enable-active-high; + + regulator-always-on; + }; + usbprim-sbu-mux { compatible = "pericom,pi3usb102", "gpio-sbu-mux"; @@ -552,6 +578,10 @@ &pcie1_phy { status = "okay"; }; +&pmc8180_pwrkey { + status = "okay"; +}; + &pmc8180c_lpg { status = "okay"; }; @@ -623,6 +653,40 @@ &ufs_mem_phy { status = "okay"; }; +&usb_mp { + status = "okay"; +}; + +&usb_mp_hsphy0 { + vdda-pll-supply = <&vreg_l5e_0p88>; + vdda18-supply = <&vreg_l12a_1p8>; + vdda33-supply 
= <&vreg_l16e_3p0>; + + status = "okay"; +}; + +&usb_mp_hsphy1 { + vdda-pll-supply = <&vreg_l5e_0p88>; + vdda18-supply = <&vreg_l12a_1p8>; + vdda33-supply = <&vreg_l16e_3p0>; + + status = "okay"; +}; + +&usb_mp_qmpphy0 { + vdda-phy-supply = <&vreg_l3c_1p2>; + vdda-pll-supply = <&vreg_l5e_0p88>; + + status = "okay"; +}; + +&usb_mp_qmpphy1 { + vdda-phy-supply = <&vreg_l3c_1p2>; + vdda-pll-supply = <&vreg_l5e_0p88>; + + status = "okay"; +}; + &usb_prim_hsphy { vdda-pll-supply = <&vreg_l5e_0p88>; vdda18-supply = <&vreg_l12a_1p8>; diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi index 6e707d993aeb36..0e9429684dd97b 100644 --- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi @@ -2507,6 +2507,34 @@ usb_sec_hsphy: phy@88e3000 { status = "disabled"; }; + usb_mp_hsphy0: phy@88e4000 { + compatible = "qcom,sc8180x-usb-hs-phy", + "qcom,usb-snps-hs-7nm-phy"; + reg = <0 0x088e4000 0 0x400>; + #phy-cells = <0>; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "ref"; + + resets = <&gcc GCC_QUSB2PHY_MP0_BCR>; + + status = "disabled"; + }; + + usb_mp_hsphy1: phy@88e5000 { + compatible = "qcom,sc8180x-usb-hs-phy", + "qcom,usb-snps-hs-7nm-phy"; + reg = <0 0x088e5000 0 0x400>; + #phy-cells = <0>; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "ref"; + + resets = <&gcc GCC_QUSB2PHY_MP1_BCR>; + + status = "disabled"; + }; + usb_prim_qmpphy: phy@88e8000 { compatible = "qcom,sc8180x-qmp-usb3-dp-phy"; reg = <0 0x088e8000 0 0x3000>; @@ -2555,6 +2583,60 @@ port@2 { }; }; + usb_mp_qmpphy0: phy@88eb000 { + compatible = "qcom,sc8180x-qmp-usb3-uni-phy"; + reg = <0 0x088eb000 0 0x1000>; + + clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>, + <&gcc GCC_USB3_PRIM_CLKREF_CLK>, + <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>, + <&gcc GCC_USB3_MP_PHY_PIPE_0_CLK>; + clock-names = "aux", + "ref", + "com_aux", + "pipe"; + + resets = <&gcc GCC_USB3_UNIPHY_MP0_BCR>, + <&gcc GCC_USB3UNIPHY_PHY_MP0_BCR>; + reset-names = "phy", "phy_phy"; + + power-domains = <&gcc USB30_MP_GDSC>; + + #clock-cells = <0>; + clock-output-names = "usb2_phy0_pipe_clk"; + + #phy-cells = <0>; + + status = "disabled"; + }; + + usb_mp_qmpphy1: phy@88ec000 { + compatible = "qcom,sc8180x-qmp-usb3-uni-phy"; + reg = <0 0x088ec000 0 0x1000>; + + clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>, + <&gcc GCC_USB3_PRIM_CLKREF_CLK>, + <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>, + <&gcc GCC_USB3_MP_PHY_PIPE_1_CLK>; + clock-names = "aux", + "ref", + "com_aux", + "pipe"; + + resets = <&gcc GCC_USB3_UNIPHY_MP1_BCR>, + <&gcc GCC_USB3UNIPHY_PHY_MP1_BCR>; + reset-names = "phy", "phy_phy"; + + power-domains = <&gcc USB30_MP_GDSC>; + + #clock-cells = <0>; + clock-output-names = "usb2_phy1_pipe_clk"; + + #phy-cells = <0>; + + status = "disabled"; + }; + usb_sec_qmpphy: phy@88ee000 { compatible = "qcom,sc8180x-qmp-usb3-dp-phy"; reg = <0 0x088ed000 0 0x3000>; @@ -2622,17 +2704,89 @@ gem_noc: interconnect@9680000 { qcom,bcm-voters = <&apps_bcm_voter>; }; + usb_mp: usb@a4f8800 { + compatible = "qcom,sc8180x-dwc3-mp", "qcom,dwc3"; + reg = <0 0x0a4f8800 0 0x400>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + dma-ranges; + + clocks = <&gcc GCC_CFG_NOC_USB3_MP_AXI_CLK>, + <&gcc GCC_USB30_MP_MASTER_CLK>, + <&gcc GCC_AGGRE_USB3_MP_AXI_CLK>, + <&gcc GCC_USB30_MP_SLEEP_CLK>, + <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>, + <&gcc GCC_USB3_SEC_CLKREF_CLK>; + clock-names = "cfg_noc", + "core", + "iface", + "sleep", + "mock_utmi", + "xo"; + + interconnects = <&aggre1_noc MASTER_USB3_2 0 &mc_virt SLAVE_EBI_CH0 0>, + <&gem_noc MASTER_AMPSS_M0 0 
&config_noc SLAVE_USB3_2 0>; + interconnect-names = "usb-ddr", "apps-usb"; + + assigned-clocks = <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_MP_MASTER_CLK>; + assigned-clock-rates = <19200000>, <200000000>; + + interrupts-extended = <&intc GIC_SPI 656 IRQ_TYPE_LEVEL_HIGH>, + <&intc GIC_SPI 655 IRQ_TYPE_LEVEL_HIGH>, + <&intc GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH>, + <&intc GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 59 IRQ_TYPE_EDGE_BOTH>, + <&pdc 46 IRQ_TYPE_EDGE_BOTH>, + <&pdc 71 IRQ_TYPE_EDGE_BOTH>, + <&pdc 68 IRQ_TYPE_EDGE_BOTH>, + <&pdc 7 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 30 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "pwr_event_1", "pwr_event_2", + "hs_phy_1", "hs_phy_2", + "dp_hs_phy_1", "dm_hs_phy_1", + "dp_hs_phy_2", "dm_hs_phy_2", + "ss_phy_1", "ss_phy_2"; + + power-domains = <&gcc USB30_MP_GDSC>; + + resets = <&gcc GCC_USB30_MP_BCR>; + + status = "disabled"; + + usb_mp_dwc3: usb@a400000 { + compatible = "snps,dwc3"; + reg = <0 0x0a400000 0 0xcd00>; + interrupts = ; + iommus = <&apps_smmu 0x60 0>; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; + phys = <&usb_mp_hsphy0>, + <&usb_mp_qmpphy0>, + <&usb_mp_hsphy1>, + <&usb_mp_qmpphy1>; + phy-names = "usb2-0", + "usb3-0", + "usb2-1", + "usb3-1"; + dr_mode = "host"; + }; + }; + usb_prim: usb@a6f8800 { compatible = "qcom,sc8180x-dwc3", "qcom,dwc3"; reg = <0 0x0a6f8800 0 0x400>; - interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, - <&pdc 6 IRQ_TYPE_LEVEL_HIGH>, + interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>, + <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 9 IRQ_TYPE_EDGE_BOTH>, <&pdc 8 IRQ_TYPE_EDGE_BOTH>, - <&pdc 9 IRQ_TYPE_EDGE_BOTH>; - interrupt-names = "hs_phy_irq", - "ss_phy_irq", + <&pdc 6 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "pwr_event", + "hs_phy_irq", + "dp_hs_phy_irq", "dm_hs_phy_irq", - "dp_hs_phy_irq"; + "ss_phy_irq"; clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>, <&gcc GCC_USB30_PRIM_MASTER_CLK>, @@ -2714,12 +2868,17 @@ usb_sec: usb@a8f8800 { "xo"; resets = <&gcc GCC_USB30_SEC_BCR>; power-domains = <&gcc USB30_SEC_GDSC>; - interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, - <&pdc 40 IRQ_TYPE_LEVEL_HIGH>, + + interrupts-extended = <&intc GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>, + <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 11 IRQ_TYPE_EDGE_BOTH>, <&pdc 10 IRQ_TYPE_EDGE_BOTH>, - <&pdc 11 IRQ_TYPE_EDGE_BOTH>; - interrupt-names = "hs_phy_irq", "ss_phy_irq", - "dm_hs_phy_irq", "dp_hs_phy_irq"; + <&pdc 40 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "pwr_event", + "hs_phy_irq", + "dp_hs_phy_irq", + "dm_hs_phy_irq", + "ss_phy_irq"; assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>, <&gcc GCC_USB30_SEC_MASTER_CLK>; diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts index b98b2f7752b5b2..6020582b0a59d7 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts +++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts @@ -848,15 +848,15 @@ perst-n-pins { pins = "gpio143"; function = "gpio"; drive-strength = <2>; - bias-pull-down; + bias-disable; }; wake-n-pins { - pins = "gpio145"; - function = "gpio"; - drive-strength = <2>; - bias-pull-up; - }; + pins = "gpio145"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; }; pcie3a_default: pcie3a-default-state { @@ -871,7 +871,7 @@ perst-n-pins { pins = "gpio151"; function = "gpio"; drive-strength = <2>; - bias-pull-down; + bias-disable; }; wake-n-pins { @@ -894,7 +894,7 @@ perst-n-pins { pins = "gpio141"; function = "gpio"; drive-strength = <2>; - bias-pull-down; + bias-disable; }; 
wake-n-pins { diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts index b27143f81867ab..6a28cab971891d 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts +++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts @@ -592,6 +592,57 @@ vreg_l10d: ldo10 { }; }; +&camss { + vdda-phy-supply = <&vreg_l6d>; + vdda-pll-supply = <&vreg_l4d>; + + status = "okay"; + + ports { + port@0 { + csiphy0_lanes01_ep: endpoint@0 { + reg = <0>; + clock-lanes = <7>; + data-lanes = <0 1>; + remote-endpoint = <&ov5675_ep>; + }; + }; + }; +}; + +&cci2 { + status = "okay"; +}; + +&cci2_i2c1 { + camera@10 { + compatible = "ovti,ov5675"; + reg = <0x10>; + + reset-gpios = <&tlmm 15 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&cam_rgb_default>; + + clocks = <&camcc CAMCC_MCLK3_CLK>; + + orientation = <0>; /* Front facing */ + + avdd-supply = <&vreg_l6q>; + dvdd-supply = <&vreg_l2q>; + dovdd-supply = <&vreg_l7q>; + + port { + ov5675_ep: endpoint { + clock-lanes = <0>; + data-lanes = <1 2>; + link-frequencies = /bits/ 64 <450000000>; + remote-endpoint = <&csiphy0_lanes01_ep>; + }; + }; + + }; +}; + &dispcc0 { status = "okay"; }; @@ -1436,6 +1487,22 @@ cam_indicator_en: cam-indicator-en-state { bias-disable; }; + cam_rgb_default: cam-rgb-default-state { + mclk-pins { + pins = "gpio17"; + function = "cam_mclk"; + drive-strength = <16>; + bias-disable; + }; + + sc-rgb-xshut-n-pins { + pins = "gpio15"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + }; + edp_reg_en: edp-reg-en-state { pins = "gpio25"; function = "gpio"; @@ -1509,15 +1576,15 @@ perst-n-pins { pins = "gpio143"; function = "gpio"; drive-strength = <2>; - bias-pull-down; + bias-disable; }; wake-n-pins { - pins = "gpio145"; - function = "gpio"; - drive-strength = <2>; - bias-pull-up; - }; + pins = "gpio145"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; }; pcie3a_default: pcie3a-default-state { @@ -1532,7 +1599,7 @@ perst-n-pins { pins = "gpio151"; function = "gpio"; drive-strength = <2>; - bias-pull-down; + bias-disable; }; wake-n-pins { @@ -1555,7 +1622,7 @@ perst-n-pins { pins = "gpio141"; function = "gpio"; drive-strength = <2>; - bias-pull-down; + bias-disable; }; wake-n-pins { diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi index c7e3764a8cf321..c8da5cb8d04e99 100644 --- a/arch/arm64/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi @@ -372,7 +372,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-sdm660"; + compatible = "qcom,rpm-sdm660", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/sdx75-idp.dts b/arch/arm64/boot/dts/qcom/sdx75-idp.dts index fde16308c7e24e..f1bbe7ab01ab0b 100644 --- a/arch/arm64/boot/dts/qcom/sdx75-idp.dts +++ b/arch/arm64/boot/dts/qcom/sdx75-idp.dts @@ -282,6 +282,12 @@ &qupv3_id_0 { status = "okay"; }; +&remoteproc_mpss { + firmware-name = "qcom/sdx75/modem.mbn", + "qcom/sdx75/modem_dtb.mbn"; + status = "okay"; +}; + &sdhc { cd-gpios = <&tlmm 103 GPIO_ACTIVE_LOW>; vmmc-supply = <®_2v95_vdd>; diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi index 9b93f6501d55c7..7cf3fcb469a868 100644 --- a/arch/arm64/boot/dts/qcom/sdx75.dtsi +++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi @@ -366,7 +366,12 @@ uefi_log_mem: uefi-log@87f75000 { no-map; }; - qdss_mem: qdss@88800000 { 
+ qdss_mem: qdss@88500000 { + reg = <0x0 0x88500000 0x0 0x300000>; + no-map; + }; + + qlink_logging_mem: qlink-logging@88800000 { reg = <0x0 0x88800000 0x0 0x300000>; no-map; }; @@ -377,8 +382,13 @@ audio_heap_mem: audio-heap@88b00000 { no-map; }; - mpss_dsmharq_mem: mpss-dsmharq@88f00000 { - reg = <0x0 0x88f00000 0x0 0x5080000>; + mpss_dsm_mem_2: mpss-dsm-2@88f00000 { + reg = <0x0 0x88f00000 0x0 0x2500000>; + no-map; + }; + + mpss_dsm_mem: mpss-dsm@8b400000 { + reg = <0x0 0x8b400000 0x0 0x2b80000>; no-map; }; @@ -388,7 +398,7 @@ q6_mpss_dtb_mem: q6-mpss-dtb@8df80000 { }; mpssadsp_mem: mpssadsp@8e000000 { - reg = <0x0 0x8e000000 0x0 0xf400000>; + reg = <0x0 0x8e000000 0x0 0xf100000>; no-map; }; @@ -881,6 +891,53 @@ tcsr: syscon@1fc0000 { reg = <0x0 0x01fc0000 0x0 0x30000>; }; + remoteproc_mpss: remoteproc@4080000 { + compatible = "qcom,sdx75-mpss-pas"; + reg = <0 0x04080000 0 0x4040>; + + interrupts-extended = <&intc GIC_SPI 250 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 1 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 2 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 3 IRQ_TYPE_EDGE_RISING>, + <&smp2p_modem_in 7 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", + "fatal", + "ready", + "handover", + "stop-ack", + "shutdown-ack"; + + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + + power-domains = <&rpmhpd RPMHPD_CX>, + <&rpmhpd RPMHPD_MSS>; + power-domain-names = "cx", + "mss"; + + memory-region = <&mpssadsp_mem>, <&q6_mpss_dtb_mem>, + <&mpss_dsm_mem>, <&mpss_dsm_mem_2>, + <&qlink_logging_mem>; + + qcom,qmp = <&aoss_qmp>; + + qcom,smem-states = <&smp2p_modem_out 0>; + qcom,smem-state-names = "stop"; + + status = "disabled"; + + glink-edge { + interrupts-extended = <&ipcc IPCC_CLIENT_MPSS + IPCC_MPROC_SIGNAL_PING + IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc IPCC_CLIENT_MPSS + IPCC_MPROC_SIGNAL_PING>; + label = "mpss"; + qcom,remote-pid = <1>; + }; + }; + sdhc: mmc@8804000 { compatible = "qcom,sdx75-sdhci", "qcom,sdhci-msm-v5"; reg = <0x0 0x08804000 0x0 0x1000>; diff --git a/arch/arm64/boot/dts/qcom/sm4450.dtsi b/arch/arm64/boot/dts/qcom/sm4450.dtsi index 9c9919e78fbdbf..1e05cd00b635ee 100644 --- a/arch/arm64/boot/dts/qcom/sm4450.dtsi +++ b/arch/arm64/boot/dts/qcom/sm4450.dtsi @@ -4,7 +4,10 @@ */ #include +#include +#include #include +#include #include #include #include @@ -422,6 +425,41 @@ tcsr_mutex: hwlock@1f40000 { #hwlock-cells = <1>; }; + gpucc: clock-controller@3d90000 { + compatible = "qcom,sm4450-gpucc"; + reg = <0x0 0x03d90000 0x0 0xa000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_GPU_GPLL0_CLK_SRC>, + <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; + + camcc: clock-controller@ade0000 { + compatible = "qcom,sm4450-camcc"; + reg = <0x0 0x0ade0000 0x0 0x20000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_CAMERA_AHB_CLK>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; + + dispcc: clock-controller@af00000 { + compatible = "qcom,sm4450-dispcc"; + reg = <0x0 0x0af00000 0x0 0x20000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&rpmhcc RPMH_CXO_CLK_A>, + <&gcc GCC_DISP_AHB_CLK>, + <&sleep_clk>, + <0>, + <0>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; + pdc: interrupt-controller@b220000 { compatible = "qcom,sm4450-pdc", "qcom,pdc"; reg = <0 0x0b220000 0 0x30000>, <0 0x174000f0 0 0x64>; diff --git a/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts b/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts index 
4a30024aa48fb4..f60d36c03b9b51 100644 --- a/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts +++ b/arch/arm64/boot/dts/qcom/sm6115-fxtec-pro1x.dts @@ -1,13 +1,16 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* - * Copyright (c) 2023, Dang Huynh + * Copyright (c) 2023 - 2024, Dang Huynh */ /dts-v1/; #include "sm6115.dtsi" #include "pm6125.dtsi" +#include "pmi632.dtsi" #include +#include +#include / { model = "F(x)tec Pro1X (QX1050)"; @@ -32,12 +35,48 @@ framebuffer0: framebuffer@5c000000 { }; }; + disp_elvdd_supply: disp-elvdd-supply { + compatible = "regulator-fixed"; + regulator-name = "disp_elvdd_supply"; + }; + + disp_elvss_supply: disp-elvss-supply { + compatible = "regulator-fixed"; + regulator-name = "disp_elvss_supply"; + }; + + disp_vcc_supply: disp-vcc-supply { + compatible = "regulator-fixed"; + regulator-name = "disp_vcc_supply"; + }; + + disp_vci_supply: disp-vci-supply { + compatible = "regulator-fixed"; + regulator-name = "disp_vci_supply"; + }; + gpio-keys { compatible = "gpio-keys"; - pinctrl-0 = <&vol_up_n>; + pinctrl-0 = <&hall_sensor_n>, <&key_camera_n>, <&vol_up_n>; pinctrl-names = "default"; + hall-switch { + label = "Hall Switch"; + linux,input-type = ; + linux,code = ; + gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>; + debounce-interval = <90>; + wakeup-source; + }; + + key-camera { + label = "Camera Button"; + linux,code = ; + gpios = <&tlmm 18 GPIO_ACTIVE_LOW>; + debounce-interval = <15>; + }; + key-volume-up { label = "Volume Up"; linux,code = ; @@ -47,11 +86,119 @@ key-volume-up { wakeup-source; }; }; + + gpio-leds { + compatible = "gpio-leds"; + + capslock-led { + label = "green:capslock"; + function = LED_FUNCTION_CAPSLOCK; + color = ; + gpios = <&pca9534 1 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "kbd-capslock"; + default-state = "off"; + }; + }; + + ts_vdd_supply: ts-vdd-supply { + compatible = "regulator-fixed"; + regulator-name = "ts_vdd_supply"; + gpio = <&pca9534 3 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + ts_vddio_supply: ts-vddio-supply { + compatible = "regulator-fixed"; + regulator-name = "ts_vddio_supply"; + gpio = <&pca9534 2 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; }; -&dispcc { - /* HACK: disable until a panel driver is ready to retain simplefb */ - status = "disabled"; +&gpi_dma0 { + status = "okay"; +}; + +&gpu { + status = "okay"; + + zap-shader { + firmware-name = "qcom/sm6115/Fxtec/QX1050/a610_zap.mbn"; + }; +}; + +&i2c1 { + clock-frequency = <100000>; + + status = "okay"; + + pca9534: gpio@21 { + compatible = "nxp,pca9534"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + }; +}; + +&i2c2 { + status = "okay"; + /* Clock frequency was not specified downstream, let's park it to 100 KHz */ + clock-frequency = <100000>; + + touchscreen@14 { + compatible = "goodix,gt9286"; + reg = <0x14>; + + interrupts-extended = <&tlmm 80 IRQ_TYPE_LEVEL_LOW>; + + irq-gpios = <&tlmm 80 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&tlmm 71 GPIO_ACTIVE_HIGH>; + AVDD28-supply = <&ts_vdd_supply>; + VDDIO-supply = <&ts_vddio_supply>; + + pinctrl-0 = <&ts_int_n>, <&ts_rst_n>; + pinctrl-names = "default"; + }; +}; + +&mdss { + status = "okay"; +}; + +&mdss_dsi0 { + vdda-supply = <&pm6125_l18a>; + status = "okay"; + + panel: panel@0 { + compatible = "boe,bf060y8m-aj0"; + reg = <0>; + + reset-gpios = <&tlmm 82 GPIO_ACTIVE_LOW>; + + elvdd-supply = <&disp_elvdd_supply>; + elvss-supply = <&disp_elvss_supply>; + vcc-supply = <&disp_vcc_supply>; + vci-supply = <&disp_vci_supply>; + vddio-supply = <&pm6125_l9a>; + + pinctrl-0 = <&mdss_dsi_n &panel_en_n>; + 
pinctrl-names = "default"; + + port { + panel_in: endpoint { + remote-endpoint = <&mdss_dsi0_out>; + }; + }; + }; +}; + +&mdss_dsi0_out { + data-lanes = <0 1 2 3>; + remote-endpoint = <&panel_in>; +}; + +&mdss_dsi0_phy { + status = "okay"; }; &pm6125_gpios { @@ -64,6 +211,73 @@ vol_up_n: vol-up-n-state { }; }; +&pmi632_lpg { + status = "okay"; + + multi-led { + color = ; + function = LED_FUNCTION_STATUS; + + #address-cells = <1>; + #size-cells = <0>; + + led@1 { + reg = <1>; + color = ; + }; + + led@2 { + reg = <2>; + color = ; + }; + + led@3 { + reg = <3>; + color = ; + }; + }; +}; + +&pmi632_typec { + status = "okay"; + + connector { + compatible = "usb-c-connector"; + + power-role = "dual"; + data-role = "dual"; + self-powered; + + typec-power-opmode = "default"; + pd-disable; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + pmi632_hs_in: endpoint { + remote-endpoint = <&usb_dwc3_hs>; + }; + }; + + port@1 { + reg = <1>; + pmi632_ss_in: endpoint { + remote-endpoint = <&usb_qmpphy_out>; + }; + }; + }; + }; +}; + +&pmi632_vbus { + regulator-min-microamp = <500000>; + regulator-max-microamp = <1000000>; + status = "okay"; +}; + &pon_pwrkey { status = "okay"; }; @@ -73,6 +287,25 @@ &pon_resin { status = "okay"; }; +&qupv3_id_0 { + status = "okay"; +}; + +&remoteproc_adsp { + firmware-name = "qcom/sm6115/Fxtec/QX1050/adsp.mbn"; + status = "okay"; +}; + +&remoteproc_cdsp { + firmware-name = "qcom/sm6115/Fxtec/QX1050/cdsp.mbn"; + status = "okay"; +}; + +&remoteproc_mpss { + firmware-name = "qcom/sm6115/Fxtec/QX1050/modem.mbn"; + status = "okay"; +}; + &rpm_requests { regulators-0 { compatible = "qcom,rpm-pm6125-regulators"; @@ -105,6 +338,7 @@ pm6125_l4a: l4 { pm6125_l5a: l5 { regulator-min-microvolt = <1648000>; regulator-max-microvolt = <3056000>; + regulator-allow-set-load; }; pm6125_l6a: l6 { @@ -206,12 +440,84 @@ pm6125_l24a: l24 { }; }; +&sdc2_state_off { + cd-pins { + pins = "gpio88"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; +}; + +&sdc2_state_on { + cd-pins { + pins = "gpio88"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; +}; + +&sdhc_2 { + pinctrl-0 = <&sdc2_state_on>; + pinctrl-1 = <&sdc2_state_off>; + pinctrl-names = "default", "sleep"; + + cd-gpios = <&tlmm 88 GPIO_ACTIVE_LOW>; + + vmmc-supply = <&pm6125_l22a>; + vqmmc-supply = <&pm6125_l5a>; + + status = "okay"; +}; + &sleep_clk { clock-frequency = <32764>; }; &tlmm { gpio-reserved-ranges = <0 4>, <14 4>; + + key_camera_n: key-camera-n-state { + pins = "gpio18"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + + panel_en_n: panel-en-n-state { + pins = "gpio65"; + function = "gpio"; + bias-disable; + }; + + ts_rst_n: ts-rst-n-state { + pins = "gpio71"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + + ts_int_n: ts-int-n-state { + pins = "gpio80"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + + mdss_dsi_n: mdss-dsi-n-state { + pins = "gpio82"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + + hall_sensor_n: hall-sensor-n-state { + pins = "gpio96"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; }; &ufs_mem_hc { @@ -233,10 +539,8 @@ &usb { status = "okay"; }; -&usb_dwc3 { - /delete-property/ usb-role-switch; - maximum-speed = "high-speed"; - dr_mode = "peripheral"; +&usb_dwc3_hs { + remote-endpoint = <&pmi632_hs_in>; }; &usb_hsphy { @@ -246,6 +550,27 @@ &usb_hsphy { status = "okay"; }; +&usb_qmpphy { + vdda-phy-supply = <&pm6125_l4a>; + vdda-pll-supply = 
<&pm6125_l12a>; + status = "okay"; +}; + +&usb_qmpphy_out { + remote-endpoint = <&pmi632_ss_in>; +}; + +&wifi { + vdd-0.8-cx-mx-supply = <&pm6125_l8a>; + vdd-1.8-xo-supply = <&pm6125_l16a>; + vdd-1.3-rfa-supply = <&pm6125_l17a>; + vdd-3.3-ch0-supply = <&pm6125_l23a>; + + qcom,ath10k-calibration-variant = "Fxtec_QX1050"; + + status = "okay"; +}; + &xo_board { clock-frequency = <19200000>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi index e374733f3b856e..41216cc319d65e 100644 --- a/arch/arm64/boot/dts/qcom/sm6115.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi @@ -376,7 +376,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-sm6115"; + compatible = "qcom,rpm-sm6115", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi index 777c380c2fa044..133610d14fc41a 100644 --- a/arch/arm64/boot/dts/qcom/sm6125.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi @@ -192,7 +192,7 @@ glink-edge { mboxes = <&apcs_glb 0>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-sm6125"; + compatible = "qcom,rpm-sm6125", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi index ddea681b536db0..4d519dd6e7ef2f 100644 --- a/arch/arm64/boot/dts/qcom/sm6375.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi @@ -653,7 +653,7 @@ IPCC_MPROC_SIGNAL_GLINK_QMP mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>; rpm_requests: rpm-requests { - compatible = "qcom,rpm-sm6375"; + compatible = "qcom,rpm-sm6375", "qcom,glink-smd-rpm"; qcom,glink-channels = "rpm_requests"; rpmcc: clock-controller { diff --git a/arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi index 29289fa41b1344..b9cff60efe6fc0 100644 --- a/arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sm7125-xiaomi-common.dtsi @@ -411,6 +411,8 @@ sd-cd-pins { }; &ufs_mem_hc { + reset-gpios = <&tlmm 119 GPIO_ACTIVE_LOW>; + vcc-supply = <&vreg_l19a_3p0>; vcc-max-microamp = <600000>; vccq2-supply = <&vreg_l12a_1p8>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts index 286350ac77517e..256a1ba9494560 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dts @@ -355,11 +355,6 @@ &gmu { }; &gpu { - /* - * NOTE: "amd,imageon" makes Adreno start in headless mode, remove it - * after display support is added on this board. 
- */ - compatible = "qcom,adreno-640.1", "qcom,adreno", "amd,imageon"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 3e236adb9397b6..27f87835bc5595 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -17,6 +17,7 @@ #include #include #include +#include #include / { @@ -3759,6 +3760,18 @@ camnoc_virt: interconnect@ac00000 { qcom,bcm-voters = <&apps_bcm_voter>; }; + camcc: clock-controller@ad00000 { + compatible = "qcom,sm8150-camcc"; + reg = <0 0x0ad00000 0 0x10000>; + clocks = <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_CAMERA_AHB_CLK>; + power-domains = <&rpmhpd SM8150_MMCX>; + required-opps = <&rpmhpd_opp_low_svs>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; + mdss: display-subsystem@ae00000 { compatible = "qcom,sm8150-mdss"; reg = <0 0x0ae00000 0 0x1000>; diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index 9d6c97d1fd9d6e..630f4eff20bf81 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -8,8 +8,6 @@ #include #include #include -#include -#include #include #include #include @@ -2633,14 +2631,13 @@ tcsr: syscon@1fc0000 { wsamacro: codec@3240000 { compatible = "qcom,sm8250-lpass-wsa-macro"; reg = <0 0x03240000 0 0x1000>; - clocks = <&audiocc LPASS_CDC_WSA_MCLK>, - <&audiocc LPASS_CDC_WSA_NPL>, + clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>, + <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>, <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, - <&aoncc LPASS_CDC_VA_MCLK>, <&vamacro>; - clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen"; + clock-names = "mclk", "npl", "macro", "dcodec", "fsgen"; #clock-cells = <0>; clock-output-names = "mclk"; @@ -2674,20 +2671,10 @@ swr0: soundwire@3250000 { status = "disabled"; }; - audiocc: clock-controller@3300000 { - compatible = "qcom,sm8250-lpass-audiocc"; - reg = <0 0x03300000 0 0x30000>; - #clock-cells = <1>; - clocks = <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, - <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, - <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>; - clock-names = "core", "audio", "bus"; - }; - vamacro: codec@3370000 { compatible = "qcom,sm8250-lpass-va-macro"; reg = <0 0x03370000 0 0x1000>; - clocks = <&aoncc LPASS_CDC_VA_MCLK>, + clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>, <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>; @@ -2792,16 +2779,6 @@ swr2: soundwire@3230000 { #size-cells = <0>; }; - aoncc: clock-controller@3380000 { - compatible = "qcom,sm8250-lpass-aoncc"; - reg = <0 0x03380000 0 0x40000>; - #clock-cells = <1>; - clocks = <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, - <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>, - <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>; - clock-names = "core", "audio", "bus"; - }; - lpass_tlmm: pinctrl@33c0000 { compatible = "qcom,sm8250-lpass-lpi-pinctrl"; reg = <0 0x033c0000 0x0 0x20000>, diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index 38ee0850c33582..37a2aba0d4cae0 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -2251,6 +2251,12 
@@ usb_2_hsphy: phy@88e4000 { resets = <&gcc GCC_QUSB2PHY_SEC_BCR>; }; + refgen: regulator@88e7000 { + compatible = "qcom,sm8350-refgen-regulator", + "qcom,sm8250-refgen-regulator"; + reg = <0x0 0x088e7000 0x0 0x84>; + }; + usb_1_qmpphy: phy@88e8000 { compatible = "qcom,sm8350-qmp-usb3-dp-phy"; reg = <0 0x088e8000 0 0x3000>; @@ -2490,8 +2496,12 @@ mdss: display-subsystem@ae00000 { reg-names = "mdss"; interconnects = <&mmss_noc MASTER_MDP0 0 &mc_virt SLAVE_EBI1 0>, - <&mmss_noc MASTER_MDP1 0 &mc_virt SLAVE_EBI1 0>; - interconnect-names = "mdp0-mem", "mdp1-mem"; + <&mmss_noc MASTER_MDP1 0 &mc_virt SLAVE_EBI1 0>, + <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY + &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>; + interconnect-names = "mdp0-mem", + "mdp1-mem", + "cpu-cfg"; power-domains = <&dispcc MDSS_GDSC>; resets = <&dispcc DISP_CC_MDSS_CORE_BCR>; @@ -2706,6 +2716,7 @@ mdss_dsi0: dsi@ae94000 { operating-points-v2 = <&dsi0_opp_table>; power-domains = <&rpmhpd RPMHPD_MMCX>; + refgen-supply = <&refgen>; phys = <&mdss_dsi0_phy>; @@ -2804,6 +2815,7 @@ mdss_dsi1: dsi@ae96000 { operating-points-v2 = <&dsi1_opp_table>; power-domains = <&rpmhpd RPMHPD_MMCX>; + refgen-supply = <&refgen>; phys = <&mdss_dsi1_phy>; diff --git a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts index 2e12219006c922..01c92160260572 100644 --- a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts +++ b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts @@ -279,6 +279,65 @@ platform { }; }; }; + + wcn7850-pmu { + compatible = "qcom,wcn7850-pmu"; + + pinctrl-names = "default"; + pinctrl-0 = <&wlan_en>, <&bt_default>, <&pmk8550_sleep_clk>; + + wlan-enable-gpios = <&tlmm 80 GPIO_ACTIVE_HIGH>; + bt-enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>; + + vdd-supply = <&vreg_s5g_0p85>; + vddio-supply = <&vreg_l15b_1p8>; + vddaon-supply = <&vreg_s2g_0p85>; + vdddig-supply = <&vreg_s4e_0p95>; + vddrfa1p2-supply = <&vreg_s4g_1p25>; + vddrfa1p8-supply = <&vreg_s6g_1p86>; + + regulators { + vreg_pmu_rfa_cmn: ldo0 { + regulator-name = "vreg_pmu_rfa_cmn"; + }; + + vreg_pmu_aon_0p59: ldo1 { + regulator-name = "vreg_pmu_aon_0p59"; + }; + + vreg_pmu_wlcx_0p8: ldo2 { + regulator-name = "vreg_pmu_wlcx_0p8"; + }; + + vreg_pmu_wlmx_0p85: ldo3 { + regulator-name = "vreg_pmu_wlmx_0p85"; + }; + + vreg_pmu_btcmx_0p85: ldo4 { + regulator-name = "vreg_pmu_btcmx_0p85"; + }; + + vreg_pmu_rfa_0p8: ldo5 { + regulator-name = "vreg_pmu_rfa_0p8"; + }; + + vreg_pmu_rfa_1p2: ldo6 { + regulator-name = "vreg_pmu_rfa_1p2"; + }; + + vreg_pmu_rfa_1p8: ldo7 { + regulator-name = "vreg_pmu_rfa_1p8"; + }; + + vreg_pmu_pcie_0p9: ldo8 { + regulator-name = "vreg_pmu_pcie_0p9"; + }; + + vreg_pmu_pcie_1p8: ldo9 { + regulator-name = "vreg_pmu_pcie_1p8"; + }; + }; + }; }; &apps_rsc { @@ -953,6 +1012,23 @@ &pcie0 { status = "okay"; }; +&pcieport0 { + wifi@0 { + compatible = "pci17cb,1107"; + reg = <0x10000 0x0 0x0 0x0 0x0>; + + vddrfacmn-supply = <&vreg_pmu_rfa_cmn>; + vddaon-supply = <&vreg_pmu_aon_0p59>; + vddwlcx-supply = <&vreg_pmu_wlcx_0p8>; + vddwlmx-supply = <&vreg_pmu_wlmx_0p85>; + vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>; + vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; + vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>; + vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>; + vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>; + }; +}; + &pcie0_phy { vdda-phy-supply = <&vreg_l1e_0p88>; vdda-pll-supply = <&vreg_l3e_1p2>; @@ -1041,6 +1117,17 @@ &pon_resin { status = "okay"; }; +&pmk8550_gpios { + pmk8550_sleep_clk: sleep-clk-state { + pins = "gpio3"; + function = "func1"; + input-disable; + 
output-enable; + bias-disable; + power-source = <0>; + }; +}; + &qupv3_id_0 { status = "okay"; }; @@ -1203,6 +1290,13 @@ wcd_default: wcd-reset-n-active-state { bias-disable; output-low; }; + + wlan_en: wlan-en-state { + pins = "gpio80"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; }; &uart7 { @@ -1215,20 +1309,15 @@ &uart14 { bluetooth { compatible = "qcom,wcn7850-bt"; - vddio-supply = <&vreg_l15b_1p8>; - vddaon-supply = <&vreg_s4e_0p95>; - vdddig-supply = <&vreg_s4e_0p95>; - vddrfa0p8-supply = <&vreg_s4e_0p95>; - vddrfa1p2-supply = <&vreg_s4g_1p25>; - vddrfa1p9-supply = <&vreg_s6g_1p86>; + vddrfacmn-supply = <&vreg_pmu_rfa_cmn>; + vddaon-supply = <&vreg_pmu_aon_0p59>; + vddwlcx-supply = <&vreg_pmu_wlcx_0p8>; + vddwlmx-supply = <&vreg_pmu_wlmx_0p85>; + vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>; + vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; + vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>; max-speed = <3200000>; - - enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>; - swctrl-gpios = <&tlmm 82 GPIO_ACTIVE_HIGH>; - - pinctrl-0 = <&bt_default>; - pinctrl-names = "default"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts index 774bdfcffec324..6052dd922ec55c 100644 --- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts +++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts @@ -219,13 +219,10 @@ wcn7850-pmu { compatible = "qcom,wcn7850-pmu"; pinctrl-names = "default"; - pinctrl-0 = <&wlan_en>, <&pmk8550_sleep_clk>; + pinctrl-0 = <&wlan_en>, <&bt_default>, <&pmk8550_sleep_clk>; wlan-enable-gpios = <&tlmm 80 GPIO_ACTIVE_HIGH>; - /* - * TODO Add bt-enable-gpios once the Bluetooth driver is - * converted to using the power sequencer. - */ + bt-enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>; vdd-supply = <&vreg_s5g_0p85>; vddio-supply = <&vreg_l15b_1p8>; @@ -1175,20 +1172,15 @@ &uart14 { bluetooth { compatible = "qcom,wcn7850-bt"; - vddio-supply = <&vreg_l15b_1p8>; - vddaon-supply = <&vreg_s4e_0p95>; - vdddig-supply = <&vreg_s4e_0p95>; - vddrfa0p8-supply = <&vreg_s4e_0p95>; - vddrfa1p2-supply = <&vreg_s4g_1p25>; - vddrfa1p9-supply = <&vreg_s6g_1p86>; + vddrfacmn-supply = <&vreg_pmu_rfa_cmn>; + vddaon-supply = <&vreg_pmu_aon_0p59>; + vddwlcx-supply = <&vreg_pmu_wlcx_0p8>; + vddwlmx-supply = <&vreg_pmu_wlmx_0p85>; + vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>; + vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; + vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>; max-speed = <3200000>; - - enable-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>; - swctrl-gpios = <&tlmm 82 GPIO_ACTIVE_HIGH>; - - pinctrl-0 = <&bt_default>; - pinctrl-names = "default"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi index 4c9820adcf52d4..9dc0ee3eb98f87 100644 --- a/arch/arm64/boot/dts/qcom/sm8550.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi @@ -2747,6 +2747,98 @@ videocc: clock-controller@aaf0000 { #power-domain-cells = <1>; }; + cci0: cci@ac15000 { + compatible = "qcom,sm8550-cci", "qcom,msm8996-cci"; + reg = <0 0x0ac15000 0 0x1000>; + interrupts = ; + power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>; + clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>, + <&camcc CAM_CC_CPAS_AHB_CLK>, + <&camcc CAM_CC_CCI_0_CLK>; + clock-names = "camnoc_axi", + "cpas_ahb", + "cci"; + pinctrl-0 = <&cci0_0_default &cci0_1_default>; + pinctrl-1 = <&cci0_0_sleep &cci0_1_sleep>; + pinctrl-names = "default", "sleep"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + + cci0_i2c0: i2c-bus@0 { + reg = <0>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + + 
cci0_i2c1: i2c-bus@1 { + reg = <1>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + cci1: cci@ac16000 { + compatible = "qcom,sm8550-cci", "qcom,msm8996-cci"; + reg = <0 0x0ac16000 0 0x1000>; + interrupts = ; + power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>; + clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>, + <&camcc CAM_CC_CPAS_AHB_CLK>, + <&camcc CAM_CC_CCI_1_CLK>; + clock-names = "camnoc_axi", + "cpas_ahb", + "cci"; + pinctrl-0 = <&cci1_0_default>; + pinctrl-1 = <&cci1_0_sleep>; + pinctrl-names = "default", "sleep"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + + cci1_i2c0: i2c-bus@0 { + reg = <0>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + cci2: cci@ac17000 { + compatible = "qcom,sm8550-cci", "qcom,msm8996-cci"; + reg = <0 0x0ac17000 0 0x1000>; + interrupts = ; + power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>; + clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>, + <&camcc CAM_CC_CPAS_AHB_CLK>, + <&camcc CAM_CC_CCI_2_CLK>; + clock-names = "camnoc_axi", + "cpas_ahb", + "cci"; + pinctrl-0 = <&cci2_0_default &cci2_1_default>; + pinctrl-1 = <&cci2_0_sleep &cci2_1_sleep>; + pinctrl-names = "default", "sleep"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + + cci2_i2c0: i2c-bus@0 { + reg = <0>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + + cci2_i2c1: i2c-bus@1 { + reg = <1>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + camcc: clock-controller@ade0000 { compatible = "qcom,sm8550-camcc"; reg = <0 0x0ade0000 0 0x20000>; @@ -3393,6 +3485,166 @@ tlmm: pinctrl@f100000 { gpio-ranges = <&tlmm 0 0 211>; wakeup-parent = <&pdc>; + cci0_0_default: cci0-0-default-state { + sda-pins { + pins = "gpio110"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio111"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci0_0_sleep: cci0-0-sleep-state { + sda-pins { + pins = "gpio110"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio111"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci0_1_default: cci0-1-default-state { + sda-pins { + pins = "gpio112"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio113"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci0_1_sleep: cci0-1-sleep-state { + sda-pins { + pins = "gpio112"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio113"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci1_0_default: cci1-0-default-state { + sda-pins { + pins = "gpio114"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio115"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci1_0_sleep: cci1-0-sleep-state { + sda-pins { + pins = "gpio114"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio115"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci2_0_default: cci2-0-default-state { + sda-pins { + pins = "gpio74"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio75"; 
+ function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci2_0_sleep: cci2-0-sleep-state { + sda-pins { + pins = "gpio74"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio75"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci2_1_default: cci2-1-default-state { + sda-pins { + pins = "gpio0"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio1"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci2_1_sleep: cci2-1-sleep-state { + sda-pins { + pins = "gpio0"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio1"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + hub_i2c0_data_clk: hub-i2c0-data-clk-state { /* SDA, SCL */ pins = "gpio16", "gpio17"; diff --git a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts index 591e6ab9bf5b3d..127c7aacd4fc31 100644 --- a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts +++ b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts @@ -271,13 +271,10 @@ wcn7850-pmu { compatible = "qcom,wcn7850-pmu"; pinctrl-names = "default"; - pinctrl-0 = <&wlan_en>; + pinctrl-0 = <&wlan_en>, <&bt_default>; wlan-enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>; - /* - * TODO Add bt-enable-gpios once the Bluetooth driver is - * converted to using the power sequencer. - */ + bt-enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>; vdd-supply = <&vreg_s4i_0p85>; vddio-supply = <&vreg_l15b_1p8>; @@ -1272,20 +1269,15 @@ &uart14 { bluetooth { compatible = "qcom,wcn7850-bt"; - vddio-supply = <&vreg_l3c_1p2>; - vddaon-supply = <&vreg_l15b_1p8>; - vdddig-supply = <&vreg_s3c_0p9>; - vddrfa0p8-supply = <&vreg_s3c_0p9>; - vddrfa1p2-supply = <&vreg_s1c_1p2>; - vddrfa1p9-supply = <&vreg_s6c_1p8>; + vddrfacmn-supply = <&vreg_pmu_rfa_cmn>; + vddaon-supply = <&vreg_pmu_aon_0p59>; + vddwlcx-supply = <&vreg_pmu_wlcx_0p8>; + vddwlmx-supply = <&vreg_pmu_wlmx_0p85>; + vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>; + vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; + vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>; max-speed = <3200000>; - - enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>; - swctrl-gpios = <&tlmm 18 GPIO_ACTIVE_HIGH>; - - pinctrl-0 = <&bt_default>; - pinctrl-names = "default"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts index b0d7927b708f1d..8ca0d28eba9bd0 100644 --- a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts +++ b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts @@ -208,13 +208,10 @@ wcn7850-pmu { compatible = "qcom,wcn7850-pmu"; pinctrl-names = "default"; - pinctrl-0 = <&wlan_en>; + pinctrl-0 = <&wlan_en>, <&bt_default>; wlan-enable-gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>; - /* - * TODO Add bt-enable-gpios once the Bluetooth driver is - * converted to using the power sequencer. 
- */ + bt-enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>; vdd-supply = <&vreg_s4i_0p85>; vddio-supply = <&vreg_l15b_1p8>; @@ -1255,22 +1252,15 @@ &uart14 { bluetooth { compatible = "qcom,wcn7850-bt"; - clocks = <&rpmhcc RPMH_RF_CLK1>; - - vddio-supply = <&vreg_l3c_1p2>; - vddaon-supply = <&vreg_l15b_1p8>; - vdddig-supply = <&vreg_s3c_0p9>; - vddrfa0p8-supply = <&vreg_s3c_0p9>; - vddrfa1p2-supply = <&vreg_s1c_1p2>; - vddrfa1p9-supply = <&vreg_s6c_1p8>; + vddrfacmn-supply = <&vreg_pmu_rfa_cmn>; + vddaon-supply = <&vreg_pmu_aon_0p59>; + vddwlcx-supply = <&vreg_pmu_wlcx_0p8>; + vddwlmx-supply = <&vreg_pmu_wlmx_0p85>; + vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>; + vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; + vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>; max-speed = <3200000>; - - enable-gpios = <&tlmm 17 GPIO_ACTIVE_HIGH>; - swctrl-gpios = <&tlmm 18 GPIO_ACTIVE_HIGH>; - - pinctrl-0 = <&bt_default>; - pinctrl-names = "default"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi index 9d9bbb9aca6445..01ac3769ffa62f 100644 --- a/arch/arm64/boot/dts/qcom/sm8650.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi @@ -3329,6 +3329,105 @@ videocc: clock-controller@aaf0000 { #power-domain-cells = <1>; }; + cci0: cci@ac15000 { + compatible = "qcom,sm8650-cci", "qcom,msm8996-cci"; + reg = <0 0x0ac15000 0 0x1000>; + interrupts = ; + power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>; + clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>, + <&camcc CAM_CC_CPAS_AHB_CLK>, + <&camcc CAM_CC_CCI_0_CLK>; + clock-names = "camnoc_axi", + "cpas_ahb", + "cci"; + pinctrl-0 = <&cci0_0_default &cci0_1_default>; + pinctrl-1 = <&cci0_0_sleep &cci0_1_sleep>; + pinctrl-names = "default", "sleep"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + + cci0_i2c0: i2c-bus@0 { + reg = <0>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + + cci0_i2c1: i2c-bus@1 { + reg = <1>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + cci1: cci@ac16000 { + compatible = "qcom,sm8650-cci", "qcom,msm8996-cci"; + reg = <0 0x0ac16000 0 0x1000>; + interrupts = ; + power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>; + clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>, + <&camcc CAM_CC_CPAS_AHB_CLK>, + <&camcc CAM_CC_CCI_1_CLK>; + clock-names = "camnoc_axi", + "cpas_ahb", + "cci"; + pinctrl-0 = <&cci1_0_default &cci1_1_default>; + pinctrl-1 = <&cci1_0_sleep &cci1_1_sleep>; + pinctrl-names = "default", "sleep"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + + cci1_i2c0: i2c-bus@0 { + reg = <0>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + + cci1_i2c1: i2c-bus@1 { + reg = <1>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + cci2: cci@ac17000 { + compatible = "qcom,sm8650-cci", "qcom,msm8996-cci"; + reg = <0 0x0ac17000 0 0x1000>; + interrupts = ; + power-domains = <&camcc CAM_CC_TITAN_TOP_GDSC>; + clocks = <&camcc CAM_CC_CAMNOC_AXI_NRT_CLK>, + <&camcc CAM_CC_CPAS_AHB_CLK>, + <&camcc CAM_CC_CCI_2_CLK>; + clock-names = "camnoc_axi", + "cpas_ahb", + "cci"; + pinctrl-0 = <&cci2_0_default &cci2_1_default>; + pinctrl-1 = <&cci2_0_sleep &cci2_1_sleep>; + pinctrl-names = "default", "sleep"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + + cci2_i2c0: i2c-bus@0 { + reg = <0>; + clock-frequency = <1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + + cci2_i2c1: i2c-bus@1 { + reg = <1>; + clock-frequency = <1000000>; + #address-cells = 
<1>; + #size-cells = <0>; + }; + }; + camcc: clock-controller@ade0000 { compatible = "qcom,sm8650-camcc"; reg = <0 0x0ade0000 0 0x20000>; @@ -4029,6 +4128,198 @@ tlmm: pinctrl@f100000 { wakeup-parent = <&pdc>; + cci0_0_default: cci0-0-default-state { + sda-pins { + pins = "gpio113"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio114"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci0_0_sleep: cci0-0-sleep-state { + sda-pins { + pins = "gpio113"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio114"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci0_1_default: cci0-1-default-state { + sda-pins { + pins = "gpio115"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio116"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci0_1_sleep: cci0-1-sleep-state { + sda-pins { + pins = "gpio115"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio116"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci1_0_default: cci1-0-default-state { + sda-pins { + pins = "gpio117"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio118"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci1_0_sleep: cci1-0-sleep-state { + sda-pins { + pins = "gpio117"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio118"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci1_1_default: cci1-1-default-state { + sda-pins { + pins = "gpio12"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio13"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci1_1_sleep: cci1-1-sleep-state { + sda-pins { + pins = "gpio12"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio13"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci2_0_default: cci2-0-default-state { + sda-pins { + pins = "gpio112"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio153"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci2_0_sleep: cci2-0-sleep-state { + sda-pins { + pins = "gpio112"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio153"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + cci2_1_default: cci2-1-default-state { + sda-pins { + pins = "gpio119"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + + scl-pins { + pins = "gpio120"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-up = <2200>; + }; + }; + + cci2_1_sleep: cci2-1-sleep-state { + sda-pins { + pins = "gpio119"; + function = "cci_i2c_sda"; + drive-strength = <2>; + bias-pull-down; + }; + + scl-pins { + pins = "gpio120"; + function = "cci_i2c_scl"; + drive-strength = <2>; + bias-pull-down; + }; + }; + hub_i2c0_data_clk: hub-i2c0-data-clk-state { /* SDA, SCL */ pins = "gpio64", "gpio65"; diff 
--git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts new file mode 100644 index 00000000000000..941dfddd67139f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts @@ -0,0 +1,807 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2024, Linaro Limited + */ + +/dts-v1/; + +#include +#include +#include +#include + +#include "x1e80100.dtsi" +#include "x1e80100-pmics.dtsi" + +/ { + model = "Lenovo ThinkPad T14s Gen 6"; + compatible = "lenovo,thinkpad-t14s", "qcom,x1e78100", "qcom,x1e80100"; + chassis-type = "laptop"; + + gpio-keys { + compatible = "gpio-keys"; + + pinctrl-0 = <&hall_int_n_default>; + pinctrl-names = "default"; + + switch-lid { + gpios = <&tlmm 92 GPIO_ACTIVE_LOW>; + linux,input-type = ; + linux,code = ; + wakeup-source; + wakeup-event-action = ; + }; + }; + + pmic-glink { + compatible = "qcom,x1e80100-pmic-glink", + "qcom,sm8550-pmic-glink", + "qcom,pmic-glink"; + orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>, + <&tlmm 123 GPIO_ACTIVE_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + + /* Display-adjacent port */ + connector@0 { + compatible = "usb-c-connector"; + reg = <0>; + power-role = "dual"; + data-role = "dual"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + pmic_glink_ss0_hs_in: endpoint { + remote-endpoint = <&usb_1_ss0_dwc3_hs>; + }; + }; + + port@1 { + reg = <1>; + + pmic_glink_ss0_ss_in: endpoint { + remote-endpoint = <&usb_1_ss0_qmpphy_out>; + }; + }; + }; + }; + + /* User-adjacent port */ + connector@1 { + compatible = "usb-c-connector"; + reg = <1>; + power-role = "dual"; + data-role = "dual"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + pmic_glink_ss1_hs_in: endpoint { + remote-endpoint = <&usb_1_ss1_dwc3_hs>; + }; + }; + + port@1 { + reg = <1>; + + pmic_glink_ss1_ss_in: endpoint { + remote-endpoint = <&usb_1_ss1_qmpphy_out>; + }; + }; + }; + }; + }; + + reserved-memory { + linux,cma { + compatible = "shared-dma-pool"; + size = <0x0 0x8000000>; + reusable; + linux,cma-default; + }; + }; + + vreg_edp_3p3: regulator-edp-3p3 { + compatible = "regulator-fixed"; + + regulator-name = "VREG_EDP_3P3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>; + enable-active-high; + + pinctrl-0 = <&edp_reg_en>; + pinctrl-names = "default"; + + regulator-boot-on; + }; + + vreg_nvme: regulator-nvme { + compatible = "regulator-fixed"; + + regulator-name = "VREG_NVME_3P3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>; + enable-active-high; + + pinctrl-0 = <&nvme_reg_en>; + pinctrl-names = "default"; + }; + + vph_pwr: regulator-vph-pwr { + compatible = "regulator-fixed"; + + regulator-name = "vph_pwr"; + regulator-min-microvolt = <3700000>; + regulator-max-microvolt = <3700000>; + + regulator-always-on; + regulator-boot-on; + }; +}; + +&apps_rsc { + regulators-0 { + compatible = "qcom,pm8550-rpmh-regulators"; + qcom,pmic-id = "b"; + + vdd-bob1-supply = <&vph_pwr>; + vdd-bob2-supply = <&vph_pwr>; + vdd-l1-l4-l10-supply = <&vreg_s4c_1p8>; + vdd-l2-l13-l14-supply = <&vreg_bob1>; + vdd-l5-l16-supply = <&vreg_bob1>; + vdd-l6-l7-supply = <&vreg_bob2>; + vdd-l8-l9-supply = <&vreg_bob1>; + vdd-l12-supply = <&vreg_s5j_1p2>; + vdd-l15-supply = <&vreg_s4c_1p8>; + vdd-l17-supply = 
<&vreg_bob2>; + + vreg_bob1: bob1 { + regulator-name = "vreg_bob1"; + regulator-min-microvolt = <3008000>; + regulator-max-microvolt = <3960000>; + regulator-initial-mode = ; + }; + + vreg_bob2: bob2 { + regulator-name = "vreg_bob2"; + regulator-min-microvolt = <2504000>; + regulator-max-microvolt = <3008000>; + regulator-initial-mode = ; + }; + + vreg_l2b_3p0: ldo2 { + regulator-name = "vreg_l2b_3p0"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = ; + }; + + vreg_l4b_1p8: ldo4 { + regulator-name = "vreg_l4b_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + + vreg_l6b_1p8: ldo6 { + regulator-name = "vreg_l6b_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2960000>; + regulator-initial-mode = ; + }; + + vreg_l8b_3p0: ldo8 { + regulator-name = "vreg_l8b_3p0"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = ; + }; + + vreg_l9b_2p9: ldo9 { + regulator-name = "vreg_l9b_2p9"; + regulator-min-microvolt = <2960000>; + regulator-max-microvolt = <2960000>; + regulator-initial-mode = ; + }; + + vreg_l10b_1p8: ldo10 { + regulator-name = "vreg_l10b_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + + vreg_l12b_1p2: ldo12 { + regulator-name = "vreg_l12b_1p2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = ; + }; + + vreg_l13b_3p0: ldo13 { + regulator-name = "vreg_l13b_3p0"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = ; + }; + + vreg_l14b_3p0: ldo14 { + regulator-name = "vreg_l14b_3p0"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = ; + }; + + vreg_l15b_1p8: ldo15 { + regulator-name = "vreg_l15b_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + + vreg_l17b_2p5: ldo17 { + regulator-name = "vreg_l17b_2p5"; + regulator-min-microvolt = <2504000>; + regulator-max-microvolt = <2504000>; + regulator-initial-mode = ; + }; + }; + + regulators-1 { + compatible = "qcom,pm8550ve-rpmh-regulators"; + qcom,pmic-id = "c"; + + vdd-l1-supply = <&vreg_s5j_1p2>; + vdd-l2-supply = <&vreg_s1f_0p7>; + vdd-l3-supply = <&vreg_s1f_0p7>; + vdd-s4-supply = <&vph_pwr>; + + vreg_s4c_1p8: smps4 { + regulator-name = "vreg_s4c_1p8"; + regulator-min-microvolt = <1856000>; + regulator-max-microvolt = <2000000>; + regulator-initial-mode = ; + }; + + vreg_l1c_1p2: ldo1 { + regulator-name = "vreg_l1c_1p2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = ; + }; + + vreg_l2c_0p8: ldo2 { + regulator-name = "vreg_l2c_0p8"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <880000>; + regulator-initial-mode = ; + }; + + vreg_l3c_0p8: ldo3 { + regulator-name = "vreg_l3c_0p8"; + regulator-min-microvolt = <912000>; + regulator-max-microvolt = <912000>; + regulator-initial-mode = ; + }; + }; + + regulators-2 { + compatible = "qcom,pmc8380-rpmh-regulators"; + qcom,pmic-id = "d"; + + vdd-l1-supply = <&vreg_s1f_0p7>; + vdd-l2-supply = <&vreg_s1f_0p7>; + vdd-l3-supply = <&vreg_s4c_1p8>; + vdd-s1-supply = <&vph_pwr>; + + vreg_l1d_0p8: ldo1 { + regulator-name = "vreg_l1d_0p8"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <880000>; + 
regulator-initial-mode = ; + }; + + vreg_l2d_0p9: ldo2 { + regulator-name = "vreg_l2d_0p9"; + regulator-min-microvolt = <912000>; + regulator-max-microvolt = <912000>; + regulator-initial-mode = ; + }; + + vreg_l3d_1p8: ldo3 { + regulator-name = "vreg_l3d_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + }; + + regulators-3 { + compatible = "qcom,pmc8380-rpmh-regulators"; + qcom,pmic-id = "e"; + + vdd-l2-supply = <&vreg_s1f_0p7>; + vdd-l3-supply = <&vreg_s5j_1p2>; + + vreg_l2e_0p8: ldo2 { + regulator-name = "vreg_l2e_0p8"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <880000>; + regulator-initial-mode = ; + }; + + vreg_l3e_1p2: ldo3 { + regulator-name = "vreg_l3e_1p2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = ; + }; + }; + + regulators-4 { + compatible = "qcom,pmc8380-rpmh-regulators"; + qcom,pmic-id = "f"; + + vdd-l1-supply = <&vreg_s5j_1p2>; + vdd-l2-supply = <&vreg_s5j_1p2>; + vdd-l3-supply = <&vreg_s5j_1p2>; + vdd-s1-supply = <&vph_pwr>; + + vreg_s1f_0p7: smps1 { + regulator-name = "vreg_s1f_0p7"; + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1100000>; + regulator-initial-mode = ; + }; + }; + + regulators-6 { + compatible = "qcom,pm8550ve-rpmh-regulators"; + qcom,pmic-id = "i"; + + vdd-l1-supply = <&vreg_s4c_1p8>; + vdd-l2-supply = <&vreg_s5j_1p2>; + vdd-l3-supply = <&vreg_s1f_0p7>; + vdd-s1-supply = <&vph_pwr>; + vdd-s2-supply = <&vph_pwr>; + + vreg_l1i_1p8: ldo1 { + regulator-name = "vreg_l1i_1p8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + + vreg_l2i_1p2: ldo2 { + regulator-name = "vreg_l2i_1p2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = ; + }; + + vreg_l3i_0p8: ldo3 { + regulator-name = "vreg_l3i_0p8"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <880000>; + regulator-initial-mode = ; + }; + }; + + regulators-7 { + compatible = "qcom,pm8550ve-rpmh-regulators"; + qcom,pmic-id = "j"; + + vdd-l1-supply = <&vreg_s1f_0p7>; + vdd-l2-supply = <&vreg_s5j_1p2>; + vdd-l3-supply = <&vreg_s1f_0p7>; + vdd-s5-supply = <&vph_pwr>; + + vreg_s5j_1p2: smps5 { + regulator-name = "vreg_s5j_1p2"; + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1304000>; + regulator-initial-mode = ; + }; + + vreg_l1j_0p8: ldo1 { + regulator-name = "vreg_l1j_0p8"; + regulator-min-microvolt = <912000>; + regulator-max-microvolt = <912000>; + regulator-initial-mode = ; + }; + + vreg_l2j_1p2: ldo2 { + regulator-name = "vreg_l2j_1p2"; + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1256000>; + regulator-initial-mode = ; + }; + + vreg_l3j_0p8: ldo3 { + regulator-name = "vreg_l3j_0p8"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <880000>; + regulator-initial-mode = ; + }; + }; +}; + +&gpu { + status = "okay"; + + zap-shader { + firmware-name = "qcom/x1e80100/LENOVO/21N1/qcdxkmsuc8380.mbn"; + }; +}; + +&i2c0 { + clock-frequency = <400000>; + + status = "okay"; + + /* ELAN06E2 or ELAN06E3 */ + touchpad@15 { + compatible = "hid-over-i2c"; + reg = <0x15>; + + hid-descr-addr = <0x1>; + interrupts-extended = <&tlmm 3 IRQ_TYPE_LEVEL_LOW>; + + pinctrl-0 = <&tpad_default>; + pinctrl-names = "default"; + + wakeup-source; + }; + + /* TODO: second-sourced SYNA8022 or SYNA8024 touchpad @ 0x2c */ + + /* ELAN06F1 or SYNA06F2 */ + keyboard@3a { + compatible 
= "hid-over-i2c"; + reg = <0x3a>; + + hid-descr-addr = <0x1>; + interrupts-extended = <&tlmm 67 IRQ_TYPE_LEVEL_LOW>; + + pinctrl-0 = <&kybd_default>; + pinctrl-names = "default"; + + wakeup-source; + }; +}; + +&i2c8 { + clock-frequency = <400000>; + + status = "okay"; + + /* ILIT2911 or GTCH1563 */ + touchscreen@10 { + compatible = "hid-over-i2c"; + reg = <0x10>; + + hid-descr-addr = <0x1>; + interrupts-extended = <&tlmm 51 IRQ_TYPE_LEVEL_LOW>; + + pinctrl-0 = <&ts0_default>; + pinctrl-names = "default"; + }; + + /* TODO: second-sourced touchscreen @ 0x41 */ +}; + +&mdss { + status = "okay"; +}; + +&mdss_dp3 { + compatible = "qcom,x1e80100-dp"; + /delete-property/ #sound-dai-cells; + + status = "okay"; + + aux-bus { + panel { + compatible = "edp-panel"; + enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>; + power-supply = <&vreg_edp_3p3>; + + pinctrl-0 = <&edp_bl_en>; + pinctrl-names = "default"; + + port { + edp_panel_in: endpoint { + remote-endpoint = <&mdss_dp3_out>; + }; + }; + }; + }; + + ports { + port@1 { + reg = <1>; + + mdss_dp3_out: endpoint { + data-lanes = <0 1 2 3>; + link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>; + + remote-endpoint = <&edp_panel_in>; + }; + }; + }; +}; + +&mdss_dp3_phy { + vdda-phy-supply = <&vreg_l3j_0p8>; + vdda-pll-supply = <&vreg_l2j_1p2>; + + status = "okay"; +}; + +&pcie4 { + perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>; + wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>; + + pinctrl-0 = <&pcie4_default>; + pinctrl-names = "default"; + + status = "okay"; +}; + +&pcie4_phy { + vdda-phy-supply = <&vreg_l3i_0p8>; + vdda-pll-supply = <&vreg_l3e_1p2>; + + status = "okay"; +}; + +&pcie6a { + perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>; + wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>; + + vddpe-3v3-supply = <&vreg_nvme>; + + pinctrl-0 = <&pcie6a_default>; + pinctrl-names = "default"; + + status = "okay"; +}; + +&pcie6a_phy { + vdda-phy-supply = <&vreg_l1d_0p8>; + vdda-pll-supply = <&vreg_l2j_1p2>; + + status = "okay"; +}; + +&pmc8380_3_gpios { + edp_bl_en: edp-bl-en-state { + pins = "gpio4"; + function = "normal"; + power-source = <1>; + input-disable; + output-enable; + }; +}; + +&qupv3_0 { + status = "okay"; +}; + +&qupv3_1 { + status = "okay"; +}; + +&qupv3_2 { + status = "okay"; +}; + +&remoteproc_adsp { + firmware-name = "qcom/x1e80100/LENOVO/21N1/qcadsp8380.mbn", + "qcom/x1e80100/LENOVO/21N1/adsp_dtbs.elf"; + + status = "okay"; +}; + +&remoteproc_cdsp { + firmware-name = "qcom/x1e80100/LENOVO/21N1/qccdsp8380.mbn", + "qcom/x1e80100/LENOVO/21N1/cdsp_dtbs.elf"; + + status = "okay"; +}; + +&smb2360_0_eusb2_repeater { + vdd18-supply = <&vreg_l3d_1p8>; + vdd3-supply = <&vreg_l2b_3p0>; +}; + +&smb2360_1_eusb2_repeater { + vdd18-supply = <&vreg_l3d_1p8>; + vdd3-supply = <&vreg_l14b_3p0>; +}; + +&tlmm { + gpio-reserved-ranges = <34 2>, /* Unused */ + <44 4>, /* SPI (TPM) */ + <72 2>, /* Secure EC I2C connection (?) 
*/ + <238 1>; /* UFS Reset */ + + tpad_default: tpad-default-state { + pins = "gpio3"; + function = "gpio"; + bias-pull-up; + }; + + nvme_reg_en: nvme-reg-en-state { + pins = "gpio18"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + ts0_default: ts0-default-state { + reset-n-pins { + pins = "gpio48"; + function = "gpio"; + output-high; + drive-strength = <16>; + }; + + int-n-pins { + pins = "gpio51"; + function = "gpio"; + bias-disable; + }; + }; + + kybd_default: kybd-default-state { + pins = "gpio67"; + function = "gpio"; + bias-disable; + }; + + edp_reg_en: edp-reg-en-state { + pins = "gpio70"; + function = "gpio"; + drive-strength = <16>; + bias-disable; + }; + + hall_int_n_default: hall-int-n-state { + pins = "gpio92"; + function = "gpio"; + bias-disable; + }; + + pcie4_default: pcie4-default-state { + clkreq-n-pins { + pins = "gpio147"; + function = "pcie4_clk"; + drive-strength = <2>; + bias-pull-up; + }; + + perst-n-pins { + pins = "gpio146"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + wake-n-pins { + pins = "gpio148"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + pcie6a_default: pcie6a-default-state { + clkreq-n-pins { + pins = "gpio153"; + function = "pcie6a_clk"; + drive-strength = <2>; + bias-pull-up; + }; + + perst-n-pins { + pins = "gpio152"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + wake-n-pins { + pins = "gpio154"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + wcd_default: wcd-reset-n-active-state { + pins = "gpio191"; + function = "gpio"; + drive-strength = <16>; + bias-disable; + output-low; + }; +}; + +&usb_1_ss0_hsphy { + vdd-supply = <&vreg_l3j_0p8>; + vdda12-supply = <&vreg_l2j_1p2>; + + phys = <&smb2360_0_eusb2_repeater>; + + status = "okay"; +}; + +&usb_1_ss0_qmpphy { + vdda-phy-supply = <&vreg_l3e_1p2>; + vdda-pll-supply = <&vreg_l1j_0p8>; + + status = "okay"; +}; + +&usb_1_ss0 { + status = "okay"; +}; + +&usb_1_ss0_dwc3 { + dr_mode = "host"; +}; + +&usb_1_ss0_dwc3_hs { + remote-endpoint = <&pmic_glink_ss0_hs_in>; +}; + +&usb_1_ss0_qmpphy_out { + remote-endpoint = <&pmic_glink_ss0_ss_in>; +}; + +&usb_1_ss1_hsphy { + vdd-supply = <&vreg_l3j_0p8>; + vdda12-supply = <&vreg_l2j_1p2>; + + phys = <&smb2360_1_eusb2_repeater>; + + status = "okay"; +}; + +&usb_1_ss1_qmpphy { + vdda-phy-supply = <&vreg_l3e_1p2>; + vdda-pll-supply = <&vreg_l2d_0p9>; + + status = "okay"; +}; + +&usb_1_ss1 { + status = "okay"; +}; + +&usb_1_ss1_dwc3 { + dr_mode = "host"; +}; + +&usb_1_ss1_dwc3_hs { + remote-endpoint = <&pmic_glink_ss1_hs_in>; +}; + +&usb_1_ss1_qmpphy_out { + remote-endpoint = <&pmic_glink_ss1_ss_in>; +}; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts index 9caa14dda58552..20616bd4aa6c5c 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts @@ -501,10 +501,6 @@ &smb2360_1_eusb2_repeater { vdd3-supply = <&vreg_l14b_3p0>; }; -&smb2360_2 { - status = "disabled"; -}; - &tlmm { gpio-reserved-ranges = <34 2>, /* Unused */ <44 4>, /* SPI (TPM) */ diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts index e17ab8251e2a55..10b28d870f0819 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts @@ -6,6 +6,8 @@ /dts-v1/; #include +#include +#include #include #include "x1e80100.dtsi" @@ -49,6 +51,21 @@ chosen { 
stdout-path = "serial0:115200n8"; }; + gpio-keys { + compatible = "gpio-keys"; + + pinctrl-0 = <&hall_int_n_default>; + pinctrl-names = "default"; + + switch-lid { + gpios = <&tlmm 92 GPIO_ACTIVE_LOW>; + linux,input-type = ; + linux,code = ; + wakeup-source; + wakeup-event-action = ; + }; + }; + pmic-glink { compatible = "qcom,x1e80100-pmic-glink", "qcom,sm8550-pmic-glink", @@ -284,6 +301,22 @@ vreg_nvme: regulator-nvme { pinctrl-names = "default"; pinctrl-0 = <&nvme_reg_en>; }; + + vreg_wwan: regulator-wwan { + compatible = "regulator-fixed"; + + regulator-name = "SDX_VPH_PWR"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + gpio = <&tlmm 221 GPIO_ACTIVE_HIGH>; + enable-active-high; + + pinctrl-0 = <&wwan_sw_en>; + pinctrl-names = "default"; + + regulator-boot-on; + }; }; &apps_rsc { @@ -783,6 +816,25 @@ &pcie4_phy { status = "okay"; }; +&pcie5 { + perst-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>; + wake-gpios = <&tlmm 151 GPIO_ACTIVE_LOW>; + + vddpe-3v3-supply = <&vreg_wwan>; + + pinctrl-0 = <&pcie5_default>; + pinctrl-names = "default"; + + status = "okay"; +}; + +&pcie5_phy { + vdda-phy-supply = <&vreg_l3i_0p8>; + vdda-pll-supply = <&vreg_l3e_1p2>; + + status = "okay"; +}; + &pcie6a { perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>; wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>; @@ -848,6 +900,10 @@ &smb2360_1_eusb2_repeater { vdd3-supply = <&vreg_l14b_3p0>; }; +&smb2360_2 { + status = "okay"; +}; + &smb2360_2_eusb2_repeater { vdd18-supply = <&vreg_l3d_1p8>; vdd3-supply = <&vreg_l8b_3p0>; @@ -868,6 +924,7 @@ left_woofer: speaker@0,0 { sound-name-prefix = "WooferLeft"; vdd-1p8-supply = <&vreg_l15b_1p8>; vdd-io-supply = <&vreg_l12b_1p2>; + qcom,port-mapping = <1 2 3 7 10 13>; }; /* WSA8845, Left Tweeter */ @@ -879,6 +936,7 @@ left_tweeter: speaker@0,1 { sound-name-prefix = "TwitterLeft"; vdd-1p8-supply = <&vreg_l15b_1p8>; vdd-io-supply = <&vreg_l12b_1p2>; + qcom,port-mapping = <4 5 6 7 11 13>; }; }; @@ -919,6 +977,7 @@ right_woofer: speaker@0,0 { sound-name-prefix = "WooferRight"; vdd-1p8-supply = <&vreg_l15b_1p8>; vdd-io-supply = <&vreg_l12b_1p2>; + qcom,port-mapping = <1 2 3 7 10 13>; }; /* WSA8845, Right Tweeter */ @@ -930,6 +989,7 @@ right_tweeter: speaker@0,1 { sound-name-prefix = "TwitterRight"; vdd-1p8-supply = <&vreg_l15b_1p8>; vdd-io-supply = <&vreg_l12b_1p2>; + qcom,port-mapping = <4 5 6 7 11 13>; }; }; @@ -945,6 +1005,12 @@ edp_reg_en: edp-reg-en-state { bias-disable; }; + hall_int_n_default: hall-int-n-state { + pins = "gpio92"; + function = "gpio"; + bias-disable; + }; + kybd_default: kybd-default-state { pins = "gpio67"; function = "gpio"; @@ -981,6 +1047,29 @@ wake-n-pins { }; }; + pcie5_default: pcie5-default-state { + clkreq-n-pins { + pins = "gpio150"; + function = "pcie5_clk"; + drive-strength = <2>; + bias-pull-up; + }; + + perst-n-pins { + pins = "gpio149"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + wake-n-pins { + pins = "gpio151"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + pcie6a_default: pcie6a-default-state { clkreq-n-pins { pins = "gpio153"; @@ -1032,6 +1121,13 @@ wcd_default: wcd-reset-n-active-state { bias-disable; output-low; }; + + wwan_sw_en: wwan-sw-en-state { + pins = "gpio221"; + function = "gpio"; + drive-strength = <4>; + bias-disable; + }; }; &uart21 { diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts index 1943bdbfb8c00c..3c13331a9ef420 100644 --- 
a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts @@ -190,7 +190,6 @@ vreg_edp_3p3: regulator-edp-3p3 { pinctrl-0 = <&edp_reg_en>; pinctrl-names = "default"; - regulator-always-on; regulator-boot-on; }; @@ -592,9 +591,13 @@ &mdss_dp3 { aux-bus { panel { - compatible = "edp-panel"; + compatible = "samsung,atna45dc02", "samsung,atna33xc20"; + enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>; power-supply = <&vreg_edp_3p3>; + pinctrl-0 = <&edp_bl_en>; + pinctrl-names = "default"; + port { edp_panel_in: endpoint { remote-endpoint = <&mdss_dp3_out>; @@ -669,6 +672,16 @@ &pcie6a_phy { status = "okay"; }; +&pmc8380_3_gpios { + edp_bl_en: edp-bl-en-state { + pins = "gpio4"; + function = "normal"; + power-source = <0>; + input-disable; + output-enable; + }; +}; + &qupv3_0 { status = "okay"; }; @@ -704,6 +717,10 @@ &smb2360_1_eusb2_repeater { vdd3-supply = <&vreg_l14b_3p0>; }; +&smb2360_2 { + status = "okay"; +}; + &smb2360_2_eusb2_repeater { vdd18-supply = <&vreg_l3d_1p8>; vdd3-supply = <&vreg_l8b_3p0>; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi new file mode 100644 index 00000000000000..42e02ad6a9c328 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi @@ -0,0 +1,835 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include + +#include "x1e80100.dtsi" +#include "x1e80100-pmics.dtsi" + +/ { + aliases { + serial0 = &uart2; + i2c0 = &i2c0; + i2c3 = &i2c3; + i2c4 = &i2c4; + i2c5 = &i2c5; + i2c7 = &i2c7; + }; + + backlight: backlight { + compatible = "pwm-backlight"; + pwms = <&pmk8550_pwm 0 5000000>; + enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>; + /* TODO: power-supply? 
*/ + + pinctrl-0 = <&edp_bl_en>, <&edp_bl_pwm>; + pinctrl-names = "default"; + }; + + leds { + compatible = "gpio-leds"; + + pinctrl-names = "default"; + pinctrl-0 = <&cam_indicator_en>; + + led-camera-indicator { + label = "white:camera-indicator"; + function = LED_FUNCTION_INDICATOR; + color = ; + gpios = <&tlmm 225 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "none"; + default-state = "off"; + /* Reuse as a panic indicator until we get a "camera on" trigger */ + panic-indicator; + }; + }; + + pmic-glink { + compatible = "qcom,x1e80100-pmic-glink", + "qcom,sm8550-pmic-glink", + "qcom,pmic-glink"; + #address-cells = <1>; + #size-cells = <0>; + orientation-gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>, + <&tlmm 123 GPIO_ACTIVE_HIGH>; + + /* Left-side rear port */ + connector@0 { + compatible = "usb-c-connector"; + reg = <0>; + power-role = "dual"; + data-role = "dual"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + pmic_glink_ss0_hs_in: endpoint { + remote-endpoint = <&usb_1_ss0_dwc3_hs>; + }; + }; + + port@1 { + reg = <1>; + + pmic_glink_ss0_ss_in: endpoint { + remote-endpoint = <&usb_1_ss0_qmpphy_out>; + }; + }; + }; + }; + + /* Left-side front port */ + connector@1 { + compatible = "usb-c-connector"; + reg = <1>; + power-role = "dual"; + data-role = "dual"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + pmic_glink_ss1_hs_in: endpoint { + remote-endpoint = <&usb_1_ss1_dwc3_hs>; + }; + }; + + port@1 { + reg = <1>; + + pmic_glink_ss1_ss_in: endpoint { + remote-endpoint = <&usb_1_ss1_qmpphy_out>; + }; + }; + }; + }; + }; + + reserved-memory { + linux,cma { + compatible = "shared-dma-pool"; + size = <0x0 0x8000000>; + reusable; + linux,cma-default; + }; + }; + + vph_pwr: vph-pwr-regulator { + compatible = "regulator-fixed"; + + regulator-name = "vph_pwr"; + regulator-min-microvolt = <3700000>; + regulator-max-microvolt = <3700000>; + + regulator-always-on; + regulator-boot-on; + }; + + vreg_edp_3p3: regulator-edp-3p3 { + compatible = "regulator-fixed"; + + regulator-name = "VREG_EDP_3P3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + gpio = <&tlmm 70 GPIO_ACTIVE_HIGH>; + enable-active-high; + + pinctrl-0 = <&edp_reg_en>; + pinctrl-names = "default"; + + regulator-boot-on; + }; + + vreg_nvme: regulator-nvme { + compatible = "regulator-fixed"; + + regulator-name = "VREG_NVME_3P3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + gpio = <&tlmm 18 GPIO_ACTIVE_HIGH>; + enable-active-high; + + pinctrl-0 = <&nvme_reg_en>; + pinctrl-names = "default"; + }; +}; + +&apps_rsc { + regulators-0 { + compatible = "qcom,pm8550-rpmh-regulators"; + qcom,pmic-id = "b"; + + vdd-bob1-supply = <&vph_pwr>; + vdd-bob2-supply = <&vph_pwr>; + vdd-l1-l4-l10-supply = <&vreg_s4c>; + vdd-l2-l13-l14-supply = <&vreg_bob1>; + vdd-l5-l16-supply = <&vreg_bob1>; + vdd-l6-l7-supply = <&vreg_bob2>; + vdd-l8-l9-supply = <&vreg_bob1>; + vdd-l12-supply = <&vreg_s5j>; + vdd-l15-supply = <&vreg_s4c>; + vdd-l17-supply = <&vreg_bob2>; + + vreg_bob1: bob1 { + regulator-name = "vreg_bob1"; + regulator-min-microvolt = <3008000>; + regulator-max-microvolt = <3960000>; + regulator-initial-mode = ; + }; + + vreg_bob2: bob2 { + regulator-name = "vreg_bob2"; + regulator-min-microvolt = <2504000>; + regulator-max-microvolt = <3008000>; + regulator-initial-mode = ; + }; + + vreg_l1b: ldo1 { + regulator-name = "vreg_l1b"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + 
regulator-initial-mode = ; + }; + + vreg_l2b: ldo2 { + regulator-name = "vreg_l2b"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = ; + }; + + vreg_l4b: ldo4 { + regulator-name = "vreg_l4b"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + + vreg_l5b: ldo5 { + regulator-name = "vreg_l5b"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-initial-mode = ; + }; + + vreg_l6b: ldo6 { + regulator-name = "vreg_l6b"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2960000>; + regulator-initial-mode = ; + }; + + vreg_l7b: ldo7 { + regulator-name = "vreg_l7b"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-initial-mode = ; + }; + + vreg_l8b: ldo8 { + regulator-name = "vreg_l8b"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = ; + }; + + vreg_l9b: ldo9 { + regulator-name = "vreg_l9b"; + regulator-min-microvolt = <2960000>; + regulator-max-microvolt = <2960000>; + regulator-initial-mode = ; + }; + + vreg_l10b: ldo10 { + regulator-name = "vreg_l10b"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + + vreg_l12b: ldo12 { + regulator-name = "vreg_l12b"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = ; + }; + + vreg_l13b: ldo13 { + regulator-name = "vreg_l13b"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = ; + }; + + vreg_l14b: ldo14 { + regulator-name = "vreg_l14b"; + regulator-min-microvolt = <3072000>; + regulator-max-microvolt = <3072000>; + regulator-initial-mode = ; + }; + + vreg_l15b: ldo15 { + regulator-name = "vreg_l15b"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + + vreg_l16b: ldo16 { + regulator-name = "vreg_l16b"; + regulator-min-microvolt = <2912000>; + regulator-max-microvolt = <2912000>; + regulator-initial-mode = ; + }; + + vreg_l17b: ldo17 { + regulator-name = "vreg_l17b"; + regulator-min-microvolt = <2504000>; + regulator-max-microvolt = <2504000>; + regulator-initial-mode = ; + }; + }; + + regulators-1 { + compatible = "qcom,pm8550ve-rpmh-regulators"; + qcom,pmic-id = "c"; + + vdd-l1-supply = <&vreg_s5j>; + vdd-l2-supply = <&vreg_s1f>; + vdd-l3-supply = <&vreg_s1f>; + vdd-s4-supply = <&vph_pwr>; + + vreg_s4c: smps4 { + regulator-name = "vreg_s4c"; + regulator-min-microvolt = <1856000>; + regulator-max-microvolt = <2000000>; + regulator-initial-mode = ; + }; + + vreg_l1c: ldo1 { + regulator-name = "vreg_l1c"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = ; + }; + + vreg_l2c: ldo2 { + regulator-name = "vreg_l2c"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = ; + }; + + vreg_l3c: ldo3 { + regulator-name = "vreg_l3c"; + regulator-min-microvolt = <912000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = ; + }; + }; + + regulators-2 { + compatible = "qcom,pmc8380-rpmh-regulators"; + qcom,pmic-id = "d"; + + vdd-l1-supply = <&vreg_s1f>; + vdd-l2-supply = <&vreg_s1f>; + vdd-l3-supply = <&vreg_s4c>; + vdd-s1-supply = <&vph_pwr>; + + vreg_l1d: ldo1 { + regulator-name = "vreg_l1d"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt 
= <920000>; + regulator-initial-mode = ; + }; + + vreg_l2d: ldo2 { + regulator-name = "vreg_l2d"; + regulator-min-microvolt = <912000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = ; + }; + + vreg_l3d: ldo3 { + regulator-name = "vreg_l3d"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + }; + + regulators-3 { + compatible = "qcom,pmc8380-rpmh-regulators"; + qcom,pmic-id = "e"; + + vdd-l2-supply = <&vreg_s1f>; + vdd-l3-supply = <&vreg_s5j>; + + vreg_l2e: ldo2 { + regulator-name = "vreg_l2e"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = ; + }; + + vreg_l3e: ldo3 { + regulator-name = "vreg_l3e"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = ; + }; + }; + + regulators-4 { + compatible = "qcom,pmc8380-rpmh-regulators"; + qcom,pmic-id = "f"; + + vdd-l1-supply = <&vreg_s5j>; + vdd-l2-supply = <&vreg_s5j>; + vdd-l3-supply = <&vreg_s5j>; + vdd-s1-supply = <&vph_pwr>; + + vreg_s1f: smps1 { + regulator-name = "vreg_s1f"; + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1100000>; + regulator-initial-mode = ; + }; + + vreg_l1f: ldo1 { + regulator-name = "vreg_l1f"; + regulator-min-microvolt = <1024000>; + regulator-max-microvolt = <1024000>; + regulator-initial-mode = ; + }; + + vreg_l2f: ldo2 { + regulator-name = "vreg_l2f"; + regulator-min-microvolt = <1024000>; + regulator-max-microvolt = <1024000>; + regulator-initial-mode = ; + }; + + vreg_l3f: ldo3 { + regulator-name = "vreg_l3f"; + regulator-min-microvolt = <1024000>; + regulator-max-microvolt = <1024000>; + regulator-initial-mode = ; + }; + }; + + regulators-6 { + compatible = "qcom,pm8550ve-rpmh-regulators"; + qcom,pmic-id = "i"; + + vdd-l1-supply = <&vreg_s4c>; + vdd-l2-supply = <&vreg_s5j>; + vdd-l3-supply = <&vreg_s1f>; + vdd-s1-supply = <&vph_pwr>; + vdd-s2-supply = <&vph_pwr>; + + vreg_s1i: smps1 { + regulator-name = "vreg_s1i"; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = ; + }; + + vreg_s2i: smps2 { + regulator-name = "vreg_s2i"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1100000>; + regulator-initial-mode = ; + }; + + vreg_l1i: ldo1 { + regulator-name = "vreg_l1i"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-initial-mode = ; + }; + + vreg_l2i: ldo2 { + regulator-name = "vreg_l2i"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-initial-mode = ; + }; + + vreg_l3i: ldo3 { + regulator-name = "vreg_l3i"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = ; + }; + }; + + regulators-7 { + compatible = "qcom,pm8550ve-rpmh-regulators"; + qcom,pmic-id = "j"; + + vdd-l1-supply = <&vreg_s1f>; + vdd-l2-supply = <&vreg_s5j>; + vdd-l3-supply = <&vreg_s1f>; + vdd-s5-supply = <&vph_pwr>; + + vreg_s5j: smps5 { + regulator-name = "vreg_s5j"; + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1304000>; + regulator-initial-mode = ; + }; + + vreg_l1j: ldo1 { + regulator-name = "vreg_l1j"; + regulator-min-microvolt = <912000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = ; + }; + + vreg_l2j: ldo2 { + regulator-name = "vreg_l2j"; + regulator-min-microvolt = <1256000>; + regulator-max-microvolt = <1256000>; + regulator-initial-mode = ; + }; + + vreg_l3j: ldo3 { + regulator-name = 
"vreg_l3j"; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <920000>; + regulator-initial-mode = ; + }; + }; +}; + +&gpu { + status = "okay"; + + zap-shader { + memory-region = <&gpu_microcode_mem>; + firmware-name = "qcom/x1e80100/microsoft/qcdxkmsuc8380.mbn"; + }; +}; + +&i2c0 { + clock-frequency = <100000>; + + status = "okay"; + + /* Something @39, @3e, @44 */ +}; + +&i2c3 { + clock-frequency = <400000>; + + status = "okay"; + + /* PS8830 USB retimer @8 */ +}; + +&i2c4 { + clock-frequency = <400000>; + + status = "okay"; + + /* Something @18, @2c, @2e */ +}; + +&i2c5 { + clock-frequency = <400000>; + + status = "okay"; + + /* Something @4f */ +}; + +&i2c7 { + clock-frequency = <400000>; + + status = "okay"; + + /* PS8830 USB retimer @8 */ +}; + + +&mdss { + status = "okay"; +}; + +&mdss_dp3 { + compatible = "qcom,x1e80100-dp"; + /delete-property/ #sound-dai-cells; + + status = "okay"; + + aux-bus { + panel { + compatible = "edp-panel"; + + backlight = <&backlight>; + power-supply = <&vreg_edp_3p3>; + + port { + edp_panel_in: endpoint { + remote-endpoint = <&mdss_dp3_out>; + }; + }; + }; + }; + + ports { + port@1 { + reg = <1>; + + mdss_dp3_out: endpoint { + data-lanes = <0 1 2 3>; + link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>; + + remote-endpoint = <&edp_panel_in>; + }; + }; + }; +}; + +&mdss_dp3_phy { + vdda-phy-supply = <&vreg_l3j>; + vdda-pll-supply = <&vreg_l2j>; + + status = "okay"; +}; + +&pcie4 { + status = "okay"; +}; + +&pcie4_phy { + vdda-phy-supply = <&vreg_l3i>; + vdda-pll-supply = <&vreg_l3e>; + + status = "okay"; +}; + +&pcie6a { + perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>; + wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>; + + vddpe-3v3-supply = <&vreg_nvme>; + + pinctrl-0 = <&pcie6a_default>; + pinctrl-names = "default"; + + status = "okay"; +}; + +&pcie6a_phy { + vdda-phy-supply = <&vreg_l1d>; + vdda-pll-supply = <&vreg_l2j>; + + status = "okay"; +}; + +&pmc8380_3_gpios { + edp_bl_en: edp-bl-en-state { + pins = "gpio4"; + function = "normal"; + power-source = <1>; /* 1.8V */ + input-disable; + output-enable; + }; +}; + +&pmk8550_pwm { + status = "okay"; +}; + +&pmk8550_gpios { + edp_bl_pwm: edp-bl-pwm-state { + pins = "gpio5"; + function = "func3"; + }; +}; + +&qupv3_0 { + status = "okay"; +}; + +&qupv3_1 { + status = "okay"; +}; + +&qupv3_2 { + status = "okay"; +}; + +&remoteproc_adsp { + firmware-name = "qcom/x1e80100/microsoft/Romulus/qcadsp8380.mbn", + "qcom/x1e80100/microsoft/Romulus/adsp_dtb.mbn"; + + status = "okay"; +}; + +&remoteproc_cdsp { + firmware-name = "qcom/x1e80100/microsoft/Romulus/qccdsp8380.mbn", + "qcom/x1e80100/microsoft/Romulus/cdsp_dtb.mbn"; + + status = "okay"; +}; + +&smb2360_0_eusb2_repeater { + vdd18-supply = <&vreg_l3d>; + vdd3-supply = <&vreg_l2b>; +}; + +&smb2360_1_eusb2_repeater { + vdd18-supply = <&vreg_l3d>; + vdd3-supply = <&vreg_l14b>; +}; + +&tlmm { + gpio-reserved-ranges = <44 4>, /* SPI (TPM) */ + <238 1>; /* UFS Reset */ + + nvme_reg_en: nvme-reg-en-state { + pins = "gpio18"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + edp_reg_en: edp-reg-en-state { + pins = "gpio70"; + function = "gpio"; + drive-strength = <16>; + bias-disable; + }; + + ssam_state: ssam-state-state { + pins = "gpio91"; + function = "gpio"; + bias-disable; + }; + + pcie6a_default: pcie6a-default-state { + perst-n-pins { + pins = "gpio152"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; + + clkreq-n-pins { + pins = "gpio153"; + function = "pcie6a_clk"; + drive-strength = <2>; + 
bias-pull-up; + }; + + wake-n-pins { + pins = "gpio154"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + cam_indicator_en: cam-indicator-en-state { + pins = "gpio225"; + function = "gpio"; + drive-strength = <2>; + bias-disable; + }; +}; + +&uart2 { + status = "okay"; + + embedded-controller { + compatible = "microsoft,surface-sam"; + + interrupts-extended = <&tlmm 91 IRQ_TYPE_EDGE_RISING>; + + current-speed = <4000000>; + + pinctrl-0 = <&ssam_state>; + pinctrl-names = "default"; + }; +}; + +&usb_1_ss0_hsphy { + vdd-supply = <&vreg_l3j>; + vdda12-supply = <&vreg_l2j>; + + phys = <&smb2360_0_eusb2_repeater>; + + status = "okay"; +}; + +&usb_1_ss0_qmpphy { + vdda-phy-supply = <&vreg_l3e>; + vdda-pll-supply = <&vreg_l1j>; + + status = "okay"; +}; + +&usb_1_ss0 { + status = "okay"; +}; + +&usb_1_ss0_dwc3 { + dr_mode = "host"; +}; + +&usb_1_ss0_dwc3_hs { + remote-endpoint = <&pmic_glink_ss0_hs_in>; +}; + +&usb_1_ss0_qmpphy_out { + remote-endpoint = <&pmic_glink_ss0_ss_in>; +}; + +&usb_1_ss1_hsphy { + vdd-supply = <&vreg_l3j>; + vdda12-supply = <&vreg_l2j>; + + phys = <&smb2360_1_eusb2_repeater>; + + status = "okay"; +}; + +&usb_1_ss1_qmpphy { + vdda-phy-supply = <&vreg_l3e>; + vdda-pll-supply = <&vreg_l2d>; + + status = "okay"; +}; + +&usb_1_ss1 { + status = "okay"; +}; + +&usb_1_ss1_dwc3 { + dr_mode = "host"; +}; + +&usb_1_ss1_dwc3_hs { + remote-endpoint = <&pmic_glink_ss1_hs_in>; +}; + +&usb_1_ss1_qmpphy_out { + remote-endpoint = <&pmic_glink_ss1_ss_in>; +}; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus13.dts b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus13.dts new file mode 100644 index 00000000000000..eb7580dd968469 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus13.dts @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +/dts-v1/; + +#include "x1e80100-microsoft-romulus.dtsi" + +/ { + model = "Microsoft Surface Laptop 7 (13.8 inch)"; + compatible = "microsoft,romulus13", "qcom,x1e80100"; +}; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus15.dts b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus15.dts new file mode 100644 index 00000000000000..4751ad9b510bee --- /dev/null +++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus15.dts @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +/dts-v1/; + +#include "x1e80100-microsoft-romulus.dtsi" + +/ { + model = "Microsoft Surface Laptop 7 (15 inch)"; + compatible = "microsoft,romulus15", "qcom,x1e80100"; +}; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi index e34e70922cd335..5b54ee79f048e3 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi +++ b/arch/arm64/boot/dts/qcom/x1e80100-pmics.dtsi @@ -249,6 +249,14 @@ pmk8550_gpios: gpio@8800 { interrupt-controller; #interrupt-cells = <2>; }; + + pmk8550_pwm: pwm { + compatible = "qcom,pmk8550-pwm"; + + #pwm-cells = <2>; + + status = "disabled"; + }; }; /* PMC8380C */ @@ -509,6 +517,8 @@ smb2360_2: pmic@b { #address-cells = <1>; #size-cells = <0>; + status = "disabled"; + smb2360_2_eusb2_repeater: phy@fd00 { compatible = "qcom,smb2360-eusb2-repeater"; reg = <0xfd00>; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts index 8098e6730ae52f..1c3a6a7b3ed628 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts @@ -729,10 +729,6 @@ &remoteproc_cdsp { status = "okay"; }; -&smb2360_3 { - status = "okay"; -}; - &smb2360_0_eusb2_repeater { vdd18-supply = <&vreg_l3d_1p8>; vdd3-supply = <&vreg_l2b_3p0>; @@ -743,11 +739,19 @@ &smb2360_1_eusb2_repeater { vdd3-supply = <&vreg_l14b_3p0>; }; +&smb2360_2 { + status = "okay"; +}; + &smb2360_2_eusb2_repeater { vdd18-supply = <&vreg_l3d_1p8>; vdd3-supply = <&vreg_l8b_3p0>; }; +&smb2360_3 { + status = "okay"; +}; + &swr0 { pinctrl-0 = <&wsa_swr_active>, <&spkr_01_sd_n_active>; pinctrl-names = "default"; @@ -763,6 +767,7 @@ left_spkr: speaker@0,0 { sound-name-prefix = "SpkrLeft"; vdd-1p8-supply = <&vreg_l15b_1p8>; vdd-io-supply = <&vreg_l12b_1p2>; + qcom,port-mapping = <1 2 3 7 10 13>; }; /* WSA8845, Right Speaker */ @@ -774,6 +779,7 @@ right_spkr: speaker@0,1 { sound-name-prefix = "SpkrRight"; vdd-1p8-supply = <&vreg_l15b_1p8>; vdd-io-supply = <&vreg_l12b_1p2>; + qcom,port-mapping = <4 5 6 7 11 13>; }; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi index cd732ef88cd8e0..a36076e3c56b5b 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi +++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -745,7 +746,7 @@ gcc: clock-controller@100000 { <&sleep_clk>, <0>, <&pcie4_phy>, - <0>, + <&pcie5_phy>, <&pcie6a_phy>, <0>, <&usb_1_ss0_qmpphy QMP_USB43DP_USB3_PIPE_CLK>, @@ -1979,7 +1980,7 @@ qupv3_0: geniqup@bc0000 { i2c0: i2c@b80000 { compatible = "qcom,geni-i2c"; - reg = <0 0xb80000 0 0x4000>; + reg = <0 0x00b80000 0 0x4000>; interrupts = ; @@ -2142,9 +2143,31 @@ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>, status = "disabled"; }; + uart2: serial@b88000 { + compatible = "qcom,geni-uart"; + reg = <0 0x00b88000 0 0x4000>; + + interrupts = ; + + clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>; + clock-names = "se"; + + interconnects = <&clk_virt MASTER_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS + &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>, + <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS + &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>; + interconnect-names = "qup-core", + "qup-config"; + + pinctrl-0 = <&qup_uart2_default>; + pinctrl-names = "default"; + + status = "disabled"; + }; + spi2: spi@b88000 { compatible = "qcom,geni-spi"; - reg = <0 0xb88000 0 0x4000>; + reg = <0 0x00b88000 0 0x4000>; interrupts = ; @@ -2243,7 +2266,7 @@ &config_noc SLAVE_QUP_0 QCOM_ICC_TAG_ALWAYS>, i2c4: i2c@b90000 { compatible = 
"qcom,geni-i2c"; - reg = <0 0xb90000 0 0x4000>; + reg = <0 0x00b90000 0 0x4000>; interrupts = ; @@ -2603,6 +2626,8 @@ usb_1_ss0_qmpphy: phy@fd5000 { #clock-cells = <1>; #phy-cells = <1>; + orientation-switch; + status = "disabled"; ports { @@ -2671,6 +2696,8 @@ usb_1_ss1_qmpphy: phy@fda000 { #clock-cells = <1>; #phy-cells = <1>; + orientation-switch; + status = "disabled"; ports { @@ -2739,6 +2766,8 @@ usb_1_ss2_qmpphy: phy@fdf000 { #clock-cells = <1>; #phy-cells = <1>; + orientation-switch; + status = "disabled"; ports { @@ -2772,7 +2801,7 @@ usb_1_ss2_qmpphy_dp_in: endpoint { cnoc_main: interconnect@1500000 { compatible = "qcom,x1e80100-cnoc-main"; - reg = <0 0x1500000 0 0x14400>; + reg = <0 0x01500000 0 0x14400>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2781,7 +2810,7 @@ cnoc_main: interconnect@1500000 { config_noc: interconnect@1600000 { compatible = "qcom,x1e80100-cnoc-cfg"; - reg = <0 0x1600000 0 0x6600>; + reg = <0 0x01600000 0 0x6600>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2790,7 +2819,7 @@ config_noc: interconnect@1600000 { system_noc: interconnect@1680000 { compatible = "qcom,x1e80100-system-noc"; - reg = <0 0x1680000 0 0x1c080>; + reg = <0 0x01680000 0 0x1c080>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2799,7 +2828,7 @@ system_noc: interconnect@1680000 { pcie_south_anoc: interconnect@16c0000 { compatible = "qcom,x1e80100-pcie-south-anoc"; - reg = <0 0x16c0000 0 0xd080>; + reg = <0 0x016c0000 0 0xd080>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2808,7 +2837,7 @@ pcie_south_anoc: interconnect@16c0000 { pcie_center_anoc: interconnect@16d0000 { compatible = "qcom,x1e80100-pcie-center-anoc"; - reg = <0 0x16d0000 0 0x7000>; + reg = <0 0x016d0000 0 0x7000>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2817,7 +2846,7 @@ pcie_center_anoc: interconnect@16d0000 { aggre1_noc: interconnect@16e0000 { compatible = "qcom,x1e80100-aggre1-noc"; - reg = <0 0x16E0000 0 0x14400>; + reg = <0 0x016e0000 0 0x14400>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2826,7 +2855,7 @@ aggre1_noc: interconnect@16e0000 { aggre2_noc: interconnect@1700000 { compatible = "qcom,x1e80100-aggre2-noc"; - reg = <0 0x1700000 0 0x1c400>; + reg = <0 0x01700000 0 0x1c400>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2835,7 +2864,7 @@ aggre2_noc: interconnect@1700000 { pcie_north_anoc: interconnect@1740000 { compatible = "qcom,x1e80100-pcie-north-anoc"; - reg = <0 0x1740000 0 0x9080>; + reg = <0 0x01740000 0 0x9080>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2844,7 +2873,7 @@ pcie_north_anoc: interconnect@1740000 { usb_center_anoc: interconnect@1750000 { compatible = "qcom,x1e80100-usb-center-anoc"; - reg = <0 0x1750000 0 0x8800>; + reg = <0 0x01750000 0 0x8800>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2853,7 +2882,7 @@ usb_center_anoc: interconnect@1750000 { usb_north_anoc: interconnect@1760000 { compatible = "qcom,x1e80100-usb-north-anoc"; - reg = <0 0x1760000 0 0x7080>; + reg = <0 0x01760000 0 0x7080>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2862,7 +2891,7 @@ usb_north_anoc: interconnect@1760000 { usb_south_anoc: interconnect@1770000 { compatible = "qcom,x1e80100-usb-south-anoc"; - reg = <0 0x1770000 0 0xf080>; + reg = <0 0x01770000 0 0xf080>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -2871,7 +2900,7 @@ usb_south_anoc: interconnect@1770000 { mmss_noc: interconnect@1780000 { compatible = "qcom,x1e80100-mmss-noc"; - reg = <0 0x1780000 0 0x5B800>; + reg = <0 0x01780000 0 0x5B800>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -3000,6 +3029,126 @@ pcie6a_phy: phy@1bfc000 { status = "disabled"; }; + pcie5: pci@1c00000 { + device_type = 
"pci"; + compatible = "qcom,pcie-x1e80100"; + reg = <0 0x01c00000 0 0x3000>, + <0 0x7e000000 0 0xf1d>, + <0 0x7e000f40 0 0xa8>, + <0 0x7e001000 0 0x1000>, + <0 0x7e100000 0 0x100000>, + <0 0x01c03000 0 0x1000>; + reg-names = "parf", + "dbi", + "elbi", + "atu", + "config", + "mhi"; + #address-cells = <3>; + #size-cells = <2>; + ranges = <0x01000000 0x0 0x00000000 0x0 0x7e200000 0x0 0x100000>, + <0x02000000 0x0 0x7e300000 0x0 0x7e300000 0x0 0x1d00000>; + bus-range = <0x00 0xff>; + + dma-coherent; + + linux,pci-domain = <5>; + num-lanes = <2>; + + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "msi0", + "msi1", + "msi2", + "msi3", + "msi4", + "msi5", + "msi6", + "msi7"; + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0x7>; + interrupt-map = <0 0 0 1 &intc 0 0 0 70 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &intc 0 0 0 71 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &intc 0 0 0 72 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &intc 0 0 0 73 IRQ_TYPE_LEVEL_HIGH>; + + clocks = <&gcc GCC_PCIE_5_AUX_CLK>, + <&gcc GCC_PCIE_5_CFG_AHB_CLK>, + <&gcc GCC_PCIE_5_MSTR_AXI_CLK>, + <&gcc GCC_PCIE_5_SLV_AXI_CLK>, + <&gcc GCC_PCIE_5_SLV_Q2A_AXI_CLK>, + <&gcc GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK>, + <&gcc GCC_CNOC_PCIE_NORTH_SF_AXI_CLK>; + clock-names = "aux", + "cfg", + "bus_master", + "bus_slave", + "slave_q2a", + "noc_aggr", + "cnoc_sf_axi"; + + assigned-clocks = <&gcc GCC_PCIE_5_AUX_CLK>; + assigned-clock-rates = <19200000>; + + interconnects = <&pcie_south_anoc MASTER_PCIE_5 QCOM_ICC_TAG_ALWAYS + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, + <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS + &cnoc_main SLAVE_PCIE_5 QCOM_ICC_TAG_ALWAYS>; + interconnect-names = "pcie-mem", + "cpu-pcie"; + + resets = <&gcc GCC_PCIE_5_BCR>, + <&gcc GCC_PCIE_5_LINK_DOWN_BCR>; + reset-names = "pci", + "link_down"; + + power-domains = <&gcc GCC_PCIE_5_GDSC>; + required-opps = <&rpmhpd_opp_nom>; + + phys = <&pcie5_phy>; + phy-names = "pciephy"; + + status = "disabled"; + }; + + pcie5_phy: phy@1c06000 { + compatible = "qcom,x1e80100-qmp-gen3x2-pcie-phy"; + reg = <0 0x01c06000 0 0x2000>; + + clocks = <&gcc GCC_PCIE_5_AUX_CLK>, + <&gcc GCC_PCIE_5_CFG_AHB_CLK>, + <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_PCIE_5_PHY_RCHNG_CLK>, + <&gcc GCC_PCIE_5_PIPE_CLK>; + clock-names = "aux", + "cfg_ahb", + "ref", + "rchng", + "pipe"; + + resets = <&gcc GCC_PCIE_5_PHY_BCR>; + reset-names = "phy"; + + assigned-clocks = <&gcc GCC_PCIE_5_PHY_RCHNG_CLK>; + assigned-clock-rates = <100000000>; + + power-domains = <&gcc GCC_PCIE_5_PHY_GDSC>; + + #clock-cells = <0>; + clock-output-names = "pcie5_pipe_clk"; + + #phy-cells = <0>; + + status = "disabled"; + }; + pcie4: pci@1c08000 { device_type = "pci"; compatible = "qcom,pcie-x1e80100"; @@ -3350,7 +3499,7 @@ gem_noc: interconnect@26400000 { nsp_noc: interconnect@320c0000 { compatible = "qcom,x1e80100-nsp-noc"; - reg = <0 0x320C0000 0 0xE080>; + reg = <0 0x320C0000 0 0xe080>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -3385,6 +3534,8 @@ swr3: soundwire@6ab0000 { pinctrl-0 = <&wsa2_swr_active>; pinctrl-names = "default"; + resets = <&lpass_audiocc LPASS_AUDIO_SWR_WSA2_CGCR>; + reset-names = "swr_audio_cgcr"; qcom,din-ports = <4>; qcom,dout-ports = <9>; @@ -3433,6 +3584,8 @@ swr1: soundwire@6ad0000 { pinctrl-0 = <&rx_swr_active>; pinctrl-names = "default"; + resets = <&lpass_audiocc LPASS_AUDIO_SWR_RX_CGCR>; + reset-names = "swr_audio_cgcr"; qcom,din-ports = <1>; qcom,dout-ports = <11>; @@ -3497,6 +3650,8 @@ swr0: soundwire@6b10000 { pinctrl-0 = <&wsa_swr_active>; pinctrl-names = "default"; + resets = <&lpass_audiocc 
LPASS_AUDIO_SWR_WSA_CGCR>; + reset-names = "swr_audio_cgcr"; qcom,din-ports = <4>; qcom,dout-ports = <9>; @@ -3517,6 +3672,13 @@ swr0: soundwire@6b10000 { status = "disabled"; }; + lpass_audiocc: clock-controller@6b6c000 { + compatible = "qcom,x1e80100-lpassaudiocc", "qcom,sc8280xp-lpassaudiocc"; + reg = <0 0x06b6c000 0 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; + swr2: soundwire@6d30000 { compatible = "qcom,soundwire-v2.0.0"; reg = <0 0x06d30000 0 0x10000>; @@ -3526,6 +3688,8 @@ swr2: soundwire@6d30000 { ; interrupt-names = "core", "wakeup"; label = "TX"; + resets = <&lpasscc LPASS_AUDIO_SWR_TX_CGCR>; + reset-names = "swr_audio_cgcr"; pinctrl-0 = <&tx_swr_active>; pinctrl-names = "default"; @@ -3682,9 +3846,16 @@ data-pins { }; }; + lpasscc: clock-controller@6ea0000 { + compatible = "qcom,x1e80100-lpasscc", "qcom,sc8280xp-lpasscc"; + reg = <0 0x06ea0000 0 0x12000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; + lpass_ag_noc: interconnect@7e40000 { compatible = "qcom,x1e80100-lpass-ag-noc"; - reg = <0 0x7e40000 0 0xE080>; + reg = <0 0x07e40000 0 0xe080>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -3693,7 +3864,7 @@ lpass_ag_noc: interconnect@7e40000 { lpass_lpiaon_noc: interconnect@7400000 { compatible = "qcom,x1e80100-lpass-lpiaon-noc"; - reg = <0 0x7400000 0 0x19080>; + reg = <0 0x07400000 0 0x19080>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -3702,7 +3873,7 @@ lpass_lpiaon_noc: interconnect@7400000 { lpass_lpicx_noc: interconnect@7430000 { compatible = "qcom,x1e80100-lpass-lpicx-noc"; - reg = <0 0x7430000 0 0x3A200>; + reg = <0 0x07430000 0 0x3A200>; qcom,bcm-voters = <&apps_bcm_voter>; @@ -3723,6 +3894,90 @@ usb_2_hsphy: phy@88e0000 { status = "disabled"; }; + usb_mp_hsphy0: phy@88e1000 { + compatible = "qcom,x1e80100-snps-eusb2-phy", + "qcom,sm8550-snps-eusb2-phy"; + reg = <0 0x088e1000 0 0x154>; + #phy-cells = <0>; + + clocks = <&tcsr TCSR_USB3_MP0_CLKREF_EN>; + clock-names = "ref"; + + resets = <&gcc GCC_QUSB2PHY_HS0_MP_BCR>; + + status = "disabled"; + }; + + usb_mp_hsphy1: phy@88e2000 { + compatible = "qcom,x1e80100-snps-eusb2-phy", + "qcom,sm8550-snps-eusb2-phy"; + reg = <0 0x088e2000 0 0x154>; + #phy-cells = <0>; + + clocks = <&tcsr TCSR_USB3_MP1_CLKREF_EN>; + clock-names = "ref"; + + resets = <&gcc GCC_QUSB2PHY_HS1_MP_BCR>; + + status = "disabled"; + }; + + usb_mp_qmpphy0: phy@88e3000 { + compatible = "qcom,x1e80100-qmp-usb3-uni-phy"; + reg = <0 0x088e3000 0 0x2000>; + + clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>, + <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>, + <&gcc GCC_USB3_MP_PHY_PIPE_0_CLK>; + clock-names = "aux", + "ref", + "com_aux", + "pipe"; + + resets = <&gcc GCC_USB3_UNIPHY_MP0_BCR>, + <&gcc GCC_USB3UNIPHY_PHY_MP0_BCR>; + reset-names = "phy", + "phy_phy"; + + power-domains = <&gcc GCC_USB3_MP_SS0_PHY_GDSC>; + + #clock-cells = <0>; + clock-output-names = "usb_mp_phy0_pipe_clk"; + + #phy-cells = <0>; + + status = "disabled"; + }; + + usb_mp_qmpphy1: phy@88e5000 { + compatible = "qcom,x1e80100-qmp-usb3-uni-phy"; + reg = <0 0x088e5000 0 0x2000>; + + clocks = <&gcc GCC_USB3_MP_PHY_AUX_CLK>, + <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_USB3_MP_PHY_COM_AUX_CLK>, + <&gcc GCC_USB3_MP_PHY_PIPE_1_CLK>; + clock-names = "aux", + "ref", + "com_aux", + "pipe"; + + resets = <&gcc GCC_USB3_UNIPHY_MP1_BCR>, + <&gcc GCC_USB3UNIPHY_PHY_MP1_BCR>; + reset-names = "phy", + "phy_phy"; + + power-domains = <&gcc GCC_USB3_MP_SS1_PHY_GDSC>; + + #clock-cells = <0>; + clock-output-names = "usb_mp_phy1_pipe_clk"; + + #phy-cells = <0>; + + status = "disabled"; + }; + usb_1_ss2: 
usb@a0f8800 { compatible = "qcom,x1e80100-dwc3", "qcom,dwc3"; reg = <0 0x0a0f8800 0 0x400>; @@ -3897,6 +4152,92 @@ usb_2_dwc3_hs: endpoint { }; }; + usb_mp: usb@a4f8800 { + compatible = "qcom,x1e80100-dwc3-mp", "qcom,dwc3"; + reg = <0 0x0a4f8800 0 0x400>; + + clocks = <&gcc GCC_CFG_NOC_USB3_MP_AXI_CLK>, + <&gcc GCC_USB30_MP_MASTER_CLK>, + <&gcc GCC_AGGRE_USB3_MP_AXI_CLK>, + <&gcc GCC_USB30_MP_SLEEP_CLK>, + <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>, + <&gcc GCC_AGGRE_USB_NOC_AXI_CLK>, + <&gcc GCC_AGGRE_NOC_USB_NORTH_AXI_CLK>, + <&gcc GCC_AGGRE_NOC_USB_SOUTH_AXI_CLK>, + <&gcc GCC_SYS_NOC_USB_AXI_CLK>; + clock-names = "cfg_noc", + "core", + "iface", + "sleep", + "mock_utmi", + "noc_aggr", + "noc_aggr_north", + "noc_aggr_south", + "noc_sys"; + + assigned-clocks = <&gcc GCC_USB30_MP_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_MP_MASTER_CLK>; + assigned-clock-rates = <19200000>, + <200000000>; + + interrupts-extended = <&intc GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>, + <&intc GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>, + <&intc GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>, + <&intc GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 52 IRQ_TYPE_EDGE_BOTH>, + <&pdc 51 IRQ_TYPE_EDGE_BOTH>, + <&pdc 54 IRQ_TYPE_EDGE_BOTH>, + <&pdc 53 IRQ_TYPE_EDGE_BOTH>, + <&pdc 55 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 56 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "pwr_event_1", "pwr_event_2", + "hs_phy_1", "hs_phy_2", + "dp_hs_phy_1", "dm_hs_phy_1", + "dp_hs_phy_2", "dm_hs_phy_2", + "ss_phy_1", "ss_phy_2"; + + power-domains = <&gcc GCC_USB30_MP_GDSC>; + required-opps = <&rpmhpd_opp_nom>; + + resets = <&gcc GCC_USB30_MP_BCR>; + + interconnects = <&usb_north_anoc MASTER_USB3_MP QCOM_ICC_TAG_ALWAYS + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, + <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS + &config_noc SLAVE_USB3_MP QCOM_ICC_TAG_ALWAYS>; + interconnect-names = "usb-ddr", + "apps-usb"; + + wakeup-source; + + #address-cells = <2>; + #size-cells = <2>; + ranges; + + status = "disabled"; + + usb_mp_dwc3: usb@a400000 { + compatible = "snps,dwc3"; + reg = <0 0x0a400000 0 0xcd00>; + + interrupts = ; + + iommus = <&apps_smmu 0x1400 0x0>; + + phys = <&usb_mp_hsphy0>, <&usb_mp_qmpphy0>, + <&usb_mp_hsphy1>, <&usb_mp_qmpphy1>; + phy-names = "usb2-0", "usb3-0", + "usb2-1", "usb3-1"; + dr_mode = "host"; + + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; + snps,usb3_lpm_capable; + + dma-coherent; + }; + }; + usb_1_ss0: usb@a6f8800 { compatible = "qcom,x1e80100-dwc3", "qcom,dwc3"; reg = <0 0x0a6f8800 0 0x400>; @@ -4215,11 +4556,11 @@ opp-575000000 { mdss_dp0: displayport-controller@ae90000 { compatible = "qcom,x1e80100-dp"; - reg = <0 0xae90000 0 0x200>, - <0 0xae90200 0 0x200>, - <0 0xae90400 0 0x600>, - <0 0xae91000 0 0x400>, - <0 0xae91400 0 0x400>; + reg = <0 0x0ae90000 0 0x200>, + <0 0x0ae90200 0 0x200>, + <0 0x0ae90400 0 0x600>, + <0 0x0ae91000 0 0x400>, + <0 0x0ae91400 0 0x400>; interrupts-extended = <&mdss 12>; @@ -4298,11 +4639,11 @@ opp-810000000 { mdss_dp1: displayport-controller@ae98000 { compatible = "qcom,x1e80100-dp"; - reg = <0 0xae98000 0 0x200>, - <0 0xae98200 0 0x200>, - <0 0xae98400 0 0x600>, - <0 0xae99000 0 0x400>, - <0 0xae99400 0 0x400>; + reg = <0 0x0ae98000 0 0x200>, + <0 0x0ae98200 0 0x200>, + <0 0x0ae98400 0 0x600>, + <0 0x0ae99000 0 0x400>, + <0 0x0ae99400 0 0x400>; interrupts-extended = <&mdss 13>; @@ -4381,11 +4722,11 @@ opp-810000000 { mdss_dp2: displayport-controller@ae9a000 { compatible = "qcom,x1e80100-dp"; - reg = <0 0xae9a000 0 0x200>, - <0 0xae9a200 0 0x200>, - <0 0xae9a400 0 0x600>, - <0 0xae9b000 0 0x400>, - <0 0xae9b400 0 0x400>; + reg = <0 0x0ae9a000 
0 0x200>, + <0 0x0ae9a200 0 0x200>, + <0 0x0ae9a400 0 0x600>, + <0 0x0ae9b000 0 0x400>, + <0 0x0ae9b400 0 0x400>; interrupts-extended = <&mdss 14>; @@ -4402,14 +4743,14 @@ mdss_dp2: displayport-controller@ae9a000 { assigned-clocks = <&dispcc DISP_CC_MDSS_DPTX2_LINK_CLK_SRC>, <&dispcc DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC>; - assigned-clock-parents = <&mdss_dp2_phy 0>, - <&mdss_dp2_phy 1>; + assigned-clock-parents = <&usb_1_ss2_qmpphy QMP_USB43DP_DP_LINK_CLK>, + <&usb_1_ss2_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>; operating-points-v2 = <&mdss_dp2_opp_table>; power-domains = <&rpmhpd RPMHPD_MMCX>; - phys = <&mdss_dp2_phy>; + phys = <&usb_1_ss2_qmpphy QMP_USB43DP_DP_PHY>; phy-names = "dp"; #sound-dai-cells = <0>; @@ -4463,11 +4804,11 @@ opp-810000000 { mdss_dp3: displayport-controller@aea0000 { compatible = "qcom,x1e80100-dp"; - reg = <0 0xaea0000 0 0x200>, - <0 0xaea0200 0 0x200>, - <0 0xaea0400 0 0x600>, - <0 0xaea1000 0 0x400>, - <0 0xaea1400 0 0x400>; + reg = <0 0x0aea0000 0 0x200>, + <0 0x0aea0200 0 0x200>, + <0 0x0aea0400 0 0x600>, + <0 0x0aea1000 0 0x400>, + <0 0x0aea1400 0 0x400>; interrupts-extended = <&mdss 15>; @@ -4597,8 +4938,8 @@ dispcc: clock-controller@af00000 { <&usb_1_ss0_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>, <&usb_1_ss1_qmpphy QMP_USB43DP_DP_LINK_CLK>, /* dp1 */ <&usb_1_ss1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>, - <&mdss_dp2_phy 0>, /* dp2 */ - <&mdss_dp2_phy 1>, + <&usb_1_ss2_qmpphy QMP_USB43DP_DP_LINK_CLK>, /* dp2 */ + <&usb_1_ss2_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>, <&mdss_dp3_phy 0>, /* dp3 */ <&mdss_dp3_phy 1>; power-domains = <&rpmhpd RPMHPD_MMCX>; @@ -4631,6 +4972,11 @@ aoss_qmp: power-management@c300000 { #clock-cells = <0>; }; + sram@c3f0000 { + compatible = "qcom,rpmh-stats"; + reg = <0 0x0c3f0000 0 0x400>; + }; + spmi: arbiter@c400000 { compatible = "qcom,x1e80100-spmi-pmic-arb"; reg = <0 0x0c400000 0 0x3000>, @@ -5241,12 +5587,50 @@ qup_spi23_data_clk: qup-spi23-data-clk-state { bias-disable; }; + qup_uart2_default: qup-uart2-default-state { + cts-pins { + pins = "gpio8"; + function = "qup0_se2"; + drive-strength = <2>; + bias-disable; + }; + + rts-pins { + pins = "gpio9"; + function = "qup0_se2"; + drive-strength = <2>; + bias-disable; + }; + + tx-pins { + pins = "gpio10"; + function = "qup0_se2"; + drive-strength = <2>; + bias-disable; + }; + + rx-pins { + pins = "gpio11"; + function = "qup0_se2"; + drive-strength = <2>; + bias-disable; + }; + }; + qup_uart21_default: qup-uart21-default-state { - /* TX, RX */ - pins = "gpio86", "gpio87"; - function = "qup2_se5"; - drive-strength = <2>; - bias-disable; + tx-pins { + pins = "gpio86"; + function = "qup2_se5"; + drive-strength = <2>; + bias-disable; + }; + + rx-pins { + pins = "gpio87"; + function = "qup2_se5"; + drive-strength = <2>; + bias-disable; + }; }; }; diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile index fbd214a1a63825..97228a3cb99c16 100644 --- a/arch/arm64/boot/dts/renesas/Makefile +++ b/arch/arm64/boot/dts/renesas/Makefile @@ -112,9 +112,12 @@ dtb-$(CONFIG_ARCH_R8A77965) += r8a779m5-salvator-xs-panel-aa104xd12.dtb dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc.dtb dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-cru-csi-ov5645.dtbo +dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-du-adv7513.dtbo dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043-smarc-pmod.dtbo r9a07g043u11-smarc-cru-csi-ov5645-dtbs := r9a07g043u11-smarc.dtb r9a07g043u11-smarc-cru-csi-ov5645.dtbo dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-cru-csi-ov5645.dtb +r9a07g043u11-smarc-du-adv7513-dtbs := 
r9a07g043u11-smarc.dtb r9a07g043u11-smarc-du-adv7513.dtbo +dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-du-adv7513.dtb r9a07g043u11-smarc-pmod-dtbs := r9a07g043u11-smarc.dtb r9a07g043-smarc-pmod.dtbo dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc-pmod.dtb @@ -137,5 +140,7 @@ dtb-$(CONFIG_ARCH_R9A08G045) += r9a08g045s33-smarc.dtb dtb-$(CONFIG_ARCH_R9A09G011) += r9a09g011-v2mevk2.dtb +dtb-$(CONFIG_ARCH_R9A09G057) += r9a09g057h44-rzv2h-evk.dtb + dtb-$(CONFIG_ARCH_RCAR_GEN3) += draak-ebisu-panel-aa104xd12.dtbo dtb-$(CONFIG_ARCH_RCAR_GEN3) += salvator-panel-aa104xd12.dtbo diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi index 1dbf9d56c68da8..f065ee90649a4a 100644 --- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi @@ -2277,6 +2277,7 @@ sdhi0: mmc@ee100000 { max-frequency = <200000000>; power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; resets = <&cpg 314>; + iommus = <&ipmmu_ds1 32>; status = "disabled"; }; @@ -2290,6 +2291,7 @@ sdhi1: mmc@ee120000 { max-frequency = <200000000>; power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; resets = <&cpg 313>; + iommus = <&ipmmu_ds1 33>; status = "disabled"; }; @@ -2303,6 +2305,7 @@ sdhi2: mmc@ee140000 { max-frequency = <200000000>; power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; resets = <&cpg 312>; + iommus = <&ipmmu_ds1 34>; status = "disabled"; }; @@ -2316,6 +2319,7 @@ sdhi3: mmc@ee160000 { max-frequency = <200000000>; power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; resets = <&cpg 311>; + iommus = <&ipmmu_ds1 35>; status = "disabled"; }; @@ -2464,6 +2468,7 @@ fcpf0: fcp@fe950000 { clocks = <&cpg CPG_MOD 615>; power-domains = <&sysc R8A774A1_PD_A3VC>; resets = <&cpg 615>; + iommus = <&ipmmu_vc0 16>; }; fcpvb0: fcp@fe96f000 { @@ -2472,6 +2477,7 @@ fcpvb0: fcp@fe96f000 { clocks = <&cpg CPG_MOD 607>; power-domains = <&sysc R8A774A1_PD_A3VC>; resets = <&cpg 607>; + iommus = <&ipmmu_vi0 5>; }; fcpvd0: fcp@fea27000 { diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi index 10f22c52e79ecf..117cb6950f91f9 100644 --- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi @@ -2004,6 +2004,14 @@ audma0: dma-controller@ec700000 { resets = <&cpg 502>; #dma-cells = <1>; dma-channels = <16>; + iommus = <&ipmmu_mp 0>, <&ipmmu_mp 1>, + <&ipmmu_mp 2>, <&ipmmu_mp 3>, + <&ipmmu_mp 4>, <&ipmmu_mp 5>, + <&ipmmu_mp 6>, <&ipmmu_mp 7>, + <&ipmmu_mp 8>, <&ipmmu_mp 9>, + <&ipmmu_mp 10>, <&ipmmu_mp 11>, + <&ipmmu_mp 12>, <&ipmmu_mp 13>, + <&ipmmu_mp 14>, <&ipmmu_mp 15>; }; audma1: dma-controller@ec720000 { @@ -2038,6 +2046,14 @@ audma1: dma-controller@ec720000 { resets = <&cpg 501>; #dma-cells = <1>; dma-channels = <16>; + iommus = <&ipmmu_mp 16>, <&ipmmu_mp 17>, + <&ipmmu_mp 18>, <&ipmmu_mp 19>, + <&ipmmu_mp 20>, <&ipmmu_mp 21>, + <&ipmmu_mp 22>, <&ipmmu_mp 23>, + <&ipmmu_mp 24>, <&ipmmu_mp 25>, + <&ipmmu_mp 26>, <&ipmmu_mp 27>, + <&ipmmu_mp 28>, <&ipmmu_mp 29>, + <&ipmmu_mp 30>, <&ipmmu_mp 31>; }; xhci0: usb@ee000000 { @@ -2145,6 +2161,7 @@ sdhi0: mmc@ee100000 { max-frequency = <200000000>; power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>; resets = <&cpg 314>; + iommus = <&ipmmu_ds1 32>; status = "disabled"; }; @@ -2158,6 +2175,7 @@ sdhi1: mmc@ee120000 { max-frequency = <200000000>; power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>; resets = <&cpg 313>; + iommus = <&ipmmu_ds1 33>; status = "disabled"; }; @@ -2171,6 +2189,7 @@ sdhi2: mmc@ee140000 { max-frequency = <200000000>; power-domains = <&sysc 
R8A774B1_PD_ALWAYS_ON>; resets = <&cpg 312>; + iommus = <&ipmmu_ds1 34>; status = "disabled"; }; @@ -2184,6 +2203,7 @@ sdhi3: mmc@ee160000 { max-frequency = <200000000>; power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>; resets = <&cpg 311>; + iommus = <&ipmmu_ds1 35>; status = "disabled"; }; @@ -2211,6 +2231,7 @@ sata: sata@ee300000 { clocks = <&cpg CPG_MOD 815>; power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>; resets = <&cpg 815>; + iommus = <&ipmmu_hc 2>; status = "disabled"; }; @@ -2343,6 +2364,7 @@ fcpf0: fcp@fe950000 { clocks = <&cpg CPG_MOD 615>; power-domains = <&sysc R8A774B1_PD_A3VP>; resets = <&cpg 615>; + iommus = <&ipmmu_vp0 0>; }; vspb: vsp@fe960000 { @@ -2395,6 +2417,7 @@ fcpvb0: fcp@fe96f000 { clocks = <&cpg CPG_MOD 607>; power-domains = <&sysc R8A774B1_PD_A3VP>; resets = <&cpg 607>; + iommus = <&ipmmu_vp0 5>; }; fcpvd0: fcp@fea27000 { @@ -2403,6 +2426,7 @@ fcpvd0: fcp@fea27000 { clocks = <&cpg CPG_MOD 603>; power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>; resets = <&cpg 603>; + iommus = <&ipmmu_vi0 8>; }; fcpvd1: fcp@fea2f000 { @@ -2411,6 +2435,7 @@ fcpvd1: fcp@fea2f000 { clocks = <&cpg CPG_MOD 602>; power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>; resets = <&cpg 602>; + iommus = <&ipmmu_vi0 9>; }; fcpvi0: fcp@fe9af000 { @@ -2419,6 +2444,7 @@ fcpvi0: fcp@fe9af000 { clocks = <&cpg CPG_MOD 611>; power-domains = <&sysc R8A774B1_PD_A3VP>; resets = <&cpg 611>; + iommus = <&ipmmu_vp0 8>; }; csi20: csi2@fea80000 { diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi index 3e2af50ce7c64b..7655d5e3a03416 100644 --- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi @@ -1637,6 +1637,7 @@ sdhi0: mmc@ee100000 { max-frequency = <200000000>; power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; resets = <&cpg 314>; + iommus = <&ipmmu_ds1 32>; status = "disabled"; }; @@ -1650,6 +1651,7 @@ sdhi1: mmc@ee120000 { max-frequency = <200000000>; power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; resets = <&cpg 313>; + iommus = <&ipmmu_ds1 33>; status = "disabled"; }; @@ -1663,6 +1665,7 @@ sdhi3: mmc@ee160000 { max-frequency = <200000000>; power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; resets = <&cpg 311>; + iommus = <&ipmmu_ds1 35>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi index 1eeb4c7b4c4b92..f845ca604de069 100644 --- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi @@ -2652,6 +2652,7 @@ fcpf0: fcp@fe950000 { clocks = <&cpg CPG_MOD 615>; power-domains = <&sysc R8A774E1_PD_A3VP>; resets = <&cpg 615>; + iommus = <&ipmmu_vp0 0>; }; fcpf1: fcp@fe951000 { @@ -2660,6 +2661,7 @@ fcpf1: fcp@fe951000 { clocks = <&cpg CPG_MOD 614>; power-domains = <&sysc R8A774E1_PD_A3VP>; resets = <&cpg 614>; + iommus = <&ipmmu_vp1 1>; }; fcpvb0: fcp@fe96f000 { @@ -2668,6 +2670,7 @@ fcpvb0: fcp@fe96f000 { clocks = <&cpg CPG_MOD 607>; power-domains = <&sysc R8A774E1_PD_A3VP>; resets = <&cpg 607>; + iommus = <&ipmmu_vp0 5>; }; fcpvb1: fcp@fe92f000 { @@ -2676,6 +2679,7 @@ fcpvb1: fcp@fe92f000 { clocks = <&cpg CPG_MOD 606>; power-domains = <&sysc R8A774E1_PD_A3VP>; resets = <&cpg 606>; + iommus = <&ipmmu_vp1 7>; }; fcpvi0: fcp@fe9af000 { @@ -2684,6 +2688,7 @@ fcpvi0: fcp@fe9af000 { clocks = <&cpg CPG_MOD 611>; power-domains = <&sysc R8A774E1_PD_A3VP>; resets = <&cpg 611>; + iommus = <&ipmmu_vp0 8>; }; fcpvi1: fcp@fe9bf000 { @@ -2692,6 +2697,7 @@ fcpvi1: fcp@fe9bf000 { clocks = <&cpg CPG_MOD 610>; power-domains = <&sysc 
R8A774E1_PD_A3VP>; resets = <&cpg 610>; + iommus = <&ipmmu_vp1 9>; }; fcpvd0: fcp@fea27000 { @@ -2700,6 +2706,7 @@ fcpvd0: fcp@fea27000 { clocks = <&cpg CPG_MOD 603>; power-domains = <&sysc R8A774E1_PD_ALWAYS_ON>; resets = <&cpg 603>; + iommus = <&ipmmu_vi0 8>; }; fcpvd1: fcp@fea2f000 { @@ -2708,6 +2715,7 @@ fcpvd1: fcp@fea2f000 { clocks = <&cpg CPG_MOD 602>; power-domains = <&sysc R8A774E1_PD_ALWAYS_ON>; resets = <&cpg 602>; + iommus = <&ipmmu_vi0 9>; }; csi20: csi2@fea80000 { diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi index 1122c470b72f87..ee80f52dc7cf45 100644 --- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi @@ -2652,6 +2652,7 @@ fcpf0: fcp@fe950000 { clocks = <&cpg CPG_MOD 615>; power-domains = <&sysc R8A7796_PD_A3VC>; resets = <&cpg 615>; + iommus = <&ipmmu_vc0 16>; }; fcpvb0: fcp@fe96f000 { @@ -2660,6 +2661,7 @@ fcpvb0: fcp@fe96f000 { clocks = <&cpg CPG_MOD 607>; power-domains = <&sysc R8A7796_PD_A3VC>; resets = <&cpg 607>; + iommus = <&ipmmu_vi0 5>; }; fcpvi0: fcp@fe9af000 { diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi index bf1130af7de39c..3b9066043a71e8 100644 --- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi @@ -2502,6 +2502,7 @@ fcpf0: fcp@fe950000 { clocks = <&cpg CPG_MOD 615>; power-domains = <&sysc R8A77961_PD_A3VC>; resets = <&cpg 615>; + iommus = <&ipmmu_vc0 16>; }; fcpvb0: fcp@fe96f000 { @@ -2510,6 +2511,7 @@ fcpvb0: fcp@fe96f000 { clocks = <&cpg CPG_MOD 607>; power-domains = <&sysc R8A77961_PD_A3VC>; resets = <&cpg 607>; + iommus = <&ipmmu_vi0 5>; }; fcpvi0: fcp@fe9af000 { diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi index f02d1547b88171..557bdf8fab179c 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi @@ -2185,6 +2185,14 @@ audma0: dma-controller@ec700000 { resets = <&cpg 502>; #dma-cells = <1>; dma-channels = <16>; + iommus = <&ipmmu_mp 0>, <&ipmmu_mp 1>, + <&ipmmu_mp 2>, <&ipmmu_mp 3>, + <&ipmmu_mp 4>, <&ipmmu_mp 5>, + <&ipmmu_mp 6>, <&ipmmu_mp 7>, + <&ipmmu_mp 8>, <&ipmmu_mp 9>, + <&ipmmu_mp 10>, <&ipmmu_mp 11>, + <&ipmmu_mp 12>, <&ipmmu_mp 13>, + <&ipmmu_mp 14>, <&ipmmu_mp 15>; }; audma1: dma-controller@ec720000 { @@ -2219,6 +2227,14 @@ audma1: dma-controller@ec720000 { resets = <&cpg 501>; #dma-cells = <1>; dma-channels = <16>; + iommus = <&ipmmu_mp 16>, <&ipmmu_mp 17>, + <&ipmmu_mp 18>, <&ipmmu_mp 19>, + <&ipmmu_mp 20>, <&ipmmu_mp 21>, + <&ipmmu_mp 22>, <&ipmmu_mp 23>, + <&ipmmu_mp 24>, <&ipmmu_mp 25>, + <&ipmmu_mp 26>, <&ipmmu_mp 27>, + <&ipmmu_mp 28>, <&ipmmu_mp 29>, + <&ipmmu_mp 30>, <&ipmmu_mp 31>; }; xhci0: usb@ee000000 { @@ -2396,6 +2412,7 @@ sata: sata@ee300000 { clocks = <&cpg CPG_MOD 815>; power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; resets = <&cpg 815>; + iommus = <&ipmmu_hc 2>; status = "disabled"; }; @@ -2490,6 +2507,7 @@ fcpf0: fcp@fe950000 { clocks = <&cpg CPG_MOD 615>; power-domains = <&sysc R8A77965_PD_A3VP>; resets = <&cpg 615>; + iommus = <&ipmmu_vp0 0>; }; vspb: vsp@fe960000 { @@ -2542,6 +2560,7 @@ fcpvb0: fcp@fe96f000 { clocks = <&cpg CPG_MOD 607>; power-domains = <&sysc R8A77965_PD_A3VP>; resets = <&cpg 607>; + iommus = <&ipmmu_vp0 5>; }; fcpvd0: fcp@fea27000 { @@ -2550,6 +2569,7 @@ fcpvd0: fcp@fea27000 { clocks = <&cpg CPG_MOD 603>; power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; resets = <&cpg 603>; + iommus = <&ipmmu_vi0 8>; }; fcpvd1: 
fcp@fea2f000 { @@ -2558,6 +2578,7 @@ fcpvd1: fcp@fea2f000 { clocks = <&cpg CPG_MOD 602>; power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; resets = <&cpg 602>; + iommus = <&ipmmu_vi0 9>; }; fcpvi0: fcp@fe9af000 { @@ -2566,6 +2587,7 @@ fcpvi0: fcp@fe9af000 { clocks = <&cpg CPG_MOD 611>; power-domains = <&sysc R8A77965_PD_A3VP>; resets = <&cpg 611>; + iommus = <&ipmmu_vp0 8>; }; cmm0: cmm@fea40000 { diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi index 64fb95b1c89ac6..38145fd6acf024 100644 --- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi @@ -1092,6 +1092,7 @@ fcpvd0: fcp@fea27000 { clocks = <&cpg CPG_MOD 603>; power-domains = <&sysc R8A77970_PD_ALWAYS_ON>; resets = <&cpg 603>; + iommus = <&ipmmu_vi0 8>; }; csi40: csi2@feaa0000 { diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi index 0c2b157036e75e..55a6c622f87325 100644 --- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi @@ -1266,6 +1266,7 @@ gether: ethernet@e7400000 { clocks = <&cpg CPG_MOD 813>; power-domains = <&sysc R8A77980_PD_ALWAYS_ON>; resets = <&cpg 813>; + iommus = <&ipmmu_ds1 34>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -1430,6 +1431,7 @@ fcpvd0: fcp@fea27000 { clocks = <&cpg CPG_MOD 603>; power-domains = <&sysc R8A77980_PD_ALWAYS_ON>; resets = <&cpg 603>; + iommus = <&ipmmu_vi0 8>; }; csi40: csi2@feaa0000 { diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi index d76347001cc13c..1f4ab27acc3398 100644 --- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi @@ -707,6 +707,7 @@ avb0: ethernet@e6800000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_ds1 0>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -755,6 +756,7 @@ avb1: ethernet@e6810000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_ds1 1>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -803,6 +805,7 @@ avb2: ethernet@e6820000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_ds1 2>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -851,6 +854,7 @@ avb3: ethernet@e6830000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_ds1 3>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -899,6 +903,7 @@ avb4: ethernet@e6840000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_ds1 4>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -947,6 +952,7 @@ avb5: ethernet@e6850000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_ds1 11>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -1168,7 +1174,8 @@ msiof5: spi@e6c28000 { }; vin00: video@e6ef0000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef0000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 730>; @@ -1196,7 +1203,8 @@ vin00isp0: endpoint@0 { }; vin01: video@e6ef1000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef1000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 731>; @@ -1224,7 +1232,8 @@ vin01isp0: 
endpoint@0 { }; vin02: video@e6ef2000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef2000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 800>; @@ -1252,7 +1261,8 @@ vin02isp0: endpoint@0 { }; vin03: video@e6ef3000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef3000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 801>; @@ -1280,7 +1290,8 @@ vin03isp0: endpoint@0 { }; vin04: video@e6ef4000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef4000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 802>; @@ -1308,7 +1319,8 @@ vin04isp0: endpoint@0 { }; vin05: video@e6ef5000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef5000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 803>; @@ -1336,7 +1348,8 @@ vin05isp0: endpoint@0 { }; vin06: video@e6ef6000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef6000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 804>; @@ -1364,7 +1377,8 @@ vin06isp0: endpoint@0 { }; vin07: video@e6ef7000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef7000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 805>; @@ -1392,7 +1406,8 @@ vin07isp0: endpoint@0 { }; vin08: video@e6ef8000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef8000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 806>; @@ -1420,7 +1435,8 @@ vin08isp1: endpoint@1 { }; vin09: video@e6ef9000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef9000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 807>; @@ -1448,7 +1464,8 @@ vin09isp1: endpoint@1 { }; vin10: video@e6efa000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efa000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 808>; @@ -1476,7 +1493,8 @@ vin10isp1: endpoint@1 { }; vin11: video@e6efb000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efb000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 809>; @@ -1504,7 +1522,8 @@ vin11isp1: endpoint@1 { }; vin12: video@e6efc000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efc000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 810>; @@ -1532,7 +1551,8 @@ vin12isp1: endpoint@1 { }; vin13: video@e6efd000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efd000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 811>; @@ -1560,7 +1580,8 @@ vin13isp1: endpoint@1 { }; vin14: video@e6efe000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efe000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 812>; @@ -1588,7 +1609,8 @@ vin14isp1: endpoint@1 { }; vin15: video@e6eff000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6eff000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 813>; @@ -1616,7 +1638,8 @@ vin15isp1: endpoint@1 { 
}; vin16: video@e6ed0000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed0000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 814>; @@ -1644,7 +1667,8 @@ vin16isp2: endpoint@2 { }; vin17: video@e6ed1000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed1000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 815>; @@ -1672,7 +1696,8 @@ vin17isp2: endpoint@2 { }; vin18: video@e6ed2000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed2000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 816>; @@ -1700,7 +1725,8 @@ vin18isp2: endpoint@2 { }; vin19: video@e6ed3000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed3000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 817>; @@ -1728,7 +1754,8 @@ vin19isp2: endpoint@2 { }; vin20: video@e6ed4000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed4000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 818>; @@ -1756,7 +1783,8 @@ vin20isp2: endpoint@2 { }; vin21: video@e6ed5000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed5000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 819>; @@ -1784,7 +1812,8 @@ vin21isp2: endpoint@2 { }; vin22: video@e6ed6000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed6000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 820>; @@ -1812,7 +1841,8 @@ vin22isp2: endpoint@2 { }; vin23: video@e6ed7000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed7000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 821>; @@ -1840,7 +1870,8 @@ vin23isp2: endpoint@2 { }; vin24: video@e6ed8000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed8000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 822>; @@ -1868,7 +1899,8 @@ vin24isp3: endpoint@3 { }; vin25: video@e6ed9000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ed9000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 823>; @@ -1896,7 +1928,8 @@ vin25isp3: endpoint@3 { }; vin26: video@e6eda000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6eda000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 824>; @@ -1924,7 +1957,8 @@ vin26isp3: endpoint@3 { }; vin27: video@e6edb000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6edb000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 825>; @@ -1952,7 +1986,8 @@ vin27isp3: endpoint@3 { }; vin28: video@e6edc000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6edc000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 826>; @@ -1980,7 +2015,8 @@ vin28isp3: endpoint@3 { }; vin29: video@e6edd000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6edd000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 827>; @@ -2008,7 +2044,8 @@ vin29isp3: endpoint@3 { }; vin30: 
video@e6ede000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ede000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 828>; @@ -2036,7 +2073,8 @@ vin30isp3: endpoint@3 { }; vin31: video@e6edf000 { - compatible = "renesas,vin-r8a779a0"; + compatible = "renesas,vin-r8a779a0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6edf000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 829>; @@ -2096,6 +2134,14 @@ dmac1: dma-controller@e7350000 { resets = <&cpg 709>; #dma-cells = <1>; dma-channels = <16>; + iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>, + <&ipmmu_ds0 2>, <&ipmmu_ds0 3>, + <&ipmmu_ds0 4>, <&ipmmu_ds0 5>, + <&ipmmu_ds0 6>, <&ipmmu_ds0 7>, + <&ipmmu_ds0 8>, <&ipmmu_ds0 9>, + <&ipmmu_ds0 10>, <&ipmmu_ds0 11>, + <&ipmmu_ds0 12>, <&ipmmu_ds0 13>, + <&ipmmu_ds0 14>, <&ipmmu_ds0 15>; }; dmac2: dma-controller@e7351000 { @@ -2121,6 +2167,10 @@ dmac2: dma-controller@e7351000 { resets = <&cpg 710>; #dma-cells = <1>; dma-channels = <8>; + iommus = <&ipmmu_ds0 16>, <&ipmmu_ds0 17>, + <&ipmmu_ds0 18>, <&ipmmu_ds0 19>, + <&ipmmu_ds0 20>, <&ipmmu_ds0 21>, + <&ipmmu_ds0 22>, <&ipmmu_ds0 23>; }; mmc0: mmc@ee140000 { @@ -2278,6 +2328,7 @@ fcpvd0: fcp@fea10000 { clocks = <&cpg CPG_MOD 508>; power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; resets = <&cpg 508>; + iommus = <&ipmmu_vi1 6>; }; fcpvd1: fcp@fea11000 { @@ -2286,6 +2337,7 @@ fcpvd1: fcp@fea11000 { clocks = <&cpg CPG_MOD 509>; power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; resets = <&cpg 509>; + iommus = <&ipmmu_vi1 7>; }; vspd0: vsp@fea20000 { @@ -2449,7 +2501,8 @@ du_out_dsi1: endpoint { }; isp0: isp@fed00000 { - compatible = "renesas,r8a779a0-isp"; + compatible = "renesas,r8a779a0-isp", + "renesas,rcar-gen4-isp"; reg = <0 0xfed00000 0 0x10000>; interrupts = ; clocks = <&cpg CPG_MOD 612>; @@ -2532,7 +2585,8 @@ isp0vin07: endpoint { }; isp1: isp@fed20000 { - compatible = "renesas,r8a779a0-isp"; + compatible = "renesas,r8a779a0-isp", + "renesas,rcar-gen4-isp"; reg = <0 0xfed20000 0 0x10000>; interrupts = ; clocks = <&cpg CPG_MOD 613>; @@ -2615,7 +2669,8 @@ isp1vin15: endpoint { }; isp2: isp@fed30000 { - compatible = "renesas,r8a779a0-isp"; + compatible = "renesas,r8a779a0-isp", + "renesas,rcar-gen4-isp"; reg = <0 0xfed30000 0 0x10000>; interrupts = ; clocks = <&cpg CPG_MOD 614>; @@ -2698,7 +2753,8 @@ isp2vin23: endpoint { }; isp3: isp@fed40000 { - compatible = "renesas,r8a779a0-isp"; + compatible = "renesas,r8a779a0-isp", + "renesas,rcar-gen4-isp"; reg = <0 0xfed40000 0 0x10000>; interrupts = ; clocks = <&cpg CPG_MOD 615>; diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi index 53d1d4d8197af5..12900ebd098b0b 100644 --- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi @@ -175,6 +175,20 @@ extalr_clk: extalr { clock-frequency = <0>; }; + pcie0_clkref: pcie0-clkref { + compatible = "fixed-clock"; + #clock-cells = <0>; + /* This value must be overridden by the board */ + clock-frequency = <0>; + }; + + pcie1_clkref: pcie1-clkref { + compatible = "fixed-clock"; + #clock-cells = <0>; + /* This value must be overridden by the board */ + clock-frequency = <0>; + }; + pmu_a76 { compatible = "arm,cortex-a76-pmu"; interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>; @@ -553,6 +567,20 @@ tmu4: timer@ffc00000 { status = "disabled"; }; + tsn0: ethernet@e6460000 { + compatible = "renesas,r8a779g0-ethertsn", "renesas,rcar-gen4-ethertsn"; + reg = <0 0xe6460000 0 0x7000>, + <0 0xe6449000 0 0x500>; + reg-names = 
"tsnes", "gptp"; + interrupts = , + ; + interrupt-names = "tx", "rx"; + clocks = <&cpg CPG_MOD 2723>; + power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; + resets = <&cpg 2723>; + status = "disabled"; + }; + i2c0: i2c@e6500000 { compatible = "renesas,i2c-r8a779g0", "renesas,rcar-gen4-i2c"; @@ -723,6 +751,126 @@ hscif3: serial@e66a0000 { status = "disabled"; }; + pciec0: pcie@e65d0000 { + compatible = "renesas,r8a779g0-pcie", + "renesas,rcar-gen4-pcie"; + reg = <0 0xe65d0000 0 0x1000>, <0 0xe65d2000 0 0x0800>, + <0 0xe65d3000 0 0x2000>, <0 0xe65d5000 0 0x1200>, + <0 0xe65d6200 0 0x0e00>, <0 0xe65d7000 0 0x0400>, + <0 0xfe000000 0 0x400000>; + reg-names = "dbi", "dbi2", "atu", "dma", "app", "phy", "config"; + interrupts = , + , + , + ; + interrupt-names = "msi", "dma", "sft_ce", "app"; + clocks = <&cpg CPG_MOD 624>, <&pcie0_clkref>; + clock-names = "core", "ref"; + power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; + resets = <&cpg 624>; + reset-names = "pwr"; + max-link-speed = <4>; + num-lanes = <2>; + #address-cells = <3>; + #size-cells = <2>; + bus-range = <0x00 0xff>; + device_type = "pci"; + ranges = <0x01000000 0 0x00000000 0 0xfe000000 0 0x00400000>, + <0x02000000 0 0x30000000 0 0x30000000 0 0x10000000>; + dma-ranges = <0x42000000 0 0x00000000 0 0x00000000 1 0x00000000>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &gic GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gic GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gic GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gic GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>; + snps,enable-cdm-check; + status = "disabled"; + }; + + pciec1: pcie@e65d8000 { + compatible = "renesas,r8a779g0-pcie", + "renesas,rcar-gen4-pcie"; + reg = <0 0xe65d8000 0 0x1000>, <0 0xe65da000 0 0x0800>, + <0 0xe65db000 0 0x2000>, <0 0xe65dd000 0 0x1200>, + <0 0xe65de200 0 0x0e00>, <0 0xe65df000 0 0x0400>, + <0 0xee900000 0 0x400000>; + reg-names = "dbi", "dbi2", "atu", "dma", "app", "phy", "config"; + interrupts = , + , + , + ; + interrupt-names = "msi", "dma", "sft_ce", "app"; + clocks = <&cpg CPG_MOD 625>, <&pcie1_clkref>; + clock-names = "core", "ref"; + power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; + resets = <&cpg 625>; + reset-names = "pwr"; + max-link-speed = <4>; + num-lanes = <2>; + #address-cells = <3>; + #size-cells = <2>; + bus-range = <0x00 0xff>; + device_type = "pci"; + ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00400000>, + <0x02000000 0 0xc0000000 0 0xc0000000 0 0x10000000>; + dma-ranges = <0x42000000 0 0x00000000 0 0x00000000 1 0x00000000>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &gic GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gic GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gic GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gic GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>; + snps,enable-cdm-check; + status = "disabled"; + }; + + pciec0_ep: pcie-ep@e65d0000 { + compatible = "renesas,r8a779g0-pcie-ep", + "renesas,rcar-gen4-pcie-ep"; + reg = <0 0xe65d0000 0 0x2000>, <0 0xe65d2000 0 0x1000>, + <0 0xe65d3000 0 0x2000>, <0 0xe65d5000 0 0x1200>, + <0 0xe65d6200 0 0x0e00>, <0 0xe65d7000 0 0x0400>, + <0 0xfe000000 0 0x400000>; + reg-names = "dbi", "dbi2", "atu", "dma", "app", "phy", "addr_space"; + interrupts = , + , + ; + interrupt-names = "dma", "sft_ce", "app"; + clocks = <&cpg CPG_MOD 624>, <&pcie0_clkref>; + clock-names = "core", "ref"; + power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; + resets = <&cpg 624>; + reset-names = "pwr"; + max-link-speed = <4>; + num-lanes = <2>; + max-functions 
= /bits/ 8 <2>; + status = "disabled"; + }; + + pciec1_ep: pcie-ep@e65d8000 { + compatible = "renesas,r8a779g0-pcie-ep", + "renesas,rcar-gen4-pcie-ep"; + reg = <0 0xe65d8000 0 0x2000>, <0 0xe65da000 0 0x1000>, + <0 0xe65db000 0 0x2000>, <0 0xe65dd000 0 0x1200>, + <0 0xe65de200 0 0x0e00>, <0 0xe65df000 0 0x0400>, + <0 0xee900000 0 0x400000>; + reg-names = "dbi", "dbi2", "atu", "dma", "app", "phy", "addr_space"; + interrupts = , + , + ; + interrupt-names = "dma", "sft_ce", "app"; + clocks = <&cpg CPG_MOD 625>, <&pcie1_clkref>; + clock-names = "core", "ref"; + power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; + resets = <&cpg 625>; + reset-names = "pwr"; + max-link-speed = <4>; + num-lanes = <2>; + max-functions = /bits/ 8 <2>; + status = "disabled"; + }; + canfd: can@e6660000 { compatible = "renesas,r8a779g0-canfd", "renesas,rcar-gen4-canfd"; @@ -815,6 +963,7 @@ avb0: ethernet@e6800000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_hc 0>; status = "disabled"; }; @@ -860,6 +1009,7 @@ avb1: ethernet@e6810000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_hc 1>; status = "disabled"; }; @@ -905,6 +1055,7 @@ avb2: ethernet@e6820000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_hc 2>; status = "disabled"; }; @@ -1184,7 +1335,8 @@ msiof5: spi@e6c28000 { }; vin00: video@e6ef0000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef0000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 730>; @@ -1212,7 +1364,8 @@ vin00isp0: endpoint@0 { }; vin01: video@e6ef1000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef1000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 731>; @@ -1240,7 +1393,8 @@ vin01isp0: endpoint@0 { }; vin02: video@e6ef2000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef2000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 800>; @@ -1268,7 +1422,8 @@ vin02isp0: endpoint@0 { }; vin03: video@e6ef3000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef3000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 801>; @@ -1296,7 +1451,8 @@ vin03isp0: endpoint@0 { }; vin04: video@e6ef4000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef4000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 802>; @@ -1324,7 +1480,8 @@ vin04isp0: endpoint@0 { }; vin05: video@e6ef5000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef5000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 803>; @@ -1352,7 +1509,8 @@ vin05isp0: endpoint@0 { }; vin06: video@e6ef6000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef6000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 804>; @@ -1380,7 +1538,8 @@ vin06isp0: endpoint@0 { }; vin07: video@e6ef7000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef7000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 805>; @@ -1408,7 +1567,8 @@ vin07isp0: endpoint@0 { }; vin08: video@e6ef8000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + 
"renesas,rcar-gen4-vin"; reg = <0 0xe6ef8000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 806>; @@ -1436,7 +1596,8 @@ vin08isp1: endpoint@1 { }; vin09: video@e6ef9000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef9000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 807>; @@ -1464,7 +1625,8 @@ vin09isp1: endpoint@1 { }; vin10: video@e6efa000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efa000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 808>; @@ -1492,7 +1654,8 @@ vin10isp1: endpoint@1 { }; vin11: video@e6efb000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efb000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 809>; @@ -1520,7 +1683,8 @@ vin11isp1: endpoint@1 { }; vin12: video@e6efc000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efc000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 810>; @@ -1548,7 +1712,8 @@ vin12isp1: endpoint@1 { }; vin13: video@e6efd000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efd000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 811>; @@ -1576,7 +1741,8 @@ vin13isp1: endpoint@1 { }; vin14: video@e6efe000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efe000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 812>; @@ -1604,7 +1770,8 @@ vin14isp1: endpoint@1 { }; vin15: video@e6eff000 { - compatible = "renesas,vin-r8a779g0"; + compatible = "renesas,vin-r8a779g0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6eff000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 813>; @@ -1987,6 +2154,7 @@ fcpvd0: fcp@fea10000 { clocks = <&cpg CPG_MOD 508>; power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; resets = <&cpg 508>; + iommus = <&ipmmu_vi1 6>; }; fcpvd1: fcp@fea11000 { @@ -1995,6 +2163,7 @@ fcpvd1: fcp@fea11000 { clocks = <&cpg CPG_MOD 509>; power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; resets = <&cpg 509>; + iommus = <&ipmmu_vi1 7>; }; vspd0: vsp@fea20000 { @@ -2054,7 +2223,8 @@ du_out_dsi1: endpoint { }; isp0: isp@fed00000 { - compatible = "renesas,r8a779g0-isp"; + compatible = "renesas,r8a779g0-isp", + "renesas,rcar-gen4-isp"; reg = <0 0xfed00000 0 0x10000>; interrupts = ; clocks = <&cpg CPG_MOD 612>; @@ -2137,7 +2307,8 @@ isp0vin07: endpoint { }; isp1: isp@fed20000 { - compatible = "renesas,r8a779g0-isp"; + compatible = "renesas,r8a779g0-isp", + "renesas,rcar-gen4-isp"; reg = <0 0xfed20000 0 0x10000>; interrupts = ; clocks = <&cpg CPG_MOD 613>; diff --git a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts index 2f79e5a6124897..50a428572d9bd9 100644 --- a/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts +++ b/arch/arm64/boot/dts/renesas/r8a779g2-white-hawk-single.dts @@ -24,3 +24,54 @@ &hscif0_pins { groups = "hscif0_data", "hscif0_ctrl"; function = "hscif0"; }; + +&pfc { + tsn0_pins: tsn0 { + mux { + groups = "tsn0_link", "tsn0_mdio", "tsn0_rgmii", + "tsn0_txcrefclk"; + function = "tsn0"; + }; + + link { + groups = "tsn0_link"; + bias-disable; + }; + + mdio { + groups = "tsn0_mdio"; + drive-strength = <24>; + bias-disable; + }; + + rgmii { + groups = "tsn0_rgmii"; + drive-strength = <24>; + bias-disable; + }; + }; +}; + +&tsn0 { + pinctrl-0 = 
<&tsn0_pins>; + pinctrl-names = "default"; + phy-mode = "rgmii"; + phy-handle = <&phy3>; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; + reset-post-delay-us = <4000>; + + phy3: ethernet-phy@0 { + compatible = "ethernet-phy-id002b.0980", + "ethernet-phy-ieee802.3-c22"; + reg = <0>; + interrupt-parent = <&gpio4>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts index 2b9a19bb1c5d30..9a1917b87f6138 100644 --- a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts +++ b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts @@ -5,10 +5,31 @@ * Copyright (C) 2023 Renesas Electronics Corp. * Copyright (C) 2024 Glider bv */ +/* + * [How to use Sound] + * + * Because R-Car V4M has only 1 SSI, it cannot handle both Playback/Capture + * at the same time. You need to switch the direction which is controlled + * by the GP0_01 pin via amixer. + * + * Playback (CN9500) + * > amixer set "MUX" "Playback" // for GP0_01 + * > amixer set "DAC 1" 85% + * > aplay xxx.wav + * + * Capture (CN9501) + * > amixer set "MUX" "Capture" // for GP0_01 + * > amixer set "Mic 1" 80% + * > amixer set "ADC 1" on + * > amixer set 'ADC 1' 80% + * > arecord xxx hoge.wav + */ /dts-v1/; #include +#include +#include #include "r8a779h0.dtsi" @@ -26,11 +47,74 @@ aliases { ethernet0 = &avb0; }; + can_transceiver0: can-phy0 { + compatible = "nxp,tjr1443"; + #phy-cells = <0>; + enable-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; + max-bitrate = <5000000>; + }; + chosen { bootargs = "ignore_loglevel rw root=/dev/nfs ip=on"; stdout-path = "serial0:921600n8"; }; + keys { + compatible = "gpio-keys"; + + pinctrl-0 = <&keys_pins>; + pinctrl-names = "default"; + + key-1 { + gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; + linux,code = ; + label = "SW47"; + wakeup-source; + debounce-interval = <20>; + }; + + key-2 { + gpios = <&gpio5 1 GPIO_ACTIVE_LOW>; + linux,code = ; + label = "SW48"; + wakeup-source; + debounce-interval = <20>; + }; + + key-3 { + gpios = <&gpio5 2 GPIO_ACTIVE_LOW>; + linux,code = ; + label = "SW49"; + wakeup-source; + debounce-interval = <20>; + }; + }; + + leds { + compatible = "gpio-leds"; + + led-1 { + gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; + color = ; + function = LED_FUNCTION_INDICATOR; + function-enumerator = <1>; + }; + + led-2 { + gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; + color = ; + function = LED_FUNCTION_INDICATOR; + function-enumerator = <2>; + }; + + led-3 { + gpios = <&gpio7 2 GPIO_ACTIVE_HIGH>; + color = ; + function = LED_FUNCTION_INDICATOR; + function-enumerator = <3>; + }; + }; + memory@48000000 { device_type = "memory"; /* first 128MB is reserved for secure area. 
*/ @@ -59,6 +143,24 @@ reg_3p3v: regulator-3p3v { regulator-boot-on; regulator-always-on; }; + + sound_mux: sound-mux { + compatible = "simple-audio-mux"; + mux-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>; + state-labels = "Playback", "Capture"; + }; + + sound_card: sound { + compatible = "audio-graph-card2"; + label = "rcar-sound"; + aux-devs = <&sound_mux>; // for GP0_01 + + links = <&rsnd_port>; // AK4619 Audio Codec + }; +}; + +&audio_clkin { + clock-frequency = <24576000>; }; &avb0 { @@ -79,6 +181,25 @@ phy0: ethernet-phy@0 { }; }; +&can_clk { + clock-frequency = <40000000>; +}; + +&canfd { + pinctrl-0 = <&canfd0_pins>, <&canfd1_pins>, <&can_clk_pins>; + pinctrl-names = "default"; + status = "okay"; + + channel0 { + status = "okay"; + phys = <&can_transceiver0>; + }; + + channel1 { + status = "okay"; + }; +}; + &extal_clk { clock-frequency = <16666666>; }; @@ -87,6 +208,15 @@ &extalr_clk { clock-frequency = <32768>; }; +&gpio1 { + audio-power-hog { + gpio-hog; + gpios = <8 GPIO_ACTIVE_HIGH>; + output-high; + line-name = "Audio-Power"; + }; +}; + &hscif0 { pinctrl-0 = <&hscif0_pins>; pinctrl-names = "default"; @@ -139,6 +269,29 @@ eeprom@53 { }; }; +&i2c3 { + pinctrl-0 = <&i2c3_pins>; + pinctrl-names = "default"; + + status = "okay"; + clock-frequency = <400000>; + + codec@10 { + compatible = "asahi-kasei,ak4619"; + reg = <0x10>; + + clocks = <&rcar_sound>; + clock-names = "mclk"; + + #sound-dai-cells = <0>; + port { + ak4619_endpoint: endpoint { + remote-endpoint = <&rsnd_endpoint>; + }; + }; + }; +}; + &mmc0 { pinctrl-0 = <&mmc_pins>; pinctrl-1 = <&mmc_pins>; @@ -178,6 +331,21 @@ pins_mii { }; }; + can_clk_pins: can-clk { + groups = "can_clk"; + function = "can_clk"; + }; + + canfd0_pins: canfd0 { + groups = "canfd0_data"; + function = "canfd0"; + }; + + canfd1_pins: canfd1 { + groups = "canfd1_data"; + function = "canfd1"; + }; + hscif0_pins: hscif0 { groups = "hscif0_data", "hscif0_ctrl"; function = "hscif0"; @@ -193,6 +361,16 @@ i2c0_pins: i2c0 { function = "i2c0"; }; + i2c3_pins: i2c3 { + groups = "i2c3"; + function = "i2c3"; + }; + + keys_pins: keys { + pins = "GP_5_0", "GP_5_1", "GP_5_2"; + bias-pull-up; + }; + mmc_pins: mmc { groups = "mmc_data8", "mmc_ctrl", "mmc_ds"; function = "mmc"; @@ -213,6 +391,40 @@ scif_clk2_pins: scif-clk2 { groups = "scif_clk2"; function = "scif_clk2"; }; + + sound_clk_pins: sound_clk { + groups = "audio_clkin", "audio_clkout"; + function = "audio_clk"; + }; + + sound_pins: sound { + groups = "ssi_ctrl", "ssi_data"; + function = "ssi"; + }; +}; + +&rcar_sound { + pinctrl-0 = <&sound_clk_pins>, <&sound_pins>; + pinctrl-names = "default"; + + status = "okay"; + + /* audio_clkout */ + clock-frequency = <12288000>; + + ports { + rsnd_port: port { + rsnd_endpoint: endpoint { + remote-endpoint = <&ak4619_endpoint>; + bitclock-master; + frame-master; + + /* see above [How to use Sound] */ + playback = <&ssi0>; + capture = <&ssi0>; + }; + }; + }; }; &rpc { diff --git a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi index a03ab2b6a859b7..12d8be3fd579cb 100644 --- a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi @@ -21,6 +21,13 @@ audio_clkin: audio_clkin { clock-frequency = <0>; }; + /* External CAN clock - to be overridden by boards that provide it */ + can_clk: can-clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + cluster0_opp: opp-table-0 { compatible = "operating-points-v2"; @@ -636,6 +643,40 @@ hscif3: serial@e66a0000 { status = "disabled"; 
}; + canfd: can@e6660000 { + compatible = "renesas,r8a779h0-canfd", + "renesas,rcar-gen4-canfd"; + reg = <0 0xe6660000 0 0x8500>; + interrupts = , + ; + interrupt-names = "ch_int", "g_int"; + clocks = <&cpg CPG_MOD 328>, + <&cpg CPG_CORE R8A779H0_CLK_CANFD>, + <&can_clk>; + clock-names = "fck", "canfd", "can_clk"; + assigned-clocks = <&cpg CPG_CORE R8A779H0_CLK_CANFD>; + assigned-clock-rates = <80000000>; + power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>; + resets = <&cpg 328>; + status = "disabled"; + + channel0 { + status = "disabled"; + }; + + channel1 { + status = "disabled"; + }; + + channel2 { + status = "disabled"; + }; + + channel3 { + status = "disabled"; + }; + }; + avb0: ethernet@e6800000 { compatible = "renesas,etheravb-r8a779h0", "renesas,etheravb-rcar-gen4"; @@ -728,6 +769,7 @@ avb1: ethernet@e6810000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_hc 1>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -776,11 +818,62 @@ avb2: ethernet@e6820000 { phy-mode = "rgmii"; rx-internal-delay-ps = <0>; tx-internal-delay-ps = <0>; + iommus = <&ipmmu_hc 2>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; }; + pwm0: pwm@e6e30000 { + compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar"; + reg = <0 0xe6e30000 0 0x10>; + #pwm-cells = <2>; + clocks = <&cpg CPG_MOD 628>; + power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>; + resets = <&cpg 628>; + status = "disabled"; + }; + + pwm1: pwm@e6e31000 { + compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar"; + reg = <0 0xe6e31000 0 0x10>; + #pwm-cells = <2>; + clocks = <&cpg CPG_MOD 628>; + power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>; + resets = <&cpg 628>; + status = "disabled"; + }; + + pwm2: pwm@e6e32000 { + compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar"; + reg = <0 0xe6e32000 0 0x10>; + #pwm-cells = <2>; + clocks = <&cpg CPG_MOD 628>; + power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>; + resets = <&cpg 628>; + status = "disabled"; + }; + + pwm3: pwm@e6e33000 { + compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar"; + reg = <0 0xe6e33000 0 0x10>; + #pwm-cells = <2>; + clocks = <&cpg CPG_MOD 628>; + power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>; + resets = <&cpg 628>; + status = "disabled"; + }; + + pwm4: pwm@e6e34000 { + compatible = "renesas,pwm-r8a779h0", "renesas,pwm-rcar"; + reg = <0 0xe6e34000 0 0x10>; + #pwm-cells = <2>; + clocks = <&cpg CPG_MOD 628>; + power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>; + resets = <&cpg 628>; + status = "disabled"; + }; + scif0: serial@e6e60000 { compatible = "renesas,scif-r8a779h0", "renesas,rcar-gen4-scif", "renesas,scif"; @@ -946,7 +1039,8 @@ msiof5: spi@e6c28000 { }; vin00: video@e6ef0000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef0000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 730>; @@ -974,7 +1068,8 @@ vin00isp0: endpoint@0 { }; vin01: video@e6ef1000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef1000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 731>; @@ -1002,7 +1097,8 @@ vin01isp0: endpoint@0 { }; vin02: video@e6ef2000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef2000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 800>; @@ -1030,7 +1126,8 @@ vin02isp0: endpoint@0 { }; vin03: video@e6ef3000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + 
"renesas,rcar-gen4-vin"; reg = <0 0xe6ef3000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 801>; @@ -1058,7 +1155,8 @@ vin03isp0: endpoint@0 { }; vin04: video@e6ef4000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef4000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 802>; @@ -1086,7 +1184,8 @@ vin04isp0: endpoint@0 { }; vin05: video@e6ef5000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef5000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 803>; @@ -1114,7 +1213,8 @@ vin05isp0: endpoint@0 { }; vin06: video@e6ef6000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef6000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 804>; @@ -1142,7 +1242,8 @@ vin06isp0: endpoint@0 { }; vin07: video@e6ef7000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef7000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 805>; @@ -1170,7 +1271,8 @@ vin07isp0: endpoint@0 { }; vin08: video@e6ef8000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef8000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 806>; @@ -1198,7 +1300,8 @@ vin08isp1: endpoint@1 { }; vin09: video@e6ef9000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6ef9000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 807>; @@ -1226,7 +1329,8 @@ vin09isp1: endpoint@1 { }; vin10: video@e6efa000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efa000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 808>; @@ -1254,7 +1358,8 @@ vin10isp1: endpoint@1 { }; vin11: video@e6efb000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efb000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 809>; @@ -1282,7 +1387,8 @@ vin11isp1: endpoint@1 { }; vin12: video@e6efc000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efc000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 810>; @@ -1310,7 +1416,8 @@ vin12isp1: endpoint@1 { }; vin13: video@e6efd000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efd000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 811>; @@ -1338,7 +1445,8 @@ vin13isp1: endpoint@1 { }; vin14: video@e6efe000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6efe000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 812>; @@ -1366,7 +1474,8 @@ vin14isp1: endpoint@1 { }; vin15: video@e6eff000 { - compatible = "renesas,vin-r8a779h0"; + compatible = "renesas,vin-r8a779h0", + "renesas,rcar-gen4-vin"; reg = <0 0xe6eff000 0 0x1000>; interrupts = ; clocks = <&cpg CPG_MOD 813>; @@ -1720,7 +1829,8 @@ csi41isp1: endpoint { }; isp0: isp@fed00000 { - compatible = "renesas,r8a779h0-isp"; + compatible = "renesas,r8a779h0-isp", + "renesas,rcar-gen4-isp"; reg = <0 0xfed00000 0 0x10000>; interrupts = ; clocks = <&cpg CPG_MOD 612>; @@ -1803,7 +1913,8 @@ isp0vin07: endpoint { }; isp1: isp@fed20000 { - compatible = "renesas,r8a779h0-isp"; + compatible = "renesas,r8a779h0-isp", + 
"renesas,rcar-gen4-isp"; reg = <0 0xfed20000 0 0x10000>; interrupts = ; clocks = <&cpg CPG_MOD 613>; diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi index 2eccab9c896202..593c66b27ad126 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi @@ -725,6 +725,10 @@ phyrst: usbphy-ctrl@11c40000 { power-domains = <&cpg>; #reset-cells = <1>; status = "disabled"; + + usb0_vbus_otg: regulator-vbus { + regulator-name = "vbus"; + }; }; ohci0: usb@11c50000 { diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi index 18ef297db93363..a3998e5928f7c9 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi @@ -129,6 +129,55 @@ csi2cru: endpoint@0 { }; }; + vspd: vsp@10870000 { + compatible = "renesas,r9a07g043u-vsp2", "renesas,r9a07g044-vsp2"; + reg = <0 0x10870000 0 0x10000>; + interrupts = ; + clocks = <&cpg CPG_MOD R9A07G043_LCDC_CLK_A>, + <&cpg CPG_MOD R9A07G043_LCDC_CLK_P>, + <&cpg CPG_MOD R9A07G043_LCDC_CLK_D>; + clock-names = "aclk", "pclk", "vclk"; + power-domains = <&cpg>; + resets = <&cpg R9A07G043_LCDC_RESET_N>; + renesas,fcp = <&fcpvd>; + }; + + fcpvd: fcp@10880000 { + compatible = "renesas,r9a07g043u-fcpvd", "renesas,fcpv"; + reg = <0 0x10880000 0 0x10000>; + clocks = <&cpg CPG_MOD R9A07G043_LCDC_CLK_A>, + <&cpg CPG_MOD R9A07G043_LCDC_CLK_P>, + <&cpg CPG_MOD R9A07G043_LCDC_CLK_D>; + clock-names = "aclk", "pclk", "vclk"; + power-domains = <&cpg>; + resets = <&cpg R9A07G043_LCDC_RESET_N>; + }; + + du: display@10890000 { + compatible = "renesas,r9a07g043u-du"; + reg = <0 0x10890000 0 0x10000>; + interrupts = ; + clocks = <&cpg CPG_MOD R9A07G043_LCDC_CLK_A>, + <&cpg CPG_MOD R9A07G043_LCDC_CLK_P>, + <&cpg CPG_MOD R9A07G043_LCDC_CLK_D>; + clock-names = "aclk", "pclk", "vclk"; + power-domains = <&cpg>; + resets = <&cpg R9A07G043_LCDC_RESET_N>; + renesas,vsps = <&vspd 0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + du_out_rgb: endpoint { + }; + }; + }; + }; + irqc: interrupt-controller@110a0000 { compatible = "renesas,r9a07g043u-irqc", "renesas,rzg2l-irqc"; @@ -210,8 +259,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = ; }; }; diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc-du-adv7513.dtso b/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc-du-adv7513.dtso new file mode 100644 index 00000000000000..ecd43a67100029 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc-du-adv7513.dtso @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree overlay for the RZ/G2UL SMARC EVK with ADV7513 transmitter + * connected to DU enabled. + * + * Copyright (C) 2024 Renesas Electronics Corp. 
+ */ + +/dts-v1/; +/plugin/; + +#include + +#define ADV7513_PARENT_I2C i2c1 +#include "rz-smarc-du-adv7513.dtsi" + +&pinctrl { + du_pins: du { + data { + pinmux = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + drive-strength = <2>; + }; + + sync { + pinmux = , /* HSYNC */ + ; /* VSYNC */ + drive-strength = <2>; + }; + + de { + pinmux = ; /* DE */ + drive-strength = <2>; + }; + + clk { + pinmux = ; /* CLK */ + }; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi index d3838e5820fca1..6b1c77cd8261ca 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi @@ -1043,8 +1043,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = ; }; @@ -1129,6 +1129,10 @@ phyrst: usbphy-ctrl@11c40000 { power-domains = <&cpg>; #reset-cells = <1>; status = "disabled"; + + usb0_vbus_otg: regulator-vbus { + regulator-name = "vbus"; + }; }; ohci0: usb@11c50000 { diff --git a/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts b/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts index 0b90367b6d1e31..ee5bf2c5805114 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts +++ b/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc.dts @@ -47,6 +47,9 @@ #error "Cannot set as both PMOD_MTU3 and SW_RSPI_CAN are mutually exclusive" #endif +/* Please set SW_I2S0_I2S1. Default value is 0 */ +#define SW_I2S0_I2S1 0 + #include "r9a07g044c2.dtsi" #include "rzg2lc-smarc-som.dtsi" #include "rzg2lc-smarc.dtsi" diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi index 1de2e5f0917d91..01f59914dd0997 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi @@ -1051,8 +1051,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = ; }; @@ -1137,6 +1137,10 @@ phyrst: usbphy-ctrl@11c40000 { power-domains = <&cpg>; #reset-cells = <1>; status = "disabled"; + + usb0_vbus_otg: regulator-vbus { + regulator-name = "vbus"; + }; }; ohci0: usb@11c50000 { diff --git a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi index 0d5c47a65e46c5..067a26a66c24ca 100644 --- a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi @@ -72,6 +72,94 @@ scif0: serial@1004b800 { status = "disabled"; }; + i2c0: i2c@10090000 { + compatible = "renesas,riic-r9a08g045", "renesas,riic-r9a09g057"; + reg = <0 0x10090000 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD R9A08G045_I2C0_PCLK>; + clock-frequency = <100000>; + resets = <&cpg R9A08G045_I2C0_MRST>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c1: i2c@10090400 { + compatible = "renesas,riic-r9a08g045", "renesas,riic-r9a09g057"; + reg = <0 0x10090400 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD R9A08G045_I2C1_PCLK>; + 
clock-frequency = <100000>; + resets = <&cpg R9A08G045_I2C1_MRST>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c2: i2c@10090800 { + compatible = "renesas,riic-r9a08g045", "renesas,riic-r9a09g057"; + reg = <0 0x10090800 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD R9A08G045_I2C2_PCLK>; + clock-frequency = <100000>; + resets = <&cpg R9A08G045_I2C2_MRST>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c3: i2c@10090c00 { + compatible = "renesas,riic-r9a08g045", "renesas,riic-r9a09g057"; + reg = <0 0x10090c00 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD R9A08G045_I2C3_PCLK>; + clock-frequency = <100000>; + resets = <&cpg R9A08G045_I2C3_MRST>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + cpg: clock-controller@11010000 { compatible = "renesas,r9a08g045-cpg"; reg = <0 0x11010000 0 0x10000>; @@ -181,6 +269,44 @@ irqc: interrupt-controller@11050000 { resets = <&cpg R9A08G045_IA55_RESETN>; }; + dmac: dma-controller@11820000 { + compatible = "renesas,r9a08g045-dmac", + "renesas,rz-dmac"; + reg = <0 0x11820000 0 0x10000>, + <0 0x11830000 0 0x10000>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "error", + "ch0", "ch1", "ch2", "ch3", + "ch4", "ch5", "ch6", "ch7", + "ch8", "ch9", "ch10", "ch11", + "ch12", "ch13", "ch14", "ch15"; + clocks = <&cpg CPG_MOD R9A08G045_DMAC_ACLK>, + <&cpg CPG_MOD R9A08G045_DMAC_PCLK>; + clock-names = "main", "register"; + power-domains = <&cpg>; + resets = <&cpg R9A08G045_DMAC_ARESETN>, + <&cpg R9A08G045_DMAC_RST_ASYNC>; + reset-names = "arst", "rst_async"; + #dma-cells = <1>; + dma-channels = <16>; + }; + sdhi0: mmc@11c00000 { compatible = "renesas,sdhi-r9a08g045", "renesas,rzg2l-sdhi"; reg = <0x0 0x11c00000 0 0x10000>; @@ -269,8 +395,8 @@ gic: interrupt-controller@12400000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x12400000 0 0x40000>, - <0x0 0x12440000 0 0x60000>; + reg = <0x0 0x12400000 0 0x20000>, + <0x0 0x12440000 0 0x40000>; interrupts = ; }; diff --git a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi new file mode 100644 index 00000000000000..1ad5a1b6917fee --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * Device Tree Source for the RZ/V2H(P) SoC + * + * Copyright (C) 2024 Renesas Electronics Corp. 
+ */ + +#include +#include + +/ { + compatible = "renesas,r9a09g057"; + #address-cells = <2>; + #size-cells = <2>; + + audio_extal_clk: audio-clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + /* This value must be overridden by the board */ + clock-frequency = <0>; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + compatible = "arm,cortex-a55"; + reg = <0>; + device_type = "cpu"; + next-level-cache = <&L3_CA55>; + enable-method = "psci"; + }; + + cpu1: cpu@100 { + compatible = "arm,cortex-a55"; + reg = <0x100>; + device_type = "cpu"; + next-level-cache = <&L3_CA55>; + enable-method = "psci"; + }; + + cpu2: cpu@200 { + compatible = "arm,cortex-a55"; + reg = <0x200>; + device_type = "cpu"; + next-level-cache = <&L3_CA55>; + enable-method = "psci"; + }; + + cpu3: cpu@300 { + compatible = "arm,cortex-a55"; + reg = <0x300>; + device_type = "cpu"; + next-level-cache = <&L3_CA55>; + enable-method = "psci"; + }; + + L3_CA55: cache-controller-0 { + compatible = "cache"; + cache-unified; + cache-size = <0x100000>; + cache-level = <3>; + }; + }; + + psci { + compatible = "arm,psci-1.0", "arm,psci-0.2"; + method = "smc"; + }; + + qextal_clk: qextal-clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + /* This value must be overridden by the board */ + clock-frequency = <0>; + }; + + rtxin_clk: rtxin-clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + /* This value must be overridden by the board */ + clock-frequency = <0>; + }; + + soc: soc { + compatible = "simple-bus"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + pinctrl: pinctrl@10410000 { + compatible = "renesas,r9a09g057-pinctrl"; + reg = <0 0x10410000 0 0x10000>; + clocks = <&cpg CPG_CORE R9A09G057_IOTOP_0_SHCLK>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 0 96>; + #interrupt-cells = <2>; + interrupt-controller; + power-domains = <&cpg>; + resets = <&cpg 0xa5>, <&cpg 0xa6>; + }; + + cpg: clock-controller@10420000 { + compatible = "renesas,r9a09g057-cpg"; + reg = <0 0x10420000 0 0x10000>; + clocks = <&audio_extal_clk>, <&rtxin_clk>, <&qextal_clk>; + clock-names = "audio_extal", "rtxin", "qextal"; + #clock-cells = <2>; + #reset-cells = <1>; + #power-domain-cells = <0>; + }; + + sys: system-controller@10430000 { + compatible = "renesas,r9a09g057-sys"; + reg = <0 0x10430000 0 0x10000>; + clocks = <&cpg CPG_CORE R9A09G057_SYS_0_PCLK>; + resets = <&cpg 0x30>; + status = "disabled"; + }; + + ostm0: timer@11800000 { + compatible = "renesas,r9a09g057-ostm", "renesas,ostm"; + reg = <0x0 0x11800000 0x0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 0x43>; + resets = <&cpg 0x6d>; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm1: timer@11801000 { + compatible = "renesas,r9a09g057-ostm", "renesas,ostm"; + reg = <0x0 0x11801000 0x0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 0x44>; + resets = <&cpg 0x6e>; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm2: timer@14000000 { + compatible = "renesas,r9a09g057-ostm", "renesas,ostm"; + reg = <0x0 0x14000000 0x0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 0x45>; + resets = <&cpg 0x6f>; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm3: timer@14001000 { + compatible = "renesas,r9a09g057-ostm", "renesas,ostm"; + reg = <0x0 0x14001000 0x0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 0x46>; + resets = <&cpg 0x70>; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm4: timer@12c00000 { + compatible = "renesas,r9a09g057-ostm", 
"renesas,ostm"; + reg = <0x0 0x12c00000 0x0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 0x47>; + resets = <&cpg 0x71>; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm5: timer@12c01000 { + compatible = "renesas,r9a09g057-ostm", "renesas,ostm"; + reg = <0x0 0x12c01000 0x0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 0x48>; + resets = <&cpg 0x72>; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm6: timer@12c02000 { + compatible = "renesas,r9a09g057-ostm", "renesas,ostm"; + reg = <0x0 0x12c02000 0x0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 0x49>; + resets = <&cpg 0x73>; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm7: timer@12c03000 { + compatible = "renesas,r9a09g057-ostm", "renesas,ostm"; + reg = <0x0 0x12c03000 0x0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 0x4a>; + resets = <&cpg 0x74>; + power-domains = <&cpg>; + status = "disabled"; + }; + + wdt0: watchdog@11c00400 { + compatible = "renesas,r9a09g057-wdt"; + reg = <0 0x11c00400 0 0x400>; + clocks = <&cpg CPG_MOD 0x4b>, <&cpg CPG_MOD 0x4c>; + clock-names = "pclk", "oscclk"; + resets = <&cpg 0x75>; + power-domains = <&cpg>; + status = "disabled"; + }; + + wdt1: watchdog@14400000 { + compatible = "renesas,r9a09g057-wdt"; + reg = <0 0x14400000 0 0x400>; + clocks = <&cpg CPG_MOD 0x4d>, <&cpg CPG_MOD 0x4e>; + clock-names = "pclk", "oscclk"; + resets = <&cpg 0x76>; + power-domains = <&cpg>; + status = "disabled"; + }; + + wdt2: watchdog@13000000 { + compatible = "renesas,r9a09g057-wdt"; + reg = <0 0x13000000 0 0x400>; + clocks = <&cpg CPG_MOD 0x4f>, <&cpg CPG_MOD 0x50>; + clock-names = "pclk", "oscclk"; + resets = <&cpg 0x77>; + power-domains = <&cpg>; + status = "disabled"; + }; + + wdt3: watchdog@13000400 { + compatible = "renesas,r9a09g057-wdt"; + reg = <0 0x13000400 0 0x400>; + clocks = <&cpg CPG_MOD 0x51>, <&cpg CPG_MOD 0x52>; + clock-names = "pclk", "oscclk"; + resets = <&cpg 0x78>; + power-domains = <&cpg>; + status = "disabled"; + }; + + scif: serial@11c01400 { + compatible = "renesas,scif-r9a09g057"; + reg = <0 0x11c01400 0 0x400>; + interrupts = , + , + , + , + , + , + , + , + ; + interrupt-names = "eri", "rxi", "txi", "bri", "dri", + "tei", "tei-dri", "rxi-edge", "txi-edge"; + clocks = <&cpg CPG_MOD 0x8f>; + clock-names = "fck"; + power-domains = <&cpg>; + resets = <&cpg 0x95>; + status = "disabled"; + }; + + i2c0: i2c@14400400 { + compatible = "renesas,riic-r9a09g057"; + reg = <0 0x14400400 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x94>; + resets = <&cpg 0x98>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c1: i2c@14400800 { + compatible = "renesas,riic-r9a09g057"; + reg = <0 0x14400800 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x95>; + resets = <&cpg 0x99>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c2: i2c@14400c00 { + compatible = "renesas,riic-r9a09g057"; + reg = <0 0x14400c00 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x96>; + resets = <&cpg 0x9a>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c3: i2c@14401000 { + compatible = 
"renesas,riic-r9a09g057"; + reg = <0 0x14401000 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x97>; + resets = <&cpg 0x9b>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c4: i2c@14401400 { + compatible = "renesas,riic-r9a09g057"; + reg = <0 0x14401400 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x98>; + resets = <&cpg 0x9c>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c5: i2c@14401800 { + compatible = "renesas,riic-r9a09g057"; + reg = <0 0x14401800 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x99>; + resets = <&cpg 0x9d>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c6: i2c@14401c00 { + compatible = "renesas,riic-r9a09g057"; + reg = <0 0x14401c00 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x9a>; + resets = <&cpg 0x9e>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c7: i2c@14402000 { + compatible = "renesas,riic-r9a09g057"; + reg = <0 0x14402000 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x9b>; + resets = <&cpg 0x9f>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c8: i2c@11c01000 { + compatible = "renesas,riic-r9a09g057"; + reg = <0 0x11c01000 0 0x400>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "tei", "ri", "ti", "spi", "sti", + "naki", "ali", "tmoi"; + clocks = <&cpg CPG_MOD 0x93>; + resets = <&cpg 0xa0>; + power-domains = <&cpg>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + gic: interrupt-controller@14900000 { + compatible = "arm,gic-v3"; + reg = <0x0 0x14900000 0 0x20000>, + <0x0 0x14940000 0 0x80000>; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + interrupts = ; + }; + + sdhi0: mmc@15c00000 { + compatible = "renesas,sdhi-r9a09g057"; + reg = <0x0 0x15c00000 0 0x10000>; + interrupts = , + ; + clocks = <&cpg CPG_MOD 0xa3>, <&cpg CPG_MOD 0xa5>, + <&cpg CPG_MOD 0xa4>, <&cpg CPG_MOD 0xa6>; + clock-names = "core", "clkh", "cd", "aclk"; + resets = <&cpg 0xa7>; + power-domains = <&cpg>; + status = "disabled"; + }; + + sdhi1: mmc@15c10000 { + compatible = "renesas,sdhi-r9a09g057"; + reg = <0x0 0x15c10000 0 0x10000>; + interrupts = , + ; + clocks = <&cpg CPG_MOD 0xa7>, <&cpg CPG_MOD 0xa9>, + <&cpg CPG_MOD 0xa8>, <&cpg CPG_MOD 0xaa>; + clock-names = "core", "clkh", "cd", "aclk"; + resets = <&cpg 0xa8>; + power-domains = <&cpg>; + status = "disabled"; + }; + + sdhi2: mmc@15c20000 { + compatible = "renesas,sdhi-r9a09g057"; + reg = <0x0 0x15c20000 0 0x10000>; + interrupts = , + ; + clocks = <&cpg CPG_MOD 0xab>, <&cpg CPG_MOD 0xad>, + <&cpg CPG_MOD 0xac>, <&cpg CPG_MOD 0xae>; + clock-names = "core", "clkh", "cd", "aclk"; + resets = <&cpg 0xa9>; + power-domains = <&cpg>; + status = "disabled"; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + 
interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", "hyp-virt"; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts b/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts new file mode 100644 index 00000000000000..4703da8e9cff4b --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * Device Tree Source for the RZ/V2H EVK board + * + * Copyright (C) 2024 Renesas Electronics Corp. + */ + +/dts-v1/; + +#include +#include +#include "r9a09g057.dtsi" + +/ { + model = "Renesas RZ/V2H EVK Board based on r9a09g057h44"; + compatible = "renesas,rzv2h-evk", "renesas,r9a09g057h44", "renesas,r9a09g057"; + + aliases { + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + i2c6 = &i2c6; + i2c7 = &i2c7; + i2c8 = &i2c8; + mmc1 = &sdhi1; + serial0 = &scif; + }; + + chosen { + bootargs = "ignore_loglevel"; + stdout-path = "serial0:115200n8"; + }; + + memory@48000000 { + device_type = "memory"; + /* first 128MB is reserved for secure area. */ + reg = <0x0 0x48000000 0x1 0xF8000000>; + }; + + memory@240000000 { + device_type = "memory"; + reg = <0x2 0x40000000 0x2 0x00000000>; + }; + + reg_3p3v: regulator1 { + compatible = "regulator-fixed"; + + regulator-name = "fixed-3.3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + vqmmc_sdhi1: regulator-vccq-sdhi1 { + compatible = "regulator-gpio"; + regulator-name = "SDHI1 VccQ"; + gpios = <&pinctrl RZG2L_GPIO(10, 2) GPIO_ACTIVE_HIGH>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + gpios-states = <0>; + states = <3300000 0>, <1800000 1>; + }; +}; + +&audio_extal_clk { + clock-frequency = <22579200>; +}; + +&i2c0 { + pinctrl-0 = <&i2c0_pins>; + pinctrl-names = "default"; + clock-frequency = <400000>; + + status = "okay"; +}; + +&i2c1 { + pinctrl-0 = <&i2c1_pins>; + pinctrl-names = "default"; + clock-frequency = <400000>; + + status = "okay"; +}; + +&i2c2 { + pinctrl-0 = <&i2c2_pins>; + pinctrl-names = "default"; + clock-frequency = <400000>; + + status = "okay"; +}; + +&i2c3 { + pinctrl-0 = <&i2c3_pins>; + pinctrl-names = "default"; + clock-frequency = <400000>; + + status = "okay"; +}; + +&i2c6 { + pinctrl-0 = <&i2c6_pins>; + pinctrl-names = "default"; + clock-frequency = <400000>; + + status = "okay"; +}; + +&i2c7 { + pinctrl-0 = <&i2c7_pins>; + pinctrl-names = "default"; + clock-frequency = <400000>; + + status = "okay"; +}; + +&i2c8 { + pinctrl-0 = <&i2c8_pins>; + pinctrl-names = "default"; + clock-frequency = <400000>; + + status = "okay"; +}; + +&ostm0 { + status = "okay"; +}; + +&ostm1 { + status = "okay"; +}; + +&ostm2 { + status = "okay"; +}; + +&ostm3 { + status = "okay"; +}; + +&ostm4 { + status = "okay"; +}; + +&ostm5 { + status = "okay"; +}; + +&ostm6 { + status = "okay"; +}; + +&ostm7 { + status = "okay"; +}; + +&pinctrl { + i2c0_pins: i2c0 { + pinmux = , /* I2C0_SDA */ + ; /* I2C0_SCL */ + }; + + i2c1_pins: i2c1 { + pinmux = , /* I2C1_SDA */ + ; /* I2C1_SCL */ + }; + + i2c2_pins: i2c2 { + pinmux = , /* I2C2_SDA */ + ; /* I2C2_SCL */ + }; + + i2c3_pins: i2c3 { + pinmux = , /* I2C3_SDA */ + ; /* I2C3_SCL */ + }; + + i2c6_pins: i2c6 { + pinmux = , /* I2C6_SDA */ + ; /* I2C6_SCL 
*/ + }; + + i2c7_pins: i2c7 { + pinmux = , /* I2C7_SDA */ + ; /* I2C7_SCL */ + }; + + i2c8_pins: i2c8 { + pinmux = , /* I2C8_SDA */ + ; /* I2C8_SCL */ + }; + + scif_pins: scif { + pins = "SCIF_TXD", "SCIF_RXD"; + renesas,output-impedance = <1>; + }; + + sd1-pwr-en-hog { + gpio-hog; + gpios = ; + output-high; + line-name = "sd1_pwr_en"; + }; + + sdhi1_pins: sd1 { + sd1_dat_cmd { + pins = "SD1DAT0", "SD1DAT1", "SD1DAT2", "SD1DAT3", "SD1CMD"; + input-enable; + renesas,output-impedance = <3>; + slew-rate = <0>; + }; + + sd1_clk { + pins = "SD1CLK"; + renesas,output-impedance = <3>; + slew-rate = <0>; + }; + + sd1_cd { + pinmux = ; /* SD1_CD */ + }; + }; +}; + +&qextal_clk { + clock-frequency = <24000000>; +}; + +&rtxin_clk { + clock-frequency = <32768>; +}; + +&scif { + pinctrl-0 = <&scif_pins>; + pinctrl-names = "default"; + + status = "okay"; +}; + +&sdhi1 { + pinctrl-0 = <&sdhi1_pins>; + pinctrl-1 = <&sdhi1_pins>; + pinctrl-names = "default", "state_uhs"; + vmmc-supply = <®_3p3v>; + vqmmc-supply = <&vqmmc_sdhi1>; + bus-width = <4>; + sd-uhs-sdr50; + sd-uhs-sdr104; + status = "okay"; +}; + +&wdt1 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi b/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi index b34855956ae011..63fa5cf1061b3b 100644 --- a/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi +++ b/arch/arm64/boot/dts/renesas/rz-smarc-common.dtsi @@ -131,9 +131,6 @@ &ohci1 { &phyrst { status = "okay"; - usb0_vbus_otg: regulator-vbus { - regulator-name = "vbus"; - }; }; &scif0 { diff --git a/arch/arm64/boot/dts/renesas/rz-smarc-du-adv7513.dtsi b/arch/arm64/boot/dts/renesas/rz-smarc-du-adv7513.dtsi new file mode 100644 index 00000000000000..36707576030d40 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/rz-smarc-du-adv7513.dtsi @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Common Device Tree for the RZ/G2UL SMARC EVK (and alike EVKs) with + * ADV7513 transmitter connected to DU enabled. + * + * Copyright (C) 2024 Renesas Electronics Corp. 
+ */ + +&{/} { + hdmi-out { + compatible = "hdmi-connector"; + type = "d"; + + port { + hdmi_con_out: endpoint { + remote-endpoint = <&adv7513_out>; + }; + }; + }; +}; + +&du { + pinctrl-0 = <&du_pins>; + pinctrl-names = "default"; + + status = "okay"; + + ports { + port@0 { + du_out_rgb: endpoint { + remote-endpoint = <&adv7513_in>; + }; + }; + }; +}; + +&ADV7513_PARENT_I2C { + #address-cells = <1>; + #size-cells = <0>; + + adv7513: adv7513@39 { + compatible = "adi,adv7513"; + reg = <0x39>; + + adi,input-depth = <8>; + adi,input-colorspace = "rgb"; + adi,input-clock = "1x"; + + avdd-supply = <®_1p8v>; + dvdd-supply = <®_1p8v>; + pvdd-supply = <®_1p8v>; + dvdd-3v-supply = <®_3p3v>; + bgvdd-supply = <®_1p8v>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + adv7513_in: endpoint { + remote-endpoint = <&du_out_rgb>; + }; + }; + + port@1 { + reg = <1>; + + adv7513_out: endpoint { + remote-endpoint = <&hdmi_con_out>; + }; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi index 18c526c7a4cfe0..e9f244c33d558e 100644 --- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-pinfunction.dtsi @@ -143,6 +143,12 @@ ssi0_pins: ssi0 { ; /* RXD */ }; + ssi1_pins: ssi1 { + pinmux = , /* BCK */ + , /* RCK */ + ; /* TXD */ + }; + usb0_pins: usb0 { pinmux = , /* VBUS */ , /* OVC */ diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi index 4409c47239b982..83f5642d0d35c2 100644 --- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi @@ -180,41 +180,63 @@ adc_pins: adc { }; eth0_pins: eth0 { - pinmux = , /* ET0_LINKSTA */ - , /* ET0_MDC */ - , /* ET0_MDIO */ - , /* ET0_TXC */ - , /* ET0_TX_CTL */ - , /* ET0_TXD0 */ - , /* ET0_TXD1 */ - , /* ET0_TXD2 */ - , /* ET0_TXD3 */ - , /* ET0_RXC */ - , /* ET0_RX_CTL */ - , /* ET0_RXD0 */ - , /* ET0_RXD1 */ - , /* ET0_RXD2 */ - , /* ET0_RXD3 */ - ; /* IRQ2 */ + txc { + pinmux = ; /* ET0_TXC */ + power-source = <1800>; + output-enable; + }; + + mux { + pinmux = , /* ET0_LINKSTA */ + , /* ET0_MDC */ + , /* ET0_MDIO */ + , /* ET0_TX_CTL */ + , /* ET0_TXD0 */ + , /* ET0_TXD1 */ + , /* ET0_TXD2 */ + , /* ET0_TXD3 */ + , /* ET0_RXC */ + , /* ET0_RX_CTL */ + , /* ET0_RXD0 */ + , /* ET0_RXD1 */ + , /* ET0_RXD2 */ + ; /* ET0_RXD3 */ + power-source = <1800>; + }; + + irq { + pinmux = ; /* IRQ2 */ + }; }; eth1_pins: eth1 { - pinmux = , /* ET1_LINKSTA */ - , /* ET1_MDC */ - , /* ET1_MDIO */ - , /* ET1_TXC */ - , /* ET1_TX_CTL */ - , /* ET1_TXD0 */ - , /* ET1_TXD1 */ - , /* ET1_TXD2 */ - , /* ET1_TXD3 */ - , /* ET1_RXC */ - , /* ET1_RX_CTL */ - , /* ET1_RXD0 */ - , /* ET1_RXD1 */ - , /* ET1_RXD2 */ - , /* ET1_RXD3 */ - ; /* IRQ3 */ + txc { + pinmux = ; /* ET1_TXC */ + power-source = <1800>; + output-enable; + }; + + mux { + pinmux = , /* ET1_LINKSTA */ + , /* ET1_MDC */ + , /* ET1_MDIO */ + , /* ET1_TX_CTL */ + , /* ET1_TXD0 */ + , /* ET1_TXD1 */ + , /* ET1_TXD2 */ + , /* ET1_TXD3 */ + , /* ET1_RXC */ + , /* ET1_RX_CTL */ + , /* ET1_RXD0 */ + , /* ET1_RXD1 */ + , /* ET1_RXD2 */ + ; /* ET1_RXD3 */ + power-source = <1800>; + }; + + irq { + pinmux = ; /* IRQ3 */ + }; }; gpio-sd0-pwr-en-hog { diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi index 887dffe1491087..ee3d96fdb6168b 100644 --- a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi +++ 
b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi @@ -30,6 +30,12 @@ hdmi_con_out: endpoint { }; }; }; + + sound_card { + compatible = "audio-graph-card"; + label = "HDMI-Audio"; + dais = <&i2s2_port>; + }; }; &cpu_dai { @@ -88,6 +94,13 @@ adv7535_out: endpoint { remote-endpoint = <&hdmi_con_out>; }; }; + + port@2 { + reg = <2>; + codec_endpoint: endpoint { + remote-endpoint = <&i2s2_cpu_endpoint>; + }; + }; }; }; }; @@ -170,6 +183,23 @@ &ssi0 { status = "okay"; }; +&ssi1 { + pinctrl-0 = <&ssi1_pins>; + pinctrl-names = "default"; + + status = "okay"; + + i2s2_port: port { + i2s2_cpu_endpoint: endpoint { + remote-endpoint = <&codec_endpoint>; + dai-format = "i2s"; + + bitclock-master = <&i2s2_cpu_endpoint>; + frame-master = <&i2s2_cpu_endpoint>; + }; + }; +}; + &vccq_sdhi1 { gpios = <&pinctrl RZG2L_GPIO(39, 1) GPIO_ACTIVE_HIGH>; }; diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi index 5e4209d6fb42f3..b4ef5ea8a9e345 100644 --- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi @@ -128,22 +128,33 @@ &ostm2 { &pinctrl { eth0_pins: eth0 { - pinmux = , /* ET0_LINKSTA */ - , /* ET0_MDC */ - , /* ET0_MDIO */ - , /* ET0_TXC */ - , /* ET0_TX_CTL */ - , /* ET0_TXD0 */ - , /* ET0_TXD1 */ - , /* ET0_TXD2 */ - , /* ET0_TXD3 */ - , /* ET0_RXC */ - , /* ET0_RX_CTL */ - , /* ET0_RXD0 */ - , /* ET0_RXD1 */ - , /* ET0_RXD2 */ - , /* ET0_RXD3 */ - ; /* IRQ0 */ + txc { + pinmux = ; /* ET0_TXC */ + power-source = <1800>; + output-enable; + }; + + mux { + pinmux = , /* ET0_LINKSTA */ + , /* ET0_MDC */ + , /* ET0_MDIO */ + , /* ET0_TX_CTL */ + , /* ET0_TXD0 */ + , /* ET0_TXD1 */ + , /* ET0_TXD2 */ + , /* ET0_TXD3 */ + , /* ET0_RXC */ + , /* ET0_RX_CTL */ + , /* ET0_RXD0 */ + , /* ET0_RXD1 */ + , /* ET0_RXD2 */ + ; /* ET0_RXD3 */ + power-source = <1800>; + }; + + irq { + pinmux = ; /* IRQ0 */ + }; }; gpio-sd0-pwr-en-hog { diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi index f21508640b6eaa..377849cbb462ea 100644 --- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi @@ -33,6 +33,16 @@ hdmi_con_out: endpoint { }; }; }; + +#if (SW_I2S0_I2S1) + /delete-node/ sound; + + sound_card { + compatible = "audio-graph-card"; + label = "HDMI-Audio"; + dais = <&i2s2_port>; + }; +#endif }; #if (SW_SCIF_CAN || SW_RSPI_CAN) @@ -48,9 +58,11 @@ &canfd { }; #endif +#if (!SW_I2S0_I2S1) &cpu_dai { sound-dai = <&ssi0>; }; +#endif &dsi { status = "okay"; @@ -104,6 +116,15 @@ adv7535_out: endpoint { remote-endpoint = <&hdmi_con_out>; }; }; + +#if (SW_I2S0_I2S1) + port@2 { + reg = <2>; + codec_endpoint: endpoint { + remote-endpoint = <&i2s2_cpu_endpoint>; + }; + }; +#endif }; }; }; @@ -177,6 +198,18 @@ &ssi0 { pinctrl-names = "default"; status = "okay"; + +#if (SW_I2S0_I2S1) + i2s2_port: port { + i2s2_cpu_endpoint: endpoint { + remote-endpoint = <&codec_endpoint>; + dai-format = "i2s"; + + bitclock-master = <&i2s2_cpu_endpoint>; + frame-master = <&i2s2_cpu_endpoint>; + }; + }; +#endif }; #if (SW_RSPI_CAN) diff --git a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi index 97cdad2a12e2ee..79443fb3f58103 100644 --- a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi @@ -142,41 +142,63 @@ adc_pins: adc { }; eth0_pins: eth0 { - pinmux = , /* ET0_LINKSTA */ - , /* ET0_MDC */ - , /* ET0_MDIO */ - , /* ET0_TXC */ - 
, /* ET0_TX_CTL */ - , /* ET0_TXD0 */ - , /* ET0_TXD1 */ - , /* ET0_TXD2 */ - , /* ET0_TXD3 */ - , /* ET0_RXC */ - , /* ET0_RX_CTL */ - , /* ET0_RXD0 */ - , /* ET0_RXD1 */ - , /* ET0_RXD2 */ - , /* ET0_RXD3 */ - ; /* IRQ2 */ + txc { + pinmux = ; /* ET0_TXC */ + power-source = <1800>; + output-enable; + }; + + mux { + pinmux = , /* ET0_LINKSTA */ + , /* ET0_MDC */ + , /* ET0_MDIO */ + , /* ET0_TX_CTL */ + , /* ET0_TXD0 */ + , /* ET0_TXD1 */ + , /* ET0_TXD2 */ + , /* ET0_TXD3 */ + , /* ET0_RXC */ + , /* ET0_RX_CTL */ + , /* ET0_RXD0 */ + , /* ET0_RXD1 */ + , /* ET0_RXD2 */ + ; /* ET0_RXD3 */ + power-source = <1800>; + }; + + irq { + pinmux = ; /* IRQ2 */ + }; }; eth1_pins: eth1 { - pinmux = , /* ET1_LINKSTA */ - , /* ET1_MDC */ - , /* ET1_MDIO */ - , /* ET1_TXC */ - , /* ET1_TX_CTL */ - , /* ET1_TXD0 */ - , /* ET1_TXD1 */ - , /* ET1_TXD2 */ - , /* ET1_TXD3 */ - , /* ET1_RXC */ - , /* ET1_RX_CTL */ - , /* ET1_RXD0 */ - , /* ET1_RXD1 */ - , /* ET1_RXD2 */ - , /* ET1_RXD3 */ - ; /* IRQ7 */ + txc { + pinmux = ; /* ET1_TXC */ + power-source = <1800>; + output-enable; + }; + + mux { + pinmux = , /* ET1_LINKSTA */ + , /* ET1_MDC */ + , /* ET1_MDIO */ + , /* ET1_TX_CTL */ + , /* ET1_TXD0 */ + , /* ET1_TXD1 */ + , /* ET1_TXD2 */ + , /* ET1_TXD3 */ + , /* ET1_RXC */ + , /* ET1_RX_CTL */ + , /* ET1_RXD0 */ + , /* ET1_RXD1 */ + , /* ET1_RXD2 */ + ; /* ET1_RXD3 */ + power-source = <1800>; + }; + + irq { + pinmux = ; /* IRQ7 */ + }; }; sdhi0_emmc_pins: sd0emmc { diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi index 8a3d302f153578..21bfa4e03972ff 100644 --- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi @@ -32,6 +32,7 @@ / { compatible = "renesas,rzg3s-smarcm", "renesas,r9a08g045s33", "renesas,r9a08g045"; aliases { + i2c1 = &i2c1; mmc0 = &sdhi0; #if SW_CONFIG3 == SW_OFF mmc2 = &sdhi2; @@ -150,6 +151,10 @@ &extal_clk { clock-frequency = <24000000>; }; +&i2c1 { + status = "okay"; +}; + #if SW_CONFIG2 == SW_ON /* SD0 slot */ &sdhi0 { diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi index deb2ad37bb2e5d..7945d44e6ee159 100644 --- a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi +++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi @@ -11,6 +11,7 @@ / { aliases { + i2c0 = &i2c0; serial0 = &scif0; mmc1 = &sdhi1; }; @@ -66,6 +67,12 @@ vccq_sdhi1: regulator-vccq-sdhi1 { }; }; +&i2c0 { + status = "okay"; + + clock-frequency = <1000000>; +}; + &pinctrl { key-1-gpio-hog { gpio-hog; diff --git a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi index 80496fb3d4765a..3845b413bd24cd 100644 --- a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi +++ b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi @@ -117,6 +117,12 @@ mini_dp_con_in: endpoint { }; }; + pcie_clk: clk-9fgv0841-pci { + compatible = "fixed-clock"; + clock-frequency = <100000000>; + #clock-cells = <0>; + }; + reg_1p2v: regulator-1p2v { compatible = "regulator-fixed"; regulator-name = "fixed-1.2V"; @@ -288,6 +294,18 @@ &mmc0 { status = "okay"; }; +&pcie0_clkref { + compatible = "gpio-gate-clock"; + clocks = <&pcie_clk>; + enable-gpios = <&gpio4 21 GPIO_ACTIVE_LOW>; + /delete-property/ clock-frequency; +}; + +&pciec0 { + reset-gpio = <&io_expander_a 0 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + &pfc { pinctrl-0 = <&scif_clk_pins>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/Makefile 
b/arch/arm64/boot/dts/rockchip/Makefile
index fda1b980eb4bc9..09423070c99282 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -3,6 +3,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-evb.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-ctouch2.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-ctouch2-of10.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-edimm2.2.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-firefly-jd4-core-mb.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-evb.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-roc-cc.dtb
@@ -20,6 +21,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-evb.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2c.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2c-plus.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2s.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2s-plus.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-orangepi-r1-plus.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-orangepi-r1-plus-lts.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-rock64.dtb
@@ -81,6 +83,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353ps.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353v.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg353vs.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-anbernic-rg503.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-odroid-m1s.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-orangepi-3b-v1.1.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-orangepi-3b-v2.1.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-pinenote-v1.1.dtb
@@ -102,6 +105,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-blade.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-cm4.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-soquartz-model-a.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-box-demo.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-lckfb-tspi.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-lubancat-1.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-bpi-r2-pro.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-evb1-v10.dtb
@@ -118,9 +122,11 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-roc-pc.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-rock-3a.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-rock-3b.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-display-vz.dtbo
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-io-expander.dtbo
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-armsom-sige7.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-coolpi-cm5-evb.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-coolpi-cm5-genbook.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-io.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-wifi.dtbo
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6b-io.dtb
@@ -128,6 +134,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-evb1-v10.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-friendlyelec-cm3588-nas.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-jaguar.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6-lts.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-ok3588-c.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-plus.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-quartzpro64.dtb
@@ -139,9 +146,11 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-tiger-haikou.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-toybrick-x0.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-turing-rk1.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-coolpi-4b.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-gameforce-ace.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) +=
rk3588s-indiedroid-nova.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-khadas-edge2.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-nanopi-r6s.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-nanopi-r6c.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-rock-5a.dtb +dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-odroid-m2.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-orangepi-5.dtb diff --git a/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core-mb.dts b/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core-mb.dts new file mode 100644 index 00000000000000..d03e6aef54dc07 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core-mb.dts @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd + */ + +/dts-v1/; +#include +#include +#include "px30-firefly-jd4-core.dtsi" + +/ { + compatible = "firefly,px30-jd4-core-mb", "firefly,px30-jd4-core", + "rockchip,px30"; + model = "Firefly Core-PX30-JD4 on MB-JD4-PX30 baseboard"; + + aliases { + ethernet0 = &gmac; + mmc0 = &sdmmc; + mmc1 = &sdio; + mmc2 = &emmc; + }; + + chosen { + stdout-path = "serial2:115200n8"; + }; + + dc_12v: dc-12v-regulator { + compatible = "regulator-fixed"; + regulator-name = "dc_12v"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 2>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1500000>; + poll-interval = <100>; + + button-recovery { + label = "Recovery"; + linux,code = ; + press-threshold-microvolt = <18000>; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&blue_led>, <&green_led>; + + blue-led { + color = ; + default-state = "on"; + function = LED_FUNCTION_HEARTBEAT; + gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>; + label = "px30-mb-jd4:blue:work"; + linux,default-trigger = "heartbeat"; + }; + + green-led { + color = ; + default-state = "on"; + function = LED_FUNCTION_POWER; + gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_HIGH>; + label = "px30-mb-jd4:blue:diy"; + linux,default-trigger = "default-on"; + }; + }; + + sdio_pwrseq: sdio-pwrseq { + compatible = "mmc-pwrseq-simple"; + pinctrl-names = "default"; + pinctrl-0 = <&wifi_enable_h>; + + /* + * On the module itself this is one of these (depending + * on the actual card populated): + * - SDIO_RESET_L_WL_REG_ON + * - PDN (power down when low) + */ + reset-gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; /* GPIO3_A4 */ + }; + + vcc5v0_baseboard: vcc5v0-baseboard-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_baseboard"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&dc_12v>; + }; +}; + +&gmac { + clock_in_out = "output"; + phy-supply = <&vcc_rmii>; + snps,reset-gpio = <&gpio2 13 GPIO_ACTIVE_LOW>; + snps,reset-active-low; + snps,reset-delays-us = <0 50000 50000>; + status = "okay"; +}; + +&pinctrl { + leds { + blue_led: blue-led { + rockchip,pins = <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + green_led: green-led { + rockchip,pins = <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + sdio-pwrseq { + wifi_enable_h: wifi-enable-h { + rockchip,pins = + <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&sdmmc { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + card-detect-delay = <800>; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; + sd-uhs-sdr104; + vmmc-supply = <&vcc_sd>; + vqmmc-supply = 
<&vccio_sd>; + status = "okay"; +}; + +&sdio { + bus-width = <4>; + cap-sd-highspeed; + keep-power-in-suspend; + non-removable; + mmc-pwrseq = <&sdio_pwrseq>; + sd-uhs-sdr104; + status = "okay"; +}; + +&u2phy { + status = "okay"; + + u2phy_host: host-port { + status = "okay"; + }; + + u2phy_otg: otg-port { + status = "okay"; + }; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&uart2m1_xfer>; + status = "okay"; +}; + +&usb20_otg { + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core.dtsi new file mode 100644 index 00000000000000..f18d7eb9a9c7b2 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/px30-firefly-jd4-core.dtsi @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd + */ + +#include +#include +#include "px30.dtsi" + +/ { + compatible = "firefly,px30-jd4-core", "rockchip,px30"; + + emmc_pwrseq: emmc-pwrseq { + compatible = "mmc-pwrseq-emmc"; + pinctrl-0 = <&emmc_reset>; + pinctrl-names = "default"; + reset-gpios = <&gpio1 RK_PB3 GPIO_ACTIVE_HIGH>; + }; + + vcc5v0_sys: vcc5v0-sys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_baseboard>; + }; +}; + +&cpu0 { + cpu-supply = <&vdd_arm>; +}; + +&cpu1 { + cpu-supply = <&vdd_arm>; +}; + +&cpu2 { + cpu-supply = <&vdd_arm>; +}; + +&cpu3 { + cpu-supply = <&vdd_arm>; +}; + +&emmc { + bus-width = <8>; + cap-mmc-highspeed; + mmc-hs200-1_8v; + non-removable; + mmc-pwrseq = <&emmc_pwrseq>; + vmmc-supply = <&vcc_3v0>; + vqmmc-supply = <&vccio_flash>; + status = "okay"; +}; + +&gpu { + mali-supply = <&vdd_log>; + status = "okay"; +}; + +&i2c0 { + status = "okay"; + + rk809: pmic@20 { + compatible = "rockchip,rk809"; + reg = <0x20>; + interrupt-parent = <&gpio0>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int>; + rockchip,system-power-controller; + wakeup-source; + #clock-cells = <0>; + clock-output-names = "xin32k"; + + vcc1-supply = <&vcc5v0_sys>; + vcc2-supply = <&vcc5v0_sys>; + vcc3-supply = <&vcc5v0_sys>; + vcc4-supply = <&vcc5v0_sys>; + vcc5-supply = <&vcc3v3_sys>; + vcc6-supply = <&vcc3v3_sys>; + vcc7-supply = <&vcc3v3_sys>; + vcc8-supply = <&vcc3v3_sys>; + vcc9-supply = <&vcc5v0_sys>; + + regulators { + vdd_log: DCDC_REG1 { + regulator-name = "vdd_log"; + regulator-min-microvolt = <950000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <950000>; + }; + }; + + vdd_arm: DCDC_REG2 { + regulator-name = "vdd_arm"; + regulator-min-microvolt = <950000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <950000>; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_3v0: vcc_rmii: DCDC_REG4 { + regulator-name = "vcc_3v0"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-always-on; + regulator-boot-on; + + 
regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3000000>; + }; + }; + + vcc3v3_sys: DCDC_REG5 { + regulator-name = "vcc3v3_sys"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcc_1v0: LDO_REG1 { + regulator-name = "vcc_1v0"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1000000>; + }; + }; + + vcc_1v8: vccio_flash: vccio_sdio: LDO_REG2 { + regulator-name = "vcc_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vdd_1v0: LDO_REG3 { + regulator-name = "vdd_1v0"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1000000>; + }; + }; + + vcc3v0_pmu: LDO_REG4 { + regulator-name = "vcc3v0_pmu"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3000000>; + }; + }; + + vccio_sd: LDO_REG5 { + regulator-name = "vccio_sd"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcc_sd: LDO_REG6 { + regulator-name = "vcc_sd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcc2v8_dvp: LDO_REG7 { + regulator-name = "vcc2v8_dvp"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <2800000>; + }; + }; + + vcc1v8_dvp: LDO_REG8 { + regulator-name = "vcc1v8_dvp"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc1v5_dvp: LDO_REG9 { + regulator-name = "vcc1v5_dvp"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <1500000>; + }; + }; + + vcc3v3_lcd: SWITCH_REG1 { + regulator-name = "vcc3v3_lcd"; + regulator-boot-on; + }; + + vcc5v0_host: SWITCH_REG2 { + regulator-name = "vcc5v0_host"; + regulator-always-on; + regulator-boot-on; + }; + }; + }; +}; + +&io_domains { + vccio1-supply = <&vccio_sdio>; + vccio2-supply = <&vccio_sd>; + vccio3-supply = <&vcc_3v0>; + vccio4-supply = <&vcc3v0_pmu>; + vccio5-supply = <&vcc_3v0>; + vccio6-supply = <&vccio_flash>; + status = "okay"; +}; + +&pinctrl { + emmc { + emmc_reset: emmc-reset { + rockchip,pins = <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + pmic { + pmic_int: pmic_int { + rockchip,pins = + <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + 
}; +}; + +&pmu_io_domains { + pmuio1-supply = <&vcc3v0_pmu>; + pmuio2-supply = <&vcc3v0_pmu>; + status = "okay"; +}; + +&saradc { + vref-supply = <&vcc_1v8>; + status = "okay"; +}; + +&tsadc { + rockchip,hw-tshut-mode = <1>; + rockchip,hw-tshut-polarity = <1>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts new file mode 100644 index 00000000000000..cb81ba3f23ffd5 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * (C) Copyright 2018 FriendlyElec Computer Tech. Co., Ltd. + * (http://www.friendlyarm.com) + * + * (C) Copyright 2016 Rockchip Electronics Co., Ltd + */ + +/dts-v1/; +#include "rk3328-nanopi-r2s.dts" + +/ { + compatible = "friendlyarm,nanopi-r2s-plus", "rockchip,rk3328"; + model = "FriendlyElec NanoPi R2S Plus"; + + aliases { + mmc1 = &emmc; + }; +}; + +&emmc { + bus-width = <8>; + cap-mmc-highspeed; + disable-wp; + mmc-hs200-1_8v; + non-removable; + num-slots = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; + supports-emmc; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index b01efd6d042c8e..16b4faa22e4fd9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -910,6 +910,8 @@ sdmmc: mmc@ff500000 { clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; max-frequency = <150000000>; + resets = <&cru SRST_MMC0>; + reset-names = "reset"; status = "disabled"; }; @@ -922,6 +924,8 @@ sdio: mmc@ff510000 { clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; max-frequency = <150000000>; + resets = <&cru SRST_SDIO>; + reset-names = "reset"; status = "disabled"; }; @@ -934,6 +938,8 @@ emmc: mmc@ff520000 { clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; max-frequency = <150000000>; + resets = <&cru SRST_EMMC>; + reset-names = "reset"; status = "disabled"; }; @@ -1036,6 +1042,20 @@ usb_host0_ohci: usb@ff5d0000 { status = "disabled"; }; + sdmmc_ext: mmc@ff5f0000 { + compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc"; + reg = <0x0 0xff5f0000 0x0 0x4000>; + interrupts = ; + clocks = <&cru HCLK_SDMMC_EXT>, <&cru SCLK_SDMMC_EXT>, + <&cru SCLK_SDMMC_EXT_DRV>, <&cru SCLK_SDMMC_EXT_SAMPLE>; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + max-frequency = <150000000>; + resets = <&cru SRST_SDMMCEXT>; + reset-names = "reset"; + status = "disabled"; + }; + usbdrd3: usb@ff600000 { compatible = "rockchip,rk3328-dwc3", "snps,dwc3"; reg = <0x0 0xff600000 0x0 0x100000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-base.dtsi new file mode 100644 index 00000000000000..9d5f5b083e3cfa --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-base.dtsi @@ -0,0 +1,3019 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd + */ + +#include +#include +#include +#include +#include +#include +#include + +/ { + compatible = "rockchip,rk3399"; + + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + gpio0 = &gpio0; + gpio1 = &gpio1; + gpio2 = &gpio2; + gpio3 = &gpio3; + gpio4 = &gpio4; + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + i2c4 = &i2c4; + i2c5 = &i2c5; + i2c6 = &i2c6; + i2c7 = 
&i2c7; + i2c8 = &i2c8; + serial0 = &uart0; + serial1 = &uart1; + serial2 = &uart2; + serial3 = &uart3; + serial4 = &uart4; + spi0 = &spi0; + spi1 = &spi1; + spi2 = &spi2; + spi3 = &spi3; + spi4 = &spi4; + spi5 = &spi5; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu-map { + cluster0 { /* Cortex-A53 */ + core0 { + cpu = <&cpu_l0>; + }; + core1 { + cpu = <&cpu_l1>; + }; + core2 { + cpu = <&cpu_l2>; + }; + core3 { + cpu = <&cpu_l3>; + }; + }; + + cluster1 { /* Cortex-A72 */ + core0 { + cpu = <&cpu_b0>; + }; + core1 { + cpu = <&cpu_b1>; + }; + }; + }; + + cpu_l0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x0>; + enable-method = "psci"; + capacity-dmips-mhz = <485>; + clocks = <&cru ARMCLKL>; + #cooling-cells = <2>; /* min followed by max */ + dynamic-power-coefficient = <100>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&l2_cache_l>; + }; + + cpu_l1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x1>; + enable-method = "psci"; + capacity-dmips-mhz = <485>; + clocks = <&cru ARMCLKL>; + #cooling-cells = <2>; /* min followed by max */ + dynamic-power-coefficient = <100>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&l2_cache_l>; + }; + + cpu_l2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x2>; + enable-method = "psci"; + capacity-dmips-mhz = <485>; + clocks = <&cru ARMCLKL>; + #cooling-cells = <2>; /* min followed by max */ + dynamic-power-coefficient = <100>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&l2_cache_l>; + }; + + cpu_l3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x3>; + enable-method = "psci"; + capacity-dmips-mhz = <485>; + clocks = <&cru ARMCLKL>; + #cooling-cells = <2>; /* min followed by max */ + dynamic-power-coefficient = <100>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&l2_cache_l>; + }; + + cpu_b0: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a72"; + reg = <0x0 0x100>; + enable-method = "psci"; + capacity-dmips-mhz = <1024>; + clocks = <&cru ARMCLKB>; + #cooling-cells = <2>; /* min followed by max */ + dynamic-power-coefficient = <436>; + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + next-level-cache = <&l2_cache_b>; + + thermal-idle { + #cooling-cells = <2>; + duration-us = <10000>; + exit-latency-us = <500>; + }; + }; + + cpu_b1: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a72"; + reg = <0x0 0x101>; + enable-method = "psci"; + capacity-dmips-mhz = <1024>; + clocks = <&cru ARMCLKB>; + #cooling-cells = <2>; /* min followed by max */ + dynamic-power-coefficient = <436>; + cpu-idle-states = <&CPU_SLEEP 
&CLUSTER_SLEEP>; + i-cache-size = <0xC000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; + next-level-cache = <&l2_cache_b>; + + thermal-idle { + #cooling-cells = <2>; + duration-us = <10000>; + exit-latency-us = <500>; + }; + }; + + l2_cache_l: l2-cache-cluster0 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + cache-size = <0x80000>; + cache-line-size = <64>; + cache-sets = <512>; + }; + + l2_cache_b: l2-cache-cluster1 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + cache-size = <0x100000>; + cache-line-size = <64>; + cache-sets = <1024>; + }; + + idle-states { + entry-method = "psci"; + + CPU_SLEEP: cpu-sleep { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x0010000>; + entry-latency-us = <120>; + exit-latency-us = <250>; + min-residency-us = <900>; + }; + + CLUSTER_SLEEP: cluster-sleep { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x1010000>; + entry-latency-us = <400>; + exit-latency-us = <500>; + min-residency-us = <2000>; + }; + }; + }; + + display-subsystem { + compatible = "rockchip,display-subsystem"; + ports = <&vopl_out>, <&vopb_out>; + }; + + dmc: memory-controller { + compatible = "rockchip,rk3399-dmc"; + rockchip,pmu = <&pmugrf>; + devfreq-events = <&dfi>; + clocks = <&cru SCLK_DDRC>; + clock-names = "dmc_clk"; + status = "disabled"; + }; + + pmu_a53 { + compatible = "arm,cortex-a53-pmu"; + interrupts = ; + }; + + pmu_a72 { + compatible = "arm,cortex-a72-pmu"; + interrupts = ; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + arm,no-tick-in-suspend; + }; + + xin24m: xin24m { + compatible = "fixed-clock"; + clock-frequency = <24000000>; + clock-output-names = "xin24m"; + #clock-cells = <0>; + }; + + pcie0: pcie@f8000000 { + compatible = "rockchip,rk3399-pcie"; + reg = <0x0 0xf8000000 0x0 0x2000000>, + <0x0 0xfd000000 0x0 0x1000000>; + reg-names = "axi-base", "apb-base"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + aspm-no-l0s; + bus-range = <0x0 0x1f>; + clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>, + <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>; + clock-names = "aclk", "aclk-perf", + "hclk", "pm"; + interrupts = , + , + ; + interrupt-names = "sys", "legacy", "client"; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie0_intc 0>, + <0 0 0 2 &pcie0_intc 1>, + <0 0 0 3 &pcie0_intc 2>, + <0 0 0 4 &pcie0_intc 3>; + max-link-speed = <1>; + msi-map = <0x0 &its 0x0 0x1000>; + phys = <&pcie_phy 0>, <&pcie_phy 1>, + <&pcie_phy 2>, <&pcie_phy 3>; + phy-names = "pcie-phy-0", "pcie-phy-1", + "pcie-phy-2", "pcie-phy-3"; + ranges = <0x82000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>, + <0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>; + resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, + <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>, + <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, + <&cru SRST_A_PCIE>; + reset-names = "core", "mgmt", "mgmt-sticky", "pipe", + "pm", "pclk", "aclk"; + status = "disabled"; + + pcie0_intc: interrupt-controller { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + }; + + pcie0_ep: pcie-ep@f8000000 { + compatible = "rockchip,rk3399-pcie-ep"; + reg = <0x0 0xfd000000 0x0 0x1000000>, + <0x0 0xfa000000 0x0 0x2000000>; + reg-names = "apb-base", "mem-base"; + clocks = <&cru 
ACLK_PCIE>, <&cru ACLK_PERF_PCIE>, + <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>; + clock-names = "aclk", "aclk-perf", + "hclk", "pm"; + max-functions = /bits/ 8 <8>; + num-lanes = <4>; + resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, + <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>, + <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, + <&cru SRST_A_PCIE>; + reset-names = "core", "mgmt", "mgmt-sticky", "pipe", + "pm", "pclk", "aclk"; + phys = <&pcie_phy 0>, <&pcie_phy 1>, + <&pcie_phy 2>, <&pcie_phy 3>; + phy-names = "pcie-phy-0", "pcie-phy-1", + "pcie-phy-2", "pcie-phy-3"; + rockchip,max-outbound-regions = <32>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_clkreqnb_cpm>; + status = "disabled"; + }; + + gmac: ethernet@fe300000 { + compatible = "rockchip,rk3399-gmac"; + reg = <0x0 0xfe300000 0x0 0x10000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>, + <&cru SCLK_MAC_TX>, <&cru SCLK_MACREF>, + <&cru SCLK_MACREF_OUT>, <&cru ACLK_GMAC>, + <&cru PCLK_GMAC>; + clock-names = "stmmaceth", "mac_clk_rx", + "mac_clk_tx", "clk_mac_ref", + "clk_mac_refout", "aclk_mac", + "pclk_mac"; + power-domains = <&power RK3399_PD_GMAC>; + resets = <&cru SRST_A_GMAC>; + reset-names = "stmmaceth"; + rockchip,grf = <&grf>; + snps,txpbl = <0x4>; + status = "disabled"; + }; + + sdio0: mmc@fe310000 { + compatible = "rockchip,rk3399-dw-mshc", + "rockchip,rk3288-dw-mshc"; + reg = <0x0 0xfe310000 0x0 0x4000>; + interrupts = ; + max-frequency = <150000000>; + clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, + <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; + resets = <&cru SRST_SDIO0>; + reset-names = "reset"; + status = "disabled"; + }; + + sdmmc: mmc@fe320000 { + compatible = "rockchip,rk3399-dw-mshc", + "rockchip,rk3288-dw-mshc"; + reg = <0x0 0xfe320000 0x0 0x4000>; + interrupts = ; + max-frequency = <150000000>; + assigned-clocks = <&cru HCLK_SD>; + assigned-clock-rates = <200000000>; + clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, + <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + power-domains = <&power RK3399_PD_SD>; + resets = <&cru SRST_SDMMC>; + reset-names = "reset"; + status = "disabled"; + }; + + sdhci: mmc@fe330000 { + compatible = "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1"; + reg = <0x0 0xfe330000 0x0 0x10000>; + interrupts = ; + arasan,soc-ctl-syscon = <&grf>; + assigned-clocks = <&cru SCLK_EMMC>; + assigned-clock-rates = <200000000>; + clocks = <&cru SCLK_EMMC>, <&cru ACLK_EMMC>; + clock-names = "clk_xin", "clk_ahb"; + clock-output-names = "emmc_cardclock"; + #clock-cells = <0>; + phys = <&emmc_phy>; + phy-names = "phy_arasan"; + power-domains = <&power RK3399_PD_EMMC>; + disable-cqe-dcmd; + status = "disabled"; + }; + + usb_host0_ehci: usb@fe380000 { + compatible = "generic-ehci"; + reg = <0x0 0xfe380000 0x0 0x20000>; + interrupts = ; + clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>, + <&u2phy0>; + phys = <&u2phy0_host>; + phy-names = "usb"; + status = "disabled"; + }; + + usb_host0_ohci: usb@fe3a0000 { + compatible = "generic-ohci"; + reg = <0x0 0xfe3a0000 0x0 0x20000>; + interrupts = ; + clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>, + <&u2phy0>; + phys = <&u2phy0_host>; + phy-names = "usb"; + status = "disabled"; + }; + + usb_host1_ehci: usb@fe3c0000 { + compatible = "generic-ehci"; + reg = <0x0 0xfe3c0000 0x0 0x20000>; + interrupts = 
; + clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>, + <&u2phy1>; + phys = <&u2phy1_host>; + phy-names = "usb"; + status = "disabled"; + }; + + usb_host1_ohci: usb@fe3e0000 { + compatible = "generic-ohci"; + reg = <0x0 0xfe3e0000 0x0 0x20000>; + interrupts = ; + clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>, + <&u2phy1>; + phys = <&u2phy1_host>; + phy-names = "usb"; + status = "disabled"; + }; + + debug@fe430000 { + compatible = "arm,coresight-cpu-debug", "arm,primecell"; + reg = <0 0xfe430000 0 0x1000>; + clocks = <&cru PCLK_COREDBG_L>; + clock-names = "apb_pclk"; + cpu = <&cpu_l0>; + }; + + debug@fe432000 { + compatible = "arm,coresight-cpu-debug", "arm,primecell"; + reg = <0 0xfe432000 0 0x1000>; + clocks = <&cru PCLK_COREDBG_L>; + clock-names = "apb_pclk"; + cpu = <&cpu_l1>; + }; + + debug@fe434000 { + compatible = "arm,coresight-cpu-debug", "arm,primecell"; + reg = <0 0xfe434000 0 0x1000>; + clocks = <&cru PCLK_COREDBG_L>; + clock-names = "apb_pclk"; + cpu = <&cpu_l2>; + }; + + debug@fe436000 { + compatible = "arm,coresight-cpu-debug", "arm,primecell"; + reg = <0 0xfe436000 0 0x1000>; + clocks = <&cru PCLK_COREDBG_L>; + clock-names = "apb_pclk"; + cpu = <&cpu_l3>; + }; + + debug@fe610000 { + compatible = "arm,coresight-cpu-debug", "arm,primecell"; + reg = <0 0xfe610000 0 0x1000>; + clocks = <&cru PCLK_COREDBG_B>; + clock-names = "apb_pclk"; + cpu = <&cpu_b0>; + }; + + debug@fe710000 { + compatible = "arm,coresight-cpu-debug", "arm,primecell"; + reg = <0 0xfe710000 0 0x1000>; + clocks = <&cru PCLK_COREDBG_B>; + clock-names = "apb_pclk"; + cpu = <&cpu_b1>; + }; + + usbdrd3_0: usb@fe800000 { + compatible = "rockchip,rk3399-dwc3"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + clocks = <&cru SCLK_USB3OTG0_REF>, <&cru SCLK_USB3OTG0_SUSPEND>, + <&cru ACLK_USB3OTG0>, <&cru ACLK_USB3_RKSOC_AXI_PERF>, + <&cru ACLK_USB3>, <&cru ACLK_USB3_GRF>; + clock-names = "ref_clk", "suspend_clk", + "bus_clk", "aclk_usb3_rksoc_axi_perf", + "aclk_usb3", "grf_clk"; + resets = <&cru SRST_A_USB3_OTG0>; + reset-names = "usb3-otg"; + status = "disabled"; + + usbdrd_dwc3_0: usb@fe800000 { + compatible = "snps,dwc3"; + reg = <0x0 0xfe800000 0x0 0x100000>; + interrupts = ; + clocks = <&cru SCLK_USB3OTG0_REF>, <&cru ACLK_USB3OTG0>, + <&cru SCLK_USB3OTG0_SUSPEND>; + clock-names = "ref", "bus_early", "suspend"; + dr_mode = "otg"; + phys = <&u2phy0_otg>, <&tcphy0_usb3>; + phy-names = "usb2-phy", "usb3-phy"; + phy_type = "utmi_wide"; + snps,dis_enblslpm_quirk; + snps,dis-u2-freeclk-exists-quirk; + snps,dis_u2_susphy_quirk; + snps,dis-del-phy-power-chg-quirk; + snps,dis-tx-ipgap-linecheck-quirk; + power-domains = <&power RK3399_PD_USB3>; + status = "disabled"; + }; + }; + + usbdrd3_1: usb@fe900000 { + compatible = "rockchip,rk3399-dwc3"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + clocks = <&cru SCLK_USB3OTG1_REF>, <&cru SCLK_USB3OTG1_SUSPEND>, + <&cru ACLK_USB3OTG1>, <&cru ACLK_USB3_RKSOC_AXI_PERF>, + <&cru ACLK_USB3>, <&cru ACLK_USB3_GRF>; + clock-names = "ref_clk", "suspend_clk", + "bus_clk", "aclk_usb3_rksoc_axi_perf", + "aclk_usb3", "grf_clk"; + resets = <&cru SRST_A_USB3_OTG1>; + reset-names = "usb3-otg"; + status = "disabled"; + + usbdrd_dwc3_1: usb@fe900000 { + compatible = "snps,dwc3"; + reg = <0x0 0xfe900000 0x0 0x100000>; + interrupts = ; + clocks = <&cru SCLK_USB3OTG1_REF>, <&cru ACLK_USB3OTG1>, + <&cru SCLK_USB3OTG1_SUSPEND>; + clock-names = "ref", "bus_early", "suspend"; + dr_mode = "otg"; + phys = <&u2phy1_otg>, <&tcphy1_usb3>; + phy-names = "usb2-phy", "usb3-phy"; + phy_type = 
"utmi_wide"; + snps,dis_enblslpm_quirk; + snps,dis-u2-freeclk-exists-quirk; + snps,dis_u2_susphy_quirk; + snps,dis-del-phy-power-chg-quirk; + snps,dis-tx-ipgap-linecheck-quirk; + power-domains = <&power RK3399_PD_USB3>; + status = "disabled"; + }; + }; + + cdn_dp: dp@fec00000 { + compatible = "rockchip,rk3399-cdn-dp"; + reg = <0x0 0xfec00000 0x0 0x100000>; + interrupts = ; + assigned-clocks = <&cru SCLK_DP_CORE>, <&cru SCLK_SPDIF_REC_DPTX>; + assigned-clock-rates = <100000000>, <200000000>; + clocks = <&cru SCLK_DP_CORE>, <&cru PCLK_DP_CTRL>, + <&cru SCLK_SPDIF_REC_DPTX>, <&cru PCLK_VIO_GRF>; + clock-names = "core-clk", "pclk", "spdif", "grf"; + phys = <&tcphy0_dp>, <&tcphy1_dp>; + power-domains = <&power RK3399_PD_HDCP>; + resets = <&cru SRST_DPTX_SPDIF_REC>, <&cru SRST_P_UPHY0_DPTX>, + <&cru SRST_P_UPHY0_APB>, <&cru SRST_DP_CORE>; + reset-names = "spdif", "dptx", "apb", "core"; + rockchip,grf = <&grf>; + #sound-dai-cells = <1>; + status = "disabled"; + + ports { + dp_in: port { + #address-cells = <1>; + #size-cells = <0>; + + dp_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_dp>; + }; + + dp_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_dp>; + }; + }; + }; + }; + + gic: interrupt-controller@fee00000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <4>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + + reg = <0x0 0xfee00000 0 0x10000>, /* GICD */ + <0x0 0xfef00000 0 0xc0000>, /* GICR */ + <0x0 0xfff00000 0 0x10000>, /* GICC */ + <0x0 0xfff10000 0 0x10000>, /* GICH */ + <0x0 0xfff20000 0 0x10000>; /* GICV */ + interrupts = ; + its: msi-controller@fee20000 { + compatible = "arm,gic-v3-its"; + msi-controller; + #msi-cells = <1>; + reg = <0x0 0xfee20000 0x0 0x20000>; + }; + + ppi-partitions { + ppi_cluster0: interrupt-partition-0 { + affinity = <&cpu_l0 &cpu_l1 &cpu_l2 &cpu_l3>; + }; + + ppi_cluster1: interrupt-partition-1 { + affinity = <&cpu_b0 &cpu_b1>; + }; + }; + }; + + saradc: saradc@ff100000 { + compatible = "rockchip,rk3399-saradc"; + reg = <0x0 0xff100000 0x0 0x100>; + interrupts = ; + #io-channel-cells = <1>; + clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; + clock-names = "saradc", "apb_pclk"; + resets = <&cru SRST_P_SARADC>; + reset-names = "saradc-apb"; + status = "disabled"; + }; + + crypto0: crypto@ff8b0000 { + compatible = "rockchip,rk3399-crypto"; + reg = <0x0 0xff8b0000 0x0 0x4000>; + interrupts = ; + clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>; + clock-names = "hclk_master", "hclk_slave", "sclk"; + resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>; + reset-names = "master", "slave", "crypto-rst"; + }; + + crypto1: crypto@ff8b8000 { + compatible = "rockchip,rk3399-crypto"; + reg = <0x0 0xff8b8000 0x0 0x4000>; + interrupts = ; + clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>; + clock-names = "hclk_master", "hclk_slave", "sclk"; + resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>; + reset-names = "master", "slave", "crypto-rst"; + }; + + i2c1: i2c@ff110000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff110000 0x0 0x1000>; + assigned-clocks = <&cru SCLK_I2C1>; + assigned-clock-rates = <200000000>; + clocks = <&cru SCLK_I2C1>, <&cru PCLK_I2C1>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c2: i2c@ff120000 { + compatible = "rockchip,rk3399-i2c"; + reg 
= <0x0 0xff120000 0x0 0x1000>; + assigned-clocks = <&cru SCLK_I2C2>; + assigned-clock-rates = <200000000>; + clocks = <&cru SCLK_I2C2>, <&cru PCLK_I2C2>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c3: i2c@ff130000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff130000 0x0 0x1000>; + assigned-clocks = <&cru SCLK_I2C3>; + assigned-clock-rates = <200000000>; + clocks = <&cru SCLK_I2C3>, <&cru PCLK_I2C3>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c3_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c5: i2c@ff140000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff140000 0x0 0x1000>; + assigned-clocks = <&cru SCLK_I2C5>; + assigned-clock-rates = <200000000>; + clocks = <&cru SCLK_I2C5>, <&cru PCLK_I2C5>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c5_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c6: i2c@ff150000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff150000 0x0 0x1000>; + assigned-clocks = <&cru SCLK_I2C6>; + assigned-clock-rates = <200000000>; + clocks = <&cru SCLK_I2C6>, <&cru PCLK_I2C6>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c6_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c7: i2c@ff160000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff160000 0x0 0x1000>; + assigned-clocks = <&cru SCLK_I2C7>; + assigned-clock-rates = <200000000>; + clocks = <&cru SCLK_I2C7>, <&cru PCLK_I2C7>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c7_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + uart0: serial@ff180000 { + compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; + reg = <0x0 0xff180000 0x0 0x100>; + clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>; + clock-names = "baudclk", "apb_pclk"; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&uart0_xfer>; + status = "disabled"; + }; + + uart1: serial@ff190000 { + compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; + reg = <0x0 0xff190000 0x0 0x100>; + clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>; + clock-names = "baudclk", "apb_pclk"; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&uart1_xfer>; + status = "disabled"; + }; + + uart2: serial@ff1a0000 { + compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; + reg = <0x0 0xff1a0000 0x0 0x100>; + clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>; + clock-names = "baudclk", "apb_pclk"; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&uart2c_xfer>; + status = "disabled"; + }; + + uart3: serial@ff1b0000 { + compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; + reg = <0x0 0xff1b0000 0x0 0x100>; + clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>; + clock-names = "baudclk", "apb_pclk"; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&uart3_xfer>; + status = "disabled"; + }; + + spi0: spi@ff1c0000 { + compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; + reg = <0x0 0xff1c0000 0x0 0x1000>; + clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>; + clock-names = 
"spiclk", "apb_pclk"; + interrupts = ; + dmas = <&dmac_peri 10>, <&dmac_peri 11>; + dma-names = "tx", "rx"; + pinctrl-names = "default"; + pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi1: spi@ff1d0000 { + compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; + reg = <0x0 0xff1d0000 0x0 0x1000>; + clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>; + clock-names = "spiclk", "apb_pclk"; + interrupts = ; + dmas = <&dmac_peri 12>, <&dmac_peri 13>; + dma-names = "tx", "rx"; + pinctrl-names = "default"; + pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi2: spi@ff1e0000 { + compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; + reg = <0x0 0xff1e0000 0x0 0x1000>; + clocks = <&cru SCLK_SPI2>, <&cru PCLK_SPI2>; + clock-names = "spiclk", "apb_pclk"; + interrupts = ; + dmas = <&dmac_peri 14>, <&dmac_peri 15>; + dma-names = "tx", "rx"; + pinctrl-names = "default"; + pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi4: spi@ff1f0000 { + compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; + reg = <0x0 0xff1f0000 0x0 0x1000>; + clocks = <&cru SCLK_SPI4>, <&cru PCLK_SPI4>; + clock-names = "spiclk", "apb_pclk"; + interrupts = ; + dmas = <&dmac_peri 18>, <&dmac_peri 19>; + dma-names = "tx", "rx"; + pinctrl-names = "default"; + pinctrl-0 = <&spi4_clk &spi4_tx &spi4_rx &spi4_cs0>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi5: spi@ff200000 { + compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; + reg = <0x0 0xff200000 0x0 0x1000>; + clocks = <&cru SCLK_SPI5>, <&cru PCLK_SPI5>; + clock-names = "spiclk", "apb_pclk"; + interrupts = ; + dmas = <&dmac_bus 8>, <&dmac_bus 9>; + dma-names = "tx", "rx"; + pinctrl-names = "default"; + pinctrl-0 = <&spi5_clk &spi5_tx &spi5_rx &spi5_cs0>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + thermal_zones: thermal-zones { + cpu_thermal: cpu-thermal { + polling-delay-passive = <100>; + polling-delay = <1000>; + + thermal-sensors = <&tsadc 0>; + + trips { + cpu_alert0: cpu_alert0 { + temperature = <70000>; + hysteresis = <2000>; + type = "passive"; + }; + cpu_alert1: cpu_alert1 { + temperature = <75000>; + hysteresis = <2000>; + type = "passive"; + }; + cpu_crit: cpu_crit { + temperature = <95000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_alert0>; + cooling-device = + <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu_alert1>; + cooling-device = + <&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_l1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_l2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_l3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + + gpu_thermal: gpu-thermal { + polling-delay-passive = <100>; + polling-delay = <1000>; + + thermal-sensors = <&tsadc 1>; + + trips { + gpu_alert0: gpu_alert0 { + temperature = <75000>; + hysteresis = <2000>; + type = "passive"; + }; + gpu_crit: gpu_crit { + temperature = <95000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&gpu_alert0>; + cooling-device = + <&gpu THERMAL_NO_LIMIT 
THERMAL_NO_LIMIT>; + }; + }; + }; + }; + + tsadc: tsadc@ff260000 { + compatible = "rockchip,rk3399-tsadc"; + reg = <0x0 0xff260000 0x0 0x100>; + interrupts = ; + assigned-clocks = <&cru SCLK_TSADC>; + assigned-clock-rates = <750000>; + clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>; + clock-names = "tsadc", "apb_pclk"; + resets = <&cru SRST_TSADC>; + reset-names = "tsadc-apb"; + rockchip,grf = <&grf>; + rockchip,hw-tshut-temp = <95000>; + pinctrl-names = "init", "default", "sleep"; + pinctrl-0 = <&otp_pin>; + pinctrl-1 = <&otp_out>; + pinctrl-2 = <&otp_pin>; + #thermal-sensor-cells = <1>; + status = "disabled"; + }; + + qos_emmc: qos@ffa58000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa58000 0x0 0x20>; + }; + + qos_gmac: qos@ffa5c000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa5c000 0x0 0x20>; + }; + + qos_pcie: qos@ffa60080 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa60080 0x0 0x20>; + }; + + qos_usb_host0: qos@ffa60100 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa60100 0x0 0x20>; + }; + + qos_usb_host1: qos@ffa60180 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa60180 0x0 0x20>; + }; + + qos_usb_otg0: qos@ffa70000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa70000 0x0 0x20>; + }; + + qos_usb_otg1: qos@ffa70080 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa70080 0x0 0x20>; + }; + + qos_sd: qos@ffa74000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa74000 0x0 0x20>; + }; + + qos_sdioaudio: qos@ffa76000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa76000 0x0 0x20>; + }; + + qos_hdcp: qos@ffa90000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa90000 0x0 0x20>; + }; + + qos_iep: qos@ffa98000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffa98000 0x0 0x20>; + }; + + qos_isp0_m0: qos@ffaa0000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffaa0000 0x0 0x20>; + }; + + qos_isp0_m1: qos@ffaa0080 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffaa0080 0x0 0x20>; + }; + + qos_isp1_m0: qos@ffaa8000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffaa8000 0x0 0x20>; + }; + + qos_isp1_m1: qos@ffaa8080 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffaa8080 0x0 0x20>; + }; + + qos_rga_r: qos@ffab0000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffab0000 0x0 0x20>; + }; + + qos_rga_w: qos@ffab0080 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffab0080 0x0 0x20>; + }; + + qos_video_m0: qos@ffab8000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffab8000 0x0 0x20>; + }; + + qos_video_m1_r: qos@ffac0000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffac0000 0x0 0x20>; + }; + + qos_video_m1_w: qos@ffac0080 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffac0080 0x0 0x20>; + }; + + qos_vop_big_r: qos@ffac8000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffac8000 0x0 0x20>; + }; + + qos_vop_big_w: qos@ffac8080 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffac8080 0x0 0x20>; + }; + + qos_vop_little: qos@ffad0000 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffad0000 0x0 0x20>; + }; + + qos_perihp: qos@ffad8080 { + compatible = "rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffad8080 0x0 0x20>; + }; + + qos_gpu: qos@ffae0000 { + compatible = 
"rockchip,rk3399-qos", "syscon"; + reg = <0x0 0xffae0000 0x0 0x20>; + }; + + pmu: power-management@ff310000 { + compatible = "rockchip,rk3399-pmu", "syscon", "simple-mfd"; + reg = <0x0 0xff310000 0x0 0x1000>; + + /* + * Note: RK3399 supports 6 voltage domains including VD_CORE_L, + * VD_CORE_B, VD_CENTER, VD_GPU, VD_LOGIC and VD_PMU. + * Some of the power domains are grouped together for every + * voltage domain. + * The detail contents as below. + */ + power: power-controller { + compatible = "rockchip,rk3399-power-controller"; + #power-domain-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; + + /* These power domains are grouped by VD_CENTER */ + power-domain@RK3399_PD_IEP { + reg = ; + clocks = <&cru ACLK_IEP>, + <&cru HCLK_IEP>; + pm_qos = <&qos_iep>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_RGA { + reg = ; + clocks = <&cru ACLK_RGA>, + <&cru HCLK_RGA>; + pm_qos = <&qos_rga_r>, + <&qos_rga_w>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_VCODEC { + reg = ; + clocks = <&cru ACLK_VCODEC>, + <&cru HCLK_VCODEC>; + pm_qos = <&qos_video_m0>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_VDU { + reg = ; + clocks = <&cru ACLK_VDU>, + <&cru HCLK_VDU>, + <&cru SCLK_VDU_CA>, + <&cru SCLK_VDU_CORE>; + pm_qos = <&qos_video_m1_r>, + <&qos_video_m1_w>; + #power-domain-cells = <0>; + }; + + /* These power domains are grouped by VD_GPU */ + power-domain@RK3399_PD_GPU { + reg = ; + clocks = <&cru ACLK_GPU>; + pm_qos = <&qos_gpu>; + #power-domain-cells = <0>; + }; + + /* These power domains are grouped by VD_LOGIC */ + power-domain@RK3399_PD_EDP { + reg = ; + clocks = <&cru PCLK_EDP_CTRL>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_EMMC { + reg = ; + clocks = <&cru ACLK_EMMC>; + pm_qos = <&qos_emmc>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_GMAC { + reg = ; + clocks = <&cru ACLK_GMAC>, + <&cru PCLK_GMAC>; + pm_qos = <&qos_gmac>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_SD { + reg = ; + clocks = <&cru HCLK_SDMMC>, + <&cru SCLK_SDMMC>; + pm_qos = <&qos_sd>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_SDIOAUDIO { + reg = ; + clocks = <&cru HCLK_SDIO>; + pm_qos = <&qos_sdioaudio>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_TCPD0 { + reg = ; + clocks = <&cru SCLK_UPHY0_TCPDCORE>, + <&cru SCLK_UPHY0_TCPDPHY_REF>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_TCPD1 { + reg = ; + clocks = <&cru SCLK_UPHY1_TCPDCORE>, + <&cru SCLK_UPHY1_TCPDPHY_REF>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_USB3 { + reg = ; + clocks = <&cru ACLK_USB3>; + pm_qos = <&qos_usb_otg0>, + <&qos_usb_otg1>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_VIO { + reg = ; + #power-domain-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; + + power-domain@RK3399_PD_HDCP { + reg = ; + clocks = <&cru ACLK_HDCP>, + <&cru HCLK_HDCP>, + <&cru PCLK_HDCP>; + pm_qos = <&qos_hdcp>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_ISP0 { + reg = ; + clocks = <&cru ACLK_ISP0>, + <&cru HCLK_ISP0>; + pm_qos = <&qos_isp0_m0>, + <&qos_isp0_m1>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_ISP1 { + reg = ; + clocks = <&cru ACLK_ISP1>, + <&cru HCLK_ISP1>; + pm_qos = <&qos_isp1_m0>, + <&qos_isp1_m1>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_VO { + reg = ; + #power-domain-cells = <1>; + #address-cells = <1>; + #size-cells = <0>; + + power-domain@RK3399_PD_VOPB { + reg = ; + clocks = <&cru ACLK_VOP0>, + <&cru HCLK_VOP0>; + pm_qos = 
<&qos_vop_big_r>, + <&qos_vop_big_w>; + #power-domain-cells = <0>; + }; + power-domain@RK3399_PD_VOPL { + reg = ; + clocks = <&cru ACLK_VOP1>, + <&cru HCLK_VOP1>; + pm_qos = <&qos_vop_little>; + #power-domain-cells = <0>; + }; + }; + }; + }; + }; + + pmugrf: syscon@ff320000 { + compatible = "rockchip,rk3399-pmugrf", "syscon", "simple-mfd"; + reg = <0x0 0xff320000 0x0 0x1000>; + + pmu_io_domains: io-domains { + compatible = "rockchip,rk3399-pmu-io-voltage-domain"; + status = "disabled"; + }; + }; + + spi3: spi@ff350000 { + compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; + reg = <0x0 0xff350000 0x0 0x1000>; + clocks = <&pmucru SCLK_SPI3_PMU>, <&pmucru PCLK_SPI3_PMU>; + clock-names = "spiclk", "apb_pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&spi3_clk &spi3_tx &spi3_rx &spi3_cs0>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + uart4: serial@ff370000 { + compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; + reg = <0x0 0xff370000 0x0 0x100>; + clocks = <&pmucru SCLK_UART4_PMU>, <&pmucru PCLK_UART4_PMU>; + clock-names = "baudclk", "apb_pclk"; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&uart4_xfer>; + status = "disabled"; + }; + + i2c0: i2c@ff3c0000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff3c0000 0x0 0x1000>; + assigned-clocks = <&pmucru SCLK_I2C0_PMU>; + assigned-clock-rates = <200000000>; + clocks = <&pmucru SCLK_I2C0_PMU>, <&pmucru PCLK_I2C0_PMU>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c4: i2c@ff3d0000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff3d0000 0x0 0x1000>; + assigned-clocks = <&pmucru SCLK_I2C4_PMU>; + assigned-clock-rates = <200000000>; + clocks = <&pmucru SCLK_I2C4_PMU>, <&pmucru PCLK_I2C4_PMU>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c4_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c8: i2c@ff3e0000 { + compatible = "rockchip,rk3399-i2c"; + reg = <0x0 0xff3e0000 0x0 0x1000>; + assigned-clocks = <&pmucru SCLK_I2C8_PMU>; + assigned-clock-rates = <200000000>; + clocks = <&pmucru SCLK_I2C8_PMU>, <&pmucru PCLK_I2C8_PMU>; + clock-names = "i2c", "pclk"; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&i2c8_xfer>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + pwm0: pwm@ff420000 { + compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm"; + reg = <0x0 0xff420000 0x0 0x10>; + #pwm-cells = <3>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm0_pin>; + clocks = <&pmucru PCLK_RKPWM_PMU>; + status = "disabled"; + }; + + pwm1: pwm@ff420010 { + compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm"; + reg = <0x0 0xff420010 0x0 0x10>; + #pwm-cells = <3>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm1_pin>; + clocks = <&pmucru PCLK_RKPWM_PMU>; + status = "disabled"; + }; + + pwm2: pwm@ff420020 { + compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm"; + reg = <0x0 0xff420020 0x0 0x10>; + #pwm-cells = <3>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm2_pin>; + clocks = <&pmucru PCLK_RKPWM_PMU>; + status = "disabled"; + }; + + pwm3: pwm@ff420030 { + compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm"; + reg = <0x0 0xff420030 0x0 0x10>; + #pwm-cells = <3>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm3a_pin>; + clocks = <&pmucru PCLK_RKPWM_PMU>; 
+ status = "disabled"; + }; + + dfi: dfi@ff630000 { + reg = <0x00 0xff630000 0x00 0x4000>; + compatible = "rockchip,rk3399-dfi"; + rockchip,pmu = <&pmugrf>; + interrupts = ; + clocks = <&cru PCLK_DDR_MON>; + clock-names = "pclk_ddr_mon"; + }; + + vpu: video-codec@ff650000 { + compatible = "rockchip,rk3399-vpu"; + reg = <0x0 0xff650000 0x0 0x800>; + interrupts = , + ; + interrupt-names = "vepu", "vdpu"; + clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>; + clock-names = "aclk", "hclk"; + iommus = <&vpu_mmu>; + power-domains = <&power RK3399_PD_VCODEC>; + }; + + vpu_mmu: iommu@ff650800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff650800 0x0 0x40>; + interrupts = ; + clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>; + clock-names = "aclk", "iface"; + #iommu-cells = <0>; + power-domains = <&power RK3399_PD_VCODEC>; + }; + + vdec: video-codec@ff660000 { + compatible = "rockchip,rk3399-vdec"; + reg = <0x0 0xff660000 0x0 0x480>; + interrupts = ; + clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>, + <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>; + clock-names = "axi", "ahb", "cabac", "core"; + iommus = <&vdec_mmu>; + power-domains = <&power RK3399_PD_VDU>; + }; + + vdec_mmu: iommu@ff660480 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff660480 0x0 0x40>, <0x0 0xff6604c0 0x0 0x40>; + interrupts = ; + clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>; + clock-names = "aclk", "iface"; + power-domains = <&power RK3399_PD_VDU>; + #iommu-cells = <0>; + }; + + iep_mmu: iommu@ff670800 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff670800 0x0 0x40>; + interrupts = ; + clocks = <&cru ACLK_IEP>, <&cru HCLK_IEP>; + clock-names = "aclk", "iface"; + #iommu-cells = <0>; + status = "disabled"; + }; + + rga: rga@ff680000 { + compatible = "rockchip,rk3399-rga"; + reg = <0x0 0xff680000 0x0 0x10000>; + interrupts = ; + clocks = <&cru ACLK_RGA>, <&cru HCLK_RGA>, <&cru SCLK_RGA_CORE>; + clock-names = "aclk", "hclk", "sclk"; + resets = <&cru SRST_RGA_CORE>, <&cru SRST_A_RGA>, <&cru SRST_H_RGA>; + reset-names = "core", "axi", "ahb"; + power-domains = <&power RK3399_PD_RGA>; + }; + + efuse0: efuse@ff690000 { + compatible = "rockchip,rk3399-efuse"; + reg = <0x0 0xff690000 0x0 0x80>; + #address-cells = <1>; + #size-cells = <1>; + clocks = <&cru PCLK_EFUSE1024NS>; + clock-names = "pclk_efuse"; + + /* Data cells */ + cpu_id: cpu-id@7 { + reg = <0x07 0x10>; + }; + cpub_leakage: cpu-leakage@17 { + reg = <0x17 0x1>; + }; + gpu_leakage: gpu-leakage@18 { + reg = <0x18 0x1>; + }; + center_leakage: center-leakage@19 { + reg = <0x19 0x1>; + }; + cpul_leakage: cpu-leakage@1a { + reg = <0x1a 0x1>; + }; + logic_leakage: logic-leakage@1b { + reg = <0x1b 0x1>; + }; + wafer_info: wafer-info@1c { + reg = <0x1c 0x1>; + }; + }; + + dmac_bus: dma-controller@ff6d0000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0x0 0xff6d0000 0x0 0x4000>; + interrupts = , + ; + #dma-cells = <1>; + arm,pl330-periph-burst; + clocks = <&cru ACLK_DMAC0_PERILP>; + clock-names = "apb_pclk"; + }; + + dmac_peri: dma-controller@ff6e0000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0x0 0xff6e0000 0x0 0x4000>; + interrupts = , + ; + #dma-cells = <1>; + arm,pl330-periph-burst; + clocks = <&cru ACLK_DMAC1_PERILP>; + clock-names = "apb_pclk"; + }; + + pmucru: clock-controller@ff750000 { + compatible = "rockchip,rk3399-pmucru"; + reg = <0x0 0xff750000 0x0 0x1000>; + clocks = <&xin24m>; + clock-names = "xin24m"; + rockchip,grf = <&pmugrf>; + #clock-cells = <1>; + #reset-cells = <1>; + assigned-clocks = <&pmucru PLL_PPLL>; + assigned-clock-rates = <676000000>; + 
}; + + cru: clock-controller@ff760000 { + compatible = "rockchip,rk3399-cru"; + reg = <0x0 0xff760000 0x0 0x1000>; + clocks = <&xin24m>; + clock-names = "xin24m"; + rockchip,grf = <&grf>; + #clock-cells = <1>; + #reset-cells = <1>; + assigned-clocks = + <&cru PLL_GPLL>, <&cru PLL_CPLL>, + <&cru PLL_NPLL>, + <&cru ACLK_PERIHP>, <&cru HCLK_PERIHP>, + <&cru PCLK_PERIHP>, + <&cru ACLK_PERILP0>, <&cru HCLK_PERILP0>, + <&cru PCLK_PERILP0>, <&cru ACLK_CCI>, + <&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>, + <&cru ACLK_VIO>, <&cru ACLK_HDCP>, + <&cru ACLK_GIC_PRE>, + <&cru PCLK_DDR>, + <&cru ACLK_VDU>; + assigned-clock-rates = + <594000000>, <800000000>, + <1000000000>, + <150000000>, <75000000>, + <37500000>, + <100000000>, <100000000>, + <50000000>, <600000000>, + <100000000>, <50000000>, + <400000000>, <400000000>, + <200000000>, + <200000000>, + <400000000>; + }; + + grf: syscon@ff770000 { + compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd"; + reg = <0x0 0xff770000 0x0 0x10000>; + #address-cells = <1>; + #size-cells = <1>; + + io_domains: io-domains { + compatible = "rockchip,rk3399-io-voltage-domain"; + status = "disabled"; + }; + + mipi_dphy_rx0: mipi-dphy-rx0 { + compatible = "rockchip,rk3399-mipi-dphy-rx0"; + clocks = <&cru SCLK_MIPIDPHY_REF>, + <&cru SCLK_DPHY_RX0_CFG>, + <&cru PCLK_VIO_GRF>; + clock-names = "dphy-ref", "dphy-cfg", "grf"; + power-domains = <&power RK3399_PD_VIO>; + #phy-cells = <0>; + status = "disabled"; + }; + + u2phy0: usb2phy@e450 { + compatible = "rockchip,rk3399-usb2phy"; + reg = <0xe450 0x10>; + clocks = <&cru SCLK_USB2PHY0_REF>; + clock-names = "phyclk"; + #clock-cells = <0>; + clock-output-names = "clk_usbphy0_480m"; + status = "disabled"; + + u2phy0_host: host-port { + #phy-cells = <0>; + interrupts = ; + interrupt-names = "linestate"; + status = "disabled"; + }; + + u2phy0_otg: otg-port { + #phy-cells = <0>; + interrupts = , + , + ; + interrupt-names = "otg-bvalid", "otg-id", + "linestate"; + status = "disabled"; + }; + }; + + u2phy1: usb2phy@e460 { + compatible = "rockchip,rk3399-usb2phy"; + reg = <0xe460 0x10>; + clocks = <&cru SCLK_USB2PHY1_REF>; + clock-names = "phyclk"; + #clock-cells = <0>; + clock-output-names = "clk_usbphy1_480m"; + status = "disabled"; + + u2phy1_host: host-port { + #phy-cells = <0>; + interrupts = ; + interrupt-names = "linestate"; + status = "disabled"; + }; + + u2phy1_otg: otg-port { + #phy-cells = <0>; + interrupts = , + , + ; + interrupt-names = "otg-bvalid", "otg-id", + "linestate"; + status = "disabled"; + }; + }; + + emmc_phy: phy@f780 { + compatible = "rockchip,rk3399-emmc-phy"; + reg = <0xf780 0x24>; + clocks = <&sdhci>; + clock-names = "emmcclk"; + drive-impedance-ohm = <50>; + #phy-cells = <0>; + status = "disabled"; + }; + + pcie_phy: pcie-phy { + compatible = "rockchip,rk3399-pcie-phy"; + clocks = <&cru SCLK_PCIEPHY_REF>; + clock-names = "refclk"; + #phy-cells = <1>; + resets = <&cru SRST_PCIEPHY>; + reset-names = "phy"; + status = "disabled"; + }; + }; + + tcphy0: phy@ff7c0000 { + compatible = "rockchip,rk3399-typec-phy"; + reg = <0x0 0xff7c0000 0x0 0x40000>; + clocks = <&cru SCLK_UPHY0_TCPDCORE>, + <&cru SCLK_UPHY0_TCPDPHY_REF>; + clock-names = "tcpdcore", "tcpdphy-ref"; + assigned-clocks = <&cru SCLK_UPHY0_TCPDCORE>; + assigned-clock-rates = <50000000>; + power-domains = <&power RK3399_PD_TCPD0>; + resets = <&cru SRST_UPHY0>, + <&cru SRST_UPHY0_PIPE_L00>, + <&cru SRST_P_UPHY0_TCPHY>; + reset-names = "uphy", "uphy-pipe", "uphy-tcphy"; + rockchip,grf = <&grf>; + status = "disabled"; + + tcphy0_dp: dp-port { + 
#phy-cells = <0>; + }; + + tcphy0_usb3: usb3-port { + #phy-cells = <0>; + }; + }; + + tcphy1: phy@ff800000 { + compatible = "rockchip,rk3399-typec-phy"; + reg = <0x0 0xff800000 0x0 0x40000>; + clocks = <&cru SCLK_UPHY1_TCPDCORE>, + <&cru SCLK_UPHY1_TCPDPHY_REF>; + clock-names = "tcpdcore", "tcpdphy-ref"; + assigned-clocks = <&cru SCLK_UPHY1_TCPDCORE>; + assigned-clock-rates = <50000000>; + power-domains = <&power RK3399_PD_TCPD1>; + resets = <&cru SRST_UPHY1>, + <&cru SRST_UPHY1_PIPE_L00>, + <&cru SRST_P_UPHY1_TCPHY>; + reset-names = "uphy", "uphy-pipe", "uphy-tcphy"; + rockchip,grf = <&grf>; + status = "disabled"; + + tcphy1_dp: dp-port { + #phy-cells = <0>; + }; + + tcphy1_usb3: usb3-port { + #phy-cells = <0>; + }; + }; + + watchdog@ff848000 { + compatible = "rockchip,rk3399-wdt", "snps,dw-wdt"; + reg = <0x0 0xff848000 0x0 0x100>; + clocks = <&cru PCLK_WDT>; + interrupts = ; + }; + + rktimer: rktimer@ff850000 { + compatible = "rockchip,rk3399-timer"; + reg = <0x0 0xff850000 0x0 0x1000>; + interrupts = ; + clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>; + clock-names = "pclk", "timer"; + }; + + spdif: spdif@ff870000 { + compatible = "rockchip,rk3399-spdif"; + reg = <0x0 0xff870000 0x0 0x1000>; + interrupts = ; + dmas = <&dmac_bus 7>; + dma-names = "tx"; + clock-names = "mclk", "hclk"; + clocks = <&cru SCLK_SPDIF_8CH>, <&cru HCLK_SPDIF>; + pinctrl-names = "default"; + pinctrl-0 = <&spdif_bus>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; + #sound-dai-cells = <0>; + status = "disabled"; + }; + + i2s0: i2s@ff880000 { + compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s"; + reg = <0x0 0xff880000 0x0 0x1000>; + rockchip,grf = <&grf>; + interrupts = ; + dmas = <&dmac_bus 0>, <&dmac_bus 1>; + dma-names = "tx", "rx"; + clock-names = "i2s_clk", "i2s_hclk"; + clocks = <&cru SCLK_I2S0_8CH>, <&cru HCLK_I2S0_8CH>; + pinctrl-names = "bclk_on", "bclk_off"; + pinctrl-0 = <&i2s0_8ch_bus>; + pinctrl-1 = <&i2s0_8ch_bus_bclk_off>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; + #sound-dai-cells = <0>; + status = "disabled"; + }; + + i2s1: i2s@ff890000 { + compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s"; + reg = <0x0 0xff890000 0x0 0x1000>; + interrupts = ; + dmas = <&dmac_bus 2>, <&dmac_bus 3>; + dma-names = "tx", "rx"; + clock-names = "i2s_clk", "i2s_hclk"; + clocks = <&cru SCLK_I2S1_8CH>, <&cru HCLK_I2S1_8CH>; + pinctrl-names = "default"; + pinctrl-0 = <&i2s1_2ch_bus>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; + #sound-dai-cells = <0>; + status = "disabled"; + }; + + i2s2: i2s@ff8a0000 { + compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s"; + reg = <0x0 0xff8a0000 0x0 0x1000>; + interrupts = ; + dmas = <&dmac_bus 4>, <&dmac_bus 5>; + dma-names = "tx", "rx"; + clock-names = "i2s_clk", "i2s_hclk"; + clocks = <&cru SCLK_I2S2_8CH>, <&cru HCLK_I2S2_8CH>; + power-domains = <&power RK3399_PD_SDIOAUDIO>; + #sound-dai-cells = <0>; + status = "disabled"; + }; + + vopl: vop@ff8f0000 { + compatible = "rockchip,rk3399-vop-lit"; + reg = <0x0 0xff8f0000 0x0 0x2000>, <0x0 0xff8f2000 0x0 0x400>; + interrupts = ; + assigned-clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>; + assigned-clock-rates = <400000000>, <100000000>; + clocks = <&cru ACLK_VOP1>, <&cru DCLK_VOP1>, <&cru HCLK_VOP1>; + clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; + iommus = <&vopl_mmu>; + power-domains = <&power RK3399_PD_VOPL>; + resets = <&cru SRST_A_VOP1>, <&cru SRST_H_VOP1>, <&cru SRST_D_VOP1>; + reset-names = "axi", "ahb", "dclk"; + status = "disabled"; + + vopl_out: port { + #address-cells = <1>; + #size-cells = 
<0>; + + vopl_out_mipi: endpoint@0 { + reg = <0>; + remote-endpoint = <&mipi_in_vopl>; + }; + + vopl_out_edp: endpoint@1 { + reg = <1>; + remote-endpoint = <&edp_in_vopl>; + }; + + vopl_out_hdmi: endpoint@2 { + reg = <2>; + remote-endpoint = <&hdmi_in_vopl>; + }; + + vopl_out_mipi1: endpoint@3 { + reg = <3>; + remote-endpoint = <&mipi1_in_vopl>; + }; + + vopl_out_dp: endpoint@4 { + reg = <4>; + remote-endpoint = <&dp_in_vopl>; + }; + }; + }; + + vopl_mmu: iommu@ff8f3f00 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff8f3f00 0x0 0x100>; + interrupts = ; + clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>; + clock-names = "aclk", "iface"; + power-domains = <&power RK3399_PD_VOPL>; + #iommu-cells = <0>; + status = "disabled"; + }; + + vopb: vop@ff900000 { + compatible = "rockchip,rk3399-vop-big"; + reg = <0x0 0xff900000 0x0 0x2000>, <0x0 0xff902000 0x0 0x1000>; + interrupts = ; + assigned-clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>; + assigned-clock-rates = <400000000>, <100000000>; + clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>; + clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; + iommus = <&vopb_mmu>; + power-domains = <&power RK3399_PD_VOPB>; + resets = <&cru SRST_A_VOP0>, <&cru SRST_H_VOP0>, <&cru SRST_D_VOP0>; + reset-names = "axi", "ahb", "dclk"; + status = "disabled"; + + vopb_out: port { + #address-cells = <1>; + #size-cells = <0>; + + vopb_out_edp: endpoint@0 { + reg = <0>; + remote-endpoint = <&edp_in_vopb>; + }; + + vopb_out_mipi: endpoint@1 { + reg = <1>; + remote-endpoint = <&mipi_in_vopb>; + }; + + vopb_out_hdmi: endpoint@2 { + reg = <2>; + remote-endpoint = <&hdmi_in_vopb>; + }; + + vopb_out_mipi1: endpoint@3 { + reg = <3>; + remote-endpoint = <&mipi1_in_vopb>; + }; + + vopb_out_dp: endpoint@4 { + reg = <4>; + remote-endpoint = <&dp_in_vopb>; + }; + }; + }; + + vopb_mmu: iommu@ff903f00 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff903f00 0x0 0x100>; + interrupts = ; + clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>; + clock-names = "aclk", "iface"; + power-domains = <&power RK3399_PD_VOPB>; + #iommu-cells = <0>; + status = "disabled"; + }; + + isp0: isp0@ff910000 { + compatible = "rockchip,rk3399-cif-isp"; + reg = <0x0 0xff910000 0x0 0x4000>; + interrupts = ; + clocks = <&cru SCLK_ISP0>, + <&cru ACLK_ISP0_WRAPPER>, + <&cru HCLK_ISP0_WRAPPER>; + clock-names = "isp", "aclk", "hclk"; + iommus = <&isp0_mmu>; + phys = <&mipi_dphy_rx0>; + phy-names = "dphy"; + power-domains = <&power RK3399_PD_ISP0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + }; + + isp0_mmu: iommu@ff914000 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>; + interrupts = ; + clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>; + clock-names = "aclk", "iface"; + #iommu-cells = <0>; + power-domains = <&power RK3399_PD_ISP0>; + rockchip,disable-mmu-reset; + }; + + isp1: isp1@ff920000 { + compatible = "rockchip,rk3399-cif-isp"; + reg = <0x0 0xff920000 0x0 0x4000>; + interrupts = ; + clocks = <&cru SCLK_ISP1>, + <&cru ACLK_ISP1_WRAPPER>, + <&cru HCLK_ISP1_WRAPPER>; + clock-names = "isp", "aclk", "hclk"; + iommus = <&isp1_mmu>; + phys = <&mipi_dsi1>; + phy-names = "dphy"; + power-domains = <&power RK3399_PD_ISP1>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + }; + + isp1_mmu: iommu@ff924000 { + compatible = 
"rockchip,iommu"; + reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>; + interrupts = ; + clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>; + clock-names = "aclk", "iface"; + #iommu-cells = <0>; + power-domains = <&power RK3399_PD_ISP1>; + rockchip,disable-mmu-reset; + }; + + hdmi_sound: hdmi-sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,mclk-fs = <256>; + simple-audio-card,name = "hdmi-sound"; + status = "disabled"; + + simple-audio-card,cpu { + sound-dai = <&i2s2>; + }; + simple-audio-card,codec { + sound-dai = <&hdmi>; + }; + }; + + hdmi: hdmi@ff940000 { + compatible = "rockchip,rk3399-dw-hdmi"; + reg = <0x0 0xff940000 0x0 0x20000>; + reg-io-width = <4>; + interrupts = ; + clocks = <&cru PCLK_HDMI_CTRL>, + <&cru SCLK_HDMI_SFR>, + <&cru SCLK_HDMI_CEC>, + <&cru PCLK_VIO_GRF>, + <&cru PLL_VPLL>; + clock-names = "iahb", "isfr", "cec", "grf", "ref"; + power-domains = <&power RK3399_PD_HDCP>; + rockchip,grf = <&grf>; + #sound-dai-cells = <0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + hdmi_in: port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + hdmi_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_hdmi>; + }; + hdmi_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_hdmi>; + }; + }; + + hdmi_out: port@1 { + reg = <1>; + }; + }; + }; + + mipi_dsi: dsi@ff960000 { + compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; + reg = <0x0 0xff960000 0x0 0x8000>; + interrupts = ; + clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>, + <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>; + clock-names = "ref", "pclk", "phy_cfg", "grf"; + power-domains = <&power RK3399_PD_VIO>; + resets = <&cru SRST_P_MIPI_DSI0>; + reset-names = "apb"; + rockchip,grf = <&grf>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + mipi_in: port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + mipi_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_mipi>; + }; + + mipi_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_mipi>; + }; + }; + + mipi_out: port@1 { + reg = <1>; + }; + }; + }; + + mipi_dsi1: dsi@ff968000 { + compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; + reg = <0x0 0xff968000 0x0 0x8000>; + interrupts = ; + clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI1>, + <&cru SCLK_DPHY_TX1RX1_CFG>, <&cru PCLK_VIO_GRF>; + clock-names = "ref", "pclk", "phy_cfg", "grf"; + power-domains = <&power RK3399_PD_VIO>; + resets = <&cru SRST_P_MIPI_DSI1>; + reset-names = "apb"; + rockchip,grf = <&grf>; + #address-cells = <1>; + #size-cells = <0>; + #phy-cells = <0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + mipi1_in: port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + mipi1_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_mipi1>; + }; + + mipi1_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_mipi1>; + }; + }; + + mipi1_out: port@1 { + reg = <1>; + }; + }; + }; + + edp: dp@ff970000 { + compatible = "rockchip,rk3399-edp"; + reg = <0x0 0xff970000 0x0 0x8000>; + interrupts = ; + clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>; + clock-names = "dp", "pclk", "grf"; + pinctrl-names = "default"; + pinctrl-0 = <&edp_hpd>; + power-domains = <&power RK3399_PD_EDP>; + resets = <&cru SRST_P_EDP_CTRL>; + reset-names = "dp"; + 
rockchip,grf = <&grf>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + edp_in: port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + edp_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_edp>; + }; + + edp_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_edp>; + }; + }; + + edp_out: port@1 { + reg = <1>; + }; + }; + }; + + gpu: gpu@ff9a0000 { + compatible = "rockchip,rk3399-mali", "arm,mali-t860"; + reg = <0x0 0xff9a0000 0x0 0x10000>; + interrupts = , + , + ; + interrupt-names = "job", "mmu", "gpu"; + clocks = <&cru ACLK_GPU>; + #cooling-cells = <2>; + dynamic-power-coefficient = <2640>; + power-domains = <&power RK3399_PD_GPU>; + status = "disabled"; + }; + + pinctrl: pinctrl { + compatible = "rockchip,rk3399-pinctrl"; + rockchip,grf = <&grf>; + rockchip,pmu = <&pmugrf>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + gpio0: gpio@ff720000 { + compatible = "rockchip,gpio-bank"; + reg = <0x0 0xff720000 0x0 0x100>; + clocks = <&pmucru PCLK_GPIO0_PMU>; + interrupts = ; + + gpio-controller; + #gpio-cells = <0x2>; + + interrupt-controller; + #interrupt-cells = <0x2>; + }; + + gpio1: gpio@ff730000 { + compatible = "rockchip,gpio-bank"; + reg = <0x0 0xff730000 0x0 0x100>; + clocks = <&pmucru PCLK_GPIO1_PMU>; + interrupts = ; + + gpio-controller; + #gpio-cells = <0x2>; + + interrupt-controller; + #interrupt-cells = <0x2>; + }; + + gpio2: gpio@ff780000 { + compatible = "rockchip,gpio-bank"; + reg = <0x0 0xff780000 0x0 0x100>; + clocks = <&cru PCLK_GPIO2>; + interrupts = ; + + gpio-controller; + #gpio-cells = <0x2>; + + interrupt-controller; + #interrupt-cells = <0x2>; + }; + + gpio3: gpio@ff788000 { + compatible = "rockchip,gpio-bank"; + reg = <0x0 0xff788000 0x0 0x100>; + clocks = <&cru PCLK_GPIO3>; + interrupts = ; + + gpio-controller; + #gpio-cells = <0x2>; + + interrupt-controller; + #interrupt-cells = <0x2>; + }; + + gpio4: gpio@ff790000 { + compatible = "rockchip,gpio-bank"; + reg = <0x0 0xff790000 0x0 0x100>; + clocks = <&cru PCLK_GPIO4>; + interrupts = ; + + gpio-controller; + #gpio-cells = <0x2>; + + interrupt-controller; + #interrupt-cells = <0x2>; + }; + + pcfg_pull_up: pcfg-pull-up { + bias-pull-up; + }; + + pcfg_pull_down: pcfg-pull-down { + bias-pull-down; + }; + + pcfg_pull_none: pcfg-pull-none { + bias-disable; + }; + + pcfg_pull_none_12ma: pcfg-pull-none-12ma { + bias-disable; + drive-strength = <12>; + }; + + pcfg_pull_none_13ma: pcfg-pull-none-13ma { + bias-disable; + drive-strength = <13>; + }; + + pcfg_pull_none_18ma: pcfg-pull-none-18ma { + bias-disable; + drive-strength = <18>; + }; + + pcfg_pull_none_20ma: pcfg-pull-none-20ma { + bias-disable; + drive-strength = <20>; + }; + + pcfg_pull_up_2ma: pcfg-pull-up-2ma { + bias-pull-up; + drive-strength = <2>; + }; + + pcfg_pull_up_8ma: pcfg-pull-up-8ma { + bias-pull-up; + drive-strength = <8>; + }; + + pcfg_pull_up_18ma: pcfg-pull-up-18ma { + bias-pull-up; + drive-strength = <18>; + }; + + pcfg_pull_up_20ma: pcfg-pull-up-20ma { + bias-pull-up; + drive-strength = <20>; + }; + + pcfg_pull_down_4ma: pcfg-pull-down-4ma { + bias-pull-down; + drive-strength = <4>; + }; + + pcfg_pull_down_8ma: pcfg-pull-down-8ma { + bias-pull-down; + drive-strength = <8>; + }; + + pcfg_pull_down_12ma: pcfg-pull-down-12ma { + bias-pull-down; + drive-strength = <12>; + }; + + pcfg_pull_down_18ma: pcfg-pull-down-18ma { + bias-pull-down; + drive-strength = <18>; + }; + + pcfg_pull_down_20ma: pcfg-pull-down-20ma { + bias-pull-down; + drive-strength = <20>; + }; + + 
pcfg_output_high: pcfg-output-high { + output-high; + }; + + pcfg_output_low: pcfg-output-low { + output-low; + }; + + pcfg_input_enable: pcfg-input-enable { + input-enable; + }; + + pcfg_input_pull_up: pcfg-input-pull-up { + input-enable; + bias-pull-up; + }; + + pcfg_input_pull_down: pcfg-input-pull-down { + input-enable; + bias-pull-down; + }; + + clock { + clk_32k: clk-32k { + rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>; + }; + }; + + cif { + cif_clkin: cif-clkin { + rockchip,pins = + <2 RK_PB2 3 &pcfg_pull_none>; + }; + + cif_clkouta: cif-clkouta { + rockchip,pins = + <2 RK_PB3 3 &pcfg_pull_none>; + }; + }; + + edp { + edp_hpd: edp-hpd { + rockchip,pins = + <4 RK_PC7 2 &pcfg_pull_none>; + }; + }; + + gmac { + rgmii_pins: rgmii-pins { + rockchip,pins = + /* mac_txclk */ + <3 RK_PC1 1 &pcfg_pull_none_13ma>, + /* mac_rxclk */ + <3 RK_PB6 1 &pcfg_pull_none>, + /* mac_mdio */ + <3 RK_PB5 1 &pcfg_pull_none>, + /* mac_txen */ + <3 RK_PB4 1 &pcfg_pull_none_13ma>, + /* mac_clk */ + <3 RK_PB3 1 &pcfg_pull_none>, + /* mac_rxdv */ + <3 RK_PB1 1 &pcfg_pull_none>, + /* mac_mdc */ + <3 RK_PB0 1 &pcfg_pull_none>, + /* mac_rxd1 */ + <3 RK_PA7 1 &pcfg_pull_none>, + /* mac_rxd0 */ + <3 RK_PA6 1 &pcfg_pull_none>, + /* mac_txd1 */ + <3 RK_PA5 1 &pcfg_pull_none_13ma>, + /* mac_txd0 */ + <3 RK_PA4 1 &pcfg_pull_none_13ma>, + /* mac_rxd3 */ + <3 RK_PA3 1 &pcfg_pull_none>, + /* mac_rxd2 */ + <3 RK_PA2 1 &pcfg_pull_none>, + /* mac_txd3 */ + <3 RK_PA1 1 &pcfg_pull_none_13ma>, + /* mac_txd2 */ + <3 RK_PA0 1 &pcfg_pull_none_13ma>; + }; + + rmii_pins: rmii-pins { + rockchip,pins = + /* mac_mdio */ + <3 RK_PB5 1 &pcfg_pull_none>, + /* mac_txen */ + <3 RK_PB4 1 &pcfg_pull_none_13ma>, + /* mac_clk */ + <3 RK_PB3 1 &pcfg_pull_none>, + /* mac_rxer */ + <3 RK_PB2 1 &pcfg_pull_none>, + /* mac_rxdv */ + <3 RK_PB1 1 &pcfg_pull_none>, + /* mac_mdc */ + <3 RK_PB0 1 &pcfg_pull_none>, + /* mac_rxd1 */ + <3 RK_PA7 1 &pcfg_pull_none>, + /* mac_rxd0 */ + <3 RK_PA6 1 &pcfg_pull_none>, + /* mac_txd1 */ + <3 RK_PA5 1 &pcfg_pull_none_13ma>, + /* mac_txd0 */ + <3 RK_PA4 1 &pcfg_pull_none_13ma>; + }; + }; + + i2c0 { + i2c0_xfer: i2c0-xfer { + rockchip,pins = + <1 RK_PB7 2 &pcfg_pull_none>, + <1 RK_PC0 2 &pcfg_pull_none>; + }; + }; + + i2c1 { + i2c1_xfer: i2c1-xfer { + rockchip,pins = + <4 RK_PA2 1 &pcfg_pull_none>, + <4 RK_PA1 1 &pcfg_pull_none>; + }; + }; + + i2c2 { + i2c2_xfer: i2c2-xfer { + rockchip,pins = + <2 RK_PA1 2 &pcfg_pull_none_12ma>, + <2 RK_PA0 2 &pcfg_pull_none_12ma>; + }; + }; + + i2c3 { + i2c3_xfer: i2c3-xfer { + rockchip,pins = + <4 RK_PC1 1 &pcfg_pull_none>, + <4 RK_PC0 1 &pcfg_pull_none>; + }; + }; + + i2c4 { + i2c4_xfer: i2c4-xfer { + rockchip,pins = + <1 RK_PB4 1 &pcfg_pull_none>, + <1 RK_PB3 1 &pcfg_pull_none>; + }; + }; + + i2c5 { + i2c5_xfer: i2c5-xfer { + rockchip,pins = + <3 RK_PB3 2 &pcfg_pull_none>, + <3 RK_PB2 2 &pcfg_pull_none>; + }; + }; + + i2c6 { + i2c6_xfer: i2c6-xfer { + rockchip,pins = + <2 RK_PB2 2 &pcfg_pull_none>, + <2 RK_PB1 2 &pcfg_pull_none>; + }; + }; + + i2c7 { + i2c7_xfer: i2c7-xfer { + rockchip,pins = + <2 RK_PB0 2 &pcfg_pull_none>, + <2 RK_PA7 2 &pcfg_pull_none>; + }; + }; + + i2c8 { + i2c8_xfer: i2c8-xfer { + rockchip,pins = + <1 RK_PC5 1 &pcfg_pull_none>, + <1 RK_PC4 1 &pcfg_pull_none>; + }; + }; + + i2s0 { + i2s0_2ch_bus: i2s0-2ch-bus { + rockchip,pins = + <3 RK_PD0 1 &pcfg_pull_none>, + <3 RK_PD1 1 &pcfg_pull_none>, + <3 RK_PD2 1 &pcfg_pull_none>, + <3 RK_PD3 1 &pcfg_pull_none>, + <3 RK_PD7 1 &pcfg_pull_none>, + <4 RK_PA0 1 &pcfg_pull_none>; + }; + + i2s0_2ch_bus_bclk_off: 
i2s0-2ch-bus-bclk-off { + rockchip,pins = + <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>, + <3 RK_PD1 1 &pcfg_pull_none>, + <3 RK_PD2 1 &pcfg_pull_none>, + <3 RK_PD3 1 &pcfg_pull_none>, + <3 RK_PD7 1 &pcfg_pull_none>, + <4 RK_PA0 1 &pcfg_pull_none>; + }; + + i2s0_8ch_bus: i2s0-8ch-bus { + rockchip,pins = + <3 RK_PD0 1 &pcfg_pull_none>, + <3 RK_PD1 1 &pcfg_pull_none>, + <3 RK_PD2 1 &pcfg_pull_none>, + <3 RK_PD3 1 &pcfg_pull_none>, + <3 RK_PD4 1 &pcfg_pull_none>, + <3 RK_PD5 1 &pcfg_pull_none>, + <3 RK_PD6 1 &pcfg_pull_none>, + <3 RK_PD7 1 &pcfg_pull_none>, + <4 RK_PA0 1 &pcfg_pull_none>; + }; + + i2s0_8ch_bus_bclk_off: i2s0-8ch-bus-bclk-off { + rockchip,pins = + <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>, + <3 RK_PD1 1 &pcfg_pull_none>, + <3 RK_PD2 1 &pcfg_pull_none>, + <3 RK_PD3 1 &pcfg_pull_none>, + <3 RK_PD4 1 &pcfg_pull_none>, + <3 RK_PD5 1 &pcfg_pull_none>, + <3 RK_PD6 1 &pcfg_pull_none>, + <3 RK_PD7 1 &pcfg_pull_none>, + <4 RK_PA0 1 &pcfg_pull_none>; + }; + }; + + i2s1 { + i2s1_2ch_bus: i2s1-2ch-bus { + rockchip,pins = + <4 RK_PA3 1 &pcfg_pull_none>, + <4 RK_PA4 1 &pcfg_pull_none>, + <4 RK_PA5 1 &pcfg_pull_none>, + <4 RK_PA6 1 &pcfg_pull_none>, + <4 RK_PA7 1 &pcfg_pull_none>; + }; + + i2s1_2ch_bus_bclk_off: i2s1-2ch-bus-bclk-off { + rockchip,pins = + <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>, + <4 RK_PA4 1 &pcfg_pull_none>, + <4 RK_PA5 1 &pcfg_pull_none>, + <4 RK_PA6 1 &pcfg_pull_none>, + <4 RK_PA7 1 &pcfg_pull_none>; + }; + }; + + sdio0 { + sdio0_bus1: sdio0-bus1 { + rockchip,pins = + <2 RK_PC4 1 &pcfg_pull_up>; + }; + + sdio0_bus4: sdio0-bus4 { + rockchip,pins = + <2 RK_PC4 1 &pcfg_pull_up>, + <2 RK_PC5 1 &pcfg_pull_up>, + <2 RK_PC6 1 &pcfg_pull_up>, + <2 RK_PC7 1 &pcfg_pull_up>; + }; + + sdio0_cmd: sdio0-cmd { + rockchip,pins = + <2 RK_PD0 1 &pcfg_pull_up>; + }; + + sdio0_clk: sdio0-clk { + rockchip,pins = + <2 RK_PD1 1 &pcfg_pull_none>; + }; + + sdio0_cd: sdio0-cd { + rockchip,pins = + <2 RK_PD2 1 &pcfg_pull_up>; + }; + + sdio0_pwr: sdio0-pwr { + rockchip,pins = + <2 RK_PD3 1 &pcfg_pull_up>; + }; + + sdio0_bkpwr: sdio0-bkpwr { + rockchip,pins = + <2 RK_PD4 1 &pcfg_pull_up>; + }; + + sdio0_wp: sdio0-wp { + rockchip,pins = + <0 RK_PA3 1 &pcfg_pull_up>; + }; + + sdio0_int: sdio0-int { + rockchip,pins = + <0 RK_PA4 1 &pcfg_pull_up>; + }; + }; + + sdmmc { + sdmmc_bus1: sdmmc-bus1 { + rockchip,pins = + <4 RK_PB0 1 &pcfg_pull_up>; + }; + + sdmmc_bus4: sdmmc-bus4 { + rockchip,pins = + <4 RK_PB0 1 &pcfg_pull_up>, + <4 RK_PB1 1 &pcfg_pull_up>, + <4 RK_PB2 1 &pcfg_pull_up>, + <4 RK_PB3 1 &pcfg_pull_up>; + }; + + sdmmc_clk: sdmmc-clk { + rockchip,pins = + <4 RK_PB4 1 &pcfg_pull_none>; + }; + + sdmmc_cmd: sdmmc-cmd { + rockchip,pins = + <4 RK_PB5 1 &pcfg_pull_up>; + }; + + sdmmc_cd: sdmmc-cd { + rockchip,pins = + <0 RK_PA7 1 &pcfg_pull_up>; + }; + + sdmmc_wp: sdmmc-wp { + rockchip,pins = + <0 RK_PB0 1 &pcfg_pull_up>; + }; + }; + + suspend { + ap_pwroff: ap-pwroff { + rockchip,pins = <1 RK_PA5 1 &pcfg_pull_none>; + }; + + ddrio_pwroff: ddrio-pwroff { + rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>; + }; + }; + + spdif { + spdif_bus: spdif-bus { + rockchip,pins = + <4 RK_PC5 1 &pcfg_pull_none>; + }; + + spdif_bus_1: spdif-bus-1 { + rockchip,pins = + <3 RK_PC0 3 &pcfg_pull_none>; + }; + }; + + spi0 { + spi0_clk: spi0-clk { + rockchip,pins = + <3 RK_PA6 2 &pcfg_pull_up>; + }; + spi0_cs0: spi0-cs0 { + rockchip,pins = + <3 RK_PA7 2 &pcfg_pull_up>; + }; + spi0_cs1: spi0-cs1 { + rockchip,pins = + <3 RK_PB0 2 &pcfg_pull_up>; + }; + spi0_tx: spi0-tx { + rockchip,pins = + <3 RK_PA5 2 &pcfg_pull_up>; + }; + spi0_rx: 
spi0-rx { + rockchip,pins = + <3 RK_PA4 2 &pcfg_pull_up>; + }; + }; + + spi1 { + spi1_clk: spi1-clk { + rockchip,pins = + <1 RK_PB1 2 &pcfg_pull_up>; + }; + spi1_cs0: spi1-cs0 { + rockchip,pins = + <1 RK_PB2 2 &pcfg_pull_up>; + }; + spi1_rx: spi1-rx { + rockchip,pins = + <1 RK_PA7 2 &pcfg_pull_up>; + }; + spi1_tx: spi1-tx { + rockchip,pins = + <1 RK_PB0 2 &pcfg_pull_up>; + }; + }; + + spi2 { + spi2_clk: spi2-clk { + rockchip,pins = + <2 RK_PB3 1 &pcfg_pull_up>; + }; + spi2_cs0: spi2-cs0 { + rockchip,pins = + <2 RK_PB4 1 &pcfg_pull_up>; + }; + spi2_rx: spi2-rx { + rockchip,pins = + <2 RK_PB1 1 &pcfg_pull_up>; + }; + spi2_tx: spi2-tx { + rockchip,pins = + <2 RK_PB2 1 &pcfg_pull_up>; + }; + }; + + spi3 { + spi3_clk: spi3-clk { + rockchip,pins = + <1 RK_PC1 1 &pcfg_pull_up>; + }; + spi3_cs0: spi3-cs0 { + rockchip,pins = + <1 RK_PC2 1 &pcfg_pull_up>; + }; + spi3_rx: spi3-rx { + rockchip,pins = + <1 RK_PB7 1 &pcfg_pull_up>; + }; + spi3_tx: spi3-tx { + rockchip,pins = + <1 RK_PC0 1 &pcfg_pull_up>; + }; + }; + + spi4 { + spi4_clk: spi4-clk { + rockchip,pins = + <3 RK_PA2 2 &pcfg_pull_up>; + }; + spi4_cs0: spi4-cs0 { + rockchip,pins = + <3 RK_PA3 2 &pcfg_pull_up>; + }; + spi4_rx: spi4-rx { + rockchip,pins = + <3 RK_PA0 2 &pcfg_pull_up>; + }; + spi4_tx: spi4-tx { + rockchip,pins = + <3 RK_PA1 2 &pcfg_pull_up>; + }; + }; + + spi5 { + spi5_clk: spi5-clk { + rockchip,pins = + <2 RK_PC6 2 &pcfg_pull_up>; + }; + spi5_cs0: spi5-cs0 { + rockchip,pins = + <2 RK_PC7 2 &pcfg_pull_up>; + }; + spi5_rx: spi5-rx { + rockchip,pins = + <2 RK_PC4 2 &pcfg_pull_up>; + }; + spi5_tx: spi5-tx { + rockchip,pins = + <2 RK_PC5 2 &pcfg_pull_up>; + }; + }; + + testclk { + test_clkout0: test-clkout0 { + rockchip,pins = + <0 RK_PA0 1 &pcfg_pull_none>; + }; + + test_clkout1: test-clkout1 { + rockchip,pins = + <2 RK_PD1 2 &pcfg_pull_none>; + }; + + test_clkout2: test-clkout2 { + rockchip,pins = + <0 RK_PB0 3 &pcfg_pull_none>; + }; + }; + + tsadc { + otp_pin: otp-pin { + rockchip,pins = <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + otp_out: otp-out { + rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none>; + }; + }; + + uart0 { + uart0_xfer: uart0-xfer { + rockchip,pins = + <2 RK_PC0 1 &pcfg_pull_up>, + <2 RK_PC1 1 &pcfg_pull_none>; + }; + + uart0_cts: uart0-cts { + rockchip,pins = + <2 RK_PC2 1 &pcfg_pull_none>; + }; + + uart0_rts: uart0-rts { + rockchip,pins = + <2 RK_PC3 1 &pcfg_pull_none>; + }; + }; + + uart1 { + uart1_xfer: uart1-xfer { + rockchip,pins = + <3 RK_PB4 2 &pcfg_pull_up>, + <3 RK_PB5 2 &pcfg_pull_none>; + }; + }; + + uart2a { + uart2a_xfer: uart2a-xfer { + rockchip,pins = + <4 RK_PB0 2 &pcfg_pull_up>, + <4 RK_PB1 2 &pcfg_pull_none>; + }; + }; + + uart2b { + uart2b_xfer: uart2b-xfer { + rockchip,pins = + <4 RK_PC0 2 &pcfg_pull_up>, + <4 RK_PC1 2 &pcfg_pull_none>; + }; + }; + + uart2c { + uart2c_xfer: uart2c-xfer { + rockchip,pins = + <4 RK_PC3 1 &pcfg_pull_up>, + <4 RK_PC4 1 &pcfg_pull_none>; + }; + }; + + uart3 { + uart3_xfer: uart3-xfer { + rockchip,pins = + <3 RK_PB6 2 &pcfg_pull_up>, + <3 RK_PB7 2 &pcfg_pull_none>; + }; + + uart3_cts: uart3-cts { + rockchip,pins = + <3 RK_PC0 2 &pcfg_pull_none>; + }; + + uart3_rts: uart3-rts { + rockchip,pins = + <3 RK_PC1 2 &pcfg_pull_none>; + }; + }; + + uart4 { + uart4_xfer: uart4-xfer { + rockchip,pins = + <1 RK_PA7 1 &pcfg_pull_up>, + <1 RK_PB0 1 &pcfg_pull_none>; + }; + }; + + uarthdcp { + uarthdcp_xfer: uarthdcp-xfer { + rockchip,pins = + <4 RK_PC5 2 &pcfg_pull_up>, + <4 RK_PC6 2 &pcfg_pull_none>; + }; + }; + + pwm0 { + pwm0_pin: pwm0-pin { + rockchip,pins = + <4 RK_PC2 1 
&pcfg_pull_none>; + }; + + pwm0_pin_pull_down: pwm0-pin-pull-down { + rockchip,pins = + <4 RK_PC2 1 &pcfg_pull_down>; + }; + + vop0_pwm_pin: vop0-pwm-pin { + rockchip,pins = + <4 RK_PC2 2 &pcfg_pull_none>; + }; + + vop1_pwm_pin: vop1-pwm-pin { + rockchip,pins = + <4 RK_PC2 3 &pcfg_pull_none>; + }; + }; + + pwm1 { + pwm1_pin: pwm1-pin { + rockchip,pins = + <4 RK_PC6 1 &pcfg_pull_none>; + }; + + pwm1_pin_pull_down: pwm1-pin-pull-down { + rockchip,pins = + <4 RK_PC6 1 &pcfg_pull_down>; + }; + }; + + pwm2 { + pwm2_pin: pwm2-pin { + rockchip,pins = + <1 RK_PC3 1 &pcfg_pull_none>; + }; + + pwm2_pin_pull_down: pwm2-pin-pull-down { + rockchip,pins = + <1 RK_PC3 1 &pcfg_pull_down>; + }; + }; + + pwm3a { + pwm3a_pin: pwm3a-pin { + rockchip,pins = + <0 RK_PA6 1 &pcfg_pull_none>; + }; + }; + + pwm3b { + pwm3b_pin: pwm3b-pin { + rockchip,pins = + <1 RK_PB6 1 &pcfg_pull_none>; + }; + }; + + hdmi { + hdmi_i2c_xfer: hdmi-i2c-xfer { + rockchip,pins = + <4 RK_PC1 3 &pcfg_pull_none>, + <4 RK_PC0 3 &pcfg_pull_none>; + }; + + hdmi_cec: hdmi-cec { + rockchip,pins = + <4 RK_PC7 1 &pcfg_pull_none>; + }; + }; + + pcie { + pcie_clkreqn_cpm: pci-clkreqn-cpm { + rockchip,pins = + <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + pcie_clkreqnb_cpm: pci-clkreqnb-cpm { + rockchip,pins = + <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + }; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts b/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts index 173da81fc23117..1489eb32e266a7 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts @@ -8,7 +8,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "OPEN AI LAB EAIDK-610"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts index 55eca7a50a1f54..54e67d2dac092d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts @@ -5,7 +5,7 @@ /dts-v1/; #include -#include "rk3399.dtsi" +#include "rk3399-base.dtsi" / { model = "Rockchip RK3399 Evaluation Board"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts index 260415d99aebf8..f4491317a1b032 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts @@ -9,7 +9,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "Firefly-RK3399 Board"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index 3cd63d1e8f15bb..776c0eec04d7f5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -6,8 +6,7 @@ */ #include -#include "rk3399.dtsi" -#include "rk3399-op1-opp.dtsi" +#include "rk3399-op1.dtsi" / { aliases { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts index 4a6ab6c2e24cff..5a02502d21cd89 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts @@ -4,7 +4,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "Hugsun X99 TV BOX"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi index 9d9297bc5f04a0..c772985ae4e51c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi 
@@ -9,7 +9,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { aliases { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts index 9586bb12a5d8f5..b0c1fb0b704e7e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts @@ -12,7 +12,6 @@ /dts-v1/; #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "Kobol Helios64"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts index cb69e2145fa940..f12b1eb0057536 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts @@ -8,7 +8,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "Leez RK3399 P710"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi index b7f1e47978a69e..7debc4a1b5faa5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi @@ -14,7 +14,6 @@ /dts-v1/; #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { aliases { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi deleted file mode 100644 index 783120e9cebeb4..00000000000000 --- a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi +++ /dev/null @@ -1,166 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -/* - * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd - */ - -/ { - cluster0_opp: opp-table-0 { - compatible = "operating-points-v2"; - opp-shared; - - opp00 { - opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <800000>; - clock-latency-ns = <40000>; - }; - opp01 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <825000>; - }; - opp02 { - opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <850000>; - }; - opp03 { - opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <900000>; - }; - opp04 { - opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <975000>; - }; - opp05 { - opp-hz = /bits/ 64 <1416000000>; - opp-microvolt = <1100000>; - }; - opp06 { - opp-hz = /bits/ 64 <1512000000>; - opp-microvolt = <1150000>; - }; - }; - - cluster1_opp: opp-table-1 { - compatible = "operating-points-v2"; - opp-shared; - - opp00 { - opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <800000>; - clock-latency-ns = <40000>; - }; - opp01 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <800000>; - }; - opp02 { - opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <825000>; - }; - opp03 { - opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <850000>; - }; - opp04 { - opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <900000>; - }; - opp05 { - opp-hz = /bits/ 64 <1416000000>; - opp-microvolt = <975000>; - }; - opp06 { - opp-hz = /bits/ 64 <1608000000>; - opp-microvolt = <1050000>; - }; - opp07 { - opp-hz = /bits/ 64 <1800000000>; - opp-microvolt = <1150000>; - }; - opp08 { - opp-hz = /bits/ 64 <2016000000>; - opp-microvolt = <1250000>; - }; - }; - - gpu_opp_table: opp-table-2 { - compatible = "operating-points-v2"; - - opp00 { - opp-hz = /bits/ 64 <200000000>; - opp-microvolt = <800000>; - }; - opp01 { - opp-hz = /bits/ 64 <297000000>; - opp-microvolt = <800000>; - }; - opp02 { - opp-hz = /bits/ 64 <400000000>; - opp-microvolt = <825000>; - }; - opp03 { - opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <850000>; - }; - opp04 { - opp-hz = /bits/ 64 
<600000000>; - opp-microvolt = <925000>; - }; - opp05 { - opp-hz = /bits/ 64 <800000000>; - opp-microvolt = <1075000>; - }; - }; - - dmc_opp_table: opp-table-3 { - compatible = "operating-points-v2"; - - opp00 { - opp-hz = /bits/ 64 <400000000>; - opp-microvolt = <900000>; - }; - opp01 { - opp-hz = /bits/ 64 <666000000>; - opp-microvolt = <900000>; - }; - opp02 { - opp-hz = /bits/ 64 <800000000>; - opp-microvolt = <900000>; - }; - opp03 { - opp-hz = /bits/ 64 <928000000>; - opp-microvolt = <925000>; - }; - }; -}; - -&cpu_l0 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l1 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l2 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l3 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_b0 { - operating-points-v2 = <&cluster1_opp>; -}; - -&cpu_b1 { - operating-points-v2 = <&cluster1_opp>; -}; - -&dmc { - operating-points-v2 = <&dmc_opp_table>; -}; - -&gpu { - operating-points-v2 = <&gpu_opp_table>; -}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi new file mode 100644 index 00000000000000..b24bff51151367 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-op1.dtsi @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd + */ + +#include "rk3399.dtsi" + +/ { + cluster0_opp: opp-table-0 { + compatible = "operating-points-v2"; + opp-shared; + + opp00 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <800000>; + clock-latency-ns = <40000>; + }; + opp01 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <825000>; + }; + opp02 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <850000>; + }; + opp03 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <900000>; + }; + opp04 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <975000>; + }; + opp05 { + opp-hz = /bits/ 64 <1416000000>; + opp-microvolt = <1100000>; + }; + opp06 { + opp-hz = /bits/ 64 <1512000000>; + opp-microvolt = <1150000>; + }; + }; + + cluster1_opp: opp-table-1 { + compatible = "operating-points-v2"; + opp-shared; + + opp00 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <800000>; + clock-latency-ns = <40000>; + }; + opp01 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <800000>; + }; + opp02 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <825000>; + }; + opp03 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <850000>; + }; + opp04 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <900000>; + }; + opp05 { + opp-hz = /bits/ 64 <1416000000>; + opp-microvolt = <975000>; + }; + opp06 { + opp-hz = /bits/ 64 <1608000000>; + opp-microvolt = <1050000>; + }; + opp07 { + opp-hz = /bits/ 64 <1800000000>; + opp-microvolt = <1150000>; + }; + opp08 { + opp-hz = /bits/ 64 <2016000000>; + opp-microvolt = <1250000>; + }; + }; + + gpu_opp_table: opp-table-2 { + compatible = "operating-points-v2"; + + opp00 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <800000>; + }; + opp01 { + opp-hz = /bits/ 64 <297000000>; + opp-microvolt = <800000>; + }; + opp02 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <825000>; + }; + opp03 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <850000>; + }; + opp04 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <925000>; + }; + opp05 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1075000>; + }; + }; + + dmc_opp_table: opp-table-3 { + compatible = "operating-points-v2"; + + opp00 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <900000>; + 
}; + opp01 { + opp-hz = /bits/ 64 <666000000>; + opp-microvolt = <900000>; + }; + opp02 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <900000>; + }; + opp03 { + opp-hz = /bits/ 64 <928000000>; + opp-microvolt = <925000>; + }; + }; +}; + +&cpu_l0 { + operating-points-v2 = <&cluster0_opp>; +}; + +&cpu_l1 { + operating-points-v2 = <&cluster0_opp>; +}; + +&cpu_l2 { + operating-points-v2 = <&cluster0_opp>; +}; + +&cpu_l3 { + operating-points-v2 = <&cluster0_opp>; +}; + +&cpu_b0 { + operating-points-v2 = <&cluster1_opp>; +}; + +&cpu_b1 { + operating-points-v2 = <&cluster1_opp>; +}; + +&dmc { + operating-points-v2 = <&dmc_opp_table>; +}; + +&gpu { + operating-points-v2 = <&gpu_opp_table>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi deleted file mode 100644 index fee5e711127992..00000000000000 --- a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -/* - * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd - */ - -/ { - cluster0_opp: opp-table-0 { - compatible = "operating-points-v2"; - opp-shared; - - opp00 { - opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <825000 825000 1250000>; - clock-latency-ns = <40000>; - }; - opp01 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <825000 825000 1250000>; - }; - opp02 { - opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <850000 850000 1250000>; - }; - opp03 { - opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <925000 925000 1250000>; - }; - opp04 { - opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <1000000 1000000 1250000>; - }; - opp05 { - opp-hz = /bits/ 64 <1416000000>; - opp-microvolt = <1125000 1125000 1250000>; - }; - }; - - cluster1_opp: opp-table-1 { - compatible = "operating-points-v2"; - opp-shared; - - opp00 { - opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <825000 825000 1250000>; - clock-latency-ns = <40000>; - }; - opp01 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <825000 825000 1250000>; - }; - opp02 { - opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <825000 825000 1250000>; - }; - opp03 { - opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <875000 875000 1250000>; - }; - opp04 { - opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <950000 950000 1250000>; - }; - opp05 { - opp-hz = /bits/ 64 <1416000000>; - opp-microvolt = <1025000 1025000 1250000>; - }; - opp06 { - opp-hz = /bits/ 64 <1608000000>; - opp-microvolt = <1100000 1100000 1250000>; - }; - opp07 { - opp-hz = /bits/ 64 <1800000000>; - opp-microvolt = <1200000 1200000 1250000>; - }; - }; - - gpu_opp_table: opp-table-2 { - compatible = "operating-points-v2"; - - opp00 { - opp-hz = /bits/ 64 <200000000>; - opp-microvolt = <825000 825000 1150000>; - }; - opp01 { - opp-hz = /bits/ 64 <297000000>; - opp-microvolt = <825000 825000 1150000>; - }; - opp02 { - opp-hz = /bits/ 64 <400000000>; - opp-microvolt = <825000 825000 1150000>; - }; - opp03 { - opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <875000 875000 1150000>; - }; - opp04 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <925000 925000 1150000>; - }; - opp05 { - opp-hz = /bits/ 64 <800000000>; - opp-microvolt = <1100000 1100000 1150000>; - }; - }; -}; - -&cpu_l0 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l1 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l2 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l3 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_b0 { - operating-points-v2 = <&cluster1_opp>; -}; - 
-&cpu_b1 { - operating-points-v2 = <&cluster1_opp>; -}; - -&gpu { - operating-points-v2 = <&gpu_opp_table>; -}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts index e26e2d86279cb9..07ec33f3f55fae 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts @@ -10,7 +10,6 @@ #include #include "dt-bindings/usb/pd.h" #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "Orange Pi RK3399 Board"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts index 294eb2de263deb..a5a7e374bc5947 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts @@ -12,7 +12,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "Pine64 Pinebook Pro"; @@ -32,12 +31,12 @@ chosen { backlight: edp-backlight { compatible = "pwm-backlight"; power-supply = <&vcc_12v>; - pwms = <&pwm0 0 740740 0>; + pwms = <&pwm0 0 125000 0>; }; bat: battery { compatible = "simple-battery"; - charge-full-design-microamp-hours = <9800000>; + charge-full-design-microamp-hours = <10000000>; voltage-max-design-microvolt = <4350000>; voltage-min-design-microvolt = <3000000>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts index ef754ea30a940a..1a44582a49fb6c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts @@ -14,7 +14,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "Pine64 PinePhone Pro"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index d24444cdf54afa..650b1ba9c19213 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -5,7 +5,6 @@ #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { aliases { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi index ca7a446b656895..d95b1cde1fc396 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi @@ -7,7 +7,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { model = "Firefly ROC-RK3399-PC Board"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts index 972aea843afd69..475d57f64d58f5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts @@ -7,8 +7,7 @@ /dts-v1/; #include -#include "rk3399.dtsi" -#include "rk3399-t-opp.dtsi" +#include "rk3399-t.dtsi" / { model = "Radxa ROCK 4C+"; @@ -53,6 +52,21 @@ led-1 { }; }; + rk809-sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "Analog RK809"; + simple-audio-card,mclk-fs = <256>; + + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + + simple-audio-card,codec { + sound-dai = <&rk809>; + }; + }; + sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; clocks = <&rk809 1>; @@ -201,10 +215,13 @@ rk809: pmic@20 { interrupt-parent = <&gpio1>; interrupts = ; #clock-cells = <1>; + clock-names = "mclk"; + clocks = <&cru SCLK_I2S_8CH_OUT>; clock-output-names = "rk808-clkout1", "rk808-clkout2"; pinctrl-names = 
"default"; - pinctrl-0 = <&pmic_int_l>; + pinctrl-0 = <&pmic_int_l>, <&i2s_8ch_mclk>; rockchip,system-power-controller; + #sound-dai-cells = <0>; wakeup-source; vcc1-supply = <&vcc5v0_sys>; @@ -446,6 +463,26 @@ &i2c3 { status = "okay"; }; +&i2s0 { + status = "okay"; +}; + +&i2s0_8ch_bus { + rockchip,pins = + <3 RK_PD0 1 &pcfg_pull_none>, + <3 RK_PD2 1 &pcfg_pull_none>, + <3 RK_PD3 1 &pcfg_pull_none>, + <3 RK_PD7 1 &pcfg_pull_none>; +}; + +&i2s0_8ch_bus_bclk_off { + rockchip,pins = + <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>, + <3 RK_PD2 1 &pcfg_pull_none>, + <3 RK_PD3 1 &pcfg_pull_none>, + <3 RK_PD7 1 &pcfg_pull_none>; +}; + &i2s2 { status = "okay"; }; @@ -473,6 +510,12 @@ bt_wake_l: bt-wake-l { }; }; + i2s0 { + i2s_8ch_mclk: i2s-8ch-mclk { + rockchip,pins = <4 RK_PA0 1 &pcfg_pull_none>; + }; + }; + leds { user_led1: user-led1 { rockchip,pins = <3 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts index 7cfc198bbae74e..a8b8d4acc33712 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4se.dts @@ -5,8 +5,8 @@ */ /dts-v1/; +#include "rk3399-t.dtsi" #include "rk3399-rock-pi-4.dtsi" -#include "rk3399-t-opp.dtsi" / { model = "Radxa ROCK 4SE"; @@ -17,14 +17,6 @@ aliases { }; }; -&pinctrl { - usb2 { - vcc5v0_host_en: vcc5v0-host-en { - rockchip,pins = <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>; - }; - }; -}; - &sdio0 { status = "okay"; @@ -56,10 +48,3 @@ bluetooth { vddio-supply = <&vcc_1v8>; }; }; - -&vcc5v0_host { - enable-active-high; - gpio = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&vcc5v0_host_en>; -}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index b9d6284bb804f7..9666504cd1c146 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -4,11 +4,9 @@ * Copyright (c) 2019 Pragnesh Patel */ -/dts-v1/; #include #include #include -#include "rk3399.dtsi" / { aliases { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts index f5a68d8d072d2d..725ac3c1f6f650 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a-plus.dts @@ -5,8 +5,8 @@ */ /dts-v1/; +#include "rk3399-op1.dtsi" #include "rk3399-rock-pi-4.dtsi" -#include "rk3399-op1-opp.dtsi" / { model = "Radxa ROCK Pi 4A+"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts index c68f45849c441b..32d6bce5e3d4b9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4a.dts @@ -5,8 +5,8 @@ */ /dts-v1/; +#include "rk3399.dtsi" #include "rk3399-rock-pi-4.dtsi" -#include "rk3399-opp.dtsi" / { model = "Radxa ROCK Pi 4A"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts index 8a17c1eaae15ee..682e8b7297c180 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts @@ -5,8 +5,8 @@ */ /dts-v1/; +#include "rk3399-op1.dtsi" #include "rk3399-rock-pi-4.dtsi" -#include "rk3399-op1-opp.dtsi" / { model = "Radxa ROCK Pi 4B+"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts 
b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts index 6ea3180e57ca73..55285c7c6e543b 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts @@ -5,8 +5,8 @@ */ /dts-v1/; +#include "rk3399.dtsi" #include "rk3399-rock-pi-4.dtsi" -#include "rk3399-opp.dtsi" / { model = "Radxa ROCK Pi 4B"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts index 5274938bf1b82d..82ad2ca6b5c2fc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts @@ -6,8 +6,8 @@ */ /dts-v1/; +#include "rk3399.dtsi" #include "rk3399-rock-pi-4.dtsi" -#include "rk3399-opp.dtsi" / { model = "Radxa ROCK Pi 4C"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi index c920ddf44bafd0..8146f870d2bd05 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi @@ -5,9 +5,8 @@ * Copyright (c) 2018 Linaro Ltd. */ -#include "rk3399.dtsi" -#include "rk3399-opp.dtsi" #include +#include "rk3399.dtsi" / { aliases { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi index f30b82a10ca385..11d99d8b34a2be 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi @@ -7,7 +7,6 @@ #include #include #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { aliases { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi index b3ef1c85e7549e..31832aae9ab6d3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi @@ -6,7 +6,6 @@ #include "dt-bindings/pwm/pwm.h" #include "dt-bindings/input/input.h" #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" / { compatible = "rockchip,rk3399-sapphire", "rockchip,rk3399"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-t-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-t-opp.dtsi deleted file mode 100644 index 1ababadda9df63..00000000000000 --- a/arch/arm64/boot/dts/rockchip/rk3399-t-opp.dtsi +++ /dev/null @@ -1,114 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -/* - * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd - * Copyright (c) 2022 Radxa Limited - */ - -/ { - cluster0_opp: opp-table-0 { - compatible = "operating-points-v2"; - opp-shared; - - opp00 { - opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <875000 875000 1250000>; - clock-latency-ns = <40000>; - }; - opp01 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <875000 875000 1250000>; - }; - opp02 { - opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <900000 900000 1250000>; - }; - opp03 { - opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <975000 975000 1250000>; - }; - }; - - cluster1_opp: opp-table-1 { - compatible = "operating-points-v2"; - opp-shared; - - opp00 { - opp-hz = /bits/ 64 <408000000>; - opp-microvolt = <875000 875000 1250000>; - clock-latency-ns = <40000>; - }; - opp01 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <875000 875000 1250000>; - }; - opp02 { - opp-hz = /bits/ 64 <816000000>; - opp-microvolt = <875000 875000 1250000>; - }; - opp03 { - opp-hz = /bits/ 64 <1008000000>; - opp-microvolt = <925000 925000 1250000>; - }; - opp04 { - opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <1000000 1000000 1250000>; - }; - opp05 { - opp-hz = 
/bits/ 64 <1416000000>; - opp-microvolt = <1075000 1075000 1250000>; - }; - opp06 { - opp-hz = /bits/ 64 <1512000000>; - opp-microvolt = <1150000 1150000 1250000>; - }; - }; - - gpu_opp_table: opp-table-2 { - compatible = "operating-points-v2"; - - opp00 { - opp-hz = /bits/ 64 <200000000>; - opp-microvolt = <875000 875000 1150000>; - }; - opp01 { - opp-hz = /bits/ 64 <300000000>; - opp-microvolt = <875000 875000 1150000>; - }; - opp02 { - opp-hz = /bits/ 64 <400000000>; - opp-microvolt = <875000 875000 1150000>; - }; - opp03 { - opp-hz = /bits/ 64 <600000000>; - opp-microvolt = <975000 975000 1150000>; - }; - }; -}; - -&cpu_l0 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l1 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l2 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_l3 { - operating-points-v2 = <&cluster0_opp>; -}; - -&cpu_b0 { - operating-points-v2 = <&cluster1_opp>; -}; - -&cpu_b1 { - operating-points-v2 = <&cluster1_opp>; -}; - -&gpu { - operating-points-v2 = <&gpu_opp_table>; -}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-t.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-t.dtsi new file mode 100644 index 00000000000000..72989f03fcb1cd --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-t.dtsi @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd + * Copyright (c) 2022 Radxa Limited + */ + +#include "rk3399-base.dtsi" + +/ { + cluster0_opp: opp-table-0 { + compatible = "operating-points-v2"; + opp-shared; + + opp00 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <875000 875000 1250000>; + clock-latency-ns = <40000>; + }; + opp01 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <875000 875000 1250000>; + }; + opp02 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <900000 900000 1250000>; + }; + opp03 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <975000 975000 1250000>; + }; + }; + + cluster1_opp: opp-table-1 { + compatible = "operating-points-v2"; + opp-shared; + + opp00 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <875000 875000 1250000>; + clock-latency-ns = <40000>; + }; + opp01 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <875000 875000 1250000>; + }; + opp02 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <875000 875000 1250000>; + }; + opp03 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <925000 925000 1250000>; + }; + opp04 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1000000 1000000 1250000>; + }; + opp05 { + opp-hz = /bits/ 64 <1416000000>; + opp-microvolt = <1075000 1075000 1250000>; + }; + opp06 { + opp-hz = /bits/ 64 <1512000000>; + opp-microvolt = <1150000 1150000 1250000>; + }; + }; + + gpu_opp_table: opp-table-2 { + compatible = "operating-points-v2"; + + opp00 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <875000 875000 1150000>; + }; + opp01 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <875000 875000 1150000>; + }; + opp02 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <875000 875000 1150000>; + }; + opp03 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <975000 975000 1150000>; + }; + }; +}; + +&cpu_l0 { + operating-points-v2 = <&cluster0_opp>; +}; + +&cpu_l1 { + operating-points-v2 = <&cluster0_opp>; +}; + +&cpu_l2 { + operating-points-v2 = <&cluster0_opp>; +}; + +&cpu_l3 { + operating-points-v2 = <&cluster0_opp>; +}; + +&cpu_b0 { + operating-points-v2 = <&cluster1_opp>; +}; + +&cpu_b1 { + operating-points-v2 = <&cluster1_opp>; +}; + +&gpu { + 
operating-points-v2 = <&gpu_opp_table>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 9d5f5b083e3cfa..6bc1249d99e6ba 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -1,3019 +1,135 @@ // SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* - * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd + * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd */ -#include -#include -#include -#include -#include -#include -#include +#include "rk3399-base.dtsi" / { - compatible = "rockchip,rk3399"; + cluster0_opp: opp-table-0 { + compatible = "operating-points-v2"; + opp-shared; - interrupt-parent = <&gic>; - #address-cells = <2>; - #size-cells = <2>; - - aliases { - gpio0 = &gpio0; - gpio1 = &gpio1; - gpio2 = &gpio2; - gpio3 = &gpio3; - gpio4 = &gpio4; - i2c0 = &i2c0; - i2c1 = &i2c1; - i2c2 = &i2c2; - i2c3 = &i2c3; - i2c4 = &i2c4; - i2c5 = &i2c5; - i2c6 = &i2c6; - i2c7 = &i2c7; - i2c8 = &i2c8; - serial0 = &uart0; - serial1 = &uart1; - serial2 = &uart2; - serial3 = &uart3; - serial4 = &uart4; - spi0 = &spi0; - spi1 = &spi1; - spi2 = &spi2; - spi3 = &spi3; - spi4 = &spi4; - spi5 = &spi5; - }; - - cpus { - #address-cells = <2>; - #size-cells = <0>; - - cpu-map { - cluster0 { /* Cortex-A53 */ - core0 { - cpu = <&cpu_l0>; - }; - core1 { - cpu = <&cpu_l1>; - }; - core2 { - cpu = <&cpu_l2>; - }; - core3 { - cpu = <&cpu_l3>; - }; - }; - - cluster1 { /* Cortex-A72 */ - core0 { - cpu = <&cpu_b0>; - }; - core1 { - cpu = <&cpu_b1>; - }; - }; - }; - - cpu_l0: cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; - reg = <0x0 0x0>; - enable-method = "psci"; - capacity-dmips-mhz = <485>; - clocks = <&cru ARMCLKL>; - #cooling-cells = <2>; /* min followed by max */ - dynamic-power-coefficient = <100>; - cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; - i-cache-size = <0x8000>; - i-cache-line-size = <64>; - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = <64>; - d-cache-sets = <128>; - next-level-cache = <&l2_cache_l>; - }; - - cpu_l1: cpu@1 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; - reg = <0x0 0x1>; - enable-method = "psci"; - capacity-dmips-mhz = <485>; - clocks = <&cru ARMCLKL>; - #cooling-cells = <2>; /* min followed by max */ - dynamic-power-coefficient = <100>; - cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; - i-cache-size = <0x8000>; - i-cache-line-size = <64>; - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = <64>; - d-cache-sets = <128>; - next-level-cache = <&l2_cache_l>; - }; - - cpu_l2: cpu@2 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; - reg = <0x0 0x2>; - enable-method = "psci"; - capacity-dmips-mhz = <485>; - clocks = <&cru ARMCLKL>; - #cooling-cells = <2>; /* min followed by max */ - dynamic-power-coefficient = <100>; - cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; - i-cache-size = <0x8000>; - i-cache-line-size = <64>; - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = <64>; - d-cache-sets = <128>; - next-level-cache = <&l2_cache_l>; - }; - - cpu_l3: cpu@3 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; - reg = <0x0 0x3>; - enable-method = "psci"; - capacity-dmips-mhz = <485>; - clocks = <&cru ARMCLKL>; - #cooling-cells = <2>; /* min followed by max */ - dynamic-power-coefficient = <100>; - cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; - i-cache-size = <0x8000>; - i-cache-line-size = <64>; - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = 
<64>; - d-cache-sets = <128>; - next-level-cache = <&l2_cache_l>; - }; - - cpu_b0: cpu@100 { - device_type = "cpu"; - compatible = "arm,cortex-a72"; - reg = <0x0 0x100>; - enable-method = "psci"; - capacity-dmips-mhz = <1024>; - clocks = <&cru ARMCLKB>; - #cooling-cells = <2>; /* min followed by max */ - dynamic-power-coefficient = <436>; - cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; - i-cache-size = <0xC000>; - i-cache-line-size = <64>; - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = <64>; - d-cache-sets = <256>; - next-level-cache = <&l2_cache_b>; - - thermal-idle { - #cooling-cells = <2>; - duration-us = <10000>; - exit-latency-us = <500>; - }; + opp00 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <825000 825000 1250000>; + clock-latency-ns = <40000>; }; - - cpu_b1: cpu@101 { - device_type = "cpu"; - compatible = "arm,cortex-a72"; - reg = <0x0 0x101>; - enable-method = "psci"; - capacity-dmips-mhz = <1024>; - clocks = <&cru ARMCLKB>; - #cooling-cells = <2>; /* min followed by max */ - dynamic-power-coefficient = <436>; - cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; - i-cache-size = <0xC000>; - i-cache-line-size = <64>; - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = <64>; - d-cache-sets = <256>; - next-level-cache = <&l2_cache_b>; - - thermal-idle { - #cooling-cells = <2>; - duration-us = <10000>; - exit-latency-us = <500>; - }; - }; - - l2_cache_l: l2-cache-cluster0 { - compatible = "cache"; - cache-level = <2>; - cache-unified; - cache-size = <0x80000>; - cache-line-size = <64>; - cache-sets = <512>; + opp01 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <825000 825000 1250000>; }; - - l2_cache_b: l2-cache-cluster1 { - compatible = "cache"; - cache-level = <2>; - cache-unified; - cache-size = <0x100000>; - cache-line-size = <64>; - cache-sets = <1024>; + opp02 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <850000 850000 1250000>; }; - - idle-states { - entry-method = "psci"; - - CPU_SLEEP: cpu-sleep { - compatible = "arm,idle-state"; - local-timer-stop; - arm,psci-suspend-param = <0x0010000>; - entry-latency-us = <120>; - exit-latency-us = <250>; - min-residency-us = <900>; - }; - - CLUSTER_SLEEP: cluster-sleep { - compatible = "arm,idle-state"; - local-timer-stop; - arm,psci-suspend-param = <0x1010000>; - entry-latency-us = <400>; - exit-latency-us = <500>; - min-residency-us = <2000>; - }; + opp03 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <925000 925000 1250000>; }; - }; - - display-subsystem { - compatible = "rockchip,display-subsystem"; - ports = <&vopl_out>, <&vopb_out>; - }; - - dmc: memory-controller { - compatible = "rockchip,rk3399-dmc"; - rockchip,pmu = <&pmugrf>; - devfreq-events = <&dfi>; - clocks = <&cru SCLK_DDRC>; - clock-names = "dmc_clk"; - status = "disabled"; - }; - - pmu_a53 { - compatible = "arm,cortex-a53-pmu"; - interrupts = ; - }; - - pmu_a72 { - compatible = "arm,cortex-a72-pmu"; - interrupts = ; - }; - - psci { - compatible = "arm,psci-1.0"; - method = "smc"; - }; - - timer { - compatible = "arm,armv8-timer"; - interrupts = , - , - , - ; - arm,no-tick-in-suspend; - }; - - xin24m: xin24m { - compatible = "fixed-clock"; - clock-frequency = <24000000>; - clock-output-names = "xin24m"; - #clock-cells = <0>; - }; - - pcie0: pcie@f8000000 { - compatible = "rockchip,rk3399-pcie"; - reg = <0x0 0xf8000000 0x0 0x2000000>, - <0x0 0xfd000000 0x0 0x1000000>; - reg-names = "axi-base", "apb-base"; - device_type = "pci"; - #address-cells = <3>; - #size-cells = <2>; - #interrupt-cells = 
<1>; - aspm-no-l0s; - bus-range = <0x0 0x1f>; - clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>, - <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>; - clock-names = "aclk", "aclk-perf", - "hclk", "pm"; - interrupts = , - , - ; - interrupt-names = "sys", "legacy", "client"; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0 0 0 1 &pcie0_intc 0>, - <0 0 0 2 &pcie0_intc 1>, - <0 0 0 3 &pcie0_intc 2>, - <0 0 0 4 &pcie0_intc 3>; - max-link-speed = <1>; - msi-map = <0x0 &its 0x0 0x1000>; - phys = <&pcie_phy 0>, <&pcie_phy 1>, - <&pcie_phy 2>, <&pcie_phy 3>; - phy-names = "pcie-phy-0", "pcie-phy-1", - "pcie-phy-2", "pcie-phy-3"; - ranges = <0x82000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>, - <0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>; - resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, - <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>, - <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, - <&cru SRST_A_PCIE>; - reset-names = "core", "mgmt", "mgmt-sticky", "pipe", - "pm", "pclk", "aclk"; - status = "disabled"; - - pcie0_intc: interrupt-controller { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <1>; + opp04 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1000000 1000000 1250000>; }; - }; - - pcie0_ep: pcie-ep@f8000000 { - compatible = "rockchip,rk3399-pcie-ep"; - reg = <0x0 0xfd000000 0x0 0x1000000>, - <0x0 0xfa000000 0x0 0x2000000>; - reg-names = "apb-base", "mem-base"; - clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>, - <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>; - clock-names = "aclk", "aclk-perf", - "hclk", "pm"; - max-functions = /bits/ 8 <8>; - num-lanes = <4>; - resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, - <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>, - <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, - <&cru SRST_A_PCIE>; - reset-names = "core", "mgmt", "mgmt-sticky", "pipe", - "pm", "pclk", "aclk"; - phys = <&pcie_phy 0>, <&pcie_phy 1>, - <&pcie_phy 2>, <&pcie_phy 3>; - phy-names = "pcie-phy-0", "pcie-phy-1", - "pcie-phy-2", "pcie-phy-3"; - rockchip,max-outbound-regions = <32>; - pinctrl-names = "default"; - pinctrl-0 = <&pcie_clkreqnb_cpm>; - status = "disabled"; - }; - - gmac: ethernet@fe300000 { - compatible = "rockchip,rk3399-gmac"; - reg = <0x0 0xfe300000 0x0 0x10000>; - interrupts = ; - interrupt-names = "macirq"; - clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>, - <&cru SCLK_MAC_TX>, <&cru SCLK_MACREF>, - <&cru SCLK_MACREF_OUT>, <&cru ACLK_GMAC>, - <&cru PCLK_GMAC>; - clock-names = "stmmaceth", "mac_clk_rx", - "mac_clk_tx", "clk_mac_ref", - "clk_mac_refout", "aclk_mac", - "pclk_mac"; - power-domains = <&power RK3399_PD_GMAC>; - resets = <&cru SRST_A_GMAC>; - reset-names = "stmmaceth"; - rockchip,grf = <&grf>; - snps,txpbl = <0x4>; - status = "disabled"; - }; - - sdio0: mmc@fe310000 { - compatible = "rockchip,rk3399-dw-mshc", - "rockchip,rk3288-dw-mshc"; - reg = <0x0 0xfe310000 0x0 0x4000>; - interrupts = ; - max-frequency = <150000000>; - clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, - <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; - clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; - fifo-depth = <0x100>; - power-domains = <&power RK3399_PD_SDIOAUDIO>; - resets = <&cru SRST_SDIO0>; - reset-names = "reset"; - status = "disabled"; - }; - - sdmmc: mmc@fe320000 { - compatible = "rockchip,rk3399-dw-mshc", - "rockchip,rk3288-dw-mshc"; - reg = <0x0 0xfe320000 0x0 0x4000>; - interrupts = ; - max-frequency = <150000000>; - assigned-clocks = <&cru HCLK_SD>; - assigned-clock-rates = <200000000>; - clocks = <&cru HCLK_SDMMC>, <&cru 
SCLK_SDMMC>, - <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; - fifo-depth = <0x100>; - power-domains = <&power RK3399_PD_SD>; - resets = <&cru SRST_SDMMC>; - reset-names = "reset"; - status = "disabled"; - }; - - sdhci: mmc@fe330000 { - compatible = "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1"; - reg = <0x0 0xfe330000 0x0 0x10000>; - interrupts = ; - arasan,soc-ctl-syscon = <&grf>; - assigned-clocks = <&cru SCLK_EMMC>; - assigned-clock-rates = <200000000>; - clocks = <&cru SCLK_EMMC>, <&cru ACLK_EMMC>; - clock-names = "clk_xin", "clk_ahb"; - clock-output-names = "emmc_cardclock"; - #clock-cells = <0>; - phys = <&emmc_phy>; - phy-names = "phy_arasan"; - power-domains = <&power RK3399_PD_EMMC>; - disable-cqe-dcmd; - status = "disabled"; - }; - - usb_host0_ehci: usb@fe380000 { - compatible = "generic-ehci"; - reg = <0x0 0xfe380000 0x0 0x20000>; - interrupts = ; - clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>, - <&u2phy0>; - phys = <&u2phy0_host>; - phy-names = "usb"; - status = "disabled"; - }; - - usb_host0_ohci: usb@fe3a0000 { - compatible = "generic-ohci"; - reg = <0x0 0xfe3a0000 0x0 0x20000>; - interrupts = ; - clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>, - <&u2phy0>; - phys = <&u2phy0_host>; - phy-names = "usb"; - status = "disabled"; - }; - - usb_host1_ehci: usb@fe3c0000 { - compatible = "generic-ehci"; - reg = <0x0 0xfe3c0000 0x0 0x20000>; - interrupts = ; - clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>, - <&u2phy1>; - phys = <&u2phy1_host>; - phy-names = "usb"; - status = "disabled"; - }; - - usb_host1_ohci: usb@fe3e0000 { - compatible = "generic-ohci"; - reg = <0x0 0xfe3e0000 0x0 0x20000>; - interrupts = ; - clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>, - <&u2phy1>; - phys = <&u2phy1_host>; - phy-names = "usb"; - status = "disabled"; - }; - - debug@fe430000 { - compatible = "arm,coresight-cpu-debug", "arm,primecell"; - reg = <0 0xfe430000 0 0x1000>; - clocks = <&cru PCLK_COREDBG_L>; - clock-names = "apb_pclk"; - cpu = <&cpu_l0>; - }; - - debug@fe432000 { - compatible = "arm,coresight-cpu-debug", "arm,primecell"; - reg = <0 0xfe432000 0 0x1000>; - clocks = <&cru PCLK_COREDBG_L>; - clock-names = "apb_pclk"; - cpu = <&cpu_l1>; - }; - - debug@fe434000 { - compatible = "arm,coresight-cpu-debug", "arm,primecell"; - reg = <0 0xfe434000 0 0x1000>; - clocks = <&cru PCLK_COREDBG_L>; - clock-names = "apb_pclk"; - cpu = <&cpu_l2>; - }; - - debug@fe436000 { - compatible = "arm,coresight-cpu-debug", "arm,primecell"; - reg = <0 0xfe436000 0 0x1000>; - clocks = <&cru PCLK_COREDBG_L>; - clock-names = "apb_pclk"; - cpu = <&cpu_l3>; - }; - - debug@fe610000 { - compatible = "arm,coresight-cpu-debug", "arm,primecell"; - reg = <0 0xfe610000 0 0x1000>; - clocks = <&cru PCLK_COREDBG_B>; - clock-names = "apb_pclk"; - cpu = <&cpu_b0>; - }; - - debug@fe710000 { - compatible = "arm,coresight-cpu-debug", "arm,primecell"; - reg = <0 0xfe710000 0 0x1000>; - clocks = <&cru PCLK_COREDBG_B>; - clock-names = "apb_pclk"; - cpu = <&cpu_b1>; - }; - - usbdrd3_0: usb@fe800000 { - compatible = "rockchip,rk3399-dwc3"; - #address-cells = <2>; - #size-cells = <2>; - ranges; - clocks = <&cru SCLK_USB3OTG0_REF>, <&cru SCLK_USB3OTG0_SUSPEND>, - <&cru ACLK_USB3OTG0>, <&cru ACLK_USB3_RKSOC_AXI_PERF>, - <&cru ACLK_USB3>, <&cru ACLK_USB3_GRF>; - clock-names = "ref_clk", "suspend_clk", - "bus_clk", "aclk_usb3_rksoc_axi_perf", - "aclk_usb3", "grf_clk"; - resets = <&cru SRST_A_USB3_OTG0>; - reset-names = "usb3-otg"; - status = "disabled"; - - 
usbdrd_dwc3_0: usb@fe800000 { - compatible = "snps,dwc3"; - reg = <0x0 0xfe800000 0x0 0x100000>; - interrupts = ; - clocks = <&cru SCLK_USB3OTG0_REF>, <&cru ACLK_USB3OTG0>, - <&cru SCLK_USB3OTG0_SUSPEND>; - clock-names = "ref", "bus_early", "suspend"; - dr_mode = "otg"; - phys = <&u2phy0_otg>, <&tcphy0_usb3>; - phy-names = "usb2-phy", "usb3-phy"; - phy_type = "utmi_wide"; - snps,dis_enblslpm_quirk; - snps,dis-u2-freeclk-exists-quirk; - snps,dis_u2_susphy_quirk; - snps,dis-del-phy-power-chg-quirk; - snps,dis-tx-ipgap-linecheck-quirk; - power-domains = <&power RK3399_PD_USB3>; - status = "disabled"; + opp05 { + opp-hz = /bits/ 64 <1416000000>; + opp-microvolt = <1125000 1125000 1250000>; }; }; - usbdrd3_1: usb@fe900000 { - compatible = "rockchip,rk3399-dwc3"; - #address-cells = <2>; - #size-cells = <2>; - ranges; - clocks = <&cru SCLK_USB3OTG1_REF>, <&cru SCLK_USB3OTG1_SUSPEND>, - <&cru ACLK_USB3OTG1>, <&cru ACLK_USB3_RKSOC_AXI_PERF>, - <&cru ACLK_USB3>, <&cru ACLK_USB3_GRF>; - clock-names = "ref_clk", "suspend_clk", - "bus_clk", "aclk_usb3_rksoc_axi_perf", - "aclk_usb3", "grf_clk"; - resets = <&cru SRST_A_USB3_OTG1>; - reset-names = "usb3-otg"; - status = "disabled"; + cluster1_opp: opp-table-1 { + compatible = "operating-points-v2"; + opp-shared; - usbdrd_dwc3_1: usb@fe900000 { - compatible = "snps,dwc3"; - reg = <0x0 0xfe900000 0x0 0x100000>; - interrupts = ; - clocks = <&cru SCLK_USB3OTG1_REF>, <&cru ACLK_USB3OTG1>, - <&cru SCLK_USB3OTG1_SUSPEND>; - clock-names = "ref", "bus_early", "suspend"; - dr_mode = "otg"; - phys = <&u2phy1_otg>, <&tcphy1_usb3>; - phy-names = "usb2-phy", "usb3-phy"; - phy_type = "utmi_wide"; - snps,dis_enblslpm_quirk; - snps,dis-u2-freeclk-exists-quirk; - snps,dis_u2_susphy_quirk; - snps,dis-del-phy-power-chg-quirk; - snps,dis-tx-ipgap-linecheck-quirk; - power-domains = <&power RK3399_PD_USB3>; - status = "disabled"; + opp00 { + opp-hz = /bits/ 64 <408000000>; + opp-microvolt = <825000 825000 1250000>; + clock-latency-ns = <40000>; }; - }; - - cdn_dp: dp@fec00000 { - compatible = "rockchip,rk3399-cdn-dp"; - reg = <0x0 0xfec00000 0x0 0x100000>; - interrupts = ; - assigned-clocks = <&cru SCLK_DP_CORE>, <&cru SCLK_SPDIF_REC_DPTX>; - assigned-clock-rates = <100000000>, <200000000>; - clocks = <&cru SCLK_DP_CORE>, <&cru PCLK_DP_CTRL>, - <&cru SCLK_SPDIF_REC_DPTX>, <&cru PCLK_VIO_GRF>; - clock-names = "core-clk", "pclk", "spdif", "grf"; - phys = <&tcphy0_dp>, <&tcphy1_dp>; - power-domains = <&power RK3399_PD_HDCP>; - resets = <&cru SRST_DPTX_SPDIF_REC>, <&cru SRST_P_UPHY0_DPTX>, - <&cru SRST_P_UPHY0_APB>, <&cru SRST_DP_CORE>; - reset-names = "spdif", "dptx", "apb", "core"; - rockchip,grf = <&grf>; - #sound-dai-cells = <1>; - status = "disabled"; - - ports { - dp_in: port { - #address-cells = <1>; - #size-cells = <0>; - - dp_in_vopb: endpoint@0 { - reg = <0>; - remote-endpoint = <&vopb_out_dp>; - }; - - dp_in_vopl: endpoint@1 { - reg = <1>; - remote-endpoint = <&vopl_out_dp>; - }; - }; + opp01 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <825000 825000 1250000>; }; - }; - - gic: interrupt-controller@fee00000 { - compatible = "arm,gic-v3"; - #interrupt-cells = <4>; - #address-cells = <2>; - #size-cells = <2>; - ranges; - interrupt-controller; - - reg = <0x0 0xfee00000 0 0x10000>, /* GICD */ - <0x0 0xfef00000 0 0xc0000>, /* GICR */ - <0x0 0xfff00000 0 0x10000>, /* GICC */ - <0x0 0xfff10000 0 0x10000>, /* GICH */ - <0x0 0xfff20000 0 0x10000>; /* GICV */ - interrupts = ; - its: msi-controller@fee20000 { - compatible = "arm,gic-v3-its"; - msi-controller; - #msi-cells 
= <1>; - reg = <0x0 0xfee20000 0x0 0x20000>; + opp02 { + opp-hz = /bits/ 64 <816000000>; + opp-microvolt = <825000 825000 1250000>; }; - - ppi-partitions { - ppi_cluster0: interrupt-partition-0 { - affinity = <&cpu_l0 &cpu_l1 &cpu_l2 &cpu_l3>; - }; - - ppi_cluster1: interrupt-partition-1 { - affinity = <&cpu_b0 &cpu_b1>; - }; + opp03 { + opp-hz = /bits/ 64 <1008000000>; + opp-microvolt = <875000 875000 1250000>; }; - }; - - saradc: saradc@ff100000 { - compatible = "rockchip,rk3399-saradc"; - reg = <0x0 0xff100000 0x0 0x100>; - interrupts = ; - #io-channel-cells = <1>; - clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; - clock-names = "saradc", "apb_pclk"; - resets = <&cru SRST_P_SARADC>; - reset-names = "saradc-apb"; - status = "disabled"; - }; - - crypto0: crypto@ff8b0000 { - compatible = "rockchip,rk3399-crypto"; - reg = <0x0 0xff8b0000 0x0 0x4000>; - interrupts = ; - clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>; - clock-names = "hclk_master", "hclk_slave", "sclk"; - resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>; - reset-names = "master", "slave", "crypto-rst"; - }; - - crypto1: crypto@ff8b8000 { - compatible = "rockchip,rk3399-crypto"; - reg = <0x0 0xff8b8000 0x0 0x4000>; - interrupts = ; - clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>; - clock-names = "hclk_master", "hclk_slave", "sclk"; - resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>; - reset-names = "master", "slave", "crypto-rst"; - }; - - i2c1: i2c@ff110000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff110000 0x0 0x1000>; - assigned-clocks = <&cru SCLK_I2C1>; - assigned-clock-rates = <200000000>; - clocks = <&cru SCLK_I2C1>, <&cru PCLK_I2C1>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c1_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - i2c2: i2c@ff120000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff120000 0x0 0x1000>; - assigned-clocks = <&cru SCLK_I2C2>; - assigned-clock-rates = <200000000>; - clocks = <&cru SCLK_I2C2>, <&cru PCLK_I2C2>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c2_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - i2c3: i2c@ff130000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff130000 0x0 0x1000>; - assigned-clocks = <&cru SCLK_I2C3>; - assigned-clock-rates = <200000000>; - clocks = <&cru SCLK_I2C3>, <&cru PCLK_I2C3>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c3_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - i2c5: i2c@ff140000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff140000 0x0 0x1000>; - assigned-clocks = <&cru SCLK_I2C5>; - assigned-clock-rates = <200000000>; - clocks = <&cru SCLK_I2C5>, <&cru PCLK_I2C5>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c5_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - i2c6: i2c@ff150000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff150000 0x0 0x1000>; - assigned-clocks = <&cru SCLK_I2C6>; - assigned-clock-rates = <200000000>; - clocks = <&cru SCLK_I2C6>, <&cru PCLK_I2C6>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c6_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - i2c7: 
i2c@ff160000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff160000 0x0 0x1000>; - assigned-clocks = <&cru SCLK_I2C7>; - assigned-clock-rates = <200000000>; - clocks = <&cru SCLK_I2C7>, <&cru PCLK_I2C7>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c7_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - uart0: serial@ff180000 { - compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; - reg = <0x0 0xff180000 0x0 0x100>; - clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>; - clock-names = "baudclk", "apb_pclk"; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - pinctrl-names = "default"; - pinctrl-0 = <&uart0_xfer>; - status = "disabled"; - }; - - uart1: serial@ff190000 { - compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; - reg = <0x0 0xff190000 0x0 0x100>; - clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>; - clock-names = "baudclk", "apb_pclk"; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - pinctrl-names = "default"; - pinctrl-0 = <&uart1_xfer>; - status = "disabled"; - }; - - uart2: serial@ff1a0000 { - compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; - reg = <0x0 0xff1a0000 0x0 0x100>; - clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>; - clock-names = "baudclk", "apb_pclk"; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - pinctrl-names = "default"; - pinctrl-0 = <&uart2c_xfer>; - status = "disabled"; - }; - - uart3: serial@ff1b0000 { - compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; - reg = <0x0 0xff1b0000 0x0 0x100>; - clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>; - clock-names = "baudclk", "apb_pclk"; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - pinctrl-names = "default"; - pinctrl-0 = <&uart3_xfer>; - status = "disabled"; - }; - - spi0: spi@ff1c0000 { - compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; - reg = <0x0 0xff1c0000 0x0 0x1000>; - clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>; - clock-names = "spiclk", "apb_pclk"; - interrupts = ; - dmas = <&dmac_peri 10>, <&dmac_peri 11>; - dma-names = "tx", "rx"; - pinctrl-names = "default"; - pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - spi1: spi@ff1d0000 { - compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; - reg = <0x0 0xff1d0000 0x0 0x1000>; - clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>; - clock-names = "spiclk", "apb_pclk"; - interrupts = ; - dmas = <&dmac_peri 12>, <&dmac_peri 13>; - dma-names = "tx", "rx"; - pinctrl-names = "default"; - pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - spi2: spi@ff1e0000 { - compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; - reg = <0x0 0xff1e0000 0x0 0x1000>; - clocks = <&cru SCLK_SPI2>, <&cru PCLK_SPI2>; - clock-names = "spiclk", "apb_pclk"; - interrupts = ; - dmas = <&dmac_peri 14>, <&dmac_peri 15>; - dma-names = "tx", "rx"; - pinctrl-names = "default"; - pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - spi4: spi@ff1f0000 { - compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; - reg = <0x0 0xff1f0000 0x0 0x1000>; - clocks = <&cru SCLK_SPI4>, <&cru PCLK_SPI4>; - clock-names = "spiclk", "apb_pclk"; - interrupts = ; - dmas = <&dmac_peri 18>, <&dmac_peri 19>; - dma-names = "tx", "rx"; - pinctrl-names = "default"; - pinctrl-0 = <&spi4_clk &spi4_tx &spi4_rx &spi4_cs0>; - 
#address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - spi5: spi@ff200000 { - compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; - reg = <0x0 0xff200000 0x0 0x1000>; - clocks = <&cru SCLK_SPI5>, <&cru PCLK_SPI5>; - clock-names = "spiclk", "apb_pclk"; - interrupts = ; - dmas = <&dmac_bus 8>, <&dmac_bus 9>; - dma-names = "tx", "rx"; - pinctrl-names = "default"; - pinctrl-0 = <&spi5_clk &spi5_tx &spi5_rx &spi5_cs0>; - power-domains = <&power RK3399_PD_SDIOAUDIO>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - thermal_zones: thermal-zones { - cpu_thermal: cpu-thermal { - polling-delay-passive = <100>; - polling-delay = <1000>; - - thermal-sensors = <&tsadc 0>; - - trips { - cpu_alert0: cpu_alert0 { - temperature = <70000>; - hysteresis = <2000>; - type = "passive"; - }; - cpu_alert1: cpu_alert1 { - temperature = <75000>; - hysteresis = <2000>; - type = "passive"; - }; - cpu_crit: cpu_crit { - temperature = <95000>; - hysteresis = <2000>; - type = "critical"; - }; - }; - - cooling-maps { - map0 { - trip = <&cpu_alert0>; - cooling-device = - <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; - }; - map1 { - trip = <&cpu_alert1>; - cooling-device = - <&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&cpu_l1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&cpu_l2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&cpu_l3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, - <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; - }; - }; + opp04 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <950000 950000 1250000>; }; - - gpu_thermal: gpu-thermal { - polling-delay-passive = <100>; - polling-delay = <1000>; - - thermal-sensors = <&tsadc 1>; - - trips { - gpu_alert0: gpu_alert0 { - temperature = <75000>; - hysteresis = <2000>; - type = "passive"; - }; - gpu_crit: gpu_crit { - temperature = <95000>; - hysteresis = <2000>; - type = "critical"; - }; - }; - - cooling-maps { - map0 { - trip = <&gpu_alert0>; - cooling-device = - <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; - }; - }; + opp05 { + opp-hz = /bits/ 64 <1416000000>; + opp-microvolt = <1025000 1025000 1250000>; }; - }; - - tsadc: tsadc@ff260000 { - compatible = "rockchip,rk3399-tsadc"; - reg = <0x0 0xff260000 0x0 0x100>; - interrupts = ; - assigned-clocks = <&cru SCLK_TSADC>; - assigned-clock-rates = <750000>; - clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>; - clock-names = "tsadc", "apb_pclk"; - resets = <&cru SRST_TSADC>; - reset-names = "tsadc-apb"; - rockchip,grf = <&grf>; - rockchip,hw-tshut-temp = <95000>; - pinctrl-names = "init", "default", "sleep"; - pinctrl-0 = <&otp_pin>; - pinctrl-1 = <&otp_out>; - pinctrl-2 = <&otp_pin>; - #thermal-sensor-cells = <1>; - status = "disabled"; - }; - - qos_emmc: qos@ffa58000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa58000 0x0 0x20>; - }; - - qos_gmac: qos@ffa5c000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa5c000 0x0 0x20>; - }; - - qos_pcie: qos@ffa60080 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa60080 0x0 0x20>; - }; - - qos_usb_host0: qos@ffa60100 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa60100 0x0 0x20>; - }; - - qos_usb_host1: qos@ffa60180 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa60180 0x0 0x20>; - }; - - qos_usb_otg0: qos@ffa70000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa70000 0x0 0x20>; - }; - - qos_usb_otg1: qos@ffa70080 { - compatible = 
"rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa70080 0x0 0x20>; - }; - - qos_sd: qos@ffa74000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa74000 0x0 0x20>; - }; - - qos_sdioaudio: qos@ffa76000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa76000 0x0 0x20>; - }; - - qos_hdcp: qos@ffa90000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa90000 0x0 0x20>; - }; - - qos_iep: qos@ffa98000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffa98000 0x0 0x20>; - }; - - qos_isp0_m0: qos@ffaa0000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffaa0000 0x0 0x20>; - }; - - qos_isp0_m1: qos@ffaa0080 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffaa0080 0x0 0x20>; - }; - - qos_isp1_m0: qos@ffaa8000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffaa8000 0x0 0x20>; - }; - - qos_isp1_m1: qos@ffaa8080 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffaa8080 0x0 0x20>; - }; - - qos_rga_r: qos@ffab0000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffab0000 0x0 0x20>; - }; - - qos_rga_w: qos@ffab0080 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffab0080 0x0 0x20>; - }; - - qos_video_m0: qos@ffab8000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffab8000 0x0 0x20>; - }; - - qos_video_m1_r: qos@ffac0000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffac0000 0x0 0x20>; - }; - - qos_video_m1_w: qos@ffac0080 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffac0080 0x0 0x20>; - }; - - qos_vop_big_r: qos@ffac8000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffac8000 0x0 0x20>; - }; - - qos_vop_big_w: qos@ffac8080 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffac8080 0x0 0x20>; - }; - - qos_vop_little: qos@ffad0000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffad0000 0x0 0x20>; - }; - - qos_perihp: qos@ffad8080 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffad8080 0x0 0x20>; - }; - - qos_gpu: qos@ffae0000 { - compatible = "rockchip,rk3399-qos", "syscon"; - reg = <0x0 0xffae0000 0x0 0x20>; - }; - - pmu: power-management@ff310000 { - compatible = "rockchip,rk3399-pmu", "syscon", "simple-mfd"; - reg = <0x0 0xff310000 0x0 0x1000>; - - /* - * Note: RK3399 supports 6 voltage domains including VD_CORE_L, - * VD_CORE_B, VD_CENTER, VD_GPU, VD_LOGIC and VD_PMU. - * Some of the power domains are grouped together for every - * voltage domain. - * The detail contents as below. 
- */ - power: power-controller { - compatible = "rockchip,rk3399-power-controller"; - #power-domain-cells = <1>; - #address-cells = <1>; - #size-cells = <0>; - - /* These power domains are grouped by VD_CENTER */ - power-domain@RK3399_PD_IEP { - reg = ; - clocks = <&cru ACLK_IEP>, - <&cru HCLK_IEP>; - pm_qos = <&qos_iep>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_RGA { - reg = ; - clocks = <&cru ACLK_RGA>, - <&cru HCLK_RGA>; - pm_qos = <&qos_rga_r>, - <&qos_rga_w>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_VCODEC { - reg = ; - clocks = <&cru ACLK_VCODEC>, - <&cru HCLK_VCODEC>; - pm_qos = <&qos_video_m0>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_VDU { - reg = ; - clocks = <&cru ACLK_VDU>, - <&cru HCLK_VDU>, - <&cru SCLK_VDU_CA>, - <&cru SCLK_VDU_CORE>; - pm_qos = <&qos_video_m1_r>, - <&qos_video_m1_w>; - #power-domain-cells = <0>; - }; - - /* These power domains are grouped by VD_GPU */ - power-domain@RK3399_PD_GPU { - reg = ; - clocks = <&cru ACLK_GPU>; - pm_qos = <&qos_gpu>; - #power-domain-cells = <0>; - }; - - /* These power domains are grouped by VD_LOGIC */ - power-domain@RK3399_PD_EDP { - reg = ; - clocks = <&cru PCLK_EDP_CTRL>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_EMMC { - reg = ; - clocks = <&cru ACLK_EMMC>; - pm_qos = <&qos_emmc>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_GMAC { - reg = ; - clocks = <&cru ACLK_GMAC>, - <&cru PCLK_GMAC>; - pm_qos = <&qos_gmac>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_SD { - reg = ; - clocks = <&cru HCLK_SDMMC>, - <&cru SCLK_SDMMC>; - pm_qos = <&qos_sd>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_SDIOAUDIO { - reg = ; - clocks = <&cru HCLK_SDIO>; - pm_qos = <&qos_sdioaudio>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_TCPD0 { - reg = ; - clocks = <&cru SCLK_UPHY0_TCPDCORE>, - <&cru SCLK_UPHY0_TCPDPHY_REF>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_TCPD1 { - reg = ; - clocks = <&cru SCLK_UPHY1_TCPDCORE>, - <&cru SCLK_UPHY1_TCPDPHY_REF>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_USB3 { - reg = ; - clocks = <&cru ACLK_USB3>; - pm_qos = <&qos_usb_otg0>, - <&qos_usb_otg1>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_VIO { - reg = ; - #power-domain-cells = <1>; - #address-cells = <1>; - #size-cells = <0>; - - power-domain@RK3399_PD_HDCP { - reg = ; - clocks = <&cru ACLK_HDCP>, - <&cru HCLK_HDCP>, - <&cru PCLK_HDCP>; - pm_qos = <&qos_hdcp>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_ISP0 { - reg = ; - clocks = <&cru ACLK_ISP0>, - <&cru HCLK_ISP0>; - pm_qos = <&qos_isp0_m0>, - <&qos_isp0_m1>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_ISP1 { - reg = ; - clocks = <&cru ACLK_ISP1>, - <&cru HCLK_ISP1>; - pm_qos = <&qos_isp1_m0>, - <&qos_isp1_m1>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_VO { - reg = ; - #power-domain-cells = <1>; - #address-cells = <1>; - #size-cells = <0>; - - power-domain@RK3399_PD_VOPB { - reg = ; - clocks = <&cru ACLK_VOP0>, - <&cru HCLK_VOP0>; - pm_qos = <&qos_vop_big_r>, - <&qos_vop_big_w>; - #power-domain-cells = <0>; - }; - power-domain@RK3399_PD_VOPL { - reg = ; - clocks = <&cru ACLK_VOP1>, - <&cru HCLK_VOP1>; - pm_qos = <&qos_vop_little>; - #power-domain-cells = <0>; - }; - }; - }; + opp06 { + opp-hz = /bits/ 64 <1608000000>; + opp-microvolt = <1100000 1100000 1250000>; }; - }; - - pmugrf: syscon@ff320000 { - compatible = "rockchip,rk3399-pmugrf", "syscon", "simple-mfd"; - reg = <0x0 0xff320000 0x0 
0x1000>; - - pmu_io_domains: io-domains { - compatible = "rockchip,rk3399-pmu-io-voltage-domain"; - status = "disabled"; + opp07 { + opp-hz = /bits/ 64 <1800000000>; + opp-microvolt = <1200000 1200000 1250000>; }; }; - spi3: spi@ff350000 { - compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi"; - reg = <0x0 0xff350000 0x0 0x1000>; - clocks = <&pmucru SCLK_SPI3_PMU>, <&pmucru PCLK_SPI3_PMU>; - clock-names = "spiclk", "apb_pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&spi3_clk &spi3_tx &spi3_rx &spi3_cs0>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - uart4: serial@ff370000 { - compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart"; - reg = <0x0 0xff370000 0x0 0x100>; - clocks = <&pmucru SCLK_UART4_PMU>, <&pmucru PCLK_UART4_PMU>; - clock-names = "baudclk", "apb_pclk"; - interrupts = ; - reg-shift = <2>; - reg-io-width = <4>; - pinctrl-names = "default"; - pinctrl-0 = <&uart4_xfer>; - status = "disabled"; - }; - - i2c0: i2c@ff3c0000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff3c0000 0x0 0x1000>; - assigned-clocks = <&pmucru SCLK_I2C0_PMU>; - assigned-clock-rates = <200000000>; - clocks = <&pmucru SCLK_I2C0_PMU>, <&pmucru PCLK_I2C0_PMU>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c0_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - i2c4: i2c@ff3d0000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff3d0000 0x0 0x1000>; - assigned-clocks = <&pmucru SCLK_I2C4_PMU>; - assigned-clock-rates = <200000000>; - clocks = <&pmucru SCLK_I2C4_PMU>, <&pmucru PCLK_I2C4_PMU>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c4_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - i2c8: i2c@ff3e0000 { - compatible = "rockchip,rk3399-i2c"; - reg = <0x0 0xff3e0000 0x0 0x1000>; - assigned-clocks = <&pmucru SCLK_I2C8_PMU>; - assigned-clock-rates = <200000000>; - clocks = <&pmucru SCLK_I2C8_PMU>, <&pmucru PCLK_I2C8_PMU>; - clock-names = "i2c", "pclk"; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&i2c8_xfer>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - }; - - pwm0: pwm@ff420000 { - compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm"; - reg = <0x0 0xff420000 0x0 0x10>; - #pwm-cells = <3>; - pinctrl-names = "default"; - pinctrl-0 = <&pwm0_pin>; - clocks = <&pmucru PCLK_RKPWM_PMU>; - status = "disabled"; - }; - - pwm1: pwm@ff420010 { - compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm"; - reg = <0x0 0xff420010 0x0 0x10>; - #pwm-cells = <3>; - pinctrl-names = "default"; - pinctrl-0 = <&pwm1_pin>; - clocks = <&pmucru PCLK_RKPWM_PMU>; - status = "disabled"; - }; - - pwm2: pwm@ff420020 { - compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm"; - reg = <0x0 0xff420020 0x0 0x10>; - #pwm-cells = <3>; - pinctrl-names = "default"; - pinctrl-0 = <&pwm2_pin>; - clocks = <&pmucru PCLK_RKPWM_PMU>; - status = "disabled"; - }; - - pwm3: pwm@ff420030 { - compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm"; - reg = <0x0 0xff420030 0x0 0x10>; - #pwm-cells = <3>; - pinctrl-names = "default"; - pinctrl-0 = <&pwm3a_pin>; - clocks = <&pmucru PCLK_RKPWM_PMU>; - status = "disabled"; - }; - - dfi: dfi@ff630000 { - reg = <0x00 0xff630000 0x00 0x4000>; - compatible = "rockchip,rk3399-dfi"; - rockchip,pmu = <&pmugrf>; - interrupts = ; - clocks = <&cru PCLK_DDR_MON>; - clock-names = "pclk_ddr_mon"; - }; - - vpu: video-codec@ff650000 { - 
compatible = "rockchip,rk3399-vpu"; - reg = <0x0 0xff650000 0x0 0x800>; - interrupts = , - ; - interrupt-names = "vepu", "vdpu"; - clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>; - clock-names = "aclk", "hclk"; - iommus = <&vpu_mmu>; - power-domains = <&power RK3399_PD_VCODEC>; - }; - - vpu_mmu: iommu@ff650800 { - compatible = "rockchip,iommu"; - reg = <0x0 0xff650800 0x0 0x40>; - interrupts = ; - clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>; - clock-names = "aclk", "iface"; - #iommu-cells = <0>; - power-domains = <&power RK3399_PD_VCODEC>; - }; - - vdec: video-codec@ff660000 { - compatible = "rockchip,rk3399-vdec"; - reg = <0x0 0xff660000 0x0 0x480>; - interrupts = ; - clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>, - <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>; - clock-names = "axi", "ahb", "cabac", "core"; - iommus = <&vdec_mmu>; - power-domains = <&power RK3399_PD_VDU>; - }; - - vdec_mmu: iommu@ff660480 { - compatible = "rockchip,iommu"; - reg = <0x0 0xff660480 0x0 0x40>, <0x0 0xff6604c0 0x0 0x40>; - interrupts = ; - clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>; - clock-names = "aclk", "iface"; - power-domains = <&power RK3399_PD_VDU>; - #iommu-cells = <0>; - }; - - iep_mmu: iommu@ff670800 { - compatible = "rockchip,iommu"; - reg = <0x0 0xff670800 0x0 0x40>; - interrupts = ; - clocks = <&cru ACLK_IEP>, <&cru HCLK_IEP>; - clock-names = "aclk", "iface"; - #iommu-cells = <0>; - status = "disabled"; - }; - - rga: rga@ff680000 { - compatible = "rockchip,rk3399-rga"; - reg = <0x0 0xff680000 0x0 0x10000>; - interrupts = ; - clocks = <&cru ACLK_RGA>, <&cru HCLK_RGA>, <&cru SCLK_RGA_CORE>; - clock-names = "aclk", "hclk", "sclk"; - resets = <&cru SRST_RGA_CORE>, <&cru SRST_A_RGA>, <&cru SRST_H_RGA>; - reset-names = "core", "axi", "ahb"; - power-domains = <&power RK3399_PD_RGA>; - }; - - efuse0: efuse@ff690000 { - compatible = "rockchip,rk3399-efuse"; - reg = <0x0 0xff690000 0x0 0x80>; - #address-cells = <1>; - #size-cells = <1>; - clocks = <&cru PCLK_EFUSE1024NS>; - clock-names = "pclk_efuse"; + gpu_opp_table: opp-table-2 { + compatible = "operating-points-v2"; - /* Data cells */ - cpu_id: cpu-id@7 { - reg = <0x07 0x10>; + opp00 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <825000 825000 1150000>; }; - cpub_leakage: cpu-leakage@17 { - reg = <0x17 0x1>; + opp01 { + opp-hz = /bits/ 64 <297000000>; + opp-microvolt = <825000 825000 1150000>; }; - gpu_leakage: gpu-leakage@18 { - reg = <0x18 0x1>; + opp02 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <825000 825000 1150000>; }; - center_leakage: center-leakage@19 { - reg = <0x19 0x1>; + opp03 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <875000 875000 1150000>; }; - cpul_leakage: cpu-leakage@1a { - reg = <0x1a 0x1>; + opp04 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <925000 925000 1150000>; }; - logic_leakage: logic-leakage@1b { - reg = <0x1b 0x1>; + opp05 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1100000 1100000 1150000>; }; - wafer_info: wafer-info@1c { - reg = <0x1c 0x1>; - }; - }; - - dmac_bus: dma-controller@ff6d0000 { - compatible = "arm,pl330", "arm,primecell"; - reg = <0x0 0xff6d0000 0x0 0x4000>; - interrupts = , - ; - #dma-cells = <1>; - arm,pl330-periph-burst; - clocks = <&cru ACLK_DMAC0_PERILP>; - clock-names = "apb_pclk"; - }; - - dmac_peri: dma-controller@ff6e0000 { - compatible = "arm,pl330", "arm,primecell"; - reg = <0x0 0xff6e0000 0x0 0x4000>; - interrupts = , - ; - #dma-cells = <1>; - arm,pl330-periph-burst; - clocks = <&cru ACLK_DMAC1_PERILP>; - clock-names = "apb_pclk"; - }; - - pmucru: 
clock-controller@ff750000 { - compatible = "rockchip,rk3399-pmucru"; - reg = <0x0 0xff750000 0x0 0x1000>; - clocks = <&xin24m>; - clock-names = "xin24m"; - rockchip,grf = <&pmugrf>; - #clock-cells = <1>; - #reset-cells = <1>; - assigned-clocks = <&pmucru PLL_PPLL>; - assigned-clock-rates = <676000000>; - }; - - cru: clock-controller@ff760000 { - compatible = "rockchip,rk3399-cru"; - reg = <0x0 0xff760000 0x0 0x1000>; - clocks = <&xin24m>; - clock-names = "xin24m"; - rockchip,grf = <&grf>; - #clock-cells = <1>; - #reset-cells = <1>; - assigned-clocks = - <&cru PLL_GPLL>, <&cru PLL_CPLL>, - <&cru PLL_NPLL>, - <&cru ACLK_PERIHP>, <&cru HCLK_PERIHP>, - <&cru PCLK_PERIHP>, - <&cru ACLK_PERILP0>, <&cru HCLK_PERILP0>, - <&cru PCLK_PERILP0>, <&cru ACLK_CCI>, - <&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>, - <&cru ACLK_VIO>, <&cru ACLK_HDCP>, - <&cru ACLK_GIC_PRE>, - <&cru PCLK_DDR>, - <&cru ACLK_VDU>; - assigned-clock-rates = - <594000000>, <800000000>, - <1000000000>, - <150000000>, <75000000>, - <37500000>, - <100000000>, <100000000>, - <50000000>, <600000000>, - <100000000>, <50000000>, - <400000000>, <400000000>, - <200000000>, - <200000000>, - <400000000>; }; +}; - grf: syscon@ff770000 { - compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd"; - reg = <0x0 0xff770000 0x0 0x10000>; - #address-cells = <1>; - #size-cells = <1>; - - io_domains: io-domains { - compatible = "rockchip,rk3399-io-voltage-domain"; - status = "disabled"; - }; - - mipi_dphy_rx0: mipi-dphy-rx0 { - compatible = "rockchip,rk3399-mipi-dphy-rx0"; - clocks = <&cru SCLK_MIPIDPHY_REF>, - <&cru SCLK_DPHY_RX0_CFG>, - <&cru PCLK_VIO_GRF>; - clock-names = "dphy-ref", "dphy-cfg", "grf"; - power-domains = <&power RK3399_PD_VIO>; - #phy-cells = <0>; - status = "disabled"; - }; - - u2phy0: usb2phy@e450 { - compatible = "rockchip,rk3399-usb2phy"; - reg = <0xe450 0x10>; - clocks = <&cru SCLK_USB2PHY0_REF>; - clock-names = "phyclk"; - #clock-cells = <0>; - clock-output-names = "clk_usbphy0_480m"; - status = "disabled"; - - u2phy0_host: host-port { - #phy-cells = <0>; - interrupts = ; - interrupt-names = "linestate"; - status = "disabled"; - }; - - u2phy0_otg: otg-port { - #phy-cells = <0>; - interrupts = , - , - ; - interrupt-names = "otg-bvalid", "otg-id", - "linestate"; - status = "disabled"; - }; - }; - - u2phy1: usb2phy@e460 { - compatible = "rockchip,rk3399-usb2phy"; - reg = <0xe460 0x10>; - clocks = <&cru SCLK_USB2PHY1_REF>; - clock-names = "phyclk"; - #clock-cells = <0>; - clock-output-names = "clk_usbphy1_480m"; - status = "disabled"; - - u2phy1_host: host-port { - #phy-cells = <0>; - interrupts = ; - interrupt-names = "linestate"; - status = "disabled"; - }; +&cpu_l0 { + operating-points-v2 = <&cluster0_opp>; +}; - u2phy1_otg: otg-port { - #phy-cells = <0>; - interrupts = , - , - ; - interrupt-names = "otg-bvalid", "otg-id", - "linestate"; - status = "disabled"; - }; - }; +&cpu_l1 { + operating-points-v2 = <&cluster0_opp>; +}; - emmc_phy: phy@f780 { - compatible = "rockchip,rk3399-emmc-phy"; - reg = <0xf780 0x24>; - clocks = <&sdhci>; - clock-names = "emmcclk"; - drive-impedance-ohm = <50>; - #phy-cells = <0>; - status = "disabled"; - }; +&cpu_l2 { + operating-points-v2 = <&cluster0_opp>; +}; - pcie_phy: pcie-phy { - compatible = "rockchip,rk3399-pcie-phy"; - clocks = <&cru SCLK_PCIEPHY_REF>; - clock-names = "refclk"; - #phy-cells = <1>; - resets = <&cru SRST_PCIEPHY>; - reset-names = "phy"; - status = "disabled"; - }; - }; +&cpu_l3 { + operating-points-v2 = <&cluster0_opp>; +}; - tcphy0: phy@ff7c0000 { - compatible = 
"rockchip,rk3399-typec-phy"; - reg = <0x0 0xff7c0000 0x0 0x40000>; - clocks = <&cru SCLK_UPHY0_TCPDCORE>, - <&cru SCLK_UPHY0_TCPDPHY_REF>; - clock-names = "tcpdcore", "tcpdphy-ref"; - assigned-clocks = <&cru SCLK_UPHY0_TCPDCORE>; - assigned-clock-rates = <50000000>; - power-domains = <&power RK3399_PD_TCPD0>; - resets = <&cru SRST_UPHY0>, - <&cru SRST_UPHY0_PIPE_L00>, - <&cru SRST_P_UPHY0_TCPHY>; - reset-names = "uphy", "uphy-pipe", "uphy-tcphy"; - rockchip,grf = <&grf>; - status = "disabled"; +&cpu_b0 { + operating-points-v2 = <&cluster1_opp>; +}; - tcphy0_dp: dp-port { - #phy-cells = <0>; - }; +&cpu_b1 { + operating-points-v2 = <&cluster1_opp>; +}; - tcphy0_usb3: usb3-port { - #phy-cells = <0>; - }; - }; - - tcphy1: phy@ff800000 { - compatible = "rockchip,rk3399-typec-phy"; - reg = <0x0 0xff800000 0x0 0x40000>; - clocks = <&cru SCLK_UPHY1_TCPDCORE>, - <&cru SCLK_UPHY1_TCPDPHY_REF>; - clock-names = "tcpdcore", "tcpdphy-ref"; - assigned-clocks = <&cru SCLK_UPHY1_TCPDCORE>; - assigned-clock-rates = <50000000>; - power-domains = <&power RK3399_PD_TCPD1>; - resets = <&cru SRST_UPHY1>, - <&cru SRST_UPHY1_PIPE_L00>, - <&cru SRST_P_UPHY1_TCPHY>; - reset-names = "uphy", "uphy-pipe", "uphy-tcphy"; - rockchip,grf = <&grf>; - status = "disabled"; - - tcphy1_dp: dp-port { - #phy-cells = <0>; - }; - - tcphy1_usb3: usb3-port { - #phy-cells = <0>; - }; - }; - - watchdog@ff848000 { - compatible = "rockchip,rk3399-wdt", "snps,dw-wdt"; - reg = <0x0 0xff848000 0x0 0x100>; - clocks = <&cru PCLK_WDT>; - interrupts = ; - }; - - rktimer: rktimer@ff850000 { - compatible = "rockchip,rk3399-timer"; - reg = <0x0 0xff850000 0x0 0x1000>; - interrupts = ; - clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>; - clock-names = "pclk", "timer"; - }; - - spdif: spdif@ff870000 { - compatible = "rockchip,rk3399-spdif"; - reg = <0x0 0xff870000 0x0 0x1000>; - interrupts = ; - dmas = <&dmac_bus 7>; - dma-names = "tx"; - clock-names = "mclk", "hclk"; - clocks = <&cru SCLK_SPDIF_8CH>, <&cru HCLK_SPDIF>; - pinctrl-names = "default"; - pinctrl-0 = <&spdif_bus>; - power-domains = <&power RK3399_PD_SDIOAUDIO>; - #sound-dai-cells = <0>; - status = "disabled"; - }; - - i2s0: i2s@ff880000 { - compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s"; - reg = <0x0 0xff880000 0x0 0x1000>; - rockchip,grf = <&grf>; - interrupts = ; - dmas = <&dmac_bus 0>, <&dmac_bus 1>; - dma-names = "tx", "rx"; - clock-names = "i2s_clk", "i2s_hclk"; - clocks = <&cru SCLK_I2S0_8CH>, <&cru HCLK_I2S0_8CH>; - pinctrl-names = "bclk_on", "bclk_off"; - pinctrl-0 = <&i2s0_8ch_bus>; - pinctrl-1 = <&i2s0_8ch_bus_bclk_off>; - power-domains = <&power RK3399_PD_SDIOAUDIO>; - #sound-dai-cells = <0>; - status = "disabled"; - }; - - i2s1: i2s@ff890000 { - compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s"; - reg = <0x0 0xff890000 0x0 0x1000>; - interrupts = ; - dmas = <&dmac_bus 2>, <&dmac_bus 3>; - dma-names = "tx", "rx"; - clock-names = "i2s_clk", "i2s_hclk"; - clocks = <&cru SCLK_I2S1_8CH>, <&cru HCLK_I2S1_8CH>; - pinctrl-names = "default"; - pinctrl-0 = <&i2s1_2ch_bus>; - power-domains = <&power RK3399_PD_SDIOAUDIO>; - #sound-dai-cells = <0>; - status = "disabled"; - }; - - i2s2: i2s@ff8a0000 { - compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s"; - reg = <0x0 0xff8a0000 0x0 0x1000>; - interrupts = ; - dmas = <&dmac_bus 4>, <&dmac_bus 5>; - dma-names = "tx", "rx"; - clock-names = "i2s_clk", "i2s_hclk"; - clocks = <&cru SCLK_I2S2_8CH>, <&cru HCLK_I2S2_8CH>; - power-domains = <&power RK3399_PD_SDIOAUDIO>; - #sound-dai-cells = <0>; - status = "disabled"; - }; 
- - vopl: vop@ff8f0000 { - compatible = "rockchip,rk3399-vop-lit"; - reg = <0x0 0xff8f0000 0x0 0x2000>, <0x0 0xff8f2000 0x0 0x400>; - interrupts = ; - assigned-clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>; - assigned-clock-rates = <400000000>, <100000000>; - clocks = <&cru ACLK_VOP1>, <&cru DCLK_VOP1>, <&cru HCLK_VOP1>; - clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; - iommus = <&vopl_mmu>; - power-domains = <&power RK3399_PD_VOPL>; - resets = <&cru SRST_A_VOP1>, <&cru SRST_H_VOP1>, <&cru SRST_D_VOP1>; - reset-names = "axi", "ahb", "dclk"; - status = "disabled"; - - vopl_out: port { - #address-cells = <1>; - #size-cells = <0>; - - vopl_out_mipi: endpoint@0 { - reg = <0>; - remote-endpoint = <&mipi_in_vopl>; - }; - - vopl_out_edp: endpoint@1 { - reg = <1>; - remote-endpoint = <&edp_in_vopl>; - }; - - vopl_out_hdmi: endpoint@2 { - reg = <2>; - remote-endpoint = <&hdmi_in_vopl>; - }; - - vopl_out_mipi1: endpoint@3 { - reg = <3>; - remote-endpoint = <&mipi1_in_vopl>; - }; - - vopl_out_dp: endpoint@4 { - reg = <4>; - remote-endpoint = <&dp_in_vopl>; - }; - }; - }; - - vopl_mmu: iommu@ff8f3f00 { - compatible = "rockchip,iommu"; - reg = <0x0 0xff8f3f00 0x0 0x100>; - interrupts = ; - clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>; - clock-names = "aclk", "iface"; - power-domains = <&power RK3399_PD_VOPL>; - #iommu-cells = <0>; - status = "disabled"; - }; - - vopb: vop@ff900000 { - compatible = "rockchip,rk3399-vop-big"; - reg = <0x0 0xff900000 0x0 0x2000>, <0x0 0xff902000 0x0 0x1000>; - interrupts = ; - assigned-clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>; - assigned-clock-rates = <400000000>, <100000000>; - clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>; - clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; - iommus = <&vopb_mmu>; - power-domains = <&power RK3399_PD_VOPB>; - resets = <&cru SRST_A_VOP0>, <&cru SRST_H_VOP0>, <&cru SRST_D_VOP0>; - reset-names = "axi", "ahb", "dclk"; - status = "disabled"; - - vopb_out: port { - #address-cells = <1>; - #size-cells = <0>; - - vopb_out_edp: endpoint@0 { - reg = <0>; - remote-endpoint = <&edp_in_vopb>; - }; - - vopb_out_mipi: endpoint@1 { - reg = <1>; - remote-endpoint = <&mipi_in_vopb>; - }; - - vopb_out_hdmi: endpoint@2 { - reg = <2>; - remote-endpoint = <&hdmi_in_vopb>; - }; - - vopb_out_mipi1: endpoint@3 { - reg = <3>; - remote-endpoint = <&mipi1_in_vopb>; - }; - - vopb_out_dp: endpoint@4 { - reg = <4>; - remote-endpoint = <&dp_in_vopb>; - }; - }; - }; - - vopb_mmu: iommu@ff903f00 { - compatible = "rockchip,iommu"; - reg = <0x0 0xff903f00 0x0 0x100>; - interrupts = ; - clocks = <&cru ACLK_VOP0>, <&cru HCLK_VOP0>; - clock-names = "aclk", "iface"; - power-domains = <&power RK3399_PD_VOPB>; - #iommu-cells = <0>; - status = "disabled"; - }; - - isp0: isp0@ff910000 { - compatible = "rockchip,rk3399-cif-isp"; - reg = <0x0 0xff910000 0x0 0x4000>; - interrupts = ; - clocks = <&cru SCLK_ISP0>, - <&cru ACLK_ISP0_WRAPPER>, - <&cru HCLK_ISP0_WRAPPER>; - clock-names = "isp", "aclk", "hclk"; - iommus = <&isp0_mmu>; - phys = <&mipi_dphy_rx0>; - phy-names = "dphy"; - power-domains = <&power RK3399_PD_ISP0>; - status = "disabled"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <0>; - }; - }; - }; - - isp0_mmu: iommu@ff914000 { - compatible = "rockchip,iommu"; - reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>; - interrupts = ; - clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>; - clock-names = "aclk", "iface"; - #iommu-cells = <0>; - power-domains = 
<&power RK3399_PD_ISP0>; - rockchip,disable-mmu-reset; - }; - - isp1: isp1@ff920000 { - compatible = "rockchip,rk3399-cif-isp"; - reg = <0x0 0xff920000 0x0 0x4000>; - interrupts = ; - clocks = <&cru SCLK_ISP1>, - <&cru ACLK_ISP1_WRAPPER>, - <&cru HCLK_ISP1_WRAPPER>; - clock-names = "isp", "aclk", "hclk"; - iommus = <&isp1_mmu>; - phys = <&mipi_dsi1>; - phy-names = "dphy"; - power-domains = <&power RK3399_PD_ISP1>; - status = "disabled"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <0>; - }; - }; - }; - - isp1_mmu: iommu@ff924000 { - compatible = "rockchip,iommu"; - reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>; - interrupts = ; - clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>; - clock-names = "aclk", "iface"; - #iommu-cells = <0>; - power-domains = <&power RK3399_PD_ISP1>; - rockchip,disable-mmu-reset; - }; - - hdmi_sound: hdmi-sound { - compatible = "simple-audio-card"; - simple-audio-card,format = "i2s"; - simple-audio-card,mclk-fs = <256>; - simple-audio-card,name = "hdmi-sound"; - status = "disabled"; - - simple-audio-card,cpu { - sound-dai = <&i2s2>; - }; - simple-audio-card,codec { - sound-dai = <&hdmi>; - }; - }; - - hdmi: hdmi@ff940000 { - compatible = "rockchip,rk3399-dw-hdmi"; - reg = <0x0 0xff940000 0x0 0x20000>; - reg-io-width = <4>; - interrupts = ; - clocks = <&cru PCLK_HDMI_CTRL>, - <&cru SCLK_HDMI_SFR>, - <&cru SCLK_HDMI_CEC>, - <&cru PCLK_VIO_GRF>, - <&cru PLL_VPLL>; - clock-names = "iahb", "isfr", "cec", "grf", "ref"; - power-domains = <&power RK3399_PD_HDCP>; - rockchip,grf = <&grf>; - #sound-dai-cells = <0>; - status = "disabled"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - hdmi_in: port@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <0>; - - hdmi_in_vopb: endpoint@0 { - reg = <0>; - remote-endpoint = <&vopb_out_hdmi>; - }; - hdmi_in_vopl: endpoint@1 { - reg = <1>; - remote-endpoint = <&vopl_out_hdmi>; - }; - }; - - hdmi_out: port@1 { - reg = <1>; - }; - }; - }; - - mipi_dsi: dsi@ff960000 { - compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; - reg = <0x0 0xff960000 0x0 0x8000>; - interrupts = ; - clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>, - <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>; - clock-names = "ref", "pclk", "phy_cfg", "grf"; - power-domains = <&power RK3399_PD_VIO>; - resets = <&cru SRST_P_MIPI_DSI0>; - reset-names = "apb"; - rockchip,grf = <&grf>; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - mipi_in: port@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <0>; - - mipi_in_vopb: endpoint@0 { - reg = <0>; - remote-endpoint = <&vopb_out_mipi>; - }; - - mipi_in_vopl: endpoint@1 { - reg = <1>; - remote-endpoint = <&vopl_out_mipi>; - }; - }; - - mipi_out: port@1 { - reg = <1>; - }; - }; - }; - - mipi_dsi1: dsi@ff968000 { - compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; - reg = <0x0 0xff968000 0x0 0x8000>; - interrupts = ; - clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI1>, - <&cru SCLK_DPHY_TX1RX1_CFG>, <&cru PCLK_VIO_GRF>; - clock-names = "ref", "pclk", "phy_cfg", "grf"; - power-domains = <&power RK3399_PD_VIO>; - resets = <&cru SRST_P_MIPI_DSI1>; - reset-names = "apb"; - rockchip,grf = <&grf>; - #address-cells = <1>; - #size-cells = <0>; - #phy-cells = <0>; - status = "disabled"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - mipi1_in: port@0 { - reg = <0>; - #address-cells = <1>; - 
#size-cells = <0>; - - mipi1_in_vopb: endpoint@0 { - reg = <0>; - remote-endpoint = <&vopb_out_mipi1>; - }; - - mipi1_in_vopl: endpoint@1 { - reg = <1>; - remote-endpoint = <&vopl_out_mipi1>; - }; - }; - - mipi1_out: port@1 { - reg = <1>; - }; - }; - }; - - edp: dp@ff970000 { - compatible = "rockchip,rk3399-edp"; - reg = <0x0 0xff970000 0x0 0x8000>; - interrupts = ; - clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>; - clock-names = "dp", "pclk", "grf"; - pinctrl-names = "default"; - pinctrl-0 = <&edp_hpd>; - power-domains = <&power RK3399_PD_EDP>; - resets = <&cru SRST_P_EDP_CTRL>; - reset-names = "dp"; - rockchip,grf = <&grf>; - status = "disabled"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - edp_in: port@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <0>; - - edp_in_vopb: endpoint@0 { - reg = <0>; - remote-endpoint = <&vopb_out_edp>; - }; - - edp_in_vopl: endpoint@1 { - reg = <1>; - remote-endpoint = <&vopl_out_edp>; - }; - }; - - edp_out: port@1 { - reg = <1>; - }; - }; - }; - - gpu: gpu@ff9a0000 { - compatible = "rockchip,rk3399-mali", "arm,mali-t860"; - reg = <0x0 0xff9a0000 0x0 0x10000>; - interrupts = , - , - ; - interrupt-names = "job", "mmu", "gpu"; - clocks = <&cru ACLK_GPU>; - #cooling-cells = <2>; - dynamic-power-coefficient = <2640>; - power-domains = <&power RK3399_PD_GPU>; - status = "disabled"; - }; - - pinctrl: pinctrl { - compatible = "rockchip,rk3399-pinctrl"; - rockchip,grf = <&grf>; - rockchip,pmu = <&pmugrf>; - #address-cells = <2>; - #size-cells = <2>; - ranges; - - gpio0: gpio@ff720000 { - compatible = "rockchip,gpio-bank"; - reg = <0x0 0xff720000 0x0 0x100>; - clocks = <&pmucru PCLK_GPIO0_PMU>; - interrupts = ; - - gpio-controller; - #gpio-cells = <0x2>; - - interrupt-controller; - #interrupt-cells = <0x2>; - }; - - gpio1: gpio@ff730000 { - compatible = "rockchip,gpio-bank"; - reg = <0x0 0xff730000 0x0 0x100>; - clocks = <&pmucru PCLK_GPIO1_PMU>; - interrupts = ; - - gpio-controller; - #gpio-cells = <0x2>; - - interrupt-controller; - #interrupt-cells = <0x2>; - }; - - gpio2: gpio@ff780000 { - compatible = "rockchip,gpio-bank"; - reg = <0x0 0xff780000 0x0 0x100>; - clocks = <&cru PCLK_GPIO2>; - interrupts = ; - - gpio-controller; - #gpio-cells = <0x2>; - - interrupt-controller; - #interrupt-cells = <0x2>; - }; - - gpio3: gpio@ff788000 { - compatible = "rockchip,gpio-bank"; - reg = <0x0 0xff788000 0x0 0x100>; - clocks = <&cru PCLK_GPIO3>; - interrupts = ; - - gpio-controller; - #gpio-cells = <0x2>; - - interrupt-controller; - #interrupt-cells = <0x2>; - }; - - gpio4: gpio@ff790000 { - compatible = "rockchip,gpio-bank"; - reg = <0x0 0xff790000 0x0 0x100>; - clocks = <&cru PCLK_GPIO4>; - interrupts = ; - - gpio-controller; - #gpio-cells = <0x2>; - - interrupt-controller; - #interrupt-cells = <0x2>; - }; - - pcfg_pull_up: pcfg-pull-up { - bias-pull-up; - }; - - pcfg_pull_down: pcfg-pull-down { - bias-pull-down; - }; - - pcfg_pull_none: pcfg-pull-none { - bias-disable; - }; - - pcfg_pull_none_12ma: pcfg-pull-none-12ma { - bias-disable; - drive-strength = <12>; - }; - - pcfg_pull_none_13ma: pcfg-pull-none-13ma { - bias-disable; - drive-strength = <13>; - }; - - pcfg_pull_none_18ma: pcfg-pull-none-18ma { - bias-disable; - drive-strength = <18>; - }; - - pcfg_pull_none_20ma: pcfg-pull-none-20ma { - bias-disable; - drive-strength = <20>; - }; - - pcfg_pull_up_2ma: pcfg-pull-up-2ma { - bias-pull-up; - drive-strength = <2>; - }; - - pcfg_pull_up_8ma: pcfg-pull-up-8ma { - bias-pull-up; - drive-strength = <8>; - }; - - 
pcfg_pull_up_18ma: pcfg-pull-up-18ma { - bias-pull-up; - drive-strength = <18>; - }; - - pcfg_pull_up_20ma: pcfg-pull-up-20ma { - bias-pull-up; - drive-strength = <20>; - }; - - pcfg_pull_down_4ma: pcfg-pull-down-4ma { - bias-pull-down; - drive-strength = <4>; - }; - - pcfg_pull_down_8ma: pcfg-pull-down-8ma { - bias-pull-down; - drive-strength = <8>; - }; - - pcfg_pull_down_12ma: pcfg-pull-down-12ma { - bias-pull-down; - drive-strength = <12>; - }; - - pcfg_pull_down_18ma: pcfg-pull-down-18ma { - bias-pull-down; - drive-strength = <18>; - }; - - pcfg_pull_down_20ma: pcfg-pull-down-20ma { - bias-pull-down; - drive-strength = <20>; - }; - - pcfg_output_high: pcfg-output-high { - output-high; - }; - - pcfg_output_low: pcfg-output-low { - output-low; - }; - - pcfg_input_enable: pcfg-input-enable { - input-enable; - }; - - pcfg_input_pull_up: pcfg-input-pull-up { - input-enable; - bias-pull-up; - }; - - pcfg_input_pull_down: pcfg-input-pull-down { - input-enable; - bias-pull-down; - }; - - clock { - clk_32k: clk-32k { - rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>; - }; - }; - - cif { - cif_clkin: cif-clkin { - rockchip,pins = - <2 RK_PB2 3 &pcfg_pull_none>; - }; - - cif_clkouta: cif-clkouta { - rockchip,pins = - <2 RK_PB3 3 &pcfg_pull_none>; - }; - }; - - edp { - edp_hpd: edp-hpd { - rockchip,pins = - <4 RK_PC7 2 &pcfg_pull_none>; - }; - }; - - gmac { - rgmii_pins: rgmii-pins { - rockchip,pins = - /* mac_txclk */ - <3 RK_PC1 1 &pcfg_pull_none_13ma>, - /* mac_rxclk */ - <3 RK_PB6 1 &pcfg_pull_none>, - /* mac_mdio */ - <3 RK_PB5 1 &pcfg_pull_none>, - /* mac_txen */ - <3 RK_PB4 1 &pcfg_pull_none_13ma>, - /* mac_clk */ - <3 RK_PB3 1 &pcfg_pull_none>, - /* mac_rxdv */ - <3 RK_PB1 1 &pcfg_pull_none>, - /* mac_mdc */ - <3 RK_PB0 1 &pcfg_pull_none>, - /* mac_rxd1 */ - <3 RK_PA7 1 &pcfg_pull_none>, - /* mac_rxd0 */ - <3 RK_PA6 1 &pcfg_pull_none>, - /* mac_txd1 */ - <3 RK_PA5 1 &pcfg_pull_none_13ma>, - /* mac_txd0 */ - <3 RK_PA4 1 &pcfg_pull_none_13ma>, - /* mac_rxd3 */ - <3 RK_PA3 1 &pcfg_pull_none>, - /* mac_rxd2 */ - <3 RK_PA2 1 &pcfg_pull_none>, - /* mac_txd3 */ - <3 RK_PA1 1 &pcfg_pull_none_13ma>, - /* mac_txd2 */ - <3 RK_PA0 1 &pcfg_pull_none_13ma>; - }; - - rmii_pins: rmii-pins { - rockchip,pins = - /* mac_mdio */ - <3 RK_PB5 1 &pcfg_pull_none>, - /* mac_txen */ - <3 RK_PB4 1 &pcfg_pull_none_13ma>, - /* mac_clk */ - <3 RK_PB3 1 &pcfg_pull_none>, - /* mac_rxer */ - <3 RK_PB2 1 &pcfg_pull_none>, - /* mac_rxdv */ - <3 RK_PB1 1 &pcfg_pull_none>, - /* mac_mdc */ - <3 RK_PB0 1 &pcfg_pull_none>, - /* mac_rxd1 */ - <3 RK_PA7 1 &pcfg_pull_none>, - /* mac_rxd0 */ - <3 RK_PA6 1 &pcfg_pull_none>, - /* mac_txd1 */ - <3 RK_PA5 1 &pcfg_pull_none_13ma>, - /* mac_txd0 */ - <3 RK_PA4 1 &pcfg_pull_none_13ma>; - }; - }; - - i2c0 { - i2c0_xfer: i2c0-xfer { - rockchip,pins = - <1 RK_PB7 2 &pcfg_pull_none>, - <1 RK_PC0 2 &pcfg_pull_none>; - }; - }; - - i2c1 { - i2c1_xfer: i2c1-xfer { - rockchip,pins = - <4 RK_PA2 1 &pcfg_pull_none>, - <4 RK_PA1 1 &pcfg_pull_none>; - }; - }; - - i2c2 { - i2c2_xfer: i2c2-xfer { - rockchip,pins = - <2 RK_PA1 2 &pcfg_pull_none_12ma>, - <2 RK_PA0 2 &pcfg_pull_none_12ma>; - }; - }; - - i2c3 { - i2c3_xfer: i2c3-xfer { - rockchip,pins = - <4 RK_PC1 1 &pcfg_pull_none>, - <4 RK_PC0 1 &pcfg_pull_none>; - }; - }; - - i2c4 { - i2c4_xfer: i2c4-xfer { - rockchip,pins = - <1 RK_PB4 1 &pcfg_pull_none>, - <1 RK_PB3 1 &pcfg_pull_none>; - }; - }; - - i2c5 { - i2c5_xfer: i2c5-xfer { - rockchip,pins = - <3 RK_PB3 2 &pcfg_pull_none>, - <3 RK_PB2 2 &pcfg_pull_none>; - }; - }; - - i2c6 { - i2c6_xfer: i2c6-xfer 
{ - rockchip,pins = - <2 RK_PB2 2 &pcfg_pull_none>, - <2 RK_PB1 2 &pcfg_pull_none>; - }; - }; - - i2c7 { - i2c7_xfer: i2c7-xfer { - rockchip,pins = - <2 RK_PB0 2 &pcfg_pull_none>, - <2 RK_PA7 2 &pcfg_pull_none>; - }; - }; - - i2c8 { - i2c8_xfer: i2c8-xfer { - rockchip,pins = - <1 RK_PC5 1 &pcfg_pull_none>, - <1 RK_PC4 1 &pcfg_pull_none>; - }; - }; - - i2s0 { - i2s0_2ch_bus: i2s0-2ch-bus { - rockchip,pins = - <3 RK_PD0 1 &pcfg_pull_none>, - <3 RK_PD1 1 &pcfg_pull_none>, - <3 RK_PD2 1 &pcfg_pull_none>, - <3 RK_PD3 1 &pcfg_pull_none>, - <3 RK_PD7 1 &pcfg_pull_none>, - <4 RK_PA0 1 &pcfg_pull_none>; - }; - - i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off { - rockchip,pins = - <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>, - <3 RK_PD1 1 &pcfg_pull_none>, - <3 RK_PD2 1 &pcfg_pull_none>, - <3 RK_PD3 1 &pcfg_pull_none>, - <3 RK_PD7 1 &pcfg_pull_none>, - <4 RK_PA0 1 &pcfg_pull_none>; - }; - - i2s0_8ch_bus: i2s0-8ch-bus { - rockchip,pins = - <3 RK_PD0 1 &pcfg_pull_none>, - <3 RK_PD1 1 &pcfg_pull_none>, - <3 RK_PD2 1 &pcfg_pull_none>, - <3 RK_PD3 1 &pcfg_pull_none>, - <3 RK_PD4 1 &pcfg_pull_none>, - <3 RK_PD5 1 &pcfg_pull_none>, - <3 RK_PD6 1 &pcfg_pull_none>, - <3 RK_PD7 1 &pcfg_pull_none>, - <4 RK_PA0 1 &pcfg_pull_none>; - }; - - i2s0_8ch_bus_bclk_off: i2s0-8ch-bus-bclk-off { - rockchip,pins = - <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>, - <3 RK_PD1 1 &pcfg_pull_none>, - <3 RK_PD2 1 &pcfg_pull_none>, - <3 RK_PD3 1 &pcfg_pull_none>, - <3 RK_PD4 1 &pcfg_pull_none>, - <3 RK_PD5 1 &pcfg_pull_none>, - <3 RK_PD6 1 &pcfg_pull_none>, - <3 RK_PD7 1 &pcfg_pull_none>, - <4 RK_PA0 1 &pcfg_pull_none>; - }; - }; - - i2s1 { - i2s1_2ch_bus: i2s1-2ch-bus { - rockchip,pins = - <4 RK_PA3 1 &pcfg_pull_none>, - <4 RK_PA4 1 &pcfg_pull_none>, - <4 RK_PA5 1 &pcfg_pull_none>, - <4 RK_PA6 1 &pcfg_pull_none>, - <4 RK_PA7 1 &pcfg_pull_none>; - }; - - i2s1_2ch_bus_bclk_off: i2s1-2ch-bus-bclk-off { - rockchip,pins = - <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>, - <4 RK_PA4 1 &pcfg_pull_none>, - <4 RK_PA5 1 &pcfg_pull_none>, - <4 RK_PA6 1 &pcfg_pull_none>, - <4 RK_PA7 1 &pcfg_pull_none>; - }; - }; - - sdio0 { - sdio0_bus1: sdio0-bus1 { - rockchip,pins = - <2 RK_PC4 1 &pcfg_pull_up>; - }; - - sdio0_bus4: sdio0-bus4 { - rockchip,pins = - <2 RK_PC4 1 &pcfg_pull_up>, - <2 RK_PC5 1 &pcfg_pull_up>, - <2 RK_PC6 1 &pcfg_pull_up>, - <2 RK_PC7 1 &pcfg_pull_up>; - }; - - sdio0_cmd: sdio0-cmd { - rockchip,pins = - <2 RK_PD0 1 &pcfg_pull_up>; - }; - - sdio0_clk: sdio0-clk { - rockchip,pins = - <2 RK_PD1 1 &pcfg_pull_none>; - }; - - sdio0_cd: sdio0-cd { - rockchip,pins = - <2 RK_PD2 1 &pcfg_pull_up>; - }; - - sdio0_pwr: sdio0-pwr { - rockchip,pins = - <2 RK_PD3 1 &pcfg_pull_up>; - }; - - sdio0_bkpwr: sdio0-bkpwr { - rockchip,pins = - <2 RK_PD4 1 &pcfg_pull_up>; - }; - - sdio0_wp: sdio0-wp { - rockchip,pins = - <0 RK_PA3 1 &pcfg_pull_up>; - }; - - sdio0_int: sdio0-int { - rockchip,pins = - <0 RK_PA4 1 &pcfg_pull_up>; - }; - }; - - sdmmc { - sdmmc_bus1: sdmmc-bus1 { - rockchip,pins = - <4 RK_PB0 1 &pcfg_pull_up>; - }; - - sdmmc_bus4: sdmmc-bus4 { - rockchip,pins = - <4 RK_PB0 1 &pcfg_pull_up>, - <4 RK_PB1 1 &pcfg_pull_up>, - <4 RK_PB2 1 &pcfg_pull_up>, - <4 RK_PB3 1 &pcfg_pull_up>; - }; - - sdmmc_clk: sdmmc-clk { - rockchip,pins = - <4 RK_PB4 1 &pcfg_pull_none>; - }; - - sdmmc_cmd: sdmmc-cmd { - rockchip,pins = - <4 RK_PB5 1 &pcfg_pull_up>; - }; - - sdmmc_cd: sdmmc-cd { - rockchip,pins = - <0 RK_PA7 1 &pcfg_pull_up>; - }; - - sdmmc_wp: sdmmc-wp { - rockchip,pins = - <0 RK_PB0 1 &pcfg_pull_up>; - }; - }; - - suspend { - ap_pwroff: ap-pwroff { - rockchip,pins 
= <1 RK_PA5 1 &pcfg_pull_none>; - }; - - ddrio_pwroff: ddrio-pwroff { - rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>; - }; - }; - - spdif { - spdif_bus: spdif-bus { - rockchip,pins = - <4 RK_PC5 1 &pcfg_pull_none>; - }; - - spdif_bus_1: spdif-bus-1 { - rockchip,pins = - <3 RK_PC0 3 &pcfg_pull_none>; - }; - }; - - spi0 { - spi0_clk: spi0-clk { - rockchip,pins = - <3 RK_PA6 2 &pcfg_pull_up>; - }; - spi0_cs0: spi0-cs0 { - rockchip,pins = - <3 RK_PA7 2 &pcfg_pull_up>; - }; - spi0_cs1: spi0-cs1 { - rockchip,pins = - <3 RK_PB0 2 &pcfg_pull_up>; - }; - spi0_tx: spi0-tx { - rockchip,pins = - <3 RK_PA5 2 &pcfg_pull_up>; - }; - spi0_rx: spi0-rx { - rockchip,pins = - <3 RK_PA4 2 &pcfg_pull_up>; - }; - }; - - spi1 { - spi1_clk: spi1-clk { - rockchip,pins = - <1 RK_PB1 2 &pcfg_pull_up>; - }; - spi1_cs0: spi1-cs0 { - rockchip,pins = - <1 RK_PB2 2 &pcfg_pull_up>; - }; - spi1_rx: spi1-rx { - rockchip,pins = - <1 RK_PA7 2 &pcfg_pull_up>; - }; - spi1_tx: spi1-tx { - rockchip,pins = - <1 RK_PB0 2 &pcfg_pull_up>; - }; - }; - - spi2 { - spi2_clk: spi2-clk { - rockchip,pins = - <2 RK_PB3 1 &pcfg_pull_up>; - }; - spi2_cs0: spi2-cs0 { - rockchip,pins = - <2 RK_PB4 1 &pcfg_pull_up>; - }; - spi2_rx: spi2-rx { - rockchip,pins = - <2 RK_PB1 1 &pcfg_pull_up>; - }; - spi2_tx: spi2-tx { - rockchip,pins = - <2 RK_PB2 1 &pcfg_pull_up>; - }; - }; - - spi3 { - spi3_clk: spi3-clk { - rockchip,pins = - <1 RK_PC1 1 &pcfg_pull_up>; - }; - spi3_cs0: spi3-cs0 { - rockchip,pins = - <1 RK_PC2 1 &pcfg_pull_up>; - }; - spi3_rx: spi3-rx { - rockchip,pins = - <1 RK_PB7 1 &pcfg_pull_up>; - }; - spi3_tx: spi3-tx { - rockchip,pins = - <1 RK_PC0 1 &pcfg_pull_up>; - }; - }; - - spi4 { - spi4_clk: spi4-clk { - rockchip,pins = - <3 RK_PA2 2 &pcfg_pull_up>; - }; - spi4_cs0: spi4-cs0 { - rockchip,pins = - <3 RK_PA3 2 &pcfg_pull_up>; - }; - spi4_rx: spi4-rx { - rockchip,pins = - <3 RK_PA0 2 &pcfg_pull_up>; - }; - spi4_tx: spi4-tx { - rockchip,pins = - <3 RK_PA1 2 &pcfg_pull_up>; - }; - }; - - spi5 { - spi5_clk: spi5-clk { - rockchip,pins = - <2 RK_PC6 2 &pcfg_pull_up>; - }; - spi5_cs0: spi5-cs0 { - rockchip,pins = - <2 RK_PC7 2 &pcfg_pull_up>; - }; - spi5_rx: spi5-rx { - rockchip,pins = - <2 RK_PC4 2 &pcfg_pull_up>; - }; - spi5_tx: spi5-tx { - rockchip,pins = - <2 RK_PC5 2 &pcfg_pull_up>; - }; - }; - - testclk { - test_clkout0: test-clkout0 { - rockchip,pins = - <0 RK_PA0 1 &pcfg_pull_none>; - }; - - test_clkout1: test-clkout1 { - rockchip,pins = - <2 RK_PD1 2 &pcfg_pull_none>; - }; - - test_clkout2: test-clkout2 { - rockchip,pins = - <0 RK_PB0 3 &pcfg_pull_none>; - }; - }; - - tsadc { - otp_pin: otp-pin { - rockchip,pins = <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; - }; - - otp_out: otp-out { - rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none>; - }; - }; - - uart0 { - uart0_xfer: uart0-xfer { - rockchip,pins = - <2 RK_PC0 1 &pcfg_pull_up>, - <2 RK_PC1 1 &pcfg_pull_none>; - }; - - uart0_cts: uart0-cts { - rockchip,pins = - <2 RK_PC2 1 &pcfg_pull_none>; - }; - - uart0_rts: uart0-rts { - rockchip,pins = - <2 RK_PC3 1 &pcfg_pull_none>; - }; - }; - - uart1 { - uart1_xfer: uart1-xfer { - rockchip,pins = - <3 RK_PB4 2 &pcfg_pull_up>, - <3 RK_PB5 2 &pcfg_pull_none>; - }; - }; - - uart2a { - uart2a_xfer: uart2a-xfer { - rockchip,pins = - <4 RK_PB0 2 &pcfg_pull_up>, - <4 RK_PB1 2 &pcfg_pull_none>; - }; - }; - - uart2b { - uart2b_xfer: uart2b-xfer { - rockchip,pins = - <4 RK_PC0 2 &pcfg_pull_up>, - <4 RK_PC1 2 &pcfg_pull_none>; - }; - }; - - uart2c { - uart2c_xfer: uart2c-xfer { - rockchip,pins = - <4 RK_PC3 1 &pcfg_pull_up>, - <4 RK_PC4 1 &pcfg_pull_none>; - }; 
- }; - - uart3 { - uart3_xfer: uart3-xfer { - rockchip,pins = - <3 RK_PB6 2 &pcfg_pull_up>, - <3 RK_PB7 2 &pcfg_pull_none>; - }; - - uart3_cts: uart3-cts { - rockchip,pins = - <3 RK_PC0 2 &pcfg_pull_none>; - }; - - uart3_rts: uart3-rts { - rockchip,pins = - <3 RK_PC1 2 &pcfg_pull_none>; - }; - }; - - uart4 { - uart4_xfer: uart4-xfer { - rockchip,pins = - <1 RK_PA7 1 &pcfg_pull_up>, - <1 RK_PB0 1 &pcfg_pull_none>; - }; - }; - - uarthdcp { - uarthdcp_xfer: uarthdcp-xfer { - rockchip,pins = - <4 RK_PC5 2 &pcfg_pull_up>, - <4 RK_PC6 2 &pcfg_pull_none>; - }; - }; - - pwm0 { - pwm0_pin: pwm0-pin { - rockchip,pins = - <4 RK_PC2 1 &pcfg_pull_none>; - }; - - pwm0_pin_pull_down: pwm0-pin-pull-down { - rockchip,pins = - <4 RK_PC2 1 &pcfg_pull_down>; - }; - - vop0_pwm_pin: vop0-pwm-pin { - rockchip,pins = - <4 RK_PC2 2 &pcfg_pull_none>; - }; - - vop1_pwm_pin: vop1-pwm-pin { - rockchip,pins = - <4 RK_PC2 3 &pcfg_pull_none>; - }; - }; - - pwm1 { - pwm1_pin: pwm1-pin { - rockchip,pins = - <4 RK_PC6 1 &pcfg_pull_none>; - }; - - pwm1_pin_pull_down: pwm1-pin-pull-down { - rockchip,pins = - <4 RK_PC6 1 &pcfg_pull_down>; - }; - }; - - pwm2 { - pwm2_pin: pwm2-pin { - rockchip,pins = - <1 RK_PC3 1 &pcfg_pull_none>; - }; - - pwm2_pin_pull_down: pwm2-pin-pull-down { - rockchip,pins = - <1 RK_PC3 1 &pcfg_pull_down>; - }; - }; - - pwm3a { - pwm3a_pin: pwm3a-pin { - rockchip,pins = - <0 RK_PA6 1 &pcfg_pull_none>; - }; - }; - - pwm3b { - pwm3b_pin: pwm3b-pin { - rockchip,pins = - <1 RK_PB6 1 &pcfg_pull_none>; - }; - }; - - hdmi { - hdmi_i2c_xfer: hdmi-i2c-xfer { - rockchip,pins = - <4 RK_PC1 3 &pcfg_pull_none>, - <4 RK_PC0 3 &pcfg_pull_none>; - }; - - hdmi_cec: hdmi-cec { - rockchip,pins = - <4 RK_PC7 1 &pcfg_pull_none>; - }; - }; - - pcie { - pcie_clkreqn_cpm: pci-clkreqn-cpm { - rockchip,pins = - <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>; - }; - - pcie_clkreqnb_cpm: pci-clkreqnb-cpm { - rockchip,pins = - <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>; - }; - }; - - }; +&gpu { + operating-points-v2 = <&gpu_opp_table>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts b/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts index c58fb7658d7a19..d3c628218ce39c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts @@ -7,7 +7,6 @@ /dts-v1/; #include "rk3399.dtsi" -#include "rk3399-opp.dtsi" #include #include "rk3399pro-vmarc-som.dtsi" diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lckfb-tspi.dts b/arch/arm64/boot/dts/rockchip/rk3566-lckfb-tspi.dts new file mode 100644 index 00000000000000..7cd91f8000cb0d --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3566-lckfb-tspi.dts @@ -0,0 +1,725 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +/dts-v1/; + +#include +#include +#include +#include +#include +#include +#include "rk3566.dtsi" + +/ { + model = "LCKFB Taishan Pi RK3566"; + compatible = "lckfb,tspi-rk3566", "rockchip,rk3566"; + + aliases { + mmc0 = &sdmmc0; + mmc1 = &sdhci; + mmc2 = &sdmmc1; + }; + + chosen: chosen { + stdout-path = "serial2:1500000n8"; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + ramoops: ramoops@110000 { + compatible = "ramoops"; + reg = <0 0x110000 0 0xf0000>; + console-size = <0x80000>; + ftrace-size = <0x00000>; + pmsg-size = <0x50000>; + record-size = <0x20000>; + }; + }; + + adc_keys: adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 0>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1800000>; + poll-interval = <100>; + + 
button-recovery { + label = "recovery"; + linux,code = ; + press-threshold-microvolt = <108>; + }; + }; + + hdmi_con: hdmi-con { + compatible = "hdmi-connector"; + type = "d"; + + port { + hdmi_con_in: endpoint { + remote-endpoint = <&hdmi_out_con>; + }; + }; + }; + + leds: leds { + compatible = "gpio-leds"; + + rgb_led_r: rgb-led-r { + color = ; + gpios = <&gpio1 RK_PB2 GPIO_ACTIVE_LOW>; + label = "status-red"; + }; + + rgb_led_g: rgb-led-g { + gpios = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>; + color = ; + label = "status-green"; + }; + + rgb_led_b: rgb-led-b { + gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>; + color = ; + label = "status-blue"; + }; + }; + + multi_leds: multi-led { + compatible = "leds-group-multicolor"; + color = ; + label = "status-rgb"; + function = LED_FUNCTION_INDICATOR; + leds = <&rgb_led_r>, <&rgb_led_g>, <&rgb_led_b>; + }; + + vcc12v0_dcin: regulator-12v0-dcin { + compatible = "regulator-fixed"; + regulator-name = "vcc12v0_dcin"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + vcc3v3_sys: regulator-3v3-vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_sys: regulator-5v0-vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc12v0_dcin>; + }; + + vcc5v0_host: regulator-5v0-vcc-host { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio4 RK_PC4 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_host_en>; + regulator-name = "vcc5v0_host"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; + }; + + sdio_pwrseq: sdio-pwrseq { + compatible = "mmc-pwrseq-simple"; + clocks = <&rk809 1>; + clock-names = "ext_clock"; + pinctrl-names = "default"; + pinctrl-0 = <&wifi_enable_h>; + post-power-on-delay-ms = <200>; + reset-gpios = <&gpio2 RK_PB1 GPIO_ACTIVE_LOW>; + }; + + sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "Analog RK809"; + simple-audio-card,mclk-fs = <256>; + + simple-audio-card,cpu { + sound-dai = <&i2s1_8ch>; + }; + + simple-audio-card,codec { + sound-dai = <&rk809>; + }; + }; +}; + +&combphy1 { + status = "okay"; +}; + +&combphy2 { + status = "okay"; +}; + +&cpu0 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu1 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu2 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu3 { + cpu-supply = <&vdd_cpu>; +}; + +&gpu { + mali-supply = <&vdd_gpu>; + status = "okay"; +}; + +&hdmi { + avdd-0v9-supply = <&vdda0v9_image>; + avdd-1v8-supply = <&vcca1v8_image>; + status = "okay"; +}; + +&hdmi_in { + hdmi_in_vp0: endpoint { + remote-endpoint = <&vp0_out_hdmi>; + }; +}; + +&hdmi_out { + hdmi_out_con: endpoint { + remote-endpoint = <&hdmi_con_in>; + }; +}; + +&hdmi_sound { + status = "okay"; +}; + +&i2c0 { + status = "okay"; + + vdd_cpu: regulator@1c { + compatible = "tcs,tcs4525"; + reg = <0x1c>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_cpu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1150000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc5v0_sys>; + + regulator-state-mem { + 
regulator-off-in-suspend; + }; + }; + + rk809: pmic@20 { + compatible = "rockchip,rk809"; + reg = <0x20>; + assigned-clocks = <&cru I2S1_MCLKOUT_TX>; + assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>; + #clock-cells = <1>; + clock-output-names = "rk808-clkout1", "rk808-clkout2"; + clock-names = "mclk"; + clocks = <&cru I2S1_MCLKOUT_TX>; + interrupt-parent = <&gpio0>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int>, <&i2s1m0_mclk>; + rockchip,system-power-controller; + #sound-dai-cells = <0>; + wakeup-source; + + vcc1-supply = <&vcc3v3_sys>; + vcc2-supply = <&vcc3v3_sys>; + vcc3-supply = <&vcc3v3_sys>; + vcc4-supply = <&vcc3v3_sys>; + vcc5-supply = <&vcc3v3_sys>; + vcc6-supply = <&vcc3v3_sys>; + vcc7-supply = <&vcc3v3_sys>; + vcc8-supply = <&vcc3v3_sys>; + vcc9-supply = <&vcc3v3_sys>; + + regulators { + vdd_logic: DCDC_REG1 { + regulator-name = "vdd_logic"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-initial-mode = <0x2>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_gpu: DCDC_REG2 { + regulator-name = "vdd_gpu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-initial-mode = <0x2>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-initial-mode = <0x2>; + + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vdd_npu: DCDC_REG4 { + regulator-name = "vdd_npu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-initial-mode = <0x2>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda0v9_image: LDO_REG1 { + regulator-name = "vdda0v9_image"; + regulator-boot-on; + regulator-always-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda_0v9: LDO_REG2 { + regulator-name = "vdda_0v9"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda0v9_pmu: LDO_REG3 { + regulator-name = "vdda0v9_pmu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <900000>; + }; + }; + + vccio_acodec: LDO_REG4 { + regulator-name = "vccio_acodec"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vccio_sd: LDO_REG5 { + regulator-name = "vccio_sd"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_pmu: LDO_REG6 { + regulator-name = "vcc3v3_pmu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcca_1v8: LDO_REG7 { + regulator-name = 
"vcca_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcca1v8_pmu: LDO_REG8 { + regulator-name = "vcca1v8_pmu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcca1v8_image: LDO_REG9 { + regulator-name = "vcca1v8_image"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8: DCDC_REG5 { + regulator-name = "vcc_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v3: SWITCH_REG1 { + regulator-name = "vcc_3v3"; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_sd: SWITCH_REG2 { + regulator-name = "vcc3v3_sd"; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + + codec { + rockchip,mic-in-differential; + }; + }; +}; + +&i2c1 { + status = "okay"; + /* Touch Screen */ +}; + +&i2c4 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c4m0_xfer>; + status = "okay"; + /* Camera */ +}; + +&i2s0_8ch { + status = "okay"; + /* HDMI */ +}; + +&i2s1_8ch { + pinctrl-names = "default"; + pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>; + rockchip,trcm-sync-tx-only; + status = "okay"; + /* PMIC */ +}; + +&i2s2_2ch { + rockchip,trcm-sync-tx-only; + status = "okay"; + /* AP6212 Bluetooth */ +}; + +&pinctrl { + bt { + bt_enable_h: bt-enable-h { + rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + bt_host_wake_l: bt-host-wake-l { + rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + bt_wake_l: bt-wake-l { + rockchip,pins = <2 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + hp-detect { + hp_det: hp-det { + rockchip,pins = <4 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + pmic { + pmic_int: pmic-int { + rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + sdio-pwrseq { + wifi_enable_h: wifi-enable-h { + rockchip,pins = <2 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + wifi_host_wake_h: wifi-host-wake-l { + rockchip,pins = <2 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + usb2 { + vcc5v0_host_en: vcc5v0-host-en { + rockchip,pins = <4 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&pmu_io_domains { + pmuio1-supply = <&vcc3v3_pmu>; + pmuio2-supply = <&vcc3v3_pmu>; + vccio1-supply = <&vccio_acodec>; + vccio2-supply = <&vcc_1v8>; + vccio3-supply = <&vccio_sd>; + vccio4-supply = <&vcc_1v8>; + vccio5-supply = <&vcc_3v3>; + vccio6-supply = <&vcc_1v8>; + vccio7-supply = <&vcc_3v3>; + status = "okay"; +}; + +&pmugrf { + reboot-mode { + compatible = "syscon-reboot-mode"; + offset = <0x200>; + mode-normal = ; + mode-loader = ; + mode-recovery = ; + mode-bootloader = ; + }; +}; + +&saradc { + vref-supply = <&vcca_1v8>; + status = "okay"; + /* Channel 0: Recovery Button */ + /* Channel 1: Hardware ID */ +}; + +&sdhci { + bus-width = <8>; + max-frequency = <200000000>; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe &emmc_rstnout>; + 
vmmc-supply = <&vcc_3v3>; + vqmmc-supply = <&vcc_1v8>; + status = "okay"; +}; + +&sdmmc0 { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + disable-wp; + max-frequency = <150000000>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>; + sd-uhs-sdr104; + vmmc-supply = <&vcc3v3_sd>; + vqmmc-supply = <&vcc_1v8>; + status = "okay"; +}; + +&sdmmc1 { + bus-width = <4>; + cap-sd-highspeed; + cap-sdio-irq; + disable-wp; + keep-power-in-suspend; + max-frequency = <150000000>; + mmc-pwrseq = <&sdio_pwrseq>; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>; + sd-uhs-sdr104; + vmmc-supply = <&vcc3v3_sys>; + vqmmc-supply = <&vcc_1v8>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + brcmf: wifi@1 { + compatible = "brcm,bcm4329-fmac"; + reg = <1>; + interrupt-parent = <&gpio2>; + interrupts = ; + interrupt-names = "host-wake"; + pinctrl-names = "default"; + pinctrl-0 = <&wifi_host_wake_h>; + }; +}; + +&tsadc { + rockchip,hw-tshut-mode = <1>; + rockchip,hw-tshut-polarity = <0>; + status = "okay"; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>; + uart-has-rtscts; + status = "okay"; + + bluetooth: bluetooth { + compatible = "brcm,bcm43438-bt"; + clocks = <&rk809 1>; + clock-names = "lpo"; + max-speed = <3000000>; + pinctrl-names = "default"; + pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>; + shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>; + vbat-supply = <&vcc3v3_sys>; + vddio-supply = <&vcc_1v8>; + }; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&uart2m0_xfer>; + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_xhci { + dr_mode = "otg"; + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; + +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_xhci { + dr_mode = "host"; + status = "okay"; +}; + +&usb2phy0 { + status = "okay"; +}; + +&usb2phy0_host { + phy-supply = <&vcc5v0_sys>; + status = "okay"; +}; + +&usb2phy0_otg { + phy-supply = <&vcc5v0_sys>; + status = "okay"; +}; + +&usb2phy1 { + status = "okay"; +}; + +&usb2phy1_host { + phy-supply = <&vcc5v0_host>; + status = "okay"; +}; + +&usb2phy1_otg { + phy-supply = <&vcc5v0_host>; + status = "okay"; +}; + +&vop { + assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>; + assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>; + status = "okay"; +}; + +&vop_mmu { + status = "okay"; +}; + +&vp0 { + vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 { + reg = ; + remote-endpoint = <&hdmi_in_vp0>; + }; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-odroid-m1s.dts b/arch/arm64/boot/dts/rockchip/rk3566-odroid-m1s.dts new file mode 100644 index 00000000000000..33bc5249d729b1 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3566-odroid-m1s.dts @@ -0,0 +1,663 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +/dts-v1/; + +#include +#include +#include +#include +#include "rk3566.dtsi" + +/ { + model = "Hardkernel ODROID-M1S"; + compatible = "hardkernel,odroid-m1s", "rockchip,rk3566"; + + aliases { + ethernet0 = &gmac1; + mmc0 = &sdhci; + mmc1 = &sdmmc0; + }; + + chosen { + stdout-path = "serial2:1500000n8"; + }; + + hdmi-con { + compatible = "hdmi-connector"; + type = "a"; + + port { + hdmi_con_in: endpoint { + remote-endpoint = <&hdmi_out_con>; + }; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&pwr_led>, <&sys_led>; 
+ + led_pwr: led-0 { + color = ; + default-state = "on"; + function = LED_FUNCTION_POWER; + gpios = <&gpio0 RK_PC6 GPIO_ACTIVE_LOW>; + linux,default-trigger = "default-on"; + }; + + led_sys: led-1 { + color = ; + default-state = "on"; + function = LED_FUNCTION_HEARTBEAT; + gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + }; + + vcc3v3_lcd: regulator-3v3-vcc-lcd { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio0 RK_PC7 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&lcd_pwren>; + regulator-name = "vcc3v3_lcd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc3v3_sys>; + }; + + vcc3v3_pcie: regulator-3v3-vcc-pcie { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio2 RK_PC2 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_pwren>; + regulator-name = "vcc3v3_pcie"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc3v3_sys>; + }; + + vcc3v3_sys: regulator-3v3-vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_sys: regulator-5v0-vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; + + vcc5v0_usb2_host: regulator-5v0-vcc-usb2-host { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb2_host_pwren>; + regulator-name = "vcc5v0_usb2_host"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_usb2_otg: regulator-5v0-vcc-usb2-otg { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb2_otg_pwren>; + regulator-name = "vcc5v0_usb2_otg"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_usb3_host: regulator-5v0-vcc-usb3-host { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb3_host_pwren>; + regulator-name = "vcc5v0_usb3_host"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; + }; + + sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "Analog RK809"; + simple-audio-card,mclk-fs = <256>; + + simple-audio-card,cpu { + sound-dai = <&i2s1_8ch>; + }; + + simple-audio-card,codec { + sound-dai = <&rk809>; + }; + }; +}; + +&combphy1 { + status = "okay"; +}; + +&combphy2 { + status = "okay"; +}; + +&cpu0 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu1 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu2 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu3 { + cpu-supply = <&vdd_cpu>; +}; + +&gmac1 { + assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>; + assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>; + clock_in_out = "input"; + phy-handle = <&rgmii_phy1>; + phy-mode = "rgmii-id"; + phy-supply = <&vcc_3v3>; + pinctrl-names = "default"; + pinctrl-0 = <&gmac1m1_miim + &gmac1m1_tx_bus2 + &gmac1m1_rx_bus2 + 
&gmac1m1_rgmii_clk + &gmac1m1_rgmii_bus + &gmac1m1_clkinout>; + status = "okay"; +}; + +&gpu { + mali-supply = <&vdd_gpu>; + status = "okay"; +}; + +&hdmi { + avdd-0v9-supply = <&vdda0v9_image>; + avdd-1v8-supply = <&vcca1v8_image>; + status = "okay"; +}; + +&hdmi_in { + hdmi_in_vp0: endpoint { + remote-endpoint = <&vp0_out_hdmi>; + }; +}; + +&hdmi_out { + hdmi_out_con: endpoint { + remote-endpoint = <&hdmi_con_in>; + }; +}; + +&hdmi_sound { + status = "okay"; +}; + +&i2c0 { + status = "okay"; + + vdd_cpu: regulator@1c { + compatible = "tcs,tcs4525"; + reg = <0x1c>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_cpu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1390000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc3v3_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + rk809: pmic@20 { + compatible = "rockchip,rk809"; + reg = <0x20>; + assigned-clocks = <&cru I2S1_MCLKOUT_TX>; + assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>; + #clock-cells = <1>; + clocks = <&cru I2S1_MCLKOUT_TX>; + clock-names = "mclk"; + clock-output-names = "rk809-clkout1", "rk809-clkout2"; + interrupt-parent = <&gpio0>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int>, <&i2s1m0_mclk>; + #sound-dai-cells = <0>; + system-power-controller; + wakeup-source; + + vcc1-supply = <&vcc3v3_sys>; + vcc2-supply = <&vcc3v3_sys>; + vcc3-supply = <&vcc3v3_sys>; + vcc4-supply = <&vcc3v3_sys>; + vcc5-supply = <&vcc3v3_sys>; + vcc6-supply = <&vcc3v3_sys>; + vcc7-supply = <&vcc3v3_sys>; + vcc8-supply = <&vcc3v3_sys>; + vcc9-supply = <&vcc3v3_sys>; + + regulators { + vdd_logic: DCDC_REG1 { + regulator-name = "vdd_logic"; + regulator-always-on; + regulator-boot-on; + regulator-initial-mode = <0x2>; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_gpu: DCDC_REG2 { + regulator-name = "vdd_gpu"; + regulator-always-on; + regulator-boot-on; + regulator-initial-mode = <0x2>; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-initial-mode = <0x2>; + + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vdd_npu: DCDC_REG4 { + regulator-name = "vdd_npu"; + regulator-initial-mode = <0x2>; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8: DCDC_REG5 { + regulator-name = "vcc_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda0v9_image: LDO_REG1 { + regulator-name = "vdda0v9_image"; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda_0v9: LDO_REG2 { + regulator-name = "vdda_0v9"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda0v9_pmu: LDO_REG3 { + regulator-name = "vdda0v9_pmu"; + regulator-always-on; + regulator-boot-on; + 
regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <900000>; + }; + }; + + vccio_acodec: LDO_REG4 { + regulator-name = "vccio_acodec"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vccio_sd: LDO_REG5 { + regulator-name = "vccio_sd"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_pmu: LDO_REG6 { + regulator-name = "vcc3v3_pmu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcca_1v8: LDO_REG7 { + regulator-name = "vcca_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcca1v8_pmu: LDO_REG8 { + regulator-name = "vcca1v8_pmu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcca1v8_image: LDO_REG9 { + regulator-name = "vcca1v8_image"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v3: SWITCH_REG1 { + regulator-name = "vcc_3v3"; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_sd: SWITCH_REG2 { + regulator-name = "vcc3v3_sd"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + }; +}; + +&i2s0_8ch { + status = "okay"; +}; + +&i2s1_8ch { + pinctrl-names = "default"; + pinctrl-0 = <&i2s1m0_sclktx + &i2s1m0_lrcktx + &i2s1m0_sdi0 + &i2s1m0_sdo0>; + rockchip,trcm-sync-tx-only; + status = "okay"; +}; + +&mdio1 { + rgmii_phy1: ethernet-phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <1>; + reset-assert-us = <20000>; + reset-deassert-us = <100000>; + reset-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>; + }; +}; + +&pcie2x1 { + pinctrl-names = "default"; + pinctrl-0 = <&pcie20_pins>; + reset-gpios = <&gpio1 RK_PB2 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc3v3_pcie>; + status = "okay"; +}; + +&pinctrl { + lcd { + lcd_pwren: lcd-pwren { + rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + leds { + pwr_led: pwr-led { + rockchip,pins = <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + sys_led: sys-led { + rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + pcie { + pcie20_pins: pcie20-pins { + rockchip,pins = + <1 RK_PB0 4 &pcfg_pull_none>, + <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>, + <1 RK_PB1 4 &pcfg_pull_none>; + }; + + pcie_pwren: pcie-pwren { + rockchip,pins = <2 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + pmic { + pmic_int: pmic-int { + rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + usb { + usb2_host_pwren: usb2-host-pwren { + rockchip,pins = <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + usb2_otg_pwren: usb2-otg-pwren { + rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + usb3_host_pwren: usb3-host-pwren { + rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO 
&pcfg_pull_none>; + }; + }; +}; + +&pmu_io_domains { + pmuio1-supply = <&vcc3v3_pmu>; + pmuio2-supply = <&vcc3v3_pmu>; + vccio1-supply = <&vccio_acodec>; + vccio2-supply = <&vcc_1v8>; + vccio3-supply = <&vccio_sd>; + vccio4-supply = <&vcc_3v3>; + vccio5-supply = <&vcc_3v3>; + vccio6-supply = <&vcc_3v3>; + vccio7-supply = <&vcc_3v3>; + status = "okay"; +}; + +&saradc { + vref-supply = <&vcca_1v8>; + status = "okay"; +}; + +&sdhci { + bus-width = <8>; + cap-mmc-highspeed; + max-frequency = <200000000>; + mmc-hs200-1_8v; + no-sd; + no-sdio; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe &emmc_rstnout>; + vmmc-supply = <&vcc_3v3>; + vqmmc-supply = <&vcc_1v8>; + status = "okay"; +}; + +&sdmmc0 { + bus-width = <4>; + cap-sd-highspeed; + disable-wp; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>; + sd-uhs-sdr50; + vmmc-supply = <&vcc3v3_sd>; + vqmmc-supply = <&vccio_sd>; + status = "okay"; +}; + +&tsadc { + rockchip,hw-tshut-mode = <1>; + rockchip,hw-tshut-polarity = <0>; + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +&usb_host0_xhci { + status = "okay"; +}; + +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; + +&usb_host1_xhci { + status = "okay"; +}; + +&usb2phy0 { + status = "okay"; +}; + +&usb2phy0_host { + phy-supply = <&vcc5v0_usb3_host>; + status = "okay"; +}; + +&usb2phy0_otg { + phy-supply = <&vcc5v0_usb2_otg>; + status = "okay"; +}; + +&usb2phy1 { + status = "okay"; +}; + +&usb2phy1_host { + phy-supply = <&vcc5v0_usb2_host>; + status = "okay"; +}; + +&usb2phy1_otg { + phy-supply = <&vcc5v0_usb2_host>; + status = "okay"; +}; + +&vop { + assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>; + assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>; + status = "okay"; +}; + +&vop_mmu { + status = "okay"; +}; + +&vp0 { + vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 { + reg = ; + remote-endpoint = <&hdmi_in_vp0>; + }; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts index 13e599a85eb83c..c164074ddf54c9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts @@ -648,6 +648,8 @@ flash@0 { }; &tsadc { + rockchip,hw-tshut-mode = <1>; + rockchip,hw-tshut-polarity = <0>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi index 9cc7aa3298d00d..de390d92c35e30 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3566-radxa-zero-3.dtsi @@ -493,7 +493,6 @@ &uart2 { }; &usb_host0_xhci { - dr_mode = "peripheral"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts index b5e67990dd0f8b..8e5c182ef76cda 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts @@ -20,9 +20,9 @@ &sdmmc0 { cap-mmc-highspeed; cap-sd-highspeed; disable-wp; - max-frequency = <150000000>; no-sdio; no-mmc; + sd-uhs-sdr50; pinctrl-names = "default"; pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>; vmmc-supply = <&vcc3v3_sd>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts 
b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts index ce2a5e1ccefc3f..d27eb37b5b35e8 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts @@ -39,12 +39,6 @@ &gmac0_tx_bus2 &gmac0_rx_bus2 &gmac0_rgmii_clk &gmac0_rgmii_bus>; - snps,reset-gpio = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>; - snps,reset-active-low; - /* Reset time is 15ms, 50ms for rtl8211f */ - snps,reset-delays-us = <0 15000 50000>; - tx_delay = <0x3c>; - rx_delay = <0x2f>; status = "okay"; }; @@ -61,12 +55,6 @@ &gmac1m1_tx_bus2 &gmac1m1_rx_bus2 &gmac1m1_rgmii_clk &gmac1m1_rgmii_bus>; - snps,reset-gpio = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>; - snps,reset-active-low; - /* Reset time is 15ms, 50ms for rtl8211f */ - snps,reset-delays-us = <0 15000 50000>; - tx_delay = <0x4f>; - rx_delay = <0x26>; status = "okay"; }; @@ -76,6 +64,9 @@ rgmii_phy0: ethernet-phy@1 { reg = <0x1>; pinctrl-0 = <ð_phy0_reset_pin>; pinctrl-names = "default"; + reset-assert-us = <20000>; + reset-deassert-us = <100000>; + reset-gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>; }; }; @@ -85,6 +76,9 @@ rgmii_phy1: ethernet-phy@1 { reg = <0x1>; pinctrl-0 = <ð_phy1_reset_pin>; pinctrl-names = "default"; + reset-assert-us = <20000>; + reset-deassert-us = <100000>; + reset-gpios = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts b/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts index c2dfffc638d13d..c491dc4d4947dc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-mecsbc.dts @@ -89,6 +89,20 @@ vdd_npu: regulator-vdd-npu { }; }; +&can0 { + compatible = "rockchip,rk3568v3-canfd", "rockchip,rk3568v2-canfd"; + pinctrl-names = "default"; + pinctrl-0 = <&can0m0_pins>; + status = "okay"; +}; + +&can1 { + compatible = "rockchip,rk3568v3-canfd", "rockchip,rk3568v2-canfd"; + pinctrl-names = "default"; + pinctrl-0 = <&can1m1_pins>; + status = "okay"; +}; + &combphy0 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts index a337f547caf538..6a02db4f073f29 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts @@ -13,7 +13,7 @@ / { model = "Hardkernel ODROID-M1"; - compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568"; + compatible = "hardkernel,odroid-m1", "rockchip,rk3568"; aliases { ethernet0 = &gmac0; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts b/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts index 6a998166003c9e..e601d9271ba898 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-qnap-ts433.dts @@ -6,12 +6,190 @@ /dts-v1/; +#include +#include #include #include "rk3568.dtsi" / { model = "Qnap TS-433-4G NAS System 4-Bay"; compatible = "qnap,ts433", "rockchip,rk3568"; + + aliases { + ethernet0 = &gmac0; + mmc0 = &sdhci; + rtc0 = &rtc_rv8263; + }; + + chosen { + stdout-path = "serial2:115200n8"; + }; + + keys { + compatible = "gpio-keys"; + pinctrl-0 = <©_button_pin>, <&reset_button_pin>; + pinctrl-names = "default"; + + key-copy { + label = "copy"; + gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + key-reset { + label = "reset"; + gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + leds { + compatible = "gpio-leds"; + + led-0 { + color = ; + function = LED_FUNCTION_DISK; + gpios = <&gpio1 RK_PD5 GPIO_ACTIVE_LOW>; + linux,default-trigger = 
"disk-activity"; + pinctrl-names = "default"; + pinctrl-0 = <&hdd1_led_pin>; + }; + + led-1 { + color = ; + function = LED_FUNCTION_DISK; + gpios = <&gpio1 RK_PD6 GPIO_ACTIVE_LOW>; + linux,default-trigger = "disk-activity"; + pinctrl-names = "default"; + pinctrl-0 = <&hdd2_led_pin>; + }; + + led-2 { + color = ; + function = LED_FUNCTION_DISK; + gpios = <&gpio1 RK_PD7 GPIO_ACTIVE_LOW>; + linux,default-trigger = "disk-activity"; + pinctrl-names = "default"; + pinctrl-0 = <&hdd3_led_pin>; + }; + + led-3 { + color = ; + function = LED_FUNCTION_DISK; + gpios = <&gpio2 RK_PA0 GPIO_ACTIVE_LOW>; + linux,default-trigger = "disk-activity"; + pinctrl-names = "default"; + pinctrl-0 = <&hdd4_led_pin>; + }; + }; + + dc_12v: regulator-dc-12v { + compatible = "regulator-fixed"; + regulator-name = "dc_12v"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + vcc3v3_pcie: regulator-vcc3v3-pcie { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_pcie"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + enable-active-high; + gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>; + vin-supply = <&dc_12v>; + }; + + vcc3v3_sys: regulator-vcc3v3-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&dc_12v>; + }; + + vcc5v0_host: regulator-vcc5v0-host { + compatible = "regulator-fixed"; + enable-active-high; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_host_en>; + gpio = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>; + regulator-name = "vcc5v0_host"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_usb>; + }; + + vcc5v0_otg: regulator-vcc5v0-otg { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_otg_en>; + regulator-name = "vcc5v0_otg"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_usb>; + }; + + vcc5v0_sys: regulator-vcc5v0-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&dc_12v>; + }; + + vcc5v0_usb: regulator-vcc5v0-usb { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_usb"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&dc_12v>; + }; +}; + +/* connected to usb_host0_xhci */ +&combphy0 { + status = "okay"; +}; + +/* connected to sata1 */ +&combphy1 { + status = "okay"; +}; + +/* connected to sata2 */ +&combphy2 { + status = "okay"; +}; + +&cpu0 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu1 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu2 { + cpu-supply = <&vdd_cpu>; +}; + +&cpu3 { + cpu-supply = <&vdd_cpu>; }; &gmac0 { @@ -20,35 +198,282 @@ &gmac0 { assigned-clock-rates = <0>, <125000000>; clock_in_out = "output"; phy-handle = <&rgmii_phy0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; pinctrl-names = "default"; pinctrl-0 = <&gmac0_miim &gmac0_tx_bus2 &gmac0_rx_bus2 &gmac0_rgmii_clk &gmac0_rgmii_bus>; - rx_delay = <0x2f>; - tx_delay = <0x3c>; + status = "okay"; +}; + +&gpu { + mali-supply = <&vdd_gpu>; status 
= "okay"; }; &i2c0 { + status = "okay"; + pmic@20 { compatible = "rockchip,rk809"; reg = <0x20>; interrupt-parent = <&gpio0>; - interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + interrupts = ; + #clock-cells = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int_l>; + system-power-controller; + vcc1-supply = <&vcc3v3_sys>; + vcc2-supply = <&vcc3v3_sys>; + vcc3-supply = <&vcc3v3_sys>; + vcc4-supply = <&vcc3v3_sys>; + vcc5-supply = <&vcc3v3_sys>; + vcc6-supply = <&vcc3v3_sys>; + vcc7-supply = <&vcc3v3_sys>; + vcc8-supply = <&vcc3v3_sys>; + vcc9-supply = <&vcc3v3_sys>; + wakeup-source; + + regulators { + vdd_logic: DCDC_REG1 { + regulator-name = "vdd_logic"; + regulator-always-on; + regulator-boot-on; + regulator-initial-mode = <0x2>; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_gpu: DCDC_REG2 { + regulator-name = "vdd_gpu"; + regulator-always-on; + regulator-initial-mode = <0x2>; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-initial-mode = <0x2>; + + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vdd_npu: DCDC_REG4 { + regulator-name = "vdd_npu"; + regulator-initial-mode = <0x2>; + regulator-min-microvolt = <500000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8: DCDC_REG5 { + regulator-name = "vcc_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda0v9_image: LDO_REG1 { + regulator-name = "vdda0v9_image"; + regulator-always-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda_0v9: LDO_REG2 { + regulator-name = "vdda_0v9"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda0v9_pmu: LDO_REG3 { + regulator-name = "vdda0v9_pmu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <900000>; + }; + }; + + vccio_acodec: LDO_REG4 { + regulator-name = "vccio_acodec"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vccio_sd: LDO_REG5 { + regulator-name = "vccio_sd"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_pmu: LDO_REG6 { + regulator-name = "vcc3v3_pmu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vcca_1v8: LDO_REG7 { + regulator-name = "vcca_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt 
= <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcca1v8_pmu: LDO_REG8 { + regulator-name = "vcca1v8_pmu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcca1v8_image: LDO_REG9 { + regulator-name = "vcca1v8_image"; + regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v3: SWITCH_REG1 { + regulator-name = "vcc_3v3"; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_sd: SWITCH_REG2 { + regulator-name = "vcc3v3_sd"; + /* + * turning this off breaks access to both + * PCIe controllers, perhaps due to the refclk generator + */ + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + }; + + vdd_cpu: regulator@40 { + compatible = "silergy,syr827"; + reg = <0x40>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_cpu"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1390000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc5v0_sys>; }; }; &i2c1 { status = "okay"; - rtc@51 { + rtc_rv8263: rtc@51 { compatible = "microcrystal,rv8263"; reg = <0x51>; wakeup-source; }; + + /* eeprom for vital-product-data on the mainboard */ + eeprom@54 { + compatible = "giantec,gt24c04a", "atmel,24c04"; + reg = <0x54>; + label = "VPD_MB"; + num-addresses = <2>; + pagesize = <16>; + read-only; + }; + + /* eeprom for vital-product-data on the backplane */ + eeprom@56 { + compatible = "giantec,gt24c04a", "atmel,24c04"; + reg = <0x56>; + label = "VPD_BP"; + num-addresses = <2>; + pagesize = <16>; + read-only; + }; }; &mdio0 { @@ -59,12 +484,82 @@ rgmii_phy0: ethernet-phy@0 { }; &pcie30phy { + data-lanes = <1 2>; status = "okay"; }; +/* Connected to a JMicron AHCI SATA controller */ &pcie3x1 { - /* The downstream dts has: rockchip,bifurcation, XXX: find out what this is about */ reset-gpios = <&gpio0 RK_PC7 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc3v3_pcie>; + status = "okay"; +}; + +/* Connected to the 2.5G NIC for the upper network jack */ +&pcie3x2 { + num-lanes = <1>; + reset-gpios = <&gpio2 RK_PD6 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc3v3_pcie>; + status = "okay"; +}; + +&pinctrl { + keys { + copy_button_pin: copy-button-pin { + rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + reset_button_pin: reset-button-pin { + rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + leds { + hdd1_led_pin: hdd1-led-pin { + rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + hdd2_led_pin: hdd2-led-pin { + rockchip,pins = <1 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + hdd3_led_pin: hdd3-led-pin { + rockchip,pins = <1 RK_PD7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + hdd4_led_pin: hdd4-led-pin { + rockchip,pins = <2 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + pmic { + pmic_int_l: pmic-int-l { + rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + usb { + vcc5v0_host_en: vcc5v0-host-en { + rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + vcc5v0_otg_en: vcc5v0-otg-en { + rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&pmu_io_domains { + vccio4-supply = <&vcc_1v8>; + vccio6-supply = <&vcc_1v8>; + status = 
"okay"; +}; + +&sata1 { + status = "okay"; +}; + +&sata2 { status = "okay"; }; @@ -75,6 +570,20 @@ &sdhci { status = "okay"; }; +&tsadc { + rockchip,hw-tshut-mode = <1>; + rockchip,hw-tshut-polarity = <0>; + status = "okay"; +}; + +/* + * Connected to an MCU, that provides access to more LEDs, + * buzzer, fan control and more. + */ +&uart0 { + status = "okay"; +}; + /* * Pins available on CN3 connector at TTL voltage level (3V3). * ,_ _. @@ -84,3 +593,53 @@ &sdhci { &uart2 { status = "okay"; }; + +&usb2phy0 { + status = "okay"; +}; + +/* connected to usb_host0_xhci */ +&usb2phy0_otg { + phy-supply = <&vcc5v0_otg>; + status = "okay"; +}; + +&usb2phy1 { + status = "okay"; +}; + +/* connected to usb_host1_ehci/ohci */ +&usb2phy1_host { + phy-supply = <&vcc5v0_host>; + status = "okay"; +}; + +/* connected to usb_host0_ehci/ohci */ +&usb2phy1_otg { + phy-supply = <&vcc5v0_host>; + status = "okay"; +}; + +/* right port backside */ +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +/* front port */ +&usb_host0_xhci { + dr_mode = "host"; + status = "okay"; +}; + +/* left port backside */ +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi index 45b03dcbbad455..19d309654bdb41 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi @@ -108,10 +108,6 @@ &cpu3 { cpu-supply = <&vdd_cpu>; }; -&display_subsystem { - status = "disabled"; -}; - &gpu { mali-supply = <&vdd_gpu>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts b/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts index 72ad74c38a2b40..84a0789fad96ad 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-radxa-e25.dts @@ -103,6 +103,10 @@ &combphy1 { phy-supply = <&vcc3v3_pcie30x1>; }; +&display_subsystem { + status = "disabled"; +}; + &pcie2x1 { pinctrl-names = "default"; pinctrl-0 = <&pcie20_reset_h>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso new file mode 100644 index 00000000000000..70c23e1bf14b70 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display-vz.dtso @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) +/* + * Device tree overlay for the WolfVision PF5 Visualizer display. + * + * Copyright (C) 2024 WolfVision GmbH. + */ + +/dts-v1/; +/plugin/; + +#include "rk3568-wolfvision-pf5-display.dtsi" + +&st7789 { + compatible = "jasonic,jt240mhqs-hwt-ek-e3", + "sitronix,st7789v"; + rotation = <270>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display.dtsi new file mode 100644 index 00000000000000..b22bb543ecbba6 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5-display.dtsi @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) +/* + * Device tree overlay base for the WolfVision PF5 displays. + * + * Copyright (C) 2024 WolfVision GmbH. 
+ */ + +/dts-v1/; +/plugin/; + +#include +#include +#include +#include +#include + +&{/} { + display_backlight: backlight { + compatible = "pwm-backlight"; + brightness-levels = <0 255>; + default-brightness-level = <255>; + num-interpolated-steps = <255>; + power-supply = <&vcc3v3_sd>; + pwms = <&pwm10 0 1000000 0>; + }; + + display_spi: spi { + compatible = "spi-gpio"; + #address-cells = <1>; + #size-cells = <0>; + cs-gpios = <&gpio3 RK_PA2 GPIO_ACTIVE_LOW>; + miso-gpios = <&gpio3 RK_PA1 GPIO_ACTIVE_HIGH>; + mosi-gpios = <&gpio3 RK_PB2 GPIO_ACTIVE_HIGH>; + num-chipselects = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&lcd_spi>; + sck-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_HIGH>; + + st7789: panel@0 { + compatible = "sitronix,st7789v"; + reg = <0>; + assigned-clocks = <&cru PLL_VPLL>; + assigned-clock-rates = <700000000>; + backlight = <&display_backlight>; + pinctrl-names = "default"; + pinctrl-0 = <&lcdc_clock &lcdc_data18 &lcd_rstn>; + power-supply = <&vcc3v3_sw>; + reset-gpios = <&gpio3 RK_PC4 GPIO_ACTIVE_LOW>; + spi-max-frequency = <100000>; + + port { + panel_in_vp2: endpoint { + remote-endpoint = <&vp2_out_rgb>; + }; + }; + }; + }; +}; + +&i2c1 { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + st1624: touchscreen@55 { + compatible = "sitronix,st1624", "sitronix,st1633"; + reg = <0x55>; + interrupt-parent = <&gpio0>; + interrupts = ; + gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&touch_int &touch_rstn>; + wakeup-source; + }; +}; + +&pinctrl { + display: display-pinctrl { + lcd_rstn: lcd-rstn-pinctrl { + rockchip,pins = <3 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + lcd_spi: lcd-spi-pinctrl { + rockchip,pins = + /* lcd_sdo */ + <3 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>, + /* lcd_csn */ + <3 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>, + /* lcd_scl */ + <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>, + /* lcd_sdi */ + <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + touchscreen: touchscreen-pinctrl { + touch_int: touch-int-pinctrl { + rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + touch_rstn: touch-rstn-pinctrl { + rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&pwm10 { + pinctrl-names = "default"; + pinctrl-0 = <&pwm10m1_pins>; + status = "okay"; +}; + +&vp2 { + #address-cells = <1>; + #size-cells = <0>; + + vp2_out_rgb: endpoint@ROCKCHIP_VOP2_EP_RGB0 { + reg = ; + remote-endpoint = <&panel_in_vp2>; + }; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi index f1be76a54ceb0c..0946310e8c1248 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi @@ -213,6 +213,45 @@ gmac0_mtl_tx_setup: tx-queues-config { }; }; + can0: can@fe570000 { + compatible = "rockchip,rk3568v2-canfd"; + reg = <0x0 0xfe570000 0x0 0x1000>; + interrupts = ; + clocks = <&cru CLK_CAN0>, <&cru PCLK_CAN0>; + clock-names = "baud", "pclk"; + resets = <&cru SRST_CAN0>, <&cru SRST_P_CAN0>; + reset-names = "core", "apb"; + pinctrl-names = "default"; + pinctrl-0 = <&can0m0_pins>; + status = "disabled"; + }; + + can1: can@fe580000 { + compatible = "rockchip,rk3568v2-canfd"; + reg = <0x0 0xfe580000 0x0 0x1000>; + interrupts = ; + clocks = <&cru CLK_CAN1>, <&cru PCLK_CAN1>; + clock-names = "baud", "pclk"; + resets = <&cru SRST_CAN1>, <&cru SRST_P_CAN1>; + reset-names = "core", "apb"; + pinctrl-names = "default"; + pinctrl-0 = <&can1m0_pins>; + status = "disabled"; + }; + + can2: can@fe590000 { + compatible = "rockchip,rk3568v2-canfd"; + 
reg = <0x0 0xfe590000 0x0 0x1000>; + interrupts = ; + clocks = <&cru CLK_CAN2>, <&cru PCLK_CAN2>; + clock-names = "baud", "pclk"; + resets = <&cru SRST_CAN2>, <&cru SRST_P_CAN2>; + reset-names = "core", "apb"; + pinctrl-names = "default"; + pinctrl-0 = <&can2m0_pins>; + status = "disabled"; + }; + combphy0: phy@fe820000 { compatible = "rockchip,rk3568-naneng-combphy"; reg = <0x0 0xfe820000 0x0 0x100>; @@ -257,6 +296,10 @@ power-domain@RK3568_PD_PIPE { }; }; +&rng { + status = "okay"; +}; + &usb_host0_xhci { phys = <&usb2phy0_otg>, <&combphy0 PHY_TYPE_USB3>; phy-names = "usb2-phy", "usb3-phy"; diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi index c72b3a608edd98..0ee0ada6f0ab0f 100644 --- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi @@ -1113,6 +1113,15 @@ sdhci: mmc@fe310000 { status = "disabled"; }; + rng: rng@fe388000 { + compatible = "rockchip,rk3568-rng"; + reg = <0x0 0xfe388000 0x0 0x4000>; + clocks = <&cru CLK_TRNG_NS>, <&cru HCLK_TRNG_NS>; + clock-names = "core", "ahb"; + resets = <&cru SRST_TRNG_NS>; + status = "disabled"; + }; + i2s0_8ch: i2s@fe400000 { compatible = "rockchip,rk3568-i2s-tdm"; reg = <0x0 0xfe400000 0x0 0x1000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi index 30db12c4fc82b5..d1368418502a5d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-base-pinctrl.dtsi @@ -2449,15 +2449,15 @@ sdiom1_pins: sdiom1-pins { /* sdio_clk_m1 */ <3 RK_PA5 2 &pcfg_pull_none>, /* sdio_cmd_m1 */ - <3 RK_PA4 2 &pcfg_pull_none>, + <3 RK_PA4 2 &pcfg_pull_up>, /* sdio_d0_m1 */ - <3 RK_PA0 2 &pcfg_pull_none>, + <3 RK_PA0 2 &pcfg_pull_up>, /* sdio_d1_m1 */ - <3 RK_PA1 2 &pcfg_pull_none>, + <3 RK_PA1 2 &pcfg_pull_up>, /* sdio_d2_m1 */ - <3 RK_PA2 2 &pcfg_pull_none>, + <3 RK_PA2 2 &pcfg_pull_up>, /* sdio_d3_m1 */ - <3 RK_PA3 2 &pcfg_pull_none>; + <3 RK_PA3 2 &pcfg_pull_up>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi index ee99166ebd46f9..d97d84b888375c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi @@ -1122,6 +1122,118 @@ power-domain@RK3588_PD_SDMMC { }; }; + vpu121: video-codec@fdb50000 { + compatible = "rockchip,rk3588-vpu121", "rockchip,rk3568-vpu"; + reg = <0x0 0xfdb50000 0x0 0x800>; + interrupts = ; + interrupt-names = "vdpu"; + clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>; + clock-names = "aclk", "hclk"; + iommus = <&vpu121_mmu>; + power-domains = <&power RK3588_PD_VDPU>; + }; + + vpu121_mmu: iommu@fdb50800 { + compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu"; + reg = <0x0 0xfdb50800 0x0 0x40>; + interrupts = ; + clock-names = "aclk", "iface"; + clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>; + power-domains = <&power RK3588_PD_VDPU>; + #iommu-cells = <0>; + }; + + rga: rga@fdb80000 { + compatible = "rockchip,rk3588-rga", "rockchip,rk3288-rga"; + reg = <0x0 0xfdb80000 0x0 0x180>; + interrupts = ; + clocks = <&cru ACLK_RGA2>, <&cru HCLK_RGA2>, <&cru CLK_RGA2_CORE>; + clock-names = "aclk", "hclk", "sclk"; + resets = <&cru SRST_RGA2_CORE>, <&cru SRST_A_RGA2>, <&cru SRST_H_RGA2>; + reset-names = "core", "axi", "ahb"; + power-domains = <&power RK3588_PD_VDPU>; + }; + + vepu121_0: video-codec@fdba0000 { + compatible = "rockchip,rk3588-vepu121"; + reg = <0x0 0xfdba0000 0x0 0x800>; + interrupts = ; + clocks = <&cru ACLK_JPEG_ENCODER0>, 
<&cru HCLK_JPEG_ENCODER0>; + clock-names = "aclk", "hclk"; + iommus = <&vepu121_0_mmu>; + power-domains = <&power RK3588_PD_VDPU>; + }; + + vepu121_0_mmu: iommu@fdba0800 { + compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu"; + reg = <0x0 0xfdba0800 0x0 0x40>; + interrupts = ; + clocks = <&cru ACLK_JPEG_ENCODER0>, <&cru HCLK_JPEG_ENCODER0>; + clock-names = "aclk", "iface"; + power-domains = <&power RK3588_PD_VDPU>; + #iommu-cells = <0>; + }; + + vepu121_1: video-codec@fdba4000 { + compatible = "rockchip,rk3588-vepu121"; + reg = <0x0 0xfdba4000 0x0 0x800>; + interrupts = ; + clocks = <&cru ACLK_JPEG_ENCODER1>, <&cru HCLK_JPEG_ENCODER1>; + clock-names = "aclk", "hclk"; + iommus = <&vepu121_1_mmu>; + power-domains = <&power RK3588_PD_VDPU>; + }; + + vepu121_1_mmu: iommu@fdba4800 { + compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu"; + reg = <0x0 0xfdba4800 0x0 0x40>; + interrupts = ; + clocks = <&cru ACLK_JPEG_ENCODER1>, <&cru HCLK_JPEG_ENCODER1>; + clock-names = "aclk", "iface"; + power-domains = <&power RK3588_PD_VDPU>; + #iommu-cells = <0>; + }; + + vepu121_2: video-codec@fdba8000 { + compatible = "rockchip,rk3588-vepu121"; + reg = <0x0 0xfdba8000 0x0 0x800>; + interrupts = ; + clocks = <&cru ACLK_JPEG_ENCODER2>, <&cru HCLK_JPEG_ENCODER2>; + clock-names = "aclk", "hclk"; + iommus = <&vepu121_2_mmu>; + power-domains = <&power RK3588_PD_VDPU>; + }; + + vepu121_2_mmu: iommu@fdba8800 { + compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu"; + reg = <0x0 0xfdba8800 0x0 0x40>; + interrupts = ; + clocks = <&cru ACLK_JPEG_ENCODER2>, <&cru HCLK_JPEG_ENCODER2>; + clock-names = "aclk", "iface"; + power-domains = <&power RK3588_PD_VDPU>; + #iommu-cells = <0>; + }; + + vepu121_3: video-codec@fdbac000 { + compatible = "rockchip,rk3588-vepu121"; + reg = <0x0 0xfdbac000 0x0 0x800>; + interrupts = ; + clocks = <&cru ACLK_JPEG_ENCODER3>, <&cru HCLK_JPEG_ENCODER3>; + clock-names = "aclk", "hclk"; + iommus = <&vepu121_3_mmu>; + power-domains = <&power RK3588_PD_VDPU>; + }; + + vepu121_3_mmu: iommu@fdbac800 { + compatible = "rockchip,rk3588-iommu", "rockchip,rk3568-iommu"; + reg = <0x0 0xfdbac800 0x0 0x40>; + interrupts = ; + clocks = <&cru ACLK_JPEG_ENCODER3>, <&cru HCLK_JPEG_ENCODER3>; + clock-names = "aclk", "iface"; + power-domains = <&power RK3588_PD_VDPU>; + #iommu-cells = <0>; + }; + av1d: video-codec@fdc70000 { compatible = "rockchip,rk3588-av1-vpu"; reg = <0x0 0xfdc70000 0x0 0x800>; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts new file mode 100644 index 00000000000000..6418286efe40d3 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2024 Rockchip Electronics Co., Ltd. 
+ * + */ + +/dts-v1/; + +#include +#include "rk3588-coolpi-cm5.dtsi" + +/ { + model = "CoolPi CM5 GenBook"; + compatible = "coolpi,pi-cm5-genbook", "coolpi,pi-cm5", "rockchip,rk3588"; + + backlight: backlight { + compatible = "pwm-backlight"; + enable-gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&bl_en>; + power-supply = <&vcc12v_dcin>; + pwms = <&pwm6 0 25000 0>; + }; + + battery: battery { + compatible = "simple-battery"; + charge-full-design-microamp-hours = <9800000>; + voltage-max-design-microvolt = <4350000>; + voltage-min-design-microvolt = <3000000>; + }; + + charger: dc-charger { + compatible = "gpio-charger"; + charger-type = "mains"; + gpios = <&gpio1 RK_PC0 GPIO_ACTIVE_LOW>; + }; + + leds: leds { + compatible = "gpio-leds"; + + heartbeat_led: led-0 { + color = ; + function = LED_FUNCTION_STATUS; + gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + + wlan_led: led-1 { + color = ; + function = LED_FUNCTION_WLAN; + gpios = <&gpio0 RK_PC5 GPIO_ACTIVE_HIGH>; + }; + + charging_red: led-2 { + function = LED_FUNCTION_CHARGING; + color = ; + gpios = <&gpio3 RK_PD2 GPIO_ACTIVE_HIGH>; + }; + }; + + vcc12v_dcin: vcc12v-dcin-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc12v_dcin"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + vcc_sys: vcc-sys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <7000000>; + regulator-max-microvolt = <7000000>; + vin-supply = <&vcc12v_dcin>; + }; + + vcc5v0_sys: vcc5v0-sys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <7000000>; + regulator-max-microvolt = <7000000>; + vin-supply = <&vcc_sys>; + }; + + vcc3v3_sys: vcc3v3-sys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc3v3_lcd: vcc3v3-lcd-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_lcd"; + enable-active-high; + gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&lcdpwr_en>; + vin-supply = <&vcc3v3_sys>; + }; + + vcc5v0_usb: vcc5v0-usb-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_usb"; + regulator-boot-on; + regulator-always-on; + enable-active-high; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio1 RK_PD5 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb_pwren>; + vin-supply = <&vcc_sys>; + }; + + vcc5v0_usb_host0: vcc5v0_usb30_host: vcc5v0-usb-host-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_host"; + regulator-boot-on; + regulator-always-on; + enable-active-high; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio1 RK_PA7 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb_host_pwren>; + vin-supply = <&vcc5v0_usb>; + }; +}; + +&i2c4 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c4m3_xfer>; + + cw2015@62 { + compatible = "cellwise,cw2015"; + reg = <0x62>; + + cellwise,battery-profile = /bits/ 8 < + 0x17 0x67 0x69 0x63 0x63 0x62 0x62 0x5F + 0x52 0x73 0x4C 0x5A 0x5B 0x4B 0x42 
0x3A + 0x33 0x2D 0x29 0x28 0x2E 0x31 0x3C 0x49 + 0x2C 0x2C 0x0C 0xCD 0x30 0x51 0x50 0x66 + 0x74 0x74 0x75 0x78 0x41 0x1B 0x84 0x5F + 0x0B 0x34 0x1C 0x45 0x89 0x92 0xA0 0x13 + 0x2C 0x55 0xAB 0xCB 0x80 0x5E 0x7B 0xCB + 0x2F 0x00 0x64 0xA5 0xB5 0x10 0x18 0x21 + >; + + cellwise,monitor-interval-ms = <3000>; + monitored-battery = <&battery>; + power-supplies = <&charger>; + }; +}; + +&i2c5 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c5m3_xfer>; + + touchpad: touchpad@2c { + compatible = "hid-over-i2c"; + reg = <0x2c>; + interrupt-parent = <&gpio1>; + interrupts = ; + hid-descr-addr = <0x0020>; + }; +}; + +&gmac0 { + status = "disabled"; +}; + +/* M.2 E-Key */ +&pcie2x1l0 { + reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc3v3_sys>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_clkreq &pcie_wake &pcie_rst &wifi_pwron &bt_pwron>; + status = "okay"; +}; + +&pcie2x1l2 { + status = "disabled"; +}; + +&pcie30phy { + status = "okay"; +}; + +/* M.2 M-Key ssd */ +&pcie3x4 { + reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc3v3_sys>; + status = "okay"; +}; + +&pinctrl { + lcd { + lcdpwr_en: lcdpwr-en { + rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + bl_en: bl-en { + rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + usb { + usb_pwren: usb-pwren { + rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + usb_otg_pwren: usb-otg-pwren { + rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + usb_host_pwren: usb-host-pwren { + rockchip,pins = <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + wifi { + bt_pwron: bt-pwron { + rockchip,pins = <3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + pcie_clkreq: pcie-clkreq { + rockchip,pins = <4 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + pcie_rst: pcie-rst { + rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + wifi_pwron: wifi-pwron { + rockchip,pins = <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + pcie_wake: pcie-wake { + rockchip,pins = <4 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; +}; + +&pwm6 { + pinctrl-0 = <&pwm6m1_pins>; + status = "okay"; +}; + +&sdmmc { + status = "disabled"; +}; + +&sfc { + pinctrl-names = "default"; + pinctrl-0 = <&fspim2_pins>; + status = "okay"; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0x0>; + spi-max-frequency = <100000000>; + spi-rx-bus-width = <4>; + spi-tx-bus-width = <1>; + }; +}; + +&u2phy0 { + status = "okay"; +}; + +&u2phy0_otg { + status = "okay"; +}; + +&usbdp_phy0 { + status = "okay"; +}; + +&u2phy1 { + status = "okay"; +}; + +&u2phy1_otg { + status = "okay"; +}; + +&u2phy2 { + status = "okay"; +}; + +&u2phy3 { + status = "okay"; +}; + +&u2phy2_host { + phy-supply = <&vcc5v0_usb_host0>; + status = "okay"; +}; + +&u2phy3_host { + phy-supply = <&vcc5v0_usb>; + status = "okay"; +}; + +&usbdp_phy1 { + status = "okay"; +}; + +/* For Keypad */ +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +/* Type C port */ +&usb_host0_xhci { + dr_mode = "peripheral"; + maximum-speed = "high-speed"; + status = "okay"; +}; + +/* connected to a HUB for camera and BT */ +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; + +/* USB A out */ +&usb_host1_xhci { + dr_mode = "host"; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6-lts.dts b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6-lts.dts new file mode 100644 index 00000000000000..2d92bbb4027d3a --- /dev/null +++ 
b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6-lts.dts @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2021 Rockchip Electronics Co., Ltd. + * Copyright (c) 2023 Thomas McKahan + * Copyright (c) 2024 Linaro Ltd. + * + */ + +/dts-v1/; + +#include "rk3588-nanopc-t6.dtsi" + +/ { + model = "FriendlyElec NanoPC-T6 LTS"; + compatible = "friendlyarm,nanopc-t6-lts", "rockchip,rk3588"; + + /* provide power for on-board USB 2.0 hub */ + vcc5v0_usb20_host: vcc5v0-usb20-host-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio1 RK_PA4 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&usb20_host_pwren>; + pinctrl-names = "default"; + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <5000000>; + regulator-min-microvolt = <5000000>; + regulator-name = "vcc5v0_usb20_host"; + vin-supply = <&vcc5v0_sys>; + }; +}; + +&pinctrl { + usb { + usb20_host_pwren: usb20-host-pwren { + rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&u2phy1 { + status = "okay"; +}; + +&u2phy1_otg { + status = "okay"; +}; + +&u2phy2_host { + phy-supply = <&vcc5v0_usb20_host>; +}; + +&usbdp_phy1 { + status = "okay"; +}; + +&usb_host1_xhci { + dr_mode = "host"; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts index ad8e36a339dc45..92321c1d3ff10e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts @@ -2,175 +2,18 @@ /* * Copyright (c) 2021 Rockchip Electronics Co., Ltd. * Copyright (c) 2023 Thomas McKahan + * Copyright (c) 2024 Linaro Ltd. * */ /dts-v1/; -#include -#include -#include -#include "rk3588.dtsi" +#include "rk3588-nanopc-t6.dtsi" / { model = "FriendlyElec NanoPC-T6"; compatible = "friendlyarm,nanopc-t6", "rockchip,rk3588"; - aliases { - mmc0 = &sdhci; - mmc1 = &sdmmc; - }; - - chosen { - stdout-path = "serial2:1500000n8"; - }; - - leds { - compatible = "gpio-leds"; - - sys_led: led-0 { - gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>; - label = "system-led"; - linux,default-trigger = "heartbeat"; - pinctrl-names = "default"; - pinctrl-0 = <&sys_led_pin>; - }; - - usr_led: led-1 { - gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>; - label = "user-led"; - pinctrl-names = "default"; - pinctrl-0 = <&usr_led_pin>; - }; - }; - - sound { - compatible = "simple-audio-card"; - pinctrl-names = "default"; - pinctrl-0 = <&hp_det>; - - simple-audio-card,name = "realtek,rt5616-codec"; - simple-audio-card,format = "i2s"; - simple-audio-card,mclk-fs = <256>; - - simple-audio-card,hp-det-gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_LOW>; - simple-audio-card,hp-pin-name = "Headphones"; - - simple-audio-card,widgets = - "Headphone", "Headphones", - "Microphone", "Microphone Jack"; - simple-audio-card,routing = - "Headphones", "HPOL", - "Headphones", "HPOR", - "MIC1", "Microphone Jack", - "Microphone Jack", "micbias1"; - - simple-audio-card,cpu { - sound-dai = <&i2s0_8ch>; - }; - simple-audio-card,codec { - sound-dai = <&rt5616>; - }; - }; - - vcc12v_dcin: vcc12v-dcin-regulator { - compatible = "regulator-fixed"; - regulator-name = "vcc12v_dcin"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <12000000>; - regulator-max-microvolt = <12000000>; - }; - - /* vcc5v0_sys powers peripherals */ - vcc5v0_sys: vcc5v0-sys-regulator { - compatible = "regulator-fixed"; - regulator-name = "vcc5v0_sys"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <5000000>; - 
regulator-max-microvolt = <5000000>; - vin-supply = <&vcc12v_dcin>; - }; - - /* vcc4v0_sys powers the RK806, RK860's */ - vcc4v0_sys: vcc4v0-sys-regulator { - compatible = "regulator-fixed"; - regulator-name = "vcc4v0_sys"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <4000000>; - regulator-max-microvolt = <4000000>; - vin-supply = <&vcc12v_dcin>; - }; - - vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator { - compatible = "regulator-fixed"; - regulator-name = "vcc-1v1-nldo-s3"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <1100000>; - regulator-max-microvolt = <1100000>; - vin-supply = <&vcc4v0_sys>; - }; - - vcc_3v3_pcie20: vcc3v3-pcie20-regulator { - compatible = "regulator-fixed"; - regulator-name = "vcc_3v3_pcie20"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - vin-supply = <&vcc_3v3_s3>; - }; - - vbus5v0_typec: vbus5v0-typec-regulator { - compatible = "regulator-fixed"; - enable-active-high; - gpio = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&typec5v_pwren>; - regulator-name = "vbus5v0_typec"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - vin-supply = <&vcc5v0_sys>; - }; - - vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator { - compatible = "regulator-fixed"; - enable-active-high; - gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&pcie_m2_1_pwren>; - regulator-name = "vcc3v3_pcie2x1l0"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - vin-supply = <&vcc5v0_sys>; - }; - - vcc3v3_pcie30: vcc3v3-pcie30-regulator { - compatible = "regulator-fixed"; - enable-active-high; - gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&pcie_m2_0_pwren>; - regulator-name = "vcc3v3_pcie30"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - vin-supply = <&vcc5v0_sys>; - }; - - vcc3v3_sd_s0: vcc3v3-sd-s0-regulator { - compatible = "regulator-fixed"; - enable-active-low; - gpio = <&gpio4 RK_PA5 GPIO_ACTIVE_LOW>; - regulator-boot-on; - regulator-max-microvolt = <3300000>; - regulator-min-microvolt = <3300000>; - regulator-name = "vcc3v3_sd_s0"; - vin-supply = <&vcc_3v3_s3>; - }; - vdd_4g_3v3: vdd-4g-3v3-regulator { compatible = "regulator-fixed"; enable-active-high; @@ -184,762 +27,14 @@ vdd_4g_3v3: vdd-4g-3v3-regulator { }; }; -&combphy0_ps { - status = "okay"; -}; - -&combphy1_ps { - status = "okay"; -}; - -&combphy2_psu { - status = "okay"; -}; - -&cpu_l0 { - cpu-supply = <&vdd_cpu_lit_s0>; -}; - -&cpu_l1 { - cpu-supply = <&vdd_cpu_lit_s0>; -}; - -&cpu_l2 { - cpu-supply = <&vdd_cpu_lit_s0>; -}; - -&cpu_l3 { - cpu-supply = <&vdd_cpu_lit_s0>; -}; - -&cpu_b0 { - cpu-supply = <&vdd_cpu_big0_s0>; -}; - -&cpu_b1 { - cpu-supply = <&vdd_cpu_big0_s0>; -}; - -&cpu_b2 { - cpu-supply = <&vdd_cpu_big1_s0>; -}; - -&cpu_b3 { - cpu-supply = <&vdd_cpu_big1_s0>; -}; - -&gpio0 { - gpio-line-names = /* GPIO0 A0-A7 */ - "", "", "", "", - "", "", "", "", - /* GPIO0 B0-B7 */ - "", "", "", "", - "", "", "", "", - /* GPIO0 C0-C7 */ - "", "", "", "", - "HEADER_10", "HEADER_08", "HEADER_32", "", - /* GPIO0 D0-D7 */ - "", "", "", "", - "", "", "", ""; -}; - -&gpio1 { - gpio-line-names = /* GPIO1 A0-A7 */ - "HEADER_27", "HEADER_28", "", "", - "", "", "", "HEADER_15", - /* GPIO1 B0-B7 */ - "HEADER_26", "HEADER_21", "HEADER_19", "HEADER_23", - "HEADER_24", "HEADER_22", "", "", - /* GPIO1 C0-C7 */ - "", "", "", "", - 
"", "", "", "", - /* GPIO1 D0-D7 */ - "", "", "", "", - "", "", "HEADER_05", "HEADER_03"; -}; - -&gpio2 { - gpio-line-names = /* GPIO2 A0-A7 */ - "", "", "", "", - "", "", "", "", - /* GPIO2 B0-B7 */ - "", "", "", "", - "", "", "", "", - /* GPIO2 C0-C7 */ - "", "CSI1_11", "CSI1_12", "", - "", "", "", "", - /* GPIO2 D0-D7 */ - "", "", "", "", - "", "", "", ""; -}; - -&gpio3 { - gpio-line-names = /* GPIO3 A0-A7 */ - "HEADER_35", "HEADER_38", "HEADER_40", "HEADER_36", - "HEADER_37", "", "DSI0_12", "", - /* GPIO3 B0-B7 */ - "HEADER_33", "DSI0_10", "HEADER_07", "HEADER_16", - "HEADER_18", "HEADER_29", "HEADER_31", "HEADER_12", - /* GPIO3 C0-C7 */ - "DSI0_08", "DSI0_14", "HEADER_11", "HEADER_13", - "", "", "", "", - /* GPIO3 D0-D7 */ - "", "", "", "", - "", "DSI1_10", "", ""; -}; - -&gpio4 { - gpio-line-names = /* GPIO4 A0-A7 */ - "DSI1_08", "DSI1_14", "", "DSI1_12", - "", "", "", "", - /* GPIO4 B0-B7 */ - "", "", "", "", - "", "", "", "", - /* GPIO4 C0-C7 */ - "", "", "", "", - "CSI0_11", "CSI0_12", "", "", - /* GPIO4 D0-D7 */ - "", "", "", "", - "", "", "", ""; -}; - -&i2c0 { - pinctrl-names = "default"; - pinctrl-0 = <&i2c0m2_xfer>; - status = "okay"; - - vdd_cpu_big0_s0: regulator@42 { - compatible = "rockchip,rk8602"; - reg = <0x42>; - fcs,suspend-voltage-selector = <1>; - regulator-name = "vdd_cpu_big0_s0"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <550000>; - regulator-max-microvolt = <1050000>; - regulator-ramp-delay = <2300>; - vin-supply = <&vcc4v0_sys>; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vdd_cpu_big1_s0: regulator@43 { - compatible = "rockchip,rk8603", "rockchip,rk8602"; - reg = <0x43>; - fcs,suspend-voltage-selector = <1>; - regulator-name = "vdd_cpu_big1_s0"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <550000>; - regulator-max-microvolt = <1050000>; - regulator-ramp-delay = <2300>; - vin-supply = <&vcc4v0_sys>; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; -}; - -&i2c2 { - status = "okay"; - - vdd_npu_s0: regulator@42 { - compatible = "rockchip,rk8602"; - reg = <0x42>; - rockchip,suspend-voltage-selector = <1>; - regulator-name = "vdd_npu_s0"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <550000>; - regulator-max-microvolt = <950000>; - regulator-ramp-delay = <2300>; - vin-supply = <&vcc4v0_sys>; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; -}; - -&i2c6 { - clock-frequency = <200000>; - status = "okay"; - - fusb302: typec-portc@22 { - compatible = "fcs,fusb302"; - reg = <0x22>; - interrupt-parent = <&gpio0>; - interrupts = ; - pinctrl-0 = <&usbc0_int>; - pinctrl-names = "default"; - vbus-supply = <&vbus5v0_typec>; - - connector { - compatible = "usb-c-connector"; - data-role = "dual"; - label = "USB-C"; - power-role = "dual"; - try-power-role = "sink"; - source-pdos = ; - sink-pdos = ; - op-sink-microwatt = <1000000>; - }; - }; - - hym8563: rtc@51 { - compatible = "haoyu,hym8563"; - reg = <0x51>; - #clock-cells = <0>; - clock-output-names = "hym8563"; - pinctrl-names = "default"; - pinctrl-0 = <&hym8563_int>; - interrupt-parent = <&gpio0>; - interrupts = ; - wakeup-source; - }; -}; - -&i2c7 { - clock-frequency = <200000>; - status = "okay"; - - rt5616: codec@1b { - compatible = "realtek,rt5616"; - reg = <0x1b>; - clocks = <&cru I2S0_8CH_MCLKOUT>; - clock-names = "mclk"; - #sound-dai-cells = <0>; - assigned-clocks = <&cru I2S0_8CH_MCLKOUT>; - assigned-clock-rates = <12288000>; - - port { - rt5616_p0_0: endpoint { - remote-endpoint 
= <&i2s0_8ch_p0_0>; - }; - }; - }; - - /* connected with MIPI-CSI1 */ -}; - -&i2c8 { - pinctrl-0 = <&i2c8m2_xfer>; -}; - -&i2s0_8ch { - pinctrl-names = "default"; - pinctrl-0 = <&i2s0_lrck - &i2s0_mclk - &i2s0_sclk - &i2s0_sdi0 - &i2s0_sdo0>; - status = "okay"; - - i2s0_8ch_p0: port { - i2s0_8ch_p0_0: endpoint { - dai-format = "i2s"; - mclk-fs = <256>; - remote-endpoint = <&rt5616_p0_0>; - }; - }; -}; - -&pcie2x1l0 { - reset-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>; - vpcie3v3-supply = <&vcc_3v3_pcie20>; - pinctrl-names = "default"; - pinctrl-0 = <&pcie2_0_rst>; - status = "okay"; -}; - -&pcie2x1l1 { - reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>; - vpcie3v3-supply = <&vcc3v3_pcie2x1l0>; - pinctrl-names = "default"; - pinctrl-0 = <&pcie2_1_rst>; - status = "okay"; -}; - -&pcie2x1l2 { - reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>; - vpcie3v3-supply = <&vcc_3v3_pcie20>; - pinctrl-names = "default"; - pinctrl-0 = <&pcie2_2_rst>; - status = "okay"; -}; - -&pcie30phy { - status = "okay"; -}; - -&pcie3x4 { - reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; - vpcie3v3-supply = <&vcc3v3_pcie30>; - status = "okay"; -}; - &pinctrl { - gpio-leds { - sys_led_pin: sys-led-pin { - rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>; - }; - - usr_led_pin: usr-led-pin { - rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>; - }; - }; - - headphone { - hp_det: hp-det { - rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>; - }; - }; - - hym8563 { - hym8563_int: hym8563-int { - rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; - }; - }; - - pcie { - pcie2_0_rst: pcie2-0-rst { - rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>; - }; - - pcie2_1_rst: pcie2-1-rst { - rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>; - }; - - pcie2_2_rst: pcie2-2-rst { - rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>; - }; - - pcie_m2_0_pwren: pcie-m20-pwren { - rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>; - }; - - pcie_m2_1_pwren: pcie-m21-pwren { - rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>; - }; - }; - usb { pin_4g_lte_pwren: 4g-lte-pwren { rockchip,pins = <4 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>; }; - - typec5v_pwren: typec5v-pwren { - rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>; - }; - - usbc0_int: usbc0-int { - rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>; - }; - }; -}; - -&pwm1 { - pinctrl-0 = <&pwm1m1_pins>; - status = "okay"; -}; - -&saradc { - vref-supply = <&avcc_1v8_s0>; - status = "okay"; -}; - -&sdhci { - bus-width = <8>; - no-sdio; - no-sd; - non-removable; - max-frequency = <200000000>; - mmc-hs400-1_8v; - mmc-hs400-enhanced-strobe; - status = "okay"; -}; - -&sdmmc { - bus-width = <4>; - cap-mmc-highspeed; - cap-sd-highspeed; - cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>; - disable-wp; - no-mmc; - no-sdio; - sd-uhs-sdr104; - vmmc-supply = <&vcc3v3_sd_s0>; - vqmmc-supply = <&vccio_sd_s0>; - status = "okay"; -}; - -&spi2 { - status = "okay"; - assigned-clocks = <&cru CLK_SPI2>; - assigned-clock-rates = <200000000>; - pinctrl-names = "default"; - pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>; - num-cs = <1>; - - pmic@0 { - compatible = "rockchip,rk806"; - spi-max-frequency = <1000000>; - reg = <0x0>; - - interrupt-parent = <&gpio0>; - interrupts = <7 IRQ_TYPE_LEVEL_LOW>; - - pinctrl-names = "default"; - pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>, - <&rk806_dvs2_null>, <&rk806_dvs3_null>; - - system-power-controller; - - vcc1-supply = <&vcc4v0_sys>; - vcc2-supply = <&vcc4v0_sys>; - vcc3-supply = <&vcc4v0_sys>; - vcc4-supply = 
<&vcc4v0_sys>; - vcc5-supply = <&vcc4v0_sys>; - vcc6-supply = <&vcc4v0_sys>; - vcc7-supply = <&vcc4v0_sys>; - vcc8-supply = <&vcc4v0_sys>; - vcc9-supply = <&vcc4v0_sys>; - vcc10-supply = <&vcc4v0_sys>; - vcc11-supply = <&vcc_2v0_pldo_s3>; - vcc12-supply = <&vcc4v0_sys>; - vcc13-supply = <&vcc_1v1_nldo_s3>; - vcc14-supply = <&vcc_1v1_nldo_s3>; - vcca-supply = <&vcc4v0_sys>; - - gpio-controller; - #gpio-cells = <2>; - - rk806_dvs1_null: dvs1-null-pins { - pins = "gpio_pwrctrl1"; - function = "pin_fun0"; - }; - - rk806_dvs2_null: dvs2-null-pins { - pins = "gpio_pwrctrl2"; - function = "pin_fun0"; - }; - - rk806_dvs3_null: dvs3-null-pins { - pins = "gpio_pwrctrl3"; - function = "pin_fun0"; - }; - - regulators { - vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 { - regulator-boot-on; - regulator-min-microvolt = <550000>; - regulator-max-microvolt = <950000>; - regulator-ramp-delay = <12500>; - regulator-name = "vdd_gpu_s0"; - regulator-enable-ramp-delay = <400>; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <550000>; - regulator-max-microvolt = <950000>; - regulator-ramp-delay = <12500>; - regulator-name = "vdd_cpu_lit_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vdd_log_s0: dcdc-reg3 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <675000>; - regulator-max-microvolt = <750000>; - regulator-ramp-delay = <12500>; - regulator-name = "vdd_log_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - regulator-suspend-microvolt = <750000>; - }; - }; - - vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <550000>; - regulator-max-microvolt = <950000>; - regulator-init-microvolt = <750000>; - regulator-ramp-delay = <12500>; - regulator-name = "vdd_vdenc_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vdd_ddr_s0: dcdc-reg5 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <675000>; - regulator-max-microvolt = <900000>; - regulator-ramp-delay = <12500>; - regulator-name = "vdd_ddr_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - regulator-suspend-microvolt = <850000>; - }; - }; - - vdd2_ddr_s3: dcdc-reg6 { - regulator-always-on; - regulator-boot-on; - regulator-name = "vdd2_ddr_s3"; - - regulator-state-mem { - regulator-on-in-suspend; - }; - }; - - vcc_2v0_pldo_s3: dcdc-reg7 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <2000000>; - regulator-max-microvolt = <2000000>; - regulator-ramp-delay = <12500>; - regulator-name = "vdd_2v0_pldo_s3"; - - regulator-state-mem { - regulator-on-in-suspend; - regulator-suspend-microvolt = <2000000>; - }; - }; - - vcc_3v3_s3: dcdc-reg8 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-name = "vcc_3v3_s3"; - - regulator-state-mem { - regulator-on-in-suspend; - regulator-suspend-microvolt = <3300000>; - }; - }; - - vddq_ddr_s0: dcdc-reg9 { - regulator-always-on; - regulator-boot-on; - regulator-name = "vddq_ddr_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vcc_1v8_s3: dcdc-reg10 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-name = "vcc_1v8_s3"; - - regulator-state-mem { - regulator-on-in-suspend; - regulator-suspend-microvolt = 
<1800000>; - }; - }; - - avcc_1v8_s0: pldo-reg1 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-name = "avcc_1v8_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vcc_1v8_s0: pldo-reg2 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-name = "vcc_1v8_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - regulator-suspend-microvolt = <1800000>; - }; - }; - - avdd_1v2_s0: pldo-reg3 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1200000>; - regulator-name = "avdd_1v2_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vcc_3v3_s0: pldo-reg4 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-ramp-delay = <12500>; - regulator-name = "vcc_3v3_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vccio_sd_s0: pldo-reg5 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; - regulator-ramp-delay = <12500>; - regulator-name = "vccio_sd_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - pldo6_s3: pldo-reg6 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-name = "pldo6_s3"; - - regulator-state-mem { - regulator-on-in-suspend; - regulator-suspend-microvolt = <1800000>; - }; - }; - - vdd_0v75_s3: nldo-reg1 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <750000>; - regulator-max-microvolt = <750000>; - regulator-name = "vdd_0v75_s3"; - - regulator-state-mem { - regulator-on-in-suspend; - regulator-suspend-microvolt = <750000>; - }; - }; - - vdd_ddr_pll_s0: nldo-reg2 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <850000>; - regulator-max-microvolt = <850000>; - regulator-name = "vdd_ddr_pll_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - regulator-suspend-microvolt = <850000>; - }; - }; - - avdd_0v75_s0: nldo-reg3 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <750000>; - regulator-max-microvolt = <750000>; - regulator-name = "avdd_0v75_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vdd_0v85_s0: nldo-reg4 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <850000>; - regulator-max-microvolt = <850000>; - regulator-name = "vdd_0v85_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - - vdd_0v75_s0: nldo-reg5 { - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <750000>; - regulator-max-microvolt = <750000>; - regulator-name = "vdd_0v75_s0"; - - regulator-state-mem { - regulator-off-in-suspend; - }; - }; - }; }; }; -&tsadc { - status = "okay"; -}; - -&uart2 { - pinctrl-0 = <&uart2m0_xfer>; - status = "okay"; -}; - &u2phy2_host { phy-supply = <&vdd_4g_3v3>; - status = "okay"; -}; - -&u2phy3_host { - status = "okay"; -}; - -&u2phy2 { - status = "okay"; -}; - -&u2phy3 { - status = "okay"; -}; - -&usb_host0_ehci { - status = "okay"; -}; - -&usb_host0_ohci { - status = "okay"; -}; - -&usb_host1_ehci { - status = "okay"; -}; - -&usb_host1_ohci { - status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi 
b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi new file mode 100644 index 00000000000000..fc131789b4c327 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi @@ -0,0 +1,1041 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2021 Rockchip Electronics Co., Ltd. + * Copyright (c) 2023 Thomas McKahan + * + */ + +/dts-v1/; + +#include +#include +#include +#include +#include "rk3588.dtsi" + +/ { + model = "FriendlyElec NanoPC-T6"; + compatible = "friendlyarm,nanopc-t6", "rockchip,rk3588"; + + aliases { + mmc0 = &sdhci; + mmc1 = &sdmmc; + }; + + adc-keys-0 { + compatible = "adc-keys"; + io-channels = <&saradc 0>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1800000>; + poll-interval = <100>; + + button-maskrom { + label = "Mask Rom"; + linux,code = ; + press-threshold-microvolt = <2000>; + }; + }; + + chosen { + stdout-path = "serial2:1500000n8"; + }; + + ir-receiver { + compatible = "gpio-ir-receiver"; + gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&ir_receiver_pin>; + }; + + leds { + compatible = "gpio-leds"; + + sys_led: led-0 { + gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>; + label = "system-led"; + linux,default-trigger = "heartbeat"; + pinctrl-names = "default"; + pinctrl-0 = <&sys_led_pin>; + }; + + usr_led: led-1 { + gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>; + label = "user-led"; + pinctrl-names = "default"; + pinctrl-0 = <&usr_led_pin>; + }; + }; + + sound { + compatible = "simple-audio-card"; + pinctrl-names = "default"; + pinctrl-0 = <&hp_det>; + + simple-audio-card,name = "realtek,rt5616-codec"; + simple-audio-card,format = "i2s"; + simple-audio-card,mclk-fs = <256>; + + simple-audio-card,hp-det-gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_LOW>; + + simple-audio-card,widgets = + "Headphone", "Headphones", + "Microphone", "Microphone Jack"; + simple-audio-card,routing = + "Headphones", "HPOL", + "Headphones", "HPOR", + "MIC1", "Microphone Jack", + "Microphone Jack", "micbias1"; + + simple-audio-card,cpu { + sound-dai = <&i2s0_8ch>; + }; + simple-audio-card,codec { + sound-dai = <&rt5616>; + }; + }; + + vcc12v_dcin: vcc12v-dcin-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc12v_dcin"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + /* vcc5v0_sys powers peripherals */ + vcc5v0_sys: vcc5v0-sys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc12v_dcin>; + }; + + /* vcc4v0_sys powers the RK806, RK860's */ + vcc4v0_sys: vcc4v0-sys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc4v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <4000000>; + regulator-max-microvolt = <4000000>; + vin-supply = <&vcc12v_dcin>; + }; + + vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc-1v1-nldo-s3"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + vin-supply = <&vcc4v0_sys>; + }; + + vcc_3v3_pcie20: vcc3v3-pcie20-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc_3v3_pcie20"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_3v3_s3>; + }; + + vbus5v0_typec: 
vbus5v0-typec-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio1 RK_PD2 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&typec5v_pwren>; + regulator-always-on; + regulator-boot-on; + regulator-name = "vbus5v0_typec"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_m2_1_pwren>; + regulator-name = "vcc3v3_pcie2x1l0"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc3v3_pcie30: vcc3v3-pcie30-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_m2_0_pwren>; + regulator-name = "vcc3v3_pcie30"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc3v3_sd_s0: vcc3v3-sd-s0-regulator { + compatible = "regulator-fixed"; + gpio = <&gpio4 RK_PA5 GPIO_ACTIVE_LOW>; + regulator-boot-on; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <3300000>; + regulator-name = "vcc3v3_sd_s0"; + vin-supply = <&vcc_3v3_s3>; + }; +}; + +&combphy0_ps { + status = "okay"; +}; + +&combphy1_ps { + status = "okay"; +}; + +&combphy2_psu { + status = "okay"; +}; + +&cpu_l0 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l1 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l2 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l3 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_b0 { + cpu-supply = <&vdd_cpu_big0_s0>; +}; + +&cpu_b1 { + cpu-supply = <&vdd_cpu_big0_s0>; +}; + +&cpu_b2 { + cpu-supply = <&vdd_cpu_big1_s0>; +}; + +&cpu_b3 { + cpu-supply = <&vdd_cpu_big1_s0>; +}; + +&gpio0 { + gpio-line-names = /* GPIO0 A0-A7 */ + "", "", "", "", + "", "", "", "", + /* GPIO0 B0-B7 */ + "", "", "", "", + "", "", "", "", + /* GPIO0 C0-C7 */ + "", "", "", "", + "HEADER_10", "HEADER_08", "HEADER_32", "", + /* GPIO0 D0-D7 */ + "", "", "", "", + "IR receiver [PWM3_IR_M0]", "", "", ""; +}; + +&gpio1 { + gpio-line-names = /* GPIO1 A0-A7 */ + "HEADER_27", "HEADER_28", "", "", + "", "", "", "HEADER_15", + /* GPIO1 B0-B7 */ + "HEADER_26", "HEADER_21", "HEADER_19", "HEADER_23", + "HEADER_24", "HEADER_22", "", "", + /* GPIO1 C0-C7 */ + "", "", "", "", + "", "", "", "", + /* GPIO1 D0-D7 */ + "", "", "", "", + "", "", "HEADER_05", "HEADER_03"; +}; + +&gpio2 { + gpio-line-names = /* GPIO2 A0-A7 */ + "", "", "", "", + "", "", "", "", + /* GPIO2 B0-B7 */ + "", "", "", "", + "", "", "", "", + /* GPIO2 C0-C7 */ + "", "CSI1_11", "CSI1_12", "", + "", "", "", "", + /* GPIO2 D0-D7 */ + "", "", "", "", + "", "", "", ""; +}; + +&gpio3 { + gpio-line-names = /* GPIO3 A0-A7 */ + "HEADER_35", "HEADER_38", "HEADER_40", "HEADER_36", + "HEADER_37", "", "DSI0_12", "", + /* GPIO3 B0-B7 */ + "HEADER_33", "DSI0_10", "HEADER_07", "HEADER_16", + "HEADER_18", "HEADER_29", "HEADER_31", "HEADER_12", + /* GPIO3 C0-C7 */ + "DSI0_08", "DSI0_14", "HEADER_11", "HEADER_13", + "", "", "", "", + /* GPIO3 D0-D7 */ + "", "", "", "", + "", "DSI1_10", "", ""; +}; + +&gpio4 { + gpio-line-names = /* GPIO4 A0-A7 */ + "DSI1_08", "DSI1_14", "", "DSI1_12", + "", "", "", "", + /* GPIO4 B0-B7 */ + "", "", "", "", + "", "", "", "", + /* GPIO4 C0-C7 */ + "", "", "", "", + "CSI0_11", "CSI0_12", "", "", + /* GPIO4 D0-D7 */ + "", "", "", 
"", + "", "", "", ""; +}; + +&gpu { + mali-supply = <&vdd_gpu_s0>; + status = "okay"; +}; + +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0m2_xfer>; + status = "okay"; + + vdd_cpu_big0_s0: regulator@42 { + compatible = "rockchip,rk8602"; + reg = <0x42>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_cpu_big0_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <1050000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc4v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_big1_s0: regulator@43 { + compatible = "rockchip,rk8603", "rockchip,rk8602"; + reg = <0x43>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_cpu_big1_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <1050000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc4v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; +}; + +&i2c2 { + status = "okay"; + + vdd_npu_s0: regulator@42 { + compatible = "rockchip,rk8602"; + reg = <0x42>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_npu_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <950000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc4v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; +}; + +&i2c6 { + clock-frequency = <200000>; + status = "okay"; + + fusb302: typec-portc@22 { + compatible = "fcs,fusb302"; + reg = <0x22>; + interrupt-parent = <&gpio0>; + interrupts = ; + pinctrl-0 = <&usbc0_int>; + pinctrl-names = "default"; + vbus-supply = <&vbus5v0_typec>; + + connector { + compatible = "usb-c-connector"; + data-role = "dual"; + label = "USB-C"; + power-role = "source"; + source-pdos = ; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + usbc0_hs: endpoint { + remote-endpoint = <&usb_host0_xhci_drd_sw>; + }; + }; + + port@1 { + reg = <1>; + usbc0_ss: endpoint { + remote-endpoint = <&usbdp_phy0_typec_ss>; + }; + }; + + port@2 { + reg = <2>; + usbc0_sbu: endpoint { + remote-endpoint = <&usbdp_phy0_typec_sbu>; + }; + }; + }; + }; + }; + + hym8563: rtc@51 { + compatible = "haoyu,hym8563"; + reg = <0x51>; + #clock-cells = <0>; + clock-output-names = "hym8563"; + pinctrl-names = "default"; + pinctrl-0 = <&hym8563_int>; + interrupt-parent = <&gpio0>; + interrupts = ; + wakeup-source; + }; +}; + +&i2c7 { + clock-frequency = <200000>; + status = "okay"; + + rt5616: codec@1b { + compatible = "realtek,rt5616"; + reg = <0x1b>; + clocks = <&cru I2S0_8CH_MCLKOUT>; + clock-names = "mclk"; + #sound-dai-cells = <0>; + assigned-clocks = <&cru I2S0_8CH_MCLKOUT>; + assigned-clock-rates = <12288000>; + + port { + rt5616_p0_0: endpoint { + remote-endpoint = <&i2s0_8ch_p0_0>; + }; + }; + }; + + /* connected with MIPI-CSI1 */ +}; + +&i2c8 { + pinctrl-0 = <&i2c8m2_xfer>; +}; + +&i2s0_8ch { + pinctrl-names = "default"; + pinctrl-0 = <&i2s0_lrck + &i2s0_mclk + &i2s0_sclk + &i2s0_sdi0 + &i2s0_sdo0>; + status = "okay"; + + i2s0_8ch_p0: port { + i2s0_8ch_p0_0: endpoint { + dai-format = "i2s"; + mclk-fs = <256>; + remote-endpoint = <&rt5616_p0_0>; + }; + }; +}; + +&pcie2x1l0 { + reset-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc_3v3_pcie20>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie2_0_rst>; + status = "okay"; +}; + +&pcie2x1l1 { + reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = 
<&vcc3v3_pcie2x1l0>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie2_1_rst>; + status = "okay"; +}; + +&pcie2x1l2 { + reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc_3v3_pcie20>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie2_2_rst>; + status = "okay"; +}; + +&pcie30phy { + status = "okay"; +}; + +&pcie3x4 { + reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc3v3_pcie30>; + status = "okay"; +}; + +&pinctrl { + gpio-leds { + sys_led_pin: sys-led-pin { + rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + usr_led_pin: usr-led-pin { + rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + headphone { + hp_det: hp-det { + rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + hym8563 { + hym8563_int: hym8563-int { + rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + ir-receiver { + ir_receiver_pin: ir-receiver-pin { + rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + pcie { + pcie2_0_rst: pcie2-0-rst { + rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + pcie2_1_rst: pcie2-1-rst { + rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + pcie2_2_rst: pcie2-2-rst { + rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + pcie_m2_0_pwren: pcie-m20-pwren { + rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + pcie_m2_1_pwren: pcie-m21-pwren { + rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + usb { + typec5v_pwren: typec5v-pwren { + rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + usbc0_int: usbc0-int { + rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; +}; + +&pwm1 { + pinctrl-0 = <&pwm1m1_pins>; + status = "okay"; +}; + +&saradc { + vref-supply = <&avcc_1v8_s0>; + status = "okay"; +}; + +&sdhci { + bus-width = <8>; + no-sdio; + no-sd; + non-removable; + max-frequency = <200000000>; + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; + status = "okay"; +}; + +&sdmmc { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>; + disable-wp; + no-mmc; + no-sdio; + sd-uhs-sdr104; + vmmc-supply = <&vcc3v3_sd_s0>; + vqmmc-supply = <&vccio_sd_s0>; + status = "okay"; +}; + +/* optional on non-LTS, populated on LTS version */ +&sfc { + pinctrl-names = "default"; + pinctrl-0 = <&fspim1_pins>; + status = "okay"; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <104000000>; + spi-rx-bus-width = <4>; + spi-tx-bus-width = <1>; + }; +}; + +&spi2 { + status = "okay"; + assigned-clocks = <&cru CLK_SPI2>; + assigned-clock-rates = <200000000>; + pinctrl-names = "default"; + pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>; + num-cs = <1>; + + pmic@0 { + compatible = "rockchip,rk806"; + spi-max-frequency = <1000000>; + reg = <0x0>; + + interrupt-parent = <&gpio0>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; + + pinctrl-names = "default"; + pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>, + <&rk806_dvs2_null>, <&rk806_dvs3_null>; + + system-power-controller; + + vcc1-supply = <&vcc4v0_sys>; + vcc2-supply = <&vcc4v0_sys>; + vcc3-supply = <&vcc4v0_sys>; + vcc4-supply = <&vcc4v0_sys>; + vcc5-supply = <&vcc4v0_sys>; + vcc6-supply = <&vcc4v0_sys>; + vcc7-supply = <&vcc4v0_sys>; + vcc8-supply = <&vcc4v0_sys>; + vcc9-supply = <&vcc4v0_sys>; + vcc10-supply = <&vcc4v0_sys>; + vcc11-supply = <&vcc_2v0_pldo_s3>; + vcc12-supply = <&vcc4v0_sys>; + vcc13-supply = <&vcc_1v1_nldo_s3>; + vcc14-supply = <&vcc_1v1_nldo_s3>; + vcca-supply 
= <&vcc4v0_sys>; + + gpio-controller; + #gpio-cells = <2>; + + rk806_dvs1_null: dvs1-null-pins { + pins = "gpio_pwrctrl1"; + function = "pin_fun0"; + }; + + rk806_dvs2_null: dvs2-null-pins { + pins = "gpio_pwrctrl2"; + function = "pin_fun0"; + }; + + rk806_dvs3_null: dvs3-null-pins { + pins = "gpio_pwrctrl3"; + function = "pin_fun0"; + }; + + regulators { + vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 { + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <950000>; + regulator-ramp-delay = <12500>; + regulator-name = "vdd_gpu_s0"; + regulator-enable-ramp-delay = <400>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <950000>; + regulator-ramp-delay = <12500>; + regulator-name = "vdd_cpu_lit_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_log_s0: dcdc-reg3 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <675000>; + regulator-max-microvolt = <750000>; + regulator-ramp-delay = <12500>; + regulator-name = "vdd_log_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <750000>; + }; + }; + + vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <950000>; + regulator-ramp-delay = <12500>; + regulator-name = "vdd_vdenc_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_ddr_s0: dcdc-reg5 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <675000>; + regulator-max-microvolt = <900000>; + regulator-ramp-delay = <12500>; + regulator-name = "vdd_ddr_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <850000>; + }; + }; + + vdd2_ddr_s3: dcdc-reg6 { + regulator-always-on; + regulator-boot-on; + regulator-name = "vdd2_ddr_s3"; + + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_2v0_pldo_s3: dcdc-reg7 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <2000000>; + regulator-max-microvolt = <2000000>; + regulator-ramp-delay = <12500>; + regulator-name = "vdd_2v0_pldo_s3"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <2000000>; + }; + }; + + vcc_3v3_s3: dcdc-reg8 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc_3v3_s3"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vddq_ddr_s0: dcdc-reg9 { + regulator-always-on; + regulator-boot-on; + regulator-name = "vddq_ddr_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8_s3: dcdc-reg10 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcc_1v8_s3"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + avcc_1v8_s0: pldo-reg1 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "avcc_1v8_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8_s0: pldo-reg2 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + 
regulator-max-microvolt = <1800000>; + regulator-name = "vcc_1v8_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + avdd_1v2_s0: pldo-reg3 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-name = "avdd_1v2_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v3_s0: pldo-reg4 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-ramp-delay = <12500>; + regulator-name = "vcc_3v3_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vccio_sd_s0: pldo-reg5 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-ramp-delay = <12500>; + regulator-name = "vccio_sd_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + pldo6_s3: pldo-reg6 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "pldo6_s3"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vdd_0v75_s3: nldo-reg1 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <750000>; + regulator-name = "vdd_0v75_s3"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <750000>; + }; + }; + + vdd_ddr_pll_s0: nldo-reg2 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <850000>; + regulator-name = "vdd_ddr_pll_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <850000>; + }; + }; + + avdd_0v75_s0: nldo-reg3 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <750000>; + regulator-name = "avdd_0v75_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_0v85_s0: nldo-reg4 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <850000>; + regulator-name = "vdd_0v85_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_0v75_s0: nldo-reg5 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <750000>; + regulator-name = "vdd_0v75_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + }; +}; + +&tsadc { + status = "okay"; +}; + +&uart2 { + pinctrl-0 = <&uart2m0_xfer>; + status = "okay"; +}; + +&u2phy0 { + status = "okay"; +}; + +&u2phy0_otg { + status = "okay"; +}; + +&u2phy2_host { + status = "okay"; +}; + +&u2phy3_host { + status = "okay"; +}; + +&u2phy2 { + status = "okay"; +}; + +&u2phy3 { + status = "okay"; +}; + +&usbdp_phy0 { + mode-switch; + orientation-switch; + sbu1-dc-gpios = <&gpio4 RK_PA6 GPIO_ACTIVE_HIGH>; + sbu2-dc-gpios = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>; + status = "okay"; + + port { + #address-cells = <1>; + #size-cells = <0>; + + usbdp_phy0_typec_ss: endpoint@0 { + reg = <0>; + remote-endpoint = <&usbc0_ss>; + }; + + usbdp_phy0_typec_sbu: endpoint@1 { + reg = <1>; + remote-endpoint = <&usbc0_sbu>; + }; + }; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +&usb_host0_xhci { + dr_mode = "host"; + status = "okay"; + usb-role-switch; + + port { + 
usb_host0_xhci_drd_sw: endpoint { + remote-endpoint = <&usbc0_hs>; + }; + }; +}; + +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts index e74871491ef56b..c3a6812cc93a2f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts @@ -105,6 +105,13 @@ led { }; }; + rfkill { + compatible = "rfkill-gpio"; + label = "rfkill-pcie-wlan"; + radio-type = "wlan"; + shutdown-gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>; + }; + sound { compatible = "simple-audio-card"; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts b/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts new file mode 100644 index 00000000000000..467f69594089bf --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts @@ -0,0 +1,1237 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +/dts-v1/; + +#include +#include +#include +#include +#include +#include +#include +#include "rk3588s.dtsi" + +/ { + model = "Gameforce Ace"; + chassis-type = "handset"; + compatible = "gameforce,ace", "rockchip,rk3588s"; + + aliases { + mmc0 = &sdhci; + mmc1 = &sdmmc; + mmc2 = &sdio; + }; + + chosen { + stdout-path = "serial2:1500000n8"; + }; + + adc_keys: adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 1>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1800000>; + poll-interval = <60>; + + button-vol-up { + label = "VOLUMEUP"; + linux,code = ; + press-threshold-microvolt = <17000>; + }; + + button-vol-down { + label = "VOLUMEDOWN"; + linux,code = ; + press-threshold-microvolt = <417000>; + }; + }; + + /* Joystick range values based on hardware observation. */ + adc_joystick: adc-joystick { + compatible = "adc-joystick"; + io-channels = <&saradc 2>, <&saradc 3>, + <&saradc 4>, <&saradc 5>; + poll-interval = <60>; + #address-cells = <1>; + #size-cells = <0>; + + axis@0 { + reg = <0>; + abs-flat = <40>; + abs-fuzz = <30>; + abs-range = <0 4095>; + linux,code = ; + }; + + axis@1 { + reg = <1>; + abs-flat = <40>; + abs-fuzz = <30>; + abs-range = <0 4095>; + linux,code = ; + }; + + axis@2 { + reg = <2>; + abs-flat = <40>; + abs-fuzz = <30>; + abs-range = <0 4095>; + linux,code = ; + }; + + axis@3 { + reg = <3>; + abs-flat = <40>; + abs-fuzz = <30>; + abs-range = <0 4095>; + linux,code = ; + }; + }; + + /* Trigger range values based on hardware observation. 
*/ + adc_triggers: adc-trigger { + compatible = "adc-joystick"; + io-channels = <&ti_adc 6>, + <&ti_adc 7>; + poll-interval = <60>; + #address-cells = <1>; + #size-cells = <0>; + + axis@0 { + reg = <0>; + abs-flat = <15>; + abs-fuzz = <15>; + abs-range = <890 1530>; + linux,code = ; + }; + + axis@1 { + reg = <1>; + abs-flat = <15>; + abs-fuzz = <15>; + abs-range = <1010 1550>; + linux,code = ; + }; + }; + + analog-sound { + compatible = "simple-audio-card"; + pinctrl-0 = <&hp_detect>; + pinctrl-names = "default"; + simple-audio-card,aux-devs = <&amp_headphone>, <&amp_speaker>; + simple-audio-card,bitclock-master = <&masterdai>; + simple-audio-card,format = "i2s"; + simple-audio-card,frame-master = <&masterdai>; + simple-audio-card,hp-det-gpio = <&gpio3 RK_PA6 GPIO_ACTIVE_LOW>; + simple-audio-card,mclk-fs = <256>; + simple-audio-card,name = "rockchip,es8388-codec"; + simple-audio-card,pin-switches = "Headphones", "Speaker"; + simple-audio-card,routing = + "Speaker Amplifier INL", "LOUT2", + "Speaker Amplifier INR", "ROUT2", + "Speaker", "Speaker Amplifier OUTL", + "Speaker", "Speaker Amplifier OUTR", + "Headphones Amplifier INL", "LOUT1", + "Headphones Amplifier INR", "ROUT1", + "Headphones", "Headphones Amplifier OUTL", + "Headphones", "Headphones Amplifier OUTR", + "LINPUT1", "Microphone Jack", + "RINPUT1", "Microphone Jack", + "LINPUT2", "Onboard Microphone", + "RINPUT2", "Onboard Microphone"; + simple-audio-card,widgets = + "Microphone", "Microphone Jack", + "Microphone", "Onboard Microphone", + "Headphone", "Headphones", + "Speaker", "Speaker"; + + masterdai: simple-audio-card,codec { + sound-dai = <&es8388>; + system-clock-frequency = <12288000>; + }; + + simple-audio-card,cpu { + sound-dai = <&i2s0_8ch>; + }; + }; + + backlight: backlight { + compatible = "pwm-backlight"; + enable-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&lcd_bl_en>; + pinctrl-names = "default"; + pwms = <&pwm13 0 25000 PWM_POLARITY_INVERTED>; + }; + + battery: battery { + compatible = "simple-battery"; + charge-full-design-microamp-hours = <3700000>; + constant-charge-current-max-microamp = <2500000>; + constant-charge-voltage-max-microvolt = <8750000>; + voltage-min-design-microvolt = <7400000>; + }; + + gpio_keys: gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&btn_pins_ctrl>; + pinctrl-names = "default"; + + button-a { + gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_LOW>; + label = "EAST"; + linux,code = ; + }; + + button-b { + gpios = <&gpio1 RK_PA5 GPIO_ACTIVE_LOW>; + label = "SOUTH"; + linux,code = ; + }; + + button-down { + gpios = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>; + label = "DPAD-DOWN"; + linux,code = ; + }; + + button-home { + gpios = <&gpio1 RK_PA0 GPIO_ACTIVE_LOW>; + label = "FUNCTION"; + linux,code = ; + }; + + button-l1 { + gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_LOW>; + label = "L1"; + linux,code = ; + }; + + button-left { + gpios = <&gpio1 RK_PD7 GPIO_ACTIVE_LOW>; + label = "DPAD-LEFT"; + linux,code = ; + }; + + button-menu { + gpios = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>; + label = "HOME"; + linux,code = ; + }; + + button-r1 { + gpios = <&gpio1 RK_PB2 GPIO_ACTIVE_LOW>; + label = "R1"; + linux,code = ; + }; + + button-right { + gpios = <&gpio1 RK_PB7 GPIO_ACTIVE_LOW>; + label = "DPAD-RIGHT"; + linux,code = ; + }; + + button-select { + gpios = <&gpio1 RK_PA3 GPIO_ACTIVE_LOW>; + label = "SELECT"; + linux,code = ; + }; + + button-start { + gpios = <&gpio1 RK_PB6 GPIO_ACTIVE_LOW>; + label = "START"; + linux,code = ; + }; + + button-thumbl { + gpios = <&gpio1 RK_PA4 GPIO_ACTIVE_LOW>; + label = "THUMBL"; + 
linux,code = ; + }; + + button-thumbr { + gpios = <&gpio1 RK_PD6 GPIO_ACTIVE_LOW>; + label = "THUMBR"; + linux,code = ; + }; + + button-up { + gpios = <&gpio1 RK_PA2 GPIO_ACTIVE_LOW>; + label = "DPAD-UP"; + linux,code = ; + }; + + button-x { + gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_LOW>; + label = "NORTH"; + linux,code = ; + }; + + button-y { + gpios = <&gpio1 RK_PB3 GPIO_ACTIVE_LOW>; + label = "WEST"; + linux,code = ; + }; + }; + + gpio_leds: gpio-leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&led_pins>; + + green_led: led-0 { + color = ; + gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>; + function = LED_FUNCTION_STATUS; + }; + + red_led: led-1 { + color = ; + gpios = <&gpio3 RK_PC2 GPIO_ACTIVE_HIGH>; + function = LED_FUNCTION_CHARGING; + }; + }; + + amp_headphone: headphone-amplifier { + compatible = "simple-audio-amplifier"; + enable-gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&headphone_amplifier_en>; + pinctrl-names = "default"; + sound-name-prefix = "Headphones Amplifier"; + }; + + pwm_fan: pwm-fan { + compatible = "pwm-fan"; + #cooling-cells = <2>; + cooling-levels = <0 120 150 180 210 240 255>; + fan-supply = <&vcc5v0_sys>; + interrupt-parent = <&gpio4>; + interrupts = ; + pulses-per-revolution = <4>; + pwms = <&pwm12 0 50000 PWM_POLARITY_INVERTED>; + }; + + pwm_gpio33: pwm-33 { + compatible = "pwm-gpio"; + gpios = <&gpio1 RK_PA1 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&vib_right_h>; + pinctrl-names = "default"; + #pwm-cells = <3>; + }; + + pwm_gpio132: pwm-132 { + compatible = "pwm-gpio"; + gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&vib_left_h>; + pinctrl-names = "default"; + #pwm-cells = <3>; + }; + + sdio_pwrseq: sdio-pwrseq { + compatible = "mmc-pwrseq-simple"; + clock-names = "ext_clock"; + clocks = <&rtc_hym8563>; + pinctrl-0 = <&wifi_enable_h>; + pinctrl-names = "default"; + post-power-on-delay-ms = <200>; + power-off-delay-us = <5000000>; + reset-gpios = <&gpio3 RK_PB4 GPIO_ACTIVE_LOW>; + }; + + amp_speaker: speaker-amplifier { + compatible = "simple-audio-amplifier"; + enable-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&speaker_amplifier_en>; + pinctrl-names = "default"; + sound-name-prefix = "Speaker Amplifier"; + VCC-supply = <&vcc5v0_spk>; + }; + + vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <1100000>; + regulator-min-microvolt = <1100000>; + regulator-name = "vcc_1v1_nldo_s3"; + vin-supply = <&vcc5v0_sys>; + }; + + vcc3v3_lcd0_n: vcc3v3-lcd0-n-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio4 RK_PA7 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&vcc_lcd_h>; + pinctrl-names = "default"; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <3300000>; + regulator-name = "vcc3v3_lcd0_n"; + vin-supply = <&vcc_3v3_s3>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v3_sd_s0: vcc-3v3-sd-s0-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&sd_s0_pwr>; + pinctrl-names = "default"; + regulator-max-microvolt = <3000000>; + regulator-min-microvolt = <3000000>; + regulator-name = "vcc_3v3_sd_s0"; + vin-supply = <&vcc_3v3_s3>; + }; + + vcc5v0_spk: vcc5v0-spk-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&vcc5v0_spk_pwr>; + pinctrl-names = "default"; + regulator-max-microvolt = <5000000>; + 
regulator-min-microvolt = <5000000>; + regulator-name = "vcc5v0_spk"; + vin-supply = <&vcc5v0_sys>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc5v0_sys: vcc5v0-sys-regulator { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <5000000>; + regulator-min-microvolt = <5000000>; + regulator-name = "vcc5v0_sys"; + }; + + vibrator_l: vibrator-l { + compatible = "pwm-vibrator"; + pwm-names = "enable"; + pwms = <&pwm_gpio132 0 20000000 0>; + }; + + vibrator_r: vibrator-r { + compatible = "pwm-vibrator"; + pwm-names = "enable"; + pwms = <&pwm_gpio33 0 20000000 0>; + }; +}; + +&combphy2_psu { + status = "okay"; +}; + +&cpu_l0 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l1 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l2 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l3 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_b0 { + cpu-supply = <&vdd_cpu_big0_s0>; +}; + +&cpu_b1 { + cpu-supply = <&vdd_cpu_big0_s0>; +}; + +&cpu_b2 { + cpu-supply = <&vdd_cpu_big1_s0>; +}; + +&cpu_b3 { + cpu-supply = <&vdd_cpu_big1_s0>; +}; + +&gpu { + mali-supply = <&vdd_gpu_s0>; + status = "okay"; +}; + +&i2c0 { + pinctrl-0 = <&i2c0m2_xfer>; + pinctrl-names = "default"; + status = "okay"; + + vdd_cpu_big0_s0: regulator@42 { + compatible = "rockchip,rk8602"; + reg = <0x42>; + fcs,suspend-voltage-selector = <1>; + regulator-max-microvolt = <1050000>; + regulator-min-microvolt = <550000>; + regulator-name = "vdd_cpu_big0_s0"; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc5v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_big1_s0: regulator@43 { + compatible = "rockchip,rk8603", "rockchip,rk8602"; + reg = <0x43>; + fcs,suspend-voltage-selector = <1>; + regulator-max-microvolt = <1050000>; + regulator-min-microvolt = <550000>; + regulator-name = "vdd_cpu_big1_s0"; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc5v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; +}; + +&i2c2 { + status = "okay"; + + vdd_npu_s0: regulator@42 { + compatible = "rockchip,rk8602"; + reg = <0x42>; + fcs,suspend-voltage-selector = <1>; + regulator-max-microvolt = <950000>; + regulator-min-microvolt = <550000>; + regulator-name = "vdd_npu_s0"; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc5v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; +}; + +&i2c3 { + status = "okay"; + + touchscreen@14 { + compatible = "goodix,gt911"; + reg = <0x14>; + interrupt-parent = <&gpio1>; + interrupts = ; + irq-gpios = <&gpio1 RK_PA6 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&touch_int>, <&touch_rst>; + pinctrl-names = "default"; + reset-gpios = <&gpio1 RK_PA7 GPIO_ACTIVE_HIGH>; + touchscreen-inverted-x; + touchscreen-size-x = <1080>; + touchscreen-size-y = <1920>; + touchscreen-swapped-x-y; + }; +}; + +&i2c4 { + pinctrl-0 = <&i2c4m2_xfer>; + status = "okay"; + + ti_adc: adc@48 { + compatible = "ti,ads1015"; + reg = <0x48>; + #address-cells = <1>; + #io-channel-cells = <1>; + #size-cells = <0>; + + channel@4 { + reg = <4>; + }; + + channel@5 { + reg = <5>; + }; + + channel@6 { + reg = <6>; + }; + + channel@7 { + reg = <7>; + }; + }; + + imu@68 { + compatible = "invensense,mpu6880"; + reg = <0x68>; + interrupt-parent = <&gpio0>; + interrupts = ; + }; +}; + +&i2c6 { + pinctrl-0 = <&i2c6m3_xfer>; + status = "okay"; + + rtc_hym8563: rtc@51 { + compatible = "haoyu,hym8563"; + reg = <0x51>; + #clock-cells = <0>; + clock-output-names = "hym8563"; + interrupt-parent = <&gpio0>; + interrupts = ; + pinctrl-0 
= <&hym8563_int>, <&clk32k_in>; + pinctrl-names = "default"; + wakeup-source; + }; + + /* Battery profile from BSP device tree. */ + battery@62 { + compatible = "cellwise,cw2015"; + reg = <0x62>; + + cellwise,battery-profile = /bits/ 8 + <0x18 0x0A 0x76 0x6A 0x6A 0x6A 0x68 0x66 + 0x62 0x5E 0x5A 0x58 0x5F 0x59 0x46 0x3D + 0x35 0x2D 0x28 0x21 0x29 0x38 0x44 0x50 + 0x1A 0x85 0x07 0xAE 0x14 0x28 0x48 0x56 + 0x66 0x66 0x66 0x6A 0x3E 0x1A 0x6C 0x3D + 0x09 0x38 0x1A 0x49 0x7B 0x96 0xA2 0x15 + 0x3B 0x77 0x9A 0xB1 0x80 0x87 0xB0 0xCB + 0x2F 0x00 0x64 0xA5 0xB5 0x1C 0xF0 0x49>; + cellwise,monitor-interval-ms = <5000>; + monitored-battery = <&battery>; + status = "okay"; + }; +}; + +&i2c7 { + status = "okay"; + + es8388: audio-codec@11 { + compatible = "everest,es8388"; + reg = <0x11>; + assigned-clock-rates = <12288000>; + assigned-clocks = <&cru I2S0_8CH_MCLKOUT>; + AVDD-supply = <&vcc_3v3_s3>; + clocks = <&cru I2S0_8CH_MCLKOUT>; + DVDD-supply = <&vcc_1v8_s3>; + HPVDD-supply = <&vcc_3v3_s3>; + PVDD-supply = <&vcc_1v8_s3>; + #sound-dai-cells = <0>; + }; +}; + +&i2s0_8ch { + pinctrl-0 = <&i2s0_lrck + &i2s0_mclk + &i2s0_sclk + &i2s0_sdi0 + &i2s0_sdo0>; + status = "okay"; +}; + +&package_thermal { + polling-delay = <1000>; + + trips { + package_fan0: package-fan0 { + temperature = <55000>; + hysteresis = <2000>; + type = "active"; + }; + + package_fan1: package-fan1 { + temperature = <65000>; + hysteresis = <2000>; + type = "active"; + }; + }; + + cooling-maps { + map1 { + trip = <&package_fan0>; + cooling-device = <&pwm_fan THERMAL_NO_LIMIT 1>; + }; + + map2 { + trip = <&package_fan1>; + cooling-device = <&pwm_fan 2 THERMAL_NO_LIMIT>; + }; + }; +}; + +/* + * Attempts to use an M.2 SATA in this slot worked intermittently + * with the correct nodes enabled in device-tree, but eventually + * resulted in a destroyed board. Advise caution. 
+ */ +&pcie2x1l1 { + pinctrl-0 = <&pcie_rst>; + pinctrl-names = "default"; + reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&pinctrl { + audio-amplifier { + headphone_amplifier_en: headphone-amplifier-en { + rockchip,pins = + <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + hp_detect: headphone-detect { + rockchip,pins = + <3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + speaker_amplifier_en: speaker-amplifier-en { + rockchip,pins = + <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + bt { + bt_enable_h: bt-enable-h { + rockchip,pins = + <3 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + bt_host_wake_l: bt-host-wake-l { + rockchip,pins = + <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + bt_wake_l: bt-wake-l { + rockchip,pins = + <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + charger { + boost_enable_h: boost-enable-h { + rockchip,pins = + <4 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + charger_int_h: charger-int-h { + rockchip,pins = + <0 RK_PD5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + hym8563 { + hym8563_int: hym8563-int { + rockchip,pins = + <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + gpio-btns { + btn_pins_ctrl: btn-pins-ctrl { + rockchip,pins = + <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PB7 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>, + <1 RK_PD7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + gpio-leds { + led_pins: led-pins { + rockchip,pins = + <3 RK_PC1 RK_FUNC_GPIO &pcfg_pull_up>, + <3 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + lcd_bl_en { + lcd_bl_en: lcd-bl-en { + rockchip,pins = + <3 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + pcie-pins { + pcie_rst: pcie-rst { + rockchip,pins = + <3 RK_PD1 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + sd-pwr { + sd_s0_pwr: sd-s0-pwr { + rockchip,pins = + <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + spk-pwr { + vcc5v0_spk_pwr: vcc5v0-spk-pwr { + rockchip,pins = + <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + touch { + touch_int: touch-int { + rockchip,pins = + <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + touch_rst: touch-rst { + rockchip,pins = + <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + usb-typec { + usbc0_int: usbc0-int { + rockchip,pins = + <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + vcc3v3-lcd { + vcc_lcd_h: vcc-lcd-h { + rockchip,pins = + <4 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + vibrator { + vib_left_h: vib-left-h { + rockchip,pins = + <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + vib_right_h: vib-right-h { + rockchip,pins = + <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_down>; + }; + }; + + wifi { + wifi_enable_h: wifi-enable-h { + rockchip,pins = + <3 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + wifi_host_wake_irq: wifi-host-wake-irq { + rockchip,pins = + <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_down>; + }; + }; +}; + +&pwm12 { + pinctrl-0 = <&pwm12m1_pins>; + status = "okay"; +}; + +&pwm13 { + pinctrl-0 = <&pwm13m1_pins>; + status = "okay"; +}; + +&saradc { + vref-supply = <&vcc_1v8_s0>; + status = "okay"; +}; + +&sdhci { + 
bus-width = <8>; + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; + no-sd; + no-sdio; + non-removable; + status = "okay"; +}; + +&sdio { + #address-cells = <1>; + bus-width = <4>; + cap-sd-highspeed; + cap-sdio-irq; + disable-wp; + keep-power-in-suspend; + max-frequency = <150000000>; + mmc-pwrseq = <&sdio_pwrseq>; + no-mmc; + no-sd; + sd-uhs-sdr104; + #size-cells = <0>; + status = "okay"; + + brcmf: wifi@1 { + compatible = "brcm,bcm43456-fmac", "brcm,bcm4329-fmac"; + reg = <1>; + interrupt-parent = <&gpio0>; + interrupts = ; + interrupt-names = "host-wake"; + pinctrl-0 = <&wifi_host_wake_irq>; + pinctrl-names = "default"; + }; +}; + +&sdmmc { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + disable-wp; + max-frequency = <150000000>; + no-sdio; + no-mmc; + sd-uhs-sdr104; + vmmc-supply = <&vcc_3v3_sd_s0>; + vqmmc-supply = <&vccio_sd_s0>; + status = "okay"; +}; + +&spi2 { + #address-cells = <1>; + assigned-clocks = <&cru CLK_SPI2>; + assigned-clock-rates = <200000000>; + num-cs = <1>; + pinctrl-0 = <&spi2m2_pins>, <&spi2m2_cs0>; + pinctrl-names = "default"; + #size-cells = <0>; + status = "okay"; + + pmic@0 { + compatible = "rockchip,rk806"; + reg = <0x0>; + #gpio-cells = <2>; + gpio-controller; + interrupt-parent = <&gpio0>; + interrupts = ; + pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>, + <&rk806_dvs2_null>, <&rk806_dvs3_null>; + pinctrl-names = "default"; + spi-max-frequency = <1000000>; + system-power-controller; + + vcc1-supply = <&vcc5v0_sys>; + vcc2-supply = <&vcc5v0_sys>; + vcc3-supply = <&vcc5v0_sys>; + vcc4-supply = <&vcc5v0_sys>; + vcc5-supply = <&vcc5v0_sys>; + vcc6-supply = <&vcc5v0_sys>; + vcc7-supply = <&vcc5v0_sys>; + vcc8-supply = <&vcc5v0_sys>; + vcc9-supply = <&vcc5v0_sys>; + vcc10-supply = <&vcc5v0_sys>; + vcc11-supply = <&vcc_2v0_pldo_s3>; + vcc12-supply = <&vcc5v0_sys>; + vcc13-supply = <&vcc_1v1_nldo_s3>; + vcc14-supply = <&vcc_1v1_nldo_s3>; + vcca-supply = <&vcc5v0_sys>; + + rk806_dvs1_null: dvs1-null-pins { + pins = "gpio_pwrctrl1"; + function = "pin_fun0"; + }; + + rk806_dvs2_null: dvs2-null-pins { + pins = "gpio_pwrctrl2"; + function = "pin_fun0"; + }; + + rk806_dvs3_null: dvs3-null-pins { + pins = "gpio_pwrctrl3"; + function = "pin_fun0"; + }; + + regulators { + vdd_gpu_s0: dcdc-reg1 { + regulator-boot-on; + regulator-enable-ramp-delay = <400>; + regulator-max-microvolt = <950000>; + regulator-min-microvolt = <550000>; + regulator-name = "vdd_gpu_s0"; + regulator-ramp-delay = <12500>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_lit_s0: dcdc-reg2 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <950000>; + regulator-min-microvolt = <550000>; + regulator-ramp-delay = <12500>; + regulator-name = "vdd_cpu_lit_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_logic_s0: dcdc-reg3 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <750000>; + regulator-min-microvolt = <675000>; + regulator-name = "vdd_logic_s0"; + regulator-ramp-delay = <12500>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <750000>; + }; + }; + + vdd_vdenc_s0: dcdc-reg4 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <950000>; + regulator-min-microvolt = <550000>; + regulator-name = "vdd_vdenc_s0"; + regulator-ramp-delay = <12500>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_ddr_s0: dcdc-reg5 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <675000>; + regulator-max-microvolt = 
<900000>; + regulator-ramp-delay = <12500>; + regulator-name = "vdd_ddr_s0"; + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <850000>; + }; + }; + + vdd2_ddr_s3: dcdc-reg6 { + regulator-always-on; + regulator-boot-on; + regulator-name = "vdd2_ddr_s3"; + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_2v0_pldo_s3: dcdc-reg7 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <2000000>; + regulator-min-microvolt = <2000000>; + regulator-name = "vdd_2v0_pldo_s3"; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <2000000>; + }; + }; + + vcc_3v3_s3: dcdc-reg8 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <3300000>; + regulator-name = "vcc_3v3_s3"; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vddq_ddr_s0: dcdc-reg9 { + regulator-always-on; + regulator-boot-on; + regulator-name = "vddq_ddr_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8_s3: dcdc-reg10 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-name = "vcc_1v8_s3"; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + avcc_1v8_s0: pldo-reg1 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-name = "avcc_1v8_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8_s0: pldo-reg2 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-name = "vcc_1v8_s0"; + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + avdd_1v2_s0: pldo-reg3 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <1200000>; + regulator-min-microvolt = <1200000>; + regulator-name = "avdd_1v2_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v3_s0: pldo-reg4 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <3300000>; + regulator-name = "vcc_3v3_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vccio_sd_s0: pldo-reg5 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <1800000>; + regulator-name = "vccio_sd_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8_s3_pldo6: pldo-reg6 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-name = "vcc_1v8_s3_pldo6"; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vdd_0v75_s3: nldo-reg1 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <750000>; + regulator-min-microvolt = <750000>; + regulator-name = "vdd_0v75_s3"; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <750000>; + }; + }; + + vdd_ddr_pll_s0: nldo-reg2 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <850000>; + regulator-min-microvolt = <850000>; + regulator-name = "vdd_ddr_pll_s0"; + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <850000>; + }; + }; + + 
avdd_0v75_s0: nldo-reg3 { + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <837500>; + regulator-min-microvolt = <837500>; + regulator-name = "avdd_0v75_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_0v85_s0: nldo-reg4 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <850000>; + regulator-name = "vdd_0v85_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_0v75_s0: nldo-reg5 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <750000>; + regulator-name = "vdd_0v75_s0"; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + }; +}; + +&tsadc { + status = "okay"; +}; + +&u2phy0 { + status = "okay"; +}; + +&u2phy0_otg { + status = "okay"; +}; + +&uart2 { + pinctrl-0 = <&uart2m0_xfer>; + status = "okay"; +}; + +&uart9 { + pinctrl-0 = <&uart9m2_xfer>, <&uart9m2_ctsn>, <&uart9m2_rtsn>; + uart-has-rtscts; + status = "okay"; + + bluetooth { + compatible = "brcm,bcm4345c5"; + clocks = <&rtc_hym8563>; + clock-names = "lpo"; + device-wakeup-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_HIGH>; + interrupt-parent = <&gpio3>; + interrupts = ; + pinctrl-0 = <&bt_enable_h>, <&bt_host_wake_l>, <&bt_wake_l>; + pinctrl-names = "default"; + shutdown-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_HIGH>; + }; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts b/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts new file mode 100644 index 00000000000000..63d91236ba9ff9 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts @@ -0,0 +1,903 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +/dts-v1/; + +#include +#include +#include +#include +#include "rk3588s.dtsi" + +/ { + model = "Hardkernel ODROID-M2"; + compatible = "hardkernel,odroid-m2", "rockchip,rk3588s"; + + aliases { + ethernet0 = &gmac1; + mmc0 = &sdhci; + mmc1 = &sdmmc; + }; + + chosen { + stdout-path = "serial2:1500000n8"; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&pwr_led>, <&sys_led>; + + led_pwr: led-0 { + color = ; + default-state = "on"; + function = LED_FUNCTION_POWER; + gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "default-on"; + }; + + led_sys: led-1 { + color = ; + default-state = "on"; + function = LED_FUNCTION_HEARTBEAT; + gpios = <&gpio1 RK_PB1 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + }; + + fan: pwm-fan { + compatible = "pwm-fan"; + #cooling-cells = <2>; + cooling-levels = <0 192 224 255>; + fan-supply = <&vcc5v0_sys>; + pwms = <&pwm0 0 22222 0>; + }; + + vcc_1v1_nldo_s3: regulator-1v1-vcc-nldo-s3 { + compatible = "regulator-fixed"; + regulator-name = "vcc_1v1_nldo_s3"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; + vin-supply = <&vcc4v0_sys>; + }; + + vcc3v3_lcd: regulator-3v3-vcc-lcd { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&lcd_pwren>; + regulator-name = "vcc3v3_lcd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_3v3_s3>; + }; + + vcc3v3_pcie: regulator-3v3-vcc-pcie { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio0 RK_PC6 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_pwren>; + regulator-name = "vcc3v3_pcie"; + regulator-min-microvolt = <3300000>; + 
regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_3v3_s3>; + }; + + vcc_3v3_s0: regulator-3v3-vcc-s0 { + compatible = "regulator-fixed"; + regulator-name = "vcc_3v3_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_3v3_s3>; + }; + + vcc4v0_sys: regulator-4v0-vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc4v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <4800000>; + regulator-max-microvolt = <4800000>; + vin-supply = <&vcc12v_dcin>; + }; + + vcc5v0_sys: regulator-5v0-vcc-sys { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_pwren>; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc12v_dcin>; + }; + + vcc5v0_usb2_host: regulator-5v0-vcc-usb2-host { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb2_host_pwren>; + regulator-name = "vcc5v0_usb2_host"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_usb3_host: regulator-5v0-vcc-usb3-host { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio1 RK_PA6 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb3_host_pwren>; + regulator-name = "vcc5v0_usb3_host"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_usb3_typec: regulator-5v0-vcc-usb3-typec { + compatible = "regulator-fixed"; + enable-active-high; + gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&usb3_typec_pwren>; + regulator-name = "vcc5v0_usb3_typec"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcca: regulator-5v0-vcca { + compatible = "regulator-fixed"; + regulator-name = "vcca"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc12v_dcin>; + }; + + vcc12v_dcin: regulator-12v0-vcc-dcin { + compatible = "regulator-fixed"; + regulator-name = "vcc12v_dcin"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; +}; + +&combphy0_ps { + status = "okay"; +}; + +&combphy2_psu { + status = "okay"; +}; + +&cpu_b0 { + cpu-supply = <&vdd_cpu_big0_s0>; +}; + +&cpu_b1 { + cpu-supply = <&vdd_cpu_big0_s0>; +}; + +&cpu_b2 { + cpu-supply = <&vdd_cpu_big1_s0>; +}; + +&cpu_b3 { + cpu-supply = <&vdd_cpu_big1_s0>; +}; + +&cpu_l0 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l1 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l2 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&cpu_l3 { + cpu-supply = <&vdd_cpu_lit_s0>; +}; + +&gmac1 { + clock_in_out = "output"; + phy-handle = <&rgmii_phy1>; + phy-mode = "rgmii-id"; + phy-supply = <&vcc_3v3_s0>; + pinctrl-names = "default"; + pinctrl-0 = <&gmac1_miim + &gmac1_tx_bus2 + &gmac1_rx_bus2 + &gmac1_rgmii_clk + &gmac1_rgmii_bus + &gmac1_clkinout>; + status = "okay"; +}; + +&gpu { + mali-supply = <&vdd_gpu_s0>; + status = "okay"; +}; + +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0m2_xfer>; + status = "okay"; + + vdd_cpu_big0_s0: 
regulator@42 { + compatible = "rockchip,rk8602"; + reg = <0x42>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_cpu_big0_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <1050000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc4v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_big1_s0: regulator@43 { + compatible = "rockchip,rk8603", "rockchip,rk8602"; + reg = <0x43>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_cpu_big1_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <1050000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc4v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; +}; + +&i2c2 { + status = "okay"; + + vdd_npu_s0: regulator@42 { + compatible = "rockchip,rk8602"; + reg = <0x42>; + fcs,suspend-voltage-selector = <1>; + regulator-name = "vdd_npu_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <950000>; + regulator-ramp-delay = <2300>; + vin-supply = <&vcc4v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; +}; + +&i2c8 { + clock-frequency = <400000>; + pinctrl-names = "default"; + pinctrl-0 = <&i2c8m2_xfer>; + status = "okay"; + + usbc0: usb-typec@22 { + compatible = "fcs,fusb302"; + reg = <0x22>; + interrupt-parent = <&gpio4>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&usbc0_int>; + vbus-supply = <&vcc5v0_usb3_typec>; + + connector { + compatible = "usb-c-connector"; + data-role = "dual"; + label = "USB-C"; + op-sink-microwatt = <1000000>; + power-role = "dual"; + sink-pdos = ; + source-pdos = ; + try-power-role = "source"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + usbc0_role_switch: endpoint { + remote-endpoint = <&usb_host0_xhci_role_switch>; + }; + }; + + port@1 { + reg = <1>; + + usbc0_orientation_switch: endpoint { + remote-endpoint = <&usbdp_phy0_orientation_switch>; + }; + }; + + port@2 { + reg = <2>; + + usbc0_dp_altmode_mux: endpoint { + remote-endpoint = <&usbdp_phy0_dp_altmode_mux>; + }; + }; + }; + }; + }; + + pcf8563: rtc@51 { + compatible = "nxp,pcf8563"; + reg = <0x51>; + interrupt-parent = <&gpio0>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&pcf8563_int>; + wakeup-source; + }; +}; + +&mdio1 { + rgmii_phy1: ethernet-phy@1 { + compatible = "ethernet-phy-id001c.c916"; + reg = <1>; + reset-assert-us = <20000>; + reset-deassert-us = <100000>; + reset-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>; + }; +}; + +&package_thermal { + polling-delay = <1000>; + + trips { + package_fan0: package-fan0 { + hysteresis = <2000>; + temperature = <60000>; + type = "active"; + }; + }; + + cooling-maps { + map0 { + cooling-device = <&fan 1 THERMAL_NO_LIMIT>; + trip = <&package_fan0>; + }; + }; +}; + +&pcie2x1l2 { + pinctrl-names = "default"; + pinctrl-0 = <&pcie20x1_pins>; + reset-gpios = <&gpio1 RK_PA7 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc3v3_pcie>; + status = "okay"; +}; + +&pinctrl { + lcd { + lcd_pwren: lcd-pwren { + rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + leds { + pwr_led: pwr-led { + rockchip,pins = <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + sys_led: sys-led { + rockchip,pins = <1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + pcie { + pcie20x1_pins: pcie20x1-pins { + rockchip,pins = + <1 RK_PA0 4 &pcfg_pull_none>, + <1 RK_PA7 
RK_FUNC_GPIO &pcfg_pull_none>, + <1 RK_PA1 4 &pcfg_pull_none>; + }; + + pcie_pwren: pcie-pwren { + rockchip,pins = <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + regulator { + vcc5v0_pwren: vcc5v0-pwren { + rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + rtc { + pcf8563_int: pcf8563-int { + rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + usb { + usb2_host_pwren: usb2-host-pwren { + rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + usb3_host_pwren: usb3-host-pwren { + rockchip,pins = <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + usb3_typec_pwren: usb3-typec-pwren { + rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + usbc0_int: usbc0-int { + rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&pwm0 { + pinctrl-names = "default"; + pinctrl-0 = <&pwm0m2_pins>; + status = "okay"; +}; + +&saradc { + vref-supply = <&vcca_1v8_s0>; + status = "okay"; +}; + +&sdhci { + bus-width = <8>; + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; + no-sd; + no-sdio; + non-removable; + vmmc-supply = <&vcc_3v3_s0>; + vqmmc-supply = <&vcc_1v8_s0>; + status = "okay"; +}; + +&sdmmc { + bus-width = <4>; + cap-sd-highspeed; + cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>; + disable-wp; + max-frequency = <150000000>; + no-mmc; + no-sdio; + sd-uhs-sdr104; + vmmc-supply = <&vcc_3v3_s3>; + vqmmc-supply = <&vccio_sd_s0>; + status = "okay"; +}; + +&spi2 { + assigned-clocks = <&cru CLK_SPI2>; + assigned-clock-rates = <200000000>; + num-cs = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&spi2m2_cs0>, <&spi2m2_pins>; + status = "okay"; + + pmic@0 { + compatible = "rockchip,rk806"; + reg = <0x0>; + #gpio-cells = <2>; + gpio-controller; + interrupt-parent = <&gpio0>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>, + <&rk806_dvs2_null>, <&rk806_dvs3_null>; + spi-max-frequency = <1000000>; + system-power-controller; + + vcc1-supply = <&vcc4v0_sys>; + vcc2-supply = <&vcc4v0_sys>; + vcc3-supply = <&vcc4v0_sys>; + vcc4-supply = <&vcc4v0_sys>; + vcc5-supply = <&vcc4v0_sys>; + vcc6-supply = <&vcc4v0_sys>; + vcc7-supply = <&vcc4v0_sys>; + vcc8-supply = <&vcc4v0_sys>; + vcc9-supply = <&vcc4v0_sys>; + vcc10-supply = <&vcc4v0_sys>; + vcc11-supply = <&vcc_2v0_pldo_s3>; + vcc12-supply = <&vcc4v0_sys>; + vcc13-supply = <&vcc_1v1_nldo_s3>; + vcc14-supply = <&vcc_1v1_nldo_s3>; + vcca-supply = <&vcca>; + + rk806_dvs1_null: dvs1-null-pins { + pins = "gpio_pwrctrl1"; + function = "pin_fun0"; + }; + + rk806_dvs2_null: dvs2-null-pins { + pins = "gpio_pwrctrl2"; + function = "pin_fun0"; + }; + + rk806_dvs3_null: dvs3-null-pins { + pins = "gpio_pwrctrl3"; + function = "pin_fun0"; + }; + + regulators { + vdd_gpu_s0: dcdc-reg1 { + regulator-name = "vdd_gpu_s0"; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <950000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <400>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_lit_s0: dcdc-reg2 { + regulator-name = "vdd_cpu_lit_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <950000>; + regulator-ramp-delay = <12500>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_logic_s0: dcdc-reg3 { + regulator-name = "vdd_logic_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <675000>; + regulator-max-microvolt = <750000>; + regulator-ramp-delay = <12500>; + + 
regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <750000>; + }; + }; + + vdd_vdenc_s0: dcdc-reg4 { + regulator-name = "vdd_vdenc_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <950000>; + regulator-ramp-delay = <12500>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_ddr_s0: dcdc-reg5 { + regulator-name = "vdd_ddr_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <675000>; + regulator-max-microvolt = <900000>; + regulator-ramp-delay = <12500>; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <850000>; + }; + }; + + vdd2_ddr_s3: dcdc-reg6 { + regulator-name = "vdd2_ddr_s3"; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_2v0_pldo_s3: dcdc-reg7 { + regulator-name = "vdd_2v0_pldo_s3"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <2000000>; + regulator-max-microvolt = <2000000>; + regulator-ramp-delay = <12500>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <2000000>; + }; + }; + + vcc_3v3_s3: dcdc-reg8 { + regulator-name = "vcc_3v3_s3"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + vddq_ddr_s0: dcdc-reg9 { + regulator-name = "vddq_ddr_s0"; + regulator-always-on; + regulator-boot-on; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8_s3: dcdc-reg10 { + regulator-name = "vcc_1v8_s3"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc_1v8_s0: pldo-reg1 { + regulator-name = "vcc_1v8_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcca_1v8_s0: pldo-reg2 { + regulator-name = "vcca_1v8_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vdda_1v2_s0: pldo-reg3 { + regulator-name = "vdda_1v2_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcca_3v3_s0: pldo-reg4 { + regulator-name = "vcca_3v3_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-ramp-delay = <12500>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vccio_sd_s0: pldo-reg5 { + regulator-name = "vccio_sd_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-ramp-delay = <12500>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v8_s3_pldo6: pldo-reg6 { + regulator-name = "vcc_1v8_s3_pldo6"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + regulator-state-mem { + 
regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vdd_0v75_s3: nldo-reg1 { + regulator-name = "vdd_0v75_s3"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <750000>; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <750000>; + }; + }; + + vdda_ddr_pll_s0: nldo-reg2 { + regulator-name = "vdda_ddr_pll_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + regulator-state-mem { + regulator-off-in-suspend; + regulator-suspend-microvolt = <850000>; + }; + }; + + vdda_0v75_s0: nldo-reg3 { + regulator-name = "vdda_0v75_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <837500>; + regulator-max-microvolt = <837500>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdda_0v85_s0: nldo-reg4 { + regulator-name = "vdda_0v85_s0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <850000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + /* Schematics show not in use */ + nldo-reg5 { + }; + }; + }; +}; + +&tsadc { + status = "okay"; +}; + +&u2phy0 { + status = "okay"; +}; + +&u2phy0_otg { + status = "okay"; +}; + +&u2phy2 { + status = "okay"; +}; + +&u2phy2_host { + phy-supply = <&vcc5v0_usb2_host>; + status = "okay"; +}; + +&u2phy3 { + status = "okay"; +}; + +&u2phy3_host { + phy-supply = <&vcc5v0_usb3_host>; + status = "okay"; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&uart2m0_xfer>; + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +&usb_host0_xhci { + usb-role-switch; + status = "okay"; + + port { + usb_host0_xhci_role_switch: endpoint { + remote-endpoint = <&usbc0_role_switch>; + }; + }; +}; + +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; + +&usb_host2_xhci { + status = "okay"; +}; + +&usbdp_phy0 { + mode-switch; + orientation-switch; + sbu1-dc-gpios = <&gpio4 RK_PA0 GPIO_ACTIVE_HIGH>; + sbu2-dc-gpios = <&gpio4 RK_PA1 GPIO_ACTIVE_HIGH>; + status = "okay"; + + port { + #address-cells = <1>; + #size-cells = <0>; + + usbdp_phy0_orientation_switch: endpoint@0 { + reg = <0>; + remote-endpoint = <&usbc0_orientation_switch>; + }; + + usbdp_phy0_dp_altmode_mux: endpoint@1 { + reg = <1>; + remote-endpoint = <&usbc0_dp_altmode_mux>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts index 03ed48246d36a2..294b99dd50da20 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts @@ -65,6 +65,18 @@ vcc12v_dcin: vcc12v-dcin-regulator { regulator-max-microvolt = <12000000>; }; + vcc3v3_wf: vcc3v3-wf-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_wf"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + enable-active-high; + gpio = <&gpio0 RK_PC5 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&pow_en>; + pinctrl-names = "default"; + vin-supply = <&vcc5v0_sys>; + }; + vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; regulator-name = "vcc5v0_host"; @@ -114,6 +126,10 @@ vcc_1v1_nldo_s3: vcc-1v1-nldo-s3-regulator { }; }; +&combphy0_ps { + status = "okay"; +}; + &combphy2_psu { status = "okay"; }; @@ -293,6 +309,14 @@ rgmii_phy1: ethernet-phy@1 { }; }; +&pcie2x1l2 { + pinctrl-0 = 
<&pcie20x1m0_pins>; + pinctrl-names = "default"; + reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>; + vpcie3v3-supply = <&vcc3v3_wf>; + status = "okay"; +}; + &pinctrl { leds { io_led: io-led { @@ -300,6 +324,12 @@ io_led: io-led { }; }; + pcie { + pow_en: pow-en { + rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + power { vcc_5v0_en: vcc-5v0-en { rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>; @@ -317,28 +347,6 @@ vcc5v0_host_en: vcc5v0-host-en { rockchip,pins = <4 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; }; }; - - wifibt { - wl_reset: wl-reset { - rockchip,pins = <0 RK_PD0 RK_FUNC_GPIO &pcfg_pull_up>; - }; - - wl_dis: wl-dis { - rockchip,pins = <0 RK_PD5 RK_FUNC_GPIO &pcfg_output_high>; - }; - - wl_wake_host: wl-wake-host { - rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_up>; - }; - - bt_dis: bt-dis { - rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_output_high>; - }; - - bt_wake_host: bt-wake-host { - rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>; - }; - }; }; &pwm3 { @@ -754,8 +762,6 @@ &usbdp_phy0 { &usb_host0_ehci { status = "okay"; - pinctrl-names = "default"; - pinctrl-0 = <&wl_reset &wl_dis &wl_wake_host &bt_dis &bt_wake_host>; }; &usb_host0_ohci { diff --git a/arch/arm64/boot/dts/sprd/sc2731.dtsi b/arch/arm64/boot/dts/sprd/sc2731.dtsi index e15409f55f438e..12136e68dada67 100644 --- a/arch/arm64/boot/dts/sprd/sc2731.dtsi +++ b/arch/arm64/boot/dts/sprd/sc2731.dtsi @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Spreadtrum SC2731 PMIC dts file * * Copyright (C) 2018, Spreadtrum Communications Inc. - * - * SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ &adi_bus { @@ -95,7 +94,7 @@ pmic_adc: adc@480 { nvmem-cells = <&adc_big_scale>, <&adc_small_scale>; }; - fgu@a00 { + fuel-gauge@a00 { compatible = "sprd,sc2731-fgu"; reg = <0xa00>; bat-detect-gpio = <&pmic_eic 9 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm64/boot/dts/sprd/sc9836-openphone.dts b/arch/arm64/boot/dts/sprd/sc9836-openphone.dts index e5657c35cd10e1..b98589ea5ac291 100644 --- a/arch/arm64/boot/dts/sprd/sc9836-openphone.dts +++ b/arch/arm64/boot/dts/sprd/sc9836-openphone.dts @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR MIT) /* * Spreadtrum SC9836 openphone board DTS file * * Copyright (C) 2014, Spreadtrum Communications Inc. - * - * This file is licensed under a dual GPLv2 or X11 license. */ /dts-v1/; diff --git a/arch/arm64/boot/dts/sprd/sc9836.dtsi b/arch/arm64/boot/dts/sprd/sc9836.dtsi index 8bb8a70966d209..bc3fc9fc3d90ba 100644 --- a/arch/arm64/boot/dts/sprd/sc9836.dtsi +++ b/arch/arm64/boot/dts/sprd/sc9836.dtsi @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR MIT) /* * Spreadtrum SC9836 SoC DTS file * * Copyright (C) 2014, Spreadtrum Communications Inc. - * - * This file is licensed under a dual GPLv2 or X11 license. */ #include "sharkl64.dtsi" diff --git a/arch/arm64/boot/dts/sprd/sc9860.dtsi b/arch/arm64/boot/dts/sprd/sc9860.dtsi index 31952d361a8a9d..d2456d633c39cd 100644 --- a/arch/arm64/boot/dts/sprd/sc9860.dtsi +++ b/arch/arm64/boot/dts/sprd/sc9860.dtsi @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Spreadtrum SC9860 SoC * * Copyright (C) 2016, Spreadtrum Communications Inc. 
- * - * SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ #include diff --git a/arch/arm64/boot/dts/sprd/sc9863a.dtsi b/arch/arm64/boot/dts/sprd/sc9863a.dtsi index 53e5b77d70b523..e5a2857721e2fb 100644 --- a/arch/arm64/boot/dts/sprd/sc9863a.dtsi +++ b/arch/arm64/boot/dts/sprd/sc9863a.dtsi @@ -551,14 +551,14 @@ ap-ahb { #size-cells = <2>; ranges; - sdio0: sdio@20300000 { + sdio0: mmc@20300000 { compatible = "sprd,sdhci-r11"; reg = <0 0x20300000 0 0x1000>; interrupts = ; - clock-names = "sdio", "enable"; clocks = <&aon_clk CLK_SDIO0_2X>, <&apahb_gate CLK_SDIO0_EB>; + clock-names = "sdio", "enable"; assigned-clocks = <&aon_clk CLK_SDIO0_2X>; assigned-clock-parents = <&rpll CLK_RPLL_390M>; @@ -567,14 +567,14 @@ sdio0: sdio@20300000 { no-mmc; }; - sdio3: sdio@20600000 { + sdio3: mmc@20600000 { compatible = "sprd,sdhci-r11"; reg = <0 0x20600000 0 0x1000>; interrupts = ; - clock-names = "sdio", "enable"; clocks = <&aon_clk CLK_EMMC_2X>, <&apahb_gate CLK_EMMC_EB>; + clock-names = "sdio", "enable"; assigned-clocks = <&aon_clk CLK_EMMC_2X>; assigned-clock-parents = <&rpll CLK_RPLL_390M>; diff --git a/arch/arm64/boot/dts/sprd/sharkl64.dtsi b/arch/arm64/boot/dts/sprd/sharkl64.dtsi index 69f64e7fce7c4b..bf58702c4e07dc 100644 --- a/arch/arm64/boot/dts/sprd/sharkl64.dtsi +++ b/arch/arm64/boot/dts/sprd/sharkl64.dtsi @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR MIT) /* * Spreadtrum Sharkl64 platform DTS file * * Copyright (C) 2014, Spreadtrum Communications Inc. - * - * This file is licensed under a dual GPLv2 or X11 license. */ / { diff --git a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts index 1ce3cbbd966844..095b24a31313b8 100644 --- a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts +++ b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Spreadtrum SP9860g board * * Copyright (C) 2017, Spreadtrum Communications Inc. - * - * SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ /dts-v1/; diff --git a/arch/arm64/boot/dts/sprd/ums512.dtsi b/arch/arm64/boot/dts/sprd/ums512.dtsi index 4c080df487240e..efa14309cc4efe 100644 --- a/arch/arm64/boot/dts/sprd/ums512.dtsi +++ b/arch/arm64/boot/dts/sprd/ums512.dtsi @@ -849,9 +849,9 @@ sdio0: mmc@1100000 { compatible = "sprd,sdhci-r11"; reg = <0x1100000 0x1000>; interrupts = ; - clock-names = "sdio", "enable"; clocks = <&ap_clk CLK_SDIO0_2X>, <&apapb_gate CLK_SDIO0_EB>; + clock-names = "sdio", "enable"; assigned-clocks = <&ap_clk CLK_SDIO0_2X>; assigned-clock-parents = <&pll1 CLK_RPLL>; status = "disabled"; @@ -861,9 +861,9 @@ sdio3: mmc@1400000 { compatible = "sprd,sdhci-r11"; reg = <0x1400000 0x1000>; interrupts = ; - clock-names = "sdio", "enable"; clocks = <&ap_clk CLK_EMMC_2X>, <&apapb_gate CLK_EMMC_EB>; + clock-names = "sdio", "enable"; assigned-clocks = <&ap_clk CLK_EMMC_2X>; assigned-clock-parents = <&pll1 CLK_RPLL>; status = "disabled"; diff --git a/arch/arm64/boot/dts/sprd/whale2.dtsi b/arch/arm64/boot/dts/sprd/whale2.dtsi index 7068bfd2f4c34d..a551e14ce8261f 100644 --- a/arch/arm64/boot/dts/sprd/whale2.dtsi +++ b/arch/arm64/boot/dts/sprd/whale2.dtsi @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Spreadtrum Whale2 platform peripherals * * Copyright (C) 2016, Spreadtrum Communications Inc. 
- * - * SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ #include @@ -75,9 +74,10 @@ uart0: serial@0 { "sprd,sc9836-uart"; reg = <0x0 0x100>; interrupts = ; - clock-names = "enable", "uart", "source"; clocks = <&apapb_gate CLK_UART0_EB>, - <&ap_clk CLK_UART0>, <&ext_26m>; + <&ap_clk CLK_UART0>, + <&ext_26m>; + clock-names = "enable", "uart", "source"; status = "disabled"; }; @@ -86,9 +86,10 @@ uart1: serial@100000 { "sprd,sc9836-uart"; reg = <0x100000 0x100>; interrupts = ; - clock-names = "enable", "uart", "source"; clocks = <&apapb_gate CLK_UART1_EB>, - <&ap_clk CLK_UART1>, <&ext_26m>; + <&ap_clk CLK_UART1>, + <&ext_26m>; + clock-names = "enable", "uart", "source"; status = "disabled"; }; @@ -97,9 +98,10 @@ uart2: serial@200000 { "sprd,sc9836-uart"; reg = <0x200000 0x100>; interrupts = ; - clock-names = "enable", "uart", "source"; clocks = <&apapb_gate CLK_UART2_EB>, - <&ap_clk CLK_UART2>, <&ext_26m>; + <&ap_clk CLK_UART2>, + <&ext_26m>; + clock-names = "enable", "uart", "source"; status = "disabled"; }; @@ -108,9 +110,10 @@ uart3: serial@300000 { "sprd,sc9836-uart"; reg = <0x300000 0x100>; interrupts = ; - clock-names = "enable", "uart", "source"; clocks = <&apapb_gate CLK_UART3_EB>, - <&ap_clk CLK_UART3>, <&ext_26m>; + <&ap_clk CLK_UART3>, + <&ext_26m>; + clock-names = "enable", "uart", "source"; status = "disabled"; }; }; @@ -129,19 +132,19 @@ ap_dma: dma-controller@20100000 { /* For backwards compatibility: */ #dma-channels = <32>; dma-channels = <32>; - clock-names = "enable"; clocks = <&apahb_gate CLK_DMA_EB>; + clock-names = "enable"; }; - sdio3: sdio@50430000 { + sdio3: mmc@50430000 { compatible = "sprd,sdhci-r11"; reg = <0 0x50430000 0 0x1000>; interrupts = ; - clock-names = "sdio", "enable", "2x_enable"; clocks = <&aon_prediv CLK_EMMC_2X>, - <&apahb_gate CLK_EMMC_EB>, - <&aon_gate CLK_EMMC_2X_EN>; + <&apahb_gate CLK_EMMC_EB>, + <&aon_gate CLK_EMMC_2X_EN>; + clock-names = "sdio", "enable", "2x_enable"; assigned-clocks = <&aon_prediv CLK_EMMC_2X>; assigned-clock-parents = <&clk_l0_409m6>; @@ -194,8 +197,8 @@ hwlock: hwspinlock@40500000 { compatible = "sprd,hwspinlock-r3p0"; reg = <0 0x40500000 0 0x1000>; #hwlock-cells = <1>; - clock-names = "enable"; clocks = <&aon_gate CLK_SPLK_EB>; + clock-names = "enable"; }; eic_debounce: gpio@40210000 { @@ -258,9 +261,9 @@ watchdog@40310000 { reg = <0 0x40310000 0 0x1000>; interrupts = ; timeout-sec = <12>; - clock-names = "enable", "rtc_enable"; clocks = <&aon_gate CLK_APCPU_WDG_EB>, - <&aon_gate CLK_AP_WDG_RTC_EB>; + <&aon_gate CLK_AP_WDG_RTC_EB>; + clock-names = "enable", "rtc_enable"; }; }; @@ -277,9 +280,9 @@ agcp_dma: dma-controller@41580000 { /* For backwards compatibility: */ #dma-channels = <32>; dma-channels = <32>; - clock-names = "enable", "ashb_eb"; clocks = <&agcp_gate CLK_AGCP_DMAAP_EB>, - <&agcp_gate CLK_AGCP_AP_ASHB_EB>; + <&agcp_gate CLK_AGCP_AP_ASHB_EB>; + clock-names = "enable", "ashb_eb"; }; }; }; diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile index e20b27ddf90111..bcd392c3206e50 100644 --- a/arch/arm64/boot/dts/ti/Makefile +++ b/arch/arm64/boot/dts/ti/Makefile @@ -73,10 +73,16 @@ k3-am654-gp-evm-dtbs := k3-am654-base-board.dtb \ k3-am654-pcie-usb3.dtbo k3-am654-evm-dtbs := k3-am654-base-board.dtb k3-am654-icssg2.dtbo k3-am654-idk-dtbs := k3-am654-evm.dtb k3-am654-idk.dtbo k3-am654-pcie-usb2.dtbo +k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie-dtbs := k3-am6548-iot2050-advanced-m2.dtb \ + k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtbo +k3-am6548-iot2050-advanced-m2-bkey-usb3-dtbs := 
k3-am6548-iot2050-advanced-m2.dtb \ + k3-am6548-iot2050-advanced-m2-bkey-usb3.dtbo dtb-$(CONFIG_ARCH_K3) += k3-am6528-iot2050-basic.dtb dtb-$(CONFIG_ARCH_K3) += k3-am6528-iot2050-basic-pg2.dtb dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced.dtb dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-m2.dtb +dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtb +dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-m2-bkey-usb3.dtb dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-pg2.dtb dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced-sm.dtb dtb-$(CONFIG_ARCH_K3) += k3-am654-base-board.dtb @@ -110,6 +116,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm.dtb dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm-pcie1-ep.dtbo # Boards with J722s SoC +dtb-$(CONFIG_ARCH_K3) += k3-am67a-beagley-ai.dtb dtb-$(CONFIG_ARCH_K3) += k3-j722s-evm.dtb # Boards with J784s4 SoC diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi index 328929c740dc0f..5b92aef5b284b7 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi @@ -241,6 +241,7 @@ main_esm: esm@420000 { bootph-pre-ram; compatible = "ti,j721e-esm"; reg = <0x00 0x420000 0x00 0x1000>; + /* Interrupt sources: rti0, rti1, rti15, wrti0, rti2, rti3 */ ti,esm-pins = <160>, <161>, <162>, <163>, <177>, <178>; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi index e66d486ef1f210..bb43a411f59b28 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi @@ -19,6 +19,7 @@ mcu_esm: esm@4100000 { bootph-pre-ram; compatible = "ti,j721e-esm"; reg = <0x00 0x4100000 0x00 0x1000>; + /* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0 */ ti,esm-pins = <0>, <1>, <2>, <85>; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi b/arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi index 12ba833002a11d..3c6a80aebd9f44 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-thermal.dtsi @@ -12,12 +12,29 @@ main0_thermal: main0-thermal { thermal-sensors = <&wkup_vtm0 0>; trips { + main0_alert: main0-alert { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + main0_crit: main0-crit { temperature = <105000>; /* milliCelsius */ hysteresis = <2000>; /* milliCelsius */ type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&main0_alert>; + cooling-device = + <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; main1_thermal: main1-thermal { @@ -26,11 +43,28 @@ main1_thermal: main1-thermal { thermal-sensors = <&wkup_vtm0 1>; trips { + main1_alert: main1-alert { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + main1_crit: main1-crit { temperature = <105000>; /* milliCelsius */ hysteresis = <2000>; /* milliCelsius */ type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&main1_alert>; + cooling-device = + <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts index 70de288d728e44..a1cd47d7f5e304 100644 --- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts +++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts @@ -888,7 
+888,8 @@ &main_uart6 { mcu { compatible = "ti,cc1352p7"; - reset-gpios = <&main_gpio0 72 GPIO_ACTIVE_LOW>; + bootloader-backdoor-gpios = <&main_gpio0 13 GPIO_ACTIVE_HIGH>; + reset-gpios = <&main_gpio0 14 GPIO_ACTIVE_HIGH>; vdds-supply = <&vdd_3v3>; }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am625.dtsi b/arch/arm64/boot/dts/ti/k3-am625.dtsi index 4014add6320d51..c3d1db47dc9f35 100644 --- a/arch/arm64/boot/dts/ti/k3-am625.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am625.dtsi @@ -50,6 +50,7 @@ cpu0: cpu@0 { next-level-cache = <&L2_0>; operating-points-v2 = <&a53_opp_table>; clocks = <&k3_clks 135 0>; + #cooling-cells = <2>; }; cpu1: cpu@1 { @@ -66,6 +67,7 @@ cpu1: cpu@1 { next-level-cache = <&L2_0>; operating-points-v2 = <&a53_opp_table>; clocks = <&k3_clks 136 0>; + #cooling-cells = <2>; }; cpu2: cpu@2 { @@ -82,6 +84,7 @@ cpu2: cpu@2 { next-level-cache = <&L2_0>; operating-points-v2 = <&a53_opp_table>; clocks = <&k3_clks 137 0>; + #cooling-cells = <2>; }; cpu3: cpu@3 { @@ -98,6 +101,7 @@ cpu3: cpu@3 { next-level-cache = <&L2_0>; operating-points-v2 = <&a53_opp_table>; clocks = <&k3_clks 138 0>; + #cooling-cells = <2>; }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi index 916fcf3cc57d13..16a578ae2b412f 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi @@ -265,6 +265,14 @@ main_pmx0: pinctrl@f4000 { pinctrl-single,function-mask = <0xffffffff>; }; + main_esm: esm@420000 { + compatible = "ti,j721e-esm"; + reg = <0x0 0x420000 0x0 0x1000>; + bootph-pre-ram; + /* Interrupt sources: rti0, rti1, wrti0, rti4, rti2, rti3 */ + ti,esm-pins = <192>, <193>, <195>, <204>, <209>, <210>; + }; + main_timer0: timer@2400000 { compatible = "ti,am654-timer"; reg = <0x00 0x2400000 0x00 0x400>; @@ -1088,4 +1096,14 @@ vpu: video-codec@30210000 { clocks = <&k3_clks 204 2>; power-domains = <&k3_pds 204 TI_SCI_PD_EXCLUSIVE>; }; + + e5010: jpeg-encoder@fd20000 { + compatible = "ti,am62a-jpeg-enc", "img,e5010-jpeg-enc"; + reg = <0x00 0xfd20000 0x00 0x100>, + <0x00 0xfd20200 0x00 0x200>; + reg-names = "core", "mmu"; + clocks = <&k3_clks 201 0>; + power-domains = <&k3_pds 201 TI_SCI_PD_EXCLUSIVE>; + interrupts = ; + }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi index 8c36e56f413883..0469c766b769e4 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi @@ -15,6 +15,14 @@ mcu_pmx0: pinctrl@4084000 { status = "disabled"; }; + mcu_esm: esm@4100000 { + compatible = "ti,j721e-esm"; + reg = <0x0 0x4100000 0x0 0x1000>; + bootph-pre-ram; + /* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0 */ + ti,esm-pins = <0>, <1>, <2>, <85>; + }; + /* * The MCU domain timer interrupts are routed only to the ESM module, * and not currently available for Linux. 
The MCU domain timers are diff --git a/arch/arm64/boot/dts/ti/k3-am62a.dtsi b/arch/arm64/boot/dts/ti/k3-am62a.dtsi index b1b884600293ff..4d79b3e9486af1 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a.dtsi @@ -61,6 +61,8 @@ cbass_main: bus@f0000 { <0x00 0x08000000 0x00 0x08000000 0x00 0x00200000>, /* Main CPSW */ <0x00 0x0e000000 0x00 0x0e000000 0x00 0x01d20000>, /* Second peripheral window */ <0x00 0x0fd00000 0x00 0x0fd00000 0x00 0x00020000>, /* GPU */ + <0x00 0x0fd20000 0x00 0x0fd20000 0x00 0x00000100>, /* JPEGENC0_CORE */ + <0x00 0x0fd20200 0x00 0x0fd20200 0x00 0x00000200>, /* JPEGENC0_CORE_MMU */ <0x00 0x20000000 0x00 0x20000000 0x00 0x0a008000>, /* Third peripheral window */ <0x00 0x30040000 0x00 0x30040000 0x00 0x00080000>, /* PRUSS-M */ <0x00 0x30101000 0x00 0x30101000 0x00 0x00010100>, /* CSI window */ diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi index 9701fc69aed946..9b6f5137910837 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-main.dtsi @@ -260,8 +260,9 @@ main_pmx0: pinctrl@f4000 { main_esm: esm@420000 { compatible = "ti,j721e-esm"; reg = <0x00 0x420000 0x00 0x1000>; - ti,esm-pins = <160>, <161>, <162>, <163>, <177>, <178>; bootph-pre-ram; + /* Interrupt sources: rti0, rti1, wrti0 rti2, rti3, rti15 */ + ti,esm-pins = <224>, <225>, <227>, <241>, <242>, <248>; }; main_timer0: timer@2400000 { diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi index df7945156397b1..b33aff0d65c9de 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi @@ -26,9 +26,9 @@ mcu_pmx_range: gpio-range { mcu_esm: esm@4100000 { compatible = "ti,j721e-esm"; reg = <0x00 0x4100000 0x00 0x1000>; - ti,esm-pins = <0>, <1>, <2>, <85>; - status = "reserved"; bootph-pre-ram; + /* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0, wrti0 */ + ti,esm-pins = <0>, <1>, <2>, <85>, <86>; }; /* diff --git a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi index 0ce9721b417653..420c77c8e9e5e2 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi @@ -65,5 +65,6 @@ &main_gpio0 { &main_gpio1 { gpio-ranges = <&main_pmx0 0 94 32>, <&main_pmx0 42 137 5>, <&main_pmx0 47 143 3>, <&main_pmx0 50 149 2>; + gpio-reserved-ranges = <32 10>; ti,ngpio = <52>; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts index ff65955551a328..3efa12bb725462 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts @@ -645,8 +645,6 @@ &mcu_pmx0 { wkup_uart0_pins_default: wkup-uart0-default-pins { pinctrl-single,pins = < - AM62PX_MCU_IOPAD(0x02c, PIN_INPUT, 0) /* (C7) WKUP_UART0_CTSn */ - AM62PX_MCU_IOPAD(0x030, PIN_OUTPUT, 0) /* (C6) WKUP_UART0_RTSn */ AM62PX_MCU_IOPAD(0x024, PIN_INPUT, 0) /* (D8) WKUP_UART0_RXD */ AM62PX_MCU_IOPAD(0x028, PIN_OUTPUT, 0) /* (D7) WKUP_UART0_TXD */ >; diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi index f8370dd033502c..7eae18399caa6e 100644 --- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi @@ -389,7 +389,8 @@ main_esm: esm@420000 { bootph-pre-ram; compatible = "ti,j721e-esm"; reg = <0x00 0x420000 0x00 0x1000>; - ti,esm-pins = 
<160>, <161>; + /* Interrupt sources: rti0, rti1, rti8, rti9, rti10, rti11 */ + ti,esm-pins = <160>, <161>, <162>, <163>, <164>, <165>; }; main_uart0: serial@2800000 { @@ -677,6 +678,7 @@ cpsw3g: ethernet@8000000 { assigned-clock-parents = <&k3_clks 13 9>; clock-names = "fck"; power-domains = <&k3_pds 13 TI_SCI_PD_EXCLUSIVE>; + status = "disabled"; dmas = <&main_pktdma 0xC500 15>, <&main_pktdma 0xC501 15>, @@ -701,6 +703,7 @@ cpsw_port1: port@1 { phys = <&phy_gmii_sel 1>; mac-address = [00 00 00 00 00 00]; ti,syscon-efuse = <&main_conf 0x200>; + status = "disabled"; }; cpsw_port2: port@2 { @@ -709,6 +712,7 @@ cpsw_port2: port@2 { label = "port2"; phys = <&phy_gmii_sel 2>; mac-address = [00 00 00 00 00 00]; + status = "disabled"; }; }; @@ -759,7 +763,7 @@ timesync_router: pinctrl@a40000 { }; usbss0: cdns-usb@f900000 { - compatible = "ti,am64-usb"; + compatible = "ti,am64-usb", "ti,j721e-usb"; reg = <0x00 0xf900000 0x00 0x100>; power-domains = <&k3_pds 161 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 161 9>, <&k3_clks 161 1>; diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi index ec17285869da6e..ad4bed5d3f9eb2 100644 --- a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi @@ -158,6 +158,7 @@ mcu_esm: esm@4100000 { bootph-pre-ram; compatible = "ti,j721e-esm"; reg = <0x00 0x4100000 0x00 0x1000>; - ti,esm-pins = <0>, <1>; + /* Interrupt sources: esm0_cfg, esm0_hi, esm0_low, mrti0 */ + ti,esm-pins = <0>, <1>, <2>, <85>; }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi index ea7c58fb67e207..6bece2fb4e9531 100644 --- a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi @@ -185,6 +185,7 @@ AM64X_IOPAD(0x0278, PIN_INPUT, 7) /* (C19) EXTINTn.GPIO1_70 */ &cpsw3g { pinctrl-names = "default"; pinctrl-0 = <&cpsw_rgmii1_pins_default>; + status = "okay"; }; &cpsw3g_mdio { @@ -208,10 +209,7 @@ cpsw3g_phy1: ethernet-phy@1 { &cpsw_port1 { phy-mode = "rgmii-rxid"; phy-handle = <&cpsw3g_phy1>; -}; - -&cpsw_port2 { - status = "disabled"; + status = "okay"; }; &mailbox0_cluster2 { diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso b/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso index f08c0e272b53a6..92faf762894c8b 100644 --- a/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso +++ b/arch/arm64/boot/dts/ti/k3-am642-evm-nand.dtso @@ -12,7 +12,7 @@ #include "k3-pinctrl.h" &main_pmx0 { - gpmc0_pins_default: gpmc0-pins-default { + gpmc0_default_pins: gpmc0-default-pins { bootph-all; pinctrl-single,pins = < AM64X_IOPAD(0x0094, PIN_INPUT, 7) /* (T19) GPMC0_BE1n.GPIO0_36 */ @@ -50,7 +50,7 @@ AM64X_IOPAD(0x00a4, PIN_OUTPUT, 0) /* (N17) GPMC0_DIR */ }; &main_gpio0 { - gpio0-36 { + gpmc0-hog { bootph-all; gpio-hog; gpios = <36 0>; @@ -67,7 +67,7 @@ &elm0 { &gpmc0 { status = "okay"; pinctrl-names = "default"; - pinctrl-0 = <&gpmc0_pins_default>; + pinctrl-0 = <&gpmc0_default_pins>; #address-cells = <2>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts index 6bb1ad2e56ec22..97ca16f00cd260 100644 --- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts +++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts @@ -616,17 +616,20 @@ &cpsw3g { bootph-all; pinctrl-names = "default"; pinctrl-0 = <&rgmii1_pins_default>, <&rgmii2_pins_default>; + status = "okay"; }; &cpsw_port1 { bootph-all; phy-mode = "rgmii-rxid"; phy-handle = <&cpsw3g_phy0>; + status = "okay"; }; &cpsw_port2 { phy-mode = 
"rgmii-rxid"; phy-handle = <&cpsw3g_phy3>; + status = "okay"; }; &cpsw3g_mdio { @@ -646,6 +649,10 @@ cpsw3g_phy0: ethernet-phy@0 { &tscadc0 { /* ADC is reserved for R5 usage */ status = "reserved"; + + adc { + ti,adc-channels = <0 1 2 3 4 5 6 7>; + }; }; &ospi0 { diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts index 30729b49dd690d..60285d736e07a3 100644 --- a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts +++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts @@ -28,6 +28,8 @@ / { model = "PHYTEC phyBOARD-Electra-AM64x RDK"; aliases { + ethernet1 = &icssg0_emac0; + ethernet2 = &icssg0_emac1; mmc1 = &sdhci1; serial2 = &main_uart0; serial3 = &main_uart1; @@ -55,6 +57,73 @@ can_tc2: can-phy1 { standby-gpios = <&main_gpio0 35 GPIO_ACTIVE_HIGH>; }; + /* Dual Ethernet application node on PRU-ICSSG0 */ + ethernet { + compatible = "ti,am642-icssg-prueth"; + pinctrl-names = "default"; + pinctrl-0 = <&icssg0_rgmii1_pins_default>, <&icssg0_rgmii2_pins_default>; + + interrupt-parent = <&icssg0_intc>; + interrupts = <24 0 2>, <25 1 3>; + interrupt-names = "tx_ts0", "tx_ts1"; + + sram = <&oc_sram>; + firmware-name = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf", + "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf", + "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf", + "ti-pruss/am65x-sr2-pru1-prueth-fw.elf", + "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf", + "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf"; + + dmas = <&main_pktdma 0xc100 15>, /* egress slice 0 */ + <&main_pktdma 0xc101 15>, /* egress slice 0 */ + <&main_pktdma 0xc102 15>, /* egress slice 0 */ + <&main_pktdma 0xc103 15>, /* egress slice 0 */ + <&main_pktdma 0xc104 15>, /* egress slice 1 */ + <&main_pktdma 0xc105 15>, /* egress slice 1 */ + <&main_pktdma 0xc106 15>, /* egress slice 1 */ + <&main_pktdma 0xc107 15>, /* egress slice 1 */ + <&main_pktdma 0x4100 15>, /* ingress slice 0 */ + <&main_pktdma 0x4101 15>; /* ingress slice 1 */ + dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3", + "tx1-0", "tx1-1", "tx1-2", "tx1-3", + "rx0", "rx1"; + + ti,prus = <&pru0_0>, <&rtu0_0>, <&tx_pru0_0>, <&pru0_1>, <&rtu0_1>, <&tx_pru0_1>; + ti,pruss-gp-mux-sel = <2>, /* MII mode */ + <2>, + <2>, + <2>, /* MII mode */ + <2>, + <2>; + + ti,mii-g-rt = <&icssg0_mii_g_rt>; + ti,mii-rt = <&icssg0_mii_rt>; + ti,iep = <&icssg0_iep0>, <&icssg0_iep1>; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + icssg0_emac0: port@0 { + reg = <0>; + phy-handle = <&icssg0_phy1>; + phy-mode = "rgmii-id"; + /* Filled in by bootloader */ + local-mac-address = [00 00 00 00 00 00]; + ti,syscon-rgmii-delay = <&main_conf 0x4100>; + }; + + icssg0_emac1: port@1 { + reg = <1>; + phy-handle = <&icssg0_phy2>; + phy-mode = "rgmii-id"; + /* Filled in by bootloader */ + local-mac-address = [00 00 00 00 00 00]; + ti,syscon-rgmii-delay = <&main_conf 0x4104>; + }; + }; + }; + keys { compatible = "gpio-keys"; autorepeat; @@ -118,6 +187,12 @@ AM64X_IOPAD(0x0090, PIN_OUTPUT, 7) /* (P17) GPMC0_BE0n_CLE.GPIO0_35 */ >; }; + clkout0_pins_default: clkout0-default-pins { + pinctrl-single,pins = < + AM64X_IOPAD(0x0274, PIN_OUTPUT, 5) /* (A19) EXT_REFCLK1.CLKOUT0 */ + >; + }; + gpio_keys_pins_default: gpio-keys-default-pins { pinctrl-single,pins = < AM64X_IOPAD(0x0044, PIN_INPUT, 7) /* (T18) GPMC0_AD2.GPIO0_17 */ @@ -125,6 +200,49 @@ AM64X_IOPAD(0x0054, PIN_INPUT, 7) /* (V20) GPMC0_AD6.GPIO0_21 */ >; }; + icssg0_mdio_pins_default: icssg0-mdio-default-pins { + pinctrl-single,pins = < + AM64X_IOPAD(0x0200, PIN_INPUT, 0) /* (P2) 
PRG0_MDIO0_MDIO */ + AM64X_IOPAD(0x0204, PIN_OUTPUT, 0) /* (P3) PRG0_MDIO0_MDC */ + AM64X_IOPAD(0x01A8, PIN_OUTPUT, 7) /* (V1) PRG0_PRU0_GPO18.GPIO1_18 */ + AM64X_IOPAD(0x01AC, PIN_OUTPUT, 7) /* (W1) PRG0_PRU0_GPO19.GPIO1_19 */ + >; + }; + + icssg0_rgmii1_pins_default: icssg0-rgmii1-default-pins { + pinctrl-single,pins = < + AM64X_IOPAD(0x0160, PIN_INPUT, 2) /* (Y1) PRG0_PRU0_GPO0.PRG0_RGMII1_RD0 */ + AM64X_IOPAD(0x0164, PIN_INPUT, 2) /* (R4) PRG0_PRU0_GPO1.PRG0_RGMII1_RD1 */ + AM64X_IOPAD(0x0168, PIN_INPUT, 2) /* (U2) PRG0_PRU0_GPO2.PRG0_RGMII1_RD2 */ + AM64X_IOPAD(0x016c, PIN_INPUT, 2) /* (V2) PRG0_PRU0_GPO3.PRG0_RGMII1_RD3 */ + AM64X_IOPAD(0x0170, PIN_INPUT, 2) /* (AA2) PRG0_PRU0_GPO4.PRG0_RGMII1_RX_CTL */ + AM64X_IOPAD(0x0178, PIN_INPUT, 2) /* (T3) PRG0_PRU0_GPO6.PRG0_RGMII1_RXC */ + AM64X_IOPAD(0x018c, PIN_OUTPUT, 2) /* (Y3) PRG0_PRU0_GPO11.PRG0_RGMII1_TD0 */ + AM64X_IOPAD(0x0190, PIN_OUTPUT, 2) /* (AA3) PRG0_PRU0_GPO12.PRG0_RGMII1_TD1 */ + AM64X_IOPAD(0x0194, PIN_OUTPUT, 2) /* (R6) PRG0_PRU0_GPO13.PRG0_RGMII1_TD2 */ + AM64X_IOPAD(0x0198, PIN_OUTPUT, 2) /* (V4) PRG0_PRU0_GPO14.PRG0_RGMII1_TD3 */ + AM64X_IOPAD(0x019c, PIN_OUTPUT, 2) /* (T5) PRG0_PRU0_GPO15.PRG0_RGMII1_TX_CTL */ + AM64X_IOPAD(0x01a0, PIN_OUTPUT, 2) /* (U4) PRG0_PRU0_GPO16.PRG0_RGMII1_TXC */ + >; + }; + + icssg0_rgmii2_pins_default: icssg0-rgmii2-default-pins { + pinctrl-single,pins = < + AM64X_IOPAD(0x01b0, PIN_INPUT, 2) /* (Y2) PRG0_PRU1_GPO0.PRG0_RGMII2_RD0 */ + AM64X_IOPAD(0x01b4, PIN_INPUT, 2) /* (W2) PRG0_PRU1_GPO1.PRG0_RGMII2_RD1 */ + AM64X_IOPAD(0x01b8, PIN_INPUT, 2) /* (V3) PRG0_PRU1_GPO2.PRG0_RGMII2_RD2 */ + AM64X_IOPAD(0x01bc, PIN_INPUT, 2) /* (T4) PRG0_PRU1_GPO3.PRG0_RGMII2_RD3 */ + AM64X_IOPAD(0x01c0, PIN_INPUT, 2) /* (W3) PRG0_PRU1_GPO4.PRG0_RGMII2_RX_CTL */ + AM64X_IOPAD(0x01c8, PIN_INPUT, 2) /* (R5) PRG0_PRU1_GPO6.PRG0_RGMII2_RXC */ + AM64X_IOPAD(0x01dc, PIN_OUTPUT, 2) /* (W4) PRG0_PRU1_GPO11.PRG0_RGMII2_TD0 */ + AM64X_IOPAD(0x01e0, PIN_OUTPUT, 2) /* (Y4) PRG0_PRU1_GPO12.PRG0_RGMII2_TD1 */ + AM64X_IOPAD(0x01e4, PIN_OUTPUT, 2) /* (T6) PRG0_PRU1_GPO13.PRG0_RGMII2_TD2 */ + AM64X_IOPAD(0x01e8, PIN_OUTPUT, 2) /* (U6) PRG0_PRU1_GPO14.PRG0_RGMII2_TD3 */ + AM64X_IOPAD(0x01ec, PIN_OUTPUT, 2) /* (U5) PRG0_PRU1_GPO15.PRG0_RGMII2_TX_CTL */ + AM64X_IOPAD(0x01f0, PIN_OUTPUT, 2) /* (AA4) PRG0_PRU1_GPO16.PRG0_RGMII2_TXC */ + >; + }; + main_i2c1_pins_default: main-i2c1-default-pins { pinctrl-single,pins = < AM64X_IOPAD(0x0268, PIN_INPUT, 0) /* (C18) I2C1_SCL */ @@ -198,6 +316,34 @@ AM64X_IOPAD(0x0040, PIN_OUTPUT, 7) /* (U21) GPMC0_AD1.GPIO0_16 */ }; }; +&icssg0_mdio { + pinctrl-names = "default"; + pinctrl-0 = <&icssg0_mdio_pins_default &clkout0_pins_default>; + status = "okay"; + + icssg0_phy1: ethernet-phy@1 { + compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22"; + reg = <0x1>; + tx-fifo-depth = ; + rx-fifo-depth = ; + reset-gpios = <&main_gpio1 18 GPIO_ACTIVE_LOW>; + reset-assert-us = <1000>; + reset-deassert-us = <1000>; + ti,rx-internal-delay = ; + }; + + icssg0_phy2: ethernet-phy@2 { + compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22"; + reg = <0x2>; + tx-fifo-depth = ; + rx-fifo-depth = ; + reset-gpios = <&main_gpio1 19 GPIO_ACTIVE_LOW>; + reset-assert-us = <1000>; + reset-deassert-us = <1000>; + ti,rx-internal-delay = ; + }; +}; + &main_i2c1 { status = "okay"; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts index 44ecbcf1c84474..86369525259c3e 100644 --- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts +++ 
b/arch/arm64/boot/dts/ti/k3-am642-sk.dts @@ -527,16 +527,19 @@ &usb0 { &cpsw3g { pinctrl-names = "default"; pinctrl-0 = <&rgmii1_pins_default>, <&rgmii2_pins_default>; + status = "okay"; }; &cpsw_port1 { phy-mode = "rgmii-rxid"; phy-handle = <&cpsw3g_phy0>; + status = "okay"; }; &cpsw_port2 { phy-mode = "rgmii-rxid"; phy-handle = <&cpsw3g_phy1>; + status = "okay"; }; &cpsw3g_mdio { diff --git a/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi b/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi index c19d0b8bbf0fca..a5cec9a075109a 100644 --- a/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am642-sr-som.dtsi @@ -177,6 +177,7 @@ vdd_mmc0: regulator-vdd-mmc0 { &cpsw3g { pinctrl-names = "default"; pinctrl-0 = <&rgmii1_default_pins>; + status = "okay"; }; &cpsw3g_mdio { @@ -210,10 +211,7 @@ ethernet_phy0: ethernet-phy@0 { &cpsw_port1 { phy-mode = "rgmii-id"; phy-handle = <ðernet_phy0>; -}; - -&cpsw_port2 { - status = "disabled"; + status = "okay"; }; &icssg1_mdio { diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts index c40ad67cee0191..e06a3b178b3468 100644 --- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts +++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts @@ -24,6 +24,8 @@ / { aliases { ethernet0 = &cpsw_port1; + ethernet1 = &icssg1_emac0; + ethernet2 = &icssg1_emac1; i2c1 = &mcu_i2c0; mmc1 = &sdhci1; serial0 = &mcu_uart0; @@ -71,6 +73,66 @@ led-1 { }; }; + icssg1_eth: icssg1-eth { + compatible = "ti,am642-icssg-prueth"; + pinctrl-names = "default"; + pinctrl-0 = <&pru_icssg1_rgmii1_pins>, <&pru_icssg1_rgmii2_pins>; + interrupt-parent = <&icssg1_intc>; + interrupts = <24 0 2>, <25 1 3>; + interrupt-names = "tx_ts0", "tx_ts1"; + dmas = <&main_pktdma 0xc200 15>, /* egress slice 0 */ + <&main_pktdma 0xc201 15>, /* egress slice 0 */ + <&main_pktdma 0xc202 15>, /* egress slice 0 */ + <&main_pktdma 0xc203 15>, /* egress slice 0 */ + <&main_pktdma 0xc204 15>, /* egress slice 1 */ + <&main_pktdma 0xc205 15>, /* egress slice 1 */ + <&main_pktdma 0xc206 15>, /* egress slice 1 */ + <&main_pktdma 0xc207 15>, /* egress slice 1 */ + <&main_pktdma 0x4200 15>, /* ingress slice 0 */ + <&main_pktdma 0x4201 15>; /* ingress slice 1 */ + dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3", + "tx1-0", "tx1-1", "tx1-2", "tx1-3", + "rx0", "rx1"; + sram = <&oc_sram>; + firmware-name = "ti-pruss/am64x-sr2-pru0-prueth-fw.elf", + "ti-pruss/am64x-sr2-rtu0-prueth-fw.elf", + "ti-pruss/am64x-sr2-txpru0-prueth-fw.elf", + "ti-pruss/am64x-sr2-pru1-prueth-fw.elf", + "ti-pruss/am64x-sr2-rtu1-prueth-fw.elf", + "ti-pruss/am64x-sr2-txpru1-prueth-fw.elf"; + ti,prus = <&pru1_0>, <&rtu1_0>, <&tx_pru1_0>, <&pru1_1>, <&rtu1_1>, <&tx_pru1_1>; + ti,pruss-gp-mux-sel = <2>, /* MII mode */ + <2>, + <2>, + <2>, /* MII mode */ + <2>, + <2>; + ti,mii-g-rt = <&icssg1_mii_g_rt>; + ti,mii-rt = <&icssg1_mii_rt>; + ti,iep = <&icssg1_iep0>, <&icssg1_iep1>; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + icssg1_emac0: port@0 { + reg = <0>; + phy-handle = <&icssg1_phy0c>; + phy-mode = "rgmii-id"; + /* Filled in by bootloader */ + local-mac-address = [00 00 00 00 00 00]; + }; + + icssg1_emac1: port@1 { + reg = <1>; + phy-handle = <&icssg1_phy03>; + phy-mode = "rgmii-id"; + /* Filled in by bootloader */ + local-mac-address = [00 00 00 00 00 00]; + }; + }; + }; + fan0: pwm-fan { compatible = "pwm-fan"; pinctrl-names = "default"; @@ -119,15 +181,13 @@ reg_sd: regulator-sd { &cpsw3g { pinctrl-names = "default"; pinctrl-0 = 
<&cpsw_pins>; + status = "okay"; }; &cpsw_port1 { phy-mode = "rgmii-rxid"; phy-handle = <&cpsw3g_phy0>; -}; - -&cpsw_port2 { - status = "disabled"; + status = "okay"; }; &cpsw3g_mdio { @@ -154,6 +214,42 @@ &epwm5 { status = "okay"; }; +&icssg1_mdio { + pinctrl-names = "default"; + pinctrl-0 = <&pru_icssg1_mdio_pins>; + status = "okay"; + + /* phy-mode is fixed up to rgmii-rxid by prueth driver to account for + * the SoC integration, so the only rx-internal-delay and no + * tx-internal-delay is set for the PHYs. + */ + + icssg1_phy03: ethernet-phy@3 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0x3>; + reset-gpios = <&main_gpio1 47 GPIO_ACTIVE_LOW>; + reset-assert-us = <1000>; + reset-deassert-us = <1000>; + ti,rx-fifo-depth = ; + ti,tx-fifo-depth = ; + ti,rx-internal-delay = ; + ti,clk-output-sel = ; + }; + + icssg1_phy0c: ethernet-phy@c { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0xc>; + reset-gpios = <&main_gpio1 51 GPIO_ACTIVE_LOW>; + reset-assert-us = <1000>; + reset-deassert-us = <1000>; + ti,rx-fifo-depth = ; + ti,tx-fifo-depth = ; + ti,rx-internal-delay = ; + ti,clk-output-sel = ; + }; +}; + + &main_gpio0 { pinctrl-names = "default"; pinctrl-0 = <&main_gpio0_digital_pins>, diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi index e2584a5efe3438..b3c4c0eec3dc8f 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg2.dtsi @@ -9,11 +9,6 @@ * Common bits of the IOT2050 Basic and Advanced variants, PG2 */ -&mcu_r5fss0 { - /* lock-step mode not supported on PG2 boards */ - ti,cluster-mode = <0>; -}; - &main_pmx0 { cp2102n_reset_pin_default: cp2102n-reset-default-pins { pinctrl-single,pins = < diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi index ef34b851e178a7..e5136ed9476517 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi @@ -627,11 +627,62 @@ icssg0_eth0_phy: ethernet-phy@0 { reg = <0>; ti,rx-internal-delay = ; ti,fifo-depth = ; + + leds { + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + color = ; + function = LED_FUNCTION_LAN; + }; + + led@1 { + reg = <1>; + color = ; + function = LED_FUNCTION_SPEED_LAN; + }; + + led@2 { + reg = <2>; + color = ; + function = LED_FUNCTION_SPEED_LAN; + }; + }; }; icssg0_eth1_phy: ethernet-phy@1 { reg = <1>; ti,rx-internal-delay = ; ti,fifo-depth = ; + + leds { + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + color = ; + function = LED_FUNCTION_LAN; + }; + + led@1 { + reg = <1>; + color = ; + function = LED_FUNCTION_SPEED_LAN; + }; + + led@2 { + reg = <2>; + color = ; + function = LED_FUNCTION_SPEED_LAN; + }; + }; }; }; + +&mcu_r5fss0 { + /* lock-step mode not supported on iot2050 boards */ + ti,cluster-mode = <0>; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 1af3dedde1f67f..1f1af7ea233053 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -54,6 +54,14 @@ gic_its: msi-controller@1820000 { }; }; + main_esm: esm@700000 { + compatible = "ti,j721e-esm"; + reg = <0x00 0x700000 0x00 0x1000>; + bootph-pre-ram; + /* Interrupt sources: rti0, rti1, rti2, rti3 */ + ti,esm-pins = <224>, <225>, <226>, <227>; + }; + serdes0: serdes@900000 { compatible = "ti,phy-am654-serdes"; reg = <0x0 0x900000 0x0 0x2000>; @@ 
-478,7 +486,7 @@ scm_conf: scm-conf@100000 { ranges = <0x0 0x0 0x00100000 0x1c000>; serdes0_clk: clock@4080 { - compatible = "ti,am654-serdes-ctrl", "syscon"; + compatible = "ti,am654-serdes-ctrl", "syscon", "simple-mfd"; reg = <0x4080 0x4>; serdes0_mux: mux-controller { @@ -489,7 +497,7 @@ serdes0_mux: mux-controller { }; serdes1_clk: clock@4090 { - compatible = "ti,am654-serdes-ctrl", "syscon"; + compatible = "ti,am654-serdes-ctrl", "syscon", "simple-mfd"; reg = <0x4090 0x4>; serdes1_mux: mux-controller { diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi index 43c6118d2bf0fb..7cf1f646500a16 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi @@ -292,13 +292,13 @@ fss: bus@47000000 { ranges = <0x0 0x47000000 0x0 0x47000000 0x0 0x100>, /* FSS Control */ <0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */ <0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */ - <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* OSPI0 Memory */ - <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */ + <0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>, /* FSS data region 1 */ + <0x4 0x00000000 0x4 0x00000000 0x4 0x00000000>; /* FSS data region 0/3 */ ospi0: spi@47040000 { compatible = "ti,am654-ospi", "cdns,qspi-nor"; reg = <0x0 0x47040000 0x0 0x100>, - <0x5 0x00000000 0x1 0x0000000>; + <0x5 0x00000000 0x1 0x00000000>; interrupts = ; cdns,fifo-depth = <256>; cdns,fifo-width = <4>; @@ -316,7 +316,7 @@ ospi0: spi@47040000 { ospi1: spi@47050000 { compatible = "ti,am654-ospi", "cdns,qspi-nor"; reg = <0x0 0x47050000 0x0 0x100>, - <0x7 0x00000000 0x1 0x00000000>; + <0x7 0x00000000 0x1 0x00000000>; interrupts = ; cdns,fifo-depth = <256>; cdns,fifo-width = <4>; @@ -440,6 +440,14 @@ mcu_r5fss0_core1: r5f@41400000 { }; }; + mcu_esm: esm@40800000 { + compatible = "ti,j721e-esm"; + reg = <0x00 0x40800000 0x00 0x1000>; + bootph-pre-ram; + /* Interrupt sources: mrti0, mrti1 */ + ti,esm-pins = <104>, <105>; + }; + mcu_rti1: watchdog@40610000 { compatible = "ti,j7-rti-wdt"; reg = <0x0 0x40610000 0x0 0x100>; diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi index c59baebc5a25b1..c74a0a25832cb4 100644 --- a/arch/arm64/boot/dts/ti/k3-am65.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi @@ -69,11 +69,10 @@ cbass_main: bus@100000 { <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, - <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>, + <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A53 PERIPHBASE */ - <0x00 0x70000000 0x00 0x70000000 0x00 0x200000>, - <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>, - <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>; + <0x00 0x70000000 0x00 0x70000000 0x00 0x00200000>, + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; cbass_mcu: bus@28380000 { compatible = "simple-bus"; @@ -89,9 +88,8 @@ cbass_mcu: bus@28380000 { <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */ <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */ <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI space 1 */ - <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>, /* FSS OSPI0 data region 1 */ - <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>, /* FSS OSPI0 data region 3*/ - <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>; /* FSS OSPI1 data region 3*/ + <0x00 0x50000000 0x00 
0x50000000 0x00 0x10000000>, /* FSS data region 1 */ + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */ cbass_wakeup: bus@42040000 { compatible = "simple-bus"; diff --git a/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts index 29a31891b3db6c..4968a47f31ea79 100644 --- a/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts +++ b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts @@ -22,8 +22,3 @@ / { compatible = "siemens,iot2050-basic", "ti,am654"; model = "SIMATIC IOT2050 Basic"; }; - -&mcu_r5fss0 { - /* lock-step mode not supported on this board */ - ti,cluster-mode = <0>; -}; diff --git a/arch/arm64/boot/dts/ti/k3-am654-idk.dtso b/arch/arm64/boot/dts/ti/k3-am654-idk.dtso index 8bdb87fcbde007..b0ce2cb2fdc8e1 100644 --- a/arch/arm64/boot/dts/ti/k3-am654-idk.dtso +++ b/arch/arm64/boot/dts/ti/k3-am654-idk.dtso @@ -8,6 +8,7 @@ /dts-v1/; /plugin/; +#include #include #include "k3-pinctrl.h" @@ -58,9 +59,7 @@ icssg0_eth: icssg0-eth { <&main_udmap 0xc107>, /* egress slice 1 */ <&main_udmap 0x4100>, /* ingress slice 0 */ - <&main_udmap 0x4101>, /* ingress slice 1 */ - <&main_udmap 0x4102>, /* mgmnt rsp slice 0 */ - <&main_udmap 0x4103>; /* mgmnt rsp slice 1 */ + <&main_udmap 0x4101>; /* ingress slice 1 */ dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3", "tx1-0", "tx1-1", "tx1-2", "tx1-3", "rx0", "rx1"; @@ -126,9 +125,7 @@ icssg1_eth: icssg1-eth { <&main_udmap 0xc207>, /* egress slice 1 */ <&main_udmap 0x4200>, /* ingress slice 0 */ - <&main_udmap 0x4201>, /* ingress slice 1 */ - <&main_udmap 0x4202>, /* mgmnt rsp slice 0 */ - <&main_udmap 0x4203>; /* mgmnt rsp slice 1 */ + <&main_udmap 0x4201>; /* ingress slice 1 */ dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3", "tx1-0", "tx1-1", "tx1-2", "tx1-3", "rx0", "rx1"; @@ -154,6 +151,24 @@ icssg1_emac1: port@1 { }; }; }; + + transceiver1: can-phy0 { + compatible = "ti,tcan1042"; + #phy-cells = <0>; + max-bitrate = <5000000>; + pinctrl-names = "default"; + pinctrl-0 = <&mcan0_gpio_pins_default>; + standby-gpios = <&main_gpio1 47 GPIO_ACTIVE_LOW>; + }; + + transceiver2: can-phy1 { + compatible = "ti,tcan1042"; + #phy-cells = <0>; + max-bitrate = <5000000>; + pinctrl-names = "default"; + pinctrl-0 = <&mcan1_gpio_pins_default>; + standby-gpios = <&main_gpio1 67 GPIO_ACTIVE_LOW>; + }; }; &main_pmx0 { @@ -243,6 +258,34 @@ icssg1_iep0_pins_default: icssg1-iep0-default-pins { AM65X_IOPAD(0x012c, PIN_INPUT, 2) /* (AG26) PRG1_PRU0_GPO19.PRG1_IEP0_EDC_SYNC_OUT0 */ >; }; + + mcan0_gpio_pins_default: mcan0-gpio-default-pins { + pinctrl-single,pins = < + AM65X_IOPAD(0x023c, PIN_INPUT, 7) /* (V25) PRG0_PRU0_GPIO18:GPIO1_47 */ + >; + }; + + mcan1_gpio_pins_default: mcan1-gpio-default-pins { + pinctrl-single,pins = < + AM65X_IOPAD(0x028c, PIN_INPUT, 7) /* (Y26) PRG0_PRU1_GPIO18.GPIO1_67 */ + >; + }; +}; + +&wkup_pmx0 { + mcu_mcan0_pins_default: mcu-mcan0-default-pins { + pinctrl-single,pins = < + AM65X_WKUP_IOPAD(0x00ac, PIN_INPUT_PULLUP, 0) /* (W2) MCU_MCAN0_RX */ + AM65X_WKUP_IOPAD(0x00a8, PIN_OUTPUT_PULLUP, 0) /* (W1) MCU_MCAN0_TX */ + >; + }; + + mcu_mcan1_pins_default: mcu-mcan1-default-pins { + pinctrl-single,pins = < + AM65X_WKUP_IOPAD(0x00c4, PIN_INPUT_PULLUP, 1) /* (AD3) WKUP_GPIO0_5.MCU_MCAN1_RX */ + AM65X_WKUP_IOPAD(0x00c0, PIN_OUTPUT_PULLUP, 1) /* (AC3) WKUP_GPIO0_4.MCU_MCAN1_TX */ + >; + }; }; &icssg0_mdio { @@ -294,3 +337,17 @@ &icssg1_iep0 { pinctrl-names = "default"; pinctrl-0 = <&icssg1_iep0_pins_default>; }; + +&m_can0 { + pinctrl-names = "default"; + pinctrl-0 = 
<&mcu_mcan0_pins_default>; + phys = <&transceiver1>; + status = "okay"; +}; + +&m_can1 { + pinctrl-names = "default"; + pinctrl-0 = <&mcu_mcan1_pins_default>; + phys = <&transceiver2>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtso b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtso new file mode 100644 index 00000000000000..666237f6d79c3d --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-ekey-pcie.dtso @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IOT2050 M.2 variant, overlay for B-key PCIE0_LANE0 + E-key PCIE1_LANE0 + * Copyright (c) Siemens AG, 2022-2024 + * + * Authors: + * Chao Zeng + * Jan Kiszka + */ + +/dts-v1/; +/plugin/; + +#include +#include + +&pcie0_rc { + num-lanes = <1>; + phys = <&serdes0 PHY_TYPE_PCIE 1>; + phy-names = "pcie-phy0"; + reset-gpios = <&main_gpio1 15 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&pcie1_rc { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-usb3.dtso b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-usb3.dtso new file mode 100644 index 00000000000000..0f86235c97718f --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced-m2-bkey-usb3.dtso @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IOT2050 M.2 variant, overlay for B-key USB3.0 + E-key PCIE1_LANE0 + * Copyright (c) Siemens AG, 2022-2024 + * + * Authors: + * Chao Zeng + * Jan Kiszka + */ + +/dts-v1/; +/plugin/; + +#include +#include + +&serdes0 { + assigned-clock-parents = <&k3_clks 153 7>, <&k3_clks 153 4>; +}; + +&pcie0_rc { + status = "disabled"; +}; + +&pcie1_rc { + pinctrl-names = "default"; + pinctrl-0 = <&minipcie_pins_default>; + + num-lanes = <1>; + phys = <&serdes1 PHY_TYPE_PCIE 0>; + phy-names = "pcie-phy0"; + reset-gpios = <&wkup_gpio0 27 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&dwc3_0 { + assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. 
PER0_PLL/48 */ + <&k3_clks 151 8>; /* set PIPE3_TXB_CLK to WIZ8B2M4VSB */ + phys = <&serdes0 PHY_TYPE_USB3 0>; + phy-names = "usb3-phy"; +}; + +&usb0 { + maximum-speed = "super-speed"; + snps,dis-u1-entry-quirk; + snps,dis-u2-entry-quirk; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts new file mode 100644 index 00000000000000..44dfbdf892775c --- /dev/null +++ b/arch/arm64/boot/dts/ti/k3-am67a-beagley-ai.dts @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * https://beagley-ai.org/ + * + * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2024 Robert Nelson, BeagleBoard.org Foundation + */ + +/dts-v1/; + +#include +#include +#include "k3-j722s.dtsi" + +/ { + compatible = "beagle,am67a-beagley-ai", "ti,j722s"; + model = "BeagleBoard.org BeagleY-AI"; + + aliases { + serial0 = &wkup_uart0; + serial2 = &main_uart0; + mmc1 = &sdhci1; + rtc0 = &rtc; + }; + + chosen { + stdout-path = &main_uart0; + }; + + memory@80000000 { + /* 4G RAM */ + reg = <0x00000000 0x80000000 0x00000000 0x80000000>, + <0x00000008 0x80000000 0x00000000 0x80000000>; + device_type = "memory"; + bootph-pre-ram; + }; + + reserved_memory: reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + secure_tfa_ddr: tfa@9e780000 { + reg = <0x00 0x9e780000 0x00 0x80000>; + no-map; + }; + + secure_ddr: optee@9e800000 { + reg = <0x00 0x9e800000 0x00 0x01800000>; + no-map; + }; + + wkup_r5fss0_core0_memory_region: r5f-memory@a0100000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa0100000 0x00 0xf00000>; + no-map; + }; + }; + + vsys_5v0: regulator-1 { + compatible = "regulator-fixed"; + regulator-name = "vsys_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + regulator-boot-on; + bootph-all; + }; + + vdd_3v3: regulator-2 { + compatible = "regulator-fixed"; + regulator-name = "vdd_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vsys_5v0>; + regulator-always-on; + regulator-boot-on; + }; + + vdd_mmc1: regulator-3 { + compatible = "regulator-fixed"; + regulator-name = "vdd_mmc1"; + pinctrl-names = "default"; + pinctrl-0 = <&vdd_3v3_sd_pins_default>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + enable-active-high; + gpio = <&main_gpio1 50 GPIO_ACTIVE_HIGH>; + bootph-all; + }; + + vdd_sd_dv: regulator-4 { + compatible = "regulator-gpio"; + regulator-name = "tlv71033"; + pinctrl-names = "default"; + pinctrl-0 = <&vdd_sd_dv_pins_default>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + vin-supply = <&vsys_5v0>; + gpios = <&main_gpio1 49 GPIO_ACTIVE_HIGH>; + states = <1800000 0x0>, + <3300000 0x1>; + bootph-all; + }; + + vsys_io_1v8: regulator-5 { + compatible = "regulator-fixed"; + regulator-name = "vsys_io_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; + + vsys_io_1v2: regulator-6 { + compatible = "regulator-fixed"; + regulator-name = "vsys_io_1v2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-always-on; + regulator-boot-on; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&led_pins_default>; + + led-0 { + gpios = <&main_gpio0 11 GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led-1 { + gpios = 
<&main_gpio0 12 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + function = LED_FUNCTION_HEARTBEAT; + default-state = "on"; + }; + }; +}; + +&main_pmx0 { + main_i2c0_pins_default: main-i2c0-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x01e0, PIN_INPUT_PULLUP, 0) /* (D23) I2C0_SCL */ + J722S_IOPAD(0x01e4, PIN_INPUT_PULLUP, 0) /* (B22) I2C0_SDA */ + >; + bootph-all; + }; + + main_uart0_pins_default: main-uart0-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x01c8, PIN_INPUT, 0) /* (A22) UART0_RXD */ + J722S_IOPAD(0x01cc, PIN_OUTPUT, 0) /* (B22) UART0_TXD */ + >; + bootph-all; + }; + + vdd_sd_dv_pins_default: vdd-sd-dv-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x0244, PIN_OUTPUT, 7) /* (A24) MMC1_SDWP.GPIO1_49 */ + >; + bootph-all; + }; + + main_mmc1_pins_default: main-mmc1-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x023c, PIN_INPUT, 0) /* (H22) MMC1_CMD */ + J722S_IOPAD(0x0234, PIN_OUTPUT, 0) /* (H24) MMC1_CLK */ + J722S_IOPAD(0x0230, PIN_INPUT, 0) /* (H23) MMC1_DAT0 */ + J722S_IOPAD(0x022c, PIN_INPUT_PULLUP, 0) /* (H20) MMC1_DAT1 */ + J722S_IOPAD(0x0228, PIN_INPUT_PULLUP, 0) /* (J23) MMC1_DAT2 */ + J722S_IOPAD(0x0224, PIN_INPUT_PULLUP, 0) /* (H25) MMC1_DAT3 */ + J722S_IOPAD(0x0240, PIN_INPUT, 7) /* (B24) MMC1_SDCD.GPIO1_48 */ + >; + bootph-all; + }; + + mdio_pins_default: mdio-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x0160, PIN_OUTPUT, 0) /* (AC24) MDIO0_MDC */ + J722S_IOPAD(0x015c, PIN_INPUT, 0) /* (AD25) MDIO0_MDIO */ + >; + }; + + rgmii1_pins_default: rgmii1-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x014c, PIN_INPUT, 0) /* (AC25) RGMII1_RD0 */ + J722S_IOPAD(0x0150, PIN_INPUT, 0) /* (AD27) RGMII1_RD1 */ + J722S_IOPAD(0x0154, PIN_INPUT, 0) /* (AE24) RGMII1_RD2 */ + J722S_IOPAD(0x0158, PIN_INPUT, 0) /* (AE26) RGMII1_RD3 */ + J722S_IOPAD(0x0148, PIN_INPUT, 0) /* (AE27) RGMII1_RXC */ + J722S_IOPAD(0x0144, PIN_INPUT, 0) /* (AD23) RGMII1_RX_CTL */ + J722S_IOPAD(0x0134, PIN_OUTPUT, 0) /* (AF27) RGMII1_TD0 */ + J722S_IOPAD(0x0138, PIN_OUTPUT, 0) /* (AE23) RGMII1_TD1 */ + J722S_IOPAD(0x013c, PIN_OUTPUT, 0) /* (AG25) RGMII1_TD2 */ + J722S_IOPAD(0x0140, PIN_OUTPUT, 0) /* (AF24) RGMII1_TD3 */ + J722S_IOPAD(0x0130, PIN_OUTPUT, 0) /* (AG26) RGMII1_TXC */ + J722S_IOPAD(0x012c, PIN_OUTPUT, 0) /* (AF25) RGMII1_TX_CTL */ + >; + }; + + led_pins_default: led-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x002c, PIN_OUTPUT, 7) /* (K26) OSPI0_CSn0.GPIO0_11 */ + J722S_IOPAD(0x0030, PIN_OUTPUT, 7) /* (K23) OSPI0_CSn1.GPIO0_12 */ + >; + }; + + pmic_irq_pins_default: pmic-irq-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x01f4, PIN_INPUT_PULLUP, 0) /* (B23) EXTINTn */ + >; + }; + + vdd_3v3_sd_pins_default: vdd-3v3-sd-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x0254, PIN_OUTPUT, 7) /* (E25) USB0_DRVVBUS.GPIO1_50 */ + >; + }; +}; + +&cpsw3g { + pinctrl-names = "default"; + pinctrl-0 = <&rgmii1_pins_default>; + status = "okay"; +}; + +&cpsw3g_mdio { + pinctrl-names = "default"; + pinctrl-0 = <&mdio_pins_default>; + status = "okay"; + + cpsw3g_phy0: ethernet-phy@0 { + reg = <0>; + ti,rx-internal-delay = ; + ti,fifo-depth = ; + ti,min-output-impedance; + }; +}; + +&cpsw_port1 { + phy-mode = "rgmii-rxid"; + phy-handle = <&cpsw3g_phy0>; + status = "okay"; +}; + +&main_gpio1 { + status = "okay"; +}; + +&main_uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_uart0_pins_default>; + bootph-all; + status = "okay"; +}; + +&mcu_pmx0 { + wkup_uart0_pins_default: wkup-uart0-default-pins { + 
pinctrl-single,pins = < + J722S_MCU_IOPAD(0x02c, PIN_INPUT, 0) /* (C7) WKUP_UART0_CTSn */ + J722S_MCU_IOPAD(0x030, PIN_OUTPUT, 0) /* (C6) WKUP_UART0_RTSn */ + J722S_MCU_IOPAD(0x024, PIN_INPUT, 0) /* (D8) WKUP_UART0_RXD */ + J722S_MCU_IOPAD(0x028, PIN_OUTPUT, 0) /* (D7) WKUP_UART0_TXD */ + >; + bootph-all; + }; + + wkup_i2c0_pins_default: wkup-i2c0-default-pins { + pinctrl-single,pins = < + J722S_MCU_IOPAD(0x04c, PIN_INPUT_PULLUP, 0) /* (C7) WKUP_I2C0_SCL */ + J722S_MCU_IOPAD(0x050, PIN_INPUT_PULLUP, 0) /* (C6) WKUP_I2C1_SDA */ + >; + bootph-all; + }; +}; + +&wkup_uart0 { + /* WKUP UART0 is used by Device Manager firmware */ + pinctrl-names = "default"; + pinctrl-0 = <&wkup_uart0_pins_default>; + bootph-all; + status = "reserved"; +}; + +&wkup_i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&wkup_i2c0_pins_default>; + clock-frequency = <100000>; + bootph-all; + status = "okay"; + + tps65219: pmic@30 { + compatible = "ti,tps65219"; + reg = <0x30>; + buck1-supply = <&vsys_5v0>; + buck2-supply = <&vsys_5v0>; + buck3-supply = <&vsys_5v0>; + ldo1-supply = <&vdd_3v3>; + ldo3-supply = <&vdd_3v3>; + ldo4-supply = <&vdd_3v3>; + + pinctrl-names = "default"; + pinctrl-0 = <&pmic_irq_pins_default>; + interrupt-parent = <&gic500>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <1>; + + bootph-all; + system-power-controller; + ti,power-button; + + regulators { + buck1_reg: buck1 { + regulator-name = "VDD_3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + buck2_reg: buck2 { + regulator-name = "VDD_1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo1_reg: ldo1 { + regulator-name = "VDDSHV5_SDIO"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-allow-bypass; + regulator-boot-on; + regulator-always-on; + }; + + ldo2_reg: ldo2 { + regulator-name = "VDD_1V2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo3_reg: ldo3 { + regulator-name = "VDDA_PHY_1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo4_reg: ldo4 { + regulator-name = "VDDA_PLL_1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-always-on; + }; + }; + }; + + eeprom@50 { + compatible = "atmel,24c32"; + reg = <0x50>; + }; + + rtc: rtc@68 { + compatible = "dallas,ds1340"; + reg = <0x68>; + }; +}; + +&sdhci1 { + /* SD/MMC */ + vmmc-supply = <&vdd_mmc1>; + vqmmc-supply = <&vdd_sd_dv>; + pinctrl-names = "default"; + pinctrl-0 = <&main_mmc1_pins_default>; + disable-wp; + cd-gpios = <&main_gpio1 48 GPIO_ACTIVE_LOW>; + cd-debounce-delay-ms = <100>; + bootph-all; + ti,fails-without-test-cd; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts index 90dbe31c5b8116..d5ceab79536ca4 100644 --- a/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts +++ b/arch/arm64/boot/dts/ti/k3-am68-sk-base-board.dts @@ -204,6 +204,7 @@ main_mmc1_pins_default: main-mmc1-default-pins { pinctrl-single,pins = < J721S2_IOPAD(0x104, PIN_INPUT, 0) /* (P23) MMC1_CLK */ J721S2_IOPAD(0x108, PIN_INPUT, 0) /* (N24) MMC1_CMD */ + J721S2_IOPAD(0x100, PIN_INPUT, 0) /* (###) MMC1_CLKLB */ J721S2_IOPAD(0x0fc, PIN_INPUT, 0) /* (M23) MMC1_DAT0 */ 
J721S2_IOPAD(0x0f8, PIN_INPUT, 0) /* (P24) MMC1_DAT1 */ J721S2_IOPAD(0x0f4, PIN_INPUT, 0) /* (R24) MMC1_DAT2 */ diff --git a/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi b/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi index 5c66e0ec6e8211..5bc0d2fb4b8f39 100644 --- a/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am68-sk-som.dtsi @@ -215,9 +215,9 @@ partition@680000 { reg = <0x680000 0x40000>; }; - partition@740000 { + partition@6c0000 { label = "ospi.env.backup"; - reg = <0x740000 0x40000>; + reg = <0x6c0000 0x40000>; }; partition@800000 { @@ -302,6 +302,39 @@ &mcu_r5fss0_core1 { <&mcu_r5fss0_core1_memory_region>; }; +&main_r5fss0 { + ti,cluster-mode = <0>; +}; + +&main_r5fss1 { + ti,cluster-mode = <0>; +}; + +/* Timers are used by Remoteproc firmware */ +&main_timer0 { + status = "reserved"; +}; + +&main_timer1 { + status = "reserved"; +}; + +&main_timer2 { + status = "reserved"; +}; + +&main_timer3 { + status = "reserved"; +}; + +&main_timer4 { + status = "reserved"; +}; + +&main_timer5 { + status = "reserved"; +}; + &main_r5fss0_core0 { mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>; memory-region = <&main_r5fss0_core0_dma_memory_region>, diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts index 3f655852244ee7..1e36965a14032c 100644 --- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts @@ -979,6 +979,59 @@ &mcu_r5fss0_core1 { <&mcu_r5fss0_core1_memory_region>; }; +&main_r5fss0 { + ti,cluster-mode = <0>; +}; + +&main_r5fss1 { + ti,cluster-mode = <0>; +}; + +/* Timers are used by Remoteproc firmware */ +&main_timer0 { + status = "reserved"; +}; + +&main_timer1 { + status = "reserved"; +}; + +&main_timer2 { + status = "reserved"; +}; + +&main_timer3 { + status = "reserved"; +}; + +&main_timer4 { + status = "reserved"; +}; + +&main_timer5 { + status = "reserved"; +}; + +&main_timer6 { + status = "reserved"; +}; + +&main_timer7 { + status = "reserved"; +}; + +&main_timer8 { + status = "reserved"; +}; + +&main_timer9 { + status = "reserved"; +}; + +&main_r5fss2 { + ti,cluster-mode = <0>; +}; + &main_r5fss0_core0 { mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>; memory-region = <&main_r5fss0_core0_dma_memory_region>, diff --git a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi index 21fe194a576625..e78b4622a7d1ff 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi @@ -84,13 +84,13 @@ rtos_ipc_memory_region: ipc-memories@a4000000 { }; }; - mux0: mux-controller { + mux0: mux-controller-0 { compatible = "gpio-mux"; #mux-state-cells = <1>; mux-gpios = <&exp_som 1 GPIO_ACTIVE_HIGH>; }; - mux1: mux-controller { + mux1: mux-controller-1 { compatible = "gpio-mux"; #mux-state-cells = <1>; mux-gpios = <&exp_som 2 GPIO_ACTIVE_HIGH>; @@ -262,6 +262,23 @@ &mcu_r5fss0_core1 { <&mcu_r5fss0_core1_memory_region>; }; +&main_r5fss0 { + ti,cluster-mode = <0>; +}; + +/* Timers are used by Remoteproc firmware */ +&main_timer0 { + status = "reserved"; +}; + +&main_timer1 { + status = "reserved"; +}; + +&main_timer2 { + status = "reserved"; +}; + &main_r5fss0_core0 { mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>; memory-region = <&main_r5fss0_core0_dma_memory_region>, diff --git a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts index a2925555fe8180..fb899c99753ecd 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts +++ 
b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts @@ -123,7 +123,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 { no-map; }; - c66_1_dma_memory_region: c66-dma-memory@a6000000 { + c66_0_dma_memory_region: c66-dma-memory@a6000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa6000000 0x00 0x100000>; no-map; @@ -135,7 +135,7 @@ c66_0_memory_region: c66-memory@a6100000 { no-map; }; - c66_0_dma_memory_region: c66-dma-memory@a7000000 { + c66_1_dma_memory_region: c66-dma-memory@a7000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa7000000 0x00 0x100000>; no-map; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi index 6b6ef6a3061426..3731ffb4a5c963 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi @@ -354,8 +354,8 @@ fss: bus@47000000 { <0x0 0x47034000 0x0 0x47034000 0x0 0x100>, /* HBMC Control */ <0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */ <0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */ - <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* HBMC/OSPI0 Memory */ - <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */ + <0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>, /* FSS data region 1 */ + <0x4 0x00000000 0x4 0x00000000 0x4 0x00000000>; /* FSS data region 0/3 */ hbmc_mux: mux-controller@47000004 { compatible = "reg-mux"; @@ -367,7 +367,7 @@ hbmc_mux: mux-controller@47000004 { hbmc: hyperbus@47034000 { compatible = "ti,am654-hbmc"; reg = <0x00 0x47034000 0x00 0x100>, - <0x05 0x00000000 0x01 0x0000000>; + <0x05 0x00000000 0x01 0x00000000>; power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 102 0>; assigned-clocks = <&k3_clks 102 5>; @@ -381,7 +381,7 @@ hbmc: hyperbus@47034000 { ospi0: spi@47040000 { compatible = "ti,am654-ospi", "cdns,qspi-nor"; reg = <0x0 0x47040000 0x0 0x100>, - <0x5 0x00000000 0x1 0x0000000>; + <0x5 0x00000000 0x1 0x00000000>; interrupts = ; cdns,fifo-depth = <256>; cdns,fifo-width = <4>; @@ -399,7 +399,7 @@ ospi0: spi@47040000 { ospi1: spi@47050000 { compatible = "ti,am654-ospi", "cdns,qspi-nor"; reg = <0x0 0x47050000 0x0 0x100>, - <0x7 0x00000000 0x1 0x00000000>; + <0x7 0x00000000 0x1 0x00000000>; interrupts = ; cdns,fifo-depth = <256>; cdns,fifo-width = <4>; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts index 89fbfb21e5d3b2..6285e8d94ddeb7 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts @@ -120,7 +120,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 { no-map; }; - c66_1_dma_memory_region: c66-dma-memory@a6000000 { + c66_0_dma_memory_region: c66-dma-memory@a6000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa6000000 0x00 0x100000>; no-map; @@ -132,7 +132,7 @@ c66_0_memory_region: c66-memory@a6100000 { no-map; }; - c66_0_dma_memory_region: c66-dma-memory@a7000000 { + c66_1_dma_memory_region: c66-dma-memory@a7000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa7000000 0x00 0x100000>; no-map; @@ -1311,6 +1311,43 @@ &mcu_r5fss0_core1 { <&mcu_r5fss0_core1_memory_region>; }; +&main_r5fss0 { + ti,cluster-mode = <0>; +}; + +&main_r5fss1 { + ti,cluster-mode = <0>; +}; + +/* Timers are used by Remoteproc firmware */ +&main_timer0 { + status = "reserved"; +}; + +&main_timer1 { + status = "reserved"; +}; + +&main_timer2 { + status = "reserved"; +}; + +&main_timer12 { + status = "reserved"; +}; + +&main_timer13 { + status = "reserved"; +}; + +&main_timer14 { + status = 
"reserved"; +}; + +&main_timer15 { + status = "reserved"; +}; + &main_r5fss0_core0 { mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>; memory-region = <&main_r5fss0_core0_dma_memory_region>, diff --git a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi index 5ba947771b842d..cef47c67493fcc 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi @@ -561,6 +561,43 @@ &mcu_r5fss0_core1 { <&mcu_r5fss0_core1_memory_region>; }; +&main_r5fss0 { + ti,cluster-mode = <0>; +}; + +&main_r5fss1 { + ti,cluster-mode = <0>; +}; + +/* Timers are used by Remoteproc firmware */ +&main_timer0 { + status = "reserved"; +}; + +&main_timer1 { + status = "reserved"; +}; + +&main_timer2 { + status = "reserved"; +}; + +&main_timer12 { + status = "reserved"; +}; + +&main_timer13 { + status = "reserved"; +}; + +&main_timer14 { + status = "reserved"; +}; + +&main_timer15 { + status = "reserved"; +}; + &main_r5fss0_core0 { mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>; memory-region = <&main_r5fss0_core0_dma_memory_region>, diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi index 5a72c518ceb6be..a7f2f52f42f71b 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi @@ -145,8 +145,7 @@ cbass_main: bus@100000 { <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, - <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, - <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; cbass_mcu_wakeup: bus@28380000 { compatible = "simple-bus"; @@ -162,9 +161,8 @@ cbass_mcu_wakeup: bus@28380000 { <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */ <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */ <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */ - <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS OSPI0/1 data region 0 */ - <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS OSPI0 data region 3 */ - <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; /* FSS OSPI1 data region 3*/ + <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */ + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */ }; }; diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso b/arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso index 1be28283c7d9a7..8583178fa1f3ed 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso +++ b/arch/arm64/boot/dts/ti/k3-j721s2-evm-gesi-exp-board.dtso @@ -48,7 +48,7 @@ J721S2_IOPAD(0x09c, PIN_OUTPUT, 6) /* (T24) MCASP0_AXR11.RGMII1_TX_CTL */ }; &exp1 { - p15 { + p15-hog { /* P15 - EXP_MUX2 */ gpio-hog; gpios = <13 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi index 8feb42c89e4760..9d96b19d0e7cf5 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi @@ -622,8 +622,8 @@ fss: bus@47000000 { #address-cells = <2>; #size-cells = <2>; ranges = <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, - <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, - <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; + <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; ospi0: 
spi@47040000 { compatible = "ti,am654-ospi", "cdns,qspi-nor"; diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi index 82aacc01e8fe88..89252e4a5f1bc2 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi @@ -134,13 +134,13 @@ rtos_ipc_memory_region: ipc-memories@a8000000 { }; }; - mux0: mux-controller { + mux0: mux-controller-0 { compatible = "gpio-mux"; #mux-state-cells = <1>; mux-gpios = <&exp_som 1 GPIO_ACTIVE_HIGH>; }; - mux1: mux-controller { + mux1: mux-controller-1 { compatible = "gpio-mux"; #mux-state-cells = <1>; mux-gpios = <&exp_som 2 GPIO_ACTIVE_HIGH>; @@ -516,6 +516,39 @@ &mcu_r5fss0_core1 { <&mcu_r5fss0_core1_memory_region>; }; +&main_r5fss0 { + ti,cluster-mode = <0>; +}; + +&main_r5fss1 { + ti,cluster-mode = <0>; +}; + +/* Timers are used by Remoteproc firmware */ +&main_timer0 { + status = "reserved"; +}; + +&main_timer1 { + status = "reserved"; +}; + +&main_timer2 { + status = "reserved"; +}; + +&main_timer3 { + status = "reserved"; +}; + +&main_timer4 { + status = "reserved"; +}; + +&main_timer5 { + status = "reserved"; +}; + &main_r5fss0_core0 { mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>; memory-region = <&main_r5fss0_core0_dma_memory_region>, diff --git a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi index 568e6a04619d82..ea16f82822ae3c 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi @@ -141,8 +141,7 @@ cbass_main: bus@100000 { <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, - <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, - <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; cbass_mcu_wakeup: bus@28380000 { compatible = "simple-bus"; @@ -158,9 +157,8 @@ cbass_mcu_wakeup: bus@28380000 { <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */ <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */ <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */ - <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS OSPI0/1 data region 0 */ - <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS OSPI0 data region 3 */ - <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; /* FSS OSPI1 data region 3*/ + <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */ + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */ }; diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts index dd3b5f7039d7c1..a00f4a7d20d98f 100644 --- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts +++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts @@ -20,6 +20,7 @@ / { aliases { serial0 = &wkup_uart0; serial2 = &main_uart0; + serial3 = &main_uart5; mmc0 = &sdhci0; mmc1 = &sdhci1; }; @@ -51,12 +52,71 @@ secure_ddr: optee@9e800000 { no-map; }; + wkup_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa0000000 0x00 0x100000>; + no-map; + }; + wkup_r5fss0_core0_memory_region: r5f-memory@a0100000 { compatible = "shared-dma-pool"; reg = <0x00 0xa0100000 0x00 0xf00000>; no-map; }; + mcu_r5fss0_core0_dma_memory_region: mcu-r5fss-dma-memory-region@a1000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa1000000 0x00 0x100000>; + no-map; + }; + + mcu_r5fss0_core0_memory_region: 
mcu-r5fss-memory-region@a1100000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa1100000 0x00 0xf00000>; + no-map; + }; + + main_r5fss0_core0_dma_memory_region: main-r5fss-dma-memory-region@a2000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa2000000 0x00 0x100000>; + no-map; + }; + + main_r5fss0_core0_memory_region: main-r5fss-memory-region@a2100000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa2100000 0x00 0xf00000>; + no-map; + }; + + c7x_0_dma_memory_region: c7x-dma-memory@a3000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa3000000 0x00 0x100000>; + no-map; + }; + + c7x_0_memory_region: c7x-memory@a3100000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa3100000 0x00 0xf00000>; + no-map; + }; + + c7x_1_dma_memory_region: c7x-dma-memory@a4000000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa4000000 0x00 0x100000>; + no-map; + }; + + c7x_1_memory_region: c7x-memory@a4100000 { + compatible = "shared-dma-pool"; + reg = <0x00 0xa4100000 0x00 0xf00000>; + no-map; + }; + + rtos_ipc_memory_region: ipc-memories@a5000000 { + reg = <0x00 0xa5000000 0x00 0x1c00000>; + alignment = <0x1000>; + no-map; + }; }; vmain_pd: regulator-0 { @@ -162,10 +222,39 @@ sound_master: simple-audio-card,codec { clocks = <&audio_refclk1>; }; }; + + transceiver0: can-phy0 { + compatible = "ti,tcan1042"; + #phy-cells = <0>; + max-bitrate = <5000000>; + pinctrl-names = "default"; + pinctrl-0 = <&mcu_mcan0_gpio_pins_default>; + standby-gpios = <&mcu_gpio0 12 GPIO_ACTIVE_HIGH>; + }; + + transceiver1: can-phy1 { + compatible = "ti,tcan1042"; + #phy-cells = <0>; + max-bitrate = <5000000>; + }; + + transceiver2: can-phy2 { + compatible = "ti,tcan1042"; + #phy-cells = <0>; + max-bitrate = <5000000>; + standby-gpios = <&exp1 17 GPIO_ACTIVE_HIGH>; + }; }; &main_pmx0 { + main_mcan0_pins_default: main-mcan0-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x1dc, PIN_INPUT, 0) /* (C22) MCAN0_RX */ + J722S_IOPAD(0x1d8, PIN_OUTPUT, 0) /*(D22) MCAN0_TX */ + >; + }; + main_i2c0_pins_default: main-i2c0-default-pins { pinctrl-single,pins = < J722S_IOPAD(0x01e0, PIN_INPUT_PULLUP, 0) /* (D23) I2C0_SCL */ @@ -182,6 +271,13 @@ J722S_IOPAD(0x01cc, PIN_OUTPUT, 0) /* (B22) UART0_TXD */ bootph-all; }; + main_uart5_pins_default: main-uart5-default-pins { + pinctrl-single,pins = < + J722S_IOPAD(0x0108, PIN_INPUT, 3) /* (J27) UART5_RXD */ + J722S_IOPAD(0x010c, PIN_OUTPUT, 3) /* (H27) UART5_TXD */ + >; + }; + vdd_sd_dv_pins_default: vdd-sd-dv-default-pins { pinctrl-single,pins = < J722S_IOPAD(0x0120, PIN_INPUT, 7) /* (F27) MMC2_CMD.GPIO0_70 */ @@ -301,8 +397,35 @@ &main_uart0 { bootph-all; }; +&main_uart5 { + /* MAIN UART 5 is used by System firmware */ + pinctrl-names = "default"; + pinctrl-0 = <&main_uart5_pins_default>; + status = "reserved"; +}; + &mcu_pmx0 { + mcu_mcan0_pins_default: mcu-mcan0-default-pins { + pinctrl-single,pins = < + J722S_MCU_IOPAD(0x038, PIN_INPUT, 0) /* (D8) MCU_MCAN0_RX */ + J722S_MCU_IOPAD(0x034, PIN_OUTPUT, 0) /* (B2) MCU_MCAN0_TX */ + >; + }; + + mcu_mcan1_pins_default: mcu-mcan1-default-pins { + pinctrl-single,pins = < + J722S_MCU_IOPAD(0x040, PIN_INPUT, 0) /* (B1) MCU_MCAN1_RX */ + J722S_MCU_IOPAD(0x03C, PIN_OUTPUT, 0) /*(C1) MCU_MCAN1_TX */ + >; + }; + + mcu_mcan0_gpio_pins_default: mcu-mcan0-gpio-default-pins { + pinctrl-single,pins = < + J722S_MCU_IOPAD(0x0030, PIN_OUTPUT, 7) /* (C3) MCU_GPIO0_12 */ + >; + }; + wkup_uart0_pins_default: wkup-uart0-default-pins { pinctrl-single,pins = < J722S_MCU_IOPAD(0x02c, PIN_INPUT, 0) /* (C7) WKUP_UART0_CTSn */ @@ -494,6 +617,104 
@@ &sdhci1 { bootph-all; }; +&mailbox0_cluster0 { + status = "okay"; + + mbox_wkup_r5_0: mbox-wkup-r5-0 { + ti,mbox-rx = <0 0 0>; + ti,mbox-tx = <1 0 0>; + }; +}; + +&mailbox0_cluster1 { + status = "okay"; + + mbox_mcu_r5_0: mbox-mcu-r5-0 { + ti,mbox-rx = <0 0 0>; + ti,mbox-tx = <1 0 0>; + }; +}; + +&mailbox0_cluster2 { + status = "okay"; + + mbox_c7x_0: mbox-c7x-0 { + ti,mbox-rx = <0 0 0>; + ti,mbox-tx = <1 0 0>; + }; +}; + +&mailbox0_cluster3 { + status = "okay"; + + mbox_main_r5_0: mbox-main-r5-0 { + ti,mbox-rx = <0 0 0>; + ti,mbox-tx = <1 0 0>; + }; + + mbox_c7x_1: mbox-c7x-1 { + ti,mbox-rx = <2 0 0>; + ti,mbox-tx = <3 0 0>; + }; +}; + +/* Timers are used by Remoteproc firmware */ +&main_timer0 { + status = "reserved"; +}; + +&main_timer1 { + status = "reserved"; +}; + +&main_timer2 { + status = "reserved"; +}; + +&wkup_r5fss0 { + status = "okay"; +}; + +&wkup_r5fss0_core0 { + mboxes = <&mailbox0_cluster0 &mbox_wkup_r5_0>; + memory-region = <&wkup_r5fss0_core0_dma_memory_region>, + <&wkup_r5fss0_core0_memory_region>; +}; + +&mcu_r5fss0 { + status = "okay"; +}; + +&mcu_r5fss0_core0 { + mboxes = <&mailbox0_cluster1 &mbox_mcu_r5_0>; + memory-region = <&mcu_r5fss0_core0_dma_memory_region>, + <&mcu_r5fss0_core0_memory_region>; +}; + +&main_r5fss0 { + status = "okay"; +}; + +&main_r5fss0_core0 { + mboxes = <&mailbox0_cluster3 &mbox_main_r5_0>; + memory-region = <&main_r5fss0_core0_dma_memory_region>, + <&main_r5fss0_core0_memory_region>; +}; + +&c7x_0 { + mboxes = <&mailbox0_cluster2 &mbox_c7x_0>; + memory-region = <&c7x_0_dma_memory_region>, + <&c7x_0_memory_region>; + status = "okay"; +}; + +&c7x_1 { + mboxes = <&mailbox0_cluster3 &mbox_c7x_1>; + memory-region = <&c7x_1_dma_memory_region>, + <&c7x_1_memory_region>; + status = "okay"; +}; + &serdes_ln_ctrl { idle-states = , ; @@ -566,3 +787,28 @@ &mcasp1 { 0 0 0 0 >; }; + +&mcu_mcan0 { + pinctrl-names = "default"; + pinctrl-0 = <&mcu_mcan0_pins_default>; + phys = <&transceiver0>; + status = "okay"; +}; + +&mcu_mcan1 { + pinctrl-names = "default"; + pinctrl-0 = <&mcu_mcan1_pins_default>; + phys = <&transceiver1>; + status = "okay"; +}; + +&main_mcan0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_mcan0_pins_default>; + phys = <&transceiver2>; + status = "okay"; +}; + +&mcu_gpio0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi index dde4bd5c664539..ed6f4ba08afca1 100644 --- a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi @@ -153,6 +153,67 @@ usb1: usb@31200000{ dr_mode = "otg"; }; }; + + main_r5fss0: r5fss@78400000 { + compatible = "ti,am62-r5fss"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x78400000 0x00 0x78400000 0x8000>, + <0x78500000 0x00 0x78500000 0x8000>; + power-domains = <&k3_pds 261 TI_SCI_PD_EXCLUSIVE>; + status = "disabled"; + + main_r5fss0_core0: r5f@78400000 { + compatible = "ti,am62-r5f"; + reg = <0x78400000 0x00008000>, + <0x78500000 0x00008000>; + reg-names = "atcm", "btcm"; + resets = <&k3_reset 262 1>; + firmware-name = "j722s-main-r5f0_0-fw"; + ti,sci = <&dmsc>; + ti,sci-dev-id = <262>; + ti,sci-proc-ids = <0x04 0xff>; + ti,atcm-enable = <1>; + ti,btcm-enable = <1>; + ti,loczrama = <1>; + }; + }; + + c7x_0: dsp@7e000000 { + compatible = "ti,am62a-c7xv-dsp"; + reg = <0x00 0x7e000000 0x00 0x00200000>; + reg-names = "l2sram"; + resets = <&k3_reset 208 1>; + firmware-name = "j722s-c71_0-fw"; + ti,sci = <&dmsc>; + ti,sci-dev-id = <208>; + ti,sci-proc-ids = <0x30 0xff>; + status = 
"disabled"; + }; + + c7x_1: dsp@7e200000 { + compatible = "ti,am62a-c7xv-dsp"; + reg = <0x00 0x7e200000 0x00 0x00200000>; + reg-names = "l2sram"; + resets = <&k3_reset 268 1>; + firmware-name = "j722s-c71_1-fw"; + ti,sci = <&dmsc>; + ti,sci-dev-id = <268>; + ti,sci-proc-ids = <0x31 0xff>; + status = "disabled"; + }; +}; + +/* MCU domain overrides */ + +&mcu_r5fss0_core0 { + firmware-name = "j722s-mcu-r5f0_0-fw"; +}; + +/* Wakeup domain overrides */ + +&wkup_r5fss0_core0 { + firmware-name = "j722s-wkup-r5f0_0-fw"; }; &main_conf { @@ -214,5 +275,6 @@ &main_gpio0 { &main_gpio1 { gpio-ranges = <&main_pmx0 7 101 25>, <&main_pmx0 42 137 5>, <&main_pmx0 47 143 3>, <&main_pmx0 50 149 2>; + gpio-reserved-ranges = <0 7>, <32 10>; ti,ngpio = <73>; }; diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts index ffa38f41679d84..6695ebbcb4d0b1 100644 --- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts +++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts @@ -1154,6 +1154,59 @@ &mcu_r5fss0_core1 { <&mcu_r5fss0_core1_memory_region>; }; +&main_r5fss0 { + ti,cluster-mode = <0>; +}; + +&main_r5fss1 { + ti,cluster-mode = <0>; +}; + +&main_r5fss2 { + ti,cluster-mode = <0>; +}; + +/* Timers are used by Remoteproc firmware */ +&main_timer0 { + status = "reserved"; +}; + +&main_timer1 { + status = "reserved"; +}; + +&main_timer2 { + status = "reserved"; +}; + +&main_timer3 { + status = "reserved"; +}; + +&main_timer4 { + status = "reserved"; +}; + +&main_timer5 { + status = "reserved"; +}; + +&main_timer6 { + status = "reserved"; +}; + +&main_timer7 { + status = "reserved"; +}; + +&main_timer8 { + status = "reserved"; +}; + +&main_timer9 { + status = "reserved"; +}; + &main_r5fss0_core0 { status = "okay"; mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>; @@ -1407,10 +1460,11 @@ &serdes1 { serdes1_pcie0_link: phy@0 { reg = <0>; - cdns,num-lanes = <2>; + cdns,num-lanes = <4>; #phy-cells = <0>; cdns,phy-type = ; - resets = <&serdes_wiz1 1>, <&serdes_wiz1 2>; + resets = <&serdes_wiz1 1>, <&serdes_wiz1 2>, + <&serdes_wiz1 3>, <&serdes_wiz1 4>; }; }; diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi index d4ac1c9872a5e7..e73bb750b09ad5 100644 --- a/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j784s4-main.dtsi @@ -2429,7 +2429,7 @@ main_esm: esm@700000 { watchdog0: watchdog@2200000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2200000 0x00 0x100>; - clocks = <&k3_clks 348 1>; + clocks = <&k3_clks 348 0>; power-domains = <&k3_pds 348 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 348 0>; assigned-clock-parents = <&k3_clks 348 4>; @@ -2438,7 +2438,7 @@ watchdog0: watchdog@2200000 { watchdog1: watchdog@2210000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2210000 0x00 0x100>; - clocks = <&k3_clks 349 1>; + clocks = <&k3_clks 349 0>; power-domains = <&k3_pds 349 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 349 0>; assigned-clock-parents = <&k3_clks 349 4>; @@ -2447,7 +2447,7 @@ watchdog1: watchdog@2210000 { watchdog2: watchdog@2220000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2220000 0x00 0x100>; - clocks = <&k3_clks 350 1>; + clocks = <&k3_clks 350 0>; power-domains = <&k3_pds 350 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 350 0>; assigned-clock-parents = <&k3_clks 350 4>; @@ -2456,7 +2456,7 @@ watchdog2: watchdog@2220000 { watchdog3: watchdog@2230000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2230000 0x00 0x100>; - clocks = <&k3_clks 351 1>; + clocks = <&k3_clks 351 0>; power-domains = 
<&k3_pds 351 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 351 0>; assigned-clock-parents = <&k3_clks 351 4>; @@ -2465,7 +2465,7 @@ watchdog3: watchdog@2230000 { watchdog4: watchdog@2240000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2240000 0x00 0x100>; - clocks = <&k3_clks 352 1>; + clocks = <&k3_clks 352 0>; power-domains = <&k3_pds 352 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 352 0>; assigned-clock-parents = <&k3_clks 352 4>; @@ -2474,7 +2474,7 @@ watchdog4: watchdog@2240000 { watchdog5: watchdog@2250000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2250000 0x00 0x100>; - clocks = <&k3_clks 353 1>; + clocks = <&k3_clks 353 0>; power-domains = <&k3_pds 353 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 353 0>; assigned-clock-parents = <&k3_clks 353 4>; @@ -2483,7 +2483,7 @@ watchdog5: watchdog@2250000 { watchdog6: watchdog@2260000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2260000 0x00 0x100>; - clocks = <&k3_clks 354 1>; + clocks = <&k3_clks 354 0>; power-domains = <&k3_pds 354 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 354 0>; assigned-clock-parents = <&k3_clks 354 4>; @@ -2492,7 +2492,7 @@ watchdog6: watchdog@2260000 { watchdog7: watchdog@2270000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2270000 0x00 0x100>; - clocks = <&k3_clks 355 1>; + clocks = <&k3_clks 355 0>; power-domains = <&k3_pds 355 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 355 0>; assigned-clock-parents = <&k3_clks 355 4>; @@ -2506,7 +2506,7 @@ watchdog7: watchdog@2270000 { watchdog8: watchdog@22f0000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x22f0000 0x00 0x100>; - clocks = <&k3_clks 360 1>; + clocks = <&k3_clks 360 0>; power-domains = <&k3_pds 360 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 360 0>; assigned-clock-parents = <&k3_clks 360 4>; @@ -2517,7 +2517,7 @@ watchdog8: watchdog@22f0000 { watchdog9: watchdog@2300000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2300000 0x00 0x100>; - clocks = <&k3_clks 356 1>; + clocks = <&k3_clks 356 0>; power-domains = <&k3_pds 356 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 356 0>; assigned-clock-parents = <&k3_clks 356 4>; @@ -2528,7 +2528,7 @@ watchdog9: watchdog@2300000 { watchdog10: watchdog@2310000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2310000 0x00 0x100>; - clocks = <&k3_clks 357 1>; + clocks = <&k3_clks 357 0>; power-domains = <&k3_pds 357 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 357 0>; assigned-clock-parents = <&k3_clks 357 4>; @@ -2539,7 +2539,7 @@ watchdog10: watchdog@2310000 { watchdog11: watchdog@2320000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2320000 0x00 0x100>; - clocks = <&k3_clks 358 1>; + clocks = <&k3_clks 358 0>; power-domains = <&k3_pds 358 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 358 0>; assigned-clock-parents = <&k3_clks 358 4>; @@ -2550,7 +2550,7 @@ watchdog11: watchdog@2320000 { watchdog12: watchdog@2330000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2330000 0x00 0x100>; - clocks = <&k3_clks 359 1>; + clocks = <&k3_clks 359 0>; power-domains = <&k3_pds 359 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 359 0>; assigned-clock-parents = <&k3_clks 359 4>; @@ -2561,7 +2561,7 @@ watchdog12: watchdog@2330000 { watchdog13: watchdog@23c0000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x23c0000 0x00 0x100>; - clocks = <&k3_clks 361 1>; + clocks = <&k3_clks 361 0>; power-domains = <&k3_pds 361 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 361 0>; assigned-clock-parents = <&k3_clks 361 4>; @@ -2572,7 +2572,7 @@ watchdog13: watchdog@23c0000 { watchdog14: watchdog@23d0000 { 
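
Note on the k3-j784s4 watchdog hunks above: each RTI instance now consumes clock index 0, the same entry that assigned-clocks/assigned-clock-parents configure, instead of index 1. The likely effect, sketched here as an assumption about the consumer rather than a quote of the rti_wdt driver, is that the timeout maths end up based on the clock that is actually reparented:

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>

        /*
         * Sketch: the watchdog driver takes the first "clocks" entry and sizes
         * its heartbeat window from its rate, so that entry must be the clock
         * the assigned-clock-parents setting really affects.
         */
        static unsigned long example_wdt_tick_rate(struct device *dev)
        {
                struct clk *clk = devm_clk_get(dev, NULL); /* first "clocks" entry */

                return IS_ERR(clk) ? 0 : clk_get_rate(clk);
        }
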
compatible = "ti,j7-rti-wdt"; reg = <0x00 0x23d0000 0x00 0x100>; - clocks = <&k3_clks 362 1>; + clocks = <&k3_clks 362 0>; power-domains = <&k3_pds 362 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 362 0>; assigned-clock-parents = <&k3_clks 362 4>; @@ -2583,7 +2583,7 @@ watchdog14: watchdog@23d0000 { watchdog15: watchdog@23e0000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x23e0000 0x00 0x100>; - clocks = <&k3_clks 363 1>; + clocks = <&k3_clks 363 0>; power-domains = <&k3_pds 363 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 363 0>; assigned-clock-parents = <&k3_clks 363 4>; @@ -2594,7 +2594,7 @@ watchdog15: watchdog@23e0000 { watchdog16: watchdog@23f0000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x23f0000 0x00 0x100>; - clocks = <&k3_clks 364 1>; + clocks = <&k3_clks 364 0>; power-domains = <&k3_pds 364 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 364 0>; assigned-clock-parents = <&k3_clks 364 4>; @@ -2605,7 +2605,7 @@ watchdog16: watchdog@23f0000 { watchdog17: watchdog@2540000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2540000 0x00 0x100>; - clocks = <&k3_clks 365 1>; + clocks = <&k3_clks 365 0>; power-domains = <&k3_pds 365 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 365 0>; assigned-clock-parents = <&k3_clks 366 4>; @@ -2616,7 +2616,7 @@ watchdog17: watchdog@2540000 { watchdog18: watchdog@2550000 { compatible = "ti,j7-rti-wdt"; reg = <0x00 0x2550000 0x00 0x100>; - clocks = <&k3_clks 366 1>; + clocks = <&k3_clks 366 0>; power-domains = <&k3_pds 366 TI_SCI_PD_EXCLUSIVE>; assigned-clocks = <&k3_clks 366 0>; assigned-clock-parents = <&k3_clks 366 4>; diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi index f3a6ed1c979d03..f603380fc91cf4 100644 --- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi @@ -678,16 +678,16 @@ fss: bus@47000000 { compatible = "simple-bus"; #address-cells = <2>; #size-cells = <2>; - ranges = <0x0 0x47000000 0x0 0x47000000 0x0 0x100>, /* FSS Control */ - <0x0 0x47040000 0x0 0x47040000 0x0 0x100>, /* OSPI0 Control */ - <0x0 0x47050000 0x0 0x47050000 0x0 0x100>, /* OSPI1 Control */ - <0x5 0x00000000 0x5 0x00000000 0x1 0x0000000>, /* OSPI0 Memory */ - <0x7 0x00000000 0x7 0x00000000 0x1 0x0000000>; /* OSPI1 Memory */ + ranges = <0x00 0x47000000 0x00 0x47000000 0x00 0x00000100>, /* FSS Control */ + <0x00 0x47040000 0x00 0x47040000 0x00 0x00000100>, /* OSPI0 Control */ + <0x00 0x47050000 0x00 0x47050000 0x00 0x00000100>, /* OSPI1 Control */ + <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */ + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */ ospi0: spi@47040000 { compatible = "ti,am654-ospi", "cdns,qspi-nor"; reg = <0x00 0x47040000 0x00 0x100>, - <0x05 0x0000000 0x01 0x0000000>; + <0x05 0x00000000 0x01 0x00000000>; interrupts = ; cdns,fifo-depth = <256>; cdns,fifo-width = <4>; @@ -705,7 +705,7 @@ ospi0: spi@47040000 { ospi1: spi@47050000 { compatible = "ti,am654-ospi", "cdns,qspi-nor"; reg = <0x00 0x47050000 0x00 0x100>, - <0x07 0x0000000 0x01 0x0000000>; + <0x07 0x00000000 0x01 0x00000000>; interrupts = ; cdns,fifo-depth = <256>; cdns,fifo-width = <4>; diff --git a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi index 73cc3c1fec08d3..5e84c6b4f5ad48 100644 --- a/arch/arm64/boot/dts/ti/k3-j784s4.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j784s4.dtsi @@ -271,8 +271,7 @@ cbass_main: bus@100000 { <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, <0x00 
0x47000000 0x00 0x47000000 0x00 0x00068400>, <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, - <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, - <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; cbass_mcu_wakeup: bus@28380000 { bootph-all; @@ -289,9 +288,8 @@ cbass_mcu_wakeup: bus@28380000 { <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */ <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */ <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, /* OSPI register space */ - <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS OSPI0/1 data region 0 */ - <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS OSPI0 data region 3 */ - <0x07 0x00000000 0x07 0x00000000 0x01 0x00000000>; /* FSS OSPI1 data region 3*/ + <0x00 0x50000000 0x00 0x50000000 0x00 0x10000000>, /* FSS data region 1 */ + <0x04 0x00000000 0x04 0x00000000 0x04 0x00000000>; /* FSS data region 0/3 */ }; }; diff --git a/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi b/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi index b04829b3175de0..39806f0ae51337 100644 --- a/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi +++ b/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi @@ -196,8 +196,8 @@ uart0: serial@28200000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&uart0_pins>; - clocks = <&pismu TMPV770X_CLK_PIUART0>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PIUART0>, <&pismu TMPV770X_CLK_PIUART0>; + clock-names = "uartclk", "apb_pclk"; status = "disabled"; }; @@ -207,8 +207,8 @@ uart1: serial@28201000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&uart1_pins>; - clocks = <&pismu TMPV770X_CLK_PIUART1>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PIUART1>, <&pismu TMPV770X_CLK_PIUART1>; + clock-names = "uartclk", "apb_pclk"; status = "disabled"; }; @@ -218,8 +218,8 @@ uart2: serial@28202000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&uart2_pins>; - clocks = <&pismu TMPV770X_CLK_PIUART2>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PIUART2>, <&pismu TMPV770X_CLK_PIUART2>; + clock-names = "uartclk", "apb_pclk"; status = "disabled"; }; @@ -229,8 +229,8 @@ uart3: serial@28203000 { interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&uart3_pins>; - clocks = <&pismu TMPV770X_CLK_PIUART2>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PIUART2>, <&pismu TMPV770X_CLK_PIUART2>; + clock-names = "uartclk", "apb_pclk"; status = "disabled"; }; @@ -360,8 +360,8 @@ spi0: spi@28140000 { num-cs = <1>; #address-cells = <1>; #size-cells = <0>; - clocks = <&pismu TMPV770X_CLK_PISPI1>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PISPI1>, <&pismu TMPV770X_CLK_PISPI1>; + clock-names = "sspclk", "apb_pclk"; status = "disabled"; }; @@ -374,8 +374,8 @@ spi1: spi@28141000 { num-cs = <1>; #address-cells = <1>; #size-cells = <0>; - clocks = <&pismu TMPV770X_CLK_PISPI1>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PISPI1>, <&pismu TMPV770X_CLK_PISPI1>; + clock-names = "sspclk", "apb_pclk"; status = "disabled"; }; @@ -388,8 +388,8 @@ spi2: spi@28142000 { num-cs = <1>; #address-cells = <1>; #size-cells = <0>; - clocks = <&pismu TMPV770X_CLK_PISPI2>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PISPI2>, <&pismu TMPV770X_CLK_PISPI2>; + clock-names = "sspclk", "apb_pclk"; status = "disabled"; }; @@ -402,8 +402,8 @@ spi3: spi@28143000 { num-cs = <1>; #address-cells = <1>; #size-cells = <0>; - clocks = <&pismu TMPV770X_CLK_PISPI3>; - clock-names = 
"apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PISPI3>, <&pismu TMPV770X_CLK_PISPI3>; + clock-names = "sspclk", "apb_pclk"; status = "disabled"; }; @@ -416,8 +416,8 @@ spi4: spi@28144000 { num-cs = <1>; #address-cells = <1>; #size-cells = <0>; - clocks = <&pismu TMPV770X_CLK_PISPI4>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PISPI4>, <&pismu TMPV770X_CLK_PISPI4>; + clock-names = "sspclk", "apb_pclk"; status = "disabled"; }; @@ -430,8 +430,8 @@ spi5: spi@28145000 { num-cs = <1>; #address-cells = <1>; #size-cells = <0>; - clocks = <&pismu TMPV770X_CLK_PISPI5>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PISPI5>, <&pismu TMPV770X_CLK_PISPI5>; + clock-names = "sspclk", "apb_pclk"; status = "disabled"; }; @@ -444,8 +444,8 @@ spi6: spi@28146000 { num-cs = <1>; #address-cells = <1>; #size-cells = <0>; - clocks = <&pismu TMPV770X_CLK_PISPI6>; - clock-names = "apb_pclk"; + clocks = <&pismu TMPV770X_CLK_PISPI6>, <&pismu TMPV770X_CLK_PISPI6>; + clock-names = "sspclk", "apb_pclk"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts index ad8f23a0ec67ba..d2175f3dd09920 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts @@ -941,6 +941,7 @@ conf-pull-none { &pcie { status = "okay"; + phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>; }; &psgtr { diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 362df939026383..5fdbfea7a5b295 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -232,6 +232,7 @@ CONFIG_PCIE_KIRIN=y CONFIG_PCIE_HISI_STB=y CONFIG_PCIE_ARMADA_8K=y CONFIG_PCIE_TEGRA194_HOST=m +CONFIG_PCIE_TEGRA194_EP=m CONFIG_PCIE_QCOM=y CONFIG_PCIE_RCAR_GEN4_HOST=m CONFIG_PCIE_RCAR_GEN4_EP=m @@ -366,6 +367,7 @@ CONFIG_R8169=m CONFIG_SH_ETH=y CONFIG_RAVB=y CONFIG_RENESAS_ETHER_SWITCH=y +CONFIG_RTSN=y CONFIG_SMC91X=y CONFIG_SMSC911X=y CONFIG_SNI_AVE=y @@ -516,6 +518,7 @@ CONFIG_I2C_MUX=y CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_BCM2835=m CONFIG_I2C_CADENCE=m +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_GPIO=m CONFIG_I2C_IMX=y @@ -655,6 +658,7 @@ CONFIG_GPIO_XGENE_SB=y CONFIG_GPIO_MAX732X=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_ADP5585=m CONFIG_GPIO_BD9571MWV=m CONFIG_GPIO_MAX77620=y CONFIG_GPIO_SL28CPLD=m @@ -736,6 +740,7 @@ CONFIG_UNIPHIER_WATCHDOG=y CONFIG_PM8916_WATCHDOG=m CONFIG_BCM2835_WDT=y CONFIG_BCM7038_WDT=m +CONFIG_MFD_ADP5585=m CONFIG_MFD_ALTERA_SYSMGR=y CONFIG_MFD_BD9571MWV=y CONFIG_MFD_AXP20X_I2C=y @@ -785,6 +790,7 @@ CONFIG_REGULATOR_PCA9450=y CONFIG_REGULATOR_PF8X00=y CONFIG_REGULATOR_PFUZE100=y CONFIG_REGULATOR_PWM=y +CONFIG_REGULATOR_QCOM_REFGEN=m CONFIG_REGULATOR_QCOM_RPMH=y CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QCOM_SPMI=y @@ -820,6 +826,7 @@ CONFIG_VIDEO_CADENCE_CSI2RX=m CONFIG_VIDEO_MEDIATEK_JPEG=m CONFIG_VIDEO_MEDIATEK_VCODEC=m CONFIG_VIDEO_WAVE_VPU=m +CONFIG_VIDEO_E5010_JPEG_ENC=m CONFIG_VIDEO_IMX7_CSI=m CONFIG_VIDEO_IMX_MIPI_CSIS=m CONFIG_VIDEO_IMX8_ISI=m @@ -961,6 +968,8 @@ CONFIG_SND_SOC_MT8192=m CONFIG_SND_SOC_MT8192_MT6359_RT1015_RT5682=m CONFIG_SND_SOC_MT8195=m CONFIG_SND_SOC_MT8195_MT6359=m +CONFIG_SND_SOC_MT8365=m +CONFIG_SND_SOC_MT8365_MT6357=m CONFIG_SND_MESON_AXG_SOUND_CARD=m CONFIG_SND_MESON_GX_SOUND_CARD=m CONFIG_SND_SOC_QCOM=m @@ -1006,6 +1015,7 @@ CONFIG_SND_SOC_TEGRA_AUDIO_GRAPH_CARD=m CONFIG_SND_SOC_DAVINCI_MCASP=m CONFIG_SND_SOC_J721E_EVM=m CONFIG_SND_SOC_AK4613=m +CONFIG_SND_SOC_AK4619=m 
CONFIG_SND_SOC_DA7213=m CONFIG_SND_SOC_ES7134=m CONFIG_SND_SOC_ES7241=m @@ -1485,6 +1495,7 @@ CONFIG_IIO_ST_MAGN_3AXIS=m CONFIG_IIO_CROS_EC_BARO=m CONFIG_MPL3115=m CONFIG_PWM=y +CONFIG_PWM_ADP5585=m CONFIG_PWM_BCM2835=m CONFIG_PWM_BRCMSTB=m CONFIG_PWM_CROS_EC=m diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index ce9b28e3c7d634..a523b519700f51 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c index e921823ca103a4..00b8749013c5bf 100644 --- a/arch/arm64/crypto/aes-ce-glue.c +++ b/arch/arm64/crypto/aes-ce-glue.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 97331b454ea861..da7b7ec1a664e1 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl index cbc980fb02e335..22c9069c065054 100644 --- a/arch/arm64/crypto/poly1305-armv8.pl +++ b/arch/arm64/crypto/poly1305-armv8.pl @@ -473,7 +473,8 @@ poly1305_blocks_neon: subs $len,$len,#64 ldp x9,x13,[$inp,#48] add $in2,$inp,#96 - adr $zeros,.Lzeros + adrp $zeros,.Lzeros + add $zeros,$zeros,#:lo12:.Lzeros lsl $padbit,$padbit,#24 add x15,$ctx,#48 @@ -885,10 +886,13 @@ poly1305_blocks_neon: ret .size poly1305_blocks_neon,.-poly1305_blocks_neon +.pushsection .rodata .align 5 .Lzeros: .long 0,0,0,0,0,0,0,0 .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" +.popsection + .align 2 #if !defined(__KERNEL__) && !defined(_WIN64) .comm OPENSSL_armcap_P,4,4 diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c index 9c4bfd62e789dc..18883ea438f3c1 100644 --- a/arch/arm64/crypto/poly1305-glue.c +++ b/arch/arm64/crypto/poly1305-glue.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index 1dd93e1fcb39a2..cbd14f208f8301 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 0a44d2e7ee1f7b..6b4866a88ded1c 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c index 250e1377c481b3..5662c3ac49e91c 100644 --- a/arch/arm64/crypto/sha3-ce-glue.c +++ b/arch/arm64/crypto/sha3-ce-glue.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c index f3431fc6231540..071f6429322702 100644 --- a/arch/arm64/crypto/sha512-ce-glue.c +++ b/arch/arm64/crypto/sha512-ce-glue.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c index 54bf6ebcfffb15..1a71788c4cda5f 100644 --- a/arch/arm64/crypto/sm3-ce-glue.c +++ b/arch/arm64/crypto/sm3-ce-glue.c @@ -7,7 +7,7 @@ #include #include 
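
Note on the Toshiba tmpv7708 serial/SPI hunks above: each PrimeCell node now lists both the functional clock ("uartclk" or "sspclk") and the bus clock ("apb_pclk") that the PL011/PL022 bindings expect, even though both entries reference the same PISMU clock here. A hedged sketch of how a consumer typically picks the two up (assumed lookup pattern, not the exact driver code):

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>

        static int example_get_primecell_clocks(struct device *dev,
                                                struct clk **fclk, struct clk **pclk)
        {
                /* The AMBA/PrimeCell layer grabs the register clock by name. */
                *pclk = devm_clk_get(dev, "apb_pclk");
                if (IS_ERR(*pclk))
                        return PTR_ERR(*pclk);

                /* An unnamed lookup resolves to the first entry: "uartclk"/"sspclk". */
                *fclk = devm_clk_get(dev, NULL);
                return PTR_ERR_OR_ZERO(*fclk);
        }
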
-#include +#include #include #include #include diff --git a/arch/arm64/crypto/sm3-neon-glue.c b/arch/arm64/crypto/sm3-neon-glue.c index 7182ee683f14ac..8dd71ce79b69bb 100644 --- a/arch/arm64/crypto/sm3-neon-glue.c +++ b/arch/arm64/crypto/sm3-neon-glue.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 7d7d97ad3cd5ab..4e350df9a02dd8 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -9,6 +9,7 @@ syscall-y += unistd_compat_32.h generic-y += early_ioremap.h generic-y += mcs_spinlock.h +generic-y += mmzone.h generic-y += qrwlock.h generic-y += qspinlock.h generic-y += parport.h diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index d328f549b1a60a..c8c77f9e36d60c 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -230,7 +230,11 @@ alternative_has_cap_likely(const unsigned long cpucap) return false; asm goto( +#ifdef BUILD_VDSO + ALTERNATIVE("b %l[l_no]", "nop", %[cpucap]) +#else ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops) +#endif : : [cpucap] "i" (cpucap) : diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h index a4697a0b683584..468a049bc63b59 100644 --- a/arch/arm64/include/asm/arm_pmuv3.h +++ b/arch/arm64/include/asm/arm_pmuv3.h @@ -33,6 +33,14 @@ static inline void write_pmevtypern(int n, unsigned long val) PMEVN_SWITCH(n, WRITE_PMEVTYPERN); } +#define RETURN_READ_PMEVTYPERN(n) \ + return read_sysreg(pmevtyper##n##_el0) +static inline unsigned long read_pmevtypern(int n) +{ + PMEVN_SWITCH(n, RETURN_READ_PMEVTYPERN); + return 0; +} + static inline unsigned long read_pmmir(void) { return read_cpuid(PMMIR_EL1); @@ -46,6 +54,14 @@ static inline u32 read_pmuver(void) ID_AA64DFR0_EL1_PMUVer_SHIFT); } +static inline bool pmuv3_has_icntr(void) +{ + u64 dfr1 = read_sysreg(id_aa64dfr1_el1); + + return !!cpuid_feature_extract_unsigned_field(dfr1, + ID_AA64DFR1_EL1_PMICNTR_SHIFT); +} + static inline void write_pmcr(u64 val) { write_sysreg(val, pmcr_el0); @@ -71,22 +87,32 @@ static inline u64 read_pmccntr(void) return read_sysreg(pmccntr_el0); } -static inline void write_pmcntenset(u32 val) +static inline void write_pmicntr(u64 val) +{ + write_sysreg_s(val, SYS_PMICNTR_EL0); +} + +static inline u64 read_pmicntr(void) +{ + return read_sysreg_s(SYS_PMICNTR_EL0); +} + +static inline void write_pmcntenset(u64 val) { write_sysreg(val, pmcntenset_el0); } -static inline void write_pmcntenclr(u32 val) +static inline void write_pmcntenclr(u64 val) { write_sysreg(val, pmcntenclr_el0); } -static inline void write_pmintenset(u32 val) +static inline void write_pmintenset(u64 val) { write_sysreg(val, pmintenset_el1); } -static inline void write_pmintenclr(u32 val) +static inline void write_pmintenclr(u64 val) { write_sysreg(val, pmintenclr_el1); } @@ -96,12 +122,27 @@ static inline void write_pmccfiltr(u64 val) write_sysreg(val, pmccfiltr_el0); } -static inline void write_pmovsclr(u32 val) +static inline u64 read_pmccfiltr(void) +{ + return read_sysreg(pmccfiltr_el0); +} + +static inline void write_pmicfiltr(u64 val) +{ + write_sysreg_s(val, SYS_PMICFILTR_EL0); +} + +static inline u64 read_pmicfiltr(void) +{ + return read_sysreg_s(SYS_PMICFILTR_EL0); +} + +static inline void write_pmovsclr(u64 val) { write_sysreg(val, pmovsclr_el0); } -static inline u32 read_pmovsclr(void) +static inline u64 read_pmovsclr(void) { 
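
Note on the asm/arm_pmuv3.h hunks above: the PMCNTEN/PMINTEN/PMOVS accessors widen from u32 to u64 alongside the new PMICNTR/PMICFILTR helpers, since the fixed-function instruction counter advertised by ID_AA64DFR1_EL1.PMICNTR is controlled by a bit beyond bit 31 of those registers. A minimal sketch under that assumption (the bit positions are illustrative, not taken from the patch):

        #include <linux/bits.h>

        #define EXAMPLE_PMU_CNTEN_CYCLE BIT_ULL(31) /* cycle counter enable */
        #define EXAMPLE_PMU_CNTEN_INSN  BIT_ULL(32) /* assumed slot of the fixed instruction counter */

        /* Enable the cycle counter, plus the instruction counter when present. */
        static inline void example_enable_fixed_counters(void)
        {
                u64 mask = EXAMPLE_PMU_CNTEN_CYCLE;

                if (pmuv3_has_icntr())
                        mask |= EXAMPLE_PMU_CNTEN_INSN;

                write_pmcntenset(mask); /* needs the 64-bit accessor */
        }
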
return read_sysreg(pmovsclr_el0); } diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 55843426727156..3d261cc123c1e2 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -832,6 +832,12 @@ static inline bool system_supports_lpa2(void) return cpus_have_final_cap(ARM64_HAS_LPA2); } +static inline bool system_supports_poe(void) +{ + return IS_ENABLED(CONFIG_ARM64_POE) && + alternative_has_cap_unlikely(ARM64_HAS_S1POE); +} + int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); bool try_emulate_mrs(struct pt_regs *regs, u32 isn); diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 5fd7caea441936..488f8e75134959 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -94,6 +94,7 @@ #define ARM_CPU_PART_NEOVERSE_V3 0xD84 #define ARM_CPU_PART_CORTEX_X925 0xD85 #define ARM_CPU_PART_CORTEX_A725 0xD87 +#define ARM_CPU_PART_NEOVERSE_N3 0xD8E #define APM_CPU_PART_XGENE 0x000 #define APM_CPU_VAR_POTENZA 0x00 @@ -143,6 +144,7 @@ #define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039 #define AMPERE_CPU_PART_AMPERE1 0xAC3 +#define AMPERE_CPU_PART_AMPERE1A 0xAC4 #define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */ @@ -175,6 +177,7 @@ #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925) #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725) +#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) @@ -212,6 +215,7 @@ #define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX) #define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX) #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) +#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A) #define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index fd87c4b8f98403..e0ffdf13a18b3f 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -165,42 +165,53 @@ mrs x1, id_aa64dfr0_el1 ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4 cmp x1, #3 - b.lt .Lset_debug_fgt_\@ + b.lt .Lskip_spe_fgt_\@ /* Disable PMSNEVFR_EL1 read and write traps */ orr x0, x0, #(1 << 62) -.Lset_debug_fgt_\@: +.Lskip_spe_fgt_\@: msr_s SYS_HDFGRTR_EL2, x0 msr_s SYS_HDFGWTR_EL2, x0 mov x0, xzr mrs x1, id_aa64pfr1_el1 ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4 - cbz x1, .Lset_pie_fgt_\@ + cbz x1, .Lskip_debug_fgt_\@ /* Disable nVHE traps of TPIDR2 and SMPRI */ orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK -.Lset_pie_fgt_\@: +.Lskip_debug_fgt_\@: mrs_s x1, SYS_ID_AA64MMFR3_EL1 ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4 - cbz x1, .Lset_fgt_\@ + cbz x1, .Lskip_pie_fgt_\@ /* Disable trapping of PIR_EL1 / PIRE0_EL1 */ orr x0, x0, #HFGxTR_EL2_nPIR_EL1 orr x0, x0, 
#HFGxTR_EL2_nPIRE0_EL1 -.Lset_fgt_\@: +.Lskip_pie_fgt_\@: + mrs_s x1, SYS_ID_AA64MMFR3_EL1 + ubfx x1, x1, #ID_AA64MMFR3_EL1_S1POE_SHIFT, #4 + cbz x1, .Lskip_poe_fgt_\@ + + /* Disable trapping of POR_EL0 */ + orr x0, x0, #HFGxTR_EL2_nPOR_EL0 + +.Lskip_poe_fgt_\@: msr_s SYS_HFGRTR_EL2, x0 msr_s SYS_HFGWTR_EL2, x0 msr_s SYS_HFGITR_EL2, xzr mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4 - cbz x1, .Lskip_fgt_\@ + cbz x1, .Lskip_amu_fgt_\@ msr_s SYS_HAFGRTR_EL2, xzr + +.Lskip_amu_fgt_\@: + .Lskip_fgt_\@: .endm diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 56c148890daf5c..da6d2c1c0b030c 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -10,63 +10,63 @@ #include #include -#define ESR_ELx_EC_UNKNOWN (0x00) -#define ESR_ELx_EC_WFx (0x01) +#define ESR_ELx_EC_UNKNOWN UL(0x00) +#define ESR_ELx_EC_WFx UL(0x01) /* Unallocated EC: 0x02 */ -#define ESR_ELx_EC_CP15_32 (0x03) -#define ESR_ELx_EC_CP15_64 (0x04) -#define ESR_ELx_EC_CP14_MR (0x05) -#define ESR_ELx_EC_CP14_LS (0x06) -#define ESR_ELx_EC_FP_ASIMD (0x07) -#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */ -#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */ +#define ESR_ELx_EC_CP15_32 UL(0x03) +#define ESR_ELx_EC_CP15_64 UL(0x04) +#define ESR_ELx_EC_CP14_MR UL(0x05) +#define ESR_ELx_EC_CP14_LS UL(0x06) +#define ESR_ELx_EC_FP_ASIMD UL(0x07) +#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */ +#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */ /* Unallocated EC: 0x0A - 0x0B */ -#define ESR_ELx_EC_CP14_64 (0x0C) -#define ESR_ELx_EC_BTI (0x0D) -#define ESR_ELx_EC_ILL (0x0E) +#define ESR_ELx_EC_CP14_64 UL(0x0C) +#define ESR_ELx_EC_BTI UL(0x0D) +#define ESR_ELx_EC_ILL UL(0x0E) /* Unallocated EC: 0x0F - 0x10 */ -#define ESR_ELx_EC_SVC32 (0x11) -#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */ -#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */ +#define ESR_ELx_EC_SVC32 UL(0x11) +#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */ +#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */ /* Unallocated EC: 0x14 */ -#define ESR_ELx_EC_SVC64 (0x15) -#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */ -#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ -#define ESR_ELx_EC_SYS64 (0x18) -#define ESR_ELx_EC_SVE (0x19) -#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ +#define ESR_ELx_EC_SVC64 UL(0x15) +#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */ +#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */ +#define ESR_ELx_EC_SYS64 UL(0x18) +#define ESR_ELx_EC_SVE UL(0x19) +#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */ /* Unallocated EC: 0x1B */ -#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */ -#define ESR_ELx_EC_SME (0x1D) +#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */ +#define ESR_ELx_EC_SME UL(0x1D) /* Unallocated EC: 0x1E */ -#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ -#define ESR_ELx_EC_IABT_LOW (0x20) -#define ESR_ELx_EC_IABT_CUR (0x21) -#define ESR_ELx_EC_PC_ALIGN (0x22) +#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */ +#define ESR_ELx_EC_IABT_LOW UL(0x20) +#define ESR_ELx_EC_IABT_CUR UL(0x21) +#define ESR_ELx_EC_PC_ALIGN UL(0x22) /* Unallocated EC: 0x23 */ -#define ESR_ELx_EC_DABT_LOW (0x24) -#define ESR_ELx_EC_DABT_CUR (0x25) -#define ESR_ELx_EC_SP_ALIGN (0x26) -#define ESR_ELx_EC_MOPS (0x27) -#define ESR_ELx_EC_FP_EXC32 (0x28) +#define ESR_ELx_EC_DABT_LOW UL(0x24) +#define ESR_ELx_EC_DABT_CUR UL(0x25) +#define ESR_ELx_EC_SP_ALIGN UL(0x26) +#define ESR_ELx_EC_MOPS UL(0x27) +#define ESR_ELx_EC_FP_EXC32 UL(0x28) /* Unallocated EC: 
0x29 - 0x2B */ -#define ESR_ELx_EC_FP_EXC64 (0x2C) +#define ESR_ELx_EC_FP_EXC64 UL(0x2C) /* Unallocated EC: 0x2D - 0x2E */ -#define ESR_ELx_EC_SERROR (0x2F) -#define ESR_ELx_EC_BREAKPT_LOW (0x30) -#define ESR_ELx_EC_BREAKPT_CUR (0x31) -#define ESR_ELx_EC_SOFTSTP_LOW (0x32) -#define ESR_ELx_EC_SOFTSTP_CUR (0x33) -#define ESR_ELx_EC_WATCHPT_LOW (0x34) -#define ESR_ELx_EC_WATCHPT_CUR (0x35) +#define ESR_ELx_EC_SERROR UL(0x2F) +#define ESR_ELx_EC_BREAKPT_LOW UL(0x30) +#define ESR_ELx_EC_BREAKPT_CUR UL(0x31) +#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32) +#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33) +#define ESR_ELx_EC_WATCHPT_LOW UL(0x34) +#define ESR_ELx_EC_WATCHPT_CUR UL(0x35) /* Unallocated EC: 0x36 - 0x37 */ -#define ESR_ELx_EC_BKPT32 (0x38) +#define ESR_ELx_EC_BKPT32 UL(0x38) /* Unallocated EC: 0x39 */ -#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */ +#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */ /* Unallocated EC: 0x3B */ -#define ESR_ELx_EC_BRK64 (0x3C) +#define ESR_ELx_EC_BRK64 UL(0x3C) /* Unallocated EC: 0x3D - 0x3F */ -#define ESR_ELx_EC_MAX (0x3F) +#define ESR_ELx_EC_MAX UL(0x3F) #define ESR_ELx_EC_SHIFT (26) #define ESR_ELx_EC_WIDTH (6) @@ -122,8 +122,8 @@ #define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n)) /* Status codes for individual page table levels */ -#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + n) -#define ESR_ELx_FSC_PERM_L(n) (ESR_ELx_FSC_PERM + n) +#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + (n)) +#define ESR_ELx_FSC_PERM_L(n) (ESR_ELx_FSC_PERM + (n)) #define ESR_ELx_FSC_FAULT_nL (0x2C) #define ESR_ELx_FSC_FAULT_L(n) (((n) < 0 ? ESR_ELx_FSC_FAULT_nL : \ @@ -161,6 +161,7 @@ /* ISS field definitions for exceptions taken in to Hyp */ #define ESR_ELx_FSC_ADDRSZ (0x00) +#define ESR_ELx_FSC_ADDRSZ_L(n) (ESR_ELx_FSC_ADDRSZ + (n)) #define ESR_ELx_CV (UL(1) << 24) #define ESR_ELx_COND_SHIFT (20) #define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index bc69ac368d73b1..f2a84efc361858 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -155,8 +155,6 @@ extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused); extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused); extern void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__unused); -extern u64 read_smcr_features(void); - /* * Helpers to translate bit indices in sve_vq_map to VQ values (and * vice versa). 
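
Note on the asm/esr.h hunk above: the ESR_ELx_EC_* values become UL() constants because they get shifted by ESR_ELx_EC_SHIFT (26) when fields and masks are built, and 0x3F << 26 does not fit in a signed 32-bit int; the added parentheses around (n) in the FSC level macros are the usual macro-hygiene fix for expression arguments. A small sketch of the shift that the UL() form keeps well defined:

        /*
         * With plain int constants, ESR_ELx_EC_MAX << 26 overflows signed
         * 32-bit arithmetic before any widening to u64 can happen; with
         * UL(0x3F) the whole expression is evaluated in unsigned long.
         */
        static inline u64 example_ec_field(void)
        {
                return ESR_ELx_EC_MAX << ESR_ELx_EC_SHIFT;
        }
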
This allows find_next_bit() to be used to find the diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 4edd3b61df1124..a775adddecf256 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -157,6 +157,7 @@ #define KERNEL_HWCAP_SME_SF8FMA __khwcap2_feature(SME_SF8FMA) #define KERNEL_HWCAP_SME_SF8DP4 __khwcap2_feature(SME_SF8DP4) #define KERNEL_HWCAP_SME_SF8DP2 __khwcap2_feature(SME_SF8DP2) +#define KERNEL_HWCAP_POE __khwcap2_feature(POE) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h index 0ae427f352c8ca..409e239834d194 100644 --- a/arch/arm64/include/asm/hypervisor.h +++ b/arch/arm64/include/asm/hypervisor.h @@ -7,4 +7,15 @@ void kvm_init_hyp_services(void); bool kvm_arm_hyp_service_available(u32 func_id); +#ifdef CONFIG_ARM_PKVM_GUEST +void pkvm_init_hyp_services(void); +#else +static inline void pkvm_init_hyp_services(void) { }; +#endif + +static inline void kvm_arch_init_hyp_services(void) +{ + pkvm_init_hyp_services(); +}; + #endif diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 41fd90895dfc3d..1ada23a6ec1900 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -271,6 +271,10 @@ __iowrite64_copy(void __iomem *to, const void *from, size_t count) * I/O memory mapping functions. */ +typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size, + pgprot_t *prot); +int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook); + #define ioremap_prot ioremap_prot #define _PAGE_IOREMAP PROT_DEVICE_nGnRE diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index d81cc746e0ebd0..109a85ee691007 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -107,6 +107,7 @@ /* TCR_EL2 Registers bits */ #define TCR_EL2_DS (1UL << 32) #define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) +#define TCR_EL2_HPD (1 << 24) #define TCR_EL2_TBI (1 << 20) #define TCR_EL2_PS_SHIFT 16 #define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 2181a11b9d9252..b36a3b6cc01169 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -10,6 +10,7 @@ #include #include #include +#include #define ARM_EXIT_WITH_SERROR_BIT 31 #define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT)) @@ -235,6 +236,9 @@ extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu); extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding); extern void __kvm_timer_set_cntvoff(u64 cntvoff); +extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr); +extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr); +extern void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); @@ -259,7 +263,7 @@ extern u64 __kvm_get_mdcr_el2(void); asm volatile( \ " mrs %1, spsr_el2\n" \ " mrs %2, elr_el2\n" \ - "1: at "at_op", %3\n" \ + "1: " __msr_s(at_op, "%3") "\n" \ " isb\n" \ " b 9f\n" \ "2: msr spsr_el2, %1\n" \ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a33f5996ca9f1d..94cff508874bf2 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -446,6 +446,12 @@ enum vcpu_sysreg { GCR_EL1, /* Tag Control Register */ TFSRE0_EL1, /* Tag Fault Status Register (EL0) */ + POR_EL0, /* Permission 
Overlay Register 0 (EL0) */ + + /* FP/SIMD/SVE */ + SVCR, + FPMR, + /* 32bit specific registers. */ DACR32_EL2, /* Domain Access Control Register */ IFSR32_EL2, /* Instruction Fault Status Register */ @@ -517,6 +523,8 @@ enum vcpu_sysreg { VNCR(PIR_EL1), /* Permission Indirection Register 1 (EL1) */ VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */ + VNCR(POR_EL1), /* Permission Overlay Register 1 (EL1) */ + VNCR(HFGRTR_EL2), VNCR(HFGWTR_EL2), VNCR(HFGITR_EL2), @@ -530,6 +538,8 @@ enum vcpu_sysreg { VNCR(CNTP_CVAL_EL0), VNCR(CNTP_CTL_EL0), + VNCR(ICH_HCR_EL2), + NR_SYS_REGS /* Nothing after this line! */ }; @@ -595,6 +605,16 @@ struct kvm_host_data { struct cpu_sve_state *sve_state; }; + union { + /* HYP VA pointer to the host storage for FPMR */ + u64 *fpmr_ptr; + /* + * Used by pKVM only, as it needs to provide storage + * for the host + */ + u64 fpmr; + }; + /* Ownership of the FP regs */ enum { FP_STATE_FREE, @@ -664,8 +684,6 @@ struct kvm_vcpu_arch { void *sve_state; enum fp_type fp_type; unsigned int sve_max_vl; - u64 svcr; - u64 fpmr; /* Stage 2 paging state used by the hardware on next switch */ struct kvm_s2_mmu *hw_mmu; @@ -1330,12 +1348,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM -void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); -void kvm_clr_pmu_events(u32 clr); +void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr); +void kvm_clr_pmu_events(u64 clr); bool kvm_set_pmuserenr(u64 val); #else -static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} -static inline void kvm_clr_pmu_events(u32 clr) {} +static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {} +static inline void kvm_clr_pmu_events(u64 clr) {} static inline bool kvm_set_pmuserenr(u64 val) { return false; @@ -1423,11 +1441,6 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val); sign_extend64(__val, id##_##fld##_WIDTH - 1); \ }) -#define expand_field_sign(id, fld, val) \ - (id##_##fld##_SIGNED ? \ - __expand_field_sign_signed(id, fld, val) : \ - __expand_field_sign_unsigned(id, fld, val)) - #define get_idreg_field_unsigned(kvm, id, fld) \ ({ \ u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id); \ @@ -1443,20 +1456,26 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val); #define get_idreg_field_enum(kvm, id, fld) \ get_idreg_field_unsigned(kvm, id, fld) -#define get_idreg_field(kvm, id, fld) \ +#define kvm_cmp_feat_signed(kvm, id, fld, op, limit) \ + (get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit)) + +#define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit) \ + (get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit)) + +#define kvm_cmp_feat(kvm, id, fld, op, limit) \ (id##_##fld##_SIGNED ? 
\ - get_idreg_field_signed(kvm, id, fld) : \ - get_idreg_field_unsigned(kvm, id, fld)) + kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \ + kvm_cmp_feat_unsigned(kvm, id, fld, op, limit)) #define kvm_has_feat(kvm, id, fld, limit) \ - (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit)) + kvm_cmp_feat(kvm, id, fld, >=, limit) #define kvm_has_feat_enum(kvm, id, fld, val) \ - (get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val)) + kvm_cmp_feat_unsigned(kvm, id, fld, ==, val) #define kvm_has_feat_range(kvm, id, fld, min, max) \ - (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \ - get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max)) + (kvm_cmp_feat(kvm, id, fld, >=, min) && \ + kvm_cmp_feat(kvm, id, fld, <=, max)) /* Check for a given level of PAuth support */ #define kvm_has_pauth(k, l) \ @@ -1473,4 +1492,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val); (pa + pi + pa3) == 1; \ }) +#define kvm_has_fpmr(k) \ + (system_supports_fpmr() && \ + kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP)) + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 216ca424bb1684..cd4087fbda9afe 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -352,5 +352,11 @@ static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) return &kvm->arch.mmu != mmu; } +#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS +void kvm_s2_ptdump_create_debugfs(struct kvm *kvm); +#else +static inline void kvm_s2_ptdump_create_debugfs(struct kvm *kvm) {} +#endif /* CONFIG_PTDUMP_STAGE2_DEBUGFS */ + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h index 5b06c31035a246..e8bc6d67aba299 100644 --- a/arch/arm64/include/asm/kvm_nested.h +++ b/arch/arm64/include/asm/kvm_nested.h @@ -85,7 +85,7 @@ struct kvm_s2_trans { bool readable; int level; u32 esr; - u64 upper_attr; + u64 desc; }; static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans) @@ -115,7 +115,7 @@ static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans) static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans) { - return !(trans->upper_attr & BIT(54)); + return !(trans->desc & BIT(54)); } extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa, @@ -205,4 +205,40 @@ static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans) return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level); } +/* Adjust alignment for the contiguous bit as per StageOA() */ +#define contiguous_bit_shift(d, wi, l) \ + ({ \ + u8 shift = 0; \ + \ + if ((d) & PTE_CONT) { \ + switch (BIT((wi)->pgshift)) { \ + case SZ_4K: \ + shift = 4; \ + break; \ + case SZ_16K: \ + shift = (l) == 2 ? 
5 : 7; \ + break; \ + case SZ_64K: \ + shift = 5; \ + break; \ + } \ + } \ + \ + shift; \ + }) + +static inline unsigned int ps_to_output_size(unsigned int ps) +{ + switch (ps) { + case 0: return 32; + case 1: return 36; + case 2: return 40; + case 3: return 42; + case 4: return 44; + case 5: + default: + return 48; + } +} + #endif /* __ARM64_KVM_NESTED_H */ diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 19278dfe797825..03f4c3d7839c2c 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -59,6 +59,48 @@ typedef u64 kvm_pte_t; #define KVM_PHYS_INVALID (-1ULL) +#define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2) + +#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2) +#define KVM_PTE_LEAF_ATTR_LO_S1_AP GENMASK(7, 6) +#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO \ + ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; }) +#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW \ + ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; }) +#define KVM_PTE_LEAF_ATTR_LO_S1_SH GENMASK(9, 8) +#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS 3 +#define KVM_PTE_LEAF_ATTR_LO_S1_AF BIT(10) + +#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2) +#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R BIT(6) +#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W BIT(7) +#define KVM_PTE_LEAF_ATTR_LO_S2_SH GENMASK(9, 8) +#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS 3 +#define KVM_PTE_LEAF_ATTR_LO_S2_AF BIT(10) + +#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 50) + +#define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55) + +#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54) + +#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54) + +#define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50) + +#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \ + KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \ + KVM_PTE_LEAF_ATTR_HI_S2_XN) + +#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2) +#define KVM_MAX_OWNER_ID 1 + +/* + * Used to indicate a pte for which a 'break-before-make' sequence is in + * progress. 
+ */
+#define KVM_INVALID_PTE_LOCKED		BIT(10)
+
 static inline bool kvm_pte_valid(kvm_pte_t pte)
 {
 	return pte & KVM_PTE_VALID;
diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
new file mode 100644
index 00000000000000..b0c9a86b13a46e
--- /dev/null
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_MEM_ENCRYPT_H
+#define __ASM_MEM_ENCRYPT_H
+
+struct arm64_mem_crypt_ops {
+	int (*encrypt)(unsigned long addr, int numpages);
+	int (*decrypt)(unsigned long addr, int numpages);
+};
+
+int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif	/* __ASM_MEM_ENCRYPT_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 54fb014eba0582..0480c61dbb4f30 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -110,6 +110,8 @@
 #define PAGE_END		(_PAGE_END(VA_BITS_MIN))
 #endif /* CONFIG_KASAN */
 
+#define PHYSMEM_END		__pa(PAGE_END - 1)
+
 #define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
 
 /*
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 5966ee4a61542e..9e39217b4afbbb 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -2,12 +2,14 @@
 #ifndef __ASM_MMAN_H__
 #define __ASM_MMAN_H__
 
+#include
+
+#ifndef BUILD_VDSO
 #include
 #include
-#include
 
 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
-	unsigned long pkey __always_unused)
+	unsigned long pkey)
 {
 	unsigned long ret = 0;
 
@@ -17,6 +19,14 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 	if (system_supports_mte() && (prot & PROT_MTE))
 		ret |= VM_MTE;
 
+#ifdef CONFIG_ARCH_HAS_PKEYS
+	if (system_supports_poe()) {
+		ret |= pkey & BIT(0) ? VM_PKEY_BIT0 : 0;
+		ret |= pkey & BIT(1) ? VM_PKEY_BIT1 : 0;
+		ret |= pkey & BIT(2) ? VM_PKEY_BIT2 : 0;
+	}
+#endif
+
 	return ret;
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
@@ -60,4 +70,6 @@ static inline bool arch_validate_flags(unsigned long vm_flags)
 }
 #define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
 
+#endif /* !BUILD_VDSO */
+
 #endif /* !
__ASM_MMAN_H__ */ diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 65977c7783c581..2ec96d91acc66a 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -25,6 +25,7 @@ typedef struct { refcount_t pinned; void *vdso; unsigned long flags; + u8 pkey_allocation_map; } mm_context_t; /* @@ -63,7 +64,6 @@ static inline bool arm64_kernel_unmapped_at_el0(void) extern void arm64_memblock_init(void); extern void paging_init(void); extern void bootmem_init(void); -extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot); extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index bd19f4c758b779..7c09d47e09cb54 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -15,12 +15,12 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -175,9 +175,36 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) { atomic64_set(&mm->context.id, 0); refcount_set(&mm->context.pinned, 0); + + /* pkey 0 is the default, so always reserve it. */ + mm->context.pkey_allocation_map = BIT(0); + + return 0; +} + +static inline void arch_dup_pkeys(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + /* Duplicate the oldmm pkey state in mm: */ + mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map; +} + +static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +{ + arch_dup_pkeys(oldmm, mm); + return 0; } +static inline void arch_exit_mmap(struct mm_struct *mm) +{ +} + +static inline void arch_unmap(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void update_saved_ttbr0(struct task_struct *tsk, struct mm_struct *mm) @@ -267,6 +294,23 @@ static inline unsigned long mm_untag_mask(struct mm_struct *mm) return -1UL >> 8; } +/* + * Only enforce protection keys on the current process, because there is no + * user context to access POR_EL0 for another address space. 
+ */ +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, bool foreign) +{ + if (!system_supports_poe()) + return true; + + /* allow access if the VMA is not one from this process */ + if (foreign || vma_is_foreign(vma)) + return true; + + return por_el0_allows_pkey(vma_pkey(vma), write, execute); +} + #include #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/mmzone.h b/arch/arm64/include/asm/mmzone.h deleted file mode 100644 index fa17e01d9ab278..00000000000000 --- a/arch/arm64/include/asm/mmzone.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_MMZONE_H -#define __ASM_MMZONE_H - -#ifdef CONFIG_NUMA - -#include - -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[(nid)]) - -#endif /* CONFIG_NUMA */ -#endif /* __ASM_MMZONE_H */ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 1f60aa1bc750cb..fd330c1db289a6 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -135,7 +135,6 @@ /* * Section */ -#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) @@ -199,11 +198,26 @@ #define PTE_PI_IDX_2 53 /* PXN */ #define PTE_PI_IDX_3 54 /* UXN */ +/* + * POIndex[2:0] encoding (Permission Overlay Extension) + */ +#define PTE_PO_IDX_0 (_AT(pteval_t, 1) << 60) +#define PTE_PO_IDX_1 (_AT(pteval_t, 1) << 61) +#define PTE_PO_IDX_2 (_AT(pteval_t, 1) << 62) + +#define PTE_PO_IDX_MASK GENMASK_ULL(62, 60) + + /* * Memory Attribute override for Stage-2 (MemAttr[3:0]) */ #define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2) +/* + * Hierarchical permission for Stage-1 tables + */ +#define S1_TABLE_AP (_AT(pmdval_t, 3) << 61) + /* * Highest possible physical address supported. 
*/ @@ -298,6 +312,10 @@ #define TCR_TBI1 (UL(1) << 38) #define TCR_HA (UL(1) << 39) #define TCR_HD (UL(1) << 40) +#define TCR_HPD0_SHIFT 41 +#define TCR_HPD0 (UL(1) << TCR_HPD0_SHIFT) +#define TCR_HPD1_SHIFT 42 +#define TCR_HPD1 (UL(1) << TCR_HPD1_SHIFT) #define TCR_TBID0 (UL(1) << 51) #define TCR_TBID1 (UL(1) << 52) #define TCR_NFD0 (UL(1) << 53) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index b11cfb9fdd379a..2a11d0c10760b9 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -154,10 +154,10 @@ static inline bool __pure lpa2_is_enabled(void) #define PIE_E0 ( \ PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_X_O) | \ - PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX) | \ - PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX) | \ - PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R) | \ - PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW)) + PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX_O) | \ + PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX_O) | \ + PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R_O) | \ + PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW_O)) #define PIE_E1 ( \ PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_NONE_O) | \ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 7a4f5604be3f75..c329ea061dc988 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -34,6 +34,7 @@ #include #include +#include #include #include #include @@ -149,6 +150,24 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) #define pte_accessible(mm, pte) \ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) +static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute) +{ + u64 por; + + if (!system_supports_poe()) + return true; + + por = read_sysreg_s(SYS_POR_EL0); + + if (write) + return por_elx_allows_write(por, pkey); + + if (execute) + return por_elx_allows_exec(por, pkey); + + return por_elx_allows_read(por, pkey); +} + /* * p??_access_permitted() is true for valid user mappings (PTE_USER * bit set, subject to the write permission check). For execute-only @@ -156,8 +175,11 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) * not set) must return false. PROT_NONE mappings do not have the * PTE_VALID bit set. */ -#define pte_access_permitted(pte, write) \ +#define pte_access_permitted_no_overlay(pte, write) \ (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte))) +#define pte_access_permitted(pte, write) \ + (pte_access_permitted_no_overlay(pte, write) && \ + por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false)) #define pmd_access_permitted(pmd, write) \ (pte_access_permitted(pmd_pte(pmd), (write))) #define pud_access_permitted(pud, write) \ @@ -373,10 +395,11 @@ static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages) /* * If the PTE would provide user space access to the tags associated * with it then ensure that the MTE tags are synchronised. Although - * pte_access_permitted() returns false for exec only mappings, they - * don't expose tags (instruction fetches don't check tags). + * pte_access_permitted_no_overlay() returns false for exec only + * mappings, they don't expose tags (instruction fetches don't check + * tags). 
*/ - if (system_supports_mte() && pte_access_permitted(pte, false) && + if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) && !pte_special(pte) && pte_tagged(pte)) mte_sync_tags(pte, nr_pages); } @@ -384,6 +407,7 @@ static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages) /* * Select all bits except the pfn */ +#define pte_pgprot pte_pgprot static inline pgprot_t pte_pgprot(pte_t pte) { unsigned long pfn = pte_pfn(pte); @@ -577,6 +601,14 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP))); } +#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP +#define pmd_special(pte) (!!((pmd_val(pte) & PTE_SPECIAL))) +static inline pmd_t pmd_mkspecial(pmd_t pmd) +{ + return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL)); +} +#endif + #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT) @@ -594,6 +626,27 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) +#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP +#define pud_special(pte) pte_special(pud_pte(pud)) +#define pud_mkspecial(pte) pte_pud(pte_mkspecial(pud_pte(pud))) +#endif + +#define pmd_pgprot pmd_pgprot +static inline pgprot_t pmd_pgprot(pmd_t pmd) +{ + unsigned long pfn = pmd_pfn(pmd); + + return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd)); +} + +#define pud_pgprot pud_pgprot +static inline pgprot_t pud_pgprot(pud_t pud) +{ + unsigned long pfn = pud_pfn(pud); + + return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud)); +} + static inline void __set_pte_at(struct mm_struct *mm, unsigned long __always_unused addr, pte_t *ptep, pte_t pte, unsigned int nr) @@ -1103,7 +1156,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) */ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE | - PTE_GP | PTE_ATTRINDX_MASK; + PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK; + /* preserve the hardware dirty information */ if (pte_hw_dirty(pte)) pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); diff --git a/arch/arm64/include/asm/pkeys.h b/arch/arm64/include/asm/pkeys.h new file mode 100644 index 00000000000000..0ca5f83ce148f5 --- /dev/null +++ b/arch/arm64/include/asm/pkeys.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Arm Ltd. + * + * Based on arch/x86/include/asm/pkeys.h + */ + +#ifndef _ASM_ARM64_PKEYS_H +#define _ASM_ARM64_PKEYS_H + +#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2) + +#define arch_max_pkey() 8 + +int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, + unsigned long init_val); + +static inline bool arch_pkeys_enabled(void) +{ + return system_supports_poe(); +} + +static inline int vma_pkey(struct vm_area_struct *vma) +{ + return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT; +} + +static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma, + int prot, int pkey) +{ + if (pkey != -1) + return pkey; + + return vma_pkey(vma); +} + +static inline int execute_only_pkey(struct mm_struct *mm) +{ + // Execute-only mappings are handled by EPAN/FEAT_PAN3. 
+	return -1;
+}
+
+#define mm_pkey_allocation_map(mm)	(mm)->context.pkey_allocation_map
+#define mm_set_pkey_allocated(mm, pkey) do {		\
+	mm_pkey_allocation_map(mm) |= (1U << pkey);	\
+} while (0)
+#define mm_set_pkey_free(mm, pkey) do {			\
+	mm_pkey_allocation_map(mm) &= ~(1U << pkey);	\
+} while (0)
+
+static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+{
+	/*
+	 * "Allocated" pkeys are those that have been returned
+	 * from pkey_alloc() or pkey 0 which is allocated
+	 * implicitly when the mm is created.
+	 */
+	if (pkey < 0 || pkey >= arch_max_pkey())
+		return false;
+
+	return mm_pkey_allocation_map(mm) & (1U << pkey);
+}
+
+/*
+ * Returns a positive, 3-bit key on success, or -1 on failure.
+ */
+static inline int mm_pkey_alloc(struct mm_struct *mm)
+{
+	/*
+	 * Note: this is the one and only place we make sure
+	 * that the pkey is valid as far as the hardware is
+	 * concerned. The rest of the kernel trusts that
+	 * only good, valid pkeys come out of here.
+	 */
+	u8 all_pkeys_mask = GENMASK(arch_max_pkey() - 1, 0);
+	int ret;
+
+	if (!arch_pkeys_enabled())
+		return -1;
+
+	/*
+	 * Are we out of pkeys? We must handle this specially
+	 * because ffz() behavior is undefined if there are no
+	 * zeros.
+	 */
+	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
+		return -1;
+
+	ret = ffz(mm_pkey_allocation_map(mm));
+
+	mm_set_pkey_allocated(mm, ret);
+
+	return ret;
+}
+
+static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
+{
+	if (!mm_pkey_is_allocated(mm, pkey))
+		return -EINVAL;
+
+	mm_set_pkey_free(mm, pkey);
+
+	return 0;
+}
+
+#endif /* _ASM_ARM64_PKEYS_H */
diff --git a/arch/arm64/include/asm/por.h b/arch/arm64/include/asm/por.h
new file mode 100644
index 00000000000000..e06e9f473675f4
--- /dev/null
+++ b/arch/arm64/include/asm/por.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Arm Ltd.
+ */
+
+#ifndef _ASM_ARM64_POR_H
+#define _ASM_ARM64_POR_H
+
+#define POR_BITS_PER_PKEY		4
+#define POR_ELx_IDX(por_elx, idx)	(((por_elx) >> ((idx) * POR_BITS_PER_PKEY)) & 0xf)
+
+static inline bool por_elx_allows_read(u64 por, u8 pkey)
+{
+	u8 perm = POR_ELx_IDX(por, pkey);
+
+	return perm & POE_R;
+}
+
+static inline bool por_elx_allows_write(u64 por, u8 pkey)
+{
+	u8 perm = POR_ELx_IDX(por, pkey);
+
+	return perm & POE_W;
+}
+
+static inline bool por_elx_allows_exec(u64 por, u8 pkey)
+{
+	u8 perm = POR_ELx_IDX(por, pkey);
+
+	return perm & POE_X;
+}
+
+#endif /* _ASM_ARM64_POR_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f77371232d8c6d..1438424f006437 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -184,6 +184,7 @@ struct thread_struct {
 	u64			sctlr_user;
 	u64			svcr;
 	u64			tpidr2_el0;
+	u64			por_el0;
 };
 
 static inline unsigned int thread_get_vl(struct thread_struct *thread,
@@ -402,5 +403,10 @@ long get_tagged_addr_ctrl(struct task_struct *task);
 #define GET_TAGGED_ADDR_CTRL()	get_tagged_addr_ctrl(current)
 #endif
 
+int get_tsc_mode(unsigned long adr);
+int set_tsc_mode(unsigned int val);
+#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
+#define SET_TSC_CTL(val)	set_tsc_mode((val))
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 5b1701c76d1cec..6cf4aae052191d 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -5,6 +5,8 @@
 #ifndef __ASM_PTDUMP_H
 #define __ASM_PTDUMP_H
 
+#include
+
 #ifdef CONFIG_PTDUMP_CORE
 
 #include
@@ -21,14 +23,53 @@ struct ptdump_info {
 	unsigned long		base_addr;
 };
 
+struct ptdump_prot_bits {
+	u64		mask;
+	u64		val;
+	const char	*set;
+	const char	*clear;
+};
+
+struct ptdump_pg_level {
+	const struct ptdump_prot_bits *bits;
+	char name[4];
+	int num;
+	u64 mask;
+};
+
+/*
+ * The page dumper groups page table entries of the same type into a single
+ * description. It uses pg_state to track the range information while
+ * iterating over the pte entries. When the continuity is broken it then
+ * dumps out a description of the range.
+ */ +struct ptdump_pg_state { + struct ptdump_state ptdump; + struct ptdump_pg_level *pg_level; + struct seq_file *seq; + const struct addr_marker *marker; + const struct mm_struct *mm; + unsigned long start_address; + int level; + u64 current_prot; + bool check_wx; + unsigned long wx_pages; + unsigned long uxn_pages; +}; + void ptdump_walk(struct seq_file *s, struct ptdump_info *info); +void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, + u64 val); #ifdef CONFIG_PTDUMP_DEBUGFS #define EFI_RUNTIME_MAP_END DEFAULT_MAP_WINDOW_64 void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name); #else static inline void ptdump_debugfs_register(struct ptdump_info *info, const char *name) { } -#endif +#endif /* CONFIG_PTDUMP_DEBUGFS */ +#else +static inline void note_page(struct ptdump_state *pt_st, unsigned long addr, + int level, u64 val) { } #endif /* CONFIG_PTDUMP_CORE */ #endif /* __ASM_PTDUMP_H */ diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 0f740b7811871c..917761feeffdda 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -3,6 +3,7 @@ #ifndef _ASM_ARM64_SET_MEMORY_H #define _ASM_ARM64_SET_MEMORY_H +#include #include bool can_set_direct_map(void); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 4a9ea103817e89..9ea97dddefc405 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -109,6 +109,9 @@ #define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x)) #define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x)) +/* Register-based PAN access, for save/restore purposes */ +#define SYS_PSTATE_PAN sys_reg(3, 0, 4, 2, 3) + #define __SYS_BARRIER_INSN(CRm, op2, Rt) \ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) @@ -325,7 +328,25 @@ #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) #define SYS_PAR_EL1_F BIT(0) +/* When PAR_EL1.F == 1 */ #define SYS_PAR_EL1_FST GENMASK(6, 1) +#define SYS_PAR_EL1_PTW BIT(8) +#define SYS_PAR_EL1_S BIT(9) +#define SYS_PAR_EL1_AssuredOnly BIT(12) +#define SYS_PAR_EL1_TopLevel BIT(13) +#define SYS_PAR_EL1_Overlay BIT(14) +#define SYS_PAR_EL1_DirtyBit BIT(15) +#define SYS_PAR_EL1_F1_IMPDEF GENMASK_ULL(63, 48) +#define SYS_PAR_EL1_F1_RES0 (BIT(7) | BIT(10) | GENMASK_ULL(47, 16)) +#define SYS_PAR_EL1_RES1 BIT(11) +/* When PAR_EL1.F == 0 */ +#define SYS_PAR_EL1_SH GENMASK_ULL(8, 7) +#define SYS_PAR_EL1_NS BIT(9) +#define SYS_PAR_EL1_F0_IMPDEF BIT(10) +#define SYS_PAR_EL1_NSE BIT(11) +#define SYS_PAR_EL1_PA GENMASK_ULL(51, 12) +#define SYS_PAR_EL1_ATTR GENMASK_ULL(63, 56) +#define SYS_PAR_EL1_F0_RES0 (GENMASK_ULL(6, 1) | GENMASK_ULL(55, 52)) /*** Statistical Profiling Extension ***/ #define PMSEVFR_EL1_RES0_IMP \ @@ -403,7 +424,6 @@ #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) #define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3) #define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4) -#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5) #define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6) #define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7) #define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0) @@ -652,6 +672,7 @@ #define OP_AT_S12E1W sys_insn(AT_Op0, 4, AT_CRn, 8, 5) #define OP_AT_S12E0R sys_insn(AT_Op0, 4, AT_CRn, 8, 6) #define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7) +#define OP_AT_S1E2A sys_insn(AT_Op0, 4, AT_CRn, 9, 2) /* TLBI instructions */ #define TLBI_Op0 1 @@ -1077,6 +1098,9 @@ #define POE_RXW UL(0x7) #define POE_MASK UL(0xf) +/* Initial value for Permission Overlay 
Extension for EL0 */ +#define POR_EL0_INIT POE_RXW + #define ARM64_FEATURE_FIELD_BITS 4 /* Defined for compatibility only, do not add new users. */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index e72a3bf9e56348..1114c1c3300a13 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -81,6 +81,7 @@ void arch_setup_new_exec(void); #define TIF_SME 27 /* SME in use */ #define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */ #define TIF_KERNEL_FPSTATE 29 /* Task is in a kernel mode FPSIMD section */ +#define TIF_TSC_SIGSEGV 30 /* SIGSEGV on counter-timer access */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) @@ -97,6 +98,7 @@ void arch_setup_new_exec(void); #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_TSC_SIGSEGV (1 << TIF_TSC_SIGSEGV) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 0f6ef432fb8403..5fc3af9f8f29bc 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -5,6 +5,7 @@ #include #ifdef CONFIG_NUMA +#include struct pci_bus; int pcibus_to_node(struct pci_bus *bus); diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index eefe766d6161d2..d780d1bd2eacb9 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -25,6 +25,7 @@ try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn) void force_signal_inject(int signal, int code, unsigned long address, unsigned long err); void arm64_notify_segfault(unsigned long addr); void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str); +void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey); void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str); void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str); diff --git a/arch/arm64/include/asm/vdso/getrandom.h b/arch/arm64/include/asm/vdso/getrandom.h new file mode 100644 index 00000000000000..342f807e204442 --- /dev/null +++ b/arch/arm64/include/asm/vdso/getrandom.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_GETRANDOM_H +#define __ASM_VDSO_GETRANDOM_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +/** + * getrandom_syscall - Invoke the getrandom() syscall. + * @buffer: Destination buffer to fill with random bytes. + * @len: Size of @buffer in bytes. + * @flags: Zero or more GRND_* flags. + * Returns: The number of random bytes written to @buffer, or a negative value indicating an error. 
+ */ +static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags) +{ + register void *buffer asm ("x0") = _buffer; + register size_t len asm ("x1") = _len; + register unsigned int flags asm ("x2") = _flags; + register long ret asm ("x0"); + register long nr asm ("x8") = __NR_getrandom; + + asm volatile( + " svc #0\n" + : "=r" (ret) + : "r" (buffer), "r" (len), "r" (flags), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void) +{ + /* + * The RNG data is in the real VVAR data page, but if a task belongs to a time namespace + * then VVAR_DATA_PAGE_OFFSET points to the namespace-specific VVAR page and VVAR_TIMENS_ + * PAGE_OFFSET points to the real VVAR page. + */ + if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS) + return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * (1UL << CONFIG_PAGE_SHIFT); + return &_vdso_rng_data; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h index f94b1457c117d4..5b6d0dd3cef548 100644 --- a/arch/arm64/include/asm/vdso/vsyscall.h +++ b/arch/arm64/include/asm/vdso/vsyscall.h @@ -2,11 +2,19 @@ #ifndef __ASM_VDSO_VSYSCALL_H #define __ASM_VDSO_VSYSCALL_H +#define __VDSO_RND_DATA_OFFSET 480 + #ifndef __ASSEMBLY__ #include #include +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET, + VVAR_TIMENS_PAGE_OFFSET, + VVAR_NR_PAGES, +}; + #define VDSO_PRECISION_MASK ~(0xFF00ULL<<48) extern struct vdso_data *vdso_data; @@ -21,6 +29,13 @@ struct vdso_data *__arm64_get_k_vdso_data(void) } #define __arch_get_k_vdso_data __arm64_get_k_vdso_data +static __always_inline +struct vdso_rng_data *__arm64_get_k_vdso_rnd_data(void) +{ + return (void *)vdso_data + __VDSO_RND_DATA_OFFSET; +} +#define __arch_get_k_vdso_rng_data __arm64_get_k_vdso_rnd_data + static __always_inline void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk) { diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h index df2c47c559728b..06f8ec0906a6e9 100644 --- a/arch/arm64/include/asm/vncr_mapping.h +++ b/arch/arm64/include/asm/vncr_mapping.h @@ -52,6 +52,7 @@ #define VNCR_PIRE0_EL1 0x290 #define VNCR_PIRE0_EL2 0x298 #define VNCR_PIR_EL1 0x2A0 +#define VNCR_POR_EL1 0x2A8 #define VNCR_ICH_LR0_EL2 0x400 #define VNCR_ICH_LR1_EL2 0x408 #define VNCR_ICH_LR2_EL2 0x410 diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 285610e626f5ff..055381b2c61595 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -122,5 +122,6 @@ #define HWCAP2_SME_SF8FMA (1UL << 60) #define HWCAP2_SME_SF8DP4 (1UL << 61) #define HWCAP2_SME_SF8DP2 (1UL << 62) +#define HWCAP2_POE (1UL << 63) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/include/uapi/asm/mman.h b/arch/arm64/include/uapi/asm/mman.h index 1e6482a838e11a..e7e0c8216243fa 100644 --- a/arch/arm64/include/uapi/asm/mman.h +++ b/arch/arm64/include/uapi/asm/mman.h @@ -7,4 +7,13 @@ #define PROT_BTI 0x10 /* BTI guarded page */ #define PROT_MTE 0x20 /* Normal Tagged mapping */ +/* Override any generic PKEY permission defines */ +#define PKEY_DISABLE_EXECUTE 0x4 +#define PKEY_DISABLE_READ 0x8 +#undef PKEY_ACCESS_MASK +#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ + PKEY_DISABLE_WRITE |\ + PKEY_DISABLE_READ |\ + PKEY_DISABLE_EXECUTE) + #endif /* ! 
_UAPI__ASM_MMAN_H */ diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 8a45b7a411e045..bb7af77a30a726 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -98,6 +98,13 @@ struct esr_context { __u64 esr; }; +#define POE_MAGIC 0x504f4530 + +struct poe_context { + struct _aarch64_ctx head; + __u64 por_el0; +}; + /* * extra_context: describes extra space in the signal frame for * additional structures that don't fit in sigcontext.__reserved[]. @@ -320,10 +327,10 @@ struct zt_context { ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \ / __SVE_VQ_BYTES * __SVE_VQ_BYTES) -#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) +#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES)) #define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \ - (SVE_SIG_ZREG_SIZE(vq) * n)) + (SVE_SIG_ZREG_SIZE(vq) * (n))) #define ZA_SIG_CONTEXT_SIZE(vq) \ (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq)) @@ -334,7 +341,7 @@ struct zt_context { #define ZT_SIG_REGS_OFFSET sizeof(struct zt_context) -#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n) +#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n)) #define ZT_SIG_CONTEXT_SIZE(n) \ (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n)) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index f6b6b450735715..a78f247029aec3 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -439,6 +439,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A715), MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), MIDR_ALL_VERSIONS(MIDR_CORTEX_A725), MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), @@ -447,8 +448,10 @@ static const struct midr_range erratum_spec_ssbs_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), MIDR_ALL_VERSIONS(MIDR_CORTEX_X4), MIDR_ALL_VERSIONS(MIDR_CORTEX_X925), + MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3), @@ -456,6 +459,14 @@ static const struct midr_range erratum_spec_ssbs_list[] = { }; #endif +#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38 +static const struct midr_range erratum_ac03_cpu_38_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + MIDR_ALL_VERSIONS(MIDR_AMPERE1A), + {}, +}; +#endif + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -772,7 +783,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "AmpereOne erratum AC03_CPU_38", .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38, - ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1), + ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list), }, #endif { diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 646ecd3069fdd9..718728a85430fa 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -466,6 +466,8 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = { + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_POE), + FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1POE_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0), 
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0), ARM64_FTR_END, @@ -2348,6 +2350,14 @@ static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused) sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_MSCEn); } +#ifdef CONFIG_ARM64_POE +static void cpu_enable_poe(const struct arm64_cpu_capabilities *__unused) +{ + sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1x_E0POE); + sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_E0POE); +} +#endif + /* Internal helper functions to match cpu capability type */ static bool cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) @@ -2870,6 +2880,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_nv1, ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1) }, +#ifdef CONFIG_ARM64_POE + { + .desc = "Stage-1 Permission Overlay Extension (S1POE)", + .capability = ARM64_HAS_S1POE, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .matches = has_cpuid_feature, + .cpu_enable = cpu_enable_poe, + ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1POE, IMP) + }, +#endif {}, }; @@ -3034,6 +3054,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP2), HWCAP_CAP(ID_AA64FPFR0_EL1, F8E4M3, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E4M3), HWCAP_CAP(ID_AA64FPFR0_EL1, F8E5M2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E5M2), +#ifdef CONFIG_ARM64_POE + HWCAP_CAP(ID_AA64MMFR3_EL1, S1POE, IMP, CAP_HWCAP, KERNEL_HWCAP_POE), +#endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 09eeaa24d45605..44718d0482b3b4 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -143,6 +143,7 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_SME_SF8FMA] = "smesf8fma", [KERNEL_HWCAP_SME_SF8DP4] = "smesf8dp4", [KERNEL_HWCAP_SME_SF8DP2] = "smesf8dp2", + [KERNEL_HWCAP_POE] = "poe", }; #ifdef CONFIG_COMPAT @@ -280,7 +281,7 @@ const struct seq_operations cpuinfo_op = { }; -static struct kobj_type cpuregs_kobj_type = { +static const struct kobj_type cpuregs_kobj_type = { .sysfs_ops = &kobj_sysfs_ops, }; diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index b77a15955f28ba..3fcd9d080bf2a9 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -103,7 +103,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs) static __always_inline void __enter_from_user_mode(void) { lockdep_hardirqs_off(CALLER_ADDR0); - CT_WARN_ON(ct_state() != CONTEXT_USER); + CT_WARN_ON(ct_state() != CT_STATE_USER); user_exit_irqoff(); trace_hardirqs_off_finish(); mte_disable_tco_entry(current); diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 02870beb271ed3..7b11d84f533c9b 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -407,7 +407,7 @@ int swsusp_arch_resume(void) void *, phys_addr_t, phys_addr_t); struct trans_pgd_info trans_info = { .trans_alloc_page = hibernate_page_alloc, - .trans_alloc_arg = (void *)GFP_ATOMIC, + .trans_alloc_arg = (__force void *)GFP_ATOMIC, }; /* diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c index f872c57e99095b..fd9a7bed83ce8c 100644 --- a/arch/arm64/kernel/pci.c +++ b/arch/arm64/kernel/pci.c @@ -6,28 +6,7 @@ * Copyright (C) 2014 ARM Ltd. 
*/ -#include -#include -#include -#include -#include #include -#include -#include -#include - -#ifdef CONFIG_ACPI -/* - * Try to assign the IRQ number when probing a new device - */ -int pcibios_alloc_irq(struct pci_dev *dev) -{ - if (!acpi_disabled) - acpi_pci_irq_enable(dev); - - return 0; -} -#endif /* * raw_pci_read/write - Platform-specific PCI config space access. @@ -61,173 +40,3 @@ int pcibus_to_node(struct pci_bus *bus) EXPORT_SYMBOL(pcibus_to_node); #endif - -#ifdef CONFIG_ACPI - -struct acpi_pci_generic_root_info { - struct acpi_pci_root_info common; - struct pci_config_window *cfg; /* config space mapping */ -}; - -int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) -{ - struct pci_config_window *cfg = bus->sysdata; - struct acpi_device *adev = to_acpi_device(cfg->parent); - struct acpi_pci_root *root = acpi_driver_data(adev); - - return root->segment; -} - -int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) -{ - struct pci_config_window *cfg; - struct acpi_device *adev; - struct device *bus_dev; - - if (acpi_disabled) - return 0; - - cfg = bridge->bus->sysdata; - - /* - * On Hyper-V there is no corresponding ACPI device for a root bridge, - * therefore ->parent is set as NULL by the driver. And set 'adev' as - * NULL in this case because there is no proper ACPI device. - */ - if (!cfg->parent) - adev = NULL; - else - adev = to_acpi_device(cfg->parent); - - bus_dev = &bridge->bus->dev; - - ACPI_COMPANION_SET(&bridge->dev, adev); - set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev))); - - return 0; -} - -static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) -{ - struct resource_entry *entry, *tmp; - int status; - - status = acpi_pci_probe_root_resources(ci); - resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { - if (!(entry->res->flags & IORESOURCE_WINDOW)) - resource_list_destroy_entry(entry); - } - return status; -} - -/* - * Lookup the bus range for the domain in MCFG, and set up config space - * mapping. 
- */ -static struct pci_config_window * -pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) -{ - struct device *dev = &root->device->dev; - struct resource *bus_res = &root->secondary; - u16 seg = root->segment; - const struct pci_ecam_ops *ecam_ops; - struct resource cfgres; - struct acpi_device *adev; - struct pci_config_window *cfg; - int ret; - - ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); - if (ret) { - dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); - return NULL; - } - - adev = acpi_resource_consumer(&cfgres); - if (adev) - dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres, - dev_name(&adev->dev)); - else - dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n", - &cfgres); - - cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); - if (IS_ERR(cfg)) { - dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, - PTR_ERR(cfg)); - return NULL; - } - - return cfg; -} - -/* release_info: free resources allocated by init_info */ -static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) -{ - struct acpi_pci_generic_root_info *ri; - - ri = container_of(ci, struct acpi_pci_generic_root_info, common); - pci_ecam_free(ri->cfg); - kfree(ci->ops); - kfree(ri); -} - -/* Interface called from ACPI code to setup PCI host controller */ -struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) -{ - struct acpi_pci_generic_root_info *ri; - struct pci_bus *bus, *child; - struct acpi_pci_root_ops *root_ops; - struct pci_host_bridge *host; - - ri = kzalloc(sizeof(*ri), GFP_KERNEL); - if (!ri) - return NULL; - - root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); - if (!root_ops) { - kfree(ri); - return NULL; - } - - ri->cfg = pci_acpi_setup_ecam_mapping(root); - if (!ri->cfg) { - kfree(ri); - kfree(root_ops); - return NULL; - } - - root_ops->release_info = pci_acpi_generic_release_info; - root_ops->prepare_resources = pci_acpi_root_prepare_resources; - root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; - bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); - if (!bus) - return NULL; - - /* If we must preserve the resource configuration, claim now */ - host = pci_find_host_bridge(bus); - if (host->preserve_config) - pci_bus_claim_resources(bus); - - /* - * Assign whatever was left unassigned. If we didn't claim above, - * this will reassign everything. 
- */ - pci_assign_unassigned_root_bus_resources(bus); - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - return bus; -} - -void pcibios_add_bus(struct pci_bus *bus) -{ - acpi_pci_add_bus(bus); -} - -void pcibios_remove_bus(struct pci_bus *bus) -{ - acpi_pci_remove_bus(bus); -} - -#endif diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 4ae31b7af6c311..0540653fbf382b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -43,6 +43,7 @@ #include #include +#include #include #include #include @@ -271,12 +272,21 @@ static void flush_tagged_addr_state(void) clear_thread_flag(TIF_TAGGED_ADDR); } +static void flush_poe(void) +{ + if (!system_supports_poe()) + return; + + write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0); +} + void flush_thread(void) { fpsimd_flush_thread(); tls_thread_flush(); flush_ptrace_hw_breakpoint(current); flush_tagged_addr_state(); + flush_poe(); } void arch_release_task_struct(struct task_struct *tsk) @@ -371,6 +381,9 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) if (system_supports_tpidr2()) p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); + if (system_supports_poe()) + p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0); + if (stack_start) { if (is_compat_thread(task_thread_info(p))) childregs->compat_sp = stack_start; @@ -472,27 +485,63 @@ static void entry_task_switch(struct task_struct *next) } /* - * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. - * Ensure access is disabled when switching to a 32bit task, ensure - * access is enabled when switching to a 64bit task. + * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of + * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0} + * accesses and prctl(PR_SET_TSC). Ensure access is disabled iff a workaround is + * required or PR_TSC_SIGSEGV is set. 
*/ -static void erratum_1418040_thread_switch(struct task_struct *next) +static void update_cntkctl_el1(struct task_struct *next) { - if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) || - !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) - return; + struct thread_info *ti = task_thread_info(next); - if (is_compat_thread(task_thread_info(next))) + if (test_ti_thread_flag(ti, TIF_TSC_SIGSEGV) || + has_erratum_handler(read_cntvct_el0) || + (IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) && + this_cpu_has_cap(ARM64_WORKAROUND_1418040) && + is_compat_thread(ti))) sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); else sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); } -static void erratum_1418040_new_exec(void) +static void cntkctl_thread_switch(struct task_struct *prev, + struct task_struct *next) +{ + if ((read_ti_thread_flags(task_thread_info(prev)) & + (_TIF_32BIT | _TIF_TSC_SIGSEGV)) != + (read_ti_thread_flags(task_thread_info(next)) & + (_TIF_32BIT | _TIF_TSC_SIGSEGV))) + update_cntkctl_el1(next); +} + +static int do_set_tsc_mode(unsigned int val) { + bool tsc_sigsegv; + + if (val == PR_TSC_SIGSEGV) + tsc_sigsegv = true; + else if (val == PR_TSC_ENABLE) + tsc_sigsegv = false; + else + return -EINVAL; + preempt_disable(); - erratum_1418040_thread_switch(current); + update_thread_flag(TIF_TSC_SIGSEGV, tsc_sigsegv); + update_cntkctl_el1(current); preempt_enable(); + + return 0; +} + +static void permission_overlay_switch(struct task_struct *next) +{ + if (!system_supports_poe()) + return; + + current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0); + if (current->thread.por_el0 != next->thread.por_el0) { + write_sysreg_s(next->thread.por_el0, SYS_POR_EL0); + } } /* @@ -528,8 +577,9 @@ struct task_struct *__switch_to(struct task_struct *prev, contextidr_thread_switch(next); entry_task_switch(next); ssbs_thread_switch(next); - erratum_1418040_thread_switch(next); + cntkctl_thread_switch(prev, next); ptrauth_thread_switch_user(next); + permission_overlay_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -645,7 +695,7 @@ void arch_setup_new_exec(void) current->mm->context.flags = mmflags; ptrauth_thread_init_user(); mte_thread_init_user(); - erratum_1418040_new_exec(); + do_set_tsc_mode(PR_TSC_ENABLE); if (task_spec_ssb_noexec(current)) { arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, @@ -754,3 +804,26 @@ int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, return prot; } #endif + +int get_tsc_mode(unsigned long adr) +{ + unsigned int val; + + if (is_compat_task()) + return -EINVAL; + + if (test_thread_flag(TIF_TSC_SIGSEGV)) + val = PR_TSC_SIGSEGV; + else + val = PR_TSC_ENABLE; + + return put_user(val, (unsigned int __user *)adr); +} + +int set_tsc_mode(unsigned int val) +{ + if (is_compat_task()) + return -EINVAL; + + return do_set_tsc_mode(val); +} diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 0d022599eb61b3..b756578aeaeea1 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1440,6 +1440,39 @@ static int tagged_addr_ctrl_set(struct task_struct *target, const struct } #endif +#ifdef CONFIG_ARM64_POE +static int poe_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_poe()) + return -EINVAL; + + return membuf_write(&to, &target->thread.por_el0, + sizeof(target->thread.por_el0)); +} + +static int poe_set(struct task_struct *target, const struct + user_regset *regset, unsigned int pos, + unsigned int count, 
const void *kbuf, const + void __user *ubuf) +{ + int ret; + long ctrl; + + if (!system_supports_poe()) + return -EINVAL; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); + if (ret) + return ret; + + target->thread.por_el0 = ctrl; + + return 0; +} +#endif + enum aarch64_regset { REGSET_GPR, REGSET_FPR, @@ -1469,6 +1502,9 @@ enum aarch64_regset { #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI REGSET_TAGGED_ADDR_CTRL, #endif +#ifdef CONFIG_ARM64_POE + REGSET_POE +#endif }; static const struct user_regset aarch64_regsets[] = { @@ -1628,6 +1664,16 @@ static const struct user_regset aarch64_regsets[] = { .set = tagged_addr_ctrl_set, }, #endif +#ifdef CONFIG_ARM64_POE + [REGSET_POE] = { + .core_note_type = NT_ARM_POE, + .n = 1, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = poe_get, + .set = poe_set, + }, +#endif }; static const struct user_regset_view user_aarch64_view = { diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 4a77f4976e116a..56198694753047 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -61,6 +61,7 @@ struct rt_sigframe_user_layout { unsigned long za_offset; unsigned long zt_offset; unsigned long fpmr_offset; + unsigned long poe_offset; unsigned long extra_offset; unsigned long end_offset; }; @@ -185,6 +186,8 @@ struct user_ctxs { u32 zt_size; struct fpmr_context __user *fpmr; u32 fpmr_size; + struct poe_context __user *poe; + u32 poe_size; }; static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) @@ -258,6 +261,32 @@ static int restore_fpmr_context(struct user_ctxs *user) return err; } +static int preserve_poe_context(struct poe_context __user *ctx) +{ + int err = 0; + + __put_user_error(POE_MAGIC, &ctx->head.magic, err); + __put_user_error(sizeof(*ctx), &ctx->head.size, err); + __put_user_error(read_sysreg_s(SYS_POR_EL0), &ctx->por_el0, err); + + return err; +} + +static int restore_poe_context(struct user_ctxs *user) +{ + u64 por_el0; + int err = 0; + + if (user->poe_size != sizeof(*user->poe)) + return -EINVAL; + + __get_user_error(por_el0, &(user->poe->por_el0), err); + if (!err) + write_sysreg_s(por_el0, SYS_POR_EL0); + + return err; +} + #ifdef CONFIG_ARM64_SVE static int preserve_sve_context(struct sve_context __user *ctx) @@ -621,6 +650,7 @@ static int parse_user_sigframe(struct user_ctxs *user, user->za = NULL; user->zt = NULL; user->fpmr = NULL; + user->poe = NULL; if (!IS_ALIGNED((unsigned long)base, 16)) goto invalid; @@ -671,6 +701,17 @@ static int parse_user_sigframe(struct user_ctxs *user, /* ignore */ break; + case POE_MAGIC: + if (!system_supports_poe()) + goto invalid; + + if (user->poe) + goto invalid; + + user->poe = (struct poe_context __user *)head; + user->poe_size = size; + break; + case SVE_MAGIC: if (!system_supports_sve() && !system_supports_sme()) goto invalid; @@ -857,6 +898,9 @@ static int restore_sigframe(struct pt_regs *regs, if (err == 0 && system_supports_sme2() && user.zt) err = restore_zt_context(&user); + if (err == 0 && system_supports_poe() && user.poe) + err = restore_poe_context(&user); + return err; } @@ -980,6 +1024,13 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, return err; } + if (system_supports_poe()) { + err = sigframe_alloc(user, &user->poe_offset, + sizeof(struct poe_context)); + if (err) + return err; + } + return sigframe_alloc_end(user); } @@ -1042,6 +1093,14 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, err |= preserve_fpmr_context(fpmr_ctx); } + if (system_supports_poe() && err == 
0 && user->poe_offset) { + struct poe_context __user *poe_ctx = + apply_user_offset(user, user->poe_offset); + + err |= preserve_poe_context(poe_ctx); + } + + /* ZA state if present */ if (system_supports_sme() && err == 0 && user->za_offset) { struct za_context __user *za_ctx = @@ -1178,6 +1237,9 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, sme_smstop(); } + if (system_supports_poe()) + write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0); + if (ka->sa.sa_flags & SA_RESTORER) sigtramp = ka->sa.sa_restorer; else diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index f01f0fd7b7feb4..3b3f6b56e73303 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -68,7 +68,7 @@ enum ipi_msg_type { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, - IPI_CPU_CRASH_STOP, + IPI_CPU_STOP_NMI, IPI_TIMER, IPI_IRQ_WORK, NR_IPI, @@ -85,6 +85,8 @@ static int ipi_irq_base __ro_after_init; static int nr_ipi __ro_after_init = NR_IPI; static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init; +static bool crash_stop; + static void ipi_setup(int cpu); #ifdef CONFIG_HOTPLUG_CPU @@ -823,7 +825,7 @@ static const char *ipi_types[MAX_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNC] = "Function call interrupts", [IPI_CPU_STOP] = "CPU stop interrupts", - [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", + [IPI_CPU_STOP_NMI] = "CPU stop NMIs", [IPI_TIMER] = "Timer broadcast interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", @@ -867,9 +869,9 @@ void arch_irq_work_raise(void) } #endif -static void __noreturn local_cpu_stop(void) +static void __noreturn local_cpu_stop(unsigned int cpu) { - set_cpu_online(smp_processor_id(), false); + set_cpu_online(cpu, false); local_daif_mask(); sdei_mask_local_cpu(); @@ -883,21 +885,26 @@ static void __noreturn local_cpu_stop(void) */ void __noreturn panic_smp_self_stop(void) { - local_cpu_stop(); + local_cpu_stop(smp_processor_id()); } -#ifdef CONFIG_KEXEC_CORE -static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0); -#endif - static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) { #ifdef CONFIG_KEXEC_CORE + /* + * Use local_daif_mask() instead of local_irq_disable() to make sure + * that pseudo-NMIs are disabled. The "crash stop" code starts with + * an IRQ and falls back to NMI (which might be pseudo). If the IRQ + * finally goes through right as we're timing out then the NMI could + * interrupt us. It's better to prevent the NMI and let the IRQ + * finish since the pt_regs will be better. 
+ */ + local_daif_mask(); + crash_save_cpu(regs, cpu); - atomic_dec(&waiting_for_crash_ipi); + set_cpu_online(cpu, false); - local_irq_disable(); sdei_mask_local_cpu(); if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) @@ -962,14 +969,12 @@ static void do_handle_IPI(int ipinr) break; case IPI_CPU_STOP: - local_cpu_stop(); - break; - - case IPI_CPU_CRASH_STOP: - if (IS_ENABLED(CONFIG_KEXEC_CORE)) { + case IPI_CPU_STOP_NMI: + if (IS_ENABLED(CONFIG_KEXEC_CORE) && crash_stop) { ipi_cpu_crash_stop(cpu, get_irq_regs()); - unreachable(); + } else { + local_cpu_stop(cpu); } break; @@ -1024,8 +1029,7 @@ static bool ipi_should_be_nmi(enum ipi_msg_type ipi) return false; switch (ipi) { - case IPI_CPU_STOP: - case IPI_CPU_CRASH_STOP: + case IPI_CPU_STOP_NMI: case IPI_CPU_BACKTRACE: case IPI_KGDB_ROUNDUP: return true; @@ -1138,79 +1142,109 @@ static inline unsigned int num_other_online_cpus(void) void smp_send_stop(void) { + static unsigned long stop_in_progress; + cpumask_t mask; unsigned long timeout; - if (num_other_online_cpus()) { - cpumask_t mask; + /* + * If this cpu is the only one alive at this point in time, online or + * not, there are no stop messages to be sent around, so just back out. + */ + if (num_other_online_cpus() == 0) + goto skip_ipi; - cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); + /* Only proceed if this is the first CPU to reach this code */ + if (test_and_set_bit(0, &stop_in_progress)) + return; - if (system_state <= SYSTEM_RUNNING) - pr_crit("SMP: stopping secondary CPUs\n"); - smp_cross_call(&mask, IPI_CPU_STOP); - } + /* + * Send an IPI to all currently online CPUs except the CPU running + * this code. + * + * NOTE: we don't do anything here to prevent other CPUs from coming + * online after we snapshot `cpu_online_mask`. Ideally, the calling code + * should do something to prevent other CPUs from coming up. This code + * can be called in the panic path and thus it doesn't seem wise to + * grab the CPU hotplug mutex ourselves. Worst case: + * - If a CPU comes online as we're running, we'll likely notice it + * during the 1 second wait below and then we'll catch it when we try + * with an NMI (assuming NMIs are enabled) since we re-snapshot the + * mask before sending an NMI. + * - If we leave the function and see that CPUs are still online we'll + * at least print a warning. Especially without NMIs this function + * isn't foolproof anyway so calling code will just have to accept + * the fact that there could be cases where a CPU can't be stopped. + */ + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); - /* Wait up to one second for other CPUs to stop */ + if (system_state <= SYSTEM_RUNNING) + pr_crit("SMP: stopping secondary CPUs\n"); + + /* + * Start with a normal IPI and wait up to one second for other CPUs to + * stop. We do this first because it gives other processors a chance + * to exit critical sections / drop locks and makes the rest of the + * stop process (especially console flush) more robust. + */ + smp_cross_call(&mask, IPI_CPU_STOP); timeout = USEC_PER_SEC; while (num_other_online_cpus() && timeout--) udelay(1); - if (num_other_online_cpus()) + /* + * If CPUs are still online, try an NMI. There's no excuse for this to + * be slow, so we only give them an extra 10 ms to respond. 
+ */ + if (num_other_online_cpus() && ipi_should_be_nmi(IPI_CPU_STOP_NMI)) { + smp_rmb(); + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + pr_info("SMP: retry stop with NMI for CPUs %*pbl\n", + cpumask_pr_args(&mask)); + + smp_cross_call(&mask, IPI_CPU_STOP_NMI); + timeout = USEC_PER_MSEC * 10; + while (num_other_online_cpus() && timeout--) + udelay(1); + } + + if (num_other_online_cpus()) { + smp_rmb(); + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", - cpumask_pr_args(cpu_online_mask)); + cpumask_pr_args(&mask)); + } +skip_ipi: sdei_mask_local_cpu(); } #ifdef CONFIG_KEXEC_CORE void crash_smp_send_stop(void) { - static int cpus_stopped; - cpumask_t mask; - unsigned long timeout; - /* * This function can be called twice in panic path, but obviously * we execute this only once. + * + * We use this same boolean to tell whether the IPI we send was a + * stop or a "crash stop". */ - if (cpus_stopped) + if (crash_stop) return; + crash_stop = 1; - cpus_stopped = 1; + smp_send_stop(); - /* - * If this cpu is the only one alive at this point in time, online or - * not, there are no stop messages to be sent around, so just back out. - */ - if (num_other_online_cpus() == 0) - goto skip_ipi; - - cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); - - atomic_set(&waiting_for_crash_ipi, num_other_online_cpus()); - - pr_crit("SMP: stopping secondary CPUs\n"); - smp_cross_call(&mask, IPI_CPU_CRASH_STOP); - - /* Wait up to one second for other CPUs to stop */ - timeout = USEC_PER_SEC; - while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--) - udelay(1); - - if (atomic_read(&waiting_for_crash_ipi) > 0) - pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", - cpumask_pr_args(&mask)); - -skip_ipi: - sdei_mask_local_cpu(); sdei_handler_abort(); } bool smp_crash_stop_failed(void) { - return (atomic_read(&waiting_for_crash_ipi) > 0); + return num_other_online_cpus() != 0; } #endif diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 9e22683aa9214d..563cbce1112696 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -273,6 +273,12 @@ void arm64_force_sig_fault(int signo, int code, unsigned long far, force_sig_fault(signo, code, (void __user *)far); } +void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey) +{ + arm64_show_signal(SIGSEGV, str); + force_sig_pkuerr((void __user *)far, pkey); +} + void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str) { @@ -601,18 +607,26 @@ static void ctr_read_handler(unsigned long esr, struct pt_regs *regs) static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs) { - int rt = ESR_ELx_SYS64_ISS_RT(esr); + if (test_thread_flag(TIF_TSC_SIGSEGV)) { + force_sig(SIGSEGV); + } else { + int rt = ESR_ELx_SYS64_ISS_RT(esr); - pt_regs_write_reg(regs, rt, arch_timer_read_counter()); - arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); + pt_regs_write_reg(regs, rt, arch_timer_read_counter()); + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); + } } static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) { - int rt = ESR_ELx_SYS64_ISS_RT(esr); + if (test_thread_flag(TIF_TSC_SIGSEGV)) { + force_sig(SIGSEGV); + } else { + int rt = ESR_ELx_SYS64_ISS_RT(esr); - pt_regs_write_reg(regs, rt, arch_timer_get_rate()); - arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); + 
pt_regs_write_reg(regs, rt, arch_timer_get_rate()); + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); + } } static void mrs_handler(unsigned long esr, struct pt_regs *regs) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 89b6e78400023d..706c9c3a7a50a4 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -34,12 +34,6 @@ enum vdso_abi { VDSO_ABI_AA32, }; -enum vvar_pages { - VVAR_DATA_PAGE_OFFSET, - VVAR_TIMENS_PAGE_OFFSET, - VVAR_NR_PAGES, -}; - struct vdso_abi_info { const char *name; const char *vdso_code_start; diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index d11da6461278f3..35685c0360441d 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -9,7 +9,7 @@ # Include the generic Makefile to check the built vdso. include $(srctree)/lib/vdso/Makefile -obj-vdso := vgettimeofday.o note.o sigreturn.o +obj-vdso := vgettimeofday.o note.o sigreturn.o vgetrandom.o vgetrandom-chacha.o # Build rules targets := $(obj-vdso) vdso.so vdso.so.dbg @@ -34,19 +34,28 @@ ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO # -Wmissing-prototypes and -Wmissing-declarations are removed from -# the CFLAGS of vgettimeofday.c to make possible to build the -# kernel with CONFIG_WERROR enabled. -CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \ - $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \ - $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \ - -Wmissing-prototypes -Wmissing-declarations +# the CFLAGS to make possible to build the kernel with CONFIG_WERROR enabled. +CC_FLAGS_REMOVE_VDSO := $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) \ + $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) \ + $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \ + -Wmissing-prototypes -Wmissing-declarations -CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables +CC_FLAGS_ADD_VDSO := -O2 -mcmodel=tiny -fasynchronous-unwind-tables + +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_REMOVE_VDSO) +CFLAGS_REMOVE_vgetrandom.o = $(CC_FLAGS_REMOVE_VDSO) + +CFLAGS_vgettimeofday.o = $(CC_FLAGS_ADD_VDSO) +CFLAGS_vgetrandom.o = $(CC_FLAGS_ADD_VDSO) ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) endif +ifneq ($(c-getrandom-y),) + CFLAGS_vgetrandom.o += -include $(c-getrandom-y) +endif + targets += vdso.lds CPPFLAGS_vdso.lds += -P -C -U$(ARCH) diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 45354f2ddf7069..f204a9ddc83359 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -11,7 +11,9 @@ #include #include #include +#include #include +#include OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64") OUTPUT_ARCH(aarch64) @@ -19,6 +21,7 @@ OUTPUT_ARCH(aarch64) SECTIONS { PROVIDE(_vdso_data = . 
- __VVAR_PAGES * PAGE_SIZE); + PROVIDE(_vdso_rng_data = _vdso_data + __VDSO_RND_DATA_OFFSET); #ifdef CONFIG_TIME_NS PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); #endif @@ -102,6 +105,7 @@ VERSION __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; + __kernel_getrandom; local: *; }; } diff --git a/arch/arm64/kernel/vdso/vgetrandom-chacha.S b/arch/arm64/kernel/vdso/vgetrandom-chacha.S new file mode 100644 index 00000000000000..67890b4453092e --- /dev/null +++ b/arch/arm64/kernel/vdso/vgetrandom-chacha.S @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + + .text + +#define state0 v0 +#define state1 v1 +#define state2 v2 +#define state3 v3 +#define copy0 v4 +#define copy0_q q4 +#define copy1 v5 +#define copy2 v6 +#define copy3 v7 +#define copy3_d d7 +#define one_d d16 +#define one_q q16 +#define one_v v16 +#define tmp v17 +#define rot8 v18 + +/* + * ARM64 ChaCha20 implementation meant for vDSO. Produces a given positive + * number of blocks of output with nonce 0, taking an input key and 8-bytes + * counter. Importantly does not spill to the stack. + * + * This implementation avoids d8-d15 because they are callee-save in user + * space. + * + * void __arch_chacha20_blocks_nostack(uint8_t *dst_bytes, + * const uint8_t *key, + * uint32_t *counter, + * size_t nblocks) + * + * x0: output bytes + * x1: 32-byte key input + * x2: 8-byte counter input/output + * x3: number of 64-byte block to write to output + */ +SYM_FUNC_START(__arch_chacha20_blocks_nostack) + + /* copy0 = "expand 32-byte k" */ + mov_q x8, 0x3320646e61707865 + mov_q x9, 0x6b20657479622d32 + mov copy0.d[0], x8 + mov copy0.d[1], x9 + + /* copy1,copy2 = key */ + ld1 { copy1.4s, copy2.4s }, [x1] + /* copy3 = counter || zero nonce */ + ld1 { copy3.2s }, [x2] + + movi one_v.2s, #1 + uzp1 one_v.4s, one_v.4s, one_v.4s + +.Lblock: + /* copy state to auxiliary vectors for the final add after the permute. */ + mov state0.16b, copy0.16b + mov state1.16b, copy1.16b + mov state2.16b, copy2.16b + mov state3.16b, copy3.16b + + mov w4, 20 +.Lpermute: + /* + * Permute one 64-byte block where the state matrix is stored in the four NEON + * registers state0-state3. It performs matrix operations on four words in parallel, + * but requires shuffling to rearrange the words after each round. 
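+ *
+ * Each double round below is four quarter-rounds on the columns
+ * (rotates by 16, 12, 8 and 7), followed by four quarter-rounds on the
+ * diagonals once the "ext" instructions have rotated the words into place.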
+ */ + +.Ldoubleround: + /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */ + add state0.4s, state0.4s, state1.4s + eor state3.16b, state3.16b, state0.16b + rev32 state3.8h, state3.8h + + /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */ + add state2.4s, state2.4s, state3.4s + eor tmp.16b, state1.16b, state2.16b + shl state1.4s, tmp.4s, #12 + sri state1.4s, tmp.4s, #20 + + /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */ + add state0.4s, state0.4s, state1.4s + eor tmp.16b, state3.16b, state0.16b + shl state3.4s, tmp.4s, #8 + sri state3.4s, tmp.4s, #24 + + /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */ + add state2.4s, state2.4s, state3.4s + eor tmp.16b, state1.16b, state2.16b + shl state1.4s, tmp.4s, #7 + sri state1.4s, tmp.4s, #25 + + /* state1[0,1,2,3] = state1[1,2,3,0] */ + ext state1.16b, state1.16b, state1.16b, #4 + /* state2[0,1,2,3] = state2[2,3,0,1] */ + ext state2.16b, state2.16b, state2.16b, #8 + /* state3[0,1,2,3] = state3[1,2,3,0] */ + ext state3.16b, state3.16b, state3.16b, #12 + + /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */ + add state0.4s, state0.4s, state1.4s + eor state3.16b, state3.16b, state0.16b + rev32 state3.8h, state3.8h + + /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */ + add state2.4s, state2.4s, state3.4s + eor tmp.16b, state1.16b, state2.16b + shl state1.4s, tmp.4s, #12 + sri state1.4s, tmp.4s, #20 + + /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */ + add state0.4s, state0.4s, state1.4s + eor tmp.16b, state3.16b, state0.16b + shl state3.4s, tmp.4s, #8 + sri state3.4s, tmp.4s, #24 + + /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */ + add state2.4s, state2.4s, state3.4s + eor tmp.16b, state1.16b, state2.16b + shl state1.4s, tmp.4s, #7 + sri state1.4s, tmp.4s, #25 + + /* state1[0,1,2,3] = state1[3,0,1,2] */ + ext state1.16b, state1.16b, state1.16b, #12 + /* state2[0,1,2,3] = state2[2,3,0,1] */ + ext state2.16b, state2.16b, state2.16b, #8 + /* state3[0,1,2,3] = state3[1,2,3,0] */ + ext state3.16b, state3.16b, state3.16b, #4 + + subs w4, w4, #2 + b.ne .Ldoubleround + + /* output0 = state0 + state0 */ + add state0.4s, state0.4s, copy0.4s + /* output1 = state1 + state1 */ + add state1.4s, state1.4s, copy1.4s + /* output2 = state2 + state2 */ + add state2.4s, state2.4s, copy2.4s + /* output2 = state3 + state3 */ + add state3.4s, state3.4s, copy3.4s + st1 { state0.16b - state3.16b }, [x0] + + /* + * ++copy3.counter, the 'add' clears the upper half of the SIMD register + * which is the expected behaviour here. + */ + add copy3_d, copy3_d, one_d + + /* output += 64, --nblocks */ + add x0, x0, 64 + subs x3, x3, #1 + b.ne .Lblock + + /* counter = copy3.counter */ + st1 { copy3.2s }, [x2] + + /* Zero out the potentially sensitive regs, in case nothing uses these again. 
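+ * state0-state3 and the key copies (copy1/copy2) are cleared below;
+ * copy0 only ever holds the public ChaCha constant, and copy3 is the
+ * counter that was just written back to the caller.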
*/ + movi state0.16b, #0 + movi state1.16b, #0 + movi state2.16b, #0 + movi state3.16b, #0 + movi copy1.16b, #0 + movi copy2.16b, #0 + ret +SYM_FUNC_END(__arch_chacha20_blocks_nostack) + +emit_aarch64_feature_1_and diff --git a/arch/arm64/kernel/vdso/vgetrandom.c b/arch/arm64/kernel/vdso/vgetrandom.c new file mode 100644 index 00000000000000..832fe195292b34 --- /dev/null +++ b/arch/arm64/kernel/vdso/vgetrandom.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +typeof(__cvdso_getrandom) __kernel_getrandom; + +ssize_t __kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len) +{ + if (alternative_has_cap_likely(ARM64_HAS_FPSIMD)) + return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len); + + if (unlikely(opaque_len == ~0UL && !buffer && !len && !flags)) + return -ENOSYS; + return getrandom_syscall(buffer, len, flags); +} diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 55a8e310ea12c6..58d89d997d050f 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -264,8 +264,7 @@ SECTIONS EXIT_DATA } - RUNTIME_CONST(shift, d_hash_shift) - RUNTIME_CONST(ptr, dentry_hashtable) + RUNTIME_CONST_VARIABLES PERCPU_SECTION(L1_CACHE_BYTES) HYPERVISOR_PERCPU_SECTION diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 8304eb342be9d9..ead632ad01b4cc 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -66,4 +66,21 @@ config PROTECTED_NVHE_STACKTRACE If unsure, or not using protected nVHE (pKVM), say N. +config PTDUMP_STAGE2_DEBUGFS + bool "Present the stage-2 pagetables to debugfs" + depends on KVM + depends on DEBUG_KERNEL + depends on DEBUG_FS + depends on GENERIC_PTDUMP + select PTDUMP_CORE + default n + help + Say Y here if you want to show the stage-2 kernel pagetables + layout in a debugfs file. This information is only useful for kernel developers + who are working in architecture specific areas of the kernel. + It is probably not a good idea to enable this feature in a production + kernel. + + If in doubt, say N. 
+ endif # VIRTUALIZATION diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 86a629aaf0a13f..3cf7adb2b5038c 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -17,7 +17,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \ inject_fault.o va_layout.o handle_exit.o \ guest.o debug.o reset.o sys_regs.o stacktrace.o \ vgic-sys-reg-v3.o fpsimd.o pkvm.o \ - arch_timer.o trng.o vmid.o emulate-nested.o nested.o \ + arch_timer.o trng.o vmid.o emulate-nested.o nested.o at.o \ vgic/vgic.o vgic/vgic-init.o \ vgic/vgic-irqfd.o vgic/vgic-v2.o \ vgic/vgic-v3.o vgic/vgic-v4.o \ @@ -27,6 +27,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \ kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o +kvm-$(CONFIG_PTDUMP_STAGE2_DEBUGFS) += ptdump.o always-y := hyp_constants.h hyp-constants.s diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 9bef7638342ef7..a0d01c46e40845 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -46,6 +46,8 @@ #include #include +#include "sys_regs.h" + static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT; enum kvm_wfx_trap_policy { @@ -228,6 +230,7 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) void kvm_arch_create_vm_debugfs(struct kvm *kvm) { kvm_sys_regs_create_debugfs(kvm); + kvm_s2_ptdump_create_debugfs(kvm); } static void kvm_destroy_mpidr_data(struct kvm *kvm) @@ -821,15 +824,13 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) return ret; } - if (vcpu_has_nv(vcpu)) { - ret = kvm_init_nv_sysregs(vcpu->kvm); - if (ret) - return ret; - } + ret = kvm_finalize_sys_regs(vcpu); + if (ret) + return ret; /* - * This needs to happen after NV has imposed its own restrictions on - * the feature set + * This needs to happen after any restriction has been applied + * to the feature set. */ kvm_calculate_traps(vcpu); @@ -2163,7 +2164,7 @@ static void cpu_hyp_uninit(void *discard) } } -int kvm_arch_hardware_enable(void) +int kvm_arch_enable_virtualization_cpu(void) { /* * Most calls to this function are made with migration @@ -2183,7 +2184,7 @@ int kvm_arch_hardware_enable(void) return 0; } -void kvm_arch_hardware_disable(void) +void kvm_arch_disable_virtualization_cpu(void) { kvm_timer_cpu_down(); kvm_vgic_cpu_down(); @@ -2379,7 +2380,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits) /* * The stub hypercalls are now disabled, so set our local flag to - * prevent a later re-init attempt in kvm_arch_hardware_enable(). + * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu(). 
*/ __this_cpu_write(kvm_hyp_initialized, 1); preempt_enable(); diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c new file mode 100644 index 00000000000000..39f0e87a340e82 --- /dev/null +++ b/arch/arm64/kvm/at.c @@ -0,0 +1,1101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017 - Linaro Ltd + * Author: Jintack Lim + */ + +#include + +#include +#include +#include + +enum trans_regime { + TR_EL10, + TR_EL20, + TR_EL2, +}; + +struct s1_walk_info { + u64 baddr; + enum trans_regime regime; + unsigned int max_oa_bits; + unsigned int pgshift; + unsigned int txsz; + int sl; + bool hpd; + bool be; + bool s2; +}; + +struct s1_walk_result { + union { + struct { + u64 desc; + u64 pa; + s8 level; + u8 APTable; + bool UXNTable; + bool PXNTable; + }; + struct { + u8 fst; + bool ptw; + bool s2; + }; + }; + bool failed; +}; + +static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool ptw, bool s2) +{ + wr->fst = fst; + wr->ptw = ptw; + wr->s2 = s2; + wr->failed = true; +} + +#define S1_MMU_DISABLED (-127) + +static int get_ia_size(struct s1_walk_info *wi) +{ + return 64 - wi->txsz; +} + +/* Return true if the IPA is out of the OA range */ +static bool check_output_size(u64 ipa, struct s1_walk_info *wi) +{ + return wi->max_oa_bits < 48 && (ipa & GENMASK_ULL(47, wi->max_oa_bits)); +} + +/* Return the translation regime that applies to an AT instruction */ +static enum trans_regime compute_translation_regime(struct kvm_vcpu *vcpu, u32 op) +{ + /* + * We only get here from guest EL2, so the translation + * regime AT applies to is solely defined by {E2H,TGE}. + */ + switch (op) { + case OP_AT_S1E2R: + case OP_AT_S1E2W: + case OP_AT_S1E2A: + return vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2; + break; + default: + return (vcpu_el2_e2h_is_set(vcpu) && + vcpu_el2_tge_is_set(vcpu)) ? TR_EL20 : TR_EL10; + } +} + +static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi, + struct s1_walk_result *wr, u64 va) +{ + u64 hcr, sctlr, tcr, tg, ps, ia_bits, ttbr; + unsigned int stride, x; + bool va55, tbi, lva, as_el0; + + hcr = __vcpu_sys_reg(vcpu, HCR_EL2); + + wi->regime = compute_translation_regime(vcpu, op); + as_el0 = (op == OP_AT_S1E0R || op == OP_AT_S1E0W); + + va55 = va & BIT(55); + + if (wi->regime == TR_EL2 && va55) + goto addrsz; + + wi->s2 = wi->regime == TR_EL10 && (hcr & (HCR_VM | HCR_DC)); + + switch (wi->regime) { + case TR_EL10: + sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); + tcr = vcpu_read_sys_reg(vcpu, TCR_EL1); + ttbr = (va55 ? + vcpu_read_sys_reg(vcpu, TTBR1_EL1) : + vcpu_read_sys_reg(vcpu, TTBR0_EL1)); + break; + case TR_EL2: + case TR_EL20: + sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2); + tcr = vcpu_read_sys_reg(vcpu, TCR_EL2); + ttbr = (va55 ? + vcpu_read_sys_reg(vcpu, TTBR1_EL2) : + vcpu_read_sys_reg(vcpu, TTBR0_EL2)); + break; + default: + BUG(); + } + + tbi = (wi->regime == TR_EL2 ? + FIELD_GET(TCR_EL2_TBI, tcr) : + (va55 ? + FIELD_GET(TCR_TBI1, tcr) : + FIELD_GET(TCR_TBI0, tcr))); + + if (!tbi && (u64)sign_extend64(va, 55) != va) + goto addrsz; + + va = (u64)sign_extend64(va, 55); + + /* Let's put the MMU disabled case aside immediately */ + switch (wi->regime) { + case TR_EL10: + /* + * If dealing with the EL1&0 translation regime, 3 things + * can disable the S1 translation: + * + * - HCR_EL2.DC = 1 + * - HCR_EL2.{E2H,TGE} = {0,1} + * - SCTLR_EL1.M = 0 + * + * The TGE part is interesting. If we have decided that this + * is EL1&0, then it means that either {E2H,TGE} == {1,0} or + * {0,x}, and we only need to test for TGE == 1. 
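+	 *
+	 * HCR_EL2.DC is folded into the same check below, as either bit on
+	 * its own is enough to treat the stage-1 MMU as disabled.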
+ */ + if (hcr & (HCR_DC | HCR_TGE)) { + wr->level = S1_MMU_DISABLED; + break; + } + fallthrough; + case TR_EL2: + case TR_EL20: + if (!(sctlr & SCTLR_ELx_M)) + wr->level = S1_MMU_DISABLED; + break; + } + + if (wr->level == S1_MMU_DISABLED) { + if (va >= BIT(kvm_get_pa_bits(vcpu->kvm))) + goto addrsz; + + wr->pa = va; + return 0; + } + + wi->be = sctlr & SCTLR_ELx_EE; + + wi->hpd = kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, HPDS, IMP); + wi->hpd &= (wi->regime == TR_EL2 ? + FIELD_GET(TCR_EL2_HPD, tcr) : + (va55 ? + FIELD_GET(TCR_HPD1, tcr) : + FIELD_GET(TCR_HPD0, tcr))); + + /* Someone was silly enough to encode TG0/TG1 differently */ + if (va55) { + wi->txsz = FIELD_GET(TCR_T1SZ_MASK, tcr); + tg = FIELD_GET(TCR_TG1_MASK, tcr); + + switch (tg << TCR_TG1_SHIFT) { + case TCR_TG1_4K: + wi->pgshift = 12; break; + case TCR_TG1_16K: + wi->pgshift = 14; break; + case TCR_TG1_64K: + default: /* IMPDEF: treat any other value as 64k */ + wi->pgshift = 16; break; + } + } else { + wi->txsz = FIELD_GET(TCR_T0SZ_MASK, tcr); + tg = FIELD_GET(TCR_TG0_MASK, tcr); + + switch (tg << TCR_TG0_SHIFT) { + case TCR_TG0_4K: + wi->pgshift = 12; break; + case TCR_TG0_16K: + wi->pgshift = 14; break; + case TCR_TG0_64K: + default: /* IMPDEF: treat any other value as 64k */ + wi->pgshift = 16; break; + } + } + + /* R_PLCGL, R_YXNYW */ + if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR2_EL1, ST, 48_47)) { + if (wi->txsz > 39) + goto transfault_l0; + } else { + if (wi->txsz > 48 || (BIT(wi->pgshift) == SZ_64K && wi->txsz > 47)) + goto transfault_l0; + } + + /* R_GTJBY, R_SXWGM */ + switch (BIT(wi->pgshift)) { + case SZ_4K: + lva = kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, TGRAN4, 52_BIT); + lva &= tcr & (wi->regime == TR_EL2 ? TCR_EL2_DS : TCR_DS); + break; + case SZ_16K: + lva = kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, TGRAN16, 52_BIT); + lva &= tcr & (wi->regime == TR_EL2 ? TCR_EL2_DS : TCR_DS); + break; + case SZ_64K: + lva = kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, VARange, 52); + break; + } + + if ((lva && wi->txsz < 12) || (!lva && wi->txsz < 16)) + goto transfault_l0; + + ia_bits = get_ia_size(wi); + + /* R_YYVYV, I_THCZK */ + if ((!va55 && va > GENMASK(ia_bits - 1, 0)) || + (va55 && va < GENMASK(63, ia_bits))) + goto transfault_l0; + + /* I_ZFSYQ */ + if (wi->regime != TR_EL2 && + (tcr & (va55 ? TCR_EPD1_MASK : TCR_EPD0_MASK))) + goto transfault_l0; + + /* R_BNDVG and following statements */ + if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, E0PD, IMP) && + as_el0 && (tcr & (va55 ? TCR_E0PD1 : TCR_E0PD0))) + goto transfault_l0; + + /* AArch64.S1StartLevel() */ + stride = wi->pgshift - 3; + wi->sl = 3 - (((ia_bits - 1) - wi->pgshift) / stride); + + ps = (wi->regime == TR_EL2 ? 
+ FIELD_GET(TCR_EL2_PS_MASK, tcr) : FIELD_GET(TCR_IPS_MASK, tcr)); + + wi->max_oa_bits = min(get_kvm_ipa_limit(), ps_to_output_size(ps)); + + /* Compute minimal alignment */ + x = 3 + ia_bits - ((3 - wi->sl) * stride + wi->pgshift); + + wi->baddr = ttbr & TTBRx_EL1_BADDR; + + /* R_VPBBF */ + if (check_output_size(wi->baddr, wi)) + goto addrsz; + + wi->baddr &= GENMASK_ULL(wi->max_oa_bits - 1, x); + + return 0; + +addrsz: /* Address Size Fault level 0 */ + fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false, false); + return -EFAULT; + +transfault_l0: /* Translation Fault level 0 */ + fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(0), false, false); + return -EFAULT; +} + +static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, + struct s1_walk_result *wr, u64 va) +{ + u64 va_top, va_bottom, baddr, desc; + int level, stride, ret; + + level = wi->sl; + stride = wi->pgshift - 3; + baddr = wi->baddr; + + va_top = get_ia_size(wi) - 1; + + while (1) { + u64 index, ipa; + + va_bottom = (3 - level) * stride + wi->pgshift; + index = (va & GENMASK_ULL(va_top, va_bottom)) >> (va_bottom - 3); + + ipa = baddr | index; + + if (wi->s2) { + struct kvm_s2_trans s2_trans = {}; + + ret = kvm_walk_nested_s2(vcpu, ipa, &s2_trans); + if (ret) { + fail_s1_walk(wr, + (s2_trans.esr & ~ESR_ELx_FSC_LEVEL) | level, + true, true); + return ret; + } + + if (!kvm_s2_trans_readable(&s2_trans)) { + fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level), + true, true); + + return -EPERM; + } + + ipa = kvm_s2_trans_output(&s2_trans); + } + + ret = kvm_read_guest(vcpu->kvm, ipa, &desc, sizeof(desc)); + if (ret) { + fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level), + true, false); + return ret; + } + + if (wi->be) + desc = be64_to_cpu((__force __be64)desc); + else + desc = le64_to_cpu((__force __le64)desc); + + /* Invalid descriptor */ + if (!(desc & BIT(0))) + goto transfault; + + /* Block mapping, check validity down the line */ + if (!(desc & BIT(1))) + break; + + /* Page mapping */ + if (level == 3) + break; + + /* Table handling */ + if (!wi->hpd) { + wr->APTable |= FIELD_GET(S1_TABLE_AP, desc); + wr->UXNTable |= FIELD_GET(PMD_TABLE_UXN, desc); + wr->PXNTable |= FIELD_GET(PMD_TABLE_PXN, desc); + } + + baddr = desc & GENMASK_ULL(47, wi->pgshift); + + /* Check for out-of-range OA */ + if (check_output_size(baddr, wi)) + goto addrsz; + + /* Prepare for next round */ + va_top = va_bottom - 1; + level++; + } + + /* Block mapping, check the validity of the level */ + if (!(desc & BIT(1))) { + bool valid_block = false; + + switch (BIT(wi->pgshift)) { + case SZ_4K: + valid_block = level == 1 || level == 2; + break; + case SZ_16K: + case SZ_64K: + valid_block = level == 2; + break; + } + + if (!valid_block) + goto transfault; + } + + if (check_output_size(desc & GENMASK(47, va_bottom), wi)) + goto addrsz; + + va_bottom += contiguous_bit_shift(desc, wi, level); + + wr->failed = false; + wr->level = level; + wr->desc = desc; + wr->pa = desc & GENMASK(47, va_bottom); + wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0); + + return 0; + +addrsz: + fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), true, false); + return -EINVAL; +transfault: + fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), true, false); + return -ENOENT; +} + +struct mmu_config { + u64 ttbr0; + u64 ttbr1; + u64 tcr; + u64 mair; + u64 sctlr; + u64 vttbr; + u64 vtcr; + u64 hcr; +}; + +static void __mmu_config_save(struct mmu_config *config) +{ + config->ttbr0 = read_sysreg_el1(SYS_TTBR0); + config->ttbr1 = read_sysreg_el1(SYS_TTBR1); + config->tcr = read_sysreg_el1(SYS_TCR); + config->mair = 
read_sysreg_el1(SYS_MAIR); + config->sctlr = read_sysreg_el1(SYS_SCTLR); + config->vttbr = read_sysreg(vttbr_el2); + config->vtcr = read_sysreg(vtcr_el2); + config->hcr = read_sysreg(hcr_el2); +} + +static void __mmu_config_restore(struct mmu_config *config) +{ + write_sysreg(config->hcr, hcr_el2); + + /* + * ARM errata 1165522 and 1530923 require TGE to be 1 before + * we update the guest state. + */ + asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); + + write_sysreg_el1(config->ttbr0, SYS_TTBR0); + write_sysreg_el1(config->ttbr1, SYS_TTBR1); + write_sysreg_el1(config->tcr, SYS_TCR); + write_sysreg_el1(config->mair, SYS_MAIR); + write_sysreg_el1(config->sctlr, SYS_SCTLR); + write_sysreg(config->vttbr, vttbr_el2); + write_sysreg(config->vtcr, vtcr_el2); +} + +static bool at_s1e1p_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) +{ + u64 host_pan; + bool fail; + + host_pan = read_sysreg_s(SYS_PSTATE_PAN); + write_sysreg_s(*vcpu_cpsr(vcpu) & PSTATE_PAN, SYS_PSTATE_PAN); + + switch (op) { + case OP_AT_S1E1RP: + fail = __kvm_at(OP_AT_S1E1RP, vaddr); + break; + case OP_AT_S1E1WP: + fail = __kvm_at(OP_AT_S1E1WP, vaddr); + break; + } + + write_sysreg_s(host_pan, SYS_PSTATE_PAN); + + return fail; +} + +#define MEMATTR(ic, oc) (MEMATTR_##oc << 4 | MEMATTR_##ic) +#define MEMATTR_NC 0b0100 +#define MEMATTR_Wt 0b1000 +#define MEMATTR_Wb 0b1100 +#define MEMATTR_WbRaWa 0b1111 + +#define MEMATTR_IS_DEVICE(m) (((m) & GENMASK(7, 4)) == 0) + +static u8 s2_memattr_to_attr(u8 memattr) +{ + memattr &= 0b1111; + + switch (memattr) { + case 0b0000: + case 0b0001: + case 0b0010: + case 0b0011: + return memattr << 2; + case 0b0100: + return MEMATTR(Wb, Wb); + case 0b0101: + return MEMATTR(NC, NC); + case 0b0110: + return MEMATTR(Wt, NC); + case 0b0111: + return MEMATTR(Wb, NC); + case 0b1000: + /* Reserved, assume NC */ + return MEMATTR(NC, NC); + case 0b1001: + return MEMATTR(NC, Wt); + case 0b1010: + return MEMATTR(Wt, Wt); + case 0b1011: + return MEMATTR(Wb, Wt); + case 0b1100: + /* Reserved, assume NC */ + return MEMATTR(NC, NC); + case 0b1101: + return MEMATTR(NC, Wb); + case 0b1110: + return MEMATTR(Wt, Wb); + case 0b1111: + return MEMATTR(Wb, Wb); + default: + unreachable(); + } +} + +static u8 combine_s1_s2_attr(u8 s1, u8 s2) +{ + bool transient; + u8 final = 0; + + /* Upgrade transient s1 to non-transient to simplify things */ + switch (s1) { + case 0b0001 ... 0b0011: /* Normal, Write-Through Transient */ + transient = true; + s1 = MEMATTR_Wt | (s1 & GENMASK(1,0)); + break; + case 0b0101 ... 
0b0111: /* Normal, Write-Back Transient */ + transient = true; + s1 = MEMATTR_Wb | (s1 & GENMASK(1,0)); + break; + default: + transient = false; + } + + /* S2CombineS1AttrHints() */ + if ((s1 & GENMASK(3, 2)) == MEMATTR_NC || + (s2 & GENMASK(3, 2)) == MEMATTR_NC) + final = MEMATTR_NC; + else if ((s1 & GENMASK(3, 2)) == MEMATTR_Wt || + (s2 & GENMASK(3, 2)) == MEMATTR_Wt) + final = MEMATTR_Wt; + else + final = MEMATTR_Wb; + + if (final != MEMATTR_NC) { + /* Inherit RaWa hints form S1 */ + if (transient) { + switch (s1 & GENMASK(3, 2)) { + case MEMATTR_Wt: + final = 0; + break; + case MEMATTR_Wb: + final = MEMATTR_NC; + break; + } + } + + final |= s1 & GENMASK(1, 0); + } + + return final; +} + +#define ATTR_NSH 0b00 +#define ATTR_RSV 0b01 +#define ATTR_OSH 0b10 +#define ATTR_ISH 0b11 + +static u8 compute_sh(u8 attr, u64 desc) +{ + u8 sh; + + /* Any form of device, as well as NC has SH[1:0]=0b10 */ + if (MEMATTR_IS_DEVICE(attr) || attr == MEMATTR(NC, NC)) + return ATTR_OSH; + + sh = FIELD_GET(PTE_SHARED, desc); + if (sh == ATTR_RSV) /* Reserved, mapped to NSH */ + sh = ATTR_NSH; + + return sh; +} + +static u8 combine_sh(u8 s1_sh, u8 s2_sh) +{ + if (s1_sh == ATTR_OSH || s2_sh == ATTR_OSH) + return ATTR_OSH; + if (s1_sh == ATTR_ISH || s2_sh == ATTR_ISH) + return ATTR_ISH; + + return ATTR_NSH; +} + +static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par, + struct kvm_s2_trans *tr) +{ + u8 s1_parattr, s2_memattr, final_attr; + u64 par; + + /* If S2 has failed to translate, report the damage */ + if (tr->esr) { + par = SYS_PAR_EL1_RES1; + par |= SYS_PAR_EL1_F; + par |= SYS_PAR_EL1_S; + par |= FIELD_PREP(SYS_PAR_EL1_FST, tr->esr); + return par; + } + + s1_parattr = FIELD_GET(SYS_PAR_EL1_ATTR, s1_par); + s2_memattr = FIELD_GET(GENMASK(5, 2), tr->desc); + + if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_FWB) { + if (!kvm_has_feat(vcpu->kvm, ID_AA64PFR2_EL1, MTEPERM, IMP)) + s2_memattr &= ~BIT(3); + + /* Combination of R_VRJSW and R_RHWZM */ + switch (s2_memattr) { + case 0b0101: + if (MEMATTR_IS_DEVICE(s1_parattr)) + final_attr = s1_parattr; + else + final_attr = MEMATTR(NC, NC); + break; + case 0b0110: + case 0b1110: + final_attr = MEMATTR(WbRaWa, WbRaWa); + break; + case 0b0111: + case 0b1111: + /* Preserve S1 attribute */ + final_attr = s1_parattr; + break; + case 0b0100: + case 0b1100: + case 0b1101: + /* Reserved, do something non-silly */ + final_attr = s1_parattr; + break; + default: + /* MemAttr[2]=0, Device from S2 */ + final_attr = s2_memattr & GENMASK(1,0) << 2; + } + } else { + /* Combination of R_HMNDG, R_TNHFM and R_GQFSF */ + u8 s2_parattr = s2_memattr_to_attr(s2_memattr); + + if (MEMATTR_IS_DEVICE(s1_parattr) || + MEMATTR_IS_DEVICE(s2_parattr)) { + final_attr = min(s1_parattr, s2_parattr); + } else { + /* At this stage, this is memory vs memory */ + final_attr = combine_s1_s2_attr(s1_parattr & 0xf, + s2_parattr & 0xf); + final_attr |= combine_s1_s2_attr(s1_parattr >> 4, + s2_parattr >> 4) << 4; + } + } + + if ((__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_CD) && + !MEMATTR_IS_DEVICE(final_attr)) + final_attr = MEMATTR(NC, NC); + + par = FIELD_PREP(SYS_PAR_EL1_ATTR, final_attr); + par |= tr->output & GENMASK(47, 12); + par |= FIELD_PREP(SYS_PAR_EL1_SH, + combine_sh(FIELD_GET(SYS_PAR_EL1_SH, s1_par), + compute_sh(final_attr, tr->desc))); + + return par; +} + +static u64 compute_par_s1(struct kvm_vcpu *vcpu, struct s1_walk_result *wr, + enum trans_regime regime) +{ + u64 par; + + if (wr->failed) { + par = SYS_PAR_EL1_RES1; + par |= SYS_PAR_EL1_F; + par |= FIELD_PREP(SYS_PAR_EL1_FST, wr->fst); + par 
|= wr->ptw ? SYS_PAR_EL1_PTW : 0; + par |= wr->s2 ? SYS_PAR_EL1_S : 0; + } else if (wr->level == S1_MMU_DISABLED) { + /* MMU off or HCR_EL2.DC == 1 */ + par = SYS_PAR_EL1_NSE; + par |= wr->pa & GENMASK_ULL(47, 12); + + if (regime == TR_EL10 && + (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_DC)) { + par |= FIELD_PREP(SYS_PAR_EL1_ATTR, + MEMATTR(WbRaWa, WbRaWa)); + par |= FIELD_PREP(SYS_PAR_EL1_SH, ATTR_NSH); + } else { + par |= FIELD_PREP(SYS_PAR_EL1_ATTR, 0); /* nGnRnE */ + par |= FIELD_PREP(SYS_PAR_EL1_SH, ATTR_OSH); + } + } else { + u64 mair, sctlr; + u8 sh; + + par = SYS_PAR_EL1_NSE; + + mair = (regime == TR_EL10 ? + vcpu_read_sys_reg(vcpu, MAIR_EL1) : + vcpu_read_sys_reg(vcpu, MAIR_EL2)); + + mair >>= FIELD_GET(PTE_ATTRINDX_MASK, wr->desc) * 8; + mair &= 0xff; + + sctlr = (regime == TR_EL10 ? + vcpu_read_sys_reg(vcpu, SCTLR_EL1) : + vcpu_read_sys_reg(vcpu, SCTLR_EL2)); + + /* Force NC for memory if SCTLR_ELx.C is clear */ + if (!(sctlr & SCTLR_EL1_C) && !MEMATTR_IS_DEVICE(mair)) + mair = MEMATTR(NC, NC); + + par |= FIELD_PREP(SYS_PAR_EL1_ATTR, mair); + par |= wr->pa & GENMASK_ULL(47, 12); + + sh = compute_sh(mair, wr->desc); + par |= FIELD_PREP(SYS_PAR_EL1_SH, sh); + } + + return par; +} + +static bool pan3_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime) +{ + u64 sctlr; + + if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, PAN, PAN3)) + return false; + + if (regime == TR_EL10) + sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); + else + sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2); + + return sctlr & SCTLR_EL1_EPAN; +} + +static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) +{ + bool perm_fail, ur, uw, ux, pr, pw, px; + struct s1_walk_result wr = {}; + struct s1_walk_info wi = {}; + int ret, idx; + + ret = setup_s1_walk(vcpu, op, &wi, &wr, vaddr); + if (ret) + goto compute_par; + + if (wr.level == S1_MMU_DISABLED) + goto compute_par; + + idx = srcu_read_lock(&vcpu->kvm->srcu); + + ret = walk_s1(vcpu, &wi, &wr, vaddr); + + srcu_read_unlock(&vcpu->kvm->srcu, idx); + + if (ret) + goto compute_par; + + /* FIXME: revisit when adding indirect permission support */ + /* AArch64.S1DirectBasePermissions() */ + if (wi.regime != TR_EL2) { + switch (FIELD_GET(PTE_USER | PTE_RDONLY, wr.desc)) { + case 0b00: + pr = pw = true; + ur = uw = false; + break; + case 0b01: + pr = pw = ur = uw = true; + break; + case 0b10: + pr = true; + pw = ur = uw = false; + break; + case 0b11: + pr = ur = true; + pw = uw = false; + break; + } + + switch (wr.APTable) { + case 0b00: + break; + case 0b01: + ur = uw = false; + break; + case 0b10: + pw = uw = false; + break; + case 0b11: + pw = ur = uw = false; + break; + } + + /* We don't use px for anything yet, but hey... 
*/ + px = !((wr.desc & PTE_PXN) || wr.PXNTable || uw); + ux = !((wr.desc & PTE_UXN) || wr.UXNTable); + + if (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) { + bool pan; + + pan = *vcpu_cpsr(vcpu) & PSR_PAN_BIT; + pan &= ur || uw || (pan3_enabled(vcpu, wi.regime) && ux); + pw &= !pan; + pr &= !pan; + } + } else { + ur = uw = ux = false; + + if (!(wr.desc & PTE_RDONLY)) { + pr = pw = true; + } else { + pr = true; + pw = false; + } + + if (wr.APTable & BIT(1)) + pw = false; + + /* XN maps to UXN */ + px = !((wr.desc & PTE_UXN) || wr.UXNTable); + } + + perm_fail = false; + + switch (op) { + case OP_AT_S1E1RP: + case OP_AT_S1E1R: + case OP_AT_S1E2R: + perm_fail = !pr; + break; + case OP_AT_S1E1WP: + case OP_AT_S1E1W: + case OP_AT_S1E2W: + perm_fail = !pw; + break; + case OP_AT_S1E0R: + perm_fail = !ur; + break; + case OP_AT_S1E0W: + perm_fail = !uw; + break; + case OP_AT_S1E1A: + case OP_AT_S1E2A: + break; + default: + BUG(); + } + + if (perm_fail) + fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false, false); + +compute_par: + return compute_par_s1(vcpu, &wr, wi.regime); +} + +/* + * Return the PAR_EL1 value as the result of a valid translation. + * + * If the translation is unsuccessful, the value may only contain + * PAR_EL1.F, and cannot be taken at face value. It isn't an + * indication of the translation having failed, only that the fast + * path did not succeed, *unless* it indicates a S1 permission fault. + */ +static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) +{ + struct mmu_config config; + struct kvm_s2_mmu *mmu; + bool fail; + u64 par; + + par = SYS_PAR_EL1_F; + + /* + * We've trapped, so everything is live on the CPU. As we will + * be switching contexts behind everybody's back, disable + * interrupts while holding the mmu lock. + */ + guard(write_lock_irqsave)(&vcpu->kvm->mmu_lock); + + /* + * If HCR_EL2.{E2H,TGE} == {1,1}, the MMU context is already + * the right one (as we trapped from vEL2). If not, save the + * full MMU context. + */ + if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) + goto skip_mmu_switch; + + /* + * Obtaining the S2 MMU for a L2 is horribly racy, and we may not + * find it (recycled by another vcpu, for example). When this + * happens, admit defeat immediately and use the SW (slow) path. 
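+	 *
+	 * Returning with only PAR_EL1.F set is enough: the caller sees a
+	 * failure that is not a stage-1 permission fault and falls back to
+	 * handle_at_slow().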
+ */ + mmu = lookup_s2_mmu(vcpu); + if (!mmu) + return par; + + __mmu_config_save(&config); + + write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR0_EL1), SYS_TTBR0); + write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR1_EL1), SYS_TTBR1); + write_sysreg_el1(vcpu_read_sys_reg(vcpu, TCR_EL1), SYS_TCR); + write_sysreg_el1(vcpu_read_sys_reg(vcpu, MAIR_EL1), SYS_MAIR); + write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1), SYS_SCTLR); + __load_stage2(mmu, mmu->arch); + +skip_mmu_switch: + /* Clear TGE, enable S2 translation, we're rolling */ + write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2); + isb(); + + switch (op) { + case OP_AT_S1E1RP: + case OP_AT_S1E1WP: + fail = at_s1e1p_fast(vcpu, op, vaddr); + break; + case OP_AT_S1E1R: + fail = __kvm_at(OP_AT_S1E1R, vaddr); + break; + case OP_AT_S1E1W: + fail = __kvm_at(OP_AT_S1E1W, vaddr); + break; + case OP_AT_S1E0R: + fail = __kvm_at(OP_AT_S1E0R, vaddr); + break; + case OP_AT_S1E0W: + fail = __kvm_at(OP_AT_S1E0W, vaddr); + break; + case OP_AT_S1E1A: + fail = __kvm_at(OP_AT_S1E1A, vaddr); + break; + default: + WARN_ON_ONCE(1); + fail = true; + break; + } + + if (!fail) + par = read_sysreg_par(); + + if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu))) + __mmu_config_restore(&config); + + return par; +} + +static bool par_check_s1_perm_fault(u64 par) +{ + u8 fst = FIELD_GET(SYS_PAR_EL1_FST, par); + + return ((fst & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM && + !(par & SYS_PAR_EL1_S)); +} + +void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) +{ + u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr); + + /* + * If PAR_EL1 reports that AT failed on a S1 permission fault, we + * know for sure that the PTW was able to walk the S1 tables and + * there's nothing else to do. + * + * If AT failed for any other reason, then we must walk the guest S1 + * to emulate the instruction. + */ + if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par)) + par = handle_at_slow(vcpu, op, vaddr); + + vcpu_write_sys_reg(vcpu, par, PAR_EL1); +} + +void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) +{ + u64 par; + + /* + * We've trapped, so everything is live on the CPU. As we will be + * switching context behind everybody's back, disable interrupts... 
+ */ + scoped_guard(write_lock_irqsave, &vcpu->kvm->mmu_lock) { + struct kvm_s2_mmu *mmu; + u64 val, hcr; + bool fail; + + mmu = &vcpu->kvm->arch.mmu; + + val = hcr = read_sysreg(hcr_el2); + val &= ~HCR_TGE; + val |= HCR_VM; + + if (!vcpu_el2_e2h_is_set(vcpu)) + val |= HCR_NV | HCR_NV1; + + write_sysreg(val, hcr_el2); + isb(); + + par = SYS_PAR_EL1_F; + + switch (op) { + case OP_AT_S1E2R: + fail = __kvm_at(OP_AT_S1E1R, vaddr); + break; + case OP_AT_S1E2W: + fail = __kvm_at(OP_AT_S1E1W, vaddr); + break; + case OP_AT_S1E2A: + fail = __kvm_at(OP_AT_S1E1A, vaddr); + break; + default: + WARN_ON_ONCE(1); + fail = true; + } + + isb(); + + if (!fail) + par = read_sysreg_par(); + + write_sysreg(hcr, hcr_el2); + isb(); + } + + /* We failed the translation, let's replay it in slow motion */ + if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par)) + par = handle_at_slow(vcpu, op, vaddr); + + vcpu_write_sys_reg(vcpu, par, PAR_EL1); +} + +void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr) +{ + struct kvm_s2_trans out = {}; + u64 ipa, par; + bool write; + int ret; + + /* Do the stage-1 translation */ + switch (op) { + case OP_AT_S12E1R: + op = OP_AT_S1E1R; + write = false; + break; + case OP_AT_S12E1W: + op = OP_AT_S1E1W; + write = true; + break; + case OP_AT_S12E0R: + op = OP_AT_S1E0R; + write = false; + break; + case OP_AT_S12E0W: + op = OP_AT_S1E0W; + write = true; + break; + default: + WARN_ON_ONCE(1); + return; + } + + __kvm_at_s1e01(vcpu, op, vaddr); + par = vcpu_read_sys_reg(vcpu, PAR_EL1); + if (par & SYS_PAR_EL1_F) + return; + + /* + * If we only have a single stage of translation (E2H=0 or + * TGE=1), exit early. Same thing if {VM,DC}=={0,0}. + */ + if (!vcpu_el2_e2h_is_set(vcpu) || vcpu_el2_tge_is_set(vcpu) || + !(vcpu_read_sys_reg(vcpu, HCR_EL2) & (HCR_VM | HCR_DC))) + return; + + /* Do the stage-2 translation */ + ipa = (par & GENMASK_ULL(47, 12)) | (vaddr & GENMASK_ULL(11, 0)); + out.esr = 0; + ret = kvm_walk_nested_s2(vcpu, ipa, &out); + if (ret < 0) + return; + + /* Check the access permission */ + if (!out.esr && + ((!write && !out.readable) || (write && !out.writable))) + out.esr = ESR_ELx_FSC_PERM_L(out.level & 0x3); + + par = compute_par_s12(vcpu, par, &out); + vcpu_write_sys_reg(vcpu, par, PAR_EL1); +} diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 05166eccea0a63..05b6435d02a97f 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -83,14 +83,20 @@ enum cgt_group_id { CGT_CPTR_TAM, CGT_CPTR_TCPAC, + CGT_HCRX_EnFPM, CGT_HCRX_TCR2En, + CGT_ICH_HCR_TC, + CGT_ICH_HCR_TALL0, + CGT_ICH_HCR_TALL1, + CGT_ICH_HCR_TDIR, + /* * Anything after this point is a combination of coarse trap * controls, which must all be evaluated to decide what to do. */ __MULTIPLE_CONTROL_BITS__, - CGT_HCR_IMO_FMO = __MULTIPLE_CONTROL_BITS__, + CGT_HCR_IMO_FMO_ICH_HCR_TC = __MULTIPLE_CONTROL_BITS__, CGT_HCR_TID2_TID4, CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB_TTLBOS, @@ -105,6 +111,8 @@ enum cgt_group_id { CGT_MDCR_TDE_TDRA, CGT_MDCR_TDCC_TDE_TDA, + CGT_ICH_HCR_TC_TDIR, + /* * Anything after this point requires a callback evaluating a * complex trap condition. Ugly stuff. 
@@ -372,12 +380,42 @@ static const struct trap_bits coarse_trap_bits[] = { .mask = CPTR_EL2_TCPAC, .behaviour = BEHAVE_FORWARD_ANY, }, + [CGT_HCRX_EnFPM] = { + .index = HCRX_EL2, + .value = 0, + .mask = HCRX_EL2_EnFPM, + .behaviour = BEHAVE_FORWARD_ANY, + }, [CGT_HCRX_TCR2En] = { .index = HCRX_EL2, .value = 0, .mask = HCRX_EL2_TCR2En, .behaviour = BEHAVE_FORWARD_ANY, }, + [CGT_ICH_HCR_TC] = { + .index = ICH_HCR_EL2, + .value = ICH_HCR_TC, + .mask = ICH_HCR_TC, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_ICH_HCR_TALL0] = { + .index = ICH_HCR_EL2, + .value = ICH_HCR_TALL0, + .mask = ICH_HCR_TALL0, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_ICH_HCR_TALL1] = { + .index = ICH_HCR_EL2, + .value = ICH_HCR_TALL1, + .mask = ICH_HCR_TALL1, + .behaviour = BEHAVE_FORWARD_ANY, + }, + [CGT_ICH_HCR_TDIR] = { + .index = ICH_HCR_EL2, + .value = ICH_HCR_TDIR, + .mask = ICH_HCR_TDIR, + .behaviour = BEHAVE_FORWARD_ANY, + }, }; #define MCB(id, ...) \ @@ -387,7 +425,6 @@ static const struct trap_bits coarse_trap_bits[] = { } static const enum cgt_group_id *coarse_control_combo[] = { - MCB(CGT_HCR_IMO_FMO, CGT_HCR_IMO, CGT_HCR_FMO), MCB(CGT_HCR_TID2_TID4, CGT_HCR_TID2, CGT_HCR_TID4), MCB(CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB, CGT_HCR_TTLBIS), MCB(CGT_HCR_TTLB_TTLBOS, CGT_HCR_TTLB, CGT_HCR_TTLBOS), @@ -402,6 +439,9 @@ static const enum cgt_group_id *coarse_control_combo[] = { MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA), MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA), MCB(CGT_MDCR_TDCC_TDE_TDA, CGT_MDCR_TDCC, CGT_MDCR_TDE, CGT_MDCR_TDA), + + MCB(CGT_HCR_IMO_FMO_ICH_HCR_TC, CGT_HCR_IMO, CGT_HCR_FMO, CGT_ICH_HCR_TC), + MCB(CGT_ICH_HCR_TC_TDIR, CGT_ICH_HCR_TC, CGT_ICH_HCR_TDIR), }; typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *); @@ -536,9 +576,9 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4), SR_RANGE_TRAP(SYS_ID_PFR0_EL1, sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3), - SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO), - SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO), - SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO), + SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC), + SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC), + SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC), SR_RANGE_TRAP(sys_reg(3, 0, 11, 0, 0), sys_reg(3, 0, 11, 15, 7), CGT_HCR_TIDCP), SR_RANGE_TRAP(sys_reg(3, 1, 11, 0, 0), @@ -786,6 +826,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(OP_AT_S12E1W, CGT_HCR_NV), SR_TRAP(OP_AT_S12E0R, CGT_HCR_NV), SR_TRAP(OP_AT_S12E0W, CGT_HCR_NV), + SR_TRAP(OP_AT_S1E2A, CGT_HCR_NV), SR_TRAP(OP_TLBI_IPAS2E1, CGT_HCR_NV), SR_TRAP(OP_TLBI_RIPAS2E1, CGT_HCR_NV), SR_TRAP(OP_TLBI_IPAS2LE1, CGT_HCR_NV), @@ -867,6 +908,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(OP_AT_S1E0W, CGT_HCR_AT), SR_TRAP(OP_AT_S1E1RP, CGT_HCR_AT), SR_TRAP(OP_AT_S1E1WP, CGT_HCR_AT), + SR_TRAP(OP_AT_S1E1A, CGT_HCR_AT), SR_TRAP(SYS_ERXPFGF_EL1, CGT_HCR_nFIEN), SR_TRAP(SYS_ERXPFGCTL_EL1, CGT_HCR_nFIEN), SR_TRAP(SYS_ERXPFGCDN_EL1, CGT_HCR_nFIEN), @@ -1108,6 +1150,35 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN), SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN), SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN), + SR_TRAP(SYS_FPMR, CGT_HCRX_EnFPM), + /* + * IMPDEF choice: + * We treat ICC_SRE_EL2.{SRE,Enable) and ICV_SRE_EL1.SRE as + * RAO/WI. 
We therefore never consider ICC_SRE_EL2.Enable for + * ICC_SRE_EL1 access, and always handle it locally. + */ + SR_TRAP(SYS_ICC_AP0R0_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_AP0R1_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_AP0R2_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_AP0R3_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_AP1R0_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_AP1R1_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_AP1R2_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_AP1R3_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_BPR0_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_BPR1_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_CTLR_EL1, CGT_ICH_HCR_TC), + SR_TRAP(SYS_ICC_DIR_EL1, CGT_ICH_HCR_TC_TDIR), + SR_TRAP(SYS_ICC_EOIR0_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_EOIR1_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_HPPIR0_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_HPPIR1_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_IAR0_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_IAR1_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_IGRPEN0_EL1, CGT_ICH_HCR_TALL0), + SR_TRAP(SYS_ICC_IGRPEN1_EL1, CGT_ICH_HCR_TALL1), + SR_TRAP(SYS_ICC_PMR_EL1, CGT_ICH_HCR_TC), + SR_TRAP(SYS_ICC_RPR_EL1, CGT_ICH_HCR_TC), }; static DEFINE_XARRAY(sr_forward_xa); diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index c53e5b14038dce..ea5484ce1f3ba3 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -63,6 +63,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) */ *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED; *host_data_ptr(fpsimd_state) = kern_hyp_va(¤t->thread.uw.fpsimd_state); + *host_data_ptr(fpmr_ptr) = kern_hyp_va(¤t->thread.uw.fpmr); vcpu_clear_flag(vcpu, HOST_SVE_ENABLED); if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) @@ -134,8 +135,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) fp_state.sve_state = vcpu->arch.sve_state; fp_state.sve_vl = vcpu->arch.sve_max_vl; fp_state.sme_state = NULL; - fp_state.svcr = &vcpu->arch.svcr; - fp_state.fpmr = &vcpu->arch.fpmr; + fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR); + fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR); fp_state.fp_type = &vcpu->arch.fp_type; if (vcpu_has_sve(vcpu)) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 11098eb7eb44af..962f985977c2d7 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -1045,6 +1045,11 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, mutex_lock(&kvm->slots_lock); + if (write && atomic_read(&kvm->nr_memslots_dirty_logging)) { + ret = -EBUSY; + goto out; + } + while (length > 0) { kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL); void *maddr; @@ -1059,6 +1064,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, page = pfn_to_online_page(pfn); if (!page) { /* Reject ZONE_DEVICE memory */ + kvm_release_pfn_clean(pfn); ret = -EFAULT; goto out; } diff --git a/arch/arm64/kvm/hyp/include/hyp/fault.h b/arch/arm64/kvm/hyp/include/hyp/fault.h index 9e13c1bc2ad541..17df94570f03a5 100644 --- a/arch/arm64/kvm/hyp/include/hyp/fault.h +++ b/arch/arm64/kvm/hyp/include/hyp/fault.h @@ -14,6 +14,7 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar) { + int ret; u64 par, tmp; /* @@ -27,7 +28,9 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar) * saved the guest context yet, and we may return early... */ par = read_sysreg_par(); - if (!__kvm_at("s1e1r", far)) + ret = system_supports_poe() ? 
__kvm_at(OP_AT_S1E1A, far) : + __kvm_at(OP_AT_S1E1R, far); + if (!ret) tmp = read_sysreg_par(); else tmp = SYS_PAR_EL1_F; /* back to the guest */ diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 37ff87d782b62b..5310fe1da6165b 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -338,7 +338,7 @@ static inline void __hyp_sve_save_host(void) struct cpu_sve_state *sve_state = *host_data_ptr(sve_state); sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR); - write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); + write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2); __sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl), &sve_state->fpsr, true); @@ -403,6 +403,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) else __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs); + if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) + write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR); + /* Skip restoring fpexc32 for AArch64 guests */ if (!(read_sysreg(hcr_el2) & HCR_RW)) write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2); diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index 4c0fdabaf8aec1..1579a3c08a36b9 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -16,9 +16,15 @@ #include #include +static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt); + static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) { ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1); + + // POR_EL0 can affect uaccess, so must be saved/restored early. + if (ctxt_has_s1poe(ctxt)) + ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0); } static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt) @@ -66,6 +72,17 @@ static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt) return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, TCRX, IMP); } +static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt) +{ + struct kvm_vcpu *vcpu; + + if (!system_supports_poe()) + return false; + + vcpu = ctxt_to_vcpu(ctxt); + return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1POE, IMP); +} + static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) { ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR); @@ -80,6 +97,9 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR); ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0); } + + if (ctxt_has_s1poe(ctxt)) + ctxt_sys_reg(ctxt, POR_EL1) = read_sysreg_el1(SYS_POR); } ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR); ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0); @@ -120,6 +140,10 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) { write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1); + + // POR_EL0 can affect uaccess, so must be saved/restored early. 
+ if (ctxt_has_s1poe(ctxt)) + write_sysreg_s(ctxt_sys_reg(ctxt, POR_EL0), SYS_POR_EL0); } static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt) @@ -158,6 +182,9 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR); write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0); } + + if (ctxt_has_s1poe(ctxt)) + write_sysreg_el1(ctxt_sys_reg(ctxt, POR_EL1), SYS_POR); } write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR); write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0); diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index e715c157c2c43e..e433dfab882aa5 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -426,9 +426,9 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res, return; } -static __always_inline void do_ffa_mem_xfer(const u64 func_id, - struct arm_smccc_res *res, - struct kvm_cpu_context *ctxt) +static void __do_ffa_mem_xfer(const u64 func_id, + struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) { DECLARE_REG(u32, len, ctxt, 1); DECLARE_REG(u32, fraglen, ctxt, 2); @@ -440,9 +440,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, u32 offset, nr_ranges; int ret = 0; - BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE && - func_id != FFA_FN64_MEM_LEND); - if (addr_mbz || npages_mbz || fraglen > len || fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) { ret = FFA_RET_INVALID_PARAMETERS; @@ -461,6 +458,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, goto out_unlock; } + if (len > ffa_desc_buf.len) { + ret = FFA_RET_NO_MEMORY; + goto out_unlock; + } + buf = hyp_buffers.tx; memcpy(buf, host_buffers.tx, fraglen); @@ -512,6 +514,13 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, goto out_unlock; } +#define do_ffa_mem_xfer(fid, res, ctxt) \ + do { \ + BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \ + (fid) != FFA_FN64_MEM_LEND); \ + __do_ffa_mem_xfer((fid), (res), (ctxt)); \ + } while (0); + static void do_ffa_mem_reclaim(struct arm_smccc_res *res, struct kvm_cpu_context *ctxt) { diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index 07120b37da3509..401af1835be6b7 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -130,7 +130,7 @@ alternative_else_nop_endif /* Invalidate the stale TLBs from Bootloader */ tlbi alle2 - tlbi vmalls12e1 + tlbi alle1 dsb sy mov_q x0, INIT_SCTLR_EL2_MMU_ON diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index f43d845f3c4ecd..fefc89209f9e41 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -33,7 +33,7 @@ static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu) */ sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2); __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true); - write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); + write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2); } static void __hyp_sve_restore_host(void) @@ -45,10 +45,11 @@ static void __hyp_sve_restore_host(void) * the host. The layout of the data when saving the sve state depends * on the VL, so use a consistent (i.e., the maximum) host VL. * - * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length - * supported by the system (or limited at EL3). 
+ * Note that this constrains the PE to the maximum shared VL + * that was discovered, if we wish to use larger VLs this will + * need to be revisited. */ - write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); + write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2); __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl), &sve_state->fpsr, true); @@ -62,6 +63,8 @@ static void fpsimd_sve_flush(void) static void fpsimd_sve_sync(struct kvm_vcpu *vcpu) { + bool has_fpmr; + if (!guest_owns_fp_regs()) return; @@ -73,11 +76,18 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu) else __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs); + has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm)); + if (has_fpmr) + __vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR); + if (system_supports_sve()) __hyp_sve_restore_host(); else __fpsimd_restore_state(*host_data_ptr(fpsimd_state)); + if (has_fpmr) + write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR); + *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED; } @@ -479,7 +489,8 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) case ESR_ELx_EC_SVE: cpacr_clear_set(0, CPACR_ELx_ZEN); isb(); - sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); + sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, + SYS_ZCR_EL2); break; case ESR_ELx_EC_IABT_LOW: case ESR_ELx_EC_DABT_LOW: diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 187a5f4d56c0c6..077d4098548d2c 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -574,12 +574,14 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, unlock: hyp_spin_unlock(&vm_table_lock); - if (ret) + if (ret) { unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu)); + return ret; + } hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu); - return ret; + return 0; } static void diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 8f5c56d5b1cdf5..cc69106734ca73 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -197,6 +197,15 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) } else { __fpsimd_save_state(*host_data_ptr(fpsimd_state)); } + + if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) { + u64 val = read_sysreg_s(SYS_FPMR); + + if (unlikely(is_protected_kvm_enabled())) + *host_data_ptr(fpmr) = val; + else + **host_data_ptr(fpmr_ptr) = val; + } } static const exit_handler_fn hyp_exit_handlers[] = { diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index ca3c09df8d7c96..48da9ca9763f6e 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -132,10 +132,10 @@ static void exit_vmid_context(struct tlb_inv_context *cxt) else __load_host_stage2(); - if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { - /* Ensure write of the old VMID */ - isb(); + /* Ensure write of the old VMID */ + isb(); + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { if (!(cxt->sctlr & SCTLR_ELx_M)) { write_sysreg_el1(cxt->sctlr, SYS_SCTLR); isb(); diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 9e2bbee7749136..b11bcebac908a7 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -17,48 +17,6 @@ #define KVM_PTE_TYPE_PAGE 1 #define KVM_PTE_TYPE_TABLE 1 -#define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2) - -#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2) -#define KVM_PTE_LEAF_ATTR_LO_S1_AP GENMASK(7, 6) -#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO \ - ({ 
cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; }) -#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW \ - ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; }) -#define KVM_PTE_LEAF_ATTR_LO_S1_SH GENMASK(9, 8) -#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS 3 -#define KVM_PTE_LEAF_ATTR_LO_S1_AF BIT(10) - -#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2) -#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R BIT(6) -#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W BIT(7) -#define KVM_PTE_LEAF_ATTR_LO_S2_SH GENMASK(9, 8) -#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS 3 -#define KVM_PTE_LEAF_ATTR_LO_S2_AF BIT(10) - -#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 50) - -#define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55) - -#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54) - -#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54) - -#define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50) - -#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \ - KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \ - KVM_PTE_LEAF_ATTR_HI_S2_XN) - -#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2) -#define KVM_MAX_OWNER_ID 1 - -/* - * Used to indicate a pte for which a 'break-before-make' sequence is in - * progress. - */ -#define KVM_INVALID_PTE_LOCKED BIT(10) - struct kvm_pgtable_walk_data { struct kvm_pgtable_walker *walker; @@ -1547,7 +1505,6 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx, */ new = kvm_init_table_pte(childp, mm_ops); stage2_make_pte(ctx, new); - dsb(ishst); return 0; } @@ -1559,8 +1516,11 @@ int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, .flags = KVM_PGTABLE_WALK_LEAF, .arg = mc, }; + int ret; - return kvm_pgtable_walk(pgt, addr, size, &walker); + ret = kvm_pgtable_walk(pgt, addr, size, &walker); + dsb(ishst); + return ret; } int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 7b397fad26f296..18d4677002b1a6 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -268,8 +268,16 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if) * starting to mess with the rest of the GIC, and VMCR_EL2 in * particular. This logic must be called before * __vgic_v3_restore_state(). + * + * However, if the vgic is disabled (ICH_HCR_EL2.EN==0), no GIC is + * provisioned at all. In order to prevent illegal accesses to the + * system registers to trap to EL1 (duh), force ICC_SRE_EL1.SRE to 1 + * so that the trap bits can take effect. Yes, we *loves* the GIC. */ - if (!cpu_if->vgic_sre) { + if (!(cpu_if->vgic_hcr & ICH_HCR_EN)) { + write_gicreg(ICC_SRE_EL1_SRE, ICC_SRE_EL1); + isb(); + } else if (!cpu_if->vgic_sre) { write_gicreg(0, ICC_SRE_EL1); isb(); write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2); @@ -288,8 +296,9 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if) } /* - * Prevent the guest from touching the GIC system registers if - * SRE isn't enabled for GICv3 emulation. + * Prevent the guest from touching the ICC_SRE_EL1 system + * register. Note that this may not have any effect, as + * ICC_SRE_EL2.Enable being RAO/WI is a valid implementation. */ write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); @@ -297,10 +306,11 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if) /* * If we need to trap system registers, we must write * ICH_HCR_EL2 anyway, even if no interrupts are being - * injected, + * injected. Note that this also applies if we don't expect + * any system register access (no vgic at all). 
*/ if (static_branch_unlikely(&vgic_v3_cpuif_trap) || - cpu_if->its_vpe.its_vm) + cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre) write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); } @@ -326,7 +336,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if) * no interrupts were being injected, and we disable it again here. */ if (static_branch_unlikely(&vgic_v3_cpuif_trap) || - cpu_if->its_vpe.its_vm) + cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre) write_gicreg(0, ICH_HCR_EL2); } @@ -1032,6 +1042,75 @@ static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt) write_gicreg(vmcr, ICH_VMCR_EL2); } +static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu, + u32 sysreg, bool is_read) +{ + u64 ich_hcr; + + if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) + return false; + + ich_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2); + + switch (sysreg) { + case SYS_ICC_IGRPEN0_EL1: + if (is_read && + (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1)) + return true; + + if (!is_read && + (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1)) + return true; + + fallthrough; + + case SYS_ICC_AP0Rn_EL1(0): + case SYS_ICC_AP0Rn_EL1(1): + case SYS_ICC_AP0Rn_EL1(2): + case SYS_ICC_AP0Rn_EL1(3): + case SYS_ICC_BPR0_EL1: + case SYS_ICC_EOIR0_EL1: + case SYS_ICC_HPPIR0_EL1: + case SYS_ICC_IAR0_EL1: + return ich_hcr & ICH_HCR_TALL0; + + case SYS_ICC_IGRPEN1_EL1: + if (is_read && + (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1)) + return true; + + if (!is_read && + (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1)) + return true; + + fallthrough; + + case SYS_ICC_AP1Rn_EL1(0): + case SYS_ICC_AP1Rn_EL1(1): + case SYS_ICC_AP1Rn_EL1(2): + case SYS_ICC_AP1Rn_EL1(3): + case SYS_ICC_BPR1_EL1: + case SYS_ICC_EOIR1_EL1: + case SYS_ICC_HPPIR1_EL1: + case SYS_ICC_IAR1_EL1: + return ich_hcr & ICH_HCR_TALL1; + + case SYS_ICC_DIR_EL1: + if (ich_hcr & ICH_HCR_TDIR) + return true; + + fallthrough; + + case SYS_ICC_RPR_EL1: + case SYS_ICC_CTLR_EL1: + case SYS_ICC_PMR_EL1: + return ich_hcr & ICH_HCR_TC; + + default: + return false; + } +} + int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu) { int rt; @@ -1041,6 +1120,9 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu) bool is_read; u32 sysreg; + if (kern_hyp_va(vcpu->kvm)->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3) + return 0; + esr = kvm_vcpu_get_esr(vcpu); if (vcpu_mode_is_32bit(vcpu)) { if (!kvm_condition_valid(vcpu)) { @@ -1055,6 +1137,9 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu) is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ; + if (__vgic_v3_check_trap_forwarding(vcpu, sysreg, is_read)) + return 0; + switch (sysreg) { case SYS_ICC_IAR0_EL1: case SYS_ICC_IAR1_EL1: diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 77010b76c150f3..80581b1c399595 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -312,6 +312,9 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code) static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) { __fpsimd_save_state(*host_data_ptr(fpsimd_state)); + + if (kvm_has_fpmr(vcpu->kvm)) + **host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR); } static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code) diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index bab27f9d8cc659..f9e30dd34c7a18 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -62,7 +62,6 @@ int kvm_vcpu_init_nested(struct 
kvm_vcpu *vcpu) */ num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU; tmp = kvrealloc(kvm->arch.nested_mmus, - size_mul(sizeof(*kvm->arch.nested_mmus), kvm->arch.nested_mmus_size), size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus), GFP_KERNEL_ACCOUNT | __GFP_ZERO); if (!tmp) @@ -103,20 +102,6 @@ struct s2_walk_info { bool be; }; -static unsigned int ps_to_output_size(unsigned int ps) -{ - switch (ps) { - case 0: return 32; - case 1: return 36; - case 2: return 40; - case 3: return 42; - case 4: return 44; - case 5: - default: - return 48; - } -} - static u32 compute_fsc(int level, u32 fsc) { return fsc | (level & 0x3); @@ -256,7 +241,7 @@ static int walk_nested_s2_pgd(phys_addr_t ipa, /* Check for valid descriptor at this point */ if (!(desc & 1) || ((desc & 3) == 1 && level == 3)) { out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT); - out->upper_attr = desc; + out->desc = desc; return 1; } @@ -266,7 +251,7 @@ static int walk_nested_s2_pgd(phys_addr_t ipa, if (check_output_size(wi, desc)) { out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ); - out->upper_attr = desc; + out->desc = desc; return 1; } @@ -278,27 +263,24 @@ static int walk_nested_s2_pgd(phys_addr_t ipa, if (level < first_block_level) { out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT); - out->upper_attr = desc; + out->desc = desc; return 1; } - /* - * We don't use the contiguous bit in the stage-2 ptes, so skip check - * for misprogramming of the contiguous bit. - */ - if (check_output_size(wi, desc)) { out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ); - out->upper_attr = desc; + out->desc = desc; return 1; } if (!(desc & BIT(10))) { out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS); - out->upper_attr = desc; + out->desc = desc; return 1; } + addr_bottom += contiguous_bit_shift(desc, wi, level); + /* Calculate and return the result */ paddr = (desc & GENMASK_ULL(47, addr_bottom)) | (ipa & GENMASK_ULL(addr_bottom - 1, 0)); @@ -307,7 +289,7 @@ static int walk_nested_s2_pgd(phys_addr_t ipa, out->readable = desc & (0b01 << 6); out->writable = desc & (0b10 << 6); out->level = level; - out->upper_attr = desc & GENMASK_ULL(63, 52); + out->desc = desc; return 0; } @@ -954,19 +936,16 @@ static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1) int kvm_init_nv_sysregs(struct kvm *kvm) { u64 res0, res1; - int ret = 0; - mutex_lock(&kvm->arch.config_lock); + lockdep_assert_held(&kvm->arch.config_lock); if (kvm->arch.sysreg_masks) - goto out; + return 0; kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)), GFP_KERNEL_ACCOUNT); - if (!kvm->arch.sysreg_masks) { - ret = -ENOMEM; - goto out; - } + if (!kvm->arch.sysreg_masks) + return -ENOMEM; limit_nv_id_regs(kvm); @@ -1195,8 +1174,13 @@ int kvm_init_nv_sysregs(struct kvm *kvm) if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1)) res0 |= ~(res0 | res1); set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1); -out: - mutex_unlock(&kvm->arch.config_lock); - return ret; + /* SCTLR_EL1 */ + res0 = SCTLR_EL1_RES0; + res1 = SCTLR_EL1_RES1; + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN3)) + res0 |= SCTLR_EL1_EPAN; + set_sysreg_masks(kvm, SCTLR_EL1, res0, res1); + + return 0; } diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 82a2a003259cdc..ac36c438b8c18c 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -233,7 +233,7 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) int i; struct kvm_pmu *pmu = &vcpu->arch.pmu; - for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) + for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) 
pmu->pmc[i].idx = i; } @@ -260,7 +260,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) { int i; - for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) + for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i)); irq_work_sync(&vcpu->arch.pmu.overflow_work); } @@ -291,7 +291,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val) return; - for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) { struct kvm_pmc *pmc; if (!(val & BIT(i))) @@ -323,7 +323,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) if (!kvm_vcpu_has_pmu(vcpu) || !val) return; - for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { + for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) { struct kvm_pmc *pmc; if (!(val & BIT(i))) @@ -910,10 +910,10 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) struct arm_pmu *arm_pmu = kvm->arch.arm_pmu; /* - * The arm_pmu->num_events considers the cycle counter as well. - * Ignore that and return only the general-purpose counters. + * The arm_pmu->cntr_mask considers the fixed counter(s) as well. + * Ignore those and return only the general-purpose counters. */ - return arm_pmu->num_events - 1; + return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS); } static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 329819806096b8..0b3adf3e17b49e 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -5,6 +5,8 @@ */ #include #include +#include +#include static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events); @@ -35,7 +37,7 @@ struct kvm_pmu_events *kvm_get_pmu_events(void) * Add events to track that we may want to switch at guest entry/exit * time. 
*/ -void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) +void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) { struct kvm_pmu_events *pmu = kvm_get_pmu_events(); @@ -51,7 +53,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) /* * Stop tracking events */ -void kvm_clr_pmu_events(u32 clr) +void kvm_clr_pmu_events(u64 clr) { struct kvm_pmu_events *pmu = kvm_get_pmu_events(); @@ -62,79 +64,32 @@ void kvm_clr_pmu_events(u32 clr) pmu->events_guest &= ~clr; } -#define PMEVTYPER_READ_CASE(idx) \ - case idx: \ - return read_sysreg(pmevtyper##idx##_el0) - -#define PMEVTYPER_WRITE_CASE(idx) \ - case idx: \ - write_sysreg(val, pmevtyper##idx##_el0); \ - break - -#define PMEVTYPER_CASES(readwrite) \ - PMEVTYPER_##readwrite##_CASE(0); \ - PMEVTYPER_##readwrite##_CASE(1); \ - PMEVTYPER_##readwrite##_CASE(2); \ - PMEVTYPER_##readwrite##_CASE(3); \ - PMEVTYPER_##readwrite##_CASE(4); \ - PMEVTYPER_##readwrite##_CASE(5); \ - PMEVTYPER_##readwrite##_CASE(6); \ - PMEVTYPER_##readwrite##_CASE(7); \ - PMEVTYPER_##readwrite##_CASE(8); \ - PMEVTYPER_##readwrite##_CASE(9); \ - PMEVTYPER_##readwrite##_CASE(10); \ - PMEVTYPER_##readwrite##_CASE(11); \ - PMEVTYPER_##readwrite##_CASE(12); \ - PMEVTYPER_##readwrite##_CASE(13); \ - PMEVTYPER_##readwrite##_CASE(14); \ - PMEVTYPER_##readwrite##_CASE(15); \ - PMEVTYPER_##readwrite##_CASE(16); \ - PMEVTYPER_##readwrite##_CASE(17); \ - PMEVTYPER_##readwrite##_CASE(18); \ - PMEVTYPER_##readwrite##_CASE(19); \ - PMEVTYPER_##readwrite##_CASE(20); \ - PMEVTYPER_##readwrite##_CASE(21); \ - PMEVTYPER_##readwrite##_CASE(22); \ - PMEVTYPER_##readwrite##_CASE(23); \ - PMEVTYPER_##readwrite##_CASE(24); \ - PMEVTYPER_##readwrite##_CASE(25); \ - PMEVTYPER_##readwrite##_CASE(26); \ - PMEVTYPER_##readwrite##_CASE(27); \ - PMEVTYPER_##readwrite##_CASE(28); \ - PMEVTYPER_##readwrite##_CASE(29); \ - PMEVTYPER_##readwrite##_CASE(30) - /* * Read a value direct from PMEVTYPER where idx is 0-30 - * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). + * or PMxCFILTR_EL0 where idx is 31-32. */ static u64 kvm_vcpu_pmu_read_evtype_direct(int idx) { - switch (idx) { - PMEVTYPER_CASES(READ); - case ARMV8_PMU_CYCLE_IDX: - return read_sysreg(pmccfiltr_el0); - default: - WARN_ON(1); - } + if (idx == ARMV8_PMU_CYCLE_IDX) + return read_pmccfiltr(); + else if (idx == ARMV8_PMU_INSTR_IDX) + return read_pmicfiltr(); - return 0; + return read_pmevtypern(idx); } /* * Write a value direct to PMEVTYPER where idx is 0-30 - * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). + * or PMxCFILTR_EL0 where idx is 31-32. 
*/ static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val) { - switch (idx) { - PMEVTYPER_CASES(WRITE); - case ARMV8_PMU_CYCLE_IDX: - write_sysreg(val, pmccfiltr_el0); - break; - default: - WARN_ON(1); - } + if (idx == ARMV8_PMU_CYCLE_IDX) + write_pmccfiltr(val); + else if (idx == ARMV8_PMU_INSTR_IDX) + write_pmicfiltr(val); + else + write_pmevtypern(idx, val); } /* @@ -145,7 +100,7 @@ static void kvm_vcpu_pmu_enable_el0(unsigned long events) u64 typer; u32 counter; - for_each_set_bit(counter, &events, 32) { + for_each_set_bit(counter, &events, ARMPMU_MAX_HWEVENTS) { typer = kvm_vcpu_pmu_read_evtype_direct(counter); typer &= ~ARMV8_PMU_EXCLUDE_EL0; kvm_vcpu_pmu_write_evtype_direct(counter, typer); @@ -160,7 +115,7 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events) u64 typer; u32 counter; - for_each_set_bit(counter, &events, 32) { + for_each_set_bit(counter, &events, ARMPMU_MAX_HWEVENTS) { typer = kvm_vcpu_pmu_read_evtype_direct(counter); typer |= ARMV8_PMU_EXCLUDE_EL0; kvm_vcpu_pmu_write_evtype_direct(counter, typer); @@ -176,7 +131,7 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events) void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) { struct kvm_pmu_events *pmu; - u32 events_guest, events_host; + u64 events_guest, events_host; if (!kvm_arm_support_pmu_v3() || !has_vhe()) return; @@ -197,7 +152,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) { struct kvm_pmu_events *pmu; - u32 events_guest, events_host; + u64 events_guest, events_host; if (!kvm_arm_support_pmu_v3() || !has_vhe()) return; diff --git a/arch/arm64/kvm/ptdump.c b/arch/arm64/kvm/ptdump.c new file mode 100644 index 00000000000000..e4a342e903e25a --- /dev/null +++ b/arch/arm64/kvm/ptdump.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Debug helper used to dump the stage-2 pagetables of the system and their + * associated permissions. 
+ * + * Copyright (C) Google, 2024 + * Author: Sebastian Ene + */ +#include +#include +#include + +#include +#include +#include + +#define MARKERS_LEN 2 +#define KVM_PGTABLE_MAX_LEVELS (KVM_PGTABLE_LAST_LEVEL + 1) + +struct kvm_ptdump_guest_state { + struct kvm *kvm; + struct ptdump_pg_state parser_state; + struct addr_marker ipa_marker[MARKERS_LEN]; + struct ptdump_pg_level level[KVM_PGTABLE_MAX_LEVELS]; + struct ptdump_range range[MARKERS_LEN]; +}; + +static const struct ptdump_prot_bits stage2_pte_bits[] = { + { + .mask = PTE_VALID, + .val = PTE_VALID, + .set = " ", + .clear = "F", + }, { + .mask = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID, + .val = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID, + .set = "R", + .clear = " ", + }, { + .mask = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID, + .val = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID, + .set = "W", + .clear = " ", + }, { + .mask = KVM_PTE_LEAF_ATTR_HI_S2_XN | PTE_VALID, + .val = PTE_VALID, + .set = " ", + .clear = "X", + }, { + .mask = KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID, + .val = KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID, + .set = "AF", + .clear = " ", + }, { + .mask = PTE_TABLE_BIT | PTE_VALID, + .val = PTE_VALID, + .set = "BLK", + .clear = " ", + }, +}; + +static int kvm_ptdump_visitor(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) +{ + struct ptdump_pg_state *st = ctx->arg; + struct ptdump_state *pt_st = &st->ptdump; + + note_page(pt_st, ctx->addr, ctx->level, ctx->old); + + return 0; +} + +static int kvm_ptdump_build_levels(struct ptdump_pg_level *level, u32 start_lvl) +{ + u32 i; + u64 mask; + + if (WARN_ON_ONCE(start_lvl >= KVM_PGTABLE_LAST_LEVEL)) + return -EINVAL; + + mask = 0; + for (i = 0; i < ARRAY_SIZE(stage2_pte_bits); i++) + mask |= stage2_pte_bits[i].mask; + + for (i = start_lvl; i < KVM_PGTABLE_MAX_LEVELS; i++) { + snprintf(level[i].name, sizeof(level[i].name), "%u", i); + + level[i].num = ARRAY_SIZE(stage2_pte_bits); + level[i].bits = stage2_pte_bits; + level[i].mask = mask; + } + + return 0; +} + +static struct kvm_ptdump_guest_state *kvm_ptdump_parser_create(struct kvm *kvm) +{ + struct kvm_ptdump_guest_state *st; + struct kvm_s2_mmu *mmu = &kvm->arch.mmu; + struct kvm_pgtable *pgtable = mmu->pgt; + int ret; + + st = kzalloc(sizeof(struct kvm_ptdump_guest_state), GFP_KERNEL_ACCOUNT); + if (!st) + return ERR_PTR(-ENOMEM); + + ret = kvm_ptdump_build_levels(&st->level[0], pgtable->start_level); + if (ret) { + kfree(st); + return ERR_PTR(ret); + } + + st->ipa_marker[0].name = "Guest IPA"; + st->ipa_marker[1].start_address = BIT(pgtable->ia_bits); + st->range[0].end = BIT(pgtable->ia_bits); + + st->kvm = kvm; + st->parser_state = (struct ptdump_pg_state) { + .marker = &st->ipa_marker[0], + .level = -1, + .pg_level = &st->level[0], + .ptdump.range = &st->range[0], + .start_address = 0, + }; + + return st; +} + +static int kvm_ptdump_guest_show(struct seq_file *m, void *unused) +{ + int ret; + struct kvm_ptdump_guest_state *st = m->private; + struct kvm *kvm = st->kvm; + struct kvm_s2_mmu *mmu = &kvm->arch.mmu; + struct ptdump_pg_state *parser_state = &st->parser_state; + struct kvm_pgtable_walker walker = (struct kvm_pgtable_walker) { + .cb = kvm_ptdump_visitor, + .arg = parser_state, + .flags = KVM_PGTABLE_WALK_LEAF, + }; + + parser_state->seq = m; + + write_lock(&kvm->mmu_lock); + ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker); + write_unlock(&kvm->mmu_lock); + + return ret; +} + +static int kvm_ptdump_guest_open(struct inode *m, struct file *file) +{ + struct kvm *kvm 
= m->i_private; + struct kvm_ptdump_guest_state *st; + int ret; + + if (!kvm_get_kvm_safe(kvm)) + return -ENOENT; + + st = kvm_ptdump_parser_create(kvm); + if (IS_ERR(st)) { + ret = PTR_ERR(st); + goto err_with_kvm_ref; + } + + ret = single_open(file, kvm_ptdump_guest_show, st); + if (!ret) + return 0; + + kfree(st); +err_with_kvm_ref: + kvm_put_kvm(kvm); + return ret; +} + +static int kvm_ptdump_guest_close(struct inode *m, struct file *file) +{ + struct kvm *kvm = m->i_private; + void *st = ((struct seq_file *)file->private_data)->private; + + kfree(st); + kvm_put_kvm(kvm); + + return single_release(m, file); +} + +static const struct file_operations kvm_ptdump_guest_fops = { + .open = kvm_ptdump_guest_open, + .read = seq_read, + .llseek = seq_lseek, + .release = kvm_ptdump_guest_close, +}; + +static int kvm_pgtable_range_show(struct seq_file *m, void *unused) +{ + struct kvm_pgtable *pgtable = m->private; + + seq_printf(m, "%2u\n", pgtable->ia_bits); + return 0; +} + +static int kvm_pgtable_levels_show(struct seq_file *m, void *unused) +{ + struct kvm_pgtable *pgtable = m->private; + + seq_printf(m, "%1d\n", KVM_PGTABLE_MAX_LEVELS - pgtable->start_level); + return 0; +} + +static int kvm_pgtable_debugfs_open(struct inode *m, struct file *file, + int (*show)(struct seq_file *, void *)) +{ + struct kvm *kvm = m->i_private; + struct kvm_pgtable *pgtable; + int ret; + + if (!kvm_get_kvm_safe(kvm)) + return -ENOENT; + + pgtable = kvm->arch.mmu.pgt; + + ret = single_open(file, show, pgtable); + if (ret < 0) + kvm_put_kvm(kvm); + return ret; +} + +static int kvm_pgtable_range_open(struct inode *m, struct file *file) +{ + return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_range_show); +} + +static int kvm_pgtable_levels_open(struct inode *m, struct file *file) +{ + return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_levels_show); +} + +static int kvm_pgtable_debugfs_close(struct inode *m, struct file *file) +{ + struct kvm *kvm = m->i_private; + + kvm_put_kvm(kvm); + return single_release(m, file); +} + +static const struct file_operations kvm_pgtable_range_fops = { + .open = kvm_pgtable_range_open, + .read = seq_read, + .llseek = seq_lseek, + .release = kvm_pgtable_debugfs_close, +}; + +static const struct file_operations kvm_pgtable_levels_fops = { + .open = kvm_pgtable_levels_open, + .read = seq_read, + .llseek = seq_lseek, + .release = kvm_pgtable_debugfs_close, +}; + +void kvm_s2_ptdump_create_debugfs(struct kvm *kvm) +{ + debugfs_create_file("stage2_page_tables", 0400, kvm->debugfs_dentry, + kvm, &kvm_ptdump_guest_fops); + debugfs_create_file("ipa_range", 0400, kvm->debugfs_dentry, kvm, + &kvm_pgtable_range_fops); + debugfs_create_file("stage2_levels", 0400, kvm->debugfs_dentry, + kvm, &kvm_pgtable_levels_fops); +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 31e49da867ffc3..dad88e31f9537f 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -47,6 +48,13 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg); static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 val); +static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + kvm_inject_undefined(vcpu); + return false; +} + static bool bad_trap(struct kvm_vcpu *vcpu, struct sys_reg_params *params, const struct sys_reg_desc *r, @@ -54,8 +62,7 @@ static bool bad_trap(struct kvm_vcpu *vcpu, { WARN_ONCE(1, "Unexpected %s\n", msg); 
print_sys_reg_instr(params); - kvm_inject_undefined(vcpu); - return false; + return undef_access(vcpu, params, r); } static bool read_from_write_only(struct kvm_vcpu *vcpu, @@ -346,10 +353,8 @@ static bool access_dcgsw(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - if (!kvm_has_mte(vcpu->kvm)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!kvm_has_mte(vcpu->kvm)) + return undef_access(vcpu, p, r); /* Treat MTE S/W ops as we treat the classic ones: with contempt */ return access_dcsw(vcpu, p, r); @@ -386,10 +391,8 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu, u64 val, mask, shift; if (reg_to_encoding(r) == SYS_TCR2_EL1 && - !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) { - kvm_inject_undefined(vcpu); - return false; - } + !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) + return undef_access(vcpu, p, r); BUG_ON(!p->is_write); @@ -436,10 +439,8 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu, { bool g1; - if (!kvm_has_gicv3(vcpu->kvm)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!kvm_has_gicv3(vcpu->kvm)) + return undef_access(vcpu, p, r); if (!p->is_write) return read_from_write_only(vcpu, p, r); @@ -484,6 +485,9 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { + if (!kvm_has_gicv3(vcpu->kvm)) + return undef_access(vcpu, p, r); + if (p->is_write) return ignore_write(vcpu, p); @@ -501,14 +505,6 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, return read_zero(vcpu, p); } -static bool trap_undef(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) -{ - kvm_inject_undefined(vcpu); - return false; -} - /* * ARMv8.1 mandates at least a trivial LORegion implementation, where all the * RW registers are RES0 (which we can implement as RAZ/WI). 
On an ARMv8.0 @@ -521,10 +517,8 @@ static bool trap_loregion(struct kvm_vcpu *vcpu, { u32 sr = reg_to_encoding(r); - if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) + return undef_access(vcpu, p, r); if (p->is_write && sr == SYS_LORID_EL1) return write_to_read_only(vcpu, p, r); @@ -893,7 +887,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { reset_unknown(vcpu, r); - __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK; + __vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK; return __vcpu_sys_reg(vcpu, r->reg); } @@ -985,7 +979,7 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, else /* return PMSELR.SEL field */ p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) - & ARMV8_PMU_COUNTER_MASK; + & PMSELR_EL0_SEL_MASK; return true; } @@ -1053,8 +1047,8 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, if (pmu_access_event_counter_el0_disabled(vcpu)) return false; - idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) - & ARMV8_PMU_COUNTER_MASK; + idx = SYS_FIELD_GET(PMSELR_EL0, SEL, + __vcpu_sys_reg(vcpu, PMSELR_EL0)); } else if (r->Op2 == 0) { /* PMCCNTR_EL0 */ if (pmu_access_cycle_counter_el0_disabled(vcpu)) @@ -1104,7 +1098,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { /* PMXEVTYPER_EL0 */ - idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; + idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0)); reg = PMEVTYPER0_EL0 + idx; } else if (r->CRn == 14 && (r->CRm & 12) == 12) { idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); @@ -1257,10 +1251,8 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { - if (!vcpu_mode_priv(vcpu)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!vcpu_mode_priv(vcpu)) + return undef_access(vcpu, p, r); __vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval & ARMV8_PMU_USERENR_MASK; @@ -1344,14 +1336,6 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, .reset = reset_pmevtyper, \ .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), } -static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p, - const struct sys_reg_desc *r) -{ - kvm_inject_undefined(vcpu); - - return false; -} - /* Macro to expand the AMU counter and type registers*/ #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access } #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access } @@ -1410,8 +1394,7 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, break; default: print_sys_reg_msg(p, "%s", "Unhandled trapped timer register"); - kvm_inject_undefined(vcpu); - return false; + return undef_access(vcpu, p, r); } if (p->is_write) @@ -1545,6 +1528,10 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); break; + case SYS_ID_AA64PFR2_EL1: + /* We only expose FPMR */ + val &= ID_AA64PFR2_EL1_FPMR; + break; case SYS_ID_AA64ISAR1_EL1: if (!vcpu_has_ptrauth(vcpu)) val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | @@ -1562,6 +1549,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, case SYS_ID_AA64MMFR2_EL1: val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK; break; + case SYS_ID_AA64MMFR3_EL1: + val &= 
ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE; + break; case SYS_ID_MMFR4_EL1: val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX); break; @@ -1675,6 +1665,24 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, return REG_HIDDEN; } +static unsigned int sme_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP)) + return 0; + + return REG_HIDDEN; +} + +static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (kvm_has_fpmr(vcpu->kvm)) + return 0; + + return REG_HIDDEN; +} + static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { @@ -2090,26 +2098,6 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu, #define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v) #define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v) -/* - * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when - * HCR_EL2.E2H==1, and only in the sysreg table for convenience of - * handling traps. Given that, they are always hidden from userspace. - */ -static unsigned int hidden_user_visibility(const struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) -{ - return REG_HIDDEN_USER; -} - -#define EL12_REG(name, acc, rst, v) { \ - SYS_DESC(SYS_##name##_EL12), \ - .access = acc, \ - .reset = rst, \ - .reg = name##_EL1, \ - .val = v, \ - .visibility = hidden_user_visibility, \ -} - /* * Since reset() callback and field val are not used for idregs, they will be * used for specific purposes for idregs. @@ -2217,6 +2205,18 @@ static bool access_spsr(struct kvm_vcpu *vcpu, return true; } +static bool access_cntkctl_el12(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) + __vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval; + else + p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1); + + return true; +} + static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 val = r->val; @@ -2261,6 +2261,15 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu, return true; } +static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S1POE, IMP)) + return 0; + + return REG_HIDDEN; +} + /* * Architected system registers. 
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 @@ -2307,7 +2316,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { // DBGDTR[TR]X_EL0 share the same encoding { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi }, - { SYS_DESC(SYS_DBGVCR32_EL2), trap_undef, reset_val, DBGVCR32_EL2, 0 }, + { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 }, { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, @@ -2365,16 +2374,15 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64PFR0_EL1_MPAM | ID_AA64PFR0_EL1_SVE | ID_AA64PFR0_EL1_RAS | - ID_AA64PFR0_EL1_GIC | ID_AA64PFR0_EL1_AdvSIMD | ID_AA64PFR0_EL1_FP), }, ID_SANITISED(ID_AA64PFR1_EL1), - ID_UNALLOCATED(4,2), + ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR), ID_UNALLOCATED(4,3), ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0), ID_HIDDEN(ID_AA64SMFR0_EL1), ID_UNALLOCATED(4,6), - ID_UNALLOCATED(4,7), + ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0), /* CRm=5 */ { SYS_DESC(SYS_ID_AA64DFR0_EL1), @@ -2424,7 +2432,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64MMFR2_EL1_IDS | ID_AA64MMFR2_EL1_NV | ID_AA64MMFR2_EL1_CCIDX)), - ID_SANITISED(ID_AA64MMFR3_EL1), + ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX | + ID_AA64MMFR3_EL1_S1POE)), ID_SANITISED(ID_AA64MMFR4_EL1), ID_UNALLOCATED(7,5), ID_UNALLOCATED(7,6), @@ -2455,6 +2464,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SPSR_EL1), access_spsr}, { SYS_DESC(SYS_ELR_EL1), access_elr}, + { SYS_DESC(SYS_ICC_PMR_EL1), undef_access }, + { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, @@ -2498,6 +2509,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 }, { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 }, + { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1, + .visibility = s1poe_visibility }, { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, @@ -2509,18 +2522,31 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, - { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only }, - { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only }, - { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only }, - { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only }, - { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only }, + { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access }, + { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access }, + { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access }, + { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access }, + { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access }, + { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access }, + { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access }, + { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access }, + { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access }, + { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access }, + { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access }, + { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access }, + { SYS_DESC(SYS_ICC_DIR_EL1), undef_access }, + { SYS_DESC(SYS_ICC_RPR_EL1), undef_access }, { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi }, { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi }, { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi }, - { 
SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only }, - { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only }, - { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only }, + { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access }, + { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access }, + { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access }, + { SYS_DESC(SYS_ICC_BPR1_EL1), undef_access }, + { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access }, { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, + { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access }, + { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access }, { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 }, @@ -2541,7 +2567,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { CTR_EL0_IDC_MASK | CTR_EL0_DminLine_MASK | CTR_EL0_IminLine_MASK), - { SYS_DESC(SYS_SVCR), undef_access }, + { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility }, + { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility }, { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr }, @@ -2584,6 +2611,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { .access = access_pmovs, .reg = PMOVSSET_EL0, .get_user = get_pmreg, .set_user = set_pmreg }, + { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0, + .visibility = s1poe_visibility }, { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, { SYS_DESC(SYS_TPIDR2_EL0), undef_access }, @@ -2764,7 +2793,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG_VNCR(VTTBR_EL2, reset_val, 0), EL2_REG_VNCR(VTCR_EL2, reset_val, 0), - { SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 }, + { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 }, EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0), EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0), EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0), @@ -2773,20 +2802,16 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SP_EL1), access_sp_el1}, /* AArch32 SPSR_* are RES0 if trapped from a NV guest */ - { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi, - .visibility = hidden_user_visibility }, - { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi, - .visibility = hidden_user_visibility }, - { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi, - .visibility = hidden_user_visibility }, - { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi, - .visibility = hidden_user_visibility }, - - { SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 }, + { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi }, + { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi }, + { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi }, + { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi }, + + { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 }, EL2_REG(AFSR0_EL2, access_rw, reset_val, 0), EL2_REG(AFSR1_EL2, access_rw, reset_val, 0), EL2_REG_REDIR(ESR_EL2, reset_val, 0), - { SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 }, + { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 }, EL2_REG_REDIR(FAR_EL2, reset_val, 0), EL2_REG(HPFAR_EL2, access_rw, reset_val, 0), @@ -2796,7 +2821,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(VBAR_EL2, access_rw, reset_val, 0), EL2_REG(RVBAR_EL2, access_rw, reset_val, 0), - { SYS_DESC(SYS_RMR_EL2), trap_undef }, + { SYS_DESC(SYS_RMR_EL2), undef_access 
}, + + EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0), EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0), EL2_REG(TPIDR_EL2, access_rw, reset_val, 0), @@ -2804,11 +2831,48 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0), EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0), - EL12_REG(CNTKCTL, access_rw, reset_val, 0), + { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 }, EL2_REG(SP_EL2, NULL, reset_unknown, 0), }; +static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); + + __kvm_at_s1e01(vcpu, op, p->regval); + + return true; +} + +static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); + + /* There is no FGT associated with AT S1E2A :-( */ + if (op == OP_AT_S1E2A && + !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) { + kvm_inject_undefined(vcpu); + return false; + } + + __kvm_at_s1e2(vcpu, op, p->regval); + + return true; +} + +static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); + + __kvm_at_s12(vcpu, op, p->regval); + + return true; +} + static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vpcu, u32 instr) { struct kvm *kvm = vpcu->kvm; @@ -2830,10 +2894,8 @@ static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); - if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) + return undef_access(vcpu, p, r); write_lock(&vcpu->kvm->mmu_lock); @@ -2902,10 +2964,8 @@ static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); u64 limit, vttbr; - if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) + return undef_access(vcpu, p, r); vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm)); @@ -2930,10 +2990,8 @@ static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, u64 base, range, tg, num, scale; int shift; - if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) + return undef_access(vcpu, p, r); /* * Because the shadow S2 structure doesn't necessarily reflect that @@ -3001,10 +3059,8 @@ static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); - if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) + return undef_access(vcpu, p, r); kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), &(union tlbi_info) { @@ -3044,10 +3100,8 @@ static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, WARN_ON(!vcpu_is_el2(vcpu)); - if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) { - kvm_inject_undefined(vcpu); - return false; - } + if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) + return undef_access(vcpu, 
p, r); kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), &(union tlbi_info) { @@ -3071,6 +3125,14 @@ static struct sys_reg_desc sys_insn_descs[] = { { SYS_DESC(SYS_DC_ISW), access_dcsw }, { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, + + SYS_INSN(AT_S1E1R, handle_at_s1e01), + SYS_INSN(AT_S1E1W, handle_at_s1e01), + SYS_INSN(AT_S1E0R, handle_at_s1e01), + SYS_INSN(AT_S1E0W, handle_at_s1e01), + SYS_INSN(AT_S1E1RP, handle_at_s1e01), + SYS_INSN(AT_S1E1WP, handle_at_s1e01), + { SYS_DESC(SYS_DC_CSW), access_dcsw }, { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, @@ -3150,19 +3212,27 @@ static struct sys_reg_desc sys_insn_descs[] = { SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1), SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1), + SYS_INSN(AT_S1E2R, handle_at_s1e2), + SYS_INSN(AT_S1E2W, handle_at_s1e2), + SYS_INSN(AT_S12E1R, handle_at_s12), + SYS_INSN(AT_S12E1W, handle_at_s12), + SYS_INSN(AT_S12E0R, handle_at_s12), + SYS_INSN(AT_S12E0W, handle_at_s12), + SYS_INSN(AT_S1E2A, handle_at_s1e2), + SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is), SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is), - SYS_INSN(TLBI_ALLE2OS, trap_undef), - SYS_INSN(TLBI_VAE2OS, trap_undef), + SYS_INSN(TLBI_ALLE2OS, undef_access), + SYS_INSN(TLBI_VAE2OS, undef_access), SYS_INSN(TLBI_ALLE1OS, handle_alle1is), - SYS_INSN(TLBI_VALE2OS, trap_undef), + SYS_INSN(TLBI_VALE2OS, undef_access), SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is), - SYS_INSN(TLBI_RVAE2IS, trap_undef), - SYS_INSN(TLBI_RVALE2IS, trap_undef), + SYS_INSN(TLBI_RVAE2IS, undef_access), + SYS_INSN(TLBI_RVALE2IS, undef_access), SYS_INSN(TLBI_ALLE1IS, handle_alle1is), SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is), @@ -3174,10 +3244,10 @@ static struct sys_reg_desc sys_insn_descs[] = { SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is), SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is), - SYS_INSN(TLBI_RVAE2OS, trap_undef), - SYS_INSN(TLBI_RVALE2OS, trap_undef), - SYS_INSN(TLBI_RVAE2, trap_undef), - SYS_INSN(TLBI_RVALE2, trap_undef), + SYS_INSN(TLBI_RVAE2OS, undef_access), + SYS_INSN(TLBI_RVALE2OS, undef_access), + SYS_INSN(TLBI_RVAE2, undef_access), + SYS_INSN(TLBI_RVALE2, undef_access), SYS_INSN(TLBI_ALLE1, handle_alle1is), SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is), @@ -3186,19 +3256,19 @@ static struct sys_reg_desc sys_insn_descs[] = { SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is), - SYS_INSN(TLBI_ALLE2OSNXS, trap_undef), - SYS_INSN(TLBI_VAE2OSNXS, trap_undef), + SYS_INSN(TLBI_ALLE2OSNXS, undef_access), + SYS_INSN(TLBI_VAE2OSNXS, undef_access), SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is), - SYS_INSN(TLBI_VALE2OSNXS, trap_undef), + SYS_INSN(TLBI_VALE2OSNXS, undef_access), SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is), - SYS_INSN(TLBI_RVAE2ISNXS, trap_undef), - SYS_INSN(TLBI_RVALE2ISNXS, trap_undef), - SYS_INSN(TLBI_ALLE2ISNXS, trap_undef), - SYS_INSN(TLBI_VAE2ISNXS, trap_undef), + SYS_INSN(TLBI_RVAE2ISNXS, undef_access), + SYS_INSN(TLBI_RVALE2ISNXS, undef_access), + SYS_INSN(TLBI_ALLE2ISNXS, undef_access), + SYS_INSN(TLBI_VAE2ISNXS, undef_access), SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is), - SYS_INSN(TLBI_VALE2ISNXS, trap_undef), + SYS_INSN(TLBI_VALE2ISNXS, undef_access), SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is), SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is), SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is), @@ -3208,14 
+3278,14 @@ static struct sys_reg_desc sys_insn_descs[] = { SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is), SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is), SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is), - SYS_INSN(TLBI_RVAE2OSNXS, trap_undef), - SYS_INSN(TLBI_RVALE2OSNXS, trap_undef), - SYS_INSN(TLBI_RVAE2NXS, trap_undef), - SYS_INSN(TLBI_RVALE2NXS, trap_undef), - SYS_INSN(TLBI_ALLE2NXS, trap_undef), - SYS_INSN(TLBI_VAE2NXS, trap_undef), + SYS_INSN(TLBI_RVAE2OSNXS, undef_access), + SYS_INSN(TLBI_RVALE2OSNXS, undef_access), + SYS_INSN(TLBI_RVAE2NXS, undef_access), + SYS_INSN(TLBI_RVALE2NXS, undef_access), + SYS_INSN(TLBI_ALLE2NXS, undef_access), + SYS_INSN(TLBI_VAE2NXS, undef_access), SYS_INSN(TLBI_ALLE1NXS, handle_alle1is), - SYS_INSN(TLBI_VALE2NXS, trap_undef), + SYS_INSN(TLBI_VALE2NXS, undef_access), SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is), }; @@ -3393,6 +3463,7 @@ static const struct sys_reg_desc cp15_regs[] = { /* TTBCR2 */ { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 }, { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 }, + { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access }, /* DFSR */ { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 }, { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 }, @@ -3442,8 +3513,28 @@ static const struct sys_reg_desc cp15_regs[] = { /* AMAIR1 */ { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 }, - /* ICC_SRE */ - { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre }, + { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, + { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access }, + { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access }, { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 }, @@ -4280,7 +4371,7 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, int ret; r = id_to_sys_reg_desc(vcpu, reg->id, table, num); - if (!r || sysreg_hidden_user(vcpu, r)) + if (!r || sysreg_hidden(vcpu, r)) return -ENOENT; if (r->get_user) { @@ -4324,7 +4415,7 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, return -EFAULT; r = id_to_sys_reg_desc(vcpu, reg->id, table, num); - if (!r || sysreg_hidden_user(vcpu, r)) + if (!r || sysreg_hidden(vcpu, r)) return -ENOENT; if (sysreg_user_write_ignore(vcpu, r)) @@ -4410,7 +4501,7 @@ static int walk_one_sys_reg(const struct kvm_vcpu *vcpu, if (!(rd->reg || rd->get_user)) return 0; - if (sysreg_hidden_user(vcpu, rd)) + if 
(sysreg_hidden(vcpu, rd)) return 0; if (!copy_reg_to_user(rd, uind)) @@ -4551,6 +4642,7 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu) mutex_lock(&kvm->arch.config_lock); vcpu_set_hcr(vcpu); + vcpu_set_ich_hcr(vcpu); if (cpus_have_final_cap(ARM64_HAS_HCX)) { /* @@ -4566,6 +4658,9 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu) if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En; + + if (kvm_has_fpmr(kvm)) + vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM; } if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags)) @@ -4574,8 +4669,6 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu) kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 | - HFGxTR_EL2_nPOR_EL1 | - HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1 | HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK); @@ -4606,10 +4699,21 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu) HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS); + if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) + kvm->arch.fgu[HFGITR_GROUP] |= HFGITR_EL2_ATS1E1A; + + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2)) + kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_ATS1E1RP | + HFGITR_EL2_ATS1E1WP); + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP)) kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1); + if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP)) + kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 | + HFGxTR_EL2_nPOR_EL0); + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP)) kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 | HAFGRTR_EL2_RES1); @@ -4619,6 +4723,36 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu) mutex_unlock(&kvm->arch.config_lock); } +/* + * Perform last adjustments to the ID registers that are implied by the + * configuration outside of the ID regs themselves, as well as any + * initialisation that directly depend on these ID registers (such as + * RES0/RES1 behaviours). This is not the place to configure traps though. + * + * Because this can be called once per CPU, changes must be idempotent. 
+ */ +int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + + guard(mutex)(&kvm->arch.config_lock); + + if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) && + irqchip_in_kernel(kvm) && + kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) { + kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK; + kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK; + } + + if (vcpu_has_nv(vcpu)) { + int ret = kvm_init_nv_sysregs(kvm); + if (ret) + return ret; + } + + return 0; +} + int __init kvm_sys_reg_table_init(void) { bool valid = true; diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index 997eea21ba2ab3..1d94ed6efad2ca 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -95,9 +95,8 @@ struct sys_reg_desc { }; #define REG_HIDDEN (1 << 0) /* hidden from userspace and guest */ -#define REG_HIDDEN_USER (1 << 1) /* hidden from userspace only */ -#define REG_RAZ (1 << 2) /* RAZ from userspace and guest */ -#define REG_USER_WI (1 << 3) /* WI from userspace only */ +#define REG_RAZ (1 << 1) /* RAZ from userspace and guest */ +#define REG_USER_WI (1 << 2) /* WI from userspace only */ static __printf(2, 3) inline void print_sys_reg_msg(const struct sys_reg_params *p, @@ -165,15 +164,6 @@ static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu, return sysreg_visibility(vcpu, r) & REG_HIDDEN; } -static inline bool sysreg_hidden_user(const struct kvm_vcpu *vcpu, - const struct sys_reg_desc *r) -{ - if (likely(!r->visibility)) - return false; - - return r->visibility(vcpu, r) & (REG_HIDDEN | REG_HIDDEN_USER); -} - static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { @@ -235,6 +225,8 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index); +int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu); + #define AA32(_x) .aarch32_map = AA32_##_x #define Op0(_x) .Op0 = _x #define Op1(_x) .Op1 = _x @@ -248,4 +240,11 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index); CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \ Op2(sys_reg_Op2(reg)) +#define CP15_SYS_DESC(reg) \ + .name = #reg, \ + .aarch32_map = AA32_DIRECT, \ + Op0(0), Op1(sys_reg_Op1(reg)), \ + CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \ + Op2(sys_reg_Op2(reg)) + #endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */ diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 3eecdd2f4b8f55..b217b256853c2a 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -292,6 +292,18 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu) /* Get the show on the road... 
*/ vgic_v3->vgic_hcr = ICH_HCR_EN; +} + +void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3; + + /* Hide GICv3 sysreg if necessary */ + if (!kvm_has_gicv3(vcpu->kvm)) { + vgic_v3->vgic_hcr |= ICH_HCR_TALL0 | ICH_HCR_TALL1 | ICH_HCR_TC; + return; + } + if (group0_trap) vgic_v3->vgic_hcr |= ICH_HCR_TALL0; if (group1_trap) diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index abe29c7d85d05a..f50274fd558156 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -922,10 +922,13 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) void kvm_vgic_load(struct kvm_vcpu *vcpu) { - if (unlikely(!vgic_initialized(vcpu->kvm))) + if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) { + if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) + __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3); return; + } - if (kvm_vgic_global_state.type == VGIC_V2) + if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_load(vcpu); else vgic_v3_load(vcpu); @@ -933,10 +936,13 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu) void kvm_vgic_put(struct kvm_vcpu *vcpu) { - if (unlikely(!vgic_initialized(vcpu->kvm))) + if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) { + if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) + __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3); return; + } - if (kvm_vgic_global_state.type == VGIC_V2) + if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_put(vcpu); else vgic_v3_put(vcpu); diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 8532bfe3fed40c..f2486b4d9f9566 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -346,11 +346,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm); void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq); +void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu); + static inline bool kvm_has_gicv3(struct kvm *kvm) { - return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) && - irqchip_in_kernel(kvm) && - kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3); + return kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP); } #endif diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 60454256945b8b..2fc8c6dd04070b 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-y := dma-mapping.o extable.o fault.o init.o \ cache.o copypage.o flush.o \ - ioremap.o mmap.o pgd.o mmu.o \ + ioremap.o mmap.o pgd.o mem_encrypt.o mmu.o \ context.o proc.o pageattr.o fixmap.o obj-$(CONFIG_ARM64_CONTPTE) += contpte.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index a3edced29ac138..55107d27d3f883 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -421,6 +421,12 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma, ptep = contpte_align_down(ptep); start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + /* + * We are not advancing entry because __ptep_set_access_flags() + * only consumes access flags from entry. And since we have checked + * for the whole contpte block and returned early, pte_same() + * within __ptep_set_access_flags() is likely false. 
+ */ for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) __ptep_set_access_flags(vma, addr, ptep, entry, 0); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 451ba7cbd5adb0..8b281cf308b30f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -486,6 +487,23 @@ static void do_bad_area(unsigned long far, unsigned long esr, } } +static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma, + unsigned int mm_flags) +{ + unsigned long iss2 = ESR_ELx_ISS2(esr); + + if (!system_supports_poe()) + return false; + + if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay)) + return true; + + return !arch_vma_access_permitted(vma, + mm_flags & FAULT_FLAG_WRITE, + mm_flags & FAULT_FLAG_INSTRUCTION, + false); +} + static bool is_el0_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; @@ -511,6 +529,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, unsigned long addr = untagged_addr(far); struct vm_area_struct *vma; int si_code; + int pkey = -1; if (kprobe_page_fault(regs, esr)) return 0; @@ -575,6 +594,16 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, count_vm_vma_lock_event(VMA_LOCK_SUCCESS); goto bad_area; } + + if (fault_from_pkey(esr, vma, mm_flags)) { + pkey = vma_pkey(vma); + vma_end_read(vma); + fault = 0; + si_code = SEGV_PKUERR; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto bad_area; + } + fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) vma_end_read(vma); @@ -610,7 +639,16 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, goto bad_area; } + if (fault_from_pkey(esr, vma, mm_flags)) { + pkey = vma_pkey(vma); + mmap_read_unlock(mm); + fault = 0; + si_code = SEGV_PKUERR; + goto bad_area; + } + fault = handle_mm_fault(vma, addr, mm_flags, regs); + /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) @@ -669,8 +707,23 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name); } else { + /* + * The pkey value that we return to userspace can be different + * from the pkey that caused the fault. + * + * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); + * 2. T1 : set POR_EL0 to deny access to pkey=4, touches, page + * 3. T1 : faults... + * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); + * 5. T1 : enters fault handler, takes mmap_lock, etc... + * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really + * faulted on a pte with its pkey=4. + */ /* Something tried to access memory that out of memory map */ - arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name); + if (si_code == SEGV_PKUERR) + arm64_force_sig_fault_pkey(far, inf->name, pkey); + else + arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name); } return 0; diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 9b5ab6818f7f39..27a32ff15412aa 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -114,36 +114,33 @@ static void __init arch_reserve_crashkernel(void) low_size, high); } -/* - * Return the maximum physical address for a zone accessible by the given bits - * limit. If DRAM starts above 32-bit, expand the zone to the maximum - * available memory, otherwise cap it at 32-bit. 
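/*
 * Hedged userspace sketch (not part of this patch): what the SEGV_PKUERR
 * path above looks like from the other side. Assumes the generic glibc
 * pkey_*() wrappers; on a kernel with POE support, the write below should
 * be reported as SIGSEGV with si_code == SEGV_PKUERR and si_pkey set to
 * the key returned by pkey_alloc().
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);

	if (p == MAP_FAILED || pkey < 0 ||
	    pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey)) {
		perror("pkey setup");
		return 1;
	}

	p[0] = 1;	/* denied by POR_EL0: expect SEGV_PKUERR here */
	return 0;
}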
- */ -static phys_addr_t __init max_zone_phys(unsigned int zone_bits) +static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit) { - phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits); - phys_addr_t phys_start = memblock_start_of_DRAM(); - - if (phys_start > U32_MAX) - zone_mask = PHYS_ADDR_MAX; - else if (phys_start > zone_mask) - zone_mask = U32_MAX; + /** + * Information we get from firmware (e.g. DT dma-ranges) describe DMA + * bus constraints. Devices using DMA might have their own limitations. + * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM + * DMA zone on platforms that have RAM there. + */ + if (memblock_start_of_DRAM() < U32_MAX) + zone_limit = min(zone_limit, U32_MAX); - return min(zone_mask, memblock_end_of_DRAM() - 1) + 1; + return min(zone_limit, memblock_end_of_DRAM() - 1) + 1; } static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; - unsigned int __maybe_unused acpi_zone_dma_bits; - unsigned int __maybe_unused dt_zone_dma_bits; - phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32); + phys_addr_t __maybe_unused acpi_zone_dma_limit; + phys_addr_t __maybe_unused dt_zone_dma_limit; + phys_addr_t __maybe_unused dma32_phys_limit = + max_zone_phys(DMA_BIT_MASK(32)); #ifdef CONFIG_ZONE_DMA - acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address()); - dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL)); - zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits); - arm64_dma_phys_limit = max_zone_phys(zone_dma_bits); + acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address(); + dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL); + zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit); + arm64_dma_phys_limit = max_zone_phys(zone_dma_limit); max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit); #endif #ifdef CONFIG_ZONE_DMA32 @@ -414,8 +411,16 @@ void __init mem_init(void) void free_initmem(void) { - free_reserved_area(lm_alias(__init_begin), - lm_alias(__init_end), + void *lm_init_begin = lm_alias(__init_begin); + void *lm_init_end = lm_alias(__init_end); + + WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE)); + WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE)); + + /* Delete __init region from memblock.reserved. */ + memblock_free(lm_init_begin, lm_init_end - lm_init_begin); + + free_reserved_area(lm_init_begin, lm_init_end, POISON_FREE_INITMEM, "unused kernel"); /* * Unmap the __init region but leave the VM area in place. This diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 269f2f63ab7dc4..6cc0b7e7eb0384 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c @@ -3,10 +3,22 @@ #include #include +static ioremap_prot_hook_t ioremap_prot_hook; + +int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook) +{ + if (WARN_ON(ioremap_prot_hook)) + return -EBUSY; + + ioremap_prot_hook = hook; + return 0; +} + void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, unsigned long prot) { unsigned long last_addr = phys_addr + size - 1; + pgprot_t pgprot = __pgprot(prot); /* Don't allow outside PHYS_MASK */ if (last_addr & ~PHYS_MASK) @@ -16,7 +28,16 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr)))) return NULL; - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + /* + * If a hook is registered (e.g. for confidential computing + * purposes), call that now and barf if it fails. 
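/*
 * Hedged sketch of how a Confidential Computing backend might use the hook
 * registered above. arm64_ioremap_prot_hook_register() and the hook
 * prototype come from this patch; everything else here is illustrative.
 */
static int demo_coco_ioremap_hook(phys_addr_t phys, size_t size,
				  pgprot_t *prot)
{
	/* e.g. mark MMIO as decrypted/shared so the host can emulate it */
	*prot = pgprot_decrypted(*prot);
	return 0;
}

static int __init demo_coco_ioremap_init(void)
{
	return arm64_ioremap_prot_hook_register(demo_coco_ioremap_hook);
}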
+ */ + if (unlikely(ioremap_prot_hook) && + WARN_ON(ioremap_prot_hook(phys_addr, size, &pgprot))) { + return NULL; + } + + return generic_ioremap_prot(phys_addr, size, pgprot); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/arm64/mm/mem_encrypt.c b/arch/arm64/mm/mem_encrypt.c new file mode 100644 index 00000000000000..ee3c0ab043845f --- /dev/null +++ b/arch/arm64/mm/mem_encrypt.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of the memory encryption/decryption API. + * + * Since the low-level details of the operation depend on the + * Confidential Computing environment (e.g. pKVM, CCA, ...), this just + * acts as a top-level dispatcher to whatever hooks may have been + * registered. + * + * Author: Will Deacon + * Copyright (C) 2024 Google LLC + * + * "Hello, boils and ghouls!" + */ + +#include +#include +#include +#include + +#include + +static const struct arm64_mem_crypt_ops *crypt_ops; + +int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops) +{ + if (WARN_ON(crypt_ops)) + return -EBUSY; + + crypt_ops = ops; + return 0; +} + +int set_memory_encrypted(unsigned long addr, int numpages) +{ + if (likely(!crypt_ops) || WARN_ON(!PAGE_ALIGNED(addr))) + return 0; + + return crypt_ops->encrypt(addr, numpages); +} +EXPORT_SYMBOL_GPL(set_memory_encrypted); + +int set_memory_decrypted(unsigned long addr, int numpages) +{ + if (likely(!crypt_ops) || WARN_ON(!PAGE_ALIGNED(addr))) + return 0; + + return crypt_ops->decrypt(addr, numpages); +} +EXPORT_SYMBOL_GPL(set_memory_decrypted); diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 642bdf908b22f5..7e3ad97e27d80d 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -102,6 +102,17 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) if (vm_flags & VM_MTE) prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); +#ifdef CONFIG_ARCH_HAS_PKEYS + if (system_supports_poe()) { + if (vm_flags & VM_PKEY_BIT0) + prot |= PTE_PO_IDX_0; + if (vm_flags & VM_PKEY_BIT1) + prot |= PTE_PO_IDX_1; + if (vm_flags & VM_PKEY_BIT2) + prot |= PTE_PO_IDX_2; + } +#endif + return __pgprot(prot); } EXPORT_SYMBOL(vm_get_page_prot); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 353ea5dc32b850..e55b02fbddc8f3 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -1549,3 +1550,47 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp) cpu_uninstall_idmap(); } + +#ifdef CONFIG_ARCH_HAS_PKEYS +int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) +{ + u64 new_por = POE_RXW; + u64 old_por; + u64 pkey_shift; + + if (!system_supports_poe()) + return -ENOSPC; + + /* + * This code should only be called with valid 'pkey' + * values originating from in-kernel users. Complain + * if a bad value is observed. 
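/*
 * Hedged sketch of a backend driver wiring up the dispatcher added in
 * mem_encrypt.c above. Only struct arm64_mem_crypt_ops and
 * arm64_mem_crypt_ops_register() come from this patch; the demo_*()
 * callbacks are placeholders for a real pKVM/CCA guest implementation.
 */
static int demo_set_range_private(unsigned long addr, int numpages)
{
	return 0;	/* would issue the relevant "make private" hypercall */
}

static int demo_set_range_shared(unsigned long addr, int numpages)
{
	return 0;	/* would issue the relevant "make shared" hypercall */
}

static const struct arm64_mem_crypt_ops demo_crypt_ops = {
	.encrypt	= demo_set_range_private,
	.decrypt	= demo_set_range_shared,
};

static int __init demo_mem_crypt_init(void)
{
	return arm64_mem_crypt_ops_register(&demo_crypt_ops);
}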
+ */ + if (WARN_ON_ONCE(pkey >= arch_max_pkey())) + return -EINVAL; + + /* Set the bits we need in POR: */ + new_por = POE_RXW; + if (init_val & PKEY_DISABLE_WRITE) + new_por &= ~POE_W; + if (init_val & PKEY_DISABLE_ACCESS) + new_por &= ~POE_RW; + if (init_val & PKEY_DISABLE_READ) + new_por &= ~POE_R; + if (init_val & PKEY_DISABLE_EXECUTE) + new_por &= ~POE_X; + + /* Shift the bits in to the correct place in POR for pkey: */ + pkey_shift = pkey * POR_BITS_PER_PKEY; + new_por <<= pkey_shift; + + /* Get old POR and mask off any old bits in place: */ + old_por = read_sysreg_s(SYS_POR_EL0); + old_por &= ~(POE_MASK << pkey_shift); + + /* Write old part along with new part: */ + write_sysreg_s(old_por | new_por, SYS_POR_EL0); + + return 0; +} +#endif diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index f4bc6c5bac0625..8abdc7fed3210d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -36,8 +36,6 @@ #define TCR_KASLR_FLAGS 0 #endif -#define TCR_SMP_FLAGS TCR_SHARED - /* PTWs cacheable, inner/outer WBWA */ #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA @@ -469,7 +467,7 @@ SYM_FUNC_START(__cpu_setup) tcr .req x16 mov_q mair, MAIR_EL1_SET mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \ - TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ + TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS tcr_clear_errata_bits tcr, x9, x5 diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c index 6986827e0d6451..264c5f9b97d866 100644 --- a/arch/arm64/mm/ptdump.c +++ b/arch/arm64/mm/ptdump.c @@ -38,33 +38,7 @@ seq_printf(m, fmt); \ }) -/* - * The page dumper groups page table entries of the same type into a single - * description. It uses pg_state to track the range information while - * iterating over the pte entries. When the continuity is broken it then - * dumps out a description of the range. 
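/*
 * Hedged illustration of the POR_EL0 layout manipulated above: one
 * POR_BITS_PER_PKEY-wide permission field per key. The helper below is
 * illustrative only; POE_* and POR_BITS_PER_PKEY come from the arm64
 * headers added by this series.
 */
static u64 demo_por_set_pkey(u64 por, int pkey, u64 perms)
{
	u64 shift = pkey * POR_BITS_PER_PKEY;

	por &= ~(POE_MASK << shift);	/* drop the key's old permissions */
	por |= perms << shift;		/* install the new ones */
	return por;
}

/* Example: demo_por_set_pkey(por, 2, POE_RXW & ~POE_W) makes key 2 read/exec only. */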
- */ -struct pg_state { - struct ptdump_state ptdump; - struct seq_file *seq; - const struct addr_marker *marker; - const struct mm_struct *mm; - unsigned long start_address; - int level; - u64 current_prot; - bool check_wx; - unsigned long wx_pages; - unsigned long uxn_pages; -}; - -struct prot_bits { - u64 mask; - u64 val; - const char *set; - const char *clear; -}; - -static const struct prot_bits pte_bits[] = { +static const struct ptdump_prot_bits pte_bits[] = { { .mask = PTE_VALID, .val = PTE_VALID, @@ -143,14 +117,7 @@ static const struct prot_bits pte_bits[] = { } }; -struct pg_level { - const struct prot_bits *bits; - char name[4]; - int num; - u64 mask; -}; - -static struct pg_level pg_level[] __ro_after_init = { +static struct ptdump_pg_level kernel_pg_levels[] __ro_after_init = { { /* pgd */ .name = "PGD", .bits = pte_bits, @@ -174,7 +141,7 @@ static struct pg_level pg_level[] __ro_after_init = { }, }; -static void dump_prot(struct pg_state *st, const struct prot_bits *bits, +static void dump_prot(struct ptdump_pg_state *st, const struct ptdump_prot_bits *bits, size_t num) { unsigned i; @@ -192,7 +159,7 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits, } } -static void note_prot_uxn(struct pg_state *st, unsigned long addr) +static void note_prot_uxn(struct ptdump_pg_state *st, unsigned long addr) { if (!st->check_wx) return; @@ -206,7 +173,7 @@ static void note_prot_uxn(struct pg_state *st, unsigned long addr) st->uxn_pages += (addr - st->start_address) / PAGE_SIZE; } -static void note_prot_wx(struct pg_state *st, unsigned long addr) +static void note_prot_wx(struct ptdump_pg_state *st, unsigned long addr) { if (!st->check_wx) return; @@ -221,16 +188,17 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr) st->wx_pages += (addr - st->start_address) / PAGE_SIZE; } -static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, - u64 val) +void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, + u64 val) { - struct pg_state *st = container_of(pt_st, struct pg_state, ptdump); + struct ptdump_pg_state *st = container_of(pt_st, struct ptdump_pg_state, ptdump); + struct ptdump_pg_level *pg_level = st->pg_level; static const char units[] = "KMGTPE"; u64 prot = 0; /* check if the current level has been folded dynamically */ - if ((level == 1 && mm_p4d_folded(st->mm)) || - (level == 2 && mm_pud_folded(st->mm))) + if (st->mm && ((level == 1 && mm_p4d_folded(st->mm)) || + (level == 2 && mm_pud_folded(st->mm)))) level = 0; if (level >= 0) @@ -286,15 +254,16 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, void ptdump_walk(struct seq_file *s, struct ptdump_info *info) { unsigned long end = ~0UL; - struct pg_state st; + struct ptdump_pg_state st; if (info->base_addr < TASK_SIZE_64) end = TASK_SIZE_64; - st = (struct pg_state){ + st = (struct ptdump_pg_state){ .seq = s, .marker = info->markers, .mm = info->mm, + .pg_level = &kernel_pg_levels[0], .level = -1, .ptdump = { .note_page = note_page, @@ -312,10 +281,10 @@ static void __init ptdump_initialize(void) { unsigned i, j; - for (i = 0; i < ARRAY_SIZE(pg_level); i++) - if (pg_level[i].bits) - for (j = 0; j < pg_level[i].num; j++) - pg_level[i].mask |= pg_level[i].bits[j].mask; + for (i = 0; i < ARRAY_SIZE(kernel_pg_levels); i++) + if (kernel_pg_levels[i].bits) + for (j = 0; j < kernel_pg_levels[i].num; j++) + kernel_pg_levels[i].mask |= kernel_pg_levels[i].bits[j].mask; } static struct ptdump_info kernel_ptdump_info 
__ro_after_init = { @@ -324,12 +293,13 @@ static struct ptdump_info kernel_ptdump_info __ro_after_init = { bool ptdump_check_wx(void) { - struct pg_state st = { + struct ptdump_pg_state st = { .seq = NULL, .marker = (struct addr_marker[]) { { 0, NULL}, { -1, NULL}, }, + .pg_level = &kernel_pg_levels[0], .level = -1, .check_wx = true, .ptdump = { diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c index 5139a28130c088..0f7b484cb2ff20 100644 --- a/arch/arm64/mm/trans_pgd.c +++ b/arch/arm64/mm/trans_pgd.c @@ -42,14 +42,16 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) * the temporary mappings we use during restore. */ __set_pte(dst_ptep, pte_mkwrite_novma(pte)); - } else if ((debug_pagealloc_enabled() || - is_kfence_address((void *)addr)) && !pte_none(pte)) { + } else if (!pte_none(pte)) { /* * debug_pagealloc will removed the PTE_VALID bit if * the page isn't in use by the resume kernel. It may have * been in use by the original kernel, in which case we need * to put it back in our copy to do the restore. * + * Other cases include kfence / vmalloc / memfd_secret which + * may call `set_direct_map_invalid_noflush()`. + * * Before marking this entry valid, check the pfn should * be mapped. */ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index dd0bb069df4bbe..8bbd0b20136a87 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -26,9 +26,8 @@ #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) -#define TCALL_CNT (MAX_BPF_JIT_REG + 2) +#define TCCNT_PTR (MAX_BPF_JIT_REG + 2) #define TMP_REG_3 (MAX_BPF_JIT_REG + 3) -#define FP_BOTTOM (MAX_BPF_JIT_REG + 4) #define ARENA_VM_START (MAX_BPF_JIT_REG + 5) #define check_imm(bits, imm) do { \ @@ -63,11 +62,10 @@ static const int bpf2a64[] = { [TMP_REG_1] = A64_R(10), [TMP_REG_2] = A64_R(11), [TMP_REG_3] = A64_R(12), - /* tail_call_cnt */ - [TCALL_CNT] = A64_R(26), + /* tail_call_cnt_ptr */ + [TCCNT_PTR] = A64_R(26), /* temporary register for blinding constants */ [BPF_REG_AX] = A64_R(9), - [FP_BOTTOM] = A64_R(27), /* callee saved register for kern_vm_start address */ [ARENA_VM_START] = A64_R(28), }; @@ -78,11 +76,15 @@ struct jit_ctx { int epilogue_offset; int *offset; int exentry_idx; + int nr_used_callee_reg; + u8 used_callee_reg[8]; /* r6~r9, fp, arena_vm_start */ __le32 *image; __le32 *ro_image; u32 stack_size; - int fpb_offset; u64 user_vm_start; + u64 arena_vm_start; + bool fp_used; + bool write; }; struct bpf_plt { @@ -96,7 +98,7 @@ struct bpf_plt { static inline void emit(const u32 insn, struct jit_ctx *ctx) { - if (ctx->image != NULL) + if (ctx->image != NULL && ctx->write) ctx->image[ctx->idx] = cpu_to_le32(insn); ctx->idx++; @@ -181,14 +183,47 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val, } } -static inline void emit_call(u64 target, struct jit_ctx *ctx) +static bool should_emit_indirect_call(long target, const struct jit_ctx *ctx) +{ + long offset; + + /* when ctx->ro_image is not allocated or the target is unknown, + * emit indirect call + */ + if (!ctx->ro_image || !target) + return true; + + offset = target - (long)&ctx->ro_image[ctx->idx]; + return offset < -SZ_128M || offset >= SZ_128M; +} + +static void emit_direct_call(u64 target, struct jit_ctx *ctx) { - u8 tmp = bpf2a64[TMP_REG_1]; + u32 insn; + unsigned long pc; + + pc = (unsigned long)&ctx->ro_image[ctx->idx]; + insn = aarch64_insn_gen_branch_imm(pc, target, AARCH64_INSN_BRANCH_LINK); + emit(insn, ctx); +} + +static void 
emit_indirect_call(u64 target, struct jit_ctx *ctx) +{ + u8 tmp; + tmp = bpf2a64[TMP_REG_1]; emit_addr_mov_i64(tmp, target, ctx); emit(A64_BLR(tmp), ctx); } +static void emit_call(u64 target, struct jit_ctx *ctx) +{ + if (should_emit_indirect_call((long)target, ctx)) + emit_indirect_call(target, ctx); + else + emit_direct_call(target, ctx); +} + static inline int bpf2a64_offset(int bpf_insn, int off, const struct jit_ctx *ctx) { @@ -273,21 +308,143 @@ static bool is_lsi_offset(int offset, int scale) return true; } -/* generated prologue: +/* generated main prog prologue: * bti c // if CONFIG_ARM64_BTI_KERNEL * mov x9, lr * nop // POKE_OFFSET * paciasp // if CONFIG_ARM64_PTR_AUTH_KERNEL * stp x29, lr, [sp, #-16]! * mov x29, sp - * stp x19, x20, [sp, #-16]! - * stp x21, x22, [sp, #-16]! - * stp x25, x26, [sp, #-16]! - * stp x27, x28, [sp, #-16]! - * mov x25, sp - * mov tcc, #0 + * stp xzr, x26, [sp, #-16]! + * mov x26, sp * // PROLOGUE_OFFSET + * // save callee-saved registers */ +static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx) +{ + const bool is_main_prog = !bpf_is_subprog(ctx->prog); + const u8 ptr = bpf2a64[TCCNT_PTR]; + + if (is_main_prog) { + /* Initialize tail_call_cnt. */ + emit(A64_PUSH(A64_ZR, ptr, A64_SP), ctx); + emit(A64_MOV(1, ptr, A64_SP), ctx); + } else + emit(A64_PUSH(ptr, ptr, A64_SP), ctx); +} + +static void find_used_callee_regs(struct jit_ctx *ctx) +{ + int i; + const struct bpf_prog *prog = ctx->prog; + const struct bpf_insn *insn = &prog->insnsi[0]; + int reg_used = 0; + + for (i = 0; i < prog->len; i++, insn++) { + if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) + reg_used |= 1; + + if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) + reg_used |= 2; + + if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) + reg_used |= 4; + + if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) + reg_used |= 8; + + if (insn->dst_reg == BPF_REG_FP || insn->src_reg == BPF_REG_FP) { + ctx->fp_used = true; + reg_used |= 16; + } + } + + i = 0; + if (reg_used & 1) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_6]; + + if (reg_used & 2) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_7]; + + if (reg_used & 4) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_8]; + + if (reg_used & 8) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_9]; + + if (reg_used & 16) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_FP]; + + if (ctx->arena_vm_start) + ctx->used_callee_reg[i++] = bpf2a64[ARENA_VM_START]; + + ctx->nr_used_callee_reg = i; +} + +/* Save callee-saved registers */ +static void push_callee_regs(struct jit_ctx *ctx) +{ + int reg1, reg2, i; + + /* + * Program acting as exception boundary should save all ARM64 + * Callee-saved registers as the exception callback needs to recover + * all ARM64 Callee-saved registers in its epilogue. 
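/*
 * Hedged illustration of the range test in should_emit_indirect_call()
 * above: an arm64 BL encodes a signed 26-bit immediate in units of 4 bytes,
 * so a direct call can only reach +/-128MiB from the call site. Standalone
 * helper for illustration only.
 */
static bool demo_target_in_bl_range(unsigned long pc, unsigned long target)
{
	long offset = (long)target - (long)pc;

	return offset >= -(long)SZ_128M && offset < (long)SZ_128M;
}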
+ */ + if (ctx->prog->aux->exception_boundary) { + emit(A64_PUSH(A64_R(19), A64_R(20), A64_SP), ctx); + emit(A64_PUSH(A64_R(21), A64_R(22), A64_SP), ctx); + emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx); + emit(A64_PUSH(A64_R(25), A64_R(26), A64_SP), ctx); + emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx); + } else { + find_used_callee_regs(ctx); + for (i = 0; i + 1 < ctx->nr_used_callee_reg; i += 2) { + reg1 = ctx->used_callee_reg[i]; + reg2 = ctx->used_callee_reg[i + 1]; + emit(A64_PUSH(reg1, reg2, A64_SP), ctx); + } + if (i < ctx->nr_used_callee_reg) { + reg1 = ctx->used_callee_reg[i]; + /* keep SP 16-byte aligned */ + emit(A64_PUSH(reg1, A64_ZR, A64_SP), ctx); + } + } +} + +/* Restore callee-saved registers */ +static void pop_callee_regs(struct jit_ctx *ctx) +{ + struct bpf_prog_aux *aux = ctx->prog->aux; + int reg1, reg2, i; + + /* + * Program acting as exception boundary pushes R23 and R24 in addition + * to BPF callee-saved registers. Exception callback uses the boundary + * program's stack frame, so recover these extra registers in the above + * two cases. + */ + if (aux->exception_boundary || aux->exception_cb) { + emit(A64_POP(A64_R(27), A64_R(28), A64_SP), ctx); + emit(A64_POP(A64_R(25), A64_R(26), A64_SP), ctx); + emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx); + emit(A64_POP(A64_R(21), A64_R(22), A64_SP), ctx); + emit(A64_POP(A64_R(19), A64_R(20), A64_SP), ctx); + } else { + i = ctx->nr_used_callee_reg - 1; + if (ctx->nr_used_callee_reg % 2 != 0) { + reg1 = ctx->used_callee_reg[i]; + emit(A64_POP(reg1, A64_ZR, A64_SP), ctx); + i--; + } + while (i > 0) { + reg1 = ctx->used_callee_reg[i - 1]; + reg2 = ctx->used_callee_reg[i]; + emit(A64_POP(reg1, reg2, A64_SP), ctx); + i -= 2; + } + } +} #define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0) #define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 
1 : 0) @@ -296,20 +453,13 @@ static bool is_lsi_offset(int offset, int scale) #define POKE_OFFSET (BTI_INSNS + 1) /* Tail call offset to jump into */ -#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8) +#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 4) -static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, - bool is_exception_cb, u64 arena_vm_start) +static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf) { const struct bpf_prog *prog = ctx->prog; const bool is_main_prog = !bpf_is_subprog(prog); - const u8 r6 = bpf2a64[BPF_REG_6]; - const u8 r7 = bpf2a64[BPF_REG_7]; - const u8 r8 = bpf2a64[BPF_REG_8]; - const u8 r9 = bpf2a64[BPF_REG_9]; const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 tcc = bpf2a64[TCALL_CNT]; - const u8 fpb = bpf2a64[FP_BOTTOM]; const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; const int idx0 = ctx->idx; int cur_offset; @@ -348,19 +498,28 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, emit(A64_MOV(1, A64_R(9), A64_LR), ctx); emit(A64_NOP, ctx); - if (!is_exception_cb) { + if (!prog->aux->exception_cb) { /* Sign lr */ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) emit(A64_PACIASP, ctx); + /* Save FP and LR registers to stay align with ARM64 AAPCS */ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); emit(A64_MOV(1, A64_FP, A64_SP), ctx); - /* Save callee-saved registers */ - emit(A64_PUSH(r6, r7, A64_SP), ctx); - emit(A64_PUSH(r8, r9, A64_SP), ctx); - emit(A64_PUSH(fp, tcc, A64_SP), ctx); - emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx); + prepare_bpf_tail_call_cnt(ctx); + + if (!ebpf_from_cbpf && is_main_prog) { + cur_offset = ctx->idx - idx0; + if (cur_offset != PROLOGUE_OFFSET) { + pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", + cur_offset, PROLOGUE_OFFSET); + return -1; + } + /* BTI landing pad for the tail call, done with a BR */ + emit_bti(A64_BTI_J, ctx); + } + push_callee_regs(ctx); } else { /* * Exception callback receives FP of Main Program as third @@ -372,58 +531,28 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, * callee-saved registers. The exception callback will not push * anything and re-use the main program's stack. * - * 10 registers are on the stack + * 12 registers are on the stack */ - emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx); + emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx); } - /* Set up BPF prog stack base register */ - emit(A64_MOV(1, fp, A64_SP), ctx); - - if (!ebpf_from_cbpf && is_main_prog) { - /* Initialize tail_call_cnt */ - emit(A64_MOVZ(1, tcc, 0, 0), ctx); - - cur_offset = ctx->idx - idx0; - if (cur_offset != PROLOGUE_OFFSET) { - pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", - cur_offset, PROLOGUE_OFFSET); - return -1; - } - - /* BTI landing pad for the tail call, done with a BR */ - emit_bti(A64_BTI_J, ctx); - } - - /* - * Program acting as exception boundary should save all ARM64 - * Callee-saved registers as the exception callback needs to recover - * all ARM64 Callee-saved registers in its epilogue. 
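/*
 * Hedged sketch of the tail_call_cnt handling set up above: the counter now
 * lives in the main program's stack frame and x26 (TCCNT_PTR) carries a
 * pointer to it, so every subprog and tail-call target updates the same
 * counter instead of a per-frame copy.
 *
 *   main prog frame after the prologue:
 *
 *     [x29/fp, lr]
 *     [xzr, x26]        <- "stp xzr, x26"; the xzr slot is tail_call_cnt
 *      ^
 *      x26 = sp         <- TCCNT_PTR points at the counter
 *
 *   subprogs just re-push x26 ("stp x26, x26"), keeping the pointer alive
 *   across their own frames.
 */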
- */ - if (prog->aux->exception_boundary) { - /* - * As we are pushing two more registers, BPF_FP should be moved - * 16 bytes - */ - emit(A64_SUB_I(1, fp, fp, 16), ctx); - emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx); - } - - emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx); + if (ctx->fp_used) + /* Set up BPF prog stack base register */ + emit(A64_MOV(1, fp, A64_SP), ctx); /* Stack must be multiples of 16B */ ctx->stack_size = round_up(prog->aux->stack_depth, 16); /* Set up function call stack */ - emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); + if (ctx->stack_size) + emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); - if (arena_vm_start) - emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx); + if (ctx->arena_vm_start) + emit_a64_mov_i64(arena_vm_base, ctx->arena_vm_start, ctx); return 0; } -static int out_offset = -1; /* initialized on the first pass of build_body() */ static int emit_bpf_tail_call(struct jit_ctx *ctx) { /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */ @@ -432,11 +561,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const u8 tmp = bpf2a64[TMP_REG_1]; const u8 prg = bpf2a64[TMP_REG_2]; - const u8 tcc = bpf2a64[TCALL_CNT]; - const int idx0 = ctx->idx; -#define cur_offset (ctx->idx - idx0) -#define jmp_offset (out_offset - (cur_offset)) + const u8 tcc = bpf2a64[TMP_REG_3]; + const u8 ptr = bpf2a64[TCCNT_PTR]; size_t off; + __le32 *branch1 = NULL; + __le32 *branch2 = NULL; + __le32 *branch3 = NULL; /* if (index >= array->map.max_entries) * goto out; @@ -446,16 +576,20 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_LDR32(tmp, r2, tmp), ctx); emit(A64_MOV(0, r3, r3), ctx); emit(A64_CMP(0, r3, tmp), ctx); - emit(A64_B_(A64_COND_CS, jmp_offset), ctx); + branch1 = ctx->image + ctx->idx; + emit(A64_NOP, ctx); /* - * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) + * if ((*tail_call_cnt_ptr) >= MAX_TAIL_CALL_CNT) * goto out; - * tail_call_cnt++; */ emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); + emit(A64_LDR64I(tcc, ptr, 0), ctx); emit(A64_CMP(1, tcc, tmp), ctx); - emit(A64_B_(A64_COND_CS, jmp_offset), ctx); + branch2 = ctx->image + ctx->idx; + emit(A64_NOP, ctx); + + /* (*tail_call_cnt_ptr)++; */ emit(A64_ADD_I(1, tcc, tcc, 1), ctx); /* prog = array->ptrs[index]; @@ -467,27 +601,37 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_ADD(1, tmp, r2, tmp), ctx); emit(A64_LSL(1, prg, r3, 3), ctx); emit(A64_LDR64(prg, tmp, prg), ctx); - emit(A64_CBZ(1, prg, jmp_offset), ctx); + branch3 = ctx->image + ctx->idx; + emit(A64_NOP, ctx); + + /* Update tail_call_cnt if the slot is populated. 
*/ + emit(A64_STR64I(tcc, ptr, 0), ctx); + + /* restore SP */ + if (ctx->stack_size) + emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); + + pop_callee_regs(ctx); /* goto *(prog->bpf_func + prologue_offset); */ off = offsetof(struct bpf_prog, bpf_func); emit_a64_mov_i64(tmp, off, ctx); emit(A64_LDR64(tmp, prg, tmp), ctx); emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); - emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); emit(A64_BR(tmp), ctx); - /* out: */ - if (out_offset == -1) - out_offset = cur_offset; - if (cur_offset != out_offset) { - pr_err_once("tail_call out_offset = %d, expected %d!\n", - cur_offset, out_offset); - return -1; + if (ctx->image) { + off = &ctx->image[ctx->idx] - branch1; + *branch1 = cpu_to_le32(A64_B_(A64_COND_CS, off)); + + off = &ctx->image[ctx->idx] - branch2; + *branch2 = cpu_to_le32(A64_B_(A64_COND_CS, off)); + + off = &ctx->image[ctx->idx] - branch3; + *branch3 = cpu_to_le32(A64_CBZ(1, prg, off)); } + return 0; -#undef cur_offset -#undef jmp_offset } #ifdef CONFIG_ARM64_LSE_ATOMICS @@ -713,36 +857,18 @@ static void build_plt(struct jit_ctx *ctx) plt->target = (u64)&dummy_tramp; } -static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb) +static void build_epilogue(struct jit_ctx *ctx) { const u8 r0 = bpf2a64[BPF_REG_0]; - const u8 r6 = bpf2a64[BPF_REG_6]; - const u8 r7 = bpf2a64[BPF_REG_7]; - const u8 r8 = bpf2a64[BPF_REG_8]; - const u8 r9 = bpf2a64[BPF_REG_9]; - const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 fpb = bpf2a64[FP_BOTTOM]; + const u8 ptr = bpf2a64[TCCNT_PTR]; /* We're done with BPF stack */ - emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); + if (ctx->stack_size) + emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); - /* - * Program acting as exception boundary pushes R23 and R24 in addition - * to BPF callee-saved registers. Exception callback uses the boundary - * program's stack frame, so recover these extra registers in the above - * two cases. 
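/*
 * Hedged sketch of the placeholder-branch pattern used by
 * emit_bpf_tail_call() above: emit a NOP where the conditional branch
 * belongs, remember its slot, and rewrite it once the fall-through target
 * is known. The helper below is illustrative only; the offset is a count
 * of 32-bit instructions, as expected by A64_B_().
 */
static void demo_patch_cond_branch(struct jit_ctx *ctx, __le32 *placeholder)
{
	if (ctx->image) {
		long off = &ctx->image[ctx->idx] - placeholder;

		*placeholder = cpu_to_le32(A64_B_(A64_COND_CS, off));
	}
}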
- */ - if (ctx->prog->aux->exception_boundary || is_exception_cb) - emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx); + pop_callee_regs(ctx); - /* Restore x27 and x28 */ - emit(A64_POP(fpb, A64_R(28), A64_SP), ctx); - /* Restore fs (x25) and x26 */ - emit(A64_POP(fp, A64_R(26), A64_SP), ctx); - - /* Restore callee-saved register */ - emit(A64_POP(r8, r9, A64_SP), ctx); - emit(A64_POP(r6, r7, A64_SP), ctx); + emit(A64_POP(A64_ZR, ptr, A64_SP), ctx); /* Restore FP/LR registers */ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); @@ -862,7 +988,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, const u8 tmp = bpf2a64[TMP_REG_1]; const u8 tmp2 = bpf2a64[TMP_REG_2]; const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 fpb = bpf2a64[FP_BOTTOM]; const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; const s16 off = insn->off; const s32 imm = insn->imm; @@ -1314,9 +1439,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx); src = tmp2; } - if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { - src_adj = fpb; - off_adj = off + ctx->fpb_offset; + if (src == fp) { + src_adj = A64_SP; + off_adj = off + ctx->stack_size; } else { src_adj = src; off_adj = off; @@ -1407,9 +1532,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); dst = tmp2; } - if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { - dst_adj = fpb; - off_adj = off + ctx->fpb_offset; + if (dst == fp) { + dst_adj = A64_SP; + off_adj = off + ctx->stack_size; } else { dst_adj = dst; off_adj = off; @@ -1469,9 +1594,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); dst = tmp2; } - if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { - dst_adj = fpb; - off_adj = off + ctx->fpb_offset; + if (dst == fp) { + dst_adj = A64_SP; + off_adj = off + ctx->stack_size; } else { dst_adj = dst; off_adj = off; @@ -1540,79 +1665,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, return 0; } -/* - * Return 0 if FP may change at runtime, otherwise find the minimum negative - * offset to FP, converts it to positive number, and align down to 8 bytes. 
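/*
 * Hedged worked example of the FP handling change above: with FP_BOTTOM
 * (x27) gone, a load/store at a negative offset from the BPF frame pointer
 * is rebased onto SP using the (16-byte aligned) stack size. For a program
 * whose stack_depth rounds up to 32:
 *
 *   BPF:    *(u64 *)(r10 - 8) = r0
 *   arm64:  str x7, [sp, #24]        ; off_adj = -8 + 32, r0 lives in x7
 */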
- */ -static int find_fpb_offset(struct bpf_prog *prog) -{ - int i; - int offset = 0; - - for (i = 0; i < prog->len; i++) { - const struct bpf_insn *insn = &prog->insnsi[i]; - const u8 class = BPF_CLASS(insn->code); - const u8 mode = BPF_MODE(insn->code); - const u8 src = insn->src_reg; - const u8 dst = insn->dst_reg; - const s32 imm = insn->imm; - const s16 off = insn->off; - - switch (class) { - case BPF_STX: - case BPF_ST: - /* fp holds atomic operation result */ - if (class == BPF_STX && mode == BPF_ATOMIC && - ((imm == BPF_XCHG || - imm == (BPF_FETCH | BPF_ADD) || - imm == (BPF_FETCH | BPF_AND) || - imm == (BPF_FETCH | BPF_XOR) || - imm == (BPF_FETCH | BPF_OR)) && - src == BPF_REG_FP)) - return 0; - - if (mode == BPF_MEM && dst == BPF_REG_FP && - off < offset) - offset = insn->off; - break; - - case BPF_JMP32: - case BPF_JMP: - break; - - case BPF_LDX: - case BPF_LD: - /* fp holds load result */ - if (dst == BPF_REG_FP) - return 0; - - if (class == BPF_LDX && mode == BPF_MEM && - src == BPF_REG_FP && off < offset) - offset = off; - break; - - case BPF_ALU: - case BPF_ALU64: - default: - /* fp holds ALU result */ - if (dst == BPF_REG_FP) - return 0; - } - } - - if (offset < 0) { - /* - * safely be converted to a positive 'int', since insn->off - * is 's16' - */ - offset = -offset; - /* align down to 8 bytes */ - offset = ALIGN_DOWN(offset, 8); - } - - return offset; -} - static int build_body(struct jit_ctx *ctx, bool extra_pass) { const struct bpf_prog *prog = ctx->prog; @@ -1631,13 +1683,11 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass) const struct bpf_insn *insn = &prog->insnsi[i]; int ret; - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; + ctx->offset[i] = ctx->idx; ret = build_insn(insn, ctx, extra_pass); if (ret > 0) { i++; - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; + ctx->offset[i] = ctx->idx; continue; } if (ret) @@ -1648,8 +1698,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass) * the last element with the offset after the last * instruction (end of program) */ - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; + ctx->offset[i] = ctx->idx; return 0; } @@ -1701,9 +1750,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bool tmp_blinded = false; bool extra_pass = false; struct jit_ctx ctx; - u64 arena_vm_start; u8 *image_ptr; u8 *ro_image_ptr; + int body_idx; + int exentry_idx; if (!prog->jit_requested) return orig_prog; @@ -1719,7 +1769,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) prog = tmp; } - arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena); jit_data = prog->aux->jit_data; if (!jit_data) { jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); @@ -1749,17 +1798,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_off; } - ctx.fpb_offset = find_fpb_offset(prog); ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena); + ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena); - /* - * 1. Initial fake pass to compute ctx->idx and ctx->offset. + /* Pass 1: Estimate the maximum image size. * * BPF line info needs ctx->offset[i] to be the offset of * instruction[i] in jited image, so build prologue first. 
*/ - if (build_prologue(&ctx, was_classic, prog->aux->exception_cb, - arena_vm_start)) { + if (build_prologue(&ctx, was_classic)) { prog = orig_prog; goto out_off; } @@ -1770,14 +1817,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) } ctx.epilogue_offset = ctx.idx; - build_epilogue(&ctx, prog->aux->exception_cb); + build_epilogue(&ctx); build_plt(&ctx); extable_align = __alignof__(struct exception_table_entry); extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry); - /* Now we know the actual image size. */ + /* Now we know the maximum image size. */ prog_size = sizeof(u32) * ctx.idx; /* also allocate space for plt target */ extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align); @@ -1790,7 +1837,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_off; } - /* 2. Now, the actual pass. */ + /* Pass 2: Determine jited position and result for each instruction */ /* * Use the image(RW) for writing the JITed instructions. But also save @@ -1806,30 +1853,56 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) skip_init_ctx: ctx.idx = 0; ctx.exentry_idx = 0; + ctx.write = true; + + build_prologue(&ctx, was_classic); - build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start); + /* Record exentry_idx and body_idx before first build_body */ + exentry_idx = ctx.exentry_idx; + body_idx = ctx.idx; + /* Dont write body instructions to memory for now */ + ctx.write = false; if (build_body(&ctx, extra_pass)) { prog = orig_prog; goto out_free_hdr; } - build_epilogue(&ctx, prog->aux->exception_cb); + ctx.epilogue_offset = ctx.idx; + ctx.exentry_idx = exentry_idx; + ctx.idx = body_idx; + ctx.write = true; + + /* Pass 3: Adjust jump offset and write final image */ + if (build_body(&ctx, extra_pass) || + WARN_ON_ONCE(ctx.idx != ctx.epilogue_offset)) { + prog = orig_prog; + goto out_free_hdr; + } + + build_epilogue(&ctx); build_plt(&ctx); - /* 3. Extra pass to validate JITed code. */ + /* Extra pass to validate JITed code. */ if (validate_ctx(&ctx)) { prog = orig_prog; goto out_free_hdr; } + /* update the real prog size */ + prog_size = sizeof(u32) * ctx.idx; + /* And we're done. */ if (bpf_jit_enable > 1) bpf_jit_dump(prog->len, prog_size, 2, ctx.image); if (!prog->is_func || extra_pass) { - if (extra_pass && ctx.idx != jit_data->ctx.idx) { - pr_err_once("multi-func JIT bug %d != %d\n", + /* The jited image may shrink since the jited result for + * BPF_CALL to subprog may be changed from indirect call + * to direct call. 
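/*
 * Hedged note on why "ctx.idx > jit_data->ctx.idx" is now the only error
 * case rather than any mismatch: pass 1 assumes the worst-case indirect
 * call (address materialisation plus BLR, up to five instructions), while
 * a later pass may resolve a known target into a single BL. Resolving
 * targets can therefore only shrink the image relative to the pass-1
 * estimate, never grow it.
 */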
+ */ + if (extra_pass && ctx.idx > jit_data->ctx.idx) { + pr_err_once("multi-func JIT bug %d > %d\n", ctx.idx, jit_data->ctx.idx); prog->bpf_func = NULL; prog->jited = 0; @@ -2300,6 +2373,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, .image = image, .ro_image = ro_image, .idx = 0, + .write = true, }; nregs = btf_func_model_nregs(m); diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index ac3429d892b9a7..eedb5acc21ed98 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -45,6 +45,7 @@ HAS_MOPS HAS_NESTED_VIRT HAS_PAN HAS_S1PIE +HAS_S1POE HAS_RAS_EXTN HAS_RNG HAS_SB diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 7ceaa1e0b4bc25..8d637ac4b7c6b9 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2029,6 +2029,31 @@ Sysreg FAR_EL1 3 0 6 0 0 Field 63:0 ADDR EndSysreg +Sysreg PMICNTR_EL0 3 3 9 4 0 +Field 63:0 ICNT +EndSysreg + +Sysreg PMICFILTR_EL0 3 3 9 6 0 +Res0 63:59 +Field 58 SYNC +Field 57:56 VS +Res0 55:32 +Field 31 P +Field 30 U +Field 29 NSK +Field 28 NSU +Field 27 NSH +Field 26 M +Res0 25 +Field 24 SH +Field 23 T +Field 22 RLK +Field 21 RLU +Field 20 RLH +Res0 19:16 +Field 15:0 evtCount +EndSysreg + Sysreg PMSCR_EL1 3 0 9 9 0 Res0 63:8 Field 7:6 PCT @@ -2153,6 +2178,11 @@ Field 4 P Field 3:0 ALIGN EndSysreg +Sysreg PMSELR_EL0 3 3 9 12 5 +Res0 63:5 +Field 4:0 SEL +EndSysreg + SysregFields CONTEXTIDR_ELx Res0 63:32 Field 31:0 PROCID diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c index 7f826331d409b2..1047865e82a903 100644 --- a/arch/csky/abiv1/mmap.c +++ b/arch/csky/abiv1/mmap.c @@ -23,7 +23,8 @@ */ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c index 2ca886e4a458b3..5c9ef63c29f1d9 100644 --- a/arch/csky/kernel/vdso.c +++ b/arch/csky/kernel/vdso.c @@ -45,9 +45,16 @@ arch_initcall(vdso_init); int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { + struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long vdso_base, vdso_len; int ret; + static struct vm_special_mapping vdso_mapping = { + .name = "[vdso]", + }; + static struct vm_special_mapping vvar_mapping = { + .name = "[vvar]", + }; vdso_len = (vdso_pages + 1) << PAGE_SHIFT; @@ -65,22 +72,29 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, */ mm->context.vdso = (void *)vdso_base; - ret = - install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, + vdso_mapping.pages = vdso_pagelist; + vma = + _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC), - vdso_pagelist); + &vdso_mapping); - if (unlikely(ret)) { + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); mm->context.vdso = NULL; goto end; } vdso_base += (vdso_pages << PAGE_SHIFT); - ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, - (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]); + vvar_mapping.pages = &vdso_pagelist[vdso_pages]; + vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE, + (VM_READ | VM_MAYREAD), &vvar_mapping); - if (unlikely(ret)) + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); mm->context.vdso = NULL; + goto end; + } + ret = 0; end: mmap_write_unlock(mm); return ret; diff --git a/arch/hexagon/kernel/vdso.c 
b/arch/hexagon/kernel/vdso.c index 2e4872d6212464..8119084dc519ba 100644 --- a/arch/hexagon/kernel/vdso.c +++ b/arch/hexagon/kernel/vdso.c @@ -51,7 +51,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { int ret; unsigned long vdso_base; + struct vm_area_struct *vma; struct mm_struct *mm = current->mm; + static struct vm_special_mapping vdso_mapping = { + .name = "[vdso]", + }; if (mmap_write_lock_killable(mm)) return -EINTR; @@ -66,16 +70,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) } /* MAYWRITE to allow gdb to COW and set breakpoints. */ - ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, + vdso_mapping.pages = &vdso_page; + vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, - &vdso_page); + &vdso_mapping); - if (ret) + ret = PTR_ERR(vma); + if (IS_ERR(vma)) goto up_fail; mm->context.vdso = (void *)vdso_base; - + ret = 0; up_fail: mmap_write_unlock(mm); return ret; diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 70f169210b523f..bb35c34f86d23a 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -25,6 +25,8 @@ config LOONGARCH select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SET_MEMORY + select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_INLINE_READ_LOCK if !PREEMPTION select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION @@ -82,9 +84,11 @@ config LOONGARCH select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_DEVICES + select GENERIC_CPU_VULNERABILITIES select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY select GENERIC_IOREMAP if !ARCH_IOREMAP + select GENERIC_IRQ_MATRIX_ALLOCATOR select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW @@ -146,7 +150,7 @@ config LOONGARCH select HAVE_LIVEPATCH select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI - select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG + select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS @@ -190,6 +194,7 @@ config LOONGARCH select TRACE_IRQFLAGS_SUPPORT select USE_PERCPU_NUMA_NODE_ID select USER_STACKTRACE_SUPPORT + select VDSO_GETRANDOM select ZONE_DMA32 config 32BIT @@ -265,7 +270,7 @@ config AS_HAS_FCSR_CLASS def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0) config AS_HAS_THIN_ADD_SUB - def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) + def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM config AS_HAS_LSX_EXTENSION def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index b4252c357c8e23..75b366407a60a3 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -96,7 +96,6 @@ CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y CONFIG_ZBUD=y -CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=m # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c index 3eebea3a7b478d..b7d9782827f55e 100644 --- a/arch/loongarch/crypto/crc32-loongarch.c +++ b/arch/loongarch/crypto/crc32-loongarch.c @@ -13,7 +13,7 @@ #include #include -#include +#include #define _CRC32(crc, value, size, type) \ do { \ diff --git a/arch/loongarch/include/asm/Kbuild 
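/*
 * Hedged sketch of the pattern the csky and hexagon conversions above
 * follow: _install_special_mapping() takes a long-lived
 * struct vm_special_mapping (so the area keeps its "[vdso]" name in
 * /proc/<pid>/maps) and returns a VMA or an ERR_PTR() instead of an int.
 * The demo_*() names are made up.
 */
static struct vm_special_mapping demo_vdso_mapping = {
	.name = "[vdso]",
};

static int demo_map_vdso_page(struct mm_struct *mm, unsigned long base,
			      struct page **pages)
{
	struct vm_area_struct *vma;

	demo_vdso_mapping.pages = pages;
	vma = _install_special_mapping(mm, base, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
				       &demo_vdso_mapping);

	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}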
b/arch/loongarch/include/asm/Kbuild index 2bb3676429c052..5b5a6c90e6e207 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -6,8 +6,8 @@ generic-y += mcs_spinlock.h generic-y += parport.h generic-y += early_ioremap.h generic-y += qrwlock.h -generic-y += qspinlock.h generic-y += user.h generic-y += ioctl.h +generic-y += mmzone.h generic-y += statfs.h generic-y += param.h diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h index 99af8b3160a88f..c86f0ab922ec7d 100644 --- a/arch/loongarch/include/asm/atomic.h +++ b/arch/loongarch/include/asm/atomic.h @@ -15,6 +15,7 @@ #define __LL "ll.w " #define __SC "sc.w " #define __AMADD "amadd.w " +#define __AMOR "amor.w " #define __AMAND_DB "amand_db.w " #define __AMOR_DB "amor_db.w " #define __AMXOR_DB "amxor_db.w " @@ -22,6 +23,7 @@ #define __LL "ll.d " #define __SC "sc.d " #define __AMADD "amadd.d " +#define __AMOR "amor.d " #define __AMAND_DB "amand_db.d " #define __AMOR_DB "amor_db.d " #define __AMXOR_DB "amxor_db.d " diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h index 2eafe6a6aca818..fc83bb32f9f09d 100644 --- a/arch/loongarch/include/asm/cpu-features.h +++ b/arch/loongarch/include/asm/cpu-features.h @@ -51,6 +51,7 @@ #define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS) #define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips) #define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR) +#define cpu_has_iocsr cpu_opt(LOONGARCH_CPU_IOCSR) #define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB) #define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH) #define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT) @@ -65,5 +66,7 @@ #define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID) #define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR) #define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW) +#define cpu_has_lspw cpu_opt(LOONGARCH_CPU_LSPW) +#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT) #endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h index 48b9f7168bcca0..98cf4d7b4b0a08 100644 --- a/arch/loongarch/include/asm/cpu.h +++ b/arch/loongarch/include/asm/cpu.h @@ -87,18 +87,21 @@ enum cpu_type_enum { #define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */ #define CPU_FEATURE_TLB 13 /* CPU has TLB */ #define CPU_FEATURE_CSR 14 /* CPU has CSR */ -#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */ -#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */ -#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */ -#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */ -#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */ -#define CPU_FEATURE_PMP 20 /* CPU has perfermance counter */ -#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */ -#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */ -#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */ -#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ -#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */ -#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */ +#define CPU_FEATURE_IOCSR 15 /* CPU has IOCSR */ +#define CPU_FEATURE_WATCH 16 /* CPU has watchpoint registers */ +#define CPU_FEATURE_VINT 17 /* CPU has vectored interrupts */ +#define CPU_FEATURE_CSRIPI 18 /* CPU has CSR-IPI */ +#define CPU_FEATURE_EXTIOI 19 /* CPU has EXT-IOI */ +#define CPU_FEATURE_PREFETCH 20 /* CPU has prefetch instructions */ +#define 
CPU_FEATURE_PMP 21 /* CPU has perfermance counter */ +#define CPU_FEATURE_SCALEFREQ 22 /* CPU supports cpufreq scaling */ +#define CPU_FEATURE_FLATMODE 23 /* CPU has flat mode */ +#define CPU_FEATURE_EIODECODE 24 /* CPU has EXTIOI interrupt pin decode mode */ +#define CPU_FEATURE_GUESTID 25 /* CPU has GuestID feature */ +#define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */ +#define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */ +#define CPU_FEATURE_LSPW 28 /* CPU has LSPW (lddir/ldpte instructions) */ +#define CPU_FEATURE_AVECINT 29 /* CPU has AVEC interrupt */ #define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) #define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) @@ -114,6 +117,7 @@ enum cpu_type_enum { #define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM) #define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS) #define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB) +#define LOONGARCH_CPU_IOCSR BIT_ULL(CPU_FEATURE_IOCSR) #define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR) #define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH) #define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT) @@ -127,5 +131,7 @@ enum cpu_type_enum { #define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID) #define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR) #define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW) +#define LOONGARCH_CPU_LSPW BIT_ULL(CPU_FEATURE_LSPW) +#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT) #endif /* _ASM_CPU_H */ diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index 1d7feb71951572..10da8d6961cb04 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -12,12 +12,13 @@ extern void ack_bad_irq(unsigned int irq); #define ack_bad_irq ack_bad_irq -#define NR_IPI 3 +#define NR_IPI 4 enum ipi_msg_type { IPI_RESCHEDULE, IPI_CALL_FUNCTION, IPI_IRQ_WORK, + IPI_CLEAR_VECTOR, }; typedef struct { diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 480418bc5071a5..9c2ca785faa9bd 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -39,11 +39,22 @@ void spurious_interrupt(void); #define NR_IRQS_LEGACY 16 +/* + * 256 Vectors Mapping for AVECINTC: + * + * 0 - 15: Mapping classic IPs, e.g. IP0-12. + * 16 - 255: Mapping vectors for external IRQ. 
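/*
 * Hedged worked example of the sizing this vector layout implies (see the
 * NR_IRQS definition just below): with AVECINTC each CPU owns a 256-entry
 * vector space, so for a hypothetical NR_CPUS=256, MAX_IO_PICS=2 build:
 *
 *   NR_IRQS = 64 + 256 * (256 + 2) = 66112
 *
 * versus 64 + 256 * 2 = 576 with the old definition.
 */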
+ * + */ +#define NR_VECTORS 256 +#define NR_LEGACY_VECTORS 16 +#define IRQ_MATRIX_BITS NR_VECTORS + #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu); #define MAX_IO_PICS 2 -#define NR_IRQS (64 + (256 * MAX_IO_PICS)) +#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS)) struct acpi_vector_group { int node; @@ -65,7 +76,7 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS]; #define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15) #define LOONGSON_CPU_IRQ_BASE 16 -#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14) +#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 15) #define LOONGSON_PCH_IRQ_BASE 64 #define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47) @@ -88,20 +99,8 @@ struct acpi_madt_bio_pic; struct acpi_madt_msi_pic; struct acpi_madt_lpc_pic; -int liointc_acpi_init(struct irq_domain *parent, - struct acpi_madt_lio_pic *acpi_liointc); -int eiointc_acpi_init(struct irq_domain *parent, - struct acpi_madt_eio_pic *acpi_eiointc); - -int htvec_acpi_init(struct irq_domain *parent, - struct acpi_madt_ht_pic *acpi_htvec); -int pch_lpc_acpi_init(struct irq_domain *parent, - struct acpi_madt_lpc_pic *acpi_pchlpc); -int pch_msi_acpi_init(struct irq_domain *parent, - struct acpi_madt_msi_pic *acpi_pchmsi); -int pch_pic_acpi_init(struct irq_domain *parent, - struct acpi_madt_bio_pic *acpi_pchpic); -int find_pch_pic(u32 gsi); +void complete_irq_moving(void); + struct fwnode_handle *get_pch_msi_handle(int pci_segment); extern struct acpi_madt_lio_pic *acpi_liointc; diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h index 724ca8b7b4011b..4a76ce796f1f40 100644 --- a/arch/loongarch/include/asm/kvm_csr.h +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -30,6 +30,7 @@ : [val] "+r" (__v) \ : [reg] "i" (csr) \ : "memory"); \ + __v; \ }) #define gcsr_xchg(v, m, csr) \ @@ -181,6 +182,8 @@ __BUILD_GCSR_OP(tlbidx) #define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid)) #define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid)) +#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid)) + int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid) @@ -208,4 +211,7 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, csr->csrs[gid] |= val & _mask; } +#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \ + CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3) + #endif /* __ASM_LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 5f0677e03817ba..d6bb72424027aa 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -30,6 +30,7 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1) +#define KVM_REQ_PMU KVM_ARCH_REQ(2) #define KVM_GUESTDBG_SW_BP_MASK \ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) @@ -60,9 +61,13 @@ struct kvm_arch_memory_slot { unsigned long flags; }; +#define HOST_MAX_PMNUM 16 struct kvm_context { unsigned long vpid_cache; struct kvm_vcpu *last_vcpu; + /* Host PMU CSR */ + u64 perf_ctrl[HOST_MAX_PMNUM]; + u64 perf_cntr[HOST_MAX_PMNUM]; }; struct kvm_world_switch { @@ -107,6 +112,8 @@ struct kvm_arch { unsigned int root_level; spinlock_t phyid_map_lock; struct 
kvm_phyid_map *phyid_map; + /* Enabled PV features */ + unsigned long pv_features; s64 time_offset; struct kvm_context __percpu *vmcs; @@ -133,8 +140,15 @@ enum emulation_result { #define KVM_LARCH_FPU (0x1 << 0) #define KVM_LARCH_LSX (0x1 << 1) #define KVM_LARCH_LASX (0x1 << 2) -#define KVM_LARCH_SWCSR_LATEST (0x1 << 3) -#define KVM_LARCH_HWCSR_USABLE (0x1 << 4) +#define KVM_LARCH_LBT (0x1 << 3) +#define KVM_LARCH_PMU (0x1 << 4) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 5) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 6) + +#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63) +#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \ + BIT(KVM_FEATURE_STEAL_TIME) | \ + BIT(KVM_FEATURE_VIRT_EXTIOI)) struct kvm_vcpu_arch { /* @@ -168,10 +182,14 @@ struct kvm_vcpu_arch { /* FPU state */ struct loongarch_fpu fpu FPU_ALIGN; + struct loongarch_lbt lbt; /* CSR state */ struct loongarch_csrs *csr; + /* Guest max PMU CSR id */ + int max_pmu_csrid; + /* GPR used as IO source/target */ u32 io_gpr; @@ -239,6 +257,21 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) return arch->cpucfg[2] & CPUCFG2_LASX; } +static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT); +} + +static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[6] & CPUCFG6_PMP; +} + +static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) +{ + return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; +} + /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index 43ec61589e6cde..c4e84227280dc1 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -2,6 +2,8 @@ #ifndef _ASM_LOONGARCH_KVM_PARA_H #define _ASM_LOONGARCH_KVM_PARA_H +#include + /* * Hypercall code field */ @@ -154,10 +156,20 @@ static __always_inline long kvm_hypercall5(u64 fid, return ret; } +#ifdef CONFIG_PARAVIRT +bool kvm_para_available(void); +unsigned int kvm_arch_para_features(void); +#else +static inline bool kvm_para_available(void) +{ + return false; +} + static inline unsigned int kvm_arch_para_features(void) { return 0; } +#endif static inline unsigned int kvm_arch_para_hints(void) { diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 86570084e05aa8..d7e8f7d50ee0ce 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -75,6 +75,12 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } #endif +#ifdef CONFIG_CPU_HAS_LBT +int kvm_own_lbt(struct kvm_vcpu *vcpu); +#else +static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; } +#endif + void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_save_timer(struct kvm_vcpu *vcpu); void kvm_restore_timer(struct kvm_vcpu *vcpu); @@ -124,4 +130,9 @@ static inline bool kvm_pvtime_supported(void) return !!sched_info_on(); } +static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature) +{ + return vcpu->kvm->arch.pv_features & BIT(feature); +} + #endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 04a78010fc725e..26542413a5b0ea 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ 
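/*
 * Hedged illustration of the one-line gcsr_read() fix above: a GNU
 * statement expression takes the value of its last statement, so the
 * trailing "__v;" is what makes the macro usable as an rvalue. The
 * demo_csr_read()/hw_read_csr() names below are made up.
 */
#define demo_csr_read(csr)						\
({									\
	unsigned long __v;						\
									\
	__v = hw_read_csr(csr);	/* stands in for the real asm */	\
	__v;			/* value of the whole ({ }) block */	\
})

/* usage: unsigned long val = demo_csr_read(SOME_CSR); */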
-62,6 +62,7 @@ #define LOONGARCH_CPUCFG1 0x1 #define CPUCFG1_ISGR32 BIT(0) #define CPUCFG1_ISGR64 BIT(1) +#define CPUCFG1_ISA GENMASK(1, 0) #define CPUCFG1_PAGING BIT(2) #define CPUCFG1_IOCSR BIT(3) #define CPUCFG1_PABITS GENMASK(11, 4) @@ -119,6 +120,7 @@ #define CPUCFG6_PMP BIT(0) #define CPUCFG6_PAMVER GENMASK(3, 1) #define CPUCFG6_PMNUM GENMASK(7, 4) +#define CPUCFG6_PMNUM_SHIFT 4 #define CPUCFG6_PMBITS GENMASK(13, 8) #define CPUCFG6_UPM BIT(14) @@ -160,16 +162,8 @@ /* * CPUCFG index area: 0x40000000 -- 0x400000ff - * SW emulation for KVM hypervirsor + * SW emulation for KVM hypervirsor, see arch/loongarch/include/uapi/asm/kvm_para.h */ -#define CPUCFG_KVM_BASE 0x40000000 -#define CPUCFG_KVM_SIZE 0x100 - -#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0) -#define KVM_SIGNATURE "KVM\0" -#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) -#define KVM_FEATURE_IPI BIT(1) -#define KVM_FEATURE_STEAL_TIME BIT(2) #ifndef __ASSEMBLY__ @@ -253,8 +247,8 @@ #define CSR_ESTAT_EXC_WIDTH 6 #define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT) #define CSR_ESTAT_IS_SHIFT 0 -#define CSR_ESTAT_IS_WIDTH 14 -#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT) +#define CSR_ESTAT_IS_WIDTH 15 +#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT) #define LOONGARCH_CSR_ERA 0x6 /* ERA */ @@ -649,6 +643,13 @@ #define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */ +#define LOONGARCH_CSR_ISR0 0xa0 +#define LOONGARCH_CSR_ISR1 0xa1 +#define LOONGARCH_CSR_ISR2 0xa2 +#define LOONGARCH_CSR_ISR3 0xa3 + +#define LOONGARCH_CSR_IRR 0xa4 + #define LOONGARCH_CSR_PRID 0xc0 /* Shadow MCSR : 0xc0 ~ 0xff */ @@ -1011,7 +1012,7 @@ /* * CSR_ECFG IM */ -#define ECFG0_IM 0x00001fff +#define ECFG0_IM 0x00005fff #define ECFGB_SIP0 0 #define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0) #define ECFGB_SIP1 1 @@ -1054,6 +1055,7 @@ #define IOCSRF_EIODECODE BIT_ULL(9) #define IOCSRF_FLATMODE BIT_ULL(10) #define IOCSRF_VM BIT_ULL(11) +#define IOCSRF_AVEC BIT_ULL(15) #define LOONGARCH_IOCSR_VENDOR 0x10 @@ -1065,6 +1067,7 @@ #define IOCSR_MISC_FUNC_SOFT_INT BIT_ULL(10) #define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21) #define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48) +#define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51) #define LOONGARCH_IOCSR_CPUTEMP 0x428 @@ -1387,9 +1390,10 @@ __BUILD_CSR_OP(tlbidx) #define INT_TI 11 /* Timer */ #define INT_IPI 12 #define INT_NMI 13 +#define INT_AVEC 14 /* ExcCodes corresponding to interrupts */ -#define EXCCODE_INT_NUM (INT_NMI + 1) +#define EXCCODE_INT_NUM (INT_AVEC + 1) #define EXCCODE_INT_START 64 #define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1) diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h index 9f97c3453b9ceb..304363bd393524 100644 --- a/arch/loongarch/include/asm/mmu_context.h +++ b/arch/loongarch/include/asm/mmu_context.h @@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) /* Normal, classic get_new_mmu_context */ static inline void -get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) +get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush) { u64 asid = asid_cache(cpu); if (!((++asid) & cpu_asid_mask(&cpu_data[cpu]))) - local_flush_tlb_user(); /* start new asid cycle */ + *need_flush = true; /* start new asid cycle */ cpu_context(cpu, mm) = asid_cache(cpu) = asid; } @@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) return 0; } +static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl) +{ + __asm__ 
__volatile__( + "csrwr %[pgdl_val], %[pgdl_reg] \n\t" + "csrwr %[asid_val], %[asid_reg] \n\t" + : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl) + : [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL) + : "memory" + ); +} + static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { + bool need_flush = false; unsigned int cpu = smp_processor_id(); /* Check if our ASID is of an older version and thus invalid */ if (!asid_valid(next, cpu)) - get_new_mmu_context(next, cpu); - - write_csr_asid(cpu_asid(cpu, next)); + get_new_mmu_context(next, cpu, &need_flush); if (next != &init_mm) - csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL); + atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd); else - csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir); + + if (need_flush) + local_flush_tlb_user(); /* Flush tlb after update ASID */ /* * Mark current->active_mm as not "active" anymore. @@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu) asid = read_csr_asid() & cpu_asid_mask(¤t_cpu_data); if (asid == cpu_asid(cpu, mm)) { + bool need_flush = false; + if (!current->mm || (current->mm == mm)) { - get_new_mmu_context(mm, cpu); + get_new_mmu_context(mm, cpu, &need_flush); + write_csr_asid(cpu_asid(cpu, mm)); + if (need_flush) + local_flush_tlb_user(); /* Flush tlb after update ASID */ + goto out; } } diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h deleted file mode 100644 index 2b9a90727e1912..00000000000000 --- a/arch/loongarch/include/asm/mmzone.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Author: Huacai Chen (chenhuacai@loongson.cn) - * Copyright (C) 2020-2022 Loongson Technology Corporation Limited - */ -#ifndef _ASM_MMZONE_H_ -#define _ASM_MMZONE_H_ - -#include -#include - -extern struct pglist_data *node_data[]; - -#define NODE_DATA(nid) (node_data[(nid)]) - -#endif /* _ASM_MMZONE_H_ */ diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h index dddec49671ae42..3f4323603e6aa9 100644 --- a/arch/loongarch/include/asm/paravirt.h +++ b/arch/loongarch/include/asm/paravirt.h @@ -19,6 +19,7 @@ static inline u64 paravirt_steal_clock(int cpu) int __init pv_ipi_init(void); int __init pv_time_init(void); +int __init pv_spinlock_init(void); #else @@ -31,5 +32,11 @@ static inline int pv_time_init(void) { return 0; } + +static inline int pv_spinlock_init(void) +{ + return 0; +} + #endif // CONFIG_PARAVIRT #endif diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h index 8f290e5546cf72..87be9b14e9dabf 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -68,75 +68,6 @@ PERCPU_OP(and, and, &) PERCPU_OP(or, or, |) #undef PERCPU_OP -static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size) -{ - unsigned long ret; - - switch (size) { - case 1: - __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n" - : [ret] "=&r"(ret) - : [ptr] "r"(ptr) - : "memory"); - break; - case 2: - __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n" - : [ret] "=&r"(ret) - : [ptr] "r"(ptr) - : "memory"); - break; - case 4: - __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n" - : [ret] "=&r"(ret) - : [ptr] "r"(ptr) - : "memory"); - break; - case 8: - __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n" - : [ret] "=&r"(ret) 
- : [ptr] "r"(ptr) - : "memory"); - break; - default: - ret = 0; - BUILD_BUG(); - } - - return ret; -} - -static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size) -{ - switch (size) { - case 1: - __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n" - : - : [val] "r" (val), [ptr] "r" (ptr) - : "memory"); - break; - case 2: - __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n" - : - : [val] "r" (val), [ptr] "r" (ptr) - : "memory"); - break; - case 4: - __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n" - : - : [val] "r" (val), [ptr] "r" (ptr) - : "memory"); - break; - case 8: - __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n" - : - : [val] "r" (val), [ptr] "r" (ptr) - : "memory"); - break; - default: - BUILD_BUG(); - } -} - static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size) { switch (size) { @@ -157,6 +88,33 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, return 0; } +#define __pcpu_op_1(op) op ".b " +#define __pcpu_op_2(op) op ".h " +#define __pcpu_op_4(op) op ".w " +#define __pcpu_op_8(op) op ".d " + +#define _percpu_read(size, _pcp) \ +({ \ + typeof(_pcp) __pcp_ret; \ + \ + __asm__ __volatile__( \ + __pcpu_op_##size("ldx") "%[ret], $r21, %[ptr] \n" \ + : [ret] "=&r"(__pcp_ret) \ + : [ptr] "r"(&(_pcp)) \ + : "memory"); \ + \ + __pcp_ret; \ +}) + +#define _percpu_write(size, _pcp, _val) \ +do { \ + __asm__ __volatile__( \ + __pcpu_op_##size("stx") "%[val], $r21, %[ptr] \n" \ + : \ + : [val] "r"(_val), [ptr] "r"(&(_pcp)) \ + : "memory"); \ +} while (0) + /* this_cpu_cmpxchg */ #define _protect_cmpxchg_local(pcp, o, n) \ ({ \ @@ -167,18 +125,6 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, __ret; \ }) -#define _percpu_read(pcp) \ -({ \ - typeof(pcp) __retval; \ - __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \ - __retval; \ -}) - -#define _percpu_write(pcp, val) \ -do { \ - __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \ -} while (0) \ - #define _pcp_protect(operation, pcp, val) \ ({ \ typeof(pcp) __retval; \ @@ -215,15 +161,15 @@ do { \ #define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) #define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) -#define this_cpu_read_1(pcp) _percpu_read(pcp) -#define this_cpu_read_2(pcp) _percpu_read(pcp) -#define this_cpu_read_4(pcp) _percpu_read(pcp) -#define this_cpu_read_8(pcp) _percpu_read(pcp) +#define this_cpu_read_1(pcp) _percpu_read(1, pcp) +#define this_cpu_read_2(pcp) _percpu_read(2, pcp) +#define this_cpu_read_4(pcp) _percpu_read(4, pcp) +#define this_cpu_read_8(pcp) _percpu_read(8, pcp) -#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val) -#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val) -#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) -#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_1(pcp, val) _percpu_write(1, pcp, val) +#define this_cpu_write_2(pcp, val) _percpu_write(2, pcp, val) +#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val) +#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val) #define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) #define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 85431f20a14db7..9965f52ef65b6f 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -331,29 +331,23 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) 
* Make sure the buddy is global too (if it's !none, * it better already be global) */ + if (pte_none(ptep_get(buddy))) { #ifdef CONFIG_SMP - /* - * For SMP, multiple CPUs can race, so we need to do - * this atomically. - */ - unsigned long page_global = _PAGE_GLOBAL; - unsigned long tmp; - - __asm__ __volatile__ ( - "1:" __LL "%[tmp], %[buddy] \n" - " bnez %[tmp], 2f \n" - " or %[tmp], %[tmp], %[global] \n" - __SC "%[tmp], %[buddy] \n" - " beqz %[tmp], 1b \n" - " nop \n" - "2: \n" - __WEAK_LLSC_MB - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) - : [global] "r" (page_global)); + /* + * For SMP, multiple CPUs can race, so we need + * to do this atomically. + */ + __asm__ __volatile__( + __AMOR "$zero, %[global], %[buddy] \n" + : [buddy] "+ZB" (buddy->pte) + : [global] "r" (_PAGE_GLOBAL) + : "memory"); + + DBAR(0b11000); /* o_wrw = 0b11000 */ #else /* !CONFIG_SMP */ - if (pte_none(ptep_get(buddy))) WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL)); #endif /* CONFIG_SMP */ + } } } diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h new file mode 100644 index 00000000000000..e76d3aa1e1ebe7 --- /dev/null +++ b/arch/loongarch/include/asm/qspinlock.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_QSPINLOCK_H +#define _ASM_LOONGARCH_QSPINLOCK_H + +#include + +#ifdef CONFIG_PARAVIRT + +DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key); + +#define virt_spin_lock virt_spin_lock + +static inline bool virt_spin_lock(struct qspinlock *lock) +{ + int val; + + if (!static_branch_unlikely(&virt_spin_lock_key)) + return false; + + /* + * On hypervisors without PARAVIRT_SPINLOCKS support we fall + * back to a Test-and-Set spinlock, because fair locks have + * horrible lock 'holder' preemption issues. + */ + +__retry: + val = atomic_read(&lock->val); + + if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) { + cpu_relax(); + goto __retry; + } + + return true; +} + +#endif /* CONFIG_PARAVIRT */ + +#include + +#endif // _ASM_LOONGARCH_QSPINLOCK_H diff --git a/arch/loongarch/include/asm/set_memory.h b/arch/loongarch/include/asm/set_memory.h new file mode 100644 index 00000000000000..d70505b6676cb3 --- /dev/null +++ b/arch/loongarch/include/asm/set_memory.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_SET_MEMORY_H +#define _ASM_LOONGARCH_SET_MEMORY_H + +/* + * Functions to change memory attributes. 
+ */ +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); + +bool kernel_page_present(struct page *page); +int set_direct_map_default_noflush(struct page *page); +int set_direct_map_invalid_noflush(struct page *page); + +#endif /* _ASM_LOONGARCH_SET_MEMORY_H */ diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index 50db503f44e3c7..3383c9d24e9424 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -70,10 +70,12 @@ extern int __cpu_logical_map[NR_CPUS]; #define ACTION_RESCHEDULE 1 #define ACTION_CALL_FUNCTION 2 #define ACTION_IRQ_WORK 3 +#define ACTION_CLEAR_VECTOR 4 #define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU) #define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE) #define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION) #define SMP_IRQ_WORK BIT(ACTION_IRQ_WORK) +#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR) struct secondary_data { unsigned long stack; diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h index 66128dec0bf6e0..50273c9187d020 100644 --- a/arch/loongarch/include/asm/topology.h +++ b/arch/loongarch/include/asm/topology.h @@ -8,6 +8,7 @@ #include #ifdef CONFIG_NUMA +#include extern cpumask_t cpus_on_node[]; diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h new file mode 100644 index 00000000000000..02f36772541b7d --- /dev/null +++ b/arch/loongarch/include/asm/vdso/getrandom.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Xi Ruoyao . All Rights Reserved. + */ +#ifndef __ASM_VDSO_GETRANDOM_H +#define __ASM_VDSO_GETRANDOM_H + +#ifndef __ASSEMBLY__ + +#include +#include + +static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags) +{ + register long ret asm("a0"); + register long nr asm("a7") = __NR_getrandom; + register void *buffer asm("a0") = _buffer; + register size_t len asm("a1") = _len; + register unsigned int flags asm("a2") = _flags; + + asm volatile( + " syscall 0\n" + : "+r" (ret) + : "r" (nr), "r" (buffer), "r" (len), "r" (flags) + : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8", + "memory"); + + return ret; +} + +static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void) +{ + return (const struct vdso_rng_data *)(get_vdso_data() + VVAR_LOONGARCH_PAGES_START * + PAGE_SIZE + offsetof(struct loongarch_vdso_data, rng_data)); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h index 5a12309d9fb555..e31ac7474513c7 100644 --- a/arch/loongarch/include/asm/vdso/vdso.h +++ b/arch/loongarch/include/asm/vdso/vdso.h @@ -4,6 +4,9 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#ifndef _ASM_VDSO_VDSO_H +#define _ASM_VDSO_VDSO_H + #ifndef __ASSEMBLY__ #include @@ -16,6 +19,7 @@ struct vdso_pcpu_data { struct loongarch_vdso_data { struct vdso_pcpu_data pdata[NR_CPUS]; + struct vdso_rng_data rng_data; }; /* @@ -63,3 +67,5 @@ static inline unsigned long get_vdso_data(void) } #endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h index 5de615383a22f4..b1273ce6f14069 100644 --- a/arch/loongarch/include/asm/vdso/vsyscall.h +++ 
b/arch/loongarch/include/asm/vdso/vsyscall.h @@ -8,6 +8,7 @@ #include extern struct vdso_data *vdso_data; +extern struct vdso_rng_data *vdso_rng_data; /* * Update the vDSO data page to keep in sync with kernel timekeeping. @@ -19,6 +20,13 @@ struct vdso_data *__loongarch_get_k_vdso_data(void) } #define __arch_get_k_vdso_data __loongarch_get_k_vdso_data +static __always_inline +struct vdso_rng_data *__loongarch_get_k_vdso_rng_data(void) +{ + return vdso_rng_data; +} +#define __arch_get_k_vdso_rng_data __loongarch_get_k_vdso_rng_data + /* The asm-generic header needs to be included after the definitions above */ #include diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild index c6d141d7b7d734..517761419999a8 100644 --- a/arch/loongarch/include/uapi/asm/Kbuild +++ b/arch/loongarch/include/uapi/asm/Kbuild @@ -1,4 +1,2 @@ # SPDX-License-Identifier: GPL-2.0 syscall-y += unistd_64.h - -generic-y += kvm_para.h diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h index 6955a7cb2c65d1..2b34e56cfa9ed0 100644 --- a/arch/loongarch/include/uapi/asm/hwcap.h +++ b/arch/loongarch/include/uapi/asm/hwcap.h @@ -17,5 +17,6 @@ #define HWCAP_LOONGARCH_LBT_ARM (1 << 11) #define HWCAP_LOONGARCH_LBT_MIPS (1 << 12) #define HWCAP_LOONGARCH_PTW (1 << 13) +#define HWCAP_LOONGARCH_LSPW (1 << 14) #endif /* _UAPI_ASM_HWCAP_H */ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index ddc5cab0ffd005..70d89070bfeb46 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -64,6 +64,7 @@ struct kvm_fpu { #define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) #define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) #define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL) #define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) #define KVM_CSR_IDX_MASK 0x7fff #define KVM_CPUCFG_IDX_MASK 0x7fff @@ -77,11 +78,30 @@ struct kvm_fpu { /* Debugging: Special instruction for software breakpoint */ #define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3) +/* LBT registers */ +#define KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6) + #define LOONGARCH_REG_SHIFT 3 #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) #define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) #define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) +/* Device Control API on vm fd */ +#define KVM_LOONGARCH_VM_FEAT_CTRL 0 +#define KVM_LOONGARCH_VM_FEAT_LSX 0 +#define KVM_LOONGARCH_VM_FEAT_LASX 1 +#define KVM_LOONGARCH_VM_FEAT_X86BT 2 +#define KVM_LOONGARCH_VM_FEAT_ARMBT 3 +#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4 +#define KVM_LOONGARCH_VM_FEAT_PMU 5 +#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6 +#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7 + /* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 #define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 diff --git 
a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h new file mode 100644 index 00000000000000..b0604aa9b4bbd2 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/kvm_para.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_KVM_PARA_H +#define _UAPI_ASM_KVM_PARA_H + +#include + +/* + * CPUCFG index area: 0x40000000 -- 0x400000ff + * SW emulation for KVM hypervirsor + */ +#define CPUCFG_KVM_BASE 0x40000000 +#define CPUCFG_KVM_SIZE 0x100 +#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0) +#define KVM_SIGNATURE "KVM\0" +#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) +#define KVM_FEATURE_IPI 1 +#define KVM_FEATURE_STEAL_TIME 2 +/* BIT 24 - 31 are features configurable by user space vmm */ +#define KVM_FEATURE_VIRT_EXTIOI 24 + +#endif /* _UAPI_ASM_KVM_PARA_H */ diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h index 6c22f616b8f15d..5cd121275bacbc 100644 --- a/arch/loongarch/include/uapi/asm/sigcontext.h +++ b/arch/loongarch/include/uapi/asm/sigcontext.h @@ -9,7 +9,6 @@ #define _UAPI_ASM_SIGCONTEXT_H #include -#include /* FP context was used */ #define SC_USED_FP (1 << 0) diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 929a497c987e84..f1a74b80f22c54 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -212,6 +213,9 @@ void __init acpi_boot_table_init(void) /* Do not enable ACPI SPCR console by default */ acpi_parse_spcr(earlycon_acpi_spcr_enable, false); + if (IS_ENABLED(CONFIG_ACPI_BGRT)) + acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt); + return; fdt_earlycon: diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 55320813ee0819..cbce099037b272 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -91,12 +91,30 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) unsigned int config; unsigned long asid_mask; - c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | - LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH; + c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_VINT; elf_hwcap = HWCAP_LOONGARCH_CPUCFG; config = read_cpucfg(LOONGARCH_CPUCFG1); + + switch (config & CPUCFG1_ISA) { + case 0: + set_isa(c, LOONGARCH_CPU_ISA_LA32R); + break; + case 1: + set_isa(c, LOONGARCH_CPU_ISA_LA32S); + break; + case 2: + set_isa(c, LOONGARCH_CPU_ISA_LA64); + break; + default: + pr_warn("Warning: unknown ISA level\n"); + } + + if (config & CPUCFG1_PAGING) + c->options |= LOONGARCH_CPU_TLB; + if (config & CPUCFG1_IOCSR) + c->options |= LOONGARCH_CPU_IOCSR; if (config & CPUCFG1_UAL) { c->options |= LOONGARCH_CPU_UAL; elf_hwcap |= HWCAP_LOONGARCH_UAL; @@ -106,7 +124,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) elf_hwcap |= HWCAP_LOONGARCH_CRC32; } - config = read_cpucfg(LOONGARCH_CPUCFG2); if (config & CPUCFG2_LAM) { c->options |= LOONGARCH_CPU_LAM; @@ -140,6 +157,10 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) c->options |= LOONGARCH_CPU_PTW; elf_hwcap |= HWCAP_LOONGARCH_PTW; } + if (config & CPUCFG2_LSPW) { + c->options |= LOONGARCH_CPU_LSPW; + elf_hwcap |= HWCAP_LOONGARCH_LSPW; + } if (config & CPUCFG2_LVZP) { c->options |= LOONGARCH_CPU_LVZ; elf_hwcap |= HWCAP_LOONGARCH_LVZ; @@ -163,20 +184,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) if (config & CPUCFG6_PMP) c->options |= 
LOONGARCH_CPU_PMP; - config = iocsr_read32(LOONGARCH_IOCSR_FEATURES); - if (config & IOCSRF_CSRIPI) - c->options |= LOONGARCH_CPU_CSRIPI; - if (config & IOCSRF_EXTIOI) - c->options |= LOONGARCH_CPU_EXTIOI; - if (config & IOCSRF_FREQSCALE) - c->options |= LOONGARCH_CPU_SCALEFREQ; - if (config & IOCSRF_FLATMODE) - c->options |= LOONGARCH_CPU_FLATMODE; - if (config & IOCSRF_EIODECODE) - c->options |= LOONGARCH_CPU_EIODECODE; - if (config & IOCSRF_VM) - c->options |= LOONGARCH_CPU_HYPERVISOR; - config = csr_read32(LOONGARCH_CSR_ASID); config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT; asid_mask = GENMASK(config - 1, 0); @@ -209,6 +216,9 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) default: pr_warn("Warning: unknown TLB type\n"); } + + if (get_num_brps() + get_num_wrps()) + c->options |= LOONGARCH_CPU_WATCH; } #define MAX_NAME_LEN 32 @@ -219,52 +229,67 @@ static char cpu_full_name[MAX_NAME_LEN] = " - "; static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu) { + uint32_t config; uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]); uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]); + const char *core_name = "Unknown"; - if (!__cpu_full_name[cpu]) - __cpu_full_name[cpu] = cpu_full_name; - - *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR); - *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); - - switch (c->processor_id & PRID_SERIES_MASK) { - case PRID_SERIES_LA132: + switch (BIT(fls(c->isa_level) - 1)) { + case LOONGARCH_CPU_ISA_LA32R: + case LOONGARCH_CPU_ISA_LA32S: c->cputype = CPU_LOONGSON32; - set_isa(c, LOONGARCH_CPU_ISA_LA32S); __cpu_family[cpu] = "Loongson-32bit"; - pr_info("32-bit Loongson Processor probed (LA132 Core)\n"); break; - case PRID_SERIES_LA264: + case LOONGARCH_CPU_ISA_LA64: c->cputype = CPU_LOONGSON64; - set_isa(c, LOONGARCH_CPU_ISA_LA64); __cpu_family[cpu] = "Loongson-64bit"; - pr_info("64-bit Loongson Processor probed (LA264 Core)\n"); + break; + } + + switch (c->processor_id & PRID_SERIES_MASK) { + case PRID_SERIES_LA132: + core_name = "LA132"; + break; + case PRID_SERIES_LA264: + core_name = "LA264"; break; case PRID_SERIES_LA364: - c->cputype = CPU_LOONGSON64; - set_isa(c, LOONGARCH_CPU_ISA_LA64); - __cpu_family[cpu] = "Loongson-64bit"; - pr_info("64-bit Loongson Processor probed (LA364 Core)\n"); + core_name = "LA364"; break; case PRID_SERIES_LA464: - c->cputype = CPU_LOONGSON64; - set_isa(c, LOONGARCH_CPU_ISA_LA64); - __cpu_family[cpu] = "Loongson-64bit"; - pr_info("64-bit Loongson Processor probed (LA464 Core)\n"); + core_name = "LA464"; break; case PRID_SERIES_LA664: - c->cputype = CPU_LOONGSON64; - set_isa(c, LOONGARCH_CPU_ISA_LA64); - __cpu_family[cpu] = "Loongson-64bit"; - pr_info("64-bit Loongson Processor probed (LA664 Core)\n"); + core_name = "LA664"; break; - default: /* Default to 64 bit */ - c->cputype = CPU_LOONGSON64; - set_isa(c, LOONGARCH_CPU_ISA_LA64); - __cpu_family[cpu] = "Loongson-64bit"; - pr_info("64-bit Loongson Processor probed (Unknown Core)\n"); } + + pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name); + + if (!cpu_has_iocsr) + return; + + if (!__cpu_full_name[cpu]) + __cpu_full_name[cpu] = cpu_full_name; + + *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR); + *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); + + config = iocsr_read32(LOONGARCH_IOCSR_FEATURES); + if (config & IOCSRF_CSRIPI) + c->options |= LOONGARCH_CPU_CSRIPI; + if (config & IOCSRF_EXTIOI) + c->options |= LOONGARCH_CPU_EXTIOI; + if (config & IOCSRF_FREQSCALE) + c->options |= 
LOONGARCH_CPU_SCALEFREQ; + if (config & IOCSRF_FLATMODE) + c->options |= LOONGARCH_CPU_FLATMODE; + if (config & IOCSRF_EIODECODE) + c->options |= LOONGARCH_CPU_EIODECODE; + if (config & IOCSRF_AVEC) + c->options |= LOONGARCH_CPU_AVECINT; + if (config & IOCSRF_VM) + c->options |= LOONGARCH_CPU_HYPERVISOR; } #ifdef CONFIG_64BIT diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index adac8fcbb2aca4..d129039b368b09 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -87,6 +87,18 @@ static void __init init_vec_parent_group(void) acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); } +int __init arch_probe_nr_irqs(void) +{ + int nr_io_pics = bitmap_weight(loongson_sysconf.cores_io_master, NR_CPUS); + + if (!cpu_has_avecint) + nr_irqs = (64 + NR_VECTORS * nr_io_pics); + else + nr_irqs = (64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics)); + + return NR_IRQS_LEGACY; +} + void __init init_IRQ(void) { int i; diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 8fe21f868f72d4..84fe7f8548209e 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -27,10 +27,7 @@ #include int numa_off; -struct pglist_data *node_data[MAX_NUMNODES]; unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES]; - -EXPORT_SYMBOL(node_data); EXPORT_SYMBOL(node_distances); static struct numa_meminfo numa_meminfo; @@ -190,24 +187,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) return numa_add_memblk_to(nid, start, end, &numa_meminfo); } -static void __init alloc_node_data(int nid) -{ - void *nd; - unsigned long nd_pa; - size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE); - - nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid); - if (!nd_pa) { - pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid); - return; - } - - nd = __va(nd_pa); - - node_data[nid] = nd; - memset(nd, 0, sizeof(pg_data_t)); -} - static void __init node_mem_init(unsigned int node) { unsigned long start_pfn, end_pfn; diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index 9c9b75b76f62f2..a5fc61f8b3482c 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -13,6 +13,7 @@ static int has_steal_clock; struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); +DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); static u64 native_steal_clock(int cpu) { @@ -134,6 +135,11 @@ static irqreturn_t pv_ipi_interrupt(int irq, void *dev) info->ipi_irqs[IPI_IRQ_WORK]++; } + if (action & SMP_CLEAR_VECTOR) { + complete_irq_moving(); + info->ipi_irqs[IPI_CLEAR_VECTOR]++; + } + return IRQ_HANDLED; } @@ -151,11 +157,14 @@ static void pv_init_ipi(void) } #endif -static bool kvm_para_available(void) +bool kvm_para_available(void) { int config; static int hypervisor_type; + if (!cpu_has_hypervisor) + return false; + if (!hypervisor_type) { config = read_cpucfg(CPUCFG_KVM_SIG); if (!memcmp(&config, KVM_SIGNATURE, 4)) @@ -165,17 +174,22 @@ static bool kvm_para_available(void) return hypervisor_type == HYPERVISOR_KVM; } -int __init pv_ipi_init(void) +unsigned int kvm_arch_para_features(void) { - int feature; + static unsigned int feature; - if (!cpu_has_hypervisor) - return 0; if (!kvm_para_available()) return 0; - feature = read_cpucfg(CPUCFG_KVM_FEATURE); - if (!(feature & KVM_FEATURE_IPI)) + if (!feature) + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + + return feature; +} + +int __init 
pv_ipi_init(void) +{ + if (!kvm_para_has_feature(KVM_FEATURE_IPI)) return 0; #ifdef CONFIG_SMP @@ -206,7 +220,7 @@ static int pv_enable_steal_time(void) } addr |= KVM_STEAL_PHYS_VALID; - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr); return 0; } @@ -214,7 +228,7 @@ static int pv_enable_steal_time(void) static void pv_disable_steal_time(void) { if (has_steal_clock) - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0); } #ifdef CONFIG_SMP @@ -258,15 +272,9 @@ static struct notifier_block pv_reboot_nb = { int __init pv_time_init(void) { - int r, feature; + int r; - if (!cpu_has_hypervisor) - return 0; - if (!kvm_para_available()) - return 0; - - feature = read_cpucfg(CPUCFG_KVM_FEATURE); - if (!(feature & KVM_FEATURE_STEAL_TIME)) + if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) return 0; has_steal_clock = 1; @@ -300,3 +308,13 @@ int __init pv_time_init(void) return 0; } + +int __init pv_spinlock_init(void) +{ + if (!cpu_has_hypervisor) + return 0; + + static_branch_enable(&virt_spin_lock_key); + + return 0; +} diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c index 0d33cbc47e5110..6ce46d92f1f19d 100644 --- a/arch/loongarch/kernel/proc.c +++ b/arch/loongarch/kernel/proc.c @@ -31,6 +31,7 @@ int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v) static int show_cpuinfo(struct seq_file *m, void *v) { unsigned long n = (unsigned long) v - 1; + unsigned int isa = cpu_data[n].isa_level; unsigned int version = cpu_data[n].processor_id & 0xff; unsigned int fp_version = cpu_data[n].fpu_vers; struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args; @@ -64,9 +65,11 @@ static int show_cpuinfo(struct seq_file *m, void *v) cpu_pabits + 1, cpu_vabits + 1); seq_printf(m, "ISA\t\t\t:"); - if (cpu_has_loongarch32) - seq_printf(m, " loongarch32"); - if (cpu_has_loongarch64) + if (isa & LOONGARCH_CPU_ISA_LA32R) + seq_printf(m, " loongarch32r"); + if (isa & LOONGARCH_CPU_ISA_LA32S) + seq_printf(m, " loongarch32s"); + if (isa & LOONGARCH_CPU_ISA_LA64) seq_printf(m, " loongarch64"); seq_printf(m, "\n"); @@ -81,6 +84,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_complex) seq_printf(m, " complex"); if (cpu_has_crypto) seq_printf(m, " crypto"); if (cpu_has_ptw) seq_printf(m, " ptw"); + if (cpu_has_lspw) seq_printf(m, " lspw"); if (cpu_has_lvz) seq_printf(m, " lvz"); if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86"); if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm"); diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 0f0740f0be274a..00e307203ddb42 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -603,6 +603,8 @@ void __init setup_arch(char **cmdline_p) arch_mem_init(cmdline_p); resource_init(); + jump_label_init(); /* Initialise the static keys for paravirtualization */ + #ifdef CONFIG_SMP plat_smp_setup(); prefill_possible_map(); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index ca405ab86aaef6..9afc2d8b341419 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -72,6 +72,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNCTION] = "Function call interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_CLEAR_VECTOR] = "Clear vector interrupts", }; void show_ipi_list(struct seq_file *p, int 
prec) @@ -248,6 +249,11 @@ static irqreturn_t loongson_ipi_interrupt(int irq, void *dev) per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++; } + if (action & SMP_CLEAR_VECTOR) { + complete_irq_moving(); + per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++; + } + return IRQ_HANDLED; } @@ -476,7 +482,7 @@ core_initcall(ipi_pm_init); #endif /* Preload SMP state for boot cpu */ -void smp_prepare_boot_cpu(void) +void __init smp_prepare_boot_cpu(void) { unsigned int cpu, node, rr_node; @@ -509,6 +515,8 @@ void smp_prepare_boot_cpu(void) rr_node = next_node_in(rr_node, node_online_map); } } + + pv_spinlock_init(); } /* called from main before smp_init() */ diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c index ba5d0930a74f76..168bd97540f8cf 100644 --- a/arch/loongarch/kernel/syscall.c +++ b/arch/loongarch/kernel/syscall.c @@ -79,7 +79,3 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs) syscall_exit_to_user_mode(regs); } - -#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET -STACK_FRAME_NON_STANDARD(do_syscall); -#endif diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c index 90dfccb41c14a0..f6fcc52aefae00 100644 --- a/arch/loongarch/kernel/vdso.c +++ b/arch/loongarch/kernel/vdso.c @@ -37,6 +37,7 @@ static union { static struct page *vdso_pages[] = { NULL }; struct vdso_data *vdso_data = generic_vdso_data.data; struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata; +struct vdso_rng_data *vdso_rng_data = &loongarch_vdso_data.vdata.rng_data; static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index ea73f9dc2cc6aa..90894f70ff4a50 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -50,9 +50,7 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE; break; case CPUCFG_KVM_FEATURE: - ret = KVM_FEATURE_IPI; - if (kvm_pvtime_supported()) - ret |= KVM_FEATURE_STEAL_TIME; + ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; vcpu->arch.gprs[rd] = ret; break; default: @@ -127,6 +125,14 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) rj = inst.reg2csr_format.rj; csrid = inst.reg2csr_format.csr; + if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) { + if (kvm_guest_has_pmu(&vcpu->arch)) { + vcpu->arch.pc -= 4; + kvm_make_request(KVM_REQ_PMU, vcpu); + return EMULATE_DONE; + } + } + /* Process CSR ops */ switch (rj) { case 0: /* process csrrd */ @@ -697,25 +703,22 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu) id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1); data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2); switch (id) { - case KVM_FEATURE_STEAL_TIME: - if (!kvm_pvtime_supported()) - return KVM_HCALL_INVALID_CODE; - + case BIT(KVM_FEATURE_STEAL_TIME): if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID)) return KVM_HCALL_INVALID_PARAMETER; vcpu->arch.st.guest_addr = data; if (!(data & KVM_STEAL_PHYS_VALID)) - break; + return 0; vcpu->arch.st.last_steal = current->sched_info.run_delay; kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); - break; + return 0; default: - break; + return KVM_HCALL_INVALID_CODE; }; - return 0; + return KVM_HCALL_INVALID_CODE; }; /* @@ -748,6 +751,14 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lbt(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return 
RESUME_GUEST; +} + static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu) { unsigned int min, cpu, i; @@ -781,19 +792,21 @@ static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu) */ static void kvm_handle_service(struct kvm_vcpu *vcpu) { + long ret = KVM_HCALL_INVALID_CODE; unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0); - long ret; switch (func) { case KVM_HCALL_FUNC_IPI: - kvm_send_pv_ipi(vcpu); - ret = KVM_HCALL_SUCCESS; + if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) { + kvm_send_pv_ipi(vcpu); + ret = KVM_HCALL_SUCCESS; + } break; case KVM_HCALL_FUNC_NOTIFY: - ret = kvm_save_notify(vcpu); + if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)) + ret = kvm_save_notify(vcpu); break; default: - ret = KVM_HCALL_INVALID_CODE; break; } @@ -865,6 +878,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled, + [EXCCODE_BTDIS] = kvm_handle_lbt_disabled, [EXCCODE_GSPR] = kvm_handle_gspr, [EXCCODE_HVC] = kvm_handle_hypercall, }; diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 844736b99d3874..27e9b94c0a0b6e 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -261,7 +261,7 @@ long kvm_arch_dev_ioctl(struct file *filp, return -ENOIOCTLCMD; } -int kvm_arch_hardware_enable(void) +int kvm_arch_enable_virtualization_cpu(void) { unsigned long env, gcfg = 0; @@ -300,7 +300,7 @@ int kvm_arch_hardware_enable(void) return 0; } -void kvm_arch_hardware_disable(void) +void kvm_arch_disable_virtualization_cpu(void) { write_csr_gcfg(0); write_csr_gstat(0); diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 6905283f535b96..0697b106425116 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,126 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { sizeof(kvm_vcpu_stats_desc), }; +static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu) +{ + struct kvm_context *context; + + context = this_cpu_ptr(vcpu->kvm->arch.vmcs); + context->perf_cntr[0] = read_csr_perfcntr0(); + context->perf_cntr[1] = read_csr_perfcntr1(); + context->perf_cntr[2] = read_csr_perfcntr2(); + context->perf_cntr[3] = read_csr_perfcntr3(); + context->perf_ctrl[0] = write_csr_perfctrl0(0); + context->perf_ctrl[1] = write_csr_perfctrl1(0); + context->perf_ctrl[2] = write_csr_perfctrl2(0); + context->perf_ctrl[3] = write_csr_perfctrl3(0); +} + +static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu) +{ + struct kvm_context *context; + + context = this_cpu_ptr(vcpu->kvm->arch.vmcs); + write_csr_perfcntr0(context->perf_cntr[0]); + write_csr_perfcntr1(context->perf_cntr[1]); + write_csr_perfcntr2(context->perf_cntr[2]); + write_csr_perfcntr3(context->perf_cntr[3]); + write_csr_perfctrl0(context->perf_ctrl[0]); + write_csr_perfctrl1(context->perf_ctrl[1]); + write_csr_perfctrl2(context->perf_ctrl[2]); + write_csr_perfctrl3(context->perf_ctrl[3]); +} + + +static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_read_clear_hw_gcsr(csr, 
LOONGARCH_CSR_PERFCTRL2); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); +} + +static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); +} + +static int kvm_own_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + + if (!kvm_guest_has_pmu(&vcpu->arch)) + return -EINVAL; + + kvm_save_host_pmu(vcpu); + + /* Set PM0-PM(num) to guest */ + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + + kvm_restore_guest_pmu(vcpu); + + return 0; +} + +static void kvm_lose_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) + return; + + kvm_save_guest_pmu(vcpu); + + /* Disable pmu access from guest */ + write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); + + /* + * Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when + * exiting the guest, so that the next time trap into the guest. + * We don't need to deal with PMU CSRs contexts. + */ + val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + if (!(val & KVM_PMU_EVENT_ENABLED)) + vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; + + kvm_restore_host_pmu(vcpu); +} + +static void kvm_restore_pmu(struct kvm_vcpu *vcpu) +{ + if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) + kvm_make_request(KVM_REQ_PMU, vcpu); +} + +static void kvm_check_pmu(struct kvm_vcpu *vcpu) +{ + if (kvm_check_request(KVM_REQ_PMU, vcpu)) { + kvm_own_pmu(vcpu); + vcpu->arch.aux_inuse |= KVM_LARCH_PMU; + } +} + static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) { u32 version; @@ -158,6 +279,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) /* Make sure the vcpu mode has been written */ smp_store_mb(vcpu->mode, IN_GUEST_MODE); kvm_check_vpid(vcpu); + kvm_check_pmu(vcpu); /* * Called after function kvm_check_vpid() @@ -195,6 +317,8 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) /* Set a default exit reason */ run->exit_reason = KVM_EXIT_UNKNOWN; + kvm_lose_pmu(vcpu); + guest_timing_exit_irqoff(); guest_state_exit_irqoff(); local_irq_enable(); @@ -468,6 +592,22 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) kvm_write_sw_gcsr(csr, id, val); + /* + * After modifying the PMU CSR register value of the vcpu. + * If the PMU CSRs are used, we need to set KVM_REQ_PMU. 
+ */ + if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) { + unsigned long val; + + val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + + if (val & KVM_PMU_EVENT_ENABLED) + kvm_make_request(KVM_REQ_PMU, vcpu); + } + return ret; } @@ -497,6 +637,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) *v |= CPUCFG2_LSX; if (cpu_has_lasx) *v |= CPUCFG2_LASX; + if (cpu_has_lbt_x86) + *v |= CPUCFG2_X86BT; + if (cpu_has_lbt_arm) + *v |= CPUCFG2_ARMBT; + if (cpu_has_lbt_mips) + *v |= CPUCFG2_MIPSBT; return 0; case LOONGARCH_CPUCFG3: @@ -506,6 +652,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) case LOONGARCH_CPUCFG5: *v = GENMASK(31, 0); return 0; + case LOONGARCH_CPUCFG6: + if (cpu_has_pmp) + *v = GENMASK(14, 0); + else + *v = 0; + return 0; case LOONGARCH_CPUCFG16: *v = GENMASK(16, 0); return 0; @@ -550,6 +702,17 @@ static int kvm_check_cpucfg(int id, u64 val) /* LASX architecturally implies LSX and FP but val does not satisfy that */ return -EINVAL; return 0; + case LOONGARCH_CPUCFG6: + if (val & CPUCFG6_PMP) { + u32 host = read_cpucfg(LOONGARCH_CPUCFG6); + if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) + return -EINVAL; + if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM)) + return -EINVAL; + if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM)) + return -EINVAL; + } + return 0; default: /* * Values for the other CPUCFG IDs are not being further validated @@ -577,6 +740,34 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu, else ret = -EINVAL; break; + case KVM_REG_LOONGARCH_LBT: + if (!kvm_guest_has_lbt(&vcpu->arch)) + return -ENXIO; + + switch (reg->id) { + case KVM_REG_LOONGARCH_LBT_SCR0: + *v = vcpu->arch.lbt.scr0; + break; + case KVM_REG_LOONGARCH_LBT_SCR1: + *v = vcpu->arch.lbt.scr1; + break; + case KVM_REG_LOONGARCH_LBT_SCR2: + *v = vcpu->arch.lbt.scr2; + break; + case KVM_REG_LOONGARCH_LBT_SCR3: + *v = vcpu->arch.lbt.scr3; + break; + case KVM_REG_LOONGARCH_LBT_EFLAGS: + *v = vcpu->arch.lbt.eflags; + break; + case KVM_REG_LOONGARCH_LBT_FTOP: + *v = vcpu->arch.fpu.ftop; + break; + default: + ret = -EINVAL; + break; + } + break; case KVM_REG_LOONGARCH_KVM: switch (reg->id) { case KVM_REG_LOONGARCH_COUNTER: @@ -635,6 +826,37 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, if (ret) break; vcpu->arch.cpucfg[id] = (u32)v; + if (id == LOONGARCH_CPUCFG6) + vcpu->arch.max_pmu_csrid = + LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; + break; + case KVM_REG_LOONGARCH_LBT: + if (!kvm_guest_has_lbt(&vcpu->arch)) + return -ENXIO; + + switch (reg->id) { + case KVM_REG_LOONGARCH_LBT_SCR0: + vcpu->arch.lbt.scr0 = v; + break; + case KVM_REG_LOONGARCH_LBT_SCR1: + vcpu->arch.lbt.scr1 = v; + break; + case KVM_REG_LOONGARCH_LBT_SCR2: + vcpu->arch.lbt.scr2 = v; + break; + case KVM_REG_LOONGARCH_LBT_SCR3: + vcpu->arch.lbt.scr3 = v; + break; + case KVM_REG_LOONGARCH_LBT_EFLAGS: + vcpu->arch.lbt.eflags = v; + break; + case KVM_REG_LOONGARCH_LBT_FTOP: + vcpu->arch.fpu.ftop = v; + break; + default: + ret = -EINVAL; + break; + } break; case KVM_REG_LOONGARCH_KVM: switch (reg->id) { @@ -728,7 +950,10 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { switch (attr->attr) { - case 2: + case LOONGARCH_CPUCFG2: + case LOONGARCH_CPUCFG6: + return 0; + case CPUCFG_KVM_FEATURE: return 0; default: return -ENXIO; @@ -740,8 +965,8 @@ static int 
kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { - if (!kvm_pvtime_supported() || - attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) + || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) return -ENXIO; return 0; @@ -773,9 +998,18 @@ static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu, uint64_t val; uint64_t __user *uaddr = (uint64_t __user *)attr->addr; - ret = _kvm_get_cpucfg_mask(attr->attr, &val); - if (ret) - return ret; + switch (attr->attr) { + case 0 ... (KVM_MAX_CPUCFG_REGS - 1): + ret = _kvm_get_cpucfg_mask(attr->attr, &val); + if (ret) + return ret; + break; + case CPUCFG_KVM_FEATURE: + val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; + break; + default: + return -ENXIO; + } put_user(val, uaddr); @@ -788,8 +1022,8 @@ static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, u64 gpa; u64 __user *user = (u64 __user *)attr->addr; - if (!kvm_pvtime_supported() || - attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) + || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) return -ENXIO; gpa = vcpu->arch.st.guest_addr; @@ -821,7 +1055,28 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { - return -ENXIO; + u64 val, valid; + u64 __user *user = (u64 __user *)attr->addr; + struct kvm *kvm = vcpu->kvm; + + switch (attr->attr) { + case CPUCFG_KVM_FEATURE: + if (get_user(val, user)) + return -EFAULT; + + valid = LOONGARCH_PV_FEAT_MASK; + if (val & ~valid) + return -EINVAL; + + /* All vCPUs need set the same PV features */ + if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED) + && ((kvm->arch.pv_features & valid) != val)) + return -EINVAL; + kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED; + return 0; + default: + return -ENXIO; + } } static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, @@ -831,8 +1086,8 @@ static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, u64 gpa, __user *user = (u64 __user *)attr->addr; struct kvm *kvm = vcpu->kvm; - if (!kvm_pvtime_supported() || - attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) + || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) return -ENXIO; if (get_user(gpa, user)) @@ -977,12 +1232,66 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) return 0; } +#ifdef CONFIG_CPU_HAS_LBT +int kvm_own_lbt(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_lbt(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + set_csr_euen(CSR_EUEN_LBTEN); + _restore_lbt(&vcpu->arch.lbt); + vcpu->arch.aux_inuse |= KVM_LARCH_LBT; + preempt_enable(); + + return 0; +} + +static void kvm_lose_lbt(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { + _save_lbt(&vcpu->arch.lbt); + clear_csr_euen(CSR_EUEN_LBTEN); + vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; + } + preempt_enable(); +} + +static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) +{ + /* + * If TM is enabled, top register save/restore will + * cause lbt exception, here enable lbt in advance + */ + if (fcsr & FPU_CSR_TM) + kvm_own_lbt(vcpu); +} + +static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) + return; + 
kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0)); + } +} +#else +static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { } +static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { } +static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { } +#endif + /* Enable FPU and restore context */ void kvm_own_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - /* Enable FPU */ + /* + * Enable FPU for guest + * Set FR and FRE according to guest context + */ + kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_FPEN); kvm_restore_fpu(&vcpu->arch.fpu); @@ -1002,6 +1311,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) preempt_disable(); /* Enable LSX for guest */ + kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { case KVM_LARCH_FPU: @@ -1036,6 +1346,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu) preempt_disable(); + kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { case KVM_LARCH_LSX: @@ -1067,6 +1378,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); + kvm_check_fcsr_alive(vcpu); if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { kvm_save_lasx(&vcpu->arch.fpu); vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); @@ -1089,6 +1401,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) /* Disable FPU */ clear_csr_euen(CSR_EUEN_FPEN); } + kvm_lose_lbt(vcpu); preempt_enable(); } @@ -1235,6 +1548,9 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + /* Restore hardware PMU CSRs */ + kvm_restore_pmu(vcpu); + /* Don't bother restoring registers multiple times unless necessary */ if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) return 0; diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 6b2e4f66ad2619..4ba734aaef87a7 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -5,6 +5,7 @@ #include #include +#include const struct _kvm_stats_desc kvm_vm_stats_desc[] = { KVM_GENERIC_VM_STATS(), @@ -39,6 +40,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) spin_lock_init(&kvm->arch.phyid_map_lock); kvm_init_vmcs(kvm); + + /* Enable all PV features by default */ + kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); + if (kvm_pvtime_supported()) + kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); + kvm->arch.gpa_size = BIT(cpu_vabits - 1); kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; kvm->arch.invalid_ptes[0] = 0; @@ -99,7 +106,67 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) return r; } +static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_LOONGARCH_VM_FEAT_LSX: + if (cpu_has_lsx) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_LASX: + if (cpu_has_lasx) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_X86BT: + if (cpu_has_lbt_x86) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_ARMBT: + if (cpu_has_lbt_arm) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_MIPSBT: + if (cpu_has_lbt_mips) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_PMU: + if (cpu_has_pmp) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_PV_IPI: + return 0; + case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME: + if (kvm_pvtime_supported()) + return 0; + return -ENXIO; + default: + return 
-ENXIO; + } +} + +static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_LOONGARCH_VM_FEAT_CTRL: + return kvm_vm_feature_has_attr(kvm, attr); + default: + return -ENXIO; + } +} + int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { - return -ENOIOCTLCMD; + void __user *argp = (void __user *)arg; + struct kvm *kvm = filp->private_data; + struct kvm_device_attr attr; + + switch (ioctl) { + case KVM_HAS_DEVICE_ATTR: + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + + return kvm_vm_has_attr(kvm, &attr); + default: + return -ENOIOCTLCMD; + } } diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile index e4d1e581dbae3a..278be2c8fc3699 100644 --- a/arch/loongarch/mm/Makefile +++ b/arch/loongarch/mm/Makefile @@ -4,7 +4,8 @@ # obj-y += init.o cache.o tlb.o tlbex.o extable.o \ - fault.o ioremap.o maccess.o mmap.o pgtable.o page.o + fault.o ioremap.o maccess.o mmap.o pgtable.o \ + page.o pageattr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_KASAN) += kasan_init.o diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c index 97b40defde0608..deefd9617d0085 100644 --- a/arch/loongarch/mm/fault.c +++ b/arch/loongarch/mm/fault.c @@ -31,11 +31,52 @@ int show_unhandled_signals = 1; +static int __kprobes spurious_fault(unsigned long write, unsigned long address) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (!(address & __UA_LIMIT)) + return 0; + + pgd = pgd_offset_k(address); + if (!pgd_present(pgdp_get(pgd))) + return 0; + + p4d = p4d_offset(pgd, address); + if (!p4d_present(p4dp_get(p4d))) + return 0; + + pud = pud_offset(p4d, address); + if (!pud_present(pudp_get(pud))) + return 0; + + pmd = pmd_offset(pud, address); + if (!pmd_present(pmdp_get(pmd))) + return 0; + + if (pmd_leaf(*pmd)) { + return write ? pmd_write(pmdp_get(pmd)) : 1; + } else { + pte = pte_offset_kernel(pmd, address); + if (!pte_present(ptep_get(pte))) + return 0; + + return write ? pte_write(ptep_get(pte)) : 1; + } +} + static void __kprobes no_context(struct pt_regs *regs, unsigned long write, unsigned long address) { const int field = sizeof(unsigned long) * 2; + if (spurious_fault(write, address)) + return; + /* Are we prepared to handle this kernel fault? 
*/ if (fixup_exception(regs)) return; diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c index 8890309851351c..914e82ff3f6563 100644 --- a/arch/loongarch/mm/mmap.c +++ b/arch/loongarch/mm/mmap.c @@ -89,7 +89,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, UP); @@ -101,7 +102,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, */ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, DOWN); diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c new file mode 100644 index 00000000000000..ffd8d76021d470 --- /dev/null +++ b/arch/loongarch/mm/pageattr.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include + +struct pageattr_masks { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk) +{ + unsigned long new_val = val; + struct pageattr_masks *masks = walk->private; + + new_val &= ~(pgprot_val(masks->clear_mask)); + new_val |= (pgprot_val(masks->set_mask)); + + return new_val; +} + +static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + pgd_t val = pgdp_get(pgd); + + if (pgd_leaf(val)) { + val = __pgd(set_pageattr_masks(pgd_val(val), walk)); + set_pgd(pgd, val); + } + + return 0; +} + +static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + p4d_t val = p4dp_get(p4d); + + if (p4d_leaf(val)) { + val = __p4d(set_pageattr_masks(p4d_val(val), walk)); + set_p4d(p4d, val); + } + + return 0; +} + +static int pageattr_pud_entry(pud_t *pud, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + pud_t val = pudp_get(pud); + + if (pud_leaf(val)) { + val = __pud(set_pageattr_masks(pud_val(val), walk)); + set_pud(pud, val); + } + + return 0; +} + +static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + pmd_t val = pmdp_get(pmd); + + if (pmd_leaf(val)) { + val = __pmd(set_pageattr_masks(pmd_val(val), walk)); + set_pmd(pmd, val); + } + + return 0; +} + +static int pageattr_pte_entry(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + pte_t val = ptep_get(pte); + + val = __pte(set_pageattr_masks(pte_val(val), walk)); + set_pte(pte, val); + + return 0; +} + +static int pageattr_pte_hole(unsigned long addr, unsigned long next, + int depth, struct mm_walk *walk) +{ + return 0; +} + +static const struct mm_walk_ops pageattr_ops = { + .pgd_entry = pageattr_pgd_entry, + .p4d_entry = pageattr_p4d_entry, + .pud_entry = pageattr_pud_entry, + .pmd_entry = pageattr_pmd_entry, + .pte_entry = pageattr_pte_entry, + .pte_hole = pageattr_pte_hole, + .walk_lock = PGWALK_RDLOCK, +}; + +static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask) +{ + int ret; + unsigned long start = addr; + unsigned long end 
= start + PAGE_SIZE * numpages; + struct pageattr_masks masks = { + .set_mask = set_mask, + .clear_mask = clear_mask + }; + + if (!numpages) + return 0; + + mmap_write_lock(&init_mm); + ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks); + mmap_write_unlock(&init_mm); + + flush_tlb_kernel_range(start, end); + + return ret; +} + +int set_memory_x(unsigned long addr, int numpages) +{ + if (addr < vm_map_base) + return 0; + + return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_NO_EXEC)); +} + +int set_memory_nx(unsigned long addr, int numpages) +{ + if (addr < vm_map_base) + return 0; + + return __set_memory(addr, numpages, __pgprot(_PAGE_NO_EXEC), __pgprot(0)); +} + +int set_memory_ro(unsigned long addr, int numpages) +{ + if (addr < vm_map_base) + return 0; + + return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY)); +} + +int set_memory_rw(unsigned long addr, int numpages) +{ + if (addr < vm_map_base) + return 0; + + return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0)); +} + +bool kernel_page_present(struct page *page) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + unsigned long addr = (unsigned long)page_address(page); + + if (addr < vm_map_base) + return true; + + pgd = pgd_offset_k(addr); + if (pgd_none(pgdp_get(pgd))) + return false; + if (pgd_leaf(pgdp_get(pgd))) + return true; + + p4d = p4d_offset(pgd, addr); + if (p4d_none(p4dp_get(p4d))) + return false; + if (p4d_leaf(p4dp_get(p4d))) + return true; + + pud = pud_offset(p4d, addr); + if (pud_none(pudp_get(pud))) + return false; + if (pud_leaf(pudp_get(pud))) + return true; + + pmd = pmd_offset(pud, addr); + if (pmd_none(pmdp_get(pmd))) + return false; + if (pmd_leaf(pmdp_get(pmd))) + return true; + + pte = pte_offset_kernel(pmd, addr); + return pte_present(ptep_get(pte)); +} + +int set_direct_map_default_noflush(struct page *page) +{ + unsigned long addr = (unsigned long)page_address(page); + + if (addr < vm_map_base) + return 0; + + return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0)); +} + +int set_direct_map_invalid_noflush(struct page *page) +{ + unsigned long addr = (unsigned long)page_address(page); + + if (addr < vm_map_base) + return 0; + + return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID)); +} diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 365f7de771cbb9..1da4dc46df43e5 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -225,6 +225,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) if (bus) { memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window)); kfree(info); + kfree(root_ops); } else { struct pci_bus *child; diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index d724d46b07c842..40c1175823d61d 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -4,7 +4,8 @@ # Include the generic Makefile to check the built vdso. include $(srctree)/lib/vdso/Makefile -obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o sigreturn.o +obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \ + vgetrandom-chacha.o sigreturn.o # Common compiler flags between ABIs. ccflags-vdso := \ @@ -29,6 +30,10 @@ ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) endif +ifneq ($(c-getrandom-y),) + CFLAGS_vgetrandom.o += -include $(c-getrandom-y) +endif + # VDSO linker flags. 
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S index 56ad855896dee7..6b441bde4026ea 100644 --- a/arch/loongarch/vdso/vdso.lds.S +++ b/arch/loongarch/vdso/vdso.lds.S @@ -62,6 +62,7 @@ VERSION __vdso_clock_getres; __vdso_clock_gettime; __vdso_gettimeofday; + __vdso_getrandom; __vdso_rt_sigreturn; local: *; }; diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S new file mode 100644 index 00000000000000..c2733e6c3a8de8 --- /dev/null +++ b/arch/loongarch/vdso/vgetrandom-chacha.S @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Xi Ruoyao . All Rights Reserved. + */ + +#include +#include +#include + +.text + +.macro OP_4REG op d0 d1 d2 d3 s0 s1 s2 s3 + \op \d0, \d0, \s0 + \op \d1, \d1, \s1 + \op \d2, \d2, \s2 + \op \d3, \d3, \s3 +.endm + +/* + * Very basic LoongArch implementation of ChaCha20. Produces a given positive + * number of blocks of output with a nonce of 0, taking an input key and + * 8-byte counter. Importantly does not spill to the stack. Its arguments + * are: + * + * a0: output bytes + * a1: 32-byte key input + * a2: 8-byte counter input/output + * a3: number of 64-byte blocks to write to output + */ +SYM_FUNC_START(__arch_chacha20_blocks_nostack) + +/* We don't need a frame pointer */ +#define s9 fp + +#define output a0 +#define key a1 +#define counter a2 +#define nblocks a3 +#define i a4 +#define state0 s0 +#define state1 s1 +#define state2 s2 +#define state3 s3 +#define state4 s4 +#define state5 s5 +#define state6 s6 +#define state7 s7 +#define state8 s8 +#define state9 s9 +#define state10 a5 +#define state11 a6 +#define state12 a7 +#define state13 t0 +#define state14 t1 +#define state15 t2 +#define cnt_lo t3 +#define cnt_hi t4 +#define copy0 t5 +#define copy1 t6 +#define copy2 t7 + +/* Reuse i as copy3 */ +#define copy3 i + +/* Packs to be used with OP_4REG */ +#define line0 state0, state1, state2, state3 +#define line1 state4, state5, state6, state7 +#define line2 state8, state9, state10, state11 +#define line3 state12, state13, state14, state15 + +#define line1_perm state5, state6, state7, state4 +#define line2_perm state10, state11, state8, state9 +#define line3_perm state15, state12, state13, state14 + +#define copy copy0, copy1, copy2, copy3 + +#define _16 16, 16, 16, 16 +#define _20 20, 20, 20, 20 +#define _24 24, 24, 24, 24 +#define _25 25, 25, 25, 25 + + /* + * The ABI requires s0-s9 saved, and sp aligned to 16-byte. + * This does not violate the stack-less requirement: no sensitive data + * is spilled onto the stack. 
+ */ + PTR_ADDI sp, sp, (-SZREG * 10) & STACK_ALIGN + REG_S s0, sp, 0 + REG_S s1, sp, SZREG + REG_S s2, sp, SZREG * 2 + REG_S s3, sp, SZREG * 3 + REG_S s4, sp, SZREG * 4 + REG_S s5, sp, SZREG * 5 + REG_S s6, sp, SZREG * 6 + REG_S s7, sp, SZREG * 7 + REG_S s8, sp, SZREG * 8 + REG_S s9, sp, SZREG * 9 + + li.w copy0, 0x61707865 + li.w copy1, 0x3320646e + li.w copy2, 0x79622d32 + + ld.w cnt_lo, counter, 0 + ld.w cnt_hi, counter, 4 + +.Lblock: + /* state[0,1,2,3] = "expand 32-byte k" */ + move state0, copy0 + move state1, copy1 + move state2, copy2 + li.w state3, 0x6b206574 + + /* state[4,5,..,11] = key */ + ld.w state4, key, 0 + ld.w state5, key, 4 + ld.w state6, key, 8 + ld.w state7, key, 12 + ld.w state8, key, 16 + ld.w state9, key, 20 + ld.w state10, key, 24 + ld.w state11, key, 28 + + /* state[12,13] = counter */ + move state12, cnt_lo + move state13, cnt_hi + + /* state[14,15] = 0 */ + move state14, zero + move state15, zero + + li.w i, 10 +.Lpermute: + /* odd round */ + OP_4REG add.w line0, line1 + OP_4REG xor line3, line0 + OP_4REG rotri.w line3, _16 + + OP_4REG add.w line2, line3 + OP_4REG xor line1, line2 + OP_4REG rotri.w line1, _20 + + OP_4REG add.w line0, line1 + OP_4REG xor line3, line0 + OP_4REG rotri.w line3, _24 + + OP_4REG add.w line2, line3 + OP_4REG xor line1, line2 + OP_4REG rotri.w line1, _25 + + /* even round */ + OP_4REG add.w line0, line1_perm + OP_4REG xor line3_perm, line0 + OP_4REG rotri.w line3_perm, _16 + + OP_4REG add.w line2_perm, line3_perm + OP_4REG xor line1_perm, line2_perm + OP_4REG rotri.w line1_perm, _20 + + OP_4REG add.w line0, line1_perm + OP_4REG xor line3_perm, line0 + OP_4REG rotri.w line3_perm, _24 + + OP_4REG add.w line2_perm, line3_perm + OP_4REG xor line1_perm, line2_perm + OP_4REG rotri.w line1_perm, _25 + + addi.w i, i, -1 + bnez i, .Lpermute + + /* + * copy[3] = "expa", materialize it here because copy[3] shares the + * same register with i which just became dead. + */ + li.w copy3, 0x6b206574 + + /* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */ + OP_4REG add.w line0, copy + st.w state0, output, 0 + st.w state1, output, 4 + st.w state2, output, 8 + st.w state3, output, 12 + + /* from now on state[0,1,2,3] are scratch registers */ + + /* state[0,1,2,3] = lo32(key) */ + ld.w state0, key, 0 + ld.w state1, key, 4 + ld.w state2, key, 8 + ld.w state3, key, 12 + + /* output[4,5,6,7] = state[0,1,2,3] + state[4,5,6,7] */ + OP_4REG add.w line1, line0 + st.w state4, output, 16 + st.w state5, output, 20 + st.w state6, output, 24 + st.w state7, output, 28 + + /* state[0,1,2,3] = hi32(key) */ + ld.w state0, key, 16 + ld.w state1, key, 20 + ld.w state2, key, 24 + ld.w state3, key, 28 + + /* output[8,9,10,11] = state[0,1,2,3] + state[8,9,10,11] */ + OP_4REG add.w line2, line0 + st.w state8, output, 32 + st.w state9, output, 36 + st.w state10, output, 40 + st.w state11, output, 44 + + /* output[12,13,14,15] = state[12,13,14,15] + [cnt_lo, cnt_hi, 0, 0] */ + add.w state12, state12, cnt_lo + add.w state13, state13, cnt_hi + st.w state12, output, 48 + st.w state13, output, 52 + st.w state14, output, 56 + st.w state15, output, 60 + + /* ++counter */ + addi.w cnt_lo, cnt_lo, 1 + sltui state0, cnt_lo, 1 + add.w cnt_hi, cnt_hi, state0 + + /* output += 64 */ + PTR_ADDI output, output, 64 + /* --nblocks */ + PTR_ADDI nblocks, nblocks, -1 + bnez nblocks, .Lblock + + /* counter = [cnt_lo, cnt_hi] */ + st.w cnt_lo, counter, 0 + st.w cnt_hi, counter, 4 + + /* + * Zero out the potentially sensitive regs, in case nothing uses these + * again. 
As at now copy[0,1,2,3] just contains "expand 32-byte k" and + * state[0,...,9] are s0-s9 those we'll restore in the epilogue, so we + * only need to zero state[11,...,15]. + */ + move state10, zero + move state11, zero + move state12, zero + move state13, zero + move state14, zero + move state15, zero + + REG_L s0, sp, 0 + REG_L s1, sp, SZREG + REG_L s2, sp, SZREG * 2 + REG_L s3, sp, SZREG * 3 + REG_L s4, sp, SZREG * 4 + REG_L s5, sp, SZREG * 5 + REG_L s6, sp, SZREG * 6 + REG_L s7, sp, SZREG * 7 + REG_L s8, sp, SZREG * 8 + REG_L s9, sp, SZREG * 9 + PTR_ADDI sp, sp, -((-SZREG * 10) & STACK_ALIGN) + + jr ra +SYM_FUNC_END(__arch_chacha20_blocks_nostack) diff --git a/arch/loongarch/vdso/vgetrandom.c b/arch/loongarch/vdso/vgetrandom.c new file mode 100644 index 00000000000000..d5f258ac4a36d1 --- /dev/null +++ b/arch/loongarch/vdso/vgetrandom.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Xi Ruoyao . All Rights Reserved. + */ +#include + +ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len) +{ + return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len); +} diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 9a084943a68a4b..d01dc47d52ea2f 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -559,7 +559,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -636,7 +635,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 58ec725bf392f1..46808e581d7b47 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -516,7 +516,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -593,7 +592,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 63ab7e892e592a..4469a7839c9d1a 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -536,7 +536,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -613,7 +612,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index ca40744eec6000..c0719322c02837 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -508,7 +508,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -585,7 +584,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m 
CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 77bcc6faf468cd..8d429e63f8f214 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -518,7 +518,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -595,7 +594,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index e73d4032b65919..bafd33da27c110 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -535,7 +535,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -612,7 +611,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 638df8442c9886..6f5ca3f85ea160 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -621,7 +621,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -698,7 +697,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 2248db4260816b..d16b328c71363f 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -507,7 +507,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -584,7 +583,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 2975b66521f6e3..80f6c15a5ed5c8 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -508,7 +508,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -585,7 +584,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 0a0e32344033bf..0e81589f0ee26d 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -525,7 +525,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -602,7 +601,6 @@ 
CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index f16f1a99c2badb..8cd785290339db 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -506,7 +506,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -582,7 +581,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 667bd61ae9b395..78035369f60f1b 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -506,7 +506,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=m @@ -583,7 +582,6 @@ CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m CONFIG_TEST_BITOPS=m CONFIG_TEST_VMALLOC=m -CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_FIND_BIT_BENCHMARK=m diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h index 4ba14f3535fcbe..71fbe5c5c5643c 100644 --- a/arch/m68k/include/asm/cmpxchg.h +++ b/arch/m68k/include/asm/cmpxchg.h @@ -3,6 +3,7 @@ #define __ARCH_M68K_CMPXCHG__ #include +#include #define __xg(type, x) ((volatile type *)(x)) @@ -11,25 +12,19 @@ extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int); #ifndef CONFIG_RMW_INSNS static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, int size) { - unsigned long flags, tmp; + unsigned long flags; local_irq_save(flags); switch (size) { case 1: - tmp = *(u8 *)ptr; - *(u8 *)ptr = x; - x = tmp; + swap(*(u8 *)ptr, x); break; case 2: - tmp = *(u16 *)ptr; - *(u16 *)ptr = x; - x = tmp; + swap(*(u16 *)ptr, x); break; case 4: - tmp = *(u32 *)ptr; - *(u32 *)ptr = x; - x = tmp; + swap(*(u32 *)ptr, x); break; default: x = __invalid_xchg_size(x, ptr, size); diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 2584e94e213468..fda7eac23f872d 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -117,7 +117,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs) { /* regs will be equal to current_pt_regs() */ struct kernel_clone_args args = { - .flags = regs->d1 & ~CSIGNAL, + .flags = (u32)(regs->d1) & ~CSIGNAL, .pidfd = (int __user *)regs->d3, .child_tid = (int __user *)regs->d4, .parent_tid = (int __user *)regs->d3, diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 37fb663559b474..c926da9d5ec28c 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -138,7 +138,7 @@ void __init setup_arch(char **cmdline_p) pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start, __bss_stop); - pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", + pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n", __bss_stop, memory_start, memory_start, memory_end); memblock_add(_rambase, memory_end - _rambase); diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c index 10f1f294e91f97..14b774b9d308b5 100644 --- 
a/arch/m68k/q40/q40ints.c +++ b/arch/m68k/q40/q40ints.c @@ -106,7 +106,7 @@ void __init q40_init_IRQ(void) * this stuff doesn't really belong here.. */ -int ql_ticks; /* 200Hz ticks since last jiffie */ +int ql_ticks; /* 200Hz ticks since last jiffy */ static int sound_ticks; #define SVOL 45 diff --git a/arch/microblaze/include/asm/flat.h b/arch/microblaze/include/asm/flat.h index 79a749f4ad04ee..edff4306fa7093 100644 --- a/arch/microblaze/include/asm/flat.h +++ b/arch/microblaze/include/asm/flat.h @@ -8,7 +8,7 @@ #ifndef _ASM_MICROBLAZE_FLAT_H #define _ASM_MICROBLAZE_FLAT_H -#include +#include /* * Microblaze works a little differently from other arches, because diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 60077e57693563..397edf05dd7225 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -8,6 +8,7 @@ config MIPS select ARCH_HAS_CPU_FINALIZE_INIT select ARCH_HAS_CURRENT_STACK_POINTER if !CC_IS_CLANG || CLANG_VERSION >= 140000 select ARCH_HAS_DEBUG_VIRTUAL if !64BIT + select ARCH_HAS_DMA_OPS if MACH_JAZZ select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_KCOV select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA @@ -393,7 +394,6 @@ config MACH_JAZZ select ARC_PROMLIB select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO - select DMA_OPS select FW_ARC select FW_ARC32 select ARCH_MAY_HAVE_PC_FDC @@ -502,7 +502,6 @@ config MACH_LOONGSON64 select USE_OF select BUILTIN_DTB select PCI_HOST_GENERIC - select HAVE_ARCH_NODEDATA_EXTENSION if NUMA help This enables the support of Loongson-2/3 family of machines. @@ -735,7 +734,6 @@ config SGI_IP27 select WAR_R10000_LLSC select MIPS_L1_CACHE_SHIFT_7 select NUMA - select HAVE_ARCH_NODEDATA_EXTENSION help This are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics workstations. To compile a Linux kernel that runs on these, say Y @@ -2613,9 +2611,6 @@ config NUMA config SYS_SUPPORTS_NUMA bool -config HAVE_ARCH_NODEDATA_EXTENSION - bool - config RELOCATABLE bool "Relocatable kernel" depends on SYS_SUPPORTS_RELOCATABLE diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c index 973049b5bd6182..44d8433b1f45d0 100644 --- a/arch/mips/alchemy/common/dma.c +++ b/arch/mips/alchemy/common/dma.c @@ -131,29 +131,6 @@ static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = { { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */ }; -void dump_au1000_dma_channel(unsigned int dmanr) -{ - struct dma_chan *chan; - - if (dmanr >= NUM_AU1000_DMA_CHANNELS) - return; - chan = &au1000_dma_table[dmanr]; - - printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr); - printk(KERN_INFO " mode = 0x%08x\n", - __raw_readl(chan->io + DMA_MODE_SET)); - printk(KERN_INFO " addr = 0x%08x\n", - __raw_readl(chan->io + DMA_PERIPHERAL_ADDR)); - printk(KERN_INFO " start0 = 0x%08x\n", - __raw_readl(chan->io + DMA_BUFFER0_START)); - printk(KERN_INFO " start1 = 0x%08x\n", - __raw_readl(chan->io + DMA_BUFFER1_START)); - printk(KERN_INFO " count0 = 0x%08x\n", - __raw_readl(chan->io + DMA_BUFFER0_COUNT)); - printk(KERN_INFO " count1 = 0x%08x\n", - __raw_readl(chan->io + DMA_BUFFER1_COUNT)); -} - /* * Finds a free channel, and binds the requested device to it. * Returns the allocated channel number, or negative on error. 
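
A minimal sketch of the IS_ENABLED() idiom that the arch/mips/crypto/crc32-mips.c hunk below adopts in place of #ifdef CONFIG_64BIT blocks: both word-size paths stay visible to the compiler and are type-checked on every build, and the constant condition lets dead-code elimination drop the unused one. The consume_u64()/consume_u32() helpers here are hypothetical stand-ins, not the kernel's CRC32()/CRC32C() macros.

/* Illustrative sketch with assumed helpers; not taken from the patch itself. */
#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>	/* u8, u32, u64 */
#include <linux/unaligned.h>	/* get_unaligned_le32/64() */

/* Hypothetical stand-ins for the per-word CRC macros. */
static inline void consume_u64(u64 v) { (void)v; }
static inline void consume_u32(u32 v) { (void)v; }

static size_t consume_words(const u8 *p, size_t len)
{
	/*
	 * IS_ENABLED(CONFIG_64BIT) folds to a constant 0 or 1, so both
	 * branches are parsed and type-checked on every build while the
	 * unused one is removed by dead-code elimination -- unlike the
	 * old #ifdef form, which hid one branch from the compiler.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64))
			consume_u64(get_unaligned_le64(p));
		if (len & sizeof(u32)) {
			consume_u32(get_unaligned_le32(p));
			p += sizeof(u32);
			len -= sizeof(u32);
		}
	} else {
		for (; len >= sizeof(u32); p += sizeof(u32), len -= sizeof(u32))
			consume_u32(get_unaligned_le32(p));
	}

	return len;	/* tail bytes (< 4) left for narrower handling */
}
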
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index 99f321b6e417bd..9cc8fbf218a596 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -42,7 +42,7 @@ static struct board_info __initdata board_cvg834g = { .expected_cpu_id = 0x3368, .ephy_reset_gpio = 36, - .ephy_reset_gpio_flags = GPIOF_INIT_HIGH, + .ephy_reset_gpio_flags = GPIOF_OUT_INIT_HIGH, .has_pci = 1, .has_uart0 = 1, .has_uart1 = 1, diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c index adb6d5b0e6ebc0..90021c6a8cab5c 100644 --- a/arch/mips/boot/compressed/decompress.c +++ b/arch/mips/boot/compressed/decompress.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include "decompress.h" diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config index 8cfbafa532e0cf..a5b5b510247222 100644 --- a/arch/mips/configs/generic/board-ocelot.config +++ b/arch/mips/configs/generic/board-ocelot.config @@ -31,6 +31,7 @@ CONFIG_MICROSEMI_PHY=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c index ec6d58008f8e10..a7a1d43a1b2ca5 100644 --- a/arch/mips/crypto/crc32-mips.c +++ b/arch/mips/crypto/crc32-mips.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include @@ -77,24 +77,26 @@ static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len) { u32 crc = crc_; -#ifdef CONFIG_64BIT - while (len >= sizeof(u64)) { - u64 value = get_unaligned_le64(p); + if (IS_ENABLED(CONFIG_64BIT)) { + for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) { + u64 value = get_unaligned_le64(p); - CRC32(crc, value, d); - p += sizeof(u64); - len -= sizeof(u64); - } + CRC32(crc, value, d); + } - if (len & sizeof(u32)) { -#else /* !CONFIG_64BIT */ - while (len >= sizeof(u32)) { -#endif - u32 value = get_unaligned_le32(p); + if (len & sizeof(u32)) { + u32 value = get_unaligned_le32(p); + + CRC32(crc, value, w); + p += sizeof(u32); + } + } else { + for (; len >= sizeof(u32); len -= sizeof(u32)) { + u32 value = get_unaligned_le32(p); - CRC32(crc, value, w); - p += sizeof(u32); - len -= sizeof(u32); + CRC32(crc, value, w); + p += sizeof(u32); + } } if (len & sizeof(u16)) { @@ -117,24 +119,26 @@ static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len) { u32 crc = crc_; -#ifdef CONFIG_64BIT - while (len >= sizeof(u64)) { - u64 value = get_unaligned_le64(p); + if (IS_ENABLED(CONFIG_64BIT)) { + for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) { + u64 value = get_unaligned_le64(p); - CRC32C(crc, value, d); - p += sizeof(u64); - len -= sizeof(u64); - } + CRC32(crc, value, d); + } - if (len & sizeof(u32)) { -#else /* !CONFIG_64BIT */ - while (len >= sizeof(u32)) { -#endif - u32 value = get_unaligned_le32(p); + if (len & sizeof(u32)) { + u32 value = get_unaligned_le32(p); + + CRC32(crc, value, w); + p += sizeof(u32); + } + } else { + for (; len >= sizeof(u32); len -= sizeof(u32)) { + u32 value = get_unaligned_le32(p); - CRC32C(crc, value, w); - p += sizeof(u32); - len -= sizeof(u32); + CRC32(crc, value, w); + p += sizeof(u32); + } } if (len & sizeof(u16)) { diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c index 867728ee535af4..c03ad0bbe69cdc 100644 --- a/arch/mips/crypto/poly1305-glue.c +++ b/arch/mips/crypto/poly1305-glue.c @@ -5,7 
+5,7 @@ * Copyright (C) 2019 Linaro Ltd. */ -#include +#include #include #include #include diff --git a/arch/mips/include/asm/cmp.h b/arch/mips/include/asm/cmp.h index e9e87504bb0c10..71e20e6cd38d87 100644 --- a/arch/mips/include/asm/cmp.h +++ b/arch/mips/include/asm/cmp.h @@ -7,12 +7,4 @@ */ struct task_struct; -extern void cmp_smp_setup(void); -extern void cmp_smp_finish(void); -extern void cmp_boot_secondary(int cpu, struct task_struct *t); -extern void cmp_init_secondary(void); -extern void cmp_prepare_cpus(unsigned int max_cpus); - -/* This is platform specific */ -extern void cmp_send_ipi(int cpu, unsigned int action); #endif /* _ASM_CMP_H */ diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h index 908e96e3a3117e..8fcad6984389cb 100644 --- a/arch/mips/include/asm/dec/prom.h +++ b/arch/mips/include/asm/dec/prom.h @@ -160,6 +160,5 @@ extern void prom_identify_arch(u32); extern void prom_init_cmdline(s32, s32 *, u32); extern void register_prom_console(void); -extern void unregister_prom_console(void); #endif /* _ASM_DEC_PROM_H */ diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 6743a57c1ab43d..f7222eb594ea60 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -728,8 +728,8 @@ struct kvm_mips_callbacks { int (*handle_fpe)(struct kvm_vcpu *vcpu); int (*handle_msa_disabled)(struct kvm_vcpu *vcpu); int (*handle_guest_exit)(struct kvm_vcpu *vcpu); - int (*hardware_enable)(void); - void (*hardware_disable)(void); + int (*enable_virtualization_cpu)(void); + void (*disable_virtualization_cpu)(void); int (*check_extension)(struct kvm *kvm, long ext); int (*vcpu_init)(struct kvm_vcpu *vcpu); void (*vcpu_uninit)(struct kvm_vcpu *vcpu); diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h index b82e513c8523a4..18c24051a1f269 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h +++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h @@ -124,7 +124,6 @@ extern int request_au1000_dma(int dev_id, extern void free_au1000_dma(unsigned int dmanr); extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos, int length, int *eof, void *data); -extern void dump_au1000_dma_channel(unsigned int dmanr); extern spinlock_t au1000_dma_spin_lock; static inline struct dma_chan *get_dma_chan(unsigned int dmanr) diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h index 08c36e50a86054..56959eb9cb262c 100644 --- a/arch/mips/include/asm/mach-ip27/mmzone.h +++ b/arch/mips/include/asm/mach-ip27/mmzone.h @@ -22,7 +22,6 @@ struct node_data { extern struct node_data *__node_data[]; -#define NODE_DATA(n) (&__node_data[(n)]->pglist) #define hub_data(n) (&__node_data[(n)]->hub) #endif /* _ASM_MACH_MMZONE_H */ diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index a3d65d37b8b5d5..8fb70fd3c9c4ad 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -14,10 +14,6 @@ #define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) #define nid_to_addrbase(nid) ((unsigned long)(nid) << NODE_ADDRSPACE_SHIFT) -extern struct pglist_data *__node_data[]; - -#define NODE_DATA(n) (__node_data[n]) - extern void __init prom_init_numa_memory(void); #endif /* _ASM_MACH_MMZONE_H */ diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h index 
c904c24550f619..5befba569c9f37 100644 --- a/arch/mips/include/asm/mips-boards/generic.h +++ b/arch/mips/include/asm/mips-boards/generic.h @@ -73,7 +73,4 @@ extern void mips_pcibios_init(void); #define mips_pcibios_init() do { } while (0) #endif -extern void mips_scroll_message(void); -extern void mips_display_message(const char *str); - #endif /* __ASM_MIPS_BOARDS_GENERIC_H */ diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h index 28917f1582b311..6ea02af298762a 100644 --- a/arch/mips/include/asm/mips_mt.h +++ b/arch/mips/include/asm/mips_mt.h @@ -17,8 +17,6 @@ extern int vpelimit; extern cpumask_t mt_fpu_cpumask; extern unsigned long mt_fpemul_threshold; -extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); - #ifdef CONFIG_MIPS_MT extern void mips_mt_set_cpuoptions(void); #else diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h index d0a540e88bb41c..d10afd13ee5b35 100644 --- a/arch/mips/include/uapi/asm/sigcontext.h +++ b/arch/mips/include/uapi/asm/sigcontext.h @@ -56,7 +56,6 @@ struct sigcontext { #if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 -#include /* * Keep this struct definition in sync with the sigcontext fragment * in arch/mips/kernel/asm-offsets.c diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 60ebaed28a4cab..8ab7582291abf2 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -151,6 +151,12 @@ #define SO_PASSPIDFD 76 #define SO_PEERPIDFD 77 +#define SO_DEVMEM_LINEAR 78 +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 79 +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 80 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c index e318ea11c8583e..d21e5d441f5337 100644 --- a/arch/mips/jazz/setup.c +++ b/arch/mips/jazz/setup.c @@ -23,8 +23,6 @@ #include #include -extern asmlinkage void jazz_handle_int(void); - extern void jazz_machine_restart(char *command); static struct resource jazz_io_resources[] = { diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index c938ba208fc0f2..37676a44fefb1f 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -43,83 +43,6 @@ static int __init maxtcs(char *str) __setup("maxtcs=", maxtcs); -/* - * Dump new MIPS MT state for the core. Does not leave TCs halted. - * Takes an argument which taken to be a pre-call MVPControl value. 
- */ - -void mips_mt_regdump(unsigned long mvpctl) -{ - unsigned long flags; - unsigned long vpflags; - unsigned long mvpconf0; - int nvpe; - int ntc; - int i; - int tc; - unsigned long haltval; - unsigned long tcstatval; - - local_irq_save(flags); - vpflags = dvpe(); - printk("=== MIPS MT State Dump ===\n"); - printk("-- Global State --\n"); - printk(" MVPControl Passed: %08lx\n", mvpctl); - printk(" MVPControl Read: %08lx\n", vpflags); - printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0())); - nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - printk("-- per-VPE State --\n"); - for (i = 0; i < nvpe; i++) { - for (tc = 0; tc < ntc; tc++) { - settc(tc); - if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { - printk(" VPE %d\n", i); - printk(" VPEControl : %08lx\n", - read_vpe_c0_vpecontrol()); - printk(" VPEConf0 : %08lx\n", - read_vpe_c0_vpeconf0()); - printk(" VPE%d.Status : %08lx\n", - i, read_vpe_c0_status()); - printk(" VPE%d.EPC : %08lx %pS\n", - i, read_vpe_c0_epc(), - (void *) read_vpe_c0_epc()); - printk(" VPE%d.Cause : %08lx\n", - i, read_vpe_c0_cause()); - printk(" VPE%d.Config7 : %08lx\n", - i, read_vpe_c0_config7()); - break; /* Next VPE */ - } - } - } - printk("-- per-TC State --\n"); - for (tc = 0; tc < ntc; tc++) { - settc(tc); - if (read_tc_c0_tcbind() == read_c0_tcbind()) { - /* Are we dumping ourself? */ - haltval = 0; /* Then we're not halted, and mustn't be */ - tcstatval = flags; /* And pre-dump TCStatus is flags */ - printk(" TC %d (current TC with VPE EPC above)\n", tc); - } else { - haltval = read_tc_c0_tchalt(); - write_tc_c0_tchalt(1); - tcstatval = read_tc_c0_tcstatus(); - printk(" TC %d\n", tc); - } - printk(" TCStatus : %08lx\n", tcstatval); - printk(" TCBind : %08lx\n", read_tc_c0_tcbind()); - printk(" TCRestart : %08lx %pS\n", - read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart()); - printk(" TCHalt : %08lx\n", haltval); - printk(" TCContext : %08lx\n", read_tc_c0_tccontext()); - if (!haltval) - write_tc_c0_tchalt(0); - } - printk("===========================\n"); - evpe(vpflags); - local_irq_restore(flags); -} - static int mt_opt_rpsctl = -1; static int mt_opt_nblsu = -1; static int mt_opt_forceconfig7; diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 0362fc5df7b0ff..39e193cad2b9e4 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -439,7 +439,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } /* preload SMP state for boot cpu */ -void smp_prepare_boot_cpu(void) +void __init smp_prepare_boot_cpu(void) { if (mp_ops->prepare_boot_cpu) mp_ops->prepare_boot_cpu(); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index b5de770b092e3f..60b43ea85c125f 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -125,14 +125,14 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) return 1; } -int kvm_arch_hardware_enable(void) +int kvm_arch_enable_virtualization_cpu(void) { - return kvm_mips_callbacks->hardware_enable(); + return kvm_mips_callbacks->enable_virtualization_cpu(); } -void kvm_arch_hardware_disable(void) +void kvm_arch_disable_virtualization_cpu(void) { - kvm_mips_callbacks->hardware_disable(); + kvm_mips_callbacks->disable_virtualization_cpu(); } int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 99d5a71e430007..ccab4d76b1265e 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -2869,7 +2869,7 @@ static unsigned int 
kvm_vz_resize_guest_vtlb(unsigned int size) return ret + 1; } -static int kvm_vz_hardware_enable(void) +static int kvm_vz_enable_virtualization_cpu(void) { unsigned int mmu_size, guest_mmu_size, ftlb_size; u64 guest_cvmctl, cvmvmconfig; @@ -2983,7 +2983,7 @@ static int kvm_vz_hardware_enable(void) return 0; } -static void kvm_vz_hardware_disable(void) +static void kvm_vz_disable_virtualization_cpu(void) { u64 cvmvmconfig; unsigned int mmu_size; @@ -3280,8 +3280,8 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = { .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled, .handle_guest_exit = kvm_trap_vz_handle_guest_exit, - .hardware_enable = kvm_vz_hardware_enable, - .hardware_disable = kvm_vz_hardware_disable, + .enable_virtualization_cpu = kvm_vz_enable_virtualization_cpu, + .disable_virtualization_cpu = kvm_vz_disable_virtualization_cpu, .check_extension = kvm_vz_check_extension, .vcpu_init = kvm_vz_vcpu_init, .vcpu_uninit = kvm_vz_vcpu_uninit, diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index 68dafd6d3e2571..8388400d052fe8 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -29,8 +29,6 @@ unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; EXPORT_SYMBOL(__node_distances); -struct pglist_data *__node_data[MAX_NUMNODES]; -EXPORT_SYMBOL(__node_data); cpumask_t __node_cpumask[MAX_NUMNODES]; EXPORT_SYMBOL(__node_cpumask); @@ -83,12 +81,8 @@ static void __init init_topology_matrix(void) static void __init node_mem_init(unsigned int node) { - struct pglist_data *nd; unsigned long node_addrspace_offset; unsigned long start_pfn, end_pfn; - unsigned long nd_pa; - int tnid; - const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); node_addrspace_offset = nid_to_addrbase(node); pr_info("Node%d's addrspace_offset is 0x%lx\n", @@ -98,16 +92,8 @@ static void __init node_mem_init(unsigned int node) pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n", node, start_pfn, end_pfn); - nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node); - if (!nd_pa) - panic("Cannot allocate %zu bytes for node %d data\n", - nd_size, node); - nd = __va(nd_pa); - memset(nd, 0, sizeof(struct pglist_data)); - tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (tnid != node) - pr_info("NODE_DATA(%d) on node %d\n", node, tnid); - __node_data[node] = nd; + alloc_node_data(node); + NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; @@ -198,13 +184,3 @@ void __init prom_init_numa_memory(void) pr_info("CP0_PageGrain: CP0 5.1 (0x%x)\n", read_c0_pagegrain()); prom_meminit(); } - -pg_data_t * __init arch_alloc_nodedata(int nid) -{ - return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES); -} - -void arch_refresh_nodedata(int nid, pg_data_t *pgdat) -{ - __node_data[nid] = pgdat; -} diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 7e11d7b5876107..5d2a1225785b81 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -98,7 +98,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, UP); @@ -110,7 +111,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, */ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long 
addr0, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, DOWN); diff --git a/arch/mips/ralink/irq-gic.c b/arch/mips/ralink/irq-gic.c index 3bab51a5fb4c76..8bc566ea00e56b 100644 --- a/arch/mips/ralink/irq-gic.c +++ b/arch/mips/ralink/irq-gic.c @@ -10,6 +10,7 @@ #include #include #include +#include int get_c0_perfcount_int(void) { diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c index dcf2a44ac51eed..926082655a7803 100644 --- a/arch/mips/ralink/timer-gic.c +++ b/arch/mips/ralink/timer-gic.c @@ -11,6 +11,8 @@ #include #include +#include + #include "common.h" void __init plat_time_init(void) diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index b8ca94cfb4fef3..1963313f55d8a1 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -35,7 +35,6 @@ #define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT) struct node_data *__node_data[MAX_NUMNODES]; - EXPORT_SYMBOL(__node_data); static u64 gen_region_mask(void) @@ -361,6 +360,7 @@ static void __init node_mem_init(nasid_t node) */ __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); memset(__node_data[node], 0, PAGE_SIZE); + node_data[node] = &__node_data[node]->pglist; NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; @@ -423,13 +423,3 @@ void __init mem_init(void) memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ } - -pg_data_t * __init arch_alloc_nodedata(int nid) -{ - return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES); -} - -void arch_refresh_nodedata(int nid, pg_data_t *pgdat) -{ - __node_data[nid] = (struct node_data *)pgdat; -} diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index 5d2652a1d35a5e..62733e049570a8 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -70,11 +70,13 @@ void cpu_node_probe(void) gda_t *gdap = GDA; nodes_clear(node_online_map); + nodes_clear(node_possible_map); for (i = 0; i < MAX_NUMNODES; i++) { nasid_t nasid = gdap->g_nasidtable[i]; if (nasid == INVALID_NASID) break; node_set_online(nasid); + node_set(nasid, node_possible_map); highest = node_scan_cpus(nasid, highest); } diff --git a/arch/nios2/kernel/misaligned.c b/arch/nios2/kernel/misaligned.c index 23e0544e117cef..2f2862eab3c6d5 100644 --- a/arch/nios2/kernel/misaligned.c +++ b/arch/nios2/kernel/misaligned.c @@ -23,7 +23,7 @@ #include #include -#include +#include /* instructions we emulate */ #define INST_LDHU 0x0b diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 3459df28afeeeb..a2278485de19fb 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -82,6 +82,10 @@ void __init mmu_init(void) pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE); pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE); static struct page *kuser_page[1]; +static struct vm_special_mapping vdso_mapping = { + .name = "[vdso]", + .pages = kuser_page, +}; static int alloc_kuser_page(void) { @@ -106,18 +110,18 @@ arch_initcall(alloc_kuser_page); int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; - int ret; + struct vm_area_struct *vma; mmap_write_lock(mm); /* Map kuser helpers to user space address */ - ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE, + vma = _install_special_mapping(mm, KUSER_BASE, KUSER_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | - 
VM_MAYEXEC, kuser_page); + VM_MAYEXEC, &vdso_mapping); mmap_write_unlock(mm); - return ret; + return IS_ERR(vma) ? PTR_ERR(vma) : 0; } const char *arch_vma_name(struct vm_area_struct *vma) diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b0a2ac3ba91610..aa6a3cad275d91 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -10,6 +10,7 @@ config PARISC select ARCH_WANT_FRAME_POINTERS select ARCH_HAS_CPU_CACHE_ALIASING select ARCH_HAS_DMA_ALLOC if PA11 + select ARCH_HAS_DMA_OPS select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX @@ -23,7 +24,6 @@ config PARISC select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VM_PGTABLE select HAVE_RELIABLE_STACKTRACE - select DMA_OPS select RTC_CLASS select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE @@ -72,7 +72,7 @@ config PARISC select GENERIC_SCHED_CLOCK select GENERIC_IRQ_MIGRATION if SMP select HAVE_UNSTABLE_SCHED_CLOCK if SMP - select LEGACY_TIMER_TICK + select GENERIC_CLOCKEVENTS select CPU_NO_EFFICIENT_FFS select THREAD_INFO_IN_TASK select NEED_DMA_MAP_STATE diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index d389359e22ac6c..9c83bd06ef1542 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include "sizes.h" diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h index 47c5a1991d1034..89b6beeda0b869 100644 --- a/arch/parisc/include/asm/mman.h +++ b/arch/parisc/include/asm/mman.h @@ -11,4 +11,18 @@ static inline bool arch_memory_deny_write_exec_supported(void) } #define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported +static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) +{ + /* + * The stack on parisc grows upwards, so if userspace requests memory + * for a stack, mark it with VM_GROWSUP so that the stack expansion in + * the fault handler will work. 
+ */ + if (flags & MAP_STACK) + return VM_GROWSUP; + + return 0; +} +#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags) + #endif /* __ASM_MMAN_H__ */ diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 982aca20f56f53..77fac02188e199 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -298,7 +298,7 @@ extern unsigned int toc_handler_csum; extern void do_cpu_irq_mask(struct pt_regs *); extern irqreturn_t timer_interrupt(int, void *); extern irqreturn_t ipi_interrupt(int, void *); -extern void start_cpu_itimer(void); +extern void parisc_clockevent_init(void); extern void handle_interruption(int, struct pt_regs *); /* called from assembly code: */ diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h deleted file mode 100644 index c0621295100d5c..00000000000000 --- a/arch/parisc/include/asm/unaligned.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_PARISC_UNALIGNED_H -#define _ASM_PARISC_UNALIGNED_H - -#include - -struct pt_regs; -void handle_unaligned(struct pt_regs *regs); -int check_unaligned(struct pt_regs *regs); - -#endif /* _ASM_PARISC_UNALIGNED_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index be264c2b1a1178..38fc0b188e0842 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -132,6 +132,12 @@ #define SO_PASSPIDFD 0x404A #define SO_PEERPIDFD 0x404B +#define SO_DEVMEM_LINEAR 78 +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 79 +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 80 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index ab23e61a6f016a..ea57bcc21dc5fe 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -1051,8 +1051,7 @@ ENTRY_CFI(intr_save) /* for os_hpmc */ STREG %r16, PT_ISR(%r29) STREG %r17, PT_IOR(%r29) -#if 0 && defined(CONFIG_64BIT) - /* Revisit when we have 64-bit code above 4Gb */ +#if defined(CONFIG_64BIT) b,n intr_save2 skip_save_ior: @@ -1060,8 +1059,7 @@ skip_save_ior: * need to adjust iasq/iaoq here in the same way we adjusted isr/ior * above. 
*/ - extrd,u,* %r8,PSW_W_BIT,1,%r1 - cmpib,COND(=),n 1,%r1,intr_save2 + bb,COND(>=),n %r8,PSW_W_BIT,intr_save2 LDREG PT_IASQ0(%r29), %r16 LDREG PT_IAOQ0(%r29), %r17 /* adjust iasq/iaoq */ diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index b0f0816879df8f..5e8e37a722ef0f 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -466,7 +466,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } static const struct file_operations perf_fops = { - .llseek = no_llseek, .read = perf_read, .write = perf_write, .unlocked_ioctl = perf_ioctl, diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 800eb64e91ad0a..b2d12ab728b171 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -297,7 +297,7 @@ smp_cpu_init(int cpunum) enter_lazy_tlb(&init_mm, current); init_IRQ(); /* make sure no IRQs are enabled or pending */ - start_cpu_itimer(); + parisc_clockevent_init(); } diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index f7722451276ef4..f852fe274abe46 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -167,7 +167,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr, len, pgoff, flags, UP); @@ -175,7 +176,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr, len, pgoff, flags, DOWN); diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 1f51aa9c8230cc..0fa81bf1466b15 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -243,10 +243,10 @@ linux_gateway_entry: #ifdef CONFIG_64BIT ldil L%sys_call_table, %r1 - or,= %r2,%r2,%r2 - addil L%(sys_call_table64-sys_call_table), %r1 + or,ev %r2,%r2,%r2 + ldil L%sys_call_table64, %r1 ldo R%sys_call_table(%r1), %r19 - or,= %r2,%r2,%r2 + or,ev %r2,%r2,%r2 ldo R%sys_call_table64(%r1), %r19 #else load32 sys_call_table, %r19 @@ -379,10 +379,10 @@ tracesys_next: extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */ ldil L%sys_call_table, %r1 - or,= %r2,%r2,%r2 - addil L%(sys_call_table64-sys_call_table), %r1 + or,ev %r2,%r2,%r2 + ldil L%sys_call_table64, %r1 ldo R%sys_call_table(%r1), %r19 - or,= %r2,%r2,%r2 + or,ev %r2,%r2,%r2 ldo R%sys_call_table64(%r1), %r19 #else load32 sys_call_table, %r19 @@ -1327,6 +1327,8 @@ ENTRY(sys_call_table) END(sys_call_table) #ifdef CONFIG_64BIT +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) .align 8 ENTRY(sys_call_table64) #include /* 64-bit syscalls */ diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 9714fbd7c42d65..c17e2249115f54 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -1,166 +1,119 @@ // SPDX-License-Identifier: GPL-2.0 /* - * linux/arch/parisc/kernel/time.c + * Common time service routines for parisc machines. 
+ * based on arch/loongarch/kernel/time.c * - * Copyright (C) 1991, 1992, 1995 Linus Torvalds - * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King - * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org) - * - * 1994-07-02 Alan Modra - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime - * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills + * Copyright (C) 2024 Helge Deller */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include -#include -#include -#include +#include +#include +#include +#include +#include #include -#include +#include -#include -#include -#include -#include -#include -#include -#include +static u64 cr16_clock_freq; +static unsigned long clocktick; -#include +int time_keeper_id; /* CPU used for timekeeping */ -int time_keeper_id __read_mostly; /* CPU used for timekeeping. */ +static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device); -static unsigned long clocktick __ro_after_init; /* timer cycles per tick */ +static void parisc_event_handler(struct clock_event_device *dev) +{ +} -/* - * We keep time on PA-RISC Linux by using the Interval Timer which is - * a pair of registers; one is read-only and one is write-only; both - * accessed through CR16. The read-only register is 32 or 64 bits wide, - * and increments by 1 every CPU clock tick. The architecture only - * guarantees us a rate between 0.5 and 2, but all implementations use a - * rate of 1. The write-only register is 32-bits wide. When the lowest - * 32 bits of the read-only register compare equal to the write-only - * register, it raises a maskable external interrupt. Each processor has - * an Interval Timer of its own and they are not synchronised. - * - * We want to generate an interrupt every 1/HZ seconds. So we program - * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data - * is programmed with the intended time of the next tick. We can be - * held off for an arbitrarily long period of time by interrupts being - * disabled, so we may miss one or more ticks. - */ -irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) +static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt) { - unsigned long now; - unsigned long next_tick; - unsigned long ticks_elapsed = 0; - unsigned int cpu = smp_processor_id(); - struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); - - /* gcc can optimize for "read-only" case with a local clocktick */ - unsigned long cpt = clocktick; - - /* Initialize next_tick to the old expected tick time. */ - next_tick = cpuinfo->it_value; - - /* Calculate how many ticks have elapsed. */ - now = mfctl(16); - do { - ++ticks_elapsed; - next_tick += cpt; - } while (next_tick - now > cpt); - - /* Store (in CR16 cycles) up to when we are accounting right now. */ - cpuinfo->it_value = next_tick; - - /* Go do system house keeping. */ - if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id)) - ticks_elapsed = 0; - legacy_timer_tick(ticks_elapsed); - - /* Skip clockticks on purpose if we know we would miss those. - * The new CR16 must be "later" than current CR16 otherwise - * itimer would not fire until CR16 wrapped - e.g 4 seconds - * later on a 1Ghz processor. We'll account for the missed - * ticks on the next timer interrupt. - * We want IT to fire modulo clocktick even if we miss/skip some. 
- * But those interrupts don't in fact get delivered that regularly. - * - * "next_tick - now" will always give the difference regardless - * if one or the other wrapped. If "now" is "bigger" we'll end up - * with a very large unsigned number. - */ - now = mfctl(16); - while (next_tick - now > cpt) - next_tick += cpt; - - /* Program the IT when to deliver the next interrupt. - * Only bottom 32-bits of next_tick are writable in CR16! - * Timer interrupt will be delivered at least a few hundred cycles - * after the IT fires, so if we are too close (<= 8000 cycles) to the - * next cycle, simply skip it. - */ - if (next_tick - now <= 8000) - next_tick += cpt; - mtctl(next_tick, 16); + unsigned long new_cr16; - return IRQ_HANDLED; -} + new_cr16 = mfctl(16) + delta; + mtctl(new_cr16, 16); + return 0; +} -unsigned long profile_pc(struct pt_regs *regs) +irqreturn_t timer_interrupt(int irq, void *data) { - unsigned long pc = instruction_pointer(regs); + struct clock_event_device *cd; + int cpu = smp_processor_id(); - if (regs->gr[0] & PSW_N) - pc -= 4; + cd = &per_cpu(parisc_clockevent_device, cpu); -#ifdef CONFIG_SMP - if (in_lock_functions(pc)) - pc = regs->gr[2]; -#endif + if (clockevent_state_periodic(cd)) + parisc_timer_next_event(clocktick, cd); - return pc; + if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd)) + cd->event_handler(cd); + + return IRQ_HANDLED; } -EXPORT_SYMBOL(profile_pc); +static int parisc_set_state_oneshot(struct clock_event_device *evt) +{ + parisc_timer_next_event(clocktick, evt); -/* clock source code */ + return 0; +} -static u64 notrace read_cr16(struct clocksource *cs) +static int parisc_set_state_periodic(struct clock_event_device *evt) { - return get_cycles(); + parisc_timer_next_event(clocktick, evt); + + return 0; } -static struct clocksource clocksource_cr16 = { - .name = "cr16", - .rating = 300, - .read = read_cr16, - .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; +static int parisc_set_state_shutdown(struct clock_event_device *evt) +{ + return 0; +} -void start_cpu_itimer(void) +void parisc_clockevent_init(void) { unsigned int cpu = smp_processor_id(); - unsigned long next_tick = mfctl(16) + clocktick; + unsigned long min_delta = 0x600; /* XXX */ + unsigned long max_delta = (1UL << (BITS_PER_LONG - 1)); + struct clock_event_device *cd; + + cd = &per_cpu(parisc_clockevent_device, cpu); + + cd->name = "cr16_clockevent"; + cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_PERCPU; + + cd->irq = TIMER_IRQ; + cd->rating = 320; + cd->cpumask = cpumask_of(cpu); + cd->set_state_oneshot = parisc_set_state_oneshot; + cd->set_state_oneshot_stopped = parisc_set_state_shutdown; + cd->set_state_periodic = parisc_set_state_periodic; + cd->set_state_shutdown = parisc_set_state_shutdown; + cd->set_next_event = parisc_timer_next_event; + cd->event_handler = parisc_event_handler; + + clockevents_config_and_register(cd, cr16_clock_freq, min_delta, max_delta); +} + +unsigned long notrace profile_pc(struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); - mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ + if (regs->gr[0] & PSW_N) + pc -= 4; + +#ifdef CONFIG_SMP + if (in_lock_functions(pc)) + pc = regs->gr[2]; +#endif - per_cpu(cpu_data, cpu).it_value = next_tick; + return pc; } +EXPORT_SYMBOL(profile_pc); #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC) static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm) @@ -224,12 +177,27 @@ void read_persistent_clock64(struct 
timespec64 *ts) } } - static u64 notrace read_cr16_sched_clock(void) { return get_cycles(); } +static u64 notrace read_cr16(struct clocksource *cs) +{ + return get_cycles(); +} + +static struct clocksource clocksource_cr16 = { + .name = "cr16", + .rating = 300, + .read = read_cr16, + .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), + .flags = CLOCK_SOURCE_IS_CONTINUOUS | + CLOCK_SOURCE_VALID_FOR_HRES | + CLOCK_SOURCE_MUST_VERIFY | + CLOCK_SOURCE_VERIFY_PERCPU, +}; + /* * timer interrupt and sched_clock() initialization @@ -237,33 +205,14 @@ static u64 notrace read_cr16_sched_clock(void) void __init time_init(void) { - unsigned long cr16_hz; - - clocktick = (100 * PAGE0->mem_10msec) / HZ; - start_cpu_itimer(); /* get CPU 0 started */ - - cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ + cr16_clock_freq = 100 * PAGE0->mem_10msec; /* Hz */ + clocktick = cr16_clock_freq / HZ; /* register as sched_clock source */ - sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); -} + sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq); -static int __init init_cr16_clocksource(void) -{ - /* - * The cr16 interval timers are not synchronized across CPUs. - */ - if (num_online_cpus() > 1 && !running_on_qemu) { - clocksource_cr16.name = "cr16_unstable"; - clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; - clocksource_cr16.rating = 0; - } + parisc_clockevent_init(); /* register at clocksource framework */ - clocksource_register_hz(&clocksource_cr16, - 100 * PAGE0->mem_10msec); - - return 0; + clocksource_register_hz(&clocksource_cr16, cr16_clock_freq); } - -device_initcall(init_cr16_clocksource); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 1107ca819ac8cd..b9b3d527bc9065 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include #include @@ -47,6 +47,8 @@ #include #include +#include "unaligned.h" + #if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) #include #endif @@ -504,7 +506,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (((unsigned long)regs->iaoq[0] & 3) && ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { /* Kill the user process later */ - regs->iaoq[0] = 0 | 3; + regs->iaoq[0] = 0 | PRIV_USER; regs->iaoq[1] = regs->iaoq[0] + 4; regs->iasq[0] = regs->iasq[1] = regs->sr[7]; regs->gr[0] &= ~PSW_B; diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 3e79e40e361d52..f4626943633adc 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -12,9 +12,10 @@ #include #include #include -#include +#include #include #include +#include "unaligned.h" /* #define DEBUG_UNALIGNED 1 */ diff --git a/arch/parisc/kernel/unaligned.h b/arch/parisc/kernel/unaligned.h new file mode 100644 index 00000000000000..c1aa4b12e284f9 --- /dev/null +++ b/arch/parisc/kernel/unaligned.h @@ -0,0 +1,3 @@ +struct pt_regs; +void handle_unaligned(struct pt_regs *regs); +int check_unaligned(struct pt_regs *regs); diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 0356199bd9e7e7..aa664f7ddb6398 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -40,7 +40,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); /* we need to make sure the colouring is OK */ - return arch_get_unmapped_area(file, addr, len, pgoff, flags); + return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0); } diff --git 
a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d7b09b064a8ac5..8094a01974cca1 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -133,6 +133,7 @@ config PPC select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES + select ARCH_HAS_DMA_OPS if PPC64 select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV @@ -185,7 +186,6 @@ config PPC select CPUMASK_OFFSTACK if NR_CPUS >= 8192 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN select DMA_OPS_BYPASS if PPC64 - select DMA_OPS if PPC64 select DYNAMIC_FTRACE if FUNCTION_TRACER select EDAC_ATOMIC_SCRUB select EDAC_SUPPORT @@ -269,6 +269,7 @@ config PPC select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_RETHOOK if KPROBES select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE select HAVE_RSEQ @@ -311,6 +312,7 @@ config PPC select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK select TRACE_IRQFLAGS_SUPPORT + select VDSO_GETRANDOM # # Please keep this list sorted alphabetically. # @@ -853,8 +855,8 @@ config DATA_SHIFT_BOOL bool "Set custom data alignment" depends on ADVANCED_OPTIONS depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE - depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \ - PPC_85xx + depends on (PPC_8xx && !PIN_TLB_DATA && (!STRICT_KERNEL_RWX || !PIN_TLB_TEXT)) || \ + PPC_BOOK3S_32 || PPC_85xx help This option allows you to set the kernel data alignment. When RAM is mapped by blocks, the alignment needs to fit the size and @@ -870,9 +872,9 @@ config DATA_SHIFT range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 - default 23 if STRICT_KERNEL_RWX && PPC_8xx - default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA - default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx + default 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && \ + (PIN_TLB_DATA || PIN_TLB_TEXT) + default 19 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx default 24 if STRICT_KERNEL_RWX && PPC_85xx default PAGE_SHIFT help @@ -1026,6 +1028,10 @@ config PPC_MEM_KEYS If unsure, say y. +config ARCH_PKEY_BITS + int + default 5 + config PPC_SECURE_BOOT prompt "Enable secure boot support" bool @@ -1269,8 +1275,27 @@ config TASK_SIZE_BOOL config TASK_SIZE hex "Size of user task space" if TASK_SIZE_BOOL default "0x80000000" if PPC_8xx - default "0xb0000000" if PPC_BOOK3S_32 + default "0xb0000000" if PPC_BOOK3S_32 && EXECMEM default "0xc0000000" + +config MODULES_SIZE_BOOL + bool "Set custom size for modules/execmem area" + depends on EXECMEM && ADVANCED_OPTIONS + help + This option allows you to set the size of kernel virtual address + space dedicated for modules/execmem. + For the time being it is only for 8xx and book3s/32. Other + platform share it with vmalloc space. + + Say N here unless you know what you are doing. 
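The header hunks further down turn CONFIG_MODULES_SIZE into an address window placed just below MODULES_END. A small user-space model of that arithmetic, with PAGE_OFFSET and the size chosen purely as example values (32 MB, the 8xx default above), is:

#include <stdio.h>

#define SZ_1M			0x00100000UL
#define PAGE_OFFSET		0xc0000000UL	/* example value only */
#define CONFIG_MODULES_SIZE	32		/* example: the 8xx default */

#define MODULES_END		PAGE_OFFSET
#define MODULES_SIZE		(CONFIG_MODULES_SIZE * SZ_1M)
#define MODULES_VADDR		(MODULES_END - MODULES_SIZE)

int main(void)
{
	/* Prints: modules area 0xbe000000 - 0xc0000000 (32 MB) */
	printf("modules area 0x%08lx - 0x%08lx (%lu MB)\n",
	       MODULES_VADDR, MODULES_END, MODULES_SIZE / SZ_1M);
	return 0;
}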
+ +config MODULES_SIZE + int "Size of modules/execmem area (In Mbytes)" if MODULES_SIZE_BOOL + range 1 256 if EXECMEM + default 64 if EXECMEM && PPC_BOOK3S_32 + default 32 if EXECMEM && PPC_8xx + default 0 + endmenu if PPC64 diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 3799ceceb04a38..0bbec4afc0d597 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -379,12 +379,6 @@ config FAIL_IOMMU If you are unsure, say N. -config PPC_FAST_ENDIAN_SWITCH - bool "Deprecated fast endian-switch syscall" - depends on DEBUG_KERNEL && PPC_BOOK3S_64 - help - If you're unsure what this is, say N. - config KASAN_SHADOW_OFFSET hex depends on KASAN diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h index ebfadd39e1924a..9506a96ebbcc2a 100644 --- a/arch/powerpc/boot/xz_config.h +++ b/arch/powerpc/boot/xz_config.h @@ -50,11 +50,8 @@ static inline void put_unaligned_be32(u32 val, void *p) /* prevent the inclusion of the xz-preboot MM headers */ #define DECOMPR_MM_H #define memmove memmove -#define XZ_EXTERN static /* xz.h needs to be included directly since we need enum xz_mode */ #include "../../../include/linux/xz.h" -#undef XZ_EXTERN - #endif diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 544a65fda77bcb..a5e3e7f97f4d72 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -81,7 +81,6 @@ CONFIG_MODULE_SIG_SHA512=y CONFIG_PARTITION_ADVANCED=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=y # CONFIG_SLAB_MERGE_DEFAULT is not set CONFIG_SLAB_FREELIST_RANDOM=y @@ -93,6 +92,7 @@ CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_MEM_SOFT_DIRTY=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_ZONE_DEVICE=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 09ebcbdfb34f6b..46a4c85e85e245 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -107,6 +107,7 @@ config CRYPTO_AES_PPC_SPE config CRYPTO_AES_GCM_P10 tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)" + depends on BROKEN depends on PPC64 && CPU_LITTLE_ENDIAN && VSX select CRYPTO_LIB_AES select CRYPTO_ALGAPI diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c index f62ee54076c06d..f66ad56e765f04 100644 --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c @@ -5,7 +5,7 @@ * Copyright 2022- IBM Inc. 
All rights reserved */ -#include +#include #include #include #include diff --git a/arch/powerpc/crypto/curve25519-ppc64le-core.c b/arch/powerpc/crypto/curve25519-ppc64le-core.c index 4e3e44ea4484d3..f7810be0b292b7 100644 --- a/arch/powerpc/crypto/curve25519-ppc64le-core.c +++ b/arch/powerpc/crypto/curve25519-ppc64le-core.c @@ -295,5 +295,6 @@ module_exit(curve25519_mod_exit); MODULE_ALIAS_CRYPTO("curve25519"); MODULE_ALIAS_CRYPTO("curve25519-ppc64le"); +MODULE_DESCRIPTION("PPC64le Curve25519 scalar multiplication with 51 bits limbs"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Danny Tsen "); diff --git a/arch/powerpc/crypto/poly1305-p10-glue.c b/arch/powerpc/crypto/poly1305-p10-glue.c index 95dd708573ee4d..369686e9370b44 100644 --- a/arch/powerpc/crypto/poly1305-p10-glue.c +++ b/arch/powerpc/crypto/poly1305-p10-glue.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index 2bc53c646ccd7d..f48e644900a26e 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h @@ -26,19 +26,23 @@ #define PPC_MIN_STKFRM 112 #ifdef __BIG_ENDIAN__ -#define LHZX_BE stringify_in_c(lhzx) #define LWZX_BE stringify_in_c(lwzx) #define LDX_BE stringify_in_c(ldx) #define STWX_BE stringify_in_c(stwx) #define STDX_BE stringify_in_c(stdx) #else -#define LHZX_BE stringify_in_c(lhbrx) #define LWZX_BE stringify_in_c(lwbrx) #define LDX_BE stringify_in_c(ldbrx) #define STWX_BE stringify_in_c(stwbrx) #define STDX_BE stringify_in_c(stdbrx) #endif +#ifdef CONFIG_CC_IS_CLANG +#define DS_FORM_CONSTRAINT "Z<>" +#else +#define DS_FORM_CONSTRAINT "YZ<>" +#endif + #else /* 32-bit */ /* operations for longs and pointers */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 5bf6a4d49268c7..d1ea554c33ed7e 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -11,6 +11,7 @@ #include #include #include +#include /* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with @@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v) if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter)); else - __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter)); return t; } @@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i) if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); else - __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); + __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i)); } #define ATOMIC64_OP(op, asm_op) \ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 52971ee30717fe..42c3af90d1f0ff 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -196,7 +196,8 @@ void unmap_kernel_page(unsigned long va); #endif #define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M) -#define MODULES_VADDR (MODULES_END - SZ_256M) +#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) +#define MODULES_VADDR (MODULES_END - MODULES_SIZE) #ifndef __ASSEMBLY__ #include diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 
c654c376ef8b86..c3efacab4b9412 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -74,6 +74,26 @@ #define remap_4k_pfn(vma, addr, pfn, prot) \ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) +/* + * With 4K page size the real_pte machinery is all nops. + */ +#define __real_pte(e, p, o) ((real_pte_t){(e)}) +#define __rpte_to_pte(r) ((r).pte) +#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT) + +#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ + do { \ + index = 0; \ + shift = mmu_psize_defs[psize].shift; \ + +#define pte_iterate_hashed_end() } while(0) + +/* + * We expect this to be called only for user addresses or kernel virtual + * addresses other than the linear mapping. + */ +#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K + /* * 4K PTE format is different from 64K PTE format. Saving the hash_slot is just * a matter of returning the PTE bits that need to be modified. On 64K PTE, diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 519b1743a0f4df..6d98e6f08d4de4 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -330,32 +330,6 @@ static inline unsigned long pud_leaf_size(pud_t pud) #ifndef __ASSEMBLY__ -/* - * This is the default implementation of various PTE accessors, it's - * used in all cases except Book3S with 64K pages where we have a - * concept of sub-pages - */ -#ifndef __real_pte - -#define __real_pte(e, p, o) ((real_pte_t){(e)}) -#define __rpte_to_pte(r) ((r).pte) -#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT) - -#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ - do { \ - index = 0; \ - shift = mmu_psize_defs[psize].shift; \ - -#define pte_iterate_hashed_end() } while(0) - -/* - * We expect this to be called only for user addresses or kernel virtual - * addresses other than the linear mapping. 
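The pte_iterate_hashed_subpages()/pte_iterate_hashed_end() pair kept in hash-4k.h above is an open/close macro bracket: the first macro opens a do { ... } block, the second closes it, and on 4K pages the "iteration" is a single pass. A stand-alone model with simplified stand-in macros (the real ones also take rpte, psize and va arguments) shows how callers pair them:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

#define pte_iterate_hashed_subpages(index, shift)	\
	do {						\
		index = 0;				\
		shift = DEMO_PAGE_SHIFT;

#define pte_iterate_hashed_end() } while (0)

int main(void)
{
	int index;
	int shift;

	/* Expands to: do { index = 0; shift = 12; printf(...); } while (0); */
	pte_iterate_hashed_subpages(index, shift)
		printf("subpage %d, shift %d\n", index, shift);
	pte_iterate_hashed_end();

	return 0;
}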
- */ -#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K - -#endif /* __real_pte */ - static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long clr, unsigned long set, int huge) @@ -1124,6 +1098,7 @@ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot); extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); +extern pud_t pud_modify(pud_t pud, pgprot_t newprot); extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); extern void set_pud_at(struct mm_struct *mm, unsigned long addr, @@ -1384,6 +1359,8 @@ static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, #define __HAVE_ARCH_PMDP_INVALIDATE extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); +extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, + pud_t *pudp); #define pmd_move_must_withdraw pmd_move_must_withdraw struct spinlock; diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 0e29ccf903d093..e7f14720f63072 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -76,6 +76,43 @@ int patch_instruction(u32 *addr, ppc_inst_t instr); int raw_patch_instruction(u32 *addr, ppc_inst_t instr); int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr); +/* + * The data patching functions patch_uint() and patch_ulong(), etc., must be + * called on aligned addresses. + * + * The instruction patching functions patch_instruction() and similar must be + * called on addresses satisfying instruction alignment requirements. + */ + +#ifdef CONFIG_PPC64 + +int patch_uint(void *addr, unsigned int val); +int patch_ulong(void *addr, unsigned long val); + +#define patch_u64 patch_ulong + +#else + +static inline int patch_uint(void *addr, unsigned int val) +{ + if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int))) + return -EINVAL; + + return patch_instruction(addr, ppc_inst(val)); +} + +static inline int patch_ulong(void *addr, unsigned long val) +{ + if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long))) + return -EINVAL; + + return patch_instruction(addr, ppc_inst(val)); +} + +#endif + +#define patch_u32 patch_uint + static inline unsigned long patch_site_addr(s32 *site) { return (unsigned long)site + *site; diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 91a9fd53254fa9..5e34611de9ef40 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -308,6 +308,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed); int eeh_pe_configure(struct eeh_pe *pe); int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func, unsigned long addr, unsigned long mask); +int eeh_pe_inject_mmio_error(struct pci_dev *pdev); /** * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. 
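The patch_uint()/patch_ulong() helpers added to code-patching.h above are for poking data words, with patch_instruction() reserved for instruction text; the new comment requires naturally aligned addresses. A hedged usage sketch (the symbols being patched are invented for the example):

#include <linux/types.h>
#include <asm/code-patching.h>

static unsigned int demo_flag;		/* hypothetical patched data word */
static unsigned long demo_target;	/* hypothetical patched pointer value */

static int demo_patch(unsigned long new_target)
{
	int err;

	/* 4-byte naturally aligned, so this satisfies the stated rule. */
	err = patch_u32(&demo_flag, 1);
	if (err)
		return err;

	/* patch_u64 aliases patch_ulong on 64-bit builds. */
	return patch_ulong(&demo_target, new_target);
}

On 32-bit builds these helpers fall back to patch_instruction() after an explicit IS_ALIGNED() check, as the inline versions above show.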
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 2d6c886b40f447..23638d4e73ac03 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -177,7 +177,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) if (user_mode(regs)) { kuap_lock(); - CT_WARN_ON(ct_state() != CONTEXT_USER); + CT_WARN_ON(ct_state() != CT_STATE_USER); user_exit_irqoff(); account_cpu_user_entry(); @@ -189,8 +189,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) * so avoid recursion. */ if (TRAP(regs) != INTERRUPT_PROGRAM) - CT_WARN_ON(ct_state() != CONTEXT_KERNEL && - ct_state() != CONTEXT_IDLE); + CT_WARN_ON(ct_state() != CT_STATE_KERNEL && + ct_state() != CT_STATE_IDLE); INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs)); INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) && search_kernel_restart_table(regs->nip)); diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 17a77d47ed6d64..42a51a993d942d 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -6,7 +6,7 @@ #include -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) && !defined(BUILD_VDSO) #include #include diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 37bffa0f79183b..a157ab513347dd 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -116,9 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea) } #endif -extern int use_cop(unsigned long acop, struct mm_struct *mm); -extern void drop_cop(unsigned long acop, struct mm_struct *mm); - #ifdef CONFIG_PPC_BOOK3S_64 static inline void inc_mm_active_cpus(struct mm_struct *mm) { @@ -260,15 +257,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, extern void arch_exit_mmap(struct mm_struct *mm); -static inline void arch_unmap(struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - unsigned long vdso_base = (unsigned long)mm->context.vdso; - - if (start <= vdso_base && vdso_base < end) - mm->context.vdso = NULL; -} - #ifdef CONFIG_PPC_MEM_KEYS bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, bool execute, bool foreign); diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h index da827d2d08666e..d99863cd6cde48 100644 --- a/arch/powerpc/include/asm/mmzone.h +++ b/arch/powerpc/include/asm/mmzone.h @@ -20,12 +20,6 @@ #ifdef CONFIG_NUMA -extern struct pglist_data *node_data[]; -/* - * Return a pointer to the node data for node n. - */ -#define NODE_DATA(nid) (node_data[nid]) - /* * Following are specific to this numa platform. 
*/ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index a756a1e59c54d0..2986f9ba40b88b 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -170,8 +170,9 @@ #define mmu_linear_psize MMU_PAGE_8M -#define MODULES_VADDR (PAGE_OFFSET - SZ_256M) #define MODULES_END PAGE_OFFSET +#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) +#define MODULES_VADDR (MODULES_END - MODULES_SIZE) #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h index d06efac6d7aa66..bb5f3e8ea912df 100644 --- a/arch/powerpc/include/asm/nohash/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/pgalloc.h @@ -19,8 +19,14 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb, static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), + pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), pgtable_gfp_flags(mm, GFP_KERNEL)); + +#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603) + memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, + (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); +#endif + return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h index 634970ce13c6b9..ecf5ac70cfae64 100644 --- a/arch/powerpc/include/asm/percpu.h +++ b/arch/powerpc/include/asm/percpu.h @@ -23,7 +23,7 @@ DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged); (static_key_enabled(&__percpu_first_chunk_is_paged.key)) #else #define percpu_first_chunk_is_paged false -#endif /* CONFIG_PPC64 && CONFIG_SMP */ +#endif #include diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 264a6c09517aa0..2f72ad885332e8 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -65,6 +65,7 @@ static inline unsigned long pte_pfn(pte_t pte) /* * Select all bits except the pfn */ +#define pte_pgprot pte_pgprot static inline pgprot_t pte_pgprot(pte_t pte) { unsigned long pte_flags; diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 065ffd1b2f8ada..04406162fc5ac3 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -397,6 +397,7 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect) #define PSERIES_HP_ELOG_RESOURCE_SLOT 3 #define PSERIES_HP_ELOG_RESOURCE_PHB 4 #define PSERIES_HP_ELOG_RESOURCE_PMEM 6 +#define PSERIES_HP_ELOG_RESOURCE_DT 7 #define PSERIES_HP_ELOG_ACTION_ADD 1 #define PSERIES_HP_ELOG_ACTION_REMOVE 2 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 15c5691dd21844..6ebca2996f18f8 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -226,6 +226,10 @@ static inline int arch_within_stack_frames(const void * const stack, return BAD_STACK; } +#ifdef CONFIG_PPC32 +extern void *emergency_ctx[]; +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index fd594bf6c6a9c5..4f5a46a77fa2b6 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -6,6 +6,7 @@ #include #include #include +#include #ifdef __powerpc64__ /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */ @@ -92,12 +93,6 @@ __pu_failed: \ : label) #endif 
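The nohash pgd_alloc() change earlier in this group copies the kernel part of swapper_pg_dir into every freshly allocated page directory on 8xx and book3s/603, so kernel mappings no longer rely on a fault-time fixup. A rough user-space model of that copy (the sizes, the user/kernel split and the pgd_t type are all made up for illustration):

#include <string.h>
#include <stdio.h>

#define MAX_PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	768	/* example split: 3G user / 1G kernel */

typedef unsigned long pgd_t;

static pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD];

static void demo_pgd_alloc(pgd_t *pgd)
{
	memset(pgd, 0, sizeof(pgd_t) * MAX_PTRS_PER_PGD);
	/* Copy only the kernel half, as the new pgd_alloc() does. */
	memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
	       (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

int main(void)
{
	pgd_t new_pgd[MAX_PTRS_PER_PGD];

	swapper_pg_dir[USER_PTRS_PER_PGD] = 0x12345678;	/* pretend kernel entry */
	demo_pgd_alloc(new_pgd);
	printf("kernel entry copied: %#lx\n", new_pgd[USER_PTRS_PER_PGD]);
	return 0;
}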
-#ifdef CONFIG_CC_IS_CLANG -#define DS_FORM_CONSTRAINT "Z<>" -#else -#define DS_FORM_CONSTRAINT "YZ<>" -#endif - #ifdef __powerpc64__ #ifdef CONFIG_PPC_KERNEL_PREFIXED #define __put_user_asm2_goto(x, ptr, label) \ diff --git a/arch/powerpc/include/asm/vdso/getrandom.h b/arch/powerpc/include/asm/vdso/getrandom.h new file mode 100644 index 00000000000000..501d6bb14e8a72 --- /dev/null +++ b/arch/powerpc/include/asm/vdso/getrandom.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Christophe Leroy , CS GROUP France + */ +#ifndef _ASM_POWERPC_VDSO_GETRANDOM_H +#define _ASM_POWERPC_VDSO_GETRANDOM_H + +#ifndef __ASSEMBLY__ + +static __always_inline int do_syscall_3(const unsigned long _r0, const unsigned long _r3, + const unsigned long _r4, const unsigned long _r5) +{ + register long r0 asm("r0") = _r0; + register unsigned long r3 asm("r3") = _r3; + register unsigned long r4 asm("r4") = _r4; + register unsigned long r5 asm("r5") = _r5; + register int ret asm ("r3"); + + asm volatile( + " sc\n" + " bns+ 1f\n" + " neg %0, %0\n" + "1:\n" + : "=r" (ret), "+r" (r4), "+r" (r5), "+r" (r0) + : "r" (r3) + : "memory", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr"); + + return ret; +} + +/** + * getrandom_syscall - Invoke the getrandom() syscall. + * @buffer: Destination buffer to fill with random bytes. + * @len: Size of @buffer in bytes. + * @flags: Zero or more GRND_* flags. + * Returns: The number of bytes written to @buffer, or a negative value indicating an error. + */ +static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags) +{ + return do_syscall_3(__NR_getrandom, (unsigned long)buffer, + (unsigned long)len, (unsigned long)flags); +} + +static __always_inline struct vdso_rng_data *__arch_get_vdso_rng_data(void) +{ + return NULL; +} + +ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, + size_t opaque_len, const struct vdso_rng_data *vd); + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_VDSO_GETRANDOM_H */ diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h index 48cf23f1e27366..92f480d8cc6dcb 100644 --- a/arch/powerpc/include/asm/vdso/vsyscall.h +++ b/arch/powerpc/include/asm/vdso/vsyscall.h @@ -17,6 +17,12 @@ struct vdso_data *__arch_get_k_vdso_data(void) } #define __arch_get_k_vdso_data __arch_get_k_vdso_data +static __always_inline +struct vdso_rng_data *__arch_get_k_vdso_rng_data(void) +{ + return &vdso_data->rng_data; +} + /* The asm-generic header needs to be included after the definitions above */ #include diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index a585c8e538ff0f..248dee138f7bf5 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -83,6 +83,7 @@ struct vdso_arch_data { __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */ struct vdso_data data[CS_BASES]; + struct vdso_rng_data rng_data; }; #else /* CONFIG_PPC64 */ @@ -95,6 +96,7 @@ struct vdso_arch_data { __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */ struct vdso_data data[CS_BASES]; + struct vdso_rng_data rng_data; }; #endif /* CONFIG_PPC64 */ @@ -111,6 +113,21 @@ extern struct vdso_arch_data *vdso_data; addi \ptr, \ptr, (_vdso_datapage - 999b)@l .endm +#include +#include + +.macro get_realdatapage ptr scratch + get_datapage \ptr +#ifdef CONFIG_TIME_NS + lwz 
\scratch, VDSO_CLOCKMODE_OFFSET(\ptr) + xoris \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@h + xori \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@l + cntlzw \scratch, \scratch + rlwinm \scratch, \scratch, PAGE_SHIFT - 5, 1 << PAGE_SHIFT + add \ptr, \ptr, \scratch +#endif +.endm + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 1784b6a6ca1dd6..f43c1198768c64 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -139,6 +139,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o obj-$(CONFIG_ARCH_HAS_DMA_SET_MASK) += dma-mask.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 23733282de4d9f..131a8cc10dbe8d 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -335,6 +335,7 @@ int main(void) /* datapage offsets for use by vdso */ OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data); + OFFSET(VDSO_RNG_DATA_OFFSET, vdso_arch_data, rng_data); OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec); #ifdef CONFIG_PPC64 OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size); @@ -346,6 +347,8 @@ int main(void) #else OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, syscall_map); #endif + OFFSET(VDSO_CLOCKMODE_OFFSET, vdso_arch_data, data[0].clock_mode); + DEFINE(VDSO_CLOCKMODE_TIMENS, VDSO_CLOCKMODE_TIMENS); #ifdef CONFIG_BUG DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index f502337dd37d64..0fcc463b02e256 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -735,7 +735,7 @@ static const struct sysfs_ops cache_index_ops = { .show = cache_index_show, }; -static struct kobj_type cache_index_type = { +static const struct kobj_type cache_index_type = { .release = cache_index_release, .sysfs_ops = &cache_index_ops, .default_groups = cache_index_default_groups, diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index d03f17987fca7f..83fe99861eb178 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1537,10 +1537,6 @@ int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func, if (!eeh_ops || !eeh_ops->err_inject) return -ENOENT; - /* Check on PCI error type */ - if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64) - return -EINVAL; - /* Check on PCI error function */ if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX) return -EINVAL; @@ -1578,6 +1574,104 @@ static int proc_eeh_show(struct seq_file *m, void *v) } #endif /* CONFIG_PROC_FS */ +static int eeh_break_device(struct pci_dev *pdev) +{ + struct resource *bar = NULL; + void __iomem *mapped; + u16 old, bit; + int i, pos; + + /* Do we have an MMIO BAR to disable? 
*/ + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + struct resource *r = &pdev->resource[i]; + + if (!r->flags || !r->start) + continue; + if (r->flags & IORESOURCE_IO) + continue; + if (r->flags & IORESOURCE_UNSET) + continue; + + bar = r; + break; + } + + if (!bar) { + pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n"); + return -ENXIO; + } + + pci_err(pdev, "Going to break: %pR\n", bar); + + if (pdev->is_virtfn) { +#ifndef CONFIG_PCI_IOV + return -ENXIO; +#else + /* + * VFs don't have a per-function COMMAND register, so the best + * we can do is clear the Memory Space Enable bit in the PF's + * SRIOV control reg. + * + * Unfortunately, this requires that we have a PF (i.e doesn't + * work for a passed-through VF) and it has the potential side + * effect of also causing an EEH on every other VF under the + * PF. Oh well. + */ + pdev = pdev->physfn; + if (!pdev) + return -ENXIO; /* passed through VFs have no PF */ + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pos += PCI_SRIOV_CTRL; + bit = PCI_SRIOV_CTRL_MSE; +#endif /* !CONFIG_PCI_IOV */ + } else { + bit = PCI_COMMAND_MEMORY; + pos = PCI_COMMAND; + } + + /* + * Process here is: + * + * 1. Disable Memory space. + * + * 2. Perform an MMIO to the device. This should result in an error + * (CA / UR) being raised by the device which results in an EEH + * PE freeze. Using the in_8() accessor skips the eeh detection hook + * so the freeze hook so the EEH Detection machinery won't be + * triggered here. This is to match the usual behaviour of EEH + * where the HW will asynchronously freeze a PE and it's up to + * the kernel to notice and deal with it. + * + * 3. Turn Memory space back on. This is more important for VFs + * since recovery will probably fail if we don't. For normal + * the COMMAND register is reset as a part of re-initialising + * the device. + * + * Breaking stuff is the point so who cares if it's racy ;) + */ + pci_read_config_word(pdev, pos, &old); + + mapped = ioremap(bar->start, PAGE_SIZE); + if (!mapped) { + pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar); + return -ENXIO; + } + + pci_write_config_word(pdev, pos, old & ~bit); + in_8(mapped); + pci_write_config_word(pdev, pos, old); + + iounmap(mapped); + + return 0; +} + +int eeh_pe_inject_mmio_error(struct pci_dev *pdev) +{ + return eeh_break_device(pdev); +} + #ifdef CONFIG_DEBUG_FS @@ -1682,7 +1776,6 @@ static ssize_t eeh_force_recover_write(struct file *filp, static const struct file_operations eeh_force_recover_fops = { .open = simple_open, - .llseek = no_llseek, .write = eeh_force_recover_write, }; @@ -1726,104 +1819,10 @@ static ssize_t eeh_dev_check_write(struct file *filp, static const struct file_operations eeh_dev_check_fops = { .open = simple_open, - .llseek = no_llseek, .write = eeh_dev_check_write, .read = eeh_debugfs_dev_usage, }; -static int eeh_debugfs_break_device(struct pci_dev *pdev) -{ - struct resource *bar = NULL; - void __iomem *mapped; - u16 old, bit; - int i, pos; - - /* Do we have an MMIO BAR to disable? 
*/ - for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { - struct resource *r = &pdev->resource[i]; - - if (!r->flags || !r->start) - continue; - if (r->flags & IORESOURCE_IO) - continue; - if (r->flags & IORESOURCE_UNSET) - continue; - - bar = r; - break; - } - - if (!bar) { - pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n"); - return -ENXIO; - } - - pci_err(pdev, "Going to break: %pR\n", bar); - - if (pdev->is_virtfn) { -#ifndef CONFIG_PCI_IOV - return -ENXIO; -#else - /* - * VFs don't have a per-function COMMAND register, so the best - * we can do is clear the Memory Space Enable bit in the PF's - * SRIOV control reg. - * - * Unfortunately, this requires that we have a PF (i.e doesn't - * work for a passed-through VF) and it has the potential side - * effect of also causing an EEH on every other VF under the - * PF. Oh well. - */ - pdev = pdev->physfn; - if (!pdev) - return -ENXIO; /* passed through VFs have no PF */ - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - pos += PCI_SRIOV_CTRL; - bit = PCI_SRIOV_CTRL_MSE; -#endif /* !CONFIG_PCI_IOV */ - } else { - bit = PCI_COMMAND_MEMORY; - pos = PCI_COMMAND; - } - - /* - * Process here is: - * - * 1. Disable Memory space. - * - * 2. Perform an MMIO to the device. This should result in an error - * (CA / UR) being raised by the device which results in an EEH - * PE freeze. Using the in_8() accessor skips the eeh detection hook - * so the freeze hook so the EEH Detection machinery won't be - * triggered here. This is to match the usual behaviour of EEH - * where the HW will asynchronously freeze a PE and it's up to - * the kernel to notice and deal with it. - * - * 3. Turn Memory space back on. This is more important for VFs - * since recovery will probably fail if we don't. For normal - * the COMMAND register is reset as a part of re-initialising - * the device. 
- * - * Breaking stuff is the point so who cares if it's racy ;) - */ - pci_read_config_word(pdev, pos, &old); - - mapped = ioremap(bar->start, PAGE_SIZE); - if (!mapped) { - pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar); - return -ENXIO; - } - - pci_write_config_word(pdev, pos, old & ~bit); - in_8(mapped); - pci_write_config_word(pdev, pos, old); - - iounmap(mapped); - - return 0; -} - static ssize_t eeh_dev_break_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos) @@ -1835,7 +1834,7 @@ static ssize_t eeh_dev_break_write(struct file *filp, if (IS_ERR(pdev)) return PTR_ERR(pdev); - ret = eeh_debugfs_break_device(pdev); + ret = eeh_break_device(pdev); pci_dev_put(pdev); if (ret < 0) @@ -1846,7 +1845,6 @@ static ssize_t eeh_dev_break_write(struct file *filp, static const struct file_operations eeh_dev_break_fops = { .open = simple_open, - .llseek = no_llseek, .write = eeh_dev_break_write, .read = eeh_debugfs_dev_usage, }; @@ -1893,7 +1891,6 @@ static ssize_t eeh_dev_can_recover(struct file *filp, static const struct file_operations eeh_dev_can_recover_fops = { .open = simple_open, - .llseek = no_llseek, .write = eeh_dev_can_recover, .read = eeh_debugfs_dev_usage, }; diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index eaf2f167c34285..195b075d116cf2 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1989,13 +1989,6 @@ INT_DEFINE_END(system_call) INTERRUPT_TO_KERNEL #endif -#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH -BEGIN_FTR_SECTION - cmpdi r0,0x1ebe - beq- 1f -END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) -#endif - /* We reach here with PACA in r13, r13 in r9. */ mfspr r11,SPRN_SRR0 mfspr r12,SPRN_SRR1 @@ -2015,16 +2008,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) b system_call_common #endif .endif - -#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH - /* Fast LE/BE switch system call */ -1: mfspr r12,SPRN_SRR1 - xori r12,r12,MSR_LE - mtspr SPRN_SRR1,r12 - mr r13,r9 - RFI_TO_USER /* return to userspace */ - b . /* prevent speculative execution */ -#endif .endm EXC_REAL_BEGIN(system_call, 0xc00, 0x100) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index ac74321b119287..811a7130505cd4 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -40,16 +40,6 @@ #include "head_32.h" -.macro compare_to_kernel_boundary scratch, addr -#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 -/* By simply checking Address >= 0x80000000, we know if its a kernel address */ - not. \scratch, \addr -#else - rlwinm \scratch, \addr, 16, 0xfff8 - cmpli cr0, \scratch, PAGE_OFFSET@h -#endif -.endm - #define PAGE_SHIFT_512K 19 #define PAGE_SHIFT_8M 23 @@ -199,18 +189,7 @@ instruction_counter: mfspr r10, SPRN_SRR0 /* Get effective address of fault */ INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11) mtspr SPRN_MD_EPN, r10 -#ifdef CONFIG_EXECMEM - mfcr r11 - compare_to_kernel_boundary r10, r10 -#endif mfspr r10, SPRN_M_TWB /* Get level 1 table */ -#ifdef CONFIG_EXECMEM - blt+ 3f - rlwinm r10, r10, 0, 20, 31 - oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha -3: - mtcr r11 -#endif lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MD_TWC, r11 mfspr r10, SPRN_MD_TWC @@ -248,19 +227,12 @@ instruction_counter: START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss) mtspr SPRN_SPRG_SCRATCH2, r10 mtspr SPRN_M_TW, r11 - mfcr r11 /* If we are faulting a kernel address, we have to use the * kernel page tables. 
*/ mfspr r10, SPRN_MD_EPN - compare_to_kernel_boundary r10, r10 mfspr r10, SPRN_M_TWB /* Get level 1 table */ - blt+ 3f - rlwinm r10, r10, 0, 20, 31 - oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha -3: - mtcr r11 lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MD_TWC, r11 @@ -332,15 +304,19 @@ instruction_counter: cmpwi cr1, r11, RPN_PATTERN beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */ DARFixed:/* Return from dcbx instruction bug workaround */ + mfspr r11, SPRN_DSISR + rlwinm r11, r11, 0, DSISR_NOHPTE + cmpwi cr1, r11, 0 + beq+ cr1, .Ldtlbie + mfspr r11, SPRN_DAR + tlbie r11 + rlwinm r11, r11, 16, 0xffff + cmplwi cr1, r11, TASK_SIZE@h + bge- cr1, FixupPGD +.Ldtlbie: EXCEPTION_PROLOG_1 /* 0x300 is DataAccess exception, needed by bad_page_fault() */ EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1 - lwz r4, _DAR(r11) - lwz r5, _DSISR(r11) - andis. r10,r5,DSISR_NOHPTE@h - beq+ .Ldtlbie - tlbie r4 -.Ldtlbie: prepare_transfer_to_handler bl do_page_fault b interrupt_return @@ -394,6 +370,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */ __HEAD . = 0x2000 +FixupPGD: + mtspr SPRN_M_TW, r10 + mfspr r10, SPRN_DAR + mtspr SPRN_MD_EPN, r10 + mfspr r11, SPRN_M_TWB /* Get level 1 table */ + lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ + cmpwi cr1, r10, 0 + bne cr1, 1f + + rlwinm r10, r11, 0, 20, 31 + oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha + lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r10) /* Get the level 1 entry */ + cmpwi cr1, r10, 0 + beq cr1, 1f + stw r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Set the level 1 entry */ + mfspr r10, SPRN_M_TW + mtcr r10 + mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH1 + rfi +1: + mfspr r10, SPRN_M_TW + b .Ldtlbie + /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions * by decoding the registers used by the dcbx instruction and adding them. * DAR is set to the calculated address. @@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */ mfspr r10, SPRN_SRR0 mtspr SPRN_MD_EPN, r10 rlwinm r11, r10, 16, 0xfff8 - cmpli cr1, r11, PAGE_OFFSET@h + cmpli cr1, r11, TASK_SIZE@h mfspr r11, SPRN_M_TWB /* Get level 1 table */ blt+ cr1, 3f @@ -587,6 +587,10 @@ start_here: lis r0, (MD_TWAM | MD_RSV4I)@h mtspr SPRN_MD_CTR, r0 #endif +#ifndef CONFIG_PIN_TLB_TEXT + li r0, 0 + mtspr SPRN_MI_CTR, r0 +#endif #if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR) lis r0, MD_TWAM@h mtspr SPRN_MD_CTR, r0 @@ -683,6 +687,7 @@ SYM_FUNC_START_LOCAL(initial_mmu) blr SYM_FUNC_END(initial_mmu) +#ifdef CONFIG_PIN_TLB _GLOBAL(mmu_pin_tlb) lis r9, (1f - PAGE_OFFSET)@h ori r9, r9, (1f - PAGE_OFFSET)@l @@ -704,6 +709,7 @@ _GLOBAL(mmu_pin_tlb) mtspr SPRN_MD_CTR, r6 tlbia +#ifdef CONFIG_PIN_TLB_TEXT LOAD_REG_IMMEDIATE(r5, 28 << 8) LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) @@ -724,6 +730,7 @@ _GLOBAL(mmu_pin_tlb) bdnzt lt, 2b lis r0, MI_RSV4I@h mtspr SPRN_MI_CTR, r0 +#endif LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM) #ifdef CONFIG_PIN_TLB_DATA @@ -783,3 +790,4 @@ _GLOBAL(mmu_pin_tlb) mtspr SPRN_SRR1, r10 mtspr SPRN_SRR0, r11 rfi +#endif diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index 57196883a00e36..cb2bca76be5350 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -411,39 +411,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) */ . 
= INTERRUPT_INST_TLB_MISS_603 InstructionTLBMiss: -/* - * r0: userspace flag (later scratch) - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: fault address - */ /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_IMISS -#ifdef CONFIG_EXECMEM - lis r1, TASK_SIZE@h /* check if kernel address */ - cmplw 0,r1,r3 -#endif + mfspr r0,SPRN_IMISS mfspr r2, SPRN_SDR1 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC rlwinm r2, r2, 28, 0xfffff000 + rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ #ifdef CONFIG_EXECMEM - li r0, 3 - bgt- 112f - lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ - li r0, 0 - addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ + rlwinm r3, r0, 4, 0xf + subi r3, r3, (TASK_SIZE >> 28) & 0xf #endif -112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ - lwz r2,0(r2) /* get pmd entry */ rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- InstructionAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */ lwz r2,0(r2) /* get linux-style pte */ andc. r1,r1,r2 /* check access & ~permission */ bne- InstructionAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ #ifdef CONFIG_EXECMEM - rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */ + rlwimi r2, r3, 1, 31, 31 /* userspace ? -> PP lsb */ #endif ori r1, r1, 0xe06 /* clear out reserved bits */ andc r1, r2, r1 /* PP = user? 1 : 0 */ @@ -451,7 +438,7 @@ BEGIN_FTR_SECTION rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 - tlbli r3 + tlbli r0 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r3 rfi @@ -480,35 +467,24 @@ InstructionAddressInvalid: */ . = INTERRUPT_DATA_LOAD_TLB_MISS_603 DataLoadTLBMiss: -/* - * r0: userspace flag (later scratch) - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: fault address - */ /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_DMISS - lis r1, TASK_SIZE@h /* check if kernel address */ - cmplw 0,r1,r3 + mfspr r0,SPRN_DMISS mfspr r2, SPRN_SDR1 - li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ - rlwinm r2, r2, 28, 0xfffff000 - li r0, 3 - bgt- 112f - lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ - li r0, 0 - addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ -112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ - lwz r2,0(r2) /* get pmd entry */ + rlwinm r1, r2, 28, 0xfffff000 + rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r1) /* get pmd entry */ + rlwinm r3, r0, 4, 0xf rlwinm. r2,r2,0,0,19 /* extract address of pte page */ - beq- DataAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + subi r3, r3, (TASK_SIZE >> 28) & 0xf + beq- 2f /* bail if no mapping */ +1: rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */ lwz r2,0(r2) /* get linux-style pte */ + li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ andc. r1,r1,r2 /* check access & ~permission */ bne- DataAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */ - rlwimi r2,r0,0,30,31 /* userspace ? -> PP */ + rlwimi r2,r3,2,30,31 /* userspace ? 
-> PP */ rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */ xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */ ori r1,r1,0xe04 /* clear out reserved bits */ @@ -518,25 +494,35 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 BEGIN_MMU_FTR_SECTION - li r0,1 + li r3,1 mfspr r1,SPRN_SPRG_603_LRU - rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */ - slw r0,r0,r2 - xor r1,r0,r1 - srw r0,r1,r2 + rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */ + slw r3,r3,r2 + xor r1,r3,r1 + srw r3,r1,r2 mtspr SPRN_SPRG_603_LRU,r1 mfspr r2,SPRN_SRR1 - rlwimi r2,r0,31-14,14,14 + rlwimi r2,r3,31-14,14,14 mtspr SPRN_SRR1,r2 mtcrf 0x80,r2 - tlbld r3 + tlbld r0 rfi MMU_FTR_SECTION_ELSE mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 - tlbld r3 + tlbld r0 rfi ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) + +2: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ + rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ + cmpwi cr0,r2,0 + beq- DataAddressInvalid /* return if no mapping */ + stw r2,0(r1) + rlwinm. r2,r2,0,0,19 /* extract address of pte page */ + b 1b DataAddressInvalid: mfspr r3,SPRN_SRR1 rlwinm r1,r3,9,6,6 /* Get load/store bit */ @@ -560,34 +546,24 @@ DataAddressInvalid: */ . = INTERRUPT_DATA_STORE_TLB_MISS_603 DataStoreTLBMiss: -/* - * r0: userspace flag (later scratch) - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: fault address - */ /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_DMISS - lis r1, TASK_SIZE@h /* check if kernel address */ - cmplw 0,r1,r3 + mfspr r0,SPRN_DMISS mfspr r2, SPRN_SDR1 - li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED - rlwinm r2, r2, 28, 0xfffff000 - li r0, 3 - bgt- 112f - lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ - li r0, 0 - addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ -112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ - lwz r2,0(r2) /* get pmd entry */ + rlwinm r1, r2, 28, 0xfffff000 + rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r1) /* get pmd entry */ + rlwinm r3, r0, 4, 0xf rlwinm. r2,r2,0,0,19 /* extract address of pte page */ - beq- DataAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + subi r3, r3, (TASK_SIZE >> 28) & 0xf + beq- 2f /* bail if no mapping */ +1: + rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */ lwz r2,0(r2) /* get linux-style pte */ + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED andc. r1,r1,r2 /* check access & ~permission */ bne- DataAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ - rlwimi r2,r0,0,31,31 /* userspace ? -> PP lsb */ + rlwimi r2,r3,1,31,31 /* userspace ? -> PP lsb */ li r1,0xe06 /* clear out reserved bits & PP msb */ andc r1,r2,r1 /* PP = user? 
1: 0 */ BEGIN_FTR_SECTION @@ -597,26 +573,36 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 BEGIN_MMU_FTR_SECTION - li r0,1 + li r3,1 mfspr r1,SPRN_SPRG_603_LRU - rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */ - slw r0,r0,r2 - xor r1,r0,r1 - srw r0,r1,r2 + rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */ + slw r3,r3,r2 + xor r1,r3,r1 + srw r3,r1,r2 mtspr SPRN_SPRG_603_LRU,r1 mfspr r2,SPRN_SRR1 - rlwimi r2,r0,31-14,14,14 + rlwimi r2,r3,31-14,14,14 mtspr SPRN_SRR1,r2 mtcrf 0x80,r2 - tlbld r3 + tlbld r0 rfi MMU_FTR_SECTION_ELSE mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 - tlbld r3 + tlbld r0 rfi ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) +2: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ + rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ + cmpwi cr0,r2,0 + beq- DataAddressInvalid /* return if no mapping */ + stw r2,0(r1) + rlwinm r2,r2,0,0,19 /* extract address of pte page */ + b 1b + #ifndef CONFIG_ALTIVEC #define altivec_assist_exception unknown_exception #endif diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c index eca293794a1e83..af62ec974b9702 100644 --- a/arch/powerpc/kernel/interrupt.c +++ b/arch/powerpc/kernel/interrupt.c @@ -266,7 +266,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, unsigned long ret = 0; bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv; - CT_WARN_ON(ct_state() == CONTEXT_USER); + CT_WARN_ON(ct_state() == CT_STATE_USER); kuap_assert_locked(); @@ -344,7 +344,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs) BUG_ON(regs_is_unrecoverable(regs)); BUG_ON(arch_irq_disabled_regs(regs)); - CT_WARN_ON(ct_state() == CONTEXT_USER); + CT_WARN_ON(ct_state() == CT_STATE_USER); /* * We don't need to restore AMR on the way back to userspace for KUAP. 
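The interrupt.c and interrupt.h hunks above only rename the context-tracking states from CONTEXT_* to CT_STATE_*; the assertions keep their shape. A hedged reminder of that shape (the functions are invented, while ct_state() and CT_WARN_ON() are the interfaces used above):

#include <linux/context_tracking.h>

static void demo_enter_from_user(void)
{
	/* On entry from user space the tracked state must still be user
	 * context until user_exit_irqoff() switches it. */
	CT_WARN_ON(ct_state() != CT_STATE_USER);
}

static void demo_enter_from_kernel(void)
{
	/* Kernel re-entry may also legitimately interrupt the idle state. */
	CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
		   ct_state() != CT_STATE_IDLE);
}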
@@ -386,7 +386,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs) if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) && TRAP(regs) != INTERRUPT_PROGRAM && TRAP(regs) != INTERRUPT_PERFMON) - CT_WARN_ON(ct_state() == CONTEXT_USER); + CT_WARN_ON(ct_state() == CT_STATE_USER); kuap = kuap_get_and_assert_locked(); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 14c5ddec305633..f8aa91bc3b1759 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -228,16 +228,6 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs kcb->kprobe_saved_msr = regs->msr; } -void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) -{ - ri->ret_addr = (kprobe_opcode_t *)regs->link; - ri->fp = NULL; - - /* Replace the return addr with trampoline addr */ - regs->link = (unsigned long)__kretprobe_trampoline; -} -NOKPROBE_SYMBOL(arch_prepare_kretprobe); - static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) { int ret; @@ -394,49 +384,6 @@ int kprobe_handler(struct pt_regs *regs) } NOKPROBE_SYMBOL(kprobe_handler); -/* - * Function return probe trampoline: - * - init_kprobes() establishes a probepoint here - * - When the probed function returns, this probe - * causes the handlers to fire - */ -asm(".global __kretprobe_trampoline\n" - ".type __kretprobe_trampoline, @function\n" - "__kretprobe_trampoline:\n" - "nop\n" - "blr\n" - ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n"); - -/* - * Called when the probe at kretprobe trampoline is hit - */ -static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) -{ - unsigned long orig_ret_address; - - orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); - /* - * We get here through one of two paths: - * 1. by taking a trap -> kprobe_handler() -> here - * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here - * - * When going back through (1), we need regs->nip to be setup properly - * as it is used to determine the return address from the trap. - * For (2), since nip is not honoured with optprobes, we instead setup - * the link register properly so that the subsequent 'blr' in - * __kretprobe_trampoline jumps back to the right instruction. - * - * For nip, we should set the address to the previous instruction since - * we end up emulating it in kprobe_handler(), which increments the nip - * again. - */ - regs_set_return_ip(regs, orig_ret_address - 4); - regs->link = orig_ret_address; - - return 0; -} -NOKPROBE_SYMBOL(trampoline_probe_handler); - /* * Called after single-stepping. 
p->addr is the address of the * instruction whose first byte has been replaced by the "breakpoint" @@ -539,19 +486,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) } NOKPROBE_SYMBOL(kprobe_fault_handler); -static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, - .pre_handler = trampoline_probe_handler -}; - -int __init arch_init_kprobes(void) -{ - return register_kprobe(&trampoline_p); -} - int arch_trampoline_kprobe(struct kprobe *p) { - if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline) return 1; return 0; diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 7112adc597a80b..e9bab599d0c274 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -651,12 +651,11 @@ static inline int create_stub(const Elf64_Shdr *sechdrs, // func_desc_t is 8 bytes if ABIv2, else 16 bytes desc = func_desc(addr); for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) { - if (patch_instruction(((u32 *)&entry->funcdata) + i, - ppc_inst(((u32 *)(&desc))[i]))) + if (patch_u32(((u32 *)&entry->funcdata) + i, ((u32 *)&desc)[i])) return 0; } - if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC))) + if (patch_u32(&entry->magic, STUB_MAGIC)) return 0; return 1; diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index e385d3164648ce..f9c6568a91370e 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -73,7 +73,7 @@ static const char *nvram_os_partitions[] = { }; static void oops_to_nvram(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason); + struct kmsg_dump_detail *detail); static struct kmsg_dumper nvram_kmsg_dumper = { .dump = oops_to_nvram @@ -643,7 +643,7 @@ void __init nvram_init_oops_partition(int rtas_partition_exists) * partition. If that's too much, go back and capture uncompressed text. */ static void oops_to_nvram(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; static unsigned int oops_count = 0; @@ -655,7 +655,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ; int rc = -1; - switch (reason) { + switch (detail->reason) { case KMSG_DUMP_SHUTDOWN: /* These are almost always orderly shutdowns. */ return; @@ -671,7 +671,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, break; default: pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", - __func__, (int) reason); + __func__, (int) detail->reason); return; } diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 004fae2044a3e0..c0b351d61058f7 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -56,7 +56,7 @@ static unsigned long can_optimize(struct kprobe *p) * has a 'nop' instruction, which can be emulated. * So further checks can be skipped. */ - if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline) return addr + sizeof(kprobe_opcode_t); /* diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3b506d4c55f37e..ff61a3e7984cec 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -72,8 +72,6 @@ #define TM_DEBUG(x...) 
do { } while(0) #endif -extern unsigned long _get_SP(void); - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Are we running in "Suspend disabled" mode? If so we have to block any @@ -2177,10 +2175,10 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, return 0; } +#ifdef CONFIG_PPC64 static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, unsigned long nbytes) { -#ifdef CONFIG_PPC64 unsigned long stack_page; unsigned long cpu = task_cpu(p); @@ -2208,10 +2206,26 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) return 1; # endif -#endif return 0; } +#else +static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, + unsigned long nbytes) +{ + unsigned long stack_page; + unsigned long cpu = task_cpu(p); + + if (!IS_ENABLED(CONFIG_VMAP_STACK)) + return 0; + + stack_page = (unsigned long)emergency_ctx[cpu] - THREAD_SIZE; + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) + return 1; + + return 0; +} +#endif /* * validate the stack frame of a particular minimum size, used for when we are diff --git a/arch/powerpc/kernel/rethook.c b/arch/powerpc/kernel/rethook.c new file mode 100644 index 00000000000000..5f5f47ae82cfa4 --- /dev/null +++ b/arch/powerpc/kernel/rethook.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PowerPC implementation of rethook. This depends on kprobes. + */ + +#include +#include + +/* + * Function return trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe + * causes the handlers to fire + */ +asm(".global arch_rethook_trampoline\n" + ".type arch_rethook_trampoline, @function\n" + "arch_rethook_trampoline:\n" + "nop\n" + "blr\n" + ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n"); + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int trampoline_rethook_handler(struct kprobe *p, struct pt_regs *regs) +{ + return !rethook_trampoline_handler(regs, regs->gpr[1]); +} +NOKPROBE_SYMBOL(trampoline_rethook_handler); + +void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) +{ + rh->ret_addr = regs->link; + rh->frame = regs->gpr[1]; + + /* Replace the return addr with trampoline addr */ + regs->link = (unsigned long)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); + +/* This is called from rethook_trampoline_handler(). */ +void arch_rethook_fixup_return(struct pt_regs *regs, unsigned long orig_ret_address) +{ + /* + * We get here through one of two paths: + * 1. by taking a trap -> kprobe_handler() -> here + * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here + * + * When going back through (1), we need regs->nip to be setup properly + * as it is used to determine the return address from the trap. + * For (2), since nip is not honoured with optprobes, we instead setup + * the link register properly so that the subsequent 'blr' in + * arch_rethook_trampoline jumps back to the right instruction. + * + * For nip, we should set the address to the previous instruction since + * we end up emulating it in kprobe_handler(), which increments the nip + * again. 
+ */
+	regs_set_return_ip(regs, orig_ret_address - 4);
+	regs->link = orig_ret_address;
+}
+NOKPROBE_SYMBOL(arch_rethook_fixup_return);
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &arch_rethook_trampoline,
+	.pre_handler = trampoline_rethook_handler
+};
+
+/* rethook initializer */
+int __init arch_init_kprobes(void)
+{
+	return register_kprobe(&trampoline_p);
+}
diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c
index eb3c053f323f77..fbeb1cbac01b2c 100644
--- a/arch/powerpc/kernel/secvar-sysfs.c
+++ b/arch/powerpc/kernel/secvar-sysfs.c
@@ -125,7 +125,7 @@ static const struct attribute_group secvar_attr_group = {
 };
 __ATTRIBUTE_GROUPS(secvar_attr);
-static struct kobj_type secvar_ktype = {
+static const struct kobj_type secvar_ktype = {
 	.sysfs_ops = &kobj_sysfs_ops,
 	.default_groups = secvar_attr_groups,
 };
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 46e6d2cd7a2dfd..4ab9b8cee77a1a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1166,7 +1166,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		cpu_smt_set_num_threads(num_threads, threads_per_core);
 }
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != boot_cpuid);
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index e6a958a5da2763..90882b5175cd4b 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
@@ -133,12 +134,13 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
 	 * arch-dependent code, they are generic.
 	 */
 	ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
-#ifdef CONFIG_KPROBES
+
	/*
	 * Mark stacktraces with kretprobed functions on them
	 * as unreliable.
*/ - if (ip == (unsigned long)__kretprobe_trampoline) +#ifdef CONFIG_RETHOOK + if (ip == (unsigned long)arch_rethook_trampoline) return -EINVAL; #endif diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c index 863a7aa24650af..1502b7e439cafb 100644 --- a/arch/powerpc/kernel/static_call.c +++ b/arch/powerpc/kernel/static_call.c @@ -17,7 +17,7 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail) mutex_lock(&text_mutex); if (func && !is_short) { - err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(target)); + err = patch_ulong(tramp + PPC_SCT_DATA, target); if (err) goto out; } diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index f6f868e817e636..be159ad4b77bd3 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -27,7 +27,7 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0) trace_hardirqs_off(); /* finish reconciling */ - CT_WARN_ON(ct_state() == CONTEXT_KERNEL); + CT_WARN_ON(ct_state() == CT_STATE_KERNEL); user_exit_irqoff(); BUG_ON(regs_is_unrecoverable(regs)); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 28d6472c380a89..edf5cabe5dfdbd 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -121,7 +121,7 @@ static void pmac_backlight_unblank(void) props = &pmac_backlight->props; props->brightness = props->max_brightness; - props->power = FB_BLANK_UNBLANK; + props->power = BACKLIGHT_POWER_ON; backlight_update_status(pmac_backlight); } mutex_unlock(&pmac_backlight_mutex); diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 7a2ff9010f1727..ee4b9d676cff54 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -81,6 +81,21 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start); } +static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struct *vma) +{ + struct mm_struct *mm = vma->vm_mm; + + /* + * close() is called for munmap() but also for mremap(). In the mremap() + * case the vdso pointer has already been updated by the mremap() hook + * above, so it must not be set to NULL here. + */ + if (vma->vm_start != (unsigned long)mm->context.vdso) + return; + + mm->context.vdso = NULL; +} + static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf); @@ -92,11 +107,13 @@ static struct vm_special_mapping vvar_spec __ro_after_init = { static struct vm_special_mapping vdso32_spec __ro_after_init = { .name = "[vdso]", .mremap = vdso32_mremap, + .close = vdso_close, }; static struct vm_special_mapping vdso64_spec __ro_after_init = { .name = "[vdso]", .mremap = vdso64_mremap, + .close = vdso_close, }; #ifdef CONFIG_TIME_NS @@ -197,13 +214,6 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int /* Add required alignment. */ vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT); - /* - * Put vDSO base into mm struct. We need to do this before calling - * install_special_mapping or the perf counter mmap tracking code - * will fail to recognise it as a vDSO. 
- */ - mm->context.vdso = (void __user *)vdso_base + vvar_size; - vma = _install_special_mapping(mm, vdso_base, vvar_size, VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP, &vvar_spec); @@ -223,10 +233,15 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, vdso_spec); - if (IS_ERR(vma)) + if (IS_ERR(vma)) { do_munmap(mm, vdso_base, vvar_size, NULL); + return PTR_ERR(vma); + } + + // Now that the mappings are in place, set the mm VDSO pointer + mm->context.vdso = (void __user *)vdso_base + vvar_size; - return PTR_ERR_OR_ZERO(vma); + return 0; } int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) @@ -240,8 +255,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -EINTR; rc = __arch_setup_additional_pages(bprm, uses_interp); - if (rc) - mm->context.vdso = NULL; mmap_write_unlock(mm); return rc; diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile index 1425b6edc66b24..31ca5a5470047e 100644 --- a/arch/powerpc/kernel/vdso/Makefile +++ b/arch/powerpc/kernel/vdso/Makefile @@ -8,30 +8,21 @@ include $(srctree)/lib/vdso/Makefile obj-vdso32 = sigtramp32-32.o gettimeofday-32.o datapage-32.o cacheflush-32.o note-32.o getcpu-32.o obj-vdso64 = sigtramp64-64.o gettimeofday-64.o datapage-64.o cacheflush-64.o note-64.o getcpu-64.o +obj-vdso32 += getrandom-32.o vgetrandom-chacha-32.o +obj-vdso64 += getrandom-64.o vgetrandom-chacha-64.o + ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday-32.o += -include $(c-gettimeofday-y) - CFLAGS_vgettimeofday-32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_vgettimeofday-32.o += $(call cc-option, -fno-stack-protector) - CFLAGS_vgettimeofday-32.o += -DDISABLE_BRANCH_PROFILING - CFLAGS_vgettimeofday-32.o += -ffreestanding -fasynchronous-unwind-tables - CFLAGS_REMOVE_vgettimeofday-32.o = $(CC_FLAGS_FTRACE) - CFLAGS_REMOVE_vgettimeofday-32.o += -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc - # This flag is supported by clang for 64-bit but not 32-bit so it will cause - # an unused command line flag warning for this file. - ifdef CONFIG_CC_IS_CLANG - CFLAGS_REMOVE_vgettimeofday-32.o += -fno-stack-clash-protection - endif - CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y) - CFLAGS_vgettimeofday-64.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_vgettimeofday-64.o += $(call cc-option, -fno-stack-protector) - CFLAGS_vgettimeofday-64.o += -DDISABLE_BRANCH_PROFILING - CFLAGS_vgettimeofday-64.o += -ffreestanding -fasynchronous-unwind-tables - CFLAGS_REMOVE_vgettimeofday-64.o = $(CC_FLAGS_FTRACE) # Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true # by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is # compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code # generation is minimal, it will just use r29 instead. 
- CFLAGS_vgettimeofday-64.o += $(call cc-option, -ffixed-r30) + CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y) $(call cc-option, -ffixed-r30) +endif + +ifneq ($(c-getrandom-y),) + CFLAGS_vgetrandom-32.o += -include $(c-getrandom-y) + CFLAGS_vgetrandom-64.o += -include $(c-getrandom-y) endif # Build rules @@ -42,12 +33,18 @@ else VDSOCC := $(CC) endif -targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday-32.o +targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday-32.o vgetrandom-32.o +targets += crtsavres-32.o obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) -targets += $(obj-vdso64) vdso64.so.dbg vgettimeofday-64.o +targets += $(obj-vdso64) vdso64.so.dbg vgettimeofday-64.o vgetrandom-64.o obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) -ccflags-y := -fno-common -fno-builtin +ccflags-y := -fno-common -fno-builtin -DBUILD_VDSO +ccflags-y += $(DISABLE_LATENT_ENTROPY_PLUGIN) +ccflags-y += $(call cc-option, -fno-stack-protector) +ccflags-y += -DDISABLE_BRANCH_PROFILING +ccflags-y += -ffreestanding -fasynchronous-unwind-tables +ccflags-remove-y := $(CC_FLAGS_FTRACE) ldflags-y := -Wl,--hash-style=both -nostdlib -shared -z noexecstack $(CLANG_FLAGS) ldflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld) ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) @@ -56,6 +53,12 @@ ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WAR ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CFLAGS)) CC32FLAGS := -m32 +CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc + # This flag is supported by clang for 64-bit but not 32-bit so it will cause + # an unused command line flag warning for this file. +ifdef CONFIG_CC_IS_CLANG +CC32FLAGSREMOVE += -fno-stack-clash-protection +endif LD32FLAGS := -Wl,-soname=linux-vdso32.so.1 AS32FLAGS := -D__VDSO32__ @@ -68,20 +71,26 @@ targets += vdso64.lds CPPFLAGS_vdso64.lds += -P -C # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o FORCE +$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o $(obj)/vgetrandom-32.o $(obj)/crtsavres-32.o FORCE $(call if_changed,vdso32ld_and_check) -$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o FORCE +$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o $(obj)/vgetrandom-64.o FORCE $(call if_changed,vdso64ld_and_check) # assembly rules for the .S files $(obj-vdso32): %-32.o: %.S FORCE $(call if_changed_dep,vdso32as) +$(obj)/crtsavres-32.o: %-32.o: $(srctree)/arch/powerpc/lib/crtsavres.S FORCE + $(call if_changed_dep,vdso32as) $(obj)/vgettimeofday-32.o: %-32.o: %.c FORCE $(call if_changed_dep,vdso32cc) +$(obj)/vgetrandom-32.o: %-32.o: %.c FORCE + $(call if_changed_dep,vdso32cc) $(obj-vdso64): %-64.o: %.S FORCE $(call if_changed_dep,vdso64as) $(obj)/vgettimeofday-64.o: %-64.o: %.c FORCE $(call if_changed_dep,cc_o_c) +$(obj)/vgetrandom-64.o: %-64.o: %.c FORCE + $(call if_changed_dep,cc_o_c) # Generate VDSO offsets using helper script gen-vdso32sym := $(src)/gen_vdso32_offsets.sh @@ -102,7 +111,7 @@ quiet_cmd_vdso32ld_and_check = VDSO32L $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) $(AS32FLAGS) -c -o $@ $< quiet_cmd_vdso32cc = VDSO32C $@ - cmd_vdso32cc = $(VDSOCC) $(c_flags) $(CC32FLAGS) -c -o $@ $< + cmd_vdso32cc = $(VDSOCC) $(filter-out $(CC32FLAGSREMOVE), $(c_flags)) 
$(CC32FLAGS) -c -o $@ $< quiet_cmd_vdso64ld_and_check = VDSO64L $@ cmd_vdso64ld_and_check = $(VDSOCC) $(ldflags-y) $(LD64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^); $(cmd_vdso_check) diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S index 0085ae464dac9c..3b2479bd2f9a1d 100644 --- a/arch/powerpc/kernel/vdso/cacheflush.S +++ b/arch/powerpc/kernel/vdso/cacheflush.S @@ -30,7 +30,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) #ifdef CONFIG_PPC64 mflr r12 .cfi_register lr,r12 - get_datapage r10 + get_realdatapage r10, r11 mtlr r12 .cfi_restore lr #endif diff --git a/arch/powerpc/kernel/vdso/datapage.S b/arch/powerpc/kernel/vdso/datapage.S index db8e167f01667e..2b19b6201a33a8 100644 --- a/arch/powerpc/kernel/vdso/datapage.S +++ b/arch/powerpc/kernel/vdso/datapage.S @@ -28,7 +28,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) mflr r12 .cfi_register lr,r12 mr. r4,r3 - get_datapage r3 + get_realdatapage r3, r11 mtlr r12 #ifdef __powerpc64__ addi r3,r3,CFG_SYSCALL_MAP64 @@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq) .cfi_startproc mflr r12 .cfi_register lr,r12 - get_datapage r3 + get_realdatapage r3, r11 #ifndef __powerpc64__ lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3) #endif diff --git a/arch/powerpc/kernel/vdso/getrandom.S b/arch/powerpc/kernel/vdso/getrandom.S new file mode 100644 index 00000000000000..f3bbf931931ca2 --- /dev/null +++ b/arch/powerpc/kernel/vdso/getrandom.S @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Userland implementation of getrandom() for processes + * for use in the vDSO + * + * Copyright (C) 2024 Christophe Leroy , CS GROUP France + */ +#include +#include +#include +#include +#include +#include + +/* + * The macro sets two stack frames, one for the caller and one for the callee + * because there are no requirement for the caller to set a stack frame when + * calling VDSO so it may have omitted to set one, especially on PPC64 + */ + +.macro cvdso_call funct + .cfi_startproc + PPC_STLU r1, -PPC_MIN_STKFRM(r1) + .cfi_adjust_cfa_offset PPC_MIN_STKFRM + mflr r0 + PPC_STLU r1, -PPC_MIN_STKFRM(r1) + .cfi_adjust_cfa_offset PPC_MIN_STKFRM + PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) + .cfi_rel_offset lr, PPC_MIN_STKFRM + PPC_LR_STKOFF +#ifdef __powerpc64__ + PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1) + .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT +#endif + get_realdatapage r8, r11 + addi r8, r8, VDSO_RNG_DATA_OFFSET + bl CFUNC(DOTSYM(\funct)) + PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) +#ifdef __powerpc64__ + PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) + .cfi_restore r2 +#endif + cmpwi r3, 0 + mtlr r0 + addi r1, r1, 2 * PPC_MIN_STKFRM + .cfi_restore lr + .cfi_def_cfa_offset 0 + crclr so + bgelr+ + crset so + neg r3, r3 + blr + .cfi_endproc +.endm + + .text +V_FUNCTION_BEGIN(__kernel_getrandom) + cvdso_call __c_kernel_getrandom +V_FUNCTION_END(__kernel_getrandom) diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S index 48fc6658053aa4..5540d7021fa25c 100644 --- a/arch/powerpc/kernel/vdso/gettimeofday.S +++ b/arch/powerpc/kernel/vdso/gettimeofday.S @@ -38,11 +38,7 @@ .else addi r4, r5, VDSO_DATA_OFFSET .endif -#ifdef __powerpc64__ bl CFUNC(DOTSYM(\funct)) -#else - bl \funct -#endif PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) #ifdef __powerpc64__ PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) @@ -118,16 +114,3 @@ V_FUNCTION_END(__kernel_clock_getres) V_FUNCTION_BEGIN(__kernel_time) cvdso_call __c_kernel_time call_time=1 V_FUNCTION_END(__kernel_time) - -/* 
Routines for restoring integer registers, called by the compiler. */ -/* Called with r11 pointing to the stack header word of the caller of the */ -/* function, just beyond the end of the integer restore area. */ -#ifndef __powerpc64__ -_GLOBAL(_restgpr_31_x) -_GLOBAL(_rest32gpr_31_x) - lwz r0,4(r11) - lwz r31,-4(r11) - mtlr r0 - mr r1,r11 - blr -#endif diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S index 8f57107000a247..7b41d5d256e814 100644 --- a/arch/powerpc/kernel/vdso/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso/vdso32.lds.S @@ -130,6 +130,7 @@ VERSION #if defined(CONFIG_PPC64) || !defined(CONFIG_SMP) __kernel_getcpu; #endif + __kernel_getrandom; local: *; }; diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S index 400819258c06b7..9481e4b892ed13 100644 --- a/arch/powerpc/kernel/vdso/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso/vdso64.lds.S @@ -123,6 +123,7 @@ VERSION __kernel_sigtramp_rt64; __kernel_getcpu; __kernel_time; + __kernel_getrandom; local: *; }; diff --git a/arch/powerpc/kernel/vdso/vgetrandom-chacha.S b/arch/powerpc/kernel/vdso/vgetrandom-chacha.S new file mode 100644 index 00000000000000..7f9061a9e8b46b --- /dev/null +++ b/arch/powerpc/kernel/vdso/vgetrandom-chacha.S @@ -0,0 +1,365 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Christophe Leroy , CS GROUP France + */ + +#include + +#include + +#define dst_bytes r3 +#define key r4 +#define counter r5 +#define nblocks r6 + +#define idx_r0 r0 +#define val4 r4 + +#define const0 0x61707865 +#define const1 0x3320646e +#define const2 0x79622d32 +#define const3 0x6b206574 + +#define key0 r5 +#define key1 r6 +#define key2 r7 +#define key3 r8 +#define key4 r9 +#define key5 r10 +#define key6 r11 +#define key7 r12 + +#define counter0 r14 +#define counter1 r15 + +#define state0 r16 +#define state1 r17 +#define state2 r18 +#define state3 r19 +#define state4 r20 +#define state5 r21 +#define state6 r22 +#define state7 r23 +#define state8 r24 +#define state9 r25 +#define state10 r26 +#define state11 r27 +#define state12 r28 +#define state13 r29 +#define state14 r30 +#define state15 r31 + +.macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4 + add \a1, \a1, \b1 + add \a2, \a2, \b2 + add \a3, \a3, \b3 + add \a4, \a4, \b4 + xor \d1, \d1, \a1 + xor \d2, \d2, \a2 + xor \d3, \d3, \a3 + xor \d4, \d4, \a4 + rotlwi \d1, \d1, 16 + rotlwi \d2, \d2, 16 + rotlwi \d3, \d3, 16 + rotlwi \d4, \d4, 16 + add \c1, \c1, \d1 + add \c2, \c2, \d2 + add \c3, \c3, \d3 + add \c4, \c4, \d4 + xor \b1, \b1, \c1 + xor \b2, \b2, \c2 + xor \b3, \b3, \c3 + xor \b4, \b4, \c4 + rotlwi \b1, \b1, 12 + rotlwi \b2, \b2, 12 + rotlwi \b3, \b3, 12 + rotlwi \b4, \b4, 12 + add \a1, \a1, \b1 + add \a2, \a2, \b2 + add \a3, \a3, \b3 + add \a4, \a4, \b4 + xor \d1, \d1, \a1 + xor \d2, \d2, \a2 + xor \d3, \d3, \a3 + xor \d4, \d4, \a4 + rotlwi \d1, \d1, 8 + rotlwi \d2, \d2, 8 + rotlwi \d3, \d3, 8 + rotlwi \d4, \d4, 8 + add \c1, \c1, \d1 + add \c2, \c2, \d2 + add \c3, \c3, \d3 + add \c4, \c4, \d4 + xor \b1, \b1, \c1 + xor \b2, \b2, \c2 + xor \b3, \b3, \c3 + xor \b4, \b4, \c4 + rotlwi \b1, \b1, 7 + rotlwi \b2, \b2, 7 + rotlwi \b3, \b3, 7 + rotlwi \b4, \b4, 7 +.endm + +#define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \ + quarterround4 state##a1 state##b1 state##c1 state##d1 \ + state##a2 state##b2 state##c2 state##d2 \ + state##a3 state##b3 state##c3 state##d3 \ + state##a4 state##b4 state##c4 state##d4 + +/* + * Very basic 32 bits implementation of 
ChaCha20. Produces a given positive number + * of blocks of output with a nonce of 0, taking an input key and 8-byte + * counter. Importantly does not spill to the stack. Its arguments are: + * + * r3: output bytes + * r4: 32-byte key input + * r5: 8-byte counter input/output (saved on stack) + * r6: number of 64-byte blocks to write to output + * + * r0: counter of blocks (initialised with r6) + * r4: Value '4' after key has been read. + * r5-r12: key + * r14-r15: counter + * r16-r31: state + */ +SYM_FUNC_START(__arch_chacha20_blocks_nostack) +#ifdef __powerpc64__ + std counter, -216(r1) + + std r14, -144(r1) + std r15, -136(r1) + std r16, -128(r1) + std r17, -120(r1) + std r18, -112(r1) + std r19, -104(r1) + std r20, -96(r1) + std r21, -88(r1) + std r22, -80(r1) + std r23, -72(r1) + std r24, -64(r1) + std r25, -56(r1) + std r26, -48(r1) + std r27, -40(r1) + std r28, -32(r1) + std r29, -24(r1) + std r30, -16(r1) + std r31, -8(r1) +#else + stwu r1, -96(r1) + stw counter, 20(r1) +#ifdef __BIG_ENDIAN__ + stmw r14, 24(r1) +#else + stw r14, 24(r1) + stw r15, 28(r1) + stw r16, 32(r1) + stw r17, 36(r1) + stw r18, 40(r1) + stw r19, 44(r1) + stw r20, 48(r1) + stw r21, 52(r1) + stw r22, 56(r1) + stw r23, 60(r1) + stw r24, 64(r1) + stw r25, 68(r1) + stw r26, 72(r1) + stw r27, 76(r1) + stw r28, 80(r1) + stw r29, 84(r1) + stw r30, 88(r1) + stw r31, 92(r1) +#endif +#endif /* __powerpc64__ */ + + lwz counter0, 0(counter) + lwz counter1, 4(counter) +#ifdef __powerpc64__ + rldimi counter0, counter1, 32, 0 +#endif + mr idx_r0, nblocks + subi dst_bytes, dst_bytes, 4 + + lwz key0, 0(key) + lwz key1, 4(key) + lwz key2, 8(key) + lwz key3, 12(key) + lwz key4, 16(key) + lwz key5, 20(key) + lwz key6, 24(key) + lwz key7, 28(key) + + li val4, 4 +.Lblock: + li r31, 10 + + lis state0, const0@ha + lis state1, const1@ha + lis state2, const2@ha + lis state3, const3@ha + addi state0, state0, const0@l + addi state1, state1, const1@l + addi state2, state2, const2@l + addi state3, state3, const3@l + + mtctr r31 + + mr state4, key0 + mr state5, key1 + mr state6, key2 + mr state7, key3 + mr state8, key4 + mr state9, key5 + mr state10, key6 + mr state11, key7 + + mr state12, counter0 + mr state13, counter1 + + li state14, 0 + li state15, 0 + +.Lpermute: + QUARTERROUND4( 0, 4, 8,12, 1, 5, 9,13, 2, 6,10,14, 3, 7,11,15) + QUARTERROUND4( 0, 5,10,15, 1, 6,11,12, 2, 7, 8,13, 3, 4, 9,14) + + bdnz .Lpermute + + addis state0, state0, const0@ha + addis state1, state1, const1@ha + addis state2, state2, const2@ha + addis state3, state3, const3@ha + addi state0, state0, const0@l + addi state1, state1, const1@l + addi state2, state2, const2@l + addi state3, state3, const3@l + + add state4, state4, key0 + add state5, state5, key1 + add state6, state6, key2 + add state7, state7, key3 + add state8, state8, key4 + add state9, state9, key5 + add state10, state10, key6 + add state11, state11, key7 + + add state12, state12, counter0 + add state13, state13, counter1 + +#ifdef __BIG_ENDIAN__ + stwbrx state0, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state1, 0, dst_bytes + stwbrx state2, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state3, 0, dst_bytes + stwbrx state4, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state5, 0, dst_bytes + stwbrx state6, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state7, 0, dst_bytes + stwbrx state8, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state9, 0, dst_bytes + stwbrx state10, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state11, 0, dst_bytes + 
stwbrx state12, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state13, 0, dst_bytes + stwbrx state14, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state15, 0, dst_bytes +#else + stw state0, 4(dst_bytes) + stw state1, 8(dst_bytes) + stw state2, 12(dst_bytes) + stw state3, 16(dst_bytes) + stw state4, 20(dst_bytes) + stw state5, 24(dst_bytes) + stw state6, 28(dst_bytes) + stw state7, 32(dst_bytes) + stw state8, 36(dst_bytes) + stw state9, 40(dst_bytes) + stw state10, 44(dst_bytes) + stw state11, 48(dst_bytes) + stw state12, 52(dst_bytes) + stw state13, 56(dst_bytes) + stw state14, 60(dst_bytes) + stwu state15, 64(dst_bytes) +#endif + + subic. idx_r0, idx_r0, 1 /* subi. can't use r0 as source */ + +#ifdef __powerpc64__ + addi counter0, counter0, 1 + srdi counter1, counter0, 32 +#else + addic counter0, counter0, 1 + addze counter1, counter1 +#endif + + bne .Lblock + +#ifdef __powerpc64__ + ld counter, -216(r1) +#else + lwz counter, 20(r1) +#endif + stw counter0, 0(counter) + stw counter1, 4(counter) + + li r6, 0 + li r7, 0 + li r8, 0 + li r9, 0 + li r10, 0 + li r11, 0 + li r12, 0 + +#ifdef __powerpc64__ + ld r14, -144(r1) + ld r15, -136(r1) + ld r16, -128(r1) + ld r17, -120(r1) + ld r18, -112(r1) + ld r19, -104(r1) + ld r20, -96(r1) + ld r21, -88(r1) + ld r22, -80(r1) + ld r23, -72(r1) + ld r24, -64(r1) + ld r25, -56(r1) + ld r26, -48(r1) + ld r27, -40(r1) + ld r28, -32(r1) + ld r29, -24(r1) + ld r30, -16(r1) + ld r31, -8(r1) +#else +#ifdef __BIG_ENDIAN__ + lmw r14, 24(r1) +#else + lwz r14, 24(r1) + lwz r15, 28(r1) + lwz r16, 32(r1) + lwz r17, 36(r1) + lwz r18, 40(r1) + lwz r19, 44(r1) + lwz r20, 48(r1) + lwz r21, 52(r1) + lwz r22, 56(r1) + lwz r23, 60(r1) + lwz r24, 64(r1) + lwz r25, 68(r1) + lwz r26, 72(r1) + lwz r27, 76(r1) + lwz r28, 80(r1) + lwz r29, 84(r1) + lwz r30, 88(r1) + lwz r31, 92(r1) +#endif + addi r1, r1, 96 +#endif /* __powerpc64__ */ + blr +SYM_FUNC_END(__arch_chacha20_blocks_nostack) diff --git a/arch/powerpc/kernel/vdso/vgetrandom.c b/arch/powerpc/kernel/vdso/vgetrandom.c new file mode 100644 index 00000000000000..5f855d45fb7bf9 --- /dev/null +++ b/arch/powerpc/kernel/vdso/vgetrandom.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Powerpc userspace implementation of getrandom() + * + * Copyright (C) 2024 Christophe Leroy , CS GROUP France + */ +#include +#include + +ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, + size_t opaque_len, const struct vdso_rng_data *vd) +{ + return __cvdso_getrandom_data(vd, buffer, len, flags, opaque_state, opaque_len); +} diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 3ff3de9a52acf2..34c0adb9fdbf2b 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -118,12 +118,12 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, struct fd f; f = fdget(tablefd); - if (!f.file) + if (!fd_file(f)) return -EBADF; rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { - if (stt == f.file->private_data) { + if (stt == fd_file(f)->private_data) { found = true; break; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8f7d7e37bc8c60..ba0492f9de650d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1922,14 +1922,22 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, r = EMULATE_FAIL; if (cpu_has_feature(CPU_FTR_ARCH_300)) { - if (cause == FSCR_MSGP_LG) + switch (cause) { + case FSCR_MSGP_LG: r = 
kvmppc_emulate_doorbell_instr(vcpu); - if (cause == FSCR_PM_LG) + break; + case FSCR_PM_LG: r = kvmppc_pmu_unavailable(vcpu); - if (cause == FSCR_EBB_LG) + break; + case FSCR_EBB_LG: r = kvmppc_ebb_unavailable(vcpu); - if (cause == FSCR_TM_LG) + break; + case FSCR_TM_LG: r = kvmppc_tm_unavailable(vcpu); + break; + default: + break; + } } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL | @@ -4049,7 +4057,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* Return to whole-core mode if we split the core earlier */ if (cmd_bit) { unsigned long hid0 = mfspr(SPRN_HID0); - unsigned long loops = 0; hid0 &= ~HID0_POWER8_DYNLPARDIS; stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; @@ -4061,7 +4068,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) if (!(hid0 & stat_bit)) break; cpu_relax(); - ++loops; } split_info.do_nap = 0; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 5e6c7b5276775f..f14329989e9a71 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1938,11 +1938,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, r = -EBADF; f = fdget(cap->args[0]); - if (!f.file) + if (!fd_file(f)) break; r = -EPERM; - dev = kvm_device_from_filp(f.file); + dev = kvm_device_from_filp(fd_file(f)); if (dev) r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); @@ -1957,11 +1957,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, r = -EBADF; f = fdget(cap->args[0]); - if (!f.file) + if (!fd_file(f)) break; r = -EPERM; - dev = kvm_device_from_filp(f.file); + dev = kvm_device_from_filp(fd_file(f)); if (dev) { if (xics_on_xive()) r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); @@ -1980,7 +1980,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, r = -EBADF; f = fdget(cap->args[0]); - if (!f.file) + if (!fd_file(f)) break; r = -ENXIO; @@ -1990,7 +1990,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, } r = -EPERM; - dev = kvm_device_from_filp(f.file); + dev = kvm_device_from_filp(fd_file(f)); if (dev) r = kvmppc_xive_native_connect_vcpu(dev, vcpu, cap->args[1]); diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 0d1f3ee9111520..acdab294b340a8 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -20,15 +20,14 @@ #include #include -static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr) +static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword) { - if (!ppc_inst_prefixed(instr)) { - u32 val = ppc_inst_val(instr); + if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) { + /* For big endian correctness: plain address would use the wrong half */ + u32 val32 = val; - __put_kernel_nofault(patch_addr, &val, u32, failed); + __put_kernel_nofault(patch_addr, &val32, u32, failed); } else { - u64 val = ppc_inst_as_ulong(instr); - __put_kernel_nofault(patch_addr, &val, u64, failed); } @@ -44,7 +43,10 @@ static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr int raw_patch_instruction(u32 *addr, ppc_inst_t instr) { - return __patch_instruction(addr, instr, addr); + if (ppc_inst_prefixed(instr)) + return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true); + else + return __patch_mem(addr, ppc_inst_val(instr), addr, false); } struct patch_context { @@ -276,7 +278,7 @@ static void unmap_patch_area(unsigned long addr) flush_tlb_kernel_range(addr, addr + PAGE_SIZE); } -static int 
__do_patch_instruction_mm(u32 *addr, ppc_inst_t instr) +static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword) { int err; u32 *patch_addr; @@ -305,7 +307,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr) orig_mm = start_using_temp_mm(patching_mm); - err = __patch_instruction(addr, instr, patch_addr); + err = __patch_mem(addr, val, patch_addr, is_dword); /* context synchronisation performed by __patch_instruction (isync or exception) */ stop_using_temp_mm(patching_mm, orig_mm); @@ -322,7 +324,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr) return err; } -static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) +static int __do_patch_mem(void *addr, unsigned long val, bool is_dword) { int err; u32 *patch_addr; @@ -339,7 +341,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) if (radix_enabled()) asm volatile("ptesync": : :"memory"); - err = __patch_instruction(addr, instr, patch_addr); + err = __patch_mem(addr, val, patch_addr, is_dword); pte_clear(&init_mm, text_poke_addr, pte); flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE); @@ -347,7 +349,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) return err; } -int patch_instruction(u32 *addr, ppc_inst_t instr) +static int patch_mem(void *addr, unsigned long val, bool is_dword) { int err; unsigned long flags; @@ -359,19 +361,57 @@ int patch_instruction(u32 *addr, ppc_inst_t instr) */ if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) || !static_branch_likely(&poking_init_done)) - return raw_patch_instruction(addr, instr); + return __patch_mem(addr, val, addr, is_dword); local_irq_save(flags); if (mm_patch_enabled()) - err = __do_patch_instruction_mm(addr, instr); + err = __do_patch_mem_mm(addr, val, is_dword); else - err = __do_patch_instruction(addr, instr); + err = __do_patch_mem(addr, val, is_dword); local_irq_restore(flags); return err; } + +#ifdef CONFIG_PPC64 + +int patch_instruction(u32 *addr, ppc_inst_t instr) +{ + if (ppc_inst_prefixed(instr)) + return patch_mem(addr, ppc_inst_as_ulong(instr), true); + else + return patch_mem(addr, ppc_inst_val(instr), false); +} NOKPROBE_SYMBOL(patch_instruction); +int patch_uint(void *addr, unsigned int val) +{ + if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int))) + return -EINVAL; + + return patch_mem(addr, val, false); +} +NOKPROBE_SYMBOL(patch_uint); + +int patch_ulong(void *addr, unsigned long val) +{ + if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long))) + return -EINVAL; + + return patch_mem(addr, val, true); +} +NOKPROBE_SYMBOL(patch_ulong); + +#else + +int patch_instruction(u32 *addr, ppc_inst_t instr) +{ + return patch_mem(addr, ppc_inst_val(instr), false); +} +NOKPROBE_SYMBOL(patch_instruction) + +#endif + static int patch_memset64(u64 *addr, u64 val, size_t count) { for (u64 *end = addr + count; addr < end; addr++) diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S index 7e5e1c28e56ac5..8967903c15e99f 100644 --- a/arch/powerpc/lib/crtsavres.S +++ b/arch/powerpc/lib/crtsavres.S @@ -46,7 +46,7 @@ .section ".text" -#ifndef CONFIG_PPC64 +#ifndef __powerpc64__ /* Routines for saving integer registers, called by the compiler. 
*/ /* Called with r11 pointing to the stack header word of the caller of the */ diff --git a/arch/powerpc/lib/test-code-patching.c b/arch/powerpc/lib/test-code-patching.c index f76030087f9836..8cd3b32f805b0f 100644 --- a/arch/powerpc/lib/test-code-patching.c +++ b/arch/powerpc/lib/test-code-patching.c @@ -438,6 +438,46 @@ static void __init test_multi_instruction_patching(void) vfree(buf); } +static void __init test_data_patching(void) +{ + void *buf; + u32 *addr32; + + buf = vzalloc(PAGE_SIZE); + check(buf); + if (!buf) + return; + + addr32 = buf + 128; + + addr32[1] = 0xA0A1A2A3; + addr32[2] = 0xB0B1B2B3; + + check(!patch_uint(&addr32[1], 0xC0C1C2C3)); + + check(addr32[0] == 0); + check(addr32[1] == 0xC0C1C2C3); + check(addr32[2] == 0xB0B1B2B3); + check(addr32[3] == 0); + + /* Unaligned patch_ulong() should fail */ + if (IS_ENABLED(CONFIG_PPC64)) + check(patch_ulong(&addr32[1], 0xD0D1D2D3) == -EINVAL); + + check(!patch_ulong(&addr32[2], 0xD0D1D2D3)); + + check(addr32[0] == 0); + check(addr32[1] == 0xC0C1C2C3); + check(*(unsigned long *)(&addr32[2]) == 0xD0D1D2D3); + + if (!IS_ENABLED(CONFIG_PPC64)) + check(addr32[3] == 0); + + check(addr32[4] == 0); + + vfree(buf); +} + static int __init test_code_patching(void) { pr_info("Running code patching self-tests ...\n"); @@ -448,6 +488,7 @@ static int __init test_code_patching(void) test_translate_branch(); test_prefixed_patching(); test_multi_instruction_patching(); + test_data_patching(); return 0; } diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 625fe7d08e067b..2db167f4233f7b 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -223,6 +223,8 @@ int mmu_mark_initmem_nx(void) update_bats(); + BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE); + for (i = TASK_SIZE >> 28; i < 16; i++) { /* Do not set NX on VM space for modules */ if (is_module_segment(i << 28)) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 6727a15ab94f95..e1eadd03f13390 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -125,7 +125,7 @@ int mmu_ci_restrictions; #endif static u8 *linear_map_hash_slots; static unsigned long linear_map_hash_count; -struct mmu_hash_ops mmu_hash_ops; +struct mmu_hash_ops mmu_hash_ops __ro_after_init; EXPORT_SYMBOL(mmu_hash_ops); /* diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index f4d8d3c40e5c69..5a4a7536904319 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -176,6 +176,17 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, return __pmd(old_pmd); } +pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, + pud_t *pudp) +{ + unsigned long old_pud; + + VM_WARN_ON_ONCE(!pud_present(*pudp)); + old_pud = pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID); + flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE); + return __pud(old_pud); +} + pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, int full) { @@ -259,6 +270,15 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmdv &= _HPAGE_CHG_MASK; return pmd_set_protbits(__pmd(pmdv), newprot); } + +pud_t pud_modify(pud_t pud, pgprot_t newprot) +{ + unsigned long pudv; + + pudv = pud_val(pud); + pudv &= _HPAGE_CHG_MASK; + return pud_set_protbits(__pud(pudv), newprot); +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* For use by kexec, called 
with MMU off */ diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c index ef3ce37f1bb3ec..87307d0fc3b813 100644 --- a/arch/powerpc/mm/book3s64/slice.c +++ b/arch/powerpc/mm/book3s64/slice.c @@ -637,10 +637,11 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, + vm_flags_t vm_flags) { if (radix_enabled()) - return generic_get_unmapped_area(filp, addr, len, pgoff, flags); + return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); return slice_get_unmapped_area(addr, len, flags, mm_ctx_user_psize(¤t->mm->context), 0); @@ -650,10 +651,11 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, - const unsigned long flags) + const unsigned long flags, + vm_flags_t vm_flags) { if (radix_enabled()) - return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags); + return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags); return slice_get_unmapped_area(addr0, len, flags, mm_ctx_user_psize(¤t->mm->context), 1); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index da21cb018984eb..1221c561b43a0b 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -216,7 +216,7 @@ static int __init mark_nonram_nosave(void) * everything else. GFP_DMA32 page allocations automatically fall back to * ZONE_DMA. * - * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the + * By using 31-bit unconditionally, we can exploit zone_dma_limit to inform the * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by * ZONE_DMA. @@ -230,6 +230,7 @@ void __init paging_init(void) { unsigned long long total_ram = memblock_phys_mem_size(); phys_addr_t top_of_ram = memblock_end_of_DRAM(); + int zone_dma_bits; #ifdef CONFIG_HIGHMEM unsigned long v = __fix_to_virt(FIX_KMAP_END); @@ -256,6 +257,8 @@ void __init paging_init(void) else zone_dma_bits = 31; + zone_dma_limit = DMA_BIT_MASK(zone_dma_bits); + #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 1UL << (zone_dma_bits - PAGE_SHIFT)); @@ -410,6 +413,18 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range); #ifdef CONFIG_EXECMEM static struct execmem_info execmem_info __ro_after_init; +#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603) +static void prealloc_execmem_pgtable(void) +{ + unsigned long va; + + for (va = ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE); va < MODULES_END; va += PGDIR_SIZE) + pte_alloc_kernel(pmd_off_k(va), va); +} +#else +static void prealloc_execmem_pgtable(void) { } +#endif + struct execmem_info __init *execmem_arch_setup(void) { pgprot_t kprobes_prot = strict_module_rwx_enabled() ? 
PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; @@ -441,6 +456,8 @@ struct execmem_info __init *execmem_arch_setup(void) end = VMALLOC_END; #endif + prealloc_execmem_pgtable(); + execmem_info = (struct execmem_info){ .ranges = { [EXECMEM_DEFAULT] = { diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 388bba0ab3e7de..8b54f12d1889b7 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -150,11 +150,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) mmu_mapin_immr(); - mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true); if (debug_pagealloc_enabled_or_kfence()) { top = boundary; } else { - mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true); mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); } @@ -177,7 +177,8 @@ int mmu_mark_initmem_nx(void) if (!debug_pagealloc_enabled_or_kfence()) err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); - mmu_pin_tlb(block_mapped_ram, false); + if (IS_ENABLED(CONFIG_PIN_TLB_TEXT)) + mmu_pin_tlb(block_mapped_ram, false); return err; } @@ -206,6 +207,8 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base, /* 8xx can only access 32MB at the moment */ memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M)); + + BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE) < TASK_SIZE); } int pud_clear_huge(pud_t *pud) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index aa89899f0c1a1e..3c1da08304d032 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -43,11 +43,9 @@ static char *cmdline __initdata; int numa_cpu_lookup_table[NR_CPUS]; cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; -struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(node_to_cpumask_map); -EXPORT_SYMBOL(node_data); static int primary_domain_index; static int n_mem_addr_cells, n_mem_size_cells; @@ -1095,27 +1093,9 @@ void __init dump_numa_cpu_topology(void) static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) { u64 spanned_pages = end_pfn - start_pfn; - const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); - u64 nd_pa; - void *nd; - int tnid; - - nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) - panic("Cannot allocate %zu bytes for node %d data\n", - nd_size, nid); - - nd = __va(nd_pa); - - /* report and initialize */ - pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n", - nd_pa, nd_pa + nd_size - 1); - tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (tnid != nid) - pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid); - - node_data[nid] = nd; - memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + + alloc_node_data(nid); + NODE_DATA(nid)->node_id = nid; NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = spanned_pages; diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c index 8c31802f97e82e..e89f64a0f24ae7 100644 --- a/arch/powerpc/mm/pgtable-frag.c +++ b/arch/powerpc/mm/pgtable-frag.c @@ -136,10 +136,10 @@ void pte_fragment_free(unsigned long *table, int kernel) #ifdef CONFIG_TRANSPARENT_HUGEPAGE void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) { - struct page *page; + struct folio *folio; - page = virt_to_page(pgtable); - SetPageActive(page); + folio = virt_to_folio(pgtable); + folio_set_active(folio); pte_fragment_free((unsigned long *)pgtable, 0); 
} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index ab0656115424f4..7316396e452d8d 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -297,6 +297,12 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, } #if defined(CONFIG_PPC_8xx) + +#if defined(CONFIG_SPLIT_PTE_PTLOCKS) || defined(CONFIG_SPLIT_PMD_PTLOCKS) +/* We need the same lock to protect the PMD table and the two PTE tables. */ +#error "8M hugetlb folios are incompatible with split page table locks" +#endif + static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val) { pte_basic_t *entry = (pte_basic_t *)ptep; diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index 164cbcd4588e4c..e7b7bdaad341f1 100644 --- a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c @@ -95,7 +95,7 @@ static int avr_probe(struct i2c_client *client) } static const struct i2c_device_id avr_id[] = { - { "akebono-avr", 0 }, + { "akebono-avr" }, { } }; diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c index 4a25b6b4861582..9668b052cd4b3a 100644 --- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c +++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c @@ -504,7 +504,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match); static struct platform_driver mpc512x_lpbfifo_driver = { .probe = mpc512x_lpbfifo_probe, - .remove_new = mpc512x_lpbfifo_remove, + .remove = mpc512x_lpbfifo_remove, .driver = { .name = DRV_NAME, .of_match_table = mpc512x_lpbfifo_match, diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 2bd6abcdc113bb..1ea591ec608334 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -644,7 +644,6 @@ static int mpc52xx_wdt_release(struct inode *inode, struct file *file) static const struct file_operations mpc52xx_wdt_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .write = mpc52xx_wdt_write, .unlocked_ioctl = mpc52xx_wdt_ioctl, .compat_ioctl = compat_ptr_ioctl, diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 34ce21f42623f5..e635b27ee7186c 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -143,7 +143,7 @@ static struct platform_driver gpio_halt_driver = { .of_match_table = gpio_halt_match, }, .probe = gpio_halt_probe, - .remove_new = gpio_halt_remove, + .remove = gpio_halt_remove, }; module_platform_driver(gpio_halt_driver); diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index a14d9d8997a4f0..8623aebfac482f 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -195,6 +195,13 @@ config PIN_TLB_IMMR CONFIG_PIN_TLB_DATA is also selected, it will reduce CONFIG_PIN_TLB_DATA to 24 Mbytes. +config PIN_TLB_TEXT + bool "Pinned TLB for TEXT" + depends on PIN_TLB + default y + help + This pins kernel text with 8M pages. 
+ endmenu endmenu diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 4b0d7d4f88f661..1453ccc900c434 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -84,11 +84,8 @@ config PPC_BOOK3S_64 bool "Server processors" select PPC_FPU select PPC_HAVE_PMU_SUPPORT - select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION select ARCH_ENABLE_SPLIT_PMD_PTLOCK - select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_NUMA_BALANCING select HAVE_MOVE_PMD @@ -108,6 +105,14 @@ config PPC_BOOK3E_64 endchoice +config PPC_THP + def_bool y + depends on PPC_BOOK3S_64 + depends on PPC_RADIX_MMU || (PPC_64S_HASH_MMU && PAGE_SIZE_64KB) + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + choice prompt "CPU selection" help diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c index 87ad7d563cfa8c..cd7d42fc12a67d 100644 --- a/arch/powerpc/platforms/cell/spu_syscalls.c +++ b/arch/powerpc/platforms/cell/spu_syscalls.c @@ -66,8 +66,8 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, if (flags & SPU_CREATE_AFFINITY_SPU) { struct fd neighbor = fdget(neighbor_fd); ret = -EBADF; - if (neighbor.file) { - ret = calls->create_thread(name, flags, mode, neighbor.file); + if (fd_file(neighbor)) { + ret = calls->create_thread(name, flags, mode, fd_file(neighbor)); fdput(neighbor); } } else @@ -89,8 +89,8 @@ SYSCALL_DEFINE3(spu_run,int, fd, __u32 __user *, unpc, __u32 __user *, ustatus) ret = -EBADF; arg = fdget(fd); - if (arg.file) { - ret = calls->spu_run(arg.file, unpc, ustatus); + if (fd_file(arg)) { + ret = calls->spu_run(fd_file(arg), unpc, ustatus); fdput(arg); } diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 7f4e0db8eb086f..d5a2c77bc90871 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -453,7 +453,6 @@ static const struct file_operations spufs_cntl_fops = { .release = spufs_cntl_release, .read = simple_attr_read, .write = simple_attr_write, - .llseek = no_llseek, .mmap = spufs_cntl_mmap, }; @@ -634,7 +633,6 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, static const struct file_operations spufs_mbox_fops = { .open = spufs_pipe_open, .read = spufs_mbox_read, - .llseek = no_llseek, }; static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, @@ -664,7 +662,6 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_mbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_mbox_stat_read, - .llseek = no_llseek, }; /* low-level ibox access function */ @@ -769,7 +766,6 @@ static const struct file_operations spufs_ibox_fops = { .open = spufs_pipe_open, .read = spufs_ibox_read, .poll = spufs_ibox_poll, - .llseek = no_llseek, }; static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, @@ -797,7 +793,6 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_ibox_stat_fops = { .open = spufs_pipe_open, .read = spufs_ibox_stat_read, - .llseek = no_llseek, }; /* low-level mailbox write */ @@ -901,7 +896,6 @@ static const struct file_operations 
spufs_wbox_fops = { .open = spufs_pipe_open, .write = spufs_wbox_write, .poll = spufs_wbox_poll, - .llseek = no_llseek, }; static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, @@ -929,7 +923,6 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_wbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_wbox_stat_read, - .llseek = no_llseek, }; static int spufs_signal1_open(struct inode *inode, struct file *file) @@ -1056,7 +1049,6 @@ static const struct file_operations spufs_signal1_fops = { .read = spufs_signal1_read, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, - .llseek = no_llseek, }; static const struct file_operations spufs_signal1_nosched_fops = { @@ -1064,7 +1056,6 @@ static const struct file_operations spufs_signal1_nosched_fops = { .release = spufs_signal1_release, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, - .llseek = no_llseek, }; static int spufs_signal2_open(struct inode *inode, struct file *file) @@ -1195,7 +1186,6 @@ static const struct file_operations spufs_signal2_fops = { .read = spufs_signal2_read, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, - .llseek = no_llseek, }; static const struct file_operations spufs_signal2_nosched_fops = { @@ -1203,7 +1193,6 @@ static const struct file_operations spufs_signal2_nosched_fops = { .release = spufs_signal2_release, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, - .llseek = no_llseek, }; /* @@ -1343,7 +1332,6 @@ static const struct file_operations spufs_mss_fops = { .open = spufs_mss_open, .release = spufs_mss_release, .mmap = spufs_mss_mmap, - .llseek = no_llseek, }; static vm_fault_t @@ -1401,7 +1389,6 @@ static const struct file_operations spufs_psmap_fops = { .open = spufs_psmap_open, .release = spufs_psmap_release, .mmap = spufs_psmap_mmap, - .llseek = no_llseek, }; @@ -1732,7 +1719,6 @@ static const struct file_operations spufs_mfc_fops = { .flush = spufs_mfc_flush, .fsync = spufs_mfc_fsync, .mmap = spufs_mfc_mmap, - .llseek = no_llseek, }; static int spufs_npc_set(void *data, u64 val) @@ -2102,7 +2088,6 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, static const struct file_operations spufs_dma_info_fops = { .open = spufs_info_open, .read = spufs_dma_info_read, - .llseek = no_llseek, }; static void spufs_get_proxydma_info(struct spu_context *ctx, @@ -2159,7 +2144,6 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, static const struct file_operations spufs_proxydma_info_fops = { .open = spufs_info_open, .read = spufs_proxydma_info_read, - .llseek = no_llseek, }; static int spufs_show_tid(struct seq_file *s, void *private) @@ -2442,7 +2426,6 @@ static const struct file_operations spufs_switch_log_fops = { .read = spufs_switch_log_read, .poll = spufs_switch_log_poll, .release = spufs_switch_log_release, - .llseek = no_llseek, }; /** diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index 5c4f1a9ca154bd..6f4a41a9352aef 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #define PEGASOS2_MARVELL_REGBASE (0xf1000000) @@ -25,12 +25,15 @@ #define PEGASOS2_SRAM_BASE_ETH_PORT0 (PEGASOS2_SRAM_BASE) #define PEGASOS2_SRAM_BASE_ETH_PORT1 (PEGASOS2_SRAM_BASE_ETH_PORT0 + (PEGASOS2_SRAM_SIZE / 2) ) - #define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #define 
PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #undef BE_VERBOSE +#define MV64340_BASE_ADDR_ENABLE 0x278 +#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268 +#define MV64340_SRAM_CONFIG 0x380 + static struct resource mv643xx_eth_shared_resources[] = { [0] = { .name = "ethernet shared base", diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h index 4f358b55c3413e..8ddbaa4ebd0b43 100644 --- a/arch/powerpc/platforms/maple/maple.h +++ b/arch/powerpc/platforms/maple/maple.h @@ -7,7 +7,6 @@ extern int maple_set_rtc_time(struct rtc_time *tm); extern void maple_get_rtc_time(struct rtc_time *tm); extern time64_t maple_get_boot_time(void); -extern void maple_calibrate_decr(void); extern void maple_pci_init(void); extern void maple_pci_irq_fixup(struct pci_dev *dev); extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel); diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index 4e983af3294922..e4538d4712565e 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -285,7 +285,7 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match); static struct platform_driver gpio_mdio_driver = { .probe = gpio_mdio_probe, - .remove_new = gpio_mdio_remove, + .remove = gpio_mdio_remove, .driver = { .name = "gpio-mdio-bitbang", .of_match_table = gpio_mdio_match, diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h index 018c30665e1b30..6f6743b8e48d12 100644 --- a/arch/powerpc/platforms/pasemi/pasemi.h +++ b/arch/powerpc/platforms/pasemi/pasemi.h @@ -5,7 +5,6 @@ extern time64_t pas_get_boot_time(void); extern void pas_pci_init(void); struct pci_dev; -extern void pas_pci_irq_fixup(struct pci_dev *dev); extern void pas_pci_dma_dev_setup(struct pci_dev *dev); void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset); diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index 085e0ad20eba58..8253de73737353 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -313,7 +313,7 @@ static void __init uninorth_install_pfunc(void) /* * Install handlers for the hwclock child if any */ - for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;) + for_each_child_of_node(uninorth_node, np) if (of_node_name_eq(np, "hw-clock")) { unin_hwclock = np; break; diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 15644be31990d9..d21b681f52fb0e 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -827,7 +827,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - patch_instruction(vector, ppc_inst(save_vector)); + patch_uint(vector, save_vector); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index af3a5d37a1496a..db3370d1673c3f 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -99,7 +99,6 @@ static ssize_t pnv_eeh_ei_write(struct file *filp, static const struct file_operations pnv_eeh_ei_fops = { .open = simple_open, - .llseek = no_llseek, .write = pnv_eeh_ei_write, }; @@ -860,7 +859,7 @@ static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option) int64_t rc; /* Hot reset to the bus 
if firmware cannot handle */ - if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL)) + if (!dn || !of_property_present(dn, "ibm,reset-by-firmware")) return __pnv_eeh_bridge_reset(pdev, option); pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n", diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c index 16c5860f13720c..608e4b68c5ea9d 100644 --- a/arch/powerpc/platforms/powernv/opal-dump.c +++ b/arch/powerpc/platforms/powernv/opal-dump.c @@ -210,7 +210,7 @@ static struct attribute *dump_default_attrs[] = { }; ATTRIBUTE_GROUPS(dump_default); -static struct kobj_type dump_ktype = { +static const struct kobj_type dump_ktype = { .sysfs_ops = &dump_sysfs_ops, .release = &dump_release, .default_groups = dump_default_groups, diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 554fdd7f88b8a1..5db1e733143bfa 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -146,7 +146,7 @@ static struct attribute *elog_default_attrs[] = { }; ATTRIBUTE_GROUPS(elog_default); -static struct kobj_type elog_ktype = { +static const struct kobj_type elog_ktype = { .sysfs_ops = &elog_sysfs_ops, .release = &elog_release, .default_groups = elog_default_groups, diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c index 6c3bc4b4da9834..bb4218fa796e5e 100644 --- a/arch/powerpc/platforms/powernv/opal-kmsg.c +++ b/arch/powerpc/platforms/powernv/opal-kmsg.c @@ -20,13 +20,13 @@ * message, it just ensures that OPAL completely flushes the console buffer. */ static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { /* * Outside of a panic context the pollers will continue to run, * so we don't need to do any special flushing. 
*/ - if (reason != KMSG_DUMP_PANIC) + if (detail->reason != KMSG_DUMP_PANIC) return; opal_flush_console(0); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index a16f07cdab267c..8a7f39e106bdbb 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -393,7 +393,7 @@ void __init opal_lpc_init(void) for_each_compatible_node(np, NULL, "ibm,power8-lpc") { if (!of_device_is_available(np)) continue; - if (!of_get_property(np, "primary", NULL)) + if (!of_property_present(np, "primary")) continue; opal_lpc_chip_id = of_get_ibm_chip_id(np); of_node_put(np); diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 24f04f20d3e85c..dc246ed4b7b4cc 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -443,7 +443,7 @@ static struct platform_driver opal_prd_driver = { .of_match_table = opal_prd_match, }, .probe = opal_prd_probe, - .remove_new = opal_prd_remove, + .remove = opal_prd_remove, }; module_platform_driver(opal_prd_driver); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 957f2b47a3c0c2..93fba1f8661f96 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -274,7 +274,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn, int where, int size, u32 val); extern struct iommu_table *pnv_pci_table_alloc(int nid); -extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np); extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 47f8eabd1bee31..213aa26dc8b337 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -23,6 +23,7 @@ #include #include #include +#include static struct workqueue_struct *pseries_hp_wq; @@ -250,11 +251,8 @@ int dlpar_detach_node(struct device_node *dn) struct device_node *child; int rc; - child = of_get_next_child(dn, NULL); - while (child) { + for_each_child_of_node(dn, child) dlpar_detach_node(child); - child = of_get_next_child(dn, child); - } rc = of_detach_node(dn); if (rc) @@ -264,6 +262,20 @@ int dlpar_detach_node(struct device_node *dn) return 0; } +static int dlpar_changeset_attach_cc_nodes(struct of_changeset *ocs, + struct device_node *dn) +{ + int rc; + + rc = of_changeset_attach_node(ocs, dn); + + if (!rc && dn->child) + rc = dlpar_changeset_attach_cc_nodes(ocs, dn->child); + if (!rc && dn->sibling) + rc = dlpar_changeset_attach_cc_nodes(ocs, dn->sibling); + + return rc; +} #define DR_ENTITY_SENSE 9003 #define DR_ENTITY_PRESENT 1 @@ -330,27 +342,206 @@ int dlpar_unisolate_drc(u32 drc_index) return 0; } -int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +static struct device_node * +get_device_node_with_drc_index(u32 index) +{ + struct device_node *np = NULL; + u32 node_index; + int rc; + + for_each_node_with_property(np, "ibm,my-drc-index") { + rc = of_property_read_u32(np, "ibm,my-drc-index", + &node_index); + if (rc) { + pr_err("%s: %pOF: of_property_read_u32 %s: %d\n", + __func__, np, "ibm,my-drc-index", rc); + of_node_put(np); + return NULL; + } + + if (index == node_index) + break; + } + + return np; +} + +static struct device_node * +get_device_node_with_drc_info(u32 index) +{ + struct device_node *np 
= NULL; + struct of_drc_info drc; + struct property *info; + const __be32 *value; + u32 node_index; + int i, j, count; + + for_each_node_with_property(np, "ibm,drc-info") { + info = of_find_property(np, "ibm,drc-info", NULL); + if (info == NULL) { + /* XXX can this happen? */ + of_node_put(np); + return NULL; + } + value = of_prop_next_u32(info, NULL, &count); + if (value == NULL) + continue; + value++; + for (i = 0; i < count; i++) { + if (of_read_drc_info_cell(&info, &value, &drc)) + break; + if (index > drc.last_drc_index) + continue; + node_index = drc.drc_index_start; + for (j = 0; j < drc.num_sequential_elems; j++) { + if (index == node_index) + return np; + node_index += drc.sequential_inc; + } + } + } + + return NULL; +} + +static int dlpar_hp_dt_add(u32 index) +{ + struct device_node *np, *nodes; + struct of_changeset ocs; + int rc; + + /* + * Do not add device node(s) if already exists in the + * device tree. + */ + np = get_device_node_with_drc_index(index); + if (np) { + pr_err("%s: Adding device node for index (%d), but " + "already exists in the device tree\n", + __func__, index); + rc = -EINVAL; + goto out; + } + + np = get_device_node_with_drc_info(index); + + if (!np) + return -EIO; + + /* Next, configure the connector. */ + nodes = dlpar_configure_connector(cpu_to_be32(index), np); + if (!nodes) { + rc = -EIO; + goto out; + } + + /* + * Add the new nodes from dlpar_configure_connector() onto + * the device-tree. + */ + of_changeset_init(&ocs); + rc = dlpar_changeset_attach_cc_nodes(&ocs, nodes); + + if (!rc) + rc = of_changeset_apply(&ocs); + else + dlpar_free_cc_nodes(nodes); + + of_changeset_destroy(&ocs); + +out: + of_node_put(np); + return rc; +} + +static int changeset_detach_node_recursive(struct of_changeset *ocs, + struct device_node *node) { + struct device_node *child; int rc; - /* pseries error logs are in BE format, convert to cpu type */ - switch (hp_elog->id_type) { - case PSERIES_HP_ELOG_ID_DRC_COUNT: - hp_elog->_drc_u.drc_count = - be32_to_cpu(hp_elog->_drc_u.drc_count); + for_each_child_of_node(node, child) { + rc = changeset_detach_node_recursive(ocs, child); + if (rc) { + of_node_put(child); + return rc; + } + } + + return of_changeset_detach_node(ocs, node); +} + +static int dlpar_hp_dt_remove(u32 drc_index) +{ + struct device_node *np; + struct of_changeset ocs; + u32 index; + int rc = 0; + + /* + * Prune all nodes with a matching index. 
+ */ + of_changeset_init(&ocs); + + for_each_node_with_property(np, "ibm,my-drc-index") { + rc = of_property_read_u32(np, "ibm,my-drc-index", &index); + if (rc) { + pr_err("%s: %pOF: of_property_read_u32 %s: %d\n", + __func__, np, "ibm,my-drc-index", rc); + of_node_put(np); + goto out; + } + + if (index == drc_index) { + rc = changeset_detach_node_recursive(&ocs, np); + if (rc) { + of_node_put(np); + goto out; + } + } + } + + rc = of_changeset_apply(&ocs); + +out: + of_changeset_destroy(&ocs); + return rc; +} + +static int dlpar_hp_dt(struct pseries_hp_errorlog *phpe) +{ + u32 drc_index; + int rc; + + if (phpe->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX) + return -EINVAL; + + drc_index = be32_to_cpu(phpe->_drc_u.drc_index); + + lock_device_hotplug(); + + switch (phpe->action) { + case PSERIES_HP_ELOG_ACTION_ADD: + rc = dlpar_hp_dt_add(drc_index); break; - case PSERIES_HP_ELOG_ID_DRC_INDEX: - hp_elog->_drc_u.drc_index = - be32_to_cpu(hp_elog->_drc_u.drc_index); + case PSERIES_HP_ELOG_ACTION_REMOVE: + rc = dlpar_hp_dt_remove(drc_index); + break; + default: + pr_err("Invalid action (%d) specified\n", phpe->action); + rc = -EINVAL; break; - case PSERIES_HP_ELOG_ID_DRC_IC: - hp_elog->_drc_u.ic.count = - be32_to_cpu(hp_elog->_drc_u.ic.count); - hp_elog->_drc_u.ic.index = - be32_to_cpu(hp_elog->_drc_u.ic.index); } + unlock_device_hotplug(); + + return rc; +} + +int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +{ + int rc; + switch (hp_elog->resource) { case PSERIES_HP_ELOG_RESOURCE_MEM: rc = dlpar_memory(hp_elog); @@ -361,6 +552,9 @@ int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_RESOURCE_PMEM: rc = dlpar_hp_pmem(hp_elog); break; + case PSERIES_HP_ELOG_RESOURCE_DT: + rc = dlpar_hp_dt(hp_elog); + break; default: pr_warn_ratelimited("Invalid resource (%d) specified\n", @@ -413,6 +607,8 @@ static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog) hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; } else if (sysfs_streq(arg, "cpu")) { hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU; + } else if (sysfs_streq(arg, "dt")) { + hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_DT; } else { pr_err("Invalid resource specified.\n"); return -EINVAL; @@ -554,7 +750,7 @@ static ssize_t dlpar_store(const struct class *class, const struct class_attribu static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", "memory,cpu"); + return sprintf(buf, "%s\n", "memory,cpu,dt"); } static CLASS_ATTR_RW(dlpar); diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 3f1cdccebc9c15..8cb9d36ea49159 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -325,7 +325,6 @@ static const struct file_operations dtl_fops = { .open = dtl_file_open, .release = dtl_file_release, .read = dtl_file_read, - .llseek = no_llseek, }; static struct dentry *dtl_dir; diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index b1ae0c0d118782..1893f66371fa43 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -784,6 +784,43 @@ static int pseries_notify_resume(struct eeh_dev *edev) } #endif +/** + * pseries_eeh_err_inject - Inject specified error to the indicated PE + * @pe: the indicated PE + * @type: error type + * @func: specific error type + * @addr: address + * @mask: address mask + * The routine is called to inject 
specified error, which is + * determined by @type and @func, to the indicated PE + */ +static int pseries_eeh_err_inject(struct eeh_pe *pe, int type, int func, + unsigned long addr, unsigned long mask) +{ + struct eeh_dev *pdev; + + /* Check on PCI error type */ + if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64) + return -EINVAL; + + switch (func) { + case EEH_ERR_FUNC_LD_MEM_ADDR: + case EEH_ERR_FUNC_LD_MEM_DATA: + case EEH_ERR_FUNC_ST_MEM_ADDR: + case EEH_ERR_FUNC_ST_MEM_DATA: + /* injects a MMIO error for all pdev's belonging to PE */ + pci_lock_rescan_remove(); + list_for_each_entry(pdev, &pe->edevs, entry) + eeh_pe_inject_mmio_error(pdev->pdev); + pci_unlock_rescan_remove(); + break; + default: + return -ERANGE; + } + + return 0; +} + static struct eeh_ops pseries_eeh_ops = { .name = "pseries", .probe = pseries_eeh_probe, @@ -792,7 +829,7 @@ static struct eeh_ops pseries_eeh_ops = { .reset = pseries_eeh_reset, .get_log = pseries_eeh_get_log, .configure_bridge = pseries_eeh_configure_bridge, - .err_inject = NULL, + .err_inject = pseries_eeh_err_inject, .read_config = pseries_eeh_read_config, .write_config = pseries_eeh_write_config, .next_error = NULL, diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index e62835a12d73fc..6838a0fcda296b 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -757,7 +757,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) u32 drc_index; int rc; - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); lock_device_hotplug(); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 3fe3ddb30c04b4..38dc4f7c9296b2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -817,16 +817,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_ACTION_ADD: switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: - count = hp_elog->_drc_u.drc_count; + count = be32_to_cpu(hp_elog->_drc_u.drc_count); rc = dlpar_memory_add_by_count(count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); rc = dlpar_memory_add_by_index(drc_index); break; case PSERIES_HP_ELOG_ID_DRC_IC: - count = hp_elog->_drc_u.ic.count; - drc_index = hp_elog->_drc_u.ic.index; + count = be32_to_cpu(hp_elog->_drc_u.ic.count); + drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); rc = dlpar_memory_add_by_ic(count, drc_index); break; default: @@ -838,16 +838,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_ACTION_REMOVE: switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: - count = hp_elog->_drc_u.drc_count; + count = be32_to_cpu(hp_elog->_drc_u.drc_count); rc = dlpar_memory_remove_by_count(count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); rc = dlpar_memory_remove_by_index(drc_index); break; case PSERIES_HP_ELOG_ID_DRC_IC: - count = hp_elog->_drc_u.ic.count; - drc_index = hp_elog->_drc_u.ic.index; + count = be32_to_cpu(hp_elog->_drc_u.ic.count); + drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); rc = dlpar_memory_remove_by_ic(count, drc_index); break; default: diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c index 
c29e85db5f351b..1574176e3ffc79 100644 --- a/arch/powerpc/platforms/pseries/papr-vpd.c +++ b/arch/powerpc/platforms/pseries/papr-vpd.c @@ -156,10 +156,7 @@ static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len) const char *old_ptr = blob->data; char *new_ptr; - new_ptr = old_ptr ? - kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL_ACCOUNT) : - kvmalloc(len, GFP_KERNEL_ACCOUNT); - + new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT); if (!new_ptr) return -ENOMEM; diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index f6a70bc92e8357..9e297f88adc5d9 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #define BIND_ANY_ADDR (~0ul) @@ -1509,7 +1509,7 @@ static const struct of_device_id papr_scm_match[] = { static struct platform_driver papr_scm_driver = { .probe = papr_scm_probe, - .remove_new = papr_scm_remove, + .remove = papr_scm_remove, .driver = { .name = "papr_scm", .of_match_table = papr_scm_match, diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index 3c290b9ed01b39..0f1d45f32e4a44 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -121,7 +121,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) return -EINVAL; } - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); lock_device_hotplug(); diff --git a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c index f9f682724e7763..9e05a0e99cadfd 100644 --- a/arch/powerpc/platforms/pseries/vas-sysfs.c +++ b/arch/powerpc/platforms/pseries/vas-sysfs.c @@ -162,13 +162,13 @@ static const struct sysfs_ops vas_sysfs_ops = { .store = vas_type_store, }; -static struct kobj_type vas_def_attr_type = { +static const struct kobj_type vas_def_attr_type = { .release = vas_type_release, .sysfs_ops = &vas_sysfs_ops, .default_groups = vas_def_capab_groups, }; -static struct kobj_type vas_qos_attr_type = { +static const struct kobj_type vas_qos_attr_type = { .release = vas_type_release, .sysfs_ops = &vas_sysfs_ops, .default_groups = vas_qos_capab_groups, diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index e205135ae1feaa..1aa0cb097c9c9d 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -603,7 +603,7 @@ static struct platform_driver fsl_of_msi_driver = { .of_match_table = fsl_of_msi_ids, }, .probe = fsl_of_msi_probe, - .remove_new = fsl_of_msi_remove, + .remove = fsl_of_msi_remove, }; static __init int fsl_of_msi_init(void) diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c index 737f97fd67d72e..2511e586fe3111 100644 --- a/arch/powerpc/sysdev/pmi.c +++ b/arch/powerpc/sysdev/pmi.c @@ -193,7 +193,7 @@ static void pmi_of_remove(struct platform_device *dev) static struct platform_driver pmi_of_platform_driver = { .probe = pmi_of_probe, - .remove_new = pmi_of_remove, + .remove = pmi_of_remove, .driver = { .name = "pmi", .of_match_table = pmi_match, diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index bd4813bad317eb..e6cddbb2305f8d 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -3543,7 +3543,7 @@ scanhex(unsigned long *vp) } } else if (c == '$') { int i; - for (i=0; i<63; i++) { + for (i = 0; i < (KSYM_NAME_LEN - 1); i++) { c = inchar(); if (isspace(c) || c == '\0') { termch = c; 
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 939ea7f6a2289c..62545946ecf432 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -13,6 +13,7 @@ config 32BIT config RISCV def_bool y select ACPI_GENERIC_GSI if ACPI + select ACPI_MCFG if (ACPI && PCI) select ACPI_PPTT if ACPI select ACPI_REDUCED_HARDWARE_ONLY if ACPI select ACPI_SPCR_TABLE if ACPI @@ -64,10 +65,12 @@ config RISCV select ARCH_SUPPORTS_LTO_CLANG_THIN if LLD_VERSION >= 140000 select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU select ARCH_SUPPORTS_PER_VMA_LOCK if MMU + select ARCH_SUPPORTS_RT select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK select ARCH_USE_CMPXCHG_LOCKREF if 64BIT select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_SYM_ANNOTATIONS select ARCH_USES_CFI_TRAPS if CFI_CLANG select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU @@ -92,6 +95,7 @@ config RISCV select GENERIC_ATOMIC64 if !64BIT select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_CPU_DEVICES + select GENERIC_CPU_VULNERABILITIES select GENERIC_EARLY_IOREMAP select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO @@ -156,6 +160,7 @@ config RISCV select HAVE_KERNEL_LZO if !XIP_KERNEL && !EFI_ZBOOT select HAVE_KERNEL_UNCOMPRESSED if !XIP_KERNEL && !EFI_ZBOOT select HAVE_KERNEL_ZSTD if !XIP_KERNEL && !EFI_ZBOOT + select HAVE_KERNEL_XZ if !XIP_KERNEL && !EFI_ZBOOT select HAVE_KPROBES if !XIP_KERNEL select HAVE_KRETPROBES if !XIP_KERNEL # https://github.com/ClangBuiltLinux/linux/issues/1881 @@ -172,7 +177,7 @@ config RISCV select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RETHOOK if !XIP_KERNEL select HAVE_RSEQ - select HAVE_RUST if 64BIT + select HAVE_RUST if RUSTC_SUPPORTS_RISCV select HAVE_SAMPLE_FTRACE_DIRECT select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_STACKPROTECTOR @@ -188,6 +193,7 @@ config RISCV select OF_EARLY_FLATTREE select OF_IRQ select PCI_DOMAINS_GENERIC if PCI + select PCI_ECAM if (ACPI && PCI) select PCI_MSI if PCI select RISCV_ALTERNATIVE if !XIP_KERNEL select RISCV_APLIC @@ -200,8 +206,16 @@ config RISCV select THREAD_INFO_IN_TASK select TRACE_IRQFLAGS_SUPPORT select UACCESS_MEMCPY if !MMU + select USER_STACKTRACE_SUPPORT select ZONE_DMA32 if 64BIT +config RUSTC_SUPPORTS_RISCV + def_bool y + depends on 64BIT + # Shadow call stack requires rustc version 1.82+ due to use of the + # -Zsanitizer=shadow-call-stack flag. 
+ depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200 + config CLANG_SUPPORTS_DYNAMIC_FTRACE def_bool CC_IS_CLANG # https://github.com/ClangBuiltLinux/linux/issues/1817 @@ -319,6 +333,11 @@ config GENERIC_HWEIGHT config FIX_EARLYCON_MEM def_bool MMU +config ILLEGAL_POINTER_VALUE + hex + default 0 if 32BIT + default 0xdead000000000000 if 64BIT + config PGTABLE_LEVELS int default 5 if 64BIT @@ -758,8 +777,7 @@ config IRQ_STACKS config THREAD_SIZE_ORDER int "Kernel stack size (in power-of-two numbers of page size)" if VMAP_STACK && EXPERT range 0 4 - default 1 if 32BIT && !KASAN - default 3 if 64BIT && KASAN + default 1 if 32BIT default 2 help Specify the Pages of thread stack size (from 4KB to 64KB), which also diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 6fe682139d2e58..d469db9f46f426 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -159,6 +159,7 @@ boot-image-$(CONFIG_KERNEL_LZ4) := Image.lz4 boot-image-$(CONFIG_KERNEL_LZMA) := Image.lzma boot-image-$(CONFIG_KERNEL_LZO) := Image.lzo boot-image-$(CONFIG_KERNEL_ZSTD) := Image.zst +boot-image-$(CONFIG_KERNEL_XZ) := Image.xz ifdef CONFIG_RISCV_M_MODE boot-image-$(CONFIG_ARCH_CANAAN) := loader.bin endif @@ -183,12 +184,12 @@ endif vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg -BOOT_TARGETS := Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst loader loader.bin xipImage vmlinuz.efi +BOOT_TARGETS := Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst Image.xz loader loader.bin xipImage vmlinuz.efi all: $(notdir $(KBUILD_IMAGE)) loader.bin: loader -Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst loader xipImage vmlinuz.efi: Image +Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst Image.xz loader xipImage vmlinuz.efi: Image $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ @@ -225,6 +226,7 @@ define archhelp echo ' Image.lzma - Compressed kernel image (arch/riscv/boot/Image.lzma)' echo ' Image.lzo - Compressed kernel image (arch/riscv/boot/Image.lzo)' echo ' Image.zst - Compressed kernel image (arch/riscv/boot/Image.zst)' + echo ' Image.xz - Compressed kernel image (arch/riscv/boot/Image.xz)' echo ' vmlinuz.efi - Compressed EFI kernel image (arch/riscv/boot/vmlinuz.efi)' echo ' Default when CONFIG_EFI_ZBOOT=y' echo ' xipImage - Execute-in-place kernel image (arch/riscv/boot/xipImage)' diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile index 4e9e7a28bf9b1d..b25d524ce5eb43 100644 --- a/arch/riscv/boot/Makefile +++ b/arch/riscv/boot/Makefile @@ -64,6 +64,9 @@ $(obj)/Image.lzo: $(obj)/Image FORCE $(obj)/Image.zst: $(obj)/Image FORCE $(call if_changed,zstd) +$(obj)/Image.xz: $(obj)/Image FORCE + $(call if_changed,xzkern) + $(obj)/loader.bin: $(obj)/loader FORCE $(call if_changed,objcopy) diff --git a/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts b/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts index aa361f3a86bb58..7b5f57853690b3 100644 --- a/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts +++ b/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts @@ -43,6 +43,15 @@ &osc { clock-frequency = <25000000>; }; +&sdhci0 { + status = "okay"; + bus-width = <4>; + no-1-8-v; + no-mmc; + no-sdio; + disable-wp; +}; + &uart0 { status = "okay"; }; diff --git a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi index 891932ae470f34..b724fb6d9689ef 100644 --- a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi +++ 
b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi @@ -297,6 +297,22 @@ sdhci0: mmc@4310000 { status = "disabled"; }; + dmac: dma-controller@4330000 { + compatible = "snps,axi-dma-1.01a"; + reg = <0x04330000 0x1000>; + interrupts = <29 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk CLK_SDMA_AXI>, <&clk CLK_SDMA_AXI>; + clock-names = "core-clk", "cfgr-clk"; + #dma-cells = <1>; + dma-channels = <8>; + snps,block-size = <1024 1024 1024 1024 + 1024 1024 1024 1024>; + snps,priority = <0 1 2 3 4 5 6 7>; + snps,dma-masters = <2>; + snps,data-width = <4>; + status = "disabled"; + }; + plic: interrupt-controller@70000000 { reg = <0x70000000 0x4000000>; interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>; diff --git a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts index 80cb017974d883..a3f9d6f2256659 100644 --- a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts +++ b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts @@ -26,6 +26,83 @@ &cgi_dpll1 { clock-frequency = <25000000>; }; +&emmc { + bus-width = <4>; + no-sdio; + no-sd; + non-removable; + wp-inverted; + status = "okay"; +}; + +&i2c1 { + status = "okay"; + + mcu: syscon@17 { + compatible = "sophgo,sg2042-hwmon-mcu"; + reg = <0x17>; + #thermal-sensor-cells = <1>; + }; +}; + +&sd { + bus-width = <4>; + no-sdio; + no-mmc; + wp-inverted; + status = "okay"; +}; + &uart0 { status = "okay"; }; + +/ { + thermal-zones { + soc-thermal { + polling-delay-passive = <1000>; + polling-delay = <1000>; + thermal-sensors = <&mcu 0>; + + trips { + soc_active1: soc-active1 { + temperature = <30000>; + hysteresis = <8000>; + type = "active"; + }; + + soc_active2: soc-active2 { + temperature = <58000>; + hysteresis = <12000>; + type = "active"; + }; + + soc_active3: soc-active3 { + temperature = <70000>; + hysteresis = <10000>; + type = "active"; + }; + + soc_hot: soc-hot { + temperature = <80000>; + hysteresis = <5000>; + type = "hot"; + }; + }; + }; + + board-thermal { + polling-delay-passive = <1000>; + polling-delay = <1000>; + thermal-sensors = <&mcu 1>; + + trips { + board_active: board-active { + temperature = <75000>; + hysteresis = <8000>; + type = "active"; + }; + }; + }; + }; +}; diff --git a/arch/riscv/boot/dts/sophgo/sg2042.dtsi b/arch/riscv/boot/dts/sophgo/sg2042.dtsi index 34c802bd3f9b8e..4e5fa659162306 100644 --- a/arch/riscv/boot/dts/sophgo/sg2042.dtsi +++ b/arch/riscv/boot/dts/sophgo/sg2042.dtsi @@ -44,8 +44,127 @@ soc: soc { compatible = "simple-bus"; #address-cells = <2>; #size-cells = <2>; + interrupt-parent = <&intc>; ranges; + i2c0: i2c@7030005000 { + compatible = "snps,designware-i2c"; + reg = <0x70 0x30005000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkgen GATE_CLK_APB_I2C>; + clock-names = "ref"; + clock-frequency = <100000>; + interrupts = <101 IRQ_TYPE_LEVEL_HIGH>; + resets = <&rstgen RST_I2C0>; + status = "disabled"; + }; + + i2c1: i2c@7030006000 { + compatible = "snps,designware-i2c"; + reg = <0x70 0x30006000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkgen GATE_CLK_APB_I2C>; + clock-names = "ref"; + clock-frequency = <100000>; + interrupts = <102 IRQ_TYPE_LEVEL_HIGH>; + resets = <&rstgen RST_I2C1>; + status = "disabled"; + }; + + i2c2: i2c@7030007000 { + compatible = "snps,designware-i2c"; + reg = <0x70 0x30007000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkgen GATE_CLK_APB_I2C>; + clock-names = "ref"; + clock-frequency = <100000>; + interrupts = <103 IRQ_TYPE_LEVEL_HIGH>; + resets = <&rstgen 
RST_I2C2>; + status = "disabled"; + }; + + i2c3: i2c@7030008000 { + compatible = "snps,designware-i2c"; + reg = <0x70 0x30008000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkgen GATE_CLK_APB_I2C>; + clock-names = "ref"; + clock-frequency = <100000>; + interrupts = <104 IRQ_TYPE_LEVEL_HIGH>; + resets = <&rstgen RST_I2C3>; + status = "disabled"; + }; + + gpio0: gpio@7030009000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x70 0x30009000 0x0 0x400>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkgen GATE_CLK_APB_GPIO>, + <&clkgen GATE_CLK_GPIO_DB>; + clock-names = "bus", "db"; + + port0a: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&intc>; + interrupts = <96 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + gpio1: gpio@703000a000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x70 0x3000a000 0x0 0x400>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkgen GATE_CLK_APB_GPIO>, + <&clkgen GATE_CLK_GPIO_DB>; + clock-names = "bus", "db"; + + port1a: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&intc>; + interrupts = <97 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + gpio2: gpio@703000b000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x70 0x3000b000 0x0 0x400>; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clkgen GATE_CLK_APB_GPIO>, + <&clkgen GATE_CLK_GPIO_DB>; + clock-names = "bus", "db"; + + port2a: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&intc>; + interrupts = <98 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + pllclk: clock-controller@70300100c0 { compatible = "sophgo,sg2042-pll"; reg = <0x70 0x300100c0 0x0 0x40>; @@ -388,7 +507,6 @@ rstgen: reset-controller@7030013000 { uart0: serial@7040000000 { compatible = "snps,dw-apb-uart"; reg = <0x00000070 0x40000000 0x00000000 0x00001000>; - interrupt-parent = <&intc>; interrupts = <112 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <500000000>; clocks = <&clkgen GATE_CLK_UART_500M>, @@ -399,5 +517,33 @@ uart0: serial@7040000000 { resets = <&rstgen RST_UART0>; status = "disabled"; }; + + emmc: mmc@704002a000 { + compatible = "sophgo,sg2042-dwcmshc"; + reg = <0x70 0x4002a000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = <134 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clkgen GATE_CLK_EMMC_100M>, + <&clkgen GATE_CLK_AXI_EMMC>, + <&clkgen GATE_CLK_100K_EMMC>; + clock-names = "core", + "bus", + "timer"; + status = "disabled"; + }; + + sd: mmc@704002b000 { + compatible = "sophgo,sg2042-dwcmshc"; + reg = <0x70 0x4002b000 0x0 0x1000>; + interrupt-parent = <&intc>; + interrupts = <136 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clkgen GATE_CLK_SD_100M>, + <&clkgen GATE_CLK_AXI_SD>, + <&clkgen GATE_CLK_100K_SD>; + clock-names = "core", + "bus", + "timer"; + status = "disabled"; + }; }; }; diff --git a/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts b/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts index d9b4de9e475742..497d961456f3a1 100644 --- a/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts +++ b/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts @@ -23,6 +23,7 @@ aliases { serial3 = &uart3; serial4 = &uart4; serial5 = &uart5; + spi0 = &spi0; 
}; chosen { @@ -44,18 +45,6 @@ &osc_32k { clock-frequency = <32768>; }; -&apb_clk { - clock-frequency = <62500000>; -}; - -&sdhci_clk { - clock-frequency = <198000000>; -}; - -&uart_sclk { - clock-frequency = <100000000>; -}; - &dmac0 { status = "okay"; }; @@ -79,3 +68,7 @@ &sdio0 { &uart0 { status = "okay"; }; + +&spi0 { + status = "okay"; +}; diff --git a/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi b/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi index 1365d3a512a3b9..78977bdbbe3d31 100644 --- a/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi +++ b/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi @@ -25,18 +25,6 @@ &osc_32k { clock-frequency = <32768>; }; -&apb_clk { - clock-frequency = <62500000>; -}; - -&sdhci_clk { - clock-frequency = <198000000>; -}; - -&uart_sclk { - clock-frequency = <100000000>; -}; - &dmac0 { status = "okay"; }; diff --git a/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts b/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts index 9a3884a73e1372..7738d2895c5acc 100644 --- a/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts +++ b/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts @@ -20,6 +20,7 @@ aliases { serial3 = &uart3; serial4 = &uart4; serial5 = &uart5; + spi0 = &spi0; }; chosen { @@ -30,3 +31,7 @@ chosen { &uart0 { status = "okay"; }; + +&spi0 { + status = "okay"; +}; diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi index 3c9974062c206b..6992060e6a54d2 100644 --- a/arch/riscv/boot/dts/thead/th1520.dtsi +++ b/arch/riscv/boot/dts/thead/th1520.dtsi @@ -5,6 +5,7 @@ */ #include +#include / { compatible = "thead,th1520"; @@ -215,25 +216,6 @@ osc_32k: 32k-oscillator { #clock-cells = <0>; }; - apb_clk: apb-clk-clock { - compatible = "fixed-clock"; - clock-output-names = "apb_clk"; - #clock-cells = <0>; - }; - - uart_sclk: uart-sclk-clock { - compatible = "fixed-clock"; - clock-output-names = "uart_sclk"; - #clock-cells = <0>; - }; - - sdhci_clk: sdhci-clock { - compatible = "fixed-clock"; - clock-frequency = <198000000>; - clock-output-names = "sdhci_clk"; - #clock-cells = <0>; - }; - soc { compatible = "simple-bus"; interrupt-parent = <&plic>; @@ -264,11 +246,22 @@ clint: timer@ffdc000000 { <&cpu3_intc 3>, <&cpu3_intc 7>; }; + spi0: spi@ffe700c000 { + compatible = "thead,th1520-spi", "snps,dw-apb-ssi"; + reg = <0xff 0xe700c000 0x0 0x1000>; + interrupts = <54 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk CLK_SPI>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + uart0: serial@ffe7014000 { compatible = "snps,dw-apb-uart"; reg = <0xff 0xe7014000 0x0 0x100>; interrupts = <36 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_sclk>; + clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART0_PCLK>; + clock-names = "baudclk", "apb_pclk"; reg-shift = <2>; reg-io-width = <4>; status = "disabled"; @@ -278,7 +271,7 @@ emmc: mmc@ffe7080000 { compatible = "thead,th1520-dwcmshc"; reg = <0xff 0xe7080000 0x0 0x10000>; interrupts = <62 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&sdhci_clk>; + clocks = <&clk CLK_EMMC_SDIO>; clock-names = "core"; status = "disabled"; }; @@ -287,7 +280,7 @@ sdio0: mmc@ffe7090000 { compatible = "thead,th1520-dwcmshc"; reg = <0xff 0xe7090000 0x0 0x10000>; interrupts = <64 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&sdhci_clk>; + clocks = <&clk CLK_EMMC_SDIO>; clock-names = "core"; status = "disabled"; }; @@ -296,7 +289,7 @@ sdio1: mmc@ffe70a0000 { compatible = "thead,th1520-dwcmshc"; reg = <0xff 0xe70a0000 0x0 0x10000>; interrupts = <71 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&sdhci_clk>; + clocks = <&clk 
CLK_EMMC_SDIO>; clock-names = "core"; status = "disabled"; }; @@ -305,7 +298,8 @@ uart1: serial@ffe7f00000 { compatible = "snps,dw-apb-uart"; reg = <0xff 0xe7f00000 0x0 0x100>; interrupts = <37 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_sclk>; + clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART1_PCLK>; + clock-names = "baudclk", "apb_pclk"; reg-shift = <2>; reg-io-width = <4>; status = "disabled"; @@ -315,7 +309,8 @@ uart3: serial@ffe7f04000 { compatible = "snps,dw-apb-uart"; reg = <0xff 0xe7f04000 0x0 0x100>; interrupts = <39 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_sclk>; + clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART3_PCLK>; + clock-names = "baudclk", "apb_pclk"; reg-shift = <2>; reg-io-width = <4>; status = "disabled"; @@ -326,6 +321,7 @@ gpio2: gpio@ffe7f34000 { reg = <0xff 0xe7f34000 0x0 0x1000>; #address-cells = <1>; #size-cells = <0>; + clocks = <&clk CLK_GPIO2>; portc: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; @@ -344,6 +340,7 @@ gpio3: gpio@ffe7f38000 { reg = <0xff 0xe7f38000 0x0 0x1000>; #address-cells = <1>; #size-cells = <0>; + clocks = <&clk CLK_GPIO3>; portd: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; @@ -362,6 +359,7 @@ gpio0: gpio@ffec005000 { reg = <0xff 0xec005000 0x0 0x1000>; #address-cells = <1>; #size-cells = <0>; + clocks = <&clk CLK_GPIO0>; porta: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; @@ -380,6 +378,7 @@ gpio1: gpio@ffec006000 { reg = <0xff 0xec006000 0x0 0x1000>; #address-cells = <1>; #size-cells = <0>; + clocks = <&clk CLK_GPIO1>; portb: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; @@ -397,17 +396,25 @@ uart2: serial@ffec010000 { compatible = "snps,dw-apb-uart"; reg = <0xff 0xec010000 0x0 0x4000>; interrupts = <38 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_sclk>; + clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART2_PCLK>; + clock-names = "baudclk", "apb_pclk"; reg-shift = <2>; reg-io-width = <4>; status = "disabled"; }; + clk: clock-controller@ffef010000 { + compatible = "thead,th1520-clk-ap"; + reg = <0xff 0xef010000 0x0 0x1000>; + clocks = <&osc>; + #clock-cells = <1>; + }; + dmac0: dma-controller@ffefc00000 { compatible = "snps,axi-dma-1.01a"; reg = <0xff 0xefc00000 0x0 0x1000>; interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&apb_clk>, <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>, <&clk CLK_PERI_APB_PCLK>; clock-names = "core-clk", "cfgr-clk"; #dma-cells = <1>; dma-channels = <4>; @@ -422,7 +429,7 @@ dmac0: dma-controller@ffefc00000 { timer0: timer@ffefc32000 { compatible = "snps,dw-apb-timer"; reg = <0xff 0xefc32000 0x0 0x14>; - clocks = <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>; clock-names = "timer"; interrupts = <16 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; @@ -431,7 +438,7 @@ timer0: timer@ffefc32000 { timer1: timer@ffefc32014 { compatible = "snps,dw-apb-timer"; reg = <0xff 0xefc32014 0x0 0x14>; - clocks = <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>; clock-names = "timer"; interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; @@ -440,7 +447,7 @@ timer1: timer@ffefc32014 { timer2: timer@ffefc32028 { compatible = "snps,dw-apb-timer"; reg = <0xff 0xefc32028 0x0 0x14>; - clocks = <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>; clock-names = "timer"; interrupts = <18 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; @@ -449,7 +456,7 @@ timer2: timer@ffefc32028 { timer3: timer@ffefc3203c { compatible = "snps,dw-apb-timer"; reg = <0xff 0xefc3203c 0x0 0x14>; - clocks = <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>; clock-names = "timer"; interrupts = <19 IRQ_TYPE_LEVEL_HIGH>; status = 
"disabled"; @@ -459,7 +466,8 @@ uart4: serial@fff7f08000 { compatible = "snps,dw-apb-uart"; reg = <0xff 0xf7f08000 0x0 0x4000>; interrupts = <40 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_sclk>; + clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART4_PCLK>; + clock-names = "baudclk", "apb_pclk"; reg-shift = <2>; reg-io-width = <4>; status = "disabled"; @@ -469,7 +477,8 @@ uart5: serial@fff7f0c000 { compatible = "snps,dw-apb-uart"; reg = <0xff 0xf7f0c000 0x0 0x4000>; interrupts = <41 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&uart_sclk>; + clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART5_PCLK>; + clock-names = "baudclk", "apb_pclk"; reg-shift = <2>; reg-io-width = <4>; status = "disabled"; @@ -478,7 +487,7 @@ uart5: serial@fff7f0c000 { timer4: timer@ffffc33000 { compatible = "snps,dw-apb-timer"; reg = <0xff 0xffc33000 0x0 0x14>; - clocks = <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>; clock-names = "timer"; interrupts = <20 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; @@ -487,7 +496,7 @@ timer4: timer@ffffc33000 { timer5: timer@ffffc33014 { compatible = "snps,dw-apb-timer"; reg = <0xff 0xffc33014 0x0 0x14>; - clocks = <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>; clock-names = "timer"; interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; @@ -496,7 +505,7 @@ timer5: timer@ffffc33014 { timer6: timer@ffffc33028 { compatible = "snps,dw-apb-timer"; reg = <0xff 0xffc33028 0x0 0x14>; - clocks = <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>; clock-names = "timer"; interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; @@ -505,7 +514,7 @@ timer6: timer@ffffc33028 { timer7: timer@ffffc3303c { compatible = "snps,dw-apb-timer"; reg = <0xff 0xffc3303c 0x0 0x14>; - clocks = <&apb_clk>; + clocks = <&clk CLK_PERI_APB_PCLK>; clock-names = "timer"; interrupts = <23 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 0d678325444fcc..2341393cfac1ae 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -137,12 +137,10 @@ CONFIG_VIRTIO_NET=y CONFIG_MACB=y CONFIG_E1000E=y CONFIG_R8169=y -CONFIG_RAVB=y CONFIG_STMMAC_ETH=m CONFIG_MICREL_PHY=y CONFIG_MICROSEMI_PHY=y CONFIG_MOTORCOMM_PHY=y -CONFIG_CAN_RCAR_CANFD=m CONFIG_INPUT_MOUSEDEV=y CONFIG_KEYBOARD_SUN4I_LRADC=m CONFIG_SERIAL_8250=y @@ -150,29 +148,30 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_EARLYCON_RISCV_SBI=y -CONFIG_SERIAL_SH_SCI=y CONFIG_VIRTIO_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=y CONFIG_HW_RANDOM_JH7110=m CONFIG_I2C=y CONFIG_I2C_CHARDEV=m +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_MV64XXX=m -CONFIG_I2C_RIIC=y CONFIG_SPI=y CONFIG_SPI_CADENCE_QUADSPI=m CONFIG_SPI_PL022=m -CONFIG_SPI_RSPI=m CONFIG_SPI_SIFIVE=y CONFIG_SPI_SUN6I=y # CONFIG_PTP_1588_CLOCK is not set +CONFIG_PINCTRL_SOPHGO_CV1800B=y +CONFIG_PINCTRL_SOPHGO_CV1812H=y +CONFIG_PINCTRL_SOPHGO_SG2000=y +CONFIG_PINCTRL_SOPHGO_SG2002=y CONFIG_GPIO_SIFIVE=y CONFIG_POWER_RESET_GPIO_RESTART=y CONFIG_SENSORS_SFCTEMP=m CONFIG_CPU_THERMAL=y CONFIG_DEVFREQ_THERMAL=y -CONFIG_RZG2L_THERMAL=y CONFIG_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y CONFIG_MFD_AXP20X_I2C=y @@ -201,11 +200,11 @@ CONFIG_USB=y CONFIG_USB_OTG=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_RCAR is not set CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_RENESAS_USBHS=m CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y CONFIG_USB_CDNS_SUPPORT=m @@ -217,7 +216,6 @@ 
CONFIG_USB_MUSB_HDRC=m CONFIG_USB_MUSB_SUNXI=m CONFIG_NOP_USB_XCEIV=m CONFIG_USB_GADGET=y -CONFIG_USB_RENESAS_USBHS_UDC=m CONFIG_USB_CONFIGFS=m CONFIG_USB_CONFIGFS_SERIAL=y CONFIG_USB_CONFIGFS_ACM=y @@ -235,7 +233,6 @@ CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_OF_DWCMSHC=y CONFIG_MMC_SDHCI_CADENCE=y CONFIG_MMC_SPI=y -CONFIG_MMC_SDHI=y CONFIG_MMC_DW=y CONFIG_MMC_DW_STARFIVE=y CONFIG_MMC_SUNXI=y @@ -249,8 +246,10 @@ CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_INPUT=y CONFIG_VIRTIO_MMIO=y CONFIG_CLK_SOPHGO_CV1800=y +CONFIG_CLK_SOPHGO_SG2042_PLL=y +CONFIG_CLK_SOPHGO_SG2042_CLKGEN=y +CONFIG_CLK_SOPHGO_SG2042_RPGATE=y CONFIG_SUN8I_DE2_CCU=m -CONFIG_RENESAS_OSTM=y CONFIG_SUN50I_IOMMU=y CONFIG_RPMSG_CHAR=y CONFIG_RPMSG_CTRL=y @@ -258,7 +257,6 @@ CONFIG_RPMSG_VIRTIO=y CONFIG_PM_DEVFREQ=y CONFIG_IIO=y CONFIG_PHY_SUN4I_USB=m -CONFIG_PHY_RCAR_GEN3_USB2=y CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m CONFIG_PHY_STARFIVE_JH7110_PCIE=m CONFIG_PHY_STARFIVE_JH7110_USB=m diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig index af9601da464363..87ff5a1233af86 100644 --- a/arch/riscv/configs/nommu_k210_defconfig +++ b/arch/riscv/configs/nommu_k210_defconfig @@ -58,6 +58,7 @@ CONFIG_I2C=y # CONFIG_I2C_COMPAT is not set CONFIG_I2C_CHARDEV=y # CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y # CONFIG_SPI_MEM is not set diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig index dd460c64915289..95cbd574f291f7 100644 --- a/arch/riscv/configs/nommu_k210_sdcard_defconfig +++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig @@ -50,6 +50,7 @@ CONFIG_DEVTMPFS_MOUNT=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y # CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_DESIGNWARE_CORE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y # CONFIG_SPI_MEM is not set diff --git a/arch/riscv/errata/sifive/errata_cip_453.S b/arch/riscv/errata/sifive/errata_cip_453.S index f1b9623fe1de4c..b1f7b636fe9af6 100644 --- a/arch/riscv/errata/sifive/errata_cip_453.S +++ b/arch/riscv/errata/sifive/errata_cip_453.S @@ -21,7 +21,7 @@ 1: .endm -ENTRY(sifive_cip_453_page_fault_trp) +SYM_FUNC_START(sifive_cip_453_page_fault_trp) ADD_SIGN_EXT a0, t0, t1 #ifdef CONFIG_MMU la t0, do_page_fault @@ -29,10 +29,10 @@ ENTRY(sifive_cip_453_page_fault_trp) la t0, do_trap_unknown #endif jr t0 -END(sifive_cip_453_page_fault_trp) +SYM_FUNC_END(sifive_cip_453_page_fault_trp) -ENTRY(sifive_cip_453_insn_fault_trp) +SYM_FUNC_START(sifive_cip_453_insn_fault_trp) ADD_SIGN_EXT a0, t0, t1 la t0, do_trap_insn_fault jr t0 -END(sifive_cip_453_insn_fault_trp) +SYM_FUNC_END(sifive_cip_453_insn_fault_trp) diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 5c589770f2a823..1461af12da6e2b 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -5,6 +5,7 @@ syscall-y += syscall_table_64.h generic-y += early_ioremap.h generic-y += flat.h generic-y += kvm_para.h +generic-y += mmzone.h generic-y += parport.h generic-y += spinlock.h generic-y += spinlock_types.h diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h index e0a1f84404f31c..6e13695120bc21 100644 --- a/arch/riscv/include/asm/acpi.h +++ b/arch/riscv/include/asm/acpi.h @@ -91,10 +91,8 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table, #endif /* CONFIG_ACPI */ #ifdef CONFIG_ACPI_NUMA -int acpi_numa_get_nid(unsigned int cpu); void acpi_map_cpus_to_nodes(void); #else -static inline int 
acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } static inline void acpi_map_cpus_to_nodes(void) { } #endif /* CONFIG_ACPI_NUMA */ diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h index 71af9ecfcfcb2b..fae152ea0508d2 100644 --- a/arch/riscv/include/asm/bitops.h +++ b/arch/riscv/include/asm/bitops.h @@ -222,44 +222,44 @@ static __always_inline int variable_fls(unsigned int x) #define __NOT(x) (~(x)) /** - * test_and_set_bit - Set a bit and return its old value + * arch_test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation may be reordered on other architectures than x86. */ -static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(or, __NOP, nr, addr); } /** - * test_and_clear_bit - Clear a bit and return its old value + * arch_test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation can be reordered on other architectures other than x86. */ -static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(and, __NOT, nr, addr); } /** - * test_and_change_bit - Change a bit and return its old value + * arch_test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr) { return __test_and_op_bit(xor, __NOP, nr, addr); } /** - * set_bit - Atomically set a bit in memory + * arch_set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -270,13 +270,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void set_bit(int nr, volatile unsigned long *addr) +static inline void arch_set_bit(int nr, volatile unsigned long *addr) { __op_bit(or, __NOP, nr, addr); } /** - * clear_bit - Clears a bit in memory + * arch_clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * @@ -284,13 +284,13 @@ static inline void set_bit(int nr, volatile unsigned long *addr) * on non x86 architectures, so if you are writing portable code, * make sure not to rely on its reordering guarantees. */ -static inline void clear_bit(int nr, volatile unsigned long *addr) +static inline void arch_clear_bit(int nr, volatile unsigned long *addr) { __op_bit(and, __NOT, nr, addr); } /** - * change_bit - Toggle a bit in memory + * arch_change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from * @@ -298,40 +298,40 @@ static inline void clear_bit(int nr, volatile unsigned long *addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. 
*/ -static inline void change_bit(int nr, volatile unsigned long *addr) +static inline void arch_change_bit(int nr, volatile unsigned long *addr) { __op_bit(xor, __NOP, nr, addr); } /** - * test_and_set_bit_lock - Set a bit and return its old value, for lock + * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and provides acquire barrier semantics. * It can be used to implement bit locks. */ -static inline int test_and_set_bit_lock( +static inline int arch_test_and_set_bit_lock( unsigned long nr, volatile unsigned long *addr) { return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq); } /** - * clear_bit_unlock - Clear a bit in memory, for unlock + * arch_clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ -static inline void clear_bit_unlock( +static inline void arch_clear_bit_unlock( unsigned long nr, volatile unsigned long *addr) { __op_bit_ord(and, __NOT, nr, addr, .rl); } /** - * __clear_bit_unlock - Clear a bit in memory, for unlock + * arch___clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * @@ -345,13 +345,13 @@ static inline void clear_bit_unlock( * non-atomic property here: it's a lot more instructions and we still have to * provide release semantics anyway. */ -static inline void __clear_bit_unlock( +static inline void arch___clear_bit_unlock( unsigned long nr, volatile unsigned long *addr) { - clear_bit_unlock(nr, addr); + arch_clear_bit_unlock(nr, addr); } -static inline bool xor_unlock_is_negative_byte(unsigned long mask, +static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, volatile unsigned long *addr) { unsigned long res; @@ -369,6 +369,9 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask, #undef __NOT #undef __AMO +#include +#include + #include #include #include diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index ce79c558a4c85a..8de73f91bfa371 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -46,7 +46,23 @@ do { \ } while (0) #ifdef CONFIG_64BIT -#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end) +extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1]; +extern char _end[]; +#define flush_cache_vmap flush_cache_vmap +static inline void flush_cache_vmap(unsigned long start, unsigned long end) +{ + if (is_vmalloc_or_module_addr((void *)start)) { + int i; + + /* + * We don't care if concurrently a cpu resets this value since + * the only place this can happen is in handle_exception() where + * an sfence.vma is emitted. 
+ */ + for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i) + new_vmalloc[i] = -1ULL; + } +} #define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end) #endif diff --git a/arch/riscv/include/asm/exec.h b/arch/riscv/include/asm/exec.h new file mode 100644 index 00000000000000..07d9942682e0e1 --- /dev/null +++ b/arch/riscv/include/asm/exec.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_EXEC_H +#define __ASM_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* __ASM_EXEC_H */ diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h index 6bcd80325dfc17..182db7930edc22 100644 --- a/arch/riscv/include/asm/fence.h +++ b/arch/riscv/include/asm/fence.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _ASM_RISCV_FENCE_H #define _ASM_RISCV_FENCE_H diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 5a0bd27fd11a6d..46d9de54179ed4 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -92,6 +92,7 @@ #define RISCV_ISA_EXT_ZCF 83 #define RISCV_ISA_EXT_ZCMOP 84 #define RISCV_ISA_EXT_ZAWRS 85 +#define RISCV_ISA_EXT_SVVPTC 86 #define RISCV_ISA_EXT_XLINUXENVCFG 127 diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h index 8e10a94430a2c2..7b038f3b7cb0b7 100644 --- a/arch/riscv/include/asm/irq.h +++ b/arch/riscv/include/asm/irq.h @@ -12,8 +12,68 @@ #include +#define INVALID_CONTEXT UINT_MAX + +#ifdef CONFIG_SMP +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu); +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace +#endif + void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void)); struct fwnode_handle *riscv_get_intc_hwnode(void); +#ifdef CONFIG_ACPI + +enum riscv_irqchip_type { + ACPI_RISCV_IRQCHIP_INTC = 0x00, + ACPI_RISCV_IRQCHIP_IMSIC = 0x01, + ACPI_RISCV_IRQCHIP_PLIC = 0x02, + ACPI_RISCV_IRQCHIP_APLIC = 0x03, +}; + +int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs); +struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi); +unsigned long acpi_rintc_index_to_hartid(u32 index); +unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx); +unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id); +unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx); +int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res); + +#else +static inline int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs) +{ + return 0; +} + +static inline unsigned long acpi_rintc_index_to_hartid(u32 index) +{ + return INVALID_HARTID; +} + +static inline unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, + unsigned int ctxt_idx) +{ + return INVALID_HARTID; +} + +static inline unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) +{ + return INVALID_CONTEXT; +} + +static inline unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) +{ + return INVALID_CONTEXT; +} + +static inline int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) +{ + return 0; +} + +#endif /* CONFIG_ACPI */ + #endif /* _ASM_RISCV_IRQ_H */ diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h index fa0f535bbbf024..1d85b661750884 100644 --- a/arch/riscv/include/asm/kvm_vcpu_pmu.h +++ 
b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -10,6 +10,7 @@ #define __KVM_VCPU_RISCV_PMU_H #include +#include #include #ifdef CONFIG_RISCV_PMU_SBI @@ -64,11 +65,11 @@ struct kvm_pmu { #if defined(CONFIG_32BIT) #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ -{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, #else #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, #endif int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid); @@ -104,8 +105,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); struct kvm_pmu { }; +static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask) +{ + if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) { + *val = 0; + return KVM_INSN_CONTINUE_NEXT_SEPC; + } else { + return KVM_INSN_ILLEGAL_TRAP; + } +} + #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = 0, .count = 0, .func = NULL }, +{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy }, static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {} static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h deleted file mode 100644 index fa17e01d9ab278..00000000000000 --- a/arch/riscv/include/asm/mmzone.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_MMZONE_H -#define __ASM_MMZONE_H - -#ifdef CONFIG_NUMA - -#include - -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[(nid)]) - -#endif /* CONFIG_NUMA */ -#endif /* __ASM_MMZONE_H */ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 7ede2111c5917a..32d308a3355fd4 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -112,11 +112,13 @@ struct kernel_mapping { /* Offset between linear mapping virtual address and kernel load address */ unsigned long va_pa_offset; /* Offset between kernel mapping virtual address and kernel load address */ - unsigned long va_kernel_pa_offset; - unsigned long va_kernel_xip_pa_offset; #ifdef CONFIG_XIP_KERNEL + unsigned long va_kernel_xip_text_pa_offset; + unsigned long va_kernel_xip_data_pa_offset; uintptr_t xiprom; uintptr_t xiprom_sz; +#else + unsigned long va_kernel_pa_offset; #endif }; @@ -134,12 +136,18 @@ extern phys_addr_t phys_ram_base; #else void *linear_mapping_pa_to_va(unsigned long x); #endif + +#ifdef CONFIG_XIP_KERNEL #define kernel_mapping_pa_to_va(y) ({ \ unsigned long _y = (unsigned long)(y); \ - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \ - (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \ - (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \ + (_y < phys_ram_base) ? 
\ + (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \ + (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \ }) +#else +#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset)) +#endif + #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) #ifndef CONFIG_DEBUG_VIRTUAL @@ -147,12 +155,17 @@ void *linear_mapping_pa_to_va(unsigned long x); #else phys_addr_t linear_mapping_va_to_pa(unsigned long x); #endif + +#ifdef CONFIG_XIP_KERNEL #define kernel_mapping_va_to_pa(y) ({ \ unsigned long _y = (unsigned long)(y); \ - (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \ - (_y - kernel_map.va_kernel_xip_pa_offset) : \ - (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \ + (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \ + (_y - kernel_map.va_kernel_xip_text_pa_offset) : \ + (_y - kernel_map.va_kernel_xip_data_pa_offset); \ }) +#else +#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset) +#endif #define __va_to_pa_nodebug(x) ({ \ unsigned long _x = x; \ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 089f3c9f56a389..e79f15293492d5 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -107,13 +107,6 @@ #endif -#ifdef CONFIG_XIP_KERNEL -#define XIP_OFFSET SZ_32M -#define XIP_OFFSET_MASK (SZ_32M - 1) -#else -#define XIP_OFFSET 0 -#endif - #ifndef __ASSEMBLY__ #include @@ -142,11 +135,14 @@ #ifdef CONFIG_XIP_KERNEL #define XIP_FIXUP(addr) ({ \ + extern char _sdata[], _start[], _end[]; \ + uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \ + + (uintptr_t)&_sdata - (uintptr_t)&_start; \ + uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \ + + (uintptr_t)&_end - (uintptr_t)&_start; \ uintptr_t __a = (uintptr_t)(addr); \ - (__a >= CONFIG_XIP_PHYS_ADDR && \ - __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \ - __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ - __a; \ + (__a >= __rom_start_data && __a < __rom_end_data) ? \ + __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \ }) #else #define XIP_FIXUP(addr) (addr) @@ -501,6 +497,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) { + asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1) + : : : : svvptc); + /* * The kernel assumes that TLBs don't cache invalid entries, but * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a @@ -510,6 +509,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, */ while (nr--) local_flush_tlb_page(address + nr * PAGE_SIZE); + +svvptc:; + /* + * Svvptc guarantees that the new valid pte will be visible within + * a bounded timeframe, so when the uarch does not cache invalid + * entries, we don't have to do anything. 
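+	 * At worst, the access that triggered the fault is replayed and
+	 * takes one more spurious fault before the page table walker
+	 * observes the new pte.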
+ */ } #define update_mmu_cache(vma, addr, ptep) \ update_mmu_cache_range(NULL, vma, addr, ptep, 1) diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 7bd3746028c9e2..98f631b051dba8 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -159,6 +159,7 @@ struct riscv_pmu_snapshot_data { #define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0) #define RISCV_PMU_RAW_EVENT_IDX 0x20000 +#define RISCV_PLAT_FW_EVENT 0xFFFF /** General pmu event codes specified in SBI PMU extension */ enum sbi_pmu_hw_generic_events_t { diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index ec11001c3fe042..ab92fc84e1fc9e 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -46,7 +46,7 @@ bool kernel_page_present(struct page *page); #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_STRICT_KERNEL_RWX +#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL) #ifdef CONFIG_64BIT #define SECTION_ALIGN (1 << 21) #else diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h index 63acaecc337478..2f901a410586d0 100644 --- a/arch/riscv/include/asm/sparsemem.h +++ b/arch/riscv/include/asm/sparsemem.h @@ -7,7 +7,7 @@ #ifdef CONFIG_64BIT #define MAX_PHYSMEM_BITS 56 #else -#define MAX_PHYSMEM_BITS 34 +#define MAX_PHYSMEM_BITS 32 #endif /* CONFIG_64BIT */ #define SECTION_SIZE_BITS 27 #endif /* CONFIG_SPARSEMEM */ diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h index a96b1fea24fe43..5ba77f60bf0b58 100644 --- a/arch/riscv/include/asm/string.h +++ b/arch/riscv/include/asm/string.h @@ -19,6 +19,7 @@ extern asmlinkage void *__memcpy(void *, const void *, size_t); extern asmlinkage void *memmove(void *, const void *, size_t); extern asmlinkage void *__memmove(void *, const void *, size_t); +#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) #define __HAVE_ARCH_STRCMP extern asmlinkage int strcmp(const char *cs, const char *ct); @@ -27,6 +28,7 @@ extern asmlinkage __kernel_size_t strlen(const char *); #define __HAVE_ARCH_STRNCMP extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count); +#endif /* For those files which don't want to check by kasan. */ #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index fca5c6be2b8112..9c10fb180f4389 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -13,7 +13,12 @@ #include /* thread information allocation */ -#define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER +#ifdef CONFIG_KASAN +#define KASAN_STACK_ORDER 1 +#else +#define KASAN_STACK_ORDER 0 +#endif +#define THREAD_SIZE_ORDER (CONFIG_THREAD_SIZE_ORDER + KASAN_STACK_ORDER) #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) /* @@ -61,6 +66,13 @@ struct thread_info { void *scs_base; void *scs_sp; #endif +#ifdef CONFIG_64BIT + /* + * Used in handle_exception() to save a0, a1 and a2 before knowing if we + * can access the kernel stack. 
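+	 * The offsets of these fields are exported as TASK_TI_A0/A1/A2 by
+	 * asm-offsets.c so that the new_vmalloc_check macro in entry.S can
+	 * address them relative to tp.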
+ */ + unsigned long a0, a1, a2; +#endif }; #ifdef CONFIG_SHADOW_CALL_STACK @@ -112,8 +124,4 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE) -#define _TIF_WORK_MASK \ - (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_SIGNAL | _TIF_UPROBE) - #endif /* _ASM_RISCV_THREAD_INFO_H */ diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h index 61183688bdd54e..fe1a8bf6902d95 100644 --- a/arch/riscv/include/asm/topology.h +++ b/arch/riscv/include/asm/topology.h @@ -4,6 +4,10 @@ #include +#ifdef CONFIG_NUMA +#include +#endif + /* Replace task scheduler's default frequency-invariant accounting */ #define arch_scale_freq_tick topology_scale_freq_tick #define arch_set_freq_scale topology_set_freq_scale diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h index 51f6dfe19745aa..fefe94dc98e20d 100644 --- a/arch/riscv/include/asm/vmalloc.h +++ b/arch/riscv/include/asm/vmalloc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _ASM_RISCV_VMALLOC_H #define _ASM_RISCV_VMALLOC_H diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h index b65bf6306f69c6..f3d56299bc22c5 100644 --- a/arch/riscv/include/asm/xip_fixup.h +++ b/arch/riscv/include/asm/xip_fixup.h @@ -9,18 +9,36 @@ #ifdef CONFIG_XIP_KERNEL .macro XIP_FIXUP_OFFSET reg - REG_L t0, _xip_fixup + /* Fix-up address in Flash into address in RAM early during boot before + * MMU is up. Because generated code "thinks" data is in Flash, but it + * is actually in RAM (actually data is also in Flash, but Flash is + * read-only, thus we need to use the data residing in RAM). + * + * The start of data in Flash is _sdata and the start of data in RAM is + * CONFIG_PHYS_RAM_BASE. So this fix-up essentially does this: + * reg += CONFIG_PHYS_RAM_BASE - _start + */ + li t0, CONFIG_PHYS_RAM_BASE add \reg, \reg, t0 + la t0, _sdata + sub \reg, \reg, t0 .endm .macro XIP_FIXUP_FLASH_OFFSET reg + /* In linker script, at the transition from read-only section to + * writable section, the VMA is increased while LMA remains the same. + * (See in linker script how _sdata, __data_loc and LOAD_OFFSET is + * changed) + * + * Consequently, early during boot before MMU is up, the generated code + * reads the "writable" section at wrong addresses, because VMA is used + * by compiler to generate code, but the data is located in Flash using + * LMA. + */ + la t0, _sdata + sub \reg, \reg, t0 la t0, __data_loc - REG_L t1, _xip_phys_offset - sub \reg, \reg, t1 add \reg, \reg, t0 .endm - -_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET -_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET #else .macro XIP_FIXUP_OFFSET reg .endm diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c index ba957aaca5cbb8..6e0d333f57e556 100644 --- a/arch/riscv/kernel/acpi.c +++ b/arch/riscv/kernel/acpi.c @@ -311,29 +311,26 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) #ifdef CONFIG_PCI /* - * These interfaces are defined just to enable building ACPI core. - * TODO: Update it with actual implementation when external interrupt - * controller support is added in RISC-V ACPI. + * raw_pci_read/write - Platform-specific PCI config space access. 
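+ *
+ * Look up the target bus with pci_find_bus() and forward the access to
+ * its config space accessors; PCIBIOS_DEVICE_NOT_FOUND is returned when
+ * the bus does not exist.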
*/ -int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, - int reg, int len, u32 *val) +int raw_pci_read(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 *val) { - return PCIBIOS_DEVICE_NOT_FOUND; -} + struct pci_bus *b = pci_find_bus(domain, bus); -int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, - int reg, int len, u32 val) -{ - return PCIBIOS_DEVICE_NOT_FOUND; + if (!b) + return PCIBIOS_DEVICE_NOT_FOUND; + return b->ops->read(b, devfn, reg, len, val); } -int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +int raw_pci_write(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 val) { - return -1; -} + struct pci_bus *b = pci_find_bus(domain, bus); -struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) -{ - return NULL; + if (!b) + return PCIBIOS_DEVICE_NOT_FOUND; + return b->ops->write(b, devfn, reg, len, val); } + #endif /* CONFIG_PCI */ diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c index ff95aeebee3ebc..130769e3a99c3b 100644 --- a/arch/riscv/kernel/acpi_numa.c +++ b/arch/riscv/kernel/acpi_numa.c @@ -30,7 +30,7 @@ static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE }; -int __init acpi_numa_get_nid(unsigned int cpu) +static int __init acpi_numa_get_nid(unsigned int cpu) { return acpi_early_node_map[cpu]; } diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index b09ca5f944f77b..e94180ba432f59 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -36,6 +36,8 @@ void asm_offsets(void) OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]); OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]); OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]); + + OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count); OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); @@ -43,6 +45,11 @@ void asm_offsets(void) #ifdef CONFIG_SHADOW_CALL_STACK OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp); #endif +#ifdef CONFIG_64BIT + OFFSET(TASK_TI_A0, task_struct, thread_info.a0); + OFFSET(TASK_TI_A1, task_struct, thread_info.a1); + OFFSET(TASK_TI_A2, task_struct, thread_info.a2); +#endif OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu); OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]); diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index d6c108c50cba92..b320b1d9aa01ef 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -71,6 +71,11 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, this_leaf->type = type; } +int init_cache_level(unsigned int cpu) +{ + return init_of_cache_level(cpu); +} + int populate_cache_leaves(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index b427188b28fc16..3a8eeaa9310c32 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -381,6 +381,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), + __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC), }; const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext); diff --git 
a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index 11c0d2e0becfe9..3c37661801f95d 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -451,6 +451,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, *(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) | ENCODE_CJTYPE_IMM(val - addr); break; + case R_RISCV_ADD16: + *(u16 *)loc += val; + break; + case R_RISCV_SUB16: + *(u16 *)loc -= val; + break; case R_RISCV_ADD32: *(u32 *)loc += val; break; diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index ac2e908d4418d0..c200d329d4bdbe 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -19,6 +19,79 @@ .section .irqentry.text, "ax" +.macro new_vmalloc_check + REG_S a0, TASK_TI_A0(tp) + csrr a0, CSR_CAUSE + /* Exclude IRQs */ + blt a0, zero, _new_vmalloc_restore_context_a0 + + REG_S a1, TASK_TI_A1(tp) + /* Only check new_vmalloc if we are in page/protection fault */ + li a1, EXC_LOAD_PAGE_FAULT + beq a0, a1, _new_vmalloc_kernel_address + li a1, EXC_STORE_PAGE_FAULT + beq a0, a1, _new_vmalloc_kernel_address + li a1, EXC_INST_PAGE_FAULT + bne a0, a1, _new_vmalloc_restore_context_a1 + +_new_vmalloc_kernel_address: + /* Is it a kernel address? */ + csrr a0, CSR_TVAL + bge a0, zero, _new_vmalloc_restore_context_a1 + + /* Check if a new vmalloc mapping appeared that could explain the trap */ + REG_S a2, TASK_TI_A2(tp) + /* + * Computes: + * a0 = &new_vmalloc[BIT_WORD(cpu)] + * a1 = BIT_MASK(cpu) + */ + REG_L a2, TASK_TI_CPU(tp) + /* + * Compute the new_vmalloc element position: + * (cpu / 64) * 8 = (cpu >> 6) << 3 + */ + srli a1, a2, 6 + slli a1, a1, 3 + la a0, new_vmalloc + add a0, a0, a1 + /* + * Compute the bit position in the new_vmalloc element: + * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6 + * = cpu - ((cpu >> 6) << 3) << 3 + */ + slli a1, a1, 3 + sub a1, a2, a1 + /* Compute the "get mask": 1 << bit_pos */ + li a2, 1 + sll a1, a2, a1 + + /* Check the value of new_vmalloc for this cpu */ + REG_L a2, 0(a0) + and a2, a2, a1 + beq a2, zero, _new_vmalloc_restore_context + + /* Atomically reset the current cpu bit in new_vmalloc */ + amoxor.d a0, a1, (a0) + + /* Only emit a sfence.vma if the uarch caches invalid entries */ + ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1) + + REG_L a0, TASK_TI_A0(tp) + REG_L a1, TASK_TI_A1(tp) + REG_L a2, TASK_TI_A2(tp) + csrw CSR_SCRATCH, x0 + sret + +_new_vmalloc_restore_context: + REG_L a2, TASK_TI_A2(tp) +_new_vmalloc_restore_context_a1: + REG_L a1, TASK_TI_A1(tp) +_new_vmalloc_restore_context_a0: + REG_L a0, TASK_TI_A0(tp) +.endm + + SYM_CODE_START(handle_exception) /* * If coming from userspace, preserve the user thread pointer and load @@ -30,6 +103,20 @@ SYM_CODE_START(handle_exception) .Lrestore_kernel_tpsp: csrr tp, CSR_SCRATCH + +#ifdef CONFIG_64BIT + /* + * The RISC-V kernel does not eagerly emit a sfence.vma after each + * new vmalloc mapping, which may result in exceptions: + * - if the uarch caches invalid entries, the new mapping would not be + * observed by the page table walker and an invalidation is needed. + * - if the uarch does not cache invalid entries, a reordered access + * could "miss" the new mapping and traps: in that case, we only need + * to retry the access, no sfence.vma is required. 
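+	 *
+	 * new_vmalloc_check only uses a0-a2 and spills them into thread_info
+	 * (TASK_TI_A0..A2), since at this point it is not known yet whether
+	 * the kernel stack is accessible.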
+ */ + new_vmalloc_check +#endif + REG_S sp, TASK_TI_KERNEL_SP(tp) #ifdef CONFIG_VMAP_STACK @@ -239,8 +326,8 @@ SYM_CODE_START(ret_from_fork) jalr s0 1: move a0, sp /* pt_regs */ - la ra, ret_from_exception - tail syscall_exit_to_user_mode + call syscall_exit_to_user_mode + j ret_from_exception SYM_CODE_END(ret_from_fork) #ifdef CONFIG_IRQ_STACKS diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 906f9a3a5d65de..1cd461f3d8726d 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -787,8 +787,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, int res; unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); struct hlist_head *relocation_hashtable; - struct list_head used_buckets_list; unsigned int hashtable_bits; + LIST_HEAD(used_buckets_list); hashtable_bits = initialize_relocation_hashtable(num_relocations, &relocation_hashtable); @@ -796,8 +796,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, if (!relocation_hashtable) return -ENOMEM; - INIT_LIST_HEAD(&used_buckets_list); - pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c index 3348a61de7d998..c7468af77c663a 100644 --- a/arch/riscv/kernel/perf_callchain.c +++ b/arch/riscv/kernel/perf_callchain.c @@ -6,37 +6,9 @@ #include -/* - * Get the return address for a single stackframe and return a pointer to the - * next frame tail. - */ -static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, - unsigned long fp, unsigned long reg_ra) +static bool fill_callchain(void *entry, unsigned long pc) { - struct stackframe buftail; - unsigned long ra = 0; - unsigned long __user *user_frame_tail = - (unsigned long __user *)(fp - sizeof(struct stackframe)); - - /* Check accessibility of one struct frame_tail beyond */ - if (!access_ok(user_frame_tail, sizeof(buftail))) - return 0; - if (__copy_from_user_inatomic(&buftail, user_frame_tail, - sizeof(buftail))) - return 0; - - if (reg_ra != 0) - ra = reg_ra; - else - ra = buftail.ra; - - fp = buftail.fp; - if (ra != 0) - perf_callchain_store(entry, ra); - else - return 0; - - return fp; + return perf_callchain_store(entry, pc) == 0; } /* @@ -56,19 +28,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { - unsigned long fp = 0; - - fp = regs->s0; - perf_callchain_store(entry, regs->epc); - - fp = user_backtrace(entry, fp, regs->ra); - while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) - fp = user_backtrace(entry, fp, 0); -} - -static bool fill_callchain(void *entry, unsigned long pc) -{ - return perf_callchain_store(entry, pc) == 0; + arch_stack_walk_user(fill_callchain, entry, regs); } void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile index 50bc5ef7dd2fe5..d5bf1bc7de62e2 100644 --- a/arch/riscv/kernel/pi/Makefile +++ b/arch/riscv/kernel/pi/Makefile @@ -5,6 +5,7 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \ -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \ $(call cc-option,-mbranch-protection=none) \ -I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \ + -include $(srctree)/include/linux/hidden.h \ -D__DISABLE_EXPORTS -ffreestanding \ -fno-asynchronous-unwind-tables -fno-unwind-tables \ $(call cc-option,-fno-addrsig) @@ -16,6 +17,7 
@@ KBUILD_CFLAGS += -mcmodel=medany CFLAGS_cmdline_early.o += -D__NO_FORTIFY CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY +CFLAGS_fdt_early.o += -D__NO_FORTIFY $(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \ --remove-section=.note.gnu.property \ @@ -32,5 +34,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE $(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE $(call if_changed_rule,cc_o_c) -obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o +obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o archrandom_early.pi.o extra-y := $(patsubst %.pi.o,%.o,$(obj-y)) diff --git a/arch/riscv/kernel/pi/archrandom_early.c b/arch/riscv/kernel/pi/archrandom_early.c new file mode 100644 index 00000000000000..3f05d3cf3b7b22 --- /dev/null +++ b/arch/riscv/kernel/pi/archrandom_early.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#include "pi.h" + +/* + * To avoid rewriting code include asm/archrandom.h and create macros + * for the functions that won't be included. + */ +#undef riscv_has_extension_unlikely +#define riscv_has_extension_likely(...) false +#undef pr_err_once +#define pr_err_once(...) + +#include + +u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa) +{ + unsigned long seed = 0; + + if (!fdt_early_match_extension_isa((const void *)dtb_pa, "zkr")) + return 0; + + if (!csr_seed_long(&seed)) + return 0; + + return seed; +} diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c index f6d4dedffb8422..fbcdc9e4e14322 100644 --- a/arch/riscv/kernel/pi/cmdline_early.c +++ b/arch/riscv/kernel/pi/cmdline_early.c @@ -6,15 +6,9 @@ #include #include -static char early_cmdline[COMMAND_LINE_SIZE]; +#include "pi.h" -/* - * Declare the functions that are exported (but prefixed) here so that LLVM - * does not complain it lacks the 'static' keyword (which, if added, makes - * LLVM complain because the function is actually unused in this file). - */ -u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa); -bool set_nokaslr_from_cmdline(uintptr_t dtb_pa); +static char early_cmdline[COMMAND_LINE_SIZE]; static char *get_early_cmdline(uintptr_t dtb_pa) { diff --git a/arch/riscv/kernel/pi/fdt_early.c b/arch/riscv/kernel/pi/fdt_early.c index 899610e042ab4d..9bdee2fafe47e4 100644 --- a/arch/riscv/kernel/pi/fdt_early.c +++ b/arch/riscv/kernel/pi/fdt_early.c @@ -2,13 +2,9 @@ #include #include #include +#include -/* - * Declare the functions that are exported (but prefixed) here so that LLVM - * does not complain it lacks the 'static' keyword (which, if added, makes - * LLVM complain because the function is actually unused in this file). 
- */ -u64 get_kaslr_seed(uintptr_t dtb_pa); +#include "pi.h" u64 get_kaslr_seed(uintptr_t dtb_pa) { @@ -28,3 +24,162 @@ u64 get_kaslr_seed(uintptr_t dtb_pa) *prop = 0; return ret; } + +/** + * fdt_device_is_available - check if a device is available for use + * + * @fdt: pointer to the device tree blob + * @node: offset of the node whose property to find + * + * Returns true if the status property is absent or set to "okay" or "ok", + * false otherwise + */ +static bool fdt_device_is_available(const void *fdt, int node) +{ + const char *status; + int statlen; + + status = fdt_getprop(fdt, node, "status", &statlen); + if (!status) + return true; + + if (statlen > 0) { + if (!strcmp(status, "okay") || !strcmp(status, "ok")) + return true; + } + + return false; +} + +/* Copy of fdt_nodename_eq_ */ +static int fdt_node_name_eq(const void *fdt, int offset, + const char *s) +{ + int olen; + int len = strlen(s); + const char *p = fdt_get_name(fdt, offset, &olen); + + if (!p || olen < len) + /* short match */ + return 0; + + if (memcmp(p, s, len) != 0) + return 0; + + if (p[len] == '\0') + return 1; + else if (!memchr(s, '@', len) && (p[len] == '@')) + return 1; + else + return 0; +} + +/** + * isa_string_contains - check if isa string contains an extension + * + * @isa_str: isa string to search + * @ext_name: the extension to search for + * + * Returns true if the extension is in the given isa string, + * false otherwise + */ +static bool isa_string_contains(const char *isa_str, const char *ext_name) +{ + size_t i, single_end, len = strlen(ext_name); + char ext_end; + + /* Error must contain rv32/64 */ + if (strlen(isa_str) < 4) + return false; + + if (len == 1) { + single_end = strcspn(isa_str, "sSxXzZ"); + /* Search for single chars between rv32/64 and multi-letter extensions */ + for (i = 4; i < single_end; i++) { + if (tolower(isa_str[i]) == ext_name[0]) + return true; + } + return false; + } + + /* Skip to start of multi-letter extensions */ + isa_str = strpbrk(isa_str, "sSxXzZ"); + while (isa_str) { + if (strncasecmp(isa_str, ext_name, len) == 0) { + ext_end = isa_str[len]; + /* Check if matches the whole extension. */ + if (ext_end == '\0' || ext_end == '_') + return true; + } + /* Multi-letter extensions must be split from other multi-letter + * extensions with an "_", the end of a multi-letter extension will + * either be the null character or the "_" at the start of the next + * multi-letter extension. 
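+		 * E.g. searching for "zkr" in "rv64imac_zicsr_zkr": the
+		 * candidate starting at "zicsr" does not match, so skip past
+		 * the next "_" and the candidate "zkr" matches with a
+		 * terminating '\0'.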
+ */ + isa_str = strchr(isa_str, '_'); + if (isa_str) + isa_str++; + } + + return false; +} + +/** + * early_cpu_isa_ext_available - check if cpu node has an extension + * + * @fdt: pointer to the device tree blob + * @node: offset of the cpu node + * @ext_name: the extension to search for + * + * Returns true if the cpu node has the extension, + * false otherwise + */ +static bool early_cpu_isa_ext_available(const void *fdt, int node, const char *ext_name) +{ + const void *prop; + int len; + + prop = fdt_getprop(fdt, node, "riscv,isa-extensions", &len); + if (prop && fdt_stringlist_contains(prop, len, ext_name)) + return true; + + prop = fdt_getprop(fdt, node, "riscv,isa", &len); + if (prop && isa_string_contains(prop, ext_name)) + return true; + + return false; +} + +/** + * fdt_early_match_extension_isa - check if all cpu nodes have an extension + * + * @fdt: pointer to the device tree blob + * @ext_name: the extension to search for + * + * Returns true if the all available the cpu nodes have the extension, + * false otherwise + */ +bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name) +{ + int node, parent; + bool ret = false; + + parent = fdt_path_offset(fdt, "/cpus"); + if (parent < 0) + return false; + + fdt_for_each_subnode(node, fdt, parent) { + if (!fdt_node_name_eq(fdt, node, "cpu")) + continue; + + if (!fdt_device_is_available(fdt, node)) + continue; + + if (!early_cpu_isa_ext_available(fdt, node, ext_name)) + return false; + + ret = true; + } + + return ret; +} diff --git a/arch/riscv/kernel/pi/pi.h b/arch/riscv/kernel/pi/pi.h new file mode 100644 index 00000000000000..21141d84fea603 --- /dev/null +++ b/arch/riscv/kernel/pi/pi.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _RISCV_PI_H_ +#define _RISCV_PI_H_ + +#include + +/* + * The following functions are exported (but prefixed). Declare them here so + * that LLVM does not complain it lacks the 'static' keyword (which, if + * added, makes LLVM complain because the function is unused). 
+ */ + +u64 get_kaslr_seed(uintptr_t dtb_pa); +u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa); +bool set_nokaslr_from_cmdline(uintptr_t dtb_pa); +u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa); + +bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name); + +#endif /* _RISCV_PI_H_ */ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index e4bc61c4e58af9..e3142d8a6e284d 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -26,6 +27,7 @@ #include #include #include +#include #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include @@ -99,6 +101,13 @@ void show_regs(struct pt_regs *regs) dump_backtrace(regs, NULL, KERN_DEFAULT); } +unsigned long arch_align_stack(unsigned long sp) +{ + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) + sp -= get_random_u32_below(PAGE_SIZE); + return sp & ~0xf; +} + #ifdef CONFIG_COMPAT static bool compat_mode_supported __read_mostly; diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c index a72879b4249a5d..5ab1c7e1a6ed5d 100644 --- a/arch/riscv/kernel/riscv_ksyms.c +++ b/arch/riscv/kernel/riscv_ksyms.c @@ -12,9 +12,6 @@ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(__memset); EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memmove); diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 8e6eb64459afb0..c180a647a30e44 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -21,6 +22,7 @@ #include #include #include +#include #include #include @@ -33,6 +35,8 @@ enum ipi_message_type { IPI_CPU_CRASH_STOP, IPI_IRQ_WORK, IPI_TIMER, + IPI_CPU_BACKTRACE, + IPI_KGDB_ROUNDUP, IPI_MAX }; @@ -113,6 +117,7 @@ void arch_irq_work_raise(void) static irqreturn_t handle_IPI(int irq, void *data) { + unsigned int cpu = smp_processor_id(); int ipi = irq - ipi_virq_base; switch (ipi) { @@ -126,7 +131,7 @@ static irqreturn_t handle_IPI(int irq, void *data) ipi_stop(); break; case IPI_CPU_CRASH_STOP: - ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs()); + ipi_cpu_crash_stop(cpu, get_irq_regs()); break; case IPI_IRQ_WORK: irq_work_run(); @@ -136,8 +141,14 @@ static irqreturn_t handle_IPI(int irq, void *data) tick_receive_broadcast(); break; #endif + case IPI_CPU_BACKTRACE: + nmi_cpu_backtrace(get_irq_regs()); + break; + case IPI_KGDB_ROUNDUP: + kgdb_nmicallback(cpu, get_irq_regs()); + break; default: - pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi); + pr_warn("CPU%d: unhandled IPI%d\n", cpu, ipi); break; } @@ -203,6 +214,8 @@ static const char * const ipi_names[] = { [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", [IPI_TIMER] = "Timer broadcast interrupts", + [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", + [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts", }; void show_ipi_stats(struct seq_file *p, int prec) @@ -323,3 +336,29 @@ void arch_smp_send_reschedule(int cpu) send_ipi_single(cpu, IPI_RESCHEDULE); } EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); + +static void riscv_backtrace_ipi(cpumask_t *mask) +{ + send_ipi_mask(mask, IPI_CPU_BACKTRACE); +} + +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) +{ + nmi_trigger_cpumask_backtrace(mask, exclude_cpu, 
riscv_backtrace_ipi); +} + +#ifdef CONFIG_KGDB +void kgdb_roundup_cpus(void) +{ + int this_cpu = raw_smp_processor_id(); + int cpu; + + for_each_online_cpu(cpu) { + /* No need to roundup ourselves */ + if (cpu == this_cpu) + continue; + + send_ipi_single(cpu, IPI_KGDB_ROUNDUP); + } +} +#endif diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index c6d5de22463f9d..153a2db4c5fa14 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -162,3 +162,46 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void { walk_stackframe(task, regs, consume_entry, cookie); } + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. + */ +static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry, + void *cookie, unsigned long fp, + unsigned long reg_ra) +{ + struct stackframe buftail; + unsigned long ra = 0; + unsigned long __user *user_frame_tail = + (unsigned long __user *)(fp - sizeof(struct stackframe)); + + /* Check accessibility of one struct frame_tail beyond */ + if (!access_ok(user_frame_tail, sizeof(buftail))) + return 0; + if (__copy_from_user_inatomic(&buftail, user_frame_tail, + sizeof(buftail))) + return 0; + + ra = reg_ra ? : buftail.ra; + + fp = buftail.fp; + if (!ra || !consume_entry(cookie, ra)) + return 0; + + return fp; +} + +void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, + const struct pt_regs *regs) +{ + unsigned long fp = 0; + + fp = regs->s0; + if (!consume_entry(cookie, regs->epc)) + return; + + fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra); + while (fp && !(fp & 0x7)) + fp = unwind_user_frame(consume_entry, cookie, fp, 0); +} diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index f7ef8ad9b550d0..960feb1526caa0 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -45,7 +45,7 @@ $(obj)/vdso.o: $(obj)/vdso.so # link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE $(call if_changed,vdsold) -LDFLAGS_vdso.so.dbg = -shared -S -soname=linux-vdso.so.1 \ +LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \ --build-id=sha1 --hash-style=both --eh-frame-hdr # strip rule for the .so file diff --git a/arch/riscv/kernel/vendor_extensions/andes.c b/arch/riscv/kernel/vendor_extensions/andes.c index ec688c88456ad3..51f302b6d5032e 100644 --- a/arch/riscv/kernel/vendor_extensions/andes.c +++ b/arch/riscv/kernel/vendor_extensions/andes.c @@ -8,7 +8,7 @@ #include /* All Andes vendor extensions supported in Linux */ -const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = { +static const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = { __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU), }; diff --git a/arch/riscv/kernel/vmcore_info.c b/arch/riscv/kernel/vmcore_info.c index 6d7a22522d6309..d5e448aa90e746 100644 --- a/arch/riscv/kernel/vmcore_info.c +++ b/arch/riscv/kernel/vmcore_info.c @@ -19,6 +19,13 @@ void arch_crash_save_vmcoreinfo(void) #endif #endif vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR); +#ifdef CONFIG_XIP_KERNEL + /* TODO: Communicate with crash-utility developers on the information to + * export. The XIP case is more complicated, because the virtual-physical + * address offset depends on whether the address is in ROM or in RAM. 
+ */ +#else vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n", kernel_map.va_kernel_pa_offset); +#endif } diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S index 8c3daa1b05313a..a7611789bad5bf 100644 --- a/arch/riscv/kernel/vmlinux-xip.lds.S +++ b/arch/riscv/kernel/vmlinux-xip.lds.S @@ -14,6 +14,7 @@ #include #include #include +#include OUTPUT_ARCH(riscv) ENTRY(_start) @@ -65,10 +66,10 @@ SECTIONS * From this point, stuff is considered writable and will be copied to RAM */ __data_loc = ALIGN(PAGE_SIZE); /* location in file */ - . = KERNEL_LINK_ADDR + XIP_OFFSET; /* location in memory */ + . = ALIGN(SECTION_ALIGN); /* location in memory */ #undef LOAD_OFFSET -#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK)) +#define LOAD_OFFSET (KERNEL_LINK_ADDR + _sdata - __data_loc) _sdata = .; /* Start of data section */ _data = .; diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index bab2ec34cd8766..f3427f6de60807 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -20,7 +20,7 @@ long kvm_arch_dev_ioctl(struct file *filp, return -EINVAL; } -int kvm_arch_hardware_enable(void) +int kvm_arch_enable_virtualization_cpu(void) { csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT); csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT); @@ -35,7 +35,7 @@ int kvm_arch_hardware_enable(void) return 0; } -void kvm_arch_hardware_disable(void) +void kvm_arch_disable_virtualization_cpu(void) { kvm_riscv_aia_disable(); diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index bcf41d6e0df0e3..2707a51b082ca7 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -391,19 +391,9 @@ int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, static void kvm_pmu_clear_snapshot_area(struct kvm_vcpu *vcpu) { struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); - int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data); - if (kvpmu->sdata) { - if (kvpmu->snapshot_addr != INVALID_GPA) { - memset(kvpmu->sdata, 0, snapshot_area_size); - kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr, - kvpmu->sdata, snapshot_area_size); - } else { - pr_warn("snapshot address invalid\n"); - } - kfree(kvpmu->sdata); - kvpmu->sdata = NULL; - } + kfree(kvpmu->sdata); + kvpmu->sdata = NULL; kvpmu->snapshot_addr = INVALID_GPA; } diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 62f409d4176e41..7de128be8db9bc 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -127,8 +127,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) run->riscv_sbi.args[3] = cp->a3; run->riscv_sbi.args[4] = cp->a4; run->riscv_sbi.args[5] = cp->a5; - run->riscv_sbi.ret[0] = cp->a0; - run->riscv_sbi.ret[1] = cp->a1; + run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED; + run->riscv_sbi.ret[1] = 0; } void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu, diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index 2b369f51b0a5ed..8eec6b69a875f8 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile @@ -3,9 +3,11 @@ lib-y += delay.o lib-y += memcpy.o lib-y += memset.o lib-y += memmove.o +ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),) lib-y += strcmp.o lib-y += strlen.o lib-y += strncmp.o +endif lib-y += csum.o ifeq ($(CONFIG_MMU), y) lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S index 35f358e70bdb6b..da23b8347e2d15 100644 --- a/arch/riscv/lib/memset.S +++ b/arch/riscv/lib/memset.S @@ 
-111,3 +111,5 @@ SYM_FUNC_START(__memset) ret SYM_FUNC_END(__memset) SYM_FUNC_ALIAS_WEAK(memset, __memset) +SYM_FUNC_ALIAS(__pi_memset, __memset) +SYM_FUNC_ALIAS(__pi___memset, __memset) diff --git a/arch/riscv/lib/strcmp.S b/arch/riscv/lib/strcmp.S index 687b2bea5c438c..57a5c00662315d 100644 --- a/arch/riscv/lib/strcmp.S +++ b/arch/riscv/lib/strcmp.S @@ -120,3 +120,5 @@ strcmp_zbb: .option pop #endif SYM_FUNC_END(strcmp) +SYM_FUNC_ALIAS(__pi_strcmp, strcmp) +EXPORT_SYMBOL(strcmp) diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S index 8ae3064e45ff00..962983b73251e3 100644 --- a/arch/riscv/lib/strlen.S +++ b/arch/riscv/lib/strlen.S @@ -131,3 +131,4 @@ strlen_zbb: #endif SYM_FUNC_END(strlen) SYM_FUNC_ALIAS(__pi_strlen, strlen) +EXPORT_SYMBOL(strlen) diff --git a/arch/riscv/lib/strncmp.S b/arch/riscv/lib/strncmp.S index aba5b3148621d8..7b2d0ff9ed6c7e 100644 --- a/arch/riscv/lib/strncmp.S +++ b/arch/riscv/lib/strncmp.S @@ -136,3 +136,5 @@ strncmp_zbb: .option pop #endif SYM_FUNC_END(strncmp) +SYM_FUNC_ALIAS(__pi_strncmp, strncmp) +EXPORT_SYMBOL(strncmp) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 1785782c2e5542..0e8c20adcd98df 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -37,6 +37,8 @@ #include "../kernel/head.h" +u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1]; + struct kernel_mapping kernel_map __ro_after_init; EXPORT_SYMBOL(kernel_map); #ifdef CONFIG_XIP_KERNEL @@ -917,7 +919,7 @@ static void __init relocate_kernel(void) static void __init create_kernel_page_table(pgd_t *pgdir, __always_unused bool early) { - uintptr_t va, end_va; + uintptr_t va, start_va, end_va; /* Map the flash resident part */ end_va = kernel_map.virt_addr + kernel_map.xiprom_sz; @@ -927,10 +929,11 @@ static void __init create_kernel_page_table(pgd_t *pgdir, PMD_SIZE, PAGE_KERNEL_EXEC); /* Map the data in RAM */ + start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start; end_va = kernel_map.virt_addr + kernel_map.size; - for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE) + for (va = start_va; va < end_va; va += PMD_SIZE) create_pgd_mapping(pgdir, va, - kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)), + kernel_map.phys_addr + (va - start_va), PMD_SIZE, PAGE_KERNEL); } #else @@ -1048,6 +1051,7 @@ static void __init pt_ops_set_late(void) #ifdef CONFIG_RANDOMIZE_BASE extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa); extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa); +extern u64 __init __pi_get_kaslr_seed_zkr(const uintptr_t dtb_pa); static int __init print_nokaslr(char *p) { @@ -1068,10 +1072,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) #ifdef CONFIG_RANDOMIZE_BASE if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) { - u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa); + u64 kaslr_seed = __pi_get_kaslr_seed_zkr(dtb_pa); u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start); u32 nr_pos; + if (kaslr_seed == 0) + kaslr_seed = __pi_get_kaslr_seed(dtb_pa); /* * Compute the number of positions available: we are limited * by the early page table that only has one PUD and we must @@ -1098,11 +1104,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); - kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; + kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; + kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - 
kernel_map.phys_addr + + (uintptr_t)&_sdata - (uintptr_t)&_start; #else kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL); kernel_map.phys_addr = (uintptr_t)(&_start); kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; + kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; #endif #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) @@ -1124,15 +1133,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) */ kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ? 0UL : PAGE_OFFSET - kernel_map.phys_addr; - kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; - /* - * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit - * kernel, whereas for 64-bit kernel, the end of the virtual address - * space is occupied by the modules/BPF/kernel mappings which reduces - * the available size of the linear mapping. - */ - memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0); + memory_limit = KERN_VIRT_SIZE; /* Sanity check alignment and size */ BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c index 533ec9055fa0da..4ae67324f99233 100644 --- a/arch/riscv/mm/pgtable.c +++ b/arch/riscv/mm/pgtable.c @@ -9,6 +9,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { + asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1) + : : : : svvptc); + if (!pte_same(ptep_get(ptep), entry)) __set_pte_at(vma->vm_mm, ptep, entry); /* @@ -16,6 +19,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma, * the case that the PTE changed and the spurious fault case. */ return true; + +svvptc: + if (!pte_same(ptep_get(ptep), entry)) { + __set_pte_at(vma->vm_mm, ptep, entry); + /* Here only not svadu is impacted */ + flush_tlb_page(vma, address); + return true; + } + + return false; } int ptep_test_and_clear_young(struct vm_area_struct *vma, diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile index f11945ee249031..fb9c917c9b4573 100644 --- a/arch/riscv/purgatory/Makefile +++ b/arch/riscv/purgatory/Makefile @@ -1,7 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o +ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),) purgatory-y += strcmp.o strlen.o strncmp.o +endif targets += $(purgatory-y) PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c60e699e99f5b3..d339fe4fdedf88 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -70,6 +70,7 @@ config S390 select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_DMA_OPS if PCI select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORCE_DMA_UNENCRYPTED select ARCH_HAS_FORTIFY_SOURCE @@ -137,7 +138,6 @@ config S390 select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS2 select DCACHE_WORD_ACCESS if !KMSAN - select DMA_OPS if PCI select DYNAMIC_FTRACE if FUNCTION_TRACER select FUNCTION_ALIGNMENT_8B if CC_IS_GCC select FUNCTION_ALIGNMENT_16B if !CC_IS_GCC @@ -243,6 +243,7 @@ config S390 select TRACE_IRQFLAGS_SUPPORT select TTY select USER_STACKTRACE_SUPPORT + select VDSO_GETRANDOM select VIRT_CPU_ACCOUNTING select ZONE_DMA # Note: keep the above list sorted alphabetically @@ -513,6 +514,26 @@ config SCHED_TOPOLOGY making when dealing with machines that have multi-threading, multiple cores or multiple books. 
+config SCHED_TOPOLOGY_VERTICAL + def_bool y + bool "Use vertical CPU polarization by default" + depends on SCHED_TOPOLOGY + help + Use vertical CPU polarization by default if available. + The default CPU polarization is horizontal. + +config HIPERDISPATCH_ON + def_bool y + bool "Use hiperdispatch on vertical polarization by default" + depends on SCHED_TOPOLOGY + depends on PROC_SYSCTL + help + Hiperdispatch aims to improve the CPU scheduler's decision + making when using vertical polarization by adjusting CPU + capacities dynamically. Set this option to use hiperdispatch + on vertical polarization by default. This can be overwritten + by sysctl's s390.hiperdispatch attribute later on. + source "kernel/Kconfig.hz" config CERT_STORE @@ -557,17 +578,13 @@ config EXPOLINE If unsure, say N. config EXPOLINE_EXTERN - def_bool y if EXPOLINE - depends on EXPOLINE - depends on CC_IS_GCC && GCC_VERSION >= 110200 - depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC)) - prompt "Generate expolines as extern functions." + def_bool EXPOLINE && CC_IS_GCC && GCC_VERSION >= 110200 && \ + $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC)) help - This option is required for some tooling like kpatch. The kernel is - compiled with -mindirect-branch=thunk-extern and requires a newer - compiler. - - If unsure, say N. + Generate expolines as external functions if the compiler supports it. + This option is required for some tooling like kpatch, if expolines + are enabled. The kernel is compiled with + -mindirect-branch=thunk-extern, which requires a newer compiler. choice prompt "Expoline default" diff --git a/arch/s390/Makefile.postlink b/arch/s390/Makefile.postlink new file mode 100644 index 00000000000000..df82f541076933 --- /dev/null +++ b/arch/s390/Makefile.postlink @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 +# =========================================================================== +# Post-link s390 pass +# =========================================================================== +# +# 1. Separate relocations from vmlinux into relocs.S. +# 2. Strip relocations from vmlinux. 
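+#
+# The generated relocs.S is consumed by arch/s390/boot/Makefile, which
+# builds it into the decompressor (see the relocs.S rule there).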
+ +PHONY := __archpost +__archpost: + +-include include/config/auto.conf +include $(srctree)/scripts/Kbuild.include + +CMD_RELOCS=arch/s390/tools/relocs +OUT_RELOCS = arch/s390/boot +quiet_cmd_relocs = RELOCS $(OUT_RELOCS)/relocs.S + cmd_relocs = \ + mkdir -p $(OUT_RELOCS); \ + $(CMD_RELOCS) $@ > $(OUT_RELOCS)/relocs.S + +quiet_cmd_strip_relocs = RSTRIP $@ + cmd_strip_relocs = \ + $(OBJCOPY) --remove-section='.rel.*' --remove-section='.rel__*' \ + --remove-section='.rela.*' --remove-section='.rela__*' $@ + +vmlinux: FORCE + $(call cmd,relocs) + $(call cmd,strip_relocs) + +clean: + @rm -f $(OUT_RELOCS)/relocs.S + +PHONY += FORCE clean + +FORCE: + +.PHONY: $(PHONY) diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 4f476884d34044..8bc1308ac892ab 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -11,35 +11,23 @@ KASAN_SANITIZE := n KCSAN_SANITIZE := n KMSAN_SANITIZE := n -KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR) -KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) - # -# Use minimum architecture for als.c to be able to print an error +# Use minimum architecture level so it is possible to print an error # message if the kernel is started on a machine which is too old # -ifndef CONFIG_CC_IS_CLANG -CC_FLAGS_MARCH_MINIMUM := -march=z900 -else CC_FLAGS_MARCH_MINIMUM := -march=z10 -endif - -ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM)) -AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH) -AFLAGS_head.o += $(CC_FLAGS_MARCH_MINIMUM) -AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH) -AFLAGS_mem.o += $(CC_FLAGS_MARCH_MINIMUM) -CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH) -CFLAGS_als.o += $(CC_FLAGS_MARCH_MINIMUM) -CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH) -CFLAGS_sclp_early_core.o += $(CC_FLAGS_MARCH_MINIMUM) -endif + +KBUILD_AFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_AFLAGS_DECOMPRESSOR)) +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_CFLAGS_DECOMPRESSOR)) +KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM) +KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM) CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o -obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o uv.o +obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o +obj-y += uv.o printk.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o @@ -109,11 +97,9 @@ OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -CMD_RELOCS=arch/s390/tools/relocs -quiet_cmd_relocs = RELOCS $@ - cmd_relocs = $(CMD_RELOCS) $< > $@ -$(obj)/relocs.S: vmlinux FORCE - $(call if_changed,relocs) +# relocs.S is created by the vmlinux postlink step. +$(obj)/relocs.S: vmlinux + @true suffix-$(CONFIG_KERNEL_GZIP) := .gz suffix-$(CONFIG_KERNEL_BZIP2) := .bz2 diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c index 47c48fbfb563d2..11e0c3d5dbc8c1 100644 --- a/arch/s390/boot/als.c +++ b/arch/s390/boot/als.c @@ -9,42 +9,8 @@ #include #include "boot.h" -/* - * The code within this file will be called very early. It may _not_ - * access anything within the bss section, since that is not cleared - * yet and may contain data (e.g. initrd) that must be saved by other - * code. - * For temporary objects the stack (16k) should be used. 
- */ - static unsigned long als[] = { FACILITIES_ALS }; -static void u16_to_hex(char *str, u16 val) -{ - int i, num; - - for (i = 1; i <= 4; i++) { - num = (val >> (16 - 4 * i)) & 0xf; - if (num >= 10) - num += 7; - *str++ = '0' + num; - } - *str = '\0'; -} - -static void print_machine_type(void) -{ - static char mach_str[80] = "Detected machine-type number: "; - char type_str[5]; - struct cpuid id; - - get_cpu_id(&id); - u16_to_hex(type_str, id.machine); - strcat(mach_str, type_str); - strcat(mach_str, "\n"); - sclp_early_printk(mach_str); -} - static void u16_to_decimal(char *str, u16 val) { int div = 1; @@ -80,8 +46,7 @@ void print_missing_facilities(void) * z/VM adds a four character prefix. */ if (strlen(als_str) > 70) { - strcat(als_str, "\n"); - sclp_early_printk(als_str); + boot_printk("%s\n", als_str); *als_str = '\0'; } u16_to_decimal(val_str, i * BITS_PER_LONG + j); @@ -89,16 +54,18 @@ void print_missing_facilities(void) first = 0; } } - strcat(als_str, "\n"); - sclp_early_printk(als_str); + boot_printk("%s\n", als_str); } static void facility_mismatch(void) { - sclp_early_printk("The Linux kernel requires more recent processor hardware\n"); - print_machine_type(); + struct cpuid id; + + get_cpu_id(&id); + boot_printk("The Linux kernel requires more recent processor hardware\n"); + boot_printk("Detected machine-type number: %4x\n", id.machine); print_missing_facilities(); - sclp_early_printk("See Principles of Operations for facility bits\n"); + boot_printk("See Principles of Operations for facility bits\n"); disabled_wait(); } diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h index 83e2ce050b6cef..7521a9d75fa241 100644 --- a/arch/s390/boot/boot.h +++ b/arch/s390/boot/boot.h @@ -70,7 +70,7 @@ void print_pgm_check_info(void); unsigned long randomize_within_range(unsigned long size, unsigned long align, unsigned long min, unsigned long max); void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit); -void __printf(1, 2) decompressor_printk(const char *fmt, ...); +void __printf(1, 2) boot_printk(const char *fmt, ...); void print_stacktrace(unsigned long sp); void error(char *m); int get_random(unsigned long limit, unsigned long *value); diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index 637c29c3f6e33a..0a47b16f641289 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -299,11 +299,11 @@ SYM_CODE_END(startup_normal) # the save area and does disabled wait with a faulty address. 
# SYM_CODE_START_LOCAL(startup_pgm_check_handler) - stmg %r8,%r15,__LC_SAVE_AREA_SYNC + stmg %r8,%r15,__LC_SAVE_AREA la %r8,4095 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8) stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8) - mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC + mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW ni __LC_RETURN_PSW,0xfc # remove IO and EX bits diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index 1773b72a6a7bc3..557462e62cd737 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -215,7 +215,7 @@ static void check_cleared_facilities(void) for (i = 0; i < ARRAY_SIZE(als); i++) { if ((stfle_fac_list[i] & als[i]) != als[i]) { - sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n"); + boot_printk("Warning: The Linux kernel requires facilities cleared via command line option\n"); print_missing_facilities(); break; } diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c index bd3bf5ef472df2..f864d2bff7754e 100644 --- a/arch/s390/boot/kaslr.c +++ b/arch/s390/boot/kaslr.c @@ -32,7 +32,7 @@ struct prng_parm { static int check_prng(void) { if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) { - sclp_early_printk("KASLR disabled: CPU has no PRNG\n"); + boot_printk("KASLR disabled: CPU has no PRNG\n"); return 0; } if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c index 5352b3d356da04..5abe59fb3bc08e 100644 --- a/arch/s390/boot/pgm_check_info.c +++ b/arch/s390/boot/pgm_check_info.c @@ -11,131 +11,19 @@ #include #include "boot.h" -const char hex_asc[] = "0123456789abcdef"; - -static char *as_hex(char *dst, unsigned long val, int pad) -{ - char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1); - - for (*p-- = 0; p >= dst; val >>= 4) - *p-- = hex_asc[val & 0x0f]; - return end; -} - -static char *symstart(char *p) -{ - while (*p) - p--; - return p + 1; -} - -static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len) -{ - /* symbol entries are in a form "10000 c4 startup\0" */ - char *a = _decompressor_syms_start; - char *b = _decompressor_syms_end; - unsigned long start; - unsigned long size; - char *pivot; - char *endp; - - while (a < b) { - pivot = symstart(a + (b - a) / 2); - start = simple_strtoull(pivot, &endp, 16); - size = simple_strtoull(endp + 1, &endp, 16); - if (ip < start) { - b = pivot; - continue; - } - if (ip > start + size) { - a = pivot + strlen(pivot) + 1; - continue; - } - *off = ip - start; - *len = size; - return endp + 1; - } - return NULL; -} - -static noinline char *strsym(void *ip) -{ - static char buf[64]; - unsigned short off; - unsigned short len; - char *p; - - p = findsym((unsigned long)ip, &off, &len); - if (p) { - strncpy(buf, p, sizeof(buf)); - /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ - p = buf + strnlen(buf, sizeof(buf) - 15); - strcpy(p, "+0x"); - p = as_hex(p + 3, off, 0); - strcpy(p, "/0x"); - as_hex(p + 3, len, 0); - } else { - as_hex(buf, (unsigned long)ip, 16); - } - return buf; -} - -void decompressor_printk(const char *fmt, ...) 
-{ - char buf[1024] = { 0 }; - char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */ - unsigned long pad; - char *p = buf; - va_list args; - - va_start(args, fmt); - for (; p < end && *fmt; fmt++) { - if (*fmt != '%') { - *p++ = *fmt; - continue; - } - pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0; - switch (*fmt) { - case 's': - p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf)); - break; - case 'p': - if (*++fmt != 'S') - goto out; - p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf)); - break; - case 'l': - if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad)) - goto out; - p = as_hex(p, va_arg(args, unsigned long), pad); - break; - case 'x': - if (end - p <= max(sizeof(int) * 2, pad)) - goto out; - p = as_hex(p, va_arg(args, unsigned int), pad); - break; - default: - goto out; - } - } -out: - va_end(args); - sclp_early_printk(buf); -} - void print_stacktrace(unsigned long sp) { struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start, (unsigned long)_stack_end }; bool first = true; - decompressor_printk("Call Trace:\n"); + boot_printk("Call Trace:\n"); while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) { struct stack_frame *sf = (struct stack_frame *)sp; - decompressor_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" : - " sp:%016lx [<%016lx>] %pS\n", - sp, sf->gprs[8], (void *)sf->gprs[8]); + boot_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" : + " sp:%016lx [<%016lx>] %pS\n", + sp, sf->gprs[8], (void *)sf->gprs[8]); if (sf->back_chain <= sp) break; sp = sf->back_chain; @@ -148,34 +36,30 @@ void print_pgm_check_info(void) unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area; struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area); - decompressor_printk("Linux version %s\n", kernel_version); + boot_printk("Linux version %s\n", kernel_version); if (!is_prot_virt_guest() && early_command_line[0]) - decompressor_printk("Kernel command line: %s\n", early_command_line); - decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n", - get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1); + boot_printk("Kernel command line: %s\n", early_command_line); + boot_printk("Kernel fault: interruption code %04x ilc:%x\n", + get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1); if (kaslr_enabled()) { - decompressor_printk("Kernel random base: %lx\n", __kaslr_offset); - decompressor_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys); + boot_printk("Kernel random base: %lx\n", __kaslr_offset); + boot_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys); } - decompressor_printk("PSW : %016lx %016lx (%pS)\n", - get_lowcore()->psw_save_area.mask, - get_lowcore()->psw_save_area.addr, - (void *)get_lowcore()->psw_save_area.addr); - decompressor_printk( + boot_printk("PSW : %016lx %016lx (%pS)\n", + get_lowcore()->psw_save_area.mask, + get_lowcore()->psw_save_area.addr, + (void *)get_lowcore()->psw_save_area.addr); + boot_printk( " R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n", psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->eaba); - decompressor_printk("GPRS: %016lx %016lx %016lx %016lx\n", - gpregs[0], gpregs[1], gpregs[2], gpregs[3]); - decompressor_printk(" %016lx %016lx %016lx %016lx\n", - gpregs[4], gpregs[5], gpregs[6], gpregs[7]); - decompressor_printk(" %016lx %016lx %016lx %016lx\n", - gpregs[8], gpregs[9], gpregs[10], 
gpregs[11]); - decompressor_printk(" %016lx %016lx %016lx %016lx\n", - gpregs[12], gpregs[13], gpregs[14], gpregs[15]); + boot_printk("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]); + boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]); + boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]); + boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]); print_stacktrace(get_lowcore()->gpregs_save_area[15]); - decompressor_printk("Last Breaking-Event-Address:\n"); - decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break, - (void *)get_lowcore()->pgm_last_break); + boot_printk("Last Breaking-Event-Address:\n"); + boot_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break, + (void *)get_lowcore()->pgm_last_break); } diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c index 4c9ad8258f7eaf..1d131a81cb8be9 100644 --- a/arch/s390/boot/physmem_info.c +++ b/arch/s390/boot/physmem_info.c @@ -190,27 +190,27 @@ static void die_oom(unsigned long size, unsigned long align, unsigned long min, enum reserved_range_type t; int i; - decompressor_printk("Linux version %s\n", kernel_version); + boot_printk("Linux version %s\n", kernel_version); if (!is_prot_virt_guest() && early_command_line[0]) - decompressor_printk("Kernel command line: %s\n", early_command_line); - decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n", - size, align, min, max); - decompressor_printk("Reserved memory ranges:\n"); + boot_printk("Kernel command line: %s\n", early_command_line); + boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n", + size, align, min, max); + boot_printk("Reserved memory ranges:\n"); for_each_physmem_reserved_range(t, range, &start, &end) { - decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t)); + boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t)); total_reserved_mem += end - start; } - decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n", - get_physmem_info_source(), physmem_info.info_source); + boot_printk("Usable online memory ranges (info source: %s [%x]):\n", + get_physmem_info_source(), physmem_info.info_source); for_each_physmem_usable_range(i, &start, &end) { - decompressor_printk("%016lx %016lx\n", start, end); + boot_printk("%016lx %016lx\n", start, end); total_mem += end - start; } - decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n", - total_mem, total_reserved_mem, - total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0); + boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n", + total_mem, total_reserved_mem, + total_mem > total_reserved_mem ? 
total_mem - total_reserved_mem : 0); print_stacktrace(current_frame_address()); - sclp_early_printk("\n\n -- System halted\n"); + boot_printk("\n\n -- System halted\n"); disabled_wait(); } diff --git a/arch/s390/boot/printk.c b/arch/s390/boot/printk.c new file mode 100644 index 00000000000000..35f18f2b936e0f --- /dev/null +++ b/arch/s390/boot/printk.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "boot.h" + +const char hex_asc[] = "0123456789abcdef"; + +static char *as_hex(char *dst, unsigned long val, int pad) +{ + char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1); + + for (*p-- = 0; p >= dst; val >>= 4) + *p-- = hex_asc[val & 0x0f]; + return end; +} + +static char *symstart(char *p) +{ + while (*p) + p--; + return p + 1; +} + +static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len) +{ + /* symbol entries are in a form "10000 c4 startup\0" */ + char *a = _decompressor_syms_start; + char *b = _decompressor_syms_end; + unsigned long start; + unsigned long size; + char *pivot; + char *endp; + + while (a < b) { + pivot = symstart(a + (b - a) / 2); + start = simple_strtoull(pivot, &endp, 16); + size = simple_strtoull(endp + 1, &endp, 16); + if (ip < start) { + b = pivot; + continue; + } + if (ip > start + size) { + a = pivot + strlen(pivot) + 1; + continue; + } + *off = ip - start; + *len = size; + return endp + 1; + } + return NULL; +} + +static noinline char *strsym(void *ip) +{ + static char buf[64]; + unsigned short off; + unsigned short len; + char *p; + + p = findsym((unsigned long)ip, &off, &len); + if (p) { + strncpy(buf, p, sizeof(buf)); + /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ + p = buf + strnlen(buf, sizeof(buf) - 15); + strcpy(p, "+0x"); + p = as_hex(p + 3, off, 0); + strcpy(p, "/0x"); + as_hex(p + 3, len, 0); + } else { + as_hex(buf, (unsigned long)ip, 16); + } + return buf; +} + +void boot_printk(const char *fmt, ...) +{ + char buf[1024] = { 0 }; + char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */ + unsigned long pad; + char *p = buf; + va_list args; + + va_start(args, fmt); + for (; p < end && *fmt; fmt++) { + if (*fmt != '%') { + *p++ = *fmt; + continue; + } + pad = isdigit(*++fmt) ? 
simple_strtol(fmt, (char **)&fmt, 10) : 0; + switch (*fmt) { + case 's': + p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf)); + break; + case 'p': + if (*++fmt != 'S') + goto out; + p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf)); + break; + case 'l': + if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad)) + goto out; + p = as_hex(p, va_arg(args, unsigned long), pad); + break; + case 'x': + if (end - p <= max(sizeof(int) * 2, pad)) + goto out; + p = as_hex(p, va_arg(args, unsigned int), pad); + break; + default: + goto out; + } + } +out: + va_end(args); + sclp_early_printk(buf); +} diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index c73b5118ad429a..c8f149ad77e584 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -39,10 +39,7 @@ struct machine_info machine; void error(char *x) { - sclp_early_printk("\n\n"); - sclp_early_printk(x); - sclp_early_printk("\n\n -- System halted"); - + boot_printk("\n\n%s\n\n -- System halted", x); disabled_wait(); } @@ -296,7 +293,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size) kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE); } else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) { kernel_start = round_down(vmax - kernel_size, THREAD_SIZE); - decompressor_printk("The kernel base address is forced to %lx\n", kernel_start); + boot_printk("The kernel base address is forced to %lx\n", kernel_start); } else { kernel_start = __NO_KASLR_START_KERNEL; } diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index ea63a7342f5ff2..9b57add02cd5c4 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -59,6 +59,7 @@ CONFIG_CMM=m CONFIG_APPLDATA_BASE=y CONFIG_S390_HYPFS_FS=y CONFIG_KVM=m +CONFIG_KVM_S390_UCONTROL=y CONFIG_S390_UNWIND_SELFTEST=m CONFIG_S390_KPROBES_SANITY_TEST=m CONFIG_S390_MODULES_SANITY_TEST=m @@ -794,8 +795,12 @@ CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_CHACHA_S390=m +CONFIG_CRYPTO_HMAC_S390=m CONFIG_ZCRYPT=m CONFIG_PKEY=m +CONFIG_PKEY_CCA=m +CONFIG_PKEY_EP11=m +CONFIG_PKEY_PCKMO=m CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_SYSTEM_BLACKLIST_KEYRING=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index d8b28ff8ff451e..df4addd1834ab2 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -781,8 +781,12 @@ CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_CHACHA_S390=m +CONFIG_CRYPTO_HMAC_S390=m CONFIG_ZCRYPT=m CONFIG_PKEY=m +CONFIG_PKEY_CCA=m +CONFIG_PKEY_EP11=m +CONFIG_PKEY_PCKMO=m CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_SYSTEM_BLACKLIST_KEYRING=y diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig index 06ee706b0d7886..d3eb3a23369321 100644 --- a/arch/s390/crypto/Kconfig +++ b/arch/s390/crypto/Kconfig @@ -132,4 +132,14 @@ config CRYPTO_CHACHA_S390 It is available as of z13. +config CRYPTO_HMAC_S390 + tristate "Keyed-hash message authentication code: HMAC" + depends on S390 + select CRYPTO_HASH + help + s390 specific HMAC hardware support for SHA224, SHA256, SHA384 and + SHA512. 
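To illustrate the key handling behind this option, here is a rough user-space sketch of the HMAC key preparation rule (RFC 2104) that the driver's setkey path follows; dummy_hash() is a made-up placeholder for the CPACF KLMD call, and the sizes are those of the SHA-256 variant:

#include <stdio.h>
#include <string.h>

#define HMAC_BLOCK_SIZE	64	/* SHA-224/256 block size; SHA-384/512 use 128 */
#define DIGEST_SIZE	32	/* SHA-256 digest size */

/* Dummy stand-in for the hardware hash used by the driver. */
static void dummy_hash(unsigned char *digest, const unsigned char *msg, unsigned int len)
{
	unsigned int i;

	memset(digest, 0, DIGEST_SIZE);
	for (i = 0; i < len; i++)
		digest[i % DIGEST_SIZE] ^= msg[i];
}

/* Keys longer than one block are hashed first, shorter keys are copied
 * and the rest of the block stays zero. */
static void hmac_prepare_key(unsigned char block[HMAC_BLOCK_SIZE],
			     const unsigned char *key, unsigned int keylen)
{
	memset(block, 0, HMAC_BLOCK_SIZE);
	if (keylen > HMAC_BLOCK_SIZE)
		dummy_hash(block, key, keylen);
	else
		memcpy(block, key, keylen);
}

int main(void)
{
	unsigned char block[HMAC_BLOCK_SIZE];
	unsigned char longkey[100] = { 1, 2, 3 };

	hmac_prepare_key(block, longkey, sizeof(longkey));
	printf("first key byte after preparation: %02x\n", block[0]);
	return 0;
}

The same rule applies to the SHA-384/512 variants with a 128-byte block.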
+ + Architecture: s390 + endmenu diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile index 1b1cc478fa9438..a0cb96937c3de2 100644 --- a/arch/s390/crypto/Makefile +++ b/arch/s390/crypto/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o obj-$(CONFIG_S390_PRNG) += prng.o obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o +obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o obj-y += arch_random.o crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index c6fe5405de4a4c..8cc02d6e0d0fec 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -51,8 +51,13 @@ struct s390_aes_ctx { }; struct s390_xts_ctx { - u8 key[32]; - u8 pcc_key[32]; + union { + u8 keys[64]; + struct { + u8 key[32]; + u8 pcc_key[32]; + }; + }; int key_len; unsigned long fc; struct crypto_skcipher *fallback; @@ -526,6 +531,108 @@ static struct skcipher_alg xts_aes_alg = { .decrypt = xts_aes_decrypt, }; +static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); + unsigned long fc; + int err; + + err = xts_fallback_setkey(tfm, in_key, key_len); + if (err) + return err; + + /* Pick the correct function code based on the key length */ + fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL : + (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0; + + /* Check if the function code is available */ + xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + if (!xts_ctx->fc) + return 0; + + /* Store double-key */ + memcpy(xts_ctx->keys, in_key, key_len); + xts_ctx->key_len = key_len; + return 0; +} + +static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); + unsigned int offset, nbytes, n; + struct skcipher_walk walk; + int ret; + struct { + __u8 key[64]; + __u8 tweak[16]; + __u8 nap[16]; + } fxts_param = { + .nap = {0}, + }; + + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + + if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) { + struct skcipher_request *subreq = skcipher_request_ctx(req); + + *subreq = *req; + skcipher_request_set_tfm(subreq, xts_ctx->fallback); + return (modifier & CPACF_DECRYPT) ? 
+ crypto_skcipher_decrypt(subreq) : + crypto_skcipher_encrypt(subreq); + } + + ret = skcipher_walk_virt(&walk, req, false); + if (ret) + return ret; + + offset = xts_ctx->key_len & 0x20; + memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len); + memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE); + fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */ + + while ((nbytes = walk.nbytes) != 0) { + /* only use complete blocks */ + n = nbytes & ~(AES_BLOCK_SIZE - 1); + cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset, + walk.dst.virt.addr, walk.src.virt.addr, n); + ret = skcipher_walk_done(&walk, nbytes - n); + } + memzero_explicit(&fxts_param, sizeof(fxts_param)); + return ret; +} + +static int fullxts_aes_encrypt(struct skcipher_request *req) +{ + return fullxts_aes_crypt(req, 0); +} + +static int fullxts_aes_decrypt(struct skcipher_request *req) +{ + return fullxts_aes_crypt(req, CPACF_DECRYPT); +} + +static struct skcipher_alg fullxts_aes_alg = { + .base.cra_name = "xts(aes)", + .base.cra_driver_name = "full-xts-aes-s390", + .base.cra_priority = 403, /* aes-xts-s390 + 1 */ + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_xts_ctx), + .base.cra_module = THIS_MODULE, + .init = xts_fallback_init, + .exit = xts_fallback_exit, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = fullxts_aes_set_key, + .encrypt = fullxts_aes_encrypt, + .decrypt = fullxts_aes_decrypt, +}; + static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { @@ -955,7 +1062,7 @@ static struct aead_alg gcm_aes_aead = { }; static struct crypto_alg *aes_s390_alg; -static struct skcipher_alg *aes_s390_skcipher_algs[4]; +static struct skcipher_alg *aes_s390_skcipher_algs[5]; static int aes_s390_skciphers_num; static struct aead_alg *aes_s390_aead_alg; @@ -1012,6 +1119,13 @@ static int __init aes_s390_init(void) goto out_err; } + if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) || + cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) { + ret = aes_s390_register_skcipher(&fullxts_aes_alg); + if (ret) + goto out_err; + } + if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) || cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) { ret = aes_s390_register_skcipher(&xts_aes_alg); diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c new file mode 100644 index 00000000000000..bba9a818dfdcd9 --- /dev/null +++ b/arch/s390/crypto/hmac_s390.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright IBM Corp. 2024 + * + * s390 specific HMAC support. + */ + +#define KMSG_COMPONENT "hmac_s390" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include +#include + +/* + * KMAC param block layout for sha2 function codes: + * The layout of the param block for the KMAC instruction depends on the + * blocksize of the used hashing sha2-algorithm function codes. The param block + * contains the hash chaining value (cv), the input message bit-length (imbl) + * and the hmac-secret (key). To prevent code duplication, the sizes of all + * these are calculated based on the blocksize. 
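A stand-alone sketch of the size and offset arithmetic this layout implies, reusing the SHA2_*_SIZE/SHA2_*_OFFSET macro definitions that appear further down; block sizes 64 and 128 correspond to SHA-224/256 and SHA-384/512:

#include <stdio.h>

#define SHA2_CV_SIZE(bs)	((bs) >> 1)
#define SHA2_IMBL_SIZE(bs)	((bs) >> 3)
#define SHA2_IMBL_OFFSET(bs)	(SHA2_CV_SIZE(bs))
#define SHA2_KEY_OFFSET(bs)	(SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))

int main(void)
{
	unsigned int bs[] = { 64, 128 };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("blocksize %3u: cv %2u bytes, imbl at %2u (%2u bytes), key at %u\n",
		       bs[i], SHA2_CV_SIZE(bs[i]), SHA2_IMBL_OFFSET(bs[i]),
		       SHA2_IMBL_SIZE(bs[i]), SHA2_KEY_OFFSET(bs[i]));
	return 0;
}

Running it prints cv 32, imbl at 32 and key at 40 for the 64-byte block, and cv 64, imbl at 64 and key at 80 for the 128-byte block, which matches the sizes table in the comment.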
+ * + * param-block: + * +-------+ + * | cv | + * +-------+ + * | imbl | + * +-------+ + * | key | + * +-------+ + * + * sizes: + * part | sh2-alg | calculation | size | type + * -----+---------+-------------+------+-------- + * cv | 224/256 | blocksize/2 | 32 | u64[8] + * | 384/512 | | 64 | u128[8] + * imbl | 224/256 | blocksize/8 | 8 | u64 + * | 384/512 | | 16 | u128 + * key | 224/256 | blocksize | 64 | u8[64] + * | 384/512 | | 128 | u8[128] + */ + +#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE +#define MAX_IMBL_SIZE sizeof(u128) +#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE + +#define SHA2_CV_SIZE(bs) ((bs) >> 1) +#define SHA2_IMBL_SIZE(bs) ((bs) >> 3) + +#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs)) +#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs)) + +struct s390_hmac_ctx { + u8 key[MAX_BLOCK_SIZE]; +}; + +union s390_kmac_gr0 { + unsigned long reg; + struct { + unsigned long : 48; + unsigned long ikp : 1; + unsigned long iimp : 1; + unsigned long ccup : 1; + unsigned long : 6; + unsigned long fc : 7; + }; +}; + +struct s390_kmac_sha2_ctx { + u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE]; + union s390_kmac_gr0 gr0; + u8 buf[MAX_BLOCK_SIZE]; + unsigned int buflen; +}; + +/* + * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize + */ +static inline void kmac_sha2_set_imbl(u8 *param, unsigned int buflen, + unsigned int blocksize) +{ + u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize); + + switch (blocksize) { + case SHA256_BLOCK_SIZE: + *(u64 *)imbl = (u64)buflen * BITS_PER_BYTE; + break; + case SHA512_BLOCK_SIZE: + *(u128 *)imbl = (u128)buflen * BITS_PER_BYTE; + break; + default: + break; + } +} + +static int hash_key(const u8 *in, unsigned int inlen, + u8 *digest, unsigned int digestsize) +{ + unsigned long func; + union { + struct sha256_paramblock { + u32 h[8]; + u64 mbl; + } sha256; + struct sha512_paramblock { + u64 h[8]; + u128 mbl; + } sha512; + } __packed param; + +#define PARAM_INIT(x, y, z) \ + param.sha##x.h[0] = SHA##y ## _H0; \ + param.sha##x.h[1] = SHA##y ## _H1; \ + param.sha##x.h[2] = SHA##y ## _H2; \ + param.sha##x.h[3] = SHA##y ## _H3; \ + param.sha##x.h[4] = SHA##y ## _H4; \ + param.sha##x.h[5] = SHA##y ## _H5; \ + param.sha##x.h[6] = SHA##y ## _H6; \ + param.sha##x.h[7] = SHA##y ## _H7; \ + param.sha##x.mbl = (z) + + switch (digestsize) { + case SHA224_DIGEST_SIZE: + func = CPACF_KLMD_SHA_256; + PARAM_INIT(256, 224, inlen * 8); + break; + case SHA256_DIGEST_SIZE: + func = CPACF_KLMD_SHA_256; + PARAM_INIT(256, 256, inlen * 8); + break; + case SHA384_DIGEST_SIZE: + func = CPACF_KLMD_SHA_512; + PARAM_INIT(512, 384, inlen * 8); + break; + case SHA512_DIGEST_SIZE: + func = CPACF_KLMD_SHA_512; + PARAM_INIT(512, 512, inlen * 8); + break; + default: + return -EINVAL; + } + +#undef PARAM_INIT + + cpacf_klmd(func, ¶m, in, inlen); + + memcpy(digest, ¶m, digestsize); + + return 0; +} + +static int s390_hmac_sha2_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(tfm); + unsigned int ds = crypto_shash_digestsize(tfm); + unsigned int bs = crypto_shash_blocksize(tfm); + + memset(tfm_ctx, 0, sizeof(*tfm_ctx)); + + if (keylen > bs) + return hash_key(key, keylen, tfm_ctx->key, ds); + + memcpy(tfm_ctx->key, key, keylen); + return 0; +} + +static int s390_hmac_sha2_init(struct shash_desc *desc) +{ + struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(desc->tfm); + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); + unsigned int bs = 
crypto_shash_blocksize(desc->tfm); + + memcpy(ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->key, bs); + + ctx->buflen = 0; + ctx->gr0.reg = 0; + switch (crypto_shash_digestsize(desc->tfm)) { + case SHA224_DIGEST_SIZE: + ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_224; + break; + case SHA256_DIGEST_SIZE: + ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_256; + break; + case SHA384_DIGEST_SIZE: + ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_384; + break; + case SHA512_DIGEST_SIZE: + ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_512; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int s390_hmac_sha2_update(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); + unsigned int bs = crypto_shash_blocksize(desc->tfm); + unsigned int offset, n; + + /* check current buffer */ + offset = ctx->buflen % bs; + ctx->buflen += len; + if (offset + len < bs) + goto store; + + /* process one stored block */ + if (offset) { + n = bs - offset; + memcpy(ctx->buf + offset, data, n); + ctx->gr0.iimp = 1; + _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs); + data += n; + len -= n; + offset = 0; + } + /* process as many blocks as possible */ + if (len >= bs) { + n = (len / bs) * bs; + ctx->gr0.iimp = 1; + _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n); + data += n; + len -= n; + } +store: + /* store incomplete block in buffer */ + if (len) + memcpy(ctx->buf + offset, data, len); + + return 0; +} + +static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out) +{ + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); + unsigned int bs = crypto_shash_blocksize(desc->tfm); + + ctx->gr0.iimp = 0; + kmac_sha2_set_imbl(ctx->param, ctx->buflen, bs); + _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen % bs); + memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm)); + + return 0; +} + +static int s390_hmac_sha2_digest(struct shash_desc *desc, + const u8 *data, unsigned int len, u8 *out) +{ + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); + unsigned int ds = crypto_shash_digestsize(desc->tfm); + int rc; + + rc = s390_hmac_sha2_init(desc); + if (rc) + return rc; + + ctx->gr0.iimp = 0; + kmac_sha2_set_imbl(ctx->param, len, + crypto_shash_blocksize(desc->tfm)); + _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, len); + memcpy(out, ctx->param, ds); + + return 0; +} + +#define S390_HMAC_SHA2_ALG(x) { \ + .fc = CPACF_KMAC_HMAC_SHA_##x, \ + .alg = { \ + .init = s390_hmac_sha2_init, \ + .update = s390_hmac_sha2_update, \ + .final = s390_hmac_sha2_final, \ + .digest = s390_hmac_sha2_digest, \ + .setkey = s390_hmac_sha2_setkey, \ + .descsize = sizeof(struct s390_kmac_sha2_ctx), \ + .halg = { \ + .digestsize = SHA##x##_DIGEST_SIZE, \ + .base = { \ + .cra_name = "hmac(sha" #x ")", \ + .cra_driver_name = "hmac_s390_sha" #x, \ + .cra_blocksize = SHA##x##_BLOCK_SIZE, \ + .cra_priority = 400, \ + .cra_ctxsize = sizeof(struct s390_hmac_ctx), \ + .cra_module = THIS_MODULE, \ + }, \ + }, \ + }, \ +} + +static struct s390_hmac_alg { + bool registered; + unsigned int fc; + struct shash_alg alg; +} s390_hmac_algs[] = { + S390_HMAC_SHA2_ALG(224), + S390_HMAC_SHA2_ALG(256), + S390_HMAC_SHA2_ALG(384), + S390_HMAC_SHA2_ALG(512), +}; + +static __always_inline void _s390_hmac_algs_unregister(void) +{ + struct s390_hmac_alg *hmac; + int i; + + for (i = ARRAY_SIZE(s390_hmac_algs) - 1; i >= 0; i--) { + hmac = &s390_hmac_algs[i]; + if (!hmac->registered) + continue; + crypto_unregister_shash(&hmac->alg); + } +} + +static int __init hmac_s390_init(void) +{ + struct s390_hmac_alg *hmac; + 
int i, rc = -ENODEV; + + if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256)) + return -ENODEV; + if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512)) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(s390_hmac_algs); i++) { + hmac = &s390_hmac_algs[i]; + if (!cpacf_query_func(CPACF_KMAC, hmac->fc)) + continue; + + rc = crypto_register_shash(&hmac->alg); + if (rc) { + pr_err("unable to register %s\n", + hmac->alg.halg.base.cra_name); + goto out; + } + hmac->registered = true; + pr_debug("registered %s\n", hmac->alg.halg.base.cra_name); + } + return rc; +out: + _s390_hmac_algs_unregister(); + return rc; +} + +static void __exit hmac_s390_exit(void) +{ + _s390_hmac_algs_unregister(); +} + +module_cpu_feature_match(S390_CPU_FEATURE_MSA, hmac_s390_init); +module_exit(hmac_s390_exit); + +MODULE_DESCRIPTION("S390 HMAC driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index 99ea3f12c5d2ab..ef4491ccbbf84a 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -133,8 +133,8 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb, if (msleep_interruptible(1000)) return -EINTR; } - ret = pkey_keyblob2pkey(kb->key, kb->keylen, - pk->protkey, &pk->len, &pk->type); + ret = pkey_key2protkey(kb->key, kb->keylen, + pk->protkey, &pk->len, &pk->type); } return ret; @@ -802,7 +802,10 @@ static int __init paes_s390_init(void) module_init(paes_s390_init); module_exit(paes_s390_fini); -MODULE_ALIAS_CRYPTO("paes"); +MODULE_ALIAS_CRYPTO("ecb(paes)"); +MODULE_ALIAS_CRYPTO("cbc(paes)"); +MODULE_ALIAS_CRYPTO("ctr(paes)"); +MODULE_ALIAS_CRYPTO("xts(paes)"); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys"); MODULE_LICENSE("GPL"); diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h index 65ea12fc87a1fd..2bb22db54c31a2 100644 --- a/arch/s390/crypto/sha.h +++ b/arch/s390/crypto/sha.h @@ -25,6 +25,7 @@ struct s390_sha_ctx { u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)]; u8 buf[SHA_MAX_BLOCK_SIZE]; int func; /* KIMD function to use */ + int first_message_part; }; struct shash_desc; diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c index e1350e033a3258..a84ef692f572da 100644 --- a/arch/s390/crypto/sha3_256_s390.c +++ b/arch/s390/crypto/sha3_256_s390.c @@ -21,9 +21,11 @@ static int sha3_256_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - memset(sctx->state, 0, sizeof(sctx->state)); + if (!test_facility(86)) /* msa 12 */ + memset(sctx->state, 0, sizeof(sctx->state)); sctx->count = 0; sctx->func = CPACF_KIMD_SHA3_256; + sctx->first_message_part = 1; return 0; } @@ -36,6 +38,7 @@ static int sha3_256_export(struct shash_desc *desc, void *out) octx->rsiz = sctx->count; memcpy(octx->st, sctx->state, sizeof(octx->st)); memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); + octx->partial = sctx->first_message_part; return 0; } @@ -48,6 +51,7 @@ static int sha3_256_import(struct shash_desc *desc, const void *in) sctx->count = ictx->rsiz; memcpy(sctx->state, ictx->st, sizeof(ictx->st)); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); + sctx->first_message_part = ictx->partial; sctx->func = CPACF_KIMD_SHA3_256; return 0; @@ -61,6 +65,7 @@ static int sha3_224_import(struct shash_desc *desc, const void *in) sctx->count = ictx->rsiz; memcpy(sctx->state, ictx->st, sizeof(ictx->st)); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); + sctx->first_message_part = ictx->partial; sctx->func = CPACF_KIMD_SHA3_224; return 0; @@ -88,9 +93,11 @@ static int 
sha3_224_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - memset(sctx->state, 0, sizeof(sctx->state)); + if (!test_facility(86)) /* msa 12 */ + memset(sctx->state, 0, sizeof(sctx->state)); sctx->count = 0; sctx->func = CPACF_KIMD_SHA3_224; + sctx->first_message_part = 1; return 0; } diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c index 06c142ed9bb1d3..07528fc98ff7df 100644 --- a/arch/s390/crypto/sha3_512_s390.c +++ b/arch/s390/crypto/sha3_512_s390.c @@ -20,9 +20,11 @@ static int sha3_512_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - memset(sctx->state, 0, sizeof(sctx->state)); + if (!test_facility(86)) /* msa 12 */ + memset(sctx->state, 0, sizeof(sctx->state)); sctx->count = 0; sctx->func = CPACF_KIMD_SHA3_512; + sctx->first_message_part = 1; return 0; } @@ -37,6 +39,7 @@ static int sha3_512_export(struct shash_desc *desc, void *out) memcpy(octx->st, sctx->state, sizeof(octx->st)); memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); + octx->partial = sctx->first_message_part; return 0; } @@ -52,6 +55,7 @@ static int sha3_512_import(struct shash_desc *desc, const void *in) memcpy(sctx->state, ictx->st, sizeof(ictx->st)); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); + sctx->first_message_part = ictx->partial; sctx->func = CPACF_KIMD_SHA3_512; return 0; @@ -68,6 +72,7 @@ static int sha3_384_import(struct shash_desc *desc, const void *in) memcpy(sctx->state, ictx->st, sizeof(ictx->st)); memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); + sctx->first_message_part = ictx->partial; sctx->func = CPACF_KIMD_SHA3_384; return 0; @@ -97,9 +102,11 @@ static int sha3_384_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - memset(sctx->state, 0, sizeof(sctx->state)); + if (!test_facility(86)) /* msa 12 */ + memset(sctx->state, 0, sizeof(sctx->state)); sctx->count = 0; sctx->func = CPACF_KIMD_SHA3_384; + sctx->first_message_part = 1; return 0; } diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index 686fe7aa192f45..961d7d522af157 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c @@ -18,6 +18,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) struct s390_sha_ctx *ctx = shash_desc_ctx(desc); unsigned int bsize = crypto_shash_blocksize(desc->tfm); unsigned int index, n; + int fc; /* how much is already in the buffer? */ index = ctx->count % bsize; @@ -26,10 +27,16 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) if ((index + len) < bsize) goto store; + fc = ctx->func; + if (ctx->first_message_part) + fc |= test_facility(86) ? 
CPACF_KIMD_NIP : 0; + /* process one stored block */ if (index) { memcpy(ctx->buf + index, data, bsize - index); - cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize); + cpacf_kimd(fc, ctx->state, ctx->buf, bsize); + ctx->first_message_part = 0; + fc &= ~CPACF_KIMD_NIP; data += bsize - index; len -= bsize - index; index = 0; @@ -38,7 +45,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) /* process as many blocks as possible */ if (len >= bsize) { n = (len / bsize) * bsize; - cpacf_kimd(ctx->func, ctx->state, data, n); + cpacf_kimd(fc, ctx->state, data, n); + ctx->first_message_part = 0; data += n; len -= n; } @@ -75,7 +83,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out) unsigned int bsize = crypto_shash_blocksize(desc->tfm); u64 bits; unsigned int n; - int mbl_offset; + int mbl_offset, fc; n = ctx->count % bsize; bits = ctx->count * 8; @@ -109,7 +117,11 @@ int s390_sha_final(struct shash_desc *desc, u8 *out) return -EINVAL; } - cpacf_klmd(ctx->func, ctx->state, ctx->buf, n); + fc = ctx->func; + fc |= test_facility(86) ? CPACF_KLMD_DUFOP : 0; + if (ctx->first_message_part) + fc |= CPACF_KLMD_NIP; + cpacf_klmd(fc, ctx->state, ctx->buf, n); /* copy digest to out */ memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm)); diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h index 65f4036fd541a2..83ebf54cca6b0f 100644 --- a/arch/s390/hypfs/hypfs.h +++ b/arch/s390/hypfs/hypfs.h @@ -78,7 +78,6 @@ struct hypfs_dbfs_file { struct dentry *dentry; }; -extern void hypfs_dbfs_exit(void); extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df); extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df); diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index 0e855c5e91c5c2..5d9effb0867cde 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -76,7 +76,6 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static const struct file_operations dbfs_ops = { .read = dbfs_read, - .llseek = no_llseek, .unlocked_ioctl = dbfs_ioctl, }; diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 26a009f9c49e92..c8af67d2099417 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -29,8 +29,6 @@ static enum diag204_format diag204_info_type; /* used diag 204 data format */ static void *diag204_buf; /* 4K aligned buffer for diag204 data */ static int diag204_buf_pages; /* number of pages for diag204 data */ -static struct dentry *dbfs_d204_file; - enum diag204_format diag204_get_info_type(void) { return diag204_info_type; @@ -214,16 +212,13 @@ __init int hypfs_diag_init(void) hypfs_dbfs_create_file(&dbfs_file_d204); rc = hypfs_diag_fs_init(); - if (rc) { + if (rc) pr_err("The hardware system does not provide all functions required by hypfs\n"); - debugfs_remove(dbfs_d204_file); - } return rc; } void hypfs_diag_exit(void) { - debugfs_remove(dbfs_d204_file); hypfs_diag_fs_exit(); diag204_free_buffer(); hypfs_dbfs_remove_file(&dbfs_file_d204); diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 858beaf4a8cb76..d428635abf08c2 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -443,7 +443,6 @@ static const struct file_operations hypfs_file_ops = { .release = hypfs_release, .read_iter = hypfs_read_iter, .write_iter = hypfs_write_iter, - .llseek = no_llseek, }; static struct file_system_type hypfs_type = { diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 
4b904110d27cb0..297bf715796890 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -7,3 +7,4 @@ generated-y += unistd_nr.h generic-y += asm-offsets.h generic-y += kvm_types.h generic-y += mcs_spinlock.h +generic-y += mmzone.h diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h index de980c938a3e14..73e781b56bfe89 100644 --- a/arch/s390/include/asm/alternative.h +++ b/arch/s390/include/asm/alternative.h @@ -39,11 +39,7 @@ #define ALT_TYPE_SHIFT 20 #define ALT_CTX_SHIFT 28 -#define ALT_FACILITY_EARLY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \ - ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \ - (facility) << ALT_DATA_SHIFT) - -#define ALT_FACILITY(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \ +#define ALT_FACILITY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \ ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \ (facility) << ALT_DATA_SHIFT) diff --git a/arch/s390/include/asm/arch_hweight.h b/arch/s390/include/asm/arch_hweight.h index 50e23ce854e5c3..aca08b0acbc1de 100644 --- a/arch/s390/include/asm/arch_hweight.h +++ b/arch/s390/include/asm/arch_hweight.h @@ -4,6 +4,7 @@ #define _ASM_S390_ARCH_HWEIGHT_H #include +#include static __always_inline unsigned long popcnt_z196(unsigned long w) { @@ -29,9 +30,9 @@ static __always_inline unsigned long popcnt_z15(unsigned long w) static __always_inline unsigned long __arch_hweight64(__u64 w) { - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES)) + if (__is_defined(MARCH_HAS_Z15_FEATURES)) return popcnt_z15(w); - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) { + if (__is_defined(MARCH_HAS_Z196_FEATURES)) { w = popcnt_z196(w); w += w >> 32; w += w >> 16; @@ -43,9 +44,9 @@ static __always_inline unsigned long __arch_hweight64(__u64 w) static __always_inline unsigned int __arch_hweight32(unsigned int w) { - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES)) + if (__is_defined(MARCH_HAS_Z15_FEATURES)) return popcnt_z15(w); - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) { + if (__is_defined(MARCH_HAS_Z196_FEATURES)) { w = popcnt_z196(w); w += w >> 16; w += w >> 8; @@ -56,9 +57,9 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w) static __always_inline unsigned int __arch_hweight16(unsigned int w) { - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES)) + if (__is_defined(MARCH_HAS_Z15_FEATURES)) return popcnt_z15((unsigned short)w); - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) { + if (__is_defined(MARCH_HAS_Z196_FEATURES)) { w = popcnt_z196(w); w += w >> 8; return w & 0xff; @@ -68,7 +69,7 @@ static __always_inline unsigned int __arch_hweight16(unsigned int w) static __always_inline unsigned int __arch_hweight8(unsigned int w) { - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) + if (__is_defined(MARCH_HAS_Z196_FEATURES)) return popcnt_z196((unsigned char)w); return __sw_hweight8(w); } diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h index 742c7919cbcdc1..65380da9e75f3e 100644 --- a/arch/s390/include/asm/atomic_ops.h +++ b/arch/s390/include/asm/atomic_ops.h @@ -9,6 +9,7 @@ #define __ARCH_S390_ATOMIC_OPS__ #include +#include static __always_inline int __atomic_read(const atomic_t *v) { @@ -56,7 +57,7 @@ static __always_inline void __atomic64_set(atomic64_t *v, s64 i) } } -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +#ifdef MARCH_HAS_Z196_FEATURES #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \ static __always_inline op_type op_name(op_type val, op_type *ptr) \ @@ -107,7 +108,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi") #undef 
__ATOMIC_CONST_OPS #undef __ATOMIC_CONST_OP -#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#else /* MARCH_HAS_Z196_FEATURES */ #define __ATOMIC_OP(op_name, op_string) \ static __always_inline int op_name(int val, int *ptr) \ @@ -166,7 +167,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr") #define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr) #define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr) -#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#endif /* MARCH_HAS_Z196_FEATURES */ static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new) { diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 82de2a7c41605f..d82130d7f2b6d8 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -8,13 +8,15 @@ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H +#include + /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking * to devices. */ -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +#ifdef MARCH_HAS_Z196_FEATURES /* Fast-BCR without checkpoint synchronization */ #define __ASM_BCR_SERIALIZE "bcr 14,0\n" #else diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h index dae8843b164f82..1d3a4b0c650fc1 100644 --- a/arch/s390/include/asm/cpacf.h +++ b/arch/s390/include/asm/cpacf.h @@ -54,6 +54,8 @@ #define CPACF_KM_XTS_256 0x34 #define CPACF_KM_PXTS_128 0x3a #define CPACF_KM_PXTS_256 0x3c +#define CPACF_KM_XTS_128_FULL 0x52 +#define CPACF_KM_XTS_256_FULL 0x54 /* * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING) @@ -121,23 +123,31 @@ #define CPACF_KMAC_DEA 0x01 #define CPACF_KMAC_TDEA_128 0x02 #define CPACF_KMAC_TDEA_192 0x03 +#define CPACF_KMAC_HMAC_SHA_224 0x70 +#define CPACF_KMAC_HMAC_SHA_256 0x71 +#define CPACF_KMAC_HMAC_SHA_384 0x72 +#define CPACF_KMAC_HMAC_SHA_512 0x73 /* * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT) * instruction */ -#define CPACF_PCKMO_QUERY 0x00 -#define CPACF_PCKMO_ENC_DES_KEY 0x01 -#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02 -#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03 -#define CPACF_PCKMO_ENC_AES_128_KEY 0x12 -#define CPACF_PCKMO_ENC_AES_192_KEY 0x13 -#define CPACF_PCKMO_ENC_AES_256_KEY 0x14 -#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20 -#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21 -#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22 -#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28 -#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29 +#define CPACF_PCKMO_QUERY 0x00 +#define CPACF_PCKMO_ENC_DES_KEY 0x01 +#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02 +#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03 +#define CPACF_PCKMO_ENC_AES_128_KEY 0x12 +#define CPACF_PCKMO_ENC_AES_192_KEY 0x13 +#define CPACF_PCKMO_ENC_AES_256_KEY 0x14 +#define CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY 0x15 +#define CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY 0x16 +#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20 +#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21 +#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22 +#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28 +#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29 +#define CPACF_PCKMO_ENC_HMAC_512_KEY 0x76 +#define CPACF_PCKMO_ENC_HMAC_1024_KEY 0x7a /* * Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION) @@ -165,7 +175,40 @@ #define CPACF_KMA_LAAD 0x200 /* Last-AAD */ #define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */ +/* + * Flags for the KIMD/KLMD (COMPUTE INTERMEDIATE/LAST MESSAGE DIGEST) + * instructions + */ +#define CPACF_KIMD_NIP 0x8000 +#define CPACF_KLMD_DUFOP 0x4000 +#define CPACF_KLMD_NIP 0x8000 + +/* + * Function codes for KDSA (COMPUTE DIGITAL 
SIGNATURE AUTHENTICATION) + * instruction + */ +#define CPACF_KDSA_QUERY 0x00 +#define CPACF_KDSA_ECDSA_VERIFY_P256 0x01 +#define CPACF_KDSA_ECDSA_VERIFY_P384 0x02 +#define CPACF_KDSA_ECDSA_VERIFY_P521 0x03 +#define CPACF_KDSA_ECDSA_SIGN_P256 0x09 +#define CPACF_KDSA_ECDSA_SIGN_P384 0x0a +#define CPACF_KDSA_ECDSA_SIGN_P521 0x0b +#define CPACF_KDSA_ENC_ECDSA_SIGN_P256 0x11 +#define CPACF_KDSA_ENC_ECDSA_SIGN_P384 0x12 +#define CPACF_KDSA_ENC_ECDSA_SIGN_P521 0x13 +#define CPACF_KDSA_EDDSA_VERIFY_ED25519 0x20 +#define CPACF_KDSA_EDDSA_VERIFY_ED448 0x24 +#define CPACF_KDSA_EDDSA_SIGN_ED25519 0x28 +#define CPACF_KDSA_EDDSA_SIGN_ED448 0x2c +#define CPACF_KDSA_ENC_EDDSA_SIGN_ED25519 0x30 +#define CPACF_KDSA_ENC_EDDSA_SIGN_ED448 0x34 + +#define CPACF_FC_QUERY 0x00 +#define CPACF_FC_QUERY_AUTH_INFO 0x7F + typedef struct { unsigned char bytes[16]; } cpacf_mask_t; +typedef struct { unsigned char bytes[256]; } cpacf_qai_t; /* * Prototype for a not existing function to produce a link @@ -175,80 +218,85 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t; void __cpacf_bad_opcode(void); static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2, - cpacf_mask_t *mask) + u8 *pb, u8 fc) { asm volatile( - " la %%r1,%[mask]\n" - " xgr %%r0,%%r0\n" + " la %%r1,%[pb]\n" + " lghi %%r0,%[fc]\n" " .insn rre,%[opc] << 16,%[r1],%[r2]\n" - : [mask] "=R" (*mask) - : [opc] "i" (opc), + : [pb] "=R" (*pb) + : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1), [r2] "i" (r2) - : "cc", "r0", "r1"); + : "cc", "memory", "r0", "r1"); } -static __always_inline void __cpacf_query_rrf(u32 opc, - u8 r1, u8 r2, u8 r3, u8 m4, - cpacf_mask_t *mask) +static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3, + u8 m4, u8 *pb, u8 fc) { asm volatile( - " la %%r1,%[mask]\n" - " xgr %%r0,%%r0\n" + " la %%r1,%[pb]\n" + " lghi %%r0,%[fc]\n" " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n" - : [mask] "=R" (*mask) - : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2), - [r3] "i" (r3), [m4] "i" (m4) - : "cc", "r0", "r1"); + : [pb] "=R" (*pb) + : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1), + [r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4) + : "cc", "memory", "r0", "r1"); } -static __always_inline void __cpacf_query(unsigned int opcode, - cpacf_mask_t *mask) +static __always_inline void __cpacf_query_insn(unsigned int opcode, void *pb, + u8 fc) { switch (opcode) { case CPACF_KDSA: - __cpacf_query_rre(CPACF_KDSA, 0, 2, mask); + __cpacf_query_rre(CPACF_KDSA, 0, 2, pb, fc); break; case CPACF_KIMD: - __cpacf_query_rre(CPACF_KIMD, 0, 2, mask); + __cpacf_query_rre(CPACF_KIMD, 0, 2, pb, fc); break; case CPACF_KLMD: - __cpacf_query_rre(CPACF_KLMD, 0, 2, mask); + __cpacf_query_rre(CPACF_KLMD, 0, 2, pb, fc); break; case CPACF_KM: - __cpacf_query_rre(CPACF_KM, 2, 4, mask); + __cpacf_query_rre(CPACF_KM, 2, 4, pb, fc); break; case CPACF_KMA: - __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask); + __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, pb, fc); break; case CPACF_KMAC: - __cpacf_query_rre(CPACF_KMAC, 0, 2, mask); + __cpacf_query_rre(CPACF_KMAC, 0, 2, pb, fc); break; case CPACF_KMC: - __cpacf_query_rre(CPACF_KMC, 2, 4, mask); + __cpacf_query_rre(CPACF_KMC, 2, 4, pb, fc); break; case CPACF_KMCTR: - __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask); + __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, pb, fc); break; case CPACF_KMF: - __cpacf_query_rre(CPACF_KMF, 2, 4, mask); + __cpacf_query_rre(CPACF_KMF, 2, 4, pb, fc); break; case CPACF_KMO: - __cpacf_query_rre(CPACF_KMO, 2, 4, mask); + __cpacf_query_rre(CPACF_KMO, 2, 4, pb, fc); break; case 
CPACF_PCC: - __cpacf_query_rre(CPACF_PCC, 0, 0, mask); + __cpacf_query_rre(CPACF_PCC, 0, 0, pb, fc); break; case CPACF_PCKMO: - __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask); + __cpacf_query_rre(CPACF_PCKMO, 0, 0, pb, fc); break; case CPACF_PRNO: - __cpacf_query_rre(CPACF_PRNO, 2, 4, mask); + __cpacf_query_rre(CPACF_PRNO, 2, 4, pb, fc); break; default: __cpacf_bad_opcode(); } } +static __always_inline void __cpacf_query(unsigned int opcode, + cpacf_mask_t *mask) +{ + __cpacf_query_insn(opcode, mask, CPACF_FC_QUERY); +} + static __always_inline int __cpacf_check_opcode(unsigned int opcode) { switch (opcode) { @@ -269,6 +317,8 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode) return test_facility(57); /* check for MSA5 */ case CPACF_KMA: return test_facility(146); /* check for MSA8 */ + case CPACF_KDSA: + return test_facility(155); /* check for MSA9 */ default: __cpacf_bad_opcode(); return 0; @@ -276,14 +326,15 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode) } /** - * cpacf_query() - check if a specific CPACF function is available + * cpacf_query() - Query the function code mask for this CPACF opcode * @opcode: the opcode of the crypto instruction - * @func: the function code to test for + * @mask: ptr to struct cpacf_mask_t * * Executes the query function for the given crypto instruction @opcode * and checks if @func is available * - * Returns 1 if @func is available for @opcode, 0 otherwise + * On success 1 is returned and the mask is filled with the function + * code mask for this CPACF opcode, otherwise 0 is returned. */ static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask) { @@ -300,7 +351,8 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func) return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0; } -static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func) +static __always_inline int cpacf_query_func(unsigned int opcode, + unsigned int func) { cpacf_mask_t mask; @@ -309,6 +361,32 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu return 0; } +static __always_inline void __cpacf_qai(unsigned int opcode, cpacf_qai_t *qai) +{ + __cpacf_query_insn(opcode, qai, CPACF_FC_QUERY_AUTH_INFO); +} + +/** + * cpacf_qai() - Get the query authentication information for a CPACF opcode + * @opcode: the opcode of the crypto instruction + * @mask: ptr to struct cpacf_qai_t + * + * Executes the query authentication information function for the given crypto + * instruction @opcode and checks if @func is available + * + * On success 1 is returned and the mask is filled with the query authentication + * information for this CPACF opcode, otherwise 0 is returned. 
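To make the mask handling concrete, here is a user-space sketch of the bit test that cpacf_test_func() performs on the 16-byte query mask; function codes are counted MSB-first, so code 0x70 (KMAC HMAC-SHA-224) lands in byte 14, top bit. The mask contents below are made up:

#include <stdio.h>

typedef struct { unsigned char bytes[16]; } cpacf_mask_t;

/* Same test as cpacf_test_func(): function code 'func' is available when
 * bit 'func' of the query mask is set, counting bits MSB-first. */
static int test_func(const cpacf_mask_t *mask, unsigned int func)
{
	return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
}

int main(void)
{
	cpacf_mask_t mask = { { 0 } };

	/* pretend only KMAC HMAC-SHA-224 (function code 0x70) was reported */
	mask.bytes[0x70 >> 3] |= 0x80 >> (0x70 & 7);
	printf("fc 0x70: %d, fc 0x71: %d\n",
	       test_func(&mask, 0x70), test_func(&mask, 0x71));
	return 0;
}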
+ */ +static __always_inline int cpacf_qai(unsigned int opcode, cpacf_qai_t *qai) +{ + if (cpacf_query_func(opcode, CPACF_FC_QUERY_AUTH_INFO)) { + __cpacf_qai(opcode, qai); + return 1; + } + memset(qai, 0, sizeof(*qai)); + return 0; +} + /** * cpacf_km() - executes the KM (CIPHER MESSAGE) instruction * @func: the function code passed to KM; see CPACF_KM_xxx defines @@ -391,7 +469,7 @@ static inline void cpacf_kimd(unsigned long func, void *param, asm volatile( " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" - "0: .insn rre,%[opc] << 16,0,%[src]\n" + "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n" " brc 1,0b\n" /* handle partial completion */ : [src] "+&d" (s.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)(param)), @@ -416,7 +494,7 @@ static inline void cpacf_klmd(unsigned long func, void *param, asm volatile( " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" - "0: .insn rre,%[opc] << 16,0,%[src]\n" + "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n" " brc 1,0b\n" /* handle partial completion */ : [src] "+&d" (s.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)param), @@ -425,35 +503,52 @@ static inline void cpacf_klmd(unsigned long func, void *param, } /** - * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) - * instruction - * @func: the function code passed to KM; see CPACF_KMAC_xxx defines + * _cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) + * instruction and updates flags in gr0 + * @gr0: pointer to gr0 (fc and flags) passed to KMAC; see CPACF_KMAC_xxx defines * @param: address of parameter block; see POP for details on each func * @src: address of source memory area * @src_len: length of src operand in bytes * * Returns 0 for the query func, number of processed bytes for digest funcs */ -static inline int cpacf_kmac(unsigned long func, void *param, - const u8 *src, long src_len) +static inline int _cpacf_kmac(unsigned long *gr0, void *param, + const u8 *src, long src_len) { union register_pair s; s.even = (unsigned long)src; s.odd = (unsigned long)src_len; asm volatile( - " lgr 0,%[fc]\n" + " lgr 0,%[r0]\n" " lgr 1,%[pba]\n" "0: .insn rre,%[opc] << 16,0,%[src]\n" " brc 1,0b\n" /* handle partial completion */ - : [src] "+&d" (s.pair) - : [fc] "d" (func), [pba] "d" ((unsigned long)param), + " lgr %[r0],0\n" + : [r0] "+d" (*gr0), [src] "+&d" (s.pair) + : [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_KMAC) : "cc", "memory", "0", "1"); return src_len - s.odd; } +/** + * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) + * instruction + * @func: function code passed to KMAC; see CPACF_KMAC_xxx defines + * @param: address of parameter block; see POP for details on each func + * @src: address of source memory area + * @src_len: length of src operand in bytes + * + * Returns 0 for the query func, number of processed bytes for digest funcs + */ +static inline int cpacf_kmac(unsigned long func, void *param, + const u8 *src, long src_len) +{ + return _cpacf_kmac(&func, param, src, src_len); +} + /** * cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction * @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h index 72a9556d04f3b0..e6527f51ad0b56 100644 --- a/arch/s390/include/asm/ctlreg.h +++ b/arch/s390/include/asm/ctlreg.h @@ -202,8 +202,9 @@ union ctlreg0 { unsigned long : 3; unsigned long ccc : 1; /* Cryptography counter control */ unsigned long pec : 1; /* PAI extension control */ - unsigned long : 17; - unsigned long : 3; + unsigned long 
: 15; + unsigned long wti : 1; /* Warning-track */ + unsigned long : 4; unsigned long lap : 1; /* Low-address-protection control */ unsigned long : 4; unsigned long edat : 1; /* Enhanced-DAT-enablement control */ diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index c0d43512f4fc66..e1316e18123019 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -38,6 +38,7 @@ enum diag_stat_enum { DIAG_STAT_X308, DIAG_STAT_X318, DIAG_STAT_X320, + DIAG_STAT_X49C, DIAG_STAT_X500, NR_DIAG_STAT }; @@ -363,4 +364,12 @@ void _diag0c_amode31(unsigned long rx); void _diag308_reset_amode31(void); int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len); +/* diag 49c subcodes */ +enum diag49c_sc { + DIAG49C_SUBC_ACK = 0, + DIAG49C_SUBC_REG = 1 +}; + +int diag49c(unsigned long subcode); + #endif /* _ASM_S390_DIAG_H */ diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index b7d234838a3667..715bcf8fb69a51 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h @@ -14,7 +14,7 @@ #include #include #include - +#include #include #define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8) @@ -39,28 +39,51 @@ static inline void __clear_facility(unsigned long nr, void *facilities) ptr[nr >> 3] &= ~(0x80 >> (nr & 7)); } -static inline int __test_facility(unsigned long nr, void *facilities) +static __always_inline bool __test_facility(unsigned long nr, void *facilities) { unsigned char *ptr; if (nr >= MAX_FACILITY_BIT) - return 0; + return false; ptr = (unsigned char *) facilities + (nr >> 3); return (*ptr & (0x80 >> (nr & 7))) != 0; } +/* + * __test_facility_constant() generates a single instruction branch. If the + * tested facility is available (likely) the branch is patched into a nop. + * + * Do not use this function unless you know what you are doing. All users are + * supposed to use test_facility() which will do the right thing. + */ +static __always_inline bool __test_facility_constant(unsigned long nr) +{ + asm goto( + ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0", ALT_FACILITY(%[nr])) + : + : [nr] "i" (nr) + : + : l_no); + return true; +l_no: + return false; +} + /* * The test_facility function uses the bit ordering where the MSB is bit 0. * That makes it easier to query facility bits with the bit number as * documented in the Principles of Operation. 
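A quick stand-alone illustration of that MSB-0 convention, contrasted with the usual LSB-0 numbering that test_bit() uses; the facility list contents below are made up:

#include <stdio.h>

/* MSB-0 lookup as used for the facility list. */
static int fac_test_msb0(const unsigned char *list, unsigned long nr)
{
	return (list[nr >> 3] & (0x80 >> (nr & 7))) != 0;
}

/* Conventional LSB-0 lookup, shown only for contrast. */
static int fac_test_lsb0(const unsigned char *list, unsigned long nr)
{
	return (list[nr >> 3] & (1 << (nr & 7))) != 0;
}

int main(void)
{
	unsigned char list[16] = { 0x80 };	/* only facility bit 0 installed (MSB-0) */

	printf("bit 0: msb0=%d lsb0=%d\n", fac_test_msb0(list, 0), fac_test_lsb0(list, 0));
	printf("bit 7: msb0=%d lsb0=%d\n", fac_test_msb0(list, 7), fac_test_lsb0(list, 7));
	return 0;
}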
*/ -static inline int test_facility(unsigned long nr) +static __always_inline bool test_facility(unsigned long nr) { unsigned long facilities_als[] = { FACILITIES_ALS }; - if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) { - if (__test_facility(nr, &facilities_als)) - return 1; + if (!__is_defined(__DECOMPRESSOR) && __builtin_constant_p(nr)) { + if (nr < sizeof(facilities_als) * 8) { + if (__test_facility(nr, &facilities_als)) + return true; + } + return __test_facility_constant(nr); } return __test_facility(nr, &stfle_fac_list); } diff --git a/arch/s390/include/asm/fpu-insn-asm.h b/arch/s390/include/asm/fpu-insn-asm.h index 02ccfe46050a08..d296322be4bcc1 100644 --- a/arch/s390/include/asm/fpu-insn-asm.h +++ b/arch/s390/include/asm/fpu-insn-asm.h @@ -407,6 +407,28 @@ MRXBOPC 0, 0x0E, v1 .endm +/* VECTOR STORE BYTE REVERSED ELEMENTS */ + .macro VSTBR vr1, disp, index="%r0", base, m + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE600 | ((v1&15) << 4) | (x2&15) + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x0E, v1 +.endm +.macro VSTBRH vr1, disp, index="%r0", base + VSTBR \vr1, \disp, \index, \base, 1 +.endm +.macro VSTBRF vr1, disp, index="%r0", base + VSTBR \vr1, \disp, \index, \base, 2 +.endm +.macro VSTBRG vr1, disp, index="%r0", base + VSTBR \vr1, \disp, \index, \base, 3 +.endm +.macro VSTBRQ vr1, disp, index="%r0", base + VSTBR \vr1, \disp, \index, \base, 4 +.endm + /* VECTOR STORE MULTIPLE */ .macro VSTM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index fbadca645af79f..406746666eb782 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -6,8 +6,23 @@ #define MCOUNT_INSN_SIZE 6 #ifndef __ASSEMBLY__ +#include -unsigned long return_address(unsigned int n); +static __always_inline unsigned long return_address(unsigned int n) +{ + struct stack_frame *sf; + + if (!n) + return (unsigned long)__builtin_return_address(0); + + sf = (struct stack_frame *)current_frame_address(); + do { + sf = (struct stack_frame *)sf->back_chain; + if (!sf) + return 0; + } while (--n); + return sf->gprs[8]; +} #define ftrace_return_address(n) return_address(n) void ftrace_caller(void); diff --git a/arch/s390/include/asm/hiperdispatch.h b/arch/s390/include/asm/hiperdispatch.h new file mode 100644 index 00000000000000..27e23aa27a2430 --- /dev/null +++ b/arch/s390/include/asm/hiperdispatch.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2024 + */ + +#ifndef _ASM_HIPERDISPATCH_H +#define _ASM_HIPERDISPATCH_H + +void hd_reset_state(void); +void hd_add_core(int cpu); +void hd_disable_hiperdispatch(void); +int hd_enable_hiperdispatch(void); + +#endif /* _ASM_HIPERDISPATCH_H */ diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index 54b42817f70a65..d9e705f4a697e0 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -47,6 +47,7 @@ enum interruption_class { IRQEXT_CMS, IRQEXT_CMC, IRQEXT_FTP, + IRQEXT_WTI, IRQIO_CIO, IRQIO_DAS, IRQIO_C15, @@ -99,6 +100,7 @@ int unregister_external_irq(u16 code, ext_int_handler_t handler); enum irq_subclass { IRQ_SUBCLASS_MEASUREMENT_ALERT = 5, IRQ_SUBCLASS_SERVICE_SIGNAL = 9, + IRQ_SUBCLASS_WARNING_TRACK = 33, }; #define CR0_IRQ_SUBCLASS_MASK \ diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 183ac29afaf821..48c64716d1f2fd 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -98,8 +98,8 @@ struct lowcore { psw_t io_new_psw; /* 0x01f0 */ /* Save areas. */ - __u64 save_area_sync[8]; /* 0x0200 */ - __u64 save_area_async[8]; /* 0x0240 */ + __u64 save_area[8]; /* 0x0200 */ + __u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */ __u64 save_area_restart[1]; /* 0x0280 */ __u64 pcpu; /* 0x0288 */ diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h new file mode 100644 index 00000000000000..fd9eef3be44ca4 --- /dev/null +++ b/arch/s390/include/asm/march.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_S390_MARCH_H +#define __ASM_S390_MARCH_H + +#include + +#define MARCH_HAS_Z10_FEATURES 1 + +#ifndef __DECOMPRESSOR + +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +#define MARCH_HAS_Z196_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES +#define MARCH_HAS_ZEC12_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES +#define MARCH_HAS_Z13_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES +#define MARCH_HAS_Z14_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES +#define MARCH_HAS_Z15_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z16_FEATURES +#define MARCH_HAS_Z16_FEATURES 1 +#endif + +#endif /* __DECOMPRESSOR */ + +#endif /* __ASM_S390_MARCH_H */ diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h deleted file mode 100644 index 73e3e7c6976c5e..00000000000000 --- a/arch/s390/include/asm/mmzone.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * NUMA support for s390 - * - * Copyright IBM Corp. 
2015 - */ - -#ifndef _ASM_S390_MMZONE_H -#define _ASM_S390_MMZONE_H - -#ifdef CONFIG_NUMA - -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[nid]) - -#endif /* CONFIG_NUMA */ -#endif /* _ASM_S390_MMZONE_H */ diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h index 9f1eea15872ca3..916ab59e458a02 100644 --- a/arch/s390/include/asm/module.h +++ b/arch/s390/include/asm/module.h @@ -38,4 +38,18 @@ struct mod_arch_specific { #endif /* CONFIG_FUNCTION_TRACER */ }; +static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + const char *name) +{ + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + const Elf_Shdr *s, *se; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + } + return NULL; +} + #endif /* _ASM_S390_MODULE_H */ diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 16e4caa931f1f3..73e1e03317b433 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -176,8 +176,6 @@ static inline int devmem_is_allowed(unsigned long pfn) int arch_make_folio_accessible(struct folio *folio); #define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE -int arch_make_page_accessible(struct page *page); -#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE struct vm_layout { unsigned long kaslr_offset; diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 30820a649e6e7c..9d920ced604754 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -191,7 +191,14 @@ static inline bool zdev_enabled(struct zpci_dev *zdev) return (zdev->fh & (1UL << 31)) ? true : false; } -extern const struct attribute_group *zpci_attr_groups[]; +extern const struct attribute_group zpci_attr_group; +extern const struct attribute_group pfip_attr_group; +extern const struct attribute_group zpci_ident_attr_group; + +#define ARCH_PCI_DEV_GROUPS &zpci_attr_group, \ + &pfip_attr_group, \ + &zpci_ident_attr_group, + extern unsigned int s390_pci_force_floating __initdata; extern unsigned int s390_pci_no_rid; diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 89a28740b6ab85..84f6b8357b4534 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -4,6 +4,7 @@ #include #include +#include /* * s390 uses its own implementation for per cpu data, the offset of @@ -50,7 +51,7 @@ #define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) #define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) -#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES +#ifndef MARCH_HAS_Z196_FEATURES #define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) #define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) @@ -61,7 +62,7 @@ #define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) #define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) -#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#else /* MARCH_HAS_Z196_FEATURES */ #define arch_this_cpu_add(pcp, val, op1, op2, szcast) \ { \ @@ -129,7 +130,7 @@ #define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao") #define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog") -#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#endif /* MARCH_HAS_Z196_FEATURES */ #define arch_this_cpu_cmpxchg(pcp, oval, nval) \ ({ \ diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 9917e2717b2b42..66200d4a21341b 100644 
--- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -48,30 +48,6 @@ struct perf_sf_sde_regs { unsigned long reserved:63; /* reserved */ }; -/* Perf PMU definitions for the counter facility */ -#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */ - -/* Perf PMU definitions for the sampling facility */ -#define PERF_CPUM_SF_MAX_CTR 2 -#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ -#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ -#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */ -#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ -#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ -#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ - PERF_CPUM_SF_DIAG_MODE) -#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */ - -#define REG_NONE 0 -#define REG_OVERFLOW 1 -#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) -#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) -#define TEAR_REG(hwc) ((hwc)->last_tag) -#define SAMPL_RATE(hwc) ((hwc)->event_base) -#define SAMPL_FLAGS(hwc) ((hwc)->config_base) -#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) -#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) - #define perf_arch_fetch_caller_regs(regs, __ip) do { \ (regs)->psw.addr = (__ip); \ (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 3fa280d0672a3c..0ffbaf7419558b 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -955,6 +955,7 @@ static inline int pte_unused(pte_t pte) * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID * must not be set. 
*/ +#define pte_pgprot pte_pgprot static inline pgprot_t pte_pgprot(pte_t pte) { unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK; diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h index 47d80a7451a669..5dca1a46a9f656 100644 --- a/arch/s390/include/asm/pkey.h +++ b/arch/s390/include/asm/pkey.h @@ -22,7 +22,7 @@ * @param protkey pointer to buffer receiving the protected key * @return 0 on success, negative errno value on failure */ -int pkey_keyblob2pkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); +int pkey_key2protkey(const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype); #endif /* _KAPI_PKEY_H */ diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index 3ae5f31c665d1f..deca3f221836ed 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -5,8 +5,9 @@ #include #include #include +#include -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +#ifdef MARCH_HAS_Z196_FEATURES /* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 @@ -75,7 +76,7 @@ static __always_inline bool should_resched(int preempt_offset) preempt_offset); } -#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#else /* MARCH_HAS_Z196_FEATURES */ #define PREEMPT_ENABLED (0) @@ -123,7 +124,7 @@ static __always_inline bool should_resched(int preempt_offset) tif_need_resched()); } -#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#endif /* MARCH_HAS_Z196_FEATURES */ #define init_task_preempt_count(p) do { } while (0) /* Deferred to CPU bringup time */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 5ecd442535b9ed..9a5236acc0a860 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -44,6 +44,7 @@ struct pcpu { unsigned long ec_mask; /* bit mask for ec_xxx functions */ unsigned long ec_clk; /* sigp timestamp for ec_xxx */ unsigned long flags; /* per CPU flags */ + unsigned long capacity; /* cpu capacity for scheduler */ signed char state; /* physical cpu state */ signed char polarization; /* physical polarization */ u16 address; /* physical cpu address */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index da3dad18fe5072..eb00fa1771da07 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -72,6 +72,7 @@ struct sclp_info { unsigned char has_core_type : 1; unsigned char has_sprp : 1; unsigned char has_hvs : 1; + unsigned char has_wti : 1; unsigned char has_esca : 1; unsigned char has_sief2 : 1; unsigned char has_64bscao : 1; diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 8505737712ee85..70b920b32827e8 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -34,6 +34,7 @@ #define MACHINE_FLAG_SCC BIT(17) #define MACHINE_FLAG_PCI_MIO BIT(18) #define MACHINE_FLAG_RDP BIT(19) +#define MACHINE_FLAG_SEQ_INSN BIT(20) #define LPP_MAGIC BIT(31) #define LPP_PID_MASK _AC(0xffffffff, UL) @@ -95,6 +96,7 @@ extern unsigned long mio_wb_bit_mask; #define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC) #define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO) #define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP) +#define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN) /* * Console mode. 
Override with conmode= @@ -115,6 +117,8 @@ extern unsigned int console_irq; #define SET_CONSOLE_VT220 do { console_mode = 4; } while (0) #define SET_CONSOLE_HVC do { console_mode = 5; } while (0) +void register_early_console(void); + #ifdef CONFIG_VMCP void vmcp_cma_reserve(void); #else diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index cd835f4fb11a1d..7feca96c48c6a2 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -12,6 +12,7 @@ #include #define raw_smp_processor_id() (get_lowcore()->cpu_nr) +#define arch_scale_cpu_capacity smp_cpu_get_capacity extern struct mutex smp_cpu_state_mutex; extern unsigned int smp_cpu_mt_shift; @@ -34,6 +35,9 @@ extern void smp_save_dump_secondary_cpus(void); extern void smp_yield_cpu(int cpu); extern void smp_cpu_set_polarization(int cpu, int val); extern int smp_cpu_get_polarization(int cpu); +extern void smp_cpu_set_capacity(int cpu, unsigned long val); +extern void smp_set_core_capacity(int cpu, unsigned long val); +extern unsigned long smp_cpu_get_capacity(int cpu); extern int smp_cpu_get_cpu_address(int cpu); extern void smp_fill_possible_mask(void); extern void smp_detect_cpus(void); diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 3a0ac0c7a9a39b..cef06bffad8041 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -67,6 +67,9 @@ static inline void topology_expect_change(void) { } #define POLARIZATION_VM (2) #define POLARIZATION_VH (3) +#define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE +#define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3) + #define SD_BOOK_INIT SD_CPU_INIT #ifdef CONFIG_NUMA diff --git a/arch/s390/include/asm/trace/hiperdispatch.h b/arch/s390/include/asm/trace/hiperdispatch.h new file mode 100644 index 00000000000000..46462ee645b000 --- /dev/null +++ b/arch/s390/include/asm/trace/hiperdispatch.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Tracepoint header for hiperdispatch + * + * Copyright IBM Corp. 
2024 + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM s390 + +#if !defined(_TRACE_S390_HIPERDISPATCH_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_S390_HIPERDISPATCH_H + +#include + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH asm/trace +#define TRACE_INCLUDE_FILE hiperdispatch + +TRACE_EVENT(s390_hd_work_fn, + TP_PROTO(int steal_time_percentage, + int entitled_core_count, + int highcap_core_count), + TP_ARGS(steal_time_percentage, + entitled_core_count, + highcap_core_count), + TP_STRUCT__entry(__field(int, steal_time_percentage) + __field(int, entitled_core_count) + __field(int, highcap_core_count)), + TP_fast_assign(__entry->steal_time_percentage = steal_time_percentage; + __entry->entitled_core_count = entitled_core_count; + __entry->highcap_core_count = highcap_core_count;), + TP_printk("steal: %d entitled_core_count: %d highcap_core_count: %d", + __entry->steal_time_percentage, + __entry->entitled_core_count, + __entry->highcap_core_count) +); + +TRACE_EVENT(s390_hd_rebuild_domains, + TP_PROTO(int current_highcap_core_count, + int new_highcap_core_count), + TP_ARGS(current_highcap_core_count, + new_highcap_core_count), + TP_STRUCT__entry(__field(int, current_highcap_core_count) + __field(int, new_highcap_core_count)), + TP_fast_assign(__entry->current_highcap_core_count = current_highcap_core_count; + __entry->new_highcap_core_count = new_highcap_core_count), + TP_printk("change highcap_core_count: %u -> %u", + __entry->current_highcap_core_count, + __entry->new_highcap_core_count) +); + +#endif /* _TRACE_S390_HIPERDISPATCH_H */ + +/* This part must be outside protection */ +#include diff --git a/arch/s390/include/asm/vdso-symbols.h b/arch/s390/include/asm/vdso-symbols.h new file mode 100644 index 00000000000000..0df17574d78890 --- /dev/null +++ b/arch/s390/include/asm/vdso-symbols.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __S390_VDSO_SYMBOLS_H__ +#define __S390_VDSO_SYMBOLS_H__ + +#include +#ifdef CONFIG_COMPAT +#include +#endif + +#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name)) +#ifdef CONFIG_COMPAT +#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name)) +#else +#define VDSO32_SYMBOL(tsk, name) (-1UL) +#endif + +#endif /* __S390_VDSO_SYMBOLS_H__ */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 53165aa7813a3b..91061f0279be45 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -6,18 +6,6 @@ #ifndef __ASSEMBLY__ -#include -#ifdef CONFIG_COMPAT -#include -#endif - -#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name)) -#ifdef CONFIG_COMPAT -#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name)) -#else -#define VDSO32_SYMBOL(tsk, name) (-1UL) -#endif - extern struct vdso_data *vdso_data; int vdso_getcpu_init(void); diff --git a/arch/s390/include/asm/vdso/getrandom.h b/arch/s390/include/asm/vdso/getrandom.h new file mode 100644 index 00000000000000..36355af7160bee --- /dev/null +++ b/arch/s390/include/asm/vdso/getrandom.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_GETRANDOM_H +#define __ASM_VDSO_GETRANDOM_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +/** + * getrandom_syscall - Invoke the getrandom() syscall. + * @buffer: Destination buffer to fill with random bytes. + * @len: Size of @buffer in bytes. + * @flags: Zero or more GRND_* flags. 
+ * Returns: The number of random bytes written to @buffer, or a negative value indicating an error. + */ +static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags) +{ + return syscall3(__NR_getrandom, (long)buffer, (long)len, (long)flags); +} + +static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void) +{ + /* + * The RNG data is in the real VVAR data page, but if a task belongs to a time namespace + * then VVAR_DATA_PAGE_OFFSET points to the namespace-specific VVAR page and VVAR_TIMENS_ + * PAGE_OFFSET points to the real VVAR page. + */ + if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS) + return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE; + return &_vdso_rng_data; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h index 6c67c08cefdd6a..3c5d5e47814e16 100644 --- a/arch/s390/include/asm/vdso/vsyscall.h +++ b/arch/s390/include/asm/vdso/vsyscall.h @@ -2,12 +2,21 @@ #ifndef __ASM_VDSO_VSYSCALL_H #define __ASM_VDSO_VSYSCALL_H +#define __VDSO_RND_DATA_OFFSET 768 + #ifndef __ASSEMBLY__ #include #include #include #include + +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET, + VVAR_TIMENS_PAGE_OFFSET, + VVAR_NR_PAGES +}; + /* * Update the vDSO data page to keep in sync with kernel timekeeping. */ @@ -18,6 +27,12 @@ static __always_inline struct vdso_data *__s390_get_k_vdso_data(void) } #define __arch_get_k_vdso_data __s390_get_k_vdso_data +static __always_inline struct vdso_rng_data *__s390_get_k_vdso_rnd_data(void) +{ + return (void *)vdso_data + __VDSO_RND_DATA_OFFSET; +} +#define __arch_get_k_vdso_rng_data __s390_get_k_vdso_rnd_data + /* The asm-generic header needs to be included after the definitions above */ #include diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h index 5ad76471e73ff4..60431d00e6bd7b 100644 --- a/arch/s390/include/uapi/asm/pkey.h +++ b/arch/s390/include/uapi/asm/pkey.h @@ -41,6 +41,10 @@ #define PKEY_KEYTYPE_ECC_P521 7 #define PKEY_KEYTYPE_ECC_ED25519 8 #define PKEY_KEYTYPE_ECC_ED448 9 +#define PKEY_KEYTYPE_AES_XTS_128 10 +#define PKEY_KEYTYPE_AES_XTS_256 11 +#define PKEY_KEYTYPE_HMAC_512 12 +#define PKEY_KEYTYPE_HMAC_1024 13 /* the newer ioctls use a pkey_key_type enum for type information */ enum pkey_key_type { @@ -50,6 +54,7 @@ enum pkey_key_type { PKEY_TYPE_CCA_ECC = (__u32) 0x1f, PKEY_TYPE_EP11_AES = (__u32) 6, PKEY_TYPE_EP11_ECC = (__u32) 7, + PKEY_TYPE_PROTKEY = (__u32) 8, }; /* the newer ioctls use a pkey_key_size enum for key size information */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index e47a4be54ff8e2..48caae8c7e104b 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -36,22 +36,23 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls CFLAGS_dumpstack.o += -fno-optimize-sibling-calls CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls -obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o +obj-y := head64.o traps.o time.o process.o early.o setup.o idle.o vtime.o obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o obj-y += sysinfo.o lgr.o os_info.o ctlreg.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += entry.o reipl.o kdebugfs.o alternative.o obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o 
-obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o +obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o wti.o extra-y += vmlinux.lds obj-$(CONFIG_SYSFS) += nospec-sysfs.o CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) +obj-$(CONFIG_SYSFS) += cpacf.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o +obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o hiperdispatch.o obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_AUDIT) += audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index ffa0dd2dbaacba..5529248d84fb87 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -112,8 +112,7 @@ int main(void) OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw); OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw); /* software defined lowcore locations 0x200 - 0xdff*/ - OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync); - OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async); + OFFSET(__LC_SAVE_AREA, lowcore, save_area); OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart); OFFSET(__LC_PCPU, lowcore, pcpu); OFFSET(__LC_RETURN_PSW, lowcore, return_psw); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 1942e2a9f8db26..5a86b9d1da716d 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -24,11 +24,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include "compat_linux.h" #include "compat_ptrace.h" diff --git a/arch/s390/kernel/cpacf.c b/arch/s390/kernel/cpacf.c new file mode 100644 index 00000000000000..c8575dbc890d64 --- /dev/null +++ b/arch/s390/kernel/cpacf.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2024 + */ + +#define KMSG_COMPONENT "cpacf" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include + +#define CPACF_QUERY(name, instruction) \ +static ssize_t name##_query_raw_read(struct file *fp, \ + struct kobject *kobj, \ + struct bin_attribute *attr, \ + char *buf, loff_t offs, \ + size_t count) \ +{ \ + cpacf_mask_t mask; \ + \ + if (!cpacf_query(CPACF_##instruction, &mask)) \ + return -EOPNOTSUPP; \ + return memory_read_from_buffer(buf, count, &offs, &mask, sizeof(mask)); \ +} \ +static BIN_ATTR_RO(name##_query_raw, sizeof(cpacf_mask_t)) + +CPACF_QUERY(km, KM); +CPACF_QUERY(kmc, KMC); +CPACF_QUERY(kimd, KIMD); +CPACF_QUERY(klmd, KLMD); +CPACF_QUERY(kmac, KMAC); +CPACF_QUERY(pckmo, PCKMO); +CPACF_QUERY(kmf, KMF); +CPACF_QUERY(kmctr, KMCTR); +CPACF_QUERY(kmo, KMO); +CPACF_QUERY(pcc, PCC); +CPACF_QUERY(prno, PRNO); +CPACF_QUERY(kma, KMA); +CPACF_QUERY(kdsa, KDSA); + +#define CPACF_QAI(name, instruction) \ +static ssize_t name##_query_auth_info_raw_read( \ + struct file *fp, struct kobject *kobj, \ + struct bin_attribute *attr, char *buf, loff_t offs, \ + size_t count) \ +{ \ + cpacf_qai_t qai; \ + \ + if (!cpacf_qai(CPACF_##instruction, &qai)) \ + return -EOPNOTSUPP; \ + return memory_read_from_buffer(buf, count, &offs, &qai, \ + sizeof(qai)); \ +} \ +static BIN_ATTR_RO(name##_query_auth_info_raw, sizeof(cpacf_qai_t)) + +CPACF_QAI(km, KM); +CPACF_QAI(kmc, KMC); +CPACF_QAI(kimd, KIMD); +CPACF_QAI(klmd, KLMD); +CPACF_QAI(kmac, KMAC); +CPACF_QAI(pckmo, PCKMO); +CPACF_QAI(kmf, KMF); +CPACF_QAI(kmctr, KMCTR); +CPACF_QAI(kmo, KMO); +CPACF_QAI(pcc, PCC); +CPACF_QAI(prno, PRNO); +CPACF_QAI(kma, KMA); +CPACF_QAI(kdsa, KDSA); + +static struct bin_attribute *cpacf_attrs[] = { + &bin_attr_km_query_raw, + &bin_attr_kmc_query_raw, + &bin_attr_kimd_query_raw, + &bin_attr_klmd_query_raw, + &bin_attr_kmac_query_raw, + &bin_attr_pckmo_query_raw, + &bin_attr_kmf_query_raw, + &bin_attr_kmctr_query_raw, + &bin_attr_kmo_query_raw, + &bin_attr_pcc_query_raw, + &bin_attr_prno_query_raw, + &bin_attr_kma_query_raw, + &bin_attr_kdsa_query_raw, + &bin_attr_km_query_auth_info_raw, + &bin_attr_kmc_query_auth_info_raw, + &bin_attr_kimd_query_auth_info_raw, + &bin_attr_klmd_query_auth_info_raw, + &bin_attr_kmac_query_auth_info_raw, + &bin_attr_pckmo_query_auth_info_raw, + &bin_attr_kmf_query_auth_info_raw, + &bin_attr_kmctr_query_auth_info_raw, + &bin_attr_kmo_query_auth_info_raw, + &bin_attr_pcc_query_auth_info_raw, + &bin_attr_prno_query_auth_info_raw, + &bin_attr_kma_query_auth_info_raw, + &bin_attr_kdsa_query_auth_info_raw, + NULL, +}; + +static const struct attribute_group cpacf_attr_grp = { + .name = "cpacf", + .bin_attrs = cpacf_attrs, +}; + +static int __init cpacf_init(void) +{ + struct device *cpu_root; + int rc = 0; + + cpu_root = bus_get_dev_root(&cpu_subsys); + if (cpu_root) { + rc = sysfs_create_group(&cpu_root->kobj, &cpacf_attr_grp); + put_device(cpu_root); + } + return rc; +} +device_initcall(cpacf_init); diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index bce50ca75ea7be..e62bea9ab21e6a 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -163,7 +163,6 @@ static const struct file_operations debug_file_ops = { .write = debug_input, .open = debug_open, .release = debug_close, - .llseek = no_llseek, }; static struct dentry *debug_debugfs_root_entry; diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index ac7b8c8e313310..007e1795670e8a 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -52,6 +52,7 @@ 
static const struct diag_desc diag_map[NR_DIAG_STAT] = { [DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" }, [DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" }, [DIAG_STAT_X320] = { .code = 0x320, .name = "Certificate Store" }, + [DIAG_STAT_X49C] = { .code = 0x49c, .name = "Warning-Track Interruption" }, [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" }, }; @@ -303,3 +304,19 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode) return diag_amode31_ops.diag26c(virt_to_phys(req), virt_to_phys(resp), subcode); } EXPORT_SYMBOL(diag26c); + +int diag49c(unsigned long subcode) +{ + int rc; + + diag_stat_inc(DIAG_STAT_X49C); + asm volatile( + " diag %[subcode],0,0x49c\n" + " ipm %[rc]\n" + " srl %[rc],28\n" + : [rc] "=d" (rc) + : [subcode] "d" (subcode) + : "cc"); + return rc; +} +EXPORT_SYMBOL(diag49c); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 89dc826a8d2ef1..94eb8168ea4454 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -122,6 +122,7 @@ enum { U8_32, /* 8 bit unsigned value starting at 32 */ U12_16, /* 12 bit unsigned value starting at 16 */ U16_16, /* 16 bit unsigned value starting at 16 */ + U16_20, /* 16 bit unsigned value starting at 20 */ U16_32, /* 16 bit unsigned value starting at 32 */ U32_16, /* 32 bit unsigned value starting at 16 */ VX_12, /* Vector index register starting at position 12 */ @@ -184,6 +185,7 @@ static const struct s390_operand operands[] = { [U8_32] = { 8, 32, 0 }, [U12_16] = { 12, 16, 0 }, [U16_16] = { 16, 16, 0 }, + [U16_20] = { 16, 20, 0 }, [U16_32] = { 16, 32, 0 }, [U32_16] = { 32, 16, 0 }, [VX_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR }, @@ -257,7 +259,6 @@ static const unsigned char formats[][6] = { [INSTR_RSL_R0RD] = { D_20, L4_8, B_16, 0, 0, 0 }, [INSTR_RSY_AARD] = { A_8, A_12, D20_20, B_16, 0, 0 }, [INSTR_RSY_CCRD] = { C_8, C_12, D20_20, B_16, 0, 0 }, - [INSTR_RSY_RDRU] = { R_8, D20_20, B_16, U4_12, 0, 0 }, [INSTR_RSY_RRRD] = { R_8, R_12, D20_20, B_16, 0, 0 }, [INSTR_RSY_RURD] = { R_8, U4_12, D20_20, B_16, 0, 0 }, [INSTR_RSY_RURD2] = { R_8, D20_20, B_16, U4_12, 0, 0 }, @@ -300,14 +301,17 @@ static const unsigned char formats[][6] = { [INSTR_VRI_V0UU2] = { V_8, U16_16, U4_32, 0, 0, 0 }, [INSTR_VRI_V0UUU] = { V_8, U8_16, U8_24, U4_32, 0, 0 }, [INSTR_VRI_VR0UU] = { V_8, R_12, U8_28, U4_24, 0, 0 }, + [INSTR_VRI_VV0UU] = { V_8, V_12, U8_28, U4_24, 0, 0 }, [INSTR_VRI_VVUU] = { V_8, V_12, U16_16, U4_32, 0, 0 }, [INSTR_VRI_VVUUU] = { V_8, V_12, U12_16, U4_32, U4_28, 0 }, [INSTR_VRI_VVUUU2] = { V_8, V_12, U8_28, U8_16, U4_24, 0 }, [INSTR_VRI_VVV0U] = { V_8, V_12, V_16, U8_24, 0, 0 }, [INSTR_VRI_VVV0UU] = { V_8, V_12, V_16, U8_24, U4_32, 0 }, [INSTR_VRI_VVV0UU2] = { V_8, V_12, V_16, U8_28, U4_24, 0 }, - [INSTR_VRR_0V] = { V_12, 0, 0, 0, 0, 0 }, + [INSTR_VRI_VVV0UV] = { V_8, V_12, V_16, V_32, U8_24, 0 }, + [INSTR_VRR_0V0U] = { V_12, U16_20, 0, 0, 0, 0 }, [INSTR_VRR_0VV0U] = { V_12, V_16, U4_24, 0, 0, 0 }, + [INSTR_VRR_0VVU] = { V_12, V_16, U16_20, 0, 0, 0 }, [INSTR_VRR_RV0UU] = { R_8, V_12, U4_24, U4_28, 0, 0 }, [INSTR_VRR_VRR] = { V_8, R_12, R_16, 0, 0, 0 }, [INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 }, @@ -455,21 +459,21 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) if (separator) ptr += sprintf(ptr, "%c", separator); if (operand->flags & OPERAND_GPR) - ptr += sprintf(ptr, "%%r%i", value); + ptr += sprintf(ptr, "%%r%u", value); else if (operand->flags & OPERAND_FPR) - ptr += sprintf(ptr, "%%f%i", value); + ptr += sprintf(ptr, 
"%%f%u", value); else if (operand->flags & OPERAND_AR) - ptr += sprintf(ptr, "%%a%i", value); + ptr += sprintf(ptr, "%%a%u", value); else if (operand->flags & OPERAND_CR) - ptr += sprintf(ptr, "%%c%i", value); + ptr += sprintf(ptr, "%%c%u", value); else if (operand->flags & OPERAND_VR) - ptr += sprintf(ptr, "%%v%i", value); + ptr += sprintf(ptr, "%%v%u", value); else if (operand->flags & OPERAND_PCREL) { void *pcrel = (void *)((int)value + addr); ptr += sprintf(ptr, "%px", pcrel); } else if (operand->flags & OPERAND_SIGNED) - ptr += sprintf(ptr, "%i", value); + ptr += sprintf(ptr, "%i", (int)value); else ptr += sprintf(ptr, "%u", value); if (operand->flags & OPERAND_DISP) diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 14d324865e33fc..62f8f5a750a308 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -7,6 +7,7 @@ #define KMSG_COMPONENT "setup" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include #include #include #include @@ -175,20 +176,45 @@ static __init void setup_topology(void) topology_max_mnest = max_mnest; } -void __do_early_pgm_check(struct pt_regs *regs) +void __init __do_early_pgm_check(struct pt_regs *regs) { - if (!fixup_exception(regs)) - disabled_wait(); + struct lowcore *lc = get_lowcore(); + unsigned long ip; + + regs->int_code = lc->pgm_int_code; + regs->int_parm_long = lc->trans_exc_code; + ip = __rewind_psw(regs->psw, regs->int_code >> 16); + + /* Monitor Event? Might be a warning */ + if ((regs->int_code & PGM_INT_CODE_MASK) == 0x40) { + if (report_bug(ip, regs) == BUG_TRAP_TYPE_WARN) + return; + } + if (fixup_exception(regs)) + return; + /* + * Unhandled exception - system cannot continue but try to get some + * helpful messages to the console. Use early_printk() to print + * some basic information in case it is too early for printk(). 
+ */ + register_early_console(); + early_printk("PANIC: early exception %04x PSW: %016lx %016lx\n", + regs->int_code & 0xffff, regs->psw.mask, regs->psw.addr); + show_regs(regs); + disabled_wait(); } static noinline __init void setup_lowcore_early(void) { + struct lowcore *lc = get_lowcore(); psw_t psw; psw.addr = (unsigned long)early_pgm_check_handler; psw.mask = PSW_KERNEL_BITS; - get_lowcore()->program_new_psw = psw; - get_lowcore()->preempt_count = INIT_PREEMPT_COUNT; + lc->program_new_psw = psw; + lc->preempt_count = INIT_PREEMPT_COUNT; + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); } static __init void detect_diag9c(void) @@ -242,6 +268,8 @@ static __init void detect_machine_facilities(void) } if (test_facility(194)) get_lowcore()->machine_flags |= MACHINE_FLAG_RDP; + if (test_facility(85)) + get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN; } static inline void save_vector_registers(void) diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c index d9d53f44008ac0..cefe020a3be381 100644 --- a/arch/s390/kernel/early_printk.c +++ b/arch/s390/kernel/early_printk.c @@ -6,6 +6,7 @@ #include #include #include +#include #include static void sclp_early_write(struct console *con, const char *s, unsigned int len) @@ -20,6 +21,16 @@ static struct console sclp_early_console = { .index = -1, }; +void __init register_early_console(void) +{ + if (early_console) + return; + if (!sclp.has_linemode && !sclp.has_vt220) + return; + early_console = &sclp_early_console; + register_console(early_console); +} + static int __init setup_early_printk(char *buf) { if (early_console) @@ -27,10 +38,7 @@ static int __init setup_early_printk(char *buf) /* Accept only "earlyprintk" and "earlyprintk=sclp" */ if (buf && !str_has_prefix(buf, "sclp")) return 0; - if (!sclp.has_linemode && !sclp.has_vt220) - return 0; - early_console = &sclp_early_console; - register_console(early_console); + register_early_console(); return 0; } early_param("earlyprintk", setup_early_printk); diff --git a/arch/s390/kernel/earlypgm.S b/arch/s390/kernel/earlypgm.S deleted file mode 100644 index c634871f0d905b..00000000000000 --- a/arch/s390/kernel/earlypgm.S +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright IBM Corp. 
2006, 2007 - * Author(s): Michael Holzheu - */ - -#include -#include - -SYM_CODE_START(early_pgm_check_handler) - stmg %r8,%r15,__LC_SAVE_AREA_SYNC - aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC - lgr %r2,%r11 - brasl %r14,__do_early_pgm_check - mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) - lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) - lpswe __LC_RETURN_PSW -SYM_CODE_END(early_pgm_check_handler) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 749410cfdbc078..d6d5317f768e82 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -42,7 +42,7 @@ _LPP_OFFSET = __LC_LPP .macro LPSWEY address, lpswe ALTERNATIVE_2 "b \lpswe;nopr", \ - ".insn siy,0xeb0000000071,\address,0", ALT_FACILITY_EARLY(193), \ + ".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193), \ __stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \ ALT_LOWCORE .endm @@ -264,7 +264,7 @@ EXPORT_SYMBOL(sie_exit) */ SYM_CODE_START(system_call) - STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC + STMG_LC %r8,%r15,__LC_SAVE_AREA GET_LC %r13 stpt __LC_SYS_ENTER_TIMER(%r13) BPOFF @@ -287,7 +287,7 @@ SYM_CODE_START(system_call) xgr %r10,%r10 xgr %r11,%r11 la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs - mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC(%r13) + mvc __PT_R8(64,%r2),__LC_SAVE_AREA(%r13) MBEAR %r2,%r13 lgr %r3,%r14 brasl %r14,__do_syscall @@ -323,7 +323,7 @@ SYM_CODE_END(ret_from_fork) */ SYM_CODE_START(pgm_check_handler) - STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC + STMG_LC %r8,%r15,__LC_SAVE_AREA GET_LC %r13 stpt __LC_SYS_ENTER_TIMER(%r13) BPOFF @@ -338,16 +338,16 @@ SYM_CODE_START(pgm_check_handler) jnz 2f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3(%r13),0x80 # check for per exception jnz .Lpgm_svcper # -> single stepped svc -2: CHECK_STACK __LC_SAVE_AREA_SYNC,%r13 +2: CHECK_STACK __LC_SAVE_AREA,%r13 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) # CHECK_VMAP_STACK branches to stack_overflow or 4f - CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,%r13,4f + CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f 3: lg %r15,__LC_KERNEL_STACK(%r13) 4: la %r11,STACK_FRAME_OVERHEAD(%r15) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC(%r13) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13) mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13) stctg %c1,%c1,__PT_CR1(%r11) #if IS_ENABLED(CONFIG_KVM) @@ -398,7 +398,7 @@ SYM_CODE_END(pgm_check_handler) */ .macro INT_HANDLER name,lc_old_psw,handler SYM_CODE_START(\name) - STMG_LC %r8,%r15,__LC_SAVE_AREA_ASYNC + STMG_LC %r8,%r15,__LC_SAVE_AREA GET_LC %r13 stckf __LC_INT_CLOCK(%r13) stpt __LC_SYS_ENTER_TIMER(%r13) @@ -414,7 +414,7 @@ SYM_CODE_START(\name) BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST SIEEXIT __SF_SIE_CONTROL(%r15),%r13 #endif -0: CHECK_STACK __LC_SAVE_AREA_ASYNC,%r13 +0: CHECK_STACK __LC_SAVE_AREA,%r13 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) @@ -432,7 +432,7 @@ SYM_CODE_START(\name) xgr %r7,%r7 xgr %r10,%r10 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC(%r13) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13) MBEAR %r11,%r13 stmg %r8,%r9,__PT_PSW(%r11) lgr %r2,%r11 # pass pointer to pt_regs @@ -599,6 +599,24 @@ SYM_CODE_START(restart_int_handler) 3: 
j 3b SYM_CODE_END(restart_int_handler) + __INIT +SYM_CODE_START(early_pgm_check_handler) + STMG_LC %r8,%r15,__LC_SAVE_AREA + GET_LC %r13 + aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) + la %r11,STACK_FRAME_OVERHEAD(%r15) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13) + lgr %r2,%r11 + brasl %r14,__do_early_pgm_check + mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) + LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE +SYM_CODE_END(early_pgm_check_handler) + __FINIT + .section .kprobes.text, "ax" #if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK) diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 0bd6adc40a344d..0b6e62d1d8b87e 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -50,10 +50,6 @@ struct ftrace_insn { s32 disp; } __packed; -#ifdef CONFIG_MODULES -static char *ftrace_plt; -#endif /* CONFIG_MODULES */ - static const char *ftrace_shared_hotpatch_trampoline(const char **end) { const char *tstart, *tend; @@ -73,19 +69,20 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end) bool ftrace_need_init_nop(void) { - return true; + return !MACHINE_HAS_SEQ_INSN; } int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline = __ftrace_hotpatch_trampolines_start; - static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 }; + static const struct ftrace_insn orig = { .opc = 0xc004, .disp = 0 }; static struct ftrace_hotpatch_trampoline *trampoline; struct ftrace_hotpatch_trampoline **next_trampoline; struct ftrace_hotpatch_trampoline *trampolines_end; struct ftrace_hotpatch_trampoline tmp; struct ftrace_insn *insn; + struct ftrace_insn old; const char *shared; s32 disp; @@ -99,7 +96,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) if (mod) { next_trampoline = &mod->arch.next_trampoline; trampolines_end = mod->arch.trampolines_end; - shared = ftrace_plt; } #endif @@ -107,8 +103,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) return -ENOMEM; trampoline = (*next_trampoline)++; + if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old))) + return -EFAULT; /* Check for the compiler-generated fentry nop (brcl 0, .). */ - if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig)))) + if (WARN_ON_ONCE(memcmp(&orig, &old, sizeof(old)))) return -EINVAL; /* Generate the trampoline. */ @@ -144,8 +142,35 @@ static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrac return trampoline; } -int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, - unsigned long addr) +static inline struct ftrace_insn +ftrace_generate_branch_insn(unsigned long ip, unsigned long target) +{ + /* brasl r0,target or brcl 0,0 */ + return (struct ftrace_insn){ .opc = target ? 0xc005 : 0xc004, + .disp = target ? (target - ip) / 2 : 0 }; +} + +static int ftrace_patch_branch_insn(unsigned long ip, unsigned long old_target, + unsigned long target) +{ + struct ftrace_insn orig = ftrace_generate_branch_insn(ip, old_target); + struct ftrace_insn new = ftrace_generate_branch_insn(ip, target); + struct ftrace_insn old; + + if (!IS_ALIGNED(ip, 8)) + return -EINVAL; + if (copy_from_kernel_nofault(&old, (void *)ip, sizeof(old))) + return -EFAULT; + /* Verify that the to be replaced code matches what we expect. 
*/ + if (memcmp(&orig, &old, sizeof(old))) + return -EINVAL; + s390_kernel_write((void *)ip, &new, sizeof(new)); + return 0; +} + +static int ftrace_modify_trampoline_call(struct dyn_ftrace *rec, + unsigned long old_addr, + unsigned long addr) { struct ftrace_hotpatch_trampoline *trampoline; u64 old; @@ -161,6 +186,15 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, return 0; } +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + if (MACHINE_HAS_SEQ_INSN) + return ftrace_patch_branch_insn(rec->ip, old_addr, addr); + else + return ftrace_modify_trampoline_call(rec, old_addr, addr); +} + static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable) { u16 old; @@ -179,11 +213,14 @@ static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable) int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { - /* Expect brcl 0xf,... */ - return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false); + /* Expect brcl 0xf,... for the !MACHINE_HAS_SEQ_INSN case */ + if (MACHINE_HAS_SEQ_INSN) + return ftrace_patch_branch_insn(rec->ip, addr, 0); + else + return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false); } -int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +static int ftrace_make_trampoline_call(struct dyn_ftrace *rec, unsigned long addr) { struct ftrace_hotpatch_trampoline *trampoline; @@ -195,6 +232,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true); } +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + if (MACHINE_HAS_SEQ_INSN) + return ftrace_patch_branch_insn(rec->ip, 0, addr); + else + return ftrace_make_trampoline_call(rec, addr); +} + int ftrace_update_ftrace_func(ftrace_func_t func) { ftrace_func = func; @@ -215,25 +260,6 @@ void ftrace_arch_code_modify_post_process(void) text_poke_sync_lock(); } -#ifdef CONFIG_MODULES - -static int __init ftrace_plt_init(void) -{ - const char *start, *end; - - ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE); - if (!ftrace_plt) - panic("cannot allocate ftrace plt\n"); - - start = ftrace_shared_hotpatch_trampoline(&end); - memcpy(ftrace_plt, start, end - start); - set_memory_rox((unsigned long)ftrace_plt, 1); - return 0; -} -device_initcall(ftrace_plt_init); - -#endif /* CONFIG_MODULES */ - #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * Hook the return address and push it in the stack of return addresses @@ -264,26 +290,14 @@ NOKPROBE_SYMBOL(prepare_ftrace_return); */ int ftrace_enable_ftrace_graph_caller(void) { - int rc; - /* Expect brc 0xf,... */ - rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false); - if (rc) - return rc; - text_poke_sync_lock(); - return 0; + return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false); } int ftrace_disable_ftrace_graph_caller(void) { - int rc; - /* Expect brc 0x0,... 
*/ - rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true); - if (rc) - return rc; - text_poke_sync_lock(); - return 0; + return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true); } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/s390/kernel/ftrace.h b/arch/s390/kernel/ftrace.h index 7f75a9616406db..23337065f402d4 100644 --- a/arch/s390/kernel/ftrace.h +++ b/arch/s390/kernel/ftrace.h @@ -18,7 +18,5 @@ extern const char ftrace_shared_hotpatch_trampoline_br[]; extern const char ftrace_shared_hotpatch_trampoline_br_end[]; extern const char ftrace_shared_hotpatch_trampoline_exrl[]; extern const char ftrace_shared_hotpatch_trampoline_exrl_end[]; -extern const char ftrace_plt_template[]; -extern const char ftrace_plt_template_end[]; #endif /* _FTRACE_H */ diff --git a/arch/s390/kernel/hiperdispatch.c b/arch/s390/kernel/hiperdispatch.c new file mode 100644 index 00000000000000..2a99a216ab629e --- /dev/null +++ b/arch/s390/kernel/hiperdispatch.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2024 + */ + +#define KMSG_COMPONENT "hd" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +/* + * Hiperdispatch: + * Dynamically calculates the optimum number of high capacity COREs + * by considering the state the system is in. When hiperdispatch decides + * that a capacity update is necessary, it schedules a topology update. + * During topology updates the CPU capacities are always re-adjusted. + * + * There are two places where CPU capacities are being accessed within + * hiperdispatch. + * -> hiperdispatch's recurring work function reads CPU capacities to + * determine high capacity CPU count. + * -> during a topology update hiperdispatch's adjustment function + * updates CPU capacities. + * These two can run on different CPUs in parallel which can cause + * hiperdispatch to make wrong decisions. This can potentially cause + * some overhead by leading to extra rebuild_sched_domains() calls + * for correction. Access to capacities within hiperdispatch has to be + * serialized to prevent the overhead. + * + * Hiperdispatch decision making revolves around steal time. + * HD_STEAL_THRESHOLD value is taken as reference. Whenever steal time + * crosses the threshold value hiperdispatch falls back to giving high + * capacities to entitled CPUs. When steal time drops below the + * threshold boundary, hiperdispatch utilizes all CPUs by giving all + * of them high capacity. + * + * The theory behind HD_STEAL_THRESHOLD is related to the SMP thread + * performance. Comparing the throughput of: + * - single CORE, with N threads, running N tasks + * - N separate COREs running N tasks, + * using individual COREs for individual tasks yields better + * performance. This performance difference is roughly ~30% (can change + * between machine generations). + * + * Hiperdispatch tries to hint the scheduler to use individual COREs for + * each task, as long as steal time on those COREs is less than 30%, + * therefore delaying the throughput loss caused by using SMP threads.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#define HD_DELAY_FACTOR (4) +#define HD_DELAY_INTERVAL (HZ / 4) +#define HD_STEAL_THRESHOLD 30 +#define HD_STEAL_AVG_WEIGHT 16 + +static cpumask_t hd_vl_coremask; /* Mask containing all vertical low COREs */ +static cpumask_t hd_vmvl_cpumask; /* Mask containing vertical medium and low CPUs */ +static int hd_high_capacity_cores; /* Current CORE count with high capacity */ +static int hd_entitled_cores; /* Total vertical high and medium CORE count */ +static int hd_online_cores; /* Current online CORE count */ + +static unsigned long hd_previous_steal; /* Previous iteration's CPU steal timer total */ +static unsigned long hd_high_time; /* Total time spent while all cpus have high capacity */ +static unsigned long hd_low_time; /* Total time spent while vl cpus have low capacity */ +static atomic64_t hd_adjustments; /* Total occurrence count of hiperdispatch adjustments */ + +static unsigned int hd_steal_threshold = HD_STEAL_THRESHOLD; +static unsigned int hd_delay_factor = HD_DELAY_FACTOR; +static int hd_enabled; + +static void hd_capacity_work_fn(struct work_struct *work); +static DECLARE_DELAYED_WORK(hd_capacity_work, hd_capacity_work_fn); + +static int hd_set_hiperdispatch_mode(int enable) +{ + if (!MACHINE_HAS_TOPOLOGY) + enable = 0; + if (hd_enabled == enable) + return 0; + hd_enabled = enable; + return 1; +} + +void hd_reset_state(void) +{ + cpumask_clear(&hd_vl_coremask); + cpumask_clear(&hd_vmvl_cpumask); + hd_entitled_cores = 0; + hd_online_cores = 0; +} + +void hd_add_core(int cpu) +{ + const struct cpumask *siblings; + int polarization; + + hd_online_cores++; + polarization = smp_cpu_get_polarization(cpu); + siblings = topology_sibling_cpumask(cpu); + switch (polarization) { + case POLARIZATION_VH: + hd_entitled_cores++; + break; + case POLARIZATION_VM: + hd_entitled_cores++; + cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings); + break; + case POLARIZATION_VL: + cpumask_set_cpu(cpu, &hd_vl_coremask); + cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings); + break; + } +} + +/* Serialize update and read operations of debug counters. */ +static DEFINE_MUTEX(hd_counter_mutex); + +static void hd_update_times(void) +{ + static ktime_t prev; + ktime_t now; + + /* + * Check if hiperdispatch is active, if not set the prev to 0. + * This way it is possible to differentiate the first update iteration after + * enabling hiperdispatch. + */ + if (hd_entitled_cores == 0 || hd_enabled == 0) { + prev = ktime_set(0, 0); + return; + } + now = ktime_get(); + if (ktime_after(prev, 0)) { + if (hd_high_capacity_cores == hd_online_cores) + hd_high_time += ktime_ms_delta(now, prev); + else + hd_low_time += ktime_ms_delta(now, prev); + } + prev = now; +} + +static void hd_update_capacities(void) +{ + int cpu, upscaling_cores; + unsigned long capacity; + + upscaling_cores = hd_high_capacity_cores - hd_entitled_cores; + capacity = upscaling_cores > 0 ? 
CPU_CAPACITY_HIGH : CPU_CAPACITY_LOW; + hd_high_capacity_cores = hd_entitled_cores; + for_each_cpu(cpu, &hd_vl_coremask) { + smp_set_core_capacity(cpu, capacity); + if (capacity != CPU_CAPACITY_HIGH) + continue; + hd_high_capacity_cores++; + upscaling_cores--; + if (upscaling_cores == 0) + capacity = CPU_CAPACITY_LOW; + } +} + +void hd_disable_hiperdispatch(void) +{ + cancel_delayed_work_sync(&hd_capacity_work); + hd_high_capacity_cores = hd_online_cores; + hd_previous_steal = 0; +} + +int hd_enable_hiperdispatch(void) +{ + mutex_lock(&hd_counter_mutex); + hd_update_times(); + mutex_unlock(&hd_counter_mutex); + if (hd_enabled == 0) + return 0; + if (hd_entitled_cores == 0) + return 0; + if (hd_online_cores <= hd_entitled_cores) + return 0; + mod_delayed_work(system_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor); + hd_update_capacities(); + return 1; +} + +static unsigned long hd_steal_avg(unsigned long new) +{ + static unsigned long steal; + + steal = (steal * (HD_STEAL_AVG_WEIGHT - 1) + new) / HD_STEAL_AVG_WEIGHT; + return steal; +} + +static unsigned long hd_calculate_steal_percentage(void) +{ + unsigned long time_delta, steal_delta, steal, percentage; + static ktime_t prev; + int cpus, cpu; + ktime_t now; + + cpus = 0; + steal = 0; + percentage = 0; + for_each_cpu(cpu, &hd_vmvl_cpumask) { + steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + cpus++; + } + /* + * If there is no vertical medium and low CPUs steal time + * is 0 as vertical high CPUs shouldn't experience steal time. + */ + if (cpus == 0) + return percentage; + now = ktime_get(); + time_delta = ktime_to_ns(ktime_sub(now, prev)); + if (steal > hd_previous_steal && hd_previous_steal != 0) { + steal_delta = (steal - hd_previous_steal) * 100 / time_delta; + percentage = steal_delta / cpus; + } + hd_previous_steal = steal; + prev = now; + return percentage; +} + +static void hd_capacity_work_fn(struct work_struct *work) +{ + unsigned long steal_percentage, new_cores; + + mutex_lock(&smp_cpu_state_mutex); + /* + * If online cores are less or equal to entitled cores hiperdispatch + * does not need to make any adjustments, call a topology update to + * disable hiperdispatch. + * Normally this check is handled on topology update, but during cpu + * unhotplug, topology and cpu mask updates are done in reverse + * order, causing hd_enable_hiperdispatch() to get stale data. 
+ */ + if (hd_online_cores <= hd_entitled_cores) { + topology_schedule_update(); + mutex_unlock(&smp_cpu_state_mutex); + return; + } + steal_percentage = hd_steal_avg(hd_calculate_steal_percentage()); + if (steal_percentage < hd_steal_threshold) + new_cores = hd_online_cores; + else + new_cores = hd_entitled_cores; + if (hd_high_capacity_cores != new_cores) { + trace_s390_hd_rebuild_domains(hd_high_capacity_cores, new_cores); + hd_high_capacity_cores = new_cores; + atomic64_inc(&hd_adjustments); + topology_schedule_update(); + } + trace_s390_hd_work_fn(steal_percentage, hd_entitled_cores, hd_high_capacity_cores); + mutex_unlock(&smp_cpu_state_mutex); + schedule_delayed_work(&hd_capacity_work, HD_DELAY_INTERVAL); +} + +static int hiperdispatch_ctl_handler(const struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + int hiperdispatch; + int rc; + struct ctl_table ctl_entry = { + .procname = ctl->procname, + .data = &hiperdispatch, + .maxlen = sizeof(int), + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }; + + hiperdispatch = hd_enabled; + rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos); + if (rc < 0 || !write) + return rc; + mutex_lock(&smp_cpu_state_mutex); + if (hd_set_hiperdispatch_mode(hiperdispatch)) + topology_schedule_update(); + mutex_unlock(&smp_cpu_state_mutex); + return 0; +} + +static struct ctl_table hiperdispatch_ctl_table[] = { + { + .procname = "hiperdispatch", + .mode = 0644, + .proc_handler = hiperdispatch_ctl_handler, + }, +}; + +static ssize_t hd_steal_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%u\n", hd_steal_threshold); +} + +static ssize_t hd_steal_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + unsigned int val; + int rc; + + rc = kstrtouint(buf, 0, &val); + if (rc) + return rc; + if (val > 100) + return -ERANGE; + hd_steal_threshold = val; + return count; +} + +static DEVICE_ATTR_RW(hd_steal_threshold); + +static ssize_t hd_delay_factor_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%u\n", hd_delay_factor); +} + +static ssize_t hd_delay_factor_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + unsigned int val; + int rc; + + rc = kstrtouint(buf, 0, &val); + if (rc) + return rc; + if (!val) + return -ERANGE; + hd_delay_factor = val; + return count; +} + +static DEVICE_ATTR_RW(hd_delay_factor); + +static struct attribute *hd_attrs[] = { + &dev_attr_hd_steal_threshold.attr, + &dev_attr_hd_delay_factor.attr, + NULL, +}; + +static const struct attribute_group hd_attr_group = { + .name = "hiperdispatch", + .attrs = hd_attrs, +}; + +static int hd_greedy_time_get(void *unused, u64 *val) +{ + mutex_lock(&hd_counter_mutex); + hd_update_times(); + *val = hd_high_time; + mutex_unlock(&hd_counter_mutex); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(hd_greedy_time_fops, hd_greedy_time_get, NULL, "%llu\n"); + +static int hd_conservative_time_get(void *unused, u64 *val) +{ + mutex_lock(&hd_counter_mutex); + hd_update_times(); + *val = hd_low_time; + mutex_unlock(&hd_counter_mutex); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(hd_conservative_time_fops, hd_conservative_time_get, NULL, "%llu\n"); + +static int hd_adjustment_count_get(void *unused, u64 *val) +{ + *val = atomic64_read(&hd_adjustments); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(hd_adjustments_fops, hd_adjustment_count_get, NULL, "%llu\n"); + +static void 
__init hd_create_debugfs_counters(void) +{ + struct dentry *dir; + + dir = debugfs_create_dir("hiperdispatch", arch_debugfs_dir); + debugfs_create_file("conservative_time_ms", 0400, dir, NULL, &hd_conservative_time_fops); + debugfs_create_file("greedy_time_ms", 0400, dir, NULL, &hd_greedy_time_fops); + debugfs_create_file("adjustment_count", 0400, dir, NULL, &hd_adjustments_fops); +} + +static void __init hd_create_attributes(void) +{ + struct device *dev; + + dev = bus_get_dev_root(&cpu_subsys); + if (!dev) + return; + if (sysfs_create_group(&dev->kobj, &hd_attr_group)) + pr_warn("Unable to create hiperdispatch attribute group\n"); + put_device(dev); +} + +static int __init hd_init(void) +{ + if (IS_ENABLED(CONFIG_HIPERDISPATCH_ON)) { + hd_set_hiperdispatch_mode(1); + topology_schedule_update(); + } + if (!register_sysctl("s390", hiperdispatch_ctl_table)) + pr_warn("Failed to register s390.hiperdispatch sysctl attribute\n"); + hd_create_debugfs_counters(); + hd_create_attributes(); + return 0; +} +late_initcall(hd_init); diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1af5a08d72abe4..2639a3d12736a8 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -76,6 +76,7 @@ static const struct irq_class irqclass_sub_desc[] = { {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"}, + {.irq = IRQEXT_WTI, .name = "WTI", .desc = "[EXT] Warning Track"}, {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"}, {.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"}, diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 05c83505e97990..6295faf0987d86 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -152,7 +153,12 @@ void arch_arm_kprobe(struct kprobe *p) { struct swap_insn_args args = {.p = p, .arm_kprobe = 1}; - stop_machine_cpuslocked(swap_instruction, &args, NULL); + if (MACHINE_HAS_SEQ_INSN) { + swap_instruction(&args); + text_poke_sync(); + } else { + stop_machine_cpuslocked(swap_instruction, &args, NULL); + } } NOKPROBE_SYMBOL(arch_arm_kprobe); @@ -160,7 +166,12 @@ void arch_disarm_kprobe(struct kprobe *p) { struct swap_insn_args args = {.p = p, .arm_kprobe = 0}; - stop_machine_cpuslocked(swap_instruction, &args, NULL); + if (MACHINE_HAS_SEQ_INSN) { + swap_instruction(&args); + text_poke_sync(); + } else { + stop_machine_cpuslocked(swap_instruction, &args, NULL); + } } NOKPROBE_SYMBOL(arch_disarm_kprobe); diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index ae4d4fd9afcd1c..7e267ef63a7fef 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -9,6 +9,7 @@ #include #include #include +#include #define STACK_FRAME_SIZE_PTREGS (STACK_FRAME_OVERHEAD + __PT_SIZE) #define STACK_PTREGS (STACK_FRAME_OVERHEAD) @@ -88,7 +89,7 @@ SYM_CODE_START(ftrace_caller) SYM_CODE_END(ftrace_caller) SYM_CODE_START(ftrace_common) -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +#ifdef MARCH_HAS_Z196_FEATURES aghik %r2,%r0,-MCOUNT_INSN_SIZE lgrl %r4,function_trace_op lgrl %r1,ftrace_func @@ -115,7 +116,7 @@ SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL) .Lftrace_graph_caller_end: #endif lg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15) -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +#ifdef 
MARCH_HAS_Z196_FEATURES ltg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15) locgrz %r1,%r0 #else diff --git a/arch/s390/kernel/numa.c b/arch/s390/kernel/numa.c index 23ab9f02f27873..ddc1448ea2e112 100644 --- a/arch/s390/kernel/numa.c +++ b/arch/s390/kernel/numa.c @@ -14,9 +14,6 @@ #include #include -struct pglist_data *node_data[MAX_NUMNODES]; -EXPORT_SYMBOL(node_data); - void __init numa_setup(void) { int nid; diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 6968be98af1179..e2e0aa463fbd1e 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -22,6 +22,10 @@ #include #include +/* Perf PMU definitions for the counter facility */ +#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */ +#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */ + enum cpumf_ctr_set { CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */ CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */ @@ -1694,7 +1698,6 @@ static const struct file_operations cfset_fops = { .release = cfset_release, .unlocked_ioctl = cfset_ioctl, .compat_ioctl = cfset_ioctl, - .llseek = no_llseek }; static struct miscdevice cfset_dev = { diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 736c1d9632dd55..5b765e3ccf0cad 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -24,6 +24,22 @@ #include #include +/* Perf PMU definitions for the sampling facility */ +#define PERF_CPUM_SF_MAX_CTR 2 +#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ +#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ +#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ +#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ +#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */ + +#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) +#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) +#define TEAR_REG(hwc) ((hwc)->last_tag) +#define SAMPL_RATE(hwc) ((hwc)->event_base) +#define SAMPL_FLAGS(hwc) ((hwc)->config_base) +#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) +#define SAMPL_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) + /* Minimum number of sample-data-block-tables: * At least one table is required for the sampling buffer structure. * A single table contains up to 511 pointers to sample-data-blocks. 
@@ -113,17 +129,6 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi, return USEC_PER_SEC * qsi->cpu_speed / rate; } -/* Return TOD timestamp contained in an trailer entry */ -static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te) -{ - /* TOD in STCKE format */ - if (te->header.t) - return *((unsigned long long *)&te->timestamp[1]); - - /* TOD in STCK format */ - return *((unsigned long long *)&te->timestamp[0]); -} - /* Return pointer to trailer entry of an sample data block */ static inline struct hws_trailer_entry *trailer_entry_ptr(unsigned long v) { @@ -154,12 +159,12 @@ static inline unsigned long *get_next_sdbt(unsigned long *s) /* * sf_disable() - Switch off sampling facility */ -static int sf_disable(void) +static void sf_disable(void) { struct hws_lsctl_request_block sreq; memset(&sreq, 0, sizeof(sreq)); - return lsctl(&sreq); + lsctl(&sreq); } /* @@ -208,8 +213,6 @@ static void free_sampling_buffer(struct sf_buffer *sfb) } } - debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__, - (unsigned long)sfb->sdbt); memset(sfb, 0, sizeof(*sfb)); } @@ -265,10 +268,8 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, * the sampling buffer origin. */ if (sfb->sdbt != get_next_sdbt(tail)) { - debug_sprintf_event(sfdbg, 3, "%s: " - "sampling buffer is not linked: origin %#lx" - " tail %#lx\n", __func__, - (unsigned long)sfb->sdbt, + debug_sprintf_event(sfdbg, 3, "%s buffer not linked origin %#lx tail %#lx\n", + __func__, (unsigned long)sfb->sdbt, (unsigned long)tail); return -EINVAL; } @@ -318,9 +319,6 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, *tail = virt_to_phys(sfb->sdbt) + 1; sfb->tail = tail; - debug_sprintf_event(sfdbg, 4, "%s: new buffer" - " settings: sdbt %lu sdb %lu\n", __func__, - sfb->num_sdbt, sfb->num_sdb); return rc; } @@ -357,15 +355,8 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) /* Allocate requested number of sample-data-blocks */ rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); - if (rc) { + if (rc) free_sampling_buffer(sfb); - debug_sprintf_event(sfdbg, 4, "%s: " - "realloc_sampling_buffer failed with rc %i\n", - __func__, rc); - } else - debug_sprintf_event(sfdbg, 4, - "%s: tear %#lx dear %#lx\n", __func__, - (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt); return rc; } @@ -377,8 +368,8 @@ static void sfb_set_limits(unsigned long min, unsigned long max) CPUM_SF_MAX_SDB = max; memset(&si, 0, sizeof(si)); - if (!qsi(&si)) - CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); + qsi(&si); + CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); } static unsigned long sfb_max_limit(struct hw_perf_event *hwc) @@ -397,12 +388,6 @@ static unsigned long sfb_pending_allocs(struct sf_buffer *sfb, return 0; } -static int sfb_has_pending_allocs(struct sf_buffer *sfb, - struct hw_perf_event *hwc) -{ - return sfb_pending_allocs(sfb, hwc) > 0; -} - static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) { /* Limit the number of SDBs to not exceed the maximum */ @@ -426,7 +411,6 @@ static void deallocate_buffers(struct cpu_hw_sf *cpuhw) static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) { unsigned long n_sdb, freq; - size_t sample_size; /* Calculate sampling buffers using 4K pages * @@ -457,7 +441,6 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up * to 511 SDBs). 
*/ - sample_size = sizeof(struct hws_basic_entry); freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000); @@ -473,12 +456,6 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) if (sf_buffer_available(cpuhw)) return 0; - debug_sprintf_event(sfdbg, 3, - "%s: rate %lu f %lu sdb %lu/%lu" - " sample_size %lu cpuhw %p\n", __func__, - SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), - sample_size, cpuhw); - return alloc_sampling_buffer(&cpuhw->sfb, sfb_pending_allocs(&cpuhw->sfb, hwc)); } @@ -535,8 +512,6 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, if (num) sfb_account_allocs(num, hwc); - debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n", - __func__, OVERFLOW_REG(hwc), ratio, num); OVERFLOW_REG(hwc) = 0; } @@ -554,13 +529,11 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, static void extend_sampling_buffer(struct sf_buffer *sfb, struct hw_perf_event *hwc) { - unsigned long num, num_old; - int rc; + unsigned long num; num = sfb_pending_allocs(sfb, hwc); if (!num) return; - num_old = sfb->num_sdb; /* Disable the sampling facility to reset any states and also * clear pending measurement alerts. @@ -572,51 +545,33 @@ static void extend_sampling_buffer(struct sf_buffer *sfb, * called by perf. Because this is a reallocation, it is fine if the * new SDB-request cannot be satisfied immediately. */ - rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); - if (rc) - debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n", - __func__, rc); - - if (sfb_has_pending_allocs(sfb, hwc)) - debug_sprintf_event(sfdbg, 5, "%s: " - "req %lu alloc %lu remaining %lu\n", - __func__, num, sfb->num_sdb - num_old, - sfb_pending_allocs(sfb, hwc)); + realloc_sampling_buffer(sfb, num, GFP_ATOMIC); } /* Number of perf events counting hardware events */ -static atomic_t num_events; +static refcount_t num_events; /* Used to avoid races in calling reserve/release_cpumf_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); #define PMC_INIT 0 #define PMC_RELEASE 1 -#define PMC_FAILURE 2 static void setup_pmc_cpu(void *flags) { - struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf); - int err = 0; + struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); switch (*((int *)flags)) { case PMC_INIT: - memset(cpusf, 0, sizeof(*cpusf)); - err = qsi(&cpusf->qsi); - if (err) - break; - cpusf->flags |= PMU_F_RESERVED; - err = sf_disable(); + memset(cpuhw, 0, sizeof(*cpuhw)); + qsi(&cpuhw->qsi); + cpuhw->flags |= PMU_F_RESERVED; + sf_disable(); break; case PMC_RELEASE: - cpusf->flags &= ~PMU_F_RESERVED; - err = sf_disable(); - if (!err) - deallocate_buffers(cpusf); + cpuhw->flags &= ~PMU_F_RESERVED; + sf_disable(); + deallocate_buffers(cpuhw); break; } - if (err) { - *((int *)flags) |= PMC_FAILURE; - pr_err("Switching off the sampling facility failed with rc %i\n", err); - } } static void release_pmc_hardware(void) @@ -627,27 +582,19 @@ static void release_pmc_hardware(void) on_each_cpu(setup_pmc_cpu, &flags, 1); } -static int reserve_pmc_hardware(void) +static void reserve_pmc_hardware(void) { int flags = PMC_INIT; on_each_cpu(setup_pmc_cpu, &flags, 1); - if (flags & PMC_FAILURE) { - release_pmc_hardware(); - return -ENODEV; - } irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); - - return 0; } static void hw_perf_event_destroy(struct perf_event *event) { /* Release PMC if this is the last perf event */ - if (!atomic_add_unless(&num_events, -1, 1)) { - mutex_lock(&pmc_reserve_mutex); - if 
(atomic_dec_return(&num_events) == 0) - release_pmc_hardware(); + if (refcount_dec_and_mutex_lock(&num_events, &pmc_reserve_mutex)) { + release_pmc_hardware(); mutex_unlock(&pmc_reserve_mutex); } } @@ -751,9 +698,6 @@ static unsigned long getrate(bool freq, unsigned long sample, */ if (sample_rate_to_freq(si, rate) > sysctl_perf_event_sample_rate) { - debug_sprintf_event(sfdbg, 1, "%s: " - "Sampling rate exceeds maximum " - "perf sample rate\n", __func__); rate = 0; } } @@ -798,9 +742,6 @@ static int __hw_perf_event_init_rate(struct perf_event *event, attr->sample_period = rate; SAMPL_RATE(hwc) = rate; hw_init_period(hwc, SAMPL_RATE(hwc)); - debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n", - __func__, event->cpu, event->attr.sample_period, - event->attr.freq, SAMPLE_FREQ_MODE(hwc)); return 0; } @@ -810,23 +751,17 @@ static int __hw_perf_event_init(struct perf_event *event) struct hws_qsi_info_block si; struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; - int cpu, err; + int cpu, err = 0; /* Reserve CPU-measurement sampling facility */ - err = 0; - if (!atomic_inc_not_zero(&num_events)) { - mutex_lock(&pmc_reserve_mutex); - if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) - err = -EBUSY; - else - atomic_inc(&num_events); - mutex_unlock(&pmc_reserve_mutex); + mutex_lock(&pmc_reserve_mutex); + if (!refcount_inc_not_zero(&num_events)) { + reserve_pmc_hardware(); + refcount_set(&num_events, 1); } + mutex_unlock(&pmc_reserve_mutex); event->destroy = hw_perf_event_destroy; - if (err) - goto out; - /* Access per-CPU sampling information (query sampling info) */ /* * The event->cpu value can be -1 to count on every CPU, for example, @@ -838,9 +773,9 @@ static int __hw_perf_event_init(struct perf_event *event) */ memset(&si, 0, sizeof(si)); cpuhw = NULL; - if (event->cpu == -1) + if (event->cpu == -1) { qsi(&si); - else { + } else { /* Event is pinned to a particular CPU, retrieve the per-CPU * sampling structure for accessing the CPU-specific QSI. */ @@ -881,10 +816,6 @@ static int __hw_perf_event_init(struct perf_event *event) if (err) goto out; - /* Initialize sample data overflow accounting */ - hwc->extra_reg.reg = REG_OVERFLOW; - OVERFLOW_REG(hwc) = 0; - /* Use AUX buffer. No need to allocate it by ourself */ if (attr->config == PERF_EVENT_CPUM_SF_DIAG) return 0; @@ -1007,7 +938,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu) extend_sampling_buffer(&cpuhw->sfb, hwc); } /* Rate may be adjusted with ioctl() */ - cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); + cpuhw->lsctl.interval = SAMPL_RATE(hwc); } /* (Re)enable the PMU and sampling facility */ @@ -1023,12 +954,6 @@ static void cpumsf_pmu_enable(struct pmu *pmu) /* Load current program parameter */ lpp(&get_lowcore()->lpp); - - debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i " - "interval %#lx tear %#lx dear %#lx\n", __func__, - cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, - cpuhw->lsctl.cd, cpuhw->lsctl.interval, - cpuhw->lsctl.tear, cpuhw->lsctl.dear); } static void cpumsf_pmu_disable(struct pmu *pmu) @@ -1055,21 +980,18 @@ static void cpumsf_pmu_disable(struct pmu *pmu) return; } - /* Save state of TEAR and DEAR register contents */ - err = qsi(&si); - if (!err) { - /* TEAR/DEAR values are valid only if the sampling facility is - * enabled. Note that cpumsf_pmu_disable() might be called even - * for a disabled sampling facility because cpumsf_pmu_enable() - * controls the enable/disable state. 
- */ - if (si.es) { - cpuhw->lsctl.tear = si.tear; - cpuhw->lsctl.dear = si.dear; - } - } else - debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n", - __func__, err); + /* + * Save state of TEAR and DEAR register contents. + * TEAR/DEAR values are valid only if the sampling facility is + * enabled. Note that cpumsf_pmu_disable() might be called even + * for a disabled sampling facility because cpumsf_pmu_enable() + * controls the enable/disable state. + */ + qsi(&si); + if (si.es) { + cpuhw->lsctl.tear = si.tear; + cpuhw->lsctl.dear = si.dear; + } cpuhw->flags &= ~PMU_F_ENABLED; } @@ -1235,11 +1157,6 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, /* Count discarded samples */ *overflow += 1; } else { - debug_sprintf_event(sfdbg, 4, - "%s: Found unknown" - " sampling data entry: te->f %i" - " basic.def %#4x (%p)\n", __func__, - te->header.f, sample->def, sample); /* Sample slot is not yet written or other record. * * This condition can occur if the buffer was reused @@ -1284,7 +1201,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) * AUX buffer is used when in diagnostic sampling mode. * No perf events/samples are created. */ - if (SAMPL_DIAG_MODE(&event->hw)) + if (SAMPL_DIAG_MODE(hwc)) return; sdbt = (unsigned long *)TEAR_REG(hwc); @@ -1309,13 +1226,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) */ sampl_overflow += te->header.overflow; - /* Timestamps are valid for full sample-data-blocks only */ - debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx/%#lx " - "overflow %llu timestamp %#llx\n", - __func__, sdb, (unsigned long)sdbt, - te->header.overflow, - (te->header.f) ? trailer_timestamp(te) : 0ULL); - /* Collect all samples from a single sample-data-block and * flag if an (perf) event overflow happened. If so, the PMU * is stopped and remaining samples will be discarded. @@ -1340,7 +1250,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) sdbt = get_next_sdbt(sdbt); /* Update event hardware registers */ - TEAR_REG(hwc) = (unsigned long) sdbt; + TEAR_REG(hwc) = (unsigned long)sdbt; /* Stop processing sample-data if all samples of the current * sample-data-block were flushed even if it was not full. @@ -1362,19 +1272,8 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) * are dropped. * Slightly increase the interval to avoid hitting this limit. 
*/ - if (event_overflow) { + if (event_overflow) SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10); - debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n", - __func__, - DIV_ROUND_UP(SAMPL_RATE(hwc), 10)); - } - - if (sampl_overflow || event_overflow) - debug_sprintf_event(sfdbg, 4, "%s: " - "overflows: sample %llu event %llu" - " total %llu num_sdb %llu\n", - __func__, sampl_overflow, event_overflow, - OVERFLOW_REG(hwc), num_sdb); } static inline unsigned long aux_sdb_index(struct aux_buffer *aux, @@ -1442,9 +1341,6 @@ static void aux_output_end(struct perf_output_handle *handle) /* Remove alert indicators in the buffer */ te = aux_sdb_trailer(aux, aux->alert_mark); te->header.a = 0; - - debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n", - __func__, i, range_scan, aux->head); } /* @@ -1463,7 +1359,7 @@ static int aux_output_begin(struct perf_output_handle *handle, unsigned long range, i, range_scan, idx, head, base, offset; struct hws_trailer_entry *te; - if (WARN_ON_ONCE(handle->head & ~PAGE_MASK)) + if (handle->head & ~PAGE_MASK) return -EINVAL; aux->head = handle->head >> PAGE_SHIFT; @@ -1475,10 +1371,6 @@ static int aux_output_begin(struct perf_output_handle *handle, * SDBs between aux->head and aux->empty_mark are already ready * for new data. range_scan is num of SDBs not within them. */ - debug_sprintf_event(sfdbg, 6, - "%s: range %ld head %ld alert %ld empty %ld\n", - __func__, range, aux->head, aux->alert_mark, - aux->empty_mark); if (range > aux_sdb_num_empty(aux)) { range_scan = range - aux_sdb_num_empty(aux); idx = aux->empty_mark + 1; @@ -1504,12 +1396,6 @@ static int aux_output_begin(struct perf_output_handle *handle, cpuhw->lsctl.tear = virt_to_phys((void *)base) + offset * sizeof(unsigned long); cpuhw->lsctl.dear = virt_to_phys((void *)aux->sdb_index[head]); - debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld " - "index %ld tear %#lx dear %#lx\n", __func__, - aux->head, aux->alert_mark, aux->empty_mark, - head / CPUM_SF_SDB_PER_TABLE, - cpuhw->lsctl.tear, cpuhw->lsctl.dear); - return 0; } @@ -1571,14 +1457,11 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, unsigned long long *overflow) { - unsigned long i, range_scan, idx, idx_old; union hws_trailer_header old, prev, new; + unsigned long i, range_scan, idx; unsigned long long orig_overflow; struct hws_trailer_entry *te; - debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld " - "empty %ld\n", __func__, range, aux->head, - aux->alert_mark, aux->empty_mark); if (range <= aux_sdb_num_empty(aux)) /* * No need to scan. All SDBs in range are marked as empty. @@ -1601,7 +1484,7 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, * indicator fall into this range, set it. 
*/ range_scan = range - aux_sdb_num_empty(aux); - idx_old = idx = aux->empty_mark + 1; + idx = aux->empty_mark + 1; for (i = 0; i < range_scan; i++, idx++) { te = aux_sdb_trailer(aux, idx); prev.val = READ_ONCE_ALIGNED_128(te->header.val); @@ -1623,9 +1506,6 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, /* Update empty_mark to new position */ aux->empty_mark = aux->head + range - 1; - debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld " - "empty %ld\n", __func__, range_scan, idx_old, - idx - 1, aux->empty_mark); return true; } @@ -1642,12 +1522,12 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) unsigned long num_sdb; aux = perf_get_aux(handle); - if (WARN_ON_ONCE(!aux)) + if (!aux) return; /* Inform user space new data arrived */ size = aux_sdb_num_alert(aux) << PAGE_SHIFT; - debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__, + debug_sprintf_event(sfdbg, 6, "%s #alert %ld\n", __func__, size >> PAGE_SHIFT); perf_aux_output_end(handle, size); @@ -1661,7 +1541,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) num_sdb); break; } - if (WARN_ON_ONCE(!aux)) + if (!aux) return; /* Update head and alert_mark to new position */ @@ -1681,23 +1561,11 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) perf_aux_output_end(&cpuhw->handle, size); pr_err("Sample data caused the AUX buffer with %lu " "pages to overflow\n", aux->sfb.num_sdb); - debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld " - "overflow %lld\n", __func__, - aux->head, range, overflow); } else { size = aux_sdb_num_alert(aux) << PAGE_SHIFT; perf_aux_output_end(&cpuhw->handle, size); - debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld " - "already full, try another\n", - __func__, - aux->head, aux->alert_mark); } } - - if (done) - debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld " - "empty %ld\n", __func__, aux->head, - aux->alert_mark, aux->empty_mark); } /* @@ -1719,8 +1587,6 @@ static void aux_buffer_free(void *data) kfree(aux->sdbt_index); kfree(aux->sdb_index); kfree(aux); - - debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt); } static void aux_sdb_init(unsigned long sdb) @@ -1828,9 +1694,6 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages, */ aux->empty_mark = sfb->num_sdb - 1; - debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__, - sfb->num_sdbt, sfb->num_sdb); - return aux; no_sdbt: @@ -1863,8 +1726,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) memset(&si, 0, sizeof(si)); if (event->cpu == -1) { - if (qsi(&si)) - return -ENODEV; + qsi(&si); } else { /* Event is pinned to a particular CPU, retrieve the per-CPU * sampling structure for accessing the CPU-specific QSI. 
@@ -1874,7 +1736,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) si = cpuhw->qsi; } - do_freq = !!SAMPLE_FREQ_MODE(&event->hw); + do_freq = !!SAMPL_FREQ_MODE(&event->hw); rate = getrate(do_freq, value, &si); if (!rate) return -EINVAL; @@ -1882,10 +1744,6 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) event->attr.sample_period = rate; SAMPL_RATE(&event->hw) = rate; hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); - debug_sprintf_event(sfdbg, 4, "%s:" - " cpu %d value %#llx period %#llx freq %d\n", - __func__, event->cpu, value, - event->attr.sample_period, do_freq); return 0; } @@ -1896,12 +1754,8 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags) { struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + if (!(event->hw.state & PERF_HES_STOPPED)) return; - - if (flags & PERF_EF_RELOAD) - WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); - perf_pmu_disable(event->pmu); event->hw.state = 0; cpuhw->lsctl.cs = 1; @@ -1936,7 +1790,7 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); struct aux_buffer *aux; - int err; + int err = 0; if (cpuhw->flags & PMU_F_IN_USE) return -EAGAIN; @@ -1944,7 +1798,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags) if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt) return -EINVAL; - err = 0; perf_pmu_disable(event->pmu); event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; @@ -2115,7 +1968,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code, /* Report measurement alerts only for non-PRA codes */ if (alert != CPU_MF_INT_SF_PRA) - debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__, + debug_sprintf_event(sfdbg, 6, "%s alert %#x\n", __func__, alert); /* Sampling authorization change request */ @@ -2143,7 +1996,7 @@ static int cpusf_pmu_setup(unsigned int cpu, int flags) /* Ignore the notification if no events are scheduled on the PMU. * This might be racy... 
*/ - if (!atomic_read(&num_events)) + if (!refcount_read(&num_events)) return 0; local_irq_disable(); @@ -2205,10 +2058,12 @@ static const struct kernel_param_ops param_ops_sfb_size = { .get = param_get_sfb_size, }; -#define RS_INIT_FAILURE_QSI 0x0001 -#define RS_INIT_FAILURE_BSDES 0x0002 -#define RS_INIT_FAILURE_ALRT 0x0003 -#define RS_INIT_FAILURE_PERF 0x0004 +enum { + RS_INIT_FAILURE_BSDES = 2, /* Bad basic sampling size */ + RS_INIT_FAILURE_ALRT = 3, /* IRQ registration failure */ + RS_INIT_FAILURE_PERF = 4 /* PMU registration failure */ +}; + static void __init pr_cpumsf_err(unsigned int reason) { pr_err("Sampling facility support for perf is not available: " @@ -2224,11 +2079,7 @@ static int __init init_cpum_sampling_pmu(void) return -ENODEV; memset(&si, 0, sizeof(si)); - if (qsi(&si)) { - pr_cpumsf_err(RS_INIT_FAILURE_QSI); - return -ENODEV; - } - + qsi(&si); if (!si.as && !si.ad) return -ENODEV; diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c index 2f5a20e300f6f3..fa732545426611 100644 --- a/arch/s390/kernel/perf_pai_crypto.c +++ b/arch/s390/kernel/perf_pai_crypto.c @@ -738,6 +738,22 @@ static const char * const paicrypt_ctrnames[] = { [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY", [155] = "IBM_RESERVED_155", [156] = "IBM_RESERVED_156", + [157] = "KM_FULL_XTS_AES_128", + [158] = "KM_FULL_XTS_AES_256", + [159] = "KM_FULL_XTS_ENCRYPTED_AES_128", + [160] = "KM_FULL_XTS_ENCRYPTED_AES_256", + [161] = "KMAC_HMAC_SHA_224", + [162] = "KMAC_HMAC_SHA_256", + [163] = "KMAC_HMAC_SHA_384", + [164] = "KMAC_HMAC_SHA_512", + [165] = "KMAC_HMAC_ENCRYPTED_SHA_224", + [166] = "KMAC_HMAC_ENCRYPTED_SHA_256", + [167] = "KMAC_HMAC_ENCRYPTED_SHA_384", + [168] = "KMAC_HMAC_ENCRYPTED_SHA_512", + [169] = "PCKMO_ENCRYPT_HMAC_512_KEY", + [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY", + [171] = "PCKMO_ENCRYPT_AES_XTS_128", + [172] = "PCKMO_ENCRYPT_AES_XTS_256", }; static void __init attr_event_free(struct attribute **attrs, int num) diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c index 6295531b39a209..7f462bef1fc075 100644 --- a/arch/s390/kernel/perf_pai_ext.c +++ b/arch/s390/kernel/perf_pai_ext.c @@ -635,6 +635,15 @@ static const char * const paiext_ctrnames[] = { [25] = "NNPA_1MFRAME", [26] = "NNPA_2GFRAME", [27] = "NNPA_ACCESSEXCEPT", + [28] = "NNPA_TRANSFORM", + [29] = "NNPA_GELU", + [30] = "NNPA_MOMENTS", + [31] = "NNPA_LAYERNORM", + [32] = "NNPA_MATMUL_OP_BCAST1", + [33] = "NNPA_SQRT", + [34] = "NNPA_INVSQRT", + [35] = "NNPA_NORM", + [36] = "NNPA_REDUCE", }; static void __init attr_event_free(struct attribute **attrs, int num) diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 6c2cb345402ff6..e48013cd832c16 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -30,9 +30,9 @@ #include #include #include +#include #include #include -#include #include "entry.h" /* diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index fbba37ec53cf7d..4df56fdb248807 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -671,6 +671,25 @@ int smp_cpu_get_polarization(int cpu) return per_cpu(pcpu_devices, cpu).polarization; } +void smp_cpu_set_capacity(int cpu, unsigned long val) +{ + per_cpu(pcpu_devices, cpu).capacity = val; +} + +unsigned long smp_cpu_get_capacity(int cpu) +{ + return per_cpu(pcpu_devices, cpu).capacity; +} + +void smp_set_core_capacity(int cpu, unsigned long val) +{ + int i; + + cpu = smp_get_base_cpu(cpu); + for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++) + 
smp_cpu_set_capacity(i, val); +} + int smp_cpu_get_cpu_address(int cpu) { return per_cpu(pcpu_devices, cpu).address; @@ -719,6 +738,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, else pcpu->state = CPU_STATE_STANDBY; smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH); set_cpu_present(cpu, true); if (!early && arch_register_cpu(cpu)) set_cpu_present(cpu, false); @@ -961,6 +981,7 @@ void __init smp_prepare_boot_cpu(void) ipl_pcpu->state = CPU_STATE_CONFIGURED; lc->pcpu = (unsigned long)ipl_pcpu; smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); + smp_cpu_set_capacity(0, CPU_CAPACITY_HIGH); } void __init smp_setup_processor_id(void) diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 640363b2a1059e..9f59837d159e0c 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -162,22 +162,3 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, { arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false); } - -unsigned long return_address(unsigned int n) -{ - struct unwind_state state; - unsigned long addr; - - /* Increment to skip current stack entry */ - n++; - - unwind_for_each_frame(&state, NULL, NULL, 0) { - addr = unwind_get_return_address(&state); - if (!addr) - break; - if (!n--) - return addr; - } - return 0; -} -EXPORT_SYMBOL_GPL(return_address); diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index 2be30a96696a38..88055f58fbdab5 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -498,7 +498,6 @@ static const struct file_operations stsi_##fc##_##s1##_##s2##_fs_ops = { \ .open = stsi_open_##fc##_##s1##_##s2, \ .release = stsi_release, \ .read = stsi_read, \ - .llseek = no_llseek, \ }; static int stsi_release(struct inode *inode, struct file *file) diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 22029ecae1c554..813e5da9a9737e 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #define PTF_HORIZONTAL (0UL) @@ -47,6 +48,7 @@ static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED; static void set_topology_timer(void); static void topology_work_fn(struct work_struct *work); static struct sysinfo_15_1_x *tl_info; +static int cpu_management; static DECLARE_WORK(topology_work, topology_work_fn); @@ -144,6 +146,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core, cpumask_set_cpu(cpu, &book->mask); cpumask_set_cpu(cpu, &socket->mask); smp_cpu_set_polarization(cpu, tl_core->pp); + smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH); } } } @@ -270,6 +273,7 @@ void update_cpu_masks(void) topo->drawer_id = id; } } + hd_reset_state(); for_each_online_cpu(cpu) { topo = &cpu_topology[cpu]; pkg_first = cpumask_first(&topo->core_mask); @@ -278,8 +282,10 @@ void update_cpu_masks(void) for_each_cpu(sibling, &topo->core_mask) { topo_sibling = &cpu_topology[sibling]; smt_first = cpumask_first(&topo_sibling->thread_mask); - if (sibling == smt_first) + if (sibling == smt_first) { topo_package->booted_cores++; + hd_add_core(sibling); + } } } else { topo->booted_cores = topo_package->booted_cores; @@ -303,8 +309,10 @@ static void __arch_update_dedicated_flag(void *arg) static int __arch_update_cpu_topology(void) { struct sysinfo_15_1_x *info = tl_info; - int rc = 0; + int rc, hd_status; + hd_status = 0; + rc = 0; mutex_lock(&smp_cpu_state_mutex); if (MACHINE_HAS_TOPOLOGY) { rc = 1; @@ 
-314,7 +322,11 @@ static int __arch_update_cpu_topology(void) update_cpu_masks(); if (!MACHINE_HAS_TOPOLOGY) topology_update_polarization_simple(); + if (cpu_management == 1) + hd_status = hd_enable_hiperdispatch(); mutex_unlock(&smp_cpu_state_mutex); + if (hd_status == 0) + hd_disable_hiperdispatch(); return rc; } @@ -374,7 +386,24 @@ void topology_expect_change(void) set_topology_timer(); } -static int cpu_management; +static int set_polarization(int polarization) +{ + int rc = 0; + + cpus_read_lock(); + mutex_lock(&smp_cpu_state_mutex); + if (cpu_management == polarization) + goto out; + rc = topology_set_cpu_management(polarization); + if (rc) + goto out; + cpu_management = polarization; + topology_expect_change(); +out: + mutex_unlock(&smp_cpu_state_mutex); + cpus_read_unlock(); + return rc; +} static ssize_t dispatching_show(struct device *dev, struct device_attribute *attr, @@ -400,19 +429,7 @@ static ssize_t dispatching_store(struct device *dev, return -EINVAL; if (val != 0 && val != 1) return -EINVAL; - rc = 0; - cpus_read_lock(); - mutex_lock(&smp_cpu_state_mutex); - if (cpu_management == val) - goto out; - rc = topology_set_cpu_management(val); - if (rc) - goto out; - cpu_management = val; - topology_expect_change(); -out: - mutex_unlock(&smp_cpu_state_mutex); - cpus_read_unlock(); + rc = set_polarization(val); return rc ? rc : count; } static DEVICE_ATTR_RW(dispatching); @@ -624,12 +641,37 @@ static int topology_ctl_handler(const struct ctl_table *ctl, int write, return rc; } +static int polarization_ctl_handler(const struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + int polarization; + int rc; + struct ctl_table ctl_entry = { + .procname = ctl->procname, + .data = &polarization, + .maxlen = sizeof(int), + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }; + + polarization = cpu_management; + rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos); + if (rc < 0 || !write) + return rc; + return set_polarization(polarization); +} + static struct ctl_table topology_ctl_table[] = { { .procname = "topology", .mode = 0644, .proc_handler = topology_ctl_handler, }, + { + .procname = "polarization", + .mode = 0644, + .proc_handler = polarization_ctl_handler, + }, }; static int __init topology_init(void) @@ -642,6 +684,8 @@ static int __init topology_init(void) set_topology_timer(); else topology_update_polarization_simple(); + if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY_VERTICAL)) + set_polarization(1); register_sysctl("s390", topology_ctl_table); dev_root = bus_get_dev_root(&cpu_subsys); diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index 36db065c7cf75f..9646f773208a11 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -462,9 +463,9 @@ EXPORT_SYMBOL_GPL(gmap_convert_to_secure); int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr) { struct vm_area_struct *vma; + struct folio_walk fw; unsigned long uaddr; struct folio *folio; - struct page *page; int rc; rc = -EFAULT; @@ -483,11 +484,15 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr) goto out; rc = 0; - /* we take an extra reference here */ - page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET); - if (IS_ERR_OR_NULL(page)) + folio = folio_walk_start(&fw, vma, uaddr, 0); + if (!folio) goto out; - folio = page_folio(page); + /* + * See gmap_make_secure(): large folios cannot be secure. Small + * folio implies FW_LEVEL_PTE. 
+ */ + if (folio_test_large(folio) || !pte_write(fw.pte)) + goto out_walk_end; rc = uv_destroy_folio(folio); /* * Fault handlers can race; it is possible that two CPUs will fault @@ -500,7 +505,8 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr) */ if (rc) rc = uv_convert_from_secure_folio(folio); - folio_put(folio); +out_walk_end: + folio_walk_end(&fw, vma); out: mmap_read_unlock(gmap->mm); return rc; @@ -548,11 +554,6 @@ int arch_make_folio_accessible(struct folio *folio) } EXPORT_SYMBOL_GPL(arch_make_folio_accessible); -int arch_make_page_accessible(struct page *page) -{ - return arch_make_folio_accessible(page_folio(page)); -} -EXPORT_SYMBOL_GPL(arch_make_page_accessible); static ssize_t uv_query_facilities(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 2f967ac2b8e3e4..598b512cde012b 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -12,12 +12,15 @@ #include #include #include +#include #include #include #include #include #include #include +#include +#include #include extern char vdso64_start[], vdso64_end[]; @@ -29,12 +32,6 @@ static union vdso_data_store vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = vdso_data_store.data; -enum vvar_pages { - VVAR_DATA_PAGE_OFFSET, - VVAR_TIMENS_PAGE_OFFSET, - VVAR_NR_PAGES, -}; - #ifdef CONFIG_TIME_NS struct vdso_data *arch_get_vdso_data(void *vvar_page) { @@ -250,8 +247,25 @@ static struct page ** __init vdso_setup_pages(void *start, void *end) return pagelist; } +static void vdso_apply_alternatives(void) +{ + const struct elf64_shdr *alt, *shdr; + struct alt_instr *start, *end; + const struct elf64_hdr *hdr; + + hdr = (struct elf64_hdr *)vdso64_start; + shdr = (void *)hdr + hdr->e_shoff; + alt = find_section(hdr, shdr, ".altinstructions"); + if (!alt) + return; + start = (void *)hdr + alt->sh_offset; + end = (void *)hdr + alt->sh_offset + alt->sh_size; + apply_alternatives(start, end); +} + static int __init vdso_init(void) { + vdso_apply_alternatives(); vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end); if (IS_ENABLED(CONFIG_COMPAT)) vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end); diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index ba19c0ca7c87af..37bb4b76122975 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -3,12 +3,17 @@ # Include the generic Makefile to check the built vdso. 
include $(srctree)/lib/vdso/Makefile -obj-vdso64 = vdso_user_wrapper.o note.o -obj-cvdso64 = vdso64_generic.o getcpu.o +obj-vdso64 = vdso_user_wrapper.o note.o vgetrandom-chacha.o +obj-cvdso64 = vdso64_generic.o getcpu.o vgetrandom.o VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) $(CC_FLAGS_CHECK_STACK) CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE) +CFLAGS_REMOVE_vgetrandom.o = $(VDSO_CFLAGS_REMOVE) CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE) +ifneq ($(c-getrandom-y),) + CFLAGS_vgetrandom.o += -include $(c-getrandom-y) +endif + # Build rules targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so vdso64.so.dbg diff --git a/arch/s390/kernel/vdso64/vdso.h b/arch/s390/kernel/vdso64/vdso.h index 34c7a2312f9dc0..9e5397e7b590a2 100644 --- a/arch/s390/kernel/vdso64/vdso.h +++ b/arch/s390/kernel/vdso64/vdso.h @@ -10,5 +10,6 @@ int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unuse int __s390_vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); int __s390_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts); +ssize_t __kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len); #endif /* __ARCH_S390_KERNEL_VDSO64_VDSO_H */ diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S index 37e2a505e81dbd..753040a4b5ab27 100644 --- a/arch/s390/kernel/vdso64/vdso64.lds.S +++ b/arch/s390/kernel/vdso64/vdso64.lds.S @@ -4,6 +4,7 @@ * library */ +#include #include #include @@ -13,6 +14,7 @@ OUTPUT_ARCH(s390:64-bit) SECTIONS { PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); + PROVIDE(_vdso_rng_data = _vdso_data + __VDSO_RND_DATA_OFFSET); #ifdef CONFIG_TIME_NS PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); #endif @@ -42,6 +44,10 @@ SECTIONS .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } + . = ALIGN(8); + .altinstructions : { *(.altinstructions) } + .altinstr_replacement : { *(.altinstr_replacement) } + .dynamic : { *(.dynamic) } :text :dynamic .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr @@ -140,6 +146,7 @@ VERSION __kernel_restart_syscall; __kernel_rt_sigreturn; __kernel_sigreturn; + __kernel_getrandom; local: *; }; } diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S index e26e68675c08d6..aa06c85bcbd357 100644 --- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S +++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S @@ -13,10 +13,7 @@ * for details. 
*/ .macro vdso_func func - .globl __kernel_\func - .type __kernel_\func,@function - __ALIGN -__kernel_\func: +SYM_FUNC_START(__kernel_\func) CFI_STARTPROC aghi %r15,-STACK_FRAME_VDSO_OVERHEAD CFI_DEF_CFA_OFFSET (STACK_FRAME_USER_OVERHEAD + STACK_FRAME_VDSO_OVERHEAD) @@ -32,7 +29,7 @@ __kernel_\func: CFI_RESTORE 15 br %r14 CFI_ENDPROC - .size __kernel_\func,.-__kernel_\func +SYM_FUNC_END(__kernel_\func) .endm vdso_func gettimeofday @@ -41,16 +38,13 @@ vdso_func clock_gettime vdso_func getcpu .macro vdso_syscall func,syscall - .globl __kernel_\func - .type __kernel_\func,@function - __ALIGN -__kernel_\func: +SYM_FUNC_START(__kernel_\func) CFI_STARTPROC svc \syscall /* Make sure we notice when a syscall returns, which shouldn't happen */ .word 0 CFI_ENDPROC - .size __kernel_\func,.-__kernel_\func +SYM_FUNC_END(__kernel_\func) .endm vdso_syscall restart_syscall,__NR_restart_syscall diff --git a/arch/s390/kernel/vdso64/vgetrandom-chacha.S b/arch/s390/kernel/vdso64/vgetrandom-chacha.S new file mode 100644 index 00000000000000..09c034c2f85312 --- /dev/null +++ b/arch/s390/kernel/vdso64/vgetrandom-chacha.S @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include +#include +#include + +#define STATE0 %v0 +#define STATE1 %v1 +#define STATE2 %v2 +#define STATE3 %v3 +#define COPY0 %v4 +#define COPY1 %v5 +#define COPY2 %v6 +#define COPY3 %v7 +#define BEPERM %v19 +#define TMP0 %v20 +#define TMP1 %v21 +#define TMP2 %v22 +#define TMP3 %v23 + + .section .rodata + + .balign 32 +SYM_DATA_START_LOCAL(chacha20_constants) + .long 0x61707865,0x3320646e,0x79622d32,0x6b206574 # endian-neutral + .long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c # byte swap +SYM_DATA_END(chacha20_constants) + + .text +/* + * s390 ChaCha20 implementation meant for vDSO. Produces a given positive + * number of blocks of output with nonce 0, taking an input key and 8-bytes + * counter. Does not spill to the stack. 
+ * + * void __arch_chacha20_blocks_nostack(uint8_t *dst_bytes, + * const uint8_t *key, + * uint32_t *counter, + * size_t nblocks) + */ +SYM_FUNC_START(__arch_chacha20_blocks_nostack) + CFI_STARTPROC + larl %r1,chacha20_constants + + /* COPY0 = "expand 32-byte k" */ + VL COPY0,0,,%r1 + + /* BEPERM = byte selectors for VPERM */ + ALTERNATIVE __stringify(VL BEPERM,16,,%r1), "brcl 0,0", ALT_FACILITY(148) + + /* COPY1,COPY2 = key */ + VLM COPY1,COPY2,0,%r3 + + /* COPY3 = counter || zero nonce */ + lg %r3,0(%r4) + VZERO COPY3 + VLVGG COPY3,%r3,0 + + lghi %r1,0 +.Lblock: + VLR STATE0,COPY0 + VLR STATE1,COPY1 + VLR STATE2,COPY2 + VLR STATE3,COPY3 + + lghi %r0,10 +.Ldoubleround: + /* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 16) */ + VAF STATE0,STATE0,STATE1 + VX STATE3,STATE3,STATE0 + VERLLF STATE3,STATE3,16 + + /* STATE2 += STATE3, STATE1 = rotl32(STATE1 ^ STATE2, 12) */ + VAF STATE2,STATE2,STATE3 + VX STATE1,STATE1,STATE2 + VERLLF STATE1,STATE1,12 + + /* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 8) */ + VAF STATE0,STATE0,STATE1 + VX STATE3,STATE3,STATE0 + VERLLF STATE3,STATE3,8 + + /* STATE2 += STATE3, STATE1 = rotl32(STATE1 ^ STATE2, 7) */ + VAF STATE2,STATE2,STATE3 + VX STATE1,STATE1,STATE2 + VERLLF STATE1,STATE1,7 + + /* STATE1[0,1,2,3] = STATE1[1,2,3,0] */ + VSLDB STATE1,STATE1,STATE1,4 + /* STATE2[0,1,2,3] = STATE2[2,3,0,1] */ + VSLDB STATE2,STATE2,STATE2,8 + /* STATE3[0,1,2,3] = STATE3[3,0,1,2] */ + VSLDB STATE3,STATE3,STATE3,12 + + /* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 16) */ + VAF STATE0,STATE0,STATE1 + VX STATE3,STATE3,STATE0 + VERLLF STATE3,STATE3,16 + + /* STATE2 += STATE3, STATE1 = rotl32(STATE1 ^ STATE2, 12) */ + VAF STATE2,STATE2,STATE3 + VX STATE1,STATE1,STATE2 + VERLLF STATE1,STATE1,12 + + /* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 8) */ + VAF STATE0,STATE0,STATE1 + VX STATE3,STATE3,STATE0 + VERLLF STATE3,STATE3,8 + + /* STATE2 += STATE3, STATE1 = rotl32(STATE1 ^ STATE2, 7) */ + VAF STATE2,STATE2,STATE3 + VX STATE1,STATE1,STATE2 + VERLLF STATE1,STATE1,7 + + /* STATE1[0,1,2,3] = STATE1[3,0,1,2] */ + VSLDB STATE1,STATE1,STATE1,12 + /* STATE2[0,1,2,3] = STATE2[2,3,0,1] */ + VSLDB STATE2,STATE2,STATE2,8 + /* STATE3[0,1,2,3] = STATE3[1,2,3,0] */ + VSLDB STATE3,STATE3,STATE3,4 + brctg %r0,.Ldoubleround + + /* OUTPUT0 = STATE0 + COPY0 */ + VAF STATE0,STATE0,COPY0 + /* OUTPUT1 = STATE1 + COPY1 */ + VAF STATE1,STATE1,COPY1 + /* OUTPUT2 = STATE2 + COPY2 */ + VAF STATE2,STATE2,COPY2 + /* OUTPUT3 = STATE3 + COPY3 */ + VAF STATE3,STATE3,COPY3 + + ALTERNATIVE \ + __stringify( \ + /* Convert STATE to little endian and store to OUTPUT */\ + VPERM TMP0,STATE0,STATE0,BEPERM; \ + VPERM TMP1,STATE1,STATE1,BEPERM; \ + VPERM TMP2,STATE2,STATE2,BEPERM; \ + VPERM TMP3,STATE3,STATE3,BEPERM; \ + VSTM TMP0,TMP3,0,%r2), \ + __stringify( \ + /* 32 bit wise little endian store to OUTPUT */ \ + VSTBRF STATE0,0,,%r2; \ + VSTBRF STATE1,16,,%r2; \ + VSTBRF STATE2,32,,%r2; \ + VSTBRF STATE3,48,,%r2; \ + brcl 0,0), \ + ALT_FACILITY(148) + + /* ++COPY3.COUNTER */ + /* alsih %r3,1 */ + .insn rilu,0xcc0a00000000,%r3,1 + alcr %r3,%r1 + VLVGG COPY3,%r3,0 + + /* OUTPUT += 64, --NBLOCKS */ + aghi %r2,64 + brctg %r5,.Lblock + + /* COUNTER = COPY3.COUNTER */ + stg %r3,0(%r4) + + /* Zero out potentially sensitive regs */ + VZERO STATE0 + VZERO STATE1 + VZERO STATE2 + VZERO STATE3 + VZERO COPY1 + VZERO COPY2 + + /* Early exit if TMP0-TMP3 have not been used */ + ALTERNATIVE "nopr", "br %r14", ALT_FACILITY(148) + + VZERO TMP0 + VZERO TMP1 + VZERO TMP2 + VZERO TMP3 + + br %r14 + 
CFI_ENDPROC +SYM_FUNC_END(__arch_chacha20_blocks_nostack) diff --git a/arch/s390/kernel/vdso64/vgetrandom.c b/arch/s390/kernel/vdso64/vgetrandom.c new file mode 100644 index 00000000000000..b5268b507fb557 --- /dev/null +++ b/arch/s390/kernel/vdso64/vgetrandom.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "vdso.h" + +ssize_t __kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len) +{ + if (test_facility(129)) + return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len); + if (unlikely(opaque_len == ~0UL && !buffer && !len && !flags)) + return -ENOSYS; + return getrandom_syscall(buffer, len, flags); +} diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index ae5d0a9d6911bc..377b9aaf8c9248 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -191,8 +191,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) - RUNTIME_CONST(shift, d_hash_shift) - RUNTIME_CONST(ptr, dentry_hashtable) + RUNTIME_CONST_VARIABLES PERCPU_SECTION(0x100) diff --git a/arch/s390/kernel/wti.c b/arch/s390/kernel/wti.c new file mode 100644 index 00000000000000..949fdbf0e8b659 --- /dev/null +++ b/arch/s390/kernel/wti.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for warning track interruption + * + * Copyright IBM Corp. 2023 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define WTI_DBF_LEN 64 + +struct wti_debug { + unsigned long missed; + unsigned long addr; + pid_t pid; +}; + +struct wti_state { + /* debug data for s390dbf */ + struct wti_debug dbg; + /* + * Represents the real-time thread responsible to + * acknowledge the warning-track interrupt and trigger + * preliminary and postliminary precautions. + */ + struct task_struct *thread; + /* + * If pending is true, the real-time thread must be scheduled. + * If not, a wake up of that thread will remain a noop. + */ + bool pending; +}; + +static DEFINE_PER_CPU(struct wti_state, wti_state); + +static debug_info_t *wti_dbg; + +/* + * During a warning-track grace period, interrupts are disabled + * to prevent delays of the warning-track acknowledgment. + * + * Once the CPU is physically dispatched again, interrupts are + * re-enabled. 
+ */ + +static void wti_irq_disable(void) +{ + unsigned long flags; + struct ctlreg cr6; + + local_irq_save(flags); + local_ctl_store(6, &cr6); + /* disable all I/O interrupts */ + cr6.val &= ~0xff000000UL; + local_ctl_load(6, &cr6); + local_irq_restore(flags); +} + +static void wti_irq_enable(void) +{ + unsigned long flags; + struct ctlreg cr6; + + local_irq_save(flags); + local_ctl_store(6, &cr6); + /* enable all I/O interrupts */ + cr6.val |= 0xff000000UL; + local_ctl_load(6, &cr6); + local_irq_restore(flags); +} + +static void store_debug_data(struct wti_state *st) +{ + struct pt_regs *regs = get_irq_regs(); + + st->dbg.pid = current->pid; + st->dbg.addr = 0; + if (!user_mode(regs)) + st->dbg.addr = regs->psw.addr; +} + +static void wti_interrupt(struct ext_code ext_code, + unsigned int param32, unsigned long param64) +{ + struct wti_state *st = this_cpu_ptr(&wti_state); + + inc_irq_stat(IRQEXT_WTI); + wti_irq_disable(); + store_debug_data(st); + st->pending = true; + wake_up_process(st->thread); +} + +static int wti_pending(unsigned int cpu) +{ + struct wti_state *st = per_cpu_ptr(&wti_state, cpu); + + return st->pending; +} + +static void wti_dbf_grace_period(struct wti_state *st) +{ + struct wti_debug *wdi = &st->dbg; + char buf[WTI_DBF_LEN]; + + if (wdi->addr) + snprintf(buf, sizeof(buf), "%d %pS", wdi->pid, (void *)wdi->addr); + else + snprintf(buf, sizeof(buf), "%d ", wdi->pid); + debug_text_event(wti_dbg, 2, buf); + wdi->missed++; +} + +static int wti_show(struct seq_file *seq, void *v) +{ + struct wti_state *st; + int cpu; + + cpus_read_lock(); + seq_puts(seq, " "); + for_each_online_cpu(cpu) + seq_printf(seq, "CPU%-8d", cpu); + seq_putc(seq, '\n'); + for_each_online_cpu(cpu) { + st = per_cpu_ptr(&wti_state, cpu); + seq_printf(seq, " %10lu", st->dbg.missed); + } + seq_putc(seq, '\n'); + cpus_read_unlock(); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wti); + +static void wti_thread_fn(unsigned int cpu) +{ + struct wti_state *st = per_cpu_ptr(&wti_state, cpu); + + st->pending = false; + /* + * Yield CPU voluntarily to the hypervisor. Control + * resumes when hypervisor decides to dispatch CPU + * to this LPAR again. 
+ */ + if (diag49c(DIAG49C_SUBC_ACK)) + wti_dbf_grace_period(st); + wti_irq_enable(); +} + +static struct smp_hotplug_thread wti_threads = { + .store = &wti_state.thread, + .thread_should_run = wti_pending, + .thread_fn = wti_thread_fn, + .thread_comm = "cpuwti/%u", + .selfparking = false, +}; + +static int __init wti_init(void) +{ + struct sched_param wti_sched_param = { .sched_priority = MAX_RT_PRIO - 1 }; + struct dentry *wti_dir; + struct wti_state *st; + int cpu, rc; + + rc = -EOPNOTSUPP; + if (!sclp.has_wti) + goto out; + rc = smpboot_register_percpu_thread(&wti_threads); + if (WARN_ON(rc)) + goto out; + for_each_online_cpu(cpu) { + st = per_cpu_ptr(&wti_state, cpu); + sched_setscheduler(st->thread, SCHED_FIFO, &wti_sched_param); + } + rc = register_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt); + if (rc) { + pr_warn("Couldn't request external interrupt 0x1007\n"); + goto out_thread; + } + irq_subclass_register(IRQ_SUBCLASS_WARNING_TRACK); + rc = diag49c(DIAG49C_SUBC_REG); + if (rc) { + pr_warn("Failed to register warning track interrupt through DIAG 49C\n"); + rc = -EOPNOTSUPP; + goto out_subclass; + } + wti_dir = debugfs_create_dir("wti", arch_debugfs_dir); + debugfs_create_file("stat", 0400, wti_dir, NULL, &wti_fops); + wti_dbg = debug_register("wti", 1, 1, WTI_DBF_LEN); + if (!wti_dbg) { + rc = -ENOMEM; + goto out_debug_register; + } + rc = debug_register_view(wti_dbg, &debug_hex_ascii_view); + if (rc) + goto out_debug_register; + goto out; +out_debug_register: + debug_unregister(wti_dbg); +out_subclass: + irq_subclass_unregister(IRQ_SUBCLASS_WARNING_TRACK); + unregister_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt); +out_thread: + smpboot_unregister_percpu_thread(&wti_threads); +out: + return rc; +} +late_initcall(wti_init); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0fd96860fc4586..bb7134faaebff3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -348,20 +348,29 @@ static inline int plo_test_bit(unsigned char nr) return cc == 0; } -static __always_inline void __insn32_query(unsigned int opcode, u8 *query) +static __always_inline void __sortl_query(u8 (*query)[32]) { asm volatile( " lghi 0,0\n" - " lgr 1,%[query]\n" + " la 1,%[query]\n" /* Parameter registers are ignored */ - " .insn rrf,%[opc] << 16,2,4,6,0\n" + " .insn rre,0xb9380000,2,4\n" + : [query] "=R" (*query) : - : [query] "d" ((unsigned long)query), [opc] "i" (opcode) - : "cc", "memory", "0", "1"); + : "cc", "0", "1"); } -#define INSN_SORTL 0xb938 -#define INSN_DFLTCC 0xb939 +static __always_inline void __dfltcc_query(u8 (*query)[32]) +{ + asm volatile( + " lghi 0,0\n" + " la 1,%[query]\n" + /* Parameter registers are ignored */ + " .insn rrf,0xb9390000,2,4,6,0\n" + : [query] "=R" (*query) + : + : "cc", "0", "1"); +} static void __init kvm_s390_cpu_feat_init(void) { @@ -415,10 +424,10 @@ static void __init kvm_s390_cpu_feat_init(void) kvm_s390_available_subfunc.kdsa); if (test_facility(150)) /* SORTL */ - __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl); + __sortl_query(&kvm_s390_available_subfunc.sortl); if (test_facility(151)) /* DFLTCC */ - __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc); + __dfltcc_query(&kvm_s390_available_subfunc.dfltcc); if (MACHINE_HAS_ESOP) allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP); diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 75d15bf41d97c3..d01724a715d0fc 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter, 
(*counter)++; spin_unlock(&cmm_lock); nr--; + cond_resched(); } return nr; } -static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) +static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) { struct cmm_page_array *pa; unsigned long addr; @@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) return nr; } +static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) +{ + long inc = 0; + + while (nr) { + inc = min(256L, nr); + nr -= inc; + inc = __cmm_free_pages(inc, counter, list); + if (inc) + break; + cond_resched(); + } + return nr + inc; +} + static int cmm_oom_notify(struct notifier_block *self, unsigned long dummy, void *parm) { diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index 0a67fcee44141e..fa54f3bc0c8d37 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -18,89 +18,12 @@ static unsigned long max_addr; struct addr_marker { int is_start; unsigned long start_address; + unsigned long size; const char *name; }; -enum address_markers_idx { - KVA_NR = 0, - LOWCORE_START_NR, - LOWCORE_END_NR, - AMODE31_START_NR, - AMODE31_END_NR, - KERNEL_START_NR, - KERNEL_END_NR, -#ifdef CONFIG_KFENCE - KFENCE_START_NR, - KFENCE_END_NR, -#endif - IDENTITY_START_NR, - IDENTITY_END_NR, - VMEMMAP_NR, - VMEMMAP_END_NR, - VMALLOC_NR, - VMALLOC_END_NR, -#ifdef CONFIG_KMSAN - KMSAN_VMALLOC_SHADOW_START_NR, - KMSAN_VMALLOC_SHADOW_END_NR, - KMSAN_VMALLOC_ORIGIN_START_NR, - KMSAN_VMALLOC_ORIGIN_END_NR, - KMSAN_MODULES_SHADOW_START_NR, - KMSAN_MODULES_SHADOW_END_NR, - KMSAN_MODULES_ORIGIN_START_NR, - KMSAN_MODULES_ORIGIN_END_NR, -#endif - MODULES_NR, - MODULES_END_NR, - ABS_LOWCORE_NR, - ABS_LOWCORE_END_NR, - MEMCPY_REAL_NR, - MEMCPY_REAL_END_NR, -#ifdef CONFIG_KASAN - KASAN_SHADOW_START_NR, - KASAN_SHADOW_END_NR, -#endif -}; - -static struct addr_marker address_markers[] = { - [KVA_NR] = {0, 0, "Kernel Virtual Address Space"}, - [LOWCORE_START_NR] = {1, 0, "Lowcore Start"}, - [LOWCORE_END_NR] = {0, 0, "Lowcore End"}, - [IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"}, - [IDENTITY_END_NR] = {0, 0, "Identity Mapping End"}, - [AMODE31_START_NR] = {1, 0, "Amode31 Area Start"}, - [AMODE31_END_NR] = {0, 0, "Amode31 Area End"}, - [KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"}, - [KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"}, -#ifdef CONFIG_KFENCE - [KFENCE_START_NR] = {1, 0, "KFence Pool Start"}, - [KFENCE_END_NR] = {0, 0, "KFence Pool End"}, -#endif - [VMEMMAP_NR] = {1, 0, "vmemmap Area Start"}, - [VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"}, - [VMALLOC_NR] = {1, 0, "vmalloc Area Start"}, - [VMALLOC_END_NR] = {0, 0, "vmalloc Area End"}, -#ifdef CONFIG_KMSAN - [KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"}, - [KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"}, - [KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"}, - [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"}, - [KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"}, - [KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"}, - [KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"}, - [KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"}, -#endif - [MODULES_NR] = {1, 0, "Modules Area Start"}, - [MODULES_END_NR] = {0, 0, "Modules Area End"}, - [ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"}, - 
[ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"}, - [MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"}, - [MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"}, -#ifdef CONFIG_KASAN - [KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"}, - [KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"}, -#endif - {1, -1UL, NULL} -}; +static struct addr_marker *markers; +static unsigned int markers_cnt; struct pg_state { struct ptdump_state ptdump; @@ -173,7 +96,8 @@ static void note_page_update_state(struct pg_state *st, unsigned long addr, unsi while (addr >= st->marker[1].start_address) { st->marker++; - pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); + pt_dump_seq_printf(m, "---[ %s %s ]---\n", st->marker->name, + st->marker->is_start ? "Start" : "End"); } st->start_address = addr; st->current_prot = prot; @@ -202,7 +126,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, if (level == -1) addr = max_addr; if (st->level == -1) { - pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); + pt_dump_seq_puts(m, "---[ Kernel Virtual Address Space ]---\n"); note_page_update_state(st, addr, prot, level); } else if (prot != st->current_prot || level != st->level || addr >= st->marker[1].start_address) { @@ -276,7 +200,7 @@ static int ptdump_show(struct seq_file *m, void *v) .check_wx = false, .wx_pages = 0, .start_address = 0, - .marker = address_markers, + .marker = markers, }; get_online_mems(); @@ -299,10 +223,23 @@ static int ptdump_cmp(const void *a, const void *b) if (ama->start_address < amb->start_address) return -1; /* - * If the start addresses of two markers are identical consider the - * marker which defines the start of an area higher than the one which - * defines the end of an area. This keeps pairs of markers sorted. + * If the start addresses of two markers are identical sort markers in an + * order that considers areas contained within other areas correctly. 
*/ + if (ama->is_start && amb->is_start) { + if (ama->size > amb->size) + return -1; + if (ama->size < amb->size) + return 1; + return 0; + } + if (!ama->is_start && !amb->is_start) { + if (ama->size > amb->size) + return 1; + if (ama->size < amb->size) + return -1; + return 0; + } if (ama->is_start) return 1; if (amb->is_start) @@ -310,12 +247,41 @@ static int ptdump_cmp(const void *a, const void *b) return 0; } +static int add_marker(unsigned long start, unsigned long end, const char *name) +{ + size_t oldsize, newsize; + + oldsize = markers_cnt * sizeof(*markers); + newsize = oldsize + 2 * sizeof(*markers); + if (!oldsize) + markers = kvmalloc(newsize, GFP_KERNEL); + else + markers = kvrealloc(markers, newsize, GFP_KERNEL); + if (!markers) + goto error; + markers[markers_cnt].is_start = 1; + markers[markers_cnt].start_address = start; + markers[markers_cnt].size = end - start; + markers[markers_cnt].name = name; + markers_cnt++; + markers[markers_cnt].is_start = 0; + markers[markers_cnt].start_address = end; + markers[markers_cnt].size = end - start; + markers[markers_cnt].name = name; + markers_cnt++; + return 0; +error: + markers_cnt = 0; + return -ENOMEM; +} + static int pt_dump_init(void) { #ifdef CONFIG_KFENCE unsigned long kfence_start = (unsigned long)__kfence_pool; #endif unsigned long lowcore = (unsigned long)get_lowcore(); + int rc; /* * Figure out the maximum virtual address being accessible with the @@ -324,41 +290,38 @@ static int pt_dump_init(void) */ max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2; max_addr = 1UL << (max_addr * 11 + 31); - address_markers[LOWCORE_START_NR].start_address = lowcore; - address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore); - address_markers[IDENTITY_START_NR].start_address = __identity_base; - address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size; - address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31; - address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31; - address_markers[MODULES_NR].start_address = MODULES_VADDR; - address_markers[MODULES_END_NR].start_address = MODULES_END; - address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore; - address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE; - address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area; - address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE; - address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; - address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size; - address_markers[VMALLOC_NR].start_address = VMALLOC_START; - address_markers[VMALLOC_END_NR].start_address = VMALLOC_END; + /* start + end markers - must be added first */ + rc = add_marker(0, -1UL, NULL); + rc |= add_marker((unsigned long)_stext, (unsigned long)_end, "Kernel Image"); + rc |= add_marker(lowcore, lowcore + sizeof(struct lowcore), "Lowcore"); + rc |= add_marker(__identity_base, __identity_base + ident_map_size, "Identity Mapping"); + rc |= add_marker((unsigned long)__samode31, (unsigned long)__eamode31, "Amode31 Area"); + rc |= add_marker(MODULES_VADDR, MODULES_END, "Modules Area"); + rc |= add_marker(__abs_lowcore, __abs_lowcore + ABS_LOWCORE_MAP_SIZE, "Lowcore Area"); + rc |= add_marker(__memcpy_real_area, __memcpy_real_area + MEMCPY_REAL_SIZE, "Real Memory Copy Area"); + rc |= add_marker((unsigned long)vmemmap, (unsigned long)vmemmap + vmemmap_size, 
"vmemmap Area"); + rc |= add_marker(VMALLOC_START, VMALLOC_END, "vmalloc Area"); #ifdef CONFIG_KFENCE - address_markers[KFENCE_START_NR].start_address = kfence_start; - address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE; + rc |= add_marker(kfence_start, kfence_start + KFENCE_POOL_SIZE, "KFence Pool"); #endif #ifdef CONFIG_KMSAN - address_markers[KMSAN_VMALLOC_SHADOW_START_NR].start_address = KMSAN_VMALLOC_SHADOW_START; - address_markers[KMSAN_VMALLOC_SHADOW_END_NR].start_address = KMSAN_VMALLOC_SHADOW_END; - address_markers[KMSAN_VMALLOC_ORIGIN_START_NR].start_address = KMSAN_VMALLOC_ORIGIN_START; - address_markers[KMSAN_VMALLOC_ORIGIN_END_NR].start_address = KMSAN_VMALLOC_ORIGIN_END; - address_markers[KMSAN_MODULES_SHADOW_START_NR].start_address = KMSAN_MODULES_SHADOW_START; - address_markers[KMSAN_MODULES_SHADOW_END_NR].start_address = KMSAN_MODULES_SHADOW_END; - address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START; - address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END; + rc |= add_marker(KMSAN_VMALLOC_SHADOW_START, KMSAN_VMALLOC_SHADOW_END, "Kmsan vmalloc Shadow"); + rc |= add_marker(KMSAN_VMALLOC_ORIGIN_START, KMSAN_VMALLOC_ORIGIN_END, "Kmsan vmalloc Origins"); + rc |= add_marker(KMSAN_MODULES_SHADOW_START, KMSAN_MODULES_SHADOW_END, "Kmsan Modules Shadow"); + rc |= add_marker(KMSAN_MODULES_ORIGIN_START, KMSAN_MODULES_ORIGIN_END, "Kmsan Modules Origins"); +#endif +#ifdef CONFIG_KASAN + rc |= add_marker(KASAN_SHADOW_START, KASAN_SHADOW_END, "Kasan Shadow"); #endif - sort(address_markers, ARRAY_SIZE(address_markers) - 1, - sizeof(address_markers[0]), ptdump_cmp, NULL); + if (rc) + goto error; + sort(&markers[1], markers_cnt - 1, sizeof(*markers), ptdump_cmp, NULL); #ifdef CONFIG_PTDUMP_DEBUGFS debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); #endif /* CONFIG_PTDUMP_DEBUGFS */ return 0; +error: + kvfree(markers); + return -ENOMEM; } device_initcall(pt_dump_init); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 8e149ef5e89ba8..ad8b0d6b77ea16 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -492,9 +493,9 @@ void do_secure_storage_access(struct pt_regs *regs) union teid teid = { .val = regs->int_parm_long }; unsigned long addr = get_fault_address(regs); struct vm_area_struct *vma; + struct folio_walk fw; struct mm_struct *mm; struct folio *folio; - struct page *page; struct gmap *gmap; int rc; @@ -536,15 +537,18 @@ void do_secure_storage_access(struct pt_regs *regs) vma = find_vma(mm, addr); if (!vma) return handle_fault_error(regs, SEGV_MAPERR); - page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET); - if (IS_ERR_OR_NULL(page)) { + folio = folio_walk_start(&fw, vma, addr, 0); + if (!folio) { mmap_read_unlock(mm); break; } - folio = page_folio(page); - if (arch_make_folio_accessible(folio)) - send_sig(SIGSEGV, current, 0); + /* arch_make_folio_accessible() needs a raised refcount. 
*/ + folio_get(folio); + rc = arch_make_folio_accessible(folio); folio_put(folio); + folio_walk_end(&fw, vma); + if (rc) + send_sig(SIGSEGV, current, 0); mmap_read_unlock(mm); break; case KERNEL_FAULT: diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index e3d258f9e72610..7a96623a9d2e43 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -62,7 +62,7 @@ EXPORT_SYMBOL(zero_page_mask); static void __init setup_zero_pages(void) { - unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size()); + unsigned long total_pages = memblock_estimated_nr_free_pages(); unsigned int order; struct page *page; int i; @@ -97,7 +97,7 @@ void __init paging_init(void) vmem_map_init(); sparse_init(); - zone_dma_bits = 31; + zone_dma_limit = DMA_BIT_MASK(31); memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS); max_zone_pfns[ZONE_NORMAL] = max_low_pfn; diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 2067569465897b..96efa061ce01ba 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -82,7 +82,7 @@ static int get_align_mask(struct file *filp, unsigned long flags) unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -117,7 +117,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index 0547a10406e72a..2c21f0394c9abe 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile @@ -3,7 +3,8 @@ # Makefile for the s390 PCI subsystem. 
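The do_secure_storage_access() change above is an instance of the folio_walk lookup pattern that replaces follow_page(). A hedged sketch of that pattern, assuming the folio_walk_start()/folio_walk_end() interface from <linux/pagewalk.h> and with error handling trimmed to the essentials:

#include <linux/pagewalk.h>
#include <linux/errno.h>
#include <linux/mm.h>

/*
 * Sketch only: look up the folio mapped at addr, take a reference so it
 * stays valid once the walk (and its page table lock) is ended, then act
 * on it. The caller must hold the mmap read lock.
 */
static int act_on_mapped_folio(struct vm_area_struct *vma, unsigned long addr,
			       int (*action)(struct folio *))
{
	struct folio_walk fw;
	struct folio *folio;
	int rc;

	mmap_assert_locked(vma->vm_mm);
	folio = folio_walk_start(&fw, vma, addr, 0);
	if (!folio)
		return -EFAULT;
	folio_get(folio);
	folio_walk_end(&fw, vma);
	rc = action(folio);
	folio_put(folio);
	return rc;
}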
# -obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o pci_sysfs.o \ +obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o \ pci_event.o pci_debug.o pci_insn.o pci_mmio.o \ pci_bus.o pci_kvm_hook.o obj-$(CONFIG_PCI_IOV) += pci_iov.o +obj-$(CONFIG_SYSFS) += pci_sysfs.o diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index cff4838fad2166..bd9624c20b8020 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -587,7 +587,6 @@ int pcibios_device_add(struct pci_dev *pdev) if (pdev->is_physfn) pdev->no_vf_scan = 1; - pdev->dev.groups = zpci_attr_groups; zpci_map_resources(pdev); for (i = 0; i < PCI_STD_NUM_BARS; i++) { diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index ee90a91ed88818..6f55a59a087115 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -657,7 +657,6 @@ static const struct file_operations clp_misc_fops = { .release = clp_misc_release, .unlocked_ioctl = clp_misc_ioctl, .compat_ioctl = clp_misc_ioctl, - .llseek = no_llseek, }; static struct miscdevice clp_misc_device = { diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 5398729bfe1b77..de5c0b389a3ec8 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -118,12 +118,11 @@ static inline int __memcpy_toio_inuser(void __iomem *dst, SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, const void __user *, user_buffer, size_t, length) { + struct follow_pfnmap_args args = { }; u8 local_buf[64]; void __iomem *io_addr; void *buf; struct vm_area_struct *vma; - pte_t *ptep; - spinlock_t *ptl; long ret; if (!zpci_is_enabled()) @@ -169,11 +168,13 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, if (!(vma->vm_flags & VM_WRITE)) goto out_unlock_mmap; - ret = follow_pte(vma, mmio_addr, &ptep, &ptl); + args.address = mmio_addr; + args.vma = vma; + ret = follow_pfnmap_start(&args); if (ret) goto out_unlock_mmap; - io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) | + io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) @@ -181,7 +182,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, ret = zpci_memcpy_toio(io_addr, buf, length); out_unlock_pt: - pte_unmap_unlock(ptep, ptl); + follow_pfnmap_end(&args); out_unlock_mmap: mmap_read_unlock(current->mm); out_free: @@ -260,12 +261,11 @@ static inline int __memcpy_fromio_inuser(void __user *dst, SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, void __user *, user_buffer, size_t, length) { + struct follow_pfnmap_args args = { }; u8 local_buf[64]; void __iomem *io_addr; void *buf; struct vm_area_struct *vma; - pte_t *ptep; - spinlock_t *ptl; long ret; if (!zpci_is_enabled()) @@ -308,11 +308,13 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, if (!(vma->vm_flags & VM_WRITE)) goto out_unlock_mmap; - ret = follow_pte(vma, mmio_addr, &ptep, &ptl); + args.vma = vma; + args.address = mmio_addr; + ret = follow_pfnmap_start(&args); if (ret) goto out_unlock_mmap; - io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) | + io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) { @@ -322,7 +324,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, ret = zpci_memcpy_fromio(buf, io_addr, length); out_unlock_pt: - pte_unmap_unlock(ptep, ptl); + follow_pfnmap_end(&args); out_unlock_mmap: mmap_read_unlock(current->mm); diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index 
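The s390_pci_mmio_write()/s390_pci_mmio_read() hunks above move from follow_pte() to the follow_pfnmap interface. A rough sketch of that usage, assuming the follow_pfnmap_args/follow_pfnmap_start()/follow_pfnmap_end() API from <linux/mm.h> and mirroring the io_addr arithmetic in the hunk:

#include <linux/mm.h>
#include <linux/io.h>

/*
 * Sketch only: resolve the pfn backing a VM_PFNMAP user address while the
 * mmap read lock is held, and turn it into an __iomem pointer. The caller
 * is responsible for calling follow_pfnmap_end(args) once it is done using
 * the returned address.
 */
static void __iomem *mmio_addr_from_user_mapping(struct vm_area_struct *vma,
						 unsigned long user_addr,
						 struct follow_pfnmap_args *args)
{
	args->vma = vma;
	args->address = user_addr;
	if (follow_pfnmap_start(args))
		return NULL;
	return (void __iomem *)((args->pfn << PAGE_SHIFT) |
				(user_addr & ~PAGE_MASK));
}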
0f4f1e8fc480dc..1f81f6ff7b954b 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c @@ -197,7 +197,7 @@ static struct attribute *zpci_ident_attrs[] = { NULL, }; -static struct attribute_group zpci_ident_attr_group = { +const struct attribute_group zpci_ident_attr_group = { .attrs = zpci_ident_attrs, .is_visible = zpci_index_is_visible, }; @@ -223,7 +223,7 @@ static struct attribute *zpci_dev_attrs[] = { NULL, }; -static struct attribute_group zpci_attr_group = { +const struct attribute_group zpci_attr_group = { .attrs = zpci_dev_attrs, .bin_attrs = zpci_bin_attrs, }; @@ -235,14 +235,8 @@ static struct attribute *pfip_attrs[] = { &dev_attr_segment3.attr, NULL, }; -static struct attribute_group pfip_attr_group = { + +const struct attribute_group pfip_attr_group = { .name = "pfip", .attrs = pfip_attrs, }; - -const struct attribute_group *zpci_attr_groups[] = { - &zpci_attr_group, - &pfip_attr_group, - &zpci_ident_attr_group, - NULL, -}; diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt index 5f008e7948987b..def2659f6602d2 100644 --- a/arch/s390/tools/opcodes.txt +++ b/arch/s390/tools/opcodes.txt @@ -527,9 +527,9 @@ b938 sortl RRE_RR b939 dfltcc RRF_R0RR2 b93a kdsa RRE_RR b93b nnpa RRE_00 -b93c ppno RRE_RR -b93e kimd RRE_RR -b93f klmd RRE_RR +b93c prno RRE_RR +b93e kimd RRF_U0RR +b93f klmd RRF_U0RR b941 cfdtr RRF_UURF b942 clgdtr RRF_UURF b943 clfdtr RRF_UURF @@ -549,6 +549,10 @@ b964 nngrk RRF_R0RR2 b965 ocgrk RRF_R0RR2 b966 nogrk RRF_R0RR2 b967 nxgrk RRF_R0RR2 +b968 clzg RRE_RR +b969 ctzg RRE_RR +b96c bextg RRF_R0RR2 +b96d bdepg RRF_R0RR2 b972 crt RRF_U0RR b973 clrt RRF_U0RR b974 nnrk RRF_R0RR2 @@ -796,6 +800,16 @@ e35b sy RXY_RRRD e35c mfy RXY_RRRD e35e aly RXY_RRRD e35f sly RXY_RRRD +e360 lxab RXY_RRRD +e361 llxab RXY_RRRD +e362 lxah RXY_RRRD +e363 llxah RXY_RRRD +e364 lxaf RXY_RRRD +e365 llxaf RXY_RRRD +e366 lxag RXY_RRRD +e367 llxag RXY_RRRD +e368 lxaq RXY_RRRD +e369 llxaq RXY_RRRD e370 sthy RXY_RRRD e371 lay RXY_RRRD e372 stcy RXY_RRRD @@ -880,6 +894,8 @@ e63c vupkz VSI_URDV e63d vstrl VSI_URDV e63f vstrlr VRS_RRDV e649 vlip VRI_V0UU2 +e64a vcvdq VRI_VV0UU +e64e vcvbq VRR_VV0U2 e650 vcvb VRR_RV0UU e651 vclzdp VRR_VV0U2 e652 vcvbg VRR_RV0UU @@ -893,7 +909,7 @@ e65b vpsop VRI_VVUUU2 e65c vupkzl VRR_VV0U2 e65d vcfn VRR_VV0UU2 e65e vclfnl VRR_VV0UU2 -e65f vtp VRR_0V +e65f vtp VRR_0V0U e670 vpkzr VRI_VVV0UU2 e671 vap VRI_VVV0UU2 e672 vsrpr VRI_VVV0UU2 @@ -908,6 +924,7 @@ e67b vrp VRI_VVV0UU2 e67c vscshp VRR_VVV e67d vcsph VRR_VVV0U0 e67e vsdp VRI_VVV0UU2 +e67f vtz VRR_0VVU e700 vleb VRX_VRRDU e701 vleh VRX_VRRDU e702 vleg VRX_VRRDU @@ -948,6 +965,7 @@ e74d vrep VRI_VVUU e750 vpopct VRR_VV0U e752 vctz VRR_VV0U e753 vclz VRR_VV0U +e754 vgem VRR_VV0U e756 vlr VRX_VV e75c vistr VRR_VV0U0U e75f vseg VRR_VV0U @@ -985,6 +1003,8 @@ e784 vpdi VRR_VVV0U e785 vbperm VRR_VVV e786 vsld VRI_VVV0U e787 vsrd VRI_VVV0U +e788 veval VRI_VVV0UV +e789 vblend VRR_VVVU0V e78a vstrc VRR_VVVUU0V e78b vstrs VRR_VVVUU0V e78c vperm VRR_VVV0V @@ -1010,6 +1030,10 @@ e7ac vmale VRR_VVVU0V e7ad vmalo VRR_VVVU0V e7ae vmae VRR_VVVU0V e7af vmao VRR_VVVU0V +e7b0 vdl VRR_VVV0UU +e7b1 vrl VRR_VVV0UU +e7b2 vd VRR_VVV0UU +e7b3 vr VRR_VVV0UU e7b4 vgfm VRR_VVV0U e7b8 vmsl VRR_VVVUU0V e7b9 vaccc VRR_VVVU0V @@ -1017,12 +1041,12 @@ e7bb vac VRR_VVVU0V e7bc vgfma VRR_VVVU0V e7bd vsbcbi VRR_VVVU0V e7bf vsbi VRR_VVVU0V -e7c0 vclgd VRR_VV0UUU -e7c1 vcdlg VRR_VV0UUU -e7c2 vcgd VRR_VV0UUU -e7c3 vcdg VRR_VV0UUU -e7c4 vlde VRR_VV0UU2 -e7c5 vled VRR_VV0UUU +e7c0 vclfp VRR_VV0UUU +e7c1 vcfpl VRR_VV0UUU 
+e7c2 vcsfp VRR_VV0UUU +e7c3 vcfps VRR_VV0UUU +e7c4 vfll VRR_VV0UU2 +e7c5 vflr VRR_VV0UUU e7c7 vfi VRR_VV0UUU e7ca wfk VRR_VV0UU2 e7cb wfc VRR_VV0UU2 @@ -1094,9 +1118,9 @@ eb54 niy SIY_URD eb55 cliy SIY_URD eb56 oiy SIY_URD eb57 xiy SIY_URD -eb60 lric RSY_RDRU -eb61 stric RSY_RDRU -eb62 mric RSY_RDRU +eb60 lric RSY_RURD2 +eb61 stric RSY_RURD2 +eb62 mric RSY_RURD2 eb6a asi SIY_IRD eb6e alsi SIY_IRD eb71 lpswey SIY_RD @@ -1104,7 +1128,7 @@ eb7a agsi SIY_IRD eb7e algsi SIY_IRD eb80 icmh RSY_RURD eb81 icmy RSY_RURD -eb8a sqbs RSY_RDRU +eb8a sqbs RSY_RURD2 eb8e mvclu RSY_RRRD eb8f clclu RSY_RRRD eb90 stmy RSY_RRRD diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 1aa3c4a0c5b276..e9103998cca91f 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -14,6 +14,7 @@ config SUPERH select ARCH_HIBERNATION_POSSIBLE if MMU select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_NEED_CMPXCHG_1_EMU select CPU_NO_EFFICIENT_FFS select DMA_DECLARE_COHERENT select GENERIC_ATOMIC64 diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h index 5d617b3ef78f77..1e5dc5ccf7bf50 100644 --- a/arch/sh/include/asm/cmpxchg.h +++ b/arch/sh/include/asm/cmpxchg.h @@ -9,6 +9,7 @@ #include #include +#include #if defined(CONFIG_GUSA_RB) #include @@ -56,6 +57,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, unsigned long new, int size) { switch (size) { + case 1: + return cmpxchg_emu_u8(ptr, old, new); case 4: return __cmpxchg_u32(ptr, old, new); } diff --git a/arch/sh/include/asm/flat.h b/arch/sh/include/asm/flat.h index fee4f25555cb53..70752c7bc55f2f 100644 --- a/arch/sh/include/asm/flat.h +++ b/arch/sh/include/asm/flat.h @@ -9,7 +9,7 @@ #ifndef __ASM_SH_FLAT_H #define __ASM_SH_FLAT_H -#include +#include static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags, u32 *addr) diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h index 0f384b1f45cafa..53fc18a3d4c29b 100644 --- a/arch/sh/include/asm/irq.h +++ b/arch/sh/include/asm/irq.h @@ -13,12 +13,6 @@ */ #define NO_IRQ_IGNORE ((unsigned int)-1) -/* - * Simple Mask Register Support - */ -extern void make_maskreg_irq(unsigned int irq); -extern unsigned short *irq_mask_register; - /* * PINT IRQs */ diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h index 7b8dead2723dc3..63f88b465e3945 100644 --- a/arch/sh/include/asm/mmzone.h +++ b/arch/sh/include/asm/mmzone.h @@ -5,9 +5,6 @@ #ifdef CONFIG_NUMA #include -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[nid]) - static inline int pfn_to_nid(unsigned long pfn) { int nid; diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 45c8ae20d10957..a1b54bedc92935 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include /* Reserve enough memory for two stack frames */ diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c index b9cee98a754e51..a469a80840d3de 100644 --- a/arch/sh/kernel/module.c +++ b/arch/sh/kernel/module.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include int apply_relocate_add(Elf32_Shdr *sechdrs, diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c index 1bd85a6949c45d..add35c51e01781 100644 --- a/arch/sh/kernel/vsyscall/vsyscall.c +++ b/arch/sh/kernel/vsyscall/vsyscall.c @@ -36,6 +36,10 @@ __setup("vdso=", vdso_setup); */ extern const char vsyscall_trapa_start, vsyscall_trapa_end; static struct page 
*syscall_pages[1]; +static struct vm_special_mapping vdso_mapping = { + .name = "[vdso]", + .pages = syscall_pages, +}; int __init vsyscall_init(void) { @@ -58,6 +62,7 @@ int __init vsyscall_init(void) int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; unsigned long addr; int ret; @@ -70,14 +75,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto up_fail; } - ret = install_special_mapping(mm, addr, PAGE_SIZE, + vdso_mapping.pages = syscall_pages; + vma = _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - syscall_pages); - if (unlikely(ret)) + &vdso_mapping); + ret = PTR_ERR(vma); + if (IS_ERR(vma)) goto up_fail; current->mm->context.vdso = (void *)addr; + ret = 0; up_fail: mmap_write_unlock(mm); diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index d1fe90b2f5ff4b..2a88b0c9e70f46 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -212,12 +212,7 @@ void __init allocate_pgdat(unsigned int nid) get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); #ifdef CONFIG_NUMA - NODE_DATA(nid) = memblock_alloc_try_nid( - sizeof(struct pglist_data), - SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, - MEMBLOCK_ALLOC_ACCESSIBLE, nid); - if (!NODE_DATA(nid)) - panic("Can't allocate pgdat for node %d\n", nid); + alloc_node_data(nid); #endif NODE_DATA(nid)->node_start_pfn = start_pfn; diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index bee329d4149aa7..c442734d9b0ca3 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -52,7 +52,8 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr, } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -99,7 +100,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, - const unsigned long flags) + const unsigned long flags, vm_flags_t vm_flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 50f0dc1744d049..9bc212b5e762a9 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c @@ -14,9 +14,6 @@ #include #include -struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL_GPL(node_data); - /* * On SH machines the conventional approach is to stash system RAM * in node 0, and other memory blocks in to node 1 and up, ordered by diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 11bf9d312318c6..dcfdb7f1dae976 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -14,9 +14,9 @@ config SPARC bool default y select ARCH_HAS_CPU_CACHE_ALIASING + select ARCH_HAS_DMA_OPS select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI select ARCH_MIGHT_HAVE_PC_SERIO - select DMA_OPS select OF select OF_PROMTREE select HAVE_ASM_MODVERSIONS diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c index 688db0dcb97d92..913b9a09e885de 100644 --- a/arch/sparc/crypto/crc32c_glue.c +++ b/arch/sparc/crypto/crc32c_glue.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include "opcodes.h" diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h index 
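The sh vsyscall hunk above switches from install_special_mapping() to _install_special_mapping(), which takes a named struct vm_special_mapping and returns a VMA (or an ERR_PTR) rather than an int. A minimal sketch of that calling convention; the example_* identifiers are invented for illustration:

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/err.h>

static struct page *example_vdso_pages[1];	/* hypothetical page array */

static struct vm_special_mapping example_vdso_mapping = {
	.name	= "[vdso]",	/* shows up in /proc/<pid>/maps */
	.pages	= example_vdso_pages,
};

static int map_example_vdso(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &example_vdso_mapping);
	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}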
a236d8aa893a91..74eb2c71d0774a 100644 --- a/arch/sparc/include/asm/mmzone.h +++ b/arch/sparc/include/asm/mmzone.h @@ -6,10 +6,6 @@ #include -extern struct pglist_data *node_data[]; - -#define NODE_DATA(nid) (node_data[nid]) - extern int numa_cpu_lookup_table[]; extern cpumask_t numa_cpumask_lookup_table[]; diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 3fe429d73a65b0..2b7f358762c187 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -783,6 +783,7 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) return __pmd(pte_val(pte)); } +#define pmd_pgprot pmd_pgprot static inline pgprot_t pmd_pgprot(pmd_t entry) { unsigned long val = pmd_val(entry); diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 682da3714686c9..57084ed2f3c4ec 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -133,6 +133,12 @@ #define SO_PASSPIDFD 0x0055 #define SO_PEERPIDFD 0x0056 +#define SO_DEVMEM_LINEAR 0x0057 +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 0x0058 +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 0x0059 + #if !defined(__KERNEL__) diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 08a19727795ca4..80822f922e7671 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -39,7 +39,7 @@ SYSCALL_DEFINE0(getpagesize) return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */ } -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { struct vm_unmapped_area_info info = {}; diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index d9c3b34ca74470..acade309dc2fbd 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -87,7 +87,7 @@ static inline unsigned long COLOR_ALIGN(unsigned long addr, return base + off; } -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; struct vm_area_struct * vma; @@ -146,7 +146,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, - const unsigned long flags) + const unsigned long flags, vm_flags_t vm_flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 53d7cb5bbffe5a..21f8cbbd0581c7 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1075,14 +1075,9 @@ static void __init allocate_node_data(int nid) { struct pglist_data *p; unsigned long start_pfn, end_pfn; -#ifdef CONFIG_NUMA - NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data), - SMP_CACHE_BYTES, nid); - if (!NODE_DATA(nid)) { - prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); - prom_halt(); - } +#ifdef CONFIG_NUMA + alloc_node_data(nid); NODE_DATA(nid)->node_id = 
nid; #endif @@ -1115,11 +1110,9 @@ static void init_node_masks_nonnuma(void) } #ifdef CONFIG_NUMA -struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(numa_cpumask_lookup_table); -EXPORT_SYMBOL(node_data); static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, u32 cfg_handle) diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c index ec61ff1f96b71a..1dc9b3d70eda7c 100644 --- a/arch/sparc/mm/leon_mm.c +++ b/arch/sparc/mm/leon_mm.c @@ -39,12 +39,10 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr) unsigned int ctxtbl; unsigned int pgd, pmd, ped; unsigned int ptr; - unsigned int lvl, pte, paddrbase; + unsigned int lvl, pte; unsigned int ctx; unsigned int paddr_calc; - paddrbase = 0; - if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: trace on\n"); @@ -73,7 +71,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr) printk(KERN_INFO "swprobe: pgd is entry level 3\n"); lvl = 3; pte = pgd; - paddrbase = pgd & _SRMMU_PTE_PMASK_LEON; goto ready; } if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { @@ -96,7 +93,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr) printk(KERN_INFO "swprobe: pmd is entry level 2\n"); lvl = 2; pte = pmd; - paddrbase = pmd & _SRMMU_PTE_PMASK_LEON; goto ready; } if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { @@ -124,7 +120,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr) printk(KERN_INFO "swprobe: ped is entry level 1\n"); lvl = 1; pte = ped; - paddrbase = ped & _SRMMU_PTE_PMASK_LEON; goto ready; } if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { @@ -147,7 +142,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr) printk(KERN_INFO "swprobe: ptr is entry level 0\n"); lvl = 0; pte = ptr; - paddrbase = ptr & _SRMMU_PTE_PMASK_LEON; goto ready; } if (srmmu_swprobe_trace) diff --git a/arch/um/Kconfig b/arch/um/Kconfig index dca84fd6d00a50..c89575d05021f1 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -11,7 +11,6 @@ config UML select ARCH_HAS_KCOV select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER - select ARCH_NO_PREEMPT_DYNAMIC select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_KASAN if X86_64 select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c index 99a7144b229fa5..819aabb4ecdcf7 100644 --- a/arch/um/drivers/harddog_kern.c +++ b/arch/um/drivers/harddog_kern.c @@ -164,7 +164,6 @@ static const struct file_operations harddog_fops = { .compat_ioctl = compat_ptr_ioctl, .open = harddog_open, .release = harddog_release, - .llseek = no_llseek, }; static struct miscdevice harddog_miscdev = { diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c index c42b793bce6506..9d228878cea2ed 100644 --- a/arch/um/drivers/hostaudio_kern.c +++ b/arch/um/drivers/hostaudio_kern.c @@ -291,7 +291,6 @@ static int hostmixer_release(struct inode *inode, struct file *file) static const struct file_operations hostaudio_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = hostaudio_read, .write = hostaudio_write, .poll = hostaudio_poll, @@ -304,7 +303,6 @@ static const struct file_operations hostaudio_fops = { static const struct file_operations hostmixer_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .unlocked_ioctl = hostmixer_ioctl_mixdev, .open = hostmixer_open_mixdev, .release = hostmixer_release, diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 
2d473282ab515e..c992da83268dd8 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -102,18 +103,33 @@ static const struct { static void vector_reset_stats(struct vector_private *vp) { + /* We reuse the existing queue locks for stats */ + + /* RX stats are modified with RX head_lock held + * in vector_poll. + */ + + spin_lock(&vp->rx_queue->head_lock); vp->estats.rx_queue_max = 0; vp->estats.rx_queue_running_average = 0; - vp->estats.tx_queue_max = 0; - vp->estats.tx_queue_running_average = 0; vp->estats.rx_encaps_errors = 0; + vp->estats.sg_ok = 0; + vp->estats.sg_linearized = 0; + spin_unlock(&vp->rx_queue->head_lock); + + /* TX stats are modified with TX head_lock held + * in vector_send. + */ + + spin_lock(&vp->tx_queue->head_lock); vp->estats.tx_timeout_count = 0; vp->estats.tx_restart_queue = 0; vp->estats.tx_kicks = 0; vp->estats.tx_flow_control_xon = 0; vp->estats.tx_flow_control_xoff = 0; - vp->estats.sg_ok = 0; - vp->estats.sg_linearized = 0; + vp->estats.tx_queue_max = 0; + vp->estats.tx_queue_running_average = 0; + spin_unlock(&vp->tx_queue->head_lock); } static int get_mtu(struct arglist *def) @@ -232,12 +248,6 @@ static int get_transport_options(struct arglist *def) static char *drop_buffer; -/* Array backed queues optimized for bulk enqueue/dequeue and - * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios. - * For more details and full design rationale see - * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt - */ - /* * Advance the mmsg queue head by n = advance. Resets the queue to @@ -247,27 +257,13 @@ static char *drop_buffer; static int vector_advancehead(struct vector_queue *qi, int advance) { - int queue_depth; - qi->head = (qi->head + advance) % qi->max_depth; - spin_lock(&qi->tail_lock); - qi->queue_depth -= advance; - - /* we are at 0, use this to - * reset head and tail so we can use max size vectors - */ - - if (qi->queue_depth == 0) { - qi->head = 0; - qi->tail = 0; - } - queue_depth = qi->queue_depth; - spin_unlock(&qi->tail_lock); - return queue_depth; + atomic_sub(advance, &qi->queue_depth); + return atomic_read(&qi->queue_depth); } /* Advance the queue tail by n = advance. 
@@ -277,16 +273,11 @@ static int vector_advancehead(struct vector_queue *qi, int advance) static int vector_advancetail(struct vector_queue *qi, int advance) { - int queue_depth; - qi->tail = (qi->tail + advance) % qi->max_depth; - spin_lock(&qi->head_lock); - qi->queue_depth += advance; - queue_depth = qi->queue_depth; - spin_unlock(&qi->head_lock); - return queue_depth; + atomic_add(advance, &qi->queue_depth); + return atomic_read(&qi->queue_depth); } static int prep_msg(struct vector_private *vp, @@ -339,9 +330,7 @@ static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb) int iov_count; spin_lock(&qi->tail_lock); - spin_lock(&qi->head_lock); - queue_depth = qi->queue_depth; - spin_unlock(&qi->head_lock); + queue_depth = atomic_read(&qi->queue_depth); if (skb) packet_len = skb->len; @@ -360,6 +349,7 @@ static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb) mmsg_vector->msg_hdr.msg_iovlen = iov_count; mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr; mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size; + wmb(); /* Make the packet visible to the NAPI poll thread */ queue_depth = vector_advancetail(qi, 1); } else goto drop; @@ -398,7 +388,7 @@ static int consume_vector_skbs(struct vector_queue *qi, int count) } /* - * Generic vector deque via sendmmsg with support for forming headers + * Generic vector dequeue via sendmmsg with support for forming headers * using transport specific callback. Allows GRE, L2TPv3, RAW and * other transports to use a common dequeue procedure in vector mode */ @@ -408,69 +398,64 @@ static int vector_send(struct vector_queue *qi) { struct vector_private *vp = netdev_priv(qi->dev); struct mmsghdr *send_from; - int result = 0, send_len, queue_depth = qi->max_depth; + int result = 0, send_len; if (spin_trylock(&qi->head_lock)) { - if (spin_trylock(&qi->tail_lock)) { - /* update queue_depth to current value */ - queue_depth = qi->queue_depth; - spin_unlock(&qi->tail_lock); - while (queue_depth > 0) { - /* Calculate the start of the vector */ - send_len = queue_depth; - send_from = qi->mmsg_vector; - send_from += qi->head; - /* Adjust vector size if wraparound */ - if (send_len + qi->head > qi->max_depth) - send_len = qi->max_depth - qi->head; - /* Try to TX as many packets as possible */ - if (send_len > 0) { - result = uml_vector_sendmmsg( - vp->fds->tx_fd, - send_from, - send_len, - 0 - ); - vp->in_write_poll = - (result != send_len); - } - /* For some of the sendmmsg error scenarios - * we may end being unsure in the TX success - * for all packets. It is safer to declare - * them all TX-ed and blame the network. - */ - if (result < 0) { - if (net_ratelimit()) - netdev_err(vp->dev, "sendmmsg err=%i\n", - result); - vp->in_error = true; - result = send_len; - } - if (result > 0) { - queue_depth = - consume_vector_skbs(qi, result); - /* This is equivalent to an TX IRQ. - * Restart the upper layers to feed us - * more packets. 
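The vector_enqueue()/vector_send() changes above turn queue_depth into an atomic_t so that the enqueue path (tail_lock) and the dequeue path (head_lock) no longer have to take each other's lock just to update the depth. A simplified, illustrative ring sketch of that scheme; it is not the driver code and omits the batching and wraparound handling the driver does:

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct simple_ring {
	spinlock_t head_lock;		/* serialises consumers */
	spinlock_t tail_lock;		/* serialises producers */
	atomic_t queue_depth;		/* shared between both sides */
	int head, tail, max_depth;
	void **slots;
};

static bool ring_produce(struct simple_ring *q, void *item)
{
	bool ok = false;

	spin_lock(&q->tail_lock);
	if (atomic_read(&q->queue_depth) < q->max_depth) {
		q->slots[q->tail] = item;
		q->tail = (q->tail + 1) % q->max_depth;
		smp_wmb();	/* publish the slot before the new depth */
		atomic_inc(&q->queue_depth);
		ok = true;
	}
	spin_unlock(&q->tail_lock);
	return ok;
}

static void *ring_consume(struct simple_ring *q)
{
	void *item = NULL;

	spin_lock(&q->head_lock);
	if (atomic_read(&q->queue_depth) > 0) {
		smp_rmb();	/* pairs with the producer's barrier */
		item = q->slots[q->head];
		q->head = (q->head + 1) % q->max_depth;
		atomic_dec(&q->queue_depth);
	}
	spin_unlock(&q->head_lock);
	return item;
}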
- */ - if (result > vp->estats.tx_queue_max) - vp->estats.tx_queue_max = result; - vp->estats.tx_queue_running_average = - (vp->estats.tx_queue_running_average + result) >> 1; - } - netif_wake_queue(qi->dev); - /* if TX is busy, break out of the send loop, - * poll write IRQ will reschedule xmit for us + /* update queue_depth to current value */ + while (atomic_read(&qi->queue_depth) > 0) { + /* Calculate the start of the vector */ + send_len = atomic_read(&qi->queue_depth); + send_from = qi->mmsg_vector; + send_from += qi->head; + /* Adjust vector size if wraparound */ + if (send_len + qi->head > qi->max_depth) + send_len = qi->max_depth - qi->head; + /* Try to TX as many packets as possible */ + if (send_len > 0) { + result = uml_vector_sendmmsg( + vp->fds->tx_fd, + send_from, + send_len, + 0 + ); + vp->in_write_poll = + (result != send_len); + } + /* For some of the sendmmsg error scenarios + * we may end being unsure in the TX success + * for all packets. It is safer to declare + * them all TX-ed and blame the network. + */ + if (result < 0) { + if (net_ratelimit()) + netdev_err(vp->dev, "sendmmsg err=%i\n", + result); + vp->in_error = true; + result = send_len; + } + if (result > 0) { + consume_vector_skbs(qi, result); + /* This is equivalent to an TX IRQ. + * Restart the upper layers to feed us + * more packets. */ - if (result != send_len) { - vp->estats.tx_restart_queue++; - break; - } + if (result > vp->estats.tx_queue_max) + vp->estats.tx_queue_max = result; + vp->estats.tx_queue_running_average = + (vp->estats.tx_queue_running_average + result) >> 1; + } + netif_wake_queue(qi->dev); + /* if TX is busy, break out of the send loop, + * poll write IRQ will reschedule xmit for us. + */ + if (result != send_len) { + vp->estats.tx_restart_queue++; + break; } } spin_unlock(&qi->head_lock); } - return queue_depth; + return atomic_read(&qi->queue_depth); } /* Queue destructor. Deliberately stateless so we can use @@ -589,7 +574,7 @@ static struct vector_queue *create_queue( } spin_lock_init(&result->head_lock); spin_lock_init(&result->tail_lock); - result->queue_depth = 0; + atomic_set(&result->queue_depth, 0); result->head = 0; result->tail = 0; return result; @@ -668,18 +653,27 @@ static struct sk_buff *prep_skb( } -/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs*/ +/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */ static void prep_queue_for_rx(struct vector_queue *qi) { struct vector_private *vp = netdev_priv(qi->dev); struct mmsghdr *mmsg_vector = qi->mmsg_vector; void **skbuff_vector = qi->skbuff_vector; - int i; + int i, queue_depth; + + queue_depth = atomic_read(&qi->queue_depth); - if (qi->queue_depth == 0) + if (queue_depth == 0) return; - for (i = 0; i < qi->queue_depth; i++) { + + /* RX is always emptied 100% during each cycle, so we do not + * have to do the tail wraparound math for it. 
+ */ + + qi->head = qi->tail = 0; + + for (i = 0; i < queue_depth; i++) { /* it is OK if allocation fails - recvmmsg with NULL data in * iov argument still performs an RX, just drops the packet * This allows us stop faffing around with a "drop buffer" @@ -689,7 +683,7 @@ static void prep_queue_for_rx(struct vector_queue *qi) skbuff_vector++; mmsg_vector++; } - qi->queue_depth = 0; + atomic_set(&qi->queue_depth, 0); } static struct vector_device *find_device(int n) @@ -972,7 +966,7 @@ static int vector_mmsg_rx(struct vector_private *vp, int budget) budget = qi->max_depth; packet_count = uml_vector_recvmmsg( - vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0); + vp->fds->rx_fd, qi->mmsg_vector, budget, 0); if (packet_count < 0) vp->in_error = true; @@ -985,7 +979,7 @@ static int vector_mmsg_rx(struct vector_private *vp, int budget) * many do we need to prep the next time prep_queue_for_rx() is called. */ - qi->queue_depth = packet_count; + atomic_add(packet_count, &qi->queue_depth); for (i = 0; i < packet_count; i++) { skb = (*skbuff_vector); @@ -1172,6 +1166,7 @@ static int vector_poll(struct napi_struct *napi, int budget) if ((vp->options & VECTOR_TX) != 0) tx_enqueued = (vector_send(vp->tx_queue) > 0); + spin_lock(&vp->rx_queue->head_lock); if ((vp->options & VECTOR_RX) > 0) err = vector_mmsg_rx(vp, budget); else { @@ -1179,12 +1174,13 @@ static int vector_poll(struct napi_struct *napi, int budget) if (err > 0) err = 1; } + spin_unlock(&vp->rx_queue->head_lock); if (err > 0) work_done += err; if (tx_enqueued || err > 0) napi_schedule(napi); - if (work_done < budget) + if (work_done <= budget) napi_complete_done(napi, work_done); return work_done; } @@ -1225,7 +1221,7 @@ static int vector_net_open(struct net_device *dev) vp->rx_header_size, MAX_IOV_SIZE ); - vp->rx_queue->queue_depth = get_depth(vp->parsed); + atomic_set(&vp->rx_queue->queue_depth, get_depth(vp->parsed)); } else { vp->header_rxbuffer = kmalloc( vp->rx_header_size, @@ -1467,7 +1463,17 @@ static void vector_get_ethtool_stats(struct net_device *dev, { struct vector_private *vp = netdev_priv(dev); + /* Stats are modified in the dequeue portions of + * rx/tx which are protected by the head locks + * grabbing these locks here ensures they are up + * to date. 
+ */ + + spin_lock(&vp->tx_queue->head_lock); + spin_lock(&vp->rx_queue->head_lock); memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats)); + spin_unlock(&vp->rx_queue->head_lock); + spin_unlock(&vp->tx_queue->head_lock); } static int vector_get_coalesce(struct net_device *netdev, diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h index 806df551be0b45..417834793658ff 100644 --- a/arch/um/drivers/vector_kern.h +++ b/arch/um/drivers/vector_kern.h @@ -14,6 +14,7 @@ #include #include #include +#include #include "vector_user.h" @@ -44,7 +45,8 @@ struct vector_queue { struct net_device *dev; spinlock_t head_lock; spinlock_t tail_lock; - int queue_depth, head, tail, max_depth, max_iov_frags; + atomic_t queue_depth; + int head, tail, max_depth, max_iov_frags; short options; }; diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c index b16a5e5619d31f..2ea67e6fd067db 100644 --- a/arch/um/drivers/vector_user.c +++ b/arch/um/drivers/vector_user.c @@ -46,6 +46,9 @@ #define TRANS_FD "fd" #define TRANS_FD_LEN strlen(TRANS_FD) +#define TRANS_VDE "vde" +#define TRANS_VDE_LEN strlen(TRANS_VDE) + #define VNET_HDR_FAIL "could not enable vnet headers on fd %d" #define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s" #define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i" @@ -434,6 +437,84 @@ static struct vector_fds *user_init_fd_fds(struct arglist *ifspec) return NULL; } +/* enough char to store an int type */ +#define ENOUGH(type) ((CHAR_BIT * sizeof(type) - 1) / 3 + 2) +#define ENOUGH_OCTAL(type) ((CHAR_BIT * sizeof(type) + 2) / 3) +/* vde_plug --descr xx --port2 xx --mod2 xx --group2 xx seqpacket://NN vnl (NULL) */ +#define VDE_MAX_ARGC 12 +#define VDE_SEQPACKET_HEAD "seqpacket://" +#define VDE_SEQPACKET_HEAD_LEN (sizeof(VDE_SEQPACKET_HEAD) - 1) +#define VDE_DEFAULT_DESCRIPTION "UML" + +static struct vector_fds *user_init_vde_fds(struct arglist *ifspec) +{ + char seqpacketvnl[VDE_SEQPACKET_HEAD_LEN + ENOUGH(int) + 1]; + char *argv[VDE_MAX_ARGC] = {"vde_plug"}; + int argc = 1; + int rv; + int sv[2]; + struct vector_fds *result = NULL; + + char *vnl = uml_vector_fetch_arg(ifspec,"vnl"); + char *descr = uml_vector_fetch_arg(ifspec,"descr"); + char *port = uml_vector_fetch_arg(ifspec,"port"); + char *mode = uml_vector_fetch_arg(ifspec,"mode"); + char *group = uml_vector_fetch_arg(ifspec,"group"); + if (descr == NULL) descr = VDE_DEFAULT_DESCRIPTION; + + argv[argc++] = "--descr"; + argv[argc++] = descr; + if (port != NULL) { + argv[argc++] = "--port2"; + argv[argc++] = port; + } + if (mode != NULL) { + argv[argc++] = "--mod2"; + argv[argc++] = mode; + } + if (group != NULL) { + argv[argc++] = "--group2"; + argv[argc++] = group; + } + argv[argc++] = seqpacketvnl; + argv[argc++] = vnl; + argv[argc++] = NULL; + + rv = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv); + if (rv < 0) { + printk(UM_KERN_ERR "vde: seqpacket socketpair err %d", -errno); + return NULL; + } + rv = os_set_exec_close(sv[0]); + if (rv < 0) { + printk(UM_KERN_ERR "vde: seqpacket socketpair cloexec err %d", -errno); + goto vde_cleanup_sv; + } + snprintf(seqpacketvnl, sizeof(seqpacketvnl), VDE_SEQPACKET_HEAD "%d", sv[1]); + + run_helper(NULL, NULL, argv); + + close(sv[1]); + + result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL); + if (result == NULL) { + printk(UM_KERN_ERR "fd open: allocation failed"); + goto vde_cleanup; + } + + result->rx_fd = sv[0]; + result->tx_fd = sv[0]; + result->remote_addr_size = 0; + result->remote_addr = NULL; + return result; + 
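For context on user_init_vde_fds() above: the transport creates a SOCK_SEQPACKET socketpair, hands one end to a helper process by embedding its fd number in a "seqpacket://<fd>" argument, and keeps the other end for packet I/O. A stand-alone illustration of that hand-off follows; it is not part of the patch and "some_helper" is a placeholder command:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	char vnl[32];
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv) < 0)
		return 1;
	/* the helper learns its end of the pair from the argument string */
	snprintf(vnl, sizeof(vnl), "seqpacket://%d", sv[1]);
	if (fork() == 0) {
		execlp("some_helper", "some_helper", vnl, (char *)NULL);
		_exit(127);
	}
	close(sv[1]);		/* parent keeps sv[0] as its rx/tx fd */
	printf("talking to helper via fd %d\n", sv[0]);
	close(sv[0]);
	return 0;
}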
+vde_cleanup_sv: + close(sv[1]); +vde_cleanup: + close(sv[0]); + return NULL; +} + static struct vector_fds *user_init_raw_fds(struct arglist *ifspec) { int rxfd = -1, txfd = -1; @@ -673,6 +754,8 @@ struct vector_fds *uml_vector_user_open( return user_init_unix_fds(parsed, ID_BESS); if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0) return user_init_fd_fds(parsed); + if (strncmp(transport, TRANS_VDE, TRANS_VDE_LEN) == 0) + return user_init_vde_fds(parsed); return NULL; } diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c index 6100819681b5cb..744e7f31e8ef17 100644 --- a/arch/um/drivers/virt-pci.c +++ b/arch/um/drivers/virt-pci.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #define MAX_DEVICES 8 diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index 5bb397b65efb2f..83373c9963e7c9 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -359,11 +359,4 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) return pte; } -/* Clear a kernel PTE and flush it from the TLB */ -#define kpte_clear_flush(ptep, vaddr) \ -do { \ - pte_clear(&init_mm, (vaddr), (ptep)); \ - __flush_tlb_one((vaddr)); \ -} while (0) - #endif diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h index 5a7c05275aa74d..bce4595798dae5 100644 --- a/arch/um/include/asm/processor-generic.h +++ b/arch/um/include/asm/processor-generic.h @@ -28,20 +28,10 @@ struct thread_struct { struct arch_thread arch; jmp_buf switch_buf; struct { - int op; - union { - struct { - int pid; - } fork, exec; - struct { - int (*proc)(void *); - void *arg; - } thread; - struct { - void (*proc)(void *); - void *arg; - } cb; - } u; + struct { + int (*proc)(void *); + void *arg; + } thread; } request; }; @@ -51,7 +41,7 @@ struct thread_struct { .fault_addr = NULL, \ .prev_sched = NULL, \ .arch = INIT_ARCH_THREAD, \ - .request = { 0 } \ + .request = { } \ } /* diff --git a/arch/um/include/asm/sysrq.h b/arch/um/include/asm/sysrq.h deleted file mode 100644 index 8fc8c65cd357b7..00000000000000 --- a/arch/um/include/asm/sysrq.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __UM_SYSRQ_H -#define __UM_SYSRQ_H - -struct task_struct; -extern void show_trace(struct task_struct* task, unsigned long *stack); - -#endif diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 7d9d60e41e4e39..1d4b6bbc1b65ce 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -8,7 +8,7 @@ #define __UM_UACCESS_H #include -#include +#include #define __under_task_size(addr, size) \ (((unsigned long) (addr) < TASK_SIZE) && \ diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h index 1e76ba40febadf..140388c282f6d3 100644 --- a/arch/um/include/shared/skas/mm_id.h +++ b/arch/um/include/shared/skas/mm_id.h @@ -7,10 +7,7 @@ #define __MM_ID_H struct mm_id { - union { - int mm_fd; - int pid; - } u; + int pid; unsigned long stack; int syscall_data_len; }; diff --git a/arch/um/include/shared/skas/skas.h b/arch/um/include/shared/skas/skas.h index ebaa116de30b9c..85c50122ab9813 100644 --- a/arch/um/include/shared/skas/skas.h +++ b/arch/um/include/shared/skas/skas.h @@ -10,10 +10,8 @@ extern int userspace_pid[]; -extern int user_thread(unsigned long stack, int flags); extern void new_thread_handler(void); extern void handle_syscall(struct uml_pt_regs *regs); -extern long execute_syscall_skas(void *r); extern unsigned long 
current_stub_stack(void); extern struct mm_id *current_mm_id(void); extern void current_mm_sync(void); diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index 2c15bb2c104c16..cb8b5cd9285c1c 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c @@ -35,8 +35,5 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) PT_REGS_IP(regs) = eip; PT_REGS_SP(regs) = esp; clear_thread_flag(TIF_SINGLESTEP); -#ifdef SUBARCH_EXECVE1 - SUBARCH_EXECVE1(regs->regs); -#endif } EXPORT_SYMBOL(start_thread); diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c index 4382cf02a6d14d..41902117527265 100644 --- a/arch/um/kernel/kmsg_dump.c +++ b/arch/um/kernel/kmsg_dump.c @@ -8,7 +8,7 @@ #include static void kmsg_dumper_stdout(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { static struct kmsg_dump_iter iter; static DEFINE_SPINLOCK(lock); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index f36b63f53babd4..be2856af6d4c31 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -109,8 +109,8 @@ void new_thread_handler(void) schedule_tail(current->thread.prev_sched); current->thread.prev_sched = NULL; - fn = current->thread.request.u.thread.proc; - arg = current->thread.request.u.thread.arg; + fn = current->thread.request.thread.proc; + arg = current->thread.request.thread.arg; /* * callback returns only if the kernel thread execs a process @@ -158,8 +158,8 @@ int copy_thread(struct task_struct * p, const struct kernel_clone_args *args) arch_copy_thread(¤t->thread.arch, &p->thread.arch); } else { get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); - p->thread.request.u.thread.proc = args->fn; - p->thread.request.u.thread.arg = args->fn_arg; + p->thread.request.thread.proc = args->fn; + p->thread.request.thread.arg = args->fn_arg; handler = new_thread_handler; } diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c index 3736bca626ba0b..680bce4bd8fa7a 100644 --- a/arch/um/kernel/reboot.c +++ b/arch/um/kernel/reboot.c @@ -29,7 +29,7 @@ static void kill_off_processes(void) t = find_lock_task_mm(p); if (!t) continue; - pid = t->mm->context.id.u.pid; + pid = t->mm->context.id.pid; task_unlock(t); os_kill_ptraced_process(pid, 1); } diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index 47f98d87ea3cfb..886ed5e656743d 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c @@ -32,11 +32,11 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm) new_id->stack = stack; block_signals_trace(); - new_id->u.pid = start_userspace(stack); + new_id->pid = start_userspace(stack); unblock_signals_trace(); - if (new_id->u.pid < 0) { - ret = new_id->u.pid; + if (new_id->pid < 0) { + ret = new_id->pid; goto out_free; } @@ -83,12 +83,12 @@ void destroy_context(struct mm_struct *mm) * whole UML suddenly dying. Also, cover negative and * 1 cases, since they shouldn't happen either. 
*/ - if (mmu->id.u.pid < 2) { + if (mmu->id.pid < 2) { printk(KERN_ERR "corrupt mm_context - pid = %d\n", - mmu->id.u.pid); + mmu->id.pid); return; } - os_kill_ptraced_process(mmu->id.u.pid, 1); + os_kill_ptraced_process(mmu->id.pid, 1); free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES)); } diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c index 5f9c1c5f36e2fd..68657988c8d1ae 100644 --- a/arch/um/kernel/skas/process.c +++ b/arch/um/kernel/skas/process.c @@ -39,8 +39,8 @@ int __init start_uml(void) init_new_thread_signals(); - init_task.thread.request.u.thread.proc = start_kernel_proc; - init_task.thread.request.u.thread.arg = NULL; + init_task.thread.request.thread.proc = start_kernel_proc; + init_task.thread.request.thread.arg = NULL; return start_idle_thread(task_stack_page(&init_task), &init_task.thread.switch_buf); } diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c index 9ee19e566da3bd..b09e85279d2b8c 100644 --- a/arch/um/kernel/skas/syscall.c +++ b/arch/um/kernel/skas/syscall.c @@ -12,23 +12,13 @@ #include #include #include +#include void handle_syscall(struct uml_pt_regs *r) { struct pt_regs *regs = container_of(r, struct pt_regs, regs); int syscall; - /* - * If we have infinite CPU resources, then make every syscall also a - * preemption point, since we don't have any other preemption in this - * case, and kernel threads would basically never run until userspace - * went to sleep, even if said userspace interacts with the kernel in - * various ways. - */ - if (time_travel_mode == TT_MODE_INFCPU || - time_travel_mode == TT_MODE_EXTERNAL) - schedule(); - /* Initialize the syscall number and default return value. */ UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp); PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS); @@ -41,9 +31,25 @@ void handle_syscall(struct uml_pt_regs *r) goto out; syscall = UPT_SYSCALL_NR(r); - if (syscall >= 0 && syscall < __NR_syscalls) - PT_REGS_SET_SYSCALL_RETURN(regs, - EXECUTE_SYSCALL(syscall, regs)); + if (syscall >= 0 && syscall < __NR_syscalls) { + unsigned long ret = EXECUTE_SYSCALL(syscall, regs); + + PT_REGS_SET_SYSCALL_RETURN(regs, ret); + + /* + * An error value here can be some form of -ERESTARTSYS + * and then we'd just loop. Make any error syscalls take + * some time, so that it won't just loop if something is + * not ready, and hopefully other things will make some + * progress. + */ + if (IS_ERR_VALUE(ret) && + (time_travel_mode == TT_MODE_INFCPU || + time_travel_mode == TT_MODE_EXTERNAL)) { + um_udelay(1); + schedule(); + } + } out: syscall_trace_leave(regs); diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index 746715379f12a8..4bb8622dc51226 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -11,7 +11,6 @@ #include #include -#include #include #include diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 47b9f5e63566f7..29b27b90581fbb 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -839,7 +839,7 @@ static irqreturn_t um_timer(int irq, void *dev) if (get_current()->mm != NULL) { /* userspace - relay signal, results in correct userspace timers */ - os_alarm_process(get_current()->mm->context.id.u.pid); + os_alarm_process(get_current()->mm->context.id.pid); } (*timer_clockevent.event_handler)(&timer_clockevent); diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index 44c6fc697f3a8c..548af31d4111bb 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -82,16 +82,12 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr, (x ? 
UM_PROT_EXEC : 0)); if (pte_newpage(*pte)) { if (pte_present(*pte)) { - if (pte_newpage(*pte)) { - __u64 offset; - unsigned long phys = - pte_val(*pte) & PAGE_MASK; - int fd = phys_mapping(phys, &offset); - - ret = ops->mmap(ops->mm_idp, addr, - PAGE_SIZE, prot, fd, - offset); - } + __u64 offset; + unsigned long phys = pte_val(*pte) & PAGE_MASK; + int fd = phys_mapping(phys, &offset); + + ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE, + prot, fd, offset); } else ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE); } else if (pte_newprot(*pte)) diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c index 5adf8f630049ec..f1d03cf3957fe8 100644 --- a/arch/um/os-Linux/file.c +++ b/arch/um/os-Linux/file.c @@ -528,7 +528,8 @@ int os_shutdown_socket(int fd, int r, int w) ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds, void *data, size_t data_len) { - char buf[CMSG_SPACE(sizeof(*fds) * n_fds)]; +#define MAX_RCV_FDS 2 + char buf[CMSG_SPACE(sizeof(*fds) * MAX_RCV_FDS)]; struct cmsghdr *cmsg; struct iovec iov = { .iov_base = data, @@ -538,10 +539,13 @@ ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds, .msg_iov = &iov, .msg_iovlen = 1, .msg_control = buf, - .msg_controllen = sizeof(buf), + .msg_controllen = CMSG_SPACE(sizeof(*fds) * n_fds), }; int n; + if (n_fds > MAX_RCV_FDS) + return -EINVAL; + n = recvmsg(fd, &msg, 0); if (n < 0) return -errno; diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c index c55430775efdb4..9a13ac23c60644 100644 --- a/arch/um/os-Linux/skas/mem.c +++ b/arch/um/os-Linux/skas/mem.c @@ -78,7 +78,7 @@ static inline long do_syscall_stub(struct mm_id *mm_idp) { struct stub_data *proc_data = (void *)mm_idp->stack; int n, i; - int err, pid = mm_idp->u.pid; + int err, pid = mm_idp->pid; n = ptrace_setregs(pid, syscall_regs); if (n < 0) { diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index f7088345b3fc02..b6f656bcffb178 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -588,5 +588,5 @@ void reboot_skas(void) void __switch_mm(struct mm_id *mm_idp) { - userspace_pid[0] = mm_idp->u.pid; + userspace_pid[0] = mm_idp->pid; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 007bab9f2a0e39..2852fcd82cbd8c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -28,6 +28,7 @@ config X86_64 select ARCH_HAS_GIGANTIC_PAGE select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 select ARCH_SUPPORTS_PER_VMA_LOCK + select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE select HAVE_ARCH_SOFT_DIRTY select MODULES_USE_ELF_RELA select NEED_DMA_MAP_STATE @@ -79,6 +80,7 @@ config X86 select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_DMA_OPS if GART_IOMMU || XEN select ARCH_HAS_EARLY_DEBUG if KGDB select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FAST_MULTIPLIER @@ -107,6 +109,7 @@ config X86 select ARCH_HAS_DEBUG_WX select ARCH_HAS_ZONE_DMA_SET if EXPERT select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_HAVE_EXTRA_ELF_NOTES select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI select ARCH_MIGHT_HAVE_PC_PARPORT @@ -122,6 +125,7 @@ config X86 select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG select ARCH_SUPPORTS_LTO_CLANG select ARCH_SUPPORTS_LTO_CLANG_THIN + select ARCH_SUPPORTS_RT select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if X86_CMPXCHG64 select ARCH_USE_MEMTEST @@ -296,6 +300,7 @@ config X86 select NEED_PER_CPU_EMBED_FIRST_CHUNK select NEED_PER_CPU_PAGE_FIRST_CHUNK select 
NEED_SG_DMA_LENGTH + select NUMA_MEMBLKS if NUMA select PCI_DOMAINS if PCI select PCI_LOCKLESS_CONFIG if PCI select PERF_EVENTS @@ -943,7 +948,6 @@ config DMI config GART_IOMMU bool "Old AMD GART IOMMU support" - select DMA_OPS select IOMMU_HELPER select SWIOTLB depends on X86_64 && PCI && AMD_NB @@ -1599,14 +1603,6 @@ config X86_64_ACPI_NUMA help Enable ACPI SRAT based node topology detection. -config NUMA_EMU - bool "NUMA emulation" - depends on NUMA - help - Enable NUMA emulation. A flat machine will be split - into virtual nodes when booted with "numa=fake=N", where N is the - number of nodes. This is only useful for debugging. - config NODES_SHIFT int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP range 1 10 @@ -1806,6 +1802,7 @@ config X86_PAT def_bool y prompt "x86 PAT support" if EXPERT depends on MTRR + select ARCH_USES_PG_ARCH_2 help Use PAT attributes to setup page level cache control. @@ -1817,10 +1814,6 @@ config X86_PAT If unsure, say Y. -config ARCH_USES_PG_UNCACHED - def_bool y - depends on X86_PAT - config X86_UMIP def_bool y prompt "User Mode Instruction Prevention" if EXPERT @@ -1889,6 +1882,10 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS If unsure, say y. +config ARCH_PKEY_BITS + int + default 4 + choice prompt "TSX enable mode" depends on CPU_SUP_INTEL @@ -2610,24 +2607,15 @@ config MITIGATION_SLS against straight line speculation. The kernel image might be slightly larger. -config MITIGATION_GDS_FORCE - bool "Force GDS Mitigation" +config MITIGATION_GDS + bool "Mitigate Gather Data Sampling" depends on CPU_SUP_INTEL - default n + default y help - Gather Data Sampling (GDS) is a hardware vulnerability which allows - unprivileged speculative access to data which was previously stored in - vector registers. - - This option is equivalent to setting gather_data_sampling=force on the - command line. The microcode mitigation is used if present, otherwise - AVX is disabled as a mitigation. On affected systems that are missing - the microcode any userspace code that unconditionally uses AVX will - break with this option set. - - Setting this option on systems not vulnerable to GDS has no effect. - - If in doubt, say N. + Enable mitigation for Gather Data Sampling (GDS). GDS is a hardware + vulnerability which allows unprivileged speculative access to data + which was previously stored in vector registers. The attacker uses gather + instructions to infer the stale vector register data. config MITIGATION_RFDS bool "RFDS Mitigation" @@ -2650,6 +2638,107 @@ config MITIGATION_SPECTRE_BHI indirect branches. See +config MITIGATION_MDS + bool "Mitigate Microarchitectural Data Sampling (MDS) hardware bug" + depends on CPU_SUP_INTEL + default y + help + Enable mitigation for Microarchitectural Data Sampling (MDS). MDS is + a hardware vulnerability which allows unprivileged speculative access + to data which is available in various CPU internal buffers. + See also + +config MITIGATION_TAA + bool "Mitigate TSX Asynchronous Abort (TAA) hardware bug" + depends on CPU_SUP_INTEL + default y + help + Enable mitigation for TSX Asynchronous Abort (TAA). TAA is a hardware + vulnerability that allows unprivileged speculative access to data + which is available in various CPU internal buffers by using + asynchronous aborts within an Intel TSX transactional region. + See also + +config MITIGATION_MMIO_STALE_DATA + bool "Mitigate MMIO Stale Data hardware bug" + depends on CPU_SUP_INTEL + default y + help + Enable mitigation for MMIO Stale Data hardware bugs. 
Processor MMIO + Stale Data Vulnerabilities are a class of memory-mapped I/O (MMIO) + vulnerabilities that can expose data. The vulnerabilities require the + attacker to have access to MMIO. + See also + + +config MITIGATION_L1TF + bool "Mitigate L1 Terminal Fault (L1TF) hardware bug" + depends on CPU_SUP_INTEL + default y + help + Mitigate L1 Terminal Fault (L1TF) hardware bug. L1 Terminal Fault is a + hardware vulnerability which allows unprivileged speculative access to data + available in the Level 1 Data Cache. + See + +config MITIGATION_SPECTRE_V2 + bool "Mitigate SPECTRE V2 hardware bug" + default y + help + Enable mitigation for Spectre V2 (Branch Target Injection). Spectre + V2 is a class of side channel attacks that takes advantage of + indirect branch predictors inside the processor. In Spectre variant 2 + attacks, the attacker can steer speculative indirect branches in the + victim to gadget code by poisoning the branch target buffer of a CPU + used for predicting indirect branch addresses. + See also + +config MITIGATION_SRBDS + bool "Mitigate Special Register Buffer Data Sampling (SRBDS) hardware bug" + depends on CPU_SUP_INTEL + default y + help + Enable mitigation for Special Register Buffer Data Sampling (SRBDS). + SRBDS is a hardware vulnerability that allows Microarchitectural Data + Sampling (MDS) techniques to infer values returned from special + register accesses. An unprivileged user can extract values returned + from RDRAND and RDSEED executed on another core or sibling thread + using MDS techniques. + See also + + +config MITIGATION_SSB + bool "Mitigate Speculative Store Bypass (SSB) hardware bug" + default y + help + Enable mitigation for Speculative Store Bypass (SSB). SSB is a + hardware security vulnerability and its exploitation takes advantage + of speculative execution in a similar way to the Meltdown and Spectre + security vulnerabilities. + endif config ARCH_HAS_ADD_PAGES @@ -2979,9 +3068,13 @@ config OLPC_XO15_SCI - AC adapter status updates - Battery status updates +config GEODE_COMMON + bool + config ALIX bool "PCEngines ALIX System Support (LED setup)" select GPIOLIB + select GEODE_COMMON help This option enables system support for the PCEngines ALIX. At present this just sets up LEDs for GPIO control on @@ -2996,12 +3089,14 @@ config ALIX config NET5501 bool "Soekris Engineering net5501 System Support (LEDS, GPIO, etc)" select GPIOLIB + select GEODE_COMMON help This option enables system support for the Soekris Engineering net5501. config GEOS bool "Traverse Technologies GEOS System Support (LEDS, GPIO, etc)" select GPIOLIB + select GEODE_COMMON depends on DMI help This option enables system support for the Traverse Technologies GEOS. 
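The MITIGATION_* switches introduced in the arch/x86/Kconfig hunk above are plain bool options that default to y, so each CPU-bug mitigation can now be compiled out individually instead of only being controlled on the kernel command line. As a rough illustration only (this fragment is not part of the patch; option availability still depends on things like CPU_SUP_INTEL), a .config fragment that keeps the Spectre v2, MDS and GDS mitigations built in while compiling out the TSX- and MMIO-related ones could look like:

# illustrative .config fragment, not taken from this diff
CONFIG_MITIGATION_SPECTRE_V2=y
CONFIG_MITIGATION_MDS=y
CONFIG_MITIGATION_GDS=y
# CONFIG_MITIGATION_TAA is not set
# CONFIG_MITIGATION_MMIO_STALE_DATA is not set

This uses the same "CONFIG_FOO=y" / "# CONFIG_FOO is not set" notation as the tiny.config change later in this series.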
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 801fd85c3ef693..cd75e78a06c109 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -24,11 +24,15 @@ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix) ifdef CONFIG_MITIGATION_RETHUNK RETHUNK_CFLAGS := -mfunction-return=thunk-extern +RETHUNK_RUSTFLAGS := -Zfunction-return=thunk-extern RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS) +RETPOLINE_RUSTFLAGS += $(RETHUNK_RUSTFLAGS) endif export RETHUNK_CFLAGS +export RETHUNK_RUSTFLAGS export RETPOLINE_CFLAGS +export RETPOLINE_RUSTFLAGS export RETPOLINE_VDSO_CFLAGS # For gcc stack alignment is specified with -mpreferred-stack-boundary, @@ -218,9 +222,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables # Avoid indirect branches in kernel to deal with Spectre ifdef CONFIG_MITIGATION_RETPOLINE KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) + KBUILD_RUSTFLAGS += $(RETPOLINE_RUSTFLAGS) # Additionally, avoid generating expensive indirect jumps which # are subject to retpolines for small number of switch cases. - # clang turns off jump table generation by default when under + # LLVM turns off jump table generation by default when under # retpoline builds, however, gcc does not for x86. This has # only been fixed starting from gcc stable version 8.4.0 and # onwards, but not for older ones. See gcc bug #86952. @@ -237,6 +242,10 @@ ifdef CONFIG_CALL_PADDING PADDING_CFLAGS := -fpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES) KBUILD_CFLAGS += $(PADDING_CFLAGS) export PADDING_CFLAGS + +PADDING_RUSTFLAGS := -Zpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES) +KBUILD_RUSTFLAGS += $(PADDING_RUSTFLAGS) +export PADDING_RUSTFLAGS endif KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE) diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 944454306ef408..04a35b2c26e9be 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -511,7 +511,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output) if (init_unaccepted_memory()) { debug_putstr("Accepting memory... "); - accept_memory(__pa(output), __pa(output) + needed_size); + accept_memory(__pa(output), needed_size); } entry_offset = decompress_kernel(output, virt_addr, error); diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index b353a7be380cb8..dd8d1a85f6716a 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -256,6 +256,6 @@ static inline bool init_unaccepted_memory(void) { return false; } /* Defined in EFI stub */ extern struct efi_unaccepted_memory *unaccepted_table; -void accept_memory(phys_addr_t start, phys_addr_t end); +void accept_memory(phys_addr_t start, unsigned long size); #endif /* BOOT_COMPRESSED_MISC_H */ diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index da8b66dce0da5f..327c45c5013fea 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -16,6 +16,7 @@ #include #include #include +#include /* MMIO direction */ #define EPT_READ 0 @@ -433,6 +434,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve) return -EINVAL; } + if (!fault_in_kernel_space(ve->gla)) { + WARN_ONCE(1, "Access to userspace address is not supported"); + return -EINVAL; + } + /* * Reject EPT violation #VEs that split pages. 
* diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config index be3ee4294903bd..aabafa3faa6df9 100644 --- a/arch/x86/configs/tiny.config +++ b/arch/x86/configs/tiny.config @@ -1,6 +1,2 @@ CONFIG_NOHIGHMEM=y -# CONFIG_HIGHMEM4G is not set -# CONFIG_HIGHMEM64G is not set -# CONFIG_UNWINDER_ORC is not set CONFIG_UNWINDER_GUESS=y -# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 24875e6295f2d6..7b1bebed879df3 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -14,7 +14,7 @@ config CRYPTO_CURVE25519_X86 - ADX (large integer arithmetic) config CRYPTO_AES_NI_INTEL - tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTR, XTS, GCM (AES-NI)" + tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XCTR, XTS, GCM (AES-NI/VAES)" depends on X86 select CRYPTO_AEAD select CRYPTO_LIB_AES @@ -25,10 +25,14 @@ config CRYPTO_AES_NI_INTEL help Block cipher: AES cipher algorithms AEAD cipher: AES with GCM - Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTR, XTS + Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XCTR, XTS Architecture: x86 (32-bit and 64-bit) using: - AES-NI (AES new instructions) + - VAES (Vector AES) + + Some algorithm implementations are supported only in 64-bit builds, + and some have additional prerequisites such as AVX2 or AVX512. config CRYPTO_BLOWFISH_X86_64 tristate "Ciphers: Blowfish, modes: ECB, CBC" diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index cd37de5ec40464..b0dd8355549979 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags) err = skcipher_walk_aead_encrypt(&walk, req, false); else err = skcipher_walk_aead_decrypt(&walk, req, false); + if (err) + return err; /* * Since the AES-GCM assembly code requires that at least three assembly @@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags) gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags); /* En/decrypt the data and pass the ciphertext through GHASH. */ - while ((nbytes = walk.nbytes) != 0) { - if (unlikely(nbytes < walk.total)) { - /* - * Non-last segment. In this case, the assembly - * function requires that the length be a multiple of 16 - * (AES_BLOCK_SIZE) bytes. The needed buffering of up - * to 16 bytes is handled by the skcipher_walk. Here we - * just need to round down to a multiple of 16. - */ - nbytes = round_down(nbytes, AES_BLOCK_SIZE); - aes_gcm_update(key, le_ctr, ghash_acc, - walk.src.virt.addr, walk.dst.virt.addr, - nbytes, flags); - le_ctr[0] += nbytes / AES_BLOCK_SIZE; - kernel_fpu_end(); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - kernel_fpu_begin(); - } else { - /* Last segment: process all remaining data. */ - aes_gcm_update(key, le_ctr, ghash_acc, - walk.src.virt.addr, walk.dst.virt.addr, - nbytes, flags); - err = skcipher_walk_done(&walk, 0); - /* - * The low word of the counter isn't used by the - * finalize, so there's no need to increment it here. - */ - } + while (unlikely((nbytes = walk.nbytes) < walk.total)) { + /* + * Non-last segment. In this case, the assembly function + * requires that the length be a multiple of 16 (AES_BLOCK_SIZE) + * bytes. The needed buffering of up to 16 bytes is handled by + * the skcipher_walk. Here we just need to round down to a + * multiple of 16. 
+ */ + nbytes = round_down(nbytes, AES_BLOCK_SIZE); + aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr, + walk.dst.virt.addr, nbytes, flags); + le_ctr[0] += nbytes / AES_BLOCK_SIZE; + kernel_fpu_end(); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + if (err) + return err; + kernel_fpu_begin(); } - if (err) - goto out; + /* Last segment: process all remaining data. */ + aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr, + walk.dst.virt.addr, nbytes, flags); + /* + * The low word of the counter isn't used by the finalize, so there's no + * need to increment it here. + */ /* Finalize */ taglen = crypto_aead_authsize(tfm); @@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags) datalen, tag, taglen, flags)) err = -EBADMSG; } -out: kernel_fpu_end(); + if (nbytes) + skcipher_walk_done(&walk, 0); return err; } @@ -1753,6 +1750,6 @@ static void __exit aesni_exit(void) late_initcall(aesni_init); module_exit(aesni_exit); -MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); +MODULE_DESCRIPTION("AES cipher and modes, optimized with AES-NI or VAES instructions"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("aes"); diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index d45e9c0c42acf7..f110708c8038c7 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -8,7 +8,7 @@ * Copyright (C) 2006 NTT (Nippon Telegraph and Telephone Corporation) */ -#include +#include #include #include #include diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 700ecaee9a08aa..41bc02e489160c 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 0ffb072be95615..0bbec1c75cd0be 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -592,22 +592,22 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) leaq K256+0*32(%rip), INP ## reuse INP as scratch reg vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) - FOUR_ROUNDS_AND_SCHED _XFER + 0*32 + FOUR_ROUNDS_AND_SCHED (_XFER + 0*32) leaq K256+1*32(%rip), INP vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) - FOUR_ROUNDS_AND_SCHED _XFER + 1*32 + FOUR_ROUNDS_AND_SCHED (_XFER + 1*32) leaq K256+2*32(%rip), INP vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 2*32+_XFER(%rsp, SRND) - FOUR_ROUNDS_AND_SCHED _XFER + 2*32 + FOUR_ROUNDS_AND_SCHED (_XFER + 2*32) leaq K256+3*32(%rip), INP vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 3*32+_XFER(%rsp, SRND) - FOUR_ROUNDS_AND_SCHED _XFER + 3*32 + FOUR_ROUNDS_AND_SCHED (_XFER + 3*32) add $4*32, SRND cmp $3*4*32, SRND @@ -618,12 +618,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) leaq K256+0*32(%rip), INP vpaddd (INP, SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) - DO_4ROUNDS _XFER + 0*32 + DO_4ROUNDS (_XFER + 0*32) leaq K256+1*32(%rip), INP vpaddd (INP, SRND), X1, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) - DO_4ROUNDS _XFER + 1*32 + DO_4ROUNDS (_XFER + 1*32) add $2*32, SRND vmovdqa X2, X0 @@ -651,8 +651,8 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) xor SRND, SRND .align 16 .Lloop3: - DO_4ROUNDS _XFER + 0*32 + 16 - DO_4ROUNDS _XFER + 1*32 + 16 + DO_4ROUNDS (_XFER + 0*32 + 16) + DO_4ROUNDS (_XFER + 1*32 + 16) add $2*32, SRND cmp $4*4*32, SRND 
jb .Lloop3 diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 51cc9c7cb9bdc0..94941c5a10ac10 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -150,7 +150,7 @@ early_param("ia32_emulation", ia32_emulation_override_cmdline); #endif /* - * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. + * Invoke a 32-bit syscall. Called with IRQs on in CT_STATE_KERNEL. */ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr) { diff --git a/arch/x86/entry/vdso/vgetrandom.c b/arch/x86/entry/vdso/vgetrandom.c index 52d3c7faae2e3c..430862b8977c78 100644 --- a/arch/x86/entry/vdso/vgetrandom.c +++ b/arch/x86/entry/vdso/vgetrandom.c @@ -6,8 +6,6 @@ #include "../../../../lib/vdso/getrandom.c" -ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len); - ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len) { return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len); diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 6d83ceb7f1bada..b8fed8b8b9ccdb 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -38,6 +38,9 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page) } #undef EMIT_VVAR +DEFINE_VVAR(struct vdso_data, _vdso_data); +DEFINE_VVAR_SINGLE(struct vdso_rng_data, _vdso_rng_data); + unsigned int vclocks_used __read_mostly; #if defined(CONFIG_X86_64) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index be01823b1bb453..65ab6460aed4d7 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -41,6 +41,8 @@ #include #include #include +#include +#include #include "perf_event.h" @@ -2816,6 +2818,46 @@ static unsigned long get_segment_base(unsigned int segment) return get_desc_base(desc); } +#ifdef CONFIG_UPROBES +/* + * Heuristic-based check if uprobe is installed at the function entry. + * + * Under assumption of user code being compiled with frame pointers, + * `push %rbp/%ebp` is a good indicator that we indeed are. + * + * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern. + * If we get this wrong, captured stack trace might have one extra bogus + * entry, but the rest of stack trace will still be meaningful. 
+ */ +static bool is_uprobe_at_func_entry(struct pt_regs *regs) +{ + struct arch_uprobe *auprobe; + + if (!current->utask) + return false; + + auprobe = current->utask->auprobe; + if (!auprobe) + return false; + + /* push %rbp/%ebp */ + if (auprobe->insn[0] == 0x55) + return true; + + /* endbr64 (64-bit only) */ + if (user_64bit_mode(regs) && is_endbr(*(u32 *)auprobe->insn)) + return true; + + return false; +} + +#else +static bool is_uprobe_at_func_entry(struct pt_regs *regs) +{ + return false; +} +#endif /* CONFIG_UPROBES */ + #ifdef CONFIG_IA32_EMULATION #include @@ -2827,6 +2869,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent unsigned long ss_base, cs_base; struct stack_frame_ia32 frame; const struct stack_frame_ia32 __user *fp; + u32 ret_addr; if (user_64bit_mode(regs)) return 0; @@ -2836,6 +2879,12 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent fp = compat_ptr(ss_base + regs->bp); pagefault_disable(); + + /* see perf_callchain_user() below for why we do this */ + if (is_uprobe_at_func_entry(regs) && + !get_user(ret_addr, (const u32 __user *)regs->sp)) + perf_callchain_store(entry, ret_addr); + while (entry->nr < entry->max_stack) { if (!valid_user_frame(fp, sizeof(frame))) break; @@ -2864,6 +2913,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs { struct stack_frame frame; const struct stack_frame __user *fp; + unsigned long ret_addr; if (perf_guest_state()) { /* TODO: We don't support guest os callchain now */ @@ -2887,6 +2937,19 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs return; pagefault_disable(); + + /* + * If we are called from uprobe handler, and we are indeed at the very + * entry to user function (which is normally a `push %rbp` instruction, + * under assumption of application being compiled with frame pointers), + * we should read return address from *regs->sp before proceeding + * to follow frame pointers, otherwise we'll skip immediate caller + * as %rbp is not yet setup. + */ + if (is_uprobe_at_func_entry(regs) && + !get_user(ret_addr, (const unsigned long __user *)regs->sp)) + perf_callchain_store(entry, ret_addr); + while (entry->nr < entry->max_stack) { if (!valid_user_frame(fp, sizeof(frame))) break; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 974e917e65b24a..8f78b0c900ef4b 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -557,9 +557,6 @@ static int bts_event_init(struct perf_event *event) * disabled, so disallow intel_bts driver for unprivileged * users on paranoid systems since it provides trace data * to the user in a zero-copy fashion. - * - * Note that the default paranoia setting permits unprivileged - * users to profile the kernel. 
*/ if (event->attr.exclude_kernel) { ret = perf_allow_kernel(&event->attr); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9e519d8a810a68..d879478db3f572 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3972,8 +3972,12 @@ static int intel_pmu_hw_config(struct perf_event *event) x86_pmu.pebs_aliases(event); } - if (needs_branch_stack(event) && is_sampling_event(event)) - event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + if (needs_branch_stack(event)) { + /* Avoid branch stack setup for counting events in SAMPLE READ */ + if (is_sampling_event(event) || + !(event->attr.sample_type & PERF_SAMPLE_READ)) + event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + } if (branch_sample_counters(event)) { struct perf_event *leader, *sibling; diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 9f116dfc472845..ae4ec16156bb0f 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -128,10 +128,6 @@ static ssize_t __cstate_##_var##_show(struct device *dev, \ static struct device_attribute format_attr_##_var = \ __ATTR(_name, 0444, __cstate_##_var##_show, NULL) -static ssize_t cstate_get_attr_cpumask(struct device *dev, - struct device_attribute *attr, - char *buf); - /* Model -> events mapping */ struct cstate_model { unsigned long core_events; @@ -206,22 +202,9 @@ static struct attribute_group cstate_format_attr_group = { .attrs = cstate_format_attrs, }; -static cpumask_t cstate_core_cpu_mask; -static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL); - -static struct attribute *cstate_cpumask_attrs[] = { - &dev_attr_cpumask.attr, - NULL, -}; - -static struct attribute_group cpumask_attr_group = { - .attrs = cstate_cpumask_attrs, -}; - static const struct attribute_group *cstate_attr_groups[] = { &cstate_events_attr_group, &cstate_format_attr_group, - &cpumask_attr_group, NULL, }; @@ -269,8 +252,6 @@ static struct perf_msr pkg_msr[] = { [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr }, }; -static cpumask_t cstate_pkg_cpu_mask; - /* cstate_module PMU */ static struct pmu cstate_module_pmu; static bool has_cstate_module; @@ -291,28 +272,9 @@ static struct perf_msr module_msr[] = { [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr }, }; -static cpumask_t cstate_module_cpu_mask; - -static ssize_t cstate_get_attr_cpumask(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct pmu *pmu = dev_get_drvdata(dev); - - if (pmu == &cstate_core_pmu) - return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask); - else if (pmu == &cstate_pkg_pmu) - return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask); - else if (pmu == &cstate_module_pmu) - return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask); - else - return 0; -} - static int cstate_pmu_event_init(struct perf_event *event) { u64 cfg = event->attr.config; - int cpu; if (event->attr.type != event->pmu->type) return -ENOENT; @@ -331,20 +293,13 @@ static int cstate_pmu_event_init(struct perf_event *event) if (!(core_msr_mask & (1 << cfg))) return -EINVAL; event->hw.event_base = core_msr[cfg].msr; - cpu = cpumask_any_and(&cstate_core_cpu_mask, - topology_sibling_cpumask(event->cpu)); } else if (event->pmu == &cstate_pkg_pmu) { if (cfg >= PERF_CSTATE_PKG_EVENT_MAX) return -EINVAL; cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX); if (!(pkg_msr_mask & (1 << cfg))) return -EINVAL; - - 
event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; - event->hw.event_base = pkg_msr[cfg].msr; - cpu = cpumask_any_and(&cstate_pkg_cpu_mask, - topology_die_cpumask(event->cpu)); } else if (event->pmu == &cstate_module_pmu) { if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX) return -EINVAL; @@ -352,16 +307,10 @@ static int cstate_pmu_event_init(struct perf_event *event) if (!(module_msr_mask & (1 << cfg))) return -EINVAL; event->hw.event_base = module_msr[cfg].msr; - cpu = cpumask_any_and(&cstate_module_cpu_mask, - topology_cluster_cpumask(event->cpu)); } else { return -ENOENT; } - if (cpu >= nr_cpu_ids) - return -ENODEV; - - event->cpu = cpu; event->hw.config = cfg; event->hw.idx = -1; return 0; @@ -412,84 +361,6 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode) return 0; } -/* - * Check if exiting cpu is the designated reader. If so migrate the - * events when there is a valid target available - */ -static int cstate_cpu_exit(unsigned int cpu) -{ - unsigned int target; - - if (has_cstate_core && - cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) { - - target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); - /* Migrate events if there is a valid target */ - if (target < nr_cpu_ids) { - cpumask_set_cpu(target, &cstate_core_cpu_mask); - perf_pmu_migrate_context(&cstate_core_pmu, cpu, target); - } - } - - if (has_cstate_pkg && - cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) { - - target = cpumask_any_but(topology_die_cpumask(cpu), cpu); - /* Migrate events if there is a valid target */ - if (target < nr_cpu_ids) { - cpumask_set_cpu(target, &cstate_pkg_cpu_mask); - perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target); - } - } - - if (has_cstate_module && - cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) { - - target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu); - /* Migrate events if there is a valid target */ - if (target < nr_cpu_ids) { - cpumask_set_cpu(target, &cstate_module_cpu_mask); - perf_pmu_migrate_context(&cstate_module_pmu, cpu, target); - } - } - return 0; -} - -static int cstate_cpu_init(unsigned int cpu) -{ - unsigned int target; - - /* - * If this is the first online thread of that core, set it in - * the core cpu mask as the designated reader. - */ - target = cpumask_any_and(&cstate_core_cpu_mask, - topology_sibling_cpumask(cpu)); - - if (has_cstate_core && target >= nr_cpu_ids) - cpumask_set_cpu(cpu, &cstate_core_cpu_mask); - - /* - * If this is the first online thread of that package, set it - * in the package cpu mask as the designated reader. - */ - target = cpumask_any_and(&cstate_pkg_cpu_mask, - topology_die_cpumask(cpu)); - if (has_cstate_pkg && target >= nr_cpu_ids) - cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); - - /* - * If this is the first online thread of that cluster, set it - * in the cluster cpu mask as the designated reader. 
- */ - target = cpumask_any_and(&cstate_module_cpu_mask, - topology_cluster_cpumask(cpu)); - if (has_cstate_module && target >= nr_cpu_ids) - cpumask_set_cpu(cpu, &cstate_module_cpu_mask); - - return 0; -} - static const struct attribute_group *core_attr_update[] = { &group_cstate_core_c1, &group_cstate_core_c3, @@ -526,6 +397,7 @@ static struct pmu cstate_core_pmu = { .stop = cstate_pmu_event_stop, .read = cstate_pmu_event_update, .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, + .scope = PERF_PMU_SCOPE_CORE, .module = THIS_MODULE, }; @@ -541,6 +413,7 @@ static struct pmu cstate_pkg_pmu = { .stop = cstate_pmu_event_stop, .read = cstate_pmu_event_update, .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, + .scope = PERF_PMU_SCOPE_PKG, .module = THIS_MODULE, }; @@ -556,6 +429,7 @@ static struct pmu cstate_module_pmu = { .stop = cstate_pmu_event_stop, .read = cstate_pmu_event_update, .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, + .scope = PERF_PMU_SCOPE_CLUSTER, .module = THIS_MODULE, }; @@ -810,9 +684,6 @@ static int __init cstate_probe(const struct cstate_model *cm) static inline void cstate_cleanup(void) { - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE); - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING); - if (has_cstate_core) perf_pmu_unregister(&cstate_core_pmu); @@ -827,11 +698,6 @@ static int __init cstate_init(void) { int err; - cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING, - "perf/x86/cstate:starting", cstate_cpu_init, NULL); - cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE, - "perf/x86/cstate:online", NULL, cstate_cpu_exit); - if (has_cstate_core) { err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1); if (err) { @@ -844,6 +710,8 @@ static int __init cstate_init(void) if (has_cstate_pkg) { if (topology_max_dies_per_package() > 1) { + /* CLX-AP is multi-die and the cstate is die-scope */ + cstate_pkg_pmu.scope = PERF_PMU_SCOPE_DIE; err = perf_pmu_register(&cstate_pkg_pmu, "cstate_die", -1); } else { diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index b4aa8daa47738f..fd4670a6694e77 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -416,7 +416,7 @@ static bool pt_event_valid(struct perf_event *event) static void pt_config_start(struct perf_event *event) { struct pt *pt = this_cpu_ptr(&pt_ctx); - u64 ctl = event->hw.config; + u64 ctl = event->hw.aux_config; ctl |= RTIT_CTL_TRACEEN; if (READ_ONCE(pt->vmx_on)) @@ -424,7 +424,7 @@ static void pt_config_start(struct perf_event *event) else wrmsrl(MSR_IA32_RTIT_CTL, ctl); - WRITE_ONCE(event->hw.config, ctl); + WRITE_ONCE(event->hw.aux_config, ctl); } /* Address ranges and their corresponding msr configuration registers */ @@ -503,7 +503,7 @@ static void pt_config(struct perf_event *event) u64 reg; /* First round: clear STATUS, in particular the PSB byte counter. 
*/ - if (!event->hw.config) { + if (!event->hw.aux_config) { perf_event_itrace_started(event); wrmsrl(MSR_IA32_RTIT_STATUS, 0); } @@ -533,14 +533,14 @@ static void pt_config(struct perf_event *event) reg |= (event->attr.config & PT_CONFIG_MASK); - event->hw.config = reg; + event->hw.aux_config = reg; pt_config_start(event); } static void pt_config_stop(struct perf_event *event) { struct pt *pt = this_cpu_ptr(&pt_ctx); - u64 ctl = READ_ONCE(event->hw.config); + u64 ctl = READ_ONCE(event->hw.aux_config); /* may be already stopped by a PMI */ if (!(ctl & RTIT_CTL_TRACEEN)) @@ -550,7 +550,7 @@ static void pt_config_stop(struct perf_event *event) if (!READ_ONCE(pt->vmx_on)) wrmsrl(MSR_IA32_RTIT_CTL, ctl); - WRITE_ONCE(event->hw.config, ctl); + WRITE_ONCE(event->hw.aux_config, ctl); /* * A wrmsr that disables trace generation serializes other PT @@ -1557,7 +1557,7 @@ void intel_pt_handle_vmx(int on) /* Turn PTs back on */ if (!on && event) - wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config); + wrmsrl(MSR_IA32_RTIT_CTL, event->hw.aux_config); local_irq_restore(flags); } @@ -1606,6 +1606,7 @@ static void pt_event_stop(struct perf_event *event, int mode) * see comment in intel_pt_interrupt(). */ WRITE_ONCE(pt->handle_nmi, 0); + barrier(); pt_config_stop(event); @@ -1657,11 +1658,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, return 0; /* - * Here, handle_nmi tells us if the tracing is on + * There is no PT interrupt in this mode, so stop the trace and it will + * remain stopped while the buffer is copied. */ - if (READ_ONCE(pt->handle_nmi)) - pt_config_stop(event); - + pt_config_stop(event); pt_read_offset(buf); pt_update_head(pt); @@ -1673,11 +1673,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, ret = perf_output_copy_aux(&pt->handle, handle, from, to); /* - * If the tracing was on when we turned up, restart it. - * Compiler barrier not needed as we couldn't have been - * preempted by anything that touches pt->handle_nmi. + * Here, handle_nmi tells us if the tracing was on. + * If the tracing was on, restart it. 
*/ - if (pt->handle_nmi) + if (READ_ONCE(pt->handle_nmi)) pt_config_start(event); return ret; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 64ca8625eb5809..d98fac56768469 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1816,6 +1816,11 @@ static const struct intel_uncore_init_fun mtl_uncore_init __initconst = { .mmio_init = adl_uncore_mmio_init, }; +static const struct intel_uncore_init_fun lnl_uncore_init __initconst = { + .cpu_init = lnl_uncore_cpu_init, + .mmio_init = lnl_uncore_mmio_init, +}; + static const struct intel_uncore_init_fun icx_uncore_init __initconst = { .cpu_init = icx_uncore_cpu_init, .pci_init = icx_uncore_pci_init, @@ -1893,6 +1898,10 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_uncore_init), X86_MATCH_VFM(INTEL_METEORLAKE, &mtl_uncore_init), X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_uncore_init), + X86_MATCH_VFM(INTEL_ARROWLAKE, &mtl_uncore_init), + X86_MATCH_VFM(INTEL_ARROWLAKE_U, &mtl_uncore_init), + X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init), + X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init), X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init), X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init), X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init), diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 027ef292c60223..79ff32e13dcc81 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -611,10 +611,12 @@ void skl_uncore_cpu_init(void); void icl_uncore_cpu_init(void); void tgl_uncore_cpu_init(void); void adl_uncore_cpu_init(void); +void lnl_uncore_cpu_init(void); void mtl_uncore_cpu_init(void); void tgl_uncore_mmio_init(void); void tgl_l_uncore_mmio_init(void); void adl_uncore_mmio_init(void); +void lnl_uncore_mmio_init(void); int snb_pci2phy_map_init(int devid); /* uncore_snbep.c */ diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 9462fd9f3b7abc..3934e1e4e3b166 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -252,6 +252,7 @@ DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29"); +DEFINE_UNCORE_FORMAT_ATTR(threshold2, threshold, "config:24-31"); /* Sandy Bridge uncore support */ static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) @@ -746,6 +747,34 @@ void mtl_uncore_cpu_init(void) uncore_msr_uncores = mtl_msr_uncores; } +static struct intel_uncore_type *lnl_msr_uncores[] = { + &mtl_uncore_cbox, + &mtl_uncore_arb, + NULL +}; + +#define LNL_UNC_MSR_GLOBAL_CTL 0x240e + +static void lnl_uncore_msr_init_box(struct intel_uncore_box *box) +{ + if (box->pmu->pmu_idx == 0) + wrmsrl(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); +} + +static struct intel_uncore_ops lnl_uncore_msr_ops = { + .init_box = lnl_uncore_msr_init_box, + .disable_event = snb_uncore_msr_disable_event, + .enable_event = snb_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +void lnl_uncore_cpu_init(void) +{ + mtl_uncore_cbox.num_boxes = 4; + mtl_uncore_cbox.ops = &lnl_uncore_msr_ops; + uncore_msr_uncores = lnl_msr_uncores; +} + enum { SNB_PCI_UNCORE_IMC, }; @@ -1475,39 +1504,45 @@ static struct pci_dev *tgl_uncore_get_mc_dev(void) ids++; } + /* Just try to grab 00:00.0 
device */ + if (!mc_dev) + mc_dev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); + return mc_dev; } #define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000 #define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000 -static void __uncore_imc_init_box(struct intel_uncore_box *box, - unsigned int base_offset) +static void +uncore_get_box_mmio_addr(struct intel_uncore_box *box, + unsigned int base_offset, + int bar_offset, int step) { struct pci_dev *pdev = tgl_uncore_get_mc_dev(); struct intel_uncore_pmu *pmu = box->pmu; struct intel_uncore_type *type = pmu->type; resource_size_t addr; - u32 mch_bar; + u32 bar; if (!pdev) { pr_warn("perf uncore: Cannot find matched IMC device.\n"); return; } - pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar); - /* MCHBAR is disabled */ - if (!(mch_bar & BIT(0))) { - pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n"); + pci_read_config_dword(pdev, bar_offset, &bar); + if (!(bar & BIT(0))) { + pr_warn("perf uncore: BAR 0x%x is disabled. Failed to map %s counters.\n", + bar_offset, type->name); pci_dev_put(pdev); return; } - mch_bar &= ~BIT(0); - addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx); + bar &= ~BIT(0); + addr = (resource_size_t)(bar + step * pmu->pmu_idx); #ifdef CONFIG_PHYS_ADDR_T_64BIT - pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar); - addr |= ((resource_size_t)mch_bar << 32); + pci_read_config_dword(pdev, bar_offset + 4, &bar); + addr |= ((resource_size_t)bar << 32); #endif addr += base_offset; @@ -1518,6 +1553,14 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box, pci_dev_put(pdev); } +static void __uncore_imc_init_box(struct intel_uncore_box *box, + unsigned int base_offset) +{ + uncore_get_box_mmio_addr(box, base_offset, + SNB_UNCORE_PCI_IMC_BAR_OFFSET, + TGL_UNCORE_MMIO_IMC_MEM_OFFSET); +} + static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) { __uncore_imc_init_box(box, 0); @@ -1612,14 +1655,17 @@ static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box) writel(0, box->io_addr + uncore_mmio_box_ctl(box)); } +#define MMIO_UNCORE_COMMON_OPS() \ + .exit_box = uncore_mmio_exit_box, \ + .disable_box = adl_uncore_mmio_disable_box, \ + .enable_box = adl_uncore_mmio_enable_box, \ + .disable_event = intel_generic_uncore_mmio_disable_event, \ + .enable_event = intel_generic_uncore_mmio_enable_event, \ + .read_counter = uncore_mmio_read_counter, + static struct intel_uncore_ops adl_uncore_mmio_ops = { .init_box = adl_uncore_imc_init_box, - .exit_box = uncore_mmio_exit_box, - .disable_box = adl_uncore_mmio_disable_box, - .enable_box = adl_uncore_mmio_enable_box, - .disable_event = intel_generic_uncore_mmio_disable_event, - .enable_event = intel_generic_uncore_mmio_enable_event, - .read_counter = uncore_mmio_read_counter, + MMIO_UNCORE_COMMON_OPS() }; #define ADL_UNC_CTL_CHMASK_MASK 0x00000f00 @@ -1703,3 +1749,108 @@ void adl_uncore_mmio_init(void) } /* end of Alder Lake MMIO uncore support */ + +/* Lunar Lake MMIO uncore support */ +#define LNL_UNCORE_PCI_SAFBAR_OFFSET 0x68 +#define LNL_UNCORE_MAP_SIZE 0x1000 +#define LNL_UNCORE_SNCU_BASE 0xE4B000 +#define LNL_UNCORE_SNCU_CTR 0x390 +#define LNL_UNCORE_SNCU_CTRL 0x398 +#define LNL_UNCORE_SNCU_BOX_CTL 0x380 +#define LNL_UNCORE_GLOBAL_CTL 0x700 +#define LNL_UNCORE_HBO_BASE 0xE54000 +#define LNL_UNCORE_HBO_OFFSET -4096 +#define LNL_UNCORE_HBO_CTR 0x570 +#define LNL_UNCORE_HBO_CTRL 0x550 +#define LNL_UNCORE_HBO_BOX_CTL 0x548 + +#define LNL_UNC_CTL_THRESHOLD 
0xff000000 +#define LNL_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ + SNB_UNC_CTL_UMASK_MASK | \ + SNB_UNC_CTL_EDGE_DET | \ + SNB_UNC_CTL_INVERT | \ + LNL_UNC_CTL_THRESHOLD) + +static struct attribute *lnl_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_threshold2.attr, + NULL +}; + +static const struct attribute_group lnl_uncore_format_group = { + .name = "format", + .attrs = lnl_uncore_formats_attr, +}; + +static void lnl_uncore_hbo_init_box(struct intel_uncore_box *box) +{ + uncore_get_box_mmio_addr(box, LNL_UNCORE_HBO_BASE, + LNL_UNCORE_PCI_SAFBAR_OFFSET, + LNL_UNCORE_HBO_OFFSET); +} + +static struct intel_uncore_ops lnl_uncore_hbo_ops = { + .init_box = lnl_uncore_hbo_init_box, + MMIO_UNCORE_COMMON_OPS() +}; + +static struct intel_uncore_type lnl_uncore_hbo = { + .name = "hbo", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 64, + .perf_ctr = LNL_UNCORE_HBO_CTR, + .event_ctl = LNL_UNCORE_HBO_CTRL, + .event_mask = LNL_UNC_RAW_EVENT_MASK, + .box_ctl = LNL_UNCORE_HBO_BOX_CTL, + .mmio_map_size = LNL_UNCORE_MAP_SIZE, + .ops = &lnl_uncore_hbo_ops, + .format_group = &lnl_uncore_format_group, +}; + +static void lnl_uncore_sncu_init_box(struct intel_uncore_box *box) +{ + uncore_get_box_mmio_addr(box, LNL_UNCORE_SNCU_BASE, + LNL_UNCORE_PCI_SAFBAR_OFFSET, + 0); + + if (box->io_addr) + writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + LNL_UNCORE_GLOBAL_CTL); +} + +static struct intel_uncore_ops lnl_uncore_sncu_ops = { + .init_box = lnl_uncore_sncu_init_box, + MMIO_UNCORE_COMMON_OPS() +}; + +static struct intel_uncore_type lnl_uncore_sncu = { + .name = "sncu", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 64, + .perf_ctr = LNL_UNCORE_SNCU_CTR, + .event_ctl = LNL_UNCORE_SNCU_CTRL, + .event_mask = LNL_UNC_RAW_EVENT_MASK, + .box_ctl = LNL_UNCORE_SNCU_BOX_CTL, + .mmio_map_size = LNL_UNCORE_MAP_SIZE, + .ops = &lnl_uncore_sncu_ops, + .format_group = &lnl_uncore_format_group, +}; + +static struct intel_uncore_type *lnl_mmio_uncores[] = { + &adl_uncore_imc, + &lnl_uncore_hbo, + &lnl_uncore_sncu, + &adl_uncore_imc_free_running, + NULL +}; + +void lnl_uncore_mmio_init(void) +{ + uncore_mmio_uncores = lnl_mmio_uncores; +} + +/* end of Lunar Lake MMIO uncore support */ diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index b985ca79cf97ba..a481a939862e54 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -103,6 +103,19 @@ static struct perf_pmu_events_attr event_attr_##v = { \ .event_str = str, \ }; +/* + * RAPL Package energy counter scope: + * 1. AMD/HYGON platforms have a per-PKG package energy counter + * 2. For Intel platforms + * 2.1. CLX-AP is multi-die and its RAPL MSRs are die-scope + * 2.2. Other Intel platforms are single die systems so the scope can be + * considered as either pkg-scope or die-scope, and we are considering + * them as die-scope. + */ +#define rapl_pmu_is_pkg_scope() \ + (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || \ + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + struct rapl_pmu { raw_spinlock_t lock; int n_active; @@ -140,9 +153,25 @@ static unsigned int rapl_cntr_mask; static u64 rapl_timer_ms; static struct perf_msr *rapl_msrs; +/* + * Helper functions to get the correct topology macros according to the + * RAPL PMU scope. + */ +static inline unsigned int get_rapl_pmu_idx(int cpu) +{ + return rapl_pmu_is_pkg_scope() ? 
topology_logical_package_id(cpu) : + topology_logical_die_id(cpu); +} + +static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu) +{ + return rapl_pmu_is_pkg_scope() ? topology_core_cpumask(cpu) : + topology_die_cpumask(cpu); +} + static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) { - unsigned int rapl_pmu_idx = topology_logical_die_id(cpu); + unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu); /* * The unsigned check also catches the '-1' return value for non @@ -552,7 +581,7 @@ static int rapl_cpu_offline(unsigned int cpu) pmu->cpu = -1; /* Find a new cpu to collect rapl events */ - target = cpumask_any_but(topology_die_cpumask(cpu), cpu); + target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu); /* Migrate rapl events to the new target */ if (target < nr_cpu_ids) { @@ -565,6 +594,11 @@ static int rapl_cpu_offline(unsigned int cpu) static int rapl_cpu_online(unsigned int cpu) { + s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu); + if (rapl_pmu_idx < 0) { + pr_err("topology_logical_(package/die)_id() returned a negative value"); + return -EINVAL; + } struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); int target; @@ -579,14 +613,14 @@ static int rapl_cpu_online(unsigned int cpu) pmu->timer_interval = ms_to_ktime(rapl_timer_ms); rapl_hrtimer_init(pmu); - rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu; + rapl_pmus->pmus[rapl_pmu_idx] = pmu; } /* * Check if there is an online cpu in the package which collects rapl * events already. */ - target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu)); + target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu)); if (target < nr_cpu_ids) return 0; @@ -675,7 +709,10 @@ static const struct attribute_group *rapl_attr_update[] = { static int __init init_rapl_pmus(void) { - int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package(); + int nr_rapl_pmu = topology_max_packages(); + + if (!rapl_pmu_is_pkg_scope()) + nr_rapl_pmu *= topology_max_dies_per_package(); rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL); if (!rapl_pmus) diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index b4a851d27c7cb8..60fc3ed728304c 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -321,9 +321,9 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) vmsa->efer = native_read_msr(MSR_EFER); - asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4)); - asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3)); - asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0)); + vmsa->cr4 = native_read_cr4(); + vmsa->cr3 = __native_read_cr3(); + vmsa->cr0 = native_read_cr0(); vmsa->xcr0 = 1; vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT; diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index a192bdea69e2e0..6c23d1661b1739 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -11,3 +11,4 @@ generated-y += xen-hypercalls.h generic-y += early_ioremap.h generic-y += mcs_spinlock.h +generic-y += mmzone.h diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 21bc53f5ed0c81..5ab1a4598d00bc 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -174,6 +174,14 @@ void acpi_generic_reduced_hw_init(void); void x86_default_set_root_pointer(u64 addr); u64 x86_default_get_root_pointer(void); +#ifdef CONFIG_XEN_PV +/* A Xen PV domain needs a special acpi_os_ioremap() handling. 
*/ +extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, + acpi_size size); +void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size); +#define acpi_os_ioremap acpi_os_ioremap +#endif + #else /* !CONFIG_ACPI */ #define acpi_lapic 0 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 9327eb00e96d09..f21ff19326994e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -18,6 +18,11 @@ #define ARCH_APICTIMER_STOPS_ON_C3 1 +/* Macros for apic_extnmi which controls external NMI masking */ +#define APIC_EXTNMI_BSP 0 /* Default */ +#define APIC_EXTNMI_ALL 1 +#define APIC_EXTNMI_NONE 2 + /* * Debugging macros */ @@ -25,22 +30,22 @@ #define APIC_VERBOSE 1 #define APIC_DEBUG 2 -/* Macros for apic_extnmi which controls external NMI masking */ -#define APIC_EXTNMI_BSP 0 /* Default */ -#define APIC_EXTNMI_ALL 1 -#define APIC_EXTNMI_NONE 2 - /* - * Define the default level of output to be very little - * This can be turned up by using apic=verbose for more - * information and apic=debug for _lots_ of information. - * apic_verbosity is defined in apic.c + * Define the default level of output to be very little This can be turned + * up by using apic=verbose for more information and apic=debug for _lots_ + * of information. apic_verbosity is defined in apic.c */ -#define apic_printk(v, s, a...) do { \ - if ((v) <= apic_verbosity) \ - printk(s, ##a); \ - } while (0) - +#define apic_printk(v, s, a...) \ +do { \ + if ((v) <= apic_verbosity) \ + printk(s, ##a); \ +} while (0) + +#define apic_pr_verbose(s, a...) apic_printk(APIC_VERBOSE, KERN_INFO s, ##a) +#define apic_pr_debug(s, a...) apic_printk(APIC_DEBUG, KERN_DEBUG s, ##a) +#define apic_pr_debug_cont(s, a...) apic_printk(APIC_DEBUG, KERN_CONT s, ##a) +/* Unconditional debug prints for code which is guarded by apic_verbosity already */ +#define apic_dbg(s, a...) printk(KERN_DEBUG s, ##a) #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) extern void x86_32_probe_apic(void); @@ -122,8 +127,6 @@ static inline bool apic_is_x2apic_enabled(void) extern void enable_IR_x2apic(void); -extern int get_physical_broadcast(void); - extern int lapic_get_maxlvt(void); extern void clear_local_APIC(void); extern void disconnect_bsp_APIC(int virt_wire_setup); @@ -345,20 +348,12 @@ extern struct apic *apic; * APIC drivers are probed based on how they are listed in the .apicdrivers * section. So the order is important and enforced by the ordering * of different apic driver files in the Makefile. - * - * For the files having two apic drivers, we use apic_drivers() - * to enforce the order with in them. 
*/ #define apic_driver(sym) \ static const struct apic *__apicdrivers_##sym __used \ __aligned(sizeof(struct apic *)) \ __section(".apicdrivers") = { &sym } -#define apic_drivers(sym1, sym2) \ - static struct apic *__apicdrivers_##sym1##sym2[2] __used \ - __aligned(sizeof(struct apic *)) \ - __section(".apicdrivers") = { &sym1, &sym2 } - extern struct apic *__apicdrivers[], *__apicdrivers_end[]; /* @@ -484,7 +479,6 @@ static inline u64 apic_icr_read(void) { return 0; } static inline void apic_icr_write(u32 low, u32 high) { } static inline void apic_wait_icr_idle(void) { } static inline u32 safe_apic_wait_icr_idle(void) { return 0; } -static inline void apic_set_eoi_cb(void (*eoi)(void)) {} static inline void apic_native_eoi(void) { WARN_ON_ONCE(1); } static inline void apic_setup_apic_calls(void) { } @@ -512,8 +506,6 @@ static inline bool is_vector_pending(unsigned int vector) #define TRAMPOLINE_PHYS_LOW 0x467 #define TRAMPOLINE_PHYS_HIGH 0x469 -extern void generic_bigsmp_probe(void); - #ifdef CONFIG_X86_LOCAL_APIC #include @@ -536,8 +528,6 @@ static inline int default_acpi_madt_oem_check(char *a, char *b) { return 0; } static inline void x86_64_probe_apic(void) { } #endif -extern int default_apic_id_valid(u32 apicid); - extern u32 apic_default_calc_apicid(unsigned int cpu); extern u32 apic_flat_calc_apicid(unsigned int cpu); diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index 8db2ec4d6cdac7..1f650b4dde509b 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -163,20 +163,18 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v) } #define arch_atomic64_dec_return arch_atomic64_dec_return -static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v) +static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v) { __alternative_atomic64(add, add_return, ASM_OUTPUT2("+A" (i), "+c" (v)), ASM_NO_INPUT_CLOBBER("memory")); - return i; } -static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v) +static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v) { __alternative_atomic64(sub, sub_return, ASM_OUTPUT2("+A" (i), "+c" (v)), ASM_NO_INPUT_CLOBBER("memory")); - return i; } static __always_inline void arch_atomic64_inc(atomic64_t *v) diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index a3ec87d198ac83..806649c7f23dc6 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -13,6 +13,18 @@ #define INSN_UD2 0x0b0f #define LEN_UD2 2 +/* + * In clang we have UD1s reporting UBSAN failures on X86, 64 and 32bit. + */ +#define INSN_ASOP 0x67 +#define OPCODE_ESCAPE 0x0f +#define SECOND_BYTE_OPCODE_UD1 0xb9 +#define SECOND_BYTE_OPCODE_UD2 0x0b + +#define BUG_NONE 0xffff +#define BUG_UD1 0xfffe +#define BUG_UD2 0xfffd + #ifdef CONFIG_GENERIC_BUG #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h index 3831f612e89c7f..e4121d9aa9e114 100644 --- a/arch/x86/include/asm/cpu_device_id.h +++ b/arch/x86/include/asm/cpu_device_id.h @@ -192,26 +192,6 @@ #define X86_MATCH_VENDOR_FAM(vendor, family, data) \ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data) -/** - * X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model - * @model: The model name without the INTEL_FAM6_ prefix or ANY - * The model name is expanded to INTEL_FAM6_@model internally - * @data: Driver specific data or NULL. The internal storage - * format is unsigned long. The supplied value, pointer - * etc. 
is casted to unsigned long internally. - * - * The vendor is set to INTEL, the family to 6 and all other missing - * arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are set to wildcards. - * - * See X86_MATCH_VENDOR_FAM_MODEL_FEATURE() for further information. - */ -#define X86_MATCH_INTEL_FAM6_MODEL(model, data) \ - X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data) - -#define X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(model, steppings, data) \ - X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ - steppings, X86_FEATURE_ANY, data) - /** * X86_MATCH_VFM - Match encoded vendor/family/model * @vfm: Encoded 8-bits each for vendor, family, model diff --git a/arch/x86/include/asm/cpuid.h b/arch/x86/include/asm/cpuid.h index 6b122a31da065a..ca4243318aadc4 100644 --- a/arch/x86/include/asm/cpuid.h +++ b/arch/x86/include/asm/cpuid.h @@ -179,6 +179,7 @@ static __always_inline bool cpuid_function_is_indexed(u32 function) case 0x1d: case 0x1e: case 0x1f: + case 0x24: case 0x8000001d: return true; } @@ -196,7 +197,12 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) for_each_possible_hypervisor_cpuid_base(base) { cpuid(base, &eax, &signature[0], &signature[1], &signature[2]); - if (!memcmp(sig, signature, 12) && + /* + * This must not compile to "call memcmp" because it's called + * from PVH early boot code before instrumentation is set up + * and memcmp() itself may be instrumented. + */ + if (!__builtin_memcmp(sig, signature, 12) && (leaves == 0 || ((eax - base) >= leaves))) return base; } diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index fb2809b20b0ac4..77d20555e04de5 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -8,6 +8,7 @@ #include #include #include +#include /* Check that the stack and regs on entry from user mode are sane. 
*/ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) @@ -44,8 +45,7 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) } #define arch_enter_from_user_mode arch_enter_from_user_mode -static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, - unsigned long ti_work) +static inline void arch_exit_work(unsigned long ti_work) { if (ti_work & _TIF_USER_RETURN_NOTIFY) fire_user_return_notifiers(); @@ -56,6 +56,15 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, fpregs_assert_state_consistent(); if (unlikely(ti_work & _TIF_NEED_FPU_LOAD)) switch_fpu_return(); +} + +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ + if (IS_ENABLED(CONFIG_X86_DEBUG_FPU) || unlikely(ti_work)) + arch_exit_work(ti_work); + + fred_update_rsp0(); #ifdef CONFIG_COMPAT /* diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h index eeed395c31777b..a0e0c6b50155f6 100644 --- a/arch/x86/include/asm/extable.h +++ b/arch/x86/include/asm/extable.h @@ -37,7 +37,6 @@ struct pt_regs; extern int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code, unsigned long fault_addr); -extern int fixup_bug(struct pt_regs *regs, int trapnr); extern int ex_get_fixup_type(unsigned long ip); extern void early_fixup_exception(struct pt_regs *regs, int trapnr); diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h index 611fa41711affd..eccc75bc9c4f3d 100644 --- a/arch/x86/include/asm/fpu/signal.h +++ b/arch/x86/include/asm/fpu/signal.h @@ -29,7 +29,7 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long fpu__get_fpstate_size(void); -extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size); +extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size, u32 pkru); extern void fpu__clear_user_states(struct fpu *fpu); extern bool fpu__restore_sig(void __user *buf, int ia32_frame); diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h index e86c7ba32435f5..25ca00bd70e834 100644 --- a/arch/x86/include/asm/fred.h +++ b/arch/x86/include/asm/fred.h @@ -36,6 +36,7 @@ #ifdef CONFIG_X86_FRED #include +#include #include @@ -84,13 +85,33 @@ static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int } void cpu_init_fred_exceptions(void); +void cpu_init_fred_rsps(void); void fred_complete_exception_setup(void); +DECLARE_PER_CPU(unsigned long, fred_rsp0); + +static __always_inline void fred_sync_rsp0(unsigned long rsp0) +{ + __this_cpu_write(fred_rsp0, rsp0); +} + +static __always_inline void fred_update_rsp0(void) +{ + unsigned long rsp0 = (unsigned long) task_stack_page(current) + THREAD_SIZE; + + if (cpu_feature_enabled(X86_FEATURE_FRED) && (__this_cpu_read(fred_rsp0) != rsp0)) { + wrmsrns(MSR_IA32_FRED_RSP0, rsp0); + __this_cpu_write(fred_rsp0, rsp0); + } +} #else /* CONFIG_X86_FRED */ static __always_inline unsigned long fred_event_data(struct pt_regs *regs) { return 0; } static inline void cpu_init_fred_exceptions(void) { } +static inline void cpu_init_fred_rsps(void) { } static inline void fred_complete_exception_setup(void) { } -static __always_inline void fred_entry_from_kvm(unsigned int type, unsigned int vector) { } +static inline void fred_entry_from_kvm(unsigned int type, unsigned int vector) { } +static inline void fred_sync_rsp0(unsigned long rsp0) { } +static inline void fred_update_rsp0(void) { } #endif /* CONFIG_X86_FRED */ #endif /* 
!__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 0152a81d9b4a2e..b4d719de2c8459 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_FTRACE_H #define _ASM_X86_FTRACE_H +#include + #ifdef CONFIG_FUNCTION_TRACER #ifndef CC_USING_FENTRY # error Compiler does not support fentry? diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index c67fa6ad098aae..6ffa8b75f4cd33 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -69,7 +69,11 @@ extern u64 arch_irq_stat(void); #define local_softirq_pending_ref pcpu_hot.softirq_pending #if IS_ENABLED(CONFIG_KVM_INTEL) -static inline void kvm_set_cpu_l1tf_flush_l1d(void) +/* + * This function is called from noinstr interrupt contexts + * and must be inlined to not get instrumentation. + */ +static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1); } @@ -84,7 +88,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void) return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d); } #else /* !IS_ENABLED(CONFIG_KVM_INTEL) */ -static inline void kvm_set_cpu_l1tf_flush_l1d(void) { } +static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { } #endif /* IS_ENABLED(CONFIG_KVM_INTEL) */ #endif /* _ASM_X86_HARDIRQ_H */ diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index d4f24499b256c8..ad5c68f0509d4d 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -212,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \ irqentry_state_t state = irqentry_enter(regs); \ u32 vector = (u32)(u8)error_code; \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ - kvm_set_cpu_l1tf_flush_l1d(); \ run_irq_on_irqstack_cond(__##func, regs, vector); \ instrumentation_end(); \ irqentry_exit(regs, state); \ @@ -250,7 +250,6 @@ static void __##func(struct pt_regs *regs); \ \ static __always_inline void instr_##func(struct pt_regs *regs) \ { \ - kvm_set_cpu_l1tf_flush_l1d(); \ run_sysvec_on_irqstack_cond(__##func, regs); \ } \ \ @@ -258,6 +257,7 @@ __visible noinstr void func(struct pt_regs *regs) \ { \ irqentry_state_t state = irqentry_enter(regs); \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ instr_##func (regs); \ instrumentation_end(); \ @@ -288,7 +288,6 @@ static __always_inline void __##func(struct pt_regs *regs); \ static __always_inline void instr_##func(struct pt_regs *regs) \ { \ __irq_enter_raw(); \ - kvm_set_cpu_l1tf_flush_l1d(); \ __##func (regs); \ __irq_exit_raw(); \ } \ @@ -297,6 +296,7 @@ __visible noinstr void func(struct pt_regs *regs) \ { \ irqentry_state_t state = irqentry_enter(regs); \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ instr_##func (regs); \ instrumentation_end(); \ diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index f81a851c46dca5..1a42f829667a37 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -10,7 +10,7 @@ * that group keep the CPUID for the variants sorted by model number. * * The defined symbol names have the following form: - * INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF} + * INTEL_{OPTFAMILY}_{MICROARCH}{OPTDIFF} * where: * OPTFAMILY Describes the family of CPUs that this belongs to. Default * is assumed to be "_CORE" (and should be omitted). 
Other values @@ -42,219 +42,145 @@ #define IFM(_fam, _model) VFM_MAKE(X86_VENDOR_INTEL, _fam, _model) -/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */ -#define INTEL_FAM6_ANY X86_MODEL_ANY -/* Wildcard match for FAM6 so X86_MATCH_VFM(ANY) works */ +/* Wildcard match so X86_MATCH_VFM(ANY) works */ #define INTEL_ANY IFM(X86_FAMILY_ANY, X86_MODEL_ANY) -#define INTEL_FAM6_CORE_YONAH 0x0E +#define INTEL_PENTIUM_PRO IFM(6, 0x01) + #define INTEL_CORE_YONAH IFM(6, 0x0E) -#define INTEL_FAM6_CORE2_MEROM 0x0F #define INTEL_CORE2_MEROM IFM(6, 0x0F) -#define INTEL_FAM6_CORE2_MEROM_L 0x16 #define INTEL_CORE2_MEROM_L IFM(6, 0x16) -#define INTEL_FAM6_CORE2_PENRYN 0x17 #define INTEL_CORE2_PENRYN IFM(6, 0x17) -#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D #define INTEL_CORE2_DUNNINGTON IFM(6, 0x1D) -#define INTEL_FAM6_NEHALEM 0x1E #define INTEL_NEHALEM IFM(6, 0x1E) -#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */ #define INTEL_NEHALEM_G IFM(6, 0x1F) /* Auburndale / Havendale */ -#define INTEL_FAM6_NEHALEM_EP 0x1A #define INTEL_NEHALEM_EP IFM(6, 0x1A) -#define INTEL_FAM6_NEHALEM_EX 0x2E #define INTEL_NEHALEM_EX IFM(6, 0x2E) -#define INTEL_FAM6_WESTMERE 0x25 #define INTEL_WESTMERE IFM(6, 0x25) -#define INTEL_FAM6_WESTMERE_EP 0x2C #define INTEL_WESTMERE_EP IFM(6, 0x2C) -#define INTEL_FAM6_WESTMERE_EX 0x2F #define INTEL_WESTMERE_EX IFM(6, 0x2F) -#define INTEL_FAM6_SANDYBRIDGE 0x2A #define INTEL_SANDYBRIDGE IFM(6, 0x2A) -#define INTEL_FAM6_SANDYBRIDGE_X 0x2D #define INTEL_SANDYBRIDGE_X IFM(6, 0x2D) -#define INTEL_FAM6_IVYBRIDGE 0x3A #define INTEL_IVYBRIDGE IFM(6, 0x3A) -#define INTEL_FAM6_IVYBRIDGE_X 0x3E #define INTEL_IVYBRIDGE_X IFM(6, 0x3E) -#define INTEL_FAM6_HASWELL 0x3C #define INTEL_HASWELL IFM(6, 0x3C) -#define INTEL_FAM6_HASWELL_X 0x3F #define INTEL_HASWELL_X IFM(6, 0x3F) -#define INTEL_FAM6_HASWELL_L 0x45 #define INTEL_HASWELL_L IFM(6, 0x45) -#define INTEL_FAM6_HASWELL_G 0x46 #define INTEL_HASWELL_G IFM(6, 0x46) -#define INTEL_FAM6_BROADWELL 0x3D #define INTEL_BROADWELL IFM(6, 0x3D) -#define INTEL_FAM6_BROADWELL_G 0x47 #define INTEL_BROADWELL_G IFM(6, 0x47) -#define INTEL_FAM6_BROADWELL_X 0x4F #define INTEL_BROADWELL_X IFM(6, 0x4F) -#define INTEL_FAM6_BROADWELL_D 0x56 #define INTEL_BROADWELL_D IFM(6, 0x56) -#define INTEL_FAM6_SKYLAKE_L 0x4E /* Sky Lake */ #define INTEL_SKYLAKE_L IFM(6, 0x4E) /* Sky Lake */ -#define INTEL_FAM6_SKYLAKE 0x5E /* Sky Lake */ #define INTEL_SKYLAKE IFM(6, 0x5E) /* Sky Lake */ -#define INTEL_FAM6_SKYLAKE_X 0x55 /* Sky Lake */ #define INTEL_SKYLAKE_X IFM(6, 0x55) /* Sky Lake */ /* CASCADELAKE_X 0x55 Sky Lake -- s: 7 */ /* COOPERLAKE_X 0x55 Sky Lake -- s: 11 */ -#define INTEL_FAM6_KABYLAKE_L 0x8E /* Sky Lake */ #define INTEL_KABYLAKE_L IFM(6, 0x8E) /* Sky Lake */ /* AMBERLAKE_L 0x8E Sky Lake -- s: 9 */ /* COFFEELAKE_L 0x8E Sky Lake -- s: 10 */ /* WHISKEYLAKE_L 0x8E Sky Lake -- s: 11,12 */ -#define INTEL_FAM6_KABYLAKE 0x9E /* Sky Lake */ #define INTEL_KABYLAKE IFM(6, 0x9E) /* Sky Lake */ /* COFFEELAKE 0x9E Sky Lake -- s: 10-13 */ -#define INTEL_FAM6_COMETLAKE 0xA5 /* Sky Lake */ #define INTEL_COMETLAKE IFM(6, 0xA5) /* Sky Lake */ -#define INTEL_FAM6_COMETLAKE_L 0xA6 /* Sky Lake */ #define INTEL_COMETLAKE_L IFM(6, 0xA6) /* Sky Lake */ -#define INTEL_FAM6_CANNONLAKE_L 0x66 /* Palm Cove */ #define INTEL_CANNONLAKE_L IFM(6, 0x66) /* Palm Cove */ -#define INTEL_FAM6_ICELAKE_X 0x6A /* Sunny Cove */ #define INTEL_ICELAKE_X IFM(6, 0x6A) /* Sunny Cove */ -#define INTEL_FAM6_ICELAKE_D 0x6C /* Sunny Cove */ #define INTEL_ICELAKE_D IFM(6, 0x6C) /* Sunny Cove 
*/ -#define INTEL_FAM6_ICELAKE 0x7D /* Sunny Cove */ #define INTEL_ICELAKE IFM(6, 0x7D) /* Sunny Cove */ -#define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */ #define INTEL_ICELAKE_L IFM(6, 0x7E) /* Sunny Cove */ -#define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */ #define INTEL_ICELAKE_NNPI IFM(6, 0x9D) /* Sunny Cove */ -#define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */ #define INTEL_ROCKETLAKE IFM(6, 0xA7) /* Cypress Cove */ -#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */ #define INTEL_TIGERLAKE_L IFM(6, 0x8C) /* Willow Cove */ -#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */ #define INTEL_TIGERLAKE IFM(6, 0x8D) /* Willow Cove */ -#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */ #define INTEL_SAPPHIRERAPIDS_X IFM(6, 0x8F) /* Golden Cove */ -#define INTEL_FAM6_EMERALDRAPIDS_X 0xCF #define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF) -#define INTEL_FAM6_GRANITERAPIDS_X 0xAD #define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) -#define INTEL_FAM6_GRANITERAPIDS_D 0xAE #define INTEL_GRANITERAPIDS_D IFM(6, 0xAE) /* "Hybrid" Processors (P-Core/E-Core) */ -#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */ #define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */ -#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */ #define INTEL_ALDERLAKE IFM(6, 0x97) /* Golden Cove / Gracemont */ -#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */ #define INTEL_ALDERLAKE_L IFM(6, 0x9A) /* Golden Cove / Gracemont */ -#define INTEL_FAM6_RAPTORLAKE 0xB7 /* Raptor Cove / Enhanced Gracemont */ #define INTEL_RAPTORLAKE IFM(6, 0xB7) /* Raptor Cove / Enhanced Gracemont */ -#define INTEL_FAM6_RAPTORLAKE_P 0xBA #define INTEL_RAPTORLAKE_P IFM(6, 0xBA) -#define INTEL_FAM6_RAPTORLAKE_S 0xBF #define INTEL_RAPTORLAKE_S IFM(6, 0xBF) -#define INTEL_FAM6_METEORLAKE 0xAC #define INTEL_METEORLAKE IFM(6, 0xAC) -#define INTEL_FAM6_METEORLAKE_L 0xAA #define INTEL_METEORLAKE_L IFM(6, 0xAA) -#define INTEL_FAM6_ARROWLAKE_H 0xC5 #define INTEL_ARROWLAKE_H IFM(6, 0xC5) -#define INTEL_FAM6_ARROWLAKE 0xC6 #define INTEL_ARROWLAKE IFM(6, 0xC6) -#define INTEL_FAM6_ARROWLAKE_U 0xB5 #define INTEL_ARROWLAKE_U IFM(6, 0xB5) -#define INTEL_FAM6_LUNARLAKE_M 0xBD #define INTEL_LUNARLAKE_M IFM(6, 0xBD) +#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) + /* "Small Core" Processors (Atom/E-Core) */ -#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ #define INTEL_ATOM_BONNELL IFM(6, 0x1C) /* Diamondville, Pineview */ -#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */ #define INTEL_ATOM_BONNELL_MID IFM(6, 0x26) /* Silverthorne, Lincroft */ -#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */ #define INTEL_ATOM_SALTWELL IFM(6, 0x36) /* Cedarview */ -#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */ #define INTEL_ATOM_SALTWELL_MID IFM(6, 0x27) /* Penwell */ -#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */ #define INTEL_ATOM_SALTWELL_TABLET IFM(6, 0x35) /* Cloverview */ -#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */ #define INTEL_ATOM_SILVERMONT IFM(6, 0x37) /* Bay Trail, Valleyview */ -#define INTEL_FAM6_ATOM_SILVERMONT_D 0x4D /* Avaton, Rangely */ #define INTEL_ATOM_SILVERMONT_D IFM(6, 0x4D) /* Avaton, Rangely */ -#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */ #define INTEL_ATOM_SILVERMONT_MID IFM(6, 0x4A) /* Merriefield */ -#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */ #define INTEL_ATOM_AIRMONT IFM(6, 0x4C) /* Cherry Trail, Braswell */ -#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */ #define 
INTEL_ATOM_AIRMONT_MID IFM(6, 0x5A) /* Moorefield */ -#define INTEL_FAM6_ATOM_AIRMONT_NP 0x75 /* Lightning Mountain */ #define INTEL_ATOM_AIRMONT_NP IFM(6, 0x75) /* Lightning Mountain */ -#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ #define INTEL_ATOM_GOLDMONT IFM(6, 0x5C) /* Apollo Lake */ -#define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */ #define INTEL_ATOM_GOLDMONT_D IFM(6, 0x5F) /* Denverton */ /* Note: the micro-architecture is "Goldmont Plus" */ -#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ #define INTEL_ATOM_GOLDMONT_PLUS IFM(6, 0x7A) /* Gemini Lake */ -#define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */ #define INTEL_ATOM_TREMONT_D IFM(6, 0x86) /* Jacobsville */ -#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */ #define INTEL_ATOM_TREMONT IFM(6, 0x96) /* Elkhart Lake */ -#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */ #define INTEL_ATOM_TREMONT_L IFM(6, 0x9C) /* Jasper Lake */ -#define INTEL_FAM6_ATOM_GRACEMONT 0xBE /* Alderlake N */ #define INTEL_ATOM_GRACEMONT IFM(6, 0xBE) /* Alderlake N */ -#define INTEL_FAM6_ATOM_CRESTMONT_X 0xAF /* Sierra Forest */ #define INTEL_ATOM_CRESTMONT_X IFM(6, 0xAF) /* Sierra Forest */ -#define INTEL_FAM6_ATOM_CRESTMONT 0xB6 /* Grand Ridge */ #define INTEL_ATOM_CRESTMONT IFM(6, 0xB6) /* Grand Ridge */ -#define INTEL_FAM6_ATOM_DARKMONT_X 0xDD /* Clearwater Forest */ #define INTEL_ATOM_DARKMONT_X IFM(6, 0xDD) /* Clearwater Forest */ /* Xeon Phi */ -#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ #define INTEL_XEON_PHI_KNL IFM(6, 0x57) /* Knights Landing */ -#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */ #define INTEL_XEON_PHI_KNM IFM(6, 0x85) /* Knights Mill */ /* Family 5 */ #define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */ #define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */ +/* Family 19 */ +#define INTEL_PANTHERCOVE_X IFM(19, 0x01) /* Diamond Rapids */ + #endif /* _ASM_X86_INTEL_FAMILY_H */ diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h deleted file mode 100644 index 8537f597d20ac5..00000000000000 --- a/arch/x86/include/asm/intel_scu_ipc.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_INTEL_SCU_IPC_H_ -#define _ASM_X86_INTEL_SCU_IPC_H_ - -#include - -struct device; -struct intel_scu_ipc_dev; - -/** - * struct intel_scu_ipc_data - Data used to configure SCU IPC - * @mem: Base address of SCU IPC MMIO registers - * @irq: The IRQ number used for SCU (optional) - */ -struct intel_scu_ipc_data { - struct resource mem; - int irq; -}; - -struct intel_scu_ipc_dev * -__intel_scu_ipc_register(struct device *parent, - const struct intel_scu_ipc_data *scu_data, - struct module *owner); - -#define intel_scu_ipc_register(parent, scu_data) \ - __intel_scu_ipc_register(parent, scu_data, THIS_MODULE) - -void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu); - -struct intel_scu_ipc_dev * -__devm_intel_scu_ipc_register(struct device *parent, - const struct intel_scu_ipc_data *scu_data, - struct module *owner); - -#define devm_intel_scu_ipc_register(parent, scu_data) \ - __devm_intel_scu_ipc_register(parent, scu_data, THIS_MODULE) - -struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void); -void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu); -struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev); - -int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, - u8 *data); -int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, - u8 data); 
-int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, - u8 *data, size_t len); -int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, - u8 *data, size_t len); - -int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, - u8 data, u8 mask); - -int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd, - int sub); -int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd, - int sub, const void *in, size_t inlen, - size_t size, void *out, size_t outlen); - -static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu, int cmd, - int sub, const void *in, size_t inlen, - void *out, size_t outlen) -{ - return intel_scu_ipc_dev_command_with_size(scu, cmd, sub, in, inlen, - inlen, out, outlen); -} - -#endif diff --git a/arch/x86/include/asm/intel_telemetry.h b/arch/x86/include/asm/intel_telemetry.h index 8046e70dfd7cf7..43b7657febcaa3 100644 --- a/arch/x86/include/asm/intel_telemetry.h +++ b/arch/x86/include/asm/intel_telemetry.h @@ -10,7 +10,7 @@ #define TELEM_MAX_EVENTS_SRAM 28 #define TELEM_MAX_OS_ALLOCATED_EVENTS 20 -#include +#include enum telemetry_unit { TELEM_PSS = 0, diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 13aea8fc3d45fc..47051871b43618 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -18,8 +18,8 @@ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events * Vectors 32 ... 127 : device interrupts * Vector 128 : legacy int80 syscall interface - * Vectors 129 ... LOCAL_TIMER_VECTOR-1 - * Vectors LOCAL_TIMER_VECTOR ... 255 : special interrupts + * Vectors 129 ... FIRST_SYSTEM_VECTOR-1 : device interrupts + * Vectors FIRST_SYSTEM_VECTOR ... 255 : special interrupts * * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. * diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 68ad4f923664e2..861d080ed4c6ab 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -14,8 +14,8 @@ BUILD_BUG_ON(1) * be __static_call_return0. 
*/ KVM_X86_OP(check_processor_compatibility) -KVM_X86_OP(hardware_enable) -KVM_X86_OP(hardware_disable) +KVM_X86_OP(enable_virtualization_cpu) +KVM_X86_OP(disable_virtualization_cpu) KVM_X86_OP(hardware_unsetup) KVM_X86_OP(has_emulated_msr) KVM_X86_OP(vcpu_after_set_cpuid) @@ -125,7 +125,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_unregister_region) KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from) KVM_X86_OP_OPTIONAL(vm_move_enc_context_from) KVM_X86_OP_OPTIONAL(guest_memory_reclaimed) -KVM_X86_OP(get_msr_feature) +KVM_X86_OP(get_feature_msr) KVM_X86_OP(check_emulate_instruction) KVM_X86_OP(apic_init_signal_blocked) KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4a68cb3eba78f8..6d9f763a7bb9d5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -36,6 +36,7 @@ #include #include #include +#include #define __KVM_HAVE_ARCH_VCPU_DEBUGFS @@ -211,6 +212,7 @@ enum exit_fastpath_completion { EXIT_FASTPATH_NONE, EXIT_FASTPATH_REENTER_GUEST, EXIT_FASTPATH_EXIT_HANDLED, + EXIT_FASTPATH_EXIT_USERSPACE, }; typedef enum exit_fastpath_completion fastpath_t; @@ -280,10 +282,6 @@ enum x86_intercept_stage; #define PFERR_PRIVATE_ACCESS BIT_ULL(49) #define PFERR_SYNTHETIC_MASK (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS) -#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \ - PFERR_WRITE_MASK | \ - PFERR_PRESENT_MASK) - /* apic attention bits */ #define KVM_APIC_CHECK_VAPIC 0 /* @@ -1629,8 +1627,10 @@ struct kvm_x86_ops { int (*check_processor_compatibility)(void); - int (*hardware_enable)(void); - void (*hardware_disable)(void); + int (*enable_virtualization_cpu)(void); + void (*disable_virtualization_cpu)(void); + cpu_emergency_virt_cb *emergency_disable_virtualization_cpu; + void (*hardware_unsetup)(void); bool (*has_emulated_msr)(struct kvm *kvm, u32 index); void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu); @@ -1727,6 +1727,8 @@ struct kvm_x86_ops { void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); + + const bool x2apic_icr_is_split; const unsigned long required_apicv_inhibits; bool allow_apicv_in_x2apic_without_x2apic_virtualization; void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); @@ -1806,7 +1808,7 @@ struct kvm_x86_ops { int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd); void (*guest_memory_reclaimed)(struct kvm *kvm); - int (*get_msr_feature)(struct kvm_msr_entry *entry); + int (*get_feature_msr)(u32 msr, u64 *data); int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type, void *insn, int insn_len); @@ -2060,6 +2062,8 @@ void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu); void kvm_enable_efer_bits(u64); bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); +int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data); +int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data); int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated); int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data); int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data); @@ -2136,7 +2140,15 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu); void kvm_update_dr7(struct kvm_vcpu *vcpu); -int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); +bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + bool always_retry); + +static inline bool 
kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, + gpa_t cr2_or_gpa) +{ + return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false); +} + void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, ulong roots_to_free); void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu); @@ -2254,6 +2266,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_cpu_has_extint(struct kvm_vcpu *v); int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); +int kvm_cpu_get_extint(struct kvm_vcpu *v); int kvm_cpu_get_interrupt(struct kvm_vcpu *v); void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); @@ -2345,7 +2358,8 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages); KVM_X86_QUIRK_OUT_7E_INC_RIP | \ KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \ KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \ - KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) + KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \ + KVM_X86_QUIRK_SLOT_ZAP_ALL) /* * KVM previously used a u32 field in kvm_run to indicate the hypercall was diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 3ad29b12894329..3b9970117a0fa7 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -221,7 +221,7 @@ static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) { return -EINVAL; } #endif -void mce_setup(struct mce *m); +void mce_prep_record(struct mce *m); void mce_log(struct mce *m); DECLARE_PER_CPU(struct device *, mce_device); diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 8dac45a2c7fcf2..2886cb668d7fae 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -88,7 +88,13 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) #ifdef CONFIG_ADDRESS_MASKING static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm) { - return mm->context.lam_cr3_mask; + /* + * When switch_mm_irqs_off() is called for a kthread, it may race with + * LAM enablement. switch_mm_irqs_off() uses the LAM mask to do two + * things: populate CR3 and populate 'cpu_tlbstate.lam'. Make sure it + * reads a single value for both. 
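The READ_ONCE() added above matters because switch_mm_irqs_off() consumes the LAM mask twice, once for CR3 and once for cpu_tlbstate. A minimal sketch of the intended single-snapshot usage on that path (the caller shown here is illustrative, not new code in this patch; it assumes the cpu_tlbstate_update_lam() helper introduced later in this series):

	/* Take one snapshot so CR3 and cpu_tlbstate cannot disagree. */
	unsigned long new_lam = mm_lam_cr3_mask(next);

	write_cr3(build_cr3(next->pgd, new_asid, new_lam));
	cpu_tlbstate_update_lam(new_lam, mm_untag_mask(next));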
+ */ + return READ_ONCE(mm->context.lam_cr3_mask); } static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm) @@ -232,11 +238,6 @@ static inline bool is_64bit_mm(struct mm_struct *mm) } #endif -static inline void arch_unmap(struct mm_struct *mm, unsigned long start, - unsigned long end) -{ -} - /* * We only want to enforce protection keys on the current process * because we effectively have no access to PKRU for other diff --git a/arch/x86/include/asm/mmzone.h b/arch/x86/include/asm/mmzone.h deleted file mode 100644 index c41b41edd691ef..00000000000000 --- a/arch/x86/include/asm/mmzone.h +++ /dev/null @@ -1,6 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifdef CONFIG_X86_32 -# include -#else -# include -#endif diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h deleted file mode 100644 index 2d4515e8b7df88..00000000000000 --- a/arch/x86/include/asm/mmzone_32.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002 - * - */ - -#ifndef _ASM_X86_MMZONE_32_H -#define _ASM_X86_MMZONE_32_H - -#include - -#ifdef CONFIG_NUMA -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[nid]) -#endif /* CONFIG_NUMA */ - -#endif /* _ASM_X86_MMZONE_32_H */ diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h deleted file mode 100644 index 0c585046f7447a..00000000000000 --- a/arch/x86/include/asm/mmzone_64.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* K8 NUMA support */ -/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */ -/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */ -#ifndef _ASM_X86_MMZONE_64_H -#define _ASM_X86_MMZONE_64_H - -#ifdef CONFIG_NUMA - -#include -#include - -extern struct pglist_data *node_data[]; - -#define NODE_DATA(nid) (node_data[nid]) - -#endif -#endif /* _ASM_X86_MMZONE_64_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 82c6a4d350e09e..3ae84c3b8e6dba 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -36,6 +36,20 @@ #define EFER_FFXSR (1<<_EFER_FFXSR) #define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS) +/* + * Architectural memory types that are common to MTRRs, PAT, VMX MSRs, etc. + * Most MSRs support/allow only a subset of memory types, but the values + * themselves are common across all relevant MSRs. + */ +#define X86_MEMTYPE_UC 0ull /* Uncacheable, a.k.a. 
Strong Uncacheable */ +#define X86_MEMTYPE_WC 1ull /* Write Combining */ +/* RESERVED 2 */ +/* RESERVED 3 */ +#define X86_MEMTYPE_WT 4ull /* Write Through */ +#define X86_MEMTYPE_WP 5ull /* Write Protected */ +#define X86_MEMTYPE_WB 6ull /* Write Back */ +#define X86_MEMTYPE_UC_MINUS 7ull /* Weak Uncacheabled (PAT only) */ + /* FRED MSRs */ #define MSR_IA32_FRED_RSP0 0x1cc /* Level 0 stack pointer */ #define MSR_IA32_FRED_RSP1 0x1cd /* Level 1 stack pointer */ @@ -247,6 +261,8 @@ #define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT) #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT) +#define MSR_INTEGRITY_CAPS_SBAF_BIT 8 +#define MSR_INTEGRITY_CAPS_SBAF BIT(MSR_INTEGRITY_CAPS_SBAF_BIT) #define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9) #define MSR_LBR_NHM_FROM 0x00000680 @@ -363,6 +379,12 @@ #define MSR_IA32_CR_PAT 0x00000277 +#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7) \ + ((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) | \ + (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) | \ + (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) | \ + (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56)) + #define MSR_IA32_DEBUGCTLMSR 0x000001d9 #define MSR_IA32_LASTBRANCHFROMIP 0x000001db #define MSR_IA32_LASTBRANCHTOIP 0x000001dc @@ -1157,15 +1179,6 @@ #define MSR_IA32_VMX_VMFUNC 0x00000491 #define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492 -/* VMX_BASIC bits and bitmasks */ -#define VMX_BASIC_VMCS_SIZE_SHIFT 32 -#define VMX_BASIC_TRUE_CTLS (1ULL << 55) -#define VMX_BASIC_64 0x0001000000000000LLU -#define VMX_BASIC_MEM_TYPE_SHIFT 50 -#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU -#define VMX_BASIC_MEM_TYPE_WB 6LLU -#define VMX_BASIC_INOUT 0x0040000000000000LLU - /* Resctrl MSRs: */ /* - Intel: */ #define MSR_IA32_L3_QOS_CFG 0xc81 @@ -1183,11 +1196,6 @@ #define MSR_IA32_SMBA_BW_BASE 0xc0000280 #define MSR_IA32_EVT_CFG_BASE 0xc0000400 -/* MSR_IA32_VMX_MISC bits */ -#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14) -#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) -#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F - /* AMD-V MSRs */ #define MSR_VM_CR 0xc0010114 #define MSR_VM_IGNNE 0xc0010115 diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index d642037f9ed5d8..001853541f1e8c 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -99,19 +99,6 @@ static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high) : : "c" (msr), "a"(low), "d" (high) : "memory"); } -/* - * WRMSRNS behaves exactly like WRMSR with the only difference being - * that it is not a serializing instruction by default. - */ -static __always_inline void __wrmsrns(u32 msr, u32 low, u32 high) -{ - /* Instruction opcode for WRMSRNS; supported in binutils >= 2.40. */ - asm volatile("1: .byte 0x0f,0x01,0xc6\n" - "2:\n" - _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR) - : : "c" (msr), "a"(low), "d" (high)); -} - #define native_rdmsr(msr, val1, val2) \ do { \ u64 __val = __rdmsr((msr)); \ @@ -312,9 +299,19 @@ do { \ #endif /* !CONFIG_PARAVIRT_XXL */ +/* Instruction opcode for WRMSRNS supported in binutils >= 2.40 */ +#define WRMSRNS _ASM_BYTES(0x0f,0x01,0xc6) + +/* Non-serializing WRMSR, when available. Falls back to a serializing WRMSR. */ static __always_inline void wrmsrns(u32 msr, u64 val) { - __wrmsrns(msr, val, val >> 32); + /* + * WRMSR is 2 bytes. WRMSRNS is 3 bytes. Pad WRMSR with a redundant + * DS prefix to avoid a trailing NOP. 
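As a quick sanity check on the new PAT_VALUE() macro, encoding the architectural power-on layout of IA32_PAT (WB, WT, UC-, UC repeated) reproduces the documented reset value. This is only an illustration, not code added by the patch:

	/* PAT0..PAT7 = WB, WT, UC-, UC, WB, WT, UC-, UC */
	BUILD_BUG_ON(PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC) !=
		     0x0007040600070406ULL);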
+ */ + asm volatile("1: " ALTERNATIVE("ds wrmsr", WRMSRNS, X86_FEATURE_WRMSRNS) + "2: " _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR) + : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32))); } /* diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index 090d658a85a6c0..4218248083d985 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h @@ -69,7 +69,6 @@ extern int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, bool increment); extern int mtrr_del(int reg, unsigned long base, unsigned long size); extern int mtrr_del_page(int reg, unsigned long base, unsigned long size); -extern void mtrr_bp_restore(void); extern int mtrr_trim_uncached_memory(unsigned long end_pfn); extern int amd_special_default_mtrr(void); void mtrr_disable(void); @@ -117,7 +116,6 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) return 0; } #define mtrr_bp_init() do {} while (0) -#define mtrr_bp_restore() do {} while (0) #define mtrr_disable() do {} while (0) #define mtrr_enable() do {} while (0) #define mtrr_generic_set_state() do {} while (0) diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index ef2844d691735d..5469d7a7c40fa6 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -10,8 +10,6 @@ #ifdef CONFIG_NUMA -#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) - extern int numa_off; /* @@ -25,9 +23,6 @@ extern int numa_off; extern s16 __apicid_to_node[MAX_LOCAL_APIC]; extern nodemask_t numa_nodes_parsed __initdata; -extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); -extern void __init numa_set_distance(int from, int to, int distance); - static inline void set_apicid_to_node(int apicid, s16 node) { __apicid_to_node[apicid] = node; @@ -54,31 +49,20 @@ static inline int numa_cpu_node(int cpu) extern void numa_set_node(int cpu, int node); extern void numa_clear_node(int cpu); extern void __init init_cpu_to_node(void); -extern void numa_add_cpu(int cpu); -extern void numa_remove_cpu(int cpu); +extern void numa_add_cpu(unsigned int cpu); +extern void numa_remove_cpu(unsigned int cpu); extern void init_gi_nodes(void); #else /* CONFIG_NUMA */ static inline void numa_set_node(int cpu, int node) { } static inline void numa_clear_node(int cpu) { } static inline void init_cpu_to_node(void) { } -static inline void numa_add_cpu(int cpu) { } -static inline void numa_remove_cpu(int cpu) { } +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } static inline void init_gi_nodes(void) { } #endif /* CONFIG_NUMA */ #ifdef CONFIG_DEBUG_PER_CPU_MAPS -void debug_cpumask_set_cpu(int cpu, int node, bool enable); +void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable); #endif -#ifdef CONFIG_NUMA_EMU -#define FAKE_NODE_MIN_SIZE ((u64)32 << 20) -#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) -int numa_emu_cmdline(char *str); -#else /* CONFIG_NUMA_EMU */ -static inline int numa_emu_cmdline(char *str) -{ - return -EINVAL; -} -#endif /* CONFIG_NUMA_EMU */ - #endif /* _ASM_X86_NUMA_H */ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index e39311a89bf478..4c2d080d26b4fb 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -120,6 +120,34 @@ extern pmdval_t early_pmd_flags; #define arch_end_context_switch(prev) do {} while(0) #endif /* CONFIG_PARAVIRT_XXL */ +static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) +{ + pmdval_t v = native_pmd_val(pmd); + + return 
native_make_pmd(v | set); +} + +static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) +{ + pmdval_t v = native_pmd_val(pmd); + + return native_make_pmd(v & ~clear); +} + +static inline pud_t pud_set_flags(pud_t pud, pudval_t set) +{ + pudval_t v = native_pud_val(pud); + + return native_make_pud(v | set); +} + +static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) +{ + pudval_t v = native_pud_val(pud); + + return native_make_pud(v & ~clear); +} + /* * The following only work if pte_present() is true. * Undefined behaviour if not.. @@ -174,6 +202,13 @@ static inline int pud_young(pud_t pud) return pud_flags(pud) & _PAGE_ACCESSED; } +static inline bool pud_shstk(pud_t pud) +{ + return cpu_feature_enabled(X86_FEATURE_SHSTK) && + (pud_flags(pud) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) == + (_PAGE_DIRTY | _PAGE_PSE); +} + static inline int pte_write(pte_t pte) { /* @@ -310,6 +345,30 @@ static inline int pud_devmap(pud_t pud) } #endif +#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP +static inline bool pmd_special(pmd_t pmd) +{ + return pmd_flags(pmd) & _PAGE_SPECIAL; +} + +static inline pmd_t pmd_mkspecial(pmd_t pmd) +{ + return pmd_set_flags(pmd, _PAGE_SPECIAL); +} +#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */ + +#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP +static inline bool pud_special(pud_t pud) +{ + return pud_flags(pud) & _PAGE_SPECIAL; +} + +static inline pud_t pud_mkspecial(pud_t pud) +{ + return pud_set_flags(pud, _PAGE_SPECIAL); +} +#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */ + static inline int pgd_devmap(pgd_t pgd) { return 0; @@ -480,20 +539,6 @@ static inline pte_t pte_mkdevmap(pte_t pte) return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP); } -static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) -{ - pmdval_t v = native_pmd_val(pmd); - - return native_make_pmd(v | set); -} - -static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) -{ - pmdval_t v = native_pmd_val(pmd); - - return native_make_pmd(v & ~clear); -} - /* See comments above mksaveddirty_shift() */ static inline pmd_t pmd_mksaveddirty(pmd_t pmd) { @@ -588,20 +633,6 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma); #define pmd_mkwrite pmd_mkwrite -static inline pud_t pud_set_flags(pud_t pud, pudval_t set) -{ - pudval_t v = native_pud_val(pud); - - return native_make_pud(v | set); -} - -static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) -{ - pudval_t v = native_pud_val(pud); - - return native_make_pud(v & ~clear); -} - /* See comments above mksaveddirty_shift() */ static inline pud_t pud_mksaveddirty(pud_t pud) { @@ -780,6 +811,12 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd) __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); } +static inline pud_t pud_mkinvalid(pud_t pud) +{ + return pfn_pud(pud_pfn(pud), + __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); +} + static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask); static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) @@ -827,14 +864,8 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd_result = __pmd(val); /* - * To avoid creating Write=0,Dirty=1 PMDs, pte_modify() needs to avoid: - * 1. Marking Write=0 PMDs Dirty=1 - * 2. Marking Dirty=1 PMDs Write=0 - * - * The first case cannot happen because the _PAGE_CHG_MASK will filter - * out any Dirty bit passed in newprot. Handle the second case by - * going through the mksaveddirty exercise. Only do this if the old - * value was Write=1 to avoid doing this on Shadow Stack PTEs. 
+ * Avoid creating shadow stack PMD by accident. See comment in + * pte_modify(). */ if (oldval & _PAGE_RW) pmd_result = pmd_mksaveddirty(pmd_result); @@ -844,6 +875,29 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) return pmd_result; } +static inline pud_t pud_modify(pud_t pud, pgprot_t newprot) +{ + pudval_t val = pud_val(pud), oldval = val; + pud_t pud_result; + + val &= _HPAGE_CHG_MASK; + val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK; + val = flip_protnone_guard(oldval, val, PHYSICAL_PUD_PAGE_MASK); + + pud_result = __pud(val); + + /* + * Avoid creating shadow stack PUD by accident. See comment in + * pte_modify(). + */ + if (oldval & _PAGE_RW) + pud_result = pud_mksaveddirty(pud_result); + else + pud_result = pud_clear_saveddirty(pud_result); + + return pud_result; +} + /* * mprotect needs to preserve PAT and encryption bits when updating * vm_page_prot @@ -1078,8 +1132,7 @@ static inline pmd_t *pud_pgtable(pud_t pud) #define pud_leaf pud_leaf static inline bool pud_leaf(pud_t pud) { - return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == - (_PAGE_PSE | _PAGE_PRESENT); + return pud_val(pud) & _PAGE_PSE; } static inline int pud_bad(pud_t pud) @@ -1383,10 +1436,28 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, } #endif +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline pud_t pudp_establish(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, pud_t pud) +{ + page_table_check_pud_set(vma->vm_mm, pudp, pud); + if (IS_ENABLED(CONFIG_SMP)) { + return xchg(pudp, pud); + } else { + pud_t old = *pudp; + WRITE_ONCE(*pudp, pud); + return old; + } +} +#endif + #define __HAVE_ARCH_PMDP_INVALIDATE_AD extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); +pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, + pud_t *pudp); + /* * Page table pages are page-aligned. The lower half of the top * level is used for userspace and the top half for the kernel. 
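The pud_modify()/pud_mksaveddirty() handling above exists to keep Write=0,Dirty=1 (the shadow-stack encoding) from appearing by accident. A minimal illustrative helper showing the same rule when clearing Write on a PUD (not part of the patch; it simply mirrors what the existing wrprotect helpers do):

static inline pud_t example_pud_wrprotect(pud_t pud)
{
	pud = pud_clear_flags(pud, _PAGE_RW);
	/* Fold Dirty into SavedDirty so Write=0,Dirty=1 is never left behind. */
	return pud_mksaveddirty(pud);
}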
@@ -1668,6 +1739,9 @@ void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte); #define arch_check_zapped_pmd arch_check_zapped_pmd void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd); +#define arch_check_zapped_pud arch_check_zapped_pud +void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud); + #ifdef CONFIG_XEN_PV #define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young static inline bool arch_has_hw_nonleaf_pmd_young(void) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 3c4407271d0838..d1426b64c1b971 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -245,7 +245,6 @@ extern void cleanup_highmap(void); #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN -#define HAVE_ARCH_UNMAPPED_AREA_VMFLAGS #define PAGE_AGP PAGE_KERNEL_NOCACHE #define HAVE_PAGE_AGP 1 @@ -271,5 +270,26 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end) #include -#endif /* !__ASSEMBLY__ */ +#else /* __ASSEMBLY__ */ + +#define l4_index(x) (((x) >> 39) & 511) +#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) + +L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4) +L4_START_KERNEL = l4_index(__START_KERNEL_map) + +L3_START_KERNEL = pud_index(__START_KERNEL_map) + +#define SYM_DATA_START_PAGE_ALIGNED(name) \ + SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE) + +/* Automate the creation of 1 to 1 mapping pmd entries */ +#define PMDS(START, PERM, COUNT) \ + i = 0 ; \ + .rept (COUNT) ; \ + .quad (START) + (i << PMD_SHIFT) + (PERM) ; \ + i = i + 1 ; \ + .endr + +#endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_64_H */ diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 2f321137736c17..6f82e75b61494e 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -517,8 +517,6 @@ typedef struct page *pgtable_t; extern pteval_t __supported_pte_mask; extern pteval_t __default_kernel_pte_mask; -extern void set_nx(void); -extern int nx_enabled; #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index a75a07f4931fdb..4a686f0e5dbf6d 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -582,7 +582,8 @@ extern void switch_gdt_and_percpu_base(int); extern void load_direct_gdt(int); extern void load_fixmap_gdt(int); extern void cpu_init(void); -extern void cpu_init_exception_handling(void); +extern void cpu_init_exception_handling(bool boot_cpu); +extern void cpu_init_replace_early_idt(void); extern void cr4_init(void); extern void set_task_blockstep(struct task_struct *task, bool on); @@ -691,8 +692,6 @@ static inline u32 per_cpu_l2c_id(unsigned int cpu) } #ifdef CONFIG_CPU_SUP_AMD -extern u32 amd_get_highest_perf(void); - /* * Issue a DIV 0/1 insn to clear any division data from previous DIV * operations. 
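The arch_check_zapped_pud() hook declared in the pgtable.h hunk above gets its body elsewhere in the series; a plausible implementation, mirroring the existing pte/pmd checks (treat the exact body as an assumption, not as quoted from this patch):

void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
{
	/* Shadow-stack PUD mappings must only appear in VM_SHADOW_STACK VMAs. */
	VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pud_shstk(pud));
}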
@@ -705,7 +704,6 @@ static __always_inline void amd_clear_divider(void) extern void amd_check_microcode(void); #else -static inline u32 amd_get_highest_perf(void) { return 0; } static inline void amd_clear_divider(void) { } static inline void amd_check_microcode(void) { } #endif diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index 0c92db84469dbd..6e4f8fae3ce9e9 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -5,6 +5,7 @@ #include #include +struct timespec64; /* some helper functions for xen and kvm pv clock sources */ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); u64 pvclock_clocksource_read_nowd(struct pvclock_vcpu_time_info *src); diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 6536873f8fc0a2..ecd58ea9a837b1 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -25,14 +25,16 @@ void __noreturn machine_real_restart(unsigned int type); #define MRR_BIOS 0 #define MRR_APM 1 -#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD) typedef void (cpu_emergency_virt_cb)(void); +#if IS_ENABLED(CONFIG_KVM_X86) void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback); void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback); void cpu_emergency_disable_virtualization(void); #else +static inline void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback) {} +static inline void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) {} static inline void cpu_emergency_disable_virtualization(void) {} -#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */ +#endif /* CONFIG_KVM_X86 */ typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); void nmi_shootdown_cpus(nmi_shootdown_cb callback); diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h index 79bbe2be900eb7..ee34ab00a8d6d9 100644 --- a/arch/x86/include/asm/sev.h +++ b/arch/x86/include/asm/sev.h @@ -164,7 +164,7 @@ struct snp_guest_msg_hdr { struct snp_guest_msg { struct snp_guest_msg_hdr hdr; - u8 payload[4000]; + u8 payload[PAGE_SIZE - sizeof(struct snp_guest_msg_hdr)]; } __packed; struct sev_guest_platform_data { diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h index 64df897c0ee30f..3918c7a434f5b8 100644 --- a/arch/x86/include/asm/sparsemem.h +++ b/arch/x86/include/asm/sparsemem.h @@ -31,13 +31,4 @@ #endif /* CONFIG_SPARSEMEM */ -#ifndef __ASSEMBLY__ -#ifdef CONFIG_NUMA_KEEP_MEMINFO -extern int phys_to_target_node(phys_addr_t start); -#define phys_to_target_node phys_to_target_node -extern int memory_add_physaddr_to_nid(u64 start); -#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid -#endif -#endif /* __ASSEMBLY__ */ - #endif /* _ASM_X86_SPARSEMEM_H */ diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index f0dea3750ca9f0..2b59b9951c90ee 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -516,6 +516,20 @@ struct ghcb { u32 ghcb_usage; } __packed; +struct vmcb { + struct vmcb_control_area control; + union { + struct vmcb_save_area save; + + /* + * For SEV-ES VMs, the save area in the VMCB is used only to + * save/load host state. Guest state resides in a separate + * page, the aptly named VM Save Area (VMSA), that is encrypted + * with the guest's private key. 
+ */ + struct sev_es_save_area host_sev_es_save; + }; +} __packed; #define EXPECTED_VMCB_SAVE_AREA_SIZE 744 #define EXPECTED_GHCB_SAVE_AREA_SIZE 1032 @@ -532,6 +546,7 @@ static inline void __unused_size_checks(void) BUILD_BUG_ON(sizeof(struct ghcb_save_area) != EXPECTED_GHCB_SAVE_AREA_SIZE); BUILD_BUG_ON(sizeof(struct sev_es_save_area) != EXPECTED_SEV_ES_SAVE_AREA_SIZE); BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE); + BUILD_BUG_ON(offsetof(struct vmcb, save) != EXPECTED_VMCB_CONTROL_AREA_SIZE); BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE); /* Check offsets of reserved fields */ @@ -568,11 +583,6 @@ static inline void __unused_size_checks(void) BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0); } -struct vmcb { - struct vmcb_control_area control; - struct vmcb_save_area save; -} __packed; - #define SVM_CPUID_FUNC 0x8000000a #define SVM_SELECTOR_S_SHIFT 4 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index c3bd0c0758c9a4..75248546403d79 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -70,13 +70,9 @@ static inline void update_task_stack(struct task_struct *task) #ifdef CONFIG_X86_32 this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0); #else - if (cpu_feature_enabled(X86_FEATURE_FRED)) { - /* WRMSRNS is a baseline feature for FRED. */ - wrmsrns(MSR_IA32_FRED_RSP0, (unsigned long)task_stack_page(task) + THREAD_SIZE); - } else if (cpu_feature_enabled(X86_FEATURE_XENPV)) { + if (!cpu_feature_enabled(X86_FEATURE_FRED) && cpu_feature_enabled(X86_FEATURE_XENPV)) /* Xen PV enters the kernel on the thread stack. */ load_sp0(task_top_of_stack(task)); - } #endif } diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 2fc7bc3863ff6f..7c488ff0c7641b 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -82,7 +82,12 @@ static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, unsigned long *args) { - memcpy(args, ®s->bx, 6 * sizeof(args[0])); + args[0] = regs->bx; + args[1] = regs->cx; + args[2] = regs->dx; + args[3] = regs->si; + args[4] = regs->di; + args[5] = regs->bp; } static inline int syscall_get_arch(struct task_struct *task) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 25726893c6f4dd..69e79fff41b800 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -399,11 +399,10 @@ static inline u64 tlbstate_lam_cr3_mask(void) return lam << X86_CR3_LAM_U57_BIT; } -static inline void set_tlbstate_lam_mode(struct mm_struct *mm) +static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask) { - this_cpu_write(cpu_tlbstate.lam, - mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT); - this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask); + this_cpu_write(cpu_tlbstate.lam, lam >> X86_CR3_LAM_U57_BIT); + this_cpu_write(tlbstate_untag_mask, untag_mask); } #else @@ -413,7 +412,7 @@ static inline u64 tlbstate_lam_cr3_mask(void) return 0; } -static inline void set_tlbstate_lam_mode(struct mm_struct *mm) +static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask) { } #endif diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index abe3a8f22cbd96..aef70336d62475 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -282,9 +282,22 @@ static inline long arch_scale_freq_capacity(int cpu) } #define arch_scale_freq_capacity arch_scale_freq_capacity +bool 
arch_enable_hybrid_capacity_scale(void); +void arch_set_cpu_capacity(int cpu, unsigned long cap, unsigned long max_cap, + unsigned long cap_freq, unsigned long base_freq); + +unsigned long arch_scale_cpu_capacity(int cpu); +#define arch_scale_cpu_capacity arch_scale_cpu_capacity + extern void arch_set_max_freq_ratio(bool turbo_disabled); extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled); #else +static inline bool arch_enable_hybrid_capacity_scale(void) { return false; } +static inline void arch_set_cpu_capacity(int cpu, unsigned long cap, + unsigned long max_cap, + unsigned long cap_freq, + unsigned long base_freq) { } + static inline void arch_set_max_freq_ratio(bool turbo_disabled) { } static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { } #endif diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 04789f45ab2b2f..afce8ee5d7b794 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -53,6 +53,17 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, */ #define valid_user_address(x) ((__force long)(x) >= 0) +/* + * Masking the user address is an alternative to a conditional + * user_access_begin that can avoid the fencing. This only works + * for dense accesses starting at the address. + */ +#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63))) +#define masked_user_access_begin(x) ({ \ + __auto_type __masked_ptr = (x); \ + __masked_ptr = mask_user_address(__masked_ptr); \ + __uaccess_begin(); __masked_ptr; }) + /* * User pointers can have tag bits on x86-64. This scheme tolerates * arbitrary values in those bits rather then masking them off. diff --git a/arch/x86/include/asm/uv/uv_irq.h b/arch/x86/include/asm/uv/uv_irq.h index d6b17c76062225..1876b5edd14266 100644 --- a/arch/x86/include/asm/uv/uv_irq.h +++ b/arch/x86/include/asm/uv/uv_irq.h @@ -31,7 +31,6 @@ enum { UV_AFFINITY_CPU }; -extern int uv_irq_2_mmr_info(int, unsigned long *, int *); extern int uv_setup_irq(char *, int, int, unsigned long, int); extern void uv_teardown_irq(unsigned int); diff --git a/arch/x86/include/asm/vdso/getrandom.h b/arch/x86/include/asm/vdso/getrandom.h index b96e674cafdeb8..ff5334ad32a0f5 100644 --- a/arch/x86/include/asm/vdso/getrandom.h +++ b/arch/x86/include/asm/vdso/getrandom.h @@ -37,19 +37,6 @@ static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void return &__vdso_rng_data; } -/** - * __arch_chacha20_blocks_nostack - Generate ChaCha20 stream without using the stack. - * @dst_bytes: Destination buffer to hold @nblocks * 64 bytes of output. - * @key: 32-byte input key. - * @counter: 8-byte counter, read on input and updated on return. - * @nblocks: Number of blocks to generate. - * - * Generates a given positive number of blocks of ChaCha20 output with nonce=0, and does not write - * to any stack or memory outside of the parameters passed to it, in order to mitigate stack data - * leaking into forked child processes. 
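The masked_user_access_begin() helper added in uaccess_64.h above replaces the conditional access_ok() pattern with pointer masking: a bad pointer is clamped to an address that is guaranteed to fault. A hedged usage sketch, assuming the usual unsafe_get_user()/user_access_end() pairing and a single dense access starting at the pointer (the function name is illustrative):

static int example_read_user_u64(u64 __user *uptr, u64 *val)
{
	u64 __user *ptr = masked_user_access_begin(uptr);

	unsafe_get_user(*val, ptr, efault);
	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}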
- */ -extern void __arch_chacha20_blocks_nostack(u8 *dst_bytes, const u32 *key, u32 *counter, size_t nblocks); - #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h index 972415a8be3191..67fedf1698b5e2 100644 --- a/arch/x86/include/asm/vdso/vsyscall.h +++ b/arch/x86/include/asm/vdso/vsyscall.h @@ -9,9 +9,6 @@ #include #include -DEFINE_VVAR(struct vdso_data, _vdso_data); -DEFINE_VVAR_SINGLE(struct vdso_rng_data, _vdso_rng_data); - /* * Update the vDSO data page to keep in sync with kernel timekeeping. */ @@ -22,6 +19,13 @@ struct vdso_data *__x86_get_k_vdso_data(void) } #define __arch_get_k_vdso_data __x86_get_k_vdso_data +static __always_inline +struct vdso_rng_data *__x86_get_k_vdso_rng_data(void) +{ + return &_vdso_rng_data; +} +#define __arch_get_k_vdso_rng_data __x86_get_k_vdso_rng_data + /* The asm-generic header needs to be included after the definitions above */ #include diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index d77a31039f241e..f7fd4369b82188 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -122,19 +122,17 @@ #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff -#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f -#define VMX_MISC_SAVE_EFER_LMA 0x00000020 -#define VMX_MISC_ACTIVITY_HLT 0x00000040 -#define VMX_MISC_ACTIVITY_WAIT_SIPI 0x00000100 -#define VMX_MISC_ZERO_LEN_INS 0x40000000 -#define VMX_MISC_MSR_LIST_MULTIPLIER 512 - /* VMFUNC functions */ #define VMFUNC_CONTROL_BIT(x) BIT((VMX_FEATURE_##x & 0x1f) - 28) #define VMX_VMFUNC_EPTP_SWITCHING VMFUNC_CONTROL_BIT(EPTP_SWITCHING) #define VMFUNC_EPTP_ENTRIES 512 +#define VMX_BASIC_32BIT_PHYS_ADDR_ONLY BIT_ULL(48) +#define VMX_BASIC_DUAL_MONITOR_TREATMENT BIT_ULL(49) +#define VMX_BASIC_INOUT BIT_ULL(54) +#define VMX_BASIC_TRUE_CTLS BIT_ULL(55) + static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic) { return vmx_basic & GENMASK_ULL(30, 0); @@ -145,9 +143,30 @@ static inline u32 vmx_basic_vmcs_size(u64 vmx_basic) return (vmx_basic & GENMASK_ULL(44, 32)) >> 32; } +static inline u32 vmx_basic_vmcs_mem_type(u64 vmx_basic) +{ + return (vmx_basic & GENMASK_ULL(53, 50)) >> 50; +} + +static inline u64 vmx_basic_encode_vmcs_info(u32 revision, u16 size, u8 memtype) +{ + return revision | ((u64)size << 32) | ((u64)memtype << 50); +} + +#define VMX_MISC_SAVE_EFER_LMA BIT_ULL(5) +#define VMX_MISC_ACTIVITY_HLT BIT_ULL(6) +#define VMX_MISC_ACTIVITY_SHUTDOWN BIT_ULL(7) +#define VMX_MISC_ACTIVITY_WAIT_SIPI BIT_ULL(8) +#define VMX_MISC_INTEL_PT BIT_ULL(14) +#define VMX_MISC_RDMSR_IN_SMM BIT_ULL(15) +#define VMX_MISC_VMXOFF_BLOCK_SMI BIT_ULL(28) +#define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS BIT_ULL(29) +#define VMX_MISC_ZERO_LEN_INS BIT_ULL(30) +#define VMX_MISC_MSR_LIST_MULTIPLIER 512 + static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc) { - return vmx_misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; + return vmx_misc & GENMASK_ULL(4, 0); } static inline int vmx_misc_cr3_count(u64 vmx_misc) @@ -508,9 +527,10 @@ enum vmcs_field { #define VMX_EPTP_PWL_4 0x18ull #define VMX_EPTP_PWL_5 0x20ull #define VMX_EPTP_AD_ENABLE_BIT (1ull << 6) +/* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. 
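With the new vmx_basic_vmcs_mem_type() accessor (and the existing revision/size accessors shown in context), decoding IA32_VMX_BASIC reduces to a few helper calls; an illustrative sketch, not code from the patch:

	u64 basic;

	rdmsrl(MSR_IA32_VMX_BASIC, basic);
	pr_info("VMCS: revision %u, size %u bytes, memory type %u\n",
		vmx_basic_vmcs_revision_id(basic),
		vmx_basic_vmcs_size(basic),
		vmx_basic_vmcs_mem_type(basic));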
*/ #define VMX_EPTP_MT_MASK 0x7ull -#define VMX_EPTP_MT_WB 0x6ull -#define VMX_EPTP_MT_UC 0x0ull +#define VMX_EPTP_MT_WB X86_MEMTYPE_WB +#define VMX_EPTP_MT_UC X86_MEMTYPE_UC #define VMX_EPT_READABLE_MASK 0x1ull #define VMX_EPT_WRITABLE_MASK 0x2ull #define VMX_EPT_EXECUTABLE_MASK 0x4ull diff --git a/arch/x86/include/uapi/asm/elf.h b/arch/x86/include/uapi/asm/elf.h new file mode 100644 index 00000000000000..468e135fa285f4 --- /dev/null +++ b/arch/x86/include/uapi/asm/elf.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_X86_ELF_H +#define _UAPI_ASM_X86_ELF_H + +#include + +struct x86_xfeat_component { + __u32 type; + __u32 size; + __u32 offset; + __u32 flags; +} __packed; + +_Static_assert(sizeof(struct x86_xfeat_component) % 4 == 0, "x86_xfeat_component is not aligned"); + +#endif /* _UAPI_ASM_X86_ELF_H */ diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index bf57a824f72281..a8debbf2f70280 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -439,6 +439,7 @@ struct kvm_sync_regs { #define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4) #define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5) #define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6) +#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7) #define KVM_STATE_NESTED_FORMAT_VMX 0 #define KVM_STATE_NESTED_FORMAT_SVM 1 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index a847180836e475..f7918980667a33 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -35,6 +35,14 @@ KMSAN_SANITIZE_nmi.o := n # If instrumentation of the following files is enabled, boot hangs during # first second. KCOV_INSTRUMENT_head$(BITS).o := n +# These are called from save_stack_trace() on debug paths, +# and produce large amounts of uninteresting coverage. 
+KCOV_INSTRUMENT_stacktrace.o := n +KCOV_INSTRUMENT_dumpstack.o := n +KCOV_INSTRUMENT_dumpstack_$(BITS).o := n +KCOV_INSTRUMENT_unwind_orc.o := n +KCOV_INSTRUMENT_unwind_frame.o := n +KCOV_INSTRUMENT_unwind_guess.o := n CFLAGS_irq.o := -I $(src)/../include/asm/trace diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 9f4618dcd70465..4efecac49863ec 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1778,3 +1778,14 @@ u64 x86_default_get_root_pointer(void) { return boot_params.acpi_rsdp_addr; } + +#ifdef CONFIG_XEN_PV +void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size) +{ + return ioremap_cache(phys, size); +} + +void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) = + x86_acpi_os_ioremap; +EXPORT_SYMBOL_GPL(acpi_os_ioremap); +#endif diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c index ff8f25faca3dd1..956984054bf300 100644 --- a/arch/x86/kernel/acpi/cppc.c +++ b/arch/x86/kernel/acpi/cppc.c @@ -9,6 +9,17 @@ #include #include +#define CPPC_HIGHEST_PERF_PERFORMANCE 196 +#define CPPC_HIGHEST_PERF_PREFCORE 166 + +enum amd_pref_core { + AMD_PREF_CORE_UNKNOWN = 0, + AMD_PREF_CORE_SUPPORTED, + AMD_PREF_CORE_UNSUPPORTED, +}; +static enum amd_pref_core amd_pref_core_detected; +static u64 boost_numerator; + /* Refer to drivers/acpi/cppc_acpi.c for the description of functions */ bool cpc_supported_by_cpu(void) @@ -69,31 +80,30 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) static void amd_set_max_freq_ratio(void) { struct cppc_perf_caps perf_caps; - u64 highest_perf, nominal_perf; + u64 numerator, nominal_perf; u64 perf_ratio; int rc; rc = cppc_get_perf_caps(0, &perf_caps); if (rc) { - pr_debug("Could not retrieve perf counters (%d)\n", rc); + pr_warn("Could not retrieve perf counters (%d)\n", rc); return; } - highest_perf = amd_get_highest_perf(); + rc = amd_get_boost_ratio_numerator(0, &numerator); + if (rc) { + pr_warn("Could not retrieve highest performance (%d)\n", rc); + return; + } nominal_perf = perf_caps.nominal_perf; - if (!highest_perf || !nominal_perf) { - pr_debug("Could not retrieve highest or nominal performance\n"); + if (!nominal_perf) { + pr_warn("Could not retrieve nominal performance\n"); return; } - perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf); /* midpoint between max_boost and max_P */ - perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1; - if (!perf_ratio) { - pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n"); - return; - } + perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1; freq_invariance_set_perf_ratio(perf_ratio, false); } @@ -116,3 +126,143 @@ void init_freq_invariance_cppc(void) init_done = true; mutex_unlock(&freq_invariance_lock); } + +/* + * Get the highest performance register value. + * @cpu: CPU from which to get highest performance. + * @highest_perf: Return address for highest performance value. + * + * Return: 0 for success, negative error code otherwise. 
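The reworked amd_set_max_freq_ratio() above folds the old two-step calculation into one expression; a worked example with made-up numbers (not taken from real hardware) shows what the midpoint does:

/*
 * Example: numerator = 200, nominal_perf = 100, SCHED_CAPACITY_SCALE = 1024.
 *
 *   div_u64(200 * 1024, 100)  = 2048    max-boost / nominal ratio
 *   (2048 + 1024) >> 1        = 1536    midpoint between max_boost and max_P
 *
 * so freq_invariance_set_perf_ratio() is handed 1.5 * SCHED_CAPACITY_SCALE.
 */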
+ */ +int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf) +{ + u64 val; + int ret; + + if (cpu_feature_enabled(X86_FEATURE_CPPC)) { + ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val); + if (ret) + goto out; + + val = AMD_CPPC_HIGHEST_PERF(val); + } else { + ret = cppc_get_highest_perf(cpu, &val); + if (ret) + goto out; + } + + WRITE_ONCE(*highest_perf, (u32)val); +out: + return ret; +} +EXPORT_SYMBOL_GPL(amd_get_highest_perf); + +/** + * amd_detect_prefcore: Detect if CPUs in the system support preferred cores + * @detected: Output variable for the result of the detection. + * + * Determine whether CPUs in the system support preferred cores. On systems + * that support preferred cores, different highest perf values will be found + * on different cores. On other systems, the highest perf value will be the + * same on all cores. + * + * The result of the detection will be stored in the 'detected' parameter. + * + * Return: 0 for success, negative error code otherwise + */ +int amd_detect_prefcore(bool *detected) +{ + int cpu, count = 0; + u64 highest_perf[2] = {0}; + + if (WARN_ON(!detected)) + return -EINVAL; + + switch (amd_pref_core_detected) { + case AMD_PREF_CORE_SUPPORTED: + *detected = true; + return 0; + case AMD_PREF_CORE_UNSUPPORTED: + *detected = false; + return 0; + default: + break; + } + + for_each_present_cpu(cpu) { + u32 tmp; + int ret; + + ret = amd_get_highest_perf(cpu, &tmp); + if (ret) + return ret; + + if (!count || (count == 1 && tmp != highest_perf[0])) + highest_perf[count++] = tmp; + + if (count == 2) + break; + } + + *detected = (count == 2); + boost_numerator = highest_perf[0]; + + amd_pref_core_detected = *detected ? AMD_PREF_CORE_SUPPORTED : + AMD_PREF_CORE_UNSUPPORTED; + + pr_debug("AMD CPPC preferred core is %ssupported (highest perf: 0x%llx)\n", + *detected ? "" : "un", highest_perf[0]); + + return 0; +} +EXPORT_SYMBOL_GPL(amd_detect_prefcore); + +/** + * amd_get_boost_ratio_numerator: Get the numerator to use for boost ratio calculation + * @cpu: CPU to get numerator for. + * @numerator: Output variable for numerator. + * + * Determine the numerator to use for calculating the boost ratio on + * a CPU. On systems that support preferred cores, this will be a hardcoded + * value. On other systems this will the highest performance register value. + * + * If booting the system with amd-pstate enabled but preferred cores disabled then + * the correct boost numerator will be returned to match hardware capabilities + * even if the preferred cores scheduling hints are not enabled. + * + * Return: 0 for success, negative error code otherwise. + */ +int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator) +{ + bool prefcore; + int ret; + + ret = amd_detect_prefcore(&prefcore); + if (ret) + return ret; + + /* without preferred cores, return the highest perf register value */ + if (!prefcore) { + *numerator = boost_numerator; + return 0; + } + + /* + * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f, + * the highest performance level is set to 196. + * https://bugzilla.kernel.org/show_bug.cgi?id=218759 + */ + if (cpu_feature_enabled(X86_FEATURE_ZEN4)) { + switch (boot_cpu_data.x86_model) { + case 0x70 ... 
0x7f: + *numerator = CPPC_HIGHEST_PERF_PERFORMANCE; + return 0; + default: + break; + } + } + *numerator = CPPC_HIGHEST_PERF_PREFCORE; + + return 0; +} +EXPORT_SYMBOL_GPL(amd_get_boost_ratio_numerator); diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 059e5c16af054e..dc5d3216af2404 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -26,6 +26,7 @@ #define PCI_DEVICE_ID_AMD_19H_M70H_ROOT 0x14e8 #define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT 0x153a #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 +#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122 #define PCI_DEVICE_ID_AMD_MI200_ROOT 0x14bb #define PCI_DEVICE_ID_AMD_MI300_ROOT 0x14f8 @@ -43,6 +44,8 @@ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4 #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 +#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4 0x124c +#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4 0x12bc #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 #define PCI_DEVICE_ID_AMD_MI300_DF_F4 0x152c @@ -63,6 +66,7 @@ static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) }, {} @@ -95,6 +99,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) }, @@ -122,6 +127,8 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) }, {} diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 373638691cd480..6513c53c9459eb 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -677,7 +677,7 @@ calibrate_by_pmtimer(u32 deltapm, long *delta, long *deltatsc) return -1; #endif - apic_printk(APIC_VERBOSE, "... PM-Timer delta = %u\n", deltapm); + apic_pr_verbose("... PM-Timer delta = %u\n", deltapm); /* Check, if the PM timer is available */ if (!deltapm) @@ -687,14 +687,14 @@ calibrate_by_pmtimer(u32 deltapm, long *delta, long *deltatsc) if (deltapm > (pm_100ms - pm_thresh) && deltapm < (pm_100ms + pm_thresh)) { - apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n"); + apic_pr_verbose("... 
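An editorial aside, not part of the patch: a sketch of how a consumer (for example a cpufreq driver) might call the two helpers added above. example_setup_boost() and its surrounding context are made up; only amd_detect_prefcore() and amd_get_boost_ratio_numerator() come from the patch.

static int example_setup_boost(unsigned int cpu)
{
	bool prefcore;
	u64 numerator;
	int ret;

	/* The detection result is cached after the first call */
	ret = amd_detect_prefcore(&prefcore);
	if (ret)
		return ret;

	/* 166/196 with preferred cores, the highest perf value without */
	ret = amd_get_boost_ratio_numerator(cpu, &numerator);
	if (ret)
		return ret;

	pr_debug("cpu%u: prefcore=%d, boost numerator=%llu\n",
		 cpu, prefcore, numerator);
	return 0;
}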
PM-Timer result ok\n"); return 0; } res = (((u64)deltapm) * mult) >> 22; do_div(res, 1000000); - pr_warn("APIC calibration not consistent " - "with PM-Timer: %ldms instead of 100ms\n", (long)res); + pr_warn("APIC calibration not consistent with PM-Timer: %ldms instead of 100ms\n", + (long)res); /* Correct the lapic counter value */ res = (((u64)(*delta)) * pm_100ms); @@ -707,9 +707,8 @@ calibrate_by_pmtimer(u32 deltapm, long *delta, long *deltatsc) if (boot_cpu_has(X86_FEATURE_TSC)) { res = (((u64)(*deltatsc)) * pm_100ms); do_div(res, deltapm); - apic_printk(APIC_VERBOSE, "TSC delta adjusted to " - "PM-Timer: %lu (%ld)\n", - (unsigned long)res, *deltatsc); + apic_pr_verbose("TSC delta adjusted to PM-Timer: %lu (%ld)\n", + (unsigned long)res, *deltatsc); *deltatsc = (long)res; } @@ -792,8 +791,7 @@ static int __init calibrate_APIC_clock(void) * in the clockevent structure and return. */ if (!lapic_init_clockevent()) { - apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n", - lapic_timer_period); + apic_pr_verbose("lapic timer already calibrated %d\n", lapic_timer_period); /* * Direct calibration methods must have an always running * local APIC timer, no need for broadcast timer. @@ -802,8 +800,7 @@ static int __init calibrate_APIC_clock(void) return 0; } - apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" - "calibrating APIC timer ...\n"); + apic_pr_verbose("Using local APIC timer interrupts. Calibrating APIC timer ...\n"); /* * There are platforms w/o global clockevent devices. Instead of @@ -866,7 +863,7 @@ static int __init calibrate_APIC_clock(void) /* Build delta t1-t2 as apic timer counts down */ delta = lapic_cal_t1 - lapic_cal_t2; - apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); + apic_pr_verbose("... lapic delta = %ld\n", delta); deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1); @@ -877,22 +874,19 @@ static int __init calibrate_APIC_clock(void) lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; lapic_init_clockevent(); - apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta); - apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult); - apic_printk(APIC_VERBOSE, "..... calibration result: %u\n", - lapic_timer_period); + apic_pr_verbose("..... delta %ld\n", delta); + apic_pr_verbose("..... mult: %u\n", lapic_clockevent.mult); + apic_pr_verbose("..... calibration result: %u\n", lapic_timer_period); if (boot_cpu_has(X86_FEATURE_TSC)) { - apic_printk(APIC_VERBOSE, "..... CPU clock speed is " - "%ld.%04ld MHz.\n", - (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ), - (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ)); + apic_pr_verbose("..... CPU clock speed is %ld.%04ld MHz.\n", + (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ), + (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ)); } - apic_printk(APIC_VERBOSE, "..... host bus clock speed is " - "%u.%04u MHz.\n", - lapic_timer_period / (1000000 / HZ), - lapic_timer_period % (1000000 / HZ)); + apic_pr_verbose("..... host bus clock speed is %u.%04u MHz.\n", + lapic_timer_period / (1000000 / HZ), + lapic_timer_period % (1000000 / HZ)); /* * Do a sanity check on the APIC calibration result @@ -911,7 +905,7 @@ static int __init calibrate_APIC_clock(void) * available. */ if (!pm_referenced && global_clock_event) { - apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); + apic_pr_verbose("... 
verify APIC timer\n"); /* * Setup the apic timer manually @@ -932,11 +926,11 @@ static int __init calibrate_APIC_clock(void) /* Jiffies delta */ deltaj = lapic_cal_j2 - lapic_cal_j1; - apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj); + apic_pr_verbose("... jiffies delta = %lu\n", deltaj); /* Check, if the jiffies result is consistent */ if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2) - apic_printk(APIC_VERBOSE, "... jiffies result ok\n"); + apic_pr_verbose("... jiffies result ok\n"); else levt->features |= CLOCK_EVT_FEAT_DUMMY; } @@ -1221,9 +1215,8 @@ void __init sync_Arb_IDs(void) */ apic_wait_icr_idle(); - apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); - apic_write(APIC_ICR, APIC_DEST_ALLINC | - APIC_INT_LEVELTRIG | APIC_DM_INIT); + apic_pr_debug("Synchronizing Arb IDs.\n"); + apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG | APIC_DM_INIT); } enum apic_intr_mode_id apic_intr_mode __ro_after_init; @@ -1409,10 +1402,10 @@ static void lapic_setup_esr(void) if (maxlvt > 3) apic_write(APIC_ESR, 0); value = apic_read(APIC_ESR); - if (value != oldvalue) - apic_printk(APIC_VERBOSE, "ESR value before enabling " - "vector: 0x%08x after: 0x%08x\n", - oldvalue, value); + if (value != oldvalue) { + apic_pr_verbose("ESR value before enabling vector: 0x%08x after: 0x%08x\n", + oldvalue, value); + } } #define APIC_IR_REGS APIC_ISR_NR @@ -1599,10 +1592,10 @@ static void setup_local_APIC(void) value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; if (!cpu && (pic_mode || !value || ioapic_is_disabled)) { value = APIC_DM_EXTINT; - apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu); + apic_pr_verbose("Enabled ExtINT on CPU#%d\n", cpu); } else { value = APIC_DM_EXTINT | APIC_LVT_MASKED; - apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu); + apic_pr_verbose("Masked ExtINT on CPU#%d\n", cpu); } apic_write(APIC_LVT0, value); @@ -2067,8 +2060,7 @@ static __init void apic_set_fixmap(bool read_apic) { set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); apic_mmio_base = APIC_BASE; - apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", - apic_mmio_base, mp_lapic_addr); + apic_pr_verbose("Mapped APIC to %16lx (%16lx)\n", apic_mmio_base, mp_lapic_addr); if (read_apic) apic_read_boot_cpu_id(false); } @@ -2171,18 +2163,17 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt) apic_eoi(); atomic_inc(&irq_err_count); - apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x", - smp_processor_id(), v); + apic_pr_debug("APIC error on CPU%d: %02x", smp_processor_id(), v); v &= 0xff; while (v) { if (v & 0x1) - apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); + apic_pr_debug_cont(" : %s", error_interrupt_reason[i]); i++; v >>= 1; } - apic_printk(APIC_DEBUG, KERN_CONT "\n"); + apic_pr_debug_cont("\n"); trace_error_apic_exit(ERROR_APIC_VECTOR); } @@ -2202,8 +2193,7 @@ static void __init connect_bsp_APIC(void) * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's * local APIC to INT and NMI lines. */ - apic_printk(APIC_VERBOSE, "leaving PIC mode, " - "enabling APIC mode.\n"); + apic_pr_verbose("Leaving PIC mode, enabling APIC mode.\n"); imcr_pic_to_apic(); } #endif @@ -2228,8 +2218,7 @@ void disconnect_bsp_APIC(int virt_wire_setup) * IPIs, won't work beyond this point! The only exception are * INIT IPIs. 
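An editorial aside, not part of the patch: the apic_pr_verbose()/apic_pr_debug()/apic_pr_debug_cont() helpers used throughout these hunks are not defined in this file; they are presumably thin wrappers around apic_printk() that bake in the log level and facility, roughly along these lines (a sketch, not the authoritative definitions):

#define apic_pr_verbose(fmt, ...)	apic_printk(APIC_VERBOSE, KERN_INFO fmt, ##__VA_ARGS__)
#define apic_pr_debug(fmt, ...)		apic_printk(APIC_DEBUG, KERN_DEBUG fmt, ##__VA_ARGS__)
#define apic_pr_debug_cont(fmt, ...)	apic_printk(APIC_DEBUG, KERN_CONT fmt, ##__VA_ARGS__)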
*/ - apic_printk(APIC_VERBOSE, "disabling APIC mode, " - "entering PIC mode.\n"); + apic_pr_verbose("Disabling APIC mode, entering PIC mode.\n"); imcr_apic_to_pic(); return; } diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index f37ad3392fec91..e0308d8c4e6c27 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -8,129 +8,25 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ -#include #include -#include -#include #include #include "local.h" -static struct apic apic_physflat; -static struct apic apic_flat; - -struct apic *apic __ro_after_init = &apic_flat; -EXPORT_SYMBOL_GPL(apic); - -static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - return 1; -} - -static void _flat_send_IPI_mask(unsigned long mask, int vector) -{ - unsigned long flags; - - local_irq_save(flags); - __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL); - local_irq_restore(flags); -} - -static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) -{ - unsigned long mask = cpumask_bits(cpumask)[0]; - - _flat_send_IPI_mask(mask, vector); -} - -static void -flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) -{ - unsigned long mask = cpumask_bits(cpumask)[0]; - int cpu = smp_processor_id(); - - if (cpu < BITS_PER_LONG) - __clear_bit(cpu, &mask); - - _flat_send_IPI_mask(mask, vector); -} - -static u32 flat_get_apic_id(u32 x) +static u32 physflat_get_apic_id(u32 x) { return (x >> 24) & 0xFF; } -static int flat_probe(void) +static int physflat_probe(void) { return 1; } -static struct apic apic_flat __ro_after_init = { - .name = "flat", - .probe = flat_probe, - .acpi_madt_oem_check = flat_acpi_madt_oem_check, - - .dest_mode_logical = true, - - .disable_esr = 0, - - .init_apic_ldr = default_init_apic_ldr, - .cpu_present_to_apicid = default_cpu_present_to_apicid, - - .max_apic_id = 0xFE, - .get_apic_id = flat_get_apic_id, - - .calc_dest_apicid = apic_flat_calc_apicid, - - .send_IPI = default_send_IPI_single, - .send_IPI_mask = flat_send_IPI_mask, - .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, - .send_IPI_allbutself = default_send_IPI_allbutself, - .send_IPI_all = default_send_IPI_all, - .send_IPI_self = default_send_IPI_self, - .nmi_to_offline_cpu = true, - - .read = native_apic_mem_read, - .write = native_apic_mem_write, - .eoi = native_apic_mem_eoi, - .icr_read = native_apic_icr_read, - .icr_write = native_apic_icr_write, - .wait_icr_idle = apic_mem_wait_icr_idle, - .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout, -}; - -/* - * Physflat mode is used when there are more than 8 CPUs on a system. - * We cannot use logical delivery in this case because the mask - * overflows, so use physical mode. - */ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { -#ifdef CONFIG_ACPI - /* - * Quirk: some x86_64 machines can only use physical APIC mode - * regardless of how many processors are present (x86_64 ES7000 - * is an example). 
- */ - if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && - (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { - printk(KERN_DEBUG "system APIC only can use physical flat"); - return 1; - } - - if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) { - printk(KERN_DEBUG "IBM Summit detected, will use apic physical"); - return 1; - } -#endif - - return 0; -} - -static int physflat_probe(void) -{ - return apic == &apic_physflat || num_possible_cpus() > 8 || jailhouse_paravirt(); + return 1; } static struct apic apic_physflat __ro_after_init = { @@ -146,7 +42,7 @@ static struct apic apic_physflat __ro_after_init = { .cpu_present_to_apicid = default_cpu_present_to_apicid, .max_apic_id = 0xFE, - .get_apic_id = flat_get_apic_id, + .get_apic_id = physflat_get_apic_id, .calc_dest_apicid = apic_default_calc_apicid, @@ -166,8 +62,7 @@ static struct apic apic_physflat __ro_after_init = { .wait_icr_idle = apic_mem_wait_icr_idle, .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout, }; +apic_driver(apic_physflat); -/* - * We need to check for physflat first, so this order is important. - */ -apic_drivers(apic_physflat, apic_flat); +struct apic *apic __ro_after_init = &apic_physflat; +EXPORT_SYMBOL_GPL(apic); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 477b740b2f267b..1029ea4ac8ba3d 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -86,8 +86,8 @@ static unsigned int ioapic_dynirq_base; static int ioapic_initialized; struct irq_pin_list { - struct list_head list; - int apic, pin; + struct list_head list; + int apic, pin; }; struct mp_chip_data { @@ -96,7 +96,7 @@ struct mp_chip_data { bool is_level; bool active_low; bool isa_irq; - u32 count; + u32 count; }; struct mp_ioapic_gsi { @@ -105,21 +105,17 @@ struct mp_ioapic_gsi { }; static struct ioapic { - /* - * # of IRQ routing registers - */ - int nr_registers; - /* - * Saved state during suspend/resume, or while enabling intr-remap. - */ - struct IO_APIC_route_entry *saved_registers; + /* # of IRQ routing registers */ + int nr_registers; + /* Saved state during suspend/resume, or while enabling intr-remap. 
*/ + struct IO_APIC_route_entry *saved_registers; /* I/O APIC config */ - struct mpc_ioapic mp_config; + struct mpc_ioapic mp_config; /* IO APIC gsi routing info */ - struct mp_ioapic_gsi gsi_config; - struct ioapic_domain_cfg irqdomain_cfg; - struct irq_domain *irqdomain; - struct resource *iomem_res; + struct mp_ioapic_gsi gsi_config; + struct ioapic_domain_cfg irqdomain_cfg; + struct irq_domain *irqdomain; + struct resource *iomem_res; } ioapics[MAX_IO_APICS]; #define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver @@ -205,10 +201,9 @@ void mp_save_irq(struct mpc_intsrc *m) { int i; - apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," - " IRQ %02x, APIC ID %x, APIC INT %02x\n", - m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, - m->srcbusirq, m->dstapic, m->dstirq); + apic_pr_verbose("Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n", + m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, + m->srcbusirq, m->dstapic, m->dstirq); for (i = 0; i < mp_irq_entries; i++) { if (!memcmp(&mp_irqs[i], m, sizeof(*m))) @@ -269,12 +264,14 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) static inline void io_apic_eoi(unsigned int apic, unsigned int vector) { struct io_apic __iomem *io_apic = io_apic_base(apic); + writel(vector, &io_apic->eoi); } unsigned int native_io_apic_read(unsigned int apic, unsigned int reg) { struct io_apic __iomem *io_apic = io_apic_base(apic); + writel(reg, &io_apic->index); return readl(&io_apic->data); } @@ -300,14 +297,8 @@ static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin) static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) { - struct IO_APIC_route_entry entry; - unsigned long flags; - - raw_spin_lock_irqsave(&ioapic_lock, flags); - entry = __ioapic_read_entry(apic, pin); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); - - return entry; + guard(raw_spinlock_irqsave)(&ioapic_lock); + return __ioapic_read_entry(apic, pin); } /* @@ -324,11 +315,8 @@ static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { - unsigned long flags; - - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); __ioapic_write_entry(apic, pin, e); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* @@ -339,12 +327,10 @@ static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) static void ioapic_mask_entry(int apic, int pin) { struct IO_APIC_route_entry e = { .masked = true }; - unsigned long flags; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); io_apic_write(apic, 0x10 + 2*pin, e.w1); io_apic_write(apic, 0x11 + 2*pin, e.w2); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* @@ -352,68 +338,39 @@ static void ioapic_mask_entry(int apic, int pin) * shared ISA-space IRQs, so we have to support them. We are super * fast in the common case, and fast for shared ISA-space IRQs. 
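An editorial aside, not part of the patch: the io_apic.c conversions above replace open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with the cleanup.h helpers. guard() keeps the lock until the enclosing scope is left, while scoped_guard() covers only the attached statement or braced block. A minimal sketch mirroring io_apic_get_version(); example_read_version() is a made-up name:

static int example_read_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;

	scoped_guard (raw_spinlock_irqsave, &ioapic_lock)
		reg_01.raw = io_apic_read(ioapic, 1);

	/* The lock has already been dropped here */
	return reg_01.bits.version;
}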
*/ -static int __add_pin_to_irq_node(struct mp_chip_data *data, - int node, int apic, int pin) +static bool add_pin_to_irq_node(struct mp_chip_data *data, int node, int apic, int pin) { struct irq_pin_list *entry; - /* don't allow duplicates */ - for_each_irq_pin(entry, data->irq_2_pin) + /* Don't allow duplicates */ + for_each_irq_pin(entry, data->irq_2_pin) { if (entry->apic == apic && entry->pin == pin) - return 0; + return true; + } entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node); if (!entry) { - pr_err("can not alloc irq_pin_list (%d,%d,%d)\n", - node, apic, pin); - return -ENOMEM; + pr_err("Cannot allocate irq_pin_list (%d,%d,%d)\n", node, apic, pin); + return false; } + entry->apic = apic; entry->pin = pin; list_add_tail(&entry->list, &data->irq_2_pin); - - return 0; + return true; } static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin) { struct irq_pin_list *tmp, *entry; - list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list) + list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list) { if (entry->apic == apic && entry->pin == pin) { list_del(&entry->list); kfree(entry); return; } -} - -static void add_pin_to_irq_node(struct mp_chip_data *data, - int node, int apic, int pin) -{ - if (__add_pin_to_irq_node(data, node, apic, pin)) - panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); -} - -/* - * Reroute an IRQ to a different pin. - */ -static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node, - int oldapic, int oldpin, - int newapic, int newpin) -{ - struct irq_pin_list *entry; - - for_each_irq_pin(entry, data->irq_2_pin) { - if (entry->apic == oldapic && entry->pin == oldpin) { - entry->apic = newapic; - entry->pin = newpin; - /* every one is different, right? */ - return; - } } - - /* old apic/pin didn't exist, so just add new ones */ - add_pin_to_irq_node(data, node, newapic, newpin); } static void io_apic_modify_irq(struct mp_chip_data *data, bool masked, @@ -430,12 +387,12 @@ static void io_apic_modify_irq(struct mp_chip_data *data, bool masked, } } +/* + * Synchronize the IO-APIC and the CPU by doing a dummy read from the + * IO-APIC + */ static void io_apic_sync(struct irq_pin_list *entry) { - /* - * Synchronize the IO-APIC and the CPU by doing - * a dummy read from the IO-APIC - */ struct io_apic __iomem *io_apic; io_apic = io_apic_base(entry->apic); @@ -445,11 +402,9 @@ static void io_apic_sync(struct irq_pin_list *entry) static void mask_ioapic_irq(struct irq_data *irq_data) { struct mp_chip_data *data = irq_data->chip_data; - unsigned long flags; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); io_apic_modify_irq(data, true, &io_apic_sync); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); } static void __unmask_ioapic(struct mp_chip_data *data) @@ -460,11 +415,9 @@ static void __unmask_ioapic(struct mp_chip_data *data) static void unmask_ioapic_irq(struct irq_data *irq_data) { struct mp_chip_data *data = irq_data->chip_data; - unsigned long flags; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); __unmask_ioapic(data); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* @@ -492,30 +445,24 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector) entry = entry1 = __ioapic_read_entry(apic, pin); - /* - * Mask the entry and change the trigger mode to edge. - */ + /* Mask the entry and change the trigger mode to edge. 
*/ entry1.masked = true; entry1.is_level = false; __ioapic_write_entry(apic, pin, entry1); - /* - * Restore the previous level triggered entry. - */ + /* Restore the previous level triggered entry. */ __ioapic_write_entry(apic, pin, entry); } } static void eoi_ioapic_pin(int vector, struct mp_chip_data *data) { - unsigned long flags; struct irq_pin_list *entry; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); for_each_irq_pin(entry, data->irq_2_pin) __eoi_ioapic_pin(entry->apic, entry->pin, vector); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); } static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) @@ -538,8 +485,6 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) } if (entry.irr) { - unsigned long flags; - /* * Make sure the trigger mode is set to level. Explicit EOI * doesn't clear the remote-IRR if the trigger mode is not @@ -549,9 +494,8 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) entry.is_level = true; ioapic_write_entry(apic, pin, entry); } - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); __eoi_ioapic_pin(apic, pin, entry.vector); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); } /* @@ -586,28 +530,23 @@ static int pirq_entries[MAX_PIRQS] = { static int __init ioapic_pirq_setup(char *str) { - int i, max; - int ints[MAX_PIRQS+1]; + int i, max, ints[MAX_PIRQS+1]; get_options(str, ARRAY_SIZE(ints), ints); - apic_printk(APIC_VERBOSE, KERN_INFO - "PIRQ redirection, working around broken MP-BIOS.\n"); + apic_pr_verbose("PIRQ redirection, working around broken MP-BIOS.\n"); + max = MAX_PIRQS; if (ints[0] < MAX_PIRQS) max = ints[0]; for (i = 0; i < max; i++) { - apic_printk(APIC_VERBOSE, KERN_DEBUG - "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); - /* - * PIRQs are mapped upside down, usually. - */ + apic_pr_verbose("... 
PIRQ%d -> IRQ %d\n", i, ints[i + 1]); + /* PIRQs are mapped upside down, usually */ pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; } return 1; } - __setup("pirq=", ioapic_pirq_setup); #endif /* CONFIG_X86_32 */ @@ -626,8 +565,7 @@ int save_ioapic_entries(void) } for_each_pin(apic, pin) - ioapics[apic].saved_registers[pin] = - ioapic_read_entry(apic, pin); + ioapics[apic].saved_registers[pin] = ioapic_read_entry(apic, pin); } return err; @@ -668,8 +606,7 @@ int restore_ioapic_entries(void) continue; for_each_pin(apic, pin) - ioapic_write_entry(apic, pin, - ioapics[apic].saved_registers[pin]); + ioapic_write_entry(apic, pin, ioapics[apic].saved_registers[pin]); } return 0; } @@ -681,12 +618,13 @@ static int find_irq_entry(int ioapic_idx, int pin, int type) { int i; - for (i = 0; i < mp_irq_entries; i++) + for (i = 0; i < mp_irq_entries; i++) { if (mp_irqs[i].irqtype == type && (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) || mp_irqs[i].dstapic == MP_APIC_ALL) && mp_irqs[i].dstirq == pin) return i; + } return -1; } @@ -701,10 +639,8 @@ static int __init find_isa_irq_pin(int irq, int type) for (i = 0; i < mp_irq_entries; i++) { int lbus = mp_irqs[i].srcbus; - if (test_bit(lbus, mp_bus_not_pci) && - (mp_irqs[i].irqtype == type) && + if (test_bit(lbus, mp_bus_not_pci) && (mp_irqs[i].irqtype == type) && (mp_irqs[i].srcbusirq == irq)) - return mp_irqs[i].dstirq; } return -1; @@ -717,8 +653,7 @@ static int __init find_isa_irq_apic(int irq, int type) for (i = 0; i < mp_irq_entries; i++) { int lbus = mp_irqs[i].srcbus; - if (test_bit(lbus, mp_bus_not_pci) && - (mp_irqs[i].irqtype == type) && + if (test_bit(lbus, mp_bus_not_pci) && (mp_irqs[i].irqtype == type) && (mp_irqs[i].srcbusirq == irq)) break; } @@ -726,9 +661,10 @@ static int __init find_isa_irq_apic(int irq, int type) if (i < mp_irq_entries) { int ioapic_idx; - for_each_ioapic(ioapic_idx) + for_each_ioapic(ioapic_idx) { if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic) return ioapic_idx; + } } return -1; @@ -769,8 +705,7 @@ static bool EISA_ELCR(unsigned int irq) unsigned int port = PIC_ELCR1 + (irq >> 3); return (inb(port) >> (irq & 7)) & 1; } - apic_printk(APIC_VERBOSE, KERN_INFO - "Broken MPtable reports ISA irq %d\n", irq); + apic_pr_verbose("Broken MPtable reports ISA irq %d\n", irq); return false; } @@ -947,9 +882,9 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info) static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi, struct irq_alloc_info *info) { + int type = ioapics[ioapic].irqdomain_cfg.type; bool legacy = false; int irq = -1; - int type = ioapics[ioapic].irqdomain_cfg.type; switch (type) { case IOAPIC_DOMAIN_LEGACY: @@ -971,8 +906,7 @@ static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi, return -1; } - return __irq_domain_alloc_irqs(domain, irq, 1, - ioapic_alloc_attr_node(info), + return __irq_domain_alloc_irqs(domain, irq, 1, ioapic_alloc_attr_node(info), info, legacy, NULL); } @@ -986,13 +920,12 @@ static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi, * PIRQs instead of reprogramming the interrupt routing logic. Thus there may be * multiple pins sharing the same legacy IRQ number when ACPI is disabled. 
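An editorial aside, not part of the patch: a worked example of what ioapic_pirq_setup() above does with a hypothetical "pirq=10,11" boot option. get_options() is the existing kernel helper; it stores the number of parsed values in ints[0] and the values themselves from ints[1] onwards. example_parse_pirq() is not a real function.

static void example_parse_pirq(void)
{
	int ints[MAX_PIRQS + 1];

	get_options("10,11", ARRAY_SIZE(ints), ints);
	/* ints[0] == 2, ints[1] == 10, ints[2] == 11 */

	/*
	 * ioapic_pirq_setup() then fills the table upside down:
	 * pirq_entries[MAX_PIRQS - 1] = 10, pirq_entries[MAX_PIRQS - 2] = 11
	 */
}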
*/ -static int alloc_isa_irq_from_domain(struct irq_domain *domain, - int irq, int ioapic, int pin, +static int alloc_isa_irq_from_domain(struct irq_domain *domain, int irq, int ioapic, int pin, struct irq_alloc_info *info) { - struct mp_chip_data *data; struct irq_data *irq_data = irq_get_irq_data(irq); int node = ioapic_alloc_attr_node(info); + struct mp_chip_data *data; /* * Legacy ISA IRQ has already been allocated, just add pin to @@ -1002,13 +935,11 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain, if (irq_data && irq_data->parent_data) { if (!mp_check_pin_attr(irq, info)) return -EBUSY; - if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic, - info->ioapic.pin)) + if (!add_pin_to_irq_node(irq_data->chip_data, node, ioapic, info->ioapic.pin)) return -ENOMEM; } else { info->flags |= X86_IRQ_ALLOC_LEGACY; - irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true, - NULL); + irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true, NULL); if (irq >= 0) { irq_data = irq_domain_get_irq_data(domain, irq); data = irq_data->chip_data; @@ -1022,11 +953,11 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain, static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, unsigned int flags, struct irq_alloc_info *info) { - int irq; - bool legacy = false; + struct irq_domain *domain = mp_ioapic_irqdomain(ioapic); struct irq_alloc_info tmp; struct mp_chip_data *data; - struct irq_domain *domain = mp_ioapic_irqdomain(ioapic); + bool legacy = false; + int irq; if (!domain) return -ENOSYS; @@ -1046,7 +977,7 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, return -EINVAL; } - mutex_lock(&ioapic_mutex); + guard(mutex)(&ioapic_mutex); if (!(flags & IOAPIC_MAP_ALLOC)) { if (!legacy) { irq = irq_find_mapping(domain, pin); @@ -1067,8 +998,6 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, data->count++; } } - mutex_unlock(&ioapic_mutex); - return irq; } @@ -1076,26 +1005,20 @@ static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags) { u32 gsi = mp_pin_to_gsi(ioapic, pin); - /* - * Debugging check, we are in big trouble if this message pops up! - */ + /* Debugging check, we are in big trouble if this message pops up! */ if (mp_irqs[idx].dstirq != pin) pr_err("broken BIOS or MPTABLE parser, ayiee!!\n"); #ifdef CONFIG_X86_32 - /* - * PCI IRQ command line redirection. Yes, limits are hardcoded. - */ + /* PCI IRQ command line redirection. Yes, limits are hardcoded. 
*/ if ((pin >= 16) && (pin <= 23)) { - if (pirq_entries[pin-16] != -1) { - if (!pirq_entries[pin-16]) { - apic_printk(APIC_VERBOSE, KERN_DEBUG - "disabling PIRQ%d\n", pin-16); + if (pirq_entries[pin - 16] != -1) { + if (!pirq_entries[pin - 16]) { + apic_pr_verbose("Disabling PIRQ%d\n", pin - 16); } else { int irq = pirq_entries[pin-16]; - apic_printk(APIC_VERBOSE, KERN_DEBUG - "using PIRQ%d -> IRQ %d\n", - pin-16, irq); + + apic_pr_verbose("Using PIRQ%d -> IRQ %d\n", pin - 16, irq); return irq; } } @@ -1133,10 +1056,9 @@ void mp_unmap_irq(int irq) if (!data || data->isa_irq) return; - mutex_lock(&ioapic_mutex); + guard(mutex)(&ioapic_mutex); if (--data->count == 0) irq_domain_free_irqs(irq, 1); - mutex_unlock(&ioapic_mutex); } /* @@ -1147,12 +1069,10 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) { int irq, i, best_ioapic = -1, best_idx = -1; - apic_printk(APIC_DEBUG, - "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", - bus, slot, pin); + apic_pr_debug("Querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", + bus, slot, pin); if (test_bit(bus, mp_bus_not_pci)) { - apic_printk(APIC_VERBOSE, - "PCI BIOS passed nonexistent PCI bus %d!\n", bus); + apic_pr_verbose("PCI BIOS passed nonexistent PCI bus %d!\n", bus); return -1; } @@ -1197,8 +1117,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) return -1; out: - return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq, - IOAPIC_MAP_ALLOC); + return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq, IOAPIC_MAP_ALLOC); } EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); @@ -1209,17 +1128,16 @@ static void __init setup_IO_APIC_irqs(void) unsigned int ioapic, pin; int idx; - apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); + apic_pr_verbose("Init IO_APIC IRQs\n"); for_each_ioapic_pin(ioapic, pin) { idx = find_irq_entry(ioapic, pin, mp_INT); - if (idx < 0) - apic_printk(APIC_VERBOSE, - KERN_DEBUG " apic %d pin %d not connected\n", - mpc_ioapic_id(ioapic), pin); - else - pin_2_irq(idx, ioapic, pin, - ioapic ? 0 : IOAPIC_MAP_ALLOC); + if (idx < 0) { + apic_pr_verbose("apic %d pin %d not connected\n", + mpc_ioapic_id(ioapic), pin); + } else { + pin_2_irq(idx, ioapic, pin, ioapic ? 0 : IOAPIC_MAP_ALLOC); + } } } @@ -1234,26 +1152,21 @@ static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries) char buf[256]; int i; - printk(KERN_DEBUG "IOAPIC %d:\n", apic); + apic_dbg("IOAPIC %d:\n", apic); for (i = 0; i <= nr_entries; i++) { entry = ioapic_read_entry(apic, i); - snprintf(buf, sizeof(buf), - " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)", - i, - entry.masked ? "disabled" : "enabled ", + snprintf(buf, sizeof(buf), " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)", + i, entry.masked ? "disabled" : "enabled ", entry.is_level ? "level" : "edge ", entry.active_low ? "low " : "high", entry.vector, entry.irr, entry.delivery_status); if (entry.ir_format) { - printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n", - buf, - (entry.ir_index_15 << 15) | entry.ir_index_0_14, - entry.ir_zero); + apic_dbg("%s, remapped, I(%04X), Z(%X)\n", buf, + (entry.ir_index_15 << 15) | entry.ir_index_0_14, entry.ir_zero); } else { - printk(KERN_DEBUG "%s, %s, D(%02X%02X), M(%1d)\n", buf, - entry.dest_mode_logical ? "logical " : "physical", - entry.virt_destid_8_14, entry.destid_0_7, - entry.delivery_mode); + apic_dbg("%s, %s, D(%02X%02X), M(%1d)\n", buf, + entry.dest_mode_logical ? 
"logical " : "physic al", + entry.virt_destid_8_14, entry.destid_0_7, entry.delivery_mode); } } } @@ -1264,30 +1177,25 @@ static void __init print_IO_APIC(int ioapic_idx) union IO_APIC_reg_01 reg_01; union IO_APIC_reg_02 reg_02; union IO_APIC_reg_03 reg_03; - unsigned long flags; - raw_spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(ioapic_idx, 0); - reg_01.raw = io_apic_read(ioapic_idx, 1); - if (reg_01.bits.version >= 0x10) - reg_02.raw = io_apic_read(ioapic_idx, 2); - if (reg_01.bits.version >= 0x20) - reg_03.raw = io_apic_read(ioapic_idx, 3); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); - - printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx)); - printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); - printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); - printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); - printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); - - printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); - printk(KERN_DEBUG "....... : max redirection entries: %02X\n", - reg_01.bits.entries); - - printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); - printk(KERN_DEBUG "....... : IO APIC version: %02X\n", - reg_01.bits.version); + scoped_guard (raw_spinlock_irqsave, &ioapic_lock) { + reg_00.raw = io_apic_read(ioapic_idx, 0); + reg_01.raw = io_apic_read(ioapic_idx, 1); + if (reg_01.bits.version >= 0x10) + reg_02.raw = io_apic_read(ioapic_idx, 2); + if (reg_01.bits.version >= 0x20) + reg_03.raw = io_apic_read(ioapic_idx, 3); + } + + apic_dbg("IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx)); + apic_dbg(".... register #00: %08X\n", reg_00.raw); + apic_dbg("....... : physical APIC id: %02X\n", reg_00.bits.ID); + apic_dbg("....... : Delivery Type: %X\n", reg_00.bits.delivery_type); + apic_dbg("....... : LTS : %X\n", reg_00.bits.LTS); + apic_dbg(".... register #01: %08X\n", *(int *)®_01); + apic_dbg("....... : max redirection entries: %02X\n", reg_01.bits.entries); + apic_dbg("....... : PRQ implemented: %X\n", reg_01.bits.PRQ); + apic_dbg("....... : IO APIC version: %02X\n", reg_01.bits.version); /* * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, @@ -1295,8 +1203,8 @@ static void __init print_IO_APIC(int ioapic_idx) * value, so ignore it if reg_02 == reg_01. */ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { - printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); - printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); + apic_dbg(".... register #02: %08X\n", reg_02.raw); + apic_dbg("....... : arbitration: %02X\n", reg_02.bits.arbitration); } /* @@ -1306,11 +1214,11 @@ static void __init print_IO_APIC(int ioapic_idx) */ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && reg_03.raw != reg_01.raw) { - printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); - printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); + apic_dbg(".... register #03: %08X\n", reg_03.raw); + apic_dbg("....... : Boot DT : %X\n", reg_03.bits.boot_DT); } - printk(KERN_DEBUG ".... IRQ redirection table:\n"); + apic_dbg(".... 
IRQ redirection table:\n"); io_apic_print_entries(ioapic_idx, reg_01.bits.entries); } @@ -1319,11 +1227,11 @@ void __init print_IO_APICs(void) int ioapic_idx; unsigned int irq; - printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); - for_each_ioapic(ioapic_idx) - printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", - mpc_ioapic_id(ioapic_idx), - ioapics[ioapic_idx].nr_registers); + apic_dbg("number of MP IRQ sources: %d.\n", mp_irq_entries); + for_each_ioapic(ioapic_idx) { + apic_dbg("number of IO-APIC #%d registers: %d.\n", + mpc_ioapic_id(ioapic_idx), ioapics[ioapic_idx].nr_registers); + } /* * We are a bit conservative about what we expect. We have to @@ -1334,7 +1242,7 @@ void __init print_IO_APICs(void) for_each_ioapic(ioapic_idx) print_IO_APIC(ioapic_idx); - printk(KERN_DEBUG "IRQ to pin mappings:\n"); + apic_dbg("IRQ to pin mappings:\n"); for_each_active_irq(irq) { struct irq_pin_list *entry; struct irq_chip *chip; @@ -1349,7 +1257,7 @@ void __init print_IO_APICs(void) if (list_empty(&data->irq_2_pin)) continue; - printk(KERN_DEBUG "IRQ%d ", irq); + apic_dbg("IRQ%d ", irq); for_each_irq_pin(entry, data->irq_2_pin) pr_cont("-> %d:%d", entry->apic, entry->pin); pr_cont("\n"); @@ -1363,8 +1271,7 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; void __init enable_IO_APIC(void) { - int i8259_apic, i8259_pin; - int apic, pin; + int i8259_apic, i8259_pin, apic, pin; if (ioapic_is_disabled) nr_ioapics = 0; @@ -1376,19 +1283,21 @@ void __init enable_IO_APIC(void) /* See if any of the pins is in ExtINT mode */ struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin); - /* If the interrupt line is enabled and in ExtInt mode - * I have found the pin where the i8259 is connected. + /* + * If the interrupt line is enabled and in ExtInt mode I + * have found the pin where the i8259 is connected. */ - if (!entry.masked && - entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) { + if (!entry.masked && entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) { ioapic_i8259.apic = apic; ioapic_i8259.pin = pin; - goto found_i8259; + break; } } - found_i8259: - /* Look to see what if the MP table has reported the ExtINT */ - /* If we could not find the appropriate pin by looking at the ioapic + + /* + * Look to see what if the MP table has reported the ExtINT + * + * If we could not find the appropriate pin by looking at the ioapic * the i8259 probably is not connected the ioapic but give the * mptable a chance anyway. 
*/ @@ -1396,29 +1305,24 @@ void __init enable_IO_APIC(void) i8259_apic = find_isa_irq_apic(0, mp_ExtINT); /* Trust the MP table if nothing is setup in the hardware */ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { - printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); + pr_warn("ExtINT not setup in hardware but reported by MP table\n"); ioapic_i8259.pin = i8259_pin; ioapic_i8259.apic = i8259_apic; } /* Complain if the MP table and the hardware disagree */ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && - (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) - { - printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); - } + (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) + pr_warn("ExtINT in hardware and MP table differ\n"); - /* - * Do not trust the IO-APIC being empty at bootup - */ + /* Do not trust the IO-APIC being empty at bootup */ clear_IO_APIC(); } void native_restore_boot_irq_mode(void) { /* - * If the i8259 is routed through an IOAPIC - * Put that IOAPIC in virtual wire mode - * so legacy interrupts can be delivered. + * If the i8259 is routed through an IOAPIC Put that IOAPIC in + * virtual wire mode so legacy interrupts can be delivered. */ if (ioapic_i8259.pin != -1) { struct IO_APIC_route_entry entry; @@ -1433,9 +1337,7 @@ void native_restore_boot_irq_mode(void) entry.destid_0_7 = apic_id & 0xFF; entry.virt_destid_8_14 = apic_id >> 8; - /* - * Add it to the IO-APIC irq-routing table: - */ + /* Add it to the IO-APIC irq-routing table */ ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); } @@ -1464,7 +1366,6 @@ static void __init setup_ioapic_ids_from_mpc_nocheck(void) const u32 broadcast_id = 0xF; union IO_APIC_reg_00 reg_00; unsigned char old_id; - unsigned long flags; int ioapic_idx, i; /* @@ -1478,9 +1379,8 @@ static void __init setup_ioapic_ids_from_mpc_nocheck(void) */ for_each_ioapic(ioapic_idx) { /* Read the register 0 value */ - raw_spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(ioapic_idx, 0); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); + scoped_guard (raw_spinlock_irqsave, &ioapic_lock) + reg_00.raw = io_apic_read(ioapic_idx, 0); old_id = mpc_ioapic_id(ioapic_idx); @@ -1508,47 +1408,42 @@ static void __init setup_ioapic_ids_from_mpc_nocheck(void) set_bit(i, phys_id_present_map); ioapics[ioapic_idx].mp_config.apicid = i; } else { - apic_printk(APIC_VERBOSE, "Setting %d in the phys_id_present_map\n", - mpc_ioapic_id(ioapic_idx)); + apic_pr_verbose("Setting %d in the phys_id_present_map\n", + mpc_ioapic_id(ioapic_idx)); set_bit(mpc_ioapic_id(ioapic_idx), phys_id_present_map); } /* - * We need to adjust the IRQ routing table - * if the ID changed. + * We need to adjust the IRQ routing table if the ID + * changed. */ - if (old_id != mpc_ioapic_id(ioapic_idx)) - for (i = 0; i < mp_irq_entries; i++) + if (old_id != mpc_ioapic_id(ioapic_idx)) { + for (i = 0; i < mp_irq_entries; i++) { if (mp_irqs[i].dstapic == old_id) - mp_irqs[i].dstapic - = mpc_ioapic_id(ioapic_idx); + mp_irqs[i].dstapic = mpc_ioapic_id(ioapic_idx); + } + } /* - * Update the ID register according to the right value - * from the MPC table if they are different. + * Update the ID register according to the right value from + * the MPC table if they are different. 
*/ if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID) continue; - apic_printk(APIC_VERBOSE, KERN_INFO - "...changing IO-APIC physical APIC ID to %d ...", - mpc_ioapic_id(ioapic_idx)); + apic_pr_verbose("...changing IO-APIC physical APIC ID to %d ...", + mpc_ioapic_id(ioapic_idx)); reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); - raw_spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(ioapic_idx, 0, reg_00.raw); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); - - /* - * Sanity check - */ - raw_spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(ioapic_idx, 0); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); + scoped_guard (raw_spinlock_irqsave, &ioapic_lock) { + io_apic_write(ioapic_idx, 0, reg_00.raw); + reg_00.raw = io_apic_read(ioapic_idx, 0); + } + /* Sanity check */ if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) pr_cont("could not set ID!\n"); else - apic_printk(APIC_VERBOSE, " ok.\n"); + apic_pr_verbose(" ok.\n"); } } @@ -1593,8 +1488,7 @@ static void __init delay_with_tsc(void) do { rep_nop(); now = rdtsc(); - } while ((now - start) < 40000000000ULL / HZ && - time_before_eq(jiffies, end)); + } while ((now - start) < 40000000000ULL / HZ && time_before_eq(jiffies, end)); } static void __init delay_without_tsc(void) @@ -1655,36 +1549,29 @@ static int __init timer_irq_works(void) * so we 'resend' these IRQs via IPIs, to the same CPU. It's much * better to do it this way as thus we do not have to be aware of * 'pending' interrupts in the IRQ path, except at this point. - */ -/* - * Edge triggered needs to resend any interrupt - * that was delayed but this is now handled in the device - * independent code. - */ - -/* - * Starting up a edge-triggered IO-APIC interrupt is - * nasty - we need to make sure that we get the edge. - * If it is already asserted for some reason, we need - * return 1 to indicate that is was pending. * - * This is not complete - we should be able to fake - * an edge even if it isn't on the 8259A... + * + * Edge triggered needs to resend any interrupt that was delayed but this + * is now handled in the device independent code. + * + * Starting up a edge-triggered IO-APIC interrupt is nasty - we need to + * make sure that we get the edge. If it is already asserted for some + * reason, we need return 1 to indicate that is was pending. + * + * This is not complete - we should be able to fake an edge even if it + * isn't on the 8259A... */ static unsigned int startup_ioapic_irq(struct irq_data *data) { int was_pending = 0, irq = data->irq; - unsigned long flags; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); if (irq < nr_legacy_irqs()) { legacy_pic->mask(irq); if (legacy_pic->irq_pending(irq)) was_pending = 1; } __unmask_ioapic(data->chip_data); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); - return was_pending; } @@ -1694,9 +1581,8 @@ atomic_t irq_mis_count; static bool io_apic_level_ack_pending(struct mp_chip_data *data) { struct irq_pin_list *entry; - unsigned long flags; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); for_each_irq_pin(entry, data->irq_2_pin) { struct IO_APIC_route_entry e; int pin; @@ -1704,13 +1590,9 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data) pin = entry->pin; e.w1 = io_apic_read(entry->apic, 0x10 + pin*2); /* Is the remote IRR bit set? 
*/ - if (e.irr) { - raw_spin_unlock_irqrestore(&ioapic_lock, flags); + if (e.irr) return true; - } } - raw_spin_unlock_irqrestore(&ioapic_lock, flags); - return false; } @@ -1728,7 +1610,8 @@ static inline bool ioapic_prepare_move(struct irq_data *data) static inline void ioapic_finish_move(struct irq_data *data, bool moveit) { if (unlikely(moveit)) { - /* Only migrate the irq if the ack has been received. + /* + * Only migrate the irq if the ack has been received. * * On rare occasions the broadcast level triggered ack gets * delayed going to ioapics, and if we reprogram the @@ -1911,18 +1794,16 @@ static void ioapic_configure_entry(struct irq_data *irqd) __ioapic_write_entry(entry->apic, entry->pin, mpd->entry); } -static int ioapic_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) +static int ioapic_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force) { struct irq_data *parent = irq_data->parent_data; - unsigned long flags; int ret; ret = parent->chip->irq_set_affinity(parent, mask, force); - raw_spin_lock_irqsave(&ioapic_lock, flags); + + guard(raw_spinlock_irqsave)(&ioapic_lock); if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) ioapic_configure_entry(irq_data); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); return ret; } @@ -1941,9 +1822,8 @@ static int ioapic_set_affinity(struct irq_data *irq_data, * * Verify that the corresponding Remote-IRR bits are clear. */ -static int ioapic_irq_get_chip_state(struct irq_data *irqd, - enum irqchip_irq_state which, - bool *state) +static int ioapic_irq_get_chip_state(struct irq_data *irqd, enum irqchip_irq_state which, + bool *state) { struct mp_chip_data *mcd = irqd->chip_data; struct IO_APIC_route_entry rentry; @@ -1953,7 +1833,8 @@ static int ioapic_irq_get_chip_state(struct irq_data *irqd, return -EINVAL; *state = false; - raw_spin_lock(&ioapic_lock); + + guard(raw_spinlock)(&ioapic_lock); for_each_irq_pin(p, mcd->irq_2_pin) { rentry = __ioapic_read_entry(p->apic, p->pin); /* @@ -1967,7 +1848,6 @@ static int ioapic_irq_get_chip_state(struct irq_data *irqd, break; } } - raw_spin_unlock(&ioapic_lock); return 0; } @@ -2008,14 +1888,13 @@ static inline void init_IO_APIC_traps(void) cfg = irq_cfg(irq); if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { /* - * Hmm.. We don't have an entry for this, - * so default to an old-fashioned 8259 - * interrupt if we can.. + * Hmm.. We don't have an entry for this, so + * default to an old-fashioned 8259 interrupt if we + * can. Otherwise set the dummy interrupt chip. */ if (irq < nr_legacy_irqs()) legacy_pic->make_irq(irq); else - /* Strange. Oh, well.. 
*/ irq_set_chip(irq, &no_irq_chip); } } @@ -2024,20 +1903,17 @@ static inline void init_IO_APIC_traps(void) /* * The local APIC irq-chip implementation: */ - static void mask_lapic_irq(struct irq_data *data) { - unsigned long v; + unsigned long v = apic_read(APIC_LVT0); - v = apic_read(APIC_LVT0); apic_write(APIC_LVT0, v | APIC_LVT_MASKED); } static void unmask_lapic_irq(struct irq_data *data) { - unsigned long v; + unsigned long v = apic_read(APIC_LVT0); - v = apic_read(APIC_LVT0); apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); } @@ -2056,8 +1932,7 @@ static struct irq_chip lapic_chip __read_mostly = { static void lapic_register_intr(int irq) { irq_clear_status_flags(irq, IRQ_LEVEL); - irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, - "edge"); + irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, "edge"); } /* @@ -2069,9 +1944,9 @@ static void lapic_register_intr(int irq) */ static inline void __init unlock_ExtINT_logic(void) { - int apic, pin, i; - struct IO_APIC_route_entry entry0, entry1; unsigned char save_control, save_freq_select; + struct IO_APIC_route_entry entry0, entry1; + int apic, pin, i; u32 apic_id; pin = find_isa_irq_pin(8, mp_INT); @@ -2131,10 +2006,10 @@ static int __init disable_timer_pin_setup(char *arg) } early_param("disable_timer_pin_1", disable_timer_pin_setup); -static int mp_alloc_timer_irq(int ioapic, int pin) +static int __init mp_alloc_timer_irq(int ioapic, int pin) { - int irq = -1; struct irq_domain *domain = mp_ioapic_irqdomain(ioapic); + int irq = -1; if (domain) { struct irq_alloc_info info; @@ -2142,21 +2017,36 @@ static int mp_alloc_timer_irq(int ioapic, int pin) ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0); info.devid = mpc_ioapic_id(ioapic); info.ioapic.pin = pin; - mutex_lock(&ioapic_mutex); + guard(mutex)(&ioapic_mutex); irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info); - mutex_unlock(&ioapic_mutex); } return irq; } +static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node, + int oldapic, int oldpin, + int newapic, int newpin) +{ + struct irq_pin_list *entry; + + for_each_irq_pin(entry, data->irq_2_pin) { + if (entry->apic == oldapic && entry->pin == oldpin) { + entry->apic = newapic; + entry->pin = newpin; + return; + } + } + + /* Old apic/pin didn't exist, so just add a new one */ + add_pin_to_irq_node(data, node, newapic, newpin); +} + /* * This code may look a bit paranoid, but it's supposed to cooperate with * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast * fanatically on his truly buggy board. - * - * FIXME: really need to revamp this for all platforms. 
*/ static inline void __init check_timer(void) { @@ -2194,9 +2084,8 @@ static inline void __init check_timer(void) pin2 = ioapic_i8259.pin; apic2 = ioapic_i8259.apic; - apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " - "apic1=%d pin1=%d apic2=%d pin2=%d\n", - cfg->vector, apic1, pin1, apic2, pin2); + pr_info("..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", + cfg->vector, apic1, pin1, apic2, pin2); /* * Some BIOS writers are clueless and report the ExtINTA @@ -2240,13 +2129,10 @@ static inline void __init check_timer(void) panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC"); clear_IO_APIC_pin(apic1, pin1); if (!no_pin1) - apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " - "8254 timer not connected to IO-APIC\n"); + pr_err("..MP-BIOS bug: 8254 timer not connected to IO-APIC\n"); - apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " - "(IRQ0) through the 8259A ...\n"); - apic_printk(APIC_QUIET, KERN_INFO - "..... (found apic %d pin %d) ...\n", apic2, pin2); + pr_info("...trying to set up timer (IRQ0) through the 8259A ...\n"); + pr_info("..... (found apic %d pin %d) ...\n", apic2, pin2); /* * legacy devices should be connected to IO APIC #0 */ @@ -2255,7 +2141,7 @@ static inline void __init check_timer(void) irq_domain_activate_irq(irq_data, false); legacy_pic->unmask(0); if (timer_irq_works()) { - apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); + pr_info("....... works.\n"); goto out; } /* @@ -2263,26 +2149,24 @@ static inline void __init check_timer(void) */ legacy_pic->mask(0); clear_IO_APIC_pin(apic2, pin2); - apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); + pr_info("....... failed.\n"); } - apic_printk(APIC_QUIET, KERN_INFO - "...trying to set up timer as Virtual Wire IRQ...\n"); + pr_info("...trying to set up timer as Virtual Wire IRQ...\n"); lapic_register_intr(0); apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ legacy_pic->unmask(0); if (timer_irq_works()) { - apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); + pr_info("..... works.\n"); goto out; } legacy_pic->mask(0); apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); - apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); + pr_info("..... failed.\n"); - apic_printk(APIC_QUIET, KERN_INFO - "...trying to set up timer as ExtINT IRQ...\n"); + pr_info("...trying to set up timer as ExtINT IRQ...\n"); legacy_pic->init(0); legacy_pic->make_irq(0); @@ -2292,14 +2176,15 @@ static inline void __init check_timer(void) unlock_ExtINT_logic(); if (timer_irq_works()) { - apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); + pr_info("..... works.\n"); goto out; } - apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); - if (apic_is_x2apic_enabled()) - apic_printk(APIC_QUIET, KERN_INFO - "Perhaps problem with the pre-enabled x2apic mode\n" - "Try booting with x2apic and interrupt-remapping disabled in the bios.\n"); + + pr_info("..... failed :\n"); + if (apic_is_x2apic_enabled()) { + pr_info("Perhaps problem with the pre-enabled x2apic mode\n" + "Try booting with x2apic and interrupt-remapping disabled in the bios.\n"); + } panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " "report. 
Then try booting with the 'noapic' option.\n"); out: @@ -2327,11 +2212,11 @@ static inline void __init check_timer(void) static int mp_irqdomain_create(int ioapic) { - struct irq_domain *parent; + struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic); int hwirqs = mp_ioapic_pin_count(ioapic); struct ioapic *ip = &ioapics[ioapic]; struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg; - struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic); + struct irq_domain *parent; struct fwnode_handle *fn; struct irq_fwspec fwspec; @@ -2367,10 +2252,8 @@ static int mp_irqdomain_create(int ioapic) return -ENOMEM; } - if (cfg->type == IOAPIC_DOMAIN_LEGACY || - cfg->type == IOAPIC_DOMAIN_STRICT) - ioapic_dynirq_base = max(ioapic_dynirq_base, - gsi_cfg->gsi_end + 1); + if (cfg->type == IOAPIC_DOMAIN_LEGACY || cfg->type == IOAPIC_DOMAIN_STRICT) + ioapic_dynirq_base = max(ioapic_dynirq_base, gsi_cfg->gsi_end + 1); return 0; } @@ -2397,13 +2280,11 @@ void __init setup_IO_APIC(void) io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL; - apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); + apic_pr_verbose("ENABLING IO-APIC IRQs\n"); for_each_ioapic(ioapic) BUG_ON(mp_irqdomain_create(ioapic)); - /* - * Set up IO-APIC IRQ routing. - */ + /* Set up IO-APIC IRQ routing. */ x86_init.mpparse.setup_ioapic_ids(); sync_Arb_IDs(); @@ -2417,16 +2298,14 @@ void __init setup_IO_APIC(void) static void resume_ioapic_id(int ioapic_idx) { - unsigned long flags; union IO_APIC_reg_00 reg_00; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); reg_00.raw = io_apic_read(ioapic_idx, 0); if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) { reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); io_apic_write(ioapic_idx, 0, reg_00.raw); } - raw_spin_unlock_irqrestore(&ioapic_lock, flags); } static void ioapic_resume(void) @@ -2440,8 +2319,8 @@ static void ioapic_resume(void) } static struct syscore_ops ioapic_syscore_ops = { - .suspend = save_ioapic_entries, - .resume = ioapic_resume, + .suspend = save_ioapic_entries, + .resume = ioapic_resume, }; static int __init ioapic_init_ops(void) @@ -2456,15 +2335,13 @@ device_initcall(ioapic_init_ops); static int io_apic_get_redir_entries(int ioapic) { union IO_APIC_reg_01 reg_01; - unsigned long flags; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); reg_01.raw = io_apic_read(ioapic, 1); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); - /* The register returns the maximum index redir index - * supported, which is one less than the total number of redir - * entries. + /* + * The register returns the maximum index redir index supported, + * which is one less than the total number of redir entries. 
*/ return reg_01.bits.entries + 1; } @@ -2494,16 +2371,14 @@ static int io_apic_get_unique_id(int ioapic, int apic_id) static DECLARE_BITMAP(apic_id_map, MAX_LOCAL_APIC); const u32 broadcast_id = 0xF; union IO_APIC_reg_00 reg_00; - unsigned long flags; int i = 0; /* Initialize the ID map */ if (bitmap_empty(apic_id_map, MAX_LOCAL_APIC)) copy_phys_cpu_present_map(apic_id_map); - raw_spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(ioapic, 0); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); + scoped_guard (raw_spinlock_irqsave, &ioapic_lock) + reg_00.raw = io_apic_read(ioapic, 0); if (apic_id >= broadcast_id) { pr_warn("IOAPIC[%d]: Invalid apic_id %d, trying %d\n", @@ -2530,21 +2405,19 @@ static int io_apic_get_unique_id(int ioapic, int apic_id) if (reg_00.bits.ID != apic_id) { reg_00.bits.ID = apic_id; - raw_spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(ioapic, 0, reg_00.raw); - reg_00.raw = io_apic_read(ioapic, 0); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); + scoped_guard (raw_spinlock_irqsave, &ioapic_lock) { + io_apic_write(ioapic, 0, reg_00.raw); + reg_00.raw = io_apic_read(ioapic, 0); + } /* Sanity check */ if (reg_00.bits.ID != apic_id) { - pr_err("IOAPIC[%d]: Unable to change apic_id!\n", - ioapic); + pr_err("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); return -1; } } - apic_printk(APIC_VERBOSE, KERN_INFO - "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); + apic_pr_verbose("IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); return apic_id; } @@ -2560,7 +2433,6 @@ static u8 io_apic_unique_id(int idx, u8 id) { union IO_APIC_reg_00 reg_00; DECLARE_BITMAP(used, 256); - unsigned long flags; u8 new_id; int i; @@ -2576,26 +2448,23 @@ static u8 io_apic_unique_id(int idx, u8 id) * Read the current id from the ioapic and keep it if * available. */ - raw_spin_lock_irqsave(&ioapic_lock, flags); - reg_00.raw = io_apic_read(idx, 0); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); + scoped_guard (raw_spinlock_irqsave, &ioapic_lock) + reg_00.raw = io_apic_read(idx, 0); + new_id = reg_00.bits.ID; if (!test_bit(new_id, used)) { - apic_printk(APIC_VERBOSE, KERN_INFO - "IOAPIC[%d]: Using reg apic_id %d instead of %d\n", - idx, new_id, id); + apic_pr_verbose("IOAPIC[%d]: Using reg apic_id %d instead of %d\n", + idx, new_id, id); return new_id; } - /* - * Get the next free id and write it to the ioapic. - */ + /* Get the next free id and write it to the ioapic. 
*/ new_id = find_first_zero_bit(used, 256); reg_00.bits.ID = new_id; - raw_spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(idx, 0, reg_00.raw); - reg_00.raw = io_apic_read(idx, 0); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); + scoped_guard (raw_spinlock_irqsave, &ioapic_lock) { + io_apic_write(idx, 0, reg_00.raw); + reg_00.raw = io_apic_read(idx, 0); + } /* Sanity check */ BUG_ON(reg_00.bits.ID != new_id); @@ -2605,12 +2474,10 @@ static u8 io_apic_unique_id(int idx, u8 id) static int io_apic_get_version(int ioapic) { - union IO_APIC_reg_01 reg_01; - unsigned long flags; + union IO_APIC_reg_01 reg_01; - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); reg_01.raw = io_apic_read(ioapic, 1); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); return reg_01.bits.version; } @@ -2625,8 +2492,8 @@ static struct resource *ioapic_resources; static struct resource * __init ioapic_setup_resources(void) { - unsigned long n; struct resource *res; + unsigned long n; char *mem; int i; @@ -2686,9 +2553,7 @@ void __init io_apic_init_mappings(void) ioapic_phys = mpc_ioapic_addr(i); #ifdef CONFIG_X86_32 if (!ioapic_phys) { - printk(KERN_ERR - "WARNING: bogus zero IO-APIC " - "address found in MPTABLE, " + pr_err("WARNING: bogus zero IO-APIC address found in MPTABLE, " "disabling IO/APIC support!\n"); smp_found_config = 0; ioapic_is_disabled = true; @@ -2707,9 +2572,8 @@ void __init io_apic_init_mappings(void) ioapic_phys = __pa(ioapic_phys); } io_apic_set_fixmap(idx, ioapic_phys); - apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", - __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), - ioapic_phys); + apic_pr_verbose("mapped IOAPIC to %08lx (%08lx)\n", + __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), ioapic_phys); idx++; ioapic_res->start = ioapic_phys; @@ -2720,13 +2584,12 @@ void __init io_apic_init_mappings(void) void __init ioapic_insert_resources(void) { - int i; struct resource *r = ioapic_resources; + int i; if (!r) { if (nr_ioapics > 0) - printk(KERN_ERR - "IO APIC resources couldn't be allocated.\n"); + pr_err("IO APIC resources couldn't be allocated.\n"); return; } @@ -2746,11 +2609,12 @@ int mp_find_ioapic(u32 gsi) /* Find the IOAPIC that manages this GSI. 
*/ for_each_ioapic(i) { struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); + if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end) return i; } - printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); + pr_err("ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); return -1; } @@ -2789,12 +2653,10 @@ static int bad_ioapic_register(int idx) static int find_free_ioapic_entry(void) { - int idx; - - for (idx = 0; idx < MAX_IO_APICS; idx++) + for (int idx = 0; idx < MAX_IO_APICS; idx++) { if (ioapics[idx].nr_registers == 0) return idx; - + } return MAX_IO_APICS; } @@ -2805,8 +2667,7 @@ static int find_free_ioapic_entry(void) * @gsi_base: base of GSI associated with the IOAPIC * @cfg: configuration information for the IOAPIC */ -int mp_register_ioapic(int id, u32 address, u32 gsi_base, - struct ioapic_domain_cfg *cfg) +int mp_register_ioapic(int id, u32 address, u32 gsi_base, struct ioapic_domain_cfg *cfg) { bool hotplug = !!ioapic_initialized; struct mp_ioapic_gsi *gsi_cfg; @@ -2817,12 +2678,13 @@ int mp_register_ioapic(int id, u32 address, u32 gsi_base, pr_warn("Bogus (zero) I/O APIC address found, skipping!\n"); return -EINVAL; } - for_each_ioapic(ioapic) + + for_each_ioapic(ioapic) { if (ioapics[ioapic].mp_config.apicaddr == address) { - pr_warn("address 0x%x conflicts with IOAPIC%d\n", - address, ioapic); + pr_warn("address 0x%x conflicts with IOAPIC%d\n", address, ioapic); return -EEXIST; } + } idx = find_free_ioapic_entry(); if (idx >= MAX_IO_APICS) { @@ -2857,8 +2719,7 @@ int mp_register_ioapic(int id, u32 address, u32 gsi_base, (gsi_end >= gsi_cfg->gsi_base && gsi_end <= gsi_cfg->gsi_end)) { pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n", - gsi_base, gsi_end, - gsi_cfg->gsi_base, gsi_cfg->gsi_end); + gsi_base, gsi_end, gsi_cfg->gsi_base, gsi_cfg->gsi_end); clear_fixmap(FIX_IO_APIC_BASE_0 + idx); return -ENOSPC; } @@ -2892,8 +2753,7 @@ int mp_register_ioapic(int id, u32 address, u32 gsi_base, ioapics[idx].nr_registers = entries; pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n", - idx, mpc_ioapic_id(idx), - mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), + idx, mpc_ioapic_id(idx), mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), gsi_cfg->gsi_base, gsi_cfg->gsi_end); return 0; @@ -2904,11 +2764,13 @@ int mp_unregister_ioapic(u32 gsi_base) int ioapic, pin; int found = 0; - for_each_ioapic(ioapic) + for_each_ioapic(ioapic) { if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) { found = 1; break; } + } + if (!found) { pr_warn("can't find IOAPIC for GSI %d\n", gsi_base); return -ENODEV; @@ -2922,8 +2784,7 @@ int mp_unregister_ioapic(u32 gsi_base) if (irq >= 0) { data = irq_get_chip_data(irq); if (data && data->count) { - pr_warn("pin%d on IOAPIC%d is still in use.\n", - pin, ioapic); + pr_warn("pin%d on IOAPIC%d is still in use.\n", pin, ioapic); return -EBUSY; } } @@ -2958,8 +2819,7 @@ static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data, if (info && info->ioapic.valid) { data->is_level = info->ioapic.is_level; data->active_low = info->ioapic.active_low; - } else if (__acpi_get_override_irq(gsi, &data->is_level, - &data->active_low) < 0) { + } else if (__acpi_get_override_irq(gsi, &data->is_level, &data->active_low) < 0) { /* PCI interrupts are always active low level triggered. 
*/ data->is_level = true; data->active_low = true; @@ -3017,10 +2877,8 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, return -ENOMEM; ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info); - if (ret < 0) { - kfree(data); - return ret; - } + if (ret < 0) + goto free_data; INIT_LIST_HEAD(&data->irq_2_pin); irq_data->hwirq = info->ioapic.pin; @@ -3029,7 +2887,10 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, irq_data->chip_data = data; mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info); - add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); + if (!add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin)) { + ret = -ENOMEM; + goto free_irqs; + } mp_preconfigure_entry(data); mp_register_handler(virq, data->is_level); @@ -3039,11 +2900,15 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, legacy_pic->mask(virq); local_irq_restore(flags); - apic_printk(APIC_VERBOSE, KERN_DEBUG - "IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n", - ioapic, mpc_ioapic_id(ioapic), pin, virq, - data->is_level, data->active_low); + apic_pr_verbose("IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n", + ioapic, mpc_ioapic_id(ioapic), pin, virq, data->is_level, data->active_low); return 0; + +free_irqs: + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +free_data: + kfree(data); + return ret; } void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, @@ -3056,22 +2921,17 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, irq_data = irq_domain_get_irq_data(domain, virq); if (irq_data && irq_data->chip_data) { data = irq_data->chip_data; - __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain), - (int)irq_data->hwirq); + __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain), (int)irq_data->hwirq); WARN_ON(!list_empty(&data->irq_2_pin)); kfree(irq_data->chip_data); } irq_domain_free_irqs_top(domain, virq, nr_irqs); } -int mp_irqdomain_activate(struct irq_domain *domain, - struct irq_data *irq_data, bool reserve) +int mp_irqdomain_activate(struct irq_domain *domain, struct irq_data *irq_data, bool reserve) { - unsigned long flags; - - raw_spin_lock_irqsave(&ioapic_lock, flags); + guard(raw_spinlock_irqsave)(&ioapic_lock); ioapic_configure_entry(irq_data); - raw_spin_unlock_irqrestore(&ioapic_lock, flags); return 0; } @@ -3079,8 +2939,7 @@ void mp_irqdomain_deactivate(struct irq_domain *domain, struct irq_data *irq_data) { /* It won't be called for IRQ with multiple IOAPIC pins associated */ - ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain), - (int)irq_data->hwirq); + ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain), (int)irq_data->hwirq); } int mp_irqdomain_ioapic_idx(struct irq_domain *domain) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1e0fe5f8ab84e4..015971adadfc7e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1190,22 +1190,6 @@ unsigned long amd_get_dr_addr_mask(unsigned int dr) } EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask); -u32 amd_get_highest_perf(void) -{ - struct cpuinfo_x86 *c = &boot_cpu_data; - - if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) || - (c->x86_model >= 0x70 && c->x86_model < 0x80))) - return 166; - - if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) || - (c->x86_model >= 0x40 && c->x86_model < 0x70))) - return 166; - - return 255; -} -EXPORT_SYMBOL_GPL(amd_get_highest_perf); 
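The mp_irqdomain_alloc() hunk above adds a previously missing check on add_pin_to_irq_node() and unwinds the earlier allocations through goto labels in reverse order. A rough sketch of that error-path shape, with demo_* placeholders that are assumptions for illustration only:

#include <linux/slab.h>
#include <linux/errno.h>

struct demo_pin { int pin; };

/* Stand-ins for setup steps that can fail. */
static int demo_reserve_parent(struct demo_pin *d) { return d ? 0 : -EINVAL; }
static int demo_register_pin(struct demo_pin *d) { return 0; }
static void demo_release_parent(struct demo_pin *d) { }

static int demo_alloc(int pin)
{
	struct demo_pin *d;
	int ret;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;
	d->pin = pin;

	ret = demo_reserve_parent(d);
	if (ret < 0)
		goto free_d;

	ret = demo_register_pin(d);
	if (ret < 0)
		goto release_parent;

	return 0;

release_parent:
	demo_release_parent(d);
free_d:
	kfree(d);
	return ret;
}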
- static void zenbleed_check_cpu(void *unused) { struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 0b69bfbf345d0d..f642de2ebdac8a 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -349,9 +349,89 @@ static DECLARE_WORK(disable_freq_invariance_work, DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE; EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale); +static DEFINE_STATIC_KEY_FALSE(arch_hybrid_cap_scale_key); + +struct arch_hybrid_cpu_scale { + unsigned long capacity; + unsigned long freq_ratio; +}; + +static struct arch_hybrid_cpu_scale __percpu *arch_cpu_scale; + +/** + * arch_enable_hybrid_capacity_scale() - Enable hybrid CPU capacity scaling + * + * Allocate memory for per-CPU data used by hybrid CPU capacity scaling, + * initialize it and set the static key controlling its code paths. + * + * Must be called before arch_set_cpu_capacity(). + */ +bool arch_enable_hybrid_capacity_scale(void) +{ + int cpu; + + if (static_branch_unlikely(&arch_hybrid_cap_scale_key)) { + WARN_ONCE(1, "Hybrid CPU capacity scaling already enabled"); + return true; + } + + arch_cpu_scale = alloc_percpu(struct arch_hybrid_cpu_scale); + if (!arch_cpu_scale) + return false; + + for_each_possible_cpu(cpu) { + per_cpu_ptr(arch_cpu_scale, cpu)->capacity = SCHED_CAPACITY_SCALE; + per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio = arch_max_freq_ratio; + } + + static_branch_enable(&arch_hybrid_cap_scale_key); + + pr_info("Hybrid CPU capacity scaling enabled\n"); + + return true; +} + +/** + * arch_set_cpu_capacity() - Set scale-invariance parameters for a CPU + * @cpu: Target CPU. + * @cap: Capacity of @cpu at its maximum frequency, relative to @max_cap. + * @max_cap: System-wide maximum CPU capacity. + * @cap_freq: Frequency of @cpu corresponding to @cap. + * @base_freq: Frequency of @cpu at which MPERF counts. + * + * The units in which @cap and @max_cap are expressed do not matter, so long + * as they are consistent, because the former is effectively divided by the + * latter. Analogously for @cap_freq and @base_freq. + * + * After calling this function for all CPUs, call arch_rebuild_sched_domains() + * to let the scheduler know that capacity-aware scheduling can be used going + * forward. 
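As a hedged sketch of how a platform driver might consume this interface; the call site, the numeric values, the header choices and example_init_hybrid() are assumptions for illustration, only the arch_*() functions come from the patch:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/sched/topology.h>
#include <asm/topology.h>	/* assumed home of the arch_*_capacity*() declarations */

static int example_init_hybrid(void)
{
	int cpu;

	if (!arch_enable_hybrid_capacity_scale())
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/*
		 * Units only need to be self-consistent: capacity here is on a
		 * 0..1024 scale, frequencies are in kHz (illustrative values).
		 */
		arch_set_cpu_capacity(cpu, 819, 1024, 3200000, 2100000);
	}

	/* Let the scheduler pick up the per-CPU capacities. */
	arch_rebuild_sched_domains();
	return 0;
}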
+ */ +void arch_set_cpu_capacity(int cpu, unsigned long cap, unsigned long max_cap, + unsigned long cap_freq, unsigned long base_freq) +{ + if (static_branch_likely(&arch_hybrid_cap_scale_key)) { + WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity, + div_u64(cap << SCHED_CAPACITY_SHIFT, max_cap)); + WRITE_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->freq_ratio, + div_u64(cap_freq << SCHED_CAPACITY_SHIFT, base_freq)); + } else { + WARN_ONCE(1, "Hybrid CPU capacity scaling not enabled"); + } +} + +unsigned long arch_scale_cpu_capacity(int cpu) +{ + if (static_branch_unlikely(&arch_hybrid_cap_scale_key)) + return READ_ONCE(per_cpu_ptr(arch_cpu_scale, cpu)->capacity); + + return SCHED_CAPACITY_SCALE; +} +EXPORT_SYMBOL_GPL(arch_scale_cpu_capacity); + static void scale_freq_tick(u64 acnt, u64 mcnt) { - u64 freq_scale; + u64 freq_scale, freq_ratio; if (!arch_scale_freq_invariant()) return; @@ -359,7 +439,12 @@ static void scale_freq_tick(u64 acnt, u64 mcnt) if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt)) goto error; - if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt) + if (static_branch_unlikely(&arch_hybrid_cap_scale_key)) + freq_ratio = READ_ONCE(this_cpu_ptr(arch_cpu_scale)->freq_ratio); + else + freq_ratio = arch_max_freq_ratio; + + if (check_mul_overflow(mcnt, freq_ratio, &mcnt) || !mcnt) goto error; freq_scale = div64_u64(acnt, mcnt); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 45675da354f33d..d1915427b4ffcb 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -233,7 +233,8 @@ static void x86_amd_ssb_disable(void) #define pr_fmt(fmt) "MDS: " fmt /* Default mitigation for MDS-affected CPUs */ -static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; +static enum mds_mitigations mds_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_FULL : MDS_MITIGATION_OFF; static bool mds_nosmt __ro_after_init = false; static const char * const mds_strings[] = { @@ -293,7 +294,8 @@ enum taa_mitigations { }; /* Default mitigation for TAA-affected CPUs */ -static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW; +static enum taa_mitigations taa_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_VERW : TAA_MITIGATION_OFF; static bool taa_nosmt __ro_after_init; static const char * const taa_strings[] = { @@ -391,7 +393,8 @@ enum mmio_mitigations { }; /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ -static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; +static enum mmio_mitigations mmio_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_VERW : MMIO_MITIGATION_OFF; static bool mmio_nosmt __ro_after_init = false; static const char * const mmio_strings[] = { @@ -605,7 +608,8 @@ enum srbds_mitigations { SRBDS_MITIGATION_HYPERVISOR, }; -static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; +static enum srbds_mitigations srbds_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? 
SRBDS_MITIGATION_FULL : SRBDS_MITIGATION_OFF; static const char * const srbds_strings[] = { [SRBDS_MITIGATION_OFF] = "Vulnerable", @@ -731,11 +735,8 @@ enum gds_mitigations { GDS_MITIGATION_HYPERVISOR, }; -#if IS_ENABLED(CONFIG_MITIGATION_GDS_FORCE) -static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE; -#else -static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; -#endif +static enum gds_mitigations gds_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_FULL : GDS_MITIGATION_OFF; static const char * const gds_strings[] = { [GDS_MITIGATION_OFF] = "Vulnerable", @@ -871,7 +872,8 @@ enum spectre_v1_mitigation { }; static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = - SPECTRE_V1_MITIGATION_AUTO; + IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ? + SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE; static const char * const spectre_v1_strings[] = { [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", @@ -986,7 +988,7 @@ static const char * const retbleed_strings[] = { static enum retbleed_mitigation retbleed_mitigation __ro_after_init = RETBLEED_MITIGATION_NONE; static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = - RETBLEED_CMD_AUTO; + IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_CMD_AUTO : RETBLEED_CMD_OFF; static int __ro_after_init retbleed_nosmt = false; @@ -1447,17 +1449,18 @@ static void __init spec_v2_print_cond(const char *reason, bool secure) static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) { - enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; + enum spectre_v2_mitigation_cmd cmd; char arg[20]; int ret, i; + cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE; if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || cpu_mitigations_off()) return SPECTRE_V2_CMD_NONE; ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); if (ret < 0) - return SPECTRE_V2_CMD_AUTO; + return cmd; for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { if (!match_option(arg, ret, mitigation_options[i].option)) @@ -1467,8 +1470,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) } if (i >= ARRAY_SIZE(mitigation_options)) { - pr_err("unknown option (%s). Switching to AUTO select\n", arg); - return SPECTRE_V2_CMD_AUTO; + pr_err("unknown option (%s). Switching to default mode\n", arg); + return cmd; } if ((cmd == SPECTRE_V2_CMD_RETPOLINE || @@ -2021,10 +2024,12 @@ static const struct { static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) { - enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; + enum ssb_mitigation_cmd cmd; char arg[20]; int ret, i; + cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ? 
+ SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE; if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || cpu_mitigations_off()) { return SPEC_STORE_BYPASS_CMD_NONE; @@ -2032,7 +2037,7 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", arg, sizeof(arg)); if (ret < 0) - return SPEC_STORE_BYPASS_CMD_AUTO; + return cmd; for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { if (!match_option(arg, ret, ssb_mitigation_options[i].option)) @@ -2043,8 +2048,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) } if (i >= ARRAY_SIZE(ssb_mitigation_options)) { - pr_err("unknown option (%s). Switching to AUTO select\n", arg); - return SPEC_STORE_BYPASS_CMD_AUTO; + pr_err("unknown option (%s). Switching to default mode\n", arg); + return cmd; } } @@ -2371,7 +2376,8 @@ EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); #define pr_fmt(fmt) "L1TF: " fmt /* Default mitigation for L1TF-affected CPUs */ -enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; +enum l1tf_mitigations l1tf_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_FLUSH : L1TF_MITIGATION_OFF; #if IS_ENABLED(CONFIG_KVM_INTEL) EXPORT_SYMBOL_GPL(l1tf_mitigation); #endif @@ -2551,10 +2557,9 @@ static void __init srso_select_mitigation(void) { bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE); - if (cpu_mitigations_off()) - return; - - if (!boot_cpu_has_bug(X86_BUG_SRSO)) { + if (!boot_cpu_has_bug(X86_BUG_SRSO) || + cpu_mitigations_off() || + srso_cmd == SRSO_CMD_OFF) { if (boot_cpu_has(X86_FEATURE_SBPB)) x86_pred_cmd = PRED_CMD_SBPB; return; @@ -2585,11 +2590,6 @@ static void __init srso_select_mitigation(void) } switch (srso_cmd) { - case SRSO_CMD_OFF: - if (boot_cpu_has(X86_FEATURE_SBPB)) - x86_pred_cmd = PRED_CMD_SBPB; - return; - case SRSO_CMD_MICROCODE: if (has_microcode) { srso_mitigation = SRSO_MITIGATION_MICROCODE; @@ -2643,6 +2643,8 @@ static void __init srso_select_mitigation(void) pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); } break; + default: + break; } out: diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d4e539d4e158cc..07a34d72350574 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1165,8 +1165,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(INTEL_CORE_YONAH, NO_SSB), - VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID, NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY), + VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP, NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(INTEL_ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), @@ -1510,6 +1510,11 @@ static void __init cpu_parse_early_param(void) if (cmdline_find_option_bool(boot_command_line, "nousershstk")) setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK); + /* Minimize the gap between FRED is available and available but disabled. 
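The bugs.c hunks above all apply one idiom: take the boot-time default for a mitigation from its CONFIG_MITIGATION_* Kconfig switch via IS_ENABLED() rather than hard-coding it or wrapping it in #ifdef. A minimal sketch of the idiom; the demo mitigation and CONFIG_MITIGATION_DEMO are invented for illustration:

#include <linux/kconfig.h>
#include <linux/cache.h>

enum demo_mitigations {
	DEMO_MITIGATION_OFF,
	DEMO_MITIGATION_FULL,
};

/* Default follows the Kconfig choice; command-line parsing may still override it. */
static enum demo_mitigations demo_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_DEMO) ? DEMO_MITIGATION_FULL : DEMO_MITIGATION_OFF;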
*/ + arglen = cmdline_find_option(boot_command_line, "fred", arg, sizeof(arg)); + if (arglen != 2 || strncmp(arg, "on", 2)) + setup_clear_cpu_cap(X86_FEATURE_FRED); + arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg)); if (arglen <= 0) return; @@ -2171,7 +2176,7 @@ static inline void tss_setup_io_bitmap(struct tss_struct *tss) * Setup everything needed to handle exceptions from the IDT, including the IST * exceptions which use paranoid_entry(). */ -void cpu_init_exception_handling(void) +void cpu_init_exception_handling(bool boot_cpu) { struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); int cpu = raw_smp_processor_id(); @@ -2190,10 +2195,23 @@ void cpu_init_exception_handling(void) /* GHCB needs to be setup to handle #VC. */ setup_ghcb(); + if (cpu_feature_enabled(X86_FEATURE_FRED)) { + /* The boot CPU has enabled FRED during early boot */ + if (!boot_cpu) + cpu_init_fred_exceptions(); + + cpu_init_fred_rsps(); + } else { + load_current_idt(); + } +} + +void __init cpu_init_replace_early_idt(void) +{ if (cpu_feature_enabled(X86_FEATURE_FRED)) cpu_init_fred_exceptions(); else - load_current_idt(); + idt_setup_early_pf(); } /* diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index b7d9f530ae1646..8bd84114c2d961 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -83,7 +83,6 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_AMX_TILE, X86_FEATURE_XFD }, { X86_FEATURE_SHSTK, X86_FEATURE_XSAVES }, { X86_FEATURE_FRED, X86_FEATURE_LKGS }, - { X86_FEATURE_FRED, X86_FEATURE_WRMSRNS }, {} }; diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 1640ae76548fc7..4a4118784c1313 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -188,7 +188,7 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c) update_sgx: if (!(msr & FEAT_CTL_SGX_ENABLED)) { if (enable_sgx_kvm || enable_sgx_driver) - pr_err_once("SGX disabled by BIOS.\n"); + pr_err_once("SGX disabled or unsupported by BIOS.\n"); clear_cpu_cap(c, X86_FEATURE_SGX); return; } diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 08b95a35b5cb1a..e7656cbef68d54 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -311,16 +311,18 @@ static void early_init_intel(struct cpuinfo_x86 *c) } /* - * There is a known erratum on Pentium III and Core Solo - * and Core Duo CPUs. - * " Page with PAT set to WC while associated MTRR is UC - * may consolidate to UC " - * Because of this erratum, it is better to stick with - * setting WC in MTRR rather than using PAT on these CPUs. + * PAT is broken on early family 6 CPUs, the last of which + * is "Yonah" where the erratum is named "AN7": * - * Enable PAT WC only on P4, Core 2 or later CPUs. + * Page with PAT (Page Attribute Table) Set to USWC + * (Uncacheable Speculative Write Combine) While + * Associated MTRR (Memory Type Range Register) Is UC + * (Uncacheable) May Consolidate to UC + * + * Disable PAT and fall back to MTRR on these CPUs. 
*/ - if (c->x86 == 6 && c->x86_model < 15) + if (c->x86_vfm >= INTEL_PENTIUM_PRO && + c->x86_vfm <= INTEL_CORE_YONAH) clear_cpu_cap(c, X86_FEATURE_PAT); /* diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 9a0133ef7e20ab..14bf8c232e457f 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -780,7 +780,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) { struct mce m; - mce_setup(&m); + mce_prep_record(&m); m.status = status; m.misc = misc; diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c index 7f7309ff67d0fe..3885fe05f01ecc 100644 --- a/arch/x86/kernel/cpu/mce/apei.c +++ b/arch/x86/kernel/cpu/mce/apei.c @@ -44,7 +44,7 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) else lsb = PAGE_SHIFT; - mce_setup(&m); + mce_prep_record(&m); m.bank = -1; /* Fake a memory read error with unknown channel */ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f; @@ -66,6 +66,7 @@ EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) { const u64 *i_mce = ((const u64 *) (ctx_info + 1)); + bool apicid_found = false; unsigned int cpu; struct mce m; @@ -97,20 +98,19 @@ int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) if (ctx_info->reg_arr_size < 48) return -EINVAL; - mce_setup(&m); - - m.extcpu = -1; - m.socketid = -1; - for_each_possible_cpu(cpu) { if (cpu_data(cpu).topo.initial_apicid == lapic_id) { - m.extcpu = cpu; - m.socketid = cpu_data(m.extcpu).topo.pkg_id; + apicid_found = true; break; } } - m.apicid = lapic_id; + if (!apicid_found) + return -EINVAL; + + mce_prep_record_common(&m); + mce_prep_record_per_cpu(cpu, &m); + m.bank = (ctx_info->msr_addr >> 4) & 0xFF; m.status = *i_mce; m.addr = *(i_mce + 1); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index b85ec7a4ec9e61..2a938f429c4d50 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -117,20 +117,32 @@ static struct irq_work mce_irq_work; */ BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain); -/* Do initial initialization of a struct mce */ -void mce_setup(struct mce *m) +void mce_prep_record_common(struct mce *m) { memset(m, 0, sizeof(struct mce)); - m->cpu = m->extcpu = smp_processor_id(); + + m->cpuid = cpuid_eax(1); + m->cpuvendor = boot_cpu_data.x86_vendor; + m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP); /* need the internal __ version to avoid deadlocks */ - m->time = __ktime_get_real_seconds(); - m->cpuvendor = boot_cpu_data.x86_vendor; - m->cpuid = cpuid_eax(1); - m->socketid = cpu_data(m->extcpu).topo.pkg_id; - m->apicid = cpu_data(m->extcpu).topo.initial_apicid; - m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP); - m->ppin = cpu_data(m->extcpu).ppin; - m->microcode = boot_cpu_data.microcode; + m->time = __ktime_get_real_seconds(); +} + +void mce_prep_record_per_cpu(unsigned int cpu, struct mce *m) +{ + m->cpu = cpu; + m->extcpu = cpu; + m->apicid = cpu_data(cpu).topo.initial_apicid; + m->microcode = cpu_data(cpu).microcode; + m->ppin = topology_ppin(cpu); + m->socketid = topology_physical_package_id(cpu); +} + +/* Do initial initialization of a struct mce */ +void mce_prep_record(struct mce *m) +{ + mce_prep_record_common(m); + mce_prep_record_per_cpu(smp_processor_id(), m); } DEFINE_PER_CPU(struct mce, injectm); @@ -436,11 +448,11 @@ static noinstr void mce_wrmsrl(u32 msr, u64 v) static noinstr void mce_gather_info(struct 
mce *m, struct pt_regs *regs) { /* - * Enable instrumentation around mce_setup() which calls external + * Enable instrumentation around mce_prep_record() which calls external * facilities. */ instrumentation_begin(); - mce_setup(m); + mce_prep_record(m); instrumentation_end(); m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c index a05ac0716ecfb9..af44fd5dbd7c39 100644 --- a/arch/x86/kernel/cpu/mce/dev-mcelog.c +++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c @@ -314,7 +314,7 @@ static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf, /* * Need to give user space some time to set everything up, - * so do it a jiffie or two later everywhere. + * so do it a jiffy or two later everywhere. */ schedule_timeout(2); @@ -331,7 +331,6 @@ static const struct file_operations mce_chrdev_ops = { .poll = mce_chrdev_poll, .unlocked_ioctl = mce_chrdev_ioctl, .compat_ioctl = compat_ptr_ioctl, - .llseek = no_llseek, }; static struct miscdevice mce_chrdev_device = { diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h index 01f8f03969e63e..43c7f3b71df59c 100644 --- a/arch/x86/kernel/cpu/mce/internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -261,6 +261,8 @@ enum mca_msr { /* Decide whether to add MCE record to MCE event pool or filter it out. */ extern bool filter_mce(struct mce *m); +void mce_prep_record_common(struct mce *m); +void mce_prep_record_per_cpu(unsigned int cpu, struct mce *m); #ifdef CONFIG_X86_MCE_AMD extern bool amd_filter_mce(struct mce *m); diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index c0d56c02b8da9f..f63b051f25a0a9 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -89,6 +89,31 @@ static struct equiv_cpu_table { struct equiv_cpu_entry *entry; } equiv_table; +union zen_patch_rev { + struct { + __u32 rev : 8, + stepping : 4, + model : 4, + __reserved : 4, + ext_model : 4, + ext_fam : 8; + }; + __u32 ucode_rev; +}; + +union cpuid_1_eax { + struct { + __u32 stepping : 4, + model : 4, + family : 4, + __reserved0 : 4, + ext_model : 4, + ext_fam : 8, + __reserved1 : 4; + }; + __u32 full; +}; + /* * This points to the current valid container of microcode patches which we will * save from the initrd/builtin before jettisoning its contents. @mc is the @@ -96,7 +121,6 @@ static struct equiv_cpu_table { */ struct cont_desc { struct microcode_amd *mc; - u32 cpuid_1_eax; u32 psize; u8 *data; size_t size; @@ -109,10 +133,42 @@ struct cont_desc { static const char ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin"; +/* + * This is CPUID(1).EAX on the BSP. It is used in two ways: + * + * 1. To ignore the equivalence table on Zen1 and newer. + * + * 2. To match which patches to load because the patch revision ID + * already contains the f/m/s for which the microcode is destined + * for. + */ +static u32 bsp_cpuid_1_eax __ro_after_init; + +static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val) +{ + union zen_patch_rev p; + union cpuid_1_eax c; + + p.ucode_rev = val; + c.full = 0; + + c.stepping = p.stepping; + c.model = p.model; + c.ext_model = p.ext_model; + c.family = 0xf; + c.ext_fam = p.ext_fam; + + return c; +} + static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) { unsigned int i; + /* Zen and newer do not need an equivalence table. 
*/ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + return 0; + if (!et || !et->num_entries) return 0; @@ -159,6 +215,10 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size) if (!verify_container(buf, buf_size)) return false; + /* Zen and newer do not need an equivalence table. */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + return true; + cont_type = hdr[1]; if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) { pr_debug("Wrong microcode container equivalence table type: %u.\n", @@ -222,8 +282,9 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) * exceed the per-family maximum). @sh_psize is the size read from the section * header. */ -static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size) +static unsigned int __verify_patch_size(u32 sh_psize, size_t buf_size) { + u8 family = x86_family(bsp_cpuid_1_eax); u32 max_size; if (family >= 0x15) @@ -258,9 +319,9 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size * positive: patch is not for this family, skip it * 0: success */ -static int -verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size) +static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size) { + u8 family = x86_family(bsp_cpuid_1_eax); struct microcode_header_amd *mc_hdr; unsigned int ret; u32 sh_psize; @@ -286,7 +347,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size) return -1; } - ret = __verify_patch_size(family, sh_psize, buf_size); + ret = __verify_patch_size(sh_psize, buf_size); if (!ret) { pr_debug("Per-family patch size mismatch.\n"); return -1; @@ -308,6 +369,15 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size) return 0; } +static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id) +{ + /* Zen and newer do not need an equivalence table. */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax; + else + return eq_id == mc->hdr.processor_rev_id; +} + /* * This scans the ucode blob for the proper container as we can have multiple * containers glued together. Returns the equivalence ID from the equivalence @@ -336,7 +406,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) * doesn't contain a patch for the CPU, scan through the whole container * so that it can be skipped in case there are other containers appended. 
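A worked example of the patch-level decoding implemented by ucode_rev_to_cpuid() above; the concrete revision value is illustrative, not taken from the patch:

static u32 demo_decode_patch_level(void)
{
	/* Assumed example value for a family 0x19 part. */
	union zen_patch_rev p = { .ucode_rev = 0x0a201205 };

	/*
	 * Decodes as ext_fam=0x0a, ext_model=0x2, model=0x1, stepping=0x2,
	 * rev=0x05, which packs into CPUID(1).EAX = 0x00a20f12, i.e.
	 * family 0x19, model 0x21, stepping 2, so the >= 0x17 fast path
	 * above applies and the equivalence table is skipped.
	 */
	return ucode_rev_to_cpuid(p.ucode_rev).full;
}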
*/ - eq_id = find_equiv_id(&table, desc->cpuid_1_eax); + eq_id = find_equiv_id(&table, bsp_cpuid_1_eax); buf += hdr[2] + CONTAINER_HDR_SZ; size -= hdr[2] + CONTAINER_HDR_SZ; @@ -350,7 +420,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u32 patch_size; int ret; - ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size); + ret = verify_patch(buf, size, &patch_size); if (ret < 0) { /* * Patch verification failed, skip to the next container, if @@ -363,7 +433,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) } mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE); - if (eq_id == mc->hdr.processor_rev_id) { + if (mc_patch_matches(mc, eq_id)) { desc->psize = patch_size; desc->mc = mc; } @@ -421,6 +491,7 @@ static int __apply_microcode_amd(struct microcode_amd *mc) /* verify patch application was successful */ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); + if (rev != mc->hdr.patch_id) return -1; @@ -438,14 +509,12 @@ static int __apply_microcode_amd(struct microcode_amd *mc) * * Returns true if container found (sets @desc), false otherwise. */ -static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size) +static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size) { struct cont_desc desc = { 0 }; struct microcode_amd *mc; bool ret = false; - desc.cpuid_1_eax = cpuid_1_eax; - scan_containers(ucode, size, &desc); mc = desc.mc; @@ -463,9 +532,10 @@ static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, siz return !__apply_microcode_amd(mc); } -static bool get_builtin_microcode(struct cpio_data *cp, u8 family) +static bool get_builtin_microcode(struct cpio_data *cp) { char fw_name[36] = "amd-ucode/microcode_amd.bin"; + u8 family = x86_family(bsp_cpuid_1_eax); struct firmware fw; if (IS_ENABLED(CONFIG_X86_32)) @@ -484,11 +554,11 @@ static bool get_builtin_microcode(struct cpio_data *cp, u8 family) return false; } -static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) +static void __init find_blobs_in_containers(struct cpio_data *ret) { struct cpio_data cp; - if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) + if (!get_builtin_microcode(&cp)) cp = find_microcode_in_initrd(ucode_path); *ret = cp; @@ -499,16 +569,18 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_ struct cpio_data cp = { }; u32 dummy; + bsp_cpuid_1_eax = cpuid_1_eax; + native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy); /* Needed in load_microcode_amd() */ ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; - find_blobs_in_containers(cpuid_1_eax, &cp); + find_blobs_in_containers(&cp); if (!(cp.data && cp.size)) return; - if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size)) + if (early_apply_microcode(ed->old_rev, cp.data, cp.size)) native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy); } @@ -525,12 +597,10 @@ static int __init save_microcode_in_initrd(void) if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) return 0; - find_blobs_in_containers(cpuid_1_eax, &cp); + find_blobs_in_containers(&cp); if (!(cp.data && cp.size)) return -EINVAL; - desc.cpuid_1_eax = cpuid_1_eax; - scan_containers(cp.data, cp.size, &desc); if (!desc.mc) return -EINVAL; @@ -543,26 +613,65 @@ static int __init save_microcode_in_initrd(void) } early_initcall(save_microcode_in_initrd); +static inline bool patch_cpus_equivalent(struct ucode_patch *p, struct ucode_patch *n) +{ + /* Zen and newer 
hardcode the f/m/s in the patch ID */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) { + union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id); + union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id); + + /* Zap stepping */ + p_cid.stepping = 0; + n_cid.stepping = 0; + + return p_cid.full == n_cid.full; + } else { + return p->equiv_cpu == n->equiv_cpu; + } +} + /* * a small, trivial cache of per-family ucode patches */ -static struct ucode_patch *cache_find_patch(u16 equiv_cpu) +static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu) { struct ucode_patch *p; + struct ucode_patch n; + + n.equiv_cpu = equiv_cpu; + n.patch_id = uci->cpu_sig.rev; + + WARN_ON_ONCE(!n.patch_id); list_for_each_entry(p, &microcode_cache, plist) - if (p->equiv_cpu == equiv_cpu) + if (patch_cpus_equivalent(p, &n)) return p; + return NULL; } +static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n) +{ + /* Zen and newer hardcode the f/m/s in the patch ID */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) { + union zen_patch_rev zp, zn; + + zp.ucode_rev = p->patch_id; + zn.ucode_rev = n->patch_id; + + return zn.rev > zp.rev; + } else { + return n->patch_id > p->patch_id; + } +} + static void update_cache(struct ucode_patch *new_patch) { struct ucode_patch *p; list_for_each_entry(p, &microcode_cache, plist) { - if (p->equiv_cpu == new_patch->equiv_cpu) { - if (p->patch_id >= new_patch->patch_id) { + if (patch_cpus_equivalent(p, new_patch)) { + if (!patch_newer(p, new_patch)) { /* we already have the latest patch */ kfree(new_patch->data); kfree(new_patch); @@ -593,13 +702,22 @@ static void free_cache(void) static struct ucode_patch *find_patch(unsigned int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - u16 equiv_id; + u32 rev, dummy __always_unused; + u16 equiv_id = 0; - equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); - if (!equiv_id) - return NULL; + /* fetch rev if not populated yet: */ + if (!uci->cpu_sig.rev) { + rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); + uci->cpu_sig.rev = rev; + } + + if (x86_family(bsp_cpuid_1_eax) < 0x17) { + equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); + if (!equiv_id) + return NULL; + } - return cache_find_patch(equiv_id); + return cache_find_patch(uci, equiv_id); } void reload_ucode_amd(unsigned int cpu) @@ -649,7 +767,7 @@ static enum ucode_state apply_microcode_amd(int cpu) struct ucode_cpu_info *uci; struct ucode_patch *p; enum ucode_state ret; - u32 rev, dummy __always_unused; + u32 rev; BUG_ON(raw_smp_processor_id() != cpu); @@ -659,11 +777,11 @@ static enum ucode_state apply_microcode_amd(int cpu) if (!p) return UCODE_NFOUND; + rev = uci->cpu_sig.rev; + mc_amd = p->data; uci->mc = p->data; - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - /* need to apply patch? */ if (rev > mc_amd->hdr.patch_id) { ret = UCODE_OK; @@ -709,6 +827,10 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) hdr = (const u32 *)buf; equiv_tbl_len = hdr[2]; + /* Zen and newer do not need an equivalence table.
*/ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + goto out; + equiv_table.entry = vmalloc(equiv_tbl_len); if (!equiv_table.entry) { pr_err("failed to allocate equivalent CPU table\n"); @@ -718,12 +840,16 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len); equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry); +out: /* add header length */ return equiv_tbl_len + CONTAINER_HDR_SZ; } static void free_equiv_cpu_table(void) { + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + return; + vfree(equiv_table.entry); memset(&equiv_table, 0, sizeof(equiv_table)); } @@ -749,7 +875,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, u16 proc_id; int ret; - ret = verify_patch(family, fw, leftover, patch_size); + ret = verify_patch(fw, leftover, patch_size); if (ret) return ret; @@ -774,7 +900,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, patch->patch_id = mc_hdr->patch_id; patch->equiv_cpu = proc_id; - pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n", + pr_debug("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n", __func__, patch->patch_id, proc_id); /* ... and add to cache. */ diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index ead967479fa634..d18078834dedac 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -537,16 +536,6 @@ static void __init ms_hyperv_init_platform(void) if (efi_enabled(EFI_BOOT)) x86_platform.get_nmi_reason = hv_get_nmi_reason; - /* - * Hyper-V VMs have a PIT emulation quirk such that zeroing the - * counter register during PIT shutdown restarts the PIT. So it - * continues to interrupt @18.2 HZ. Setting i8253_clear_counter - * to false tells pit_shutdown() not to zero the counter so that - * the PIT really is shutdown. Generation 2 VMs don't have a PIT, - * and setting this value has no effect. - */ - i8253_clear_counter_on_shutdown = false; - #if IS_ENABLED(CONFIG_HYPERV) if ((hv_get_isolation_type() == HV_ISOLATION_TYPE_VBS) || ms_hyperv.paravisor_present) diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 2a2fc14955cd3b..989d368be04fcc 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -55,6 +55,12 @@ #include "mtrr.h" +static_assert(X86_MEMTYPE_UC == MTRR_TYPE_UNCACHABLE); +static_assert(X86_MEMTYPE_WC == MTRR_TYPE_WRCOMB); +static_assert(X86_MEMTYPE_WT == MTRR_TYPE_WRTHROUGH); +static_assert(X86_MEMTYPE_WP == MTRR_TYPE_WRPROT); +static_assert(X86_MEMTYPE_WB == MTRR_TYPE_WRBACK); + /* arch_phys_wc_add returns an MTRR register index plus this offset. 
*/ #define MTRR_TO_PHYS_WC_OFFSET 1000 diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index e69489d48625bd..972e6b6b0481ff 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -1567,7 +1567,6 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) static const struct file_operations pseudo_lock_dev_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = NULL, .write = NULL, .open = pseudo_lock_dev_open, diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 27892e57c4ef93..9ace84486499b8 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -475,24 +475,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void) { struct sgx_epc_page *page; int nid_of_current = numa_node_id(); - int nid = nid_of_current; + int nid_start, nid; - if (node_isset(nid_of_current, sgx_numa_mask)) { - page = __sgx_alloc_epc_page_from_node(nid_of_current); - if (page) - return page; - } - - /* Fall back to the non-local NUMA nodes: */ - while (true) { - nid = next_node_in(nid, sgx_numa_mask); - if (nid == nid_of_current) - break; + /* + * Try local node first. If it doesn't have an EPC section, + * fall back to the non-local NUMA nodes. + */ + if (node_isset(nid_of_current, sgx_numa_mask)) + nid_start = nid_of_current; + else + nid_start = next_node_in(nid_of_current, sgx_numa_mask); + nid = nid_start; + do { page = __sgx_alloc_epc_page_from_node(nid); if (page) return page; - } + + nid = next_node_in(nid, sgx_numa_mask); + } while (nid != nid_start); return ERR_PTR(-ENOMEM); } @@ -732,7 +733,7 @@ int arch_memory_failure(unsigned long pfn, int flags) return 0; } -/** +/* * A section metric is concatenated in a way that @low bits 12-31 define the * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the * metric. 
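A worked form of that packing as a hedged sketch; the helper name is invented, and the real combiner in sgx/main.c is not part of this hunk:

#include <linux/bits.h>
#include <linux/types.h>

/* Bits 12-31 of the metric come from @low, bits 32-51 from @high. */
static inline u64 demo_calc_section_metric(u64 low, u64 high)
{
	return (low & GENMASK_ULL(31, 12)) + ((high & GENMASK_ULL(19, 0)) << 32);
}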
@@ -847,6 +848,13 @@ static bool __init sgx_page_cache_init(void) return false; } + for_each_online_node(nid) { + if (!node_isset(nid, sgx_numa_mask) && + node_state(nid, N_MEMORY) && node_state(nid, N_CPU)) + pr_info("node%d has both CPUs and memory but doesn't have an EPC section\n", + nid); + } + return true; } @@ -895,10 +903,10 @@ int sgx_set_attribute(unsigned long *allowed_attributes, { struct fd f = fdget(attribute_fd); - if (!f.file) + if (!fd_file(f)) return -EINVAL; - if (f.file->f_op != &sgx_provision_fops) { + if (fd_file(f)->f_op != &sgx_provision_fops) { fdput(f); return -EINVAL; } diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c index 53935b4d62e305..9535a6507db755 100644 --- a/arch/x86/kernel/eisa.c +++ b/arch/x86/kernel/eisa.c @@ -11,15 +11,15 @@ static __init int eisa_bus_probe(void) { - void __iomem *p; + u32 *p; if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return 0; - p = ioremap(0x0FFFD9, 4); - if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24)) + p = memremap(0x0FFFD9, 4, MEMREMAP_WB); + if (p && *p == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24)) EISA_bus = 1; - iounmap(p); + memunmap(p); return 0; } subsys_initcall(eisa_bus_probe); diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 247f2225aa9f36..1065ab995305cd 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -63,6 +63,16 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf, return true; } +/* + * Update the value of PKRU register that was already pushed onto the signal frame. + */ +static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru) +{ + if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE))) + return 0; + return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU)); +} + /* * Signal frame handlers. */ @@ -156,10 +166,17 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, return !err; } -static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) +static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru) { - if (use_xsave()) - return xsave_to_user_sigframe(buf); + int err = 0; + + if (use_xsave()) { + err = xsave_to_user_sigframe(buf); + if (!err) + err = update_pkru_in_sigframe(buf, pkru); + return err; + } + if (use_fxsr()) return fxsave_to_user_sigframe((struct fxregs_state __user *) buf); else @@ -185,7 +202,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) * For [f]xsave state, update the SW reserved fields in the [f]xsave frame * indicating the absence/presence of the extended state to the user. 
*/ -bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) +bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru) { struct task_struct *tsk = current; struct fpstate *fpstate = tsk->thread.fpu.fpstate; @@ -228,7 +245,7 @@ bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) fpregs_restore_userregs(); pagefault_disable(); - ret = copy_fpregs_to_sigframe(buf_fx); + ret = copy_fpregs_to_sigframe(buf_fx, pkru); pagefault_enable(); fpregs_unlock(); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 1339f8328db5ad..22abb5ee0cf2dc 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -23,6 +24,8 @@ #include #include +#include + #include "context.h" #include "internal.h" #include "legacy.h" @@ -996,6 +999,19 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr) } EXPORT_SYMBOL_GPL(get_xsave_addr); +/* + * Given an xstate feature nr, calculate where in the xsave buffer the state is. + * The xsave buffer should be in standard format, not compacted (e.g. user mode + * signal frames). + */ +void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr) +{ + if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr))) + return NULL; + + return (void __user *)xsave + xstate_offsets[xfeature_nr]; +} + #ifdef CONFIG_ARCH_HAS_PKEYS /* @@ -1841,3 +1857,89 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, return 0; } #endif /* CONFIG_PROC_PID_ARCH_STATUS */ + +#ifdef CONFIG_COREDUMP +static const char owner_name[] = "LINUX"; + +/* + * Dump type, size, offset and flag values for every xfeature that is present. + */ +static int dump_xsave_layout_desc(struct coredump_params *cprm) +{ + int num_records = 0; + int i; + + for_each_extended_xfeature(i, fpu_user_cfg.max_features) { + struct x86_xfeat_component xc = { + .type = i, + .size = xstate_sizes[i], + .offset = xstate_offsets[i], + /* reserved for future use */ + .flags = 0, + }; + + if (!dump_emit(cprm, &xc, sizeof(xc))) + return 0; + + num_records++; + } + return num_records; +} + +static u32 get_xsave_desc_size(void) +{ + u32 cnt = 0; + u32 i; + + for_each_extended_xfeature(i, fpu_user_cfg.max_features) + cnt++; + + return cnt * (sizeof(struct x86_xfeat_component)); +} + +int elf_coredump_extra_notes_write(struct coredump_params *cprm) +{ + int num_records = 0; + struct elf_note en; + + if (!fpu_user_cfg.max_features) + return 0; + + en.n_namesz = sizeof(owner_name); + en.n_descsz = get_xsave_desc_size(); + en.n_type = NT_X86_XSAVE_LAYOUT; + + if (!dump_emit(cprm, &en, sizeof(en))) + return 1; + if (!dump_emit(cprm, owner_name, en.n_namesz)) + return 1; + if (!dump_align(cprm, 4)) + return 1; + + num_records = dump_xsave_layout_desc(cprm); + if (!num_records) + return 1; + + /* Total size should be equal to the number of records */ + if ((sizeof(struct x86_xfeat_component) * num_records) != en.n_descsz) + return 1; + + return 0; +} + +int elf_coredump_extra_notes_size(void) +{ + int size; + + if (!fpu_user_cfg.max_features) + return 0; + + /* .note header */ + size = sizeof(struct elf_note); + /* Name plus alignment to 4 bytes */ + size += roundup(sizeof(owner_name), 4); + size += get_xsave_desc_size(); + + return size; +} +#endif /* CONFIG_COREDUMP */ diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h index afb404cd205911..0b86a5002c846d 100644 --- a/arch/x86/kernel/fpu/xstate.h 
+++ b/arch/x86/kernel/fpu/xstate.h @@ -54,6 +54,8 @@ extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void extern void fpu__init_cpu_xstate(void); extern void fpu__init_system_xstate(unsigned int legacy_size); +extern void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr); + static inline u64 xfeatures_mask_supervisor(void) { return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED; diff --git a/arch/x86/kernel/fred.c b/arch/x86/kernel/fred.c index 4bcd8791ad96ad..8d32c3f48abc0c 100644 --- a/arch/x86/kernel/fred.c +++ b/arch/x86/kernel/fred.c @@ -21,17 +21,53 @@ #define FRED_STKLVL(vector, lvl) ((lvl) << (2 * (vector))) +DEFINE_PER_CPU(unsigned long, fred_rsp0); +EXPORT_PER_CPU_SYMBOL(fred_rsp0); + void cpu_init_fred_exceptions(void) { /* When FRED is enabled by default, remove this log message */ pr_info("Initialize FRED on CPU%d\n", smp_processor_id()); + /* + * If a kernel event is delivered before a CPU goes to user level for + * the first time, its SS is NULL thus NULL is pushed into the SS field + * of the FRED stack frame. But before ERETS is executed, the CPU may + * context switch to another task and go to user level. Then when the + * CPU comes back to kernel mode, SS is changed to __KERNEL_DS. Later + * when ERETS is executed to return from the kernel event handler, a #GP + * fault is generated because SS doesn't match the SS saved in the FRED + * stack frame. + * + * Initialize SS to __KERNEL_DS when enabling FRED to avoid such #GPs. + */ + loadsegment(ss, __KERNEL_DS); + wrmsrl(MSR_IA32_FRED_CONFIG, /* Reserve for CALL emulation */ FRED_CONFIG_REDZONE | FRED_CONFIG_INT_STKLVL(0) | FRED_CONFIG_ENTRYPOINT(asm_fred_entrypoint_user)); + wrmsrl(MSR_IA32_FRED_STKLVLS, 0); + wrmsrl(MSR_IA32_FRED_RSP0, 0); + wrmsrl(MSR_IA32_FRED_RSP1, 0); + wrmsrl(MSR_IA32_FRED_RSP2, 0); + wrmsrl(MSR_IA32_FRED_RSP3, 0); + + /* Enable FRED */ + cr4_set_bits(X86_CR4_FRED); + /* Any further IDT use is a bug */ + idt_invalidate(); + + /* Use int $0x80 for 32-bit system calls in FRED mode */ + setup_clear_cpu_cap(X86_FEATURE_SYSENTER32); + setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); +} + +/* Must be called after setup_cpu_entry_areas() */ +void cpu_init_fred_rsps(void) +{ /* * The purpose of separate stacks for NMI, #DB and #MC *in the kernel* * (remember that user space faults are always taken on stack level 0) @@ -47,13 +83,4 @@ void cpu_init_fred_exceptions(void) wrmsrl(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB)); wrmsrl(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI)); wrmsrl(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF)); - - /* Enable FRED */ - cr4_set_bits(X86_CR4_FRED); - /* Any further IDT use is a bug */ - idt_invalidate(); - - /* Use int $0x80 for 32-bit system calls in FRED mode */ - setup_clear_cpu_cap(X86_FEATURE_SYSENTER32); - setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); } diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index a817ed0724d1e7..4b9d4557fc94a4 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -559,10 +559,11 @@ void early_setup_idt(void) */ void __head startup_64_setup_gdt_idt(void) { + struct desc_struct *gdt = (void *)(__force unsigned long)init_per_cpu_var(gdt_page.gdt); void *handler = NULL; struct desc_ptr startup_gdt_descr = { - .address = (unsigned long)&RIP_REL_REF(init_per_cpu_var(gdt_page.gdt)), + .address = (unsigned long)&RIP_REL_REF(*gdt), .size = GDT_SIZE - 1, }; diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 
330922b328bfed..16752b8dfa8992 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -32,13 +32,6 @@ * We are not able to switch in one step to the final KERNEL ADDRESS SPACE * because we need identity-mapped pages. */ -#define l4_index(x) (((x) >> 39) & 511) -#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) - -L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4) -L4_START_KERNEL = l4_index(__START_KERNEL_map) - -L3_START_KERNEL = pud_index(__START_KERNEL_map) __HEAD .code64 @@ -577,9 +570,6 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb) SYM_CODE_END(vc_no_ghcb) #endif -#define SYM_DATA_START_PAGE_ALIGNED(name) \ - SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE) - #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION /* * Each PGD needs to be 8k long and 8k aligned. We do not @@ -601,14 +591,6 @@ SYM_CODE_END(vc_no_ghcb) #define PTI_USER_PGD_FILL 0 #endif -/* Automate the creation of 1 to 1 mapping pmd entries */ -#define PMDS(START, PERM, COUNT) \ - i = 0 ; \ - .rept (COUNT) ; \ - .quad (START) + (i << PMD_SHIFT) + (PERM) ; \ - i = i + 1 ; \ - .endr - __INITDATA .balign 4 @@ -708,8 +690,6 @@ SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt) .endr SYM_DATA_END(level1_fixmap_pgt) -#undef PMDS - .data .align 16 diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 2b7999a1a50a83..80e262bb627fe1 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -39,9 +40,15 @@ static bool __init use_pit(void) bool __init pit_timer_init(void) { - if (!use_pit()) + if (!use_pit()) { + /* + * Don't just ignore the PIT. Ensure it's stopped, because + * VMMs otherwise steal CPU time just to pointlessly waggle + * the (masked) IRQ. + */ + clockevent_i8253_disable(); return false; - + } clockevent_i8253_init(true); global_clock_event = &i8253_clockevent; return true; diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index df337860612d84..cd8ed1edbf9ee7 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index cc0f7f70b17ba3..9c9ac606893e99 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -28,6 +28,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI /* @@ -87,6 +88,8 @@ map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p) { #ifdef CONFIG_EFI unsigned long mstart, mend; + void *kaddr; + int ret; if (!efi_enabled(EFI_BOOT)) return 0; @@ -102,6 +105,30 @@ map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p) if (!mstart) return 0; + ret = kernel_ident_mapping_init(info, level4p, mstart, mend); + if (ret) + return ret; + + kaddr = memremap(mstart, mend - mstart, MEMREMAP_WB); + if (!kaddr) { + pr_err("Could not map UEFI system table\n"); + return -ENOMEM; + } + + mstart = efi_config_table; + + if (efi_enabled(EFI_64BIT)) { + efi_system_table_64_t *stbl = (efi_system_table_64_t *)kaddr; + + mend = mstart + sizeof(efi_config_table_64_t) * stbl->nr_tables; + } else { + efi_system_table_32_t *stbl = (efi_system_table_32_t *)kaddr; + + mend = mstart + sizeof(efi_config_table_32_t) * stbl->nr_tables; + } + + memunmap(kaddr); + return kernel_ident_mapping_init(info, level4p, mstart, mend); #endif return 0; diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index c94dec6a18345a..1f54eedc3015e9 100644 --- 
a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index e89171b0347a6b..4a1b1b28abf95c 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -68,7 +68,7 @@ static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str) { memcpy(str, m->bustype, 6); str[6] = 0; - apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str); + apic_pr_verbose("Bus #%d is %s\n", m->busid, str); } static void __init MP_bus_info(struct mpc_bus *m) @@ -417,7 +417,7 @@ static unsigned long __init get_mpc_size(unsigned long physptr) mpc = early_memremap(physptr, PAGE_SIZE); size = mpc->length; early_memunmap(mpc, PAGE_SIZE); - apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size); + apic_pr_verbose(" mpc: %lx-%lx\n", physptr, physptr + size); return size; } @@ -560,8 +560,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) struct mpf_intel *mpf; int ret = 0; - apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n", - base, base + length - 1); + apic_pr_verbose("Scan for SMP in [mem %#010lx-%#010lx]\n", base, base + length - 1); BUILD_BUG_ON(sizeof(*mpf) != 16); while (length > 0) { @@ -683,13 +682,13 @@ static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) { int i; - apic_printk(APIC_VERBOSE, "OLD "); + apic_pr_verbose("OLD "); print_mp_irq_info(m); i = get_MP_intsrc_index(m); if (i > 0) { memcpy(m, &mp_irqs[i], sizeof(*m)); - apic_printk(APIC_VERBOSE, "NEW "); + apic_pr_verbose("NEW "); print_mp_irq_info(&mp_irqs[i]); return; } @@ -772,7 +771,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, continue; if (nr_m_spare > 0) { - apic_printk(APIC_VERBOSE, "*NEW* found\n"); + apic_pr_verbose("*NEW* found\n"); nr_m_spare--; memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i])); m_spare[nr_m_spare] = NULL; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 6d3d20e3e43a9b..226472332a70dd 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -798,6 +798,32 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) #define LAM_U57_BITS 6 +static void enable_lam_func(void *__mm) +{ + struct mm_struct *mm = __mm; + unsigned long lam; + + if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) { + lam = mm_lam_cr3_mask(mm); + write_cr3(__read_cr3() | lam); + cpu_tlbstate_update_lam(lam, mm_untag_mask(mm)); + } +} + +static void mm_enable_lam(struct mm_struct *mm) +{ + mm->context.lam_cr3_mask = X86_CR3_LAM_U57; + mm->context.untag_mask = ~GENMASK(62, 57); + + /* + * Even though the process must still be single-threaded at this + * point, kernel threads may be using the mm. IPI those kernel + * threads if they exist. + */ + on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true); + set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); +} + static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) { if (!cpu_feature_enabled(X86_FEATURE_LAM)) @@ -814,25 +840,21 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) if (mmap_write_lock_killable(mm)) return -EINTR; + /* + * MM_CONTEXT_LOCK_LAM is set on clone. 
Prevent LAM from + * being enabled unless the process is single threaded: + */ if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { mmap_write_unlock(mm); return -EBUSY; } - if (!nr_bits) { - mmap_write_unlock(mm); - return -EINVAL; - } else if (nr_bits <= LAM_U57_BITS) { - mm->context.lam_cr3_mask = X86_CR3_LAM_U57; - mm->context.untag_mask = ~GENMASK(62, 57); - } else { + if (!nr_bits || nr_bits > LAM_U57_BITS) { mmap_write_unlock(mm); return -EINVAL; } - write_cr3(__read_cr3() | mm->context.lam_cr3_mask); - set_tlbstate_lam_mode(mm); - set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); + mm_enable_lam(mm); mmap_write_unlock(mm); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 0e0a4cf6b5eb17..615922838c510b 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -530,7 +530,7 @@ static inline void kb_wait(void) static inline void nmi_shootdown_cpus_on_restart(void); -#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD) +#if IS_ENABLED(CONFIG_KVM_X86) /* RCU-protected callback to disable virtualization prior to reboot. */ static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback; @@ -600,7 +600,7 @@ static void emergency_reboot_disable_virtualization(void) } #else static void emergency_reboot_disable_virtualization(void) { } -#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */ +#endif /* CONFIG_KVM_X86 */ void __attribute__((weak)) mach_reboot_fixups(void) { diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 042c9a0334e94b..e9e88c342f752e 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -170,6 +170,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) wbinvd .Lsme_off: + /* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */ movq %rcx, %r11 call swap_pages @@ -258,7 +259,7 @@ SYM_CODE_END(virtual_mapped) /* Do the copies */ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) UNWIND_HINT_END_OF_STACK - movq %rdi, %rcx /* Put the page_list in %rcx */ + movq %rdi, %rcx /* Put the indirection_page in %rcx */ xorl %edi, %edi xorl %esi, %esi jmp 1f @@ -289,18 +290,21 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) movq %rcx, %rsi /* For ever source page do a copy */ andq $0xfffffffffffff000, %rsi - movq %rdi, %rdx - movq %rsi, %rax + movq %rdi, %rdx /* Save destination page to %rdx */ + movq %rsi, %rax /* Save source page to %rax */ + /* copy source page to swap page */ movq %r10, %rdi movl $512, %ecx rep ; movsq + /* copy destination page to source page */ movq %rax, %rdi movq %rdx, %rsi movl $512, %ecx rep ; movsq + /* copy swap page to destination page */ movq %rdx, %rdi movq %r10, %rsi movl $512, %ecx diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 6129dc2ba784cb..f1fea506e20f41 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1039,7 +1039,12 @@ void __init setup_arch(char **cmdline_p) init_mem_mapping(); - idt_setup_early_pf(); + /* + * init_mem_mapping() relies on the early IDT page fault handling. + * Now either enable FRED or install the real page fault handler + * for 64-bit in the IDT. 
+ */ + cpu_init_replace_early_idt(); /* * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features) diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 31b6f5dddfc274..5f441039b5725f 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -60,6 +60,24 @@ static inline int is_x32_frame(struct ksignal *ksig) ksig->ka.sa.sa_flags & SA_X32_ABI; } +/* + * Enable all pkeys temporarily, so as to ensure that both the current + * execution stack as well as the alternate signal stack are writeable. + * The application can use any of the available pkeys to protect the + * alternate signal stack, and we don't know which one it is, so enable + * all. The PKRU register will be reset to init_pkru later in the flow, + * in fpu__clear_user_states(), and it is the application's responsibility + * to enable the appropriate pkey as the first step in the signal handler + * so that the handler does not segfault. + */ +static inline u32 sig_prepare_pkru(void) +{ + u32 orig_pkru = read_pkru(); + + write_pkru(0); + return orig_pkru; +} + /* * Set up a signal frame. */ @@ -84,6 +102,7 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size, unsigned long math_size = 0; unsigned long sp = regs->sp; unsigned long buf_fx = 0; + u32 pkru; /* redzone */ if (!ia32_frame) @@ -138,9 +157,17 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size, return (void __user *)-1L; } + /* Update PKRU to enable access to the alternate signal stack. */ + pkru = sig_prepare_pkru(); /* save i387 and extended state */ - if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size)) + if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size, pkru)) { + /* + * Restore PKRU to the original, user-defined value; disable + * extra pkeys enabled for the alternate signal stack, if any. 
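For background, PKRU packs two bits per protection key (access-disable and write-disable), so writing 0 grants full access under every key, which is why the all-pkeys-enabled window above covers any pkey the application may have used for either stack. A minimal stand-alone model of that permission check, purely illustrative and with no real rdpkru/wrpkru:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* PKRU layout: bit 2k = access-disable, bit 2k+1 = write-disable for key k. */
static bool pkru_allows_write(uint32_t pkru, int pkey)
{
	uint32_t bits = (pkru >> (2 * pkey)) & 0x3;

	return bits == 0;	/* writable only if neither AD nor WD is set */
}

int main(void)
{
	uint32_t user_pkru = 0x0000000c;	/* key 1 has AD|WD set */

	printf("key 1 writable with user PKRU: %d\n", pkru_allows_write(user_pkru, 1));
	printf("key 1 writable with PKRU == 0: %d\n", pkru_allows_write(0, 1));
	return 0;
}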
+ */ + write_pkru(pkru); return (void __user *)-1L; + } return (void __user *)sp; } diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 8a94053c544465..ee9453891901b7 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -260,13 +260,13 @@ SYSCALL_DEFINE0(rt_sigreturn) set_current_blocked(&set); - if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) + if (restore_altstack(&frame->uc.uc_stack)) goto badframe; - if (restore_signal_shadow_stack()) + if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) goto badframe; - if (restore_altstack(&frame->uc.uc_stack)) + if (restore_signal_shadow_stack()) goto badframe; return regs->ax; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0c35207320cb4f..766f092dab80b3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include @@ -246,7 +247,7 @@ static void notrace start_secondary(void *unused) __flush_tlb_all(); } - cpu_init_exception_handling(); + cpu_init_exception_handling(false); /* * Load the microcode before reaching the AP alive synchronization diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 01d7cd85ef9789..87f8c9a71c4964 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -121,7 +121,7 @@ static inline unsigned long stack_guard_placement(vm_flags_t vm_flags) } unsigned long -arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len, +arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; @@ -158,7 +158,7 @@ arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned l } unsigned long -arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr0, +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { @@ -228,20 +228,5 @@ arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr0, * can happen with large stack limits and large mmap() * allocations. */ - return arch_get_unmapped_area(filp, addr0, len, pgoff, flags); -} - -unsigned long -arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) -{ - return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0); -} - -unsigned long -arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, - const unsigned long len, const unsigned long pgoff, - const unsigned long flags) -{ - return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, flags, 0); + return arch_get_unmapped_area(filp, addr0, len, pgoff, flags, 0); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 4fa0b17e5043aa..d05392db5d0fec 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -91,6 +92,47 @@ __always_inline int is_valid_bugaddr(unsigned long addr) return *(unsigned short *)addr == INSN_UD2; } +/* + * Check for UD1 or UD2, accounting for Address Size Override Prefixes. + * If it's a UD1, get the ModRM byte to pass along to UBSan. 
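A self-contained sketch of the same decode, run over a byte buffer rather than a kernel fault address. The constants are redefined locally from the x86 encodings (0x67 address-size override, 0x0f two-byte escape, 0x0f 0xb9 UD1, 0x0f 0x0b UD2) and the ModRM mod/rm handling mirrors the kernel function below; everything else is simplified for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INSN_ASOP		0x67	/* address-size override prefix */
#define OPCODE_ESCAPE		0x0f
#define SECOND_BYTE_OPCODE_UD1	0xb9
#define SECOND_BYTE_OPCODE_UD2	0x0b

enum bug_type { BUG_NONE, BUG_UD1, BUG_UD2 };

/* Classify a UD1/UD2 at the start of @insn; for UD1, pull the type out of the displacement. */
static enum bug_type decode_ud(const uint8_t *insn, uint32_t *imm)
{
	uint8_t v = *insn++;

	if (v == INSN_ASOP)
		v = *insn++;
	if (v != OPCODE_ESCAPE)
		return BUG_NONE;

	v = *insn++;
	if (v == SECOND_BYTE_OPCODE_UD2)
		return BUG_UD2;
	if (v != SECOND_BYTE_OPCODE_UD1)
		return BUG_NONE;

	v = *insn++;			/* ModRM byte */
	if ((v & 0x07) == 4)		/* r/m == 4: a SIB byte follows */
		insn++;

	*imm = 0;
	if (((v >> 6) & 0x3) == 1)	/* mod == 1: disp8 carries the type */
		*imm = *insn;
	else if (((v >> 6) & 0x3) == 2)	/* mod == 2: disp32 carries the type */
		memcpy(imm, insn, sizeof(*imm));

	return BUG_UD1;
}

int main(void)
{
	/* ASOP prefix, escape, UD1, ModRM 0x40 (mod=1, rm=0), disp8 = 4 */
	const uint8_t bytes[] = { 0x67, 0x0f, 0xb9, 0x40, 0x04 };
	uint32_t imm = 0;

	printf("type %d, imm %u\n", decode_ud(bytes, &imm), imm);
	return 0;
}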
+ */ +__always_inline int decode_bug(unsigned long addr, u32 *imm) +{ + u8 v; + + if (addr < TASK_SIZE_MAX) + return BUG_NONE; + + v = *(u8 *)(addr++); + if (v == INSN_ASOP) + v = *(u8 *)(addr++); + if (v != OPCODE_ESCAPE) + return BUG_NONE; + + v = *(u8 *)(addr++); + if (v == SECOND_BYTE_OPCODE_UD2) + return BUG_UD2; + + if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1) + return BUG_NONE; + + /* Retrieve the immediate (type value) for the UBSAN UD1 */ + v = *(u8 *)(addr++); + if (X86_MODRM_RM(v) == 4) + addr++; + + *imm = 0; + if (X86_MODRM_MOD(v) == 1) + *imm = *(u8 *)addr; + else if (X86_MODRM_MOD(v) == 2) + *imm = *(u32 *)addr; + else + WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v)); + + return BUG_UD1; +} + + static nokprobe_inline int do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str, struct pt_regs *regs, long error_code) @@ -216,6 +258,8 @@ static inline void handle_invalid_op(struct pt_regs *regs) static noinstr bool handle_bug(struct pt_regs *regs) { bool handled = false; + int ud_type; + u32 imm; /* * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug() @@ -223,7 +267,8 @@ static noinstr bool handle_bug(struct pt_regs *regs) * irqentry_enter(). */ kmsan_unpoison_entry_regs(regs); - if (!is_valid_bugaddr(regs->ip)) + ud_type = decode_bug(regs->ip, &imm); + if (ud_type == BUG_NONE) return handled; /* @@ -236,10 +281,14 @@ static noinstr bool handle_bug(struct pt_regs *regs) */ if (regs->flags & X86_EFLAGS_IF) raw_local_irq_enable(); - if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN || - handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) { - regs->ip += LEN_UD2; - handled = true; + if (ud_type == BUG_UD2) { + if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN || + handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) { + regs->ip += LEN_UD2; + handled = true; + } + } else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) { + pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip); } if (regs->flags & X86_EFLAGS_IF) raw_local_irq_disable(); @@ -1402,34 +1451,8 @@ DEFINE_IDTENTRY_SW(iret_error) } #endif -/* Do not enable FRED by default yet. 
*/ -static bool enable_fred __ro_after_init = false; - -#ifdef CONFIG_X86_FRED -static int __init fred_setup(char *str) -{ - if (!str) - return -EINVAL; - - if (!cpu_feature_enabled(X86_FEATURE_FRED)) - return 0; - - if (!strcmp(str, "on")) - enable_fred = true; - else if (!strcmp(str, "off")) - enable_fred = false; - else - pr_warn("invalid FRED option: 'fred=%s'\n", str); - return 0; -} -early_param("fred", fred_setup); -#endif - void __init trap_init(void) { - if (cpu_feature_enabled(X86_FEATURE_FRED) && !enable_fred) - setup_clear_cpu_cap(X86_FEATURE_FRED); - /* Init cpu_entry_area before IST entries are set up */ setup_cpu_entry_areas(); @@ -1437,7 +1460,7 @@ void __init trap_init(void) sev_es_init_vc_handling(); /* Initialize TSS before setting up traps so ISTs work */ - cpu_init_exception_handling(); + cpu_init_exception_handling(true); /* Setup traps as cpu_init() might #GP */ if (!cpu_feature_enabled(X86_FEATURE_FRED)) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index d4462fb2629969..dfe6847fd99e5e 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -28,6 +28,7 @@ #include #include #include +#include #include unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ @@ -1253,15 +1254,12 @@ static void __init check_system_tsc_reliable(void) * - TSC which does not stop in C-States * - the TSC_ADJUST register which allows to detect even minimal * modifications - * - not more than two sockets. As the number of sockets cannot be - * evaluated at the early boot stage where this has to be - * invoked, check the number of online memory nodes as a - * fallback solution which is an reasonable estimate. + * - not more than four packages */ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && boot_cpu_has(X86_FEATURE_NONSTOP_TSC) && boot_cpu_has(X86_FEATURE_TSC_ADJUST) && - nr_online_nodes <= 4) + topology_max_packages() <= 4) tsc_disable_clocksource_watchdog(); } @@ -1290,7 +1288,7 @@ int unsynchronized_tsc(void) */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { /* assume multi socket systems are not synchronized: */ - if (num_possible_cpus() > 1) + if (topology_max_packages() > 1) return 1; } diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 6e73403e874fc6..6726be89b7a663 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -357,8 +357,7 @@ SECTIONS PERCPU_SECTION(INTERNODE_CACHE_BYTES) #endif - RUNTIME_CONST(shift, d_hash_shift) - RUNTIME_CONST(ptr, dentry_hashtable) + RUNTIME_CONST_VARIABLES . 
= ALIGN(PAGE_SIZE); diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 82b128d3f30970..0a2bbd674a6d99 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 730c2f34d34796..f09f13c01c6bbd 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -17,8 +17,8 @@ menuconfig VIRTUALIZATION if VIRTUALIZATION -config KVM - tristate "Kernel-based Virtual Machine (KVM) support" +config KVM_X86 + def_tristate KVM if KVM_INTEL || KVM_AMD depends on X86_LOCAL_APIC select KVM_COMMON select KVM_GENERIC_MMU_NOTIFIER @@ -44,7 +44,11 @@ config KVM select HAVE_KVM_PM_NOTIFIER if PM select KVM_GENERIC_HARDWARE_ENABLING select KVM_GENERIC_PRE_FAULT_MEMORY + select KVM_GENERIC_PRIVATE_MEM if KVM_SW_PROTECTED_VM select KVM_WERROR if WERROR + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" help Support hosting fully virtualized guest machines using hardware virtualization extensions. You will need a fairly recent @@ -77,7 +81,6 @@ config KVM_SW_PROTECTED_VM bool "Enable support for KVM software-protected VMs" depends on EXPERT depends on KVM && X86_64 - select KVM_GENERIC_PRIVATE_MEM help Enable support for KVM software-protected VMs. Currently, software- protected VMs are purely a development and testing vehicle for diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 5494669a055a6e..f9dddb8cb46698 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -32,7 +32,7 @@ kvm-intel-y += vmx/vmx_onhyperv.o vmx/hyperv_evmcs.o kvm-amd-y += svm/svm_onhyperv.o endif -obj-$(CONFIG_KVM) += kvm.o +obj-$(CONFIG_KVM_X86) += kvm.o obj-$(CONFIG_KVM_INTEL) += kvm-intel.o obj-$(CONFIG_KVM_AMD) += kvm-amd.o diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 2617be544480aa..41786b834b1635 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -705,7 +705,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX, F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) | - F(AMX_COMPLEX) + F(AMX_COMPLEX) | F(AVX10) ); kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX, @@ -721,6 +721,10 @@ void kvm_set_cpu_caps(void) SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA) ); + kvm_cpu_cap_init_kvm_defined(CPUID_24_0_EBX, + F(AVX10_128) | F(AVX10_256) | F(AVX10_512) + ); + kvm_cpu_cap_mask(CPUID_8000_0001_ECX, F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | @@ -949,7 +953,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) switch (function) { case 0: /* Limited to the highest leaf implemented in KVM. */ - entry->eax = min(entry->eax, 0x1fU); + entry->eax = min(entry->eax, 0x24U); break; case 1: cpuid_entry_override(entry, CPUID_1_EDX); @@ -1174,6 +1178,28 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) break; } break; + case 0x24: { + u8 avx10_version; + + if (!kvm_cpu_cap_has(X86_FEATURE_AVX10)) { + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; + } + + /* + * The AVX10 version is encoded in EBX[7:0]. Note, the version + * is guaranteed to be >=1 if AVX10 is supported. Note #2, the + * version needs to be captured before overriding EBX features! 
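As a host-side illustration of the enumeration KVM now exposes, a small probe using the compiler-provided cpuid.h (GCC/Clang, x86 only). The bit positions, CPUID.7.1:EDX[19] for AVX10 and leaf 0x24 EBX[7:0] for the version plus EBX[18:16] for the vector widths, follow the definitions added elsewhere in this patch; the rest is a hedged sketch, not KVM code:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* AVX10 enumerated? CPUID.(EAX=7,ECX=1):EDX[19] */
	if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx) || !(edx & (1u << 19))) {
		puts("AVX10 not enumerated");
		return 0;
	}

	/* Leaf 0x24, subleaf 0: version in EBX[7:0], vector widths in EBX[18:16] */
	__get_cpuid_count(0x24, 0, &eax, &ebx, &ecx, &edx);
	printf("AVX10 version %u, 128:%d 256:%d 512:%d\n",
	       ebx & 0xff,
	       !!(ebx & (1u << 16)), !!(ebx & (1u << 17)), !!(ebx & (1u << 18)));
	return 0;
}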
+ */ + avx10_version = min_t(u8, entry->ebx & 0xff, 1); + cpuid_entry_override(entry, CPUID_24_0_EBX); + entry->ebx |= avx10_version; + + entry->eax = 0; + entry->ecx = 0; + entry->edx = 0; + break; + } case KVM_CPUID_SIGNATURE: { const u32 *sigptr = (const u32 *)KVM_SIGNATURE; entry->eax = KVM_CPUID_FEATURES; diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 3d7eb11d0e456a..63f66c51975a57 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -108,7 +108,7 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt); * Read pending interrupt(from non-APIC source) * vector and intack. */ -static int kvm_cpu_get_extint(struct kvm_vcpu *v) +int kvm_cpu_get_extint(struct kvm_vcpu *v) { if (!kvm_cpu_has_extint(v)) { WARN_ON(!lapic_in_kernel(v)); @@ -131,6 +131,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v) } else return kvm_pic_read_irq(v->kvm); /* PIC */ } +EXPORT_SYMBOL_GPL(kvm_cpu_get_extint); /* * Read pending interrupt vector and intack. @@ -141,9 +142,12 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v) if (vector != -1) return vector; /* PIC */ - return kvm_get_apic_interrupt(v); /* APIC */ + vector = kvm_apic_has_interrupt(v); /* APIC */ + if (vector != -1) + kvm_apic_ack_interrupt(v, vector); + + return vector; } -EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5bb481aefcbcdc..2098dc689088bb 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1944,7 +1944,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic) u64 ns = 0; ktime_t expire; struct kvm_vcpu *vcpu = apic->vcpu; - unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; + u32 this_tsc_khz = vcpu->arch.virtual_tsc_khz; unsigned long flags; ktime_t now; @@ -2453,6 +2453,43 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); +#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13)) + +int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) +{ + if (data & X2APIC_ICR_RESERVED_BITS) + return 1; + + /* + * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but + * only AMD requires it to be zero, Intel essentially just ignores the + * bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled, + * the CPU performs the reserved bits checks, i.e. the underlying CPU + * behavior will "win". Arbitrarily clear the BUSY bit, as there is no + * sane way to provide consistent behavior with respect to hardware. + */ + data &= ~APIC_ICR_BUSY; + + kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); + if (kvm_x86_ops.x2apic_icr_is_split) { + kvm_lapic_set_reg(apic, APIC_ICR, data); + kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32); + } else { + kvm_lapic_set_reg64(apic, APIC_ICR, data); + } + trace_kvm_apic_write(APIC_ICR, data); + return 0; +} + +static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic) +{ + if (kvm_x86_ops.x2apic_icr_is_split) + return (u64)kvm_lapic_get_reg(apic, APIC_ICR) | + (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32; + + return kvm_lapic_get_reg64(apic, APIC_ICR); +} + /* emulate APIC access in a trap manner */ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) { @@ -2470,7 +2507,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) * maybe-unecessary write, and both are in the noise anyways. 
*/ if (apic_x2apic_mode(apic) && offset == APIC_ICR) - kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR)); + WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic))); else kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset)); } @@ -2922,14 +2959,13 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) } } -int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) +void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector) { - int vector = kvm_apic_has_interrupt(vcpu); struct kvm_lapic *apic = vcpu->arch.apic; u32 ppr; - if (vector == -1) - return -1; + if (WARN_ON_ONCE(vector < 0 || !apic)) + return; /* * We get here even with APIC virtualization enabled, if doing @@ -2957,8 +2993,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) __apic_update_ppr(apic, &ppr); } - return vector; } +EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt); static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s, bool set) @@ -2990,18 +3026,22 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, /* * In x2APIC mode, the LDR is fixed and based on the id. And - * ICR is internally a single 64-bit register, but needs to be - * split to ICR+ICR2 in userspace for backwards compatibility. + * if the ICR is _not_ split, ICR is internally a single 64-bit + * register, but needs to be split to ICR+ICR2 in userspace for + * backwards compatibility. */ - if (set) { + if (set) *ldr = kvm_apic_calc_x2apic_ldr(x2apic_id); - icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) | - (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32; - __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr); - } else { - icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR); - __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32); + if (!kvm_x86_ops.x2apic_icr_is_split) { + if (set) { + icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) | + (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32; + __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr); + } else { + icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR); + __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32); + } } } @@ -3194,22 +3234,12 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) return 0; } -int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) -{ - data &= ~APIC_ICR_BUSY; - - kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); - kvm_lapic_set_reg64(apic, APIC_ICR, data); - trace_kvm_apic_write(APIC_ICR, data); - return 0; -} - static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data) { u32 low; if (reg == APIC_ICR) { - *data = kvm_lapic_get_reg64(apic, APIC_ICR); + *data = kvm_x2apic_icr_read(apic); return 0; } diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 7ef8ae73e82d39..1b8ef9856422a4 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -88,15 +88,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu); void kvm_free_lapic(struct kvm_vcpu *vcpu); int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu); +void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector); int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu); -int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu); int kvm_apic_accept_events(struct kvm_vcpu *vcpu); void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event); u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu); void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8); void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu); void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value); -u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu); void kvm_recalculate_apic_map(struct kvm *kvm); void 
kvm_apic_set_version(struct kvm_vcpu *vcpu); void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 4341e0e2857129..9dc5dd43ae7f21 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -223,8 +223,6 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, bool kvm_mmu_may_ignore_guest_pat(void); -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); - int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7813d28b082f2f..a9a23e058555b9 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -614,32 +614,6 @@ static u64 mmu_spte_get_lockless(u64 *sptep) return __get_spte_lockless(sptep); } -/* Returns the Accessed status of the PTE and resets it at the same time. */ -static bool mmu_spte_age(u64 *sptep) -{ - u64 spte = mmu_spte_get_lockless(sptep); - - if (!is_accessed_spte(spte)) - return false; - - if (spte_ad_enabled(spte)) { - clear_bit((ffs(shadow_accessed_mask) - 1), - (unsigned long *)sptep); - } else { - /* - * Capture the dirty status of the page, so that it doesn't get - * lost when the SPTE is marked for access tracking. - */ - if (is_writable_pte(spte)) - kvm_set_pfn_dirty(spte_to_pfn(spte)); - - spte = mark_spte_for_access_track(spte); - mmu_spte_update_no_track(sptep, spte); - } - - return true; -} - static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu) { return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct; @@ -938,6 +912,7 @@ static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct * pte_list_desc containing more mappings. */ +#define KVM_RMAP_MANY BIT(0) /* * Returns the number of pointers in the rmap chain, not counting the new one. 
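KVM_RMAP_MANY gives a name to a tagged-pointer encoding: an rmap head stores either a single spte pointer directly or, with bit 0 set, a pointer to a descriptor holding several. A stripped-down model of that encoding (fixed-size descriptor, simplified types, not the kernel structures):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RMAP_MANY 0x1ul		/* bit 0 tags "head points to a descriptor" */

struct desc {
	uint64_t *sptes[4];
	int count;
};

struct rmap_head {
	unsigned long val;	/* 0, a single spte pointer, or (desc | RMAP_MANY) */
};

static void rmap_add(struct rmap_head *head, uint64_t *spte)
{
	struct desc *d;

	if (!head->val) {
		head->val = (unsigned long)spte;	/* first entry: store it inline */
	} else if (!(head->val & RMAP_MANY)) {
		d = calloc(1, sizeof(*d));		/* second entry: switch to a descriptor */
		d->sptes[d->count++] = (uint64_t *)head->val;
		d->sptes[d->count++] = spte;
		head->val = (unsigned long)d | RMAP_MANY;
	} else {
		d = (struct desc *)(head->val & ~RMAP_MANY);
		assert(d->count < 4);
		d->sptes[d->count++] = spte;
	}
}

static int rmap_count(const struct rmap_head *head)
{
	if (!head->val)
		return 0;
	if (!(head->val & RMAP_MANY))
		return 1;
	return ((struct desc *)(head->val & ~RMAP_MANY))->count;
}

int main(void)
{
	struct rmap_head head = { 0 };
	uint64_t a, b, c;

	rmap_add(&head, &a);
	rmap_add(&head, &b);
	rmap_add(&head, &c);
	printf("%d mappings\n", rmap_count(&head));
	return 0;
}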
@@ -950,16 +925,16 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, if (!rmap_head->val) { rmap_head->val = (unsigned long)spte; - } else if (!(rmap_head->val & 1)) { + } else if (!(rmap_head->val & KVM_RMAP_MANY)) { desc = kvm_mmu_memory_cache_alloc(cache); desc->sptes[0] = (u64 *)rmap_head->val; desc->sptes[1] = spte; desc->spte_count = 2; desc->tail_count = 0; - rmap_head->val = (unsigned long)desc | 1; + rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY; ++count; } else { - desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY); count = desc->tail_count + desc->spte_count; /* @@ -968,10 +943,10 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, */ if (desc->spte_count == PTE_LIST_EXT) { desc = kvm_mmu_memory_cache_alloc(cache); - desc->more = (struct pte_list_desc *)(rmap_head->val & ~1ul); + desc->more = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY); desc->spte_count = 0; desc->tail_count = count; - rmap_head->val = (unsigned long)desc | 1; + rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY; } desc->sptes[desc->spte_count++] = spte; } @@ -982,7 +957,7 @@ static void pte_list_desc_remove_entry(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc, int i) { - struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY); int j = head_desc->spte_count - 1; /* @@ -1011,7 +986,7 @@ static void pte_list_desc_remove_entry(struct kvm *kvm, if (!head_desc->more) rmap_head->val = 0; else - rmap_head->val = (unsigned long)head_desc->more | 1; + rmap_head->val = (unsigned long)head_desc->more | KVM_RMAP_MANY; mmu_free_pte_list_desc(head_desc); } @@ -1024,13 +999,13 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte, if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm)) return; - if (!(rmap_head->val & 1)) { + if (!(rmap_head->val & KVM_RMAP_MANY)) { if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm)) return; rmap_head->val = 0; } else { - desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY); while (desc) { for (i = 0; i < desc->spte_count; ++i) { if (desc->sptes[i] == spte) { @@ -1063,12 +1038,12 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm, if (!rmap_head->val) return false; - if (!(rmap_head->val & 1)) { + if (!(rmap_head->val & KVM_RMAP_MANY)) { mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val); goto out; } - desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY); for (; desc; desc = next) { for (i = 0; i < desc->spte_count; i++) @@ -1088,10 +1063,10 @@ unsigned int pte_list_count(struct kvm_rmap_head *rmap_head) if (!rmap_head->val) return 0; - else if (!(rmap_head->val & 1)) + else if (!(rmap_head->val & KVM_RMAP_MANY)) return 1; - desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY); return desc->tail_count + desc->spte_count; } @@ -1153,13 +1128,13 @@ static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head, if (!rmap_head->val) return NULL; - if (!(rmap_head->val & 1)) { + if (!(rmap_head->val & KVM_RMAP_MANY)) { iter->desc = NULL; sptep = (u64 *)rmap_head->val; goto out; } - iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + iter->desc = (struct pte_list_desc 
*)(rmap_head->val & ~KVM_RMAP_MANY); iter->pos = 0; sptep = iter->desc->sptes[iter->pos]; out: @@ -1307,15 +1282,6 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, return flush; } -/** - * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages - * @kvm: kvm instance - * @slot: slot to protect - * @gfn_offset: start of the BITS_PER_LONG pages we care about - * @mask: indicates which pages we should protect - * - * Used when we do not need to care about huge page mappings. - */ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) @@ -1339,16 +1305,6 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, } } -/** - * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write - * protect the page if the D-bit isn't supported. - * @kvm: kvm instance - * @slot: slot to clear D-bit - * @gfn_offset: start of the BITS_PER_LONG pages we care about - * @mask: indicates which pages we should clear D-bit - * - * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap. - */ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) @@ -1372,24 +1328,16 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, } } -/** - * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected - * PT level pages. - * - * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to - * enable dirty logging for them. - * - * We need to care about huge page mappings: e.g. during dirty logging we may - * have such mappings. - */ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) { /* - * Huge pages are NOT write protected when we start dirty logging in - * initially-all-set mode; must write protect them here so that they - * are split to 4K on the first write. + * If the slot was assumed to be "initially all dirty", write-protect + * huge pages to ensure they are split to 4KiB on the first write (KVM + * dirty logs at 4KiB granularity). If eager page splitting is enabled, + * immediately try to split huge pages, e.g. so that vCPUs don't get + * saddled with the cost of splitting. * * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn * of memslot has no such restriction, so the range can cross two large @@ -1411,7 +1359,16 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, PG_LEVEL_2M); } - /* Now handle 4K PTEs. */ + /* + * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in + * mask. If PML is enabled and the GFN doesn't need to be write- + * protected for other reasons, e.g. shadow paging, clear the Dirty bit. + * Otherwise clear the Writable bit. + * + * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is + * enabled but it chooses between clearing the Dirty bit and Writeable + * bit based on the context. 
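The gfn_offset/mask interface hands the arch code one 64-page chunk of the dirty bitmap at a time. A tiny stand-alone sketch of walking such a chunk, assuming a hypothetical clear_dirty_for_gfn() callback in place of the real SPTE updates (GCC/Clang builtins):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-page action standing in for clearing the Dirty or Writable bit. */
static void clear_dirty_for_gfn(uint64_t gfn)
{
	printf("clear dirty state for gfn %#llx\n", (unsigned long long)gfn);
}

/* @gfn_offset: first gfn covered by this bitmap word; @mask: one bit per dirty page. */
static void handle_dirty_chunk(uint64_t gfn_offset, uint64_t mask)
{
	while (mask) {
		int bit = __builtin_ctzll(mask);	/* lowest remaining dirty page */

		clear_dirty_for_gfn(gfn_offset + bit);
		mask &= mask - 1;			/* clear that bit and continue */
	}
}

int main(void)
{
	handle_dirty_chunk(0x1000, 0x5ULL);	/* pages 0 and 2 of the chunk */
	return 0;
}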
+ */ if (kvm_x86_ops.cpu_dirty_log_size) kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask); else @@ -1453,16 +1410,10 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn) return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K); } -static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - const struct kvm_memory_slot *slot) -{ - return kvm_zap_all_rmap_sptes(kvm, rmap_head); -} - static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level) + const struct kvm_memory_slot *slot) { - return __kvm_zap_rmap(kvm, rmap_head, slot); + return kvm_zap_all_rmap_sptes(kvm, rmap_head); } struct slot_rmap_walk_iterator { @@ -1513,7 +1464,7 @@ static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator) static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) { while (++iterator->rmap <= iterator->end_rmap) { - iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level)); + iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level); if (iterator->rmap->val) return; @@ -1534,23 +1485,71 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) slot_rmap_walk_okay(_iter_); \ slot_rmap_walk_next(_iter_)) -typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, - int level); +/* The return value indicates if tlb flush on all vcpus is needed. */ +typedef bool (*slot_rmaps_handler) (struct kvm *kvm, + struct kvm_rmap_head *rmap_head, + const struct kvm_memory_slot *slot); -static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, - struct kvm_gfn_range *range, - rmap_handler_t handler) +static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, + const struct kvm_memory_slot *slot, + slot_rmaps_handler fn, + int start_level, int end_level, + gfn_t start_gfn, gfn_t end_gfn, + bool can_yield, bool flush_on_yield, + bool flush) { struct slot_rmap_walk_iterator iterator; - bool ret = false; - for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, - range->start, range->end - 1, &iterator) - ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, - iterator.level); + lockdep_assert_held_write(&kvm->mmu_lock); - return ret; + for_each_slot_rmap_range(slot, start_level, end_level, start_gfn, + end_gfn, &iterator) { + if (iterator.rmap) + flush |= fn(kvm, iterator.rmap, slot); + + if (!can_yield) + continue; + + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { + if (flush && flush_on_yield) { + kvm_flush_remote_tlbs_range(kvm, start_gfn, + iterator.gfn - start_gfn + 1); + flush = false; + } + cond_resched_rwlock_write(&kvm->mmu_lock); + } + } + + return flush; +} + +static __always_inline bool walk_slot_rmaps(struct kvm *kvm, + const struct kvm_memory_slot *slot, + slot_rmaps_handler fn, + int start_level, int end_level, + bool flush_on_yield) +{ + return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level, + slot->base_gfn, slot->base_gfn + slot->npages - 1, + true, flush_on_yield, false); +} + +static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm, + const struct kvm_memory_slot *slot, + slot_rmaps_handler fn, + bool flush_on_yield) +{ + return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield); +} + +static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm, + const struct kvm_memory_slot *slot, + gfn_t start, gfn_t end, bool can_yield, + bool flush) +{ + return __walk_slot_rmaps(kvm, slot, 
kvm_zap_rmap, + PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, + start, end - 1, can_yield, true, flush); } bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) @@ -1558,7 +1557,9 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) bool flush = false; if (kvm_memslots_have_rmaps(kvm)) - flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap); + flush = __kvm_rmap_zap_gfn_range(kvm, range->slot, + range->start, range->end, + range->may_block, flush); if (tdp_mmu_enabled) flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush); @@ -1570,31 +1571,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return flush; } -static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level) -{ - u64 *sptep; - struct rmap_iterator iter; - int young = 0; - - for_each_rmap_spte(rmap_head, &iter, sptep) - young |= mmu_spte_age(sptep); - - return young; -} - -static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level) -{ - u64 *sptep; - struct rmap_iterator iter; - - for_each_rmap_spte(rmap_head, &iter, sptep) - if (is_accessed_spte(*sptep)) - return true; - return false; -} - #define RMAP_RECYCLE_THRESHOLD 1000 static void __rmap_add(struct kvm *kvm, @@ -1629,12 +1605,52 @@ static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access); } +static bool kvm_rmap_age_gfn_range(struct kvm *kvm, + struct kvm_gfn_range *range, bool test_only) +{ + struct slot_rmap_walk_iterator iterator; + struct rmap_iterator iter; + bool young = false; + u64 *sptep; + + for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, + range->start, range->end - 1, &iterator) { + for_each_rmap_spte(iterator.rmap, &iter, sptep) { + u64 spte = *sptep; + + if (!is_accessed_spte(spte)) + continue; + + if (test_only) + return true; + + if (spte_ad_enabled(spte)) { + clear_bit((ffs(shadow_accessed_mask) - 1), + (unsigned long *)sptep); + } else { + /* + * Capture the dirty status of the page, so that + * it doesn't get lost when the SPTE is marked + * for access tracking. 
+ */ + if (is_writable_pte(spte)) + kvm_set_pfn_dirty(spte_to_pfn(spte)); + + spte = mark_spte_for_access_track(spte); + mmu_spte_update_no_track(sptep, spte); + } + young = true; + } + } + return young; +} + bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { bool young = false; if (kvm_memslots_have_rmaps(kvm)) - young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap); + young = kvm_rmap_age_gfn_range(kvm, range, false); if (tdp_mmu_enabled) young |= kvm_tdp_mmu_age_gfn_range(kvm, range); @@ -1647,7 +1663,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) bool young = false; if (kvm_memslots_have_rmaps(kvm)) - young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap); + young = kvm_rmap_age_gfn_range(kvm, range, true); if (tdp_mmu_enabled) young |= kvm_tdp_mmu_test_age_gfn(kvm, range); @@ -1868,10 +1884,14 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp) if (is_obsolete_sp((_kvm), (_sp))) { \ } else -#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \ +#define for_each_gfn_valid_sp(_kvm, _sp, _gfn) \ for_each_valid_sp(_kvm, _sp, \ &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \ - if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else + if ((_sp)->gfn != (_gfn)) {} else + +#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \ + for_each_gfn_valid_sp(_kvm, _sp, _gfn) \ + if (!sp_has_gptes(_sp)) {} else static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { @@ -2713,36 +2733,49 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) write_unlock(&kvm->mmu_lock); } -int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) +bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + bool always_retry) { - struct kvm_mmu_page *sp; + struct kvm *kvm = vcpu->kvm; LIST_HEAD(invalid_list); - int r; + struct kvm_mmu_page *sp; + gpa_t gpa = cr2_or_gpa; + bool r = false; + + /* + * Bail early if there aren't any write-protected shadow pages to avoid + * unnecessarily taking mmu_lock lock, e.g. if the gfn is write-tracked + * by a third party. Reading indirect_shadow_pages without holding + * mmu_lock is safe, as this is purely an optimization, i.e. a false + * positive is benign, and a false negative will simply result in KVM + * skipping the unprotect+retry path, which is also an optimization. + */ + if (!READ_ONCE(kvm->arch.indirect_shadow_pages)) + goto out; + + if (!vcpu->arch.mmu->root_role.direct) { + gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); + if (gpa == INVALID_GPA) + goto out; + } - r = 0; write_lock(&kvm->mmu_lock); - for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) { - r = 1; + for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa)) kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); - } + + /* + * Snapshot the result before zapping, as zapping will remove all list + * entries, i.e. checking the list later would yield a false negative. 
+ */ + r = !list_empty(&invalid_list); kvm_mmu_commit_zap_page(kvm, &invalid_list); write_unlock(&kvm->mmu_lock); - return r; -} - -static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) -{ - gpa_t gpa; - int r; - - if (vcpu->arch.mmu->root_role.direct) - return 0; - - gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); - - r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); - +out: + if (r || always_retry) { + vcpu->arch.last_retry_eip = kvm_rip_read(vcpu); + vcpu->arch.last_retry_addr = cr2_or_gpa; + } return r; } @@ -2914,10 +2947,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, trace_kvm_mmu_set_spte(level, gfn, sptep); } - if (wrprot) { - if (write_fault) - ret = RET_PF_EMULATE; - } + if (wrprot && write_fault) + ret = RET_PF_WRITE_PROTECTED; if (flush) kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level); @@ -4549,7 +4580,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault return RET_PF_RETRY; if (page_fault_handle_page_track(vcpu, fault)) - return RET_PF_EMULATE; + return RET_PF_WRITE_PROTECTED; r = fast_page_fault(vcpu, fault); if (r != RET_PF_INVALID) @@ -4618,8 +4649,6 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, if (!flags) { trace_kvm_page_fault(vcpu, fault_address, error_code); - if (kvm_event_needs_reinjection(vcpu)) - kvm_mmu_unprotect_page_virt(vcpu, fault_address); r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, insn_len); } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) { @@ -4642,7 +4671,7 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu, int r; if (page_fault_handle_page_track(vcpu, fault)) - return RET_PF_EMULATE; + return RET_PF_WRITE_PROTECTED; r = fast_page_fault(vcpu, fault); if (r != RET_PF_INVALID) @@ -4719,6 +4748,7 @@ static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, switch (r) { case RET_PF_FIXED: case RET_PF_SPURIOUS: + case RET_PF_WRITE_PROTECTED: return 0; case RET_PF_EMULATE: @@ -5963,6 +5993,106 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, write_unlock(&vcpu->kvm->mmu_lock); } +static bool is_write_to_guest_page_table(u64 error_code) +{ + const u64 mask = PFERR_GUEST_PAGE_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK; + + return (error_code & mask) == mask; +} + +static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + u64 error_code, int *emulation_type) +{ + bool direct = vcpu->arch.mmu->root_role.direct; + + /* + * Do not try to unprotect and retry if the vCPU re-faulted on the same + * RIP with the same address that was previously unprotected, as doing + * so will likely put the vCPU into an infinite. E.g. if the vCPU uses + * a non-page-table modifying instruction on the PDE that points to the + * instruction, then unprotecting the gfn will unmap the instruction's + * code, i.e. make it impossible for the instruction to ever complete. + */ + if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) && + vcpu->arch.last_retry_addr == cr2_or_gpa) + return RET_PF_EMULATE; + + /* + * Reset the unprotect+retry values that guard against infinite loops. + * The values will be refreshed if KVM explicitly unprotects a gfn and + * retries, in all other cases it's safe to retry in the future even if + * the next page fault happens on the same RIP+address. 
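Reduced to its essence, the last_retry_eip/last_retry_addr pair is a one-deep memory: never unprotect-and-retry twice in a row for the same RIP and address. A simplified stand-alone sketch of that guard (hypothetical names, and it records every attempt rather than only actual unprotects, unlike the code above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-vCPU retry state, loosely mirroring last_retry_eip/last_retry_addr. */
struct retry_state {
	uint64_t last_rip;
	uint64_t last_addr;
};

/*
 * Return true if the caller may zap-and-retry this fault, false if it must
 * fall back to emulation because the previous retry clearly did not help.
 */
static bool may_retry_fault(struct retry_state *rs, uint64_t rip, uint64_t addr)
{
	if (rs->last_rip == rip && rs->last_addr == addr)
		return false;	/* same fault again: retrying would loop forever */

	rs->last_rip = rip;	/* remember this attempt for the next fault */
	rs->last_addr = addr;
	return true;
}

int main(void)
{
	struct retry_state rs = { 0 };

	/* First fault at a RIP/addr may retry; an identical re-fault may not. */
	printf("%d %d\n", may_retry_fault(&rs, 0x401000, 0xdead000),
			  may_retry_fault(&rs, 0x401000, 0xdead000));
	return 0;
}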
+ */ + vcpu->arch.last_retry_eip = 0; + vcpu->arch.last_retry_addr = 0; + + /* + * It should be impossible to reach this point with an MMIO cache hit, + * as RET_PF_WRITE_PROTECTED is returned if and only if there's a valid, + * writable memslot, and creating a memslot should invalidate the MMIO + * cache by way of changing the memslot generation. WARN and disallow + * retry if MMIO is detected, as retrying MMIO emulation is pointless + * and could put the vCPU into an infinite loop because the processor + * will keep faulting on the non-existent MMIO address. + */ + if (WARN_ON_ONCE(mmio_info_in_cache(vcpu, cr2_or_gpa, direct))) + return RET_PF_EMULATE; + + /* + * Before emulating the instruction, check to see if the access was due + * to a read-only violation while the CPU was walking non-nested NPT + * page tables, i.e. for a direct MMU, for _guest_ page tables in L1. + * If L1 is sharing (a subset of) its page tables with L2, e.g. by + * having nCR3 share lower level page tables with hCR3, then when KVM + * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also + * unknowingly write-protecting L1's guest page tables, which KVM isn't + * shadowing. + * + * Because the CPU (by default) walks NPT page tables using a write + * access (to ensure the CPU can do A/D updates), page walks in L1 can + * trigger write faults for the above case even when L1 isn't modifying + * PTEs. As a result, KVM will unnecessarily emulate (or at least, try + * to emulate) an excessive number of L1 instructions; because L1's MMU + * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs + * and thus no need to emulate in order to guarantee forward progress. + * + * Try to unprotect the gfn, i.e. zap any shadow pages, so that L1 can + * proceed without triggering emulation. If one or more shadow pages + * was zapped, skip emulation and resume L1 to let it natively execute + * the instruction. If no shadow pages were zapped, then the write- + * fault is due to something else entirely, i.e. KVM needs to emulate, + * as resuming the guest will put it into an infinite loop. + * + * Note, this code also applies to Intel CPUs, even though it is *very* + * unlikely that an L1 will share its page tables (IA32/PAE/paging64 + * format) with L2's page tables (EPT format). + * + * For indirect MMUs, i.e. if KVM is shadowing the current MMU, try to + * unprotect the gfn and retry if an event is awaiting reinjection. If + * KVM emulates multiple instructions before completing event injection, + * the event could be delayed beyond what is architecturally allowed, + * e.g. KVM could inject an IRQ after the TPR has been raised. + */ + if (((direct && is_write_to_guest_page_table(error_code)) || + (!direct && kvm_event_needs_reinjection(vcpu))) && + kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa)) + return RET_PF_RETRY; + + /* + * The gfn is write-protected, but if KVM detects its emulating an + * instruction that is unlikely to be used to modify page tables, or if + * emulation fails, KVM can try to unprotect the gfn and let the CPU + * re-execute the instruction that caused the page fault. Do not allow + * retrying an instruction from a nested guest as KVM is only explicitly + * shadowing L1's page tables, i.e. unprotecting something for L1 isn't + * going to magically fix whatever issue caused L2 to fail. 
+ */ + if (!is_guest_mode(vcpu)) + *emulation_type |= EMULTYPE_ALLOW_RETRY_PF; + + return RET_PF_EMULATE; +} + int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len) { @@ -6008,6 +6138,10 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err if (r < 0) return r; + if (r == RET_PF_WRITE_PROTECTED) + r = kvm_mmu_write_protect_fault(vcpu, cr2_or_gpa, error_code, + &emulation_type); + if (r == RET_PF_FIXED) vcpu->stat.pf_fixed++; else if (r == RET_PF_EMULATE) @@ -6018,32 +6152,6 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err if (r != RET_PF_EMULATE) return 1; - /* - * Before emulating the instruction, check if the error code - * was due to a RO violation while translating the guest page. - * This can occur when using nested virtualization with nested - * paging in both guests. If true, we simply unprotect the page - * and resume the guest. - */ - if (vcpu->arch.mmu->root_role.direct && - (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { - kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)); - return 1; - } - - /* - * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still - * optimistically try to just unprotect the page and let the processor - * re-execute the instruction that caused the page fault. Do not allow - * retrying MMIO emulation, as it's not only pointless but could also - * cause us to enter an infinite loop because the processor will keep - * faulting on the non-existent MMIO address. Retrying an instruction - * from a nested guest is also pointless and dangerous as we are only - * explicitly shadowing L1's page tables, i.e. unprotecting something - * for L1 isn't going to magically fix whatever issue cause L2 to fail. - */ - if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu)) - emulation_type |= EMULTYPE_ALLOW_RETRY_PF; emulate: return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, insn_len); @@ -6202,59 +6310,6 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, } EXPORT_SYMBOL_GPL(kvm_configure_mmu); -/* The return value indicates if tlb flush on all vcpus is needed. 
*/ -typedef bool (*slot_rmaps_handler) (struct kvm *kvm, - struct kvm_rmap_head *rmap_head, - const struct kvm_memory_slot *slot); - -static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, - const struct kvm_memory_slot *slot, - slot_rmaps_handler fn, - int start_level, int end_level, - gfn_t start_gfn, gfn_t end_gfn, - bool flush_on_yield, bool flush) -{ - struct slot_rmap_walk_iterator iterator; - - lockdep_assert_held_write(&kvm->mmu_lock); - - for_each_slot_rmap_range(slot, start_level, end_level, start_gfn, - end_gfn, &iterator) { - if (iterator.rmap) - flush |= fn(kvm, iterator.rmap, slot); - - if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { - if (flush && flush_on_yield) { - kvm_flush_remote_tlbs_range(kvm, start_gfn, - iterator.gfn - start_gfn + 1); - flush = false; - } - cond_resched_rwlock_write(&kvm->mmu_lock); - } - } - - return flush; -} - -static __always_inline bool walk_slot_rmaps(struct kvm *kvm, - const struct kvm_memory_slot *slot, - slot_rmaps_handler fn, - int start_level, int end_level, - bool flush_on_yield) -{ - return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level, - slot->base_gfn, slot->base_gfn + slot->npages - 1, - flush_on_yield, false); -} - -static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm, - const struct kvm_memory_slot *slot, - slot_rmaps_handler fn, - bool flush_on_yield) -{ - return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield); -} - static void free_mmu_pages(struct kvm_mmu *mmu) { if (!tdp_enabled && mmu->pae_root) @@ -6528,9 +6583,8 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e if (WARN_ON_ONCE(start >= end)) continue; - flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap, - PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, - start, end - 1, true, flush); + flush = __kvm_rmap_zap_gfn_range(kvm, memslot, start, + end, true, flush); } } @@ -6818,7 +6872,7 @@ static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm, */ for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--) __walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages, - level, level, start, end - 1, true, false); + level, level, start, end - 1, true, true, false); } /* Must be called with the mmu_lock held in write-mode. */ @@ -6997,10 +7051,70 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) kvm_mmu_zap_all(kvm); } +static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm, + struct kvm_memory_slot *slot, + bool flush) +{ + LIST_HEAD(invalid_list); + unsigned long i; + + if (list_empty(&kvm->arch.active_mmu_pages)) + goto out_flush; + + /* + * Since accounting information is stored in struct kvm_arch_memory_slot, + * shadow pages deletion (e.g. unaccount_shadowed()) requires that all + * gfns with a shadow page have a corresponding memslot. Do so before + * the memslot goes away. 
+ */ + for (i = 0; i < slot->npages; i++) { + struct kvm_mmu_page *sp; + gfn_t gfn = slot->base_gfn + i; + + for_each_gfn_valid_sp(kvm, sp, gfn) + kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); + + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { + kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); + flush = false; + cond_resched_rwlock_write(&kvm->mmu_lock); + } + } + +out_flush: + kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); +} + +static void kvm_mmu_zap_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + struct kvm_gfn_range range = { + .slot = slot, + .start = slot->base_gfn, + .end = slot->base_gfn + slot->npages, + .may_block = true, + }; + bool flush; + + write_lock(&kvm->mmu_lock); + flush = kvm_unmap_gfn_range(kvm, &range); + kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush); + write_unlock(&kvm->mmu_lock); +} + +static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm) +{ + return kvm->arch.vm_type == KVM_X86_DEFAULT_VM && + kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL); +} + void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { - kvm_mmu_zap_all_fast(kvm); + if (kvm_memslot_flush_zap_all(kvm)) + kvm_mmu_zap_all_fast(kvm); + else + kvm_mmu_zap_memslot(kvm, slot); } void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 1721d97743e99c..c98827840e07ab 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -258,6 +258,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); * RET_PF_CONTINUE: So far, so good, keep handling the page fault. * RET_PF_RETRY: let CPU fault again on the address. * RET_PF_EMULATE: mmio page fault, emulate the instruction directly. + * RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotected the + * gfn and retry, or emulate the instruction directly. * RET_PF_INVALID: the spte is invalid, let the real page fault path update it. * RET_PF_FIXED: The faulting entry has been fixed. * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU. 
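To make the contract of the new return value concrete, a hedged sketch of how a caller might act on it; the helper name and strings are illustrative only, while the enum ordering matches the list above:

#include <stdio.h>

enum pf_ret { RET_PF_CONTINUE, RET_PF_RETRY, RET_PF_EMULATE,
	      RET_PF_WRITE_PROTECTED, RET_PF_INVALID, RET_PF_FIXED, RET_PF_SPURIOUS };

/* Hypothetical caller: decide what to do with a page-fault handler's verdict. */
static const char *handle_pf_result(enum pf_ret r)
{
	switch (r) {
	case RET_PF_RETRY:
		return "re-enter the guest and let it fault again";
	case RET_PF_WRITE_PROTECTED:
		return "try to unprotect the gfn and retry, otherwise emulate";
	case RET_PF_EMULATE:
		return "emulate the faulting instruction";
	case RET_PF_FIXED:
	case RET_PF_SPURIOUS:
		return "fault resolved, resume the guest";
	default:
		return "unexpected result";
	}
}

int main(void)
{
	printf("%s\n", handle_pf_result(RET_PF_WRITE_PROTECTED));
	return 0;
}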
@@ -274,6 +276,7 @@ enum { RET_PF_CONTINUE = 0, RET_PF_RETRY, RET_PF_EMULATE, + RET_PF_WRITE_PROTECTED, RET_PF_INVALID, RET_PF_FIXED, RET_PF_SPURIOUS, @@ -349,8 +352,6 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm, void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level); -void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); - void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp); void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp); diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h index 195d98bc8de85e..f35a830ce469b5 100644 --- a/arch/x86/kvm/mmu/mmutrace.h +++ b/arch/x86/kvm/mmu/mmutrace.h @@ -57,6 +57,7 @@ TRACE_DEFINE_ENUM(RET_PF_CONTINUE); TRACE_DEFINE_ENUM(RET_PF_RETRY); TRACE_DEFINE_ENUM(RET_PF_EMULATE); +TRACE_DEFINE_ENUM(RET_PF_WRITE_PROTECTED); TRACE_DEFINE_ENUM(RET_PF_INVALID); TRACE_DEFINE_ENUM(RET_PF_FIXED); TRACE_DEFINE_ENUM(RET_PF_SPURIOUS); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 69941cebb3a87e..ae7d39ff2d07f0 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -646,10 +646,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, * really care if it changes underneath us after this point). */ if (FNAME(gpte_changed)(vcpu, gw, top_level)) - goto out_gpte_changed; + return RET_PF_RETRY; if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) - goto out_gpte_changed; + return RET_PF_RETRY; /* * Load a new root and retry the faulting instruction in the extremely @@ -659,7 +659,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, */ if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) { kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu); - goto out_gpte_changed; + return RET_PF_RETRY; } for_each_shadow_entry(vcpu, fault->addr, it) { @@ -674,34 +674,38 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn, false, access); - if (sp != ERR_PTR(-EEXIST)) { - /* - * We must synchronize the pagetable before linking it - * because the guest doesn't need to flush tlb when - * the gpte is changed from non-present to present. - * Otherwise, the guest may use the wrong mapping. - * - * For PG_LEVEL_4K, kvm_mmu_get_page() has already - * synchronized it transiently via kvm_sync_page(). - * - * For higher level pagetable, we synchronize it via - * the slower mmu_sync_children(). If it needs to - * break, some progress has been made; return - * RET_PF_RETRY and retry on the next #PF. - * KVM_REQ_MMU_SYNC is not necessary but it - * expedites the process. - */ - if (sp->unsync_children && - mmu_sync_children(vcpu, sp, false)) - return RET_PF_RETRY; - } + /* + * Synchronize the new page before linking it, as the CPU (KVM) + * is architecturally disallowed from inserting non-present + * entries into the TLB, i.e. the guest isn't required to flush + * the TLB when changing the gPTE from non-present to present. + * + * For PG_LEVEL_4K, kvm_mmu_find_shadow_page() has already + * synchronized the page via kvm_sync_page(). + * + * For higher level pages, which cannot be unsync themselves + * but can have unsync children, synchronize via the slower + * mmu_sync_children(). 
If KVM needs to drop mmu_lock due to + * contention or to reschedule, instruct the caller to retry + * the #PF (mmu_sync_children() ensures forward progress will + * be made). + */ + if (sp != ERR_PTR(-EEXIST) && sp->unsync_children && + mmu_sync_children(vcpu, sp, false)) + return RET_PF_RETRY; /* - * Verify that the gpte in the page we've just write - * protected is still there. + * Verify that the gpte in the page, which is now either + * write-protected or unsync, wasn't modified between the fault + * and acquiring mmu_lock. This needs to be done even when + * reusing an existing shadow page to ensure the information + * gathered by the walker matches the information stored in the + * shadow page (which could have been modified by a different + * vCPU even if the page was already linked). Holding mmu_lock + * prevents the shadow page from changing after this point. */ if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) - goto out_gpte_changed; + return RET_PF_RETRY; if (sp != ERR_PTR(-EEXIST)) link_shadow_page(vcpu, it.sptep, sp); @@ -755,9 +759,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, FNAME(pte_prefetch)(vcpu, gw, it.sptep); return ret; - -out_gpte_changed: - return RET_PF_RETRY; } /* @@ -805,7 +806,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (page_fault_handle_page_track(vcpu, fault)) { shadow_page_table_clear_flood(vcpu, fault->addr); - return RET_PF_EMULATE; + return RET_PF_WRITE_PROTECTED; } r = mmu_topup_memory_caches(vcpu, true); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 3c55955bcaf8ca..3b996c1fdaabc0 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1046,10 +1046,8 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, * protected, emulation is needed. If the emulation was skipped, * the vCPU would have the same fault again. */ - if (wrprot) { - if (fault->write) - ret = RET_PF_EMULATE; - } + if (wrprot && fault->write) + ret = RET_PF_WRITE_PROTECTED; /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */ if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) { diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 2f4e155080badc..0d17d6b7063964 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -17,6 +17,7 @@ enum kvm_only_cpuid_leafs { CPUID_8000_0007_EDX, CPUID_8000_0022_EAX, CPUID_7_2_EDX, + CPUID_24_0_EBX, NR_KVM_CPU_CAPS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, @@ -46,6 +47,7 @@ enum kvm_only_cpuid_leafs { #define X86_FEATURE_AVX_NE_CONVERT KVM_X86_FEATURE(CPUID_7_1_EDX, 5) #define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8) #define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14) +#define X86_FEATURE_AVX10 KVM_X86_FEATURE(CPUID_7_1_EDX, 19) /* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */ #define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0) @@ -55,6 +57,11 @@ enum kvm_only_cpuid_leafs { #define KVM_X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4) #define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5) +/* Intel-defined sub-features, CPUID level 0x00000024:0 (EBX) */ +#define X86_FEATURE_AVX10_128 KVM_X86_FEATURE(CPUID_24_0_EBX, 16) +#define X86_FEATURE_AVX10_256 KVM_X86_FEATURE(CPUID_24_0_EBX, 17) +#define X86_FEATURE_AVX10_512 KVM_X86_FEATURE(CPUID_24_0_EBX, 18) + /* CPUID level 0x80000007 (EDX). 
*/ #define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8) @@ -90,6 +97,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, + [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX}, }; /* diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c index 00e3c27d2a876d..85241c0c7f569b 100644 --- a/arch/x86/kvm/smm.c +++ b/arch/x86/kvm/smm.c @@ -624,17 +624,31 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt) #endif /* - * Give leave_smm() a chance to make ISA-specific changes to the vCPU - * state (e.g. enter guest mode) before loading state from the SMM - * state-save area. + * FIXME: When resuming L2 (a.k.a. guest mode), the transition to guest + * mode should happen _after_ loading state from SMRAM. However, KVM + * piggybacks the nested VM-Enter flows (which is wrong for many other + * reasons), and so nSVM/nVMX would clobber state that is loaded from + * SMRAM and from the VMCS/VMCB. */ if (kvm_x86_call(leave_smm)(vcpu, &smram)) return X86EMUL_UNHANDLEABLE; #ifdef CONFIG_X86_64 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) - return rsm_load_state_64(ctxt, &smram.smram64); + ret = rsm_load_state_64(ctxt, &smram.smram64); else #endif - return rsm_load_state_32(ctxt, &smram.smram32); + ret = rsm_load_state_32(ctxt, &smram.smram32); + + /* + * If RSM fails and triggers shutdown, architecturally the shutdown + * occurs *before* the transition to guest mode. But due to KVM's + * flawed handling of RSM to L2 (see above), the vCPU may already be + * in_guest_mode(). Force the vCPU out of guest mode before delivering + * the shutdown, so that L1 enters shutdown instead of seeing a VM-Exit + * that architecturally shouldn't be possible. 
+ */ + if (ret != X86EMUL_CONTINUE && is_guest_mode(vcpu)) + kvm_leave_nested(vcpu); + return ret; } diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 6f704c1037e514..d5314cb7dff4ca 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1693,8 +1693,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; ret = -ENOMEM; - ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT); - save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT); + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); + save = kzalloc(sizeof(*save), GFP_KERNEL); if (!ctl || !save) goto out_free; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 714c517dd4b72b..0b851ef937f297 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -534,10 +534,10 @@ static int __sev_issue_cmd(int fd, int id, void *data, int *error) int ret; f = fdget(fd); - if (!f.file) + if (!fd_file(f)) return -EBADF; - ret = sev_issue_cmd_external_user(f.file, id, data, error); + ret = sev_issue_cmd_external_user(fd_file(f), id, data, error); fdput(f); return ret; @@ -2078,15 +2078,15 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd) bool charged = false; int ret; - if (!f.file) + if (!fd_file(f)) return -EBADF; - if (!file_is_kvm(f.file)) { + if (!file_is_kvm(fd_file(f))) { ret = -EBADF; goto out_fput; } - source_kvm = f.file->private_data; + source_kvm = fd_file(f)->private_data; ret = sev_lock_two_vms(kvm, source_kvm); if (ret) goto out_fput; @@ -2803,15 +2803,15 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd) struct kvm_sev_info *source_sev, *mirror_sev; int ret; - if (!f.file) + if (!fd_file(f)) return -EBADF; - if (!file_is_kvm(f.file)) { + if (!file_is_kvm(fd_file(f))) { ret = -EBADF; goto e_source_fput; } - source_kvm = f.file->private_data; + source_kvm = fd_file(f)->private_data; ret = sev_lock_two_vms(kvm, source_kvm); if (ret) goto e_source_fput; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5ab2c92c7331de..9df3e1e5ae81a1 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -573,7 +573,7 @@ static void __svm_write_tsc_multiplier(u64 multiplier) static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd) { - return page_address(sd->save_area) + 0x400; + return &sd->save_area->host_sev_es_save; } static inline void kvm_cpu_svm_disable(void) @@ -592,14 +592,14 @@ static inline void kvm_cpu_svm_disable(void) } } -static void svm_emergency_disable(void) +static void svm_emergency_disable_virtualization_cpu(void) { kvm_rebooting = true; kvm_cpu_svm_disable(); } -static void svm_hardware_disable(void) +static void svm_disable_virtualization_cpu(void) { /* Make sure we clean up behind us */ if (tsc_scaling) @@ -610,7 +610,7 @@ static void svm_hardware_disable(void) amd_pmu_disable_virt(); } -static int svm_hardware_enable(void) +static int svm_enable_virtualization_cpu(void) { struct svm_cpu_data *sd; @@ -696,7 +696,7 @@ static void svm_cpu_uninit(int cpu) return; kfree(sd->sev_vmcbs); - __free_page(sd->save_area); + __free_page(__sme_pa_to_page(sd->save_area_pa)); sd->save_area_pa = 0; sd->save_area = NULL; } @@ -704,23 +704,24 @@ static void svm_cpu_uninit(int cpu) static int svm_cpu_init(int cpu) { struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); + struct page *save_area_page; int ret = -ENOMEM; memset(sd, 0, sizeof(struct svm_cpu_data)); - sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL); - if (!sd->save_area) + 
save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL); + if (!save_area_page) return ret; ret = sev_cpu_init(sd); if (ret) goto free_save_area; - sd->save_area_pa = __sme_page_pa(sd->save_area); + sd->save_area = page_address(save_area_page); + sd->save_area_pa = __sme_page_pa(save_area_page); return 0; free_save_area: - __free_page(sd->save_area); - sd->save_area = NULL; + __free_page(save_area_page); return ret; } @@ -1124,8 +1125,7 @@ static void svm_hardware_unsetup(void) for_each_possible_cpu(cpu) svm_cpu_uninit(cpu); - __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), - get_order(IOPM_SIZE)); + __free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE)); iopm_base = 0; } @@ -1301,7 +1301,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu) if (!kvm_hlt_in_guest(vcpu->kvm)) svm_set_intercept(svm, INTERCEPT_HLT); - control->iopm_base_pa = __sme_set(iopm_base); + control->iopm_base_pa = iopm_base; control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); control->int_ctl = V_INTR_MASKING_MASK; @@ -1503,7 +1503,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu) sev_free_vcpu(vcpu); - __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); + __free_page(__sme_pa_to_page(svm->vmcb01.pa)); __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); } @@ -1533,7 +1533,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu) * TSC_AUX is always virtualized for SEV-ES guests when the feature is * available. The user return MSR support is not required in this case * because TSC_AUX is restored on #VMEXIT from the host save area - * (which has been initialized in svm_hardware_enable()). + * (which has been initialized in svm_enable_virtualization_cpu()). */ if (likely(tsc_aux_uret_slot >= 0) && (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm))) @@ -2825,17 +2825,17 @@ static int efer_trap(struct kvm_vcpu *vcpu) return kvm_complete_insn_gp(vcpu, ret); } -static int svm_get_msr_feature(struct kvm_msr_entry *msr) +static int svm_get_feature_msr(u32 msr, u64 *data) { - msr->data = 0; + *data = 0; - switch (msr->index) { + switch (msr) { case MSR_AMD64_DE_CFG: if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) - msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; + *data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; break; default: - return KVM_MSR_RET_INVALID; + return KVM_MSR_RET_UNSUPPORTED; } return 0; @@ -3144,7 +3144,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) * feature is available. The user return MSR support is not * required in this case because TSC_AUX is restored on #VMEXIT * from the host save area (which has been initialized in - * svm_hardware_enable()). + * svm_enable_virtualization_cpu()). */ if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm)) break; @@ -3191,18 +3191,21 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) kvm_pr_unimpl_wrmsr(vcpu, ecx, data); break; case MSR_AMD64_DE_CFG: { - struct kvm_msr_entry msr_entry; + u64 supported_de_cfg; - msr_entry.index = msr->index; - if (svm_get_msr_feature(&msr_entry)) + if (svm_get_feature_msr(ecx, &supported_de_cfg)) return 1; - /* Check the supported bits */ - if (data & ~msr_entry.data) + if (data & ~supported_de_cfg) return 1; - /* Don't allow the guest to change a bit, #GP */ - if (!msr->host_initiated && (data ^ msr_entry.data)) + /* + * Don't let the guest change the host-programmed value. The + * MSR is very model specific, i.e. 
contains multiple bits that + * are completely unknown to KVM, and the one bit known to KVM + * is simply a reflection of hardware capabilities. + */ + if (!msr->host_initiated && data != svm->msr_decfg) return 1; svm->msr_decfg = data; @@ -4156,12 +4159,21 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); + if (is_guest_mode(vcpu)) return EXIT_FASTPATH_NONE; - if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && - to_svm(vcpu)->vmcb->control.exit_info_1) + switch (svm->vmcb->control.exit_code) { + case SVM_EXIT_MSR: + if (!svm->vmcb->control.exit_info_1) + break; return handle_fastpath_set_msr_irqoff(vcpu); + case SVM_EXIT_HLT: + return handle_fastpath_hlt(vcpu); + default: + break; + } return EXIT_FASTPATH_NONE; } @@ -4992,8 +5004,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .check_processor_compatibility = svm_check_processor_compat, .hardware_unsetup = svm_hardware_unsetup, - .hardware_enable = svm_hardware_enable, - .hardware_disable = svm_hardware_disable, + .enable_virtualization_cpu = svm_enable_virtualization_cpu, + .disable_virtualization_cpu = svm_disable_virtualization_cpu, + .emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu, .has_emulated_msr = svm_has_emulated_msr, .vcpu_create = svm_vcpu_create, @@ -5011,7 +5024,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_unblocking = avic_vcpu_unblocking, .update_exception_bitmap = svm_update_exception_bitmap, - .get_msr_feature = svm_get_msr_feature, + .get_feature_msr = svm_get_feature_msr, .get_msr = svm_get_msr, .set_msr = svm_set_msr, .get_segment_base = svm_get_segment_base, @@ -5062,6 +5075,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .enable_nmi_window = svm_enable_nmi_window, .enable_irq_window = svm_enable_irq_window, .update_cr8_intercept = svm_update_cr8_intercept, + + .x2apic_icr_is_split = true, .set_virtual_apic_mode = avic_refresh_virtual_apic_mode, .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl, .apicv_post_state_restore = avic_apicv_post_state_restore, @@ -5266,7 +5281,7 @@ static __init int svm_hardware_setup(void) iopm_va = page_address(iopm_pages); memset(iopm_va, 0xff, PAGE_SIZE * (1 << order)); - iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; + iopm_base = __sme_page_pa(iopm_pages); init_msrpm_offsets(); @@ -5425,8 +5440,6 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = { static void __svm_exit(void) { kvm_x86_vendor_exit(); - - cpu_emergency_unregister_virt_callback(svm_emergency_disable); } static int __init svm_init(void) @@ -5442,8 +5455,6 @@ static int __init svm_init(void) if (r) return r; - cpu_emergency_register_virt_callback(svm_emergency_disable); - /* * Common KVM initialization _must_ come last, after this, /dev/kvm is * exposed to userspace! diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 76107c7d0595d7..43fa6a16eb1917 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -25,7 +25,21 @@ #include "cpuid.h" #include "kvm_cache_regs.h" -#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) +/* + * Helpers to convert to/from physical addresses for pages whose address is + * consumed directly by hardware. Even though it's a physical address, SVM + * often restricts the address to the natural width, hence 'unsigned long' + * instead of 'hpa_t'. 
+ */ +static inline unsigned long __sme_page_pa(struct page *page) +{ + return __sme_set(page_to_pfn(page) << PAGE_SHIFT); +} + +static inline struct page *__sme_pa_to_page(unsigned long pa) +{ + return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT); +} #define IOPM_SIZE PAGE_SIZE * 3 #define MSRPM_SIZE PAGE_SIZE * 2 @@ -321,7 +335,7 @@ struct svm_cpu_data { u32 next_asid; u32 min_asid; - struct page *save_area; + struct vmcb *save_area; unsigned long save_area_pa; struct vmcb *current_vmcb; diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index a0c8eb37d3e1c6..2ed80aea3bb130 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -209,10 +209,8 @@ SYM_FUNC_START(__svm_vcpu_run) 7: vmload %_ASM_AX 8: -#ifdef CONFIG_MITIGATION_RETPOLINE /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ - FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE -#endif + FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT /* Clobbers RAX, RCX, RDX. */ RESTORE_HOST_SPEC_CTRL @@ -348,10 +346,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) 2: cli -#ifdef CONFIG_MITIGATION_RETPOLINE /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ - FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE -#endif + FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT /* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */ RESTORE_HOST_SPEC_CTRL diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 41a4533f998974..cb6588238f4667 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -54,9 +54,7 @@ struct nested_vmx_msrs { }; struct vmcs_config { - int size; - u32 basic_cap; - u32 revision_id; + u64 basic; u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; @@ -76,7 +74,7 @@ extern struct vmx_capability vmx_capability __ro_after_init; static inline bool cpu_has_vmx_basic_inout(void) { - return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT); + return vmcs_config.basic & VMX_BASIC_INOUT; } static inline bool cpu_has_virtual_nmis(void) @@ -225,7 +223,7 @@ static inline bool cpu_has_vmx_vmfunc(void) static inline bool cpu_has_vmx_shadow_vmcs(void) { /* check if the cpu supports writing r/o exit information fields */ - if (!(vmcs_config.misc & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) + if (!(vmcs_config.misc & VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) return false; return vmcs_config.cpu_based_2nd_exec_ctrl & @@ -367,7 +365,7 @@ static inline bool cpu_has_vmx_invvpid_global(void) static inline bool cpu_has_vmx_intel_pt(void) { - return (vmcs_config.misc & MSR_IA32_VMX_MISC_INTEL_PT) && + return (vmcs_config.misc & VMX_MISC_INTEL_PT) && (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PT_USE_GPA) && (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL); } diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c index 0bf35ebe8a1bdb..7668e2fb8043ef 100644 --- a/arch/x86/kvm/vmx/main.c +++ b/arch/x86/kvm/vmx/main.c @@ -23,8 +23,10 @@ struct kvm_x86_ops vt_x86_ops __initdata = { .hardware_unsetup = vmx_hardware_unsetup, - .hardware_enable = vmx_hardware_enable, - .hardware_disable = vmx_hardware_disable, + .enable_virtualization_cpu = vmx_enable_virtualization_cpu, + .disable_virtualization_cpu = vmx_disable_virtualization_cpu, + .emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu, + .has_emulated_msr = vmx_has_emulated_msr, .vm_size = sizeof(struct kvm_vmx), @@ 
-41,7 +43,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = { .vcpu_put = vmx_vcpu_put, .update_exception_bitmap = vmx_update_exception_bitmap, - .get_msr_feature = vmx_get_msr_feature, + .get_feature_msr = vmx_get_feature_msr, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, @@ -89,6 +91,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = { .enable_nmi_window = vmx_enable_nmi_window, .enable_irq_window = vmx_enable_irq_window, .update_cr8_intercept = vmx_update_cr8_intercept, + + .x2apic_icr_is_split = false, .set_virtual_apic_mode = vmx_set_virtual_apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 2392a7ef254df2..a8e7bc04d9bf36 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -981,7 +981,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) __func__, i, e.index, e.reserved); goto fail; } - if (kvm_set_msr(vcpu, e.index, e.value)) { + if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) { pr_debug_ratelimited( "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", __func__, i, e.index, e.value); @@ -1017,7 +1017,7 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu, } } - if (kvm_get_msr(vcpu, msr_index, data)) { + if (kvm_get_msr_with_filter(vcpu, msr_index, data)) { pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, msr_index); return false; @@ -1112,9 +1112,9 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, /* * Emulated VMEntry does not fail here. Instead a less * accurate value will be returned by - * nested_vmx_get_vmexit_msr_value() using kvm_get_msr() - * instead of reading the value from the vmcs02 VMExit - * MSR-store area. + * nested_vmx_get_vmexit_msr_value() by reading KVM's + * internal MSR state instead of reading the value from + * the vmcs02 VMExit MSR-store area. */ pr_warn_ratelimited( "Not enough msr entries in msr_autostore. Can't add msr %x\n", @@ -1251,21 +1251,32 @@ static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) { - const u64 feature_and_reserved = - /* feature (except bit 48; see below) */ - BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | - /* reserved */ - BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); + const u64 feature_bits = VMX_BASIC_DUAL_MONITOR_TREATMENT | + VMX_BASIC_INOUT | + VMX_BASIC_TRUE_CTLS; + + const u64 reserved_bits = GENMASK_ULL(63, 56) | + GENMASK_ULL(47, 45) | + BIT_ULL(31); + u64 vmx_basic = vmcs_config.nested.basic; - if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) + BUILD_BUG_ON(feature_bits & reserved_bits); + + /* + * Except for 32BIT_PHYS_ADDR_ONLY, which is an anti-feature bit (has + * inverted polarity), the incoming value must not set feature bits or + * reserved bits that aren't allowed/supported by KVM. Fields, i.e. + * multi-bit values, are explicitly checked below. + */ + if (!is_bitwise_subset(vmx_basic, data, feature_bits | reserved_bits)) return -EINVAL; /* * KVM does not emulate a version of VMX that constrains physical * addresses of VMX structures (e.g. VMCS) to 32-bits. 
*/ - if (data & BIT_ULL(48)) + if (data & VMX_BASIC_32BIT_PHYS_ADDR_ONLY) return -EINVAL; if (vmx_basic_vmcs_revision_id(vmx_basic) != @@ -1334,16 +1345,29 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) { - const u64 feature_and_reserved_bits = - /* feature */ - BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | - BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | - /* reserved */ - GENMASK_ULL(13, 9) | BIT_ULL(31); + const u64 feature_bits = VMX_MISC_SAVE_EFER_LMA | + VMX_MISC_ACTIVITY_HLT | + VMX_MISC_ACTIVITY_SHUTDOWN | + VMX_MISC_ACTIVITY_WAIT_SIPI | + VMX_MISC_INTEL_PT | + VMX_MISC_RDMSR_IN_SMM | + VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | + VMX_MISC_VMXOFF_BLOCK_SMI | + VMX_MISC_ZERO_LEN_INS; + + const u64 reserved_bits = BIT_ULL(31) | GENMASK_ULL(13, 9); + u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low, vmcs_config.nested.misc_high); - if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) + BUILD_BUG_ON(feature_bits & reserved_bits); + + /* + * The incoming value must not set feature bits or reserved bits that + * aren't allowed/supported by KVM. Fields, i.e. multi-bit values, are + * explicitly checked below. + */ + if (!is_bitwise_subset(vmx_misc, data, feature_bits | reserved_bits)) return -EINVAL; if ((vmx->nested.msrs.pinbased_ctls_high & @@ -2317,10 +2341,12 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0 /* Posted interrupts setting is only taken from vmcs12. */ vmx->nested.pi_pending = false; - if (nested_cpu_has_posted_intr(vmcs12)) + if (nested_cpu_has_posted_intr(vmcs12)) { vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; - else + } else { + vmx->nested.posted_intr_nv = -1; exec_control &= ~PIN_BASED_POSTED_INTR; + } pin_controls_set(vmx, exec_control); /* @@ -2470,6 +2496,7 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { + vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); @@ -2507,7 +2534,7 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); - vmx->segment_cache.bitmask = 0; + vmx_segment_cache_clear(vmx); } if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & @@ -4284,11 +4311,52 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu) } if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) { + int irq; + if (block_nested_events) return -EBUSY; if (!nested_exit_on_intr(vcpu)) goto no_vmexit; - nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); + + if (!nested_exit_intr_ack_set(vcpu)) { + nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); + return 0; + } + + irq = kvm_cpu_get_extint(vcpu); + if (irq != -1) { + nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, + INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0); + return 0; + } + + irq = kvm_apic_has_interrupt(vcpu); + if (WARN_ON_ONCE(irq < 0)) + goto no_vmexit; + + /* + * If the IRQ is L2's PI notification vector, process posted + * interrupts for L2 instead of injecting VM-Exit, as the + * detection/morphing architecturally occurs when the IRQ is + * delivered to the CPU. 
Note, only interrupts that are routed + * through the local APIC trigger posted interrupt processing, + * and enabling posted interrupts requires ACK-on-exit. + */ + if (irq == vmx->nested.posted_intr_nv) { + vmx->nested.pi_pending = true; + kvm_apic_clear_irr(vcpu, irq); + goto no_vmexit; + } + + nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, + INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0); + + /* + * ACK the interrupt _after_ emulating VM-Exit, as the IRQ must + * be marked as in-service in vmcs01.GUEST_INTERRUPT_STATUS.SVI + * if APICv is active. + */ + kvm_apic_ack_interrupt(vcpu, irq); return 0; } @@ -4806,7 +4874,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) goto vmabort; } - if (kvm_set_msr(vcpu, h.index, h.value)) { + if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) { pr_debug_ratelimited( "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", __func__, j, h.index, h.value); @@ -4969,14 +5037,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (likely(!vmx->fail)) { - if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && - nested_exit_intr_ack_set(vcpu)) { - int irq = kvm_cpu_get_interrupt(vcpu); - WARN_ON(irq < 0); - vmcs12->vm_exit_intr_info = irq | - INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; - } - if (vm_exit_reason != -1) trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, vmcs12->exit_qualification, @@ -7051,7 +7111,7 @@ static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf, { msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA; msrs->misc_low |= - MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | + VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | VMX_MISC_ACTIVITY_HLT | VMX_MISC_ACTIVITY_WAIT_SIPI; @@ -7066,12 +7126,10 @@ static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs) * guest, and the VMCS structure we give it - not about the * VMX support of the underlying hardware. 
*/ - msrs->basic = - VMCS12_REVISION | - VMX_BASIC_TRUE_CTLS | - ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | - (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); + msrs->basic = vmx_basic_encode_vmcs_info(VMCS12_REVISION, VMCS12_SIZE, + X86_MEMTYPE_WB); + msrs->basic |= VMX_BASIC_TRUE_CTLS; if (cpu_has_vmx_basic_inout()) msrs->basic |= VMX_BASIC_INOUT; } diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index cce4e2aa30fbf8..2c296b6abb8ccf 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -39,11 +39,17 @@ bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) { + lockdep_assert_once(lockdep_is_held(&vcpu->mutex) || + !refcount_read(&vcpu->kvm->users_count)); + return to_vmx(vcpu)->nested.cached_vmcs12; } static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu) { + lockdep_assert_once(lockdep_is_held(&vcpu->mutex) || + !refcount_read(&vcpu->kvm->users_count)); + return to_vmx(vcpu)->nested.cached_shadow_vmcs12; } @@ -109,7 +115,7 @@ static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu) static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu) { return to_vmx(vcpu)->nested.msrs.misc_low & - MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; + VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; } static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 6fef01e0536e50..a3c3d2a51f47d1 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -274,7 +274,7 @@ static int handle_encls_ecreate(struct kvm_vcpu *vcpu) * simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to * enforce restriction of access to the PROVISIONKEY. */ - contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT); + contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL); if (!contents) return -ENOMEM; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 733a0c45d1a612..1a4438358c5e38 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -525,10 +525,6 @@ static const struct kvm_vmx_segment_field { VMX_SEGMENT_FIELD(LDTR), }; -static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx) -{ - vmx->segment_cache.bitmask = 0; -} static unsigned long host_idt_base; @@ -755,7 +751,7 @@ static int kvm_cpu_vmxoff(void) return -EIO; } -static void vmx_emergency_disable(void) +void vmx_emergency_disable_virtualization_cpu(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v; @@ -1998,15 +1994,15 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx, return !(msr->data & ~valid_bits); } -int vmx_get_msr_feature(struct kvm_msr_entry *msr) +int vmx_get_feature_msr(u32 msr, u64 *data) { - switch (msr->index) { + switch (msr) { case KVM_FIRST_EMULATED_VMX_MSR ... 
KVM_LAST_EMULATED_VMX_MSR: if (!nested) return 1; - return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); + return vmx_get_vmx_msr(&vmcs_config.nested, msr, data); default: - return KVM_MSR_RET_INVALID; + return KVM_MSR_RET_UNSUPPORTED; } } @@ -2605,13 +2601,13 @@ static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr) static int setup_vmcs_config(struct vmcs_config *vmcs_conf, struct vmx_capability *vmx_cap) { - u32 vmx_msr_low, vmx_msr_high; u32 _pin_based_exec_control = 0; u32 _cpu_based_exec_control = 0; u32 _cpu_based_2nd_exec_control = 0; u64 _cpu_based_3rd_exec_control = 0; u32 _vmexit_control = 0; u32 _vmentry_control = 0; + u64 basic_msr; u64 misc_msr; int i; @@ -2734,29 +2730,29 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, _vmexit_control &= ~x_ctrl; } - rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); + rdmsrl(MSR_IA32_VMX_BASIC, basic_msr); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ - if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) + if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE) return -EIO; #ifdef CONFIG_X86_64 - /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ - if (vmx_msr_high & (1u<<16)) + /* + * KVM expects to be able to shove all legal physical addresses into + * VMCS fields for 64-bit kernels, and per the SDM, "This bit is always + * 0 for processors that support Intel 64 architecture". + */ + if (basic_msr & VMX_BASIC_32BIT_PHYS_ADDR_ONLY) return -EIO; #endif /* Require Write-Back (WB) memory type for VMCS accesses. */ - if (((vmx_msr_high >> 18) & 15) != 6) + if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB) return -EIO; rdmsrl(MSR_IA32_VMX_MISC, misc_msr); - vmcs_conf->size = vmx_msr_high & 0x1fff; - vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; - - vmcs_conf->revision_id = vmx_msr_low; - + vmcs_conf->basic = basic_msr; vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; @@ -2844,7 +2840,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer) return -EFAULT; } -int vmx_hardware_enable(void) +int vmx_enable_virtualization_cpu(void) { int cpu = raw_smp_processor_id(); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); @@ -2881,7 +2877,7 @@ static void vmclear_local_loaded_vmcss(void) __loaded_vmcs_clear(v); } -void vmx_hardware_disable(void) +void vmx_disable_virtualization_cpu(void) { vmclear_local_loaded_vmcss(); @@ -2903,13 +2899,13 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) if (!pages) return NULL; vmcs = page_address(pages); - memset(vmcs, 0, vmcs_config.size); + memset(vmcs, 0, vmx_basic_vmcs_size(vmcs_config.basic)); /* KVM supports Enlightened VMCS v1 only */ if (kvm_is_using_evmcs()) vmcs->hdr.revision_id = KVM_EVMCS_VERSION; else - vmcs->hdr.revision_id = vmcs_config.revision_id; + vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic); if (shadow) vmcs->hdr.shadow_vmcs = 1; @@ -3002,7 +2998,7 @@ static __init int alloc_kvm_area(void) * physical CPU. */ if (kvm_is_using_evmcs()) - vmcs->hdr.revision_id = vmcs_config.revision_id; + vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic); per_cpu(vmxarea, cpu) = vmcs; } @@ -4219,6 +4215,13 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, { struct vcpu_vmx *vmx = to_vmx(vcpu); + /* + * DO NOT query the vCPU's vmcs12, as vmcs12 is dynamically allocated + * and freed, and must not be accessed outside of vcpu->mutex. 
The + * vCPU's cached PI NV is valid if and only if posted interrupts + * enabled in its vmcs12, i.e. checking the vector also checks that + * L1 has enabled posted interrupts for L2. + */ if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { /* @@ -5804,8 +5807,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK) ? PFERR_PRESENT_MASK : 0; - error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ? - PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; + if (error_code & EPT_VIOLATION_GVA_IS_VALID) + error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ? + PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; /* * Check that the GPA doesn't exceed physical memory limits, as that is @@ -7265,6 +7269,8 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu, return handle_fastpath_set_msr_irqoff(vcpu); case EXIT_REASON_PREEMPTION_TIMER: return handle_fastpath_preemption_timer(vcpu, force_immediate_exit); + case EXIT_REASON_HLT: + return handle_fastpath_hlt(vcpu); default: return EXIT_FASTPATH_NONE; } @@ -7965,6 +7971,7 @@ static __init void vmx_set_cpu_caps(void) kvm_cpu_cap_clear(X86_FEATURE_SGX_LC); kvm_cpu_cap_clear(X86_FEATURE_SGX1); kvm_cpu_cap_clear(X86_FEATURE_SGX2); + kvm_cpu_cap_clear(X86_FEATURE_SGX_EDECCSSA); } if (vmx_umip_emulated()) @@ -8515,7 +8522,7 @@ __init int vmx_hardware_setup(void) u64 use_timer_freq = 5000ULL * 1000 * 1000; cpu_preemption_timer_multi = - vmcs_config.misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; + vmx_misc_preemption_timer_rate(vmcs_config.misc); if (tsc_khz) use_timer_freq = (u64)tsc_khz * 1000; @@ -8582,8 +8589,6 @@ static void __vmx_exit(void) { allow_smaller_maxphyaddr = false; - cpu_emergency_unregister_virt_callback(vmx_emergency_disable); - vmx_cleanup_l1d_flush(); } @@ -8630,8 +8635,6 @@ static int __init vmx_init(void) pi_init_cpu(cpu); } - cpu_emergency_register_virt_callback(vmx_emergency_disable); - vmx_check_vmcs12_offsets(); /* diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 42498fa63abbe5..2325f773a20be0 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -17,10 +17,6 @@ #include "run_flags.h" #include "../mmu.h" -#define MSR_TYPE_R 1 -#define MSR_TYPE_W 2 -#define MSR_TYPE_RW 3 - #define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) #ifdef CONFIG_X86_64 @@ -756,4 +752,9 @@ static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu) return lapic_in_kernel(vcpu) && enable_ipiv; } +static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx) +{ + vmx->segment_cache.bitmask = 0; +} + #endif /* __KVM_X86_VMX_H */ diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.h b/arch/x86/kvm/vmx/vmx_onhyperv.h index eb48153bfd73c0..bba24ed99ee6ca 100644 --- a/arch/x86/kvm/vmx/vmx_onhyperv.h +++ b/arch/x86/kvm/vmx/vmx_onhyperv.h @@ -104,6 +104,14 @@ static inline void evmcs_load(u64 phys_addr) struct hv_vp_assist_page *vp_ap = hv_get_vp_assist_page(smp_processor_id()); + /* + * When enabling eVMCS, KVM verifies that every CPU has a valid hv_vp_assist_page() + * and aborts enabling the feature otherwise. CPU onlining path is also checked in + * vmx_hardware_enable(). 
+ */ + if (KVM_BUG_ON(!vp_ap, kvm_get_running_vcpu()->kvm)) + return; + if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall) vp_ap->nested_control.features.directhypercall = 1; vp_ap->current_nested_vmcs = phys_addr; diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h index 8060e5fc6dbd83..93e020dc88f654 100644 --- a/arch/x86/kvm/vmx/vmx_ops.h +++ b/arch/x86/kvm/vmx/vmx_ops.h @@ -47,7 +47,7 @@ static __always_inline void vmcs_check16(unsigned long field) BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, "16-bit accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, - "16-bit accessor invalid for 32-bit high field"); + "16-bit accessor invalid for 32-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, "16-bit accessor invalid for natural width field"); } diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h index ce3221cd1d01ad..a55981c5216e63 100644 --- a/arch/x86/kvm/vmx/x86_ops.h +++ b/arch/x86/kvm/vmx/x86_ops.h @@ -13,8 +13,9 @@ extern struct kvm_x86_init_ops vt_init_ops __initdata; void vmx_hardware_unsetup(void); int vmx_check_processor_compat(void); -int vmx_hardware_enable(void); -void vmx_hardware_disable(void); +int vmx_enable_virtualization_cpu(void); +void vmx_disable_virtualization_cpu(void); +void vmx_emergency_disable_virtualization_cpu(void); int vmx_vm_init(struct kvm *kvm); void vmx_vm_destroy(struct kvm *kvm); int vmx_vcpu_precreate(struct kvm *kvm); @@ -56,7 +57,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index); void vmx_msr_filter_changed(struct kvm_vcpu *vcpu); void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu); void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); -int vmx_get_msr_feature(struct kvm_msr_entry *msr); +int vmx_get_feature_msr(u32 msr, u64 *data); int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg); void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c983c8e434b8b8..83fe0a78146fc1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -305,24 +305,237 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { static struct kmem_cache *x86_emulator_cache; /* - * When called, it means the previous get/set msr reached an invalid msr. - * Return true if we want to ignore/silent this failed msr access. + * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track + * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS, + * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that + * require host support, i.e. should be probed via RDMSR. emulated_msrs holds + * MSRs that KVM emulates without strictly requiring host support. + * msr_based_features holds MSRs that enumerate features, i.e. are effectively + * CPUID leafs. Note, msr_based_features isn't mutually exclusive with + * msrs_to_save and emulated_msrs. 
*/ -static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write) + +static const u32 msrs_to_save_base[] = { + MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, + MSR_STAR, +#ifdef CONFIG_X86_64 + MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, +#endif + MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, + MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, + MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL, + MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, + MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, + MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, + MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B, + MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, + MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, + MSR_IA32_UMWAIT_CONTROL, + + MSR_IA32_XFD, MSR_IA32_XFD_ERR, +}; + +static const u32 msrs_to_save_pmu[] = { + MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, + MSR_ARCH_PERFMON_FIXED_CTR0 + 2, + MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, + MSR_CORE_PERF_GLOBAL_CTRL, + MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG, + + /* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */ + MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, + MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3, + MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5, + MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7, + MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1, + MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3, + MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5, + MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7, + + MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3, + MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3, + + /* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. 
*/ + MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2, + MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, + MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, + MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, + + MSR_AMD64_PERF_CNTR_GLOBAL_CTL, + MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, + MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, +}; + +static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) + + ARRAY_SIZE(msrs_to_save_pmu)]; +static unsigned num_msrs_to_save; + +static const u32 emulated_msrs_all[] = { + MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, + MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, + +#ifdef CONFIG_KVM_HYPERV + HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, + HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, + HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, + HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, + HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, + HV_X64_MSR_RESET, + HV_X64_MSR_VP_INDEX, + HV_X64_MSR_VP_RUNTIME, + HV_X64_MSR_SCONTROL, + HV_X64_MSR_STIMER0_CONFIG, + HV_X64_MSR_VP_ASSIST_PAGE, + HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, + HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL, + HV_X64_MSR_SYNDBG_OPTIONS, + HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, + HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, + HV_X64_MSR_SYNDBG_PENDING_BUFFER, +#endif + + MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, + MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, + + MSR_IA32_TSC_ADJUST, + MSR_IA32_TSC_DEADLINE, + MSR_IA32_ARCH_CAPABILITIES, + MSR_IA32_PERF_CAPABILITIES, + MSR_IA32_MISC_ENABLE, + MSR_IA32_MCG_STATUS, + MSR_IA32_MCG_CTL, + MSR_IA32_MCG_EXT_CTL, + MSR_IA32_SMBASE, + MSR_SMI_COUNT, + MSR_PLATFORM_INFO, + MSR_MISC_FEATURES_ENABLES, + MSR_AMD64_VIRT_SPEC_CTRL, + MSR_AMD64_TSC_RATIO, + MSR_IA32_POWER_CTL, + MSR_IA32_UCODE_REV, + + /* + * KVM always supports the "true" VMX control MSRs, even if the host + * does not. The VMX MSRs as a whole are considered "emulated" as KVM + * doesn't strictly require them to exist in the host (ignoring that + * KVM would refuse to load in the first place if the core set of MSRs + * aren't supported). + */ + MSR_IA32_VMX_BASIC, + MSR_IA32_VMX_TRUE_PINBASED_CTLS, + MSR_IA32_VMX_TRUE_PROCBASED_CTLS, + MSR_IA32_VMX_TRUE_EXIT_CTLS, + MSR_IA32_VMX_TRUE_ENTRY_CTLS, + MSR_IA32_VMX_MISC, + MSR_IA32_VMX_CR0_FIXED0, + MSR_IA32_VMX_CR4_FIXED0, + MSR_IA32_VMX_VMCS_ENUM, + MSR_IA32_VMX_PROCBASED_CTLS2, + MSR_IA32_VMX_EPT_VPID_CAP, + MSR_IA32_VMX_VMFUNC, + + MSR_K7_HWCR, + MSR_KVM_POLL_CONTROL, +}; + +static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; +static unsigned num_emulated_msrs; + +/* + * List of MSRs that control the existence of MSR-based features, i.e. MSRs + * that are effectively CPUID leafs. VMX MSRs are also included in the set of + * feature MSRs, but are handled separately to allow expedited lookups. + */ +static const u32 msr_based_features_all_except_vmx[] = { + MSR_AMD64_DE_CFG, + MSR_IA32_UCODE_REV, + MSR_IA32_ARCH_CAPABILITIES, + MSR_IA32_PERF_CAPABILITIES, +}; + +static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) + + (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)]; +static unsigned int num_msr_based_features; + +/* + * All feature MSRs except uCode revID, which tracks the currently loaded uCode + * patch, are immutable once the vCPU model is defined. + */ +static bool kvm_is_immutable_feature_msr(u32 msr) { - const char *op = write ? 
"wrmsr" : "rdmsr"; + int i; - if (ignore_msrs) { - if (report_ignored_msrs) - kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", - op, msr, data); - /* Mask the error */ + if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR) return true; - } else { + + for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) { + if (msr == msr_based_features_all_except_vmx[i]) + return msr != MSR_IA32_UCODE_REV; + } + + return false; +} + +static bool kvm_is_advertised_msr(u32 msr_index) +{ + unsigned int i; + + for (i = 0; i < num_msrs_to_save; i++) { + if (msrs_to_save[i] == msr_index) + return true; + } + + for (i = 0; i < num_emulated_msrs; i++) { + if (emulated_msrs[i] == msr_index) + return true; + } + + return false; +} + +typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data, + bool host_initiated); + +static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr, + u64 *data, bool host_initiated, + enum kvm_msr_access rw, + msr_access_t msr_access_fn) +{ + const char *op = rw == MSR_TYPE_W ? "wrmsr" : "rdmsr"; + int ret; + + BUILD_BUG_ON(rw != MSR_TYPE_R && rw != MSR_TYPE_W); + + /* + * Zero the data on read failures to avoid leaking stack data to the + * guest and/or userspace, e.g. if the failure is ignored below. + */ + ret = msr_access_fn(vcpu, msr, data, host_initiated); + if (ret && rw == MSR_TYPE_R) + *data = 0; + + if (ret != KVM_MSR_RET_UNSUPPORTED) + return ret; + + /* + * Userspace is allowed to read MSRs, and write '0' to MSRs, that KVM + * advertises to userspace, even if an MSR isn't fully supported. + * Simply check that @data is '0', which covers both the write '0' case + * and all reads (in which case @data is zeroed on failure; see above). + */ + if (host_initiated && !*data && kvm_is_advertised_msr(msr)) + return 0; + + if (!ignore_msrs) { kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n", - op, msr, data); - return false; + op, msr, *data); + return ret; } + + if (report_ignored_msrs) + kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data); + + return 0; } static struct kmem_cache *kvm_alloc_emulator_cache(void) @@ -355,7 +568,7 @@ static void kvm_on_user_return(struct user_return_notifier *urn) /* * Disabling irqs at this point since the following code could be - * interrupted and executed through kvm_arch_hardware_disable() + * interrupted and executed through kvm_arch_disable_virtualization_cpu() */ local_irq_save(flags); if (msrs->registered) { @@ -413,8 +626,7 @@ EXPORT_SYMBOL_GPL(kvm_find_user_return_msr); static void kvm_user_return_msr_cpu_online(void) { - unsigned int cpu = smp_processor_id(); - struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); + struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs); u64 value; int i; @@ -621,12 +833,6 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto ex->payload = payload; } -/* Forcibly leave the nested mode in cases like a vCPU reset */ -static void kvm_leave_nested(struct kvm_vcpu *vcpu) -{ - kvm_x86_ops.nested_ops->leave_nested(vcpu); -} - static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool has_payload, unsigned long payload, bool reinject) @@ -1341,247 +1547,75 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) { u64 fixed = DR6_FIXED_1; - if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) - fixed |= DR6_RTM; - - if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) - fixed |= DR6_BUS_LOCK; - return fixed; -} - -int 
kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) -{ - size_t size = ARRAY_SIZE(vcpu->arch.db); - - switch (dr) { - case 0 ... 3: - vcpu->arch.db[array_index_nospec(dr, size)] = val; - if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) - vcpu->arch.eff_db[dr] = val; - break; - case 4: - case 6: - if (!kvm_dr6_valid(val)) - return 1; /* #GP */ - vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); - break; - case 5: - default: /* 7 */ - if (!kvm_dr7_valid(val)) - return 1; /* #GP */ - vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; - kvm_update_dr7(vcpu); - break; - } - - return 0; -} -EXPORT_SYMBOL_GPL(kvm_set_dr); - -unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr) -{ - size_t size = ARRAY_SIZE(vcpu->arch.db); - - switch (dr) { - case 0 ... 3: - return vcpu->arch.db[array_index_nospec(dr, size)]; - case 4: - case 6: - return vcpu->arch.dr6; - case 5: - default: /* 7 */ - return vcpu->arch.dr7; - } -} -EXPORT_SYMBOL_GPL(kvm_get_dr); - -int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) -{ - u32 ecx = kvm_rcx_read(vcpu); - u64 data; - - if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - - kvm_rax_write(vcpu, (u32)data); - kvm_rdx_write(vcpu, data >> 32); - return kvm_skip_emulated_instruction(vcpu); -} -EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc); - -/* - * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track - * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS, - * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that - * require host support, i.e. should be probed via RDMSR. emulated_msrs holds - * MSRs that KVM emulates without strictly requiring host support. - * msr_based_features holds MSRs that enumerate features, i.e. are effectively - * CPUID leafs. Note, msr_based_features isn't mutually exclusive with - * msrs_to_save and emulated_msrs. - */ - -static const u32 msrs_to_save_base[] = { - MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, - MSR_STAR, -#ifdef CONFIG_X86_64 - MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, -#endif - MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, - MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, - MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL, - MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, - MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, - MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, - MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B, - MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, - MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, - MSR_IA32_UMWAIT_CONTROL, - - MSR_IA32_XFD, MSR_IA32_XFD_ERR, -}; - -static const u32 msrs_to_save_pmu[] = { - MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, - MSR_ARCH_PERFMON_FIXED_CTR0 + 2, - MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, - MSR_CORE_PERF_GLOBAL_CTRL, - MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG, - - /* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. 
*/ - MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, - MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3, - MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5, - MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7, - MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1, - MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3, - MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5, - MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7, - - MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3, - MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3, - - /* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */ - MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2, - MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, - MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, - MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, - - MSR_AMD64_PERF_CNTR_GLOBAL_CTL, - MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, - MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, -}; - -static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) + - ARRAY_SIZE(msrs_to_save_pmu)]; -static unsigned num_msrs_to_save; - -static const u32 emulated_msrs_all[] = { - MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, - MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, - -#ifdef CONFIG_KVM_HYPERV - HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, - HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, - HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, - HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, - HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, - HV_X64_MSR_RESET, - HV_X64_MSR_VP_INDEX, - HV_X64_MSR_VP_RUNTIME, - HV_X64_MSR_SCONTROL, - HV_X64_MSR_STIMER0_CONFIG, - HV_X64_MSR_VP_ASSIST_PAGE, - HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, - HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL, - HV_X64_MSR_SYNDBG_OPTIONS, - HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, - HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, - HV_X64_MSR_SYNDBG_PENDING_BUFFER, -#endif - - MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, - MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, - - MSR_IA32_TSC_ADJUST, - MSR_IA32_TSC_DEADLINE, - MSR_IA32_ARCH_CAPABILITIES, - MSR_IA32_PERF_CAPABILITIES, - MSR_IA32_MISC_ENABLE, - MSR_IA32_MCG_STATUS, - MSR_IA32_MCG_CTL, - MSR_IA32_MCG_EXT_CTL, - MSR_IA32_SMBASE, - MSR_SMI_COUNT, - MSR_PLATFORM_INFO, - MSR_MISC_FEATURES_ENABLES, - MSR_AMD64_VIRT_SPEC_CTRL, - MSR_AMD64_TSC_RATIO, - MSR_IA32_POWER_CTL, - MSR_IA32_UCODE_REV, - - /* - * KVM always supports the "true" VMX control MSRs, even if the host - * does not. The VMX MSRs as a whole are considered "emulated" as KVM - * doesn't strictly require them to exist in the host (ignoring that - * KVM would refuse to load in the first place if the core set of MSRs - * aren't supported). 
- */ - MSR_IA32_VMX_BASIC, - MSR_IA32_VMX_TRUE_PINBASED_CTLS, - MSR_IA32_VMX_TRUE_PROCBASED_CTLS, - MSR_IA32_VMX_TRUE_EXIT_CTLS, - MSR_IA32_VMX_TRUE_ENTRY_CTLS, - MSR_IA32_VMX_MISC, - MSR_IA32_VMX_CR0_FIXED0, - MSR_IA32_VMX_CR4_FIXED0, - MSR_IA32_VMX_VMCS_ENUM, - MSR_IA32_VMX_PROCBASED_CTLS2, - MSR_IA32_VMX_EPT_VPID_CAP, - MSR_IA32_VMX_VMFUNC, + if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) + fixed |= DR6_RTM; - MSR_K7_HWCR, - MSR_KVM_POLL_CONTROL, -}; + if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) + fixed |= DR6_BUS_LOCK; + return fixed; +} -static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; -static unsigned num_emulated_msrs; +int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) +{ + size_t size = ARRAY_SIZE(vcpu->arch.db); -/* - * List of MSRs that control the existence of MSR-based features, i.e. MSRs - * that are effectively CPUID leafs. VMX MSRs are also included in the set of - * feature MSRs, but are handled separately to allow expedited lookups. - */ -static const u32 msr_based_features_all_except_vmx[] = { - MSR_AMD64_DE_CFG, - MSR_IA32_UCODE_REV, - MSR_IA32_ARCH_CAPABILITIES, - MSR_IA32_PERF_CAPABILITIES, -}; + switch (dr) { + case 0 ... 3: + vcpu->arch.db[array_index_nospec(dr, size)] = val; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) + vcpu->arch.eff_db[dr] = val; + break; + case 4: + case 6: + if (!kvm_dr6_valid(val)) + return 1; /* #GP */ + vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); + break; + case 5: + default: /* 7 */ + if (!kvm_dr7_valid(val)) + return 1; /* #GP */ + vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; + kvm_update_dr7(vcpu); + break; + } -static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) + - (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)]; -static unsigned int num_msr_based_features; + return 0; +} +EXPORT_SYMBOL_GPL(kvm_set_dr); -/* - * All feature MSRs except uCode revID, which tracks the currently loaded uCode - * patch, are immutable once the vCPU model is defined. - */ -static bool kvm_is_immutable_feature_msr(u32 msr) +unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr) { - int i; + size_t size = ARRAY_SIZE(vcpu->arch.db); - if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR) - return true; + switch (dr) { + case 0 ... 
3: + return vcpu->arch.db[array_index_nospec(dr, size)]; + case 4: + case 6: + return vcpu->arch.dr6; + case 5: + default: /* 7 */ + return vcpu->arch.dr7; + } +} +EXPORT_SYMBOL_GPL(kvm_get_dr); - for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) { - if (msr == msr_based_features_all_except_vmx[i]) - return msr != MSR_IA32_UCODE_REV; +int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) +{ + u32 ecx = kvm_rcx_read(vcpu); + u64 data; + + if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { + kvm_inject_gp(vcpu, 0); + return 1; } - return false; + kvm_rax_write(vcpu, (u32)data); + kvm_rdx_write(vcpu, data >> 32); + return kvm_skip_emulated_instruction(vcpu); } +EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc); /* * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM @@ -1660,40 +1694,31 @@ static u64 kvm_get_arch_capabilities(void) return data; } -static int kvm_get_msr_feature(struct kvm_msr_entry *msr) +static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, + bool host_initiated) { - switch (msr->index) { + WARN_ON_ONCE(!host_initiated); + + switch (index) { case MSR_IA32_ARCH_CAPABILITIES: - msr->data = kvm_get_arch_capabilities(); + *data = kvm_get_arch_capabilities(); break; case MSR_IA32_PERF_CAPABILITIES: - msr->data = kvm_caps.supported_perf_cap; + *data = kvm_caps.supported_perf_cap; break; case MSR_IA32_UCODE_REV: - rdmsrl_safe(msr->index, &msr->data); + rdmsrl_safe(index, data); break; default: - return kvm_x86_call(get_msr_feature)(msr); + return kvm_x86_call(get_feature_msr)(index, data); } return 0; } -static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) +static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { - struct kvm_msr_entry msr; - int r; - - /* Unconditionally clear the output for simplicity */ - msr.data = 0; - msr.index = index; - r = kvm_get_msr_feature(&msr); - - if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false)) - r = 0; - - *data = msr.data; - - return r; + return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R, + kvm_get_feature_msr); } static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) @@ -1880,16 +1905,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, return kvm_x86_call(set_msr)(vcpu, &msr); } +static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, + bool host_initiated) +{ + return __kvm_set_msr(vcpu, index, *data, host_initiated); +} + static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, u32 index, u64 data, bool host_initiated) { - int ret = __kvm_set_msr(vcpu, index, data, host_initiated); - - if (ret == KVM_MSR_RET_INVALID) - if (kvm_msr_ignored_check(index, data, true)) - ret = 0; - - return ret; + return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W, + _kvm_set_msr); } /* @@ -1928,31 +1954,25 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated) { - int ret = __kvm_get_msr(vcpu, index, data, host_initiated); - - if (ret == KVM_MSR_RET_INVALID) { - /* Unconditionally clear *data for simplicity */ - *data = 0; - if (kvm_msr_ignored_check(index, 0, false)) - ret = 0; - } - - return ret; + return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R, + __kvm_get_msr); } -static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) +int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) { if (!kvm_msr_allowed(vcpu, index, 
KVM_MSR_FILTER_READ)) return KVM_MSR_RET_FILTERED; return kvm_get_msr_ignored_check(vcpu, index, data, false); } +EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter); -static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) +int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) { if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) return KVM_MSR_RET_FILTERED; return kvm_set_msr_ignored_check(vcpu, index, data, false); } +EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter); int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) { @@ -1999,7 +2019,7 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) static u64 kvm_msr_reason(int r) { switch (r) { - case KVM_MSR_RET_INVALID: + case KVM_MSR_RET_UNSUPPORTED: return KVM_MSR_EXIT_REASON_UNKNOWN; case KVM_MSR_RET_FILTERED: return KVM_MSR_EXIT_REASON_FILTER; @@ -2162,31 +2182,34 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) { u32 msr = kvm_rcx_read(vcpu); u64 data; - fastpath_t ret = EXIT_FASTPATH_NONE; + fastpath_t ret; + bool handled; kvm_vcpu_srcu_read_lock(vcpu); switch (msr) { case APIC_BASE_MSR + (APIC_ICR >> 4): data = kvm_read_edx_eax(vcpu); - if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { - kvm_skip_emulated_instruction(vcpu); - ret = EXIT_FASTPATH_EXIT_HANDLED; - } + handled = !handle_fastpath_set_x2apic_icr_irqoff(vcpu, data); break; case MSR_IA32_TSC_DEADLINE: data = kvm_read_edx_eax(vcpu); - if (!handle_fastpath_set_tscdeadline(vcpu, data)) { - kvm_skip_emulated_instruction(vcpu); - ret = EXIT_FASTPATH_REENTER_GUEST; - } + handled = !handle_fastpath_set_tscdeadline(vcpu, data); break; default: + handled = false; break; } - if (ret != EXIT_FASTPATH_NONE) + if (handled) { + if (!kvm_skip_emulated_instruction(vcpu)) + ret = EXIT_FASTPATH_EXIT_USERSPACE; + else + ret = EXIT_FASTPATH_REENTER_GUEST; trace_kvm_msr_write(msr, data); + } else { + ret = EXIT_FASTPATH_NONE; + } kvm_vcpu_srcu_read_unlock(vcpu); @@ -3746,18 +3769,6 @@ static void record_steal_time(struct kvm_vcpu *vcpu) mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); } -static bool kvm_is_msr_to_save(u32 msr_index) -{ - unsigned int i; - - for (i = 0; i < num_msrs_to_save; i++) { - if (msrs_to_save[i] == msr_index) - return true; - } - - return false; -} - int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { u32 msr = msr_info->index; @@ -4139,15 +4150,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (kvm_pmu_is_valid_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); - /* - * Userspace is allowed to write '0' to MSRs that KVM reports - * as to-be-saved, even if an MSRs isn't fully supported. - */ - if (msr_info->host_initiated && !data && - kvm_is_msr_to_save(msr)) - break; - - return KVM_MSR_RET_INVALID; + return KVM_MSR_RET_UNSUPPORTED; } return 0; } @@ -4498,17 +4501,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) return kvm_pmu_get_msr(vcpu, msr_info); - /* - * Userspace is allowed to read MSRs that KVM reports as - * to-be-saved, even if an MSR isn't fully supported. 
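The special cases being deleted here are replaced by returning KVM_MSR_RET_UNSUPPORTED and letting a common helper decide. kvm_do_msr_access() itself is added by an earlier hunk that is not shown in this excerpt; reconstructed only from the call sites above, its behavior is presumably roughly the following (body and details assumed):

typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
			    bool host_initiated);

static int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr, u64 *data,
			     bool host_initiated, enum kvm_msr_access rw,
			     msr_access_t msr_access_fn)
{
	int ret = msr_access_fn(vcpu, msr, data, host_initiated);

	if (ret == KVM_MSR_RET_UNSUPPORTED) {
		/* Unconditionally clear the output on failed reads. */
		if (rw & MSR_TYPE_R)
			*data = 0;
		/* Same ignore_msrs policy the deleted call-site code applied. */
		if (kvm_msr_ignored_check(msr, (rw & MSR_TYPE_W) ? *data : 0,
					  rw & MSR_TYPE_W))
			ret = 0;
	}
	return ret;
}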
- */ - if (msr_info->host_initiated && - kvm_is_msr_to_save(msr_info->index)) { - msr_info->data = 0; - break; - } - - return KVM_MSR_RET_INVALID; + return KVM_MSR_RET_UNSUPPORTED; } return 0; } @@ -4946,7 +4939,7 @@ long kvm_arch_dev_ioctl(struct file *filp, break; } case KVM_GET_MSRS: - r = msr_io(NULL, argp, do_get_msr_feature, 1); + r = msr_io(NULL, argp, do_get_feature_msr, 1); break; #ifdef CONFIG_KVM_HYPERV case KVM_GET_SUPPORTED_HV_CPUID: @@ -7383,11 +7376,9 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) static void kvm_probe_feature_msr(u32 msr_index) { - struct kvm_msr_entry msr = { - .index = msr_index, - }; + u64 data; - if (kvm_get_msr_feature(&msr)) + if (kvm_get_feature_msr(NULL, msr_index, &data, true)) return; msr_based_features[num_msr_based_features++] = msr_index; @@ -8865,60 +8856,13 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) return 1; } -static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - int emulation_type) +static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu, + gpa_t cr2_or_gpa, + int emulation_type) { - gpa_t gpa = cr2_or_gpa; - kvm_pfn_t pfn; - if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) return false; - if (WARN_ON_ONCE(is_guest_mode(vcpu)) || - WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) - return false; - - if (!vcpu->arch.mmu->root_role.direct) { - /* - * Write permission should be allowed since only - * write access need to be emulated. - */ - gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); - - /* - * If the mapping is invalid in guest, let cpu retry - * it to generate fault. - */ - if (gpa == INVALID_GPA) - return true; - } - - /* - * Do not retry the unhandleable instruction if it faults on the - * readonly host memory, otherwise it will goto a infinite loop: - * retry instruction -> write #PF -> emulation fail -> retry - * instruction -> ... - */ - pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); - - /* - * If the instruction failed on the error pfn, it can not be fixed, - * report the error to userspace. - */ - if (is_error_noslot_pfn(pfn)) - return false; - - kvm_release_pfn_clean(pfn); - - /* - * If emulation may have been triggered by a write to a shadowed page - * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the - * guest to let the CPU re-execute the instruction in the hope that the - * CPU can cleanly execute the instruction that KVM failed to emulate. - */ - if (vcpu->kvm->arch.indirect_shadow_pages) - kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); - /* * If the failed instruction faulted on an access to page tables that * are used to translate any part of the instruction, KVM can't resolve @@ -8929,54 +8873,24 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, * then zap the SPTE to unprotect the gfn, and then do it all over * again. Report the error to userspace. 
*/ - return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP); -} - -static bool retry_instruction(struct x86_emulate_ctxt *ctxt, - gpa_t cr2_or_gpa, int emulation_type) -{ - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa; - - last_retry_eip = vcpu->arch.last_retry_eip; - last_retry_addr = vcpu->arch.last_retry_addr; + if (emulation_type & EMULTYPE_WRITE_PF_TO_SP) + return false; /* - * If the emulation is caused by #PF and it is non-page_table - * writing instruction, it means the VM-EXIT is caused by shadow - * page protected, we can zap the shadow page and retry this - * instruction directly. - * - * Note: if the guest uses a non-page-table modifying instruction - * on the PDE that points to the instruction, then we will unmap - * the instruction and go to an infinite loop. So, we cache the - * last retried eip and the last fault address, if we meet the eip - * and the address again, we can break out of the potential infinite - * loop. + * If emulation may have been triggered by a write to a shadowed page + * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the + * guest to let the CPU re-execute the instruction in the hope that the + * CPU can cleanly execute the instruction that KVM failed to emulate. */ - vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; - - if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) - return false; - - if (WARN_ON_ONCE(is_guest_mode(vcpu)) || - WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) - return false; - - if (x86_page_table_writing_insn(ctxt)) - return false; - - if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) - return false; - - vcpu->arch.last_retry_eip = ctxt->eip; - vcpu->arch.last_retry_addr = cr2_or_gpa; - - if (!vcpu->arch.mmu->root_role.direct) - gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); - - kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); + __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true); + /* + * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible + * all SPTEs were already zapped by a different task. The alternative + * is to report the error to userspace and likely terminate the guest, + * and the last_retry_{eip,addr} checks will prevent retrying the page + * fault indefinitely, i.e. there's nothing to lose by retrying. + */ return true; } @@ -9176,6 +9090,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; bool writeback = true; + if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) && + (WARN_ON_ONCE(is_guest_mode(vcpu)) || + WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))) + emulation_type &= ~EMULTYPE_ALLOW_RETRY_PF; + r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len); if (r != X86EMUL_CONTINUE) { if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT) @@ -9206,8 +9125,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, kvm_queue_exception(vcpu, UD_VECTOR); return 1; } - if (reexecute_instruction(vcpu, cr2_or_gpa, - emulation_type)) + if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa, + emulation_type)) return 1; if (ctxt->have_exception && @@ -9254,7 +9173,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, return 1; } - if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) + /* + * If emulation was caused by a write-protection #PF on a non-page_table + * writing instruction, try to unprotect the gfn, i.e. 
zap shadow pages, + * and retry the instruction, as the vCPU is likely no longer using the + * gfn as a page table. + */ + if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) && + !x86_page_table_writing_insn(ctxt) && + kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa)) return 1; /* this is needed for vmware backdoor interface to work since it @@ -9285,7 +9212,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, return 1; if (r == EMULATION_FAILED) { - if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type)) + if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa, + emulation_type)) return 1; return handle_emulation_failure(vcpu, emulation_type); @@ -9753,7 +9681,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) guard(mutex)(&vendor_module_lock); - if (kvm_x86_ops.hardware_enable) { + if (kvm_x86_ops.enable_virtualization_cpu) { pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name); return -EEXIST; } @@ -9880,7 +9808,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) return 0; out_unwind_ops: - kvm_x86_ops.hardware_enable = NULL; + kvm_x86_ops.enable_virtualization_cpu = NULL; kvm_x86_call(hardware_unsetup)(); out_mmu_exit: kvm_mmu_vendor_module_exit(); @@ -9904,72 +9832,27 @@ void kvm_x86_vendor_exit(void) if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); - } -#ifdef CONFIG_X86_64 - pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); - irq_work_sync(&pvclock_irq_work); - cancel_work_sync(&pvclock_gtod_work); -#endif - kvm_x86_call(hardware_unsetup)(); - kvm_mmu_vendor_module_exit(); - free_percpu(user_return_msrs); - kmem_cache_destroy(x86_emulator_cache); -#ifdef CONFIG_KVM_XEN - static_key_deferred_flush(&kvm_xen_enabled); - WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); -#endif - mutex_lock(&vendor_module_lock); - kvm_x86_ops.hardware_enable = NULL; - mutex_unlock(&vendor_module_lock); -} -EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit); - -static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) -{ - /* - * The vCPU has halted, e.g. executed HLT. Update the run state if the - * local APIC is in-kernel, the run loop will detect the non-runnable - * state and halt the vCPU. Exit to userspace if the local APIC is - * managed by userspace, in which case userspace is responsible for - * handling wake events. - */ - ++vcpu->stat.halt_exits; - if (lapic_in_kernel(vcpu)) { - vcpu->arch.mp_state = state; - return 1; - } else { - vcpu->run->exit_reason = reason; - return 0; - } -} - -int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) -{ - return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); -} -EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); - -int kvm_emulate_halt(struct kvm_vcpu *vcpu) -{ - int ret = kvm_skip_emulated_instruction(vcpu); - /* - * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered - * KVM_EXIT_DEBUG here. 
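Both retry paths above funnel into MMU helpers introduced elsewhere in this series (the arch/x86/kvm/mmu changes are not part of this excerpt). Judging only from the two call sites, the non-underscore variant is presumably a thin wrapper that does not force the retry; a hedged sketch of the assumed relationship:

/* Assumed declarations; the real ones live in the KVM MMU headers. */
bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				       bool always_retry);

static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
						   gpa_t cr2_or_gpa)
{
	return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
}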
- */ - return kvm_emulate_halt_noskip(vcpu) && ret; -} -EXPORT_SYMBOL_GPL(kvm_emulate_halt); - -int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) -{ - int ret = kvm_skip_emulated_instruction(vcpu); - - return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, - KVM_EXIT_AP_RESET_HOLD) && ret; + CPUFREQ_TRANSITION_NOTIFIER); + cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); + } +#ifdef CONFIG_X86_64 + pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); + irq_work_sync(&pvclock_irq_work); + cancel_work_sync(&pvclock_gtod_work); +#endif + kvm_x86_call(hardware_unsetup)(); + kvm_mmu_vendor_module_exit(); + free_percpu(user_return_msrs); + kmem_cache_destroy(x86_emulator_cache); +#ifdef CONFIG_KVM_XEN + static_key_deferred_flush(&kvm_xen_enabled); + WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); +#endif + mutex_lock(&vendor_module_lock); + kvm_x86_ops.enable_virtualization_cpu = NULL; + mutex_unlock(&vendor_module_lock); } -EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); +EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit); #ifdef CONFIG_X86_64 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, @@ -11207,6 +11090,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); + if (unlikely(exit_fastpath == EXIT_FASTPATH_EXIT_USERSPACE)) + return 0; + r = kvm_x86_call(handle_exit)(vcpu, exit_fastpath); return r; @@ -11220,6 +11106,67 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) return r; } +static bool kvm_vcpu_running(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && + !vcpu->arch.apf.halted); +} + +static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) +{ + if (!list_empty_careful(&vcpu->async_pf.done)) + return true; + + if (kvm_apic_has_pending_init_or_sipi(vcpu) && + kvm_apic_init_sipi_allowed(vcpu)) + return true; + + if (vcpu->arch.pv.pv_unhalted) + return true; + + if (kvm_is_exception_pending(vcpu)) + return true; + + if (kvm_test_request(KVM_REQ_NMI, vcpu) || + (vcpu->arch.nmi_pending && + kvm_x86_call(nmi_allowed)(vcpu, false))) + return true; + +#ifdef CONFIG_KVM_SMM + if (kvm_test_request(KVM_REQ_SMI, vcpu) || + (vcpu->arch.smi_pending && + kvm_x86_call(smi_allowed)(vcpu, false))) + return true; +#endif + + if (kvm_test_request(KVM_REQ_PMI, vcpu)) + return true; + + if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) + return true; + + if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)) + return true; + + if (kvm_hv_has_stimer_pending(vcpu)) + return true; + + if (is_guest_mode(vcpu) && + kvm_x86_ops.nested_ops->has_events && + kvm_x86_ops.nested_ops->has_events(vcpu, false)) + return true; + + if (kvm_xen_has_pending_events(vcpu)) + return true; + + return false; +} + +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); +} + /* Called within kvm->srcu read side. */ static inline int vcpu_block(struct kvm_vcpu *vcpu) { @@ -11291,12 +11238,6 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu) return 1; } -static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) -{ - return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && - !vcpu->arch.apf.halted); -} - /* Called within kvm->srcu read side. */ static int vcpu_run(struct kvm_vcpu *vcpu) { @@ -11348,6 +11289,98 @@ static int vcpu_run(struct kvm_vcpu *vcpu) return r; } +static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) +{ + /* + * The vCPU has halted, e.g. executed HLT. 
Update the run state if the + * local APIC is in-kernel, the run loop will detect the non-runnable + * state and halt the vCPU. Exit to userspace if the local APIC is + * managed by userspace, in which case userspace is responsible for + * handling wake events. + */ + ++vcpu->stat.halt_exits; + if (lapic_in_kernel(vcpu)) { + if (kvm_vcpu_has_events(vcpu)) + vcpu->arch.pv.pv_unhalted = false; + else + vcpu->arch.mp_state = state; + return 1; + } else { + vcpu->run->exit_reason = reason; + return 0; + } +} + +int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) +{ + return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); +} +EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); + +int kvm_emulate_halt(struct kvm_vcpu *vcpu) +{ + int ret = kvm_skip_emulated_instruction(vcpu); + /* + * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered + * KVM_EXIT_DEBUG here. + */ + return kvm_emulate_halt_noskip(vcpu) && ret; +} +EXPORT_SYMBOL_GPL(kvm_emulate_halt); + +fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu) +{ + int ret; + + kvm_vcpu_srcu_read_lock(vcpu); + ret = kvm_emulate_halt(vcpu); + kvm_vcpu_srcu_read_unlock(vcpu); + + if (!ret) + return EXIT_FASTPATH_EXIT_USERSPACE; + + if (kvm_vcpu_running(vcpu)) + return EXIT_FASTPATH_REENTER_GUEST; + + return EXIT_FASTPATH_EXIT_HANDLED; +} +EXPORT_SYMBOL_GPL(handle_fastpath_hlt); + +int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) +{ + int ret = kvm_skip_emulated_instruction(vcpu); + + return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, + KVM_EXIT_AP_RESET_HOLD) && ret; +} +EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); + +bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_apicv_active(vcpu) && + kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu); +} + +bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.preempted_in_kernel; +} + +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) +{ + if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) + return true; + + if (kvm_test_request(KVM_REQ_NMI, vcpu) || +#ifdef CONFIG_KVM_SMM + kvm_test_request(KVM_REQ_SMI, vcpu) || +#endif + kvm_test_request(KVM_REQ_EVENT, vcpu)) + return true; + + return kvm_arch_dy_has_pending_interrupt(vcpu); +} + static inline int complete_emulated_io(struct kvm_vcpu *vcpu) { return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); @@ -12264,8 +12297,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); - vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; - kvm_async_pf_hash_reset(vcpu); vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; @@ -12431,6 +12462,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) if (!init_event) { vcpu->arch.smbase = 0x30000; + vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; + vcpu->arch.msr_misc_features_enables = 0; vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL; @@ -12516,7 +12549,17 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) } EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); -int kvm_arch_hardware_enable(void) +void kvm_arch_enable_virtualization(void) +{ + cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu); +} + +void kvm_arch_disable_virtualization(void) +{ + cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu); +} + +int kvm_arch_enable_virtualization_cpu(void) { struct kvm *kvm; struct kvm_vcpu 
*vcpu; @@ -12532,7 +12575,7 @@ int kvm_arch_hardware_enable(void) if (ret) return ret; - ret = kvm_x86_call(hardware_enable)(); + ret = kvm_x86_call(enable_virtualization_cpu)(); if (ret != 0) return ret; @@ -12612,9 +12655,9 @@ int kvm_arch_hardware_enable(void) return 0; } -void kvm_arch_hardware_disable(void) +void kvm_arch_disable_virtualization_cpu(void) { - kvm_x86_call(hardware_disable)(); + kvm_x86_call(disable_virtualization_cpu)(); drop_user_return_notifiers(); } @@ -13162,87 +13205,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, kvm_arch_free_memslot(kvm, old); } -static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) -{ - if (!list_empty_careful(&vcpu->async_pf.done)) - return true; - - if (kvm_apic_has_pending_init_or_sipi(vcpu) && - kvm_apic_init_sipi_allowed(vcpu)) - return true; - - if (vcpu->arch.pv.pv_unhalted) - return true; - - if (kvm_is_exception_pending(vcpu)) - return true; - - if (kvm_test_request(KVM_REQ_NMI, vcpu) || - (vcpu->arch.nmi_pending && - kvm_x86_call(nmi_allowed)(vcpu, false))) - return true; - -#ifdef CONFIG_KVM_SMM - if (kvm_test_request(KVM_REQ_SMI, vcpu) || - (vcpu->arch.smi_pending && - kvm_x86_call(smi_allowed)(vcpu, false))) - return true; -#endif - - if (kvm_test_request(KVM_REQ_PMI, vcpu)) - return true; - - if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) - return true; - - if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)) - return true; - - if (kvm_hv_has_stimer_pending(vcpu)) - return true; - - if (is_guest_mode(vcpu) && - kvm_x86_ops.nested_ops->has_events && - kvm_x86_ops.nested_ops->has_events(vcpu, false)) - return true; - - if (kvm_xen_has_pending_events(vcpu)) - return true; - - return false; -} - -int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) -{ - return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); -} - -bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) -{ - return kvm_vcpu_apicv_active(vcpu) && - kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu); -} - -bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.preempted_in_kernel; -} - -bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) -{ - if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) - return true; - - if (kvm_test_request(KVM_REQ_NMI, vcpu) || -#ifdef CONFIG_KVM_SMM - kvm_test_request(KVM_REQ_SMI, vcpu) || -#endif - kvm_test_request(KVM_REQ_EVENT, vcpu)) - return true; - - return kvm_arch_dy_has_pending_interrupt(vcpu); -} - bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { if (vcpu->arch.guest_state_protected) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 50596f6f832085..a84c48ef527853 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -103,11 +103,18 @@ static inline unsigned int __shrink_ple_window(unsigned int val, return max(val, min); } -#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL +#define MSR_IA32_CR_PAT_DEFAULT \ + PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC) void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu); int kvm_check_nested_events(struct kvm_vcpu *vcpu); +/* Forcibly leave the nested mode in cases like a vCPU reset */ +static inline void kvm_leave_nested(struct kvm_vcpu *vcpu) +{ + kvm_x86_ops.nested_ops->leave_nested(vcpu); +} + static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu) { return vcpu->arch.last_vmentry_cpu != -1; @@ -334,6 +341,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int 
emulation_type, void *insn, int insn_len); fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu); +fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu); extern struct kvm_caps kvm_caps; extern struct kvm_host_values kvm_host; @@ -504,13 +512,26 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva); bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type); +enum kvm_msr_access { + MSR_TYPE_R = BIT(0), + MSR_TYPE_W = BIT(1), + MSR_TYPE_RW = MSR_TYPE_R | MSR_TYPE_W, +}; + /* * Internal error codes that are used to indicate that MSR emulation encountered - * an error that should result in #GP in the guest, unless userspace - * handles it. + * an error that should result in #GP in the guest, unless userspace handles it. + * Note, '1', '0', and negative numbers are off limits, as they are used by KVM + * as part of KVM's lightly documented internal KVM_RUN return codes. + * + * UNSUPPORTED - The MSR isn't supported, either because it is completely + * unknown to KVM, or because the MSR should not exist according + * to the vCPU model. + * + * FILTERED - Access to the MSR is denied by a userspace MSR filter. */ -#define KVM_MSR_RET_INVALID 2 /* in-kernel MSR emulation #GP condition */ -#define KVM_MSR_RET_FILTERED 3 /* #GP due to userspace MSR filter */ +#define KVM_MSR_RET_UNSUPPORTED 2 +#define KVM_MSR_RET_FILTERED 3 #define __cr4_reserved_bits(__cpu_has, __c) \ ({ \ diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index 90afb488b396a0..b2eff07d65e47e 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -16,6 +16,11 @@ cmpxchg8b (\reg) .endm +.macro read64_nonatomic reg + movl (\reg), %eax + movl 4(\reg), %edx +.endm + SYM_FUNC_START(atomic64_read_cx8) read64 %ecx RET @@ -51,7 +56,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8) movl %edx, %edi movl %ecx, %ebp - read64 %ecx + read64_nonatomic %ecx 1: movl %eax, %ebx movl %edx, %ecx @@ -79,7 +84,7 @@ addsub_return sub sub sbb SYM_FUNC_START(atomic64_\func\()_return_cx8) pushl %ebx - read64 %esi + read64_nonatomic %esi 1: movl %eax, %ebx movl %edx, %ecx diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 5952ab41c60f4d..6ffb931b9fb14f 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -13,7 +13,7 @@ #endif #include /*__ignore_sync_check__ */ #include /* __ignore_sync_check__ */ -#include /* __ignore_sync_check__ */ +#include /* __ignore_sync_check__ */ #include #include diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 8d3a00e5c528e5..690fbf48e8538b 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -57,7 +57,6 @@ obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o obj-$(CONFIG_AMD_NUMA) += amdtopology.o obj-$(CONFIG_ACPI_NUMA) += srat.o -obj-$(CONFIG_NUMA_EMU) += numa_emulation.o obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c index 9332b36a10915c..628833afee3775 100644 --- a/arch/x86/mm/amdtopology.c +++ b/arch/x86/mm/amdtopology.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index e91500a8096394..575f863f3c75e3 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -164,7 +164,7 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu) } } 
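The atomic64_cx8_32.S hunk above replaces the initial atomic read64 with read64_nonatomic in the cmpxchg8b retry loops. A C-style sketch of that loop (illustrative only, using a GCC builtin rather than the real assembly; the function name is made up) shows why the non-atomic seed is safe: if the first load is torn, cmpxchg8b fails and hands back the value it did read atomically, which the next iteration then uses.

static long long example_atomic64_add_return(long long delta, long long *v)
{
	long long old = *v;	/* non-atomic seed, like read64_nonatomic */
	long long new;

	do {
		new = old + delta;
	} while (!__atomic_compare_exchange_n(v, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return new;
}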
#else -static inline void percpu_setup_exception_stacks(unsigned int cpu) +static void __init percpu_setup_exception_stacks(unsigned int cpu) { struct cpu_entry_area *cea = get_cpu_entry_area(cpu); diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c index c45127265f2fa3..437e96fb497734 100644 --- a/arch/x86/mm/ident_map.c +++ b/arch/x86/mm/ident_map.c @@ -99,18 +99,31 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page, for (; addr < end; addr = next) { pud_t *pud = pud_page + pud_index(addr); pmd_t *pmd; + bool use_gbpage; next = (addr & PUD_MASK) + PUD_SIZE; if (next > end) next = end; - if (info->direct_gbpages) { - pud_t pudval; + /* if this is already a gbpage, this portion is already mapped */ + if (pud_leaf(*pud)) + continue; + + /* Is using a gbpage allowed? */ + use_gbpage = info->direct_gbpages; - if (pud_present(*pud)) - continue; + /* Don't use gbpage if it maps more than the requested region. */ + /* at the begining: */ + use_gbpage &= ((addr & ~PUD_MASK) == 0); + /* ... or at the end: */ + use_gbpage &= ((next & ~PUD_MASK) == 0); + + /* Never overwrite existing mappings */ + use_gbpage &= !pud_present(*pud); + + if (use_gbpage) { + pud_t pudval; - addr &= PUD_MASK; pudval = __pud((addr - info->offset) | info->page_flag); set_pud(pud, pudval); continue; diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index aa7d279321ea0c..70b02fc61d935c 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -457,7 +458,7 @@ void iounmap(volatile void __iomem *addr) { struct vm_struct *p, *o; - if ((void __force *)addr <= high_memory) + if (WARN_ON_ONCE(!is_ioremap_addr((void __force *)addr))) return; /* diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 6ce10e3c622858..64e5cdb2460ac2 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -22,16 +23,6 @@ #include "numa_internal.h" int numa_off; -nodemask_t numa_nodes_parsed __initdata; - -struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL(node_data); - -static struct numa_meminfo numa_meminfo __initdata_or_meminfo; -static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo; - -static int numa_distance_cnt; -static u8 *numa_distance; static __init int numa_setup(char *opt) { @@ -124,456 +115,27 @@ void __init setup_node_to_cpumask_map(void) pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } -static int __init numa_add_memblk_to(int nid, u64 start, u64 end, - struct numa_meminfo *mi) -{ - /* ignore zero length blks */ - if (start == end) - return 0; - - /* whine about and ignore invalid blks */ - if (start > end || nid < 0 || nid >= MAX_NUMNODES) { - pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", - nid, start, end - 1); - return 0; - } - - if (mi->nr_blks >= NR_NODE_MEMBLKS) { - pr_err("too many memblk ranges\n"); - return -EINVAL; - } - - mi->blk[mi->nr_blks].start = start; - mi->blk[mi->nr_blks].end = end; - mi->blk[mi->nr_blks].nid = nid; - mi->nr_blks++; - return 0; -} - -/** - * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo - * @idx: Index of memblk to remove - * @mi: numa_meminfo to remove memblk from - * - * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and - * decrementing @mi->nr_blks. 
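For illustration, here is a standalone mirror of the new use_gbpage checks in the ident_map.c hunk above (the EX_ macros and the function name stand in for the kernel's PUD_SIZE/PUD_MASK and are not part of the patch): a 1GB leaf is used only when the requested chunk covers the PUD entry exactly at both ends and nothing is mapped there yet.

#include <stdbool.h>
#include <stdint.h>

#define EX_PUD_SIZE	(1ULL << 30)		/* 1 GiB, as on x86-64 */
#define EX_PUD_MASK	(~(EX_PUD_SIZE - 1))

static bool gbpage_usable(uint64_t addr, uint64_t next,
			  bool direct_gbpages, bool pud_already_present)
{
	bool use_gbpage = direct_gbpages;

	use_gbpage &= ((addr & ~EX_PUD_MASK) == 0);	/* starts on a GB boundary */
	use_gbpage &= ((next & ~EX_PUD_MASK) == 0);	/* ends on a GB boundary */
	use_gbpage &= !pud_already_present;		/* never overwrite a mapping */
	return use_gbpage;
}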
- */ -void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) -{ - mi->nr_blks--; - memmove(&mi->blk[idx], &mi->blk[idx + 1], - (mi->nr_blks - idx) * sizeof(mi->blk[0])); -} - -/** - * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another - * @dst: numa_meminfo to append block to - * @idx: Index of memblk to remove - * @src: numa_meminfo to remove memblk from - */ -static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx, - struct numa_meminfo *src) -{ - dst->blk[dst->nr_blks++] = src->blk[idx]; - numa_remove_memblk_from(idx, src); -} - -/** - * numa_add_memblk - Add one numa_memblk to numa_meminfo - * @nid: NUMA node ID of the new memblk - * @start: Start address of the new memblk - * @end: End address of the new memblk - * - * Add a new memblk to the default numa_meminfo. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int __init numa_add_memblk(int nid, u64 start, u64 end) -{ - return numa_add_memblk_to(nid, start, end, &numa_meminfo); -} - -/* Allocate NODE_DATA for a node on the local memory */ -static void __init alloc_node_data(int nid) +static int __init numa_register_nodes(void) { - const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); - u64 nd_pa; - void *nd; - int tnid; - - /* - * Allocate node data. Try node-local memory and then any node. - * Never allocate in DMA zone. - */ - nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) { - pr_err("Cannot find %zu bytes in any node (initial node: %d)\n", - nd_size, nid); - return; - } - nd = __va(nd_pa); - - /* report and initialize */ - printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid, - nd_pa, nd_pa + nd_size - 1); - tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (tnid != nid) - printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); - - node_data[nid] = nd; - memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); - - node_set_online(nid); -} - -/** - * numa_cleanup_meminfo - Cleanup a numa_meminfo - * @mi: numa_meminfo to clean up - * - * Sanitize @mi by merging and removing unnecessary memblks. Also check for - * conflicts and clear unused memblks. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int __init numa_cleanup_meminfo(struct numa_meminfo *mi) -{ - const u64 low = 0; - const u64 high = PFN_PHYS(max_pfn); - int i, j, k; - - /* first, trim all entries */ - for (i = 0; i < mi->nr_blks; i++) { - struct numa_memblk *bi = &mi->blk[i]; - - /* move / save reserved memory ranges */ - if (!memblock_overlaps_region(&memblock.memory, - bi->start, bi->end - bi->start)) { - numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi); - continue; - } - - /* make sure all non-reserved blocks are inside the limits */ - bi->start = max(bi->start, low); - - /* preserve info for non-RAM areas above 'max_pfn': */ - if (bi->end > high) { - numa_add_memblk_to(bi->nid, high, bi->end, - &numa_reserved_meminfo); - bi->end = high; - } - - /* and there's no empty block */ - if (bi->start >= bi->end) - numa_remove_memblk_from(i--, mi); - } - - /* merge neighboring / overlapping entries */ - for (i = 0; i < mi->nr_blks; i++) { - struct numa_memblk *bi = &mi->blk[i]; - - for (j = i + 1; j < mi->nr_blks; j++) { - struct numa_memblk *bj = &mi->blk[j]; - u64 start, end; - - /* - * See whether there are overlapping blocks. Whine - * about but allow overlaps of the same nid. They - * will be merged below. 
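The numa_meminfo bookkeeping removed here (and in the hunks that follow) is not dropped outright: numa_init() further down now delegates to numa_memblks_init(), so this logic presumably lives in shared NUMA memblock code outside this excerpt. The only thing the visible call site pins down is roughly this prototype (assumed):

/* Prototype inferred solely from the call in numa_init() below. */
int __init numa_memblks_init(int (*init_func)(void),
			     bool memblock_force_top_down);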
- */ - if (bi->end > bj->start && bi->start < bj->end) { - if (bi->nid != bj->nid) { - pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n", - bi->nid, bi->start, bi->end - 1, - bj->nid, bj->start, bj->end - 1); - return -EINVAL; - } - pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n", - bi->nid, bi->start, bi->end - 1, - bj->start, bj->end - 1); - } - - /* - * Join together blocks on the same node, holes - * between which don't overlap with memory on other - * nodes. - */ - if (bi->nid != bj->nid) - continue; - start = min(bi->start, bj->start); - end = max(bi->end, bj->end); - for (k = 0; k < mi->nr_blks; k++) { - struct numa_memblk *bk = &mi->blk[k]; - - if (bi->nid == bk->nid) - continue; - if (start < bk->end && end > bk->start) - break; - } - if (k < mi->nr_blks) - continue; - printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n", - bi->nid, bi->start, bi->end - 1, bj->start, - bj->end - 1, start, end - 1); - bi->start = start; - bi->end = end; - numa_remove_memblk_from(j--, mi); - } - } - - /* clear unused ones */ - for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { - mi->blk[i].start = mi->blk[i].end = 0; - mi->blk[i].nid = NUMA_NO_NODE; - } - - return 0; -} - -/* - * Set nodes, which have memory in @mi, in *@nodemask. - */ -static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, - const struct numa_meminfo *mi) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mi->blk); i++) - if (mi->blk[i].start != mi->blk[i].end && - mi->blk[i].nid != NUMA_NO_NODE) - node_set(mi->blk[i].nid, *nodemask); -} - -/** - * numa_reset_distance - Reset NUMA distance table - * - * The current table is freed. The next numa_set_distance() call will - * create a new one. - */ -void __init numa_reset_distance(void) -{ - size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); - - /* numa_distance could be 1LU marking allocation failure, test cnt */ - if (numa_distance_cnt) - memblock_free(numa_distance, size); - numa_distance_cnt = 0; - numa_distance = NULL; /* enable table creation */ -} - -static int __init numa_alloc_distance(void) -{ - nodemask_t nodes_parsed; - size_t size; - int i, j, cnt = 0; - u64 phys; - - /* size the new table and allocate it */ - nodes_parsed = numa_nodes_parsed; - numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); - - for_each_node_mask(i, nodes_parsed) - cnt = i; - cnt++; - size = cnt * cnt * sizeof(numa_distance[0]); - - phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0, - PFN_PHYS(max_pfn_mapped)); - if (!phys) { - pr_warn("Warning: can't allocate distance table!\n"); - /* don't retry until explicitly reset */ - numa_distance = (void *)1LU; - return -ENOMEM; - } - - numa_distance = __va(phys); - numa_distance_cnt = cnt; - - /* fill with the default distances */ - for (i = 0; i < cnt; i++) - for (j = 0; j < cnt; j++) - numa_distance[i * cnt + j] = i == j ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); - - return 0; -} - -/** - * numa_set_distance - Set NUMA distance from one NUMA to another - * @from: the 'from' node to set distance - * @to: the 'to' node to set distance - * @distance: NUMA distance - * - * Set the distance from node @from to @to to @distance. If distance table - * doesn't exist, one which is large enough to accommodate all the currently - * known nodes will be created. 
- * - * If such table cannot be allocated, a warning is printed and further - * calls are ignored until the distance table is reset with - * numa_reset_distance(). - * - * If @from or @to is higher than the highest known node or lower than zero - * at the time of table creation or @distance doesn't make sense, the call - * is ignored. - * This is to allow simplification of specific NUMA config implementations. - */ -void __init numa_set_distance(int from, int to, int distance) -{ - if (!numa_distance && numa_alloc_distance() < 0) - return; - - if (from >= numa_distance_cnt || to >= numa_distance_cnt || - from < 0 || to < 0) { - pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - if ((u8)distance != distance || - (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - numa_distance[from * numa_distance_cnt + to] = distance; -} - -int __node_distance(int from, int to) -{ - if (from >= numa_distance_cnt || to >= numa_distance_cnt) - return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; - return numa_distance[from * numa_distance_cnt + to]; -} -EXPORT_SYMBOL(__node_distance); - -/* - * Mark all currently memblock-reserved physical memory (which covers the - * kernel's own memory ranges) as hot-unswappable. - */ -static void __init numa_clear_kernel_node_hotplug(void) -{ - nodemask_t reserved_nodemask = NODE_MASK_NONE; - struct memblock_region *mb_region; - int i; - - /* - * We have to do some preprocessing of memblock regions, to - * make them suitable for reservation. - * - * At this time, all memory regions reserved by memblock are - * used by the kernel, but those regions are not split up - * along node boundaries yet, and don't necessarily have their - * node ID set yet either. - * - * So iterate over all memory known to the x86 architecture, - * and use those ranges to set the nid in memblock.reserved. - * This will split up the memblock regions along node - * boundaries and will set the node IDs as well. - */ - for (i = 0; i < numa_meminfo.nr_blks; i++) { - struct numa_memblk *mb = numa_meminfo.blk + i; - int ret; - - ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid); - WARN_ON_ONCE(ret); - } - - /* - * Now go over all reserved memblock regions, to construct a - * node mask of all kernel reserved memory areas. - * - * [ Note, when booting with mem=nn[kMG] or in a kdump kernel, - * numa_meminfo might not include all memblock.reserved - * memory ranges, because quirks such as trim_snb_memory() - * reserve specific pages for Sandy Bridge graphics. ] - */ - for_each_reserved_mem_region(mb_region) { - int nid = memblock_get_region_node(mb_region); - - if (nid != NUMA_NO_NODE) - node_set(nid, reserved_nodemask); - } - - /* - * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory - * belonging to the reserved node mask. 
- * - * Note that this will include memory regions that reside - * on nodes that contain kernel memory - entire nodes - * become hot-unpluggable: - */ - for (i = 0; i < numa_meminfo.nr_blks; i++) { - struct numa_memblk *mb = numa_meminfo.blk + i; - - if (!node_isset(mb->nid, reserved_nodemask)) - continue; - - memblock_clear_hotplug(mb->start, mb->end - mb->start); - } -} - -static int __init numa_register_memblks(struct numa_meminfo *mi) -{ - int i, nid; - - /* Account for nodes with cpus and no memory */ - node_possible_map = numa_nodes_parsed; - numa_nodemask_from_meminfo(&node_possible_map, mi); - if (WARN_ON(nodes_empty(node_possible_map))) - return -EINVAL; - - for (i = 0; i < mi->nr_blks; i++) { - struct numa_memblk *mb = &mi->blk[i]; - memblock_set_node(mb->start, mb->end - mb->start, - &memblock.memory, mb->nid); - } - - /* - * At very early time, the kernel have to use some memory such as - * loading the kernel image. We cannot prevent this anyway. So any - * node the kernel resides in should be un-hotpluggable. - * - * And when we come here, alloc node data won't fail. - */ - numa_clear_kernel_node_hotplug(); - - /* - * If sections array is gonna be used for pfn -> nid mapping, check - * whether its granularity is fine enough. - */ - if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) { - unsigned long pfn_align = node_map_pfn_alignment(); - - if (pfn_align && pfn_align < PAGES_PER_SECTION) { - pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n", - PFN_PHYS(pfn_align) >> 20, - PFN_PHYS(PAGES_PER_SECTION) >> 20); - return -EINVAL; - } - } + int nid; if (!memblock_validate_numa_coverage(SZ_1M)) return -EINVAL; /* Finally register nodes. */ for_each_node_mask(nid, node_possible_map) { - u64 start = PFN_PHYS(max_pfn); - u64 end = 0; - - for (i = 0; i < mi->nr_blks; i++) { - if (nid != mi->blk[i].nid) - continue; - start = min(mi->blk[i].start, start); - end = max(mi->blk[i].end, end); - } + unsigned long start_pfn, end_pfn; - if (start >= end) + /* + * Note, get_pfn_range_for_nid() depends on + * memblock_set_node() having already happened + */ + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + if (start_pfn >= end_pfn) continue; alloc_node_data(nid); + node_set_online(nid); } /* Dump memblock with node info and return. */ @@ -609,39 +171,11 @@ static int __init numa_init(int (*init_func)(void)) for (i = 0; i < MAX_LOCAL_APIC; i++) set_apicid_to_node(i, NUMA_NO_NODE); - nodes_clear(numa_nodes_parsed); - nodes_clear(node_possible_map); - nodes_clear(node_online_map); - memset(&numa_meminfo, 0, sizeof(numa_meminfo)); - WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory, - NUMA_NO_NODE)); - WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved, - NUMA_NO_NODE)); - /* In case that parsing SRAT failed. */ - WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX)); - numa_reset_distance(); - - ret = init_func(); - if (ret < 0) - return ret; - - /* - * We reset memblock back to the top-down direction - * here because if we configured ACPI_NUMA, we have - * parsed SRAT in init_func(). It is ok to have the - * reset here even if we did't configure ACPI_NUMA - * or acpi numa init fails and fallbacks to dummy - * numa init. 
- */ - memblock_set_bottom_up(false); - - ret = numa_cleanup_meminfo(&numa_meminfo); + ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true); if (ret < 0) return ret; - numa_emulation(&numa_meminfo, numa_distance_cnt); - - ret = numa_register_memblks(&numa_meminfo); + ret = numa_register_nodes(); if (ret < 0) return ret; @@ -782,12 +316,12 @@ void __init init_cpu_to_node(void) #ifndef CONFIG_DEBUG_PER_CPU_MAPS # ifndef CONFIG_NUMA_EMU -void numa_add_cpu(int cpu) +void numa_add_cpu(unsigned int cpu) { cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } -void numa_remove_cpu(int cpu) +void numa_remove_cpu(unsigned int cpu) { cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } @@ -825,7 +359,7 @@ int early_cpu_to_node(int cpu) return per_cpu(x86_cpu_to_node_map, cpu); } -void debug_cpumask_set_cpu(int cpu, int node, bool enable) +void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable) { struct cpumask *mask; @@ -857,12 +391,12 @@ static void numa_set_cpumask(int cpu, bool enable) debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); } -void numa_add_cpu(int cpu) +void numa_add_cpu(unsigned int cpu) { numa_set_cpumask(cpu, true); } -void numa_remove_cpu(int cpu) +void numa_remove_cpu(unsigned int cpu) { numa_set_cpumask(cpu, false); } @@ -893,113 +427,29 @@ EXPORT_SYMBOL(cpumask_of_node); #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ -#ifdef CONFIG_NUMA_KEEP_MEMINFO -static int meminfo_to_nid(struct numa_meminfo *mi, u64 start) +#ifdef CONFIG_NUMA_EMU +void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, + unsigned int nr_emu_nids) { - int i; - - for (i = 0; i < mi->nr_blks; i++) - if (mi->blk[i].start <= start && mi->blk[i].end > start) - return mi->blk[i].nid; - return NUMA_NO_NODE; -} - -int phys_to_target_node(phys_addr_t start) -{ - int nid = meminfo_to_nid(&numa_meminfo, start); + int i, j; /* - * Prefer online nodes, but if reserved memory might be - * hot-added continue the search with reserved ranges. + * Transform __apicid_to_node table to use emulated nids by + * reverse-mapping phys_nid. The maps should always exist but fall + * back to zero just in case. */ - if (nid != NUMA_NO_NODE) - return nid; - - return meminfo_to_nid(&numa_reserved_meminfo, start); -} -EXPORT_SYMBOL_GPL(phys_to_target_node); - -int memory_add_physaddr_to_nid(u64 start) -{ - int nid = meminfo_to_nid(&numa_meminfo, start); - - if (nid == NUMA_NO_NODE) - nid = numa_meminfo.blk[0].nid; - return nid; -} -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); - -#endif - -static int __init cmp_memblk(const void *a, const void *b) -{ - const struct numa_memblk *ma = *(const struct numa_memblk **)a; - const struct numa_memblk *mb = *(const struct numa_memblk **)b; - - return (ma->start > mb->start) - (ma->start < mb->start); + for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { + if (__apicid_to_node[i] == NUMA_NO_NODE) + continue; + for (j = 0; j < nr_emu_nids; j++) + if (__apicid_to_node[i] == emu_nid_to_phys[j]) + break; + __apicid_to_node[i] = j < nr_emu_nids ? 
j : 0; + } } -static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata; - -/** - * numa_fill_memblks - Fill gaps in numa_meminfo memblks - * @start: address to begin fill - * @end: address to end fill - * - * Find and extend numa_meminfo memblks to cover the physical - * address range @start-@end - * - * RETURNS: - * 0 : Success - * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end - */ - -int __init numa_fill_memblks(u64 start, u64 end) +u64 __init numa_emu_dma_end(void) { - struct numa_memblk **blk = &numa_memblk_list[0]; - struct numa_meminfo *mi = &numa_meminfo; - int count = 0; - u64 prev_end; - - /* - * Create a list of pointers to numa_meminfo memblks that - * overlap start, end. The list is used to make in-place - * changes that fill out the numa_meminfo memblks. - */ - for (int i = 0; i < mi->nr_blks; i++) { - struct numa_memblk *bi = &mi->blk[i]; - - if (memblock_addrs_overlap(start, end - start, bi->start, - bi->end - bi->start)) { - blk[count] = &mi->blk[i]; - count++; - } - } - if (!count) - return NUMA_NO_MEMBLK; - - /* Sort the list of pointers in memblk->start order */ - sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL); - - /* Make sure the first/last memblks include start/end */ - blk[0]->start = min(blk[0]->start, start); - blk[count - 1]->end = max(blk[count - 1]->end, end); - - /* - * Fill any gaps by tracking the previous memblks - * end address and backfilling to it if needed. - */ - prev_end = blk[0]->end; - for (int i = 1; i < count; i++) { - struct numa_memblk *curr = blk[i]; - - if (prev_end >= curr->start) { - if (prev_end < curr->end) - prev_end = curr->end; - } else { - curr->start = prev_end; - prev_end = curr->end; - } - } - return 0; + return PFN_PHYS(MAX_DMA32_PFN); } +#endif /* CONFIG_NUMA_EMU */ diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c deleted file mode 100644 index 9a9305367fdd16..00000000000000 --- a/arch/x86/mm/numa_emulation.c +++ /dev/null @@ -1,585 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NUMA emulation - */ -#include -#include -#include -#include -#include - -#include "numa_internal.h" - -static int emu_nid_to_phys[MAX_NUMNODES]; -static char *emu_cmdline __initdata; - -int __init numa_emu_cmdline(char *str) -{ - emu_cmdline = str; - return 0; -} - -static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) -{ - int i; - - for (i = 0; i < mi->nr_blks; i++) - if (mi->blk[i].nid == nid) - return i; - return -ENOENT; -} - -static u64 __init mem_hole_size(u64 start, u64 end) -{ - unsigned long start_pfn = PFN_UP(start); - unsigned long end_pfn = PFN_DOWN(end); - - if (start_pfn < end_pfn) - return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn)); - return 0; -} - -/* - * Sets up nid to range from @start to @end. The return value is -errno if - * something went wrong, 0 otherwise. 
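numa_emu_update_cpu_to_node() and numa_emu_dma_end() added above are arch callbacks: with numa_emulation.c deleted below, the numa=fake splitting presumably happens in generic code that queries the DMA32 boundary through the new hook instead of open-coding MAX_DMA32_PFN as the deleted code did. A simplified, assumed sketch of such a caller (function and parameter names are illustrative):

static void __init example_clamp_node_end(u64 *end, u64 min_left_for_next)
{
	u64 dma32_end = numa_emu_dma_end();	/* x86: PFN_PHYS(MAX_DMA32_PFN) */

	/* Don't leave the next emulated node without any DMA32 memory. */
	if (*end < dma32_end && dma32_end - *end < min_left_for_next)
		*end = dma32_end;
}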
- */ -static int __init emu_setup_memblk(struct numa_meminfo *ei, - struct numa_meminfo *pi, - int nid, int phys_blk, u64 size) -{ - struct numa_memblk *eb = &ei->blk[ei->nr_blks]; - struct numa_memblk *pb = &pi->blk[phys_blk]; - - if (ei->nr_blks >= NR_NODE_MEMBLKS) { - pr_err("NUMA: Too many emulated memblks, failing emulation\n"); - return -EINVAL; - } - - ei->nr_blks++; - eb->start = pb->start; - eb->end = pb->start + size; - eb->nid = nid; - - if (emu_nid_to_phys[nid] == NUMA_NO_NODE) - emu_nid_to_phys[nid] = pb->nid; - - pb->start += size; - if (pb->start >= pb->end) { - WARN_ON_ONCE(pb->start > pb->end); - numa_remove_memblk_from(phys_blk, pi); - } - - printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n", - nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20); - return 0; -} - -/* - * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr - * to max_addr. - * - * Returns zero on success or negative on error. - */ -static int __init split_nodes_interleave(struct numa_meminfo *ei, - struct numa_meminfo *pi, - u64 addr, u64 max_addr, int nr_nodes) -{ - nodemask_t physnode_mask = numa_nodes_parsed; - u64 size; - int big; - int nid = 0; - int i, ret; - - if (nr_nodes <= 0) - return -1; - if (nr_nodes > MAX_NUMNODES) { - pr_info("numa=fake=%d too large, reducing to %d\n", - nr_nodes, MAX_NUMNODES); - nr_nodes = MAX_NUMNODES; - } - - /* - * Calculate target node size. x86_32 freaks on __udivdi3() so do - * the division in ulong number of pages and convert back. - */ - size = max_addr - addr - mem_hole_size(addr, max_addr); - size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes); - - /* - * Calculate the number of big nodes that can be allocated as a result - * of consolidating the remainder. - */ - big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) / - FAKE_NODE_MIN_SIZE; - - size &= FAKE_NODE_MIN_HASH_MASK; - if (!size) { - pr_err("Not enough memory for each node. " - "NUMA emulation disabled.\n"); - return -1; - } - - /* - * Continue to fill physical nodes with fake nodes until there is no - * memory left on any of them. - */ - while (!nodes_empty(physnode_mask)) { - for_each_node_mask(i, physnode_mask) { - u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); - u64 start, limit, end; - int phys_blk; - - phys_blk = emu_find_memblk_by_nid(i, pi); - if (phys_blk < 0) { - node_clear(i, physnode_mask); - continue; - } - start = pi->blk[phys_blk].start; - limit = pi->blk[phys_blk].end; - end = start + size; - - if (nid < big) - end += FAKE_NODE_MIN_SIZE; - - /* - * Continue to add memory to this fake node if its - * non-reserved memory is less than the per-node size. - */ - while (end - start - mem_hole_size(start, end) < size) { - end += FAKE_NODE_MIN_SIZE; - if (end > limit) { - end = limit; - break; - } - } - - /* - * If there won't be at least FAKE_NODE_MIN_SIZE of - * non-reserved memory in ZONE_DMA32 for the next node, - * this one must extend to the boundary. - */ - if (end < dma32_end && dma32_end - end - - mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) - end = dma32_end; - - /* - * If there won't be enough non-reserved memory for the - * next node, this one must extend to the end of the - * physical node. 
- */ - if (limit - end - mem_hole_size(end, limit) < size) - end = limit; - - ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, - phys_blk, - min(end, limit) - start); - if (ret < 0) - return ret; - } - } - return 0; -} - -/* - * Returns the end address of a node so that there is at least `size' amount of - * non-reserved memory or `max_addr' is reached. - */ -static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) -{ - u64 end = start + size; - - while (end - start - mem_hole_size(start, end) < size) { - end += FAKE_NODE_MIN_SIZE; - if (end > max_addr) { - end = max_addr; - break; - } - } - return end; -} - -static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes) -{ - unsigned long max_pfn = PHYS_PFN(max_addr); - unsigned long base_pfn = PHYS_PFN(base); - unsigned long hole_pfns = PHYS_PFN(hole); - - return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes); -} - -/* - * Sets up fake nodes of `size' interleaved over physical nodes ranging from - * `addr' to `max_addr'. - * - * Returns zero on success or negative on error. - */ -static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei, - struct numa_meminfo *pi, - u64 addr, u64 max_addr, u64 size, - int nr_nodes, struct numa_memblk *pblk, - int nid) -{ - nodemask_t physnode_mask = numa_nodes_parsed; - int i, ret, uniform = 0; - u64 min_size; - - if ((!size && !nr_nodes) || (nr_nodes && !pblk)) - return -1; - - /* - * In the 'uniform' case split the passed in physical node by - * nr_nodes, in the non-uniform case, ignore the passed in - * physical block and try to create nodes of at least size - * @size. - * - * In the uniform case, split the nodes strictly by physical - * capacity, i.e. ignore holes. In the non-uniform case account - * for holes and treat @size as a minimum floor. - */ - if (!nr_nodes) - nr_nodes = MAX_NUMNODES; - else { - nodes_clear(physnode_mask); - node_set(pblk->nid, physnode_mask); - uniform = 1; - } - - if (uniform) { - min_size = uniform_size(max_addr, addr, 0, nr_nodes); - size = min_size; - } else { - /* - * The limit on emulated nodes is MAX_NUMNODES, so the - * size per node is increased accordingly if the - * requested size is too small. This creates a uniform - * distribution of node sizes across the entire machine - * (but not necessarily over physical nodes). - */ - min_size = uniform_size(max_addr, addr, - mem_hole_size(addr, max_addr), nr_nodes); - } - min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE); - if (size < min_size) { - pr_err("Fake node size %LuMB too small, increasing to %LuMB\n", - size >> 20, min_size >> 20); - size = min_size; - } - size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE); - - /* - * Fill physical nodes with fake nodes of size until there is no memory - * left on any of them. - */ - while (!nodes_empty(physnode_mask)) { - for_each_node_mask(i, physnode_mask) { - u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); - u64 start, limit, end; - int phys_blk; - - phys_blk = emu_find_memblk_by_nid(i, pi); - if (phys_blk < 0) { - node_clear(i, physnode_mask); - continue; - } - - start = pi->blk[phys_blk].start; - limit = pi->blk[phys_blk].end; - - if (uniform) - end = start + size; - else - end = find_end_of_node(start, limit, size); - /* - * If there won't be at least FAKE_NODE_MIN_SIZE of - * non-reserved memory in ZONE_DMA32 for the next node, - * this one must extend to the boundary. 
- */ - if (end < dma32_end && dma32_end - end - - mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) - end = dma32_end; - - /* - * If there won't be enough non-reserved memory for the - * next node, this one must extend to the end of the - * physical node. - */ - if ((limit - end - mem_hole_size(end, limit) < size) - && !uniform) - end = limit; - - ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, - phys_blk, - min(end, limit) - start); - if (ret < 0) - return ret; - } - } - return nid; -} - -static int __init split_nodes_size_interleave(struct numa_meminfo *ei, - struct numa_meminfo *pi, - u64 addr, u64 max_addr, u64 size) -{ - return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size, - 0, NULL, 0); -} - -static int __init setup_emu2phys_nid(int *dfl_phys_nid) -{ - int i, max_emu_nid = 0; - - *dfl_phys_nid = NUMA_NO_NODE; - for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) { - if (emu_nid_to_phys[i] != NUMA_NO_NODE) { - max_emu_nid = i; - if (*dfl_phys_nid == NUMA_NO_NODE) - *dfl_phys_nid = emu_nid_to_phys[i]; - } - } - - return max_emu_nid; -} - -/** - * numa_emulation - Emulate NUMA nodes - * @numa_meminfo: NUMA configuration to massage - * @numa_dist_cnt: The size of the physical NUMA distance table - * - * Emulate NUMA nodes according to the numa=fake kernel parameter. - * @numa_meminfo contains the physical memory configuration and is modified - * to reflect the emulated configuration on success. @numa_dist_cnt is - * used to determine the size of the physical distance table. - * - * On success, the following modifications are made. - * - * - @numa_meminfo is updated to reflect the emulated nodes. - * - * - __apicid_to_node[] is updated such that APIC IDs are mapped to the - * emulated nodes. - * - * - NUMA distance table is rebuilt to represent distances between emulated - * nodes. The distances are determined considering how emulated nodes - * are mapped to physical nodes and match the actual distances. - * - * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical - * nodes. This is used by numa_add_cpu() and numa_remove_cpu(). - * - * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with - * identity mapping and no other modification is made. - */ -void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) -{ - static struct numa_meminfo ei __initdata; - static struct numa_meminfo pi __initdata; - const u64 max_addr = PFN_PHYS(max_pfn); - u8 *phys_dist = NULL; - size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); - int max_emu_nid, dfl_phys_nid; - int i, j, ret; - - if (!emu_cmdline) - goto no_emu; - - memset(&ei, 0, sizeof(ei)); - pi = *numa_meminfo; - - for (i = 0; i < MAX_NUMNODES; i++) - emu_nid_to_phys[i] = NUMA_NO_NODE; - - /* - * If the numa=fake command-line contains a 'M' or 'G', it represents - * the fixed node size. Otherwise, if it is just a single number N, - * split the system RAM into N fake nodes. - */ - if (strchr(emu_cmdline, 'U')) { - nodemask_t physnode_mask = numa_nodes_parsed; - unsigned long n; - int nid = 0; - - n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); - ret = -1; - for_each_node_mask(i, physnode_mask) { - /* - * The reason we pass in blk[0] is due to - * numa_remove_memblk_from() called by - * emu_setup_memblk() will delete entry 0 - * and then move everything else up in the pi.blk - * array. Therefore we should always be looking - * at blk[0]. 
- */ - ret = split_nodes_size_interleave_uniform(&ei, &pi, - pi.blk[0].start, pi.blk[0].end, 0, - n, &pi.blk[0], nid); - if (ret < 0) - break; - if (ret < n) { - pr_info("%s: phys: %d only got %d of %ld nodes, failing\n", - __func__, i, ret, n); - ret = -1; - break; - } - nid = ret; - } - } else if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) { - u64 size; - - size = memparse(emu_cmdline, &emu_cmdline); - ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size); - } else { - unsigned long n; - - n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); - ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); - } - if (*emu_cmdline == ':') - emu_cmdline++; - - if (ret < 0) - goto no_emu; - - if (numa_cleanup_meminfo(&ei) < 0) { - pr_warn("NUMA: Warning: constructed meminfo invalid, disabling emulation\n"); - goto no_emu; - } - - /* copy the physical distance table */ - if (numa_dist_cnt) { - u64 phys; - - phys = memblock_phys_alloc_range(phys_size, PAGE_SIZE, 0, - PFN_PHYS(max_pfn_mapped)); - if (!phys) { - pr_warn("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); - goto no_emu; - } - phys_dist = __va(phys); - - for (i = 0; i < numa_dist_cnt; i++) - for (j = 0; j < numa_dist_cnt; j++) - phys_dist[i * numa_dist_cnt + j] = - node_distance(i, j); - } - - /* - * Determine the max emulated nid and the default phys nid to use - * for unmapped nodes. - */ - max_emu_nid = setup_emu2phys_nid(&dfl_phys_nid); - - /* commit */ - *numa_meminfo = ei; - - /* Make sure numa_nodes_parsed only contains emulated nodes */ - nodes_clear(numa_nodes_parsed); - for (i = 0; i < ARRAY_SIZE(ei.blk); i++) - if (ei.blk[i].start != ei.blk[i].end && - ei.blk[i].nid != NUMA_NO_NODE) - node_set(ei.blk[i].nid, numa_nodes_parsed); - - /* - * Transform __apicid_to_node table to use emulated nids by - * reverse-mapping phys_nid. The maps should always exist but fall - * back to zero just in case. - */ - for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { - if (__apicid_to_node[i] == NUMA_NO_NODE) - continue; - for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) - if (__apicid_to_node[i] == emu_nid_to_phys[j]) - break; - __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0; - } - - /* make sure all emulated nodes are mapped to a physical node */ - for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) - if (emu_nid_to_phys[i] == NUMA_NO_NODE) - emu_nid_to_phys[i] = dfl_phys_nid; - - /* transform distance table */ - numa_reset_distance(); - for (i = 0; i < max_emu_nid + 1; i++) { - for (j = 0; j < max_emu_nid + 1; j++) { - int physi = emu_nid_to_phys[i]; - int physj = emu_nid_to_phys[j]; - int dist; - - if (get_option(&emu_cmdline, &dist) == 2) - ; - else if (physi >= numa_dist_cnt || physj >= numa_dist_cnt) - dist = physi == physj ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - else - dist = phys_dist[physi * numa_dist_cnt + physj]; - - numa_set_distance(i, j, dist); - } - } - - /* free the copied physical distance table */ - memblock_free(phys_dist, phys_size); - return; - -no_emu: - /* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */ - for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) - emu_nid_to_phys[i] = i; -} - -#ifndef CONFIG_DEBUG_PER_CPU_MAPS -void numa_add_cpu(int cpu) -{ - int physnid, nid; - - nid = early_cpu_to_node(cpu); - BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); - - physnid = emu_nid_to_phys[nid]; - - /* - * Map the cpu to each emulated node that is allocated on the physical - * node of the cpu's apic id. 
- */ - for_each_online_node(nid) - if (emu_nid_to_phys[nid] == physnid) - cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); -} - -void numa_remove_cpu(int cpu) -{ - int i; - - for_each_online_node(i) - cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); -} -#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ -static void numa_set_cpumask(int cpu, bool enable) -{ - int nid, physnid; - - nid = early_cpu_to_node(cpu); - if (nid == NUMA_NO_NODE) { - /* early_cpu_to_node() already emits a warning and trace */ - return; - } - - physnid = emu_nid_to_phys[nid]; - - for_each_online_node(nid) { - if (emu_nid_to_phys[nid] != physnid) - continue; - - debug_cpumask_set_cpu(cpu, nid, enable); - } -} - -void numa_add_cpu(int cpu) -{ - numa_set_cpumask(cpu, true); -} - -void numa_remove_cpu(int cpu) -{ - numa_set_cpumask(cpu, false); -} -#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h index 86860f2796625b..11e1ff370c106f 100644 --- a/arch/x86/mm/numa_internal.h +++ b/arch/x86/mm/numa_internal.h @@ -5,30 +5,6 @@ #include #include -struct numa_memblk { - u64 start; - u64 end; - int nid; -}; - -struct numa_meminfo { - int nr_blks; - struct numa_memblk blk[NR_NODE_MEMBLKS]; -}; - -void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi); -int __init numa_cleanup_meminfo(struct numa_meminfo *mi); -void __init numa_reset_distance(void); - void __init x86_numa_init(void); -#ifdef CONFIG_NUMA_EMU -void __init numa_emulation(struct numa_meminfo *numa_meminfo, - int numa_dist_cnt); -#else -static inline void numa_emulation(struct numa_meminfo *numa_meminfo, - int numa_dist_cnt) -{ } -#endif - #endif /* __X86_MM_NUMA_INTERNAL_H */ diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index bdc2a240c2aae6..feb8cc6a12bf23 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -104,7 +104,7 @@ __setup("debugpat", pat_debug_setup); #ifdef CONFIG_X86_PAT /* - * X86 PAT uses page flags arch_1 and uncached together to keep track of + * X86 PAT uses page flags arch_1 and arch_2 together to keep track of * memory type of pages that have backing page struct. 
* * X86 PAT supports 4 different memory types: @@ -118,9 +118,9 @@ __setup("debugpat", pat_debug_setup); #define _PGMT_WB 0 #define _PGMT_WC (1UL << PG_arch_1) -#define _PGMT_UC_MINUS (1UL << PG_uncached) -#define _PGMT_WT (1UL << PG_uncached | 1UL << PG_arch_1) -#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1) +#define _PGMT_UC_MINUS (1UL << PG_arch_2) +#define _PGMT_WT (1UL << PG_arch_2 | 1UL << PG_arch_1) +#define _PGMT_MASK (1UL << PG_arch_2 | 1UL << PG_arch_1) #define _PGMT_CLEAR_MASK (~_PGMT_MASK) static inline enum page_cache_mode get_page_memtype(struct page *pg) @@ -176,15 +176,6 @@ static inline void set_page_memtype(struct page *pg, } #endif -enum { - PAT_UC = 0, /* uncached */ - PAT_WC = 1, /* Write combining */ - PAT_WT = 4, /* Write Through */ - PAT_WP = 5, /* Write Protected */ - PAT_WB = 6, /* Write Back (default) */ - PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ -}; - #define CM(c) (_PAGE_CACHE_MODE_ ## c) static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val, @@ -194,13 +185,13 @@ static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val, char *cache_mode; switch (pat_val) { - case PAT_UC: cache = CM(UC); cache_mode = "UC "; break; - case PAT_WC: cache = CM(WC); cache_mode = "WC "; break; - case PAT_WT: cache = CM(WT); cache_mode = "WT "; break; - case PAT_WP: cache = CM(WP); cache_mode = "WP "; break; - case PAT_WB: cache = CM(WB); cache_mode = "WB "; break; - case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; - default: cache = CM(WB); cache_mode = "WB "; break; + case X86_MEMTYPE_UC: cache = CM(UC); cache_mode = "UC "; break; + case X86_MEMTYPE_WC: cache = CM(WC); cache_mode = "WC "; break; + case X86_MEMTYPE_WT: cache = CM(WT); cache_mode = "WT "; break; + case X86_MEMTYPE_WP: cache = CM(WP); cache_mode = "WP "; break; + case X86_MEMTYPE_WB: cache = CM(WB); cache_mode = "WB "; break; + case X86_MEMTYPE_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; + default: cache = CM(WB); cache_mode = "WB "; break; } memcpy(msg, cache_mode, 4); @@ -257,12 +248,6 @@ void pat_cpu_init(void) void __init pat_bp_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; -#define PAT(p0, p1, p2, p3, p4, p5, p6, p7) \ - (((u64)PAT_ ## p0) | ((u64)PAT_ ## p1 << 8) | \ - ((u64)PAT_ ## p2 << 16) | ((u64)PAT_ ## p3 << 24) | \ - ((u64)PAT_ ## p4 << 32) | ((u64)PAT_ ## p5 << 40) | \ - ((u64)PAT_ ## p6 << 48) | ((u64)PAT_ ## p7 << 56)) - if (!IS_ENABLED(CONFIG_X86_PAT)) pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n"); @@ -293,7 +278,7 @@ void __init pat_bp_init(void) * NOTE: When WC or WP is used, it is redirected to UC- per * the default setup in __cachemode2pte_tbl[]. */ - pat_msr_val = PAT(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC); + pat_msr_val = PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC); } /* @@ -328,7 +313,7 @@ void __init pat_bp_init(void) * NOTE: When WT or WP is used, it is redirected to UC- per * the default setup in __cachemode2pte_tbl[]. */ - pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC); + pat_msr_val = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC); } else { /* * Full PAT support. We put WT in slot 7 to improve @@ -356,13 +341,12 @@ void __init pat_bp_init(void) * The reserved slots are unused, but mapped to their * corresponding types in the presence of PAT errata. 
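A small standalone sketch of how a PAT_VALUE()-style macro packs eight slot types into the 64-bit IA32_PAT MSR, one byte per slot with slot i at bits 8*i; the enum values are the architectural PAT type encodings, and the macro name here is illustrative:

#include <stdio.h>
#include <stdint.h>

enum { MT_UC = 0, MT_WC = 1, MT_WT = 4, MT_WP = 5, MT_WB = 6, MT_UC_MINUS = 7 };

#define PAT_PACK(p0, p1, p2, p3, p4, p5, p6, p7)		\
	(((uint64_t)(p0) <<  0) | ((uint64_t)(p1) <<  8) |	\
	 ((uint64_t)(p2) << 16) | ((uint64_t)(p3) << 24) |	\
	 ((uint64_t)(p4) << 32) | ((uint64_t)(p5) << 40) |	\
	 ((uint64_t)(p6) << 48) | ((uint64_t)(p7) << 56))

int main(void)
{
	/* Full-PAT layout used above: WB WC UC- UC WB WP UC- WT */
	uint64_t pat = PAT_PACK(MT_WB, MT_WC, MT_UC_MINUS, MT_UC,
				MT_WB, MT_WP, MT_UC_MINUS, MT_WT);

	printf("IA32_PAT = %#018llx\n", (unsigned long long)pat);
	return 0;
}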
*/ - pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT); + pat_msr_val = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT); } memory_caching_control |= CACHE_PAT; init_cache_modes(pat_msr_val); -#undef PAT } static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ @@ -951,23 +935,20 @@ static void free_pfn_range(u64 paddr, unsigned long size) static int follow_phys(struct vm_area_struct *vma, unsigned long *prot, resource_size_t *phys) { - pte_t *ptep, pte; - spinlock_t *ptl; + struct follow_pfnmap_args args = { .vma = vma, .address = vma->vm_start }; - if (follow_pte(vma, vma->vm_start, &ptep, &ptl)) + if (follow_pfnmap_start(&args)) return -EINVAL; - pte = ptep_get(ptep); - /* Never return PFNs of anon folios in COW mappings. */ - if (vm_normal_folio(vma, vma->vm_start, pte)) { - pte_unmap_unlock(ptep, ptl); + if (!args.special) { + follow_pfnmap_end(&args); return -EINVAL; } - *prot = pgprot_val(pte_pgprot(pte)); - *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; - pte_unmap_unlock(ptep, ptl); + *prot = pgprot_val(args.pgprot); + *phys = (resource_size_t)args.pfn << PAGE_SHIFT; + follow_pfnmap_end(&args); return 0; } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index f5931499c2d6b8..5745a354a241c7 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -641,6 +641,18 @@ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address, } #endif +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) +pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, + pud_t *pudp) +{ + VM_WARN_ON_ONCE(!pud_present(*pudp)); + pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp)); + flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE); + return old; +} +#endif + /** * reserve_top_address - reserves a hole in the top of kernel address space * @reserve - size of hole to reserve @@ -926,3 +938,9 @@ void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd) VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pmd_shstk(pmd)); } + +void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud) +{ + /* See note in arch_check_zapped_pte() */ + VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pud_shstk(pud)); +} diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 9c52a95937adec..6f8e0f21c7103d 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c @@ -57,8 +57,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) } set_apicid_to_node(apic_id, node); node_set(node, numa_nodes_parsed); - printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", - pxm, apic_id, node); + pr_debug("SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", pxm, apic_id, node); } /* Callback for Proximity Domain -> LAPIC mapping */ @@ -98,8 +97,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) set_apicid_to_node(apic_id, node); node_set(node, numa_nodes_parsed); - printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", - pxm, apic_id, node); + pr_debug("SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", pxm, apic_id, node); } int __init x86_acpi_numa_init(void) diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c index bda73cb7a0446b..ae295659ca1422 100644 --- a/arch/x86/mm/testmmiotrace.c +++ b/arch/x86/mm/testmmiotrace.c @@ -144,3 +144,4 @@ static void __exit cleanup(void) module_init(init); module_exit(cleanup); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Test module for mmiotrace"); diff --git 
a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 44ac64f3a047ca..86593d1b787d8a 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -85,9 +86,6 @@ * */ -/* There are 12 bits of space for ASIDS in CR3 */ -#define CR3_HW_ASID_BITS 12 - /* * When enabled, MITIGATION_PAGE_TABLE_ISOLATION consumes a single bit for * user/kernel switches @@ -160,7 +158,6 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid, unsigned long lam) unsigned long cr3 = __sme_pa(pgd) | lam; if (static_cpu_has(X86_FEATURE_PCID)) { - VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); cr3 |= kern_pcid(asid); } else { VM_WARN_ON_ONCE(asid != 0); @@ -503,9 +500,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, { struct mm_struct *prev = this_cpu_read(cpu_tlbstate.loaded_mm); u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); - unsigned long new_lam = mm_lam_cr3_mask(next); bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy); unsigned cpu = smp_processor_id(); + unsigned long new_lam; u64 next_tlb_gen; bool need_flush; u16 new_asid; @@ -619,9 +616,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, cpumask_clear_cpu(cpu, mm_cpumask(prev)); } - /* - * Start remote flushes and then read tlb_gen. - */ + /* Start receiving IPIs and then read tlb_gen (and LAM below) */ if (next != &init_mm) cpumask_set_cpu(cpu, mm_cpumask(next)); next_tlb_gen = atomic64_read(&next->context.tlb_gen); @@ -633,7 +628,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, barrier(); } - set_tlbstate_lam_mode(next); + new_lam = mm_lam_cr3_mask(next); if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); @@ -652,6 +647,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, this_cpu_write(cpu_tlbstate.loaded_mm, next); this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); + cpu_tlbstate_update_lam(new_lam, mm_untag_mask(next)); if (next != prev) { cr4_update_pce_mm(next); @@ -698,6 +694,7 @@ void initialize_tlbstate_and_flush(void) int i; struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm); u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen); + unsigned long lam = mm_lam_cr3_mask(mm); unsigned long cr3 = __read_cr3(); /* Assert that CR3 already references the right mm. */ @@ -705,7 +702,7 @@ void initialize_tlbstate_and_flush(void) /* LAM expected to be disabled */ WARN_ON(cr3 & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57)); - WARN_ON(mm_lam_cr3_mask(mm)); + WARN_ON(lam); /* * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization @@ -724,7 +721,7 @@ void initialize_tlbstate_and_flush(void) this_cpu_write(cpu_tlbstate.next_asid, 1); this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen); - set_tlbstate_lam_mode(mm); + cpu_tlbstate_update_lam(lam, mm_untag_mask(mm)); for (i = 1; i < TLB_NR_DYN_ASIDS; i++) this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index d25d81c8ecc009..06b080b61aa578 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -64,6 +64,56 @@ static bool is_imm8(int value) return value <= 127 && value >= -128; } +/* + * Let us limit the positive offset to be <= 123. + * This is to ensure eventual jit convergence For the following patterns: + * ... 
+ * pass4, final_proglen=4391: + * ... + * 20e: 48 85 ff test rdi,rdi + * 211: 74 7d je 0x290 + * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] + * ... + * 289: 48 85 ff test rdi,rdi + * 28c: 74 17 je 0x2a5 + * 28e: e9 7f ff ff ff jmp 0x212 + * 293: bf 03 00 00 00 mov edi,0x3 + * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (-125) + * and insn at 0x28e is 5-byte jmp insn with offset -129. + * + * pass5, final_proglen=4392: + * ... + * 20e: 48 85 ff test rdi,rdi + * 211: 0f 84 80 00 00 00 je 0x297 + * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] + * ... + * 28d: 48 85 ff test rdi,rdi + * 290: 74 1a je 0x2ac + * 292: eb 84 jmp 0x218 + * 294: bf 03 00 00 00 mov edi,0x3 + * Note that insn at 0x211 is 6-byte cond jump insn now since its offset + * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80). + * At the same time, insn at 0x292 is a 2-byte insn since its offset is + * -124. + * + * pass6 will repeat the same code as in pass4 and this will prevent + * eventual convergence. + * + * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes) + * cycle in the above. In the above example je offset <= 0x7c should work. + * + * For other cases, je <-> je needs offset <= 0x7b to avoid no convergence + * issue. For jmp <-> je and jmp <-> jmp cases, jmp offset <= 0x7c should + * avoid no convergence issue. + * + * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn + * to maximum 123 (0x7b). This way, the jit pass can eventually converge. + */ +static bool is_imm8_jmp_offset(int value) +{ + return value <= 123 && value >= -128; +} + static bool is_simm32(s64 value) { return value == (s64)(s32)value; @@ -273,7 +323,7 @@ struct jit_context { /* Number of bytes emit_patch() needs to generate instructions */ #define X86_PATCH_SIZE 5 /* Number of bytes that will be skipped on tailcall */ -#define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE) +#define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE) static void push_r12(u8 **pprog) { @@ -403,6 +453,37 @@ static void emit_cfi(u8 **pprog, u32 hash) *pprog = prog; } +static void emit_prologue_tail_call(u8 **pprog, bool is_subprog) +{ + u8 *prog = *pprog; + + if (!is_subprog) { + /* cmp rax, MAX_TAIL_CALL_CNT */ + EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT); + EMIT2(X86_JA, 6); /* ja 6 */ + /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT. + * case1: entry of main prog. + * case2: tail callee of main prog. + */ + EMIT1(0x50); /* push rax */ + /* Make rax as tail_call_cnt_ptr. */ + EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */ + EMIT2(0xEB, 1); /* jmp 1 */ + /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT. + * case: tail callee of subprog. + */ + EMIT1(0x50); /* push rax */ + /* push tail_call_cnt_ptr */ + EMIT1(0x50); /* push rax */ + } else { /* is_subprog */ + /* rax is tail_call_cnt_ptr. */ + EMIT1(0x50); /* push rax */ + EMIT1(0x50); /* push rax */ + } + + *pprog = prog; +} + /* * Emit x86-64 prologue code for BPF program. * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes @@ -424,10 +505,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, /* When it's the entry of the whole tailcall context, * zeroing rax means initialising tail_call_cnt. */ - EMIT2(0x31, 0xC0); /* xor eax, eax */ + EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */ else /* Keep the same instruction layout. 
*/ - EMIT2(0x66, 0x90); /* nop2 */ + emit_nops(&prog, 3); /* nop3 */ } /* Exception callback receives FP as third parameter */ if (is_exception_cb) { @@ -453,7 +534,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, if (stack_depth) EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); if (tail_call_reachable) - EMIT1(0x50); /* push rax */ + emit_prologue_tail_call(&prog, is_subprog); *pprog = prog; } @@ -589,13 +670,15 @@ static void emit_return(u8 **pprog, u8 *ip) *pprog = prog; } +#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8)) + /* * Generate the following code: * * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... * if (index >= array->map.max_entries) * goto out; - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; * prog = array->ptrs[index]; * if (prog == NULL) @@ -608,7 +691,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, u32 stack_depth, u8 *ip, struct jit_context *ctx) { - int tcc_off = -4 - round_up(stack_depth, 8); + int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); u8 *prog = *pprog, *start = *pprog; int offset; @@ -630,16 +713,14 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, EMIT2(X86_JBE, offset); /* jbe out */ /* - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ - EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ + EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ + EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JAE, offset); /* jae out */ - EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ /* prog = array->ptrs[index]; */ EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ @@ -654,6 +735,9 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JE, offset); /* je out */ + /* Inc tail_call_cnt if the slot is populated. */ + EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ + if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); @@ -663,6 +747,11 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, pop_r12(&prog); } + /* Pop tail_call_cnt_ptr. */ + EMIT1(0x58); /* pop rax */ + /* Pop tail_call_cnt, if it's main prog. + * Pop tail_call_cnt_ptr, if it's subprog. 
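A C-level sketch of the bookkeeping the JIT emits for bpf_tail_call() above, with the counter reached through a pointer so the main prog and its subprogs all bump the same count; the struct and helper names are illustrative only:

struct tail_call_ctx {
	unsigned long cnt;	/* single counter, kept in the main prog's frame */
};

int do_tail_call(struct tail_call_ctx *tcc, void **prog_array,
		 unsigned int max_entries, unsigned int index)
{
	if (index >= max_entries)
		return -1;				/* out */
	if (tcc->cnt >= 33 /* MAX_TAIL_CALL_CNT */)
		return -1;				/* out */
	if (!prog_array[index])
		return -1;				/* empty slot: count not bumped */
	tcc->cnt++;					/* counted only when a prog actually runs */
	/* ... tail-call into prog_array[index] ... */
	return 0;
}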
+ */ EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ @@ -691,21 +780,19 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, bool *callee_regs_used, u32 stack_depth, struct jit_context *ctx) { - int tcc_off = -4 - round_up(stack_depth, 8); + int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); u8 *prog = *pprog, *start = *pprog; int offset; /* - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ - EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ + EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ + EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ offset = ctx->tail_call_direct_label - (prog + 2 - start); EMIT2(X86_JAE, offset); /* jae out */ - EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ poke->tailcall_bypass = ip + (prog - start); poke->adj_off = X86_TAIL_CALL_OFFSET; @@ -715,6 +802,9 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, poke->tailcall_bypass); + /* Inc tail_call_cnt if the slot is populated. */ + EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ + if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); @@ -724,6 +814,11 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, pop_r12(&prog); } + /* Pop tail_call_cnt_ptr. */ + EMIT1(0x58); /* pop rax */ + /* Pop tail_call_cnt, if it's main prog. + * Pop tail_call_cnt_ptr, if it's subprog. 
+ */ EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); @@ -1311,9 +1406,11 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) -/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ -#define RESTORE_TAIL_CALL_CNT(stack) \ - EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) +#define __LOAD_TCC_PTR(off) \ + EMIT3_off32(0x48, 0x8B, 0x85, off) +/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */ +#define LOAD_TAIL_CALL_CNT_PTR(stack) \ + __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)) static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding) @@ -2031,7 +2128,7 @@ st: if (is_imm8(insn->off)) func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { - RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); + LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth); ip += 7; } if (!imm32) @@ -2184,7 +2281,7 @@ st: if (is_imm8(insn->off)) return -EFAULT; } jmp_offset = addrs[i + insn->off] - addrs[i]; - if (is_imm8(jmp_offset)) { + if (is_imm8_jmp_offset(jmp_offset)) { if (jmp_padding) { /* To keep the jmp_offset valid, the extra bytes are * padded before the jump insn, so we subtract the @@ -2266,7 +2363,7 @@ st: if (is_imm8(insn->off)) break; } emit_jmp: - if (is_imm8(jmp_offset)) { + if (is_imm8_jmp_offset(jmp_offset)) { if (jmp_padding) { /* To avoid breaking jmp_offset, the extra bytes * are padded before the actual jmp insn, so @@ -2706,6 +2803,10 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, return 0; } +/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ +#define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \ + __LOAD_TCC_PTR(-round_up(stack, 8) - 8) + /* Example: * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); * its 'struct btf_func_model' will be nr_args=2 @@ -2826,7 +2927,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im * [ ... ] * [ stack_arg2 ] * RBP - arg_stack_off [ stack_arg1 ] - * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX + * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX */ /* room for return value of orig_call or fentry prog */ @@ -2955,10 +3056,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im save_args(m, &prog, arg_stack_off, true); if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { - /* Before calling the original function, restore the - * tail_call_cnt from stack to rax. + /* Before calling the original function, load the + * tail_call_cnt_ptr from stack to rax. */ - RESTORE_TAIL_CALL_CNT(stack_size); + LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); } if (flags & BPF_TRAMP_F_ORIG_STACK) { @@ -3017,10 +3118,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im goto cleanup; } } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { - /* Before running the original function, restore the - * tail_call_cnt from stack to rax. + /* Before running the original function, load the + * tail_call_cnt_ptr from stack to rax. 
*/ - RESTORE_TAIL_CALL_CNT(stack_size); + LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); } /* restore return value of orig_call or fentry prog back into RAX */ diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index b33afb240601b0..98a9bb92d75c88 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev) return; rp = pcie_find_root_port(dev); - if (!rp->pm_cap) + if (!rp || !rp->pm_cap) return; rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >> @@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev) u16 pmc; rp = pcie_find_root_port(dev); - if (!rp->pm_cap) + if (!rp || !rp->pm_cap) return; pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc); diff --git a/arch/x86/platform/geode/Makefile b/arch/x86/platform/geode/Makefile index a8a6b1dedb0153..34b53e97a0add4 100644 --- a/arch/x86/platform/geode/Makefile +++ b/arch/x86/platform/geode/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_GEODE_COMMON) += geode-common.o obj-$(CONFIG_ALIX) += alix.o obj-$(CONFIG_NET5501) += net5501.o obj-$(CONFIG_GEOS) += geos.o diff --git a/arch/x86/platform/geode/alix.c b/arch/x86/platform/geode/alix.c index b39bf3b5e108cd..be65cd704e21ed 100644 --- a/arch/x86/platform/geode/alix.c +++ b/arch/x86/platform/geode/alix.c @@ -18,15 +18,12 @@ #include #include #include -#include -#include -#include -#include -#include #include #include +#include "geode-common.h" + #define BIOS_SIGNATURE_TINYBIOS 0xf0000 #define BIOS_SIGNATURE_COREBOOT 0x500 #define BIOS_REGION_SIZE 0x10000 @@ -41,79 +38,16 @@ module_param(force, bool, 0444); /* FIXME: Award bios is not automatically detected as Alix platform */ MODULE_PARM_DESC(force, "Force detection as ALIX.2/ALIX.3 platform"); -static struct gpio_keys_button alix_gpio_buttons[] = { - { - .code = KEY_RESTART, - .gpio = 24, - .active_low = 1, - .desc = "Reset button", - .type = EV_KEY, - .wakeup = 0, - .debounce_interval = 100, - .can_disable = 0, - } -}; -static struct gpio_keys_platform_data alix_buttons_data = { - .buttons = alix_gpio_buttons, - .nbuttons = ARRAY_SIZE(alix_gpio_buttons), - .poll_interval = 20, -}; - -static struct platform_device alix_buttons_dev = { - .name = "gpio-keys-polled", - .id = 1, - .dev = { - .platform_data = &alix_buttons_data, - } -}; - -static struct gpio_led alix_leds[] = { - { - .name = "alix:1", - .default_trigger = "default-on", - }, - { - .name = "alix:2", - .default_trigger = "default-off", - }, - { - .name = "alix:3", - .default_trigger = "default-off", - }, -}; - -static struct gpio_led_platform_data alix_leds_data = { - .num_leds = ARRAY_SIZE(alix_leds), - .leds = alix_leds, -}; - -static struct gpiod_lookup_table alix_leds_gpio_table = { - .dev_id = "leds-gpio", - .table = { - /* The Geode GPIOs should be on the CS5535 companion chip */ - GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW), - { } - }, -}; - -static struct platform_device alix_leds_dev = { - .name = "leds-gpio", - .id = -1, - .dev.platform_data = &alix_leds_data, -}; - -static struct platform_device *alix_devs[] __initdata = { - &alix_buttons_dev, - &alix_leds_dev, +static const struct geode_led alix_leds[] __initconst = { + { 6, true }, + { 25, false }, + { 27, false }, }; static void __init register_alix(void) { - /* Setup LED control through leds-gpio driver */ - 
gpiod_add_lookup_table(&alix_leds_gpio_table); - platform_add_devices(alix_devs, ARRAY_SIZE(alix_devs)); + geode_create_restart_key(24); + geode_create_leds("alix", alix_leds, ARRAY_SIZE(alix_leds)); } static bool __init alix_present(unsigned long bios_phys, diff --git a/arch/x86/platform/geode/geode-common.c b/arch/x86/platform/geode/geode-common.c new file mode 100644 index 00000000000000..8fd78e60bf15a9 --- /dev/null +++ b/arch/x86/platform/geode/geode-common.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Shared helpers to register GPIO-connected buttons and LEDs + * on AMD Geode boards. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "geode-common.h" + +static const struct software_node geode_gpiochip_node = { + .name = "cs5535-gpio", +}; + +static const struct property_entry geode_gpio_keys_props[] = { + PROPERTY_ENTRY_U32("poll-interval", 20), + { } +}; + +static const struct software_node geode_gpio_keys_node = { + .name = "geode-gpio-keys", + .properties = geode_gpio_keys_props, +}; + +static struct property_entry geode_restart_key_props[] = { + { /* Placeholder for GPIO property */ }, + PROPERTY_ENTRY_U32("linux,code", KEY_RESTART), + PROPERTY_ENTRY_STRING("label", "Reset button"), + PROPERTY_ENTRY_U32("debounce-interval", 100), + { } +}; + +static const struct software_node geode_restart_key_node = { + .parent = &geode_gpio_keys_node, + .properties = geode_restart_key_props, +}; + +static const struct software_node *geode_gpio_keys_swnodes[] __initconst = { + &geode_gpiochip_node, + &geode_gpio_keys_node, + &geode_restart_key_node, + NULL +}; + +/* + * Creates gpio-keys-polled device for the restart key. + * + * Note that it needs to be called first, before geode_create_leds(), + * because it registers gpiochip software node used by both gpio-keys and + * leds-gpio devices. + */ +int __init geode_create_restart_key(unsigned int pin) +{ + struct platform_device_info keys_info = { + .name = "gpio-keys-polled", + .id = 1, + }; + struct platform_device *pd; + int err; + + geode_restart_key_props[0] = PROPERTY_ENTRY_GPIO("gpios", + &geode_gpiochip_node, + pin, GPIO_ACTIVE_LOW); + + err = software_node_register_node_group(geode_gpio_keys_swnodes); + if (err) { + pr_err("failed to register gpio-keys software nodes: %d\n", err); + return err; + } + + keys_info.fwnode = software_node_fwnode(&geode_gpio_keys_node); + + pd = platform_device_register_full(&keys_info); + err = PTR_ERR_OR_ZERO(pd); + if (err) { + pr_err("failed to create gpio-keys device: %d\n", err); + software_node_unregister_node_group(geode_gpio_keys_swnodes); + return err; + } + + return 0; +} + +static const struct software_node geode_gpio_leds_node = { + .name = "geode-leds", +}; + +#define MAX_LEDS 3 + +int __init geode_create_leds(const char *label, const struct geode_led *leds, + unsigned int n_leds) +{ + const struct software_node *group[MAX_LEDS + 2] = { 0 }; + struct software_node *swnodes; + struct property_entry *props; + struct platform_device_info led_info = { + .name = "leds-gpio", + .id = PLATFORM_DEVID_NONE, + }; + struct platform_device *led_dev; + const char *node_name; + int err; + int i; + + if (n_leds > MAX_LEDS) { + pr_err("%s: too many LEDs\n", __func__); + return -EINVAL; + } + + swnodes = kcalloc(n_leds, sizeof(*swnodes), GFP_KERNEL); + if (!swnodes) + return -ENOMEM; + + /* + * Each LED is represented by 3 properties: "gpios", + * "linux,default-trigger", and am empty terminator. 
+ */ + props = kcalloc(n_leds * 3, sizeof(*props), GFP_KERNEL); + if (!props) { + err = -ENOMEM; + goto err_free_swnodes; + } + + group[0] = &geode_gpio_leds_node; + for (i = 0; i < n_leds; i++) { + node_name = kasprintf(GFP_KERNEL, "%s:%d", label, i); + if (!node_name) { + err = -ENOMEM; + goto err_free_names; + } + + props[i * 3 + 0] = + PROPERTY_ENTRY_GPIO("gpios", &geode_gpiochip_node, + leds[i].pin, GPIO_ACTIVE_LOW); + props[i * 3 + 1] = + PROPERTY_ENTRY_STRING("linux,default-trigger", + leds[i].default_on ? + "default-on" : "default-off"); + /* props[i * 3 + 2] is an empty terminator */ + + swnodes[i] = SOFTWARE_NODE(node_name, &props[i * 3], + &geode_gpio_leds_node); + group[i + 1] = &swnodes[i]; + } + + err = software_node_register_node_group(group); + if (err) { + pr_err("failed to register LED software nodes: %d\n", err); + goto err_free_names; + } + + led_info.fwnode = software_node_fwnode(&geode_gpio_leds_node); + + led_dev = platform_device_register_full(&led_info); + err = PTR_ERR_OR_ZERO(led_dev); + if (err) { + pr_err("failed to create LED device: %d\n", err); + goto err_unregister_group; + } + + return 0; + +err_unregister_group: + software_node_unregister_node_group(group); +err_free_names: + while (--i >= 0) + kfree(swnodes[i].name); + kfree(props); +err_free_swnodes: + kfree(swnodes); + return err; +} diff --git a/arch/x86/platform/geode/geode-common.h b/arch/x86/platform/geode/geode-common.h new file mode 100644 index 00000000000000..9e0afd34bfad0b --- /dev/null +++ b/arch/x86/platform/geode/geode-common.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Shared helpers to register GPIO-connected buttons and LEDs + * on AMD Geode boards. + */ + +#ifndef __PLATFORM_GEODE_COMMON_H +#define __PLATFORM_GEODE_COMMON_H + +#include + +struct geode_led { + unsigned int pin; + bool default_on; +}; + +int geode_create_restart_key(unsigned int pin); +int geode_create_leds(const char *label, const struct geode_led *leds, + unsigned int n_leds); + +#endif /* __PLATFORM_GEODE_COMMON_H */ diff --git a/arch/x86/platform/geode/geos.c b/arch/x86/platform/geode/geos.c index d263528c90bbf1..98027fb1ec32fe 100644 --- a/arch/x86/platform/geode/geos.c +++ b/arch/x86/platform/geode/geos.c @@ -16,88 +16,22 @@ #include #include #include -#include -#include -#include -#include -#include #include #include -static struct gpio_keys_button geos_gpio_buttons[] = { - { - .code = KEY_RESTART, - .gpio = 3, - .active_low = 1, - .desc = "Reset button", - .type = EV_KEY, - .wakeup = 0, - .debounce_interval = 100, - .can_disable = 0, - } -}; -static struct gpio_keys_platform_data geos_buttons_data = { - .buttons = geos_gpio_buttons, - .nbuttons = ARRAY_SIZE(geos_gpio_buttons), - .poll_interval = 20, -}; - -static struct platform_device geos_buttons_dev = { - .name = "gpio-keys-polled", - .id = 1, - .dev = { - .platform_data = &geos_buttons_data, - } -}; - -static struct gpio_led geos_leds[] = { - { - .name = "geos:1", - .default_trigger = "default-on", - }, - { - .name = "geos:2", - .default_trigger = "default-off", - }, - { - .name = "geos:3", - .default_trigger = "default-off", - }, -}; - -static struct gpio_led_platform_data geos_leds_data = { - .num_leds = ARRAY_SIZE(geos_leds), - .leds = geos_leds, -}; - -static struct gpiod_lookup_table geos_leds_gpio_table = { - .dev_id = "leds-gpio", - .table = { - /* The Geode GPIOs should be on the CS5535 companion chip */ - GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW), 
- GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW), - { } - }, -}; - -static struct platform_device geos_leds_dev = { - .name = "leds-gpio", - .id = -1, - .dev.platform_data = &geos_leds_data, -}; +#include "geode-common.h" -static struct platform_device *geos_devs[] __initdata = { - &geos_buttons_dev, - &geos_leds_dev, +static const struct geode_led geos_leds[] __initconst = { + { 6, true }, + { 25, false }, + { 27, false }, }; static void __init register_geos(void) { - /* Setup LED control through leds-gpio driver */ - gpiod_add_lookup_table(&geos_leds_gpio_table); - platform_add_devices(geos_devs, ARRAY_SIZE(geos_devs)); + geode_create_restart_key(3); + geode_create_leds("geos", geos_leds, ARRAY_SIZE(geos_leds)); } static int __init geos_init(void) diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c index 558384acd77768..c9cee7dea99b5e 100644 --- a/arch/x86/platform/geode/net5501.c +++ b/arch/x86/platform/geode/net5501.c @@ -16,80 +16,25 @@ #include #include #include -#include -#include #include -#include #include +#include #include +#include "geode-common.h" + #define BIOS_REGION_BASE 0xffff0000 #define BIOS_REGION_SIZE 0x00010000 -static struct gpio_keys_button net5501_gpio_buttons[] = { - { - .code = KEY_RESTART, - .gpio = 24, - .active_low = 1, - .desc = "Reset button", - .type = EV_KEY, - .wakeup = 0, - .debounce_interval = 100, - .can_disable = 0, - } -}; -static struct gpio_keys_platform_data net5501_buttons_data = { - .buttons = net5501_gpio_buttons, - .nbuttons = ARRAY_SIZE(net5501_gpio_buttons), - .poll_interval = 20, -}; - -static struct platform_device net5501_buttons_dev = { - .name = "gpio-keys-polled", - .id = 1, - .dev = { - .platform_data = &net5501_buttons_data, - } -}; - -static struct gpio_led net5501_leds[] = { - { - .name = "net5501:1", - .default_trigger = "default-on", - }, -}; - -static struct gpio_led_platform_data net5501_leds_data = { - .num_leds = ARRAY_SIZE(net5501_leds), - .leds = net5501_leds, -}; - -static struct gpiod_lookup_table net5501_leds_gpio_table = { - .dev_id = "leds-gpio", - .table = { - /* The Geode GPIOs should be on the CS5535 companion chip */ - GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_HIGH), - { } - }, -}; - -static struct platform_device net5501_leds_dev = { - .name = "leds-gpio", - .id = -1, - .dev.platform_data = &net5501_leds_data, -}; - -static struct platform_device *net5501_devs[] __initdata = { - &net5501_buttons_dev, - &net5501_leds_dev, +static const struct geode_led net5501_leds[] __initconst = { + { 6, true }, }; static void __init register_net5501(void) { - /* Setup LED control through leds-gpio driver */ - gpiod_add_lookup_table(&net5501_leds_gpio_table); - platform_add_devices(net5501_devs, ARRAY_SIZE(net5501_devs)); + geode_create_restart_key(24); + geode_create_leds("net5501", net5501_leds, ARRAY_SIZE(net5501_leds)); } struct net5501_board { diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index f83bbe0acd4a1b..a8e75f8c14fd40 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -27,9 +27,10 @@ #include #include #include -#include #include +#include + #define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */ #define IPCMSG_COLD_RESET 0xF1 diff --git a/arch/x86/platform/pvh/Makefile b/arch/x86/platform/pvh/Makefile index 5dec5067c9fb77..c43fb7964dc4dc 100644 --- a/arch/x86/platform/pvh/Makefile +++ b/arch/x86/platform/pvh/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 
OBJECT_FILES_NON_STANDARD_head.o := y +KASAN_SANITIZE := n obj-$(CONFIG_PVH) += enlighten.o obj-$(CONFIG_PVH) += head.o diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c index 944e0290f2c001..2263885d16badd 100644 --- a/arch/x86/platform/pvh/enlighten.c +++ b/arch/x86/platform/pvh/enlighten.c @@ -130,7 +130,11 @@ void __init xen_prepare_pvh(void) BUG(); } - memset(&pvh_bootparams, 0, sizeof(pvh_bootparams)); + /* + * This must not compile to "call memset" because memset() may be + * instrumented. + */ + __builtin_memset(&pvh_bootparams, 0, sizeof(pvh_bootparams)); hypervisor_specific_init(xen_guest); diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S index f7235ef87bc32b..64fca49cd88ff9 100644 --- a/arch/x86/platform/pvh/head.S +++ b/arch/x86/platform/pvh/head.S @@ -7,6 +7,7 @@ .code32 .text #define _pa(x) ((x) - __START_KERNEL_map) +#define rva(x) ((x) - pvh_start_xen) #include #include @@ -15,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -54,7 +56,25 @@ SYM_CODE_START_LOCAL(pvh_start_xen) UNWIND_HINT_END_OF_STACK cld - lgdt (_pa(gdt)) + /* + * See the comment for startup_32 for more details. We need to + * execute a call to get the execution address to be position + * independent, but we don't have a stack. Save and restore the + * magic field of start_info in ebx, and use that as the stack. + */ + mov (%ebx), %eax + leal 4(%ebx), %esp + ANNOTATE_INTRA_FUNCTION_CALL + call 1f +1: popl %ebp + mov %eax, (%ebx) + subl $rva(1b), %ebp + movl $0, %esp + + leal rva(gdt)(%ebp), %eax + leal rva(gdt_start)(%ebp), %ecx + movl %ecx, 2(%eax) + lgdt (%eax) mov $PVH_DS_SEL,%eax mov %eax,%ds @@ -62,14 +82,14 @@ SYM_CODE_START_LOCAL(pvh_start_xen) mov %eax,%ss /* Stash hvm_start_info. */ - mov $_pa(pvh_start_info), %edi + leal rva(pvh_start_info)(%ebp), %edi mov %ebx, %esi - mov _pa(pvh_start_info_sz), %ecx + movl rva(pvh_start_info_sz)(%ebp), %ecx shr $2,%ecx rep movsl - mov $_pa(early_stack_end), %esp + leal rva(early_stack_end)(%ebp), %esp /* Enable PAE mode. */ mov %cr4, %eax @@ -83,31 +103,86 @@ SYM_CODE_START_LOCAL(pvh_start_xen) btsl $_EFER_LME, %eax wrmsr + mov %ebp, %ebx + subl $_pa(pvh_start_xen), %ebx /* offset */ + jz .Lpagetable_done + + /* Fixup page-tables for relocation. */ + leal rva(pvh_init_top_pgt)(%ebp), %edi + movl $PTRS_PER_PGD, %ecx +2: + testl $_PAGE_PRESENT, 0x00(%edi) + jz 1f + addl %ebx, 0x00(%edi) +1: + addl $8, %edi + decl %ecx + jnz 2b + + /* L3 ident has a single entry. */ + leal rva(pvh_level3_ident_pgt)(%ebp), %edi + addl %ebx, 0x00(%edi) + + leal rva(pvh_level3_kernel_pgt)(%ebp), %edi + addl %ebx, (PAGE_SIZE - 16)(%edi) + addl %ebx, (PAGE_SIZE - 8)(%edi) + + /* pvh_level2_ident_pgt is fine - large pages */ + + /* pvh_level2_kernel_pgt needs adjustment - large pages */ + leal rva(pvh_level2_kernel_pgt)(%ebp), %edi + movl $PTRS_PER_PMD, %ecx +2: + testl $_PAGE_PRESENT, 0x00(%edi) + jz 1f + addl %ebx, 0x00(%edi) +1: + addl $8, %edi + decl %ecx + jnz 2b + +.Lpagetable_done: /* Enable pre-constructed page tables. */ - mov $_pa(init_top_pgt), %eax + leal rva(pvh_init_top_pgt)(%ebp), %eax mov %eax, %cr3 mov $(X86_CR0_PG | X86_CR0_PE), %eax mov %eax, %cr0 /* Jump to 64-bit mode. */ - ljmp $PVH_CS_SEL, $_pa(1f) + pushl $PVH_CS_SEL + leal rva(1f)(%ebp), %eax + pushl %eax + lretl /* 64-bit entry point. */ .code64 1: + UNWIND_HINT_END_OF_STACK + /* Set base address in stack canary descriptor. 
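A C rendering of the page-table relocation fixup loops in pvh_start_xen above: every present entry in the pre-built tables is rebased by the load offset, since the tables were linked for a fixed physical load address. This is only a sketch; the constants mirror a 512-entry table:

#include <stdint.h>

#define PTRS_PER_TABLE	512
#define PAGE_PRESENT	0x001ULL

static void relocate_table(uint64_t *table, unsigned int nr_entries,
			   uint64_t load_offset)
{
	for (unsigned int i = 0; i < nr_entries; i++) {
		if (table[i] & PAGE_PRESENT)
			table[i] += load_offset;	/* entries hold physical addresses */
	}
}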
*/ mov $MSR_GS_BASE,%ecx - mov $_pa(canary), %eax + leal canary(%rip), %eax xor %edx, %edx wrmsr + /* + * Calculate load offset and store in phys_base. __pa() needs + * phys_base set to calculate the hypercall page in xen_pvh_init(). + */ + movq %rbp, %rbx + subq $_pa(pvh_start_xen), %rbx + movq %rbx, phys_base(%rip) call xen_prepare_pvh + /* + * Clear phys_base. __startup_64 will *add* to its value, + * so reset to 0. + */ + xor %rbx, %rbx + movq %rbx, phys_base(%rip) /* startup_64 expects boot_params in %rsi. */ - mov $_pa(pvh_bootparams), %rsi - mov $_pa(startup_64), %rax - ANNOTATE_RETPOLINE_SAFE - jmp *%rax + lea pvh_bootparams(%rip), %rsi + jmp startup_64 #else /* CONFIG_X86_64 */ @@ -143,7 +218,7 @@ SYM_CODE_END(pvh_start_xen) .balign 8 SYM_DATA_START_LOCAL(gdt) .word gdt_end - gdt_start - .long _pa(gdt_start) + .long _pa(gdt_start) /* x86-64 will overwrite if relocated. */ .word 0 SYM_DATA_END(gdt) SYM_DATA_START_LOCAL(gdt_start) @@ -163,5 +238,67 @@ SYM_DATA_START_LOCAL(early_stack) .fill BOOT_STACK_SIZE, 1, 0 SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end) +#ifdef CONFIG_X86_64 +/* + * Xen PVH needs a set of identity mapped and kernel high mapping + * page tables. pvh_start_xen starts running on the identity mapped + * page tables, but xen_prepare_pvh calls into the high mapping. + * These page tables need to be relocatable and are only used until + * startup_64 transitions to init_top_pgt. + */ +SYM_DATA_START_PAGE_ALIGNED(pvh_init_top_pgt) + .quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC + .org pvh_init_top_pgt + L4_PAGE_OFFSET * 8, 0 + .quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC + .org pvh_init_top_pgt + L4_START_KERNEL * 8, 0 + /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ + .quad pvh_level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC +SYM_DATA_END(pvh_init_top_pgt) + +SYM_DATA_START_PAGE_ALIGNED(pvh_level3_ident_pgt) + .quad pvh_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC + .fill 511, 8, 0 +SYM_DATA_END(pvh_level3_ident_pgt) +SYM_DATA_START_PAGE_ALIGNED(pvh_level2_ident_pgt) + /* + * Since I easily can, map the first 1G. + * Don't set NX because code runs from these pages. + * + * Note: This sets _PAGE_GLOBAL despite whether + * the CPU supports it or it is enabled. But, + * the CPU should ignore the bit. + */ + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) +SYM_DATA_END(pvh_level2_ident_pgt) +SYM_DATA_START_PAGE_ALIGNED(pvh_level3_kernel_pgt) + .fill L3_START_KERNEL, 8, 0 + /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ + .quad pvh_level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC + .quad 0 /* no fixmap */ +SYM_DATA_END(pvh_level3_kernel_pgt) + +SYM_DATA_START_PAGE_ALIGNED(pvh_level2_kernel_pgt) + /* + * Kernel high mapping. + * + * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in + * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled, + * 512 MiB otherwise. + * + * (NOTE: after that starts the module area, see MODULES_VADDR.) + * + * This table is eventually used by the kernel during normal runtime. + * Care must be taken to clear out undesired bits later, like _PAGE_RW + * or _PAGE_GLOBAL in some cases. 
+ */ + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE / PMD_SIZE) +SYM_DATA_END(pvh_level2_kernel_pgt) + + ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_RELOC, + .long CONFIG_PHYSICAL_ALIGN; + .long LOAD_PHYSICAL_ADDR; + .long KERNEL_IMAGE_SIZE - 1) +#endif + ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, _ASM_PTR (pvh_start_xen - __START_KERNEL_map)) diff --git a/arch/x86/um/sysrq_32.c b/arch/x86/um/sysrq_32.c index f2383484840d3b..a1ee415c008d98 100644 --- a/arch/x86/um/sysrq_32.c +++ b/arch/x86/um/sysrq_32.c @@ -9,7 +9,6 @@ #include #include #include -#include /* This is declared by */ void show_regs(struct pt_regs *regs) diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c index 0bf6de40abffe1..340d8a243c8a8a 100644 --- a/arch/x86/um/sysrq_64.c +++ b/arch/x86/um/sysrq_64.c @@ -12,7 +12,6 @@ #include #include #include -#include void show_regs(struct pt_regs *regs) { diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c index 76d9f6ce7a3d16..f238f7b33cdd93 100644 --- a/arch/x86/um/vdso/vma.c +++ b/arch/x86/um/vdso/vma.c @@ -52,8 +52,11 @@ subsys_initcall(init_vdso); int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { - int err; + struct vm_area_struct *vma; struct mm_struct *mm = current->mm; + static struct vm_special_mapping vdso_mapping = { + .name = "[vdso]", + }; if (!vdso_enabled) return 0; @@ -61,12 +64,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (mmap_write_lock_killable(mm)) return -EINTR; - err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE, + vdso_mapping.pages = vdsop; + vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, - vdsop); + &vdso_mapping); mmap_write_unlock(mm); - return err; + return IS_ERR(vma) ? PTR_ERR(vma) : 0; } diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index 728a4366ca8556..bf68c329fc013e 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -28,6 +29,28 @@ bool __ro_after_init xen_pvh; EXPORT_SYMBOL_GPL(xen_pvh); +#ifdef CONFIG_XEN_DOM0 +int xen_pvh_setup_gsi(int gsi, int trigger, int polarity) +{ + int ret; + struct physdev_setup_gsi setup_gsi; + + setup_gsi.gsi = gsi; + setup_gsi.triggering = (trigger == ACPI_EDGE_SENSITIVE ? 0 : 1); + setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1); + + ret = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi); + if (ret == -EEXIST) { + xen_raw_printk("Already setup the GSI :%d\n", gsi); + ret = 0; + } else if (ret) + xen_raw_printk("Fail to setup GSI (%d)!\n", gsi); + + return ret; +} +EXPORT_SYMBOL_GPL(xen_pvh_setup_gsi); +#endif + /* * Reserve e820 UNUSABLE regions to inflate the memory balloon. 
* diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index f1ce39d6d32cbb..55a4996d0c04f1 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -665,7 +665,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) { spinlock_t *ptl = NULL; -#if USE_SPLIT_PTE_PTLOCKS +#if defined(CONFIG_SPLIT_PTE_PTLOCKS) ptl = ptlock_ptr(page_ptdesc(page)); spin_lock_nest_lock(ptl, &mm->page_table_lock); #endif @@ -1553,7 +1553,8 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, __set_pfn_prot(pfn, PAGE_KERNEL_RO); - if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned) + if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS) && + !pinned) __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); xen_mc_issue(XEN_LAZY_MMU); @@ -1581,7 +1582,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level) if (pinned) { xen_mc_batch(); - if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) + if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS)) __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); __set_pfn_prot(pfn, PAGE_KERNEL); @@ -2018,10 +2019,7 @@ void __init xen_reserve_special_pages(void) void __init xen_pt_check_e820(void) { - if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) { - xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n"); - BUG(); - } + xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table"); } static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 7c735b730acd25..b52d3e17e2c152 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -70,6 +70,7 @@ #include #include #include +#include #include #include @@ -80,6 +81,7 @@ #include #include #include +#include #include "xen-ops.h" @@ -792,6 +794,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, return ret; } +/* Remapped non-RAM areas */ +#define NR_NONRAM_REMAP 4 +static struct nonram_remap { + phys_addr_t maddr; + phys_addr_t paddr; + size_t size; +} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init; +static unsigned int nr_nonram_remap __ro_after_init; + +/* + * Do the real remapping of non-RAM regions as specified in the + * xen_nonram_remap[] array. + * In case of an error just crash the system. + */ +void __init xen_do_remap_nonram(void) +{ + unsigned int i; + unsigned int remapped = 0; + const struct nonram_remap *remap = xen_nonram_remap; + unsigned long pfn, mfn, end_pfn; + + for (i = 0; i < nr_nonram_remap; i++) { + end_pfn = PFN_UP(remap->paddr + remap->size); + pfn = PFN_DOWN(remap->paddr); + mfn = PFN_DOWN(remap->maddr); + while (pfn < end_pfn) { + if (!set_phys_to_machine(pfn, mfn)) + panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n", + pfn, mfn); + + pfn++; + mfn++; + remapped++; + } + + remap++; + } + + pr_info("Remapped %u non-RAM page(s)\n", remapped); +} + +#ifdef CONFIG_ACPI +/* + * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM + * regions into account. + * Any attempt to map an area crossing a remap boundary will produce a + * WARN() splat. + * phys is related to remap->maddr on input and will be rebased to remap->paddr. 
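Before the function body that follows, a standalone illustration of the rebasing rule just described: an address inside a remapped non-RAM region keeps its offset but moves from the recorded machine base (maddr) to the new guest-physical base (paddr). All values below are invented.

#include <stdio.h>
#include <stdint.h>

struct remap { uint64_t maddr, paddr, size; };

static uint64_t rebase(const struct remap *r, unsigned int n, uint64_t phys)
{
        for (unsigned int i = 0; i < n; i++)
                if (phys >= r[i].maddr && phys < r[i].maddr + r[i].size)
                        return phys + r[i].paddr - r[i].maddr;
        return phys;    /* not remapped, use as-is */
}

int main(void)
{
        struct remap tbl[] = { { 0x80000000, 0x200000000ULL, 0x10000 } };

        printf("%#llx\n", (unsigned long long)rebase(tbl, 1, 0x80001234));
        /* Prints 0x200001234: same offset, new base. */
        return 0;
}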
+ */ +static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys, + acpi_size size) +{ + unsigned int i; + const struct nonram_remap *remap = xen_nonram_remap; + + for (i = 0; i < nr_nonram_remap; i++) { + if (phys + size > remap->maddr && + phys < remap->maddr + remap->size) { + WARN_ON(phys < remap->maddr || + phys + size > remap->maddr + remap->size); + phys += remap->paddr - remap->maddr; + break; + } + } + + return x86_acpi_os_ioremap(phys, size); +} +#endif /* CONFIG_ACPI */ + +/* + * Add a new non-RAM remap entry. + * In case of no free entry found, just crash the system. + */ +void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr, + unsigned long size) +{ + BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK)); + + if (nr_nonram_remap == NR_NONRAM_REMAP) { + xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n"); + BUG(); + } + +#ifdef CONFIG_ACPI + /* Switch to the Xen acpi_os_ioremap() variant. */ + if (nr_nonram_remap == 0) + acpi_os_ioremap = xen_acpi_os_ioremap; +#endif + + xen_nonram_remap[nr_nonram_remap].maddr = maddr; + xen_nonram_remap[nr_nonram_remap].paddr = paddr; + xen_nonram_remap[nr_nonram_remap].size = size; + + nr_nonram_remap++; +} + #ifdef CONFIG_XEN_DEBUG_FS #include static int p2m_dump_show(struct seq_file *m, void *v) diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 806ddb2391d9bd..c3db71d96c434a 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -15,12 +15,12 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -46,6 +46,9 @@ bool xen_pv_pci_possible; /* E820 map used during setting up memory. */ static struct e820_table xen_e820_table __initdata; +/* Number of initially usable memory pages. */ +static unsigned long ini_nr_pages __initdata; + /* * Buffer used to remap identity mapped pages. We only need the virtual space. * The physical page behind this address is remapped as needed to different @@ -212,7 +215,7 @@ static int __init xen_free_mfn(unsigned long mfn) * as a fallback if the remapping fails. */ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, - unsigned long end_pfn, unsigned long nr_pages) + unsigned long end_pfn) { unsigned long pfn, end; int ret; @@ -220,7 +223,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, WARN_ON(start_pfn > end_pfn); /* Release pages first. */ - end = min(end_pfn, nr_pages); + end = min(end_pfn, ini_nr_pages); for (pfn = start_pfn; pfn < end; pfn++) { unsigned long mfn = pfn_to_mfn(pfn); @@ -341,15 +344,14 @@ static void __init xen_do_set_identity_and_remap_chunk( * to Xen and not remapped. 
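The entries recorded by xen_add_remap_nonram() above are replayed later by xen_do_remap_nonram() in the previous hunk, one page at a time. A standalone sketch of that replay, with set_p2m() standing in for set_phys_to_machine() and made-up addresses.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static void set_p2m(uint64_t pfn, uint64_t mfn)
{
        printf("p2m[%#llx] = %#llx\n",
               (unsigned long long)pfn, (unsigned long long)mfn);
}

int main(void)
{
        uint64_t maddr = 0x80000000, paddr = 0x200000000ULL;
        uint64_t size = 3 << PAGE_SHIFT;                    /* three pages     */
        uint64_t pfn = paddr >> PAGE_SHIFT;                 /* guest frame     */
        uint64_t mfn = maddr >> PAGE_SHIFT;                 /* machine frame   */
        uint64_t end = (paddr + size + (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

        for (; pfn < end; pfn++, mfn++)
                set_p2m(pfn, mfn);                          /* walk page by page */
        return 0;
}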
*/ static unsigned long __init xen_set_identity_and_remap_chunk( - unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, - unsigned long remap_pfn) + unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn) { unsigned long pfn; unsigned long i = 0; unsigned long n = end_pfn - start_pfn; if (remap_pfn == 0) - remap_pfn = nr_pages; + remap_pfn = ini_nr_pages; while (i < n) { unsigned long cur_pfn = start_pfn + i; @@ -358,19 +360,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk( unsigned long remap_range_size; /* Do not remap pages beyond the current allocation */ - if (cur_pfn >= nr_pages) { + if (cur_pfn >= ini_nr_pages) { /* Identity map remaining pages */ set_phys_range_identity(cur_pfn, cur_pfn + size); break; } - if (cur_pfn + size > nr_pages) - size = nr_pages - cur_pfn; + if (cur_pfn + size > ini_nr_pages) + size = ini_nr_pages - cur_pfn; remap_range_size = xen_find_pfn_range(&remap_pfn); if (!remap_range_size) { pr_warn("Unable to find available pfn range, not remapping identity pages\n"); xen_set_identity_and_release_chunk(cur_pfn, - cur_pfn + left, nr_pages); + cur_pfn + left); break; } /* Adjust size to fit in current e820 RAM region */ @@ -397,18 +399,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk( } static unsigned long __init xen_count_remap_pages( - unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, + unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pages) { - if (start_pfn >= nr_pages) + if (start_pfn >= ini_nr_pages) return remap_pages; - return remap_pages + min(end_pfn, nr_pages) - start_pfn; + return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn; } -static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, +static unsigned long __init xen_foreach_remap_area( unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn, - unsigned long nr_pages, unsigned long last_val)) + unsigned long last_val)) { phys_addr_t start = 0; unsigned long ret_val = 0; @@ -436,8 +438,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, end_pfn = PFN_UP(entry->addr); if (start_pfn < end_pfn) - ret_val = func(start_pfn, end_pfn, nr_pages, - ret_val); + ret_val = func(start_pfn, end_pfn, ret_val); start = end; } } @@ -494,6 +495,8 @@ void __init xen_remap_memory(void) set_pte_mfn(buf, mfn_save, PAGE_KERNEL); pr_info("Remapped %ld page(s)\n", remapped); + + xen_do_remap_nonram(); } static unsigned long __init xen_get_pages_limit(void) @@ -567,7 +570,7 @@ static void __init xen_ignore_unusable(void) } } -bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) +static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) { struct e820_entry *entry; unsigned mapcnt; @@ -624,6 +627,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size) return 0; } +/* + * Swap a non-RAM E820 map entry with RAM above ini_nr_pages. + * Note that the E820 map is modified accordingly, but the P2M map isn't yet. + * The adaption of the P2M must be deferred until page allocation is possible. 
+ */ +static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry) +{ + struct e820_entry *entry; + unsigned int mapcnt; + phys_addr_t mem_end = PFN_PHYS(ini_nr_pages); + phys_addr_t swap_addr, swap_size, entry_end; + + swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr); + swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size); + entry = xen_e820_table.entries; + + for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) { + entry_end = entry->addr + entry->size; + if (entry->type == E820_TYPE_RAM && entry->size >= swap_size && + entry_end - swap_size >= mem_end) { + /* Reduce RAM entry by needed space (whole pages). */ + entry->size -= swap_size; + + /* Add new entry at the end of E820 map. */ + entry = xen_e820_table.entries + + xen_e820_table.nr_entries; + xen_e820_table.nr_entries++; + + /* Fill new entry (keep size and page offset). */ + entry->type = swap_entry->type; + entry->addr = entry_end - swap_size + + swap_addr - swap_entry->addr; + entry->size = swap_entry->size; + + /* Convert old entry to RAM, align to pages. */ + swap_entry->type = E820_TYPE_RAM; + swap_entry->addr = swap_addr; + swap_entry->size = swap_size; + + /* Remember PFN<->MFN relation for P2M update. */ + xen_add_remap_nonram(swap_addr, entry_end - swap_size, + swap_size); + + /* Order E820 table and merge entries. */ + e820__update_table(&xen_e820_table); + + return; + } + + entry++; + } + + xen_raw_console_write("No suitable area found for required E820 entry remapping action\n"); + BUG(); +} + +/* + * Look for non-RAM memory types in a specific guest physical area and move + * those away if possible (ACPI NVS only for now). + */ +static void __init xen_e820_resolve_conflicts(phys_addr_t start, + phys_addr_t size) +{ + struct e820_entry *entry; + unsigned int mapcnt; + phys_addr_t end; + + if (!size) + return; + + end = start + size; + entry = xen_e820_table.entries; + + for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) { + if (entry->addr >= end) + return; + + if (entry->addr + entry->size > start && + entry->type == E820_TYPE_NVS) + xen_e820_swap_entry_with_ram(entry); + + entry++; + } +} + +/* + * Check for an area in physical memory to be usable for non-movable purposes. + * An area is considered to usable if the used E820 map lists it to be RAM or + * some other type which can be moved to higher PFNs while keeping the MFNs. + * In case the area is not usable, crash the system with an error message. + */ +void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size, + const char *component) +{ + xen_e820_resolve_conflicts(start, size); + + if (!xen_is_e820_reserved(start, size)) + return; + + xen_raw_console_write("Xen hypervisor allocated "); + xen_raw_console_write(component); + xen_raw_console_write(" memory conflicts with E820 map\n"); + BUG(); +} + /* * Like memcpy, but with physical addresses for dest and src. 
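The swap just added shrinks a RAM entry that lies above the initially usable pages and moves the conflicting non-RAM entry (ACPI NVS for now) into the space it frees, turning the old location into RAM. A much simplified standalone model of that bookkeeping; alignment handling, the E820 table itself, and the deferred P2M update are omitted.

#include <stdio.h>
#include <stdint.h>

struct entry { uint64_t addr, size; int type; };        /* 0 = RAM, 1 = NVS */

int main(void)
{
        struct entry ram = { 0x100000000ULL, 0x40000000ULL, 0 };  /* 1 GiB RAM above mem_end */
        struct entry nvs = { 0x000ff0000ULL, 0x00010000ULL, 1 };  /* 64 KiB NVS in the way   */
        struct entry moved;

        ram.size -= nvs.size;                   /* carve space off the RAM end  */
        moved.addr = ram.addr + ram.size;       /* new home for the NVS data    */
        moved.size = nvs.size;
        moved.type = nvs.type;
        nvs.type = 0;                           /* old range becomes plain RAM  */

        printf("NVS now at %#llx, freed RAM at %#llx\n",
               (unsigned long long)moved.addr, (unsigned long long)nvs.addr);
        return 0;
}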
*/ @@ -683,7 +791,7 @@ static void __init xen_reserve_xen_mfnlist(void) **/ char * __init xen_memory_setup(void) { - unsigned long max_pfn, pfn_s, n_pfns; + unsigned long pfn_s, n_pfns; phys_addr_t mem_end, addr, size, chunk_size; u32 type; int rc; @@ -695,9 +803,8 @@ char * __init xen_memory_setup(void) int op; xen_parse_512gb(); - max_pfn = xen_get_pages_limit(); - max_pfn = min(max_pfn, xen_start_info->nr_pages); - mem_end = PFN_PHYS(max_pfn); + ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages); + mem_end = PFN_PHYS(ini_nr_pages); memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); @@ -747,13 +854,35 @@ char * __init xen_memory_setup(void) /* Make sure the Xen-supplied memory map is well-ordered. */ e820__update_table(&xen_e820_table); + /* + * Check whether the kernel itself conflicts with the target E820 map. + * Failing now is better than running into weird problems later due + * to relocating (and even reusing) pages with kernel text or data. + */ + xen_chk_is_e820_usable(__pa_symbol(_text), + __pa_symbol(_end) - __pa_symbol(_text), + "kernel"); + + /* + * Check for a conflict of the xen_start_info memory with the target + * E820 map. + */ + xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info), + "xen_start_info"); + + /* + * Check for a conflict of the hypervisor supplied page tables with + * the target E820 map. + */ + xen_pt_check_e820(); + max_pages = xen_get_max_pages(); /* How many extra pages do we need due to remapping? */ - max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages); + max_pages += xen_foreach_remap_area(xen_count_remap_pages); - if (max_pages > max_pfn) - extra_pages += max_pages - max_pfn; + if (max_pages > ini_nr_pages) + extra_pages += max_pages - ini_nr_pages; /* * Clamp the amount of extra memory to a EXTRA_MEM_RATIO @@ -762,8 +891,8 @@ char * __init xen_memory_setup(void) * Make sure we have no memory above max_pages, as this area * isn't handled by the p2m management. */ - maxmem_pages = EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)); - extra_pages = min3(maxmem_pages, extra_pages, max_pages - max_pfn); + maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM)); + extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages); i = 0; addr = xen_e820_table.entries[0].addr; size = xen_e820_table.entries[0].size; @@ -819,23 +948,6 @@ char * __init xen_memory_setup(void) e820__update_table(e820_table); - /* - * Check whether the kernel itself conflicts with the target E820 map. - * Failing now is better than running into weird problems later due - * to relocating (and even reusing) pages with kernel text or data. - */ - if (xen_is_e820_reserved(__pa_symbol(_text), - __pa_symbol(__bss_stop) - __pa_symbol(_text))) { - xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n"); - BUG(); - } - - /* - * Check for a conflict of the hypervisor supplied page tables with - * the target E820 map. - */ - xen_pt_check_e820(); - xen_reserve_xen_mfnlist(); /* Check for a conflict of the initrd with the target E820 map. */ @@ -863,7 +975,7 @@ char * __init xen_memory_setup(void) * Set identity map on non-RAM pages and prepare remapping the * underlying RAM. 
*/ - xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk); + xen_foreach_remap_area(xen_set_identity_and_remap_chunk); pr_info("Released %ld page(s)\n", xen_released_pages); diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 0cf16fc79e0bf0..e1b782e823e6b4 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -47,8 +47,12 @@ void xen_mm_unpin_all(void); #ifdef CONFIG_X86_64 void __init xen_relocate_p2m(void); #endif +void __init xen_do_remap_nonram(void); +void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr, + unsigned long size); -bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size); +void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size, + const char *component); unsigned long __ref xen_chk_extra_mem(unsigned long pfn); void __init xen_inv_extra_mem(void); void __init xen_remap_memory(void); diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index f200a4ec044e64..d3db28f2f81108 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -14,6 +14,7 @@ config XTENSA select ARCH_HAS_DMA_SET_UNCACHED if MMU select ARCH_HAS_STRNCPY_FROM_USER if !KASAN select ARCH_HAS_STRNLEN_USER + select ARCH_NEED_CMPXCHG_1_EMU select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 675a11ea8de76b..95e33a913962d8 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -15,6 +15,7 @@ #include #include +#include /* * cmpxchg @@ -74,6 +75,7 @@ static __inline__ unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { switch (size) { + case 1: return cmpxchg_emu_u8(ptr, old, new); case 4: return __cmpxchg_u32(ptr, old, new); default: __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/xtensa/include/asm/flat.h b/arch/xtensa/include/asm/flat.h index ed5870c779f92d..4854419dcd861e 100644 --- a/arch/xtensa/include/asm/flat.h +++ b/arch/xtensa/include/asm/flat.h @@ -2,7 +2,7 @@ #ifndef __ASM_XTENSA_FLAT_H #define __ASM_XTENSA_FLAT_H -#include +#include static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags, u32 *addr) diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index b3c2450d6f239e..dc54f854c2f58d 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -55,7 +55,8 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice, #ifdef CONFIG_MMU unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { struct vm_area_struct *vmm; struct vma_iterator vmi; diff --git a/block/bdev.c b/block/bdev.c index c5507b6f63b8b1..738e3c8457e7f4 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -324,6 +325,11 @@ static struct inode *bdev_alloc_inode(struct super_block *sb) if (!ei) return NULL; memset(&ei->bdev, 0, sizeof(ei->bdev)); + + if (security_bdev_alloc(&ei->bdev)) { + kmem_cache_free(bdev_cachep, ei); + return NULL; + } return &ei->vfs_inode; } @@ -333,6 +339,7 @@ static void bdev_free_inode(struct inode *inode) free_percpu(bdev->bd_stats); kfree(bdev->bd_meta_info); + security_bdev_free(bdev); if (!bdev_is_partition(bdev)) { if (bdev->bd_disk && bdev->bd_disk->bdi) @@ -548,7 +555,7 @@ int 
bd_prepare_to_claim(struct block_device *bdev, void *holder, /* if claiming is already in progress, wait for it to finish */ if (whole->bd_claiming) { - wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); + wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming); DEFINE_WAIT(wait); prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); @@ -571,7 +578,7 @@ static void bd_clear_claiming(struct block_device *whole, void *holder) /* tell others that we're done */ BUG_ON(whole->bd_claiming != holder); whole->bd_claiming = NULL; - wake_up_bit(&whole->bd_claiming, 0); + wake_up_var(&whole->bd_claiming); } /** diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index b758693697c093..e831aedb464329 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -679,12 +679,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); bfqg_and_blkg_put(old_parent); - if (entity->parent && - entity->parent->last_bfqq_created == bfqq) - entity->parent->last_bfqq_created = NULL; - else if (bfqd->last_bfqq_created == bfqq) - bfqd->last_bfqq_created = NULL; - + bfq_reassign_last_bfqq(bfqq, NULL); entity->parent = bfqg->my_entity; entity->sched_data = &bfqg->sched_data; /* pin down bfqg and its associated blkg */ @@ -741,7 +736,6 @@ static void bfq_sync_bfqq_move(struct bfq_data *bfqd, */ bfq_put_cooperator(sync_bfqq); bic_set_bfqq(bic, NULL, true, act_idx); - bfq_release_process_ref(bfqd, sync_bfqq); } } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 36a4998c4b378b..0747d9d0e48c8a 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx]; /* if a merge has already been setup, then proceed with that first */ - if (bfqq->new_bfqq) - return bfqq->new_bfqq; + new_bfqq = bfqq->new_bfqq; + if (new_bfqq) { + while (new_bfqq->new_bfqq) + new_bfqq = new_bfqq->new_bfqq; + return new_bfqq; + } /* * Check delayed stable merge for rotational or non-queueing @@ -3093,8 +3097,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) } -static void -bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq) +void bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, + struct bfq_queue *new_bfqq) { if (cur_bfqq->entity.parent && cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq) @@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_put_queue(bfqq); } -static void -bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd, + struct bfq_io_cq *bic, + struct bfq_queue *bfqq) { + struct bfq_queue *new_bfqq = bfqq->new_bfqq; + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", (unsigned long)new_bfqq->pid); /* Save weight raising and idle window of the merged queues */ @@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, bfq_reassign_last_bfqq(bfqq, new_bfqq); bfq_release_process_ref(bfqd, bfqq); + + return new_bfqq; } static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, @@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, * fulfilled, i.e., bic can be redirected to new_bfqq * and bfqq can be put. 
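Several of the bfq hunks above now walk the ->new_bfqq merge chain to its tail instead of assuming a single hop, since a queue scheduled for merging may itself already be scheduled into a third queue. A standalone toy of that chain walk.

#include <stdio.h>
#include <stddef.h>

struct queue { const char *name; struct queue *new_bfqq; };

/* Follow ->new_bfqq until the final merge target, as bfq_setup_cooperator() now does. */
static struct queue *chain_tail(struct queue *q)
{
        while (q->new_bfqq)
                q = q->new_bfqq;
        return q;
}

int main(void)
{
        struct queue c = { "C", NULL }, b = { "B", &c }, a = { "A", &b };

        printf("A ultimately merges into %s\n", chain_tail(&a)->name);
        return 0;
}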
*/ - bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, - new_bfqq); - /* - * If we get here, bio will be queued into new_queue, - * so use new_bfqq to decide whether bio and rq can be - * merged. - */ - bfqq = new_bfqq; + while (bfqq != new_bfqq) + bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq); /* * Change also bqfd->bio_bfqq, as @@ -5432,6 +5434,8 @@ void bfq_put_cooperator(struct bfq_queue *bfqq) bfq_put_queue(__bfqq); __bfqq = next; } + + bfq_release_process_ref(bfqq->bfqd, bfqq); } static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -5444,8 +5448,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); bfq_put_cooperator(bfqq); - - bfq_release_process_ref(bfqd, bfqq); } static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync, @@ -5701,9 +5703,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq, * state before killing it. */ bfqq->bic = bic; - bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); - - return new_bfqq; + return bfq_merge_bfqqs(bfqd, bic, bfqq); } /* @@ -6158,6 +6158,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) bool waiting, idle_timer_disabled = false; if (new_bfqq) { + struct bfq_queue *old_bfqq = bfqq; /* * Release the request's reference to the old bfqq * and make sure one is taken to the shared queue. @@ -6174,18 +6175,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) * new_bfqq. */ if (bic_to_bfqq(RQ_BIC(rq), true, - bfq_actuator_index(bfqd, rq->bio)) == bfqq) - bfq_merge_bfqqs(bfqd, RQ_BIC(rq), - bfqq, new_bfqq); + bfq_actuator_index(bfqd, rq->bio)) == bfqq) { + while (bfqq != new_bfqq) + bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq); + } - bfq_clear_bfqq_just_created(bfqq); + bfq_clear_bfqq_just_created(old_bfqq); /* * rq is about to be enqueued into new_bfqq, * release rq reference on bfqq */ - bfq_put_queue(bfqq); + bfq_put_queue(old_bfqq); rq->elv.priv[1] = new_bfqq; - bfqq = new_bfqq; } bfq_update_io_thinktime(bfqd, bfqq); @@ -6723,7 +6724,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) { bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); - if (bfqq_process_refs(bfqq) == 1) { + if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) { bfqq->pid = current->pid; bfq_clear_bfqq_coop(bfqq); bfq_clear_bfqq_split_coop(bfqq); @@ -6733,16 +6734,13 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx); bfq_put_cooperator(bfqq); - - bfq_release_process_ref(bfqq->bfqd, bfqq); return NULL; } -static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, - struct bfq_io_cq *bic, - struct bio *bio, - bool split, bool is_sync, - bool *new_queue) +static struct bfq_queue * +__bfq_get_bfqq_handle_split(struct bfq_data *bfqd, struct bfq_io_cq *bic, + struct bio *bio, bool split, bool is_sync, + bool *new_queue) { unsigned int act_idx = bfq_actuator_index(bfqd, bio); struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx); @@ -6821,6 +6819,84 @@ static void bfq_prepare_request(struct request *rq) rq->elv.priv[0] = rq->elv.priv[1] = NULL; } +static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq) +{ + struct bfq_queue *new_bfqq = bfqq->new_bfqq; + struct bfq_queue *waker_bfqq = bfqq->waker_bfqq; + + if (!waker_bfqq) + return NULL; + + while (new_bfqq) { + if (new_bfqq == waker_bfqq) { + /* + * If waker_bfqq is in the merge chain, and current + * is the only procress. 
+ */ + if (bfqq_process_refs(waker_bfqq) == 1) + return NULL; + break; + } + + new_bfqq = new_bfqq->new_bfqq; + } + + return waker_bfqq; +} + +static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, + struct bfq_io_cq *bic, + struct bio *bio, + unsigned int idx, + bool is_sync) +{ + struct bfq_queue *waker_bfqq; + struct bfq_queue *bfqq; + bool new_queue = false; + + bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, + &new_queue); + if (unlikely(new_queue)) + return bfqq; + + /* If the queue was seeky for too long, break it apart. */ + if (!bfq_bfqq_coop(bfqq) || !bfq_bfqq_split_coop(bfqq) || + bic->bfqq_data[idx].stably_merged) + return bfqq; + + waker_bfqq = bfq_waker_bfqq(bfqq); + + /* Update bic before losing reference to bfqq */ + if (bfq_bfqq_in_large_burst(bfqq)) + bic->bfqq_data[idx].saved_in_large_burst = true; + + bfqq = bfq_split_bfqq(bic, bfqq); + if (bfqq) { + bfq_bfqq_resume_state(bfqq, bfqd, bic, true); + return bfqq; + } + + bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, true, is_sync, NULL); + if (unlikely(bfqq == &bfqd->oom_bfqq)) + return bfqq; + + bfq_bfqq_resume_state(bfqq, bfqd, bic, false); + bfqq->waker_bfqq = waker_bfqq; + bfqq->tentative_waker_bfqq = NULL; + + /* + * If the waker queue disappears, then new_bfqq->waker_bfqq must be + * reset. So insert new_bfqq into the + * woken_list of the waker. See + * bfq_check_waker for details. + */ + if (waker_bfqq) + hlist_add_head(&bfqq->woken_list_node, + &bfqq->waker_bfqq->woken_list); + + return bfqq; +} + /* * If needed, init rq, allocate bfq data structures associated with * rq, and increment reference counters in the destination bfq_queue @@ -6852,8 +6928,6 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) struct bfq_io_cq *bic; const int is_sync = rq_is_sync(rq); struct bfq_queue *bfqq; - bool new_queue = false; - bool bfqq_already_existing = false, split = false; unsigned int a_idx = bfq_actuator_index(bfqd, bio); if (unlikely(!rq->elv.icq)) @@ -6870,54 +6944,9 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) return RQ_BFQQ(rq); bic = icq_to_bic(rq->elv.icq); - bfq_check_ioprio_change(bic, bio); - bfq_bic_update_cgroup(bic, bio); - - bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, - &new_queue); - - if (likely(!new_queue)) { - /* If the queue was seeky for too long, break it apart. */ - if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) && - !bic->bfqq_data[a_idx].stably_merged) { - struct bfq_queue *old_bfqq = bfqq; - - /* Update bic before losing reference to bfqq */ - if (bfq_bfqq_in_large_burst(bfqq)) - bic->bfqq_data[a_idx].saved_in_large_burst = - true; - - bfqq = bfq_split_bfqq(bic, bfqq); - split = true; - - if (!bfqq) { - bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, - true, is_sync, - NULL); - if (unlikely(bfqq == &bfqd->oom_bfqq)) - bfqq_already_existing = true; - } else - bfqq_already_existing = true; - - if (!bfqq_already_existing) { - bfqq->waker_bfqq = old_bfqq->waker_bfqq; - bfqq->tentative_waker_bfqq = NULL; - - /* - * If the waker queue disappears, then - * new_bfqq->waker_bfqq must be - * reset. So insert new_bfqq into the - * woken_list of the waker. See - * bfq_check_waker for details. 
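bfq_waker_bfqq() above keeps the recorded waker queue only if it is either outside the merge chain or still shared by more than one process. A standalone restatement of that rule with a toy queue structure; field names loosely follow the kernel ones but nothing here is bfq code.

#include <stdio.h>
#include <stddef.h>

struct queue { struct queue *new_bfqq, *waker_bfqq; int process_refs; };

static struct queue *pick_waker(struct queue *q)
{
        struct queue *w = q->waker_bfqq;

        for (struct queue *n = q->new_bfqq; w && n; n = n->new_bfqq)
                if (n == w)
                        /* waker sits in the chain: drop it if it is about to die */
                        return w->process_refs == 1 ? NULL : w;
        return w;
}

int main(void)
{
        struct queue waker = { NULL, NULL, 1 };
        struct queue q = { &waker, &waker, 1 };  /* waker is in the merge chain */

        printf("waker kept: %s\n", pick_waker(&q) ? "yes" : "no");
        return 0;
}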
- */ - if (bfqq->waker_bfqq) - hlist_add_head(&bfqq->woken_list_node, - &bfqq->waker_bfqq->woken_list); - } - } - } + bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, a_idx, is_sync); bfqq_request_allocated(bfqq); bfqq->ref++; @@ -6934,18 +6963,9 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) * addition, if the queue has also just been split, we have to * resume its state. */ - if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { + if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq && + bfqq_process_refs(bfqq) == 1) bfqq->bic = bic; - if (split) { - /* - * The queue has just been split from a shared - * queue: restore the idle window and the - * possible weight raising period. - */ - bfq_bfqq_resume_state(bfqq, bfqd, bic, - bfqq_already_existing); - } - } /* * Consider bfqq as possibly belonging to a burst of newly diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 08ddf2cfae5b1c..687a3a7ba78478 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -1156,6 +1156,8 @@ void bfq_del_bfqq_busy(struct bfq_queue *bfqq, bool expiration); void bfq_add_bfqq_busy(struct bfq_queue *bfqq); void bfq_add_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq); void bfq_del_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq); +void bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, + struct bfq_queue *new_bfqq); /* --------------- end of interface of B-WF2Q+ ---------------- */ @@ -1183,11 +1185,6 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq); "%s " fmt, pid_str, ##args); \ } while (0) -#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ - blk_add_cgroup_trace_msg((bfqd)->queue, \ - &bfqg_to_blkg(bfqg)->blkcg->css, fmt, ##args); \ -} while (0) - #else /* CONFIG_BFQ_GROUP_IOSCHED */ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ @@ -1197,7 +1194,6 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq); bfq_bfqq_name((bfqq), pid_str, MAX_BFQQ_NAME_LENGTH); \ blk_add_trace_msg((bfqd)->queue, "%s " fmt, pid_str, ##args); \ } while (0) -#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) 
do {} while (0) #endif /* CONFIG_BFQ_GROUP_IOSCHED */ diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 96a2653905aef8..88e3ad73c38549 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -367,7 +367,6 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes, kfree(bvec); return ret; } -EXPORT_SYMBOL_GPL(bio_integrity_map_user); /** * bio_integrity_prep - Prepare bio for integrity I/O diff --git a/block/bio.c b/block/bio.c index c4053d49679a9a..ac4d77c889322d 100644 --- a/block/bio.c +++ b/block/bio.c @@ -931,7 +931,8 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page, if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) return false; - *same_page = ((vec_end_addr & PAGE_MASK) == page_addr); + *same_page = ((vec_end_addr & PAGE_MASK) == ((page_addr + off) & + PAGE_MASK)); if (!*same_page) { if (IS_ENABLED(CONFIG_KMSAN)) return false; @@ -1016,6 +1017,29 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio, return len; } +/** + * bio_add_hw_folio - attempt to add a folio to a bio with hw constraints + * @q: the target queue + * @bio: destination bio + * @folio: folio to add + * @len: vec entry length + * @offset: vec entry offset in the folio + * @max_sectors: maximum number of sectors that can be added + * @same_page: return if the segment has been merged inside the same folio + * + * Add a folio to a bio while respecting the hardware max_sectors, max_segment + * and gap limitations. + */ +int bio_add_hw_folio(struct request_queue *q, struct bio *bio, + struct folio *folio, size_t len, size_t offset, + unsigned int max_sectors, bool *same_page) +{ + if (len > UINT_MAX || offset > UINT_MAX) + return 0; + return bio_add_hw_page(q, bio, folio_page(folio, 0), len, offset, + max_sectors, same_page); +} + /** * bio_add_pc_page - attempt to add page to passthrough bio * @q: the target queue @@ -1166,7 +1190,6 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) struct folio_iter fi; bio_for_each_folio_all(fi, bio) { - struct page *page; size_t nr_pages; if (mark_dirty) { @@ -1174,12 +1197,9 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) folio_mark_dirty(fi.folio); folio_unlock(fi.folio); } - page = folio_page(fi.folio, fi.offset / PAGE_SIZE); nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE - fi.offset / PAGE_SIZE + 1; - do { - bio_release_page(bio, page++); - } while (--nr_pages != 0); + unpin_user_folio(fi.folio, nr_pages); } } EXPORT_SYMBOL_GPL(__bio_release_pages); @@ -1204,8 +1224,8 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) bio_set_flag(bio, BIO_CLONED); } -static int bio_iov_add_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset) +static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len, + size_t offset) { bool same_page = false; @@ -1214,30 +1234,61 @@ static int bio_iov_add_page(struct bio *bio, struct page *page, if (bio->bi_vcnt > 0 && bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], - page, len, offset, &same_page)) { + folio_page(folio, 0), len, offset, + &same_page)) { bio->bi_iter.bi_size += len; - if (same_page) - bio_release_page(bio, page); + if (same_page && bio_flagged(bio, BIO_PAGE_PINNED)) + unpin_user_folio(folio, 1); return 0; } - __bio_add_page(bio, page, len, offset); + bio_add_folio_nofail(bio, folio, len, offset); return 0; } -static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset) +static int 
bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio, + size_t len, size_t offset) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); bool same_page = false; - if (bio_add_hw_page(q, bio, page, len, offset, + if (bio_add_hw_folio(q, bio, folio, len, offset, queue_max_zone_append_sectors(q), &same_page) != len) return -EINVAL; - if (same_page) - bio_release_page(bio, page); + if (same_page && bio_flagged(bio, BIO_PAGE_PINNED)) + unpin_user_folio(folio, 1); return 0; } +static unsigned int get_contig_folio_len(unsigned int *num_pages, + struct page **pages, unsigned int i, + struct folio *folio, size_t left, + size_t offset) +{ + size_t bytes = left; + size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, bytes); + unsigned int j; + + /* + * We might COW a single page in the middle of + * a large folio, so we have to check that all + * pages belong to the same folio. + */ + bytes -= contig_sz; + for (j = i + 1; j < i + *num_pages; j++) { + size_t next = min_t(size_t, PAGE_SIZE, bytes); + + if (page_folio(pages[j]) != folio || + pages[j] != pages[j - 1] + 1) { + break; + } + contig_sz += next; + bytes -= next; + } + *num_pages = j - i; + + return contig_sz; +} + #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *)) /** @@ -1257,9 +1308,9 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; struct page **pages = (struct page **)bv; - ssize_t size, left; - unsigned len, i = 0; - size_t offset; + ssize_t size; + unsigned int num_pages, i = 0; + size_t offset, folio_offset, left, len; int ret = 0; /* @@ -1299,17 +1350,28 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) goto out; } - for (left = size, i = 0; left > 0; left -= len, i++) { + for (left = size, i = 0; left > 0; left -= len, i += num_pages) { struct page *page = pages[i]; + struct folio *folio = page_folio(page); + + folio_offset = ((size_t)folio_page_idx(folio, page) << + PAGE_SHIFT) + offset; + + len = min(folio_size(folio) - folio_offset, left); + + num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE); + + if (num_pages > 1) + len = get_contig_folio_len(&num_pages, pages, i, + folio, left, offset); - len = min_t(size_t, PAGE_SIZE - offset, left); if (bio_op(bio) == REQ_OP_ZONE_APPEND) { - ret = bio_iov_add_zone_append_page(bio, page, len, - offset); + ret = bio_iov_add_zone_append_folio(bio, folio, len, + folio_offset); if (ret) break; } else - bio_iov_add_page(bio, page, len, offset); + bio_iov_add_folio(bio, folio, len, folio_offset); offset = 0; } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 69e70964398c65..e68c725cf8d975 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1458,7 +1458,6 @@ int blkcg_init_disk(struct gendisk *disk) struct request_queue *q = disk->queue; struct blkcg_gq *new_blkg, *blkg; bool preloaded; - int ret; new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL); if (!new_blkg) @@ -1478,15 +1477,8 @@ int blkcg_init_disk(struct gendisk *disk) if (preloaded) radix_tree_preload_end(); - ret = blk_ioprio_init(disk); - if (ret) - goto err_destroy_all; - return 0; -err_destroy_all: - blkg_destroy_all(disk); - return ret; err_unlock: spin_unlock_irq(&q->queue_lock); if (preloaded) @@ -1554,6 +1546,14 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol) if (blkcg_policy_enabled(q, pol)) return 0; + /* + * Policy is allowed to be registered without 
pd_alloc_fn/pd_free_fn, + * for example, ioprio. Such policy will work on blkcg level, not disk + * level, and don't need to be activated. + */ + if (WARN_ON_ONCE(!pol->pd_alloc_fn || !pol->pd_free_fn)) + return -EINVAL; + if (queue_is_mq(q)) blk_mq_freeze_queue(q); retry: @@ -1733,9 +1733,12 @@ int blkcg_policy_register(struct blkcg_policy *pol) goto err_unlock; } - /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */ + /* + * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy + * without pd_alloc_fn/pd_free_fn can't be activated. + */ if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || - (!pol->pd_alloc_fn ^ !pol->pd_free_fn)) + (!pol->pd_alloc_fn ^ !pol->pd_free_fn)) goto err_unlock; /* register @pol */ diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 864fad4a850bb6..b9e3265c1eb307 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -485,7 +485,6 @@ static inline void blkcg_deactivate_policy(struct gendisk *disk, static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) { return NULL; } static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } -static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } static inline void blkcg_bio_issue_init(struct bio *bio) { } diff --git a/block/blk-core.c b/block/blk-core.c index 1217c2cd66dd88..bc5e8c5eaac9ff 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -799,6 +799,7 @@ void submit_bio_noacct(struct bio *bio) switch (bio_op(bio)) { case REQ_OP_READ: + break; case REQ_OP_WRITE: if (bio->bi_opf & REQ_ATOMIC) { status = blk_validate_atomic_write_op_size(q, bio); diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 010decc892eaa0..83b696ba0cac31 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -53,29 +53,28 @@ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) return segments; } -EXPORT_SYMBOL(blk_rq_count_integrity_sg); /** * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist - * @q: request queue - * @bio: bio with integrity metadata attached + * @rq: request to map * @sglist: target scatterlist * * Description: Map the integrity vectors in request into a * scatterlist. The scatterlist must be big enough to hold all - * elements. I.e. sized using blk_rq_count_integrity_sg(). + * elements. I.e. sized using blk_rq_count_integrity_sg() or + * rq->nr_integrity_segments. 
*/ -int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, - struct scatterlist *sglist) +int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist) { struct bio_vec iv, ivprv = { NULL }; + struct request_queue *q = rq->q; struct scatterlist *sg = NULL; + struct bio *bio = rq->bio; unsigned int segments = 0; struct bvec_iter iter; int prev = 0; bio_for_each_integrity_vec(iv, bio, iter) { - if (prev) { if (!biovec_phys_mergeable(q, &ivprv, &iv)) goto new_segment; @@ -103,10 +102,30 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, if (sg) sg_mark_end(sg); + /* + * Something must have been wrong if the figured number of segment + * is bigger than number of req's physical integrity segments + */ + BUG_ON(segments > rq->nr_integrity_segments); + BUG_ON(segments > queue_max_integrity_segments(q)); return segments; } EXPORT_SYMBOL(blk_rq_map_integrity_sg); +int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, + ssize_t bytes, u32 seed) +{ + int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed); + + if (ret) + return ret; + + rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio); + rq->cmd_flags |= REQ_INTEGRITY; + return 0; +} +EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user); + bool blk_integrity_merge_rq(struct request_queue *q, struct request *req, struct request *next) { @@ -134,7 +153,6 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req, struct bio *bio) { int nr_integrity_segs; - struct bio *next = bio->bi_next; if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL) return true; @@ -145,16 +163,11 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req, if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags) return false; - bio->bi_next = NULL; nr_integrity_segs = blk_rq_count_integrity_sg(q, bio); - bio->bi_next = next; - if (req->nr_integrity_segments + nr_integrity_segs > q->limits.max_integrity_segments) return false; - req->nr_integrity_segments += nr_integrity_segs; - return true; } diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 690ca99dfaca67..384aa15e8260bd 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -648,7 +648,7 @@ static const struct ioc_params autop[] = { * vrate adjust percentages indexed by ioc->busy_level. We adjust up on * vtime credit shortage and down on device saturation. 
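Returning to the blk-integrity changes above: blk_rq_map_integrity_sg() now takes the request and sanity-checks against rq->nr_integrity_segments, while blk_rq_integrity_map_user() wires a user metadata buffer into the request. A hedged sketch of how a driver might call the pair; everything except the two block-layer helpers is invented.

#include <linux/blk-integrity.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Hypothetical driver-side helpers, illustration only. */
static int example_setup_pi(struct request *rq, void __user *meta,
                            ssize_t meta_len, u32 seed)
{
        /* attaches the user integrity buffer and sets nr_integrity_segments */
        return blk_rq_integrity_map_user(rq, meta, meta_len, seed);
}

static int example_map_pi(struct request *rq, struct scatterlist *sgl)
{
        /* sgl must provide at least rq->nr_integrity_segments entries */
        return blk_rq_map_integrity_sg(rq, sgl);
}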
*/ -static u32 vrate_adj_pct[] = +static const u32 vrate_adj_pct[] = { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, @@ -2076,7 +2076,7 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, struct ioc_now *now) { struct ioc_gq *iocg; - u64 dur, usage_pct, nr_cycles; + u64 dur, usage_pct, nr_cycles, nr_cycles_shift; /* if no debtor, reset the cycle */ if (!nr_debtors) { @@ -2138,10 +2138,12 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, old_debt = iocg->abs_vdebt; old_delay = iocg->delay; + nr_cycles_shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1); if (iocg->abs_vdebt) - iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1; + iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles_shift ?: 1; + if (iocg->delay) - iocg->delay = iocg->delay >> nr_cycles ?: 1; + iocg->delay = iocg->delay >> nr_cycles_shift ?: 1; iocg_kick_waitq(iocg, true, now); @@ -3164,7 +3166,7 @@ static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd, if (!dname) return 0; - spin_lock_irq(&ioc->lock); + spin_lock(&ioc->lock); seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n", dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto", ioc->params.qos[QOS_RPPM] / 10000, @@ -3177,7 +3179,7 @@ static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd, ioc->params.qos[QOS_MIN] % 10000 / 100, ioc->params.qos[QOS_MAX] / 10000, ioc->params.qos[QOS_MAX] % 10000 / 100); - spin_unlock_irq(&ioc->lock); + spin_unlock(&ioc->lock); return 0; } @@ -3364,14 +3366,14 @@ static u64 ioc_cost_model_prfill(struct seq_file *sf, if (!dname) return 0; - spin_lock_irq(&ioc->lock); + spin_lock(&ioc->lock); seq_printf(sf, "%s ctrl=%s model=linear " "rbps=%llu rseqiops=%llu rrandiops=%llu " "wbps=%llu wseqiops=%llu wrandiops=%llu\n", dname, ioc->user_cost_model ? "user" : "auto", u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS], u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]); - spin_unlock_irq(&ioc->lock); + spin_unlock(&ioc->lock); return 0; } diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c index 4051fada01f1e8..8fff7ccc0ac735 100644 --- a/block/blk-ioprio.c +++ b/block/blk-ioprio.c @@ -49,14 +49,6 @@ static const char *policy_name[] = { static struct blkcg_policy ioprio_policy; -/** - * struct ioprio_blkg - Per (cgroup, request queue) data. - * @pd: blkg_policy_data structure. - */ -struct ioprio_blkg { - struct blkg_policy_data pd; -}; - /** * struct ioprio_blkcg - Per cgroup data. * @cpd: blkcg_policy_data structure. @@ -67,11 +59,6 @@ struct ioprio_blkcg { enum prio_policy prio_policy; }; -static inline struct ioprio_blkg *pd_to_ioprio(struct blkg_policy_data *pd) -{ - return pd ? 
container_of(pd, struct ioprio_blkg, pd) : NULL; -} - static struct ioprio_blkcg *blkcg_to_ioprio_blkcg(struct blkcg *blkcg) { return container_of(blkcg_to_cpd(blkcg, &ioprio_policy), @@ -84,16 +71,6 @@ ioprio_blkcg_from_css(struct cgroup_subsys_state *css) return blkcg_to_ioprio_blkcg(css_to_blkcg(css)); } -static struct ioprio_blkcg *ioprio_blkcg_from_bio(struct bio *bio) -{ - struct blkg_policy_data *pd = blkg_to_pd(bio->bi_blkg, &ioprio_policy); - - if (!pd) - return NULL; - - return blkcg_to_ioprio_blkcg(pd->blkg->blkcg); -} - static int ioprio_show_prio_policy(struct seq_file *sf, void *v) { struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(seq_css(sf)); @@ -118,25 +95,6 @@ static ssize_t ioprio_set_prio_policy(struct kernfs_open_file *of, char *buf, return nbytes; } -static struct blkg_policy_data * -ioprio_alloc_pd(struct gendisk *disk, struct blkcg *blkcg, gfp_t gfp) -{ - struct ioprio_blkg *ioprio_blkg; - - ioprio_blkg = kzalloc(sizeof(*ioprio_blkg), gfp); - if (!ioprio_blkg) - return NULL; - - return &ioprio_blkg->pd; -} - -static void ioprio_free_pd(struct blkg_policy_data *pd) -{ - struct ioprio_blkg *ioprio_blkg = pd_to_ioprio(pd); - - kfree(ioprio_blkg); -} - static struct blkcg_policy_data *ioprio_alloc_cpd(gfp_t gfp) { struct ioprio_blkcg *blkcg; @@ -179,14 +137,11 @@ static struct blkcg_policy ioprio_policy = { .cpd_alloc_fn = ioprio_alloc_cpd, .cpd_free_fn = ioprio_free_cpd, - - .pd_alloc_fn = ioprio_alloc_pd, - .pd_free_fn = ioprio_free_pd, }; void blkcg_set_ioprio(struct bio *bio) { - struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio); + struct ioprio_blkcg *blkcg = blkcg_to_ioprio_blkcg(bio->bi_blkg->blkcg); u16 prio; if (!blkcg || blkcg->prio_policy == POLICY_NO_CHANGE) @@ -219,16 +174,6 @@ void blkcg_set_ioprio(struct bio *bio) bio->bi_ioprio = prio; } -void blk_ioprio_exit(struct gendisk *disk) -{ - blkcg_deactivate_policy(disk, &ioprio_policy); -} - -int blk_ioprio_init(struct gendisk *disk) -{ - return blkcg_activate_policy(disk, &ioprio_policy); -} - static int __init ioprio_init(void) { return blkcg_policy_register(&ioprio_policy); diff --git a/block/blk-ioprio.h b/block/blk-ioprio.h index b6afb8e80de05c..9265143f9bc9f9 100644 --- a/block/blk-ioprio.h +++ b/block/blk-ioprio.h @@ -9,17 +9,8 @@ struct request_queue; struct bio; #ifdef CONFIG_BLK_CGROUP_IOPRIO -int blk_ioprio_init(struct gendisk *disk); -void blk_ioprio_exit(struct gendisk *disk); void blkcg_set_ioprio(struct bio *bio); #else -static inline int blk_ioprio_init(struct gendisk *disk) -{ - return 0; -} -static inline void blk_ioprio_exit(struct gendisk *disk) -{ -} static inline void blkcg_set_ioprio(struct bio *bio) { } diff --git a/block/blk-merge.c b/block/blk-merge.c index de5281bcadc538..ad763ec313b6ad 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -105,9 +105,33 @@ static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim) return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT; } -static struct bio *bio_split_discard(struct bio *bio, - const struct queue_limits *lim, - unsigned *nsegs, struct bio_set *bs) +static struct bio *bio_submit_split(struct bio *bio, int split_sectors) +{ + if (unlikely(split_sectors < 0)) { + bio->bi_status = errno_to_blk_status(split_sectors); + bio_endio(bio); + return NULL; + } + + if (split_sectors) { + struct bio *split; + + split = bio_split(bio, split_sectors, GFP_NOIO, + &bio->bi_bdev->bd_disk->bio_split); + split->bi_opf |= REQ_NOMERGE; + blkcg_bio_issue_init(split); + bio_chain(split, bio); + 
trace_block_split(split, bio->bi_iter.bi_sector); + WARN_ON_ONCE(bio_zone_write_plugging(bio)); + submit_bio_noacct(bio); + return split; + } + + return bio; +} + +struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim, + unsigned *nsegs) { unsigned int max_discard_sectors, granularity; sector_t tmp; @@ -121,10 +145,10 @@ static struct bio *bio_split_discard(struct bio *bio, min(lim->max_discard_sectors, bio_allowed_max_sectors(lim)); max_discard_sectors -= max_discard_sectors % granularity; if (unlikely(!max_discard_sectors)) - return NULL; + return bio; if (bio_sectors(bio) <= max_discard_sectors) - return NULL; + return bio; split_sectors = max_discard_sectors; @@ -139,19 +163,18 @@ static struct bio *bio_split_discard(struct bio *bio, if (split_sectors > tmp) split_sectors -= tmp; - return bio_split(bio, split_sectors, GFP_NOIO, bs); + return bio_submit_split(bio, split_sectors); } -static struct bio *bio_split_write_zeroes(struct bio *bio, - const struct queue_limits *lim, - unsigned *nsegs, struct bio_set *bs) +struct bio *bio_split_write_zeroes(struct bio *bio, + const struct queue_limits *lim, unsigned *nsegs) { *nsegs = 0; if (!lim->max_write_zeroes_sectors) - return NULL; + return bio; if (bio_sectors(bio) <= lim->max_write_zeroes_sectors) - return NULL; - return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs); + return bio; + return bio_submit_split(bio, lim->max_write_zeroes_sectors); } static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim, @@ -274,27 +297,19 @@ static bool bvec_split_segs(const struct queue_limits *lim, } /** - * bio_split_rw - split a bio in two bios + * bio_split_rw_at - check if and where to split a read/write bio * @bio: [in] bio to be split * @lim: [in] queue limits to split based on * @segs: [out] number of segments in the bio with the first half of the sectors - * @bs: [in] bio set to allocate the clone from * @max_bytes: [in] maximum number of bytes per bio * - * Clone @bio, update the bi_iter of the clone to represent the first sectors - * of @bio and update @bio->bi_iter to represent the remaining sectors. The - * following is guaranteed for the cloned bio: - * - That it has at most @max_bytes worth of data - * - That it has at most queue_max_segments(@q) segments. - * - * Except for discard requests the cloned bio will point at the bi_io_vec of - * the original bio. It is the responsibility of the caller to ensure that the - * original bio is not freed before the cloned bio. The caller is also - * responsible for ensuring that @bs is only destroyed after processing of the - * split bio has finished. + * Find out if @bio needs to be split to fit the queue limits in @lim and a + * maximum size of @max_bytes. Returns a negative error number if @bio can't be + * split, 0 if the bio doesn't have to be split, or a positive sector offset if + * @bio needs to be split. 
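bio_split_rw_at() now only reports whether and where to split, using the convention documented above: a negative errno, zero for no split, or a positive sector count. A small fragment restating that contract from a caller's point of view; example_check_split() is an invented name, and bio_submit_split() above remains the real consumer.

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_check_split(struct bio *bio, const struct queue_limits *lim,
                               unsigned int max_bytes)
{
        unsigned int nsegs;
        int split_sectors = bio_split_rw_at(bio, lim, &nsegs, max_bytes);

        if (split_sectors < 0)
                return split_sectors;   /* e.g. -EAGAIN for a REQ_NOWAIT bio */
        if (!split_sectors)
                return 0;               /* fits the limits, submit as-is */
        /* otherwise: keep the first split_sectors sectors, resubmit the rest */
        return split_sectors;
}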
*/ -struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, - unsigned *segs, struct bio_set *bs, unsigned max_bytes) +int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim, + unsigned *segs, unsigned max_bytes) { struct bio_vec bv, bvprv, *bvprvp = NULL; struct bvec_iter iter; @@ -324,22 +339,17 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, } *segs = nsegs; - return NULL; + return 0; split: - if (bio->bi_opf & REQ_ATOMIC) { - bio->bi_status = BLK_STS_INVAL; - bio_endio(bio); - return ERR_PTR(-EINVAL); - } + if (bio->bi_opf & REQ_ATOMIC) + return -EINVAL; + /* * We can't sanely support splitting for a REQ_NOWAIT bio. End it * with EAGAIN if splitting is required and return an error pointer. */ - if (bio->bi_opf & REQ_NOWAIT) { - bio->bi_status = BLK_STS_AGAIN; - bio_endio(bio); - return ERR_PTR(-EAGAIN); - } + if (bio->bi_opf & REQ_NOWAIT) + return -EAGAIN; *segs = nsegs; @@ -356,58 +366,36 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, * big IO can be trival, disable iopoll when split needed. */ bio_clear_polled(bio); - return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs); + return bytes >> SECTOR_SHIFT; } -EXPORT_SYMBOL_GPL(bio_split_rw); +EXPORT_SYMBOL_GPL(bio_split_rw_at); -/** - * __bio_split_to_limits - split a bio to fit the queue limits - * @bio: bio to be split - * @lim: queue limits to split based on - * @nr_segs: returns the number of segments in the returned bio - * - * Check if @bio needs splitting based on the queue limits, and if so split off - * a bio fitting the limits from the beginning of @bio and return it. @bio is - * shortened to the remainder and re-submitted. +struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, + unsigned *nr_segs) +{ + return bio_submit_split(bio, + bio_split_rw_at(bio, lim, nr_segs, + get_max_io_size(bio, lim) << SECTOR_SHIFT)); +} + +/* + * REQ_OP_ZONE_APPEND bios must never be split by the block layer. * - * The split bio is allocated from @q->bio_split, which is provided by the - * block layer. + * But we want the nr_segs calculation provided by bio_split_rw_at, and having + * a good sanity check that the submitter built the bio correctly is nice to + * have as well. 
*/ -struct bio *__bio_split_to_limits(struct bio *bio, - const struct queue_limits *lim, - unsigned int *nr_segs) +struct bio *bio_split_zone_append(struct bio *bio, + const struct queue_limits *lim, unsigned *nr_segs) { - struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split; - struct bio *split; - - switch (bio_op(bio)) { - case REQ_OP_DISCARD: - case REQ_OP_SECURE_ERASE: - split = bio_split_discard(bio, lim, nr_segs, bs); - break; - case REQ_OP_WRITE_ZEROES: - split = bio_split_write_zeroes(bio, lim, nr_segs, bs); - break; - default: - split = bio_split_rw(bio, lim, nr_segs, bs, - get_max_io_size(bio, lim) << SECTOR_SHIFT); - if (IS_ERR(split)) - return NULL; - break; - } - - if (split) { - /* there isn't chance to merge the split bio */ - split->bi_opf |= REQ_NOMERGE; - - blkcg_bio_issue_init(split); - bio_chain(split, bio); - trace_block_split(split, bio->bi_iter.bi_sector); - WARN_ON_ONCE(bio_zone_write_plugging(bio)); - submit_bio_noacct(bio); - return split; - } - return bio; + unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim); + int split_sectors; + + split_sectors = bio_split_rw_at(bio, lim, nr_segs, + max_sectors << SECTOR_SHIFT); + if (WARN_ON_ONCE(split_sectors > 0)) + split_sectors = -EINVAL; + return bio_submit_split(bio, split_sectors); } /** @@ -426,9 +414,7 @@ struct bio *bio_split_to_limits(struct bio *bio) const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits; unsigned int nr_segs; - if (bio_may_exceed_limits(bio, lim)) - return __bio_split_to_limits(bio, lim, &nr_segs); - return bio; + return __bio_split_to_limits(bio, lim, &nr_segs); } EXPORT_SYMBOL(bio_split_to_limits); @@ -653,6 +639,9 @@ static inline int ll_new_hw_segment(struct request *req, struct bio *bio, * counters. */ req->nr_phys_segments += nr_phys_segs; + if (bio_integrity(bio)) + req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q, + bio); return 1; no_merge: @@ -745,6 +734,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, /* Merge is OK... */ req->nr_phys_segments = total_phys_segments; + req->nr_integrity_segments += next->nr_integrity_segments; return 1; } diff --git a/block/blk-mq.c b/block/blk-mq.c index e3c3c0c21b5536..4b2c8e940f5913 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -376,9 +376,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->io_start_time_ns = 0; rq->stats_sectors = 0; rq->nr_phys_segments = 0; -#if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; -#endif rq->end_io = NULL; rq->end_io_data = NULL; @@ -1128,7 +1126,7 @@ static void blk_complete_reqs(struct llist_head *list) rq->q->mq_ops->complete(rq); } -static __latent_entropy void blk_done_softirq(struct softirq_action *h) +static __latent_entropy void blk_done_softirq(void) { blk_complete_reqs(this_cpu_ptr(&blk_cpu_done)); } @@ -2546,6 +2544,9 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, rq->__sector = bio->bi_iter.bi_sector; rq->write_hint = bio->bi_write_hint; blk_rq_bio_prep(rq, bio, nr_segs); + if (bio_integrity(bio)) + rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, + bio); /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. 
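The blk_done_softirq() hunk above drops the unused struct softirq_action pointer, matching the updated softirq handler prototype. A minimal sketch of a handler and its registration in the new style; the example_* names are invented and the open_softirq() prototype is assumed from the signature change shown here.

#include <linux/interrupt.h>

static void example_done_softirq(void)
{
        /* drain this CPU's completion list, as blk_done_softirq() does */
}

static void example_register(void)
{
        open_softirq(BLOCK_SOFTIRQ, example_done_softirq);
}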
*/ err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); @@ -2753,6 +2754,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request *rq; + unsigned int depth; /* * We may have been called recursively midway through handling @@ -2763,6 +2765,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) */ if (plug->rq_count == 0) return; + depth = plug->rq_count; plug->rq_count = 0; if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { @@ -2770,6 +2773,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) rq = rq_list_peek(&plug->mq_list); q = rq->q; + trace_block_unplug(q, depth, true); /* * Peek first request and see if we have a ->queue_rqs() hook. @@ -2939,7 +2943,7 @@ void blk_mq_submit_bio(struct bio *bio) struct blk_plug *plug = current->plug; const int is_sync = op_is_sync(bio->bi_opf); struct blk_mq_hw_ctx *hctx; - unsigned int nr_segs = 1; + unsigned int nr_segs; struct request *rq; blk_status_t ret; @@ -2981,11 +2985,10 @@ void blk_mq_submit_bio(struct bio *bio) goto queue_exit; } - if (unlikely(bio_may_exceed_limits(bio, &q->limits))) { - bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); - if (!bio) - goto queue_exit; - } + bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); + if (!bio) + goto queue_exit; + if (!bio_integrity_prep(bio)) goto queue_exit; diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c index dd7310c94713c9..2cfb297d9a6276 100644 --- a/block/blk-rq-qos.c +++ b/block/blk-rq-qos.c @@ -263,7 +263,7 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data, has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); do { - /* The memory barrier in set_task_state saves us here. */ + /* The memory barrier in set_current_state saves us here. */ if (data.got_token) break; if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) { diff --git a/block/blk-settings.c b/block/blk-settings.c index cd8a8eabc9a5eb..a446654ddee5ef 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -437,48 +437,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim) } EXPORT_SYMBOL_GPL(queue_limits_set); -/** - * blk_limits_io_min - set minimum request size for a device - * @limits: the queue limits - * @min: smallest I/O size in bytes - * - * Description: - * Some devices have an internal block size bigger than the reported - * hardware sector size. This function can be used to signal the - * smallest I/O the device can perform without incurring a performance - * penalty. - */ -void blk_limits_io_min(struct queue_limits *limits, unsigned int min) -{ - limits->io_min = min; - - if (limits->io_min < limits->logical_block_size) - limits->io_min = limits->logical_block_size; - - if (limits->io_min < limits->physical_block_size) - limits->io_min = limits->physical_block_size; -} -EXPORT_SYMBOL(blk_limits_io_min); - -/** - * blk_limits_io_opt - set optimal request size for a device - * @limits: the queue limits - * @opt: smallest I/O size in bytes - * - * Description: - * Storage devices may report an optimal I/O size, which is the - * device's preferred unit for sustained I/O. This is rarely reported - * for disk drives. For RAID arrays it is usually the stripe width or - * the internal track size. A properly aligned multiple of - * optimal_io_size is the preferred request size for workloads where - * sustained throughput is desired. 
- */ -void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) -{ - limits->io_opt = opt; -} -EXPORT_SYMBOL(blk_limits_io_opt); - static int queue_limit_alignment_offset(const struct queue_limits *lim, sector_t sector) { diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 6943ec720f39ff..2c4192e12efab6 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1584,6 +1584,22 @@ void blk_throtl_cancel_bios(struct gendisk *disk) spin_unlock_irq(&q->queue_lock); } +static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw) +{ + /* throtl is FIFO - if bios are already queued, should queue */ + if (tg->service_queue.nr_queued[rw]) + return false; + + return tg_may_dispatch(tg, bio, NULL); +} + +static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw) +{ + if (!bio_flagged(bio, BIO_BPS_THROTTLED)) + tg->carryover_bytes[rw] -= throtl_bio_data_size(bio); + tg->carryover_ios[rw]--; +} + bool __blk_throtl_bio(struct bio *bio) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); @@ -1600,34 +1616,35 @@ bool __blk_throtl_bio(struct bio *bio) sq = &tg->service_queue; while (true) { - if (tg->last_low_overflow_time[rw] == 0) - tg->last_low_overflow_time[rw] = jiffies; - /* throtl is FIFO - if bios are already queued, should queue */ - if (sq->nr_queued[rw]) - break; - - /* if above limits, break to queue */ - if (!tg_may_dispatch(tg, bio, NULL)) { - tg->last_low_overflow_time[rw] = jiffies; + if (tg_within_limit(tg, bio, rw)) { + /* within limits, let's charge and dispatch directly */ + throtl_charge_bio(tg, bio); + + /* + * We need to trim slice even when bios are not being + * queued otherwise it might happen that a bio is not + * queued for a long time and slice keeps on extending + * and trim is not called for a long time. Now if limits + * are reduced suddenly we take into account all the IO + * dispatched so far at new low rate and * newly queued + * IO gets a really long dispatch time. + * + * So keep on trimming slice even if bio is not queued. + */ + throtl_trim_slice(tg, rw); + } else if (bio_issue_as_root_blkg(bio)) { + /* + * IOs which may cause priority inversions are + * dispatched directly, even if they're over limit. + * Debts are handled by carryover_bytes/ios while + * calculating wait time. + */ + tg_dispatch_in_debt(tg, bio, rw); + } else { + /* if above limits, break to queue */ break; } - /* within limits, let's charge and dispatch directly */ - throtl_charge_bio(tg, bio); - - /* - * We need to trim slice even when bios are not being queued - * otherwise it might happen that a bio is not queued for - * a long time and slice keeps on extending and trim is not - * called for a long time. Now if limits are reduced suddenly - * we take into account all the IO dispatched so far at new - * low rate and * newly queued IO gets a really long dispatch - * time. - * - * So keep on trimming slice even if bio is not queued. - */ - throtl_trim_slice(tg, rw); - /* * @bio passed through this layer without being throttled. * Climb up the ladder. 
If we're already at the top, it @@ -1650,8 +1667,6 @@ bool __blk_throtl_bio(struct bio *bio) tg->io_disp[rw], tg_iops_limit(tg, rw), sq->nr_queued[READ], sq->nr_queued[WRITE]); - tg->last_low_overflow_time[rw] = jiffies; - td->nr_queued[rw]++; throtl_add_bio_tg(bio, qn, tg); throttled = true; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index 4d9ef5abdf21cc..1a36d1278eea85 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -106,8 +106,6 @@ struct throtl_grp { /* Number of bio's dispatched in current slice */ unsigned int io_disp[2]; - unsigned long last_low_overflow_time[2]; - uint64_t last_bytes_disp[2]; unsigned int last_io_disp[2]; diff --git a/block/blk.h b/block/blk.h index e180863f918b15..c718e4291db062 100644 --- a/block/blk.h +++ b/block/blk.h @@ -331,33 +331,67 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); ssize_t part_timeout_store(struct device *, struct device_attribute *, const char *, size_t); -static inline bool bio_may_exceed_limits(struct bio *bio, - const struct queue_limits *lim) +struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim, + unsigned *nsegs); +struct bio *bio_split_write_zeroes(struct bio *bio, + const struct queue_limits *lim, unsigned *nsegs); +struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, + unsigned *nr_segs); +struct bio *bio_split_zone_append(struct bio *bio, + const struct queue_limits *lim, unsigned *nr_segs); + +/* + * All drivers must accept single-segments bios that are smaller than PAGE_SIZE. + * + * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is + * always valid if a bio has data. The check might lead to occasional false + * positives when bios are cloned, but compared to the performance impact of + * cloned bios themselves the loop below doesn't matter anyway. + */ +static inline bool bio_may_need_split(struct bio *bio, + const struct queue_limits *lim) +{ + return lim->chunk_sectors || bio->bi_vcnt != 1 || + bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE; +} + +/** + * __bio_split_to_limits - split a bio to fit the queue limits + * @bio: bio to be split + * @lim: queue limits to split based on + * @nr_segs: returns the number of segments in the returned bio + * + * Check if @bio needs splitting based on the queue limits, and if so split off + * a bio fitting the limits from the beginning of @bio and return it. @bio is + * shortened to the remainder and re-submitted. + * + * The split bio is allocated from @q->bio_split, which is provided by the + * block layer. + */ +static inline struct bio *__bio_split_to_limits(struct bio *bio, + const struct queue_limits *lim, unsigned int *nr_segs) { switch (bio_op(bio)) { + case REQ_OP_READ: + case REQ_OP_WRITE: + if (bio_may_need_split(bio, lim)) + return bio_split_rw(bio, lim, nr_segs); + *nr_segs = 1; + return bio; + case REQ_OP_ZONE_APPEND: + return bio_split_zone_append(bio, lim, nr_segs); case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: + return bio_split_discard(bio, lim, nr_segs); case REQ_OP_WRITE_ZEROES: - return true; /* non-trivial splitting decisions */ + return bio_split_write_zeroes(bio, lim, nr_segs); default: - break; + /* other operations can't be split */ + *nr_segs = 0; + return bio; } - - /* - * All drivers must accept single-segments bios that are <= PAGE_SIZE. - * This is a quick and dirty check that relies on the fact that - * bi_io_vec[0] is always valid if a bio has data. 
The check might - * lead to occasional false negatives when bios are cloned, but compared - * to the performance impact of cloned bios themselves the loop below - * doesn't matter anyway. - */ - return lim->chunk_sectors || bio->bi_vcnt != 1 || - bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE; } -struct bio *__bio_split_to_limits(struct bio *bio, - const struct queue_limits *lim, - unsigned int *nr_segs); int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs); bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, @@ -540,6 +574,10 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio, struct page *page, unsigned int len, unsigned int offset, unsigned int max_sectors, bool *same_page); +int bio_add_hw_folio(struct request_queue *q, struct bio *bio, + struct folio *folio, size_t len, size_t offset, + unsigned int max_sectors, bool *same_page); + /* * Clean up a page appropriately, where the page may be pinned, may have a * ref taken on it or neither. @@ -571,6 +609,7 @@ blk_mode_t file_to_blk_mode(struct file *file); int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, loff_t lstart, loff_t lend); long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); +int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags); long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); extern const struct address_space_operations def_blk_aops; diff --git a/block/elevator.c b/block/elevator.c index c355b55d010786..4122026b11f1a1 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -715,7 +715,9 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf, strscpy(elevator_name, buf, sizeof(elevator_name)); - return request_module("%s-iosched", strstrip(elevator_name)); + request_module("%s-iosched", strstrip(elevator_name)); + + return 0; } ssize_t elv_iosched_store(struct gendisk *disk, const char *buf, diff --git a/block/fops.c b/block/fops.c index 9825c1713a49a9..e696ae53bf1e08 100644 --- a/block/fops.c +++ b/block/fops.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "blk.h" static inline struct inode *bdev_file_inode(struct file *file) @@ -451,20 +452,20 @@ static void blkdev_readahead(struct readahead_control *rac) } static int blkdev_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, struct page **pagep, void **fsdata) + loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { - return block_write_begin(mapping, pos, len, pagep, blkdev_get_block); + return block_write_begin(mapping, pos, len, foliop, blkdev_get_block); } static int blkdev_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, struct page *page, + loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { int ret; - ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); + ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata); - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); return ret; } @@ -665,7 +666,7 @@ blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from) static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from) { - return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops); + return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL); } /* @@ -771,7 +772,7 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) #define 
BLKDEV_FALLOC_FL_SUPPORTED \ (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \ - FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE) + FALLOC_FL_ZERO_RANGE) static long blkdev_fallocate(struct file *file, int mode, loff_t start, loff_t len) @@ -830,14 +831,6 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, len >> SECTOR_SHIFT, GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK); break; - case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE: - error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end); - if (error) - goto fail; - - error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT, - len >> SECTOR_SHIFT, GFP_KERNEL); - break; default: error = -EOPNOTSUPP; } @@ -873,6 +866,7 @@ const struct file_operations def_blk_fops = { .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .fallocate = blkdev_fallocate, + .uring_cmd = blkdev_uring_cmd, .fop_flags = FOP_BUFFER_RASYNC, }; diff --git a/block/ioctl.c b/block/ioctl.c index e8e4a4190f183a..6554b728bae6aa 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include +#include #include "blk.h" static int blkpg_do_ioctl(struct block_device *bdev, @@ -92,38 +95,51 @@ static int compat_blkpg_ioctl(struct block_device *bdev, } #endif +/* + * Check that [start, start + len) is a valid range from the block device's + * perspective, including verifying that it can be correctly translated into + * logical block addresses. + */ +static int blk_validate_byte_range(struct block_device *bdev, + uint64_t start, uint64_t len) +{ + unsigned int bs_mask = bdev_logical_block_size(bdev) - 1; + uint64_t end; + + if ((start | len) & bs_mask) + return -EINVAL; + if (!len) + return -EINVAL; + if (check_add_overflow(start, len, &end) || end > bdev_nr_bytes(bdev)) + return -EINVAL; + + return 0; +} + static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, unsigned long arg) { - unsigned int bs_mask = bdev_logical_block_size(bdev) - 1; - uint64_t range[2], start, len, end; + uint64_t range[2], start, len; struct bio *prev = NULL, *bio; sector_t sector, nr_sects; struct blk_plug plug; int err; - if (!(mode & BLK_OPEN_WRITE)) - return -EBADF; - - if (!bdev_max_discard_sectors(bdev)) - return -EOPNOTSUPP; - if (bdev_read_only(bdev)) - return -EPERM; - if (copy_from_user(range, (void __user *)arg, sizeof(range))) return -EFAULT; - start = range[0]; len = range[1]; - if (!len) - return -EINVAL; - if ((start | len) & bs_mask) - return -EINVAL; + if (!bdev_max_discard_sectors(bdev)) + return -EOPNOTSUPP; - if (check_add_overflow(start, len, &end) || - end > bdev_nr_bytes(bdev)) - return -EINVAL; + if (!(mode & BLK_OPEN_WRITE)) + return -EBADF; + if (bdev_read_only(bdev)) + return -EPERM; + err = blk_validate_byte_range(bdev, start, len); + if (err) + return err; filemap_invalidate_lock(bdev->bd_mapping); err = truncate_bdev_range(bdev, mode, start, start + len - 1); @@ -163,7 +179,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode, void __user *argp) { - uint64_t start, len; + uint64_t start, len, end; uint64_t range[2]; int err; @@ -178,11 +194,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode, len = range[1]; if ((start & 511) || (len & 511)) return -EINVAL; - if (start + len > bdev_nr_bytes(bdev)) + if (check_add_overflow(start, len, &end) || + end > bdev_nr_bytes(bdev)) return -EINVAL; 
filemap_invalidate_lock(bdev->bd_mapping); - err = truncate_bdev_range(bdev, mode, start, start + len - 1); + err = truncate_bdev_range(bdev, mode, start, end - 1); if (!err) err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9, GFP_KERNEL); @@ -734,3 +751,112 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) return ret; } #endif + +struct blk_iou_cmd { + int res; + bool nowait; +}; + +static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd); + + if (bic->res == -EAGAIN && bic->nowait) + io_uring_cmd_issue_blocking(cmd); + else + io_uring_cmd_done(cmd, bic->res, 0, issue_flags); +} + +static void bio_cmd_bio_end_io(struct bio *bio) +{ + struct io_uring_cmd *cmd = bio->bi_private; + struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd); + + if (unlikely(bio->bi_status) && !bic->res) + bic->res = blk_status_to_errno(bio->bi_status); + + io_uring_cmd_do_in_task_lazy(cmd, blk_cmd_complete); + bio_put(bio); + } + +static int blkdev_cmd_discard(struct io_uring_cmd *cmd, + struct block_device *bdev, + uint64_t start, uint64_t len, bool nowait) +{ + struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd); + gfp_t gfp = nowait ? GFP_NOWAIT : GFP_KERNEL; + sector_t sector = start >> SECTOR_SHIFT; + sector_t nr_sects = len >> SECTOR_SHIFT; + struct bio *prev = NULL, *bio; + int err; + + if (!bdev_max_discard_sectors(bdev)) + return -EOPNOTSUPP; + if (!(file_to_blk_mode(cmd->file) & BLK_OPEN_WRITE)) + return -EBADF; + if (bdev_read_only(bdev)) + return -EPERM; + err = blk_validate_byte_range(bdev, start, len); + if (err) + return err; + + err = filemap_invalidate_pages(bdev->bd_mapping, start, + start + len - 1, nowait); + if (err) + return err; + + while (true) { + bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp); + if (!bio) + break; + if (nowait) { + /* + * Don't allow multi-bio non-blocking submissions as + * subsequent bios may fail but we won't get a direct + * indication of that. Normally, the caller should + * retry from a blocking context.
+ */ + if (unlikely(nr_sects)) { + bio_put(bio); + return -EAGAIN; + } + bio->bi_opf |= REQ_NOWAIT; + } + + prev = bio_chain_and_submit(prev, bio); + } + if (unlikely(!prev)) + return -EAGAIN; + if (unlikely(nr_sects)) + bic->res = -EAGAIN; + + prev->bi_private = cmd; + prev->bi_end_io = bio_cmd_bio_end_io; + submit_bio(prev); + return -EIOCBQUEUED; +} + +int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host); + struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd); + const struct io_uring_sqe *sqe = cmd->sqe; + u32 cmd_op = cmd->cmd_op; + uint64_t start, len; + + if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len || + sqe->rw_flags || sqe->file_index)) + return -EINVAL; + + bic->res = 0; + bic->nowait = issue_flags & IO_URING_F_NONBLOCK; + + start = READ_ONCE(sqe->addr); + len = READ_ONCE(sqe->addr3); + + switch (cmd_op) { + case BLOCK_URING_CMD_DISCARD: + return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait); + } + return -EINVAL; +} diff --git a/block/partitions/core.c b/block/partitions/core.c index ab76e64f0f6c3b..5bd7a603092ea9 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -555,9 +555,11 @@ static bool blk_add_partition(struct gendisk *disk, part = add_partition(disk, p, from, size, state->parts[p].flags, &state->parts[p].info); - if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) { - printk(KERN_ERR " %s: p%d could not be added: %pe\n", - disk->disk_name, p, part); + if (IS_ERR(part)) { + if (PTR_ERR(part) != -ENXIO) { + printk(KERN_ERR " %s: p%d could not be added: %pe\n", + disk->disk_name, p, part); + } return true; } diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h index 0a747a0c782d5d..e259180c89148b 100644 --- a/block/partitions/ldm.h +++ b/block/partitions/ldm.h @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include struct parsed_partitions; diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c index b5d5c229cc3b94..073be78ba0b0d7 100644 --- a/block/partitions/msdos.c +++ b/block/partitions/msdos.c @@ -36,7 +36,7 @@ * the nr_sects and start_sect partition table entries are * at a 2 (mod 4) address. 
*/ -#include +#include static inline sector_t nr_sects(struct msdos_partition *p) { diff --git a/block/t10-pi.c b/block/t10-pi.c index 425e2836f3e1d8..2d05421f0fa566 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c @@ -8,9 +8,8 @@ #include #include #include -#include #include -#include +#include #include "blk.h" struct blk_integrity_iter { @@ -240,9 +239,9 @@ static void ext_pi_crc64_generate(struct blk_integrity_iter *iter, } } -static bool ext_pi_ref_escape(u8 *ref_tag) +static bool ext_pi_ref_escape(const u8 ref_tag[6]) { - static u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + static const u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0; } @@ -472,6 +471,3 @@ void blk_integrity_complete(struct request *rq, unsigned int nr_bytes) else t10_pi_type1_complete(rq, nr_bytes); } - -MODULE_DESCRIPTION("T10 Protection Information module"); -MODULE_LICENSE("GPL"); diff --git a/certs/Makefile b/certs/Makefile index 1094e3860c2a74..f6fa4d8d75e05f 100644 --- a/certs/Makefile +++ b/certs/Makefile @@ -84,5 +84,5 @@ targets += x509_revocation_list hostprogs := extract-cert -HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) +HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) -I$(srctree)/scripts HOSTLDLIBS_extract-cert = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto) diff --git a/certs/extract-cert.c b/certs/extract-cert.c index 70e9ec89d87d36..7d6d468ed6129d 100644 --- a/certs/extract-cert.c +++ b/certs/extract-cert.c @@ -21,14 +21,17 @@ #include #include #include -#include - -/* - * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. - * - * Remove this if/when that API is no longer used - */ -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#if OPENSSL_VERSION_MAJOR >= 3 +# define USE_PKCS11_PROVIDER +# include +# include +#else +# if !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0) +# define USE_PKCS11_ENGINE +# include +# endif +#endif +#include "ssl-common.h" #define PKEY_ID_PKCS7 2 @@ -40,41 +43,6 @@ void format(void) exit(2); } -static void display_openssl_errors(int l) -{ - const char *file; - char buf[120]; - int e, line; - - if (ERR_peek_error() == 0) - return; - fprintf(stderr, "At main.c:%d:\n", l); - - while ((e = ERR_get_error_line(&file, &line))) { - ERR_error_string(e, buf); - fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line); - } -} - -static void drain_openssl_errors(void) -{ - const char *file; - int line; - - if (ERR_peek_error() == 0) - return; - while (ERR_get_error_line(&file, &line)) {} -} - -#define ERR(cond, fmt, ...) 
\ - do { \ - bool __cond = (cond); \ - display_openssl_errors(__LINE__); \ - if (__cond) { \ - err(1, fmt, ## __VA_ARGS__); \ - } \ - } while(0) - static const char *key_pass; static BIO *wb; static char *cert_dst; @@ -94,6 +62,66 @@ static void write_cert(X509 *x509) fprintf(stderr, "Extracted cert: %s\n", buf); } +static X509 *load_cert_pkcs11(const char *cert_src) +{ + X509 *cert = NULL; +#ifdef USE_PKCS11_PROVIDER + OSSL_STORE_CTX *store; + + if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", true)) + ERR(1, "OSSL_PROVIDER_try_load(pkcs11)"); + if (!OSSL_PROVIDER_try_load(NULL, "default", true)) + ERR(1, "OSSL_PROVIDER_try_load(default)"); + + store = OSSL_STORE_open(cert_src, NULL, NULL, NULL, NULL); + ERR(!store, "OSSL_STORE_open"); + + while (!OSSL_STORE_eof(store)) { + OSSL_STORE_INFO *info = OSSL_STORE_load(store); + + if (!info) { + drain_openssl_errors(__LINE__, 0); + continue; + } + if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_CERT) { + cert = OSSL_STORE_INFO_get1_CERT(info); + ERR(!cert, "OSSL_STORE_INFO_get1_CERT"); + } + OSSL_STORE_INFO_free(info); + if (cert) + break; + } + OSSL_STORE_close(store); +#elif defined(USE_PKCS11_ENGINE) + ENGINE *e; + struct { + const char *cert_id; + X509 *cert; + } parms; + + parms.cert_id = cert_src; + parms.cert = NULL; + + ENGINE_load_builtin_engines(); + drain_openssl_errors(__LINE__, 1); + e = ENGINE_by_id("pkcs11"); + ERR(!e, "Load PKCS#11 ENGINE"); + if (ENGINE_init(e)) + drain_openssl_errors(__LINE__, 1); + else + ERR(1, "ENGINE_init"); + if (key_pass) + ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN"); + ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1); + ERR(!parms.cert, "Get X.509 from PKCS#11"); + cert = parms.cert; +#else + fprintf(stderr, "no pkcs11 engine/provider available\n"); + exit(1); +#endif + return cert; +} + int main(int argc, char **argv) { char *cert_src; @@ -122,28 +150,10 @@ int main(int argc, char **argv) fclose(f); exit(0); } else if (!strncmp(cert_src, "pkcs11:", 7)) { - ENGINE *e; - struct { - const char *cert_id; - X509 *cert; - } parms; - - parms.cert_id = cert_src; - parms.cert = NULL; + X509 *cert = load_cert_pkcs11(cert_src); - ENGINE_load_builtin_engines(); - drain_openssl_errors(); - e = ENGINE_by_id("pkcs11"); - ERR(!e, "Load PKCS#11 ENGINE"); - if (ENGINE_init(e)) - drain_openssl_errors(); - else - ERR(1, "ENGINE_init"); - if (key_pass) - ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN"); - ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1); - ERR(!parms.cert, "Get X.509 from PKCS#11"); - write_cert(parms.cert); + ERR(!cert, "load_cert_pkcs11 failed"); + write_cert(cert); } else { BIO *b; X509 *x509; diff --git a/crypto/Kconfig b/crypto/Kconfig index 72e2decb8c6aa3..a779cab668c24c 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1305,7 +1305,7 @@ config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE config CRYPTO_JITTERENTROPY_OSR int "CPU Jitter RNG Oversampling Rate" range 1 15 - default 1 + default 3 help The Jitter RNG allows the specification of an oversampling rate (OSR). 
The Jitter RNG operation requires a fixed amount of timing diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index c4f1bfa1d04fa9..4fdb53435827e5 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -323,8 +323,9 @@ static __always_inline int crypto_aegis128_process_crypt(struct aegis_state *state, struct skcipher_walk *walk, void (*crypt)(struct aegis_state *state, - u8 *dst, const u8 *src, - unsigned int size)) + u8 *dst, + const u8 *src, + unsigned int size)) { int err = 0; diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 666474b81c6aa5..3c66d425c97b5d 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -54,7 +54,7 @@ #include #include #include -#include +#include static inline u8 byte(const u32 x, const unsigned n) { diff --git a/crypto/algapi.c b/crypto/algapi.c index 122cd910c4e1c1..74e2261c184ca9 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -235,7 +235,6 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, EXPORT_SYMBOL_GPL(crypto_remove_spawns); static void crypto_alg_finish_registration(struct crypto_alg *alg, - bool fulfill_requests, struct list_head *algs_to_put) { struct crypto_alg *q; @@ -247,30 +246,8 @@ static void crypto_alg_finish_registration(struct crypto_alg *alg, if (crypto_is_moribund(q)) continue; - if (crypto_is_larval(q)) { - struct crypto_larval *larval = (void *)q; - - /* - * Check to see if either our generic name or - * specific name can satisfy the name requested - * by the larval entry q. - */ - if (strcmp(alg->cra_name, q->cra_name) && - strcmp(alg->cra_driver_name, q->cra_name)) - continue; - - if (larval->adult) - continue; - if ((q->cra_flags ^ alg->cra_flags) & larval->mask) - continue; - - if (fulfill_requests && crypto_mod_get(alg)) - larval->adult = alg; - else - larval->adult = ERR_PTR(-EAGAIN); - + if (crypto_is_larval(q)) continue; - } if (strcmp(alg->cra_name, q->cra_name)) continue; @@ -359,7 +336,7 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put) list_add(&larval->alg.cra_list, &crypto_alg_list); } else { alg->cra_flags |= CRYPTO_ALG_TESTED; - crypto_alg_finish_registration(alg, true, algs_to_put); + crypto_alg_finish_registration(alg, algs_to_put); } out: @@ -376,7 +353,6 @@ void crypto_alg_tested(const char *name, int err) struct crypto_alg *alg; struct crypto_alg *q; LIST_HEAD(list); - bool best; down_write(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { @@ -390,7 +366,8 @@ void crypto_alg_tested(const char *name, int err) } pr_err("alg: Unexpected test result for %s: %d\n", name, err); - goto unlock; + up_write(&crypto_alg_sem); + return; found: q->cra_flags |= CRYPTO_ALG_DEAD; @@ -408,32 +385,15 @@ void crypto_alg_tested(const char *name, int err) alg->cra_flags |= CRYPTO_ALG_TESTED; - /* - * If a higher-priority implementation of the same algorithm is - * currently being tested, then don't fulfill request larvals. 
- */ - best = true; - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (crypto_is_moribund(q) || !crypto_is_larval(q)) - continue; - - if (strcmp(alg->cra_name, q->cra_name)) - continue; - - if (q->cra_priority > alg->cra_priority) { - best = false; - break; - } - } - - crypto_alg_finish_registration(alg, best, &list); + crypto_alg_finish_registration(alg, &list); complete: + list_del_init(&test->alg.cra_list); complete_all(&test->completion); -unlock: up_write(&crypto_alg_sem); + crypto_alg_put(&test->alg); crypto_remove_final(&list); } EXPORT_SYMBOL_GPL(crypto_alg_tested); @@ -454,7 +414,6 @@ int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; LIST_HEAD(algs_to_put); - bool test_started = false; int err; alg->cra_flags &= ~CRYPTO_ALG_DEAD; @@ -465,15 +424,16 @@ int crypto_register_alg(struct crypto_alg *alg) down_write(&crypto_alg_sem); larval = __crypto_register_alg(alg, &algs_to_put); if (!IS_ERR_OR_NULL(larval)) { - test_started = crypto_boot_test_finished(); + bool test_started = crypto_boot_test_finished(); + larval->test_started = test_started; + if (test_started) + crypto_schedule_test(larval); } up_write(&crypto_alg_sem); if (IS_ERR(larval)) return PTR_ERR(larval); - if (test_started) - crypto_wait_for_test(larval); crypto_remove_final(&algs_to_put); return 0; } @@ -688,8 +648,10 @@ int crypto_register_instance(struct crypto_template *tmpl, larval = __crypto_register_alg(&inst->alg, &algs_to_put); if (IS_ERR(larval)) goto unlock; - else if (larval) + else if (larval) { larval->test_started = true; + crypto_schedule_test(larval); + } hlist_add_head(&inst->list, &tmpl->instances); inst->tmpl = tmpl; @@ -699,8 +661,6 @@ int crypto_register_instance(struct crypto_template *tmpl, if (IS_ERR(larval)) return PTR_ERR(larval); - if (larval) - crypto_wait_for_test(larval); crypto_remove_final(&algs_to_put); return 0; } @@ -1084,6 +1044,7 @@ static void __init crypto_start_tests(void) l->test_started = true; larval = l; + crypto_schedule_test(larval); break; } @@ -1091,8 +1052,6 @@ static void __init crypto_start_tests(void) if (!larval) break; - - crypto_wait_for_test(larval); } set_crypto_boot_test_finished(); diff --git a/crypto/algboss.c b/crypto/algboss.c index 1aa5f306998a27..a20926bfd34e0c 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -51,7 +51,7 @@ static int cryptomgr_probe(void *data) { struct cryptomgr_param *param = data; struct crypto_template *tmpl; - int err; + int err = -ENOENT; tmpl = crypto_lookup_template(param->template); if (!tmpl) @@ -64,6 +64,8 @@ static int cryptomgr_probe(void *data) crypto_tmpl_put(tmpl); out: + param->larval->adult = ERR_PTR(err); + param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD; complete_all(&param->larval->completion); crypto_alg_put(&param->larval->alg); kfree(param); diff --git a/crypto/api.c b/crypto/api.c index 22556907b3bcd3..bfd177a4313a01 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -37,6 +37,8 @@ DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); #endif static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, + u32 mask); struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { @@ -68,11 +70,6 @@ static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, if ((q->cra_flags ^ type) & mask) continue; - if (crypto_is_larval(q) && - !crypto_is_test_larval((struct crypto_larval *)q) && - ((struct crypto_larval *)q)->mask != mask) - continue; - exact = !strcmp(q->cra_driver_name, name); fuzzy =
!strcmp(q->cra_name, name); if (!exact && !(fuzzy && q->cra_priority > best)) @@ -111,6 +108,8 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask) if (!larval) return ERR_PTR(-ENOMEM); + type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK); + larval->mask = mask; larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; larval->alg.cra_priority = -1; @@ -152,32 +151,31 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, return alg; } -void crypto_larval_kill(struct crypto_alg *alg) +static void crypto_larval_kill(struct crypto_larval *larval) { - struct crypto_larval *larval = (void *)alg; + bool unlinked; down_write(&crypto_alg_sem); - list_del(&alg->cra_list); + unlinked = list_empty(&larval->alg.cra_list); + if (!unlinked) + list_del_init(&larval->alg.cra_list); up_write(&crypto_alg_sem); + + if (unlinked) + return; + complete_all(&larval->completion); - crypto_alg_put(alg); + crypto_alg_put(&larval->alg); } -EXPORT_SYMBOL_GPL(crypto_larval_kill); -void crypto_wait_for_test(struct crypto_larval *larval) +void crypto_schedule_test(struct crypto_larval *larval) { int err; err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult); - if (WARN_ON_ONCE(err != NOTIFY_STOP)) - goto out; - - err = wait_for_completion_killable(&larval->completion); - WARN_ON(err); -out: - crypto_larval_kill(&larval->alg); + WARN_ON_ONCE(err != NOTIFY_STOP); } -EXPORT_SYMBOL_GPL(crypto_wait_for_test); +EXPORT_SYMBOL_GPL(crypto_schedule_test); static void crypto_start_test(struct crypto_larval *larval) { @@ -196,14 +194,17 @@ static void crypto_start_test(struct crypto_larval *larval) larval->test_started = true; up_write(&crypto_alg_sem); - crypto_wait_for_test(larval); + crypto_schedule_test(larval); } static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) { - struct crypto_larval *larval = (void *)alg; + struct crypto_larval *larval; long time_left; +again: + larval = container_of(alg, struct crypto_larval, alg); + if (!crypto_boot_test_finished()) crypto_start_test(larval); @@ -213,11 +214,20 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) alg = larval->adult; if (time_left < 0) alg = ERR_PTR(-EINTR); - else if (!time_left) + else if (!time_left) { + if (crypto_is_test_larval(larval)) + crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); - else if (!alg) - alg = ERR_PTR(-ENOENT); - else if (IS_ERR(alg)) + } else if (!alg) { + u32 type; + u32 mask; + + alg = &larval->alg; + type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); + mask = larval->mask; + alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: + ERR_PTR(-EAGAIN); + } else if (IS_ERR(alg)) ; else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) @@ -228,6 +238,9 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) alg = ERR_PTR(-EAGAIN); crypto_mod_put(&larval->alg); + if (!IS_ERR(alg) && crypto_is_larval(alg)) + goto again; + return alg; } @@ -292,8 +305,12 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg)) alg = crypto_larval_wait(alg); - else if (!alg) + else if (alg) + ; + else if (!(mask & CRYPTO_ALG_TESTED)) alg = crypto_larval_add(name, type, mask); + else + alg = ERR_PTR(-ENOENT); return alg; } @@ -340,7 +357,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); } - crypto_larval_kill(larval); + 
crypto_larval_kill(container_of(larval, struct crypto_larval, alg)); return alg; } EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index a5da8ccd353ef7..43af5fa510c09f 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring, char *req, *p; int len; - WARN_ON(!id_0 && !id_1 && !id_2); - if (id_0) { lookup = id_0->data; len = id_0->len; } else if (id_1) { lookup = id_1->data; len = id_1->len; - } else { + } else if (id_2) { lookup = id_2->data; len = id_2->len; + } else { + WARN_ON(1); + return ERR_PTR(-EINVAL); } /* Construct an identifier "id:". */ diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 32e380b714b6cc..04a712ddfb4309 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -15,7 +15,7 @@ * More information about BLAKE2 can be found at https://blake2.net. */ -#include +#include #include #include #include diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c index 0e74c7242e77e4..0146bc762c09a4 100644 --- a/crypto/blowfish_generic.c +++ b/crypto/blowfish_generic.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index c04670cf51acfb..197fcf3abc8921 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include static const u32 camellia_sp1110[256] = { 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00, diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index 085a1eedae03bb..f3e57775fa02ea 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c @@ -13,7 +13,7 @@ */ -#include +#include #include #include #include diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index 34f1ab53e3a713..11b725b12f2741 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -10,7 +10,7 @@ */ -#include +#include #include #include #include diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 9e4651330852b5..d740849f1c1915 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -27,7 +27,7 @@ struct chachapoly_ctx { struct crypto_ahash *poly; /* key bytes we use for the ChaCha20 IV */ unsigned int saltlen; - u8 salt[]; + u8 salt[] __counted_by(saltlen); }; struct poly_req { diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c index 8beea79ab11782..ba7fcb47f9aa02 100644 --- a/crypto/chacha_generic.c +++ b/crypto/chacha_generic.c @@ -6,7 +6,7 @@ * Copyright (C) 2018 Google LLC */ -#include +#include #include #include #include diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index a989cb44fd1608..d1251663ed6683 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -7,7 +7,7 @@ * This is crypto api shash wrappers to crc32_le. 
*/ -#include +#include #include #include #include diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 7686147385412a..a8c90b3f4c6c55 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -30,7 +30,7 @@ * Copyright (c) 2008 Herbert Xu */ -#include +#include #include #include #include diff --git a/crypto/crc64_rocksoft_generic.c b/crypto/crc64_rocksoft_generic.c index 9e812bb26dba64..ce0f3059b9125a 100644 --- a/crypto/crc64_rocksoft_generic.c +++ b/crypto/crc64_rocksoft_generic.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include static int chksum_init(struct shash_desc *desc) { diff --git a/crypto/dh.c b/crypto/dh.c index 68d11d66c0b54d..afc0fd84776138 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -145,9 +145,9 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y) * ->p is odd, so no need to explicitly subtract one * from it before shifting to the right. */ - mpi_rshift(q, ctx->p, 1); + ret = mpi_rshift(q, ctx->p, 1) ?: + mpi_powm(val, y, q, ctx->p); - ret = mpi_powm(val, y, q, ctx->p); mpi_free(q); if (ret) { mpi_free(val); diff --git a/crypto/ecc.c b/crypto/ecc.c index 420decdad7d94f..50ad2d4ed672c5 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include "ecc_curve_defs.h" diff --git a/crypto/internal.h b/crypto/internal.h index aee31319be2e6f..711a6a5bfa2b56 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -113,8 +113,7 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); -void crypto_larval_kill(struct crypto_alg *alg); -void crypto_wait_for_test(struct crypto_larval *larval); +void crypto_schedule_test(struct crypto_larval *larval); void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index d7056de8c0d7b2..3b390bd6c1196b 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -146,6 +146,7 @@ struct rand_data { #define JENT_ENTROPY_SAFETY_FACTOR 64 #include +#include #include "jitterentropy.h" /*************************************************************************** @@ -638,10 +639,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, return -2; } - if ((DATA_SIZE_BITS / 8) < len) - tocopy = (DATA_SIZE_BITS / 8); - else - tocopy = len; + tocopy = min(DATA_SIZE_BITS / 8, len); if (jent_read_random_block(ec->hash_state, p, tocopy)) return -1; diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index f4c31049601c9d..0d14e980d4d613 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -7,7 +7,7 @@ * Copyright (c) 2004 Jouni Malinen */ #include -#include +#include #include #include #include diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c index 8a3006c3b51b9c..a661d4f667cde5 100644 --- a/crypto/nhpoly1305.c +++ b/crypto/nhpoly1305.c @@ -30,7 +30,7 @@ * (https://cr.yp.to/mac/poly1305-20050329.pdf) */ -#include +#include #include #include #include diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index 94af47eb6fa699..e6f29a98725a83 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include static int crypto_poly1305_init(struct shash_desc *desc) { diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c index 
16bfa6925b31eb..4f98910bcdb589 100644 --- a/crypto/polyval-generic.c +++ b/crypto/polyval-generic.c @@ -44,7 +44,7 @@ * */ -#include +#include #include #include #include diff --git a/crypto/rsa.c b/crypto/rsa.c index d9be9e86097e1c..78b28d14ced397 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -98,14 +98,13 @@ static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c) goto err_free_mpi; /* (2iii) h = (m_1 - m_2) * qInv mod p */ - mpi_sub(m12_or_qh, m_or_m1_or_h, m2); - mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p); + ret = mpi_sub(m12_or_qh, m_or_m1_or_h, m2) ?: + mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p); /* (2iv) m = m_2 + q * h */ - mpi_mul(m12_or_qh, key->q, m_or_m1_or_h); - mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n); - - ret = 0; + ret = ret ?: + mpi_mul(m12_or_qh, key->q, m_or_m1_or_h) ?: + mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n); err_free_mpi: mpi_free(m12_or_qh); @@ -236,6 +235,7 @@ static int rsa_check_key_length(unsigned int len) static int rsa_check_exponent_fips(MPI e) { MPI e_max = NULL; + int err; /* check if odd */ if (!mpi_test_bit(e, 0)) { @@ -250,7 +250,12 @@ static int rsa_check_exponent_fips(MPI e) e_max = mpi_alloc(0); if (!e_max) return -ENOMEM; - mpi_set_bit(e_max, 256); + + err = mpi_set_bit(e_max, 256); + if (err) { + mpi_free(e_max); + return err; + } if (mpi_cmp(e, e_max) >= 0) { mpi_free(e_max); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index c6bca47931e21e..f6ef187be6fe7a 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index bf147b01e31354..b00521f1a6d455 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 3e4069935b53ba..b103642b56ea12 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include /* * On some 32-bit architectures (h8300), GCC ends up using diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index be70e76d6d8623..ed81813bd42013 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = { 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38, diff --git a/crypto/simd.c b/crypto/simd.c index 2aa4f72e224fd9..b07721d1f3f6ea 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm) return 0; } -struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, +struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, + const char *algname, const char *drvname, const char *basename) { struct simd_skcipher_alg *salg; - struct crypto_skcipher *tfm; - struct skcipher_alg *ialg; struct skcipher_alg *alg; int err; - tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - ialg = crypto_skcipher_alg(tfm); - salg = kzalloc(sizeof(*salg), GFP_KERNEL); if (!salg) { salg = ERR_PTR(-ENOMEM); - goto out_put_tfm; + goto out; } salg->ialg_name = basename; @@ -195,30 +187,16 @@ struct 
simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, if (err) goto out_free_salg; -out_put_tfm: - crypto_free_skcipher(tfm); +out: return salg; out_free_salg: kfree(salg); salg = ERR_PTR(err); - goto out_put_tfm; + goto out; } EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); -struct simd_skcipher_alg *simd_skcipher_create(const char *algname, - const char *basename) -{ - char drvname[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= - CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-ENAMETOOLONG); - - return simd_skcipher_create_compat(algname, drvname, basename); -} -EXPORT_SYMBOL_GPL(simd_skcipher_create); - void simd_skcipher_free(struct simd_skcipher_alg *salg) { crypto_unregister_skcipher(&salg->alg); @@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto err_unregister; @@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm) return 0; } -struct simd_aead_alg *simd_aead_create_compat(const char *algname, - const char *drvname, - const char *basename) +static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg, + const char *algname, + const char *drvname, + const char *basename) { struct simd_aead_alg *salg; - struct crypto_aead *tfm; - struct aead_alg *ialg; struct aead_alg *alg; int err; - tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - - ialg = crypto_aead_alg(tfm); - salg = kzalloc(sizeof(*salg), GFP_KERNEL); if (!salg) { salg = ERR_PTR(-ENOMEM); - goto out_put_tfm; + goto out; } salg->ialg_name = basename; @@ -442,36 +412,20 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, if (err) goto out_free_salg; -out_put_tfm: - crypto_free_aead(tfm); +out: return salg; out_free_salg: kfree(salg); salg = ERR_PTR(err); - goto out_put_tfm; -} -EXPORT_SYMBOL_GPL(simd_aead_create_compat); - -struct simd_aead_alg *simd_aead_create(const char *algname, - const char *basename) -{ - char drvname[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= - CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-ENAMETOOLONG); - - return simd_aead_create_compat(algname, drvname, basename); + goto out; } -EXPORT_SYMBOL_GPL(simd_aead_create); -void simd_aead_free(struct simd_aead_alg *salg) +static void simd_aead_free(struct simd_aead_alg *salg) { crypto_unregister_aead(&salg->alg); kfree(salg); } -EXPORT_SYMBOL_GPL(simd_aead_free); int simd_register_aeads_compat(struct aead_alg *algs, int count, struct simd_aead_alg **simd_algs) @@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count, algname = algs[i].base.cra_name + 2; drvname = algs[i].base.cra_driver_name + 2; basename = algs[i].base.cra_driver_name; - simd = simd_aead_create_compat(algname, drvname, basename); + simd = simd_aead_create_compat(algs + i, algname, drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto err_unregister; diff --git a/crypto/sm3.c b/crypto/sm3.c index d473e358a873a8..18c2fb73ba16e9 100644 --- a/crypto/sm3.c +++ b/crypto/sm3.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include static const u32 ____cacheline_aligned K[64] = { diff 
--git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index a215c1c37e730a..a2d23a46924eab 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, diff --git a/crypto/sm4.c b/crypto/sm4.c index 2c44193bc27e4a..f4cd7edc11f054 100644 --- a/crypto/sm4.c +++ b/crypto/sm4.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include static const u32 ____cacheline_aligned fk[4] = { diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c index 560eba37dc55ea..7df86369ac0091 100644 --- a/crypto/sm4_generic.c +++ b/crypto/sm4_generic.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include /** * sm4_setkey - Set the SM4 key. diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f02cb075bd6811..ee8da628e9da46 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1939,6 +1939,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs, atfm = crypto_alloc_ahash(driver, type, mask); if (IS_ERR(atfm)) { + if (PTR_ERR(atfm) == -ENOENT) + return -ENOENT; pr_err("alg: hash: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(atfm)); return PTR_ERR(atfm); @@ -2703,6 +2705,8 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_aead(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; pr_err("alg: aead: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3280,6 +3284,8 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_skcipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3693,6 +3699,8 @@ static int alg_test_cipher(const struct alg_test_desc *desc, tfm = crypto_alloc_cipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; printk(KERN_ERR "alg: cipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3717,6 +3725,8 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) { acomp = crypto_alloc_acomp(driver, type, mask); if (IS_ERR(acomp)) { + if (PTR_ERR(acomp) == -ENOENT) + return -ENOENT; pr_err("alg: acomp: Failed to load transform for %s: %ld\n", driver, PTR_ERR(acomp)); return PTR_ERR(acomp); @@ -3729,6 +3739,8 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, } else { comp = crypto_alloc_comp(driver, type, mask); if (IS_ERR(comp)) { + if (PTR_ERR(comp) == -ENOENT) + return -ENOENT; pr_err("alg: comp: Failed to load transform for %s: %ld\n", driver, PTR_ERR(comp)); return PTR_ERR(comp); @@ -3805,6 +3817,8 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, rng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(rng)) { + if (PTR_ERR(rng) == -ENOENT) + return -ENOENT; printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(rng)); return PTR_ERR(rng); @@ -3832,10 +3846,13 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, drng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(drng)) { + if (PTR_ERR(drng) == -ENOENT) + goto out_no_rng; printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); +out_no_rng: 
kfree_sensitive(buf); - return -ENOMEM; + return PTR_ERR(drng); } test_data.testentropy = &testentropy; @@ -4077,6 +4094,8 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_kpp(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -4305,6 +4324,8 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_akcipher(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) + return -ENOENT; pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c index 557915e4062d7c..19f2b365e140b2 100644 --- a/crypto/twofish_generic.c +++ b/crypto/twofish_generic.c @@ -24,7 +24,7 @@ * Third Edition. */ -#include +#include #include #include #include diff --git a/crypto/vmac.c b/crypto/vmac.c index 0a1d8efa6c1a6f..bd9d70eac22e00 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -28,7 +28,7 @@ * Last modified: 17 APR 08, 1700 PDT */ -#include +#include #include #include #include diff --git a/crypto/xor.c b/crypto/xor.c index a1363162978c77..f39621a57bb33c 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -83,33 +83,30 @@ static void __init do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) { int speed; - int i, j; - ktime_t min, start, diff; + unsigned long reps; + ktime_t min, start, t0; tmpl->next = template_list; template_list = tmpl; preempt_disable(); - min = (ktime_t)S64_MAX; - for (i = 0; i < 3; i++) { - start = ktime_get(); - for (j = 0; j < REPS; j++) { - mb(); /* prevent loop optimization */ - tmpl->do_2(BENCH_SIZE, b1, b2); - mb(); - } - diff = ktime_sub(ktime_get(), start); - if (diff < min) - min = diff; - } + reps = 0; + t0 = ktime_get(); + /* delay start until time has advanced */ + while ((start = ktime_get()) == t0) + cpu_relax(); + do { + mb(); /* prevent loop optimization */ + tmpl->do_2(BENCH_SIZE, b1, b2); + mb(); + } while (reps++ < REPS || (t0 = ktime_get()) == start); + min = ktime_sub(t0, start); preempt_enable(); // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s] - if (!min) - min = 1; - speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); + speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); tmpl->speed = speed; pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed); diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c index 55d1c8a761273e..ac206ad4184df1 100644 --- a/crypto/xxhash_generic.c +++ b/crypto/xxhash_generic.c @@ -4,7 +4,7 @@ #include #include #include -#include +#include #define XXHASH64_BLOCK_SIZE 32 #define XXHASH64_DIGEST_SIZE 8 diff --git a/drivers/Makefile b/drivers/Makefile index fe9ceb0d2288ad..45d1c3e630f754 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -17,6 +17,9 @@ obj-$(CONFIG_PINCTRL) += pinctrl/ obj-$(CONFIG_GPIOLIB) += gpio/ obj-y += pwm/ +# LEDs must come before PCI, it is needed by NPEM driver +obj-y += leds/ + obj-y += pci/ obj-$(CONFIG_PARISC) += parisc/ @@ -130,7 +133,6 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/ obj-y += mmc/ obj-y += ufs/ obj-$(CONFIG_MEMSTICK) += memstick/ -obj-y += leds/ obj-$(CONFIG_INFINIBAND) += infiniband/ obj-y += firmware/ obj-$(CONFIG_CRYPTO) += crypto/ diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c index 16c3edb8c46ee1..aa826033b0ceb9 100644 --- a/drivers/accel/drm_accel.c +++ b/drivers/accel/drm_accel.c @@ 
-8,7 +8,7 @@ #include #include -#include +#include #include #include @@ -18,8 +18,7 @@ #include #include -static DEFINE_SPINLOCK(accel_minor_lock); -static struct idr accel_minors_idr; +DEFINE_XARRAY_ALLOC(accel_minors_xa); static struct dentry *accel_debugfs_root; @@ -117,99 +116,6 @@ void accel_set_device_instance_params(struct device *kdev, int index) kdev->type = &accel_sysfs_device_minor; } -/** - * accel_minor_alloc() - Allocates a new accel minor - * - * This function access the accel minors idr and allocates from it - * a new id to represent a new accel minor - * - * Return: A new id on success or error code in case idr_alloc failed - */ -int accel_minor_alloc(void) -{ - unsigned long flags; - int r; - - spin_lock_irqsave(&accel_minor_lock, flags); - r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT); - spin_unlock_irqrestore(&accel_minor_lock, flags); - - return r; -} - -/** - * accel_minor_remove() - Remove an accel minor - * @index: The minor id to remove. - * - * This function access the accel minors idr and removes from - * it the member with the id that is passed to this function. - */ -void accel_minor_remove(int index) -{ - unsigned long flags; - - spin_lock_irqsave(&accel_minor_lock, flags); - idr_remove(&accel_minors_idr, index); - spin_unlock_irqrestore(&accel_minor_lock, flags); -} - -/** - * accel_minor_replace() - Replace minor pointer in accel minors idr. - * @minor: Pointer to the new minor. - * @index: The minor id to replace. - * - * This function access the accel minors idr structure and replaces the pointer - * that is associated with an existing id. Because the minor pointer can be - * NULL, we need to explicitly pass the index. - * - * Return: 0 for success, negative value for error - */ -void accel_minor_replace(struct drm_minor *minor, int index) -{ - unsigned long flags; - - spin_lock_irqsave(&accel_minor_lock, flags); - idr_replace(&accel_minors_idr, minor, index); - spin_unlock_irqrestore(&accel_minor_lock, flags); -} - -/* - * Looks up the given minor-ID and returns the respective DRM-minor object. The - * refence-count of the underlying device is increased so you must release this - * object with accel_minor_release(). - * - * The object can be only a drm_minor that represents an accel device. - * - * As long as you hold this minor, it is guaranteed that the object and the - * minor->dev pointer will stay valid! However, the device may get unplugged and - * unregistered while you hold the minor. 
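For context, the IDR helpers being removed above are what the new DEFINE_XARRAY_ALLOC(accel_minors_xa) makes unnecessary: an allocating XArray carries its own internal lock, so ID allocation, lookup and removal need no separate spinlock. A rough, illustrative equivalent of the removed helpers is sketched below; the names and the 0..63 limit are invented for the example and are not taken from the patch.

#include <linux/gfp.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_minors_xa);

/* Allocate a free index in [0, 63] and store @minor there. */
static int example_minor_alloc(void *minor, u32 *id)
{
        return xa_alloc(&example_minors_xa, id, minor,
                        XA_LIMIT(0, 63), GFP_KERNEL);
}

/* Look up the entry stored at @id (no extra locking required). */
static void *example_minor_lookup(u32 id)
{
        return xa_load(&example_minors_xa, id);
}

/* Remove the entry at @id. */
static void example_minor_remove(u32 id)
{
        xa_erase(&example_minors_xa, id);
}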
- */ -static struct drm_minor *accel_minor_acquire(unsigned int minor_id) -{ - struct drm_minor *minor; - unsigned long flags; - - spin_lock_irqsave(&accel_minor_lock, flags); - minor = idr_find(&accel_minors_idr, minor_id); - if (minor) - drm_dev_get(minor->dev); - spin_unlock_irqrestore(&accel_minor_lock, flags); - - if (!minor) { - return ERR_PTR(-ENODEV); - } else if (drm_dev_is_unplugged(minor->dev)) { - drm_dev_put(minor->dev); - return ERR_PTR(-ENODEV); - } - - return minor; -} - -static void accel_minor_release(struct drm_minor *minor) -{ - drm_dev_put(minor->dev); -} - /** * accel_open - open method for ACCEL file * @inode: device inode @@ -227,7 +133,7 @@ int accel_open(struct inode *inode, struct file *filp) struct drm_minor *minor; int retcode; - minor = accel_minor_acquire(iminor(inode)); + minor = drm_minor_acquire(&accel_minors_xa, iminor(inode)); if (IS_ERR(minor)) return PTR_ERR(minor); @@ -246,7 +152,7 @@ int accel_open(struct inode *inode, struct file *filp) err_undo: atomic_dec(&dev->open_count); - accel_minor_release(minor); + drm_minor_release(minor); return retcode; } EXPORT_SYMBOL_GPL(accel_open); @@ -257,7 +163,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp) struct drm_minor *minor; int err; - minor = accel_minor_acquire(iminor(inode)); + minor = drm_minor_acquire(&accel_minors_xa, iminor(inode)); if (IS_ERR(minor)) return PTR_ERR(minor); @@ -274,7 +180,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp) err = 0; out: - accel_minor_release(minor); + drm_minor_release(minor); return err; } @@ -290,15 +196,13 @@ void accel_core_exit(void) unregister_chrdev(ACCEL_MAJOR, "accel"); debugfs_remove(accel_debugfs_root); accel_sysfs_destroy(); - idr_destroy(&accel_minors_idr); + WARN_ON(!xa_empty(&accel_minors_xa)); } int __init accel_core_init(void) { int ret; - idr_init(&accel_minors_idr); - ret = accel_sysfs_init(); if (ret < 0) { DRM_ERROR("Cannot create ACCEL class: %d\n", ret); diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index de3d661163756c..ede6165e09d90d 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -60,6 +60,10 @@ static struct { { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, }; +/* Production fw_names from the table above */ +MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin"); +MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin"); + static int ivpu_fw_request(struct ivpu_device *vdev) { int ret = -ENOENT; diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index 580b29ed190217..bf10156c334e71 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -447,9 +447,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev) ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (ret) return ret; - ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX); - if (ret) - return ret; + dma_set_max_seg_size(&pdev->dev, UINT_MAX); qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]); if (IS_ERR(qdev->bar_0)) diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index e3a7c2aedd5f00..d67f63d93b2abd 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -451,7 +451,7 @@ config ACPI_HED config ACPI_BGRT bool "Boottime Graphics Resource Table support" - depends on EFI && (X86 || ARM64) + depends on EFI && (X86 || ARM64 || LOONGARCH) help This driver adds support for exposing the ACPI Boottime Graphics Resource Table, which allows the operating system to obtain diff --git 
a/drivers/acpi/ac.c b/drivers/acpi/ac.c index eaa70b23dd0b08..7c5b040a83e8d6 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -213,8 +213,8 @@ static int acpi_ac_probe(struct platform_device *pdev) return -ENOMEM; ac->device = adev; - strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); - strcpy(acpi_device_class(adev), ACPI_AC_CLASS); + strscpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); + strscpy(acpi_device_class(adev), ACPI_AC_CLASS); platform_set_drvdata(pdev, ac); diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 80f945cbec8a7c..800f978684481e 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -118,6 +118,11 @@ static const struct apd_device_desc wt_i2c_desc = { .fixed_clk_rate = 150000000, }; +static const struct apd_device_desc wt_i3c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 125000000, +}; + static struct property_entry uart_properties[] = { PROPERTY_ENTRY_U32("reg-io-width", 4), PROPERTY_ENTRY_U32("reg-shift", 2), @@ -231,6 +236,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "AMD0030", }, { "AMD0040", APD_ADDR(fch_misc_desc)}, { "AMDI0010", APD_ADDR(wt_i2c_desc) }, + { "AMDI0015", APD_ADDR(wt_i3c_desc) }, { "AMDI0019", APD_ADDR(wt_i2c_desc) }, { "AMDI0020", APD_ADDR(cz_uart_desc) }, { "AMDI0022", APD_ADDR(cz_uart_desc) }, diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 350d3a8928896d..42b7220d4cfda7 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -136,8 +136,10 @@ static void exit_round_robin(unsigned int tsk_index) { struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); - cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); - tsk_in_cpu[tsk_index] = -1; + if (tsk_in_cpu[tsk_index] != -1) { + cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); + tsk_in_cpu[tsk_index] = -1; + } } static unsigned int idle_pct = 5; /* percentage */ @@ -428,8 +430,8 @@ static int acpi_pad_probe(struct platform_device *pdev) struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); acpi_status status; - strcpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); - strcpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS); + strscpy(acpi_device_name(adev), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); + strscpy(acpi_device_class(adev), ACPI_PROCESSOR_AGGREGATOR_CLASS); status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, acpi_pad_notify, adev); diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 9916cc7ced39b2..7cf6101cb4c731 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -436,8 +436,8 @@ static int acpi_processor_add(struct acpi_device *device, } pr->handle = device->handle; - strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); + strscpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); device->driver_data = pr; result = acpi_processor_get_info(device); @@ -985,7 +985,7 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, memcpy(&info->states[++last_index], &cx, sizeof(cx)); } - acpi_handle_info(handle, "Found %d idle states\n", last_index); + acpi_handle_debug(handle, "Found %d idle states\n", last_index); info->count = last_index; diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index f4c90fc99be2fb..309ce8efb4f684 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ 
-29,11 +29,7 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX); - -#if (!ACPI_REDUCED_HARDWARE) -ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); - -#endif /* !ACPI_REDUCED_HARDWARE */ +ACPI_INIT_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS, NULL); /* These addresses are calculated from the FADT Event Block addresses */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 02012168a087a9..6f4fe47c955bd0 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -1090,6 +1090,8 @@ struct acpi_port_info { #define ACPI_ADDRESS_TYPE_IO_RANGE 1 #define ACPI_ADDRESS_TYPE_BUS_NUMBER_RANGE 2 +#define ACPI_ADDRESS_TYPE_PCC_NUMBER 0xA + /* Resource descriptor types and masks */ #define ACPI_RESOURCE_NAME_LARGE 0x80 diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index 2e442f5a312322..ef068f4c864aa5 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -450,7 +450,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_DSM", METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, - ACPI_TYPE_PACKAGE), + ACPI_TYPE_ANY) | ARG_COUNT_IS_MINIMUM, METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */ {{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER), diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c index 2b84ac093698a3..8dbab693204998 100644 --- a/drivers/acpi/acpica/dbconvert.c +++ b/drivers/acpi/acpica/dbconvert.c @@ -174,6 +174,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object) elements = ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS * sizeof(union acpi_object)); + if (!elements) + return (AE_NO_MEMORY); this = string; for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) { diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index 3729bf3b74f7a3..bb1be42daee107 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -17,7 +17,8 @@ ACPI_MODULE_NAME("exconvrt") /* Local prototypes */ static u32 -acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length); +acpi_ex_convert_to_ascii(u64 integer, + u16 base, u8 *string, u8 max_length, u8 leading_zeros); /******************************************************************************* * @@ -249,6 +250,7 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc, * base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX * string - Where the string is returned * data_width - Size of data item to be converted, in bytes + * leading_zeros - Allow leading zeros * * RETURN: Actual string length * @@ -257,7 +259,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc, ******************************************************************************/ static u32 -acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width) +acpi_ex_convert_to_ascii(u64 integer, + u16 base, u8 *string, u8 data_width, u8 leading_zeros) { u64 digit; u32 i; @@ -266,7 +269,8 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width) u32 hex_length; u32 decimal_length; u32 remainder; - u8 supress_zeros; + u8 supress_zeros = !leading_zeros; + u8 hex_char; ACPI_FUNCTION_ENTRY(); @@ -293,7 +297,6 @@ acpi_ex_convert_to_ascii(u64 
integer, u16 base, u8 *string, u8 data_width) break; } - supress_zeros = TRUE; /* No leading zeros */ remainder = 0; for (i = decimal_length; i > 0; i--) { @@ -328,8 +331,17 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width) /* Get one hex digit, most significant digits first */ - string[k] = (u8) + hex_char = (u8) acpi_ut_hex_to_ascii_char(integer, ACPI_MUL_4(j)); + + /* Supress leading zeros until the first non-zero character */ + + if (hex_char == ACPI_ASCII_ZERO && supress_zeros) { + continue; + } + + supress_zeros = FALSE; + string[k] = hex_char; k++; } break; @@ -379,6 +391,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, u32 string_length = 0; u16 base = 16; u8 separator = ','; + u8 leading_zeros; ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc); @@ -400,14 +413,26 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, * Make room for the maximum decimal number size */ string_length = ACPI_MAX_DECIMAL_DIGITS; + leading_zeros = FALSE; base = 10; break; + case ACPI_EXPLICIT_CONVERT_HEX: + /* + * From to_hex_string. + * + * Supress leading zeros and append "0x" + */ + string_length = + ACPI_MUL_2(acpi_gbl_integer_byte_width) + 2; + leading_zeros = FALSE; + break; default: /* Two hex string characters for each integer byte */ string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width); + leading_zeros = TRUE; break; } @@ -422,17 +447,32 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, } new_buf = return_desc->buffer.pointer; + if (type == ACPI_EXPLICIT_CONVERT_HEX) { + + /* Append "0x" prefix for explicit hex conversion */ + + *new_buf++ = '0'; + *new_buf++ = 'x'; + } /* Convert integer to string */ string_length = acpi_ex_convert_to_ascii(obj_desc->integer.value, base, new_buf, - acpi_gbl_integer_byte_width); + acpi_gbl_integer_byte_width, + leading_zeros); /* Null terminate at the correct place */ return_desc->string.length = string_length; + if (type == ACPI_EXPLICIT_CONVERT_HEX) { + + /* Take "0x" prefix into account */ + + return_desc->string.length += 2; + } + new_buf[string_length] = 0; break; @@ -448,6 +488,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, * From ACPI: "If the input is a buffer, it is converted to a * a string of decimal values separated by commas." 
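The exconvrt.c changes above thread a leading_zeros flag through acpi_ex_convert_to_ascii() so that ToHexString-style conversions drop the most significant zero nibbles and gain a "0x" prefix, while the buffer paths keep the zero-padded form. A standalone illustration of the zero-suppression idea follows; this is plain userspace C written for this note, not the ACPICA code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static size_t u64_to_hex(uint64_t value, char *out, bool leading_zeros)
{
        static const char digits[] = "0123456789ABCDEF";
        bool suppress = !leading_zeros;
        size_t k = 0;

        for (int nibble = 15; nibble >= 0; nibble--) {
                char c = digits[(value >> (nibble * 4)) & 0xF];

                /* Skip leading zeros, but always keep the last digit. */
                if (suppress && c == '0' && nibble != 0)
                        continue;
                suppress = false;
                out[k++] = c;
        }
        out[k] = '\0';
        return k;
}

int main(void)
{
        char buf[32];

        u64_to_hex(0x1234, buf, true);
        printf("with leading zeros:    %s\n", buf);   /* 0000000000001234 */
        u64_to_hex(0x1234, buf, false);
        printf("without leading zeros: 0x%s\n", buf); /* 0x1234 */
        return 0;
}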
*/ + leading_zeros = FALSE; base = 10; /* @@ -475,6 +516,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, * * Each hex number is prefixed with 0x (11/2018) */ + leading_zeros = TRUE; separator = ' '; string_length = (obj_desc->buffer.length * 5); break; @@ -488,6 +530,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, * * Each hex number is prefixed with 0x (11/2018) */ + leading_zeros = TRUE; separator = ','; string_length = (obj_desc->buffer.length * 5); break; @@ -528,7 +571,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, new_buf += acpi_ex_convert_to_ascii((u64) obj_desc-> buffer.pointer[i], - base, new_buf, 1); + base, new_buf, 1, + leading_zeros); /* Each digit is separated by either a comma or space */ diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 08196fa17080e2..82b1fa2d201fed 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) if (info->connection_node) { second_desc = info->connection_node->object; + if (second_desc == NULL) { + break; + } if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_buffer_arguments(second_desc); diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index f665ffd9a396cf..2c384bd52b9c4b 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -133,14 +133,15 @@ acpi_status acpi_ex_system_do_stall(u32 how_long_us) * (ACPI specifies 100 usec as max, but this gives some slack in * order to support existing BIOSs) */ - ACPI_ERROR((AE_INFO, - "Time parameter is too large (%u)", how_long_us)); + ACPI_ERROR_ONCE((AE_INFO, + "Time parameter is too large (%u)", + how_long_us)); status = AE_AML_OPERAND_VALUE; } else { if (how_long_us > 100) { - ACPI_WARNING((AE_INFO, - "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.", - how_long_us)); + ACPI_WARNING_ONCE((AE_INFO, + "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.", + how_long_us)); } acpi_os_stall(how_long_us); } diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 36ea48f6411027..8dbf83aeb455d2 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -16,20 +16,11 @@ ACPI_MODULE_NAME("hwxfsleep") /* Local prototypes */ -#if (!ACPI_REDUCED_HARDWARE) static acpi_status acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs, acpi_physical_address physical_address, acpi_physical_address physical_address64); -#endif - -/* - * These functions are removed for the ACPI_REDUCED_HARDWARE case: - * acpi_set_firmware_waking_vector - * acpi_enter_sleep_state_s4bios - */ -#if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_hw_set_firmware_waking_vector @@ -115,6 +106,12 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address, ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector) +/* + * These functions are removed for the ACPI_REDUCED_HARDWARE case: + * acpi_enter_sleep_state_s4bios + */ + +#if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_enter_sleep_state_s4bios diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 422c074ed2897b..28582adfc0acaf 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ 
-25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state); static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state *parser_state); +static void acpi_ps_free_field_list(union acpi_parse_object *start); + /******************************************************************************* * * FUNCTION: acpi_ps_get_next_package_length @@ -683,6 +685,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state return_PTR(field); } +/******************************************************************************* + * + * FUNCTION: acpi_ps_free_field_list + * + * PARAMETERS: start - First Op in field list + * + * RETURN: None. + * + * DESCRIPTION: Free all Op objects inside a field list. + * + ******************************************************************************/ + +static void acpi_ps_free_field_list(union acpi_parse_object *start) +{ + union acpi_parse_object *cur = start; + union acpi_parse_object *next; + union acpi_parse_object *arg; + + while (cur) { + next = cur->common.next; + + /* AML_INT_CONNECTION_OP can have a single argument */ + + arg = acpi_ps_get_arg(cur, 0); + if (arg) { + acpi_ps_free_op(arg); + } + + acpi_ps_free_op(cur); + cur = next; + } +} + /******************************************************************************* * * FUNCTION: acpi_ps_get_next_arg @@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, while (parser_state->aml < parser_state->pkg_end) { field = acpi_ps_get_next_field(parser_state); if (!field) { + if (arg) { + acpi_ps_free_field_list(arg); + } + return_ACPI_STATUS(AE_NO_MEMORY); } @@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, acpi_ps_get_next_namepath(walk_state, parser_state, arg, ACPI_NOT_METHOD_CALL); + if (ACPI_FAILURE(status)) { + acpi_ps_free_op(arg); + return_ACPI_STATUS(status); + } } else { /* Single complex argument, nothing returned */ @@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, acpi_ps_get_next_namepath(walk_state, parser_state, arg, ACPI_POSSIBLE_METHOD_CALL); + if (ACPI_FAILURE(status)) { + acpi_ps_free_op(arg); + return_ACPI_STATUS(status); + } if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) { diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c index fff48001d7efbd..27384ee245f094 100644 --- a/drivers/acpi/acpica/rsaddr.c +++ b/drivers/acpi/acpica/rsaddr.c @@ -282,7 +282,8 @@ acpi_rs_get_address_common(struct acpi_resource *resource, /* Validate the Resource Type */ - if ((address.resource_type > 2) && (address.resource_type < 0xC0)) { + if ((address.resource_type > 2) && + (address.resource_type < 0xC0) && (address.resource_type != 0x0A)) { return (FALSE); } diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c index 611bc71c193f97..5b7d7074ce4fdc 100644 --- a/drivers/acpi/acpica/rsdump.c +++ b/drivers/acpi/acpica/rsdump.c @@ -48,6 +48,7 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource); static void acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table); +#ifdef ACPI_DEBUGGER /******************************************************************************* * * FUNCTION: acpi_rs_dump_resource_list @@ -160,6 +161,7 @@ void acpi_rs_dump_irq_list(u8 *route_table) prt_element, prt_element->length); } } +#endif /******************************************************************************* * diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 
15fa68a5ea6e25..dad7425fce3f10 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -18,7 +18,6 @@ ACPI_MODULE_NAME("tbutils") static acpi_physical_address acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size); -#if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_tb_initialize_facs @@ -56,7 +55,6 @@ acpi_status acpi_tb_initialize_facs(void) return (AE_OK); } -#endif /* !ACPI_REDUCED_HARDWARE */ /******************************************************************************* * diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 8d7736d2d2699c..c85bfa13ac1e2e 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -140,7 +140,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) (void) acpi_os_delete_semaphore (acpi_gbl_global_lock_semaphore); - acpi_gbl_global_lock_semaphore = NULL; + acpi_gbl_global_lock_semaphore = ACPI_SEMAPHORE_NULL; acpi_os_delete_mutex(object->mutex.os_mutex); acpi_gbl_global_lock_mutex = NULL; @@ -157,7 +157,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) object, object->event.os_semaphore)); (void)acpi_os_delete_semaphore(object->event.os_semaphore); - object->event.os_semaphore = NULL; + object->event.os_semaphore = ACPI_SEMAPHORE_NULL; break; case ACPI_TYPE_METHOD: diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 92fbaef161a73f..6d78504e9fbc7d 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -154,7 +154,7 @@ acpi_status acpi_ut_init_globals(void) /* Global Lock support */ - acpi_gbl_global_lock_semaphore = NULL; + acpi_gbl_global_lock_semaphore = ACPI_SEMAPHORE_NULL; acpi_gbl_global_lock_mutex = NULL; acpi_gbl_global_lock_acquired = FALSE; acpi_gbl_global_lock_handle = 0; diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 251bd396c6fd3f..99b85fd6eccf3f 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -75,6 +75,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = { {"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1}, /* Windows 10 version 1903 - Added 08/2019 */ {"Windows 2020", NULL, 0, ACPI_OSI_WIN_10_20H1}, /* Windows 10 version 2004 - Added 08/2021 */ {"Windows 2021", NULL, 0, ACPI_OSI_WIN_11}, /* Windows 11 - Added 01/2022 */ + {"Windows 2022", NULL, 0, ACPI_OSI_WIN_11_22H2}, /* Windows 11 version 22H2 - Added 04/2024 */ /* Feature Group Strings */ diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index 1915bec2b27993..70ae0afa793952 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -120,6 +120,18 @@ acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags) */ acpi_gbl_early_initialization = FALSE; + /* + * Obtain a permanent mapping for the FACS. This is required for the + * Global Lock and the Firmware Waking Vector + */ + if (!(flags & ACPI_NO_FACS_INIT)) { + status = acpi_tb_initialize_facs(); + if (ACPI_FAILURE(status)) { + ACPI_WARNING((AE_INFO, "Could not map the FACS table")); + return_ACPI_STATUS(status); + } + } + #if (!ACPI_REDUCED_HARDWARE) /* Enable ACPI mode */ @@ -137,18 +149,6 @@ acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags) } } - /* - * Obtain a permanent mapping for the FACS. 
This is required for the - * Global Lock and the Firmware Waking Vector - */ - if (!(flags & ACPI_NO_FACS_INIT)) { - status = acpi_tb_initialize_facs(); - if (ACPI_FAILURE(status)) { - ACPI_WARNING((AE_INFO, "Could not map the FACS table")); - return_ACPI_STATUS(status); - } - } - /* * Initialize ACPI Event handling (Fixed and General Purpose) * diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index c7c26872f4cec1..9c84f3da7c0900 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include "apei-internal.h" diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index 73903a497d73f7..5c22720f43ccb9 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include "apei-internal.h" diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c index 8b8be0c90709f9..a4e709937236f6 100644 --- a/drivers/acpi/apei/einj-cxl.c +++ b/drivers/acpi/apei/einj-cxl.c @@ -7,9 +7,9 @@ * * Author: Ben Cheatham */ -#include #include #include +#include #include "apei-internal.h" @@ -63,7 +63,7 @@ static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf) seg = bridge->domain_nr; bus = pbus->number; - *sbdf = (seg << 24) | (bus << 16) | dport_dev->devfn; + *sbdf = (seg << 24) | (bus << 16) | (dport_dev->devfn << 8); return 0; } diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c index 8bc71cdc2270ab..246076341e8cc0 100644 --- a/drivers/acpi/apei/erst-dbg.c +++ b/drivers/acpi/apei/erst-dbg.c @@ -199,7 +199,6 @@ static const struct file_operations erst_dbg_ops = { .read = erst_dbg_read, .write = erst_dbg_write, .unlocked_ioctl = erst_dbg_ioctl, - .llseek = no_llseek, }; static struct miscdevice erst_dbg_dev = { diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 623cc0cb4a6568..ada93cfde9ba1c 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -50,6 +49,7 @@ #include #include #include +#include #include #include "apei-internal.h" diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 1b39e9ae7ac178..4c745a26226b27 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -822,7 +822,7 @@ static struct iommu_iort_rmr_data *iort_rmr_alloc( return NULL; /* Create a copy of SIDs array to associate with this rmr_data */ - sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL); + sids_copy = kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL); if (!sids_copy) { kfree(rmr_data); return NULL; @@ -1703,6 +1703,13 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = { /* HiSilicon Hip09 Platform */ {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal, "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + /* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */ + {"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + {"HISI ", "HIP10C ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, + {"HISI ", "HIP11 ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, { } }; diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index da3a879d638a8d..65fa3444367a13 100644 --- a/drivers/acpi/battery.c +++ 
b/drivers/acpi/battery.c @@ -10,7 +10,6 @@ #define pr_fmt(fmt) "ACPI: battery: " fmt -#include #include #include #include @@ -22,7 +21,7 @@ #include #include -#include +#include #include #include @@ -50,8 +49,6 @@ MODULE_AUTHOR("Alexey Starikovskiy "); MODULE_DESCRIPTION("ACPI Battery Driver"); MODULE_LICENSE("GPL"); -static async_cookie_t async_cookie; -static bool battery_driver_registered; static int battery_bix_broken_package; static int battery_notification_delay_ms; static int battery_ac_is_broken; @@ -706,28 +703,35 @@ static LIST_HEAD(acpi_battery_list); static LIST_HEAD(battery_hook_list); static DEFINE_MUTEX(hook_mutex); -static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock) +static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook) { struct acpi_battery *battery; + /* * In order to remove a hook, we first need to * de-register all the batteries that are registered. */ - if (lock) - mutex_lock(&hook_mutex); list_for_each_entry(battery, &acpi_battery_list, list) { if (!hook->remove_battery(battery->bat, hook)) power_supply_changed(battery->bat); } - list_del(&hook->list); - if (lock) - mutex_unlock(&hook_mutex); + list_del_init(&hook->list); + pr_info("extension unregistered: %s\n", hook->name); } void battery_hook_unregister(struct acpi_battery_hook *hook) { - __battery_hook_unregister(hook, 1); + mutex_lock(&hook_mutex); + /* + * Ignore already unregistered battery hooks. This might happen + * if a battery hook was previously unloaded due to an error when + * adding a new battery. + */ + if (!list_empty(&hook->list)) + battery_hook_unregister_unlocked(hook); + + mutex_unlock(&hook_mutex); } EXPORT_SYMBOL_GPL(battery_hook_unregister); @@ -736,7 +740,6 @@ void battery_hook_register(struct acpi_battery_hook *hook) struct acpi_battery *battery; mutex_lock(&hook_mutex); - INIT_LIST_HEAD(&hook->list); list_add(&hook->list, &battery_hook_list); /* * Now that the driver is registered, we need @@ -753,7 +756,7 @@ void battery_hook_register(struct acpi_battery_hook *hook) * hooks. */ pr_err("extension failed to load: %s", hook->name); - __battery_hook_unregister(hook, 0); + battery_hook_unregister_unlocked(hook); goto end; } @@ -807,7 +810,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery) */ pr_err("error in extension, unloading: %s", hook_node->name); - __battery_hook_unregister(hook_node, 0); + battery_hook_unregister_unlocked(hook_node); } } mutex_unlock(&hook_mutex); @@ -840,7 +843,7 @@ static void __exit battery_hook_exit(void) * need to remove the hooks. 
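The battery.c rework above hinges on a small list idiom: battery_hook_unregister_unlocked() uses list_del_init() rather than list_del(), so a hook that was already unregistered (for instance after a failed add_battery callback) is left self-linked, and a later battery_hook_unregister() can detect that with list_empty() and return harmlessly. A minimal illustration of the idiom, with invented names and assuming the usual kernel list API:

#include <linux/list.h>

struct example_hook {
        struct list_head list;
};

static LIST_HEAD(example_hook_list);

static void example_hook_add(struct example_hook *hook)
{
        list_add(&hook->list, &example_hook_list);
}

static void example_hook_remove(struct example_hook *hook)
{
        /*
         * list_del_init() leaves the node pointing at itself, so calling
         * this twice on the same hook is safe: the second call sees an
         * empty (self-linked) node and does nothing.
         */
        if (!list_empty(&hook->list))
                list_del_init(&hook->list);
}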
*/ list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) { - __battery_hook_unregister(hook, 1); + battery_hook_unregister(hook); } mutex_destroy(&hook_mutex); } @@ -1207,7 +1210,7 @@ static int acpi_battery_update_retry(struct acpi_battery *battery) static int acpi_battery_add(struct acpi_device *device) { int result = 0; - struct acpi_battery *battery = NULL; + struct acpi_battery *battery; if (!device) return -EINVAL; @@ -1219,8 +1222,8 @@ static int acpi_battery_add(struct acpi_device *device) if (!battery) return -ENOMEM; battery->device = device; - strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); + strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS); device->driver_data = battery; mutex_init(&battery->lock); mutex_init(&battery->sysfs_lock); @@ -1260,7 +1263,7 @@ static int acpi_battery_add(struct acpi_device *device) static void acpi_battery_remove(struct acpi_device *device) { - struct acpi_battery *battery = NULL; + struct acpi_battery *battery; if (!device || !acpi_driver_data(device)) return; @@ -1311,37 +1314,23 @@ static struct acpi_driver acpi_battery_driver = { .remove = acpi_battery_remove, }, .drv.pm = &acpi_battery_pm, + .drv.probe_type = PROBE_PREFER_ASYNCHRONOUS, }; -static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) -{ - int result; - - if (acpi_quirk_skip_acpi_ac_and_battery()) - return; - - dmi_check_system(bat_dmi_table); - - result = acpi_bus_register_driver(&acpi_battery_driver); - battery_driver_registered = (result == 0); -} - static int __init acpi_battery_init(void) { - if (acpi_disabled) + if (acpi_disabled || acpi_quirk_skip_acpi_ac_and_battery()) return -ENODEV; - async_cookie = async_schedule(acpi_battery_init_async, NULL); - return 0; + dmi_check_system(bat_dmi_table); + + return acpi_bus_register_driver(&acpi_battery_driver); } static void __exit acpi_battery_exit(void) { - async_synchronize_cookie(async_cookie + 1); - if (battery_driver_registered) { - acpi_bus_unregister_driver(&acpi_battery_driver); - battery_hook_exit(); - } + acpi_bus_unregister_driver(&acpi_battery_driver); + battery_hook_exit(); } module_init(acpi_battery_init); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 284bc2e035804f..16917dc3ad604c 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1203,6 +1203,9 @@ static int __init acpi_bus_init_irq(void) case ACPI_IRQ_MODEL_LPIC: message = "LPIC"; break; + case ACPI_IRQ_MODEL_RINTC: + message = "RINTC"; + break; default: pr_info("Unknown interrupt routing model\n"); return -ENODEV; @@ -1459,6 +1462,7 @@ static int __init acpi_init(void) acpi_hest_init(); acpi_ghes_init(); acpi_arm_init(); + acpi_riscv_init(); acpi_scan_init(); acpi_ec_init(); acpi_debugfs_init(); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index cc61020756beb8..51470208e6daed 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -547,20 +547,20 @@ static int acpi_button_add(struct acpi_device *device) !strcmp(hid, ACPI_BUTTON_HID_POWERF)) { button->type = ACPI_BUTTON_TYPE_POWER; handler = acpi_button_notify; - strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER); + strscpy(name, ACPI_BUTTON_DEVICE_NAME_POWER, MAX_ACPI_DEVICE_NAME_LEN); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER); } else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) || !strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) { button->type = ACPI_BUTTON_TYPE_SLEEP; handler = acpi_button_notify; - 
strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP); + strscpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP, MAX_ACPI_DEVICE_NAME_LEN); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP); } else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) { button->type = ACPI_BUTTON_TYPE_LID; handler = acpi_lid_notify; - strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID); + strscpy(name, ACPI_BUTTON_DEVICE_NAME_LID, MAX_ACPI_DEVICE_NAME_LEN); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID); input->open = acpi_lid_input_open; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index dd3d3082c8c76a..b73b3aa92f3f68 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include @@ -103,6 +103,11 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); (cpc)->cpc_entry.reg.space_id == \ ACPI_ADR_SPACE_PLATFORM_COMM) +/* Check if a CPC register is in FFH */ +#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ + (cpc)->cpc_entry.reg.space_id == \ + ACPI_ADR_SPACE_FIXED_HARDWARE) + /* Check if a CPC register is in SystemMemory */ #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ (cpc)->cpc_entry.reg.space_id == \ @@ -171,8 +176,11 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); #define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width) /* Shift and apply the mask for CPC reads/writes */ -#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \ +#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \ GENMASK(((reg)->bit_width) - 1, 0)) +#define MASK_VAL_WRITE(reg, prev_val, val) \ + ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \ + ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset))) \ static ssize_t show_feedback_ctrs(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -859,6 +867,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) /* Store CPU Logical ID */ cpc_ptr->cpu_id = pr->id; + spin_lock_init(&cpc_ptr->rmw_lock); /* Parse PSD data for this CPU */ ret = acpi_get_psd(cpc_ptr, handle); @@ -1064,7 +1073,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) } if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - *val = MASK_VAL(reg, *val); + *val = MASK_VAL_READ(reg, *val); return 0; } @@ -1073,9 +1082,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) { int ret_val = 0; int size; + u64 prev_val; void __iomem *vaddr = NULL; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cpc_reg *reg = ®_res->cpc_entry.reg; + struct cpc_desc *cpc_desc; size = GET_BIT_WIDTH(reg); @@ -1108,8 +1119,34 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) return acpi_os_write_memory((acpi_physical_address)reg->address, val, size); - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - val = MASK_VAL(reg, val); + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + cpc_desc = per_cpu(cpc_desc_ptr, cpu); + if (!cpc_desc) { + pr_debug("No CPC descriptor for CPU:%d\n", cpu); + return -ENODEV; + } + + spin_lock(&cpc_desc->rmw_lock); + switch (size) { + case 8: + prev_val = readb_relaxed(vaddr); + break; + case 16: + prev_val = readw_relaxed(vaddr); + break; + case 32: + prev_val = readl_relaxed(vaddr); + break; + case 64: + prev_val = readq_relaxed(vaddr); + break; + default: + spin_unlock(&cpc_desc->rmw_lock); + return -EFAULT; + } + val = 
MASK_VAL_WRITE(reg, prev_val, val); + val |= prev_val; + } switch (size) { case 8: @@ -1136,6 +1173,9 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) break; } + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + spin_unlock(&cpc_desc->rmw_lock); + return ret_val; } @@ -1486,9 +1526,12 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable) /* after writing CPC, transfer the ownership of PCC to platform */ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); up_write(&pcc_ss_data->pcc_lock); + } else if (osc_cpc_flexible_adr_space_confirmed && + CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) { + ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf); } else { ret = -ENOTSUPP; - pr_debug("_CPC in PCC is not supported\n"); + pr_debug("_CPC in PCC and _CPC in FFH are not supported\n"); } return ret; diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 23373faa35ecd6..3961fc47152c0a 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -439,23 +439,33 @@ static ssize_t description_show(struct device *dev, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *str_obj; + acpi_status status; int result; - if (acpi_dev->pnp.str_obj == NULL) - return 0; + status = acpi_evaluate_object_typed(acpi_dev->handle, "_STR", + NULL, &buffer, + ACPI_TYPE_BUFFER); + if (ACPI_FAILURE(status)) + return -EIO; + + str_obj = buffer.pointer; /* * The _STR object contains a Unicode identifier for a device. * We need to convert to utf-8 so it can be displayed. */ result = utf16s_to_utf8s( - (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, - acpi_dev->pnp.str_obj->buffer.length, + (wchar_t *)str_obj->buffer.pointer, + str_obj->buffer.length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1); buf[result++] = '\n'; + kfree(str_obj); + return result; } static DEVICE_ATTR_RO(description); @@ -507,96 +517,97 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(status); -/** - * acpi_device_setup_files - Create sysfs attributes of an ACPI device. - * @dev: ACPI device object. 
- */ -int acpi_device_setup_files(struct acpi_device *dev) -{ - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_status status; - int result = 0; +static struct attribute *acpi_attrs[] = { + &dev_attr_path.attr, + &dev_attr_hid.attr, + &dev_attr_modalias.attr, + &dev_attr_description.attr, + &dev_attr_adr.attr, + &dev_attr_uid.attr, + &dev_attr_sun.attr, + &dev_attr_hrv.attr, + &dev_attr_status.attr, + &dev_attr_eject.attr, + &dev_attr_power_state.attr, + &dev_attr_real_power_state.attr, + NULL +}; +static bool acpi_show_attr(struct acpi_device *dev, const struct device_attribute *attr) +{ /* * Devices gotten from FADT don't have a "path" attribute */ - if (dev->handle) { - result = device_create_file(&dev->dev, &dev_attr_path); - if (result) - goto end; - } + if (attr == &dev_attr_path) + return dev->handle; - if (!list_empty(&dev->pnp.ids)) { - result = device_create_file(&dev->dev, &dev_attr_hid); - if (result) - goto end; + if (attr == &dev_attr_hid || attr == &dev_attr_modalias) + return !list_empty(&dev->pnp.ids); - result = device_create_file(&dev->dev, &dev_attr_modalias); - if (result) - goto end; - } + if (attr == &dev_attr_description) + return acpi_has_method(dev->handle, "_STR"); - /* - * If device has _STR, 'description' file is created - */ - if (acpi_has_method(dev->handle, "_STR")) { - status = acpi_evaluate_object(dev->handle, "_STR", - NULL, &buffer); - if (ACPI_FAILURE(status)) - buffer.pointer = NULL; - dev->pnp.str_obj = buffer.pointer; - result = device_create_file(&dev->dev, &dev_attr_description); - if (result) - goto end; - } + if (attr == &dev_attr_adr) + return dev->pnp.type.bus_address; - if (dev->pnp.type.bus_address) - result = device_create_file(&dev->dev, &dev_attr_adr); - if (acpi_device_uid(dev)) - result = device_create_file(&dev->dev, &dev_attr_uid); + if (attr == &dev_attr_uid) + return acpi_device_uid(dev); - if (acpi_has_method(dev->handle, "_SUN")) { - result = device_create_file(&dev->dev, &dev_attr_sun); - if (result) - goto end; - } + if (attr == &dev_attr_sun) + return acpi_has_method(dev->handle, "_SUN"); - if (acpi_has_method(dev->handle, "_HRV")) { - result = device_create_file(&dev->dev, &dev_attr_hrv); - if (result) - goto end; - } + if (attr == &dev_attr_hrv) + return acpi_has_method(dev->handle, "_HRV"); - if (acpi_has_method(dev->handle, "_STA")) { - result = device_create_file(&dev->dev, &dev_attr_status); - if (result) - goto end; - } + if (attr == &dev_attr_status) + return acpi_has_method(dev->handle, "_STA"); /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. 
*/ - if (acpi_has_method(dev->handle, "_EJ0")) { - result = device_create_file(&dev->dev, &dev_attr_eject); - if (result) - return result; - } + if (attr == &dev_attr_eject) + return acpi_has_method(dev->handle, "_EJ0"); - if (dev->flags.power_manageable) { - result = device_create_file(&dev->dev, &dev_attr_power_state); - if (result) - return result; + if (attr == &dev_attr_power_state) + return dev->flags.power_manageable; - if (dev->power.flags.power_resources) - result = device_create_file(&dev->dev, - &dev_attr_real_power_state); - } + if (attr == &dev_attr_real_power_state) + return dev->flags.power_manageable && dev->power.flags.power_resources; - acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data); + dev_warn_once(&dev->dev, "Unexpected attribute: %s\n", attr->attr.name); + return false; +} -end: - return result; +static umode_t acpi_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int attrno) +{ + struct acpi_device *dev = to_acpi_device(kobj_to_dev(kobj)); + + if (acpi_show_attr(dev, container_of(attr, struct device_attribute, attr))) + return attr->mode; + else + return 0; +} + +static const struct attribute_group acpi_group = { + .attrs = acpi_attrs, + .is_visible = acpi_attr_is_visible, +}; + +const struct attribute_group *acpi_groups[] = { + &acpi_group, + NULL +}; + +/** + * acpi_device_setup_files - Create sysfs attributes of an ACPI device. + * @dev: ACPI device object. + */ +void acpi_device_setup_files(struct acpi_device *dev) +{ + acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data); } /** @@ -606,41 +617,4 @@ int acpi_device_setup_files(struct acpi_device *dev) void acpi_device_remove_files(struct acpi_device *dev) { acpi_hide_nondev_subnodes(&dev->data); - - if (dev->flags.power_manageable) { - device_remove_file(&dev->dev, &dev_attr_power_state); - if (dev->power.flags.power_resources) - device_remove_file(&dev->dev, - &dev_attr_real_power_state); - } - - /* - * If device has _STR, remove 'description' file - */ - if (acpi_has_method(dev->handle, "_STR")) { - kfree(dev->pnp.str_obj); - device_remove_file(&dev->dev, &dev_attr_description); - } - /* - * If device has _EJ0, remove 'eject' file. 
- */ - if (acpi_has_method(dev->handle, "_EJ0")) - device_remove_file(&dev->dev, &dev_attr_eject); - - if (acpi_has_method(dev->handle, "_SUN")) - device_remove_file(&dev->dev, &dev_attr_sun); - - if (acpi_has_method(dev->handle, "_HRV")) - device_remove_file(&dev->dev, &dev_attr_hrv); - - if (acpi_device_uid(dev)) - device_remove_file(&dev->dev, &dev_attr_uid); - if (dev->pnp.type.bus_address) - device_remove_file(&dev->dev, &dev_attr_adr); - device_remove_file(&dev->dev, &dev_attr_modalias); - device_remove_file(&dev->dev, &dev_attr_hid); - if (acpi_has_method(dev->handle, "_STA")) - device_remove_file(&dev->dev, &dev_attr_status); - if (dev->handle) - device_remove_file(&dev->dev, &dev_attr_path); } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 38d2f6e6b12b4f..25399f6dde7e27 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -783,6 +783,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, unsigned long tmp; int ret = 0; + if (t->rdata) + memset(t->rdata, 0, t->rlen); + /* start transaction */ spin_lock_irqsave(&ec->lock, tmp); /* Enable GPE for command processing (IBF=0/OBF=1) */ @@ -819,8 +822,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) return -EINVAL; - if (t->rdata) - memset(t->rdata, 0, t->rlen); mutex_lock(&ec->mutex); if (ec->global_lock) { @@ -847,7 +848,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec) .wdata = NULL, .rdata = &d, .wlen = 0, .rlen = 1}; - return acpi_ec_transaction(ec, &t); + return acpi_ec_transaction_unlocked(ec, &t); } static int acpi_ec_burst_disable(struct acpi_ec *ec) @@ -857,7 +858,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec) .wlen = 0, .rlen = 0}; return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ? 
- acpi_ec_transaction(ec, &t) : 0; + acpi_ec_transaction_unlocked(ec, &t) : 0; } static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) @@ -873,6 +874,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) return result; } +static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data) +{ + int result; + u8 d; + struct transaction t = {.command = ACPI_EC_COMMAND_READ, + .wdata = &address, .rdata = &d, + .wlen = 1, .rlen = 1}; + + result = acpi_ec_transaction_unlocked(ec, &t); + *data = d; + return result; +} + static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) { u8 wdata[2] = { address, data }; @@ -883,6 +897,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) return acpi_ec_transaction(ec, &t); } +static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data) +{ + u8 wdata[2] = { address, data }; + struct transaction t = {.command = ACPI_EC_COMMAND_WRITE, + .wdata = wdata, .rdata = NULL, + .wlen = 2, .rlen = 0}; + + return acpi_ec_transaction_unlocked(ec, &t); +} + int ec_read(u8 addr, u8 *val) { int err; @@ -1323,6 +1347,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, struct acpi_ec *ec = handler_context; int result = 0, i, bytes = bits / 8; u8 *value = (u8 *)value64; + u32 glk; if ((address > 0xFF) || !value || !handler_context) return AE_BAD_PARAMETER; @@ -1330,13 +1355,25 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; + mutex_lock(&ec->mutex); + + if (ec->global_lock) { + acpi_status status; + + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); + if (ACPI_FAILURE(status)) { + result = -ENODEV; + goto unlock; + } + } + if (ec->busy_polling || bits > 8) acpi_ec_burst_enable(ec); for (i = 0; i < bytes; ++i, ++address, ++value) { result = (function == ACPI_READ) ? 
- acpi_ec_read(ec, address, value) : - acpi_ec_write(ec, address, *value); + acpi_ec_read_unlocked(ec, address, value) : + acpi_ec_write_unlocked(ec, address, *value); if (result < 0) break; } @@ -1344,6 +1381,12 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (ec->busy_polling || bits > 8) acpi_ec_burst_disable(ec); + if (ec->global_lock) + acpi_release_global_lock(glk); + +unlock: + mutex_unlock(&ec->mutex); + switch (result) { case -EINVAL: return AE_BAD_PARAMETER; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index aadd4c218b320e..ced7dff9a5dbfe 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -118,8 +118,9 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, int type, void (*release)(struct device *)); int acpi_tie_acpi_dev(struct acpi_device *adev); int acpi_device_add(struct acpi_device *device); -int acpi_device_setup_files(struct acpi_device *dev); +void acpi_device_setup_files(struct acpi_device *dev); void acpi_device_remove_files(struct acpi_device *dev); +extern const struct attribute_group *acpi_groups[]; void acpi_device_add_finalize(struct acpi_device *device); void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); bool acpi_device_is_enabled(const struct acpi_device *adev); diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index 44f91f2c6c5d86..bec0dcd1f9c389 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -17,6 +17,7 @@ #include #include #include +#include static nodemask_t nodes_found_map = NODE_MASK_NONE; diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index ff30ceca2203e3..630fe0a34bc6e7 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -288,7 +288,7 @@ static int acpi_reroute_boot_interrupt(struct pci_dev *dev, } #endif /* CONFIG_X86_IO_APIC */ -static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) +struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) { struct acpi_prt_entry *entry = NULL; struct pci_dev *bridge; diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index aa1038b8aec4ac..b727db968f3349 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -748,6 +748,8 @@ static int acpi_pci_link_add(struct acpi_device *device, if (result) kfree(link); + acpi_dev_clear_dependencies(device); + return result < 0 ? 
result : 1; } diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 860014b89b8ebe..58e10a9801148e 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -181,6 +181,18 @@ static struct mcfg_fixup mcfg_quirks[] = { LOONGSON_ECAM_MCFG("LOONGSON", 0), LOONGSON_ECAM_MCFG("\0", 1), LOONGSON_ECAM_MCFG("LOONGSON", 1), + LOONGSON_ECAM_MCFG("\0", 2), + LOONGSON_ECAM_MCFG("LOONGSON", 2), + LOONGSON_ECAM_MCFG("\0", 3), + LOONGSON_ECAM_MCFG("LOONGSON", 3), + LOONGSON_ECAM_MCFG("\0", 4), + LOONGSON_ECAM_MCFG("LOONGSON", 4), + LOONGSON_ECAM_MCFG("\0", 5), + LOONGSON_ECAM_MCFG("LOONGSON", 5), + LOONGSON_ECAM_MCFG("\0", 6), + LOONGSON_ECAM_MCFG("LOONGSON", 6), + LOONGSON_ECAM_MCFG("\0", 7), + LOONGSON_ECAM_MCFG("LOONGSON", 7), #endif /* LOONGARCH */ }; diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c index ebd03e4729555a..0d1a82eeb4b0b6 100644 --- a/drivers/acpi/pmic/tps68470_pmic.c +++ b/drivers/acpi/pmic/tps68470_pmic.c @@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev) struct tps68470_pmic_opregion *opregion; acpi_status status; - if (!dev || !tps68470_regmap) { - dev_warn(dev, "dev or regmap is NULL\n"); - return -EINVAL; - } + if (!tps68470_regmap) + return dev_err_probe(dev, -EINVAL, "regmap is missing\n"); if (!handle) { dev_warn(dev, "acpi handle is NULL\n"); diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index c78453c74ef5aa..1cfaa5957ac4d8 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -214,6 +214,30 @@ static struct prm_handler_info *find_prm_handler(const guid_t *guid) #define UPDATE_LOCK_ALREADY_HELD 4 #define UPDATE_UNLOCK_WITHOUT_LOCK 5 +int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer) +{ + struct prm_handler_info *handler = find_prm_handler(&handler_guid); + struct prm_module_info *module = find_prm_module(&handler_guid); + struct prm_context_buffer context; + efi_status_t status; + + if (!module || !handler) + return -ENODEV; + + memset(&context, 0, sizeof(context)); + ACPI_COPY_NAMESEG(context.signature, "PRMC"); + context.identifier = handler->guid; + context.static_data_buffer = handler->static_data_buffer_addr; + context.mmio_ranges = module->mmio_info; + + status = efi_call_acpi_prm_handler(handler->handler_addr, + (u64)param_buffer, + &context); + + return efi_status_to_err(status); +} +EXPORT_SYMBOL_GPL(acpi_call_prm_handler); + /* * This is the PlatformRtMechanism opregion space handler. * @function: indicates the read/write. 
In fact as the PlatformRtMechanism diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index df5d5a554b388b..3d74ebe9dbd804 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), }, }, + { + /* Asus Vivobook X1704VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"), + }, + }, { /* Asus ExpertBook B1402CBA */ .matches = { @@ -504,17 +511,24 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { }, }, { - /* Asus Vivobook E1504GA */ + /* Asus ExpertBook B2502CVA */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "E1504GA"), + DMI_MATCH(DMI_BOARD_NAME, "B2502CVA"), + }, + }, + { + /* Asus Vivobook Go E1404GA* */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "E1404GA"), }, }, { - /* Asus Vivobook E1504GAB */ + /* Asus Vivobook E1504GA* */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "E1504GAB"), + DMI_MATCH(DMI_BOARD_NAME, "E1504GA"), }, }, { @@ -554,6 +568,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { * to have a working keyboard. */ static const struct dmi_system_id irq1_edge_low_force_override[] = { + { + /* MECHREV Jiaolong17KS Series GM7XG0M */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"), + }, + }, { /* XMG APEX 17 (M23) */ .matches = { @@ -572,6 +592,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = { DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), }, }, + { + /* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), + }, + }, { /* TongFang GMxXGxx sold as Eluktronics Inc. 
RP-15 */ .matches = { diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile index 86b0925f612d98..a96fdf1e2cb810 100644 --- a/drivers/acpi/riscv/Makefile +++ b/drivers/acpi/riscv/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y += rhct.o +obj-y += rhct.o init.o irq.o obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c new file mode 100644 index 00000000000000..5ef97905a72759 --- /dev/null +++ b/drivers/acpi/riscv/init.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023-2024, Ventana Micro Systems Inc + * Author: Sunil V L + */ + +#include +#include "init.h" + +void __init acpi_riscv_init(void) +{ + riscv_acpi_init_gsi_mapping(); +} diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h new file mode 100644 index 00000000000000..0b9a07e4031f0d --- /dev/null +++ b/drivers/acpi/riscv/init.h @@ -0,0 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#include + +void __init riscv_acpi_init_gsi_mapping(void); diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c new file mode 100644 index 00000000000000..cced960c2aeff2 --- /dev/null +++ b/drivers/acpi/riscv/irq.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023-2024, Ventana Micro Systems Inc + * Author: Sunil V L + */ + +#include +#include +#include + +#include "init.h" + +struct riscv_ext_intc_list { + acpi_handle handle; + u32 gsi_base; + u32 nr_irqs; + u32 nr_idcs; + u32 id; + u32 type; + struct list_head list; +}; + +struct acpi_irq_dep_ctx { + int rc; + unsigned int index; + acpi_handle handle; +}; + +LIST_HEAD(ext_intc_list); + +static int irqchip_cmp_func(const void *in0, const void *in1) +{ + struct acpi_probe_entry *elem0 = (struct acpi_probe_entry *)in0; + struct acpi_probe_entry *elem1 = (struct acpi_probe_entry *)in1; + + return (elem0->type > elem1->type) - (elem0->type < elem1->type); +} + +/* + * On RISC-V, RINTC structures in MADT should be probed before any other + * interrupt controller structures and IMSIC before APLIC. The interrupt + * controller subtypes in MADT of ACPI spec for RISC-V are defined in + * the incremental order like RINTC(24)->IMSIC(25)->APLIC(26)->PLIC(27). + * Hence, simply sorting the subtypes in incremental order will + * establish the required order. 
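irqchip_cmp_func() above orders the MADT probe entries with the branch-free three-way comparison (a > b) - (a < b), which yields -1, 0 or 1 and avoids the overflow pitfalls of returning a plain subtraction. A standalone demonstration of how sorting by subtype ID produces the RINTC(24), IMSIC(25), APLIC(26), PLIC(27) order described in the comment; plain userspace C written for this note, not kernel code.

#include <stdio.h>
#include <stdlib.h>

static int cmp_u32(const void *p0, const void *p1)
{
        unsigned int a = *(const unsigned int *)p0;
        unsigned int b = *(const unsigned int *)p1;

        return (a > b) - (a < b);
}

int main(void)
{
        /* MADT subtype IDs: PLIC(27), RINTC(24), APLIC(26), IMSIC(25) */
        unsigned int types[] = { 27, 24, 26, 25 };
        size_t n = sizeof(types) / sizeof(types[0]);

        qsort(types, n, sizeof(types[0]), cmp_u32);
        for (size_t i = 0; i < n; i++)
                printf("%u\n", types[i]);       /* prints 24 25 26 27 */
        return 0;
}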
+ */ +void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) +{ + struct acpi_probe_entry *ape = ap_head; + + if (nr == 1 || !ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) + return; + sort(ape, nr, sizeof(*ape), irqchip_cmp_func, NULL); +} + +static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i, *tmp; + + list_for_each_safe(i, tmp, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi_base == ext_intc_element->gsi_base) { + ext_intc_element->handle = handle; + return AE_OK; + } + } + + return AE_NOT_FOUND; +} + +int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (ext_intc_element->handle == ACPI_HANDLE_FWNODE(fwnode)) { + *gsi_base = ext_intc_element->gsi_base; + *id = ext_intc_element->id; + *nr_irqs = ext_intc_element->nr_irqs; + if (nr_idcs) + *nr_idcs = ext_intc_element->nr_idcs; + + return 0; + } + } + + return -ENODEV; +} + +struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct acpi_device *adev; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi >= ext_intc_element->gsi_base && + gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) { + adev = acpi_fetch_acpi_dev(ext_intc_element->handle); + if (!adev) + return NULL; + + return acpi_fwnode_handle(adev); + } + } + + return NULL; +} + +static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs, + u32 id, u32 type) +{ + struct riscv_ext_intc_list *ext_intc_element; + + ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL); + if (!ext_intc_element) + return -ENOMEM; + + ext_intc_element->gsi_base = gsi_base; + ext_intc_element->nr_irqs = nr_irqs; + ext_intc_element->nr_idcs = nr_idcs; + ext_intc_element->id = id; + list_add_tail(&ext_intc_element->list, &ext_intc_list); + return 0; +} + +static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level, + void *context, void **return_value) +{ + acpi_status status; + u64 gbase; + + if (!acpi_has_method(handle, "_GSB")) { + acpi_handle_err(handle, "_GSB method not found\n"); + return AE_ERROR; + } + + status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to evaluate _GSB method\n"); + return status; + } + + status = riscv_acpi_update_gsi_handle((u32)gbase, handle); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to find the GSI mapping entry\n"); + return status; + } + + return AE_OK; +} + +static int __init riscv_acpi_aplic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_aplic *aplic = (struct acpi_madt_aplic *)header; + + return riscv_acpi_register_ext_intc(aplic->gsi_base, aplic->num_sources, aplic->num_idcs, + aplic->id, ACPI_RISCV_IRQCHIP_APLIC); +} + +static int __init riscv_acpi_plic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_plic *plic = (struct acpi_madt_plic *)header; + + return riscv_acpi_register_ext_intc(plic->gsi_base, plic->num_irqs, 0, + plic->id, 
ACPI_RISCV_IRQCHIP_PLIC); +} + +void __init riscv_acpi_init_gsi_mapping(void) +{ + /* There can be either PLIC or APLIC */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_PLIC, riscv_acpi_plic_parse_madt, 0) > 0) { + acpi_get_devices("RSCV0001", riscv_acpi_create_gsi_map, NULL, NULL); + return; + } + + if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0) + acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL); +} + +static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi >= ext_intc_element->gsi_base && + gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) + return ext_intc_element->handle; + } + + return NULL; +} + +static acpi_status riscv_acpi_irq_get_parent(struct acpi_resource *ares, void *context) +{ + struct acpi_irq_dep_ctx *ctx = context; + struct acpi_resource_irq *irq; + struct acpi_resource_extended_irq *eirq; + + switch (ares->type) { + case ACPI_RESOURCE_TYPE_IRQ: + irq = &ares->data.irq; + if (ctx->index >= irq->interrupt_count) { + ctx->index -= irq->interrupt_count; + return AE_OK; + } + ctx->handle = riscv_acpi_get_gsi_handle(irq->interrupts[ctx->index]); + return AE_CTRL_TERMINATE; + case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: + eirq = &ares->data.extended_irq; + if (eirq->producer_consumer == ACPI_PRODUCER) + return AE_OK; + + if (ctx->index >= eirq->interrupt_count) { + ctx->index -= eirq->interrupt_count; + return AE_OK; + } + + /* Support GSIs only */ + if (eirq->resource_source.string_length) + return AE_OK; + + ctx->handle = riscv_acpi_get_gsi_handle(eirq->interrupts[ctx->index]); + return AE_CTRL_TERMINATE; + } + + return AE_OK; +} + +static int riscv_acpi_irq_get_dep(acpi_handle handle, unsigned int index, acpi_handle *gsi_handle) +{ + struct acpi_irq_dep_ctx ctx = {-EINVAL, index, NULL}; + + if (!gsi_handle) + return 0; + + acpi_walk_resources(handle, METHOD_NAME__CRS, riscv_acpi_irq_get_parent, &ctx); + *gsi_handle = ctx.handle; + if (*gsi_handle) + return 1; + + return 0; +} + +static u32 riscv_acpi_add_prt_dep(acpi_handle handle) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_pci_routing_table *entry; + struct acpi_handle_list dep_devices; + acpi_handle gsi_handle; + acpi_handle link_handle; + acpi_status status; + u32 count = 0; + + status = acpi_get_irq_routing_table(handle, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to get IRQ routing table\n"); + kfree(buffer.pointer); + return 0; + } + + entry = buffer.pointer; + while (entry && (entry->length > 0)) { + if (entry->source[0]) { + acpi_get_handle(handle, entry->source, &link_handle); + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = link_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } else { + gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index); + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = gsi_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } + + entry = (struct acpi_pci_routing_table *) + ((unsigned 
long)entry + entry->length); + } + + kfree(buffer.pointer); + return count; +} + +static u32 riscv_acpi_add_irq_dep(acpi_handle handle) +{ + struct acpi_handle_list dep_devices; + acpi_handle gsi_handle; + u32 count = 0; + int i; + + for (i = 0; + riscv_acpi_irq_get_dep(handle, i, &gsi_handle); + i++) { + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = gsi_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } + + return count; +} + +u32 arch_acpi_add_auto_dep(acpi_handle handle) +{ + if (acpi_has_method(handle, "_PRT")) + return riscv_acpi_add_prt_dep(handle); + + return riscv_acpi_add_irq_dep(handle); +} diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 22ae7829a9155d..7ecc401fb97f9e 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -795,10 +795,7 @@ int acpi_device_add(struct acpi_device *device) goto err; } - result = acpi_device_setup_files(device); - if (result) - pr_err("Error creating sysfs interface for device %s\n", - dev_name(&device->dev)); + acpi_device_setup_files(device); return 0; @@ -861,6 +858,9 @@ static const char * const acpi_honor_dep_ids[] = { "INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */ "INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */ "INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */ + "RSCV0001", /* RISC-V PLIC */ + "RSCV0002", /* RISC-V APLIC */ + "PNP0C0F", /* PCI Link Device */ NULL }; @@ -1822,6 +1822,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device->dev.parent = parent ? 
&parent->dev : NULL; device->dev.release = release; device->dev.bus = &acpi_bus_type; + device->dev.groups = acpi_groups; fwnode_init(&device->fwnode, &acpi_device_fwnode_ops); acpi_set_device_status(device, ACPI_STA_DEFAULT); acpi_device_get_busid(device); @@ -2013,6 +2014,49 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val) mutex_unlock(&acpi_scan_lock); } +int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices) +{ + u32 count; + int i; + + for (count = 0, i = 0; i < dep_devices->count; i++) { + struct acpi_device_info *info; + struct acpi_dep_data *dep; + bool skip, honor_dep; + acpi_status status; + + status = acpi_get_object_info(dep_devices->handles[i], &info); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "Error reading _DEP device info\n"); + continue; + } + + skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids); + honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids); + kfree(info); + + if (skip) + continue; + + dep = kzalloc(sizeof(*dep), GFP_KERNEL); + if (!dep) + continue; + + count++; + + dep->supplier = dep_devices->handles[i]; + dep->consumer = handle; + dep->honor_dep = honor_dep; + + mutex_lock(&acpi_dep_list_lock); + list_add_tail(&dep->node, &acpi_dep_list); + mutex_unlock(&acpi_dep_list_lock); + } + + acpi_handle_list_free(dep_devices); + return count; +} + static void acpi_scan_init_hotplug(struct acpi_device *adev) { struct acpi_hardware_id *hwid; @@ -2032,11 +2076,21 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev) } } +u32 __weak arch_acpi_add_auto_dep(acpi_handle handle) { return 0; } + static u32 acpi_scan_check_dep(acpi_handle handle) { struct acpi_handle_list dep_devices; - u32 count; - int i; + u32 count = 0; + + /* + * Some architectures like RISC-V need to add dependencies for + * all devices which use GSI to the interrupt controller so that + * interrupt controller is probed before any of those devices. + * Instead of mandating _DEP on all the devices, detect the + * dependency and add automatically. + */ + count += arch_acpi_add_auto_dep(handle); /* * Check for _HID here to avoid deferring the enumeration of: @@ -2045,48 +2099,14 @@ static u32 acpi_scan_check_dep(acpi_handle handle) * Still, checking for _HID catches more then just these cases ... 
*/ if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID")) - return 0; + return count; if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) { acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); - return 0; - } - - for (count = 0, i = 0; i < dep_devices.count; i++) { - struct acpi_device_info *info; - struct acpi_dep_data *dep; - bool skip, honor_dep; - acpi_status status; - - status = acpi_get_object_info(dep_devices.handles[i], &info); - if (ACPI_FAILURE(status)) { - acpi_handle_debug(handle, "Error reading _DEP device info\n"); - continue; - } - - skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids); - honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids); - kfree(info); - - if (skip) - continue; - - dep = kzalloc(sizeof(*dep), GFP_KERNEL); - if (!dep) - continue; - - count++; - - dep->supplier = dep_devices.handles[i]; - dep->consumer = handle; - dep->honor_dep = honor_dep; - - mutex_lock(&acpi_dep_list_lock); - list_add_tail(&dep->node , &acpi_dep_list); - mutex_unlock(&acpi_dep_list_lock); + return count; } - acpi_handle_list_free(&dep_devices); + count += acpi_scan_add_dep(handle, &dep_devices); return count; } @@ -2757,6 +2777,8 @@ static int __init acpi_match_madt(union acpi_subtable_headers *header, return 0; } +void __weak arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) { } + int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) { int count = 0; @@ -2765,6 +2787,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) return 0; mutex_lock(&acpi_probe_mutex); + arch_sort_irqchip_probe(ap_head, nr); for (ape = ap_head; nr; ape++, nr--) { if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) { acpi_probe_count = 0; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 889f1c1a1fa92d..c8ee8e42b0f641 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -351,6 +351,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "1025C"), }, }, + /* + * The ASUS ROG M16 from 2023 has many events which wake it from s2idle + * resulting in excessive battery drain and risk of laptop overheating, + * these events can be caused by the MMC or y AniMe display if installed. + * The match is valid for all of the GU604V range. 
+ */ + { + .callback = init_default_s3, + .ident = "ASUS ROG Zephyrus M16 (2023)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus M16 GU604V"), + }, + }, /* * https://bugzilla.kernel.org/show_bug.cgi?id=189431 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index a0cfc857fb553e..78db38c7076e45 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -558,77 +558,31 @@ static void acpi_thermal_zone_device_critical(struct thermal_zone_device *therma thermal_zone_device_critical(thermal); } -struct acpi_thermal_bind_data { - struct thermal_zone_device *thermal; - struct thermal_cooling_device *cdev; - bool bind; -}; - -static int bind_unbind_cdev_cb(struct thermal_trip *trip, void *arg) +static bool acpi_thermal_should_bind_cdev(struct thermal_zone_device *thermal, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) { struct acpi_thermal_trip *acpi_trip = trip->priv; - struct acpi_thermal_bind_data *bd = arg; - struct thermal_zone_device *thermal = bd->thermal; - struct thermal_cooling_device *cdev = bd->cdev; struct acpi_device *cdev_adev = cdev->devdata; int i; /* Skip critical and hot trips. */ if (!acpi_trip) - return 0; + return false; for (i = 0; i < acpi_trip->devices.count; i++) { acpi_handle handle = acpi_trip->devices.handles[i]; - struct acpi_device *adev = acpi_fetch_acpi_dev(handle); - - if (adev != cdev_adev) - continue; - - if (bd->bind) { - int ret; - - ret = thermal_bind_cdev_to_trip(thermal, trip, cdev, - THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT, - THERMAL_WEIGHT_DEFAULT); - if (ret) - return ret; - } else { - thermal_unbind_cdev_from_trip(thermal, trip, cdev); - } - } - - return 0; -} -static int acpi_thermal_bind_unbind_cdev(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev, - bool bind) -{ - struct acpi_thermal_bind_data bd = { - .thermal = thermal, .cdev = cdev, .bind = bind - }; - - return for_each_thermal_trip(thermal, bind_unbind_cdev_cb, &bd); -} - -static int -acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) -{ - return acpi_thermal_bind_unbind_cdev(thermal, cdev, true); -} + if (acpi_fetch_acpi_dev(handle) == cdev_adev) + return true; + } -static int -acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) -{ - return acpi_thermal_bind_unbind_cdev(thermal, cdev, false); + return false; } static const struct thermal_zone_device_ops acpi_thermal_zone_ops = { - .bind = acpi_thermal_bind_cooling_device, - .unbind = acpi_thermal_unbind_cooling_device, + .should_bind = acpi_thermal_should_bind_cdev, .get_temp = thermal_get_temp, .get_trend = thermal_get_trend, .hot = acpi_thermal_zone_device_hot, diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index ae93842822733e..6de542d995189a 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -801,7 +801,8 @@ acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, if (ret != AE_NOT_FOUND) acpi_handle_warn(handle, - "failed to evaluate _DSM %pUb (0x%x)\n", guid, ret); + "failed to evaluate _DSM %pUb rev:%lld func:%lld (0x%x)\n", + guid, rev, func, ret); return NULL; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 674b9db7a1ef8b..015bd8e66c1cf8 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -254,6 
+254,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"), }, }, + { + .callback = video_detect_force_vendor, + /* Panasonic Toughbook CF-18 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Matsushita Electric Industrial"), + DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), + }, + }, /* * Toshiba models with Transflective display, these need to use @@ -549,6 +557,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"), }, }, + { + .callback = video_detect_force_native, + /* Apple MacBook Pro 9,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,2"), + }, + }, { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, @@ -828,6 +844,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { * controller board in their ACPI tables (and may even have one), but * which need native backlight control nevertheless. */ + { + /* https://github.com/zabbly/linux/issues/26 */ + .callback = video_detect_force_native, + /* Dell OptiPlex 5480 AIO */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 5480 AIO"), + }, + }, { /* https://bugzilla.redhat.com/show_bug.cgi?id=2303936 */ .callback = video_detect_force_native, @@ -896,7 +921,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = { /* Lenovo Yoga Tab 3 Pro YT3-X90F */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, }, diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index ab2b5fa83e1ffe..6af546b21574f9 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -355,7 +355,6 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { /* Lenovo Yoga Tab 3 Pro X90F */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 34bc880ca20bd2..0230c43377c1da 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -435,7 +435,7 @@ static const struct dev_pm_ops amba_pm = { * DMA configuration for platform and AMBA bus is same. So here we reuse * platform's DMA config routine. */ -struct bus_type amba_bustype = { +const struct bus_type amba_bustype = { .name = "amba", .dev_groups = amba_dev_groups, .match = amba_match, diff --git a/drivers/android/binder.c b/drivers/android/binder.c index e8643c69d42622..978740537a1aac 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -277,7 +277,7 @@ _binder_proc_lock(struct binder_proc *proc, int line) } /** - * binder_proc_unlock() - Release spinlock for given binder_proc + * binder_proc_unlock() - Release outer lock for given binder_proc * @proc: struct binder_proc to acquire * * Release lock acquired via binder_proc_lock() @@ -1352,6 +1352,7 @@ static void binder_free_ref(struct binder_ref *ref) if (ref->node) binder_free_node(ref->node); kfree(ref->death); + kfree(ref->freeze); kfree(ref); } @@ -1546,7 +1547,7 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread) * by threads that are being released. 
When done with the binder_proc, * this function is called to decrement the counter and free the * proc if appropriate (proc has been released, all threads have - * been released and not currenly in-use to process a transaction). + * been released and not currently in-use to process a transaction). */ static void binder_proc_dec_tmpref(struct binder_proc *proc) { @@ -3842,6 +3843,155 @@ static void binder_transaction(struct binder_proc *proc, } } +static int +binder_request_freeze_notification(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_handle_cookie *handle_cookie) +{ + struct binder_ref_freeze *freeze; + struct binder_ref *ref; + bool is_frozen; + + freeze = kzalloc(sizeof(*freeze), GFP_KERNEL); + if (!freeze) + return -ENOMEM; + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, handle_cookie->handle, false); + if (!ref) { + binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n", + proc->pid, thread->pid, handle_cookie->handle); + binder_proc_unlock(proc); + kfree(freeze); + return -EINVAL; + } + + binder_node_lock(ref->node); + + if (ref->freeze || !ref->node->proc) { + binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n", + proc->pid, thread->pid, + ref->freeze ? "already set" : "dead node"); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + kfree(freeze); + return -EINVAL; + } + binder_inner_proc_lock(ref->node->proc); + is_frozen = ref->node->proc->is_frozen; + binder_inner_proc_unlock(ref->node->proc); + + binder_stats_created(BINDER_STAT_FREEZE); + INIT_LIST_HEAD(&freeze->work.entry); + freeze->cookie = handle_cookie->cookie; + freeze->work.type = BINDER_WORK_FROZEN_BINDER; + freeze->is_frozen = is_frozen; + + ref->freeze = freeze; + + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + binder_inner_proc_unlock(proc); + + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + return 0; +} + +static int +binder_clear_freeze_notification(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_handle_cookie *handle_cookie) +{ + struct binder_ref_freeze *freeze; + struct binder_ref *ref; + + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, handle_cookie->handle, false); + if (!ref) { + binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n", + proc->pid, thread->pid, handle_cookie->handle); + binder_proc_unlock(proc); + return -EINVAL; + } + + binder_node_lock(ref->node); + + if (!ref->freeze) { + binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n", + proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + return -EINVAL; + } + freeze = ref->freeze; + binder_inner_proc_lock(proc); + if (freeze->cookie != handle_cookie->cookie) { + binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n", + proc->pid, thread->pid, (u64)freeze->cookie, + (u64)handle_cookie->cookie); + binder_inner_proc_unlock(proc); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + return -EINVAL; + } + ref->freeze = NULL; + /* + * Take the existing freeze object and overwrite its work type. There are three cases here: + * 1. No pending notification. In this case just add the work to the queue. + * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we + * should resend with the new work type. + * 3. A notification is pending to be sent. 
Since the work is already in the queue, nothing + * needs to be done here. + */ + freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION; + if (list_empty(&freeze->work.entry)) { + binder_enqueue_work_ilocked(&freeze->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } else if (freeze->sent) { + freeze->resend = true; + } + binder_inner_proc_unlock(proc); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + return 0; +} + +static int +binder_freeze_notification_done(struct binder_proc *proc, + struct binder_thread *thread, + binder_uintptr_t cookie) +{ + struct binder_ref_freeze *freeze = NULL; + struct binder_work *w; + + binder_inner_proc_lock(proc); + list_for_each_entry(w, &proc->delivered_freeze, entry) { + struct binder_ref_freeze *tmp_freeze = + container_of(w, struct binder_ref_freeze, work); + + if (tmp_freeze->cookie == cookie) { + freeze = tmp_freeze; + break; + } + } + if (!freeze) { + binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n", + proc->pid, thread->pid, (u64)cookie); + binder_inner_proc_unlock(proc); + return -EINVAL; + } + binder_dequeue_work_ilocked(&freeze->work); + freeze->sent = false; + if (freeze->resend) { + freeze->resend = false; + binder_enqueue_work_ilocked(&freeze->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } + binder_inner_proc_unlock(proc); + return 0; +} + /** * binder_free_buf() - free the specified buffer * @proc: binder proc that owns buffer @@ -4325,6 +4475,44 @@ static int binder_thread_write(struct binder_proc *proc, binder_inner_proc_unlock(proc); } break; + case BC_REQUEST_FREEZE_NOTIFICATION: { + struct binder_handle_cookie handle_cookie; + int error; + + if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie))) + return -EFAULT; + ptr += sizeof(handle_cookie); + error = binder_request_freeze_notification(proc, thread, + &handle_cookie); + if (error) + return error; + } break; + + case BC_CLEAR_FREEZE_NOTIFICATION: { + struct binder_handle_cookie handle_cookie; + int error; + + if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie))) + return -EFAULT; + ptr += sizeof(handle_cookie); + error = binder_clear_freeze_notification(proc, thread, &handle_cookie); + if (error) + return error; + } break; + + case BC_FREEZE_NOTIFICATION_DONE: { + binder_uintptr_t cookie; + int error; + + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + + ptr += sizeof(cookie); + error = binder_freeze_notification_done(proc, thread, cookie); + if (error) + return error; + } break; + default: pr_err("%d:%d unknown command %u\n", proc->pid, thread->pid, cmd); @@ -4714,6 +4902,46 @@ static int binder_thread_read(struct binder_proc *proc, if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; + + case BINDER_WORK_FROZEN_BINDER: { + struct binder_ref_freeze *freeze; + struct binder_frozen_state_info info; + + memset(&info, 0, sizeof(info)); + freeze = container_of(w, struct binder_ref_freeze, work); + info.is_frozen = freeze->is_frozen; + info.cookie = freeze->cookie; + freeze->sent = true; + binder_enqueue_work_ilocked(w, &proc->delivered_freeze); + binder_inner_proc_unlock(proc); + + if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (copy_to_user(ptr, &info, sizeof(info))) + return -EFAULT; + ptr += sizeof(info); + binder_stat_br(proc, thread, BR_FROZEN_BINDER); + goto done; /* BR_FROZEN_BINDER notifications can cause transactions */ + } break; + + case 
BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: { + struct binder_ref_freeze *freeze = + container_of(w, struct binder_ref_freeze, work); + binder_uintptr_t cookie = freeze->cookie; + + binder_inner_proc_unlock(proc); + kfree(freeze); + binder_stats_deleted(BINDER_STAT_FREEZE); + if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (put_user(cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE); + } break; + default: binder_inner_proc_unlock(proc); pr_err("%d:%d: bad work type %d\n", @@ -5322,6 +5550,48 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc) return false; } +static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen) +{ + struct rb_node *n; + struct binder_ref *ref; + + binder_inner_proc_lock(proc); + for (n = rb_first(&proc->nodes); n; n = rb_next(n)) { + struct binder_node *node; + + node = rb_entry(n, struct binder_node, rb_node); + binder_inner_proc_unlock(proc); + binder_node_lock(node); + hlist_for_each_entry(ref, &node->refs, node_entry) { + /* + * Need the node lock to synchronize + * with new notification requests and the + * inner lock to synchronize with queued + * freeze notifications. + */ + binder_inner_proc_lock(ref->proc); + if (!ref->freeze) { + binder_inner_proc_unlock(ref->proc); + continue; + } + ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER; + if (list_empty(&ref->freeze->work.entry)) { + ref->freeze->is_frozen = is_frozen; + binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo); + binder_wakeup_proc_ilocked(ref->proc); + } else { + if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen) + ref->freeze->resend = true; + ref->freeze->is_frozen = is_frozen; + } + binder_inner_proc_unlock(ref->proc); + } + binder_node_unlock(node); + binder_inner_proc_lock(proc); + } + binder_inner_proc_unlock(proc); +} + static int binder_ioctl_freeze(struct binder_freeze_info *info, struct binder_proc *target_proc) { @@ -5333,6 +5603,7 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info, target_proc->async_recv = false; target_proc->is_frozen = false; binder_inner_proc_unlock(target_proc); + binder_add_freeze_work(target_proc, false); return 0; } @@ -5365,6 +5636,8 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info, binder_inner_proc_lock(target_proc); target_proc->is_frozen = false; binder_inner_proc_unlock(target_proc); + } else { + binder_add_freeze_work(target_proc, true); } return ret; @@ -5740,6 +6013,7 @@ static int binder_open(struct inode *nodp, struct file *filp) binder_stats_created(BINDER_STAT_PROC); proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); + INIT_LIST_HEAD(&proc->delivered_freeze); INIT_LIST_HEAD(&proc->waiting_threads); filp->private_data = proc; @@ -6291,7 +6565,9 @@ static const char * const binder_return_strings[] = { "BR_FAILED_REPLY", "BR_FROZEN_REPLY", "BR_ONEWAY_SPAM_SUSPECT", - "BR_TRANSACTION_PENDING_FROZEN" + "BR_TRANSACTION_PENDING_FROZEN", + "BR_FROZEN_BINDER", + "BR_CLEAR_FREEZE_NOTIFICATION_DONE", }; static const char * const binder_command_strings[] = { @@ -6314,6 +6590,9 @@ static const char * const binder_command_strings[] = { "BC_DEAD_BINDER_DONE", "BC_TRANSACTION_SG", "BC_REPLY_SG", + "BC_REQUEST_FREEZE_NOTIFICATION", + "BC_CLEAR_FREEZE_NOTIFICATION", + "BC_FREEZE_NOTIFICATION_DONE", }; static const char * const binder_objstat_strings[] = { @@ -6323,7 
+6602,8 @@ static const char * const binder_objstat_strings[] = { "ref", "death", "transaction", - "transaction_complete" + "transaction_complete", + "freeze", }; static void print_binder_stats(struct seq_file *m, const char *prefix, diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 7d4fc53f7a732c..f8d6be682f2389 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -130,12 +130,13 @@ enum binder_stat_types { BINDER_STAT_DEATH, BINDER_STAT_TRANSACTION, BINDER_STAT_TRANSACTION_COMPLETE, + BINDER_STAT_FREEZE, BINDER_STAT_COUNT }; struct binder_stats { - atomic_t br[_IOC_NR(BR_TRANSACTION_PENDING_FROZEN) + 1]; - atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; + atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1]; + atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1]; atomic_t obj_created[BINDER_STAT_COUNT]; atomic_t obj_deleted[BINDER_STAT_COUNT]; }; @@ -160,6 +161,8 @@ struct binder_work { BINDER_WORK_DEAD_BINDER, BINDER_WORK_DEAD_BINDER_AND_CLEAR, BINDER_WORK_CLEAR_DEATH_NOTIFICATION, + BINDER_WORK_FROZEN_BINDER, + BINDER_WORK_CLEAR_FREEZE_NOTIFICATION, } type; }; @@ -276,6 +279,14 @@ struct binder_ref_death { binder_uintptr_t cookie; }; +struct binder_ref_freeze { + struct binder_work work; + binder_uintptr_t cookie; + bool is_frozen:1; + bool sent:1; + bool resend:1; +}; + /** * struct binder_ref_data - binder_ref counts and id * @debug_id: unique ID for the ref @@ -308,6 +319,8 @@ struct binder_ref_data { * @node indicates the node must be freed * @death: pointer to death notification (ref_death) if requested * (protected by @node->lock) + * @freeze: pointer to freeze notification (ref_freeze) if requested + * (protected by @node->lock) * * Structure to track references from procA to target node (on procB). This * structure is unsafe to access without holding @proc->outer_lock. 
@@ -324,6 +337,7 @@ struct binder_ref { struct binder_proc *proc; struct binder_node *node; struct binder_ref_death *death; + struct binder_ref_freeze *freeze; }; /** @@ -377,6 +391,8 @@ struct binder_ref { * (atomics, no lock needed) * @delivered_death: list of delivered death notification * (protected by @inner_lock) + * @delivered_freeze: list of delivered freeze notification + * (protected by @inner_lock) * @max_threads: cap on number of binder threads * (protected by @inner_lock) * @requested_threads: number of binder threads requested but not @@ -424,6 +440,7 @@ struct binder_proc { struct list_head todo; struct binder_stats stats; struct list_head delivered_death; + struct list_head delivered_freeze; u32 max_threads; int requested_threads; int requested_threads_started; diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 3001d754ac369e..ad1fa7abc3232d 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -58,6 +58,7 @@ enum binderfs_stats_mode { struct binder_features { bool oneway_spam_detection; bool extended_error; + bool freeze_notification; }; static const struct constant_table binderfs_param_stats[] = { @@ -74,6 +75,7 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = { static struct binder_features binder_features = { .oneway_spam_detection = true, .extended_error = true, + .freeze_notification = true, }; static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb) @@ -608,6 +610,12 @@ static int init_binder_features(struct super_block *sb) if (IS_ERR(dentry)) return PTR_ERR(dentry); + dentry = binderfs_create_file(dir, "freeze_notification", + &binder_features_fops, + &binder_features.freeze_notification); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + return 0; } diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index a05c1724944812..45f63b09828a1a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1370,7 +1370,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) * V1.03 is known to be broken. V3.04 is known to * work. Between, there are V1.06, V2.06 and V3.03 * that we don't have much idea about. For now, - * blacklist anything older than V3.04. + * assume that anything older than V3.04 is broken. 
* * http://bugzilla.kernel.org/show_bug.cgi?id=15104 */ diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 70c3a33eee6f24..2f16524c252629 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -437,7 +437,6 @@ static int brcm_ahci_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct brcm_ahci_priv *priv; struct ahci_host_priv *hpriv; - struct resource *res; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -451,8 +450,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) priv->version = (unsigned long)of_id->data; priv->dev = dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl"); - priv->top_ctrl = devm_ioremap_resource(dev, res); + priv->top_ctrl = devm_platform_ioremap_resource_byname(pdev, "top-ctrl"); if (IS_ERR(priv->top_ctrl)) return PTR_ERR(priv->top_ctrl); diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index cb768f66f0a709..6f955e9105e883 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "ahci.h" @@ -44,42 +45,10 @@ enum { /* Clock Reset Register */ IMX_CLOCK_RESET = 0x7f3f, IMX_CLOCK_RESET_RESET = 1 << 0, - /* IMX8QM HSIO AHCI definitions */ - IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET = 0x03, - IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET = 0x09, - IMX8QM_SATA_PHY_IMPED_RATIO_85OHM = 0x6c, - IMX8QM_LPCG_PHYX2_OFFSET = 0x00000, - IMX8QM_CSR_PHYX2_OFFSET = 0x90000, - IMX8QM_CSR_PHYX1_OFFSET = 0xa0000, - IMX8QM_CSR_PHYX_STTS0_OFFSET = 0x4, - IMX8QM_CSR_PCIEA_OFFSET = 0xb0000, - IMX8QM_CSR_PCIEB_OFFSET = 0xc0000, - IMX8QM_CSR_SATA_OFFSET = 0xd0000, - IMX8QM_CSR_PCIE_CTRL2_OFFSET = 0x8, - IMX8QM_CSR_MISC_OFFSET = 0xe0000, - - IMX8QM_LPCG_PHYX2_PCLK0_MASK = (0x3 << 16), - IMX8QM_LPCG_PHYX2_PCLK1_MASK = (0x3 << 20), - IMX8QM_PHY_APB_RSTN_0 = BIT(0), - IMX8QM_PHY_MODE_SATA = BIT(19), - IMX8QM_PHY_MODE_MASK = (0xf << 17), - IMX8QM_PHY_PIPE_RSTN_0 = BIT(24), - IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0 = BIT(25), - IMX8QM_PHY_PIPE_RSTN_1 = BIT(26), - IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1 = BIT(27), - IMX8QM_STTS0_LANE0_TX_PLL_LOCK = BIT(4), - IMX8QM_MISC_IOB_RXENA = BIT(0), - IMX8QM_MISC_IOB_TXENA = BIT(1), - IMX8QM_MISC_PHYX1_EPCS_SEL = BIT(12), - IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 = BIT(24), - IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 = BIT(25), - IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 = BIT(28), - IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0 = BIT(29), - IMX8QM_SATA_CTRL_RESET_N = BIT(12), - IMX8QM_SATA_CTRL_EPCS_PHYRESET_N = BIT(7), - IMX8QM_CTRL_BUTTON_RST_N = BIT(21), - IMX8QM_CTRL_POWER_UP_RST_N = BIT(23), - IMX8QM_CTRL_LTSSM_ENABLE = BIT(4), + /* IMX8QM SATA specific control registers */ + IMX8QM_SATA_AHCI_PTC = 0xc8, + IMX8QM_SATA_AHCI_PTC_RXWM_MASK = GENMASK(6, 0), + IMX8QM_SATA_AHCI_PTC_RXWM = 0x29, }; enum ahci_imx_type { @@ -95,14 +64,10 @@ struct imx_ahci_priv { struct clk *sata_clk; struct clk *sata_ref_clk; struct clk *ahb_clk; - struct clk *epcs_tx_clk; - struct clk *epcs_rx_clk; - struct clk *phy_apbclk; - struct clk *phy_pclk0; - struct clk *phy_pclk1; - void __iomem *phy_base; - struct gpio_desc *clkreq_gpiod; struct regmap *gpr; + struct phy *sata_phy; + struct phy *cali_phy0; + struct phy *cali_phy1; bool no_device; bool first_time; u32 phy_params; @@ -450,201 +415,79 @@ ATTRIBUTE_GROUPS(fsl_sata_ahci); static int imx8_sata_enable(struct ahci_host_priv *hpriv) { - u32 val, reg; - int i, ret; + u32 val; + int ret; struct imx_ahci_priv *imxpriv = hpriv->plat_data; struct device *dev = &imxpriv->ahci_pdev->dev; - /* 
configure the hsio for sata */ - ret = clk_prepare_enable(imxpriv->phy_pclk0); - if (ret < 0) { - dev_err(dev, "can't enable phy_pclk0.\n"); + /* + * Since "REXT" pin is only present for first lane of i.MX8QM + * PHY, its calibration results will be stored, passed through + * to the second lane PHY, and shared with all three lane PHYs. + * + * Initialize the first two lane PHYs here, although only the + * third lane PHY is used by SATA. + */ + ret = phy_init(imxpriv->cali_phy0); + if (ret) { + dev_err(dev, "cali PHY init failed\n"); return ret; } - ret = clk_prepare_enable(imxpriv->phy_pclk1); - if (ret < 0) { - dev_err(dev, "can't enable phy_pclk1.\n"); - goto disable_phy_pclk0; + ret = phy_power_on(imxpriv->cali_phy0); + if (ret) { + dev_err(dev, "cali PHY power on failed\n"); + goto err_cali_phy0_exit; } - ret = clk_prepare_enable(imxpriv->epcs_tx_clk); - if (ret < 0) { - dev_err(dev, "can't enable epcs_tx_clk.\n"); - goto disable_phy_pclk1; + ret = phy_init(imxpriv->cali_phy1); + if (ret) { + dev_err(dev, "cali PHY1 init failed\n"); + goto err_cali_phy0_off; } - ret = clk_prepare_enable(imxpriv->epcs_rx_clk); - if (ret < 0) { - dev_err(dev, "can't enable epcs_rx_clk.\n"); - goto disable_epcs_tx_clk; + ret = phy_power_on(imxpriv->cali_phy1); + if (ret) { + dev_err(dev, "cali PHY1 power on failed\n"); + goto err_cali_phy1_exit; } - ret = clk_prepare_enable(imxpriv->phy_apbclk); - if (ret < 0) { - dev_err(dev, "can't enable phy_apbclk.\n"); - goto disable_epcs_rx_clk; + ret = phy_init(imxpriv->sata_phy); + if (ret) { + dev_err(dev, "sata PHY init failed\n"); + goto err_cali_phy1_off; } - /* Configure PHYx2 PIPE_RSTN */ - regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEA_OFFSET + - IMX8QM_CSR_PCIE_CTRL2_OFFSET, &val); - if ((val & IMX8QM_CTRL_LTSSM_ENABLE) == 0) { - /* The link of the PCIEA of HSIO is down */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_PHYX2_OFFSET, - IMX8QM_PHY_PIPE_RSTN_0 | - IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0, - IMX8QM_PHY_PIPE_RSTN_0 | - IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0); + ret = phy_set_mode(imxpriv->sata_phy, PHY_MODE_SATA); + if (ret) { + dev_err(dev, "unable to set SATA PHY mode\n"); + goto err_sata_phy_exit; } - regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEB_OFFSET + - IMX8QM_CSR_PCIE_CTRL2_OFFSET, ®); - if ((reg & IMX8QM_CTRL_LTSSM_ENABLE) == 0) { - /* The link of the PCIEB of HSIO is down */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_PHYX2_OFFSET, - IMX8QM_PHY_PIPE_RSTN_1 | - IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1, - IMX8QM_PHY_PIPE_RSTN_1 | - IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1); + ret = phy_power_on(imxpriv->sata_phy); + if (ret) { + dev_err(dev, "sata PHY power up failed\n"); + goto err_sata_phy_exit; } - if (((reg | val) & IMX8QM_CTRL_LTSSM_ENABLE) == 0) { - /* The links of both PCIA and PCIEB of HSIO are down */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_LPCG_PHYX2_OFFSET, - IMX8QM_LPCG_PHYX2_PCLK0_MASK | - IMX8QM_LPCG_PHYX2_PCLK1_MASK, - 0); - } - - /* set PWR_RST and BT_RST of csr_pciea */ - val = IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET; - regmap_update_bits(imxpriv->gpr, - val, - IMX8QM_CTRL_BUTTON_RST_N, - IMX8QM_CTRL_BUTTON_RST_N); - regmap_update_bits(imxpriv->gpr, - val, - IMX8QM_CTRL_POWER_UP_RST_N, - IMX8QM_CTRL_POWER_UP_RST_N); - - /* PHYX1_MODE to SATA */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_PHYX1_OFFSET, - IMX8QM_PHY_MODE_MASK, - IMX8QM_PHY_MODE_SATA); - /* - * BIT0 RXENA 1, BIT1 TXENA 0 - * BIT12 PHY_X1_EPCS_SEL 1. 
- */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_MISC_OFFSET, - IMX8QM_MISC_IOB_RXENA, - IMX8QM_MISC_IOB_RXENA); - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_MISC_OFFSET, - IMX8QM_MISC_IOB_TXENA, - 0); - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_MISC_OFFSET, - IMX8QM_MISC_PHYX1_EPCS_SEL, - IMX8QM_MISC_PHYX1_EPCS_SEL); - /* - * It is possible, for PCIe and SATA are sharing - * the same clock source, HPLL or external oscillator. - * When PCIe is in low power modes (L1.X or L2 etc), - * the clock source can be turned off. In this case, - * if this clock source is required to be toggling by - * SATA, then SATA functions will be abnormal. - * Set the override here to avoid it. - */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_MISC_OFFSET, - IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 | - IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 | - IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 | - IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0, - IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 | - IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 | - IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 | - IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0); - - /* clear PHY RST, then set it */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_SATA_OFFSET, - IMX8QM_SATA_CTRL_EPCS_PHYRESET_N, - 0); - - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_SATA_OFFSET, - IMX8QM_SATA_CTRL_EPCS_PHYRESET_N, - IMX8QM_SATA_CTRL_EPCS_PHYRESET_N); - - /* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_SATA_OFFSET, - IMX8QM_SATA_CTRL_RESET_N, - IMX8QM_SATA_CTRL_RESET_N); - udelay(1); - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_SATA_OFFSET, - IMX8QM_SATA_CTRL_RESET_N, - 0); - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_SATA_OFFSET, - IMX8QM_SATA_CTRL_RESET_N, - IMX8QM_SATA_CTRL_RESET_N); - - /* APB reset */ - regmap_update_bits(imxpriv->gpr, - IMX8QM_CSR_PHYX1_OFFSET, - IMX8QM_PHY_APB_RSTN_0, - IMX8QM_PHY_APB_RSTN_0); - - for (i = 0; i < 100; i++) { - reg = IMX8QM_CSR_PHYX1_OFFSET + - IMX8QM_CSR_PHYX_STTS0_OFFSET; - regmap_read(imxpriv->gpr, reg, &val); - val &= IMX8QM_STTS0_LANE0_TX_PLL_LOCK; - if (val == IMX8QM_STTS0_LANE0_TX_PLL_LOCK) - break; - udelay(1); - } + /* The cali_phy# can be turned off after SATA PHY is initialized. 
*/ + phy_power_off(imxpriv->cali_phy1); + phy_exit(imxpriv->cali_phy1); + phy_power_off(imxpriv->cali_phy0); + phy_exit(imxpriv->cali_phy0); - if (val != IMX8QM_STTS0_LANE0_TX_PLL_LOCK) { - dev_err(dev, "TX PLL of the PHY is not locked\n"); - ret = -ENODEV; - } else { - writeb(imxpriv->imped_ratio, imxpriv->phy_base + - IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET); - writeb(imxpriv->imped_ratio, imxpriv->phy_base + - IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET); - reg = readb(imxpriv->phy_base + - IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET); - if (unlikely(reg != imxpriv->imped_ratio)) - dev_info(dev, "Can't set PHY RX impedance ratio.\n"); - reg = readb(imxpriv->phy_base + - IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET); - if (unlikely(reg != imxpriv->imped_ratio)) - dev_info(dev, "Can't set PHY TX impedance ratio.\n"); - usleep_range(50, 100); + /* RxWaterMark setting */ + val = readl(hpriv->mmio + IMX8QM_SATA_AHCI_PTC); + val &= ~IMX8QM_SATA_AHCI_PTC_RXWM_MASK; + val |= IMX8QM_SATA_AHCI_PTC_RXWM; + writel(val, hpriv->mmio + IMX8QM_SATA_AHCI_PTC); - /* - * To reduce the power consumption, gate off - * the PHY clks - */ - clk_disable_unprepare(imxpriv->phy_apbclk); - clk_disable_unprepare(imxpriv->phy_pclk1); - clk_disable_unprepare(imxpriv->phy_pclk0); - return ret; - } + return 0; - clk_disable_unprepare(imxpriv->phy_apbclk); -disable_epcs_rx_clk: - clk_disable_unprepare(imxpriv->epcs_rx_clk); -disable_epcs_tx_clk: - clk_disable_unprepare(imxpriv->epcs_tx_clk); -disable_phy_pclk1: - clk_disable_unprepare(imxpriv->phy_pclk1); -disable_phy_pclk0: - clk_disable_unprepare(imxpriv->phy_pclk0); +err_sata_phy_exit: + phy_exit(imxpriv->sata_phy); +err_cali_phy1_off: + phy_power_off(imxpriv->cali_phy1); +err_cali_phy1_exit: + phy_exit(imxpriv->cali_phy1); +err_cali_phy0_off: + phy_power_off(imxpriv->cali_phy0); +err_cali_phy0_exit: + phy_exit(imxpriv->cali_phy0); return ret; } @@ -698,6 +541,9 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv) } } else if (imxpriv->type == AHCI_IMX8QM) { ret = imx8_sata_enable(hpriv); + if (ret) + goto disable_clk; + } usleep_range(1000, 2000); @@ -736,8 +582,10 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv) break; case AHCI_IMX8QM: - clk_disable_unprepare(imxpriv->epcs_rx_clk); - clk_disable_unprepare(imxpriv->epcs_tx_clk); + if (imxpriv->sata_phy) { + phy_power_off(imxpriv->sata_phy); + phy_exit(imxpriv->sata_phy); + } break; default: @@ -760,6 +608,9 @@ static void ahci_imx_error_handler(struct ata_port *ap) ahci_error_handler(ap); + if (imxpriv->type == AHCI_IMX8QM) + return; + if (!(imxpriv->first_time) || ahci_imx_hotplug) return; @@ -986,65 +837,19 @@ static const struct scsi_host_template ahci_platform_sht = { static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv) { - struct resource *phy_res; - struct platform_device *pdev = imxpriv->ahci_pdev; - struct device_node *np = dev->of_node; - - if (of_property_read_u32(np, "fsl,phy-imp", &imxpriv->imped_ratio)) - imxpriv->imped_ratio = IMX8QM_SATA_PHY_IMPED_RATIO_85OHM; - phy_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - if (phy_res) { - imxpriv->phy_base = devm_ioremap(dev, phy_res->start, - resource_size(phy_res)); - if (!imxpriv->phy_base) { - dev_err(dev, "error with ioremap\n"); - return -ENOMEM; - } - } else { - dev_err(dev, "missing *phy* reg region.\n"); - return -ENOMEM; - } - imxpriv->gpr = - syscon_regmap_lookup_by_phandle(np, "hsio"); - if (IS_ERR(imxpriv->gpr)) { - dev_err(dev, "unable to find gpr registers\n"); - return PTR_ERR(imxpriv->gpr); - } - - 
imxpriv->epcs_tx_clk = devm_clk_get(dev, "epcs_tx"); - if (IS_ERR(imxpriv->epcs_tx_clk)) { - dev_err(dev, "can't get epcs_tx_clk clock.\n"); - return PTR_ERR(imxpriv->epcs_tx_clk); - } - imxpriv->epcs_rx_clk = devm_clk_get(dev, "epcs_rx"); - if (IS_ERR(imxpriv->epcs_rx_clk)) { - dev_err(dev, "can't get epcs_rx_clk clock.\n"); - return PTR_ERR(imxpriv->epcs_rx_clk); - } - imxpriv->phy_pclk0 = devm_clk_get(dev, "phy_pclk0"); - if (IS_ERR(imxpriv->phy_pclk0)) { - dev_err(dev, "can't get phy_pclk0 clock.\n"); - return PTR_ERR(imxpriv->phy_pclk0); - } - imxpriv->phy_pclk1 = devm_clk_get(dev, "phy_pclk1"); - if (IS_ERR(imxpriv->phy_pclk1)) { - dev_err(dev, "can't get phy_pclk1 clock.\n"); - return PTR_ERR(imxpriv->phy_pclk1); - } - imxpriv->phy_apbclk = devm_clk_get(dev, "phy_apbclk"); - if (IS_ERR(imxpriv->phy_apbclk)) { - dev_err(dev, "can't get phy_apbclk clock.\n"); - return PTR_ERR(imxpriv->phy_apbclk); - } - - /* Fetch GPIO, then enable the external OSC */ - imxpriv->clkreq_gpiod = devm_gpiod_get_optional(dev, "clkreq", - GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE); - if (IS_ERR(imxpriv->clkreq_gpiod)) - return PTR_ERR(imxpriv->clkreq_gpiod); - if (imxpriv->clkreq_gpiod) - gpiod_set_consumer_name(imxpriv->clkreq_gpiod, "SATA CLKREQ"); - + imxpriv->sata_phy = devm_phy_get(dev, "sata-phy"); + if (IS_ERR(imxpriv->sata_phy)) + return dev_err_probe(dev, PTR_ERR(imxpriv->sata_phy), + "Failed to get sata_phy\n"); + + imxpriv->cali_phy0 = devm_phy_get(dev, "cali-phy0"); + if (IS_ERR(imxpriv->cali_phy0)) + return dev_err_probe(dev, PTR_ERR(imxpriv->cali_phy0), + "Failed to get cali_phy0\n"); + imxpriv->cali_phy1 = devm_phy_get(dev, "cali-phy1"); + if (IS_ERR(imxpriv->cali_phy1)) + return dev_err_probe(dev, PTR_ERR(imxpriv->cali_phy1), + "Failed to get cali_phy1\n"); return 0; } @@ -1077,12 +882,6 @@ static int imx_ahci_probe(struct platform_device *pdev) return PTR_ERR(imxpriv->sata_ref_clk); } - imxpriv->ahb_clk = devm_clk_get(dev, "ahb"); - if (IS_ERR(imxpriv->ahb_clk)) { - dev_err(dev, "can't get ahb clock.\n"); - return PTR_ERR(imxpriv->ahb_clk); - } - if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) { u32 reg_value; @@ -1142,11 +941,8 @@ static int imx_ahci_probe(struct platform_device *pdev) goto disable_clk; /* - * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, - * and IP vendor specific register IMX_TIMER1MS. - * Configure CAP_SSS (support stagered spin up). - * Implement the port0. - * Get the ahb clock rate, and configure the TIMER1MS register. + * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL. + * Set CAP_SSS (support stagered spin up) and Implement the port0. */ reg_val = readl(hpriv->mmio + HOST_CAP); if (!(reg_val & HOST_CAP_SSS)) { @@ -1159,8 +955,20 @@ static int imx_ahci_probe(struct platform_device *pdev) writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL); } - reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; - writel(reg_val, hpriv->mmio + IMX_TIMER1MS); + if (imxpriv->type != AHCI_IMX8QM) { + /* + * Get AHB clock rate and configure the vendor specified + * TIMER1MS register on i.MX53, i.MX6Q and i.MX6QP only. 
+ */ + imxpriv->ahb_clk = devm_clk_get(dev, "ahb"); + if (IS_ERR(imxpriv->ahb_clk)) { + dev_err(dev, "Failed to get ahb clock\n"); + ret = PTR_ERR(imxpriv->ahb_clk); + goto disable_sata; + } + reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; + writel(reg_val, hpriv->mmio + IMX_TIMER1MS); + } ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, &ahci_platform_sht); @@ -1229,6 +1037,6 @@ static struct platform_driver imx_ahci_driver = { module_platform_driver(imx_ahci_driver); MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver"); -MODULE_AUTHOR("Richard Zhu "); +MODULE_AUTHOR("Richard Zhu "); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index ec3c5bd1f81349..093b940bc953f0 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -1446,7 +1446,6 @@ static int piix_init_sidpr(struct ata_host *host) if (hpriv->map[i] == IDE) return 0; - /* is it blacklisted? */ if (piix_no_sidpr(host)) return 0; diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 581704e61f286b..7a8064520a35bd 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -410,7 +410,6 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port, static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv, struct device *dev) { - struct device_node *child; u32 port; if (!of_property_read_u32(dev->of_node, "hba-cap", &hpriv->saved_cap)) @@ -419,14 +418,12 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv, of_property_read_u32(dev->of_node, "ports-implemented", &hpriv->saved_port_map); - for_each_child_of_node(dev->of_node, child) { + for_each_child_of_node_scoped(dev->of_node, child) { if (!of_device_is_available(child)) continue; - if (of_property_read_u32(child, "reg", &port)) { - of_node_put(child); + if (of_property_read_u32(child, "reg", &port)) return -EINVAL; - } if (!of_property_read_u32(child, "hba-port-cap", &hpriv->saved_port_cap[port])) hpriv->saved_port_cap[port] &= PORT_CMD_CAP; @@ -460,7 +457,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, int child_nodes, rc = -ENOMEM, enabled_ports = 0; struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; - struct device_node *child; u32 mask_port_map = 0; if (!devres_open_group(dev, NULL, GFP_KERNEL)) @@ -579,7 +575,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, } if (child_nodes) { - for_each_child_of_node(dev->of_node, child) { + for_each_child_of_node_scoped(dev->of_node, child) { u32 port; struct platform_device *port_dev __maybe_unused; @@ -588,7 +584,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, if (of_property_read_u32(child, "reg", &port)) { rc = -EINVAL; - of_node_put(child); goto err_out; } @@ -606,18 +601,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, if (port_dev) { rc = ahci_platform_get_regulator(hpriv, port, &port_dev->dev); - if (rc == -EPROBE_DEFER) { - of_node_put(child); + if (rc == -EPROBE_DEFER) goto err_out; - } } #endif rc = ahci_platform_get_phy(hpriv, port, dev, child); - if (rc) { - of_node_put(child); + if (rc) goto err_out; - } enabled_ports++; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 30932552437a7e..c085dd81ebe7f6 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -50,7 +50,7 @@ #include #include #include -#include +#include 
#include #include #include @@ -72,19 +72,11 @@ const struct ata_port_operations ata_base_port_ops = { .end_eh = ata_std_end_eh, }; -const struct ata_port_operations sata_port_ops = { - .inherits = &ata_base_port_ops, - - .qc_defer = ata_std_qc_defer, - .hardreset = sata_std_hardreset, -}; -EXPORT_SYMBOL_GPL(sata_port_ops); - static unsigned int ata_dev_init_params(struct ata_device *dev, u16 heads, u16 sectors); static unsigned int ata_dev_set_xfermode(struct ata_device *dev); static void ata_dev_xfermask(struct ata_device *dev); -static unsigned long ata_dev_blacklisted(const struct ata_device *dev); +static unsigned int ata_dev_quirks(const struct ata_device *dev); static DEFINE_IDA(ata_ida); @@ -94,8 +86,8 @@ struct ata_force_param { u8 cbl; u8 spd_limit; unsigned int xfer_mask; - unsigned int horkage_on; - unsigned int horkage_off; + unsigned int quirk_on; + unsigned int quirk_off; u16 lflags_on; u16 lflags_off; }; @@ -160,18 +152,13 @@ MODULE_DESCRIPTION("Library module for ATA devices"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); -static inline bool ata_dev_print_info(struct ata_device *dev) +static inline bool ata_dev_print_info(const struct ata_device *dev) { struct ata_eh_context *ehc = &dev->link->eh_context; return ehc->i.flags & ATA_EHI_PRINTINFO; } -static bool ata_sstatus_online(u32 sstatus) -{ - return (sstatus & 0xf) == 0x3; -} - /** * ata_link_next - link iteration helper * @link: the previous link, NULL to start @@ -457,17 +444,17 @@ static void ata_force_xfermask(struct ata_device *dev) } /** - * ata_force_horkage - force horkage according to libata.force + * ata_force_quirks - force quirks according to libata.force * @dev: ATA device of interest * - * Force horkage according to libata.force and whine about it. + * Force quirks according to libata.force and whine about it. * For consistency with link selection, device number 15 selects * the first device connected to the host link. * * LOCKING: * EH context. */ -static void ata_force_horkage(struct ata_device *dev) +static void ata_force_quirks(struct ata_device *dev) { int devno = dev->link->pmp + dev->devno; int alt_devno = devno; @@ -487,21 +474,21 @@ static void ata_force_horkage(struct ata_device *dev) fe->device != alt_devno) continue; - if (!(~dev->horkage & fe->param.horkage_on) && - !(dev->horkage & fe->param.horkage_off)) + if (!(~dev->quirks & fe->param.quirk_on) && + !(dev->quirks & fe->param.quirk_off)) continue; - dev->horkage |= fe->param.horkage_on; - dev->horkage &= ~fe->param.horkage_off; + dev->quirks |= fe->param.quirk_on; + dev->quirks &= ~fe->param.quirk_off; - ata_dev_notice(dev, "FORCE: horkage modified (%s)\n", + ata_dev_notice(dev, "FORCE: modified (%s)\n", fe->param.name); } } #else static inline void ata_force_link_limits(struct ata_link *link) { } static inline void ata_force_xfermask(struct ata_device *dev) { } -static inline void ata_force_horkage(struct ata_device *dev) { } +static inline void ata_force_quirks(struct ata_device *dev) { } #endif /** @@ -1221,7 +1208,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) *max_sectors = ata_tf_to_lba48(&tf) + 1; else *max_sectors = ata_tf_to_lba(&tf) + 1; - if (dev->horkage & ATA_HORKAGE_HPA_SIZE) + if (dev->quirks & ATA_QUIRK_HPA_SIZE) (*max_sectors)--; return 0; } @@ -1306,7 +1293,7 @@ static int ata_hpa_resize(struct ata_device *dev) /* do we need to do it? 
*/ if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) || !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || - (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) + (dev->quirks & ATA_QUIRK_BROKEN_HPA)) return 0; /* read native max address */ @@ -1318,7 +1305,7 @@ static int ata_hpa_resize(struct ata_device *dev) if (rc == -EACCES || !unlock_hpa) { ata_dev_warn(dev, "HPA support seems broken, skipping HPA handling\n"); - dev->horkage |= ATA_HORKAGE_BROKEN_HPA; + dev->quirks |= ATA_QUIRK_BROKEN_HPA; /* we can continue if device aborted the command */ if (rc == -EACCES) @@ -1355,7 +1342,7 @@ static int ata_hpa_resize(struct ata_device *dev) "device aborted resize (%llu -> %llu), skipping HPA handling\n", (unsigned long long)sectors, (unsigned long long)native_sectors); - dev->horkage |= ATA_HORKAGE_BROKEN_HPA; + dev->quirks |= ATA_QUIRK_BROKEN_HPA; return 0; } else if (rc) return rc; @@ -1835,7 +1822,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, goto err_out; } - if (dev->horkage & ATA_HORKAGE_DUMP_ID) { + if (dev->quirks & ATA_QUIRK_DUMP_ID) { ata_dev_info(dev, "dumping IDENTIFY data, " "class=%d may_fallback=%d tried_spinup=%d\n", class, may_fallback, tried_spinup); @@ -2104,7 +2091,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, retry: ata_tf_init(dev, &tf); if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) && - !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) { + !(dev->quirks & ATA_QUIRK_NO_DMA_LOG)) { tf.command = ATA_CMD_READ_LOG_DMA_EXT; tf.protocol = ATA_PROT_DMA; dma = true; @@ -2124,7 +2111,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, if (err_mask) { if (dma) { - dev->horkage |= ATA_HORKAGE_NO_DMA_LOG; + dev->quirks |= ATA_QUIRK_NO_DMA_LOG; if (!ata_port_is_frozen(dev->link->ap)) goto retry; } @@ -2138,22 +2125,19 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, static int ata_log_supported(struct ata_device *dev, u8 log) { - struct ata_port *ap = dev->link->ap; - - if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR) + if (dev->quirks & ATA_QUIRK_NO_LOG_DIR) return 0; - if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) + if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1)) return 0; - return get_unaligned_le16(&ap->sector_buf[log * 2]); + return get_unaligned_le16(&dev->sector_buf[log * 2]); } static bool ata_identify_page_supported(struct ata_device *dev, u8 page) { - struct ata_port *ap = dev->link->ap; unsigned int err, i; - if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG) + if (dev->quirks & ATA_QUIRK_NO_ID_DEV_LOG) return false; if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) { @@ -2165,7 +2149,7 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page) if (ata_id_major_version(dev->id) >= 10) ata_dev_warn(dev, "ATA Identify Device Log not supported\n"); - dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG; + dev->quirks |= ATA_QUIRK_NO_ID_DEV_LOG; return false; } @@ -2173,20 +2157,20 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page) * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is * supported. 
*/ - err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf, - 1); + err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, + dev->sector_buf, 1); if (err) return false; - for (i = 0; i < ap->sector_buf[8]; i++) { - if (ap->sector_buf[9 + i] == page) + for (i = 0; i < dev->sector_buf[8]; i++) { + if (dev->sector_buf[9 + i] == page) return true; } return false; } -static int ata_do_link_spd_horkage(struct ata_device *dev) +static int ata_do_link_spd_quirk(struct ata_device *dev) { struct ata_link *plink = ata_dev_phys_link(dev); u32 target, target_limit; @@ -2194,7 +2178,7 @@ static int ata_do_link_spd_horkage(struct ata_device *dev) if (!sata_scr_valid(plink)) return 0; - if (dev->horkage & ATA_HORKAGE_1_5_GBPS) + if (dev->quirks & ATA_QUIRK_1_5_GBPS) target = 1; else return 0; @@ -2212,26 +2196,25 @@ static int ata_do_link_spd_horkage(struct ata_device *dev) * guaranteed by setting sata_spd_limit to target_limit above. */ if (plink->sata_spd > target) { - ata_dev_info(dev, "applying link speed limit horkage to %s\n", + ata_dev_info(dev, "applying link speed limit quirk to %s\n", sata_spd_string(target)); return -EAGAIN; } return 0; } -static inline u8 ata_dev_knobble(struct ata_device *dev) +static inline bool ata_dev_knobble(struct ata_device *dev) { struct ata_port *ap = dev->link->ap; - if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) - return 0; + if (ata_dev_quirks(dev) & ATA_QUIRK_BRIDGE_OK) + return false; return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); } static void ata_dev_config_ncq_send_recv(struct ata_device *dev) { - struct ata_port *ap = dev->link->ap; unsigned int err_mask; if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) { @@ -2239,14 +2222,14 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev) return; } err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, - 0, ap->sector_buf, 1); + 0, dev->sector_buf, 1); if (!err_mask) { u8 *cmds = dev->ncq_send_recv_cmds; dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; - memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); + memcpy(cmds, dev->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); - if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { + if (dev->quirks & ATA_QUIRK_NO_NCQ_TRIM) { ata_dev_dbg(dev, "disabling queued TRIM support\n"); cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; @@ -2256,7 +2239,6 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev) static void ata_dev_config_ncq_non_data(struct ata_device *dev) { - struct ata_port *ap = dev->link->ap; unsigned int err_mask; if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) { @@ -2265,17 +2247,14 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev) return; } err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA, - 0, ap->sector_buf, 1); - if (!err_mask) { - u8 *cmds = dev->ncq_non_data_cmds; - - memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE); - } + 0, dev->sector_buf, 1); + if (!err_mask) + memcpy(dev->ncq_non_data_cmds, dev->sector_buf, + ATA_LOG_NCQ_NON_DATA_SIZE); } static void ata_dev_config_ncq_prio(struct ata_device *dev) { - struct ata_port *ap = dev->link->ap; unsigned int err_mask; if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS)) @@ -2284,12 +2263,11 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev) err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SATA_SETTINGS, - ap->sector_buf, - 1); + dev->sector_buf, 1); if (err_mask) goto not_supported; - if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3))) + if 
(!(dev->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3))) goto not_supported; dev->flags |= ATA_DFLAG_NCQ_PRIO; @@ -2334,12 +2312,12 @@ static int ata_dev_config_ncq(struct ata_device *dev, } if (!IS_ENABLED(CONFIG_SATA_HOST)) return 0; - if (dev->horkage & ATA_HORKAGE_NONCQ) { + if (dev->quirks & ATA_QUIRK_NONCQ) { snprintf(desc, desc_sz, "NCQ (not used)"); return 0; } - if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI && + if (dev->quirks & ATA_QUIRK_NO_NCQ_ON_ATI && ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) { snprintf(desc, desc_sz, "NCQ (not used)"); return 0; @@ -2350,7 +2328,7 @@ static int ata_dev_config_ncq(struct ata_device *dev, dev->flags |= ATA_DFLAG_NCQ; } - if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && + if (!(dev->quirks & ATA_QUIRK_BROKEN_FPDMA_AA) && (ap->flags & ATA_FLAG_FPDMA_AA) && ata_id_has_fpdma_aa(dev->id)) { err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, @@ -2360,7 +2338,7 @@ static int ata_dev_config_ncq(struct ata_device *dev, "failed to enable AA (error_mask=0x%x)\n", err_mask); if (err_mask != AC_ERR_DEV) { - dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; + dev->quirks |= ATA_QUIRK_BROKEN_FPDMA_AA; return -EIO; } } else @@ -2405,9 +2383,8 @@ static void ata_dev_config_sense_reporting(struct ata_device *dev) static void ata_dev_config_zac(struct ata_device *dev) { - struct ata_port *ap = dev->link->ap; unsigned int err_mask; - u8 *identify_buf = ap->sector_buf; + u8 *identify_buf = dev->sector_buf; dev->zac_zones_optimal_open = U32_MAX; dev->zac_zones_optimal_nonseq = U32_MAX; @@ -2459,7 +2436,6 @@ static void ata_dev_config_zac(struct ata_device *dev) static void ata_dev_config_trusted(struct ata_device *dev) { - struct ata_port *ap = dev->link->ap; u64 trusted_cap; unsigned int err; @@ -2473,11 +2449,11 @@ static void ata_dev_config_trusted(struct ata_device *dev) } err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY, - ap->sector_buf, 1); + dev->sector_buf, 1); if (err) return; - trusted_cap = get_unaligned_le64(&ap->sector_buf[40]); + trusted_cap = get_unaligned_le64(&dev->sector_buf[40]); if (!(trusted_cap & (1ULL << 63))) { ata_dev_dbg(dev, "Trusted Computing capability qword not valid!\n"); @@ -2488,12 +2464,41 @@ static void ata_dev_config_trusted(struct ata_device *dev) dev->flags |= ATA_DFLAG_TRUSTED; } +void ata_dev_cleanup_cdl_resources(struct ata_device *dev) +{ + kfree(dev->cdl); + dev->cdl = NULL; +} + +static int ata_dev_init_cdl_resources(struct ata_device *dev) +{ + struct ata_cdl *cdl = dev->cdl; + unsigned int err_mask; + + if (!cdl) { + cdl = kzalloc(sizeof(*cdl), GFP_KERNEL); + if (!cdl) + return -ENOMEM; + dev->cdl = cdl; + } + + err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, cdl->desc_log_buf, + ATA_LOG_CDL_SIZE / ATA_SECT_SIZE); + if (err_mask) { + ata_dev_warn(dev, "Read Command Duration Limits log failed\n"); + ata_dev_cleanup_cdl_resources(dev); + return -EIO; + } + + return 0; +} + static void ata_dev_config_cdl(struct ata_device *dev) { - struct ata_port *ap = dev->link->ap; unsigned int err_mask; bool cdl_enabled; u64 val; + int ret; if (ata_id_major_version(dev->id) < 11) goto not_supported; @@ -2505,12 +2510,12 @@ static void ata_dev_config_cdl(struct ata_device *dev) err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SUPPORTED_CAPABILITIES, - ap->sector_buf, 1); + dev->sector_buf, 1); if (err_mask) goto not_supported; /* Check Command Duration Limit Supported bits */ - val = get_unaligned_le64(&ap->sector_buf[168]); + val = get_unaligned_le64(&dev->sector_buf[168]); if 
(!(val & BIT_ULL(63)) || !(val & BIT_ULL(0))) goto not_supported; @@ -2523,7 +2528,7 @@ static void ata_dev_config_cdl(struct ata_device *dev) * We must have support for the sense data for successful NCQ commands * log indicated by the successful NCQ command sense data supported bit. */ - val = get_unaligned_le64(&ap->sector_buf[8]); + val = get_unaligned_le64(&dev->sector_buf[8]); if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) { ata_dev_warn(dev, "CDL supported but Successful NCQ Command Sense Data is not supported\n"); @@ -2543,11 +2548,11 @@ static void ata_dev_config_cdl(struct ata_device *dev) */ err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_CURRENT_SETTINGS, - ap->sector_buf, 1); + dev->sector_buf, 1); if (err_mask) goto not_supported; - val = get_unaligned_le64(&ap->sector_buf[8]); + val = get_unaligned_le64(&dev->sector_buf[8]); cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21); if (dev->flags & ATA_DFLAG_CDL_ENABLED) { if (!cdl_enabled) { @@ -2588,37 +2593,20 @@ static void ata_dev_config_cdl(struct ata_device *dev) } } - /* - * Allocate a buffer to handle reading the sense data for successful - * NCQ Commands log page for commands using a CDL with one of the limit - * policy set to 0xD (successful completion with sense data available - * bit set). - */ - if (!ap->ncq_sense_buf) { - ap->ncq_sense_buf = kmalloc(ATA_LOG_SENSE_NCQ_SIZE, GFP_KERNEL); - if (!ap->ncq_sense_buf) - goto not_supported; - } - - /* - * Command duration limits is supported: cache the CDL log page 18h - * (command duration descriptors). - */ - err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1); - if (err_mask) { - ata_dev_warn(dev, "Read Command Duration Limits log failed\n"); + /* CDL is supported: allocate and initialize needed resources. 
*/ + ret = ata_dev_init_cdl_resources(dev); + if (ret) { + ata_dev_warn(dev, "Initialize CDL resources failed\n"); goto not_supported; } - memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE); dev->flags |= ATA_DFLAG_CDL; return; not_supported: dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED); - kfree(ap->ncq_sense_buf); - ap->ncq_sense_buf = NULL; + ata_dev_cleanup_cdl_resources(dev); } static int ata_dev_config_lba(struct ata_device *dev) @@ -2689,7 +2677,7 @@ static void ata_dev_config_fua(struct ata_device *dev) goto nofua; /* Ignore known bad devices and devices that lack NCQ support */ - if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA)) + if (!ata_ncq_supported(dev) || (dev->quirks & ATA_QUIRK_NO_FUA)) goto nofua; dev->flags |= ATA_DFLAG_FUA; @@ -2702,7 +2690,7 @@ static void ata_dev_config_fua(struct ata_device *dev) static void ata_dev_config_devslp(struct ata_device *dev) { - u8 *sata_setting = dev->link->ap->sector_buf; + u8 *sata_setting = dev->sector_buf; unsigned int err_mask; int i, j; @@ -2829,11 +2817,11 @@ int ata_dev_configure(struct ata_device *dev) return 0; } - /* set horkage */ - dev->horkage |= ata_dev_blacklisted(dev); - ata_force_horkage(dev); + /* Set quirks */ + dev->quirks |= ata_dev_quirks(dev); + ata_force_quirks(dev); - if (dev->horkage & ATA_HORKAGE_DISABLE) { + if (dev->quirks & ATA_QUIRK_DISABLE) { ata_dev_info(dev, "unsupported device, disabling\n"); ata_dev_disable(dev); return 0; @@ -2848,19 +2836,19 @@ int ata_dev_configure(struct ata_device *dev) return 0; } - rc = ata_do_link_spd_horkage(dev); + rc = ata_do_link_spd_quirk(dev); if (rc) return rc; /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ - if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && + if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) && (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) - dev->horkage |= ATA_HORKAGE_NOLPM; + dev->quirks |= ATA_QUIRK_NOLPM; if (ap->flags & ATA_FLAG_NO_LPM) - dev->horkage |= ATA_HORKAGE_NOLPM; + dev->quirks |= ATA_QUIRK_NOLPM; - if (dev->horkage & ATA_HORKAGE_NOLPM) { + if (dev->quirks & ATA_QUIRK_NOLPM) { ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; } @@ -3006,7 +2994,8 @@ int ata_dev_configure(struct ata_device *dev) cdb_intr_string = ", CDB intr"; } - if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { + if (atapi_dmadir || (dev->quirks & ATA_QUIRK_ATAPI_DMADIR) || + atapi_id_dmadir(dev->id)) { dev->flags |= ATA_DFLAG_DMADIR; dma_dir_string = ", DMADIR"; } @@ -3043,24 +3032,24 @@ int ata_dev_configure(struct ata_device *dev) if ((dev->class == ATA_DEV_ATAPI) && (atapi_command_packet_set(id) == TYPE_TAPE)) { dev->max_sectors = ATA_MAX_SECTORS_TAPE; - dev->horkage |= ATA_HORKAGE_STUCK_ERR; + dev->quirks |= ATA_QUIRK_STUCK_ERR; } - if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) + if (dev->quirks & ATA_QUIRK_MAX_SEC_128) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); - if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) + if (dev->quirks & ATA_QUIRK_MAX_SEC_1024) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, dev->max_sectors); - if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) + if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48) dev->max_sectors = ATA_MAX_SECTORS_LBA48; if (ap->ops->dev_config) ap->ops->dev_config(dev); - if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { + if (dev->quirks & ATA_QUIRK_DIAGNOSTIC) { /* Let the user know. 
We don't want to disallow opens for rescue purposes, or in case the vendor is just a blithering idiot. Do this after the dev_config call as some controllers @@ -3075,7 +3064,7 @@ int ata_dev_configure(struct ata_device *dev) } } - if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { + if ((dev->quirks & ATA_QUIRK_FIRMWARE_WARN) && print_info) { ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); } @@ -3199,86 +3188,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev) } EXPORT_SYMBOL_GPL(ata_dev_pair); -/** - * sata_down_spd_limit - adjust SATA spd limit downward - * @link: Link to adjust SATA spd limit for - * @spd_limit: Additional limit - * - * Adjust SATA spd limit of @link downward. Note that this - * function only adjusts the limit. The change must be applied - * using sata_set_spd(). - * - * If @spd_limit is non-zero, the speed is limited to equal to or - * lower than @spd_limit if such speed is supported. If - * @spd_limit is slower than any supported speed, only the lowest - * supported speed is allowed. - * - * LOCKING: - * Inherited from caller. - * - * RETURNS: - * 0 on success, negative errno on failure - */ -int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) -{ - u32 sstatus, spd, mask; - int rc, bit; - - if (!sata_scr_valid(link)) - return -EOPNOTSUPP; - - /* If SCR can be read, use it to determine the current SPD. - * If not, use cached value in link->sata_spd. - */ - rc = sata_scr_read(link, SCR_STATUS, &sstatus); - if (rc == 0 && ata_sstatus_online(sstatus)) - spd = (sstatus >> 4) & 0xf; - else - spd = link->sata_spd; - - mask = link->sata_spd_limit; - if (mask <= 1) - return -EINVAL; - - /* unconditionally mask off the highest bit */ - bit = fls(mask) - 1; - mask &= ~(1 << bit); - - /* - * Mask off all speeds higher than or equal to the current one. At - * this point, if current SPD is not available and we previously - * recorded the link speed from SStatus, the driver has already - * masked off the highest bit so mask should already be 1 or 0. - * Otherwise, we should not force 1.5Gbps on a link where we have - * not previously recorded speed from SStatus. Just return in this - * case. - */ - if (spd > 1) - mask &= (1 << (spd - 1)) - 1; - else if (link->sata_spd) - return -EINVAL; - - /* were we already at the bottom? 
*/ - if (!mask) - return -EINVAL; - - if (spd_limit) { - if (mask & ((1 << spd_limit) - 1)) - mask &= (1 << spd_limit) - 1; - else { - bit = ffs(mask) - 1; - mask = 1 << bit; - } - } - - link->sata_spd_limit = mask; - - ata_link_warn(link, "limiting SATA link speed to %s\n", - sata_spd_string(fls(mask))); - - return 0; -} - #ifdef CONFIG_ATA_ACPI /** * ata_timing_cycle2mode - find xfer mode for the specified cycle duration @@ -3425,7 +3334,7 @@ static int ata_dev_set_mode(struct ata_device *dev) { struct ata_port *ap = dev->link->ap; struct ata_eh_context *ehc = &dev->link->eh_context; - const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; + const bool nosetxfer = dev->quirks & ATA_QUIRK_NOSETXFER; const char *dev_err_whine = ""; int ign_dev_err = 0; unsigned int err_mask = 0; @@ -3760,33 +3669,6 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline) } EXPORT_SYMBOL_GPL(ata_std_prereset); -/** - * sata_std_hardreset - COMRESET w/o waiting or classification - * @link: link to reset - * @class: resulting class of attached device - * @deadline: deadline jiffies for the operation - * - * Standard SATA COMRESET w/o waiting or classification. - * - * LOCKING: - * Kernel thread context (may sleep) - * - * RETURNS: - * 0 if link offline, -EAGAIN if link online, -errno on errors. - */ -int sata_std_hardreset(struct ata_link *link, unsigned int *class, - unsigned long deadline) -{ - const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context); - bool online; - int rc; - - /* do hardreset */ - rc = sata_link_hardreset(link, timing, deadline, &online, NULL); - return online ? -EAGAIN : rc; -} -EXPORT_SYMBOL_GPL(sata_std_hardreset); - /** * ata_std_postreset - standard postreset callback * @link: the target ata_link @@ -3878,7 +3760,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) { unsigned int class = dev->class; - u16 *id = (void *)dev->link->ap->sector_buf; + u16 *id = (void *)dev->sector_buf; int rc; /* read ID data */ @@ -3969,7 +3851,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, */ if (dev->n_native_sectors == n_native_sectors && dev->n_sectors < n_sectors && n_sectors == n_native_sectors && - !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { + !(dev->quirks & ATA_QUIRK_BROKEN_HPA)) { ata_dev_warn(dev, "old n_sectors matches native, probably " "late HPA lock, will try to unlock HPA\n"); @@ -3987,223 +3869,292 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, return rc; } -struct ata_blacklist_entry { +static const char * const ata_quirk_names[] = { + [__ATA_QUIRK_DIAGNOSTIC] = "diagnostic", + [__ATA_QUIRK_NODMA] = "nodma", + [__ATA_QUIRK_NONCQ] = "noncq", + [__ATA_QUIRK_MAX_SEC_128] = "maxsec128", + [__ATA_QUIRK_BROKEN_HPA] = "brokenhpa", + [__ATA_QUIRK_DISABLE] = "disable", + [__ATA_QUIRK_HPA_SIZE] = "hpasize", + [__ATA_QUIRK_IVB] = "ivb", + [__ATA_QUIRK_STUCK_ERR] = "stuckerr", + [__ATA_QUIRK_BRIDGE_OK] = "bridgeok", + [__ATA_QUIRK_ATAPI_MOD16_DMA] = "atapimod16dma", + [__ATA_QUIRK_FIRMWARE_WARN] = "firmwarewarn", + [__ATA_QUIRK_1_5_GBPS] = "1.5gbps", + [__ATA_QUIRK_NOSETXFER] = "nosetxfer", + [__ATA_QUIRK_BROKEN_FPDMA_AA] = "brokenfpdmaaa", + [__ATA_QUIRK_DUMP_ID] = "dumpid", + [__ATA_QUIRK_MAX_SEC_LBA48] = "maxseclba48", + [__ATA_QUIRK_ATAPI_DMADIR] = "atapidmadir", + [__ATA_QUIRK_NO_NCQ_TRIM] = "noncqtrim", + [__ATA_QUIRK_NOLPM] = "nolpm", + [__ATA_QUIRK_WD_BROKEN_LPM] = "wdbrokenlpm", + 
[__ATA_QUIRK_ZERO_AFTER_TRIM] = "zeroaftertrim", + [__ATA_QUIRK_NO_DMA_LOG] = "nodmalog", + [__ATA_QUIRK_NOTRIM] = "notrim", + [__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024", + [__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m", + [__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati", + [__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog", + [__ATA_QUIRK_NO_LOG_DIR] = "nologdir", + [__ATA_QUIRK_NO_FUA] = "nofua", +}; + +static void ata_dev_print_quirks(const struct ata_device *dev, + const char *model, const char *rev, + unsigned int quirks) +{ + struct ata_eh_context *ehc = &dev->link->eh_context; + int n = 0, i; + size_t sz; + char *str; + + if (!ata_dev_print_info(dev) || ehc->i.flags & ATA_EHI_DID_PRINT_QUIRKS) + return; + + ehc->i.flags |= ATA_EHI_DID_PRINT_QUIRKS; + + if (!quirks) + return; + + sz = 64 + ARRAY_SIZE(ata_quirk_names) * 16; + str = kmalloc(sz, GFP_KERNEL); + if (!str) + return; + + n = snprintf(str, sz, "Model '%s', rev '%s', applying quirks:", + model, rev); + + for (i = 0; i < ARRAY_SIZE(ata_quirk_names); i++) { + if (quirks & (1U << i)) + n += snprintf(str + n, sz - n, + " %s", ata_quirk_names[i]); + } + + ata_dev_warn(dev, "%s\n", str); + + kfree(str); +} + +struct ata_dev_quirks_entry { const char *model_num; const char *model_rev; - unsigned long horkage; + unsigned int quirks; }; -static const struct ata_blacklist_entry ata_device_blacklist [] = { +static const struct ata_dev_quirks_entry __ata_dev_quirks[] = { /* Devices with DMA related problems under Linux */ - { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, - { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, - { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, - { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, - { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, - { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, - { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, - { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, - { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, - { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, - { "CRD-84", NULL, ATA_HORKAGE_NODMA }, - { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, - { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, - { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, - { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, - { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, - { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, - { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, - { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, - { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, - { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, - { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, - { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, - { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, - { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, - { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, - { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, - { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, - { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, - { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, + { "WDC AC11000H", NULL, ATA_QUIRK_NODMA }, + { "WDC AC22100H", NULL, ATA_QUIRK_NODMA }, + { "WDC AC32500H", NULL, ATA_QUIRK_NODMA }, + { "WDC AC33100H", NULL, ATA_QUIRK_NODMA }, + { "WDC AC31600H", NULL, ATA_QUIRK_NODMA }, + { "WDC AC32100H", "24.09P07", ATA_QUIRK_NODMA }, + { "WDC AC23200L", "21.10N21", ATA_QUIRK_NODMA }, + { "Compaq CRD-8241B", NULL, ATA_QUIRK_NODMA }, + { "CRD-8400B", NULL, ATA_QUIRK_NODMA }, + { "CRD-848[02]B", NULL, ATA_QUIRK_NODMA }, + { "CRD-84", NULL, ATA_QUIRK_NODMA }, + { "SanDisk SDP3B", NULL, ATA_QUIRK_NODMA 
}, + { "SanDisk SDP3B-64", NULL, ATA_QUIRK_NODMA }, + { "SANYO CD-ROM CRD", NULL, ATA_QUIRK_NODMA }, + { "HITACHI CDR-8", NULL, ATA_QUIRK_NODMA }, + { "HITACHI CDR-8[34]35", NULL, ATA_QUIRK_NODMA }, + { "Toshiba CD-ROM XM-6202B", NULL, ATA_QUIRK_NODMA }, + { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_QUIRK_NODMA }, + { "CD-532E-A", NULL, ATA_QUIRK_NODMA }, + { "E-IDE CD-ROM CR-840", NULL, ATA_QUIRK_NODMA }, + { "CD-ROM Drive/F5A", NULL, ATA_QUIRK_NODMA }, + { "WPI CDD-820", NULL, ATA_QUIRK_NODMA }, + { "SAMSUNG CD-ROM SC-148C", NULL, ATA_QUIRK_NODMA }, + { "SAMSUNG CD-ROM SC", NULL, ATA_QUIRK_NODMA }, + { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_QUIRK_NODMA }, + { "_NEC DV5800A", NULL, ATA_QUIRK_NODMA }, + { "SAMSUNG CD-ROM SN-124", "N001", ATA_QUIRK_NODMA }, + { "Seagate STT20000A", NULL, ATA_QUIRK_NODMA }, + { " 2GB ATA Flash Disk", "ADMA428M", ATA_QUIRK_NODMA }, + { "VRFDFC22048UCHC-TE*", NULL, ATA_QUIRK_NODMA }, /* Odd clown on sil3726/4726 PMPs */ - { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, + { "Config Disk", NULL, ATA_QUIRK_DISABLE }, /* Similar story with ASMedia 1092 */ - { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE }, + { "ASMT109x- Config", NULL, ATA_QUIRK_DISABLE }, /* Weird ATAPI devices */ - { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, - { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, - { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, - { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, + { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_QUIRK_MAX_SEC_128 }, + { "QUANTUM DAT DAT72-000", NULL, ATA_QUIRK_ATAPI_MOD16_DMA }, + { "Slimtype DVD A DS8A8SH", NULL, ATA_QUIRK_MAX_SEC_LBA48 }, + { "Slimtype DVD A DS8A9SH", NULL, ATA_QUIRK_MAX_SEC_LBA48 }, /* * Causes silent data corruption with higher max sects. * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com */ - { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, + { "ST380013AS", "3.20", ATA_QUIRK_MAX_SEC_1024 }, /* * These devices time out with higher max sects. 
* https://bugzilla.kernel.org/show_bug.cgi?id=121671 */ - { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, - { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 }, + { "LITEON CX1-JB*-HP", NULL, ATA_QUIRK_MAX_SEC_1024 }, + { "LITEON EP1-*", NULL, ATA_QUIRK_MAX_SEC_1024 }, /* Devices we expect to fail diagnostics */ /* Devices where NCQ should be avoided */ /* NCQ is slow */ - { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, - { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ }, + { "WDC WD740ADFD-00", NULL, ATA_QUIRK_NONCQ }, + { "WDC WD740ADFD-00NLR1", NULL, ATA_QUIRK_NONCQ }, /* http://thread.gmane.org/gmane.linux.ide/14907 */ - { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, + { "FUJITSU MHT2060BH", NULL, ATA_QUIRK_NONCQ }, /* NCQ is broken */ - { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, - { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, - { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, - { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, - { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, + { "Maxtor *", "BANC*", ATA_QUIRK_NONCQ }, + { "Maxtor 7V300F0", "VA111630", ATA_QUIRK_NONCQ }, + { "ST380817AS", "3.42", ATA_QUIRK_NONCQ }, + { "ST3160023AS", "3.42", ATA_QUIRK_NONCQ }, + { "OCZ CORE_SSD", "02.10104", ATA_QUIRK_NONCQ }, /* Seagate NCQ + FLUSH CACHE firmware bug */ - { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | - ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31500341AS", "SD1[5-9]", ATA_QUIRK_NONCQ | + ATA_QUIRK_FIRMWARE_WARN }, - { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | - ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD1[5-9]", ATA_QUIRK_NONCQ | + ATA_QUIRK_FIRMWARE_WARN }, - { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | - ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640[36]23AS", "SD1[5-9]", ATA_QUIRK_NONCQ | + ATA_QUIRK_FIRMWARE_WARN }, - { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | - ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320[68]13AS", "SD1[5-9]", ATA_QUIRK_NONCQ | + ATA_QUIRK_FIRMWARE_WARN }, /* drives which fail FPDMA_AA activation (some may freeze afterwards) the ST disks also have LPM issues */ - { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA | - ATA_HORKAGE_NOLPM }, - { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, + { "ST1000LM024 HN-M101MBB", NULL, ATA_QUIRK_BROKEN_FPDMA_AA | + ATA_QUIRK_NOLPM }, + { "VB0250EAVER", "HPG7", ATA_QUIRK_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ - { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ }, - { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ }, - { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ }, + { "HTS541060G9SA00", "MB3OC60D", ATA_QUIRK_NONCQ }, + { "HTS541080G9SA00", "MB4OC60D", ATA_QUIRK_NONCQ }, + { "HTS541010G9SA00", "MBZOC60D", ATA_QUIRK_NONCQ }, /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ - { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ }, + { "C300-CTFDDAC128MAG", "0001", ATA_QUIRK_NONCQ }, /* Sandisk SD7/8/9s lock up hard on large trims */ - { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M }, + { "SanDisk SD[789]*", NULL, ATA_QUIRK_MAX_TRIM_128M }, /* devices which puke on READ_NATIVE_MAX */ - { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA }, - { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, - { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, - { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, + { "HDS724040KLSA80", "KFAOA20N", ATA_QUIRK_BROKEN_HPA }, + { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", 
ATA_QUIRK_BROKEN_HPA }, + { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_QUIRK_BROKEN_HPA }, + { "MAXTOR 6L080L4", "A93.0500", ATA_QUIRK_BROKEN_HPA }, /* this one allows HPA unlocking but fails IOs on the area */ - { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, + { "OCZ-VERTEX", "1.30", ATA_QUIRK_BROKEN_HPA }, /* Devices which report 1 sector over size HPA */ - { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE }, - { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE }, - { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE }, + { "ST340823A", NULL, ATA_QUIRK_HPA_SIZE }, + { "ST320413A", NULL, ATA_QUIRK_HPA_SIZE }, + { "ST310211A", NULL, ATA_QUIRK_HPA_SIZE }, /* Devices which get the IVB wrong */ - { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB }, - /* Maybe we should just blacklist TSSTcorp... */ - { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB }, + { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_QUIRK_IVB }, + /* Maybe we should just add all TSSTcorp devices... */ + { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_QUIRK_IVB }, /* Devices that do not need bridging limits applied */ - { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK }, - { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK }, + { "MTRON MSP-SATA*", NULL, ATA_QUIRK_BRIDGE_OK }, + { "BUFFALO HD-QSU2/R5", NULL, ATA_QUIRK_BRIDGE_OK }, /* Devices which aren't very happy with higher link speeds */ - { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS }, - { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS }, + { "WD My Book", NULL, ATA_QUIRK_1_5_GBPS }, + { "Seagate FreeAgent GoFlex", NULL, ATA_QUIRK_1_5_GBPS }, /* * Devices which choke on SETXFER. Applies only if both the * device and controller are SATA. */ - { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, - { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, - { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, - { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, - { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVRTD08", NULL, ATA_QUIRK_NOSETXFER }, + { "PIONEER DVD-RW DVRTD08A", NULL, ATA_QUIRK_NOSETXFER }, + { "PIONEER DVD-RW DVR-215", NULL, ATA_QUIRK_NOSETXFER }, + { "PIONEER DVD-RW DVR-212D", NULL, ATA_QUIRK_NOSETXFER }, + { "PIONEER DVD-RW DVR-216D", NULL, ATA_QUIRK_NOSETXFER }, /* These specific Pioneer models have LPM issues */ - { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM }, - { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM }, + { "PIONEER BD-RW BDR-207M", NULL, ATA_QUIRK_NOLPM }, + { "PIONEER BD-RW BDR-205", NULL, ATA_QUIRK_NOLPM }, /* Crucial devices with broken LPM support */ - { "CT*0BX*00SSD1", NULL, ATA_HORKAGE_NOLPM }, + { "CT*0BX*00SSD1", NULL, ATA_QUIRK_NOLPM }, /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ - { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NOLPM }, + { "Crucial_CT512MX100*", "MU01", ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM | + ATA_QUIRK_NOLPM }, /* 512GB MX100 with newer firmware has only LPM issues */ - { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NOLPM }, + { "Crucial_CT512MX100*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM | + ATA_QUIRK_NOLPM }, /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ - { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NOLPM }, - { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NOLPM }, + { "Crucial_CT480M500*", NULL, 
ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM | + ATA_QUIRK_NOLPM }, + { "Crucial_CT960M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM | + ATA_QUIRK_NOLPM }, /* AMD Radeon devices with broken LPM support */ - { "R3SL240G", NULL, ATA_HORKAGE_NOLPM }, + { "R3SL240G", NULL, ATA_QUIRK_NOLPM }, /* Apacer models with LPM issues */ - { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM }, + { "Apacer AS340*", NULL, ATA_QUIRK_NOLPM }, /* These specific Samsung models/firmware-revs do not handle LPM well */ - { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM }, - { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM }, - { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM }, - { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM }, + { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_QUIRK_NOLPM }, + { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_QUIRK_NOLPM }, + { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_QUIRK_NOLPM }, + { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_QUIRK_NOLPM }, /* devices that don't properly handle queued TRIM commands */ - { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Micron_1100_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_NO_DMA_LOG | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NO_NCQ_ON_ATI }, - { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NO_NCQ_ON_ATI }, - { "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NO_NCQ_ON_ATI, }, - { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM }, + { "Micron_M500IT_*", "MU01", ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Micron_M500_*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Micron_M5[15]0_*", "MU01", ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Micron_1100_*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM, }, + { "Crucial_CT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Crucial_CT*M550*", "MU01", ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Crucial_CT*MX100*", "MU01", ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Samsung SSD 840 EVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_NO_DMA_LOG | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Samsung SSD 840*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Samsung SSD 850*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM | + ATA_QUIRK_NO_NCQ_ON_ATI }, + { "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM | + 
ATA_QUIRK_NO_NCQ_ON_ATI }, + { "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM | + ATA_QUIRK_NO_NCQ_ON_ATI, }, + { "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM | + ATA_QUIRK_ZERO_AFTER_TRIM }, /* devices that don't properly handle TRIM commands */ - { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM }, - { "M88V29*", NULL, ATA_HORKAGE_NOTRIM }, + { "SuperSSpeed S238*", NULL, ATA_QUIRK_NOTRIM }, + { "M88V29*", NULL, ATA_QUIRK_NOTRIM }, /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT @@ -4223,14 +4174,14 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { */ { "INTEL*SSDSC2MH*", NULL, 0 }, - { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM }, - { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM }, + { "Micron*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Crucial*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM }, + { "INTEL*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM }, + { "SSD*INTEL*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM }, + { "Samsung*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM }, + { "SAMSUNG*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM }, + { "SAMSUNG*MZ7KM*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM }, + { "ST[1248][0248]0[FH]*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM }, /* * Some WD SATA-I drives spin up and down erratically when the link @@ -4241,62 +4192,66 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { * * https://bugzilla.kernel.org/show_bug.cgi?id=57211 */ - { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, - { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, - { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, - { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, - { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, - { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, - { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD800JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM }, + { "WDC WD1200JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM }, + { "WDC WD1600JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM }, + { "WDC WD2000JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM }, + { "WDC WD2500JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM }, + { "WDC WD3000JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM }, + { "WDC WD3200JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM }, /* * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY * log page is accessed. Ensure we never ask for this log page with * these devices. 
*/ - { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR }, + { "SATADOM-ML 3ME", NULL, ATA_QUIRK_NO_LOG_DIR }, /* Buggy FUA */ - { "Maxtor", "BANC1G10", ATA_HORKAGE_NO_FUA }, - { "WDC*WD2500J*", NULL, ATA_HORKAGE_NO_FUA }, - { "OCZ-VERTEX*", NULL, ATA_HORKAGE_NO_FUA }, - { "INTEL*SSDSC2CT*", NULL, ATA_HORKAGE_NO_FUA }, + { "Maxtor", "BANC1G10", ATA_QUIRK_NO_FUA }, + { "WDC*WD2500J*", NULL, ATA_QUIRK_NO_FUA }, + { "OCZ-VERTEX*", NULL, ATA_QUIRK_NO_FUA }, + { "INTEL*SSDSC2CT*", NULL, ATA_QUIRK_NO_FUA }, /* End Marker */ { } }; -static unsigned long ata_dev_blacklisted(const struct ata_device *dev) +static unsigned int ata_dev_quirks(const struct ata_device *dev) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; - const struct ata_blacklist_entry *ad = ata_device_blacklist; + const struct ata_dev_quirks_entry *ad = __ata_dev_quirks; + + /* dev->quirks is an unsigned int. */ + BUILD_BUG_ON(__ATA_QUIRK_MAX > 32); ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); while (ad->model_num) { - if (glob_match(ad->model_num, model_num)) { - if (ad->model_rev == NULL) - return ad->horkage; - if (glob_match(ad->model_rev, model_rev)) - return ad->horkage; + if (glob_match(ad->model_num, model_num) && + (!ad->model_rev || glob_match(ad->model_rev, model_rev))) { + ata_dev_print_quirks(dev, model_num, model_rev, + ad->quirks); + return ad->quirks; } ad++; } return 0; } -static int ata_dma_blacklisted(const struct ata_device *dev) +static bool ata_dev_nodma(const struct ata_device *dev) { - /* We don't support polling DMA. - * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) - * if the LLDD handles only interrupts in the HSM_ST_LAST state. + /* + * We do not support polling DMA. Deny DMA for those ATAPI devices + * with CDB-intr (and use PIO) if the LLDD handles only interrupts in + * the HSM_ST_LAST state. */ if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && (dev->flags & ATA_DFLAG_CDB_INTR)) - return 1; - return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; + return true; + return dev->quirks & ATA_QUIRK_NODMA; } /** @@ -4309,7 +4264,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev) static int ata_is_40wire(struct ata_device *dev) { - if (dev->horkage & ATA_HORKAGE_IVB) + if (dev->quirks & ATA_QUIRK_IVB) return ata_drive_40wire_relaxed(dev->id); return ata_drive_40wire(dev->id); } @@ -4371,8 +4326,7 @@ static int cable_is_40wire(struct ata_port *ap) * * Compute supported xfermask of @dev and store it in * dev->*_mask. This function is responsible for applying all - * known limits including host controller limits, device - * blacklist, etc... + * known limits including host controller limits, device quirks, etc... * * LOCKING: * None. @@ -4404,10 +4358,10 @@ static void ata_dev_xfermask(struct ata_device *dev) xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); } - if (ata_dma_blacklisted(dev)) { + if (ata_dev_nodma(dev)) { xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); ata_dev_warn(dev, - "device is on DMA blacklist, disabling DMA\n"); + "device does not support DMA, disabling DMA\n"); } if ((host->flags & ATA_HOST_SIMPLEX) && @@ -4588,7 +4542,7 @@ int atapi_check_dma(struct ata_queued_cmd *qc) /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a * few ATAPI devices choke on such DMA requests. 
*/ - if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && + if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) && unlikely(qc->nbytes & 15)) return 1; @@ -4629,12 +4583,6 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) } EXPORT_SYMBOL_GPL(ata_std_qc_defer); -enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) -{ - return AC_ERR_OK; -} -EXPORT_SYMBOL_GPL(ata_noop_qc_prep); - /** * ata_sg_init - Associate command with scatter-gather table. * @qc: Command to be associated @@ -4762,8 +4710,9 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) struct ata_port *ap; struct ata_link *link; - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ - WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); + if (WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE))) + return; + ap = qc->ap; link = qc->dev->link; @@ -4785,9 +4734,10 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) ap->excl_link == link)) ap->excl_link = NULL; - /* atapi: mark qc as inactive to prevent the interrupt handler - * from completing the command twice later, before the error handler - * is called. (when rc != 0 and atapi request sense is needed) + /* + * Mark qc as inactive to prevent the port interrupt handler from + * completing the command twice later, before the error handler is + * called. */ qc->flags &= ~ATA_QCFLAG_ACTIVE; ap->qc_active &= ~(1ULL << qc->tag); @@ -5021,10 +4971,13 @@ void ata_qc_issue(struct ata_queued_cmd *qc) return; } - trace_ata_qc_prep(qc); - qc->err_mask |= ap->ops->qc_prep(qc); - if (unlikely(qc->err_mask)) - goto err; + if (ap->ops->qc_prep) { + trace_ata_qc_prep(qc); + qc->err_mask |= ap->ops->qc_prep(qc); + if (unlikely(qc->err_mask)) + goto err; + } + trace_ata_qc_issue(qc); qc->err_mask |= ap->ops->qc_issue(qc); if (unlikely(qc->err_mask)) @@ -5368,7 +5321,7 @@ void ata_dev_init(struct ata_device *dev) */ spin_lock_irqsave(ap->lock, flags); dev->flags &= ~ATA_DFLAG_INIT_MASK; - dev->horkage = 0; + dev->quirks = 0; spin_unlock_irqrestore(ap->lock, flags); memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, @@ -5510,7 +5463,6 @@ void ata_port_free(struct ata_port *ap) kfree(ap->pmp_link); kfree(ap->slave_link); - kfree(ap->ncq_sense_buf); ida_free(&ata_ida, ap->print_id); kfree(ap); } @@ -6037,6 +5989,23 @@ int ata_host_activate(struct ata_host *host, int irq, } EXPORT_SYMBOL_GPL(ata_host_activate); +/** + * ata_dev_free_resources - Free a device resources + * @dev: Target ATA device + * + * Free resources allocated to support a device features. + * + * LOCKING: + * Kernel thread context (may sleep). 
+ */ +void ata_dev_free_resources(struct ata_device *dev) +{ + if (zpodd_dev_enabled(dev)) + zpodd_exit(dev); + + ata_dev_cleanup_cdl_resources(dev); +} + /** * ata_port_detach - Detach ATA port in preparation of device removal * @ap: ATA port to be detached @@ -6091,19 +6060,15 @@ static void ata_port_detach(struct ata_port *ap) cancel_delayed_work_sync(&ap->hotplug_task); cancel_delayed_work_sync(&ap->scsi_rescan_task); - /* clean up zpodd on port removal */ - ata_for_each_link(link, ap, HOST_FIRST) { - ata_for_each_dev(dev, link, ALL) { - if (zpodd_dev_enabled(dev)) - zpodd_exit(dev); - } - } + /* Delete port multiplier link transport devices */ if (ap->pmp_link) { int i; + for (i = 0; i < SATA_PMP_MAX_PORTS; i++) ata_tlink_delete(&ap->pmp_link[i]); } - /* remove the associated SCSI host */ + + /* Remove the associated SCSI host */ scsi_remove_host(ap->scsi_host); ata_tport_delete(ap); } @@ -6299,12 +6264,12 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one); { "no" #name, .lflags_on = (flags) }, \ { #name, .lflags_off = (flags) } -#define force_horkage_on(name, flag) \ - { #name, .horkage_on = (flag) } +#define force_quirk_on(name, flag) \ + { #name, .quirk_on = (flag) } -#define force_horkage_onoff(name, flag) \ - { "no" #name, .horkage_on = (flag) }, \ - { #name, .horkage_off = (flag) } +#define force_quirk_onoff(name, flag) \ + { "no" #name, .quirk_on = (flag) }, \ + { #name, .quirk_off = (flag) } static const struct ata_force_param force_tbl[] __initconst = { force_cbl(40c, ATA_CBL_PATA40), @@ -6358,32 +6323,32 @@ static const struct ata_force_param force_tbl[] __initconst = { force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE), force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY), - force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ), - force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM), - force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI), + force_quirk_onoff(ncq, ATA_QUIRK_NONCQ), + force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM), + force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI), - force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM), - force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM), - force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M), + force_quirk_onoff(trim, ATA_QUIRK_NOTRIM), + force_quirk_on(trim_zero, ATA_QUIRK_ZERO_AFTER_TRIM), + force_quirk_on(max_trim_128m, ATA_QUIRK_MAX_TRIM_128M), - force_horkage_onoff(dma, ATA_HORKAGE_NODMA), - force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR), - force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA), + force_quirk_onoff(dma, ATA_QUIRK_NODMA), + force_quirk_on(atapi_dmadir, ATA_QUIRK_ATAPI_DMADIR), + force_quirk_on(atapi_mod16_dma, ATA_QUIRK_ATAPI_MOD16_DMA), - force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG), - force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG), - force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR), + force_quirk_onoff(dmalog, ATA_QUIRK_NO_DMA_LOG), + force_quirk_onoff(iddevlog, ATA_QUIRK_NO_ID_DEV_LOG), + force_quirk_onoff(logdir, ATA_QUIRK_NO_LOG_DIR), - force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128), - force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024), - force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48), + force_quirk_on(max_sec_128, ATA_QUIRK_MAX_SEC_128), + force_quirk_on(max_sec_1024, ATA_QUIRK_MAX_SEC_1024), + force_quirk_on(max_sec_lba48, ATA_QUIRK_MAX_SEC_LBA48), - force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM), - force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER), - force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID), - force_horkage_onoff(fua, 
ATA_HORKAGE_NO_FUA), + force_quirk_onoff(lpm, ATA_QUIRK_NOLPM), + force_quirk_onoff(setxfer, ATA_QUIRK_NOSETXFER), + force_quirk_on(dump_id, ATA_QUIRK_DUMP_ID), + force_quirk_onoff(fua, ATA_QUIRK_NO_FUA), - force_horkage_on(disable, ATA_HORKAGE_DISABLE), + force_quirk_on(disable, ATA_QUIRK_DISABLE), }; static int __init ata_parse_force_one(char **cur, @@ -6659,7 +6624,6 @@ static void ata_dummy_error_handler(struct ata_port *ap) } struct ata_port_operations ata_dummy_port_ops = { - .qc_prep = ata_noop_qc_prep, .qc_issue = ata_dummy_qc_issue, .error_handler = ata_dummy_error_handler, .sched_eh = ata_std_sched_eh, diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 214b935c2ced79..3f0144e7dc8042 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -500,10 +500,13 @@ static void ata_eh_dev_disable(struct ata_device *dev) ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); dev->class++; - /* From now till the next successful probe, ering is used to + /* + * From now till the next successful probe, ering is used to * track probe failures. Clear accumulated device error info. */ ata_ering_clear(&dev->ering); + + ata_dev_free_resources(dev); } static void ata_eh_unload(struct ata_port *ap) @@ -630,6 +633,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { struct ata_queued_cmd *qc; + /* + * If the scmd was added to EH, via ata_qc_schedule_eh() -> + * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will + * have set DID_TIME_OUT (since libata does not have an abort + * handler). Thus, to clear DID_TIME_OUT, clear the host byte. + */ + set_host_byte(scmd, DID_OK); + ata_qc_for_each_raw(ap, qc, i) { if (qc->flags & ATA_QCFLAG_ACTIVE && qc->scsicmd == scmd) @@ -1401,6 +1412,43 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) return err_mask; } +/** + * ata_eh_decide_disposition - Disposition a qc based on sense data + * @qc: qc to examine + * + * For a regular SCSI command, the SCSI completion callback (scsi_done()) + * will call scsi_complete(), which will call scsi_decide_disposition(), + * which will call scsi_check_sense(). scsi_complete() finally calls + * scsi_finish_command(). This is fine for SCSI, since any eventual sense + * data is usually returned in the completion itself (without invoking SCSI + * EH). However, for a QC, we always need to fetch the sense data + * explicitly using SCSI EH. + * + * A command that is completed via SCSI EH will instead be completed using + * scsi_eh_flush_done_q(), which will call scsi_finish_command() directly + * (without ever calling scsi_check_sense()). + * + * For a command that went through SCSI EH, it is the responsibility of the + * SCSI EH strategy handler to call scsi_decide_disposition(), see e.g. how + * scsi_eh_get_sense() calls scsi_decide_disposition() for SCSI LLDDs that + * do not get the sense data as part of the completion. + * + * Thus, for QC commands that went via SCSI EH, we need to call + * scsi_check_sense() ourselves, similar to how scsi_eh_get_sense() calls + * scsi_decide_disposition(), which calls scsi_check_sense(), in order to + * set the correct SCSI ML byte (if any). + * + * LOCKING: + * EH context. 
+ * + * RETURNS: + * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE + */ +enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc) +{ + return scsi_check_sense(qc->scsicmd); +} + /** * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to @@ -1627,7 +1675,8 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc) } if (qc->flags & ATA_QCFLAG_SENSE_VALID) { - enum scsi_disposition ret = scsi_check_sense(qc->scsicmd); + enum scsi_disposition ret = ata_eh_decide_disposition(qc); + /* * SUCCESS here means that the sense code could be * evaluated and should be passed to the upper layers @@ -1924,7 +1973,7 @@ static inline bool ata_eh_quiet(struct ata_queued_cmd *qc) return qc->flags & ATA_QCFLAG_QUIET; } -static int ata_eh_read_sense_success_non_ncq(struct ata_link *link) +static int ata_eh_get_non_ncq_success_sense(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_queued_cmd *qc; @@ -1942,11 +1991,10 @@ static int ata_eh_read_sense_success_non_ncq(struct ata_link *link) return -EIO; /* - * If we have sense data, call scsi_check_sense() in order to set the - * correct SCSI ML byte (if any). No point in checking the return value, - * since the command has already completed successfully. + * No point in checking the return value, since the command has already + * completed successfully. */ - scsi_check_sense(qc->scsicmd); + ata_eh_decide_disposition(qc); return 0; } @@ -1976,9 +2024,9 @@ static void ata_eh_get_success_sense(struct ata_link *link) * request sense ext command to retrieve the sense data. */ if (link->sactive) - ret = ata_eh_read_sense_success_ncq_log(link); + ret = ata_eh_get_ncq_success_sense(link); else - ret = ata_eh_read_sense_success_non_ncq(link); + ret = ata_eh_get_non_ncq_success_sense(link); if (ret) goto out; @@ -3247,7 +3295,7 @@ static int atapi_eh_clear_ua(struct ata_device *dev) int i; for (i = 0; i < ATA_EH_UA_TRIES; i++) { - u8 *sense_buffer = dev->link->ap->sector_buf; + u8 *sense_buffer = dev->sector_buf; u8 sense_key = 0; unsigned int err_mask; diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index e2e9cbd405fa05..d5d189328ae637 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -648,8 +648,7 @@ static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr) static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class) { struct ata_link *link = dev->link; - struct ata_port *ap = link->ap; - u32 *gscr = (void *)ap->sector_buf; + u32 *gscr = (void *)dev->sector_buf; int rc; ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE); diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 48660d44560250..9c76fb1ad2ec50 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "libata.h" #include "libata-transport.h" @@ -517,6 +517,86 @@ int sata_set_spd(struct ata_link *link) } EXPORT_SYMBOL_GPL(sata_set_spd); +/** + * sata_down_spd_limit - adjust SATA spd limit downward + * @link: Link to adjust SATA spd limit for + * @spd_limit: Additional limit + * + * Adjust SATA spd limit of @link downward. Note that this + * function only adjusts the limit. The change must be applied + * using sata_set_spd(). + * + * If @spd_limit is non-zero, the speed is limited to equal to or + * lower than @spd_limit if such speed is supported. 
If + * @spd_limit is slower than any supported speed, only the lowest + * supported speed is allowed. + * + * LOCKING: + * Inherited from caller. + * + * RETURNS: + * 0 on success, negative errno on failure + */ +int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) +{ + u32 sstatus, spd, mask; + int rc, bit; + + if (!sata_scr_valid(link)) + return -EOPNOTSUPP; + + /* If SCR can be read, use it to determine the current SPD. + * If not, use cached value in link->sata_spd. + */ + rc = sata_scr_read(link, SCR_STATUS, &sstatus); + if (rc == 0 && ata_sstatus_online(sstatus)) + spd = (sstatus >> 4) & 0xf; + else + spd = link->sata_spd; + + mask = link->sata_spd_limit; + if (mask <= 1) + return -EINVAL; + + /* unconditionally mask off the highest bit */ + bit = fls(mask) - 1; + mask &= ~(1 << bit); + + /* + * Mask off all speeds higher than or equal to the current one. At + * this point, if current SPD is not available and we previously + * recorded the link speed from SStatus, the driver has already + * masked off the highest bit so mask should already be 1 or 0. + * Otherwise, we should not force 1.5Gbps on a link where we have + * not previously recorded speed from SStatus. Just return in this + * case. + */ + if (spd > 1) + mask &= (1 << (spd - 1)) - 1; + else if (link->sata_spd) + return -EINVAL; + + /* were we already at the bottom? */ + if (!mask) + return -EINVAL; + + if (spd_limit) { + if (mask & ((1 << spd_limit) - 1)) + mask &= (1 << spd_limit) - 1; + else { + bit = ffs(mask) - 1; + mask = 1 << bit; + } + } + + link->sata_spd_limit = mask; + + ata_link_warn(link, "limiting SATA link speed to %s\n", + sata_spd_string(fls(mask))); + + return 0; +} + /** * sata_link_hardreset - reset link via SATA phy reset * @link: link to reset @@ -626,6 +706,34 @@ int sata_link_hardreset(struct ata_link *link, const unsigned int *timing, } EXPORT_SYMBOL_GPL(sata_link_hardreset); +/** + * sata_std_hardreset - COMRESET w/o waiting or classification + * @link: link to reset + * @class: resulting class of attached device + * @deadline: deadline jiffies for the operation + * + * Standard SATA COMRESET w/o waiting or classification. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 if link offline, -EAGAIN if link online, -errno on errors. 
+ */ +int sata_std_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context); + bool online; + int rc; + + rc = sata_link_hardreset(link, timing, deadline, &online, NULL); + if (online) + return -EAGAIN; + return rc; +} +EXPORT_SYMBOL_GPL(sata_std_hardreset); + /** * ata_qc_complete_multiple - Complete multiple qcs successfully * @ap: port in question @@ -818,7 +926,7 @@ static ssize_t ata_scsi_lpm_store(struct device *device, ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, &ap->link, ENABLED) { - if (dev->horkage & ATA_HORKAGE_NOLPM) { + if (dev->quirks & ATA_QUIRK_NOLPM) { count = -EOPNOTSUPP; goto out_unlock; } @@ -1340,7 +1448,7 @@ EXPORT_SYMBOL_GPL(sata_async_notification); static int ata_eh_read_log_10h(struct ata_device *dev, int *tag, struct ata_taskfile *tf) { - u8 *buf = dev->link->ap->sector_buf; + u8 *buf = dev->sector_buf; unsigned int err_mask; u8 csum; int i; @@ -1379,8 +1487,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, } /** - * ata_eh_read_sense_success_ncq_log - Read the sense data for successful - * NCQ commands log + * ata_eh_get_ncq_success_sense - Read and process the sense data for + * successful NCQ commands log page * @link: ATA link to get sense data for * * Read the sense data for successful NCQ commands log page to obtain @@ -1393,11 +1501,11 @@ static int ata_eh_read_log_10h(struct ata_device *dev, * RETURNS: * 0 on success, -errno otherwise. */ -int ata_eh_read_sense_success_ncq_log(struct ata_link *link) +int ata_eh_get_ncq_success_sense(struct ata_link *link) { struct ata_device *dev = link->device; struct ata_port *ap = dev->link->ap; - u8 *buf = ap->ncq_sense_buf; + u8 *buf = dev->cdl->ncq_sense_log_buf; struct ata_queued_cmd *qc; unsigned int err_mask, tag; u8 *sense, sk = 0, asc = 0, ascq = 0; @@ -1455,17 +1563,14 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link) qc->flags |= ATA_QCFLAG_SENSE_VALID; /* - * If we have sense data, call scsi_check_sense() in order to - * set the correct SCSI ML byte (if any). No point in checking - * the return value, since the command has already completed - * successfully. + * No point in checking the return value, since the command has + * already completed successfully. */ - scsi_check_sense(qc->scsicmd); + ata_eh_decide_disposition(qc); } return ret; } -EXPORT_SYMBOL_GPL(ata_eh_read_sense_success_ncq_log); /** * ata_eh_analyze_ncq_error - analyze NCQ error @@ -1576,3 +1681,11 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) ehc->i.err_mask &= ~AC_ERR_DEV; } EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); + +const struct ata_port_operations sata_port_ops = { + .inherits = &ata_base_port_ops, + + .qc_defer = ata_std_qc_defer, + .hardreset = sata_std_hardreset, +}; +EXPORT_SYMBOL_GPL(sata_port_ops); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 473e00a58a8b0c..f915e3df57a9a0 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include @@ -1691,9 +1691,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); } else if (is_error && !have_sense) { ata_gen_ata_sense(qc); - } else { - /* Keep the SCSI ML and status byte, clear host byte. 
*/ - cmd->result &= 0x0000ffff; } ata_qc_done(qc); @@ -2094,7 +2091,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) if (ata_id_has_trim(args->id)) { u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; - if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M) max_blocks = 128 << (20 - SECTOR_SHIFT); put_unaligned_be64(max_blocks, &rbuf[36]); @@ -2259,10 +2256,15 @@ static inline u16 ata_xlat_cdl_limit(u8 *buf) static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf, u8 spg) { - u8 *b, *cdl = dev->cdl, *desc; + u8 *b, *cdl, *desc; u32 policy; int i; + if (!(dev->flags & ATA_DFLAG_CDL) || !dev->cdl) + return 0; + + cdl = dev->cdl->desc_log_buf; + /* * Fill the subpage. The first four bytes of the T2A/T2B mode pages * are a header. The PAGE LENGTH field is the size of the page @@ -2359,7 +2361,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf, case ALL_SUB_MPAGES: n = ata_msense_control_spg0(dev, buf, changeable); n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); - n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); + n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE); n += ata_msense_control_ata_feature(dev, buf + n); return n; default: @@ -2572,11 +2574,11 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) rbuf[15] = lowest_aligned; if (ata_id_has_trim(args->id) && - !(dev->horkage & ATA_HORKAGE_NOTRIM)) { + !(dev->quirks & ATA_QUIRK_NOTRIM)) { rbuf[14] |= 0x80; /* LBPME */ if (ata_id_has_zero_after_trim(args->id) && - dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { + dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) { ata_dev_info(dev, "Enabling discard_zeroes_data\n"); rbuf[14] |= 0x40; /* LBPRZ */ } @@ -3240,8 +3242,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) } scsi_16_lba_len(cdb, &block, &n_block); - if (!unmap || - (dev->horkage & ATA_HORKAGE_NOTRIM) || + if (!unmap || (dev->quirks & ATA_QUIRK_NOTRIM) || !ata_id_has_trim(dev->id)) { fp = 1; bp = 3; @@ -4617,16 +4618,15 @@ static void ata_scsi_handle_link_detach(struct ata_link *link) ata_for_each_dev(dev, link, ALL) { unsigned long flags; - if (!(dev->flags & ATA_DFLAG_DETACHED)) + spin_lock_irqsave(ap->lock, flags); + if (!(dev->flags & ATA_DFLAG_DETACHED)) { + spin_unlock_irqrestore(ap->lock, flags); continue; + } - spin_lock_irqsave(ap->lock, flags); dev->flags &= ~ATA_DFLAG_DETACHED; spin_unlock_irqrestore(ap->lock, flags); - if (zpodd_dev_enabled(dev)) - zpodd_exit(dev); - ata_scsi_remove_dev(dev); } } diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 250f7dae05fd63..67f277e1c3bf31 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -26,7 +26,6 @@ static struct workqueue_struct *ata_sff_wq; const struct ata_port_operations ata_sff_port_ops = { .inherits = &ata_base_port_ops, - .qc_prep = ata_noop_qc_prep, .qc_issue = ata_sff_qc_issue, .qc_fill_rtf = ata_sff_qc_fill_rtf, @@ -970,7 +969,7 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, * We ignore ERR here to workaround and proceed sending * the CDB. */ - if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { + if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) { ata_ehi_push_desc(ehi, "ST_FIRST: " "DRQ=1 with device error, " "dev_stat 0x%X", status); @@ -1045,8 +1044,8 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, * IDENTIFY, it's likely a phantom * device. Mark hint. 
*/ - if (qc->dev->horkage & - ATA_HORKAGE_DIAGNOSTIC) + if (qc->dev->quirks & + ATA_QUIRK_DIAGNOSTIC) qc->err_mask |= AC_ERR_NODEV_HINT; } else { @@ -1762,7 +1761,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, /* see if device passed diags: continue and warn later */ if (err == 0) /* diagnostic fail : do nothing _YET_ */ - dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; + dev->quirks |= ATA_QUIRK_DIAGNOSTIC; else if (err == 1) /* do nothing */ ; else if ((dev->devno == 0) && (err == 0x81)) @@ -1781,7 +1780,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, * device signature is invalid with diagnostic * failure. */ - if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) + if (present && (dev->quirks & ATA_QUIRK_DIAGNOSTIC)) class = ATA_DEV_ATA; else class = ATA_DEV_NONE; diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index 9e24c33388f968..e898be49df6b54 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -80,12 +80,6 @@ struct ata_internal { #define transport_class_to_port(dev) \ tdev_to_port((dev)->parent) - -/* Device objects are always created whit link objects */ -static int ata_tdev_add(struct ata_device *dev); -static void ata_tdev_delete(struct ata_device *dev); - - /* * Hack to allow attributes of the same name in different objects. */ @@ -364,135 +358,6 @@ unsigned int ata_port_classify(struct ata_port *ap, } EXPORT_SYMBOL_GPL(ata_port_classify); -/* - * ATA link attributes - */ -static int noop(int x) { return x; } - -#define ata_link_show_linkspeed(field, format) \ -static ssize_t \ -show_ata_link_##field(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct ata_link *link = transport_class_to_link(dev); \ - \ - return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \ -} - -#define ata_link_linkspeed_attr(field, format) \ - ata_link_show_linkspeed(field, format) \ -static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL) - -ata_link_linkspeed_attr(hw_sata_spd_limit, fls); -ata_link_linkspeed_attr(sata_spd_limit, fls); -ata_link_linkspeed_attr(sata_spd, noop); - - -static DECLARE_TRANSPORT_CLASS(ata_link_class, - "ata_link", NULL, NULL, NULL); - -static void ata_tlink_release(struct device *dev) -{ -} - -/** - * ata_is_link -- check if a struct device represents a ATA link - * @dev: device to check - * - * Returns: - * %1 if the device represents a ATA link, %0 else - */ -static int ata_is_link(const struct device *dev) -{ - return dev->release == ata_tlink_release; -} - -static int ata_tlink_match(struct attribute_container *cont, - struct device *dev) -{ - struct ata_internal* i = to_ata_internal(ata_scsi_transport_template); - if (!ata_is_link(dev)) - return 0; - return &i->link_attr_cont.ac == cont; -} - -/** - * ata_tlink_delete -- remove ATA LINK - * @link: ATA LINK to remove - * - * Removes the specified ATA LINK. remove associated ATA device(s) as well. - */ -void ata_tlink_delete(struct ata_link *link) -{ - struct device *dev = &link->tdev; - struct ata_device *ata_dev; - - ata_for_each_dev(ata_dev, link, ALL) { - ata_tdev_delete(ata_dev); - } - - transport_remove_device(dev); - device_del(dev); - transport_destroy_device(dev); - put_device(dev); -} - -/** - * ata_tlink_add -- initialize a transport ATA link structure - * @link: allocated ata_link structure. - * - * Initialize an ATA LINK structure for sysfs. It will be added in the - * device tree below the ATA PORT it belongs to. 
- * - * Returns %0 on success - */ -int ata_tlink_add(struct ata_link *link) -{ - struct device *dev = &link->tdev; - struct ata_port *ap = link->ap; - struct ata_device *ata_dev; - int error; - - device_initialize(dev); - dev->parent = &ap->tdev; - dev->release = ata_tlink_release; - if (ata_is_host_link(link)) - dev_set_name(dev, "link%d", ap->print_id); - else - dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp); - - transport_setup_device(dev); - - error = device_add(dev); - if (error) { - goto tlink_err; - } - - error = transport_add_device(dev); - if (error) - goto tlink_transport_err; - transport_configure_device(dev); - - ata_for_each_dev(ata_dev, link, ALL) { - error = ata_tdev_add(ata_dev); - if (error) { - goto tlink_dev_err; - } - } - return 0; - tlink_dev_err: - while (--ata_dev >= link->device) { - ata_tdev_delete(ata_dev); - } - transport_remove_device(dev); - tlink_transport_err: - device_del(dev); - tlink_err: - transport_destroy_device(dev); - put_device(dev); - return error; -} - /* * ATA device attributes */ @@ -617,10 +482,10 @@ show_ata_dev_trim(struct device *dev, if (!ata_id_has_trim(ata_dev->id)) mode = "unsupported"; - else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM) + else if (ata_dev->quirks & ATA_QUIRK_NOTRIM) mode = "forced_unsupported"; - else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) - mode = "forced_unqueued"; + else if (ata_dev->quirks & ATA_QUIRK_NO_NCQ_TRIM) + mode = "forced_unqueued"; else if (ata_fpdma_dsm_supported(ata_dev)) mode = "queued"; else @@ -643,9 +508,9 @@ static void ata_tdev_release(struct device *dev) * @dev: device to check * * Returns: - * %1 if the device represents a ATA device, %0 else + * true if the device represents a ATA device, false otherwise */ -static int ata_is_ata_dev(const struct device *dev) +static bool ata_is_ata_dev(const struct device *dev) { return dev->release == ata_tdev_release; } @@ -653,21 +518,22 @@ static int ata_is_ata_dev(const struct device *dev) static int ata_tdev_match(struct attribute_container *cont, struct device *dev) { - struct ata_internal* i = to_ata_internal(ata_scsi_transport_template); + struct ata_internal *i = to_ata_internal(ata_scsi_transport_template); + if (!ata_is_ata_dev(dev)) return 0; return &i->dev_attr_cont.ac == cont; } /** - * ata_tdev_free -- free a ATA LINK - * @dev: ATA PHY to free + * ata_tdev_free -- free an ATA transport device + * @dev: struct ata_device owning the transport device to free * - * Frees the specified ATA PHY. + * Free the ATA transport device for the specified ATA device. * * Note: - * This function must only be called on a PHY that has not - * successfully been added using ata_tdev_add(). + * This function must only be called for a ATA transport device that has not + * yet successfully been added using ata_tdev_add(). */ static void ata_tdev_free(struct ata_device *dev) { @@ -676,10 +542,10 @@ static void ata_tdev_free(struct ata_device *dev) } /** - * ata_tdev_delete -- remove ATA device - * @ata_dev: ATA device to remove + * ata_tdev_delete -- remove an ATA transport device + * @ata_dev: struct ata_device owning the transport device to delete * - * Removes the specified ATA device. + * Removes the ATA transport device for the specified ATA device. */ static void ata_tdev_delete(struct ata_device *ata_dev) { @@ -690,15 +556,14 @@ static void ata_tdev_delete(struct ata_device *ata_dev) ata_tdev_free(ata_dev); } - /** - * ata_tdev_add -- initialize a transport ATA device structure. - * @ata_dev: ata_dev structure. 
+ * ata_tdev_add -- initialize an ATA transport device + * @ata_dev: struct ata_device owning the transport device to add * - * Initialize an ATA device structure for sysfs. It will be added in the - * device tree below the ATA LINK device it belongs to. + * Initialize an ATA transport device for sysfs. It will be added in the + * device tree below the ATA link device it belongs to. * - * Returns %0 on success + * Returns %0 on success and a negative error code on error. */ static int ata_tdev_add(struct ata_device *ata_dev) { @@ -734,6 +599,136 @@ static int ata_tdev_add(struct ata_device *ata_dev) return 0; } +/* + * ATA link attributes + */ +static int noop(int x) +{ + return x; +} + +#define ata_link_show_linkspeed(field, format) \ +static ssize_t \ +show_ata_link_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ata_link *link = transport_class_to_link(dev); \ + \ + return sprintf(buf, "%s\n", \ + sata_spd_string(format(link->field))); \ +} + +#define ata_link_linkspeed_attr(field, format) \ + ata_link_show_linkspeed(field, format) \ +static DEVICE_ATTR(field, 0444, show_ata_link_##field, NULL) + +ata_link_linkspeed_attr(hw_sata_spd_limit, fls); +ata_link_linkspeed_attr(sata_spd_limit, fls); +ata_link_linkspeed_attr(sata_spd, noop); + +static DECLARE_TRANSPORT_CLASS(ata_link_class, + "ata_link", NULL, NULL, NULL); + +static void ata_tlink_release(struct device *dev) +{ +} + +/** + * ata_is_link -- check if a struct device represents a ATA link + * @dev: device to check + * + * Returns: + * true if the device represents a ATA link, false otherwise + */ +static bool ata_is_link(const struct device *dev) +{ + return dev->release == ata_tlink_release; +} + +static int ata_tlink_match(struct attribute_container *cont, + struct device *dev) +{ + struct ata_internal *i = to_ata_internal(ata_scsi_transport_template); + + if (!ata_is_link(dev)) + return 0; + return &i->link_attr_cont.ac == cont; +} + +/** + * ata_tlink_delete -- remove an ATA link transport device + * @link: struct ata_link owning the link transport device to remove + * + * Removes the link transport device of the specified ATA link. This also + * removes the ATA device(s) associated with the link as well. + */ +void ata_tlink_delete(struct ata_link *link) +{ + struct device *dev = &link->tdev; + struct ata_device *ata_dev; + + ata_for_each_dev(ata_dev, link, ALL) { + ata_tdev_delete(ata_dev); + } + + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); + put_device(dev); +} + +/** + * ata_tlink_add -- initialize an ATA link transport device + * @link: struct ata_link owning the link transport device to initialize + * + * Initialize an ATA link transport device for sysfs. It will be added in the + * device tree below the ATA port it belongs to. + * + * Returns %0 on success and a negative error code on error. 
+ */ +int ata_tlink_add(struct ata_link *link) +{ + struct device *dev = &link->tdev; + struct ata_port *ap = link->ap; + struct ata_device *ata_dev; + int error; + + device_initialize(dev); + dev->parent = &ap->tdev; + dev->release = ata_tlink_release; + if (ata_is_host_link(link)) + dev_set_name(dev, "link%d", ap->print_id); + else + dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp); + + transport_setup_device(dev); + + error = device_add(dev); + if (error) + goto tlink_err; + + error = transport_add_device(dev); + if (error) + goto tlink_transport_err; + transport_configure_device(dev); + + ata_for_each_dev(ata_dev, link, ALL) { + error = ata_tdev_add(ata_dev); + if (error) + goto tlink_dev_err; + } + return 0; + tlink_dev_err: + while (--ata_dev >= link->device) + ata_tdev_delete(ata_dev); + transport_remove_device(dev); + tlink_transport_err: + device_del(dev); + tlink_err: + transport_destroy_device(dev); + put_device(dev); + return error; +} /* * Setup / Teardown code diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index eefda51f97d351..4b83b517caec66 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -112,7 +112,7 @@ static bool zpready(struct ata_device *dev) if (!ret || sense_key != NOT_READY) return false; - sense_buf = dev->link->ap->sector_buf; + sense_buf = dev->sector_buf; ret = atapi_eh_request_sense(dev, sense_buf, sense_key); if (ret) return false; diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 6abf265f626efe..0337be4faec7c4 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -38,6 +38,12 @@ extern int libata_noacpi; extern int libata_allow_tpm; extern const struct device_type ata_port_type; extern struct ata_link *ata_dev_phys_link(struct ata_device *dev); + +static inline bool ata_sstatus_online(u32 sstatus) +{ + return (sstatus & 0xf) == 0x3; +} + #ifdef CONFIG_ATA_FORCE extern void ata_force_cbl(struct ata_port *ap); #else @@ -65,7 +71,7 @@ extern bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf, bool set_active); extern void ata_dev_power_set_standby(struct ata_device *dev); extern void ata_dev_power_set_active(struct ata_device *dev); -extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit); +void ata_dev_free_resources(struct ata_device *dev); extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); extern unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action); @@ -84,9 +90,25 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); extern const char *sata_spd_string(unsigned int spd); extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log, u8 page, void *buf, unsigned int sectors); +void ata_dev_cleanup_cdl_resources(struct ata_device *dev); #define to_ata_port(d) container_of(d, struct ata_port, tdev) +/* libata-sata.c */ +#ifdef CONFIG_SATA_HOST +int sata_down_spd_limit(struct ata_link *link, u32 spd_limit); +int ata_eh_get_ncq_success_sense(struct ata_link *link); +#else +static inline int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) +{ + return -EOPNOTSUPP; +} +static inline int ata_eh_get_ncq_success_sense(struct ata_link *link) +{ + return -EOPNOTSUPP; +} +#endif + /* libata-acpi.c */ #ifdef CONFIG_ATA_ACPI extern unsigned int ata_acpi_gtf_filter; @@ -124,7 +146,6 @@ extern void ata_scsi_set_sense_information(struct ata_device *dev, const struct ata_taskfile *tf); extern void ata_scsi_media_change_notify(struct ata_device *dev); extern void 
ata_scsi_hotplug(struct work_struct *work); -extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); extern void ata_scsi_dev_rescan(struct work_struct *work); extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, unsigned int id, u64 lun); @@ -162,6 +183,7 @@ extern void ata_eh_finish(struct ata_port *ap); extern int ata_ering_map(struct ata_ering *ering, int (*map_fn)(struct ata_ering_entry *, void *), void *arg); +enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc); extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key); extern unsigned int atapi_eh_request_sense(struct ata_device *dev, u8 *sense_buf, u8 dfl_sense_key); diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 027cf67101ef0e..3163c8d9cef5bc 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -8,9 +8,9 @@ * PIO mode and smarter silicon. * * The practical upshot of this is that we must always tune the - * drive for the right PIO mode. We must also ignore all the blacklists - * and the drive bus mastering DMA information. Also to confuse matters - * further we can do DMA on PIO only drives. + * drive for the right PIO mode and ignore the drive bus mastering DMA + * information. Also to confuse matters further we can do DMA on PIO only + * drives. * * DMA on the 5510 also requires we disable_hlt() during DMA on early * revisions. diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index c84a20892f1b0c..f3f5b2b0ecc9f1 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -44,8 +44,8 @@ #include #include #include +#include -#include #include #define DRV_NAME "ep93xx-ide" @@ -126,7 +126,7 @@ enum { }; struct ep93xx_pata_data { - const struct platform_device *pdev; + struct platform_device *pdev; void __iomem *ide_base; struct ata_timing t; bool iordy; @@ -135,9 +135,7 @@ struct ep93xx_pata_data { unsigned long udma_out_phys; struct dma_chan *dma_rx_channel; - struct ep93xx_dma_data dma_rx_data; struct dma_chan *dma_tx_channel; - struct ep93xx_dma_data dma_tx_data; }; static void ep93xx_pata_clear_regs(void __iomem *base) @@ -637,20 +635,13 @@ static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data) } } -static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param) +static int ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) { - if (ep93xx_dma_chan_is_m2p(chan)) - return false; - - chan->private = filter_param; - return true; -} - -static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) -{ - const struct platform_device *pdev = drv_data->pdev; + struct platform_device *pdev = drv_data->pdev; + struct device *dev = &pdev->dev; dma_cap_mask_t mask; struct dma_slave_config conf; + int ret; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -660,22 +651,16 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) * to request only one channel, and reprogram it's direction at * start of new transfer. 
*/ - drv_data->dma_rx_data.port = EP93XX_DMA_IDE; - drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM; - drv_data->dma_rx_data.name = "ep93xx-pata-rx"; - drv_data->dma_rx_channel = dma_request_channel(mask, - ep93xx_pata_dma_filter, &drv_data->dma_rx_data); - if (!drv_data->dma_rx_channel) - return; - - drv_data->dma_tx_data.port = EP93XX_DMA_IDE; - drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV; - drv_data->dma_tx_data.name = "ep93xx-pata-tx"; - drv_data->dma_tx_channel = dma_request_channel(mask, - ep93xx_pata_dma_filter, &drv_data->dma_tx_data); - if (!drv_data->dma_tx_channel) { - dma_release_channel(drv_data->dma_rx_channel); - return; + drv_data->dma_rx_channel = dma_request_chan(dev, "rx"); + if (IS_ERR(drv_data->dma_rx_channel)) + return dev_err_probe(dev, PTR_ERR(drv_data->dma_rx_channel), + "rx DMA setup failed\n"); + + drv_data->dma_tx_channel = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(drv_data->dma_tx_channel)) { + ret = dev_err_probe(dev, PTR_ERR(drv_data->dma_tx_channel), + "tx DMA setup failed\n"); + goto fail_release_rx; } /* Configure receive channel direction and source address */ @@ -683,10 +668,10 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) conf.direction = DMA_DEV_TO_MEM; conf.src_addr = drv_data->udma_in_phys; conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) { - dev_err(&pdev->dev, "failed to configure rx dma channel\n"); - ep93xx_pata_release_dma(drv_data); - return; + ret = dmaengine_slave_config(drv_data->dma_rx_channel, &conf); + if (ret) { + dev_err_probe(dev, ret, "failed to configure rx dma channel"); + goto fail_release_dma; } /* Configure transmit channel direction and destination address */ @@ -694,10 +679,20 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) conf.direction = DMA_MEM_TO_DEV; conf.dst_addr = drv_data->udma_out_phys; conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) { - dev_err(&pdev->dev, "failed to configure tx dma channel\n"); - ep93xx_pata_release_dma(drv_data); + ret = dmaengine_slave_config(drv_data->dma_tx_channel, &conf); + if (ret) { + dev_err_probe(dev, ret, "failed to configure tx dma channel"); + goto fail_release_dma; } + + return 0; + +fail_release_rx: + dma_release_channel(drv_data->dma_rx_channel); +fail_release_dma: + ep93xx_pata_release_dma(drv_data); + + return ret; } static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc) @@ -884,8 +879,6 @@ static const struct scsi_host_template ep93xx_pata_sht = { static struct ata_port_operations ep93xx_pata_port_ops = { .inherits = &ata_bmdma_port_ops, - .qc_prep = ata_noop_qc_prep, - .softreset = ep93xx_pata_softreset, .hardreset = ATA_OP_NULL, @@ -927,34 +920,26 @@ static int ep93xx_pata_probe(struct platform_device *pdev) void __iomem *ide_base; int err; - err = ep93xx_ide_acquire_gpio(pdev); - if (err) - return err; - /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - err = irq; - goto err_rel_gpio; - } + if (irq < 0) + return irq; ide_base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); - if (IS_ERR(ide_base)) { - err = PTR_ERR(ide_base); - goto err_rel_gpio; - } + if (IS_ERR(ide_base)) + return PTR_ERR(ide_base); drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL); - if (!drv_data) { - err = -ENOMEM; - goto err_rel_gpio; - } + if (!drv_data) + return -ENOMEM; drv_data->pdev = pdev; drv_data->ide_base = 
ide_base; drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN; drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT; - ep93xx_pata_dma_init(drv_data); + err = ep93xx_pata_dma_init(drv_data); + if (err) + return err; /* allocate host */ host = ata_host_alloc(&pdev->dev, 1); @@ -1005,8 +990,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev) err_rel_dma: ep93xx_pata_release_dma(drv_data); -err_rel_gpio: - ep93xx_ide_release_gpio(pdev); return err; } @@ -1018,12 +1001,18 @@ static void ep93xx_pata_remove(struct platform_device *pdev) ata_host_detach(host); ep93xx_pata_release_dma(drv_data); ep93xx_pata_clear_regs(drv_data->ide_base); - ep93xx_ide_release_gpio(pdev); } +static const struct of_device_id ep93xx_pata_of_ids[] = { + { .compatible = "cirrus,ep9312-pata" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ep93xx_pata_of_ids); + static struct platform_driver ep93xx_pata_platform_driver = { .driver = { .name = DRV_NAME, + .of_match_table = ep93xx_pata_of_ids, }, .probe = ep93xx_pata_probe, .remove_new = ep93xx_pata_remove, diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 4d6ef90ccc774b..73a9a5109238b3 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c @@ -549,6 +549,7 @@ static const struct of_device_id pata_ftide010_of_match[] = { { .compatible = "faraday,ftide010", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, pata_ftide010_of_match); static struct platform_driver pata_ftide010_driver = { .driver = { diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index bdccd1ba152471..5280e9960025ec 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -170,8 +170,8 @@ static const char * const bad_ata66_3[] = { NULL }; -static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, - const char * const list[]) +static int hpt_dma_broken(const struct ata_device *dev, char *modestr, + const char * const list[]) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; int i; @@ -197,11 +197,11 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, static unsigned int hpt366_filter(struct ata_device *adev, unsigned int mask) { if (adev->class == ATA_DEV_ATA) { - if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) + if (hpt_dma_broken(adev, "UDMA", bad_ata33)) mask &= ~ATA_MASK_UDMA; - if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) + if (hpt_dma_broken(adev, "UDMA3", bad_ata66_3)) mask &= ~(0xF8 << ATA_SHIFT_UDMA); - if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) + if (hpt_dma_broken(adev, "UDMA4", bad_ata66_4)) mask &= ~(0xF0 << ATA_SHIFT_UDMA); } else if (adev->class == ATA_DEV_ATAPI) mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index c0329cf01135f6..4af22b819416c7 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -218,8 +218,8 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed) return 0xffffffffU; /* silence compiler warning */ } -static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, - const char * const list[]) +static int hpt_dma_broken(const struct ata_device *dev, char *modestr, + const char * const list[]) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; int i; @@ -281,9 +281,9 @@ static const char * const bad_ata100_5[] = { static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask) { if (adev->class == ATA_DEV_ATA) { - if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) + if (hpt_dma_broken(adev, "UDMA", 
bad_ata33)) mask &= ~ATA_MASK_UDMA; - if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) + if (hpt_dma_broken(adev, "UDMA100", bad_ata100_5)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } return mask; @@ -300,7 +300,7 @@ static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask) static unsigned int hpt370a_filter(struct ata_device *adev, unsigned int mask) { if (adev->class == ATA_DEV_ATA) { - if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) + if (hpt_dma_broken(adev, "UDMA100", bad_ata100_5)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } return mask; diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index 9cfb064782c3c9..61d8760f09d91f 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -328,8 +328,6 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) static struct ata_port_operations pata_icside_port_ops = { .inherits = &ata_bmdma_port_ops, - /* no need to build any PRD tables for DMA */ - .qc_prep = ata_noop_qc_prep, .sff_data_xfer = ata_sff_data_xfer32, .bmdma_setup = pata_icside_bmdma_setup, .bmdma_start = pata_icside_bmdma_start, diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 2fe3fb6102ce6a..042f6ad1f7c691 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -519,9 +519,9 @@ static void it821x_dev_config(struct ata_device *adev) } /* This is a controller firmware triggered funny, don't report the drive faulty! */ - adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC; + adev->quirks &= ~ATA_QUIRK_DIAGNOSTIC; /* No HPA in 'smart' mode */ - adev->horkage |= ATA_HORKAGE_BROKEN_HPA; + adev->quirks |= ATA_QUIRK_BROKEN_HPA; } /** diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index 246bb4f8f1f7f6..8a9ee828478f01 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c @@ -290,6 +290,7 @@ static const struct of_device_id ixp4xx_pata_of_match[] = { { .compatible = "intel,ixp4xx-compact-flash", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, ixp4xx_pata_of_match); static struct platform_driver ixp4xx_pata_platform_driver = { .driver = { diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 6c317a461a1f63..3f92586779156a 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -620,7 +620,6 @@ static struct ata_port_operations mpc52xx_ata_port_ops = { .bmdma_start = mpc52xx_bmdma_start, .bmdma_stop = mpc52xx_bmdma_stop, .bmdma_status = mpc52xx_bmdma_status, - .qc_prep = ata_noop_qc_prep, }; static int mpc52xx_ata_init_one(struct device *dev, diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 2884acfc4863b6..0bb9607e734863 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -789,7 +789,6 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc) static struct ata_port_operations octeon_cf_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = octeon_cf_check_atapi_dma, - .qc_prep = ata_noop_qc_prep, .qc_issue = octeon_cf_qc_issue, .sff_dev_select = octeon_cf_dev_select, .sff_irq_on = octeon_cf_ata_port_noaction, diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 549ff24a982311..4edddf6bcc1507 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -46,10 +46,11 @@ #define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */ #define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */ -/* Seagate Barracuda ATA IV Family drives in 
UDMA mode 5 - * can overrun their FIFOs when used with the CSB5 */ - -static const char *csb_bad_ata100[] = { +/* + * Seagate Barracuda ATA IV Family drives in UDMA mode 5 + * can overrun their FIFOs when used with the CSB5. + */ +static const char * const csb_bad_ata100[] = { "ST320011A", "ST340016A", "ST360021A", @@ -163,10 +164,11 @@ static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned in * @adev: ATA device * @mask: Mask of proposed modes * - * Check the blacklist and disable UDMA5 if matched + * Check the list of devices with broken UDMA5 and + * disable UDMA5 if matched. */ - -static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask) +static unsigned int serverworks_csb_filter(struct ata_device *adev, + unsigned int mask) { const char *p; char model_num[ATA_ID_PROD_LEN + 1]; diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c index 4c270999ba3ccd..f574e3c3f5b48f 100644 --- a/drivers/ata/sata_gemini.c +++ b/drivers/ata/sata_gemini.c @@ -417,6 +417,7 @@ static const struct of_device_id gemini_sata_of_match[] = { { .compatible = "cortina,gemini-sata-bridge", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, gemini_sata_of_match); static struct platform_driver gemini_sata_driver = { .driver = { diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index cc77c024828431..3a99f66198a980 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -128,7 +128,7 @@ static const struct pci_device_id sil_pci_tbl[] = { static const struct sil_drivelist { const char *product; unsigned int quirk; -} sil_blacklist [] = { +} sil_quirks[] = { { "ST320012AS", SIL_QUIRK_MOD15WRITE }, { "ST330013AS", SIL_QUIRK_MOD15WRITE }, { "ST340017AS", SIL_QUIRK_MOD15WRITE }, @@ -600,8 +600,8 @@ static void sil_thaw(struct ata_port *ap) * list, and apply the fixups to only the specific * devices/hosts/firmwares that need it. * - * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted - * The Maxtor quirk is in the blacklist, but I'm keeping the original + * 20040111 - Seagate drives affected by the Mod15Write bug are quirked + * The Maxtor quirk is in sil_quirks, but I'm keeping the original * pessimistic fix for the following reasons... * - There seems to be less info on it, only one device gleaned off the * Windows driver, maybe only one is affected. 
More info would be greatly @@ -616,13 +616,13 @@ static void sil_dev_config(struct ata_device *dev) unsigned char model_num[ATA_ID_PROD_LEN + 1]; /* This controller doesn't support trim */ - dev->horkage |= ATA_HORKAGE_NOTRIM; + dev->quirks |= ATA_QUIRK_NOTRIM; ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); - for (n = 0; sil_blacklist[n].product; n++) - if (!strcmp(sil_blacklist[n].product, model_num)) { - quirks = sil_blacklist[n].quirk; + for (n = 0; sil_quirks[n].product; n++) + if (!strcmp(sil_quirks[n].product, model_num)) { + quirks = sil_quirks[n].quirk; break; } diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index bb94638144546e..19b619376d48b9 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -526,7 +526,6 @@ static const struct file_operations charlcd_fops = { .write = charlcd_write, .open = charlcd_open, .release = charlcd_release, - .llseek = no_llseek, }; static struct miscdevice charlcd_dev = { diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 8a7034b41d50e8..a816f9e1025595 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include "line-display.h" diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 2b8fd6bb7da0b8..064eb52ff7e2d4 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -226,6 +226,7 @@ config GENERIC_ARCH_TOPOLOGY config GENERIC_ARCH_NUMA bool + select NUMA_MEMBLKS help Enable support for generic NUMA implementation. Currently, RISC-V and ARM64 use it. diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c index 555aee3ee8e732..e1870167642658 100644 --- a/drivers/base/arch_numa.c +++ b/drivers/base/arch_numa.c @@ -12,16 +12,12 @@ #include #include #include +#include #include -struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL(node_data); -nodemask_t numa_nodes_parsed __initdata; static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE }; -static int numa_distance_cnt; -static u8 *numa_distance; bool numa_off; static __init int numa_parse_early_param(char *opt) @@ -30,6 +26,8 @@ static __init int numa_parse_early_param(char *opt) return -EINVAL; if (str_has_prefix(opt, "off")) numa_off = true; + if (!strncmp(opt, "fake=", 5)) + return numa_emu_cmdline(opt + 5); return 0; } @@ -61,6 +59,7 @@ EXPORT_SYMBOL(cpumask_of_node); #endif +#ifndef CONFIG_NUMA_EMU static void numa_update_cpu(unsigned int cpu, bool remove) { int nid = cpu_to_node(cpu); @@ -83,6 +82,7 @@ void numa_remove_cpu(unsigned int cpu) { numa_update_cpu(cpu, true); } +#endif void numa_clear_node(unsigned int cpu) { @@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid) unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); -int __init early_cpu_to_node(int cpu) +int early_cpu_to_node(int cpu) { return cpu_to_node_map[cpu]; } @@ -189,174 +189,24 @@ void __init setup_per_cpu_areas(void) } #endif -/** - * numa_add_memblk() - Set node id to memblk - * @nid: NUMA node ID of the new memblk - * @start: Start address of the new memblk - * @end: End address of the new memblk - * - * RETURNS: - * 0 on success, -errno on failure. 
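[Editor's aside, not part of the patch] The sil_quirks[] scan in sil_dev_config() above is a plain product-string lookup: walk the table until the reported model number matches, then take that entry's quirk bits. A small stand-alone sketch of the same pattern; the table contents and quirk value here are hypothetical:

	#include <stdio.h>
	#include <string.h>

	#define QUIRK_MOD15WRITE	(1U << 0)	/* illustrative value only */

	static const struct {
		const char *product;
		unsigned int quirk;
	} quirks[] = {
		{ "ST320012AS", QUIRK_MOD15WRITE },
		{ "ST330013AS", QUIRK_MOD15WRITE },
		{ NULL, 0 },	/* sentinel terminates the scan */
	};

	static unsigned int lookup_quirks(const char *model_num)
	{
		for (int n = 0; quirks[n].product; n++)
			if (!strcmp(quirks[n].product, model_num))
				return quirks[n].quirk;
		return 0;
	}

	int main(void)
	{
		printf("0x%x\n", lookup_quirks("ST320012AS"));	/* prints 0x1 */
		return 0;
	}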
- */ -int __init numa_add_memblk(int nid, u64 start, u64 end) -{ - int ret; - - ret = memblock_set_node(start, (end - start), &memblock.memory, nid); - if (ret < 0) { - pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", - start, (end - 1), nid); - return ret; - } - - node_set(nid, numa_nodes_parsed); - return ret; -} - /* * Initialize NODE_DATA for a node on the local memory */ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) { - const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); - u64 nd_pa; - void *nd; - int tnid; - if (start_pfn >= end_pfn) pr_info("Initmem setup node %d []\n", nid); - nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) - panic("Cannot allocate %zu bytes for node %d data\n", - nd_size, nid); + alloc_node_data(nid); - nd = __va(nd_pa); - - /* report and initialize */ - pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n", - nd_pa, nd_pa + nd_size - 1); - tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (tnid != nid) - pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); - - node_data[nid] = nd; - memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); NODE_DATA(nid)->node_id = nid; NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; } -/* - * numa_free_distance - * - * The current table is freed. - */ -void __init numa_free_distance(void) -{ - size_t size; - - if (!numa_distance) - return; - - size = numa_distance_cnt * numa_distance_cnt * - sizeof(numa_distance[0]); - - memblock_free(numa_distance, size); - numa_distance_cnt = 0; - numa_distance = NULL; -} - -/* - * Create a new NUMA distance table. - */ -static int __init numa_alloc_distance(void) -{ - size_t size; - int i, j; - - size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); - numa_distance = memblock_alloc(size, PAGE_SIZE); - if (WARN_ON(!numa_distance)) - return -ENOMEM; - - numa_distance_cnt = nr_node_ids; - - /* fill with the default distances */ - for (i = 0; i < numa_distance_cnt; i++) - for (j = 0; j < numa_distance_cnt; j++) - numa_distance[i * numa_distance_cnt + j] = i == j ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - - pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt); - - return 0; -} - -/** - * numa_set_distance() - Set inter node NUMA distance from node to node. - * @from: the 'from' node to set distance - * @to: the 'to' node to set distance - * @distance: NUMA distance - * - * Set the distance from node @from to @to to @distance. - * If distance table doesn't exist, a warning is printed. - * - * If @from or @to is higher than the highest known node or lower than zero - * or @distance doesn't make sense, the call is ignored. - */ -void __init numa_set_distance(int from, int to, int distance) -{ - if (!numa_distance) { - pr_warn_once("Warning: distance table not allocated yet\n"); - return; - } - - if (from >= numa_distance_cnt || to >= numa_distance_cnt || - from < 0 || to < 0) { - pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - if ((u8)distance != distance || - (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - numa_distance[from * numa_distance_cnt + to] = distance; -} - -/* - * Return NUMA distance @from to @to - */ -int __node_distance(int from, int to) -{ - if (from >= numa_distance_cnt || to >= numa_distance_cnt) - return from == to ? 
LOCAL_DISTANCE : REMOTE_DISTANCE; - return numa_distance[from * numa_distance_cnt + to]; -} -EXPORT_SYMBOL(__node_distance); - static int __init numa_register_nodes(void) { int nid; - struct memblock_region *mblk; - - /* Check that valid nid is set to memblks */ - for_each_mem_region(mblk) { - int mblk_nid = memblock_get_region_node(mblk); - phys_addr_t start = mblk->base; - phys_addr_t end = mblk->base + mblk->size - 1; - - if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) { - pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n", - mblk_nid, &start, &end); - return -EINVAL; - } - } /* Finally register nodes. */ for_each_node_mask(nid, numa_nodes_parsed) { @@ -381,11 +231,7 @@ static int __init numa_init(int (*init_func)(void)) nodes_clear(node_possible_map); nodes_clear(node_online_map); - ret = numa_alloc_distance(); - if (ret < 0) - return ret; - - ret = init_func(); + ret = numa_memblks_init(init_func, /* memblock_force_top_down */ false); if (ret < 0) goto out_free_distance; @@ -403,7 +249,7 @@ static int __init numa_init(int (*init_func)(void)) return 0; out_free_distance: - numa_free_distance(); + numa_reset_distance(); return ret; } @@ -433,6 +279,7 @@ static int __init dummy_numa_init(void) pr_err("NUMA init failed\n"); return ret; } + node_set(0, numa_nodes_parsed); numa_off = true; return 0; @@ -475,3 +322,54 @@ void __init arch_numa_init(void) numa_init(dummy_numa_init); } + +#ifdef CONFIG_NUMA_EMU +void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, + unsigned int nr_emu_nids) +{ + int i, j; + + /* + * Transform cpu_to_node_map table to use emulated nids by + * reverse-mapping phys_nid. The maps should always exist but fall + * back to zero just in case. + */ + for (i = 0; i < ARRAY_SIZE(cpu_to_node_map); i++) { + if (cpu_to_node_map[i] == NUMA_NO_NODE) + continue; + for (j = 0; j < nr_emu_nids; j++) + if (cpu_to_node_map[i] == emu_nid_to_phys[j]) + break; + cpu_to_node_map[i] = j < nr_emu_nids ? j : 0; + } +} + +u64 __init numa_emu_dma_end(void) +{ + return memblock_start_of_DRAM() + SZ_4G; +} + +void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable) +{ + struct cpumask *mask; + + if (node == NUMA_NO_NODE) + return; + + mask = node_to_cpumask_map[node]; + if (!cpumask_available(mask)) { + pr_err("node_to_cpumask_map[%i] NULL\n", node); + dump_stack(); + return; + } + + if (enable) + cpumask_set_cpu(cpu, mask); + else + cpumask_clear_cpu(cpu, mask); + + pr_debug("%s cpu %d node %d: mask now %*pbl\n", + enable ? "numa_add_cpu" : "numa_remove_cpu", + cpu, node, cpumask_pr_args(mask)); +} +#endif /* CONFIG_NUMA_EMU */ diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c index 01ef796c2055a0..b6f941a6ab693a 100644 --- a/drivers/base/attribute_container.c +++ b/drivers/base/attribute_container.c @@ -346,8 +346,7 @@ attribute_container_device_trigger_safe(struct device *dev, * @fn: the function to execute for each classdev. * * This function is for executing a trigger when you need to know both - * the container and the classdev. If you only care about the - * container, then use attribute_container_trigger() instead. + * the container and the classdev. 
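[Editor's aside, not part of the patch] The cpu_to_node_map rewrite in numa_emu_update_cpu_to_node() above is a reverse lookup: for each CPU's physical node, find the first emulated node backed by that physical node, and fall back to node 0 if none matches. A self-contained sketch of that transform with made-up data:

	#include <stdio.h>

	#define NUMA_NO_NODE	(-1)

	/* Hypothetical data mirroring the kernel arrays used above. */
	static int cpu_to_node_map[4] = { 0, 0, 1, NUMA_NO_NODE };
	static int emu_nid_to_phys[3] = { 1, 0, 1 };	/* emulated nid -> physical nid */

	int main(void)
	{
		int nr_emu_nids = 3;

		for (int i = 0; i < 4; i++) {
			int j;

			if (cpu_to_node_map[i] == NUMA_NO_NODE)
				continue;
			/* reverse-map: first emulated nid backed by this physical nid */
			for (j = 0; j < nr_emu_nids; j++)
				if (cpu_to_node_map[i] == emu_nid_to_phys[j])
					break;
			cpu_to_node_map[i] = j < nr_emu_nids ? j : 0;	/* fall back to 0 */
			printf("cpu%d -> emulated node %d\n", i, cpu_to_node_map[i]);
		}
		return 0;
	}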
*/ void attribute_container_device_trigger(struct device *dev, @@ -378,33 +377,6 @@ attribute_container_device_trigger(struct device *dev, mutex_unlock(&attribute_container_mutex); } -/** - * attribute_container_trigger - trigger a function for each matching container - * - * @dev: The generic device to activate the trigger for - * @fn: the function to trigger - * - * This routine triggers a function that only needs to know the - * matching containers (not the classdev) associated with a device. - * It is more lightweight than attribute_container_device_trigger, so - * should be used in preference unless the triggering function - * actually needs to know the classdev. - */ -void -attribute_container_trigger(struct device *dev, - int (*fn)(struct attribute_container *, - struct device *)) -{ - struct attribute_container *cont; - - mutex_lock(&attribute_container_mutex); - list_for_each_entry(cont, &attribute_container_list, node) { - if (cont->match(cont, dev)) - fn(cont, dev); - } - mutex_unlock(&attribute_container_mutex); -} - /** * attribute_container_add_attrs - add attributes * @@ -458,24 +430,6 @@ attribute_container_add_class_device(struct device *classdev) return attribute_container_add_attrs(classdev); } -/** - * attribute_container_add_class_device_adapter - simple adapter for triggers - * - * @cont: the container to register. - * @dev: the generic device to activate the trigger for - * @classdev: the class device to add - * - * This function is identical to attribute_container_add_class_device except - * that it is designed to be called from the triggers - */ -int -attribute_container_add_class_device_adapter(struct attribute_container *cont, - struct device *dev, - struct device *classdev) -{ - return attribute_container_add_class_device(classdev); -} - /** * attribute_container_remove_attrs - remove any attribute files * diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index 54b92839e05ced..7823888af4f684 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -352,7 +352,7 @@ EXPORT_SYMBOL_GPL(__auxiliary_device_add); */ struct auxiliary_device *auxiliary_find_device(struct device *start, const void *data, - int (*match)(struct device *dev, const void *data)) + device_match_t match) { struct device *dev; diff --git a/drivers/base/base.h b/drivers/base/base.h index 0b53593372d79d..8cf04a557bdb0d 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -145,7 +145,7 @@ void auxiliary_bus_init(void); static inline void auxiliary_bus_init(void) { } #endif -struct kobject *virtual_device_parent(struct device *dev); +struct kobject *virtual_device_parent(void); int bus_add_device(struct device *dev); void bus_probe_device(struct device *dev); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index ffea0728b8b2fb..657c93c38b0dc2 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -152,7 +152,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr, { struct bus_attribute *bus_attr = to_bus_attr(attr); struct subsys_private *subsys_priv = to_subsys_private(kobj); - ssize_t ret = 0; + /* return -EIO for reading a bus attribute without show() */ + ssize_t ret = -EIO; if (bus_attr->show) ret = bus_attr->show(subsys_priv->bus, buf); @@ -164,7 +165,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr, { struct bus_attribute *bus_attr = to_bus_attr(attr); struct subsys_private *subsys_priv = to_subsys_private(kobj); - ssize_t ret = 0; + /* return -EIO for writing a bus attribute without store() 
*/ + ssize_t ret = -EIO; if (bus_attr->store) ret = bus_attr->store(subsys_priv->bus, buf, count); @@ -389,7 +391,7 @@ EXPORT_SYMBOL_GPL(bus_for_each_dev); */ struct device *bus_find_device(const struct bus_type *bus, struct device *start, const void *data, - int (*match)(struct device *dev, const void *data)) + device_match_t match) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; @@ -920,6 +922,8 @@ int bus_register(const struct bus_type *bus) bus_remove_file(bus, &bus_attr_uevent); bus_uevent_fail: kset_unregister(&priv->subsys); + /* Above kset_unregister() will kfree @priv */ + priv = NULL; out: kfree(priv); return retval; @@ -1294,7 +1298,7 @@ int subsys_virtual_register(const struct bus_type *subsys, { struct kobject *virtual_dir; - virtual_dir = virtual_device_parent(NULL); + virtual_dir = virtual_device_parent(); if (!virtual_dir) return -ENOMEM; @@ -1385,8 +1389,13 @@ int __init buses_init(void) return -ENOMEM; system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj); - if (!system_kset) + if (!system_kset) { + /* Do error handling here as devices_init() do */ + kset_unregister(bus_kset); + bus_kset = NULL; + pr_err("%s: failed to create and add kset 'bus'\n", __func__); return -ENOMEM; + } return 0; } diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 23b8cba4a2a3b8..7a7609298e18bd 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -202,29 +202,24 @@ static void cache_of_set_props(struct cacheinfo *this_leaf, static int cache_setup_of_node(unsigned int cpu) { - struct device_node *np, *prev; struct cacheinfo *this_leaf; unsigned int index = 0; - np = of_cpu_device_node_get(cpu); + struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu); if (!np) { pr_err("Failed to find cpu%d device node\n", cpu); return -ENOENT; } if (!of_check_cache_nodes(np)) { - of_node_put(np); return -ENOENT; } - prev = np; - while (index < cache_leaves(cpu)) { this_leaf = per_cpu_cacheinfo_idx(cpu, index); if (this_leaf->level != 1) { + struct device_node *prev __free(device_node) = np; np = of_find_next_cache_node(np); - of_node_put(prev); - prev = np; if (!np) break; } @@ -233,8 +228,6 @@ static int cache_setup_of_node(unsigned int cpu) index++; } - of_node_put(np); - if (index != cache_leaves(cpu)) /* not all OF nodes populated */ return -ENOENT; @@ -243,17 +236,14 @@ static int cache_setup_of_node(unsigned int cpu) static bool of_check_cache_nodes(struct device_node *np) { - struct device_node *next; - if (of_property_present(np, "cache-size") || of_property_present(np, "i-cache-size") || of_property_present(np, "d-cache-size") || of_property_present(np, "cache-unified")) return true; - next = of_find_next_cache_node(np); + struct device_node *next __free(device_node) = of_find_next_cache_node(np); if (next) { - of_node_put(next); return true; } @@ -287,12 +277,10 @@ static int of_count_cache_leaves(struct device_node *np) int init_of_cache_level(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct device_node *np = of_cpu_device_node_get(cpu); - struct device_node *prev = NULL; + struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu); unsigned int levels = 0, leaves, level; if (!of_check_cache_nodes(np)) { - of_node_put(np); return -ENOENT; } @@ -300,30 +288,27 @@ int init_of_cache_level(unsigned int cpu) if (leaves > 0) levels = 1; - prev = np; - while ((np = of_find_next_cache_node(np))) { - of_node_put(prev); - prev = np; + while (1) { + struct 
device_node *prev __free(device_node) = np; + np = of_find_next_cache_node(np); + if (!np) + break; + if (!of_device_is_compatible(np, "cache")) - goto err_out; + return -EINVAL; if (of_property_read_u32(np, "cache-level", &level)) - goto err_out; + return -EINVAL; if (level <= levels) - goto err_out; + return -EINVAL; leaves += of_count_cache_leaves(np); levels = level; } - of_node_put(np); this_cpu_ci->num_levels = levels; this_cpu_ci->num_leaves = leaves; return 0; - -err_out: - of_node_put(np); - return -EINVAL; } #else diff --git a/drivers/base/class.c b/drivers/base/class.c index 7b38fdf8e1d78e..cb5359235c7020 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -183,6 +183,17 @@ int class_register(const struct class *cls) pr_debug("device class '%s': registering\n", cls->name); + if (cls->ns_type && !cls->namespace) { + pr_err("%s: class '%s' does not have namespace\n", + __func__, cls->name); + return -EINVAL; + } + if (!cls->ns_type && cls->namespace) { + pr_err("%s: class '%s' does not have ns_type\n", + __func__, cls->name); + return -EINVAL; + } + cp = kzalloc(sizeof(*cp), GFP_KERNEL); if (!cp) return -ENOMEM; @@ -433,8 +444,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device); * code. There's no locking restriction. */ struct device *class_find_device(const struct class *class, const struct device *start, - const void *data, - int (*match)(struct device *, const void *)) + const void *data, device_match_t match) { struct subsys_private *sp = class_to_subsys(class); struct class_dev_iter iter; diff --git a/drivers/base/core.c b/drivers/base/core.c index 8c0733d3aad8e9..a4c853411a6b2b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -9,29 +9,30 @@ */ #include +#include +#include #include #include +#include /* for dma_default_coherent */ #include #include #include +#include #include #include -#include -#include +#include +#include #include #include #include -#include -#include #include -#include #include -#include #include +#include +#include #include #include #include -#include /* for dma_default_coherent */ #include "base.h" #include "physical_location.h" @@ -97,12 +98,9 @@ static int __fwnode_link_add(struct fwnode_handle *con, int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup, u8 flags) { - int ret; + guard(mutex)(&fwnode_link_lock); - mutex_lock(&fwnode_link_lock); - ret = __fwnode_link_add(con, sup, flags); - mutex_unlock(&fwnode_link_lock); - return ret; + return __fwnode_link_add(con, sup, flags); } /** @@ -143,10 +141,10 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode) { struct fwnode_link *link, *tmp; - mutex_lock(&fwnode_link_lock); + guard(mutex)(&fwnode_link_lock); + list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) __fwnode_link_del(link); - mutex_unlock(&fwnode_link_lock); } /** @@ -159,10 +157,10 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode) { struct fwnode_link *link, *tmp; - mutex_lock(&fwnode_link_lock); + guard(mutex)(&fwnode_link_lock); + list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) __fwnode_link_del(link); - mutex_unlock(&fwnode_link_lock); } /** @@ -563,20 +561,11 @@ static struct class devlink_class = { static int devlink_add_symlinks(struct device *dev) { + char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL; int ret; - size_t len; struct device_link *link = to_devlink(dev); struct device *sup = link->supplier; struct device *con = link->consumer; - char *buf; - - len = max(strlen(dev_bus_name(sup)) + 
strlen(dev_name(sup)), - strlen(dev_bus_name(con)) + strlen(dev_name(con))); - len += strlen(":"); - len += strlen("supplier:") + 1; - buf = kzalloc(len, GFP_KERNEL); - if (!buf) - return -ENOMEM; ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier"); if (ret) @@ -586,58 +575,64 @@ static int devlink_add_symlinks(struct device *dev) if (ret) goto err_con; - snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); - ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf); + buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); + if (!buf_con) { + ret = -ENOMEM; + goto err_con_dev; + } + + ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con); if (ret) goto err_con_dev; - snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); - ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf); + buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); + if (!buf_sup) { + ret = -ENOMEM; + goto err_sup_dev; + } + + ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup); if (ret) goto err_sup_dev; goto out; err_sup_dev: - snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); - sysfs_remove_link(&sup->kobj, buf); + sysfs_remove_link(&sup->kobj, buf_con); err_con_dev: sysfs_remove_link(&link->link_dev.kobj, "consumer"); err_con: sysfs_remove_link(&link->link_dev.kobj, "supplier"); out: - kfree(buf); return ret; } static void devlink_remove_symlinks(struct device *dev) { + char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL; struct device_link *link = to_devlink(dev); - size_t len; struct device *sup = link->supplier; struct device *con = link->consumer; - char *buf; sysfs_remove_link(&link->link_dev.kobj, "consumer"); sysfs_remove_link(&link->link_dev.kobj, "supplier"); - len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), - strlen(dev_bus_name(con)) + strlen(dev_name(con))); - len += strlen(":"); - len += strlen("supplier:") + 1; - buf = kzalloc(len, GFP_KERNEL); - if (!buf) { - WARN(1, "Unable to properly free device link symlinks!\n"); - return; - } - if (device_is_registered(con)) { - snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); - sysfs_remove_link(&con->kobj, buf); + buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); + if (!buf_sup) + goto out; + sysfs_remove_link(&con->kobj, buf_sup); } - snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); - sysfs_remove_link(&sup->kobj, buf); - kfree(buf); + + buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); + if (!buf_con) + goto out; + sysfs_remove_link(&sup->kobj, buf_con); + + return; + +out: + WARN(1, "Unable to properly free device link symlinks!\n"); } static struct class_interface devlink_class_intf = { @@ -678,6 +673,9 @@ postcore_initcall(devlink_class_init); * @supplier: Supplier end of the link. * @flags: Link flags. * + * Return: On success, a device_link struct will be returned. + * On error or invalid flag settings, NULL will be returned. + * * The caller is responsible for the proper synchronization of the link creation * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the * runtime PM framework to take the link into account. Second, if the @@ -1061,20 +1059,16 @@ int device_links_check_suppliers(struct device *dev) * Device waiting for supplier to become available is not allowed to * probe. 
*/ - mutex_lock(&fwnode_link_lock); - sup_fw = fwnode_links_check_suppliers(dev->fwnode); - if (sup_fw) { - if (!dev_is_best_effort(dev)) { - fwnode_ret = -EPROBE_DEFER; - dev_err_probe(dev, -EPROBE_DEFER, - "wait for supplier %pfwf\n", sup_fw); - } else { - fwnode_ret = -EAGAIN; + scoped_guard(mutex, &fwnode_link_lock) { + sup_fw = fwnode_links_check_suppliers(dev->fwnode); + if (sup_fw) { + if (dev_is_best_effort(dev)) + fwnode_ret = -EAGAIN; + else + return dev_err_probe(dev, -EPROBE_DEFER, + "wait for supplier %pfwf\n", sup_fw); } } - mutex_unlock(&fwnode_link_lock); - if (fwnode_ret == -EPROBE_DEFER) - return fwnode_ret; device_links_write_lock(); @@ -1093,10 +1087,8 @@ int device_links_check_suppliers(struct device *dev) } device_links_missing_supplier(dev); - dev_err_probe(dev, -EPROBE_DEFER, - "supplier %s not ready\n", - dev_name(link->supplier)); - ret = -EPROBE_DEFER; + ret = dev_err_probe(dev, -EPROBE_DEFER, + "supplier %s not ready\n", dev_name(link->supplier)); break; } WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); @@ -1249,9 +1241,8 @@ static ssize_t waiting_for_supplier_show(struct device *dev, bool val; device_lock(dev); - mutex_lock(&fwnode_link_lock); - val = !!fwnode_links_check_suppliers(dev->fwnode); - mutex_unlock(&fwnode_link_lock); + scoped_guard(mutex, &fwnode_link_lock) + val = !!fwnode_links_check_suppliers(dev->fwnode); device_unlock(dev); return sysfs_emit(buf, "%u\n", val); } @@ -1324,13 +1315,15 @@ void device_links_driver_bound(struct device *dev) */ if (dev->fwnode && dev->fwnode->dev == dev) { struct fwnode_handle *child; + fwnode_links_purge_suppliers(dev->fwnode); - mutex_lock(&fwnode_link_lock); + + guard(mutex)(&fwnode_link_lock); + fwnode_for_each_available_child_node(dev->fwnode, child) __fw_devlink_pickup_dangling_consumers(child, dev->fwnode); __fw_devlink_link_to_consumers(dev); - mutex_unlock(&fwnode_link_lock); } device_remove_file(dev, &dev_attr_waiting_for_supplier); @@ -2339,10 +2332,10 @@ static void fw_devlink_link_device(struct device *dev) fw_devlink_parse_fwtree(fwnode); - mutex_lock(&fwnode_link_lock); + guard(mutex)(&fwnode_link_lock); + __fw_devlink_link_to_consumers(dev); __fw_devlink_link_to_suppliers(dev, fwnode); - mutex_unlock(&fwnode_link_lock); } /* Device links support end. */ @@ -2591,7 +2584,7 @@ static const void *device_namespace(const struct kobject *kobj) const struct device *dev = kobj_to_dev(kobj); const void *ns = NULL; - if (dev->class && dev->class->ns_type) + if (dev->class && dev->class->namespace) ns = dev->class->namespace(dev); return ns; @@ -3170,7 +3163,7 @@ void device_initialize(struct device *dev) } EXPORT_SYMBOL_GPL(device_initialize); -struct kobject *virtual_device_parent(struct device *dev) +struct kobject *virtual_device_parent(void) { static struct kobject *virtual_dir = NULL; @@ -3248,7 +3241,7 @@ static struct kobject *get_device_parent(struct device *dev, * in a "glue" directory to prevent namespace collisions. 
*/ if (parent == NULL) - parent_kobj = virtual_device_parent(dev); + parent_kobj = virtual_device_parent(); else if (parent->class && !dev->class->ns_type) { subsys_put(sp); return &parent->kobj; @@ -4003,7 +3996,7 @@ int device_for_each_child(struct device *parent, void *data, struct device *child; int error = 0; - if (!parent->p) + if (!parent || !parent->p) return 0; klist_iter_init(&parent->p->klist_children, &i); @@ -4033,7 +4026,7 @@ int device_for_each_child_reverse(struct device *parent, void *data, struct device *child; int error = 0; - if (!parent->p) + if (!parent || !parent->p) return 0; klist_iter_init(&parent->p->klist_children, &i); @@ -4067,7 +4060,7 @@ struct device *device_find_child(struct device *parent, void *data, struct klist_iter i; struct device *child; - if (!parent) + if (!parent || !parent->p) return NULL; klist_iter_init(&parent->p->klist_children, &i); @@ -4515,9 +4508,11 @@ EXPORT_SYMBOL_GPL(device_destroy); */ int device_rename(struct device *dev, const char *new_name) { + struct subsys_private *sp = NULL; struct kobject *kobj = &dev->kobj; char *old_device_name = NULL; int error; + bool is_link_renamed = false; dev = get_device(dev); if (!dev) @@ -4532,7 +4527,7 @@ int device_rename(struct device *dev, const char *new_name) } if (dev->class) { - struct subsys_private *sp = class_to_subsys(dev->class); + sp = class_to_subsys(dev->class); if (!sp) { error = -EINVAL; @@ -4541,16 +4536,19 @@ int device_rename(struct device *dev, const char *new_name) error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name, new_name, kobject_namespace(kobj)); - subsys_put(sp); if (error) goto out; + + is_link_renamed = true; } error = kobject_rename(kobj, new_name); - if (error) - goto out; - out: + if (error && is_link_renamed) + sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name, + old_device_name, kobject_namespace(kobj)); + subsys_put(sp); + put_device(dev); kfree(old_device_name); @@ -4872,7 +4870,7 @@ set_dev_info(const struct device *dev, struct dev_printk_info *dev_info) else return; - strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem)); + strscpy(dev_info->subsystem, subsys); /* * Add device identifier DEVICE=: diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9b745ba54de107..f0e4b4aba885c6 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -248,7 +248,7 @@ static int deferred_devs_show(struct seq_file *s, void *data) list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe) seq_printf(s, "%s\t%s", dev_name(curr->device), - curr->device->p->deferred_probe_reason ?: "\n"); + curr->deferred_probe_reason ?: "\n"); mutex_unlock(&deferred_probe_mutex); @@ -393,6 +393,7 @@ bool device_is_bound(struct device *dev) { return dev->p && klist_node_attached(&dev->p->knode_driver); } +EXPORT_SYMBOL_GPL(device_is_bound); static void driver_bound(struct device *dev) { diff --git a/drivers/base/devres.c b/drivers/base/devres.c index a2ce0ead06a6de..2152eec0c1352c 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -1231,6 +1231,6 @@ void devm_free_percpu(struct device *dev, void __percpu *pdata) * devm_free_pages() does. 
*/ WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match, - (__force void *)pdata)); + (void *)(__force unsigned long)pdata)); } EXPORT_SYMBOL_GPL(devm_free_percpu); diff --git a/drivers/base/driver.c b/drivers/base/driver.c index 88c6fd1f1992f5..b4eb5b89c4ee7b 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(driver_for_each_device); */ struct device *driver_find_device(const struct device_driver *drv, struct device *start, const void *data, - int (*match)(struct device *dev, const void *data)) + device_match_t match) { struct klist_iter i; struct device *dev; diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index a03ee4b11134cf..324a9a3c087aa2 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -849,6 +849,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, {} #endif +/* + * Reject firmware file names with ".." path components. + * There are drivers that construct firmware file names from device-supplied + * strings, and we don't want some device to be able to tell us "I would like to + * be sent my firmware from ../../../etc/shadow, please". + * + * Search for ".." surrounded by either '/' or start/end of string. + * + * This intentionally only looks at the firmware name, not at the firmware base + * directory or at symlink contents. + */ +static bool name_contains_dotdot(const char *name) +{ + size_t name_len = strlen(name); + + return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 || + strstr(name, "/../") != NULL || + (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -869,6 +889,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name, goto out; } + if (name_contains_dotdot(name)) { + dev_warn(device, + "Firmware load for '%s' refused, path contains '..' component\n", + name); + ret = -EINVAL; + goto out; + } + ret = _request_firmware_prepare(&fw, name, device, buf, size, offset, opt_flags); if (ret <= 0) /* error or already assigned */ @@ -946,6 +974,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, * @name will be used as $FIRMWARE in the uevent environment and * should be distinctive enough not to be confused with any other * firmware image for this or any other device. + * It must not contain any ".." path components - "foo/bar..bin" is + * allowed, but "foo/../bar.bin" is not. * * Caller must hold the reference count of @device. 
* diff --git a/drivers/base/module.c b/drivers/base/module.c index f742ad2a21da02..c4eaa1158d54ed 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -66,27 +66,31 @@ int module_add_driver(struct module *mod, const struct device_driver *drv) driver_name = make_driver_name(drv); if (!driver_name) { ret = -ENOMEM; - goto out; + goto out_remove_kobj; } module_create_drivers_dir(mk); if (!mk->drivers_dir) { ret = -EINVAL; - goto out; + goto out_free_driver_name; } ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name); if (ret) - goto out; + goto out_remove_drivers_dir; kfree(driver_name); return 0; -out: - sysfs_remove_link(&drv->p->kobj, "module"); + +out_remove_drivers_dir: sysfs_remove_link(mk->drivers_dir, driver_name); + +out_free_driver_name: kfree(driver_name); +out_remove_kobj: + sysfs_remove_link(&drv->p->kobj, "module"); return ret; } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 4c3ee6521ba5f2..6f2a33722c5203 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1474,7 +1474,7 @@ static const struct dev_pm_ops platform_dev_pm_ops = { USE_PLATFORM_PM_SLEEP_OPS }; -struct bus_type platform_bus_type = { +const struct bus_type platform_bus_type = { .name = "platform", .dev_groups = platform_dev_groups, .match = platform_match, diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 327d168dd37a80..8c34ae1cd8d554 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -276,6 +276,51 @@ int dev_pm_domain_attach_list(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list); +/** + * devm_pm_domain_detach_list - devres-enabled version of dev_pm_domain_detach_list. + * @_list: The list of PM domains to detach. + * + * This function reverse the actions from devm_pm_domain_attach_list(). + * it will be invoked during the remove phase from drivers implicitly if driver + * uses devm_pm_domain_attach_list() to attach the PM domains. + */ +static void devm_pm_domain_detach_list(void *_list) +{ + struct dev_pm_domain_list *list = _list; + + dev_pm_domain_detach_list(list); +} + +/** + * devm_pm_domain_attach_list - devres-enabled version of dev_pm_domain_attach_list + * @dev: The device used to lookup the PM domains for. + * @data: The data used for attaching to the PM domains. + * @list: An out-parameter with an allocated list of attached PM domains. + * + * NOTE: this will also handle calling devm_pm_domain_detach_list() for + * you during remove phase. + * + * Returns the number of attached PM domains or a negative error code in case of + * a failure. + */ +int devm_pm_domain_attach_list(struct device *dev, + const struct dev_pm_domain_attach_data *data, + struct dev_pm_domain_list **list) +{ + int ret, num_pds; + + num_pds = dev_pm_domain_attach_list(dev, data, list); + if (num_pds <= 0) + return num_pds; + + ret = devm_add_action_or_reset(dev, devm_pm_domain_detach_list, *list); + if (ret) + return ret; + + return num_pds; +} +EXPORT_SYMBOL_GPL(devm_pm_domain_attach_list); + /** * dev_pm_domain_detach - Detach a device from its PM domain. * @dev: Device to detach. 
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c index 9b17c77dec9d7e..f36d3618b67c1a 100644 --- a/drivers/base/regmap/regcache-flat.c +++ b/drivers/base/regmap/regcache-flat.c @@ -27,7 +27,7 @@ static int regcache_flat_init(struct regmap *map) return -EINVAL; map->cache = kcalloc(regcache_flat_get_index(map, map->max_register) - + 1, sizeof(unsigned int), GFP_KERNEL); + + 1, sizeof(unsigned int), map->alloc_flags); if (!map->cache) return -ENOMEM; diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c index 2dea9d259c493a..8d27d3653ea3e7 100644 --- a/drivers/base/regmap/regcache-maple.c +++ b/drivers/base/regmap/regcache-maple.c @@ -348,7 +348,7 @@ static int regcache_maple_init(struct regmap *map) int ret; int range_start; - mt = kmalloc(sizeof(*mt), GFP_KERNEL); + mt = kmalloc(sizeof(*mt), map->alloc_flags); if (!mt) return -ENOMEM; map->cache = mt; diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 3db88bbcae0fc5..188438186589f6 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -187,7 +187,7 @@ static int regcache_rbtree_init(struct regmap *map) int i; int ret; - map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); + map->cache = kmalloc(sizeof *rbtree_ctx, map->alloc_flags); if (!map->cache) return -ENOMEM; diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 7ec1ec60533508..d3659ba3cc1122 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -195,7 +195,9 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) if (map->cache_ops->init) { dev_dbg(map->dev, "Initializing %s cache\n", map->cache_ops->name); + map->lock(map->lock_arg); ret = map->cache_ops->init(map); + map->unlock(map->lock_arg); if (ret) goto err_free; } @@ -223,7 +225,9 @@ void regcache_exit(struct regmap *map) if (map->cache_ops->exit) { dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name); + map->lock(map->lock_arg); map->cache_ops->exit(map); + map->unlock(map->lock_arg); } } diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index d3ec1345b5b53a..a750e48a26b87c 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -608,6 +608,30 @@ int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type, } EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple); +static int regmap_irq_create_domain(struct fwnode_handle *fwnode, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data *d) +{ + struct irq_domain_info info = { + .fwnode = fwnode, + .size = chip->num_irqs, + .hwirq_max = chip->num_irqs, + .virq_base = irq_base, + .ops = &regmap_domain_ops, + .host_data = d, + .name_suffix = chip->domain_suffix, + }; + + d->domain = irq_domain_instantiate(&info); + if (IS_ERR(d->domain)) { + dev_err(d->map->dev, "Failed to create IRQ domain\n"); + return PTR_ERR(d->domain); + } + + return 0; +} + + /** * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling * @@ -856,18 +880,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, } } - if (irq_base) - d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs, - irq_base, 0, - &regmap_domain_ops, d); - else - d->domain = irq_domain_create_linear(fwnode, chip->num_irqs, - &regmap_domain_ops, d); - if (!d->domain) { - dev_err(map->dev, "Failed to create IRQ domain\n"); - ret = -ENOMEM; + ret =
regmap_irq_create_domain(fwnode, irq_base, chip, d); + if (ret) goto err_alloc; - } ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags | IRQF_ONESHOT, diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index d790c7df5cac0e..4bf3f1e59ed760 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -22,6 +22,7 @@ struct regmap_test_param { enum regmap_endian val_endian; unsigned int from_reg; + bool fast_io; }; static void get_changed_bytes(void *orig, void *new, size_t size) @@ -80,41 +81,52 @@ static const char *regmap_endian_name(enum regmap_endian endian) static void param_to_desc(const struct regmap_test_param *param, char *desc) { - snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x", + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x", regcache_type_name(param->cache), regmap_endian_name(param->val_endian), + param->fast_io ? " fast I/O" : "", param->from_reg); } static const struct regmap_test_param regcache_types_list[] = { { .cache = REGCACHE_NONE }, + { .cache = REGCACHE_NONE, .fast_io = true }, { .cache = REGCACHE_FLAT }, + { .cache = REGCACHE_FLAT, .fast_io = true }, { .cache = REGCACHE_RBTREE }, + { .cache = REGCACHE_RBTREE, .fast_io = true }, { .cache = REGCACHE_MAPLE }, + { .cache = REGCACHE_MAPLE, .fast_io = true }, }; KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc); static const struct regmap_test_param real_cache_types_only_list[] = { { .cache = REGCACHE_FLAT }, + { .cache = REGCACHE_FLAT, .fast_io = true }, { .cache = REGCACHE_RBTREE }, + { .cache = REGCACHE_RBTREE, .fast_io = true }, { .cache = REGCACHE_MAPLE }, + { .cache = REGCACHE_MAPLE, .fast_io = true }, }; KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc); static const struct regmap_test_param real_cache_types_list[] = { { .cache = REGCACHE_FLAT, .from_reg = 0 }, + { .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true }, { .cache = REGCACHE_FLAT, .from_reg = 0x2001 }, { .cache = REGCACHE_FLAT, .from_reg = 0x2002 }, { .cache = REGCACHE_FLAT, .from_reg = 0x2003 }, { .cache = REGCACHE_FLAT, .from_reg = 0x2004 }, { .cache = REGCACHE_RBTREE, .from_reg = 0 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true }, { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 }, { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 }, { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 }, { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 }, { .cache = REGCACHE_MAPLE, .from_reg = 0 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true }, { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 }, { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 }, { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 }, @@ -125,11 +137,13 @@ KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc); static const struct regmap_test_param sparse_cache_types_list[] = { { .cache = REGCACHE_RBTREE, .from_reg = 0 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true }, { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 }, { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 }, { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 }, { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 }, { .cache = REGCACHE_MAPLE, .from_reg = 0 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true }, { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 }, { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 }, { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 }, @@ -151,8 +165,7 @@ static struct regmap *gen_regmap(struct kunit *test, struct reg_default 
*defaults; config->cache_type = param->cache; - config->disable_locking = config->cache_type == REGCACHE_RBTREE || - config->cache_type == REGCACHE_MAPLE; + config->fast_io = param->fast_io; if (config->max_register == 0) { config->max_register = param->from_reg; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index bfc6bc1eb3a4a9..4ded93687c1f0a 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -1445,6 +1445,7 @@ void regmap_exit(struct regmap *map) struct regmap_async *async; regcache_exit(map); + regmap_debugfs_exit(map); regmap_range_exit(map); if (map->bus && map->bus->free_context) diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c index ed3be52ab63d16..8540052d37c581 100644 --- a/drivers/bcma/driver_pci_host.c +++ b/drivers/bcma/driver_pci_host.c @@ -334,7 +334,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev, } /* If the root port is capable of returning Config Request - * Retry Status (CRS) Completion Status to software then + * Retry Status (RRS) Completion Status to software then * enable the feature. */ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) @@ -348,10 +348,10 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) NULL); root_cap = cap_ptr + PCI_EXP_RTCAP; bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16)); - if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) { - /* Enable CRS software visibility */ + if (val16 & BCMA_CORE_PCI_RC_RRS_VISIBILITY) { + /* Enable Configuration RRS Software Visibility */ root_ctrl = cap_ptr + PCI_EXP_RTCTL; - val16 = PCI_EXP_RTCTL_CRSSVE; + val16 = PCI_EXP_RTCTL_RRS_SVE; bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16, sizeof(u16)); @@ -360,7 +360,7 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) * 100 ms wait time from the end of Reset. If the device is * not done with its internal initialization, it must at * least return a completion TLP, with a completion status - * of "Configuration Request Retry Status (CRS)". The root + * of "Configuration Request Retry Status (RRS)". The root * complex must complete the request to the host by returning * a read-data value of 0001h for the Vendor ID field and * all 1s for any additional bytes included in the request. 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index cc9077b588d7e7..92b06d1de4cc7b 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include "aoe.h" @@ -361,6 +361,7 @@ ata_rw_frameinit(struct frame *f) } ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit; + dev_hold(t->ifp->nd); skb->dev = t->ifp->nd; } @@ -401,6 +402,8 @@ aoecmd_ata_rw(struct aoedev *d) __skb_queue_head_init(&queue); __skb_queue_tail(&queue, skb); aoenet_xmit(&queue); + } else { + dev_put(f->t->ifp->nd); } return 1; } @@ -483,10 +486,13 @@ resend(struct aoedev *d, struct frame *f) memcpy(h->dst, t->addr, sizeof h->dst); memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); + dev_hold(t->ifp->nd); skb->dev = t->ifp->nd; skb = skb_clone(skb, GFP_ATOMIC); - if (skb == NULL) + if (skb == NULL) { + dev_put(t->ifp->nd); return; + } f->sent = ktime_get(); __skb_queue_head_init(&queue); __skb_queue_tail(&queue, skb); @@ -617,6 +623,8 @@ probe(struct aoetgt *t) __skb_queue_head_init(&queue); __skb_queue_tail(&queue, skb); aoenet_xmit(&queue); + } else { + dev_put(f->t->ifp->nd); } } @@ -1395,6 +1403,7 @@ aoecmd_ata_id(struct aoedev *d) ah->cmdstat = ATA_CMD_ID_ATA; ah->lba3 = 0xa0; + dev_hold(t->ifp->nd); skb->dev = t->ifp->nd; d->rttavg = RTTAVG_INIT; @@ -1404,6 +1413,8 @@ aoecmd_ata_id(struct aoedev *d) skb = skb_clone(skb, GFP_ATOMIC); if (skb) f->sent = ktime_get(); + else + dev_put(t->ifp->nd); return skb; } diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index 923a134fd76656..66e617664c143a 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include "aoe.h" #define NECODES 5 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 94dc0a235919d7..2a05d955e30b67 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -297,10 +297,6 @@ struct drbd_epoch { unsigned long flags; }; -/* Prototype declaration of function defined in drbd_receiver.c */ -int drbdd_init(struct drbd_thread *); -int drbd_asender(struct drbd_thread *); - /* drbd_epoch flag bits */ enum { DE_HAVE_BARRIER_NUMBER, @@ -864,7 +860,6 @@ struct drbd_device { struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */ struct list_head net_ee; /* zero-copy network send in progress */ - int next_barrier_nr; struct list_head resync_reads; atomic_t pp_in_use; /* allocated from page pool */ atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */ @@ -1390,9 +1385,6 @@ extern void conn_free_crypto(struct drbd_connection *connection); extern void do_submit(struct work_struct *ws); extern void __drbd_make_request(struct drbd_device *, struct bio *); void drbd_submit_bio(struct bio *bio); -extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); -extern int is_valid_ar_handle(struct drbd_request *, sector_t); - /* drbd_nl.c */ @@ -1474,7 +1466,6 @@ extern int w_resync_timer(struct drbd_work *, int); extern int w_send_write_hint(struct drbd_work *, int); extern int w_send_dblock(struct drbd_work *, int); extern int w_send_read_req(struct drbd_work *, int); -extern int w_e_reissue(struct drbd_work *, int); extern int w_restart_disk_io(struct drbd_work *, int); extern int w_send_out_of_sync(struct drbd_work *, int); @@ -1488,7 +1479,6 @@ extern int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags); 
extern int drbd_receiver(struct drbd_thread *thi); extern int drbd_ack_receiver(struct drbd_thread *thi); -extern void drbd_send_ping_wf(struct work_struct *ws); extern void drbd_send_acks_wf(struct work_struct *ws); extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device); extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector, @@ -1504,7 +1494,6 @@ extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0) #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1) extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool); -extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled); extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); extern int drbd_connected(struct drbd_peer_device *); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a9e49b212341be..0d74d75260ef12 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1550,7 +1550,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa * put_page(); and would cause either a VM_BUG directly, or * __page_cache_release a page that would actually still be referenced * by someone, leading to some obscure delayed Oops somewhere else. */ - if (!drbd_disable_sendpage && sendpage_ok(page)) + if (!drbd_disable_sendpage && sendpages_ok(page, len, offset)) msg.msg_flags |= MSG_NOSIGNAL | MSG_SPLICE_PAGES; drbd_update_congested(peer_device->connection); @@ -3399,10 +3399,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local) void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local) { unsigned long flags; - if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) + spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); + if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) { + spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); return; + } - spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); if (val == 0) { drbd_uuid_move_history(device); device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 5d65c9754d8377..720fc30e2ecc92 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -25,7 +25,7 @@ #include "drbd_protocol.h" #include "drbd_req.h" #include "drbd_state_change.h" -#include +#include #include #include diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index e858e7e0383f26..c2b6c4d9729db9 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) ns.disk == D_OUTDATED) rv = SS_CONNECTED_OUTDATES; - else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && + else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && (nc->verify_alg[0] == 0)) rv = SS_NO_VERIFY_ALG; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index c6ef0546ffc9d2..223faa9d5ffdae 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -2259,35 +2259,20 @@ static const struct file_operations mtip_regs_fops = { .owner = THIS_MODULE, .open = simple_open, .read = mtip_hw_read_registers, - .llseek = no_llseek, }; static const struct file_operations mtip_flags_fops = { .owner = 
THIS_MODULE, .open = simple_open, .read = mtip_hw_read_flags, - .llseek = no_llseek, }; -static int mtip_hw_debugfs_init(struct driver_data *dd) +static void mtip_hw_debugfs_init(struct driver_data *dd) { - if (!dfs_parent) - return -1; - dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent); - if (IS_ERR_OR_NULL(dd->dfs_node)) { - dev_warn(&dd->pdev->dev, - "Error creating node %s under debugfs\n", - dd->disk->disk_name); - dd->dfs_node = NULL; - return -1; - } - debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops); debugfs_create_file("registers", 0444, dd->dfs_node, dd, &mtip_regs_fops); - - return 0; } static void mtip_hw_debugfs_exit(struct driver_data *dd) @@ -4043,10 +4028,6 @@ static int __init mtip_init(void) mtip_major = error; dfs_parent = debugfs_create_dir("rssd", NULL); - if (IS_ERR_OR_NULL(dfs_parent)) { - pr_warn("Error creating debugfs parent\n"); - dfs_parent = NULL; - } /* Register our PCI operations. */ error = pci_register_driver(&mtip_pci_driver); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 41a90150b501d7..b852050d8a9665 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -181,6 +181,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd) { struct request *req = blk_mq_rq_from_pdu(cmd); + lockdep_assert_held(&cmd->lock); + + /* + * Clear INFLIGHT flag so that this cmd won't be completed in + * normal completion path + * + * INFLIGHT flag will be set when the cmd is queued to nbd next + * time. + */ + __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) blk_mq_requeue_request(req, true); } @@ -339,7 +350,7 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize, lim = queue_limits_start_update(nbd->disk->queue); if (nbd->config->flags & NBD_FLAG_SEND_TRIM) - lim.max_hw_discard_sectors = UINT_MAX; + lim.max_hw_discard_sectors = UINT_MAX >> SECTOR_SHIFT; else lim.max_hw_discard_sectors = 0; if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) { @@ -350,6 +361,11 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize, lim.features |= BLK_FEAT_WRITE_CACHE; lim.features &= ~BLK_FEAT_FUA; } + if (nbd->config->flags & NBD_FLAG_ROTATIONAL) + lim.features |= BLK_FEAT_ROTATIONAL; + if (nbd->config->flags & NBD_FLAG_SEND_WRITE_ZEROES) + lim.max_write_zeroes_sectors = UINT_MAX >> SECTOR_SHIFT; + lim.logical_block_size = blksize; lim.physical_block_size = blksize; error = queue_limits_commit_update(nbd->disk->queue, &lim); @@ -418,6 +434,8 @@ static u32 req_to_nbd_cmd_type(struct request *req) return NBD_CMD_WRITE; case REQ_OP_READ: return NBD_CMD_READ; + case REQ_OP_WRITE_ZEROES: + return NBD_CMD_WRITE_ZEROES; default: return U32_MAX; } @@ -488,8 +506,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) nbd_mark_nsock_dead(nbd, nsock, 1); mutex_unlock(&nsock->tx_lock); } - mutex_unlock(&cmd->lock); nbd_requeue_cmd(cmd); + mutex_unlock(&cmd->lock); nbd_config_put(nbd); return BLK_EH_DONE; } @@ -634,6 +652,8 @@ static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, if (req->cmd_flags & REQ_FUA) nbd_cmd_flags |= NBD_CMD_FLAG_FUA; + if ((req->cmd_flags & REQ_NOUNMAP) && (type == NBD_CMD_WRITE_ZEROES)) + nbd_cmd_flags |= NBD_CMD_FLAG_NO_HOLE; /* We did a partial send previously, and we at least sent the whole * request struct, so just go and send the rest of the pages in the @@ -1703,6 +1723,10 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused) seq_puts(s, "NBD_FLAG_SEND_FUA\n"); if (flags & 
NBD_FLAG_SEND_TRIM) seq_puts(s, "NBD_FLAG_SEND_TRIM\n"); + if (flags & NBD_FLAG_SEND_WRITE_ZEROES) + seq_puts(s, "NBD_FLAG_SEND_WRITE_ZEROES\n"); + if (flags & NBD_FLAG_ROTATIONAL) + seq_puts(s, "NBD_FLAG_ROTATIONAL\n"); return 0; } diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 7cece5884b9c67..65b96c083b3cad 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -71,7 +71,7 @@ #include #include -#include +#include #define DRIVER_NAME "pktcdvd" @@ -498,8 +498,6 @@ static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) if (!pkt_debugfs_root) return; pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root); - if (!pd->dfs_d_root) - return; pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root, pd, &pkt_seq_fops); @@ -2837,7 +2835,6 @@ static const struct file_operations pkt_ctl_fops = { .compat_ioctl = pkt_ctl_compat_ioctl, #endif .owner = THIS_MODULE, - .llseek = no_llseek, }; static struct miscdevice pkt_misc = { diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index f6e3a3c4b76cc4..08ce6d96d04cfa 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -149,15 +149,22 @@ static int process_rdma(struct rnbd_srv_session *srv_sess, rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL); if (bio_add_page(bio, virt_to_page(data), datalen, offset_in_page(data)) != datalen) { - rnbd_srv_err(sess_dev, "Failed to map data to bio\n"); + rnbd_srv_err_rl(sess_dev, "Failed to map data to bio\n"); err = -EINVAL; goto bio_put; } + bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw)); + if (bio_has_data(bio) && + bio->bi_iter.bi_size != le32_to_cpu(msg->bi_size)) { + rnbd_srv_err_rl(sess_dev, "Datalen mismatch: bio bi_size (%u), bi_size (%u)\n", + bio->bi_iter.bi_size, msg->bi_size); + err = -EINVAL; + goto bio_put; + } bio->bi_end_io = rnbd_dev_bi_end_io; bio->bi_private = priv; bio->bi_iter.bi_sector = le64_to_cpu(msg->sector); - bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size); prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR || usrlen < sizeof(*msg) ? 
0 : le16_to_cpu(msg->prio); bio_set_prio(bio, prio); diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 1d53a3f48a0eb9..a6c8e5cc605173 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -71,9 +71,6 @@ struct ublk_rq_data { struct llist_node node; struct kref ref; - __u64 sector; - __u32 operation; - __u32 nr_zones; }; struct ublk_uring_cmd_pdu { @@ -214,6 +211,33 @@ static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq) #ifdef CONFIG_BLK_DEV_ZONED +struct ublk_zoned_report_desc { + __u64 sector; + __u32 operation; + __u32 nr_zones; +}; + +static DEFINE_XARRAY(ublk_zoned_report_descs); + +static int ublk_zoned_insert_report_desc(const struct request *req, + struct ublk_zoned_report_desc *desc) +{ + return xa_insert(&ublk_zoned_report_descs, (unsigned long)req, + desc, GFP_KERNEL); +} + +static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc( + const struct request *req) +{ + return xa_erase(&ublk_zoned_report_descs, (unsigned long)req); +} + +static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc( + const struct request *req) +{ + return xa_load(&ublk_zoned_report_descs, (unsigned long)req); +} + static int ublk_get_nr_zones(const struct ublk_device *ub) { const struct ublk_param_basic *p = &ub->params.basic; @@ -308,7 +332,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, unsigned int zones_in_request = min_t(unsigned int, remaining_zones, max_zones_per_request); struct request *req; - struct ublk_rq_data *pdu; + struct ublk_zoned_report_desc desc; blk_status_t status; memset(buffer, 0, buffer_length); @@ -319,20 +343,23 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, goto out; } - pdu = blk_mq_rq_to_pdu(req); - pdu->operation = UBLK_IO_OP_REPORT_ZONES; - pdu->sector = sector; - pdu->nr_zones = zones_in_request; + desc.operation = UBLK_IO_OP_REPORT_ZONES; + desc.sector = sector; + desc.nr_zones = zones_in_request; + ret = ublk_zoned_insert_report_desc(req, &desc); + if (ret) + goto free_req; ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length, GFP_KERNEL); - if (ret) { - blk_mq_free_request(req); - goto out; - } + if (ret) + goto erase_desc; status = blk_execute_rq(req, 0); ret = blk_status_to_errno(status); +erase_desc: + ublk_zoned_erase_report_desc(req); +free_req: blk_mq_free_request(req); if (ret) goto out; @@ -366,7 +393,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, { struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); struct ublk_io *io = &ubq->ios[req->tag]; - struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req); + struct ublk_zoned_report_desc *desc; u32 ublk_op; switch (req_op(req)) { @@ -389,12 +416,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, ublk_op = UBLK_IO_OP_ZONE_RESET_ALL; break; case REQ_OP_DRV_IN: - ublk_op = pdu->operation; + desc = ublk_zoned_get_report_desc(req); + if (!desc) + return BLK_STS_IOERR; + ublk_op = desc->operation; switch (ublk_op) { case UBLK_IO_OP_REPORT_ZONES: iod->op_flags = ublk_op | ublk_req_build_flags(req); - iod->nr_zones = pdu->nr_zones; - iod->start_sector = pdu->sector; + iod->nr_zones = desc->nr_zones; + iod->start_sector = desc->sector; return BLK_STS_OK; default: return BLK_STS_IOERR; @@ -1953,7 +1983,6 @@ static const struct file_operations ublk_ch_fops = { .owner = THIS_MODULE, .open = ublk_ch_open, .release = ublk_ch_release, - .llseek = no_llseek, .read_iter = ublk_ch_read_iter, .write_iter = ublk_ch_write_iter, .uring_cmd = ublk_ch_uring_cmd, diff --git 
a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index eacf1cba7bf453..6aea609b795c2f 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -2,8 +2,6 @@ config ZRAM tristate "Compressed RAM block device support" depends on BLOCK && SYSFS && MMU - depends on HAVE_ZSMALLOC - depends on CRYPTO_LZO || CRYPTO_ZSTD || CRYPTO_LZ4 || CRYPTO_LZ4HC || CRYPTO_842 select ZSMALLOC help Creates virtual block devices called /dev/zramX (X = 0, 1, ...). @@ -16,6 +14,49 @@ config ZRAM See Documentation/admin-guide/blockdev/zram.rst for more information. +config ZRAM_BACKEND_LZ4 + bool "lz4 compression support" + depends on ZRAM + select LZ4_COMPRESS + select LZ4_DECOMPRESS + +config ZRAM_BACKEND_LZ4HC + bool "lz4hc compression support" + depends on ZRAM + select LZ4HC_COMPRESS + select LZ4_DECOMPRESS + +config ZRAM_BACKEND_ZSTD + bool "zstd compression support" + depends on ZRAM + select ZSTD_COMPRESS + select ZSTD_DECOMPRESS + +config ZRAM_BACKEND_DEFLATE + bool "deflate compression support" + depends on ZRAM + select ZLIB_DEFLATE + select ZLIB_INFLATE + +config ZRAM_BACKEND_842 + bool "842 compression support" + depends on ZRAM + select 842_COMPRESS + select 842_DECOMPRESS + +config ZRAM_BACKEND_FORCE_LZO + depends on ZRAM + def_bool !ZRAM_BACKEND_LZ4 && !ZRAM_BACKEND_LZ4HC && \ + !ZRAM_BACKEND_ZSTD && !ZRAM_BACKEND_DEFLATE && \ + !ZRAM_BACKEND_842 + +config ZRAM_BACKEND_LZO + bool "lzo and lzo-rle compression support" if !ZRAM_BACKEND_FORCE_LZO + depends on ZRAM + default ZRAM_BACKEND_FORCE_LZO + select LZO_COMPRESS + select LZO_DECOMPRESS + choice prompt "Default zram compressor" default ZRAM_DEF_COMP_LZORLE @@ -23,38 +64,44 @@ choice config ZRAM_DEF_COMP_LZORLE bool "lzo-rle" - depends on CRYPTO_LZO + depends on ZRAM_BACKEND_LZO -config ZRAM_DEF_COMP_ZSTD - bool "zstd" - depends on CRYPTO_ZSTD +config ZRAM_DEF_COMP_LZO + bool "lzo" + depends on ZRAM_BACKEND_LZO config ZRAM_DEF_COMP_LZ4 bool "lz4" - depends on CRYPTO_LZ4 - -config ZRAM_DEF_COMP_LZO - bool "lzo" - depends on CRYPTO_LZO + depends on ZRAM_BACKEND_LZ4 config ZRAM_DEF_COMP_LZ4HC bool "lz4hc" - depends on CRYPTO_LZ4HC + depends on ZRAM_BACKEND_LZ4HC + +config ZRAM_DEF_COMP_ZSTD + bool "zstd" + depends on ZRAM_BACKEND_ZSTD + +config ZRAM_DEF_COMP_DEFLATE + bool "deflate" + depends on ZRAM_BACKEND_DEFLATE config ZRAM_DEF_COMP_842 bool "842" - depends on CRYPTO_842 + depends on ZRAM_BACKEND_842 endchoice config ZRAM_DEF_COMP string default "lzo-rle" if ZRAM_DEF_COMP_LZORLE - default "zstd" if ZRAM_DEF_COMP_ZSTD - default "lz4" if ZRAM_DEF_COMP_LZ4 default "lzo" if ZRAM_DEF_COMP_LZO + default "lz4" if ZRAM_DEF_COMP_LZ4 default "lz4hc" if ZRAM_DEF_COMP_LZ4HC + default "zstd" if ZRAM_DEF_COMP_ZSTD + default "deflate" if ZRAM_DEF_COMP_DEFLATE default "842" if ZRAM_DEF_COMP_842 + default "unset-value" config ZRAM_WRITEBACK bool "Write back incompressible or idle page to backing device" diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile index de9e457907b1e9..0fdefd576691bc 100644 --- a/drivers/block/zram/Makefile +++ b/drivers/block/zram/Makefile @@ -1,4 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only + zram-y := zcomp.o zram_drv.o +zram-$(CONFIG_ZRAM_BACKEND_LZO) += backend_lzorle.o backend_lzo.o +zram-$(CONFIG_ZRAM_BACKEND_LZ4) += backend_lz4.o +zram-$(CONFIG_ZRAM_BACKEND_LZ4HC) += backend_lz4hc.o +zram-$(CONFIG_ZRAM_BACKEND_ZSTD) += backend_zstd.o +zram-$(CONFIG_ZRAM_BACKEND_DEFLATE) += backend_deflate.o +zram-$(CONFIG_ZRAM_BACKEND_842) += backend_842.o + obj-$(CONFIG_ZRAM) += zram.o diff --git 
a/drivers/block/zram/backend_842.c b/drivers/block/zram/backend_842.c new file mode 100644 index 00000000000000..10d9d5c60f5313 --- /dev/null +++ b/drivers/block/zram/backend_842.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include + +#include "backend_842.h" + +static void release_params_842(struct zcomp_params *params) +{ +} + +static int setup_params_842(struct zcomp_params *params) +{ + return 0; +} + +static void destroy_842(struct zcomp_ctx *ctx) +{ + kfree(ctx->context); +} + +static int create_842(struct zcomp_params *params, struct zcomp_ctx *ctx) +{ + ctx->context = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL); + if (!ctx->context) + return -ENOMEM; + return 0; +} + +static int compress_842(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + unsigned int dlen = req->dst_len; + int ret; + + ret = sw842_compress(req->src, req->src_len, req->dst, &dlen, + ctx->context); + if (ret == 0) + req->dst_len = dlen; + return ret; +} + +static int decompress_842(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + unsigned int dlen = req->dst_len; + + return sw842_decompress(req->src, req->src_len, req->dst, &dlen); +} + +const struct zcomp_ops backend_842 = { + .compress = compress_842, + .decompress = decompress_842, + .create_ctx = create_842, + .destroy_ctx = destroy_842, + .setup_params = setup_params_842, + .release_params = release_params_842, + .name = "842", +}; diff --git a/drivers/block/zram/backend_842.h b/drivers/block/zram/backend_842.h new file mode 100644 index 00000000000000..4dc85c188799ae --- /dev/null +++ b/drivers/block/zram/backend_842.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __BACKEND_842_H__ +#define __BACKEND_842_H__ + +#include "zcomp.h" + +extern const struct zcomp_ops backend_842; + +#endif /* __BACKEND_842_H__ */ diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c new file mode 100644 index 00000000000000..0f7f252c12f448 --- /dev/null +++ b/drivers/block/zram/backend_deflate.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include + +#include "backend_deflate.h" + +/* Use the same value as crypto API */ +#define DEFLATE_DEF_WINBITS 11 +#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL + +struct deflate_ctx { + struct z_stream_s cctx; + struct z_stream_s dctx; +}; + +static void deflate_release_params(struct zcomp_params *params) +{ +} + +static int deflate_setup_params(struct zcomp_params *params) +{ + if (params->level == ZCOMP_PARAM_NO_LEVEL) + params->level = Z_DEFAULT_COMPRESSION; + + return 0; +} + +static void deflate_destroy(struct zcomp_ctx *ctx) +{ + struct deflate_ctx *zctx = ctx->context; + + if (!zctx) + return; + + if (zctx->cctx.workspace) { + zlib_deflateEnd(&zctx->cctx); + vfree(zctx->cctx.workspace); + } + if (zctx->dctx.workspace) { + zlib_inflateEnd(&zctx->dctx); + vfree(zctx->dctx.workspace); + } + kfree(zctx); +} + +static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx) +{ + struct deflate_ctx *zctx; + size_t sz; + int ret; + + zctx = kzalloc(sizeof(*zctx), GFP_KERNEL); + if (!zctx) + return -ENOMEM; + + ctx->context = zctx; + sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL); + zctx->cctx.workspace = vzalloc(sz); + if (!zctx->cctx.workspace) + goto error; + + ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED, + -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, + 
Z_DEFAULT_STRATEGY); + if (ret != Z_OK) + goto error; + + sz = zlib_inflate_workspacesize(); + zctx->dctx.workspace = vzalloc(sz); + if (!zctx->dctx.workspace) + goto error; + + ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS); + if (ret != Z_OK) + goto error; + + return 0; + +error: + deflate_destroy(ctx); + return -EINVAL; +} + +static int deflate_compress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + struct deflate_ctx *zctx = ctx->context; + struct z_stream_s *deflate; + int ret; + + deflate = &zctx->cctx; + ret = zlib_deflateReset(deflate); + if (ret != Z_OK) + return -EINVAL; + + deflate->next_in = (u8 *)req->src; + deflate->avail_in = req->src_len; + deflate->next_out = (u8 *)req->dst; + deflate->avail_out = req->dst_len; + + ret = zlib_deflate(deflate, Z_FINISH); + if (ret != Z_STREAM_END) + return -EINVAL; + + req->dst_len = deflate->total_out; + return 0; +} + +static int deflate_decompress(struct zcomp_params *params, + struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + struct deflate_ctx *zctx = ctx->context; + struct z_stream_s *inflate; + int ret; + + inflate = &zctx->dctx; + + ret = zlib_inflateReset(inflate); + if (ret != Z_OK) + return -EINVAL; + + inflate->next_in = (u8 *)req->src; + inflate->avail_in = req->src_len; + inflate->next_out = (u8 *)req->dst; + inflate->avail_out = req->dst_len; + + ret = zlib_inflate(inflate, Z_SYNC_FLUSH); + if (ret != Z_STREAM_END) + return -EINVAL; + + return 0; +} + +const struct zcomp_ops backend_deflate = { + .compress = deflate_compress, + .decompress = deflate_decompress, + .create_ctx = deflate_create, + .destroy_ctx = deflate_destroy, + .setup_params = deflate_setup_params, + .release_params = deflate_release_params, + .name = "deflate", +}; diff --git a/drivers/block/zram/backend_deflate.h b/drivers/block/zram/backend_deflate.h new file mode 100644 index 00000000000000..a39ac12b114cf8 --- /dev/null +++ b/drivers/block/zram/backend_deflate.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __BACKEND_DEFLATE_H__ +#define __BACKEND_DEFLATE_H__ + +#include "zcomp.h" + +extern const struct zcomp_ops backend_deflate; + +#endif /* __BACKEND_DEFLATE_H__ */ diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c new file mode 100644 index 00000000000000..847f3334eb3824 --- /dev/null +++ b/drivers/block/zram/backend_lz4.c @@ -0,0 +1,127 @@ +#include +#include +#include +#include + +#include "backend_lz4.h" + +struct lz4_ctx { + void *mem; + + LZ4_streamDecode_t *dstrm; + LZ4_stream_t *cstrm; +}; + +static void lz4_release_params(struct zcomp_params *params) +{ +} + +static int lz4_setup_params(struct zcomp_params *params) +{ + if (params->level == ZCOMP_PARAM_NO_LEVEL) + params->level = LZ4_ACCELERATION_DEFAULT; + + return 0; +} + +static void lz4_destroy(struct zcomp_ctx *ctx) +{ + struct lz4_ctx *zctx = ctx->context; + + if (!zctx) + return; + + vfree(zctx->mem); + kfree(zctx->dstrm); + kfree(zctx->cstrm); + kfree(zctx); +} + +static int lz4_create(struct zcomp_params *params, struct zcomp_ctx *ctx) +{ + struct lz4_ctx *zctx; + + zctx = kzalloc(sizeof(*zctx), GFP_KERNEL); + if (!zctx) + return -ENOMEM; + + ctx->context = zctx; + if (params->dict_sz == 0) { + zctx->mem = vmalloc(LZ4_MEM_COMPRESS); + if (!zctx->mem) + goto error; + } else { + zctx->dstrm = kzalloc(sizeof(*zctx->dstrm), GFP_KERNEL); + if (!zctx->dstrm) + goto error; + + zctx->cstrm = kzalloc(sizeof(*zctx->cstrm), GFP_KERNEL); + if (!zctx->cstrm) + goto error; + } + + return 0; + 
+error: + lz4_destroy(ctx); + return -ENOMEM; +} + +static int lz4_compress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + struct lz4_ctx *zctx = ctx->context; + int ret; + + if (!zctx->cstrm) { + ret = LZ4_compress_fast(req->src, req->dst, req->src_len, + req->dst_len, params->level, + zctx->mem); + } else { + /* Cstrm needs to be reset */ + ret = LZ4_loadDict(zctx->cstrm, params->dict, params->dict_sz); + if (ret != params->dict_sz) + return -EINVAL; + ret = LZ4_compress_fast_continue(zctx->cstrm, req->src, + req->dst, req->src_len, + req->dst_len, params->level); + } + if (!ret) + return -EINVAL; + req->dst_len = ret; + return 0; +} + +static int lz4_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + struct lz4_ctx *zctx = ctx->context; + int ret; + + if (!zctx->dstrm) { + ret = LZ4_decompress_safe(req->src, req->dst, req->src_len, + req->dst_len); + } else { + /* Dstrm needs to be reset */ + ret = LZ4_setStreamDecode(zctx->dstrm, params->dict, + params->dict_sz); + if (!ret) + return -EINVAL; + ret = LZ4_decompress_safe_continue(zctx->dstrm, req->src, + req->dst, req->src_len, + req->dst_len); + } + if (ret < 0) + return -EINVAL; + return 0; +} + +const struct zcomp_ops backend_lz4 = { + .compress = lz4_compress, + .decompress = lz4_decompress, + .create_ctx = lz4_create, + .destroy_ctx = lz4_destroy, + .setup_params = lz4_setup_params, + .release_params = lz4_release_params, + .name = "lz4", +}; diff --git a/drivers/block/zram/backend_lz4.h b/drivers/block/zram/backend_lz4.h new file mode 100644 index 00000000000000..c11fa602a7033b --- /dev/null +++ b/drivers/block/zram/backend_lz4.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __BACKEND_LZ4_H__ +#define __BACKEND_LZ4_H__ + +#include "zcomp.h" + +extern const struct zcomp_ops backend_lz4; + +#endif /* __BACKEND_LZ4_H__ */ diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c new file mode 100644 index 00000000000000..5f37d5abcaeb7f --- /dev/null +++ b/drivers/block/zram/backend_lz4hc.c @@ -0,0 +1,128 @@ +#include +#include +#include +#include + +#include "backend_lz4hc.h" + +struct lz4hc_ctx { + void *mem; + + LZ4_streamDecode_t *dstrm; + LZ4_streamHC_t *cstrm; +}; + +static void lz4hc_release_params(struct zcomp_params *params) +{ +} + +static int lz4hc_setup_params(struct zcomp_params *params) +{ + if (params->level == ZCOMP_PARAM_NO_LEVEL) + params->level = LZ4HC_DEFAULT_CLEVEL; + + return 0; +} + +static void lz4hc_destroy(struct zcomp_ctx *ctx) +{ + struct lz4hc_ctx *zctx = ctx->context; + + if (!zctx) + return; + + kfree(zctx->dstrm); + kfree(zctx->cstrm); + vfree(zctx->mem); + kfree(zctx); +} + +static int lz4hc_create(struct zcomp_params *params, struct zcomp_ctx *ctx) +{ + struct lz4hc_ctx *zctx; + + zctx = kzalloc(sizeof(*zctx), GFP_KERNEL); + if (!zctx) + return -ENOMEM; + + ctx->context = zctx; + if (params->dict_sz == 0) { + zctx->mem = vmalloc(LZ4HC_MEM_COMPRESS); + if (!zctx->mem) + goto error; + } else { + zctx->dstrm = kzalloc(sizeof(*zctx->dstrm), GFP_KERNEL); + if (!zctx->dstrm) + goto error; + + zctx->cstrm = kzalloc(sizeof(*zctx->cstrm), GFP_KERNEL); + if (!zctx->cstrm) + goto error; + } + + return 0; + +error: + lz4hc_destroy(ctx); + return -EINVAL; +} + +static int lz4hc_compress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + struct lz4hc_ctx *zctx = ctx->context; + int ret; + + if (!zctx->cstrm) { + ret = LZ4_compress_HC(req->src, req->dst, 
req->src_len, + req->dst_len, params->level, + zctx->mem); + } else { + /* Cstrm needs to be reset */ + LZ4_resetStreamHC(zctx->cstrm, params->level); + ret = LZ4_loadDictHC(zctx->cstrm, params->dict, + params->dict_sz); + if (ret != params->dict_sz) + return -EINVAL; + ret = LZ4_compress_HC_continue(zctx->cstrm, req->src, req->dst, + req->src_len, req->dst_len); + } + if (!ret) + return -EINVAL; + req->dst_len = ret; + return 0; +} + +static int lz4hc_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + struct lz4hc_ctx *zctx = ctx->context; + int ret; + + if (!zctx->dstrm) { + ret = LZ4_decompress_safe(req->src, req->dst, req->src_len, + req->dst_len); + } else { + /* Dstrm needs to be reset */ + ret = LZ4_setStreamDecode(zctx->dstrm, params->dict, + params->dict_sz); + if (!ret) + return -EINVAL; + ret = LZ4_decompress_safe_continue(zctx->dstrm, req->src, + req->dst, req->src_len, + req->dst_len); + } + if (ret < 0) + return -EINVAL; + return 0; +} + +const struct zcomp_ops backend_lz4hc = { + .compress = lz4hc_compress, + .decompress = lz4hc_decompress, + .create_ctx = lz4hc_create, + .destroy_ctx = lz4hc_destroy, + .setup_params = lz4hc_setup_params, + .release_params = lz4hc_release_params, + .name = "lz4hc", +}; diff --git a/drivers/block/zram/backend_lz4hc.h b/drivers/block/zram/backend_lz4hc.h new file mode 100644 index 00000000000000..6de03551ed4d03 --- /dev/null +++ b/drivers/block/zram/backend_lz4hc.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __BACKEND_LZ4HC_H__ +#define __BACKEND_LZ4HC_H__ + +#include "zcomp.h" + +extern const struct zcomp_ops backend_lz4hc; + +#endif /* __BACKEND_LZ4HC_H__ */ diff --git a/drivers/block/zram/backend_lzo.c b/drivers/block/zram/backend_lzo.c new file mode 100644 index 00000000000000..4c906beaae6b33 --- /dev/null +++ b/drivers/block/zram/backend_lzo.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include + +#include "backend_lzo.h" + +static void lzo_release_params(struct zcomp_params *params) +{ +} + +static int lzo_setup_params(struct zcomp_params *params) +{ + return 0; +} + +static int lzo_create(struct zcomp_params *params, struct zcomp_ctx *ctx) +{ + ctx->context = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); + if (!ctx->context) + return -ENOMEM; + return 0; +} + +static void lzo_destroy(struct zcomp_ctx *ctx) +{ + kfree(ctx->context); +} + +static int lzo_compress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + int ret; + + ret = lzo1x_1_compress(req->src, req->src_len, req->dst, + &req->dst_len, ctx->context); + return ret == LZO_E_OK ? 0 : ret; +} + +static int lzo_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + int ret; + + ret = lzo1x_decompress_safe(req->src, req->src_len, + req->dst, &req->dst_len); + return ret == LZO_E_OK ? 
0 : ret; +} + +const struct zcomp_ops backend_lzo = { + .compress = lzo_compress, + .decompress = lzo_decompress, + .create_ctx = lzo_create, + .destroy_ctx = lzo_destroy, + .setup_params = lzo_setup_params, + .release_params = lzo_release_params, + .name = "lzo", +}; diff --git a/drivers/block/zram/backend_lzo.h b/drivers/block/zram/backend_lzo.h new file mode 100644 index 00000000000000..93d54749e63cd6 --- /dev/null +++ b/drivers/block/zram/backend_lzo.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __BACKEND_LZO_H__ +#define __BACKEND_LZO_H__ + +#include "zcomp.h" + +extern const struct zcomp_ops backend_lzo; + +#endif /* __BACKEND_LZO_H__ */ diff --git a/drivers/block/zram/backend_lzorle.c b/drivers/block/zram/backend_lzorle.c new file mode 100644 index 00000000000000..10640c96cbfcaa --- /dev/null +++ b/drivers/block/zram/backend_lzorle.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include + +#include "backend_lzorle.h" + +static void lzorle_release_params(struct zcomp_params *params) +{ +} + +static int lzorle_setup_params(struct zcomp_params *params) +{ + return 0; +} + +static int lzorle_create(struct zcomp_params *params, struct zcomp_ctx *ctx) +{ + ctx->context = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); + if (!ctx->context) + return -ENOMEM; + return 0; +} + +static void lzorle_destroy(struct zcomp_ctx *ctx) +{ + kfree(ctx->context); +} + +static int lzorle_compress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + int ret; + + ret = lzorle1x_1_compress(req->src, req->src_len, req->dst, + &req->dst_len, ctx->context); + return ret == LZO_E_OK ? 0 : ret; +} + +static int lzorle_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + int ret; + + ret = lzo1x_decompress_safe(req->src, req->src_len, + req->dst, &req->dst_len); + return ret == LZO_E_OK ? 0 : ret; +} + +const struct zcomp_ops backend_lzorle = { + .compress = lzorle_compress, + .decompress = lzorle_decompress, + .create_ctx = lzorle_create, + .destroy_ctx = lzorle_destroy, + .setup_params = lzorle_setup_params, + .release_params = lzorle_release_params, + .name = "lzo-rle", +}; diff --git a/drivers/block/zram/backend_lzorle.h b/drivers/block/zram/backend_lzorle.h new file mode 100644 index 00000000000000..6ecb163b09f138 --- /dev/null +++ b/drivers/block/zram/backend_lzorle.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __BACKEND_LZORLE_H__ +#define __BACKEND_LZORLE_H__ + +#include "zcomp.h" + +extern const struct zcomp_ops backend_lzorle; + +#endif /* __BACKEND_LZORLE_H__ */ diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c new file mode 100644 index 00000000000000..1184c0036f44bd --- /dev/null +++ b/drivers/block/zram/backend_zstd.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include + +#include "backend_zstd.h" + +struct zstd_ctx { + zstd_cctx *cctx; + zstd_dctx *dctx; + void *cctx_mem; + void *dctx_mem; +}; + +struct zstd_params { + zstd_custom_mem custom_mem; + zstd_cdict *cdict; + zstd_ddict *ddict; + zstd_parameters cprm; +}; + +/* + * For C/D dictionaries we need to provide zstd with zstd_custom_mem, + * which zstd uses internally to allocate/free memory when needed. + * + * This means that allocator.customAlloc() can be called from zcomp_compress() + * under local-lock (per-CPU compression stream), in which case we must use + * GFP_ATOMIC. 
+ * + * Another complication here is that we can be configured as a swap device. + */ +static void *zstd_custom_alloc(void *opaque, size_t size) +{ + if (!preemptible()) + return kvzalloc(size, GFP_ATOMIC); + + return kvzalloc(size, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN); +} + +static void zstd_custom_free(void *opaque, void *address) +{ + kvfree(address); +} + +static void zstd_release_params(struct zcomp_params *params) +{ + struct zstd_params *zp = params->drv_data; + + params->drv_data = NULL; + if (!zp) + return; + + zstd_free_cdict(zp->cdict); + zstd_free_ddict(zp->ddict); + kfree(zp); +} + +static int zstd_setup_params(struct zcomp_params *params) +{ + zstd_compression_parameters prm; + struct zstd_params *zp; + + zp = kzalloc(sizeof(*zp), GFP_KERNEL); + if (!zp) + return -ENOMEM; + + params->drv_data = zp; + if (params->level == ZCOMP_PARAM_NO_LEVEL) + params->level = zstd_default_clevel(); + + zp->cprm = zstd_get_params(params->level, PAGE_SIZE); + + zp->custom_mem.customAlloc = zstd_custom_alloc; + zp->custom_mem.customFree = zstd_custom_free; + + prm = zstd_get_cparams(params->level, PAGE_SIZE, + params->dict_sz); + + zp->cdict = zstd_create_cdict_byreference(params->dict, + params->dict_sz, + prm, + zp->custom_mem); + if (!zp->cdict) + goto error; + + zp->ddict = zstd_create_ddict_byreference(params->dict, + params->dict_sz, + zp->custom_mem); + if (!zp->ddict) + goto error; + + return 0; + +error: + zstd_release_params(params); + return -EINVAL; +} + +static void zstd_destroy(struct zcomp_ctx *ctx) +{ + struct zstd_ctx *zctx = ctx->context; + + if (!zctx) + return; + + /* + * If ->cctx_mem and ->dctx_mem were allocated then we didn't use + * C/D dictionary and ->cctx / ->dctx were "embedded" into these + * buffers. + * + * If otherwise then we need to explicitly release ->cctx / ->dctx. 
+ */ + if (zctx->cctx_mem) + vfree(zctx->cctx_mem); + else + zstd_free_cctx(zctx->cctx); + + if (zctx->dctx_mem) + vfree(zctx->dctx_mem); + else + zstd_free_dctx(zctx->dctx); + + kfree(zctx); +} + +static int zstd_create(struct zcomp_params *params, struct zcomp_ctx *ctx) +{ + struct zstd_ctx *zctx; + zstd_parameters prm; + size_t sz; + + zctx = kzalloc(sizeof(*zctx), GFP_KERNEL); + if (!zctx) + return -ENOMEM; + + ctx->context = zctx; + if (params->dict_sz == 0) { + prm = zstd_get_params(params->level, PAGE_SIZE); + sz = zstd_cctx_workspace_bound(&prm.cParams); + zctx->cctx_mem = vzalloc(sz); + if (!zctx->cctx_mem) + goto error; + + zctx->cctx = zstd_init_cctx(zctx->cctx_mem, sz); + if (!zctx->cctx) + goto error; + + sz = zstd_dctx_workspace_bound(); + zctx->dctx_mem = vzalloc(sz); + if (!zctx->dctx_mem) + goto error; + + zctx->dctx = zstd_init_dctx(zctx->dctx_mem, sz); + if (!zctx->dctx) + goto error; + } else { + struct zstd_params *zp = params->drv_data; + + zctx->cctx = zstd_create_cctx_advanced(zp->custom_mem); + if (!zctx->cctx) + goto error; + + zctx->dctx = zstd_create_dctx_advanced(zp->custom_mem); + if (!zctx->dctx) + goto error; + } + + return 0; + +error: + zstd_release_params(params); + zstd_destroy(ctx); + return -EINVAL; +} + +static int zstd_compress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + struct zstd_params *zp = params->drv_data; + struct zstd_ctx *zctx = ctx->context; + size_t ret; + + if (params->dict_sz == 0) + ret = zstd_compress_cctx(zctx->cctx, req->dst, req->dst_len, + req->src, req->src_len, &zp->cprm); + else + ret = zstd_compress_using_cdict(zctx->cctx, req->dst, + req->dst_len, req->src, + req->src_len, + zp->cdict); + if (zstd_is_error(ret)) + return -EINVAL; + req->dst_len = ret; + return 0; +} + +static int zstd_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req) +{ + struct zstd_params *zp = params->drv_data; + struct zstd_ctx *zctx = ctx->context; + size_t ret; + + if (params->dict_sz == 0) + ret = zstd_decompress_dctx(zctx->dctx, req->dst, req->dst_len, + req->src, req->src_len); + else + ret = zstd_decompress_using_ddict(zctx->dctx, req->dst, + req->dst_len, req->src, + req->src_len, zp->ddict); + if (zstd_is_error(ret)) + return -EINVAL; + return 0; +} + +const struct zcomp_ops backend_zstd = { + .compress = zstd_compress, + .decompress = zstd_decompress, + .create_ctx = zstd_create, + .destroy_ctx = zstd_destroy, + .setup_params = zstd_setup_params, + .release_params = zstd_release_params, + .name = "zstd", +}; diff --git a/drivers/block/zram/backend_zstd.h b/drivers/block/zram/backend_zstd.h new file mode 100644 index 00000000000000..10fdfff1ec1cce --- /dev/null +++ b/drivers/block/zram/backend_zstd.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __BACKEND_ZSTD_H__ +#define __BACKEND_ZSTD_H__ + +#include "zcomp.h" + +extern const struct zcomp_ops backend_zstd; + +#endif /* __BACKEND_ZSTD_H__ */ diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 8237b08c49d861..bb514403e30527 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -1,7 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2014 Sergey Senozhatsky. 
- */ #include #include @@ -15,91 +12,97 @@ #include "zcomp.h" -static const char * const backends[] = { -#if IS_ENABLED(CONFIG_CRYPTO_LZO) - "lzo", - "lzo-rle", +#include "backend_lzo.h" +#include "backend_lzorle.h" +#include "backend_lz4.h" +#include "backend_lz4hc.h" +#include "backend_zstd.h" +#include "backend_deflate.h" +#include "backend_842.h" + +static const struct zcomp_ops *backends[] = { +#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO) + &backend_lzorle, + &backend_lzo, #endif -#if IS_ENABLED(CONFIG_CRYPTO_LZ4) - "lz4", +#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4) + &backend_lz4, #endif -#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC) - "lz4hc", +#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC) + &backend_lz4hc, #endif -#if IS_ENABLED(CONFIG_CRYPTO_842) - "842", +#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD) + &backend_zstd, #endif -#if IS_ENABLED(CONFIG_CRYPTO_ZSTD) - "zstd", +#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE) + &backend_deflate, #endif +#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842) + &backend_842, +#endif + NULL }; -static void zcomp_strm_free(struct zcomp_strm *zstrm) +static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm) { - if (!IS_ERR_OR_NULL(zstrm->tfm)) - crypto_free_comp(zstrm->tfm); + comp->ops->destroy_ctx(&zstrm->ctx); vfree(zstrm->buffer); - zstrm->tfm = NULL; zstrm->buffer = NULL; } -/* - * Initialize zcomp_strm structure with ->tfm initialized by backend, and - * ->buffer. Return a negative value on error. - */ -static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp) +static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm) { - zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0); + int ret; + + ret = comp->ops->create_ctx(comp->params, &zstrm->ctx); + if (ret) + return ret; + /* * allocate 2 pages. 1 for compressed data, plus 1 extra for the * case when compressed size is larger than the original one */ zstrm->buffer = vzalloc(2 * PAGE_SIZE); - if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) { - zcomp_strm_free(zstrm); + if (!zstrm->buffer) { + zcomp_strm_free(comp, zstrm); return -ENOMEM; } return 0; } +static const struct zcomp_ops *lookup_backend_ops(const char *comp) +{ + int i = 0; + + while (backends[i]) { + if (sysfs_streq(comp, backends[i]->name)) + break; + i++; + } + return backends[i]; +} + bool zcomp_available_algorithm(const char *comp) { - /* - * Crypto does not ignore a trailing new line symbol, - * so make sure you don't supply a string containing - * one. - * This also means that we permit zcomp initialisation - * with any compressing algorithm known to crypto api. - */ - return crypto_has_comp(comp, 0, 0) == 1; + return lookup_backend_ops(comp) != NULL; } /* show available compressors */ ssize_t zcomp_available_show(const char *comp, char *buf) { - bool known_algorithm = false; ssize_t sz = 0; int i; - for (i = 0; i < ARRAY_SIZE(backends); i++) { - if (!strcmp(comp, backends[i])) { - known_algorithm = true; + for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) { + if (!strcmp(comp, backends[i]->name)) { sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "[%s] ", backends[i]); + "[%s] ", backends[i]->name); } else { sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "%s ", backends[i]); + "%s ", backends[i]->name); } } - /* - * Out-of-tree module known to crypto api or a missing - * entry in `backends'. 
- */ - if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1) - sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "[%s] ", comp); - sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n"); return sz; } @@ -115,38 +118,34 @@ void zcomp_stream_put(struct zcomp *comp) local_unlock(&comp->stream->lock); } -int zcomp_compress(struct zcomp_strm *zstrm, - const void *src, unsigned int *dst_len) +int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, + const void *src, unsigned int *dst_len) { - /* - * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized - * because sometimes we can endup having a bigger compressed data - * due to various reasons: for example compression algorithms tend - * to add some padding to the compressed buffer. Speaking of padding, - * comp algorithm `842' pads the compressed length to multiple of 8 - * and returns -ENOSP when the dst memory is not big enough, which - * is not something that ZRAM wants to see. We can handle the - * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we - * receive -ERRNO from the compressing backend we can't help it - * anymore. To make `842' happy we need to tell the exact size of - * the dst buffer, zram_drv will take care of the fact that - * compressed buffer is too big. - */ - *dst_len = PAGE_SIZE * 2; + struct zcomp_req req = { + .src = src, + .dst = zstrm->buffer, + .src_len = PAGE_SIZE, + .dst_len = 2 * PAGE_SIZE, + }; + int ret; - return crypto_comp_compress(zstrm->tfm, - src, PAGE_SIZE, - zstrm->buffer, dst_len); + ret = comp->ops->compress(comp->params, &zstrm->ctx, &req); + if (!ret) + *dst_len = req.dst_len; + return ret; } -int zcomp_decompress(struct zcomp_strm *zstrm, - const void *src, unsigned int src_len, void *dst) +int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm, + const void *src, unsigned int src_len, void *dst) { - unsigned int dst_len = PAGE_SIZE; - - return crypto_comp_decompress(zstrm->tfm, - src, src_len, - dst, &dst_len); + struct zcomp_req req = { + .src = src, + .dst = dst, + .src_len = src_len, + .dst_len = PAGE_SIZE, + }; + + return comp->ops->decompress(comp->params, &zstrm->ctx, &req); } int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) @@ -158,7 +157,7 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) zstrm = per_cpu_ptr(comp->stream, cpu); local_lock_init(&zstrm->lock); - ret = zcomp_strm_init(zstrm, comp); + ret = zcomp_strm_init(comp, zstrm); if (ret) pr_err("Can't allocate a compression stream\n"); return ret; @@ -170,11 +169,11 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node) struct zcomp_strm *zstrm; zstrm = per_cpu_ptr(comp->stream, cpu); - zcomp_strm_free(zstrm); + zcomp_strm_free(comp, zstrm); return 0; } -static int zcomp_init(struct zcomp *comp) +static int zcomp_init(struct zcomp *comp, struct zcomp_params *params) { int ret; @@ -182,12 +181,19 @@ static int zcomp_init(struct zcomp *comp) if (!comp->stream) return -ENOMEM; + comp->params = params; + ret = comp->ops->setup_params(comp->params); + if (ret) + goto cleanup; + ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node); if (ret < 0) goto cleanup; + return 0; cleanup: + comp->ops->release_params(comp->params); free_percpu(comp->stream); return ret; } @@ -195,37 +201,35 @@ static int zcomp_init(struct zcomp *comp) void zcomp_destroy(struct zcomp *comp) { cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node); + comp->ops->release_params(comp->params); free_percpu(comp->stream); kfree(comp); } -/* - * search available compressors 
for requested algorithm. - * allocate new zcomp and initialize it. return compressing - * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL) - * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in - * case of allocation error, or any other error potentially - * returned by zcomp_init(). - */ -struct zcomp *zcomp_create(const char *alg) +struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params) { struct zcomp *comp; int error; /* - * Crypto API will execute /sbin/modprobe if the compression module - * is not loaded yet. We must do it here, otherwise we are about to - * call /sbin/modprobe under CPU hot-plug lock. + * The backends array has a sentinel NULL value, so the minimum + * size is 1. In order to be valid the array, apart from the + * sentinel NULL element, should have at least one compression + * backend selected. */ - if (!zcomp_available_algorithm(alg)) - return ERR_PTR(-EINVAL); + BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1); comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL); if (!comp) return ERR_PTR(-ENOMEM); - comp->name = alg; - error = zcomp_init(comp); + comp->ops = lookup_backend_ops(alg); + if (!comp->ops) { + kfree(comp); + return ERR_PTR(-EINVAL); + } + + error = zcomp_init(comp, params); if (error) { kfree(comp); return ERR_PTR(error); diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index e9fe63da0e9b1c..ad576281384248 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -1,24 +1,70 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2014 Sergey Senozhatsky. - */ #ifndef _ZCOMP_H_ #define _ZCOMP_H_ + #include +#define ZCOMP_PARAM_NO_LEVEL INT_MIN + +/* + * Immutable driver (backend) parameters. The driver may attach private + * data to it (e.g. driver representation of the dictionary, etc.). + * + * This data is kept per-comp and is shared among execution contexts. + */ +struct zcomp_params { + void *dict; + size_t dict_sz; + s32 level; + + void *drv_data; +}; + +/* + * Run-time driver context - scratch buffers, etc. It is modified during + * request execution (compression/decompression), cannot be shared, so + * it's in per-CPU area. + */ +struct zcomp_ctx { + void *context; +}; + struct zcomp_strm { - /* The members ->buffer and ->tfm are protected by ->lock. 
*/ local_lock_t lock; - /* compression/decompression buffer */ + /* compression buffer */ void *buffer; - struct crypto_comp *tfm; + struct zcomp_ctx ctx; +}; + +struct zcomp_req { + const unsigned char *src; + const size_t src_len; + + unsigned char *dst; + size_t dst_len; +}; + +struct zcomp_ops { + int (*compress)(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req); + int (*decompress)(struct zcomp_params *params, struct zcomp_ctx *ctx, + struct zcomp_req *req); + + int (*create_ctx)(struct zcomp_params *params, struct zcomp_ctx *ctx); + void (*destroy_ctx)(struct zcomp_ctx *ctx); + + int (*setup_params)(struct zcomp_params *params); + void (*release_params)(struct zcomp_params *params); + + const char *name; }; /* dynamic per-device compression frontend */ struct zcomp { struct zcomp_strm __percpu *stream; - const char *name; + const struct zcomp_ops *ops; + struct zcomp_params *params; struct hlist_node node; }; @@ -27,16 +73,15 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node); ssize_t zcomp_available_show(const char *comp, char *buf); bool zcomp_available_algorithm(const char *comp); -struct zcomp *zcomp_create(const char *alg); +struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params); void zcomp_destroy(struct zcomp *comp); struct zcomp_strm *zcomp_stream_get(struct zcomp *comp); void zcomp_stream_put(struct zcomp *comp); -int zcomp_compress(struct zcomp_strm *zstrm, - const void *src, unsigned int *dst_len); - -int zcomp_decompress(struct zcomp_strm *zstrm, - const void *src, unsigned int src_len, void *dst); +int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, + const void *src, unsigned int *dst_len); +int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm, + const void *src, unsigned int src_len, void *dst); #endif /* _ZCOMP_H_ */ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index efcb8d9d274c31..ad9c9bc3ccfc5b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "zram_drv.h" @@ -59,17 +60,17 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index, static int zram_slot_trylock(struct zram *zram, u32 index) { - return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags); + return spin_trylock(&zram->table[index].lock); } static void zram_slot_lock(struct zram *zram, u32 index) { - bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags); + spin_lock(&zram->table[index].lock); } static void zram_slot_unlock(struct zram *zram, u32 index) { - bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); + spin_unlock(&zram->table[index].lock); } static inline bool init_done(struct zram *zram) @@ -998,6 +999,103 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf) return 0; } +static void comp_params_reset(struct zram *zram, u32 prio) +{ + struct zcomp_params *params = &zram->params[prio]; + + vfree(params->dict); + params->level = ZCOMP_PARAM_NO_LEVEL; + params->dict_sz = 0; + params->dict = NULL; +} + +static int comp_params_store(struct zram *zram, u32 prio, s32 level, + const char *dict_path) +{ + ssize_t sz = 0; + + comp_params_reset(zram, prio); + + if (dict_path) { + sz = kernel_read_file_from_path(dict_path, 0, + &zram->params[prio].dict, + INT_MAX, + NULL, + READING_POLICY); + if (sz < 0) + return -EINVAL; + } + + zram->params[prio].dict_sz = sz; + zram->params[prio].level = level; + return 0; +} + +static ssize_t 
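/*
 * A minimal sketch of how the reworked zcomp API above is meant to be
 * driven by a caller.  The function name, the "zstd" algorithm choice and
 * the bare error handling are illustrative assumptions; zram itself keeps
 * an array of per-priority zcomp pointers rather than a single local one.
 */
static int example_compress_page(struct zcomp_params *params,
				 const void *src, unsigned int *dst_len)
{
	struct zcomp_strm *zstrm;
	struct zcomp *comp;
	int ret;

	/* params may carry a compression level and/or a preloaded dictionary */
	comp = zcomp_create("zstd", params);
	if (IS_ERR(comp))
		return PTR_ERR(comp);

	/* per-CPU stream, taken and released under its local lock;
	 * src is expected to be one PAGE_SIZE page
	 */
	zstrm = zcomp_stream_get(comp);
	ret = zcomp_compress(comp, zstrm, src, dst_len);
	zcomp_stream_put(comp);

	zcomp_destroy(comp);
	return ret;
}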
algorithm_params_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL; + char *args, *param, *val, *algo = NULL, *dict_path = NULL; + struct zram *zram = dev_to_zram(dev); + int ret; + + args = skip_spaces(buf); + while (*args) { + args = next_arg(args, ¶m, &val); + + if (!val || !*val) + return -EINVAL; + + if (!strcmp(param, "priority")) { + ret = kstrtoint(val, 10, &prio); + if (ret) + return ret; + continue; + } + + if (!strcmp(param, "level")) { + ret = kstrtoint(val, 10, &level); + if (ret) + return ret; + continue; + } + + if (!strcmp(param, "algo")) { + algo = val; + continue; + } + + if (!strcmp(param, "dict")) { + dict_path = val; + continue; + } + } + + /* Lookup priority by algorithm name */ + if (algo) { + s32 p; + + prio = -EINVAL; + for (p = ZRAM_PRIMARY_COMP; p < ZRAM_MAX_COMPS; p++) { + if (!zram->comp_algs[p]) + continue; + + if (!strcmp(zram->comp_algs[p], algo)) { + prio = p; + break; + } + } + } + + if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS) + return -EINVAL; + + ret = comp_params_store(zram, prio, level, dict_path); + return ret ? ret : len; +} + static ssize_t comp_algorithm_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1211,7 +1309,7 @@ static void zram_meta_free(struct zram *zram, u64 disksize) static bool zram_meta_alloc(struct zram *zram, u64 disksize) { - size_t num_pages; + size_t num_pages, index; num_pages = disksize >> PAGE_SHIFT; zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table))); @@ -1226,6 +1324,9 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) if (!huge_class_size) huge_class_size = zs_huge_class_size(zram->mem_pool); + + for (index = 0; index < num_pages; index++) + spin_lock_init(&zram->table[index].lock); return true; } @@ -1283,7 +1384,7 @@ static void zram_free_page(struct zram *zram, size_t index) zram_set_handle(zram, index, 0); zram_set_obj_size(zram, index, 0); WARN_ON_ONCE(zram->table[index].flags & - ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB)); + ~(1UL << ZRAM_UNDER_WB)); } /* @@ -1327,7 +1428,8 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page, ret = 0; } else { dst = kmap_local_page(page); - ret = zcomp_decompress(zstrm, src, size, dst); + ret = zcomp_decompress(zram->comps[prio], zstrm, + src, size, dst); kunmap_local(dst); zcomp_stream_put(zram->comps[prio]); } @@ -1414,7 +1516,8 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) compress_again: zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); src = kmap_local_page(page); - ret = zcomp_compress(zstrm, src, &comp_len); + ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm, + src, &comp_len); kunmap_local(src); if (unlikely(ret)) { @@ -1601,7 +1704,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page, num_recomps++; zstrm = zcomp_stream_get(zram->comps[prio]); src = kmap_local_page(page); - ret = zcomp_compress(zstrm, src, &comp_len_new); + ret = zcomp_compress(zram->comps[prio], zstrm, + src, &comp_len_new); kunmap_local(src); if (ret) { @@ -1754,6 +1858,18 @@ static ssize_t recompress_store(struct device *dev, algo = val; continue; } + + if (!strcmp(param, "priority")) { + ret = kstrtouint(val, 10, &prio); + if (ret) + return ret; + + if (prio == ZRAM_PRIMARY_COMP) + prio = ZRAM_SECONDARY_COMP; + + prio_max = min(prio + 1, ZRAM_MAX_COMPS); + continue; + } } if (threshold >= huge_class_size) @@ -1976,6 +2092,15 @@ static void 
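/*
 * Illustrative usage of the algorithm_params attribute defined above; the
 * device name, level and dictionary path are assumed values, not taken
 * from this patch:
 *
 *   echo "priority=0 level=3" > /sys/block/zram0/algorithm_params
 *   echo "algo=zstd level=9 dict=/path/to/zstd.dict" \
 *                           > /sys/block/zram0/algorithm_params
 *
 * A slot is selected either by its numeric priority or by the name of an
 * already-configured algorithm; a dictionary, when given, is read in full
 * with kernel_read_file_from_path() and handed to the backend through
 * zcomp_params.
 */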
zram_slot_free_notify(struct block_device *bdev, zram_slot_unlock(zram, index); } +static void zram_comp_params_reset(struct zram *zram) +{ + u32 prio; + + for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) { + comp_params_reset(zram, prio); + } +} + static void zram_destroy_comps(struct zram *zram) { u32 prio; @@ -1989,6 +2114,15 @@ static void zram_destroy_comps(struct zram *zram) zcomp_destroy(comp); zram->num_active_comps--; } + + for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) { + /* Do not free statically defined compression algorithms */ + if (zram->comp_algs[prio] != default_compressor) + kfree(zram->comp_algs[prio]); + zram->comp_algs[prio] = NULL; + } + + zram_comp_params_reset(zram); } static void zram_reset_device(struct zram *zram) @@ -2046,7 +2180,8 @@ static ssize_t disksize_store(struct device *dev, if (!zram->comp_algs[prio]) continue; - comp = zcomp_create(zram->comp_algs[prio]); + comp = zcomp_create(zram->comp_algs[prio], + &zram->params[prio]); if (IS_ERR(comp)) { pr_err("Cannot initialise %s compressing backend\n", zram->comp_algs[prio]); @@ -2149,6 +2284,7 @@ static DEVICE_ATTR_RW(writeback_limit_enable); static DEVICE_ATTR_RW(recomp_algorithm); static DEVICE_ATTR_WO(recompress); #endif +static DEVICE_ATTR_WO(algorithm_params); static struct attribute *zram_disk_attrs[] = { &dev_attr_disksize.attr, @@ -2176,6 +2312,7 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_recomp_algorithm.attr, &dev_attr_recompress.attr, #endif + &dev_attr_algorithm_params.attr, NULL, }; @@ -2251,6 +2388,7 @@ static int zram_add(void) if (ret) goto out_cleanup_disk; + zram_comp_params_reset(zram); comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); zram_debugfs_register(zram); @@ -2401,9 +2539,10 @@ static void destroy_devices(void) static int __init zram_init(void) { + struct zram_table_entry zram_te; int ret; - BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG); + BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8); ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare", zcomp_cpu_up_prepare, zcomp_cpu_dead); diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 35e32214462922..cfc8c059db6369 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -45,9 +45,7 @@ /* Flags for zram pages (table[page_no].flags) */ enum zram_pageflags { - /* zram slot is locked */ - ZRAM_LOCK = ZRAM_FLAG_SHIFT, - ZRAM_SAME, /* Page consists the same element */ + ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists the same element */ ZRAM_WB, /* page is stored on backing_device */ ZRAM_UNDER_WB, /* page is under writeback */ ZRAM_HUGE, /* Incompressible page */ @@ -68,7 +66,8 @@ struct zram_table_entry { unsigned long handle; unsigned long element; }; - unsigned long flags; + unsigned int flags; + spinlock_t lock; #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME ktime_t ac_time; #endif @@ -107,6 +106,7 @@ struct zram { struct zram_table_entry *table; struct zs_pool *mem_pool; struct zcomp *comps[ZRAM_MAX_COMPS]; + struct zcomp_params params[ZRAM_MAX_COMPS]; struct gendisk *disk; /* Prevent concurrent execution of device init */ struct rw_semaphore init_lock; diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 769fa288179d39..18767b54df352e 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -274,6 +274,18 @@ config BT_HCIUART_MRVL Say Y here to compile support for HCI MRVL protocol. 
+config BT_HCIUART_AML + bool "Amlogic protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + select BT_HCIUART_H4 + select FW_LOADER + help + The Amlogic protocol support enables Bluetooth HCI over serial + port interface for Amlogic Bluetooth controllers. + + Say Y here to compile support for HCI AML protocol. + config BT_HCIBCM203X tristate "HCI BCM203x USB driver" depends on USB diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index 0730d6684d1ab9..81856512ddd030 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -51,4 +51,5 @@ hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o +hci_uart-$(CONFIG_BT_HCIUART_AML) += hci_aml.o hci_uart-objs := $(hci_uart-y) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index ce97b336fbfb8a..fc796f1dbda974 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #define VERSION "1.0" diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index f9a7c790d7e2ec..eef00467905eb3 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 1ccbb515751533..438b92967bc364 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index 1c7631f22c522b..5252125b003f58 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include #include @@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table); #define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002 #define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003 #define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004 +#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005 static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia, u16 queue_num) @@ -423,6 +424,18 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data, goto exit_error; } break; + + case BTINTEL_PCIE_HCI_ISO_PKT: + if (skb->len >= HCI_ISO_HDR_SIZE) { + plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen); + pkt_type = HCI_ISODATA_PKT; + } else { + bt_dev_err(hdev, "ISO packet is too short"); + ret = -EILSEQ; + goto exit_error; + } + break; + default: bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x", pcie_pkt_type); @@ -1082,6 +1095,9 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev, type = BTINTEL_PCIE_HCI_SCO_PKT; hdev->stat.sco_tx++; break; + case HCI_ISODATA_PKT: + type = BTINTEL_PCIE_HCI_ISO_PKT; + break; default: bt_dev_err(hdev, "Unknown HCI packet type"); return -EILSEQ; @@ -1208,7 +1224,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data) int err; struct hci_dev *hdev; - hdev = hci_alloc_dev(); + hdev = hci_alloc_dev_priv(sizeof(struct btintel_data)); if (!hdev) return -ENOMEM; diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 85b7f2bb425982..07cd308f7abf6d 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -92,7 +92,7 @@ static int btmrvl_sdio_probe_of(struct device *dev, } else { ret = 
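/*
 * Sketch of the length calculation behind the BTINTEL_PCIE_HCI_ISO_PKT
 * case added above.  The helper name is illustrative; hci_iso_hdr() and
 * struct hci_iso_hdr come from the Bluetooth core headers, and the 16-bit
 * dlen field is little-endian:
 */
static inline u16 example_iso_payload_len(const struct sk_buff *skb)
{
	/* total frame = HCI_ISO_HDR_SIZE (4 bytes) + dlen payload bytes */
	return __le16_to_cpu(hci_iso_hdr(skb)->dlen);
}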
devm_request_irq(dev, cfg->irq_bt, btmrvl_wake_irq_bt, - 0, "bt_wake", card); + IRQF_NO_AUTOEN, "bt_wake", card); if (ret) { dev_err(dev, "Failed to request irq_bt %d (%d)\n", @@ -101,7 +101,6 @@ static int btmrvl_sdio_probe_of(struct device *dev, /* Configure wakeup (enabled by default) */ device_init_wakeup(dev, true); - disable_irq(cfg->irq_bt); } } diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c index 2b7c80043aa2eb..9bbf205021634f 100644 --- a/drivers/bluetooth/btmtk.c +++ b/drivers/bluetooth/btmtk.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 497e4c87f5be56..11d33cd7b08fc0 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -10,7 +10,7 @@ * */ -#include +#include #include #include #include diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index aa87c3e788718f..64e4d835af5211 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -8,7 +8,7 @@ * */ -#include +#include #include #include #include diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index ad1ec6f3685a76..5ea0d23e88c02b 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -1412,6 +1412,7 @@ static const struct h4_recv_pkt nxp_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, + { H4_RECV_ISO, .recv = hci_recv_frame }, { NXP_RECV_CHIP_VER_V1, .recv = nxp_recv_chip_ver_v1 }, { NXP_RECV_FW_REQ_V1, .recv = nxp_recv_fw_req_v1 }, { NXP_RECV_CHIP_VER_V3, .recv = nxp_recv_chip_ver_v3 }, diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c index 0c91d7635ac39e..6c1f584c8a3372 100644 --- a/drivers/bluetooth/btrsi.c +++ b/drivers/bluetooth/btrsi.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #define RSI_DMA_ALIGN 8 diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index fd7991ea76726e..0bcb44cf7b31d7 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include @@ -30,6 +30,7 @@ #define RTL_ROM_LMP_8822B 0x8822 #define RTL_ROM_LMP_8852A 0x8852 #define RTL_ROM_LMP_8851B 0x8851 +#define RTL_ROM_LMP_8922A 0x8922 #define RTL_CONFIG_MAGIC 0x8723ab55 #define RTL_VSC_OP_COREDUMP 0xfcff @@ -69,6 +70,7 @@ enum btrtl_chip_id { CHIP_ID_8852B = 20, CHIP_ID_8852C = 25, CHIP_ID_8851B = 36, + CHIP_ID_8922A = 44, CHIP_ID_8852BT = 47, }; @@ -309,6 +311,15 @@ static const struct id_table ic_id_table[] = { .cfg_name = "rtl_bt/rtl8851bu_config", .hw_info = "rtl8851bu" }, + /* 8922A */ + { IC_INFO(RTL_ROM_LMP_8922A, 0xa, 0xc, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8922au_fw", + .cfg_name = "rtl_bt/rtl8922au_config", + .hw_info = "rtl8922au" }, + /* 8852BT/8852BE-VT */ { IC_INFO(RTL_ROM_LMP_8852A, 0x87, 0xc, HCI_USB), .config_needed = false, @@ -655,6 +666,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, { RTL_ROM_LMP_8852A, 20 }, /* 8852B */ { RTL_ROM_LMP_8852A, 25 }, /* 8852C */ { RTL_ROM_LMP_8851B, 36 }, /* 8851B */ + { RTL_ROM_LMP_8922A, 44 }, /* 8922A */ { RTL_ROM_LMP_8852A, 47 }, /* 8852BT */ }; @@ -878,10 +890,8 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 
**buff) if (ret < 0) return ret; ret = fw->size; - *buff = kvmalloc(fw->size, GFP_KERNEL); - if (*buff) - memcpy(*buff, fw->data, ret); - else + *buff = kvmemdup(fw->data, fw->size, GFP_KERNEL); + if (!*buff) ret = -ENOMEM; release_firmware(fw); @@ -1255,6 +1265,7 @@ int btrtl_download_firmware(struct hci_dev *hdev, case RTL_ROM_LMP_8852A: case RTL_ROM_LMP_8703B: case RTL_ROM_LMP_8851B: + case RTL_ROM_LMP_8922A: err = btrtl_setup_rtl8723b(hdev, btrtl_dev); break; default: @@ -1286,6 +1297,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) case CHIP_ID_8852B: case CHIP_ID_8852C: case CHIP_ID_8851B: + case CHIP_ID_8922A: case CHIP_ID_8852BT: set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); @@ -1296,6 +1308,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP); if (btrtl_dev->project_id == CHIP_ID_8852A || + btrtl_dev->project_id == CHIP_ID_8852B || btrtl_dev->project_id == CHIP_ID_8852C) set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks); @@ -1528,3 +1541,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8852btu_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8922au_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8922au_config.bin"); diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index fdcfe9c50313ea..a69feb08486a5a 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -295,6 +295,7 @@ static int btsdio_probe(struct sdio_func *func, case SDIO_DEVICE_ID_BROADCOM_4345: case SDIO_DEVICE_ID_BROADCOM_43455: case SDIO_DEVICE_ID_BROADCOM_4356: + case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373: return -ENODEV; } } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 51d9d4532dda4e..f23c8801ad5cc4 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include @@ -59,7 +59,7 @@ static struct usb_driver btusb_driver; #define BTUSB_CW6622 BIT(19) #define BTUSB_MEDIATEK BIT(20) #define BTUSB_WIDEBAND_SPEECH BIT(21) -#define BTUSB_VALID_LE_STATES BIT(22) +#define BTUSB_INVALID_LE_STATES BIT(22) #define BTUSB_QCA_WCN6855 BIT(23) #define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24) #define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25) @@ -298,115 +298,79 @@ static const struct usb_device_id quirks_table[] = { /* QCA WCN6855 chipset */ { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + 
BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3a22), .driver_info = 
BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, /* QCA WCN785x chipset */ { USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -540,6 +504,8 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe122), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852BE Bluetooth devices */ { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK | @@ -564,6 +530,17 @@ static const struct usb_device_id quirks_table[] = { /* Realtek 8852BT/8852BE-VT Bluetooth devices */ { USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + + /* Realtek 8922AE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0x8922), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3617), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3616), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe130), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + /* Realtek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), .driver_info = BTUSB_REALTEK }, @@ -571,131 +548,102 @@ static const struct usb_device_id quirks_table[] = { /* MediaTek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, /* Additional MediaTek MT7615E Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK}, /* Additional MediaTek MT7663 Bluetooth devices */ { USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3801), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, /* Additional MediaTek MT7668 Bluetooth devices */ { USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, /* Additional MediaTek MT7921 Bluetooth devices */ { USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - 
BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3606), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, /* MediaTek MT7922 Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, /* MediaTek MT7922A Bluetooth devices */ { USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0f1), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0f6), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3614), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { 
USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, /* Additional MediaTek MT7925 Bluetooth devices */ { USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3604), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, /* Additional Realtek 8723AE Bluetooth devices */ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, @@ -1397,7 +1345,10 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) if (!urb) return -ENOMEM; - size = le16_to_cpu(data->intr_ep->wMaxPacketSize); + /* Use maximum HCI Event size so the USB stack handles + * ZPL/short-transfer automatically. + */ + size = HCI_MAX_EVENT_SIZE; buf = kmalloc(size, mem_flags); if (!buf) { @@ -3956,7 +3907,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_WIDEBAND_SPEECH) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); - if (!(id->driver_info & BTUSB_VALID_LE_STATES)) + if (id->driver_info & BTUSB_INVALID_LE_STATES) set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks); if (id->driver_info & BTUSB_DIGIANSWER) { diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h index 4f2c897422459f..28cf2d8c2d48e9 100644 --- a/drivers/bluetooth/h4_recv.h +++ b/drivers/bluetooth/h4_recv.h @@ -6,7 +6,7 @@ * Copyright (C) 2015-2018 Intel Corporation */ -#include +#include struct h4_recv_pkt { u8 type; /* Packet type */ @@ -38,6 +38,13 @@ struct h4_recv_pkt { .lsize = 1, \ .maxlen = HCI_MAX_EVENT_SIZE +#define H4_RECV_ISO \ + .type = HCI_ISODATA_PKT, \ + .hlen = HCI_ISO_HDR_SIZE, \ + .loff = 2, \ + .lsize = 2, \ + .maxlen = HCI_MAX_FRAME_SIZE + static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, const unsigned char *buffer, diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c new file mode 100644 index 00000000000000..dc9541e76d8199 --- /dev/null +++ b/drivers/bluetooth/hci_aml.c @@ -0,0 +1,755 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR MIT) +/* + * Copyright (C) 2024 Amlogic, Inc. 
All rights reserved + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hci_uart.h" + +#define AML_EVT_HEAD_SIZE 4 +#define AML_BDADDR_DEFAULT (&(bdaddr_t) {{ 0x00, 0xff, 0x00, 0x22, 0x2d, 0xae }}) + +#define AML_FIRMWARE_OPERATION_SIZE (248) +#define AML_FIRMWARE_MAX_SIZE (512 * 1024) + +/* TCI command */ +#define AML_TCI_CMD_READ 0xFEF0 +#define AML_TCI_CMD_WRITE 0xFEF1 +#define AML_TCI_CMD_UPDATE_BAUDRATE 0xFEF2 +#define AML_TCI_CMD_HARDWARE_RESET 0xFEF2 +#define AML_TCI_CMD_DOWNLOAD_BT_FW 0xFEF3 + +/* Vendor command */ +#define AML_BT_HCI_VENDOR_CMD 0xFC1A + +/* TCI operation parameter in controller chip */ +#define AML_OP_UART_MODE 0x00A30128 +#define AML_OP_EVT_ENABLE 0x00A70014 +#define AML_OP_MEM_HARD_TRANS_EN 0x00A7000C +#define AML_OP_RF_CFG 0x00F03040 +#define AML_OP_RAM_POWER_CTR 0x00F03050 +#define AML_OP_HARDWARE_RST 0x00F03058 +#define AML_OP_ICCM_RAM_BASE 0x00000000 +#define AML_OP_DCCM_RAM_BASE 0x00D00000 + +/* UART configuration */ +#define AML_UART_XMIT_EN BIT(12) +#define AML_UART_RECV_EN BIT(13) +#define AML_UART_TIMEOUT_INT_EN BIT(14) +#define AML_UART_CLK_SOURCE 40000000 + +/* Controller event */ +#define AML_EVT_EN BIT(24) + +/* RAM power control */ +#define AML_RAM_POWER_ON (0) +#define AML_RAM_POWER_OFF (1) + +/* RF configuration */ +#define AML_RF_ANT_SINGLE BIT(28) +#define AML_RF_ANT_DOUBLE BIT(29) + +/* Memory transaction */ +#define AML_MM_CTR_HARD_TRAS_EN BIT(27) + +/* Controller reset */ +#define AML_CTR_CPU_RESET BIT(8) +#define AML_CTR_MAC_RESET BIT(9) +#define AML_CTR_PHY_RESET BIT(10) + +enum { + FW_ICCM, + FW_DCCM +}; + +struct aml_fw_len { + u32 iccm_len; + u32 dccm_len; +}; + +struct aml_tci_rsp { + u8 num_cmd_packet; + u16 opcode; + u8 status; +} __packed; + +struct aml_device_data { + int iccm_offset; + int dccm_offset; + bool is_coex; +}; + +struct aml_serdev { + struct hci_uart serdev_hu; + struct device *dev; + struct gpio_desc *bt_en_gpio; + struct regulator *bt_supply; + struct clk *lpo_clk; + const struct aml_device_data *aml_dev_data; + const char *firmware_name; +}; + +struct aml_data { + struct sk_buff *rx_skb; + struct sk_buff_head txq; +}; + +static const struct h4_recv_pkt aml_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { H4_RECV_ISO, .recv = hci_recv_frame }, +}; + +/* The TCI command is a private command, which is for setting baud rate, + * downloading firmware, initiating RAM. 
+ * + * op_code | op_len | op_addr | parameter | + * --------|-----------------------|---------|-------------| + * 2B | 1B len(addr+param) | 4B | len(param) | + */ +static int aml_send_tci_cmd(struct hci_dev *hdev, u16 op_code, u32 op_addr, + u32 *param, u32 param_len) +{ + struct aml_tci_rsp *rsp = NULL; + struct sk_buff *skb = NULL; + size_t buf_len = 0; + u8 *buf = NULL; + int err = 0; + + buf_len = sizeof(op_addr) + param_len; + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, &op_addr, sizeof(op_addr)); + if (param && param_len > 0) + memcpy(buf + sizeof(op_addr), param, param_len); + + skb = __hci_cmd_sync_ev(hdev, op_code, buf_len, buf, + HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to send TCI cmd (error: %d)", err); + goto exit; + } + + rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp)); + if (!rsp) + goto skb_free; + + if (rsp->opcode != op_code || rsp->status != 0x00) { + bt_dev_err(hdev, "send TCI cmd (0x%04X), response (0x%04X):(%d)", + op_code, rsp->opcode, rsp->status); + err = -EINVAL; + goto skb_free; + } + +skb_free: + kfree_skb(skb); + +exit: + kfree(buf); + return err; +} + +static int aml_update_chip_baudrate(struct hci_dev *hdev, u32 baud) +{ + u32 value; + + value = ((AML_UART_CLK_SOURCE / baud) - 1) & 0x0FFF; + value |= AML_UART_XMIT_EN | AML_UART_RECV_EN | AML_UART_TIMEOUT_INT_EN; + + return aml_send_tci_cmd(hdev, AML_TCI_CMD_UPDATE_BAUDRATE, + AML_OP_UART_MODE, &value, sizeof(value)); +} + +static int aml_start_chip(struct hci_dev *hdev) +{ + u32 value = 0; + int ret; + + value = AML_MM_CTR_HARD_TRAS_EN; + ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE, + AML_OP_MEM_HARD_TRANS_EN, + &value, sizeof(value)); + if (ret) + return ret; + + /* controller hardware reset */ + value = AML_CTR_CPU_RESET | AML_CTR_MAC_RESET | AML_CTR_PHY_RESET; + ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_HARDWARE_RESET, + AML_OP_HARDWARE_RST, + &value, sizeof(value)); + return ret; +} + +static int aml_send_firmware_segment(struct hci_dev *hdev, + u8 fw_type, + u8 *seg, + u32 seg_size, + u32 offset) +{ + u32 op_addr = 0; + + if (fw_type == FW_ICCM) + op_addr = AML_OP_ICCM_RAM_BASE + offset; + else if (fw_type == FW_DCCM) + op_addr = AML_OP_DCCM_RAM_BASE + offset; + + return aml_send_tci_cmd(hdev, AML_TCI_CMD_DOWNLOAD_BT_FW, + op_addr, (u32 *)seg, seg_size); +} + +static int aml_send_firmware(struct hci_dev *hdev, u8 fw_type, + u8 *fw, u32 fw_size, u32 offset) +{ + u32 seg_size = 0; + u32 seg_off = 0; + + if (fw_size > AML_FIRMWARE_MAX_SIZE) { + bt_dev_err(hdev, + "Firmware size %d kB is larger than the maximum of 512 kB. Aborting.", + fw_size); + return -EINVAL; + } + while (fw_size > 0) { + seg_size = (fw_size > AML_FIRMWARE_OPERATION_SIZE) ? 
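/*
 * Worked example for aml_update_chip_baudrate() above, assuming a target
 * baud rate of 115200 (the rate itself is an assumption, not taken from
 * this patch):
 *
 *   (40000000 / 115200 - 1) & 0x0FFF             = 0x015A
 *   | AML_UART_XMIT_EN | AML_UART_RECV_EN
 *   | AML_UART_TIMEOUT_INT_EN                    = 0x715A
 *
 * so the TCI write to AML_OP_UART_MODE carries the 32-bit value 0x715A.
 */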
+ AML_FIRMWARE_OPERATION_SIZE : fw_size; + if (aml_send_firmware_segment(hdev, fw_type, (fw + seg_off), + seg_size, offset)) { + bt_dev_err(hdev, "Failed send firmware, type: %d, offset: 0x%x", + fw_type, offset); + return -EINVAL; + } + seg_off += seg_size; + fw_size -= seg_size; + offset += seg_size; + } + return 0; +} + +static int aml_download_firmware(struct hci_dev *hdev, const char *fw_name) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev); + const struct firmware *firmware = NULL; + struct aml_fw_len *fw_len = NULL; + u8 *iccm_start = NULL, *dccm_start = NULL; + u32 iccm_len, dccm_len; + u32 value = 0; + int ret = 0; + + /* Enable firmware download event */ + value = AML_EVT_EN; + ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE, + AML_OP_EVT_ENABLE, + &value, sizeof(value)); + if (ret) + goto exit; + + /* RAM power on */ + value = AML_RAM_POWER_ON; + ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE, + AML_OP_RAM_POWER_CTR, + &value, sizeof(value)); + if (ret) + goto exit; + + /* Check RAM power status */ + ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_READ, + AML_OP_RAM_POWER_CTR, NULL, 0); + if (ret) + goto exit; + + ret = request_firmware(&firmware, fw_name, &hdev->dev); + if (ret < 0) { + bt_dev_err(hdev, "Failed to load <%s>:(%d)", fw_name, ret); + goto exit; + } + + fw_len = (struct aml_fw_len *)firmware->data; + + /* Download ICCM */ + iccm_start = (u8 *)(firmware->data) + sizeof(struct aml_fw_len) + + amldev->aml_dev_data->iccm_offset; + iccm_len = fw_len->iccm_len - amldev->aml_dev_data->iccm_offset; + ret = aml_send_firmware(hdev, FW_ICCM, iccm_start, iccm_len, + amldev->aml_dev_data->iccm_offset); + if (ret) { + bt_dev_err(hdev, "Failed to send FW_ICCM (%d)", ret); + goto exit; + } + + /* Download DCCM */ + dccm_start = (u8 *)(firmware->data) + sizeof(struct aml_fw_len) + fw_len->iccm_len; + dccm_len = fw_len->dccm_len; + ret = aml_send_firmware(hdev, FW_DCCM, dccm_start, dccm_len, + amldev->aml_dev_data->dccm_offset); + if (ret) { + bt_dev_err(hdev, "Failed to send FW_DCCM (%d)", ret); + goto exit; + } + + /* Disable firmware download event */ + value = 0; + ret = aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE, + AML_OP_EVT_ENABLE, + &value, sizeof(value)); + if (ret) + goto exit; + +exit: + if (firmware) + release_firmware(firmware); + return ret; +} + +static int aml_send_reset(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync_ev(hdev, HCI_OP_RESET, 0, NULL, + HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to send hci reset cmd (%d)", err); + return err; + } + + kfree_skb(skb); + return 0; +} + +static int aml_dump_fw_version(struct hci_dev *hdev) +{ + struct aml_tci_rsp *rsp = NULL; + struct sk_buff *skb; + u8 value[6] = {0}; + u8 *fw_ver = NULL; + int err = 0; + + skb = __hci_cmd_sync_ev(hdev, AML_BT_HCI_VENDOR_CMD, sizeof(value), value, + HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to get fw version (error: %d)", err); + return err; + } + + rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp)); + if (!rsp) + goto exit; + + if (rsp->opcode != AML_BT_HCI_VENDOR_CMD || rsp->status != 0x00) { + bt_dev_err(hdev, "dump version, error response (0x%04X):(%d)", + rsp->opcode, rsp->status); + err = -EINVAL; + goto exit; + } + + fw_ver = (u8 *)rsp + AML_EVT_HEAD_SIZE; + bt_dev_info(hdev, "fw_version: date = %02x.%02x, number = 0x%02x%02x", + *(fw_ver + 1), 
*fw_ver, *(fw_ver + 3), *(fw_ver + 2)); + +exit: + kfree_skb(skb); + return err; +} + +static int aml_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct aml_tci_rsp *rsp = NULL; + struct sk_buff *skb; + int err = 0; + + bt_dev_info(hdev, "set bdaddr (%pM)", bdaddr); + skb = __hci_cmd_sync_ev(hdev, AML_BT_HCI_VENDOR_CMD, + sizeof(bdaddr_t), bdaddr, + HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to set bdaddr (error: %d)", err); + return err; + } + + rsp = skb_pull_data(skb, sizeof(struct aml_tci_rsp)); + if (!rsp) + goto exit; + + if (rsp->opcode != AML_BT_HCI_VENDOR_CMD || rsp->status != 0x00) { + bt_dev_err(hdev, "error response (0x%x):(%d)", rsp->opcode, rsp->status); + err = -EINVAL; + goto exit; + } + +exit: + kfree_skb(skb); + return err; +} + +static int aml_check_bdaddr(struct hci_dev *hdev) +{ + struct hci_rp_read_bd_addr *paddr; + struct sk_buff *skb; + int err; + + if (bacmp(&hdev->public_addr, BDADDR_ANY)) + return 0; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to read bdaddr (error: %d)", err); + return err; + } + + paddr = skb_pull_data(skb, sizeof(struct hci_rp_read_bd_addr)); + if (!paddr) + goto exit; + + if (!bacmp(&paddr->bdaddr, AML_BDADDR_DEFAULT)) { + bt_dev_info(hdev, "amlbt using default bdaddr (%pM)", &paddr->bdaddr); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + +exit: + kfree_skb(skb); + return 0; +} + +static int aml_config_rf(struct hci_dev *hdev, bool is_coex) +{ + u32 value = AML_RF_ANT_DOUBLE; + + /* Use a single antenna when co-existing with wifi */ + if (is_coex) + value = AML_RF_ANT_SINGLE; + + return aml_send_tci_cmd(hdev, AML_TCI_CMD_WRITE, + AML_OP_RF_CFG, + &value, sizeof(value)); +} + +static int aml_parse_dt(struct aml_serdev *amldev) +{ + struct device *pdev = amldev->dev; + + amldev->bt_en_gpio = devm_gpiod_get(pdev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR(amldev->bt_en_gpio)) { + dev_err(pdev, "Failed to acquire enable gpios"); + return PTR_ERR(amldev->bt_en_gpio); + } + + if (device_property_read_string(pdev, "firmware-name", + &amldev->firmware_name)) { + dev_err(pdev, "Failed to acquire firmware path"); + return -ENODEV; + } + + amldev->bt_supply = devm_regulator_get(pdev, "vddio"); + if (IS_ERR(amldev->bt_supply)) { + dev_err(pdev, "Failed to acquire regulator"); + return PTR_ERR(amldev->bt_supply); + } + + amldev->lpo_clk = devm_clk_get(pdev, NULL); + if (IS_ERR(amldev->lpo_clk)) { + dev_err(pdev, "Failed to acquire clock source"); + return PTR_ERR(amldev->lpo_clk); + } + + return 0; +} + +static int aml_power_on(struct aml_serdev *amldev) +{ + int err; + + err = regulator_enable(amldev->bt_supply); + if (err) { + dev_err(amldev->dev, "Failed to enable regulator: (%d)", err); + return err; + } + + err = clk_prepare_enable(amldev->lpo_clk); + if (err) { + dev_err(amldev->dev, "Failed to enable lpo clock: (%d)", err); + return err; + } + + gpiod_set_value_cansleep(amldev->bt_en_gpio, 1); + + /* Wait 20ms for bluetooth controller power on */ + msleep(20); + return 0; +} + +static int aml_power_off(struct aml_serdev *amldev) +{ + gpiod_set_value_cansleep(amldev->bt_en_gpio, 0); + + clk_disable_unprepare(amldev->lpo_clk); + + regulator_disable(amldev->bt_supply); + + return 0; +} + +static int aml_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + /* update controller baudrate */ + if (aml_update_chip_baudrate(hu->hdev, speed) != 0) { + 
bt_dev_err(hu->hdev, "Failed to update baud rate"); + return -EINVAL; + } + + /* update local baudrate */ + serdev_device_set_baudrate(hu->serdev, speed); + + return 0; +} + +/* Initialize protocol */ +static int aml_open(struct hci_uart *hu) +{ + struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev); + struct aml_data *aml_data; + int err; + + err = aml_parse_dt(amldev); + if (err) + return err; + + if (!hci_uart_has_flow_control(hu)) { + bt_dev_err(hu->hdev, "no flow control"); + return -EOPNOTSUPP; + } + + aml_data = kzalloc(sizeof(*aml_data), GFP_KERNEL); + if (!aml_data) + return -ENOMEM; + + skb_queue_head_init(&aml_data->txq); + + hu->priv = aml_data; + + return 0; +} + +static int aml_close(struct hci_uart *hu) +{ + struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev); + struct aml_data *aml_data = hu->priv; + + skb_queue_purge(&aml_data->txq); + kfree_skb(aml_data->rx_skb); + kfree(aml_data); + + hu->priv = NULL; + + return aml_power_off(amldev); +} + +static int aml_flush(struct hci_uart *hu) +{ + struct aml_data *aml_data = hu->priv; + + skb_queue_purge(&aml_data->txq); + + return 0; +} + +static int aml_setup(struct hci_uart *hu) +{ + struct aml_serdev *amldev = serdev_device_get_drvdata(hu->serdev); + struct hci_dev *hdev = amldev->serdev_hu.hdev; + int err; + + /* Setup bdaddr */ + hdev->set_bdaddr = aml_set_bdaddr; + + err = aml_power_on(amldev); + if (err) + return err; + + err = aml_set_baudrate(hu, amldev->serdev_hu.proto->oper_speed); + if (err) + return err; + + err = aml_download_firmware(hdev, amldev->firmware_name); + if (err) + return err; + + err = aml_config_rf(hdev, amldev->aml_dev_data->is_coex); + if (err) + return err; + + err = aml_start_chip(hdev); + if (err) + return err; + + /* Wait 60ms for controller startup */ + msleep(60); + + err = aml_dump_fw_version(hdev); + if (err) + return err; + + err = aml_send_reset(hdev); + if (err) + return err; + + err = aml_check_bdaddr(hdev); + if (err) + return err; + + return 0; +} + +static int aml_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct aml_data *aml_data = hu->priv; + + skb_queue_tail(&aml_data->txq, skb); + + return 0; +} + +static struct sk_buff *aml_dequeue(struct hci_uart *hu) +{ + struct aml_data *aml_data = hu->priv; + struct sk_buff *skb; + + skb = skb_dequeue(&aml_data->txq); + + /* Prepend skb with frame type */ + if (skb) + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + return skb; +} + +static int aml_recv(struct hci_uart *hu, const void *data, int count) +{ + struct aml_data *aml_data = hu->priv; + int err; + + aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count, + aml_recv_pkts, + ARRAY_SIZE(aml_recv_pkts)); + if (IS_ERR(aml_data->rx_skb)) { + err = PTR_ERR(aml_data->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + aml_data->rx_skb = NULL; + return err; + } + + return count; +} + +static const struct hci_uart_proto aml_hci_proto = { + .id = HCI_UART_AML, + .name = "AML", + .init_speed = 115200, + .oper_speed = 4000000, + .open = aml_open, + .close = aml_close, + .setup = aml_setup, + .flush = aml_flush, + .recv = aml_recv, + .enqueue = aml_enqueue, + .dequeue = aml_dequeue, +}; + +static void aml_device_driver_shutdown(struct device *dev) +{ + struct aml_serdev *amldev = dev_get_drvdata(dev); + + aml_power_off(amldev); +} + +static int aml_serdev_probe(struct serdev_device *serdev) +{ + struct aml_serdev *amldev; + int err; + + amldev = devm_kzalloc(&serdev->dev, sizeof(*amldev), GFP_KERNEL); + if (!amldev) 
+ return -ENOMEM; + + amldev->serdev_hu.serdev = serdev; + amldev->dev = &serdev->dev; + serdev_device_set_drvdata(serdev, amldev); + + err = hci_uart_register_device(&amldev->serdev_hu, &aml_hci_proto); + if (err) + return dev_err_probe(amldev->dev, err, + "Failed to register hci uart device"); + + amldev->aml_dev_data = device_get_match_data(&serdev->dev); + + return 0; +} + +static void aml_serdev_remove(struct serdev_device *serdev) +{ + struct aml_serdev *amldev = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&amldev->serdev_hu); +} + +static const struct aml_device_data data_w155s2 = { + .iccm_offset = 256 * 1024, +}; + +static const struct aml_device_data data_w265s2 = { + .iccm_offset = 384 * 1024, +}; + +static const struct of_device_id aml_bluetooth_of_match[] = { + { .compatible = "amlogic,w155s2-bt", .data = &data_w155s2 }, + { .compatible = "amlogic,w265s2-bt", .data = &data_w265s2 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, aml_bluetooth_of_match); + +static struct serdev_device_driver aml_serdev_driver = { + .probe = aml_serdev_probe, + .remove = aml_serdev_remove, + .driver = { + .name = "hci_uart_aml", + .of_match_table = aml_bluetooth_of_match, + .shutdown = aml_device_driver_shutdown, + }, +}; + +int __init aml_init(void) +{ + serdev_device_driver_register(&aml_serdev_driver); + + return hci_uart_register_proto(&aml_hci_proto); +} + +int __exit aml_deinit(void) +{ + serdev_device_driver_unregister(&aml_serdev_driver); + + return hci_uart_unregister_proto(&aml_hci_proto); +} diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c index 77a5454a8721e8..9bce53e49cfa60 100644 --- a/drivers/bluetooth/hci_bcm4377.c +++ b/drivers/bluetooth/hci_bcm4377.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index 2a5a27d713f8a0..76878119d910cf 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 1d0cdf02324373..9070e31a68bfd2 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 30192bb083549e..395d66e32a2ea9 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -507,6 +507,9 @@ static int hci_uart_tty_open(struct tty_struct *tty) hu->alignment = 1; hu->padding = 0; + /* Use serial port speed as oper_speed */ + hu->oper_speed = tty->termios.c_ospeed; + INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); @@ -870,7 +873,9 @@ static int __init hci_uart_init(void) #ifdef CONFIG_BT_HCIUART_MRVL mrvl_init(); #endif - +#ifdef CONFIG_BT_HCIUART_AML + aml_init(); +#endif return 0; } @@ -906,7 +911,9 @@ static void __exit hci_uart_exit(void) #ifdef CONFIG_BT_HCIUART_MRVL mrvl_deinit(); #endif - +#ifdef CONFIG_BT_HCIUART_AML + aml_deinit(); +#endif tty_unregister_ldisc(&hci_uart_ldisc); } diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 62633d9ba7c43e..49bbe4975be4bc 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/bluetooth/hci_qca.c 
b/drivers/bluetooth/hci_qca.c index 678f150229e779..37fddf6055bebb 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 00bf7ae82c5b72..fbf3079b92a533 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -20,7 +20,7 @@ #define HCIUARTGETFLAGS _IOR('U', 204, int) /* UART protocols */ -#define HCI_UART_MAX_PROTO 12 +#define HCI_UART_MAX_PROTO 13 #define HCI_UART_H4 0 #define HCI_UART_BCSP 1 @@ -34,6 +34,7 @@ #define HCI_UART_AG6XX 9 #define HCI_UART_NOKIA 10 #define HCI_UART_MRVL 11 +#define HCI_UART_AML 12 #define HCI_UART_RAW_DEVICE 0 #define HCI_UART_RESET_ON_INIT 1 @@ -209,3 +210,8 @@ int ag6xx_deinit(void); int mrvl_init(void); int mrvl_deinit(void); #endif + +#ifdef CONFIG_BT_HCIUART_AML +int aml_init(void); +int aml_deinit(void); +#endif diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 43e9ac5a3324e2..7651321d351ccd 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include #include @@ -679,7 +679,6 @@ static const struct file_operations vhci_fops = { .poll = vhci_poll, .open = vhci_open, .release = vhci_release, - .llseek = no_llseek, }; static struct miscdevice vhci_miscdev = { diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 64cd2ee03aa3a4..ff669a8ccad994 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -89,7 +89,7 @@ config HISILICON_LPC config IMX_WEIM bool "Freescale EIM DRIVER" - depends on ARCH_MXC + depends on ARCH_MXC || COMPILE_TEST help Driver for i.MX WEIM controller. The WEIM(Wireless External Interface Module) works like a bus. 
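/*
 * Several hunks below (bt1-apb, bt1-axi, imx-weim, and later mxc-rnga)
 * collapse the open-coded clock bring-up into devm_clk_get_enabled().
 * Minimal before/after sketch of that pattern, as a generic illustration
 * rather than a quote from any one of those drivers; my_disable_clk is a
 * hypothetical cleanup callback:
 */
	clk = devm_clk_get(dev, "pclk");			/* before */
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(dev, my_disable_clk, clk);
	if (ret)
		return ret;

	clk = devm_clk_get_enabled(dev, "pclk");		/* after */
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* clock is disabled automatically on unbind */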
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c index b715c8ab36e8bd..a65c79b08804f4 100644 --- a/drivers/bus/arm-integrator-lm.c +++ b/drivers/bus/arm-integrator-lm.c @@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev) return -ENODEV; } map = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(map)) { dev_err(dev, "could not find Integrator/AP system controller\n"); diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c index 595fb22b73e068..7463124b6dd92f 100644 --- a/drivers/bus/bt1-apb.c +++ b/drivers/bus/bt1-apb.c @@ -185,34 +185,13 @@ static int bt1_apb_request_rst(struct bt1_apb *apb) return ret; } -static void bt1_apb_disable_clk(void *data) -{ - struct bt1_apb *apb = data; - - clk_disable_unprepare(apb->pclk); -} - static int bt1_apb_request_clk(struct bt1_apb *apb) { - int ret; - - apb->pclk = devm_clk_get(apb->dev, "pclk"); + apb->pclk = devm_clk_get_enabled(apb->dev, "pclk"); if (IS_ERR(apb->pclk)) return dev_err_probe(apb->dev, PTR_ERR(apb->pclk), "Couldn't get APB clock descriptor\n"); - ret = clk_prepare_enable(apb->pclk); - if (ret) { - dev_err(apb->dev, "Couldn't enable the APB clock\n"); - return ret; - } - - ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb); - if (ret) { - dev_err(apb->dev, "Can't add APB EHB clocks disable action\n"); - return ret; - } - apb->rate = clk_get_rate(apb->pclk); if (!apb->rate) { dev_err(apb->dev, "Invalid clock rate\n"); diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c index 4007e7322cf21b..a5254c73bf43a6 100644 --- a/drivers/bus/bt1-axi.c +++ b/drivers/bus/bt1-axi.c @@ -146,33 +146,14 @@ static int bt1_axi_request_rst(struct bt1_axi *axi) return ret; } -static void bt1_axi_disable_clk(void *data) -{ - struct bt1_axi *axi = data; - - clk_disable_unprepare(axi->aclk); -} - static int bt1_axi_request_clk(struct bt1_axi *axi) { - int ret; - - axi->aclk = devm_clk_get(axi->dev, "aclk"); + axi->aclk = devm_clk_get_enabled(axi->dev, "aclk"); if (IS_ERR(axi->aclk)) return dev_err_probe(axi->dev, PTR_ERR(axi->aclk), "Couldn't get AXI Interconnect clock\n"); - ret = clk_prepare_enable(axi->aclk); - if (ret) { - dev_err(axi->dev, "Couldn't enable the AXI clock\n"); - return ret; - } - - ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi); - if (ret) - dev_err(axi->dev, "Can't add AXI clock disable action\n"); - - return ret; + return 0; } static int bt1_axi_request_irq(struct bt1_axi *axi) diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index dd68b8191a0a05..930d8a3ba722b3 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -309,7 +309,7 @@ static struct attribute *fsl_mc_bus_attrs[] = { ATTRIBUTE_GROUPS(fsl_mc_bus); -struct bus_type fsl_mc_bus_type = { +const struct bus_type fsl_mc_bus_type = { .name = "fsl-mc", .match = fsl_mc_bus_match, .uevent = fsl_mc_bus_uevent, diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 837bf9d51c6ec9..83d623d97f5f28 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -282,22 +282,18 @@ static int weim_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); /* get the clock */ - clk = devm_clk_get(&pdev->dev, NULL); + clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); - ret = clk_prepare_enable(clk); - if (ret) - return ret; - /* parse the device node */ ret = weim_parse_dt(pdev); if (ret) - clk_disable_unprepare(clk); - else - dev_info(&pdev->dev, "Driver 
registered.\n"); + return ret; - return ret; + dev_info(&pdev->dev, "Driver registered.\n"); + + return 0; } #if IS_ENABLED(CONFIG_OF_DYNAMIC) diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index ce7d2e62c2f18e..a9b1f8beee7bc9 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -1464,7 +1464,7 @@ static int mhi_match(struct device *dev, const struct device_driver *drv) return 0; }; -struct bus_type mhi_bus_type = { +const struct bus_type mhi_bus_type = { .name = "mhi", .dev_name = "mhi", .match = mhi_match, diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index aaad40a07f6925..d057e877932e3a 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -9,7 +9,7 @@ #include "../common.h" -extern struct bus_type mhi_bus_type; +extern const struct bus_type mhi_bus_type; /* Host request register */ #define MHI_SOC_RESET_REQ_OFFSET 0xb0 diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 14a11880bceafb..9938bb034c1cbc 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -26,6 +26,7 @@ /* PCI VID definitions */ #define PCI_VENDOR_ID_THALES 0x1269 #define PCI_VENDOR_ID_QUECTEL 0x1eac +#define PCI_VENDOR_ID_NETPRISMA 0x203e #define MHI_EDL_DB 91 #define MHI_EDL_COOKIE 0xEDEDEDED @@ -433,8 +434,8 @@ static const struct mhi_controller_config modem_foxconn_sdx72_config = { static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .name = "foxconn-sdx55", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -444,8 +445,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = { .name = "foxconn-t99w175", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -455,8 +456,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = { static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = { .name = "foxconn-dw5930e", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -466,6 +467,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = { static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = { .name = "foxconn-t99w368", + .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -475,6 +478,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = { static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = { .name = "foxconn-t99w373", + .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -484,6 +489,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = { static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = { .name = "foxconn-t99w510", + .edl = 
"qcom/sdx24m/foxconn/prog_firehose_sdx24.mbn", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -493,6 +500,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = { static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = { .name = "foxconn-dw5932e", + .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -502,7 +511,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = { static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = { .name = "foxconn-t99w515", - .edl = "fox/sdx72m/edl.mbn", + .edl = "qcom/sdx72m/foxconn/edl.mbn", .edl_trigger = true, .config = &modem_foxconn_sdx72_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, @@ -513,7 +522,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = { static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = { .name = "foxconn-dw5934e", - .edl = "fox/sdx72m/edl.mbn", + .edl = "qcom/sdx72m/foxconn/edl.mbn", .edl_trigger = true, .config = &modem_foxconn_sdx72_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, @@ -680,6 +689,35 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = { .mru_default = 32768, }; +static const struct mhi_pci_dev_info mhi_telit_fe990a_info = { + .name = "telit-fe990a", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + +static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = { + .name = "netprisma-lcur57", + .edl = "qcom/prog_firehose_sdx24.mbn", + .config = &modem_quectel_em1xx_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = true, +}; + +static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = { + .name = "netprisma-fcun69", + .edl = "qcom/prog_firehose_sdx6x.elf", + .config = &modem_quectel_em1xx_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = true, +}; + /* Keep the list sorted based on the PID. 
New VID should be added as the last entry */ static const struct pci_device_id mhi_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), @@ -697,9 +735,9 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* Telit FN990 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, - /* Telit FE990 */ + /* Telit FE990A */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015), - .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, + .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309), @@ -778,6 +816,12 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* T99W175 (sdx55), HP variant */ { PCI_DEVICE(0x03f0, 0x0a6c), .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info }, + /* NETPRISMA LCUR57 (SDX24) */ + { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1000), + .driver_data = (kernel_ulong_t) &mhi_netprisma_lcur57_info }, + /* NETPRISMA FCUN69 (SDX6X) */ + { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1001), + .driver_data = (kernel_ulong_t) &mhi_netprisma_fcun69_info }, { } }; MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index 8412406c4f1d2e..6276551d79680e 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -484,7 +484,6 @@ static const struct file_operations input_fops = { .owner = THIS_MODULE, .open = moxtet_debug_open, .read = input_read, - .llseek = no_llseek, }; static ssize_t output_read(struct file *file, char __user *buf, size_t len, @@ -549,7 +548,6 @@ static const struct file_operations output_fops = { .open = moxtet_debug_open, .read = output_read, .write = output_write, - .llseek = no_llseek, }; static int moxtet_register_debugfs(struct moxtet *moxtet) diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index eee41fb798a16b..a89d7892563767 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -751,12 +751,10 @@ static int sunxi_rsb_probe(struct platform_device *pdev) int irq, ret; of_property_read_u32(np, "clock-frequency", &clk_freq); - if (clk_freq > RSB_MAX_FREQ) { - dev_err(dev, - "clock-frequency (%u Hz) is too high (max = 20MHz)\n", - clk_freq); - return -EINVAL; - } + if (clk_freq > RSB_MAX_FREQ) + return dev_err_probe(dev, -EINVAL, + "clock-frequency (%u Hz) is too high (max = 20MHz)\n", + clk_freq); rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL); if (!rsb) @@ -774,28 +772,22 @@ static int sunxi_rsb_probe(struct platform_device *pdev) return irq; rsb->clk = devm_clk_get(dev, NULL); - if (IS_ERR(rsb->clk)) { - ret = PTR_ERR(rsb->clk); - dev_err(dev, "failed to retrieve clk: %d\n", ret); - return ret; - } + if (IS_ERR(rsb->clk)) + return dev_err_probe(dev, PTR_ERR(rsb->clk), + "failed to retrieve clk\n"); rsb->rstc = devm_reset_control_get(dev, NULL); - if (IS_ERR(rsb->rstc)) { - ret = PTR_ERR(rsb->rstc); - dev_err(dev, "failed to retrieve reset controller: %d\n", ret); - return ret; - } + if (IS_ERR(rsb->rstc)) + return dev_err_probe(dev, PTR_ERR(rsb->rstc), + "failed to retrieve reset controller\n"); init_completion(&rsb->complete); mutex_init(&rsb->lock); ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb); - if (ret) { - dev_err(dev, "can't register interrupt handler irq %d: %d\n", - irq, ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, + "can't register interrupt handler irq %d\n", irq); ret = 
sunxi_rsb_hw_init(rsb); if (ret) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 2b59ef61dda23b..270a94a06e05ce 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -126,7 +126,6 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @enabled: sysc runtime enabled status * @needs_resume: runtime resume needed on resume from suspend * @child_needs_resume: runtime resume needed for child on resume from suspend - * @disable_on_idle: status flag used for disabling modules with resets * @idle_work: work structure used to perform delayed idle on a module * @pre_reset_quirk: module specific pre-reset quirk * @post_reset_quirk: module specific post-reset quirk @@ -2569,14 +2568,12 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = { static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, bool is_child) { - const struct property *prop; - int i, len; + int i; for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { const char *name = sysc_dts_quirks[i].name; - prop = of_get_property(np, name, &len); - if (!prop) + if (!of_property_present(np, name)) continue; ddata->cfg.quirks |= sysc_dts_quirks[i].mask; diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c index 1eedc5eeb31543..e760f8d347cc19 100644 --- a/drivers/cdx/controller/mcdi.c +++ b/drivers/cdx/controller/mcdi.c @@ -27,10 +27,6 @@ #include "bitfield.h" #include "mcdi.h" -struct cdx_mcdi_copy_buffer { - struct cdx_dword buffer[DIV_ROUND_UP(MCDI_CTL_SDU_LEN_MAX, 4)]; -}; - static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd); static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx); static int cdx_mcdi_rpc_async_internal(struct cdx_mcdi *cdx, diff --git a/drivers/char/adi.c b/drivers/char/adi.c index 751d7cc0da1b3b..f9bec10a60642b 100644 --- a/drivers/char/adi.c +++ b/drivers/char/adi.c @@ -14,12 +14,6 @@ #define MAX_BUF_SZ PAGE_SIZE -static int adi_open(struct inode *inode, struct file *file) -{ - file->f_mode |= FMODE_UNSIGNED_OFFSET; - return 0; -} - static int read_mcd_tag(unsigned long addr) { long err; @@ -196,7 +190,6 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence) if (offset != file->f_pos) { file->f_pos = offset; - file->f_version = 0; ret = offset; } @@ -206,9 +199,9 @@ static loff_t adi_llseek(struct file *file, loff_t offset, int whence) static const struct file_operations adi_fops = { .owner = THIS_MODULE, .llseek = adi_llseek, - .open = adi_open, .read = adi_read, .write = adi_write, + .fop_flags = FOP_UNSIGNED_OFFSET, }; static struct miscdevice adi_miscdev = { diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 69314532f38cd8..9fed9706d9cd26 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -111,7 +111,6 @@ static irqreturn_t ac_interrupt(int, void *); static const struct file_operations ac_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = ac_read, .write = ac_write, .unlocked_ioctl = ac_ioctl, diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c index a4f4291b4492cb..44a1cdbd4bfb65 100644 --- a/drivers/char/ds1620.c +++ b/drivers/char/ds1620.c @@ -353,7 +353,6 @@ static const struct file_operations ds1620_fops = { .open = ds1620_open, .read = ds1620_read, .unlocked_ioctl = ds1620_unlocked_ioctl, - .llseek = no_llseek, }; static struct miscdevice ds1620_miscdev = { diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 5a1a73310e9764..27f5f9d19531dd 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -107,7 +107,6 @@ 
static const struct file_operations dtlk_fops = .unlocked_ioctl = dtlk_ioctl, .open = dtlk_open, .release = dtlk_release, - .llseek = no_llseek, }; /* local prototypes */ diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index da32e8ed083096..e904e476e49ad8 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -700,7 +700,6 @@ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static const struct file_operations hpet_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = hpet_read, .poll = hpet_poll, .unlocked_ioctl = hpet_ioctl, @@ -808,7 +807,7 @@ int hpet_alloc(struct hpet_data *hdp) struct hpets *hpetp; struct hpet __iomem *hpet; static struct hpets *last; - unsigned long period; + u32 period; unsigned long long temp; u32 remainder; @@ -865,11 +864,11 @@ int hpet_alloc(struct hpet_data *hdp) do_div(temp, period); hpetp->hp_tick_freq = temp; /* ticks per second */ - printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s", + printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s", hpetp->hp_which, hdp->hd_phys_address, hpetp->hp_ntimer > 1 ? "s" : ""); for (i = 0; i < hpetp->hp_ntimer; i++) - printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); + printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]); printk(KERN_CONT "\n"); temp = hpetp->hp_tick_freq; diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 01e2e1ef82cf9c..b51d9e243f3512 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -555,6 +555,7 @@ config HW_RANDOM_ARM_SMCCC_TRNG config HW_RANDOM_CN10K tristate "Marvell CN10K Random Number Generator support" depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) + default HW_RANDOM if ARCH_THUNDER help This driver provides support for the True Random Number generator available in Marvell CN10K SoCs. @@ -572,6 +573,20 @@ config HW_RANDOM_JH7110 To compile this driver as a module, choose M here. The module will be called jh7110-trng. +config HW_RANDOM_ROCKCHIP + tristate "Rockchip True Random Number Generator" + depends on HW_RANDOM && (ARCH_ROCKCHIP || COMPILE_TEST) + depends on HAS_IOMEM + default HW_RANDOM + help + This driver provides kernel-side support for the True Random Number + Generator hardware found on some Rockchip SoC like RK3566 or RK3568. + + To compile this driver as a module, choose M here: the + module will be called rockchip-rng. + + If unsure, say Y. 
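/*
 * Illustrative consumer of the TRNG added below: once the rockchip-rng
 * backend is selected, the hwrng core exposes it through /dev/hwrng.
 * Minimal user-space sketch, assuming default kernel plumbing and no rngd
 * holding the device:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];	/* driver returns up to 32 bytes (RK_RNG_MAX_BYTE) per request */
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf)) == (ssize_t)sizeof(buf)) {
		for (unsigned int i = 0; i < sizeof(buf); i++)
			printf("%02x", buf[i]);
		printf("\n");
	}
	close(fd);
	return 0;
}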
+ endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 32549a1186dc57..01f012eab44008 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -48,4 +48,5 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o +obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index b03e8030062758..aa2b135e3ee230 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng) return ret; ret = reset_control_reset(priv->reset); - if (ret) + if (ret) { + clk_disable_unprepare(priv->clk); return ret; + } if (priv->mask_interrupts) { /* mask the interrupt */ diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index c0d2f824769f88..4c50efc464835b 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -622,6 +622,7 @@ static int __maybe_unused cctrng_resume(struct device *dev) /* wait for Cryptocell reset completion */ if (!cctrng_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); + clk_disable_unprepare(drvdata->clk); return -EBUSY; } diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index aa993753ab120b..1e3048f2bb38f0 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev); dev_info(&pdev->dev, "registered RNG driver\n"); diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 94ee18a1120a81..f01eb95bee314b 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c @@ -147,33 +147,25 @@ static int mxc_rnga_probe(struct platform_device *pdev) mxc_rng->rng.data_present = mxc_rnga_data_present; mxc_rng->rng.data_read = mxc_rnga_data_read; - mxc_rng->clk = devm_clk_get(&pdev->dev, NULL); + mxc_rng->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(mxc_rng->clk)) { dev_err(&pdev->dev, "Could not get rng_clk!\n"); return PTR_ERR(mxc_rng->clk); } - err = clk_prepare_enable(mxc_rng->clk); - if (err) - return err; - mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); - goto err_ioremap; + return err; } err = hwrng_register(&mxc_rng->rng); if (err) { dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); - goto err_ioremap; + return err; } return 0; - -err_ioremap: - clk_disable_unprepare(mxc_rng->clk); - return err; } static void mxc_rnga_remove(struct platform_device *pdev) @@ -181,8 +173,6 @@ static void mxc_rnga_remove(struct platform_device *pdev) struct mxc_rng *mxc_rng = platform_get_drvdata(pdev); hwrng_unregister(&mxc_rng->rng); - - clk_disable_unprepare(mxc_rng->clk); } static const struct of_device_id mxc_rnga_of_match[] = { diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c new file mode 100644 index 
00000000000000..289b385bbf05e3 --- /dev/null +++ b/drivers/char/hw_random/rockchip-rng.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rockchip-rng.c True Random Number Generator driver for Rockchip RK3568 SoC + * + * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd. + * Copyright (c) 2022, Aurelien Jarno + * Authors: + * Lin Jinhan + * Aurelien Jarno + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RK_RNG_AUTOSUSPEND_DELAY 100 +#define RK_RNG_MAX_BYTE 32 +#define RK_RNG_POLL_PERIOD_US 100 +#define RK_RNG_POLL_TIMEOUT_US 10000 + +/* + * TRNG collects osc ring output bit every RK_RNG_SAMPLE_CNT time. The value is + * a tradeoff between speed and quality and has been adjusted to get a quality + * of ~900 (~87.5% of FIPS 140-2 successes). + */ +#define RK_RNG_SAMPLE_CNT 1000 + +/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */ +#define TRNG_RST_CTL 0x0004 +#define TRNG_RNG_CTL 0x0400 +#define TRNG_RNG_CTL_LEN_64_BIT (0x00 << 4) +#define TRNG_RNG_CTL_LEN_128_BIT (0x01 << 4) +#define TRNG_RNG_CTL_LEN_192_BIT (0x02 << 4) +#define TRNG_RNG_CTL_LEN_256_BIT (0x03 << 4) +#define TRNG_RNG_CTL_OSC_RING_SPEED_0 (0x00 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_1 (0x01 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_2 (0x02 << 2) +#define TRNG_RNG_CTL_OSC_RING_SPEED_3 (0x03 << 2) +#define TRNG_RNG_CTL_MASK GENMASK(15, 0) +#define TRNG_RNG_CTL_ENABLE BIT(1) +#define TRNG_RNG_CTL_START BIT(0) +#define TRNG_RNG_SAMPLE_CNT 0x0404 +#define TRNG_RNG_DOUT 0x0410 + +struct rk_rng { + struct hwrng rng; + void __iomem *base; + int clk_num; + struct clk_bulk_data *clk_bulks; +}; + +/* The mask in the upper 16 bits determines the bits that are updated */ +static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask) +{ + writel((mask << 16) | val, rng->base + TRNG_RNG_CTL); +} + +static int rk_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + int ret; + + /* start clocks */ + ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks); + if (ret < 0) { + dev_err((struct device *) rk_rng->rng.priv, + "Failed to enable clks %d\n", ret); + return ret; + } + + /* set the sample period */ + writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT); + + /* set osc ring speed and enable it */ + rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT | + TRNG_RNG_CTL_OSC_RING_SPEED_0 | + TRNG_RNG_CTL_ENABLE, + TRNG_RNG_CTL_MASK); + + return 0; +} + +static void rk_rng_cleanup(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + /* stop TRNG */ + rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK); + + /* stop clocks */ + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); +} + +static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); + u32 reg; + int ret = 0; + + ret = pm_runtime_resume_and_get((struct device *) rk_rng->rng.priv); + if (ret < 0) + return ret; + + /* Start collecting random data */ + rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START); + + ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg, + !(reg & TRNG_RNG_CTL_START), + RK_RNG_POLL_PERIOD_US, + RK_RNG_POLL_TIMEOUT_US); + if (ret < 0) + goto out; + + /* Read random data stored in the registers */ + memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read); +out: + 
pm_runtime_mark_last_busy((struct device *) rk_rng->rng.priv); + pm_runtime_put_sync_autosuspend((struct device *) rk_rng->rng.priv); + + return (ret < 0) ? ret : to_read; +} + +static int rk_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct reset_control *rst; + struct rk_rng *rk_rng; + int ret; + + rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL); + if (!rk_rng) + return -ENOMEM; + + rk_rng->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rk_rng->base)) + return PTR_ERR(rk_rng->base); + + rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks); + if (rk_rng->clk_num < 0) + return dev_err_probe(dev, rk_rng->clk_num, + "Failed to get clks property\n"); + + rst = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(rst)) + return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); + + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + + platform_set_drvdata(pdev, rk_rng); + + rk_rng->rng.name = dev_driver_string(dev); + if (!IS_ENABLED(CONFIG_PM)) { + rk_rng->rng.init = rk_rng_init; + rk_rng->rng.cleanup = rk_rng_cleanup; + } + rk_rng->rng.read = rk_rng_read; + rk_rng->rng.priv = (unsigned long) dev; + rk_rng->rng.quality = 900; + + pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + ret = devm_pm_runtime_enable(dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Runtime pm activation failed.\n"); + + ret = devm_hwrng_register(dev, &rk_rng->rng); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register Rockchip hwrng\n"); + + return 0; +} + +static int __maybe_unused rk_rng_runtime_suspend(struct device *dev) +{ + struct rk_rng *rk_rng = dev_get_drvdata(dev); + + rk_rng_cleanup(&rk_rng->rng); + + return 0; +} + +static int __maybe_unused rk_rng_runtime_resume(struct device *dev) +{ + struct rk_rng *rk_rng = dev_get_drvdata(dev); + + return rk_rng_init(&rk_rng->rng); +} + +static const struct dev_pm_ops rk_rng_pm_ops = { + SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend, + rk_rng_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static const struct of_device_id rk_rng_dt_match[] = { + { .compatible = "rockchip,rk3568-rng", }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, rk_rng_dt_match); + +static struct platform_driver rk_rng_driver = { + .driver = { + .name = "rockchip-rng", + .pm = &rk_rng_pm_ops, + .of_match_table = rk_rng_dt_match, + }, + .probe = rk_rng_probe, +}; + +module_platform_driver(rk_rng_driver); + +MODULE_DESCRIPTION("Rockchip RK3568 True Random Number Generator driver"); +MODULE_AUTHOR("Lin Jinhan "); +MODULE_AUTHOR("Aurelien Jarno "); +MODULE_AUTHOR("Daniel Golle "); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 96ad571d041a95..d04b391048fbaa 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -980,7 +980,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, ipmi_ssif_unlock_cond(ssif_info, flags); start_get(ssif_info); } else { - /* Wait a jiffie then request the next message */ + /* Wait a jiffy then request the next message */ ssif_info->waiting_alert = true; ssif_info->retries_left = SSIF_RECV_RETRIES; if (!ssif_info->stopping) @@ -1368,8 +1368,20 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info) rv = do_cmd(client, 2, msg, &len, resp); if (rv) rv = -ENODEV; - else + else { + if (len < 
3) { + rv = -ENODEV; + } else { + struct ipmi_device_id id; + + rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1], + resp + 2, len - 2, &id); + if (rv) + rv = -ENODEV; /* Error means a BMC probably isn't there. */ + } + if (!rv && info) strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE); + } kfree(resp); return rv; } @@ -1704,6 +1716,16 @@ static int ssif_probe(struct i2c_client *client) ipmi_addr_src_to_str(ssif_info->addr_source), client->addr, client->adapter->name, slave_addr); + /* + * Send a get device id command and validate its response to + * make sure a valid BMC is there. + */ + rv = ssif_detect(client, NULL); + if (rv) { + dev_err(&client->dev, "Not present\n"); + goto out; + } + /* Now check for system interface capabilities */ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD; @@ -2085,6 +2107,7 @@ static const struct platform_device_id ssif_plat_ids[] = { { "dmi-ipmi-ssif", 0 }, { } }; +MODULE_DEVICE_TABLE(platform, ssif_plat_ids); static struct platform_driver ipmi_driver = { .driver = { diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 9a459257489f0d..335eea80054eef 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -903,7 +903,6 @@ static const struct file_operations ipmi_wdog_fops = { .open = ipmi_open, .release = ipmi_close, .fasync = ipmi_fasync, - .llseek = no_llseek, }; static struct miscdevice ipmi_wdog_miscdev = { diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 7c359cc406d5bc..169eed162a7fba 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -643,6 +643,7 @@ static const struct file_operations __maybe_unused mem_fops = { .get_unmapped_area = get_unmapped_area_mem, .mmap_capabilities = memory_mmap_capabilities, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; static const struct file_operations null_fops = { @@ -693,7 +694,7 @@ static const struct memdev { umode_t mode; } devlist[] = { #ifdef CONFIG_DEVMEM - [DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 }, + [DEVMEM_MINOR] = { "mem", &mem_fops, 0, 0 }, #endif [3] = { "null", &null_fops, FMODE_NOWAIT, 0666 }, #ifdef CONFIG_DEVPORT diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c index c39a836ebd153b..5f4696813ceaaf 100644 --- a/drivers/char/pc8736x_gpio.c +++ b/drivers/char/pc8736x_gpio.c @@ -235,7 +235,6 @@ static const struct file_operations pc8736x_gpio_fileops = { .open = pc8736x_gpio_open, .write = nsc_gpio_write, .read = nsc_gpio_read, - .llseek = no_llseek, }; static void __init pc8736x_init_shadow(void) diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index eaff98dbaa8c84..d1dfbd8d4d4269 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -786,7 +786,6 @@ static const struct class ppdev_class = { static const struct file_operations pp_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, diff --git a/drivers/char/random.c b/drivers/char/random.c index 87fe61295ea1fc..23ee76bbb4aa72 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -59,6 +59,7 @@ #ifdef CONFIG_VDSO_GETRANDOM #include #include +#include #endif #include #include @@ -281,8 +282,15 @@ static void crng_reseed(struct work_struct *work) * former to arrive at the latter. Use smp_store_release so that this * is ordered with the write above to base_crng.generation. Pairs with * the smp_rmb() before the syscall in the vDSO code. 
+ * + * Cast to unsigned long for 32-bit architectures, since atomic 64-bit + * operations are not supported on those architectures. This is safe + * because base_crng.generation is a 32-bit value. On big-endian + * architectures it will be stored in the upper 32 bits, but that's okay + * because the vDSO side only checks whether the value changed, without + * actually using or interpreting the value. */ - smp_store_release(&_vdso_rng_data.generation, next_gen + 1); + smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1); #endif if (!static_branch_likely(&crng_is_ready)) crng_init = CRNG_READY; @@ -735,7 +743,7 @@ static void __cold _credit_init_bits(size_t bits) queue_work(system_unbound_wq, &set_ready); atomic_notifier_call_chain(&random_ready_notifier, 0, NULL); #ifdef CONFIG_VDSO_GETRANDOM - WRITE_ONCE(_vdso_rng_data.is_ready, true); + WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true); #endif wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c index 9f701dcba95c74..700e6affea6f78 100644 --- a/drivers/char/scx200_gpio.c +++ b/drivers/char/scx200_gpio.c @@ -68,7 +68,6 @@ static const struct file_operations scx200_gpio_fileops = { .read = nsc_gpio_read, .open = scx200_gpio_open, .release = scx200_gpio_release, - .llseek = no_llseek, }; static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */ diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index bb5115b1736a46..0f8185e541ed47 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -1054,7 +1054,6 @@ static const struct file_operations sonypi_misc_fops = { .release = sonypi_misc_release, .fasync = sonypi_misc_fasync, .unlocked_ioctl = sonypi_misc_ioctl, - .llseek = no_llseek, }; static struct miscdevice sonypi_misc_device = { diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index 45ca33b3dcb268..81348487c1258b 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -133,7 +133,7 @@ static void st33zp24_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id st33zp24_i2c_id[] = { - {TPM_ST33_I2C, 0}, + { TPM_ST33_I2C }, {} }; MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 30b4c288c1bbc3..c3fbbf4d3db79a 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -47,6 +47,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, if (!ret) ret = tpm2_commit_space(chip, space, buf, &len); + else + tpm2_flush_space(chip); out_rc: return ret ? 
ret : len; diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index e2c0baa69feff1..97c94b5e934070 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c @@ -59,7 +59,6 @@ static int tpm_release(struct inode *inode, struct file *file) const struct file_operations tpm_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = tpm_open, .read = tpm_common_read, .write = tpm_common_write, diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index d3521aadd43eef..511c67061728ad 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -71,7 +71,7 @@ #include "tpm.h" #include #include -#include +#include #include #include #include @@ -1362,4 +1362,5 @@ int tpm2_sessions_init(struct tpm_chip *chip) return rc; } +EXPORT_SYMBOL(tpm2_sessions_init); #endif /* CONFIG_TCG_TPM2_HMAC */ diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 4892d491da8dae..60354cd53b5c12 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -12,7 +12,7 @@ */ #include -#include +#include #include "tpm.h" enum tpm2_handle_types { @@ -169,6 +169,9 @@ void tpm2_flush_space(struct tpm_chip *chip) struct tpm_space *space = &chip->work_space; int i; + if (!space) + return; + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) tpm2_flush_context(chip, space->context_tbl[i]); diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 301a95b3734fdf..d1d27fdfe52331 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -186,7 +186,7 @@ static void i2c_atmel_remove(struct i2c_client *client) } static const struct i2c_device_id i2c_atmel_id[] = { - {I2C_DRIVER_NAME, 0}, + { I2C_DRIVER_NAME }, {} }; MODULE_DEVICE_TABLE(i2c, i2c_atmel_id); diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c index 9511c0d501852b..6cd07dd34507e3 100644 --- a/drivers/char/tpm/tpm_tis_i2c.c +++ b/drivers/char/tpm/tpm_tis_i2c.c @@ -375,7 +375,7 @@ static void tpm_tis_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id tpm_tis_i2c_id[] = { - { "tpm_tis_i2c", 0 }, + { "tpm_tis_i2c" }, {} }; MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id); diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 11c502039faf58..8fe4a01eea1234 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -243,7 +243,6 @@ static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp) static const struct file_operations vtpm_proxy_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = vtpm_proxy_fops_read, .write = vtpm_proxy_fops_write, .poll = vtpm_proxy_fops_poll, diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index eef0fb06ea8322..c25df7ea064eec 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -46,7 +46,6 @@ static int tpmrm_release(struct inode *inode, struct file *file) const struct file_operations tpmrm_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = tpmrm_open, .read = tpm_common_read, .write = tpm_common_write, diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index de7d720d99fa94..99a7f2441e70fb 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1093,7 +1093,6 @@ static const struct file_operations port_fops = { .poll = port_fops_poll, .release = 
port_fops_release, .fasync = port_fops_fasync, - .llseek = no_llseek, }; /* diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig index efa12ac2b3f204..54ece920705525 100644 --- a/drivers/clk/.kunitconfig +++ b/drivers/clk/.kunitconfig @@ -1,6 +1,8 @@ CONFIG_KUNIT=y +CONFIG_OF=y CONFIG_COMMON_CLK=y CONFIG_CLK_KUNIT_TEST=y +CONFIG_CLK_FIXED_RATE_KUNIT_TEST=y CONFIG_CLK_GATE_KUNIT_TEST=y CONFIG_CLK_FD_KUNIT_TEST=y CONFIG_UML_PCI_OVER_VIRTIO=n diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 983ef4f36d8c43..299bc678ed1b9f 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -218,6 +218,14 @@ config COMMON_CLK_EN7523 This driver provides the fixed clocks and gates present on Airoha ARM silicon. +config COMMON_CLK_EP93XX + tristate "Clock driver for Cirrus Logic ep93xx SoC" + depends on ARCH_EP93XX || COMPILE_TEST + select AUXILIARY_BUS + select REGMAP_MMIO + help + This driver supports the SoC clocks on the Cirrus Logic ep93xx. + config COMMON_CLK_FSL_FLEXSPI tristate "Clock driver for FlexSPI on Layerscape SoCs" depends on ARCH_LAYERSCAPE || COMPILE_TEST @@ -509,9 +517,20 @@ config CLK_KUNIT_TEST tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS depends on KUNIT default KUNIT_ALL_TESTS + select OF_OVERLAY if OF + select DTC help Kunit tests for the common clock framework. +config CLK_FIXED_RATE_KUNIT_TEST + tristate "Basic fixed rate clk type KUnit test" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + select OF_OVERLAY if OF + select DTC + help + KUnit tests for the basic fixed rate clk type. + config CLK_GATE_KUNIT_TEST tristate "Basic gate type Kunit test" if !KUNIT_ALL_TESTS depends on KUNIT diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index f793a16cad40bf..fb8878a5d7d93d 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -2,10 +2,14 @@ # common clock types obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o clkdev.o obj-$(CONFIG_COMMON_CLK) += clk.o -obj-$(CONFIG_CLK_KUNIT_TEST) += clk_test.o +obj-$(CONFIG_CLK_KUNIT_TEST) += clk-test.o +clk-test-y := clk_test.o \ + kunit_clk_parent_data_test.dtbo.o obj-$(CONFIG_COMMON_CLK) += clk-divider.o obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o +obj-$(CONFIG_CLK_FIXED_RATE_KUNIT_TEST) += clk-fixed-rate-test.o +clk-fixed-rate-test-y := clk-fixed-rate_test.o kunit_clk_fixed_rate_test.dtbo.o obj-$(CONFIG_COMMON_CLK) += clk-gate.o obj-$(CONFIG_CLK_GATE_KUNIT_TEST) += clk-gate_test.o obj-$(CONFIG_COMMON_CLK) += clk-multiplier.o @@ -18,6 +22,11 @@ ifeq ($(CONFIG_OF), y) obj-$(CONFIG_COMMON_CLK) += clk-conf.o endif +# KUnit specific helpers +ifeq ($(CONFIG_COMMON_CLK), y) +obj-$(CONFIG_KUNIT) += clk_kunit_helpers.o +endif + # hardware specific clock types # please keep this section sorted lexicographically by file path name obj-$(CONFIG_COMMON_CLK_APPLE_NCO) += clk-apple-nco.o @@ -30,6 +39,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o +obj-$(CONFIG_COMMON_CLK_EP93XX) += clk-ep93xx.o obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile index 89061b85e7d2a0..8e3684ba2c74ed 100644 --- a/drivers/clk/at91/Makefile +++ b/drivers/clk/at91/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o 
at91sam9rl.o at91sam9x5.o dt-compat. obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o dt-compat.o obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o dt-compat.o obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o +obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c index ff65f7b916f077..fda04110222463 100644 --- a/drivers/clk/at91/clk-sam9x60-pll.c +++ b/drivers/clk/at91/clk-sam9x60-pll.c @@ -23,9 +23,6 @@ #define UPLL_DIV 2 #define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1) -#define FCORE_MIN (600000000) -#define FCORE_MAX (1200000000) - #define PLL_MAX_ID 7 struct sam9x60_pll_core { @@ -76,9 +73,15 @@ static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw, { struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); struct sam9x60_frac *frac = to_sam9x60_frac(core); + unsigned long freq; - return parent_rate * (frac->mul + 1) + + freq = parent_rate * (frac->mul + 1) + DIV_ROUND_CLOSEST_ULL((u64)parent_rate * frac->frac, (1 << 22)); + + if (core->layout->div2) + freq >>= 1; + + return freq; } static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core) @@ -194,7 +197,8 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core, unsigned long nmul = 0; unsigned long nfrac = 0; - if (rate < FCORE_MIN || rate > FCORE_MAX) + if (rate < core->characteristics->core_output[0].min || + rate > core->characteristics->core_output[0].max) return -ERANGE; /* @@ -214,7 +218,8 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core, } /* Check if resulted rate is a valid. */ - if (tmprate < FCORE_MIN || tmprate > FCORE_MAX) + if (tmprate < core->characteristics->core_output[0].min || + tmprate > core->characteristics->core_output[0].max) return -ERANGE; if (update) { @@ -433,6 +438,12 @@ static unsigned long sam9x60_div_pll_recalc_rate(struct clk_hw *hw, return DIV_ROUND_CLOSEST_ULL(parent_rate, (div->div + 1)); } +static unsigned long sam9x60_fixed_div_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return parent_rate >> 1; +} + static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core, unsigned long *parent_rate, unsigned long rate) @@ -607,6 +618,16 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = { .restore_context = sam9x60_div_pll_restore_context, }; +static const struct clk_ops sam9x60_fixed_div_pll_ops = { + .prepare = sam9x60_div_pll_prepare, + .unprepare = sam9x60_div_pll_unprepare, + .is_prepared = sam9x60_div_pll_is_prepared, + .recalc_rate = sam9x60_fixed_div_pll_recalc_rate, + .round_rate = sam9x60_div_pll_round_rate, + .save_context = sam9x60_div_pll_save_context, + .restore_context = sam9x60_div_pll_restore_context, +}; + struct clk_hw * __init sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock, const char *name, const char *parent_name, @@ -669,7 +690,8 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock, goto free; } - ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN, + ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, + characteristics->core_output[0].min, parent_rate, true); if (ret < 0) { hw = ERR_PTR(ret); @@ -725,10 +747,14 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock, else init.parent_names = &parent_name; init.num_parents = 1; - if (flags & CLK_SET_RATE_GATE) + + if 
(layout->div2) + init.ops = &sam9x60_fixed_div_pll_ops; + else if (flags & CLK_SET_RATE_GATE) init.ops = &sam9x60_div_pll_ops; else init.ops = &sam9x60_div_pll_ops_chg; + init.flags = flags; div->core.id = id; diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c index a32dc2111b900e..f5a5f9ba76346b 100644 --- a/drivers/clk/at91/dt-compat.c +++ b/drivers/clk/at91/dt-compat.c @@ -563,9 +563,10 @@ of_at91_clk_pll_get_characteristics(struct device_node *np) if (num_cells < 2 || num_cells > 4) return NULL; - if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp)) + num_output = of_property_count_u32_elems(np, "atmel,pll-clk-output-ranges"); + if (num_output <= 0) return NULL; - num_output = tmp / (sizeof(u32) * num_cells); + num_output /= num_cells; characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL); if (!characteristics) diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 0f52e80bcd49a1..4fb29ca111f7d4 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -64,6 +64,7 @@ struct clk_pll_layout { u8 frac_shift; u8 div_shift; u8 endiv_shift; + u8 div2; }; extern const struct clk_pll_layout at91rm9200_pll_layout; @@ -75,6 +76,7 @@ struct clk_pll_characteristics { struct clk_range input; int num_output; const struct clk_range *output; + const struct clk_range *core_output; u16 *icpll; u8 *out; u8 upll : 1; @@ -119,6 +121,22 @@ struct at91_clk_pms { #define ndck(a, s) (a[s - 1].id + 1) #define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1) + +#define PMC_INIT_TABLE(_table, _count) \ + do { \ + u8 _i; \ + for (_i = 0; _i < (_count); _i++) \ + (_table)[_i] = _i; \ + } while (0) + +#define PMC_FILL_TABLE(_to, _from, _count) \ + do { \ + u8 _i; \ + for (_i = 0; _i < (_count); _i++) { \ + (_to)[_i] = (_from)[_i]; \ + } \ + } while (0) + struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem, unsigned int nperiph, unsigned int ngck, unsigned int npck); diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c index e309cbf3cb9a4d..db6db9e2073eb9 100644 --- a/drivers/clk/at91/sam9x60.c +++ b/drivers/clk/at91/sam9x60.c @@ -26,10 +26,16 @@ static const struct clk_range plla_outputs[] = { { .min = 2343750, .max = 1200000000 }, }; +/* Fractional PLL core output range. */ +static const struct clk_range core_outputs[] = { + { .min = 600000000, .max = 1200000000 }, +}; + static const struct clk_pll_characteristics plla_characteristics = { .input = { .min = 12000000, .max = 48000000 }, .num_output = ARRAY_SIZE(plla_outputs), .output = plla_outputs, + .core_output = core_outputs, }; static const struct clk_range upll_outputs[] = { @@ -40,6 +46,7 @@ static const struct clk_pll_characteristics upll_characteristics = { .input = { .min = 12000000, .max = 48000000 }, .num_output = ARRAY_SIZE(upll_outputs), .output = upll_outputs, + .core_output = core_outputs, .upll = true, }; diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c new file mode 100644 index 00000000000000..cbb8b220f16bcd --- /dev/null +++ b/drivers/clk/at91/sam9x7.c @@ -0,0 +1,946 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SAM9X7 PMC code. + * + * Copyright (C) 2023 Microchip Technology Inc. 
and its subsidiaries + * + * Author: Varshini Rajendran + * + */ +#include +#include +#include +#include + +#include + +#include "pmc.h" + +static DEFINE_SPINLOCK(pmc_pll_lock); +static DEFINE_SPINLOCK(mck_lock); + +/** + * enum pll_ids - PLL clocks identifiers + * @PLL_ID_PLLA: PLLA identifier + * @PLL_ID_UPLL: UPLL identifier + * @PLL_ID_AUDIO: Audio PLL identifier + * @PLL_ID_LVDS: LVDS PLL identifier + * @PLL_ID_PLLA_DIV2: PLLA DIV2 identifier + * @PLL_ID_MAX: Max PLL Identifier + */ +enum pll_ids { + PLL_ID_PLLA, + PLL_ID_UPLL, + PLL_ID_AUDIO, + PLL_ID_LVDS, + PLL_ID_PLLA_DIV2, + PLL_ID_MAX, +}; + +/** + * enum pll_type - PLL type identifiers + * @PLL_TYPE_FRAC: fractional PLL identifier + * @PLL_TYPE_DIV: divider PLL identifier + */ +enum pll_type { + PLL_TYPE_FRAC, + PLL_TYPE_DIV, +}; + +static const struct clk_master_characteristics mck_characteristics = { + .output = { .min = 32000000, .max = 266666667 }, + .divisors = { 1, 2, 4, 3, 5}, + .have_div3_pres = 1, +}; + +static const struct clk_master_layout sam9x7_master_layout = { + .mask = 0x373, + .pres_shift = 4, + .offset = 0x28, +}; + +/* Fractional PLL core output range. */ +static const struct clk_range plla_core_outputs[] = { + { .min = 375000000, .max = 1600000000 }, +}; + +static const struct clk_range upll_core_outputs[] = { + { .min = 600000000, .max = 1200000000 }, +}; + +static const struct clk_range lvdspll_core_outputs[] = { + { .min = 400000000, .max = 800000000 }, +}; + +static const struct clk_range audiopll_core_outputs[] = { + { .min = 400000000, .max = 800000000 }, +}; + +static const struct clk_range plladiv2_core_outputs[] = { + { .min = 375000000, .max = 1600000000 }, +}; + +/* Fractional PLL output range. */ +static const struct clk_range plla_outputs[] = { + { .min = 732421, .max = 800000000 }, +}; + +static const struct clk_range upll_outputs[] = { + { .min = 300000000, .max = 600000000 }, +}; + +static const struct clk_range lvdspll_outputs[] = { + { .min = 10000000, .max = 800000000 }, +}; + +static const struct clk_range audiopll_outputs[] = { + { .min = 10000000, .max = 800000000 }, +}; + +static const struct clk_range plladiv2_outputs[] = { + { .min = 366210, .max = 400000000 }, +}; + +/* PLL characteristics. */ +static const struct clk_pll_characteristics plla_characteristics = { + .input = { .min = 20000000, .max = 50000000 }, + .num_output = ARRAY_SIZE(plla_outputs), + .output = plla_outputs, + .core_output = plla_core_outputs, +}; + +static const struct clk_pll_characteristics upll_characteristics = { + .input = { .min = 20000000, .max = 50000000 }, + .num_output = ARRAY_SIZE(upll_outputs), + .output = upll_outputs, + .core_output = upll_core_outputs, + .upll = true, +}; + +static const struct clk_pll_characteristics lvdspll_characteristics = { + .input = { .min = 20000000, .max = 50000000 }, + .num_output = ARRAY_SIZE(lvdspll_outputs), + .output = lvdspll_outputs, + .core_output = lvdspll_core_outputs, +}; + +static const struct clk_pll_characteristics audiopll_characteristics = { + .input = { .min = 20000000, .max = 50000000 }, + .num_output = ARRAY_SIZE(audiopll_outputs), + .output = audiopll_outputs, + .core_output = audiopll_core_outputs, +}; + +static const struct clk_pll_characteristics plladiv2_characteristics = { + .input = { .min = 20000000, .max = 50000000 }, + .num_output = ARRAY_SIZE(plladiv2_outputs), + .output = plladiv2_outputs, + .core_output = plladiv2_core_outputs, +}; + +/* Layout for fractional PLL ID PLLA. 
*/ +static const struct clk_pll_layout plla_frac_layout = { + .mul_mask = GENMASK(31, 24), + .frac_mask = GENMASK(21, 0), + .mul_shift = 24, + .frac_shift = 0, + .div2 = 1, +}; + +/* Layout for fractional PLLs. */ +static const struct clk_pll_layout pll_frac_layout = { + .mul_mask = GENMASK(31, 24), + .frac_mask = GENMASK(21, 0), + .mul_shift = 24, + .frac_shift = 0, +}; + +/* Layout for DIV PLLs. */ +static const struct clk_pll_layout pll_divpmc_layout = { + .div_mask = GENMASK(7, 0), + .endiv_mask = BIT(29), + .div_shift = 0, + .endiv_shift = 29, +}; + +/* Layout for DIV PLL ID PLLADIV2. */ +static const struct clk_pll_layout plladiv2_divpmc_layout = { + .div_mask = GENMASK(7, 0), + .endiv_mask = BIT(29), + .div_shift = 0, + .endiv_shift = 29, + .div2 = 1, +}; + +/* Layout for DIVIO dividers. */ +static const struct clk_pll_layout pll_divio_layout = { + .div_mask = GENMASK(19, 12), + .endiv_mask = BIT(30), + .div_shift = 12, + .endiv_shift = 30, +}; + +/* + * PLL clocks description + * @n: clock name + * @p: clock parent + * @l: clock layout + * @t: clock type + * @c: pll characteristics + * @f: clock flags + * @eid: export index in sam9x7->chws[] array + */ +static const struct { + const char *n; + const char *p; + const struct clk_pll_layout *l; + u8 t; + const struct clk_pll_characteristics *c; + unsigned long f; + u8 eid; +} sam9x7_plls[][3] = { + [PLL_ID_PLLA] = { + { + .n = "plla_fracck", + .p = "mainck", + .l = &plla_frac_layout, + .t = PLL_TYPE_FRAC, + /* + * This feeds plla_divpmcck which feeds CPU. It should + * not be disabled. + */ + .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, + .c = &plla_characteristics, + }, + + { + .n = "plla_divpmcck", + .p = "plla_fracck", + .l = &pll_divpmc_layout, + .t = PLL_TYPE_DIV, + /* This feeds CPU. It should not be disabled */ + .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, + .eid = PMC_PLLACK, + .c = &plla_characteristics, + }, + }, + + [PLL_ID_UPLL] = { + { + .n = "upll_fracck", + .p = "main_osc", + .l = &pll_frac_layout, + .t = PLL_TYPE_FRAC, + .f = CLK_SET_RATE_GATE, + .c = &upll_characteristics, + }, + + { + .n = "upll_divpmcck", + .p = "upll_fracck", + .l = &pll_divpmc_layout, + .t = PLL_TYPE_DIV, + .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT, + .eid = PMC_UTMI, + .c = &upll_characteristics, + }, + }, + + [PLL_ID_AUDIO] = { + { + .n = "audiopll_fracck", + .p = "main_osc", + .l = &pll_frac_layout, + .f = CLK_SET_RATE_GATE, + .c = &audiopll_characteristics, + .t = PLL_TYPE_FRAC, + }, + + { + .n = "audiopll_divpmcck", + .p = "audiopll_fracck", + .l = &pll_divpmc_layout, + .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT, + .c = &audiopll_characteristics, + .eid = PMC_AUDIOPMCPLL, + .t = PLL_TYPE_DIV, + }, + + { + .n = "audiopll_diviock", + .p = "audiopll_fracck", + .l = &pll_divio_layout, + .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT, + .c = &audiopll_characteristics, + .eid = PMC_AUDIOIOPLL, + .t = PLL_TYPE_DIV, + }, + }, + + [PLL_ID_LVDS] = { + { + .n = "lvdspll_fracck", + .p = "main_osc", + .l = &pll_frac_layout, + .f = CLK_SET_RATE_GATE, + .c = &lvdspll_characteristics, + .t = PLL_TYPE_FRAC, + }, + + { + .n = "lvdspll_divpmcck", + .p = "lvdspll_fracck", + .l = &pll_divpmc_layout, + .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT, + .c = &lvdspll_characteristics, + .eid = PMC_LVDSPLL, + .t = PLL_TYPE_DIV, + }, + }, + + [PLL_ID_PLLA_DIV2] = { + { + .n = "plla_div2pmcck", + .p = "plla_fracck", + .l = &plladiv2_divpmc_layout, + /* + * This may feed critical 
parts of the system like timers. + * It should not be disabled. + */ + .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, + .c = &plladiv2_characteristics, + .eid = PMC_PLLADIV2, + .t = PLL_TYPE_DIV, + }, + }, +}; + +static const struct clk_programmable_layout sam9x7_programmable_layout = { + .pres_mask = 0xff, + .pres_shift = 8, + .css_mask = 0x1f, + .have_slck_mck = 0, + .is_pres_direct = 1, +}; + +static const struct clk_pcr_layout sam9x7_pcr_layout = { + .offset = 0x88, + .cmd = BIT(31), + .gckcss_mask = GENMASK(12, 8), + .pid_mask = GENMASK(6, 0), +}; + +static const struct { + char *n; + char *p; + u8 id; + unsigned long flags; +} sam9x7_systemck[] = { + /* + * ddrck feeds DDR controller and is enabled by bootloader thus we need + * to keep it enabled in case there is no Linux consumer for it. + */ + { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL }, + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, +}; + +/* + * Peripheral clocks description + * @n: clock name + * @f: clock flags + * @id: peripheral id + */ +static const struct { + char *n; + unsigned long f; + u8 id; +} sam9x7_periphck[] = { + { .n = "pioA_clk", .id = 2, }, + { .n = "pioB_clk", .id = 3, }, + { .n = "pioC_clk", .id = 4, }, + { .n = "flex0_clk", .id = 5, }, + { .n = "flex1_clk", .id = 6, }, + { .n = "flex2_clk", .id = 7, }, + { .n = "flex3_clk", .id = 8, }, + { .n = "flex6_clk", .id = 9, }, + { .n = "flex7_clk", .id = 10, }, + { .n = "flex8_clk", .id = 11, }, + { .n = "sdmmc0_clk", .id = 12, }, + { .n = "flex4_clk", .id = 13, }, + { .n = "flex5_clk", .id = 14, }, + { .n = "flex9_clk", .id = 15, }, + { .n = "flex10_clk", .id = 16, }, + { .n = "tcb0_clk", .id = 17, }, + { .n = "pwm_clk", .id = 18, }, + { .n = "adc_clk", .id = 19, }, + { .n = "dma0_clk", .id = 20, }, + { .n = "uhphs_clk", .id = 22, }, + { .n = "udphs_clk", .id = 23, }, + { .n = "macb0_clk", .id = 24, }, + { .n = "lcd_clk", .id = 25, }, + { .n = "sdmmc1_clk", .id = 26, }, + { .n = "ssc_clk", .id = 28, }, + { .n = "can0_clk", .id = 29, }, + { .n = "can1_clk", .id = 30, }, + { .n = "flex11_clk", .id = 32, }, + { .n = "flex12_clk", .id = 33, }, + { .n = "i2s_clk", .id = 34, }, + { .n = "qspi_clk", .id = 35, }, + { .n = "gfx2d_clk", .id = 36, }, + { .n = "pit64b0_clk", .id = 37, }, + { .n = "trng_clk", .id = 38, }, + { .n = "aes_clk", .id = 39, }, + { .n = "tdes_clk", .id = 40, }, + { .n = "sha_clk", .id = 41, }, + { .n = "classd_clk", .id = 42, }, + { .n = "isi_clk", .id = 43, }, + { .n = "pioD_clk", .id = 44, }, + { .n = "tcb1_clk", .id = 45, }, + { .n = "dbgu_clk", .id = 47, }, + /* + * mpddr_clk feeds DDR controller and is enabled by bootloader thus we + * need to keep it enabled in case there is no Linux consumer for it. 
+ */ + { .n = "mpddr_clk", .id = 49, .f = CLK_IS_CRITICAL }, + { .n = "csi2dc_clk", .id = 52, }, + { .n = "csi4l_clk", .id = 53, }, + { .n = "dsi4l_clk", .id = 54, }, + { .n = "lvdsc_clk", .id = 56, }, + { .n = "pit64b1_clk", .id = 58, }, + { .n = "puf_clk", .id = 59, }, + { .n = "gmactsu_clk", .id = 67, }, +}; + +/* + * Generic clock description + * @n: clock name + * @pp: PLL parents + * @pp_mux_table: PLL parents mux table + * @r: clock output range + * @pp_chg_id: id in parent array of changeable PLL parent + * @pp_count: PLL parents count + * @id: clock id + */ +static const struct { + const char *n; + const char *pp[8]; + const char pp_mux_table[8]; + struct clk_range r; + int pp_chg_id; + u8 pp_count; + u8 id; +} sam9x7_gck[] = { + { + .n = "flex0_gclk", + .id = 5, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex1_gclk", + .id = 6, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex2_gclk", + .id = 7, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex3_gclk", + .id = 8, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex6_gclk", + .id = 9, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex7_gclk", + .id = 10, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex8_gclk", + .id = 11, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "sdmmc0_gclk", + .id = 12, + .r = { .max = 105000000 }, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex4_gclk", + .id = 13, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex5_gclk", + .id = 14, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex9_gclk", + .id = 15, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex10_gclk", + .id = 16, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "tcb0_gclk", + .id = 17, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "adc_gclk", + .id = 19, + .pp = { "upll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "lcd_gclk", + .id = 25, + .r = { .max = 75000000 }, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "sdmmc1_gclk", + .id = 26, + .r = { .max = 105000000 }, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "mcan0_gclk", + .id = 29, + .r = { .max = 80000000 }, + .pp = { "upll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "mcan1_gclk", + .id = 30, + .r = { .max = 80000000 }, + .pp = { "upll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 5, 8, }, + 
.pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex11_gclk", + .id = 32, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "flex12_gclk", + .id = 33, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "i2s_gclk", + .id = 34, + .r = { .max = 100000000 }, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "qspi_gclk", + .id = 35, + .r = { .max = 200000000 }, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "pit64b0_gclk", + .id = 37, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "classd_gclk", + .id = 42, + .r = { .max = 100000000 }, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "tcb1_gclk", + .id = 45, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, + + { + .n = "dbgu_gclk", + .id = 47, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "mipiphy_gclk", + .id = 55, + .r = { .max = 27000000 }, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "pit64b1_gclk", + .id = 58, + .pp = { "plla_div2pmcck", }, + .pp_mux_table = { 8, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, + }, + + { + .n = "gmac_gclk", + .id = 67, + .pp = { "audiopll_divpmcck", "plla_div2pmcck", }, + .pp_mux_table = { 6, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, + }, +}; + +static void __init sam9x7_pmc_setup(struct device_node *np) +{ + struct clk_range range = CLK_RANGE(0, 0); + const char *td_slck_name, *md_slck_name, *mainxtal_name; + struct pmc_data *sam9x7_pmc; + const char *parent_names[9]; + void **clk_mux_buffer = NULL; + int clk_mux_buffer_size = 0; + struct clk_hw *main_osc_hw; + struct regmap *regmap; + struct clk_hw *hw; + int i, j; + + i = of_property_match_string(np, "clock-names", "td_slck"); + if (i < 0) + return; + + td_slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "md_slck"); + if (i < 0) + return; + + md_slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = device_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + sam9x7_pmc = pmc_data_allocate(PMC_LVDSPLL + 1, + nck(sam9x7_systemck), + nck(sam9x7_periphck), + nck(sam9x7_gck), 8); + if (!sam9x7_pmc) + return; + + clk_mux_buffer = kmalloc(sizeof(void *) * + (ARRAY_SIZE(sam9x7_gck)), + GFP_KERNEL); + if (!clk_mux_buffer) + goto err_free; + + hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000, + 50000000); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL, 0); + if (IS_ERR(hw)) + goto err_free; + main_osc_hw = hw; + + parent_names[0] = "main_rc_osc"; + parent_names[1] = "main_osc"; + hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2); + if (IS_ERR(hw)) + goto err_free; + + sam9x7_pmc->chws[PMC_MAIN] = hw; + + for (i = 0; i < PLL_ID_MAX; i++) { + for (j = 0; j < 3; j++) { + struct clk_hw 
*parent_hw; + + if (!sam9x7_plls[i][j].n) + continue; + + switch (sam9x7_plls[i][j].t) { + case PLL_TYPE_FRAC: + if (!strcmp(sam9x7_plls[i][j].p, "mainck")) + parent_hw = sam9x7_pmc->chws[PMC_MAIN]; + else if (!strcmp(sam9x7_plls[i][j].p, "main_osc")) + parent_hw = main_osc_hw; + else + parent_hw = __clk_get_hw(of_clk_get_by_name + (np, sam9x7_plls[i][j].p)); + + hw = sam9x60_clk_register_frac_pll(regmap, + &pmc_pll_lock, + sam9x7_plls[i][j].n, + sam9x7_plls[i][j].p, + parent_hw, i, + sam9x7_plls[i][j].c, + sam9x7_plls[i][j].l, + sam9x7_plls[i][j].f); + break; + + case PLL_TYPE_DIV: + hw = sam9x60_clk_register_div_pll(regmap, + &pmc_pll_lock, + sam9x7_plls[i][j].n, + sam9x7_plls[i][j].p, NULL, i, + sam9x7_plls[i][j].c, + sam9x7_plls[i][j].l, + sam9x7_plls[i][j].f, 0); + break; + + default: + continue; + } + + if (IS_ERR(hw)) + goto err_free; + + if (sam9x7_plls[i][j].eid) + sam9x7_pmc->chws[sam9x7_plls[i][j].eid] = hw; + } + } + + parent_names[0] = md_slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plla_divpmcck"; + parent_names[3] = "upll_divpmcck"; + hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4, + parent_names, NULL, &sam9x7_master_layout, + &mck_characteristics, &mck_lock); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_master_div(regmap, "masterck_div", + "masterck_pres", NULL, &sam9x7_master_layout, + &mck_characteristics, &mck_lock, + CLK_SET_RATE_GATE, 0); + if (IS_ERR(hw)) + goto err_free; + + sam9x7_pmc->chws[PMC_MCK] = hw; + + parent_names[0] = "plla_divpmcck"; + parent_names[1] = "upll_divpmcck"; + parent_names[2] = "main_osc"; + hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = md_slck_name; + parent_names[1] = td_slck_name; + parent_names[2] = "mainck"; + parent_names[3] = "masterck_div"; + parent_names[4] = "plla_divpmcck"; + parent_names[5] = "upll_divpmcck"; + parent_names[6] = "audiopll_divpmcck"; + for (i = 0; i < 2; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, NULL, 7, i, + &sam9x7_programmable_layout, + NULL); + if (IS_ERR(hw)) + goto err_free; + + sam9x7_pmc->pchws[i] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sam9x7_systemck); i++) { + hw = at91_clk_register_system(regmap, sam9x7_systemck[i].n, + sam9x7_systemck[i].p, NULL, + sam9x7_systemck[i].id, + sam9x7_systemck[i].flags); + if (IS_ERR(hw)) + goto err_free; + + sam9x7_pmc->shws[sam9x7_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sam9x7_periphck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + &sam9x7_pcr_layout, + sam9x7_periphck[i].n, + "masterck_div", NULL, + sam9x7_periphck[i].id, + &range, INT_MIN, + sam9x7_periphck[i].f); + if (IS_ERR(hw)) + goto err_free; + + sam9x7_pmc->phws[sam9x7_periphck[i].id] = hw; + } + + parent_names[0] = md_slck_name; + parent_names[1] = td_slck_name; + parent_names[2] = "mainck"; + parent_names[3] = "masterck_div"; + for (i = 0; i < ARRAY_SIZE(sam9x7_gck); i++) { + u8 num_parents = 4 + sam9x7_gck[i].pp_count; + u32 *mux_table; + + mux_table = kmalloc_array(num_parents, sizeof(*mux_table), + GFP_KERNEL); + if (!mux_table) + goto err_free; + + PMC_INIT_TABLE(mux_table, 4); + PMC_FILL_TABLE(&mux_table[4], sam9x7_gck[i].pp_mux_table, + sam9x7_gck[i].pp_count); + PMC_FILL_TABLE(&parent_names[4], sam9x7_gck[i].pp, + sam9x7_gck[i].pp_count); + + hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, + &sam9x7_pcr_layout, + sam9x7_gck[i].n, + 
parent_names, NULL, mux_table, + num_parents, + sam9x7_gck[i].id, + &sam9x7_gck[i].r, + sam9x7_gck[i].pp_chg_id); + if (IS_ERR(hw)) + goto err_free; + + sam9x7_pmc->ghws[sam9x7_gck[i].id] = hw; + clk_mux_buffer[clk_mux_buffer_size++] = mux_table; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sam9x7_pmc); + kfree(clk_mux_buffer); + + return; + +err_free: + if (clk_mux_buffer) { + for (i = 0; i < clk_mux_buffer_size; i++) + kfree(clk_mux_buffer[i]); + kfree(clk_mux_buffer); + } + kfree(sam9x7_pmc); +} + +/* Some clks are used for a clocksource */ +CLK_OF_DECLARE(sam9x7_pmc, "microchip,sam9x7-pmc", sam9x7_pmc_setup); diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c index 91b5c6f1481964..8385badc1c7067 100644 --- a/drivers/clk/at91/sama7g5.c +++ b/drivers/clk/at91/sama7g5.c @@ -16,21 +16,6 @@ #include "pmc.h" -#define SAMA7G5_INIT_TABLE(_table, _count) \ - do { \ - u8 _i; \ - for (_i = 0; _i < (_count); _i++) \ - (_table)[_i] = _i; \ - } while (0) - -#define SAMA7G5_FILL_TABLE(_to, _from, _count) \ - do { \ - u8 _i; \ - for (_i = 0; _i < (_count); _i++) { \ - (_to)[_i] = (_from)[_i]; \ - } \ - } while (0) - static DEFINE_SPINLOCK(pmc_pll_lock); static DEFINE_SPINLOCK(pmc_mck0_lock); static DEFINE_SPINLOCK(pmc_mckX_lock); @@ -66,6 +51,7 @@ enum pll_component_id { PLL_COMPID_FRAC, PLL_COMPID_DIV0, PLL_COMPID_DIV1, + PLL_COMPID_MAX, }; /* @@ -116,11 +102,17 @@ static const struct clk_range pll_outputs[] = { { .min = 2343750, .max = 1200000000 }, }; +/* Fractional PLL core output range. */ +static const struct clk_range core_outputs[] = { + { .min = 600000000, .max = 1200000000 }, +}; + /* CPU PLL characteristics. */ static const struct clk_pll_characteristics cpu_pll_characteristics = { .input = { .min = 12000000, .max = 50000000 }, .num_output = ARRAY_SIZE(cpu_pll_outputs), .output = cpu_pll_outputs, + .core_output = core_outputs, }; /* PLL characteristics. 
*/ @@ -128,6 +120,7 @@ static const struct clk_pll_characteristics pll_characteristics = { .input = { .min = 12000000, .max = 50000000 }, .num_output = ARRAY_SIZE(pll_outputs), .output = pll_outputs, + .core_output = core_outputs, }; /* @@ -165,7 +158,7 @@ static struct sama7g5_pll { u8 t; u8 eid; u8 safe_div; -} sama7g5_plls[][PLL_ID_MAX] = { +} sama7g5_plls[][PLL_COMPID_MAX] = { [PLL_ID_CPU] = { [PLL_COMPID_FRAC] = { .n = "cpupll_fracck", @@ -1038,7 +1031,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np) sama7g5_pmc->chws[PMC_MAIN] = hw; for (i = 0; i < PLL_ID_MAX; i++) { - for (j = 0; j < 3; j++) { + for (j = 0; j < PLL_COMPID_MAX; j++) { struct clk_hw *parent_hw; if (!sama7g5_plls[i][j].n) @@ -1112,17 +1105,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np) if (!mux_table) goto err_free; - SAMA7G5_INIT_TABLE(mux_table, 3); - SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table, - sama7g5_mckx[i].ep_count); + PMC_INIT_TABLE(mux_table, 3); + PMC_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table, + sama7g5_mckx[i].ep_count); for (j = 0; j < sama7g5_mckx[i].ep_count; j++) { u8 pll_id = sama7g5_mckx[i].ep[j].pll_id; u8 pll_compid = sama7g5_mckx[i].ep[j].pll_compid; tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw; } - SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws, - sama7g5_mckx[i].ep_count); + PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws, + sama7g5_mckx[i].ep_count); hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n, num_parents, NULL, parent_hws, mux_table, @@ -1208,17 +1201,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np) if (!mux_table) goto err_free; - SAMA7G5_INIT_TABLE(mux_table, 3); - SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table, - sama7g5_gck[i].pp_count); + PMC_INIT_TABLE(mux_table, 3); + PMC_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table, + sama7g5_gck[i].pp_count); for (j = 0; j < sama7g5_gck[i].pp_count; j++) { u8 pll_id = sama7g5_gck[i].pp[j].pll_id; u8 pll_compid = sama7g5_gck[i].pp[j].pll_compid; tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw; } - SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws, - sama7g5_gck[i].pp_count); + PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws, + sama7g5_gck[i].pp_count); hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, &sama7g5_pcr_layout, diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c index 2334e6c334cf85..9667ce89842803 100644 --- a/drivers/clk/axs10x/i2s_pll_clock.c +++ b/drivers/clk/axs10x/i2s_pll_clock.c @@ -215,7 +215,7 @@ static struct platform_driver i2s_pll_clk_driver = { .of_match_table = i2s_pll_clk_id, }, .probe = i2s_pll_clk_probe, - .remove_new = i2s_pll_clk_remove, + .remove = i2s_pll_clk_remove, }; module_platform_driver(i2s_pll_clk_driver); diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c index 3cb235df9d379f..e79720e85685bf 100644 --- a/drivers/clk/bcm/clk-bcm2711-dvp.c +++ b/drivers/clk/bcm/clk-bcm2711-dvp.c @@ -110,7 +110,7 @@ MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids); static struct platform_driver clk_dvp_driver = { .probe = clk_dvp_probe, - .remove_new = clk_dvp_remove, + .remove = clk_dvp_remove, .driver = { .name = "brcm2711-dvp", .of_match_table = clk_dvp_dt_ids, diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c index 84f2af736ee8a6..83ef41d618be37 100644 --- a/drivers/clk/bcm/clk-bcm53573-ilp.c +++ b/drivers/clk/bcm/clk-bcm53573-ilp.c @@ -112,7 +112,7 @@ static void 
bcm53573_ilp_init(struct device_node *np) goto err_free_ilp; } - ilp->regmap = syscon_node_to_regmap(of_get_parent(np)); + ilp->regmap = syscon_node_to_regmap(np->parent); if (IS_ERR(ilp->regmap)) { err = PTR_ERR(ilp->regmap); goto err_free_ilp; diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c index 36c7b302e39682..d6d857474436e8 100644 --- a/drivers/clk/bcm/clk-bcm63xx-gate.c +++ b/drivers/clk/bcm/clk-bcm63xx-gate.c @@ -567,7 +567,7 @@ static const struct of_device_id clk_bcm63xx_dt_ids[] = { static struct platform_driver clk_bcm63xx = { .probe = clk_bcm63xx_probe, - .remove_new = clk_bcm63xx_remove, + .remove = clk_bcm63xx_remove, .driver = { .name = "bcm63xx-clock", .of_match_table = clk_bcm63xx_dt_ids, diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c index 4d411408e4afef..a18a8768feb405 100644 --- a/drivers/clk/bcm/clk-raspberrypi.c +++ b/drivers/clk/bcm/clk-raspberrypi.c @@ -458,7 +458,7 @@ static struct platform_driver raspberrypi_clk_driver = { .of_match_table = raspberrypi_clk_match, }, .probe = raspberrypi_clk_probe, - .remove_new = raspberrypi_clk_remove, + .remove = raspberrypi_clk_remove, }; module_platform_driver(raspberrypi_clk_driver); diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c index 05842056202079..303a0bb26e54a9 100644 --- a/drivers/clk/clk-conf.c +++ b/drivers/clk/clk-conf.c @@ -10,6 +10,7 @@ #include #include #include +#include static int __set_clk_parents(struct device_node *node, bool clk_supplier) { @@ -81,11 +82,44 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) static int __set_clk_rates(struct device_node *node, bool clk_supplier) { struct of_phandle_args clkspec; - int rc, index = 0; + int rc, count, count_64, index; struct clk *clk; - u32 rate; + u64 *rates_64 __free(kfree) = NULL; + u32 *rates __free(kfree) = NULL; + + count = of_property_count_u32_elems(node, "assigned-clock-rates"); + count_64 = of_property_count_u64_elems(node, "assigned-clock-rates-u64"); + if (count_64 > 0) { + count = count_64; + rates_64 = kcalloc(count, sizeof(*rates_64), GFP_KERNEL); + if (!rates_64) + return -ENOMEM; + + rc = of_property_read_u64_array(node, + "assigned-clock-rates-u64", + rates_64, count); + } else if (count > 0) { + rates = kcalloc(count, sizeof(*rates), GFP_KERNEL); + if (!rates) + return -ENOMEM; + + rc = of_property_read_u32_array(node, "assigned-clock-rates", + rates, count); + } else { + return 0; + } + + if (rc) + return rc; + + for (index = 0; index < count; index++) { + unsigned long rate; + + if (rates_64) + rate = rates_64[index]; + else + rate = rates[index]; - of_property_for_each_u32(node, "assigned-clock-rates", rate) { if (rate) { rc = of_parse_phandle_with_args(node, "assigned-clocks", "#clock-cells", index, &clkspec); @@ -112,12 +146,11 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) rc = clk_set_rate(clk, rate); if (rc < 0) - pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n", + pr_err("clk: couldn't set %s clk rate to %lu (%d), current rate: %lu\n", __clk_get_name(clk), rate, rc, clk_get_rate(clk)); clk_put(clk); } - index++; } return 0; } diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index 90e6078fb6e1b2..82ae1f26e63457 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -99,6 +99,34 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id) } EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled); +struct clk 
*devm_clk_get_optional_enabled_with_rate(struct device *dev, + const char *id, + unsigned long rate) +{ + struct clk *clk; + int ret; + + clk = __devm_clk_get(dev, id, clk_get_optional, NULL, + clk_disable_unprepare); + if (IS_ERR(clk)) + return ERR_CAST(clk); + + ret = clk_set_rate(clk, rate); + if (ret) + goto out_put_clk; + + ret = clk_prepare_enable(clk); + if (ret) + goto out_put_clk; + + return clk; + +out_put_clk: + devm_clk_put(dev, clk); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled_with_rate); + struct clk_bulk_devres { struct clk_bulk_data *clks; int num_clks; diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c new file mode 100644 index 00000000000000..f888aed79b11b3 --- /dev/null +++ b/drivers/clk/clk-ep93xx.c @@ -0,0 +1,850 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Clock control for Cirrus EP93xx chips. + * Copyright (C) 2021 Nikita Shubin + * + * Based on a rewrite of arch/arm/mach-ep93xx/clock.c: + * Copyright (C) 2006 Lennert Buytenhek + */ +#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define EP93XX_EXT_CLK_RATE 14745600 +#define EP93XX_EXT_RTC_RATE 32768 + +#define EP93XX_SYSCON_POWER_STATE 0x00 +#define EP93XX_SYSCON_PWRCNT 0x04 +#define EP93XX_SYSCON_PWRCNT_UARTBAUD BIT(29) +#define EP93XX_SYSCON_PWRCNT_USH_EN 28 +#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 27 +#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 26 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 25 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 24 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 23 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 22 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 21 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 20 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 19 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 18 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 17 +#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 16 +#define EP93XX_SYSCON_CLKSET1 0x20 +#define EP93XX_SYSCON_CLKSET1_NBYP1 BIT(23) +#define EP93XX_SYSCON_CLKSET2 0x24 +#define EP93XX_SYSCON_CLKSET2_NBYP2 BIT(19) +#define EP93XX_SYSCON_CLKSET2_PLL2_EN BIT(18) +#define EP93XX_SYSCON_DEVCFG 0x80 +#define EP93XX_SYSCON_DEVCFG_U3EN 24 +#define EP93XX_SYSCON_DEVCFG_U2EN 20 +#define EP93XX_SYSCON_DEVCFG_U1EN 18 +#define EP93XX_SYSCON_VIDCLKDIV 0x84 +#define EP93XX_SYSCON_CLKDIV_ENABLE 15 +#define EP93XX_SYSCON_CLKDIV_ESEL BIT(14) +#define EP93XX_SYSCON_CLKDIV_PSEL BIT(13) +#define EP93XX_SYSCON_CLKDIV_MASK GENMASK(14, 13) +#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8 +#define EP93XX_SYSCON_I2SCLKDIV 0x8c +#define EP93XX_SYSCON_I2SCLKDIV_SENA 31 +#define EP93XX_SYSCON_I2SCLKDIV_ORIDE BIT(29) +#define EP93XX_SYSCON_I2SCLKDIV_SPOL BIT(19) +#define EP93XX_SYSCON_KEYTCHCLKDIV 0x90 +#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN 31 +#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV 16 +#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN 15 +#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV 0 +#define EP93XX_SYSCON_CHIPID 0x94 +#define EP93XX_SYSCON_CHIPID_ID 0x9213 + +#define EP93XX_FIXED_CLK_COUNT 21 + +static const char ep93xx_adc_divisors[] = { 16, 4 }; +static const char ep93xx_sclk_divisors[] = { 2, 4 }; +static const char ep93xx_lrclk_divisors[] = { 32, 64, 128 }; + +struct ep93xx_clk { + struct clk_hw hw; + u16 idx; + u16 reg; + u32 mask; + u8 bit_idx; + u8 shift; + u8 width; + u8 num_div; + const char *div; +}; + +struct ep93xx_clk_priv { + spinlock_t lock; + struct ep93xx_regmap_adev *aux_dev; + struct device *dev; + void __iomem *base; + struct regmap *map; + struct clk_hw 
*fixed[EP93XX_FIXED_CLK_COUNT]; + struct ep93xx_clk reg[]; +}; + +static struct ep93xx_clk *ep93xx_clk_from(struct clk_hw *hw) +{ + return container_of(hw, struct ep93xx_clk, hw); +} + +static struct ep93xx_clk_priv *ep93xx_priv_from(struct ep93xx_clk *clk) +{ + return container_of(clk, struct ep93xx_clk_priv, reg[clk->idx]); +} + +static void ep93xx_clk_write(struct ep93xx_clk_priv *priv, unsigned int reg, unsigned int val) +{ + struct ep93xx_regmap_adev *aux = priv->aux_dev; + + aux->write(aux->map, aux->lock, reg, val); +} + +static int ep93xx_clk_is_enabled(struct clk_hw *hw) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + u32 val; + + regmap_read(priv->map, clk->reg, &val); + + return !!(val & BIT(clk->bit_idx)); +} + +static int ep93xx_clk_enable(struct clk_hw *hw) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + u32 val; + + guard(spinlock_irqsave)(&priv->lock); + + regmap_read(priv->map, clk->reg, &val); + val |= BIT(clk->bit_idx); + + ep93xx_clk_write(priv, clk->reg, val); + + return 0; +} + +static void ep93xx_clk_disable(struct clk_hw *hw) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + u32 val; + + guard(spinlock_irqsave)(&priv->lock); + + regmap_read(priv->map, clk->reg, &val); + val &= ~BIT(clk->bit_idx); + + ep93xx_clk_write(priv, clk->reg, val); +} + +static const struct clk_ops clk_ep93xx_gate_ops = { + .enable = ep93xx_clk_enable, + .disable = ep93xx_clk_disable, + .is_enabled = ep93xx_clk_is_enabled, +}; + +static int ep93xx_clk_register_gate(struct ep93xx_clk *clk, + const char *name, + struct clk_parent_data *parent_data, + unsigned long flags, + unsigned int reg, + u8 bit_idx) +{ + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + struct clk_init_data init = { }; + + init.name = name; + init.ops = &clk_ep93xx_gate_ops; + init.flags = flags; + init.parent_data = parent_data; + init.num_parents = 1; + + clk->reg = reg; + clk->bit_idx = bit_idx; + clk->hw.init = &init; + + return devm_clk_hw_register(priv->dev, &clk->hw); +} + +static u8 ep93xx_mux_get_parent(struct clk_hw *hw) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + u32 val; + + regmap_read(priv->map, clk->reg, &val); + + val &= EP93XX_SYSCON_CLKDIV_MASK; + + switch (val) { + case EP93XX_SYSCON_CLKDIV_ESEL: + return 1; /* PLL1 */ + case EP93XX_SYSCON_CLKDIV_MASK: + return 2; /* PLL2 */ + default: + return 0; /* XTALI */ + }; +} + +static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + u32 val; + + if (index >= 3) + return -EINVAL; + + guard(spinlock_irqsave)(&priv->lock); + + regmap_read(priv->map, clk->reg, &val); + val &= ~(EP93XX_SYSCON_CLKDIV_MASK); + val |= index > 0 ? EP93XX_SYSCON_CLKDIV_ESEL : 0; + val |= index > 1 ? 
EP93XX_SYSCON_CLKDIV_PSEL : 0; + + ep93xx_clk_write(priv, clk->reg, val); + + return 0; +} + +static bool is_best(unsigned long rate, unsigned long now, + unsigned long best) +{ + return abs_diff(rate, now) < abs_diff(rate, best); +} + +static int ep93xx_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + unsigned long best_rate = 0, actual_rate, mclk_rate; + unsigned long rate = req->rate; + struct clk_hw *parent_best = NULL; + unsigned long parent_rate_best; + unsigned long parent_rate; + int div, pdiv; + unsigned int i; + + /* + * Try the two pll's and the external clock, + * because the valid predividers are 2, 2.5 and 3, we multiply + * all the clocks by 2 to avoid floating point math. + * + * This is based on the algorithm in the ep93xx raster guide: + * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf + * + */ + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i); + + parent_rate = clk_hw_get_rate(parent); + mclk_rate = parent_rate * 2; + + /* Try each predivider value */ + for (pdiv = 4; pdiv <= 6; pdiv++) { + div = DIV_ROUND_CLOSEST(mclk_rate, rate * pdiv); + if (!in_range(div, 1, 127)) + continue; + + actual_rate = DIV_ROUND_CLOSEST(mclk_rate, pdiv * div); + if (is_best(rate, actual_rate, best_rate)) { + best_rate = actual_rate; + parent_rate_best = parent_rate; + parent_best = parent; + } + } + } + + if (!parent_best) + return -EINVAL; + + req->best_parent_rate = parent_rate_best; + req->best_parent_hw = parent_best; + req->rate = best_rate; + + return 0; +} + +static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + unsigned int pdiv, div; + u32 val; + + regmap_read(priv->map, clk->reg, &val); + pdiv = (val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & GENMASK(1, 0); + div = val & GENMASK(6, 0); + if (!div) + return 0; + + return DIV_ROUND_CLOSEST(parent_rate * 2, (pdiv + 3) * div); +} + +static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + int pdiv, div, npdiv, ndiv; + unsigned long actual_rate, mclk_rate, rate_err = ULONG_MAX; + u32 val; + + regmap_read(priv->map, clk->reg, &val); + mclk_rate = parent_rate * 2; + + for (pdiv = 4; pdiv <= 6; pdiv++) { + div = DIV_ROUND_CLOSEST(mclk_rate, rate * pdiv); + if (!in_range(div, 1, 127)) + continue; + + actual_rate = DIV_ROUND_CLOSEST(mclk_rate, pdiv * div); + if (abs(actual_rate - rate) < rate_err) { + npdiv = pdiv - 3; + ndiv = div; + rate_err = abs(actual_rate - rate); + } + } + + if (rate_err == ULONG_MAX) + return -EINVAL; + + /* + * Clear old dividers. + * Bit 7 is reserved bit in all ClkDiv registers. 
+ */ + val &= ~(GENMASK(9, 0) & ~BIT(7)); + + /* Set the new pdiv and div bits for the new clock rate */ + val |= (npdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | ndiv; + + ep93xx_clk_write(priv, clk->reg, val); + + return 0; +} + +static const struct clk_ops clk_ddiv_ops = { + .enable = ep93xx_clk_enable, + .disable = ep93xx_clk_disable, + .is_enabled = ep93xx_clk_is_enabled, + .get_parent = ep93xx_mux_get_parent, + .set_parent = ep93xx_mux_set_parent_lock, + .determine_rate = ep93xx_mux_determine_rate, + .recalc_rate = ep93xx_ddiv_recalc_rate, + .set_rate = ep93xx_ddiv_set_rate, +}; + +static int ep93xx_clk_register_ddiv(struct ep93xx_clk *clk, + const char *name, + struct clk_parent_data *parent_data, + u8 num_parents, + unsigned int reg, + u8 bit_idx) +{ + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + struct clk_init_data init = { }; + + init.name = name; + init.ops = &clk_ddiv_ops; + init.flags = 0; + init.parent_data = parent_data; + init.num_parents = num_parents; + + clk->reg = reg; + clk->bit_idx = bit_idx; + clk->hw.init = &init; + + return devm_clk_hw_register(priv->dev, &clk->hw); +} + +static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + u32 val; + u8 index; + + regmap_read(priv->map, clk->reg, &val); + index = (val & clk->mask) >> clk->shift; + if (index >= clk->num_div) + return 0; + + return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]); +} + +static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + unsigned long best = 0, now; + unsigned int i; + + for (i = 0; i < clk->num_div; i++) { + if ((rate * clk->div[i]) == *parent_rate) + return rate; + + now = DIV_ROUND_CLOSEST(*parent_rate, clk->div[i]); + if (!best || is_best(rate, now, best)) + best = now; + } + + return best; +} + +static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ep93xx_clk *clk = ep93xx_clk_from(hw); + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + unsigned int i; + u32 val; + + regmap_read(priv->map, clk->reg, &val); + val &= ~clk->mask; + for (i = 0; i < clk->num_div; i++) + if (rate == DIV_ROUND_CLOSEST(parent_rate, clk->div[i])) + break; + + if (i == clk->num_div) + return -EINVAL; + + val |= i << clk->shift; + + ep93xx_clk_write(priv, clk->reg, val); + + return 0; +} + +static const struct clk_ops ep93xx_div_ops = { + .enable = ep93xx_clk_enable, + .disable = ep93xx_clk_disable, + .is_enabled = ep93xx_clk_is_enabled, + .recalc_rate = ep93xx_div_recalc_rate, + .round_rate = ep93xx_div_round_rate, + .set_rate = ep93xx_div_set_rate, +}; + +static int ep93xx_register_div(struct ep93xx_clk *clk, + const char *name, + const struct clk_parent_data *parent_data, + unsigned int reg, + u8 enable_bit, + u8 shift, + u8 width, + const char *clk_divisors, + u8 num_div) +{ + struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk); + struct clk_init_data init = { }; + + init.name = name; + init.ops = &ep93xx_div_ops; + init.flags = 0; + init.parent_data = parent_data; + init.num_parents = 1; + + clk->reg = reg; + clk->bit_idx = enable_bit; + clk->mask = GENMASK(shift + width - 1, shift); + clk->shift = shift; + clk->div = clk_divisors; + clk->num_div = num_div; + clk->hw.init = &init; + + return devm_clk_hw_register(priv->dev, &clk->hw); +} + +struct ep93xx_gate { + unsigned int idx; + unsigned int bit; 
+ const char *name; +}; + +static const struct ep93xx_gate ep93xx_uarts[] = { + { EP93XX_CLK_UART1, EP93XX_SYSCON_DEVCFG_U1EN, "uart1" }, + { EP93XX_CLK_UART2, EP93XX_SYSCON_DEVCFG_U2EN, "uart2" }, + { EP93XX_CLK_UART3, EP93XX_SYSCON_DEVCFG_U3EN, "uart3" }, +}; + +static int ep93xx_uart_clock_init(struct ep93xx_clk_priv *priv) +{ + struct clk_parent_data parent_data = { }; + unsigned int i, idx, ret, clk_uart_div; + struct ep93xx_clk *clk; + u32 val; + + regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val); + if (val & EP93XX_SYSCON_PWRCNT_UARTBAUD) + clk_uart_div = 1; + else + clk_uart_div = 2; + + priv->fixed[EP93XX_CLK_UART] = + devm_clk_hw_register_fixed_factor_index(priv->dev, "uart", + 0, /* XTALI external clock */ + 0, 1, clk_uart_div); + parent_data.hw = priv->fixed[EP93XX_CLK_UART]; + + /* parenting uart gate clocks to uart clock */ + for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) { + idx = ep93xx_uarts[i].idx - EP93XX_CLK_UART1; + clk = &priv->reg[idx]; + clk->idx = idx; + ret = ep93xx_clk_register_gate(clk, + ep93xx_uarts[i].name, + &parent_data, CLK_SET_RATE_PARENT, + EP93XX_SYSCON_DEVCFG, + ep93xx_uarts[i].bit); + if (ret) + return dev_err_probe(priv->dev, ret, + "failed to register uart[%d] clock\n", i); + } + + return 0; +} + +static const struct ep93xx_gate ep93xx_dmas[] = { + { EP93XX_CLK_M2M0, EP93XX_SYSCON_PWRCNT_DMA_M2M0, "m2m0" }, + { EP93XX_CLK_M2M1, EP93XX_SYSCON_PWRCNT_DMA_M2M1, "m2m1" }, + { EP93XX_CLK_M2P0, EP93XX_SYSCON_PWRCNT_DMA_M2P0, "m2p0" }, + { EP93XX_CLK_M2P1, EP93XX_SYSCON_PWRCNT_DMA_M2P1, "m2p1" }, + { EP93XX_CLK_M2P2, EP93XX_SYSCON_PWRCNT_DMA_M2P2, "m2p2" }, + { EP93XX_CLK_M2P3, EP93XX_SYSCON_PWRCNT_DMA_M2P3, "m2p3" }, + { EP93XX_CLK_M2P4, EP93XX_SYSCON_PWRCNT_DMA_M2P4, "m2p4" }, + { EP93XX_CLK_M2P5, EP93XX_SYSCON_PWRCNT_DMA_M2P5, "m2p5" }, + { EP93XX_CLK_M2P6, EP93XX_SYSCON_PWRCNT_DMA_M2P6, "m2p6" }, + { EP93XX_CLK_M2P7, EP93XX_SYSCON_PWRCNT_DMA_M2P7, "m2p7" }, + { EP93XX_CLK_M2P8, EP93XX_SYSCON_PWRCNT_DMA_M2P8, "m2p8" }, + { EP93XX_CLK_M2P9, EP93XX_SYSCON_PWRCNT_DMA_M2P9, "m2p9" }, +}; + +static int ep93xx_dma_clock_init(struct ep93xx_clk_priv *priv) +{ + struct clk_parent_data parent_data = { }; + unsigned int i, idx; + + parent_data.hw = priv->fixed[EP93XX_CLK_HCLK]; + for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) { + idx = ep93xx_dmas[i].idx; + priv->fixed[idx] = devm_clk_hw_register_gate_parent_data(priv->dev, + ep93xx_dmas[i].name, + &parent_data, 0, + priv->base + EP93XX_SYSCON_PWRCNT, + ep93xx_dmas[i].bit, + 0, + &priv->lock); + if (IS_ERR(priv->fixed[idx])) + return PTR_ERR(priv->fixed[idx]); + } + + return 0; +} + +static struct clk_hw *of_clk_ep93xx_get(struct of_phandle_args *clkspec, void *data) +{ + struct ep93xx_clk_priv *priv = data; + unsigned int idx = clkspec->args[0]; + + if (idx < EP93XX_CLK_UART1) + return priv->fixed[idx]; + + if (idx <= EP93XX_CLK_I2S_LRCLK) + return &priv->reg[idx - EP93XX_CLK_UART1].hw; + + return ERR_PTR(-EINVAL); +} + +/* + * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS + */ +static unsigned long calc_pll_rate(u64 rate, u32 config_word) +{ + rate *= ((config_word >> 11) & GENMASK(4, 0)) + 1; /* X1FBD */ + rate *= ((config_word >> 5) & GENMASK(5, 0)) + 1; /* X2FBD */ + do_div(rate, (config_word & GENMASK(4, 0)) + 1); /* X2IPD */ + rate >>= (config_word >> 16) & GENMASK(1, 0); /* PS */ + + return rate; +} + +static int ep93xx_plls_init(struct ep93xx_clk_priv *priv) +{ + const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 }; + const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 
}; + const char pclk_divisors[] = { 1, 2, 4, 8 }; + struct clk_parent_data xtali = { .index = 0 }; + unsigned int clk_f_div, clk_h_div, clk_p_div; + unsigned long clk_pll1_rate, clk_pll2_rate; + struct device *dev = priv->dev; + struct clk_hw *hw, *pll1; + u32 value; + + /* Determine the bootloader configured pll1 rate */ + regmap_read(priv->map, EP93XX_SYSCON_CLKSET1, &value); + + if (value & EP93XX_SYSCON_CLKSET1_NBYP1) + clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value); + else + clk_pll1_rate = EP93XX_EXT_CLK_RATE; + + pll1 = devm_clk_hw_register_fixed_rate_parent_data(dev, "pll1", &xtali, + 0, clk_pll1_rate); + if (IS_ERR(pll1)) + return PTR_ERR(pll1); + + priv->fixed[EP93XX_CLK_PLL1] = pll1; + + /* Initialize the pll1 derived clocks */ + clk_f_div = fclk_divisors[(value >> 25) & GENMASK(2, 0)]; + clk_h_div = hclk_divisors[(value >> 20) & GENMASK(2, 0)]; + clk_p_div = pclk_divisors[(value >> 18) & GENMASK(1, 0)]; + + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "fclk", pll1, 0, 1, clk_f_div); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->fixed[EP93XX_CLK_FCLK] = hw; + + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "hclk", pll1, 0, 1, clk_h_div); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->fixed[EP93XX_CLK_HCLK] = hw; + + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "pclk", hw, 0, 1, clk_p_div); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->fixed[EP93XX_CLK_PCLK] = hw; + + /* Determine the bootloader configured pll2 rate */ + regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value); + if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2)) + clk_pll2_rate = EP93XX_EXT_CLK_RATE; + else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN) + clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value); + else + clk_pll2_rate = 0; + + hw = devm_clk_hw_register_fixed_rate_parent_data(dev, "pll2", &xtali, + 0, clk_pll2_rate); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->fixed[EP93XX_CLK_PLL2] = hw; + + return 0; +} + +static int ep93xx_clk_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev); + struct clk_parent_data xtali = { .index = 0 }; + struct clk_parent_data ddiv_pdata[3] = { }; + unsigned int clk_spi_div, clk_usb_div; + struct clk_parent_data pdata = {}; + struct device *dev = &adev->dev; + struct ep93xx_clk_priv *priv; + struct ep93xx_clk *clk; + struct clk_hw *hw; + unsigned int idx; + int ret; + u32 value; + + priv = devm_kzalloc(dev, struct_size(priv, reg, 10), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->lock); + priv->dev = dev; + priv->aux_dev = rdev; + priv->map = rdev->map; + priv->base = rdev->base; + + ret = ep93xx_plls_init(priv); + if (ret) + return ret; + + regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value); + clk_usb_div = (value >> 28 & GENMASK(3, 0)) + 1; + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "usb_clk", + priv->fixed[EP93XX_CLK_PLL2], 0, 1, + clk_usb_div); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->fixed[EP93XX_CLK_USB] = hw; + + ret = ep93xx_uart_clock_init(priv); + if (ret) + return ret; + + ret = ep93xx_dma_clock_init(priv); + if (ret) + return ret; + + clk_spi_div = id->driver_data; + hw = devm_clk_hw_register_fixed_factor_index(dev, "ep93xx-spi.0", + 0, /* XTALI external clock */ + 0, 1, clk_spi_div); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->fixed[EP93XX_CLK_SPI] = hw; + + /* PWM clock */ + hw = devm_clk_hw_register_fixed_factor_index(dev, "pwm_clk", 0, /* XTALI external clock */ + 0, 
1, 1); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->fixed[EP93XX_CLK_PWM] = hw; + + /* USB clock */ + pdata.hw = priv->fixed[EP93XX_CLK_USB]; + hw = devm_clk_hw_register_gate_parent_data(priv->dev, "ohci-platform", &pdata, + 0, priv->base + EP93XX_SYSCON_PWRCNT, + EP93XX_SYSCON_PWRCNT_USH_EN, 0, + &priv->lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->fixed[EP93XX_CLK_USB] = hw; + + ddiv_pdata[0].index = 0; /* XTALI external clock */ + ddiv_pdata[1].hw = priv->fixed[EP93XX_CLK_PLL1]; + ddiv_pdata[2].hw = priv->fixed[EP93XX_CLK_PLL2]; + + /* touchscreen/ADC clock */ + idx = EP93XX_CLK_ADC - EP93XX_CLK_UART1; + clk = &priv->reg[idx]; + clk->idx = idx; + ret = ep93xx_register_div(clk, "ep93xx-adc", &xtali, + EP93XX_SYSCON_KEYTCHCLKDIV, + EP93XX_SYSCON_KEYTCHCLKDIV_TSEN, + EP93XX_SYSCON_KEYTCHCLKDIV_ADIV, + 1, + ep93xx_adc_divisors, + ARRAY_SIZE(ep93xx_adc_divisors)); + + + /* keypad clock */ + idx = EP93XX_CLK_KEYPAD - EP93XX_CLK_UART1; + clk = &priv->reg[idx]; + clk->idx = idx; + ret = ep93xx_register_div(clk, "ep93xx-keypad", &xtali, + EP93XX_SYSCON_KEYTCHCLKDIV, + EP93XX_SYSCON_KEYTCHCLKDIV_KEN, + EP93XX_SYSCON_KEYTCHCLKDIV_KDIV, + 1, + ep93xx_adc_divisors, + ARRAY_SIZE(ep93xx_adc_divisors)); + + /* + * On reset PDIV and VDIV is set to zero, while PDIV zero + * means clock disable, VDIV shouldn't be zero. + * So we set both video and i2s dividers to minimum. + * ENA - Enable CLK divider. + * PDIV - 00 - Disable clock + * VDIV - at least 2 + */ + + /* Check and enable video clk registers */ + regmap_read(priv->map, EP93XX_SYSCON_VIDCLKDIV, &value); + value |= BIT(EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2; + ep93xx_clk_write(priv, EP93XX_SYSCON_VIDCLKDIV, value); + + /* Check and enable i2s clk registers */ + regmap_read(priv->map, EP93XX_SYSCON_I2SCLKDIV, &value); + value |= BIT(EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2; + + /* + * Override the SAI_MSTR_CLK_CFG from the I2S block and use the + * I2SClkDiv Register settings. LRCLK transitions on the falling SCLK + * edge. 
+ */ + value |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL; + ep93xx_clk_write(priv, EP93XX_SYSCON_I2SCLKDIV, value); + + /* video clk */ + idx = EP93XX_CLK_VIDEO - EP93XX_CLK_UART1; + clk = &priv->reg[idx]; + clk->idx = idx; + ret = ep93xx_clk_register_ddiv(clk, "ep93xx-fb", + ddiv_pdata, ARRAY_SIZE(ddiv_pdata), + EP93XX_SYSCON_VIDCLKDIV, + EP93XX_SYSCON_CLKDIV_ENABLE); + + /* i2s clk */ + idx = EP93XX_CLK_I2S_MCLK - EP93XX_CLK_UART1; + clk = &priv->reg[idx]; + clk->idx = idx; + ret = ep93xx_clk_register_ddiv(clk, "mclk", + ddiv_pdata, ARRAY_SIZE(ddiv_pdata), + EP93XX_SYSCON_I2SCLKDIV, + EP93XX_SYSCON_CLKDIV_ENABLE); + + /* i2s sclk */ + idx = EP93XX_CLK_I2S_SCLK - EP93XX_CLK_UART1; + clk = &priv->reg[idx]; + clk->idx = idx; + pdata.hw = &priv->reg[EP93XX_CLK_I2S_MCLK - EP93XX_CLK_UART1].hw; + ret = ep93xx_register_div(clk, "sclk", &pdata, + EP93XX_SYSCON_I2SCLKDIV, + EP93XX_SYSCON_I2SCLKDIV_SENA, + 16, /* EP93XX_I2SCLKDIV_SDIV_SHIFT */ + 1, /* EP93XX_I2SCLKDIV_SDIV_WIDTH */ + ep93xx_sclk_divisors, + ARRAY_SIZE(ep93xx_sclk_divisors)); + + /* i2s lrclk */ + idx = EP93XX_CLK_I2S_LRCLK - EP93XX_CLK_UART1; + clk = &priv->reg[idx]; + clk->idx = idx; + pdata.hw = &priv->reg[EP93XX_CLK_I2S_SCLK - EP93XX_CLK_UART1].hw; + ret = ep93xx_register_div(clk, "lrclk", &pdata, + EP93XX_SYSCON_I2SCLKDIV, + EP93XX_SYSCON_I2SCLKDIV_SENA, + 17, /* EP93XX_I2SCLKDIV_LRDIV32_SHIFT */ + 2, /* EP93XX_I2SCLKDIV_LRDIV32_WIDTH */ + ep93xx_lrclk_divisors, + ARRAY_SIZE(ep93xx_lrclk_divisors)); + + /* The IrDA clk uses the same pattern, but no init code is present in the original clock driver */ + return devm_of_clk_add_hw_provider(priv->dev, of_clk_ep93xx_get, priv); +} + +static const struct auxiliary_device_id ep93xx_clk_ids[] = { + { .name = "soc_ep93xx.clk-ep93xx", .driver_data = 2, }, + { .name = "soc_ep93xx.clk-ep93xx.e2", .driver_data = 1, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(auxiliary, ep93xx_clk_ids); + +static struct auxiliary_driver ep93xx_clk_driver = { + .probe = ep93xx_clk_probe, + .id_table = ep93xx_clk_ids, +}; +module_auxiliary_driver(ep93xx_clk_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Nikita Shubin "); +MODULE_DESCRIPTION("Clock control for Cirrus EP93xx chips"); diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index fe0500a1af3ea8..8fba63fc70c554 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -405,7 +405,7 @@ static struct platform_driver of_fixed_factor_clk_driver = { .of_match_table = of_fixed_factor_clk_ids, }, .probe = of_fixed_factor_clk_probe, - .remove_new = of_fixed_factor_clk_remove, + .remove = of_fixed_factor_clk_remove, }; builtin_platform_driver(of_fixed_factor_clk_driver); #endif diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c index 0e08cb22c196a9..3bfcf4cd98a221 100644 --- a/drivers/clk/clk-fixed-mmio.c +++ b/drivers/clk/clk-fixed-mmio.c @@ -91,7 +91,7 @@ static struct platform_driver of_fixed_mmio_clk_driver = { .of_match_table = of_fixed_mmio_clk_ids, }, .probe = of_fixed_mmio_clk_probe, - .remove_new = of_fixed_mmio_clk_remove, + .remove = of_fixed_mmio_clk_remove, }; module_platform_driver(of_fixed_mmio_clk_driver); diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index 3481eb8cdeb3b9..6b4f76b9c4da96 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -232,7 +232,7 @@ static struct platform_driver of_fixed_clk_driver = { .of_match_table = of_fixed_clk_ids, }, .probe = of_fixed_clk_probe, - .remove_new = 
of_fixed_clk_remove, + .remove = of_fixed_clk_remove, }; builtin_platform_driver(of_fixed_clk_driver); #endif diff --git a/drivers/clk/clk-fixed-rate_test.c b/drivers/clk/clk-fixed-rate_test.c new file mode 100644 index 00000000000000..0e04c10a21aadb --- /dev/null +++ b/drivers/clk/clk-fixed-rate_test.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test for clk fixed rate basic type + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "clk-fixed-rate_test.h" + +/** + * struct clk_hw_fixed_rate_kunit_params - Parameters to pass to __clk_hw_register_fixed_rate() + * @dev: device registering clk + * @np: device_node of device registering clk + * @name: name of clk + * @parent_name: parent name of clk + * @parent_hw: clk_hw pointer to parent of clk + * @parent_data: parent_data describing parent of clk + * @flags: clk framework flags + * @fixed_rate: frequency of clk + * @fixed_accuracy: accuracy of clk + * @clk_fixed_flags: fixed rate specific clk flags + */ +struct clk_hw_fixed_rate_kunit_params { + struct device *dev; + struct device_node *np; + const char *name; + const char *parent_name; + const struct clk_hw *parent_hw; + const struct clk_parent_data *parent_data; + unsigned long flags; + unsigned long fixed_rate; + unsigned long fixed_accuracy; + unsigned long clk_fixed_flags; +}; + +static int +clk_hw_register_fixed_rate_kunit_init(struct kunit_resource *res, void *context) +{ + struct clk_hw_fixed_rate_kunit_params *params = context; + struct clk_hw *hw; + + hw = __clk_hw_register_fixed_rate(params->dev, params->np, + params->name, + params->parent_name, + params->parent_hw, + params->parent_data, + params->flags, + params->fixed_rate, + params->fixed_accuracy, + params->clk_fixed_flags, + false); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + res->data = hw; + + return 0; +} + +static void clk_hw_register_fixed_rate_kunit_exit(struct kunit_resource *res) +{ + struct clk_hw *hw = res->data; + + clk_hw_unregister_fixed_rate(hw); +} + +/** + * clk_hw_register_fixed_rate_kunit() - Test managed __clk_hw_register_fixed_rate() + * @test: The test context + * @params: Arguments to __clk_hw_register_fixed_rate() + * + * Return: Registered fixed rate clk_hw or ERR_PTR on failure + */ +static struct clk_hw * +clk_hw_register_fixed_rate_kunit(struct kunit *test, + struct clk_hw_fixed_rate_kunit_params *params) +{ + struct clk_hw *hw; + + hw = kunit_alloc_resource(test, + clk_hw_register_fixed_rate_kunit_init, + clk_hw_register_fixed_rate_kunit_exit, + GFP_KERNEL, params); + if (!hw) + return ERR_PTR(-EINVAL); + + return hw; +} + +/** + * clk_hw_unregister_fixed_rate_kunit() - Test managed clk_hw_unregister_fixed_rate() + * @test: The test context + * @hw: fixed rate clk to unregister upon test completion + * + * Automatically unregister @hw when @test is complete via + * clk_hw_unregister_fixed_rate(). + * + * Return: 0 on success or negative errno on failure + */ +static int clk_hw_unregister_fixed_rate_kunit(struct kunit *test, struct clk_hw *hw) +{ + if (!kunit_alloc_resource(test, NULL, + clk_hw_register_fixed_rate_kunit_exit, + GFP_KERNEL, hw)) + return -ENOMEM; + + return 0; +} + +/* + * Test that clk_get_rate() on a fixed rate clk registered with + * clk_hw_register_fixed_rate() gets the proper frequency. 
+ */ +static void clk_fixed_rate_rate_test(struct kunit *test) +{ + struct clk_hw *hw; + struct clk *clk; + const unsigned long fixed_rate = 230000; + + hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", NULL, 0, fixed_rate); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw); + KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw)); + + clk = clk_hw_get_clk_prepared_enabled_kunit(test, hw, __func__); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk); + + KUNIT_EXPECT_EQ(test, fixed_rate, clk_get_rate(clk)); +} + +/* + * Test that clk_get_accuracy() on a fixed rate clk registered via + * clk_hw_register_fixed_rate_with_accuracy() gets the proper accuracy. + */ +static void clk_fixed_rate_accuracy_test(struct kunit *test) +{ + struct clk_hw *hw; + struct clk *clk; + const unsigned long fixed_accuracy = 5000; + + hw = clk_hw_register_fixed_rate_with_accuracy(NULL, "test-fixed-rate", + NULL, 0, 0, + fixed_accuracy); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw); + KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw)); + + clk = clk_hw_get_clk_kunit(test, hw, __func__); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk); + + KUNIT_EXPECT_EQ(test, fixed_accuracy, clk_get_accuracy(clk)); +} + +/* Test suite for a fixed rate clk without any parent */ +static struct kunit_case clk_fixed_rate_test_cases[] = { + KUNIT_CASE(clk_fixed_rate_rate_test), + KUNIT_CASE(clk_fixed_rate_accuracy_test), + {} +}; + +static struct kunit_suite clk_fixed_rate_suite = { + .name = "clk_fixed_rate", + .test_cases = clk_fixed_rate_test_cases, +}; + +/* + * Test that clk_get_parent() on a fixed rate clk gets the proper parent. + */ +static void clk_fixed_rate_parent_test(struct kunit *test) +{ + struct clk_hw *hw, *parent_hw; + struct clk *expected_parent, *actual_parent; + struct clk *clk; + const char *parent_name = "test-fixed-rate-parent"; + struct clk_hw_fixed_rate_kunit_params parent_params = { + .name = parent_name, + }; + + parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw); + KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw)); + + expected_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent); + + hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", parent_name, 0, 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw); + KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw)); + + clk = clk_hw_get_clk_kunit(test, hw, __func__); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk); + + actual_parent = clk_get_parent(clk); + KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent)); +} + +/* + * Test that clk_get_rate() on a fixed rate clk ignores the parent rate. 
+ */ +static void clk_fixed_rate_parent_rate_test(struct kunit *test) +{ + struct clk_hw *hw, *parent_hw; + struct clk *clk; + const unsigned long expected_rate = 1405; + const unsigned long parent_rate = 90402; + const char *parent_name = "test-fixed-rate-parent"; + struct clk_hw_fixed_rate_kunit_params parent_params = { + .name = parent_name, + .fixed_rate = parent_rate, + }; + + parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw); + KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw)); + + hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", parent_name, 0, + expected_rate); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw); + KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw)); + + clk = clk_hw_get_clk_prepared_enabled_kunit(test, hw, __func__); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk); + + KUNIT_EXPECT_EQ(test, expected_rate, clk_get_rate(clk)); +} + +/* + * Test that clk_get_accuracy() on a fixed rate clk ignores the parent accuracy. + */ +static void clk_fixed_rate_parent_accuracy_test(struct kunit *test) +{ + struct clk_hw *hw, *parent_hw; + struct clk *clk; + const unsigned long expected_accuracy = 900; + const unsigned long parent_accuracy = 24000; + const char *parent_name = "test-fixed-rate-parent"; + struct clk_hw_fixed_rate_kunit_params parent_params = { + .name = parent_name, + .fixed_accuracy = parent_accuracy, + }; + + parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw); + KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw)); + + hw = clk_hw_register_fixed_rate_with_accuracy(NULL, "test-fixed-rate", + parent_name, 0, 0, + expected_accuracy); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw); + KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw)); + + clk = clk_hw_get_clk_kunit(test, hw, __func__); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk); + + KUNIT_EXPECT_EQ(test, expected_accuracy, clk_get_accuracy(clk)); +} + +/* Test suite for a fixed rate clk with a parent */ +static struct kunit_case clk_fixed_rate_parent_test_cases[] = { + KUNIT_CASE(clk_fixed_rate_parent_test), + KUNIT_CASE(clk_fixed_rate_parent_rate_test), + KUNIT_CASE(clk_fixed_rate_parent_accuracy_test), + {} +}; + +static struct kunit_suite clk_fixed_rate_parent_suite = { + .name = "clk_fixed_rate_parent", + .test_cases = clk_fixed_rate_parent_test_cases, +}; + +struct clk_fixed_rate_of_test_context { + struct device *dev; + struct platform_driver pdrv; + struct completion probed; +}; + +static inline struct clk_fixed_rate_of_test_context * +pdev_to_clk_fixed_rate_of_test_context(struct platform_device *pdev) +{ + return container_of(to_platform_driver(pdev->dev.driver), + struct clk_fixed_rate_of_test_context, + pdrv); +} + +/* + * Test that of_fixed_clk_setup() registers a fixed rate clk with the proper + * rate. + */ +static void clk_fixed_rate_of_probe_test(struct kunit *test) +{ + struct clk_fixed_rate_of_test_context *ctx = test->priv; + struct device *dev = ctx->dev; + struct clk *clk; + + clk = clk_get_kunit(test, dev, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk); + + KUNIT_ASSERT_EQ(test, 0, clk_prepare_enable_kunit(test, clk)); + KUNIT_EXPECT_EQ(test, TEST_FIXED_FREQUENCY, clk_get_rate(clk)); +} + +/* + * Test that of_fixed_clk_setup() registers a fixed rate clk with the proper + * accuracy. 
+ */ +static void clk_fixed_rate_of_accuracy_test(struct kunit *test) +{ + struct clk_fixed_rate_of_test_context *ctx = test->priv; + struct device *dev = ctx->dev; + struct clk *clk; + + clk = clk_get_kunit(test, dev, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk); + + KUNIT_EXPECT_EQ(test, TEST_FIXED_ACCURACY, clk_get_accuracy(clk)); +} + +static struct kunit_case clk_fixed_rate_of_cases[] = { + KUNIT_CASE(clk_fixed_rate_of_probe_test), + KUNIT_CASE(clk_fixed_rate_of_accuracy_test), + {} +}; + +static int clk_fixed_rate_of_test_probe(struct platform_device *pdev) +{ + struct clk_fixed_rate_of_test_context *ctx; + + ctx = pdev_to_clk_fixed_rate_of_test_context(pdev); + ctx->dev = &pdev->dev; + complete(&ctx->probed); + + return 0; +} + +static int clk_fixed_rate_of_init(struct kunit *test) +{ + struct clk_fixed_rate_of_test_context *ctx; + static const struct of_device_id match_table[] = { + { .compatible = "test,single-clk-consumer" }, + { } + }; + + KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_fixed_rate_test)); + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + test->priv = ctx; + + ctx->pdrv.probe = clk_fixed_rate_of_test_probe; + ctx->pdrv.driver.of_match_table = match_table; + ctx->pdrv.driver.name = __func__; + ctx->pdrv.driver.owner = THIS_MODULE; + init_completion(&ctx->probed); + + KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv)); + KUNIT_ASSERT_NE(test, 0, wait_for_completion_timeout(&ctx->probed, HZ)); + + return 0; +} + +static struct kunit_suite clk_fixed_rate_of_suite = { + .name = "clk_fixed_rate_of", + .init = clk_fixed_rate_of_init, + .test_cases = clk_fixed_rate_of_cases, +}; + +kunit_test_suites( + &clk_fixed_rate_suite, + &clk_fixed_rate_of_suite, + &clk_fixed_rate_parent_suite, +); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("KUnit test for clk fixed rate basic type"); diff --git a/drivers/clk/clk-fixed-rate_test.h b/drivers/clk/clk-fixed-rate_test.h new file mode 100644 index 00000000000000..e0d28e5b608176 --- /dev/null +++ b/drivers/clk/clk-fixed-rate_test.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CLK_FIXED_RATE_TEST_H +#define _CLK_FIXED_RATE_TEST_H + +#define TEST_FIXED_FREQUENCY 50000000 +#define TEST_FIXED_ACCURACY 300 + +#endif diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c index 99b271c1278a9e..c997e74919961b 100644 --- a/drivers/clk/clk-lmk04832.c +++ b/drivers/clk/clk-lmk04832.c @@ -1405,16 +1405,12 @@ static int lmk04832_probe(struct spi_device *spi) lmk->dev = &spi->dev; - lmk->oscin = devm_clk_get(lmk->dev, "oscin"); + lmk->oscin = devm_clk_get_enabled(lmk->dev, "oscin"); if (IS_ERR(lmk->oscin)) { dev_err(lmk->dev, "failed to get oscin clock\n"); return PTR_ERR(lmk->oscin); } - ret = clk_prepare_enable(lmk->oscin); - if (ret) - return ret; - lmk->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW); @@ -1422,14 +1418,14 @@ static int lmk04832_probe(struct spi_device *spi) sizeof(struct lmk_dclk), GFP_KERNEL); if (!lmk->dclk) { ret = -ENOMEM; - goto err_disable_oscin; + return ret; } lmk->clkout = devm_kcalloc(lmk->dev, info->num_channels, sizeof(*lmk->clkout), GFP_KERNEL); if (!lmk->clkout) { ret = -ENOMEM; - goto err_disable_oscin; + return ret; } lmk->clk_data = devm_kzalloc(lmk->dev, struct_size(lmk->clk_data, hws, @@ -1437,7 +1433,7 @@ static int lmk04832_probe(struct spi_device *spi) GFP_KERNEL); if (!lmk->clk_data) { ret = -ENOMEM; - goto err_disable_oscin; + return ret; } 
device_property_read_u32(lmk->dev, "ti,vco-hz", &lmk->vco_rate); @@ -1465,7 +1461,7 @@ static int lmk04832_probe(struct spi_device *spi) dev_err(lmk->dev, "missing reg property in child: %s\n", child->full_name); of_node_put(child); - goto err_disable_oscin; + return ret; } of_property_read_u32(child, "ti,clkout-fmt", @@ -1486,7 +1482,7 @@ static int lmk04832_probe(struct spi_device *spi) __func__, PTR_ERR(lmk->regmap)); ret = PTR_ERR(lmk->regmap); - goto err_disable_oscin; + return ret; } regmap_write(lmk->regmap, LMK04832_REG_RST3W, LMK04832_BIT_RESET); @@ -1496,7 +1492,7 @@ static int lmk04832_probe(struct spi_device *spi) &rdbk_pin); ret = lmk04832_set_spi_rdbk(lmk, rdbk_pin); if (ret) - goto err_disable_oscin; + return ret; } regmap_bulk_read(lmk->regmap, LMK04832_REG_ID_PROD_MSB, &tmp, 3); @@ -1504,13 +1500,13 @@ static int lmk04832_probe(struct spi_device *spi) dev_err(lmk->dev, "unsupported device type: pid 0x%04x, maskrev 0x%02x\n", tmp[0] << 8 | tmp[1], tmp[2]); ret = -EINVAL; - goto err_disable_oscin; + return ret; } ret = lmk04832_register_vco(lmk); if (ret) { dev_err(lmk->dev, "failed to init device clock path\n"); - goto err_disable_oscin; + return ret; } if (lmk->vco_rate) { @@ -1518,21 +1514,21 @@ static int lmk04832_probe(struct spi_device *spi) ret = clk_set_rate(lmk->vco.clk, lmk->vco_rate); if (ret) { dev_err(lmk->dev, "failed to set VCO rate\n"); - goto err_disable_oscin; + return ret; } } ret = lmk04832_register_sclk(lmk); if (ret) { dev_err(lmk->dev, "failed to init SYNC/SYSREF clock path\n"); - goto err_disable_oscin; + return ret; } for (i = 0; i < info->num_channels; i++) { ret = lmk04832_register_clkout(lmk, i); if (ret) { dev_err(lmk->dev, "failed to register clk %d\n", i); - goto err_disable_oscin; + return ret; } } @@ -1541,24 +1537,12 @@ static int lmk04832_probe(struct spi_device *spi) lmk->clk_data); if (ret) { dev_err(lmk->dev, "failed to add provider (%d)\n", ret); - goto err_disable_oscin; + return ret; } spi_set_drvdata(spi, lmk); return 0; - -err_disable_oscin: - clk_disable_unprepare(lmk->oscin); - - return ret; -} - -static void lmk04832_remove(struct spi_device *spi) -{ - struct lmk04832 *lmk = spi_get_drvdata(spi); - - clk_disable_unprepare(lmk->oscin); } static const struct spi_device_id lmk04832_id[] = { @@ -1579,7 +1563,6 @@ static struct spi_driver lmk04832_driver = { .of_match_table = lmk04832_of_id, }, .probe = lmk04832_probe, - .remove = lmk04832_remove, .id_table = lmk04832_id, }; module_spi_driver(lmk04832_driver); diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c index 5efb10776ae5fc..39049f62dbbb30 100644 --- a/drivers/clk/clk-palmas.c +++ b/drivers/clk/clk-palmas.c @@ -281,7 +281,7 @@ static struct platform_driver palmas_clks_driver = { .of_match_table = palmas_clks_of_match, }, .probe = palmas_clks_probe, - .remove_new = palmas_clks_remove, + .remove = palmas_clks_remove, }; module_platform_driver(palmas_clks_driver); diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c index 3dd2b83d0404a9..bd4f21c2200462 100644 --- a/drivers/clk/clk-pwm.c +++ b/drivers/clk/clk-pwm.c @@ -142,7 +142,7 @@ MODULE_DEVICE_TABLE(of, clk_pwm_dt_ids); static struct platform_driver clk_pwm_driver = { .probe = clk_pwm_probe, - .remove_new = clk_pwm_remove, + .remove = clk_pwm_remove, .driver = { .name = "pwm-clock", .of_match_table = clk_pwm_dt_ids, diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 38c456540d1b98..014db638662407 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -263,7 +263,7 @@ 
static struct platform_driver s2mps11_clk_driver = { .name = "s2mps11-clk", }, .probe = s2mps11_clk_probe, - .remove_new = s2mps11_clk_remove, + .remove = s2mps11_clk_remove, .id_table = s2mps11_clk_id, }; module_platform_driver(s2mps11_clk_driver); diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index d86a02563f6c8c..15510c2ff21c03 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -156,13 +156,13 @@ static void scmi_clk_atomic_disable(struct clk_hw *hw) scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC); } -static int scmi_clk_atomic_is_enabled(struct clk_hw *hw) +static int __scmi_clk_is_enabled(struct clk_hw *hw, bool atomic) { int ret; bool enabled = false; struct scmi_clk *clk = to_scmi_clk(hw); - ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC); + ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, atomic); if (ret) dev_warn(clk->dev, "Failed to get state for clock ID %d\n", clk->id); @@ -170,6 +170,16 @@ static int scmi_clk_atomic_is_enabled(struct clk_hw *hw) return !!enabled; } +static int scmi_clk_atomic_is_enabled(struct clk_hw *hw) +{ + return __scmi_clk_is_enabled(hw, ATOMIC); +} + +static int scmi_clk_is_enabled(struct clk_hw *hw) +{ + return __scmi_clk_is_enabled(hw, NOT_ATOMIC); +} + static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) { int ret; @@ -285,6 +295,8 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key) if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) ops->is_enabled = scmi_clk_atomic_is_enabled; + else + ops->is_prepared = scmi_clk_is_enabled; /* Rate ops */ ops->recalc_rate = scmi_clk_recalc_rate; diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index 108b697bd317dc..19d530d52e647b 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -303,7 +303,7 @@ static struct platform_driver scpi_clocks_driver = { .of_match_table = scpi_clocks_ids, }, .probe = scpi_clocks_probe, - .remove_new = scpi_clocks_remove, + .remove = scpi_clocks_remove, }; module_platform_driver(scpi_clocks_driver); diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index 6e8dd7387cfdd4..5004888c7eca5f 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #define SI5341_NUM_INPUTS 4 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 285ed1ad8a3780..d02451f951cf05 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -4762,7 +4762,7 @@ void __clk_put(struct clk *clk) clk->exclusive_count = 0; } - hlist_del(&clk->clks_node); + clk_core_unlink_consumer(clk); /* If we had any boundaries on that clock, let's drop them. */ if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX) @@ -5232,7 +5232,7 @@ static int of_parse_clkspec(const struct device_node *np, int index, * clocks. 
*/ np = np->parent; - if (np && !of_get_property(np, "clock-ranges", NULL)) + if (np && !of_property_present(np, "clock-ranges")) break; index = 0; } diff --git a/drivers/clk/clk_kunit_helpers.c b/drivers/clk/clk_kunit_helpers.c new file mode 100644 index 00000000000000..52fd25594c9607 --- /dev/null +++ b/drivers/clk/clk_kunit_helpers.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit helpers for clk providers and consumers + */ +#include +#include +#include +#include +#include + +#include +#include + +KUNIT_DEFINE_ACTION_WRAPPER(clk_disable_unprepare_wrapper, + clk_disable_unprepare, struct clk *); +/** + * clk_prepare_enable_kunit() - Test managed clk_prepare_enable() + * @test: The test context + * @clk: clk to prepare and enable + * + * Return: 0 on success, or negative errno on failure. + */ +int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk) +{ + int ret; + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + return kunit_add_action_or_reset(test, clk_disable_unprepare_wrapper, + clk); +} +EXPORT_SYMBOL_GPL(clk_prepare_enable_kunit); + +KUNIT_DEFINE_ACTION_WRAPPER(clk_put_wrapper, clk_put, struct clk *); + +static struct clk *__clk_get_kunit(struct kunit *test, struct clk *clk) +{ + int ret; + + if (IS_ERR(clk)) + return clk; + + ret = kunit_add_action_or_reset(test, clk_put_wrapper, clk); + if (ret) + return ERR_PTR(ret); + + return clk; +} + +/** + * clk_get_kunit() - Test managed clk_get() + * @test: The test context + * @dev: device for clock "consumer" + * @con_id: clock consumer ID + * + * Just like clk_get(), except the clk is managed by the test case and is + * automatically put with clk_put() after the test case concludes. + * + * Return: new clk consumer or ERR_PTR on failure. + */ +struct clk * +clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + + return __clk_get_kunit(test, clk); +} +EXPORT_SYMBOL_GPL(clk_get_kunit); + +/** + * of_clk_get_kunit() - Test managed of_clk_get() + * @test: The test context + * @np: device_node for clock "consumer" + * @index: index in 'clocks' property of @np + * + * Just like of_clk_get(), except the clk is managed by the test case and is + * automatically put with clk_put() after the test case concludes. + * + * Return: new clk consumer or ERR_PTR on failure. + */ +struct clk * +of_clk_get_kunit(struct kunit *test, struct device_node *np, int index) +{ + struct clk *clk; + + clk = of_clk_get(np, index); + + return __clk_get_kunit(test, clk); +} +EXPORT_SYMBOL_GPL(of_clk_get_kunit); + +/** + * clk_hw_get_clk_kunit() - Test managed clk_hw_get_clk() + * @test: The test context + * @hw: clk_hw associated with the clk being consumed + * @con_id: connection ID string on device + * + * Just like clk_hw_get_clk(), except the clk is managed by the test case and + * is automatically put with clk_put() after the test case concludes. + * + * Return: new clk consumer or ERR_PTR on failure. + */ +struct clk * +clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id) +{ + struct clk *clk; + + clk = clk_hw_get_clk(hw, con_id); + + return __clk_get_kunit(test, clk); +} +EXPORT_SYMBOL_GPL(clk_hw_get_clk_kunit); + +/** + * clk_hw_get_clk_prepared_enabled_kunit() - Test managed clk_hw_get_clk() + clk_prepare_enable() + * @test: The test context + * @hw: clk_hw associated with the clk being consumed + * @con_id: connection ID string on device + * + * Just like + * + * .. 
code-block:: c + * + * struct clk *clk = clk_hw_get_clk(...); + * clk_prepare_enable(clk); + * + * except the clk is managed by the test case and is automatically disabled and + * unprepared with clk_disable_unprepare() and put with clk_put() after the + * test case concludes. + * + * Return: new clk consumer that is prepared and enabled or ERR_PTR on failure. + */ +struct clk * +clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw, + const char *con_id) +{ + int ret; + struct clk *clk; + + clk = clk_hw_get_clk_kunit(test, hw, con_id); + if (IS_ERR(clk)) + return clk; + + ret = clk_prepare_enable_kunit(test, clk); + if (ret) + return ERR_PTR(ret); + + return clk; +} +EXPORT_SYMBOL_GPL(clk_hw_get_clk_prepared_enabled_kunit); + +KUNIT_DEFINE_ACTION_WRAPPER(clk_hw_unregister_wrapper, + clk_hw_unregister, struct clk_hw *); + +/** + * clk_hw_register_kunit() - Test managed clk_hw_register() + * @test: The test context + * @dev: device that is registering this clock + * @hw: link to hardware-specific clock data + * + * Just like clk_hw_register(), except the clk registration is managed by the + * test case and is automatically unregistered after the test case concludes. + * + * Return: 0 on success or a negative errno value on failure. + */ +int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw) +{ + int ret; + + ret = clk_hw_register(dev, hw); + if (ret) + return ret; + + return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw); +} +EXPORT_SYMBOL_GPL(clk_hw_register_kunit); + +/** + * of_clk_hw_register_kunit() - Test managed of_clk_hw_register() + * @test: The test context + * @node: device_node of device that is registering this clock + * @hw: link to hardware-specific clock data + * + * Just like of_clk_hw_register(), except the clk registration is managed by + * the test case and is automatically unregistered after the test case + * concludes. + * + * Return: 0 on success or a negative errno value on failure. 
+ */ +int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struct clk_hw *hw) +{ + int ret; + + ret = of_clk_hw_register(node, hw); + if (ret) + return ret; + + return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw); +} +EXPORT_SYMBOL_GPL(of_clk_hw_register_kunit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("KUnit helpers for clk providers and consumers"); diff --git a/drivers/clk/clk_parent_data_test.h b/drivers/clk/clk_parent_data_test.h new file mode 100644 index 00000000000000..eedd53ae910d8e --- /dev/null +++ b/drivers/clk/clk_parent_data_test.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CLK_PARENT_DATA_TEST_H +#define _CLK_PARENT_DATA_TEST_H + +#define CLK_PARENT_DATA_1MHZ_NAME "1mhz_fixed_legacy" +#define CLK_PARENT_DATA_PARENT1 "parent_fwname" +#define CLK_PARENT_DATA_PARENT2 "50" +#define CLK_PARENT_DATA_50MHZ_NAME "50_clk" + +#endif diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c index fbbea66d9cba26..41fc8eba341848 100644 --- a/drivers/clk/clk_test.c +++ b/drivers/clk/clk_test.c @@ -4,12 +4,19 @@ */ #include #include +#include +#include /* Needed for clk_hw_get_clk() */ #include "clk.h" +#include +#include +#include #include +#include "clk_parent_data_test.h" + static const struct clk_ops empty_clk_ops = { }; #define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000) @@ -2659,6 +2666,448 @@ static struct kunit_suite clk_mux_no_reparent_test_suite = { .test_cases = clk_mux_no_reparent_test_cases, }; +struct clk_register_clk_parent_data_test_case { + const char *desc; + struct clk_parent_data pdata; +}; + +static void +clk_register_clk_parent_data_test_case_to_desc( + const struct clk_register_clk_parent_data_test_case *t, char *desc) +{ + strcpy(desc, t->desc); +} + +static const struct clk_register_clk_parent_data_test_case +clk_register_clk_parent_data_of_cases[] = { + { + /* + * Test that a clk registered with a struct device_node can + * find a parent based on struct clk_parent_data::index. + */ + .desc = "clk_parent_data_of_index_test", + .pdata.index = 0, + }, + { + /* + * Test that a clk registered with a struct device_node can + * find a parent based on struct clk_parent_data::fwname. + */ + .desc = "clk_parent_data_of_fwname_test", + .pdata.fw_name = CLK_PARENT_DATA_PARENT1, + }, + { + /* + * Test that a clk registered with a struct device_node can + * find a parent based on struct clk_parent_data::name. + */ + .desc = "clk_parent_data_of_name_test", + /* The index must be negative to indicate firmware not used */ + .pdata.index = -1, + .pdata.name = CLK_PARENT_DATA_1MHZ_NAME, + }, + { + /* + * Test that a clk registered with a struct device_node can + * find a parent based on struct + * clk_parent_data::{fw_name,name}. + */ + .desc = "clk_parent_data_of_fwname_name_test", + .pdata.fw_name = CLK_PARENT_DATA_PARENT1, + .pdata.name = "not_matching", + }, + { + /* + * Test that a clk registered with a struct device_node can + * find a parent based on struct clk_parent_data::{index,name}. + * Index takes priority. + */ + .desc = "clk_parent_data_of_index_name_priority_test", + .pdata.index = 0, + .pdata.name = "not_matching", + }, + { + /* + * Test that a clk registered with a struct device_node can + * find a parent based on struct + * clk_parent_data::{index,fwname,name}. The fw_name takes + * priority over index and name. 
+ */ + .desc = "clk_parent_data_of_index_fwname_name_priority_test", + .pdata.index = 1, + .pdata.fw_name = CLK_PARENT_DATA_PARENT1, + .pdata.name = "not_matching", + }, +}; + +KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases, + clk_register_clk_parent_data_test_case_to_desc) + +/** + * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests + * @np: device node of clk under test + * @hw: clk_hw for clk under test + */ +struct clk_register_clk_parent_data_of_ctx { + struct device_node *np; + struct clk_hw hw; +}; + +static int clk_register_clk_parent_data_of_test_init(struct kunit *test) +{ + struct clk_register_clk_parent_data_of_ctx *ctx; + + KUNIT_ASSERT_EQ(test, 0, + of_overlay_apply_kunit(test, kunit_clk_parent_data_test)); + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + test->priv = ctx; + + ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data"); + if (!ctx->np) + return -ENODEV; + + of_node_put_kunit(test, ctx->np); + + return 0; +} + +/* + * Test that a clk registered with a struct device_node can find a parent based on + * struct clk_parent_data when the hw member isn't set. + */ +static void clk_register_clk_parent_data_of_test(struct kunit *test) +{ + struct clk_register_clk_parent_data_of_ctx *ctx = test->priv; + struct clk_hw *parent_hw; + const struct clk_register_clk_parent_data_test_case *test_param; + struct clk_init_data init = { }; + struct clk *expected_parent, *actual_parent; + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np); + + expected_parent = of_clk_get_kunit(test, ctx->np, 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent); + + test_param = test->param_value; + init.parent_data = &test_param->pdata; + init.num_parents = 1; + init.name = "parent_data_of_test_clk"; + init.ops = &clk_dummy_single_parent_ops; + ctx->hw.init = &init; + KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw)); + + parent_hw = clk_hw_get_parent(&ctx->hw); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw); + + actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent); + + KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent)); +} + +static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = { + KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test, + clk_register_clk_parent_data_of_test_gen_params), + {} +}; + +/* + * Test suite for registering clks with struct clk_parent_data and a struct + * device_node. 
+ */ +static struct kunit_suite clk_register_clk_parent_data_of_suite = { + .name = "clk_register_clk_parent_data_of", + .init = clk_register_clk_parent_data_of_test_init, + .test_cases = clk_register_clk_parent_data_of_test_cases, +}; + +/** + * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests + * @dev: device of clk under test + * @hw: clk_hw for clk under test + * @pdrv: driver to attach to find @dev + */ +struct clk_register_clk_parent_data_device_ctx { + struct device *dev; + struct clk_hw hw; + struct platform_driver pdrv; +}; + +static inline struct clk_register_clk_parent_data_device_ctx * +clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev) +{ + return container_of(to_platform_driver(pdev->dev.driver), + struct clk_register_clk_parent_data_device_ctx, pdrv); +} + +static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev) +{ + struct clk_register_clk_parent_data_device_ctx *ctx; + + ctx = clk_register_clk_parent_data_driver_to_test_context(pdev); + ctx->dev = &pdev->dev; + + return 0; +} + +static void clk_register_clk_parent_data_device_driver(struct kunit *test) +{ + struct clk_register_clk_parent_data_device_ctx *ctx = test->priv; + static const struct of_device_id match_table[] = { + { .compatible = "test,clk-parent-data" }, + { } + }; + + ctx->pdrv.probe = clk_register_clk_parent_data_device_probe; + ctx->pdrv.driver.of_match_table = match_table; + ctx->pdrv.driver.name = __func__; + ctx->pdrv.driver.owner = THIS_MODULE; + + KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv)); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev); +} + +static const struct clk_register_clk_parent_data_test_case +clk_register_clk_parent_data_device_cases[] = { + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::index. + */ + .desc = "clk_parent_data_device_index_test", + .pdata.index = 1, + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::fwname. + */ + .desc = "clk_parent_data_device_fwname_test", + .pdata.fw_name = CLK_PARENT_DATA_PARENT2, + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::name. + */ + .desc = "clk_parent_data_device_name_test", + /* The index must be negative to indicate firmware not used */ + .pdata.index = -1, + .pdata.name = CLK_PARENT_DATA_50MHZ_NAME, + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::{fw_name,name}. + */ + .desc = "clk_parent_data_device_fwname_name_test", + .pdata.fw_name = CLK_PARENT_DATA_PARENT2, + .pdata.name = "not_matching", + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::{index,name}. Index + * takes priority. + */ + .desc = "clk_parent_data_device_index_name_priority_test", + .pdata.index = 1, + .pdata.name = "not_matching", + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::{index,fwname,name}. + * The fw_name takes priority over index and name. 
+ */ + .desc = "clk_parent_data_device_index_fwname_name_priority_test", + .pdata.index = 0, + .pdata.fw_name = CLK_PARENT_DATA_PARENT2, + .pdata.name = "not_matching", + }, +}; + +KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test, + clk_register_clk_parent_data_device_cases, + clk_register_clk_parent_data_test_case_to_desc) + +/* + * Test that a clk registered with a struct device can find a parent based on + * struct clk_parent_data when the hw member isn't set. + */ +static void clk_register_clk_parent_data_device_test(struct kunit *test) +{ + struct clk_register_clk_parent_data_device_ctx *ctx; + const struct clk_register_clk_parent_data_test_case *test_param; + struct clk_hw *parent_hw; + struct clk_init_data init = { }; + struct clk *expected_parent, *actual_parent; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + test->priv = ctx; + + clk_register_clk_parent_data_device_driver(test); + + expected_parent = clk_get_kunit(test, ctx->dev, "50"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent); + + test_param = test->param_value; + init.parent_data = &test_param->pdata; + init.num_parents = 1; + init.name = "parent_data_device_test_clk"; + init.ops = &clk_dummy_single_parent_ops; + ctx->hw.init = &init; + KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw)); + + parent_hw = clk_hw_get_parent(&ctx->hw); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw); + + actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent); + + KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent)); +} + +static const struct clk_register_clk_parent_data_test_case +clk_register_clk_parent_data_device_hw_cases[] = { + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::hw. + */ + .desc = "clk_parent_data_device_hw_index_test", + /* The index must be negative to indicate firmware not used */ + .pdata.index = -1, + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::hw when + * struct clk_parent_data::fw_name is set. + */ + .desc = "clk_parent_data_device_hw_fwname_test", + .pdata.fw_name = CLK_PARENT_DATA_PARENT2, + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::hw when struct + * clk_parent_data::name is set. + */ + .desc = "clk_parent_data_device_hw_name_test", + /* The index must be negative to indicate firmware not used */ + .pdata.index = -1, + .pdata.name = CLK_PARENT_DATA_50MHZ_NAME, + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::hw when struct + * clk_parent_data::{fw_name,name} are set. + */ + .desc = "clk_parent_data_device_hw_fwname_name_test", + .pdata.fw_name = CLK_PARENT_DATA_PARENT2, + .pdata.name = "not_matching", + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::hw when struct + * clk_parent_data::index is set. The hw pointer takes + * priority. + */ + .desc = "clk_parent_data_device_hw_index_priority_test", + .pdata.index = 0, + }, + { + /* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::hw when + * struct clk_parent_data::{index,fwname,name} are set. + * The hw pointer takes priority over everything else. 
+ */ + .desc = "clk_parent_data_device_hw_index_fwname_name_priority_test", + .pdata.index = 0, + .pdata.fw_name = CLK_PARENT_DATA_PARENT2, + .pdata.name = "not_matching", + }, +}; + +KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test, + clk_register_clk_parent_data_device_hw_cases, + clk_register_clk_parent_data_test_case_to_desc) + +/* + * Test that a clk registered with a struct device can find a + * parent based on struct clk_parent_data::hw. + */ +static void clk_register_clk_parent_data_device_hw_test(struct kunit *test) +{ + struct clk_register_clk_parent_data_device_ctx *ctx; + const struct clk_register_clk_parent_data_test_case *test_param; + struct clk_dummy_context *parent; + struct clk_hw *parent_hw; + struct clk_parent_data pdata = { }; + struct clk_init_data init = { }; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + test->priv = ctx; + + clk_register_clk_parent_data_device_driver(test); + + parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + + parent_hw = &parent->hw; + parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk", + &clk_dummy_rate_ops, 0); + + KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw)); + + test_param = test->param_value; + memcpy(&pdata, &test_param->pdata, sizeof(pdata)); + pdata.hw = parent_hw; + init.parent_data = &pdata; + init.num_parents = 1; + init.ops = &clk_dummy_single_parent_ops; + init.name = "parent_data_device_hw_test_clk"; + ctx->hw.init = &init; + KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw)); + + KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw)); +} + +static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = { + KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test, + clk_register_clk_parent_data_device_test_gen_params), + KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test, + clk_register_clk_parent_data_device_hw_test_gen_params), + {} +}; + +static int clk_register_clk_parent_data_device_init(struct kunit *test) +{ + KUNIT_ASSERT_EQ(test, 0, + of_overlay_apply_kunit(test, kunit_clk_parent_data_test)); + + return 0; +} + +/* + * Test suite for registering clks with struct clk_parent_data and a struct + * device. 
+ */ +static struct kunit_suite clk_register_clk_parent_data_device_suite = { + .name = "clk_register_clk_parent_data_device", + .init = clk_register_clk_parent_data_device_init, + .test_cases = clk_register_clk_parent_data_device_test_cases, +}; + kunit_test_suites( &clk_leaf_mux_set_rate_parent_test_suite, &clk_test_suite, @@ -2671,8 +3120,10 @@ kunit_test_suites( &clk_range_test_suite, &clk_range_maximize_test_suite, &clk_range_minimize_test_suite, + &clk_register_clk_parent_data_of_suite, + &clk_register_clk_parent_data_device_suite, &clk_single_parent_mux_test_suite, - &clk_uncached_test_suite + &clk_uncached_test_suite, ); MODULE_DESCRIPTION("Kunit tests for clk framework"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c index ec60ecb517f1f9..a5109fe8b16e9b 100644 --- a/drivers/clk/davinci/da8xx-cfgchip.c +++ b/drivers/clk/davinci/da8xx-cfgchip.c @@ -513,8 +513,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev, fck_clk = devm_clk_get(dev, "fck"); if (IS_ERR(fck_clk)) { - dev_err_probe(dev, PTR_ERR(fck_clk), "Missing fck clock\n"); - return ERR_CAST(fck_clk); + return dev_err_cast_probe(dev, fck_clk, "Missing fck clock\n"); } usb0 = devm_kzalloc(dev, sizeof(*usb0), GFP_KERNEL); @@ -749,11 +748,9 @@ static int da8xx_cfgchip_probe(struct platform_device *pdev) clk_init = device_get_match_data(dev); if (clk_init) { - struct device_node *parent; + struct device_node *parent __free(device_node) = of_get_parent(dev->of_node); - parent = of_get_parent(dev->of_node); regmap = syscon_node_to_regmap(parent); - of_node_put(parent); } else if (pdev->id_entry && pdata) { clk_init = (void *)pdev->id_entry->driver_data; regmap = pdata->cfgchip; diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c index 141b727ff60d64..0c50acd8543af7 100644 --- a/drivers/clk/hisilicon/clk-hi3519.c +++ b/drivers/clk/hisilicon/clk-hi3519.c @@ -179,7 +179,7 @@ MODULE_DEVICE_TABLE(of, hi3519_clk_match_table); static struct platform_driver hi3519_clk_driver = { .probe = hi3519_clk_probe, - .remove_new = hi3519_clk_remove, + .remove = hi3519_clk_remove, .driver = { .name = "hi3519-clk", .of_match_table = hi3519_clk_match_table, diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c index c79a94f6d9d24c..f297fb25c512a3 100644 --- a/drivers/clk/hisilicon/clk-hi3559a.c +++ b/drivers/clk/hisilicon/clk-hi3559a.c @@ -407,7 +407,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct hi3559av100_clk_pll *clk = to_pll_clk(hw); - u64 frac_val, fbdiv_val, refdiv_val; + u64 frac_val, fbdiv_val; u32 postdiv1_val, postdiv2_val; u32 val; u64 tmp, rate; @@ -435,14 +435,13 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, val = readl_relaxed(clk->ctrl_reg2); val = val >> clk->refdiv_shift; val &= ((1 << clk->refdiv_width) - 1); - refdiv_val = val; /* rate = 24000000 * (fbdiv + frac / (1<<24) ) / refdiv */ rate = 0; tmp = 24000000 * fbdiv_val + (24000000 * frac_val) / (1 << 24); rate += tmp; - do_div(rate, refdiv_val); - do_div(rate, postdiv1_val * postdiv2_val); + rate = div_u64(rate, val); + rate = div_u64(rate, postdiv1_val * postdiv2_val); return rate; } @@ -818,7 +817,7 @@ static void hi3559av100_crg_remove(struct platform_device *pdev) static struct platform_driver hi3559av100_crg_driver = { .probe = hi3559av100_crg_probe, - .remove_new = hi3559av100_crg_remove, + .remove = hi3559av100_crg_remove, .driver = { .name = "hi3559av100-clock", 
.of_match_table = hi3559av100_crg_match_table, diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c index e602e65fbc384a..b66140f74c5102 100644 --- a/drivers/clk/hisilicon/crg-hi3516cv300.c +++ b/drivers/clk/hisilicon/crg-hi3516cv300.c @@ -294,7 +294,7 @@ static void hi3516cv300_crg_remove(struct platform_device *pdev) static struct platform_driver hi3516cv300_crg_driver = { .probe = hi3516cv300_crg_probe, - .remove_new = hi3516cv300_crg_remove, + .remove = hi3516cv300_crg_remove, .driver = { .name = "hi3516cv300-crg", .of_match_table = hi3516cv300_crg_match_table, diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c index f651b197e45abe..8eabd1cc229fbe 100644 --- a/drivers/clk/hisilicon/crg-hi3798cv200.c +++ b/drivers/clk/hisilicon/crg-hi3798cv200.c @@ -377,7 +377,7 @@ static void hi3798cv200_crg_remove(struct platform_device *pdev) static struct platform_driver hi3798cv200_crg_driver = { .probe = hi3798cv200_crg_probe, - .remove_new = hi3798cv200_crg_remove, + .remove = hi3798cv200_crg_remove, .driver = { .name = "hi3798cv200-crg", .of_match_table = hi3798cv200_crg_match_table, diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig index 6da0fba68225e4..6ff6d934848a3b 100644 --- a/drivers/clk/imx/Kconfig +++ b/drivers/clk/imx/Kconfig @@ -81,6 +81,7 @@ config CLK_IMX8MP tristate "IMX8MP CCM Clock Driver" depends on ARCH_MXC || COMPILE_TEST select MXC_CLK + select AUXILIARY_BUS if RESET_CONTROLLER help Build the driver for i.MX8MP CCM Clock Driver diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c index e208ddc511339e..8ed2e0ad2769c6 100644 --- a/drivers/clk/imx/clk-composite-7ulp.c +++ b/drivers/clk/imx/clk-composite-7ulp.c @@ -14,6 +14,7 @@ #include "../clk-fractional-divider.h" #include "clk.h" +#define PCG_PR_MASK BIT(31) #define PCG_PCS_SHIFT 24 #define PCG_PCS_MASK 0x7 #define PCG_CGC_SHIFT 30 @@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name, struct clk_hw *hw; u32 val; + val = readl(reg); + if (!(val & PCG_PR_MASK)) { + pr_info("PCC PR is 0 for clk:%s, bypass\n", name); + return NULL; + } + if (mux_present) { mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 8cc07d056a8384..f187582ba49196 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = { .determine_rate = imx8m_clk_composite_mux_determine_rate, }; +static int imx8m_clk_composite_gate_enable(struct clk_hw *hw) +{ + struct clk_gate *gate = to_clk_gate(hw); + unsigned long flags; + u32 val; + + spin_lock_irqsave(gate->lock, flags); + + val = readl(gate->reg); + val |= BIT(gate->bit_idx); + writel(val, gate->reg); + + spin_unlock_irqrestore(gate->lock, flags); + + return 0; +} + +static void imx8m_clk_composite_gate_disable(struct clk_hw *hw) +{ + /* composite clk requires the disable hook */ +} + +static const struct clk_ops imx8m_clk_composite_gate_ops = { + .enable = imx8m_clk_composite_gate_enable, + .disable = imx8m_clk_composite_gate_disable, + .is_enabled = clk_gate_is_enabled, +}; + struct clk_hw *__imx8m_clk_hw_composite(const char *name, const char * const *parent_names, int num_parents, void __iomem *reg, @@ -217,6 +245,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, struct clk_mux *mux; const struct clk_ops *divider_ops; const struct clk_ops 
*mux_ops; + const struct clk_ops *gate_ops; mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) @@ -257,20 +286,22 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, div->flags = CLK_DIVIDER_ROUND_CLOSEST; /* skip registering the gate ops if M4 is enabled */ - if (!mcore_booted) { - gate = kzalloc(sizeof(*gate), GFP_KERNEL); - if (!gate) - goto free_div; - - gate_hw = &gate->hw; - gate->reg = reg; - gate->bit_idx = PCG_CGC_SHIFT; - gate->lock = &imx_ccm_lock; - } + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + goto free_div; + + gate_hw = &gate->hw; + gate->reg = reg; + gate->bit_idx = PCG_CGC_SHIFT; + gate->lock = &imx_ccm_lock; + if (!mcore_booted) + gate_ops = &clk_gate_ops; + else + gate_ops = &imx8m_clk_composite_gate_ops; hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, mux_ops, div_hw, - divider_ops, gate_hw, &clk_gate_ops, flags); + divider_ops, gate_hw, gate_ops, flags); if (IS_ERR(hw)) goto free_gate; diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c index 81164bdcd6cc9a..6c6c5a30f3282d 100644 --- a/drivers/clk/imx/clk-composite-93.c +++ b/drivers/clk/imx/clk-composite-93.c @@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw) static void imx93_clk_composite_gate_disable(struct clk_hw *hw) { + /* + * Skip disable the root clock gate if mcore enabled. + * The root clock may be used by the mcore. + */ + if (mcore_booted) + return; + imx93_clk_composite_gate_endisable(hw, 0); } @@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, &clk_mux_ro_ops, div_hw, &clk_divider_ro_ops, NULL, NULL, flags); - } else if (!mcore_booted) { + } else { gate = kzalloc(sizeof(*gate), GFP_KERNEL); if (!gate) goto fail; @@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p &imx93_clk_composite_divider_ops, gate_hw, &imx93_clk_composite_gate_ops, flags | CLK_SET_RATE_NO_REPARENT); - } else { - hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, - mux_hw, &imx93_clk_composite_mux_ops, div_hw, - &imx93_clk_composite_divider_ops, NULL, - &imx93_clk_composite_gate_ops, - flags | CLK_SET_RATE_NO_REPARENT); } if (IS_ERR(hw)) diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c index 44462ab50e513c..591e0364ee5c11 100644 --- a/drivers/clk/imx/clk-fracn-gppll.c +++ b/drivers/clk/imx/clk-fracn-gppll.c @@ -78,6 +78,7 @@ struct clk_fracn_gppll { * The Fvco should be in range 2.5Ghz to 5Ghz */ static const struct imx_fracn_gppll_rate_table fracn_tbl[] = { + PLL_FRACN_GP(1039500000U, 173, 25, 100, 1, 4), PLL_FRACN_GP(650000000U, 162, 50, 100, 0, 6), PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8), PLL_FRACN_GP(560000000U, 140, 0, 1, 0, 6), @@ -106,6 +107,7 @@ static const struct imx_fracn_gppll_rate_table int_tbl[] = { PLL_FRACN_GP_INTEGER(1700000000U, 141, 1, 2), PLL_FRACN_GP_INTEGER(1400000000U, 175, 1, 3), PLL_FRACN_GP_INTEGER(900000000U, 150, 1, 4), + PLL_FRACN_GP_INTEGER(800000000U, 200, 1, 6), }; struct imx_fracn_gppll_clk imx_fracn_gppll_integer = { @@ -291,6 +293,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw) if (val & POWERUP_MASK) return 0; + if (pll->flags & CLK_FRACN_GPPLL_FRACN) + writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR), + pll->base + PLL_NUMERATOR); + val |= CLKMUX_BYPASS; writel_relaxed(val, pll->base + PLL_CTRL); diff --git 
a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index f9394e94f69d73..05c7a82b751f3c 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk); - clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk); - clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk); + clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk); + clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk); imx_register_uart_clocks(); } diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 2b77d1fc7bb946..99adc55e3f5d66 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -498,14 +498,14 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE); hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE); hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE); - hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE); + hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel)); hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE); - hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE); + hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel)); hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE); hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE); hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE); hws[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel), CLK_SET_PARENT_GATE); - hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE); + hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE | CLK_SET_RATE_PARENT); hws[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel), CLK_SET_PARENT_GATE); hws[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel), CLK_SET_PARENT_GATE); hws[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dphy_src", base + 
0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel), CLK_SET_PARENT_GATE); diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c index 1bdb480cc96c6b..6c351050b82ae0 100644 --- a/drivers/clk/imx/clk-imx8-acm.c +++ b/drivers/clk/imx/clk-imx8-acm.c @@ -54,10 +54,12 @@ struct clk_imx8_acm_sel { * struct imx8_acm_soc_data - soc specific data * @sels: pointer to struct clk_imx8_acm_sel * @num_sels: numbers of items + * @mclk_sels: pointer to imx8qm/qxp/dxl_mclk_sels */ struct imx8_acm_soc_data { struct clk_imx8_acm_sel *sels; unsigned int num_sels; + struct clk_parent_data *mclk_sels; }; /** @@ -111,11 +113,14 @@ static const struct clk_parent_data imx8qm_mclk_out_sels[] = { { .fw_name = "sai6_rx_bclk" }, }; -static const struct clk_parent_data imx8qm_mclk_sels[] = { +#define ACM_AUD_CLK0_SEL_INDEX 2 +#define ACM_AUD_CLK1_SEL_INDEX 3 + +static struct clk_parent_data imx8qm_mclk_sels[] = { { .fw_name = "aud_pll_div_clk0_lpcg_clk" }, { .fw_name = "aud_pll_div_clk1_lpcg_clk" }, - { .fw_name = "acm_aud_clk0_sel" }, - { .fw_name = "acm_aud_clk1_sel" }, + { }, /* clk_hw pointer of "acm_aud_clk0_sel" */ + { }, /* clk_hw pointer of "acm_aud_clk1_sel" */ }; static const struct clk_parent_data imx8qm_asrc_mux_clk_sels[] = { @@ -176,11 +181,11 @@ static const struct clk_parent_data imx8qxp_mclk_out_sels[] = { { .fw_name = "sai4_rx_bclk" }, }; -static const struct clk_parent_data imx8qxp_mclk_sels[] = { +static struct clk_parent_data imx8qxp_mclk_sels[] = { { .fw_name = "aud_pll_div_clk0_lpcg_clk" }, { .fw_name = "aud_pll_div_clk1_lpcg_clk" }, - { .fw_name = "acm_aud_clk0_sel" }, - { .fw_name = "acm_aud_clk1_sel" }, + { }, /* clk_hw pointer of "acm_aud_clk0_sel" */ + { }, /* clk_hw pointer of "acm_aud_clk1_sel" */ }; static struct clk_imx8_acm_sel imx8qxp_sels[] = { @@ -228,11 +233,11 @@ static const struct clk_parent_data imx8dxl_mclk_out_sels[] = { { .index = -1 }, }; -static const struct clk_parent_data imx8dxl_mclk_sels[] = { +static struct clk_parent_data imx8dxl_mclk_sels[] = { { .fw_name = "aud_pll_div_clk0_lpcg_clk" }, { .fw_name = "aud_pll_div_clk1_lpcg_clk" }, - { .fw_name = "acm_aud_clk0_sel" }, - { .fw_name = "acm_aud_clk1_sel" }, + { }, /* clk_hw pointer of "acm_aud_clk0_sel" */ + { }, /* clk_hw pointer of "acm_aud_clk1_sel" */ }; static struct clk_imx8_acm_sel imx8dxl_sels[] = { @@ -375,6 +380,18 @@ static int imx8_acm_clk_probe(struct platform_device *pdev) imx_check_clk_hws(hws, IMX_ADMA_ACM_CLK_END); goto err_clk_register; } + + /* + * The IMX_ADMA_ACM_AUD_CLK0_SEL and IMX_ADMA_ACM_AUD_CLK1_SEL are + * registered first. After registration, update the clk_hw pointer + * to imx8qm/qxp/dxl_mclk_sels structures. 
+ */ + if (sels[i].clkid == IMX_ADMA_ACM_AUD_CLK0_SEL) + priv->soc_data->mclk_sels[ACM_AUD_CLK0_SEL_INDEX].hw = + hws[IMX_ADMA_ACM_AUD_CLK0_SEL]; + if (sels[i].clkid == IMX_ADMA_ACM_AUD_CLK1_SEL) + priv->soc_data->mclk_sels[ACM_AUD_CLK1_SEL_INDEX].hw = + hws[IMX_ADMA_ACM_AUD_CLK1_SEL]; } ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data); @@ -406,16 +423,19 @@ static void imx8_acm_clk_remove(struct platform_device *pdev) static const struct imx8_acm_soc_data imx8qm_acm_data = { .sels = imx8qm_sels, .num_sels = ARRAY_SIZE(imx8qm_sels), + .mclk_sels = imx8qm_mclk_sels, }; static const struct imx8_acm_soc_data imx8qxp_acm_data = { .sels = imx8qxp_sels, .num_sels = ARRAY_SIZE(imx8qxp_sels), + .mclk_sels = imx8qxp_mclk_sels, }; static const struct imx8_acm_soc_data imx8dxl_acm_data = { .sels = imx8dxl_sels, .num_sels = ARRAY_SIZE(imx8dxl_sels), + .mclk_sels = imx8dxl_mclk_sels, }; static const struct of_device_id imx8_acm_match[] = { @@ -468,7 +488,7 @@ static struct platform_driver imx8_acm_clk_driver = { .pm = &imx8_acm_pm_ops, }, .probe = imx8_acm_clk_probe, - .remove_new = imx8_acm_clk_remove, + .remove = imx8_acm_clk_remove, }; module_platform_driver(imx8_acm_clk_driver); diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c index 075f643e3f3569..342049b847b900 100644 --- a/drivers/clk/imx/clk-imx8mm.c +++ b/drivers/clk/imx/clk-imx8mm.c @@ -432,7 +432,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) /* BUS */ hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800); hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880); - hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900); + hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900); hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980); hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00); hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index 4bd1ed11353b38..ab77e148e70ca7 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -583,6 +583,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0); hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0); hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7); + hws[IMX8MN_CLK_SAI7_IPG] = imx_clk_hw_gate2_shared2("sai7_ipg_clk", "ipg_audio_root", base + 0x4650, 0, &share_count_sai7); hws[IMX8MN_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8); diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c index b381d6f784c890..b2cb157703c57f 100644 --- a/drivers/clk/imx/clk-imx8mp-audiomix.c +++ b/drivers/clk/imx/clk-imx8mp-audiomix.c @@ -5,6 +5,7 @@ * Copyright (C) 2022 Marek Vasut */ +#include #include #include #include @@ -13,6 +14,7 @@ #include #include #include +#include #include @@ -154,6 +156,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = { PDM_SEL, 2, 0 \ } +#define 
CLK_GATE_PARENT(gname, cname, pname) \ + { \ + gname"_cg", \ + IMX8MP_CLK_AUDIOMIX_##cname, \ + { .fw_name = pname, .name = pname }, NULL, 1, \ + CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \ + 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \ + } + struct clk_imx8mp_audiomix_sel { const char *name; int clkid; @@ -171,14 +182,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = { CLK_GATE("earc", EARC_IPG), CLK_GATE("ocrama", OCRAMA_IPG), CLK_GATE("aud2htx", AUD2HTX_IPG), - CLK_GATE("earc_phy", EARC_PHY), + CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"), CLK_GATE("sdma2", SDMA2_ROOT), CLK_GATE("sdma3", SDMA3_ROOT), CLK_GATE("spba2", SPBA2_ROOT), CLK_GATE("dsp", DSP_ROOT), CLK_GATE("dspdbg", DSPDBG_ROOT), CLK_GATE("edma", EDMA_ROOT), - CLK_GATE("audpll", AUDPLL_ROOT), + CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"), CLK_GATE("mu2", MU2_ROOT), CLK_GATE("mu3", MU3_ROOT), CLK_PDM, @@ -217,6 +228,63 @@ struct clk_imx8mp_audiomix_priv { struct clk_hw_onecell_data clk_data; }; +#if IS_ENABLED(CONFIG_RESET_CONTROLLER) + +static void clk_imx8mp_audiomix_reset_unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +static void clk_imx8mp_audiomix_reset_adev_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + kfree(adev); +} + +static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev, + struct clk_imx8mp_audiomix_priv *priv) +{ + struct auxiliary_device *adev __free(kfree) = NULL; + int ret; + + if (!of_property_present(dev->of_node, "#reset-cells")) + return 0; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + + adev->name = "reset"; + adev->dev.parent = dev; + adev->dev.release = clk_imx8mp_audiomix_reset_adev_release; + + ret = auxiliary_device_init(adev); + if (ret) + return ret; + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ret; + } + + return devm_add_action_or_reset(dev, clk_imx8mp_audiomix_reset_unregister_adev, + no_free_ptr(adev)); +} + +#else /* !CONFIG_RESET_CONTROLLER */ + +static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv) +{ + return 0; +} + +#endif /* !CONFIG_RESET_CONTROLLER */ + static void clk_imx8mp_audiomix_save_restore(struct device *dev, bool save) { struct clk_imx8mp_audiomix_priv *priv = dev_get_drvdata(dev); @@ -269,12 +337,12 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(sels); i++) { if (sels[i].num_parents == 1) { hw = devm_clk_hw_register_gate_parent_data(dev, - sels[i].name, &sels[i].parent, 0, + sels[i].name, &sels[i].parent, CLK_SET_RATE_PARENT, base + sels[i].reg, sels[i].shift, 0, NULL); } else { hw = devm_clk_hw_register_mux_parent_data_table(dev, sels[i].name, sels[i].parents, - sels[i].num_parents, 0, + sels[i].num_parents, CLK_SET_RATE_PARENT, base + sels[i].reg, sels[i].shift, sels[i].width, 0, NULL, NULL); @@ -317,7 +385,8 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev) clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw; hw = devm_clk_hw_register_gate(dev, "sai_pll_out", "sai_pll_bypass", - 0, base + SAI_PLL_GNRL_CTL, 13, + CLK_SET_RATE_PARENT, + base + SAI_PLL_GNRL_CTL, 13, 0, NULL); if (IS_ERR(hw)) { ret = PTR_ERR(hw); @@ -326,7 +395,8 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev) clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw; hw = devm_clk_hw_register_fixed_factor(dev, 
"sai_pll_out_div2", - "sai_pll_out", 0, 1, 2); + "sai_pll_out", + CLK_SET_RATE_PARENT, 1, 2); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto err_clk_register; @@ -337,6 +407,10 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev) if (ret) goto err_clk_register; + ret = clk_imx8mp_audiomix_reset_controller_register(dev, priv); + if (ret) + goto err_clk_register; + pm_runtime_put_sync(dev); return 0; @@ -380,7 +454,7 @@ MODULE_DEVICE_TABLE(of, clk_imx8mp_audiomix_of_match); static struct platform_driver clk_imx8mp_audiomix_driver = { .probe = clk_imx8mp_audiomix_probe, - .remove_new = clk_imx8mp_audiomix_remove, + .remove = clk_imx8mp_audiomix_remove, .driver = { .name = "imx8mp-audio-blk-ctrl", .of_match_table = clk_imx8mp_audiomix_of_match, diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 670aa2bab3017e..516dbd170c8a35 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -547,12 +547,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000); hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100); hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200); - hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300); + hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus_flags("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300, CLK_SET_RATE_PARENT); hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1); - hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); - hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); + hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); + hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100); hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180); hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200); @@ -609,7 +609,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80); hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00); hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80); - hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00); + hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite_bus_flags("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00, CLK_SET_RATE_PARENT); hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80); hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00); hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", 
imx8mp_memrepair_sels, ccm_base + 0xbf80); diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c index 7d8883916cacdd..3ae162625bb1a4 100644 --- a/drivers/clk/imx/clk-imx8qxp.c +++ b/drivers/clk/imx/clk-imx8qxp.c @@ -71,7 +71,7 @@ static const char *const lvds0_sels[] = { "clk_dummy", "clk_dummy", "clk_dummy", - "mipi0_lvds_bypass_clk", + "lvds0_bypass_clk", }; static const char *const lvds1_sels[] = { @@ -79,7 +79,7 @@ static const char *const lvds1_sels[] = { "clk_dummy", "clk_dummy", "clk_dummy", - "mipi1_lvds_bypass_clk", + "lvds1_bypass_clk", }; static const char * const mipi_sels[] = { @@ -90,6 +90,22 @@ static const char * const mipi_sels[] = { "clk_dummy", }; +static const char * const mipi0_phy_sels[] = { + "clk_dummy", + "clk_dummy", + "mipi_pll_div2_clk", + "clk_dummy", + "mipi0_bypass_clk", +}; + +static const char * const mipi1_phy_sels[] = { + "clk_dummy", + "clk_dummy", + "mipi_pll_div2_clk", + "clk_dummy", + "mipi1_bypass_clk", +}; + static const char * const lcd_sels[] = { "clk_dummy", "clk_dummy", @@ -170,8 +186,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER); imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL); imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER); - imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0); imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0); /* Audio SS */ imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL); @@ -206,42 +222,41 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC); /* Display controller SS */ - imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0); - imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0); + imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS); - imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0); - imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0); + imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS); /* MIPI-LVDS SS */ imx_clk_scu("mipi0_bypass_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_BYPASS); - imx_clk_scu("mipi0_pixel_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER); - imx_clk_scu("mipi0_lvds_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS); - 
imx_clk_scu2("mipi0_lvds_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2); - imx_clk_scu2("mipi0_lvds_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3); + imx_clk_scu2("mipi0_pixel_clk", mipi0_phy_sels, ARRAY_SIZE(mipi0_phy_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER); + imx_clk_scu("lvds0_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("lvds0_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2); + imx_clk_scu2("lvds0_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3); imx_clk_scu2("mipi0_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_MST_BUS); imx_clk_scu2("mipi0_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_SLV_BUS); - imx_clk_scu2("mipi0_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY); + imx_clk_scu2("mipi0_dsi_phy_clk", mipi0_phy_sels, ARRAY_SIZE(mipi0_phy_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY); imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2); imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2); imx_clk_scu("mipi0_pwm0_clk", IMX_SC_R_MIPI_0_PWM_0, IMX_SC_PM_CLK_PER); imx_clk_scu("mipi1_bypass_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_BYPASS); - imx_clk_scu("mipi1_pixel_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER); - imx_clk_scu("mipi1_lvds_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS); - imx_clk_scu2("mipi1_lvds_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2); - imx_clk_scu2("mipi1_lvds_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3); - + imx_clk_scu2("mipi1_pixel_clk", mipi1_phy_sels, ARRAY_SIZE(mipi1_phy_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER); + imx_clk_scu("lvds1_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("lvds1_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2); + imx_clk_scu2("lvds1_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3); imx_clk_scu2("mipi1_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_MST_BUS); imx_clk_scu2("mipi1_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_SLV_BUS); - imx_clk_scu2("mipi1_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PHY); + imx_clk_scu2("mipi1_dsi_phy_clk", mipi1_phy_sels, ARRAY_SIZE(mipi1_phy_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PHY); imx_clk_scu("mipi1_i2c0_clk", IMX_SC_R_MIPI_1_I2C_0, IMX_SC_PM_CLK_MISC2); imx_clk_scu("mipi1_i2c1_clk", IMX_SC_R_MIPI_1_I2C_1, IMX_SC_PM_CLK_MISC2); imx_clk_scu("mipi1_pwm0_clk", IMX_SC_R_MIPI_1_PWM_0, IMX_SC_PM_CLK_PER); diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c index 74f595f9e5e348..19a62da74be450 100644 --- a/drivers/clk/imx/clk-imx95-blk-ctl.c +++ b/drivers/clk/imx/clk-imx95-blk-ctl.c @@ -248,6 +248,35 @@ static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = { .clk_reg_offset = 0, }; +static const struct imx95_blk_ctl_clk_dev_data netxmix_clk_dev_data[] = { + [IMX95_CLK_NETCMIX_ENETC0_RMII] = { + .name = "enetc0_rmii_sel", + .parent_names = (const char *[]){"ext_enetref", "enetref"}, + .num_parents = 2, + .reg = 4, + .bit_idx = 5, + .bit_width = 1, + .type = CLK_MUX, + .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, + }, + [IMX95_CLK_NETCMIX_ENETC1_RMII] = { + .name = 
"enetc1_rmii_sel", + .parent_names = (const char *[]){"ext_enetref", "enetref"}, + .num_parents = 2, + .reg = 4, + .bit_idx = 10, + .bit_width = 1, + .type = CLK_MUX, + .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, + }, +}; + +static const struct imx95_blk_ctl_dev_data netcmix_dev_data = { + .num_clks = ARRAY_SIZE(netxmix_clk_dev_data), + .clk_dev_data = netxmix_clk_dev_data, + .clk_reg_offset = 0, +}; + static int imx95_bc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -419,6 +448,7 @@ static const struct of_device_id imx95_bc_of_match[] = { { .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data }, { .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data }, { .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data }, + { .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data}, { /* Sentinel */ }, }; MODULE_DEVICE_TABLE(of, imx95_bc_of_match); diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c index 08d155feb035a8..efd1ac9d8eeb76 100644 --- a/drivers/clk/imx/clk-imxrt1050.c +++ b/drivers/clk/imx/clk-imxrt1050.c @@ -176,6 +176,7 @@ static struct platform_driver imxrt1050_clk_driver = { }; module_platform_driver(imxrt1050_clk_driver); +MODULE_DESCRIPTION("NXP i.MX RT1050 clock driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jesse Taube "); MODULE_AUTHOR("Giulio Benetti "); diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c index e35496af5ceba0..df83bd939492eb 100644 --- a/drivers/clk/imx/clk.c +++ b/drivers/clk/imx/clk.c @@ -226,4 +226,5 @@ static int __init imx_clk_disable_uart(void) late_initcall_sync(imx_clk_disable_uart); #endif +MODULE_DESCRIPTION("Common clock support for NXP i.MX SoC family"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index adb7ad649a0d2e..aa5202f284f3d1 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -442,6 +442,10 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, _imx8m_clk_hw_composite(name, parent_names, reg, \ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT) +#define imx8m_clk_hw_composite_bus_flags(name, parent_names, reg, flags) \ + _imx8m_clk_hw_composite(name, parent_names, reg, \ + IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT | flags) + #define imx8m_clk_hw_composite_bus_critical(name, parent_names, reg) \ _imx8m_clk_hw_composite(name, parent_names, reg, \ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_CRITICAL) diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c index 5cefc30a843ee5..c5894fc9395e83 100644 --- a/drivers/clk/keystone/sci-clk.c +++ b/drivers/clk/keystone/sci-clk.c @@ -707,7 +707,7 @@ static void ti_sci_clk_remove(struct platform_device *pdev) static struct platform_driver ti_sci_clk_driver = { .probe = ti_sci_clk_probe, - .remove_new = ti_sci_clk_remove, + .remove = ti_sci_clk_remove, .driver = { .name = "ti-sci-clk", .of_match_table = of_match_ptr(ti_sci_clk_of_match), diff --git a/drivers/clk/kunit_clk_fixed_rate_test.dtso b/drivers/clk/kunit_clk_fixed_rate_test.dtso new file mode 100644 index 00000000000000..d838ce766fa214 --- /dev/null +++ b/drivers/clk/kunit_clk_fixed_rate_test.dtso @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "clk-fixed-rate_test.h" + +&{/} { + fixed_50MHz: kunit-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = ; + clock-accuracy = ; + }; + + kunit-clock-consumer { + compatible = "test,single-clk-consumer"; + clocks = 
<&fixed_50MHz>; + }; +}; diff --git a/drivers/clk/kunit_clk_parent_data_test.dtso b/drivers/clk/kunit_clk_parent_data_test.dtso new file mode 100644 index 00000000000000..7d3ed9a5a2e874 --- /dev/null +++ b/drivers/clk/kunit_clk_parent_data_test.dtso @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "clk_parent_data_test.h" + +&{/} { + fixed_50: kunit-clock-50MHz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <50000000>; + clock-output-names = CLK_PARENT_DATA_50MHZ_NAME; + }; + + fixed_parent: kunit-clock-1MHz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <1000000>; + clock-output-names = CLK_PARENT_DATA_1MHZ_NAME; + }; + + kunit-clock-controller { + compatible = "test,clk-parent-data"; + clocks = <&fixed_parent>, <&fixed_50>; + clock-names = CLK_PARENT_DATA_PARENT1, CLK_PARENT_DATA_PARENT2; + #clock-cells = <1>; + }; +}; diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c index 15859132c76945..425c69cfb105a6 100644 --- a/drivers/clk/mediatek/clk-mt2701-aud.c +++ b/drivers/clk/mediatek/clk-mt2701-aud.c @@ -158,7 +158,7 @@ static void clk_mt2701_aud_remove(struct platform_device *pdev) static struct platform_driver clk_mt2701_aud_drv = { .probe = clk_mt2701_aud_probe, - .remove_new = clk_mt2701_aud_remove, + .remove = clk_mt2701_aud_remove, .driver = { .name = "clk-mt2701-aud", .of_match_table = of_match_clk_mt2701_aud, diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c index e203dca70786e2..5da3eabffd3e76 100644 --- a/drivers/clk/mediatek/clk-mt2701-bdp.c +++ b/drivers/clk/mediatek/clk-mt2701-bdp.c @@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_bdp); static struct platform_driver clk_mt2701_bdp_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2701-bdp", .of_match_table = of_match_clk_mt2701_bdp, diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c index f6e1fdc9ee0a0f..608252e73f24df 100644 --- a/drivers/clk/mediatek/clk-mt2701-eth.c +++ b/drivers/clk/mediatek/clk-mt2701-eth.c @@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_eth); static struct platform_driver clk_mt2701_eth_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2701-eth", .of_match_table = of_match_clk_mt2701_eth, diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c index 5e04975433ea60..b3e18b6db75d42 100644 --- a/drivers/clk/mediatek/clk-mt2701-g3d.c +++ b/drivers/clk/mediatek/clk-mt2701-g3d.c @@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_g3d); static struct platform_driver clk_mt2701_g3d_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2701-g3d", .of_match_table = of_match_clk_mt2701_g3d, diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c index c7b38d066403ef..000e00576052b9 100644 --- a/drivers/clk/mediatek/clk-mt2701-hif.c +++ b/drivers/clk/mediatek/clk-mt2701-hif.c @@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_hif); static struct platform_driver clk_mt2701_hif_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2701-hif", 
.of_match_table = of_match_clk_mt2701_hif, diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c index ce13b79a799449..875594bc9dcba8 100644 --- a/drivers/clk/mediatek/clk-mt2701-img.c +++ b/drivers/clk/mediatek/clk-mt2701-img.c @@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_img); static struct platform_driver clk_mt2701_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2701-img", .of_match_table = of_match_clk_mt2701_img, diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c index 903592be56b513..bc68fa718878f9 100644 --- a/drivers/clk/mediatek/clk-mt2701-mm.c +++ b/drivers/clk/mediatek/clk-mt2701-mm.c @@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt2701_mm_id_table); static struct platform_driver clk_mt2701_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt2701-mm", }, diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c index 591091fb215128..94db86f8d0a462 100644 --- a/drivers/clk/mediatek/clk-mt2701-vdec.c +++ b/drivers/clk/mediatek/clk-mt2701-vdec.c @@ -52,7 +52,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_vdec); static struct platform_driver clk_mt2701_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2701-vdec", .of_match_table = of_match_clk_mt2701_vdec, diff --git a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c index 66987d205eee35..a60622d251ff30 100644 --- a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c @@ -156,7 +156,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_apmixed); static struct platform_driver clk_mt2712_apmixed_drv = { .probe = clk_mt2712_apmixed_probe, - .remove_new = clk_mt2712_apmixed_remove, + .remove = clk_mt2712_apmixed_remove, .driver = { .name = "clk-mt2712-apmixed", .of_match_table = of_match_clk_mt2712_apmixed, diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c index 93c5453e43920e..c838311a0c5104 100644 --- a/drivers/clk/mediatek/clk-mt2712-bdp.c +++ b/drivers/clk/mediatek/clk-mt2712-bdp.c @@ -69,7 +69,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_bdp); static struct platform_driver clk_mt2712_bdp_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2712-bdp", .of_match_table = of_match_clk_mt2712_bdp, diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c index 84abd0515fd2a7..bedebf86b0b55c 100644 --- a/drivers/clk/mediatek/clk-mt2712-img.c +++ b/drivers/clk/mediatek/clk-mt2712-img.c @@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_img); static struct platform_driver clk_mt2712_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2712-img", .of_match_table = of_match_clk_mt2712_img, diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c index 89be9082adbae9..1a73474b2f9966 100644 --- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c +++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c @@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_jpgdec); static 
struct platform_driver clk_mt2712_jpgdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2712-jpgdec", .of_match_table = of_match_clk_mt2712_jpgdec, diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c index f7e0d0ebf66508..c1bb45c7469e7f 100644 --- a/drivers/clk/mediatek/clk-mt2712-mfg.c +++ b/drivers/clk/mediatek/clk-mt2712-mfg.c @@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_mfg); static struct platform_driver clk_mt2712_mfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2712-mfg", .of_match_table = of_match_clk_mt2712_mfg, diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c index 248529d3134d19..32ecb949f7eb35 100644 --- a/drivers/clk/mediatek/clk-mt2712-mm.c +++ b/drivers/clk/mediatek/clk-mt2712-mm.c @@ -121,7 +121,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt2712_mm_id_table); static struct platform_driver clk_mt2712_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt2712-mm", }, diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c index a063f1f0aa521b..a766342fbafa0a 100644 --- a/drivers/clk/mediatek/clk-mt2712-vdec.c +++ b/drivers/clk/mediatek/clk-mt2712-vdec.c @@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_vdec); static struct platform_driver clk_mt2712_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2712-vdec", .of_match_table = of_match_clk_mt2712_vdec, diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c index 5b15df0a26f54f..fc193dc8e8f6e9 100644 --- a/drivers/clk/mediatek/clk-mt2712-venc.c +++ b/drivers/clk/mediatek/clk-mt2712-venc.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_venc); static struct platform_driver clk_mt2712_venc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2712-venc", .of_match_table = of_match_clk_mt2712_venc, diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c index 91af45160aa452..964c92130e3caa 100644 --- a/drivers/clk/mediatek/clk-mt2712.c +++ b/drivers/clk/mediatek/clk-mt2712.c @@ -993,7 +993,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712); static struct platform_driver clk_mt2712_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt2712", .of_match_table = of_match_clk_mt2712, diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c index 3e481c697eff3b..2be1458087e668 100644 --- a/drivers/clk/mediatek/clk-mt6765-audio.c +++ b/drivers/clk/mediatek/clk-mt6765-audio.c @@ -69,7 +69,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_audio); static struct platform_driver clk_mt6765_audio_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6765-audio", .of_match_table = of_match_clk_mt6765_audio, diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c index fed9c789d9fa96..2a7f30dc85bbb3 100644 --- 
a/drivers/clk/mediatek/clk-mt6765-cam.c +++ b/drivers/clk/mediatek/clk-mt6765-cam.c @@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_cam); static struct platform_driver clk_mt6765_cam_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6765-cam", .of_match_table = of_match_clk_mt6765_cam, diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c index 34bb89ffd2dd1d..ff857852cfb0a6 100644 --- a/drivers/clk/mediatek/clk-mt6765-img.c +++ b/drivers/clk/mediatek/clk-mt6765-img.c @@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_img); static struct platform_driver clk_mt6765_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6765-img", .of_match_table = of_match_clk_mt6765_img, diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c index 957eb494fee5a7..8261dfd12a9aa3 100644 --- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c +++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c @@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mipi0a); static struct platform_driver clk_mt6765_mipi0a_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6765-mipi0a", .of_match_table = of_match_clk_mt6765_mipi0a, diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c index 099540fcfc761e..e525919f9e819b 100644 --- a/drivers/clk/mediatek/clk-mt6765-mm.c +++ b/drivers/clk/mediatek/clk-mt6765-mm.c @@ -72,7 +72,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mm); static struct platform_driver clk_mt6765_mm_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6765-mm", .of_match_table = of_match_clk_mt6765_mm, diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c index 64f3451d0aeeca..f309d1090cda1e 100644 --- a/drivers/clk/mediatek/clk-mt6765-vcodec.c +++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c @@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_vcodec); static struct platform_driver clk_mt6765_vcodec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6765-vcodec", .of_match_table = of_match_clk_mt6765_vcodec, diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c index 3d23b8e29af66d..8ed318bd776581 100644 --- a/drivers/clk/mediatek/clk-mt6779-aud.c +++ b/drivers/clk/mediatek/clk-mt6779-aud.c @@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_aud); static struct platform_driver clk_mt6779_aud_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6779-aud", .of_match_table = of_match_clk_mt6779_aud, diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c index e76b2c4f548ef4..f397b55606de56 100644 --- a/drivers/clk/mediatek/clk-mt6779-cam.c +++ b/drivers/clk/mediatek/clk-mt6779-cam.c @@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_cam); static struct platform_driver clk_mt6779_cam_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = 
"clk-mt6779-cam", .of_match_table = of_match_clk_mt6779_cam, diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c index 0c5971f3966ab8..474a59a4ca9ece 100644 --- a/drivers/clk/mediatek/clk-mt6779-img.c +++ b/drivers/clk/mediatek/clk-mt6779-img.c @@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_img); static struct platform_driver clk_mt6779_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6779-img", .of_match_table = of_match_clk_mt6779_img, diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c index 9c1a9f1b0f3e4a..c2314654f43ab2 100644 --- a/drivers/clk/mediatek/clk-mt6779-ipe.c +++ b/drivers/clk/mediatek/clk-mt6779-ipe.c @@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_ipe); static struct platform_driver clk_mt6779_ipe_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6779-ipe", .of_match_table = of_match_clk_mt6779_ipe, diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c index 3cc82b59117f60..21793cb6e6e32c 100644 --- a/drivers/clk/mediatek/clk-mt6779-mfg.c +++ b/drivers/clk/mediatek/clk-mt6779-mfg.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_mfg); static struct platform_driver clk_mt6779_mfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6779-mfg", .of_match_table = of_match_clk_mt6779_mfg, diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c index 97d437a6f98fc6..30bbab30838837 100644 --- a/drivers/clk/mediatek/clk-mt6779-mm.c +++ b/drivers/clk/mediatek/clk-mt6779-mm.c @@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt6779_mm_id_table); static struct platform_driver clk_mt6779_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt6779-mm", }, diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c index a9122e627aa585..458d012f023c6c 100644 --- a/drivers/clk/mediatek/clk-mt6779-vdec.c +++ b/drivers/clk/mediatek/clk-mt6779-vdec.c @@ -56,7 +56,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_vdec); static struct platform_driver clk_mt6779_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6779-vdec", .of_match_table = of_match_clk_mt6779_vdec, diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c index 2cd032648eb10e..70cebc2740313a 100644 --- a/drivers/clk/mediatek/clk-mt6779-venc.c +++ b/drivers/clk/mediatek/clk-mt6779-venc.c @@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_venc); static struct platform_driver clk_mt6779_venc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6779-venc", .of_match_table = of_match_clk_mt6779_venc, diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c index 819253b97a02ad..86732f5acf9340 100644 --- a/drivers/clk/mediatek/clk-mt6779.c +++ b/drivers/clk/mediatek/clk-mt6779.c @@ -1305,7 +1305,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779); static struct platform_driver clk_mt6779_infra_drv = { .probe = 
mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6779-infra", .of_match_table = of_match_clk_mt6779_infra, diff --git a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c index 8c65974ed9b870..91665d7f125efd 100644 --- a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c @@ -201,7 +201,7 @@ static void clk_mt6795_apmixed_remove(struct platform_device *pdev) static struct platform_driver clk_mt6795_apmixed_drv = { .probe = clk_mt6795_apmixed_probe, - .remove_new = clk_mt6795_apmixed_remove, + .remove = clk_mt6795_apmixed_remove, .driver = { .name = "clk-mt6795-apmixed", .of_match_table = of_match_clk_mt6795_apmixed, diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c index 06d7fdf3098b0c..e4559569f5b02f 100644 --- a/drivers/clk/mediatek/clk-mt6795-infracfg.c +++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c @@ -144,7 +144,7 @@ static struct platform_driver clk_mt6795_infracfg_drv = { .of_match_table = of_match_clk_mt6795_infracfg, }, .probe = clk_mt6795_infracfg_probe, - .remove_new = clk_mt6795_infracfg_remove, + .remove = clk_mt6795_infracfg_remove, }; module_platform_driver(clk_mt6795_infracfg_drv); diff --git a/drivers/clk/mediatek/clk-mt6795-mfg.c b/drivers/clk/mediatek/clk-mt6795-mfg.c index dff6a6ded837c7..1d658bb19e822f 100644 --- a/drivers/clk/mediatek/clk-mt6795-mfg.c +++ b/drivers/clk/mediatek/clk-mt6795-mfg.c @@ -43,7 +43,7 @@ static struct platform_driver clk_mt6795_mfg_drv = { .of_match_table = of_match_clk_mt6795_mfg, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt6795_mfg_drv); diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c index dd1708d689dc68..733d0e2021fc1b 100644 --- a/drivers/clk/mediatek/clk-mt6795-mm.c +++ b/drivers/clk/mediatek/clk-mt6795-mm.c @@ -93,7 +93,7 @@ static struct platform_driver clk_mt6795_mm_drv = { }, .id_table = clk_mt6795_mm_id_table, .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, }; module_platform_driver(clk_mt6795_mm_drv); diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c index 3f6bea418a5ac1..d48240eb2a67cb 100644 --- a/drivers/clk/mediatek/clk-mt6795-pericfg.c +++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c @@ -153,7 +153,7 @@ static struct platform_driver clk_mt6795_pericfg_drv = { .of_match_table = of_match_clk_mt6795_pericfg, }, .probe = clk_mt6795_pericfg_probe, - .remove_new = clk_mt6795_pericfg_remove, + .remove = clk_mt6795_pericfg_remove, }; module_platform_driver(clk_mt6795_pericfg_drv); diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c index be595853a92586..9c6d63a80b19b8 100644 --- a/drivers/clk/mediatek/clk-mt6795-topckgen.c +++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c @@ -547,7 +547,7 @@ static struct platform_driver clk_mt6795_topckgen_drv = { .of_match_table = of_match_clk_mt6795_topckgen, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt6795_topckgen_drv); diff --git a/drivers/clk/mediatek/clk-mt6795-vdecsys.c b/drivers/clk/mediatek/clk-mt6795-vdecsys.c index 9e91d6f7f5bf40..f2968f859dca09 100644 --- a/drivers/clk/mediatek/clk-mt6795-vdecsys.c +++ 
b/drivers/clk/mediatek/clk-mt6795-vdecsys.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_vdecsys); static struct platform_driver clk_mt6795_vdecsys_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6795-vdecsys", .of_match_table = of_match_clk_mt6795_vdecsys, diff --git a/drivers/clk/mediatek/clk-mt6795-vencsys.c b/drivers/clk/mediatek/clk-mt6795-vencsys.c index bd81e80b744fb8..2f8d48da1a8599 100644 --- a/drivers/clk/mediatek/clk-mt6795-vencsys.c +++ b/drivers/clk/mediatek/clk-mt6795-vencsys.c @@ -43,7 +43,7 @@ static struct platform_driver clk_mt6795_vencsys_drv = { .of_match_table = of_match_clk_mt6795_vencsys, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt6795_vencsys_drv); diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c index 0ec0cf2154dcb8..338c69234f24e8 100644 --- a/drivers/clk/mediatek/clk-mt6797-img.c +++ b/drivers/clk/mediatek/clk-mt6797-img.c @@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_img); static struct platform_driver clk_mt6797_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6797-img", .of_match_table = of_match_clk_mt6797_img, diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c index f5701e965792f5..ddb40b8a1a7d29 100644 --- a/drivers/clk/mediatek/clk-mt6797-mm.c +++ b/drivers/clk/mediatek/clk-mt6797-mm.c @@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt6797_mm_id_table); static struct platform_driver clk_mt6797_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt6797-mm", }, diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c index c967d5e25c7d57..d832f48123f58e 100644 --- a/drivers/clk/mediatek/clk-mt6797-vdec.c +++ b/drivers/clk/mediatek/clk-mt6797-vdec.c @@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_vdec); static struct platform_driver clk_mt6797_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6797-vdec", .of_match_table = of_match_clk_mt6797_vdec, diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c index f6fac5db65b0b1..fd4446f4a9d7b4 100644 --- a/drivers/clk/mediatek/clk-mt6797-venc.c +++ b/drivers/clk/mediatek/clk-mt6797-venc.c @@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_venc); static struct platform_driver clk_mt6797_venc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6797-venc", .of_match_table = of_match_clk_mt6797_venc, diff --git a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c index 1b8f859b6b6ccd..2350592d9a934f 100644 --- a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c @@ -137,7 +137,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_apmixed); static struct platform_driver clk_mt7622_apmixed_drv = { .probe = clk_mt7622_apmixed_probe, - .remove_new = clk_mt7622_apmixed_remove, + .remove = clk_mt7622_apmixed_remove, .driver = { .name = "clk-mt7622-apmixed", .of_match_table = 
of_match_clk_mt7622_apmixed, diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c index b7bf626e4d143f..931a0598e59896 100644 --- a/drivers/clk/mediatek/clk-mt7622-aud.c +++ b/drivers/clk/mediatek/clk-mt7622-aud.c @@ -149,7 +149,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_aud); static struct platform_driver clk_mt7622_aud_drv = { .probe = clk_mt7622_aud_probe, - .remove_new = clk_mt7622_aud_remove, + .remove = clk_mt7622_aud_remove, .driver = { .name = "clk-mt7622-aud", .of_match_table = of_match_clk_mt7622_aud, diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c index fa4876317a8da1..1c1033a92c4661 100644 --- a/drivers/clk/mediatek/clk-mt7622-eth.c +++ b/drivers/clk/mediatek/clk-mt7622-eth.c @@ -79,7 +79,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_eth); static struct platform_driver clk_mt7622_eth_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt7622-eth", .of_match_table = of_match_clk_mt7622_eth, diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c index 8e57582454c2bc..5bcfe12c4fd07f 100644 --- a/drivers/clk/mediatek/clk-mt7622-hif.c +++ b/drivers/clk/mediatek/clk-mt7622-hif.c @@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_hif); static struct platform_driver clk_mt7622_hif_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt7622-hif", .of_match_table = of_match_clk_mt7622_hif, diff --git a/drivers/clk/mediatek/clk-mt7622-infracfg.c b/drivers/clk/mediatek/clk-mt7622-infracfg.c index 6bc911cb29a670..cfdf3b07c3e069 100644 --- a/drivers/clk/mediatek/clk-mt7622-infracfg.c +++ b/drivers/clk/mediatek/clk-mt7622-infracfg.c @@ -118,7 +118,7 @@ static struct platform_driver clk_mt7622_infracfg_drv = { .of_match_table = of_match_clk_mt7622_infracfg, }, .probe = clk_mt7622_infracfg_probe, - .remove_new = clk_mt7622_infracfg_remove, + .remove = clk_mt7622_infracfg_remove, }; module_platform_driver(clk_mt7622_infracfg_drv); diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c index 27781a62a1316b..f62b03abab4fd0 100644 --- a/drivers/clk/mediatek/clk-mt7622.c +++ b/drivers/clk/mediatek/clk-mt7622.c @@ -524,7 +524,7 @@ static struct platform_driver clk_mt7622_drv = { .of_match_table = of_match_clk_mt7622, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt7622_drv) diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c index 96d1a82ad75f9b..3fdc2d7d427405 100644 --- a/drivers/clk/mediatek/clk-mt7629-hif.c +++ b/drivers/clk/mediatek/clk-mt7629-hif.c @@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7629_hif); static struct platform_driver clk_mt7629_hif_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt7629-hif", .of_match_table = of_match_clk_mt7629_hif, diff --git a/drivers/clk/mediatek/clk-mt7981-eth.c b/drivers/clk/mediatek/clk-mt7981-eth.c index e8cb247db0ce8d..906aec9ddff54b 100644 --- a/drivers/clk/mediatek/clk-mt7981-eth.c +++ b/drivers/clk/mediatek/clk-mt7981-eth.c @@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_eth); static struct platform_driver clk_mt7981_eth_drv = { .probe = mtk_clk_simple_probe, - .remove_new = 
mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt7981-eth", .of_match_table = of_match_clk_mt7981_eth, diff --git a/drivers/clk/mediatek/clk-mt7981-infracfg.c b/drivers/clk/mediatek/clk-mt7981-infracfg.c index b2b05515129774..0487b6bb80ae3c 100644 --- a/drivers/clk/mediatek/clk-mt7981-infracfg.c +++ b/drivers/clk/mediatek/clk-mt7981-infracfg.c @@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_infracfg); static struct platform_driver clk_mt7981_infracfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt7981-infracfg", .of_match_table = of_match_clk_mt7981_infracfg, diff --git a/drivers/clk/mediatek/clk-mt7981-topckgen.c b/drivers/clk/mediatek/clk-mt7981-topckgen.c index 72f2f4f30e853c..1943f11e47c13f 100644 --- a/drivers/clk/mediatek/clk-mt7981-topckgen.c +++ b/drivers/clk/mediatek/clk-mt7981-topckgen.c @@ -413,7 +413,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_topckgen); static struct platform_driver clk_mt7981_topckgen_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt7981-topckgen", .of_match_table = of_match_clk_mt7981_topckgen, diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c index 7ab78e0f49a156..4514d42c0829f5 100644 --- a/drivers/clk/mediatek/clk-mt7986-eth.c +++ b/drivers/clk/mediatek/clk-mt7986-eth.c @@ -92,7 +92,7 @@ static struct platform_driver clk_mt7986_eth_drv = { .of_match_table = of_match_clk_mt7986_eth, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt7986_eth_drv); diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c index cb8ab3e53abf3f..732c65e616dea7 100644 --- a/drivers/clk/mediatek/clk-mt7986-infracfg.c +++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c @@ -177,7 +177,7 @@ static struct platform_driver clk_mt7986_infracfg_drv = { .of_match_table = of_match_clk_mt7986_infracfg, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt7986_infracfg_drv); diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c index b644b4ca4710bb..2dd30da306d999 100644 --- a/drivers/clk/mediatek/clk-mt7986-topckgen.c +++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c @@ -306,7 +306,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_topckgen); static struct platform_driver clk_mt7986_topckgen_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt7986-topckgen", .of_match_table = of_match_clk_mt7986_topckgen, diff --git a/drivers/clk/mediatek/clk-mt7988-eth.c b/drivers/clk/mediatek/clk-mt7988-eth.c index adf4a9d39b38f1..7d9463688be244 100644 --- a/drivers/clk/mediatek/clk-mt7988-eth.c +++ b/drivers/clk/mediatek/clk-mt7988-eth.c @@ -142,7 +142,7 @@ static struct platform_driver clk_mt7988_eth_drv = { .of_match_table = of_match_clk_mt7988_eth, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt7988_eth_drv); diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c index 6c2bebabb4de94..ef8267319d91b9 100644 --- 
a/drivers/clk/mediatek/clk-mt7988-infracfg.c +++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c @@ -292,7 +292,7 @@ static struct platform_driver clk_mt7988_infracfg_drv = { .of_match_table = of_match_clk_mt7988_infracfg, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt7988_infracfg_drv); diff --git a/drivers/clk/mediatek/clk-mt7988-topckgen.c b/drivers/clk/mediatek/clk-mt7988-topckgen.c index 7300e9694582f0..50e02cc7a21435 100644 --- a/drivers/clk/mediatek/clk-mt7988-topckgen.c +++ b/drivers/clk/mediatek/clk-mt7988-topckgen.c @@ -315,7 +315,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7988_topckgen); static struct platform_driver clk_mt7988_topckgen_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt7988-topckgen", .of_match_table = of_match_clk_mt7988_topckgen, diff --git a/drivers/clk/mediatek/clk-mt7988-xfipll.c b/drivers/clk/mediatek/clk-mt7988-xfipll.c index 9b9ca5471158be..f941e4d3ef2879 100644 --- a/drivers/clk/mediatek/clk-mt7988-xfipll.c +++ b/drivers/clk/mediatek/clk-mt7988-xfipll.c @@ -74,7 +74,7 @@ static struct platform_driver clk_mt7988_xfipll_drv = { .of_match_table = of_match_clk_mt7988_xfipll, }, .probe = clk_mt7988_xfipll_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt7988_xfipll_drv); diff --git a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c index 41bb2d2e2ea740..bdadc35c64cbd8 100644 --- a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c @@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8135_apmixed); static struct platform_driver clk_mt8135_apmixed_drv = { .probe = clk_mt8135_apmixed_probe, - .remove_new = clk_mt8135_apmixed_remove, + .remove = clk_mt8135_apmixed_remove, .driver = { .name = "clk-mt8135-apmixed", .of_match_table = of_match_clk_mt8135_apmixed, diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c index 019af88d7f9c32..084e48a554c262 100644 --- a/drivers/clk/mediatek/clk-mt8135.c +++ b/drivers/clk/mediatek/clk-mt8135.c @@ -558,7 +558,7 @@ static struct platform_driver clk_mt8135_drv = { .of_match_table = of_match_clk_mt8135, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt8135_drv); diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c index d1a42ff549c107..d6cff4bdf4cb97 100644 --- a/drivers/clk/mediatek/clk-mt8167-aud.c +++ b/drivers/clk/mediatek/clk-mt8167-aud.c @@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_audsys); static struct platform_driver clk_mt8167_audsys_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8167-audsys", .of_match_table = of_match_clk_mt8167_audsys, diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c index 888ac3bdeacb21..42d38ae94b69e2 100644 --- a/drivers/clk/mediatek/clk-mt8167-img.c +++ b/drivers/clk/mediatek/clk-mt8167-img.c @@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_imgsys); static struct platform_driver clk_mt8167_imgsys_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = 
"clk-mt8167-imgsys", .of_match_table = of_match_clk_mt8167_imgsys, diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c index e873766f130c31..1ef37a3e685164 100644 --- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c +++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_mfgcfg); static struct platform_driver clk_mt8167_mfgcfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8167-mfgcfg", .of_match_table = of_match_clk_mt8167_mfgcfg, diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c index 38deedffaacf79..cef66ee836f3fa 100644 --- a/drivers/clk/mediatek/clk-mt8167-mm.c +++ b/drivers/clk/mediatek/clk-mt8167-mm.c @@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8167_mm_id_table); static struct platform_driver clk_mt8167_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8167-mm", }, diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c index c3c892bb83343e..e3769bc556a96a 100644 --- a/drivers/clk/mediatek/clk-mt8167-vdec.c +++ b/drivers/clk/mediatek/clk-mt8167-vdec.c @@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_vdec); static struct platform_driver clk_mt8167_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8167-vdecsys", .of_match_table = of_match_clk_mt8167_vdec, diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c index 5c94995f859ce4..c64d918c37dead 100644 --- a/drivers/clk/mediatek/clk-mt8167.c +++ b/drivers/clk/mediatek/clk-mt8167.c @@ -887,7 +887,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167); static struct platform_driver clk_mt8167_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8167", .of_match_table = of_match_clk_mt8167, diff --git a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c index 6cab483b8e1ed3..95385bb67d5511 100644 --- a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c @@ -207,7 +207,7 @@ static void clk_mt8173_apmixed_remove(struct platform_device *pdev) static struct platform_driver clk_mt8173_apmixed_drv = { .probe = clk_mt8173_apmixed_probe, - .remove_new = clk_mt8173_apmixed_remove, + .remove = clk_mt8173_apmixed_remove, .driver = { .name = "clk-mt8173-apmixed", .of_match_table = of_match_clk_mt8173_apmixed, diff --git a/drivers/clk/mediatek/clk-mt8173-img.c b/drivers/clk/mediatek/clk-mt8173-img.c index 1011b9ab3daddb..6db2b9ab2bc919 100644 --- a/drivers/clk/mediatek/clk-mt8173-img.c +++ b/drivers/clk/mediatek/clk-mt8173-img.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_imgsys); static struct platform_driver clk_mt8173_vdecsys_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8173-imgsys", .of_match_table = of_match_clk_mt8173_imgsys, diff --git a/drivers/clk/mediatek/clk-mt8173-infracfg.c b/drivers/clk/mediatek/clk-mt8173-infracfg.c index ecc8b0063ea560..fa2d1d557e04f8 100644 --- a/drivers/clk/mediatek/clk-mt8173-infracfg.c +++ b/drivers/clk/mediatek/clk-mt8173-infracfg.c @@ -156,7 +156,7 @@ 
static struct platform_driver clk_mt8173_infracfg_drv = { .of_match_table = of_match_clk_mt8173_infracfg, }, .probe = clk_mt8173_infracfg_probe, - .remove_new = clk_mt8173_infracfg_remove, + .remove = clk_mt8173_infracfg_remove, }; module_platform_driver(clk_mt8173_infracfg_drv); diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c index fd903bee328fff..26d27250b914b0 100644 --- a/drivers/clk/mediatek/clk-mt8173-mm.c +++ b/drivers/clk/mediatek/clk-mt8173-mm.c @@ -106,7 +106,7 @@ static struct platform_driver clk_mt8173_mm_drv = { }, .id_table = clk_mt8173_mm_id_table, .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, }; module_platform_driver(clk_mt8173_mm_drv); diff --git a/drivers/clk/mediatek/clk-mt8173-pericfg.c b/drivers/clk/mediatek/clk-mt8173-pericfg.c index 783efed3f25421..bebda74d0f4391 100644 --- a/drivers/clk/mediatek/clk-mt8173-pericfg.c +++ b/drivers/clk/mediatek/clk-mt8173-pericfg.c @@ -115,7 +115,7 @@ static struct platform_driver clk_mt8173_pericfg_drv = { .of_match_table = of_match_clk_mt8173_pericfg, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt8173_pericfg_drv); diff --git a/drivers/clk/mediatek/clk-mt8173-topckgen.c b/drivers/clk/mediatek/clk-mt8173-topckgen.c index 6bb7ffd7448728..42c37541cebb44 100644 --- a/drivers/clk/mediatek/clk-mt8173-topckgen.c +++ b/drivers/clk/mediatek/clk-mt8173-topckgen.c @@ -646,7 +646,7 @@ static struct platform_driver clk_mt8173_topckgen_drv = { .of_match_table = of_match_clk_mt8173_topckgen, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt8173_topckgen_drv); diff --git a/drivers/clk/mediatek/clk-mt8173-vdecsys.c b/drivers/clk/mediatek/clk-mt8173-vdecsys.c index 011e3812156ff2..625ca0b09cc225 100644 --- a/drivers/clk/mediatek/clk-mt8173-vdecsys.c +++ b/drivers/clk/mediatek/clk-mt8173-vdecsys.c @@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_vdecsys); static struct platform_driver clk_mt8173_vdecsys_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8173-vdecsys", .of_match_table = of_match_clk_mt8173_vdecsys, diff --git a/drivers/clk/mediatek/clk-mt8173-vencsys.c b/drivers/clk/mediatek/clk-mt8173-vencsys.c index 1bf84ae6a0bcb6..87755dd1a337f9 100644 --- a/drivers/clk/mediatek/clk-mt8173-vencsys.c +++ b/drivers/clk/mediatek/clk-mt8173-vencsys.c @@ -57,7 +57,7 @@ static struct platform_driver clk_mt8173_vencsys_drv = { .of_match_table = of_match_clk_mt8173_vencsys, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt8173_vencsys_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c index 30a20e8ba84b32..011d329ad30e96 100644 --- a/drivers/clk/mediatek/clk-mt8183-audio.c +++ b/drivers/clk/mediatek/clk-mt8183-audio.c @@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_audio); static struct platform_driver clk_mt8183_audio_drv = { .probe = clk_mt8183_audio_probe, - .remove_new = clk_mt8183_audio_remove, + .remove = clk_mt8183_audio_remove, .driver = { .name = "clk-mt8183-audio", .of_match_table = of_match_clk_mt8183_audio, diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c index 
f16c3aa3c911d8..c7642085f8deb1 100644 --- a/drivers/clk/mediatek/clk-mt8183-cam.c +++ b/drivers/clk/mediatek/clk-mt8183-cam.c @@ -51,7 +51,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_cam); static struct platform_driver clk_mt8183_cam_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-cam", .of_match_table = of_match_clk_mt8183_cam, diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c index 32ee6a1867fc51..ee92459c74ca53 100644 --- a/drivers/clk/mediatek/clk-mt8183-img.c +++ b/drivers/clk/mediatek/clk-mt8183-img.c @@ -51,7 +51,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_img); static struct platform_driver clk_mt8183_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-img", .of_match_table = of_match_clk_mt8183_img, diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c index dc2916c4e0dcdc..6831747f123b65 100644 --- a/drivers/clk/mediatek/clk-mt8183-ipu0.c +++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core0); static struct platform_driver clk_mt8183_ipu_core0_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-ipu_core0", .of_match_table = of_match_clk_mt8183_ipu_core0, diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c index 9c63e4c592d05f..ecf434432e7bb1 100644 --- a/drivers/clk/mediatek/clk-mt8183-ipu1.c +++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core1); static struct platform_driver clk_mt8183_ipu_core1_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-ipu_core1", .of_match_table = of_match_clk_mt8183_ipu_core1, diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c index 54a50eda1719be..c1a770ba3245bb 100644 --- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c +++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c @@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_adl); static struct platform_driver clk_mt8183_ipu_adl_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-ipu_adl", .of_match_table = of_match_clk_mt8183_ipu_adl, diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c index 99a817d3be6cc6..f0e72e6edb7a6d 100644 --- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c +++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c @@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_conn); static struct platform_driver clk_mt8183_ipu_conn_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-ipu_conn", .of_match_table = of_match_clk_mt8183_ipu_conn, diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c index b1e802bbfaef3f..be44889783ff2a 100644 --- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c +++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_mfg); static struct platform_driver 
clk_mt8183_mfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-mfg", .of_match_table = of_match_clk_mt8183_mfg, diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c index 59acf1e2951ba0..0f132f05fa8b22 100644 --- a/drivers/clk/mediatek/clk-mt8183-mm.c +++ b/drivers/clk/mediatek/clk-mt8183-mm.c @@ -95,7 +95,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8183_mm_id_table); static struct platform_driver clk_mt8183_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8183-mm", }, diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c index 48a8ef3f69aa75..43bf34077b16a7 100644 --- a/drivers/clk/mediatek/clk-mt8183-vdec.c +++ b/drivers/clk/mediatek/clk-mt8183-vdec.c @@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_vdec); static struct platform_driver clk_mt8183_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-vdec", .of_match_table = of_match_clk_mt8183_vdec, diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c index 8f36688dfa14ef..c3d99b3b8ff799 100644 --- a/drivers/clk/mediatek/clk-mt8183-venc.c +++ b/drivers/clk/mediatek/clk-mt8183-venc.c @@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_venc); static struct platform_driver clk_mt8183_venc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183-venc", .of_match_table = of_match_clk_mt8183_venc, diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c index 27eee4ef2c0ff5..aa7cc7709b2d67 100644 --- a/drivers/clk/mediatek/clk-mt8183.c +++ b/drivers/clk/mediatek/clk-mt8183.c @@ -899,7 +899,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183); static struct platform_driver clk_mt8183_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8183", .of_match_table = of_match_clk_mt8183, diff --git a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c index 6f7127003e4ff5..4b2b16578232d9 100644 --- a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c @@ -185,7 +185,7 @@ static void clk_mt8186_apmixed_remove(struct platform_device *pdev) static struct platform_driver clk_mt8186_apmixed_drv = { .probe = clk_mt8186_apmixed_probe, - .remove_new = clk_mt8186_apmixed_remove, + .remove = clk_mt8186_apmixed_remove, .driver = { .name = "clk-mt8186-apmixed", .of_match_table = of_match_clk_mt8186_apmixed, diff --git a/drivers/clk/mediatek/clk-mt8186-cam.c b/drivers/clk/mediatek/clk-mt8186-cam.c index 0082f0d9286bf5..2ddd5f90377fba 100644 --- a/drivers/clk/mediatek/clk-mt8186-cam.c +++ b/drivers/clk/mediatek/clk-mt8186-cam.c @@ -82,7 +82,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_cam); static struct platform_driver clk_mt8186_cam_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-cam", .of_match_table = of_match_clk_mt8186_cam, diff --git a/drivers/clk/mediatek/clk-mt8186-img.c b/drivers/clk/mediatek/clk-mt8186-img.c index 0583a18805cef1..5e466e1f5f4440 100644 --- 
a/drivers/clk/mediatek/clk-mt8186-img.c +++ b/drivers/clk/mediatek/clk-mt8186-img.c @@ -60,7 +60,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_img); static struct platform_driver clk_mt8186_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-img", .of_match_table = of_match_clk_mt8186_img, diff --git a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c index 2a2a6bb2320553..75abb871044ce8 100644 --- a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c +++ b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c @@ -59,7 +59,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_imp_iic_wrap); static struct platform_driver clk_mt8186_imp_iic_wrap_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-imp_iic_wrap", .of_match_table = of_match_clk_mt8186_imp_iic_wrap, diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c index d7239875fb15bd..8d9d86a510ffd4 100644 --- a/drivers/clk/mediatek/clk-mt8186-infra_ao.c +++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c @@ -231,7 +231,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_infra_ao); static struct platform_driver clk_mt8186_infra_ao_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-infra-ao", .of_match_table = of_match_clk_mt8186_infra_ao, diff --git a/drivers/clk/mediatek/clk-mt8186-ipe.c b/drivers/clk/mediatek/clk-mt8186-ipe.c index 77bdd28065174f..f66a0aeaa6b318 100644 --- a/drivers/clk/mediatek/clk-mt8186-ipe.c +++ b/drivers/clk/mediatek/clk-mt8186-ipe.c @@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_ipe); static struct platform_driver clk_mt8186_ipe_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-ipe", .of_match_table = of_match_clk_mt8186_ipe, diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c index eb54ccb77b74cb..d1640e4dc2adbd 100644 --- a/drivers/clk/mediatek/clk-mt8186-mcu.c +++ b/drivers/clk/mediatek/clk-mt8186-mcu.c @@ -60,7 +60,7 @@ static struct platform_driver clk_mt8186_mcu_drv = { .of_match_table = of_match_clk_mt8186_mcu, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt8186_mcu_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-mdp.c b/drivers/clk/mediatek/clk-mt8186-mdp.c index fb47d6bacf7fb5..01561cf902c477 100644 --- a/drivers/clk/mediatek/clk-mt8186-mdp.c +++ b/drivers/clk/mediatek/clk-mt8186-mdp.c @@ -72,7 +72,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mdp); static struct platform_driver clk_mt8186_mdp_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-mdp", .of_match_table = of_match_clk_mt8186_mdp, diff --git a/drivers/clk/mediatek/clk-mt8186-mfg.c b/drivers/clk/mediatek/clk-mt8186-mfg.c index 64cdee1fddd4ca..3f21b1f222e1a5 100644 --- a/drivers/clk/mediatek/clk-mt8186-mfg.c +++ b/drivers/clk/mediatek/clk-mt8186-mfg.c @@ -41,7 +41,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mfg); static struct platform_driver clk_mt8186_mfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = 
mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-mfg", .of_match_table = of_match_clk_mt8186_mfg, diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c index 403566187e6487..fc8488c448667e 100644 --- a/drivers/clk/mediatek/clk-mt8186-mm.c +++ b/drivers/clk/mediatek/clk-mt8186-mm.c @@ -71,7 +71,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8186_mm_id_table); static struct platform_driver clk_mt8186_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8186-mm", }, diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c index eb9f51e77ca8bb..14f1cbdbbd13db 100644 --- a/drivers/clk/mediatek/clk-mt8186-topckgen.c +++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c @@ -725,7 +725,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_topck); static struct platform_driver clk_mt8186_topck_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-topck", .of_match_table = of_match_clk_mt8186_topck, diff --git a/drivers/clk/mediatek/clk-mt8186-vdec.c b/drivers/clk/mediatek/clk-mt8186-vdec.c index 25465704ddfba1..522b8c952969b5 100644 --- a/drivers/clk/mediatek/clk-mt8186-vdec.c +++ b/drivers/clk/mediatek/clk-mt8186-vdec.c @@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_vdec); static struct platform_driver clk_mt8186_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-vdec", .of_match_table = of_match_clk_mt8186_vdec, diff --git a/drivers/clk/mediatek/clk-mt8186-venc.c b/drivers/clk/mediatek/clk-mt8186-venc.c index 647dd66a3ce0b6..c0c98bc7511255 100644 --- a/drivers/clk/mediatek/clk-mt8186-venc.c +++ b/drivers/clk/mediatek/clk-mt8186-venc.c @@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_venc); static struct platform_driver clk_mt8186_venc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-venc", .of_match_table = of_match_clk_mt8186_venc, diff --git a/drivers/clk/mediatek/clk-mt8186-wpe.c b/drivers/clk/mediatek/clk-mt8186-wpe.c index 47f96e088361fd..babd7b2778c2c4 100644 --- a/drivers/clk/mediatek/clk-mt8186-wpe.c +++ b/drivers/clk/mediatek/clk-mt8186-wpe.c @@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_wpe); static struct platform_driver clk_mt8186_wpe_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8186-wpe", .of_match_table = of_match_clk_mt8186_wpe, diff --git a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c index 5ac035bbe68455..dcde2187d24a09 100644 --- a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c +++ b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c @@ -40,7 +40,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_adsp_audio26m); static struct platform_driver clk_mt8188_adsp_audio26m_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-adsp_audio26m", .of_match_table = of_match_clk_mt8188_adsp_audio26m, diff --git a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c index 85d573d960815b..21d7a9a2ab1af6 100644 --- 
a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c @@ -145,7 +145,7 @@ static void clk_mt8188_apmixed_remove(struct platform_device *pdev) static struct platform_driver clk_mt8188_apmixed_drv = { .probe = clk_mt8188_apmixed_probe, - .remove_new = clk_mt8188_apmixed_remove, + .remove = clk_mt8188_apmixed_remove, .driver = { .name = "clk-mt8188-apmixed", .of_match_table = of_match_clk_mt8188_apmixed, diff --git a/drivers/clk/mediatek/clk-mt8188-cam.c b/drivers/clk/mediatek/clk-mt8188-cam.c index a6a6581f0461b6..7500bd25387f3e 100644 --- a/drivers/clk/mediatek/clk-mt8188-cam.c +++ b/drivers/clk/mediatek/clk-mt8188-cam.c @@ -109,7 +109,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_cam); static struct platform_driver clk_mt8188_cam_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-cam", .of_match_table = of_match_clk_mt8188_cam, diff --git a/drivers/clk/mediatek/clk-mt8188-ccu.c b/drivers/clk/mediatek/clk-mt8188-ccu.c index 9532fc652f01fa..1566fc437ea3f1 100644 --- a/drivers/clk/mediatek/clk-mt8188-ccu.c +++ b/drivers/clk/mediatek/clk-mt8188-ccu.c @@ -39,7 +39,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ccu); static struct platform_driver clk_mt8188_ccu_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-ccu", .of_match_table = of_match_clk_mt8188_ccu, diff --git a/drivers/clk/mediatek/clk-mt8188-img.c b/drivers/clk/mediatek/clk-mt8188-img.c index 00ad6d7884aeca..cb2fbd4136b925 100644 --- a/drivers/clk/mediatek/clk-mt8188-img.c +++ b/drivers/clk/mediatek/clk-mt8188-img.c @@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imgsys_main); static struct platform_driver clk_mt8188_imgsys_main_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-imgsys_main", .of_match_table = of_match_clk_mt8188_imgsys_main, diff --git a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c index 7b713f4cd6621d..14a4b575b5839b 100644 --- a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c +++ b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c @@ -71,7 +71,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imp_iic_wrap); static struct platform_driver clk_mt8188_imp_iic_wrap_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-imp_iic_wrap", .of_match_table = of_match_clk_mt8188_imp_iic_wrap, diff --git a/drivers/clk/mediatek/clk-mt8188-infra_ao.c b/drivers/clk/mediatek/clk-mt8188-infra_ao.c index face3e1914649d..b9bc8fcc2adeae 100644 --- a/drivers/clk/mediatek/clk-mt8188-infra_ao.c +++ b/drivers/clk/mediatek/clk-mt8188-infra_ao.c @@ -213,7 +213,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_infra_ao); static struct platform_driver clk_mt8188_infra_ao_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-infra_ao", .of_match_table = of_match_clk_mt8188_infra_ao, diff --git a/drivers/clk/mediatek/clk-mt8188-ipe.c b/drivers/clk/mediatek/clk-mt8188-ipe.c index fa439af3435912..8f1933b71e2810 100644 --- a/drivers/clk/mediatek/clk-mt8188-ipe.c +++ b/drivers/clk/mediatek/clk-mt8188-ipe.c @@ -41,7 +41,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ipe); static struct 
platform_driver clk_mt8188_ipe_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-ipe", .of_match_table = of_match_clk_mt8188_ipe, diff --git a/drivers/clk/mediatek/clk-mt8188-mfg.c b/drivers/clk/mediatek/clk-mt8188-mfg.c index ec562e7d459d29..2ddfb1a3de471e 100644 --- a/drivers/clk/mediatek/clk-mt8188-mfg.c +++ b/drivers/clk/mediatek/clk-mt8188-mfg.c @@ -38,7 +38,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_mfgcfg); static struct platform_driver clk_mt8188_mfgcfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-mfgcfg", .of_match_table = of_match_clk_mt8188_mfgcfg, diff --git a/drivers/clk/mediatek/clk-mt8188-peri_ao.c b/drivers/clk/mediatek/clk-mt8188-peri_ao.c index e4339885b0626b..639865335fc81a 100644 --- a/drivers/clk/mediatek/clk-mt8188-peri_ao.c +++ b/drivers/clk/mediatek/clk-mt8188-peri_ao.c @@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_peri_ao); static struct platform_driver clk_mt8188_peri_ao_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-peri_ao", .of_match_table = of_match_clk_mt8188_peri_ao, diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c index 2ccc8a1c98f9b9..c4baf4076ed64a 100644 --- a/drivers/clk/mediatek/clk-mt8188-topckgen.c +++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c @@ -1347,7 +1347,7 @@ static void clk_mt8188_topck_remove(struct platform_device *pdev) static struct platform_driver clk_mt8188_topck_drv = { .probe = clk_mt8188_topck_probe, - .remove_new = clk_mt8188_topck_remove, + .remove = clk_mt8188_topck_remove, .driver = { .name = "clk-mt8188-topck", .of_match_table = of_match_clk_mt8188_topck, diff --git a/drivers/clk/mediatek/clk-mt8188-vdec.c b/drivers/clk/mediatek/clk-mt8188-vdec.c index bf388997c3f85d..f48f0716d7c2ba 100644 --- a/drivers/clk/mediatek/clk-mt8188-vdec.c +++ b/drivers/clk/mediatek/clk-mt8188-vdec.c @@ -81,7 +81,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_vdec); static struct platform_driver clk_mt8188_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-vdec", .of_match_table = of_match_clk_mt8188_vdec, diff --git a/drivers/clk/mediatek/clk-mt8188-vdo0.c b/drivers/clk/mediatek/clk-mt8188-vdo0.c index 935371fbf1d2e7..017d6662589bfa 100644 --- a/drivers/clk/mediatek/clk-mt8188-vdo0.c +++ b/drivers/clk/mediatek/clk-mt8188-vdo0.c @@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo0_id_table); static struct platform_driver clk_mt8188_vdo0_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8188-vdo0", }, diff --git a/drivers/clk/mediatek/clk-mt8188-vdo1.c b/drivers/clk/mediatek/clk-mt8188-vdo1.c index fb24c9026fd899..4fa355f8f0c2b2 100644 --- a/drivers/clk/mediatek/clk-mt8188-vdo1.c +++ b/drivers/clk/mediatek/clk-mt8188-vdo1.c @@ -144,7 +144,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo1_id_table); static struct platform_driver clk_mt8188_vdo1_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8188-vdo1", }, diff --git a/drivers/clk/mediatek/clk-mt8188-venc.c b/drivers/clk/mediatek/clk-mt8188-venc.c index 
4df8d4e05159f4..01e971545506c6 100644 --- a/drivers/clk/mediatek/clk-mt8188-venc.c +++ b/drivers/clk/mediatek/clk-mt8188-venc.c @@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_venc1); static struct platform_driver clk_mt8188_venc1_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-venc1", .of_match_table = of_match_clk_mt8188_venc1, diff --git a/drivers/clk/mediatek/clk-mt8188-vpp0.c b/drivers/clk/mediatek/clk-mt8188-vpp0.c index 3107921087932d..cd2579b7b9c332 100644 --- a/drivers/clk/mediatek/clk-mt8188-vpp0.c +++ b/drivers/clk/mediatek/clk-mt8188-vpp0.c @@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp0_id_table); static struct platform_driver clk_mt8188_vpp0_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8188-vpp0", }, diff --git a/drivers/clk/mediatek/clk-mt8188-vpp1.c b/drivers/clk/mediatek/clk-mt8188-vpp1.c index 0aa10aaa02929e..0e1bd8306e8a62 100644 --- a/drivers/clk/mediatek/clk-mt8188-vpp1.c +++ b/drivers/clk/mediatek/clk-mt8188-vpp1.c @@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp1_id_table); static struct platform_driver clk_mt8188_vpp1_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8188-vpp1", }, diff --git a/drivers/clk/mediatek/clk-mt8188-wpe.c b/drivers/clk/mediatek/clk-mt8188-wpe.c index fbac440363cc46..d709bb1ee1d62c 100644 --- a/drivers/clk/mediatek/clk-mt8188-wpe.c +++ b/drivers/clk/mediatek/clk-mt8188-wpe.c @@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_wpe); static struct platform_driver clk_mt8188_wpe_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8188-wpe", .of_match_table = of_match_clk_mt8188_wpe, diff --git a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c index 3590932acc63a2..0b66a27e4d5ac6 100644 --- a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c @@ -206,7 +206,7 @@ static struct platform_driver clk_mt8192_apmixed_drv = { .of_match_table = of_match_clk_mt8192_apmixed, }, .probe = clk_mt8192_apmixed_probe, - .remove_new = clk_mt8192_apmixed_remove, + .remove = clk_mt8192_apmixed_remove, }; module_platform_driver(clk_mt8192_apmixed_drv); MODULE_DESCRIPTION("MediaTek MT8192 apmixed clocks driver"); diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c index b438ebad998d3a..f3ebf8713fbb75 100644 --- a/drivers/clk/mediatek/clk-mt8192-aud.c +++ b/drivers/clk/mediatek/clk-mt8192-aud.c @@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_aud); static struct platform_driver clk_mt8192_aud_drv = { .probe = clk_mt8192_aud_probe, - .remove_new = clk_mt8192_aud_remove, + .remove = clk_mt8192_aud_remove, .driver = { .name = "clk-mt8192-aud", .of_match_table = of_match_clk_mt8192_aud, diff --git a/drivers/clk/mediatek/clk-mt8192-cam.c b/drivers/clk/mediatek/clk-mt8192-cam.c index 3eed4a7b6d8e98..891d2f88d9cf79 100644 --- a/drivers/clk/mediatek/clk-mt8192-cam.c +++ b/drivers/clk/mediatek/clk-mt8192-cam.c @@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_cam); static struct platform_driver clk_mt8192_cam_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = 
mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-cam", .of_match_table = of_match_clk_mt8192_cam, diff --git a/drivers/clk/mediatek/clk-mt8192-img.c b/drivers/clk/mediatek/clk-mt8192-img.c index 13a4353327528b..c08e831125a5bf 100644 --- a/drivers/clk/mediatek/clk-mt8192-img.c +++ b/drivers/clk/mediatek/clk-mt8192-img.c @@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_img); static struct platform_driver clk_mt8192_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-img", .of_match_table = of_match_clk_mt8192_img, diff --git a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c index 45585f2edd50cc..0f9530d9263c43 100644 --- a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c +++ b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c @@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_imp_iic_wrap); static struct platform_driver clk_mt8192_imp_iic_wrap_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-imp_iic_wrap", .of_match_table = of_match_clk_mt8192_imp_iic_wrap, diff --git a/drivers/clk/mediatek/clk-mt8192-ipe.c b/drivers/clk/mediatek/clk-mt8192-ipe.c index da2e2d83cd253d..c932b8b20edc4b 100644 --- a/drivers/clk/mediatek/clk-mt8192-ipe.c +++ b/drivers/clk/mediatek/clk-mt8192-ipe.c @@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_ipe); static struct platform_driver clk_mt8192_ipe_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-ipe", .of_match_table = of_match_clk_mt8192_ipe, diff --git a/drivers/clk/mediatek/clk-mt8192-mdp.c b/drivers/clk/mediatek/clk-mt8192-mdp.c index be674d6c31d7be..30334ebca864e9 100644 --- a/drivers/clk/mediatek/clk-mt8192-mdp.c +++ b/drivers/clk/mediatek/clk-mt8192-mdp.c @@ -74,7 +74,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mdp); static struct platform_driver clk_mt8192_mdp_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-mdp", .of_match_table = of_match_clk_mt8192_mdp, diff --git a/drivers/clk/mediatek/clk-mt8192-mfg.c b/drivers/clk/mediatek/clk-mt8192-mfg.c index 2da969f4ca6bea..9d176659e8a273 100644 --- a/drivers/clk/mediatek/clk-mt8192-mfg.c +++ b/drivers/clk/mediatek/clk-mt8192-mfg.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mfg); static struct platform_driver clk_mt8192_mfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-mfg", .of_match_table = of_match_clk_mt8192_mfg, diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c index 2b9c1c4524c28c..bda4406e13042a 100644 --- a/drivers/clk/mediatek/clk-mt8192-mm.c +++ b/drivers/clk/mediatek/clk-mt8192-mm.c @@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8192_mm_id_table); static struct platform_driver clk_mt8192_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8192-mm", }, diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c index bc5ce987b76c9e..04a66220f26983 100644 --- a/drivers/clk/mediatek/clk-mt8192-msdc.c +++ b/drivers/clk/mediatek/clk-mt8192-msdc.c @@ -56,7 +56,7 
@@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_msdc); static struct platform_driver clk_mt8192_msdc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-msdc", .of_match_table = of_match_clk_mt8192_msdc, diff --git a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c index e017d30a8832dd..f9e4c16573e2a7 100644 --- a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c +++ b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c @@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_scp_adsp); static struct platform_driver clk_mt8192_scp_adsp_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-scp_adsp", .of_match_table = of_match_clk_mt8192_scp_adsp, diff --git a/drivers/clk/mediatek/clk-mt8192-vdec.c b/drivers/clk/mediatek/clk-mt8192-vdec.c index fcb34b1dcdab75..9c10161807b2e2 100644 --- a/drivers/clk/mediatek/clk-mt8192-vdec.c +++ b/drivers/clk/mediatek/clk-mt8192-vdec.c @@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_vdec); static struct platform_driver clk_mt8192_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-vdec", .of_match_table = of_match_clk_mt8192_vdec, diff --git a/drivers/clk/mediatek/clk-mt8192-venc.c b/drivers/clk/mediatek/clk-mt8192-venc.c index 98d58a9397cd58..0b01e2b7f036fb 100644 --- a/drivers/clk/mediatek/clk-mt8192-venc.c +++ b/drivers/clk/mediatek/clk-mt8192-venc.c @@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_venc); static struct platform_driver clk_mt8192_venc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8192-venc", .of_match_table = of_match_clk_mt8192_venc, diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c index bce2298ebc8d3c..50b43807c60cfb 100644 --- a/drivers/clk/mediatek/clk-mt8192.c +++ b/drivers/clk/mediatek/clk-mt8192.c @@ -1026,7 +1026,7 @@ static struct platform_driver clk_mt8192_drv = { .of_match_table = of_match_clk_mt8192, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt8192_drv); diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c index 049ae8123e3450..282a3137dc8941 100644 --- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c @@ -223,7 +223,7 @@ static void clk_mt8195_apmixed_remove(struct platform_device *pdev) static struct platform_driver clk_mt8195_apmixed_drv = { .probe = clk_mt8195_apmixed_probe, - .remove_new = clk_mt8195_apmixed_remove, + .remove = clk_mt8195_apmixed_remove, .driver = { .name = "clk-mt8195-apmixed", .of_match_table = of_match_clk_mt8195_apmixed, diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c index b1b562e44cb460..8b45a3fad02f18 100644 --- a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c +++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c @@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_apusys_pll); static struct platform_driver clk_mt8195_apusys_pll_drv = { .probe = clk_mt8195_apusys_pll_probe, - .remove_new = clk_mt8195_apusys_pll_remove, + .remove = clk_mt8195_apusys_pll_remove, .driver = { .name = 
"clk-mt8195-apusys_pll", .of_match_table = of_match_clk_mt8195_apusys_pll, diff --git a/drivers/clk/mediatek/clk-mt8195-cam.c b/drivers/clk/mediatek/clk-mt8195-cam.c index 7c8f7781761685..02cb20c2948b36 100644 --- a/drivers/clk/mediatek/clk-mt8195-cam.c +++ b/drivers/clk/mediatek/clk-mt8195-cam.c @@ -135,7 +135,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_cam); static struct platform_driver clk_mt8195_cam_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-cam", .of_match_table = of_match_clk_mt8195_cam, diff --git a/drivers/clk/mediatek/clk-mt8195-ccu.c b/drivers/clk/mediatek/clk-mt8195-ccu.c index f78afd7b6ade49..22cd1cb070f137 100644 --- a/drivers/clk/mediatek/clk-mt8195-ccu.c +++ b/drivers/clk/mediatek/clk-mt8195-ccu.c @@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ccu); static struct platform_driver clk_mt8195_ccu_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-ccu", .of_match_table = of_match_clk_mt8195_ccu, diff --git a/drivers/clk/mediatek/clk-mt8195-img.c b/drivers/clk/mediatek/clk-mt8195-img.c index a59c082ef5224b..11beba4b2ac275 100644 --- a/drivers/clk/mediatek/clk-mt8195-img.c +++ b/drivers/clk/mediatek/clk-mt8195-img.c @@ -89,7 +89,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_img); static struct platform_driver clk_mt8195_img_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-img", .of_match_table = of_match_clk_mt8195_img, diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c index 54557f1b06811c..8711b18b15760b 100644 --- a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c +++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c @@ -59,7 +59,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_imp_iic_wrap); static struct platform_driver clk_mt8195_imp_iic_wrap_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-imp_iic_wrap", .of_match_table = of_match_clk_mt8195_imp_iic_wrap, diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c index 165fe92c6f61b3..bb648a88e43afd 100644 --- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c +++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c @@ -233,7 +233,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_infra_ao); static struct platform_driver clk_mt8195_infra_ao_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-infra_ao", .of_match_table = of_match_clk_mt8195_infra_ao, diff --git a/drivers/clk/mediatek/clk-mt8195-ipe.c b/drivers/clk/mediatek/clk-mt8195-ipe.c index 38a23d88370bd4..b1af00348a86ea 100644 --- a/drivers/clk/mediatek/clk-mt8195-ipe.c +++ b/drivers/clk/mediatek/clk-mt8195-ipe.c @@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ipe); static struct platform_driver clk_mt8195_ipe_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-ipe", .of_match_table = of_match_clk_mt8195_ipe, diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c index e19968eeb346ae..07c358db1af947 100644 --- a/drivers/clk/mediatek/clk-mt8195-mfg.c +++ 
b/drivers/clk/mediatek/clk-mt8195-mfg.c @@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_mfg); static struct platform_driver clk_mt8195_mfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-mfg", .of_match_table = of_match_clk_mt8195_mfg, diff --git a/drivers/clk/mediatek/clk-mt8195-peri_ao.c b/drivers/clk/mediatek/clk-mt8195-peri_ao.c index fc341030f10ba4..b743eb60a30bfc 100644 --- a/drivers/clk/mediatek/clk-mt8195-peri_ao.c +++ b/drivers/clk/mediatek/clk-mt8195-peri_ao.c @@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_peri_ao); static struct platform_driver clk_mt8195_peri_ao_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-peri_ao", .of_match_table = of_match_clk_mt8195_peri_ao, diff --git a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c index 1f37bde97d90eb..bc73fccd051521 100644 --- a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c +++ b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c @@ -40,7 +40,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_scp_adsp); static struct platform_driver clk_mt8195_scp_adsp_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-scp_adsp", .of_match_table = of_match_clk_mt8195_scp_adsp, diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c index 704498c40349d1..b1f44b873354ae 100644 --- a/drivers/clk/mediatek/clk-mt8195-topckgen.c +++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c @@ -1354,7 +1354,7 @@ static void clk_mt8195_topck_remove(struct platform_device *pdev) static struct platform_driver clk_mt8195_topck_drv = { .probe = clk_mt8195_topck_probe, - .remove_new = clk_mt8195_topck_remove, + .remove = clk_mt8195_topck_remove, .driver = { .name = "clk-mt8195-topck", .of_match_table = of_match_clk_mt8195_topck, diff --git a/drivers/clk/mediatek/clk-mt8195-vdec.c b/drivers/clk/mediatek/clk-mt8195-vdec.c index 9e4cc1a82cbe19..0bad706047c929 100644 --- a/drivers/clk/mediatek/clk-mt8195-vdec.c +++ b/drivers/clk/mediatek/clk-mt8195-vdec.c @@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_vdec); static struct platform_driver clk_mt8195_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-vdec", .of_match_table = of_match_clk_mt8195_vdec, diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c index 6e9c3ef1950289..581d99f8c254b5 100644 --- a/drivers/clk/mediatek/clk-mt8195-vdo0.c +++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c @@ -106,7 +106,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo0_id_table); static struct platform_driver clk_mt8195_vdo0_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8195-vdo0", }, diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c index 422e5729386c09..7f8b1a8967bdae 100644 --- a/drivers/clk/mediatek/clk-mt8195-vdo1.c +++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c @@ -133,7 +133,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo1_id_table); static struct platform_driver clk_mt8195_vdo1_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, 
.driver = { .name = "clk-mt8195-vdo1", }, diff --git a/drivers/clk/mediatek/clk-mt8195-venc.c b/drivers/clk/mediatek/clk-mt8195-venc.c index db7a6ce97ed06d..3b52ff025d5e6c 100644 --- a/drivers/clk/mediatek/clk-mt8195-venc.c +++ b/drivers/clk/mediatek/clk-mt8195-venc.c @@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_venc); static struct platform_driver clk_mt8195_venc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-venc", .of_match_table = of_match_clk_mt8195_venc, diff --git a/drivers/clk/mediatek/clk-mt8195-vpp0.c b/drivers/clk/mediatek/clk-mt8195-vpp0.c index 77d9aaf47a25a4..0e3e1dd7977c48 100644 --- a/drivers/clk/mediatek/clk-mt8195-vpp0.c +++ b/drivers/clk/mediatek/clk-mt8195-vpp0.c @@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp0_id_table); static struct platform_driver clk_mt8195_vpp0_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8195-vpp0", }, diff --git a/drivers/clk/mediatek/clk-mt8195-vpp1.c b/drivers/clk/mediatek/clk-mt8195-vpp1.c index 18ca8f1d953832..fb7b7aef0bba13 100644 --- a/drivers/clk/mediatek/clk-mt8195-vpp1.c +++ b/drivers/clk/mediatek/clk-mt8195-vpp1.c @@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp1_id_table); static struct platform_driver clk_mt8195_vpp1_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8195-vpp1", }, diff --git a/drivers/clk/mediatek/clk-mt8195-wpe.c b/drivers/clk/mediatek/clk-mt8195-wpe.c index 9c45a2fed0ce6a..315b93bbfcdcb9 100644 --- a/drivers/clk/mediatek/clk-mt8195-wpe.c +++ b/drivers/clk/mediatek/clk-mt8195-wpe.c @@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_wpe); static struct platform_driver clk_mt8195_wpe_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8195-wpe", .of_match_table = of_match_clk_mt8195_wpe, diff --git a/drivers/clk/mediatek/clk-mt8365-apu.c b/drivers/clk/mediatek/clk-mt8365-apu.c index 934060e6d9e9eb..2583c4704ffaba 100644 --- a/drivers/clk/mediatek/clk-mt8365-apu.c +++ b/drivers/clk/mediatek/clk-mt8365-apu.c @@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_apu); static struct platform_driver clk_mt8365_apu_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8365-apu", .of_match_table = of_match_clk_mt8365_apu, diff --git a/drivers/clk/mediatek/clk-mt8365-cam.c b/drivers/clk/mediatek/clk-mt8365-cam.c index c8fe5f5bb06cb4..89d2bd50263b40 100644 --- a/drivers/clk/mediatek/clk-mt8365-cam.c +++ b/drivers/clk/mediatek/clk-mt8365-cam.c @@ -48,7 +48,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_cam); static struct platform_driver clk_mt8365_cam_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8365-cam", .of_match_table = of_match_clk_mt8365_cam, diff --git a/drivers/clk/mediatek/clk-mt8365-mfg.c b/drivers/clk/mediatek/clk-mt8365-mfg.c index 5355f725363d6e..41bcd389119c7c 100644 --- a/drivers/clk/mediatek/clk-mt8365-mfg.c +++ b/drivers/clk/mediatek/clk-mt8365-mfg.c @@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_mfg); static struct platform_driver clk_mt8365_mfg_drv = { .probe = mtk_clk_simple_probe, - .remove_new 
= mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8365-mfg", .of_match_table = of_match_clk_mt8365_mfg, diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c index 8201949bfdaecc..56fb2a43ecd029 100644 --- a/drivers/clk/mediatek/clk-mt8365-mm.c +++ b/drivers/clk/mediatek/clk-mt8365-mm.c @@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8365_mm_id_table); static struct platform_driver clk_mt8365_mm_drv = { .probe = mtk_clk_pdev_probe, - .remove_new = mtk_clk_pdev_remove, + .remove = mtk_clk_pdev_remove, .driver = { .name = "clk-mt8365-mm", }, diff --git a/drivers/clk/mediatek/clk-mt8365-vdec.c b/drivers/clk/mediatek/clk-mt8365-vdec.c index 1be0b3faa2c304..f5d0518bc2e0c4 100644 --- a/drivers/clk/mediatek/clk-mt8365-vdec.c +++ b/drivers/clk/mediatek/clk-mt8365-vdec.c @@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_vdec); static struct platform_driver clk_mt8365_vdec_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8365-vdec", .of_match_table = of_match_clk_mt8365_vdec, diff --git a/drivers/clk/mediatek/clk-mt8365-venc.c b/drivers/clk/mediatek/clk-mt8365-venc.c index 4228ddec565785..35abd908537c8a 100644 --- a/drivers/clk/mediatek/clk-mt8365-venc.c +++ b/drivers/clk/mediatek/clk-mt8365-venc.c @@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_venc); static struct platform_driver clk_mt8365_venc_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8365-venc", .of_match_table = of_match_clk_mt8365_venc, diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c index 485b525b8acd49..e7952121112ed9 100644 --- a/drivers/clk/mediatek/clk-mt8365.c +++ b/drivers/clk/mediatek/clk-mt8365.c @@ -809,7 +809,7 @@ static struct platform_driver clk_mt8365_drv = { .of_match_table = of_match_clk_mt8365, }, .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, }; module_platform_driver(clk_mt8365_drv); diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c index 53e1866fb8e246..6227635fd5a1c0 100644 --- a/drivers/clk/mediatek/clk-mt8516-aud.c +++ b/drivers/clk/mediatek/clk-mt8516-aud.c @@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8516_aud); static struct platform_driver clk_mt8516_aud_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8516-aud", .of_match_table = of_match_clk_mt8516_aud, diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c index b8ae837c59dc8c..21eb052b0a539c 100644 --- a/drivers/clk/mediatek/clk-mt8516.c +++ b/drivers/clk/mediatek/clk-mt8516.c @@ -669,7 +669,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8516); static struct platform_driver clk_mt8516_drv = { .probe = mtk_clk_simple_probe, - .remove_new = mtk_clk_simple_remove, + .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8516", .of_match_table = of_match_clk_mt8516, diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c index 290ceda84ce47d..2e3303975096a4 100644 --- a/drivers/clk/mediatek/reset.c +++ b/drivers/clk/mediatek/reset.c @@ -110,65 +110,6 @@ static int reset_xlate(struct reset_controller_dev *rcdev, return data->desc->rst_idx_map[reset_spec->args[0]]; } -int 
mtk_register_reset_controller(struct device_node *np, - const struct mtk_clk_rst_desc *desc) -{ - struct regmap *regmap; - const struct reset_control_ops *rcops = NULL; - struct mtk_clk_rst_data *data; - int ret; - - if (!desc) { - pr_err("mtk clock reset desc is NULL\n"); - return -EINVAL; - } - - switch (desc->version) { - case MTK_RST_SIMPLE: - rcops = &mtk_reset_ops; - break; - case MTK_RST_SET_CLR: - rcops = &mtk_reset_ops_set_clr; - break; - default: - pr_err("Unknown reset version %d\n", desc->version); - return -EINVAL; - } - - regmap = device_node_to_regmap(np); - if (IS_ERR(regmap)) { - pr_err("Cannot find regmap for %pOF: %pe\n", np, regmap); - return -EINVAL; - } - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->desc = desc; - data->regmap = regmap; - data->rcdev.owner = THIS_MODULE; - data->rcdev.ops = rcops; - data->rcdev.of_node = np; - - if (data->desc->rst_idx_map_nr > 0) { - data->rcdev.of_reset_n_cells = 1; - data->rcdev.nr_resets = desc->rst_idx_map_nr; - data->rcdev.of_xlate = reset_xlate; - } else { - data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK; - } - - ret = reset_controller_register(&data->rcdev); - if (ret) { - pr_err("could not register reset controller: %d\n", ret); - kfree(data); - return ret; - } - - return 0; -} - int mtk_register_reset_controller_with_dev(struct device *dev, const struct mtk_clk_rst_desc *desc) { @@ -198,7 +139,7 @@ int mtk_register_reset_controller_with_dev(struct device *dev, regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) { dev_err(dev, "Cannot find regmap %pe\n", regmap); - return -EINVAL; + return PTR_ERR(regmap); } data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); diff --git a/drivers/clk/mediatek/reset.h b/drivers/clk/mediatek/reset.h index 6a58a3d5916545..562ffd290a224a 100644 --- a/drivers/clk/mediatek/reset.h +++ b/drivers/clk/mediatek/reset.h @@ -59,16 +59,6 @@ struct mtk_clk_rst_data { const struct mtk_clk_rst_desc *desc; }; -/** - * mtk_register_reset_controller - Register MediaTek clock reset controller - * @np: Pointer to device node. - * @desc: Constant pointer to description of clock reset. - * - * Return: 0 on success and errorno otherwise. - */ -int mtk_register_reset_controller(struct device_node *np, - const struct mtk_clk_rst_desc *desc); - /** * mtk_register_reset_controller - Register mediatek clock reset controller with device * @np: Pointer to device. 
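All of the drivers/clk/mediatek hunks above apply the same mechanical conversion: the teardown handler moves from the transitional .remove_new member of struct platform_driver to .remove, now that .remove itself carries the void-returning callback prototype that .remove_new was introduced for. The sketch below shows the resulting driver shape; it is illustrative only (the example_clk_* names are hypothetical and not part of this patch), and it assumes a kernel where the void-returning .remove prototype is already in place.

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_clk_probe(struct platform_device *pdev)
{
	/* register the clock controller here; return 0 on success */
	return 0;
}

/* New prototype: the remove callback returns void, so errors cannot be propagated */
static void example_clk_remove(struct platform_device *pdev)
{
	/* unregister clocks and release resources */
}

static struct platform_driver example_clk_drv = {
	.probe  = example_clk_probe,
	.remove = example_clk_remove,	/* previously: .remove_new = example_clk_remove */
	.driver = {
		.name = "example-clk",
	},
};
module_platform_driver(example_clk_drv);

MODULE_DESCRIPTION("Illustrative platform driver skeleton (not part of this patch)");
MODULE_LICENSE("GPL");

Because the callback can no longer return an error, any failure during teardown has to be handled or logged inside the remove handler itself; for the drivers touched here the change is purely mechanical, since they already used the void-returning mtk_clk_simple_remove and mtk_clk_pdev_remove helpers through .remove_new.
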
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c index 99b5bc4504461e..7aa6abb2eb1f2b 100644 --- a/drivers/clk/meson/a1-peripherals.c +++ b/drivers/clk/meson/a1-peripherals.c @@ -2183,7 +2183,7 @@ static struct clk_regmap *const a1_periphs_regmaps[] = { &dmc_sel2, }; -static struct regmap_config a1_periphs_regmap_cfg = { +static const struct regmap_config a1_periphs_regmap_cfg = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -2246,3 +2246,4 @@ MODULE_DESCRIPTION("Amlogic A1 Peripherals Clock Controller driver"); MODULE_AUTHOR("Jian Hu "); MODULE_AUTHOR("Dmitry Rokosov "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c index a16e537d139ac5..8e5a42d1afbbcd 100644 --- a/drivers/clk/meson/a1-pll.c +++ b/drivers/clk/meson/a1-pll.c @@ -295,7 +295,7 @@ static struct clk_regmap *const a1_pll_regmaps[] = { &hifi_pll, }; -static struct regmap_config a1_pll_regmap_cfg = { +static const struct regmap_config a1_pll_regmap_cfg = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -360,3 +360,4 @@ MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver"); MODULE_AUTHOR("Jian Hu "); MODULE_AUTHOR("Dmitry Rokosov "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c index fa1dcb7f91e4df..1dabc81535a6f7 100644 --- a/drivers/clk/meson/axg-aoclk.c +++ b/drivers/clk/meson/axg-aoclk.c @@ -342,3 +342,4 @@ module_platform_driver(axg_aoclkc_driver); MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index e03a5bf899c0fc..beda8634938999 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c @@ -753,6 +753,9 @@ static struct clk_regmap toddr_d = AUD_PCLK_GATE(toddr_d, AUDIO_CLK_GATE_EN1, 1); static struct clk_regmap loopback_b = AUD_PCLK_GATE(loopback_b, AUDIO_CLK_GATE_EN1, 2); +static struct clk_regmap earcrx = + AUD_PCLK_GATE(earcrx, AUDIO_CLK_GATE_EN1, 6); + static struct clk_regmap sm1_mst_a_mclk_sel = AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL); @@ -766,6 +769,10 @@ static struct clk_regmap sm1_mst_e_mclk_sel = AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL); static struct clk_regmap sm1_mst_f_mclk_sel = AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL); +static struct clk_regmap sm1_earcrx_cmdc_clk_sel = + AUD_MST_MCLK_MUX(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL); +static struct clk_regmap sm1_earcrx_dmac_clk_sel = + AUD_MST_MCLK_MUX(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL); static struct clk_regmap sm1_mst_a_mclk_div = AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL); @@ -779,6 +786,11 @@ static struct clk_regmap sm1_mst_e_mclk_div = AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL); static struct clk_regmap sm1_mst_f_mclk_div = AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL); +static struct clk_regmap sm1_earcrx_cmdc_clk_div = + AUD_MST_MCLK_DIV(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL); +static struct clk_regmap sm1_earcrx_dmac_clk_div = + AUD_MST_MCLK_DIV(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL); + static struct clk_regmap sm1_mst_a_mclk = AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL); @@ -792,6 +804,10 @@ static struct clk_regmap sm1_mst_e_mclk = AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL); static struct clk_regmap sm1_mst_f_mclk = AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL); 
+static struct clk_regmap sm1_earcrx_cmdc_clk = + AUD_MST_MCLK_GATE(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL); +static struct clk_regmap sm1_earcrx_dmac_clk = + AUD_MST_MCLK_GATE(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL); static struct clk_regmap sm1_tdm_mclk_pad_0 = AUD_TDM_PAD_CTRL( tdm_mclk_pad_0, AUDIO_SM1_MST_PAD_CTRL0, 0, mclk_pad_ctrl_parent_data); @@ -1232,6 +1248,13 @@ static struct clk_hw *sm1_audio_hw_clks[] = { [AUD_CLKID_SYSCLK_A_EN] = &sm1_sysclk_a_en.hw, [AUD_CLKID_SYSCLK_B_DIV] = &sm1_sysclk_b_div.hw, [AUD_CLKID_SYSCLK_B_EN] = &sm1_sysclk_b_en.hw, + [AUD_CLKID_EARCRX] = &earcrx.hw, + [AUD_CLKID_EARCRX_CMDC_SEL] = &sm1_earcrx_cmdc_clk_sel.hw, + [AUD_CLKID_EARCRX_CMDC_DIV] = &sm1_earcrx_cmdc_clk_div.hw, + [AUD_CLKID_EARCRX_CMDC] = &sm1_earcrx_cmdc_clk.hw, + [AUD_CLKID_EARCRX_DMAC_SEL] = &sm1_earcrx_dmac_clk_sel.hw, + [AUD_CLKID_EARCRX_DMAC_DIV] = &sm1_earcrx_dmac_clk_div.hw, + [AUD_CLKID_EARCRX_DMAC] = &sm1_earcrx_dmac_clk.hw, }; @@ -1646,6 +1669,13 @@ static struct clk_regmap *const sm1_clk_regmaps[] = { &sm1_sysclk_a_en, &sm1_sysclk_b_div, &sm1_sysclk_b_en, + &earcrx, + &sm1_earcrx_cmdc_clk_sel, + &sm1_earcrx_cmdc_clk_div, + &sm1_earcrx_cmdc_clk, + &sm1_earcrx_dmac_clk_sel, + &sm1_earcrx_dmac_clk_div, + &sm1_earcrx_dmac_clk, }; struct axg_audio_reset_data { @@ -1726,11 +1756,10 @@ static const struct reset_control_ops axg_audio_rstc_ops = { .status = axg_audio_reset_status, }; -static const struct regmap_config axg_audio_regmap_cfg = { +static struct regmap_config axg_audio_regmap_cfg = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL, }; struct audioclk_data { @@ -1739,6 +1768,7 @@ struct audioclk_data { struct meson_clk_hw_data hw_clks; unsigned int reset_offset; unsigned int reset_num; + unsigned int max_register; }; static int axg_audio_clkc_probe(struct platform_device *pdev) @@ -1760,6 +1790,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev) if (IS_ERR(regs)) return PTR_ERR(regs); + axg_audio_regmap_cfg.max_register = data->max_register; map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg); if (IS_ERR(map)) { dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map)); @@ -1828,6 +1859,7 @@ static const struct audioclk_data axg_audioclk_data = { .hws = axg_audio_hw_clks, .num = ARRAY_SIZE(axg_audio_hw_clks), }, + .max_register = AUDIO_CLK_PDMIN_CTRL1, }; static const struct audioclk_data g12a_audioclk_data = { @@ -1839,6 +1871,7 @@ static const struct audioclk_data g12a_audioclk_data = { }, .reset_offset = AUDIO_SW_RESET, .reset_num = 26, + .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL, }; static const struct audioclk_data sm1_audioclk_data = { @@ -1850,6 +1883,7 @@ static const struct audioclk_data sm1_audioclk_data = { }, .reset_offset = AUDIO_SM1_SW_RESET0, .reset_num = 39, + .max_register = AUDIO_EARCRX_DMAC_CLK_CTRL, }; static const struct of_device_id clkc_match_table[] = { @@ -1878,3 +1912,4 @@ module_platform_driver(axg_audio_driver); MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver"); MODULE_AUTHOR("Jerome Brunet "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h index 01a3da19933ef5..9e7765b630c96a 100644 --- a/drivers/clk/meson/axg-audio.h +++ b/drivers/clk/meson/axg-audio.h @@ -64,5 +64,7 @@ #define AUDIO_SM1_SW_RESET1 0x02C #define AUDIO_CLK81_CTRL 0x030 #define AUDIO_CLK81_EN 0x034 +#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0 +#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4 #endif /*__AXG_AUDIO_CLKC_H 
*/ diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 065b5f19829777..757c7a28c53de6 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -2187,3 +2187,4 @@ module_platform_driver(axg_driver); MODULE_DESCRIPTION("Amlogic AXG Main Clock Controller driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c index 56b33d23c31711..7dcbf4ebee078a 100644 --- a/drivers/clk/meson/c3-peripherals.c +++ b/drivers/clk/meson/c3-peripherals.c @@ -2296,7 +2296,7 @@ static struct clk_regmap *const c3_periphs_clk_regmaps[] = { &vapb, }; -static struct regmap_config clkc_regmap_config = { +static const struct regmap_config clkc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -2364,3 +2364,4 @@ module_platform_driver(c3_peripherals_driver); MODULE_DESCRIPTION("Amlogic C3 Peripherals Clock Controller driver"); MODULE_AUTHOR("Chuan Liu "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c index 6d5271c61d1495..32bd2ed9d30441 100644 --- a/drivers/clk/meson/c3-pll.c +++ b/drivers/clk/meson/c3-pll.c @@ -678,7 +678,7 @@ static struct clk_regmap *const c3_pll_clk_regmaps[] = { &mclk1, }; -static struct regmap_config clkc_regmap_config = { +static const struct regmap_config clkc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -745,3 +745,4 @@ module_platform_driver(c3_pll_driver); MODULE_DESCRIPTION("Amlogic C3 PLL Clock Controller driver"); MODULE_AUTHOR("Chuan Liu "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c index aa824b030cb829..6c1f58826e24a2 100644 --- a/drivers/clk/meson/clk-cpu-dyndiv.c +++ b/drivers/clk/meson/clk-cpu-dyndiv.c @@ -65,8 +65,9 @@ const struct clk_ops meson_clk_cpu_dyndiv_ops = { .determine_rate = meson_clk_cpu_dyndiv_determine_rate, .set_rate = meson_clk_cpu_dyndiv_set_rate, }; -EXPORT_SYMBOL_GPL(meson_clk_cpu_dyndiv_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_cpu_dyndiv_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic CPU Dynamic Clock divider"); MODULE_AUTHOR("Neil Armstrong "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c index d46c02b51be544..913bf25d3771bd 100644 --- a/drivers/clk/meson/clk-dualdiv.c +++ b/drivers/clk/meson/clk-dualdiv.c @@ -130,14 +130,15 @@ const struct clk_ops meson_clk_dualdiv_ops = { .determine_rate = meson_clk_dualdiv_determine_rate, .set_rate = meson_clk_dualdiv_set_rate, }; -EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, CLK_MESON); const struct clk_ops meson_clk_dualdiv_ro_ops = { .recalc_rate = meson_clk_dualdiv_recalc_rate, }; -EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic dual divider driver"); MODULE_AUTHOR("Neil Armstrong "); MODULE_AUTHOR("Jerome Brunet "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index eae9b7dc5a6cc7..f639d56f0fd3f3 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -165,7 +165,7 @@ const struct clk_ops meson_clk_mpll_ro_ops = { .recalc_rate = mpll_recalc_rate, .determine_rate = mpll_determine_rate, }; -EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ro_ops, 
CLK_MESON); const struct clk_ops meson_clk_mpll_ops = { .recalc_rate = mpll_recalc_rate, @@ -173,8 +173,9 @@ const struct clk_ops meson_clk_mpll_ops = { .set_rate = mpll_set_rate, .init = mpll_init, }; -EXPORT_SYMBOL_GPL(meson_clk_mpll_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic MPLL driver"); MODULE_AUTHOR("Michael Turquette "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c index ff3f0b1a3ed16c..c1526fbfb6c4cc 100644 --- a/drivers/clk/meson/clk-phase.c +++ b/drivers/clk/meson/clk-phase.c @@ -61,7 +61,7 @@ const struct clk_ops meson_clk_phase_ops = { .get_phase = meson_clk_phase_get_phase, .set_phase = meson_clk_phase_set_phase, }; -EXPORT_SYMBOL_GPL(meson_clk_phase_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_phase_ops, CLK_MESON); /* * This is a special clock for the audio controller. @@ -123,7 +123,7 @@ const struct clk_ops meson_clk_triphase_ops = { .get_phase = meson_clk_triphase_get_phase, .set_phase = meson_clk_triphase_set_phase, }; -EXPORT_SYMBOL_GPL(meson_clk_triphase_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_triphase_ops, CLK_MESON); /* * This is a special clock for the audio controller. @@ -178,9 +178,9 @@ const struct clk_ops meson_sclk_ws_inv_ops = { .get_phase = meson_sclk_ws_inv_get_phase, .set_phase = meson_sclk_ws_inv_set_phase, }; -EXPORT_SYMBOL_GPL(meson_sclk_ws_inv_ops); - +EXPORT_SYMBOL_NS_GPL(meson_sclk_ws_inv_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic phase driver"); MODULE_AUTHOR("Jerome Brunet "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 467dc8b61a370b..bc570a2ff3a3f8 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -472,7 +472,7 @@ const struct clk_ops meson_clk_pcie_pll_ops = { .enable = meson_clk_pcie_pll_enable, .disable = meson_clk_pll_disable }; -EXPORT_SYMBOL_GPL(meson_clk_pcie_pll_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_pcie_pll_ops, CLK_MESON); const struct clk_ops meson_clk_pll_ops = { .init = meson_clk_pll_init, @@ -483,15 +483,16 @@ const struct clk_ops meson_clk_pll_ops = { .enable = meson_clk_pll_enable, .disable = meson_clk_pll_disable }; -EXPORT_SYMBOL_GPL(meson_clk_pll_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, CLK_MESON); const struct clk_ops meson_clk_pll_ro_ops = { .recalc_rate = meson_clk_pll_recalc_rate, .is_enabled = meson_clk_pll_is_enabled, }; -EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops); +EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ro_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic PLL driver"); MODULE_AUTHOR("Carlo Caione "); MODULE_AUTHOR("Jerome Brunet "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c index ad116d24f7002b..07f7e441b9161c 100644 --- a/drivers/clk/meson/clk-regmap.c +++ b/drivers/clk/meson/clk-regmap.c @@ -49,12 +49,12 @@ const struct clk_ops clk_regmap_gate_ops = { .disable = clk_regmap_gate_disable, .is_enabled = clk_regmap_gate_is_enabled, }; -EXPORT_SYMBOL_GPL(clk_regmap_gate_ops); +EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, CLK_MESON); const struct clk_ops clk_regmap_gate_ro_ops = { .is_enabled = clk_regmap_gate_is_enabled, }; -EXPORT_SYMBOL_GPL(clk_regmap_gate_ro_ops); +EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, CLK_MESON); static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw, unsigned long prate) @@ -125,13 +125,13 @@ const struct clk_ops clk_regmap_divider_ops = { .determine_rate = 
clk_regmap_div_determine_rate, .set_rate = clk_regmap_div_set_rate, }; -EXPORT_SYMBOL_GPL(clk_regmap_divider_ops); +EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, CLK_MESON); const struct clk_ops clk_regmap_divider_ro_ops = { .recalc_rate = clk_regmap_div_recalc_rate, .determine_rate = clk_regmap_div_determine_rate, }; -EXPORT_SYMBOL_GPL(clk_regmap_divider_ro_ops); +EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ro_ops, CLK_MESON); static u8 clk_regmap_mux_get_parent(struct clk_hw *hw) { @@ -174,13 +174,14 @@ const struct clk_ops clk_regmap_mux_ops = { .set_parent = clk_regmap_mux_set_parent, .determine_rate = clk_regmap_mux_determine_rate, }; -EXPORT_SYMBOL_GPL(clk_regmap_mux_ops); +EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, CLK_MESON); const struct clk_ops clk_regmap_mux_ro_ops = { .get_parent = clk_regmap_mux_get_parent, }; -EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops); +EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic regmap backed clock driver"); MODULE_AUTHOR("Jerome Brunet "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c index a5f4d15d8396cf..f0a18d8c9fc231 100644 --- a/drivers/clk/meson/g12a-aoclk.c +++ b/drivers/clk/meson/g12a-aoclk.c @@ -477,3 +477,4 @@ module_platform_driver(g12a_aoclkc_driver); MODULE_DESCRIPTION("Amlogic G12A Always-ON Clock Controller driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 4647e84d2502b9..02dda57105b10e 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -5616,3 +5616,4 @@ module_platform_driver(g12a_driver); MODULE_DESCRIPTION("Amlogic G12/SM1 Main Clock Controller driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c index 33fafbdf65c4a4..83b034157b3534 100644 --- a/drivers/clk/meson/gxbb-aoclk.c +++ b/drivers/clk/meson/gxbb-aoclk.c @@ -303,3 +303,4 @@ module_platform_driver(gxbb_aoclkc_driver); MODULE_DESCRIPTION("Amlogic GXBB Always-ON Clock Controller driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index d3175e4335bbce..f071faad1ebb70 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -3571,3 +3571,4 @@ module_platform_driver(gxbb_driver); MODULE_DESCRIPTION("Amlogic GXBB Main Clock Controller driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c index 2dd064201faede..053940ee8940d7 100644 --- a/drivers/clk/meson/meson-aoclk.c +++ b/drivers/clk/meson/meson-aoclk.c @@ -88,7 +88,8 @@ int meson_aoclkc_probe(struct platform_device *pdev) return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks); } -EXPORT_SYMBOL_GPL(meson_aoclkc_probe); +EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, CLK_MESON); MODULE_DESCRIPTION("Amlogic Always-ON Clock Controller helpers"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/meson-clkc-utils.c b/drivers/clk/meson/meson-clkc-utils.c index 4dd5948b7ae43c..a8cd2c21fab77f 100644 --- a/drivers/clk/meson/meson-clkc-utils.c +++ b/drivers/clk/meson/meson-clkc-utils.c @@ -20,7 +20,8 @@ struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_da return data->hws[idx]; } -EXPORT_SYMBOL_GPL(meson_clk_hw_get); +EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, CLK_MESON); 
MODULE_DESCRIPTION("Amlogic Clock Controller Utilities"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c index 570992eece86c0..66f79e384fe511 100644 --- a/drivers/clk/meson/meson-eeclk.c +++ b/drivers/clk/meson/meson-eeclk.c @@ -57,7 +57,8 @@ int meson_eeclkc_probe(struct platform_device *pdev) return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks); } -EXPORT_SYMBOL_GPL(meson_eeclkc_probe); +EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, CLK_MESON); MODULE_DESCRIPTION("Amlogic Main Clock Controller Helpers"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c index 130c50554290bc..c930cf0614a0f4 100644 --- a/drivers/clk/meson/s4-peripherals.c +++ b/drivers/clk/meson/s4-peripherals.c @@ -3747,7 +3747,7 @@ static struct clk_regmap *const s4_periphs_clk_regmaps[] = { &s4_adc_extclk_in_gate, }; -static struct regmap_config clkc_regmap_config = { +static const struct regmap_config clkc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -3814,3 +3814,4 @@ module_platform_driver(s4_driver); MODULE_DESCRIPTION("Amlogic S4 Peripherals Clock Controller driver"); MODULE_AUTHOR("Yu Tu "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c index c2afade24f9f57..b0258933fb9d25 100644 --- a/drivers/clk/meson/s4-pll.c +++ b/drivers/clk/meson/s4-pll.c @@ -799,7 +799,7 @@ static const struct reg_sequence s4_init_regs[] = { { .reg = ANACTRL_MPLL_CTRL0, .def = 0x00000543 }, }; -static struct regmap_config clkc_regmap_config = { +static const struct regmap_config clkc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -873,3 +873,4 @@ module_platform_driver(s4_driver); MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver"); MODULE_AUTHOR("Yu Tu "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c index 987f5b06587c64..ae03b048182f3b 100644 --- a/drivers/clk/meson/sclk-div.c +++ b/drivers/clk/meson/sclk-div.c @@ -247,8 +247,9 @@ const struct clk_ops meson_sclk_div_ops = { .set_duty_cycle = sclk_div_set_duty_cycle, .init = sclk_div_init, }; -EXPORT_SYMBOL_GPL(meson_sclk_div_ops); +EXPORT_SYMBOL_NS_GPL(meson_sclk_div_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic Sample divider driver"); MODULE_AUTHOR("Jerome Brunet "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c index e886df55d6e381..36f637d2d01b5d 100644 --- a/drivers/clk/meson/vclk.c +++ b/drivers/clk/meson/vclk.c @@ -49,7 +49,7 @@ const struct clk_ops meson_vclk_gate_ops = { .disable = meson_vclk_gate_disable, .is_enabled = meson_vclk_gate_is_enabled, }; -EXPORT_SYMBOL_GPL(meson_vclk_gate_ops); +EXPORT_SYMBOL_NS_GPL(meson_vclk_gate_ops, CLK_MESON); /* The VCLK Divider has supplementary reset & enable bits */ @@ -134,8 +134,9 @@ const struct clk_ops meson_vclk_div_ops = { .disable = meson_vclk_div_disable, .is_enabled = meson_vclk_div_is_enabled, }; -EXPORT_SYMBOL_GPL(meson_vclk_div_ops); +EXPORT_SYMBOL_NS_GPL(meson_vclk_div_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic vclk clock driver"); MODULE_AUTHOR("Neil Armstrong "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c index ee129f86794db8..486cf68fc97a02 100644 --- 
a/drivers/clk/meson/vid-pll-div.c +++ b/drivers/clk/meson/vid-pll-div.c @@ -92,8 +92,9 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw, const struct clk_ops meson_vid_pll_div_ro_ops = { .recalc_rate = meson_vid_pll_div_recalc_rate, }; -EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops); +EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, CLK_MESON); MODULE_DESCRIPTION("Amlogic video pll divider driver"); MODULE_AUTHOR("Neil Armstrong "); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(CLK_MESON); diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c index ae521aaf8cdcf4..88d798d510cdbe 100644 --- a/drivers/clk/mmp/clk-audio.c +++ b/drivers/clk/mmp/clk-audio.c @@ -436,7 +436,7 @@ static struct platform_driver mmp2_audio_clk_driver = { .pm = &mmp2_audio_clk_pm_ops, }, .probe = mmp2_audio_clk_probe, - .remove_new = mmp2_audio_clk_remove, + .remove = mmp2_audio_clk_remove, }; module_platform_driver(mmp2_audio_clk_driver); diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c index 454d131f475eac..07ac9e6937e58b 100644 --- a/drivers/clk/mmp/clk-mix.c +++ b/drivers/clk/mmp/clk-mix.c @@ -447,7 +447,6 @@ struct clk *mmp_clk_register_mix(struct device *dev, struct mmp_clk_mix *mix; struct clk *clk; struct clk_init_data init; - size_t table_bytes; mix = kzalloc(sizeof(*mix), GFP_KERNEL); if (!mix) @@ -461,8 +460,8 @@ struct clk *mmp_clk_register_mix(struct device *dev, memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info)); if (config->table) { - table_bytes = sizeof(*config->table) * config->table_size; - mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL); + mix->table = kmemdup_array(config->table, config->table_size, + sizeof(*mix->table), GFP_KERNEL); if (!mix->table) goto free_mix; @@ -470,9 +469,8 @@ struct clk *mmp_clk_register_mix(struct device *dev, } if (config->mux_table) { - table_bytes = sizeof(u32) * num_parents; - mix->mux_table = kmemdup(config->mux_table, table_bytes, - GFP_KERNEL); + mix->mux_table = kmemdup_array(config->mux_table, num_parents, + sizeof(*mix->mux_table), GFP_KERNEL); if (!mix->mux_table) { kfree(mix->table); goto free_mix; diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c index 8701a58a5804e9..13906e31bef869 100644 --- a/drivers/clk/mvebu/armada-37xx-periph.c +++ b/drivers/clk/mvebu/armada-37xx-periph.c @@ -792,7 +792,7 @@ static void armada_3700_periph_clock_remove(struct platform_device *pdev) static struct platform_driver armada_3700_periph_clock_driver = { .probe = armada_3700_periph_clock_probe, - .remove_new = armada_3700_periph_clock_remove, + .remove = armada_3700_periph_clock_remove, .driver = { .name = "marvell-armada-3700-periph-clock", .of_match_table = armada_3700_periph_clock_of_match, diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c index e94c336e0f1cc5..1a16f9c0b1d81c 100644 --- a/drivers/clk/mvebu/armada-37xx-tbg.c +++ b/drivers/clk/mvebu/armada-37xx-tbg.c @@ -141,7 +141,7 @@ static const struct of_device_id armada_3700_tbg_clock_of_match[] = { static struct platform_driver armada_3700_tbg_clock_driver = { .probe = armada_3700_tbg_clock_probe, - .remove_new = armada_3700_tbg_clock_remove, + .remove = armada_3700_tbg_clock_remove, .driver = { .name = "marvell-armada-3700-tbg-clock", .of_match_table = armada_3700_tbg_clock_of_match, diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c index 0e2e7d00ae113c..ca88e5e78b0671 100644 --- a/drivers/clk/mvebu/armada-37xx-xtal.c +++ 
b/drivers/clk/mvebu/armada-37xx-xtal.c @@ -77,7 +77,7 @@ static const struct of_device_id armada_3700_xtal_clock_of_match[] = { static struct platform_driver armada_3700_xtal_clock_driver = { .probe = armada_3700_xtal_clock_probe, - .remove_new = armada_3700_xtal_clock_remove, + .remove = armada_3700_xtal_clock_remove, .driver = { .name = "marvell-armada-3700-xtal-clock", .of_match_table = armada_3700_xtal_clock_of_match, diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 11ae28430dadbc..a3e2a09e2105b2 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -810,6 +810,14 @@ config SDX_GCC_75 Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc. +config SM_CAMCC_4450 + tristate "SM4450 Camera Clock Controller" + depends on ARM64 || COMPILE_TEST + select SM_GCC_4450 + help + Support for the camera clock controller on SM4450 devices. + Say Y if you want to support camera devices and camera functionality. + config SM_CAMCC_6350 tristate "SM6350 Camera Clock Controller" depends on ARM64 || COMPILE_TEST @@ -826,6 +834,16 @@ config SM_CAMCC_7150 Support for the camera clock controller on SM7150 devices. Say Y if you want to support camera devices and camera functionality. +config SM_CAMCC_8150 + tristate "SM8150 Camera Clock Controller" + depends on ARM64 || COMPILE_TEST + select SM_GCC_8150 + help + Support for the camera clock controller on Qualcomm Technologies, Inc + SM8150 devices. + Say Y if you want to support camera devices and functionality such as + capturing pictures. + config SM_CAMCC_8250 tristate "SM8250 Camera Clock Controller" depends on ARM64 || COMPILE_TEST @@ -858,6 +876,16 @@ config SM_CAMCC_8650 Support for the camera clock controller on SM8650 devices. Say Y if you want to support camera devices and camera functionality. +config SM_DISPCC_4450 + tristate "SM4450 Display Clock Controller" + depends on ARM64 || COMPILE_TEST + depends on SM_GCC_4450 + help + Support for the display clock controller on Qualcomm Technologies, Inc + SM4450 devices. + Say Y if you want to support display devices and functionality such as + splash screen + config SM_DISPCC_6115 tristate "SM6115 Display Clock Controller" depends on ARM64 || COMPILE_TEST @@ -931,20 +959,10 @@ config SM_DISPCC_8450 config SM_DISPCC_8550 tristate "SM8550 Display Clock Controller" depends on ARM64 || COMPILE_TEST - depends on SM_GCC_8550 + depends on SM_GCC_8550 || SM_GCC_8650 help Support for the display clock controller on Qualcomm Technologies, Inc - SM8550 devices. - Say Y if you want to support display devices and functionality such as - splash screen. - -config SM_DISPCC_8650 - tristate "SM8650 Display Clock Controller" - depends on ARM64 || COMPILE_TEST - select SM_GCC_8650 - help - Support for the display clock controller on Qualcomm Technologies, Inc - SM8650 devices. + SM8550 or SM8650 devices. Say Y if you want to support display devices and functionality such as splash screen. @@ -1054,6 +1072,15 @@ config SM_GCC_8650 Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, SD/UFS, PCIe etc. +config SM_GPUCC_4450 + tristate "SM4450 Graphics Clock Controller" + depends on ARM64 || COMPILE_TEST + select SM_GCC_4450 + help + Support for the graphics clock controller on SM4450 devices. + Say Y if you want to support graphics controller devices and + functionality such as 3D graphics. 
+ config SM_GPUCC_6115 tristate "SM6115 Graphics Clock Controller" select SM_GCC_6115 diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 0de5fce6113a09..2b378667a63ff6 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -107,12 +107,15 @@ obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o obj-$(CONFIG_SDX_GCC_75) += gcc-sdx75.o +obj-$(CONFIG_SM_CAMCC_4450) += camcc-sm4450.o obj-$(CONFIG_SM_CAMCC_6350) += camcc-sm6350.o obj-$(CONFIG_SM_CAMCC_7150) += camcc-sm7150.o +obj-$(CONFIG_SM_CAMCC_8150) += camcc-sm8150.o obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o obj-$(CONFIG_SM_CAMCC_8550) += camcc-sm8550.o obj-$(CONFIG_SM_CAMCC_8650) += camcc-sm8650.o +obj-$(CONFIG_SM_DISPCC_4450) += dispcc-sm4450.o obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o @@ -121,7 +124,6 @@ obj-$(CONFIG_SM_DISPCC_7150) += dispcc-sm7150.o obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o -obj-$(CONFIG_SM_DISPCC_8650) += dispcc-sm8650.o obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o @@ -134,6 +136,7 @@ obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o +obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c index f9c5e296dba2a0..f43d455ab4b84e 100644 --- a/drivers/clk/qcom/a53-pll.c +++ b/drivers/clk/qcom/a53-pll.c @@ -151,6 +151,7 @@ static int qcom_a53pll_probe(struct platform_device *pdev) } static const struct of_device_id qcom_a53pll_match_table[] = { + { .compatible = "qcom,msm8226-a7pll" }, { .compatible = "qcom,msm8916-a53pll" }, { .compatible = "qcom,msm8939-a53pll" }, { } diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c index ce57b333ec99e6..ef31386831ebd2 100644 --- a/drivers/clk/qcom/apcs-msm8916.c +++ b/drivers/clk/qcom/apcs-msm8916.c @@ -128,7 +128,7 @@ static void qcom_apcs_msm8916_clk_remove(struct platform_device *pdev) static struct platform_driver qcom_apcs_msm8916_clk_driver = { .probe = qcom_apcs_msm8916_clk_probe, - .remove_new = qcom_apcs_msm8916_clk_remove, + .remove = qcom_apcs_msm8916_clk_remove, .driver = { .name = "qcom-apcs-msm8916-clk", }, diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c index d644e6e1f8b715..76ece6c4a96926 100644 --- a/drivers/clk/qcom/apcs-sdx55.c +++ b/drivers/clk/qcom/apcs-sdx55.c @@ -131,7 +131,7 @@ static void qcom_apcs_sdx55_clk_remove(struct platform_device *pdev) static struct platform_driver qcom_apcs_sdx55_clk_driver = { .probe = qcom_apcs_sdx55_clk_probe, - .remove_new = qcom_apcs_sdx55_clk_remove, + .remove = qcom_apcs_sdx55_clk_remove, .driver = { .name = "qcom-sdx55-acps-clk", }, diff --git a/drivers/clk/qcom/camcc-sm4450.c b/drivers/clk/qcom/camcc-sm4450.c new file mode 100644 index 00000000000000..f8503ced3d05d3 --- /dev/null +++ b/drivers/clk/qcom/camcc-sm4450.c @@ -0,0 +1,1688 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm 
Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_BI_TCXO, +}; + +enum { + P_BI_TCXO, + P_CAM_CC_PLL0_OUT_EVEN, + P_CAM_CC_PLL0_OUT_MAIN, + P_CAM_CC_PLL0_OUT_ODD, + P_CAM_CC_PLL1_OUT_EVEN, + P_CAM_CC_PLL1_OUT_MAIN, + P_CAM_CC_PLL2_OUT_EVEN, + P_CAM_CC_PLL2_OUT_MAIN, + P_CAM_CC_PLL3_OUT_EVEN, + P_CAM_CC_PLL4_OUT_EVEN, + P_CAM_CC_PLL4_OUT_MAIN, +}; + +static const struct pll_vco lucid_evo_vco[] = { + { 249600000, 2020000000, 0 }, +}; + +static const struct pll_vco rivian_evo_vco[] = { + { 864000000, 1056000000, 0 }, +}; + +/* 1200.0 MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll0_config = { + .l = 0x3e, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00008400, + .user_ctl_hi_val = 0x00000805, +}; + +static struct clk_alpha_pll cam_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = { + .offset = 0x0, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = { + { 0x2, 3 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = { + .offset = 0x0, + .post_div_shift = 14, + .post_div_table = post_div_table_cam_cc_pll0_out_odd, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0_out_odd", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +/* 600.0 MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll1_config = { + .l = 0x1f, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000805, +}; + +static struct clk_alpha_pll cam_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = 
&clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = { + .offset = 0x1000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll1_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll1_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll1.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +/* 960.0 MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll2_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x90008820, + .config_ctl_hi_val = 0x00890263, + .config_ctl_hi1_val = 0x00000247, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00400000, +}; + +static struct clk_alpha_pll cam_cc_pll2 = { + .offset = 0x2000, + .vco_table = rivian_evo_vco, + .num_vco = ARRAY_SIZE(rivian_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll2", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_rivian_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll2_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = { + .offset = 0x2000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll2_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll2_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll2.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_rivian_evo_ops, + }, +}; + +/* 600.0 MHz Configuration */ +static const struct alpha_pll_config cam_cc_pll3_config = { + .l = 0x1f, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000805, +}; + +static struct clk_alpha_pll cam_cc_pll3 = { + .offset = 0x3000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll3", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = { + .offset = 0x3000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll3_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll3_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll3.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +/* 700.0 MHz Configuration */ 
+static const struct alpha_pll_config cam_cc_pll4_config = { + .l = 0x24, + .alpha = 0x7555, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000400, + .user_ctl_hi_val = 0x00000805, +}; + +static struct clk_alpha_pll cam_cc_pll4 = { + .offset = 0x4000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll4", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = { + .offset = 0x4000, + .post_div_shift = 10, + .post_div_table = post_div_table_cam_cc_pll4_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll4_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll4.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_evo_ops, + }, +}; + +static const struct parent_map cam_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_ODD, 5 }, + { P_CAM_CC_PLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL2_OUT_EVEN, 3 }, + { P_CAM_CC_PLL2_OUT_MAIN, 4 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll2_out_even.clkr.hw }, + { .hw = &cam_cc_pll2.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_ODD, 5 }, + { P_CAM_CC_PLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL4_OUT_EVEN, 2 }, + { P_CAM_CC_PLL4_OUT_MAIN, 3 }, + { P_CAM_CC_PLL0_OUT_ODD, 5 }, + { P_CAM_CC_PLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll4_out_even.clkr.hw }, + { .hw = &cam_cc_pll4.clkr.hw }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL1_OUT_MAIN, 2 }, + { P_CAM_CC_PLL1_OUT_EVEN, 3 }, + { P_CAM_CC_PLL0_OUT_ODD, 5 }, + { P_CAM_CC_PLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll1.clkr.hw }, + { .hw = &cam_cc_pll1_out_even.clkr.hw }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, +}; + +static const struct parent_map 
cam_cc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL1_OUT_MAIN, 2 }, + { P_CAM_CC_PLL1_OUT_EVEN, 3 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_5[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll1.clkr.hw }, + { .hw = &cam_cc_pll1_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_6[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL3_OUT_EVEN, 5 }, + { P_CAM_CC_PLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_7[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll3_out_even.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, +}; + +static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(410000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(460000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_bps_clk_src = { + .cmd_rcgr = 0xa004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_4, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_clk_src", + .parent_data = cam_cc_parent_data_4, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0), + F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = { + .cmd_rcgr = 0x13014, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0), + F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cci_0_clk_src = { + .cmd_rcgr = 0x10004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_0_clk_src", + .parent_data = cam_cc_parent_data_2, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_cci_1_clk_src = { + .cmd_rcgr = 0x11004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_1_clk_src", + .parent_data = cam_cc_parent_data_2, 
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { + .cmd_rcgr = 0xc054, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cphy_rx_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_cre_clk_src = { + .cmd_rcgr = 0x16004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_5, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cre_clk_src", + .parent_data = cam_cc_parent_data_5, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_5), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { + .cmd_rcgr = 0x9004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi0phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { + .cmd_rcgr = 0x9028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi1phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { + .cmd_rcgr = 0x904c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi2phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_MAIN, 6, 0, 0), + F(240000000, P_CAM_CC_PLL0_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { + .cmd_rcgr = 0xa02c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_fast_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = { + F(19200000, 
P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_icp_clk_src = { + .cmd_rcgr = 0xf014, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_6, + .freq_tbl = ftbl_cam_cc_icp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_clk_src", + .parent_data = cam_cc_parent_data_6, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_6), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = { + F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50), + F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4), + F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_mclk0_clk_src = { + .cmd_rcgr = 0x8004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk0_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk1_clk_src = { + .cmd_rcgr = 0x8024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk1_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk2_clk_src = { + .cmd_rcgr = 0x8044, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk2_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk3_clk_src = { + .cmd_rcgr = 0x8064, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk3_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ope_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(410000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(460000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(700000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ope_0_clk_src = { + .cmd_rcgr = 0xb004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_7, + .freq_tbl = ftbl_cam_cc_ope_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ope_0_clk_src", + .parent_data = cam_cc_parent_data_7, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_7), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0), + { } +}; + +static struct clk_rcg2 
cam_cc_slow_ahb_clk_src = { + .cmd_rcgr = 0xa048, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_slow_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_tfe_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(432000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(548000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(630000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_tfe_0_clk_src = { + .cmd_rcgr = 0xc004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_3, + .freq_tbl = ftbl_cam_cc_tfe_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_0_clk_src", + .parent_data = cam_cc_parent_data_3, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_tfe_0_csid_clk_src = { + .cmd_rcgr = 0xc02c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_0_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_tfe_1_clk_src = { + .cmd_rcgr = 0xd004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_3, + .freq_tbl = ftbl_cam_cc_tfe_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_1_clk_src", + .parent_data = cam_cc_parent_data_3, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_tfe_1_csid_clk_src = { + .cmd_rcgr = 0xd024, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_1_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_branch cam_cc_bps_ahb_clk = { + .halt_reg = 0xa060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa060, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_areg_clk = { + .halt_reg = 0xa044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_areg_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_clk = { + .halt_reg = 0xa01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_clk", + 
.parent_hws = (const struct clk_hw*[]) { + &cam_cc_bps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_atb_clk = { + .halt_reg = 0x13034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_atb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_clk = { + .halt_reg = 0x1302c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1302c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_hf_clk = { + .halt_reg = 0x1300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1300c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_hf_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_sf_clk = { + .halt_reg = 0x13004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_sf_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_0_clk = { + .halt_reg = 0x1001c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1001c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_1_clk = { + .halt_reg = 0x1101c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1101c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_core_ahb_clk = { + .halt_reg = 0x1401c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1401c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_core_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ahb_clk = { + .halt_reg = 0x12004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cre_ahb_clk = { + .halt_reg = 0x16020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cre_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + 
}, + }, +}; + +static struct clk_branch cam_cc_cre_clk = { + .halt_reg = 0x1601c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1601c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cre_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cre_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi0phytimer_clk = { + .halt_reg = 0x901c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x901c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi0phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi0phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi1phytimer_clk = { + .halt_reg = 0x9040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi1phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi1phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi2phytimer_clk = { + .halt_reg = 0x9064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi2phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi2phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy0_clk = { + .halt_reg = 0x9020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy1_clk = { + .halt_reg = 0x9044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy2_clk = { + .halt_reg = 0x9068, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_atb_clk = { + .halt_reg = 0xf004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_atb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_clk = { + .halt_reg = 0xf02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf02c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_icp_clk_src.clkr.hw, + }, + 
.num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_cti_clk = { + .halt_reg = 0xf008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_cti_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_ts_clk = { + .halt_reg = 0xf00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_ts_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk0_clk = { + .halt_reg = 0x801c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x801c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk1_clk = { + .halt_reg = 0x803c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x803c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk2_clk = { + .halt_reg = 0x805c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x805c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk3_clk = { + .halt_reg = 0x807c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x807c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk3_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ope_0_ahb_clk = { + .halt_reg = 0xb030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ope_0_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ope_0_areg_clk = { + .halt_reg = 0xb02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb02c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ope_0_areg_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ope_0_clk = { + .halt_reg = 0xb01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ope_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ope_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static 
struct clk_branch cam_cc_soc_ahb_clk = { + .halt_reg = 0x14018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_soc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sys_tmr_clk = { + .halt_reg = 0xf034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_sys_tmr_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_tfe_0_ahb_clk = { + .halt_reg = 0xc070, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc070, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_0_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_tfe_0_clk = { + .halt_reg = 0xc01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_tfe_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_tfe_0_cphy_rx_clk = { + .halt_reg = 0xc06c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc06c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_0_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_tfe_0_csid_clk = { + .halt_reg = 0xc044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_0_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_tfe_0_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_tfe_1_ahb_clk = { + .halt_reg = 0xd048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xd048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_1_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_tfe_1_clk = { + .halt_reg = 0xd01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xd01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_tfe_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_tfe_1_cphy_rx_clk = { + .halt_reg = 0xd044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xd044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_1_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_tfe_1_csid_clk = { + 
.halt_reg = 0xd03c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xd03c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_tfe_1_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_tfe_1_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc cam_cc_camss_top_gdsc = { + .gdscr = 0x14004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "cam_cc_camss_top_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct clk_regmap *cam_cc_sm4450_clocks[] = { + [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr, + [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr, + [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr, + [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr, + [CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr, + [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr, + [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr, + [CAM_CC_CAMNOC_AXI_HF_CLK] = &cam_cc_camnoc_axi_hf_clk.clkr, + [CAM_CC_CAMNOC_AXI_SF_CLK] = &cam_cc_camnoc_axi_sf_clk.clkr, + [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr, + [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr, + [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr, + [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr, + [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr, + [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr, + [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr, + [CAM_CC_CRE_AHB_CLK] = &cam_cc_cre_ahb_clk.clkr, + [CAM_CC_CRE_CLK] = &cam_cc_cre_clk.clkr, + [CAM_CC_CRE_CLK_SRC] = &cam_cc_cre_clk_src.clkr, + [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr, + [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr, + [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr, + [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr, + [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr, + [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr, + [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr, + [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr, + [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr, + [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr, + [CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr, + [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr, + [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr, + [CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr, + [CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr, + [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr, + [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr, + [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr, + [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr, + [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr, + [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr, + [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr, + [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr, + [CAM_CC_OPE_0_AHB_CLK] = &cam_cc_ope_0_ahb_clk.clkr, + [CAM_CC_OPE_0_AREG_CLK] = &cam_cc_ope_0_areg_clk.clkr, + [CAM_CC_OPE_0_CLK] = &cam_cc_ope_0_clk.clkr, + [CAM_CC_OPE_0_CLK_SRC] = &cam_cc_ope_0_clk_src.clkr, + [CAM_CC_PLL0] = &cam_cc_pll0.clkr, + [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr, + [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr, + [CAM_CC_PLL1] = &cam_cc_pll1.clkr, + [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr, + [CAM_CC_PLL2] = &cam_cc_pll2.clkr, + [CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr, + [CAM_CC_PLL3] = &cam_cc_pll3.clkr, 
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr, + [CAM_CC_PLL4] = &cam_cc_pll4.clkr, + [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr, + [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, + [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr, + [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr, + [CAM_CC_TFE_0_AHB_CLK] = &cam_cc_tfe_0_ahb_clk.clkr, + [CAM_CC_TFE_0_CLK] = &cam_cc_tfe_0_clk.clkr, + [CAM_CC_TFE_0_CLK_SRC] = &cam_cc_tfe_0_clk_src.clkr, + [CAM_CC_TFE_0_CPHY_RX_CLK] = &cam_cc_tfe_0_cphy_rx_clk.clkr, + [CAM_CC_TFE_0_CSID_CLK] = &cam_cc_tfe_0_csid_clk.clkr, + [CAM_CC_TFE_0_CSID_CLK_SRC] = &cam_cc_tfe_0_csid_clk_src.clkr, + [CAM_CC_TFE_1_AHB_CLK] = &cam_cc_tfe_1_ahb_clk.clkr, + [CAM_CC_TFE_1_CLK] = &cam_cc_tfe_1_clk.clkr, + [CAM_CC_TFE_1_CLK_SRC] = &cam_cc_tfe_1_clk_src.clkr, + [CAM_CC_TFE_1_CPHY_RX_CLK] = &cam_cc_tfe_1_cphy_rx_clk.clkr, + [CAM_CC_TFE_1_CSID_CLK] = &cam_cc_tfe_1_csid_clk.clkr, + [CAM_CC_TFE_1_CSID_CLK_SRC] = &cam_cc_tfe_1_csid_clk_src.clkr, +}; + +static struct gdsc *cam_cc_sm4450_gdscs[] = { + [CAM_CC_CAMSS_TOP_GDSC] = &cam_cc_camss_top_gdsc, +}; + +static const struct qcom_reset_map cam_cc_sm4450_resets[] = { + [CAM_CC_BPS_BCR] = { 0xa000 }, + [CAM_CC_CAMNOC_BCR] = { 0x13000 }, + [CAM_CC_CAMSS_TOP_BCR] = { 0x14000 }, + [CAM_CC_CCI_0_BCR] = { 0x10000 }, + [CAM_CC_CCI_1_BCR] = { 0x11000 }, + [CAM_CC_CPAS_BCR] = { 0x12000 }, + [CAM_CC_CRE_BCR] = { 0x16000 }, + [CAM_CC_CSI0PHY_BCR] = { 0x9000 }, + [CAM_CC_CSI1PHY_BCR] = { 0x9024 }, + [CAM_CC_CSI2PHY_BCR] = { 0x9048 }, + [CAM_CC_ICP_BCR] = { 0xf000 }, + [CAM_CC_MCLK0_BCR] = { 0x8000 }, + [CAM_CC_MCLK1_BCR] = { 0x8020 }, + [CAM_CC_MCLK2_BCR] = { 0x8040 }, + [CAM_CC_MCLK3_BCR] = { 0x8060 }, + [CAM_CC_OPE_0_BCR] = { 0xb000 }, + [CAM_CC_TFE_0_BCR] = { 0xc000 }, + [CAM_CC_TFE_1_BCR] = { 0xd000 }, +}; + +static const struct regmap_config cam_cc_sm4450_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x16024, + .fast_io = true, +}; + +static struct qcom_cc_desc cam_cc_sm4450_desc = { + .config = &cam_cc_sm4450_regmap_config, + .clks = cam_cc_sm4450_clocks, + .num_clks = ARRAY_SIZE(cam_cc_sm4450_clocks), + .resets = cam_cc_sm4450_resets, + .num_resets = ARRAY_SIZE(cam_cc_sm4450_resets), + .gdscs = cam_cc_sm4450_gdscs, + .num_gdscs = ARRAY_SIZE(cam_cc_sm4450_gdscs), +}; + +static const struct of_device_id cam_cc_sm4450_match_table[] = { + { .compatible = "qcom,sm4450-camcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, cam_cc_sm4450_match_table); + +static int cam_cc_sm4450_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &cam_cc_sm4450_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config); + clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config); + clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); + clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config); + clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); + + return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm4450_desc, regmap); +} + +static struct platform_driver cam_cc_sm4450_driver = { + .probe = cam_cc_sm4450_probe, + .driver = { + .name = "camcc-sm4450", + .of_match_table = cam_cc_sm4450_match_table, + }, +}; + +module_platform_driver(cam_cc_sm4450_driver); + +MODULE_DESCRIPTION("QTI CAMCC SM4450 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c new file mode 100644 
index 00000000000000..bb3009818ad76e --- /dev/null +++ b/drivers/clk/qcom/camcc-sm8150.c @@ -0,0 +1,2159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_BI_TCXO, + DT_IFACE, +}; + +enum { + P_BI_TCXO, + P_CAM_CC_PLL0_OUT_EVEN, + P_CAM_CC_PLL0_OUT_MAIN, + P_CAM_CC_PLL0_OUT_ODD, + P_CAM_CC_PLL1_OUT_EVEN, + P_CAM_CC_PLL2_OUT_EARLY, + P_CAM_CC_PLL2_OUT_MAIN, + P_CAM_CC_PLL3_OUT_EVEN, + P_CAM_CC_PLL4_OUT_EVEN, +}; + +static const struct pll_vco regera_vco[] = { + { 600000000, 3300000000, 0 }, +}; + +static const struct pll_vco trion_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static const struct alpha_pll_config cam_cc_pll0_config = { + .l = 0x3e, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, + .user_ctl_val = 0x00003100, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static struct clk_alpha_pll cam_cc_pll0 = { + .offset = 0x0, + .vco_table = trion_vco, + .num_vco = ARRAY_SIZE(trion_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_trion_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_cam_cc_pll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_trion_ops, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = { + { 0x3, 3 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = { + .offset = 0x0, + .post_div_shift = 12, + .post_div_table = post_div_table_cam_cc_pll0_out_odd, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll0_out_odd", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_trion_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll1_config = { + .l = 0x1f, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, + .user_ctl_val = 0x00000100, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static struct clk_alpha_pll cam_cc_pll1 = { + .offset = 0x1000, + .vco_table = trion_vco, + 
.num_vco = ARRAY_SIZE(trion_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_trion_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = { + .offset = 0x1000, + .post_div_shift = 8, + .post_div_table = post_div_table_cam_cc_pll1_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll1_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll1.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_trion_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll2_config = { + .l = 0x32, + .alpha = 0x0, + .config_ctl_val = 0x10000807, + .config_ctl_hi_val = 0x00000011, + .config_ctl_hi1_val = 0x04300142, + .test_ctl_val = 0x04000400, + .test_ctl_hi_val = 0x00004000, + .test_ctl_hi1_val = 0x00000000, + .user_ctl_val = 0x00000100, + .user_ctl_hi_val = 0x00000000, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll cam_cc_pll2 = { + .offset = 0x2000, + .vco_table = regera_vco, + .num_vco = ARRAY_SIZE(regera_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll2", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_regera_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll2_out_main[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = { + .offset = 0x2000, + .post_div_shift = 8, + .post_div_table = post_div_table_cam_cc_pll2_out_main, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_main), + .width = 2, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll2_out_main", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll2.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_trion_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll3_config = { + .l = 0x29, + .alpha = 0xaaaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, + .user_ctl_val = 0x00000100, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static struct clk_alpha_pll cam_cc_pll3 = { + .offset = 0x3000, + .vco_table = trion_vco, + .num_vco = ARRAY_SIZE(trion_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll3", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_trion_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = { + .offset = 0x3000, + .post_div_shift = 8, + .post_div_table = post_div_table_cam_cc_pll3_out_even, + .num_post_div 
= ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll3_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll3.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_trion_ops, + }, +}; + +static const struct alpha_pll_config cam_cc_pll4_config = { + .l = 0x29, + .alpha = 0xaaaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, + .user_ctl_val = 0x00000100, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static struct clk_alpha_pll cam_cc_pll4 = { + .offset = 0x4000, + .vco_table = trion_vco, + .num_vco = ARRAY_SIZE(trion_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll4", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_trion_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = { + .offset = 0x4000, + .post_div_shift = 8, + .post_div_table = post_div_table_cam_cc_pll4_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_pll4_out_even", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_pll4.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_trion_ops, + }, +}; + +static const struct parent_map cam_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL0_OUT_MAIN, 1 }, + { P_CAM_CC_PLL0_OUT_EVEN, 2 }, + { P_CAM_CC_PLL0_OUT_ODD, 3 }, + { P_CAM_CC_PLL2_OUT_MAIN, 5 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll0.clkr.hw }, + { .hw = &cam_cc_pll0_out_even.clkr.hw }, + { .hw = &cam_cc_pll0_out_odd.clkr.hw }, + { .hw = &cam_cc_pll2_out_main.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL2_OUT_EARLY, 5 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll2.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL3_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll3_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL4_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll4_out_even.clkr.hw }, +}; + +static const struct parent_map cam_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL1_OUT_EVEN, 4 }, +}; + +static const struct clk_parent_data cam_cc_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .hw = &cam_cc_pll1_out_even.clkr.hw }, +}; + +static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0), + 
F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_bps_clk_src = { + .cmd_rcgr = 0x7010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0), + F(266666667, P_CAM_CC_PLL0_OUT_ODD, 1.5, 0, 0), + F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = { + .cmd_rcgr = 0xc170, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cci_0_clk_src = { + .cmd_rcgr = 0xc108, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_0_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_cci_1_clk_src = { + .cmd_rcgr = 0xc124, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_1_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { + .cmd_rcgr = 0xa064, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cphy_rx_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { + .cmd_rcgr = 0x6004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi0phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = 
ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { + .cmd_rcgr = 0x6028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi1phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { + .cmd_rcgr = 0x604c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi2phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { + .cmd_rcgr = 0x6070, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi3phytimer_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { + .cmd_rcgr = 0x703c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_fast_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fd_core_clk_src = { + .cmd_rcgr = 0xc0e0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fd_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_fd_core_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_icp_clk_src = { + .cmd_rcgr = 0xc0b8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_icp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, 
+ .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(558000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(847000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(950000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_clk_src = { + .cmd_rcgr = 0xa010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_2, + .freq_tbl = ftbl_cam_cc_ife_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_clk_src", + .parent_data = cam_cc_parent_data_2, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = { + .cmd_rcgr = 0xa03c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(558000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(847000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(950000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_1_clk_src = { + .cmd_rcgr = 0xb010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_3, + .freq_tbl = ftbl_cam_cc_ife_1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_clk_src", + .parent_data = cam_cc_parent_data_3, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = { + .cmd_rcgr = 0xb034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_lite_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_lite_0_clk_src = { + .cmd_rcgr = 0xc004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_0_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_0_csid_clk_src = { + .cmd_rcgr = 0xc020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fd_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_0_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_1_clk_src = { + .cmd_rcgr = 0xc048, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_1_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = { + .cmd_rcgr = 0xc064, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fd_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_1_csid_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ipe_0_clk_src = { + .cmd_rcgr = 0x8010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_4, + .freq_tbl = ftbl_cam_cc_ipe_0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_0_clk_src", + .parent_data = cam_cc_parent_data_4, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_jpeg_clk_src = { + .cmd_rcgr = 0xc08c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_jpeg_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_lrme_clk_src = { + .cmd_rcgr = 0xc144, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_lrme_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_lrme_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = { + F(12000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 8), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 4), + F(68571429, 
P_CAM_CC_PLL2_OUT_EARLY, 14, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_mclk0_clk_src = { + .cmd_rcgr = 0x5004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk0_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk1_clk_src = { + .cmd_rcgr = 0x5024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk1_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk2_clk_src = { + .cmd_rcgr = 0x5044, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk2_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk3_clk_src = { + .cmd_rcgr = 0x5064, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_1, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk3_clk_src", + .parent_data = cam_cc_parent_data_1, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { + .cmd_rcgr = 0x7058, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "cam_cc_slow_ahb_clk_src", + .parent_data = cam_cc_parent_data_0, + .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_branch cam_cc_bps_ahb_clk = { + .halt_reg = 0x7070, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7070, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_areg_clk = { + .halt_reg = 0x7054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_areg_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_axi_clk = { + .halt_reg = 0x7038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + 
.num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_clk = { + .halt_reg = 0x7028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_bps_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_bps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_clk = { + .halt_reg = 0xc18c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc18c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_dcd_xo_clk = { + .halt_reg = 0xc194, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc194, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_camnoc_dcd_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_0_clk = { + .halt_reg = 0xc120, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc120, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_1_clk = { + .halt_reg = 0xc13c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc13c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cci_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cci_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_core_ahb_clk = { + .halt_reg = 0xc1c8, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xc1c8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_core_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ahb_clk = { + .halt_reg = 0xc168, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc168, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_cpas_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi0phytimer_clk = { + .halt_reg = 0x601c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x601c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi0phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi0phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi1phytimer_clk = { + .halt_reg = 0x6040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = 
"cam_cc_csi1phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi1phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi2phytimer_clk = { + .halt_reg = 0x6064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi2phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi2phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi3phytimer_clk = { + .halt_reg = 0x6088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csi3phytimer_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_csi3phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy0_clk = { + .halt_reg = 0x6020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy1_clk = { + .halt_reg = 0x6044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy2_clk = { + .halt_reg = 0x6068, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6068, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy3_clk = { + .halt_reg = 0x608c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x608c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_csiphy3_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_fd_core_clk = { + .halt_reg = 0xc0f8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc0f8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_fd_core_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fd_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_fd_core_uar_clk = { + .halt_reg = 0xc100, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc100, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_fd_core_uar_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fd_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_ahb_clk = { + .halt_reg = 0xc0d8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc0d8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_clk = { + .halt_reg = 0xc0d0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc0d0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_icp_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_icp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_axi_clk = { + .halt_reg = 0xa080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa080, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_clk = { + .halt_reg = 0xa028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_cphy_rx_clk = { + .halt_reg = 0xa07c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa07c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_csid_clk = { + .halt_reg = 0xa054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_dsp_clk = { + .halt_reg = 0xa038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_0_dsp_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_axi_clk = { + .halt_reg = 0xb058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_clk = { + .halt_reg = 0xb028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb028, + .enable_mask = BIT(0), + .hw.init = 
&(const struct clk_init_data) { + .name = "cam_cc_ife_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_cphy_rx_clk = { + .halt_reg = 0xb054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_csid_clk = { + .halt_reg = 0xb04c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb04c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_dsp_clk = { + .halt_reg = 0xb030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_1_dsp_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_0_clk = { + .halt_reg = 0xc01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = { + .halt_reg = 0xc040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_0_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_0_csid_clk = { + .halt_reg = 0xc038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_0_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_0_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_1_clk = { + .halt_reg = 0xc060, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc060, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = { + .halt_reg = 0xc084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_1_cphy_rx_clk", + .parent_hws = (const struct clk_hw*[]) { + 
&cam_cc_cphy_rx_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_1_csid_clk = { + .halt_reg = 0xc07c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc07c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ife_lite_1_csid_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ife_lite_1_csid_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_ahb_clk = { + .halt_reg = 0x8040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_0_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_areg_clk = { + .halt_reg = 0x803c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x803c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_0_areg_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_axi_clk = { + .halt_reg = 0x8038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_0_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_clk = { + .halt_reg = 0x8028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ipe_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_ahb_clk = { + .halt_reg = 0x9028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_1_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_slow_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_areg_clk = { + .halt_reg = 0x9024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_1_areg_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_fast_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_axi_clk = { + .halt_reg = 0x9020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_1_axi_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_camnoc_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch 
cam_cc_ipe_1_clk = { + .halt_reg = 0x9010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_ipe_1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_ipe_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_jpeg_clk = { + .halt_reg = 0xc0a4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc0a4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_jpeg_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_jpeg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_lrme_clk = { + .halt_reg = 0xc15c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc15c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_lrme_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_lrme_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk0_clk = { + .halt_reg = 0x501c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x501c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk0_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk1_clk = { + .halt_reg = 0x503c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x503c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk1_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk2_clk = { + .halt_reg = 0x505c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x505c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk2_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk3_clk = { + .halt_reg = 0x507c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x507c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_mclk3_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_mclk3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc titan_top_gdsc = { + .gdscr = 0xc1bc, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "titan_top_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR, +}; + +static struct gdsc bps_gdsc = { + .gdscr = 0x7004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "bps_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR, +}; + +static struct gdsc ife_0_gdsc = { + .gdscr = 0xa004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "ife_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = 
&titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR, +}; + +static struct gdsc ife_1_gdsc = { + .gdscr = 0xb004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "ife_1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR, +}; + +static struct gdsc ipe_0_gdsc = { + .gdscr = 0x8004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "ipe_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR, +}; + +static struct gdsc ipe_1_gdsc = { + .gdscr = 0x9004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "ipe_1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .parent = &titan_top_gdsc.pd, + .flags = POLL_CFG_GDSCR, +}; + +static struct clk_regmap *cam_cc_sm8150_clocks[] = { + [CAM_CC_PLL0] = &cam_cc_pll0.clkr, + [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr, + [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr, + [CAM_CC_PLL1] = &cam_cc_pll1.clkr, + [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr, + [CAM_CC_PLL2] = &cam_cc_pll2.clkr, + [CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr, + [CAM_CC_PLL3] = &cam_cc_pll3.clkr, + [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr, + [CAM_CC_PLL4] = &cam_cc_pll4.clkr, + [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr, + [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr, + [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr, + [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr, + [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr, + [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr, + [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr, + [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr, + [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr, + [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr, + [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr, + [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr, + [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr, + [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr, + [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr, + [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr, + [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr, + [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr, + [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr, + [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr, + [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr, + [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr, + [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr, + [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr, + [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr, + [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr, + [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr, + [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr, + [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr, + [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr, + [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr, + [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr, + [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr, + [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr, + [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr, + [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr, + [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr, + [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr, + [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr, + 
[CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr, + [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr, + [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr, + [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr, + [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr, + [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr, + [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr, + [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr, + [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr, + [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr, + [CAM_CC_IFE_LITE_0_CLK] = &cam_cc_ife_lite_0_clk.clkr, + [CAM_CC_IFE_LITE_0_CLK_SRC] = &cam_cc_ife_lite_0_clk_src.clkr, + [CAM_CC_IFE_LITE_0_CPHY_RX_CLK] = &cam_cc_ife_lite_0_cphy_rx_clk.clkr, + [CAM_CC_IFE_LITE_0_CSID_CLK] = &cam_cc_ife_lite_0_csid_clk.clkr, + [CAM_CC_IFE_LITE_0_CSID_CLK_SRC] = &cam_cc_ife_lite_0_csid_clk_src.clkr, + [CAM_CC_IFE_LITE_1_CLK] = &cam_cc_ife_lite_1_clk.clkr, + [CAM_CC_IFE_LITE_1_CLK_SRC] = &cam_cc_ife_lite_1_clk_src.clkr, + [CAM_CC_IFE_LITE_1_CPHY_RX_CLK] = &cam_cc_ife_lite_1_cphy_rx_clk.clkr, + [CAM_CC_IFE_LITE_1_CSID_CLK] = &cam_cc_ife_lite_1_csid_clk.clkr, + [CAM_CC_IFE_LITE_1_CSID_CLK_SRC] = &cam_cc_ife_lite_1_csid_clk_src.clkr, + [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr, + [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr, + [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr, + [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr, + [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr, + [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr, + [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr, + [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr, + [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr, + [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr, + [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr, + [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr, + [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr, + [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr, + [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr, + [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr, + [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr, + [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr, + [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr, + [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr, + [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr, + [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, +}; + +static struct gdsc *cam_cc_sm8150_gdscs[] = { + [TITAN_TOP_GDSC] = &titan_top_gdsc, + [BPS_GDSC] = &bps_gdsc, + [IFE_0_GDSC] = &ife_0_gdsc, + [IFE_1_GDSC] = &ife_1_gdsc, + [IPE_0_GDSC] = &ipe_0_gdsc, + [IPE_1_GDSC] = &ipe_1_gdsc, +}; + +static const struct qcom_reset_map cam_cc_sm8150_resets[] = { + [CAM_CC_BPS_BCR] = { 0x7000 }, + [CAM_CC_CAMNOC_BCR] = { 0xc16c }, + [CAM_CC_CCI_BCR] = { 0xc104 }, + [CAM_CC_CPAS_BCR] = { 0xc164 }, + [CAM_CC_CSI0PHY_BCR] = { 0x6000 }, + [CAM_CC_CSI1PHY_BCR] = { 0x6024 }, + [CAM_CC_CSI2PHY_BCR] = { 0x6048 }, + [CAM_CC_CSI3PHY_BCR] = { 0x606c }, + [CAM_CC_FD_BCR] = { 0xc0dc }, + [CAM_CC_ICP_BCR] = { 0xc0b4 }, + [CAM_CC_IFE_0_BCR] = { 0xa000 }, + [CAM_CC_IFE_1_BCR] = { 0xb000 }, + [CAM_CC_IFE_LITE_0_BCR] = { 0xc000 }, + [CAM_CC_IFE_LITE_1_BCR] = { 0xc044 }, + [CAM_CC_IPE_0_BCR] = { 0x8000 }, + [CAM_CC_IPE_1_BCR] = { 0x9000 }, + [CAM_CC_JPEG_BCR] = { 0xc088 }, + [CAM_CC_LRME_BCR] = { 0xc140 }, + [CAM_CC_MCLK0_BCR] = { 0x5000 }, + [CAM_CC_MCLK1_BCR] = { 0x5020 }, + [CAM_CC_MCLK2_BCR] = { 0x5040 }, + [CAM_CC_MCLK3_BCR] = { 0x5060 }, +}; + +static const struct regmap_config cam_cc_sm8150_regmap_config = { + 
.reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xe004, + .fast_io = true, +}; + +static struct qcom_cc_desc cam_cc_sm8150_desc = { + .config = &cam_cc_sm8150_regmap_config, + .clks = cam_cc_sm8150_clocks, + .num_clks = ARRAY_SIZE(cam_cc_sm8150_clocks), + .resets = cam_cc_sm8150_resets, + .num_resets = ARRAY_SIZE(cam_cc_sm8150_resets), + .gdscs = cam_cc_sm8150_gdscs, + .num_gdscs = ARRAY_SIZE(cam_cc_sm8150_gdscs), +}; + +static const struct of_device_id cam_cc_sm8150_match_table[] = { + { .compatible = "qcom,sm8150-camcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, cam_cc_sm8150_match_table); + +static int cam_cc_sm8150_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + return ret; + + regmap = qcom_cc_map(pdev, &cam_cc_sm8150_desc); + if (IS_ERR(regmap)) { + pm_runtime_put(&pdev->dev); + return PTR_ERR(regmap); + } + + clk_trion_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config); + clk_trion_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config); + clk_regera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); + clk_trion_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config); + clk_trion_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); + + /* Keep the critical clock always-on */ + qcom_branch_set_clk_en(regmap, 0xc1e4); /* cam_cc_gdsc_clk */ + + ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8150_desc, regmap); + + pm_runtime_put(&pdev->dev); + + return ret; +} + +static struct platform_driver cam_cc_sm8150_driver = { + .probe = cam_cc_sm8150_probe, + .driver = { + .name = "camcc-sm8150", + .of_match_table = cam_cc_sm8150_match_table, + }, +}; + +module_platform_driver(cam_cc_sm8150_driver); + +MODULE_DESCRIPTION("QTI CAM_CC SM8150 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 31bf9d13f15419..f9105443d7dbb1 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved. - * Copyright (c) 2021, 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -1713,7 +1713,7 @@ static int __alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate, if (ret < 0) return ret; - regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); + regmap_update_bits(pll->clkr.regmap, PLL_L_VAL(pll), LUCID_EVO_PLL_L_VAL_MASK, l); regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a); /* Latch the PLL input */ @@ -1832,6 +1832,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = { }; EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops); +/** + * clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll + * + * @pll: clk alpha pll + * @regmap: register map + * @config: configuration to apply for pll + */ +void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config) +{ + /* + * If the bootloader left the PLL enabled it's likely that there are + * RCGs that will lock up if we disable the PLL below. 
+ */ + if (trion_pll_is_enabled(pll, regmap)) { + pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n"); + return; + } + + clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l); + regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL); + clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), + config->config_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), + config->config_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), + config->config_ctl_hi1_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), + config->user_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), + config->user_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll), + config->user_ctl_hi1_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), + config->test_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), + config->test_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), + config->test_ctl_hi1_val); + + /* Disable PLL output */ + regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0); + + /* Set operation mode to OFF */ + regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); + + /* Place the PLL in STANDBY mode */ + regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); +} +EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure); + static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw) { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); @@ -2674,3 +2726,33 @@ const struct clk_ops clk_alpha_pll_stromer_plus_ops = { .set_rate = clk_alpha_pll_stromer_plus_set_rate, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_plus_ops); + +void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config) +{ + clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l); + clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll), config->user_ctl_hi1_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val); + + /* Set operation mode to STANDBY */ + regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); +} +EXPORT_SYMBOL_GPL(clk_regera_pll_configure); + +const struct clk_ops clk_alpha_pll_regera_ops = { + .enable = clk_zonda_pll_enable, + .disable = clk_zonda_pll_disable, + .is_enabled = clk_alpha_pll_is_enabled, + .recalc_rate = clk_trion_pll_recalc_rate, + .round_rate = clk_alpha_pll_round_rate, + .set_rate = clk_zonda_pll_set_rate, +}; +EXPORT_SYMBOL_GPL(clk_alpha_pll_regera_ops); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index df8f0fe155313e..55eca04b23a1fc 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -23,6 +23,7 @@ enum { CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION, 
CLK_ALPHA_PLL_TYPE_AGERA, CLK_ALPHA_PLL_TYPE_ZONDA, + CLK_ALPHA_PLL_TYPE_REGERA = CLK_ALPHA_PLL_TYPE_ZONDA, CLK_ALPHA_PLL_TYPE_ZONDA_OLE, CLK_ALPHA_PLL_TYPE_LUCID_EVO, CLK_ALPHA_PLL_TYPE_LUCID_OLE, @@ -193,6 +194,8 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops; extern const struct clk_ops clk_alpha_pll_rivian_evo_ops; #define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops +extern const struct clk_ops clk_alpha_pll_regera_ops; + void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_huayra_2290_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, @@ -208,6 +211,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); +void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_lucid_ole_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, @@ -216,5 +221,7 @@ void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regm const struct alpha_pll_config *config); void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); +void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); #endif diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c index f5fd1ff9c6c992..ce4efcd995ea43 100644 --- a/drivers/clk/qcom/clk-cbf-8996.c +++ b/drivers/clk/qcom/clk-cbf-8996.c @@ -346,7 +346,7 @@ MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table); static struct platform_driver qcom_msm8996_cbf_driver = { .probe = qcom_msm8996_cbf_probe, - .remove_new = qcom_msm8996_cbf_remove, + .remove = qcom_msm8996_cbf_remove, .driver = { .name = "qcom-msm8996-cbf", .of_match_table = qcom_msm8996_cbf_match_table, diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index bb82abeed88f3b..4acde937114af3 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -263,6 +263,8 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) cmd_state = 0; } + cmd_state = min(cmd_state, BCM_TCS_CMD_VOTE_MASK); + if (c->last_sent_aggr_state != cmd_state) { cmd.addr = c->res_addr; cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state); diff --git a/drivers/clk/qcom/dispcc-sm4450.c b/drivers/clk/qcom/dispcc-sm4450.c new file mode 100644 index 00000000000000..98ba016bc57f11 --- /dev/null +++ b/drivers/clk/qcom/dispcc-sm4450.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_BI_TCXO, + DT_BI_TCXO_AO, + DT_AHB_CLK, + DT_SLEEP_CLK, + + DT_DSI0_PHY_PLL_OUT_BYTECLK, + DT_DSI0_PHY_PLL_OUT_DSICLK, +}; + +enum { + P_BI_TCXO, + P_DISP_CC_PLL0_OUT_MAIN, + P_DISP_CC_PLL1_OUT_EVEN, + P_DISP_CC_PLL1_OUT_MAIN, + P_DSI0_PHY_PLL_OUT_BYTECLK, + P_DSI0_PHY_PLL_OUT_DSICLK, + P_SLEEP_CLK, +}; + +static const struct pll_vco lucid_evo_vco[] = { + { 249600000, 2020000000, 0 }, +}; + +/* 600.0 MHz Configuration */ +static const struct alpha_pll_config disp_cc_pll0_config = { + .l = 0x1f, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, +}; + +static struct clk_alpha_pll disp_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static struct clk_alpha_pll disp_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct parent_map disp_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DSI0_PHY_PLL_OUT_DSICLK }, + { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, +}; + +static const struct parent_map disp_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_DISP_CC_PLL0_OUT_MAIN, 1 }, + { P_DISP_CC_PLL1_OUT_MAIN, 4 }, + { P_DISP_CC_PLL1_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &disp_cc_pll0.clkr.hw }, + { .hw = &disp_cc_pll1.clkr.hw }, + { .hw = &disp_cc_pll1.clkr.hw }, +}; + +static const struct parent_map disp_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, +}; + +static const struct clk_parent_data disp_cc_parent_data_2_ao[] = { + { .index = DT_BI_TCXO_AO }, +}; + +static const struct parent_map disp_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_DISP_CC_PLL1_OUT_MAIN, 4 }, + { P_DISP_CC_PLL1_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .hw = &disp_cc_pll1.clkr.hw }, + { .hw = &disp_cc_pll1.clkr.hw }, +}; + +static const struct parent_map disp_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, +}; + +static const struct clk_parent_data disp_cc_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, +}; + +static const struct parent_map disp_cc_parent_map_5[] = { + { P_SLEEP_CLK, 0 }, +}; 
+ +static const struct clk_parent_data disp_cc_parent_data_5[] = { + { .index = DT_SLEEP_CLK }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0), + F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = { + .cmd_rcgr = 0x82a4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_3, + .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_ahb_clk_src", + .parent_data = disp_cc_parent_data_3, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = { + .cmd_rcgr = 0x80f8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_byte0_clk_src", + .parent_data = disp_cc_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { + .cmd_rcgr = 0x8114, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_4, + .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_esc0_clk_src", + .parent_data = disp_cc_parent_data_4, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = { + F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(380000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(506000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(608000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = { + .cmd_rcgr = 0x80b0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_mdp_clk_src", + .parent_data = disp_cc_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = { + .cmd_rcgr = 0x8098, + .mnd_width = 8, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_pclk0_clk_src", + .parent_data = disp_cc_parent_data_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = { + F(200000000, P_DISP_CC_PLL1_OUT_MAIN, 3, 0, 0), + F(300000000, P_DISP_CC_PLL1_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_rot_clk_src = { + .cmd_rcgr = 0x80c8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_rot_clk_src", + .parent_data = disp_cc_parent_data_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), + 
.flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = { + .cmd_rcgr = 0x80e0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_2, + .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_vsync_clk_src", + .parent_data = disp_cc_parent_data_2, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = { + F(32000, P_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_sleep_clk_src = { + .cmd_rcgr = 0xe058, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_5, + .freq_tbl = ftbl_disp_cc_sleep_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_sleep_clk_src", + .parent_data = disp_cc_parent_data_5, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 disp_cc_xo_clk_src = { + .cmd_rcgr = 0xe03c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_2, + .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_xo_clk_src", + .parent_data = disp_cc_parent_data_2_ao, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_2_ao), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = { + .reg = 0x8110, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_byte0_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ops, + }, +}; + +static struct clk_branch disp_cc_mdss_ahb1_clk = { + .halt_reg = 0xa020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_ahb1_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_ahb_clk = { + .halt_reg = 0x8094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8094, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_byte0_clk = { + .halt_reg = 0x8024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_byte0_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_byte0_intf_clk = { + .halt_reg = 0x8028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_byte0_intf_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_byte0_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_esc0_clk = { + .halt_reg = 0x802c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x802c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_esc0_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_esc0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp1_clk = { + .halt_reg = 0xa004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_mdp1_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp_clk = { + .halt_reg = 0x8008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_mdp_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp_lut1_clk = { + .halt_reg = 0xa014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_mdp_lut1_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp_lut_clk = { + .halt_reg = 0x8018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_mdp_lut_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = { + .halt_reg = 0xc004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xc004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_non_gdsc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_pclk0_clk = { + .halt_reg = 0x8004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_pclk0_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_pclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rot1_clk = { + .halt_reg = 0xa00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_rot1_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_rot_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rot_clk = { + 
.halt_reg = 0x8010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_rot_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_rot_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rscc_ahb_clk = { + .halt_reg = 0xc00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_rscc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rscc_vsync_clk = { + .halt_reg = 0xc008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_rscc_vsync_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_vsync1_clk = { + .halt_reg = 0xa01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_vsync1_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_vsync_clk = { + .halt_reg = 0x8020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "disp_cc_mdss_vsync_clk", + .parent_hws = (const struct clk_hw*[]) { + &disp_cc_mdss_vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc disp_cc_mdss_core_gdsc = { + .gdscr = 0x9000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "disp_cc_mdss_core_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct gdsc disp_cc_mdss_core_int2_gdsc = { + .gdscr = 0xb000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, + .pd = { + .name = "disp_cc_mdss_core_int2_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE, +}; + +static struct clk_regmap *disp_cc_sm4450_clocks[] = { + [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr, + [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr, + [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr, + [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr, + [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr, + [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr, + [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr, + [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr, + [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr, + [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr, + 
[DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr, + [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr, + [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr, + [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr, + [DISP_CC_MDSS_ROT1_CLK] = &disp_cc_mdss_rot1_clk.clkr, + [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr, + [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr, + [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr, + [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr, + [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr, + [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr, + [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr, + [DISP_CC_PLL0] = &disp_cc_pll0.clkr, + [DISP_CC_PLL1] = &disp_cc_pll1.clkr, + [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr, + [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr, +}; + +static struct gdsc *disp_cc_sm4450_gdscs[] = { + [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc, + [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc, +}; + +static const struct qcom_reset_map disp_cc_sm4450_resets[] = { + [DISP_CC_MDSS_CORE_BCR] = { 0x8000 }, + [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 }, + [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 }, +}; + +static const struct regmap_config disp_cc_sm4450_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x11008, + .fast_io = true, +}; + +static struct qcom_cc_desc disp_cc_sm4450_desc = { + .config = &disp_cc_sm4450_regmap_config, + .clks = disp_cc_sm4450_clocks, + .num_clks = ARRAY_SIZE(disp_cc_sm4450_clocks), + .resets = disp_cc_sm4450_resets, + .num_resets = ARRAY_SIZE(disp_cc_sm4450_resets), + .gdscs = disp_cc_sm4450_gdscs, + .num_gdscs = ARRAY_SIZE(disp_cc_sm4450_gdscs), +}; + +static const struct of_device_id disp_cc_sm4450_match_table[] = { + { .compatible = "qcom,sm4450-dispcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, disp_cc_sm4450_match_table); + +static int disp_cc_sm4450_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &disp_cc_sm4450_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); + clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll0_config); + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0xe070); /* DISP_CC_SLEEP_CLK */ + qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */ + + return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm4450_desc, regmap); +} + +static struct platform_driver disp_cc_sm4450_driver = { + .probe = disp_cc_sm4450_probe, + .driver = { + .name = "dispcc-sm4450", + .of_match_table = disp_cc_sm4450_match_table, + }, +}; + +module_platform_driver(disp_cc_sm4450_driver); + +MODULE_DESCRIPTION("QTI DISPCC SM4450 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c index 5a09009b72891b..884bbd3fb30571 100644 --- a/drivers/clk/qcom/dispcc-sm8250.c +++ b/drivers/clk/qcom/dispcc-sm8250.c @@ -849,6 +849,7 @@ static struct clk_branch disp_cc_mdss_dp_link1_intf_clk = { &disp_cc_mdss_dp_link1_div_clk_src.clkr.hw, }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -884,6 +885,7 @@ static struct clk_branch disp_cc_mdss_dp_link_intf_clk = { &disp_cc_mdss_dp_link_div_clk_src.clkr.hw, }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1009,6 
+1011,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = { &disp_cc_mdss_mdp_clk_src.clkr.hw, }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1357,8 +1360,13 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev) disp_cc_sm8250_clocks[DISP_CC_MDSS_EDP_GTC_CLK_SRC] = NULL; } - clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); - clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) { + clk_lucid_5lpe_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); + clk_lucid_5lpe_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + } else { + clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); + clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + } /* Enable clock gating for MDP clocks */ regmap_update_bits(regmap, 0x8000, 0x10, 0x10); diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c index 31ae46f180a5c2..7f9021ca0ecb0e 100644 --- a/drivers/clk/qcom/dispcc-sm8550.c +++ b/drivers/clk/qcom/dispcc-sm8550.c @@ -71,7 +71,7 @@ enum { P_SLEEP_CLK, }; -static const struct pll_vco lucid_ole_vco[] = { +static struct pll_vco lucid_ole_vco[] = { { 249600000, 2000000000, 0 }, }; @@ -95,7 +95,7 @@ static struct clk_alpha_pll disp_cc_pll0 = { .num_vco = ARRAY_SIZE(lucid_ole_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], .clkr = { - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_pll0", .parent_data = &(const struct clk_parent_data) { .index = DT_BI_TCXO, @@ -126,7 +126,7 @@ static struct clk_alpha_pll disp_cc_pll1 = { .num_vco = ARRAY_SIZE(lucid_ole_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], .clkr = { - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_pll1", .parent_data = &(const struct clk_parent_data) { .index = DT_BI_TCXO, @@ -196,7 +196,7 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = { static const struct parent_map disp_cc_parent_map_4[] = { { P_BI_TCXO, 0 }, { P_DP0_PHY_PLL_LINK_CLK, 1 }, - { P_DP1_PHY_PLL_VCO_DIV_CLK, 2 }, + { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, @@ -213,7 +213,7 @@ static const struct clk_parent_data disp_cc_parent_data_4[] = { static const struct parent_map disp_cc_parent_map_5[] = { { P_BI_TCXO, 0 }, - { P_DSI0_PHY_PLL_OUT_BYTECLK, 4 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, }; @@ -286,7 +286,7 @@ static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_6, .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_ahb_clk_src", .parent_data = disp_cc_parent_data_6, .num_parents = ARRAY_SIZE(disp_cc_parent_data_6), @@ -306,7 +306,7 @@ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_2, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte0_clk_src", .parent_data = disp_cc_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), @@ -321,7 +321,7 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_2, 
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte1_clk_src", .parent_data = disp_cc_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), @@ -336,7 +336,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_aux_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), @@ -350,7 +350,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_7, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_clk_src", .parent_data = disp_cc_parent_data_7, .num_parents = ARRAY_SIZE(disp_cc_parent_data_7), @@ -365,7 +365,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_4, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_pixel0_clk_src", .parent_data = disp_cc_parent_data_4, .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), @@ -380,7 +380,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_4, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_pixel1_clk_src", .parent_data = disp_cc_parent_data_4, .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), @@ -395,12 +395,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_aux_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, + .ops = &clk_rcg2_ops, }, }; @@ -409,7 +409,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), @@ -424,7 +424,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_pixel0_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), @@ -439,7 +439,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_pixel1_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), @@ -454,7 +454,7 @@ static struct clk_rcg2 
disp_cc_mdss_dptx2_aux_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_aux_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), @@ -468,7 +468,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), @@ -483,7 +483,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_pixel0_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), @@ -498,7 +498,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_pixel1_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), @@ -513,7 +513,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_aux_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), @@ -527,7 +527,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), @@ -542,7 +542,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_1, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_pixel0_clk_src", .parent_data = disp_cc_parent_data_1, .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), @@ -557,12 +557,12 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_5, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_esc0_clk_src", .parent_data = disp_cc_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -572,12 +572,12 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_5, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_esc1_clk_src", .parent_data = disp_cc_parent_data_5, .num_parents = 
ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -594,13 +594,25 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = { { } }; +static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), + { } +}; + static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = { .cmd_rcgr = 0x80d8, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_8, .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp_clk_src", .parent_data = disp_cc_parent_data_8, .num_parents = ARRAY_SIZE(disp_cc_parent_data_8), @@ -615,7 +627,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_2, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_pclk0_clk_src", .parent_data = disp_cc_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), @@ -630,7 +642,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_2, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_pclk1_clk_src", .parent_data = disp_cc_parent_data_2, .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), @@ -645,7 +657,7 @@ static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_vsync_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), @@ -665,7 +677,7 @@ static struct clk_rcg2 disp_cc_sleep_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_9, .freq_tbl = ftbl_disp_cc_sleep_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_sleep_clk_src", .parent_data = disp_cc_parent_data_9, .num_parents = ARRAY_SIZE(disp_cc_parent_data_9), @@ -680,7 +692,7 @@ static struct clk_rcg2 disp_cc_xo_clk_src = { .hid_width = 5, .parent_map = disp_cc_parent_map_0, .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_xo_clk_src", .parent_data = disp_cc_parent_data_0_ao, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao), @@ -693,7 +705,7 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = { .reg = 0x8120, .shift = 0, .width = 4, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte0_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte0_clk_src.clkr.hw, @@ -707,7 +719,7 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = { .reg = 0x813c, .shift = 0, .width = 4, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const 
struct clk_init_data) { .name = "disp_cc_mdss_byte1_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte1_clk_src.clkr.hw, @@ -721,7 +733,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = { .reg = 0x8188, .shift = 0, .width = 4, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, @@ -736,7 +748,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = { .reg = 0x821c, .shift = 0, .width = 4, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, @@ -751,7 +763,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = { .reg = 0x8250, .shift = 0, .width = 4, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, @@ -766,7 +778,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = { .reg = 0x82cc, .shift = 0, .width = 4, - .clkr.hw.init = &(struct clk_init_data) { + .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_div_clk_src", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, @@ -783,7 +795,7 @@ static struct clk_branch disp_cc_mdss_accu_clk = { .clkr = { .enable_reg = 0xe058, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_accu_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_xo_clk_src.clkr.hw, @@ -801,7 +813,7 @@ static struct clk_branch disp_cc_mdss_ahb1_clk = { .clkr = { .enable_reg = 0xa020, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_ahb1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_ahb_clk_src.clkr.hw, @@ -819,7 +831,7 @@ static struct clk_branch disp_cc_mdss_ahb_clk = { .clkr = { .enable_reg = 0x80a4, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_ahb_clk_src.clkr.hw, @@ -837,7 +849,7 @@ static struct clk_branch disp_cc_mdss_byte0_clk = { .clkr = { .enable_reg = 0x8028, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte0_clk_src.clkr.hw, @@ -855,7 +867,7 @@ static struct clk_branch disp_cc_mdss_byte0_intf_clk = { .clkr = { .enable_reg = 0x802c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte0_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte0_div_clk_src.clkr.hw, @@ -873,7 +885,7 @@ static struct clk_branch disp_cc_mdss_byte1_clk = { .clkr = { .enable_reg = 0x8030, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte1_clk_src.clkr.hw, @@ -891,7 +903,7 @@ static struct clk_branch 
disp_cc_mdss_byte1_intf_clk = { .clkr = { .enable_reg = 0x8034, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_byte1_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_byte1_div_clk_src.clkr.hw, @@ -909,7 +921,7 @@ static struct clk_branch disp_cc_mdss_dptx0_aux_clk = { .clkr = { .enable_reg = 0x8058, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_aux_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw, @@ -927,7 +939,7 @@ static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = { .clkr = { .enable_reg = 0x804c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, @@ -945,7 +957,7 @@ static struct clk_branch disp_cc_mdss_dptx0_link_clk = { .clkr = { .enable_reg = 0x8040, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, @@ -963,7 +975,7 @@ static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = { .clkr = { .enable_reg = 0x8048, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, @@ -981,7 +993,7 @@ static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = { .clkr = { .enable_reg = 0x8050, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw, @@ -999,7 +1011,7 @@ static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = { .clkr = { .enable_reg = 0x8054, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_pixel1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw, @@ -1017,7 +1029,7 @@ static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = { .clkr = { .enable_reg = 0x8044, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, @@ -1035,7 +1047,7 @@ static struct clk_branch disp_cc_mdss_dptx1_aux_clk = { .clkr = { .enable_reg = 0x8074, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_aux_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw, @@ -1053,7 +1065,7 @@ static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = { .clkr = { .enable_reg = 0x8070, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, @@ -1071,7 +1083,7 @@ static struct clk_branch disp_cc_mdss_dptx1_link_clk = { .clkr = { .enable_reg = 0x8064, .enable_mask = BIT(0), - .hw.init = 
&(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, @@ -1089,7 +1101,7 @@ static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = { .clkr = { .enable_reg = 0x806c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, @@ -1107,7 +1119,7 @@ static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = { .clkr = { .enable_reg = 0x805c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw, @@ -1125,7 +1137,7 @@ static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = { .clkr = { .enable_reg = 0x8060, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_pixel1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw, @@ -1143,7 +1155,7 @@ static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = { .clkr = { .enable_reg = 0x8068, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, @@ -1161,7 +1173,7 @@ static struct clk_branch disp_cc_mdss_dptx2_aux_clk = { .clkr = { .enable_reg = 0x808c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_aux_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw, @@ -1179,7 +1191,7 @@ static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = { .clkr = { .enable_reg = 0x8088, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, @@ -1197,7 +1209,7 @@ static struct clk_branch disp_cc_mdss_dptx2_link_clk = { .clkr = { .enable_reg = 0x8080, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, @@ -1215,7 +1227,7 @@ static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = { .clkr = { .enable_reg = 0x8084, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw, @@ -1233,7 +1245,7 @@ static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = { .clkr = { .enable_reg = 0x8078, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw, @@ -1251,7 +1263,7 @@ static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = { .clkr = { .enable_reg = 0x807c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) 
{ .name = "disp_cc_mdss_dptx2_pixel1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw, @@ -1269,7 +1281,7 @@ static struct clk_branch disp_cc_mdss_dptx3_aux_clk = { .clkr = { .enable_reg = 0x809c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_aux_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw, @@ -1287,7 +1299,7 @@ static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = { .clkr = { .enable_reg = 0x80a0, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_crypto_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, @@ -1305,7 +1317,7 @@ static struct clk_branch disp_cc_mdss_dptx3_link_clk = { .clkr = { .enable_reg = 0x8094, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, @@ -1323,7 +1335,7 @@ static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = { .clkr = { .enable_reg = 0x8098, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_intf_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw, @@ -1341,7 +1353,7 @@ static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = { .clkr = { .enable_reg = 0x8090, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_pixel0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw, @@ -1359,7 +1371,7 @@ static struct clk_branch disp_cc_mdss_esc0_clk = { .clkr = { .enable_reg = 0x8038, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_esc0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_esc0_clk_src.clkr.hw, @@ -1377,7 +1389,7 @@ static struct clk_branch disp_cc_mdss_esc1_clk = { .clkr = { .enable_reg = 0x803c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_esc1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_esc1_clk_src.clkr.hw, @@ -1395,7 +1407,7 @@ static struct clk_branch disp_cc_mdss_mdp1_clk = { .clkr = { .enable_reg = 0xa004, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_mdp_clk_src.clkr.hw, @@ -1413,7 +1425,7 @@ static struct clk_branch disp_cc_mdss_mdp_clk = { .clkr = { .enable_reg = 0x800c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_mdp_clk_src.clkr.hw, @@ -1431,7 +1443,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut1_clk = { .clkr = { .enable_reg = 0xa010, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp_lut1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_mdp_clk_src.clkr.hw, @@ -1449,7 +1461,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = { .clkr = { 
.enable_reg = 0x8018, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_mdp_lut_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_mdp_clk_src.clkr.hw, @@ -1467,7 +1479,7 @@ static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = { .clkr = { .enable_reg = 0xc004, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_non_gdsc_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_ahb_clk_src.clkr.hw, @@ -1485,7 +1497,7 @@ static struct clk_branch disp_cc_mdss_pclk0_clk = { .clkr = { .enable_reg = 0x8004, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_pclk0_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_pclk0_clk_src.clkr.hw, @@ -1503,7 +1515,7 @@ static struct clk_branch disp_cc_mdss_pclk1_clk = { .clkr = { .enable_reg = 0x8008, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_pclk1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_pclk1_clk_src.clkr.hw, @@ -1521,7 +1533,7 @@ static struct clk_branch disp_cc_mdss_rscc_ahb_clk = { .clkr = { .enable_reg = 0xc00c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_rscc_ahb_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_ahb_clk_src.clkr.hw, @@ -1539,7 +1551,7 @@ static struct clk_branch disp_cc_mdss_rscc_vsync_clk = { .clkr = { .enable_reg = 0xc008, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_rscc_vsync_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_vsync_clk_src.clkr.hw, @@ -1557,7 +1569,7 @@ static struct clk_branch disp_cc_mdss_vsync1_clk = { .clkr = { .enable_reg = 0xa01c, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_vsync1_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_vsync_clk_src.clkr.hw, @@ -1575,7 +1587,7 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { .clkr = { .enable_reg = 0x8024, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_vsync_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_mdss_vsync_clk_src.clkr.hw, @@ -1593,7 +1605,7 @@ static struct clk_branch disp_cc_sleep_clk = { .clkr = { .enable_reg = 0xe074, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data) { + .hw.init = &(const struct clk_init_data) { .name = "disp_cc_sleep_clk", .parent_hws = (const struct clk_hw*[]) { &disp_cc_sleep_clk_src.clkr.hw, @@ -1611,7 +1623,7 @@ static struct gdsc mdss_gdsc = { .name = "mdss_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, }; static struct gdsc mdss_int2_gdsc = { @@ -1620,7 +1632,7 @@ static struct gdsc mdss_int2_gdsc = { .name = "mdss_int2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, }; static struct clk_regmap *disp_cc_sm8550_clocks[] = { @@ -1739,6 +1751,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = { static const struct of_device_id disp_cc_sm8550_match_table[] = { { .compatible = "qcom,sm8550-dispcc" }, + { .compatible 
= "qcom,sm8650-dispcc" }, { } }; MODULE_DEVICE_TABLE(of, disp_cc_sm8550_match_table); @@ -1762,6 +1775,13 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev) goto err_put_rpm; } + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-dispcc")) { + lucid_ole_vco[0].max_freq = 2100000000; + disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650; + disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] = + &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw; + } + clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); @@ -1795,5 +1815,5 @@ static struct platform_driver disp_cc_sm8550_driver = { module_platform_driver(disp_cc_sm8550_driver); -MODULE_DESCRIPTION("QTI DISPCC SM8550 Driver"); +MODULE_DESCRIPTION("QTI DISPCC SM8550 / SM8650 Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c deleted file mode 100644 index c9d2751f5cb8cb..00000000000000 --- a/drivers/clk/qcom/dispcc-sm8650.c +++ /dev/null @@ -1,1796 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved - * Copyright (c) 2023, Linaro Ltd. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "common.h" -#include "clk-alpha-pll.h" -#include "clk-branch.h" -#include "clk-pll.h" -#include "clk-rcg.h" -#include "clk-regmap.h" -#include "clk-regmap-divider.h" -#include "reset.h" -#include "gdsc.h" - -/* Need to match the order of clocks in DT binding */ -enum { - DT_BI_TCXO, - DT_BI_TCXO_AO, - DT_AHB_CLK, - DT_SLEEP_CLK, - - DT_DSI0_PHY_PLL_OUT_BYTECLK, - DT_DSI0_PHY_PLL_OUT_DSICLK, - DT_DSI1_PHY_PLL_OUT_BYTECLK, - DT_DSI1_PHY_PLL_OUT_DSICLK, - - DT_DP0_PHY_PLL_LINK_CLK, - DT_DP0_PHY_PLL_VCO_DIV_CLK, - DT_DP1_PHY_PLL_LINK_CLK, - DT_DP1_PHY_PLL_VCO_DIV_CLK, - DT_DP2_PHY_PLL_LINK_CLK, - DT_DP2_PHY_PLL_VCO_DIV_CLK, - DT_DP3_PHY_PLL_LINK_CLK, - DT_DP3_PHY_PLL_VCO_DIV_CLK, -}; - -#define DISP_CC_MISC_CMD 0xF000 - -enum { - P_BI_TCXO, - P_DISP_CC_PLL0_OUT_MAIN, - P_DISP_CC_PLL1_OUT_EVEN, - P_DISP_CC_PLL1_OUT_MAIN, - P_DP0_PHY_PLL_LINK_CLK, - P_DP0_PHY_PLL_VCO_DIV_CLK, - P_DP1_PHY_PLL_LINK_CLK, - P_DP1_PHY_PLL_VCO_DIV_CLK, - P_DP2_PHY_PLL_LINK_CLK, - P_DP2_PHY_PLL_VCO_DIV_CLK, - P_DP3_PHY_PLL_LINK_CLK, - P_DP3_PHY_PLL_VCO_DIV_CLK, - P_DSI0_PHY_PLL_OUT_BYTECLK, - P_DSI0_PHY_PLL_OUT_DSICLK, - P_DSI1_PHY_PLL_OUT_BYTECLK, - P_DSI1_PHY_PLL_OUT_DSICLK, - P_SLEEP_CLK, -}; - -static const struct pll_vco lucid_ole_vco[] = { - { 249600000, 2100000000, 0 }, -}; - -static const struct alpha_pll_config disp_cc_pll0_config = { - .l = 0xd, - .alpha = 0x6492, - .config_ctl_val = 0x20485699, - .config_ctl_hi_val = 0x00182261, - .config_ctl_hi1_val = 0x82aa299c, - .test_ctl_val = 0x00000000, - .test_ctl_hi_val = 0x00000003, - .test_ctl_hi1_val = 0x00009000, - .test_ctl_hi2_val = 0x00000034, - .user_ctl_val = 0x00000000, - .user_ctl_hi_val = 0x00000005, -}; - -static struct clk_alpha_pll disp_cc_pll0 = { - .offset = 0x0, - .vco_table = lucid_ole_vco, - .num_vco = ARRAY_SIZE(lucid_ole_vco), - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], - .clkr = { - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_pll0", - .parent_data = &(const struct clk_parent_data) { - .index = DT_BI_TCXO, - }, - .num_parents = 1, - .ops = &clk_alpha_pll_reset_lucid_ole_ops, - }, - }, -}; - -static const struct alpha_pll_config 
disp_cc_pll1_config = { - .l = 0x1f, - .alpha = 0x4000, - .config_ctl_val = 0x20485699, - .config_ctl_hi_val = 0x00182261, - .config_ctl_hi1_val = 0x82aa299c, - .test_ctl_val = 0x00000000, - .test_ctl_hi_val = 0x00000003, - .test_ctl_hi1_val = 0x00009000, - .test_ctl_hi2_val = 0x00000034, - .user_ctl_val = 0x00000000, - .user_ctl_hi_val = 0x00000005, -}; - -static struct clk_alpha_pll disp_cc_pll1 = { - .offset = 0x1000, - .vco_table = lucid_ole_vco, - .num_vco = ARRAY_SIZE(lucid_ole_vco), - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], - .clkr = { - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_pll1", - .parent_data = &(const struct clk_parent_data) { - .index = DT_BI_TCXO, - }, - .num_parents = 1, - .ops = &clk_alpha_pll_reset_lucid_ole_ops, - }, - }, -}; - -static const struct parent_map disp_cc_parent_map_0[] = { - { P_BI_TCXO, 0 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_0[] = { - { .index = DT_BI_TCXO }, -}; - -static const struct clk_parent_data disp_cc_parent_data_0_ao[] = { - { .index = DT_BI_TCXO_AO }, -}; - -static const struct parent_map disp_cc_parent_map_1[] = { - { P_BI_TCXO, 0 }, - { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, - { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, - { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_1[] = { - { .index = DT_BI_TCXO }, - { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK }, - { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK }, - { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK }, -}; - -static const struct parent_map disp_cc_parent_map_2[] = { - { P_BI_TCXO, 0 }, - { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, - { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, - { P_DSI1_PHY_PLL_OUT_DSICLK, 3 }, - { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_2[] = { - { .index = DT_BI_TCXO }, - { .index = DT_DSI0_PHY_PLL_OUT_DSICLK }, - { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, - { .index = DT_DSI1_PHY_PLL_OUT_DSICLK }, - { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, -}; - -static const struct parent_map disp_cc_parent_map_3[] = { - { P_BI_TCXO, 0 }, - { P_DP1_PHY_PLL_LINK_CLK, 2 }, - { P_DP2_PHY_PLL_LINK_CLK, 3 }, - { P_DP3_PHY_PLL_LINK_CLK, 4 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_3[] = { - { .index = DT_BI_TCXO }, - { .index = DT_DP1_PHY_PLL_LINK_CLK }, - { .index = DT_DP2_PHY_PLL_LINK_CLK }, - { .index = DT_DP3_PHY_PLL_LINK_CLK }, -}; - -static const struct parent_map disp_cc_parent_map_4[] = { - { P_BI_TCXO, 0 }, - { P_DP0_PHY_PLL_LINK_CLK, 1 }, - { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, - { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, - { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, - { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_4[] = { - { .index = DT_BI_TCXO }, - { .index = DT_DP0_PHY_PLL_LINK_CLK }, - { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK }, - { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK }, - { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK }, - { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK }, -}; - -static const struct parent_map disp_cc_parent_map_5[] = { - { P_BI_TCXO, 0 }, - { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, - { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_5[] = { - { .index = DT_BI_TCXO }, - { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK }, - { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK }, -}; - -static const struct parent_map disp_cc_parent_map_6[] = { - { P_BI_TCXO, 0 }, - { P_DISP_CC_PLL1_OUT_MAIN, 4 }, - { P_DISP_CC_PLL1_OUT_EVEN, 6 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_6[] = { - { 
.index = DT_BI_TCXO }, - { .hw = &disp_cc_pll1.clkr.hw }, - { .hw = &disp_cc_pll1.clkr.hw }, -}; - -static const struct parent_map disp_cc_parent_map_7[] = { - { P_BI_TCXO, 0 }, - { P_DP0_PHY_PLL_LINK_CLK, 1 }, - { P_DP1_PHY_PLL_LINK_CLK, 2 }, - { P_DP2_PHY_PLL_LINK_CLK, 3 }, - { P_DP3_PHY_PLL_LINK_CLK, 4 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_7[] = { - { .index = DT_BI_TCXO }, - { .index = DT_DP0_PHY_PLL_LINK_CLK }, - { .index = DT_DP1_PHY_PLL_LINK_CLK }, - { .index = DT_DP2_PHY_PLL_LINK_CLK }, - { .index = DT_DP3_PHY_PLL_LINK_CLK }, -}; - -static const struct parent_map disp_cc_parent_map_8[] = { - { P_BI_TCXO, 0 }, - { P_DISP_CC_PLL0_OUT_MAIN, 1 }, - { P_DISP_CC_PLL1_OUT_MAIN, 4 }, - { P_DISP_CC_PLL1_OUT_EVEN, 6 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_8[] = { - { .index = DT_BI_TCXO }, - { .hw = &disp_cc_pll0.clkr.hw }, - { .hw = &disp_cc_pll1.clkr.hw }, - { .hw = &disp_cc_pll1.clkr.hw }, -}; - -static const struct parent_map disp_cc_parent_map_9[] = { - { P_SLEEP_CLK, 0 }, -}; - -static const struct clk_parent_data disp_cc_parent_data_9[] = { - { .index = DT_SLEEP_CLK }, -}; - -static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = { - F(19200000, P_BI_TCXO, 1, 0, 0), - F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0), - F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0), - { } -}; - -static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = { - .cmd_rcgr = 0x82e8, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_6, - .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_ahb_clk_src", - .parent_data = disp_cc_parent_data_6, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_6), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, - }, -}; - -static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = { - F(19200000, P_BI_TCXO, 1, 0, 0), - { } -}; - -static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = { - .cmd_rcgr = 0x8108, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_2, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_byte0_clk_src", - .parent_data = disp_cc_parent_data_2, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_byte2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = { - .cmd_rcgr = 0x8124, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_2, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_byte1_clk_src", - .parent_data = disp_cc_parent_data_2, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_byte2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = { - .cmd_rcgr = 0x81bc, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_0, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_aux_clk_src", - .parent_data = disp_cc_parent_data_0, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = { - .cmd_rcgr = 0x8170, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_7, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_link_clk_src", - .parent_data = 
disp_cc_parent_data_7, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_7), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_byte2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = { - .cmd_rcgr = 0x818c, - .mnd_width = 16, - .hid_width = 5, - .parent_map = disp_cc_parent_map_4, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_pixel0_clk_src", - .parent_data = disp_cc_parent_data_4, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = { - .cmd_rcgr = 0x81a4, - .mnd_width = 16, - .hid_width = 5, - .parent_map = disp_cc_parent_map_4, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_pixel1_clk_src", - .parent_data = disp_cc_parent_data_4, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_4), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = { - .cmd_rcgr = 0x8220, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_0, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_aux_clk_src", - .parent_data = disp_cc_parent_data_0, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = { - .cmd_rcgr = 0x8204, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_3, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_link_clk_src", - .parent_data = disp_cc_parent_data_3, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_byte2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = { - .cmd_rcgr = 0x81d4, - .mnd_width = 16, - .hid_width = 5, - .parent_map = disp_cc_parent_map_1, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_pixel0_clk_src", - .parent_data = disp_cc_parent_data_1, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = { - .cmd_rcgr = 0x81ec, - .mnd_width = 16, - .hid_width = 5, - .parent_map = disp_cc_parent_map_1, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_pixel1_clk_src", - .parent_data = disp_cc_parent_data_1, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = { - .cmd_rcgr = 0x8284, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_0, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_aux_clk_src", - .parent_data = disp_cc_parent_data_0, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = { - .cmd_rcgr = 0x8238, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_3, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_link_clk_src", - 
.parent_data = disp_cc_parent_data_3, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_byte2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = { - .cmd_rcgr = 0x8254, - .mnd_width = 16, - .hid_width = 5, - .parent_map = disp_cc_parent_map_1, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_pixel0_clk_src", - .parent_data = disp_cc_parent_data_1, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = { - .cmd_rcgr = 0x826c, - .mnd_width = 16, - .hid_width = 5, - .parent_map = disp_cc_parent_map_1, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_pixel1_clk_src", - .parent_data = disp_cc_parent_data_1, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = { - .cmd_rcgr = 0x82d0, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_0, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_aux_clk_src", - .parent_data = disp_cc_parent_data_0, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = { - .cmd_rcgr = 0x82b4, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_3, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_link_clk_src", - .parent_data = disp_cc_parent_data_3, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_byte2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = { - .cmd_rcgr = 0x829c, - .mnd_width = 16, - .hid_width = 5, - .parent_map = disp_cc_parent_map_1, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_pixel0_clk_src", - .parent_data = disp_cc_parent_data_1, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_1), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { - .cmd_rcgr = 0x8140, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_5, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_esc0_clk_src", - .parent_data = disp_cc_parent_data_5, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = { - .cmd_rcgr = 0x8158, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_5, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_esc1_clk_src", - .parent_data = disp_cc_parent_data_5, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - -static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = { - F(19200000, P_BI_TCXO, 1, 0, 0), - F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), - F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), - F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), - 
F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), - F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), - F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), - F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0), - { } -}; - -static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = { - .cmd_rcgr = 0x80d8, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_8, - .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_mdp_clk_src", - .parent_data = disp_cc_parent_data_8, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_8), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_shared_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = { - .cmd_rcgr = 0x80a8, - .mnd_width = 8, - .hid_width = 5, - .parent_map = disp_cc_parent_map_2, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_pclk0_clk_src", - .parent_data = disp_cc_parent_data_2, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_pixel_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = { - .cmd_rcgr = 0x80c0, - .mnd_width = 8, - .hid_width = 5, - .parent_map = disp_cc_parent_map_2, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_pclk1_clk_src", - .parent_data = disp_cc_parent_data_2, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_2), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_pixel_ops, - }, -}; - -static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = { - .cmd_rcgr = 0x80f0, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_0, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_vsync_clk_src", - .parent_data = disp_cc_parent_data_0, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - -static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = { - F(32000, P_SLEEP_CLK, 1, 0, 0), - { } -}; - -static struct clk_rcg2 disp_cc_sleep_clk_src = { - .cmd_rcgr = 0xe05c, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_9, - .freq_tbl = ftbl_disp_cc_sleep_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_sleep_clk_src", - .parent_data = disp_cc_parent_data_9, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_9), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 disp_cc_xo_clk_src = { - .cmd_rcgr = 0xe03c, - .mnd_width = 0, - .hid_width = 5, - .parent_map = disp_cc_parent_map_0, - .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_xo_clk_src", - .parent_data = disp_cc_parent_data_0_ao, - .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = { - .reg = 0x8120, - .shift = 0, - .width = 4, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_byte0_div_clk_src", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_byte0_clk_src.clkr.hw, - }, - .num_parents = 1, - .ops = &clk_regmap_div_ops, - }, -}; - -static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = { - .reg = 0x813c, - .shift = 0, - .width = 4, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_byte1_div_clk_src", - .parent_hws = 
(const struct clk_hw*[]) { - &disp_cc_mdss_byte1_clk_src.clkr.hw, - }, - .num_parents = 1, - .ops = &clk_regmap_div_ops, - }, -}; - -static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = { - .reg = 0x8188, - .shift = 0, - .width = 4, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_link_div_clk_src", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_regmap_div_ro_ops, - }, -}; - -static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = { - .reg = 0x821c, - .shift = 0, - .width = 4, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_link_div_clk_src", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_regmap_div_ro_ops, - }, -}; - -static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = { - .reg = 0x8250, - .shift = 0, - .width = 4, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_link_div_clk_src", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_regmap_div_ro_ops, - }, -}; - -static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = { - .reg = 0x82cc, - .shift = 0, - .width = 4, - .clkr.hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_link_div_clk_src", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_regmap_div_ro_ops, - }, -}; - -static struct clk_branch disp_cc_mdss_accu_clk = { - .halt_reg = 0xe058, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0xe058, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_accu_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_xo_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_ahb1_clk = { - .halt_reg = 0xa020, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0xa020, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_ahb1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_ahb_clk = { - .halt_reg = 0x80a4, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x80a4, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_ahb_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_byte0_clk = { - .halt_reg = 0x8028, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8028, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_byte0_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_byte0_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_byte0_intf_clk = { - .halt_reg = 0x802c, - .halt_check = BRANCH_HALT, - .clkr 
= { - .enable_reg = 0x802c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_byte0_intf_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_byte0_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_byte1_clk = { - .halt_reg = 0x8030, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8030, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_byte1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_byte1_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_byte1_intf_clk = { - .halt_reg = 0x8034, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8034, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_byte1_intf_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_byte1_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx0_aux_clk = { - .halt_reg = 0x8058, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8058, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_aux_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = { - .halt_reg = 0x804c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x804c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_crypto_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx0_link_clk = { - .halt_reg = 0x8040, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8040, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_link_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx0_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = { - .halt_reg = 0x8048, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8048, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_link_intf_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = { - .halt_reg = 0x8050, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8050, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_pixel0_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = { - .halt_reg = 0x8054, - .halt_check = BRANCH_HALT, - .clkr = { 
- .enable_reg = 0x8054, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_pixel1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = { - .halt_reg = 0x8044, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8044, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx1_aux_clk = { - .halt_reg = 0x8074, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8074, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_aux_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = { - .halt_reg = 0x8070, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8070, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_crypto_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx1_link_clk = { - .halt_reg = 0x8064, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8064, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_link_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx1_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = { - .halt_reg = 0x806c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x806c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_link_intf_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = { - .halt_reg = 0x805c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x805c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_pixel0_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = { - .halt_reg = 0x8060, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8060, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_pixel1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch 
disp_cc_mdss_dptx1_usb_router_link_intf_clk = { - .halt_reg = 0x8068, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8068, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx2_aux_clk = { - .halt_reg = 0x808c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x808c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_aux_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = { - .halt_reg = 0x8088, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8088, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_crypto_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx2_link_clk = { - .halt_reg = 0x8080, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8080, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_link_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx2_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = { - .halt_reg = 0x8084, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8084, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_link_intf_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = { - .halt_reg = 0x8078, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8078, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_pixel0_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = { - .halt_reg = 0x807c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x807c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx2_pixel1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx3_aux_clk = { - .halt_reg = 0x809c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x809c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_aux_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = 
&clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = { - .halt_reg = 0x80a0, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x80a0, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_crypto_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx3_link_clk = { - .halt_reg = 0x8094, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8094, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_link_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx3_link_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = { - .halt_reg = 0x8098, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8098, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_link_intf_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = { - .halt_reg = 0x8090, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8090, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_dptx3_pixel0_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_esc0_clk = { - .halt_reg = 0x8038, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8038, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_esc0_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_esc0_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_esc1_clk = { - .halt_reg = 0x803c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x803c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_esc1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_esc1_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_mdp1_clk = { - .halt_reg = 0xa004, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0xa004, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_mdp1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_mdp_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_mdp_clk = { - .halt_reg = 0x800c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x800c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_mdp_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_mdp_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch 
disp_cc_mdss_mdp_lut1_clk = { - .halt_reg = 0xa010, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0xa010, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_mdp_lut1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_mdp_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_mdp_lut_clk = { - .halt_reg = 0x8018, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x8018, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_mdp_lut_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_mdp_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = { - .halt_reg = 0xc004, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0xc004, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_non_gdsc_ahb_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_pclk0_clk = { - .halt_reg = 0x8004, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8004, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_pclk0_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_pclk0_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_pclk1_clk = { - .halt_reg = 0x8008, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x8008, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_pclk1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_pclk1_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_rscc_ahb_clk = { - .halt_reg = 0xc00c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0xc00c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_rscc_ahb_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_rscc_vsync_clk = { - .halt_reg = 0xc008, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0xc008, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_rscc_vsync_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_vsync_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_vsync1_clk = { - .halt_reg = 0xa01c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0xa01c, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_vsync1_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_vsync_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_mdss_vsync_clk = { - .halt_reg = 0x8024, - .halt_check = BRANCH_HALT, - 
.clkr = { - .enable_reg = 0x8024, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_mdss_vsync_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_mdss_vsync_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct clk_branch disp_cc_sleep_clk = { - .halt_reg = 0xe074, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0xe074, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "disp_cc_sleep_clk", - .parent_hws = (const struct clk_hw*[]) { - &disp_cc_sleep_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - -static struct gdsc mdss_gdsc = { - .gdscr = 0x9000, - .pd = { - .name = "mdss_gdsc", - }, - .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, -}; - -static struct gdsc mdss_int2_gdsc = { - .gdscr = 0xb000, - .pd = { - .name = "mdss_int2_gdsc", - }, - .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, -}; - -static struct clk_regmap *disp_cc_sm8650_clocks[] = { - [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr, - [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr, - [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr, - [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr, - [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr, - [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr, - [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr, - [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr, - [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr, - [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr, - [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr, - [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr, - [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr, - [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr, - [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr, - [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr, - [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr, - [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr, - [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr, - [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr, - [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr, - [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr, - [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr, - [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] = - &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr, - [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr, - [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr, - [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr, - [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr, - [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr, - [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr, - [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr, - [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr, - [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr, - [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = 
&disp_cc_mdss_dptx1_pixel1_clk.clkr, - [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr, - [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] = - &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr, - [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr, - [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr, - [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr, - [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr, - [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr, - [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr, - [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr, - [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr, - [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr, - [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr, - [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr, - [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr, - [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr, - [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr, - [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr, - [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr, - [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr, - [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr, - [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr, - [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr, - [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr, - [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr, - [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr, - [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr, - [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr, - [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr, - [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr, - [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr, - [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr, - [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr, - [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr, - [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr, - [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr, - [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr, - [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr, - [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr, - [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr, - [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr, - [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr, - [DISP_CC_PLL0] = &disp_cc_pll0.clkr, - [DISP_CC_PLL1] = &disp_cc_pll1.clkr, - [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr, - [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr, - [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr, -}; - -static const struct qcom_reset_map disp_cc_sm8650_resets[] = { - [DISP_CC_MDSS_CORE_BCR] = { 0x8000 }, - [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 }, - [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 }, -}; - -static struct gdsc *disp_cc_sm8650_gdscs[] = { - [MDSS_GDSC] = &mdss_gdsc, - [MDSS_INT2_GDSC] = &mdss_int2_gdsc, -}; - -static const struct regmap_config disp_cc_sm8650_regmap_config = { - 
-	.reg_bits = 32,
-	.reg_stride = 4,
-	.val_bits = 32,
-	.max_register = 0x11008,
-	.fast_io = true,
-};
-
-static struct qcom_cc_desc disp_cc_sm8650_desc = {
-	.config = &disp_cc_sm8650_regmap_config,
-	.clks = disp_cc_sm8650_clocks,
-	.num_clks = ARRAY_SIZE(disp_cc_sm8650_clocks),
-	.resets = disp_cc_sm8650_resets,
-	.num_resets = ARRAY_SIZE(disp_cc_sm8650_resets),
-	.gdscs = disp_cc_sm8650_gdscs,
-	.num_gdscs = ARRAY_SIZE(disp_cc_sm8650_gdscs),
-};
-
-static const struct of_device_id disp_cc_sm8650_match_table[] = {
-	{ .compatible = "qcom,sm8650-dispcc" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, disp_cc_sm8650_match_table);
-
-static int disp_cc_sm8650_probe(struct platform_device *pdev)
-{
-	struct regmap *regmap;
-	int ret;
-
-	ret = devm_pm_runtime_enable(&pdev->dev);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_resume_and_get(&pdev->dev);
-	if (ret)
-		return ret;
-
-	regmap = qcom_cc_map(pdev, &disp_cc_sm8650_desc);
-	if (IS_ERR(regmap)) {
-		ret = PTR_ERR(regmap);
-		goto err_put_rpm;
-	}
-
-	clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
-	clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
-
-	/* Enable clock gating for MDP clocks */
-	regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
-
-	/* Keep some clocks always-on */
-	qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
-
-	ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8650_desc, regmap);
-	if (ret)
-		goto err_put_rpm;
-
-	pm_runtime_put(&pdev->dev);
-
-	return 0;
-
-err_put_rpm:
-	pm_runtime_put_sync(&pdev->dev);
-
-	return ret;
-}
-
-static struct platform_driver disp_cc_sm8650_driver = {
-	.probe = disp_cc_sm8650_probe,
-	.driver = {
-		.name = "disp_cc-sm8650",
-		.of_match_table = disp_cc_sm8650_match_table,
-	},
-};
-
-module_platform_driver(disp_cc_sm8650_driver);
-
-MODULE_DESCRIPTION("QTI DISPCC SM8650 Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
index f98591148a9767..9536b2b7d07c25 100644
--- a/drivers/clk/qcom/gcc-ipq5332.c
+++ b/drivers/clk/qcom/gcc-ipq5332.c
@@ -4,12 +4,14 @@
  */
 
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 
 #include "clk-alpha-pll.h"
 #include "clk-branch.h"
@@ -126,17 +128,6 @@ static struct clk_alpha_pll gpll4_main = {
			.parent_data = &gcc_parent_data_xo,
			.num_parents = 1,
			.ops = &clk_alpha_pll_stromer_ops,
-			/*
-			 * There are no consumers for this GPLL in kernel yet,
-			 * (will be added soon), so the clock framework
-			 * disables this source. But some of the clocks
-			 * initialized by boot loaders uses this source. So we
-			 * need to keep this clock ON. Add the
-			 * CLK_IGNORE_UNUSED flag so the clock will not be
-			 * disabled. Once the consumer in kernel is added, we
-			 * can get rid of this flag.
-			 */
-			.flags = CLK_IGNORE_UNUSED,
		},
	},
 };
@@ -3388,6 +3379,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
	[GCC_QDSS_DAP_DIV_CLK_SRC] = &gcc_qdss_dap_div_clk_src.clkr,
	[GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr,
	[GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr,
+	[GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr,
	[GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
	[GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
	[GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
@@ -3628,6 +3620,24 @@ static const struct qcom_reset_map gcc_ipq5332_resets[] = {
	[GCC_UNIPHY1_XPCS_ARES] = { 0x16060 },
 };
 
+#define IPQ_APPS_ID 5332 /* some unique value */
+
+static struct qcom_icc_hws_data icc_ipq5332_hws[] = {
+	{ MASTER_SNOC_PCIE3_1_M, SLAVE_SNOC_PCIE3_1_M, GCC_SNOC_PCIE3_1LANE_M_CLK },
+	{ MASTER_ANOC_PCIE3_1_S, SLAVE_ANOC_PCIE3_1_S, GCC_SNOC_PCIE3_1LANE_S_CLK },
+	{ MASTER_SNOC_PCIE3_2_M, SLAVE_SNOC_PCIE3_2_M, GCC_SNOC_PCIE3_2LANE_M_CLK },
+	{ MASTER_ANOC_PCIE3_2_S, SLAVE_ANOC_PCIE3_2_S, GCC_SNOC_PCIE3_2LANE_S_CLK },
+	{ MASTER_SNOC_USB, SLAVE_SNOC_USB, GCC_SNOC_USB_CLK },
+	{ MASTER_NSSNOC_NSSCC, SLAVE_NSSNOC_NSSCC, GCC_NSSNOC_NSSCC_CLK },
+	{ MASTER_NSSNOC_SNOC_0, SLAVE_NSSNOC_SNOC_0, GCC_NSSNOC_SNOC_CLK },
+	{ MASTER_NSSNOC_SNOC_1, SLAVE_NSSNOC_SNOC_1, GCC_NSSNOC_SNOC_1_CLK },
+	{ MASTER_NSSNOC_ATB, SLAVE_NSSNOC_ATB, GCC_NSSNOC_ATB_CLK },
+	{ MASTER_NSSNOC_PCNOC_1, SLAVE_NSSNOC_PCNOC_1, GCC_NSSNOC_PCNOC_1_CLK },
+	{ MASTER_NSSNOC_QOSGEN_REF, SLAVE_NSSNOC_QOSGEN_REF, GCC_NSSNOC_QOSGEN_REF_CLK },
+	{ MASTER_NSSNOC_TIMEOUT_REF, SLAVE_NSSNOC_TIMEOUT_REF, GCC_NSSNOC_TIMEOUT_REF_CLK },
+	{ MASTER_NSSNOC_XO_DCD, SLAVE_NSSNOC_XO_DCD, GCC_NSSNOC_XO_DCD_CLK },
+};
+
 static const struct regmap_config gcc_ipq5332_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
@@ -3656,6 +3666,9 @@ static const struct qcom_cc_desc gcc_ipq5332_desc = {
	.num_resets = ARRAY_SIZE(gcc_ipq5332_resets),
	.clk_hws = gcc_ipq5332_hws,
	.num_clk_hws = ARRAY_SIZE(gcc_ipq5332_hws),
+	.icc_hws = icc_ipq5332_hws,
+	.num_icc_hws = ARRAY_SIZE(icc_ipq5332_hws),
+	.icc_first_node_id = IPQ_APPS_ID,
 };
 
 static int gcc_ipq5332_probe(struct platform_device *pdev)
@@ -3674,6 +3687,7 @@ static struct platform_driver gcc_ipq5332_driver = {
	.driver = {
		.name = "gcc-ipq5332",
		.of_match_table = gcc_ipq5332_match_table,
+		.sync_state = icc_sync_state,
	},
 };
 
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index 2e411d874662cd..ab0f7fc665a979 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -2684,7 +2684,7 @@ static struct clk_rcg2 lpass_q6_axim_clk_src = {
	},
 };
 
-static struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
+static const struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
	F(24000000, P_XO, 1, 0, 0),
	F(50000000, P_GPLL0, 16, 0, 0),
	{ }
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 974d01fd4381e6..9260e2fdb839b9 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -390,7 +390,7 @@ static const struct clk_parent_data gcc_pxo_pll3_pll0_pll14_pll18_pll11[] = {
 };
 
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
	{ 1843200, P_PLL8, 2, 6, 625 },
	{ 3686400, P_PLL8, 2, 12, 625 },
	{ 7372800, P_PLL8, 2, 24, 625 },
@@ -714,7 +714,7 @@ static struct clk_branch gsbi7_uart_clk = {
	},
 };
 
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
	{ 1100000, P_PXO, 1, 2, 49 },
	{ 5400000, P_PXO, 1, 1, 5 },
	{ 10800000, P_PXO, 1, 2, 5 },
diff --git
a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 32fd01ef469a02..7258ba5c09001e 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -1947,7 +1947,7 @@ static struct clk_regmap_div nss_port6_tx_div_clk_src = { }, }; -static struct freq_tbl ftbl_crypto_clk_src[] = { +static const struct freq_tbl ftbl_crypto_clk_src[] = { F(40000000, P_GPLL0_DIV2, 10, 0, 0), F(80000000, P_GPLL0, 10, 0, 0), F(100000000, P_GPLL0, 8, 0, 0), @@ -1968,7 +1968,7 @@ static struct clk_rcg2 crypto_clk_src = { }, }; -static struct freq_tbl ftbl_gp_clk_src[] = { +static const struct freq_tbl ftbl_gp_clk_src[] = { F(19200000, P_XO, 1, 0, 0), { } }; diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c index 33987b9577371b..37fc5607b2d38d 100644 --- a/drivers/clk/qcom/gcc-mdm9615.c +++ b/drivers/clk/qcom/gcc-mdm9615.c @@ -164,7 +164,7 @@ static const struct clk_parent_data gcc_cxo_pll14[] = { { .hw = &pll14_vote.hw }, }; -static struct freq_tbl clk_tbl_gsbi_uart[] = { +static const struct freq_tbl clk_tbl_gsbi_uart[] = { { 1843200, P_PLL8, 2, 6, 625 }, { 3686400, P_PLL8, 2, 12, 625 }, { 7372800, P_PLL8, 2, 24, 625 }, @@ -437,7 +437,7 @@ static struct clk_branch gsbi5_uart_clk = { }, }; -static struct freq_tbl clk_tbl_gsbi_qup[] = { +static const struct freq_tbl clk_tbl_gsbi_qup[] = { { 960000, P_CXO, 4, 1, 5 }, { 4800000, P_CXO, 4, 0, 1 }, { 9600000, P_CXO, 2, 0, 1 }, diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c index 67870c899ab952..a6a4477ccdef15 100644 --- a/drivers/clk/qcom/gcc-msm8660.c +++ b/drivers/clk/qcom/gcc-msm8660.c @@ -82,7 +82,7 @@ static const struct clk_parent_data gcc_pxo_pll8_cxo[] = { { .fw_name = "cxo", .name = "cxo_board" }, }; -static struct freq_tbl clk_tbl_gsbi_uart[] = { +static const struct freq_tbl clk_tbl_gsbi_uart[] = { { 1843200, P_PLL8, 2, 6, 625 }, { 3686400, P_PLL8, 2, 12, 625 }, { 7372800, P_PLL8, 2, 24, 625 }, @@ -712,7 +712,7 @@ static struct clk_branch gsbi12_uart_clk = { }, }; -static struct freq_tbl clk_tbl_gsbi_qup[] = { +static const struct freq_tbl clk_tbl_gsbi_qup[] = { { 1100000, P_PXO, 1, 2, 49 }, { 5400000, P_PXO, 1, 1, 5 }, { 10800000, P_PXO, 1, 2, 5 }, diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index 6236a458e4eb26..9ddce11db6dfe2 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c @@ -328,7 +328,7 @@ static const struct clk_parent_data gcc_pxo_pll8_pll3[] = { { .hw = &pll3.clkr.hw }, }; -static struct freq_tbl clk_tbl_gsbi_uart[] = { +static const struct freq_tbl clk_tbl_gsbi_uart[] = { { 1843200, P_PLL8, 2, 6, 625 }, { 3686400, P_PLL8, 2, 12, 625 }, { 7372800, P_PLL8, 2, 24, 625 }, @@ -958,7 +958,7 @@ static struct clk_branch gsbi12_uart_clk = { }, }; -static struct freq_tbl clk_tbl_gsbi_qup[] = { +static const struct freq_tbl clk_tbl_gsbi_qup[] = { { 1100000, P_PXO, 1, 2, 49 }, { 5400000, P_PXO, 1, 1, 5 }, { 10800000, P_PXO, 1, 2, 5 }, @@ -2940,7 +2940,7 @@ static struct clk_branch adm0_pbus_clk = { }, }; -static struct freq_tbl clk_tbl_ce3[] = { +static const struct freq_tbl clk_tbl_ce3[] = { { 48000000, P_PLL8, 8 }, { 100000000, P_PLL3, 12 }, { 120000000, P_PLL3, 10 }, @@ -3761,7 +3761,7 @@ static void gcc_msm8960_remove(struct platform_device *pdev) static struct platform_driver gcc_msm8960_driver = { .probe = gcc_msm8960_probe, - .remove_new = gcc_msm8960_remove, + .remove = gcc_msm8960_remove, .driver = { .name = "gcc-msm8960", .of_match_table = gcc_msm8960_match_table, diff --git 
a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c index 80170a805c3b61..6a6b7da2b151da 100644 --- a/drivers/clk/qcom/gcc-msm8994.c +++ b/drivers/clk/qcom/gcc-msm8994.c @@ -112,7 +112,7 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = { { .hw = &gpll4.clkr.hw }, }; -static struct freq_tbl ftbl_ufs_axi_clk_src[] = { +static const struct freq_tbl ftbl_ufs_axi_clk_src[] = { F(50000000, P_GPLL0, 12, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(150000000, P_GPLL0, 4, 0, 0), @@ -136,7 +136,7 @@ static struct clk_rcg2 ufs_axi_clk_src = { }, }; -static struct freq_tbl ftbl_usb30_master_clk_src[] = { +static const struct freq_tbl ftbl_usb30_master_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(125000000, P_GPLL0, 1, 5, 24), { } @@ -156,7 +156,7 @@ static struct clk_rcg2 usb30_master_clk_src = { }, }; -static struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(50000000, P_GPLL0, 12, 0, 0), { } @@ -175,7 +175,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -188,7 +188,7 @@ static struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = { { } }; -static struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src_8992[] = { +static const struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src_8992[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -226,7 +226,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -266,7 +266,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp1_qup3_4_spi_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp1_qup3_4_spi_apps_clk_src[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -333,7 +333,7 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -373,7 +373,7 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -400,7 +400,7 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = { F(3686400, P_GPLL0, 1, 96, 15625), F(7372800, P_GPLL0, 1, 192, 15625), F(14745600, P_GPLL0, 1, 384, 15625), @@ -516,7 +516,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp2_qup1_2_spi_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp2_qup1_2_spi_apps_clk_src[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -570,7 +570,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp2_qup3_4_spi_apps_clk_src[] = { +static 
const struct freq_tbl ftbl_blsp2_qup3_4_spi_apps_clk_src[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -678,7 +678,7 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = { }, }; -static struct freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = { +static const struct freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = { F(960000, P_XO, 10, 1, 2), F(4800000, P_XO, 4, 0, 0), F(9600000, P_XO, 2, 0, 0), @@ -789,7 +789,7 @@ static struct clk_rcg2 blsp2_uart6_apps_clk_src = { }, }; -static struct freq_tbl ftbl_gp1_clk_src[] = { +static const struct freq_tbl ftbl_gp1_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(200000000, P_GPLL0, 3, 0, 0), @@ -810,7 +810,7 @@ static struct clk_rcg2 gp1_clk_src = { }, }; -static struct freq_tbl ftbl_gp2_clk_src[] = { +static const struct freq_tbl ftbl_gp2_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(200000000, P_GPLL0, 3, 0, 0), @@ -831,7 +831,7 @@ static struct clk_rcg2 gp2_clk_src = { }, }; -static struct freq_tbl ftbl_gp3_clk_src[] = { +static const struct freq_tbl ftbl_gp3_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(200000000, P_GPLL0, 3, 0, 0), @@ -852,7 +852,7 @@ static struct clk_rcg2 gp3_clk_src = { }, }; -static struct freq_tbl ftbl_pcie_0_aux_clk_src[] = { +static const struct freq_tbl ftbl_pcie_0_aux_clk_src[] = { F(1011000, P_XO, 1, 1, 19), { } }; @@ -872,7 +872,7 @@ static struct clk_rcg2 pcie_0_aux_clk_src = { }, }; -static struct freq_tbl ftbl_pcie_pipe_clk_src[] = { +static const struct freq_tbl ftbl_pcie_pipe_clk_src[] = { F(125000000, P_XO, 1, 0, 0), { } }; @@ -891,7 +891,7 @@ static struct clk_rcg2 pcie_0_pipe_clk_src = { }, }; -static struct freq_tbl ftbl_pcie_1_aux_clk_src[] = { +static const struct freq_tbl ftbl_pcie_1_aux_clk_src[] = { F(1011000, P_XO, 1, 1, 19), { } }; @@ -925,7 +925,7 @@ static struct clk_rcg2 pcie_1_pipe_clk_src = { }, }; -static struct freq_tbl ftbl_pdm2_clk_src[] = { +static const struct freq_tbl ftbl_pdm2_clk_src[] = { F(60000000, P_GPLL0, 10, 0, 0), { } }; @@ -943,7 +943,7 @@ static struct clk_rcg2 pdm2_clk_src = { }, }; -static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = { +static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = { F(144000, P_XO, 16, 3, 25), F(400000, P_XO, 12, 1, 4), F(20000000, P_GPLL0, 15, 1, 2), @@ -955,7 +955,7 @@ static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = { { } }; -static struct freq_tbl ftbl_sdcc1_apps_clk_src_8992[] = { +static const struct freq_tbl ftbl_sdcc1_apps_clk_src_8992[] = { F(144000, P_XO, 16, 3, 25), F(400000, P_XO, 12, 1, 4), F(20000000, P_GPLL0, 15, 1, 2), @@ -981,7 +981,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { }, }; -static struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = { +static const struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = { F(144000, P_XO, 16, 3, 25), F(400000, P_XO, 12, 1, 4), F(20000000, P_GPLL0, 15, 1, 2), @@ -1034,7 +1034,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = { }, }; -static struct freq_tbl ftbl_tsif_ref_clk_src[] = { +static const struct freq_tbl ftbl_tsif_ref_clk_src[] = { F(105500, P_XO, 1, 1, 182), { } }; @@ -1054,7 +1054,7 @@ static struct clk_rcg2 tsif_ref_clk_src = { }, }; -static struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = { +static const struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(60000000, P_GPLL0, 10, 0, 0), { } @@ -1073,7 +1073,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = { }, }; -static struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = { +static const 
struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = { F(1200000, P_XO, 16, 0, 0), { } }; @@ -1092,7 +1092,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = { }, }; -static struct freq_tbl ftbl_usb_hs_system_clk_src[] = { +static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = { F(75000000, P_GPLL0, 8, 0, 0), { } }; diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 4fc667b94cf235..aa3bd277786860 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -359,7 +359,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { }, }; -static struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = { +static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(150000000, P_GPLL0, 4, 0, 0), F(300000000, P_GPLL0, 2, 0, 0), diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index 90b66caba2cdf6..c9701f7f6e1810 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c @@ -2242,7 +2242,7 @@ static struct clk_branch gcc_hmss_trig_clk = { }, }; -static struct freq_tbl ftbl_hmss_gpll0_clk_src[] = { +static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = { F( 300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), F( 600000000, P_GPLL0_OUT_MAIN, 1, 0, 0), { } @@ -2922,6 +2922,43 @@ static struct clk_branch ssc_cnoc_ahbs_clk = { }, }; +static struct clk_branch hlos1_vote_lpass_core_smmu_clk = { + .halt_reg = 0x7D010, + .clkr = { + .enable_reg = 0x7D010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "hlos1_vote_lpass_core_smmu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = { + .halt_reg = 0x7D014, + .clkr = { + .enable_reg = 0x7D014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "hlos1_vote_lpass_adsp_smmu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_q6_bimc_axi_clk = { + .halt_reg = 0x8A040, + .clkr = { + .enable_reg = 0x8A040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "gcc_mss_q6_bimc_axi_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct gdsc pcie_0_gdsc = { .gdscr = 0x6b004, .gds_hw_ctrl = 0x0, @@ -2953,6 +2990,26 @@ static struct gdsc usb_30_gdsc = { .flags = VOTABLE, }; +static struct gdsc hlos1_vote_lpass_adsp = { + .gdscr = 0x7d034, + .gds_hw_ctrl = 0x0, + .pd = { + .name = "lpass_adsp_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_lpass_core = { + .gdscr = 0x7d038, + .gds_hw_ctrl = 0x0, + .pd = { + .name = "lpass_core_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = ALWAYS_ON, +}; + static struct clk_regmap *gcc_msm8998_clocks[] = { [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr, @@ -3133,12 +3190,17 @@ static struct clk_regmap *gcc_msm8998_clocks[] = { [GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr, [GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr, [GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr, + [HLOS1_VOTE_LPASS_CORE_SMMU_CLK] = &hlos1_vote_lpass_core_smmu_clk.clkr, + [HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr, + [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr, }; static struct gdsc *gcc_msm8998_gdscs[] = { [PCIE_0_GDSC] = &pcie_0_gdsc, [UFS_GDSC] = &ufs_gdsc, [USB_30_GDSC] = &usb_30_gdsc, + [LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp, + [LPASS_CORE_GDSC] = &hlos1_vote_lpass_core, 
}; static const struct qcom_reset_map gcc_msm8998_resets[] = { diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c index ad135bfa4c7686..31e788e22ab4ad 100644 --- a/drivers/clk/qcom/gcc-sc8180x.c +++ b/drivers/clk/qcom/gcc-sc8180x.c @@ -142,6 +142,23 @@ static struct clk_alpha_pll gpll7 = { }, }; +static struct clk_alpha_pll gpll9 = { + .offset = 0x1c000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gpll9", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_trion_ops, + }, + }, +}; + static const struct parent_map gcc_parent_map_0[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, @@ -241,7 +258,7 @@ static const struct parent_map gcc_parent_map_7[] = { static const struct clk_parent_data gcc_parents_7[] = { { .fw_name = "bi_tcxo", }, { .hw = &gpll0.clkr.hw }, - { .name = "gppl9" }, + { .hw = &gpll9.clkr.hw }, { .hw = &gpll4.clkr.hw }, { .hw = &gpll0_out_even.clkr.hw }, }; @@ -260,28 +277,6 @@ static const struct clk_parent_data gcc_parents_8[] = { { .hw = &gpll0_out_even.clkr.hw }, }; -static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = { - F(19200000, P_BI_TCXO, 1, 0, 0), - F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), - F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), - { } -}; - -static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { - .cmd_rcgr = 0x48014, - .mnd_width = 0, - .hid_width = 5, - .parent_map = gcc_parent_map_0, - .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_cpuss_ahb_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), @@ -609,19 +604,29 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { { } }; +static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, +}; + static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { .cmd_rcgr = 0x17148, .mnd_width = 16, .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s0_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { @@ -630,13 +635,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s1_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init, +}; + +static struct clk_init_data 
gcc_qupv3_wrap0_s2_clk_src_init = { + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { @@ -645,13 +652,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s2_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { + .name = "gcc_qupv3_wrap0_s3_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { @@ -660,13 +669,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s3_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { @@ -675,13 +686,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s4_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { @@ -690,13 +703,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s5_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = { + .name = "gcc_qupv3_wrap0_s6_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { @@ -705,13 +720,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s6_clk_src", - .parent_data = gcc_parents_0, - .num_parents = 
ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = { + .name = "gcc_qupv3_wrap0_s7_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { @@ -720,13 +737,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s7_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { @@ -735,13 +754,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s0_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { @@ -750,13 +771,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s1_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { @@ -765,13 +788,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s2_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { @@ -780,13 +805,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = 
ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s3_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { @@ -795,13 +822,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s4_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { + .name = "gcc_qupv3_wrap1_s5_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { @@ -810,13 +839,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s5_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = { + .name = "gcc_qupv3_wrap2_s0_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { @@ -825,13 +856,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap2_s0_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = { + .name = "gcc_qupv3_wrap2_s1_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { @@ -840,28 +873,33 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap2_s1_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init, }; +static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = { + .name = "gcc_qupv3_wrap2_s2_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, +}; + + static struct clk_rcg2 
gcc_qupv3_wrap2_s2_clk_src = { .cmd_rcgr = 0x1e3a8, .mnd_width = 16, .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap2_s2_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = { + .name = "gcc_qupv3_wrap2_s3_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { @@ -870,13 +908,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap2_s3_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = { + .name = "gcc_qupv3_wrap2_s4_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { @@ -885,13 +925,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap2_s4_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = { + .name = "gcc_qupv3_wrap2_s5_clk_src", + .parent_data = gcc_parents_0, + .num_parents = ARRAY_SIZE(gcc_parents_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { @@ -900,13 +942,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap2_s5_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init, }; static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { @@ -916,7 +952,7 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2), F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), - F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0), { } }; @@ -939,9 +975,8 @@ static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = { F(400000, P_BI_TCXO, 12, 1, 4), F(9600000, P_BI_TCXO, 2, 0, 0), F(19200000, P_BI_TCXO, 1, 0, 0), - F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0), F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), - F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), { } }; @@ -1599,25 +1634,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = { }, }; -/* For CPUSS functionality the AHB clock needs to be left enabled */ -static struct 
clk_branch gcc_cpuss_ahb_clk = { - .halt_reg = 0x48000, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x52004, - .enable_mask = BIT(21), - .hw.init = &(struct clk_init_data){ - .name = "gcc_cpuss_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ - &gcc_cpuss_ahb_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_cpuss_rbcpr_clk = { .halt_reg = 0x48008, .halt_check = BRANCH_HALT, @@ -3150,25 +3166,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = { }, }; -/* For CPUSS functionality the SYS NOC clock needs to be left enabled */ -static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = { - .halt_reg = 0x4819c, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x52004, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_sys_noc_cpuss_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ - &gcc_cpuss_ahb_clk_src.clkr.hw - }, - .num_parents = 1, - .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_tsif_ahb_clk = { .halt_reg = 0x36004, .halt_check = BRANCH_HALT, @@ -4284,8 +4281,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = { [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr, [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr, - [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr, - [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr, [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr, [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, @@ -4422,7 +4417,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = { [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr, [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr, [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr, - [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr, [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr, [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr, [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr, @@ -4511,6 +4505,7 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = { [GPLL1] = &gpll1.clkr, [GPLL4] = &gpll4.clkr, [GPLL7] = &gpll7.clkr, + [GPLL9] = &gpll9.clkr, }; static const struct qcom_reset_map gcc_sc8180x_resets[] = { @@ -4546,6 +4541,10 @@ static const struct qcom_reset_map gcc_sc8180x_resets[] = { [GCC_USB3_PHY_SEC_BCR] = { 0x50018 }, [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c }, [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 }, + [GCC_USB3_UNIPHY_MP0_BCR] = { 0x50024 }, + [GCC_USB3_UNIPHY_MP1_BCR] = { 0x50028 }, + [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x5002c }, + [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x50030 }, [GCC_SDCC2_BCR] = { 0x14000 }, [GCC_SDCC4_BCR] = { 0x16000 }, [GCC_TSIF_BCR] = { 0x36000 }, @@ -4561,6 +4560,29 @@ static const struct qcom_reset_map gcc_sc8180x_resets[] = { [GCC_VIDEO_AXI1_CLK_BCR] = { .reg = 0xb028, .bit = 2, .udelay = 150 }, }; +static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = { + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src), + 
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+};
+
 static struct gdsc *gcc_sc8180x_gdscs[] = {
	[EMAC_GDSC] = &emac_gdsc,
	[PCIE_0_GDSC] = &pcie_0_gdsc,
@@ -4602,6 +4624,7 @@ MODULE_DEVICE_TABLE(of, gcc_sc8180x_match_table);
 static int gcc_sc8180x_probe(struct platform_device *pdev)
 {
	struct regmap *regmap;
+	int ret;
 
	regmap = qcom_cc_map(pdev, &gcc_sc8180x_desc);
	if (IS_ERR(regmap))
@@ -4623,6 +4646,11 @@ static int gcc_sc8180x_probe(struct platform_device *pdev)
	regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
	regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
 
+	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+				       ARRAY_SIZE(gcc_dfs_clocks));
+	if (ret)
+		return ret;
+
	return qcom_cc_really_probe(&pdev->dev, &gcc_sc8180x_desc, regmap);
 }
 
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index 991cd8b8d59726..1c59d70e0f96c5 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -3226,7 +3226,7 @@ static struct gdsc pcie_0_gdsc = {
	.pd = {
		.name = "pcie_0_gdsc",
	},
-	.pwrsts = PWRSTS_OFF_ON,
+	.pwrsts = PWRSTS_RET_ON,
 };
 
 static struct gdsc pcie_1_gdsc = {
@@ -3234,7 +3234,7 @@ static struct gdsc pcie_1_gdsc = {
	.pd = {
		.name = "pcie_1_gdsc",
	},
-	.pwrsts = PWRSTS_OFF_ON,
+	.pwrsts = PWRSTS_RET_ON,
 };
 
 static struct gdsc pcie_2_gdsc = {
@@ -3242,7 +3242,7 @@ static struct gdsc pcie_2_gdsc = {
	.pd = {
		.name = "pcie_2_gdsc",
	},
-	.pwrsts = PWRSTS_OFF_ON,
+	.pwrsts = PWRSTS_RET_ON,
 };
 
 static struct gdsc ufs_card_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 639a9a955914f4..c445c271678a5f 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -2974,7 +2974,7 @@ static struct gdsc pcie_0_gdsc = {
	.pd = {
		.name = "pcie_0_gdsc",
	},
-	.pwrsts = PWRSTS_OFF_ON,
+	.pwrsts = PWRSTS_RET_ON,
 };
 
 static struct gdsc pcie_1_gdsc = {
@@ -2982,7 +2982,7 @@ static struct gdsc pcie_1_gdsc = {
	.pd = {
		.name = "pcie_1_gdsc",
	},
-	.pwrsts = PWRSTS_OFF_ON,
+	.pwrsts = PWRSTS_RET_ON,
 };
 
 static struct gdsc ufs_phy_gdsc = {
diff --git a/drivers/clk/qcom/gpucc-sm4450.c b/drivers/clk/qcom/gpucc-sm4450.c
new file mode 100644
index 00000000000000..a14d0bb031acc5
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm4450.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + DT_BI_TCXO, + DT_GPLL0_OUT_MAIN, + DT_GPLL0_OUT_MAIN_DIV, +}; + +enum { + P_BI_TCXO, + P_GPLL0_OUT_MAIN, + P_GPLL0_OUT_MAIN_DIV, + P_GPU_CC_PLL0_OUT_EVEN, + P_GPU_CC_PLL0_OUT_MAIN, + P_GPU_CC_PLL0_OUT_ODD, + P_GPU_CC_PLL1_OUT_EVEN, + P_GPU_CC_PLL1_OUT_MAIN, + P_GPU_CC_PLL1_OUT_ODD, +}; + +static const struct pll_vco lucid_evo_vco[] = { + { 249600000, 2020000000, 0 }, +}; + +/* 680.0 MHz Configuration */ +static const struct alpha_pll_config gpu_cc_pll0_config = { + .l = 0x23, + .alpha = 0x6aaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, +}; + +static struct clk_alpha_pll gpu_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_pll0", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +/* 500.0 MHz Configuration */ +static const struct alpha_pll_config gpu_cc_pll1_config = { + .l = 0x1a, + .alpha = 0xaaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00182261, + .config_ctl_hi1_val = 0x32aa299c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, +}; + +static struct clk_alpha_pll gpu_cc_pll1 = { + .offset = 0x1000, + .vco_table = lucid_evo_vco, + .num_vco = ARRAY_SIZE(lucid_evo_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO], + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_pll1", + .parent_data = &(const struct clk_parent_data) { + .index = DT_BI_TCXO, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_evo_ops, + }, + }, +}; + +static const struct parent_map gpu_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .index = DT_GPLL0_OUT_MAIN }, + { .index = DT_GPLL0_OUT_MAIN_DIV }, +}; + +static const struct parent_map gpu_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL0_OUT_MAIN, 1 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .hw = &gpu_cc_pll0.clkr.hw }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .index = DT_GPLL0_OUT_MAIN }, + { .index = DT_GPLL0_OUT_MAIN_DIV }, +}; + +static const struct parent_map gpu_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL0_OUT_EVEN, 1 }, + { P_GPU_CC_PLL0_OUT_ODD, 2 }, + { P_GPU_CC_PLL1_OUT_EVEN, 3 }, + { P_GPU_CC_PLL1_OUT_ODD, 4 }, + { P_GPLL0_OUT_MAIN, 5 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .hw = &gpu_cc_pll0.clkr.hw }, + { .hw = &gpu_cc_pll0.clkr.hw }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .index = DT_GPLL0_OUT_MAIN }, +}; + +static const struct parent_map gpu_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { 
P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_3[] = { + { .index = DT_BI_TCXO }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .index = DT_GPLL0_OUT_MAIN }, + { .index = DT_GPLL0_OUT_MAIN_DIV }, +}; + +static const struct parent_map gpu_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_4[] = { + { .index = DT_BI_TCXO }, +}; + +static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = { + F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_ff_clk_src = { + .cmd_rcgr = 0x9474, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_0, + .freq_tbl = ftbl_gpu_cc_ff_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_ff_clk_src", + .parent_data = gpu_cc_parent_data_0, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gpu_cc_gmu_clk_src = { + .cmd_rcgr = 0x9318, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_1, + .freq_tbl = ftbl_gpu_cc_ff_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gmu_clk_src", + .parent_data = gpu_cc_parent_data_1, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = { + F(340000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(500000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(605000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(765000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(850000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(955000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(1010000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = { + .cmd_rcgr = 0x9070, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_2, + .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gx_gfx3d_clk_src", + .parent_data = gpu_cc_parent_data_2, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = { + F(150000000, P_GPLL0_OUT_MAIN_DIV, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_hub_clk_src = { + .cmd_rcgr = 0x93ec, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_3, + .freq_tbl = ftbl_gpu_cc_hub_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hub_clk_src", + .parent_data = gpu_cc_parent_data_3, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_xo_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_xo_clk_src = { + .cmd_rcgr = 0x9010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_4, + .freq_tbl = ftbl_gpu_cc_xo_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_xo_clk_src", + .parent_data = gpu_cc_parent_data_4, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_4), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_regmap_div gpu_cc_demet_div_clk_src = { + .reg = 0x9054, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_demet_div_clk_src", + .parent_hws = (const struct 
clk_hw*[]) { + &gpu_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = { + .reg = 0x9430, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hub_ahb_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = { + .reg = 0x942c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hub_cx_int_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gpu_cc_xo_div_clk_src = { + .reg = 0x9050, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_xo_div_clk_src", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch gpu_cc_ahb_clk = { + .halt_reg = 0x911c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x911c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_ahb_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_crc_ahb_clk = { + .halt_reg = 0x9120, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9120, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_crc_ahb_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_ahb_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_ff_clk = { + .halt_reg = 0x914c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x914c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cx_ff_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_ff_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gfx3d_clk = { + .halt_reg = 0x919c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x919c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cx_gfx3d_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gx_gfx3d_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = { + .halt_reg = 0x91a0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x91a0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cx_gfx3d_slv_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gx_gfx3d_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gmu_clk = { + .halt_reg = 0x913c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x913c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + 
.name = "gpu_cc_cx_gmu_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_snoc_dvm_clk = { + .halt_reg = 0x9130, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9130, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cx_snoc_dvm_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_clk = { + .halt_reg = 0x9144, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9144, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_cxo_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_freq_measure_clk = { + .halt_reg = 0x9008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_freq_measure_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_xo_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_cxo_clk = { + .halt_reg = 0x90b8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90b8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gx_cxo_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_ff_clk = { + .halt_reg = 0x90c0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90c0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gx_ff_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_ff_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gfx3d_clk = { + .halt_reg = 0x90a8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90a8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gx_gfx3d_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gx_gfx3d_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gfx3d_rdvm_clk = { + .halt_reg = 0x90c8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90c8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gx_gfx3d_rdvm_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gx_gfx3d_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gmu_clk = { + .halt_reg = 0x90bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90bc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gx_gmu_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_vsense_clk = { + .halt_reg = 0x90b0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x90b0, + .enable_mask = 
BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_gx_vsense_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hub_aon_clk = { + .halt_reg = 0x93e8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x93e8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hub_aon_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hub_cx_int_clk = { + .halt_reg = 0x9148, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9148, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_hub_cx_int_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_hub_cx_int_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_memnoc_gfx_clk = { + .halt_reg = 0x9150, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9150, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_memnoc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = { + .halt_reg = 0x9288, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9288, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_mnd1x_0_gfx3d_clk", + .parent_hws = (const struct clk_hw*[]) { + &gpu_cc_gx_gfx3d_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_sleep_clk = { + .halt_reg = 0x9134, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9134, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gpu_cc_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc gpu_cc_cx_gdsc = { + .gdscr = 0x9108, + .gds_hw_ctrl = 0x953c, + .clk_dis_wait_val = 8, + .pd = { + .name = "gpu_cx_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE, +}; + +static struct gdsc gpu_cc_gx_gdsc = { + .gdscr = 0x905c, + .clamp_io_ctrl = 0x9504, + .resets = (unsigned int []){ GPU_CC_GX_BCR, + GPU_CC_ACD_BCR, + GPU_CC_GX_ACD_IROOT_BCR }, + .reset_count = 3, + .pd = { + .name = "gpu_gx_gdsc", + .power_on = gdsc_gx_do_nothing_enable, + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = CLAMP_IO | AON_RESET | SW_RESET | POLL_CFG_GDSCR, +}; + +static struct clk_regmap *gpu_cc_sm4450_clocks[] = { + [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr, + [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr, + [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr, + [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr, + [GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr, + [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr, + [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr, + [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr, + [GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr, + [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr, + [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr, + [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr, + [GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr, + [GPU_CC_GX_FF_CLK] = &gpu_cc_gx_ff_clk.clkr, + [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr, + [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr, + [GPU_CC_GX_GFX3D_RDVM_CLK] = &gpu_cc_gx_gfx3d_rdvm_clk.clkr, + [GPU_CC_GX_GMU_CLK] = 
&gpu_cc_gx_gmu_clk.clkr, + [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr, + [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr, + [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr, + [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr, + [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr, + [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr, + [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr, + [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr, + [GPU_CC_PLL0] = &gpu_cc_pll0.clkr, + [GPU_CC_PLL1] = &gpu_cc_pll1.clkr, + [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr, + [GPU_CC_XO_CLK_SRC] = &gpu_cc_xo_clk_src.clkr, + [GPU_CC_XO_DIV_CLK_SRC] = &gpu_cc_xo_div_clk_src.clkr, +}; + +static struct gdsc *gpu_cc_sm4450_gdscs[] = { + [GPU_CC_CX_GDSC] = &gpu_cc_cx_gdsc, + [GPU_CC_GX_GDSC] = &gpu_cc_gx_gdsc, +}; + +static const struct qcom_reset_map gpu_cc_sm4450_resets[] = { + [GPU_CC_CB_BCR] = { 0x93a0 }, + [GPU_CC_CX_BCR] = { 0x9104 }, + [GPU_CC_GX_BCR] = { 0x9058 }, + [GPU_CC_FAST_HUB_BCR] = { 0x93e4 }, + [GPU_CC_ACD_BCR] = { 0x9358 }, + [GPU_CC_FF_BCR] = { 0x9470 }, + [GPU_CC_GFX3D_AON_BCR] = { 0x9198 }, + [GPU_CC_GMU_BCR] = { 0x9314 }, + [GPU_CC_RBCPR_BCR] = { 0x91e0 }, + [GPU_CC_XO_BCR] = { 0x9000 }, + [GPU_CC_GX_ACD_IROOT_BCR] = { 0x958c }, +}; + +static const struct regmap_config gpu_cc_sm4450_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x95c0, + .fast_io = true, +}; + +static const struct qcom_cc_desc gpu_cc_sm4450_desc = { + .config = &gpu_cc_sm4450_regmap_config, + .clks = gpu_cc_sm4450_clocks, + .num_clks = ARRAY_SIZE(gpu_cc_sm4450_clocks), + .resets = gpu_cc_sm4450_resets, + .num_resets = ARRAY_SIZE(gpu_cc_sm4450_resets), + .gdscs = gpu_cc_sm4450_gdscs, + .num_gdscs = ARRAY_SIZE(gpu_cc_sm4450_gdscs), +}; + +static const struct of_device_id gpu_cc_sm4450_match_table[] = { + { .compatible = "qcom,sm4450-gpucc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpu_cc_sm4450_match_table); + +static int gpu_cc_sm4450_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &gpu_cc_sm4450_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config); + clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config); + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0x93a4); /* GPU_CC_CB_CLK */ + qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */ + qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */ + + return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm4450_desc, regmap); +} + +static struct platform_driver gpu_cc_sm4450_driver = { + .probe = gpu_cc_sm4450_probe, + .driver = { + .name = "gpucc-sm4450", + .of_match_table = gpu_cc_sm4450_match_table, + }, +}; + +module_platform_driver(gpu_cc_sm4450_driver); + +MODULE_DESCRIPTION("QTI GPUCC SM4450 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index bf5320a43e8c25..bbacd7fedb2f5b 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c @@ -70,7 +70,7 @@ static const struct clk_parent_data lcc_pxo_pll4[] = { { .fw_name = "pll4_vote", .name = "pll4_vote" }, }; -static struct freq_tbl clk_tbl_aif_mi2s[] = { +static const struct freq_tbl clk_tbl_aif_mi2s[] = { { 1024000, P_PLL4, 4, 1, 96 }, { 1411200, P_PLL4, 4, 2, 139 }, { 1536000, P_PLL4, 4, 1, 64 }, @@ -214,7 +214,7 @@ static struct clk_regmap_mux mi2s_bit_clk = { }, }; 
-static struct freq_tbl clk_tbl_pcm[] = { +static const struct freq_tbl clk_tbl_pcm[] = { { 64000, P_PLL4, 4, 1, 1536 }, { 128000, P_PLL4, 4, 1, 768 }, { 256000, P_PLL4, 4, 1, 384 }, @@ -296,7 +296,7 @@ static struct clk_regmap_mux pcm_clk = { }, }; -static struct freq_tbl clk_tbl_aif_osr[] = { +static const struct freq_tbl clk_tbl_aif_osr[] = { { 2822400, P_PLL4, 1, 147, 20480 }, { 4096000, P_PLL4, 1, 1, 96 }, { 5644800, P_PLL4, 1, 147, 10240 }, @@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = { }, }; -static struct freq_tbl clk_tbl_ahbix[] = { +static const struct freq_tbl clk_tbl_ahbix[] = { { 131072000, P_PLL4, 1, 1, 3 }, { }, }; diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c index d53bf315e9c33d..7cba2ce3e408d4 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c @@ -57,7 +57,7 @@ static struct clk_parent_data lcc_pxo_pll4[] = { { .fw_name = "pll4_vote", .name = "pll4_vote" }, }; -static struct freq_tbl clk_tbl_aif_osr_492[] = { +static const struct freq_tbl clk_tbl_aif_osr_492[] = { { 512000, P_PLL4, 4, 1, 240 }, { 768000, P_PLL4, 4, 1, 160 }, { 1024000, P_PLL4, 4, 1, 120 }, @@ -73,7 +73,7 @@ static struct freq_tbl clk_tbl_aif_osr_492[] = { { } }; -static struct freq_tbl clk_tbl_aif_osr_393[] = { +static const struct freq_tbl clk_tbl_aif_osr_393[] = { { 512000, P_PLL4, 4, 1, 192 }, { 768000, P_PLL4, 4, 1, 128 }, { 1024000, P_PLL4, 4, 1, 96 }, @@ -218,7 +218,7 @@ CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80); CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74); CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c); -static struct freq_tbl clk_tbl_pcm_492[] = { +static const struct freq_tbl clk_tbl_pcm_492[] = { { 256000, P_PLL4, 4, 1, 480 }, { 512000, P_PLL4, 4, 1, 240 }, { 768000, P_PLL4, 4, 1, 160 }, @@ -235,7 +235,7 @@ static struct freq_tbl clk_tbl_pcm_492[] = { { } }; -static struct freq_tbl clk_tbl_pcm_393[] = { +static const struct freq_tbl clk_tbl_pcm_393[] = { { 256000, P_PLL4, 4, 1, 384 }, { 512000, P_PLL4, 4, 1, 192 }, { 768000, P_PLL4, 4, 1, 128 }, diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index c89700ab93f9c6..cc03722596a4c6 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -338,7 +338,7 @@ static struct clk_rcg2 mmss_ahb_clk_src = { }, }; -static struct freq_tbl ftbl_mmss_axi_clk[] = { +static const struct freq_tbl ftbl_mmss_axi_clk[] = { F(19200000, P_XO, 1, 0, 0), F(37500000, P_GPLL0, 16, 0, 0), F(50000000, P_GPLL0, 12, 0, 0), @@ -364,7 +364,7 @@ static struct clk_rcg2 mmss_axi_clk_src = { }, }; -static struct freq_tbl ftbl_ocmemnoc_clk[] = { +static const struct freq_tbl ftbl_ocmemnoc_clk[] = { F(19200000, P_XO, 1, 0, 0), F(37500000, P_GPLL0, 16, 0, 0), F(50000000, P_GPLL0, 12, 0, 0), @@ -389,7 +389,7 @@ static struct clk_rcg2 ocmemnoc_clk_src = { }, }; -static struct freq_tbl ftbl_camss_csi0_3_clk[] = { +static const struct freq_tbl ftbl_camss_csi0_3_clk[] = { F(100000000, P_GPLL0, 6, 0, 0), F(200000000, P_MMPLL0, 4, 0, 0), { } @@ -447,7 +447,7 @@ static struct clk_rcg2 csi3_clk_src = { }, }; -static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = { +static const struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = { F(37500000, P_GPLL0, 16, 0, 0), F(50000000, P_GPLL0, 12, 0, 0), F(60000000, P_GPLL0, 10, 0, 0), @@ -490,7 +490,7 @@ static struct clk_rcg2 vfe1_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_mdp_clk[] = { +static const struct freq_tbl ftbl_mdss_mdp_clk[] = { F(37500000, P_GPLL0, 16, 0, 0), F(60000000, P_GPLL0, 10, 0, 0), 
F(75000000, P_GPLL0, 8, 0, 0), @@ -530,7 +530,7 @@ static struct clk_rcg2 gfx3d_clk_src = { }, }; -static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = { +static const struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = { F(75000000, P_GPLL0, 8, 0, 0), F(133330000, P_GPLL0, 4.5, 0, 0), F(200000000, P_GPLL0, 3, 0, 0), @@ -607,7 +607,7 @@ static struct clk_rcg2 pclk1_clk_src = { }, }; -static struct freq_tbl ftbl_venus0_vcodec0_clk[] = { +static const struct freq_tbl ftbl_venus0_vcodec0_clk[] = { F(50000000, P_GPLL0, 12, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(133330000, P_GPLL0, 4.5, 0, 0), @@ -631,7 +631,7 @@ static struct clk_rcg2 vcodec0_clk_src = { }, }; -static struct freq_tbl ftbl_avsync_vp_clk[] = { +static const struct freq_tbl ftbl_avsync_vp_clk[] = { F(150000000, P_GPLL0, 4, 0, 0), F(320000000, P_MMPLL0, 2.5, 0, 0), { } @@ -650,7 +650,7 @@ static struct clk_rcg2 vp_clk_src = { }, }; -static struct freq_tbl ftbl_camss_cci_cci_clk[] = { +static const struct freq_tbl ftbl_camss_cci_cci_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -669,7 +669,7 @@ static struct clk_rcg2 cci_clk_src = { }, }; -static struct freq_tbl ftbl_camss_gp0_1_clk[] = { +static const struct freq_tbl ftbl_camss_gp0_1_clk[] = { F(10000, P_XO, 16, 1, 120), F(24000, P_XO, 16, 1, 50), F(6000000, P_GPLL0, 10, 1, 10), @@ -707,7 +707,7 @@ static struct clk_rcg2 camss_gp1_clk_src = { }, }; -static struct freq_tbl ftbl_camss_mclk0_3_clk[] = { +static const struct freq_tbl ftbl_camss_mclk0_3_clk[] = { F(4800000, P_XO, 4, 0, 0), F(6000000, P_GPLL0, 10, 1, 10), F(8000000, P_GPLL0, 15, 1, 5), @@ -777,7 +777,7 @@ static struct clk_rcg2 mclk3_clk_src = { }, }; -static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = { +static const struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = { F(100000000, P_GPLL0, 6, 0, 0), F(200000000, P_MMPLL0, 4, 0, 0), { } @@ -822,7 +822,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = { }, }; -static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = { +static const struct freq_tbl ftbl_camss_vfe_cpp_clk[] = { F(133330000, P_GPLL0, 4.5, 0, 0), F(266670000, P_MMPLL0, 3, 0, 0), F(320000000, P_MMPLL0, 2.5, 0, 0), @@ -871,7 +871,7 @@ static struct clk_rcg2 byte1_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_edpaux_clk[] = { +static const struct freq_tbl ftbl_mdss_edpaux_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -889,7 +889,7 @@ static struct clk_rcg2 edpaux_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_edplink_clk[] = { +static const struct freq_tbl ftbl_mdss_edplink_clk[] = { F(135000000, P_EDPLINK, 2, 0, 0), F(270000000, P_EDPLINK, 11, 0, 0), { } @@ -909,7 +909,7 @@ static struct clk_rcg2 edplink_clk_src = { }, }; -static struct freq_tbl edp_pixel_freq_tbl[] = { +static const struct freq_tbl edp_pixel_freq_tbl[] = { { .src = P_EDPVCO }, { } }; @@ -928,7 +928,7 @@ static struct clk_rcg2 edppixel_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_esc0_1_clk[] = { +static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -959,7 +959,7 @@ static struct clk_rcg2 esc1_clk_src = { }, }; -static struct freq_tbl extpclk_freq_tbl[] = { +static const struct freq_tbl extpclk_freq_tbl[] = { { .src = P_HDMIPLL }, { } }; @@ -978,7 +978,7 @@ static struct clk_rcg2 extpclk_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_hdmi_clk[] = { +static const struct freq_tbl ftbl_mdss_hdmi_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -996,7 +996,7 @@ static struct clk_rcg2 hdmi_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_vsync_clk[] = { +static 
const struct freq_tbl ftbl_mdss_vsync_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -1014,7 +1014,7 @@ static struct clk_rcg2 vsync_clk_src = { }, }; -static struct freq_tbl ftbl_mmss_rbcpr_clk[] = { +static const struct freq_tbl ftbl_mmss_rbcpr_clk[] = { F(50000000, P_GPLL0, 12, 0, 0), { } }; @@ -1032,7 +1032,7 @@ static struct clk_rcg2 rbcpr_clk_src = { }, }; -static struct freq_tbl ftbl_oxili_rbbmtimer_clk[] = { +static const struct freq_tbl ftbl_oxili_rbbmtimer_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -1050,7 +1050,7 @@ static struct clk_rcg2 rbbmtimer_clk_src = { }, }; -static struct freq_tbl ftbl_vpu_maple_clk[] = { +static const struct freq_tbl ftbl_vpu_maple_clk[] = { F(50000000, P_GPLL0, 12, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(133330000, P_GPLL0, 4.5, 0, 0), @@ -1073,7 +1073,7 @@ static struct clk_rcg2 maple_clk_src = { }, }; -static struct freq_tbl ftbl_vpu_vdp_clk[] = { +static const struct freq_tbl ftbl_vpu_vdp_clk[] = { F(50000000, P_GPLL0, 12, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(200000000, P_MMPLL0, 4, 0, 0), @@ -1095,7 +1095,7 @@ static struct clk_rcg2 vdp_clk_src = { }, }; -static struct freq_tbl ftbl_vpu_bus_clk[] = { +static const struct freq_tbl ftbl_vpu_bus_clk[] = { F(40000000, P_GPLL0, 15, 0, 0), F(80000000, P_MMPLL0, 10, 0, 0), { } diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c index 1061322534c414..3f41249c5ae435 100644 --- a/drivers/clk/qcom/mmcc-msm8960.c +++ b/drivers/clk/qcom/mmcc-msm8960.c @@ -155,7 +155,7 @@ static const struct clk_parent_data mmcc_pxo_dsi1_dsi2_byte[] = { { .fw_name = "dsi2pllbyte", .name = "dsi2pllbyte" }, }; -static struct freq_tbl clk_tbl_cam[] = { +static const struct freq_tbl clk_tbl_cam[] = { { 6000000, P_PLL8, 4, 1, 16 }, { 8000000, P_PLL8, 4, 1, 12 }, { 12000000, P_PLL8, 4, 1, 8 }, @@ -323,7 +323,7 @@ static struct clk_branch camclk2_clk = { }; -static struct freq_tbl clk_tbl_csi[] = { +static const struct freq_tbl clk_tbl_csi[] = { { 27000000, P_PXO, 1, 0, 0 }, { 85330000, P_PLL8, 1, 2, 9 }, { 177780000, P_PLL2, 1, 2, 9 }, @@ -715,7 +715,7 @@ static struct clk_pix_rdi csi_rdi2_clk = { }, }; -static struct freq_tbl clk_tbl_csiphytimer[] = { +static const struct freq_tbl clk_tbl_csiphytimer[] = { { 85330000, P_PLL8, 1, 2, 9 }, { 177780000, P_PLL2, 1, 2, 9 }, { } @@ -808,7 +808,7 @@ static struct clk_branch csiphy2_timer_clk = { }, }; -static struct freq_tbl clk_tbl_gfx2d[] = { +static const struct freq_tbl clk_tbl_gfx2d[] = { F_MN( 27000000, P_PXO, 1, 0), F_MN( 48000000, P_PLL8, 1, 8), F_MN( 54857000, P_PLL8, 1, 7), @@ -948,7 +948,7 @@ static struct clk_branch gfx2d1_clk = { }, }; -static struct freq_tbl clk_tbl_gfx3d[] = { +static const struct freq_tbl clk_tbl_gfx3d[] = { F_MN( 27000000, P_PXO, 1, 0), F_MN( 48000000, P_PLL8, 1, 8), F_MN( 54857000, P_PLL8, 1, 7), @@ -968,7 +968,7 @@ static struct freq_tbl clk_tbl_gfx3d[] = { { } }; -static struct freq_tbl clk_tbl_gfx3d_8064[] = { +static const struct freq_tbl clk_tbl_gfx3d_8064[] = { F_MN( 27000000, P_PXO, 0, 0), F_MN( 48000000, P_PLL8, 1, 8), F_MN( 54857000, P_PLL8, 1, 7), @@ -1058,7 +1058,7 @@ static struct clk_branch gfx3d_clk = { }, }; -static struct freq_tbl clk_tbl_vcap[] = { +static const struct freq_tbl clk_tbl_vcap[] = { F_MN( 27000000, P_PXO, 0, 0), F_MN( 54860000, P_PLL8, 1, 7), F_MN( 64000000, P_PLL8, 1, 6), @@ -1149,7 +1149,7 @@ static struct clk_branch vcap_npl_clk = { }, }; -static struct freq_tbl clk_tbl_ijpeg[] = { +static const struct freq_tbl clk_tbl_ijpeg[] = { { 27000000, P_PXO, 1, 0, 0 }, { 36570000, P_PLL8, 1, 2, 
21 }, { 54860000, P_PLL8, 7, 0, 0 }, @@ -1214,7 +1214,7 @@ static struct clk_branch ijpeg_clk = { }, }; -static struct freq_tbl clk_tbl_jpegd[] = { +static const struct freq_tbl clk_tbl_jpegd[] = { { 64000000, P_PLL8, 6 }, { 76800000, P_PLL8, 5 }, { 96000000, P_PLL8, 4 }, @@ -1264,7 +1264,7 @@ static struct clk_branch jpegd_clk = { }, }; -static struct freq_tbl clk_tbl_mdp[] = { +static const struct freq_tbl clk_tbl_mdp[] = { { 9600000, P_PLL8, 1, 1, 40 }, { 13710000, P_PLL8, 1, 1, 28 }, { 27000000, P_PXO, 1, 0, 0 }, @@ -1381,7 +1381,7 @@ static struct clk_branch mdp_vsync_clk = { }, }; -static struct freq_tbl clk_tbl_rot[] = { +static const struct freq_tbl clk_tbl_rot[] = { { 27000000, P_PXO, 1 }, { 29540000, P_PLL8, 13 }, { 32000000, P_PLL8, 12 }, @@ -1461,7 +1461,7 @@ static const struct clk_parent_data mmcc_pxo_hdmi[] = { { .fw_name = "hdmipll", .name = "hdmi_pll" }, }; -static struct freq_tbl clk_tbl_tv[] = { +static const struct freq_tbl clk_tbl_tv[] = { { .src = P_HDMI_PLL, .pre_div = 1 }, { } }; @@ -1624,7 +1624,7 @@ static struct clk_branch hdmi_app_clk = { }, }; -static struct freq_tbl clk_tbl_vcodec[] = { +static const struct freq_tbl clk_tbl_vcodec[] = { F_MN( 27000000, P_PXO, 1, 0), F_MN( 32000000, P_PLL8, 1, 12), F_MN( 48000000, P_PLL8, 1, 8), @@ -1699,7 +1699,7 @@ static struct clk_branch vcodec_clk = { }, }; -static struct freq_tbl clk_tbl_vpe[] = { +static const struct freq_tbl clk_tbl_vpe[] = { { 27000000, P_PXO, 1 }, { 34909000, P_PLL8, 11 }, { 38400000, P_PLL8, 10 }, @@ -1752,7 +1752,7 @@ static struct clk_branch vpe_clk = { }, }; -static struct freq_tbl clk_tbl_vfe[] = { +static const struct freq_tbl clk_tbl_vfe[] = { { 13960000, P_PLL8, 1, 2, 55 }, { 27000000, P_PXO, 1, 0, 0 }, { 36570000, P_PLL8, 1, 2, 21 }, diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c index d5bcb09ebd0cce..169e85f60550bb 100644 --- a/drivers/clk/qcom/mmcc-msm8974.c +++ b/drivers/clk/qcom/mmcc-msm8974.c @@ -268,7 +268,7 @@ static struct clk_rcg2 mmss_ahb_clk_src = { }, }; -static struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = { +static const struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = { F(19200000, P_XO, 1, 0, 0), F(37500000, P_GPLL0, 16, 0, 0), F(50000000, P_GPLL0, 12, 0, 0), @@ -280,7 +280,7 @@ static struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = { { } }; -static struct freq_tbl ftbl_mmss_axi_clk[] = { +static const struct freq_tbl ftbl_mmss_axi_clk[] = { F( 19200000, P_XO, 1, 0, 0), F( 37500000, P_GPLL0, 16, 0, 0), F( 50000000, P_GPLL0, 12, 0, 0), @@ -306,7 +306,7 @@ static struct clk_rcg2 mmss_axi_clk_src = { }, }; -static struct freq_tbl ftbl_ocmemnoc_clk[] = { +static const struct freq_tbl ftbl_ocmemnoc_clk[] = { F( 19200000, P_XO, 1, 0, 0), F( 37500000, P_GPLL0, 16, 0, 0), F( 50000000, P_GPLL0, 12, 0, 0), @@ -331,7 +331,7 @@ static struct clk_rcg2 ocmemnoc_clk_src = { }, }; -static struct freq_tbl ftbl_camss_csi0_3_clk[] = { +static const struct freq_tbl ftbl_camss_csi0_3_clk[] = { F(100000000, P_GPLL0, 6, 0, 0), F(200000000, P_MMPLL0, 4, 0, 0), { } @@ -389,7 +389,7 @@ static struct clk_rcg2 csi3_clk_src = { }, }; -static struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = { +static const struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = { F(37500000, P_GPLL0, 16, 0, 0), F(50000000, P_GPLL0, 12, 0, 0), F(60000000, P_GPLL0, 10, 0, 0), @@ -406,7 +406,7 @@ static struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = { { } }; -static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = { +static const struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = { F(37500000, P_GPLL0, 
16, 0, 0), F(50000000, P_GPLL0, 12, 0, 0), F(60000000, P_GPLL0, 10, 0, 0), @@ -449,7 +449,7 @@ static struct clk_rcg2 vfe1_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = { +static const struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = { F(37500000, P_GPLL0, 16, 0, 0), F(60000000, P_GPLL0, 10, 0, 0), F(75000000, P_GPLL0, 8, 0, 0), @@ -461,7 +461,7 @@ static struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = { { } }; -static struct freq_tbl ftbl_mdss_mdp_clk[] = { +static const struct freq_tbl ftbl_mdss_mdp_clk[] = { F(37500000, P_GPLL0, 16, 0, 0), F(60000000, P_GPLL0, 10, 0, 0), F(75000000, P_GPLL0, 8, 0, 0), @@ -490,7 +490,7 @@ static struct clk_rcg2 mdp_clk_src = { }, }; -static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = { +static const struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = { F(75000000, P_GPLL0, 8, 0, 0), F(133330000, P_GPLL0, 4.5, 0, 0), F(200000000, P_GPLL0, 3, 0, 0), @@ -567,7 +567,7 @@ static struct clk_rcg2 pclk1_clk_src = { }, }; -static struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = { +static const struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = { F(66700000, P_GPLL0, 9, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(133330000, P_MMPLL0, 6, 0, 0), @@ -575,7 +575,7 @@ static struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = { { } }; -static struct freq_tbl ftbl_venus0_vcodec0_clk[] = { +static const struct freq_tbl ftbl_venus0_vcodec0_clk[] = { F(50000000, P_GPLL0, 12, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), F(133330000, P_MMPLL0, 6, 0, 0), @@ -599,7 +599,7 @@ static struct clk_rcg2 vcodec0_clk_src = { }, }; -static struct freq_tbl ftbl_camss_cci_cci_clk[] = { +static const struct freq_tbl ftbl_camss_cci_cci_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -617,7 +617,7 @@ static struct clk_rcg2 cci_clk_src = { }, }; -static struct freq_tbl ftbl_camss_gp0_1_clk[] = { +static const struct freq_tbl ftbl_camss_gp0_1_clk[] = { F(10000, P_XO, 16, 1, 120), F(24000, P_XO, 16, 1, 50), F(6000000, P_GPLL0, 10, 1, 10), @@ -655,14 +655,14 @@ static struct clk_rcg2 camss_gp1_clk_src = { }, }; -static struct freq_tbl ftbl_camss_mclk0_3_clk_msm8226[] = { +static const struct freq_tbl ftbl_camss_mclk0_3_clk_msm8226[] = { F(19200000, P_XO, 1, 0, 0), F(24000000, P_GPLL0, 5, 1, 5), F(66670000, P_GPLL0, 9, 0, 0), { } }; -static struct freq_tbl ftbl_camss_mclk0_3_clk[] = { +static const struct freq_tbl ftbl_camss_mclk0_3_clk[] = { F(4800000, P_XO, 4, 0, 0), F(6000000, P_GPLL0, 10, 1, 10), F(8000000, P_GPLL0, 15, 1, 5), @@ -729,7 +729,7 @@ static struct clk_rcg2 mclk3_clk_src = { }, }; -static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = { +static const struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = { F(100000000, P_GPLL0, 6, 0, 0), F(200000000, P_MMPLL0, 4, 0, 0), { } @@ -774,7 +774,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = { }, }; -static struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = { +static const struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = { F(133330000, P_GPLL0, 4.5, 0, 0), F(150000000, P_GPLL0, 4, 0, 0), F(266670000, P_MMPLL0, 3, 0, 0), @@ -783,7 +783,7 @@ static struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = { { } }; -static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = { +static const struct freq_tbl ftbl_camss_vfe_cpp_clk[] = { F(133330000, P_GPLL0, 4.5, 0, 0), F(266670000, P_MMPLL0, 3, 0, 0), F(320000000, P_MMPLL0, 2.5, 0, 0), @@ -805,7 +805,7 @@ static struct clk_rcg2 cpp_clk_src = { }, }; -static struct freq_tbl byte_freq_tbl[] = { +static const struct freq_tbl byte_freq_tbl[] = { { .src = P_DSI0PLL_BYTE }, { } 
}; @@ -838,7 +838,7 @@ static struct clk_rcg2 byte1_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_edpaux_clk[] = { +static const struct freq_tbl ftbl_mdss_edpaux_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -856,7 +856,7 @@ static struct clk_rcg2 edpaux_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_edplink_clk[] = { +static const struct freq_tbl ftbl_mdss_edplink_clk[] = { F(135000000, P_EDPLINK, 2, 0, 0), F(270000000, P_EDPLINK, 11, 0, 0), { } @@ -876,7 +876,7 @@ static struct clk_rcg2 edplink_clk_src = { }, }; -static struct freq_tbl edp_pixel_freq_tbl[] = { +static const struct freq_tbl edp_pixel_freq_tbl[] = { { .src = P_EDPVCO }, { } }; @@ -895,7 +895,7 @@ static struct clk_rcg2 edppixel_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_esc0_1_clk[] = { +static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -926,7 +926,7 @@ static struct clk_rcg2 esc1_clk_src = { }, }; -static struct freq_tbl extpclk_freq_tbl[] = { +static const struct freq_tbl extpclk_freq_tbl[] = { { .src = P_HDMIPLL }, { } }; @@ -945,7 +945,7 @@ static struct clk_rcg2 extpclk_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_hdmi_clk[] = { +static const struct freq_tbl ftbl_mdss_hdmi_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -963,7 +963,7 @@ static struct clk_rcg2 hdmi_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_vsync_clk[] = { +static const struct freq_tbl ftbl_mdss_vsync_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; diff --git a/drivers/clk/qcom/mmcc-msm8994.c b/drivers/clk/qcom/mmcc-msm8994.c index 78e5083eaf0f60..f70d080bf51c84 100644 --- a/drivers/clk/qcom/mmcc-msm8994.c +++ b/drivers/clk/qcom/mmcc-msm8994.c @@ -974,7 +974,7 @@ static struct clk_rcg2 byte1_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_esc0_1_clk[] = { +static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -1005,7 +1005,7 @@ static struct clk_rcg2 esc1_clk_src = { }, }; -static struct freq_tbl extpclk_freq_tbl[] = { +static const struct freq_tbl extpclk_freq_tbl[] = { { .src = P_HDMIPLL }, { } }; @@ -1024,7 +1024,7 @@ static struct clk_rcg2 extpclk_clk_src = { }, }; -static struct freq_tbl ftbl_hdmi_clk_src[] = { +static const struct freq_tbl ftbl_hdmi_clk_src[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -1042,7 +1042,7 @@ static struct clk_rcg2 hdmi_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_vsync_clk[] = { +static const struct freq_tbl ftbl_mdss_vsync_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c index 1a32c6eb821789..a742f848e4eeb5 100644 --- a/drivers/clk/qcom/mmcc-msm8996.c +++ b/drivers/clk/qcom/mmcc-msm8996.c @@ -734,7 +734,7 @@ static struct clk_rcg2 mdp_clk_src = { }, }; -static struct freq_tbl extpclk_freq_tbl[] = { +static const struct freq_tbl extpclk_freq_tbl[] = { { .src = P_HDMIPLL }, { } }; @@ -753,7 +753,7 @@ static struct clk_rcg2 extpclk_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_vsync_clk[] = { +static const struct freq_tbl ftbl_mdss_vsync_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -771,7 +771,7 @@ static struct clk_rcg2 vsync_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_hdmi_clk[] = { +static const struct freq_tbl ftbl_mdss_hdmi_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; @@ -815,7 +815,7 @@ static struct clk_rcg2 byte1_clk_src = { }, }; -static struct freq_tbl ftbl_mdss_esc0_1_clk[] = { +static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = { F(19200000, P_XO, 1, 0, 0), { } }; diff --git 
a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c index 97d150b132a6dc..7c25a50cfa970d 100644 --- a/drivers/clk/qcom/videocc-sm8550.c +++ b/drivers/clk/qcom/videocc-sm8550.c @@ -449,7 +449,7 @@ static struct gdsc video_cc_mvs0_gdsc = { }, .pwrsts = PWRSTS_OFF_ON, .parent = &video_cc_mvs0c_gdsc.pd, - .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER, }; static struct gdsc video_cc_mvs1c_gdsc = { @@ -474,7 +474,7 @@ static struct gdsc video_cc_mvs1_gdsc = { }, .pwrsts = PWRSTS_OFF_ON, .parent = &video_cc_mvs1c_gdsc.pd, - .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL, + .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER, }; static struct clk_regmap *video_cc_sm8550_clocks[] = { diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 4410d16de4e2a5..76791a1c50ac73 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -40,6 +40,7 @@ config CLK_RENESAS select CLK_R9A07G054 if ARCH_R9A07G054 select CLK_R9A08G045 if ARCH_R9A08G045 select CLK_R9A09G011 if ARCH_R9A09G011 + select CLK_R9A09G057 if ARCH_R9A09G057 select CLK_SH73A0 if ARCH_SH73A0 if CLK_RENESAS @@ -193,6 +194,10 @@ config CLK_R9A09G011 bool "RZ/V2M clock support" if COMPILE_TEST select CLK_RZG2L +config CLK_R9A09G057 + bool "RZ/V2H(P) clock support" if COMPILE_TEST + select CLK_RZV2H + config CLK_SH73A0 bool "SH-Mobile AG5 clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP @@ -228,6 +233,10 @@ config CLK_RZG2L bool "RZ/{G2L,G2UL,G3S,V2L} family clock support" if COMPILE_TEST select RESET_CONTROLLER +config CLK_RZV2H + bool "RZ/V2H(P) family clock support" if COMPILE_TEST + select RESET_CONTROLLER + # Generic config CLK_RENESAS_CPG_MSSR bool "CPG/MSSR clock support" if COMPILE_TEST diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index f7e18679c3b81b..23d2e26051c84a 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o +obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o # Family @@ -46,6 +47,7 @@ obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o obj-$(CONFIG_CLK_RCAR_GEN4_CPG) += rcar-gen4-cpg.o obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o obj-$(CONFIG_CLK_RZG2L) += rzg2l-cpg.o +obj-$(CONFIG_CLK_RZV2H) += rzv2h-cpg.o # Generic obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c index 5304c977562fc6..5bc473c2adb33a 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -207,7 +207,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) for (i = 0; i < MSTP_MAX_CLOCKS; ++i) clks[i] = ERR_PTR(-ENOENT); - if (of_find_property(np, "clock-indices", &i)) + if (of_property_present(np, "clock-indices")) idxname = "clock-indices"; else idxname = "renesas,clock-indices"; diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c index ff3f85e906fe17..4c8e4c69c1bfaa 100644 --- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c @@ -61,6 +61,11 @@ enum clk_ids { DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL2X_3X, CLK_MAIN, \ .offset = _offset) +#define CPG_PLL20CR 0x0834 /* PLL20 Control Register */ 
+#define CPG_PLL21CR 0x0838 /* PLL21 Control Register */ +#define CPG_PLL30CR 0x083c /* PLL30 Control Register */ +#define CPG_PLL31CR 0x0840 /* PLL31 Control Register */ + static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = { /* External Clock Inputs */ DEF_INPUT("extal", CLK_EXTAL), @@ -70,10 +75,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = { DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL), DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN), DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN), - DEF_PLL(".pll20", CLK_PLL20, 0x0834), - DEF_PLL(".pll21", CLK_PLL21, 0x0838), - DEF_PLL(".pll30", CLK_PLL30, 0x083c), - DEF_PLL(".pll31", CLK_PLL31, 0x0840), + DEF_PLL(".pll20", CLK_PLL20, CPG_PLL20CR), + DEF_PLL(".pll21", CLK_PLL21, CPG_PLL21CR), + DEF_PLL(".pll30", CLK_PLL30, CPG_PLL30CR), + DEF_PLL(".pll31", CLK_PLL31, CPG_PLL31CR), DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), DEF_FIXED(".pll20_div2", CLK_PLL20_DIV2, CLK_PLL20, 2, 1), @@ -116,17 +121,17 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = { DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cl16mck", R8A779A0_CLK_CL16MCK, CLK_PLL1_DIV2, 64, 1), - DEF_GEN4_SDH("sd0h", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870), - DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870), + DEF_GEN4_SDH("sd0h", R8A779A0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR), + DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, CPG_SD0CKCR), DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779A0_CLK_RPC), - DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c), - DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878), - DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880), - DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, 0x884), + DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR), + DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR), + DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, CPG_CSICKCR), + DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, CPG_DSIEXTCKCR), DEF_GEN4_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8), DEF_GEN4_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1), @@ -253,12 +258,12 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = { */ #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ (((md) & BIT(13)) >> 13)) -static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = { - /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ - { 1, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 16, }, - { 1, 106, 1, 0, 0, 0, 0, 120, 1, 160, 1, 0, 0, 19, }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, - { 2, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 32, }, +static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = { + /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */ + { 1, 128, 1, 192, 1, 16, }, + { 1, 106, 1, 160, 1, 19, }, + { 0, 0, 0, 0, 0, 0, }, + { 2, 128, 1, 192, 1, 32, }, }; diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c index cc06127406ab57..f33342314b2ef9 100644 --- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c @@ -57,12 +57,12 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = { DEF_INPUT("extalr", CLK_EXTALR), /* Internal Core Clocks */ - DEF_BASE(".main", CLK_MAIN, 
CLK_TYPE_GEN4_MAIN, CLK_EXTAL), - DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN), - DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN), - DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN), - DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN), - DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN), + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL), + DEF_GEN4_PLL_F9_24(".pll1", 1, CLK_PLL1, CLK_MAIN), + DEF_GEN4_PLL_V9_24(".pll2", 2, CLK_PLL2, CLK_MAIN), + DEF_GEN4_PLL_V9_24(".pll3", 3, CLK_PLL3, CLK_MAIN), + DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN), + DEF_GEN4_PLL_V9_24(".pll6", 6, CLK_PLL6, CLK_MAIN), DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1), @@ -115,13 +115,13 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = { DEF_FIXED("sasyncperd2",R8A779F0_CLK_SASYNCPERD2, CLK_SASYNCPER,2, 1), DEF_FIXED("sasyncperd4",R8A779F0_CLK_SASYNCPERD4, CLK_SASYNCPER,4, 1), - DEF_GEN4_SDH("sd0h", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870), - DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, 0x870), + DEF_GEN4_SDH("sd0h", R8A779F0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR), + DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, CPG_SD0CKCR), DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC), - DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, 0x87c), + DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR), DEF_GEN4_OSC("osc", R8A779F0_CLK_OSC, CLK_EXTAL, 8), DEF_GEN4_MDSEL("r", R8A779F0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1), @@ -187,12 +187,12 @@ static const unsigned int r8a779f0_crit_mod_clks[] __initconst = { #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ (((md) & BIT(13)) >> 13)) -static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = { - /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ - { 1, 200, 1, 150, 1, 200, 1, 0, 0, 200, 1, 134, 1, 15, }, - { 1, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 19, }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, - { 2, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 38, }, +static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = { + /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */ + { 1, 200, 1, 200, 1, 15, }, + { 1, 160, 1, 160, 1, 19, }, + { 0, 0, 0, 0, 0, 0, }, + { 2, 160, 1, 160, 1, 38, }, }; static int __init r8a779f0_cpg_mssr_init(struct device *dev) diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c index c4b1938db76b35..55c8dd032fc325 100644 --- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c @@ -66,13 +66,13 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = { DEF_INPUT("extalr", CLK_EXTALR), /* Internal Core Clocks */ - DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL), - DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN), - DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2_VAR, CLK_MAIN), - DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN), - DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN), - DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN), - DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN), + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL), + DEF_GEN4_PLL_F8_25(".pll1", 1, CLK_PLL1, CLK_MAIN), + DEF_GEN4_PLL_V8_25(".pll2", 2, 
CLK_PLL2, CLK_MAIN), + DEF_GEN4_PLL_V8_25(".pll3", 3, CLK_PLL3, CLK_MAIN), + DEF_GEN4_PLL_V8_25(".pll4", 4, CLK_PLL4, CLK_MAIN), + DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN), + DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN), DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1), @@ -146,14 +146,14 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = { DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1), DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1), DEF_FIXED("vcbusd2", R8A779G0_CLK_VCBUSD2, CLK_VC, 2, 1), - DEF_DIV6P1("canfd", R8A779G0_CLK_CANFD, CLK_PLL5_DIV4, 0x878), - DEF_DIV6P1("csi", R8A779G0_CLK_CSI, CLK_PLL5_DIV4, 0x880), + DEF_DIV6P1("canfd", R8A779G0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR), + DEF_DIV6P1("csi", R8A779G0_CLK_CSI, CLK_PLL5_DIV4, CPG_CSICKCR), DEF_FIXED("dsiref", R8A779G0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1), - DEF_DIV6P1("dsiext", R8A779G0_CLK_DSIEXT, CLK_PLL5_DIV4, 0x884), + DEF_DIV6P1("dsiext", R8A779G0_CLK_DSIEXT, CLK_PLL5_DIV4, CPG_DSIEXTCKCR), - DEF_GEN4_SDH("sd0h", R8A779G0_CLK_SD0H, CLK_SDSRC, 0x870), - DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, R8A779G0_CLK_SD0H, 0x870), - DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, 0x87c), + DEF_GEN4_SDH("sd0h", R8A779G0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR), + DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, R8A779G0_CLK_SD0H, CPG_SD0CKCR), + DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR), DEF_BASE("rpc", R8A779G0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), DEF_BASE("rpcd2", R8A779G0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779G0_CLK_RPC), @@ -258,12 +258,12 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = { #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ (((md) & BIT(13)) >> 13)) -static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = { - /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ - { 1, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 16, }, - { 1, 160, 1, 170, 1, 160, 1, 120, 1, 160, 1, 140, 1, 19, }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, - { 2, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 32, }, +static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = { + /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 0, 0, 0, 0, 0, 0, }, + { 2, 192, 1, 192, 1, 32, }, }; static int __init r8a779g0_cpg_mssr_init(struct device *dev) diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c index 16a2e26abcc7f7..e20c048bfa9be1 100644 --- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c @@ -63,19 +63,19 @@ enum clk_ids { MOD_CLK_BASE }; -static const struct cpg_core_clk r8a779h0_core_clks[] = { +static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = { /* External Clock Inputs */ DEF_INPUT("extal", CLK_EXTAL), DEF_INPUT("extalr", CLK_EXTALR), /* Internal Core Clocks */ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL), - DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN), - DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN), - DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN), - DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN), + DEF_GEN4_PLL_F8_25(".pll1", 1, CLK_PLL1, CLK_MAIN), + DEF_GEN4_PLL_V8_25(".pll2", 2, CLK_PLL2, CLK_MAIN), + DEF_GEN4_PLL_V8_25(".pll3", 3, CLK_PLL3, CLK_MAIN), + 
DEF_GEN4_PLL_V8_25(".pll4", 4, CLK_PLL4, CLK_MAIN), DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN), - DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN), + DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN), DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1), @@ -156,14 +156,14 @@ static const struct cpg_core_clk r8a779h0_core_clks[] = { DEF_FIXED("viobusd2", R8A779H0_CLK_VIOBUSD2, CLK_VIOSRC, 2, 1), DEF_FIXED("vcbusd1", R8A779H0_CLK_VCBUSD1, CLK_VCSRC, 1, 1), DEF_FIXED("vcbusd2", R8A779H0_CLK_VCBUSD2, CLK_VCSRC, 2, 1), - DEF_DIV6P1("canfd", R8A779H0_CLK_CANFD, CLK_PLL5_DIV4, 0x878), - DEF_DIV6P1("csi", R8A779H0_CLK_CSI, CLK_PLL5_DIV4, 0x880), + DEF_DIV6P1("canfd", R8A779H0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR), + DEF_DIV6P1("csi", R8A779H0_CLK_CSI, CLK_PLL5_DIV4, CPG_CSICKCR), DEF_FIXED("dsiref", R8A779H0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1), - DEF_DIV6P1("dsiext", R8A779H0_CLK_DSIEXT, CLK_PLL5_DIV4, 0x884), - DEF_DIV6P1("mso", R8A779H0_CLK_MSO, CLK_PLL5_DIV4, 0x87c), + DEF_DIV6P1("dsiext", R8A779H0_CLK_DSIEXT, CLK_PLL5_DIV4, CPG_DSIEXTCKCR), + DEF_DIV6P1("mso", R8A779H0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR), - DEF_GEN4_SDH("sd0h", R8A779H0_CLK_SD0H, CLK_SDSRC, 0x870), - DEF_GEN4_SD("sd0", R8A779H0_CLK_SD0, R8A779H0_CLK_SD0H, 0x870), + DEF_GEN4_SDH("sd0h", R8A779H0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR), + DEF_GEN4_SD("sd0", R8A779H0_CLK_SD0, R8A779H0_CLK_SD0H, CPG_SD0CKCR), DEF_BASE("rpc", R8A779H0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), DEF_BASE("rpcd2", R8A779H0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779H0_CLK_RPC), @@ -172,10 +172,11 @@ static const struct cpg_core_clk r8a779h0_core_clks[] = { DEF_GEN4_MDSEL("r", R8A779H0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1), }; -static const struct mssr_mod_clk r8a779h0_mod_clks[] = { +static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = { DEF_MOD("avb0:rgmii0", 211, R8A779H0_CLK_S0D8_HSC), DEF_MOD("avb1:rgmii1", 212, R8A779H0_CLK_S0D8_HSC), DEF_MOD("avb2:rgmii2", 213, R8A779H0_CLK_S0D8_HSC), + DEF_MOD("canfd0", 328, R8A779H0_CLK_SASYNCPERD2), DEF_MOD("csi40", 331, R8A779H0_CLK_CSI), DEF_MOD("csi41", 400, R8A779H0_CLK_CSI), DEF_MOD("hscif0", 514, R8A779H0_CLK_SASYNCPERD1), @@ -195,6 +196,8 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] = { DEF_MOD("msi3", 621, R8A779H0_CLK_MSO), DEF_MOD("msi4", 622, R8A779H0_CLK_MSO), DEF_MOD("msi5", 623, R8A779H0_CLK_MSO), + DEF_MOD("pcie0", 624, R8A779H0_CLK_S0D2_HSC), + DEF_MOD("pwm", 628, R8A779H0_CLK_SASYNCPERD4), DEF_MOD("rpc-if", 629, R8A779H0_CLK_RPCD2), DEF_MOD("scif0", 702, R8A779H0_CLK_SASYNCPERD4), DEF_MOD("scif1", 703, R8A779H0_CLK_SASYNCPERD4), @@ -252,12 +255,12 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] = { #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ (((md) & BIT(13)) >> 13)) -static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = { - /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ - { 1, 192, 1, 240, 1, 192, 1, 240, 1, 192, 1, 168, 1, 16, }, - { 1, 160, 1, 200, 1, 160, 1, 200, 1, 160, 1, 140, 1, 19, }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, - { 2, 192, 1, 240, 1, 192, 1, 240, 1, 192, 1, 168, 1, 32, }, +static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = { + /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 0, 0, 0, 0, 0, 0, }, + { 2, 192, 1, 192, 1, 32, }, }; static int __init 
r8a779h0_cpg_mssr_init(struct device *dev) diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c index 16acc95f3c6231..c3c2b0c4398330 100644 --- a/drivers/clk/renesas/r9a07g043-cpg.c +++ b/drivers/clk/renesas/r9a07g043-cpg.c @@ -52,6 +52,8 @@ enum clk_ids { CLK_PLL5, CLK_PLL5_500, CLK_PLL5_250, + CLK_PLL5_FOUTPOSTDIV, + CLK_DSI_DIV, #endif CLK_PLL6, CLK_PLL6_250, @@ -120,6 +122,7 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1), DEF_FIXED(".pll5_500", CLK_PLL5_500, CLK_PLL5, 1, 6), DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_500, 1, 2), + DEF_PLL5_FOUTPOSTDIV(".pll5_foutpostdiv", CLK_PLL5_FOUTPOSTDIV, CLK_EXTAL), #endif DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6), DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2), @@ -146,6 +149,8 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { #ifdef CONFIG_ARM64 DEF_FIXED("M2", R9A07G043_CLK_M2, CLK_PLL3_533, 1, 2), DEF_FIXED("M2_DIV2", CLK_M2_DIV2, R9A07G043_CLK_M2, 1, 2), + DEF_DSI_DIV("DSI_DIV", CLK_DSI_DIV, CLK_PLL5_FOUTPOSTDIV, CLK_SET_RATE_PARENT), + DEF_FIXED("M3", R9A07G043_CLK_M3, CLK_DSI_DIV, 1, 1), #endif }; @@ -209,6 +214,12 @@ static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = { 0x564, 2), DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0, 0x564, 3), + DEF_COUPLED("lcdc_clk_a", R9A07G043_LCDC_CLK_A, R9A07G043_CLK_M0, + 0x56c, 0), + DEF_COUPLED("lcdc_clk_p", R9A07G043_LCDC_CLK_P, R9A07G043_CLK_ZT, + 0x56c, 0), + DEF_MOD("lcdc_clk_d", R9A07G043_LCDC_CLK_D, R9A07G043_CLK_M3, + 0x56c, 1), #endif DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0, 0x570, 0), @@ -309,6 +320,7 @@ static const struct rzg2l_reset r9a07g043_resets[] = { DEF_RST(R9A07G043_CRU_CMN_RSTB, 0x864, 0), DEF_RST(R9A07G043_CRU_PRESETN, 0x864, 1), DEF_RST(R9A07G043_CRU_ARESETN, 0x864, 2), + DEF_RST(R9A07G043_LCDC_RESET_N, 0x86c, 0), #endif DEF_RST(R9A07G043_SSI0_RST_M2_REG, 0x870, 0), DEF_RST(R9A07G043_SSI1_RST_M2_REG, 0x870, 1), diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c index a891bfc3ab5a88..1ce40fb51f13bd 100644 --- a/drivers/clk/renesas/r9a08g045-cpg.c +++ b/drivers/clk/renesas/r9a08g045-cpg.c @@ -193,6 +193,7 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = { DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0), DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1), DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0), + DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1), DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0), DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1), DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0), @@ -207,6 +208,10 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = { DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9), DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10), DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11), + DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0), + DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1), + DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2), + DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3), DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0), DEF_COUPLED("eth0_chi", 
R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0),
 	DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8),
@@ -226,10 +231,16 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
 	DEF_RST(R9A08G045_GIC600_GICRESET_N, 0x814, 0),
 	DEF_RST(R9A08G045_GIC600_DBG_GICRESET_N, 0x814, 1),
 	DEF_RST(R9A08G045_IA55_RESETN, 0x818, 0),
+	DEF_RST(R9A08G045_DMAC_ARESETN, 0x82c, 0),
+	DEF_RST(R9A08G045_DMAC_RST_ASYNC, 0x82c, 1),
 	DEF_RST(R9A08G045_WDT0_PRESETN, 0x848, 0),
 	DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
 	DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
 	DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
+	DEF_RST(R9A08G045_USB_U2H0_HRESETN, 0x878, 0),
+	DEF_RST(R9A08G045_USB_U2H1_HRESETN, 0x878, 1),
+	DEF_RST(R9A08G045_USB_U2P_EXL_SYSRST, 0x878, 2),
+	DEF_RST(R9A08G045_USB_PRESETN, 0x878, 3),
 	DEF_RST(R9A08G045_ETH0_RST_HW_N, 0x87c, 0),
 	DEF_RST(R9A08G045_ETH1_RST_HW_N, 0x87c, 1),
 	DEF_RST(R9A08G045_I2C0_MRST, 0x880, 0),
@@ -277,6 +288,15 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
 	DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
 	       DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)),
 	       RZG2L_PD_F_NONE),
+	DEF_PD("usb0", R9A08G045_PD_USB0,
+	       DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)),
+	       RZG2L_PD_F_NONE),
+	DEF_PD("usb1", R9A08G045_PD_USB1,
+	       DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)),
+	       RZG2L_PD_F_NONE),
+	DEF_PD("usb-phy", R9A08G045_PD_USB_PHY,
+	       DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)),
+	       RZG2L_PD_F_NONE),
 	DEF_PD("eth0", R9A08G045_PD_ETHER0,
 	       DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)),
 	       RZG2L_PD_F_NONE),
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
new file mode 100644
index 00000000000000..3ee32db5c0af79
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) CPG driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+	/* Core Clock Outputs exported to DT */
+	LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK,
+
+	/* External Input Clocks */
+	CLK_AUDIO_EXTAL,
+	CLK_RTXIN,
+	CLK_QEXTAL,
+
+	/* PLL Clocks */
+	CLK_PLLCM33,
+	CLK_PLLCLN,
+	CLK_PLLDTY,
+	CLK_PLLCA55,
+
+	/* Internal Core Clocks */
+	CLK_PLLCM33_DIV16,
+	CLK_PLLCLN_DIV2,
+	CLK_PLLCLN_DIV8,
+	CLK_PLLCLN_DIV16,
+	CLK_PLLDTY_ACPU,
+	CLK_PLLDTY_ACPU_DIV4,
+
+	/* Module Clocks */
+	MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+	{0, 2},
+	{1, 4},
+	{2, 8},
+	{3, 16},
+	{4, 64},
+	{0, 0},
+};
+
+static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
+	/* External Clock Inputs */
+	DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+	DEF_INPUT("rtxin", CLK_RTXIN),
+	DEF_INPUT("qextal", CLK_QEXTAL),
+
+	/* PLL Clocks */
+	DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+	DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+	DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+	DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+
+	/* Internal Core Clocks */
+	DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+	DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+	DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+	DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+
+	DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+	DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+	/* Core Clocks */
+	DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+	DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
+	DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3),
+	DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4),
+	DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5),
+	DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6),
+	DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7),
+	DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8),
+	DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9),
+	DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10),
+	DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11),
+	DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12),
+	DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13),
+	DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14),
+	DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15),
+	DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16),
+	DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17),
+	DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18),
+	DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15),
+	DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19),
+	DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20),
+	DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21),
+	DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22),
+	DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23),
+	DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24),
+	DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25),
+	DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26),
+	DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27),
+	DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3),
+	DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4),
+	DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5),
+	DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6),
+	DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7),
+	DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8),
+	DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9),
+	DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10),
+	DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11),
+	DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12),
+	DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13),
+	DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14),
+};
+
+static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
+	DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
+	DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
+	DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
+	DEF_RST(7, 0, 3, 1), /* GTM_3_PRESETZ */
+	DEF_RST(7, 1, 3, 2), /* GTM_4_PRESETZ */
+	DEF_RST(7, 2, 3, 3), /* GTM_5_PRESETZ */
+	DEF_RST(7, 3, 3, 4), /* GTM_6_PRESETZ */
+	DEF_RST(7, 4, 3, 5), /* GTM_7_PRESETZ */
+	DEF_RST(7, 5, 3, 6), /* WDT_0_RESET */
+	DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
+	DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
+	DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+	DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+	DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+	DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+	DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+	DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+	DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+	DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+	DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+	DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+	DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+	DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+	DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+	DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+};
+
+const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
+	/* Core Clocks */
+	.core_clks = r9a09g057_core_clks,
+	.num_core_clks = ARRAY_SIZE(r9a09g057_core_clks),
+	.last_dt_core_clk = LAST_DT_CORE_CLK,
+	.num_total_core_clks = MOD_CLK_BASE,
+
+	/* Module Clocks */
+	.mod_clks = r9a09g057_mod_clks,
+	.num_mod_clks = ARRAY_SIZE(r9a09g057_mod_clks),
+	.num_hw_mod_clks = 25 * 16,
+
+	/* Resets */
+	.resets = r9a09g057_resets,
+	.num_resets = ARRAY_SIZE(r9a09g057_resets),
+};
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index 77a4bb3e17f348..31aa790fd003d4 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -45,7 +45,6 @@ static u32 cpg_mode __initdata;
 #define CPG_PLL6CR1 0x8d8
 
 #define CPG_PLLxCR0_KICK BIT(31)
-#define CPG_PLLxCR0_NI GENMASK(27, 20) /* Integer mult. factor */
 #define CPG_PLLxCR0_SSMODE GENMASK(18, 16) /* PLL mode */
 #define CPG_PLLxCR0_SSMODE_FM BIT(18) /* Fractional Multiplication */
 #define CPG_PLLxCR0_SSMODE_DITH BIT(17) /* Frequency Dithering */
@@ -53,35 +52,57 @@ static u32 cpg_mode __initdata;
 #define CPG_PLLxCR0_SSFREQ GENMASK(14, 8) /* SSCG Modulation Frequency */
 #define CPG_PLLxCR0_SSDEPT GENMASK(6, 0) /* SSCG Modulation Depth */
 
-#define SSMODE_FM BIT(2) /* Fractional Multiplication */
-#define SSMODE_DITHER BIT(1) /* Frequency Dithering */
-#define SSMODE_CENTER BIT(0) /* Center (vs. Down) Spread Dithering */
+/* Fractional 8.25 PLL */
+#define CPG_PLLxCR0_NI8 GENMASK(27, 20) /* Integer mult. factor */
+#define CPG_PLLxCR1_NF25 GENMASK(24, 0) /* Fractional mult. factor */
+
+/* Fractional 9.24 PLL */
+#define CPG_PLLxCR0_NI9 GENMASK(28, 20) /* Integer mult. factor */
+#define CPG_PLLxCR1_NF24 GENMASK(23, 0) /* Fractional mult. factor */
+
+#define CPG_PLLxCR_STC GENMASK(30, 24) /* R_Car V3U PLLxCR */
+
+#define CPG_RPCCKCR 0x874 /* RPC Clock Freq.
Control Register */ + +#define CPG_SD0CKCR1 0x8a4 /* SD-IF0 Clock Freq. Control Reg. 1 */ + +#define CPG_SD0CKCR1_SDSRC_SEL GENMASK(30, 29) /* SDSRC clock freq. select */ /* PLL Clocks */ struct cpg_pll_clk { struct clk_hw hw; void __iomem *pllcr0_reg; + void __iomem *pllcr1_reg; void __iomem *pllecr_reg; u32 pllecr_pllst_mask; }; #define to_pll_clk(_hw) container_of(_hw, struct cpg_pll_clk, hw) -static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static unsigned long cpg_pll_8_25_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) { struct cpg_pll_clk *pll_clk = to_pll_clk(hw); - unsigned int mult; - - mult = FIELD_GET(CPG_PLLxCR0_NI, readl(pll_clk->pllcr0_reg)) + 1; + u32 cr0 = readl(pll_clk->pllcr0_reg); + unsigned int ni, nf; + unsigned long rate; + + ni = (FIELD_GET(CPG_PLLxCR0_NI8, cr0) + 1) * 2; + rate = parent_rate * ni; + if (cr0 & CPG_PLLxCR0_SSMODE_FM) { + nf = FIELD_GET(CPG_PLLxCR1_NF25, readl(pll_clk->pllcr1_reg)); + rate += mul_u64_u32_shr(parent_rate, nf, 24); + } - return parent_rate * mult * 2; + return rate; } -static int cpg_pll_clk_determine_rate(struct clk_hw *hw, - struct clk_rate_request *req) +static int cpg_pll_8_25_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - unsigned int min_mult, max_mult, mult; + struct cpg_pll_clk *pll_clk = to_pll_clk(hw); + unsigned int min_mult, max_mult, ni, nf; + u32 cr0 = readl(pll_clk->pllcr0_reg); unsigned long prate; prate = req->best_parent_rate * 2; @@ -90,28 +111,58 @@ static int cpg_pll_clk_determine_rate(struct clk_hw *hw, if (max_mult < min_mult) return -EINVAL; - mult = DIV_ROUND_CLOSEST_ULL(req->rate, prate); - mult = clamp(mult, min_mult, max_mult); + if (cr0 & CPG_PLLxCR0_SSMODE_FM) { + ni = div64_ul(req->rate, prate); + if (ni < min_mult) { + ni = min_mult; + nf = 0; + } else { + ni = min(ni, max_mult); + nf = div64_ul((u64)(req->rate - prate * ni) << 24, + req->best_parent_rate); + } + } else { + ni = DIV_ROUND_CLOSEST_ULL(req->rate, prate); + ni = clamp(ni, min_mult, max_mult); + nf = 0; + } + req->rate = prate * ni + mul_u64_u32_shr(req->best_parent_rate, nf, 24); - req->rate = prate * mult; return 0; } -static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) +static int cpg_pll_8_25_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { struct cpg_pll_clk *pll_clk = to_pll_clk(hw); - unsigned int mult; + unsigned long prate = parent_rate * 2; + u32 cr0 = readl(pll_clk->pllcr0_reg); + unsigned int ni, nf; u32 val; - mult = DIV_ROUND_CLOSEST_ULL(rate, parent_rate * 2); - mult = clamp(mult, 1U, 256U); + if (cr0 & CPG_PLLxCR0_SSMODE_FM) { + ni = div64_ul(rate, prate); + if (ni < 1) { + ni = 1; + nf = 0; + } else { + ni = min(ni, 256U); + nf = div64_ul((u64)(rate - prate * ni) << 24, + parent_rate); + } + } else { + ni = DIV_ROUND_CLOSEST_ULL(rate, prate); + ni = clamp(ni, 1U, 256U); + } if (readl(pll_clk->pllcr0_reg) & CPG_PLLxCR0_KICK) return -EBUSY; - cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI, - FIELD_PREP(CPG_PLLxCR0_NI, mult - 1)); + cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI8, + FIELD_PREP(CPG_PLLxCR0_NI8, ni - 1)); + if (cr0 & CPG_PLLxCR0_SSMODE_FM) + cpg_reg_modify(pll_clk->pllcr1_reg, CPG_PLLxCR1_NF25, + FIELD_PREP(CPG_PLLxCR1_NF25, nf)); /* * Set KICK bit in PLLxCR0 to update hardware setting and wait for @@ -132,22 +183,55 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate, val & pll_clk->pllecr_pllst_mask, 0, 1000); } 
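The hunk above replaces the old integer-only multiplier handling with split integer/fractional fields: NI in PLLxCR0 and NF in PLLxCR1, the latter applied with a 24-bit shift. Below is a minimal stand-alone sketch of that arithmetic, mirroring what cpg_pll_8_25_clk_recalc_rate() does; the helper name and the sample values are illustrative only and are not part of the patch.

/*
 * Illustrative sketch (not part of the patch) of the fractional 8.25 PLL
 * rate computation implemented by cpg_pll_8_25_clk_recalc_rate() above.
 */
static unsigned long sketch_pll_8_25_rate(unsigned long parent_rate,
					   unsigned int ni_field,
					   unsigned int nf_field,
					   int fractional_mode)
{
	/* Integer part: the hardware multiplies the parent by (NI + 1) * 2 */
	unsigned long rate = parent_rate * (ni_field + 1) * 2;

	/* Fractional part: NF / 2^24 of the parent, only when SSMODE_FM is set */
	if (fractional_mode)
		rate += (unsigned long)
			(((unsigned long long)parent_rate * nf_field) >> 24);

	return rate;
}

/*
 * Worked example with made-up register values: parent_rate = 16666666 Hz and
 * an NI field of 59 give 16666666 * 60 * 2 = 1999999920 Hz (~2 GHz); a
 * non-zero NF adds parent_rate * NF / 2^24 on top. cpg_pll_8_25_clk_set_rate()
 * above performs the inverse split, programming NI - 1 and the 24-bit NF
 * remainder back into PLLxCR0/PLLxCR1.
 */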
-static const struct clk_ops cpg_pll_clk_ops = { - .recalc_rate = cpg_pll_clk_recalc_rate, - .determine_rate = cpg_pll_clk_determine_rate, - .set_rate = cpg_pll_clk_set_rate, +static const struct clk_ops cpg_pll_f8_25_clk_ops = { + .recalc_rate = cpg_pll_8_25_clk_recalc_rate, +}; + +static const struct clk_ops cpg_pll_v8_25_clk_ops = { + .recalc_rate = cpg_pll_8_25_clk_recalc_rate, + .determine_rate = cpg_pll_8_25_clk_determine_rate, + .set_rate = cpg_pll_8_25_clk_set_rate, +}; + +static unsigned long cpg_pll_9_24_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cpg_pll_clk *pll_clk = to_pll_clk(hw); + u32 cr0 = readl(pll_clk->pllcr0_reg); + unsigned int ni, nf; + unsigned long rate; + + ni = FIELD_GET(CPG_PLLxCR0_NI9, cr0) + 1; + rate = parent_rate * ni; + if (cr0 & CPG_PLLxCR0_SSMODE_FM) { + nf = FIELD_GET(CPG_PLLxCR1_NF24, readl(pll_clk->pllcr1_reg)); + rate += mul_u64_u32_shr(parent_rate, nf, 24); + } else { + rate *= 2; + } + + return rate; +} + +static const struct clk_ops cpg_pll_f9_24_clk_ops = { + .recalc_rate = cpg_pll_9_24_clk_recalc_rate, }; static struct clk * __init cpg_pll_clk_register(const char *name, const char *parent_name, void __iomem *base, - unsigned int cr0_offset, - unsigned int cr1_offset, - unsigned int index) - + unsigned int index, + const struct clk_ops *ops) { - struct cpg_pll_clk *pll_clk; + static const struct { u16 cr0, cr1; } pll_cr_offsets[] __initconst = { + [1 - 1] = { CPG_PLL1CR0, CPG_PLL1CR1 }, + [2 - 1] = { CPG_PLL2CR0, CPG_PLL2CR1 }, + [3 - 1] = { CPG_PLL3CR0, CPG_PLL3CR1 }, + [4 - 1] = { CPG_PLL4CR0, CPG_PLL4CR1 }, + [6 - 1] = { CPG_PLL6CR0, CPG_PLL6CR1 }, + }; struct clk_init_data init = {}; + struct cpg_pll_clk *pll_clk; struct clk *clk; pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); @@ -155,25 +239,23 @@ static struct clk * __init cpg_pll_clk_register(const char *name, return ERR_PTR(-ENOMEM); init.name = name; - init.ops = &cpg_pll_clk_ops; + init.ops = ops; init.parent_names = &parent_name; init.num_parents = 1; pll_clk->hw.init = &init; - pll_clk->pllcr0_reg = base + cr0_offset; + pll_clk->pllcr0_reg = base + pll_cr_offsets[index - 1].cr0; + pll_clk->pllcr1_reg = base + pll_cr_offsets[index - 1].cr1; pll_clk->pllecr_reg = base + CPG_PLLECR; pll_clk->pllecr_pllst_mask = CPG_PLLECR_PLLST(index); - /* Disable Fractional Multiplication and Frequency Dithering */ - writel(0, base + cr1_offset); - cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_SSMODE, 0); - clk = clk_register(NULL, &pll_clk->hw); if (IS_ERR(clk)) kfree(pll_clk); return clk; } + /* * Z0 Clock & Z1 Clock */ @@ -358,51 +440,41 @@ struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev, div = cpg_pll_config->pll1_div; break; - case CLK_TYPE_GEN4_PLL2_VAR: - /* - * PLL2 is implemented as a custom clock, to change the - * multiplier when cpufreq changes between normal and boost - * modes. 
- */ - return cpg_pll_clk_register(core->name, __clk_get_name(parent), - base, CPG_PLL2CR0, CPG_PLL2CR1, 2); - - case CLK_TYPE_GEN4_PLL2: - mult = cpg_pll_config->pll2_mult; - div = cpg_pll_config->pll2_div; - break; - - case CLK_TYPE_GEN4_PLL3: - mult = cpg_pll_config->pll3_mult; - div = cpg_pll_config->pll3_div; - break; - - case CLK_TYPE_GEN4_PLL4: - mult = cpg_pll_config->pll4_mult; - div = cpg_pll_config->pll4_div; - break; - case CLK_TYPE_GEN4_PLL5: mult = cpg_pll_config->pll5_mult; div = cpg_pll_config->pll5_div; break; - case CLK_TYPE_GEN4_PLL6: - mult = cpg_pll_config->pll6_mult; - div = cpg_pll_config->pll6_div; - break; - case CLK_TYPE_GEN4_PLL2X_3X: value = readl(base + core->offset); - mult = (((value >> 24) & 0x7f) + 1) * 2; + mult = (FIELD_GET(CPG_PLLxCR_STC, value) + 1) * 2; break; + case CLK_TYPE_GEN4_PLL_F8_25: + return cpg_pll_clk_register(core->name, __clk_get_name(parent), + base, core->offset, + &cpg_pll_f8_25_clk_ops); + + case CLK_TYPE_GEN4_PLL_V8_25: + return cpg_pll_clk_register(core->name, __clk_get_name(parent), + base, core->offset, + &cpg_pll_v8_25_clk_ops); + + case CLK_TYPE_GEN4_PLL_V9_24: + /* Variable fractional 9.24 is not yet supported, using fixed */ + fallthrough; + case CLK_TYPE_GEN4_PLL_F9_24: + return cpg_pll_clk_register(core->name, __clk_get_name(parent), + base, core->offset, + &cpg_pll_f9_24_clk_ops); + case CLK_TYPE_GEN4_Z: return cpg_z_clk_register(core->name, __clk_get_name(parent), base, core->div, core->offset); case CLK_TYPE_GEN4_SDSRC: - div = ((readl(base + SD0CKCR1) >> 29) & 0x03) + 4; + value = readl(base + CPG_SD0CKCR1); + div = FIELD_GET(CPG_SD0CKCR1_SDSRC_SEL, value) + 4; break; case CLK_TYPE_GEN4_SDH: diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h index 006537e29e4eb1..717fd148464fed 100644 --- a/drivers/clk/renesas/rcar-gen4-cpg.h +++ b/drivers/clk/renesas/rcar-gen4-cpg.h @@ -12,13 +12,12 @@ enum rcar_gen4_clk_types { CLK_TYPE_GEN4_MAIN = CLK_TYPE_CUSTOM, CLK_TYPE_GEN4_PLL1, - CLK_TYPE_GEN4_PLL2, - CLK_TYPE_GEN4_PLL2_VAR, CLK_TYPE_GEN4_PLL2X_3X, /* r8a779a0 only */ - CLK_TYPE_GEN4_PLL3, - CLK_TYPE_GEN4_PLL4, CLK_TYPE_GEN4_PLL5, - CLK_TYPE_GEN4_PLL6, + CLK_TYPE_GEN4_PLL_F8_25, /* Fixed fractional 8.25 PLL */ + CLK_TYPE_GEN4_PLL_V8_25, /* Variable fractional 8.25 PLL */ + CLK_TYPE_GEN4_PLL_F9_24, /* Fixed fractional 9.24 PLL */ + CLK_TYPE_GEN4_PLL_V9_24, /* Variable fractional 9.24 PLL */ CLK_TYPE_GEN4_SDSRC, CLK_TYPE_GEN4_SDH, CLK_TYPE_GEN4_SD, @@ -47,6 +46,18 @@ enum rcar_gen4_clk_types { #define DEF_GEN4_OSC(_name, _id, _parent, _div) \ DEF_BASE(_name, _id, CLK_TYPE_GEN4_OSC, _parent, .div = _div) +#define DEF_GEN4_PLL_F8_25(_name, _idx, _id, _parent) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_F8_25, _parent, .offset = _idx) + +#define DEF_GEN4_PLL_V8_25(_name, _idx, _id, _parent) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_V8_25, _parent, .offset = _idx) + +#define DEF_GEN4_PLL_F9_24(_name, _idx, _id, _parent) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_F9_24, _parent, .offset = _idx) + +#define DEF_GEN4_PLL_V9_24(_name, _idx, _id, _parent) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_V9_24, _parent, .offset = _idx) + #define DEF_GEN4_Z(_name, _id, _type, _parent, _div, _offset) \ DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset) @@ -54,21 +65,16 @@ struct rcar_gen4_cpg_pll_config { u8 extal_div; u8 pll1_mult; u8 pll1_div; - u8 pll2_mult; - u8 pll2_div; - u8 pll3_mult; - u8 pll3_div; - u8 pll4_mult; - u8 pll4_div; u8 pll5_mult; u8 pll5_div; - u8 pll6_mult; - u8 pll6_div; u8 
osc_prediv; }; -#define CPG_RPCCKCR 0x874 -#define SD0CKCR1 0x8a4 +#define CPG_SD0CKCR 0x870 /* SD-IF0 Clock Frequency Control Register */ +#define CPG_CANFDCKCR 0x878 /* CAN-FD Clock Frequency Control Register */ +#define CPG_MSOCKCR 0x87c /* MSIOF Clock Frequency Control Register */ +#define CPG_CSICKCR 0x880 /* CSI Clock Frequency Control Register */ +#define CPG_DSIEXTCKCR 0x884 /* DSI Clock Frequency Control Register */ struct clk *rcar_gen4_cpg_clk_register(struct device *dev, const struct cpg_core_clk *core, const struct cpg_mssr_info *info, diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c index de4896cf5f403a..421ae973ea8e0d 100644 --- a/drivers/clk/renesas/rcar-usb2-clock-sel.c +++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c @@ -212,7 +212,7 @@ static struct platform_driver rcar_usb2_clock_sel_driver = { .pm = &rcar_usb2_clock_sel_pm_ops, }, .probe = rcar_usb2_clock_sel_probe, - .remove_new = rcar_usb2_clock_sel_remove, + .remove = rcar_usb2_clock_sel_remove, }; builtin_platform_driver(rcar_usb2_clock_sel_driver); diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index 04b78064d4e01c..88bf39e8c79c83 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -339,8 +339,7 @@ static const struct clk_ops rzg3s_div_clk_ops = { }; static struct clk * __init -rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks, - void __iomem *base, struct rzg2l_cpg_priv *priv) +rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv) { struct div_hw_data *div_hw_data; struct clk_init_data init = {}; @@ -351,7 +350,7 @@ rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks, u32 max = 0; int ret; - parent = clks[core->parent & 0xffff]; + parent = priv->clks[core->parent]; if (IS_ERR(parent)) return ERR_CAST(parent); @@ -400,16 +399,15 @@ rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks, static struct clk * __init rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core, - struct clk **clks, - void __iomem *base, struct rzg2l_cpg_priv *priv) { + void __iomem *base = priv->base; struct device *dev = priv->dev; const struct clk *parent; const char *parent_name; struct clk_hw *clk_hw; - parent = clks[core->parent & 0xffff]; + parent = priv->clks[core->parent]; if (IS_ERR(parent)) return ERR_CAST(parent); @@ -440,7 +438,6 @@ rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core, static struct clk * __init rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core, - void __iomem *base, struct rzg2l_cpg_priv *priv) { const struct clk_hw *clk_hw; @@ -448,7 +445,7 @@ rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core, clk_hw = devm_clk_hw_register_mux(priv->dev, core->name, core->parent_names, core->num_parents, core->flag, - base + GET_REG_OFFSET(core->conf), + priv->base + GET_REG_OFFSET(core->conf), GET_SHIFT(core->conf), GET_WIDTH(core->conf), core->mux_flags, &priv->rmw_lock); @@ -508,7 +505,6 @@ static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = { static struct clk * __init rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core, - void __iomem *base, struct rzg2l_cpg_priv *priv) { struct sd_mux_hw_data *sd_mux_hw_data; @@ -652,7 +648,6 @@ static const struct clk_ops rzg2l_cpg_dsi_div_ops = { static struct clk * __init rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core, - struct clk **clks, struct rzg2l_cpg_priv *priv) { struct dsi_div_hw_data *clk_hw_data; 
@@ -662,7 +657,7 @@ rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core, struct clk_hw *clk_hw; int ret; - parent = clks[core->parent & 0xffff]; + parent = priv->clks[core->parent]; if (IS_ERR(parent)) return ERR_CAST(parent); @@ -900,7 +895,6 @@ static const struct clk_ops rzg2l_cpg_sipll5_ops = { static struct clk * __init rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core, - struct clk **clks, struct rzg2l_cpg_priv *priv) { const struct clk *parent; @@ -910,7 +904,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core, struct clk_hw *clk_hw; int ret; - parent = clks[core->parent & 0xffff]; + parent = priv->clks[core->parent]; if (IS_ERR(parent)) return ERR_CAST(parent); @@ -1013,8 +1007,6 @@ static const struct clk_ops rzg3s_cpg_pll_ops = { static struct clk * __init rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core, - struct clk **clks, - void __iomem *base, struct rzg2l_cpg_priv *priv, const struct clk_ops *ops) { @@ -1023,8 +1015,9 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core, struct clk_init_data init; const char *parent_name; struct pll_clk *pll_clk; + int ret; - parent = clks[core->parent & 0xffff]; + parent = priv->clks[core->parent]; if (IS_ERR(parent)) return ERR_CAST(parent); @@ -1041,11 +1034,15 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core, pll_clk->hw.init = &init; pll_clk->conf = core->conf; - pll_clk->base = base; + pll_clk->base = priv->base; pll_clk->priv = priv; pll_clk->type = core->type; - return clk_register(NULL, &pll_clk->hw); + ret = devm_clk_hw_register(dev, &pll_clk->hw); + if (ret) + return ERR_PTR(ret); + + return pll_clk->hw.clk; } static struct clk @@ -1102,6 +1099,7 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core, struct device *dev = priv->dev; unsigned int id = core->id, div = core->div; const char *parent_name; + struct clk_hw *clk_hw; WARN_DEBUG(id >= priv->num_core_clks); WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT); @@ -1124,39 +1122,40 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core, } parent_name = __clk_get_name(parent); - clk = clk_register_fixed_factor(NULL, core->name, - parent_name, CLK_SET_RATE_PARENT, - core->mult, div); + clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name, + CLK_SET_RATE_PARENT, + core->mult, div); + if (IS_ERR(clk_hw)) + clk = ERR_CAST(clk_hw); + else + clk = clk_hw->clk; break; case CLK_TYPE_SAM_PLL: - clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv, - &rzg2l_cpg_pll_ops); + clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops); break; case CLK_TYPE_G3S_PLL: - clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv, - &rzg3s_cpg_pll_ops); + clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops); break; case CLK_TYPE_SIPLL5: - clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv); + clk = rzg2l_cpg_sipll5_register(core, priv); break; case CLK_TYPE_DIV: - clk = rzg2l_cpg_div_clk_register(core, priv->clks, - priv->base, priv); + clk = rzg2l_cpg_div_clk_register(core, priv); break; case CLK_TYPE_G3S_DIV: - clk = rzg3s_cpg_div_clk_register(core, priv->clks, priv->base, priv); + clk = rzg3s_cpg_div_clk_register(core, priv); break; case CLK_TYPE_MUX: - clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv); + clk = rzg2l_cpg_mux_clk_register(core, priv); break; case CLK_TYPE_SD_MUX: - clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv); + clk = rzg2l_cpg_sd_mux_clk_register(core, priv); break; case CLK_TYPE_PLL5_4_MUX: clk = 
rzg2l_cpg_pll5_4_mux_clk_register(core, priv); break; case CLK_TYPE_DSI_DIV: - clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv); + clk = rzg2l_cpg_dsi_div_clk_register(core, priv); break; default: goto fail; @@ -1337,6 +1336,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod, struct clk *parent, *clk; const char *parent_name; unsigned int i; + int ret; WARN_DEBUG(id < priv->num_core_clks); WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks); @@ -1380,10 +1380,13 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod, clock->priv = priv; clock->hw.init = &init; - clk = clk_register(NULL, &clock->hw); - if (IS_ERR(clk)) + ret = devm_clk_hw_register(dev, &clock->hw); + if (ret) { + clk = ERR_PTR(ret); goto fail; + } + clk = clock->hw.clk; dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk)); priv->clks[id] = clk; diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c new file mode 100644 index 00000000000000..b524a9d33610f6 --- /dev/null +++ b/drivers/clk/renesas/rzv2h-cpg.c @@ -0,0 +1,853 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/V2H(P) Clock Pulse Generator + * + * Copyright (C) 2024 Renesas Electronics Corp. + * + * Based on rzg2l-cpg.c + * + * Copyright (C) 2015 Glider bvba + * Copyright (C) 2013 Ideas On Board SPRL + * Copyright (C) 2015 Renesas Electronics Corp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "rzv2h-cpg.h" + +#ifdef DEBUG +#define WARN_DEBUG(x) WARN_ON(x) +#else +#define WARN_DEBUG(x) do { } while (0) +#endif + +#define GET_CLK_ON_OFFSET(x) (0x600 + ((x) * 4)) +#define GET_CLK_MON_OFFSET(x) (0x800 + ((x) * 4)) +#define GET_RST_OFFSET(x) (0x900 + ((x) * 4)) +#define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4)) + +#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val))) +#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val)) +#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val)) +#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val)) + +#define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16) + +#define GET_MOD_CLK_ID(base, index, bit) \ + ((base) + ((((index) * (16))) + (bit))) + +#define CPG_CLKSTATUS0 (0x700) + +/** + * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data + * + * @dev: CPG device + * @base: CPG register block base address + * @rmw_lock: protects register accesses + * @clks: Array containing all Core and Module Clocks + * @num_core_clks: Number of Core Clocks in clks[] + * @num_mod_clks: Number of Module Clocks in clks[] + * @resets: Array of resets + * @num_resets: Number of Module Resets in info->resets[] + * @last_dt_core_clk: ID of the last Core Clock exported to DT + * @rcdev: Reset controller entity + */ +struct rzv2h_cpg_priv { + struct device *dev; + void __iomem *base; + spinlock_t rmw_lock; + + struct clk **clks; + unsigned int num_core_clks; + unsigned int num_mod_clks; + struct rzv2h_reset *resets; + unsigned int num_resets; + unsigned int last_dt_core_clk; + + struct reset_controller_dev rcdev; +}; + +#define rcdev_to_priv(x) container_of(x, struct rzv2h_cpg_priv, rcdev) + +struct pll_clk { + struct rzv2h_cpg_priv *priv; + void __iomem *base; + struct clk_hw hw; + unsigned int conf; + unsigned int type; +}; + +#define to_pll(_hw) container_of(_hw, struct pll_clk, hw) + +/** + * struct mod_clock - Module clock + * + * @priv: CPG private data + * @hw: handle between common and hardware-specific interfaces + * @on_index: register offset + * @on_bit: 
ON/MON bit + * @mon_index: monitor register offset + * @mon_bit: montor bit + */ +struct mod_clock { + struct rzv2h_cpg_priv *priv; + struct clk_hw hw; + u8 on_index; + u8 on_bit; + s8 mon_index; + u8 mon_bit; +}; + +#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw) + +/** + * struct ddiv_clk - DDIV clock + * + * @priv: CPG private data + * @div: divider clk + * @mon: monitor bit in CPG_CLKSTATUS0 register + */ +struct ddiv_clk { + struct rzv2h_cpg_priv *priv; + struct clk_divider div; + u8 mon; +}; + +#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div) + +static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct pll_clk *pll_clk = to_pll(hw); + struct rzv2h_cpg_priv *priv = pll_clk->priv; + unsigned int clk1, clk2; + u64 rate; + + if (!PLL_CLK_ACCESS(pll_clk->conf)) + return 0; + + clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf)); + clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf)); + + rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1), + 16 + SDIV(clk2)); + + return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1)); +} + +static const struct clk_ops rzv2h_cpg_pll_ops = { + .recalc_rate = rzv2h_cpg_pll_clk_recalc_rate, +}; + +static struct clk * __init +rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core, + struct rzv2h_cpg_priv *priv, + const struct clk_ops *ops) +{ + void __iomem *base = priv->base; + struct device *dev = priv->dev; + struct clk_init_data init; + const struct clk *parent; + const char *parent_name; + struct pll_clk *pll_clk; + int ret; + + parent = priv->clks[core->parent]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + + pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL); + if (!pll_clk) + return ERR_PTR(-ENOMEM); + + parent_name = __clk_get_name(parent); + init.name = core->name; + init.ops = ops; + init.flags = 0; + init.parent_names = &parent_name; + init.num_parents = 1; + + pll_clk->hw.init = &init; + pll_clk->conf = core->cfg.conf; + pll_clk->base = base; + pll_clk->priv = priv; + pll_clk->type = core->type; + + ret = devm_clk_hw_register(dev, &pll_clk->hw); + if (ret) + return ERR_PTR(ret); + + return pll_clk->hw.clk; +} + +static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_divider *divider = to_clk_divider(hw); + unsigned int val; + + val = readl(divider->reg) >> divider->shift; + val &= clk_div_mask(divider->width); + + return divider_recalc_rate(hw, parent_rate, val, divider->table, + divider->flags, divider->width); +} + +static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_divider *divider = to_clk_divider(hw); + + return divider_round_rate(hw, rate, prate, divider->table, + divider->width, divider->flags); +} + +static int rzv2h_ddiv_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_divider *divider = to_clk_divider(hw); + + return divider_determine_rate(hw, req, divider->table, divider->width, + divider->flags); +} + +static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon) +{ + u32 bitmask = BIT(mon); + u32 val; + + return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200); +} + +static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_divider *divider = to_clk_divider(hw); + struct ddiv_clk *ddiv = to_ddiv_clock(divider); + struct rzv2h_cpg_priv *priv = ddiv->priv; + 
unsigned long flags = 0; + int value; + u32 val; + int ret; + + value = divider_get_val(rate, parent_rate, divider->table, + divider->width, divider->flags); + if (value < 0) + return value; + + spin_lock_irqsave(divider->lock, flags); + + ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon); + if (ret) + goto ddiv_timeout; + + val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift); + val &= ~(clk_div_mask(divider->width) << divider->shift); + val |= (u32)value << divider->shift; + writel(val, divider->reg); + + ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon); + if (ret) + goto ddiv_timeout; + + spin_unlock_irqrestore(divider->lock, flags); + + return 0; + +ddiv_timeout: + spin_unlock_irqrestore(divider->lock, flags); + return ret; +} + +static const struct clk_ops rzv2h_ddiv_clk_divider_ops = { + .recalc_rate = rzv2h_ddiv_recalc_rate, + .round_rate = rzv2h_ddiv_round_rate, + .determine_rate = rzv2h_ddiv_determine_rate, + .set_rate = rzv2h_ddiv_set_rate, +}; + +static struct clk * __init +rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core, + struct rzv2h_cpg_priv *priv) +{ + struct ddiv cfg_ddiv = core->cfg.ddiv; + struct clk_init_data init = {}; + struct device *dev = priv->dev; + u8 shift = cfg_ddiv.shift; + u8 width = cfg_ddiv.width; + const struct clk *parent; + const char *parent_name; + struct clk_divider *div; + struct ddiv_clk *ddiv; + int ret; + + parent = priv->clks[core->parent]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + + parent_name = __clk_get_name(parent); + + if ((shift + width) > 16) + return ERR_PTR(-EINVAL); + + ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL); + if (!ddiv) + return ERR_PTR(-ENOMEM); + + init.name = core->name; + init.ops = &rzv2h_ddiv_clk_divider_ops; + init.parent_names = &parent_name; + init.num_parents = 1; + + ddiv->priv = priv; + ddiv->mon = cfg_ddiv.monbit; + div = &ddiv->div; + div->reg = priv->base + cfg_ddiv.offset; + div->shift = shift; + div->width = width; + div->flags = core->flag; + div->lock = &priv->rmw_lock; + div->hw.init = &init; + div->table = core->dtable; + + ret = devm_clk_hw_register(dev, &div->hw); + if (ret) + return ERR_PTR(ret); + + return div->hw.clk; +} + +static struct clk +*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec, + void *data) +{ + unsigned int clkidx = clkspec->args[1]; + struct rzv2h_cpg_priv *priv = data; + struct device *dev = priv->dev; + const char *type; + struct clk *clk; + + switch (clkspec->args[0]) { + case CPG_CORE: + type = "core"; + if (clkidx > priv->last_dt_core_clk) { + dev_err(dev, "Invalid %s clock index %u\n", type, clkidx); + return ERR_PTR(-EINVAL); + } + clk = priv->clks[clkidx]; + break; + + case CPG_MOD: + type = "module"; + if (clkidx >= priv->num_mod_clks) { + dev_err(dev, "Invalid %s clock index %u\n", type, clkidx); + return ERR_PTR(-EINVAL); + } + clk = priv->clks[priv->num_core_clks + clkidx]; + break; + + default: + dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR(clk)) + dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx, + PTR_ERR(clk)); + else + dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n", + clkspec->args[0], clkspec->args[1], clk, + clk_get_rate(clk)); + return clk; +} + +static void __init +rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core, + struct rzv2h_cpg_priv *priv) +{ + struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent; + unsigned int id = core->id, div = core->div; + struct device *dev = priv->dev; + const char 
*parent_name; + struct clk_hw *clk_hw; + + WARN_DEBUG(id >= priv->num_core_clks); + WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT); + + switch (core->type) { + case CLK_TYPE_IN: + clk = of_clk_get_by_name(priv->dev->of_node, core->name); + break; + case CLK_TYPE_FF: + WARN_DEBUG(core->parent >= priv->num_core_clks); + parent = priv->clks[core->parent]; + if (IS_ERR(parent)) { + clk = parent; + goto fail; + } + + parent_name = __clk_get_name(parent); + clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, + parent_name, CLK_SET_RATE_PARENT, + core->mult, div); + if (IS_ERR(clk_hw)) + clk = ERR_CAST(clk_hw); + else + clk = clk_hw->clk; + break; + case CLK_TYPE_PLL: + clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops); + break; + case CLK_TYPE_DDIV: + clk = rzv2h_cpg_ddiv_clk_register(core, priv); + break; + default: + goto fail; + } + + if (IS_ERR_OR_NULL(clk)) + goto fail; + + dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk)); + priv->clks[id] = clk; + return; + +fail: + dev_err(dev, "Failed to register core clock %s: %ld\n", + core->name, PTR_ERR(clk)); +} + +static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable) +{ + struct mod_clock *clock = to_mod_clock(hw); + unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index); + struct rzv2h_cpg_priv *priv = clock->priv; + u32 bitmask = BIT(clock->on_bit); + struct device *dev = priv->dev; + u32 value; + int error; + + dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk, + enable ? "ON" : "OFF"); + + value = bitmask << 16; + if (enable) + value |= bitmask; + + writel(value, priv->base + reg); + + if (!enable || clock->mon_index < 0) + return 0; + + reg = GET_CLK_MON_OFFSET(clock->mon_index); + bitmask = BIT(clock->mon_bit); + error = readl_poll_timeout_atomic(priv->base + reg, value, + value & bitmask, 0, 10); + if (error) + dev_err(dev, "Failed to enable CLK_ON %p\n", + priv->base + reg); + + return error; +} + +static int rzv2h_mod_clock_enable(struct clk_hw *hw) +{ + return rzv2h_mod_clock_endisable(hw, true); +} + +static void rzv2h_mod_clock_disable(struct clk_hw *hw) +{ + rzv2h_mod_clock_endisable(hw, false); +} + +static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw) +{ + struct mod_clock *clock = to_mod_clock(hw); + struct rzv2h_cpg_priv *priv = clock->priv; + u32 bitmask; + u32 offset; + + if (clock->mon_index >= 0) { + offset = GET_CLK_MON_OFFSET(clock->mon_index); + bitmask = BIT(clock->mon_bit); + } else { + offset = GET_CLK_ON_OFFSET(clock->on_index); + bitmask = BIT(clock->on_bit); + } + + return readl(priv->base + offset) & bitmask; +} + +static const struct clk_ops rzv2h_mod_clock_ops = { + .enable = rzv2h_mod_clock_enable, + .disable = rzv2h_mod_clock_disable, + .is_enabled = rzv2h_mod_clock_is_enabled, +}; + +static void __init +rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod, + struct rzv2h_cpg_priv *priv) +{ + struct mod_clock *clock = NULL; + struct device *dev = priv->dev; + struct clk_init_data init; + struct clk *parent, *clk; + const char *parent_name; + unsigned int id; + int ret; + + id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit); + WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks); + WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks); + WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT); + + parent = priv->clks[mod->parent]; + if (IS_ERR(parent)) { + clk = parent; + goto fail; + } + + clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL); + if (!clock) { + clk = ERR_PTR(-ENOMEM); + goto fail; + } + + init.name = 
mod->name; + init.ops = &rzv2h_mod_clock_ops; + init.flags = CLK_SET_RATE_PARENT; + if (mod->critical) + init.flags |= CLK_IS_CRITICAL; + + parent_name = __clk_get_name(parent); + init.parent_names = &parent_name; + init.num_parents = 1; + + clock->on_index = mod->on_index; + clock->on_bit = mod->on_bit; + clock->mon_index = mod->mon_index; + clock->mon_bit = mod->mon_bit; + clock->priv = priv; + clock->hw.init = &init; + + ret = devm_clk_hw_register(dev, &clock->hw); + if (ret) { + clk = ERR_PTR(ret); + goto fail; + } + + priv->clks[id] = clock->hw.clk; + + return; + +fail: + dev_err(dev, "Failed to register module clock %s: %ld\n", + mod->name, PTR_ERR(clk)); +} + +static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); + unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index); + u32 mask = BIT(priv->resets[id].reset_bit); + u8 monbit = priv->resets[id].mon_bit; + u32 value = mask << 16; + + dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg); + + writel(value, priv->base + reg); + + reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index); + mask = BIT(monbit); + + return readl_poll_timeout_atomic(priv->base + reg, value, + value & mask, 10, 200); +} + +static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); + unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index); + u32 mask = BIT(priv->resets[id].reset_bit); + u8 monbit = priv->resets[id].mon_bit; + u32 value = (mask << 16) | mask; + + dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg); + + writel(value, priv->base + reg); + + reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index); + mask = BIT(monbit); + + return readl_poll_timeout_atomic(priv->base + reg, value, + !(value & mask), 10, 200); +} + +static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = rzv2h_cpg_assert(rcdev, id); + if (ret) + return ret; + + return rzv2h_cpg_deassert(rcdev, id); +} + +static int rzv2h_cpg_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); + unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index); + u8 monbit = priv->resets[id].mon_bit; + + return !!(readl(priv->base + reg) & BIT(monbit)); +} + +static const struct reset_control_ops rzv2h_cpg_reset_ops = { + .reset = rzv2h_cpg_reset, + .assert = rzv2h_cpg_assert, + .deassert = rzv2h_cpg_deassert, + .status = rzv2h_cpg_status, +}; + +static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); + unsigned int id = reset_spec->args[0]; + u8 rst_index = id / 16; + u8 rst_bit = id % 16; + unsigned int i; + + for (i = 0; i < rcdev->nr_resets; i++) { + if (rst_index == priv->resets[i].reset_index && + rst_bit == priv->resets[i].reset_bit) + return i; + } + + return -EINVAL; +} + +static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv) +{ + priv->rcdev.ops = &rzv2h_cpg_reset_ops; + priv->rcdev.of_node = priv->dev->of_node; + priv->rcdev.dev = priv->dev; + priv->rcdev.of_reset_n_cells = 1; + priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate; + priv->rcdev.nr_resets = priv->num_resets; + + return devm_reset_controller_register(priv->dev, &priv->rcdev); +} + +/** + * struct rzv2h_cpg_pd - RZ/V2H power domain data structure + * @priv: pointer to CPG private data 
structure + * @genpd: generic PM domain + */ +struct rzv2h_cpg_pd { + struct rzv2h_cpg_priv *priv; + struct generic_pm_domain genpd; +}; + +static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev) +{ + struct device_node *np = dev->of_node; + struct of_phandle_args clkspec; + bool once = true; + struct clk *clk; + int error; + int i = 0; + + while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, + &clkspec)) { + if (once) { + once = false; + error = pm_clk_create(dev); + if (error) { + of_node_put(clkspec.np); + goto err; + } + } + clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); + if (IS_ERR(clk)) { + error = PTR_ERR(clk); + goto fail_destroy; + } + + error = pm_clk_add_clk(dev, clk); + if (error) { + dev_err(dev, "pm_clk_add_clk failed %d\n", + error); + goto fail_put; + } + i++; + } + + return 0; + +fail_put: + clk_put(clk); + +fail_destroy: + pm_clk_destroy(dev); +err: + return error; +} + +static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev) +{ + if (!pm_clk_no_clocks(dev)) + pm_clk_destroy(dev); +} + +static void rzv2h_cpg_genpd_remove_simple(void *data) +{ + pm_genpd_remove(data); +} + +static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv) +{ + struct device *dev = priv->dev; + struct device_node *np = dev->of_node; + struct rzv2h_cpg_pd *pd; + int ret; + + pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + pd->genpd.name = np->name; + pd->priv = priv; + pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + pd->genpd.attach_dev = rzv2h_cpg_attach_dev; + pd->genpd.detach_dev = rzv2h_cpg_detach_dev; + ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd); + if (ret) + return ret; + + return of_genpd_add_provider_simple(np, &pd->genpd); +} + +static void rzv2h_cpg_del_clk_provider(void *data) +{ + of_clk_del_provider(data); +} + +static int __init rzv2h_cpg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct rzv2h_cpg_info *info; + struct rzv2h_cpg_priv *priv; + unsigned int nclks, i; + struct clk **clks; + int error; + + info = of_device_get_match_data(dev); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->rmw_lock); + + priv->dev = dev; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + nclks = info->num_total_core_clks + info->num_hw_mod_clks; + clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL); + if (!clks) + return -ENOMEM; + + priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) * + info->num_resets, GFP_KERNEL); + if (!priv->resets) + return -ENOMEM; + + dev_set_drvdata(dev, priv); + priv->clks = clks; + priv->num_core_clks = info->num_total_core_clks; + priv->num_mod_clks = info->num_hw_mod_clks; + priv->last_dt_core_clk = info->last_dt_core_clk; + priv->num_resets = info->num_resets; + + for (i = 0; i < nclks; i++) + clks[i] = ERR_PTR(-ENOENT); + + for (i = 0; i < info->num_core_clks; i++) + rzv2h_cpg_register_core_clk(&info->core_clks[i], priv); + + for (i = 0; i < info->num_mod_clks; i++) + rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv); + + error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv); + if (error) + return 
error; + + error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np); + if (error) + return error; + + error = rzv2h_cpg_add_pm_domains(priv); + if (error) + return error; + + error = rzv2h_cpg_reset_controller_register(priv); + if (error) + return error; + + return 0; +} + +static const struct of_device_id rzv2h_cpg_match[] = { +#ifdef CONFIG_CLK_R9A09G057 + { + .compatible = "renesas,r9a09g057-cpg", + .data = &r9a09g057_cpg_info, + }, +#endif + { /* sentinel */ } +}; + +static struct platform_driver rzv2h_cpg_driver = { + .driver = { + .name = "rzv2h-cpg", + .of_match_table = rzv2h_cpg_match, + }, +}; + +static int __init rzv2h_cpg_init(void) +{ + return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe); +} + +subsys_initcall(rzv2h_cpg_init); + +MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver"); diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h new file mode 100644 index 00000000000000..1bd406c69015ba --- /dev/null +++ b/drivers/clk/renesas/rzv2h-cpg.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Renesas RZ/V2H(P) Clock Pulse Generator + * + * Copyright (C) 2024 Renesas Electronics Corp. + */ + +#ifndef __RENESAS_RZV2H_CPG_H__ +#define __RENESAS_RZV2H_CPG_H__ + +/** + * struct ddiv - Structure for dynamic switching divider + * + * @offset: register offset + * @shift: position of the divider bit + * @width: width of the divider + * @monbit: monitor bit in CPG_CLKSTATUS0 register + */ +struct ddiv { + unsigned int offset:11; + unsigned int shift:4; + unsigned int width:4; + unsigned int monbit:5; +}; + +#define DDIV_PACK(_offset, _shift, _width, _monbit) \ + ((struct ddiv){ \ + .offset = _offset, \ + .shift = _shift, \ + .width = _width, \ + .monbit = _monbit \ + }) + +#define CPG_CDDIV0 (0x400) + +#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2) + +/** + * Definitions of CPG Core Clocks + * + * These include: + * - Clock outputs exported to DT + * - External input clocks + * - Internal CPG clocks + */ +struct cpg_core_clk { + const char *name; + unsigned int id; + unsigned int parent; + unsigned int div; + unsigned int mult; + unsigned int type; + union { + unsigned int conf; + struct ddiv ddiv; + } cfg; + const struct clk_div_table *dtable; + u32 flag; +}; + +enum clk_types { + /* Generic */ + CLK_TYPE_IN, /* External Clock Input */ + CLK_TYPE_FF, /* Fixed Factor Clock */ + CLK_TYPE_PLL, + CLK_TYPE_DDIV, /* Dynamic Switching Divider */ +}; + +/* BIT(31) indicates if CLK1/2 are accessible or not */ +#define PLL_CONF(n) (BIT(31) | ((n) & ~GENMASK(31, 16))) +#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0) +#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16)) +#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4)) + +#define DEF_TYPE(_name, _id, _type...) \ + { .name = _name, .id = _id, .type = _type } +#define DEF_BASE(_name, _id, _type, _parent...) 
\ + DEF_TYPE(_name, _id, _type, .parent = _parent) +#define DEF_PLL(_name, _id, _parent, _conf) \ + DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf) +#define DEF_INPUT(_name, _id) \ + DEF_TYPE(_name, _id, CLK_TYPE_IN) +#define DEF_FIXED(_name, _id, _parent, _mult, _div) \ + DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult) +#define DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable) \ + DEF_TYPE(_name, _id, CLK_TYPE_DDIV, \ + .cfg.ddiv = _ddiv_packed, \ + .parent = _parent, \ + .dtable = _dtable, \ + .flag = CLK_DIVIDER_HIWORD_MASK) + +/** + * struct rzv2h_mod_clk - Module Clocks definitions + * + * @name: handle between common and hardware-specific interfaces + * @parent: id of parent clock + * @critical: flag to indicate the clock is critical + * @on_index: control register index + * @on_bit: ON bit + * @mon_index: monitor register index + * @mon_bit: monitor bit + */ +struct rzv2h_mod_clk { + const char *name; + u16 parent; + bool critical; + u8 on_index; + u8 on_bit; + s8 mon_index; + u8 mon_bit; +}; + +#define DEF_MOD_BASE(_name, _parent, _critical, _onindex, _onbit, _monindex, _monbit) \ + { \ + .name = (_name), \ + .parent = (_parent), \ + .critical = (_critical), \ + .on_index = (_onindex), \ + .on_bit = (_onbit), \ + .mon_index = (_monindex), \ + .mon_bit = (_monbit), \ + } + +#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit) \ + DEF_MOD_BASE(_name, _parent, false, _onindex, _onbit, _monindex, _monbit) + +#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit) \ + DEF_MOD_BASE(_name, _parent, true, _onindex, _onbit, _monindex, _monbit) + +/** + * struct rzv2h_reset - Reset definitions + * + * @reset_index: reset register index + * @reset_bit: reset bit + * @mon_index: monitor register index + * @mon_bit: monitor bit + */ +struct rzv2h_reset { + u8 reset_index; + u8 reset_bit; + u8 mon_index; + u8 mon_bit; +}; + +#define DEF_RST_BASE(_resindex, _resbit, _monindex, _monbit) \ + { \ + .reset_index = (_resindex), \ + .reset_bit = (_resbit), \ + .mon_index = (_monindex), \ + .mon_bit = (_monbit), \ + } + +#define DEF_RST(_resindex, _resbit, _monindex, _monbit) \ + DEF_RST_BASE(_resindex, _resbit, _monindex, _monbit) + +/** + * struct rzv2h_cpg_info - SoC-specific CPG Description + * + * @core_clks: Array of Core Clock definitions + * @num_core_clks: Number of entries in core_clks[] + * @last_dt_core_clk: ID of the last Core Clock exported to DT + * @num_total_core_clks: Total number of Core Clocks (exported + internal) + * + * @mod_clks: Array of Module Clock definitions + * @num_mod_clks: Number of entries in mod_clks[] + * @num_hw_mod_clks: Number of Module Clocks supported by the hardware + * + * @resets: Array of Module Reset definitions + * @num_resets: Number of entries in resets[] + */ +struct rzv2h_cpg_info { + /* Core Clocks */ + const struct cpg_core_clk *core_clks; + unsigned int num_core_clks; + unsigned int last_dt_core_clk; + unsigned int num_total_core_clks; + + /* Module Clocks */ + const struct rzv2h_mod_clk *mod_clks; + unsigned int num_mod_clks; + unsigned int num_hw_mod_clks; + + /* Resets */ + const struct rzv2h_reset *resets; + unsigned int num_resets; +}; + +extern const struct rzv2h_cpg_info r9a09g057_cpg_info; + +#endif /* __RENESAS_RZV2H_CPG_H__ */ diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig index 9aad86925cd288..570ad90835d332 100644 --- a/drivers/clk/rockchip/Kconfig +++ b/drivers/clk/rockchip/Kconfig @@ -100,6 +100,13 @@ config CLK_RK3568 
help Build the driver for RK3568 Clock Driver. +config CLK_RK3576 + bool "Rockchip RK3576 clock controller support" + depends on ARM64 || COMPILE_TEST + default y + help + Build the driver for RK3576 Clock Driver. + config CLK_RK3588 bool "Rockchip RK3588 clock controller support" depends on ARM64 || COMPILE_TEST diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile index 36894f6a7022d8..af2ade54a7efa9 100644 --- a/drivers/clk/rockchip/Makefile +++ b/drivers/clk/rockchip/Makefile @@ -28,4 +28,5 @@ obj-$(CONFIG_CLK_RK3328) += clk-rk3328.o obj-$(CONFIG_CLK_RK3368) += clk-rk3368.o obj-$(CONFIG_CLK_RK3399) += clk-rk3399.o obj-$(CONFIG_CLK_RK3568) += clk-rk3568.o +obj-$(CONFIG_CLK_RK3576) += clk-rk3576.o rst-rk3576.o obj-$(CONFIG_CLK_RK3588) += clk-rk3588.o rst-rk3588.o diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index 606ce5458f54af..fe76756e592e9f 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -914,7 +914,10 @@ static unsigned long rockchip_rk3588_pll_recalc_rate(struct clk_hw *hw, unsigned } rate64 = rate64 >> cur.s; - return (unsigned long)rate64; + if (pll->type == pll_rk3588_ddr) + return (unsigned long)rate64 * 2; + else + return (unsigned long)rate64; } static int rockchip_rk3588_pll_set_params(struct rockchip_clk_pll *pll, @@ -1167,6 +1170,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, break; case pll_rk3588: case pll_rk3588_core: + case pll_rk3588_ddr: if (!pll->rate_table) init.ops = &rockchip_rk3588_pll_clk_norate_ops; else diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c index b58619eb412bbc..caf7c0e6e479e0 100644 --- a/drivers/clk/rockchip/clk-px30.c +++ b/drivers/clk/rockchip/clk-px30.c @@ -1002,6 +1002,7 @@ static const char *const px30_cru_critical_clocks[] __initconst = { static void __init px30_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; void __iomem *reg_base; reg_base = of_iomap(np, 0); @@ -1010,7 +1011,9 @@ static void __init px30_clk_init(struct device_node *np) return; } - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + clk_nr_clks = rockchip_clk_find_max_clk_id(px30_clk_branches, + ARRAY_SIZE(px30_clk_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); @@ -1043,6 +1046,7 @@ CLK_OF_DECLARE(px30_cru, "rockchip,px30-cru", px30_clk_init); static void __init px30_pmu_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clkpmu_nr_clks; void __iomem *reg_base; reg_base = of_iomap(np, 0); @@ -1051,7 +1055,9 @@ static void __init px30_pmu_clk_init(struct device_node *np) return; } - ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS); + clkpmu_nr_clks = rockchip_clk_find_max_clk_id(px30_clk_pmu_branches, + ARRAY_SIZE(px30_clk_pmu_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clkpmu_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip pmu clk init failed\n", __func__); return; diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index d644bc155ec6e5..d341ce0708aac3 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -436,6 +436,7 @@ static const char *const rk3036_critical_clocks[] __initconst = { static void __init rk3036_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; void __iomem *reg_base; struct clk 
*clk; @@ -452,7 +453,9 @@ static void __init rk3036_clk_init(struct device_node *np) writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10), reg_base + RK2928_CLKSEL_CON(13)); - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3036_clk_branches, + ARRAY_SIZE(rk3036_clk_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index a24a35553e1349..ed602c27b6245b 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -409,7 +409,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(29), 0, 3, DFLAGS), DIV(0, "sclk_vop_pre", "sclk_vop_src", 0, RK2928_CLKSEL_CON(27), 8, 8, DFLAGS), - MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0, + MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, RK2928_CLKSEL_CON(27), 1, 1, MFLAGS), FACTOR(0, "xin12m", "xin24m", 0, 1, 2), @@ -683,6 +683,7 @@ static const char *const rk3228_critical_clocks[] __initconst = { static void __init rk3228_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; void __iomem *reg_base; reg_base = of_iomap(np, 0); @@ -691,7 +692,9 @@ static void __init rk3228_clk_init(struct device_node *np) return; } - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3228_clk_branches, + ARRAY_SIZE(rk3228_clk_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index baa5aebd32771a..90d329216064a5 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -932,6 +932,7 @@ static void __init rk3288_common_init(struct device_node *np, enum rk3288_variant soc) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; rk3288_cru_base = of_iomap(np, 0); if (!rk3288_cru_base) { @@ -939,7 +940,9 @@ static void __init rk3288_common_init(struct device_node *np, return; } - ctx = rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS); + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3288_clk_branches, + ARRAY_SIZE(rk3288_clk_branches)) + 1; + ctx = rockchip_clk_init(np, rk3288_cru_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(rk3288_cru_base); diff --git a/drivers/clk/rockchip/clk-rk3308.c b/drivers/clk/rockchip/clk-rk3308.c index db3396c3e6e9a5..95a9512a41a3f5 100644 --- a/drivers/clk/rockchip/clk-rk3308.c +++ b/drivers/clk/rockchip/clk-rk3308.c @@ -917,6 +917,7 @@ static const char *const rk3308_critical_clocks[] __initconst = { static void __init rk3308_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; void __iomem *reg_base; reg_base = of_iomap(np, 0); @@ -925,7 +926,9 @@ static void __init rk3308_clk_init(struct device_node *np) return; } - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3308_clk_branches, + ARRAY_SIZE(rk3308_clk_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); diff --git a/drivers/clk/rockchip/clk-rk3328.c 
b/drivers/clk/rockchip/clk-rk3328.c index 267ab54937d3df..3bb87b27b662da 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -881,6 +881,7 @@ static const char *const rk3328_critical_clocks[] __initconst = { static void __init rk3328_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; void __iomem *reg_base; reg_base = of_iomap(np, 0); @@ -889,7 +890,9 @@ static void __init rk3328_clk_init(struct device_node *np) return; } - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3328_clk_branches, + ARRAY_SIZE(rk3328_clk_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c index 2c50cc2cc6dbb5..04391e4e287477 100644 --- a/drivers/clk/rockchip/clk-rk3368.c +++ b/drivers/clk/rockchip/clk-rk3368.c @@ -866,6 +866,7 @@ static const char *const rk3368_critical_clocks[] __initconst = { static void __init rk3368_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; void __iomem *reg_base; reg_base = of_iomap(np, 0); @@ -874,7 +875,9 @@ static void __init rk3368_clk_init(struct device_node *np) return; } - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3368_clk_branches, + ARRAY_SIZE(rk3368_clk_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index 4f1a5782c2308b..c2b243d7a5e26c 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -1531,6 +1531,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = { static void __init rk3399_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; void __iomem *reg_base; reg_base = of_iomap(np, 0); @@ -1539,7 +1540,9 @@ static void __init rk3399_clk_init(struct device_node *np) return; } - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3399_clk_branches, + ARRAY_SIZE(rk3399_clk_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); @@ -1577,6 +1580,7 @@ CLK_OF_DECLARE(rk3399_cru, "rockchip,rk3399-cru", rk3399_clk_init); static void __init rk3399_pmu_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clkpmu_nr_clks; void __iomem *reg_base; reg_base = of_iomap(np, 0); @@ -1585,7 +1589,9 @@ static void __init rk3399_pmu_clk_init(struct device_node *np) return; } - ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS); + clkpmu_nr_clks = rockchip_clk_find_max_clk_id(rk3399_clk_pmu_branches, + ARRAY_SIZE(rk3399_clk_pmu_branches)) + 1; + ctx = rockchip_clk_init(np, reg_base, clkpmu_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip pmu clk init failed\n", __func__); iounmap(reg_base); diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c new file mode 100644 index 00000000000000..595e010341f73a --- /dev/null +++ b/drivers/clk/rockchip/clk-rk3576.c @@ -0,0 +1,1818 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023 Rockchip Electronics Co. Ltd. 
+ * Author: Elaine Zhang + */ + +#include +#include +#include +#include +#include +#include +#include +#include "clk.h" + +#define RK3576_GRF_SOC_STATUS0 0x600 +#define RK3576_PMU0_GRF_OSC_CON6 0x18 + +enum rk3576_plls { + bpll, lpll, vpll, aupll, cpll, gpll, ppll, +}; + +static struct rockchip_pll_rate_table rk3576_pll_rates[] = { + /* _mhz, _p, _m, _s, _k */ + RK3588_PLL_RATE(2520000000, 2, 210, 0, 0), + RK3588_PLL_RATE(2496000000, 2, 208, 0, 0), + RK3588_PLL_RATE(2472000000, 2, 206, 0, 0), + RK3588_PLL_RATE(2448000000, 2, 204, 0, 0), + RK3588_PLL_RATE(2424000000, 2, 202, 0, 0), + RK3588_PLL_RATE(2400000000, 2, 200, 0, 0), + RK3588_PLL_RATE(2376000000, 2, 198, 0, 0), + RK3588_PLL_RATE(2352000000, 2, 196, 0, 0), + RK3588_PLL_RATE(2328000000, 2, 194, 0, 0), + RK3588_PLL_RATE(2304000000, 2, 192, 0, 0), + RK3588_PLL_RATE(2280000000, 2, 190, 0, 0), + RK3588_PLL_RATE(2256000000, 2, 376, 1, 0), + RK3588_PLL_RATE(2232000000, 2, 372, 1, 0), + RK3588_PLL_RATE(2208000000, 2, 368, 1, 0), + RK3588_PLL_RATE(2184000000, 2, 364, 1, 0), + RK3588_PLL_RATE(2160000000, 2, 360, 1, 0), + RK3588_PLL_RATE(2136000000, 2, 356, 1, 0), + RK3588_PLL_RATE(2112000000, 2, 352, 1, 0), + RK3588_PLL_RATE(2088000000, 2, 348, 1, 0), + RK3588_PLL_RATE(2064000000, 2, 344, 1, 0), + RK3588_PLL_RATE(2040000000, 2, 340, 1, 0), + RK3588_PLL_RATE(2016000000, 2, 336, 1, 0), + RK3588_PLL_RATE(1992000000, 2, 332, 1, 0), + RK3588_PLL_RATE(1968000000, 2, 328, 1, 0), + RK3588_PLL_RATE(1944000000, 2, 324, 1, 0), + RK3588_PLL_RATE(1920000000, 2, 320, 1, 0), + RK3588_PLL_RATE(1896000000, 2, 316, 1, 0), + RK3588_PLL_RATE(1872000000, 2, 312, 1, 0), + RK3588_PLL_RATE(1848000000, 2, 308, 1, 0), + RK3588_PLL_RATE(1824000000, 2, 304, 1, 0), + RK3588_PLL_RATE(1800000000, 2, 300, 1, 0), + RK3588_PLL_RATE(1776000000, 2, 296, 1, 0), + RK3588_PLL_RATE(1752000000, 2, 292, 1, 0), + RK3588_PLL_RATE(1728000000, 2, 288, 1, 0), + RK3588_PLL_RATE(1704000000, 2, 284, 1, 0), + RK3588_PLL_RATE(1680000000, 2, 280, 1, 0), + RK3588_PLL_RATE(1656000000, 2, 276, 1, 0), + RK3588_PLL_RATE(1632000000, 2, 272, 1, 0), + RK3588_PLL_RATE(1608000000, 2, 268, 1, 0), + RK3588_PLL_RATE(1584000000, 2, 264, 1, 0), + RK3588_PLL_RATE(1560000000, 2, 260, 1, 0), + RK3588_PLL_RATE(1536000000, 2, 256, 1, 0), + RK3588_PLL_RATE(1512000000, 2, 252, 1, 0), + RK3588_PLL_RATE(1488000000, 2, 248, 1, 0), + RK3588_PLL_RATE(1464000000, 2, 244, 1, 0), + RK3588_PLL_RATE(1440000000, 2, 240, 1, 0), + RK3588_PLL_RATE(1416000000, 2, 236, 1, 0), + RK3588_PLL_RATE(1392000000, 2, 232, 1, 0), + RK3588_PLL_RATE(1320000000, 2, 220, 1, 0), + RK3588_PLL_RATE(1200000000, 2, 200, 1, 0), + RK3588_PLL_RATE(1188000000, 2, 198, 1, 0), + RK3588_PLL_RATE(1100000000, 3, 550, 2, 0), + RK3588_PLL_RATE(1008000000, 2, 336, 2, 0), + RK3588_PLL_RATE(1000000000, 3, 500, 2, 0), + RK3588_PLL_RATE(983040000, 4, 655, 2, 23592), + RK3588_PLL_RATE(955520000, 3, 477, 2, 49806), + RK3588_PLL_RATE(903168000, 6, 903, 2, 11009), + RK3588_PLL_RATE(900000000, 2, 300, 2, 0), + RK3588_PLL_RATE(816000000, 2, 272, 2, 0), + RK3588_PLL_RATE(786432000, 2, 262, 2, 9437), + RK3588_PLL_RATE(786000000, 1, 131, 2, 0), + RK3588_PLL_RATE(785560000, 3, 392, 2, 51117), + RK3588_PLL_RATE(722534400, 8, 963, 2, 24850), + RK3588_PLL_RATE(600000000, 2, 200, 2, 0), + RK3588_PLL_RATE(594000000, 2, 198, 2, 0), + RK3588_PLL_RATE(408000000, 2, 272, 3, 0), + RK3588_PLL_RATE(312000000, 2, 208, 3, 0), + RK3588_PLL_RATE(216000000, 2, 288, 4, 0), + RK3588_PLL_RATE(96000000, 2, 256, 5, 0), + { /* sentinel */ }, +}; + +static struct rockchip_pll_rate_table 
rk3576_ppll_rates[] = { + /* _mhz, _p, _m, _s, _k */ + RK3588_PLL_RATE(1300000000, 3, 325, 2, 0), + { /* sentinel */ }, +}; + +#define RK3576_ACLK_M_BIGCORE_DIV_MASK 0x1f +#define RK3576_ACLK_M_BIGCORE_DIV_SHIFT 0 +#define RK3576_ACLK_M_LITCORE_DIV_MASK 0x1f +#define RK3576_ACLK_M_LITCORE_DIV_SHIFT 8 +#define RK3576_PCLK_DBG_LITCORE_DIV_MASK 0x1f +#define RK3576_PCLK_DBG_LITCORE_DIV_SHIFT 0 +#define RK3576_ACLK_CCI_DIV_MASK 0x1f +#define RK3576_ACLK_CCI_DIV_SHIFT 7 +#define RK3576_ACLK_CCI_MUX_MASK 0x3 +#define RK3576_ACLK_CCI_MUX_SHIFT 12 + +#define RK3576_BIGCORE_CLKSEL2(_amcore) \ +{ \ + .reg = RK3576_BIGCORE_CLKSEL_CON(2), \ + .val = HIWORD_UPDATE(_amcore - 1, RK3576_ACLK_M_BIGCORE_DIV_MASK, \ + RK3576_ACLK_M_BIGCORE_DIV_SHIFT), \ +} + +#define RK3576_LITCORE_CLKSEL1(_amcore) \ +{ \ + .reg = RK3576_LITCORE_CLKSEL_CON(1), \ + .val = HIWORD_UPDATE(_amcore - 1, RK3576_ACLK_M_LITCORE_DIV_MASK, \ + RK3576_ACLK_M_LITCORE_DIV_SHIFT), \ +} + +#define RK3576_LITCORE_CLKSEL2(_pclkdbg) \ +{ \ + .reg = RK3576_LITCORE_CLKSEL_CON(2), \ + .val = HIWORD_UPDATE(_pclkdbg - 1, RK3576_PCLK_DBG_LITCORE_DIV_MASK, \ + RK3576_PCLK_DBG_LITCORE_DIV_SHIFT), \ +} + +#define RK3576_CCI_CLKSEL4(_ccisel, _div) \ +{ \ + .reg = RK3576_CCI_CLKSEL_CON(4), \ + .val = HIWORD_UPDATE(_ccisel, RK3576_ACLK_CCI_MUX_MASK, \ + RK3576_ACLK_CCI_MUX_SHIFT) | \ + HIWORD_UPDATE(_div - 1, RK3576_ACLK_CCI_DIV_MASK, \ + RK3576_ACLK_CCI_DIV_SHIFT), \ +} + +#define RK3576_CPUBCLK_RATE(_prate, _amcore) \ +{ \ + .prate = _prate##U, \ + .divs = { \ + RK3576_BIGCORE_CLKSEL2(_amcore), \ + }, \ +} + +#define RK3576_CPULCLK_RATE(_prate, _amcore, _pclkdbg, _ccisel) \ +{ \ + .prate = _prate##U, \ + .divs = { \ + RK3576_LITCORE_CLKSEL1(_amcore), \ + RK3576_LITCORE_CLKSEL2(_pclkdbg), \ + }, \ + .pre_muxs = { \ + RK3576_CCI_CLKSEL4(2, 2), \ + }, \ + .post_muxs = { \ + RK3576_CCI_CLKSEL4(_ccisel, 2), \ + }, \ +} + +static struct rockchip_cpuclk_rate_table rk3576_cpubclk_rates[] __initdata = { + RK3576_CPUBCLK_RATE(2496000000, 2), + RK3576_CPUBCLK_RATE(2400000000, 2), + RK3576_CPUBCLK_RATE(2304000000, 2), + RK3576_CPUBCLK_RATE(2208000000, 2), + RK3576_CPUBCLK_RATE(2184000000, 2), + RK3576_CPUBCLK_RATE(2088000000, 2), + RK3576_CPUBCLK_RATE(2040000000, 2), + RK3576_CPUBCLK_RATE(2016000000, 2), + RK3576_CPUBCLK_RATE(1992000000, 2), + RK3576_CPUBCLK_RATE(1896000000, 2), + RK3576_CPUBCLK_RATE(1800000000, 2), + RK3576_CPUBCLK_RATE(1704000000, 2), + RK3576_CPUBCLK_RATE(1608000000, 2), + RK3576_CPUBCLK_RATE(1584000000, 2), + RK3576_CPUBCLK_RATE(1560000000, 2), + RK3576_CPUBCLK_RATE(1536000000, 2), + RK3576_CPUBCLK_RATE(1512000000, 2), + RK3576_CPUBCLK_RATE(1488000000, 2), + RK3576_CPUBCLK_RATE(1464000000, 2), + RK3576_CPUBCLK_RATE(1440000000, 2), + RK3576_CPUBCLK_RATE(1416000000, 2), + RK3576_CPUBCLK_RATE(1392000000, 2), + RK3576_CPUBCLK_RATE(1368000000, 2), + RK3576_CPUBCLK_RATE(1344000000, 2), + RK3576_CPUBCLK_RATE(1320000000, 2), + RK3576_CPUBCLK_RATE(1296000000, 2), + RK3576_CPUBCLK_RATE(1272000000, 2), + RK3576_CPUBCLK_RATE(1248000000, 2), + RK3576_CPUBCLK_RATE(1224000000, 2), + RK3576_CPUBCLK_RATE(1200000000, 2), + RK3576_CPUBCLK_RATE(1104000000, 2), + RK3576_CPUBCLK_RATE(1008000000, 2), + RK3576_CPUBCLK_RATE(912000000, 2), + RK3576_CPUBCLK_RATE(816000000, 2), + RK3576_CPUBCLK_RATE(696000000, 2), + RK3576_CPUBCLK_RATE(600000000, 2), + RK3576_CPUBCLK_RATE(408000000, 2), + RK3576_CPUBCLK_RATE(312000000, 2), + RK3576_CPUBCLK_RATE(216000000, 2), + RK3576_CPUBCLK_RATE(96000000, 2), +}; + +static const struct rockchip_cpuclk_reg_data rk3576_cpubclk_data = { + 
.core_reg[0] = RK3576_BIGCORE_CLKSEL_CON(1), + .div_core_shift[0] = 7, + .div_core_mask[0] = 0x1f, + .num_cores = 1, + .mux_core_alt = 1, + .mux_core_main = 0, + .mux_core_shift = 12, + .mux_core_mask = 0x3, +}; + +static struct rockchip_cpuclk_rate_table rk3576_cpulclk_rates[] __initdata = { + RK3576_CPULCLK_RATE(2400000000, 2, 6, 3), + RK3576_CPULCLK_RATE(2304000000, 2, 6, 3), + RK3576_CPULCLK_RATE(2208000000, 2, 6, 3), + RK3576_CPULCLK_RATE(2184000000, 2, 6, 3), + RK3576_CPULCLK_RATE(2088000000, 2, 6, 3), + RK3576_CPULCLK_RATE(2040000000, 2, 6, 3), + RK3576_CPULCLK_RATE(2016000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1992000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1896000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1800000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1704000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1608000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1584000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1560000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1536000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1512000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1488000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1464000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1440000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1416000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1392000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1368000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1344000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1320000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1296000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1272000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1248000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1224000000, 2, 6, 3), + RK3576_CPULCLK_RATE(1200000000, 2, 6, 2), + RK3576_CPULCLK_RATE(1104000000, 2, 6, 2), + RK3576_CPULCLK_RATE(1008000000, 2, 6, 2), + RK3576_CPULCLK_RATE(912000000, 2, 6, 2), + RK3576_CPULCLK_RATE(816000000, 2, 6, 2), + RK3576_CPULCLK_RATE(696000000, 2, 6, 2), + RK3576_CPULCLK_RATE(600000000, 2, 6, 2), + RK3576_CPULCLK_RATE(408000000, 2, 6, 2), + RK3576_CPULCLK_RATE(312000000, 2, 6, 2), + RK3576_CPULCLK_RATE(216000000, 2, 6, 2), + RK3576_CPULCLK_RATE(96000000, 2, 6, 2), +}; + +static const struct rockchip_cpuclk_reg_data rk3576_cpulclk_data = { + .core_reg[0] = RK3576_LITCORE_CLKSEL_CON(0), + .div_core_shift[0] = 7, + .div_core_mask[0] = 0x1f, + .num_cores = 1, + .mux_core_alt = 1, + .mux_core_main = 0, + .mux_core_shift = 12, + .mux_core_mask = 0x3, +}; + +#define MFLAGS CLK_MUX_HIWORD_MASK +#define DFLAGS CLK_DIVIDER_HIWORD_MASK +#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) + +PNAME(mux_pll_p) = { "xin24m", "xin32k" }; +PNAME(mux_24m_32k_p) = { "xin24m", "xin_osc0_div" }; +PNAME(mux_armclkl_p) = { "xin24m", "pll_lpll", "lpll" }; +PNAME(mux_armclkb_p) = { "xin24m", "pll_bpll", "bpll" }; +PNAME(gpll_24m_p) = { "gpll", "xin24m" }; +PNAME(cpll_24m_p) = { "cpll", "xin24m" }; +PNAME(gpll_cpll_p) = { "gpll", "cpll" }; +PNAME(gpll_spll_p) = { "gpll", "spll" }; +PNAME(gpll_cpll_aupll_p) = { "gpll", "cpll", "aupll" }; +PNAME(gpll_cpll_24m_p) = { "gpll", "cpll", "xin24m" }; +PNAME(gpll_cpll_24m_spll_p) = { "gpll", "cpll", "xin24m", "spll" }; +PNAME(gpll_cpll_aupll_24m_p) = { "gpll", "cpll", "aupll", "xin24m" }; +PNAME(gpll_cpll_aupll_spll_p) = { "gpll", "cpll", "aupll", "spll" }; +PNAME(gpll_cpll_aupll_spll_lpll_p) = { "gpll", "cpll", "aupll", "spll", "lpll_dummy" }; +PNAME(gpll_cpll_spll_bpll_p) = { "gpll", "cpll", "spll", "bpll_dummy" }; +PNAME(gpll_cpll_lpll_bpll_p) = { "gpll", "cpll", "lpll_dummy", "bpll_dummy" }; +PNAME(gpll_spll_cpll_bpll_lpll_p) = { "gpll", "spll", "cpll", "bpll_dummy", "lpll_dummy" }; +PNAME(gpll_cpll_vpll_aupll_24m_p) = { "gpll", "cpll", "vpll", "aupll", 
"xin24m" }; +PNAME(gpll_cpll_spll_aupll_bpll_p) = { "gpll", "cpll", "spll", "aupll", "bpll_dummy" }; +PNAME(gpll_cpll_spll_bpll_lpll_p) = { "gpll", "cpll", "spll", "bpll_dummy", "lpll_dummy" }; +PNAME(gpll_cpll_spll_lpll_bpll_p) = { "gpll", "cpll", "spll", "lpll_dummy", "bpll_dummy" }; +PNAME(gpll_cpll_vpll_bpll_lpll_p) = { "gpll", "cpll", "vpll", "bpll_dummy", "lpll_dummy" }; +PNAME(gpll_spll_aupll_bpll_lpll_p) = { "gpll", "spll", "aupll", "bpll_dummy", "lpll_dummy" }; +PNAME(gpll_spll_isppvtpll_bpll_lpll_p) = { "gpll", "spll", "isp_pvtpll", "bpll_dummy", "lpll_dummy" }; +PNAME(gpll_cpll_spll_aupll_lpll_24m_p) = { "gpll", "cpll", "spll", "aupll", "lpll_dummy", "xin24m" }; +PNAME(gpll_cpll_spll_vpll_bpll_lpll_p) = { "gpll", "cpll", "spll", "vpll", "bpll_dummy", "lpll_dummy" }; +PNAME(cpll_vpll_lpll_bpll_p) = { "cpll", "vpll", "lpll_dummy", "bpll_dummy" }; +PNAME(mux_24m_ccipvtpll_gpll_lpll_p) = { "xin24m", "cci_pvtpll", "gpll", "lpll" }; +PNAME(mux_24m_spll_gpll_cpll_p) = {"xin24m", "spll", "gpll", "cpll" }; +PNAME(audio_frac_int_p) = { "xin24m", "clk_audio_frac_0", "clk_audio_frac_1", "clk_audio_frac_2", + "clk_audio_frac_3", "clk_audio_int_0", "clk_audio_int_1", "clk_audio_int_2" }; +PNAME(audio_frac_p) = { "clk_audio_frac_0", "clk_audio_frac_1", "clk_audio_frac_2", "clk_audio_frac_3" }; +PNAME(mux_100m_24m_p) = { "clk_cpll_div10", "xin24m" }; +PNAME(mux_100m_50m_24m_p) = { "clk_cpll_div10", "clk_cpll_div20", "xin24m" }; +PNAME(mux_100m_24m_lclk0_p) = { "clk_cpll_div10", "xin24m", "lclk_asrc_src_0" }; +PNAME(mux_100m_24m_lclk1_p) = { "clk_cpll_div10", "xin24m", "lclk_asrc_src_1" }; +PNAME(mux_150m_100m_50m_24m_p) = { "clk_gpll_div8", "clk_cpll_div10", "clk_cpll_div20", "xin24m" }; +PNAME(mux_200m_100m_50m_24m_p) = { "clk_gpll_div6", "clk_cpll_div10", "clk_cpll_div20", "xin24m" }; +PNAME(mux_400m_200m_100m_24m_p) = { "clk_gpll_div3", "clk_gpll_div6", "clk_cpll_div10", "xin24m" }; +PNAME(mux_500m_250m_100m_24m_p) = { "clk_cpll_div2", "clk_cpll_div4", "clk_cpll_div10", "xin24m" }; +PNAME(mux_600m_400m_300m_24m_p) = { "clk_gpll_div2", "clk_gpll_div3", "clk_gpll_div4", "xin24m" }; +PNAME(mux_350m_175m_116m_24m_p) = { "clk_spll_div2", "clk_spll_div4", "clk_spll_div6", "xin24m" }; +PNAME(mux_175m_116m_58m_24m_p) = { "clk_spll_div4", "clk_spll_div6", "clk_spll_div12", "xin24m" }; +PNAME(mux_116m_58m_24m_p) = { "clk_spll_div6", "clk_spll_div12", "xin24m" }; +PNAME(mclk_sai0_8ch_p) = { "mclk_sai0_8ch_src", "sai0_mclkin", "sai1_mclkin" }; +PNAME(mclk_sai1_8ch_p) = { "mclk_sai1_8ch_src", "sai1_mclkin" }; +PNAME(mclk_sai2_2ch_p) = { "mclk_sai2_2ch_src", "sai2_mclkin", "sai1_mclkin" }; +PNAME(mclk_sai3_2ch_p) = { "mclk_sai3_2ch_src", "sai3_mclkin", "sai1_mclkin" }; +PNAME(mclk_sai4_2ch_p) = { "mclk_sai4_2ch_src", "sai4_mclkin", "sai1_mclkin" }; +PNAME(mclk_sai5_8ch_p) = { "mclk_sai5_8ch_src", "sai1_mclkin" }; +PNAME(mclk_sai6_8ch_p) = { "mclk_sai6_8ch_src", "sai1_mclkin" }; +PNAME(mclk_sai7_8ch_p) = { "mclk_sai7_8ch_src", "sai1_mclkin" }; +PNAME(mclk_sai8_8ch_p) = { "mclk_sai8_8ch_src", "sai1_mclkin" }; +PNAME(mclk_sai9_8ch_p) = { "mclk_sai9_8ch_src", "sai1_mclkin" }; +PNAME(uart1_p) = { "clk_uart1_src_top", "xin24m" }; +PNAME(clk_gmac1_ptp_ref_src_p) = { "gpll", "cpll", "gmac1_ptp_refclk_in" }; +PNAME(clk_gmac0_ptp_ref_src_p) = { "gpll", "cpll", "gmac0_ptp_refclk_in" }; +PNAME(dclk_ebc_p) = { "gpll", "cpll", "vpll", "aupll", "lpll_dummy", + "dclk_ebc_frac", "xin24m" }; +PNAME(dclk_vp0_p) = { "dclk_vp0_src", "clk_hdmiphy_pixel0" }; +PNAME(dclk_vp1_p) = { "dclk_vp1_src", "clk_hdmiphy_pixel0" }; 
+PNAME(dclk_vp2_p) = { "dclk_vp2_src", "clk_hdmiphy_pixel0" }; +PNAME(clk_uart_p) = { "gpll", "cpll", "aupll", "xin24m", "clk_uart_frac_0", + "clk_uart_frac_1", "clk_uart_frac_2"}; +PNAME(clk_freq_pwm1_p) = { "sai0_mclkin", "sai1_mclkin", "sai2_mclkin", + "sai3_mclkin", "sai4_mclkin", "sai_sclkin_freq"}; +PNAME(clk_counter_pwm1_p) = { "sai0_mclkin", "sai1_mclkin", "sai2_mclkin", + "sai3_mclkin", "sai4_mclkin", "sai_sclkin_counter"}; +PNAME(sai_sclkin_freq_p) = { "sai0_sclk_in", "sai1_sclk_in", "sai2_sclk_in", + "sai3_sclk_in", "sai4_sclk_in"}; +PNAME(clk_ref_pcie0_phy_p) = { "clk_pcie_100m_src", "clk_pcie_100m_nduty_src", + "xin24m"}; +PNAME(hclk_vi_root_p) = { "clk_gpll_div6", "clk_cpll_div10", + "aclk_vi_root_inter", "xin24m"}; +PNAME(clk_ref_osc_mphy_p) = { "xin24m", "clk_gpio_mphy_i", "clk_ref_mphy_26m"}; +PNAME(mux_pmu200m_pmu100m_pmu50m_24m_p) = { "clk_200m_pmu_src", "clk_100m_pmu_src", + "clk_50m_pmu_src", "xin24m" }; +PNAME(mux_pmu100m_pmu50m_24m_p) = { "clk_100m_pmu_src", "clk_50m_pmu_src", "xin24m" }; +PNAME(mux_pmu100m_24m_32k_p) = { "clk_100m_pmu_src", "xin24m", "xin_osc0_div" }; +PNAME(clk_phy_ref_src_p) = { "xin24m", "clk_pmuphy_ref_src" }; +PNAME(clk_usbphy_ref_src_p) = { "usbphy0_24m", "usbphy1_24m" }; +PNAME(clk_cpll_ref_src_p) = { "xin24m", "clk_usbphy_ref_src" }; +PNAME(clk_aupll_ref_src_p) = { "xin24m", "clk_aupll_ref_io" }; + +static struct rockchip_pll_clock rk3576_pll_clks[] __initdata = { + [bpll] = PLL(pll_rk3588_core, PLL_BPLL, "bpll", mux_pll_p, + 0, RK3576_PLL_CON(0), + RK3576_BPLL_MODE_CON0, 0, 15, 0, rk3576_pll_rates), + [lpll] = PLL(pll_rk3588_core, PLL_LPLL, "lpll", mux_pll_p, + 0, RK3576_LPLL_CON(16), + RK3576_LPLL_MODE_CON0, 0, 15, 0, rk3576_pll_rates), + [vpll] = PLL(pll_rk3588, PLL_VPLL, "vpll", mux_pll_p, + 0, RK3576_PLL_CON(88), + RK3576_MODE_CON0, 4, 15, 0, rk3576_pll_rates), + [aupll] = PLL(pll_rk3588, PLL_AUPLL, "aupll", mux_pll_p, + 0, RK3576_PLL_CON(96), + RK3576_MODE_CON0, 6, 15, 0, rk3576_pll_rates), + [cpll] = PLL(pll_rk3588, PLL_CPLL, "cpll", mux_pll_p, + CLK_IGNORE_UNUSED, RK3576_PLL_CON(104), + RK3576_MODE_CON0, 8, 15, 0, rk3576_pll_rates), + [gpll] = PLL(pll_rk3588, PLL_GPLL, "gpll", mux_pll_p, + CLK_IGNORE_UNUSED, RK3576_PLL_CON(112), + RK3576_MODE_CON0, 2, 15, 0, rk3576_pll_rates), + [ppll] = PLL(pll_rk3588_ddr, PLL_PPLL, "ppll", mux_pll_p, + CLK_IGNORE_UNUSED, RK3576_PMU_PLL_CON(128), + RK3576_MODE_CON0, 10, 15, 0, rk3576_ppll_rates), +}; + +static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = { + /* + * CRU Clock-Architecture + */ + /* fixed */ + FACTOR(0, "xin12m", "xin24m", 0, 1, 2), + + COMPOSITE_FRAC(XIN_OSC0_DIV, "xin_osc0_div", "xin24m", CLK_IS_CRITICAL, + RK3576_PMU_CLKSEL_CON(21), 0, + RK3576_PMU_CLKGATE_CON(7), 11, GFLAGS), + + FACTOR(0, "clk_spll_div12", "spll", 0, 1, 12), + FACTOR(0, "clk_spll_div6", "spll", 0, 1, 6), + FACTOR(0, "clk_spll_div4", "spll", 0, 1, 4), + FACTOR(0, "lpll_div2", "lpll", 0, 1, 2), + FACTOR(0, "bpll_div4", "bpll", 0, 1, 4), + + /* top */ + COMPOSITE(CLK_CPLL_DIV20, "clk_cpll_div20", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(0), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 0, GFLAGS), + COMPOSITE(CLK_CPLL_DIV10, "clk_cpll_div10", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(0), 11, 1, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 1, GFLAGS), + COMPOSITE(CLK_GPLL_DIV8, "clk_gpll_div8", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(1), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 2, GFLAGS), + COMPOSITE(CLK_GPLL_DIV6, "clk_gpll_div6", gpll_cpll_p, 
CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(1), 11, 1, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 3, GFLAGS), + COMPOSITE(CLK_CPLL_DIV4, "clk_cpll_div4", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(2), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 4, GFLAGS), + COMPOSITE(CLK_GPLL_DIV4, "clk_gpll_div4", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(2), 11, 1, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 5, GFLAGS), + COMPOSITE(CLK_SPLL_DIV2, "clk_spll_div2", gpll_cpll_spll_bpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(3), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 6, GFLAGS), + COMPOSITE(CLK_GPLL_DIV3, "clk_gpll_div3", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(3), 12, 1, MFLAGS, 7, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 7, GFLAGS), + COMPOSITE(CLK_CPLL_DIV2, "clk_cpll_div2", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(4), 11, 1, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 9, GFLAGS), + COMPOSITE(CLK_GPLL_DIV2, "clk_gpll_div2", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(5), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 10, GFLAGS), + COMPOSITE(CLK_SPLL_DIV1, "clk_spll_div1", gpll_cpll_spll_bpll_lpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(6), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(0), 12, GFLAGS), + COMPOSITE_NODIV(PCLK_TOP_ROOT, "pclk_top_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(8), 7, 2, MFLAGS, + RK3576_CLKGATE_CON(1), 1, GFLAGS), + COMPOSITE(ACLK_TOP, "aclk_top", gpll_cpll_aupll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(9), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(1), 3, GFLAGS), + COMPOSITE(ACLK_TOP_MID, "aclk_top_mid", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(10), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(1), 6, GFLAGS), + COMPOSITE(ACLK_SECURE_HIGH, "aclk_secure_high", gpll_spll_aupll_bpll_lpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(10), 11, 3, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(1), 7, GFLAGS), + COMPOSITE_NODIV(HCLK_TOP, "hclk_top", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(19), 2, 2, MFLAGS, + RK3576_CLKGATE_CON(1), 14, GFLAGS), + COMPOSITE_NODIV(HCLK_VO0VOP_CHANNEL, "hclk_vo0vop_channel", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(19), 6, 2, MFLAGS, + RK3576_CLKGATE_CON(2), 0, GFLAGS), + COMPOSITE(ACLK_VO0VOP_CHANNEL, "aclk_vo0vop_channel", gpll_cpll_lpll_bpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(19), 12, 2, MFLAGS, 8, 4, DFLAGS, + RK3576_CLKGATE_CON(2), 1, GFLAGS), + MUX(CLK_AUDIO_FRAC_0_SRC, "clk_audio_frac_0_src", gpll_cpll_aupll_24m_p, 0, + RK3576_CLKSEL_CON(13), 0, 2, MFLAGS), + COMPOSITE_FRAC(CLK_AUDIO_FRAC_0, "clk_audio_frac_0", "clk_audio_frac_0_src", 0, + RK3576_CLKSEL_CON(12), 0, + RK3576_CLKGATE_CON(1), 10, GFLAGS), + MUX(CLK_AUDIO_FRAC_1_SRC, "clk_audio_frac_1_src", gpll_cpll_aupll_24m_p, 0, + RK3576_CLKSEL_CON(15), 0, 2, MFLAGS), + COMPOSITE_FRAC(CLK_AUDIO_FRAC_1, "clk_audio_frac_1", "clk_audio_frac_1_src", 0, + RK3576_CLKSEL_CON(14), 0, + RK3576_CLKGATE_CON(1), 11, GFLAGS), + MUX(CLK_AUDIO_FRAC_2_SRC, "clk_audio_frac_2_src", gpll_cpll_aupll_24m_p, 0, + RK3576_CLKSEL_CON(17), 0, 2, MFLAGS), + COMPOSITE_FRAC(CLK_AUDIO_FRAC_2, "clk_audio_frac_2", "clk_audio_frac_2_src", 0, + RK3576_CLKSEL_CON(16), 0, + RK3576_CLKGATE_CON(1), 12, GFLAGS), + MUX(CLK_AUDIO_FRAC_3_SRC, "clk_audio_frac_3_src", gpll_cpll_aupll_24m_p, 0, + RK3576_CLKSEL_CON(19), 0, 2, MFLAGS), + COMPOSITE_FRAC(CLK_AUDIO_FRAC_3, "clk_audio_frac_3", "clk_audio_frac_3_src", 0, + RK3576_CLKSEL_CON(18), 0, + RK3576_CLKGATE_CON(1), 13, GFLAGS), + MUX(0, 
"clk_uart_frac_0_src", gpll_cpll_aupll_24m_p, 0, + RK3576_CLKSEL_CON(22), 0, 2, MFLAGS), + COMPOSITE_FRAC(CLK_UART_FRAC_0, "clk_uart_frac_0", "clk_uart_frac_0_src", 0, + RK3576_CLKSEL_CON(21), 0, + RK3576_CLKGATE_CON(2), 5, GFLAGS), + MUX(0, "clk_uart_frac_1_src", gpll_cpll_aupll_24m_p, 0, + RK3576_CLKSEL_CON(24), 0, 2, MFLAGS), + COMPOSITE_FRAC(CLK_UART_FRAC_1, "clk_uart_frac_1", "clk_uart_frac_1_src", 0, + RK3576_CLKSEL_CON(23), 0, + RK3576_CLKGATE_CON(2), 6, GFLAGS), + MUX(0, "clk_uart_frac_2_src", gpll_cpll_aupll_24m_p, 0, + RK3576_CLKSEL_CON(26), 0, 2, MFLAGS), + COMPOSITE_FRAC(CLK_UART_FRAC_2, "clk_uart_frac_2", "clk_uart_frac_2_src", 0, + RK3576_CLKSEL_CON(25), 0, + RK3576_CLKGATE_CON(2), 7, GFLAGS), + COMPOSITE(CLK_UART1_SRC_TOP, "clk_uart1_src_top", clk_uart_p, 0, + RK3576_CLKSEL_CON(27), 13, 3, MFLAGS, 5, 8, DFLAGS, + RK3576_CLKGATE_CON(2), 13, GFLAGS), + COMPOSITE_NOMUX(CLK_AUDIO_INT_0, "clk_audio_int_0", "gpll", 0, + RK3576_CLKSEL_CON(28), 0, 5, DFLAGS, + RK3576_CLKGATE_CON(2), 14, GFLAGS), + COMPOSITE_NOMUX(CLK_AUDIO_INT_1, "clk_audio_int_1", "cpll", 0, + RK3576_CLKSEL_CON(28), 5, 5, DFLAGS, + RK3576_CLKGATE_CON(2), 15, GFLAGS), + COMPOSITE_NOMUX(CLK_AUDIO_INT_2, "clk_audio_int_2", "aupll", 0, + RK3576_CLKSEL_CON(28), 10, 5, DFLAGS, + RK3576_CLKGATE_CON(3), 0, GFLAGS), + COMPOSITE(CLK_PDM0_SRC_TOP, "clk_pdm0_src_top", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(29), 9, 3, MFLAGS, 0, 9, DFLAGS, + RK3576_CLKGATE_CON(3), 2, GFLAGS), + COMPOSITE_NOMUX(CLK_GMAC0_125M_SRC, "clk_gmac0_125m_src", "cpll", 0, + RK3576_CLKSEL_CON(30), 10, 5, DFLAGS, + RK3576_CLKGATE_CON(3), 6, GFLAGS), + COMPOSITE_NOMUX(CLK_GMAC1_125M_SRC, "clk_gmac1_125m_src", "cpll", 0, + RK3576_CLKSEL_CON(31), 0, 5, DFLAGS, + RK3576_CLKGATE_CON(3), 7, GFLAGS), + COMPOSITE(LCLK_ASRC_SRC_0, "lclk_asrc_src_0", audio_frac_p, 0, + RK3576_CLKSEL_CON(31), 10, 2, MFLAGS, 5, 5, DFLAGS, + RK3576_CLKGATE_CON(3), 10, GFLAGS), + COMPOSITE(LCLK_ASRC_SRC_1, "lclk_asrc_src_1", audio_frac_p, 0, + RK3576_CLKSEL_CON(32), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(3), 11, GFLAGS), + COMPOSITE(REF_CLK0_OUT_PLL, "ref_clk0_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0, + RK3576_CLKSEL_CON(33), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(4), 1, GFLAGS), + COMPOSITE(REF_CLK1_OUT_PLL, "ref_clk1_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0, + RK3576_CLKSEL_CON(34), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(4), 2, GFLAGS), + COMPOSITE(REF_CLK2_OUT_PLL, "ref_clk2_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0, + RK3576_CLKSEL_CON(35), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(4), 3, GFLAGS), + COMPOSITE(REFCLKO25M_GMAC0_OUT, "refclko25m_gmac0_out", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(36), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3576_CLKGATE_CON(5), 10, GFLAGS), + COMPOSITE(REFCLKO25M_GMAC1_OUT, "refclko25m_gmac1_out", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(36), 15, 1, MFLAGS, 8, 7, DFLAGS, + RK3576_CLKGATE_CON(5), 11, GFLAGS), + COMPOSITE(CLK_CIFOUT_OUT, "clk_cifout_out", gpll_cpll_24m_spll_p, 0, + RK3576_CLKSEL_CON(37), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(5), 12, GFLAGS), + GATE(CLK_GMAC0_RMII_CRU, "clk_gmac0_rmii_cru", "clk_cpll_div20", 0, + RK3576_CLKGATE_CON(5), 13, GFLAGS), + GATE(CLK_GMAC1_RMII_CRU, "clk_gmac1_rmii_cru", "clk_cpll_div20", 0, + RK3576_CLKGATE_CON(5), 14, GFLAGS), + GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0, + RK3576_CLKGATE_CON(5), 15, GFLAGS), + COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0, + RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS, + 
RK3576_CLKGATE_CON(6), 3, GFLAGS), + COMPOSITE(CLK_MIPI_CAMERAOUT_M1, "clk_mipi_cameraout_m1", mux_24m_spll_gpll_cpll_p, 0, + RK3576_CLKSEL_CON(39), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(6), 4, GFLAGS), + COMPOSITE(CLK_MIPI_CAMERAOUT_M2, "clk_mipi_cameraout_m2", mux_24m_spll_gpll_cpll_p, 0, + RK3576_CLKSEL_CON(40), 8, 2, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(6), 5, GFLAGS), + COMPOSITE(MCLK_PDM0_SRC_TOP, "mclk_pdm0_src_top", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(41), 7, 3, MFLAGS, 2, 5, DFLAGS, + RK3576_CLKGATE_CON(6), 8, GFLAGS), + + /* bus */ + COMPOSITE_NODIV(HCLK_BUS_ROOT, "hclk_bus_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(55), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(11), 0, GFLAGS), + COMPOSITE_NODIV(PCLK_BUS_ROOT, "pclk_bus_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(55), 2, 2, MFLAGS, + RK3576_CLKGATE_CON(11), 1, GFLAGS), + COMPOSITE(ACLK_BUS_ROOT, "aclk_bus_root", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(55), 9, 1, MFLAGS, 4, 5, DFLAGS, + RK3576_CLKGATE_CON(11), 2, GFLAGS), + GATE(HCLK_CAN0, "hclk_can0", "hclk_bus_root", 0, + RK3576_CLKGATE_CON(11), 6, GFLAGS), + COMPOSITE(CLK_CAN0, "clk_can0", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(56), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(11), 7, GFLAGS), + GATE(HCLK_CAN1, "hclk_can1", "hclk_bus_root", 0, + RK3576_CLKGATE_CON(11), 8, GFLAGS), + COMPOSITE(CLK_CAN1, "clk_can1", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(56), 12, 2, MFLAGS, 7, 5, DFLAGS, + RK3576_CLKGATE_CON(11), 9, GFLAGS), + GATE(CLK_KEY_SHIFT, "clk_key_shift", "xin24m", CLK_IS_CRITICAL, + RK3576_CLKGATE_CON(11), 15, GFLAGS), + GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 0, GFLAGS), + GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 1, GFLAGS), + GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 2, GFLAGS), + GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 3, GFLAGS), + GATE(PCLK_I2C5, "pclk_i2c5", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 4, GFLAGS), + GATE(PCLK_I2C6, "pclk_i2c6", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 5, GFLAGS), + GATE(PCLK_I2C7, "pclk_i2c7", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 6, GFLAGS), + GATE(PCLK_I2C8, "pclk_i2c8", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 7, GFLAGS), + GATE(PCLK_I2C9, "pclk_i2c9", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 8, GFLAGS), + GATE(PCLK_WDT_BUSMCU, "pclk_wdt_busmcu", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(12), 9, GFLAGS), + GATE(TCLK_WDT_BUSMCU, "tclk_wdt_busmcu", "xin24m", 0, + RK3576_CLKGATE_CON(12), 10, GFLAGS), + GATE(ACLK_GIC, "aclk_gic", "aclk_bus_root", CLK_IS_CRITICAL, + RK3576_CLKGATE_CON(12), 11, GFLAGS), + COMPOSITE_NODIV(CLK_I2C1, "clk_i2c1", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(57), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(12), 12, GFLAGS), + COMPOSITE_NODIV(CLK_I2C2, "clk_i2c2", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(57), 2, 2, MFLAGS, + RK3576_CLKGATE_CON(12), 13, GFLAGS), + COMPOSITE_NODIV(CLK_I2C3, "clk_i2c3", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(57), 4, 2, MFLAGS, + RK3576_CLKGATE_CON(12), 14, GFLAGS), + COMPOSITE_NODIV(CLK_I2C4, "clk_i2c4", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(57), 6, 2, MFLAGS, + RK3576_CLKGATE_CON(12), 15, GFLAGS), + COMPOSITE_NODIV(CLK_I2C5, "clk_i2c5", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(57), 8, 2, MFLAGS, + RK3576_CLKGATE_CON(13), 0, GFLAGS), + COMPOSITE_NODIV(CLK_I2C6, "clk_i2c6", mux_200m_100m_50m_24m_p, 0, + 
RK3576_CLKSEL_CON(57), 10, 2, MFLAGS, + RK3576_CLKGATE_CON(13), 1, GFLAGS), + COMPOSITE_NODIV(CLK_I2C7, "clk_i2c7", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(57), 12, 2, MFLAGS, + RK3576_CLKGATE_CON(13), 2, GFLAGS), + COMPOSITE_NODIV(CLK_I2C8, "clk_i2c8", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(57), 14, 2, MFLAGS, + RK3576_CLKGATE_CON(13), 3, GFLAGS), + COMPOSITE_NODIV(CLK_I2C9, "clk_i2c9", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(58), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(13), 4, GFLAGS), + GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(13), 6, GFLAGS), + COMPOSITE(CLK_SARADC, "clk_saradc", gpll_24m_p, 0, + RK3576_CLKSEL_CON(58), 12, 1, MFLAGS, 4, 8, DFLAGS, + RK3576_CLKGATE_CON(13), 7, GFLAGS), + GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(13), 8, GFLAGS), + COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m", 0, + RK3576_CLKSEL_CON(59), 0, 8, DFLAGS, + RK3576_CLKGATE_CON(13), 9, GFLAGS), + GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(13), 10, GFLAGS), + GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(13), 11, GFLAGS), + GATE(PCLK_UART3, "pclk_uart3", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(13), 12, GFLAGS), + GATE(PCLK_UART4, "pclk_uart4", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(13), 13, GFLAGS), + GATE(PCLK_UART5, "pclk_uart5", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(13), 14, GFLAGS), + GATE(PCLK_UART6, "pclk_uart6", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(13), 15, GFLAGS), + GATE(PCLK_UART7, "pclk_uart7", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(14), 0, GFLAGS), + GATE(PCLK_UART8, "pclk_uart8", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(14), 1, GFLAGS), + GATE(PCLK_UART9, "pclk_uart9", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(14), 2, GFLAGS), + GATE(PCLK_UART10, "pclk_uart10", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(14), 3, GFLAGS), + GATE(PCLK_UART11, "pclk_uart11", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(14), 4, GFLAGS), + COMPOSITE(SCLK_UART0, "sclk_uart0", clk_uart_p, 0, + RK3576_CLKSEL_CON(60), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(14), 5, GFLAGS), + COMPOSITE(SCLK_UART2, "sclk_uart2", clk_uart_p, 0, + RK3576_CLKSEL_CON(61), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(14), 6, GFLAGS), + COMPOSITE(SCLK_UART3, "sclk_uart3", clk_uart_p, 0, + RK3576_CLKSEL_CON(62), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(14), 9, GFLAGS), + COMPOSITE(SCLK_UART4, "sclk_uart4", clk_uart_p, 0, + RK3576_CLKSEL_CON(63), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(14), 12, GFLAGS), + COMPOSITE(SCLK_UART5, "sclk_uart5", clk_uart_p, 0, + RK3576_CLKSEL_CON(64), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(14), 15, GFLAGS), + COMPOSITE(SCLK_UART6, "sclk_uart6", clk_uart_p, 0, + RK3576_CLKSEL_CON(65), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(15), 2, GFLAGS), + COMPOSITE(SCLK_UART7, "sclk_uart7", clk_uart_p, 0, + RK3576_CLKSEL_CON(66), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(15), 5, GFLAGS), + COMPOSITE(SCLK_UART8, "sclk_uart8", clk_uart_p, 0, + RK3576_CLKSEL_CON(67), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(15), 8, GFLAGS), + COMPOSITE(SCLK_UART9, "sclk_uart9", clk_uart_p, 0, + RK3576_CLKSEL_CON(68), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(15), 9, GFLAGS), + COMPOSITE(SCLK_UART10, "sclk_uart10", clk_uart_p, 0, + RK3576_CLKSEL_CON(69), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(15), 10, GFLAGS), + COMPOSITE(SCLK_UART11, "sclk_uart11", clk_uart_p, 0, + RK3576_CLKSEL_CON(70), 8, 3, MFLAGS, 0, 8, DFLAGS, + 
RK3576_CLKGATE_CON(15), 11, GFLAGS), + GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(15), 13, GFLAGS), + GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(15), 14, GFLAGS), + GATE(PCLK_SPI2, "pclk_spi2", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(15), 15, GFLAGS), + GATE(PCLK_SPI3, "pclk_spi3", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(16), 0, GFLAGS), + GATE(PCLK_SPI4, "pclk_spi4", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(16), 1, GFLAGS), + COMPOSITE_NODIV(CLK_SPI0, "clk_spi0", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(70), 13, 2, MFLAGS, + RK3576_CLKGATE_CON(16), 2, GFLAGS), + COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(71), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(16), 3, GFLAGS), + COMPOSITE_NODIV(CLK_SPI2, "clk_spi2", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(71), 2, 2, MFLAGS, + RK3576_CLKGATE_CON(16), 4, GFLAGS), + COMPOSITE_NODIV(CLK_SPI3, "clk_spi3", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(71), 4, 2, MFLAGS, + RK3576_CLKGATE_CON(16), 5, GFLAGS), + COMPOSITE_NODIV(CLK_SPI4, "clk_spi4", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(71), 6, 2, MFLAGS, + RK3576_CLKGATE_CON(16), 6, GFLAGS), + GATE(PCLK_WDT0, "pclk_wdt0", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(16), 7, GFLAGS), + GATE(TCLK_WDT0, "tclk_wdt0", "xin24m", 0, + RK3576_CLKGATE_CON(16), 8, GFLAGS), + GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(16), 10, GFLAGS), + COMPOSITE_NODIV(CLK_PWM1, "clk_pwm1", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(71), 8, 2, MFLAGS, + RK3576_CLKGATE_CON(16), 11, GFLAGS), + GATE(CLK_OSC_PWM1, "clk_osc_pwm1", "xin24m", 0, + RK3576_CLKGATE_CON(16), 13, GFLAGS), + GATE(CLK_RC_PWM1, "clk_rc_pwm1", "clk_pvtm_clkout", 0, + RK3576_CLKGATE_CON(16), 15, GFLAGS), + GATE(PCLK_BUSTIMER0, "pclk_bustimer0", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(17), 3, GFLAGS), + GATE(PCLK_BUSTIMER1, "pclk_bustimer1", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(17), 4, GFLAGS), + COMPOSITE_NODIV(CLK_TIMER0_ROOT, "clk_timer0_root", mux_100m_24m_p, 0, + RK3576_CLKSEL_CON(71), 14, 1, MFLAGS, + RK3576_CLKGATE_CON(17), 5, GFLAGS), + GATE(CLK_TIMER0, "clk_timer0", "clk_timer0_root", 0, + RK3576_CLKGATE_CON(17), 6, GFLAGS), + GATE(CLK_TIMER1, "clk_timer1", "clk_timer0_root", 0, + RK3576_CLKGATE_CON(17), 7, GFLAGS), + GATE(CLK_TIMER2, "clk_timer2", "clk_timer0_root", 0, + RK3576_CLKGATE_CON(17), 8, GFLAGS), + GATE(CLK_TIMER3, "clk_timer3", "clk_timer0_root", 0, + RK3576_CLKGATE_CON(17), 9, GFLAGS), + GATE(CLK_TIMER4, "clk_timer4", "clk_timer0_root", 0, + RK3576_CLKGATE_CON(17), 10, GFLAGS), + GATE(CLK_TIMER5, "clk_timer5", "clk_timer0_root", 0, + RK3576_CLKGATE_CON(17), 11, GFLAGS), + GATE(PCLK_MAILBOX0, "pclk_mailbox0", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(17), 13, GFLAGS), + GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(17), 15, GFLAGS), + GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m", 0, + RK3576_CLKGATE_CON(18), 0, GFLAGS), + GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(18), 1, GFLAGS), + GATE(DBCLK_GPIO2, "dbclk_gpio2", "xin24m", 0, + RK3576_CLKGATE_CON(18), 2, GFLAGS), + GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(18), 3, GFLAGS), + GATE(DBCLK_GPIO3, "dbclk_gpio3", "xin24m", 0, + RK3576_CLKGATE_CON(18), 4, GFLAGS), + GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(18), 5, GFLAGS), + GATE(DBCLK_GPIO4, "dbclk_gpio4", "xin24m", 0, + RK3576_CLKGATE_CON(18), 6, GFLAGS), + GATE(ACLK_DECOM, "aclk_decom", 
"aclk_bus_root", 0, + RK3576_CLKGATE_CON(18), 7, GFLAGS), + GATE(PCLK_DECOM, "pclk_decom", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(18), 8, GFLAGS), + COMPOSITE(DCLK_DECOM, "dclk_decom", gpll_spll_p, 0, + RK3576_CLKSEL_CON(72), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(18), 9, GFLAGS), + COMPOSITE_NODIV(CLK_TIMER1_ROOT, "clk_timer1_root", mux_100m_24m_p, 0, + RK3576_CLKSEL_CON(72), 6, 1, MFLAGS, + RK3576_CLKGATE_CON(18), 10, GFLAGS), + GATE(CLK_TIMER6, "clk_timer6", "clk_timer1_root", 0, + RK3576_CLKGATE_CON(18), 11, GFLAGS), + COMPOSITE(CLK_TIMER7, "clk_timer7", mux_100m_24m_lclk0_p, 0, + RK3576_CLKSEL_CON(72), 12, 2, MFLAGS, 7, 5, DFLAGS, + RK3576_CLKGATE_CON(18), 12, GFLAGS), + COMPOSITE(CLK_TIMER8, "clk_timer8", mux_100m_24m_lclk1_p, 0, + RK3576_CLKSEL_CON(73), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(18), 13, GFLAGS), + GATE(CLK_TIMER9, "clk_timer9", "clk_timer1_root", 0, + RK3576_CLKGATE_CON(18), 14, GFLAGS), + GATE(CLK_TIMER10, "clk_timer10", "clk_timer1_root", 0, + RK3576_CLKGATE_CON(18), 15, GFLAGS), + GATE(CLK_TIMER11, "clk_timer11", "clk_timer1_root", 0, + RK3576_CLKGATE_CON(19), 0, GFLAGS), + GATE(ACLK_DMAC0, "aclk_dmac0", "aclk_bus_root", 0, + RK3576_CLKGATE_CON(19), 1, GFLAGS), + GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_bus_root", 0, + RK3576_CLKGATE_CON(19), 2, GFLAGS), + GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_bus_root", 0, + RK3576_CLKGATE_CON(19), 3, GFLAGS), + GATE(ACLK_SPINLOCK, "aclk_spinlock", "aclk_bus_root", 0, + RK3576_CLKGATE_CON(19), 4, GFLAGS), + GATE(HCLK_I3C0, "hclk_i3c0", "hclk_bus_root", 0, + RK3576_CLKGATE_CON(19), 7, GFLAGS), + GATE(HCLK_I3C1, "hclk_i3c1", "hclk_bus_root", 0, + RK3576_CLKGATE_CON(19), 9, GFLAGS), + COMPOSITE_NODIV(HCLK_BUS_CM0_ROOT, "hclk_bus_cm0_root", mux_400m_200m_100m_24m_p, 0, + RK3576_CLKSEL_CON(73), 13, 2, MFLAGS, + RK3576_CLKGATE_CON(19), 10, GFLAGS), + GATE(FCLK_BUS_CM0_CORE, "fclk_bus_cm0_core", "hclk_bus_cm0_root", 0, + RK3576_CLKGATE_CON(19), 12, GFLAGS), + COMPOSITE(CLK_BUS_CM0_RTC, "clk_bus_cm0_rtc", mux_24m_32k_p, 0, + RK3576_CLKSEL_CON(74), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(19), 14, GFLAGS), + GATE(PCLK_PMU2, "pclk_pmu2", "pclk_bus_root", CLK_IS_CRITICAL, + RK3576_CLKGATE_CON(19), 15, GFLAGS), + GATE(PCLK_PWM2, "pclk_pwm2", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(20), 4, GFLAGS), + COMPOSITE_NODIV(CLK_PWM2, "clk_pwm2", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(74), 6, 2, MFLAGS, + RK3576_CLKGATE_CON(20), 5, GFLAGS), + GATE(CLK_OSC_PWM2, "clk_osc_pwm2", "xin24m", 0, + RK3576_CLKGATE_CON(20), 7, GFLAGS), + GATE(CLK_RC_PWM2, "clk_rc_pwm2", "clk_pvtm_clkout", 0, + RK3576_CLKGATE_CON(20), 6, GFLAGS), + COMPOSITE_NODIV(CLK_FREQ_PWM1, "clk_freq_pwm1", clk_freq_pwm1_p, 0, + RK3576_CLKSEL_CON(74), 8, 3, MFLAGS, + RK3576_CLKGATE_CON(20), 8, GFLAGS), + COMPOSITE_NODIV(CLK_COUNTER_PWM1, "clk_counter_pwm1", clk_counter_pwm1_p, 0, + RK3576_CLKSEL_CON(74), 11, 3, MFLAGS, + RK3576_CLKGATE_CON(20), 9, GFLAGS), + COMPOSITE_NODIV(SAI_SCLKIN_FREQ, "sai_sclkin_freq", sai_sclkin_freq_p, 0, + RK3576_CLKSEL_CON(75), 0, 3, MFLAGS, + RK3576_CLKGATE_CON(20), 10, GFLAGS), + COMPOSITE_NODIV(SAI_SCLKIN_COUNTER, "sai_sclkin_counter", sai_sclkin_freq_p, 0, + RK3576_CLKSEL_CON(75), 3, 3, MFLAGS, + RK3576_CLKGATE_CON(20), 11, GFLAGS), + COMPOSITE(CLK_I3C0, "clk_i3c0", gpll_cpll_aupll_spll_p, 0, + RK3576_CLKSEL_CON(78), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(20), 12, GFLAGS), + COMPOSITE(CLK_I3C1, "clk_i3c1", gpll_cpll_aupll_spll_p, 0, + RK3576_CLKSEL_CON(78), 12, 2, MFLAGS, 7, 5, DFLAGS, + RK3576_CLKGATE_CON(20), 13, 
GFLAGS), + GATE(PCLK_CSIDPHY1, "pclk_csidphy1", "pclk_bus_root", 0, + RK3576_CLKGATE_CON(40), 2, GFLAGS), + + /* cci */ + COMPOSITE(PCLK_CCI_ROOT, "pclk_cci_root", mux_24m_ccipvtpll_gpll_lpll_p, CLK_IS_CRITICAL, + RK3576_CCI_CLKSEL_CON(4), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CCI_CLKGATE_CON(1), 10, GFLAGS), + COMPOSITE(ACLK_CCI_ROOT, "aclk_cci_root", mux_24m_ccipvtpll_gpll_lpll_p, CLK_IS_CRITICAL, + RK3576_CCI_CLKSEL_CON(4), 12, 2, MFLAGS, 7, 5, DFLAGS, + RK3576_CCI_CLKGATE_CON(1), 11, GFLAGS), + + /* center */ + COMPOSITE_DIV_OFFSET(ACLK_CENTER_ROOT, "aclk_center_root", gpll_cpll_spll_aupll_bpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(168), 5, 3, MFLAGS, + RK3576_CLKSEL_CON(167), 9, 5, DFLAGS, + RK3576_CLKGATE_CON(72), 0, GFLAGS), + COMPOSITE_NODIV(ACLK_CENTER_LOW_ROOT, "aclk_center_low_root", mux_500m_250m_100m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(168), 8, 2, MFLAGS, + RK3576_CLKGATE_CON(72), 1, GFLAGS), + COMPOSITE_NODIV(HCLK_CENTER_ROOT, "hclk_center_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(168), 10, 2, MFLAGS, + RK3576_CLKGATE_CON(72), 2, GFLAGS), + COMPOSITE_NODIV(PCLK_CENTER_ROOT, "pclk_center_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(168), 12, 2, MFLAGS, + RK3576_CLKGATE_CON(72), 3, GFLAGS), + GATE(ACLK_DMA2DDR, "aclk_dma2ddr", "aclk_center_root", CLK_IGNORE_UNUSED, + RK3576_CLKGATE_CON(72), 5, GFLAGS), + GATE(ACLK_DDR_SHAREMEM, "aclk_ddr_sharemem", "aclk_center_low_root", CLK_IGNORE_UNUSED, + RK3576_CLKGATE_CON(72), 6, GFLAGS), + GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_center_root", CLK_IGNORE_UNUSED, + RK3576_CLKGATE_CON(72), 10, GFLAGS), + GATE(PCLK_SHAREMEM, "pclk_sharemem", "pclk_center_root", CLK_IGNORE_UNUSED, + RK3576_CLKGATE_CON(72), 11, GFLAGS), + + /* ddr */ + COMPOSITE(PCLK_DDR_ROOT, "pclk_ddr_root", gpll_cpll_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(76), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(21), 0, GFLAGS), + GATE(PCLK_DDR_MON_CH0, "pclk_ddr_mon_ch0", "pclk_ddr_root", CLK_IGNORE_UNUSED, + RK3576_CLKGATE_CON(21), 1, GFLAGS), + COMPOSITE(HCLK_DDR_ROOT, "hclk_ddr_root", gpll_cpll_p, CLK_IGNORE_UNUSED, + RK3576_CLKSEL_CON(77), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(22), 11, GFLAGS), + GATE(FCLK_DDR_CM0_CORE, "fclk_ddr_cm0_core", "hclk_ddr_root", CLK_IS_CRITICAL, + RK3576_CLKGATE_CON(22), 15, GFLAGS), + COMPOSITE_NODIV(CLK_DDR_TIMER_ROOT, "clk_ddr_timer_root", mux_100m_24m_p, 0, + RK3576_CLKSEL_CON(77), 6, 1, MFLAGS, + RK3576_CLKGATE_CON(23), 3, GFLAGS), + GATE(CLK_DDR_TIMER0, "clk_ddr_timer0", "clk_ddr_timer_root", 0, + RK3576_CLKGATE_CON(23), 4, GFLAGS), + GATE(CLK_DDR_TIMER1, "clk_ddr_timer1", "clk_ddr_timer_root", 0, + RK3576_CLKGATE_CON(23), 5, GFLAGS), + GATE(TCLK_WDT_DDR, "tclk_wdt_ddr", "xin24m", 0, + RK3576_CLKGATE_CON(23), 6, GFLAGS), + GATE(PCLK_WDT, "pclk_wdt", "pclk_ddr_root", 0, + RK3576_CLKGATE_CON(23), 7, GFLAGS), + GATE(PCLK_TIMER, "pclk_timer", "pclk_ddr_root", 0, + RK3576_CLKGATE_CON(23), 8, GFLAGS), + COMPOSITE(CLK_DDR_CM0_RTC, "clk_ddr_cm0_rtc", mux_24m_32k_p, 0, + RK3576_CLKSEL_CON(77), 12, 1, MFLAGS, 7, 5, DFLAGS, + RK3576_CLKGATE_CON(23), 10, GFLAGS), + + /* gpu */ + COMPOSITE(CLK_GPU_SRC_PRE, "clk_gpu_src_pre", gpll_cpll_aupll_spll_lpll_p, 0, + RK3576_CLKSEL_CON(165), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(69), 1, GFLAGS), + GATE(CLK_GPU, "clk_gpu", "clk_gpu_src_pre", 0, + RK3576_CLKGATE_CON(69), 3, GFLAGS), + COMPOSITE_NODIV(PCLK_GPU_ROOT, "pclk_gpu_root", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(166), 10, 2, MFLAGS, + RK3576_CLKGATE_CON(69), 8, 
GFLAGS), + + /* npu */ + COMPOSITE_NODIV(HCLK_RKNN_ROOT, "hclk_rknn_root", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(86), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(31), 4, GFLAGS), + COMPOSITE(CLK_RKNN_DSU0, "clk_rknn_dsu0", gpll_cpll_aupll_spll_p, 0, + RK3576_CLKSEL_CON(86), 7, 2, MFLAGS, 2, 5, DFLAGS, + RK3576_CLKGATE_CON(31), 5, GFLAGS), + GATE(ACLK_RKNN0, "aclk_rknn0", "clk_rknn_dsu0", 0, + RK3576_CLKGATE_CON(28), 9, GFLAGS), + GATE(ACLK_RKNN1, "aclk_rknn1", "clk_rknn_dsu0", 0, + RK3576_CLKGATE_CON(29), 0, GFLAGS), + COMPOSITE_NODIV(PCLK_NPUTOP_ROOT, "pclk_nputop_root", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(87), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(31), 8, GFLAGS), + GATE(PCLK_NPU_TIMER, "pclk_npu_timer", "pclk_nputop_root", 0, + RK3576_CLKGATE_CON(31), 10, GFLAGS), + COMPOSITE_NODIV(CLK_NPUTIMER_ROOT, "clk_nputimer_root", mux_100m_24m_p, 0, + RK3576_CLKSEL_CON(87), 2, 1, MFLAGS, + RK3576_CLKGATE_CON(31), 11, GFLAGS), + GATE(CLK_NPUTIMER0, "clk_nputimer0", "clk_nputimer_root", 0, + RK3576_CLKGATE_CON(31), 12, GFLAGS), + GATE(CLK_NPUTIMER1, "clk_nputimer1", "clk_nputimer_root", 0, + RK3576_CLKGATE_CON(31), 13, GFLAGS), + GATE(PCLK_NPU_WDT, "pclk_npu_wdt", "pclk_nputop_root", 0, + RK3576_CLKGATE_CON(31), 14, GFLAGS), + GATE(TCLK_NPU_WDT, "tclk_npu_wdt", "xin24m", 0, + RK3576_CLKGATE_CON(31), 15, GFLAGS), + GATE(ACLK_RKNN_CBUF, "aclk_rknn_cbuf", "clk_rknn_dsu0", 0, + RK3576_CLKGATE_CON(32), 0, GFLAGS), + COMPOSITE_NODIV(HCLK_NPU_CM0_ROOT, "hclk_npu_cm0_root", mux_400m_200m_100m_24m_p, 0, + RK3576_CLKSEL_CON(87), 3, 2, MFLAGS, + RK3576_CLKGATE_CON(32), 5, GFLAGS), + GATE(FCLK_NPU_CM0_CORE, "fclk_npu_cm0_core", "hclk_npu_cm0_root", 0, + RK3576_CLKGATE_CON(32), 7, GFLAGS), + COMPOSITE(CLK_NPU_CM0_RTC, "clk_npu_cm0_rtc", mux_24m_32k_p, 0, + RK3576_CLKSEL_CON(87), 10, 1, MFLAGS, 5, 5, DFLAGS, + RK3576_CLKGATE_CON(32), 9, GFLAGS), + GATE(HCLK_RKNN_CBUF, "hclk_rknn_cbuf", "hclk_rknn_root", 0, + RK3576_CLKGATE_CON(32), 12, GFLAGS), + + /* nvm */ + COMPOSITE_NODIV(HCLK_NVM_ROOT, "hclk_nvm_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(88), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(33), 0, GFLAGS), + COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(88), 7, 1, MFLAGS, 2, 5, DFLAGS, + RK3576_CLKGATE_CON(33), 1, GFLAGS), + COMPOSITE(SCLK_FSPI_X2, "sclk_fspi_x2", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(89), 6, 2, MFLAGS, 0, 6, DFLAGS, + RK3576_CLKGATE_CON(33), 6, GFLAGS), + GATE(HCLK_FSPI, "hclk_fspi", "hclk_nvm_root", 0, + RK3576_CLKGATE_CON(33), 7, GFLAGS), + COMPOSITE(CCLK_SRC_EMMC, "cclk_src_emmc", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(89), 14, 2, MFLAGS, 8, 6, DFLAGS, + RK3576_CLKGATE_CON(33), 8, GFLAGS), + GATE(HCLK_EMMC, "hclk_emmc", "hclk_nvm_root", 0, + RK3576_CLKGATE_CON(33), 9, GFLAGS), + GATE(ACLK_EMMC, "aclk_emmc", "aclk_nvm_root", 0, + RK3576_CLKGATE_CON(33), 10, GFLAGS), + COMPOSITE_NODIV(BCLK_EMMC, "bclk_emmc", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(90), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(33), 11, GFLAGS), + GATE(TCLK_EMMC, "tclk_emmc", "xin24m", 0, + RK3576_CLKGATE_CON(33), 12, GFLAGS), + + /* usb */ + COMPOSITE(ACLK_UFS_ROOT, "aclk_ufs_root", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(115), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(47), 0, GFLAGS), + COMPOSITE(ACLK_USB_ROOT, "aclk_usb_root", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(115), 11, 1, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(47), 1, GFLAGS), + COMPOSITE_NODIV(PCLK_USB_ROOT, "pclk_usb_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL, + 
RK3576_CLKSEL_CON(115), 12, 2, MFLAGS, + RK3576_CLKGATE_CON(47), 2, GFLAGS), + GATE(ACLK_USB3OTG0, "aclk_usb3otg0", "aclk_usb_root", 0, + RK3576_CLKGATE_CON(47), 5, GFLAGS), + GATE(CLK_REF_USB3OTG0, "clk_ref_usb3otg0", "xin24m", 0, + RK3576_CLKGATE_CON(47), 6, GFLAGS), + GATE(CLK_SUSPEND_USB3OTG0, "clk_suspend_usb3otg0", "xin24m", 0, + RK3576_CLKGATE_CON(47), 7, GFLAGS), + GATE(ACLK_MMU2, "aclk_mmu2", "aclk_usb_root", 0, + RK3576_CLKGATE_CON(47), 12, GFLAGS), + GATE(ACLK_SLV_MMU2, "aclk_slv_mmu2", "aclk_usb_root", 0, + RK3576_CLKGATE_CON(47), 13, GFLAGS), + GATE(ACLK_UFS_SYS, "aclk_ufs_sys", "aclk_ufs_root", 0, + RK3576_CLKGATE_CON(47), 15, GFLAGS), + + /* vdec */ + COMPOSITE_NODIV(HCLK_RKVDEC_ROOT, "hclk_rkvdec_root", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(110), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(45), 0, GFLAGS), + COMPOSITE(ACLK_RKVDEC_ROOT, "aclk_rkvdec_root", gpll_cpll_aupll_spll_p, 0, + RK3576_CLKSEL_CON(110), 7, 2, MFLAGS, 2, 5, DFLAGS, + RK3576_CLKGATE_CON(45), 1, GFLAGS), + COMPOSITE(ACLK_RKVDEC_ROOT_BAK, "aclk_rkvdec_root_bak", cpll_vpll_lpll_bpll_p, 0, + RK3576_CLKSEL_CON(110), 14, 2, MFLAGS, 9, 5, DFLAGS, + RK3576_CLKGATE_CON(45), 2, GFLAGS), + GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_root", 0, + RK3576_CLKGATE_CON(45), 3, GFLAGS), + COMPOSITE(CLK_RKVDEC_HEVC_CA, "clk_rkvdec_hevc_ca", gpll_cpll_lpll_bpll_p, 0, + RK3576_CLKSEL_CON(111), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(45), 8, GFLAGS), + GATE(CLK_RKVDEC_CORE, "clk_rkvdec_core", "aclk_rkvdec_root", 0, + RK3576_CLKGATE_CON(45), 9, GFLAGS), + + /* venc */ + COMPOSITE_NODIV(HCLK_VEPU0_ROOT, "hclk_vepu0_root", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(124), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(51), 0, GFLAGS), + COMPOSITE(ACLK_VEPU0_ROOT, "aclk_vepu0_root", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(124), 7, 1, MFLAGS, 2, 5, DFLAGS, + RK3576_CLKGATE_CON(51), 1, GFLAGS), + COMPOSITE(CLK_VEPU0_CORE, "clk_vepu0_core", gpll_cpll_spll_lpll_bpll_p, 0, + RK3576_CLKSEL_CON(124), 13, 3, MFLAGS, 8, 5, DFLAGS, + RK3576_CLKGATE_CON(51), 6, GFLAGS), + GATE(HCLK_VEPU0, "hclk_vepu0", "hclk_vepu0_root", 0, + RK3576_CLKGATE_CON(51), 4, GFLAGS), + GATE(ACLK_VEPU0, "aclk_vepu0", "aclk_vepu0_root", 0, + RK3576_CLKGATE_CON(51), 5, GFLAGS), + + /* vi */ + COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_spll_isppvtpll_bpll_lpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(128), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(53), 0, GFLAGS), + COMPOSITE_NOMUX(ACLK_VI_ROOT_INTER, "aclk_vi_root_inter", "aclk_vi_root", 0, + RK3576_CLKSEL_CON(130), 10, 3, DFLAGS, + RK3576_CLKGATE_CON(54), 13, GFLAGS), + COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", hclk_vi_root_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(128), 8, 2, MFLAGS, + RK3576_CLKGATE_CON(53), 1, GFLAGS), + COMPOSITE_NODIV(PCLK_VI_ROOT, "pclk_vi_root", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(128), 10, 2, MFLAGS, + RK3576_CLKGATE_CON(53), 2, GFLAGS), + COMPOSITE(DCLK_VICAP, "dclk_vicap", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(129), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(53), 6, GFLAGS), + GATE(ACLK_VICAP, "aclk_vicap", "aclk_vi_root", 0, + RK3576_CLKGATE_CON(53), 7, GFLAGS), + GATE(HCLK_VICAP, "hclk_vicap", "hclk_vi_root", 0, + RK3576_CLKGATE_CON(53), 8, GFLAGS), + COMPOSITE(CLK_ISP_CORE, "clk_isp_core", gpll_spll_isppvtpll_bpll_lpll_p, 0, + RK3576_CLKSEL_CON(129), 11, 3, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(53), 9, GFLAGS), + GATE(CLK_ISP_CORE_MARVIN, "clk_isp_core_marvin", "clk_isp_core", 0, + RK3576_CLKGATE_CON(53), 10, GFLAGS), + GATE(CLK_ISP_CORE_VICAP, 
"clk_isp_core_vicap", "clk_isp_core", 0, + RK3576_CLKGATE_CON(53), 11, GFLAGS), + GATE(ACLK_ISP, "aclk_isp", "aclk_vi_root", 0, + RK3576_CLKGATE_CON(53), 12, GFLAGS), + GATE(HCLK_ISP, "hclk_isp", "hclk_vi_root", 0, + RK3576_CLKGATE_CON(53), 13, GFLAGS), + GATE(ACLK_VPSS, "aclk_vpss", "aclk_vi_root", 0, + RK3576_CLKGATE_CON(53), 15, GFLAGS), + GATE(HCLK_VPSS, "hclk_vpss", "hclk_vi_root", 0, + RK3576_CLKGATE_CON(54), 0, GFLAGS), + GATE(CLK_CORE_VPSS, "clk_core_vpss", "clk_isp_core", 0, + RK3576_CLKGATE_CON(54), 1, GFLAGS), + GATE(PCLK_CSI_HOST_0, "pclk_csi_host_0", "pclk_vi_root", 0, + RK3576_CLKGATE_CON(54), 4, GFLAGS), + GATE(PCLK_CSI_HOST_1, "pclk_csi_host_1", "pclk_vi_root", 0, + RK3576_CLKGATE_CON(54), 5, GFLAGS), + GATE(PCLK_CSI_HOST_2, "pclk_csi_host_2", "pclk_vi_root", 0, + RK3576_CLKGATE_CON(54), 6, GFLAGS), + GATE(PCLK_CSI_HOST_3, "pclk_csi_host_3", "pclk_vi_root", 0, + RK3576_CLKGATE_CON(54), 7, GFLAGS), + GATE(PCLK_CSI_HOST_4, "pclk_csi_host_4", "pclk_vi_root", 0, + RK3576_CLKGATE_CON(54), 8, GFLAGS), + COMPOSITE_NODIV(ICLK_CSIHOST01, "iclk_csihost01", mux_400m_200m_100m_24m_p, 0, + RK3576_CLKSEL_CON(130), 7, 2, MFLAGS, + RK3576_CLKGATE_CON(54), 10, GFLAGS), + GATE(ICLK_CSIHOST0, "iclk_csihost0", "iclk_csihost01", 0, + RK3576_CLKGATE_CON(54), 11, GFLAGS), + COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", gpll_cpll_aupll_spll_lpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(144), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(61), 0, GFLAGS), + COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(144), 10, 2, MFLAGS, + RK3576_CLKGATE_CON(61), 2, GFLAGS), + COMPOSITE_NODIV(PCLK_VOP_ROOT, "pclk_vop_root", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(144), 12, 2, MFLAGS, + RK3576_CLKGATE_CON(61), 3, GFLAGS), + GATE(HCLK_VOP, "hclk_vop", "hclk_vop_root", 0, + RK3576_CLKGATE_CON(61), 8, GFLAGS), + GATE(ACLK_VOP, "aclk_vop", "aclk_vop_root", 0, + RK3576_CLKGATE_CON(61), 9, GFLAGS), + COMPOSITE(DCLK_VP0_SRC, "dclk_vp0_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT, + RK3576_CLKSEL_CON(145), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(61), 10, GFLAGS), + COMPOSITE(DCLK_VP1_SRC, "dclk_vp1_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT, + RK3576_CLKSEL_CON(146), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(61), 11, GFLAGS), + COMPOSITE(DCLK_VP2_SRC, "dclk_vp2_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT, + RK3576_CLKSEL_CON(147), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(61), 12, GFLAGS), + COMPOSITE_NODIV(DCLK_VP0, "dclk_vp0", dclk_vp0_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3576_CLKSEL_CON(147), 11, 1, MFLAGS, + RK3576_CLKGATE_CON(61), 13, GFLAGS), + COMPOSITE_NODIV(DCLK_VP1, "dclk_vp1", dclk_vp1_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3576_CLKSEL_CON(147), 12, 1, MFLAGS, + RK3576_CLKGATE_CON(62), 0, GFLAGS), + COMPOSITE_NODIV(DCLK_VP2, "dclk_vp2", dclk_vp2_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + RK3576_CLKSEL_CON(147), 13, 1, MFLAGS, + RK3576_CLKGATE_CON(62), 1, GFLAGS), + + /* vo0 */ + COMPOSITE(ACLK_VO0_ROOT, "aclk_vo0_root", gpll_cpll_lpll_bpll_p, 0, + RK3576_CLKSEL_CON(149), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(63), 0, GFLAGS), + COMPOSITE_NODIV(HCLK_VO0_ROOT, "hclk_vo0_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(149), 7, 2, MFLAGS, + RK3576_CLKGATE_CON(63), 1, GFLAGS), + COMPOSITE_NODIV(PCLK_VO0_ROOT, "pclk_vo0_root", mux_150m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(149), 11, 2, MFLAGS, + 
RK3576_CLKGATE_CON(63), 3, GFLAGS), + GATE(ACLK_HDCP0, "aclk_hdcp0", "aclk_vo0_root", 0, + RK3576_CLKGATE_CON(63), 12, GFLAGS), + GATE(HCLK_HDCP0, "hclk_hdcp0", "hclk_vo0_root", 0, + RK3576_CLKGATE_CON(63), 13, GFLAGS), + GATE(PCLK_HDCP0, "pclk_hdcp0", "pclk_vo0_root", 0, + RK3576_CLKGATE_CON(63), 14, GFLAGS), + GATE(CLK_TRNG0_SKP, "clk_trng0_skp", "aclk_hdcp0", 0, + RK3576_CLKGATE_CON(64), 4, GFLAGS), + GATE(PCLK_DSIHOST0, "pclk_dsihost0", "pclk_vo0_root", 0, + RK3576_CLKGATE_CON(64), 5, GFLAGS), + COMPOSITE(CLK_DSIHOST0, "clk_dsihost0", gpll_cpll_spll_vpll_bpll_lpll_p, 0, + RK3576_CLKSEL_CON(151), 7, 3, MFLAGS, 0, 7, DFLAGS, + RK3576_CLKGATE_CON(64), 6, GFLAGS), + GATE(PCLK_HDMITX0, "pclk_hdmitx0", "pclk_vo0_root", 0, + RK3576_CLKGATE_CON(64), 7, GFLAGS), + COMPOSITE(CLK_HDMITX0_EARC, "clk_hdmitx0_earc", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(151), 15, 1, MFLAGS, 10, 5, DFLAGS, + RK3576_CLKGATE_CON(64), 8, GFLAGS), + GATE(CLK_HDMITX0_REF, "clk_hdmitx0_ref", "aclk_vo0_root", 0, + RK3576_CLKGATE_CON(64), 9, GFLAGS), + GATE(PCLK_EDP0, "pclk_edp0", "pclk_vo0_root", 0, + RK3576_CLKGATE_CON(64), 13, GFLAGS), + GATE(CLK_EDP0_24M, "clk_edp0_24m", "xin24m", 0, + RK3576_CLKGATE_CON(64), 14, GFLAGS), + COMPOSITE_NODIV(CLK_EDP0_200M, "clk_edp0_200m", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(152), 1, 2, MFLAGS, + RK3576_CLKGATE_CON(64), 15, GFLAGS), + COMPOSITE(MCLK_SAI5_8CH_SRC, "mclk_sai5_8ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(154), 10, 3, MFLAGS, 2, 8, DFLAGS, + RK3576_CLKGATE_CON(65), 3, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI5_8CH, "mclk_sai5_8ch", mclk_sai5_8ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(154), 13, 1, MFLAGS, + RK3576_CLKGATE_CON(65), 4, GFLAGS), + GATE(HCLK_SAI5_8CH, "hclk_sai5_8ch", "hclk_vo0_root", 0, + RK3576_CLKGATE_CON(65), 5, GFLAGS), + COMPOSITE(MCLK_SAI6_8CH_SRC, "mclk_sai6_8ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(155), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(65), 7, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI6_8CH, "mclk_sai6_8ch", mclk_sai6_8ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(155), 11, 1, MFLAGS, + RK3576_CLKGATE_CON(65), 8, GFLAGS), + GATE(HCLK_SAI6_8CH, "hclk_sai6_8ch", "hclk_vo0_root", 0, + RK3576_CLKGATE_CON(65), 9, GFLAGS), + GATE(HCLK_SPDIF_TX2, "hclk_spdif_tx2", "hclk_vo0_root", 0, + RK3576_CLKGATE_CON(65), 10, GFLAGS), + COMPOSITE(MCLK_SPDIF_TX2, "mclk_spdif_tx2", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(156), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(65), 13, GFLAGS), + GATE(HCLK_SPDIF_RX2, "hclk_spdif_rx2", "hclk_vo0_root", 0, + RK3576_CLKGATE_CON(65), 14, GFLAGS), + COMPOSITE(MCLK_SPDIF_RX2, "mclk_spdif_rx2", gpll_cpll_aupll_p, 0, + RK3576_CLKSEL_CON(156), 13, 2, MFLAGS, 8, 5, DFLAGS, + RK3576_CLKGATE_CON(65), 15, GFLAGS), + + /* vo1 */ + COMPOSITE(ACLK_VO1_ROOT, "aclk_vo1_root", gpll_cpll_lpll_bpll_p, 0, + RK3576_CLKSEL_CON(158), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(67), 1, GFLAGS), + COMPOSITE_NODIV(HCLK_VO1_ROOT, "hclk_vo1_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(158), 7, 2, MFLAGS, + RK3576_CLKGATE_CON(67), 2, GFLAGS), + COMPOSITE_NODIV(PCLK_VO1_ROOT, "pclk_vo1_root", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(158), 9, 2, MFLAGS, + RK3576_CLKGATE_CON(67), 3, GFLAGS), + COMPOSITE(MCLK_SAI8_8CH_SRC, "mclk_sai8_8ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(157), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(66), 1, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI8_8CH, "mclk_sai8_8ch", mclk_sai8_8ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(157), 11, 1, MFLAGS, + 
RK3576_CLKGATE_CON(66), 2, GFLAGS), + GATE(HCLK_SAI8_8CH, "hclk_sai8_8ch", "hclk_vo1_root", 0, + RK3576_CLKGATE_CON(66), 0, GFLAGS), + COMPOSITE(MCLK_SAI7_8CH_SRC, "mclk_sai7_8ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(159), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(67), 8, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI7_8CH, "mclk_sai7_8ch", mclk_sai7_8ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(159), 11, 1, MFLAGS, + RK3576_CLKGATE_CON(67), 9, GFLAGS), + GATE(HCLK_SAI7_8CH, "hclk_sai7_8ch", "hclk_vo1_root", 0, + RK3576_CLKGATE_CON(67), 10, GFLAGS), + GATE(HCLK_SPDIF_TX3, "hclk_spdif_tx3", "hclk_vo1_root", 0, + RK3576_CLKGATE_CON(67), 11, GFLAGS), + GATE(HCLK_SPDIF_TX4, "hclk_spdif_tx4", "hclk_vo1_root", 0, + RK3576_CLKGATE_CON(67), 12, GFLAGS), + GATE(HCLK_SPDIF_TX5, "hclk_spdif_tx5", "hclk_vo1_root", 0, + RK3576_CLKGATE_CON(67), 13, GFLAGS), + COMPOSITE(MCLK_SPDIF_TX3, "mclk_spdif_tx3", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(160), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(67), 14, GFLAGS), + COMPOSITE_NOMUX(CLK_AUX16MHZ_0, "clk_aux16mhz_0", "gpll", 0, + RK3576_CLKSEL_CON(161), 0, 8, DFLAGS, + RK3576_CLKGATE_CON(67), 15, GFLAGS), + GATE(ACLK_DP0, "aclk_dp0", "aclk_vo1_root", 0, + RK3576_CLKGATE_CON(68), 0, GFLAGS), + GATE(PCLK_DP0, "pclk_dp0", "pclk_vo1_root", 0, + RK3576_CLKGATE_CON(68), 1, GFLAGS), + GATE(ACLK_HDCP1, "aclk_hdcp1", "aclk_vo1_root", 0, + RK3576_CLKGATE_CON(68), 4, GFLAGS), + GATE(HCLK_HDCP1, "hclk_hdcp1", "hclk_vo1_root", 0, + RK3576_CLKGATE_CON(68), 5, GFLAGS), + GATE(PCLK_HDCP1, "pclk_hdcp1", "pclk_vo1_root", 0, + RK3576_CLKGATE_CON(68), 6, GFLAGS), + GATE(CLK_TRNG1_SKP, "clk_trng1_skp", "aclk_hdcp1", 0, + RK3576_CLKGATE_CON(68), 7, GFLAGS), + GATE(HCLK_SAI9_8CH, "hclk_sai9_8ch", "hclk_vo1_root", 0, + RK3576_CLKGATE_CON(68), 9, GFLAGS), + COMPOSITE(MCLK_SAI9_8CH_SRC, "mclk_sai9_8ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(162), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(68), 10, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI9_8CH, "mclk_sai9_8ch", mclk_sai9_8ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(162), 11, 1, MFLAGS, + RK3576_CLKGATE_CON(68), 11, GFLAGS), + COMPOSITE(MCLK_SPDIF_TX4, "mclk_spdif_tx4", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(163), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(68), 12, GFLAGS), + COMPOSITE(MCLK_SPDIF_TX5, "mclk_spdif_tx5", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(164), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(68), 13, GFLAGS), + + /* vpu */ + COMPOSITE(ACLK_VPU_ROOT, "aclk_vpu_root", gpll_spll_cpll_bpll_lpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(118), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(49), 0, GFLAGS), + COMPOSITE_NODIV(ACLK_VPU_MID_ROOT, "aclk_vpu_mid_root", mux_600m_400m_300m_24m_p, 0, + RK3576_CLKSEL_CON(118), 8, 2, MFLAGS, + RK3576_CLKGATE_CON(49), 1, GFLAGS), + COMPOSITE_NODIV(HCLK_VPU_ROOT, "hclk_vpu_root", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(118), 10, 2, MFLAGS, + RK3576_CLKGATE_CON(49), 2, GFLAGS), + COMPOSITE(ACLK_JPEG_ROOT, "aclk_jpeg_root", gpll_cpll_aupll_spll_p, 0, + RK3576_CLKSEL_CON(119), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(49), 3, GFLAGS), + COMPOSITE_NODIV(ACLK_VPU_LOW_ROOT, "aclk_vpu_low_root", mux_400m_200m_100m_24m_p, 0, + RK3576_CLKSEL_CON(119), 7, 2, MFLAGS, + RK3576_CLKGATE_CON(49), 4, GFLAGS), + GATE(HCLK_RGA2E_0, "hclk_rga2e_0", "hclk_vpu_root", 0, + RK3576_CLKGATE_CON(49), 13, GFLAGS), + GATE(ACLK_RGA2E_0, "aclk_rga2e_0", "aclk_vpu_root", 0, + RK3576_CLKGATE_CON(49), 14, GFLAGS), + COMPOSITE(CLK_CORE_RGA2E_0, "clk_core_rga2e_0", 
gpll_spll_cpll_bpll_lpll_p, 0, + RK3576_CLKSEL_CON(120), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(49), 15, GFLAGS), + GATE(ACLK_JPEG, "aclk_jpeg", "aclk_jpeg_root", 0, + RK3576_CLKGATE_CON(50), 0, GFLAGS), + GATE(HCLK_JPEG, "hclk_jpeg", "hclk_vpu_root", 0, + RK3576_CLKGATE_CON(50), 1, GFLAGS), + GATE(HCLK_VDPP, "hclk_vdpp", "hclk_vpu_root", 0, + RK3576_CLKGATE_CON(50), 2, GFLAGS), + GATE(ACLK_VDPP, "aclk_vdpp", "aclk_vpu_mid_root", 0, + RK3576_CLKGATE_CON(50), 3, GFLAGS), + COMPOSITE(CLK_CORE_VDPP, "clk_core_vdpp", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(120), 13, 1, MFLAGS, 8, 5, DFLAGS, + RK3576_CLKGATE_CON(50), 4, GFLAGS), + GATE(HCLK_RGA2E_1, "hclk_rga2e_1", "hclk_vpu_root", 0, + RK3576_CLKGATE_CON(50), 5, GFLAGS), + GATE(ACLK_RGA2E_1, "aclk_rga2e_1", "aclk_vpu_root", 0, + RK3576_CLKGATE_CON(50), 6, GFLAGS), + COMPOSITE(CLK_CORE_RGA2E_1, "clk_core_rga2e_1", gpll_spll_cpll_bpll_lpll_p, 0, + RK3576_CLKSEL_CON(121), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(50), 7, GFLAGS), + MUX(0, "dclk_ebc_frac_src_p", gpll_cpll_vpll_aupll_24m_p, 0, + RK3576_CLKSEL_CON(123), 0, 3, MFLAGS), + COMPOSITE_FRAC(DCLK_EBC_FRAC_SRC, "dclk_ebc_frac_src", "dclk_ebc_frac_src_p", 0, + RK3576_CLKSEL_CON(122), 0, + RK3576_CLKGATE_CON(50), 9, GFLAGS), + GATE(ACLK_EBC, "aclk_ebc", "aclk_vpu_low_root", 0, + RK3576_CLKGATE_CON(50), 11, GFLAGS), + GATE(HCLK_EBC, "hclk_ebc", "hclk_vpu_root", 0, + RK3576_CLKGATE_CON(50), 10, GFLAGS), + COMPOSITE(DCLK_EBC, "dclk_ebc", dclk_ebc_p, CLK_SET_RATE_NO_REPARENT, + RK3576_CLKSEL_CON(123), 12, 3, MFLAGS, 3, 9, DFLAGS, + RK3576_CLKGATE_CON(50), 12, GFLAGS), + + /* vepu */ + COMPOSITE_NODIV(HCLK_VEPU1_ROOT, "hclk_vepu1_root", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(178), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(78), 0, GFLAGS), + COMPOSITE(ACLK_VEPU1_ROOT, "aclk_vepu1_root", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(180), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(79), 0, GFLAGS), + GATE(HCLK_VEPU1, "hclk_vepu1", "hclk_vepu1_root", 0, + RK3576_CLKGATE_CON(79), 3, GFLAGS), + GATE(ACLK_VEPU1, "aclk_vepu1", "aclk_vepu1_root", 0, + RK3576_CLKGATE_CON(79), 4, GFLAGS), + COMPOSITE(CLK_VEPU1_CORE, "clk_vepu1_core", gpll_cpll_spll_lpll_bpll_p, 0, + RK3576_CLKSEL_CON(180), 11, 3, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(79), 5, GFLAGS), + + /* php */ + COMPOSITE_NODIV(PCLK_PHP_ROOT, "pclk_php_root", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(92), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(34), 0, GFLAGS), + COMPOSITE(ACLK_PHP_ROOT, "aclk_php_root", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(92), 9, 1, MFLAGS, 4, 5, DFLAGS, + RK3576_CLKGATE_CON(34), 7, GFLAGS), + GATE(PCLK_PCIE0, "pclk_pcie0", "pclk_php_root", 0, + RK3576_CLKGATE_CON(34), 13, GFLAGS), + GATE(CLK_PCIE0_AUX, "clk_pcie0_aux", "xin24m", 0, + RK3576_CLKGATE_CON(34), 14, GFLAGS), + GATE(ACLK_PCIE0_MST, "aclk_pcie0_mst", "aclk_php_root", 0, + RK3576_CLKGATE_CON(34), 15, GFLAGS), + GATE(ACLK_PCIE0_SLV, "aclk_pcie0_slv", "aclk_php_root", 0, + RK3576_CLKGATE_CON(35), 0, GFLAGS), + GATE(ACLK_PCIE0_DBI, "aclk_pcie0_dbi", "aclk_php_root", 0, + RK3576_CLKGATE_CON(35), 1, GFLAGS), + GATE(ACLK_USB3OTG1, "aclk_usb3otg1", "aclk_php_root", 0, + RK3576_CLKGATE_CON(35), 3, GFLAGS), + GATE(CLK_REF_USB3OTG1, "clk_ref_usb3otg1", "xin24m", 0, + RK3576_CLKGATE_CON(35), 4, GFLAGS), + GATE(CLK_SUSPEND_USB3OTG1, "clk_suspend_usb3otg1", "xin24m", 0, + RK3576_CLKGATE_CON(35), 5, GFLAGS), + GATE(ACLK_MMU0, "aclk_mmu0", "aclk_php_root", 0, + RK3576_CLKGATE_CON(35), 11, GFLAGS), + GATE(ACLK_SLV_MMU0, "aclk_slv_mmu0", "aclk_php_root", 0, + 
RK3576_CLKGATE_CON(35), 13, GFLAGS), + GATE(ACLK_MMU1, "aclk_mmu1", "aclk_php_root", 0, + RK3576_CLKGATE_CON(35), 14, GFLAGS), + GATE(ACLK_SLV_MMU1, "aclk_slv_mmu1", "aclk_php_root", 0, + RK3576_CLKGATE_CON(36), 0, GFLAGS), + GATE(PCLK_PCIE1, "pclk_pcie1", "pclk_php_root", 0, + RK3576_CLKGATE_CON(36), 7, GFLAGS), + GATE(CLK_PCIE1_AUX, "clk_pcie1_aux", "xin24m", 0, + RK3576_CLKGATE_CON(36), 8, GFLAGS), + GATE(ACLK_PCIE1_MST, "aclk_pcie1_mst", "aclk_php_root", 0, + RK3576_CLKGATE_CON(36), 9, GFLAGS), + GATE(ACLK_PCIE1_SLV, "aclk_pcie1_slv", "aclk_php_root", 0, + RK3576_CLKGATE_CON(36), 10, GFLAGS), + GATE(ACLK_PCIE1_DBI, "aclk_pcie1_dbi", "aclk_php_root", 0, + RK3576_CLKGATE_CON(36), 11, GFLAGS), + COMPOSITE(CLK_RXOOB0, "clk_rxoob0", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(93), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3576_CLKGATE_CON(37), 0, GFLAGS), + COMPOSITE(CLK_RXOOB1, "clk_rxoob1", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(93), 15, 1, MFLAGS, 8, 7, DFLAGS, + RK3576_CLKGATE_CON(37), 1, GFLAGS), + GATE(CLK_PMALIVE0, "clk_pmalive0", "xin24m", CLK_IS_CRITICAL, + RK3576_CLKGATE_CON(37), 2, GFLAGS), + GATE(CLK_PMALIVE1, "clk_pmalive1", "xin24m", CLK_IS_CRITICAL, + RK3576_CLKGATE_CON(37), 3, GFLAGS), + GATE(ACLK_SATA0, "aclk_sata0", "aclk_php_root", 0, + RK3576_CLKGATE_CON(37), 4, GFLAGS), + GATE(ACLK_SATA1, "aclk_sata1", "aclk_php_root", 0, + RK3576_CLKGATE_CON(37), 5, GFLAGS), + + /* audio */ + COMPOSITE_NODIV(HCLK_AUDIO_ROOT, "hclk_audio_root", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(42), 0, 2, MFLAGS, + RK3576_CLKGATE_CON(7), 1, GFLAGS), + GATE(HCLK_ASRC_2CH_0, "hclk_asrc_2ch_0", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(7), 3, GFLAGS), + GATE(HCLK_ASRC_2CH_1, "hclk_asrc_2ch_1", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(7), 4, GFLAGS), + GATE(HCLK_ASRC_4CH_0, "hclk_asrc_4ch_0", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(7), 5, GFLAGS), + GATE(HCLK_ASRC_4CH_1, "hclk_asrc_4ch_1", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(7), 6, GFLAGS), + COMPOSITE(CLK_ASRC_2CH_0, "clk_asrc_2ch_0", gpll_cpll_aupll_p, 0, + RK3576_CLKSEL_CON(42), 7, 2, MFLAGS, 2, 5, DFLAGS, + RK3576_CLKGATE_CON(7), 7, GFLAGS), + COMPOSITE(CLK_ASRC_2CH_1, "clk_asrc_2ch_1", gpll_cpll_aupll_p, 0, + RK3576_CLKSEL_CON(42), 14, 2, MFLAGS, 9, 5, DFLAGS, + RK3576_CLKGATE_CON(7), 8, GFLAGS), + COMPOSITE(CLK_ASRC_4CH_0, "clk_asrc_4ch_0", gpll_cpll_aupll_p, 0, + RK3576_CLKSEL_CON(43), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(7), 9, GFLAGS), + COMPOSITE(CLK_ASRC_4CH_1, "clk_asrc_4ch_1", gpll_cpll_aupll_p, 0, + RK3576_CLKSEL_CON(43), 12, 2, MFLAGS, 7, 5, DFLAGS, + RK3576_CLKGATE_CON(7), 10, GFLAGS), + COMPOSITE(MCLK_SAI0_8CH_SRC, "mclk_sai0_8ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(44), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(7), 11, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI0_8CH, "mclk_sai0_8ch", mclk_sai0_8ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(44), 11, 2, MFLAGS, + RK3576_CLKGATE_CON(7), 12, GFLAGS), + GATE(HCLK_SAI0_8CH, "hclk_sai0_8ch", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(7), 13, GFLAGS), + GATE(HCLK_SPDIF_RX0, "hclk_spdif_rx0", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(7), 14, GFLAGS), + COMPOSITE(MCLK_SPDIF_RX0, "mclk_spdif_rx0", gpll_cpll_aupll_p, 0, + RK3576_CLKSEL_CON(45), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(7), 15, GFLAGS), + GATE(HCLK_SPDIF_RX1, "hclk_spdif_rx1", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(8), 0, GFLAGS), + COMPOSITE(MCLK_SPDIF_RX1, "mclk_spdif_rx1", gpll_cpll_aupll_p, 0, + RK3576_CLKSEL_CON(45), 12, 2, MFLAGS, 7, 5, DFLAGS, + RK3576_CLKGATE_CON(8), 1, GFLAGS), + 
COMPOSITE(MCLK_SAI1_8CH_SRC, "mclk_sai1_8ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(46), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(8), 4, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI1_8CH, "mclk_sai1_8ch", mclk_sai1_8ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(46), 11, 1, MFLAGS, + RK3576_CLKGATE_CON(8), 5, GFLAGS), + GATE(HCLK_SAI1_8CH, "hclk_sai1_8ch", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(8), 6, GFLAGS), + COMPOSITE(MCLK_SAI2_2CH_SRC, "mclk_sai2_2ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(47), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(8), 7, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI2_2CH, "mclk_sai2_2ch", mclk_sai2_2ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(47), 11, 2, MFLAGS, + RK3576_CLKGATE_CON(8), 8, GFLAGS), + GATE(HCLK_SAI2_2CH, "hclk_sai2_2ch", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(8), 10, GFLAGS), + COMPOSITE(MCLK_SAI3_2CH_SRC, "mclk_sai3_2ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(48), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(8), 11, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI3_2CH, "mclk_sai3_2ch", mclk_sai3_2ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(48), 11, 2, MFLAGS, + RK3576_CLKGATE_CON(8), 12, GFLAGS), + GATE(HCLK_SAI3_2CH, "hclk_sai3_2ch", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(8), 14, GFLAGS), + COMPOSITE(MCLK_SAI4_2CH_SRC, "mclk_sai4_2ch_src", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(49), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(8), 15, GFLAGS), + COMPOSITE_NODIV(MCLK_SAI4_2CH, "mclk_sai4_2ch", mclk_sai4_2ch_p, CLK_SET_RATE_PARENT, + RK3576_CLKSEL_CON(49), 11, 2, MFLAGS, + RK3576_CLKGATE_CON(9), 0, GFLAGS), + GATE(HCLK_SAI4_2CH, "hclk_sai4_2ch", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(9), 2, GFLAGS), + GATE(HCLK_ACDCDIG_DSM, "hclk_acdcdig_dsm", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(9), 3, GFLAGS), + GATE(MCLK_ACDCDIG_DSM, "mclk_acdcdig_dsm", "mclk_sai4_2ch", 0, + RK3576_CLKGATE_CON(9), 4, GFLAGS), + COMPOSITE(CLK_PDM1, "clk_pdm1", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(50), 9, 3, MFLAGS, 0, 9, DFLAGS, + RK3576_CLKGATE_CON(9), 5, GFLAGS), + GATE(HCLK_PDM1, "hclk_pdm1", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(9), 7, GFLAGS), + GATE(CLK_PDM1_OUT, "clk_pdm1_out", "clk_pdm1", 0, + RK3576_CLKGATE_CON(3), 5, GFLAGS), + COMPOSITE(MCLK_PDM1, "mclk_pdm1", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(51), 5, 3, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(9), 8, GFLAGS), + GATE(HCLK_SPDIF_TX0, "hclk_spdif_tx0", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(9), 9, GFLAGS), + COMPOSITE(MCLK_SPDIF_TX0, "mclk_spdif_tx0", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(52), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(9), 10, GFLAGS), + GATE(HCLK_SPDIF_TX1, "hclk_spdif_tx1", "hclk_audio_root", 0, + RK3576_CLKGATE_CON(9), 11, GFLAGS), + COMPOSITE(MCLK_SPDIF_TX1, "mclk_spdif_tx1", audio_frac_int_p, 0, + RK3576_CLKSEL_CON(53), 8, 3, MFLAGS, 0, 8, DFLAGS, + RK3576_CLKGATE_CON(9), 12, GFLAGS), + GATE(CLK_SAI1_MCLKOUT, "clk_sai1_mclkout", "mclk_sai1_8ch", 0, + RK3576_CLKGATE_CON(9), 13, GFLAGS), + GATE(CLK_SAI2_MCLKOUT, "clk_sai2_mclkout", "mclk_sai2_2ch", 0, + RK3576_CLKGATE_CON(9), 14, GFLAGS), + GATE(CLK_SAI3_MCLKOUT, "clk_sai3_mclkout", "mclk_sai3_2ch", 0, + RK3576_CLKGATE_CON(9), 15, GFLAGS), + GATE(CLK_SAI4_MCLKOUT, "clk_sai4_mclkout", "mclk_sai4_2ch", 0, + RK3576_CLKGATE_CON(10), 0, GFLAGS), + GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0, + RK3576_CLKGATE_CON(10), 1, GFLAGS), + + /* sdgmac */ + COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(103), 0, 2, 
MFLAGS, + RK3576_CLKGATE_CON(42), 0, GFLAGS), + COMPOSITE(ACLK_SDGMAC_ROOT, "aclk_sdgmac_root", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(103), 7, 1, MFLAGS, 2, 5, DFLAGS, + RK3576_CLKGATE_CON(42), 1, GFLAGS), + COMPOSITE_NODIV(PCLK_SDGMAC_ROOT, "pclk_sdgmac_root", mux_100m_50m_24m_p, 0, + RK3576_CLKSEL_CON(103), 8, 2, MFLAGS, + RK3576_CLKGATE_CON(42), 2, GFLAGS), + GATE(ACLK_GMAC0, "aclk_gmac0", "aclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(42), 7, GFLAGS), + GATE(ACLK_GMAC1, "aclk_gmac1", "aclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(42), 8, GFLAGS), + GATE(PCLK_GMAC0, "pclk_gmac0", "pclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(42), 9, GFLAGS), + GATE(PCLK_GMAC1, "pclk_gmac1", "pclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(42), 10, GFLAGS), + COMPOSITE(CCLK_SRC_SDIO, "cclk_src_sdio", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(104), 6, 2, MFLAGS, 0, 6, DFLAGS, + RK3576_CLKGATE_CON(42), 11, GFLAGS), + GATE(HCLK_SDIO, "hclk_sdio", "hclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(42), 12, GFLAGS), + COMPOSITE(CLK_GMAC1_PTP_REF_SRC, "clk_gmac1_ptp_ref_src", clk_gmac1_ptp_ref_src_p, 0, + RK3576_CLKSEL_CON(104), 13, 2, MFLAGS, 8, 5, DFLAGS, + RK3576_CLKGATE_CON(42), 15, GFLAGS), + COMPOSITE(CLK_GMAC0_PTP_REF_SRC, "clk_gmac0_ptp_ref_src", clk_gmac0_ptp_ref_src_p, 0, + RK3576_CLKSEL_CON(105), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(43), 0, GFLAGS), + GATE(CLK_GMAC1_PTP_REF, "clk_gmac1_ptp_ref", "clk_gmac1_ptp_ref_src", 0, + RK3576_CLKGATE_CON(42), 13, GFLAGS), + GATE(CLK_GMAC0_PTP_REF, "clk_gmac0_ptp_ref", "clk_gmac0_ptp_ref_src", 0, + RK3576_CLKGATE_CON(42), 14, GFLAGS), + COMPOSITE(CCLK_SRC_SDMMC0, "cclk_src_sdmmc0", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(105), 13, 2, MFLAGS, 7, 6, DFLAGS, + RK3576_CLKGATE_CON(43), 1, GFLAGS), + GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(43), 2, GFLAGS), + COMPOSITE(SCLK_FSPI1_X2, "sclk_fspi1_x2", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(106), 6, 2, MFLAGS, 0, 6, DFLAGS, + RK3576_CLKGATE_CON(43), 3, GFLAGS), + GATE(HCLK_FSPI1, "hclk_fspi1", "hclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(43), 4, GFLAGS), + COMPOSITE(ACLK_DSMC_ROOT, "aclk_dsmc_root", gpll_cpll_p, CLK_IS_CRITICAL, + RK3576_CLKSEL_CON(106), 13, 1, MFLAGS, 8, 5, DFLAGS, + RK3576_CLKGATE_CON(43), 5, GFLAGS), + GATE(ACLK_DSMC, "aclk_dsmc", "aclk_dsmc_root", 0, + RK3576_CLKGATE_CON(43), 7, GFLAGS), + GATE(PCLK_DSMC, "pclk_dsmc", "pclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(43), 8, GFLAGS), + COMPOSITE(CLK_DSMC_SYS, "clk_dsmc_sys", gpll_cpll_p, 0, + RK3576_CLKSEL_CON(107), 5, 1, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(43), 9, GFLAGS), + GATE(HCLK_HSGPIO, "hclk_hsgpio", "hclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(43), 10, GFLAGS), + COMPOSITE(CLK_HSGPIO_TX, "clk_hsgpio_tx", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(107), 11, 2, MFLAGS, 6, 5, DFLAGS, + RK3576_CLKGATE_CON(43), 11, GFLAGS), + COMPOSITE(CLK_HSGPIO_RX, "clk_hsgpio_rx", gpll_cpll_24m_p, 0, + RK3576_CLKSEL_CON(108), 5, 2, MFLAGS, 0, 5, DFLAGS, + RK3576_CLKGATE_CON(43), 12, GFLAGS), + GATE(ACLK_HSGPIO, "aclk_hsgpio", "aclk_sdgmac_root", 0, + RK3576_CLKGATE_CON(43), 13, GFLAGS), + + /* phpphy */ + GATE(PCLK_PHPPHY_ROOT, "pclk_phpphy_root", "pclk_bus_root", CLK_IS_CRITICAL, + RK3576_PHP_CLKGATE_CON(0), 2, GFLAGS), + GATE(PCLK_PCIE2_COMBOPHY0, "pclk_pcie2_combophy0", "pclk_phpphy_root", 0, + RK3576_PHP_CLKGATE_CON(0), 5, GFLAGS), + GATE(PCLK_PCIE2_COMBOPHY1, "pclk_pcie2_combophy1", "pclk_phpphy_root", 0, + RK3576_PHP_CLKGATE_CON(0), 7, GFLAGS), + COMPOSITE_NOMUX(CLK_PCIE_100M_SRC, "clk_pcie_100m_src", "ppll", 0, + 
RK3576_PHP_CLKSEL_CON(0), 2, 5, DFLAGS, + RK3576_PHP_CLKGATE_CON(1), 1, GFLAGS), + COMPOSITE_NOMUX(CLK_PCIE_100M_NDUTY_SRC, "clk_pcie_100m_nduty_src", "ppll", 0, + RK3576_PHP_CLKSEL_CON(0), 7, 5, DFLAGS, + RK3576_PHP_CLKGATE_CON(1), 2, GFLAGS), + COMPOSITE_NODIV(CLK_REF_PCIE0_PHY, "clk_ref_pcie0_phy", clk_ref_pcie0_phy_p, 0, + RK3576_PHP_CLKSEL_CON(0), 12, 2, MFLAGS, + RK3576_PHP_CLKGATE_CON(1), 5, GFLAGS), + COMPOSITE_NODIV(CLK_REF_PCIE1_PHY, "clk_ref_pcie1_phy", clk_ref_pcie0_phy_p, 0, + RK3576_PHP_CLKSEL_CON(0), 14, 2, MFLAGS, + RK3576_PHP_CLKGATE_CON(1), 8, GFLAGS), + COMPOSITE_NOMUX(CLK_REF_MPHY_26M, "clk_ref_mphy_26m", "ppll", CLK_IS_CRITICAL, + RK3576_PHP_CLKSEL_CON(1), 0, 8, DFLAGS, + RK3576_PHP_CLKGATE_CON(1), 9, GFLAGS), + + /* pmu */ + GATE(CLK_200M_PMU_SRC, "clk_200m_pmu_src", "clk_gpll_div6", 0, + RK3576_PMU_CLKGATE_CON(3), 2, GFLAGS), + COMPOSITE_NOMUX(CLK_100M_PMU_SRC, "clk_100m_pmu_src", "cpll", 0, + RK3576_PMU_CLKSEL_CON(4), 4, 5, DFLAGS, + RK3576_PMU_CLKGATE_CON(3), 3, GFLAGS), + FACTOR_GATE(CLK_50M_PMU_SRC, "clk_50m_pmu_src", "clk_100m_pmu_src", 0, 1, 2, + RK3576_PMU_CLKGATE_CON(3), 4, GFLAGS), + COMPOSITE_NODIV(HCLK_PMU1_ROOT, "hclk_pmu1_root", mux_pmu200m_pmu100m_pmu50m_24m_p, CLK_IS_CRITICAL, + RK3576_PMU_CLKSEL_CON(4), 0, 2, MFLAGS, + RK3576_PMU_CLKGATE_CON(3), 0, GFLAGS), + COMPOSITE_NODIV(HCLK_PMU_CM0_ROOT, "hclk_pmu_cm0_root", mux_pmu200m_pmu100m_pmu50m_24m_p, 0, + RK3576_PMU_CLKSEL_CON(4), 2, 2, MFLAGS, + RK3576_PMU_CLKGATE_CON(3), 1, GFLAGS), + COMPOSITE_NODIV(PCLK_PMU0_ROOT, "pclk_pmu0_root", mux_pmu100m_pmu50m_24m_p, 0, + RK3576_PMU_CLKSEL_CON(20), 0, 2, MFLAGS, + RK3576_PMU_CLKGATE_CON(7), 0, GFLAGS), + GATE(PCLK_PMU0, "pclk_pmu0", "pclk_pmu0_root", CLK_IS_CRITICAL, + RK3576_PMU_CLKGATE_CON(7), 3, GFLAGS), + GATE(PCLK_PMU1_ROOT, "pclk_pmu1_root", "pclk_pmu0_root", CLK_IS_CRITICAL, + RK3576_PMU_CLKGATE_CON(7), 9, GFLAGS), + GATE(PCLK_PMU1, "pclk_pmu1", "pclk_pmu1_root", CLK_IS_CRITICAL, + RK3576_PMU_CLKGATE_CON(3), 15, GFLAGS), + GATE(CLK_PMU1, "clk_pmu1", "xin24m", CLK_IS_CRITICAL, + RK3576_PMU_CLKGATE_CON(4), 2, GFLAGS), + GATE(PCLK_PMUPHY_ROOT, "pclk_pmuphy_root", "pclk_pmu1_root", CLK_IS_CRITICAL, + RK3576_PMU_CLKGATE_CON(5), 0, GFLAGS), + GATE(PCLK_HDPTX_APB, "pclk_hdptx_apb", "pclk_pmuphy_root", 0, + RK3576_PMU_CLKGATE_CON(0), 1, GFLAGS), + GATE(PCLK_MIPI_DCPHY, "pclk_mipi_dcphy", "pclk_pmuphy_root", 0, + RK3576_PMU_CLKGATE_CON(0), 2, GFLAGS), + GATE(PCLK_CSIDPHY, "pclk_csidphy", "pclk_pmuphy_root", 0, + RK3576_PMU_CLKGATE_CON(0), 8, GFLAGS), + GATE(PCLK_USBDPPHY, "pclk_usbdpphy", "pclk_pmuphy_root", 0, + RK3576_PMU_CLKGATE_CON(0), 12, GFLAGS), + COMPOSITE_NOMUX(CLK_PMUPHY_REF_SRC, "clk_pmuphy_ref_src", "cpll", 0, + RK3576_PMU_CLKSEL_CON(0), 0, 5, DFLAGS, + RK3576_PMU_CLKGATE_CON(0), 13, GFLAGS), + GATE(CLK_USBDP_COMBO_PHY_IMMORTAL, "clk_usbdp_combo_phy_immortal", "xin24m", 0, + RK3576_PMU_CLKGATE_CON(0), 15, GFLAGS), + GATE(CLK_HDMITXHDP, "clk_hdmitxhdp", "xin24m", 0, + RK3576_PMU_CLKGATE_CON(1), 13, GFLAGS), + GATE(PCLK_MPHY, "pclk_mphy", "pclk_pmuphy_root", 0, + RK3576_PMU_CLKGATE_CON(2), 0, GFLAGS), + MUX(CLK_REF_OSC_MPHY, "clk_ref_osc_mphy", clk_ref_osc_mphy_p, 0, + RK3576_PMU_CLKSEL_CON(3), 0, 2, MFLAGS), + GATE(CLK_REF_UFS_CLKOUT, "clk_ref_ufs_clkout", "clk_ref_osc_mphy", 0, + RK3576_PMU_CLKGATE_CON(2), 5, GFLAGS), + GATE(FCLK_PMU_CM0_CORE, "fclk_pmu_cm0_core", "hclk_pmu_cm0_root", 0, + RK3576_PMU_CLKGATE_CON(3), 12, GFLAGS), + COMPOSITE(CLK_PMU_CM0_RTC, "clk_pmu_cm0_rtc", mux_24m_32k_p, 0, + RK3576_PMU_CLKSEL_CON(4), 14, 1, MFLAGS, 9, 5, DFLAGS, 
+ RK3576_PMU_CLKGATE_CON(3), 14, GFLAGS), + GATE(PCLK_PMU1WDT, "pclk_pmu1wdt", "pclk_pmu1_root", 0, + RK3576_PMU_CLKGATE_CON(4), 5, GFLAGS), + COMPOSITE_NODIV(TCLK_PMU1WDT, "tclk_pmu1wdt", mux_24m_32k_p, 0, + RK3576_PMU_CLKSEL_CON(4), 15, 1, MFLAGS, + RK3576_PMU_CLKGATE_CON(4), 6, GFLAGS), + GATE(PCLK_PMUTIMER, "pclk_pmutimer", "pclk_pmu1_root", 0, + RK3576_PMU_CLKGATE_CON(4), 7, GFLAGS), + COMPOSITE_NODIV(CLK_PMUTIMER_ROOT, "clk_pmutimer_root", mux_pmu100m_24m_32k_p, 0, + RK3576_PMU_CLKSEL_CON(5), 0, 2, MFLAGS, + RK3576_PMU_CLKGATE_CON(4), 8, GFLAGS), + GATE(CLK_PMUTIMER0, "clk_pmutimer0", "clk_pmutimer_root", 0, + RK3576_PMU_CLKGATE_CON(4), 9, GFLAGS), + GATE(CLK_PMUTIMER1, "clk_pmutimer1", "clk_pmutimer_root", 0, + RK3576_PMU_CLKGATE_CON(4), 10, GFLAGS), + GATE(PCLK_PMU1PWM, "pclk_pmu1pwm", "pclk_pmu1_root", 0, + RK3576_PMU_CLKGATE_CON(4), 11, GFLAGS), + COMPOSITE_NODIV(CLK_PMU1PWM, "clk_pmu1pwm", mux_pmu100m_pmu50m_24m_p, 0, + RK3576_PMU_CLKSEL_CON(5), 2, 2, MFLAGS, + RK3576_PMU_CLKGATE_CON(4), 12, GFLAGS), + GATE(CLK_PMU1PWM_OSC, "clk_pmu1pwm_osc", "xin24m", 0, + RK3576_PMU_CLKGATE_CON(4), 13, GFLAGS), + GATE(PCLK_I2C0, "pclk_i2c0", "pclk_pmu1_root", 0, + RK3576_PMU_CLKGATE_CON(5), 1, GFLAGS), + COMPOSITE_NODIV(CLK_I2C0, "clk_i2c0", mux_pmu200m_pmu100m_pmu50m_24m_p, 0, + RK3576_PMU_CLKSEL_CON(6), 7, 2, MFLAGS, + RK3576_PMU_CLKGATE_CON(5), 2, GFLAGS), + COMPOSITE_NODIV(SCLK_UART1, "sclk_uart1", uart1_p, 0, + RK3576_PMU_CLKSEL_CON(8), 0, 1, MFLAGS, + RK3576_PMU_CLKGATE_CON(5), 5, GFLAGS), + GATE(PCLK_UART1, "pclk_uart1", "pclk_pmu1_root", 0, + RK3576_PMU_CLKGATE_CON(5), 6, GFLAGS), + GATE(CLK_PDM0, "clk_pdm0", "clk_pdm0_src_top", 0, + RK3576_PMU_CLKGATE_CON(5), 13, GFLAGS), + GATE(HCLK_PDM0, "hclk_pdm0", "hclk_pmu1_root", 0, + RK3576_PMU_CLKGATE_CON(5), 15, GFLAGS), + GATE(MCLK_PDM0, "mclk_pdm0", "mclk_pdm0_src_top", 0, + RK3576_PMU_CLKGATE_CON(6), 0, GFLAGS), + GATE(HCLK_VAD, "hclk_vad", "hclk_pmu1_root", 0, + RK3576_PMU_CLKGATE_CON(6), 1, GFLAGS), + GATE(CLK_PDM0_OUT, "clk_pdm0_out", "clk_pdm0", 0, + RK3576_PMU_CLKGATE_CON(6), 8, GFLAGS), + COMPOSITE(CLK_HPTIMER_SRC, "clk_hptimer_src", cpll_24m_p, CLK_IS_CRITICAL, + RK3576_PMU_CLKSEL_CON(11), 6, 1, MFLAGS, 1, 5, DFLAGS, + RK3576_PMU_CLKGATE_CON(6), 10, GFLAGS), + GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pmu0_root", 0, + RK3576_PMU_CLKGATE_CON(7), 6, GFLAGS), + COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", mux_24m_32k_p, 0, + RK3576_PMU_CLKSEL_CON(20), 2, 1, MFLAGS, + RK3576_PMU_CLKGATE_CON(7), 7, GFLAGS), + GATE(CLK_OSC0_PMU1, "clk_osc0_pmu1", "xin24m", CLK_IS_CRITICAL, + RK3576_PMU_CLKGATE_CON(7), 8, GFLAGS), + GATE(CLK_PMU1PWM_RC, "clk_pmu1pwm_rc", "clk_pvtm_clkout", 0, + RK3576_PMU_CLKGATE_CON(5), 7, GFLAGS), + + /* phy ref */ + MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0, + RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS), + MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0, + RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS), + MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0, + RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS), + MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0, + RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS), + + /* secure ns */ + COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL, + RK3576_SECURE_NS_CLKSEL_CON(0), 0, 2, MFLAGS, + RK3576_SECURE_NS_CLKGATE_CON(0), 0, GFLAGS), + COMPOSITE_NODIV(HCLK_SECURE_NS, "hclk_secure_ns", mux_175m_116m_58m_24m_p, CLK_IS_CRITICAL, + RK3576_SECURE_NS_CLKSEL_CON(0), 2, 2, MFLAGS, + 
RK3576_SECURE_NS_CLKGATE_CON(0), 1, GFLAGS), + COMPOSITE_NODIV(PCLK_SECURE_NS, "pclk_secure_ns", mux_116m_58m_24m_p, CLK_IS_CRITICAL, + RK3576_SECURE_NS_CLKSEL_CON(0), 4, 2, MFLAGS, + RK3576_SECURE_NS_CLKGATE_CON(0), 2, GFLAGS), + GATE(HCLK_CRYPTO_NS, "hclk_crypto_ns", "hclk_secure_ns", 0, + RK3576_SECURE_NS_CLKGATE_CON(0), 3, GFLAGS), + GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_secure_ns", 0, + RK3576_SECURE_NS_CLKGATE_CON(0), 8, GFLAGS), + GATE(CLK_OTPC_NS, "clk_otpc_ns", "xin24m", 0, + RK3576_SECURE_NS_CLKGATE_CON(0), 9, GFLAGS), + GATE(ACLK_CRYPTO_NS, "aclk_crypto_ns", "aclk_secure_s", 0, + RK3576_NON_SECURE_GATING_CON00, 14, GFLAGS), + GATE(HCLK_TRNG_NS, "hclk_trng_ns", "hclk_secure_s", 0, + RK3576_NON_SECURE_GATING_CON00, 13, GFLAGS), + GATE(CLK_PKA_CRYPTO_NS, "clk_pka_crypto_ns", "clk_pka_crypto_s", 0, + RK3576_NON_SECURE_GATING_CON00, 1, GFLAGS), + + /* io */ + GATE(CLK_VICAP_I0CLK, "clk_vicap_i0clk", "clk_csihost0_clkdata_i", 0, + RK3576_CLKGATE_CON(59), 1, GFLAGS), + GATE(CLK_VICAP_I1CLK, "clk_vicap_i1clk", "clk_csihost1_clkdata_i", 0, + RK3576_CLKGATE_CON(59), 2, GFLAGS), + GATE(CLK_VICAP_I2CLK, "clk_vicap_i2clk", "clk_csihost2_clkdata_i", 0, + RK3576_CLKGATE_CON(59), 3, GFLAGS), + GATE(CLK_VICAP_I3CLK, "clk_vicap_i3clk", "clk_csihost3_clkdata_i", 0, + RK3576_CLKGATE_CON(59), 4, GFLAGS), + GATE(CLK_VICAP_I4CLK, "clk_vicap_i4clk", "clk_csihost4_clkdata_i", 0, + RK3576_CLKGATE_CON(59), 5, GFLAGS), +}; + +static void __init rk3576_clk_init(struct device_node *np) +{ + struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; + void __iomem *reg_base; + struct regmap *grf; + + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches, + ARRAY_SIZE(rk3576_clk_branches)) + 1; + + grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf"); + if (IS_ERR(grf)) { + pr_err("%s: could not get PMU0 GRF syscon\n", __func__); + return; + } + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_err("%s: could not map cru region\n", __func__); + return; + } + + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); + if (IS_ERR(ctx)) { + pr_err("%s: rockchip clk init failed\n", __func__); + iounmap(reg_base); + return; + } + + ctx->grf = grf; + + rockchip_clk_register_plls(ctx, rk3576_pll_clks, + ARRAY_SIZE(rk3576_pll_clks), + RK3576_GRF_SOC_STATUS0); + + rockchip_clk_register_armclk(ctx, ARMCLK_L, "armclk_l", + mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p), + &rk3576_cpulclk_data, rk3576_cpulclk_rates, + ARRAY_SIZE(rk3576_cpulclk_rates)); + rockchip_clk_register_armclk(ctx, ARMCLK_B, "armclk_b", + mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p), + &rk3576_cpubclk_data, rk3576_cpubclk_rates, + ARRAY_SIZE(rk3576_cpubclk_rates)); + + rockchip_clk_register_branches(ctx, rk3576_clk_branches, + ARRAY_SIZE(rk3576_clk_branches)); + + rk3576_rst_init(np, reg_base); + + rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL); + + rockchip_clk_of_add_provider(np, ctx); +} + +CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init); + +struct clk_rk3576_inits { + void (*inits)(struct device_node *np); +}; + +static const struct clk_rk3576_inits clk_rk3576_cru_init = { + .inits = rk3576_clk_init, +}; + +static const struct of_device_id clk_rk3576_match_table[] = { + { + .compatible = "rockchip,rk3576-cru", + .data = &clk_rk3576_cru_init, + }, + { } +}; + +static int clk_rk3576_probe(struct platform_device *pdev) +{ + const struct clk_rk3576_inits *init_data; + struct device *dev = &pdev->dev; + + init_data = device_get_match_data(dev); + if (!init_data) + return -EINVAL; + + if 
(init_data->inits) + init_data->inits(dev->of_node); + + return 0; +} + +static struct platform_driver clk_rk3576_driver = { + .probe = clk_rk3576_probe, + .driver = { + .name = "clk-rk3576", + .of_match_table = clk_rk3576_match_table, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver_probe(clk_rk3576_driver, clk_rk3576_probe); diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c index b30279a96dc8af..0ffaf639f807a6 100644 --- a/drivers/clk/rockchip/clk-rk3588.c +++ b/drivers/clk/rockchip/clk-rk3588.c @@ -526,7 +526,7 @@ PNAME(pmu_200m_100m_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src" }; PNAME(pmu_300m_24m_p) = { "clk_300m_src", "xin24m" }; PNAME(pmu_400m_24m_p) = { "clk_400m_src", "xin24m" }; PNAME(pmu_100m_50m_24m_src_p) = { "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" }; -PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "32k", "clk_pmu1_100m_src" }; +PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "xin32k", "clk_pmu1_100m_src" }; PNAME(hclk_pmu1_root_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" }; PNAME(hclk_pmu_cm0_root_p) = { "clk_pmu1_400m_src", "clk_pmu1_200m_src", "clk_pmu1_100m_src", "xin24m" }; PNAME(mclk_pdm0_p) = { "clk_pmu1_300m_src", "clk_pmu1_200m_src" }; @@ -2502,43 +2502,3 @@ static void __init rk3588_clk_init(struct device_node *np) } CLK_OF_DECLARE(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_init); - -struct clk_rk3588_inits { - void (*inits)(struct device_node *np); -}; - -static const struct clk_rk3588_inits clk_3588_cru_init = { - .inits = rk3588_clk_init, -}; - -static const struct of_device_id clk_rk3588_match_table[] = { - { - .compatible = "rockchip,rk3588-cru", - .data = &clk_3588_cru_init, - }, - { } -}; - -static int __init clk_rk3588_probe(struct platform_device *pdev) -{ - const struct clk_rk3588_inits *init_data; - struct device *dev = &pdev->dev; - - init_data = device_get_match_data(dev); - if (!init_data) - return -EINVAL; - - if (init_data->inits) - init_data->inits(dev->of_node); - - return 0; -} - -static struct platform_driver clk_rk3588_driver = { - .driver = { - .name = "clk-rk3588", - .of_match_table = clk_rk3588_match_table, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver_probe(clk_rk3588_driver, clk_rk3588_probe); diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index 73d2cbdc716b45..2fa7253c73b2cd 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -450,12 +450,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, struct rockchip_clk_branch *list, unsigned int nr_clk) { - struct clk *clk = NULL; + struct clk *clk; unsigned int idx; unsigned long flags; for (idx = 0; idx < nr_clk; idx++, list++) { flags = list->flags; + clk = NULL; /* catch simple muxes */ switch (list->branch_type) { diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index fd3b476dedda9a..f1957e1c117839 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -235,6 +235,58 @@ struct clk; #define RK3568_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180) #define RK3568_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200) +#define RK3576_PHP_CRU_BASE 0x8000 +#define RK3576_SECURE_NS_CRU_BASE 0x10000 +#define RK3576_PMU_CRU_BASE 0x20000 +#define RK3576_BIGCORE_CRU_BASE 0x38000 +#define RK3576_LITCORE_CRU_BASE 0x40000 +#define RK3576_CCI_CRU_BASE 0x48000 + +#define RK3576_PLL_CON(x) RK2928_PLL_CON(x) +#define RK3576_MODE_CON0 0x280 +#define RK3576_BPLL_MODE_CON0 (RK3576_BIGCORE_CRU_BASE + 0x280) +#define 
RK3576_LPLL_MODE_CON0 (RK3576_LITCORE_CRU_BASE + 0x280) +#define RK3576_PPLL_MODE_CON0 (RK3576_PHP_CRU_BASE + 0x280) +#define RK3576_CLKSEL_CON(x) ((x) * 0x4 + 0x300) +#define RK3576_CLKGATE_CON(x) ((x) * 0x4 + 0x800) +#define RK3576_SOFTRST_CON(x) ((x) * 0x4 + 0xa00) +#define RK3576_GLB_CNT_TH 0xc00 +#define RK3576_GLB_SRST_FST 0xc08 +#define RK3576_GLB_SRST_SND 0xc0c +#define RK3576_GLB_RST_CON 0xc10 +#define RK3576_GLB_RST_ST 0xc04 +#define RK3576_SDIO_CON0 0xC24 +#define RK3576_SDIO_CON1 0xC28 +#define RK3576_SDMMC_CON0 0xC30 +#define RK3576_SDMMC_CON1 0xC34 + +#define RK3576_PHP_CLKSEL_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0x300) +#define RK3576_PHP_CLKGATE_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0x800) +#define RK3576_PHP_SOFTRST_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0xa00) + +#define RK3576_PMU_PLL_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE) +#define RK3576_PMU_CLKSEL_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0x300) +#define RK3576_PMU_CLKGATE_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0x800) +#define RK3576_PMU_SOFTRST_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0xa00) + +#define RK3576_SECURE_NS_CLKSEL_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0x300) +#define RK3576_SECURE_NS_CLKGATE_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0x800) +#define RK3576_SECURE_NS_SOFTRST_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0xa00) + +#define RK3576_CCI_CLKSEL_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0x300) +#define RK3576_CCI_CLKGATE_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0x800) +#define RK3576_CCI_SOFTRST_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0xa00) + +#define RK3576_BPLL_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE) +#define RK3576_BIGCORE_CLKSEL_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0x300) +#define RK3576_BIGCORE_CLKGATE_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0x800) +#define RK3576_BIGCORE_SOFTRST_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0xa00) +#define RK3576_LPLL_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE) +#define RK3576_LITCORE_CLKSEL_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0x300) +#define RK3576_LITCORE_CLKGATE_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0x800) +#define RK3576_LITCORE_SOFTRST_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0xa00) +#define RK3576_NON_SECURE_GATING_CON00 0xc48 + #define RK3588_PHP_CRU_BASE 0x8000 #define RK3588_PMU_CRU_BASE 0x30000 #define RK3588_BIGCORE0_CRU_BASE 0x50000 @@ -287,6 +339,7 @@ enum rockchip_pll_type { pll_rk3399, pll_rk3588, pll_rk3588_core, + pll_rk3588_ddr, }; #define RK3036_PLL_RATE(_rate, _refdiv, _fbdiv, _postdiv1, \ @@ -1025,6 +1078,7 @@ static inline void rockchip_register_softrst(struct device_node *np, return rockchip_register_softrst_lut(np, NULL, num_regs, base, flags); } +void rk3576_rst_init(struct device_node *np, void __iomem *reg_base); void rk3588_rst_init(struct device_node *np, void __iomem *reg_base); #endif diff --git a/drivers/clk/rockchip/rst-rk3576.c b/drivers/clk/rockchip/rst-rk3576.c new file mode 100644 index 00000000000000..15cbb9bc0a4142 --- /dev/null +++ b/drivers/clk/rockchip/rst-rk3576.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. 
+ * Author: Detlev Casanova + * Based on Sebastien Reichel's implementation for RK3588 + */ + +#include +#include +#include +#include "clk.h" + +/* 0x27200000 + 0x0A00 */ +#define RK3576_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit) +/* 0x27208000 + 0x0A00 */ +#define RK3576_PHPCRU_RESET_OFFSET(id, reg, bit) [id] = (0x8000*4 + reg * 16 + bit) +/* 0x27210000 + 0x0A00 */ +#define RK3576_SECURENSCRU_RESET_OFFSET(id, reg, bit) [id] = (0x10000*4 + reg * 16 + bit) +/* 0x27220000 + 0x0A00 */ +#define RK3576_PMU1CRU_RESET_OFFSET(id, reg, bit) [id] = (0x20000*4 + reg * 16 + bit) + +/* mapping table for reset ID to register offset */ +static const int rk3576_register_offset[] = { + /* SOFTRST_CON01 */ + RK3576_CRU_RESET_OFFSET(SRST_A_TOP_BIU, 1, 3), + RK3576_CRU_RESET_OFFSET(SRST_P_TOP_BIU, 1, 5), + RK3576_CRU_RESET_OFFSET(SRST_A_TOP_MID_BIU, 1, 6), + RK3576_CRU_RESET_OFFSET(SRST_A_SECURE_HIGH_BIU, 1, 7), + RK3576_CRU_RESET_OFFSET(SRST_H_TOP_BIU, 1, 14), + + /* SOFTRST_CON02 */ + RK3576_CRU_RESET_OFFSET(SRST_H_VO0VOP_CHANNEL_BIU, 2, 0), + RK3576_CRU_RESET_OFFSET(SRST_A_VO0VOP_CHANNEL_BIU, 2, 1), + + /* SOFTRST_CON06 */ + RK3576_CRU_RESET_OFFSET(SRST_BISRINTF, 6, 2), + + /* SOFTRST_CON07 */ + RK3576_CRU_RESET_OFFSET(SRST_H_AUDIO_BIU, 7, 2), + RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_2CH_0, 7, 3), + RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_2CH_1, 7, 4), + RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_4CH_0, 7, 5), + RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_4CH_1, 7, 6), + RK3576_CRU_RESET_OFFSET(SRST_ASRC_2CH_0, 7, 7), + RK3576_CRU_RESET_OFFSET(SRST_ASRC_2CH_1, 7, 8), + RK3576_CRU_RESET_OFFSET(SRST_ASRC_4CH_0, 7, 9), + RK3576_CRU_RESET_OFFSET(SRST_ASRC_4CH_1, 7, 10), + RK3576_CRU_RESET_OFFSET(SRST_M_SAI0_8CH, 7, 12), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI0_8CH, 7, 13), + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX0, 7, 14), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX0, 7, 15), + + /* SOFTRST_CON08 */ + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX1, 8, 0), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX1, 8, 1), + RK3576_CRU_RESET_OFFSET(SRST_M_SAI1_8CH, 8, 5), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI1_8CH, 8, 6), + RK3576_CRU_RESET_OFFSET(SRST_M_SAI2_2CH, 8, 8), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI2_2CH, 8, 10), + RK3576_CRU_RESET_OFFSET(SRST_M_SAI3_2CH, 8, 12), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI3_2CH, 8, 14), + + /* SOFTRST_CON09 */ + RK3576_CRU_RESET_OFFSET(SRST_M_SAI4_2CH, 9, 0), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI4_2CH, 9, 2), + RK3576_CRU_RESET_OFFSET(SRST_H_ACDCDIG_DSM, 9, 3), + RK3576_CRU_RESET_OFFSET(SRST_M_ACDCDIG_DSM, 9, 4), + RK3576_CRU_RESET_OFFSET(SRST_PDM1, 9, 5), + RK3576_CRU_RESET_OFFSET(SRST_H_PDM1, 9, 7), + RK3576_CRU_RESET_OFFSET(SRST_M_PDM1, 9, 8), + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX0, 9, 9), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX0, 9, 10), + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX1, 9, 11), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX1, 9, 12), + + /* SOFTRST_CON11 */ + RK3576_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 11, 3), + RK3576_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 11, 4), + RK3576_CRU_RESET_OFFSET(SRST_P_CRU, 11, 5), + RK3576_CRU_RESET_OFFSET(SRST_H_CAN0, 11, 6), + RK3576_CRU_RESET_OFFSET(SRST_CAN0, 11, 7), + RK3576_CRU_RESET_OFFSET(SRST_H_CAN1, 11, 8), + RK3576_CRU_RESET_OFFSET(SRST_CAN1, 11, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2BUS, 11, 12), + RK3576_CRU_RESET_OFFSET(SRST_P_VCCIO_IOC, 11, 13), + RK3576_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 11, 14), + RK3576_CRU_RESET_OFFSET(SRST_KEY_SHIFT, 11, 15), + + /* SOFTRST_CON12 */ + RK3576_CRU_RESET_OFFSET(SRST_P_I2C1, 12, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_I2C2, 
12, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_I2C3, 12, 2), + RK3576_CRU_RESET_OFFSET(SRST_P_I2C4, 12, 3), + RK3576_CRU_RESET_OFFSET(SRST_P_I2C5, 12, 4), + RK3576_CRU_RESET_OFFSET(SRST_P_I2C6, 12, 5), + RK3576_CRU_RESET_OFFSET(SRST_P_I2C7, 12, 6), + RK3576_CRU_RESET_OFFSET(SRST_P_I2C8, 12, 7), + RK3576_CRU_RESET_OFFSET(SRST_P_I2C9, 12, 8), + RK3576_CRU_RESET_OFFSET(SRST_P_WDT_BUSMCU, 12, 9), + RK3576_CRU_RESET_OFFSET(SRST_T_WDT_BUSMCU, 12, 10), + RK3576_CRU_RESET_OFFSET(SRST_A_GIC, 12, 11), + RK3576_CRU_RESET_OFFSET(SRST_I2C1, 12, 12), + RK3576_CRU_RESET_OFFSET(SRST_I2C2, 12, 13), + RK3576_CRU_RESET_OFFSET(SRST_I2C3, 12, 14), + RK3576_CRU_RESET_OFFSET(SRST_I2C4, 12, 15), + + /* SOFTRST_CON13 */ + RK3576_CRU_RESET_OFFSET(SRST_I2C5, 13, 0), + RK3576_CRU_RESET_OFFSET(SRST_I2C6, 13, 1), + RK3576_CRU_RESET_OFFSET(SRST_I2C7, 13, 2), + RK3576_CRU_RESET_OFFSET(SRST_I2C8, 13, 3), + RK3576_CRU_RESET_OFFSET(SRST_I2C9, 13, 4), + RK3576_CRU_RESET_OFFSET(SRST_P_SARADC, 13, 6), + RK3576_CRU_RESET_OFFSET(SRST_SARADC, 13, 7), + RK3576_CRU_RESET_OFFSET(SRST_P_TSADC, 13, 8), + RK3576_CRU_RESET_OFFSET(SRST_TSADC, 13, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_UART0, 13, 10), + RK3576_CRU_RESET_OFFSET(SRST_P_UART2, 13, 11), + RK3576_CRU_RESET_OFFSET(SRST_P_UART3, 13, 12), + RK3576_CRU_RESET_OFFSET(SRST_P_UART4, 13, 13), + RK3576_CRU_RESET_OFFSET(SRST_P_UART5, 13, 14), + RK3576_CRU_RESET_OFFSET(SRST_P_UART6, 13, 15), + + /* SOFTRST_CON14 */ + RK3576_CRU_RESET_OFFSET(SRST_P_UART7, 14, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_UART8, 14, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_UART9, 14, 2), + RK3576_CRU_RESET_OFFSET(SRST_P_UART10, 14, 3), + RK3576_CRU_RESET_OFFSET(SRST_P_UART11, 14, 4), + RK3576_CRU_RESET_OFFSET(SRST_S_UART0, 14, 5), + RK3576_CRU_RESET_OFFSET(SRST_S_UART2, 14, 6), + RK3576_CRU_RESET_OFFSET(SRST_S_UART3, 14, 9), + RK3576_CRU_RESET_OFFSET(SRST_S_UART4, 14, 12), + RK3576_CRU_RESET_OFFSET(SRST_S_UART5, 14, 15), + + /* SOFTRST_CON15 */ + RK3576_CRU_RESET_OFFSET(SRST_S_UART6, 15, 2), + RK3576_CRU_RESET_OFFSET(SRST_S_UART7, 15, 5), + RK3576_CRU_RESET_OFFSET(SRST_S_UART8, 15, 8), + RK3576_CRU_RESET_OFFSET(SRST_S_UART9, 15, 9), + RK3576_CRU_RESET_OFFSET(SRST_S_UART10, 15, 10), + RK3576_CRU_RESET_OFFSET(SRST_S_UART11, 15, 11), + RK3576_CRU_RESET_OFFSET(SRST_P_SPI0, 15, 13), + RK3576_CRU_RESET_OFFSET(SRST_P_SPI1, 15, 14), + RK3576_CRU_RESET_OFFSET(SRST_P_SPI2, 15, 15), + + /* SOFTRST_CON16 */ + RK3576_CRU_RESET_OFFSET(SRST_P_SPI3, 16, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_SPI4, 16, 1), + RK3576_CRU_RESET_OFFSET(SRST_SPI0, 16, 2), + RK3576_CRU_RESET_OFFSET(SRST_SPI1, 16, 3), + RK3576_CRU_RESET_OFFSET(SRST_SPI2, 16, 4), + RK3576_CRU_RESET_OFFSET(SRST_SPI3, 16, 5), + RK3576_CRU_RESET_OFFSET(SRST_SPI4, 16, 6), + RK3576_CRU_RESET_OFFSET(SRST_P_WDT0, 16, 7), + RK3576_CRU_RESET_OFFSET(SRST_T_WDT0, 16, 8), + RK3576_CRU_RESET_OFFSET(SRST_P_SYS_GRF, 16, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_PWM1, 16, 10), + RK3576_CRU_RESET_OFFSET(SRST_PWM1, 16, 11), + + /* SOFTRST_CON17 */ + RK3576_CRU_RESET_OFFSET(SRST_P_BUSTIMER0, 17, 3), + RK3576_CRU_RESET_OFFSET(SRST_P_BUSTIMER1, 17, 4), + RK3576_CRU_RESET_OFFSET(SRST_TIMER0, 17, 6), + RK3576_CRU_RESET_OFFSET(SRST_TIMER1, 17, 7), + RK3576_CRU_RESET_OFFSET(SRST_TIMER2, 17, 8), + RK3576_CRU_RESET_OFFSET(SRST_TIMER3, 17, 9), + RK3576_CRU_RESET_OFFSET(SRST_TIMER4, 17, 10), + RK3576_CRU_RESET_OFFSET(SRST_TIMER5, 17, 11), + RK3576_CRU_RESET_OFFSET(SRST_P_BUSIOC, 17, 12), + RK3576_CRU_RESET_OFFSET(SRST_P_MAILBOX0, 17, 13), + RK3576_CRU_RESET_OFFSET(SRST_P_GPIO1, 17, 15), + + /* SOFTRST_CON18 */ + 
RK3576_CRU_RESET_OFFSET(SRST_GPIO1, 18, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_GPIO2, 18, 1), + RK3576_CRU_RESET_OFFSET(SRST_GPIO2, 18, 2), + RK3576_CRU_RESET_OFFSET(SRST_P_GPIO3, 18, 3), + RK3576_CRU_RESET_OFFSET(SRST_GPIO3, 18, 4), + RK3576_CRU_RESET_OFFSET(SRST_P_GPIO4, 18, 5), + RK3576_CRU_RESET_OFFSET(SRST_GPIO4, 18, 6), + RK3576_CRU_RESET_OFFSET(SRST_A_DECOM, 18, 7), + RK3576_CRU_RESET_OFFSET(SRST_P_DECOM, 18, 8), + RK3576_CRU_RESET_OFFSET(SRST_D_DECOM, 18, 9), + RK3576_CRU_RESET_OFFSET(SRST_TIMER6, 18, 11), + RK3576_CRU_RESET_OFFSET(SRST_TIMER7, 18, 12), + RK3576_CRU_RESET_OFFSET(SRST_TIMER8, 18, 13), + RK3576_CRU_RESET_OFFSET(SRST_TIMER9, 18, 14), + RK3576_CRU_RESET_OFFSET(SRST_TIMER10, 18, 15), + + /* SOFTRST_CON19 */ + RK3576_CRU_RESET_OFFSET(SRST_TIMER11, 19, 0), + RK3576_CRU_RESET_OFFSET(SRST_A_DMAC0, 19, 1), + RK3576_CRU_RESET_OFFSET(SRST_A_DMAC1, 19, 2), + RK3576_CRU_RESET_OFFSET(SRST_A_DMAC2, 19, 3), + RK3576_CRU_RESET_OFFSET(SRST_A_SPINLOCK, 19, 4), + RK3576_CRU_RESET_OFFSET(SRST_REF_PVTPLL_BUS, 19, 5), + RK3576_CRU_RESET_OFFSET(SRST_H_I3C0, 19, 7), + RK3576_CRU_RESET_OFFSET(SRST_H_I3C1, 19, 9), + RK3576_CRU_RESET_OFFSET(SRST_H_BUS_CM0_BIU, 19, 11), + RK3576_CRU_RESET_OFFSET(SRST_F_BUS_CM0_CORE, 19, 12), + RK3576_CRU_RESET_OFFSET(SRST_T_BUS_CM0_JTAG, 19, 13), + + /* SOFTRST_CON20 */ + RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2PMU, 20, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2DDR, 20, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_BUS, 20, 3), + RK3576_CRU_RESET_OFFSET(SRST_P_PWM2, 20, 4), + RK3576_CRU_RESET_OFFSET(SRST_PWM2, 20, 5), + RK3576_CRU_RESET_OFFSET(SRST_FREQ_PWM1, 20, 8), + RK3576_CRU_RESET_OFFSET(SRST_COUNTER_PWM1, 20, 9), + RK3576_CRU_RESET_OFFSET(SRST_I3C0, 20, 12), + RK3576_CRU_RESET_OFFSET(SRST_I3C1, 20, 13), + + /* SOFTRST_CON21 */ + RK3576_CRU_RESET_OFFSET(SRST_P_DDR_MON_CH0, 21, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_DDR_BIU, 21, 2), + RK3576_CRU_RESET_OFFSET(SRST_P_DDR_UPCTL_CH0, 21, 3), + RK3576_CRU_RESET_OFFSET(SRST_TM_DDR_MON_CH0, 21, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_BIU, 21, 5), + RK3576_CRU_RESET_OFFSET(SRST_DFI_CH0, 21, 6), + RK3576_CRU_RESET_OFFSET(SRST_DDR_MON_CH0, 21, 10), + RK3576_CRU_RESET_OFFSET(SRST_P_DDR_HWLP_CH0, 21, 13), + RK3576_CRU_RESET_OFFSET(SRST_P_DDR_MON_CH1, 21, 14), + RK3576_CRU_RESET_OFFSET(SRST_P_DDR_HWLP_CH1, 21, 15), + + /* SOFTRST_CON22 */ + RK3576_CRU_RESET_OFFSET(SRST_P_DDR_UPCTL_CH1, 22, 0), + RK3576_CRU_RESET_OFFSET(SRST_TM_DDR_MON_CH1, 22, 1), + RK3576_CRU_RESET_OFFSET(SRST_DFI_CH1, 22, 2), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR01_MSCH0, 22, 3), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR01_MSCH1, 22, 4), + RK3576_CRU_RESET_OFFSET(SRST_DDR_MON_CH1, 22, 6), + RK3576_CRU_RESET_OFFSET(SRST_DDR_SCRAMBLE_CH0, 22, 9), + RK3576_CRU_RESET_OFFSET(SRST_DDR_SCRAMBLE_CH1, 22, 10), + RK3576_CRU_RESET_OFFSET(SRST_P_AHB2APB, 22, 12), + RK3576_CRU_RESET_OFFSET(SRST_H_AHB2APB, 22, 13), + RK3576_CRU_RESET_OFFSET(SRST_H_DDR_BIU, 22, 14), + RK3576_CRU_RESET_OFFSET(SRST_F_DDR_CM0_CORE, 22, 15), + + /* SOFTRST_CON23 */ + RK3576_CRU_RESET_OFFSET(SRST_P_DDR01_MSCH0, 23, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_DDR01_MSCH1, 23, 2), + RK3576_CRU_RESET_OFFSET(SRST_DDR_TIMER0, 23, 4), + RK3576_CRU_RESET_OFFSET(SRST_DDR_TIMER1, 23, 5), + RK3576_CRU_RESET_OFFSET(SRST_T_WDT_DDR, 23, 6), + RK3576_CRU_RESET_OFFSET(SRST_P_WDT, 23, 7), + RK3576_CRU_RESET_OFFSET(SRST_P_TIMER, 23, 8), + RK3576_CRU_RESET_OFFSET(SRST_T_DDR_CM0_JTAG, 23, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_DDR_GRF, 23, 11), + + /* SOFTRST_CON25 */ + RK3576_CRU_RESET_OFFSET(SRST_DDR_UPCTL_CH0, 25, 1), + 
RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_0_CH0, 25, 2), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_1_CH0, 25, 3), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_2_CH0, 25, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_3_CH0, 25, 5), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_4_CH0, 25, 6), + + /* SOFTRST_CON26 */ + RK3576_CRU_RESET_OFFSET(SRST_DDR_UPCTL_CH1, 26, 1), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_0_CH1, 26, 2), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_1_CH1, 26, 3), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_2_CH1, 26, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_3_CH1, 26, 5), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_4_CH1, 26, 6), + + /* SOFTRST_CON27 */ + RK3576_CRU_RESET_OFFSET(SRST_REF_PVTPLL_DDR, 27, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_DDR, 27, 1), + + /* SOFTRST_CON28 */ + RK3576_CRU_RESET_OFFSET(SRST_A_RKNN0, 28, 9), + RK3576_CRU_RESET_OFFSET(SRST_A_RKNN0_BIU, 28, 11), + RK3576_CRU_RESET_OFFSET(SRST_L_RKNN0_BIU, 28, 12), + + /* SOFTRST_CON29 */ + RK3576_CRU_RESET_OFFSET(SRST_A_RKNN1, 29, 0), + RK3576_CRU_RESET_OFFSET(SRST_A_RKNN1_BIU, 29, 2), + RK3576_CRU_RESET_OFFSET(SRST_L_RKNN1_BIU, 29, 3), + + /* SOFTRST_CON31 */ + RK3576_CRU_RESET_OFFSET(SRST_NPU_DAP, 31, 0), + RK3576_CRU_RESET_OFFSET(SRST_L_NPUSUBSYS_BIU, 31, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_NPUTOP_BIU, 31, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_NPU_TIMER, 31, 10), + RK3576_CRU_RESET_OFFSET(SRST_NPUTIMER0, 31, 12), + RK3576_CRU_RESET_OFFSET(SRST_NPUTIMER1, 31, 13), + RK3576_CRU_RESET_OFFSET(SRST_P_NPU_WDT, 31, 14), + RK3576_CRU_RESET_OFFSET(SRST_T_NPU_WDT, 31, 15), + + /* SOFTRST_CON32 */ + RK3576_CRU_RESET_OFFSET(SRST_A_RKNN_CBUF, 32, 0), + RK3576_CRU_RESET_OFFSET(SRST_A_RVCORE0, 32, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_NPU_GRF, 32, 2), + RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_NPU, 32, 3), + RK3576_CRU_RESET_OFFSET(SRST_NPU_PVTPLL, 32, 4), + RK3576_CRU_RESET_OFFSET(SRST_H_NPU_CM0_BIU, 32, 6), + RK3576_CRU_RESET_OFFSET(SRST_F_NPU_CM0_CORE, 32, 7), + RK3576_CRU_RESET_OFFSET(SRST_T_NPU_CM0_JTAG, 32, 8), + RK3576_CRU_RESET_OFFSET(SRST_A_RKNNTOP_BIU, 32, 11), + RK3576_CRU_RESET_OFFSET(SRST_H_RKNN_CBUF, 32, 12), + RK3576_CRU_RESET_OFFSET(SRST_H_RKNNTOP_BIU, 32, 13), + + /* SOFTRST_CON33 */ + RK3576_CRU_RESET_OFFSET(SRST_H_NVM_BIU, 33, 2), + RK3576_CRU_RESET_OFFSET(SRST_A_NVM_BIU, 33, 3), + RK3576_CRU_RESET_OFFSET(SRST_S_FSPI, 33, 6), + RK3576_CRU_RESET_OFFSET(SRST_H_FSPI, 33, 7), + RK3576_CRU_RESET_OFFSET(SRST_C_EMMC, 33, 8), + RK3576_CRU_RESET_OFFSET(SRST_H_EMMC, 33, 9), + RK3576_CRU_RESET_OFFSET(SRST_A_EMMC, 33, 10), + RK3576_CRU_RESET_OFFSET(SRST_B_EMMC, 33, 11), + RK3576_CRU_RESET_OFFSET(SRST_T_EMMC, 33, 12), + + /* SOFTRST_CON34 */ + RK3576_CRU_RESET_OFFSET(SRST_P_GRF, 34, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_PHP_BIU, 34, 5), + RK3576_CRU_RESET_OFFSET(SRST_A_PHP_BIU, 34, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_PCIE0, 34, 13), + RK3576_CRU_RESET_OFFSET(SRST_PCIE0_POWER_UP, 34, 15), + + /* SOFTRST_CON35 */ + RK3576_CRU_RESET_OFFSET(SRST_A_USB3OTG1, 35, 3), + RK3576_CRU_RESET_OFFSET(SRST_A_MMU0, 35, 11), + RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU0, 35, 13), + RK3576_CRU_RESET_OFFSET(SRST_A_MMU1, 35, 14), + + /* SOFTRST_CON36 */ + RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU1, 36, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_PCIE1, 36, 7), + RK3576_CRU_RESET_OFFSET(SRST_PCIE1_POWER_UP, 36, 9), + + /* SOFTRST_CON37 */ + RK3576_CRU_RESET_OFFSET(SRST_RXOOB0, 37, 0), + RK3576_CRU_RESET_OFFSET(SRST_RXOOB1, 37, 1), + RK3576_CRU_RESET_OFFSET(SRST_PMALIVE0, 37, 2), + RK3576_CRU_RESET_OFFSET(SRST_PMALIVE1, 37, 3), + 
RK3576_CRU_RESET_OFFSET(SRST_A_SATA0, 37, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_SATA1, 37, 5), + RK3576_CRU_RESET_OFFSET(SRST_ASIC1, 37, 6), + RK3576_CRU_RESET_OFFSET(SRST_ASIC0, 37, 7), + + /* SOFTRST_CON40 */ + RK3576_CRU_RESET_OFFSET(SRST_P_CSIDPHY1, 40, 2), + RK3576_CRU_RESET_OFFSET(SRST_SCAN_CSIDPHY1, 40, 3), + + /* SOFTRST_CON42 */ + RK3576_CRU_RESET_OFFSET(SRST_P_SDGMAC_GRF, 42, 3), + RK3576_CRU_RESET_OFFSET(SRST_P_SDGMAC_BIU, 42, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_SDGMAC_BIU, 42, 5), + RK3576_CRU_RESET_OFFSET(SRST_H_SDGMAC_BIU, 42, 6), + RK3576_CRU_RESET_OFFSET(SRST_A_GMAC0, 42, 7), + RK3576_CRU_RESET_OFFSET(SRST_A_GMAC1, 42, 8), + RK3576_CRU_RESET_OFFSET(SRST_P_GMAC0, 42, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_GMAC1, 42, 10), + RK3576_CRU_RESET_OFFSET(SRST_H_SDIO, 42, 12), + + /* SOFTRST_CON43 */ + RK3576_CRU_RESET_OFFSET(SRST_H_SDMMC0, 43, 2), + RK3576_CRU_RESET_OFFSET(SRST_S_FSPI1, 43, 3), + RK3576_CRU_RESET_OFFSET(SRST_H_FSPI1, 43, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_DSMC_BIU, 43, 6), + RK3576_CRU_RESET_OFFSET(SRST_A_DSMC, 43, 7), + RK3576_CRU_RESET_OFFSET(SRST_P_DSMC, 43, 8), + RK3576_CRU_RESET_OFFSET(SRST_H_HSGPIO, 43, 10), + RK3576_CRU_RESET_OFFSET(SRST_HSGPIO, 43, 11), + RK3576_CRU_RESET_OFFSET(SRST_A_HSGPIO, 43, 13), + + /* SOFTRST_CON45 */ + RK3576_CRU_RESET_OFFSET(SRST_H_RKVDEC, 45, 3), + RK3576_CRU_RESET_OFFSET(SRST_H_RKVDEC_BIU, 45, 5), + RK3576_CRU_RESET_OFFSET(SRST_A_RKVDEC_BIU, 45, 6), + RK3576_CRU_RESET_OFFSET(SRST_RKVDEC_HEVC_CA, 45, 8), + RK3576_CRU_RESET_OFFSET(SRST_RKVDEC_CORE, 45, 9), + + /* SOFTRST_CON47 */ + RK3576_CRU_RESET_OFFSET(SRST_A_USB_BIU, 47, 3), + RK3576_CRU_RESET_OFFSET(SRST_P_USBUFS_BIU, 47, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_USB3OTG0, 47, 5), + RK3576_CRU_RESET_OFFSET(SRST_A_UFS_BIU, 47, 10), + RK3576_CRU_RESET_OFFSET(SRST_A_MMU2, 47, 12), + RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU2, 47, 13), + RK3576_CRU_RESET_OFFSET(SRST_A_UFS_SYS, 47, 15), + + /* SOFTRST_CON48 */ + RK3576_CRU_RESET_OFFSET(SRST_A_UFS, 48, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_USBUFS_GRF, 48, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_UFS_GRF, 48, 2), + + /* SOFTRST_CON49 */ + RK3576_CRU_RESET_OFFSET(SRST_H_VPU_BIU, 49, 6), + RK3576_CRU_RESET_OFFSET(SRST_A_JPEG_BIU, 49, 7), + RK3576_CRU_RESET_OFFSET(SRST_A_RGA_BIU, 49, 10), + RK3576_CRU_RESET_OFFSET(SRST_A_VDPP_BIU, 49, 11), + RK3576_CRU_RESET_OFFSET(SRST_A_EBC_BIU, 49, 12), + RK3576_CRU_RESET_OFFSET(SRST_H_RGA2E_0, 49, 13), + RK3576_CRU_RESET_OFFSET(SRST_A_RGA2E_0, 49, 14), + RK3576_CRU_RESET_OFFSET(SRST_CORE_RGA2E_0, 49, 15), + + /* SOFTRST_CON50 */ + RK3576_CRU_RESET_OFFSET(SRST_A_JPEG, 50, 0), + RK3576_CRU_RESET_OFFSET(SRST_H_JPEG, 50, 1), + RK3576_CRU_RESET_OFFSET(SRST_H_VDPP, 50, 2), + RK3576_CRU_RESET_OFFSET(SRST_A_VDPP, 50, 3), + RK3576_CRU_RESET_OFFSET(SRST_CORE_VDPP, 50, 4), + RK3576_CRU_RESET_OFFSET(SRST_H_RGA2E_1, 50, 5), + RK3576_CRU_RESET_OFFSET(SRST_A_RGA2E_1, 50, 6), + RK3576_CRU_RESET_OFFSET(SRST_CORE_RGA2E_1, 50, 7), + RK3576_CRU_RESET_OFFSET(SRST_H_EBC, 50, 10), + RK3576_CRU_RESET_OFFSET(SRST_A_EBC, 50, 11), + RK3576_CRU_RESET_OFFSET(SRST_D_EBC, 50, 12), + + /* SOFTRST_CON51 */ + RK3576_CRU_RESET_OFFSET(SRST_H_VEPU0_BIU, 51, 2), + RK3576_CRU_RESET_OFFSET(SRST_A_VEPU0_BIU, 51, 3), + RK3576_CRU_RESET_OFFSET(SRST_H_VEPU0, 51, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_VEPU0, 51, 5), + RK3576_CRU_RESET_OFFSET(SRST_VEPU0_CORE, 51, 6), + + /* SOFTRST_CON53 */ + RK3576_CRU_RESET_OFFSET(SRST_A_VI_BIU, 53, 3), + RK3576_CRU_RESET_OFFSET(SRST_H_VI_BIU, 53, 4), + RK3576_CRU_RESET_OFFSET(SRST_P_VI_BIU, 53, 5), + 
RK3576_CRU_RESET_OFFSET(SRST_D_VICAP, 53, 6), + RK3576_CRU_RESET_OFFSET(SRST_A_VICAP, 53, 7), + RK3576_CRU_RESET_OFFSET(SRST_H_VICAP, 53, 8), + RK3576_CRU_RESET_OFFSET(SRST_ISP0, 53, 10), + RK3576_CRU_RESET_OFFSET(SRST_ISP0_VICAP, 53, 11), + + /* SOFTRST_CON54 */ + RK3576_CRU_RESET_OFFSET(SRST_CORE_VPSS, 54, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_0, 54, 4), + RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_1, 54, 5), + RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_2, 54, 6), + RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_3, 54, 7), + RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_4, 54, 8), + + /* SOFTRST_CON59 */ + RK3576_CRU_RESET_OFFSET(SRST_CIFIN, 59, 0), + RK3576_CRU_RESET_OFFSET(SRST_VICAP_I0CLK, 59, 1), + RK3576_CRU_RESET_OFFSET(SRST_VICAP_I1CLK, 59, 2), + RK3576_CRU_RESET_OFFSET(SRST_VICAP_I2CLK, 59, 3), + RK3576_CRU_RESET_OFFSET(SRST_VICAP_I3CLK, 59, 4), + RK3576_CRU_RESET_OFFSET(SRST_VICAP_I4CLK, 59, 5), + + /* SOFTRST_CON61 */ + RK3576_CRU_RESET_OFFSET(SRST_A_VOP_BIU, 61, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_VOP2_BIU, 61, 5), + RK3576_CRU_RESET_OFFSET(SRST_H_VOP_BIU, 61, 6), + RK3576_CRU_RESET_OFFSET(SRST_P_VOP_BIU, 61, 7), + RK3576_CRU_RESET_OFFSET(SRST_H_VOP, 61, 8), + RK3576_CRU_RESET_OFFSET(SRST_A_VOP, 61, 9), + RK3576_CRU_RESET_OFFSET(SRST_D_VP0, 61, 13), + + /* SOFTRST_CON62 */ + RK3576_CRU_RESET_OFFSET(SRST_D_VP1, 62, 0), + RK3576_CRU_RESET_OFFSET(SRST_D_VP2, 62, 1), + RK3576_CRU_RESET_OFFSET(SRST_P_VOP2_BIU, 62, 2), + RK3576_CRU_RESET_OFFSET(SRST_P_VOPGRF, 62, 3), + + /* SOFTRST_CON63 */ + RK3576_CRU_RESET_OFFSET(SRST_H_VO0_BIU, 63, 5), + RK3576_CRU_RESET_OFFSET(SRST_P_VO0_BIU, 63, 7), + RK3576_CRU_RESET_OFFSET(SRST_A_HDCP0_BIU, 63, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_VO0_GRF, 63, 10), + RK3576_CRU_RESET_OFFSET(SRST_A_HDCP0, 63, 12), + RK3576_CRU_RESET_OFFSET(SRST_H_HDCP0, 63, 13), + RK3576_CRU_RESET_OFFSET(SRST_HDCP0, 63, 14), + + /* SOFTRST_CON64 */ + RK3576_CRU_RESET_OFFSET(SRST_P_DSIHOST0, 64, 5), + RK3576_CRU_RESET_OFFSET(SRST_DSIHOST0, 64, 6), + RK3576_CRU_RESET_OFFSET(SRST_P_HDMITX0, 64, 7), + RK3576_CRU_RESET_OFFSET(SRST_HDMITX0_REF, 64, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_EDP0, 64, 13), + RK3576_CRU_RESET_OFFSET(SRST_EDP0_24M, 64, 14), + + /* SOFTRST_CON65 */ + RK3576_CRU_RESET_OFFSET(SRST_M_SAI5_8CH, 65, 4), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI5_8CH, 65, 5), + RK3576_CRU_RESET_OFFSET(SRST_M_SAI6_8CH, 65, 8), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI6_8CH, 65, 9), + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX2, 65, 10), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX2, 65, 13), + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX2, 65, 14), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX2, 65, 15), + + /* SOFTRST_CON66 */ + RK3576_CRU_RESET_OFFSET(SRST_H_SAI8_8CH, 66, 0), + RK3576_CRU_RESET_OFFSET(SRST_M_SAI8_8CH, 66, 2), + + /* SOFTRST_CON67 */ + RK3576_CRU_RESET_OFFSET(SRST_H_VO1_BIU, 67, 5), + RK3576_CRU_RESET_OFFSET(SRST_P_VO1_BIU, 67, 6), + RK3576_CRU_RESET_OFFSET(SRST_M_SAI7_8CH, 67, 9), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI7_8CH, 67, 10), + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX3, 67, 11), + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX4, 67, 12), + RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX5, 67, 13), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX3, 67, 14), + + /* SOFTRST_CON68 */ + RK3576_CRU_RESET_OFFSET(SRST_DP0, 68, 0), + RK3576_CRU_RESET_OFFSET(SRST_P_VO1_GRF, 68, 2), + RK3576_CRU_RESET_OFFSET(SRST_A_HDCP1_BIU, 68, 3), + RK3576_CRU_RESET_OFFSET(SRST_A_HDCP1, 68, 4), + RK3576_CRU_RESET_OFFSET(SRST_H_HDCP1, 68, 5), + RK3576_CRU_RESET_OFFSET(SRST_HDCP1, 68, 6), + RK3576_CRU_RESET_OFFSET(SRST_H_SAI9_8CH, 68, 9), + 
RK3576_CRU_RESET_OFFSET(SRST_M_SAI9_8CH, 68, 11), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX4, 68, 12), + RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX5, 68, 13), + + /* SOFTRST_CON69 */ + RK3576_CRU_RESET_OFFSET(SRST_GPU, 69, 3), + RK3576_CRU_RESET_OFFSET(SRST_A_S_GPU_BIU, 69, 6), + RK3576_CRU_RESET_OFFSET(SRST_A_M0_GPU_BIU, 69, 7), + RK3576_CRU_RESET_OFFSET(SRST_P_GPU_BIU, 69, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_GPU_GRF, 69, 13), + RK3576_CRU_RESET_OFFSET(SRST_GPU_PVTPLL, 69, 14), + RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_GPU, 69, 15), + + /* SOFTRST_CON72 */ + RK3576_CRU_RESET_OFFSET(SRST_A_CENTER_BIU, 72, 4), + RK3576_CRU_RESET_OFFSET(SRST_A_DMA2DDR, 72, 5), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_SHAREMEM, 72, 6), + RK3576_CRU_RESET_OFFSET(SRST_A_DDR_SHAREMEM_BIU, 72, 7), + RK3576_CRU_RESET_OFFSET(SRST_H_CENTER_BIU, 72, 8), + RK3576_CRU_RESET_OFFSET(SRST_P_CENTER_GRF, 72, 9), + RK3576_CRU_RESET_OFFSET(SRST_P_DMA2DDR, 72, 10), + RK3576_CRU_RESET_OFFSET(SRST_P_SHAREMEM, 72, 11), + RK3576_CRU_RESET_OFFSET(SRST_P_CENTER_BIU, 72, 12), + + /* SOFTRST_CON75 */ + RK3576_CRU_RESET_OFFSET(SRST_LINKSYM_HDMITXPHY0, 75, 1), + + /* SOFTRST_CON78 */ + RK3576_CRU_RESET_OFFSET(SRST_DP0_PIXELCLK, 78, 1), + RK3576_CRU_RESET_OFFSET(SRST_PHY_DP0_TX, 78, 2), + RK3576_CRU_RESET_OFFSET(SRST_DP1_PIXELCLK, 78, 3), + RK3576_CRU_RESET_OFFSET(SRST_DP2_PIXELCLK, 78, 4), + + /* SOFTRST_CON79 */ + RK3576_CRU_RESET_OFFSET(SRST_H_VEPU1_BIU, 79, 1), + RK3576_CRU_RESET_OFFSET(SRST_A_VEPU1_BIU, 79, 2), + RK3576_CRU_RESET_OFFSET(SRST_H_VEPU1, 79, 3), + RK3576_CRU_RESET_OFFSET(SRST_A_VEPU1, 79, 4), + RK3576_CRU_RESET_OFFSET(SRST_VEPU1_CORE, 79, 5), + + /* PPLL_SOFTRST_CON00 */ + RK3576_PHPCRU_RESET_OFFSET(SRST_P_PHPPHY_CRU, 0, 1), + RK3576_PHPCRU_RESET_OFFSET(SRST_P_APB2ASB_SLV_CHIP_TOP, 0, 3), + RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY0, 0, 5), + RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY0_GRF, 0, 6), + RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY1, 0, 7), + RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY1_GRF, 0, 8), + + /* PPLL_SOFTRST_CON01 */ + RK3576_PHPCRU_RESET_OFFSET(SRST_PCIE0_PIPE_PHY, 1, 5), + RK3576_PHPCRU_RESET_OFFSET(SRST_PCIE1_PIPE_PHY, 1, 8), + + /* SECURENS_SOFTRST_CON00 */ + RK3576_SECURENSCRU_RESET_OFFSET(SRST_H_CRYPTO_NS, 0, 3), + RK3576_SECURENSCRU_RESET_OFFSET(SRST_H_TRNG_NS, 0, 4), + RK3576_SECURENSCRU_RESET_OFFSET(SRST_P_OTPC_NS, 0, 8), + RK3576_SECURENSCRU_RESET_OFFSET(SRST_OTPC_NS, 0, 9), + + /* PMU1_SOFTRST_CON00 */ + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_HDPTX_GRF, 0, 0), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_HDPTX_APB, 0, 1), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_MIPI_DCPHY, 0, 2), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_DCPHY_GRF, 0, 3), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_BOT0_APB2ASB, 0, 4), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_BOT1_APB2ASB, 0, 5), + RK3576_PMU1CRU_RESET_OFFSET(SRST_USB2DEBUG, 0, 6), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CSIPHY_GRF, 0, 7), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CSIPHY, 0, 8), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBPHY_GRF_0, 0, 9), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBPHY_GRF_1, 0, 10), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBDP_GRF, 0, 11), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBDPPHY, 0, 12), + RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_INIT, 0, 15), + + /* PMU1_SOFTRST_CON01 */ + RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_CMN, 1, 0), + RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_LANE, 1, 1), + RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_PCS, 1, 2), + RK3576_PMU1CRU_RESET_OFFSET(SRST_M_MIPI_DCPHY, 1, 3), + 
RK3576_PMU1CRU_RESET_OFFSET(SRST_S_MIPI_DCPHY, 1, 4), + RK3576_PMU1CRU_RESET_OFFSET(SRST_SCAN_CSIPHY, 1, 5), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_VCCIO6_IOC, 1, 6), + RK3576_PMU1CRU_RESET_OFFSET(SRST_OTGPHY_0, 1, 7), + RK3576_PMU1CRU_RESET_OFFSET(SRST_OTGPHY_1, 1, 8), + RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_INIT, 1, 9), + RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_CMN, 1, 10), + RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_LANE, 1, 11), + RK3576_PMU1CRU_RESET_OFFSET(SRST_HDMITXHDP, 1, 13), + + /* PMU1_SOFTRST_CON02 */ + RK3576_PMU1CRU_RESET_OFFSET(SRST_MPHY_INIT, 2, 0), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_MPHY_GRF, 2, 1), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_VCCIO7_IOC, 2, 3), + + /* PMU1_SOFTRST_CON03 */ + RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PMU1_BIU, 3, 9), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_NIU, 3, 10), + RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PMU_CM0_BIU, 3, 11), + RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU_CM0_CORE, 3, 12), + RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU_CM0_JTAG, 3, 13), + + /* PMU1_SOFTRST_CON04 */ + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CRU_PMU1, 4, 1), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_GRF, 4, 3), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_IOC, 4, 4), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1WDT, 4, 5), + RK3576_PMU1CRU_RESET_OFFSET(SRST_T_PMU1WDT, 4, 6), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMUTIMER, 4, 7), + RK3576_PMU1CRU_RESET_OFFSET(SRST_PMUTIMER0, 4, 9), + RK3576_PMU1CRU_RESET_OFFSET(SRST_PMUTIMER1, 4, 10), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1PWM, 4, 11), + RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU1PWM, 4, 12), + + /* PMU1_SOFTRST_CON05 */ + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_I2C0, 5, 1), + RK3576_PMU1CRU_RESET_OFFSET(SRST_I2C0, 5, 2), + RK3576_PMU1CRU_RESET_OFFSET(SRST_S_UART1, 5, 5), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_UART1, 5, 6), + RK3576_PMU1CRU_RESET_OFFSET(SRST_PDM0, 5, 13), + RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PDM0, 5, 15), + + /* PMU1_SOFTRST_CON06 */ + RK3576_PMU1CRU_RESET_OFFSET(SRST_M_PDM0, 6, 0), + RK3576_PMU1CRU_RESET_OFFSET(SRST_H_VAD, 6, 1), + + /* PMU1_SOFTRST_CON07 */ + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU0GRF, 7, 4), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU0IOC, 7, 5), + RK3576_PMU1CRU_RESET_OFFSET(SRST_P_GPIO0, 7, 6), + RK3576_PMU1CRU_RESET_OFFSET(SRST_DB_GPIO0, 7, 7), +}; + +void rk3576_rst_init(struct device_node *np, void __iomem *reg_base) +{ + rockchip_register_softrst_lut(np, + rk3576_register_offset, + ARRAY_SIZE(rk3576_register_offset), + reg_base + RK3576_SOFTRST_CON(0), + ROCKCHIP_SOFTRST_HIWORD_MASK); +} diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index 3056944a5a545c..f1ba48758c78c5 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o +obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index e44b172d72556b..abd49edcf707aa 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -292,7 +292,7 @@ static struct platform_driver exynos_audss_clk_driver = { .pm = &exynos_audss_clk_pm_ops, }, .probe = exynos_audss_clk_probe, - 
.remove_new = exynos_audss_clk_remove, + .remove = exynos_audss_clk_remove, }; module_platform_driver(exynos_audss_clk_driver); diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c index 89cf2000884fca..2ef5748c139b37 100644 --- a/drivers/clk/samsung/clk-exynos-clkout.c +++ b/drivers/clk/samsung/clk-exynos-clkout.c @@ -241,7 +241,7 @@ static struct platform_driver exynos_clkout_driver = { .pm = &exynos_clkout_pm_ops, }, .probe = exynos_clkout_probe, - .remove_new = exynos_clkout_remove, + .remove = exynos_clkout_remove, }; module_platform_driver(exynos_clkout_driver); diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c index f7d7427a558ba0..fc42251731edb6 100644 --- a/drivers/clk/samsung/clk-exynos7885.c +++ b/drivers/clk/samsung/clk-exynos7885.c @@ -17,10 +17,10 @@ #include "clk-exynos-arm64.h" /* NOTE: Must be equal to the last clock ID increased by one */ -#define CLKS_NR_TOP (CLK_GOUT_FSYS_USB30DRD + 1) +#define CLKS_NR_TOP (CLK_MOUT_SHARED1_PLL + 1) #define CLKS_NR_CORE (CLK_GOUT_TREX_P_CORE_PCLK_P_CORE + 1) #define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1) -#define CLKS_NR_FSYS (CLK_GOUT_MMC_SDIO_SDCLKIN + 1) +#define CLKS_NR_FSYS (CLK_FSYS_USB30DRD_REF_CLK + 1) /* ---- CMU_TOP ------------------------------------------------------------- */ @@ -162,6 +162,10 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = { NULL), }; +/* List of parent clocks for Muxes in CMU_TOP */ +PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" }; +PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" }; + /* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */ PNAME(mout_core_bus_p) = { "dout_shared0_div2", "dout_shared1_div2", "dout_shared0_div3", "dout_shared0_div3" }; @@ -189,6 +193,12 @@ PNAME(mout_fsys_mmc_sdio_p) = { "dout_shared0_div2", "dout_shared1_div2" }; PNAME(mout_fsys_usb30drd_p) = { "dout_shared0_div4", "dout_shared1_div4" }; static const struct samsung_mux_clock top_mux_clks[] __initconst = { + /* TOP */ + MUX(CLK_MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p, + PLL_CON0_PLL_SHARED0, 4, 1), + MUX(CLK_MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p, + PLL_CON0_PLL_SHARED1, 4, 1), + /* CORE */ MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p, CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2), @@ -232,17 +242,17 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = { static const struct samsung_div_clock top_div_clks[] __initconst = { /* TOP */ - DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "fout_shared0_pll", + DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "mout_shared0_pll", CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1), - DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "fout_shared0_pll", + DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "mout_shared0_pll", CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2), DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "dout_shared0_div2", CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1), - DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "fout_shared0_pll", + DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "mout_shared0_pll", CLK_CON_DIV_PLL_SHARED0_DIV5, 0, 3), - DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "fout_shared1_pll", + DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "mout_shared1_pll", CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1), - DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "fout_shared1_pll", + DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "mout_shared1_pll", CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2), DIV(CLK_DOUT_SHARED1_DIV4, 
"dout_shared1_div4", "dout_shared1_div2", CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1), @@ -676,30 +686,56 @@ static const struct samsung_cmu_info core_cmu_info __initconst = { /* ---- CMU_FSYS ------------------------------------------------------------ */ /* Register Offset definitions for CMU_FSYS (0x13400000) */ -#define PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER 0x0100 -#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER 0x0120 -#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER 0x0140 -#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER 0x0160 -#define PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER 0x0180 -#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK 0x2030 -#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN 0x2034 -#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK 0x2038 -#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN 0x203c -#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK 0x2040 -#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN 0x2044 +#define PLL_LOCKTIME_PLL_USB 0x0000 +#define PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER 0x0100 +#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER 0x0120 +#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER 0x0140 +#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER 0x0160 +#define PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER 0x0180 +#define PLL_CON0_PLL_USB 0x01a0 +#define CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE 0x200c +#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK 0x2030 +#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN 0x2034 +#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK 0x2038 +#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN 0x203c +#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK 0x2040 +#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN 0x2044 +#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL 0x2068 +#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0 0x206c +#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1 0x2070 +#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY 0x2074 +#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK 0x2078 static const unsigned long fsys_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_USB, PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER, PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER, PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER, PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER, PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER, + PLL_CON0_PLL_USB, + CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE, CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK, CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN, CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK, CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN, CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK, CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN, + CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL, + CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0, + CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1, + CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY, + CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK, +}; + +static const struct samsung_pll_rate_table pll_usb_rate_table[] __initconst = { + PLL_35XX_RATE(26 * MHZ, 50000000U, 400, 13, 4), +}; + +static const struct samsung_pll_clock fsys_pll_clks[] __initconst = { + PLL(pll_1418x, CLK_FOUT_USB_PLL, "fout_usb_pll", "oscclk", + PLL_LOCKTIME_PLL_USB, PLL_CON0_PLL_USB, + pll_usb_rate_table), }; /* List of parent clocks for Muxes in CMU_FSYS */ @@ -708,6 +744,7 @@ PNAME(mout_fsys_mmc_card_user_p) = { "oscclk", "dout_fsys_mmc_card" }; PNAME(mout_fsys_mmc_embd_user_p) = { "oscclk", "dout_fsys_mmc_embd" }; PNAME(mout_fsys_mmc_sdio_user_p) = { "oscclk", "dout_fsys_mmc_sdio" }; PNAME(mout_fsys_usb30drd_user_p) = { "oscclk", "dout_fsys_usb30drd" }; +PNAME(mout_usb_pll_p) = { "oscclk", "fout_usb_pll" }; static const struct samsung_mux_clock 
fsys_mux_clks[] __initconst = { MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user", mout_fsys_bus_user_p, @@ -721,12 +758,16 @@ static const struct samsung_mux_clock fsys_mux_clks[] __initconst = { MUX_F(CLK_MOUT_FSYS_MMC_SDIO_USER, "mout_fsys_mmc_sdio_user", mout_fsys_mmc_sdio_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER, 4, 1, CLK_SET_RATE_PARENT, 0), - MUX_F(CLK_MOUT_FSYS_USB30DRD_USER, "mout_fsys_usb30drd_user", + MUX(CLK_MOUT_FSYS_USB30DRD_USER, "mout_fsys_usb30drd_user", mout_fsys_usb30drd_user_p, PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER, - 4, 1, CLK_SET_RATE_PARENT, 0), + 4, 1), + nMUX_F(CLK_MOUT_USB_PLL, "mout_usb_pll", mout_usb_pll_p, + PLL_CON0_PLL_USB, 4, 1, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_gate_clock fsys_gate_clks[] __initconst = { + GATE(CLK_FSYS_USB20PHY_CLKCORE, "clk_fsys_usb20phy_clkcore", "mout_usb_pll", + CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE, 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK, 21, 0, 0), GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin", @@ -742,9 +783,21 @@ static const struct samsung_gate_clock fsys_gate_clks[] __initconst = { GATE(CLK_GOUT_MMC_SDIO_SDCLKIN, "gout_mmc_sdio_sdclkin", "mout_fsys_mmc_sdio_user", CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN, 21, CLK_SET_RATE_PARENT, 0), + GATE(CLK_FSYS_USB30DRD_ACLK_20PHYCTRL, "clk_fsys_usb30drd_aclk_20phyctrl", + "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL, 21, 0, 0), + GATE(CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_0, "clk_fsys_usb30drd_aclk_30phyctrl_0", + "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0, 21, 0, 0), + GATE(CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_1, "clk_fsys_usb30drd_aclk_30phyctrl_1", + "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1, 21, 0, 0), + GATE(CLK_FSYS_USB30DRD_BUS_CLK_EARLY, "clk_fsys_usb30drd_bus_clk_early", + "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY, 21, 0, 0), + GATE(CLK_FSYS_USB30DRD_REF_CLK, "clk_fsys_usb30drd_ref_clk", "mout_fsys_usb30drd_user", + CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK, 21, 0, 0), }; static const struct samsung_cmu_info fsys_cmu_info __initconst = { + .pll_clks = fsys_pll_clks, + .nr_pll_clks = ARRAY_SIZE(fsys_pll_clks), .mux_clks = fsys_mux_clks, .nr_mux_clks = ARRAY_SIZE(fsys_mux_clks), .gate_clks = fsys_gate_clks, diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c index 6215471c4ac6df..e00e213b1201c8 100644 --- a/drivers/clk/samsung/clk-exynos850.c +++ b/drivers/clk/samsung/clk-exynos850.c @@ -28,7 +28,7 @@ #define CLKS_NR_HSI (CLK_GOUT_HSI_CMU_HSI_PCLK + 1) #define CLKS_NR_IS (CLK_GOUT_IS_SYSREG_PCLK + 1) #define CLKS_NR_MFCMSCL (CLK_GOUT_MFCMSCL_SYSREG_PCLK + 1) -#define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1) +#define CLKS_NR_PERI (CLK_GOUT_BUSIF_TMU_PCLK + 1) #define CLKS_NR_CORE (CLK_GOUT_SPDMA_CORE_ACLK + 1) #define CLKS_NR_DPU (CLK_GOUT_DPU_SYSREG_PCLK + 1) @@ -1921,6 +1921,7 @@ static const struct samsung_cmu_info mfcmscl_cmu_info __initconst = { #define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0 0x200c #define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1 0x2010 #define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2 0x2014 +#define CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK 0x2018 #define CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK 0x2020 #define CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK 0x2024 #define CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK 0x2028 @@ -1957,6 +1958,7 @@ static const unsigned long peri_clk_regs[] __initconst = { CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0, 
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1, CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2, + CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK, CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK, CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK, @@ -2068,6 +2070,9 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = { GATE(CLK_GOUT_GPIO_PERI_PCLK, "gout_gpio_peri_pclk", "mout_peri_bus_user", CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_BUSIF_TMU_PCLK, "gout_busif_tmu_pclk", + "mout_peri_bus_user", + CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK, 21, 0, 0), }; static const struct samsung_cmu_info peri_cmu_info __initconst = { diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c index f04bacacab2cb8..5971e680e56633 100644 --- a/drivers/clk/samsung/clk-exynosautov9.c +++ b/drivers/clk/samsung/clk-exynosautov9.c @@ -20,6 +20,7 @@ #define CLKS_NR_TOP (GOUT_CLKCMU_PERIS_BUS + 1) #define CLKS_NR_BUSMC (CLK_GOUT_BUSMC_SPDMA_PCLK + 1) #define CLKS_NR_CORE (CLK_GOUT_CORE_CMU_CORE_PCLK + 1) +#define CLKS_NR_DPUM (CLK_GOUT_DPUM_SYSMMU_D3_CLK + 1) #define CLKS_NR_FSYS0 (CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK + 1) #define CLKS_NR_FSYS1 (CLK_GOUT_FSYS1_USB30_1_ACLK + 1) #define CLKS_NR_FSYS2 (CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO + 1) @@ -1076,6 +1077,85 @@ static const struct samsung_cmu_info core_cmu_info __initconst = { .clk_name = "dout_clkcmu_core_bus", }; +/* ---- CMU_DPUM ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_DPUM (0x18c00000) */ +#define PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER 0x0600 +#define CLK_CON_DIV_DIV_CLK_DPUM_BUSP 0x1800 +#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON 0x202c +#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA 0x2030 +#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP 0x2034 +#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1 0x207c +#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1 0x2084 +#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1 0x208c +#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1 0x2094 + +static const unsigned long dpum_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER, + CLK_CON_DIV_DIV_CLK_DPUM_BUSP, + CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON, + CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA, + CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP, + CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1, + CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1, + CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1, + CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1, +}; + +PNAME(mout_dpum_bus_user_p) = { "oscclk", "dout_clkcmu_dpum_bus" }; + +static const struct samsung_mux_clock dpum_mux_clks[] __initconst = { + MUX(CLK_MOUT_DPUM_BUS_USER, "mout_dpum_bus_user", + mout_dpum_bus_user_p, PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER, 4, 1), +}; + +static const struct samsung_div_clock dpum_div_clks[] __initconst = { + DIV(CLK_DOUT_DPUM_BUSP, "dout_dpum_busp", "mout_dpum_bus_user", + CLK_CON_DIV_DIV_CLK_DPUM_BUSP, 0, 3), +}; + +static const struct samsung_gate_clock dpum_gate_clks[] __initconst = { + GATE(CLK_GOUT_DPUM_ACLK_DECON, "gout_dpum_decon_aclk", + "mout_dpum_bus_user", + CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON, 21, + 0, 0), + GATE(CLK_GOUT_DPUM_ACLK_DMA, "gout_dpum_dma_aclk", "mout_dpum_bus_user", + 
CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA, 21, + 0, 0), + GATE(CLK_GOUT_DPUM_ACLK_DPP, "gout_dpum_dpp_aclk", "mout_dpum_bus_user", + CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP, 21, + 0, 0), + GATE(CLK_GOUT_DPUM_SYSMMU_D0_CLK, "gout_dpum_sysmmu_d0_clk", + "mout_dpum_bus_user", + CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1, 21, + 0, 0), + GATE(CLK_GOUT_DPUM_SYSMMU_D1_CLK, "gout_dpum_sysmmu_d1_clk", + "mout_dpum_bus_user", + CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1, 21, + 0, 0), + GATE(CLK_GOUT_DPUM_SYSMMU_D2_CLK, "gout_dpum_sysmmu_d2_clk", + "mout_dpum_bus_user", + CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1, 21, + 0, 0), + GATE(CLK_GOUT_DPUM_SYSMMU_D3_CLK, "gout_dpum_sysmmu_d3_clk", + "mout_dpum_bus_user", + CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1, 21, + 0, 0), +}; + +static const struct samsung_cmu_info dpum_cmu_info __initconst = { + .mux_clks = dpum_mux_clks, + .nr_mux_clks = ARRAY_SIZE(dpum_mux_clks), + .div_clks = dpum_div_clks, + .nr_div_clks = ARRAY_SIZE(dpum_div_clks), + .gate_clks = dpum_gate_clks, + .nr_gate_clks = ARRAY_SIZE(dpum_gate_clks), + .nr_clk_ids = CLKS_NR_DPUM, + .clk_regs = dpum_clk_regs, + .nr_clk_regs = ARRAY_SIZE(dpum_clk_regs), + .clk_name = "bus", +}; + /* ---- CMU_FSYS0 ---------------------------------------------------------- */ /* Register Offset definitions for CMU_FSYS2 (0x17700000) */ @@ -2085,6 +2165,9 @@ static const struct of_device_id exynosautov9_cmu_of_match[] = { }, { .compatible = "samsung,exynosautov9-cmu-core", .data = &core_cmu_info, + }, { + .compatible = "samsung,exynosautov9-cmu-dpum", + .data = &dpum_cmu_info, }, { .compatible = "samsung,exynosautov9-cmu-fsys0", .data = &fsys0_cmu_info, diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c new file mode 100644 index 00000000000000..7ba9748c0526af --- /dev/null +++ b/drivers/clk/samsung/clk-exynosautov920.c @@ -0,0 +1,1173 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 Samsung Electronics Co., Ltd. + * Author: Sunyeal Hong + * + * Common Clock Framework support for ExynosAuto v920 SoC. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#include <dt-bindings/clock/samsung,exynosautov920.h> + +#include "clk.h" +#include "clk-exynos-arm64.h" + +/* NOTE: Must be equal to the last clock ID increased by one */ +#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1) +#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1) + +/* ---- CMU_TOP ------------------------------------------------------------ */ + +/* Register Offset definitions for CMU_TOP (0x11000000) */ +#define PLL_LOCKTIME_PLL_MMC 0x0004 +#define PLL_LOCKTIME_PLL_SHARED0 0x0008 +#define PLL_LOCKTIME_PLL_SHARED1 0x000c +#define PLL_LOCKTIME_PLL_SHARED2 0x0010 +#define PLL_LOCKTIME_PLL_SHARED3 0x0014 +#define PLL_LOCKTIME_PLL_SHARED4 0x0018 +#define PLL_LOCKTIME_PLL_SHARED5 0x0018 +#define PLL_CON0_PLL_MMC 0x0140 +#define PLL_CON3_PLL_MMC 0x014c +#define PLL_CON0_PLL_SHARED0 0x0180 +#define PLL_CON3_PLL_SHARED0 0x018c +#define PLL_CON0_PLL_SHARED1 0x01c0 +#define PLL_CON3_PLL_SHARED1 0x01cc +#define PLL_CON0_PLL_SHARED2 0x0200 +#define PLL_CON3_PLL_SHARED2 0x020c +#define PLL_CON0_PLL_SHARED3 0x0240 +#define PLL_CON3_PLL_SHARED3 0x024c +#define PLL_CON0_PLL_SHARED4 0x0280 +#define PLL_CON3_PLL_SHARED4 0x028c +#define PLL_CON0_PLL_SHARED5 0x02c0 +#define PLL_CON3_PLL_SHARED5 0x02cc + +/* MUX */ +#define CLK_CON_MUX_MUX_CLKCMU_ACC_NOC 0x1000 +#define CLK_CON_MUX_MUX_CLKCMU_APM_NOC 0x1004 +#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008 +#define CLK_CON_MUX_MUX_CLKCMU_AUD_NOC 0x100c +#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK0 0x1010 +#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK1 0x1014 +#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK2 0x1018 +#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK3 0x101c +#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x1020 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER 0x1024 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG 0x1028 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x102c +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER 0x1030 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1034 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER 0x1038 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x103c +#define CLK_CON_MUX_MUX_CLKCMU_DNC_NOC 0x1040 +#define CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC 0x1044 +#define CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC 0x1048 +#define CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC 0x104c +#define CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM 0x1050 +#define CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC 0x1054 +#define CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC 0x1058 +#define CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC 0x105c +#define CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC 0x1060 +#define CLK_CON_MUX_MUX_CLKCMU_DSP_NOC 0x1064 +#define CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP 0x1068 +#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x106c +#define CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC 0x1070 +#define CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC 0x1074 +#define CLK_CON_MUX_MUX_CLKCMU_ACC_ORB 0x1078 +#define CLK_CON_MUX_MUX_CLKCMU_GNPU_XMAA 0x107c +#define CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD 0x1080 +#define CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC 0x1084 +#define CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD 0x1088 +#define CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET 0x108c +#define CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC 0x1090 +#define CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS 0x1094 +#define CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD 0x1098 +#define CLK_CON_MUX_MUX_CLKCMU_ISP_NOC 0x109c +#define CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG 0x10a0 +#define CLK_CON_MUX_MUX_CLKCMU_M2M_NOC 0x10a4 +#define CLK_CON_MUX_MUX_CLKCMU_MFC_MFC 0x10a8 +#define CLK_CON_MUX_MUX_CLKCMU_MFC_WFD 0x10ac +#define CLK_CON_MUX_MUX_CLKCMU_MFD_NOC 0x10b0 +#define CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP 0x10b4 +#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x10b8 +#define
CLK_CON_MUX_MUX_CLKCMU_MISC_NOC 0x10bc +#define CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC 0x10c0 +#define CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC 0x10c4 +#define CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC 0x10c8 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10cc +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC 0x10d0 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10d4 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC 0x10d8 +#define CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC 0x10dc +#define CLK_CON_MUX_MUX_CLKCMU_SNW_NOC 0x10e0 +#define CLK_CON_MUX_MUX_CLKCMU_SSP_NOC 0x10e4 +#define CLK_CON_MUX_MUX_CLKCMU_TAA_NOC 0x10e8 +#define CLK_CON_MUX_MUX_CLK_CMU_NOCP 0x10ec +#define CLK_CON_MUX_MUX_CLK_CMU_PLLCLKOUT 0x10f0 +#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10f4 + +/* DIV */ +#define CLK_CON_DIV_CLKCMU_ACC_NOC 0x1800 +#define CLK_CON_DIV_CLKCMU_APM_NOC 0x1804 +#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1808 +#define CLK_CON_DIV_CLKCMU_AUD_NOC 0x180c +#define CLK_CON_DIV_CLKCMU_CIS_MCLK0 0x1810 +#define CLK_CON_DIV_CLKCMU_CIS_MCLK1 0x1814 +#define CLK_CON_DIV_CLKCMU_CIS_MCLK2 0x1818 +#define CLK_CON_DIV_CLKCMU_CIS_MCLK3 0x181c +#define CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER 0x1820 +#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG 0x1824 +#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1828 +#define CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER 0x182c +#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x1830 +#define CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER 0x1834 +#define CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH 0x1838 +#define CLK_CON_DIV_CLKCMU_DNC_NOC 0x183c +#define CLK_CON_DIV_CLKCMU_DPTX_DPGTC 0x1840 +#define CLK_CON_DIV_CLKCMU_DPTX_DPOSC 0x1844 +#define CLK_CON_DIV_CLKCMU_DPTX_NOC 0x1848 +#define CLK_CON_DIV_CLKCMU_DPUB_DSIM 0x184c +#define CLK_CON_DIV_CLKCMU_DPUB_NOC 0x1850 +#define CLK_CON_DIV_CLKCMU_DPUF0_NOC 0x1854 +#define CLK_CON_DIV_CLKCMU_DPUF1_NOC 0x1858 +#define CLK_CON_DIV_CLKCMU_DPUF2_NOC 0x185c +#define CLK_CON_DIV_CLKCMU_DSP_NOC 0x1860 +#define CLK_CON_DIV_CLKCMU_G3D_NOCP 0x1864 +#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1868 +#define CLK_CON_DIV_CLKCMU_GNPU_NOC 0x186c +#define CLK_CON_DIV_CLKCMU_HSI0_NOC 0x1870 +#define CLK_CON_DIV_CLKCMU_ACC_ORB 0x1874 +#define CLK_CON_DIV_CLKCMU_GNPU_XMAA 0x1878 +#define CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD 0x187c +#define CLK_CON_DIV_CLKCMU_HSI1_NOC 0x1880 +#define CLK_CON_DIV_CLKCMU_HSI1_USBDRD 0x1884 +#define CLK_CON_DIV_CLKCMU_HSI2_ETHERNET 0x1888 +#define CLK_CON_DIV_CLKCMU_HSI2_NOC 0x188c +#define CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS 0x1890 +#define CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD 0x1894 +#define CLK_CON_DIV_CLKCMU_ISP_NOC 0x1898 +#define CLK_CON_DIV_CLKCMU_M2M_JPEG 0x189c +#define CLK_CON_DIV_CLKCMU_M2M_NOC 0x18a0 +#define CLK_CON_DIV_CLKCMU_MFC_MFC 0x18a4 +#define CLK_CON_DIV_CLKCMU_MFC_WFD 0x18a8 +#define CLK_CON_DIV_CLKCMU_MFD_NOC 0x18ac +#define CLK_CON_DIV_CLKCMU_MIF_NOCP 0x18b0 +#define CLK_CON_DIV_CLKCMU_MISC_NOC 0x18b4 +#define CLK_CON_DIV_CLKCMU_NOCL0_NOC 0x18b8 +#define CLK_CON_DIV_CLKCMU_NOCL1_NOC 0x18bc +#define CLK_CON_DIV_CLKCMU_NOCL2_NOC 0x18c0 +#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x18c4 +#define CLK_CON_DIV_CLKCMU_PERIC0_NOC 0x18c8 +#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18cc +#define CLK_CON_DIV_CLKCMU_PERIC1_NOC 0x18d0 +#define CLK_CON_DIV_CLKCMU_SDMA_NOC 0x18d4 +#define CLK_CON_DIV_CLKCMU_SNW_NOC 0x18d8 +#define CLK_CON_DIV_CLKCMU_SSP_NOC 0x18dc +#define CLK_CON_DIV_CLKCMU_TAA_NOC 0x18e0 +#define CLK_CON_DIV_CLK_ADD_CH_CLK 0x18e4 +#define CLK_CON_DIV_CLK_CMU_PLLCLKOUT 0x18e8 +#define CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST 0x18ec +#define CLK_CON_DIV_DIV_CLK_CMU_NOCP 0x18f0 + +static const unsigned long top_clk_regs[] __initconst = { + 
PLL_LOCKTIME_PLL_MMC, + PLL_LOCKTIME_PLL_SHARED0, + PLL_LOCKTIME_PLL_SHARED1, + PLL_LOCKTIME_PLL_SHARED2, + PLL_LOCKTIME_PLL_SHARED3, + PLL_LOCKTIME_PLL_SHARED4, + PLL_LOCKTIME_PLL_SHARED5, + PLL_CON0_PLL_MMC, + PLL_CON3_PLL_MMC, + PLL_CON0_PLL_SHARED0, + PLL_CON3_PLL_SHARED0, + PLL_CON0_PLL_SHARED1, + PLL_CON3_PLL_SHARED1, + PLL_CON0_PLL_SHARED2, + PLL_CON3_PLL_SHARED2, + PLL_CON0_PLL_SHARED3, + PLL_CON3_PLL_SHARED3, + PLL_CON0_PLL_SHARED4, + PLL_CON3_PLL_SHARED4, + PLL_CON0_PLL_SHARED5, + PLL_CON3_PLL_SHARED5, + CLK_CON_MUX_MUX_CLKCMU_ACC_NOC, + CLK_CON_MUX_MUX_CLKCMU_APM_NOC, + CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, + CLK_CON_MUX_MUX_CLKCMU_AUD_NOC, + CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK0, + CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK1, + CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK2, + CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK3, + CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER, + CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_DNC_NOC, + CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC, + CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC, + CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC, + CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM, + CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC, + CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC, + CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC, + CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC, + CLK_CON_MUX_MUX_CLKCMU_DSP_NOC, + CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP, + CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC, + CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC, + CLK_CON_MUX_MUX_CLKCMU_ACC_ORB, + CLK_CON_MUX_MUX_CLKCMU_GNPU_XMAA, + CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD, + CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC, + CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD, + CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET, + CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC, + CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS, + CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD, + CLK_CON_MUX_MUX_CLKCMU_ISP_NOC, + CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG, + CLK_CON_MUX_MUX_CLKCMU_M2M_NOC, + CLK_CON_MUX_MUX_CLKCMU_MFC_MFC, + CLK_CON_MUX_MUX_CLKCMU_MFC_WFD, + CLK_CON_MUX_MUX_CLKCMU_MFD_NOC, + CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP, + CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_MISC_NOC, + CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC, + CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC, + CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC, + CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC, + CLK_CON_MUX_MUX_CLKCMU_SNW_NOC, + CLK_CON_MUX_MUX_CLKCMU_SSP_NOC, + CLK_CON_MUX_MUX_CLKCMU_TAA_NOC, + CLK_CON_MUX_MUX_CLK_CMU_NOCP, + CLK_CON_MUX_MUX_CLK_CMU_PLLCLKOUT, + CLK_CON_MUX_MUX_CMU_CMUREF, + CLK_CON_DIV_CLKCMU_ACC_NOC, + CLK_CON_DIV_CLKCMU_APM_NOC, + CLK_CON_DIV_CLKCMU_AUD_CPU, + CLK_CON_DIV_CLKCMU_AUD_NOC, + CLK_CON_DIV_CLKCMU_CIS_MCLK0, + CLK_CON_DIV_CLKCMU_CIS_MCLK1, + CLK_CON_DIV_CLKCMU_CIS_MCLK2, + CLK_CON_DIV_CLKCMU_CIS_MCLK3, + CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER, + CLK_CON_DIV_CLKCMU_CPUCL0_DBG, + CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, + CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER, + CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, + CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER, + CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, + CLK_CON_DIV_CLKCMU_DNC_NOC, + CLK_CON_DIV_CLKCMU_DPTX_DPGTC, + CLK_CON_DIV_CLKCMU_DPTX_DPOSC, + CLK_CON_DIV_CLKCMU_DPTX_NOC, + CLK_CON_DIV_CLKCMU_DPUB_DSIM, + CLK_CON_DIV_CLKCMU_DPUB_NOC, + CLK_CON_DIV_CLKCMU_DPUF0_NOC, + CLK_CON_DIV_CLKCMU_DPUF1_NOC, + CLK_CON_DIV_CLKCMU_DPUF2_NOC, + 
CLK_CON_DIV_CLKCMU_DSP_NOC, + CLK_CON_DIV_CLKCMU_G3D_NOCP, + CLK_CON_DIV_CLKCMU_G3D_SWITCH, + CLK_CON_DIV_CLKCMU_GNPU_NOC, + CLK_CON_DIV_CLKCMU_HSI0_NOC, + CLK_CON_DIV_CLKCMU_ACC_ORB, + CLK_CON_DIV_CLKCMU_GNPU_XMAA, + CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD, + CLK_CON_DIV_CLKCMU_HSI1_NOC, + CLK_CON_DIV_CLKCMU_HSI1_USBDRD, + CLK_CON_DIV_CLKCMU_HSI2_ETHERNET, + CLK_CON_DIV_CLKCMU_HSI2_NOC, + CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS, + CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD, + CLK_CON_DIV_CLKCMU_ISP_NOC, + CLK_CON_DIV_CLKCMU_M2M_JPEG, + CLK_CON_DIV_CLKCMU_M2M_NOC, + CLK_CON_DIV_CLKCMU_MFC_MFC, + CLK_CON_DIV_CLKCMU_MFC_WFD, + CLK_CON_DIV_CLKCMU_MFD_NOC, + CLK_CON_DIV_CLKCMU_MIF_NOCP, + CLK_CON_DIV_CLKCMU_MISC_NOC, + CLK_CON_DIV_CLKCMU_NOCL0_NOC, + CLK_CON_DIV_CLKCMU_NOCL1_NOC, + CLK_CON_DIV_CLKCMU_NOCL2_NOC, + CLK_CON_DIV_CLKCMU_PERIC0_IP, + CLK_CON_DIV_CLKCMU_PERIC0_NOC, + CLK_CON_DIV_CLKCMU_PERIC1_IP, + CLK_CON_DIV_CLKCMU_PERIC1_NOC, + CLK_CON_DIV_CLKCMU_SDMA_NOC, + CLK_CON_DIV_CLKCMU_SNW_NOC, + CLK_CON_DIV_CLKCMU_SSP_NOC, + CLK_CON_DIV_CLKCMU_TAA_NOC, + CLK_CON_DIV_CLK_ADD_CH_CLK, + CLK_CON_DIV_CLK_CMU_PLLCLKOUT, + CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST, + CLK_CON_DIV_DIV_CLK_CMU_NOCP, +}; + +static const struct samsung_pll_clock top_pll_clks[] __initconst = { + /* CMU_TOP_PURECLKCOMP */ + PLL(pll_531x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL), + PLL(pll_531x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL), + PLL(pll_531x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL), + PLL(pll_531x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL), + PLL(pll_531x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL), + PLL(pll_531x, FOUT_SHARED5_PLL, "fout_shared5_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED5, PLL_CON3_PLL_SHARED5, NULL), + PLL(pll_531x, FOUT_MMC_PLL, "fout_mmc_pll", "oscclk", + PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL), +}; + +/* List of parent clocks for Muxes in CMU_TOP */ +PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" }; +PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" }; +PNAME(mout_shared2_pll_p) = { "oscclk", "fout_shared2_pll" }; +PNAME(mout_shared3_pll_p) = { "oscclk", "fout_shared3_pll" }; +PNAME(mout_shared4_pll_p) = { "oscclk", "fout_shared4_pll" }; +PNAME(mout_shared5_pll_p) = { "oscclk", "fout_shared5_pll" }; +PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" }; + +PNAME(mout_clkcmu_cmu_boost_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_cmu_cmuref_p) = { "oscclk", "dout_cmu_boost" }; + +PNAME(mout_clkcmu_acc_noc_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "dout_shared5_div1", + "dout_shared3_div1", "oscclk" }; + +PNAME(mout_clkcmu_acc_orb_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared1_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared5_pll", + "fout_shared3_pll", "oscclk" }; + +PNAME(mout_clkcmu_apm_noc_p) = { "dout_shared2_div2", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_aud_cpu_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "dout_shared4_div3" }; + 
+PNAME(mout_clkcmu_aud_noc_p) = { "dout_shared2_div2", "dout_shared4_div2", + "dout_shared1_div2", "dout_shared2_div3" }; + +PNAME(mout_clkcmu_cpucl0_switch_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2" }; + +PNAME(mout_clkcmu_cpucl0_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll", + "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2", + "dout_shared2_div3", "fout_shared3_pll" }; + +PNAME(mout_clkcmu_cpucl0_dbg_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared0_div4" }; + +PNAME(mout_clkcmu_cpucl1_switch_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2" }; + +PNAME(mout_clkcmu_cpucl1_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll", + "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2", + "dout_shared2_div3", "fout_shared3_pll" }; + +PNAME(mout_clkcmu_cpucl2_switch_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2" }; + +PNAME(mout_clkcmu_cpucl2_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll", + "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2", + "dout_shared2_div3", "fout_shared3_pll" }; + +PNAME(mout_clkcmu_dnc_noc_p) = { "dout_shared1_div2", "dout_shared2_div2", + "dout_shared0_div3", "dout_shared4_div2", + "dout_shared1_div3", "dout_shared2_div3", + "dout_shared1_div4", "fout_shared3_pll" }; + +PNAME(mout_clkcmu_dptx_noc_p) = { "dout_shared4_div2", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_dptx_dpgtc_p) = { "oscclk", "dout_shared2_div3", + "dout_shared2_div4", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_dptx_dposc_p) = { "oscclk", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_dpub_noc_p) = { "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4", + "fout_shared3_pll" }; + +PNAME(mout_clkcmu_dpub_dsim_p) = { "dout_shared2_div3", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_dpuf_noc_p) = { "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4", + "fout_shared3_pll" }; + +PNAME(mout_clkcmu_dsp_noc_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "fout_shared5_pll", "fout_shared3_pll" }; + +PNAME(mout_clkcmu_g3d_switch_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2" }; + +PNAME(mout_clkcmu_g3d_nocp_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_gnpu_noc_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared2_div3", + "fout_shared5_pll", "fout_shared3_pll" }; + +PNAME(mout_clkcmu_hsi0_noc_p) = { "dout_shared4_div2", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_hsi1_noc_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_hsi1_usbdrd_p) = { "oscclk", "dout_shared2_div3", + "dout_shared2_div4", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_hsi1_mmc_card_p) = { "oscclk", "dout_shared2_div2", + "dout_shared4_div2", "fout_mmc_pll" }; + +PNAME(mout_clkcmu_hsi2_noc_p) = { "dout_shared4_div2", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div4" }; 
+ +PNAME(mout_clkcmu_hsi2_noc_ufs_p) = { "dout_shared4_div2", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div2" }; + +PNAME(mout_clkcmu_hsi2_ufs_embd_p) = { "oscclk", "dout_shared2_div3", + "dout_shared2_div4", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_hsi2_ethernet_p) = { "oscclk", "dout_shared2_div2", + "dout_shared0_div3", "dout_shared1_div3" }; + +PNAME(mout_clkcmu_isp_noc_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared5_pll", + "fout_shared3_pll", "oscclk" }; + +PNAME(mout_clkcmu_m2m_noc_p) = { "dout_shared0_div3", "dout_shared4_div2", + "dout_shared2_div3", "dout_shared1_div4" }; + +PNAME(mout_clkcmu_m2m_jpeg_p) = { "dout_shared0_div3", "dout_shared4_div2", + "dout_shared2_div3", "dout_shared1_div4" }; + +PNAME(mout_clkcmu_mfc_mfc_p) = { "dout_shared0_div3", "dout_shared4_div2", + "dout_shared2_div3", "dout_shared1_div4" }; + +PNAME(mout_clkcmu_mfc_wfd_p) = { "dout_shared0_div3", "dout_shared4_div2", + "dout_shared2_div3", "dout_shared1_div4" }; + +PNAME(mout_clkcmu_mfd_noc_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared5_pll", + "fout_shared3_pll", "oscclk" }; + +PNAME(mout_clkcmu_mif_switch_p) = { "fout_shared0_pll", "fout_shared1_pll", + "fout_shared2_pll", "fout_shared4_pll", + "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "fout_shared5_pll" }; + +PNAME(mout_clkcmu_mif_nocp_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_misc_noc_p) = { "dout_shared4_div2", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_nocl0_noc_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared3_pll" }; + +PNAME(mout_clkcmu_nocl1_noc_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared5_pll", + "fout_shared3_pll", "oscclk" }; + +PNAME(mout_clkcmu_nocl2_noc_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared5_pll", + "fout_shared3_pll", "oscclk" }; + +PNAME(mout_clkcmu_peric0_noc_p) = { "dout_shared2_div3", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_peric0_ip_p) = { "dout_shared2_div3", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_peric1_noc_p) = { "dout_shared2_div3", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_peric1_ip_p) = { "dout_shared2_div3", "dout_shared2_div4" }; + +PNAME(mout_clkcmu_sdma_noc_p) = { "dout_shared1_div2", "dout_shared2_div2", + "dout_shared0_div3", "dout_shared4_div2", + "dout_shared1_div3", "dout_shared2_div3", + "dout_shared1_div4", "fout_shared3_pll" }; + +PNAME(mout_clkcmu_snw_noc_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared5_pll", + "fout_shared3_pll", "oscclk" }; + +PNAME(mout_clkcmu_ssp_noc_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div2", "dout_shared4_div4" }; + +PNAME(mout_clkcmu_taa_noc_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared5_pll", + "fout_shared3_pll", "oscclk" }; + +static const struct samsung_mux_clock top_mux_clks[] __initconst = { + /* CMU_TOP_PURECLKCOMP */ + MUX(MOUT_SHARED0_PLL, "mout_shared0_pll", 
mout_shared0_pll_p, + PLL_CON0_PLL_SHARED0, 4, 1), + MUX(MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p, + PLL_CON0_PLL_SHARED1, 4, 1), + MUX(MOUT_SHARED2_PLL, "mout_shared2_pll", mout_shared2_pll_p, + PLL_CON0_PLL_SHARED2, 4, 1), + MUX(MOUT_SHARED3_PLL, "mout_shared3_pll", mout_shared3_pll_p, + PLL_CON0_PLL_SHARED3, 4, 1), + MUX(MOUT_SHARED4_PLL, "mout_shared4_pll", mout_shared4_pll_p, + PLL_CON0_PLL_SHARED4, 4, 1), + MUX(MOUT_SHARED5_PLL, "mout_shared5_pll", mout_shared5_pll_p, + PLL_CON0_PLL_SHARED5, 4, 1), + MUX(MOUT_MMC_PLL, "mout_mmc_pll", mout_mmc_pll_p, + PLL_CON0_PLL_MMC, 4, 1), + + /* BOOST */ + MUX(MOUT_CLKCMU_CMU_BOOST, "mout_clkcmu_cmu_boost", + mout_clkcmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2), + MUX(MOUT_CLKCMU_CMU_CMUREF, "mout_clkcmu_cmu_cmuref", + mout_clkcmu_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1), + + /* ACC */ + MUX(MOUT_CLKCMU_ACC_NOC, "mout_clkcmu_acc_noc", + mout_clkcmu_acc_noc_p, CLK_CON_MUX_MUX_CLKCMU_ACC_NOC, 0, 3), + MUX(MOUT_CLKCMU_ACC_ORB, "mout_clkcmu_acc_orb", + mout_clkcmu_acc_orb_p, CLK_CON_MUX_MUX_CLKCMU_ACC_ORB, 0, 3), + + /* APM */ + MUX(MOUT_CLKCMU_APM_NOC, "mout_clkcmu_apm_noc", + mout_clkcmu_apm_noc_p, CLK_CON_MUX_MUX_CLKCMU_APM_NOC, 0, 2), + + /* AUD */ + MUX(MOUT_CLKCMU_AUD_CPU, "mout_clkcmu_aud_cpu", + mout_clkcmu_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 3), + MUX(MOUT_CLKCMU_AUD_NOC, "mout_clkcmu_aud_noc", + mout_clkcmu_aud_noc_p, CLK_CON_MUX_MUX_CLKCMU_AUD_NOC, 0, 2), + + /* CPUCL0 */ + MUX(MOUT_CLKCMU_CPUCL0_SWITCH, "mout_clkcmu_cpucl0_switch", + mout_clkcmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, + 0, 2), + MUX(MOUT_CLKCMU_CPUCL0_CLUSTER, "mout_clkcmu_cpucl0_cluster", + mout_clkcmu_cpucl0_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER, + 0, 3), + MUX(MOUT_CLKCMU_CPUCL0_DBG, "mout_clkcmu_cpucl0_dbg", + mout_clkcmu_cpucl0_dbg_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG, + 0, 2), + + /* CPUCL1 */ + MUX(MOUT_CLKCMU_CPUCL1_SWITCH, "mout_clkcmu_cpucl1_switch", + mout_clkcmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, + 0, 2), + MUX(MOUT_CLKCMU_CPUCL1_CLUSTER, "mout_clkcmu_cpucl1_cluster", + mout_clkcmu_cpucl1_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER, + 0, 3), + + /* CPUCL2 */ + MUX(MOUT_CLKCMU_CPUCL2_SWITCH, "mout_clkcmu_cpucl2_switch", + mout_clkcmu_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH, + 0, 2), + MUX(MOUT_CLKCMU_CPUCL2_CLUSTER, "mout_clkcmu_cpucl2_cluster", + mout_clkcmu_cpucl2_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER, + 0, 3), + + /* DNC */ + MUX(MOUT_CLKCMU_DNC_NOC, "mout_clkcmu_dnc_noc", + mout_clkcmu_dnc_noc_p, CLK_CON_MUX_MUX_CLKCMU_DNC_NOC, 0, 3), + + /* DPTX */ + MUX(MOUT_CLKCMU_DPTX_NOC, "mout_clkcmu_dptx_noc", + mout_clkcmu_dptx_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC, 0, 2), + MUX(MOUT_CLKCMU_DPTX_DPGTC, "mout_clkcmu_dptx_dpgtc", + mout_clkcmu_dptx_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC, 0, 2), + MUX(MOUT_CLKCMU_DPTX_DPOSC, "mout_clkcmu_dptx_dposc", + mout_clkcmu_dptx_dposc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC, 0, 1), + + /* DPUB */ + MUX(MOUT_CLKCMU_DPUB_NOC, "mout_clkcmu_dpub_noc", + mout_clkcmu_dpub_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC, 0, 3), + MUX(MOUT_CLKCMU_DPUB_DSIM, "mout_clkcmu_dpub_dsim", + mout_clkcmu_dpub_dsim_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM, 0, 1), + + /* DPUF */ + MUX(MOUT_CLKCMU_DPUF0_NOC, "mout_clkcmu_dpuf0_noc", + mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC, 0, 3), + MUX(MOUT_CLKCMU_DPUF1_NOC, "mout_clkcmu_dpuf1_noc", + mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC, 0, 3), + 
MUX(MOUT_CLKCMU_DPUF2_NOC, "mout_clkcmu_dpuf2_noc", + mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC, 0, 3), + + /* DSP */ + MUX(MOUT_CLKCMU_DSP_NOC, "mout_clkcmu_dsp_noc", + mout_clkcmu_dsp_noc_p, CLK_CON_MUX_MUX_CLKCMU_DSP_NOC, 0, 3), + + /* G3D */ + MUX(MOUT_CLKCMU_G3D_SWITCH, "mout_clkcmu_g3d_switch", + mout_clkcmu_g3d_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH, 0, 2), + MUX(MOUT_CLKCMU_G3D_NOCP, "mout_clkcmu_g3d_nocp", + mout_clkcmu_g3d_nocp_p, CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP, 0, 2), + + /* GNPU */ + MUX(MOUT_CLKCMU_GNPU_NOC, "mout_clkcmu_gnpu_noc", + mout_clkcmu_gnpu_noc_p, CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC, 0, 3), + + /* HSI0 */ + MUX(MOUT_CLKCMU_HSI0_NOC, "mout_clkcmu_hsi0_noc", + mout_clkcmu_hsi0_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC, 0, 2), + + /* HSI1 */ + MUX(MOUT_CLKCMU_HSI1_NOC, "mout_clkcmu_hsi1_noc", + mout_clkcmu_hsi1_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC, + 0, 2), + MUX(MOUT_CLKCMU_HSI1_USBDRD, "mout_clkcmu_hsi1_usbdrd", + mout_clkcmu_hsi1_usbdrd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD, + 0, 2), + MUX(MOUT_CLKCMU_HSI1_MMC_CARD, "mout_clkcmu_hsi1_mmc_card", + mout_clkcmu_hsi1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD, + 0, 2), + + /* HSI2 */ + MUX(MOUT_CLKCMU_HSI2_NOC, "mout_clkcmu_hsi2_noc", + mout_clkcmu_hsi2_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC, + 0, 2), + MUX(MOUT_CLKCMU_HSI2_NOC_UFS, "mout_clkcmu_hsi2_noc_ufs", + mout_clkcmu_hsi2_noc_ufs_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS, + 0, 2), + MUX(MOUT_CLKCMU_HSI2_UFS_EMBD, "mout_clkcmu_hsi2_ufs_embd", + mout_clkcmu_hsi2_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD, + 0, 2), + MUX(MOUT_CLKCMU_HSI2_ETHERNET, "mout_clkcmu_hsi2_ethernet", + mout_clkcmu_hsi2_ethernet_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET, + 0, 2), + + /* ISP */ + MUX(MOUT_CLKCMU_ISP_NOC, "mout_clkcmu_isp_noc", + mout_clkcmu_isp_noc_p, CLK_CON_MUX_MUX_CLKCMU_ISP_NOC, 0, 3), + + /* M2M */ + MUX(MOUT_CLKCMU_M2M_NOC, "mout_clkcmu_m2m_noc", + mout_clkcmu_m2m_noc_p, CLK_CON_MUX_MUX_CLKCMU_M2M_NOC, 0, 2), + MUX(MOUT_CLKCMU_M2M_JPEG, "mout_clkcmu_m2m_jpeg", + mout_clkcmu_m2m_jpeg_p, CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG, 0, 2), + + /* MFC */ + MUX(MOUT_CLKCMU_MFC_MFC, "mout_clkcmu_mfc_mfc", + mout_clkcmu_mfc_mfc_p, CLK_CON_MUX_MUX_CLKCMU_MFC_MFC, 0, 2), + MUX(MOUT_CLKCMU_MFC_WFD, "mout_clkcmu_mfc_wfd", + mout_clkcmu_mfc_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC_WFD, 0, 2), + + /* MFD */ + MUX(MOUT_CLKCMU_MFD_NOC, "mout_clkcmu_mfd_noc", + mout_clkcmu_mfd_noc_p, CLK_CON_MUX_MUX_CLKCMU_MFD_NOC, 0, 3), + + /* MIF */ + MUX(MOUT_CLKCMU_MIF_SWITCH, "mout_clkcmu_mif_switch", + mout_clkcmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3), + MUX(MOUT_CLKCMU_MIF_NOCP, "mout_clkcmu_mif_nocp", + mout_clkcmu_mif_nocp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP, 0, 2), + + /* MISC */ + MUX(MOUT_CLKCMU_MISC_NOC, "mout_clkcmu_misc_noc", + mout_clkcmu_misc_noc_p, CLK_CON_MUX_MUX_CLKCMU_MISC_NOC, 0, 2), + + /* NOCL0 */ + MUX(MOUT_CLKCMU_NOCL0_NOC, "mout_clkcmu_nocl0_noc", + mout_clkcmu_nocl0_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC, 0, 3), + + /* NOCL1 */ + MUX(MOUT_CLKCMU_NOCL1_NOC, "mout_clkcmu_nocl1_noc", + mout_clkcmu_nocl1_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC, 0, 3), + + /* NOCL2 */ + MUX(MOUT_CLKCMU_NOCL2_NOC, "mout_clkcmu_nocl2_noc", + mout_clkcmu_nocl2_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC, 0, 3), + + /* PERIC0 */ + MUX(MOUT_CLKCMU_PERIC0_NOC, "mout_clkcmu_peric0_noc", + mout_clkcmu_peric0_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC, 0, 1), + MUX(MOUT_CLKCMU_PERIC0_IP, "mout_clkcmu_peric0_ip", + mout_clkcmu_peric0_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1), + + /* 
PERIC1 */ + MUX(MOUT_CLKCMU_PERIC1_NOC, "mout_clkcmu_peric1_noc", + mout_clkcmu_peric1_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC, 0, 1), + MUX(MOUT_CLKCMU_PERIC1_IP, "mout_clkcmu_peric1_ip", + mout_clkcmu_peric1_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1), + + /* SDMA */ + MUX(MOUT_CLKCMU_SDMA_NOC, "mout_clkcmu_sdma_noc", + mout_clkcmu_sdma_noc_p, CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC, 0, 3), + + /* SNW */ + MUX(MOUT_CLKCMU_SNW_NOC, "mout_clkcmu_snw_noc", + mout_clkcmu_snw_noc_p, CLK_CON_MUX_MUX_CLKCMU_SNW_NOC, 0, 3), + + /* SSP */ + MUX(MOUT_CLKCMU_SSP_NOC, "mout_clkcmu_ssp_noc", + mout_clkcmu_ssp_noc_p, CLK_CON_MUX_MUX_CLKCMU_SSP_NOC, 0, 2), + + /* TAA */ + MUX(MOUT_CLKCMU_TAA_NOC, "mout_clkcmu_taa_noc", + mout_clkcmu_taa_noc_p, CLK_CON_MUX_MUX_CLKCMU_TAA_NOC, 0, 3), +}; + +static const struct samsung_div_clock top_div_clks[] __initconst = { + /* CMU_TOP_PURECLKCOMP */ + + /* BOOST */ + DIV(DOUT_CLKCMU_CMU_BOOST, "dout_clkcmu_cmu_boost", + "mout_clkcmu_cmu_boost", CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST, 0, 2), + + /* ACC */ + DIV(DOUT_CLKCMU_ACC_NOC, "dout_clkcmu_acc_noc", + "mout_clkcmu_acc_noc", CLK_CON_DIV_CLKCMU_ACC_NOC, 0, 4), + DIV(DOUT_CLKCMU_ACC_ORB, "dout_clkcmu_acc_orb", + "mout_clkcmu_acc_orb", CLK_CON_DIV_CLKCMU_ACC_ORB, 0, 4), + + /* APM */ + DIV(DOUT_CLKCMU_APM_NOC, "dout_clkcmu_apm_noc", + "mout_clkcmu_apm_noc", CLK_CON_DIV_CLKCMU_APM_NOC, 0, 3), + + /* AUD */ + DIV(DOUT_CLKCMU_AUD_CPU, "dout_clkcmu_aud_cpu", + "mout_clkcmu_aud_cpu", CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3), + DIV(DOUT_CLKCMU_AUD_NOC, "dout_clkcmu_aud_noc", + "mout_clkcmu_aud_noc", CLK_CON_DIV_CLKCMU_AUD_NOC, 0, 4), + + /* CPUCL0 */ + DIV(DOUT_CLKCMU_CPUCL0_SWITCH, "dout_clkcmu_cpucl0_switch", + "mout_clkcmu_cpucl0_switch", + CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3), + DIV(DOUT_CLKCMU_CPUCL0_CLUSTER, "dout_clkcmu_cpucl0_cluster", + "mout_clkcmu_cpucl0_cluster", + CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER, 0, 3), + DIV(DOUT_CLKCMU_CPUCL0_DBG, "dout_clkcmu_cpucl0_dbg", + "mout_clkcmu_cpucl0_dbg", + CLK_CON_DIV_CLKCMU_CPUCL0_DBG, 0, 4), + + /* CPUCL1 */ + DIV(DOUT_CLKCMU_CPUCL1_SWITCH, "dout_clkcmu_cpucl1_switch", + "mout_clkcmu_cpucl1_switch", + CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3), + DIV(DOUT_CLKCMU_CPUCL1_CLUSTER, "dout_clkcmu_cpucl1_cluster", + "mout_clkcmu_cpucl1_cluster", + CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER, 0, 3), + + /* CPUCL2 */ + DIV(DOUT_CLKCMU_CPUCL2_SWITCH, "dout_clkcmu_cpucl2_switch", + "mout_clkcmu_cpucl2_switch", + CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, 0, 3), + DIV(DOUT_CLKCMU_CPUCL2_CLUSTER, "dout_clkcmu_cpucl2_cluster", + "mout_clkcmu_cpucl2_cluster", + CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER, 0, 3), + + /* DNC */ + DIV(DOUT_CLKCMU_DNC_NOC, "dout_clkcmu_dnc_noc", + "mout_clkcmu_dnc_noc", CLK_CON_DIV_CLKCMU_DNC_NOC, 0, 4), + + /* DPTX */ + DIV(DOUT_CLKCMU_DPTX_NOC, "dout_clkcmu_dptx_noc", + "mout_clkcmu_dptx_noc", CLK_CON_DIV_CLKCMU_DPTX_NOC, 0, 4), + DIV(DOUT_CLKCMU_DPTX_DPGTC, "dout_clkcmu_dptx_dpgtc", + "mout_clkcmu_dptx_dpgtc", CLK_CON_DIV_CLKCMU_DPTX_DPGTC, 0, 3), + DIV(DOUT_CLKCMU_DPTX_DPOSC, "dout_clkcmu_dptx_dposc", + "mout_clkcmu_dptx_dposc", CLK_CON_DIV_CLKCMU_DPTX_DPOSC, 0, 5), + + /* DPUB */ + DIV(DOUT_CLKCMU_DPUB_NOC, "dout_clkcmu_dpub_noc", + "mout_clkcmu_dpub_noc", CLK_CON_DIV_CLKCMU_DPUB_NOC, 0, 4), + DIV(DOUT_CLKCMU_DPUB_DSIM, "dout_clkcmu_dpub_dsim", + "mout_clkcmu_dpub_dsim", CLK_CON_DIV_CLKCMU_DPUB_DSIM, 0, 4), + + /* DPUF */ + DIV(DOUT_CLKCMU_DPUF0_NOC, "dout_clkcmu_dpuf0_noc", + "mout_clkcmu_dpuf0_noc", CLK_CON_DIV_CLKCMU_DPUF0_NOC, 0, 4), + DIV(DOUT_CLKCMU_DPUF1_NOC, "dout_clkcmu_dpuf1_noc", + 
"mout_clkcmu_dpuf1_noc", CLK_CON_DIV_CLKCMU_DPUF1_NOC, 0, 4), + DIV(DOUT_CLKCMU_DPUF2_NOC, "dout_clkcmu_dpuf2_noc", + "mout_clkcmu_dpuf2_noc", CLK_CON_DIV_CLKCMU_DPUF2_NOC, 0, 4), + + /* DSP */ + DIV(DOUT_CLKCMU_DSP_NOC, "dout_clkcmu_dsp_noc", + "mout_clkcmu_dsp_noc", CLK_CON_DIV_CLKCMU_DSP_NOC, 0, 4), + + /* G3D */ + DIV(DOUT_CLKCMU_G3D_SWITCH, "dout_clkcmu_g3d_switch", + "mout_clkcmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3), + DIV(DOUT_CLKCMU_G3D_NOCP, "dout_clkcmu_g3d_nocp", + "mout_clkcmu_g3d_nocp", CLK_CON_DIV_CLKCMU_G3D_NOCP, 0, 3), + + /* GNPU */ + DIV(DOUT_CLKCMU_GNPU_NOC, "dout_clkcmu_gnpu_noc", + "mout_clkcmu_gnpu_noc", CLK_CON_DIV_CLKCMU_GNPU_NOC, 0, 4), + + /* HSI0 */ + DIV(DOUT_CLKCMU_HSI0_NOC, "dout_clkcmu_hsi0_noc", + "mout_clkcmu_hsi0_noc", CLK_CON_DIV_CLKCMU_HSI0_NOC, 0, 4), + + /* HSI1 */ + DIV(DOUT_CLKCMU_HSI1_NOC, "dout_clkcmu_hsi1_noc", + "mout_clkcmu_hsi1_noc", CLK_CON_DIV_CLKCMU_HSI1_NOC, 0, 4), + DIV(DOUT_CLKCMU_HSI1_USBDRD, "dout_clkcmu_hsi1_usbdrd", + "mout_clkcmu_hsi1_usbdrd", CLK_CON_DIV_CLKCMU_HSI1_USBDRD, 0, 4), + DIV(DOUT_CLKCMU_HSI1_MMC_CARD, "dout_clkcmu_hsi1_mmc_card", + "mout_clkcmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD, 0, 9), + + /* HSI2 */ + DIV(DOUT_CLKCMU_HSI2_NOC, "dout_clkcmu_hsi2_noc", + "mout_clkcmu_hsi2_noc", CLK_CON_DIV_CLKCMU_HSI2_NOC, 0, 4), + DIV(DOUT_CLKCMU_HSI2_NOC_UFS, "dout_clkcmu_hsi2_noc_ufs", + "mout_clkcmu_hsi2_noc_ufs", CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS, 0, 4), + DIV(DOUT_CLKCMU_HSI2_UFS_EMBD, "dout_clkcmu_hsi2_ufs_embd", + "mout_clkcmu_hsi2_ufs_embd", CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD, 0, 3), + DIV(DOUT_CLKCMU_HSI2_ETHERNET, "dout_clkcmu_hsi2_ethernet", + "mout_clkcmu_hsi2_ethernet", CLK_CON_DIV_CLKCMU_HSI2_ETHERNET, 0, 3), + + /* ISP */ + DIV(DOUT_CLKCMU_ISP_NOC, "dout_clkcmu_isp_noc", + "mout_clkcmu_isp_noc", CLK_CON_DIV_CLKCMU_ISP_NOC, 0, 4), + + /* M2M */ + DIV(DOUT_CLKCMU_M2M_NOC, "dout_clkcmu_m2m_noc", + "mout_clkcmu_m2m_noc", CLK_CON_DIV_CLKCMU_M2M_NOC, 0, 4), + DIV(DOUT_CLKCMU_M2M_JPEG, "dout_clkcmu_m2m_jpeg", + "mout_clkcmu_m2m_jpeg", CLK_CON_DIV_CLKCMU_M2M_JPEG, 0, 4), + + /* MFC */ + DIV(DOUT_CLKCMU_MFC_MFC, "dout_clkcmu_mfc_mfc", + "mout_clkcmu_mfc_mfc", CLK_CON_DIV_CLKCMU_MFC_MFC, 0, 4), + DIV(DOUT_CLKCMU_MFC_WFD, "dout_clkcmu_mfc_wfd", + "mout_clkcmu_mfc_wfd", CLK_CON_DIV_CLKCMU_MFC_WFD, 0, 4), + + /* MFD */ + DIV(DOUT_CLKCMU_MFD_NOC, "dout_clkcmu_mfd_noc", + "mout_clkcmu_mfd_noc", CLK_CON_DIV_CLKCMU_MFD_NOC, 0, 4), + + /* MIF */ + DIV(DOUT_CLKCMU_MIF_NOCP, "dout_clkcmu_mif_nocp", + "mout_clkcmu_mif_nocp", CLK_CON_DIV_CLKCMU_MIF_NOCP, 0, 4), + + /* MISC */ + DIV(DOUT_CLKCMU_MISC_NOC, "dout_clkcmu_misc_noc", + "mout_clkcmu_misc_noc", CLK_CON_DIV_CLKCMU_MISC_NOC, 0, 4), + + /* NOCL0 */ + DIV(DOUT_CLKCMU_NOCL0_NOC, "dout_clkcmu_nocl0_noc", + "mout_clkcmu_nocl0_noc", CLK_CON_DIV_CLKCMU_NOCL0_NOC, 0, 4), + + /* NOCL1 */ + DIV(DOUT_CLKCMU_NOCL1_NOC, "dout_clkcmu_nocl1_noc", + "mout_clkcmu_nocl1_noc", CLK_CON_DIV_CLKCMU_NOCL1_NOC, 0, 4), + + /* NOCL2 */ + DIV(DOUT_CLKCMU_NOCL2_NOC, "dout_clkcmu_nocl2_noc", + "mout_clkcmu_nocl2_noc", CLK_CON_DIV_CLKCMU_NOCL2_NOC, 0, 4), + + /* PERIC0 */ + DIV(DOUT_CLKCMU_PERIC0_NOC, "dout_clkcmu_peric0_noc", + "mout_clkcmu_peric0_noc", CLK_CON_DIV_CLKCMU_PERIC0_NOC, 0, 4), + DIV(DOUT_CLKCMU_PERIC0_IP, "dout_clkcmu_peric0_ip", + "mout_clkcmu_peric0_ip", CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4), + + /* PERIC1 */ + DIV(DOUT_CLKCMU_PERIC1_NOC, "dout_clkcmu_peric1_noc", + "mout_clkcmu_peric1_noc", CLK_CON_DIV_CLKCMU_PERIC1_NOC, 0, 4), + DIV(DOUT_CLKCMU_PERIC1_IP, 
"dout_clkcmu_peric1_ip", + "mout_clkcmu_peric1_ip", CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4), + + /* SDMA */ + DIV(DOUT_CLKCMU_SDMA_NOC, "dout_clkcmu_sdma_noc", + "mout_clkcmu_sdma_noc", CLK_CON_DIV_CLKCMU_SDMA_NOC, 0, 4), + + /* SNW */ + DIV(DOUT_CLKCMU_SNW_NOC, "dout_clkcmu_snw_noc", + "mout_clkcmu_snw_noc", CLK_CON_DIV_CLKCMU_SNW_NOC, 0, 4), + + /* SSP */ + DIV(DOUT_CLKCMU_SSP_NOC, "dout_clkcmu_ssp_noc", + "mout_clkcmu_ssp_noc", CLK_CON_DIV_CLKCMU_SSP_NOC, 0, 4), + + /* TAA */ + DIV(DOUT_CLKCMU_TAA_NOC, "dout_clkcmu_taa_noc", + "mout_clkcmu_taa_noc", CLK_CON_DIV_CLKCMU_TAA_NOC, 0, 4), +}; + +static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = { + FFACTOR(DOUT_SHARED0_DIV1, "dout_shared0_div1", + "mout_shared0_pll", 1, 1, 0), + FFACTOR(DOUT_SHARED0_DIV2, "dout_shared0_div2", + "mout_shared0_pll", 1, 2, 0), + FFACTOR(DOUT_SHARED0_DIV3, "dout_shared0_div3", + "mout_shared0_pll", 1, 3, 0), + FFACTOR(DOUT_SHARED0_DIV4, "dout_shared0_div4", + "mout_shared0_pll", 1, 4, 0), + FFACTOR(DOUT_SHARED1_DIV1, "dout_shared1_div1", + "mout_shared1_pll", 1, 1, 0), + FFACTOR(DOUT_SHARED1_DIV2, "dout_shared1_div2", + "mout_shared1_pll", 1, 2, 0), + FFACTOR(DOUT_SHARED1_DIV3, "dout_shared1_div3", + "mout_shared1_pll", 1, 3, 0), + FFACTOR(DOUT_SHARED1_DIV4, "dout_shared1_div4", + "mout_shared1_pll", 1, 4, 0), + FFACTOR(DOUT_SHARED2_DIV1, "dout_shared2_div1", + "mout_shared2_pll", 1, 1, 0), + FFACTOR(DOUT_SHARED2_DIV2, "dout_shared2_div2", + "mout_shared2_pll", 1, 2, 0), + FFACTOR(DOUT_SHARED2_DIV3, "dout_shared2_div3", + "mout_shared2_pll", 1, 3, 0), + FFACTOR(DOUT_SHARED2_DIV4, "dout_shared2_div4", + "mout_shared2_pll", 1, 4, 0), + FFACTOR(DOUT_SHARED3_DIV1, "dout_shared3_div1", + "mout_shared3_pll", 1, 1, 0), + FFACTOR(DOUT_SHARED3_DIV2, "dout_shared3_div2", + "mout_shared3_pll", 1, 2, 0), + FFACTOR(DOUT_SHARED3_DIV3, "dout_shared3_div3", + "mout_shared3_pll", 1, 3, 0), + FFACTOR(DOUT_SHARED3_DIV4, "dout_shared3_div4", + "mout_shared3_pll", 1, 4, 0), + FFACTOR(DOUT_SHARED4_DIV1, "dout_shared4_div1", + "mout_shared4_pll", 1, 1, 0), + FFACTOR(DOUT_SHARED4_DIV2, "dout_shared4_div2", + "mout_shared4_pll", 1, 2, 0), + FFACTOR(DOUT_SHARED4_DIV3, "dout_shared4_div3", + "mout_shared4_pll", 1, 3, 0), + FFACTOR(DOUT_SHARED4_DIV4, "dout_shared4_div4", + "mout_shared4_pll", 1, 4, 0), + FFACTOR(DOUT_SHARED5_DIV1, "dout_shared5_div1", + "mout_shared5_pll", 1, 1, 0), + FFACTOR(DOUT_SHARED5_DIV2, "dout_shared5_div2", + "mout_shared5_pll", 1, 2, 0), + FFACTOR(DOUT_SHARED5_DIV3, "dout_shared5_div3", + "mout_shared5_pll", 1, 3, 0), + FFACTOR(DOUT_SHARED5_DIV4, "dout_shared5_div4", + "mout_shared5_pll", 1, 4, 0), +}; + +static const struct samsung_cmu_info top_cmu_info __initconst = { + .pll_clks = top_pll_clks, + .nr_pll_clks = ARRAY_SIZE(top_pll_clks), + .mux_clks = top_mux_clks, + .nr_mux_clks = ARRAY_SIZE(top_mux_clks), + .div_clks = top_div_clks, + .nr_div_clks = ARRAY_SIZE(top_div_clks), + .fixed_factor_clks = top_fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks), + .nr_clk_ids = CLKS_NR_TOP, + .clk_regs = top_clk_regs, + .nr_clk_regs = ARRAY_SIZE(top_clk_regs), +}; + +static void __init exynosautov920_cmu_top_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &top_cmu_info); +} + +/* Register CMU_TOP early, as it's a dependency for other early domains */ +CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top", + exynosautov920_cmu_top_init); + +/* ---- CMU_PERIC0 --------------------------------------------------------- */ + 
+/* Register Offset definitions for CMU_PERIC0 (0x10800000) */ +#define PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER 0x0610 +#define CLK_CON_MUX_MUX_CLK_PERIC0_I3C 0x1000 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI 0x1004 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI 0x1008 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI 0x100c +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI 0x1010 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI 0x1014 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI 0x1018 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI 0x101c +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI 0x1020 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI 0x1024 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C 0x1028 +#define CLK_CON_DIV_DIV_CLK_PERIC0_I3C 0x1800 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1804 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1808 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x180c +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x1810 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1814 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 0x1818 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI 0x181c +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI 0x1820 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI 0x1824 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1828 + +static const unsigned long peric0_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER, + PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER, + CLK_CON_MUX_MUX_CLK_PERIC0_I3C, + CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C, + CLK_CON_DIV_DIV_CLK_PERIC0_I3C, + CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C, +}; + +/* List of parent clocks for Muxes in CMU_PERIC0 */ +PNAME(mout_peric0_ip_user_p) = { "oscclk", "dout_clkcmu_peric0_ip" }; +PNAME(mout_peric0_noc_user_p) = { "oscclk", "dout_clkcmu_peric0_noc" }; +PNAME(mout_peric0_usi_p) = { "oscclk", "mout_peric0_ip_user" }; + +static const struct samsung_mux_clock peric0_mux_clks[] __initconst = { + MUX(CLK_MOUT_PERIC0_IP_USER, "mout_peric0_ip_user", + mout_peric0_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER, 4, 1), + MUX(CLK_MOUT_PERIC0_NOC_USER, "mout_peric0_noc_user", + mout_peric0_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER, 4, 1), + /* USI00 ~ USI08 */ + MUX(CLK_MOUT_PERIC0_USI00_USI, "mout_peric0_usi00_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI01_USI, "mout_peric0_usi01_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI02_USI, "mout_peric0_usi02_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI03_USI, "mout_peric0_usi03_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI04_USI, "mout_peric0_usi04_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI, 0, 1), 
+ MUX(CLK_MOUT_PERIC0_USI05_USI, "mout_peric0_usi05_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI06_USI, "mout_peric0_usi06_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI07_USI, "mout_peric0_usi07_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI08_USI, "mout_peric0_usi08_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI, 0, 1), + /* USI_I2C */ + MUX(CLK_MOUT_PERIC0_USI_I2C, "mout_peric0_usi_i2c", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C, 0, 1), + /* USI_I3C */ + MUX(CLK_MOUT_PERIC0_I3C, "mout_peric0_i3c", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_I3C, 0, 1), +}; + +static const struct samsung_div_clock peric0_div_clks[] __initconst = { + /* USI00 ~ USI08 */ + DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi", + "mout_peric0_usi00_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi", + "mout_peric0_usi01_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi", + "mout_peric0_usi02_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi", + "mout_peric0_usi03_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi", + "mout_peric0_usi04_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi", + "mout_peric0_usi05_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI06_USI, "dout_peric0_usi06_usi", + "mout_peric0_usi06_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI07_USI, "dout_peric0_usi07_usi", + "mout_peric0_usi07_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI08_USI, "dout_peric0_usi08_usi", + "mout_peric0_usi08_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI, + 0, 4), + /* USI_I2C */ + DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c", + "mout_peric0_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C, 0, 4), + /* USI_I3C */ + DIV(CLK_DOUT_PERIC0_I3C, "dout_peric0_i3c", + "mout_peric0_i3c", CLK_CON_DIV_DIV_CLK_PERIC0_I3C, 0, 4), +}; + +static const struct samsung_cmu_info peric0_cmu_info __initconst = { + .mux_clks = peric0_mux_clks, + .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks), + .div_clks = peric0_div_clks, + .nr_div_clks = ARRAY_SIZE(peric0_div_clks), + .nr_clk_ids = CLKS_NR_PERIC0, + .clk_regs = peric0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs), + .clk_name = "noc", +}; + +static int __init exynosautov920_cmu_probe(struct platform_device *pdev) +{ + const struct samsung_cmu_info *info; + struct device *dev = &pdev->dev; + + info = of_device_get_match_data(dev); + exynos_arm64_register_cmu(dev, dev->of_node, info); + + return 0; +} + +static const struct of_device_id exynosautov920_cmu_of_match[] = { + { + .compatible = "samsung,exynosautov920-cmu-peric0", + .data = &peric0_cmu_info, + }, +}; + +static struct platform_driver exynosautov920_cmu_driver __refdata = { + .driver = { + .name = "exynosautov920-cmu", + .of_match_table = exynosautov920_cmu_of_match, + .suppress_bind_attrs = true, + }, + .probe = exynosautov920_cmu_probe, +}; + +static int __init exynosautov920_cmu_init(void) +{ + return platform_driver_register(&exynosautov920_cmu_driver); +} +core_initcall(exynosautov920_cmu_init); diff --git a/drivers/clk/samsung/clk-pll.c 
b/drivers/clk/samsung/clk-pll.c index 4be879ab917e12..cca3e630922c14 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -430,6 +430,9 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = { #define PLL0822X_LOCK_STAT_SHIFT (29) #define PLL0822X_ENABLE_SHIFT (31) +/* PLL1418x is similar to PLL0822x, except that MDIV is one bit smaller */ +#define PLL1418X_MDIV_MASK (0x1FF) + static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -438,7 +441,10 @@ static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw, u64 fvco = parent_rate; pll_con3 = readl_relaxed(pll->con_reg); - mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK; + if (pll->type != pll_1418x) + mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK; + else + mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL1418X_MDIV_MASK; pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK; sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK; @@ -456,7 +462,12 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate, { const struct samsung_pll_rate_table *rate; struct samsung_clk_pll *pll = to_clk_pll(hw); - u32 pll_con3; + u32 mdiv_mask, pll_con3; + + if (pll->type != pll_1418x) + mdiv_mask = PLL0822X_MDIV_MASK; + else + mdiv_mask = PLL1418X_MDIV_MASK; /* Get required rate settings from table */ rate = samsung_get_pll_settings(pll, drate); @@ -468,7 +479,7 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate, /* Change PLL PMS values */ pll_con3 = readl_relaxed(pll->con_reg); - pll_con3 &= ~((PLL0822X_MDIV_MASK << PLL0822X_MDIV_SHIFT) | + pll_con3 &= ~((mdiv_mask << PLL0822X_MDIV_SHIFT) | (PLL0822X_PDIV_MASK << PLL0822X_PDIV_SHIFT) | (PLL0822X_SDIV_MASK << PLL0822X_SDIV_SHIFT)); pll_con3 |= (rate->mdiv << PLL0822X_MDIV_SHIFT) | @@ -1261,6 +1272,47 @@ static const struct clk_ops samsung_pll2650xx_clk_min_ops = { .recalc_rate = samsung_pll2650xx_recalc_rate, }; +/* + * PLL531X Clock Type + */ +/* Maximum lock time can be 500 * PDIV cycles */ +#define PLL531X_LOCK_FACTOR (500) +#define PLL531X_MDIV_MASK (0x3FF) +#define PLL531X_PDIV_MASK (0x3F) +#define PLL531X_SDIV_MASK (0x7) +#define PLL531X_FDIV_MASK (0xFFFFFFFF) +#define PLL531X_MDIV_SHIFT (16) +#define PLL531X_PDIV_SHIFT (8) +#define PLL531X_SDIV_SHIFT (0) + +static unsigned long samsung_pll531x_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 pdiv, sdiv, fdiv, pll_con0, pll_con8; + u64 mdiv, fout = parent_rate; + + pll_con0 = readl_relaxed(pll->con_reg); + pll_con8 = readl_relaxed(pll->con_reg + 20); + mdiv = (pll_con0 >> PLL531X_MDIV_SHIFT) & PLL531X_MDIV_MASK; + pdiv = (pll_con0 >> PLL531X_PDIV_SHIFT) & PLL531X_PDIV_MASK; + sdiv = (pll_con0 >> PLL531X_SDIV_SHIFT) & PLL531X_SDIV_MASK; + fdiv = (pll_con8 & PLL531X_FDIV_MASK); + + if (fdiv >> 31) + mdiv--; + + fout *= (mdiv << 24) + (fdiv >> 8); + do_div(fout, (pdiv << sdiv)); + fout >>= 24; + + return (unsigned long)fout; +} + +static const struct clk_ops samsung_pll531x_clk_ops = { + .recalc_rate = samsung_pll531x_recalc_rate, +}; + static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, const struct samsung_pll_clock *pll_clk) { @@ -1317,6 +1369,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, init.ops = &samsung_pll35xx_clk_ops; break; case pll_1417x: + case pll_1418x: case pll_0818x: case pll_0822x: case pll_0516x: @@ -1394,6 +1447,9 @@ static 
void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, else init.ops = &samsung_pll2650xx_clk_ops; break; + case pll_531x: + init.ops = &samsung_pll531x_clk_ops; + break; default: pr_warn("%s: Unknown pll type for pll clk %s\n", __func__, pll_clk->name); diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h index ffd3d52c0dec23..3481941ba07a9d 100644 --- a/drivers/clk/samsung/clk-pll.h +++ b/drivers/clk/samsung/clk-pll.h @@ -30,6 +30,7 @@ enum samsung_pll_type { pll_2650x, pll_2650xx, pll_1417x, + pll_1418x, pll_1450x, pll_1451x, pll_1452x, @@ -41,6 +42,7 @@ enum samsung_pll_type { pll_0516x, pll_0517x, pll_0518x, + pll_531x, }; #define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \ diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c index d3c85421f948fc..8c4c3a958a9f4a 100644 --- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c +++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c @@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, jh7110_ispcrg_match); static struct platform_driver jh7110_ispcrg_driver = { .probe = jh7110_ispcrg_probe, - .remove_new = jh7110_ispcrg_remove, + .remove = jh7110_ispcrg_remove, .driver = { .name = "clk-starfive-jh7110-isp", .of_match_table = jh7110_ispcrg_match, diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c index 53f7af234cc23e..04eeed199087f6 100644 --- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c +++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c @@ -145,7 +145,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev) /* enable power domain and clocks */ pm_runtime_enable(priv->dev); - ret = pm_runtime_get_sync(priv->dev); + ret = pm_runtime_resume_and_get(priv->dev); if (ret < 0) return dev_err_probe(priv->dev, ret, "failed to turn on power\n"); @@ -223,7 +223,7 @@ MODULE_DEVICE_TABLE(of, jh7110_voutcrg_match); static struct platform_driver jh7110_voutcrg_driver = { .probe = jh7110_voutcrg_probe, - .remove_new = jh7110_voutcrg_remove, + .remove = jh7110_voutcrg_remove, .driver = { .name = "clk-starfive-jh7110-vout", .of_match_table = jh7110_voutcrg_match, diff --git a/drivers/clk/stm32/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c index 7e2337297402a0..5fcc4c77c11f27 100644 --- a/drivers/clk/stm32/clk-stm32mp1.c +++ b/drivers/clk/stm32/clk-stm32mp1.c @@ -2354,7 +2354,7 @@ static struct platform_driver stm32mp1_rcc_clocks_driver = { .of_match_table = stm32mp1_match_data, }, .probe = stm32mp1_rcc_clocks_probe, - .remove_new = stm32mp1_rcc_clocks_remove, + .remove = stm32mp1_rcc_clocks_remove, }; static int __init stm32mp1_clocks_init(void) diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c index a9be4b56b2b7b7..0251618b82c832 100644 --- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c +++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c @@ -635,7 +635,7 @@ static const struct dev_pm_ops tegra124_dfll_pm_ops = { static struct platform_driver tegra124_dfll_fcpu_driver = { .probe = tegra124_dfll_fcpu_probe, - .remove_new = tegra124_dfll_fcpu_remove, + .remove = tegra124_dfll_fcpu_remove, .driver = { .name = "tegra124-dfll", .of_match_table = tegra124_dfll_fcpu_of_match, diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index 6121020b4b38a9..e305fcbac6475b 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c @@ -934,7 +934,7 @@ static struct platform_driver ti_adpll_driver = { .of_match_table = ti_adpll_match, }, .probe = ti_adpll_probe, 
- .remove_new = ti_adpll_remove, + .remove = ti_adpll_remove, }; static int __init ti_adpll_init(void) diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index d964e3affd42ce..0eab7f3e2eab9e 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -240,6 +240,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) } clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); if (IS_ERR(clk)) { pr_err("%s: failed to get atl clock %d from provider\n", __func__, i); diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c index 45adac1b4630f0..033d4f78edc895 100644 --- a/drivers/clk/versatile/clk-sp810.c +++ b/drivers/clk/versatile/clk-sp810.c @@ -110,7 +110,7 @@ static void __init clk_sp810_of_setup(struct device_node *node) init.parent_names = parent_names; init.num_parents = num; - deprecated = !of_find_property(node, "assigned-clock-parents", NULL); + deprecated = !of_property_present(node, "assigned-clock-parents"); for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) { snprintf(name, sizeof(name), "sp810_%d_%d", instance, i); diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c index e9cd80e085dc3b..3f929cf8dd2f71 100644 --- a/drivers/clk/visconti/pll.c +++ b/drivers/clk/visconti/pll.c @@ -262,9 +262,9 @@ static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx, for (len = 0; rate_table[len].rate != 0; ) len++; pll->rate_count = len; - pll->rate_table = kmemdup(rate_table, - pll->rate_count * sizeof(struct visconti_pll_rate_table), - GFP_KERNEL); + pll->rate_table = kmemdup_array(rate_table, + pll->rate_count, sizeof(*pll->rate_table), + GFP_KERNEL); WARN(!pll->rate_table, "%s: could not allocate rate table for %s\n", __func__, name); init.ops = &visconti_pll_ops; diff --git a/drivers/clk/x86/clk-fch.c b/drivers/clk/x86/clk-fch.c index aed7d22fae6313..cf5cd3ad46477f 100644 --- a/drivers/clk/x86/clk-fch.c +++ b/drivers/clk/x86/clk-fch.c @@ -115,6 +115,6 @@ static struct platform_driver fch_clk_driver = { .suppress_bind_attrs = true, }, .probe = fch_clk_probe, - .remove_new = fch_clk_remove, + .remove = fch_clk_remove, }; builtin_platform_driver(fch_clk_driver); diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index 5ec9255e33faf7..99291ba65da73d 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -373,6 +373,6 @@ static struct platform_driver plt_clk_driver = { .name = "clk-pmc-atom", }, .probe = plt_clk_probe, - .remove_new = plt_clk_remove, + .remove = plt_clk_remove, }; builtin_platform_driver(plt_clk_driver); diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c index 19eb3fb7ae319e..7a0269bdfbb385 100644 --- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c +++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c @@ -1257,7 +1257,7 @@ static struct platform_driver clk_wzrd_driver = { .pm = &clk_wzrd_dev_pm_ops, }, .probe = clk_wzrd_probe, - .remove_new = clk_wzrd_remove, + .remove = clk_wzrd_remove, }; module_platform_driver(clk_wzrd_driver); diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c index d983fab12756a8..81501b48412ee6 100644 --- a/drivers/clk/xilinx/xlnx_vcu.c +++ b/drivers/clk/xilinx/xlnx_vcu.c @@ -729,7 +729,7 @@ static struct platform_driver xvcu_driver = { .of_match_table = xvcu_of_id_table, }, .probe = xvcu_probe, - .remove_new = xvcu_remove, + .remove = xvcu_remove, }; module_platform_driver(xvcu_driver); diff --git 
a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index 82338773602cae..b4330a01a566bf 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -25,6 +25,10 @@ #include #include +static void *suspend_resume_cb_data; + +static void (*suspend_resume_callback)(void *data, bool suspend); + /* * The I/O port the PMTMR resides at. * The location is detected during setup_arch(), @@ -58,6 +62,32 @@ u32 acpi_pm_read_verified(void) return v2; } +void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data) +{ + suspend_resume_callback = cb; + suspend_resume_cb_data = data; +} +EXPORT_SYMBOL_GPL(acpi_pmtmr_register_suspend_resume_callback); + +void acpi_pmtmr_unregister_suspend_resume_callback(void) +{ + suspend_resume_callback = NULL; + suspend_resume_cb_data = NULL; +} +EXPORT_SYMBOL_GPL(acpi_pmtmr_unregister_suspend_resume_callback); + +static void acpi_pm_suspend(struct clocksource *cs) +{ + if (suspend_resume_callback) + suspend_resume_callback(suspend_resume_cb_data, true); +} + +static void acpi_pm_resume(struct clocksource *cs) +{ + if (suspend_resume_callback) + suspend_resume_callback(suspend_resume_cb_data, false); +} + static u64 acpi_pm_read(struct clocksource *cs) { return (u64)read_pmtmr(); @@ -69,6 +99,8 @@ static struct clocksource clocksource_acpi_pm = { .read = acpi_pm_read, .mask = (u64)ACPI_PM_MASK, .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .suspend = acpi_pm_suspend, + .resume = acpi_pm_resume, }; diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index aeafc74181f071..03733101e23174 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1594,7 +1594,6 @@ static int __init arch_timer_mem_of_init(struct device_node *np) { struct arch_timer_mem *timer_mem; struct arch_timer_mem_frame *frame; - struct device_node *frame_node; struct resource res; int ret = -EINVAL; u32 rate; @@ -1608,33 +1607,29 @@ static int __init arch_timer_mem_of_init(struct device_node *np) timer_mem->cntctlbase = res.start; timer_mem->size = resource_size(&res); - for_each_available_child_of_node(np, frame_node) { + for_each_available_child_of_node_scoped(np, frame_node) { u32 n; struct arch_timer_mem_frame *frame; if (of_property_read_u32(frame_node, "frame-number", &n)) { pr_err(FW_BUG "Missing frame-number.\n"); - of_node_put(frame_node); goto out; } if (n >= ARCH_TIMER_MEM_MAX_FRAMES) { pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n", ARCH_TIMER_MEM_MAX_FRAMES - 1); - of_node_put(frame_node); goto out; } frame = &timer_mem->frame[n]; if (frame->valid) { pr_err(FW_BUG "Duplicated frame-number.\n"); - of_node_put(frame_node); goto out; } - if (of_address_to_resource(frame_node, 0, &res)) { - of_node_put(frame_node); + if (of_address_to_resource(frame_node, 0, &res)) goto out; - } + frame->cntbase = res.start; frame->size = resource_size(&res); diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c index 5b39d3701fa3c9..8f97ab0b01ec17 100644 --- a/drivers/clocksource/asm9260_timer.c +++ b/drivers/clocksource/asm9260_timer.c @@ -210,6 +210,7 @@ static int __init asm9260_timer_init(struct device_node *np) DRIVER_NAME, &event_dev); if (ret) { pr_err("Failed to setup irq!\n"); + clk_disable_unprepare(clk); return ret; } diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index d4350bb10b83a2..39f7c2d736d169 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -20,13 +20,6 
@@ DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); -/* - * Handle PIT quirk in pit_shutdown() where zeroing the counter register - * restarts the PIT, negating the shutdown. On platforms with the quirk, - * platform specific code can set this to false. - */ -bool i8253_clear_counter_on_shutdown __ro_after_init = true; - #ifdef CONFIG_CLKSRC_I8253 /* * Since the PIT overflows every tick, its not very useful @@ -108,21 +101,47 @@ int __init clocksource_i8253_init(void) #endif #ifdef CONFIG_CLKEVT_I8253 -static int pit_shutdown(struct clock_event_device *evt) +void clockevent_i8253_disable(void) { - if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt)) - return 0; - raw_spin_lock(&i8253_lock); + /* + * Writing the MODE register should stop the counter, according to + * the datasheet. This appears to work on real hardware (well, on + * modern Intel and AMD boxes; I didn't dig the Pegasos out of the + * shed). + * + * However, some virtual implementations differ, and the MODE change + * doesn't have any effect until either the counter is written (KVM + * in-kernel PIT) or the next interrupt (QEMU). And in those cases, + * it may not stop the *count*, only the interrupts. Although in + * the virt case, that probably doesn't matter, as the value of the + * counter will only be calculated on demand if the guest reads it; + * it's the interrupts which cause steal time. + * + * Hyper-V apparently has a bug where even in mode 0, the IRQ keeps + * firing repeatedly if the counter is running. But it *does* do the + * right thing when the MODE register is written. + * + * So: write the MODE and then load the counter, which ensures that + * the IRQ is stopped on those buggy virt implementations. And then + * write the MODE again, which is the right way to stop it. 
+ */ outb_p(0x30, PIT_MODE); + outb_p(0, PIT_CH0); + outb_p(0, PIT_CH0); - if (i8253_clear_counter_on_shutdown) { - outb_p(0, PIT_CH0); - outb_p(0, PIT_CH0); - } + outb_p(0x30, PIT_MODE); raw_spin_unlock(&i8253_lock); +} + +static int pit_shutdown(struct clock_event_device *evt) +{ + if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt)) + return 0; + + clockevent_i8253_disable(); return 0; } diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c index 9f7c280a1336e3..e0ec33307c8489 100644 --- a/drivers/clocksource/ingenic-ost.c +++ b/drivers/clocksource/ingenic-ost.c @@ -93,14 +93,10 @@ static int __init ingenic_ost_probe(struct platform_device *pdev) return PTR_ERR(map); } - ost->clk = devm_clk_get(dev, "ost"); + ost->clk = devm_clk_get_enabled(dev, "ost"); if (IS_ERR(ost->clk)) return PTR_ERR(ost->clk); - err = clk_prepare_enable(ost->clk); - if (err) - return err; - /* Clear counter high/low registers */ if (soc_info->is64bit) regmap_write(map, TCU_REG_OST_CNTL, 0); @@ -129,7 +125,6 @@ static int __init ingenic_ost_probe(struct platform_device *pdev) err = clocksource_register_hz(cs, rate); if (err) { dev_err(dev, "clocksource registration failed"); - clk_disable_unprepare(ost->clk); return err; } diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c index a4a991101fa316..a3fe98cd383820 100644 --- a/drivers/clocksource/jcore-pit.c +++ b/drivers/clocksource/jcore-pit.c @@ -120,7 +120,7 @@ static int jcore_pit_local_init(unsigned cpu) static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id) { - struct jcore_pit *pit = this_cpu_ptr(dev_id); + struct jcore_pit *pit = dev_id; if (clockevent_state_oneshot(&pit->ced)) jcore_pit_disable(pit); @@ -168,9 +168,8 @@ static int __init jcore_pit_init(struct device_node *node) return -ENOMEM; } - err = request_irq(pit_irq, jcore_timer_interrupt, - IRQF_TIMER | IRQF_PERCPU, - "jcore_pit", jcore_pit_percpu); + err = request_percpu_irq(pit_irq, jcore_timer_interrupt, + "jcore_pit", jcore_pit_percpu); if (err) { pr_err("pit irq request failed: %d\n", err); free_percpu(jcore_pit_percpu); diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c index ca7a06489c405f..b8a1cf59b9d615 100644 --- a/drivers/clocksource/timer-cadence-ttc.c +++ b/drivers/clocksource/timer-cadence-ttc.c @@ -435,7 +435,7 @@ static int __init ttc_setup_clockevent(struct clk *clk, &ttcce->ttc.clk_rate_change_nb); if (err) { pr_warn("Unable to register clock notifier.\n"); - goto out_kfree; + goto out_clk_unprepare; } ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); @@ -465,13 +465,15 @@ static int __init ttc_setup_clockevent(struct clk *clk, err = request_irq(irq, ttc_clock_event_interrupt, IRQF_TIMER, ttcce->ce.name, ttcce); if (err) - goto out_kfree; + goto out_clk_unprepare; clockevents_config_and_register(&ttcce->ce, ttcce->ttc.freq / PRESCALE, 1, 0xfffe); return 0; +out_clk_unprepare: + clk_disable_unprepare(ttcce->ttc.clk); out_kfree: kfree(ttcce); return err; diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c index b4afe3a6758351..eac4c95c6127f2 100644 --- a/drivers/clocksource/timer-qcom.c +++ b/drivers/clocksource/timer-qcom.c @@ -233,6 +233,7 @@ static int __init msm_dt_timer_init(struct device_node *np) } if (of_property_read_u32(np, "clock-frequency", &freq)) { + iounmap(cpu0_base); pr_err("Unknown frequency\n"); return -EINVAL; } @@ -243,7 +244,11 @@ static int __init msm_dt_timer_init(struct device_node *np) freq /= 4; 
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL); - return msm_timer_init(freq, 32, irq, !!percpu_offset); + ret = msm_timer_init(freq, 32, irq, !!percpu_offset); + if (ret) + iounmap(cpu0_base); + + return ret; } TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); diff --git a/drivers/comedi/drivers/ni_atmio.c b/drivers/comedi/drivers/ni_atmio.c index 8876a1d24c56c2..330ae1c5880079 100644 --- a/drivers/comedi/drivers/ni_atmio.c +++ b/drivers/comedi/drivers/ni_atmio.c @@ -79,6 +79,15 @@ #include "ni_stc.h" +static const struct comedi_lrange range_ni_E_ao_ext = { + 4, { + BIP_RANGE(10), + UNI_RANGE(10), + RANGE_ext(-1, 1), + RANGE_ext(0, 1) + } +}; + /* AT specific setup */ static const struct ni_board_struct ni_boards[] = { { diff --git a/drivers/comedi/drivers/ni_mio_common.c b/drivers/comedi/drivers/ni_mio_common.c index 980f309d6de789..3acb449d293cb3 100644 --- a/drivers/comedi/drivers/ni_mio_common.c +++ b/drivers/comedi/drivers/ni_mio_common.c @@ -166,15 +166,6 @@ static const struct comedi_lrange range_ni_M_ai_628x = { } }; -static const struct comedi_lrange range_ni_E_ao_ext = { - 4, { - BIP_RANGE(10), - UNI_RANGE(10), - RANGE_ext(-1, 1), - RANGE_ext(0, 1) - } -}; - static const struct comedi_lrange *const ni_range_lkup[] = { [ai_gain_16] = &range_ni_E_ai, [ai_gain_8] = &range_ni_E_ai_limited, diff --git a/drivers/comedi/drivers/ni_pcimio.c b/drivers/comedi/drivers/ni_pcimio.c index 0b055321023d7b..f63c390314e1f6 100644 --- a/drivers/comedi/drivers/ni_pcimio.c +++ b/drivers/comedi/drivers/ni_pcimio.c @@ -102,6 +102,15 @@ #define PCIDMA +static const struct comedi_lrange range_ni_E_ao_ext = { + 4, { + BIP_RANGE(10), + UNI_RANGE(10), + RANGE_ext(-1, 1), + RANGE_ext(0, 1) + } +}; + /* * These are not all the possible ao ranges for 628x boards. 
* They can do OFFSET +- REFERENCE where OFFSET can be diff --git a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c index d55521b5bdcb2d..892a66b2cea665 100644 --- a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c +++ b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c @@ -140,6 +140,11 @@ int main(void) { FILE *fp = fopen("ni_values.py", "w"); + if (fp == NULL) { + fprintf(stderr, "Could not open file!"); + return -1; + } + /* write route register values */ fprintf(fp, "ni_route_values = {\n"); for (int i = 0; ni_all_route_values[i]; ++i) diff --git a/drivers/comedi/drivers/ni_stc.h b/drivers/comedi/drivers/ni_stc.h index fbc0b753a0f592..7837e4683c6d24 100644 --- a/drivers/comedi/drivers/ni_stc.h +++ b/drivers/comedi/drivers/ni_stc.h @@ -1137,6 +1137,4 @@ struct ni_private { u8 rgout0_usage; }; -static const struct comedi_lrange range_ni_E_ao_ext; - #endif /* _COMEDI_NI_STC_H */ diff --git a/drivers/comedi/drivers/usbduxsigma.c b/drivers/comedi/drivers/usbduxsigma.c index 2aaeaf44fbe57d..3f215ae228b2d3 100644 --- a/drivers/comedi/drivers/usbduxsigma.c +++ b/drivers/comedi/drivers/usbduxsigma.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include /* timeout for the USB-transfer in ms*/ diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index ed1f5751195585..4a6868b8f58bce 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -22,7 +22,7 @@ #include #include -#include +#include #define QUAD8_EXTENT 32 diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c index afc94d0062b177..3ee75e1a78cd5e 100644 --- a/drivers/counter/counter-chrdev.c +++ b/drivers/counter/counter-chrdev.c @@ -454,7 +454,6 @@ static int counter_chrdev_release(struct inode *inode, struct file *filp) static const struct file_operations counter_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = counter_chrdev_read, .poll = counter_chrdev_poll, .unlocked_ioctl = counter_chrdev_ioctl, diff --git a/drivers/counter/i8254.c b/drivers/counter/i8254.c index c41e4fdc960117..6d74e8ef92f009 100644 --- a/drivers/counter/i8254.c +++ b/drivers/counter/i8254.c @@ -15,7 +15,7 @@ #include #include -#include +#include #define I8254_COUNTER_REG(_counter) (_counter) #define I8254_CONTROL_REG 0x3 diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 10cda6f2fe1d1f..2561b215432a82 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -231,9 +231,7 @@ if X86 source "drivers/cpufreq/Kconfig.x86" endif -if ARM || ARM64 source "drivers/cpufreq/Kconfig.arm" -endif if PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 96b404ce829f31..5f7e13e60c8023 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -5,7 +5,7 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM tristate "Allwinner nvmem based SUN50I CPUFreq driver" - depends on ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST depends on NVMEM_SUNXI_SID select PM_OPP help @@ -26,15 +26,17 @@ config ARM_APPLE_SOC_CPUFREQ config ARM_ARMADA_37XX_CPUFREQ tristate "Armada 37xx CPUFreq support" - depends on ARCH_MVEBU && CPUFREQ_DT + depends on ARCH_MVEBU || COMPILE_TEST + depends on CPUFREQ_DT help This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. The Armada 37xx PMU supports 4 frequency and VDD levels. 
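The recurring change in this Kconfig.arm hunk is relaxing the platform dependency to "|| COMPILE_TEST" so these SoC cpufreq drivers can at least be build-tested on other architectures. A driver enabled that way still has to refuse to do anything on unrelated hardware at runtime. As a rough sketch of that guard (the compatible string, driver name and probe are hypothetical, not code from this patch):

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int example_cpufreq_probe(struct platform_device *pdev)
	{
		return 0;	/* real driver would set up OPPs/clocks here */
	}

	static struct platform_driver example_cpufreq_driver = {
		.driver = {
			.name = "example-cpufreq",
		},
		.probe = example_cpufreq_probe,
	};

	static int __init example_cpufreq_init(void)
	{
		struct device_node *np;

		/* Built (e.g. via COMPILE_TEST) but running on another SoC: bail out. */
		np = of_find_compatible_node(NULL, NULL, "vendor,example-soc");
		if (!np)
			return -ENODEV;
		of_node_put(np);

		return platform_driver_register(&example_cpufreq_driver);
	}
	module_init(example_cpufreq_init);
	MODULE_LICENSE("GPL");

The armada-8k driver touched later in this series follows essentially this shape, using of_find_matching_node_and_match() in its module init before registering anything.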
config ARM_ARMADA_8K_CPUFREQ tristate "Armada 8K CPUFreq driver" - depends on ARCH_MVEBU && CPUFREQ_DT - select ARMADA_AP_CPU_CLK + depends on ARCH_MVEBU || COMPILE_TEST + depends on CPUFREQ_DT + select ARMADA_AP_CPU_CLK if COMMON_CLK help This enables the CPUFreq driver support for Marvell Armada8k SOCs. @@ -56,7 +58,7 @@ config ARM_SCPI_CPUFREQ config ARM_VEXPRESS_SPC_CPUFREQ tristate "Versatile Express SPC based CPUfreq driver" depends on ARM_CPU_TOPOLOGY && HAVE_CLK - depends on ARCH_VEXPRESS_SPC + depends on ARCH_VEXPRESS_SPC || COMPILE_TEST select PM_OPP help This add the CPUfreq driver support for Versatile Express @@ -75,7 +77,8 @@ config ARM_BRCMSTB_AVS_CPUFREQ config ARM_HIGHBANK_CPUFREQ tristate "Calxeda Highbank-based" - depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR + depends on ARCH_HIGHBANK || COMPILE_TEST + depends on CPUFREQ_DT && REGULATOR && PL320_MBOX default m help This adds the CPUFreq driver for Calxeda Highbank SoC @@ -96,7 +99,8 @@ config ARM_IMX6Q_CPUFREQ config ARM_IMX_CPUFREQ_DT tristate "Freescale i.MX8M cpufreq support" - depends on ARCH_MXC && CPUFREQ_DT + depends on CPUFREQ_DT + depends on ARCH_MXC || COMPILE_TEST help This adds cpufreq driver support for Freescale i.MX7/i.MX8M series SoCs, based on cpufreq-dt. @@ -111,7 +115,8 @@ config ARM_KIRKWOOD_CPUFREQ config ARM_MEDIATEK_CPUFREQ tristate "CPU Frequency scaling support for MediaTek SoCs" - depends on ARCH_MEDIATEK && REGULATOR + depends on ARCH_MEDIATEK || COMPILE_TEST + depends on REGULATOR select PM_OPP help This adds the CPUFreq driver support for MediaTek SoCs. @@ -130,12 +135,12 @@ config ARM_MEDIATEK_CPUFREQ_HW config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" - depends on ARCH_OMAP2PLUS + depends on ARCH_OMAP2PLUS || COMPILE_TEST default ARCH_OMAP2PLUS config ARM_QCOM_CPUFREQ_NVMEM tristate "Qualcomm nvmem based CPUFreq" - depends on ARCH_QCOM + depends on ARCH_QCOM || COMPILE_TEST depends on NVMEM_QCOM_QFPROM depends on QCOM_SMEM select PM_OPP @@ -166,7 +171,7 @@ config ARM_RASPBERRYPI_CPUFREQ config ARM_S3C64XX_CPUFREQ bool "Samsung S3C64XX" - depends on CPU_S3C6410 + depends on CPU_S3C6410 || COMPILE_TEST default y help This adds the CPUFreq driver for Samsung S3C6410 SoC. @@ -175,7 +180,7 @@ config ARM_S3C64XX_CPUFREQ config ARM_S5PV210_CPUFREQ bool "Samsung S5PV210 and S5PC110" - depends on CPU_S5PV210 + depends on CPU_S5PV210 || COMPILE_TEST default y help This adds the CPUFreq driver for Samsung S5PV210 and @@ -199,14 +204,15 @@ config ARM_SCMI_CPUFREQ config ARM_SPEAR_CPUFREQ bool "SPEAr CPUFreq support" - depends on PLAT_SPEAR + depends on PLAT_SPEAR || COMPILE_TEST default y help This adds the CPUFreq driver support for SPEAr SOCs. config ARM_STI_CPUFREQ tristate "STi CPUFreq support" - depends on CPUFREQ_DT && SOC_STIH407 + depends on CPUFREQ_DT + depends on SOC_STIH407 || COMPILE_TEST help This driver uses the generic OPP framework to match the running platform with a predefined set of suitable values. If not provided @@ -216,34 +222,38 @@ config ARM_STI_CPUFREQ config ARM_TEGRA20_CPUFREQ tristate "Tegra20/30 CPUFreq support" - depends on ARCH_TEGRA && CPUFREQ_DT + depends on ARCH_TEGRA || COMPILE_TEST + depends on CPUFREQ_DT default y help This adds the CPUFreq driver support for Tegra20/30 SOCs. config ARM_TEGRA124_CPUFREQ bool "Tegra124 CPUFreq support" - depends on ARCH_TEGRA && CPUFREQ_DT + depends on ARCH_TEGRA || COMPILE_TEST + depends on CPUFREQ_DT default y help This adds the CPUFreq driver support for Tegra124 SOCs. 
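Several of the entries above (i.MX, STi, Tegra20, Armada 37xx) sit on top of cpufreq-dt and mostly just feed the generic OPP framework with the frequency/voltage pairs valid for the running silicon. A hypothetical illustration of that OPP usage follows; the frequencies, voltages and the cpu0 assumption are invented and not taken from any of these drivers:

	#include <linux/cpu.h>
	#include <linux/err.h>
	#include <linux/pm_opp.h>

	static int example_cpu_opps_init(void)
	{
		struct device *cpu_dev = get_cpu_device(0);
		unsigned long freq = 600000000;		/* a 600 MHz request */
		struct dev_pm_opp *opp;
		int ret;

		if (!cpu_dev)
			return -ENODEV;

		/* Advertise the operating points this board/silicon can use. */
		ret = dev_pm_opp_add(cpu_dev, 400000000, 900000);	/* 400 MHz, 0.9 V */
		if (ret)
			return ret;
		ret = dev_pm_opp_add(cpu_dev, 800000000, 1100000);	/* 800 MHz, 1.1 V */
		if (ret)
			return ret;

		/* cpufreq later resolves a request to the nearest OPP at or above it. */
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq);
		if (IS_ERR(opp))
			return PTR_ERR(opp);
		dev_pm_opp_put(opp);	/* freq now holds 800000000 */

		return 0;
	}

The SoC drivers above typically derive the valid set from fuses or speed grades rather than hard-coding it, which is what their nvmem and PM_OPP dependencies in this hunk are for.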
config ARM_TEGRA186_CPUFREQ tristate "Tegra186 CPUFreq support" - depends on ARCH_TEGRA && TEGRA_BPMP + depends on ARCH_TEGRA || COMPILE_TEST + depends on TEGRA_BPMP help This adds the CPUFreq driver support for Tegra186 SOCs. config ARM_TEGRA194_CPUFREQ tristate "Tegra194 CPUFreq support" - depends on ARCH_TEGRA_194_SOC && TEGRA_BPMP + depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST) + depends on TEGRA_BPMP default y help This adds CPU frequency driver support for Tegra194 SOCs. config ARM_TI_CPUFREQ bool "Texas Instruments CPUFreq support" - depends on ARCH_OMAP2PLUS || ARCH_K3 + depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST default y help This driver enables valid OPPs on the running platform based on @@ -255,7 +265,7 @@ config ARM_TI_CPUFREQ config ARM_PXA2xx_CPUFREQ tristate "Intel PXA2xx CPUfreq driver" - depends on PXA27x || PXA25x + depends on PXA27x || PXA25x || COMPILE_TEST help This add the CPUFreq driver support for Intel PXA2xx SOCs. diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index a8ca625a98b896..0f04feb6cafaf1 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -642,10 +642,16 @@ static u64 get_max_boost_ratio(unsigned int cpu) return 0; } - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) - highest_perf = amd_get_highest_perf(); - else + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + ret = amd_get_boost_ratio_numerator(cpu, &highest_perf); + if (ret) { + pr_debug("CPU%d: Unable to get boost ratio numerator (%d)\n", + cpu, ret); + return 0; + } + } else { highest_perf = perf_caps.highest_perf; + } nominal_perf = perf_caps.nominal_perf; diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index b7318669485e4b..f66701514d9063 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -54,12 +54,14 @@ static void amd_pstate_ut_acpi_cpc_valid(u32 index); static void amd_pstate_ut_check_enabled(u32 index); static void amd_pstate_ut_check_perf(u32 index); static void amd_pstate_ut_check_freq(u32 index); +static void amd_pstate_ut_check_driver(u32 index); static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = { {"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid }, {"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled }, {"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf }, - {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq } + {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }, + {"amd_pstate_ut_check_driver", amd_pstate_ut_check_driver } }; static bool get_shared_mem(void) @@ -257,6 +259,43 @@ static void amd_pstate_ut_check_freq(u32 index) cpufreq_cpu_put(policy); } +static int amd_pstate_set_mode(enum amd_pstate_mode mode) +{ + const char *mode_str = amd_pstate_get_mode_string(mode); + + pr_debug("->setting mode to %s\n", mode_str); + + return amd_pstate_update_status(mode_str, strlen(mode_str)); +} + +static void amd_pstate_ut_check_driver(u32 index) +{ + enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE; + int ret; + + for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) { + ret = amd_pstate_set_mode(mode1); + if (ret) + goto out; + for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) { + if (mode1 == mode2) + continue; + ret = amd_pstate_set_mode(mode2); + if (ret) + goto out; + } + } +out: + if (ret) + pr_warn("%s: failed to update status for %s->%s: %d\n", __func__, + amd_pstate_get_mode_string(mode1), + amd_pstate_get_mode_string(mode2), ret); + + 
amd_pstate_ut_cases[index].result = ret ? + AMD_PSTATE_UT_RESULT_FAIL : + AMD_PSTATE_UT_RESULT_PASS; +} + static int __init amd_pstate_ut_init(void) { u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases); diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 259a917da75f35..15e201d5e911c3 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -52,26 +52,12 @@ #define AMD_PSTATE_TRANSITION_LATENCY 20000 #define AMD_PSTATE_TRANSITION_DELAY 1000 #define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600 -#define CPPC_HIGHEST_PERF_PERFORMANCE 196 -#define CPPC_HIGHEST_PERF_DEFAULT 166 #define AMD_CPPC_EPP_PERFORMANCE 0x00 #define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80 #define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF #define AMD_CPPC_EPP_POWERSAVE 0xFF -/* - * enum amd_pstate_mode - driver working mode of amd pstate - */ -enum amd_pstate_mode { - AMD_PSTATE_UNDEFINED = 0, - AMD_PSTATE_DISABLE, - AMD_PSTATE_PASSIVE, - AMD_PSTATE_ACTIVE, - AMD_PSTATE_GUIDED, - AMD_PSTATE_MAX, -}; - static const char * const amd_pstate_mode_string[] = { [AMD_PSTATE_UNDEFINED] = "undefined", [AMD_PSTATE_DISABLE] = "disable", @@ -81,6 +67,14 @@ static const char * const amd_pstate_mode_string[] = { NULL, }; +const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode) +{ + if (mode < 0 || mode >= AMD_PSTATE_MAX) + return NULL; + return amd_pstate_mode_string[mode]; +} +EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string); + struct quirk_entry { u32 nominal_freq; u32 lowest_freq; @@ -372,43 +366,17 @@ static inline int amd_pstate_enable(bool enable) return static_call(amd_pstate_enable)(enable); } -static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata) -{ - struct cpuinfo_x86 *c = &cpu_data(0); - - /* - * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f, - * the highest performance level is set to 196. - * https://bugzilla.kernel.org/show_bug.cgi?id=218759 - */ - if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f)) - return CPPC_HIGHEST_PERF_PERFORMANCE; - - return CPPC_HIGHEST_PERF_DEFAULT; -} - static int pstate_init_perf(struct amd_cpudata *cpudata) { u64 cap1; - u32 highest_perf; int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &cap1); if (ret) return ret; - /* For platforms that do not support the preferred core feature, the - * highest_pef may be configured with 166 or 255, to avoid max frequency - * calculated wrongly. we take the AMD_CPPC_HIGHEST_PERF(cap1) value as - * the default max perf. 
- */ - if (cpudata->hw_prefcore) - highest_perf = amd_pstate_highest_perf_set(cpudata); - else - highest_perf = AMD_CPPC_HIGHEST_PERF(cap1); - - WRITE_ONCE(cpudata->highest_perf, highest_perf); - WRITE_ONCE(cpudata->max_limit_perf, highest_perf); + WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1)); + WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1)); WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1)); @@ -420,19 +388,13 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) static int cppc_init_perf(struct amd_cpudata *cpudata) { struct cppc_perf_caps cppc_perf; - u32 highest_perf; int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); if (ret) return ret; - if (cpudata->hw_prefcore) - highest_perf = amd_pstate_highest_perf_set(cpudata); - else - highest_perf = cppc_perf.highest_perf; - - WRITE_ONCE(cpudata->highest_perf, highest_perf); - WRITE_ONCE(cpudata->max_limit_perf, highest_perf); + WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf); + WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf); WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); WRITE_ONCE(cpudata->lowest_nonlinear_perf, cppc_perf.lowest_nonlinear_perf); @@ -554,12 +516,15 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, } if (value == prev) - return; + goto cpufreq_policy_put; WRITE_ONCE(cpudata->cppc_req_cached, value); amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, fast_switch); + +cpufreq_policy_put: + cpufreq_cpu_put(policy); } static int amd_pstate_verify(struct cpufreq_policy_data *policy) @@ -656,7 +621,12 @@ static void amd_pstate_adjust_perf(unsigned int cpu, unsigned long max_perf, min_perf, des_perf, cap_perf, lowest_nonlinear_perf; struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct amd_cpudata *cpudata = policy->driver_data; + struct amd_cpudata *cpudata; + + if (!policy) + return; + + cpudata = policy->driver_data; if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) amd_pstate_update_min_max_limit(policy); @@ -803,66 +773,22 @@ static void amd_pstste_sched_prefcore_workfn(struct work_struct *work) } static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn); -/* - * Get the highest performance register value. - * @cpu: CPU from which to get highest performance. - * @highest_perf: Return address. - * - * Return: 0 for success, -EIO otherwise. 
- */ -static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf) -{ - int ret; - - if (cpu_feature_enabled(X86_FEATURE_CPPC)) { - u64 cap1; - - ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); - if (ret) - return ret; - WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1)); - } else { - u64 cppc_highest_perf; - - ret = cppc_get_highest_perf(cpu, &cppc_highest_perf); - if (ret) - return ret; - WRITE_ONCE(*highest_perf, cppc_highest_perf); - } - - return (ret); -} - #define CPPC_MAX_PERF U8_MAX static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) { - int ret, prio; - u32 highest_perf; - - ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf); - if (ret) + /* user disabled or not detected */ + if (!amd_pstate_prefcore) return; cpudata->hw_prefcore = true; - /* check if CPPC preferred core feature is enabled*/ - if (highest_perf < CPPC_MAX_PERF) - prio = (int)highest_perf; - else { - pr_debug("AMD CPPC preferred core is unsupported!\n"); - cpudata->hw_prefcore = false; - return; - } - - if (!amd_pstate_prefcore) - return; /* * The priorities can be set regardless of whether or not * sched_set_itmt_support(true) has been called and it is valid to * update them at any time after it has been called. */ - sched_set_itmt_core_prio(prio, cpudata->cpu); + sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu); schedule_work(&sched_prefcore_work); } @@ -870,22 +796,27 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) static void amd_pstate_update_limits(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct amd_cpudata *cpudata = policy->driver_data; + struct amd_cpudata *cpudata; u32 prev_high = 0, cur_high = 0; int ret; bool highest_perf_changed = false; - mutex_lock(&amd_pstate_driver_lock); - if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore)) - goto free_cpufreq_put; + if (!policy) + return; + + cpudata = policy->driver_data; + + if (!amd_pstate_prefcore) + return; - ret = amd_pstate_get_highest_perf(cpu, &cur_high); + mutex_lock(&amd_pstate_driver_lock); + ret = amd_get_highest_perf(cpu, &cur_high); if (ret) goto free_cpufreq_put; prev_high = READ_ONCE(cpudata->prefcore_ranking); - if (prev_high != cur_high) { - highest_perf_changed = true; + highest_perf_changed = (prev_high != cur_high); + if (highest_perf_changed) { WRITE_ONCE(cpudata->prefcore_ranking, cur_high); if (cur_high < CPPC_MAX_PERF) @@ -949,8 +880,8 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu) static int amd_pstate_init_freq(struct amd_cpudata *cpudata) { int ret; - u32 min_freq; - u32 highest_perf, max_freq; + u32 min_freq, max_freq; + u64 numerator; u32 nominal_perf, nominal_freq; u32 lowest_nonlinear_perf, lowest_nonlinear_freq; u32 boost_ratio, lowest_nonlinear_ratio; @@ -972,8 +903,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata) nominal_perf = READ_ONCE(cpudata->nominal_perf); - highest_perf = READ_ONCE(cpudata->highest_perf); - boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf); + ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator); + if (ret) + return ret; + boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf); max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000; lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); @@ -1028,12 +961,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) cpudata->cpu = policy->cpu; - amd_pstate_init_prefcore(cpudata); - ret = 
amd_pstate_init_perf(cpudata); if (ret) goto free_cpudata1; + amd_pstate_init_prefcore(cpudata); + ret = amd_pstate_init_freq(cpudata); if (ret) goto free_cpudata1; @@ -1349,7 +1282,7 @@ static ssize_t amd_pstate_show_status(char *buf) return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]); } -static int amd_pstate_update_status(const char *buf, size_t size) +int amd_pstate_update_status(const char *buf, size_t size) { int mode_idx; @@ -1366,6 +1299,7 @@ static int amd_pstate_update_status(const char *buf, size_t size) return 0; } +EXPORT_SYMBOL_GPL(amd_pstate_update_status); static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1483,12 +1417,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) cpudata->cpu = policy->cpu; cpudata->epp_policy = 0; - amd_pstate_init_prefcore(cpudata); - ret = amd_pstate_init_perf(cpudata); if (ret) goto free_cpudata1; + amd_pstate_init_prefcore(cpudata); + ret = amd_pstate_init_freq(cpudata); if (ret) goto free_cpudata1; @@ -1555,7 +1489,7 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) pr_debug("CPU %d exiting\n", policy->cpu); } -static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) +static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; u32 max_perf, min_perf, min_limit_perf, max_limit_perf; @@ -1605,7 +1539,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) * This return value can only be negative for shared_memory * systems where EPP register read/write not supported. */ - return; + return epp; } if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) @@ -1618,12 +1552,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) } WRITE_ONCE(cpudata->cppc_req_cached, value); - amd_pstate_set_epp(cpudata, epp); + return amd_pstate_set_epp(cpudata, epp); } static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; + int ret; if (!policy->cpuinfo.max_freq) return -ENODEV; @@ -1633,7 +1568,9 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) cpudata->policy = policy->policy; - amd_pstate_epp_update_limit(policy); + ret = amd_pstate_epp_update_limit(policy); + if (ret) + return ret; /* * policy->cur is never updated with the amd_pstate_epp driver, but it @@ -1947,6 +1884,12 @@ static int __init amd_pstate_init(void) static_call_update(amd_pstate_update_perf, cppc_update_perf); } + if (amd_pstate_prefcore) { + ret = amd_detect_prefcore(&amd_pstate_prefcore); + if (ret) + return ret; + } + /* enable amd pstate feature */ ret = amd_pstate_enable(true); if (ret) { diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h index cc8bb2bc325aa3..cd573bc6b6db83 100644 --- a/drivers/cpufreq/amd-pstate.h +++ b/drivers/cpufreq/amd-pstate.h @@ -103,4 +103,18 @@ struct amd_cpudata { bool boost_state; }; +/* + * enum amd_pstate_mode - driver working mode of amd pstate + */ +enum amd_pstate_mode { + AMD_PSTATE_UNDEFINED = 0, + AMD_PSTATE_DISABLE, + AMD_PSTATE_PASSIVE, + AMD_PSTATE_ACTIVE, + AMD_PSTATE_GUIDED, + AMD_PSTATE_MAX, +}; +const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode); +int amd_pstate_update_status(const char *buf, size_t size); + #endif /* _LINUX_AMD_PSTATE_H */ diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c index af34c22fa273da..4dcacab9b4bf25 100644 --- a/drivers/cpufreq/apple-soc-cpufreq.c +++ 
b/drivers/cpufreq/apple-soc-cpufreq.c @@ -85,7 +85,7 @@ static const struct apple_soc_cpufreq_info soc_default_info = { .cur_pstate_mask = 0, /* fallback */ }; -static const struct of_device_id apple_soc_cpufreq_of_match[] = { +static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = { { .compatible = "apple,t8103-cluster-cpufreq", .data = &soc_t8103_info, diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index ce5a5641b6dd0e..7a979db81f0982 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void) int ret = 0, opps_index = 0, cpu, nb_cpus; struct freq_table *freq_tables; struct device_node *node; - struct cpumask cpus; + static struct cpumask cpus; node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match, NULL); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index bafa32dd375d5e..2b8708475ac776 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include @@ -224,9 +224,9 @@ static void __init cppc_freq_invariance_init(void) * Fake (unused) bandwidth; workaround to "fix" * priority inheritance. */ - .sched_runtime = 1000000, - .sched_deadline = 10000000, - .sched_period = 10000000, + .sched_runtime = NSEC_PER_MSEC, + .sched_deadline = 10 * NSEC_PER_MSEC, + .sched_period = 10 * NSEC_PER_MSEC, }; int ret; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index cac379ba006dfb..18942bfe9c95f7 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -166,6 +166,7 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,sm6350", }, { .compatible = "qcom,sm6375", }, { .compatible = "qcom,sm7225", }, + { .compatible = "qcom,sm7325", }, { .compatible = "qcom,sm8150", }, { .compatible = "qcom,sm8250", }, { .compatible = "qcom,sm8350", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 6532c4d7133825..983443396f8f22 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -69,7 +69,6 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index) static const char *find_supply_name(struct device *dev) { struct device_node *np __free(device_node) = of_node_get(dev->of_node); - struct property *pp; int cpu = dev->id; /* This must be valid for sure */ @@ -77,14 +76,10 @@ static const char *find_supply_name(struct device *dev) return NULL; /* Try "cpu0" for older DTs */ - if (!cpu) { - pp = of_find_property(np, "cpu0-supply", NULL); - if (pp) - return "cpu0"; - } + if (!cpu && of_property_present(np, "cpu0-supply")) + return "cpu0"; - pp = of_find_property(np, "cpu-supply", NULL); - if (pp) + if (of_property_present(np, "cpu-supply")) return "cpu"; dev_dbg(dev, "no regulator for cpu%d\n", cpu); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 04fc786dd2c096..f98c9438760c97 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -575,30 +575,11 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) return policy->transition_delay_us; latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; - if (latency) { - unsigned int max_delay_us = 2 * MSEC_PER_SEC; + if (latency) + /* Give a 50% breathing room between updates */ + return latency + (latency >> 1); - /* - * If the platform 
already has high transition_latency, use it - * as-is. - */ - if (latency > max_delay_us) - return latency; - - /* - * For platforms that can change the frequency very fast (< 2 - * us), the above formula gives a decent transition delay. But - * for platforms where transition_latency is in milliseconds, it - * ends up giving unrealistic values. - * - * Cap the default transition delay to 2 ms, which seems to be - * a reasonable amount of time after which we should reevaluate - * the frequency. - */ - return min(latency * LATENCY_MULTIPLIER, max_delay_us); - } - - return LATENCY_MULTIPLIER; + return USEC_PER_MSEC; } EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index c0278d023cfce5..b0018f371ea3a5 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -215,6 +216,7 @@ struct global_params { * @hwp_req_cached: Cached value of the last HWP Request MSR * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR * @last_io_update: Last time when IO wake flag was set + * @capacity_perf: Highest perf used for scale invariance * @sched_flags: Store scheduler flags for possible cross CPU update * @hwp_boost_min: Last HWP boosted min performance * @suspended: Whether or not the driver has been suspended. @@ -253,6 +255,7 @@ struct cpudata { u64 hwp_req_cached; u64 hwp_cap_cached; u64 last_io_update; + unsigned int capacity_perf; unsigned int sched_flags; u32 hwp_boost_min; bool suspended; @@ -295,6 +298,7 @@ static int hwp_mode_bdw __ro_after_init; static bool per_cpu_limits __ro_after_init; static bool hwp_forced __ro_after_init; static bool hwp_boost __read_mostly; +static bool hwp_is_hybrid; static struct cpufreq_driver *intel_pstate_driver __read_mostly; @@ -934,6 +938,139 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { NULL, }; +static struct cpudata *hybrid_max_perf_cpu __read_mostly; +/* + * Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata, + * and the x86 arch scale-invariance information from concurrent updates. + */ +static DEFINE_MUTEX(hybrid_capacity_lock); + +static void hybrid_set_cpu_capacity(struct cpudata *cpu) +{ + arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf, + hybrid_max_perf_cpu->capacity_perf, + cpu->capacity_perf, + cpu->pstate.max_pstate_physical); + + pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu, + cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf, + cpu->pstate.max_pstate_physical); +} + +static void hybrid_clear_cpu_capacity(unsigned int cpunum) +{ + arch_set_cpu_capacity(cpunum, 1, 1, 1, 1); +} + +static void hybrid_get_capacity_perf(struct cpudata *cpu) +{ + if (READ_ONCE(global.no_turbo)) { + cpu->capacity_perf = cpu->pstate.max_pstate_physical; + return; + } + + cpu->capacity_perf = HWP_HIGHEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); +} + +static void hybrid_set_capacity_of_cpus(void) +{ + int cpunum; + + for_each_online_cpu(cpunum) { + struct cpudata *cpu = all_cpu_data[cpunum]; + + if (cpu) + hybrid_set_cpu_capacity(cpu); + } +} + +static void hybrid_update_cpu_capacity_scaling(void) +{ + struct cpudata *max_perf_cpu = NULL; + unsigned int max_cap_perf = 0; + int cpunum; + + for_each_online_cpu(cpunum) { + struct cpudata *cpu = all_cpu_data[cpunum]; + + if (!cpu) + continue; + + /* + * During initialization, CPU performance at full capacity needs + * to be determined. 
+ */ + if (!hybrid_max_perf_cpu) + hybrid_get_capacity_perf(cpu); + + /* + * If hybrid_max_perf_cpu is not NULL at this point, it is + * being replaced, so don't take it into account when looking + * for the new one. + */ + if (cpu == hybrid_max_perf_cpu) + continue; + + if (cpu->capacity_perf > max_cap_perf) { + max_cap_perf = cpu->capacity_perf; + max_perf_cpu = cpu; + } + } + + if (max_perf_cpu) { + hybrid_max_perf_cpu = max_perf_cpu; + hybrid_set_capacity_of_cpus(); + } else { + pr_info("Found no CPUs with nonzero maximum performance\n"); + /* Revert to the flat CPU capacity structure. */ + for_each_online_cpu(cpunum) + hybrid_clear_cpu_capacity(cpunum); + } +} + +static void __hybrid_init_cpu_capacity_scaling(void) +{ + hybrid_max_perf_cpu = NULL; + hybrid_update_cpu_capacity_scaling(); +} + +static void hybrid_init_cpu_capacity_scaling(void) +{ + bool disable_itmt = false; + + mutex_lock(&hybrid_capacity_lock); + + /* + * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity + * scaling has been enabled already and the driver is just changing the + * operation mode. + */ + if (hybrid_max_perf_cpu) { + __hybrid_init_cpu_capacity_scaling(); + goto unlock; + } + + /* + * On hybrid systems, use asym capacity instead of ITMT, but because + * the capacity of SMT threads is not deterministic even approximately, + * do not do that when SMT is in use. + */ + if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) { + __hybrid_init_cpu_capacity_scaling(); + disable_itmt = true; + } + +unlock: + mutex_unlock(&hybrid_capacity_lock); + + /* + * Disabling ITMT causes sched domains to be rebuilt to disable asym + * packing and enable asym capacity. + */ + if (disable_itmt) + sched_clear_itmt_support(); +} + static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) { u64 cap; @@ -962,6 +1099,43 @@ static void intel_pstate_get_hwp_cap(struct cpudata *cpu) } } +static void hybrid_update_capacity(struct cpudata *cpu) +{ + unsigned int max_cap_perf; + + mutex_lock(&hybrid_capacity_lock); + + if (!hybrid_max_perf_cpu) + goto unlock; + + /* + * The maximum performance of the CPU may have changed, but assume + * that the performance of the other CPUs has not changed. + */ + max_cap_perf = hybrid_max_perf_cpu->capacity_perf; + + intel_pstate_get_hwp_cap(cpu); + + hybrid_get_capacity_perf(cpu); + /* Should hybrid_max_perf_cpu be replaced by this CPU? */ + if (cpu->capacity_perf > max_cap_perf) { + hybrid_max_perf_cpu = cpu; + hybrid_set_capacity_of_cpus(); + goto unlock; + } + + /* If this CPU is hybrid_max_perf_cpu, should it be replaced? */ + if (cpu == hybrid_max_perf_cpu && cpu->capacity_perf < max_cap_perf) { + hybrid_update_cpu_capacity_scaling(); + goto unlock; + } + + hybrid_set_cpu_capacity(cpu); + +unlock: + mutex_unlock(&hybrid_capacity_lock); +} + static void intel_pstate_hwp_set(unsigned int cpu) { struct cpudata *cpu_data = all_cpu_data[cpu]; @@ -1070,6 +1244,22 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu) value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + + mutex_lock(&hybrid_capacity_lock); + + if (!hybrid_max_perf_cpu) { + mutex_unlock(&hybrid_capacity_lock); + + return; + } + + if (hybrid_max_perf_cpu == cpu) + hybrid_update_cpu_capacity_scaling(); + + mutex_unlock(&hybrid_capacity_lock); + + /* Reset the capacity of the CPU going offline to the initial value. 
*/ + hybrid_clear_cpu_capacity(cpu->cpu); } #define POWER_CTL_EE_ENABLE 1 @@ -1165,21 +1355,46 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata, static void intel_pstate_update_limits(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); + struct cpudata *cpudata; if (!policy) return; - __intel_pstate_update_max_freq(all_cpu_data[cpu], policy); + cpudata = all_cpu_data[cpu]; + + __intel_pstate_update_max_freq(cpudata, policy); + + /* Prevent the driver from being unregistered now. */ + mutex_lock(&intel_pstate_driver_lock); cpufreq_cpu_release(policy); + + hybrid_update_capacity(cpudata); + + mutex_unlock(&intel_pstate_driver_lock); } static void intel_pstate_update_limits_for_all(void) { int cpu; - for_each_possible_cpu(cpu) - intel_pstate_update_limits(cpu); + for_each_possible_cpu(cpu) { + struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); + + if (!policy) + continue; + + __intel_pstate_update_max_freq(all_cpu_data[cpu], policy); + + cpufreq_cpu_release(policy); + } + + mutex_lock(&hybrid_capacity_lock); + + if (hybrid_max_perf_cpu) + __hybrid_init_cpu_capacity_scaling(); + + mutex_unlock(&hybrid_capacity_lock); } /************************** sysfs begin ************************/ @@ -1618,12 +1833,19 @@ static void intel_pstate_notify_work(struct work_struct *work) __intel_pstate_update_max_freq(cpudata, policy); cpufreq_cpu_release(policy); + + /* + * The driver will not be unregistered while this function is + * running, so update the capacity without acquiring the driver + * lock. + */ + hybrid_update_capacity(cpudata); } wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } -static DEFINE_SPINLOCK(hwp_notify_lock); +static DEFINE_RAW_SPINLOCK(hwp_notify_lock); static cpumask_t hwp_intr_enable_mask; #define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0) @@ -1646,7 +1868,7 @@ void notify_hwp_interrupt(void) if (!(value & status_mask)) return; - spin_lock_irqsave(&hwp_notify_lock, flags); + raw_spin_lock_irqsave(&hwp_notify_lock, flags); if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) goto ack_intr; @@ -1654,13 +1876,13 @@ void notify_hwp_interrupt(void) schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work, msecs_to_jiffies(10)); - spin_unlock_irqrestore(&hwp_notify_lock, flags); + raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); return; ack_intr: wrmsrl_safe(MSR_HWP_STATUS, 0); - spin_unlock_irqrestore(&hwp_notify_lock, flags); + raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); } static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) @@ -1673,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); - spin_lock_irq(&hwp_notify_lock); + raw_spin_lock_irq(&hwp_notify_lock); cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask); - spin_unlock_irq(&hwp_notify_lock); + raw_spin_unlock_irq(&hwp_notify_lock); if (cancel_work) cancel_delayed_work_sync(&cpudata->hwp_notify_work); @@ -1690,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ; - spin_lock_irq(&hwp_notify_lock); + raw_spin_lock_irq(&hwp_notify_lock); INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); - spin_unlock_irq(&hwp_notify_lock); + 
raw_spin_unlock_irq(&hwp_notify_lock); if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ; @@ -2034,8 +2256,10 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) if (pstate_funcs.get_cpu_scaling) { cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu); - if (cpu->pstate.scaling != perf_ctl_scaling) + if (cpu->pstate.scaling != perf_ctl_scaling) { intel_pstate_hybrid_hwp_adjust(cpu); + hwp_is_hybrid = true; + } } else { cpu->pstate.scaling = perf_ctl_scaling; } @@ -2425,6 +2649,10 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(INTEL_ICELAKE_X, core_funcs), X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs), X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs), + X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs), + X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs), + X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs), + X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs), {} }; #endif @@ -2703,6 +2931,8 @@ static int intel_pstate_cpu_online(struct cpufreq_policy *policy) */ intel_pstate_hwp_reenable(cpu); cpu->suspended = false; + + hybrid_update_capacity(cpu); } return 0; @@ -3143,6 +3373,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) global.min_perf_pct = min_perf_pct_min(); + hybrid_init_cpu_capacity_scaling(); + return 0; } diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c index 5f79b6de127c9f..6b5e6798d9a283 100644 --- a/drivers/cpufreq/loongson3_cpufreq.c +++ b/drivers/cpufreq/loongson3_cpufreq.c @@ -176,7 +176,7 @@ static DEFINE_PER_CPU(struct loongson3_freq_data *, freq_data); static inline int do_service_request(u32 id, u32 info, u32 cmd, u32 val, u32 extra) { int retries; - unsigned int cpu = smp_processor_id(); + unsigned int cpu = raw_smp_processor_id(); unsigned int package = cpu_data[cpu].package; union smc_message msg, last; diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c index f9306410a07f1c..690da85c4865a0 100644 --- a/drivers/cpufreq/maple-cpufreq.c +++ b/drivers/cpufreq/maple-cpufreq.c @@ -238,4 +238,5 @@ static int __init maple_cpufreq_init(void) module_init(maple_cpufreq_init); +MODULE_DESCRIPTION("cpufreq driver for Maple 970FX/970MP boards"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 3a1aadaa723c4a..663f61565cf728 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -738,7 +738,7 @@ static const struct mtk_cpufreq_platform_data mt8516_platform_data = { }; /* List of machines supported by this driver */ -static const struct of_device_id mtk_cpufreq_machines[] __initconst = { +static const struct of_device_id mtk_cpufreq_machines[] __initconst __maybe_unused = { { .compatible = "mediatek,mt2701", .data = &mt2701_platform_data }, { .compatible = "mediatek,mt2712", .data = &mt2701_platform_data }, { .compatible = "mediatek,mt7622", .data = &mt7622_platform_data }, diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 3458d5cc9b7f2d..de8be0a8932d88 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -28,9 +28,6 @@ #include #include -#include -#include - /* OPP tolerance in percentage */ #define OPP_TOLERANCE 4 diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index ee925b53b6b928..5fc9cb48051610 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c @@ -269,5 +269,6 @@ 
static void __exit pas_cpufreq_exit(void) module_init(pas_cpufreq_init); module_exit(pas_cpufreq_exit); +MODULE_DESCRIPTION("cpufreq driver for PA Semi PWRficient"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Egor Martovetsky , Olof Johansson "); diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 2cd2b06849a23d..74ff6c47df29bd 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c @@ -505,7 +505,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) continue; if (strcmp(loc, "CPU CLOCK")) continue; - if (!of_get_property(hwclock, "platform-get-frequency", NULL)) + if (!of_property_present(hwclock, "platform-get-frequency")) continue; break; } @@ -671,4 +671,5 @@ static int __init g5_cpufreq_init(void) module_init(g5_cpufreq_init); +MODULE_DESCRIPTION("cpufreq driver for SMU & 970FX based G5 Macs"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 50c62929f7ca6c..8de759247771fb 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -692,7 +692,7 @@ static void gpstate_timer_handler(struct timer_list *t) } /* - * If PMCR was last updated was using fast_swtich then + * If PMCR was last updated was using fast_switch then * We may have wrong in gpstate->last_lpstate_idx * value. Hence, read from PMCR to get correct data. */ @@ -1160,5 +1160,6 @@ static void __exit powernv_cpufreq_exit(void) } module_exit(powernv_cpufreq_exit); +MODULE_DESCRIPTION("cpufreq driver for IBM/OpenPOWER powernv systems"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vaidyanathan Srinivasan "); diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index 5ee4c7bfdcc536..98595b3ea13fde 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -168,5 +168,6 @@ static void __exit cbe_cpufreq_exit(void) module_init(cbe_cpufreq_init); module_exit(cbe_cpufreq_exit); +MODULE_DESCRIPTION("cpufreq driver for Cell BE processors"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft "); diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 370fe6a0104b6c..900d6844c43d3f 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 939702dfa73f75..703308fb891a33 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -611,7 +611,7 @@ static struct platform_driver qcom_cpufreq_driver = { }, }; -static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { +static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_unused = { { .compatible = "qcom,apq8096", .data = &match_data_kryo }, { .compatible = "qcom,msm8909", .data = &match_data_msm8909 }, { .compatible = "qcom,msm8996", .data = &match_data_kryo }, diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 78b875db6b6691..d8ab5b01d46d04 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -171,10 +171,9 @@ static struct cpufreq_driver spear_cpufreq_driver = { static int spear_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - const struct property *prop; struct cpufreq_frequency_table *freq_tbl; - const __be32 *val; - int cnt, i, ret; + u32 val; + int cnt, ret, i = 
0; np = of_cpu_device_node_get(0); if (!np) { @@ -186,26 +185,23 @@ static int spear_cpufreq_probe(struct platform_device *pdev) &spear_cpufreq.transition_latency)) spear_cpufreq.transition_latency = CPUFREQ_ETERNAL; - prop = of_find_property(np, "cpufreq_tbl", NULL); - if (!prop || !prop->value) { + cnt = of_property_count_u32_elems(np, "cpufreq_tbl"); + if (cnt <= 0) { pr_err("Invalid cpufreq_tbl\n"); ret = -ENODEV; goto out_put_node; } - cnt = prop->length / sizeof(u32); - val = prop->value; - freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL); if (!freq_tbl) { ret = -ENOMEM; goto out_put_node; } - for (i = 0; i < cnt; i++) - freq_tbl[i].frequency = be32_to_cpup(val++); + of_property_for_each_u32(np, "cpufreq_tbl", val) + freq_tbl[i++].frequency = val; - freq_tbl[i].frequency = CPUFREQ_TABLE_END; + freq_tbl[cnt].frequency = CPUFREQ_TABLE_END; spear_cpufreq.freq_tbl = freq_tbl; diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index 8e2e703c3865d9..b15b3142b5fec3 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -267,7 +267,7 @@ static int __init sti_cpufreq_init(void) goto skip_voltage_scaling; } - if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) { + if (!of_property_present(ddata.cpu->of_node, "operating-points-v2")) { dev_err(ddata.cpu, "OPP-v2 not supported\n"); goto skip_voltage_scaling; } diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 95ac8d46c1563e..293921acec9378 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -146,7 +146,7 @@ static bool dt_has_supported_hw(void) return false; for_each_child_of_node_scoped(np, opp) { - if (of_find_property(opp, "opp-supported-hw", NULL)) { + if (of_property_present(opp, "opp-supported-hw")) { has_opp_supported_hw = true; break; } diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 4d3f27958fbde9..ba621ce1cdda69 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -16,6 +16,7 @@ #include #include #include +#include #define REVISION_MASK 0xF #define REVISION_SHIFT 28 @@ -90,6 +91,9 @@ struct ti_cpufreq_soc_data { unsigned long efuse_shift; unsigned long rev_offset; bool multi_regulator; +/* Backward compatibility hack: Might have missing syscon */ +#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1 + u8 quirks; }; struct ti_cpufreq_data { @@ -254,6 +258,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = { .efuse_mask = BIT(3), .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, }; /* @@ -281,6 +286,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = { .efuse_mask = BIT(9), .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = true, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, }; /* @@ -295,6 +301,14 @@ static struct ti_cpufreq_soc_data am3517_soc_data = { .efuse_mask = 0, .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, +}; + +static const struct soc_device_attribute k3_cpufreq_soc[] = { + { .family = "AM62X", .revision = "SR1.0" }, + { .family = "AM62AX", .revision = "SR1.0" }, + { .family = "AM62PX", .revision = "SR1.0" }, + { /* sentinel */ } }; static struct ti_cpufreq_soc_data am625_soc_data = { @@ -340,7 +354,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data, ret = regmap_read(opp_data->syscon, 
opp_data->soc_data->efuse_offset, &efuse); - if (ret == -EIO) { + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + opp_data->soc_data->efuse_offset, 4); @@ -378,10 +392,20 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data, struct device *dev = opp_data->cpu_dev; u32 revision; int ret; + if (soc_device_match(k3_cpufreq_soc)) { + /* + * Since the SR is 1.0, hard code the revision_value as + * 0x1 here. This way we avoid reusing the same register + * that is giving us required information inside socinfo + * anyway. + */ + *revision_value = 0x1; + goto done; + } ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset, &revision); - if (ret == -EIO) { + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + opp_data->soc_data->rev_offset, 4); @@ -400,6 +424,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data, *revision_value = BIT((revision >> REVISION_SHIFT) & REVISION_MASK); +done: return 0; } @@ -419,7 +444,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data) return 0; } -static const struct of_device_id ti_cpufreq_of_match[] = { +static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = { { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, { .compatible = "ti,am3517", .data = &am3517_soc_data, }, { .compatible = "ti,am43", .data = &am4x_soc_data, }, diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c index fae9587943397b..146f9706802249 100644 --- a/drivers/cpuidle/cpuidle-psci-domain.c +++ b/drivers/cpuidle/cpuidle-psci-domain.c @@ -67,12 +67,16 @@ static int psci_pd_init(struct device_node *np, bool use_osi) /* * Allow power off when OSI has been successfully enabled. - * PREEMPT_RT is not yet ready to enter domain idle states. + * On a PREEMPT_RT based configuration the domain idle states are + * supported, but only during system-wide suspend. */ - if (use_osi && !IS_ENABLED(CONFIG_PREEMPT_RT)) + if (use_osi) { pd->power_off = psci_pd_power_off; - else + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON; + } else { pd->flags |= GENPD_FLAG_ALWAYS_ON; + } /* Use governor for CPU PM domains if it has some states to manage. */ pd_gov = pd->states ? &pm_domain_cpu_gov : NULL; @@ -138,7 +142,6 @@ static const struct of_device_id psci_of_match[] = { static int psci_cpuidle_domain_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - struct device_node *node; bool use_osi = psci_has_osi_support(); int ret = 0, pd_count = 0; @@ -149,15 +152,13 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev) * Parse child nodes for the "#power-domain-cells" property and * initialize a genpd/genpd-of-provider pair when it's found. 
*/ - for_each_child_of_node(np, node) { + for_each_child_of_node_scoped(np, node) { if (!of_property_present(node, "#power-domain-cells")) continue; ret = psci_pd_init(node, use_osi); - if (ret) { - of_node_put(node); + if (ret) goto exit; - } pd_count++; } diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index 782030a2770311..2562dc001fc1de 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -37,6 +37,7 @@ struct psci_cpuidle_data { static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data); static DEFINE_PER_CPU(u32, domain_state); +static bool psci_cpuidle_use_syscore; static bool psci_cpuidle_use_cpuhp; void psci_set_domain_state(u32 state) @@ -166,6 +167,12 @@ static struct syscore_ops psci_idle_syscore_ops = { .resume = psci_idle_syscore_resume, }; +static void psci_idle_init_syscore(void) +{ + if (psci_cpuidle_use_syscore) + register_syscore_ops(&psci_idle_syscore_ops); +} + static void psci_idle_init_cpuhp(void) { int err; @@ -173,8 +180,6 @@ static void psci_idle_init_cpuhp(void) if (!psci_cpuidle_use_cpuhp) return; - register_syscore_ops(&psci_idle_syscore_ops); - err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, "cpuidle/psci:online", psci_idle_cpuhp_up, @@ -222,22 +227,23 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv, if (!psci_has_osi_support()) return 0; - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - return 0; - data->dev = dt_idle_attach_cpu(cpu, "psci"); if (IS_ERR_OR_NULL(data->dev)) return PTR_ERR_OR_ZERO(data->dev); + psci_cpuidle_use_syscore = true; + /* * Using the deepest state for the CPU to trigger a potential selection * of a shared state for the domain, assumes the domain states are all - * deeper states. + * deeper states. On PREEMPT_RT the hierarchical topology is limited to + * s2ram and s2idle. 
*/ - drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE; - drv->states[state_count - 1].enter = psci_enter_domain_idle_state; drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state; - psci_cpuidle_use_cpuhp = true; + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + drv->states[state_count - 1].enter = psci_enter_domain_idle_state; + psci_cpuidle_use_cpuhp = true; + } return 0; } @@ -313,6 +319,7 @@ static void psci_cpu_deinit_idle(int cpu) struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu); dt_idle_detach_cpu(data->dev); + psci_cpuidle_use_syscore = false; psci_cpuidle_use_cpuhp = false; } @@ -409,6 +416,7 @@ static int psci_cpuidle_probe(struct platform_device *pdev) goto out_fail; } + psci_idle_init_syscore(); psci_idle_init_cpuhp(); return 0; diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index a6e123dfe394d8..d228b4d18d5600 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -8,6 +8,7 @@ #define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt +#include #include #include #include @@ -236,19 +237,16 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, { struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu); struct device_node *state_node; - struct device_node *cpu_node; u32 *states; int i, ret; - cpu_node = of_cpu_device_node_get(cpu); + struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu); if (!cpu_node) return -ENODEV; states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL); - if (!states) { - ret = -ENOMEM; - goto fail; - } + if (!states) + return -ENOMEM; /* Parse SBI specific details from state DT nodes */ for (i = 1; i < state_count; i++) { @@ -264,10 +262,8 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, pr_debug("sbi-state %#x index %d\n", states[i], i); } - if (i != state_count) { - ret = -ENODEV; - goto fail; - } + if (i != state_count) + return -ENODEV; /* Initialize optional data, used for the hierarchical topology. */ ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu); @@ -277,10 +273,7 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, /* Store states in the per-cpu struct. */ data->states = states; -fail: - of_node_put(cpu_node); - - return ret; + return 0; } static void sbi_cpuidle_deinit_cpu(int cpu) @@ -455,7 +448,6 @@ static void sbi_pd_remove(void) static int sbi_genpd_probe(struct device_node *np) { - struct device_node *node; int ret = 0, pd_count = 0; if (!np) @@ -465,13 +457,13 @@ static int sbi_genpd_probe(struct device_node *np) * Parse child nodes for the "#power-domain-cells" property and * initialize a genpd/genpd-of-provider pair when it's found. 
*/ - for_each_child_of_node(np, node) { + for_each_child_of_node_scoped(np, node) { if (!of_property_present(node, "#power-domain-cells")) continue; ret = sbi_pd_init(node); if (ret) - goto put_node; + goto remove_pd; pd_count++; } @@ -487,8 +479,6 @@ static int sbi_genpd_probe(struct device_node *np) return 0; -put_node: - of_node_put(node); remove_pd: sbi_pd_remove(); pr_err("failed to create CPU PM domains ret=%d\n", ret); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 02e40fd7d948c9..9e418aec17550e 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -228,10 +228,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev, if (broadcast && tick_broadcast_enter()) { index = find_deepest_state(drv, dev, target_state->exit_latency_ns, CPUIDLE_FLAG_TIMER_STOP, false); - if (index < 0) { - default_idle_call(); - return -EBUSY; - } + target_state = &drv->states[index]; broadcast = false; } diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c index 1af63c189039e9..203e9b754aeac0 100644 --- a/drivers/cpuidle/dt_idle_genpd.c +++ b/drivers/cpuidle/dt_idle_genpd.c @@ -130,11 +130,10 @@ struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np, int dt_idle_pd_init_topology(struct device_node *np) { - struct device_node *node; struct of_phandle_args child, parent; int ret; - for_each_child_of_node(np, node) { + for_each_child_of_node_scoped(np, node) { if (of_parse_phandle_with_args(node, "power-domains", "#power-domain-cells", 0, &parent)) continue; @@ -143,10 +142,8 @@ int dt_idle_pd_init_topology(struct device_node *np) child.args_count = 0; ret = of_genpd_add_subdomain(&parent, &child); of_node_put(parent.np); - if (ret) { - of_node_put(node); + if (ret) return ret; - } } return 0; @@ -154,11 +151,10 @@ int dt_idle_pd_init_topology(struct device_node *np) int dt_idle_pd_remove_topology(struct device_node *np) { - struct device_node *node; struct of_phandle_args child, parent; int ret; - for_each_child_of_node(np, node) { + for_each_child_of_node_scoped(np, node) { if (of_parse_phandle_with_args(node, "power-domains", "#power-domain-cells", 0, &parent)) continue; @@ -167,10 +163,8 @@ int dt_idle_pd_remove_topology(struct device_node *np) child.args_count = 0; ret = of_genpd_remove_subdomain(&parent, &child); of_node_put(parent.np); - if (ret) { - of_node_put(node); + if (ret) return ret; - } } return 0; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 94f23c6fc93b4c..08b1238bcd7b37 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -21,7 +21,7 @@ config CRYPTO_DEV_PADLOCK (so called VIA PadLock ACE, Advanced Cryptography Engine) that provides instructions for very fast cryptographic operations with supported algorithms. - + The instructions are used only when the CPU supports them. Otherwise software encryption is used. @@ -78,18 +78,79 @@ config ZCRYPT config PKEY tristate "Kernel API for protected key handling" depends on S390 - depends on ZCRYPT help - With this option enabled the pkey kernel module provides an API + With this option enabled the pkey kernel modules provide an API for creation and handling of protected keys. Other parts of the kernel or userspace applications may use these functions. + The protected key support is distributed into: + - A pkey base and API kernel module (pkey.ko) which offers the + infrastructure for the pkey handler kernel modules, the ioctl + and the sysfs API and the in-kernel API to the crypto cipher + implementations using protected key. 
+ - A pkey pckmo kernel module (pkey-pckmo.ko) which is automatically + loaded when pckmo support (that is generation of protected keys + from clear key values) is available. + - A pkey CCA kernel module (pkey-cca.ko) which is automatically + loaded when a CEX crypto card is available. + - A pkey EP11 kernel module (pkey-ep11.ko) which is automatically + loaded when a CEX crypto card is available. + Select this option if you want to enable the kernel and userspace - API for proteced key handling. + API for protected key handling. + +config PKEY_CCA + tristate "PKEY CCA support handler" + depends on PKEY + depends on ZCRYPT + help + This is the CCA support handler for deriving protected keys + from CCA (secure) keys. Also this handler provides an alternate + way to make protected keys from clear key values. + + The PKEY CCA support handler needs a Crypto Express card (CEX) + in CCA mode. + + If you have selected the PKEY option then you should also enable + this option unless you are sure you never need to derive protected + keys from CCA key material. + +config PKEY_EP11 + tristate "PKEY EP11 support handler" + depends on PKEY + depends on ZCRYPT + help + This is the EP11 support handler for deriving protected keys + from EP11 (secure) keys. Also this handler provides an alternate + way to make protected keys from clear key values. + + The PKEY EP11 support handler needs a Crypto Express card (CEX) + in EP11 mode. + + If you have selected the PKEY option then you should also enable + this option unless you are sure you never need to derive protected + keys from EP11 key material. + +config PKEY_PCKMO + tristate "PKEY PCKMO support handler" + depends on PKEY + help + This is the PCKMO support handler for deriving protected keys + from clear key values via invoking the PCKMO instruction. + + The PCKMO instruction can be enabled and disabled in the crypto + settings at the LPAR profile. This handler checks for availability + during initialization and if built as a kernel module unloads + itself if PCKMO is disabled. + + The PCKMO way of deriving protected keys from clear key material + is especially used during self test of protected key ciphers like + PAES but the CCA and EP11 handlers provide alternate ways to + generate protected keys from clear key values. - Please note that creation of protected keys from secure keys - requires to have at least one CEX card in coprocessor mode - available at runtime. + If you have selected the PKEY option then you should also enable + this option unless you are sure you never need to derive protected + keys from clear key values directly via PCKMO. 
config CRYPTO_PAES_S390 tristate "PAES cipher algorithms" diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c index f7893e4ac59dd8..434f2b27101200 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c @@ -9,7 +9,7 @@ * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun4i-ss.h" -#include +#include #include /* This is a totally arbitrary value */ diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 96355d463b04e6..3adcc5e6569421 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -149,7 +149,6 @@ struct crypto4xx_alg { int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); -void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx); int crypto4xx_build_pd(struct crypto_async_request *req, struct crypto4xx_ctx *ctx, struct scatterlist *src, diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index 1013a666c93248..d68094ffb70af3 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -150,8 +150,6 @@ struct meson_alg_template { #endif }; -int meson_enqueue(struct crypto_async_request *areq, u32 type); - int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int meson_cipher_init(struct crypto_tfm *tfm); diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 8bd64fc37e7571..0dd90785db9a86 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2376,33 +2376,29 @@ static int atmel_aes_probe(struct platform_device *pdev) } /* Initializing the clock */ - aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk"); + aes_dd->iclk = devm_clk_get_prepared(&pdev->dev, "aes_clk"); if (IS_ERR(aes_dd->iclk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(aes_dd->iclk); goto err_tasklet_kill; } - err = clk_prepare(aes_dd->iclk); - if (err) - goto err_tasklet_kill; - err = atmel_aes_hw_version_init(aes_dd); if (err) - goto err_iclk_unprepare; + goto err_tasklet_kill; atmel_aes_get_cap(aes_dd); #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) { err = -EPROBE_DEFER; - goto err_iclk_unprepare; + goto err_tasklet_kill; } #endif err = atmel_aes_buff_init(aes_dd); if (err) - goto err_iclk_unprepare; + goto err_tasklet_kill; err = atmel_aes_dma_init(aes_dd); if (err) @@ -2429,8 +2425,6 @@ static int atmel_aes_probe(struct platform_device *pdev) atmel_aes_dma_cleanup(aes_dd); err_buff_cleanup: atmel_aes_buff_cleanup(aes_dd); -err_iclk_unprepare: - clk_unprepare(aes_dd->iclk); err_tasklet_kill: tasklet_kill(&aes_dd->done_task); tasklet_kill(&aes_dd->queue_task); @@ -2455,8 +2449,6 @@ static void atmel_aes_remove(struct platform_device *pdev) atmel_aes_dma_cleanup(aes_dd); atmel_aes_buff_cleanup(aes_dd); - - clk_unprepare(aes_dd->iclk); } static struct platform_driver atmel_aes_driver = { diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index f4cd6158a4f787..8cc57df257784a 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2623,27 +2623,23 @@ static int atmel_sha_probe(struct platform_device *pdev) } /* Initializing the clock */ - sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk"); + sha_dd->iclk = devm_clk_get_prepared(&pdev->dev, "sha_clk"); if 
(IS_ERR(sha_dd->iclk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(sha_dd->iclk); goto err_tasklet_kill; } - err = clk_prepare(sha_dd->iclk); - if (err) - goto err_tasklet_kill; - err = atmel_sha_hw_version_init(sha_dd); if (err) - goto err_iclk_unprepare; + goto err_tasklet_kill; atmel_sha_get_cap(sha_dd); if (sha_dd->caps.has_dma) { err = atmel_sha_dma_init(sha_dd); if (err) - goto err_iclk_unprepare; + goto err_tasklet_kill; dev_info(dev, "using %s for DMA transfers\n", dma_chan_name(sha_dd->dma_lch_in.chan)); @@ -2669,8 +2665,6 @@ static int atmel_sha_probe(struct platform_device *pdev) spin_unlock(&atmel_sha.lock); if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); -err_iclk_unprepare: - clk_unprepare(sha_dd->iclk); err_tasklet_kill: tasklet_kill(&sha_dd->queue_task); tasklet_kill(&sha_dd->done_task); @@ -2693,8 +2687,6 @@ static void atmel_sha_remove(struct platform_device *pdev) if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); - - clk_unprepare(sha_dd->iclk); } static struct platform_driver atmel_sha_driver = { diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 066f08a3a040d8..2cfb1b8d8c7cfc 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -56,7 +56,7 @@ #include "sg_sw_sec4.h" #include "key_gen.h" #include "caamalg_desc.h" -#include +#include #include #include #include diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 743ce50c14f2e0..65f6adb6c673f8 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -19,7 +19,7 @@ #include "jr.h" #include "caamalg_desc.h" #include -#include +#include #include #include #include @@ -961,7 +961,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); if (IS_ERR(drv_ctx)) - return (struct aead_edesc *)drv_ctx; + return ERR_CAST(drv_ctx); /* allocate space for base edesc and hw desc commands, link tables */ edesc = qi_cache_alloc(flags); @@ -1271,7 +1271,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, drv_ctx = get_drv_ctx(ctx, encrypt ? 
ENCRYPT : DECRYPT); if (IS_ERR(drv_ctx)) - return (struct skcipher_edesc *)drv_ctx; + return ERR_CAST(drv_ctx); src_nents = sg_nents_for_len(req->src, req->cryptlen); if (unlikely(src_nents < 0)) { diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 207dc422785ae6..e809d030ab1135 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #define CAAM_CRA_PRIORITY 2000 @@ -5006,10 +5006,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) struct device *dev = &ls_dev->dev; struct dpaa2_caam_priv *priv; struct dpaa2_caam_priv_per_cpu *ppriv; - cpumask_t clean_mask; + cpumask_var_t clean_mask; int err, cpu; u8 i; + err = -ENOMEM; + if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL)) + goto err_cpumask; + priv = dev_get_drvdata(dev); priv->dev = dev; @@ -5085,7 +5089,6 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) } } - cpumask_clear(&clean_mask); i = 0; for_each_online_cpu(cpu) { u8 j; @@ -5114,7 +5117,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) err = -ENOMEM; goto err_alloc_netdev; } - cpumask_set_cpu(cpu, &clean_mask); + cpumask_set_cpu(cpu, clean_mask); ppriv->net_dev->dev = *dev; netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi, @@ -5122,15 +5125,19 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) DPAA2_CAAM_NAPI_WEIGHT); } - return 0; + err = 0; + goto free_cpumask; err_alloc_netdev: - free_dpaa2_pcpu_netdev(priv, &clean_mask); + free_dpaa2_pcpu_netdev(priv, clean_mask); err_get_rx_queue: dpaa2_dpseci_congestion_free(priv); err_get_vers: dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); err_open: +free_cpumask: + free_cpumask_var(clean_mask); +err_cpumask: return err; } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index fdd724228c2fa8..25c02e26725858 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -708,6 +708,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, GFP_KERNEL : GFP_ATOMIC; struct ahash_edesc *edesc; + sg_num = pad_sg_nents(sg_num); edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); if (!edesc) return NULL; diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index ba8fb5d8a7b265..f6111ee9ed342d 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -736,7 +736,11 @@ int caam_qi_init(struct platform_device *caam_pdev) struct device *ctrldev = &caam_pdev->dev, *qidev; struct caam_drv_private *ctrlpriv; const cpumask_t *cpus = qman_affine_cpus(); - cpumask_t clean_mask; + cpumask_var_t clean_mask; + + err = -ENOMEM; + if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL)) + goto fail_cpumask; ctrlpriv = dev_get_drvdata(ctrldev); qidev = ctrldev; @@ -745,19 +749,16 @@ int caam_qi_init(struct platform_device *caam_pdev) err = init_cgr(qidev); if (err) { dev_err(qidev, "CGR initialization failed: %d\n", err); - return err; + goto fail_cgr; } /* Initialise response FQs */ err = alloc_rsp_fqs(qidev); if (err) { dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); - free_rsp_fqs(); - return err; + goto fail_fqs; } - cpumask_clear(&clean_mask); - /* * Enable the NAPI contexts on each of the core which has an affine * portal. 
@@ -773,7 +774,7 @@ int caam_qi_init(struct platform_device *caam_pdev) err = -ENOMEM; goto fail; } - cpumask_set_cpu(i, &clean_mask); + cpumask_set_cpu(i, clean_mask); priv->net_dev = net_dev; net_dev->dev = *qidev; @@ -788,7 +789,7 @@ int caam_qi_init(struct platform_device *caam_pdev) if (!qi_cache) { dev_err(qidev, "Can't allocate CAAM cache\n"); err = -ENOMEM; - goto fail2; + goto fail; } caam_debugfs_qi_init(ctrlpriv); @@ -798,11 +799,19 @@ int caam_qi_init(struct platform_device *caam_pdev) goto fail2; dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); - return 0; + goto free_cpumask; fail2: - free_rsp_fqs(); + kmem_cache_destroy(qi_cache); fail: - free_caam_qi_pcpu_netdev(&clean_mask); + free_caam_qi_pcpu_netdev(clean_mask); +fail_fqs: + free_rsp_fqs(); + qman_delete_cgr_safe(&qipriv.cgr); + qman_release_cgrid(qipriv.cgr.cgrid); +fail_cgr: +free_cpumask: + free_cpumask_var(clean_mask); +fail_cpumask: return err; } diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 9810edbb272d2b..af018afd9cd7fc 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -910,7 +910,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) sev->int_rcvd = 0; - reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd); + + /* + * If invoked during panic handling, local interrupts are disabled so + * the PSP command completion interrupt can't be used. + * sev_wait_cmd_ioc() already checks for interrupts disabled and + * polls for PSP command completion. Ensure we do not request an + * interrupt from the PSP if irqs disabled. + */ + if (!irqs_disabled()) + reg |= SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); /* wait for command completion */ @@ -1629,8 +1640,6 @@ static int sev_update_firmware(struct device *dev) if (ret) dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); - else - dev_info(dev, "SEV firmware update successful\n"); __free_pages(p, order); @@ -2382,6 +2391,7 @@ void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; struct sev_platform_init_args args = {0}; + u8 api_major, api_minor, build; int rc; if (!sev) @@ -2392,9 +2402,19 @@ void sev_pci_init(void) if (sev_get_api_version()) goto err; + api_major = sev->api_major; + api_minor = sev->api_minor; + build = sev->build; + if (sev_update_firmware(sev->dev) == 0) sev_get_api_version(); + if (api_major != sev->api_major || api_minor != sev->api_minor || + build != sev->build) + dev_info(sev->dev, "SEV firmware updated from %d.%d.%d to %d.%d.%d\n", + api_major, api_minor, build, + sev->api_major, sev->api_minor, sev->build); + /* Initialize the platform */ args.probe = true; rc = sev_platform_init(&args); @@ -2410,6 +2430,8 @@ void sev_pci_init(void) return; err: + sev_dev_destroy(psp_master); + psp_master->sev_data = NULL; } diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 0895de823674ea..6f9d7063257d70 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -138,7 +138,6 @@ struct sp_device *sp_alloc_struct(struct device *dev); int sp_init(struct sp_device *sp); void sp_destroy(struct sp_device *sp); -struct sp_device *sp_get_master(void); int sp_suspend(struct sp_device *sp); int sp_resume(struct sp_device *sp); diff --git a/drivers/crypto/gemini/sl3516-ce.h b/drivers/crypto/gemini/sl3516-ce.h index 9e1a7e7f896120..56b844d0cd9c2d 100644 --- a/drivers/crypto/gemini/sl3516-ce.h +++ b/drivers/crypto/gemini/sl3516-ce.h @@ 
-326,8 +326,6 @@ struct sl3516_ce_alg_template { unsigned long stat_bytes; }; -int sl3516_ce_enqueue(struct crypto_async_request *areq, u32 type); - int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int sl3516_ce_cipher_init(struct crypto_tfm *tfm); diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 764532a6ca828d..c167dbd6c7d623 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -575,7 +575,9 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) do { atomic64_inc(&dfx[HPRE_SEND_CNT].value); + spin_lock_bh(&ctx->req_lock); ret = hisi_qp_send(ctx->qp, msg); + spin_unlock_bh(&ctx->req_lock); if (ret != -EBUSY) break; atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value); diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 10aa4da93323f1..6b536ad2ada52a 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -13,9 +13,7 @@ #include #include "hpre.h" -#define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) -#define HPRE_COMM_CNT_CLR_CE 0x0 #define HPRE_CTRL_CNT_CLR_CE 0x301000 #define HPRE_FSM_MAX_CNT 0x301008 #define HPRE_VFG_AXQOS 0x30100c @@ -42,7 +40,6 @@ #define HPRE_HAC_INT_SET 0x301500 #define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 -#define HPRE_CORE_INT_DISABLE GENMASK(21, 0) #define HPRE_RDCHN_INI_ST 0x301a00 #define HPRE_CLSTR_BASE 0x302000 #define HPRE_CORE_EN_OFFSET 0x04 @@ -66,7 +63,6 @@ #define HPRE_CLSTR_ADDR_INTRVL 0x1000 #define HPRE_CLUSTER_INQURY 0x100 #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 -#define HPRE_TIMEOUT_ABNML_BIT 6 #define HPRE_PASID_EN_BIT 9 #define HPRE_REG_RD_INTVRL_US 10 #define HPRE_REG_RD_TMOUT_US 1000 @@ -203,9 +199,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, - {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE}, - {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, - {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, + {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E}, + {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E}, + {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E}, {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, @@ -358,6 +354,8 @@ static struct dfx_diff_registers hpre_diff_regs[] = { }, }; +static const struct hisi_qm_err_ini hpre_err_ini; + bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; @@ -654,11 +652,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE); writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG); - /* HPRE need more time, we close this interrupt */ - val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); - val |= BIT(HPRE_TIMEOUT_ABNML_BIT); - writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); - if (qm->ver >= QM_HW_V3) writel(HPRE_RSA_ENB | HPRE_ECC_ENB, qm->io_base + 
HPRE_TYPES_ENB); @@ -667,9 +660,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); writel(0x0, qm->io_base + HPRE_BD_ENDIAN); - writel(0x0, qm->io_base + HPRE_INT_MASK); writel(0x0, qm->io_base + HPRE_POISON_BYPASS); - writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE); writel(0x0, qm->io_base + HPRE_ECC_BYPASS); writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); @@ -759,7 +750,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm) static void hpre_hw_error_enable(struct hisi_qm *qm) { - u32 ce, nfe; + u32 ce, nfe, err_en; ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); @@ -776,7 +767,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm) hpre_master_ooo_ctrl(qm, true); /* enable hpre hw error interrupts */ - writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); + err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE; + writel(~err_en, qm->io_base + HPRE_INT_MASK); } static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) @@ -1161,6 +1153,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; + qm->err_ini = &hpre_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } @@ -1350,8 +1343,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) hpre_open_sva_prefetch(qm); - qm->err_ini = &hpre_err_ini; - qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) @@ -1380,6 +1371,18 @@ static int hpre_probe_init(struct hpre *hpre) return 0; } +static void hpre_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hpre_cnt_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + hpre_show_last_regs_uninit(qm); + hpre_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); +} + static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_qm *qm; @@ -1405,7 +1408,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = hisi_qm_start(qm); if (ret) - goto err_with_err_init; + goto err_with_probe_init; ret = hpre_debugfs_init(qm); if (ret) @@ -1444,9 +1447,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); -err_with_err_init: - hpre_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); +err_with_probe_init: + hpre_probe_uninit(qm); err_with_qm_init: hisi_qm_uninit(qm); @@ -1468,13 +1470,7 @@ static void hpre_remove(struct pci_dev *pdev) hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); - if (qm->fun_type == QM_HW_PF) { - hpre_cnt_regs_clear(qm); - qm->debug.curr_qm_qp_num = 0; - hpre_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); - } - + hpre_probe_uninit(qm); hisi_qm_uninit(qm); } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f614fd228b5624..07983af9e3e229 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -450,6 +450,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { }; static void qm_irqs_unregister(struct hisi_qm *qm); +static int qm_reset_device(struct hisi_qm *qm); static u32 qm_get_hw_error_status(struct hisi_qm *qm) { @@ -4014,6 +4015,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set) return -ETIMEDOUT; } +static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +{ + u32 
nfe_enb = 0; + + /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ + if (qm->ver >= QM_HW_V3) + return; + + if (!qm->err_status.is_dev_ecc_mbit && + qm->err_status.is_qm_ecc_mbit && + qm->err_ini->close_axi_master_ooo) { + qm->err_ini->close_axi_master_ooo(qm); + } else if (qm->err_status.is_dev_ecc_mbit && + !qm->err_status.is_qm_ecc_mbit && + !qm->err_ini->close_axi_master_ooo) { + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + } +} + static int qm_vf_reset_prepare(struct hisi_qm *qm, enum qm_stop_reason stop_reason) { @@ -4078,6 +4101,8 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return ret; } + qm_dev_ecc_mbit_handle(qm); + /* PF obtains the information of VF by querying the register. */ qm_cmd_uninit(qm); @@ -4108,33 +4133,26 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return 0; } -static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +static int qm_master_ooo_check(struct hisi_qm *qm) { - u32 nfe_enb = 0; + u32 val; + int ret; - /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ - if (qm->ver >= QM_HW_V3) - return; + /* Check the ooo register of the device before resetting the device. */ + writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, + val, (val == ACC_MASTER_TRANS_RETURN_RW), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) + pci_warn(qm->pdev, "Bus lock! Please reset system.\n"); - if (!qm->err_status.is_dev_ecc_mbit && - qm->err_status.is_qm_ecc_mbit && - qm->err_ini->close_axi_master_ooo) { - qm->err_ini->close_axi_master_ooo(qm); - } else if (qm->err_status.is_dev_ecc_mbit && - !qm->err_status.is_qm_ecc_mbit && - !qm->err_ini->close_axi_master_ooo) { - nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); - writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, - qm->io_base + QM_RAS_NFE_ENABLE); - writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); - } + return ret; } -static int qm_soft_reset(struct hisi_qm *qm) +static int qm_soft_reset_prepare(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val; /* Ensure all doorbells and mailboxes received by QM */ ret = qm_check_req_recv(qm); @@ -4155,30 +4173,23 @@ static int qm_soft_reset(struct hisi_qm *qm) return ret; } - qm_dev_ecc_mbit_handle(qm); - - /* OOO register set and check */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - /* If bus lock, reset chip */ - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - } if (qm->err_ini->close_sva_prefetch) qm->err_ini->close_sva_prefetch(qm); ret = qm_set_pf_mse(qm, false); - if (ret) { + if (ret) pci_err(pdev, "Fails to disable pf MSE bit.\n"); - return ret; - } + + return ret; +} + +static int qm_reset_device(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; /* The reset related sub-control registers are not in PCI BAR */ if (ACPI_HANDLE(&pdev->dev)) { @@ -4197,12 +4208,23 @@ static int qm_soft_reset(struct hisi_qm *qm) pci_err(pdev, "Reset step %llu failed!\n", value); return -EIO; } - } else { - pci_err(pdev, "No reset method!\n"); - return -EINVAL; + + return 0; } - return 0; + pci_err(pdev, "No reset method!\n"); + return -EINVAL; +} + +static int qm_soft_reset(struct hisi_qm *qm) +{ + int ret; + + ret = qm_soft_reset_prepare(qm); + if (ret) + return ret; + + return qm_reset_device(qm); } static int qm_vf_reset_done(struct hisi_qm *qm) @@ -5155,6 +5177,35 @@ static int qm_get_pci_res(struct hisi_qm *qm) return ret; } +static int qm_clear_device(struct hisi_qm *qm) +{ + acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + /* Device does not support reset, return */ + if (!qm->err_ini->err_info_init) + return 0; + qm->err_ini->err_info_init(qm); + + if (!handle) + return 0; + + /* No reset method, return */ + if (!acpi_has_method(handle, qm->err_info.acpi_rst)) + return 0; + + ret = qm_master_ooo_check(qm); + if (ret) { + writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + return ret; + } + + return qm_reset_device(qm); +} + static int hisi_qm_pci_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5184,8 +5235,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) goto err_get_pci_res; } + ret = qm_clear_device(qm); + if (ret) + goto err_free_vectors; + return 0; +err_free_vectors: + pci_free_irq_vectors(pdev); err_get_pci_res: qm_put_pci_res(qm); err_disable_pcidev: @@ -5486,7 +5543,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val; ret = qm->ops->set_msi(qm, false); if (ret) { @@ -5494,18 +5550,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) return ret; } - /* shutdown OOO register */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - } ret = qm_set_pf_mse(qm, false); if (ret) diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index afdddf87cc348a..9bafcc5aa404f5 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -458,7 +458,7 @@ static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[]) static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask) { if (hash_mask & SEC_HASH_IPV4_MASK) { - dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n "); + dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n"); return -EINVAL; } diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 75aad04ffe5eb9..c35533d8930b21 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1065,9 +1065,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret; - qm->err_ini = &sec_err_ini; - qm->err_ini->err_info_init(qm); - ret = sec_set_user_domain_and_cache(qm); if (ret) return ret; @@ -1122,6 +1119,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &sec_devices; + qm->err_ini = &sec_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1186,6 +1184,12 @@ static int sec_probe_init(struct sec_dev *sec) static void sec_probe_uninit(struct hisi_qm *qm) { + if (qm->fun_type == QM_HW_VF) + return; + + sec_debug_regs_clear(qm); + sec_show_last_regs_uninit(qm); + sec_close_sva_prefetch(qm); hisi_qm_dev_err_uninit(qm); } @@ -1274,7 +1278,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: - sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); err_qm_uninit: sec_qm_uninit(qm); @@ -1296,11 +1299,6 @@ static void sec_remove(struct pci_dev *pdev) sec_debugfs_exit(qm); (void)hisi_qm_stop(qm, QM_NORMAL); - - if (qm->fun_type == QM_HW_PF) - sec_debug_regs_clear(qm); - sec_show_last_regs_uninit(qm); - sec_probe_uninit(qm); sec_qm_uninit(qm); diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 568acd0aee3fa5..c974f95cd126fd 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -225,7 +225,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, dma_addr_t curr_sgl_dma = 0; struct acc_hw_sge *curr_hw_sge; struct scatterlist *sg; - int sg_n; + int sg_n, ret; if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count) return ERR_PTR(-EINVAL); @@ -240,14 +240,15 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, if (sg_n_mapped > pool->sge_nr) { dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n"); - return ERR_PTR(-EINVAL); + ret = -EINVAL; + goto err_unmap; } curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); if (IS_ERR(curr_hw_sgl)) { dev_err(dev, "Get SGL error!\n"); - dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto err_unmap; } curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr); curr_hw_sge = curr_hw_sgl->sge_entries; @@ -262,6 +263,11 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, *hw_sgl_dma = curr_sgl_dma; return curr_hw_sgl; + +err_unmap: + dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); 
+ + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl); diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c index 451b167bcc73dc..66c551ecdee80f 100644 --- a/drivers/crypto/hisilicon/trng/trng.c +++ b/drivers/crypto/hisilicon/trng/trng.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ +#include #include #include #include @@ -13,7 +14,6 @@ #include #include #include -#include #define HISI_TRNG_REG 0x00F0 #define HISI_TRNG_BYTES 4 @@ -121,7 +121,7 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src, u32 i; if (dlen > SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES || dlen == 0) { - pr_err("dlen(%d) exceeds limit(%d)!\n", dlen, + pr_err("dlen(%u) exceeds limit(%d)!\n", dlen, SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES); return -EINVAL; } diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 94e2d66b04b65d..7327f8f29b0138 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -54,7 +54,7 @@ struct hisi_zip_req { struct hisi_zip_req_q { struct hisi_zip_req *q; unsigned long *req_bitmap; - rwlock_t req_lock; + spinlock_t req_lock; u16 size; }; @@ -116,17 +116,17 @@ static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx, struct hisi_zip_req *req_cache; int req_id; - write_lock(&req_q->req_lock); + spin_lock(&req_q->req_lock); req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); if (req_id >= req_q->size) { - write_unlock(&req_q->req_lock); + spin_unlock(&req_q->req_lock); dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); return ERR_PTR(-EAGAIN); } set_bit(req_id, req_q->req_bitmap); - write_unlock(&req_q->req_lock); + spin_unlock(&req_q->req_lock); req_cache = q + req_id; req_cache->req_id = req_id; @@ -140,9 +140,9 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, { struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - write_lock(&req_q->req_lock); + spin_lock(&req_q->req_lock); clear_bit(req->req_id, req_q->req_bitmap); - write_unlock(&req_q->req_lock); + spin_unlock(&req_q->req_lock); } static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) @@ -213,6 +213,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, { struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct acomp_req *a_req = req->req; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; @@ -244,7 +245,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, /* send command to start a task */ atomic64_inc(&dfx->send_cnt); + spin_lock_bh(&req_q->req_lock); ret = hisi_qp_send(qp, &zip_sqe); + spin_unlock_bh(&req_q->req_lock); if (unlikely(ret < 0)) { atomic64_inc(&dfx->send_busy_cnt); ret = -EAGAIN; @@ -456,7 +459,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) goto err_free_comp_q; } - rwlock_init(&req_q->req_lock); + spin_lock_init(&req_q->req_lock); req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req), GFP_KERNEL); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 7c2d803886fdea..d07e47b48be06a 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1141,8 +1141,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) hisi_zip->ctrl = ctrl; ctrl->hisi_zip = hisi_zip; - qm->err_ini = 
&hisi_zip_err_ini; - qm->err_ini->err_info_init(qm); ret = hisi_zip_set_user_domain_and_cache(qm); if (ret) @@ -1203,6 +1201,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &zip_devices; + qm->err_ini = &hisi_zip_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1269,6 +1268,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) return 0; } +static void hisi_zip_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hisi_zip_show_last_regs_uninit(qm); + hisi_zip_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); +} + static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_zip *hisi_zip; @@ -1295,7 +1304,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = hisi_qm_start(qm); if (ret) - goto err_dev_err_uninit; + goto err_probe_uninit; ret = hisi_zip_debugfs_init(qm); if (ret) @@ -1334,9 +1343,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); -err_dev_err_uninit: - hisi_zip_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); +err_probe_uninit: + hisi_zip_probe_uninit(qm); err_qm_uninit: hisi_zip_qm_uninit(qm); @@ -1358,8 +1366,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); - hisi_zip_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); + hisi_zip_probe_uninit(qm); hisi_zip_qm_uninit(qm); } diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index d269036bdaa39f..7e93159c3b6b96 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -987,31 +987,23 @@ static int img_hash_probe(struct platform_device *pdev) } dev_dbg(dev, "using IRQ channel %d\n", irq); - hdev->hash_clk = devm_clk_get(&pdev->dev, "hash"); + hdev->hash_clk = devm_clk_get_enabled(&pdev->dev, "hash"); if (IS_ERR(hdev->hash_clk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(hdev->hash_clk); goto res_err; } - hdev->sys_clk = devm_clk_get(&pdev->dev, "sys"); + hdev->sys_clk = devm_clk_get_enabled(&pdev->dev, "sys"); if (IS_ERR(hdev->sys_clk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(hdev->sys_clk); goto res_err; } - err = clk_prepare_enable(hdev->hash_clk); - if (err) - goto res_err; - - err = clk_prepare_enable(hdev->sys_clk); - if (err) - goto clk_err; - err = img_hash_dma_init(hdev); if (err) - goto dma_err; + goto res_err; dev_dbg(dev, "using %s for DMA transfers\n", dma_chan_name(hdev->dma_lch)); @@ -1032,10 +1024,6 @@ static int img_hash_probe(struct platform_device *pdev) list_del(&hdev->list); spin_unlock(&img_hash.lock); dma_release_channel(hdev->dma_lch); -dma_err: - clk_disable_unprepare(hdev->sys_clk); -clk_err: - clk_disable_unprepare(hdev->hash_clk); res_err: tasklet_kill(&hdev->done_task); tasklet_kill(&hdev->dma_task); @@ -1058,9 +1046,6 @@ static void img_hash_remove(struct platform_device *pdev) tasklet_kill(&hdev->dma_task); dma_release_channel(hdev->dma_lch); - - clk_disable_unprepare(hdev->hash_clk); - clk_disable_unprepare(hdev->sys_clk); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index d0059ce954dd8c..0c79ad78d1c0a1 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ 
b/drivers/crypto/inside-secure/safexcel.h @@ -897,7 +897,6 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, int safexcel_select_ring(struct safexcel_crypto_priv *priv); void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, struct safexcel_desc_ring *ring); -void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring); void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, struct safexcel_desc_ring *ring); struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 42677f7458b787..919e5a2cab95e3 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -5,7 +5,7 @@ * Antoine Tenart */ -#include +#include #include #include #include diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index e810d286ee8c42..237f8700007021 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -495,10 +495,10 @@ static void remove_device_compression_modes(struct iaa_device *iaa_device) if (!device_mode) continue; - free_device_compression_mode(iaa_device, device_mode); - iaa_device->compression_modes[i] = NULL; if (iaa_compression_modes[i]->free) iaa_compression_modes[i]->free(device_mode); + free_device_compression_mode(iaa_device, device_mode); + iaa_device->compression_modes[i] = NULL; } } diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c index c2dfca73fe4ea3..e54c79890d44f5 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c @@ -1150,6 +1150,7 @@ static const struct of_device_id kmb_ocs_hcu_of_match[] = { }, {} }; +MODULE_DEVICE_TABLE(of, kmb_ocs_hcu_of_match); static void kmb_ocs_hcu_remove(struct platform_device *pdev) { diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index 2a3598409eeb51..f49818a13013a3 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -163,7 +163,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; @@ -177,7 +177,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index d26564cebdec4a..659905e4595034 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -165,7 +165,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err: adf_cleanup_accel(accel_dev); return ret; @@ -179,7 +179,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index 
956a4c85609a95..4d18057745d444 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c index a8de9cd09c05a2..f0023cfb234cdd 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c @@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev) return; } adf_flush_vf_wq(accel_dev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index ad0ca438499852..e6b5de55434ec1 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c index 53b8ddb6336419..2bd5b0ff00e369 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c @@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev) return; } adf_flush_vf_wq(accel_dev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 04260f61d04294..ec7913ab00a2c7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -44,7 +44,7 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, adf_pf2vf_notify_restarting(accel_dev); adf_pf2vf_wait_for_restarting_complete(accel_dev); pci_clear_master(pdev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); return PCI_ERS_RESULT_NEED_RESET; } diff --git 
a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c index 2cf102ad4ca82d..b0fc453fa3fb51 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c @@ -100,6 +100,8 @@ void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev) } static void adf_cfg_section_del_all(struct list_head *head); +static void adf_cfg_section_del_all_except(struct list_head *head, + const char *section_name); void adf_cfg_del_all(struct adf_accel_dev *accel_dev) { @@ -111,6 +113,17 @@ void adf_cfg_del_all(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); } +void adf_cfg_del_all_except(struct adf_accel_dev *accel_dev, + const char *section_name) +{ + struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; + + down_write(&dev_cfg_data->lock); + adf_cfg_section_del_all_except(&dev_cfg_data->sec_list, section_name); + up_write(&dev_cfg_data->lock); + clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); +} + /** * adf_cfg_dev_remove() - Clears acceleration device configuration table. * @accel_dev: Pointer to acceleration device. @@ -185,6 +198,22 @@ static void adf_cfg_section_del_all(struct list_head *head) } } +static void adf_cfg_section_del_all_except(struct list_head *head, + const char *section_name) +{ + struct list_head *list, *tmp; + struct adf_cfg_section *ptr; + + list_for_each_prev_safe(list, tmp, head) { + ptr = list_entry(list, struct adf_cfg_section, list); + if (!strcmp(ptr->name, section_name)) + continue; + adf_cfg_keyval_del_all(&ptr->param_head); + list_del(list); + kfree(ptr); + } +} + static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s, const char *key) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h index c0c9052b22135c..2afa6f0d15c511 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.h @@ -35,6 +35,8 @@ void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev); void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev); int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name); void adf_cfg_del_all(struct adf_accel_dev *accel_dev); +void adf_cfg_del_all_except(struct adf_accel_dev *accel_dev, + const char *section_name); int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, const char *key, const void *val, diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 3bec9e20bad0a3..f7ecabdf7805db 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -56,7 +56,7 @@ int adf_service_register(struct service_hndl *service); int adf_service_unregister(struct service_hndl *service); int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config); -int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config); +int adf_dev_down(struct adf_accel_dev *accel_dev); int adf_dev_restart(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c index 26a1662fafbb07..70fa0f6497a968 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c @@ -247,7 +247,7 @@ static void adf_ctl_stop_devices(u32 id) if 
(!accel_dev->is_vf) continue; - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); } } @@ -256,7 +256,7 @@ static void adf_ctl_stop_devices(u32 id) if (!adf_dev_started(accel_dev)) continue; - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); } } } @@ -319,7 +319,7 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, if (ret) { dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", ctl_data->device_id); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); } out: kfree(ctl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 8b10926cedbac2..e8c53bd76f1bd2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -83,7 +83,7 @@ #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) /* Ring interrupt */ -#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2) +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0) #define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0) #define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4 #define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC) diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 74f0818c070348..f189cce7d15358 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -323,6 +323,8 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) if (hw_data->stop_timer) hw_data->stop_timer(accel_dev); + hw_data->disable_iov(accel_dev); + if (wait) msleep(100); @@ -386,16 +388,14 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) adf_tl_shutdown(accel_dev); - hw_data->disable_iov(accel_dev); - if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { hw_data->free_irq(accel_dev); clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); } - /* Delete configuration only if not restarting */ + /* If not restarting, delete all cfg sections except for GENERAL */ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) - adf_cfg_del_all(accel_dev); + adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC); if (hw_data->exit_arb) hw_data->exit_arb(accel_dev); @@ -445,33 +445,7 @@ void adf_error_notifier(struct adf_accel_dev *accel_dev) } } -static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - - adf_dev_stop(accel_dev); - adf_dev_shutdown(accel_dev); - - if (!ret) { - ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); - if (ret) - return ret; - - ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, - services, ADF_STR); - if (ret) - return ret; - } - - return 0; -} - -int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig) +int adf_dev_down(struct adf_accel_dev *accel_dev) { int ret = 0; @@ -480,15 +454,9 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig) mutex_lock(&accel_dev->state_lock); - if (reconfig) { - ret = adf_dev_shutdown_cache_cfg(accel_dev); - goto out; - } - adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); -out: mutex_unlock(&accel_dev->state_lock); return ret; } @@ -535,7 +503,7 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev) if (!accel_dev) return -EFAULT; - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); ret = adf_dev_up(accel_dev, false); /* if 
device is already up return success*/ diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c index 0e31f4b41844e0..0cee3b23dee90b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c @@ -18,14 +18,17 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n"); for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { - vf->restarting = false; + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) + vf->restarting = true; + else + vf->restarting = false; + if (!vf->init) continue; + if (adf_send_pf2vf_msg(accel_dev, i, msg)) dev_err(&GET_DEV(accel_dev), "Failed to send restarting msg to VF%d\n", i); - else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) - vf->restarting = true; } } diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c index 1141258db4b65a..10c91e56d6be3b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c @@ -48,6 +48,20 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown); +void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE }; + + /* Check compatibility version */ + if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK) + return; + + if (adf_send_vf2pf_msg(accel_dev, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send Restarting complete event to PF\n"); +} +EXPORT_SYMBOL_GPL(adf_vf2pf_notify_restart_complete); + int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) { u8 pf_version; diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h index 71bc0e3f1d9335..d79340ab3134ff 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h @@ -6,6 +6,7 @@ #if defined(CONFIG_PCI_IOV) int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev); void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev); +void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev); int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev); int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev); int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index 8d645e7e04aa55..c75d0b6cb0ada3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -86,11 +86,133 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) return pci_enable_sriov(pdev, totalvfs); } +static int adf_add_sriov_configuration(struct adf_accel_dev *accel_dev) +{ + unsigned long val = 0; + int ret; + + ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); + if (ret) + return ret; + + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + return ret; + + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + return ret; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + return ret; +} + +static int adf_do_disable_sriov(struct adf_accel_dev 
*accel_dev) +{ + int ret; + + if (adf_dev_in_use(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Cannot disable SR-IOV, device in use\n"); + return -EBUSY; + } + + if (adf_dev_started(accel_dev)) { + if (adf_devmgr_in_reset(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Cannot disable SR-IOV, device in reset\n"); + return -EBUSY; + } + + ret = adf_dev_down(accel_dev); + if (ret) + goto err_del_cfg; + } + + adf_disable_sriov(accel_dev); + + ret = adf_dev_up(accel_dev, true); + if (ret) + goto err_del_cfg; + + return 0; + +err_del_cfg: + adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC); + return ret; +} + +static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + int totalvfs = pci_sriov_get_totalvfs(pdev); + unsigned long val; + int ret; + + if (!device_iommu_mapped(&GET_DEV(accel_dev))) { + dev_warn(&GET_DEV(accel_dev), + "IOMMU should be enabled for SR-IOV to work correctly\n"); + return -EINVAL; + } + + if (adf_dev_started(accel_dev)) { + if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Device busy\n"); + return -EBUSY; + } + + ret = adf_dev_down(accel_dev); + if (ret) + return ret; + } + + ret = adf_add_sriov_configuration(accel_dev); + if (ret) + goto err_del_cfg; + + /* Allocate memory for VF info structs */ + accel_dev->pf.vf_info = kcalloc(totalvfs, sizeof(struct adf_accel_vf_info), + GFP_KERNEL); + ret = -ENOMEM; + if (!accel_dev->pf.vf_info) + goto err_del_cfg; + + ret = adf_dev_up(accel_dev, false); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", + accel_dev->accel_id); + goto err_free_vf_info; + } + + ret = adf_enable_sriov(accel_dev); + if (ret) + goto err_free_vf_info; + + val = 1; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, + &val, ADF_DEC); + if (ret) + goto err_free_vf_info; + + return totalvfs; + +err_free_vf_info: + adf_dev_down(accel_dev); + kfree(accel_dev->pf.vf_info); + accel_dev->pf.vf_info = NULL; + return ret; +err_del_cfg: + adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC); + return ret; +} + void adf_reenable_sriov(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - unsigned long val = 0; if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, cfg)) @@ -99,15 +221,9 @@ void adf_reenable_sriov(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) return; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC)) - return; - - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC)) + if (adf_add_sriov_configuration(accel_dev)) return; - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); dev_dbg(&pdev->dev, "Re-enabling SRIOV\n"); adf_enable_sriov(accel_dev); } @@ -168,70 +284,16 @@ EXPORT_SYMBOL_GPL(adf_disable_sriov); int adf_sriov_configure(struct pci_dev *pdev, int numvfs) { struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); - int totalvfs = pci_sriov_get_totalvfs(pdev); - unsigned long val; - int ret; if (!accel_dev) { dev_err(&pdev->dev, "Failed to find accel_dev\n"); return -EFAULT; } - if (!device_iommu_mapped(&pdev->dev)) - dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n"); - - if (accel_dev->pf.vf_info) { - dev_info(&pdev->dev, "Already enabled for this device\n"); - return -EINVAL; - } - - if (adf_dev_started(accel_dev)) { - if 
(adf_devmgr_in_reset(accel_dev) || - adf_dev_in_use(accel_dev)) { - dev_err(&GET_DEV(accel_dev), "Device busy\n"); - return -EBUSY; - } - - ret = adf_dev_down(accel_dev, true); - if (ret) - return ret; - } - - if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) - return -EFAULT; - val = 0; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - ADF_NUM_CY, (void *)&val, ADF_DEC)) - return -EFAULT; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - return ret; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - - /* Allocate memory for VF info structs */ - accel_dev->pf.vf_info = kcalloc(totalvfs, - sizeof(struct adf_accel_vf_info), - GFP_KERNEL); - if (!accel_dev->pf.vf_info) - return -ENOMEM; - - if (adf_dev_up(accel_dev, false)) { - dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", - accel_dev->accel_id); - return -EFAULT; - } - - ret = adf_enable_sriov(accel_dev); - if (ret) - return ret; - - val = 1; - adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, - &val, ADF_DEC); - - return numvfs; + if (numvfs) + return adf_do_enable_sriov(accel_dev); + else + return adf_do_disable_sriov(accel_dev); } EXPORT_SYMBOL_GPL(adf_sriov_configure); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 4e7f70d4049d35..4fcd61ff70d129 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -62,7 +62,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, break; } - ret = adf_dev_down(accel_dev, true); + ret = adf_dev_down(accel_dev); if (ret) return ret; @@ -76,7 +76,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, } else if (ret) { dev_err(dev, "Failed to start device qat_dev%d\n", accel_id); - adf_dev_down(accel_dev, true); + adf_dev_down(accel_dev); return ret; } break; diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index cdbb2d687b1b0d..a4636ec9f9cac0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -13,6 +13,7 @@ #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_cfg_common.h" +#include "adf_pfvf_vf_msg.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" @@ -71,10 +72,11 @@ static void adf_dev_stop_async(struct work_struct *work) struct adf_accel_dev *accel_dev = stop_data->accel_dev; adf_dev_restarting_notify(accel_dev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); /* Re-enable PF2VF interrupts */ adf_enable_pf2vf_interrupts(accel_dev); + adf_vf2pf_notify_restart_complete(accel_dev); kfree(stop_data); } diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index 85bc32a9ec0eb3..3f5b790154008f 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -23,6 +23,8 @@ struct qat_alg_buf_list { ); struct qat_alg_buf buffers[]; } __packed; +static_assert(offsetof(struct qat_alg_buf_list, buffers) == sizeof(struct qat_alg_buf_list_hdr), + "struct member likely outside of __struct_group()"); struct qat_alg_fixed_buf_list { struct qat_alg_buf_list_hdr sgl_hdr; diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index ad2c64af7427ee..7ea40b4f6e5b4f 100644 --- 
a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -58,7 +58,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) unsigned int i; if (!ae_data) { - pr_err("QAT: bad argument, ae_data is NULL\n "); + pr_err("QAT: bad argument, ae_data is NULL\n"); return -EINVAL; } diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 40b456b8035b5a..2a50cce415151b 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev) pr_err("QAT: Driver removal failed\n"); return; } - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c index d59cb1ba2ad599..7cb015b5512234 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c @@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; out_err_dev_stop: - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); out_err_free_reg: pci_release_regions(accel_pci_dev->pci_dev); out_err_disable: @@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev) return; } adf_flush_vf_wq(accel_dev); - adf_dev_down(accel_dev, false); + adf_dev_down(accel_dev); adf_cleanup_accel(accel_dev); adf_cleanup_pci_dev(accel_dev); kfree(accel_dev); diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig index a48591af12d025..78217577aa5403 100644 --- a/drivers/crypto/marvell/Kconfig +++ b/drivers/crypto/marvell/Kconfig @@ -28,6 +28,7 @@ config CRYPTO_DEV_OCTEONTX_CPT select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD + select CRYPTO_AUTHENC select CRYPTO_DEV_MARVELL help This driver allows you to utilize the Marvell Cryptographic @@ -47,6 +48,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD + select CRYPTO_AUTHENC select NET_DEVLINK help This driver allows you to utilize the Marvell Cryptographic diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 3c5d577d8f0d5e..096be42e9d0319 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include "otx_cptvf.h" @@ -66,6 +65,8 @@ static struct cpt_device_table ae_devices = { .count = ATOMIC_INIT(0) }; +static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg); + static inline int get_se_device(struct pci_dev **pdev, int *cpu_num) { int count, ret = 0; @@ -509,44 +510,61 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type) ctx->cipher_type = cipher_type; ctx->mac_type = mac_type; + switch (ctx->mac_type) { + case OTX_CPT_SHA1: + ctx->hashalg = crypto_alloc_shash("sha1", 0, 0); + break; + + case OTX_CPT_SHA256: + ctx->hashalg = crypto_alloc_shash("sha256", 0, 0); + break; + + case OTX_CPT_SHA384: + 
ctx->hashalg = crypto_alloc_shash("sha384", 0, 0); + break; + + case OTX_CPT_SHA512: + ctx->hashalg = crypto_alloc_shash("sha512", 0, 0); + break; + } + + if (IS_ERR(ctx->hashalg)) + return PTR_ERR(ctx->hashalg); + + crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx)); + + if (!ctx->hashalg) + return 0; + /* * When selected cipher is NULL we use HMAC opcode instead of * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms * for calculating ipad and opad */ if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) { - switch (ctx->mac_type) { - case OTX_CPT_SHA1: - ctx->hashalg = crypto_alloc_shash("sha1", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + int ss = crypto_shash_statesize(ctx->hashalg); - case OTX_CPT_SHA256: - ctx->hashalg = crypto_alloc_shash("sha256", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; - - case OTX_CPT_SHA384: - ctx->hashalg = crypto_alloc_shash("sha384", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + ctx->ipad = kzalloc(ss, GFP_KERNEL); + if (!ctx->ipad) { + crypto_free_shash(ctx->hashalg); + return -ENOMEM; + } - case OTX_CPT_SHA512: - ctx->hashalg = crypto_alloc_shash("sha512", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + ctx->opad = kzalloc(ss, GFP_KERNEL); + if (!ctx->opad) { + kfree(ctx->ipad); + crypto_free_shash(ctx->hashalg); + return -ENOMEM; } } - crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx)); + ctx->sdesc = alloc_sdesc(ctx->hashalg); + if (!ctx->sdesc) { + kfree(ctx->opad); + kfree(ctx->ipad); + crypto_free_shash(ctx->hashalg); + return -ENOMEM; + } return 0; } @@ -602,8 +620,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm) kfree(ctx->ipad); kfree(ctx->opad); - if (ctx->hashalg) - crypto_free_shash(ctx->hashalg); + crypto_free_shash(ctx->hashalg); kfree(ctx->sdesc); } @@ -699,7 +716,7 @@ static inline void swap_data64(void *buf, u32 len) *dst = cpu_to_be64p(src); } -static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) +static int swap_pad(u8 mac_type, u8 *pad) { struct sha512_state *sha512; struct sha256_state *sha256; @@ -707,22 +724,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) switch (mac_type) { case OTX_CPT_SHA1: - sha1 = (struct sha1_state *) in_pad; + sha1 = (struct sha1_state *)pad; swap_data32(sha1->state, SHA1_DIGEST_SIZE); - memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE); break; case OTX_CPT_SHA256: - sha256 = (struct sha256_state *) in_pad; + sha256 = (struct sha256_state *)pad; swap_data32(sha256->state, SHA256_DIGEST_SIZE); - memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE); break; case OTX_CPT_SHA384: case OTX_CPT_SHA512: - sha512 = (struct sha512_state *) in_pad; + sha512 = (struct sha512_state *)pad; swap_data64(sha512->state, SHA512_DIGEST_SIZE); - memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE); break; default: @@ -732,55 +746,53 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) return 0; } -static int aead_hmac_init(struct crypto_aead *cipher) +static int aead_hmac_init(struct crypto_aead *cipher, + struct crypto_authenc_keys *keys) { struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - int state_size = crypto_shash_statesize(ctx->hashalg); int ds = crypto_shash_digestsize(ctx->hashalg); int bs = crypto_shash_blocksize(ctx->hashalg); - int authkeylen = ctx->auth_key_len; + int authkeylen = keys->authkeylen; u8 *ipad = NULL, *opad = NULL; - int ret = 0, 
icount = 0; + int icount = 0; + int ret; - ctx->sdesc = alloc_sdesc(ctx->hashalg); - if (!ctx->sdesc) - return -ENOMEM; + if (authkeylen > bs) { + ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey, + authkeylen, ctx->key); + if (ret) + return ret; + authkeylen = ds; + } else + memcpy(ctx->key, keys->authkey, authkeylen); - ctx->ipad = kzalloc(bs, GFP_KERNEL); - if (!ctx->ipad) { - ret = -ENOMEM; - goto calc_fail; - } + ctx->enc_key_len = keys->enckeylen; + ctx->auth_key_len = authkeylen; - ctx->opad = kzalloc(bs, GFP_KERNEL); - if (!ctx->opad) { - ret = -ENOMEM; - goto calc_fail; - } + if (ctx->cipher_type == OTX_CPT_CIPHER_NULL) + return keys->enckeylen ? -EINVAL : 0; - ipad = kzalloc(state_size, GFP_KERNEL); - if (!ipad) { - ret = -ENOMEM; - goto calc_fail; + switch (keys->enckeylen) { + case AES_KEYSIZE_128: + ctx->key_type = OTX_CPT_AES_128_BIT; + break; + case AES_KEYSIZE_192: + ctx->key_type = OTX_CPT_AES_192_BIT; + break; + case AES_KEYSIZE_256: + ctx->key_type = OTX_CPT_AES_256_BIT; + break; + default: + /* Invalid key length */ + return -EINVAL; } - opad = kzalloc(state_size, GFP_KERNEL); - if (!opad) { - ret = -ENOMEM; - goto calc_fail; - } + memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen); - if (authkeylen > bs) { - ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key, - authkeylen, ipad); - if (ret) - goto calc_fail; - - authkeylen = ds; - } else { - memcpy(ipad, ctx->key, authkeylen); - } + ipad = ctx->ipad; + opad = ctx->opad; + memcpy(ipad, ctx->key, authkeylen); memset(ipad + authkeylen, 0, bs - authkeylen); memcpy(opad, ipad, bs); @@ -798,7 +810,7 @@ static int aead_hmac_init(struct crypto_aead *cipher) crypto_shash_init(&ctx->sdesc->shash); crypto_shash_update(&ctx->sdesc->shash, ipad, bs); crypto_shash_export(&ctx->sdesc->shash, ipad); - ret = copy_pad(ctx->mac_type, ctx->ipad, ipad); + ret = swap_pad(ctx->mac_type, ipad); if (ret) goto calc_fail; @@ -806,25 +818,9 @@ static int aead_hmac_init(struct crypto_aead *cipher) crypto_shash_init(&ctx->sdesc->shash); crypto_shash_update(&ctx->sdesc->shash, opad, bs); crypto_shash_export(&ctx->sdesc->shash, opad); - ret = copy_pad(ctx->mac_type, ctx->opad, opad); - if (ret) - goto calc_fail; - - kfree(ipad); - kfree(opad); - - return 0; + ret = swap_pad(ctx->mac_type, opad); calc_fail: - kfree(ctx->ipad); - ctx->ipad = NULL; - kfree(ctx->opad); - ctx->opad = NULL; - kfree(ipad); - kfree(opad); - kfree(ctx->sdesc); - ctx->sdesc = NULL; - return ret; } @@ -832,57 +828,15 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - struct crypto_authenc_key_param *param; - int enckeylen = 0, authkeylen = 0; - struct rtattr *rta = (void *)key; - int status = -EINVAL; - - if (!RTA_OK(rta, keylen)) - goto badkey; - - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < enckeylen) - goto badkey; - - if (keylen > OTX_CPT_MAX_KEY_SIZE) - goto badkey; - - authkeylen = keylen - enckeylen; - memcpy(ctx->key, key, keylen); - - switch (enckeylen) { - case AES_KEYSIZE_128: - ctx->key_type = OTX_CPT_AES_128_BIT; - break; - case AES_KEYSIZE_192: - ctx->key_type = OTX_CPT_AES_192_BIT; - break; - case AES_KEYSIZE_256: - ctx->key_type = OTX_CPT_AES_256_BIT; - break; - default: - 
/* Invalid key length */ - goto badkey; - } - - ctx->enc_key_len = enckeylen; - ctx->auth_key_len = authkeylen; + struct crypto_authenc_keys authenc_keys; + int status; - status = aead_hmac_init(cipher); + status = crypto_authenc_extractkeys(&authenc_keys, key, keylen); if (status) goto badkey; - return 0; + status = aead_hmac_init(cipher, &authenc_keys); + badkey: return status; } @@ -891,36 +845,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - struct crypto_authenc_key_param *param; - struct rtattr *rta = (void *)key; - int enckeylen = 0; - - if (!RTA_OK(rta, keylen)) - goto badkey; - - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (enckeylen != 0) - goto badkey; - - if (keylen > OTX_CPT_MAX_KEY_SIZE) - goto badkey; - - memcpy(ctx->key, key, keylen); - ctx->enc_key_len = enckeylen; - ctx->auth_key_len = keylen; - return 0; -badkey: - return -EINVAL; + return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen); } static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, @@ -1613,14 +1538,6 @@ static int compare_func(const void *lptr, const void *rptr) return 0; } -static void swap_func(void *lptr, void *rptr, int size) -{ - struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr; - struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr; - - swap(*ldesc, *rdesc); -} - int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, enum otx_cptpf_type pf_type, enum otx_cptvf_type engine_type, @@ -1655,7 +1572,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, is_crypto_registered = true; } sort(se_devices.desc, count, sizeof(struct cpt_device_desc), - compare_func, swap_func); + compare_func, NULL); break; case OTX_CPT_AE_TYPES: @@ -1670,7 +1587,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, ae_devices.desc[count++].dev = pdev; atomic_inc(&ae_devices.count); sort(ae_devices.desc, count, sizeof(struct cpt_device_desc), - compare_func, swap_func); + compare_func, NULL); break; default: diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h index 4181b5c5c356b2..a50b5e2f8d0022 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h @@ -185,6 +185,5 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, int num_queues, int num_devices); void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod, enum otx_cptvf_type engine_type); -void otx_cpt_callback(int status, void *arg, void *req); #endif /* __OTX_CPT_ALGS_H */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index 1604fc58dc13ec..7eb0bc13994d90 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include "otx2_cptvf.h" @@ -55,6 +54,8 @@ static struct cpt_device_table se_devices = { .count = ATOMIC_INIT(0) }; +static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg); + static inline int get_se_device(struct pci_dev **pdev, int *cpu_num) { int count; @@ 
-598,40 +599,56 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type) ctx->cipher_type = cipher_type; ctx->mac_type = mac_type; + switch (ctx->mac_type) { + case OTX2_CPT_SHA1: + ctx->hashalg = crypto_alloc_shash("sha1", 0, 0); + break; + + case OTX2_CPT_SHA256: + ctx->hashalg = crypto_alloc_shash("sha256", 0, 0); + break; + + case OTX2_CPT_SHA384: + ctx->hashalg = crypto_alloc_shash("sha384", 0, 0); + break; + + case OTX2_CPT_SHA512: + ctx->hashalg = crypto_alloc_shash("sha512", 0, 0); + break; + } + + if (IS_ERR(ctx->hashalg)) + return PTR_ERR(ctx->hashalg); + + if (ctx->hashalg) { + ctx->sdesc = alloc_sdesc(ctx->hashalg); + if (!ctx->sdesc) { + crypto_free_shash(ctx->hashalg); + return -ENOMEM; + } + } + /* * When selected cipher is NULL we use HMAC opcode instead of * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms * for calculating ipad and opad */ - if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) { - switch (ctx->mac_type) { - case OTX2_CPT_SHA1: - ctx->hashalg = crypto_alloc_shash("sha1", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; - - case OTX2_CPT_SHA256: - ctx->hashalg = crypto_alloc_shash("sha256", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) { + int ss = crypto_shash_statesize(ctx->hashalg); - case OTX2_CPT_SHA384: - ctx->hashalg = crypto_alloc_shash("sha384", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + ctx->ipad = kzalloc(ss, GFP_KERNEL); + if (!ctx->ipad) { + kfree(ctx->sdesc); + crypto_free_shash(ctx->hashalg); + return -ENOMEM; + } - case OTX2_CPT_SHA512: - ctx->hashalg = crypto_alloc_shash("sha512", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(ctx->hashalg)) - return PTR_ERR(ctx->hashalg); - break; + ctx->opad = kzalloc(ss, GFP_KERNEL); + if (!ctx->opad) { + kfree(ctx->ipad); + kfree(ctx->sdesc); + crypto_free_shash(ctx->hashalg); + return -ENOMEM; } } switch (ctx->cipher_type) { @@ -713,8 +730,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm) kfree(ctx->ipad); kfree(ctx->opad); - if (ctx->hashalg) - crypto_free_shash(ctx->hashalg); + crypto_free_shash(ctx->hashalg); kfree(ctx->sdesc); if (ctx->fbk_cipher) { @@ -788,7 +804,7 @@ static inline void swap_data64(void *buf, u32 len) cpu_to_be64s(src); } -static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) +static int swap_pad(u8 mac_type, u8 *pad) { struct sha512_state *sha512; struct sha256_state *sha256; @@ -796,22 +812,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) switch (mac_type) { case OTX2_CPT_SHA1: - sha1 = (struct sha1_state *) in_pad; + sha1 = (struct sha1_state *)pad; swap_data32(sha1->state, SHA1_DIGEST_SIZE); - memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE); break; case OTX2_CPT_SHA256: - sha256 = (struct sha256_state *) in_pad; + sha256 = (struct sha256_state *)pad; swap_data32(sha256->state, SHA256_DIGEST_SIZE); - memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE); break; case OTX2_CPT_SHA384: case OTX2_CPT_SHA512: - sha512 = (struct sha512_state *) in_pad; + sha512 = (struct sha512_state *)pad; swap_data64(sha512->state, SHA512_DIGEST_SIZE); - memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE); break; default: @@ -821,55 +834,54 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) return 0; } -static int aead_hmac_init(struct crypto_aead *cipher) +static int aead_hmac_init(struct crypto_aead *cipher, + struct 
crypto_authenc_keys *keys) { struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - int state_size = crypto_shash_statesize(ctx->hashalg); int ds = crypto_shash_digestsize(ctx->hashalg); int bs = crypto_shash_blocksize(ctx->hashalg); - int authkeylen = ctx->auth_key_len; + int authkeylen = keys->authkeylen; u8 *ipad = NULL, *opad = NULL; - int ret = 0, icount = 0; + int icount = 0; + int ret; - ctx->sdesc = alloc_sdesc(ctx->hashalg); - if (!ctx->sdesc) - return -ENOMEM; + if (authkeylen > bs) { + ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey, + authkeylen, ctx->key); + if (ret) + goto calc_fail; - ctx->ipad = kzalloc(bs, GFP_KERNEL); - if (!ctx->ipad) { - ret = -ENOMEM; - goto calc_fail; - } + authkeylen = ds; + } else + memcpy(ctx->key, keys->authkey, authkeylen); - ctx->opad = kzalloc(bs, GFP_KERNEL); - if (!ctx->opad) { - ret = -ENOMEM; - goto calc_fail; - } + ctx->enc_key_len = keys->enckeylen; + ctx->auth_key_len = authkeylen; - ipad = kzalloc(state_size, GFP_KERNEL); - if (!ipad) { - ret = -ENOMEM; - goto calc_fail; - } + if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL) + return keys->enckeylen ? -EINVAL : 0; - opad = kzalloc(state_size, GFP_KERNEL); - if (!opad) { - ret = -ENOMEM; - goto calc_fail; + switch (keys->enckeylen) { + case AES_KEYSIZE_128: + ctx->key_type = OTX2_CPT_AES_128_BIT; + break; + case AES_KEYSIZE_192: + ctx->key_type = OTX2_CPT_AES_192_BIT; + break; + case AES_KEYSIZE_256: + ctx->key_type = OTX2_CPT_AES_256_BIT; + break; + default: + /* Invalid key length */ + return -EINVAL; } - if (authkeylen > bs) { - ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key, - authkeylen, ipad); - if (ret) - goto calc_fail; + memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen); - authkeylen = ds; - } else { - memcpy(ipad, ctx->key, authkeylen); - } + ipad = ctx->ipad; + opad = ctx->opad; + memcpy(ipad, ctx->key, authkeylen); memset(ipad + authkeylen, 0, bs - authkeylen); memcpy(opad, ipad, bs); @@ -887,7 +899,7 @@ static int aead_hmac_init(struct crypto_aead *cipher) crypto_shash_init(&ctx->sdesc->shash); crypto_shash_update(&ctx->sdesc->shash, ipad, bs); crypto_shash_export(&ctx->sdesc->shash, ipad); - ret = copy_pad(ctx->mac_type, ctx->ipad, ipad); + ret = swap_pad(ctx->mac_type, ipad); if (ret) goto calc_fail; @@ -895,25 +907,9 @@ static int aead_hmac_init(struct crypto_aead *cipher) crypto_shash_init(&ctx->sdesc->shash); crypto_shash_update(&ctx->sdesc->shash, opad, bs); crypto_shash_export(&ctx->sdesc->shash, opad); - ret = copy_pad(ctx->mac_type, ctx->opad, opad); - if (ret) - goto calc_fail; - - kfree(ipad); - kfree(opad); - - return 0; + ret = swap_pad(ctx->mac_type, opad); calc_fail: - kfree(ctx->ipad); - ctx->ipad = NULL; - kfree(ctx->opad); - ctx->opad = NULL; - kfree(ipad); - kfree(opad); - kfree(ctx->sdesc); - ctx->sdesc = NULL; - return ret; } @@ -921,87 +917,17 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - struct crypto_authenc_key_param *param; - int enckeylen = 0, authkeylen = 0; - struct rtattr *rta = (void *)key; - - if (!RTA_OK(rta, keylen)) - return -EINVAL; + struct crypto_authenc_keys authenc_keys; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - return -EINVAL; - - if (RTA_PAYLOAD(rta) < sizeof(*param)) - return -EINVAL; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < 
enckeylen) - return -EINVAL; - - if (keylen > OTX2_CPT_MAX_KEY_SIZE) - return -EINVAL; - - authkeylen = keylen - enckeylen; - memcpy(ctx->key, key, keylen); - - switch (enckeylen) { - case AES_KEYSIZE_128: - ctx->key_type = OTX2_CPT_AES_128_BIT; - break; - case AES_KEYSIZE_192: - ctx->key_type = OTX2_CPT_AES_192_BIT; - break; - case AES_KEYSIZE_256: - ctx->key_type = OTX2_CPT_AES_256_BIT; - break; - default: - /* Invalid key length */ - return -EINVAL; - } - - ctx->enc_key_len = enckeylen; - ctx->auth_key_len = authkeylen; - - return aead_hmac_init(cipher); + return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?: + aead_hmac_init(cipher, &authenc_keys); } static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, const unsigned char *key, unsigned int keylen) { - struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); - struct crypto_authenc_key_param *param; - struct rtattr *rta = (void *)key; - int enckeylen = 0; - - if (!RTA_OK(rta, keylen)) - return -EINVAL; - - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - return -EINVAL; - - if (RTA_PAYLOAD(rta) < sizeof(*param)) - return -EINVAL; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (enckeylen != 0) - return -EINVAL; - - if (keylen > OTX2_CPT_MAX_KEY_SIZE) - return -EINVAL; - - memcpy(ctx->key, key, keylen); - ctx->enc_key_len = enckeylen; - ctx->auth_key_len = keylen; - - return 0; + return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen); } static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, @@ -1702,14 +1628,6 @@ static int compare_func(const void *lptr, const void *rptr) return 0; } -static void swap_func(void *lptr, void *rptr, int size) -{ - struct cpt_device_desc *ldesc = lptr; - struct cpt_device_desc *rdesc = rptr; - - swap(*ldesc, *rdesc); -} - int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, int num_queues, int num_devices) { @@ -1739,7 +1657,7 @@ int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, is_crypto_registered = true; } sort(se_devices.desc, count, sizeof(struct cpt_device_desc), - compare_func, swap_func); + compare_func, NULL); unlock: mutex_unlock(&mutex); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 251e088a53dff8..b11545cc5cb795 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1353,6 +1353,7 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) ahash->setkey = n2_hmac_async_setkey; base = &ahash->halg.base; + err = -EINVAL; if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg) >= CRYPTO_MAX_ALG_NAME) goto out_free_p; diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 25fa70b2112cff..887d4ce3cb49b1 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -157,6 +157,7 @@ struct nx842_crypto_header_group { } __packed; struct nx842_crypto_header { + /* New members MUST be added within the struct_group() macro below. 
*/ struct_group_tagged(nx842_crypto_header_hdr, hdr, __be16 magic; /* NX842_CRYPTO_MAGIC */ __be16 ignore; /* decompressed end bytes to ignore */ @@ -164,6 +165,8 @@ struct nx842_crypto_header { ); struct nx842_crypto_header_group group[]; } __packed; +static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr), + "struct member likely outside of struct_group_tagged()"); #define NX842_CRYPTO_GROUP_MAX (0x20) diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index c670d7d0c11ea8..09419e79e34c6b 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -36,14 +36,14 @@ struct qcom_rng { void __iomem *base; struct clk *clk; struct hwrng hwrng; - struct qcom_rng_of_data *of_data; + struct qcom_rng_match_data *match_data; }; struct qcom_rng_ctx { struct qcom_rng *rng; }; -struct qcom_rng_of_data { +struct qcom_rng_match_data { bool skip_init; bool hwrng_support; }; @@ -155,7 +155,7 @@ static int qcom_rng_init(struct crypto_tfm *tfm) ctx->rng = qcom_rng_dev; - if (!ctx->rng->of_data->skip_init) + if (!ctx->rng->match_data->skip_init) return qcom_rng_enable(ctx->rng); return 0; @@ -196,7 +196,7 @@ static int qcom_rng_probe(struct platform_device *pdev) if (IS_ERR(rng->clk)) return PTR_ERR(rng->clk); - rng->of_data = (struct qcom_rng_of_data *)of_device_get_match_data(&pdev->dev); + rng->match_data = (struct qcom_rng_match_data *)device_get_match_data(&pdev->dev); qcom_rng_dev = rng; ret = crypto_register_rng(&qcom_rng_alg); @@ -206,7 +206,7 @@ static int qcom_rng_probe(struct platform_device *pdev) return ret; } - if (rng->of_data->hwrng_support) { + if (rng->match_data->hwrng_support) { rng->hwrng.name = "qcom_hwrng"; rng->hwrng.read = qcom_hwrng_read; rng->hwrng.quality = QCOM_TRNG_QUALITY; @@ -231,31 +231,31 @@ static void qcom_rng_remove(struct platform_device *pdev) qcom_rng_dev = NULL; } -static struct qcom_rng_of_data qcom_prng_of_data = { +static struct qcom_rng_match_data qcom_prng_match_data = { .skip_init = false, .hwrng_support = false, }; -static struct qcom_rng_of_data qcom_prng_ee_of_data = { +static struct qcom_rng_match_data qcom_prng_ee_match_data = { .skip_init = true, .hwrng_support = false, }; -static struct qcom_rng_of_data qcom_trng_of_data = { +static struct qcom_rng_match_data qcom_trng_match_data = { .skip_init = true, .hwrng_support = true, }; static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = { - { .id = "QCOM8160", .driver_data = 1 }, + { .id = "QCOM8160", .driver_data = (kernel_ulong_t)&qcom_prng_ee_match_data }, {} }; MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match); static const struct of_device_id __maybe_unused qcom_rng_of_match[] = { - { .compatible = "qcom,prng", .data = &qcom_prng_of_data }, - { .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_of_data }, - { .compatible = "qcom,trng", .data = &qcom_trng_of_data }, + { .compatible = "qcom,prng", .data = &qcom_prng_match_data }, + { .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_match_data }, + { .compatible = "qcom,trng", .data = &qcom_trng_match_data }, {} }; MODULE_DEVICE_TABLE(of, qcom_rng_of_match); diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index a235e6c300f1e5..69d6019d8abcf0 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -9,7 +9,7 @@ * Some ideas are from marvell/cesa.c and s5p-sss.c driver. 
*/ -#include +#include #include #include #include diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index b0cf6d2fd352ff..e0faddbf8990cb 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -17,7 +17,7 @@ #include -#include +#include #define DRIVER_NAME "stm32-crc32" #define CHKSUM_DIGEST_SIZE 4 diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig index 99b5c25be07943..29c192f20082cb 100644 --- a/drivers/cxl/Kconfig +++ b/drivers/cxl/Kconfig @@ -6,7 +6,7 @@ menuconfig CXL_BUS select FW_UPLOAD select PCI_DOE select FIRMWARE_TABLE - select NUMA_KEEP_MEMINFO if (NUMA && X86) + select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS help CXL is a bus that is electrically compatible with PCI Express, but layers three protocols on that signalling (CXL.io, CXL.cache, and diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c index bb83867d9fec98..ef1621d40f0542 100644 --- a/drivers/cxl/core/cdat.c +++ b/drivers/cxl/core/cdat.c @@ -9,13 +9,12 @@ #include "cxlmem.h" #include "core.h" #include "cxl.h" -#include "core.h" struct dsmas_entry { struct range dpa_range; u8 handle; struct access_coordinate coord[ACCESS_COORDINATE_MAX]; - + struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX]; int entries; int qos_class; }; @@ -163,7 +162,7 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg, val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base), dslbis->data_type); - cxl_access_coordinate_set(dent->coord, dslbis->data_type, val); + cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val); return 0; } @@ -220,7 +219,7 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port, xa_for_each(dsmas_xa, index, dent) { int qos_class; - cxl_coordinates_combine(dent->coord, dent->coord, ep_c); + cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c); dent->entries = 1; rc = cxl_root->ops->qos_class(cxl_root, &dent->coord[ACCESS_COORDINATE_CPU], @@ -241,8 +240,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port, static void update_perf_entry(struct device *dev, struct dsmas_entry *dent, struct cxl_dpa_perf *dpa_perf) { - for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { dpa_perf->coord[i] = dent->coord[i]; + dpa_perf->cdat_coord[i] = dent->cdat_coord[i]; + } dpa_perf->dpa_range = dent->dpa_range; dpa_perf->qos_class = dent->qos_class; dev_dbg(dev, @@ -546,19 +547,37 @@ void cxl_coordinates_combine(struct access_coordinate *out, MODULE_IMPORT_NS(CXL); -void cxl_region_perf_data_calculate(struct cxl_region *cxlr, - struct cxl_endpoint_decoder *cxled) +static void cxl_bandwidth_add(struct access_coordinate *coord, + struct access_coordinate *c1, + struct access_coordinate *c2) +{ + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + coord[i].read_bandwidth = c1[i].read_bandwidth + + c2[i].read_bandwidth; + coord[i].write_bandwidth = c1[i].write_bandwidth + + c2[i].write_bandwidth; + } +} + +static bool dpa_perf_contains(struct cxl_dpa_perf *perf, + struct resource *dpa_res) { - struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); struct range dpa = { - .start = cxled->dpa_res->start, - .end = cxled->dpa_res->end, + .start = dpa_res->start, + .end = dpa_res->end, }; + + return range_contains(&perf->dpa_range, &dpa); +} + +static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled, + enum 
cxl_decoder_mode mode) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); struct cxl_dpa_perf *perf; - switch (cxlr->mode) { + switch (mode) { case CXL_DECODER_RAM: perf = &mds->ram_perf; break; @@ -566,12 +585,473 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr, perf = &mds->pmem_perf; break; default: + return ERR_PTR(-EINVAL); + } + + if (!dpa_perf_contains(perf, cxled->dpa_res)) + return ERR_PTR(-EINVAL); + + return perf; +} + +/* + * Transient context for containing the current calculation of bandwidth when + * doing walking the port hierarchy to deal with shared upstream link. + */ +struct cxl_perf_ctx { + struct access_coordinate coord[ACCESS_COORDINATE_MAX]; + struct cxl_port *port; +}; + +/** + * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray + * @cxlr: CXL region for the bandwidth calculation + * @cxled: endpoint decoder to start on + * @usp_xa: (output) the xarray that collects all the bandwidth coordinates + * indexed by the upstream device with data of 'struct cxl_perf_ctx'. + * @gp_is_root: (output) bool of whether the grandparent is cxl root. + * + * Return: 0 for success or -errno + * + * Collects aggregated endpoint bandwidth and store the bandwidth in + * an xarray indexed by the upstream device of the switch or the RP + * device. Each endpoint consists the minimum of the bandwidth from DSLBIS + * from the endpoint CDAT, the endpoint upstream link bandwidth, and the + * bandwidth from the SSLBIS of the switch CDAT for the switch upstream port to + * the downstream port that's associated with the endpoint. If the + * device is directly connected to a RP, then no SSLBIS is involved. + */ +static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, + struct xarray *usp_xa, + bool *gp_is_root) +{ + struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent); + struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent); + struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent); + struct access_coordinate pci_coord[ACCESS_COORDINATE_MAX]; + struct access_coordinate sw_coord[ACCESS_COORDINATE_MAX]; + struct access_coordinate ep_coord[ACCESS_COORDINATE_MAX]; + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + struct cxl_perf_ctx *perf_ctx; + struct cxl_dpa_perf *perf; + unsigned long index; + void *ptr; + int rc; + + if (cxlds->rcd) + return -ENODEV; + + perf = cxled_get_dpa_perf(cxled, cxlr->mode); + if (IS_ERR(perf)) + return PTR_ERR(perf); + + gp_port = to_cxl_port(parent_port->dev.parent); + *gp_is_root = is_cxl_root(gp_port); + + /* + * If the grandparent is cxl root, then index is the root port, + * otherwise it's the parent switch upstream device. 
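To make the arithmetic described in the kernel-doc above concrete, here is a sketch with hypothetical numbers (not taken from the patch): an x8 PCIe Gen4 endpoint link of roughly 16 GB/s, a CDAT DSLBIS figure of 12 GB/s, and a switch SSLBIS figure of 20 GB/s. Each endpoint contributes the minimum of those, and the contributions are summed per shared upstream port:

        unsigned int link_bw = 16000, dslbis_bw = 12000, sslbis_bw = 20000; /* MB/s */
        unsigned int ep_bw, acc_bw = 0;

        ep_bw = min(link_bw, dslbis_bw);        /* 12000: upstream link vs. DSLBIS */
        ep_bw = min(ep_bw, sslbis_bw);          /* 12000: still capped by DSLBIS */
        acc_bw += ep_bw;                        /* aggregated for the shared upstream port */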
+ */ + if (*gp_is_root) + index = (unsigned long)endpoint->parent_dport->dport_dev; + else + index = (unsigned long)parent_port->uport_dev; + + perf_ctx = xa_load(usp_xa, index); + if (!perf_ctx) { + struct cxl_perf_ctx *c __free(kfree) = + kzalloc(sizeof(*perf_ctx), GFP_KERNEL); + + if (!c) + return -ENOMEM; + ptr = xa_store(usp_xa, index, c, GFP_KERNEL); + if (xa_is_err(ptr)) + return xa_err(ptr); + perf_ctx = no_free_ptr(c); + perf_ctx->port = parent_port; + } + + /* Direct upstream link from EP bandwidth */ + rc = cxl_pci_get_bandwidth(pdev, pci_coord); + if (rc < 0) + return rc; + + /* + * Min of upstream link bandwidth and Endpoint CDAT bandwidth from + * DSLBIS. + */ + cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord); + + /* + * If grandparent port is root, then there's no switch involved and + * the endpoint is connected to a root port. + */ + if (!*gp_is_root) { + /* + * Retrieve the switch SSLBIS for switch downstream port + * associated with the endpoint bandwidth. + */ + rc = cxl_port_get_switch_dport_bandwidth(endpoint, sw_coord); + if (rc) + return rc; + + /* + * Min of the earlier coordinates with the switch SSLBIS + * bandwidth + */ + cxl_coordinates_combine(ep_coord, ep_coord, sw_coord); + } + + /* + * Aggregate the computed bandwidth with the current aggregated bandwidth + * of the endpoints with the same switch upstream device or RP. + */ + cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord); + + return 0; +} + +static void free_perf_xa(struct xarray *xa) +{ + struct cxl_perf_ctx *ctx; + unsigned long index; + + if (!xa) return; + + xa_for_each(xa, index, ctx) + kfree(ctx); + xa_destroy(xa); + kfree(xa); +} +DEFINE_FREE(free_perf_xa, struct xarray *, if (_T) free_perf_xa(_T)) + +/** + * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray + * @cxlr: The region being operated on + * @input_xa: xarray indexed by upstream device of a switch with data of 'struct + * cxl_perf_ctx' + * @gp_is_root: (output) bool of whether the grandparent is cxl root. + * + * Return: a xarray of resulting cxl_perf_ctx per parent switch or root port + * or ERR_PTR(-errno) + * + * Iterate through the xarray. Take the minimum of the downstream calculated + * bandwidth, the upstream link bandwidth, and the SSLBIS of the upstream + * switch if exists. Sum the resulting bandwidth under the switch upstream + * device or a RP device. The function can be iterated over multiple switches + * if the switches are present. + */ +static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr, + struct xarray *input_xa, + bool *gp_is_root) +{ + struct xarray *res_xa __free(free_perf_xa) = + kzalloc(sizeof(*res_xa), GFP_KERNEL); + struct access_coordinate coords[ACCESS_COORDINATE_MAX]; + struct cxl_perf_ctx *ctx, *us_ctx; + unsigned long index, us_index; + int dev_count = 0; + int gp_count = 0; + void *ptr; + int rc; + + if (!res_xa) + return ERR_PTR(-ENOMEM); + xa_init(res_xa); + + xa_for_each(input_xa, index, ctx) { + struct device *dev = (struct device *)index; + struct cxl_port *port = ctx->port; + struct cxl_port *parent_port = to_cxl_port(port->dev.parent); + struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent); + struct cxl_dport *dport = port->parent_dport; + bool is_root = false; + + dev_count++; + if (is_cxl_root(gp_port)) { + is_root = true; + gp_count++; + } + + /* + * If the grandparent is cxl root, then index is the root port, + * otherwise it's the parent switch upstream device. 
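The per-upstream-device accumulation above leans on a lookup-or-allocate idiom built from linux/cleanup.h: the scoped __free(kfree) annotation tears the allocation down on any error path, and no_free_ptr() disarms it once the xarray owns the pointer. A minimal standalone sketch of that idiom (function name hypothetical, not part of the patch):

static struct cxl_perf_ctx *example_get_ctx(struct xarray *xa, unsigned long index)
{
        struct cxl_perf_ctx *ctx = xa_load(xa, index);
        void *ptr;

        if (ctx)
                return ctx;

        struct cxl_perf_ctx *n __free(kfree) = kzalloc(sizeof(*n), GFP_KERNEL);

        if (!n)
                return ERR_PTR(-ENOMEM);

        ptr = xa_store(xa, index, n, GFP_KERNEL);
        if (xa_is_err(ptr))
                return ERR_PTR(xa_err(ptr));

        /* the xarray now owns the allocation; disarm the auto-free */
        return no_free_ptr(n);
}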
+ */ + if (is_root) + us_index = (unsigned long)port->parent_dport->dport_dev; + else + us_index = (unsigned long)parent_port->uport_dev; + + us_ctx = xa_load(res_xa, us_index); + if (!us_ctx) { + struct cxl_perf_ctx *n __free(kfree) = + kzalloc(sizeof(*n), GFP_KERNEL); + + if (!n) + return ERR_PTR(-ENOMEM); + + ptr = xa_store(res_xa, us_index, n, GFP_KERNEL); + if (xa_is_err(ptr)) + return ERR_PTR(xa_err(ptr)); + us_ctx = no_free_ptr(n); + us_ctx->port = parent_port; + } + + /* + * If the device isn't an upstream PCIe port, there's something + * wrong with the topology. + */ + if (!dev_is_pci(dev)) + return ERR_PTR(-EINVAL); + + /* Retrieve the upstream link bandwidth */ + rc = cxl_pci_get_bandwidth(to_pci_dev(dev), coords); + if (rc) + return ERR_PTR(-ENXIO); + + /* + * Take the min of downstream bandwidth and the upstream link + * bandwidth. + */ + cxl_coordinates_combine(coords, coords, ctx->coord); + + /* + * Take the min of the calculated bandwdith and the upstream + * switch SSLBIS bandwidth if there's a parent switch + */ + if (!is_root) + cxl_coordinates_combine(coords, coords, dport->coord); + + /* + * Aggregate the calculated bandwidth common to an upstream + * switch. + */ + cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords); } + /* Asymmetric topology detected. */ + if (gp_count) { + if (gp_count != dev_count) { + dev_dbg(&cxlr->dev, + "Asymmetric hierarchy detected, bandwidth not updated\n"); + return ERR_PTR(-EOPNOTSUPP); + } + *gp_is_root = true; + } + + return no_free_ptr(res_xa); +} + +/** + * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection + * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated + * below each root port device. + * + * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno) + */ +static struct xarray *cxl_rp_gather_bandwidth(struct xarray *xa) +{ + struct xarray *hb_xa __free(free_perf_xa) = + kzalloc(sizeof(*hb_xa), GFP_KERNEL); + struct cxl_perf_ctx *ctx; + unsigned long index; + + if (!hb_xa) + return ERR_PTR(-ENOMEM); + xa_init(hb_xa); + + xa_for_each(xa, index, ctx) { + struct cxl_port *port = ctx->port; + unsigned long hb_index = (unsigned long)port->uport_dev; + struct cxl_perf_ctx *hb_ctx; + void *ptr; + + hb_ctx = xa_load(hb_xa, hb_index); + if (!hb_ctx) { + struct cxl_perf_ctx *n __free(kfree) = + kzalloc(sizeof(*n), GFP_KERNEL); + + if (!n) + return ERR_PTR(-ENOMEM); + ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL); + if (xa_is_err(ptr)) + return ERR_PTR(xa_err(ptr)); + hb_ctx = no_free_ptr(n); + hb_ctx->port = port; + } + + cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord); + } + + return no_free_ptr(hb_xa); +} + +/** + * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection + * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated + * below each host bridge. 
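The same cleanup machinery also scopes the whole container: DEFINE_FREE(free_perf_xa, ...) above lets a gathering function own a scratch xarray that is destroyed on every early return and handed to the caller only on success. A simplified shape of that pattern (illustrative, not the patch's code):

static struct xarray *example_gather(void)
{
        struct xarray *xa __free(free_perf_xa) =
                kzalloc(sizeof(*xa), GFP_KERNEL);

        if (!xa)
                return ERR_PTR(-ENOMEM);
        xa_init(xa);

        /* ... populate; any ERR_PTR() return frees the entries and the xarray ... */

        return no_free_ptr(xa);         /* success: ownership moves to the caller */
}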
+ * + * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno) + */ +static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa) +{ + struct xarray *mw_xa __free(free_perf_xa) = + kzalloc(sizeof(*mw_xa), GFP_KERNEL); + struct cxl_perf_ctx *ctx; + unsigned long index; + + if (!mw_xa) + return ERR_PTR(-ENOMEM); + xa_init(mw_xa); + + xa_for_each(xa, index, ctx) { + struct cxl_port *port = ctx->port; + struct cxl_port *parent_port; + struct cxl_perf_ctx *mw_ctx; + struct cxl_dport *dport; + unsigned long mw_index; + void *ptr; + + parent_port = to_cxl_port(port->dev.parent); + mw_index = (unsigned long)parent_port->uport_dev; + + mw_ctx = xa_load(mw_xa, mw_index); + if (!mw_ctx) { + struct cxl_perf_ctx *n __free(kfree) = + kzalloc(sizeof(*n), GFP_KERNEL); + + if (!n) + return ERR_PTR(-ENOMEM); + ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL); + if (xa_is_err(ptr)) + return ERR_PTR(xa_err(ptr)); + mw_ctx = no_free_ptr(n); + } + + dport = port->parent_dport; + cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord); + cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord); + } + + return no_free_ptr(mw_xa); +} + +/** + * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region + * @cxlr: The region being operated on + * @input_xa: xarray holds cxl_perf_ctx wht calculated bandwidth per ACPI0017 instance + */ +static void cxl_region_update_bandwidth(struct cxl_region *cxlr, + struct xarray *input_xa) +{ + struct access_coordinate coord[ACCESS_COORDINATE_MAX]; + struct cxl_perf_ctx *ctx; + unsigned long index; + + memset(coord, 0, sizeof(coord)); + xa_for_each(input_xa, index, ctx) + cxl_bandwidth_add(coord, coord, ctx->coord); + + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth; + cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth; + } +} + +/** + * cxl_region_shared_upstream_bandwidth_update - Recalculate the bandwidth for + * the region + * @cxlr: the cxl region to recalculate + * + * The function walks the topology from bottom up and calculates the bandwidth. It + * starts at the endpoints, processes at the switches if any, processes at the rootport + * level, at the host bridge level, and finally aggregates at the region. + */ +void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr) +{ + struct xarray *working_xa; + int root_count = 0; + bool is_root; + int rc; + + lockdep_assert_held(&cxl_dpa_rwsem); + + struct xarray *usp_xa __free(free_perf_xa) = + kzalloc(sizeof(*usp_xa), GFP_KERNEL); + + if (!usp_xa) + return; + + xa_init(usp_xa); + + /* Collect bandwidth data from all the endpoints. */ + for (int i = 0; i < cxlr->params.nr_targets; i++) { + struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i]; + + is_root = false; + rc = cxl_endpoint_gather_bandwidth(cxlr, cxled, usp_xa, &is_root); + if (rc) + return; + root_count += is_root; + } + + /* Detect asymmetric hierarchy with some direct attached endpoints. */ + if (root_count && root_count != cxlr->params.nr_targets) { + dev_dbg(&cxlr->dev, + "Asymmetric hierarchy detected, bandwidth not updated\n"); + return; + } + + /* + * Walk up one or more switches to deal with the bandwidth of the + * switches if they exist. Endpoints directly attached to RPs skip + * over this part. 
+ */ + if (!root_count) { + do { + working_xa = cxl_switch_gather_bandwidth(cxlr, usp_xa, + &is_root); + if (IS_ERR(working_xa)) + return; + free_perf_xa(usp_xa); + usp_xa = working_xa; + } while (!is_root); + } + + /* Handle the bandwidth at the root port of the hierarchy */ + working_xa = cxl_rp_gather_bandwidth(usp_xa); + if (IS_ERR(working_xa)) + return; + free_perf_xa(usp_xa); + usp_xa = working_xa; + + /* Handle the bandwidth at the host bridge of the hierarchy */ + working_xa = cxl_hb_gather_bandwidth(usp_xa); + if (IS_ERR(working_xa)) + return; + free_perf_xa(usp_xa); + usp_xa = working_xa; + + /* + * Aggregate all the bandwidth collected per CFMWS (ACPI0017) and + * update the region bandwidth with the final calculated values. + */ + cxl_region_update_bandwidth(cxlr, usp_xa); +} + +void cxl_region_perf_data_calculate(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled) +{ + struct cxl_dpa_perf *perf; + lockdep_assert_held(&cxl_dpa_rwsem); - if (!range_contains(&perf->dpa_range, &dpa)) + perf = cxled_get_dpa_perf(cxled, cxlr->mode); + if (IS_ERR(perf)) return; for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 72a506c9dbd0a8..0c62b4069ba00a 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -103,9 +103,11 @@ enum cxl_poison_trace_type { }; long cxl_pci_get_latency(struct pci_dev *pdev); - +int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c); int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr, enum access_coordinate_class access); bool cxl_need_node_perf_attrs_update(int nid); +int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, + struct access_coordinate *c); #endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index e5cdeafdf76e7d..5175138c4fb738 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include #include @@ -225,7 +225,7 @@ static const char *cxl_mem_opcode_to_name(u16 opcode) /** * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command - * @mds: The driver data for the operation + * @cxl_mbox: CXL mailbox context * @mbox_cmd: initialized command to execute * * Context: Any context. @@ -241,19 +241,19 @@ static const char *cxl_mem_opcode_to_name(u16 opcode) * error. While this distinction can be useful for commands from userspace, the * kernel will only be able to use results when both are successful. */ -int cxl_internal_send_cmd(struct cxl_memdev_state *mds, +int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *mbox_cmd) { size_t out_size, min_out; int rc; - if (mbox_cmd->size_in > mds->payload_size || - mbox_cmd->size_out > mds->payload_size) + if (mbox_cmd->size_in > cxl_mbox->payload_size || + mbox_cmd->size_out > cxl_mbox->payload_size) return -E2BIG; out_size = mbox_cmd->size_out; min_out = mbox_cmd->min_out; - rc = mds->mbox_send(mds, mbox_cmd); + rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd); /* * EIO is reserved for a payload size mismatch and mbox_send() * may not return this error. 
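A hedged sketch of what a caller looks like after this mailbox refactor — the mailbox context now hangs off cxl_dev_state and cxl_internal_send_cmd() takes it directly. The helper name and command choice are illustrative only, not part of the patch:

static int example_get_health(struct cxl_memdev_state *mds, void *out, size_t len)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_cmd mbox_cmd = {
                .opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
                .size_out = len,
                .payload_out = out,
        };

        if (len > cxl_mbox->payload_size)
                return -E2BIG;

        return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}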
@@ -353,6 +353,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox, struct cxl_memdev_state *mds, u16 opcode, size_t in_size, size_t out_size, u64 in_payload) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; *mbox = (struct cxl_mbox_cmd) { .opcode = opcode, .size_in = in_size, @@ -374,7 +375,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox, /* Prepare to handle a full payload for variable sized output */ if (out_size == CXL_VARIABLE_PAYLOAD) - mbox->size_out = mds->payload_size; + mbox->size_out = cxl_mbox->payload_size; else mbox->size_out = out_size; @@ -398,6 +399,8 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd, const struct cxl_send_command *send_cmd, struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + if (send_cmd->raw.rsvd) return -EINVAL; @@ -406,7 +409,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd, * gets passed along without further checking, so it must be * validated here. */ - if (send_cmd->out.size > mds->payload_size) + if (send_cmd->out.size > cxl_mbox->payload_size) return -EINVAL; if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) @@ -494,6 +497,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd, struct cxl_memdev_state *mds, const struct cxl_send_command *send_cmd) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mem_command mem_cmd; int rc; @@ -505,7 +509,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd, * supports, but output can be arbitrarily large (simply write out as * much data as the hardware provides). */ - if (send_cmd->in.size > mds->payload_size) + if (send_cmd->in.size > cxl_mbox->payload_size) return -EINVAL; /* Sanitize and construct a cxl_mem_command */ @@ -542,7 +546,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd, return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands); /* - * otherwise, return max(n_commands, total commands) cxl_command_info + * otherwise, return min(n_commands, total commands) cxl_command_info * structures. 
*/ cxl_for_each_cmd(cmd) { @@ -591,6 +595,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds, u64 out_payload, s32 *size_out, u32 *retval) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct device *dev = mds->cxlds.dev; int rc; @@ -601,7 +606,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds, cxl_mem_opcode_to_name(mbox_cmd->opcode), mbox_cmd->opcode, mbox_cmd->size_in); - rc = mds->mbox_send(mds, mbox_cmd); + rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd); if (rc) goto out; @@ -659,11 +664,12 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid, u32 *size, u8 *out) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; u32 remaining = *size; u32 offset = 0; while (remaining) { - u32 xfer_size = min_t(u32, remaining, mds->payload_size); + u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size); struct cxl_mbox_cmd mbox_cmd; struct cxl_mbox_get_log log; int rc; @@ -682,7 +688,7 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid, .payload_out = out, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); /* * The output payload length that indicates the number @@ -752,22 +758,23 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel) static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_supported_logs *ret; struct cxl_mbox_cmd mbox_cmd; int rc; - ret = kvmalloc(mds->payload_size, GFP_KERNEL); + ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL); if (!ret) return ERR_PTR(-ENOMEM); mbox_cmd = (struct cxl_mbox_cmd) { .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS, - .size_out = mds->payload_size, + .size_out = cxl_mbox->payload_size, .payload_out = ret, /* At least the record number field must be valid */ .min_out = 2, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) { kvfree(ret); return ERR_PTR(rc); @@ -910,6 +917,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, enum cxl_event_log_type log, struct cxl_get_event_payload *get_pl) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_clear_event_payload *payload; u16 total = le16_to_cpu(get_pl->record_count); u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES; @@ -920,8 +928,8 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, int i; /* Payload size may limit the max handles */ - if (pl_size > mds->payload_size) { - max_handles = (mds->payload_size - sizeof(*payload)) / + if (pl_size > cxl_mbox->payload_size) { + max_handles = (cxl_mbox->payload_size - sizeof(*payload)) / sizeof(__le16); pl_size = struct_size(payload, handles, max_handles); } @@ -955,7 +963,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, if (i == max_handles) { payload->nr_recs = i; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) goto free_pl; i = 0; @@ -966,7 +974,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, if (i) { payload->nr_recs = i; mbox_cmd.size_in = struct_size(payload, handles, i); - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) goto free_pl; } @@ -979,6 +987,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, static void 
cxl_mem_get_records_log(struct cxl_memdev_state *mds, enum cxl_event_log_type type) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_memdev *cxlmd = mds->cxlds.cxlmd; struct device *dev = mds->cxlds.dev; struct cxl_get_event_payload *payload; @@ -995,11 +1004,11 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds, .payload_in = &log_type, .size_in = sizeof(log_type), .payload_out = payload, - .size_out = mds->payload_size, + .size_out = cxl_mbox->payload_size, .min_out = struct_size(payload, records, 0), }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) { dev_err_ratelimited(dev, "Event log '%d': Failed to query event records : %d", @@ -1070,6 +1079,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL); */ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_partition_info pi; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -1079,7 +1089,7 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) .size_out = sizeof(pi), .payload_out = &pi, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) return rc; @@ -1106,6 +1116,7 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) */ int cxl_dev_state_identify(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ struct cxl_mbox_identify id; struct cxl_mbox_cmd mbox_cmd; @@ -1120,7 +1131,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds) .size_out = sizeof(id), .payload_out = &id, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) return rc; @@ -1148,6 +1159,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL); static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; int rc; u32 sec_out = 0; struct cxl_get_security_output { @@ -1159,14 +1171,13 @@ static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) .size_out = sizeof(out), }; struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd }; - struct cxl_dev_state *cxlds = &mds->cxlds; if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE) return -EINVAL; - rc = cxl_internal_send_cmd(mds, &sec_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd); if (rc < 0) { - dev_err(cxlds->dev, "Failed to get security state : %d", rc); + dev_err(cxl_mbox->host, "Failed to get security state : %d", rc); return rc; } @@ -1183,9 +1194,9 @@ static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) sec_out & CXL_PMEM_SEC_STATE_LOCKED) return -EINVAL; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) { - dev_err(cxlds->dev, "Failed to sanitize device : %d", rc); + dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc); return rc; } @@ -1214,7 +1225,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) int rc; /* synchronize with cxl_mem_probe() and decoder write operations */ - device_lock(&cxlmd->dev); + guard(device)(&cxlmd->dev); endpoint = cxlmd->endpoint; down_read(&cxl_region_rwsem); /* @@ -1226,7 +1237,6 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) else rc = -EBUSY; up_read(&cxl_region_rwsem); - device_unlock(&cxlmd->dev); return rc; } @@ -1300,6 +1310,7 @@ 
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL); int cxl_set_timestamp(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_cmd mbox_cmd; struct cxl_mbox_set_timestamp_in pi; int rc; @@ -1311,7 +1322,7 @@ int cxl_set_timestamp(struct cxl_memdev_state *mds) .payload_in = &pi, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); /* * Command is optional. Devices may have another way of providing * a timestamp, or may return all 0s in timestamp fields. @@ -1328,6 +1339,7 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, struct cxl_region *cxlr) { struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_poison_out *po; struct cxl_mbox_poison_in pi; int nr_records = 0; @@ -1346,12 +1358,12 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, .opcode = CXL_MBOX_OP_GET_POISON, .size_in = sizeof(pi), .payload_in = &pi, - .size_out = mds->payload_size, + .size_out = cxl_mbox->payload_size, .payload_out = po, .min_out = struct_size(po, record, 0), }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) break; @@ -1382,7 +1394,9 @@ static void free_poison_buf(void *buf) /* Get Poison List output buffer is protected by mds->poison.lock */ static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds) { - mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL); + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + + mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL); if (!mds->poison.list_out) return -ENOMEM; @@ -1408,6 +1422,19 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds) } EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL); +int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host) +{ + if (!cxl_mbox || !host) + return -EINVAL; + + cxl_mbox->host = host; + mutex_init(&cxl_mbox->mbox_mutex); + rcuwait_init(&cxl_mbox->mbox_wait); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, CXL); + struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev) { struct cxl_memdev_state *mds; @@ -1418,7 +1445,6 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev) return ERR_PTR(-ENOMEM); } - mutex_init(&mds->mbox_mutex); mutex_init(&mds->event.log_lock); mds->cxlds.dev = dev; mds->cxlds.reg_map.host = dev; diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 0277726afd049a..84fefb76dafabc 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -58,7 +58,7 @@ static ssize_t payload_max_show(struct device *dev, if (!mds) return sysfs_emit(buf, "\n"); - return sysfs_emit(buf, "%zu\n", mds->payload_size); + return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size); } static DEVICE_ATTR_RO(payload_max); @@ -124,15 +124,16 @@ static ssize_t security_state_show(struct device *dev, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); unsigned long state = mds->security.state; int rc = 0; /* sync with latest submission state */ - mutex_lock(&mds->mbox_mutex); + mutex_lock(&cxl_mbox->mbox_mutex); if (mds->security.sanitize_active) rc = sysfs_emit(buf, "sanitize\n"); - mutex_unlock(&mds->mbox_mutex); + mutex_unlock(&cxl_mbox->mbox_mutex); if (rc) return rc; @@ -277,7 +278,7 @@ 
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) { - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_inject_poison inject; struct cxl_poison_record record; struct cxl_mbox_cmd mbox_cmd; @@ -307,13 +308,13 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) .size_in = sizeof(inject), .payload_in = &inject, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) goto out; cxlr = cxl_dpa_to_region(cxlmd, dpa); if (cxlr) - dev_warn_once(mds->cxlds.dev, + dev_warn_once(cxl_mbox->host, "poison inject dpa:%#llx region: %s\n", dpa, dev_name(&cxlr->dev)); @@ -332,7 +333,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL); int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) { - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_clear_poison clear; struct cxl_poison_record record; struct cxl_mbox_cmd mbox_cmd; @@ -371,13 +372,13 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) .payload_in = &clear, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) goto out; cxlr = cxl_dpa_to_region(cxlmd, dpa); if (cxlr) - dev_warn_once(mds->cxlds.dev, + dev_warn_once(cxl_mbox->host, "poison clear dpa:%#llx region: %s\n", dpa, dev_name(&cxlr->dev)); @@ -714,6 +715,7 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file) */ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_fw_info info; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -724,7 +726,7 @@ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) .payload_out = &info, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) return rc; @@ -748,6 +750,7 @@ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) */ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_activate_fw activate; struct cxl_mbox_cmd mbox_cmd; @@ -764,7 +767,7 @@ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) activate.action = CXL_FW_ACTIVATE_OFFLINE; activate.slot = slot; - return cxl_internal_send_cmd(mds, &mbox_cmd); + return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); } /** @@ -779,6 +782,7 @@ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) */ static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_transfer_fw *transfer; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -798,7 +802,7 @@ static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds) transfer->action = CXL_FW_TRANSFER_ACTION_ABORT; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); kfree(transfer); return rc; } @@ -829,12 +833,13 @@ static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data, { struct cxl_memdev_state *mds = fwl->dd_handle; struct cxl_mbox_transfer_fw *transfer; + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; if (!size) return FW_UPLOAD_ERR_INVALID_SIZE; mds->fw.oneshot = struct_size(transfer, data, size) < - mds->payload_size; + cxl_mbox->payload_size; if 
(cxl_mem_get_fw_info(mds)) return FW_UPLOAD_ERR_HW_ERROR; @@ -854,6 +859,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, { struct cxl_memdev_state *mds = fwl->dd_handle; struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; struct cxl_memdev *cxlmd = cxlds->cxlmd; struct cxl_mbox_transfer_fw *transfer; struct cxl_mbox_cmd mbox_cmd; @@ -877,7 +883,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, * sizeof(*transfer) is 128. These constraints imply that @cur_size * will always be 128b aligned. */ - cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer)); + cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer)); remaining = size - cur_size; size_in = struct_size(transfer, data, cur_size); @@ -921,7 +927,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, .poll_count = 30, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) { rc = FW_UPLOAD_ERR_RW_ERROR; goto out_free; @@ -1059,16 +1065,17 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL); static void sanitize_teardown_notifier(void *data) { struct cxl_memdev_state *mds = data; + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct kernfs_node *state; /* * Prevent new irq triggered invocations of the workqueue and * flush inflight invocations. */ - mutex_lock(&mds->mbox_mutex); + mutex_lock(&cxl_mbox->mbox_mutex); state = mds->security.sanitize_node; mds->security.sanitize_node = NULL; - mutex_unlock(&mds->mbox_mutex); + mutex_unlock(&cxl_mbox->mbox_mutex); cancel_delayed_work_sync(&mds->security.poll_dwork); sysfs_put(state); diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 51132a575b2766..5b46bc46aaa951 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -211,37 +211,6 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds) } EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL); -static int wait_for_valid(struct pci_dev *pdev, int d) -{ - u32 val; - int rc; - - /* - * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high - * and Size Low registers are valid. Must be set within 1 second of - * deassertion of reset to CXL device. Likely it is already set by the - * time this runs, but otherwise give a 1.5 second timeout in case of - * clock skew. 
- */ - rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); - if (rc) - return rc; - - if (val & CXL_DVSEC_MEM_INFO_VALID) - return 0; - - msleep(1500); - - rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); - if (rc) - return rc; - - if (val & CXL_DVSEC_MEM_INFO_VALID) - return 0; - - return -ETIMEDOUT; -} - static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val) { struct pci_dev *pdev = to_pci_dev(cxlds->dev); @@ -322,11 +291,13 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm) return devm_add_action_or_reset(host, disable_hdm, cxlhdm); } -int cxl_dvsec_rr_decode(struct device *dev, int d, +int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port, struct cxl_endpoint_dvsec_info *info) { struct pci_dev *pdev = to_pci_dev(dev); + struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); int hdm_count, rc, i, ranges = 0; + int d = cxlds->cxl_dvsec; u16 cap, ctrl; if (!d) { @@ -353,12 +324,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, if (!hdm_count || hdm_count > 2) return -EINVAL; - rc = wait_for_valid(pdev, d); - if (rc) { - dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc); - return rc; - } - /* * The current DVSEC values are moot if the memory capability is * disabled, and they will remain moot after the HDM Decoder @@ -376,6 +341,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, u64 base, size; u32 temp; + rc = cxl_dvsec_mem_range_valid(cxlds, i); + if (rc) + return rc; + rc = pci_read_config_dword( pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp); if (rc) @@ -390,10 +359,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK; if (!size) { - info->dvsec_range[i] = (struct range) { - .start = 0, - .end = CXL_RESOURCE_NONE, - }; continue; } @@ -411,12 +376,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK; - info->dvsec_range[i] = (struct range) { + info->dvsec_range[ranges++] = (struct range) { .start = base, .end = base + size - 1 }; - - ranges++; } info->ranges = ranges; @@ -463,7 +426,15 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, return -ENODEV; } - for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) { + if (!info->mem_enabled) { + rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); + if (rc) + return rc; + + return devm_cxl_enable_mem(&port->dev, cxlds); + } + + for (i = 0, allowed = 0; i < info->ranges; i++) { struct device *cxld_dev; cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i], @@ -477,7 +448,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, allowed++; } - if (!allowed && info->mem_enabled) { + if (!allowed) { dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n"); return -ENXIO; } @@ -491,14 +462,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, * match. If at least one DVSEC range is enabled and allowed, skip HDM * Decoder Capability Enable. 
*/ - if (info->mem_enabled) - return 0; - - rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); - if (rc) - return rc; - - return devm_cxl_enable_mem(&port->dev, cxlds); + return 0; } EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL); @@ -772,22 +736,20 @@ static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds) static void cxl_dport_map_rch_aer(struct cxl_dport *dport) { - struct cxl_rcrb_info *ri = &dport->rcrb; - void __iomem *dport_aer = NULL; resource_size_t aer_phys; struct device *host; + u16 aer_cap; - if (dport->rch && ri->aer_cap) { + aer_cap = cxl_rcrb_to_aer(dport->dport_dev, dport->rcrb.base); + if (aer_cap) { host = dport->reg_map.host; - aer_phys = ri->aer_cap + ri->base; - dport_aer = devm_cxl_iomap_block(host, aer_phys, - sizeof(struct aer_capability_regs)); + aer_phys = aer_cap + dport->rcrb.base; + dport->regs.dport_aer = devm_cxl_iomap_block(host, aer_phys, + sizeof(struct aer_capability_regs)); } - - dport->regs.dport_aer = dport_aer; } -static void cxl_dport_map_regs(struct cxl_dport *dport) +static void cxl_dport_map_ras(struct cxl_dport *dport) { struct cxl_register_map *map = &dport->reg_map; struct device *dev = dport->dport_dev; @@ -797,22 +759,16 @@ static void cxl_dport_map_regs(struct cxl_dport *dport) else if (cxl_map_component_regs(map, &dport->regs.component, BIT(CXL_CM_CAP_CAP_ID_RAS))) dev_dbg(dev, "Failed to map RAS capability.\n"); - - if (dport->rch) - cxl_dport_map_rch_aer(dport); } static void cxl_disable_rch_root_ints(struct cxl_dport *dport) { void __iomem *aer_base = dport->regs.dport_aer; - struct pci_host_bridge *bridge; u32 aer_cmd_mask, aer_cmd; if (!aer_base) return; - bridge = to_pci_host_bridge(dport->dport_dev); - /* * Disable RCH root port command interrupts. * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors @@ -821,34 +777,35 @@ static void cxl_disable_rch_root_ints(struct cxl_dport *dport) * the root cmd register's interrupts is required. But, PCI spec * shows these are disabled by default on reset. 
*/ - if (bridge->native_aer) { - aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN | - PCI_ERR_ROOT_CMD_NONFATAL_EN | - PCI_ERR_ROOT_CMD_FATAL_EN); - aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND); - aer_cmd &= ~aer_cmd_mask; - writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND); - } + aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN | + PCI_ERR_ROOT_CMD_NONFATAL_EN | + PCI_ERR_ROOT_CMD_FATAL_EN); + aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND); + aer_cmd &= ~aer_cmd_mask; + writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND); } -void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport) +/** + * cxl_dport_init_ras_reporting - Setup CXL RAS report on this dport + * @dport: the cxl_dport that needs to be initialized + * @host: host device for devm operations + */ +void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host) { - struct device *dport_dev = dport->dport_dev; + dport->reg_map.host = host; + cxl_dport_map_ras(dport); if (dport->rch) { - struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport_dev); - - if (host_bridge->native_aer) - dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base); - } + struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport->dport_dev); - dport->reg_map.host = host; - cxl_dport_map_regs(dport); + if (!host_bridge->native_aer) + return; - if (dport->rch) + cxl_dport_map_rch_aer(dport); cxl_disable_rch_root_ints(dport); + } } -EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, CXL); static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds, struct cxl_dport *dport) @@ -915,15 +872,13 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) struct pci_dev *pdev = to_pci_dev(cxlds->dev); struct aer_capability_regs aer_regs; struct cxl_dport *dport; - struct cxl_port *port; int severity; - port = cxl_pci_find_port(pdev, &dport); + struct cxl_port *port __free(put_cxl_port) = + cxl_pci_find_port(pdev, &dport); if (!port) return; - put_device(&port->dev); - if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs)) return; @@ -1076,3 +1031,26 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port) __cxl_endpoint_decoder_reset_detected); } EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL); + +int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c) +{ + int speed, bw; + u16 lnksta; + u32 width; + + speed = pcie_link_speed_mbps(pdev); + if (speed < 0) + return speed; + speed /= BITS_PER_BYTE; + + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); + width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); + bw = speed * width; + + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + c[i].read_bandwidth = bw; + c[i].write_bandwidth = bw; + } + + return 0; +} diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 1d5007e3795a3c..e666ec6a9085a5 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include @@ -11,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -828,27 +828,20 @@ static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport) &cxl_einj_inject_fops); } -static struct cxl_port *__devm_cxl_add_port(struct device *host, - struct device *uport_dev, - resource_size_t component_reg_phys, - struct cxl_dport *parent_dport) +static int cxl_port_add(struct cxl_port *port, + resource_size_t component_reg_phys, + struct cxl_dport *parent_dport) { - struct cxl_port *port; - struct device 
*dev; + struct device *dev __free(put_device) = &port->dev; int rc; - port = cxl_port_alloc(uport_dev, parent_dport); - if (IS_ERR(port)) - return port; - - dev = &port->dev; - if (is_cxl_memdev(uport_dev)) { - struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev); + if (is_cxl_memdev(port->uport_dev)) { + struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; rc = dev_set_name(dev, "endpoint%d", port->id); if (rc) - goto err; + return rc; /* * The endpoint driver already enumerated the component and RAS @@ -861,19 +854,41 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host, } else if (parent_dport) { rc = dev_set_name(dev, "port%d", port->id); if (rc) - goto err; + return rc; rc = cxl_port_setup_regs(port, component_reg_phys); if (rc) - goto err; - } else + return rc; + } else { rc = dev_set_name(dev, "root%d", port->id); - if (rc) - goto err; + if (rc) + return rc; + } rc = device_add(dev); if (rc) - goto err; + return rc; + + /* Inhibit the cleanup function invoked */ + dev = NULL; + return 0; +} + +static struct cxl_port *__devm_cxl_add_port(struct device *host, + struct device *uport_dev, + resource_size_t component_reg_phys, + struct cxl_dport *parent_dport) +{ + struct cxl_port *port; + int rc; + + port = cxl_port_alloc(uport_dev, parent_dport); + if (IS_ERR(port)) + return port; + + rc = cxl_port_add(port, component_reg_phys, parent_dport); + if (rc) + return ERR_PTR(rc); rc = devm_add_action_or_reset(host, unregister_port, port); if (rc) @@ -891,10 +906,6 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host, port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev)); return port; - -err: - put_device(dev); - return ERR_PTR(rc); } /** @@ -941,7 +952,7 @@ struct cxl_root *devm_cxl_add_root(struct device *host, port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); if (IS_ERR(port)) - return (struct cxl_root *)port; + return ERR_CAST(port); cxl_root = to_cxl_root(port); cxl_root->ops = ops; @@ -1258,18 +1269,13 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL); static int add_ep(struct cxl_ep *new) { struct cxl_port *port = new->dport->port; - int rc; - device_lock(&port->dev); - if (port->dead) { - device_unlock(&port->dev); + guard(device)(&port->dev); + if (port->dead) return -ENXIO; - } - rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new, - GFP_KERNEL); - device_unlock(&port->dev); - return rc; + return xa_insert(&port->endpoints, (unsigned long)new->ep, + new, GFP_KERNEL); } /** @@ -1393,14 +1399,14 @@ static void delete_endpoint(void *data) struct cxl_port *endpoint = cxlmd->endpoint; struct device *host = endpoint_host(endpoint); - device_lock(host); - if (host->driver && !endpoint->dead) { - devm_release_action(host, cxl_unlink_parent_dport, endpoint); - devm_release_action(host, cxl_unlink_uport, endpoint); - devm_release_action(host, unregister_port, endpoint); + scoped_guard(device, host) { + if (host->driver && !endpoint->dead) { + devm_release_action(host, cxl_unlink_parent_dport, endpoint); + devm_release_action(host, cxl_unlink_uport, endpoint); + devm_release_action(host, unregister_port, endpoint); + } + cxlmd->endpoint = NULL; } - cxlmd->endpoint = NULL; - device_unlock(host); put_device(&endpoint->dev); put_device(host); } @@ -1477,12 +1483,11 @@ static void cxl_detach_ep(void *data) .cxlmd = cxlmd, .depth = i, }; - struct device *dev; struct cxl_ep *ep; bool died = false; - dev = bus_find_device(&cxl_bus_type, NULL, &ctx, - port_has_memdev); + struct device *dev 
__free(put_device) = + bus_find_device(&cxl_bus_type, NULL, &ctx, port_has_memdev); if (!dev) continue; port = to_cxl_port(dev); @@ -1512,7 +1517,6 @@ static void cxl_detach_ep(void *data) dev_name(&port->dev)); delete_switch_port(port); } - put_device(&port->dev); device_unlock(&parent_port->dev); } } @@ -1540,7 +1544,6 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, struct device *dport_dev) { struct device *dparent = grandparent(dport_dev); - struct cxl_port *port, *parent_port = NULL; struct cxl_dport *dport, *parent_dport; resource_size_t component_reg_phys; int rc; @@ -1556,50 +1559,52 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, return -ENXIO; } - parent_port = find_cxl_port(dparent, &parent_dport); + struct cxl_port *parent_port __free(put_cxl_port) = + find_cxl_port(dparent, &parent_dport); if (!parent_port) { /* iterate to create this parent_port */ return -EAGAIN; } - device_lock(&parent_port->dev); - if (!parent_port->dev.driver) { - dev_warn(&cxlmd->dev, - "port %s:%s disabled, failed to enumerate CXL.mem\n", - dev_name(&parent_port->dev), dev_name(uport_dev)); - port = ERR_PTR(-ENXIO); - goto out; - } + /* + * Definition with __free() here to keep the sequence of + * dereferencing the device of the port before the parent_port releasing. + */ + struct cxl_port *port __free(put_cxl_port) = NULL; + scoped_guard(device, &parent_port->dev) { + if (!parent_port->dev.driver) { + dev_warn(&cxlmd->dev, + "port %s:%s disabled, failed to enumerate CXL.mem\n", + dev_name(&parent_port->dev), dev_name(uport_dev)); + return -ENXIO; + } - port = find_cxl_port_at(parent_port, dport_dev, &dport); - if (!port) { - component_reg_phys = find_component_registers(uport_dev); - port = devm_cxl_add_port(&parent_port->dev, uport_dev, - component_reg_phys, parent_dport); - /* retry find to pick up the new dport information */ - if (!IS_ERR(port)) + port = find_cxl_port_at(parent_port, dport_dev, &dport); + if (!port) { + component_reg_phys = find_component_registers(uport_dev); + port = devm_cxl_add_port(&parent_port->dev, uport_dev, + component_reg_phys, parent_dport); + if (IS_ERR(port)) + return PTR_ERR(port); + + /* retry find to pick up the new dport information */ port = find_cxl_port_at(parent_port, dport_dev, &dport); + if (!port) + return -ENXIO; + } } -out: - device_unlock(&parent_port->dev); - if (IS_ERR(port)) - rc = PTR_ERR(port); - else { - dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", - dev_name(&port->dev), dev_name(port->uport_dev)); - rc = cxl_add_ep(dport, &cxlmd->dev); - if (rc == -EBUSY) { - /* - * "can't" happen, but this error code means - * something to the caller, so translate it. - */ - rc = -ENXIO; - } - put_device(&port->dev); + dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", + dev_name(&port->dev), dev_name(port->uport_dev)); + rc = cxl_add_ep(dport, &cxlmd->dev); + if (rc == -EBUSY) { + /* + * "can't" happen, but this error code means + * something to the caller, so translate it. 
+ */ + rc = -ENXIO; } - put_device(&parent_port->dev); return rc; } @@ -1630,7 +1635,6 @@ int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd) struct device *dport_dev = grandparent(iter); struct device *uport_dev; struct cxl_dport *dport; - struct cxl_port *port; /* * The terminal "grandparent" in PCI is NULL and @platform_bus @@ -1649,7 +1653,8 @@ int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd) dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n", dev_name(iter), dev_name(dport_dev), dev_name(uport_dev)); - port = find_cxl_port(dport_dev, &dport); + struct cxl_port *port __free(put_cxl_port) = + find_cxl_port(dport_dev, &dport); if (port) { dev_dbg(&cxlmd->dev, "found already registered port %s:%s\n", @@ -1664,18 +1669,13 @@ int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd) * the parent_port lock as the current port may be being * reaped. */ - if (rc && rc != -EBUSY) { - put_device(&port->dev); + if (rc && rc != -EBUSY) return rc; - } /* Any more ports to add between this one and the root? */ - if (!dev_is_cxl_root_child(&port->dev)) { - put_device(&port->dev); + if (!dev_is_cxl_root_child(&port->dev)) continue; - } - put_device(&port->dev); return 0; } @@ -1983,7 +1983,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL); int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) { struct cxl_port *port; - int rc; if (WARN_ON_ONCE(!cxld)) return -EINVAL; @@ -1993,11 +1992,8 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) port = to_cxl_port(cxld->dev.parent); - device_lock(&port->dev); - rc = cxl_decoder_add_locked(cxld, target_map); - device_unlock(&port->dev); - - return rc; + guard(device)(&port->dev); + return cxl_decoder_add_locked(cxld, target_map); } EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL); @@ -2241,6 +2237,26 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, } EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL); +int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, + struct access_coordinate *c) +{ + struct cxl_dport *dport = port->parent_dport; + + /* Check this port is connected to a switch DSP and not an RP */ + if (parent_port_is_cxl_root(to_cxl_port(port->dev.parent))) + return -ENODEV; + + if (!coordinates_valid(dport->coord)) + return -EINVAL; + + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + c[i].read_bandwidth = dport->coord[i].read_bandwidth; + c[i].write_bandwidth = dport->coord[i].write_bandwidth; + } + + return 0; +} + /* for user tooling to ensure port disable work has completed */ static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count) { diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 21ad5f24287581..e701e4b0403282 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1983,6 +1983,7 @@ static int cxl_region_attach(struct cxl_region *cxlr, * then the region is already committed. 
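Several hunks here swap open-coded device_lock()/device_unlock() pairs for guard(device) and scoped_guard(device, ...) from linux/cleanup.h, so every return path unlocks automatically. A minimal sketch of the resulting shape (hypothetical function, not from the patch):

static int example_locked_check(struct device *dev)
{
        guard(device)(dev);             /* device_lock(dev); released on any return */

        if (!dev->driver)
                return -ENXIO;          /* no explicit device_unlock() needed */

        return 0;
}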
*/ p->state = CXL_CONFIG_COMMIT; + cxl_region_shared_upstream_bandwidth_update(cxlr); return 0; } @@ -2004,6 +2005,7 @@ static int cxl_region_attach(struct cxl_region *cxlr, if (rc) return rc; p->state = CXL_CONFIG_ACTIVE; + cxl_region_shared_upstream_bandwidth_update(cxlr); } cxled->cxld.interleave_ways = p->interleave_ways; @@ -2313,8 +2315,6 @@ static void unregister_region(void *_cxlr) struct cxl_region_params *p = &cxlr->params; int i; - unregister_memory_notifier(&cxlr->memory_notifier); - unregister_mt_adistance_algorithm(&cxlr->adist_notifier); device_del(&cxlr->dev); /* @@ -2391,18 +2391,6 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid) return true; } -static int cxl_region_nid(struct cxl_region *cxlr) -{ - struct cxl_region_params *p = &cxlr->params; - struct resource *res; - - guard(rwsem_read)(&cxl_region_rwsem); - res = p->res; - if (!res) - return NUMA_NO_NODE; - return phys_to_target_node(res->start); -} - static int cxl_region_perf_attrs_callback(struct notifier_block *nb, unsigned long action, void *arg) { @@ -2415,7 +2403,11 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb, if (nid == NUMA_NO_NODE || action != MEM_ONLINE) return NOTIFY_DONE; - region_nid = cxl_region_nid(cxlr); + /* + * No need to hold cxl_region_rwsem; region parameters are stable + * within the cxl_region driver. + */ + region_nid = phys_to_target_node(cxlr->params.res->start); if (nid != region_nid) return NOTIFY_DONE; @@ -2434,7 +2426,11 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb, int *adist = data; int region_nid; - region_nid = cxl_region_nid(cxlr); + /* + * No need to hold cxl_region_rwsem; region parameters are stable + * within the cxl_region driver. + */ + region_nid = phys_to_target_node(cxlr->params.res->start); if (nid != region_nid) return NOTIFY_OK; @@ -2484,14 +2480,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, if (rc) goto err; - cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback; - cxlr->memory_notifier.priority = CXL_CALLBACK_PRI; - register_memory_notifier(&cxlr->memory_notifier); - - cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance; - cxlr->adist_notifier.priority = 100; - register_mt_adistance_algorithm(&cxlr->adist_notifier); - rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr); if (rc) return ERR_PTR(rc); @@ -3094,11 +3082,11 @@ static void cxlr_release_nvdimm(void *_cxlr) struct cxl_region *cxlr = _cxlr; struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; - device_lock(&cxl_nvb->dev); - if (cxlr->cxlr_pmem) - devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, - cxlr->cxlr_pmem); - device_unlock(&cxl_nvb->dev); + scoped_guard(device, &cxl_nvb->dev) { + if (cxlr->cxlr_pmem) + devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, + cxlr->cxlr_pmem); + } cxlr->cxl_nvb = NULL; put_device(&cxl_nvb->dev); } @@ -3134,13 +3122,14 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), dev_name(dev)); - device_lock(&cxl_nvb->dev); - if (cxl_nvb->dev.driver) - rc = devm_add_action_or_reset(&cxl_nvb->dev, - cxlr_pmem_unregister, cxlr_pmem); - else - rc = -ENXIO; - device_unlock(&cxl_nvb->dev); + scoped_guard(device, &cxl_nvb->dev) { + if (cxl_nvb->dev.driver) + rc = devm_add_action_or_reset(&cxl_nvb->dev, + cxlr_pmem_unregister, + cxlr_pmem); + else + rc = -ENXIO; + } if (rc) goto err_bridge; @@ -3386,6 +3375,14 @@ static int 
is_system_ram(struct resource *res, void *arg) return 1; } +static void shutdown_notifiers(void *_cxlr) +{ + struct cxl_region *cxlr = _cxlr; + + unregister_memory_notifier(&cxlr->memory_notifier); + unregister_mt_adistance_algorithm(&cxlr->adist_notifier); +} + static int cxl_region_probe(struct device *dev) { struct cxl_region *cxlr = to_cxl_region(dev); @@ -3418,6 +3415,18 @@ static int cxl_region_probe(struct device *dev) out: up_read(&cxl_region_rwsem); + if (rc) + return rc; + + cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback; + cxlr->memory_notifier.priority = CXL_CALLBACK_PRI; + register_memory_notifier(&cxlr->memory_notifier); + + cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance; + cxlr->adist_notifier.priority = 100; + register_mt_adistance_algorithm(&cxlr->adist_notifier); + + rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr); if (rc) return rc; diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index 9167cfba7f592c..8672b42ee4d1b3 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 9afb407d438fae..0d8b810a51f04d 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -744,6 +744,7 @@ struct cxl_root *find_cxl_root(struct cxl_port *port); void put_cxl_root(struct cxl_root *cxl_root); DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T)) +DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev)) int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); void cxl_bus_rescan(void); void cxl_bus_drain(void); @@ -762,9 +763,10 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, #ifdef CONFIG_PCIEAER_CXL void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport); +void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host); #else -static inline void cxl_setup_parent_dport(struct device *host, - struct cxl_dport *dport) { } +static inline void cxl_dport_init_ras_reporting(struct cxl_dport *dport, + struct device *host) { } #endif struct cxl_decoder *to_cxl_decoder(struct device *dev); @@ -809,7 +811,7 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, struct cxl_endpoint_dvsec_info *info); int devm_cxl_add_passthrough_decoder(struct cxl_port *port); -int cxl_dvsec_rr_decode(struct device *dev, int dvsec, +int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port, struct cxl_endpoint_dvsec_info *info); bool is_cxl_region(struct device *dev); @@ -889,6 +891,7 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, struct access_coordinate *coord); void cxl_region_perf_data_calculate(struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled); +void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr); void cxl_memdev_update_perf(struct cxl_memdev *cxlmd); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index afb53d058d6289..2a25d1957ddb97 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -3,11 +3,12 @@ #ifndef __CXL_MEM_H__ #define __CXL_MEM_H__ #include +#include #include #include -#include -#include #include +#include +#include #include "cxl.h" /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */ @@ -397,11 +398,13 @@ enum cxl_devtype { * struct cxl_dpa_perf - DPA performance property entry * @dpa_range: range for DPA address * @coord: QoS performance 
data (i.e. latency, bandwidth) + * @cdat_coord: raw QoS performance data from CDAT * @qos_class: QoS Class cookies */ struct cxl_dpa_perf { struct range dpa_range; struct access_coordinate coord[ACCESS_COORDINATE_MAX]; + struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX]; int qos_class; }; @@ -424,6 +427,7 @@ struct cxl_dpa_perf { * @ram_res: Active Volatile memory capacity configuration * @serial: PCIe Device Serial Number * @type: Generic Memory Class device or Vendor Specific Memory device + * @cxl_mbox: CXL mailbox context */ struct cxl_dev_state { struct device *dev; @@ -438,8 +442,14 @@ struct cxl_dev_state { struct resource ram_res; u64 serial; enum cxl_devtype type; + struct cxl_mailbox cxl_mbox; }; +static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox) +{ + return dev_get_drvdata(cxl_mbox->host); +} + /** * struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data * @@ -448,11 +458,8 @@ struct cxl_dev_state { * the functionality related to that like Identify Memory Device and Get * Partition Info * @cxlds: Core driver state common across Type-2 and Type-3 devices - * @payload_size: Size of space for payload - * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register) * @lsa_size: Size of Label Storage Area * (CXL 2.0 8.2.9.5.1.1 Identify Memory Device) - * @mbox_mutex: Mutex to synchronize mailbox access. * @firmware_version: Firmware version for the memory device. * @enabled_cmds: Hardware commands found enabled in CEL. * @exclusive_cmds: Commands that are kernel-internal only @@ -470,17 +477,13 @@ struct cxl_dev_state { * @poison: poison driver state info * @security: security driver state info * @fw: firmware upload / activation state - * @mbox_wait: RCU wait for mbox send completely - * @mbox_send: @dev specific transport for transmitting mailbox commands * * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for * details on capacity parameters. 
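The new mbox_to_cxlds() helper above leans on the usual drvdata back-pointer convention: the mailbox keeps a pointer to its host device (cxl_mbox->host, recorded when cxl_mailbox_init() is called with that device), and the host device's driver data is assumed to point back at the owning cxl_dev_state. A minimal, hedged sketch of that convention; the demo_* names and the probe flow are illustrative, not the actual CXL probe code.

struct demo_state {
        struct device *dev;
        struct cxl_mailbox mbox;        /* mbox.host refers back to dev */
};

static int demo_probe(struct device *dev)
{
        struct demo_state *state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);

        if (!state)
                return -ENOMEM;
        state->dev = dev;
        dev_set_drvdata(dev, state);            /* enables the reverse lookup below */
        return cxl_mailbox_init(&state->mbox, dev);
}

static struct demo_state *mbox_to_demo_state(struct cxl_mailbox *mbox)
{
        /* same shape as mbox_to_cxlds(): the host device's drvdata is the state */
        return dev_get_drvdata(mbox->host);
}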
*/ struct cxl_memdev_state { struct cxl_dev_state cxlds; - size_t payload_size; size_t lsa_size; - struct mutex mbox_mutex; /* Protects device mailbox and firmware */ char firmware_version[0x10]; DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX); DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); @@ -500,10 +503,6 @@ struct cxl_memdev_state { struct cxl_poison_state poison; struct cxl_security_state security; struct cxl_fw_state fw; - - struct rcuwait mbox_wait; - int (*mbox_send)(struct cxl_memdev_state *mds, - struct cxl_mbox_cmd *cmd); }; static inline struct cxl_memdev_state * @@ -814,7 +813,7 @@ enum { CXL_PMEM_SEC_PASS_USER, }; -int cxl_internal_send_cmd(struct cxl_memdev_state *mds, +int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd); int cxl_dev_state_identify(struct cxl_memdev_state *mds); int cxl_await_media_ready(struct cxl_dev_state *cxlds); diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 7de232eaeb1747..a9fd5cd5a0d2f9 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -109,7 +109,6 @@ static int cxl_mem_probe(struct device *dev) struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); struct cxl_dev_state *cxlds = cxlmd->cxlds; struct device *endpoint_parent; - struct cxl_port *parent_port; struct cxl_dport *dport; struct dentry *dentry; int rc; @@ -146,7 +145,8 @@ static int cxl_mem_probe(struct device *dev) if (rc) return rc; - parent_port = cxl_mem_find_port(cxlmd, &dport); + struct cxl_port *parent_port __free(put_cxl_port) = + cxl_mem_find_port(cxlmd, &dport); if (!parent_port) { dev_err(dev, "CXL port topology not found\n"); return -ENXIO; @@ -166,22 +166,19 @@ static int cxl_mem_probe(struct device *dev) else endpoint_parent = &parent_port->dev; - cxl_setup_parent_dport(dev, dport); + cxl_dport_init_ras_reporting(dport, dev); - device_lock(endpoint_parent); - if (!endpoint_parent->driver) { - dev_err(dev, "CXL port topology %s not enabled\n", - dev_name(endpoint_parent)); - rc = -ENXIO; - goto unlock; - } + scoped_guard(device, endpoint_parent) { + if (!endpoint_parent->driver) { + dev_err(dev, "CXL port topology %s not enabled\n", + dev_name(endpoint_parent)); + return -ENXIO; + } - rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport); -unlock: - device_unlock(endpoint_parent); - put_device(&parent_port->dev); - if (rc) - return rc; + rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport); + if (rc) + return rc; + } /* * The kernel may be operating out of CXL memory on this device, diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 4be35dc22202df..188412d45e0d26 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2020 Intel Corporation. All rights reserved. 
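The cxl_mem_probe() conversion that follows swaps the manual device_lock()/device_unlock()/put_device() error paths for the scope-based helpers from linux/cleanup.h: the __free(put_cxl_port) annotation drops the port reference on every exit from the scope, and scoped_guard(device, ...) holds the device lock only for the enclosed block. A condensed sketch of the same shape, with demo_probe() as a hypothetical caller; error values are illustrative.

static int demo_probe(struct cxl_memdev *cxlmd, struct device *endpoint_parent)
{
        struct cxl_dport *dport;
        struct cxl_port *parent_port __free(put_cxl_port) =
                cxl_mem_find_port(cxlmd, &dport);       /* reference dropped on every return */

        if (!parent_port)
                return -ENXIO;

        scoped_guard(device, endpoint_parent) {         /* device_lock() held inside the block */
                if (!endpoint_parent->driver)
                        return -ENXIO;                  /* unlock and put happen automatically */
        }
        return 0;
}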
*/ -#include +#include #include #include #include @@ -11,6 +11,7 @@ #include #include #include +#include #include "cxlmem.h" #include "cxlpci.h" #include "cxl.h" @@ -124,6 +125,7 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id) u16 opcode; struct cxl_dev_id *dev_id = id; struct cxl_dev_state *cxlds = dev_id->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); if (!cxl_mbox_background_complete(cxlds)) @@ -132,13 +134,13 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id) reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); if (opcode == CXL_MBOX_OP_SANITIZE) { - mutex_lock(&mds->mbox_mutex); + mutex_lock(&cxl_mbox->mbox_mutex); if (mds->security.sanitize_node) mod_delayed_work(system_wq, &mds->security.poll_dwork, 0); - mutex_unlock(&mds->mbox_mutex); + mutex_unlock(&cxl_mbox->mbox_mutex); } else { /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */ - rcuwait_wake_up(&mds->mbox_wait); + rcuwait_wake_up(&cxl_mbox->mbox_wait); } return IRQ_HANDLED; @@ -152,8 +154,9 @@ static void cxl_mbox_sanitize_work(struct work_struct *work) struct cxl_memdev_state *mds = container_of(work, typeof(*mds), security.poll_dwork.work); struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; - mutex_lock(&mds->mbox_mutex); + mutex_lock(&cxl_mbox->mbox_mutex); if (cxl_mbox_background_complete(cxlds)) { mds->security.poll_tmo_secs = 0; if (mds->security.sanitize_node) @@ -167,12 +170,12 @@ static void cxl_mbox_sanitize_work(struct work_struct *work) mds->security.poll_tmo_secs = min(15 * 60, timeout); schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ); } - mutex_unlock(&mds->mbox_mutex); + mutex_unlock(&cxl_mbox->mbox_mutex); } /** * __cxl_pci_mbox_send_cmd() - Execute a mailbox command - * @mds: The memory device driver data + * @cxl_mbox: CXL mailbox context * @mbox_cmd: Command to send to the memory device. * * Context: Any context. Expects mbox_mutex to be held. @@ -192,17 +195,18 @@ static void cxl_mbox_sanitize_work(struct work_struct *work) * not need to coordinate with each other. The driver only uses the primary * mailbox. */ -static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds, +static int __cxl_pci_mbox_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *mbox_cmd) { - struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_dev_state *cxlds = mbox_to_cxlds(cxl_mbox); + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; struct device *dev = cxlds->dev; u64 cmd_reg, status_reg; size_t out_len; int rc; - lockdep_assert_held(&mds->mbox_mutex); + lockdep_assert_held(&cxl_mbox->mbox_mutex); /* * Here are the steps from 8.2.8.4 of the CXL 2.0 spec. 
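After the move to cxl_mbox->mbox_mutex, __cxl_pci_mbox_send_cmd() keeps the kernel's double-underscore locking convention: the plain-named wrapper (cxl_pci_mbox_send(), in the next hunk) takes the mutex, and the __-prefixed worker only documents the requirement with lockdep_assert_held(). A minimal generic sketch of that convention; demo_ctx is a placeholder type, not CXL code.

struct demo_ctx {
        struct mutex lock;
        unsigned int count;
};

static void __demo_bump(struct demo_ctx *ctx)
{
        lockdep_assert_held(&ctx->lock);        /* caller must already hold ctx->lock */
        ctx->count++;
}

static void demo_bump(struct demo_ctx *ctx)
{
        mutex_lock(&ctx->lock);
        __demo_bump(ctx);
        mutex_unlock(&ctx->lock);
}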
@@ -315,10 +319,10 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds, timeout = mbox_cmd->poll_interval_ms; for (i = 0; i < mbox_cmd->poll_count; i++) { - if (rcuwait_wait_event_timeout(&mds->mbox_wait, - cxl_mbox_background_complete(cxlds), - TASK_UNINTERRUPTIBLE, - msecs_to_jiffies(timeout)) > 0) + if (rcuwait_wait_event_timeout(&cxl_mbox->mbox_wait, + cxl_mbox_background_complete(cxlds), + TASK_UNINTERRUPTIBLE, + msecs_to_jiffies(timeout)) > 0) break; } @@ -360,7 +364,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds, */ size_t n; - n = min3(mbox_cmd->size_out, mds->payload_size, out_len); + n = min3(mbox_cmd->size_out, cxl_mbox->payload_size, out_len); memcpy_fromio(mbox_cmd->payload_out, payload, n); mbox_cmd->size_out = n; } else { @@ -370,14 +374,14 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds, return 0; } -static int cxl_pci_mbox_send(struct cxl_memdev_state *mds, +static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd) { int rc; - mutex_lock_io(&mds->mbox_mutex); - rc = __cxl_pci_mbox_send_cmd(mds, cmd); - mutex_unlock(&mds->mbox_mutex); + mutex_lock_io(&cxl_mbox->mbox_mutex); + rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd); + mutex_unlock(&cxl_mbox->mbox_mutex); return rc; } @@ -385,6 +389,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds, static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail) { struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); struct device *dev = cxlds->dev; unsigned long timeout; @@ -417,8 +422,8 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail) return -ETIMEDOUT; } - mds->mbox_send = cxl_pci_mbox_send; - mds->payload_size = + cxl_mbox->mbox_send = cxl_pci_mbox_send; + cxl_mbox->payload_size = 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); /* @@ -428,16 +433,15 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail) * there's no point in going forward. If the size is too large, there's * no harm is soft limiting it. 
*/ - mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M); - if (mds->payload_size < 256) { + cxl_mbox->payload_size = min_t(size_t, cxl_mbox->payload_size, SZ_1M); + if (cxl_mbox->payload_size < 256) { dev_err(dev, "Mailbox is too small (%zub)", - mds->payload_size); + cxl_mbox->payload_size); return -ENXIO; } - dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size); + dev_dbg(dev, "Mailbox payload sized %zu", cxl_mbox->payload_size); - rcuwait_init(&mds->mbox_wait); INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work); /* background command interrupts are optional */ @@ -473,7 +477,6 @@ static bool is_cxl_restricted(struct pci_dev *pdev) static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev, struct cxl_register_map *map) { - struct cxl_port *port; struct cxl_dport *dport; resource_size_t component_reg_phys; @@ -482,14 +485,12 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev, .resource = CXL_RESOURCE_NONE, }; - port = cxl_pci_find_port(pdev, &dport); + struct cxl_port *port __free(put_cxl_port) = + cxl_pci_find_port(pdev, &dport); if (!port) return -EPROBE_DEFER; component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport); - - put_device(&port->dev); - if (component_reg_phys == CXL_RESOURCE_NONE) return -ENXIO; @@ -578,9 +579,10 @@ static void free_event_buf(void *buf) */ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_get_event_payload *buf; - buf = kvmalloc(mds->payload_size, GFP_KERNEL); + buf = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL); if (!buf) return -ENOMEM; mds->event.buf = buf; @@ -653,6 +655,7 @@ static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting) static int cxl_event_get_int_policy(struct cxl_memdev_state *mds, struct cxl_event_interrupt_policy *policy) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_cmd mbox_cmd = { .opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY, .payload_out = policy, @@ -660,7 +663,7 @@ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds, }; int rc; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) dev_err(mds->cxlds.dev, "Failed to get event interrupt policy : %d", rc); @@ -671,6 +674,7 @@ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds, static int cxl_event_config_msgnums(struct cxl_memdev_state *mds, struct cxl_event_interrupt_policy *policy) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -687,7 +691,7 @@ static int cxl_event_config_msgnums(struct cxl_memdev_state *mds, .size_in = sizeof(*policy), }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) { dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d", rc); @@ -786,6 +790,23 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge, return 0; } +static int cxl_pci_type3_init_mailbox(struct cxl_dev_state *cxlds) +{ + int rc; + + /* + * Fail the init if there's no mailbox. For a type3 this is out of spec. 
+ */ + if (!cxlds->reg_map.device_map.mbox.valid) + return -ENODEV; + + rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev); + if (rc) + return rc; + + return 0; +} + static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus); @@ -846,6 +867,10 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) dev_dbg(&pdev->dev, "Failed to map RAS capability.\n"); + rc = cxl_pci_type3_init_mailbox(cxlds); + if (rc) + return rc; + rc = cxl_await_media_ready(cxlds); if (rc == 0) cxlds->media_ready = true; diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 4ef93da2233558..d2d43a4fc05387 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2021 Intel Corporation. All rights reserved. */ #include -#include +#include #include #include #include @@ -102,13 +102,15 @@ static int cxl_pmem_get_config_size(struct cxl_memdev_state *mds, struct nd_cmd_get_config_size *cmd, unsigned int buf_len) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + if (sizeof(*cmd) > buf_len) return -EINVAL; *cmd = (struct nd_cmd_get_config_size){ .config_size = mds->lsa_size, .max_xfer = - mds->payload_size - sizeof(struct cxl_mbox_set_lsa), + cxl_mbox->payload_size - sizeof(struct cxl_mbox_set_lsa), }; return 0; @@ -118,6 +120,7 @@ static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds, struct nd_cmd_get_config_data_hdr *cmd, unsigned int buf_len) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_lsa get_lsa; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -139,7 +142,7 @@ static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds, .payload_out = cmd->out_buf, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); cmd->status = 0; return rc; @@ -149,6 +152,7 @@ static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds, struct nd_cmd_set_config_hdr *cmd, unsigned int buf_len) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_set_lsa *set_lsa; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -175,7 +179,7 @@ static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds, .size_in = struct_size(set_lsa, data, cmd->in_length), }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); /* * Set "firmware" status (4-packed bytes at the end of the input @@ -233,15 +237,13 @@ static int detach_nvdimm(struct device *dev, void *data) if (!is_cxl_nvdimm(dev)) return 0; - device_lock(dev); - if (!dev->driver) - goto out; - - cxl_nvd = to_cxl_nvdimm(dev); - if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) - release = true; -out: - device_unlock(dev); + scoped_guard(device, dev) { + if (dev->driver) { + cxl_nvd = to_cxl_nvdimm(dev); + if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) + release = true; + } + } if (release) device_release_driver(dev); return 0; diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index d7d5d982ce69f3..861dde65768fe3 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -98,7 +98,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port) struct cxl_port *root; int rc; - rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info); + rc = cxl_dvsec_rr_decode(cxlds->dev, port, &info); if (rc < 0) return rc; diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index 21856a3f408eee..ab793e8577c782 100644 --- 
a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation. All rights reserved. */ #include -#include +#include #include #include #include @@ -14,6 +14,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm, { struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); unsigned long security_flags = 0; struct cxl_get_security_output { @@ -29,7 +30,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm, .payload_out = &out, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) return 0; @@ -70,7 +71,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm, { struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_cmd mbox_cmd; struct cxl_set_pass set_pass; @@ -87,7 +88,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm, .payload_in = &set_pass, }; - return cxl_internal_send_cmd(mds, &mbox_cmd); + return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); } static int __cxl_pmem_security_disable(struct nvdimm *nvdimm, @@ -96,7 +97,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm, { struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_disable_pass dis_pass; struct cxl_mbox_cmd mbox_cmd; @@ -112,7 +113,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm, .payload_in = &dis_pass, }; - return cxl_internal_send_cmd(mds, &mbox_cmd); + return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); } static int cxl_pmem_security_disable(struct nvdimm *nvdimm, @@ -131,12 +132,12 @@ static int cxl_pmem_security_freeze(struct nvdimm *nvdimm) { struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_cmd mbox_cmd = { .opcode = CXL_MBOX_OP_FREEZE_SECURITY, }; - return cxl_internal_send_cmd(mds, &mbox_cmd); + return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); } static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, @@ -144,7 +145,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, { struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; u8 pass[NVDIMM_PASSPHRASE_LEN]; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -156,7 +157,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, .payload_in = pass, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) return rc; @@ -169,7 +170,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm, { struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + 
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_cmd mbox_cmd; struct cxl_pass_erase erase; int rc; @@ -185,7 +186,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm, .payload_in = &erase, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) return rc; diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig index a8874424414989..d656e4c0eb8463 100644 --- a/drivers/dax/Kconfig +++ b/drivers/dax/Kconfig @@ -30,7 +30,7 @@ config DEV_DAX_PMEM config DEV_DAX_HMEM tristate "HMEM DAX: direct access to 'specific purpose' memory" depends on EFI_SOFT_RESERVE - select NUMA_KEEP_MEMINFO if (NUMA && X86) + select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS default DEV_DAX help EFI 2.8 platforms, and others, may advertise 'specific purpose' diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 2051e4f73c8a43..9c1a729cd77e94 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -235,9 +235,9 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, unsigned int order) int id; struct dev_dax *dev_dax = filp->private_data; - dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) order:%d\n", current->comm, - (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read", - vmf->vma->vm_start, vmf->vma->vm_end, order); + dev_dbg(&dev_dax->dev, "%s: op=%s addr=%#lx order=%d\n", current->comm, + (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read", + vmf->address & ~((1UL << (order + PAGE_SHIFT)) - 1), order); id = dax_read_lock(); if (order == 0) diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index 00118580905af4..7d06c476d8e992 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -160,7 +160,6 @@ static void exynos_bus_exit(struct device *dev) platform_device_unregister(bus->icc_pdev); dev_pm_opp_of_remove_table(dev); - clk_disable_unprepare(bus->clk); dev_pm_opp_put_regulators(bus->opp_token); } @@ -171,7 +170,6 @@ static void exynos_bus_passive_exit(struct device *dev) platform_device_unregister(bus->icc_pdev); dev_pm_opp_of_remove_table(dev); - clk_disable_unprepare(bus->clk); } static int exynos_bus_parent_parse_of(struct device_node *np, @@ -247,23 +245,16 @@ static int exynos_bus_parse_of(struct device_node *np, int ret; /* Get the clock to provide each bus with source clock */ - bus->clk = devm_clk_get(dev, "bus"); - if (IS_ERR(bus->clk)) { - dev_err(dev, "failed to get bus clock\n"); - return PTR_ERR(bus->clk); - } - - ret = clk_prepare_enable(bus->clk); - if (ret < 0) { - dev_err(dev, "failed to get enable clock\n"); - return ret; - } + bus->clk = devm_clk_get_enabled(dev, "bus"); + if (IS_ERR(bus->clk)) + return dev_err_probe(dev, PTR_ERR(bus->clk), + "failed to get bus clock\n"); /* Get the freq and voltage from OPP table to scale the bus freq */ ret = dev_pm_opp_of_add_table(dev); if (ret < 0) { dev_err(dev, "failed to get OPP table\n"); - goto err_clk; + return ret; } rate = clk_get_rate(bus->clk); @@ -281,8 +272,6 @@ static int exynos_bus_parse_of(struct device_node *np, err_opp: dev_pm_opp_of_remove_table(dev); -err_clk: - clk_disable_unprepare(bus->clk); return ret; } @@ -453,7 +442,6 @@ static int exynos_bus_probe(struct platform_device *pdev) err: dev_pm_opp_of_remove_table(dev); - clk_disable_unprepare(bus->clk); err_reg: dev_pm_opp_put_regulators(bus->opp_token); diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c index 5dbc1e56ec08c5..2e4e981446fa8e 100644 --- a/drivers/devfreq/governor_performance.c +++ 
b/drivers/devfreq/governor_performance.c @@ -58,4 +58,5 @@ static void __exit devfreq_performance_exit(void) return; } module_exit(devfreq_performance_exit); +MODULE_DESCRIPTION("DEVFREQ Performance governor"); MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c index 4746af2435b022..f059e881480465 100644 --- a/drivers/devfreq/governor_powersave.c +++ b/drivers/devfreq/governor_powersave.c @@ -58,4 +58,5 @@ static void __exit devfreq_powersave_exit(void) return; } module_exit(devfreq_powersave_exit); +MODULE_DESCRIPTION("DEVFREQ Powersave governor"); MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index d57b82a2b5707f..c2343573636755 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c @@ -140,4 +140,5 @@ static void __exit devfreq_simple_ondemand_exit(void) return; } module_exit(devfreq_simple_ondemand_exit); +MODULE_DESCRIPTION("DEVFREQ Simple On-demand governor"); MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index d69672ccacc49b..d1aa6806b683ac 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -153,4 +153,5 @@ static void __exit devfreq_userspace_exit(void) return; } module_exit(devfreq_userspace_exit); +MODULE_DESCRIPTION("DEVFREQ Userspace governor"); MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c index 86850b7dea0912..49798f542d6809 100644 --- a/drivers/devfreq/imx-bus.c +++ b/drivers/devfreq/imx-bus.c @@ -59,7 +59,7 @@ static int imx_bus_init_icc(struct device *dev) struct imx_bus *priv = dev_get_drvdata(dev); const char *icc_driver_name; - if (!of_get_property(dev->of_node, "#interconnect-cells", NULL)) + if (!of_property_present(dev->of_node, "#interconnect-cells")) return 0; if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) { dev_warn(dev, "imx interconnect drivers disabled\n"); diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index c74ac197d5fea1..8a08ffde31e758 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -144,37 +144,38 @@ const struct dma_fence_ops dma_fence_array_ops = { EXPORT_SYMBOL(dma_fence_array_ops); /** - * dma_fence_array_create - Create a custom fence array + * dma_fence_array_alloc - Allocate a custom fence array + * @num_fences: [in] number of fences to add in the array + * + * Return dma fence array on success, NULL on failure + */ +struct dma_fence_array *dma_fence_array_alloc(int num_fences) +{ + struct dma_fence_array *array; + + return kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL); +} +EXPORT_SYMBOL(dma_fence_array_alloc); + +/** + * dma_fence_array_init - Init a custom fence array + * @array: [in] dma fence array to arm * @num_fences: [in] number of fences to add in the array * @fences: [in] array containing the fences * @context: [in] fence context to use * @seqno: [in] sequence number to use * @signal_on_any: [in] signal on any fence in the array * - * Allocate a dma_fence_array object and initialize the base fence with - * dma_fence_init(). - * In case of error it returns NULL. - * - * The caller should allocate the fences array with num_fences size - * and fill it with the fences it wants to add to the object. Ownership of this - * array is taken and dma_fence_put() is used on each fence on release. 
- * - * If @signal_on_any is true the fence array signals if any fence in the array - * signals, otherwise it signals when all fences in the array signal. + * Implementation of @dma_fence_array_create without allocation. Useful to init + * a preallocated dma fence array in the path of reclaim or dma fence signaling. */ -struct dma_fence_array *dma_fence_array_create(int num_fences, - struct dma_fence **fences, - u64 context, unsigned seqno, - bool signal_on_any) +void dma_fence_array_init(struct dma_fence_array *array, + int num_fences, struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any) { - struct dma_fence_array *array; - WARN_ON(!num_fences || !fences); - array = kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL); - if (!array) - return NULL; - array->num_fences = num_fences; spin_lock_init(&array->lock); @@ -200,6 +201,41 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, */ while (num_fences--) WARN_ON(dma_fence_is_container(fences[num_fences])); +} +EXPORT_SYMBOL(dma_fence_array_init); + +/** + * dma_fence_array_create - Create a custom fence array + * @num_fences: [in] number of fences to add in the array + * @fences: [in] array containing the fences + * @context: [in] fence context to use + * @seqno: [in] sequence number to use + * @signal_on_any: [in] signal on any fence in the array + * + * Allocate a dma_fence_array object and initialize the base fence with + * dma_fence_init(). + * In case of error it returns NULL. + * + * The caller should allocate the fences array with num_fences size + * and fill it with the fences it wants to add to the object. Ownership of this + * array is taken and dma_fence_put() is used on each fence on release. + * + * If @signal_on_any is true the fence array signals if any fence in the array + * signals, otherwise it signals when all fences in the array signal. + */ +struct dma_fence_array *dma_fence_array_create(int num_fences, + struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any) +{ + struct dma_fence_array *array; + + array = dma_fence_array_alloc(num_fences); + if (!array) + return NULL; + + dma_fence_array_init(array, num_fences, fences, + context, seqno, signal_on_any); return array; } diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index 2298ca5e112e0e..3cbe87d4a464dd 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -7,17 +7,15 @@ */ #include -#include #include #include +#include #include -#include #include -#include #include -#include #include -#include +#include +#include #include #define DEVNAME "dma_heap" @@ -28,9 +26,10 @@ * struct dma_heap - represents a dmabuf heap in the system * @name: used for debugging/device-node name * @ops: ops struct for this heap - * @heap_devt heap device node - * @list list head connecting to list of heaps - * @heap_cdev heap char device + * @priv: private data for this heap + * @heap_devt: heap device node + * @list: list head connecting to list of heaps + * @heap_cdev: heap char device * * Represents a heap of memory from which buffers can be made. */ @@ -193,11 +192,11 @@ static const struct file_operations dma_heap_fops = { }; /** - * dma_heap_get_drvdata() - get per-subdriver data for the heap + * dma_heap_get_drvdata - get per-heap driver data * @heap: DMA-Heap to retrieve private data for * * Returns: - * The per-subdriver data for the heap. + * The per-heap data for the heap. 
*/ void *dma_heap_get_drvdata(struct dma_heap *heap) { @@ -205,8 +204,8 @@ void *dma_heap_get_drvdata(struct dma_heap *heap) } /** - * dma_heap_get_name() - get heap name - * @heap: DMA-Heap to retrieve private data for + * dma_heap_get_name - get heap name + * @heap: DMA-Heap to retrieve the name of * * Returns: * The char* for the heap name. @@ -216,6 +215,10 @@ const char *dma_heap_get_name(struct dma_heap *heap) return heap->name; } +/** + * dma_heap_add - adds a heap to dmabuf heaps + * @exp_info: information needed to register this heap + */ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) { struct dma_heap *heap, *h, *err_ret; diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c index 6a1bfcd0cc2108..cf2ce3744ce6e9 100644 --- a/drivers/dma-buf/st-dma-fence.c +++ b/drivers/dma-buf/st-dma-fence.c @@ -402,7 +402,7 @@ static int test_wait_timeout(void *arg) if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) { if (timer_pending(&wt.timer)) { - pr_notice("Timer did not fire within the jiffie!\n"); + pr_notice("Timer did not fire within the jiffy!\n"); err = 0; /* not our fault! */ } else { pr_err("Wait reported incomplete after timeout\n"); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index cc0a62c348616b..d9ec1e69e42831 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -369,6 +369,15 @@ config K3_DMA Support the DMA engine for Hisilicon K3 platform devices. +config LOONGSON1_APB_DMA + tristate "Loongson1 APB DMA support" + depends on MACH_LOONGSON32 || COMPILE_TEST + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + This selects support for the APB DMA controller in Loongson1 SoCs, + which is required by Loongson1 NAND and audio support. + config LPC18XX_DMAMUX bool "NXP LPC18xx/43xx DMA MUX for PL080" depends on ARCH_LPC18XX || COMPILE_TEST @@ -378,6 +387,15 @@ config LPC18XX_DMAMUX Enable support for DMA on NXP LPC18xx/43xx platforms with PL080 and multiplexed DMA request lines. +config LPC32XX_DMAMUX + bool "NXP LPC32xx DMA MUX for PL080" + depends on ARCH_LPC32XX || COMPILE_TEST + depends on OF && AMBA_PL08X + select MFD_SYSCON + help + Support for PL080 multiplexed DMA request lines on + LPC32XX platform. + config LS2X_APB_DMA tristate "Loongson LS2X APB DMA support" depends on LOONGARCH || COMPILE_TEST @@ -716,6 +734,8 @@ config XILINX_ZYNQMP_DPDMA display driver. 
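The dma_fence_array_alloc()/dma_fence_array_init() split introduced above lets a driver allocate the array up front, where blocking allocations are allowed, and only arm it later on the fence-signalling or reclaim path, where the allocation done by dma_fence_array_create() would be problematic. A hedged usage sketch; the demo_* helpers and their callers are illustrative, not an existing API.

static struct dma_fence_array *demo_prealloc(int num_fences)
{
        /* Outside the signalling path: blocking allocation is fine here. */
        return dma_fence_array_alloc(num_fences);
}

static void demo_arm(struct dma_fence_array *array, struct dma_fence **fences,
                     int num_fences, u64 context, unsigned int seqno)
{
        /*
         * Safe on the signalling/reclaim path: dma_fence_array_init() only
         * initializes the preallocated array, it does not allocate.
         */
        dma_fence_array_init(array, num_fences, fences, context, seqno, false);
}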
# driver files +source "drivers/dma/amd/Kconfig" + source "drivers/dma/bestcomm/Kconfig" source "drivers/dma/mediatek/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 374ea98faf43e2..ad6a03c052ec4a 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -49,7 +49,9 @@ obj-$(CONFIG_INTEL_IDMA64) += idma64.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-y += idxd/ obj-$(CONFIG_K3_DMA) += k3dma.o +obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o +obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o @@ -83,6 +85,7 @@ obj-$(CONFIG_ST_FDMA) += st_fdma.o obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/ obj-$(CONFIG_INTEL_LDMA) += lgm/ +obj-y += amd/ obj-y += mediatek/ obj-y += qcom/ obj-y += stm32/ diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 5906eae26e2aee..a58a1600dd6590 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -112,7 +112,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, } /** - * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources + * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources * @adev: ACPI device to match with * @adma: struct acpi_dma of the given DMA controller * @@ -305,7 +305,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); * found. * * Return: - * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. + * 0, if no information is available, -1 on mismatch, and 1 otherwise. */ static int acpi_dma_update_dma_spec(struct acpi_dma *adma, struct acpi_dma_spec *dma_spec) diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 0968176f323da8..e6a6566b309ee4 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c @@ -153,7 +153,7 @@ struct msgdma_extended_desc { /** * struct msgdma_sw_desc - implements a sw descriptor * @async_tx: support for the async_tx api - * @hw_desc: assosiated HW descriptor + * @hw_desc: associated HW descriptor * @node: node to move from the free list to the tx list * @tx_list: transmit list node */ @@ -511,7 +511,7 @@ static void msgdma_copy_one(struct msgdma_device *mdev, * of the DMA controller. The descriptor will get flushed to the * FIFO, once the last word (control word) is written. Since we * are not 100% sure that memcpy() writes all word in the "correct" - * oder (address from low to high) on all architectures, we make + * order (address from low to high) on all architectures, we make * sure this control word is written last by single coding it and * adding some write-barriers here. */ diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 73a5cfb4da8a6d..38cdbca59485ca 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2006 ARM Ltd. * Copyright (c) 2010 ST-Ericsson SA - * Copyirght (c) 2017 Linaro Ltd. + * Copyright (c) 2017 Linaro Ltd. 
* * Author: Peter Pearse * Author: Linus Walleij diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig new file mode 100644 index 00000000000000..7d1f51d6967505 --- /dev/null +++ b/drivers/dma/amd/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config AMD_QDMA + tristate "AMD Queue-based DMA" + depends on HAS_IOMEM + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select REGMAP_MMIO + help + Enable support for the AMD Queue-based DMA subsystem. The primary + mechanism to transfer data using the QDMA is for the QDMA engine to + operate on instructions (descriptors) provided by the host operating + system. Using the descriptors, the QDMA can move data in either the + Host to Card (H2C) direction or the Card to Host (C2H) direction. diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile new file mode 100644 index 00000000000000..37212be9364fbc --- /dev/null +++ b/drivers/dma/amd/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_AMD_QDMA) += qdma/ diff --git a/drivers/dma/amd/qdma/Makefile b/drivers/dma/amd/qdma/Makefile new file mode 100644 index 00000000000000..011268fef37716 --- /dev/null +++ b/drivers/dma/amd/qdma/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_AMD_QDMA) += amd-qdma.o + +amd-qdma-$(CONFIG_AMD_QDMA) := qdma.o qdma-comm-regs.o diff --git a/drivers/dma/amd/qdma/qdma-comm-regs.c b/drivers/dma/amd/qdma/qdma-comm-regs.c new file mode 100644 index 00000000000000..9162f9d367cc15 --- /dev/null +++ b/drivers/dma/amd/qdma/qdma-comm-regs.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2023-2024, Advanced Micro Devices, Inc. + */ + +#ifndef __QDMA_REGS_DEF_H +#define __QDMA_REGS_DEF_H + +#include "qdma.h" + +const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX] = { + [QDMA_REGO_CTXT_DATA] = QDMA_REGO(0x804, 8), + [QDMA_REGO_CTXT_CMD] = QDMA_REGO(0x844, 1), + [QDMA_REGO_CTXT_MASK] = QDMA_REGO(0x824, 8), + [QDMA_REGO_MM_H2C_CTRL] = QDMA_REGO(0x1004, 1), + [QDMA_REGO_MM_C2H_CTRL] = QDMA_REGO(0x1204, 1), + [QDMA_REGO_QUEUE_COUNT] = QDMA_REGO(0x120, 1), + [QDMA_REGO_RING_SIZE] = QDMA_REGO(0x204, 1), + [QDMA_REGO_H2C_PIDX] = QDMA_REGO(0x18004, 1), + [QDMA_REGO_C2H_PIDX] = QDMA_REGO(0x18008, 1), + [QDMA_REGO_INTR_CIDX] = QDMA_REGO(0x18000, 1), + [QDMA_REGO_FUNC_ID] = QDMA_REGO(0x12c, 1), + [QDMA_REGO_ERR_INT] = QDMA_REGO(0xb04, 1), + [QDMA_REGO_ERR_STAT] = QDMA_REGO(0x248, 1), +}; + +const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX] = { + /* QDMA_REGO_CTXT_DATA fields */ + [QDMA_REGF_IRQ_ENABLE] = QDMA_REGF(53, 53), + [QDMA_REGF_WBK_ENABLE] = QDMA_REGF(52, 52), + [QDMA_REGF_WBI_CHECK] = QDMA_REGF(34, 34), + [QDMA_REGF_IRQ_ARM] = QDMA_REGF(16, 16), + [QDMA_REGF_IRQ_VEC] = QDMA_REGF(138, 128), + [QDMA_REGF_IRQ_AGG] = QDMA_REGF(139, 139), + [QDMA_REGF_WBI_INTVL_ENABLE] = QDMA_REGF(35, 35), + [QDMA_REGF_MRKR_DISABLE] = QDMA_REGF(62, 62), + [QDMA_REGF_QUEUE_ENABLE] = QDMA_REGF(32, 32), + [QDMA_REGF_QUEUE_MODE] = QDMA_REGF(63, 63), + [QDMA_REGF_DESC_BASE] = QDMA_REGF(127, 64), + [QDMA_REGF_DESC_SIZE] = QDMA_REGF(49, 48), + [QDMA_REGF_RING_ID] = QDMA_REGF(47, 44), + [QDMA_REGF_QUEUE_BASE] = QDMA_REGF(11, 0), + [QDMA_REGF_QUEUE_MAX] = QDMA_REGF(44, 32), + [QDMA_REGF_FUNCTION_ID] = QDMA_REGF(24, 17), + [QDMA_REGF_INTR_AGG_BASE] = QDMA_REGF(66, 15), + [QDMA_REGF_INTR_VECTOR] = QDMA_REGF(11, 1), + [QDMA_REGF_INTR_SIZE] = QDMA_REGF(69, 67), + [QDMA_REGF_INTR_VALID] = QDMA_REGF(0, 0), + [QDMA_REGF_INTR_COLOR] = QDMA_REGF(14, 14), + 
[QDMA_REGF_INTR_FUNCTION_ID] = QDMA_REGF(125, 114), + /* QDMA_REGO_CTXT_CMD fields */ + [QDMA_REGF_CMD_INDX] = QDMA_REGF(19, 7), + [QDMA_REGF_CMD_CMD] = QDMA_REGF(6, 5), + [QDMA_REGF_CMD_TYPE] = QDMA_REGF(4, 1), + [QDMA_REGF_CMD_BUSY] = QDMA_REGF(0, 0), + /* QDMA_REGO_QUEUE_COUNT fields */ + [QDMA_REGF_QUEUE_COUNT] = QDMA_REGF(11, 0), + /* QDMA_REGO_ERR_INT fields */ + [QDMA_REGF_ERR_INT_FUNC] = QDMA_REGF(11, 0), + [QDMA_REGF_ERR_INT_VEC] = QDMA_REGF(22, 12), + [QDMA_REGF_ERR_INT_ARM] = QDMA_REGF(24, 24), +}; + +#endif /* __QDMA_REGS_DEF_H */ diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c new file mode 100644 index 00000000000000..b0a1f3ad851b1e --- /dev/null +++ b/drivers/dma/amd/qdma/qdma.c @@ -0,0 +1,1143 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * DMA driver for AMD Queue-based DMA Subsystem + * + * Copyright (C) 2023-2024, Advanced Micro Devices, Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qdma.h" + +#define CHAN_STR(q) (((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H") +#define QDMA_REG_OFF(d, r) ((d)->roffs[r].off) + +/* MMIO regmap config for all QDMA registers */ +static const struct regmap_config qdma_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan) +{ + return container_of(chan, struct qdma_queue, vchan.chan); +} + +static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct qdma_mm_vdesc, vdesc); +} + +static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev) +{ + u32 idx; + + idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx; + qdev->qintr_ring_idx %= qdev->qintr_ring_num; + + return idx; +} + +static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data, + enum qdma_reg_fields field) +{ + const struct qdma_reg_field *f = &qdev->rfields[field]; + u16 low_pos, hi_pos, low_bit, hi_bit; + u64 value = 0, mask; + + low_pos = f->lsb / BITS_PER_TYPE(*data); + hi_pos = f->msb / BITS_PER_TYPE(*data); + + if (low_pos == hi_pos) { + low_bit = f->lsb % BITS_PER_TYPE(*data); + hi_bit = f->msb % BITS_PER_TYPE(*data); + mask = GENMASK(hi_bit, low_bit); + value = (data[low_pos] & mask) >> low_bit; + } else if (hi_pos == low_pos + 1) { + low_bit = f->lsb % BITS_PER_TYPE(*data); + hi_bit = low_bit + (f->msb - f->lsb); + value = ((u64)data[hi_pos] << BITS_PER_TYPE(*data)) | + data[low_pos]; + mask = GENMASK_ULL(hi_bit, low_bit); + value = (value & mask) >> low_bit; + } else { + hi_bit = f->msb % BITS_PER_TYPE(*data); + mask = GENMASK(hi_bit, 0); + value = data[hi_pos] & mask; + low_bit = f->msb - f->lsb - hi_bit; + value <<= low_bit; + low_bit -= 32; + value |= (u64)data[hi_pos - 1] << low_bit; + mask = GENMASK(31, 32 - low_bit); + value |= (data[hi_pos - 2] & mask) >> low_bit; + } + + return value; +} + +static void qdma_set_field(const struct qdma_device *qdev, u32 *data, + enum qdma_reg_fields field, u64 value) +{ + const struct qdma_reg_field *f = &qdev->rfields[field]; + u16 low_pos, hi_pos, low_bit; + + low_pos = f->lsb / BITS_PER_TYPE(*data); + hi_pos = f->msb / BITS_PER_TYPE(*data); + low_bit = f->lsb % BITS_PER_TYPE(*data); + + data[low_pos++] |= value << low_bit; + if (low_pos <= hi_pos) + data[low_pos++] |= (u32)(value >> (32 - low_bit)); + if (low_pos <= hi_pos) + data[low_pos] |= (u32)(value >> (64 - low_bit)); +} + +static inline int qdma_reg_write(const struct qdma_device *qdev, + const u32 *data, 
enum qdma_regs reg) +{ + const struct qdma_reg *r = &qdev->roffs[reg]; + int ret; + + if (r->count > 1) + ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count); + else + ret = regmap_write(qdev->regmap, r->off, *data); + + return ret; +} + +static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data, + enum qdma_regs reg) +{ + const struct qdma_reg *r = &qdev->roffs[reg]; + int ret; + + if (r->count > 1) + ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count); + else + ret = regmap_read(qdev->regmap, r->off, data); + + return ret; +} + +static int qdma_context_cmd_execute(const struct qdma_device *qdev, + enum qdma_ctxt_type type, + enum qdma_ctxt_cmd cmd, u16 index) +{ + u32 value = 0; + int ret; + + qdma_set_field(qdev, &value, QDMA_REGF_CMD_INDX, index); + qdma_set_field(qdev, &value, QDMA_REGF_CMD_CMD, cmd); + qdma_set_field(qdev, &value, QDMA_REGF_CMD_TYPE, type); + + ret = qdma_reg_write(qdev, &value, QDMA_REGO_CTXT_CMD); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(qdev->regmap, + QDMA_REG_OFF(qdev, QDMA_REGO_CTXT_CMD), + value, + !qdma_get_field(qdev, &value, + QDMA_REGF_CMD_BUSY), + QDMA_POLL_INTRVL_US, + QDMA_POLL_TIMEOUT_US); + if (ret) { + qdma_err(qdev, "Context command execution timed out"); + return ret; + } + + return 0; +} + +static int qdma_context_write_data(const struct qdma_device *qdev, + const u32 *data) +{ + u32 mask[QDMA_CTXT_REGMAP_LEN]; + int ret; + + memset(mask, ~0, sizeof(mask)); + + ret = qdma_reg_write(qdev, mask, QDMA_REGO_CTXT_MASK); + if (ret) + return ret; + + ret = qdma_reg_write(qdev, data, QDMA_REGO_CTXT_DATA); + if (ret) + return ret; + + return 0; +} + +static void qdma_prep_sw_desc_context(const struct qdma_device *qdev, + const struct qdma_ctxt_sw_desc *ctxt, + u32 *data) +{ + memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data)); + qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base); + qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec); + qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid); + + qdma_set_field(qdev, data, QDMA_REGF_DESC_SIZE, QDMA_DESC_SIZE_32B); + qdma_set_field(qdev, data, QDMA_REGF_RING_ID, QDMA_DEFAULT_RING_ID); + qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MODE, QDMA_QUEUE_OP_MM); + qdma_set_field(qdev, data, QDMA_REGF_IRQ_ENABLE, 1); + qdma_set_field(qdev, data, QDMA_REGF_WBK_ENABLE, 1); + qdma_set_field(qdev, data, QDMA_REGF_WBI_CHECK, 1); + qdma_set_field(qdev, data, QDMA_REGF_IRQ_ARM, 1); + qdma_set_field(qdev, data, QDMA_REGF_IRQ_AGG, 1); + qdma_set_field(qdev, data, QDMA_REGF_WBI_INTVL_ENABLE, 1); + qdma_set_field(qdev, data, QDMA_REGF_QUEUE_ENABLE, 1); + qdma_set_field(qdev, data, QDMA_REGF_MRKR_DISABLE, 1); +} + +static void qdma_prep_intr_context(const struct qdma_device *qdev, + const struct qdma_ctxt_intr *ctxt, + u32 *data) +{ + memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data)); + qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base); + qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec); + qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size); + qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid); + qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color); + qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid); +} + +static void qdma_prep_fmap_context(const struct qdma_device *qdev, + const struct qdma_ctxt_fmap *ctxt, + u32 *data) +{ + memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data)); + qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase); + qdma_set_field(qdev, data, 
QDMA_REGF_QUEUE_MAX, ctxt->qmax); +} + +/* + * Program the indirect context register space + * + * Once the queue is enabled, context is dynamically updated by hardware. Any + * modification of the context through this API when the queue is enabled can + * result in unexpected behavior. Reading the context when the queue is enabled + * is not recommended as it can result in reduced performance. + */ +static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type, + enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt) +{ + int ret; + + mutex_lock(&qdev->ctxt_lock); + if (cmd == QDMA_CTXT_WRITE) { + ret = qdma_context_write_data(qdev, ctxt); + if (ret) + goto failed; + } + + ret = qdma_context_cmd_execute(qdev, type, cmd, index); + if (ret) + goto failed; + + if (cmd == QDMA_CTXT_READ) { + ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA); + if (ret) + goto failed; + } + +failed: + mutex_unlock(&qdev->ctxt_lock); + + return ret; +} + +static int qdma_check_queue_status(struct qdma_device *qdev, + enum dma_transfer_direction dir, u16 qid) +{ + u32 status, data[QDMA_CTXT_REGMAP_LEN] = {0}; + enum qdma_ctxt_type type; + int ret; + + if (dir == DMA_MEM_TO_DEV) + type = QDMA_CTXT_DESC_SW_H2C; + else + type = QDMA_CTXT_DESC_SW_C2H; + ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data); + if (ret) + return ret; + + status = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE); + if (status) { + qdma_err(qdev, "queue %d already in use", qid); + return -EBUSY; + } + + return 0; +} + +static int qdma_clear_queue_context(const struct qdma_queue *queue) +{ + enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C, + QDMA_CTXT_DESC_HW_H2C, + QDMA_CTXT_DESC_CR_H2C, + QDMA_CTXT_PFTCH, }; + enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H, + QDMA_CTXT_DESC_HW_C2H, + QDMA_CTXT_DESC_CR_C2H, + QDMA_CTXT_PFTCH, }; + struct qdma_device *qdev = queue->qdev; + enum qdma_ctxt_type *type; + int ret, num, i; + + if (queue->dir == DMA_MEM_TO_DEV) { + type = h2c_types; + num = ARRAY_SIZE(h2c_types); + } else { + type = c2h_types; + num = ARRAY_SIZE(c2h_types); + } + for (i = 0; i < num; i++) { + ret = qdma_prog_context(qdev, type[i], QDMA_CTXT_CLEAR, + queue->qid, NULL); + if (ret) { + qdma_err(qdev, "Failed to clear ctxt %d", type[i]); + return ret; + } + } + + return 0; +} + +static int qdma_setup_fmap_context(struct qdma_device *qdev) +{ + u32 ctxt[QDMA_CTXT_REGMAP_LEN]; + struct qdma_ctxt_fmap fmap; + int ret; + + ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_CLEAR, + qdev->fid, NULL); + if (ret) { + qdma_err(qdev, "Failed clearing context"); + return ret; + } + + fmap.qbase = 0; + fmap.qmax = qdev->chan_num * 2; + qdma_prep_fmap_context(qdev, &fmap, ctxt); + ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_WRITE, + qdev->fid, ctxt); + if (ret) + qdma_err(qdev, "Failed setup fmap, ret %d", ret); + + return ret; +} + +static int qdma_setup_queue_context(struct qdma_device *qdev, + const struct qdma_ctxt_sw_desc *sw_desc, + enum dma_transfer_direction dir, u16 qid) +{ + u32 ctxt[QDMA_CTXT_REGMAP_LEN]; + enum qdma_ctxt_type type; + int ret; + + if (dir == DMA_MEM_TO_DEV) + type = QDMA_CTXT_DESC_SW_H2C; + else + type = QDMA_CTXT_DESC_SW_C2H; + + qdma_prep_sw_desc_context(qdev, sw_desc, ctxt); + /* Setup SW descriptor context */ + ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt); + if (ret) + qdma_err(qdev, "Failed setup SW desc ctxt for queue: %d", qid); + + return ret; +} + +/* + * Enable or disable memory-mapped DMA engines + * 1: enable, 0: disable + 
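qdma_get_field() and qdma_set_field() above pack and unpack fields whose msb/lsb positions, taken from the qdma_regfs_default table, can span more than one 32-bit context word. Below is a standalone, bit-by-bit userspace analogue of the extraction, deliberately kept simple (the driver works word-wise instead) and assuming field widths of at most 64 bits; it is a reference sketch, not the driver's implementation.

#include <stdint.h>
#include <stdio.h>

/* Extract bits [msb:lsb], where lsb/msb are absolute bit positions across the word array. */
static uint64_t get_field(const uint32_t *data, unsigned int msb, unsigned int lsb)
{
        uint64_t value = 0;

        for (unsigned int bit = lsb; bit <= msb; bit++)
                if (data[bit / 32] & (1u << (bit % 32)))
                        value |= 1ull << (bit - lsb);
        return value;
}

int main(void)
{
        /* e.g. QDMA_REGF_DESC_BASE occupies bits 127..64 of the context data */
        uint32_t ctxt[4] = { 0, 0, 0x89abcdef, 0x01234567 };

        printf("%#llx\n", (unsigned long long)get_field(ctxt, 127, 64));
        /* prints 0x123456789abcdef */
        return 0;
}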
*/ +static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl) +{ + int ret; + + ret = qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_H2C_CTRL); + ret |= qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_C2H_CTRL); + + return ret; +} + +static int qdma_get_hw_info(struct qdma_device *qdev) +{ + struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev); + u32 value = 0; + int ret; + + ret = qdma_reg_read(qdev, &value, QDMA_REGO_QUEUE_COUNT); + if (ret) + return ret; + + value = qdma_get_field(qdev, &value, QDMA_REGF_QUEUE_COUNT) + 1; + if (pdata->max_mm_channels * 2 > value) { + qdma_err(qdev, "not enough hw queues %d", value); + return -EINVAL; + } + qdev->chan_num = pdata->max_mm_channels; + + ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID); + if (ret) + return ret; + + qdma_info(qdev, "max channel %d, function id %d", + qdev->chan_num, qdev->fid); + + return 0; +} + +static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx) +{ + struct qdma_device *qdev = queue->qdev; + + return regmap_write(qdev->regmap, queue->pidx_reg, + pidx | QDMA_QUEUE_ARM_BIT); +} + +static inline int qdma_update_cidx(const struct qdma_queue *queue, + u16 ridx, u16 cidx) +{ + struct qdma_device *qdev = queue->qdev; + + return regmap_write(qdev->regmap, queue->cidx_reg, + ((u32)ridx << 16) | cidx); +} + +/** + * qdma_free_vdesc - Free descriptor + * @vdesc: Virtual DMA descriptor + */ +static void qdma_free_vdesc(struct virt_dma_desc *vdesc) +{ + struct qdma_mm_vdesc *vd = to_qdma_vdesc(vdesc); + + kfree(vd); +} + +static int qdma_alloc_queues(struct qdma_device *qdev, + enum dma_transfer_direction dir) +{ + struct qdma_queue *q, **queues; + u32 i, pidx_base; + int ret; + + if (dir == DMA_MEM_TO_DEV) { + queues = &qdev->h2c_queues; + pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_H2C_PIDX); + } else { + queues = &qdev->c2h_queues; + pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_C2H_PIDX); + } + + *queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q), + GFP_KERNEL); + if (!*queues) + return -ENOMEM; + + for (i = 0; i < qdev->chan_num; i++) { + ret = qdma_check_queue_status(qdev, dir, i); + if (ret) + return ret; + + q = &(*queues)[i]; + q->ring_size = QDMA_DEFAULT_RING_SIZE; + q->idx_mask = q->ring_size - 2; + q->qdev = qdev; + q->dir = dir; + q->qid = i; + q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE; + q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) + + i * QDMA_DMAP_REG_STRIDE; + q->vchan.desc_free = qdma_free_vdesc; + vchan_init(&q->vchan, &qdev->dma_dev); + } + + return 0; +} + +static int qdma_device_verify(struct qdma_device *qdev) +{ + u32 value; + int ret; + + ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value); + if (ret) + return ret; + + value = FIELD_GET(QDMA_IDENTIFIER_MASK, value); + if (value != QDMA_IDENTIFIER) { + qdma_err(qdev, "Invalid identifier"); + return -ENODEV; + } + qdev->rfields = qdma_regfs_default; + qdev->roffs = qdma_regos_default; + + return 0; +} + +static int qdma_device_setup(struct qdma_device *qdev) +{ + struct device *dev = &qdev->pdev->dev; + u32 ring_sz = QDMA_DEFAULT_RING_SIZE; + int ret = 0; + + while (dev && get_dma_ops(dev)) + dev = dev->parent; + if (!dev) { + qdma_err(qdev, "dma device not found"); + return -EINVAL; + } + set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev)); + + ret = qdma_setup_fmap_context(qdev); + if (ret) { + qdma_err(qdev, "Failed setup fmap context"); + return ret; + } + + /* Setup global ring buffer size at QDMA_DEFAULT_RING_ID index */ + ret = qdma_reg_write(qdev, &ring_sz, QDMA_REGO_RING_SIZE); + 
if (ret) { + qdma_err(qdev, "Failed to setup ring %d of size %ld", + QDMA_DEFAULT_RING_ID, QDMA_DEFAULT_RING_SIZE); + return ret; + } + + /* Enable memory-mapped DMA engine in both directions */ + ret = qdma_sgdma_control(qdev, 1); + if (ret) { + qdma_err(qdev, "Failed to SGDMA with error %d", ret); + return ret; + } + + ret = qdma_alloc_queues(qdev, DMA_MEM_TO_DEV); + if (ret) { + qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret); + return ret; + } + + ret = qdma_alloc_queues(qdev, DMA_DEV_TO_MEM); + if (ret) { + qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret); + return ret; + } + + return 0; +} + +/** + * qdma_free_queue_resources() - Free queue resources + * @chan: DMA channel + */ +static void qdma_free_queue_resources(struct dma_chan *chan) +{ + struct qdma_queue *queue = to_qdma_queue(chan); + struct qdma_device *qdev = queue->qdev; + struct device *dev = qdev->dma_dev.dev; + + qdma_clear_queue_context(queue); + vchan_free_chan_resources(&queue->vchan); + dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE, + queue->desc_base, queue->dma_desc_base); +} + +/** + * qdma_alloc_queue_resources() - Allocate queue resources + * @chan: DMA channel + */ +static int qdma_alloc_queue_resources(struct dma_chan *chan) +{ + struct qdma_queue *queue = to_qdma_queue(chan); + struct qdma_device *qdev = queue->qdev; + struct qdma_ctxt_sw_desc desc; + size_t size; + int ret; + + ret = qdma_clear_queue_context(queue); + if (ret) + return ret; + + size = queue->ring_size * QDMA_MM_DESC_SIZE; + queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size, + &queue->dma_desc_base, + GFP_KERNEL); + if (!queue->desc_base) { + qdma_err(qdev, "Failed to allocate descriptor ring"); + return -ENOMEM; + } + + /* Setup SW descriptor queue context for DMA memory map */ + desc.vec = qdma_get_intr_ring_idx(qdev); + desc.desc_base = queue->dma_desc_base; + ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid); + if (ret) { + qdma_err(qdev, "Failed to setup SW desc ctxt for %s", + chan->name); + dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base, + queue->dma_desc_base); + return ret; + } + + queue->pidx = 0; + queue->cidx = 0; + + return 0; +} + +static bool qdma_filter_fn(struct dma_chan *chan, void *param) +{ + struct qdma_queue *queue = to_qdma_queue(chan); + struct qdma_queue_info *info = param; + + return info->dir == queue->dir; +} + +static int qdma_xfer_start(struct qdma_queue *queue) +{ + struct qdma_device *qdev = queue->qdev; + int ret; + + if (!vchan_next_desc(&queue->vchan)) + return 0; + + qdma_dbg(qdev, "Tnx kickoff with P: %d for %s%d", + queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid); + + ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx); + if (ret) { + qdma_err(qdev, "Failed to update PIDX to %d for %s queue: %d", + queue->pidx, CHAN_STR(queue), queue->qid); + } + + return ret; +} + +static void qdma_issue_pending(struct dma_chan *chan) +{ + struct qdma_queue *queue = to_qdma_queue(chan); + unsigned long flags; + + spin_lock_irqsave(&queue->vchan.lock, flags); + if (vchan_issue_pending(&queue->vchan)) { + if (queue->submitted_vdesc) { + queue->issued_vdesc = queue->submitted_vdesc; + queue->submitted_vdesc = NULL; + } + qdma_xfer_start(queue); + } + + spin_unlock_irqrestore(&queue->vchan.lock, flags); +} + +static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q) +{ + struct qdma_mm_desc *desc; + + if (((q->pidx + 1) & q->idx_mask) == q->cidx) + return NULL; + + desc = q->desc_base + q->pidx; + q->pidx = (q->pidx + 1) & 
q->idx_mask; + + return desc; +} + +static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc) +{ + struct qdma_mm_desc *desc; + struct scatterlist *sg; + u64 addr, *src, *dst; + u32 rest, len; + int ret = 0; + u32 i; + + if (!vdesc->sg_len) + return 0; + + if (q->dir == DMA_MEM_TO_DEV) { + dst = &vdesc->dev_addr; + src = &addr; + } else { + dst = &addr; + src = &vdesc->dev_addr; + } + + for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) { + addr = sg_dma_address(sg) + vdesc->sg_off; + rest = sg_dma_len(sg) - vdesc->sg_off; + while (rest) { + len = min_t(u32, rest, QDMA_MM_DESC_MAX_LEN); + desc = qdma_get_desc(q); + if (!desc) { + ret = -EBUSY; + goto out; + } + + desc->src_addr = cpu_to_le64(*src); + desc->dst_addr = cpu_to_le64(*dst); + desc->len = cpu_to_le32(len); + + vdesc->dev_addr += len; + vdesc->sg_off += len; + vdesc->pending_descs++; + addr += len; + rest -= len; + } + vdesc->sg_off = 0; + } +out: + vdesc->sg_len -= i; + vdesc->pidx = q->pidx; + return ret; +} + +static void qdma_fill_pending_vdesc(struct qdma_queue *q) +{ + struct virt_dma_chan *vc = &q->vchan; + struct qdma_mm_vdesc *vdesc = NULL; + struct virt_dma_desc *vd; + int ret; + + if (!list_empty(&vc->desc_issued)) { + vd = &q->issued_vdesc->vdesc; + list_for_each_entry_from(vd, &vc->desc_issued, node) { + vdesc = to_qdma_vdesc(vd); + ret = qdma_hw_enqueue(q, vdesc); + if (ret) { + q->issued_vdesc = vdesc; + return; + } + } + q->issued_vdesc = vdesc; + } + + if (list_empty(&vc->desc_submitted)) + return; + + if (q->submitted_vdesc) + vd = &q->submitted_vdesc->vdesc; + else + vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node); + + list_for_each_entry_from(vd, &vc->desc_submitted, node) { + vdesc = to_qdma_vdesc(vd); + ret = qdma_hw_enqueue(q, vdesc); + if (ret) + break; + } + q->submitted_vdesc = vdesc; +} + +static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct qdma_queue *q = to_qdma_queue(&vc->chan); + struct virt_dma_desc *vd; + unsigned long flags; + dma_cookie_t cookie; + + vd = container_of(tx, struct virt_dma_desc, tx); + spin_lock_irqsave(&vc->lock, flags); + cookie = dma_cookie_assign(tx); + + list_move_tail(&vd->node, &vc->desc_submitted); + qdma_fill_pending_vdesc(q); + spin_unlock_irqrestore(&vc->lock, flags); + + return cookie; +} + +static struct dma_async_tx_descriptor * +qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) +{ + struct qdma_queue *q = to_qdma_queue(chan); + struct dma_async_tx_descriptor *tx; + struct qdma_mm_vdesc *vdesc; + + vdesc = kzalloc(sizeof(*vdesc), GFP_NOWAIT); + if (!vdesc) + return NULL; + vdesc->sgl = sgl; + vdesc->sg_len = sg_len; + if (dir == DMA_MEM_TO_DEV) + vdesc->dev_addr = q->cfg.dst_addr; + else + vdesc->dev_addr = q->cfg.src_addr; + + tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags); + tx->tx_submit = qdma_tx_submit; + + return tx; +} + +static int qdma_device_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct qdma_queue *q = to_qdma_queue(chan); + + memcpy(&q->cfg, cfg, sizeof(*cfg)); + + return 0; +} + +static int qdma_arm_err_intr(const struct qdma_device *qdev) +{ + u32 value = 0; + + qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid); + qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx); + qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_ARM, 1); + + return qdma_reg_write(qdev, &value, 
QDMA_REGO_ERR_INT); +} + +static irqreturn_t qdma_error_isr(int irq, void *data) +{ + struct qdma_device *qdev = data; + u32 err_stat = 0; + int ret; + + ret = qdma_reg_read(qdev, &err_stat, QDMA_REGO_ERR_STAT); + if (ret) { + qdma_err(qdev, "read error state failed, ret %d", ret); + goto out; + } + + qdma_err(qdev, "global error %d", err_stat); + ret = qdma_reg_write(qdev, &err_stat, QDMA_REGO_ERR_STAT); + if (ret) + qdma_err(qdev, "clear error state failed, ret %d", ret); + +out: + qdma_arm_err_intr(qdev); + return IRQ_HANDLED; +} + +static irqreturn_t qdma_queue_isr(int irq, void *data) +{ + struct qdma_intr_ring *intr = data; + struct qdma_queue *q = NULL; + struct qdma_device *qdev; + u32 index, comp_desc; + u64 intr_ent; + u8 color; + int ret; + u16 qid; + + qdev = intr->qdev; + index = intr->cidx; + while (1) { + struct virt_dma_desc *vd; + struct qdma_mm_vdesc *vdesc; + unsigned long flags; + u32 cidx; + + intr_ent = le64_to_cpu(intr->base[index]); + color = FIELD_GET(QDMA_INTR_MASK_COLOR, intr_ent); + if (color != intr->color) + break; + + qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent); + if (FIELD_GET(QDMA_INTR_MASK_TYPE, intr_ent)) + q = qdev->c2h_queues; + else + q = qdev->h2c_queues; + q += qid; + + cidx = FIELD_GET(QDMA_INTR_MASK_CIDX, intr_ent); + + spin_lock_irqsave(&q->vchan.lock, flags); + comp_desc = (cidx - q->cidx) & q->idx_mask; + + vd = vchan_next_desc(&q->vchan); + if (!vd) + goto skip; + + vdesc = to_qdma_vdesc(vd); + while (comp_desc > vdesc->pending_descs) { + list_del(&vd->node); + vchan_cookie_complete(vd); + comp_desc -= vdesc->pending_descs; + vd = vchan_next_desc(&q->vchan); + vdesc = to_qdma_vdesc(vd); + } + vdesc->pending_descs -= comp_desc; + if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + q->cidx = cidx; + + qdma_fill_pending_vdesc(q); + qdma_xfer_start(q); + +skip: + spin_unlock_irqrestore(&q->vchan.lock, flags); + + /* + * Wrap the index value and flip the expected color value if + * interrupt aggregation PIDX has wrapped around. + */ + index++; + index &= QDMA_INTR_RING_IDX_MASK; + if (!index) + intr->color = !intr->color; + } + + /* + * Update the software interrupt aggregation ring CIDX if a valid entry + * was found. + */ + if (q) { + qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index); + + /* + * Record the last read index of status descriptor from the + * interrupt aggregation ring. 
+ */ + intr->cidx = index; + + ret = qdma_update_cidx(q, intr->ridx, index); + if (ret) { + qdma_err(qdev, "Failed to update IRQ CIDX"); + return IRQ_NONE; + } + } + + return IRQ_HANDLED; +} + +static int qdma_init_error_irq(struct qdma_device *qdev) +{ + struct device *dev = &qdev->pdev->dev; + int ret; + u32 vec; + + vec = qdev->queue_irq_start - 1; + + ret = devm_request_threaded_irq(dev, vec, NULL, qdma_error_isr, + IRQF_ONESHOT, "amd-qdma-error", qdev); + if (ret) { + qdma_err(qdev, "Failed to request error IRQ vector: %d", vec); + return ret; + } + + ret = qdma_arm_err_intr(qdev); + if (ret) + qdma_err(qdev, "Failed to arm err interrupt, ret %d", ret); + + return ret; +} + +static int qdmam_alloc_qintr_rings(struct qdma_device *qdev) +{ + u32 ctxt[QDMA_CTXT_REGMAP_LEN]; + struct device *dev = &qdev->pdev->dev; + struct qdma_intr_ring *ring; + struct qdma_ctxt_intr intr_ctxt; + u32 vector; + int ret, i; + + qdev->qintr_ring_num = qdev->queue_irq_num; + qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num, + sizeof(*qdev->qintr_rings), + GFP_KERNEL); + if (!qdev->qintr_rings) + return -ENOMEM; + + vector = qdev->queue_irq_start; + for (i = 0; i < qdev->qintr_ring_num; i++, vector++) { + ring = &qdev->qintr_rings[i]; + ring->qdev = qdev; + ring->msix_id = qdev->err_irq_idx + i + 1; + ring->ridx = i; + ring->color = 1; + ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE, + &ring->dev_base, GFP_KERNEL); + if (!ring->base) { + qdma_err(qdev, "Failed to alloc intr ring %d", i); + return -ENOMEM; + } + intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base); + intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096; + intr_ctxt.vec = ring->msix_id; + intr_ctxt.valid = true; + intr_ctxt.color = true; + ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL, + QDMA_CTXT_CLEAR, ring->ridx, NULL); + if (ret) { + qdma_err(qdev, "Failed clear intr ctx, ret %d", ret); + return ret; + } + + qdma_prep_intr_context(qdev, &intr_ctxt, ctxt); + ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL, + QDMA_CTXT_WRITE, ring->ridx, ctxt); + if (ret) { + qdma_err(qdev, "Failed setup intr ctx, ret %d", ret); + return ret; + } + + ret = devm_request_threaded_irq(dev, vector, NULL, + qdma_queue_isr, IRQF_ONESHOT, + "amd-qdma-queue", ring); + if (ret) { + qdma_err(qdev, "Failed to request irq %d", vector); + return ret; + } + } + + return 0; +} + +static int qdma_intr_init(struct qdma_device *qdev) +{ + int ret; + + ret = qdma_init_error_irq(qdev); + if (ret) { + qdma_err(qdev, "Failed to init error IRQs, ret %d", ret); + return ret; + } + + ret = qdmam_alloc_qintr_rings(qdev); + if (ret) { + qdma_err(qdev, "Failed to init queue IRQs, ret %d", ret); + return ret; + } + + return 0; +} + +static void amd_qdma_remove(struct platform_device *pdev) +{ + struct qdma_device *qdev = platform_get_drvdata(pdev); + + qdma_sgdma_control(qdev, 0); + dma_async_device_unregister(&qdev->dma_dev); + + mutex_destroy(&qdev->ctxt_lock); +} + +static int amd_qdma_probe(struct platform_device *pdev) +{ + struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev); + struct qdma_device *qdev; + struct resource *res; + void __iomem *regs; + int ret; + + qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL); + if (!qdev) + return -ENOMEM; + + platform_set_drvdata(pdev, qdev); + qdev->pdev = pdev; + mutex_init(&qdev->ctxt_lock); + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + qdma_err(qdev, "Failed to get IRQ resource"); + ret = -ENODEV; + goto failed; + } + qdev->err_irq_idx = pdata->irq_index; + 
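+ /* First vector of the IRQ resource serves the error interrupt; the remaining vectors drive the queue interrupt rings (see qdma_init_error_irq() and qdmam_alloc_qintr_rings()) */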
qdev->queue_irq_start = res->start + 1; + qdev->queue_irq_num = resource_size(res) - 1; + + regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + qdma_err(qdev, "Failed to map IO resource, err %d", ret); + goto failed; + } + + qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs, + &qdma_regmap_config); + if (IS_ERR(qdev->regmap)) { + ret = PTR_ERR(qdev->regmap); + qdma_err(qdev, "Regmap init failed, err %d", ret); + goto failed; + } + + ret = qdma_device_verify(qdev); + if (ret) + goto failed; + + ret = qdma_get_hw_info(qdev); + if (ret) + goto failed; + + INIT_LIST_HEAD(&qdev->dma_dev.channels); + + ret = qdma_device_setup(qdev); + if (ret) + goto failed; + + ret = qdma_intr_init(qdev); + if (ret) { + qdma_err(qdev, "Failed to initialize IRQs %d", ret); + goto failed_disable_engine; + } + + dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask); + dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask); + + qdev->dma_dev.dev = &pdev->dev; + qdev->dma_dev.filter.map = pdata->device_map; + qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2; + qdev->dma_dev.filter.fn = qdma_filter_fn; + qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources; + qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources; + qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg; + qdev->dma_dev.device_config = qdma_device_config; + qdev->dma_dev.device_issue_pending = qdma_issue_pending; + qdev->dma_dev.device_tx_status = dma_cookie_status; + qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + + ret = dma_async_device_register(&qdev->dma_dev); + if (ret) { + qdma_err(qdev, "Failed to register AMD QDMA: %d", ret); + goto failed_disable_engine; + } + + return 0; + +failed_disable_engine: + qdma_sgdma_control(qdev, 0); +failed: + mutex_destroy(&qdev->ctxt_lock); + qdma_err(qdev, "Failed to probe AMD QDMA driver"); + return ret; +} + +static struct platform_driver amd_qdma_driver = { + .driver = { + .name = "amd-qdma", + }, + .probe = amd_qdma_probe, + .remove_new = amd_qdma_remove, +}; + +module_platform_driver(amd_qdma_driver); + +MODULE_DESCRIPTION("AMD QDMA driver"); +MODULE_AUTHOR("XRT Team "); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/amd/qdma/qdma.h b/drivers/dma/amd/qdma/qdma.h new file mode 100644 index 00000000000000..94089f1f0c115f --- /dev/null +++ b/drivers/dma/amd/qdma/qdma.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * DMA header for AMD Queue-based DMA Subsystem + * + * Copyright (C) 2023-2024, Advanced Micro Devices, Inc. 
+ */ + +#ifndef __QDMA_H +#define __QDMA_H + +#include +#include +#include +#include +#include + +#include "../../virt-dma.h" + +#define DISABLE 0 +#define ENABLE 1 + +#define QDMA_MIN_IRQ 3 +#define QDMA_INTR_NAME_MAX_LEN 30 +#define QDMA_INTR_PREFIX "amd-qdma" + +#define QDMA_IDENTIFIER 0x1FD3 +#define QDMA_DEFAULT_RING_SIZE (BIT(10) + 1) +#define QDMA_DEFAULT_RING_ID 0 +#define QDMA_POLL_INTRVL_US 10 /* 10us */ +#define QDMA_POLL_TIMEOUT_US (500 * 1000) /* 500ms */ +#define QDMA_DMAP_REG_STRIDE 16 +#define QDMA_CTXT_REGMAP_LEN 8 /* 8 regs */ +#define QDMA_MM_DESC_SIZE 32 /* Bytes */ +#define QDMA_MM_DESC_LEN_BITS 28 +#define QDMA_MM_DESC_MAX_LEN (BIT(QDMA_MM_DESC_LEN_BITS) - 1) +#define QDMA_MIN_DMA_ALLOC_SIZE 4096 +#define QDMA_INTR_RING_SIZE BIT(13) +#define QDMA_INTR_RING_IDX_MASK GENMASK(9, 0) +#define QDMA_INTR_RING_BASE(_addr) ((_addr) >> 12) + +#define QDMA_IDENTIFIER_REGOFF 0x0 +#define QDMA_IDENTIFIER_MASK GENMASK(31, 16) +#define QDMA_QUEUE_ARM_BIT BIT(16) + +#define qdma_err(qdev, fmt, args...) \ + dev_err(&(qdev)->pdev->dev, fmt, ##args) + +#define qdma_dbg(qdev, fmt, args...) \ + dev_dbg(&(qdev)->pdev->dev, fmt, ##args) + +#define qdma_info(qdev, fmt, args...) \ + dev_info(&(qdev)->pdev->dev, fmt, ##args) + +enum qdma_reg_fields { + QDMA_REGF_IRQ_ENABLE, + QDMA_REGF_WBK_ENABLE, + QDMA_REGF_WBI_CHECK, + QDMA_REGF_IRQ_ARM, + QDMA_REGF_IRQ_VEC, + QDMA_REGF_IRQ_AGG, + QDMA_REGF_WBI_INTVL_ENABLE, + QDMA_REGF_MRKR_DISABLE, + QDMA_REGF_QUEUE_ENABLE, + QDMA_REGF_QUEUE_MODE, + QDMA_REGF_DESC_BASE, + QDMA_REGF_DESC_SIZE, + QDMA_REGF_RING_ID, + QDMA_REGF_CMD_INDX, + QDMA_REGF_CMD_CMD, + QDMA_REGF_CMD_TYPE, + QDMA_REGF_CMD_BUSY, + QDMA_REGF_QUEUE_COUNT, + QDMA_REGF_QUEUE_MAX, + QDMA_REGF_QUEUE_BASE, + QDMA_REGF_FUNCTION_ID, + QDMA_REGF_INTR_AGG_BASE, + QDMA_REGF_INTR_VECTOR, + QDMA_REGF_INTR_SIZE, + QDMA_REGF_INTR_VALID, + QDMA_REGF_INTR_COLOR, + QDMA_REGF_INTR_FUNCTION_ID, + QDMA_REGF_ERR_INT_FUNC, + QDMA_REGF_ERR_INT_VEC, + QDMA_REGF_ERR_INT_ARM, + QDMA_REGF_MAX +}; + +enum qdma_regs { + QDMA_REGO_CTXT_DATA, + QDMA_REGO_CTXT_CMD, + QDMA_REGO_CTXT_MASK, + QDMA_REGO_MM_H2C_CTRL, + QDMA_REGO_MM_C2H_CTRL, + QDMA_REGO_QUEUE_COUNT, + QDMA_REGO_RING_SIZE, + QDMA_REGO_H2C_PIDX, + QDMA_REGO_C2H_PIDX, + QDMA_REGO_INTR_CIDX, + QDMA_REGO_FUNC_ID, + QDMA_REGO_ERR_INT, + QDMA_REGO_ERR_STAT, + QDMA_REGO_MAX +}; + +struct qdma_reg_field { + u16 lsb; /* Least significant bit of field */ + u16 msb; /* Most significant bit of field */ +}; + +struct qdma_reg { + u32 off; + u32 count; +}; + +#define QDMA_REGF(_msb, _lsb) { \ + .lsb = (_lsb), \ + .msb = (_msb), \ +} + +#define QDMA_REGO(_off, _count) { \ + .off = (_off), \ + .count = (_count), \ +} + +enum qdma_desc_size { + QDMA_DESC_SIZE_8B, + QDMA_DESC_SIZE_16B, + QDMA_DESC_SIZE_32B, + QDMA_DESC_SIZE_64B, +}; + +enum qdma_queue_op_mode { + QDMA_QUEUE_OP_STREAM, + QDMA_QUEUE_OP_MM, +}; + +enum qdma_ctxt_type { + QDMA_CTXT_DESC_SW_C2H, + QDMA_CTXT_DESC_SW_H2C, + QDMA_CTXT_DESC_HW_C2H, + QDMA_CTXT_DESC_HW_H2C, + QDMA_CTXT_DESC_CR_C2H, + QDMA_CTXT_DESC_CR_H2C, + QDMA_CTXT_WRB, + QDMA_CTXT_PFTCH, + QDMA_CTXT_INTR_COAL, + QDMA_CTXT_RSVD, + QDMA_CTXT_HOST_PROFILE, + QDMA_CTXT_TIMER, + QDMA_CTXT_FMAP, + QDMA_CTXT_FNC_STS, +}; + +enum qdma_ctxt_cmd { + QDMA_CTXT_CLEAR, + QDMA_CTXT_WRITE, + QDMA_CTXT_READ, + QDMA_CTXT_INVALIDATE, + QDMA_CTXT_MAX +}; + +struct qdma_ctxt_sw_desc { + u64 desc_base; + u16 vec; +}; + +struct qdma_ctxt_intr { + u64 agg_base; + u16 vec; + u32 size; + bool valid; + bool color; +}; + +struct qdma_ctxt_fmap { + u16 qbase; + u16 qmax; +}; 
+ +struct qdma_device; + +struct qdma_mm_desc { + __le64 src_addr; + __le32 len; + __le32 reserved1; + __le64 dst_addr; + __le64 reserved2; +} __packed; + +struct qdma_mm_vdesc { + struct virt_dma_desc vdesc; + struct qdma_queue *queue; + struct scatterlist *sgl; + u64 sg_off; + u32 sg_len; + u64 dev_addr; + u32 pidx; + u32 pending_descs; + struct dma_slave_config cfg; +}; + +#define QDMA_VDESC_QUEUED(vdesc) (!(vdesc)->sg_len) + +struct qdma_queue { + struct qdma_device *qdev; + struct virt_dma_chan vchan; + enum dma_transfer_direction dir; + struct dma_slave_config cfg; + struct qdma_mm_desc *desc_base; + struct qdma_mm_vdesc *submitted_vdesc; + struct qdma_mm_vdesc *issued_vdesc; + dma_addr_t dma_desc_base; + u32 pidx_reg; + u32 cidx_reg; + u32 ring_size; + u32 idx_mask; + u16 qid; + u32 pidx; + u32 cidx; +}; + +struct qdma_intr_ring { + struct qdma_device *qdev; + __le64 *base; + dma_addr_t dev_base; + char msix_name[QDMA_INTR_NAME_MAX_LEN]; + u32 msix_vector; + u16 msix_id; + u32 ring_size; + u16 ridx; + u16 cidx; + u8 color; +}; + +#define QDMA_INTR_MASK_PIDX GENMASK_ULL(15, 0) +#define QDMA_INTR_MASK_CIDX GENMASK_ULL(31, 16) +#define QDMA_INTR_MASK_DESC_COLOR GENMASK_ULL(32, 32) +#define QDMA_INTR_MASK_STATE GENMASK_ULL(34, 33) +#define QDMA_INTR_MASK_ERROR GENMASK_ULL(36, 35) +#define QDMA_INTR_MASK_TYPE GENMASK_ULL(38, 38) +#define QDMA_INTR_MASK_QID GENMASK_ULL(62, 39) +#define QDMA_INTR_MASK_COLOR GENMASK_ULL(63, 63) + +struct qdma_device { + struct platform_device *pdev; + struct dma_device dma_dev; + struct regmap *regmap; + struct mutex ctxt_lock; /* protect ctxt registers */ + const struct qdma_reg_field *rfields; + const struct qdma_reg *roffs; + struct qdma_queue *h2c_queues; + struct qdma_queue *c2h_queues; + struct qdma_intr_ring *qintr_rings; + u32 qintr_ring_num; + u32 qintr_ring_idx; + u32 chan_num; + u32 queue_irq_start; + u32 queue_irq_num; + u32 err_irq_idx; + u32 fid; +}; + +extern const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX]; +extern const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX]; + +#endif /* __QDMA_H */ diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 40052d1bd0b5c1..baebddc740b0d4 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -339,7 +339,7 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width) * @regs: memory mapped register base * @clk: dma controller clock * @save_imr: interrupt mask register that is saved on suspend/resume cycle - * @all_chan_mask: all channels availlable in a mask + * @all_chan_mask: all channels available in a mask * @lli_pool: hw lli table * @memset_pool: hw memset pool * @chan: channels table to store at_dma_chan structures @@ -668,7 +668,7 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla) * CTRLA is read in turn, next the DSCR is read a second time. If the two * consecutive read values of the DSCR are the same then we assume both refers * to the very same LLI as well as the CTRLA value read inbetween does. For - * cyclic tranfers, the assumption is that a full loop is "not so fast". If the + * cyclic transfers, the assumption is that a full loop is "not so fast". If the * two DSCR values are different, we read again the CTRLA then the DSCR till two * consecutive read values from DSCR are equal or till the maximum trials is * reach. This algorithm is very unlikely not to find a stable value for DSCR. 
@@ -700,7 +700,7 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan, break; /* - * DSCR has changed inside the DMA controller, so the previouly + * DSCR has changed inside the DMA controller, so the previously * read value of CTRLA may refer to an already processed * descriptor hence could be outdated. We need to update ctrla * to match the current descriptor. diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index fbaacb4c19b2bb..cfa6e1167a1fd1 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -15,7 +15,7 @@ * number of hardware rings over one or more SBA hardware devices. By * design, the internal buffer size of SBA hardware device is limited * but all offload operations supported by SBA can be broken down into - * multiple small size requests and executed parallely on multiple SBA + * multiple small size requests and executed parallelly on multiple SBA * hardware devices for achieving high through-put. * * The Broadcom SBA RAID driver does not require any register programming @@ -135,7 +135,7 @@ struct sba_device { u32 max_xor_srcs; u32 max_resp_pool_size; u32 max_cmds_pool_size; - /* Maibox client and Mailbox channels */ + /* Mailbox client and Mailbox channels */ struct mbox_client client; struct mbox_chan *mchan; struct device *mbox_dev; diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 9d74fe97452e08..e1b92b4d7b056e 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -369,7 +369,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain( /* the last frame requires extra flags */ d->cb_list[d->frames - 1].cb->info |= finalextrainfo; - /* detect a size missmatch */ + /* detect a size mismatch */ if (buf_len && (d->size != buf_len)) goto error_cb; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index c380a4dda77a04..c1357d7f3dc6ca 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1070,7 +1070,7 @@ static int __dma_async_device_channel_register(struct dma_device *device, if (!name) dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id); else - dev_set_name(&chan->dev->device, name); + dev_set_name(&chan->dev->device, "%s", name); rc = device_register(&chan->dev->device); if (rc) goto err_out_ida; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 1f201a542b3740..91b2fbc0b86471 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -500,7 +500,7 @@ static unsigned long long dmatest_persec(s64 runtime, unsigned int val) per_sec *= val; per_sec = INT_TO_FIXPT(per_sec); - do_div(per_sec, runtime); + do_div(per_sec, (u32)runtime); return per_sec; } diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index d6c60635e90db9..995427afe0773d 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -17,14 +17,15 @@ #include #include #include +#include #include #include #include +#include +#include #include #include -#include - #include "dmaengine.h" /* M2P registers */ @@ -104,6 +105,31 @@ #define DMA_MAX_CHAN_BYTES 0xffff #define DMA_MAX_CHAN_DESCRIPTORS 32 +/* + * M2P channels. + * + * Note that these values are also directly used for setting the PPALLOC + * register. 
+ */ +#define EP93XX_DMA_I2S1 0 +#define EP93XX_DMA_I2S2 1 +#define EP93XX_DMA_AAC1 2 +#define EP93XX_DMA_AAC2 3 +#define EP93XX_DMA_AAC3 4 +#define EP93XX_DMA_I2S3 5 +#define EP93XX_DMA_UART1 6 +#define EP93XX_DMA_UART2 7 +#define EP93XX_DMA_UART3 8 +#define EP93XX_DMA_IRDA 9 +/* M2M channels */ +#define EP93XX_DMA_SSP 10 +#define EP93XX_DMA_IDE 11 + +enum ep93xx_dma_type { + M2P_DMA, + M2M_DMA, +}; + struct ep93xx_dma_engine; static int ep93xx_dma_slave_config_write(struct dma_chan *chan, enum dma_transfer_direction dir, @@ -129,11 +155,17 @@ struct ep93xx_dma_desc { struct list_head node; }; +struct ep93xx_dma_chan_cfg { + u8 port; + enum dma_transfer_direction dir; +}; + /** * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel * @chan: dmaengine API channel * @edma: pointer to the engine device * @regs: memory mapped registers + * @dma_cfg: channel number, direction * @irq: interrupt number of the channel * @clk: clock used by this channel * @tasklet: channel specific tasklet used for callbacks @@ -157,14 +189,12 @@ struct ep93xx_dma_desc { * descriptor in the chain. When a descriptor is moved to the @active queue, * the first and chained descriptors are flattened into a single list. * - * @chan.private holds pointer to &struct ep93xx_dma_data which contains - * necessary channel configuration information. For memcpy channels this must - * be %NULL. */ struct ep93xx_dma_chan { struct dma_chan chan; const struct ep93xx_dma_engine *edma; void __iomem *regs; + struct ep93xx_dma_chan_cfg dma_cfg; int irq; struct clk *clk; struct tasklet_struct tasklet; @@ -216,6 +246,11 @@ struct ep93xx_dma_engine { struct ep93xx_dma_chan channels[] __counted_by(num_channels); }; +struct ep93xx_edma_data { + u32 id; + size_t num_channels; +}; + static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac) { return &edmac->chan.dev->device; @@ -226,6 +261,31 @@ static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan) return container_of(chan, struct ep93xx_dma_chan, chan); } +static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan) +{ + if (device_is_compatible(chan->device->dev, "cirrus,ep9301-dma-m2p")) + return true; + + return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p"); +} + +/* + * ep93xx_dma_chan_direction - returns direction the channel can be used + * + * This function can be used in filter functions to find out whether the + * channel supports given DMA direction. Only M2P channels have such + * limitation, for M2M channels the direction is configurable. + */ +static inline enum dma_transfer_direction +ep93xx_dma_chan_direction(struct dma_chan *chan) +{ + if (!ep93xx_dma_chan_is_m2p(chan)) + return DMA_TRANS_NONE; + + /* even channels are for TX, odd for RX */ + return (chan->chan_id % 2 == 0) ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; +} + /** * ep93xx_dma_set_active - set new active descriptor chain * @edmac: channel @@ -318,10 +378,9 @@ static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control) static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) { - struct ep93xx_dma_data *data = edmac->chan.private; u32 control; - writel(data->port & 0xf, edmac->regs + M2P_PPALLOC); + writel(edmac->dma_cfg.port & 0xf, edmac->regs + M2P_PPALLOC); control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE | M2P_CONTROL_ENABLE; @@ -458,16 +517,15 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) { - const struct ep93xx_dma_data *data = edmac->chan.private; u32 control = 0; - if (!data) { + if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) { /* This is memcpy channel, nothing to configure */ writel(control, edmac->regs + M2M_CONTROL); return 0; } - switch (data->port) { + switch (edmac->dma_cfg.port) { case EP93XX_DMA_SSP: /* * This was found via experimenting - anything less than 5 @@ -477,7 +535,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) control = (5 << M2M_CONTROL_PWSC_SHIFT); control |= M2M_CONTROL_NO_HDSK; - if (data->direction == DMA_MEM_TO_DEV) { + if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) { control |= M2M_CONTROL_DAH; control |= M2M_CONTROL_TM_TX; control |= M2M_CONTROL_RSS_SSPTX; @@ -493,7 +551,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) * This IDE part is totally untested. Values below are taken * from the EP93xx Users's Guide and might not be correct. */ - if (data->direction == DMA_MEM_TO_DEV) { + if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) { /* Worst case from the UG */ control = (3 << M2M_CONTROL_PWSC_SHIFT); control |= M2M_CONTROL_DAH; @@ -548,7 +606,6 @@ static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) { - struct ep93xx_dma_data *data = edmac->chan.private; u32 control = readl(edmac->regs + M2M_CONTROL); /* @@ -574,7 +631,7 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) control |= M2M_CONTROL_ENABLE; writel(control, edmac->regs + M2M_CONTROL); - if (!data) { + if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) { /* * For memcpy channels the software trigger must be asserted * in order to start the memcpy operation. @@ -636,7 +693,7 @@ static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) */ if (ep93xx_dma_advance_active(edmac)) { m2m_fill_desc(edmac); - if (done && !edmac->chan.private) { + if (done && edmac->dma_cfg.dir == DMA_MEM_TO_MEM) { /* Software trigger for memcpy channel */ control = readl(edmac->regs + M2M_CONTROL); control |= M2M_CONTROL_START; @@ -841,7 +898,7 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) desc = container_of(tx, struct ep93xx_dma_desc, txd); /* - * If nothing is currently prosessed, we push this descriptor + * If nothing is currently processed, we push this descriptor * directly to the hardware. Otherwise we put the descriptor * to the pending queue. 
*/ @@ -867,25 +924,22 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) { struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); - struct ep93xx_dma_data *data = chan->private; const char *name = dma_chan_name(chan); int ret, i; /* Sanity check the channel parameters */ if (!edmac->edma->m2m) { - if (!data) - return -EINVAL; - if (data->port < EP93XX_DMA_I2S1 || - data->port > EP93XX_DMA_IRDA) + if (edmac->dma_cfg.port < EP93XX_DMA_I2S1 || + edmac->dma_cfg.port > EP93XX_DMA_IRDA) return -EINVAL; - if (data->direction != ep93xx_dma_chan_direction(chan)) + if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan)) return -EINVAL; } else { - if (data) { - switch (data->port) { + if (edmac->dma_cfg.dir != DMA_MEM_TO_MEM) { + switch (edmac->dma_cfg.port) { case EP93XX_DMA_SSP: case EP93XX_DMA_IDE: - if (!is_slave_direction(data->direction)) + if (!is_slave_direction(edmac->dma_cfg.dir)) return -EINVAL; break; default: @@ -894,9 +948,6 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) } } - if (data && data->name) - name = data->name; - ret = clk_prepare_enable(edmac->clk); if (ret) return ret; @@ -1025,7 +1076,7 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, * @chan: channel * @sgl: list of buffers to transfer * @sg_len: number of entries in @sgl - * @dir: direction of tha DMA transfer + * @dir: direction of the DMA transfer * @flags: flags for the descriptor * @context: operation context (ignored) * @@ -1315,36 +1366,53 @@ static void ep93xx_dma_issue_pending(struct dma_chan *chan) ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan)); } -static int __init ep93xx_dma_probe(struct platform_device *pdev) +static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pdev) { - struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev); + const struct ep93xx_edma_data *data; + struct device *dev = &pdev->dev; struct ep93xx_dma_engine *edma; struct dma_device *dma_dev; - int ret, i; + char dma_clk_name[5]; + int i; - edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL); + data = device_get_match_data(dev); + if (!data) + return ERR_PTR(dev_err_probe(dev, -ENODEV, "No device match found\n")); + + edma = devm_kzalloc(dev, struct_size(edma, channels, data->num_channels), + GFP_KERNEL); if (!edma) - return -ENOMEM; + return ERR_PTR(-ENOMEM); + edma->m2m = data->id; + edma->num_channels = data->num_channels; dma_dev = &edma->dma_dev; - edma->m2m = platform_get_device_id(pdev)->driver_data; - edma->num_channels = pdata->num_channels; INIT_LIST_HEAD(&dma_dev->channels); - for (i = 0; i < pdata->num_channels; i++) { - const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i]; + for (i = 0; i < edma->num_channels; i++) { struct ep93xx_dma_chan *edmac = &edma->channels[i]; edmac->chan.device = dma_dev; - edmac->regs = cdata->base; - edmac->irq = cdata->irq; + edmac->regs = devm_platform_ioremap_resource(pdev, i); + if (IS_ERR(edmac->regs)) + return edmac->regs; + + edmac->irq = fwnode_irq_get(dev_fwnode(dev), i); + if (edmac->irq < 0) + return ERR_PTR(edmac->irq); + edmac->edma = edma; - edmac->clk = clk_get(NULL, cdata->name); + if (edma->m2m) + snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i); + else + snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i); + + edmac->clk = devm_clk_get(dev, dma_clk_name); if (IS_ERR(edmac->clk)) { - dev_warn(&pdev->dev, "failed to get clock for %s\n", - cdata->name); - 
continue; + dev_err_probe(dev, PTR_ERR(edmac->clk), + "no %s clock found\n", dma_clk_name); + return ERR_CAST(edmac->clk); } spin_lock_init(&edmac->lock); @@ -1357,6 +1425,90 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) &dma_dev->channels); } + return edma; +} + +static bool ep93xx_m2p_dma_filter(struct dma_chan *chan, void *filter_param) +{ + struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_chan_cfg *cfg = filter_param; + + if (cfg->dir != ep93xx_dma_chan_direction(chan)) + return false; + + echan->dma_cfg = *cfg; + return true; +} + +static struct dma_chan *ep93xx_m2p_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct ep93xx_dma_engine *edma = ofdma->of_dma_data; + dma_cap_mask_t mask = edma->dma_dev.cap_mask; + struct ep93xx_dma_chan_cfg dma_cfg; + u8 port = dma_spec->args[0]; + u8 direction = dma_spec->args[1]; + + if (port > EP93XX_DMA_IRDA) + return NULL; + + if (!is_slave_direction(direction)) + return NULL; + + dma_cfg.port = port; + dma_cfg.dir = direction; + + return __dma_request_channel(&mask, ep93xx_m2p_dma_filter, &dma_cfg, ofdma->of_node); +} + +static bool ep93xx_m2m_dma_filter(struct dma_chan *chan, void *filter_param) +{ + struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_chan_cfg *cfg = filter_param; + + echan->dma_cfg = *cfg; + + return true; +} + +static struct dma_chan *ep93xx_m2m_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct ep93xx_dma_engine *edma = ofdma->of_dma_data; + dma_cap_mask_t mask = edma->dma_dev.cap_mask; + struct ep93xx_dma_chan_cfg dma_cfg; + u8 port = dma_spec->args[0]; + u8 direction = dma_spec->args[1]; + + if (!is_slave_direction(direction)) + return NULL; + + switch (port) { + case EP93XX_DMA_SSP: + case EP93XX_DMA_IDE: + break; + default: + return NULL; + } + + dma_cfg.port = port; + dma_cfg.dir = direction; + + return __dma_request_channel(&mask, ep93xx_m2m_dma_filter, &dma_cfg, ofdma->of_node); +} + +static int ep93xx_dma_probe(struct platform_device *pdev) +{ + struct ep93xx_dma_engine *edma; + struct dma_device *dma_dev; + int ret; + + edma = ep93xx_dma_of_probe(pdev); + if (IS_ERR(edma)) + return PTR_ERR(edma); + + dma_dev = &edma->dma_dev; + dma_cap_zero(dma_dev->cap_mask); dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); @@ -1393,21 +1545,46 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) } ret = dma_async_device_register(dma_dev); - if (unlikely(ret)) { - for (i = 0; i < edma->num_channels; i++) { - struct ep93xx_dma_chan *edmac = &edma->channels[i]; - if (!IS_ERR_OR_NULL(edmac->clk)) - clk_put(edmac->clk); - } - kfree(edma); + if (ret) + return ret; + + if (edma->m2m) { + ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2m_dma_of_xlate, + edma); } else { - dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", - edma->m2m ? "M" : "P"); + ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2p_dma_of_xlate, + edma); } + if (ret) + goto err_dma_unregister; + + dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", edma->m2m ? 
"M" : "P"); + + return 0; + +err_dma_unregister: + dma_async_device_unregister(dma_dev); return ret; } +static const struct ep93xx_edma_data edma_m2p = { + .id = M2P_DMA, + .num_channels = 10, +}; + +static const struct ep93xx_edma_data edma_m2m = { + .id = M2M_DMA, + .num_channels = 2, +}; + +static const struct of_device_id ep93xx_dma_of_ids[] = { + { .compatible = "cirrus,ep9301-dma-m2p", .data = &edma_m2p }, + { .compatible = "cirrus,ep9301-dma-m2m", .data = &edma_m2m }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ep93xx_dma_of_ids); + static const struct platform_device_id ep93xx_dma_driver_ids[] = { { "ep93xx-dma-m2p", 0 }, { "ep93xx-dma-m2m", 1 }, @@ -1417,15 +1594,13 @@ static const struct platform_device_id ep93xx_dma_driver_ids[] = { static struct platform_driver ep93xx_dma_driver = { .driver = { .name = "ep93xx-dma", + .of_match_table = ep93xx_dma_of_ids, }, .id_table = ep93xx_dma_driver_ids, + .probe = ep93xx_dma_probe, }; -static int __init ep93xx_dma_module_init(void) -{ - return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe); -} -subsys_initcall(ep93xx_dma_module_init); +module_platform_driver(ep93xx_dma_driver); MODULE_AUTHOR("Mika Westerberg "); MODULE_DESCRIPTION("EP93xx DMA driver"); diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h index 2c80077cb7c0a7..36c284a3d18423 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h @@ -12,8 +12,8 @@ struct dpaa2_qdma_sd_d { u32 rsv:32; union { struct { - u32 ssd:12; /* souce stride distance */ - u32 sss:12; /* souce stride size */ + u32 ssd:12; /* source stride distance */ + u32 sss:12; /* source stride size */ u32 rsv1:8; } sdf; struct { @@ -48,7 +48,7 @@ struct dpaa2_qdma_sd_d { #define QDMA_SER_DISABLE (8) /* no notification */ #define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */ #define QDMA_SER_DEST (2 << 8) /* notification by destination desc */ -#define QDMA_SER_BOTH (3 << 8) /* soruce and dest notification */ +#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */ #define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */ #define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */ diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index c66185c5a19925..f9f1eda792546e 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -100,6 +100,22 @@ static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id) return fsl_edma_err_handler(irq, dev_id); } +static bool fsl_edma_srcid_in_use(struct fsl_edma_engine *fsl_edma, u32 srcid) +{ + struct fsl_edma_chan *fsl_chan; + int i; + + for (i = 0; i < fsl_edma->n_chans; i++) { + fsl_chan = &fsl_edma->chans[i]; + + if (fsl_chan->srcid && srcid == fsl_chan->srcid) { + dev_err(&fsl_chan->pdev->dev, "The srcid is in use, can't use!"); + return true; + } + } + return false; +} + static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { @@ -117,6 +133,10 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { if (chan->client_count) continue; + + if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[1])) + return NULL; + if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) { chan = dma_get_slave_channel(chan); if (chan) { @@ -153,7 +173,7 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec, b_chmux = !!(fsl_edma->drvdata->flags & 
FSL_EDMA_DRV_HAS_CHMUX); - mutex_lock(&fsl_edma->fsl_edma_mutex); + guard(mutex)(&fsl_edma->fsl_edma_mutex); list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { @@ -161,6 +181,8 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec, continue; fsl_chan = to_fsl_edma_chan(chan); + if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[0])) + return NULL; i = fsl_chan - fsl_edma->chans; fsl_chan->priority = dma_spec->args[1]; @@ -177,18 +199,15 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec, if (!b_chmux && i == dma_spec->args[0]) { chan = dma_get_slave_channel(chan); chan->device->privatecnt++; - mutex_unlock(&fsl_edma->fsl_edma_mutex); return chan; } else if (b_chmux && !fsl_chan->srcid) { /* if controller support channel mux, choose a free channel */ chan = dma_get_slave_channel(chan); chan->device->privatecnt++; fsl_chan->srcid = dma_spec->args[0]; - mutex_unlock(&fsl_edma->fsl_edma_mutex); return chan; } } - mutex_unlock(&fsl_edma->fsl_edma_mutex); return NULL; } diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c index 4c47bff8106401..25a4134be36b7b 100644 --- a/drivers/dma/hisi_dma.c +++ b/drivers/dma/hisi_dma.c @@ -677,7 +677,7 @@ static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index) writel_relaxed(tmp, addr); /* - * 0 - dma should process FLR whith CPU. + * 0 - dma should process FLR with CPU. * 1 - dma not process FLR, only cpu process FLR. */ addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE + diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index e3505e56784b1a..3c648308a54a38 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -290,7 +290,7 @@ static void idma64_desc_fill(struct idma64_chan *idma64c, desc->length += hw->len; } while (i); - /* Trigger an interrupt after the last block is transfered */ + /* Trigger an interrupt after the last block is transferred */ lli->ctllo |= IDMA64C_CTLL_INT_EN; /* Disable LLP transfer in the last block */ @@ -364,7 +364,7 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c) if (!i) return bytes; - /* The current chunk is not fully transfered yet */ + /* The current chunk is not fully transferred yet */ bytes += desc->hw[--i].len; return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); @@ -598,9 +598,7 @@ static int idma64_probe(struct idma64_chip *chip) idma64->dma.dev = chip->sysdev; - ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); - if (ret) - return ret; + dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); ret = dma_async_device_register(&idma64->dma); if (ret) diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 868b724a3b75bb..d84e21daa99122 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -124,7 +124,6 @@ struct idxd_pmu { struct pmu pmu; char name[IDXD_NAME_SIZE]; - int cpu; int n_counters; int counter_width; @@ -135,8 +134,6 @@ struct idxd_pmu { unsigned long supported_filters; int n_filters; - - struct hlist_node cpuhp_node; }; #define IDXD_MAX_PRIORITY 0xf @@ -803,14 +800,10 @@ void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index); int perfmon_pmu_init(struct idxd_device *idxd); void perfmon_pmu_remove(struct idxd_device *idxd); void perfmon_counter_overflow(struct idxd_device *idxd); -void perfmon_init(void); -void perfmon_exit(void); #else static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; } static inline void perfmon_pmu_remove(struct idxd_device *idxd) {} static inline void 
perfmon_counter_overflow(struct idxd_device *idxd) {} -static inline void perfmon_init(void) {} -static inline void perfmon_exit(void) {} #endif /* debugfs */ diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 21f6905b554d84..234c1c658ec792 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -69,9 +69,15 @@ static struct idxd_driver_data idxd_driver_data[] = { static struct pci_device_id idxd_pci_tbl[] = { /* DSA ver 1.0 platforms */ { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) }, + /* DSA on GNR-D platforms */ + { PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) }, + /* DSA on DMR platforms */ + { PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) }, /* IAX ver 1.0 platforms */ { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) }, + /* IAA on DMR platforms */ + { PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) }, { 0, } }; MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); @@ -878,8 +884,6 @@ static int __init idxd_init_module(void) else support_enqcmd = true; - perfmon_init(); - err = idxd_driver_register(&idxd_drv); if (err < 0) goto err_idxd_driver_register; @@ -928,7 +932,6 @@ static void __exit idxd_exit_module(void) idxd_driver_unregister(&idxd_drv); pci_unregister_driver(&idxd_pci_driver); idxd_cdev_remove(); - perfmon_exit(); idxd_remove_debugfs(); } module_exit(idxd_exit_module); diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c index 5e94247e1ea703..4b6af2f15d8ab9 100644 --- a/drivers/dma/idxd/perfmon.c +++ b/drivers/dma/idxd/perfmon.c @@ -6,29 +6,6 @@ #include "idxd.h" #include "perfmon.h" -static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, - char *buf); - -static cpumask_t perfmon_dsa_cpu_mask; -static bool cpuhp_set_up; -static enum cpuhp_state cpuhp_slot; - -/* - * perf userspace reads this attribute to determine which cpus to open - * counters on. It's connected to perfmon_dsa_cpu_mask, which is - * maintained by the cpu hotplug handlers. - */ -static DEVICE_ATTR_RO(cpumask); - -static struct attribute *perfmon_cpumask_attrs[] = { - &dev_attr_cpumask.attr, - NULL, -}; - -static struct attribute_group cpumask_attr_group = { - .attrs = perfmon_cpumask_attrs, -}; - /* * These attributes specify the bits in the config word that the perf * syscall uses to pass the event ids and categories to perfmon. 
@@ -67,16 +44,9 @@ static struct attribute_group perfmon_format_attr_group = { static const struct attribute_group *perfmon_attr_groups[] = { &perfmon_format_attr_group, - &cpumask_attr_group, NULL, }; -static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask); -} - static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event) { return &idxd_pmu->pmu == event->pmu; @@ -217,7 +187,6 @@ static int perfmon_pmu_event_init(struct perf_event *event) return -EINVAL; event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd)); - event->cpu = idxd->idxd_pmu->cpu; event->hw.config = event->attr.config; if (event->group_leader != event) @@ -480,14 +449,15 @@ static void idxd_pmu_init(struct idxd_pmu *idxd_pmu) idxd_pmu->pmu.attr_groups = perfmon_attr_groups; idxd_pmu->pmu.task_ctx_nr = perf_invalid_context; idxd_pmu->pmu.event_init = perfmon_pmu_event_init; - idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable, - idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable, + idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable; + idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable; idxd_pmu->pmu.add = perfmon_pmu_event_add; idxd_pmu->pmu.del = perfmon_pmu_event_del; idxd_pmu->pmu.start = perfmon_pmu_event_start; idxd_pmu->pmu.stop = perfmon_pmu_event_stop; idxd_pmu->pmu.read = perfmon_pmu_event_update; idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; + idxd_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE; idxd_pmu->pmu.module = THIS_MODULE; } @@ -496,59 +466,17 @@ void perfmon_pmu_remove(struct idxd_device *idxd) if (!idxd->idxd_pmu) return; - cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node); perf_pmu_unregister(&idxd->idxd_pmu->pmu); kfree(idxd->idxd_pmu); idxd->idxd_pmu = NULL; } -static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node) -{ - struct idxd_pmu *idxd_pmu; - - idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node); - - /* select the first online CPU as the designated reader */ - if (cpumask_empty(&perfmon_dsa_cpu_mask)) { - cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask); - idxd_pmu->cpu = cpu; - } - - return 0; -} - -static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node) -{ - struct idxd_pmu *idxd_pmu; - unsigned int target; - - idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node); - - if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask)) - return 0; - - target = cpumask_any_but(cpu_online_mask, cpu); - /* migrate events if there is a valid target */ - if (target < nr_cpu_ids) { - cpumask_set_cpu(target, &perfmon_dsa_cpu_mask); - perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target); - } - - return 0; -} - int perfmon_pmu_init(struct idxd_device *idxd) { union idxd_perfcap perfcap; struct idxd_pmu *idxd_pmu; int rc = -ENODEV; - /* - * perfmon module initialization failed, nothing to do - */ - if (!cpuhp_set_up) - return -ENODEV; - /* * If perfmon_offset or num_counters is 0, it means perfmon is * not supported on this hardware. 
@@ -624,11 +552,6 @@ int perfmon_pmu_init(struct idxd_device *idxd) if (rc) goto free; - rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node); - if (rc) { - perf_pmu_unregister(&idxd->idxd_pmu->pmu); - goto free; - } out: return rc; free: @@ -637,22 +560,3 @@ int perfmon_pmu_init(struct idxd_device *idxd) goto out; } - -void __init perfmon_init(void) -{ - int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, - "driver/dma/idxd/perf:online", - perf_event_cpu_online, - perf_event_cpu_offline); - if (WARN_ON(rc < 0)) - return; - - cpuhp_slot = rc; - cpuhp_set_up = true; -} - -void __exit perfmon_exit(void) -{ - if (cpuhp_set_up) - cpuhp_remove_multi_state(cpuhp_slot); -} diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c index 817a564413b08e..94eca25ae9b909 100644 --- a/drivers/dma/idxd/submit.c +++ b/drivers/dma/idxd/submit.c @@ -134,7 +134,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, * completing the descriptor will return desc to allocator and * the desc can be acquired by a different process and the * desc->list can be modified. Delete desc from list so the - * list trasversing does not get corrupted by the other process. + * list traversing does not get corrupted by the other process. */ list_for_each_entry_safe(d, t, &flist, list) { list_del_init(&d->list); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index ebf7c115d55344..e913f0db99dadf 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -167,7 +167,6 @@ struct imxdma_channel { enum imx_dma_type { IMX1_DMA, - IMX21_DMA, IMX27_DMA, }; @@ -194,8 +193,6 @@ struct imxdma_filter_data { static const struct of_device_id imx_dma_of_dev_id[] = { { .compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA, - }, { - .compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA, }, { .compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA, }, { diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 7b502b60b38beb..cc9ddd6c325bac 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -905,7 +905,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) op = IOAT_OP_XOR_VAL; - /* validate the sources with the destintation page */ + /* validate the sources with the destination page */ for (i = 0; i < IOAT_NUM_SRC_TEST; i++) xor_val_srcs[i] = xor_srcs[i]; xor_val_srcs[i] = dest; diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c index 4117c7b67e9c2e..8173c3f1075a62 100644 --- a/drivers/dma/lgm/lgm-dma.c +++ b/drivers/dma/lgm/lgm-dma.c @@ -107,7 +107,7 @@ * If header mode is set in DMA descriptor, * If bit 30 is disabled, HDR_LEN must be configured according to channel * requirement. - * If bit 30 is enabled(checksum with heade mode), HDR_LEN has no need to + * If bit 30 is enabled(checksum with header mode), HDR_LEN has no need to * be configured. 
It will enable check sum for switch * If header mode is not set in DMA descriptor, * This register setting doesn't matter diff --git a/drivers/dma/loongson1-apb-dma.c b/drivers/dma/loongson1-apb-dma.c new file mode 100644 index 00000000000000..255fe7eca212a9 --- /dev/null +++ b/drivers/dma/loongson1-apb-dma.c @@ -0,0 +1,660 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Loongson-1 APB DMA Controller + * + * Copyright (C) 2015-2024 Keguang Zhang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dmaengine.h" +#include "virt-dma.h" + +/* Loongson-1 DMA Control Register */ +#define LS1X_DMA_CTRL 0x0 + +/* DMA Control Register Bits */ +#define LS1X_DMA_STOP BIT(4) +#define LS1X_DMA_START BIT(3) +#define LS1X_DMA_ASK_VALID BIT(2) + +/* DMA Next Field Bits */ +#define LS1X_DMA_NEXT_VALID BIT(0) + +/* DMA Command Field Bits */ +#define LS1X_DMA_RAM2DEV BIT(12) +#define LS1X_DMA_INT BIT(1) +#define LS1X_DMA_INT_MASK BIT(0) + +#define LS1X_DMA_LLI_ALIGNMENT 64 +#define LS1X_DMA_LLI_ADDR_MASK GENMASK(31, __ffs(LS1X_DMA_LLI_ALIGNMENT)) +#define LS1X_DMA_MAX_CHANNELS 3 + +enum ls1x_dmadesc_offsets { + LS1X_DMADESC_NEXT = 0, + LS1X_DMADESC_SADDR, + LS1X_DMADESC_DADDR, + LS1X_DMADESC_LENGTH, + LS1X_DMADESC_STRIDE, + LS1X_DMADESC_CYCLES, + LS1X_DMADESC_CMD, + LS1X_DMADESC_SIZE +}; + +struct ls1x_dma_lli { + unsigned int hw[LS1X_DMADESC_SIZE]; + dma_addr_t phys; + struct list_head node; +} __aligned(LS1X_DMA_LLI_ALIGNMENT); + +struct ls1x_dma_desc { + struct virt_dma_desc vd; + struct list_head lli_list; +}; + +struct ls1x_dma_chan { + struct virt_dma_chan vc; + struct dma_pool *lli_pool; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + unsigned int bus_width; + void __iomem *reg_base; + int irq; + bool is_cyclic; + struct ls1x_dma_lli *curr_lli; +}; + +struct ls1x_dma { + struct dma_device ddev; + unsigned int nr_chans; + struct ls1x_dma_chan chan[]; +}; + +static irqreturn_t ls1x_dma_irq_handler(int irq, void *data); + +#define to_ls1x_dma_chan(dchan) \ + container_of(dchan, struct ls1x_dma_chan, vc.chan) + +#define to_ls1x_dma_desc(d) \ + container_of(d, struct ls1x_dma_desc, vd) + +static inline struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +static inline int ls1x_dma_query(struct ls1x_dma_chan *chan, + dma_addr_t *lli_phys) +{ + struct dma_chan *dchan = &chan->vc.chan; + int val, ret; + + val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK; + val |= LS1X_DMA_ASK_VALID; + val |= dchan->chan_id; + writel(val, chan->reg_base + LS1X_DMA_CTRL); + ret = readl_poll_timeout_atomic(chan->reg_base + LS1X_DMA_CTRL, val, + !(val & LS1X_DMA_ASK_VALID), 0, 3000); + if (ret) + dev_err(chan2dev(dchan), "failed to query DMA\n"); + + return ret; +} + +static inline int ls1x_dma_start(struct ls1x_dma_chan *chan, + dma_addr_t *lli_phys) +{ + struct dma_chan *dchan = &chan->vc.chan; + struct device *dev = chan2dev(dchan); + int val, ret; + + val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK; + val |= LS1X_DMA_START; + val |= dchan->chan_id; + writel(val, chan->reg_base + LS1X_DMA_CTRL); + ret = readl_poll_timeout(chan->reg_base + LS1X_DMA_CTRL, val, + !(val & LS1X_DMA_START), 0, 1000); + if (!ret) + dev_dbg(dev, "start DMA with lli_phys=%pad\n", lli_phys); + else + dev_err(dev, "failed to start DMA\n"); + + return ret; +} + +static inline void ls1x_dma_stop(struct ls1x_dma_chan *chan) +{ + int val = readl(chan->reg_base + 
LS1X_DMA_CTRL); + + writel(val | LS1X_DMA_STOP, chan->reg_base + LS1X_DMA_CTRL); +} + +static void ls1x_dma_free_chan_resources(struct dma_chan *dchan) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + struct device *dev = chan2dev(dchan); + + dma_free_coherent(dev, sizeof(struct ls1x_dma_lli), + chan->curr_lli, chan->curr_lli->phys); + dma_pool_destroy(chan->lli_pool); + chan->lli_pool = NULL; + devm_free_irq(dev, chan->irq, chan); + vchan_free_chan_resources(&chan->vc); +} + +static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + struct device *dev = chan2dev(dchan); + dma_addr_t phys; + int ret; + + ret = devm_request_irq(dev, chan->irq, ls1x_dma_irq_handler, + IRQF_SHARED, dma_chan_name(dchan), chan); + if (ret) { + dev_err(dev, "failed to request IRQ %d\n", chan->irq); + return ret; + } + + chan->lli_pool = dma_pool_create(dma_chan_name(dchan), dev, + sizeof(struct ls1x_dma_lli), + __alignof__(struct ls1x_dma_lli), 0); + if (!chan->lli_pool) + return -ENOMEM; + + /* allocate memory for querying the current lli */ + dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + chan->curr_lli = dma_alloc_coherent(dev, sizeof(struct ls1x_dma_lli), + &phys, GFP_KERNEL); + if (!chan->curr_lli) { + dma_pool_destroy(chan->lli_pool); + return -ENOMEM; + } + chan->curr_lli->phys = phys; + + return 0; +} + +static void ls1x_dma_free_desc(struct virt_dma_desc *vd) +{ + struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd); + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(vd->tx.chan); + struct ls1x_dma_lli *lli, *_lli; + + list_for_each_entry_safe(lli, _lli, &desc->lli_list, node) { + list_del(&lli->node); + dma_pool_free(chan->lli_pool, lli, lli->phys); + } + + kfree(desc); +} + +static struct ls1x_dma_desc *ls1x_dma_alloc_desc(void) +{ + struct ls1x_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + INIT_LIST_HEAD(&desc->lli_list); + + return desc; +} + +static int ls1x_dma_prep_lli(struct dma_chan *dchan, struct ls1x_dma_desc *desc, + struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction dir, bool is_cyclic) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + struct ls1x_dma_lli *lli, *prev = NULL, *first = NULL; + struct device *dev = chan2dev(dchan); + struct list_head *pos = NULL; + struct scatterlist *sg; + unsigned int dev_addr, cmd, i; + + switch (dir) { + case DMA_MEM_TO_DEV: + dev_addr = chan->dst_addr; + chan->bus_width = chan->dst_addr_width; + cmd = LS1X_DMA_RAM2DEV | LS1X_DMA_INT; + break; + case DMA_DEV_TO_MEM: + dev_addr = chan->src_addr; + chan->bus_width = chan->src_addr_width; + cmd = LS1X_DMA_INT; + break; + default: + dev_err(dev, "unsupported DMA direction: %s\n", + dmaengine_get_direction_text(dir)); + return -EINVAL; + } + + for_each_sg(sgl, sg, sg_len, i) { + dma_addr_t buf_addr = sg_dma_address(sg); + size_t buf_len = sg_dma_len(sg); + dma_addr_t phys; + + if (!is_dma_copy_aligned(dchan->device, buf_addr, 0, buf_len)) { + dev_err(dev, "buffer is not aligned\n"); + return -EINVAL; + } + + /* allocate HW descriptors */ + lli = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT, &phys); + if (!lli) { + dev_err(dev, "failed to alloc lli %u\n", i); + return -ENOMEM; + } + + /* setup HW descriptors */ + lli->phys = phys; + lli->hw[LS1X_DMADESC_SADDR] = buf_addr; + lli->hw[LS1X_DMADESC_DADDR] = dev_addr; + lli->hw[LS1X_DMADESC_LENGTH] = buf_len / chan->bus_width; + lli->hw[LS1X_DMADESC_STRIDE] = 0; + lli->hw[LS1X_DMADESC_CYCLES] = 1; + 
lli->hw[LS1X_DMADESC_CMD] = cmd; + + if (prev) + prev->hw[LS1X_DMADESC_NEXT] = + lli->phys | LS1X_DMA_NEXT_VALID; + prev = lli; + + if (!first) + first = lli; + + list_add_tail(&lli->node, &desc->lli_list); + } + + if (is_cyclic) { + lli->hw[LS1X_DMADESC_NEXT] = first->phys | LS1X_DMA_NEXT_VALID; + chan->is_cyclic = is_cyclic; + } + + list_for_each(pos, &desc->lli_list) { + lli = list_entry(pos, struct ls1x_dma_lli, node); + print_hex_dump_debug("LLI: ", DUMP_PREFIX_OFFSET, 16, 4, + lli, sizeof(*lli), false); + } + + return 0; +} + +static struct dma_async_tx_descriptor * +ls1x_dma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) +{ + struct ls1x_dma_desc *desc; + + dev_dbg(chan2dev(dchan), "sg_len=%u flags=0x%lx dir=%s\n", + sg_len, flags, dmaengine_get_direction_text(dir)); + + desc = ls1x_dma_alloc_desc(); + if (!desc) + return NULL; + + if (ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, false)) { + ls1x_dma_free_desc(&desc->vd); + return NULL; + } + + return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags); +} + +static struct dma_async_tx_descriptor * +ls1x_dma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + struct ls1x_dma_desc *desc; + struct scatterlist *sgl; + unsigned int sg_len; + unsigned int i; + int ret; + + dev_dbg(chan2dev(dchan), + "buf_len=%zu period_len=%zu flags=0x%lx dir=%s\n", + buf_len, period_len, flags, dmaengine_get_direction_text(dir)); + + desc = ls1x_dma_alloc_desc(); + if (!desc) + return NULL; + + /* allocate the scatterlist */ + sg_len = buf_len / period_len; + sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT); + if (!sgl) + return NULL; + + sg_init_table(sgl, sg_len); + for (i = 0; i < sg_len; ++i) { + sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(buf_addr)), + period_len, offset_in_page(buf_addr)); + sg_dma_address(&sgl[i]) = buf_addr; + sg_dma_len(&sgl[i]) = period_len; + buf_addr += period_len; + } + + ret = ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, true); + kfree(sgl); + if (ret) { + ls1x_dma_free_desc(&desc->vd); + return NULL; + } + + return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags); +} + +static int ls1x_dma_slave_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + + chan->src_addr = config->src_addr; + chan->src_addr_width = config->src_addr_width; + chan->dst_addr = config->dst_addr; + chan->dst_addr_width = config->dst_addr_width; + + return 0; +} + +static int ls1x_dma_pause(struct dma_chan *dchan) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + int ret; + + guard(spinlock_irqsave)(&chan->vc.lock); + /* save the current lli */ + ret = ls1x_dma_query(chan, &chan->curr_lli->phys); + if (!ret) + ls1x_dma_stop(chan); + + return ret; +} + +static int ls1x_dma_resume(struct dma_chan *dchan) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + + guard(spinlock_irqsave)(&chan->vc.lock); + + return ls1x_dma_start(chan, &chan->curr_lli->phys); +} + +static int ls1x_dma_terminate_all(struct dma_chan *dchan) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + struct virt_dma_desc *vd; + LIST_HEAD(head); + + ls1x_dma_stop(chan); + + scoped_guard(spinlock_irqsave, &chan->vc.lock) { + vd = vchan_next_desc(&chan->vc); + if (vd) + vchan_terminate_vdesc(vd); + + vchan_get_all_descriptors(&chan->vc, &head); + } + + 
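+	/* the descriptors collected above are freed with the channel lock already released */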
vchan_dma_desc_free_list(&chan->vc, &head); + + return 0; +} + +static void ls1x_dma_synchronize(struct dma_chan *dchan) +{ + vchan_synchronize(to_virt_chan(dchan)); +} + +static enum dma_status ls1x_dma_tx_status(struct dma_chan *dchan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + struct virt_dma_desc *vd; + enum dma_status status; + size_t bytes = 0; + + status = dma_cookie_status(dchan, cookie, state); + if (status == DMA_COMPLETE) + return status; + + scoped_guard(spinlock_irqsave, &chan->vc.lock) { + vd = vchan_find_desc(&chan->vc, cookie); + if (vd) { + struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd); + struct ls1x_dma_lli *lli; + dma_addr_t next_phys; + + /* get the current lli */ + if (ls1x_dma_query(chan, &chan->curr_lli->phys)) + return status; + + /* locate the current lli */ + next_phys = chan->curr_lli->hw[LS1X_DMADESC_NEXT]; + list_for_each_entry(lli, &desc->lli_list, node) + if (lli->hw[LS1X_DMADESC_NEXT] == next_phys) + break; + + dev_dbg(chan2dev(dchan), "current lli_phys=%pad", + &lli->phys); + + /* count the residues */ + list_for_each_entry_from(lli, &desc->lli_list, node) + bytes += lli->hw[LS1X_DMADESC_LENGTH] * + chan->bus_width; + } + } + + dma_set_residue(state, bytes); + + return status; +} + +static void ls1x_dma_issue_pending(struct dma_chan *dchan) +{ + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan); + + guard(spinlock_irqsave)(&chan->vc.lock); + + if (vchan_issue_pending(&chan->vc)) { + struct virt_dma_desc *vd = vchan_next_desc(&chan->vc); + + if (vd) { + struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd); + struct ls1x_dma_lli *lli; + + lli = list_first_entry(&desc->lli_list, + struct ls1x_dma_lli, node); + ls1x_dma_start(chan, &lli->phys); + } + } +} + +static irqreturn_t ls1x_dma_irq_handler(int irq, void *data) +{ + struct ls1x_dma_chan *chan = data; + struct dma_chan *dchan = &chan->vc.chan; + struct device *dev = chan2dev(dchan); + struct virt_dma_desc *vd; + + scoped_guard(spinlock, &chan->vc.lock) { + vd = vchan_next_desc(&chan->vc); + if (!vd) { + dev_warn(dev, + "IRQ %d with no active desc on channel %d\n", + irq, dchan->chan_id); + return IRQ_NONE; + } + + if (chan->is_cyclic) { + vchan_cyclic_callback(vd); + } else { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + } + + dev_dbg(dev, "DMA IRQ %d on channel %d\n", irq, dchan->chan_id); + + return IRQ_HANDLED; +} + +static int ls1x_dma_chan_probe(struct platform_device *pdev, + struct ls1x_dma *dma) +{ + void __iomem *reg_base; + int id; + + reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(reg_base)) + return PTR_ERR(reg_base); + + for (id = 0; id < dma->nr_chans; id++) { + struct ls1x_dma_chan *chan = &dma->chan[id]; + char pdev_irqname[16]; + + snprintf(pdev_irqname, sizeof(pdev_irqname), "ch%d", id); + chan->irq = platform_get_irq_byname(pdev, pdev_irqname); + if (chan->irq < 0) + return dev_err_probe(&pdev->dev, chan->irq, + "failed to get IRQ for ch%d\n", + id); + + chan->reg_base = reg_base; + chan->vc.desc_free = ls1x_dma_free_desc; + vchan_init(&chan->vc, &dma->ddev); + } + + return 0; +} + +static void ls1x_dma_chan_remove(struct ls1x_dma *dma) +{ + int id; + + for (id = 0; id < dma->nr_chans; id++) { + struct ls1x_dma_chan *chan = &dma->chan[id]; + + if (chan->vc.chan.device == &dma->ddev) { + list_del(&chan->vc.chan.device_node); + tasklet_kill(&chan->vc.task); + } + } +} + +static int ls1x_dma_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dma_device 
*ddev; + struct ls1x_dma *dma; + int ret; + + ret = platform_irq_count(pdev); + if (ret <= 0 || ret > LS1X_DMA_MAX_CHANNELS) + return dev_err_probe(dev, -EINVAL, + "Invalid number of IRQ channels: %d\n", + ret); + + dma = devm_kzalloc(dev, struct_size(dma, chan, ret), GFP_KERNEL); + if (!dma) + return -ENOMEM; + dma->nr_chans = ret; + + /* initialize DMA device */ + ddev = &dma->ddev; + ddev->dev = dev; + ddev->copy_align = DMAENGINE_ALIGN_4_BYTES; + ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + ddev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + ddev->device_alloc_chan_resources = ls1x_dma_alloc_chan_resources; + ddev->device_free_chan_resources = ls1x_dma_free_chan_resources; + ddev->device_prep_slave_sg = ls1x_dma_prep_slave_sg; + ddev->device_prep_dma_cyclic = ls1x_dma_prep_dma_cyclic; + ddev->device_config = ls1x_dma_slave_config; + ddev->device_pause = ls1x_dma_pause; + ddev->device_resume = ls1x_dma_resume; + ddev->device_terminate_all = ls1x_dma_terminate_all; + ddev->device_synchronize = ls1x_dma_synchronize; + ddev->device_tx_status = ls1x_dma_tx_status; + ddev->device_issue_pending = ls1x_dma_issue_pending; + dma_cap_set(DMA_SLAVE, ddev->cap_mask); + INIT_LIST_HEAD(&ddev->channels); + + /* initialize DMA channels */ + ret = ls1x_dma_chan_probe(pdev, dma); + if (ret) + goto err; + + ret = dmaenginem_async_device_register(ddev); + if (ret) { + dev_err(dev, "failed to register DMA device\n"); + goto err; + } + + ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, + ddev); + if (ret) { + dev_err(dev, "failed to register DMA controller\n"); + goto err; + } + + platform_set_drvdata(pdev, dma); + dev_info(dev, "Loongson1 DMA driver registered\n"); + + return 0; + +err: + ls1x_dma_chan_remove(dma); + + return ret; +} + +static void ls1x_dma_remove(struct platform_device *pdev) +{ + struct ls1x_dma *dma = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + ls1x_dma_chan_remove(dma); +} + +static const struct of_device_id ls1x_dma_match[] = { + { .compatible = "loongson,ls1b-apbdma" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ls1x_dma_match); + +static struct platform_driver ls1x_dma_driver = { + .probe = ls1x_dma_probe, + .remove = ls1x_dma_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = ls1x_dma_match, + }, +}; + +module_platform_driver(ls1x_dma_driver); + +MODULE_AUTHOR("Keguang Zhang "); +MODULE_DESCRIPTION("Loongson-1 APB DMA Controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/lpc32xx-dmamux.c b/drivers/dma/lpc32xx-dmamux.c new file mode 100644 index 00000000000000..351d7e23e6156e --- /dev/null +++ b/drivers/dma/lpc32xx-dmamux.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright 2024 Timesys Corporation +// +// Based on TI DMA Crossbar driver by: +// Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com +// Author: Peter Ujfalusi + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LPC32XX_SSP_CLK_CTRL 0x78 +#define LPC32XX_I2S_CLK_CTRL 0x7c + +struct lpc32xx_dmamux { + int signal; + char *name_sel0; + char *name_sel1; + int muxval; + int muxreg; + int bit; + bool busy; +}; + +struct lpc32xx_dmamux_data { + struct 
dma_router dmarouter; + struct regmap *reg; + spinlock_t lock; /* protects busy status flag */ +}; + +/* From LPC32x0 User manual "3.2.1 DMA request signals" */ +static struct lpc32xx_dmamux lpc32xx_muxes[] = { + { + .signal = 3, + .name_sel0 = "spi2-rx-tx", + .name_sel1 = "ssp1-rx", + .muxreg = LPC32XX_SSP_CLK_CTRL, + .bit = 5, + }, + { + .signal = 10, + .name_sel0 = "uart7-rx", + .name_sel1 = "i2s1-dma1", + .muxreg = LPC32XX_I2S_CLK_CTRL, + .bit = 4, + }, + { + .signal = 11, + .name_sel0 = "spi1-rx-tx", + .name_sel1 = "ssp1-tx", + .muxreg = LPC32XX_SSP_CLK_CTRL, + .bit = 4, + }, + { + .signal = 14, + .name_sel0 = "none", + .name_sel1 = "ssp0-rx", + .muxreg = LPC32XX_SSP_CLK_CTRL, + .bit = 3, + }, + { + .signal = 15, + .name_sel0 = "none", + .name_sel1 = "ssp0-tx", + .muxreg = LPC32XX_SSP_CLK_CTRL, + .bit = 2, + }, +}; + +static void lpc32xx_dmamux_release(struct device *dev, void *route_data) +{ + struct lpc32xx_dmamux_data *dmamux = dev_get_drvdata(dev); + struct lpc32xx_dmamux *mux = route_data; + + dev_dbg(dev, "releasing dma request signal %d routed to %s\n", + mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0); + + guard(spinlock)(&dmamux->lock); + + mux->busy = false; +} + +static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct device *dev = &pdev->dev; + struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev); + unsigned long flags; + struct lpc32xx_dmamux *mux = NULL; + int i; + + if (dma_spec->args_count != 3) { + dev_err(&pdev->dev, "invalid number of dma mux args\n"); + return ERR_PTR(-EINVAL); + } + + for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) { + if (lpc32xx_muxes[i].signal == dma_spec->args[0]) { + mux = &lpc32xx_muxes[i]; + break; + } + } + if (!mux) { + dev_err(&pdev->dev, "invalid mux request number: %d\n", + dma_spec->args[0]); + return ERR_PTR(-EINVAL); + } + + if (dma_spec->args[2] > 1) { + dev_err(&pdev->dev, "invalid dma mux value: %d\n", + dma_spec->args[2]); + return ERR_PTR(-EINVAL); + } + + /* The of_node_put() will be done in the core for the node */ + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); + if (!dma_spec->np) { + dev_err(&pdev->dev, "can't get dma master\n"); + return ERR_PTR(-EINVAL); + } + + spin_lock_irqsave(&dmamux->lock, flags); + if (mux->busy) { + spin_unlock_irqrestore(&dmamux->lock, flags); + dev_err(dev, "dma request signal %d busy, routed to %s\n", + mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0); + of_node_put(dma_spec->np); + return ERR_PTR(-EBUSY); + } + + mux->busy = true; + mux->muxval = dma_spec->args[2] ? BIT(mux->bit) : 0; + + regmap_update_bits(dmamux->reg, mux->muxreg, BIT(mux->bit), mux->muxval); + spin_unlock_irqrestore(&dmamux->lock, flags); + + dma_spec->args[2] = 0; + dma_spec->args_count = 2; + + dev_dbg(dev, "dma request signal %d routed to %s\n", + mux->signal, mux->muxval ? 
mux->name_sel1 : mux->name_sel0); + + return mux; +} + +static int lpc32xx_dmamux_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct lpc32xx_dmamux_data *dmamux; + + dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL); + if (!dmamux) + return -ENOMEM; + + dmamux->reg = syscon_node_to_regmap(np->parent); + if (IS_ERR(dmamux->reg)) { + dev_err(&pdev->dev, "syscon lookup failed\n"); + return PTR_ERR(dmamux->reg); + } + + spin_lock_init(&dmamux->lock); + platform_set_drvdata(pdev, dmamux); + dmamux->dmarouter.dev = &pdev->dev; + dmamux->dmarouter.route_free = lpc32xx_dmamux_release; + + return of_dma_router_register(np, lpc32xx_dmamux_reserve, + &dmamux->dmarouter); +} + +static const struct of_device_id lpc32xx_dmamux_match[] = { + { .compatible = "nxp,lpc3220-dmamux" }, + {}, +}; + +static struct platform_driver lpc32xx_dmamux_driver = { + .probe = lpc32xx_dmamux_probe, + .driver = { + .name = "lpc32xx-dmamux", + .of_match_table = lpc32xx_dmamux_match, + }, +}; + +static int __init lpc32xx_dmamux_init(void) +{ + return platform_driver_register(&lpc32xx_dmamux_driver); +} +arch_initcall(lpc32xx_dmamux_init); diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c index a49913f3ed3f7c..9652e86667224b 100644 --- a/drivers/dma/ls2x-apb-dma.c +++ b/drivers/dma/ls2x-apb-dma.c @@ -33,11 +33,11 @@ #define LDMA_STOP BIT(4) /* DMA stop operation */ #define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */ -/* Bitfields in ndesc_addr field of HW decriptor */ +/* Bitfields in ndesc_addr field of HW descriptor */ #define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */ #define LDMA_DESC_ADDR_LOW GENMASK(31, 1) -/* Bitfields in cmd field of HW decriptor */ +/* Bitfields in cmd field of HW descriptor */ #define LDMA_INT BIT(1) /* Enable DMA interrupts */ #define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */ diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index 529100c5b9f5d2..b69eabf12a24fa 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -518,7 +518,7 @@ mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, /* setup dma channel */ cvd[i]->ch = c; - /* setup sourece, destination, and length */ + /* setup source, destination, and length */ tlen = (len > MTK_CQDMA_MAX_LEN) ? 
MTK_CQDMA_MAX_LEN : len; cvd[i]->len = tlen; cvd[i]->src = src; @@ -617,7 +617,7 @@ static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c) u32 i, min_refcnt = U32_MAX, refcnt; unsigned long flags; - /* allocate PC with the minimun refcount */ + /* allocate PC with the minimum refcount */ for (i = 0; i < cqdma->dma_channels; ++i) { refcnt = refcount_read(&cqdma->pc[i]->refcnt); if (refcnt < min_refcnt) { diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index 36ff11e909ea08..58c7961ab9ad15 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -226,7 +226,7 @@ struct mtk_hsdma_soc { * @pc_refcnt: Track how many VCs are using the PC * @lock: Lock protect agaisting multiple VCs access PC * @soc: The pointer to area holding differences among - * vaious platform + * various platform */ struct mtk_hsdma_device { struct dma_device ddev; diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index bcd3b623ac6c20..43efce77bb5770 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -414,7 +414,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) if (!mv_chan_is_busy(mv_chan)) { u32 current_desc = mv_chan_get_current_desc(mv_chan); /* - * and the curren desc is the end of the chain before + * and the current desc is the end of the chain before * the append, then we need to start the channel */ if (current_desc == old_chain_tail->async_tx.phys) @@ -1074,7 +1074,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, if (!mv_chan->dma_desc_pool_virt) return ERR_PTR(-ENOMEM); - /* discover transaction capabilites from the platform data */ + /* discover transaction capabilities from the platform data */ dma_dev->cap_mask = cap_mask; INIT_LIST_HEAD(&dma_dev->channels); diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index d86086b05b0e68..c87cefd38a07a1 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -99,7 +99,7 @@ struct mv_xor_device { * @common: common dmaengine channel object members * @slots_allocated: records the actual size of the descriptor slot pool * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs - * @op_in_desc: new mode of driver, each op is writen to descriptor. + * @op_in_desc: new mode of driver, each op is written to descriptor. 
*/ struct mv_xor_chan { int pending; diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 97ebc791a30bd6..c8c67f4d982c01 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -175,7 +175,7 @@ struct mv_xor_v2_device { * struct mv_xor_v2_sw_desc - implements a xor SW descriptor * @idx: descriptor index * @async_tx: support for the async_tx api - * @hw_desc: assosiated HW descriptor + * @hw_desc: associated HW descriptor * @free_list: node of the free SW descriprots list */ struct mv_xor_v2_sw_desc { diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index c08916339aa767..3b011a91d48ec7 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -897,7 +897,7 @@ static int nbpf_config(struct dma_chan *dchan, /* * We could check config->slave_id to match chan->terminal here, * but with DT they would be coming from the same source, so - * such a check would be superflous + * such a check would be superfluous */ chan->slave_dst_addr = config->dst_addr; diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index e588fff9f21d27..423442e55d368d 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -26,7 +26,7 @@ static DEFINE_MUTEX(of_dma_lock); * * Finds a DMA controller with matching device node and number for dma cells * in a list of registered DMA controllers. If a match is found a valid pointer - * to the DMA data stored is retuned. A NULL pointer is returned if no match is + * to the DMA data stored is returned. A NULL pointer is returned if no match is * found. */ static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec) @@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(of_dma_simple_xlate); * * This function can be used as the of xlate callback for DMA driver which wants * to match the channel based on the channel id. When using this xlate function - * the #dma-cells propety of the DMA controller dt node needs to be set to 1. + * the #dma-cells property of the DMA controller dt node needs to be set to 1. * The data parameter of of_dma_controller_register must be a pointer to the * dma_device struct the function should match upon. * diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index e001f4f7aa640f..aa436f9e357185 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -1156,7 +1156,7 @@ static int owl_dma_probe(struct platform_device *pdev) } /* - * Eventhough the DMA controller is capable of generating 4 + * Even though the DMA controller is capable of generating 4 * IRQ's for DMA priority feature, we only use 1 IRQ for * simplification. */ diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 60c4de8dac1d2a..82a9fe88ad54c9 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -3163,10 +3163,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) * This is the limit for transfers with a buswidth of 1, larger * buswidths will have larger limits. */ - ret = dma_set_max_seg_size(&adev->dev, 1900800); - if (ret) - dev_err(&adev->dev, "unable to set the seg size\n"); - + dma_set_max_seg_size(&adev->dev, 1900800); init_pl330_debugfs(pl330); dev_info(&adev->dev, diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index bbb60a970dabdc..7b78759ac734dc 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -9,7 +9,7 @@ */ /* - * This driver supports the asynchrounous DMA copy and RAID engines available + * This driver supports the asynchronous DMA copy and RAID engines available * on the AMCC PPC440SPe Processors. 
* Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x) * ADMA driver written by D.Williams. diff --git a/drivers/dma/ppc4xx/dma.h b/drivers/dma/ppc4xx/dma.h index 1ff4be23db0f7a..b5725481bfa645 100644 --- a/drivers/dma/ppc4xx/dma.h +++ b/drivers/dma/ppc4xx/dma.h @@ -14,7 +14,7 @@ /* Number of elements in the array with statical CDBs */ #define MAX_STAT_DMA_CDBS 16 -/* Number of DMA engines available on the contoller */ +/* Number of DMA engines available on the controller */ #define DMA_ENGINES_NUM 2 /* Maximum h/w supported number of destinations */ diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h index 21b4bf895200b5..39bc3726823538 100644 --- a/drivers/dma/ptdma/ptdma.h +++ b/drivers/dma/ptdma/ptdma.h @@ -192,7 +192,7 @@ struct pt_cmd_queue { /* Queue dma pool */ struct dma_pool *dma_pool; - /* Queue base address (not neccessarily aligned)*/ + /* Queue base address (not necessarily aligned)*/ struct ptdma_desc *qbase; /* Aligned queue start address (per requirement) */ diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 5e7d332731e0c1..d43a881e43b904 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -440,7 +440,7 @@ static void bam_reset(struct bam_device *bdev) val |= BAM_EN; writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); - /* set descriptor threshhold, start with 4 bytes */ + /* set descriptor threshold, start with 4 bytes */ writel_relaxed(DEFAULT_CNT_THRSHLD, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); @@ -667,7 +667,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, for_each_sg(sgl, sg, sg_len, i) num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); - /* allocate enough room to accomodate the number of entries */ + /* allocate enough room to accommodate the number of entries */ async_desc = kzalloc(struct_size(async_desc, desc, num_alloc), GFP_NOWAIT); @@ -1325,11 +1325,7 @@ static int bam_dma_probe(struct platform_device *pdev) /* set max dma segment size */ bdev->common.dev = bdev->dev; - ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE); - if (ret) { - dev_err(bdev->dev, "cannot set maximum segment size\n"); - goto err_bam_channel_exit; - } + dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE); platform_set_drvdata(pdev, bdev); diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index e6ebd688d74630..52a7c8f2498f61 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -1856,7 +1856,7 @@ static void gpi_issue_pending(struct dma_chan *chan) read_lock_irqsave(&gpii->pm_lock, pm_lock_flags); - /* move all submitted discriptors to issued list */ + /* move all submitted descriptors to issued list */ spin_lock_irqsave(&gchan->vc.lock, flags); if (vchan_issue_pending(&gchan->vc)) vd = list_last_entry(&gchan->vc.desc_issued, diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c index 53f4273b657cec..c1db398adc84d7 100644 --- a/drivers/dma/qcom/qcom_adm.c +++ b/drivers/dma/qcom/qcom_adm.c @@ -650,7 +650,7 @@ static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie, /* * residue is either the full length if it is in the issued list, or 0 * if it is in progress. 
We have no reliable way of determining - * anything inbetween + * anything in between */ dma_set_residue(txstate, residue); diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 40482cb73d798a..1094a2f821649c 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1868,9 +1868,7 @@ static int rcar_dmac_probe(struct platform_device *pdev) dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); - ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); - if (ret) - return ret; + dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); if (ret) diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 7cc9eb2217e8f0..8ead0a1fd2371b 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -318,7 +318,7 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan, } /* - * Find a slave channel configuration from the contoller list by either a slave + * Find a slave channel configuration from the controller list by either a slave * ID in the non-DT case, or by a MID/RID value in the DT case */ static const struct sh_dmae_slave_config *dmae_find_slave( diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 2c489299148eee..d52e1685aed53f 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3632,11 +3632,7 @@ static int __init d40_probe(struct platform_device *pdev) if (ret) goto destroy_cache; - ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); - if (ret) { - d40_err(dev, "Failed to set dma max seg size\n"); - goto destroy_cache; - } + dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); d40_hw_init(base); diff --git a/drivers/dma/ste_dma40.h b/drivers/dma/ste_dma40.h index c697bfe16a0172..a90c786acc1f1a 100644 --- a/drivers/dma/ste_dma40.h +++ b/drivers/dma/ste_dma40.h @@ -4,7 +4,7 @@ #define STE_DMA40_H /* - * Maxium size for a single dma descriptor + * Maximum size for a single dma descriptor * Size is limited to 16 bits. * Size is in the units of addr-widths (1,2,4,8 bytes) * Larger transfers will be split up to multiple linked desc diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index c504e855eb02ba..2e30e9a94a1e3c 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -369,7 +369,7 @@ struct d40_phy_lli_bidir { * @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst. * @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst. * - * This struct must be 8 bytes aligned since it will be accessed directy by + * This struct must be 8 bytes aligned since it will be accessed directly by * the DMA. Never add any none hw mapped registers to this struct. */ diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index ac69778827f2a2..7d1acda2d72b35 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -463,7 +463,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, /* * If interrupt is pending then do nothing as the ISR will handle - * the programing for new request. + * the programming for new request. 
*/ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { dev_err(tdc2dev(tdc), diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h index d349c6d482aee0..9062a237cd1672 100644 --- a/drivers/dma/ti/k3-udma.h +++ b/drivers/dma/ti/k3-udma.h @@ -131,7 +131,6 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property); struct device *xudma_get_device(struct udma_dev *ud); struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud); -void xudma_dev_put(struct udma_dev *ud); u32 xudma_dev_get_psil_base(struct udma_dev *ud); struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index fd4397adeb792b..275848a9c4502c 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -1742,7 +1742,7 @@ static int xgene_dma_probe(struct platform_device *pdev) /* Initialize DMA channels software state */ xgene_dma_init_channels(pdma); - /* Configue DMA rings */ + /* Configure DMA rings */ ret = xgene_dma_init_rings(pdma); if (ret) goto err_clk_enable; diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index 36bd4825d389d5..be87764af9e836 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -149,7 +149,7 @@ struct xilinx_dpdma_chan; * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr) * @next_desc: next descriptor 32 bit address * @src_addr: payload source address (1st page, 32 LSB) - * @addr_ext_23: payload source address (3nd and 3rd pages, 16 LSBs) + * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs) * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs) * @src_addr2: payload source address (2nd page, 32 LSB) * @src_addr3: payload source address (3rd page, 32 LSB) @@ -210,7 +210,7 @@ struct xilinx_dpdma_tx_desc { * @vchan: virtual DMA channel * @reg: register base address * @id: channel ID - * @wait_to_stop: queue to wait for outstanding transacitons before stopping + * @wait_to_stop: queue to wait for outstanding transactions before stopping * @running: true if the channel is running * @first_frame: flag for the first frame of stream * @video_group: flag if multi-channel operation is needed for video channels @@ -670,6 +670,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc) kfree(desc); } +/** + * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor + * @chan: DPDMA channel + * @buf_addr: buffer address + * @buf_len: buffer length + * @period_len: length of one period in bytes + * @flags: tx flags argument passed in to prepare function + * + * Prepare a tx descriptor including internal software/hardware descriptors + * for the given cyclic transaction. + * + * Return: A dma async tx descriptor on success, or NULL. 
+ */ +static struct dma_async_tx_descriptor * +xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, + size_t period_len, unsigned long flags) +{ + struct xilinx_dpdma_tx_desc *tx_desc; + struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL; + unsigned int periods = buf_len / period_len; + unsigned int i; + + tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan); + if (!tx_desc) + return NULL; + + for (i = 0; i < periods; i++) { + struct xilinx_dpdma_hw_desc *hw_desc; + + if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) { + dev_err(chan->xdev->dev, + "buffer should be aligned at %d B\n", + XILINX_DPDMA_ALIGN_BYTES); + goto error; + } + + sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan); + if (!sw_desc) + goto error; + + xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last, + &buf_addr, 1); + hw_desc = &sw_desc->hw; + hw_desc->xfer_size = period_len; + hw_desc->hsize_stride = + FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, + period_len) | + FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK, + period_len); + hw_desc->control = XILINX_DPDMA_DESC_CONTROL_PREEMBLE | + XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE | + XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR; + + list_add_tail(&sw_desc->node, &tx_desc->descriptors); + + buf_addr += period_len; + last = sw_desc; + } + + sw_desc = list_first_entry(&tx_desc->descriptors, + struct xilinx_dpdma_sw_desc, node); + last->hw.next_desc = lower_32_bits(sw_desc->dma_addr); + if (chan->xdev->ext_addr) + last->hw.addr_ext |= + FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK, + upper_32_bits(sw_desc->dma_addr)); + + last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME; + + return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags); + +error: + xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc); + + return NULL; +} + /** * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma * descriptor @@ -1189,6 +1267,23 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan) /* ----------------------------------------------------------------------------- * DMA Engine Operations */ +static struct dma_async_tx_descriptor * +xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); + + if (direction != DMA_MEM_TO_DEV) + return NULL; + + if (buf_len % period_len) + return NULL; + + return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len, + period_len, flags); +} static struct dma_async_tx_descriptor * xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan, @@ -1672,6 +1767,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, ddev->cap_mask); dma_cap_set(DMA_PRIVATE, ddev->cap_mask); + dma_cap_set(DMA_CYCLIC, ddev->cap_mask); dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask); dma_cap_set(DMA_REPEAT, ddev->cap_mask); dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask); @@ -1679,6 +1775,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev) ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources; ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources; + ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic; ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma; /* TODO: Can we achieve better granularity ? 
*/ ddev->device_tx_status = dma_cookie_status; diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index f31631bef961a9..9ae46f1198fe60 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -22,10 +22,10 @@ #include "../dmaengine.h" /* Register Offsets */ -#define ZYNQMP_DMA_ISR 0x100 -#define ZYNQMP_DMA_IMR 0x104 -#define ZYNQMP_DMA_IER 0x108 -#define ZYNQMP_DMA_IDS 0x10C +#define ZYNQMP_DMA_ISR (chan->irq_offset + 0x100) +#define ZYNQMP_DMA_IMR (chan->irq_offset + 0x104) +#define ZYNQMP_DMA_IER (chan->irq_offset + 0x108) +#define ZYNQMP_DMA_IDS (chan->irq_offset + 0x10c) #define ZYNQMP_DMA_CTRL0 0x110 #define ZYNQMP_DMA_CTRL1 0x114 #define ZYNQMP_DMA_DATA_ATTR 0x120 @@ -145,6 +145,9 @@ #define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \ async_tx) +/* IRQ Register offset for Versal Gen 2 */ +#define IRQ_REG_OFFSET 0x308 + /** * struct zynqmp_dma_desc_ll - Hw linked list descriptor * @addr: Buffer address @@ -211,6 +214,7 @@ struct zynqmp_dma_desc_sw { * @bus_width: Bus width * @src_burst_len: Source burst length * @dst_burst_len: Dest burst length + * @irq_offset: Irq register offset */ struct zynqmp_dma_chan { struct zynqmp_dma_device *zdev; @@ -235,6 +239,7 @@ struct zynqmp_dma_chan { u32 bus_width; u32 src_burst_len; u32 dst_burst_len; + u32 irq_offset; }; /** @@ -253,6 +258,14 @@ struct zynqmp_dma_device { struct clk *clk_apb; }; +struct zynqmp_dma_config { + u32 offset; +}; + +static const struct zynqmp_dma_config versal2_dma_config = { + .offset = IRQ_REG_OFFSET, +}; + static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg, u64 value) { @@ -892,6 +905,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, { struct zynqmp_dma_chan *chan; struct device_node *node = pdev->dev.of_node; + const struct zynqmp_dma_config *match_data; int err; chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL); @@ -919,6 +933,10 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return -EINVAL; } + match_data = of_device_get_match_data(&pdev->dev); + if (match_data) + chan->irq_offset = match_data->offset; + chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent"); zdev->chan = chan; tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet); @@ -1161,6 +1179,7 @@ static void zynqmp_dma_remove(struct platform_device *pdev) } static const struct of_device_id zynqmp_dma_of_match[] = { + { .compatible = "amd,versal2-dma-1.0", .data = &versal2_dma_config }, { .compatible = "xlnx,zynqmp-dma-1.0", }, {} }; diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c index 98e6ad8528d37c..fc0280dcddd107 100644 --- a/drivers/dpll/dpll_netlink.c +++ b/drivers/dpll/dpll_netlink.c @@ -342,6 +342,51 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin, return 0; } +static int +dpll_msg_add_pin_esync(struct sk_buff *msg, struct dpll_pin *pin, + struct dpll_pin_ref *ref, struct netlink_ext_ack *extack) +{ + const struct dpll_pin_ops *ops = dpll_pin_ops(ref); + struct dpll_device *dpll = ref->dpll; + struct dpll_pin_esync esync; + struct nlattr *nest; + int ret, i; + + if (!ops->esync_get) + return 0; + ret = ops->esync_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll, + dpll_priv(dpll), &esync, extack); + if (ret == -EOPNOTSUPP) + return 0; + else if (ret) + return ret; + if (nla_put_64bit(msg, DPLL_A_PIN_ESYNC_FREQUENCY, sizeof(esync.freq), + &esync.freq, DPLL_A_PIN_PAD)) + return -EMSGSIZE; + if (nla_put_u32(msg, DPLL_A_PIN_ESYNC_PULSE, esync.pulse)) + return 
-EMSGSIZE; + for (i = 0; i < esync.range_num; i++) { + nest = nla_nest_start(msg, + DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED); + if (!nest) + return -EMSGSIZE; + if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, + sizeof(esync.range[i].min), + &esync.range[i].min, DPLL_A_PIN_PAD)) + goto nest_cancel; + if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, + sizeof(esync.range[i].max), + &esync.range[i].max, DPLL_A_PIN_PAD)) + goto nest_cancel; + nla_nest_end(msg, nest); + } + return 0; + +nest_cancel: + nla_nest_cancel(msg, nest); + return -EMSGSIZE; +} + static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq) { int fs; @@ -481,6 +526,9 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin, if (ret) return ret; ret = dpll_msg_add_ffo(msg, pin, ref, extack); + if (ret) + return ret; + ret = dpll_msg_add_pin_esync(msg, pin, ref, extack); if (ret) return ret; if (xa_empty(&pin->parent_refs)) @@ -738,6 +786,83 @@ dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a, return ret; } +static int +dpll_pin_esync_set(struct dpll_pin *pin, struct nlattr *a, + struct netlink_ext_ack *extack) +{ + struct dpll_pin_ref *ref, *failed; + const struct dpll_pin_ops *ops; + struct dpll_pin_esync esync; + u64 freq = nla_get_u64(a); + struct dpll_device *dpll; + bool supported = false; + unsigned long i; + int ret; + + xa_for_each(&pin->dpll_refs, i, ref) { + ops = dpll_pin_ops(ref); + if (!ops->esync_set || !ops->esync_get) { + NL_SET_ERR_MSG(extack, + "embedded sync feature is not supported by this device"); + return -EOPNOTSUPP; + } + } + ref = dpll_xa_ref_dpll_first(&pin->dpll_refs); + ops = dpll_pin_ops(ref); + dpll = ref->dpll; + ret = ops->esync_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll, + dpll_priv(dpll), &esync, extack); + if (ret) { + NL_SET_ERR_MSG(extack, "unable to get current embedded sync frequency value"); + return ret; + } + if (freq == esync.freq) + return 0; + for (i = 0; i < esync.range_num; i++) + if (freq <= esync.range[i].max && freq >= esync.range[i].min) + supported = true; + if (!supported) { + NL_SET_ERR_MSG_ATTR(extack, a, + "requested embedded sync frequency value is not supported by this device"); + return -EINVAL; + } + + xa_for_each(&pin->dpll_refs, i, ref) { + void *pin_dpll_priv; + + ops = dpll_pin_ops(ref); + dpll = ref->dpll; + pin_dpll_priv = dpll_pin_on_dpll_priv(dpll, pin); + ret = ops->esync_set(pin, pin_dpll_priv, dpll, dpll_priv(dpll), + freq, extack); + if (ret) { + failed = ref; + NL_SET_ERR_MSG_FMT(extack, + "embedded sync frequency set failed for dpll_id: %u", + dpll->id); + goto rollback; + } + } + __dpll_pin_change_ntf(pin); + + return 0; + +rollback: + xa_for_each(&pin->dpll_refs, i, ref) { + void *pin_dpll_priv; + + if (ref == failed) + break; + ops = dpll_pin_ops(ref); + dpll = ref->dpll; + pin_dpll_priv = dpll_pin_on_dpll_priv(dpll, pin); + if (ops->esync_set(pin, pin_dpll_priv, dpll, dpll_priv(dpll), + esync.freq, extack)) + NL_SET_ERR_MSG(extack, "set embedded sync frequency rollback failed"); + } + return ret; +} + static int dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx, enum dpll_pin_state state, @@ -1039,6 +1164,11 @@ dpll_pin_set_from_nlattr(struct dpll_pin *pin, struct genl_info *info) if (ret) return ret; break; + case DPLL_A_PIN_ESYNC_FREQUENCY: + ret = dpll_pin_esync_set(pin, a, info->extack); + if (ret) + return ret; + break; } } diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c index 1e95f5397cfce6..fe9b6893d26147 100644 --- a/drivers/dpll/dpll_nl.c +++ b/drivers/dpll/dpll_nl.c @@ -62,7 +62,7 
@@ static const struct nla_policy dpll_pin_get_dump_nl_policy[DPLL_A_PIN_ID + 1] = }; /* DPLL_CMD_PIN_SET - do */ -static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_PHASE_ADJUST + 1] = { +static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY + 1] = { [DPLL_A_PIN_ID] = { .type = NLA_U32, }, [DPLL_A_PIN_FREQUENCY] = { .type = NLA_U64, }, [DPLL_A_PIN_DIRECTION] = NLA_POLICY_RANGE(NLA_U32, 1, 2), @@ -71,6 +71,7 @@ static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_PHASE_ADJUST + [DPLL_A_PIN_PARENT_DEVICE] = NLA_POLICY_NESTED(dpll_pin_parent_device_nl_policy), [DPLL_A_PIN_PARENT_PIN] = NLA_POLICY_NESTED(dpll_pin_parent_pin_nl_policy), [DPLL_A_PIN_PHASE_ADJUST] = { .type = NLA_S32, }, + [DPLL_A_PIN_ESYNC_FREQUENCY] = { .type = NLA_U64, }, }; /* Ops table for dpll */ @@ -138,7 +139,7 @@ static const struct genl_split_ops dpll_nl_ops[] = { .doit = dpll_nl_pin_set_doit, .post_doit = dpll_pin_post_doit, .policy = dpll_pin_set_nl_policy, - .maxattr = DPLL_A_PIN_PHASE_ADJUST, + .maxattr = DPLL_A_PIN_ESYNC_FREQUENCY, .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, }, }; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 16c8de5050e592..81af6c344d6baa 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -311,15 +311,6 @@ config EDAC_CELL Cell Broadband Engine internal memory controller on platform without a hypervisor -config EDAC_PPC4XX - tristate "PPC4xx IBM DDR2 Memory Controller" - depends on 4xx - help - This enables support for EDAC on the ECC memory used - with the IBM DDR2 memory controller found in various - PowerPC 4xx embedded processors such as the 405EX[r], - 440SP, 440SPe, 460EX, 460GT and 460SX. - config EDAC_AMD8131 tristate "AMD8131 HyperTransport PCI-X Tunnel" depends on PCI && PPC_MAPLE diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 4edfb83ffbeef5..faf310eec4a678 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -63,7 +63,6 @@ i10nm_edac-y := i10nm_base.o obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o -obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 24dd896d9a9d58..e2a954de913b42 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -47,10 +47,6 @@ readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : \ (res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) + \ (i) * (m)->chan_mmio_sz) -#define I10NM_GET_AMAP(m, i) \ - readl((m)->mbase + ((m)->hbm_mc ? 0x814 : \ - (res_cfg->type == GNR ? 
0xc14 : 0x20814)) + \ - (i) * (m)->chan_mmio_sz) #define I10NM_GET_REG32(m, i, offset) \ readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset)) #define I10NM_GET_REG64(m, i, offset) \ @@ -971,7 +967,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci, { struct skx_pvt *pvt = mci->pvt_info; struct skx_imc *imc = pvt->imc; - u32 mtr, amap, mcddrtcfg = 0; + u32 mtr, mcddrtcfg = 0; struct dimm_info *dimm; int i, j, ndimms; @@ -980,7 +976,6 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci, continue; ndimms = 0; - amap = I10NM_GET_AMAP(imc, i); if (res_cfg->type != GNR) mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i); @@ -992,7 +987,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci, mtr, mcddrtcfg, imc->mc, i, j); if (IS_DIMM_PRESENT(mtr)) - ndimms += skx_get_dimm_info(mtr, 0, amap, dimm, + ndimms += skx_get_dimm_info(mtr, 0, 0, dimm, imc, i, j, cfg); else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) ndimms += skx_get_nvdimm_info(dimm, imc, i, j, @@ -1013,54 +1008,6 @@ static struct notifier_block i10nm_mce_dec = { .priority = MCE_PRIO_EDAC, }; -#ifdef CONFIG_EDAC_DEBUG -/* - * Debug feature. - * Exercise the address decode logic by writing an address to - * /sys/kernel/debug/edac/i10nm_test/addr. - */ -static struct dentry *i10nm_test; - -static int debugfs_u64_set(void *data, u64 val) -{ - struct mce m; - - pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val); - - memset(&m, 0, sizeof(m)); - /* ADDRV + MemRd + Unknown channel */ - m.status = MCI_STATUS_ADDRV + 0x90; - /* One corrected error */ - m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT); - m.addr = val; - skx_mce_check_error(NULL, 0, &m); - - return 0; -} -DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); - -static void setup_i10nm_debug(void) -{ - i10nm_test = edac_debugfs_create_dir("i10nm_test"); - if (!i10nm_test) - return; - - if (!edac_debugfs_create_file("addr", 0200, i10nm_test, - NULL, &fops_u64_wo)) { - debugfs_remove(i10nm_test); - i10nm_test = NULL; - } -} - -static void teardown_i10nm_debug(void) -{ - debugfs_remove_recursive(i10nm_test); -} -#else -static inline void setup_i10nm_debug(void) {} -static inline void teardown_i10nm_debug(void) {} -#endif /*CONFIG_EDAC_DEBUG*/ - static int __init i10nm_init(void) { u8 mc = 0, src_id = 0, node_id = 0; @@ -1159,7 +1106,7 @@ static int __init i10nm_init(void) opstate_init(); mce_register_decode_chain(&i10nm_mce_dec); - setup_i10nm_debug(); + skx_setup_debug("i10nm_test"); if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) { skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log); @@ -1187,7 +1134,7 @@ static void __exit i10nm_exit(void) enable_retry_rd_err_log(false); } - teardown_i10nm_debug(); + skx_teardown_debug(); mce_unregister_decode_chain(&i10nm_mce_dec); skx_adxl_put(); skx_remove(); diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index 0fe75eed8973b2..189a2fc29e74f5 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -316,7 +316,7 @@ static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc) if (igen6_tom <= _4GB) return eaddr + igen6_tolud - _4GB; - if (eaddr < _4GB) + if (eaddr >= igen6_tom) return eaddr + igen6_tolud - igen6_tom; return eaddr; diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c deleted file mode 100644 index 1eea3341a91682..00000000000000 --- a/drivers/edac/ppc4xx_edac.c +++ /dev/null @@ -1,1425 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2008 Nuovation System Designs, LLC - * Grant Erickson - */ - 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "edac_module.h" -#include "ppc4xx_edac.h" - -/* - * This file implements a driver for monitoring and handling events - * associated with the IMB DDR2 ECC controller found in the AMCC/IBM - * 405EX[r], 440SP, 440SPe, 460EX, 460GT and 460SX. - * - * As realized in the 405EX[r], this controller features: - * - * - Support for registered- and non-registered DDR1 and DDR2 memory. - * - 32-bit or 16-bit memory interface with optional ECC. - * - * o ECC support includes: - * - * - 4-bit SEC/DED - * - Aligned-nibble error detect - * - Bypass mode - * - * - Two (2) memory banks/ranks. - * - Up to 1 GiB per bank/rank in 32-bit mode and up to 512 MiB per - * bank/rank in 16-bit mode. - * - * As realized in the 440SP and 440SPe, this controller changes/adds: - * - * - 64-bit or 32-bit memory interface with optional ECC. - * - * o ECC support includes: - * - * - 8-bit SEC/DED - * - Aligned-nibble error detect - * - Bypass mode - * - * - Up to 4 GiB per bank/rank in 64-bit mode and up to 2 GiB - * per bank/rank in 32-bit mode. - * - * As realized in the 460EX and 460GT, this controller changes/adds: - * - * - 64-bit or 32-bit memory interface with optional ECC. - * - * o ECC support includes: - * - * - 8-bit SEC/DED - * - Aligned-nibble error detect - * - Bypass mode - * - * - Four (4) memory banks/ranks. - * - Up to 16 GiB per bank/rank in 64-bit mode and up to 8 GiB - * per bank/rank in 32-bit mode. - * - * At present, this driver has ONLY been tested against the controller - * realization in the 405EX[r] on the AMCC Kilauea and Haleakala - * boards (256 MiB w/o ECC memory soldered onto the board) and a - * proprietary board based on those designs (128 MiB ECC memory, also - * soldered onto the board). - * - * Dynamic feature detection and handling needs to be added for the - * other realizations of this controller listed above. - * - * Eventually, this driver will likely be adapted to the above variant - * realizations of this controller as well as broken apart to handle - * the other known ECC-capable controllers prevalent in other 4xx - * processors: - * - * - IBM SDRAM (405GP, 405CR and 405EP) "ibm,sdram-4xx" - * - IBM DDR1 (440GP, 440GX, 440EP and 440GR) "ibm,sdram-4xx-ddr" - * - Denali DDR1/DDR2 (440EPX and 440GRX) "denali,sdram-4xx-ddr2" - * - * For this controller, unfortunately, correctable errors report - * nothing more than the beat/cycle and byte/lane the correction - * occurred on and the check bit group that covered the error. - * - * In contrast, uncorrectable errors also report the failing address, - * the bus master and the transaction direction (i.e. read or write) - * - * Regardless of whether the error is a CE or a UE, we report the - * following pieces of information in the driver-unique message to the - * EDAC subsystem: - * - * - Device tree path - * - Bank(s) - * - Check bit error group - * - Beat(s)/lane(s) - */ - -/* Preprocessor Definitions */ - -#define EDAC_OPSTATE_INT_STR "interrupt" -#define EDAC_OPSTATE_POLL_STR "polled" -#define EDAC_OPSTATE_UNKNOWN_STR "unknown" - -#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac" -#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0" - -#define PPC4XX_EDAC_MESSAGE_SIZE 256 - -/* - * Kernel logging without an EDAC instance - */ -#define ppc4xx_edac_printk(level, fmt, arg...) 
\ - edac_printk(level, "PPC4xx MC", fmt, ##arg) - -/* - * Kernel logging with an EDAC instance - */ -#define ppc4xx_edac_mc_printk(level, mci, fmt, arg...) \ - edac_mc_chipset_printk(mci, level, "PPC4xx", fmt, ##arg) - -/* - * Macros to convert bank configuration size enumerations into MiB and - * page values. - */ -#define SDRAM_MBCF_SZ_MiB_MIN 4 -#define SDRAM_MBCF_SZ_TO_MiB(n) (SDRAM_MBCF_SZ_MiB_MIN \ - << (SDRAM_MBCF_SZ_DECODE(n))) -#define SDRAM_MBCF_SZ_TO_PAGES(n) (SDRAM_MBCF_SZ_MiB_MIN \ - << (20 - PAGE_SHIFT + \ - SDRAM_MBCF_SZ_DECODE(n))) - -/* - * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are - * indirectly accessed and have a base and length defined by the - * device tree. The base can be anything; however, we expect the - * length to be precisely two registers, the first for the address - * window and the second for the data window. - */ -#define SDRAM_DCR_RESOURCE_LEN 2 -#define SDRAM_DCR_ADDR_OFFSET 0 -#define SDRAM_DCR_DATA_OFFSET 1 - -/* - * Device tree interrupt indices - */ -#define INTMAP_ECCDED_INDEX 0 /* Double-bit Error Detect */ -#define INTMAP_ECCSEC_INDEX 1 /* Single-bit Error Correct */ - -/* Type Definitions */ - -/* - * PPC4xx SDRAM memory controller private instance data - */ -struct ppc4xx_edac_pdata { - dcr_host_t dcr_host; /* Indirect DCR address/data window mapping */ - struct { - int sec; /* Single-bit correctable error IRQ assigned */ - int ded; /* Double-bit detectable error IRQ assigned */ - } irqs; -}; - -/* - * Various status data gathered and manipulated when checking and - * reporting ECC status. - */ -struct ppc4xx_ecc_status { - u32 ecces; - u32 besr; - u32 bearh; - u32 bearl; - u32 wmirq; -}; - -/* Global Variables */ - -/* - * Device tree node type and compatible tuples this driver can match - * on. - */ -static const struct of_device_id ppc4xx_edac_match[] = { - { - .compatible = "ibm,sdram-4xx-ddr2" - }, - { } -}; -MODULE_DEVICE_TABLE(of, ppc4xx_edac_match); - -/* - * TODO: The row and channel parameters likely need to be dynamically - * set based on the aforementioned variant controller realizations. - */ -static const unsigned ppc4xx_edac_nr_csrows = 2; -static const unsigned ppc4xx_edac_nr_chans = 1; - -/* - * Strings associated with PLB master IDs capable of being posted in - * SDRAM_BESR or SDRAM_WMIRQ on uncorrectable ECC errors. - */ -static const char * const ppc4xx_plb_masters[9] = { - [SDRAM_PLB_M0ID_ICU] = "ICU", - [SDRAM_PLB_M0ID_PCIE0] = "PCI-E 0", - [SDRAM_PLB_M0ID_PCIE1] = "PCI-E 1", - [SDRAM_PLB_M0ID_DMA] = "DMA", - [SDRAM_PLB_M0ID_DCU] = "DCU", - [SDRAM_PLB_M0ID_OPB] = "OPB", - [SDRAM_PLB_M0ID_MAL] = "MAL", - [SDRAM_PLB_M0ID_SEC] = "SEC", - [SDRAM_PLB_M0ID_AHB] = "AHB" -}; - -/** - * mfsdram - read and return controller register data - * @dcr_host: A pointer to the DCR mapping. - * @idcr_n: The indirect DCR register to read. - * - * This routine reads and returns the data associated with the - * controller's specified indirect DCR register. - * - * Returns the read data. - */ -static inline u32 -mfsdram(const dcr_host_t *dcr_host, unsigned int idcr_n) -{ - return __mfdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET, - dcr_host->base + SDRAM_DCR_DATA_OFFSET, - idcr_n); -} - -/** - * mtsdram - write controller register data - * @dcr_host: A pointer to the DCR mapping. - * @idcr_n: The indirect DCR register to write. - * @value: The data to write. - * - * This routine writes the provided data to the controller's specified - * indirect DCR register. 
- */ -static inline void -mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n, u32 value) -{ - return __mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET, - dcr_host->base + SDRAM_DCR_DATA_OFFSET, - idcr_n, - value); -} - -/** - * ppc4xx_edac_check_bank_error - check a bank for an ECC bank error - * @status: A pointer to the ECC status structure to check for an - * ECC bank error. - * @bank: The bank to check for an ECC error. - * - * This routine determines whether the specified bank has an ECC - * error. - * - * Returns true if the specified bank has an ECC error; otherwise, - * false. - */ -static bool -ppc4xx_edac_check_bank_error(const struct ppc4xx_ecc_status *status, - unsigned int bank) -{ - switch (bank) { - case 0: - return status->ecces & SDRAM_ECCES_BK0ER; - case 1: - return status->ecces & SDRAM_ECCES_BK1ER; - default: - return false; - } -} - -/** - * ppc4xx_edac_generate_bank_message - generate interpretted bank status message - * @mci: A pointer to the EDAC memory controller instance associated - * with the bank message being generated. - * @status: A pointer to the ECC status structure to generate the - * message from. - * @buffer: A pointer to the buffer in which to generate the - * message. - * @size: The size, in bytes, of space available in buffer. - * - * This routine generates to the provided buffer the portion of the - * driver-unique report message associated with the ECCESS[BKNER] - * field of the specified ECC status. - * - * Returns the number of characters generated on success; otherwise, < - * 0 on error. - */ -static int -ppc4xx_edac_generate_bank_message(const struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status, - char *buffer, - size_t size) -{ - int n, total = 0; - unsigned int row, rows; - - n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - - for (rows = 0, row = 0; row < mci->nr_csrows; row++) { - if (ppc4xx_edac_check_bank_error(status, row)) { - n = snprintf(buffer, size, "%s%u", - (rows++ ? ", " : ""), row); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - } - } - - n = snprintf(buffer, size, "%s; ", rows ? "" : "None"); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - - fail: - return total; -} - -/** - * ppc4xx_edac_generate_checkbit_message - generate interpretted checkbit message - * @mci: A pointer to the EDAC memory controller instance associated - * with the checkbit message being generated. - * @status: A pointer to the ECC status structure to generate the - * message from. - * @buffer: A pointer to the buffer in which to generate the - * message. - * @size: The size, in bytes, of space available in buffer. - * - * This routine generates to the provided buffer the portion of the - * driver-unique report message associated with the ECCESS[CKBER] - * field of the specified ECC status. - * - * Returns the number of characters generated on success; otherwise, < - * 0 on error. 
- */ -static int -ppc4xx_edac_generate_checkbit_message(const struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status, - char *buffer, - size_t size) -{ - const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; - const char *ckber = NULL; - - switch (status->ecces & SDRAM_ECCES_CKBER_MASK) { - case SDRAM_ECCES_CKBER_NONE: - ckber = "None"; - break; - case SDRAM_ECCES_CKBER_32_ECC_0_3: - ckber = "ECC0:3"; - break; - case SDRAM_ECCES_CKBER_32_ECC_4_8: - switch (mfsdram(&pdata->dcr_host, SDRAM_MCOPT1) & - SDRAM_MCOPT1_WDTH_MASK) { - case SDRAM_MCOPT1_WDTH_16: - ckber = "ECC0:3"; - break; - case SDRAM_MCOPT1_WDTH_32: - ckber = "ECC4:8"; - break; - default: - ckber = "Unknown"; - break; - } - break; - case SDRAM_ECCES_CKBER_32_ECC_0_8: - ckber = "ECC0:8"; - break; - default: - ckber = "Unknown"; - break; - } - - return snprintf(buffer, size, "Checkbit Error: %s", ckber); -} - -/** - * ppc4xx_edac_generate_lane_message - generate interpretted byte lane message - * @mci: A pointer to the EDAC memory controller instance associated - * with the byte lane message being generated. - * @status: A pointer to the ECC status structure to generate the - * message from. - * @buffer: A pointer to the buffer in which to generate the - * message. - * @size: The size, in bytes, of space available in buffer. - * - * This routine generates to the provided buffer the portion of the - * driver-unique report message associated with the ECCESS[BNCE] - * field of the specified ECC status. - * - * Returns the number of characters generated on success; otherwise, < - * 0 on error. - */ -static int -ppc4xx_edac_generate_lane_message(const struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status, - char *buffer, - size_t size) -{ - int n, total = 0; - unsigned int lane, lanes; - const unsigned int first_lane = 0; - const unsigned int lane_count = 16; - - n = snprintf(buffer, size, "; Byte Lane Errors: "); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - - for (lanes = 0, lane = first_lane; lane < lane_count; lane++) { - if ((status->ecces & SDRAM_ECCES_BNCE_ENCODE(lane)) != 0) { - n = snprintf(buffer, size, - "%s%u", - (lanes++ ? ", " : ""), lane); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - } - } - - n = snprintf(buffer, size, "%s; ", lanes ? "" : "None"); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - - fail: - return total; -} - -/** - * ppc4xx_edac_generate_ecc_message - generate interpretted ECC status message - * @mci: A pointer to the EDAC memory controller instance associated - * with the ECCES message being generated. - * @status: A pointer to the ECC status structure to generate the - * message from. - * @buffer: A pointer to the buffer in which to generate the - * message. - * @size: The size, in bytes, of space available in buffer. - * - * This routine generates to the provided buffer the portion of the - * driver-unique report message associated with the ECCESS register of - * the specified ECC status. - * - * Returns the number of characters generated on success; otherwise, < - * 0 on error. 
- */ -static int -ppc4xx_edac_generate_ecc_message(const struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status, - char *buffer, - size_t size) -{ - int n, total = 0; - - n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - - n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - - n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size); - - if (n < 0 || n >= size) - goto fail; - - buffer += n; - size -= n; - total += n; - - fail: - return total; -} - -/** - * ppc4xx_edac_generate_plb_message - generate interpretted PLB status message - * @mci: A pointer to the EDAC memory controller instance associated - * with the PLB message being generated. - * @status: A pointer to the ECC status structure to generate the - * message from. - * @buffer: A pointer to the buffer in which to generate the - * message. - * @size: The size, in bytes, of space available in buffer. - * - * This routine generates to the provided buffer the portion of the - * driver-unique report message associated with the PLB-related BESR - * and/or WMIRQ registers of the specified ECC status. - * - * Returns the number of characters generated on success; otherwise, < - * 0 on error. - */ -static int -ppc4xx_edac_generate_plb_message(const struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status, - char *buffer, - size_t size) -{ - unsigned int master; - bool read; - - if ((status->besr & SDRAM_BESR_MASK) == 0) - return 0; - - if ((status->besr & SDRAM_BESR_M0ET_MASK) == SDRAM_BESR_M0ET_NONE) - return 0; - - read = ((status->besr & SDRAM_BESR_M0RW_MASK) == SDRAM_BESR_M0RW_READ); - - master = SDRAM_BESR_M0ID_DECODE(status->besr); - - return snprintf(buffer, size, - "%s error w/ PLB master %u \"%s\"; ", - (read ? "Read" : "Write"), - master, - (((master >= SDRAM_PLB_M0ID_FIRST) && - (master <= SDRAM_PLB_M0ID_LAST)) ? - ppc4xx_plb_masters[master] : "UNKNOWN")); -} - -/** - * ppc4xx_edac_generate_message - generate interpretted status message - * @mci: A pointer to the EDAC memory controller instance associated - * with the driver-unique message being generated. - * @status: A pointer to the ECC status structure to generate the - * message from. - * @buffer: A pointer to the buffer in which to generate the - * message. - * @size: The size, in bytes, of space available in buffer. - * - * This routine generates to the provided buffer the driver-unique - * EDAC report message from the specified ECC status. - */ -static void -ppc4xx_edac_generate_message(const struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status, - char *buffer, - size_t size) -{ - int n; - - if (buffer == NULL || size == 0) - return; - - n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size); - - if (n < 0 || n >= size) - return; - - buffer += n; - size -= n; - - ppc4xx_edac_generate_plb_message(mci, status, buffer, size); -} - -#ifdef DEBUG -/** - * ppc4xx_ecc_dump_status - dump controller ECC status registers - * @mci: A pointer to the EDAC memory controller instance - * associated with the status being dumped. - * @status: A pointer to the ECC status structure to generate the - * dump from. - * - * This routine dumps to the kernel log buffer the raw and - * interpretted specified ECC status. 
- */ -static void -ppc4xx_ecc_dump_status(const struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status) -{ - char message[PPC4XX_EDAC_MESSAGE_SIZE]; - - ppc4xx_edac_generate_message(mci, status, message, sizeof(message)); - - ppc4xx_edac_mc_printk(KERN_INFO, mci, - "\n" - "\tECCES: 0x%08x\n" - "\tWMIRQ: 0x%08x\n" - "\tBESR: 0x%08x\n" - "\tBEAR: 0x%08x%08x\n" - "\t%s\n", - status->ecces, - status->wmirq, - status->besr, - status->bearh, - status->bearl, - message); -} -#endif /* DEBUG */ - -/** - * ppc4xx_ecc_get_status - get controller ECC status - * @mci: A pointer to the EDAC memory controller instance - * associated with the status being retrieved. - * @status: A pointer to the ECC status structure to populate the - * ECC status with. - * - * This routine reads and masks, as appropriate, all the relevant - * status registers that deal with ibm,sdram-4xx-ddr2 ECC errors. - * While we read all of them, for correctable errors, we only expect - * to deal with ECCES. For uncorrectable errors, we expect to deal - * with all of them. - */ -static void -ppc4xx_ecc_get_status(const struct mem_ctl_info *mci, - struct ppc4xx_ecc_status *status) -{ - const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; - const dcr_host_t *dcr_host = &pdata->dcr_host; - - status->ecces = mfsdram(dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK; - status->wmirq = mfsdram(dcr_host, SDRAM_WMIRQ) & SDRAM_WMIRQ_MASK; - status->besr = mfsdram(dcr_host, SDRAM_BESR) & SDRAM_BESR_MASK; - status->bearl = mfsdram(dcr_host, SDRAM_BEARL); - status->bearh = mfsdram(dcr_host, SDRAM_BEARH); -} - -/** - * ppc4xx_ecc_clear_status - clear controller ECC status - * @mci: A pointer to the EDAC memory controller instance - * associated with the status being cleared. - * @status: A pointer to the ECC status structure containing the - * values to write to clear the ECC status. - * - * This routine clears--by writing the masked (as appropriate) status - * values back to--the status registers that deal with - * ibm,sdram-4xx-ddr2 ECC errors. - */ -static void -ppc4xx_ecc_clear_status(const struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status) -{ - const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; - const dcr_host_t *dcr_host = &pdata->dcr_host; - - mtsdram(dcr_host, SDRAM_ECCES, status->ecces & SDRAM_ECCES_MASK); - mtsdram(dcr_host, SDRAM_WMIRQ, status->wmirq & SDRAM_WMIRQ_MASK); - mtsdram(dcr_host, SDRAM_BESR, status->besr & SDRAM_BESR_MASK); - mtsdram(dcr_host, SDRAM_BEARL, 0); - mtsdram(dcr_host, SDRAM_BEARH, 0); -} - -/** - * ppc4xx_edac_handle_ce - handle controller correctable ECC error (CE) - * @mci: A pointer to the EDAC memory controller instance - * associated with the correctable error being handled and reported. - * @status: A pointer to the ECC status structure associated with - * the correctable error being handled and reported. - * - * This routine handles an ibm,sdram-4xx-ddr2 controller ECC - * correctable error. Per the aforementioned discussion, there's not - * enough status available to use the full EDAC correctable error - * interface, so we just pass driver-unique message to the "no info" - * interface. 
- */ -static void -ppc4xx_edac_handle_ce(struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status) -{ - int row; - char message[PPC4XX_EDAC_MESSAGE_SIZE]; - - ppc4xx_edac_generate_message(mci, status, message, sizeof(message)); - - for (row = 0; row < mci->nr_csrows; row++) - if (ppc4xx_edac_check_bank_error(status, row)) - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, - 0, 0, 0, - row, 0, -1, - message, ""); -} - -/** - * ppc4xx_edac_handle_ue - handle controller uncorrectable ECC error (UE) - * @mci: A pointer to the EDAC memory controller instance - * associated with the uncorrectable error being handled and - * reported. - * @status: A pointer to the ECC status structure associated with - * the uncorrectable error being handled and reported. - * - * This routine handles an ibm,sdram-4xx-ddr2 controller ECC - * uncorrectable error. - */ -static void -ppc4xx_edac_handle_ue(struct mem_ctl_info *mci, - const struct ppc4xx_ecc_status *status) -{ - const u64 bear = ((u64)status->bearh << 32 | status->bearl); - const unsigned long page = bear >> PAGE_SHIFT; - const unsigned long offset = bear & ~PAGE_MASK; - int row; - char message[PPC4XX_EDAC_MESSAGE_SIZE]; - - ppc4xx_edac_generate_message(mci, status, message, sizeof(message)); - - for (row = 0; row < mci->nr_csrows; row++) - if (ppc4xx_edac_check_bank_error(status, row)) - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, - page, offset, 0, - row, 0, -1, - message, ""); -} - -/** - * ppc4xx_edac_check - check controller for ECC errors - * @mci: A pointer to the EDAC memory controller instance - * associated with the ibm,sdram-4xx-ddr2 controller being - * checked. - * - * This routine is used to check and post ECC errors and is called by - * both the EDAC polling thread and this driver's CE and UE interrupt - * handler. - */ -static void -ppc4xx_edac_check(struct mem_ctl_info *mci) -{ -#ifdef DEBUG - static unsigned int count; -#endif - struct ppc4xx_ecc_status status; - - ppc4xx_ecc_get_status(mci, &status); - -#ifdef DEBUG - if (count++ % 30 == 0) - ppc4xx_ecc_dump_status(mci, &status); -#endif - - if (status.ecces & SDRAM_ECCES_UE) - ppc4xx_edac_handle_ue(mci, &status); - - if (status.ecces & SDRAM_ECCES_CE) - ppc4xx_edac_handle_ce(mci, &status); - - ppc4xx_ecc_clear_status(mci, &status); -} - -/** - * ppc4xx_edac_isr - SEC (CE) and DED (UE) interrupt service routine - * @irq: The virtual interrupt number being serviced. - * @dev_id: A pointer to the EDAC memory controller instance - * associated with the interrupt being handled. - * - * This routine implements the interrupt handler for both correctable - * (CE) and uncorrectable (UE) ECC errors for the ibm,sdram-4xx-ddr2 - * controller. It simply calls through to the same routine used during - * polling to check, report and clear the ECC status. - * - * Unconditionally returns IRQ_HANDLED. - */ -static irqreturn_t -ppc4xx_edac_isr(int irq, void *dev_id) -{ - struct mem_ctl_info *mci = dev_id; - - ppc4xx_edac_check(mci); - - return IRQ_HANDLED; -} - -/** - * ppc4xx_edac_get_dtype - return the controller memory width - * @mcopt1: The 32-bit Memory Controller Option 1 register value - * currently set for the controller, from which the width - * is derived. - * - * This routine returns the EDAC device type width appropriate for the - * current controller configuration. 
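/*
 * Standalone check of the BEARH/BEARL handling in ppc4xx_edac_handle_ue()
 * above: the two 32-bit halves form a 64-bit error address, which is then
 * split into a page frame number and an in-page offset for
 * edac_mc_handle_error().  4 KiB pages are assumed and the sample register
 * values are arbitrary.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t bearh = 0x00000001;	/* high word of error address */
	const uint32_t bearl = 0x2345f678;	/* low word of error address  */
	const unsigned int page_shift = 12;	/* 4 KiB pages assumed */

	const uint64_t bear = ((uint64_t)bearh << 32) | bearl;
	const uint64_t page = bear >> page_shift;
	const uint64_t offset = bear & ((1ULL << page_shift) - 1);

	assert(bear == 0x12345f678ULL);
	assert(page == 0x12345fULL);	/* 0x12345f678 >> 12 */
	assert(offset == 0x678ULL);	/* low 12 bits */
	return 0;
}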
- * - * TODO: This needs to be conditioned dynamically through feature - * flags or some such when other controller variants are supported as - * the 405EX[r] is 16-/32-bit and the others are 32-/64-bit with the - * 16- and 64-bit field definition/value/enumeration (b1) overloaded - * among them. - * - * Returns a device type width enumeration. - */ -static enum dev_type ppc4xx_edac_get_dtype(u32 mcopt1) -{ - switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) { - case SDRAM_MCOPT1_WDTH_16: - return DEV_X2; - case SDRAM_MCOPT1_WDTH_32: - return DEV_X4; - default: - return DEV_UNKNOWN; - } -} - -/** - * ppc4xx_edac_get_mtype - return controller memory type - * @mcopt1: The 32-bit Memory Controller Option 1 register value - * currently set for the controller, from which the memory type - * is derived. - * - * This routine returns the EDAC memory type appropriate for the - * current controller configuration. - * - * Returns a memory type enumeration. - */ -static enum mem_type ppc4xx_edac_get_mtype(u32 mcopt1) -{ - bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN); - - switch (mcopt1 & SDRAM_MCOPT1_DDR_TYPE_MASK) { - case SDRAM_MCOPT1_DDR2_TYPE: - return rden ? MEM_RDDR2 : MEM_DDR2; - case SDRAM_MCOPT1_DDR1_TYPE: - return rden ? MEM_RDDR : MEM_DDR; - default: - return MEM_UNKNOWN; - } -} - -/** - * ppc4xx_edac_init_csrows - initialize driver instance rows - * @mci: A pointer to the EDAC memory controller instance - * associated with the ibm,sdram-4xx-ddr2 controller for which - * the csrows (i.e. banks/ranks) are being initialized. - * @mcopt1: The 32-bit Memory Controller Option 1 register value - * currently set for the controller, from which bank width - * and memory typ information is derived. - * - * This routine initializes the virtual "chip select rows" associated - * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2 - * controller bank/rank is mapped to a row. - * - * Returns 0 if OK; otherwise, -EINVAL if the memory bank size - * configuration cannot be determined. - */ -static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) -{ - const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; - int status = 0; - enum mem_type mtype; - enum dev_type dtype; - enum edac_type edac_mode; - int row, j; - u32 mbxcf, size, nr_pages; - - /* Establish the memory type and width */ - - mtype = ppc4xx_edac_get_mtype(mcopt1); - dtype = ppc4xx_edac_get_dtype(mcopt1); - - /* Establish EDAC mode */ - - if (mci->edac_cap & EDAC_FLAG_SECDED) - edac_mode = EDAC_SECDED; - else if (mci->edac_cap & EDAC_FLAG_EC) - edac_mode = EDAC_EC; - else - edac_mode = EDAC_NONE; - - /* - * Initialize each chip select row structure which correspond - * 1:1 with a controller bank/rank. - */ - - for (row = 0; row < mci->nr_csrows; row++) { - struct csrow_info *csi = mci->csrows[row]; - - /* - * Get the configuration settings for this - * row/bank/rank and skip disabled banks. - */ - - mbxcf = mfsdram(&pdata->dcr_host, SDRAM_MBXCF(row)); - - if ((mbxcf & SDRAM_MBCF_BE_MASK) != SDRAM_MBCF_BE_ENABLE) - continue; - - /* Map the bank configuration size setting to pages. 
*/ - - size = mbxcf & SDRAM_MBCF_SZ_MASK; - - switch (size) { - case SDRAM_MBCF_SZ_4MB: - case SDRAM_MBCF_SZ_8MB: - case SDRAM_MBCF_SZ_16MB: - case SDRAM_MBCF_SZ_32MB: - case SDRAM_MBCF_SZ_64MB: - case SDRAM_MBCF_SZ_128MB: - case SDRAM_MBCF_SZ_256MB: - case SDRAM_MBCF_SZ_512MB: - case SDRAM_MBCF_SZ_1GB: - case SDRAM_MBCF_SZ_2GB: - case SDRAM_MBCF_SZ_4GB: - case SDRAM_MBCF_SZ_8GB: - nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size); - break; - default: - ppc4xx_edac_mc_printk(KERN_ERR, mci, - "Unrecognized memory bank %d " - "size 0x%08x\n", - row, SDRAM_MBCF_SZ_DECODE(size)); - status = -EINVAL; - goto done; - } - - /* - * It's unclear exactly what grain should be set to - * here. The SDRAM_ECCES register allows resolution of - * an error down to a nibble which would potentially - * argue for a grain of '1' byte, even though we only - * know the associated address for uncorrectable - * errors. This value is not used at present for - * anything other than error reporting so getting it - * wrong should be of little consequence. Other - * possible values would be the PLB width (16), the - * page size (PAGE_SIZE) or the memory width (2 or 4). - */ - for (j = 0; j < csi->nr_channels; j++) { - struct dimm_info *dimm = csi->channels[j]->dimm; - - dimm->nr_pages = nr_pages / csi->nr_channels; - dimm->grain = 1; - - dimm->mtype = mtype; - dimm->dtype = dtype; - - dimm->edac_mode = edac_mode; - } - } - - done: - return status; -} - -/** - * ppc4xx_edac_mc_init - initialize driver instance - * @mci: A pointer to the EDAC memory controller instance being - * initialized. - * @op: A pointer to the OpenFirmware device tree node associated - * with the controller this EDAC instance is bound to. - * @dcr_host: A pointer to the DCR data containing the DCR mapping - * for this controller instance. - * @mcopt1: The 32-bit Memory Controller Option 1 register value - * currently set for the controller, from which ECC capabilities - * and scrub mode are derived. - * - * This routine performs initialization of the EDAC memory controller - * instance and related driver-private data associated with the - * ibm,sdram-4xx-ddr2 memory controller the instance is bound to. - * - * Returns 0 if OK; otherwise, < 0 on error. - */ -static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci, - struct platform_device *op, - const dcr_host_t *dcr_host, u32 mcopt1) -{ - int status = 0; - const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK); - struct ppc4xx_edac_pdata *pdata = NULL; - const struct device_node *np = op->dev.of_node; - - if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL) - return -EINVAL; - - /* Initial driver pointers and private data */ - - mci->pdev = &op->dev; - - dev_set_drvdata(mci->pdev, mci); - - pdata = mci->pvt_info; - - pdata->dcr_host = *dcr_host; - - /* Initialize controller capabilities and configuration */ - - mci->mtype_cap = (MEM_FLAG_DDR | MEM_FLAG_RDDR | - MEM_FLAG_DDR2 | MEM_FLAG_RDDR2); - - mci->edac_ctl_cap = (EDAC_FLAG_NONE | - EDAC_FLAG_EC | - EDAC_FLAG_SECDED); - - mci->scrub_cap = SCRUB_NONE; - mci->scrub_mode = SCRUB_NONE; - - /* - * Update the actual capabilites based on the MCOPT1[MCHK] - * settings. Scrubbing is only useful if reporting is enabled. 
- */ - - switch (memcheck) { - case SDRAM_MCOPT1_MCHK_CHK: - mci->edac_cap = EDAC_FLAG_EC; - break; - case SDRAM_MCOPT1_MCHK_CHK_REP: - mci->edac_cap = (EDAC_FLAG_EC | EDAC_FLAG_SECDED); - mci->scrub_mode = SCRUB_SW_SRC; - break; - default: - mci->edac_cap = EDAC_FLAG_NONE; - break; - } - - /* Initialize strings */ - - mci->mod_name = PPC4XX_EDAC_MODULE_NAME; - mci->ctl_name = ppc4xx_edac_match->compatible; - mci->dev_name = np->full_name; - - /* Initialize callbacks */ - - mci->edac_check = ppc4xx_edac_check; - mci->ctl_page_to_phys = NULL; - - /* Initialize chip select rows */ - - status = ppc4xx_edac_init_csrows(mci, mcopt1); - - if (status) - ppc4xx_edac_mc_printk(KERN_ERR, mci, - "Failed to initialize rows!\n"); - - return status; -} - -/** - * ppc4xx_edac_register_irq - setup and register controller interrupts - * @op: A pointer to the OpenFirmware device tree node associated - * with the controller this EDAC instance is bound to. - * @mci: A pointer to the EDAC memory controller instance - * associated with the ibm,sdram-4xx-ddr2 controller for which - * interrupts are being registered. - * - * This routine parses the correctable (CE) and uncorrectable error (UE) - * interrupts from the device tree node and maps and assigns them to - * the associated EDAC memory controller instance. - * - * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be - * mapped and assigned. - */ -static int ppc4xx_edac_register_irq(struct platform_device *op, - struct mem_ctl_info *mci) -{ - int status = 0; - int ded_irq, sec_irq; - struct ppc4xx_edac_pdata *pdata = mci->pvt_info; - struct device_node *np = op->dev.of_node; - - ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX); - sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX); - - if (!ded_irq || !sec_irq) { - ppc4xx_edac_mc_printk(KERN_ERR, mci, - "Unable to map interrupts.\n"); - status = -ENODEV; - goto fail; - } - - status = request_irq(ded_irq, - ppc4xx_edac_isr, - 0, - "[EDAC] MC ECCDED", - mci); - - if (status < 0) { - ppc4xx_edac_mc_printk(KERN_ERR, mci, - "Unable to request irq %d for ECC DED", - ded_irq); - status = -ENODEV; - goto fail1; - } - - status = request_irq(sec_irq, - ppc4xx_edac_isr, - 0, - "[EDAC] MC ECCSEC", - mci); - - if (status < 0) { - ppc4xx_edac_mc_printk(KERN_ERR, mci, - "Unable to request irq %d for ECC SEC", - sec_irq); - status = -ENODEV; - goto fail2; - } - - ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCDED irq is %d\n", ded_irq); - ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCSEC irq is %d\n", sec_irq); - - pdata->irqs.ded = ded_irq; - pdata->irqs.sec = sec_irq; - - return 0; - - fail2: - free_irq(sec_irq, mci); - - fail1: - free_irq(ded_irq, mci); - - fail: - return status; -} - -/** - * ppc4xx_edac_map_dcrs - locate and map controller registers - * @np: A pointer to the device tree node containing the DCR - * resources to map. - * @dcr_host: A pointer to the DCR data to populate with the - * DCR mapping. - * - * This routine attempts to locate in the device tree and map the DCR - * register resources associated with the controller's indirect DCR - * address and data windows. - * - * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on - * error. - */ -static int ppc4xx_edac_map_dcrs(const struct device_node *np, - dcr_host_t *dcr_host) -{ - unsigned int dcr_base, dcr_len; - - if (np == NULL || dcr_host == NULL) - return -EINVAL; - - /* Get the DCR resource extent and sanity check the values. 
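/*
 * Sketch of why ppc4xx_edac_map_dcrs() below insists on a two-register DCR
 * extent: the controller's internal registers are reached indirectly, by
 * writing an index into the address window and then moving data through the
 * data window (what mfsdram()/mtsdram() earlier do via __mfdcri/__mtdcri).
 * The "DCR bus" is simulated with plain arrays and ex_* stand-in names,
 * purely for illustration.
 */
#include <assert.h>
#include <stdint.h>

#define EX_WIN_ADDR	0	/* SDRAM_DCR_ADDR_OFFSET */
#define EX_WIN_DATA	1	/* SDRAM_DCR_DATA_OFFSET */

static uint32_t ex_dcr_window[2];	/* the two visible DCRs */
static uint32_t ex_sdram_regs[0x100];	/* hidden controller registers */

static void ex_mtdcr(unsigned int dcrn, uint32_t val)
{
	ex_dcr_window[dcrn] = val;
	if (dcrn == EX_WIN_DATA)
		ex_sdram_regs[ex_dcr_window[EX_WIN_ADDR]] = val;
}

static uint32_t ex_mfdcr(unsigned int dcrn)
{
	if (dcrn == EX_WIN_DATA)
		return ex_sdram_regs[ex_dcr_window[EX_WIN_ADDR]];
	return ex_dcr_window[dcrn];
}

static uint32_t ex_mfsdram(unsigned int idcr_n)
{
	ex_mtdcr(EX_WIN_ADDR, idcr_n);	/* select the internal register ... */
	return ex_mfdcr(EX_WIN_DATA);	/* ... then read it via the data window */
}

static void ex_mtsdram(unsigned int idcr_n, uint32_t value)
{
	ex_mtdcr(EX_WIN_ADDR, idcr_n);
	ex_mtdcr(EX_WIN_DATA, value);
}

int main(void)
{
	ex_mtsdram(0x20, 0xdeadbeef);		/* 0x20 is the MCOPT1 index */
	assert(ex_mfsdram(0x20) == 0xdeadbeef);
	return 0;
}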
*/ - - dcr_base = dcr_resource_start(np, 0); - dcr_len = dcr_resource_len(np, 0); - - if (dcr_base == 0 || dcr_len == 0) { - ppc4xx_edac_printk(KERN_ERR, - "Failed to obtain DCR property.\n"); - return -ENODEV; - } - - if (dcr_len != SDRAM_DCR_RESOURCE_LEN) { - ppc4xx_edac_printk(KERN_ERR, - "Unexpected DCR length %d, expected %d.\n", - dcr_len, SDRAM_DCR_RESOURCE_LEN); - return -ENODEV; - } - - /* Attempt to map the DCR extent. */ - - *dcr_host = dcr_map(np, dcr_base, dcr_len); - - if (!DCR_MAP_OK(*dcr_host)) { - ppc4xx_edac_printk(KERN_INFO, "Failed to map DCRs.\n"); - return -ENODEV; - } - - return 0; -} - -/** - * ppc4xx_edac_probe - check controller and bind driver - * @op: A pointer to the OpenFirmware device tree node associated - * with the controller being probed for driver binding. - * - * This routine probes a specific ibm,sdram-4xx-ddr2 controller - * instance for binding with the driver. - * - * Returns 0 if the controller instance was successfully bound to the - * driver; otherwise, < 0 on error. - */ -static int ppc4xx_edac_probe(struct platform_device *op) -{ - int status = 0; - u32 mcopt1, memcheck; - dcr_host_t dcr_host; - const struct device_node *np = op->dev.of_node; - struct mem_ctl_info *mci = NULL; - struct edac_mc_layer layers[2]; - static int ppc4xx_edac_instance; - - /* - * At this point, we only support the controller realized on - * the AMCC PPC 405EX[r]. Reject anything else. - */ - - if (!of_device_is_compatible(np, "ibm,sdram-405ex") && - !of_device_is_compatible(np, "ibm,sdram-405exr")) { - ppc4xx_edac_printk(KERN_NOTICE, - "Only the PPC405EX[r] is supported.\n"); - return -ENODEV; - } - - /* - * Next, get the DCR property and attempt to map it so that we - * can probe the controller. - */ - - status = ppc4xx_edac_map_dcrs(np, &dcr_host); - - if (status) - return status; - - /* - * First determine whether ECC is enabled at all. If not, - * there is no useful checking or monitoring that can be done - * for this controller. - */ - - mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1); - memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK); - - if (memcheck == SDRAM_MCOPT1_MCHK_NON) { - ppc4xx_edac_printk(KERN_INFO, "%pOF: No ECC memory detected or " - "ECC is disabled.\n", np); - status = -ENODEV; - goto done; - } - - /* - * At this point, we know ECC is enabled, allocate an EDAC - * controller instance and perform the appropriate - * initialization. - */ - layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; - layers[0].size = ppc4xx_edac_nr_csrows; - layers[0].is_virt_csrow = true; - layers[1].type = EDAC_MC_LAYER_CHANNEL; - layers[1].size = ppc4xx_edac_nr_chans; - layers[1].is_virt_csrow = false; - mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers, - sizeof(struct ppc4xx_edac_pdata)); - if (mci == NULL) { - ppc4xx_edac_printk(KERN_ERR, "%pOF: " - "Failed to allocate EDAC MC instance!\n", - np); - status = -ENOMEM; - goto done; - } - - status = ppc4xx_edac_mc_init(mci, op, &dcr_host, mcopt1); - - if (status) { - ppc4xx_edac_mc_printk(KERN_ERR, mci, - "Failed to initialize instance!\n"); - goto fail; - } - - /* - * We have a valid, initialized EDAC instance bound to the - * controller. Attempt to register it with the EDAC subsystem - * and, if necessary, register interrupts. 
- */ - - if (edac_mc_add_mc(mci)) { - ppc4xx_edac_mc_printk(KERN_ERR, mci, - "Failed to add instance!\n"); - status = -ENODEV; - goto fail; - } - - if (edac_op_state == EDAC_OPSTATE_INT) { - status = ppc4xx_edac_register_irq(op, mci); - - if (status) - goto fail1; - } - - ppc4xx_edac_instance++; - - return 0; - - fail1: - edac_mc_del_mc(mci->pdev); - - fail: - edac_mc_free(mci); - - done: - return status; -} - -/** - * ppc4xx_edac_remove - unbind driver from controller - * @op: A pointer to the OpenFirmware device tree node associated - * with the controller this EDAC instance is to be unbound/removed - * from. - * - * This routine unbinds the EDAC memory controller instance associated - * with the specified ibm,sdram-4xx-ddr2 controller described by the - * OpenFirmware device tree node passed as a parameter. - * - * Unconditionally returns 0. - */ -static void ppc4xx_edac_remove(struct platform_device *op) -{ - struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); - struct ppc4xx_edac_pdata *pdata = mci->pvt_info; - - if (edac_op_state == EDAC_OPSTATE_INT) { - free_irq(pdata->irqs.sec, mci); - free_irq(pdata->irqs.ded, mci); - } - - dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN); - - edac_mc_del_mc(mci->pdev); - edac_mc_free(mci); -} - -/** - * ppc4xx_edac_opstate_init - initialize EDAC reporting method - * - * This routine ensures that the EDAC memory controller reporting - * method is mapped to a sane value as the EDAC core defines the value - * to EDAC_OPSTATE_INVAL by default. We don't call the global - * opstate_init as that defaults to polling and we want interrupt as - * the default. - */ -static inline void __init -ppc4xx_edac_opstate_init(void) -{ - switch (edac_op_state) { - case EDAC_OPSTATE_POLL: - case EDAC_OPSTATE_INT: - break; - default: - edac_op_state = EDAC_OPSTATE_INT; - break; - } - - ppc4xx_edac_printk(KERN_INFO, "Reporting type: %s\n", - ((edac_op_state == EDAC_OPSTATE_POLL) ? - EDAC_OPSTATE_POLL_STR : - ((edac_op_state == EDAC_OPSTATE_INT) ? - EDAC_OPSTATE_INT_STR : - EDAC_OPSTATE_UNKNOWN_STR))); -} - -static struct platform_driver ppc4xx_edac_driver = { - .probe = ppc4xx_edac_probe, - .remove_new = ppc4xx_edac_remove, - .driver = { - .name = PPC4XX_EDAC_MODULE_NAME, - .of_match_table = ppc4xx_edac_match, - }, -}; - -/** - * ppc4xx_edac_init - driver/module insertion entry point - * - * This routine is the driver/module insertion entry point. It - * initializes the EDAC memory controller reporting state and - * registers the driver as an OpenFirmware device tree platform - * driver. - */ -static int __init -ppc4xx_edac_init(void) -{ - ppc4xx_edac_printk(KERN_INFO, PPC4XX_EDAC_MODULE_REVISION "\n"); - - ppc4xx_edac_opstate_init(); - - return platform_driver_register(&ppc4xx_edac_driver); -} - -/** - * ppc4xx_edac_exit - driver/module removal entry point - * - * This routine is the driver/module removal entry point. It - * unregisters the driver as an OpenFirmware device tree platform - * driver. 
- */ -static void __exit -ppc4xx_edac_exit(void) -{ - platform_driver_unregister(&ppc4xx_edac_driver); -} - -module_init(ppc4xx_edac_init); -module_exit(ppc4xx_edac_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Grant Erickson "); -MODULE_DESCRIPTION("EDAC MC Driver for the PPC4xx IBM DDR2 Memory Controller"); -module_param(edac_op_state, int, 0444); -MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting State: " - "0=" EDAC_OPSTATE_POLL_STR ", 2=" EDAC_OPSTATE_INT_STR); diff --git a/drivers/edac/ppc4xx_edac.h b/drivers/edac/ppc4xx_edac.h deleted file mode 100644 index b38459aa58ee05..00000000000000 --- a/drivers/edac/ppc4xx_edac.h +++ /dev/null @@ -1,167 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2008 Nuovation System Designs, LLC - * Grant Erickson - * - * This file defines processor mnemonics for accessing and managing - * the IBM DDR1/DDR2 ECC controller found in the 405EX[r], 440SP, - * 440SPe, 460EX, 460GT and 460SX. - */ - -#ifndef __PPC4XX_EDAC_H -#define __PPC4XX_EDAC_H - -#include - -/* - * Macro for generating register field mnemonics - */ -#define PPC_REG_BITS 32 -#define PPC_REG_VAL(bit, val) ((val) << ((PPC_REG_BITS - 1) - (bit))) -#define PPC_REG_DECODE(bit, val) ((val) >> ((PPC_REG_BITS - 1) - (bit))) - -/* - * IBM 4xx DDR1/DDR2 SDRAM memory controller registers (at least those - * relevant to ECC) - */ -#define SDRAM_BESR 0x00 /* Error status (read/clear) */ -#define SDRAM_BESRT 0x01 /* Error statuss (test/set) */ -#define SDRAM_BEARL 0x02 /* Error address low */ -#define SDRAM_BEARH 0x03 /* Error address high */ -#define SDRAM_WMIRQ 0x06 /* Write master (read/clear) */ -#define SDRAM_WMIRQT 0x07 /* Write master (test/set) */ -#define SDRAM_MCOPT1 0x20 /* Controller options 1 */ -#define SDRAM_MBXCF_BASE 0x40 /* Bank n configuration base */ -#define SDRAM_MBXCF(n) (SDRAM_MBXCF_BASE + (4 * (n))) -#define SDRAM_MB0CF SDRAM_MBXCF(0) -#define SDRAM_MB1CF SDRAM_MBXCF(1) -#define SDRAM_MB2CF SDRAM_MBXCF(2) -#define SDRAM_MB3CF SDRAM_MBXCF(3) -#define SDRAM_ECCCR 0x98 /* ECC error status */ -#define SDRAM_ECCES SDRAM_ECCCR - -/* - * PLB Master IDs - */ -#define SDRAM_PLB_M0ID_FIRST 0 -#define SDRAM_PLB_M0ID_ICU SDRAM_PLB_M0ID_FIRST -#define SDRAM_PLB_M0ID_PCIE0 1 -#define SDRAM_PLB_M0ID_PCIE1 2 -#define SDRAM_PLB_M0ID_DMA 3 -#define SDRAM_PLB_M0ID_DCU 4 -#define SDRAM_PLB_M0ID_OPB 5 -#define SDRAM_PLB_M0ID_MAL 6 -#define SDRAM_PLB_M0ID_SEC 7 -#define SDRAM_PLB_M0ID_AHB 8 -#define SDRAM_PLB_M0ID_LAST SDRAM_PLB_M0ID_AHB -#define SDRAM_PLB_M0ID_COUNT (SDRAM_PLB_M0ID_LAST - \ - SDRAM_PLB_M0ID_FIRST + 1) - -/* - * Memory Controller Bus Error Status Register - */ -#define SDRAM_BESR_MASK PPC_REG_VAL(7, 0xFF) -#define SDRAM_BESR_M0ID_MASK PPC_REG_VAL(3, 0xF) -#define SDRAM_BESR_M0ID_DECODE(n) PPC_REG_DECODE(3, n) -#define SDRAM_BESR_M0ID_ICU PPC_REG_VAL(3, SDRAM_PLB_M0ID_ICU) -#define SDRAM_BESR_M0ID_PCIE0 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE0) -#define SDRAM_BESR_M0ID_PCIE1 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE1) -#define SDRAM_BESR_M0ID_DMA PPC_REG_VAL(3, SDRAM_PLB_M0ID_DMA) -#define SDRAM_BESR_M0ID_DCU PPC_REG_VAL(3, SDRAM_PLB_M0ID_DCU) -#define SDRAM_BESR_M0ID_OPB PPC_REG_VAL(3, SDRAM_PLB_M0ID_OPB) -#define SDRAM_BESR_M0ID_MAL PPC_REG_VAL(3, SDRAM_PLB_M0ID_MAL) -#define SDRAM_BESR_M0ID_SEC PPC_REG_VAL(3, SDRAM_PLB_M0ID_SEC) -#define SDRAM_BESR_M0ID_AHB PPC_REG_VAL(3, SDRAM_PLB_M0ID_AHB) -#define SDRAM_BESR_M0ET_MASK PPC_REG_VAL(6, 0x7) -#define SDRAM_BESR_M0ET_NONE PPC_REG_VAL(6, 0) -#define SDRAM_BESR_M0ET_ECC PPC_REG_VAL(6, 1) -#define 
SDRAM_BESR_M0RW_MASK PPC_REG_VAL(7, 1) -#define SDRAM_BESR_M0RW_WRITE PPC_REG_VAL(7, 0) -#define SDRAM_BESR_M0RW_READ PPC_REG_VAL(7, 1) - -/* - * Memory Controller PLB Write Master Interrupt Register - */ -#define SDRAM_WMIRQ_MASK PPC_REG_VAL(8, 0x1FF) -#define SDRAM_WMIRQ_ENCODE(id) PPC_REG_VAL((id % \ - SDRAM_PLB_M0ID_COUNT), 1) -#define SDRAM_WMIRQ_ICU PPC_REG_VAL(SDRAM_PLB_M0ID_ICU, 1) -#define SDRAM_WMIRQ_PCIE0 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE0, 1) -#define SDRAM_WMIRQ_PCIE1 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE1, 1) -#define SDRAM_WMIRQ_DMA PPC_REG_VAL(SDRAM_PLB_M0ID_DMA, 1) -#define SDRAM_WMIRQ_DCU PPC_REG_VAL(SDRAM_PLB_M0ID_DCU, 1) -#define SDRAM_WMIRQ_OPB PPC_REG_VAL(SDRAM_PLB_M0ID_OPB, 1) -#define SDRAM_WMIRQ_MAL PPC_REG_VAL(SDRAM_PLB_M0ID_MAL, 1) -#define SDRAM_WMIRQ_SEC PPC_REG_VAL(SDRAM_PLB_M0ID_SEC, 1) -#define SDRAM_WMIRQ_AHB PPC_REG_VAL(SDRAM_PLB_M0ID_AHB, 1) - -/* - * Memory Controller Options 1 Register - */ -#define SDRAM_MCOPT1_MCHK_MASK PPC_REG_VAL(3, 0x3) /* ECC mask */ -#define SDRAM_MCOPT1_MCHK_NON PPC_REG_VAL(3, 0x0) /* No ECC gen */ -#define SDRAM_MCOPT1_MCHK_GEN PPC_REG_VAL(3, 0x2) /* ECC gen */ -#define SDRAM_MCOPT1_MCHK_CHK PPC_REG_VAL(3, 0x1) /* ECC gen and chk */ -#define SDRAM_MCOPT1_MCHK_CHK_REP PPC_REG_VAL(3, 0x3) /* ECC gen/chk/rpt */ -#define SDRAM_MCOPT1_MCHK_DECODE(n) ((((u32)(n)) >> 28) & 0x3) -#define SDRAM_MCOPT1_RDEN_MASK PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM mask */ -#define SDRAM_MCOPT1_RDEN PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM enbl */ -#define SDRAM_MCOPT1_WDTH_MASK PPC_REG_VAL(7, 0x1) /* Width mask */ -#define SDRAM_MCOPT1_WDTH_32 PPC_REG_VAL(7, 0x0) /* 32 bits */ -#define SDRAM_MCOPT1_WDTH_16 PPC_REG_VAL(7, 0x1) /* 16 bits */ -#define SDRAM_MCOPT1_DDR_TYPE_MASK PPC_REG_VAL(11, 0x1) /* DDR type mask */ -#define SDRAM_MCOPT1_DDR1_TYPE PPC_REG_VAL(11, 0x0) /* DDR1 type */ -#define SDRAM_MCOPT1_DDR2_TYPE PPC_REG_VAL(11, 0x1) /* DDR2 type */ - -/* - * Memory Bank 0 - n Configuration Register - */ -#define SDRAM_MBCF_BA_MASK PPC_REG_VAL(12, 0x1FFF) -#define SDRAM_MBCF_SZ_MASK PPC_REG_VAL(19, 0xF) -#define SDRAM_MBCF_SZ_DECODE(mbxcf) PPC_REG_DECODE(19, mbxcf) -#define SDRAM_MBCF_SZ_4MB PPC_REG_VAL(19, 0x0) -#define SDRAM_MBCF_SZ_8MB PPC_REG_VAL(19, 0x1) -#define SDRAM_MBCF_SZ_16MB PPC_REG_VAL(19, 0x2) -#define SDRAM_MBCF_SZ_32MB PPC_REG_VAL(19, 0x3) -#define SDRAM_MBCF_SZ_64MB PPC_REG_VAL(19, 0x4) -#define SDRAM_MBCF_SZ_128MB PPC_REG_VAL(19, 0x5) -#define SDRAM_MBCF_SZ_256MB PPC_REG_VAL(19, 0x6) -#define SDRAM_MBCF_SZ_512MB PPC_REG_VAL(19, 0x7) -#define SDRAM_MBCF_SZ_1GB PPC_REG_VAL(19, 0x8) -#define SDRAM_MBCF_SZ_2GB PPC_REG_VAL(19, 0x9) -#define SDRAM_MBCF_SZ_4GB PPC_REG_VAL(19, 0xA) -#define SDRAM_MBCF_SZ_8GB PPC_REG_VAL(19, 0xB) -#define SDRAM_MBCF_AM_MASK PPC_REG_VAL(23, 0xF) -#define SDRAM_MBCF_AM_MODE0 PPC_REG_VAL(23, 0x0) -#define SDRAM_MBCF_AM_MODE1 PPC_REG_VAL(23, 0x1) -#define SDRAM_MBCF_AM_MODE2 PPC_REG_VAL(23, 0x2) -#define SDRAM_MBCF_AM_MODE3 PPC_REG_VAL(23, 0x3) -#define SDRAM_MBCF_AM_MODE4 PPC_REG_VAL(23, 0x4) -#define SDRAM_MBCF_AM_MODE5 PPC_REG_VAL(23, 0x5) -#define SDRAM_MBCF_AM_MODE6 PPC_REG_VAL(23, 0x6) -#define SDRAM_MBCF_AM_MODE7 PPC_REG_VAL(23, 0x7) -#define SDRAM_MBCF_AM_MODE8 PPC_REG_VAL(23, 0x8) -#define SDRAM_MBCF_AM_MODE9 PPC_REG_VAL(23, 0x9) -#define SDRAM_MBCF_BE_MASK PPC_REG_VAL(31, 0x1) -#define SDRAM_MBCF_BE_DISABLE PPC_REG_VAL(31, 0x0) -#define SDRAM_MBCF_BE_ENABLE PPC_REG_VAL(31, 0x1) - -/* - * ECC Error Status - */ -#define SDRAM_ECCES_MASK PPC_REG_VAL(21, 0x3FFFFF) -#define SDRAM_ECCES_BNCE_MASK PPC_REG_VAL(15, 0xFFFF) 
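/*
 * Standalone check of the PPC_REG_VAL()/PPC_REG_DECODE() convention used
 * throughout this header: bits are numbered IBM-style, with bit 0 the most
 * significant bit of the 32-bit register, so PPC_REG_VAL(bit, val) places
 * @val such that its lowest bit lands on the named bit.  The macros are
 * re-declared locally with a u32 cast added only so the shifts stay
 * well-defined in standalone C.
 */
#include <assert.h>
#include <stdint.h>

#define EX_REG_BITS		32
#define EX_REG_VAL(bit, val)	((uint32_t)(val) << ((EX_REG_BITS - 1) - (bit)))
#define EX_REG_DECODE(bit, val)	((uint32_t)(val) >> ((EX_REG_BITS - 1) - (bit)))

int main(void)
{
	/* SDRAM_BESR_M0ID_MASK: a 4-bit field occupying IBM bits 0..3 */
	assert(EX_REG_VAL(3, 0xF) == 0xF0000000u);
	/* SDRAM_MBCF_BE_ENABLE: IBM bit 31 is the conventional LSB */
	assert(EX_REG_VAL(31, 0x1) == 0x00000001u);
	/* decoding pulls the field back down to a plain number */
	assert(EX_REG_DECODE(3, 0x30000000u) == 0x3);
	return 0;
}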
-#define SDRAM_ECCES_BNCE_ENCODE(lane) PPC_REG_VAL(((lane) & 0xF), 1) -#define SDRAM_ECCES_CKBER_MASK PPC_REG_VAL(17, 0x3) -#define SDRAM_ECCES_CKBER_NONE PPC_REG_VAL(17, 0) -#define SDRAM_ECCES_CKBER_16_ECC_0_3 PPC_REG_VAL(17, 2) -#define SDRAM_ECCES_CKBER_32_ECC_0_3 PPC_REG_VAL(17, 1) -#define SDRAM_ECCES_CKBER_32_ECC_4_8 PPC_REG_VAL(17, 2) -#define SDRAM_ECCES_CKBER_32_ECC_0_8 PPC_REG_VAL(17, 3) -#define SDRAM_ECCES_CE PPC_REG_VAL(18, 1) -#define SDRAM_ECCES_UE PPC_REG_VAL(19, 1) -#define SDRAM_ECCES_BKNER_MASK PPC_REG_VAL(21, 0x3) -#define SDRAM_ECCES_BK0ER PPC_REG_VAL(20, 1) -#define SDRAM_ECCES_BK1ER PPC_REG_VAL(21, 1) - -#endif /* __PPC4XX_EDAC_H */ diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index e5c05a8769471e..d5f12219598a8d 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -29,6 +29,8 @@ /* Static vars */ static LIST_HEAD(sbridge_edac_list); +static char sb_msg[256]; +static char sb_msg_full[512]; /* * Alter this version for the module when modifications are made @@ -3079,7 +3081,6 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, struct mem_ctl_info *new_mci; struct sbridge_pvt *pvt = mci->pvt_info; enum hw_event_mc_err_type tp_event; - char *optype, msg[256], msg_full[512]; bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); @@ -3095,10 +3096,10 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, * aligned address reported by patrol scrubber. */ u32 lsb = GET_BITFIELD(m->misc, 0, 5); + char *optype, *area_type = "DRAM"; long channel_mask, first_channel; u8 rank = 0xff, socket, ha; int rc, dimm; - char *area_type = "DRAM"; if (pvt->info.type != SANDY_BRIDGE) recoverable = true; @@ -3168,32 +3169,32 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, channel = knl_channel_remap(m->bank == 16, channel); channel_mask = 1 << channel; - snprintf(msg, sizeof(msg), - "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", - overflow ? " OVERFLOW" : "", - (uncorrected_error && recoverable) - ? " recoverable" : " ", - mscod, errcode, channel, A + channel); + snprintf(sb_msg, sizeof(sb_msg), + "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) + ? " recoverable" : " ", + mscod, errcode, channel, A + channel); edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, channel, 0, -1, - optype, msg); + optype, sb_msg); } return; } else if (lsb < 12) { rc = get_memory_error_data(mci, m->addr, &socket, &ha, &channel_mask, &rank, - &area_type, msg); + &area_type, sb_msg); } else { rc = get_memory_error_data_from_mce(mci, m, &socket, &ha, - &channel_mask, msg); + &channel_mask, sb_msg); } if (rc < 0) goto err_parsing; new_mci = get_mci_for_node_id(socket, ha); if (!new_mci) { - strcpy(msg, "Error: socket got corrupted!"); + strscpy(sb_msg, "Error: socket got corrupted!"); goto err_parsing; } mci = new_mci; @@ -3218,7 +3219,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, */ if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg) channel = first_channel; - snprintf(msg_full, sizeof(msg_full), + snprintf(sb_msg_full, sizeof(sb_msg_full), "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d %s", overflow ? " OVERFLOW" : "", (uncorrected_error && recoverable) ? 
" recoverable" : "", @@ -3226,9 +3227,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, mscod, errcode, socket, ha, channel_mask, - rank, msg); + rank, sb_msg); - edac_dbg(0, "%s\n", msg_full); + edac_dbg(0, "%s\n", sb_msg_full); /* FIXME: need support for channel mask */ @@ -3239,12 +3240,12 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, channel, dimm, -1, - optype, msg_full); + optype, sb_msg_full); return; err_parsing: edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, - msg, ""); + sb_msg, ""); } diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c index af3fa807acdba9..14cfd394b46964 100644 --- a/drivers/edac/skx_base.c +++ b/drivers/edac/skx_base.c @@ -587,54 +587,6 @@ static struct notifier_block skx_mce_dec = { .priority = MCE_PRIO_EDAC, }; -#ifdef CONFIG_EDAC_DEBUG -/* - * Debug feature. - * Exercise the address decode logic by writing an address to - * /sys/kernel/debug/edac/skx_test/addr. - */ -static struct dentry *skx_test; - -static int debugfs_u64_set(void *data, u64 val) -{ - struct mce m; - - pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val); - - memset(&m, 0, sizeof(m)); - /* ADDRV + MemRd + Unknown channel */ - m.status = MCI_STATUS_ADDRV + 0x90; - /* One corrected error */ - m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT); - m.addr = val; - skx_mce_check_error(NULL, 0, &m); - - return 0; -} -DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); - -static void setup_skx_debug(void) -{ - skx_test = edac_debugfs_create_dir("skx_test"); - if (!skx_test) - return; - - if (!edac_debugfs_create_file("addr", 0200, skx_test, - NULL, &fops_u64_wo)) { - debugfs_remove(skx_test); - skx_test = NULL; - } -} - -static void teardown_skx_debug(void) -{ - debugfs_remove_recursive(skx_test); -} -#else -static inline void setup_skx_debug(void) {} -static inline void teardown_skx_debug(void) {} -#endif /*CONFIG_EDAC_DEBUG*/ - /* * skx_init: * make sure we are running on the correct cpu model @@ -728,7 +680,7 @@ static int __init skx_init(void) /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); - setup_skx_debug(); + skx_setup_debug("skx_test"); mce_register_decode_chain(&skx_mce_dec); @@ -742,7 +694,7 @@ static void __exit skx_exit(void) { edac_dbg(2, "\n"); mce_unregister_decode_chain(&skx_mce_dec); - teardown_skx_debug(); + skx_teardown_debug(); if (nvdimm_count) skx_adxl_put(); skx_remove(); diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 8d18099fd528cf..85713646957b3e 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -363,7 +363,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, if (imc->hbm_mc) { banks = 32; mtype = MEM_HBM2; - } else if (cfg->support_ddr5 && (amap & 0x8)) { + } else if (cfg->support_ddr5) { banks = 32; mtype = MEM_DDR5; } else { @@ -739,6 +739,53 @@ void skx_remove(void) } EXPORT_SYMBOL_GPL(skx_remove); +#ifdef CONFIG_EDAC_DEBUG +/* + * Debug feature. + * Exercise the address decode logic by writing an address to + * /sys/kernel/debug/edac/{skx,i10nm}_test/addr. 
+ */ +static struct dentry *skx_test; + +static int debugfs_u64_set(void *data, u64 val) +{ + struct mce m; + + pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val); + + memset(&m, 0, sizeof(m)); + /* ADDRV + MemRd + Unknown channel */ + m.status = MCI_STATUS_ADDRV + 0x90; + /* One corrected error */ + m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT); + m.addr = val; + skx_mce_check_error(NULL, 0, &m); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); + +void skx_setup_debug(const char *name) +{ + skx_test = edac_debugfs_create_dir(name); + if (!skx_test) + return; + + if (!edac_debugfs_create_file("addr", 0200, skx_test, + NULL, &fops_u64_wo)) { + debugfs_remove(skx_test); + skx_test = NULL; + } +} +EXPORT_SYMBOL_GPL(skx_setup_debug); + +void skx_teardown_debug(void) +{ + debugfs_remove_recursive(skx_test); +} +EXPORT_SYMBOL_GPL(skx_teardown_debug); +#endif /*CONFIG_EDAC_DEBUG*/ + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Tony Luck"); MODULE_DESCRIPTION("MC Driver for Intel server processors"); diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index 473421ba7a18a5..f945c1bf5ca465 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -259,4 +259,12 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, void skx_remove(void); +#ifdef CONFIG_EDAC_DEBUG +void skx_setup_debug(const char *name); +void skx_teardown_debug(void); +#else +static inline void skx_setup_debug(const char *name) {} +static inline void skx_teardown_debug(void) {} +#endif + #endif /* _SKX_COMM_EDAC_H */ diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index ea7a9a342dd30b..d7416166fd8a42 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -337,6 +338,7 @@ struct synps_edac_priv { * @get_mtype: Get mtype. * @get_dtype: Get dtype. * @get_ecc_state: Get ECC state. + * @get_mem_info: Get EDAC memory info * @quirks: To differentiate IPs. */ struct synps_platform_data { @@ -344,6 +346,9 @@ struct synps_platform_data { enum mem_type (*get_mtype)(const void __iomem *base); enum dev_type (*get_dtype)(const void __iomem *base); bool (*get_ecc_state)(void __iomem *base); +#ifdef CONFIG_EDAC_DEBUG + u64 (*get_mem_info)(struct synps_edac_priv *priv); +#endif int quirks; }; @@ -402,6 +407,25 @@ static int zynq_get_error_info(struct synps_edac_priv *priv) return 0; } +#ifdef CONFIG_EDAC_DEBUG +/** + * zynqmp_get_mem_info - Get the current memory info. + * @priv: DDR memory controller private instance data. + * + * Return: host interface address. + */ +static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv) +{ + u64 hif_addr = 0, linear_addr; + + linear_addr = priv->poison_addr; + if (linear_addr >= SZ_32G) + linear_addr = linear_addr - SZ_32G + SZ_2G; + hif_addr = linear_addr >> 3; + return hif_addr; +} +#endif + /** * zynqmp_get_error_info - Get the current ECC error info. * @priv: DDR memory controller private instance data. 
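/*
 * Standalone check of the linear-to-HIF address conversion performed by
 * zynqmp_get_mem_info() above: poison addresses in the high DDR region (at
 * or above 32 GiB) are folded down to sit just after the low 2 GiB region,
 * and the byte address is then shifted right by 3, i.e. expressed in the
 * controller's 8-byte (64-bit data path) units.  The ex_* names and the
 * sample addresses are illustrative only.
 */
#include <assert.h>
#include <stdint.h>

#define EX_SZ_2G	0x80000000ULL
#define EX_SZ_32G	0x800000000ULL

static uint64_t ex_linear_to_hif(uint64_t linear_addr)
{
	if (linear_addr >= EX_SZ_32G)
		linear_addr = linear_addr - EX_SZ_32G + EX_SZ_2G;
	return linear_addr >> 3;
}

int main(void)
{
	/* low region: 1 GiB stays put, then becomes an 8-byte beat index */
	assert(ex_linear_to_hif(0x40000000ULL) == 0x08000000ULL);
	/* high region: 33 GiB folds to 2 GiB + 1 GiB = 3 GiB, then >> 3 */
	assert(ex_linear_to_hif(0x840000000ULL) == 0x18000000ULL);
	return 0;
}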
@@ -922,6 +946,9 @@ static const struct synps_platform_data zynqmp_edac_def = { .get_mtype = zynqmp_get_mtype, .get_dtype = zynqmp_get_dtype, .get_ecc_state = zynqmp_get_ecc_state, +#ifdef CONFIG_EDAC_DEBUG + .get_mem_info = zynqmp_get_mem_info, +#endif .quirks = (DDR_ECC_INTR_SUPPORT #ifdef CONFIG_EDAC_DEBUG | DDR_ECC_DATA_POISON_SUPPORT @@ -975,10 +1002,16 @@ MODULE_DEVICE_TABLE(of, synps_edac_match); static void ddr_poison_setup(struct synps_edac_priv *priv) { int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval; + const struct synps_platform_data *p_data; int index; ulong hif_addr = 0; - hif_addr = priv->poison_addr >> 3; + p_data = priv->p_data; + + if (p_data->get_mem_info) + hif_addr = p_data->get_mem_info(priv); + else + hif_addr = priv->poison_addr >> 3; for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) { if (priv->row_shift[index]) diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 3da94b38229235..a6f6d467aacff5 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -75,6 +75,17 @@ config EXTCON_INTEL_MRFLD Say Y here to enable extcon support for charger detection / control on the Intel Merrifield Basin Cove PMIC. +config EXTCON_LC824206XA + tristate "LC824206XA extcon Support" + depends on I2C + depends on POWER_SUPPLY + help + Say Y here to enable support for the ON Semiconductor LC824206XA + microUSB switch and accessory detector chip. The LC824206XA is a USB + port accessory detector and switch. The LC824206XA is fully controlled + using I2C and enables USB data, stereo and mono audio, video, + microphone and UART data to use a common connector port. + config EXTCON_MAX14577 tristate "Maxim MAX14577/77836 EXTCON Support" depends on MFD_MAX14577 diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile index f779adb5e4c7c3..0d6d23faf7486d 100644 --- a/drivers/extcon/Makefile +++ b/drivers/extcon/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o obj-$(CONFIG_EXTCON_INTEL_MRFLD) += extcon-intel-mrfld.o +obj-$(CONFIG_EXTCON_LC824206XA) += extcon-lc824206xa.o obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index a703a8315634df..d3bcbe839c095f 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c @@ -108,7 +108,7 @@ struct axp288_extcon_info { }; static const struct x86_cpu_id cherry_trail_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), + X86_MATCH_VFM(INTEL_ATOM_AIRMONT, NULL), {} }; diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c index 733c470c31025c..93552dc3c895c6 100644 --- a/drivers/extcon/extcon-intel-cht-wc.c +++ b/drivers/extcon/extcon-intel-cht-wc.c @@ -461,14 +461,6 @@ static int cht_wc_extcon_psy_get_prop(struct power_supply *psy, return 0; } -static const enum power_supply_usb_type cht_wc_extcon_psy_usb_types[] = { - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_ACA, - POWER_SUPPLY_USB_TYPE_UNKNOWN, -}; - static const enum power_supply_property cht_wc_extcon_psy_props[] = { POWER_SUPPLY_PROP_USB_TYPE, POWER_SUPPLY_PROP_ONLINE, @@ -477,8 +469,11 @@ static const enum power_supply_property cht_wc_extcon_psy_props[] = { static const struct power_supply_desc 
cht_wc_extcon_psy_desc = { .name = "cht_wcove_pwrsrc", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = cht_wc_extcon_psy_usb_types, - .num_usb_types = ARRAY_SIZE(cht_wc_extcon_psy_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_ACA) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .properties = cht_wc_extcon_psy_props, .num_properties = ARRAY_SIZE(cht_wc_extcon_psy_props), .get_property = cht_wc_extcon_psy_get_prop, diff --git a/drivers/extcon/extcon-lc824206xa.c b/drivers/extcon/extcon-lc824206xa.c new file mode 100644 index 00000000000000..56938748aea83b --- /dev/null +++ b/drivers/extcon/extcon-lc824206xa.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ON Semiconductor LC824206XA Micro USB Switch driver + * + * Copyright (c) 2024 Hans de Goede + * + * ON Semiconductor has an "Advance Information" datasheet available + * (ENA2222-D.PDF), but no full datasheet. So there is no documentation + * available for the registers. + * + * This driver is based on the register info from the extcon-fsa9285.c driver, + * from the Lollipop Android sources for the Lenovo Yoga Tablet 2 (Pro) + * 830 / 1050 / 1380 models. Note despite the name this is actually a driver + * for the LC824206XA not the FSA9285. The Android sources can be downloaded + * from Lenovo's support page for these tablets, filename: + * yoga_tab_2_osc_android_to_lollipop_201505.rar. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Register defines as mentioned above there is no datasheet with register + * info, so this may not be 100% accurate. + */ +#define REG00 0x00 +#define REG00_INIT_VALUE 0x01 + +#define REG_STATUS 0x01 +#define STATUS_OVP BIT(0) +#define STATUS_DATA_SHORT BIT(1) +#define STATUS_VBUS_PRESENT BIT(2) +#define STATUS_USB_ID GENMASK(7, 3) +#define STATUS_USB_ID_GND 0x80 +#define STATUS_USB_ID_ACA 0xf0 +#define STATUS_USB_ID_FLOAT 0xf8 + +/* + * This controls the DP/DM muxes + other switches, + * meaning of individual bits is unknown. + */ +#define REG_SWITCH_CONTROL 0x02 +#define SWITCH_STEREO_MIC 0xc8 +#define SWITCH_USB_HOST 0xec +#define SWITCH_DISCONNECTED 0xf8 +#define SWITCH_USB_DEVICE 0xfc + +/* 5 bits? ADC 0x10 GND, 0x1a-0x1f ACA, 0x1f float */ +#define REG_ID_PIN_ADC_VALUE 0x03 + +/* Masks for all 3 interrupt registers */ +#define INTR_ID_PIN_CHANGE BIT(0) +#define INTR_VBUS_CHANGE BIT(1) +/* Both of these get set after a continuous mode ADC conversion */ +#define INTR_ID_PIN_ADC_INT1 BIT(2) +#define INTR_ID_PIN_ADC_INT2 BIT(3) +/* Charger type available in reg 0x09 */ +#define INTR_CHARGER_DET_DONE BIT(4) +#define INTR_OVP BIT(5) + +/* There are 7 interrupt sources, bit 6 use is unknown (OCP?) */ +#define INTR_ALL GENMASK(6, 0) + +/* Unmask interrupts this driver cares about */ +#define INTR_MASK \ + (INTR_ALL & ~(INTR_ID_PIN_CHANGE | INTR_VBUS_CHANGE | INTR_CHARGER_DET_DONE)) + +/* Active (event happened and not cleared yet) interrupts */ +#define REG_INTR_STATUS 0x04 + +/* + * Writing a 1 to a bit here clears it in INTR_STATUS. These bits do NOT + * auto-reset to 0, so these must be set to 0 manually after clearing. 
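/*
 * Standalone check of the INTR_MASK value defined above.  In REG_INTR_MASK a
 * 1 bit suppresses the corresponding interrupt source, so "unmask only what
 * the driver handles" means clearing exactly the ID-pin-change, Vbus-change
 * and charger-detect-done bits in the all-ones pattern.  The EX_* constants
 * mirror the definitions above purely for illustration.
 */
#include <assert.h>
#include <stdint.h>

#define EX_INTR_ID_PIN_CHANGE		(1u << 0)
#define EX_INTR_VBUS_CHANGE		(1u << 1)
#define EX_INTR_CHARGER_DET_DONE	(1u << 4)
#define EX_INTR_ALL			0x7fu	/* GENMASK(6, 0) */

int main(void)
{
	uint32_t mask = EX_INTR_ALL &
			~(EX_INTR_ID_PIN_CHANGE | EX_INTR_VBUS_CHANGE |
			  EX_INTR_CHARGER_DET_DONE);

	/* bits 2, 3, 5 and 6 stay masked; bits 0, 1 and 4 can raise the IRQ */
	assert(mask == 0x6c);
	return 0;
}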
+ */ +#define REG_INTR_CLEAR 0x05 + +/* Interrupts which bit is set to 1 here will not raise the HW IRQ */ +#define REG_INTR_MASK 0x06 + +/* ID pin ADC control, meaning of individual bits is unknown */ +#define REG_ID_PIN_ADC_CTRL 0x07 +#define ID_PIN_ADC_AUTO 0x40 +#define ID_PIN_ADC_CONTINUOUS 0x44 + +#define REG_CHARGER_DET 0x08 +#define CHARGER_DET_ON BIT(0) +#define CHARGER_DET_CDP_ON BIT(1) +#define CHARGER_DET_CDP_VAL BIT(2) + +#define REG_CHARGER_TYPE 0x09 +#define CHARGER_TYPE_UNKNOWN 0x00 +#define CHARGER_TYPE_DCP 0x01 +#define CHARGER_TYPE_SDP_OR_CDP 0x04 +#define CHARGER_TYPE_QC 0x06 + +#define REG10 0x10 +#define REG10_INIT_VALUE 0x00 + +struct lc824206xa_data { + struct work_struct work; + struct i2c_client *client; + struct extcon_dev *edev; + struct power_supply *psy; + struct regulator *vbus_boost; + unsigned int usb_type; + unsigned int cable; + unsigned int previous_cable; + u8 switch_control; + u8 previous_switch_control; + bool vbus_ok; + bool vbus_boost_enabled; + bool fastcharge_over_miclr; +}; + +static const unsigned int lc824206xa_cables[] = { + EXTCON_USB_HOST, + EXTCON_CHG_USB_SDP, + EXTCON_CHG_USB_CDP, + EXTCON_CHG_USB_DCP, + EXTCON_CHG_USB_ACA, + EXTCON_CHG_USB_FAST, + EXTCON_NONE, +}; + +/* read/write reg helpers to add error logging to smbus byte functions */ +static int lc824206xa_read_reg(struct lc824206xa_data *data, u8 reg) +{ + int ret; + + ret = i2c_smbus_read_byte_data(data->client, reg); + if (ret < 0) + dev_err(&data->client->dev, "Error %d reading reg 0x%02x\n", ret, reg); + + return ret; +} + +static int lc824206xa_write_reg(struct lc824206xa_data *data, u8 reg, u8 val) +{ + int ret; + + ret = i2c_smbus_write_byte_data(data->client, reg, val); + if (ret < 0) + dev_err(&data->client->dev, "Error %d writing reg 0x%02x\n", ret, reg); + + return ret; +} + +static int lc824206xa_get_id(struct lc824206xa_data *data) +{ + int ret; + + ret = lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_CONTINUOUS); + if (ret) + return ret; + + ret = lc824206xa_read_reg(data, REG_ID_PIN_ADC_VALUE); + + lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_AUTO); + + return ret; +} + +static void lc824206xa_set_vbus_boost(struct lc824206xa_data *data, bool enable) +{ + int ret; + + if (data->vbus_boost_enabled == enable) + return; + + if (enable) + ret = regulator_enable(data->vbus_boost); + else + ret = regulator_disable(data->vbus_boost); + + if (ret == 0) + data->vbus_boost_enabled = enable; + else + dev_err(&data->client->dev, "Error updating Vbus boost regulator: %d\n", ret); +} + +static void lc824206xa_charger_detect(struct lc824206xa_data *data) +{ + int charger_type, ret; + + charger_type = lc824206xa_read_reg(data, REG_CHARGER_TYPE); + if (charger_type < 0) + return; + + dev_dbg(&data->client->dev, "charger type 0x%02x\n", charger_type); + + switch (charger_type) { + case CHARGER_TYPE_UNKNOWN: + data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN; + /* Treat as SDP */ + data->cable = EXTCON_CHG_USB_SDP; + data->switch_control = SWITCH_USB_DEVICE; + break; + case CHARGER_TYPE_SDP_OR_CDP: + data->usb_type = POWER_SUPPLY_USB_TYPE_SDP; + data->cable = EXTCON_CHG_USB_SDP; + data->switch_control = SWITCH_USB_DEVICE; + + ret = lc824206xa_write_reg(data, REG_CHARGER_DET, + CHARGER_DET_CDP_ON | CHARGER_DET_ON); + if (ret < 0) + break; + + msleep(100); + ret = lc824206xa_read_reg(data, REG_CHARGER_DET); + if (ret >= 0 && (ret & CHARGER_DET_CDP_VAL)) { + data->usb_type = POWER_SUPPLY_USB_TYPE_CDP; + data->cable = EXTCON_CHG_USB_CDP; + } + + 
lc824206xa_write_reg(data, REG_CHARGER_DET, CHARGER_DET_ON); + break; + case CHARGER_TYPE_DCP: + data->usb_type = POWER_SUPPLY_USB_TYPE_DCP; + data->cable = EXTCON_CHG_USB_DCP; + if (data->fastcharge_over_miclr) + data->switch_control = SWITCH_STEREO_MIC; + else + data->switch_control = SWITCH_DISCONNECTED; + break; + case CHARGER_TYPE_QC: + data->usb_type = POWER_SUPPLY_USB_TYPE_DCP; + data->cable = EXTCON_CHG_USB_DCP; + data->switch_control = SWITCH_DISCONNECTED; + break; + default: + dev_warn(&data->client->dev, "Unknown charger type: 0x%02x\n", charger_type); + break; + } +} + +static void lc824206xa_work(struct work_struct *work) +{ + struct lc824206xa_data *data = container_of(work, struct lc824206xa_data, work); + bool vbus_boost_enable = false; + int status, id; + + status = lc824206xa_read_reg(data, REG_STATUS); + if (status < 0) + return; + + dev_dbg(&data->client->dev, "status 0x%02x\n", status); + + data->vbus_ok = (status & (STATUS_VBUS_PRESENT | STATUS_OVP)) == STATUS_VBUS_PRESENT; + + /* Read id pin ADC if necessary */ + switch (status & STATUS_USB_ID) { + case STATUS_USB_ID_GND: + case STATUS_USB_ID_FLOAT: + break; + default: + /* Happens when the connector is inserted slowly, log at dbg level */ + dev_dbg(&data->client->dev, "Unknown status 0x%02x\n", status); + fallthrough; + case STATUS_USB_ID_ACA: + id = lc824206xa_get_id(data); + dev_dbg(&data->client->dev, "RID 0x%02x\n", id); + switch (id) { + case 0x10: + status = STATUS_USB_ID_GND; + break; + case 0x18 ... 0x1e: + status = STATUS_USB_ID_ACA; + break; + case 0x1f: + status = STATUS_USB_ID_FLOAT; + break; + default: + dev_warn(&data->client->dev, "Unknown RID 0x%02x\n", id); + return; + } + } + + /* Check for out of spec OTG charging hubs, treat as ACA */ + if ((status & STATUS_USB_ID) == STATUS_USB_ID_GND && + data->vbus_ok && !data->vbus_boost_enabled) { + dev_info(&data->client->dev, "Out of spec USB host adapter with Vbus present, not enabling 5V output\n"); + status = STATUS_USB_ID_ACA; + } + + switch (status & STATUS_USB_ID) { + case STATUS_USB_ID_ACA: + data->usb_type = POWER_SUPPLY_USB_TYPE_ACA; + data->cable = EXTCON_CHG_USB_ACA; + data->switch_control = SWITCH_USB_HOST; + break; + case STATUS_USB_ID_GND: + data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN; + data->cable = EXTCON_USB_HOST; + data->switch_control = SWITCH_USB_HOST; + vbus_boost_enable = true; + break; + case STATUS_USB_ID_FLOAT: + /* When fast charging with Vbus > 5V, OVP will be set */ + if (data->fastcharge_over_miclr && + data->switch_control == SWITCH_STEREO_MIC && + (status & STATUS_OVP)) { + data->cable = EXTCON_CHG_USB_FAST; + break; + } + + if (data->vbus_ok) { + lc824206xa_charger_detect(data); + } else { + data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN; + data->cable = EXTCON_NONE; + data->switch_control = SWITCH_DISCONNECTED; + } + break; + } + + lc824206xa_set_vbus_boost(data, vbus_boost_enable); + + if (data->switch_control != data->previous_switch_control) { + lc824206xa_write_reg(data, REG_SWITCH_CONTROL, data->switch_control); + data->previous_switch_control = data->switch_control; + } + + if (data->cable != data->previous_cable) { + extcon_set_state_sync(data->edev, data->previous_cable, false); + extcon_set_state_sync(data->edev, data->cable, true); + data->previous_cable = data->cable; + } + + power_supply_changed(data->psy); +} + +static irqreturn_t lc824206xa_irq(int irq, void *_data) +{ + struct lc824206xa_data *data = _data; + int intr_status; + + intr_status = lc824206xa_read_reg(data, REG_INTR_STATUS); + if (intr_status 
< 0) + intr_status = INTR_ALL; /* Should never happen, clear all */ + + dev_dbg(&data->client->dev, "interrupt 0x%02x\n", intr_status); + + lc824206xa_write_reg(data, REG_INTR_CLEAR, intr_status); + lc824206xa_write_reg(data, REG_INTR_CLEAR, 0); + + schedule_work(&data->work); + return IRQ_HANDLED; +} + +/* + * Newer charger (power_supply) drivers expect the max input current to be + * provided by a parent power_supply device for the charger chip. + */ +static int lc824206xa_psy_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct lc824206xa_data *data = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + val->intval = data->vbus_ok && !data->vbus_boost_enabled; + break; + case POWER_SUPPLY_PROP_USB_TYPE: + val->intval = data->usb_type; + break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + switch (data->usb_type) { + case POWER_SUPPLY_USB_TYPE_DCP: + case POWER_SUPPLY_USB_TYPE_ACA: + val->intval = 2000000; + break; + case POWER_SUPPLY_USB_TYPE_CDP: + val->intval = 1500000; + break; + default: + val->intval = 500000; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static const enum power_supply_property lc824206xa_psy_props[] = { + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_USB_TYPE, + POWER_SUPPLY_PROP_CURRENT_MAX, +}; + +static const struct power_supply_desc lc824206xa_psy_desc = { + .name = "lc824206xa-charger-detect", + .type = POWER_SUPPLY_TYPE_USB, + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_ACA) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), + .properties = lc824206xa_psy_props, + .num_properties = ARRAY_SIZE(lc824206xa_psy_props), + .get_property = lc824206xa_psy_get_prop, +}; + +static int lc824206xa_probe(struct i2c_client *client) +{ + struct power_supply_config psy_cfg = { }; + struct device *dev = &client->dev; + struct lc824206xa_data *data; + int ret; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + INIT_WORK(&data->work, lc824206xa_work); + data->cable = EXTCON_NONE; + data->previous_cable = EXTCON_NONE; + data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN; + /* Some designs use a custom fast-charge protocol over the mic L/R inputs */ + data->fastcharge_over_miclr = + device_property_read_bool(dev, "onnn,enable-miclr-for-dcp"); + + data->vbus_boost = devm_regulator_get(dev, "vbus"); + if (IS_ERR(data->vbus_boost)) + return dev_err_probe(dev, PTR_ERR(data->vbus_boost), + "getting regulator\n"); + + /* Init */ + ret = lc824206xa_write_reg(data, REG00, REG00_INIT_VALUE); + ret |= lc824206xa_write_reg(data, REG10, REG10_INIT_VALUE); + msleep(100); + ret |= lc824206xa_write_reg(data, REG_INTR_CLEAR, INTR_ALL); + ret |= lc824206xa_write_reg(data, REG_INTR_CLEAR, 0); + ret |= lc824206xa_write_reg(data, REG_INTR_MASK, INTR_MASK); + ret |= lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_AUTO); + ret |= lc824206xa_write_reg(data, REG_CHARGER_DET, CHARGER_DET_ON); + if (ret) + return -EIO; + + /* Initialize extcon device */ + data->edev = devm_extcon_dev_allocate(dev, lc824206xa_cables); + if (IS_ERR(data->edev)) + return PTR_ERR(data->edev); + + ret = devm_extcon_dev_register(dev, data->edev); + if (ret) + return dev_err_probe(dev, ret, "registering extcon device\n"); + + psy_cfg.drv_data = data; + data->psy = devm_power_supply_register(dev, &lc824206xa_psy_desc, &psy_cfg); + if (IS_ERR(data->psy)) + return 
dev_err_probe(dev, PTR_ERR(data->psy), "registering power supply\n"); + + ret = devm_request_threaded_irq(dev, client->irq, NULL, lc824206xa_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + KBUILD_MODNAME, data); + if (ret) + return dev_err_probe(dev, ret, "requesting IRQ\n"); + + /* Sync initial state */ + schedule_work(&data->work); + return 0; +} + +static const struct i2c_device_id lc824206xa_i2c_ids[] = { + { "lc824206xa" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lc824206xa_i2c_ids); + +static struct i2c_driver lc824206xa_driver = { + .driver = { + .name = KBUILD_MODNAME, + }, + .probe = lc824206xa_probe, + .id_table = lc824206xa_i2c_ids, +}; + +module_i2c_driver(lc824206xa_driver); + +MODULE_AUTHOR("Hans de Goede "); +MODULE_DESCRIPTION("LC824206XA Micro USB Switch driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index f8b99dd6cd827d..01354b9de8b236 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -168,7 +168,6 @@ static size_t required_space(struct fw_descriptor *desc) int fw_core_add_descriptor(struct fw_descriptor *desc) { size_t i; - int ret; /* * Check descriptor is valid; the length of all blocks in the @@ -182,29 +181,25 @@ int fw_core_add_descriptor(struct fw_descriptor *desc) if (i != desc->length) return -EINVAL; - mutex_lock(&card_mutex); + guard(mutex)(&card_mutex); - if (config_rom_length + required_space(desc) > 256) { - ret = -EBUSY; - } else { - list_add_tail(&desc->link, &descriptor_list); - config_rom_length += required_space(desc); - descriptor_count++; - if (desc->immediate > 0) - descriptor_count++; - update_config_roms(); - ret = 0; - } + if (config_rom_length + required_space(desc) > 256) + return -EBUSY; - mutex_unlock(&card_mutex); + list_add_tail(&desc->link, &descriptor_list); + config_rom_length += required_space(desc); + descriptor_count++; + if (desc->immediate > 0) + descriptor_count++; + update_config_roms(); - return ret; + return 0; } EXPORT_SYMBOL(fw_core_add_descriptor); void fw_core_remove_descriptor(struct fw_descriptor *desc) { - mutex_lock(&card_mutex); + guard(mutex)(&card_mutex); list_del(&desc->link); config_rom_length -= required_space(desc); @@ -212,8 +207,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) if (desc->immediate > 0) descriptor_count--; update_config_roms(); - - mutex_unlock(&card_mutex); } EXPORT_SYMBOL(fw_core_remove_descriptor); @@ -381,11 +374,11 @@ static void bm_work(struct work_struct *work) bm_id = be32_to_cpu(transaction_data[0]); - spin_lock_irq(&card->lock); - if (rcode == RCODE_COMPLETE && generation == card->generation) - card->bm_node_id = - bm_id == 0x3f ? local_id : 0xffc0 | bm_id; - spin_unlock_irq(&card->lock); + scoped_guard(spinlock_irq, &card->lock) { + if (rcode == RCODE_COMPLETE && generation == card->generation) + card->bm_node_id = + bm_id == 0x3f ? local_id : 0xffc0 | bm_id; + } if (rcode == RCODE_COMPLETE && bm_id != 0x3f) { /* Somebody else is BM. Only act as IRM. */ @@ -578,25 +571,47 @@ void fw_card_initialize(struct fw_card *card, } EXPORT_SYMBOL(fw_card_initialize); -int fw_card_add(struct fw_card *card, - u32 max_receive, u32 link_speed, u64 guid) +int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid, + unsigned int supported_isoc_contexts) { + struct workqueue_struct *isoc_wq; int ret; + // This workqueue should be: + // * != WQ_BH Sleepable. + // * == WQ_UNBOUND Any core can process data for isoc context. 
The + // implementation of unit protocol could consume the core + // for longer periods. + // * != WQ_MEM_RECLAIM Not used as a backend for any block device. + // * == WQ_FREEZABLE Isochronous communication occurs at regular intervals in real + // time, thus should be drained if possible at the freeze phase. + // * == WQ_HIGHPRI High priority to process semi-realtime timestamped data. + // * == WQ_SYSFS Parameters are available via sysfs. + // * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous + // contexts if they are scheduled to the same cycle. + isoc_wq = alloc_workqueue("firewire-isoc-card%u", + WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS, + supported_isoc_contexts, card->index); + if (!isoc_wq) + return -ENOMEM; + card->max_receive = max_receive; card->link_speed = link_speed; card->guid = guid; - mutex_lock(&card_mutex); + guard(mutex)(&card_mutex); generate_config_rom(card, tmp_config_rom); ret = card->driver->enable(card, tmp_config_rom, config_rom_length); - if (ret == 0) - list_add_tail(&card->link, &card_list); + if (ret < 0) { + destroy_workqueue(isoc_wq); + return ret; + } - mutex_unlock(&card_mutex); + card->isoc_wq = isoc_wq; + list_add_tail(&card->link, &card_list); - return ret; + return 0; } EXPORT_SYMBOL(fw_card_add); @@ -714,29 +729,31 @@ EXPORT_SYMBOL_GPL(fw_card_release); void fw_core_remove_card(struct fw_card *card) { struct fw_card_driver dummy_driver = dummy_driver_template; - unsigned long flags; + + might_sleep(); card->driver->update_phy_reg(card, 4, PHY_LINK_ACTIVE | PHY_CONTENDER, 0); fw_schedule_bus_reset(card, false, true); - mutex_lock(&card_mutex); - list_del_init(&card->link); - mutex_unlock(&card_mutex); + scoped_guard(mutex, &card_mutex) + list_del_init(&card->link); /* Switch off most of the card driver interface. */ dummy_driver.free_iso_context = card->driver->free_iso_context; dummy_driver.stop_iso = card->driver->stop_iso; card->driver = &dummy_driver; + drain_workqueue(card->isoc_wq); - spin_lock_irqsave(&card->lock, flags); - fw_destroy_nodes(card); - spin_unlock_irqrestore(&card->lock, flags); + scoped_guard(spinlock_irqsave, &card->lock) + fw_destroy_nodes(card); /* Wait for all users, especially device workqueue jobs, to finish. */ fw_card_put(card); wait_for_completion(&card->done); + destroy_workqueue(card->isoc_wq); + WARN_ON(!list_empty(&card->transaction_list)); } EXPORT_SYMBOL(fw_core_remove_card); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 9a7dc90330a351..b360dca2c69e85 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -37,6 +36,8 @@ #include "core.h" #include +#include "packet-header-definitions.h" + /* * ABI version history is documented in linux/firewire-cdev.h.
*/ @@ -52,7 +53,7 @@ struct client { spinlock_t lock; bool in_shutdown; - struct idr resource_idr; + struct xarray resource_xa; struct list_head event_list; wait_queue_head_t wait; wait_queue_head_t tx_flush_wait; @@ -137,8 +138,41 @@ struct iso_resource { struct iso_resource_event *e_alloc, *e_dealloc; }; +static struct address_handler_resource *to_address_handler_resource(struct client_resource *resource) +{ + return container_of(resource, struct address_handler_resource, resource); +} + +static struct inbound_transaction_resource *to_inbound_transaction_resource(struct client_resource *resource) +{ + return container_of(resource, struct inbound_transaction_resource, resource); +} + +static struct descriptor_resource *to_descriptor_resource(struct client_resource *resource) +{ + return container_of(resource, struct descriptor_resource, resource); +} + +static struct iso_resource *to_iso_resource(struct client_resource *resource) +{ + return container_of(resource, struct iso_resource, resource); +} + static void release_iso_resource(struct client *, struct client_resource *); +static int is_iso_resource(const struct client_resource *resource) +{ + return resource->release == release_iso_resource; +} + +static void release_transaction(struct client *client, + struct client_resource *resource); + +static int is_outbound_transaction_resource(const struct client_resource *resource) +{ + return resource->release == release_transaction; +} + static void schedule_iso_resource(struct iso_resource *r, unsigned long delay) { client_get(r->client); @@ -146,13 +180,6 @@ static void schedule_iso_resource(struct iso_resource *r, unsigned long delay) client_put(r->client); } -static void schedule_if_iso_resource(struct client_resource *resource) -{ - if (resource->release == release_iso_resource) - schedule_iso_resource(container_of(resource, - struct iso_resource, resource), 0); -} - /* * dequeue_event() just kfree()'s the event, so the event has to be * the first field in a struct XYZ_event. 
@@ -269,7 +296,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file) client->device = device; spin_lock_init(&client->lock); - idr_init(&client->resource_idr); + xa_init_flags(&client->resource_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH); INIT_LIST_HEAD(&client->event_list); init_waitqueue_head(&client->wait); init_waitqueue_head(&client->tx_flush_wait); @@ -285,19 +312,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file) static void queue_event(struct client *client, struct event *event, void *data0, size_t size0, void *data1, size_t size1) { - unsigned long flags; - event->v[0].data = data0; event->v[0].size = size0; event->v[1].data = data1; event->v[1].size = size1; - spin_lock_irqsave(&client->lock, flags); - if (client->in_shutdown) - kfree(event); - else - list_add_tail(&event->link, &client->event_list); - spin_unlock_irqrestore(&client->lock, flags); + scoped_guard(spinlock_irqsave, &client->lock) { + if (client->in_shutdown) + kfree(event); + else + list_add_tail(&event->link, &client->event_list); + } wake_up_interruptible(&client->wait); } @@ -319,10 +344,10 @@ static int dequeue_event(struct client *client, fw_device_is_shutdown(client->device)) return -ENODEV; - spin_lock_irq(&client->lock); - event = list_first_entry(&client->event_list, struct event, link); - list_del(&event->link); - spin_unlock_irq(&client->lock); + scoped_guard(spinlock_irq, &client->lock) { + event = list_first_entry(&client->event_list, struct event, link); + list_del(&event->link); + } total = 0; for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { @@ -354,7 +379,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, { struct fw_card *card = client->device->card; - spin_lock_irq(&card->lock); + guard(spinlock_irq)(&card->lock); event->closure = client->bus_reset_closure; event->type = FW_CDEV_EVENT_BUS_RESET; @@ -364,8 +389,6 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, event->bm_node_id = card->bm_node_id; event->irm_node_id = card->irm_node->node_id; event->root_node_id = card->root_node->node_id; - - spin_unlock_irq(&card->lock); } static void for_each_client(struct fw_device *device, @@ -373,22 +396,17 @@ static void for_each_client(struct fw_device *device, { struct client *c; - mutex_lock(&device->client_list_mutex); + guard(mutex)(&device->client_list_mutex); + list_for_each_entry(c, &device->client_list, link) callback(c); - mutex_unlock(&device->client_list_mutex); -} - -static int schedule_reallocations(int id, void *p, void *data) -{ - schedule_if_iso_resource(p); - - return 0; } static void queue_bus_reset_event(struct client *client) { struct bus_reset_event *e; + struct client_resource *resource; + unsigned long index; e = kzalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) @@ -399,9 +417,12 @@ static void queue_bus_reset_event(struct client *client) queue_event(client, &e->event, &e->reset, sizeof(e->reset), NULL, 0); - spin_lock_irq(&client->lock); - idr_for_each(&client->resource_idr, schedule_reallocations, client); - spin_unlock_irq(&client->lock); + guard(spinlock_irq)(&client->lock); + + xa_for_each(&client->resource_xa, index, resource) { + if (is_iso_resource(resource)) + schedule_iso_resource(to_iso_resource(resource), 0); + } } void fw_device_cdev_update(struct fw_device *device) @@ -452,23 +473,20 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg) a->version = FW_CDEV_KERNEL_VERSION; a->card = client->device->card->index; - down_read(&fw_device_rwsem); - - if 
(a->rom != 0) { - size_t want = a->rom_length; - size_t have = client->device->config_rom_length * 4; + scoped_guard(rwsem_read, &fw_device_rwsem) { + if (a->rom != 0) { + size_t want = a->rom_length; + size_t have = client->device->config_rom_length * 4; - ret = copy_to_user(u64_to_uptr(a->rom), - client->device->config_rom, min(want, have)); + ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom, + min(want, have)); + if (ret != 0) + return -EFAULT; + } + a->rom_length = client->device->config_rom_length * 4; } - a->rom_length = client->device->config_rom_length * 4; - - up_read(&fw_device_rwsem); - if (ret != 0) - return -EFAULT; - - mutex_lock(&client->device->client_list_mutex); + guard(mutex)(&client->device->client_list_mutex); client->bus_reset_closure = a->bus_reset_closure; if (a->bus_reset != 0) { @@ -479,37 +497,36 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg) if (ret == 0 && list_empty(&client->link)) list_add_tail(&client->link, &client->device->client_list); - mutex_unlock(&client->device->client_list_mutex); - return ret ? -EFAULT : 0; } -static int add_client_resource(struct client *client, - struct client_resource *resource, gfp_t gfp_mask) +static int add_client_resource(struct client *client, struct client_resource *resource, + gfp_t gfp_mask) { - bool preload = gfpflags_allow_blocking(gfp_mask); - unsigned long flags; int ret; - if (preload) - idr_preload(gfp_mask); - spin_lock_irqsave(&client->lock, flags); + scoped_guard(spinlock_irqsave, &client->lock) { + u32 index; - if (client->in_shutdown) - ret = -ECANCELED; - else - ret = idr_alloc(&client->resource_idr, resource, 0, 0, - GFP_NOWAIT); - if (ret >= 0) { - resource->handle = ret; - client_get(client); - schedule_if_iso_resource(resource); + if (client->in_shutdown) { + ret = -ECANCELED; + } else { + if (gfpflags_allow_blocking(gfp_mask)) { + ret = xa_alloc(&client->resource_xa, &index, resource, xa_limit_32b, + GFP_NOWAIT); + } else { + ret = xa_alloc_bh(&client->resource_xa, &index, resource, + xa_limit_32b, GFP_NOWAIT); + } + } + if (ret >= 0) { + resource->handle = index; + client_get(client); + if (is_iso_resource(resource)) + schedule_iso_resource(to_iso_resource(resource), 0); + } } - spin_unlock_irqrestore(&client->lock, flags); - if (preload) - idr_preload_end(); - return ret < 0 ? 
ret : 0; } @@ -517,19 +534,19 @@ static int release_client_resource(struct client *client, u32 handle, client_resource_release_fn_t release, struct client_resource **return_resource) { + unsigned long index = handle; struct client_resource *resource; - spin_lock_irq(&client->lock); - if (client->in_shutdown) - resource = NULL; - else - resource = idr_find(&client->resource_idr, handle); - if (resource && resource->release == release) - idr_remove(&client->resource_idr, handle); - spin_unlock_irq(&client->lock); + scoped_guard(spinlock_irq, &client->lock) { + if (client->in_shutdown) + return -EINVAL; - if (!(resource && resource->release == release)) - return -EINVAL; + resource = xa_load(&client->resource_xa, index); + if (!resource || resource->release != release) + return -EINVAL; + + xa_erase(&client->resource_xa, handle); + } if (return_resource) *return_resource = resource; @@ -551,13 +568,13 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts { struct outbound_transaction_event *e = data; struct client *client = e->client; - unsigned long flags; + unsigned long index = e->r.resource.handle; - spin_lock_irqsave(&client->lock, flags); - idr_remove(&client->resource_idr, e->r.resource.handle); - if (client->in_shutdown) - wake_up(&client->tx_flush_wait); - spin_unlock_irqrestore(&client->lock, flags); + scoped_guard(spinlock_irqsave, &client->lock) { + xa_erase(&client->resource_xa, index); + if (client->in_shutdown) + wake_up(&client->tx_flush_wait); + } switch (e->rsp.without_tstamp.type) { case FW_CDEV_EVENT_RESPONSE: @@ -599,13 +616,13 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0); break; + } default: WARN_ON(1); break; } - } - /* Drop the idr's reference */ + // Drop the xarray's reference. 
client_put(client); } @@ -693,8 +710,7 @@ static int ioctl_send_request(struct client *client, union ioctl_arg *arg) static void release_request(struct client *client, struct client_resource *resource) { - struct inbound_transaction_resource *r = container_of(resource, - struct inbound_transaction_resource, resource); + struct inbound_transaction_resource *r = to_inbound_transaction_resource(resource); if (r->is_fcp) fw_request_put(r->request); @@ -804,8 +820,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request, static void release_address_handler(struct client *client, struct client_resource *resource) { - struct address_handler_resource *r = - container_of(resource, struct address_handler_resource, resource); + struct address_handler_resource *r = to_address_handler_resource(resource); fw_core_remove_address_handler(&r->handler); kfree(r); @@ -869,8 +884,7 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg) release_request, &resource) < 0) return -EINVAL; - r = container_of(resource, struct inbound_transaction_resource, - resource); + r = to_inbound_transaction_resource(resource); if (r->is_fcp) { fw_request_put(r->request); goto out; @@ -904,8 +918,7 @@ static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) static void release_descriptor(struct client *client, struct client_resource *resource) { - struct descriptor_resource *r = - container_of(resource, struct descriptor_resource, resource); + struct descriptor_resource *r = to_descriptor_resource(resource); fw_core_remove_descriptor(&r->descriptor); kfree(r); @@ -969,7 +982,7 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle, struct client *client = data; struct iso_interrupt_event *e; - e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); + e = kmalloc(sizeof(*e) + header_length, GFP_KERNEL); if (e == NULL) return; @@ -988,7 +1001,7 @@ static void iso_mc_callback(struct fw_iso_context *context, struct client *client = data; struct iso_interrupt_mc_event *e; - e = kmalloc(sizeof(*e), GFP_ATOMIC); + e = kmalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) return; @@ -1070,10 +1083,10 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW) context->drop_overflow_headers = true; - /* We only support one context at this time. */ - spin_lock_irq(&client->lock); + // We only support one context at this time. 
+ guard(spinlock_irq)(&client->lock); + if (client->iso_context != NULL) { - spin_unlock_irq(&client->lock); fw_iso_context_destroy(context); return -EBUSY; @@ -1083,7 +1096,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) client->device->card, iso_dma_direction(context)); if (ret < 0) { - spin_unlock_irq(&client->lock); fw_iso_context_destroy(context); return ret; @@ -1092,7 +1104,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) } client->iso_closure = a->closure; client->iso_context = context; - spin_unlock_irq(&client->lock); a->handle = 0; @@ -1266,29 +1277,27 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) struct fw_card *card = client->device->card; struct timespec64 ts = {0, 0}; u32 cycle_time = 0; - int ret = 0; + int ret; - local_irq_disable(); + guard(irq)(); ret = fw_card_read_cycle_time(card, &cycle_time); if (ret < 0) - goto end; + return ret; switch (a->clk_id) { case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break; case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break; case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break; default: - ret = -EINVAL; + return -EINVAL; } -end: - local_irq_enable(); a->tv_sec = ts.tv_sec; a->tv_nsec = ts.tv_nsec; a->cycle_timer = cycle_time; - return ret; + return 0; } static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg) @@ -1311,28 +1320,28 @@ static void iso_resource_work(struct work_struct *work) struct iso_resource *r = container_of(work, struct iso_resource, work.work); struct client *client = r->client; + unsigned long index = r->resource.handle; int generation, channel, bandwidth, todo; bool skip, free, success; - spin_lock_irq(&client->lock); - generation = client->device->generation; - todo = r->todo; - /* Allow 1000ms grace period for other reallocations. */ - if (todo == ISO_RES_ALLOC && - time_before64(get_jiffies_64(), - client->device->card->reset_jiffies + HZ)) { - schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3)); - skip = true; - } else { - /* We could be called twice within the same generation. */ - skip = todo == ISO_RES_REALLOC && - r->generation == generation; + scoped_guard(spinlock_irq, &client->lock) { + generation = client->device->generation; + todo = r->todo; + // Allow 1000ms grace period for other reallocations. + if (todo == ISO_RES_ALLOC && + time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) { + schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3)); + skip = true; + } else { + // We could be called twice within the same generation. + skip = todo == ISO_RES_REALLOC && + r->generation == generation; + } + free = todo == ISO_RES_DEALLOC || + todo == ISO_RES_ALLOC_ONCE || + todo == ISO_RES_DEALLOC_ONCE; + r->generation = generation; } - free = todo == ISO_RES_DEALLOC || - todo == ISO_RES_ALLOC_ONCE || - todo == ISO_RES_DEALLOC_ONCE; - r->generation = generation; - spin_unlock_irq(&client->lock); if (skip) goto out; @@ -1346,7 +1355,7 @@ static void iso_resource_work(struct work_struct *work) todo == ISO_RES_ALLOC_ONCE); /* * Is this generation outdated already? As long as this resource sticks - * in the idr, it will be scheduled again for a newer generation or at + * in the xarray, it will be scheduled again for a newer generation or at * shutdown. 
*/ if (channel == -EAGAIN && @@ -1355,24 +1364,20 @@ static void iso_resource_work(struct work_struct *work) success = channel >= 0 || bandwidth > 0; - spin_lock_irq(&client->lock); - /* - * Transit from allocation to reallocation, except if the client - * requested deallocation in the meantime. - */ - if (r->todo == ISO_RES_ALLOC) - r->todo = ISO_RES_REALLOC; - /* - * Allocation or reallocation failure? Pull this resource out of the - * idr and prepare for deletion, unless the client is shutting down. - */ - if (r->todo == ISO_RES_REALLOC && !success && - !client->in_shutdown && - idr_remove(&client->resource_idr, r->resource.handle)) { - client_put(client); - free = true; + scoped_guard(spinlock_irq, &client->lock) { + // Transit from allocation to reallocation, except if the client + // requested deallocation in the meantime. + if (r->todo == ISO_RES_ALLOC) + r->todo = ISO_RES_REALLOC; + // Allocation or reallocation failure? Pull this resource out of the + // xarray and prepare for deletion, unless the client is shutting down. + if (r->todo == ISO_RES_REALLOC && !success && + !client->in_shutdown && + xa_erase(&client->resource_xa, index)) { + client_put(client); + free = true; + } } - spin_unlock_irq(&client->lock); if (todo == ISO_RES_ALLOC && channel >= 0) r->channels = 1ULL << channel; @@ -1407,13 +1412,12 @@ static void iso_resource_work(struct work_struct *work) static void release_iso_resource(struct client *client, struct client_resource *resource) { - struct iso_resource *r = - container_of(resource, struct iso_resource, resource); + struct iso_resource *r = to_iso_resource(resource); + + guard(spinlock_irq)(&client->lock); - spin_lock_irq(&client->lock); r->todo = ISO_RES_DEALLOC; schedule_iso_resource(r, 0); - spin_unlock_irq(&client->lock); } static int init_iso_resource(struct client *client, @@ -1635,7 +1639,7 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) e->client = client; e->p.speed = SCODE_100; e->p.generation = a->generation; - e->p.header[0] = TCODE_LINK_INTERNAL << 4; + async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL); e->p.header[1] = a->data[0]; e->p.header[2] = a->data[1]; e->p.header_length = 12; @@ -1676,26 +1680,22 @@ static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg if (!client->device->is_local) return -ENOSYS; - spin_lock_irq(&card->lock); + guard(spinlock_irq)(&card->lock); list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list); client->phy_receiver_closure = a->closure; - spin_unlock_irq(&card->lock); - return 0; } void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) { struct client *client; - struct inbound_phy_packet_event *e; - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); + guard(spinlock_irqsave)(&card->lock); list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { - e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); + struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); if (e == NULL) break; @@ -1723,8 +1723,6 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0); } } - - spin_unlock_irqrestore(&card->lock, flags); } static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { @@ -1821,16 +1819,15 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) if (ret < 0) return ret; - spin_lock_irq(&client->lock); - if (client->iso_context) { - ret = 
fw_iso_buffer_map_dma(&client->buffer, - client->device->card, - iso_dma_direction(client->iso_context)); - client->buffer_is_mapped = (ret == 0); + scoped_guard(spinlock_irq, &client->lock) { + if (client->iso_context) { + ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card, + iso_dma_direction(client->iso_context)); + if (ret < 0) + goto fail; + client->buffer_is_mapped = true; + } } - spin_unlock_irq(&client->lock); - if (ret < 0) - goto fail; ret = vm_map_pages_zero(vma, client->buffer.pages, client->buffer.page_count); @@ -1843,48 +1840,33 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) return ret; } -static int is_outbound_transaction_resource(int id, void *p, void *data) +static bool has_outbound_transactions(struct client *client) { - struct client_resource *resource = p; - - return resource->release == release_transaction; -} - -static int has_outbound_transactions(struct client *client) -{ - int ret; - - spin_lock_irq(&client->lock); - ret = idr_for_each(&client->resource_idr, - is_outbound_transaction_resource, NULL); - spin_unlock_irq(&client->lock); + struct client_resource *resource; + unsigned long index; - return ret; -} + guard(spinlock_irq)(&client->lock); -static int shutdown_resource(int id, void *p, void *data) -{ - struct client_resource *resource = p; - struct client *client = data; - - resource->release(client, resource); - client_put(client); + xa_for_each(&client->resource_xa, index, resource) { + if (is_outbound_transaction_resource(resource)) + return true; + } - return 0; + return false; } static int fw_device_op_release(struct inode *inode, struct file *file) { struct client *client = file->private_data; struct event *event, *next_event; + struct client_resource *resource; + unsigned long index; - spin_lock_irq(&client->device->card->lock); - list_del(&client->phy_receiver_link); - spin_unlock_irq(&client->device->card->lock); + scoped_guard(spinlock_irq, &client->device->card->lock) + list_del(&client->phy_receiver_link); - mutex_lock(&client->device->client_list_mutex); - list_del(&client->link); - mutex_unlock(&client->device->client_list_mutex); + scoped_guard(mutex, &client->device->client_list_mutex) + list_del(&client->link); if (client->iso_context) fw_iso_context_destroy(client->iso_context); @@ -1892,15 +1874,17 @@ static int fw_device_op_release(struct inode *inode, struct file *file) if (client->buffer.pages) fw_iso_buffer_destroy(&client->buffer, client->device->card); - /* Freeze client->resource_idr and client->event_list */ - spin_lock_irq(&client->lock); - client->in_shutdown = true; - spin_unlock_irq(&client->lock); + // Freeze client->resource_xa and client->event_list. 
+ scoped_guard(spinlock_irq, &client->lock) + client->in_shutdown = true; wait_event(client->tx_flush_wait, !has_outbound_transactions(client)); - idr_for_each(&client->resource_idr, shutdown_resource, client); - idr_destroy(&client->resource_idr); + xa_for_each(&client->resource_xa, index, resource) { + resource->release(client, resource); + client_put(client); + } + xa_destroy(&client->resource_xa); list_for_each_entry_safe(event, next_event, &client->event_list, link) kfree(event); @@ -1927,7 +1911,6 @@ static __poll_t fw_device_op_poll(struct file *file, poll_table * pt) const struct file_operations fw_device_ops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = fw_device_op_open, .read = fw_device_op_read, .unlocked_ioctl = fw_device_op_ioctl, diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 00e9a13e6c459e..a99fe35f1f0d1a 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -288,7 +287,7 @@ static ssize_t show_immediate(struct device *dev, const u32 *directories[] = {NULL, NULL}; int i, value = -1; - down_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); if (is_fw_unit(dev)) { directories[0] = fw_unit(dev)->directory; @@ -317,8 +316,6 @@ static ssize_t show_immediate(struct device *dev, } } - up_read(&fw_device_rwsem); - if (value < 0) return -ENOENT; @@ -339,7 +336,7 @@ static ssize_t show_text_leaf(struct device *dev, char dummy_buf[2]; int i, ret = -ENOENT; - down_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); if (is_fw_unit(dev)) { directories[0] = fw_unit(dev)->directory; @@ -382,15 +379,14 @@ static ssize_t show_text_leaf(struct device *dev, } } - if (ret >= 0) { - /* Strip trailing whitespace and add newline. */ - while (ret > 0 && isspace(buf[ret - 1])) - ret--; - strcpy(buf + ret, "\n"); - ret++; - } + if (ret < 0) + return ret; - up_read(&fw_device_rwsem); + // Strip trailing whitespace and add newline. 
+ while (ret > 0 && isspace(buf[ret - 1])) + ret--; + strcpy(buf + ret, "\n"); + ret++; return ret; } @@ -466,10 +462,10 @@ static ssize_t config_rom_show(struct device *dev, struct fw_device *device = fw_device(dev); size_t length; - down_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); + length = device->config_rom_length * 4; memcpy(buf, device->config_rom, length); - up_read(&fw_device_rwsem); return length; } @@ -478,13 +474,10 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); - int ret; - down_read(&fw_device_rwsem); - ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]); - up_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); - return ret; + return sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]); } static ssize_t is_local_show(struct device *dev, @@ -524,7 +517,8 @@ static ssize_t units_show(struct device *dev, struct fw_csr_iterator ci; int key, value, i = 0; - down_read(&fw_device_rwsem); + guard(rwsem_read)(&fw_device_rwsem); + fw_csr_iterator_init(&ci, &device->config_rom[ROOT_DIR_OFFSET]); while (fw_csr_iterator_next(&ci, &key, &value)) { if (key != (CSR_UNIT | CSR_DIRECTORY)) @@ -533,7 +527,6 @@ static ssize_t units_show(struct device *dev, if (i >= PAGE_SIZE - (8 + 1 + 8 + 1)) break; } - up_read(&fw_device_rwsem); if (i) buf[i - 1] = '\n'; @@ -571,7 +564,8 @@ static int read_rom(struct fw_device *device, return rcode; } -#define MAX_CONFIG_ROM_SIZE 256 +// By quadlet unit. +#define MAX_CONFIG_ROM_SIZE ((CSR_CONFIG_ROM_END - CSR_CONFIG_ROM) / sizeof(u32)) /* * Read the bus info block, perform a speed probe, and read all of the rest of @@ -729,10 +723,10 @@ static int read_config_rom(struct fw_device *device, int generation) goto out; } - down_write(&fw_device_rwsem); - device->config_rom = new_rom; - device->config_rom_length = length; - up_write(&fw_device_rwsem); + scoped_guard(rwsem_write, &fw_device_rwsem) { + device->config_rom = new_rom; + device->config_rom_length = length; + } kfree(old_rom); ret = RCODE_COMPLETE; @@ -813,24 +807,21 @@ static int shutdown_unit(struct device *device, void *data) /* * fw_device_rwsem acts as dual purpose mutex: - * - serializes accesses to fw_device_idr, * - serializes accesses to fw_device.config_rom/.config_rom_length and * fw_unit.directory, unless those accesses happen at safe occasions */ DECLARE_RWSEM(fw_device_rwsem); -DEFINE_IDR(fw_device_idr); +DEFINE_XARRAY_ALLOC(fw_device_xa); int fw_cdev_major; struct fw_device *fw_device_get_by_devt(dev_t devt) { struct fw_device *device; - down_read(&fw_device_rwsem); - device = idr_find(&fw_device_idr, MINOR(devt)); + device = xa_load(&fw_device_xa, MINOR(devt)); if (device) fw_device_get(device); - up_read(&fw_device_rwsem); return device; } @@ -864,7 +855,6 @@ static void fw_device_shutdown(struct work_struct *work) { struct fw_device *device = container_of(work, struct fw_device, work.work); - int minor = MINOR(device->device.devt); if (time_before64(get_jiffies_64(), device->card->reset_jiffies + SHUTDOWN_DELAY) @@ -882,9 +872,7 @@ static void fw_device_shutdown(struct work_struct *work) device_for_each_child(&device->device, NULL, shutdown_unit); device_unregister(&device->device); - down_write(&fw_device_rwsem); - idr_remove(&fw_device_idr, minor); - up_write(&fw_device_rwsem); + xa_erase(&fw_device_xa, MINOR(device->device.devt)); fw_device_put(device); } @@ -893,16 +881,14 @@ static void fw_device_release(struct 
device *dev) { struct fw_device *device = fw_device(dev); struct fw_card *card = device->card; - unsigned long flags; /* * Take the card lock so we don't set this to NULL while a * FW_NODE_UPDATED callback is being handled or while the * bus manager work looks at this node. */ - spin_lock_irqsave(&card->lock, flags); - device->node->data = NULL; - spin_unlock_irqrestore(&card->lock, flags); + scoped_guard(spinlock_irqsave, &card->lock) + device->node->data = NULL; fw_node_put(device->node); kfree(device->config_rom); @@ -942,59 +928,6 @@ static void fw_device_update(struct work_struct *work) device_for_each_child(&device->device, NULL, update_unit); } -/* - * If a device was pending for deletion because its node went away but its - * bus info block and root directory header matches that of a newly discovered - * device, revive the existing fw_device. - * The newly allocated fw_device becomes obsolete instead. - */ -static int lookup_existing_device(struct device *dev, void *data) -{ - struct fw_device *old = fw_device(dev); - struct fw_device *new = data; - struct fw_card *card = new->card; - int match = 0; - - if (!is_fw_device(dev)) - return 0; - - down_read(&fw_device_rwsem); /* serialize config_rom access */ - spin_lock_irq(&card->lock); /* serialize node access */ - - if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 && - atomic_cmpxchg(&old->state, - FW_DEVICE_GONE, - FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { - struct fw_node *current_node = new->node; - struct fw_node *obsolete_node = old->node; - - new->node = obsolete_node; - new->node->data = new; - old->node = current_node; - old->node->data = old; - - old->max_speed = new->max_speed; - old->node_id = current_node->node_id; - smp_wmb(); /* update node_id before generation */ - old->generation = card->generation; - old->config_rom_retries = 0; - fw_notice(card, "rediscovered device %s\n", dev_name(dev)); - - old->workfn = fw_device_update; - fw_schedule_device_work(old, 0); - - if (current_node == card->root_node) - fw_schedule_bm_work(card, 0); - - match = 1; - } - - spin_unlock_irq(&card->lock); - up_read(&fw_device_rwsem); - - return match; -} - enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, }; static void set_broadcast_channel(struct fw_device *device, int generation) @@ -1055,13 +988,26 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen) return 0; } +static int compare_configuration_rom(struct device *dev, void *data) +{ + const struct fw_device *old = fw_device(dev); + const u32 *config_rom = data; + + if (!is_fw_device(dev)) + return 0; + + // Compare the bus information block and root_length/root_crc. + return !memcmp(old->config_rom, config_rom, 6 * 4); +} + static void fw_device_init(struct work_struct *work) { struct fw_device *device = container_of(work, struct fw_device, work.work); struct fw_card *card = device->card; - struct device *revived_dev; - int minor, ret; + struct device *found; + u32 minor; + int ret; /* * All failure paths here set node->data to NULL, so that we @@ -1087,24 +1033,62 @@ static void fw_device_init(struct work_struct *work) return; } - revived_dev = device_find_child(card->device, - device, lookup_existing_device); - if (revived_dev) { - put_device(revived_dev); - fw_device_release(&device->device); + // If a device was pending for deletion because its node went away but its bus info block + // and root directory header matches that of a newly discovered device, revive the + // existing fw_device. The newly allocated fw_device becomes obsolete instead. 
+ // + // serialize config_rom access. + scoped_guard(rwsem_read, &fw_device_rwsem) { + found = device_find_child(card->device, (void *)device->config_rom, + compare_configuration_rom); + } + if (found) { + struct fw_device *reused = fw_device(found); + + if (atomic_cmpxchg(&reused->state, + FW_DEVICE_GONE, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { + // serialize node access + scoped_guard(spinlock_irq, &card->lock) { + struct fw_node *current_node = device->node; + struct fw_node *obsolete_node = reused->node; + + device->node = obsolete_node; + device->node->data = device; + reused->node = current_node; + reused->node->data = reused; + + reused->max_speed = device->max_speed; + reused->node_id = current_node->node_id; + smp_wmb(); /* update node_id before generation */ + reused->generation = card->generation; + reused->config_rom_retries = 0; + fw_notice(card, "rediscovered device %s\n", + dev_name(found)); + + reused->workfn = fw_device_update; + fw_schedule_device_work(reused, 0); + + if (current_node == card->root_node) + fw_schedule_bm_work(card, 0); + } - return; + put_device(found); + fw_device_release(&device->device); + + return; + } + + put_device(found); } device_initialize(&device->device); fw_device_get(device); - down_write(&fw_device_rwsem); - minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS, - GFP_KERNEL); - up_write(&fw_device_rwsem); - if (minor < 0) + // The index of the allocated entry is used as the minor identifier of the device node. + ret = xa_alloc(&fw_device_xa, &minor, device, XA_LIMIT(0, MINORMASK), GFP_KERNEL); + if (ret < 0) goto error; device->device.bus = &fw_bus_type; @@ -1165,11 +1149,9 @@ static void fw_device_init(struct work_struct *work) return; error_with_cdev: - down_write(&fw_device_rwsem); - idr_remove(&fw_device_idr, minor); - up_write(&fw_device_rwsem); + xa_erase(&fw_device_xa, minor); error: - fw_device_put(device); /* fw_device_idr's reference */ + fw_device_put(device); // fw_device_xa's reference. put_device(&device->device); /* our reference */ } diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index b3eda38a36f310..a67493862c8564 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -209,23 +209,63 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx) } EXPORT_SYMBOL(fw_iso_context_queue_flush); +/** + * fw_iso_context_flush_completions() - process isochronous context in current process context. + * @ctx: the isochronous context + * + * Process the isochronous context in the current process context. The registered callback function + * is called when a queued packet buffer with the interrupt flag is completed, either after + * transmission in the IT context or after being filled in the IR context. Additionally, the + * callback function is also called for the last completed packet buffer. Furthermore, the + * callback function is called when the header buffer in the context becomes full. If it is + * required to process the context asynchronously, fw_iso_context_schedule_flush_completions() is + * available instead. + * + * Context: Process context. May sleep due to disable_work_sync(). + */ int fw_iso_context_flush_completions(struct fw_iso_context *ctx) { + int err; + trace_isoc_outbound_flush_completions(ctx); trace_isoc_inbound_single_flush_completions(ctx); trace_isoc_inbound_multiple_flush_completions(ctx); - return ctx->card->driver->flush_iso_completions(ctx); + might_sleep(); + + // Avoid dead lock due to programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work)) + return 0; + + disable_work_sync(&ctx->work); + + err = ctx->card->driver->flush_iso_completions(ctx); + + enable_work(&ctx->work); + + return err; } EXPORT_SYMBOL(fw_iso_context_flush_completions); int fw_iso_context_stop(struct fw_iso_context *ctx) { + int err; + trace_isoc_outbound_stop(ctx); trace_isoc_inbound_single_stop(ctx); trace_isoc_inbound_multiple_stop(ctx); - return ctx->card->driver->stop_iso(ctx); + might_sleep(); + + // Avoid dead lock due to programming mistake. + if (WARN_ON_ONCE(current_work() == &ctx->work)) + return 0; + + err = ctx->card->driver->stop_iso(ctx); + + cancel_work_sync(&ctx->work); + + return err; } EXPORT_SYMBOL(fw_iso_context_stop); @@ -375,9 +415,8 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ int irm_id, ret, c = -EINVAL; - spin_lock_irq(&card->lock); - irm_id = card->irm_node->node_id; - spin_unlock_irq(&card->lock); + scoped_guard(spinlock_irq, &card->lock) + irm_id = card->irm_node->node_id; if (channels_hi) c = manage_channel(card, irm_id, generation, channels_hi, diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index b4e637aa693214..6adadb11962e3d 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -39,7 +39,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color) node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid); node->port_count = port_count; - refcount_set(&node->ref_count, 1); + kref_init(&node->kref); INIT_LIST_HEAD(&node->link); return node; @@ -455,11 +455,10 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, int self_id_count, u32 *self_ids, bool bm_abdicate) { struct fw_node *local_node; - unsigned long flags; trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count); - spin_lock_irqsave(&card->lock, flags); + guard(spinlock_irqsave)(&card->lock); /* * If the selfID buffer is not the immediate successor of the @@ -500,7 +499,5 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, } else { update_tree(card, local_node); } - - spin_unlock_irqrestore(&card->lock, flags); } EXPORT_SYMBOL(fw_core_handle_bus_reset); diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 4d2fc1f31fec90..e141d24a764416 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -49,35 +48,31 @@ static int close_transaction(struct fw_transaction *transaction, struct fw_card u32 response_tstamp) { struct fw_transaction *t = NULL, *iter; - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(iter, &card->transaction_list, link) { - if (iter == transaction) { - if (!try_cancel_split_timeout(iter)) { - spin_unlock_irqrestore(&card->lock, flags); - goto timed_out; + scoped_guard(spinlock_irqsave, &card->lock) { + list_for_each_entry(iter, &card->transaction_list, link) { + if (iter == transaction) { + if (try_cancel_split_timeout(iter)) { + list_del_init(&iter->link); + card->tlabel_mask &= ~(1ULL << iter->tlabel); + t = iter; + } + break; } - list_del_init(&iter->link); - card->tlabel_mask &= ~(1ULL << iter->tlabel); - t = iter; - break; } } - spin_unlock_irqrestore(&card->lock, flags); - if (t) { - if (!t->with_tstamp) { - 
t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data); - } else { - t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, - NULL, 0, t->callback_data); - } - return 0; + if (!t) + return -ENOENT; + + if (!t->with_tstamp) { + t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data); + } else { + t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0, + t->callback_data); } - timed_out: - return -ENOENT; + return 0; } /* @@ -121,16 +116,13 @@ static void split_transaction_timeout_callback(struct timer_list *timer) { struct fw_transaction *t = from_timer(t, timer, split_timeout_timer); struct fw_card *card = t->card; - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); - if (list_empty(&t->link)) { - spin_unlock_irqrestore(&card->lock, flags); - return; + scoped_guard(spinlock_irqsave, &card->lock) { + if (list_empty(&t->link)) + return; + list_del(&t->link); + card->tlabel_mask &= ~(1ULL << t->tlabel); } - list_del(&t->link); - card->tlabel_mask &= ~(1ULL << t->tlabel); - spin_unlock_irqrestore(&card->lock, flags); if (!t->with_tstamp) { t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data); @@ -143,20 +135,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer) static void start_split_transaction_timeout(struct fw_transaction *t, struct fw_card *card) { - unsigned long flags; + guard(spinlock_irqsave)(&card->lock); - spin_lock_irqsave(&card->lock, flags); - - if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) { - spin_unlock_irqrestore(&card->lock, flags); + if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) return; - } t->is_split_transaction = true; mod_timer(&t->split_timeout_timer, jiffies + card->split_timeout_jiffies); - - spin_unlock_irqrestore(&card->lock, flags); } static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp); @@ -464,7 +450,6 @@ static void transmit_phy_packet_callback(struct fw_packet *packet, static struct fw_packet phy_config_packet = { .header_length = 12, - .header[0] = TCODE_LINK_INTERNAL << 4, .payload_length = 0, .speed = SCODE_100, .callback = transmit_phy_packet_callback, @@ -495,8 +480,9 @@ void fw_send_phy_config(struct fw_card *card, phy_packet_phy_config_set_gap_count(&data, gap_count); phy_packet_phy_config_set_gap_count_optimization(&data, true); - mutex_lock(&phy_config_mutex); + guard(mutex)(&phy_config_mutex); + async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL); phy_config_packet.header[1] = data; phy_config_packet.header[2] = ~data; phy_config_packet.generation = generation; @@ -508,8 +494,6 @@ void fw_send_phy_config(struct fw_card *card, card->driver->send_request(card, &phy_config_packet); wait_for_completion_timeout(&phy_config_done, timeout); - - mutex_unlock(&phy_config_mutex); } static struct fw_address_handler *lookup_overlapping_address_handler( @@ -598,7 +582,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, handler->length == 0) return -EINVAL; - spin_lock(&address_handler_list_lock); + guard(spinlock)(&address_handler_list_lock); handler->offset = region->start; while (handler->offset + handler->length <= region->end) { @@ -617,8 +601,6 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, } } - spin_unlock(&address_handler_list_lock); - return ret; } EXPORT_SYMBOL(fw_core_add_address_handler); @@ -634,9 +616,9 @@ EXPORT_SYMBOL(fw_core_add_address_handler); */ void 
fw_core_remove_address_handler(struct fw_address_handler *handler) { - spin_lock(&address_handler_list_lock); - list_del_rcu(&handler->link); - spin_unlock(&address_handler_list_lock); + scoped_guard(spinlock, &address_handler_list_lock) + list_del_rcu(&handler->link); + synchronize_rcu(); } EXPORT_SYMBOL(fw_core_remove_address_handler); @@ -927,16 +909,14 @@ static void handle_exclusive_region_request(struct fw_card *card, if (tcode == TCODE_LOCK_REQUEST) tcode = 0x10 + async_header_get_extended_tcode(p->header); - rcu_read_lock(); - handler = lookup_enclosing_address_handler(&address_handler_list, - offset, request->length); - if (handler) - handler->address_callback(card, request, - tcode, destination, source, - p->generation, offset, - request->data, request->length, - handler->callback_data); - rcu_read_unlock(); + scoped_guard(rcu) { + handler = lookup_enclosing_address_handler(&address_handler_list, offset, + request->length); + if (handler) + handler->address_callback(card, request, tcode, destination, source, + p->generation, offset, request->data, + request->length, handler->callback_data); + } if (!handler) fw_send_response(card, request, RCODE_ADDRESS_ERROR); @@ -969,17 +949,14 @@ static void handle_fcp_region_request(struct fw_card *card, return; } - rcu_read_lock(); - list_for_each_entry_rcu(handler, &address_handler_list, link) { - if (is_enclosing_handler(handler, offset, request->length)) - handler->address_callback(card, request, tcode, - destination, source, - p->generation, offset, - request->data, - request->length, - handler->callback_data); + scoped_guard(rcu) { + list_for_each_entry_rcu(handler, &address_handler_list, link) { + if (is_enclosing_handler(handler, offset, request->length)) + handler->address_callback(card, request, tcode, destination, source, + p->generation, offset, request->data, + request->length, handler->callback_data); + } } - rcu_read_unlock(); fw_send_response(card, request, RCODE_COMPLETE); } @@ -1024,7 +1001,6 @@ EXPORT_SYMBOL(fw_core_handle_request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) { struct fw_transaction *t = NULL, *iter; - unsigned long flags; u32 *data; size_t data_length; int tcode, tlabel, source, rcode; @@ -1063,26 +1039,23 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) break; } - spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(iter, &card->transaction_list, link) { - if (iter->node_id == source && iter->tlabel == tlabel) { - if (!try_cancel_split_timeout(iter)) { - spin_unlock_irqrestore(&card->lock, flags); - goto timed_out; + scoped_guard(spinlock_irqsave, &card->lock) { + list_for_each_entry(iter, &card->transaction_list, link) { + if (iter->node_id == source && iter->tlabel == tlabel) { + if (try_cancel_split_timeout(iter)) { + list_del_init(&iter->link); + card->tlabel_mask &= ~(1ULL << iter->tlabel); + t = iter; + } + break; } - list_del_init(&iter->link); - card->tlabel_mask &= ~(1ULL << iter->tlabel); - t = iter; - break; } } - spin_unlock_irqrestore(&card->lock, flags); trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack, p->timestamp, p->header, data, data_length / 4); if (!t) { - timed_out: fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", source, tlabel); return; @@ -1186,7 +1159,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, int reg = offset & ~CSR_REGISTER_BASE; __be32 *data = payload; int rcode = RCODE_COMPLETE; - unsigned long flags; switch (reg) { case 
CSR_PRIORITY_BUDGET: @@ -1228,10 +1200,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, if (tcode == TCODE_READ_QUADLET_REQUEST) { *data = cpu_to_be32(card->split_timeout_hi); } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { - spin_lock_irqsave(&card->lock, flags); + guard(spinlock_irqsave)(&card->lock); + card->split_timeout_hi = be32_to_cpu(*data) & 7; update_split_timeout(card); - spin_unlock_irqrestore(&card->lock, flags); } else { rcode = RCODE_TYPE_ERROR; } @@ -1241,11 +1213,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, if (tcode == TCODE_READ_QUADLET_REQUEST) { *data = cpu_to_be32(card->split_timeout_lo); } else if (tcode == TCODE_WRITE_QUADLET_REQUEST) { - spin_lock_irqsave(&card->lock, flags); - card->split_timeout_lo = - be32_to_cpu(*data) & 0xfff80000; + guard(spinlock_irqsave)(&card->lock); + + card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000; update_split_timeout(card); - spin_unlock_irqrestore(&card->lock, flags); } else { rcode = RCODE_TYPE_ERROR; } @@ -1387,7 +1358,7 @@ static void __exit fw_core_cleanup(void) unregister_chrdev(fw_cdev_major, "firewire"); bus_unregister(&fw_bus_type); destroy_workqueue(fw_workqueue); - idr_destroy(&fw_device_idr); + xa_destroy(&fw_device_xa); } module_init(fw_core_init); diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 7c36d2628e37af..0ae2c84ecafedb 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include @@ -115,8 +115,8 @@ struct fw_card_driver { void fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, struct device *device); -int fw_card_add(struct fw_card *card, - u32 max_receive, u32 link_speed, u64 guid); +int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid, + unsigned int supported_isoc_contexts); void fw_core_remove_card(struct fw_card *card); int fw_compute_block_crc(__be32 *block); void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); @@ -133,7 +133,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p); /* -device */ extern struct rw_semaphore fw_device_rwsem; -extern struct idr fw_device_idr; +extern struct xarray fw_device_xa; extern int fw_cdev_major; static inline struct fw_device *fw_device_get(struct fw_device *device) @@ -159,6 +159,11 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count); int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card, enum dma_data_direction direction); +static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func) +{ + INIT_WORK(&ctx->work, func); +} + /* -topology */ @@ -183,7 +188,8 @@ struct fw_node { * local node to this node. */ u8 max_depth:4; /* Maximum depth to any leaf node */ u8 max_hops:4; /* Max hops in this sub tree */ - refcount_t ref_count; + + struct kref kref; /* For serializing node topology into a list. 
*/ struct list_head link; @@ -196,15 +202,21 @@ struct fw_node { static inline struct fw_node *fw_node_get(struct fw_node *node) { - refcount_inc(&node->ref_count); + kref_get(&node->kref); return node; } +static void release_node(struct kref *kref) +{ + struct fw_node *node = container_of(kref, struct fw_node, kref); + + kfree(node); +} + static inline void fw_node_put(struct fw_node *node) { - if (refcount_dec_and_test(&node->ref_count)) - kfree(node); + kref_put(&node->kref, release_node); } void fw_core_handle_bus_reset(struct fw_card *card, int node_id, diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 7a4d1a478e33e2..1bf0e15c15408e 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/firewire/ohci-serdes-test.c b/drivers/firewire/ohci-serdes-test.c index 304a09ff528eb6..258f668619ef1c 100644 --- a/drivers/firewire/ohci-serdes-test.c +++ b/drivers/firewire/ohci-serdes-test.c @@ -40,9 +40,75 @@ static void test_self_id_receive_buffer_deserialization(struct kunit *test) KUNIT_EXPECT_EQ(test, 0xf38b, timestamp); } +static void test_at_data_serdes(struct kunit *test) +{ + static const __le32 expected[] = { + cpu_to_le32(0x00020e80), + cpu_to_le32(0xffc2ffff), + cpu_to_le32(0xe0000000), + }; + __le32 quadlets[] = {0, 0, 0}; + bool has_src_bus_id = ohci1394_at_data_get_src_bus_id(expected); + unsigned int speed = ohci1394_at_data_get_speed(expected); + unsigned int tlabel = ohci1394_at_data_get_tlabel(expected); + unsigned int retry = ohci1394_at_data_get_retry(expected); + unsigned int tcode = ohci1394_at_data_get_tcode(expected); + unsigned int destination_id = ohci1394_at_data_get_destination_id(expected); + u64 destination_offset = ohci1394_at_data_get_destination_offset(expected); + + KUNIT_EXPECT_FALSE(test, has_src_bus_id); + KUNIT_EXPECT_EQ(test, 0x02, speed); + KUNIT_EXPECT_EQ(test, 0x03, tlabel); + KUNIT_EXPECT_EQ(test, 0x02, retry); + KUNIT_EXPECT_EQ(test, 0x08, tcode); + + ohci1394_at_data_set_src_bus_id(quadlets, has_src_bus_id); + ohci1394_at_data_set_speed(quadlets, speed); + ohci1394_at_data_set_tlabel(quadlets, tlabel); + ohci1394_at_data_set_retry(quadlets, retry); + ohci1394_at_data_set_tcode(quadlets, tcode); + ohci1394_at_data_set_destination_id(quadlets, destination_id); + ohci1394_at_data_set_destination_offset(quadlets, destination_offset); + + KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected)); +} + +static void test_it_data_serdes(struct kunit *test) +{ + static const __le32 expected[] = { + cpu_to_le32(0x000349a7), + cpu_to_le32(0x02300000), + }; + __le32 quadlets[] = {0, 0}; + unsigned int scode = ohci1394_it_data_get_speed(expected); + unsigned int tag = ohci1394_it_data_get_tag(expected); + unsigned int channel = ohci1394_it_data_get_channel(expected); + unsigned int tcode = ohci1394_it_data_get_tcode(expected); + unsigned int sync = ohci1394_it_data_get_sync(expected); + unsigned int data_length = ohci1394_it_data_get_data_length(expected); + + KUNIT_EXPECT_EQ(test, 0x03, scode); + KUNIT_EXPECT_EQ(test, 0x01, tag); + KUNIT_EXPECT_EQ(test, 0x09, channel); + KUNIT_EXPECT_EQ(test, 0x0a, tcode); + KUNIT_EXPECT_EQ(test, 0x7, sync); + KUNIT_EXPECT_EQ(test, 0x0230, data_length); + + ohci1394_it_data_set_speed(quadlets, scode); + ohci1394_it_data_set_tag(quadlets, tag); + ohci1394_it_data_set_channel(quadlets, channel); + ohci1394_it_data_set_tcode(quadlets, tcode); + ohci1394_it_data_set_sync(quadlets, sync); + 
ohci1394_it_data_set_data_length(quadlets, data_length); + + KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected)); +} + static struct kunit_case ohci_serdes_test_cases[] = { KUNIT_CASE(test_self_id_count_register_deserialization), KUNIT_CASE(test_self_id_receive_buffer_deserialization), + KUNIT_CASE(test_at_data_serdes), + KUNIT_CASE(test_it_data_serdes), {} }; diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 314a29c0fd3e9b..7ee55c2804dedf 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -50,7 +50,6 @@ static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk); #define CREATE_TRACE_POINTS #include -#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args) #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args) #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args) @@ -77,7 +76,7 @@ struct descriptor { __le32 branch_address; __le16 res_count; __le16 transfer_status; -} __attribute__((aligned(16))); +} __aligned(16); #define CONTROL_SET(regs) (regs) #define CONTROL_CLEAR(regs) ((regs) + 4) @@ -162,13 +161,6 @@ struct context { struct tasklet_struct tasklet; }; -#define IT_HEADER_SY(v) ((v) << 0) -#define IT_HEADER_TCODE(v) ((v) << 4) -#define IT_HEADER_CHANNEL(v) ((v) << 8) -#define IT_HEADER_TAG(v) ((v) << 14) -#define IT_HEADER_SPEED(v) ((v) << 16) -#define IT_HEADER_DATA_LENGTH(v) ((v) << 16) - struct iso_context { struct fw_iso_context base; struct context context; @@ -182,7 +174,7 @@ struct iso_context { u8 tags; }; -#define CONFIG_ROM_SIZE 1024 +#define CONFIG_ROM_SIZE (CSR_CONFIG_ROM_END - CSR_CONFIG_ROM) struct fw_ohci { struct fw_card card; @@ -264,7 +256,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) #define OHCI1394_REGISTER_SIZE 0x800 #define OHCI1394_PCI_HCI_Control 0x40 #define SELF_ID_BUF_SIZE 0x800 -#define OHCI_TCODE_PHY_PACKET 0x0e #define OHCI_VERSION_1_1 0x010010 static char ohci_driver_name[] = KBUILD_MODNAME; @@ -405,7 +396,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" static int param_debug; module_param_named(debug, param_debug, int, 0644); -MODULE_PARM_DESC(debug, "Verbose logging (default = 0" +MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. 
(default = 0" ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) @@ -532,20 +523,28 @@ static const char *evts[] = { [0x1e] = "ack_type_error", [0x1f] = "-reserved-", [0x20] = "pending/cancelled", }; -static const char *tcodes[] = { - [0x0] = "QW req", [0x1] = "BW req", - [0x2] = "W resp", [0x3] = "-reserved-", - [0x4] = "QR req", [0x5] = "BR req", - [0x6] = "QR resp", [0x7] = "BR resp", - [0x8] = "cycle start", [0x9] = "Lk req", - [0xa] = "async stream packet", [0xb] = "Lk resp", - [0xc] = "-reserved-", [0xd] = "-reserved-", - [0xe] = "link internal", [0xf] = "-reserved-", -}; static void log_ar_at_event(struct fw_ohci *ohci, char dir, int speed, u32 *header, int evt) { + static const char *const tcodes[] = { + [TCODE_WRITE_QUADLET_REQUEST] = "QW req", + [TCODE_WRITE_BLOCK_REQUEST] = "BW req", + [TCODE_WRITE_RESPONSE] = "W resp", + [0x3] = "-reserved-", + [TCODE_READ_QUADLET_REQUEST] = "QR req", + [TCODE_READ_BLOCK_REQUEST] = "BR req", + [TCODE_READ_QUADLET_RESPONSE] = "QR resp", + [TCODE_READ_BLOCK_RESPONSE] = "BR resp", + [TCODE_CYCLE_START] = "cycle start", + [TCODE_LOCK_REQUEST] = "Lk req", + [TCODE_STREAM_DATA] = "async stream packet", + [TCODE_LOCK_RESPONSE] = "Lk resp", + [0xc] = "-reserved-", + [0xd] = "-reserved-", + [TCODE_LINK_INTERNAL] = "link internal", + [0xf] = "-reserved-", + }; int tcode = async_header_get_tcode(header); char specific[12]; @@ -586,7 +585,7 @@ static void log_ar_at_event(struct fw_ohci *ohci, ohci_notice(ohci, "A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); break; - case 0xe: + case TCODE_LINK_INTERNAL: ohci_notice(ohci, "A%c %s, PHY %08x %08x\n", dir, evts[evt], header[1], header[2]); break; @@ -713,26 +712,20 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) static int ohci_read_phy_reg(struct fw_card *card, int addr) { struct fw_ohci *ohci = fw_ohci(card); - int ret; - mutex_lock(&ohci->phy_reg_mutex); - ret = read_phy_reg(ohci, addr); - mutex_unlock(&ohci->phy_reg_mutex); + guard(mutex)(&ohci->phy_reg_mutex); - return ret; + return read_phy_reg(ohci, addr); } static int ohci_update_phy_reg(struct fw_card *card, int addr, int clear_bits, int set_bits) { struct fw_ohci *ohci = fw_ohci(card); - int ret; - mutex_lock(&ohci->phy_reg_mutex); - ret = update_phy_reg(ohci, addr, clear_bits, set_bits); - mutex_unlock(&ohci->phy_reg_mutex); + guard(mutex)(&ohci->phy_reg_mutex); - return ret; + return update_phy_reg(ohci, addr, clear_bits, set_bits); } static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i) @@ -939,7 +932,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) case TCODE_WRITE_RESPONSE: case TCODE_READ_QUADLET_REQUEST: - case OHCI_TCODE_PHY_PACKET: + case TCODE_LINK_INTERNAL: p.header_length = 12; p.payload_length = 0; break; @@ -967,7 +960,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) * Several controllers, notably from NEC and VIA, forget to * write ack_complete status at PHY packet reception. 
*/ - if (evt == OHCI1394_evt_no_status && tcode == OHCI1394_phy_tcode) + if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL) p.ack = ACK_COMPLETE; /* @@ -1148,9 +1141,8 @@ static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) return d + z - 1; } -static void context_tasklet(unsigned long data) +static void context_retire_descriptors(struct context *ctx) { - struct context *ctx = (struct context *) data; struct descriptor *d, *last; u32 address; int z; @@ -1179,18 +1171,31 @@ static void context_tasklet(unsigned long data) break; if (old_desc != desc) { - /* If we've advanced to the next buffer, move the - * previous buffer to the free list. */ - unsigned long flags; + // If we've advanced to the next buffer, move the previous buffer to the + // free list. old_desc->used = 0; - spin_lock_irqsave(&ctx->ohci->lock, flags); + guard(spinlock_irqsave)(&ctx->ohci->lock); list_move_tail(&old_desc->list, &ctx->buffer_list); - spin_unlock_irqrestore(&ctx->ohci->lock, flags); } ctx->last = last; } } +static void context_tasklet(unsigned long data) +{ + struct context *ctx = (struct context *) data; + + context_retire_descriptors(ctx); +} + +static void ohci_isoc_context_work(struct work_struct *work) +{ + struct fw_iso_context *base = container_of(work, struct fw_iso_context, work); + struct iso_context *isoc_ctx = container_of(base, struct iso_context, base); + + context_retire_descriptors(&isoc_ctx->context); +} + /* * Allocate a new buffer and add it to the list of free buffers for this * context. Must be called with ohci->lock held. @@ -1402,12 +1407,6 @@ static int at_context_queue_packet(struct context *ctx, d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); d[0].res_count = cpu_to_le16(packet->timestamp); - /* - * The DMA format for asynchronous link packets is different - * from the IEEE1394 layout, so shift the fields around - * accordingly. 
- */ - tcode = async_header_get_tcode(packet->header); header = (__le32 *) &d[1]; switch (tcode) { @@ -1420,11 +1419,21 @@ static int at_context_queue_packet(struct context *ctx, case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_REQUEST: case TCODE_LOCK_RESPONSE: - header[0] = cpu_to_le32((packet->header[0] & 0xffff) | - (packet->speed << 16)); - header[1] = cpu_to_le32((packet->header[1] & 0xffff) | - (packet->header[0] & 0xffff0000)); - header[2] = cpu_to_le32(packet->header[2]); + ohci1394_at_data_set_src_bus_id(header, false); + ohci1394_at_data_set_speed(header, packet->speed); + ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header)); + ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header)); + ohci1394_at_data_set_tcode(header, tcode); + + ohci1394_at_data_set_destination_id(header, + async_header_get_destination(packet->header)); + + if (ctx == &ctx->ohci->at_response_ctx) { + ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header)); + } else { + ohci1394_at_data_set_destination_offset(header, + async_header_get_offset(packet->header)); + } if (tcode_is_block_packet(tcode)) header[3] = cpu_to_le32(packet->header[3]); @@ -1433,10 +1442,10 @@ static int at_context_queue_packet(struct context *ctx, d[0].req_count = cpu_to_le16(packet->header_length); break; - case TCODE_LINK_INTERNAL: - header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | - (packet->speed << 16)); + ohci1394_at_data_set_speed(header, packet->speed); + ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL); + header[1] = cpu_to_le32(packet->header[1]); header[2] = cpu_to_le32(packet->header[2]); d[0].req_count = cpu_to_le16(12); @@ -1446,9 +1455,14 @@ static int at_context_queue_packet(struct context *ctx, break; case TCODE_STREAM_DATA: - header[0] = cpu_to_le32((packet->header[0] & 0xffff) | - (packet->speed << 16)); - header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); + ohci1394_it_data_set_speed(header, packet->speed); + ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0])); + ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0])); + ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA); + ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0])); + + ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0])); + d[0].req_count = cpu_to_le16(8); break; @@ -1873,13 +1887,15 @@ static int get_status_for_port(struct fw_ohci *ohci, int port_index, { int reg; - mutex_lock(&ohci->phy_reg_mutex); - reg = write_phy_reg(ohci, 7, port_index); - if (reg >= 0) + scoped_guard(mutex, &ohci->phy_reg_mutex) { + reg = write_phy_reg(ohci, 7, port_index); + if (reg < 0) + return reg; + reg = read_phy_reg(ohci, 8); - mutex_unlock(&ohci->phy_reg_mutex); - if (reg < 0) - return reg; + if (reg < 0) + return reg; + } switch (reg & 0x0f) { case 0x06: @@ -1917,29 +1933,36 @@ static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id, return i; } -static bool initiated_reset(struct fw_ohci *ohci) +static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset) { int reg; - int ret = false; - mutex_lock(&ohci->phy_reg_mutex); - reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */ - if (reg >= 0) { - reg = read_phy_reg(ohci, 8); - reg |= 0x40; - reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */ - if (reg >= 0) { - reg = read_phy_reg(ohci, 12); /* read register 12 */ - if (reg >= 0) { - if ((reg & 0x08) == 0x08) { - /* bit 3 indicates "initiated reset" */ - ret = true; 
- } - } - } - } - mutex_unlock(&ohci->phy_reg_mutex); - return ret; + guard(mutex)(&ohci->phy_reg_mutex); + + // Select page 7 + reg = write_phy_reg(ohci, 7, 0xe0); + if (reg < 0) + return reg; + + reg = read_phy_reg(ohci, 8); + if (reg < 0) + return reg; + + // set PMODE bit + reg |= 0x40; + reg = write_phy_reg(ohci, 8, reg); + if (reg < 0) + return reg; + + // read register 12 + reg = read_phy_reg(ohci, 12); + if (reg < 0) + return reg; + + // bit 3 indicates "initiated reset" + *is_initiated_reset = !!((reg & 0x08) == 0x08); + + return 0; } /* @@ -1949,7 +1972,8 @@ static bool initiated_reset(struct fw_ohci *ohci) */ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) { - int reg, i, pos; + int reg, i, pos, err; + bool is_initiated_reset; u32 self_id = 0; // link active 1, speed 3, bridge 0, contender 1, more packets 0. @@ -1978,7 +2002,6 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) for (i = 0; i < 3; i++) { enum phy_packet_self_id_port_status status; - int err; err = get_status_for_port(ohci, i, &status); if (err < 0) @@ -1987,7 +2010,10 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) self_id_sequence_set_port_status(&self_id, 1, i, status); } - phy_packet_self_id_zero_set_initiated_reset(&self_id, initiated_reset(ohci)); + err = detect_initiated_reset(ohci, &is_initiated_reset); + if (err < 0) + return err; + phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset); pos = get_self_id_pos(ohci, self_id, self_id_count); if (pos >= 0) { @@ -2112,14 +2138,12 @@ static void bus_reset_work(struct work_struct *work) return; } - /* FIXME: Document how the locking works. */ - spin_lock_irq(&ohci->lock); - - ohci->generation = -1; /* prevent AT packet queueing */ - context_stop(&ohci->at_request_ctx); - context_stop(&ohci->at_response_ctx); - - spin_unlock_irq(&ohci->lock); + // FIXME: Document how the locking works. + scoped_guard(spinlock_irq, &ohci->lock) { + ohci->generation = -1; // prevent AT packet queueing + context_stop(&ohci->at_request_ctx); + context_stop(&ohci->at_response_ctx); + } /* * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent @@ -2129,53 +2153,42 @@ static void bus_reset_work(struct work_struct *work) at_context_flush(&ohci->at_request_ctx); at_context_flush(&ohci->at_response_ctx); - spin_lock_irq(&ohci->lock); - - ohci->generation = generation; - reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); - reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); - - if (ohci->quirks & QUIRK_RESET_PACKET) - ohci->request_generation = generation; - - /* - * This next bit is unrelated to the AT context stuff but we - * have to do it under the spinlock also. If a new config rom - * was set up before this reset, the old one is now no longer - * in use and we can free it. Update the config rom pointers - * to point to the current config rom and clear the - * next_config_rom pointer so a new update can take place. 
- */ - - if (ohci->next_config_rom != NULL) { - if (ohci->next_config_rom != ohci->config_rom) { - free_rom = ohci->config_rom; - free_rom_bus = ohci->config_rom_bus; + scoped_guard(spinlock_irq, &ohci->lock) { + ohci->generation = generation; + reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); + reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); + + if (ohci->quirks & QUIRK_RESET_PACKET) + ohci->request_generation = generation; + + // This next bit is unrelated to the AT context stuff but we have to do it under the + // spinlock also. If a new config rom was set up before this reset, the old one is + // now no longer in use and we can free it. Update the config rom pointers to point + // to the current config rom and clear the next_config_rom pointer so a new update + // can take place. + if (ohci->next_config_rom != NULL) { + if (ohci->next_config_rom != ohci->config_rom) { + free_rom = ohci->config_rom; + free_rom_bus = ohci->config_rom_bus; + } + ohci->config_rom = ohci->next_config_rom; + ohci->config_rom_bus = ohci->next_config_rom_bus; + ohci->next_config_rom = NULL; + + // Restore config_rom image and manually update config_rom registers. + // Writing the header quadlet will indicate that the config rom is ready, + // so we do that last. + reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2])); + ohci->config_rom[0] = ohci->next_header; + reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header)); } - ohci->config_rom = ohci->next_config_rom; - ohci->config_rom_bus = ohci->next_config_rom_bus; - ohci->next_config_rom = NULL; - - /* - * Restore config_rom image and manually update - * config_rom registers. Writing the header quadlet - * will indicate that the config rom is ready, so we - * do that last. 
- */ - reg_write(ohci, OHCI1394_BusOptions, - be32_to_cpu(ohci->config_rom[2])); - ohci->config_rom[0] = ohci->next_header; - reg_write(ohci, OHCI1394_ConfigROMhdr, - be32_to_cpu(ohci->next_header)); - } - if (param_remote_dma) { - reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); - reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); + if (param_remote_dma) { + reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); + reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); + } } - spin_unlock_irq(&ohci->lock); - if (free_rom) dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus); @@ -2198,6 +2211,11 @@ static irqreturn_t irq_handler(int irq, void *data) if (!event || !~event) return IRQ_NONE; + if (unlikely(param_debug > 0)) { + dev_notice_ratelimited(ohci->card.device, + "The debug parameter is superceded by tracepoints events, and deprecated."); + } + /* * busReset and postedWriteErr events must not be cleared yet * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) @@ -2238,8 +2256,7 @@ static irqreturn_t irq_handler(int irq, void *data) while (iso_event) { i = ffs(iso_event) - 1; - tasklet_schedule( - &ohci->ir_context_list[i].context.tasklet); + fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base); iso_event &= ~(1 << i); } } @@ -2250,8 +2267,7 @@ static irqreturn_t irq_handler(int irq, void *data) while (iso_event) { i = ffs(iso_event) - 1; - tasklet_schedule( - &ohci->it_context_list[i].context.tasklet); + fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base); iso_event &= ~(1 << i); } } @@ -2264,13 +2280,11 @@ static irqreturn_t irq_handler(int irq, void *data) reg_read(ohci, OHCI1394_PostedWriteAddressLo); reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_postedWriteErr); - if (printk_ratelimit()) - ohci_err(ohci, "PCI posted write error\n"); + dev_err_ratelimited(ohci->card.device, "PCI posted write error\n"); } if (unlikely(event & OHCI1394_cycleTooLong)) { - if (printk_ratelimit()) - ohci_notice(ohci, "isochronous cycle too long\n"); + dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n"); reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_cycleMaster); } @@ -2282,17 +2296,15 @@ static irqreturn_t irq_handler(int irq, void *data) * stop active cycleMatch iso contexts now and restart * them at least two cycles later. (FIXME?) */ - if (printk_ratelimit()) - ohci_notice(ohci, "isochronous cycle inconsistent\n"); + dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n"); } if (unlikely(event & OHCI1394_unrecoverableError)) handle_dead_contexts(ohci); if (event & OHCI1394_cycle64Seconds) { - spin_lock(&ohci->lock); + guard(spinlock)(&ohci->lock); update_bus_time(ohci); - spin_unlock(&ohci->lock); } else flush_writes(ohci); @@ -2617,33 +2629,26 @@ static int ohci_set_config_rom(struct fw_card *card, if (next_config_rom == NULL) return -ENOMEM; - spin_lock_irq(&ohci->lock); - - /* - * If there is not an already pending config_rom update, - * push our new allocation into the ohci->next_config_rom - * and then mark the local variable as null so that we - * won't deallocate the new buffer. - * - * OTOH, if there is a pending config_rom update, just - * use that buffer with the new config_rom data, and - * let this routine free the unused DMA allocation. 
- */ - - if (ohci->next_config_rom == NULL) { - ohci->next_config_rom = next_config_rom; - ohci->next_config_rom_bus = next_config_rom_bus; - next_config_rom = NULL; - } - - copy_config_rom(ohci->next_config_rom, config_rom, length); + scoped_guard(spinlock_irq, &ohci->lock) { + // If there is not an already pending config_rom update, push our new allocation + // into the ohci->next_config_rom and then mark the local variable as null so that + // we won't deallocate the new buffer. + // + // OTOH, if there is a pending config_rom update, just use that buffer with the new + // config_rom data, and let this routine free the unused DMA allocation. + if (ohci->next_config_rom == NULL) { + ohci->next_config_rom = next_config_rom; + ohci->next_config_rom_bus = next_config_rom_bus; + next_config_rom = NULL; + } - ohci->next_header = config_rom[0]; - ohci->next_config_rom[0] = 0; + copy_config_rom(ohci->next_config_rom, config_rom, length); - reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); + ohci->next_header = config_rom[0]; + ohci->next_config_rom[0] = 0; - spin_unlock_irq(&ohci->lock); + reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); + } /* If we didn't use the DMA allocation, delete it. */ if (next_config_rom != NULL) { @@ -2713,7 +2718,6 @@ static int ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) { struct fw_ohci *ohci = fw_ohci(card); - unsigned long flags; int n, ret = 0; if (param_remote_dma) @@ -2724,12 +2728,10 @@ static int ohci_enable_phys_dma(struct fw_card *card, * interrupt bit. Clear physReqResourceAllBuses on bus reset. */ - spin_lock_irqsave(&ohci->lock, flags); + guard(spinlock_irqsave)(&ohci->lock); - if (ohci->generation != generation) { - ret = -ESTALE; - goto out; - } + if (ohci->generation != generation) + return -ESTALE; /* * Note, if the node ID contains a non-local bus ID, physical DMA is @@ -2743,8 +2745,6 @@ static int ohci_enable_phys_dma(struct fw_card *card, reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); flush_writes(ohci); - out: - spin_unlock_irqrestore(&ohci->lock, flags); return ret; } @@ -2752,7 +2752,6 @@ static int ohci_enable_phys_dma(struct fw_card *card, static u32 ohci_read_csr(struct fw_card *card, int csr_offset) { struct fw_ohci *ohci = fw_ohci(card); - unsigned long flags; u32 value; switch (csr_offset) { @@ -2776,16 +2775,14 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset) return get_cycle_time(ohci); case CSR_BUS_TIME: - /* - * We might be called just after the cycle timer has wrapped - * around but just before the cycle64Seconds handler, so we - * better check here, too, if the bus time needs to be updated. - */ - spin_lock_irqsave(&ohci->lock, flags); - value = update_bus_time(ohci); - spin_unlock_irqrestore(&ohci->lock, flags); - return value; + { + // We might be called just after the cycle timer has wrapped around but just before + // the cycle64Seconds handler, so we better check here, too, if the bus time needs + // to be updated. 
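// A note on the conversion pattern used here and in the other hunks of this
// file: guard(spinlock_irqsave)(&ohci->lock) comes from the scope-based guards
// in <linux/cleanup.h> (instantiated for spinlocks in <linux/spinlock.h>) and
// releases the lock automatically when the enclosing scope ends. That is why
// the explicit spin_unlock_irqrestore() calls and the local "flags" variables
// disappear, as in this minimal sketch of the CSR_BUS_TIME read below:
//
//	{
//		guard(spinlock_irqsave)(&ohci->lock);
//		return update_bus_time(ohci);	// lock dropped at the closing brace
//	}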
+ guard(spinlock_irqsave)(&ohci->lock); + return update_bus_time(ohci); + } case CSR_BUSY_TIMEOUT: value = reg_read(ohci, OHCI1394_ATRetries); return (value >> 4) & 0x0ffff00f; @@ -2803,7 +2800,6 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset) static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) { struct fw_ohci *ohci = fw_ohci(card); - unsigned long flags; switch (csr_offset) { case CSR_STATE_CLEAR: @@ -2839,12 +2835,11 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) break; case CSR_BUS_TIME: - spin_lock_irqsave(&ohci->lock, flags); - ohci->bus_time = (update_bus_time(ohci) & 0x40) | - (value & ~0x7f); - spin_unlock_irqrestore(&ohci->lock, flags); + { + guard(spinlock_irqsave)(&ohci->lock); + ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f); break; - + } case CSR_BUSY_TIMEOUT: value = (value & 0xf) | ((value & 0xf) << 4) | ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); @@ -2932,7 +2927,7 @@ static int handle_ir_packet_per_buffer(struct context *context, copy_iso_headers(ctx, (u32 *) (last + 1)); if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) - flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ); + flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT); return 1; } @@ -2968,7 +2963,7 @@ static int handle_ir_buffer_fill(struct context *context, if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { trace_isoc_inbound_multiple_completions(&ctx->base, completed, - FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ); + FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT); ctx->base.callback.mc(&ctx->base, buffer_dma + completed, @@ -3064,7 +3059,7 @@ static int handle_it_packet(struct context *context, ctx->header_length += 4; if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) - flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ); + flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT); return 1; } @@ -3090,55 +3085,53 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, u32 *mask, regs; int index, ret = -EBUSY; - spin_lock_irq(&ohci->lock); + scoped_guard(spinlock_irq, &ohci->lock) { + switch (type) { + case FW_ISO_CONTEXT_TRANSMIT: + mask = &ohci->it_context_mask; + callback = handle_it_packet; + index = ffs(*mask) - 1; + if (index >= 0) { + *mask &= ~(1 << index); + regs = OHCI1394_IsoXmitContextBase(index); + ctx = &ohci->it_context_list[index]; + } + break; - switch (type) { - case FW_ISO_CONTEXT_TRANSMIT: - mask = &ohci->it_context_mask; - callback = handle_it_packet; - index = ffs(*mask) - 1; - if (index >= 0) { - *mask &= ~(1 << index); - regs = OHCI1394_IsoXmitContextBase(index); - ctx = &ohci->it_context_list[index]; - } - break; + case FW_ISO_CONTEXT_RECEIVE: + channels = &ohci->ir_context_channels; + mask = &ohci->ir_context_mask; + callback = handle_ir_packet_per_buffer; + index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; + if (index >= 0) { + *channels &= ~(1ULL << channel); + *mask &= ~(1 << index); + regs = OHCI1394_IsoRcvContextBase(index); + ctx = &ohci->ir_context_list[index]; + } + break; - case FW_ISO_CONTEXT_RECEIVE: - channels = &ohci->ir_context_channels; - mask = &ohci->ir_context_mask; - callback = handle_ir_packet_per_buffer; - index = *channels & 1ULL << channel ? 
ffs(*mask) - 1 : -1; - if (index >= 0) { - *channels &= ~(1ULL << channel); - *mask &= ~(1 << index); - regs = OHCI1394_IsoRcvContextBase(index); - ctx = &ohci->ir_context_list[index]; - } - break; + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + mask = &ohci->ir_context_mask; + callback = handle_ir_buffer_fill; + index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; + if (index >= 0) { + ohci->mc_allocated = true; + *mask &= ~(1 << index); + regs = OHCI1394_IsoRcvContextBase(index); + ctx = &ohci->ir_context_list[index]; + } + break; - case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: - mask = &ohci->ir_context_mask; - callback = handle_ir_buffer_fill; - index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; - if (index >= 0) { - ohci->mc_allocated = true; - *mask &= ~(1 << index); - regs = OHCI1394_IsoRcvContextBase(index); - ctx = &ohci->ir_context_list[index]; + default: + index = -1; + ret = -ENOSYS; } - break; - default: - index = -1; - ret = -ENOSYS; + if (index < 0) + return ERR_PTR(ret); } - spin_unlock_irq(&ohci->lock); - - if (index < 0) - return ERR_PTR(ret); - memset(ctx, 0, sizeof(*ctx)); ctx->header_length = 0; ctx->header = (void *) __get_free_page(GFP_KERNEL); @@ -3149,6 +3142,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, ret = context_init(&ctx->context, ohci, regs, callback); if (ret < 0) goto out_with_header; + fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work); if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) { set_multichannel_mask(ohci, 0); @@ -3160,20 +3154,18 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, out_with_header: free_page((unsigned long)ctx->header); out: - spin_lock_irq(&ohci->lock); - - switch (type) { - case FW_ISO_CONTEXT_RECEIVE: - *channels |= 1ULL << channel; - break; + scoped_guard(spinlock_irq, &ohci->lock) { + switch (type) { + case FW_ISO_CONTEXT_RECEIVE: + *channels |= 1ULL << channel; + break; - case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: - ohci->mc_allocated = false; - break; + case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + ohci->mc_allocated = false; + break; + } + *mask |= 1 << index; } - *mask |= 1 << index; - - spin_unlock_irq(&ohci->lock); return ERR_PTR(ret); } @@ -3248,7 +3240,6 @@ static int ohci_stop_iso(struct fw_iso_context *base) } flush_writes(ohci); context_stop(&ctx->context); - tasklet_kill(&ctx->context.tasklet); return 0; } @@ -3257,14 +3248,13 @@ static void ohci_free_iso_context(struct fw_iso_context *base) { struct fw_ohci *ohci = fw_ohci(base->card); struct iso_context *ctx = container_of(base, struct iso_context, base); - unsigned long flags; int index; ohci_stop_iso(base); context_release(&ctx->context); free_page((unsigned long)ctx->header); - spin_lock_irqsave(&ohci->lock, flags); + guard(spinlock_irqsave)(&ohci->lock); switch (base->type) { case FW_ISO_CONTEXT_TRANSMIT: @@ -3286,38 +3276,29 @@ static void ohci_free_iso_context(struct fw_iso_context *base) ohci->mc_allocated = false; break; } - - spin_unlock_irqrestore(&ohci->lock, flags); } static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) { struct fw_ohci *ohci = fw_ohci(base->card); - unsigned long flags; - int ret; switch (base->type) { case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + { + guard(spinlock_irqsave)(&ohci->lock); - spin_lock_irqsave(&ohci->lock, flags); - - /* Don't allow multichannel to grab other contexts' channels. */ + // Don't allow multichannel to grab other contexts' channels. 
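// The mask test below can be read as follows: ohci->ir_context_channels holds
// the channels still free for single-channel IR contexts, and ohci->mc_channels
// the ones already claimed by this multichannel context, so any requested bit
// set in neither of them belongs to another context; in that case the request
// is rejected and the currently available set is reported back via *channels.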
if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { *channels = ohci->ir_context_channels; - ret = -EBUSY; + return -EBUSY; } else { set_multichannel_mask(ohci, *channels); - ret = 0; + return 0; } - - spin_unlock_irqrestore(&ohci->lock, flags); - - break; + } default: - ret = -EINVAL; + return -EINVAL; } - - return ret; } #ifdef CONFIG_PM @@ -3392,14 +3373,14 @@ static int queue_iso_transmit(struct iso_context *ctx, d[0].branch_address = cpu_to_le32(d_bus | z); header = (__le32 *) &d[1]; - header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | - IT_HEADER_TAG(p->tag) | - IT_HEADER_TCODE(TCODE_STREAM_DATA) | - IT_HEADER_CHANNEL(ctx->base.channel) | - IT_HEADER_SPEED(ctx->base.speed)); - header[1] = - cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + - p->payload_length)); + + ohci1394_it_data_set_speed(header, ctx->base.speed); + ohci1394_it_data_set_tag(header, p->tag); + ohci1394_it_data_set_channel(header, ctx->base.channel); + ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA); + ohci1394_it_data_set_sync(header, p->sy); + + ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length); } if (p->header_length > 0) { @@ -3587,24 +3568,19 @@ static int ohci_queue_iso(struct fw_iso_context *base, unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); - unsigned long flags; - int ret = -ENOSYS; - spin_lock_irqsave(&ctx->context.ohci->lock, flags); + guard(spinlock_irqsave)(&ctx->context.ohci->lock); + switch (base->type) { case FW_ISO_CONTEXT_TRANSMIT: - ret = queue_iso_transmit(ctx, packet, buffer, payload); - break; + return queue_iso_transmit(ctx, packet, buffer, payload); case FW_ISO_CONTEXT_RECEIVE: - ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); - break; + return queue_iso_packet_per_buffer(ctx, packet, buffer, payload); case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: - ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); - break; + return queue_iso_buffer_fill(ctx, packet, buffer, payload); + default: + return -ENOSYS; } - spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); - - return ret; } static void ohci_flush_queue_iso(struct fw_iso_context *base) @@ -3620,10 +3596,8 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) struct iso_context *ctx = container_of(base, struct iso_context, base); int ret = 0; - tasklet_disable_in_atomic(&ctx->context.tasklet); - if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { - context_tasklet((unsigned long)&ctx->context); + ohci_isoc_context_work(&base->work); switch (base->type) { case FW_ISO_CONTEXT_TRANSMIT: @@ -3643,8 +3617,6 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) smp_mb__after_atomic(); } - tasklet_enable(&ctx->context.tasklet); - return ret; } @@ -3863,7 +3835,7 @@ static int pci_probe(struct pci_dev *dev, goto fail_msi; } - err = fw_card_add(&ohci->card, max_receive, link_speed, guid); + err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir); if (err) goto fail_irq; diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index 71c2ed84cafb0b..218666cfe14a5e 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h @@ -153,7 +153,205 @@ #define OHCI1394_evt_unknown 0xe #define OHCI1394_evt_flushed 0xf -#define OHCI1394_phy_tcode 0xe + +// Asynchronous Transmit DMA. +// +// The content of first two quadlets of data for AT DMA is different from the header for IEEE 1394 +// asynchronous packet. 
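// A brief usage sketch (illustrative values only, in the spirit of the
// test_at_data_serdes KUnit case added earlier in this series) of how the
// accessors below pack the first three quadlets of an AT request:
//
//	__le32 quadlets[3] = {0, 0, 0};
//
//	ohci1394_at_data_set_src_bus_id(quadlets, false);
//	ohci1394_at_data_set_speed(quadlets, 2);	// S400
//	ohci1394_at_data_set_tlabel(quadlets, 0x03);
//	ohci1394_at_data_set_retry(quadlets, 0x01);	// retry_X
//	ohci1394_at_data_set_tcode(quadlets, TCODE_READ_QUADLET_REQUEST);
//	ohci1394_at_data_set_destination_id(quadlets, 0xffc0);
//	ohci1394_at_data_set_destination_offset(quadlets, 0xfffff0000400ULL);
//
// For response packets the rcode accessor is used in place of the destination
// offset, as at_context_queue_packet() above does.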
+ +#define OHCI1394_AT_DATA_Q0_srcBusID_MASK 0x00800000 +#define OHCI1394_AT_DATA_Q0_srcBusID_SHIFT 23 +#define OHCI1394_AT_DATA_Q0_spd_MASK 0x00070000 +#define OHCI1394_AT_DATA_Q0_spd_SHIFT 16 +#define OHCI1394_AT_DATA_Q0_tLabel_MASK 0x0000fc00 +#define OHCI1394_AT_DATA_Q0_tLabel_SHIFT 10 +#define OHCI1394_AT_DATA_Q0_rt_MASK 0x00000300 +#define OHCI1394_AT_DATA_Q0_rt_SHIFT 8 +#define OHCI1394_AT_DATA_Q0_tCode_MASK 0x000000f0 +#define OHCI1394_AT_DATA_Q0_tCode_SHIFT 4 +#define OHCI1394_AT_DATA_Q1_destinationId_MASK 0xffff0000 +#define OHCI1394_AT_DATA_Q1_destinationId_SHIFT 16 +#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK 0x0000ffff +#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT 0 +#define OHCI1394_AT_DATA_Q1_rCode_MASK 0x0000f000 +#define OHCI1394_AT_DATA_Q1_rCode_SHIFT 12 + +static inline bool ohci1394_at_data_get_src_bus_id(const __le32 *data) +{ + return !!((data[0] & OHCI1394_AT_DATA_Q0_srcBusID_MASK) >> OHCI1394_AT_DATA_Q0_srcBusID_SHIFT); +} + +static inline void ohci1394_at_data_set_src_bus_id(__le32 *data, bool src_bus_id) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_srcBusID_MASK); + data[0] |= cpu_to_le32((src_bus_id << OHCI1394_AT_DATA_Q0_srcBusID_SHIFT) & OHCI1394_AT_DATA_Q0_srcBusID_MASK); +} + +static inline unsigned int ohci1394_at_data_get_speed(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_spd_MASK) >> OHCI1394_AT_DATA_Q0_spd_SHIFT; +} + +static inline void ohci1394_at_data_set_speed(__le32 *data, unsigned int scode) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_spd_MASK); + data[0] |= cpu_to_le32((scode << OHCI1394_AT_DATA_Q0_spd_SHIFT) & OHCI1394_AT_DATA_Q0_spd_MASK); +} + +static inline unsigned int ohci1394_at_data_get_tlabel(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tLabel_MASK) >> OHCI1394_AT_DATA_Q0_tLabel_SHIFT; +} + +static inline void ohci1394_at_data_set_tlabel(__le32 *data, unsigned int tlabel) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tLabel_MASK); + data[0] |= cpu_to_le32((tlabel << OHCI1394_AT_DATA_Q0_tLabel_SHIFT) & OHCI1394_AT_DATA_Q0_tLabel_MASK); +} + +static inline unsigned int ohci1394_at_data_get_retry(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_rt_MASK) >> OHCI1394_AT_DATA_Q0_rt_SHIFT; +} + +static inline void ohci1394_at_data_set_retry(__le32 *data, unsigned int retry) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_rt_MASK); + data[0] |= cpu_to_le32((retry << OHCI1394_AT_DATA_Q0_rt_SHIFT) & OHCI1394_AT_DATA_Q0_rt_MASK); +} + +static inline unsigned int ohci1394_at_data_get_tcode(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tCode_MASK) >> OHCI1394_AT_DATA_Q0_tCode_SHIFT; +} + +static inline void ohci1394_at_data_set_tcode(__le32 *data, unsigned int tcode) +{ + data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tCode_MASK); + data[0] |= cpu_to_le32((tcode << OHCI1394_AT_DATA_Q0_tCode_SHIFT) & OHCI1394_AT_DATA_Q0_tCode_MASK); +} + +static inline unsigned int ohci1394_at_data_get_destination_id(const __le32 *data) +{ + return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationId_MASK) >> OHCI1394_AT_DATA_Q1_destinationId_SHIFT; +} + +static inline void ohci1394_at_data_set_destination_id(__le32 *data, unsigned int destination_id) +{ + data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationId_MASK); + data[1] |= cpu_to_le32((destination_id << OHCI1394_AT_DATA_Q1_destinationId_SHIFT) & OHCI1394_AT_DATA_Q1_destinationId_MASK); +} + +static inline u64 ohci1394_at_data_get_destination_offset(const 
__le32 *data) +{ + u64 hi = (u64)((le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK) >> OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT); + u64 lo = (u64)le32_to_cpu(data[2]); + return (hi << 32) | lo; +} + +static inline void ohci1394_at_data_set_destination_offset(__le32 *data, u64 offset) +{ + u32 hi = (u32)(offset >> 32); + u32 lo = (u32)(offset & 0x00000000ffffffff); + data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK); + data[1] |= cpu_to_le32((hi << OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK); + data[2] = cpu_to_le32(lo); +} + +static inline unsigned int ohci1394_at_data_get_rcode(const __le32 *data) +{ + return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_rCode_MASK) >> OHCI1394_AT_DATA_Q1_rCode_SHIFT; +} + +static inline void ohci1394_at_data_set_rcode(__le32 *data, unsigned int rcode) +{ + data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_rCode_MASK); + data[1] |= cpu_to_le32((rcode << OHCI1394_AT_DATA_Q1_rCode_SHIFT) & OHCI1394_AT_DATA_Q1_rCode_MASK); +} + +// Isochronous Transmit DMA. +// +// The content of first two quadlets of data for IT DMA is different from the header for IEEE 1394 +// isochronous packet. + +#define OHCI1394_IT_DATA_Q0_spd_MASK 0x00070000 +#define OHCI1394_IT_DATA_Q0_spd_SHIFT 16 +#define OHCI1394_IT_DATA_Q0_tag_MASK 0x0000c000 +#define OHCI1394_IT_DATA_Q0_tag_SHIFT 14 +#define OHCI1394_IT_DATA_Q0_chanNum_MASK 0x00003f00 +#define OHCI1394_IT_DATA_Q0_chanNum_SHIFT 8 +#define OHCI1394_IT_DATA_Q0_tcode_MASK 0x000000f0 +#define OHCI1394_IT_DATA_Q0_tcode_SHIFT 4 +#define OHCI1394_IT_DATA_Q0_sy_MASK 0x0000000f +#define OHCI1394_IT_DATA_Q0_sy_SHIFT 0 +#define OHCI1394_IT_DATA_Q1_dataLength_MASK 0xffff0000 +#define OHCI1394_IT_DATA_Q1_dataLength_SHIFT 16 + +static inline unsigned int ohci1394_it_data_get_speed(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_spd_MASK) >> OHCI1394_IT_DATA_Q0_spd_SHIFT; +} + +static inline void ohci1394_it_data_set_speed(__le32 *data, unsigned int scode) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_spd_MASK); + data[0] |= cpu_to_le32((scode << OHCI1394_IT_DATA_Q0_spd_SHIFT) & OHCI1394_IT_DATA_Q0_spd_MASK); +} + +static inline unsigned int ohci1394_it_data_get_tag(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tag_MASK) >> OHCI1394_IT_DATA_Q0_tag_SHIFT; +} + +static inline void ohci1394_it_data_set_tag(__le32 *data, unsigned int tag) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tag_MASK); + data[0] |= cpu_to_le32((tag << OHCI1394_IT_DATA_Q0_tag_SHIFT) & OHCI1394_IT_DATA_Q0_tag_MASK); +} + +static inline unsigned int ohci1394_it_data_get_channel(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_chanNum_MASK) >> OHCI1394_IT_DATA_Q0_chanNum_SHIFT; +} + +static inline void ohci1394_it_data_set_channel(__le32 *data, unsigned int channel) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_chanNum_MASK); + data[0] |= cpu_to_le32((channel << OHCI1394_IT_DATA_Q0_chanNum_SHIFT) & OHCI1394_IT_DATA_Q0_chanNum_MASK); +} + +static inline unsigned int ohci1394_it_data_get_tcode(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tcode_MASK) >> OHCI1394_IT_DATA_Q0_tcode_SHIFT; +} + +static inline void ohci1394_it_data_set_tcode(__le32 *data, unsigned int tcode) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tcode_MASK); + data[0] |= cpu_to_le32((tcode << OHCI1394_IT_DATA_Q0_tcode_SHIFT) & OHCI1394_IT_DATA_Q0_tcode_MASK); +} + +static inline unsigned 
int ohci1394_it_data_get_sync(const __le32 *data) +{ + return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_sy_MASK) >> OHCI1394_IT_DATA_Q0_sy_SHIFT; +} + +static inline void ohci1394_it_data_set_sync(__le32 *data, unsigned int sync) +{ + data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_sy_MASK); + data[0] |= cpu_to_le32((sync << OHCI1394_IT_DATA_Q0_sy_SHIFT) & OHCI1394_IT_DATA_Q0_sy_MASK); +} + +static inline unsigned int ohci1394_it_data_get_data_length(const __le32 *data) +{ + return (le32_to_cpu(data[1]) & OHCI1394_IT_DATA_Q1_dataLength_MASK) >> OHCI1394_IT_DATA_Q1_dataLength_SHIFT; +} + +static inline void ohci1394_it_data_set_data_length(__le32 *data, unsigned int data_length) +{ + data[1] &= cpu_to_le32(~OHCI1394_IT_DATA_Q1_dataLength_MASK); + data[1] |= cpu_to_le32((data_length << OHCI1394_IT_DATA_Q1_dataLength_SHIFT) & OHCI1394_IT_DATA_Q1_dataLength_MASK); +} // Self-ID DMA. diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 7ba98c7af2e980..4d231bc375e03b 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -53,11 +54,8 @@ #define PACK_TARGET_INFO(s, r) \ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r))) -/* - * Keeping RX TX buffer size as 4K for now - * 64K may be preferred to keep it min a page in 64K PAGE_SIZE config - */ -#define RXTX_BUFFER_SIZE SZ_4K +#define RXTX_MAP_MIN_BUFSZ_MASK GENMASK(1, 0) +#define RXTX_MAP_MIN_BUFSZ(x) ((x) & RXTX_MAP_MIN_BUFSZ_MASK) #define FFA_MAX_NOTIFICATIONS 64 @@ -75,6 +73,7 @@ static const int ffa_linux_errmap[] = { -EAGAIN, /* FFA_RET_RETRY */ -ECANCELED, /* FFA_RET_ABORTED */ -ENODATA, /* FFA_RET_NO_DATA */ + -EAGAIN, /* FFA_RET_NOT_READY */ }; static inline int ffa_to_linux_errno(int errno) @@ -97,7 +96,9 @@ struct ffa_drv_info { struct mutex tx_lock; /* lock to protect Tx buffer */ void *rx_buffer; void *tx_buffer; + size_t rxtx_bufsz; bool mem_ops_native; + bool msg_direct_req2_supp; bool bitmap_created; bool notif_enabled; unsigned int sched_recv_irq; @@ -211,6 +212,32 @@ static int ffa_rxtx_unmap(u16 vm_id) return 0; } +static int ffa_features(u32 func_feat_id, u32 input_props, + u32 *if_props_1, u32 *if_props_2) +{ + ffa_value_t id; + + if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) { + pr_err("%s: Invalid Parameters: %x, %x", __func__, + func_feat_id, input_props); + return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS); + } + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props, + }, &id); + + if (id.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)id.a2); + + if (if_props_1) + *if_props_1 = id.a2; + if (if_props_2) + *if_props_2 = id.a3; + + return 0; +} + #define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0) /* buffer must be sizeof(struct ffa_partition_info) * num_partitions */ @@ -260,17 +287,75 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, return count; } +#define LAST_INDEX_MASK GENMASK(15, 0) +#define CURRENT_INDEX_MASK GENMASK(31, 16) +#define UUID_INFO_TAG_MASK GENMASK(47, 32) +#define PARTITION_INFO_SZ_MASK GENMASK(63, 48) +#define PARTITION_COUNT(x) ((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1) +#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x)))) +#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x)))) +#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x)))) +static int +__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, 
u32 uuid3, + struct ffa_partition_info *buffer, int num_parts) +{ + u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0; + ffa_value_t partition_info; + + do { + start_idx = prev_idx ? prev_idx + 1 : 0; + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_PARTITION_INFO_GET_REGS, + .a1 = (u64)uuid1 << 32 | uuid0, + .a2 = (u64)uuid3 << 32 | uuid2, + .a3 = start_idx | tag << 16, + }, &partition_info); + + if (partition_info.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)partition_info.a2); + + if (!count) + count = PARTITION_COUNT(partition_info.a2); + if (!buffer || !num_parts) /* count only */ + return count; + + cur_idx = CURRENT_INDEX(partition_info.a2); + tag = UUID_INFO_TAG(partition_info.a2); + buf_sz = PARTITION_INFO_SZ(partition_info.a2); + if (buf_sz > sizeof(*buffer)) + buf_sz = sizeof(*buffer); + + memcpy(buffer + prev_idx * buf_sz, &partition_info.a3, + (cur_idx - start_idx + 1) * buf_sz); + prev_idx = cur_idx; + + } while (cur_idx < (count - 1)); + + return count; +} + /* buffer is allocated and caller must free the same if returned count > 0 */ static int ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer) { int count; u32 uuid0_4[4]; + bool reg_mode = false; struct ffa_partition_info *pbuf; + if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL)) + reg_mode = true; + export_uuid((u8 *)uuid0_4, uuid); - count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2], - uuid0_4[3], NULL, 0); + if (reg_mode) + count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1], + uuid0_4[2], uuid0_4[3], + NULL, 0); + else + count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], + uuid0_4[2], uuid0_4[3], + NULL, 0); if (count <= 0) return count; @@ -278,8 +363,14 @@ ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer) if (!pbuf) return -ENOMEM; - count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2], - uuid0_4[3], pbuf, count); + if (reg_mode) + count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1], + uuid0_4[2], uuid0_4[3], + pbuf, count); + else + count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], + uuid0_4[2], uuid0_4[3], + pbuf, count); if (count <= 0) kfree(pbuf); else @@ -305,6 +396,18 @@ static int ffa_id_get(u16 *vm_id) return 0; } +static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret) +{ + while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) { + if (ret->a0 == FFA_YIELD) + fsleep(1000); + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_RUN, .a1 = ret->a1, + }, ret); + } +} + static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, struct ffa_send_direct_data *data) { @@ -325,10 +428,7 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, .a6 = data->data3, .a7 = data->data4, }, &ret); - while (ret.a0 == FFA_INTERRUPT) - invoke_ffa_fn((ffa_value_t){ - .a0 = FFA_RUN, .a1 = ret.a1, - }, &ret); + ffa_msg_send_wait_for_completion(&ret); if (ret.a0 == FFA_ERROR) return ffa_to_linux_errno((int)ret.a2); @@ -352,7 +452,7 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz) ffa_value_t ret; int retval = 0; - if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg))) + if (sz > (drv_info->rxtx_bufsz - sizeof(*msg))) return -ERANGE; mutex_lock(&drv_info->tx_lock); @@ -377,6 +477,32 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz) return retval; } +static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid, + struct ffa_send_direct_data2 *data) +{ + u32 src_dst_ids = 
PACK_TARGET_INFO(src_id, dst_id); + ffa_value_t ret, args = { + .a0 = FFA_MSG_SEND_DIRECT_REQ2, .a1 = src_dst_ids, + }; + + export_uuid((u8 *)&args.a2, uuid); + memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data)); + + invoke_ffa_fn(args, &ret); + + ffa_msg_send_wait_for_completion(&ret); + + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + + if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) { + memcpy(data, &ret.a4, sizeof(*data)); + return 0; + } + + return -EINVAL; +} + static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len, u32 len, u64 *handle) { @@ -561,9 +687,10 @@ static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args) { int ret; void *buffer; + size_t rxtx_bufsz = drv_info->rxtx_bufsz; if (!args->use_txbuf) { - buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); + buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); if (!buffer) return -ENOMEM; } else { @@ -571,12 +698,12 @@ static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args) mutex_lock(&drv_info->tx_lock); } - ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args); + ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args); if (args->use_txbuf) mutex_unlock(&drv_info->tx_lock); else - free_pages_exact(buffer, RXTX_BUFFER_SIZE); + free_pages_exact(buffer, rxtx_bufsz); return ret < 0 ? ret : 0; } @@ -597,32 +724,6 @@ static int ffa_memory_reclaim(u64 g_handle, u32 flags) return 0; } -static int ffa_features(u32 func_feat_id, u32 input_props, - u32 *if_props_1, u32 *if_props_2) -{ - ffa_value_t id; - - if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) { - pr_err("%s: Invalid Parameters: %x, %x", __func__, - func_feat_id, input_props); - return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS); - } - - invoke_ffa_fn((ffa_value_t){ - .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props, - }, &id); - - if (id.a0 == FFA_ERROR) - return ffa_to_linux_errno((int)id.a2); - - if (if_props_1) - *if_props_1 = id.a2; - if (if_props_2) - *if_props_2 = id.a3; - - return 0; -} - static int ffa_notification_bitmap_create(void) { ffa_value_t ret; @@ -858,11 +959,15 @@ static int ffa_run(struct ffa_device *dev, u16 vcpu) return 0; } -static void ffa_set_up_mem_ops_native_flag(void) +static void ffa_drvinfo_flags_init(void) { if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) || !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL)) drv_info->mem_ops_native = true; + + if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) || + !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL)) + drv_info->msg_direct_req2_supp = true; } static u32 ffa_api_version_get(void) @@ -908,6 +1013,16 @@ static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz) return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz); } +static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid, + struct ffa_send_direct_data2 *data) +{ + if (!drv_info->msg_direct_req2_supp) + return -EOPNOTSUPP; + + return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id, + uuid, data); +} + static int ffa_memory_share(struct ffa_mem_ops_args *args) { if (drv_info->mem_ops_native) @@ -1191,6 +1306,7 @@ static const struct ffa_msg_ops ffa_drv_msg_ops = { .mode_32bit_set = ffa_mode_32bit_set, .sync_send_receive = ffa_sync_send_receive, .indirect_send = ffa_indirect_msg_send, + .sync_send_receive2 = ffa_sync_send_receive2, }; static const struct ffa_mem_ops ffa_drv_mem_ops = { @@ -1242,7 +1358,7 @@ ffa_bus_notifier(struct 
notifier_block *nb, unsigned long action, void *data) if (action == BUS_NOTIFY_BIND_DRIVER) { struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); - const struct ffa_device_id *id_table= ffa_drv->id_table; + const struct ffa_device_id *id_table = ffa_drv->id_table; /* * FF-A v1.1 provides UUID for each partition as part of the @@ -1327,8 +1443,6 @@ static int ffa_setup_partitions(void) /* Allocate for the host */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { - pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n", - __func__, drv_info->vm_id); /* Already registered devices are freed on bus_exit */ ffa_partitions_cleanup(); return -ENOMEM; @@ -1603,15 +1717,16 @@ static void ffa_notifications_setup(void) static int __init ffa_init(void) { int ret; + u32 buf_sz; + size_t rxtx_bufsz = SZ_4K; ret = ffa_transport_init(&invoke_ffa_fn); if (ret) return ret; drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL); - if (!drv_info) { + if (!drv_info) return -ENOMEM; - } ret = ffa_version_check(&drv_info->version); if (ret) @@ -1623,13 +1738,24 @@ static int __init ffa_init(void) goto free_drv_info; } - drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); + ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL); + if (!ret) { + if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1) + rxtx_bufsz = SZ_64K; + else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2) + rxtx_bufsz = SZ_16K; + else + rxtx_bufsz = SZ_4K; + } + + drv_info->rxtx_bufsz = rxtx_bufsz; + drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); if (!drv_info->rx_buffer) { ret = -ENOMEM; goto free_pages; } - drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); + drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL); if (!drv_info->tx_buffer) { ret = -ENOMEM; goto free_pages; @@ -1637,7 +1763,7 @@ static int __init ffa_init(void) ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer), virt_to_phys(drv_info->rx_buffer), - RXTX_BUFFER_SIZE / FFA_PAGE_SIZE); + rxtx_bufsz / FFA_PAGE_SIZE); if (ret) { pr_err("failed to register FFA RxTx buffers\n"); goto free_pages; @@ -1646,7 +1772,7 @@ static int __init ffa_init(void) mutex_init(&drv_info->rx_lock); mutex_init(&drv_info->tx_lock); - ffa_set_up_mem_ops_native_flag(); + ffa_drvinfo_flags_init(); ffa_notifications_setup(); @@ -1667,8 +1793,8 @@ static int __init ffa_init(void) ffa_notifications_cleanup(); free_pages: if (drv_info->tx_buffer) - free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); - free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); + free_pages_exact(drv_info->tx_buffer, rxtx_bufsz); + free_pages_exact(drv_info->rx_buffer, rxtx_bufsz); free_drv_info: kfree(drv_info); return ret; @@ -1680,8 +1806,8 @@ static void __exit ffa_exit(void) ffa_notifications_cleanup(); ffa_partitions_cleanup(); ffa_rxtx_unmap(drv_info->vm_id); - free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); - free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); + free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz); + free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz); kfree(drv_info); } module_exit(ffa_exit); diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index aa5842be19b2d1..dabd874641d0b1 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -55,116 +55,22 @@ config ARM_SCMI_RAW_MODE_SUPPORT_COEX operate normally, thing which could make an SCMI test suite using the SCMI Raw mode support unreliable. If unsure, say N. 
-config ARM_SCMI_HAVE_TRANSPORT - bool - help - This declares whether at least one SCMI transport has been configured. - Used to trigger a build bug when trying to build SCMI without any - configured transport. - -config ARM_SCMI_HAVE_SHMEM - bool - help - This declares whether a shared memory based transport for SCMI is - available. - -config ARM_SCMI_HAVE_MSG - bool - help - This declares whether a message passing based transport for SCMI is - available. - -config ARM_SCMI_TRANSPORT_MAILBOX - bool "SCMI transport based on Mailbox" - depends on MAILBOX - select ARM_SCMI_HAVE_TRANSPORT - select ARM_SCMI_HAVE_SHMEM - default y - help - Enable mailbox based transport for SCMI. - - If you want the ARM SCMI PROTOCOL stack to include support for a - transport based on mailboxes, answer Y. - -config ARM_SCMI_TRANSPORT_OPTEE - bool "SCMI transport based on OP-TEE service" - depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL - select ARM_SCMI_HAVE_TRANSPORT - select ARM_SCMI_HAVE_SHMEM - select ARM_SCMI_HAVE_MSG - default y - help - This enables the OP-TEE service based transport for SCMI. - - If you want the ARM SCMI PROTOCOL stack to include support for a - transport based on OP-TEE SCMI service, answer Y. - -config ARM_SCMI_TRANSPORT_SMC - bool "SCMI transport based on SMC" - depends on HAVE_ARM_SMCCC_DISCOVERY - select ARM_SCMI_HAVE_TRANSPORT - select ARM_SCMI_HAVE_SHMEM - default y - help - Enable SMC based transport for SCMI. - - If you want the ARM SCMI PROTOCOL stack to include support for a - transport based on SMC, answer Y. - -config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE - bool "Enable atomic mode support for SCMI SMC transport" - depends on ARM_SCMI_TRANSPORT_SMC - help - Enable support of atomic operation for SCMI SMC based transport. - - If you want the SCMI SMC based transport to operate in atomic - mode, avoiding any kind of sleeping behaviour for selected - transactions on the TX path, answer Y. - Enabling atomic mode operations allows any SCMI driver using this - transport to optionally ask for atomic SCMI transactions and operate - in atomic context too, at the price of using a number of busy-waiting - primitives all over instead. If unsure say N. - -config ARM_SCMI_TRANSPORT_VIRTIO - bool "SCMI transport based on VirtIO" - depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL - select ARM_SCMI_HAVE_TRANSPORT - select ARM_SCMI_HAVE_MSG - help - This enables the virtio based transport for SCMI. - - If you want the ARM SCMI PROTOCOL stack to include support for a - transport based on VirtIO, answer Y. - -config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE - bool "SCMI VirtIO transport Version 1 compliance" - depends on ARM_SCMI_TRANSPORT_VIRTIO - default y - help - This enforces strict compliance with VirtIO Version 1 specification. - - If you want the ARM SCMI VirtIO transport layer to refuse to work - with Legacy VirtIO backends and instead support only VirtIO Version 1 - devices (or above), answer Y. - - If you want instead to support also old Legacy VirtIO backends (like - the ones implemented by kvmtool) and let the core Kernel VirtIO layer - take care of the needed conversions, say N. - -config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE - bool "Enable atomic mode for SCMI VirtIO transport" - depends on ARM_SCMI_TRANSPORT_VIRTIO +config ARM_SCMI_DEBUG_COUNTERS + bool "Enable SCMI communication debug metrics tracking" + select ARM_SCMI_NEED_DEBUGFS + depends on DEBUG_FS + default n help - Enable support of atomic operation for SCMI VirtIO based transport. 
+ Enables tracking of some key communication metrics for debug + purposes. It may track metrics like how many messages were sent + or received, whether there were any failures, what kind of failures, etc. - If you want the SCMI VirtIO based transport to operate in atomic - mode, avoiding any kind of sleeping behaviour for selected - transactions on the TX path, answer Y. + Enable this option to create a new debugfs directory which contains + such useful debug counters. This can be helpful for debugging and + SCMI monitoring. - Enabling atomic mode operations allows any SCMI driver using this - transport to optionally ask for atomic SCMI transactions and operate - in atomic context too, at the price of using a number of busy-waiting - primitives all over instead. If unsure say N. +source "drivers/firmware/arm_scmi/transports/Kconfig" +source "drivers/firmware/arm_scmi/vendors/imx/Kconfig" endif #ARM_SCMI_PROTOCOL diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index fd59f58ce8a2dd..9ac81adff56756 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -5,23 +5,15 @@ scmi-core-objs := $(scmi-bus-y) scmi-driver-y = driver.o notify.o scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o -scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o -scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o -scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o -scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o scmi-protocols-y := base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o scmi-protocols-y += pinctrl.o scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y) +obj-$(CONFIG_ARM_SCMI_PROTOCOL) += transports/ +obj-$(CONFIG_ARM_SCMI_PROTOCOL) += vendors/imx/ + obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o - -ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) -# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame -# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling -# hooks are inserted via the -pg switch.
-CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE) -endif diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index 97254de35ab0d4..86b376c50a13fa 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -14,7 +14,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001 #define SCMI_BASE_NUM_SOURCES 1 #define SCMI_BASE_MAX_CMD_ERR_COUNT 1024 @@ -42,7 +42,6 @@ struct scmi_msg_resp_base_discover_agent { u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; - struct scmi_msg_base_error_notify { __le32 event_control; #define BASE_TP_NOTIFY_ALL BIT(0) @@ -105,7 +104,6 @@ scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor) struct scmi_xfer *t; struct scmi_revision_info *rev = ph->get_priv(ph); - if (sub_vendor) { cmd = BASE_DISCOVER_SUB_VENDOR; vendor_id = rev->sub_vendor_id; @@ -386,7 +384,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph) if (ret) return ret; - rev->major_ver = PROTOCOL_REV_MAJOR(version), + rev->major_ver = PROTOCOL_REV_MAJOR(version); rev->minor_ver = PROTOCOL_REV_MINOR(version); ph->set_priv(ph, rev, version); diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 134019297d08b1..2ed2279388f02e 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -365,6 +365,7 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { u32 latency = 0; + attributes = le32_to_cpu(attr->attributes); strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); /* clock_enable_latency field is present only since SCMI v3.1 */ diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 4b8c5250cdb57e..c4b8e7ff88aa23 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -4,7 +4,7 @@ * driver common header file containing some definitions, structures * and function prototypes used in all the different SCMI protocols. * - * Copyright (C) 2018-2022 ARM Ltd. + * Copyright (C) 2018-2024 ARM Ltd. */ #ifndef _SCMI_COMMON_H #define _SCMI_COMMON_H @@ -22,7 +22,7 @@ #include #include -#include +#include #include "protocols.h" #include "notify.h" @@ -183,7 +183,6 @@ struct scmi_chan_info { /** * struct scmi_transport_ops - Structure representing a SCMI transport ops * - * @link_supplier: Optional callback to add link to a supplier device * @chan_available: Callback to check if channel is available or not * @chan_setup: Callback to allocate and setup a channel * @chan_free: Callback to free a channel @@ -198,7 +197,6 @@ struct scmi_chan_info { * @poll_done: Callback to poll transfer status */ struct scmi_transport_ops { - int (*link_supplier)(struct device *dev); bool (*chan_available)(struct device_node *of_node, int idx); int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev, bool tx); @@ -219,12 +217,6 @@ struct scmi_transport_ops { /** * struct scmi_desc - Description of SoC integration * - * @transport_init: An optional function that a transport can provide to - * initialize some transport-specific setup during SCMI core - * initialization, so ahead of SCMI core probing. - * @transport_exit: An optional function that a transport can provide to - * de-initialize some transport-specific setup during SCMI core - * de-initialization, so after SCMI core removal. 
* @ops: Pointer to the transport specific ops structure * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) * @max_msg: Maximum number of messages for a channel type (tx or rx) that can @@ -245,8 +237,6 @@ struct scmi_transport_ops { * when requested. */ struct scmi_desc { - int (*transport_init)(void); - void (*transport_exit)(void); const struct scmi_transport_ops *ops; int max_rx_timeout_ms; int max_msg; @@ -286,20 +276,30 @@ int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle, int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer, unsigned int timeout_ms); -#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX -extern const struct scmi_desc scmi_mailbox_desc; -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC -extern const struct scmi_desc scmi_smc_desc; -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO -extern const struct scmi_desc scmi_virtio_desc; -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE -extern const struct scmi_desc scmi_optee_desc; -#endif - -void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); + +enum debug_counters { + SENT_OK, + SENT_FAIL, + SENT_FAIL_POLLING_UNSUPPORTED, + SENT_FAIL_CHANNEL_NOT_FOUND, + RESPONSE_OK, + NOTIFICATION_OK, + DELAYED_RESPONSE_OK, + XFERS_RESPONSE_TIMEOUT, + XFERS_RESPONSE_POLLED_TIMEOUT, + RESPONSE_POLLED_OK, + ERR_MSG_UNEXPECTED, + ERR_MSG_INVALID, + ERR_MSG_NOMEM, + ERR_PROTOCOL, + SCMI_DEBUG_COUNTERS_LAST +}; + +static inline void scmi_inc_count(atomic_t *arr, int stat) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) + atomic_inc(&arr[stat]); +} enum scmi_bad_msg { MSG_UNEXPECTED = -1, @@ -309,24 +309,44 @@ enum scmi_bad_msg { MSG_MBOX_SPURIOUS = -5, }; -void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr, - enum scmi_bad_msg err); - /* shmem related declarations */ struct scmi_shared_mem; -void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer, struct scmi_chan_info *cinfo); -u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); -void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, +/** + * struct scmi_shared_mem_operations - Transport core operations for + * Shared Memory + * + * @tx_prepare: Prepare the @xfer message for transmission on the chosen @shmem + * @read_header: Read header of the message currently held in @shmem + * @fetch_response: Copy the message response from @shmem into @xfer + * @fetch_notification: Copy the message notification from @shmem into @xfer + * @clear_channel: Clear the @shmem channel busy flag + * @poll_done: Check if poll has completed for @xfer on @shmem + * @channel_free: Check if @shmem channel is marked as free + * @channel_intr_enabled: Check if @shmem channel has requested a completion irq + * @setup_iomap: Setup IO shared memory for channel @cinfo + */ +struct scmi_shared_mem_operations { + void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer, + struct scmi_chan_info *cinfo); + u32 (*read_header)(struct scmi_shared_mem __iomem *shmem); + + void (*fetch_response)(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer); + void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem, + size_t max_len, struct scmi_xfer *xfer); + void (*clear_channel)(struct scmi_shared_mem __iomem *shmem); + bool (*poll_done)(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); -void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, - size_t max_len, struct scmi_xfer *xfer); -void
shmem_clear_channel(struct scmi_shared_mem __iomem *shmem); -bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer); -bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem); -bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem); + bool (*channel_free)(struct scmi_shared_mem __iomem *shmem); + bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem); + void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo, + struct device *dev, + bool tx, struct resource *res); +}; + +const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void); /* declarations for message passing transports */ struct scmi_msg_payld; @@ -334,14 +354,108 @@ struct scmi_msg_payld; /* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */ #define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32)) -size_t msg_response_size(struct scmi_xfer *xfer); -size_t msg_command_size(struct scmi_xfer *xfer); -void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer); -u32 msg_read_header(struct scmi_msg_payld *msg); -void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, - struct scmi_xfer *xfer); -void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, - size_t max_len, struct scmi_xfer *xfer); +/** + * struct scmi_message_operations - Transport core operations for Message + * + * @response_size: Get calculated response size for @xfer + * @command_size: Get calculated command size for @xfer + * @tx_prepare: Prepare the @xfer message for transmission on the provided @msg + * @read_header: Read header of the message currently held in @msg + * @fetch_response: Copy the message response from @msg into @xfer + * @fetch_notification: Copy the message notification from @msg into @xfer + */ +struct scmi_message_operations { + size_t (*response_size)(struct scmi_xfer *xfer); + size_t (*command_size)(struct scmi_xfer *xfer); + void (*tx_prepare)(struct scmi_msg_payld *msg, struct scmi_xfer *xfer); + u32 (*read_header)(struct scmi_msg_payld *msg); + void (*fetch_response)(struct scmi_msg_payld *msg, size_t len, + struct scmi_xfer *xfer); + void (*fetch_notification)(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer); +}; + +const struct scmi_message_operations *scmi_message_operations_get(void); + +/** + * struct scmi_transport_core_operations - Transport core operations + * + * @bad_message_trace: A helper to report a malformed/unexpected message + * @rx_callback: Callback to report received messages + * @shmem: Datagram operations for shared memory based transports + * @msg: Datagram operations for message based transports + */ +struct scmi_transport_core_operations { + void (*bad_message_trace)(struct scmi_chan_info *cinfo, + u32 msg_hdr, enum scmi_bad_msg err); + void (*rx_callback)(struct scmi_chan_info *cinfo, u32 msg_hdr, + void *priv); + const struct scmi_shared_mem_operations *shmem; + const struct scmi_message_operations *msg; +}; + +/** + * struct scmi_transport - A structure representing a configured transport + * + * @supplier: Device representing the transport and acting as a supplier for + * the core SCMI stack + * @desc: Transport descriptor + * @core_ops: A pointer to a pointer used by the core SCMI stack to make the + * core transport operations accessible to the transports.
+ */ +struct scmi_transport { + struct device *supplier; + struct scmi_desc *desc; + struct scmi_transport_core_operations **core_ops; +}; + +#define DEFINE_SCMI_TRANSPORT_DRIVER(__tag, __drv, __desc, __match, __core_ops)\ +static void __tag##_dev_free(void *data) \ +{ \ + struct platform_device *spdev = data; \ + \ + platform_device_unregister(spdev); \ +} \ + \ +static int __tag##_probe(struct platform_device *pdev) \ +{ \ + struct device *dev = &pdev->dev; \ + struct platform_device *spdev; \ + struct scmi_transport strans; \ + int ret; \ + \ + spdev = platform_device_alloc("arm-scmi", PLATFORM_DEVID_AUTO); \ + if (!spdev) \ + return -ENOMEM; \ + \ + device_set_of_node_from_dev(&spdev->dev, dev); \ + \ + strans.supplier = dev; \ + strans.desc = &(__desc); \ + strans.core_ops = &(__core_ops); \ + \ + ret = platform_device_add_data(spdev, &strans, sizeof(strans)); \ + if (ret) \ + goto err; \ + \ + ret = platform_device_add(spdev); \ + if (ret) \ + goto err; \ + \ + return devm_add_action_or_reset(dev, __tag##_dev_free, spdev); \ + \ +err: \ + platform_device_put(spdev); \ + return ret; \ +} \ + \ +static struct platform_driver __drv = { \ + .driver = { \ + .name = #__tag "_transport", \ + .of_match_table = __match, \ + }, \ + .probe = __tag##_probe, \ +} void scmi_notification_instance_data_set(const struct scmi_handle *handle, void *priv); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 6b6957f4743fec..88c5c4ff4bb62e 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -11,7 +11,7 @@ * various power domain DVFS including the core/cluster, certain system * clocks configuration, thermal sensors and many others. * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2024 ARM Ltd. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -117,12 +117,14 @@ struct scmi_protocol_instance { * @name: Name of this SCMI instance * @type: Type of this SCMI instance * @is_atomic: Flag to state if the transport of this instance is atomic + * @counters: An array of atomic_t's used for tracking statistics (if enabled) */ struct scmi_debug_info { struct dentry *top_dentry; const char *name; const char *type; bool is_atomic; + atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; }; /** @@ -194,6 +196,16 @@ struct scmi_info { #define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb) #define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb) +static void scmi_rx_callback(struct scmi_chan_info *cinfo, + u32 msg_hdr, void *priv); +static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, + u32 msg_hdr, enum scmi_bad_msg err); + +static struct scmi_transport_core_operations scmi_trans_core_ops = { + .bad_message_trace = scmi_bad_message_trace, + .rx_callback = scmi_rx_callback, +}; + static unsigned long scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id, char *sub_vendor_id, u32 impl_ver)
*/ -void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr, - enum scmi_bad_msg err) +static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr, + enum scmi_bad_msg err) { char *tag; struct scmi_info *info = handle_to_scmi_info(cinfo->handle); @@ -988,6 +1000,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) spin_unlock_irqrestore(&minfo->xfer_lock, flags); scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); + scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); return xfer; } @@ -1015,6 +1028,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) msg_type, xfer_id, msg_hdr, xfer->state); scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); + scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); /* On error the refcount incremented above has to be dropped */ __scmi_xfer_put(minfo, xfer); @@ -1054,6 +1068,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, PTR_ERR(xfer)); scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); + scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); scmi_clear_channel(info, cinfo); return; @@ -1069,6 +1084,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); + scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -1128,8 +1144,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { scmi_clear_channel(info, cinfo); complete(xfer->async_done); + scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); } else { complete(&xfer->done); + scmi_inc_count(info->dbg->counters, RESPONSE_OK); } if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { @@ -1160,7 +1178,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * NOTE: This function will be invoked in IRQ context, hence should be * as optimal as possible. 
*/ -void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) +static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, + void *priv) { u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); @@ -1213,6 +1232,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, struct scmi_xfer *xfer, unsigned int timeout_ms) { int ret = 0; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); if (xfer->hdr.poll_completion) { /* @@ -1233,13 +1253,12 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, "timed out in resp(caller: %pS) - polling\n", (void *)_RET_IP_); ret = -ETIMEDOUT; + scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); } } if (!ret) { unsigned long flags; - struct scmi_info *info = - handle_to_scmi_info(cinfo->handle); /* * Do not fetch_response if an out-of-order delayed @@ -1259,11 +1278,9 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, "RESP" : "resp", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); + scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { - struct scmi_info *info = - handle_to_scmi_info(cinfo->handle); - scmi_raw_message_report(info->raw, xfer, SCMI_RAW_REPLY_QUEUE, cinfo->id); @@ -1276,6 +1293,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, dev_err(dev, "timed out in resp(caller: %pS)\n", (void *)_RET_IP_); ret = -ETIMEDOUT; + scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); } } @@ -1359,13 +1377,15 @@ static int do_xfer(const struct scmi_protocol_handle *ph, !is_transport_polling_capable(info->desc)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); + scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); return -EINVAL; } cinfo = idr_find(&info->tx_idr, pi->proto->id); - if (unlikely(!cinfo)) + if (unlikely(!cinfo)) { + scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); return -EINVAL; - + } /* True ONLY if also supported by transport. 
*/ if (is_polling_enabled(cinfo, info->desc)) xfer->hdr.poll_completion = true; @@ -1397,16 +1417,20 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ret = info->desc->ops->send_message(cinfo, xfer); if (ret < 0) { dev_dbg(dev, "Failed to send message %d\n", ret); + scmi_inc_count(info->dbg->counters, SENT_FAIL); return ret; } trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "CMND", xfer->hdr.seq, xfer->hdr.status, xfer->tx.buf, xfer->tx.len); + scmi_inc_count(info->dbg->counters, SENT_OK); ret = scmi_wait_for_message_response(cinfo, xfer); - if (!ret && xfer->hdr.status) + if (!ret && xfer->hdr.status) { ret = scmi_to_linux_errno(xfer->hdr.status); + scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); + } if (info->desc->ops->mark_txdone) info->desc->ops->mark_txdone(cinfo, ret, xfer); @@ -2708,14 +2732,14 @@ scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node, static int scmi_channels_setup(struct scmi_info *info) { int ret; - struct device_node *child, *top_np = info->dev->of_node; + struct device_node *top_np = info->dev->of_node; /* Initialize a common generic channel at first */ ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE); if (ret) return ret; - for_each_available_child_of_node(top_np, child) { + for_each_available_child_of_node_scoped(top_np, child) { u32 prot_id; if (of_property_read_u32(child, "reg", &prot_id)) @@ -2726,10 +2750,8 @@ static int scmi_channels_setup(struct scmi_info *info) "Out of range protocol %d\n", prot_id); ret = scmi_txrx_setup(info, child, prot_id); - if (ret) { - of_node_put(child); + if (ret) return ret; - } } return 0; @@ -2833,6 +2855,55 @@ static int scmi_device_request_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static const char * const dbg_counter_strs[] = { + "sent_ok", + "sent_fail", + "sent_fail_polling_unsupported", + "sent_fail_channel_not_found", + "response_ok", + "notification_ok", + "delayed_response_ok", + "xfers_response_timeout", + "xfers_response_polled_timeout", + "response_polled_ok", + "err_msg_unexpected", + "err_msg_invalid", + "err_msg_nomem", + "err_protocol", +}; + +static ssize_t reset_all_on_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct scmi_debug_info *dbg = filp->private_data; + + for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++) + atomic_set(&dbg->counters[i], 0); + + return count; +} + +static const struct file_operations fops_reset_counts = { + .owner = THIS_MODULE, + .open = simple_open, + .write = reset_all_on_write, +}; + +static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg, + struct dentry *trans) +{ + struct dentry *counters; + int idx; + + counters = debugfs_create_dir("counters", trans); + + for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++) + debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters, + &dbg->counters[idx]); + + debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts); +} + static void scmi_debugfs_common_cleanup(void *d) { struct scmi_debug_info *dbg = d; @@ -2899,6 +2970,9 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info) debugfs_create_u32("rx_max_msg", 0400, trans, (u32 *)&info->rx_minfo.max_msg); + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) + scmi_debugfs_counters_setup(dbg, trans); + dbg->top_dentry = top_dentry; if (devm_add_action_or_reset(info->dev, @@ -2950,6 +3024,37 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info) return ret; } +static const struct scmi_desc 
*scmi_transport_setup(struct device *dev) +{ + struct scmi_transport *trans; + int ret; + + trans = dev_get_platdata(dev); + if (!trans || !trans->desc || !trans->supplier || !trans->core_ops) + return NULL; + + if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, + "Adding link to supplier transport device failed\n"); + return NULL; + } + + /* Provide core transport ops */ + *trans->core_ops = &scmi_trans_core_ops; + + dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier)); + + ret = of_property_read_u32(dev->of_node, "max-rx-timeout-ms", + &trans->desc->max_rx_timeout_ms); + if (ret && ret != -EINVAL) + dev_err(dev, "Malformed max-rx-timeout-ms DT property.\n"); + + dev_info(dev, "SCMI max-rx-timeout: %dms\n", + trans->desc->max_rx_timeout_ms); + + return trans->desc; +} + static int scmi_probe(struct platform_device *pdev) { int ret; @@ -2961,9 +3066,12 @@ static int scmi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *child, *np = dev->of_node; - desc = of_device_get_match_data(dev); - if (!desc) - return -EINVAL; + desc = scmi_transport_setup(dev); + if (!desc) { + err_str = "transport invalid\n"; + ret = -EINVAL; + goto out_err; + } info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) @@ -3002,14 +3110,6 @@ static int scmi_probe(struct platform_device *pdev) info->atomic_threshold); handle->is_transport_atomic = scmi_is_transport_atomic; - if (desc->ops->link_supplier) { - ret = desc->ops->link_supplier(dev); - if (ret) { - err_str = "transport not ready\n"; - goto clear_ida; - } - } - /* Setup all channels described in the DT at first */ ret = scmi_channels_setup(info); if (ret) { @@ -3130,6 +3230,7 @@ static int scmi_probe(struct platform_device *pdev) clear_ida: ida_free(&scmi_id, info->id); +out_err: return dev_err_probe(dev, ret, "%s", err_str); } @@ -3215,86 +3316,16 @@ static struct attribute *versions_attrs[] = { }; ATTRIBUTE_GROUPS(versions); -/* Each compatible listed below must have descriptor associated with it */ -static const struct of_device_id scmi_of_match[] = { -#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX - { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE - { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc }, -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC - { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, - { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc}, - { .compatible = "qcom,scmi-smc", .data = &scmi_smc_desc}, -#endif -#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO - { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc}, -#endif - { /* Sentinel */ }, -}; - -MODULE_DEVICE_TABLE(of, scmi_of_match); - static struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", .suppress_bind_attrs = true, - .of_match_table = scmi_of_match, .dev_groups = versions_groups, }, .probe = scmi_probe, .remove_new = scmi_remove, }; -/** - * __scmi_transports_setup - Common helper to call transport-specific - * .init/.exit code if provided. - * - * @init: A flag to distinguish between init and exit. - * - * Note that, if provided, we invoke .init/.exit functions for all the - * transports currently compiled in. - * - * Return: 0 on Success. 
- */ -static inline int __scmi_transports_setup(bool init) -{ - int ret = 0; - const struct of_device_id *trans; - - for (trans = scmi_of_match; trans->data; trans++) { - const struct scmi_desc *tdesc = trans->data; - - if ((init && !tdesc->transport_init) || - (!init && !tdesc->transport_exit)) - continue; - - if (init) - ret = tdesc->transport_init(); - else - tdesc->transport_exit(); - - if (ret) { - pr_err("SCMI transport %s FAILED initialization!\n", - trans->compatible); - break; - } - } - - return ret; -} - -static int __init scmi_transports_init(void) -{ - return __scmi_transports_setup(true); -} - -static void __exit scmi_transports_exit(void) -{ - __scmi_transports_setup(false); -} - static struct dentry *scmi_debugfs_init(void) { struct dentry *d; @@ -3310,16 +3341,15 @@ static struct dentry *scmi_debugfs_init(void) static int __init scmi_driver_init(void) { - int ret; - /* Bail out if no SCMI transport was configured */ if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT))) return -EINVAL; - /* Initialize any compiled-in transport which provided an init/exit */ - ret = scmi_transports_init(); - if (ret) - return ret; + if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM)) + scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get(); + + if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG)) + scmi_trans_core_ops.msg = scmi_message_operations_get(); if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS)) scmi_top_dentry = scmi_debugfs_init(); @@ -3354,8 +3384,6 @@ static void __exit scmi_driver_exit(void) scmi_powercap_unregister(); scmi_pinctrl_unregister(); - scmi_transports_exit(); - platform_driver_unregister(&scmi_driver); debugfs_remove_recursive(scmi_top_dentry); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c deleted file mode 100644 index 0219a12e3209a7..00000000000000 --- a/drivers/firmware/arm_scmi/mailbox.c +++ /dev/null @@ -1,377 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * System Control and Management Interface (SCMI) Message Mailbox Transport - * driver. - * - * Copyright (C) 2019 ARM Ltd. - */ - -#include -#include -#include -#include -#include -#include - -#include "common.h" - -/** - * struct scmi_mailbox - Structure representing a SCMI mailbox transport - * - * @cl: Mailbox Client - * @chan: Transmit/Receive mailbox uni/bi-directional channel - * @chan_receiver: Optional Receiver mailbox unidirectional channel - * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel - * @cinfo: SCMI channel info - * @shmem: Transmit/Receive shared memory area - */ -struct scmi_mailbox { - struct mbox_client cl; - struct mbox_chan *chan; - struct mbox_chan *chan_receiver; - struct mbox_chan *chan_platform_receiver; - struct scmi_chan_info *cinfo; - struct scmi_shared_mem __iomem *shmem; -}; - -#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) - -static void tx_prepare(struct mbox_client *cl, void *m) -{ - struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - - shmem_tx_prepare(smbox->shmem, m, smbox->cinfo); -} - -static void rx_callback(struct mbox_client *cl, void *m) -{ - struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - - /* - * An A2P IRQ is NOT valid when received while the platform still has - * the ownership of the channel, because the platform at first releases - * the SMT channel and then sends the completion interrupt. 
- * - * This addresses a possible race condition in which a spurious IRQ from - * a previous timed-out reply which arrived late could be wrongly - * associated with the next pending transaction. - */ - if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) { - dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n"); - scmi_bad_message_trace(smbox->cinfo, - shmem_read_header(smbox->shmem), - MSG_MBOX_SPURIOUS); - return; - } - - scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL); -} - -static bool mailbox_chan_available(struct device_node *of_node, int idx) -{ - int num_mb; - - /* - * Just check if bidirrectional channels are involved, and check the - * index accordingly; proper full validation will be made later - * in mailbox_chan_setup(). - */ - num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells"); - if (num_mb == 3 && idx == 1) - idx = 2; - - return !of_parse_phandle_with_args(of_node, "mboxes", - "#mbox-cells", idx, NULL); -} - -/** - * mailbox_chan_validate - Validate transport configuration and map channels - * - * @cdev: Reference to the underlying transport device carrying the - * of_node descriptor to analyze. - * @a2p_rx_chan: A reference to an optional unidirectional channel to use - * for replies on the a2p channel. Set as zero if not present. - * @p2a_chan: A reference to the optional p2a channel. - * Set as zero if not present. - * @p2a_rx_chan: A reference to the optional p2a completion channel. - * Set as zero if not present. - * - * At first, validate the transport configuration as described in terms of - * 'mboxes' and 'shmem', then determin which mailbox channel indexes are - * appropriate to be use in the current configuration. - * - * Return: 0 on Success or error - */ -static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan, - int *p2a_chan, int *p2a_rx_chan) -{ - int num_mb, num_sh, ret = 0; - struct device_node *np = cdev->of_node; - - num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); - num_sh = of_count_phandle_with_args(np, "shmem", NULL); - dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh); - - /* Bail out if mboxes and shmem descriptors are inconsistent */ - if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 || - (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) || - (num_mb == 4 && num_sh != 2)) { - dev_warn(cdev, - "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n", - of_node_full_name(np), num_mb, num_sh); - return -EINVAL; - } - - /* Bail out if provided shmem descriptors do not refer distinct areas */ - if (num_sh > 1) { - struct device_node *np_tx, *np_rx; - - np_tx = of_parse_phandle(np, "shmem", 0); - np_rx = of_parse_phandle(np, "shmem", 1); - if (!np_tx || !np_rx || np_tx == np_rx) { - dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", - of_node_full_name(np)); - ret = -EINVAL; - } - - of_node_put(np_tx); - of_node_put(np_rx); - } - - /* Calculate channels IDs to use depending on mboxes/shmem layout */ - if (!ret) { - switch (num_mb) { - case 1: - *a2p_rx_chan = 0; - *p2a_chan = 0; - *p2a_rx_chan = 0; - break; - case 2: - if (num_sh == 2) { - *a2p_rx_chan = 0; - *p2a_chan = 1; - } else { - *a2p_rx_chan = 1; - *p2a_chan = 0; - } - *p2a_rx_chan = 0; - break; - case 3: - *a2p_rx_chan = 1; - *p2a_chan = 2; - *p2a_rx_chan = 0; - break; - case 4: - *a2p_rx_chan = 1; - *p2a_chan = 2; - *p2a_rx_chan = 3; - break; - } - } - - return ret; -} - -static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, - bool tx) -{ - const 
char *desc = tx ? "Tx" : "Rx"; - struct device *cdev = cinfo->dev; - struct scmi_mailbox *smbox; - struct device_node *shmem; - int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan, idx = tx ? 0 : 1; - struct mbox_client *cl; - resource_size_t size; - struct resource res; - - ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan); - if (ret) - return ret; - - if (!tx && !p2a_chan) - return -ENODEV; - - smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL); - if (!smbox) - return -ENOMEM; - - shmem = of_parse_phandle(cdev->of_node, "shmem", idx); - if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) { - of_node_put(shmem); - return -ENXIO; - } - - ret = of_address_to_resource(shmem, 0, &res); - of_node_put(shmem); - if (ret) { - dev_err(cdev, "failed to get SCMI %s shared memory\n", desc); - return ret; - } - - size = resource_size(&res); - smbox->shmem = devm_ioremap(dev, res.start, size); - if (!smbox->shmem) { - dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); - return -EADDRNOTAVAIL; - } - - cl = &smbox->cl; - cl->dev = cdev; - cl->tx_prepare = tx ? tx_prepare : NULL; - cl->rx_callback = rx_callback; - cl->tx_block = false; - cl->knows_txdone = tx; - - smbox->chan = mbox_request_channel(cl, tx ? 0 : p2a_chan); - if (IS_ERR(smbox->chan)) { - ret = PTR_ERR(smbox->chan); - if (ret != -EPROBE_DEFER) - dev_err(cdev, - "failed to request SCMI %s mailbox\n", desc); - return ret; - } - - /* Additional unidirectional channel for TX if needed */ - if (tx && a2p_rx_chan) { - smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan); - if (IS_ERR(smbox->chan_receiver)) { - ret = PTR_ERR(smbox->chan_receiver); - if (ret != -EPROBE_DEFER) - dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n"); - return ret; - } - } - - if (!tx && p2a_rx_chan) { - smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan); - if (IS_ERR(smbox->chan_platform_receiver)) { - ret = PTR_ERR(smbox->chan_platform_receiver); - if (ret != -EPROBE_DEFER) - dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n"); - return ret; - } - } - - - cinfo->transport_info = smbox; - smbox->cinfo = cinfo; - - return 0; -} - -static int mailbox_chan_free(int id, void *p, void *data) -{ - struct scmi_chan_info *cinfo = p; - struct scmi_mailbox *smbox = cinfo->transport_info; - - if (smbox && !IS_ERR(smbox->chan)) { - mbox_free_channel(smbox->chan); - mbox_free_channel(smbox->chan_receiver); - mbox_free_channel(smbox->chan_platform_receiver); - cinfo->transport_info = NULL; - smbox->chan = NULL; - smbox->chan_receiver = NULL; - smbox->chan_platform_receiver = NULL; - smbox->cinfo = NULL; - } - - return 0; -} - -static int mailbox_send_message(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - struct scmi_mailbox *smbox = cinfo->transport_info; - int ret; - - ret = mbox_send_message(smbox->chan, xfer); - - /* mbox_send_message returns non-negative value on success, so reset */ - if (ret > 0) - ret = 0; - - return ret; -} - -static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, - struct scmi_xfer *__unused) -{ - struct scmi_mailbox *smbox = cinfo->transport_info; - - /* - * NOTE: we might prefer not to need the mailbox ticker to manage the - * transfer queueing since the protocol layer queues things by itself. - * Unfortunately, we have to kick the mailbox framework after we have - * received our message. 
- */ - mbox_client_txdone(smbox->chan, ret); -} - -static void mailbox_fetch_response(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - struct scmi_mailbox *smbox = cinfo->transport_info; - - shmem_fetch_response(smbox->shmem, xfer); -} - -static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, - size_t max_len, struct scmi_xfer *xfer) -{ - struct scmi_mailbox *smbox = cinfo->transport_info; - - shmem_fetch_notification(smbox->shmem, max_len, xfer); -} - -static void mailbox_clear_channel(struct scmi_chan_info *cinfo) -{ - struct scmi_mailbox *smbox = cinfo->transport_info; - struct mbox_chan *intr_chan; - int ret; - - shmem_clear_channel(smbox->shmem); - - if (!shmem_channel_intr_enabled(smbox->shmem)) - return; - - if (smbox->chan_platform_receiver) - intr_chan = smbox->chan_platform_receiver; - else if (smbox->chan) - intr_chan = smbox->chan; - else - return; - - ret = mbox_send_message(intr_chan, NULL); - /* mbox_send_message returns non-negative value on success, so reset */ - if (ret > 0) - ret = 0; - - mbox_client_txdone(intr_chan, ret); -} - -static bool -mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) -{ - struct scmi_mailbox *smbox = cinfo->transport_info; - - return shmem_poll_done(smbox->shmem, xfer); -} - -static const struct scmi_transport_ops scmi_mailbox_ops = { - .chan_available = mailbox_chan_available, - .chan_setup = mailbox_chan_setup, - .chan_free = mailbox_chan_free, - .send_message = mailbox_send_message, - .mark_txdone = mailbox_mark_txdone, - .fetch_response = mailbox_fetch_response, - .fetch_notification = mailbox_fetch_notification, - .clear_channel = mailbox_clear_channel, - .poll_done = mailbox_poll_done, -}; - -const struct scmi_desc scmi_mailbox_desc = { - .ops = &scmi_mailbox_ops, - .max_rx_timeout_ms = 30, /* We may increase this if required */ - .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ - .max_msg_size = 128, -}; diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c index d33a704e58144c..2cc74e6bbd723f 100644 --- a/drivers/firmware/arm_scmi/msg.c +++ b/drivers/firmware/arm_scmi/msg.c @@ -4,7 +4,7 @@ * * Derived from shm.c. * - * Copyright (C) 2019-2021 ARM Ltd. + * Copyright (C) 2019-2024 ARM Ltd. * Copyright (C) 2020-2021 OpenSynergy GmbH */ @@ -30,7 +30,7 @@ struct scmi_msg_payld { * * Return: transport SDU size. */ -size_t msg_command_size(struct scmi_xfer *xfer) +static size_t msg_command_size(struct scmi_xfer *xfer) { return sizeof(struct scmi_msg_payld) + xfer->tx.len; } @@ -42,7 +42,7 @@ size_t msg_command_size(struct scmi_xfer *xfer) * * Return: transport SDU size. 
*/ -size_t msg_response_size(struct scmi_xfer *xfer) +static size_t msg_response_size(struct scmi_xfer *xfer) { return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len; } @@ -53,7 +53,7 @@ size_t msg_response_size(struct scmi_xfer *xfer) * @msg: transport SDU for command * @xfer: message which is being sent */ -void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) +static void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) { msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr)); if (xfer->tx.buf) @@ -67,7 +67,7 @@ void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) * * Return: SCMI header */ -u32 msg_read_header(struct scmi_msg_payld *msg) +static u32 msg_read_header(struct scmi_msg_payld *msg) { return le32_to_cpu(msg->msg_header); } @@ -79,8 +79,8 @@ u32 msg_read_header(struct scmi_msg_payld *msg) * @len: transport SDU size * @xfer: message being responded to */ -void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, - struct scmi_xfer *xfer) +static void msg_fetch_response(struct scmi_msg_payld *msg, + size_t len, struct scmi_xfer *xfer) { size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]); @@ -100,8 +100,8 @@ void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, * @max_len: maximum SCMI payload size to fetch * @xfer: notification message */ -void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, - size_t max_len, struct scmi_xfer *xfer) +static void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer) { xfer->rx.len = min_t(size_t, max_len, len >= sizeof(*msg) ? len - sizeof(*msg) : 0); @@ -109,3 +109,17 @@ void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, /* Take a copy to the rx buffer.. */ memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len); } + +static const struct scmi_message_operations scmi_msg_ops = { + .tx_prepare = msg_tx_prepare, + .command_size = msg_command_size, + .response_size = msg_response_size, + .read_header = msg_read_header, + .fetch_response = msg_fetch_response, + .fetch_notification = msg_fetch_notification, +}; + +const struct scmi_message_operations *scmi_message_operations_get(void) +{ + return &scmi_msg_ops; +} diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c deleted file mode 100644 index 4e7944b91e3857..00000000000000 --- a/drivers/firmware/arm_scmi/optee.c +++ /dev/null @@ -1,649 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2019-2021 Linaro Ltd. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "common.h" - -#define SCMI_OPTEE_MAX_MSG_SIZE 128 - -enum scmi_optee_pta_cmd { - /* - * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities - * - * [out] value[0].a: Capability bit mask (enum pta_scmi_caps) - * [out] value[0].b: Extended capabilities or 0 - */ - PTA_SCMI_CMD_CAPABILITIES = 0, - - /* - * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer - * - * [in] value[0].a: Channel handle - * - * Shared memory used for SCMI message/response exhange is expected - * already identified and bound to channel handle in both SCMI agent - * and SCMI server (OP-TEE) parts. - * The memory uses SMT header to carry SCMI meta-data (protocol ID and - * protocol message ID). 
- */ - PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1, - - /* - * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message - * - * [in] value[0].a: Channel handle - * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload) - * - * Shared memory used for SCMI message/response is a SMT buffer - * referenced by param[1]. It shall be 128 bytes large to fit response - * payload whatever message playload size. - * The memory uses SMT header to carry SCMI meta-data (protocol ID and - * protocol message ID). - */ - PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2, - - /* - * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle - * - * SCMI shm information are 0 if agent expects to use OP-TEE regular SHM - * - * [in] value[0].a: Channel identifier - * [out] value[0].a: Returned channel handle - * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps) - */ - PTA_SCMI_CMD_GET_CHANNEL = 3, - - /* - * PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG - * buffer pointed by memref parameters - * - * [in] value[0].a: Channel handle - * [in] memref[1]: Message buffer (MSG and SCMI payload) - * [out] memref[2]: Response buffer (MSG and SCMI payload) - * - * Shared memories used for SCMI message/response are MSG buffers - * referenced by param[1] and param[2]. MSG transport protocol - * uses a 32bit header to carry SCMI meta-data (protocol ID and - * protocol message ID) followed by the effective SCMI message - * payload. - */ - PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4, -}; - -/* - * OP-TEE SCMI service capabilities bit flags (32bit) - * - * PTA_SCMI_CAPS_SMT_HEADER - * When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in - * shared memory buffers to carry SCMI protocol synchronisation information. - * - * PTA_SCMI_CAPS_MSG_HEADER - * When set, OP-TEE supports command using MSG header protocol in an OP-TEE - * shared memory to carry SCMI protocol synchronisation information and SCMI - * message payload. 
- */ -#define PTA_SCMI_CAPS_NONE 0 -#define PTA_SCMI_CAPS_SMT_HEADER BIT(0) -#define PTA_SCMI_CAPS_MSG_HEADER BIT(1) -#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \ - PTA_SCMI_CAPS_MSG_HEADER) - -/** - * struct scmi_optee_channel - Description of an OP-TEE SCMI channel - * - * @channel_id: OP-TEE channel ID used for this transport - * @tee_session: TEE session identifier - * @caps: OP-TEE SCMI channel capabilities - * @rx_len: Response size - * @mu: Mutex protection on channel access - * @cinfo: SCMI channel information - * @req: union for SCMI interface - * @req.shmem: Virtual base address of the shared memory - * @req.msg: Shared memory protocol handle for SCMI request and - * synchronous response - * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem - * @link: Reference in agent's channel list - */ -struct scmi_optee_channel { - u32 channel_id; - u32 tee_session; - u32 caps; - u32 rx_len; - struct mutex mu; - struct scmi_chan_info *cinfo; - union { - struct scmi_shared_mem __iomem *shmem; - struct scmi_msg_payld *msg; - } req; - struct tee_shm *tee_shm; - struct list_head link; -}; - -/** - * struct scmi_optee_agent - OP-TEE transport private data - * - * @dev: Device used for communication with TEE - * @tee_ctx: TEE context used for communication - * @caps: Supported channel capabilities - * @mu: Mutex for protection of @channel_list - * @channel_list: List of all created channels for the agent - */ -struct scmi_optee_agent { - struct device *dev; - struct tee_context *tee_ctx; - u32 caps; - struct mutex mu; - struct list_head channel_list; -}; - -/* There can be only 1 SCMI service in OP-TEE we connect to */ -static struct scmi_optee_agent *scmi_optee_private; - -/* Forward reference to scmi_optee transport initialization */ -static int scmi_optee_init(void); - -/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */ -static int open_session(struct scmi_optee_agent *agent, u32 *tee_session) -{ - struct device *dev = agent->dev; - struct tee_client_device *scmi_pta = to_tee_client_device(dev); - struct tee_ioctl_open_session_arg arg = { }; - int ret; - - memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN); - arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL; - - ret = tee_client_open_session(agent->tee_ctx, &arg, NULL); - if (ret < 0 || arg.ret) { - dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret); - return -EOPNOTSUPP; - } - - *tee_session = arg.session; - - return 0; -} - -static void close_session(struct scmi_optee_agent *agent, u32 tee_session) -{ - tee_client_close_session(agent->tee_ctx, tee_session); -} - -static int get_capabilities(struct scmi_optee_agent *agent) -{ - struct tee_ioctl_invoke_arg arg = { }; - struct tee_param param[1] = { }; - u32 caps; - u32 tee_session; - int ret; - - ret = open_session(agent, &tee_session); - if (ret) - return ret; - - arg.func = PTA_SCMI_CMD_CAPABILITIES; - arg.session = tee_session; - arg.num_params = 1; - - param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; - - ret = tee_client_invoke_func(agent->tee_ctx, &arg, param); - - close_session(agent, tee_session); - - if (ret < 0 || arg.ret) { - dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret); - return -EOPNOTSUPP; - } - - caps = param[0].u.value.a; - - if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) { - dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n"); - return -EOPNOTSUPP; - } - - agent->caps = caps; - - return 0; -} - -static int get_channel(struct 
scmi_optee_channel *channel) -{ - struct device *dev = scmi_optee_private->dev; - struct tee_ioctl_invoke_arg arg = { }; - struct tee_param param[1] = { }; - unsigned int caps = 0; - int ret; - - if (channel->tee_shm) - caps = PTA_SCMI_CAPS_MSG_HEADER; - else - caps = PTA_SCMI_CAPS_SMT_HEADER; - - arg.func = PTA_SCMI_CMD_GET_CHANNEL; - arg.session = channel->tee_session; - arg.num_params = 1; - - param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; - param[0].u.value.a = channel->channel_id; - param[0].u.value.b = caps; - - ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); - - if (ret || arg.ret) { - dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret); - return -EOPNOTSUPP; - } - - /* From now on use channel identifer provided by OP-TEE SCMI service */ - channel->channel_id = param[0].u.value.a; - channel->caps = caps; - - return 0; -} - -static int invoke_process_smt_channel(struct scmi_optee_channel *channel) -{ - struct tee_ioctl_invoke_arg arg = { - .func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL, - .session = channel->tee_session, - .num_params = 1, - }; - struct tee_param param[1] = { }; - int ret; - - param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; - param[0].u.value.a = channel->channel_id; - - ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); - if (ret < 0 || arg.ret) { - dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", - channel->channel_id, ret, arg.ret); - return -EIO; - } - - return 0; -} - -static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size) -{ - struct tee_ioctl_invoke_arg arg = { - .func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL, - .session = channel->tee_session, - .num_params = 3, - }; - struct tee_param param[3] = { }; - int ret; - - param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; - param[0].u.value.a = channel->channel_id; - - param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; - param[1].u.memref.shm = channel->tee_shm; - param[1].u.memref.size = msg_size; - - param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT; - param[2].u.memref.shm = channel->tee_shm; - param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; - - ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); - if (ret < 0 || arg.ret) { - dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", - channel->channel_id, ret, arg.ret); - return -EIO; - } - - /* Save response size */ - channel->rx_len = param[2].u.memref.size; - - return 0; -} - -static int scmi_optee_link_supplier(struct device *dev) -{ - if (!scmi_optee_private) { - if (scmi_optee_init()) - dev_dbg(dev, "Optee bus not yet ready\n"); - - /* Wait for optee bus */ - return -EPROBE_DEFER; - } - - if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) { - dev_err(dev, "Adding link to supplier optee device failed\n"); - return -ECANCELED; - } - - return 0; -} - -static bool scmi_optee_chan_available(struct device_node *of_node, int idx) -{ - u32 channel_id; - - return !of_property_read_u32_index(of_node, "linaro,optee-channel-id", - idx, &channel_id); -} - -static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) -{ - struct scmi_optee_channel *channel = cinfo->transport_info; - - if (!channel->tee_shm) - shmem_clear_channel(channel->req.shmem); -} - -static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) -{ - const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; - void *shbuf; - - channel->tee_shm = 
tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); - if (IS_ERR(channel->tee_shm)) { - dev_err(channel->cinfo->dev, "shmem allocation failed\n"); - return -ENOMEM; - } - - shbuf = tee_shm_get_va(channel->tee_shm, 0); - memset(shbuf, 0, msg_size); - channel->req.msg = shbuf; - channel->rx_len = msg_size; - - return 0; -} - -static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, - struct scmi_optee_channel *channel) -{ - struct device_node *np; - resource_size_t size; - struct resource res; - int ret; - - np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0); - if (!of_device_is_compatible(np, "arm,scmi-shmem")) { - ret = -ENXIO; - goto out; - } - - ret = of_address_to_resource(np, 0, &res); - if (ret) { - dev_err(dev, "Failed to get SCMI Tx shared memory\n"); - goto out; - } - - size = resource_size(&res); - - channel->req.shmem = devm_ioremap(dev, res.start, size); - if (!channel->req.shmem) { - dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n"); - ret = -EADDRNOTAVAIL; - goto out; - } - - ret = 0; - -out: - of_node_put(np); - - return ret; -} - -static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, - struct scmi_optee_channel *channel) -{ - if (of_property_present(cinfo->dev->of_node, "shmem")) - return setup_static_shmem(dev, cinfo, channel); - else - return setup_dynamic_shmem(dev, channel); -} - -static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) -{ - struct scmi_optee_channel *channel; - uint32_t channel_id; - int ret; - - if (!tx) - return -ENODEV; - - channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL); - if (!channel) - return -ENOMEM; - - ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id", - 0, &channel_id); - if (ret) - return ret; - - cinfo->transport_info = channel; - channel->cinfo = cinfo; - channel->channel_id = channel_id; - mutex_init(&channel->mu); - - ret = setup_shmem(dev, cinfo, channel); - if (ret) - return ret; - - ret = open_session(scmi_optee_private, &channel->tee_session); - if (ret) - goto err_free_shm; - - ret = tee_client_system_session(scmi_optee_private->tee_ctx, channel->tee_session); - if (ret) - dev_warn(dev, "Could not switch to system session, do best effort\n"); - - ret = get_channel(channel); - if (ret) - goto err_close_sess; - - /* Enable polling */ - cinfo->no_completion_irq = true; - - mutex_lock(&scmi_optee_private->mu); - list_add(&channel->link, &scmi_optee_private->channel_list); - mutex_unlock(&scmi_optee_private->mu); - - return 0; - -err_close_sess: - close_session(scmi_optee_private, channel->tee_session); -err_free_shm: - if (channel->tee_shm) - tee_shm_free(channel->tee_shm); - - return ret; -} - -static int scmi_optee_chan_free(int id, void *p, void *data) -{ - struct scmi_chan_info *cinfo = p; - struct scmi_optee_channel *channel = cinfo->transport_info; - - mutex_lock(&scmi_optee_private->mu); - list_del(&channel->link); - mutex_unlock(&scmi_optee_private->mu); - - close_session(scmi_optee_private, channel->tee_session); - - if (channel->tee_shm) { - tee_shm_free(channel->tee_shm); - channel->tee_shm = NULL; - } - - cinfo->transport_info = NULL; - channel->cinfo = NULL; - - return 0; -} - -static int scmi_optee_send_message(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - struct scmi_optee_channel *channel = cinfo->transport_info; - int ret; - - mutex_lock(&channel->mu); - - if (channel->tee_shm) { - msg_tx_prepare(channel->req.msg, xfer); - ret = invoke_process_msg_channel(channel, 
msg_command_size(xfer)); - } else { - shmem_tx_prepare(channel->req.shmem, xfer, cinfo); - ret = invoke_process_smt_channel(channel); - } - - if (ret) - mutex_unlock(&channel->mu); - - return ret; -} - -static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - struct scmi_optee_channel *channel = cinfo->transport_info; - - if (channel->tee_shm) - msg_fetch_response(channel->req.msg, channel->rx_len, xfer); - else - shmem_fetch_response(channel->req.shmem, xfer); -} - -static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, - struct scmi_xfer *__unused) -{ - struct scmi_optee_channel *channel = cinfo->transport_info; - - mutex_unlock(&channel->mu); -} - -static struct scmi_transport_ops scmi_optee_ops = { - .link_supplier = scmi_optee_link_supplier, - .chan_available = scmi_optee_chan_available, - .chan_setup = scmi_optee_chan_setup, - .chan_free = scmi_optee_chan_free, - .send_message = scmi_optee_send_message, - .mark_txdone = scmi_optee_mark_txdone, - .fetch_response = scmi_optee_fetch_response, - .clear_channel = scmi_optee_clear_channel, -}; - -static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) -{ - return ver->impl_id == TEE_IMPL_ID_OPTEE; -} - -static int scmi_optee_service_probe(struct device *dev) -{ - struct scmi_optee_agent *agent; - struct tee_context *tee_ctx; - int ret; - - /* Only one SCMI OP-TEE device allowed */ - if (scmi_optee_private) { - dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n"); - return -EBUSY; - } - - tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL); - if (IS_ERR(tee_ctx)) - return -ENODEV; - - agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL); - if (!agent) { - ret = -ENOMEM; - goto err; - } - - agent->dev = dev; - agent->tee_ctx = tee_ctx; - INIT_LIST_HEAD(&agent->channel_list); - mutex_init(&agent->mu); - - ret = get_capabilities(agent); - if (ret) - goto err; - - /* Ensure agent resources are all visible before scmi_optee_private is */ - smp_mb(); - scmi_optee_private = agent; - - return 0; - -err: - tee_client_close_context(tee_ctx); - - return ret; -} - -static int scmi_optee_service_remove(struct device *dev) -{ - struct scmi_optee_agent *agent = scmi_optee_private; - - if (!scmi_optee_private) - return -EINVAL; - - if (!list_empty(&scmi_optee_private->channel_list)) - return -EBUSY; - - /* Ensure cleared reference is visible before resources are released */ - smp_store_mb(scmi_optee_private, NULL); - - tee_client_close_context(agent->tee_ctx); - - return 0; -} - -static const struct tee_client_device_id scmi_optee_service_id[] = { - { - UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e, - 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99) - }, - { } -}; - -MODULE_DEVICE_TABLE(tee, scmi_optee_service_id); - -static struct tee_client_driver scmi_optee_driver = { - .id_table = scmi_optee_service_id, - .driver = { - .name = "scmi-optee", - .bus = &tee_bus_type, - .probe = scmi_optee_service_probe, - .remove = scmi_optee_service_remove, - }, -}; - -static int scmi_optee_init(void) -{ - return driver_register(&scmi_optee_driver.driver); -} - -static void scmi_optee_exit(void) -{ - if (scmi_optee_private) - driver_unregister(&scmi_optee_driver.driver); -} - -const struct scmi_desc scmi_optee_desc = { - .transport_exit = scmi_optee_exit, - .ops = &scmi_optee_ops, - .max_rx_timeout_ms = 30, - .max_msg = 20, - .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, - .sync_cmds_completed_on_ret = true, -}; diff --git 
a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 4b7f1cbb9b04d6..2d77b5f40ca710 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -310,7 +310,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, } if (!dom_info->mult_factor) dev_warn(ph->dev, - "Wrong sustained perf/frequency(domain %d)\n", + "Wrong sustained perf/frequency(domain %d)\n", dom_info->id); strscpy(dom_info->info.name, attr->name, diff --git a/drivers/firmware/arm_scmi/pinctrl.c b/drivers/firmware/arm_scmi/pinctrl.c index a2a7f880d6a32d..3855c98caf06bd 100644 --- a/drivers/firmware/arm_scmi/pinctrl.c +++ b/drivers/firmware/arm_scmi/pinctrl.c @@ -913,4 +913,5 @@ static const struct scmi_protocol scmi_pinctrl = { .ops = &pinctrl_proto_ops, .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, }; + DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(pinctrl, scmi_pinctrl) diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index 49666bd1d8ac0c..59aa16444c6432 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -14,7 +14,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001 enum scmi_power_protocol_cmd { POWER_DOMAIN_ATTRIBUTES = 0x3, diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index 8e95f53bd7b76a..aaee57cdcd5589 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -22,7 +22,7 @@ #include #include -#include +#include #define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) #define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c index 130d13e9cd6beb..9e89a6a763daf5 100644 --- a/drivers/firmware/arm_scmi/raw_mode.c +++ b/drivers/firmware/arm_scmi/raw_mode.c @@ -950,7 +950,6 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = { .open = scmi_dbg_raw_mode_open, .release = scmi_dbg_raw_mode_release, .write = scmi_dbg_raw_mode_reset_write, - .llseek = no_llseek, .owner = THIS_MODULE, }; @@ -960,7 +959,6 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = { .read = scmi_dbg_raw_mode_message_read, .write = scmi_dbg_raw_mode_message_write, .poll = scmi_dbg_raw_mode_message_poll, - .llseek = no_llseek, .owner = THIS_MODULE, }; @@ -977,7 +975,6 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = { .read = scmi_dbg_raw_mode_message_read, .write = scmi_dbg_raw_mode_message_async_write, .poll = scmi_dbg_raw_mode_message_poll, - .llseek = no_llseek, .owner = THIS_MODULE, }; @@ -1001,7 +998,6 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = { .release = scmi_dbg_raw_mode_release, .read = scmi_test_dbg_raw_mode_notif_read, .poll = scmi_test_dbg_raw_mode_notif_poll, - .llseek = no_llseek, .owner = THIS_MODULE, }; @@ -1025,7 +1021,6 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = { .release = scmi_dbg_raw_mode_release, .read = scmi_test_dbg_raw_mode_errors_read, .poll = scmi_test_dbg_raw_mode_errors_poll, - .llseek = no_llseek, .owner = THIS_MODULE, }; diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index 1b318316535ec4..0aa82b96f41b9c 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -14,7 +14,7 @@ #include 
"notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001 enum scmi_reset_protocol_cmd { RESET_DOMAIN_ATTRIBUTES = 0x3, diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 7fc5535ca34c71..791efd0f82d737 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -15,7 +15,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30001 #define SCMI_MAX_NUM_SENSOR_AXIS 63 #define SCMIv2_SENSOR_PROTOCOL 0x10000 diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index b74e5a740f2c28..01d8a9398fe8e0 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -2,11 +2,13 @@ /* * For transport using shared mem structure. * - * Copyright (C) 2019 ARM Ltd. + * Copyright (C) 2019-2024 ARM Ltd. */ #include #include +#include +#include #include #include @@ -32,8 +34,9 @@ struct scmi_shared_mem { u8 msg_payload[]; }; -void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer, struct scmi_chan_info *cinfo) +static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer, + struct scmi_chan_info *cinfo) { ktime_t stop; @@ -73,13 +76,13 @@ void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); } -u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) +static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) { return ioread32(&shmem->msg_header); } -void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer) +static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer) { size_t len = ioread32(&shmem->length); @@ -91,8 +94,8 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); } -void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, - size_t max_len, struct scmi_xfer *xfer) +static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, + size_t max_len, struct scmi_xfer *xfer) { size_t len = ioread32(&shmem->length); @@ -103,13 +106,13 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); } -void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem) +static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem) { iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status); } -bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer) +static bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, + struct scmi_xfer *xfer) { u16 xfer_id; @@ -123,13 +126,69 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); } -bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem) +static bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem) { return (ioread32(&shmem->channel_status) & SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); } -bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem) +static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem) { return ioread32(&shmem->flags) & 
SCMI_SHMEM_FLAG_INTR_ENABLED; } + +static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo, + struct device *dev, bool tx, + struct resource *res) +{ + struct device_node *shmem __free(device_node); + const char *desc = tx ? "Tx" : "Rx"; + int ret, idx = tx ? 0 : 1; + struct device *cdev = cinfo->dev; + struct resource lres = {}; + resource_size_t size; + void __iomem *addr; + + shmem = of_parse_phandle(cdev->of_node, "shmem", idx); + if (!shmem) + return IOMEM_ERR_PTR(-ENODEV); + + if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) + return IOMEM_ERR_PTR(-ENXIO); + + /* Use a local on-stack as a working area when not provided */ + if (!res) + res = &lres; + + ret = of_address_to_resource(shmem, 0, res); + if (ret) { + dev_err(cdev, "failed to get SCMI %s shared memory\n", desc); + return IOMEM_ERR_PTR(ret); + } + + size = resource_size(res); + addr = devm_ioremap(dev, res->start, size); + if (!addr) { + dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); + return IOMEM_ERR_PTR(-EADDRNOTAVAIL); + } + + return addr; +} + +static const struct scmi_shared_mem_operations scmi_shmem_ops = { + .tx_prepare = shmem_tx_prepare, + .read_header = shmem_read_header, + .fetch_response = shmem_fetch_response, + .fetch_notification = shmem_fetch_notification, + .clear_channel = shmem_clear_channel, + .poll_done = shmem_poll_done, + .channel_free = shmem_channel_free, + .channel_intr_enabled = shmem_channel_intr_enabled, + .setup_iomap = shmem_setup_iomap, +}; + +const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void) +{ + return &scmi_shmem_ops; +} diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c deleted file mode 100644 index 39936e1dd30e98..00000000000000 --- a/drivers/firmware/arm_scmi/smc.c +++ /dev/null @@ -1,305 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * System Control and Management Interface (SCMI) Message SMC/HVC - * Transport driver - * - * Copyright 2020 NXP - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "common.h" - -/* - * The shmem address is split into 4K page and offset. - * This is to make sure the parameters fit in 32bit arguments of the - * smc/hvc call to keep it uniform across smc32/smc64 conventions. - * This however limits the shmem address to 44 bit. - * - * These optional parameters can be used to distinguish among multiple - * scmi instances that are using the same smc-id. - * The page parameter is passed in r1/x1/w1 register and the offset parameter - * is passed in r2/x2/w2 register. - */ - -#define SHMEM_SIZE (SZ_4K) -#define SHMEM_SHIFT 12 -#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT)) -#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1)) - -/** - * struct scmi_smc - Structure representing a SCMI smc transport - * - * @irq: An optional IRQ for completion - * @cinfo: SCMI channel info - * @shmem: Transmit/Receive shared memory area - * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. - * Used when NOT operating in atomic mode. - * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. - * Used when operating in atomic mode. 
- * @func_id: smc/hvc call function id - * @param_page: 4K page number of the shmem channel - * @param_offset: Offset within the 4K page of the shmem channel - * @cap_id: smc/hvc doorbell's capability id to be used on Qualcomm virtual - * platforms - */ - -struct scmi_smc { - int irq; - struct scmi_chan_info *cinfo; - struct scmi_shared_mem __iomem *shmem; - /* Protect access to shmem area */ - struct mutex shmem_lock; -#define INFLIGHT_NONE MSG_TOKEN_MAX - atomic_t inflight; - unsigned long func_id; - unsigned long param_page; - unsigned long param_offset; - unsigned long cap_id; -}; - -static irqreturn_t smc_msg_done_isr(int irq, void *data) -{ - struct scmi_smc *scmi_info = data; - - scmi_rx_callback(scmi_info->cinfo, - shmem_read_header(scmi_info->shmem), NULL); - - return IRQ_HANDLED; -} - -static bool smc_chan_available(struct device_node *of_node, int idx) -{ - struct device_node *np = of_parse_phandle(of_node, "shmem", 0); - if (!np) - return false; - - of_node_put(np); - return true; -} - -static inline void smc_channel_lock_init(struct scmi_smc *scmi_info) -{ - if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) - atomic_set(&scmi_info->inflight, INFLIGHT_NONE); - else - mutex_init(&scmi_info->shmem_lock); -} - -static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) -{ - int ret; - - ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); - - return ret == INFLIGHT_NONE; -} - -static inline void -smc_channel_lock_acquire(struct scmi_smc *scmi_info, - struct scmi_xfer *xfer __maybe_unused) -{ - if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) - spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); - else - mutex_lock(&scmi_info->shmem_lock); -} - -static inline void smc_channel_lock_release(struct scmi_smc *scmi_info) -{ - if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) - atomic_set(&scmi_info->inflight, INFLIGHT_NONE); - else - mutex_unlock(&scmi_info->shmem_lock); -} - -static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, - bool tx) -{ - struct device *cdev = cinfo->dev; - unsigned long cap_id = ULONG_MAX; - struct scmi_smc *scmi_info; - resource_size_t size; - struct resource res; - struct device_node *np; - u32 func_id; - int ret; - - if (!tx) - return -ENODEV; - - scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL); - if (!scmi_info) - return -ENOMEM; - - np = of_parse_phandle(cdev->of_node, "shmem", 0); - if (!of_device_is_compatible(np, "arm,scmi-shmem")) { - of_node_put(np); - return -ENXIO; - } - - ret = of_address_to_resource(np, 0, &res); - of_node_put(np); - if (ret) { - dev_err(cdev, "failed to get SCMI Tx shared memory\n"); - return ret; - } - - size = resource_size(&res); - scmi_info->shmem = devm_ioremap(dev, res.start, size); - if (!scmi_info->shmem) { - dev_err(dev, "failed to ioremap SCMI Tx shared memory\n"); - return -EADDRNOTAVAIL; - } - - ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id); - if (ret < 0) - return ret; - - if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) { - void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8; - /* The capability-id is kept in last 8 bytes of shmem. 
- * +-------+ <-- 0 - * | shmem | - * +-------+ <-- size - 8 - * | capId | - * +-------+ <-- size - */ - memcpy_fromio(&cap_id, ptr, sizeof(cap_id)); - } - - if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) { - scmi_info->param_page = SHMEM_PAGE(res.start); - scmi_info->param_offset = SHMEM_OFFSET(res.start); - } - /* - * If there is an interrupt named "a2p", then the service and - * completion of a message is signaled by an interrupt rather than by - * the return of the SMC call. - */ - scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p"); - if (scmi_info->irq > 0) { - ret = request_irq(scmi_info->irq, smc_msg_done_isr, - IRQF_NO_SUSPEND, dev_name(dev), scmi_info); - if (ret) { - dev_err(dev, "failed to setup SCMI smc irq\n"); - return ret; - } - } else { - cinfo->no_completion_irq = true; - } - - scmi_info->func_id = func_id; - scmi_info->cap_id = cap_id; - scmi_info->cinfo = cinfo; - smc_channel_lock_init(scmi_info); - cinfo->transport_info = scmi_info; - - return 0; -} - -static int smc_chan_free(int id, void *p, void *data) -{ - struct scmi_chan_info *cinfo = p; - struct scmi_smc *scmi_info = cinfo->transport_info; - - /* - * Different protocols might share the same chan info, so a previous - * smc_chan_free call might have already freed the structure. - */ - if (!scmi_info) - return 0; - - /* Ignore any possible further reception on the IRQ path */ - if (scmi_info->irq > 0) - free_irq(scmi_info->irq, scmi_info); - - cinfo->transport_info = NULL; - scmi_info->cinfo = NULL; - - return 0; -} - -static int smc_send_message(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - struct scmi_smc *scmi_info = cinfo->transport_info; - struct arm_smccc_res res; - - /* - * Channel will be released only once response has been - * surely fully retrieved, so after .mark_txdone() - */ - smc_channel_lock_acquire(scmi_info, xfer); - - shmem_tx_prepare(scmi_info->shmem, xfer, cinfo); - - if (scmi_info->cap_id != ULONG_MAX) - arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0, - 0, 0, 0, 0, 0, &res); - else - arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->param_page, - scmi_info->param_offset, 0, 0, 0, 0, 0, - &res); - - /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ - if (res.a0) { - smc_channel_lock_release(scmi_info); - return -EOPNOTSUPP; - } - - return 0; -} - -static void smc_fetch_response(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - struct scmi_smc *scmi_info = cinfo->transport_info; - - shmem_fetch_response(scmi_info->shmem, xfer); -} - -static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, - struct scmi_xfer *__unused) -{ - struct scmi_smc *scmi_info = cinfo->transport_info; - - smc_channel_lock_release(scmi_info); -} - -static const struct scmi_transport_ops scmi_smc_ops = { - .chan_available = smc_chan_available, - .chan_setup = smc_chan_setup, - .chan_free = smc_chan_free, - .send_message = smc_send_message, - .mark_txdone = smc_mark_txdone, - .fetch_response = smc_fetch_response, -}; - -const struct scmi_desc scmi_smc_desc = { - .ops = &scmi_smc_ops, - .max_rx_timeout_ms = 30, - .max_msg = 20, - .max_msg_size = 128, - /* - * Setting .sync_cmds_atomic_replies to true for SMC assumes that, - * once the SMC instruction has completed successfully, the issued - * SCMI command would have been already fully processed by the SCMI - * platform firmware and so any possible response value expected - * for the issued command will be immmediately ready to be fetched - * from the shared memory area. 
- */ - .sync_cmds_completed_on_ret = true, - .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE), -}; diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c index b6358c155f7fca..ec3d355d177226 100644 --- a/drivers/firmware/arm_scmi/system.c +++ b/drivers/firmware/arm_scmi/system.c @@ -14,7 +14,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001 #define SCMI_SYSTEM_NUM_SOURCES 1 diff --git a/drivers/firmware/arm_scmi/transports/Kconfig b/drivers/firmware/arm_scmi/transports/Kconfig new file mode 100644 index 00000000000000..57eccf316e26b3 --- /dev/null +++ b/drivers/firmware/arm_scmi/transports/Kconfig @@ -0,0 +1,123 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "SCMI Transport Drivers" + +config ARM_SCMI_HAVE_TRANSPORT + bool + help + This declares whether at least one SCMI transport has been configured. + Used to trigger a build bug when trying to build SCMI without any + configured transport. + +config ARM_SCMI_HAVE_SHMEM + bool + help + This declares whether a shared memory based transport for SCMI is + available. + +config ARM_SCMI_HAVE_MSG + bool + help + This declares whether a message passing based transport for SCMI is + available. + +config ARM_SCMI_TRANSPORT_MAILBOX + tristate "SCMI transport based on Mailbox" + depends on MAILBOX + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + default y + help + Enable mailbox based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on mailboxes, answer Y. + This driver can also be built as a module. If so, the module + will be called scmi_transport_mailbox. + +config ARM_SCMI_TRANSPORT_SMC + tristate "SCMI transport based on SMC" + depends on HAVE_ARM_SMCCC_DISCOVERY + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + default y + help + Enable SMC based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on SMC, answer Y. + This driver can also be built as a module. If so, the module + will be called scmi_transport_smc. + +config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE + bool "Enable atomic mode support for SCMI SMC transport" + depends on ARM_SCMI_TRANSPORT_SMC + help + Enable support of atomic operation for SCMI SMC based transport. + + If you want the SCMI SMC based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + +config ARM_SCMI_TRANSPORT_OPTEE + tristate "SCMI transport based on OP-TEE service" + depends on OPTEE + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + select ARM_SCMI_HAVE_MSG + default y + help + This enables the OP-TEE service based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on OP-TEE SCMI service, answer Y. + This driver can also be built as a module. If so, the module + will be called scmi_transport_optee. 
+ +config ARM_SCMI_TRANSPORT_VIRTIO + tristate "SCMI transport based on VirtIO" + depends on VIRTIO + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_MSG + help + This enables the virtio based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on VirtIO, answer Y. + This driver can also be built as a module. If so, the module + will be called scmi_transport_virtio. + +config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE + bool "SCMI VirtIO transport Version 1 compliance" + depends on ARM_SCMI_TRANSPORT_VIRTIO + default y + help + This enforces strict compliance with VirtIO Version 1 specification. + + If you want the ARM SCMI VirtIO transport layer to refuse to work + with Legacy VirtIO backends and instead support only VirtIO Version 1 + devices (or above), answer Y. + + If you want instead to support also old Legacy VirtIO backends (like + the ones implemented by kvmtool) and let the core Kernel VirtIO layer + take care of the needed conversions, say N. + +config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE + bool "Enable atomic mode for SCMI VirtIO transport" + depends on ARM_SCMI_TRANSPORT_VIRTIO + help + Enable support of atomic operation for SCMI VirtIO based transport. + + If you want the SCMI VirtIO based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. + + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + +endmenu diff --git a/drivers/firmware/arm_scmi/transports/Makefile b/drivers/firmware/arm_scmi/transports/Makefile new file mode 100644 index 00000000000000..362a406f08e674 --- /dev/null +++ b/drivers/firmware/arm_scmi/transports/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +scmi_transport_mailbox-objs := mailbox.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += scmi_transport_mailbox.o +scmi_transport_smc-objs := smc.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += scmi_transport_smc.o +scmi_transport_optee-objs := optee.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += scmi_transport_optee.o +scmi_transport_virtio-objs := virtio.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += scmi_transport_virtio.o + +ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) +# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame +# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling +# hooks are inserted via the -pg switch. +CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE) +endif diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c new file mode 100644 index 00000000000000..1a754dee24f730 --- /dev/null +++ b/drivers/firmware/arm_scmi/transports/mailbox.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Mailbox Transport + * driver. + * + * Copyright (C) 2019-2024 ARM Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../common.h" + +/** + * struct scmi_mailbox - Structure representing a SCMI mailbox transport + * + * @cl: Mailbox Client + * @chan: Transmit/Receive mailbox uni/bi-directional channel + * @chan_receiver: Optional Receiver mailbox unidirectional channel + * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel + * @cinfo: SCMI channel info + * @shmem: Transmit/Receive shared memory area + */ +struct scmi_mailbox { + struct mbox_client cl; + struct mbox_chan *chan; + struct mbox_chan *chan_receiver; + struct mbox_chan *chan_platform_receiver; + struct scmi_chan_info *cinfo; + struct scmi_shared_mem __iomem *shmem; +}; + +#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) + +static struct scmi_transport_core_operations *core; + +static void tx_prepare(struct mbox_client *cl, void *m) +{ + struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); + + core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo); +} + +static void rx_callback(struct mbox_client *cl, void *m) +{ + struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); + + /* + * An A2P IRQ is NOT valid when received while the platform still has + * the ownership of the channel, because the platform at first releases + * the SMT channel and then sends the completion interrupt. + * + * This addresses a possible race condition in which a spurious IRQ from + * a previous timed-out reply which arrived late could be wrongly + * associated with the next pending transaction. + */ + if (cl->knows_txdone && + !core->shmem->channel_free(smbox->shmem)) { + dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n"); + core->bad_message_trace(smbox->cinfo, + core->shmem->read_header(smbox->shmem), + MSG_MBOX_SPURIOUS); + return; + } + + core->rx_callback(smbox->cinfo, + core->shmem->read_header(smbox->shmem), NULL); +} + +static bool mailbox_chan_available(struct device_node *of_node, int idx) +{ + int num_mb; + + /* + * Just check if bidirrectional channels are involved, and check the + * index accordingly; proper full validation will be made later + * in mailbox_chan_setup(). + */ + num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells"); + if (num_mb == 3 && idx == 1) + idx = 2; + + return !of_parse_phandle_with_args(of_node, "mboxes", + "#mbox-cells", idx, NULL); +} + +/** + * mailbox_chan_validate - Validate transport configuration and map channels + * + * @cdev: Reference to the underlying transport device carrying the + * of_node descriptor to analyze. + * @a2p_rx_chan: A reference to an optional unidirectional channel to use + * for replies on the a2p channel. Set as zero if not present. + * @p2a_chan: A reference to the optional p2a channel. + * Set as zero if not present. + * @p2a_rx_chan: A reference to the optional p2a completion channel. + * Set as zero if not present. + * + * At first, validate the transport configuration as described in terms of + * 'mboxes' and 'shmem', then determin which mailbox channel indexes are + * appropriate to be use in the current configuration. 
+ * + * Return: 0 on Success or error + */ +static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan, + int *p2a_chan, int *p2a_rx_chan) +{ + int num_mb, num_sh, ret = 0; + struct device_node *np = cdev->of_node; + + num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); + num_sh = of_count_phandle_with_args(np, "shmem", NULL); + dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh); + + /* Bail out if mboxes and shmem descriptors are inconsistent */ + if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 || + (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) || + (num_mb == 4 && num_sh != 2)) { + dev_warn(cdev, + "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n", + of_node_full_name(np), num_mb, num_sh); + return -EINVAL; + } + + /* Bail out if provided shmem descriptors do not refer distinct areas */ + if (num_sh > 1) { + struct device_node *np_tx __free(device_node) = + of_parse_phandle(np, "shmem", 0); + struct device_node *np_rx __free(device_node) = + of_parse_phandle(np, "shmem", 1); + + if (!np_tx || !np_rx || np_tx == np_rx) { + dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", + of_node_full_name(np)); + ret = -EINVAL; + } + } + + /* Calculate channels IDs to use depending on mboxes/shmem layout */ + if (!ret) { + switch (num_mb) { + case 1: + *a2p_rx_chan = 0; + *p2a_chan = 0; + *p2a_rx_chan = 0; + break; + case 2: + if (num_sh == 2) { + *a2p_rx_chan = 0; + *p2a_chan = 1; + } else { + *a2p_rx_chan = 1; + *p2a_chan = 0; + } + *p2a_rx_chan = 0; + break; + case 3: + *a2p_rx_chan = 1; + *p2a_chan = 2; + *p2a_rx_chan = 0; + break; + case 4: + *a2p_rx_chan = 1; + *p2a_chan = 2; + *p2a_rx_chan = 3; + break; + } + } + + return ret; +} + +static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + const char *desc = tx ? "Tx" : "Rx"; + struct device *cdev = cinfo->dev; + struct scmi_mailbox *smbox; + int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan; + struct mbox_client *cl; + + ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan); + if (ret) + return ret; + + if (!tx && !p2a_chan) + return -ENODEV; + + smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL); + if (!smbox) + return -ENOMEM; + + smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL); + if (IS_ERR(smbox->shmem)) + return PTR_ERR(smbox->shmem); + + cl = &smbox->cl; + cl->dev = cdev; + cl->tx_prepare = tx ? tx_prepare : NULL; + cl->rx_callback = rx_callback; + cl->tx_block = false; + cl->knows_txdone = tx; + + smbox->chan = mbox_request_channel(cl, tx ? 
0 : p2a_chan); + if (IS_ERR(smbox->chan)) { + ret = PTR_ERR(smbox->chan); + if (ret != -EPROBE_DEFER) + dev_err(cdev, + "failed to request SCMI %s mailbox\n", desc); + return ret; + } + + /* Additional unidirectional channel for TX if needed */ + if (tx && a2p_rx_chan) { + smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan); + if (IS_ERR(smbox->chan_receiver)) { + ret = PTR_ERR(smbox->chan_receiver); + if (ret != -EPROBE_DEFER) + dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n"); + return ret; + } + } + + if (!tx && p2a_rx_chan) { + smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan); + if (IS_ERR(smbox->chan_platform_receiver)) { + ret = PTR_ERR(smbox->chan_platform_receiver); + if (ret != -EPROBE_DEFER) + dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n"); + return ret; + } + } + + cinfo->transport_info = smbox; + smbox->cinfo = cinfo; + + return 0; +} + +static int mailbox_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_mailbox *smbox = cinfo->transport_info; + + if (smbox && !IS_ERR(smbox->chan)) { + mbox_free_channel(smbox->chan); + mbox_free_channel(smbox->chan_receiver); + mbox_free_channel(smbox->chan_platform_receiver); + cinfo->transport_info = NULL; + smbox->chan = NULL; + smbox->chan_receiver = NULL; + smbox->chan_platform_receiver = NULL; + smbox->cinfo = NULL; + } + + return 0; +} + +static int mailbox_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + int ret; + + ret = mbox_send_message(smbox->chan, xfer); + + /* mbox_send_message returns non-negative value on success, so reset */ + if (ret > 0) + ret = 0; + + return ret; +} + +static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + /* + * NOTE: we might prefer not to need the mailbox ticker to manage the + * transfer queueing since the protocol layer queues things by itself. + * Unfortunately, we have to kick the mailbox framework after we have + * received our message. 
+ */ + mbox_client_txdone(smbox->chan, ret); +} + +static void mailbox_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + core->shmem->fetch_response(smbox->shmem, xfer); +} + +static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, + size_t max_len, struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + core->shmem->fetch_notification(smbox->shmem, max_len, xfer); +} + +static void mailbox_clear_channel(struct scmi_chan_info *cinfo) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + struct mbox_chan *intr_chan; + int ret; + + core->shmem->clear_channel(smbox->shmem); + + if (!core->shmem->channel_intr_enabled(smbox->shmem)) + return; + + if (smbox->chan_platform_receiver) + intr_chan = smbox->chan_platform_receiver; + else if (smbox->chan) + intr_chan = smbox->chan; + else + return; + + ret = mbox_send_message(intr_chan, NULL); + /* mbox_send_message returns non-negative value on success, so reset */ + if (ret > 0) + ret = 0; + + mbox_client_txdone(intr_chan, ret); +} + +static bool +mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) +{ + struct scmi_mailbox *smbox = cinfo->transport_info; + + return core->shmem->poll_done(smbox->shmem, xfer); +} + +static const struct scmi_transport_ops scmi_mailbox_ops = { + .chan_available = mailbox_chan_available, + .chan_setup = mailbox_chan_setup, + .chan_free = mailbox_chan_free, + .send_message = mailbox_send_message, + .mark_txdone = mailbox_mark_txdone, + .fetch_response = mailbox_fetch_response, + .fetch_notification = mailbox_fetch_notification, + .clear_channel = mailbox_clear_channel, + .poll_done = mailbox_poll_done, +}; + +static struct scmi_desc scmi_mailbox_desc = { + .ops = &scmi_mailbox_ops, + .max_rx_timeout_ms = 30, /* We may increase this if required */ + .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ + .max_msg_size = 128, +}; + +static const struct of_device_id scmi_of_match[] = { + { .compatible = "arm,scmi" }, + { /* Sentinel */ }, +}; + +DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver, + scmi_mailbox_desc, scmi_of_match, core); +module_platform_driver(scmi_mailbox_driver); + +MODULE_AUTHOR("Sudeep Holla "); +MODULE_DESCRIPTION("SCMI Mailbox Transport driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c new file mode 100644 index 00000000000000..56fc63edf51e23 --- /dev/null +++ b/drivers/firmware/arm_scmi/transports/optee.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019-2021 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../common.h" + +#define SCMI_OPTEE_MAX_MSG_SIZE 128 + +enum scmi_optee_pta_cmd { + /* + * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities + * + * [out] value[0].a: Capability bit mask (enum pta_scmi_caps) + * [out] value[0].b: Extended capabilities or 0 + */ + PTA_SCMI_CMD_CAPABILITIES = 0, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer + * + * [in] value[0].a: Channel handle + * + * Shared memory used for SCMI message/response exhange is expected + * already identified and bound to channel handle in both SCMI agent + * and SCMI server (OP-TEE) parts. + * The memory uses SMT header to carry SCMI meta-data (protocol ID and + * protocol message ID). 
+ */ + PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message + * + * [in] value[0].a: Channel handle + * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload) + * + * Shared memory used for SCMI message/response is a SMT buffer + * referenced by param[1]. It shall be 128 bytes large to fit response + * payload whatever message playload size. + * The memory uses SMT header to carry SCMI meta-data (protocol ID and + * protocol message ID). + */ + PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2, + + /* + * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle + * + * SCMI shm information are 0 if agent expects to use OP-TEE regular SHM + * + * [in] value[0].a: Channel identifier + * [out] value[0].a: Returned channel handle + * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps) + */ + PTA_SCMI_CMD_GET_CHANNEL = 3, + + /* + * PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG + * buffer pointed by memref parameters + * + * [in] value[0].a: Channel handle + * [in] memref[1]: Message buffer (MSG and SCMI payload) + * [out] memref[2]: Response buffer (MSG and SCMI payload) + * + * Shared memories used for SCMI message/response are MSG buffers + * referenced by param[1] and param[2]. MSG transport protocol + * uses a 32bit header to carry SCMI meta-data (protocol ID and + * protocol message ID) followed by the effective SCMI message + * payload. + */ + PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4, +}; + +/* + * OP-TEE SCMI service capabilities bit flags (32bit) + * + * PTA_SCMI_CAPS_SMT_HEADER + * When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in + * shared memory buffers to carry SCMI protocol synchronisation information. + * + * PTA_SCMI_CAPS_MSG_HEADER + * When set, OP-TEE supports command using MSG header protocol in an OP-TEE + * shared memory to carry SCMI protocol synchronisation information and SCMI + * message payload. 
+ */ +#define PTA_SCMI_CAPS_NONE 0 +#define PTA_SCMI_CAPS_SMT_HEADER BIT(0) +#define PTA_SCMI_CAPS_MSG_HEADER BIT(1) +#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \ + PTA_SCMI_CAPS_MSG_HEADER) + +/** + * struct scmi_optee_channel - Description of an OP-TEE SCMI channel + * + * @channel_id: OP-TEE channel ID used for this transport + * @tee_session: TEE session identifier + * @caps: OP-TEE SCMI channel capabilities + * @rx_len: Response size + * @mu: Mutex protection on channel access + * @cinfo: SCMI channel information + * @req: union for SCMI interface + * @req.shmem: Virtual base address of the shared memory + * @req.msg: Shared memory protocol handle for SCMI request and + * synchronous response + * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem + * @link: Reference in agent's channel list + */ +struct scmi_optee_channel { + u32 channel_id; + u32 tee_session; + u32 caps; + u32 rx_len; + struct mutex mu; + struct scmi_chan_info *cinfo; + union { + struct scmi_shared_mem __iomem *shmem; + struct scmi_msg_payld *msg; + } req; + struct tee_shm *tee_shm; + struct list_head link; +}; + +/** + * struct scmi_optee_agent - OP-TEE transport private data + * + * @dev: Device used for communication with TEE + * @tee_ctx: TEE context used for communication + * @caps: Supported channel capabilities + * @mu: Mutex for protection of @channel_list + * @channel_list: List of all created channels for the agent + */ +struct scmi_optee_agent { + struct device *dev; + struct tee_context *tee_ctx; + u32 caps; + struct mutex mu; + struct list_head channel_list; +}; + +static struct scmi_transport_core_operations *core; + +/* There can be only 1 SCMI service in OP-TEE we connect to */ +static struct scmi_optee_agent *scmi_optee_private; + +/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */ +static int open_session(struct scmi_optee_agent *agent, u32 *tee_session) +{ + struct device *dev = agent->dev; + struct tee_client_device *scmi_pta = to_tee_client_device(dev); + struct tee_ioctl_open_session_arg arg = { }; + int ret; + + memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN); + arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL; + + ret = tee_client_open_session(agent->tee_ctx, &arg, NULL); + if (ret < 0 || arg.ret) { + dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + *tee_session = arg.session; + + return 0; +} + +static void close_session(struct scmi_optee_agent *agent, u32 tee_session) +{ + tee_client_close_session(agent->tee_ctx, tee_session); +} + +static int get_capabilities(struct scmi_optee_agent *agent) +{ + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + u32 caps; + u32 tee_session; + int ret; + + ret = open_session(agent, &tee_session); + if (ret) + return ret; + + arg.func = PTA_SCMI_CMD_CAPABILITIES; + arg.session = tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; + + ret = tee_client_invoke_func(agent->tee_ctx, &arg, param); + + close_session(agent, tee_session); + + if (ret < 0 || arg.ret) { + dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + caps = param[0].u.value.a; + + if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) { + dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n"); + return -EOPNOTSUPP; + } + + agent->caps = caps; + + return 0; +} + +static int get_channel(struct scmi_optee_channel *channel) +{ + struct device *dev = 
scmi_optee_private->dev; + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + unsigned int caps = 0; + int ret; + + if (channel->tee_shm) + caps = PTA_SCMI_CAPS_MSG_HEADER; + else + caps = PTA_SCMI_CAPS_SMT_HEADER; + + arg.func = PTA_SCMI_CMD_GET_CHANNEL; + arg.session = channel->tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param[0].u.value.a = channel->channel_id; + param[0].u.value.b = caps; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + + if (ret || arg.ret) { + dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret); + return -EOPNOTSUPP; + } + + /* From now on use channel identifer provided by OP-TEE SCMI service */ + channel->channel_id = param[0].u.value.a; + channel->caps = caps; + + return 0; +} + +static int invoke_process_smt_channel(struct scmi_optee_channel *channel) +{ + struct tee_ioctl_invoke_arg arg = { + .func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL, + .session = channel->tee_session, + .num_params = 1, + }; + struct tee_param param[1] = { }; + int ret; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; + } + + return 0; +} + +static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size) +{ + struct tee_ioctl_invoke_arg arg = { + .func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL, + .session = channel->tee_session, + .num_params = 3, + }; + struct tee_param param[3] = { }; + int ret; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + param[1].u.memref.shm = channel->tee_shm; + param[1].u.memref.size = msg_size; + + param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT; + param[2].u.memref.shm = channel->tee_shm; + param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; + } + + /* Save response size */ + channel->rx_len = param[2].u.memref.size; + + return 0; +} + +static bool scmi_optee_chan_available(struct device_node *of_node, int idx) +{ + u32 channel_id; + + return !of_property_read_u32_index(of_node, "linaro,optee-channel-id", + idx, &channel_id); +} + +static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + if (!channel->tee_shm) + core->shmem->clear_channel(channel->req.shmem); +} + +static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) +{ + const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; + void *shbuf; + + channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); + if (IS_ERR(channel->tee_shm)) { + dev_err(channel->cinfo->dev, "shmem allocation failed\n"); + return -ENOMEM; + } + + shbuf = tee_shm_get_va(channel->tee_shm, 0); + memset(shbuf, 0, msg_size); + channel->req.msg = shbuf; + channel->rx_len = msg_size; + + return 0; +} + +static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + channel->req.shmem 
= core->shmem->setup_iomap(cinfo, dev, true, NULL); + if (IS_ERR(channel->req.shmem)) + return PTR_ERR(channel->req.shmem); + + return 0; +} + +static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + if (of_property_present(cinfo->dev->of_node, "shmem")) + return setup_static_shmem(dev, cinfo, channel); + else + return setup_dynamic_shmem(dev, channel); +} + +static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) +{ + struct scmi_optee_channel *channel; + uint32_t channel_id; + int ret; + + if (!tx) + return -ENODEV; + + channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL); + if (!channel) + return -ENOMEM; + + ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id", + 0, &channel_id); + if (ret) + return ret; + + cinfo->transport_info = channel; + channel->cinfo = cinfo; + channel->channel_id = channel_id; + mutex_init(&channel->mu); + + ret = setup_shmem(dev, cinfo, channel); + if (ret) + return ret; + + ret = open_session(scmi_optee_private, &channel->tee_session); + if (ret) + goto err_free_shm; + + ret = tee_client_system_session(scmi_optee_private->tee_ctx, channel->tee_session); + if (ret) + dev_warn(dev, "Could not switch to system session, do best effort\n"); + + ret = get_channel(channel); + if (ret) + goto err_close_sess; + + /* Enable polling */ + cinfo->no_completion_irq = true; + + mutex_lock(&scmi_optee_private->mu); + list_add(&channel->link, &scmi_optee_private->channel_list); + mutex_unlock(&scmi_optee_private->mu); + + return 0; + +err_close_sess: + close_session(scmi_optee_private, channel->tee_session); +err_free_shm: + if (channel->tee_shm) + tee_shm_free(channel->tee_shm); + + return ret; +} + +static int scmi_optee_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_optee_channel *channel = cinfo->transport_info; + + /* + * Different protocols might share the same chan info, so a previous + * call might have already freed the structure. 
+ */ + if (!channel) + return 0; + + mutex_lock(&scmi_optee_private->mu); + list_del(&channel->link); + mutex_unlock(&scmi_optee_private->mu); + + close_session(scmi_optee_private, channel->tee_session); + + if (channel->tee_shm) { + tee_shm_free(channel->tee_shm); + channel->tee_shm = NULL; + } + + cinfo->transport_info = NULL; + channel->cinfo = NULL; + + return 0; +} + +static int scmi_optee_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + int ret; + + mutex_lock(&channel->mu); + + if (channel->tee_shm) { + core->msg->tx_prepare(channel->req.msg, xfer); + ret = invoke_process_msg_channel(channel, + core->msg->command_size(xfer)); + } else { + core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo); + ret = invoke_process_smt_channel(channel); + } + + if (ret) + mutex_unlock(&channel->mu); + + return ret; +} + +static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + if (channel->tee_shm) + core->msg->fetch_response(channel->req.msg, + channel->rx_len, xfer); + else + core->shmem->fetch_response(channel->req.shmem, xfer); +} + +static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + mutex_unlock(&channel->mu); +} + +static struct scmi_transport_ops scmi_optee_ops = { + .chan_available = scmi_optee_chan_available, + .chan_setup = scmi_optee_chan_setup, + .chan_free = scmi_optee_chan_free, + .send_message = scmi_optee_send_message, + .mark_txdone = scmi_optee_mark_txdone, + .fetch_response = scmi_optee_fetch_response, + .clear_channel = scmi_optee_clear_channel, +}; + +static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) +{ + return ver->impl_id == TEE_IMPL_ID_OPTEE; +} + +static struct scmi_desc scmi_optee_desc = { + .ops = &scmi_optee_ops, + .max_rx_timeout_ms = 30, + .max_msg = 20, + .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, + .sync_cmds_completed_on_ret = true, +}; + +static const struct of_device_id scmi_of_match[] = { + { .compatible = "linaro,scmi-optee" }, + { /* Sentinel */ }, +}; + +DEFINE_SCMI_TRANSPORT_DRIVER(scmi_optee, scmi_optee_driver, scmi_optee_desc, + scmi_of_match, core); + +static int scmi_optee_service_probe(struct device *dev) +{ + struct scmi_optee_agent *agent; + struct tee_context *tee_ctx; + int ret; + + /* Only one SCMI OP-TEE device allowed */ + if (scmi_optee_private) { + dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n"); + return -EBUSY; + } + + tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL); + if (IS_ERR(tee_ctx)) + return -ENODEV; + + agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL); + if (!agent) { + ret = -ENOMEM; + goto err; + } + + agent->dev = dev; + agent->tee_ctx = tee_ctx; + INIT_LIST_HEAD(&agent->channel_list); + mutex_init(&agent->mu); + + ret = get_capabilities(agent); + if (ret) + goto err; + + /* Ensure agent resources are all visible before scmi_optee_private is */ + smp_mb(); + scmi_optee_private = agent; + + ret = platform_driver_register(&scmi_optee_driver); + if (ret) { + scmi_optee_private = NULL; + goto err; + } + + return 0; + +err: + tee_client_close_context(tee_ctx); + + return ret; +} + +static int scmi_optee_service_remove(struct device *dev) +{ + struct scmi_optee_agent *agent = scmi_optee_private; + + if (!scmi_optee_private) + 
return -EINVAL; + + platform_driver_unregister(&scmi_optee_driver); + + if (!list_empty(&scmi_optee_private->channel_list)) + return -EBUSY; + + /* Ensure cleared reference is visible before resources are released */ + smp_store_mb(scmi_optee_private, NULL); + + tee_client_close_context(agent->tee_ctx); + + return 0; +} + +static const struct tee_client_device_id scmi_optee_service_id[] = { + { + UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e, + 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99) + }, + { } +}; + +MODULE_DEVICE_TABLE(tee, scmi_optee_service_id); + +static struct tee_client_driver scmi_optee_service_driver = { + .id_table = scmi_optee_service_id, + .driver = { + .name = "scmi-optee", + .bus = &tee_bus_type, + .probe = scmi_optee_service_probe, + .remove = scmi_optee_service_remove, + }, +}; + +static int __init scmi_transport_optee_init(void) +{ + return driver_register(&scmi_optee_service_driver.driver); +} +module_init(scmi_transport_optee_init); + +static void __exit scmi_transport_optee_exit(void) +{ + driver_unregister(&scmi_optee_service_driver.driver); +} +module_exit(scmi_transport_optee_exit); + +MODULE_AUTHOR("Etienne Carriere "); +MODULE_DESCRIPTION("SCMI OPTEE Transport driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/transports/smc.c b/drivers/firmware/arm_scmi/transports/smc.c new file mode 100644 index 00000000000000..f8dd108777f9b9 --- /dev/null +++ b/drivers/firmware/arm_scmi/transports/smc.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message SMC/HVC + * Transport driver + * + * Copyright 2020 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../common.h" + +/* + * The shmem address is split into 4K page and offset. + * This is to make sure the parameters fit in 32bit arguments of the + * smc/hvc call to keep it uniform across smc32/smc64 conventions. + * This however limits the shmem address to 44 bit. + * + * These optional parameters can be used to distinguish among multiple + * scmi instances that are using the same smc-id. + * The page parameter is passed in r1/x1/w1 register and the offset parameter + * is passed in r2/x2/w2 register. + */ + +#define SHMEM_SIZE (SZ_4K) +#define SHMEM_SHIFT 12 +#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT)) +#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1)) + +/** + * struct scmi_smc - Structure representing a SCMI smc transport + * + * @irq: An optional IRQ for completion + * @cinfo: SCMI channel info + * @shmem: Transmit/Receive shared memory area + * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. + * Used when NOT operating in atomic mode. + * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. + * Used when operating in atomic mode. 
+ * @func_id: smc/hvc call function id + * @param_page: 4K page number of the shmem channel + * @param_offset: Offset within the 4K page of the shmem channel + * @cap_id: smc/hvc doorbell's capability id to be used on Qualcomm virtual + * platforms + */ + +struct scmi_smc { + int irq; + struct scmi_chan_info *cinfo; + struct scmi_shared_mem __iomem *shmem; + /* Protect access to shmem area */ + struct mutex shmem_lock; +#define INFLIGHT_NONE MSG_TOKEN_MAX + atomic_t inflight; + unsigned long func_id; + unsigned long param_page; + unsigned long param_offset; + unsigned long cap_id; +}; + +static struct scmi_transport_core_operations *core; + +static irqreturn_t smc_msg_done_isr(int irq, void *data) +{ + struct scmi_smc *scmi_info = data; + + core->rx_callback(scmi_info->cinfo, + core->shmem->read_header(scmi_info->shmem), NULL); + + return IRQ_HANDLED; +} + +static bool smc_chan_available(struct device_node *of_node, int idx) +{ + struct device_node *np __free(device_node) = + of_parse_phandle(of_node, "shmem", 0); + if (!np) + return false; + + return true; +} + +static inline void smc_channel_lock_init(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_init(&scmi_info->shmem_lock); +} + +static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) +{ + int ret; + + ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); + + return ret == INFLIGHT_NONE; +} + +static inline void +smc_channel_lock_acquire(struct scmi_smc *scmi_info, + struct scmi_xfer *xfer __maybe_unused) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); + else + mutex_lock(&scmi_info->shmem_lock); +} + +static inline void smc_channel_lock_release(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_unlock(&scmi_info->shmem_lock); +} + +static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + struct device *cdev = cinfo->dev; + unsigned long cap_id = ULONG_MAX; + struct scmi_smc *scmi_info; + struct resource res = {}; + u32 func_id; + int ret; + + if (!tx) + return -ENODEV; + + scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL); + if (!scmi_info) + return -ENOMEM; + + scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res); + if (IS_ERR(scmi_info->shmem)) + return PTR_ERR(scmi_info->shmem); + + ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id); + if (ret < 0) + return ret; + + if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) { + resource_size_t size = resource_size(&res); + void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8; + /* The capability-id is kept in last 8 bytes of shmem. + * +-------+ <-- 0 + * | shmem | + * +-------+ <-- size - 8 + * | capId | + * +-------+ <-- size + */ + memcpy_fromio(&cap_id, ptr, sizeof(cap_id)); + } + + if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) { + scmi_info->param_page = SHMEM_PAGE(res.start); + scmi_info->param_offset = SHMEM_OFFSET(res.start); + } + /* + * If there is an interrupt named "a2p", then the service and + * completion of a message is signaled by an interrupt rather than by + * the return of the SMC call. 
+ */ + scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p"); + if (scmi_info->irq > 0) { + ret = request_irq(scmi_info->irq, smc_msg_done_isr, + IRQF_NO_SUSPEND, dev_name(dev), scmi_info); + if (ret) { + dev_err(dev, "failed to setup SCMI smc irq\n"); + return ret; + } + } else { + cinfo->no_completion_irq = true; + } + + scmi_info->func_id = func_id; + scmi_info->cap_id = cap_id; + scmi_info->cinfo = cinfo; + smc_channel_lock_init(scmi_info); + cinfo->transport_info = scmi_info; + + return 0; +} + +static int smc_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_smc *scmi_info = cinfo->transport_info; + + /* + * Different protocols might share the same chan info, so a previous + * smc_chan_free call might have already freed the structure. + */ + if (!scmi_info) + return 0; + + /* Ignore any possible further reception on the IRQ path */ + if (scmi_info->irq > 0) + free_irq(scmi_info->irq, scmi_info); + + cinfo->transport_info = NULL; + scmi_info->cinfo = NULL; + + return 0; +} + +static int smc_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_smc *scmi_info = cinfo->transport_info; + struct arm_smccc_res res; + + /* + * Channel will be released only once response has been + * surely fully retrieved, so after .mark_txdone() + */ + smc_channel_lock_acquire(scmi_info, xfer); + + core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo); + + if (scmi_info->cap_id != ULONG_MAX) + arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0, + 0, 0, 0, 0, 0, &res); + else + arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->param_page, + scmi_info->param_offset, 0, 0, 0, 0, 0, + &res); + + /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ + if (res.a0) { + smc_channel_lock_release(scmi_info); + return -EOPNOTSUPP; + } + + return 0; +} + +static void smc_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_smc *scmi_info = cinfo->transport_info; + + core->shmem->fetch_response(scmi_info->shmem, xfer); +} + +static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) +{ + struct scmi_smc *scmi_info = cinfo->transport_info; + + smc_channel_lock_release(scmi_info); +} + +static const struct scmi_transport_ops scmi_smc_ops = { + .chan_available = smc_chan_available, + .chan_setup = smc_chan_setup, + .chan_free = smc_chan_free, + .send_message = smc_send_message, + .mark_txdone = smc_mark_txdone, + .fetch_response = smc_fetch_response, +}; + +static struct scmi_desc scmi_smc_desc = { + .ops = &scmi_smc_ops, + .max_rx_timeout_ms = 30, + .max_msg = 20, + .max_msg_size = 128, + /* + * Setting .sync_cmds_atomic_replies to true for SMC assumes that, + * once the SMC instruction has completed successfully, the issued + * SCMI command would have been already fully processed by the SCMI + * platform firmware and so any possible response value expected + * for the issued command will be immmediately ready to be fetched + * from the shared memory area. 
+ */ + .sync_cmds_completed_on_ret = true, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE), +}; + +static const struct of_device_id scmi_of_match[] = { + { .compatible = "arm,scmi-smc" }, + { .compatible = "arm,scmi-smc-param" }, + { .compatible = "qcom,scmi-smc" }, + { /* Sentinel */ }, +}; + +DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc, + scmi_of_match, core); +module_platform_driver(scmi_smc_driver); + +MODULE_AUTHOR("Peng Fan "); +MODULE_AUTHOR("Nikunj Kela "); +MODULE_DESCRIPTION("SCMI SMC Transport driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c new file mode 100644 index 00000000000000..d349766bc0b267 --- /dev/null +++ b/drivers/firmware/arm_scmi/transports/virtio.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Virtio Transport driver for Arm System Control and Management Interface + * (SCMI). + * + * Copyright (C) 2020-2022 OpenSynergy. + * Copyright (C) 2021-2024 ARM Ltd. + */ + +/** + * DOC: Theory of Operation + * + * The scmi-virtio transport implements a driver for the virtio SCMI device. + * + * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx + * channel (virtio eventq, P2A channel). Each channel is implemented through a + * virtqueue. Access to each virtqueue is protected by spinlocks. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../common.h" + +#define VIRTIO_MAX_RX_TIMEOUT_MS 60000 +#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */ +#define VIRTIO_SCMI_MAX_PDU_SIZE \ + (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) +#define DESCRIPTORS_PER_TX_MSG 2 + +/** + * struct scmi_vio_channel - Transport channel information + * + * @vqueue: Associated virtqueue + * @cinfo: SCMI Tx or Rx channel + * @free_lock: Protects access to the @free_list. + * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only + * @deferred_tx_work: Worker for TX deferred replies processing + * @deferred_tx_wq: Workqueue for TX deferred replies + * @pending_lock: Protects access to the @pending_cmds_list. + * @pending_cmds_list: List of pre-fetched commands queueud for later processing + * @is_rx: Whether channel is an Rx channel + * @max_msg: Maximum number of pending messages for this channel. + * @lock: Protects access to all members except users, free_list and + * pending_cmds_list. + * @shutdown_done: A reference to a completion used when freeing this channel. + * @users: A reference count to currently active users of this channel. + */ +struct scmi_vio_channel { + struct virtqueue *vqueue; + struct scmi_chan_info *cinfo; + /* lock to protect access to the free list. */ + spinlock_t free_lock; + struct list_head free_list; + /* lock to protect access to the pending list. 
*/ + spinlock_t pending_lock; + struct list_head pending_cmds_list; + struct work_struct deferred_tx_work; + struct workqueue_struct *deferred_tx_wq; + bool is_rx; + unsigned int max_msg; + /* + * Lock to protect access to all members except users, free_list and + * pending_cmds_list + */ + spinlock_t lock; + struct completion *shutdown_done; + refcount_t users; +}; + +enum poll_states { + VIO_MSG_NOT_POLLED, + VIO_MSG_POLL_TIMEOUT, + VIO_MSG_POLLING, + VIO_MSG_POLL_DONE, +}; + +/** + * struct scmi_vio_msg - Transport PDU information + * + * @request: SDU used for commands + * @input: SDU used for (delayed) responses and notifications + * @list: List which scmi_vio_msg may be part of + * @rx_len: Input SDU size in bytes, once input has been received + * @poll_idx: Last used index registered for polling purposes if this message + * transaction reply was configured for polling. + * @poll_status: Polling state for this message. + * @poll_lock: A lock to protect @poll_status + * @users: A reference count to track this message users and avoid premature + * freeing (and reuse) when polling and IRQ execution paths interleave. + */ +struct scmi_vio_msg { + struct scmi_msg_payld *request; + struct scmi_msg_payld *input; + struct list_head list; + unsigned int rx_len; + unsigned int poll_idx; + enum poll_states poll_status; + /* Lock to protect access to poll_status */ + spinlock_t poll_lock; + refcount_t users; +}; + +static struct scmi_transport_core_operations *core; + +/* Only one SCMI VirtIO device can possibly exist */ +static struct virtio_device *scmi_vdev; + +static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch, + struct scmi_chan_info *cinfo) +{ + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + cinfo->transport_info = vioch; + /* Indirectly setting channel not available any more */ + vioch->cinfo = cinfo; + spin_unlock_irqrestore(&vioch->lock, flags); + + refcount_set(&vioch->users, 1); +} + +static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch) +{ + return refcount_inc_not_zero(&vioch->users); +} + +static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch) +{ + if (refcount_dec_and_test(&vioch->users)) { + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + if (vioch->shutdown_done) { + vioch->cinfo = NULL; + complete(vioch->shutdown_done); + } + spin_unlock_irqrestore(&vioch->lock, flags); + } +} + +static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); + + /* + * Prepare to wait for the last release if not already released + * or in progress. 
+ */ + spin_lock_irqsave(&vioch->lock, flags); + if (!vioch->cinfo || vioch->shutdown_done) { + spin_unlock_irqrestore(&vioch->lock, flags); + return; + } + + vioch->shutdown_done = &vioch_shutdown_done; + if (!vioch->is_rx && vioch->deferred_tx_wq) + /* Cannot be kicked anymore after this...*/ + vioch->deferred_tx_wq = NULL; + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + /* Let any possibly concurrent RX path release the channel */ + wait_for_completion(vioch->shutdown_done); +} + +/* Assumes to be called with vio channel acquired already */ +static struct scmi_vio_msg * +scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + struct scmi_vio_msg *msg; + + spin_lock_irqsave(&vioch->free_lock, flags); + if (list_empty(&vioch->free_list)) { + spin_unlock_irqrestore(&vioch->free_lock, flags); + return NULL; + } + + msg = list_first_entry(&vioch->free_list, typeof(*msg), list); + list_del_init(&msg->list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_NOT_POLLED; + refcount_set(&msg->users, 1); + + return msg; +} + +static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg) +{ + return refcount_inc_not_zero(&msg->users); +} + +/* Assumes to be called with vio channel acquired already */ +static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + bool ret; + + ret = refcount_dec_and_test(&msg->users); + if (ret) { + unsigned long flags; + + spin_lock_irqsave(&vioch->free_lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + } + + return ret; +} + +static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) +{ + return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); +} + +static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + struct scatterlist sg_in; + int rc; + unsigned long flags; + struct device *dev = &vioch->vqueue->vdev->dev; + + sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); + + spin_lock_irqsave(&vioch->lock, flags); + + rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); + if (rc) + dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc); + else + virtqueue_kick(vioch->vqueue); + + spin_unlock_irqrestore(&vioch->lock, flags); + + return rc; +} + +/* + * Assume to be called with channel already acquired or not ready at all; + * vioch->lock MUST NOT have been already acquired. 
+ */ +static void scmi_finalize_message(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + if (vioch->is_rx) + scmi_vio_feed_vq_rx(vioch, msg); + else + scmi_vio_msg_release(vioch, msg); +} + +static void scmi_vio_complete_cb(struct virtqueue *vqueue) +{ + unsigned long flags; + unsigned int length; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg; + bool cb_enabled = true; + + if (WARN_ON_ONCE(!vqueue->vdev->priv)) + return; + vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; + + for (;;) { + if (!scmi_vio_channel_acquire(vioch)) + return; + + spin_lock_irqsave(&vioch->lock, flags); + if (cb_enabled) { + virtqueue_disable_cb(vqueue); + cb_enabled = false; + } + + msg = virtqueue_get_buf(vqueue, &length); + if (!msg) { + if (virtqueue_enable_cb(vqueue)) { + spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); + return; + } + cb_enabled = true; + } + spin_unlock_irqrestore(&vioch->lock, flags); + + if (msg) { + msg->rx_len = length; + core->rx_callback(vioch->cinfo, + core->msg->read_header(msg->input), + msg); + + scmi_finalize_message(vioch, msg); + } + + /* + * Release vio channel between loop iterations to allow + * virtio_chan_free() to eventually fully release it when + * shutting down; in such a case, any outstanding message will + * be ignored since this loop will bail out at the next + * iteration. + */ + scmi_vio_channel_release(vioch); + } +} + +static void scmi_vio_deferred_tx_worker(struct work_struct *work) +{ + unsigned long flags; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg, *tmp; + + vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work); + + if (!scmi_vio_channel_acquire(vioch)) + return; + + /* + * Process pre-fetched messages: these could be non-polled messages or + * late timed-out replies to polled messages dequeued by chance while + * polling for some other messages: this worker is in charge to process + * the valid non-expired messages and anyway finally free all of them. + */ + spin_lock_irqsave(&vioch->pending_lock, flags); + + /* Scan the list of possibly pre-fetched messages during polling. */ + list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) { + list_del(&msg->list); + + /* + * Channel is acquired here (cannot vanish) and this message + * is no more processed elsewhere so no poll_lock needed. 
+ */ + if (msg->poll_status == VIO_MSG_NOT_POLLED) + core->rx_callback(vioch->cinfo, + core->msg->read_header(msg->input), + msg); + + /* Free the processed message once done */ + scmi_vio_msg_release(vioch, msg); + } + + spin_unlock_irqrestore(&vioch->pending_lock, flags); + + /* Process possibly still pending messages */ + scmi_vio_complete_cb(vioch->vqueue); + + scmi_vio_channel_release(vioch); +} + +static struct virtqueue_info scmi_vio_vqs_info[] = { + { "tx", scmi_vio_complete_cb }, + { "rx", scmi_vio_complete_cb }, +}; + +static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) +{ + struct scmi_vio_channel *vioch = base_cinfo->transport_info; + + return vioch->max_msg; +} + +static bool virtio_chan_available(struct device_node *of_node, int idx) +{ + struct scmi_vio_channel *channels, *vioch = NULL; + + if (WARN_ON_ONCE(!scmi_vdev)) + return false; + + channels = (struct scmi_vio_channel *)scmi_vdev->priv; + + switch (idx) { + case VIRTIO_SCMI_VQ_TX: + vioch = &channels[VIRTIO_SCMI_VQ_TX]; + break; + case VIRTIO_SCMI_VQ_RX: + if (scmi_vio_have_vq_rx(scmi_vdev)) + vioch = &channels[VIRTIO_SCMI_VQ_RX]; + break; + default: + return false; + } + + return vioch && !vioch->cinfo; +} + +static void scmi_destroy_tx_workqueue(void *deferred_tx_wq) +{ + destroy_workqueue(deferred_tx_wq); +} + +static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + struct scmi_vio_channel *vioch; + int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; + int i; + + if (!scmi_vdev) + return -EPROBE_DEFER; + + vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; + + /* Setup a deferred worker for polling. */ + if (tx && !vioch->deferred_tx_wq) { + int ret; + + vioch->deferred_tx_wq = + alloc_workqueue(dev_name(&scmi_vdev->dev), + WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, + 0); + if (!vioch->deferred_tx_wq) + return -ENOMEM; + + ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue, + vioch->deferred_tx_wq); + if (ret) + return ret; + + INIT_WORK(&vioch->deferred_tx_work, + scmi_vio_deferred_tx_worker); + } + + for (i = 0; i < vioch->max_msg; i++) { + struct scmi_vio_msg *msg; + + msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (tx) { + msg->request = devm_kzalloc(dev, + VIRTIO_SCMI_MAX_PDU_SIZE, + GFP_KERNEL); + if (!msg->request) + return -ENOMEM; + spin_lock_init(&msg->poll_lock); + refcount_set(&msg->users, 1); + } + + msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, + GFP_KERNEL); + if (!msg->input) + return -ENOMEM; + + scmi_finalize_message(vioch, msg); + } + + scmi_vio_channel_ready(vioch, cinfo); + + return 0; +} + +static int virtio_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_vio_channel *vioch = cinfo->transport_info; + + /* + * Break device to inhibit further traffic flowing while shutting down + * the channels: doing it later holding vioch->lock creates unsafe + * locking dependency chains as reported by LOCKDEP. 
+ */ + virtio_break_device(vioch->vqueue->vdev); + scmi_vio_channel_cleanup_sync(vioch); + + return 0; +} + +static int virtio_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scatterlist sg_out; + struct scatterlist sg_in; + struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in }; + unsigned long flags; + int rc; + struct scmi_vio_msg *msg; + + if (!scmi_vio_channel_acquire(vioch)) + return -EINVAL; + + msg = scmi_virtio_get_free_msg(vioch); + if (!msg) { + scmi_vio_channel_release(vioch); + return -EBUSY; + } + + core->msg->tx_prepare(msg->request, xfer); + + sg_init_one(&sg_out, msg->request, core->msg->command_size(xfer)); + sg_init_one(&sg_in, msg->input, core->msg->response_size(xfer)); + + spin_lock_irqsave(&vioch->lock, flags); + + /* + * If polling was requested for this transaction: + * - retrieve last used index (will be used as polling reference) + * - bind the polled message to the xfer via .priv + * - grab an additional msg refcount for the poll-path + */ + if (xfer->hdr.poll_completion) { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_POLLING; + scmi_vio_msg_acquire(msg); + /* Ensure initialized msg is visibly bound to xfer */ + smp_store_mb(xfer->priv, msg); + } + + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); + if (rc) + dev_err(vioch->cinfo->dev, + "failed to add to TX virtqueue (%d)\n", rc); + else + virtqueue_kick(vioch->vqueue); + + spin_unlock_irqrestore(&vioch->lock, flags); + + if (rc) { + /* Ensure order between xfer->priv clear and vq feeding */ + smp_store_mb(xfer->priv, NULL); + if (xfer->hdr.poll_completion) + scmi_vio_msg_release(vioch, msg); + scmi_vio_msg_release(vioch, msg); + } + + scmi_vio_channel_release(vioch); + + return rc; +} + +static void virtio_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_vio_msg *msg = xfer->priv; + + if (msg) + core->msg->fetch_response(msg->input, msg->rx_len, xfer); +} + +static void virtio_fetch_notification(struct scmi_chan_info *cinfo, + size_t max_len, struct scmi_xfer *xfer) +{ + struct scmi_vio_msg *msg = xfer->priv; + + if (msg) + core->msg->fetch_notification(msg->input, msg->rx_len, + max_len, xfer); +} + +/** + * virtio_mark_txdone - Mark transmission done + * + * Free only completed polling transfer messages. + * + * Note that in the SCMI VirtIO transport we never explicitly release still + * outstanding but timed-out messages by forcibly re-adding them to the + * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the + * TX deferred worker, eventually clean up such messages once, finally, a late + * reply is received and discarded (if ever). + * + * This approach was deemed preferable since those pending timed-out buffers are + * still effectively owned by the SCMI platform VirtIO device even after timeout + * expiration: forcibly freeing and reusing them before they had been returned + * explicitly by the SCMI platform could lead to subtle bugs due to message + * corruption. + * An SCMI platform VirtIO device which never returns message buffers is + * anyway broken and it will quickly lead to exhaustion of available messages. 
+ * + * For this same reason, here, we take care to free only the polled messages + * that had been somehow replied (only if not by chance already processed on the + * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also + * any timed-out polled message if that indeed appears to have been at least + * dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such + * messages won't be freed elsewhere. Any other polled message is marked as + * VIO_MSG_POLL_TIMEOUT. + * + * Possible late replies to timed-out polled messages will be eventually freed + * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if + * dequeued on some other polling path. + * + * @cinfo: SCMI channel info + * @ret: Transmission return code + * @xfer: Transfer descriptor + */ +static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer) +{ + unsigned long flags; + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scmi_vio_msg *msg = xfer->priv; + + if (!msg || !scmi_vio_channel_acquire(vioch)) + return; + + /* Ensure msg is unbound from xfer anyway at this point */ + smp_store_mb(xfer->priv, NULL); + + /* Must be a polled xfer and not already freed on the IRQ path */ + if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) { + scmi_vio_channel_release(vioch); + return; + } + + spin_lock_irqsave(&msg->poll_lock, flags); + /* Do not free timedout polled messages only if still inflight */ + if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE) + scmi_vio_msg_release(vioch, msg); + else if (msg->poll_status == VIO_MSG_POLLING) + msg->poll_status = VIO_MSG_POLL_TIMEOUT; + spin_unlock_irqrestore(&msg->poll_lock, flags); + + scmi_vio_channel_release(vioch); +} + +/** + * virtio_poll_done - Provide polling support for VirtIO transport + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being poll for. + * + * VirtIO core provides a polling mechanism based only on last used indexes: + * this means that it is possible to poll the virtqueues waiting for something + * new to arrive from the host side, but the only way to check if the freshly + * arrived buffer was indeed what we were waiting for is to compare the newly + * arrived message descriptor with the one we are polling on. + * + * As a consequence it can happen to dequeue something different from the buffer + * we were poll-waiting for: if that is the case such early fetched buffers are + * then added to a the @pending_cmds_list list for later processing by a + * dedicated deferred worker. + * + * So, basically, once something new is spotted we proceed to de-queue all the + * freshly received used buffers until we found the one we were polling on, or, + * we have 'seemingly' emptied the virtqueue; if some buffers are still pending + * in the vqueue at the end of the polling loop (possible due to inherent races + * in virtqueues handling mechanisms), we similarly kick the deferred worker + * and let it process those, to avoid indefinitely looping in the .poll_done + * busy-waiting helper. + * + * Finally, we delegate to the deferred worker also the final free of any timed + * out reply to a polled message that we should dequeue. 
+ * + * Note that, since we do NOT have per-message suppress notification mechanism, + * the message we are polling for could be alternatively delivered via usual + * IRQs callbacks on another core which happened to have IRQs enabled while we + * are actively polling for it here: in such a case it will be handled as such + * by rx_callback() and the polling loop in the SCMI Core TX path will be + * transparently terminated anyway. + * + * Return: True once polling has successfully completed. + */ +static bool virtio_poll_done(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + bool pending, found = false; + unsigned int length, any_prefetched = 0; + unsigned long flags; + struct scmi_vio_msg *next_msg, *msg = xfer->priv; + struct scmi_vio_channel *vioch = cinfo->transport_info; + + if (!msg) + return true; + + /* + * Processed already by other polling loop on another CPU ? + * + * Note that this message is acquired on the poll path so cannot vanish + * while inside this loop iteration even if concurrently processed on + * the IRQ path. + * + * Avoid to acquire poll_lock since polled_status can be changed + * in a relevant manner only later in this same thread of execution: + * any other possible changes made concurrently by other polling loops + * or by a reply delivered on the IRQ path have no meaningful impact on + * this loop iteration: in other words it is harmless to allow this + * possible race but let has avoid spinlocking with irqs off in this + * initial part of the polling loop. + */ + if (msg->poll_status == VIO_MSG_POLL_DONE) + return true; + + if (!scmi_vio_channel_acquire(vioch)) + return true; + + /* Has cmdq index moved at all ? */ + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); + if (!pending) { + scmi_vio_channel_release(vioch); + return false; + } + + spin_lock_irqsave(&vioch->lock, flags); + virtqueue_disable_cb(vioch->vqueue); + + /* + * Process all new messages till the polled-for message is found OR + * the vqueue is empty. + */ + while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) { + bool next_msg_done = false; + + /* + * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so + * that can be properly freed even on timeout in mark_txdone. + */ + spin_lock(&next_msg->poll_lock); + if (next_msg->poll_status == VIO_MSG_POLLING) { + next_msg->poll_status = VIO_MSG_POLL_DONE; + next_msg_done = true; + } + spin_unlock(&next_msg->poll_lock); + + next_msg->rx_len = length; + /* Is the message we were polling for ? */ + if (next_msg == msg) { + found = true; + break; + } else if (next_msg_done) { + /* Skip the rest if this was another polled msg */ + continue; + } + + /* + * Enqueue for later processing any non-polled message and any + * timed-out polled one that we happen to have dequeued. + */ + spin_lock(&next_msg->poll_lock); + if (next_msg->poll_status == VIO_MSG_NOT_POLLED || + next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) { + spin_unlock(&next_msg->poll_lock); + + any_prefetched++; + spin_lock(&vioch->pending_lock); + list_add_tail(&next_msg->list, + &vioch->pending_cmds_list); + spin_unlock(&vioch->pending_lock); + } else { + spin_unlock(&next_msg->poll_lock); + } + } + + /* + * When the polling loop has successfully terminated if something + * else was queued in the meantime, it will be served by a deferred + * worker OR by the normal IRQ/callback OR by other poll loops. + * + * If we are still looking for the polled reply, the polling index has + * to be updated to the current vqueue last used index. 
+ */ + if (found) { + pending = !virtqueue_enable_cb(vioch->vqueue); + } else { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); + } + + if (vioch->deferred_tx_wq && (any_prefetched || pending)) + queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work); + + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + return found; +} + +static const struct scmi_transport_ops scmi_virtio_ops = { + .chan_available = virtio_chan_available, + .chan_setup = virtio_chan_setup, + .chan_free = virtio_chan_free, + .get_max_msg = virtio_get_max_msg, + .send_message = virtio_send_message, + .fetch_response = virtio_fetch_response, + .fetch_notification = virtio_fetch_notification, + .mark_txdone = virtio_mark_txdone, + .poll_done = virtio_poll_done, +}; + +static struct scmi_desc scmi_virtio_desc = { + .ops = &scmi_virtio_ops, + /* for non-realtime virtio devices */ + .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, + .max_msg = 0, /* overridden by virtio_get_max_msg() */ + .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE), +}; + +static const struct of_device_id scmi_of_match[] = { + { .compatible = "arm,scmi-virtio" }, + { /* Sentinel */ }, +}; + +DEFINE_SCMI_TRANSPORT_DRIVER(scmi_virtio, scmi_virtio_driver, scmi_virtio_desc, + scmi_of_match, core); + +static int scmi_vio_probe(struct virtio_device *vdev) +{ + struct device *dev = &vdev->dev; + struct scmi_vio_channel *channels; + bool have_vq_rx; + int vq_cnt; + int i; + int ret; + struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT]; + + /* Only one SCMI VirtiO device allowed */ + if (scmi_vdev) { + dev_err(dev, + "One SCMI Virtio device was already initialized: only one allowed.\n"); + return -EBUSY; + } + + have_vq_rx = scmi_vio_have_vq_rx(vdev); + vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1; + + channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL); + if (!channels) + return -ENOMEM; + + if (have_vq_rx) + channels[VIRTIO_SCMI_VQ_RX].is_rx = true; + + ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_vqs_info, NULL); + if (ret) { + dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt); + return ret; + } + + for (i = 0; i < vq_cnt; i++) { + unsigned int sz; + + spin_lock_init(&channels[i].lock); + spin_lock_init(&channels[i].free_lock); + INIT_LIST_HEAD(&channels[i].free_list); + spin_lock_init(&channels[i].pending_lock); + INIT_LIST_HEAD(&channels[i].pending_cmds_list); + channels[i].vqueue = vqs[i]; + + sz = virtqueue_get_vring_size(channels[i].vqueue); + /* Tx messages need multiple descriptors. */ + if (!channels[i].is_rx) + sz /= DESCRIPTORS_PER_TX_MSG; + + if (sz > MSG_TOKEN_MAX) { + dev_info(dev, + "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", + channels[i].is_rx ? 
"rx" : "tx", + sz, MSG_TOKEN_MAX); + sz = MSG_TOKEN_MAX; + } + channels[i].max_msg = sz; + } + + vdev->priv = channels; + + /* Ensure initialized scmi_vdev is visible */ + smp_store_mb(scmi_vdev, vdev); + + ret = platform_driver_register(&scmi_virtio_driver); + if (ret) { + vdev->priv = NULL; + vdev->config->del_vqs(vdev); + /* Ensure NULLified scmi_vdev is visible */ + smp_store_mb(scmi_vdev, NULL); + + return ret; + } + + return 0; +} + +static void scmi_vio_remove(struct virtio_device *vdev) +{ + platform_driver_unregister(&scmi_virtio_driver); + + /* + * Once we get here, virtio_chan_free() will have already been called by + * the SCMI core for any existing channel and, as a consequence, all the + * virtio channels will have been already marked NOT ready, causing any + * outstanding message on any vqueue to be ignored by complete_cb: now + * we can just stop processing buffers and destroy the vqueues. + */ + virtio_reset_device(vdev); + vdev->config->del_vqs(vdev); + /* Ensure scmi_vdev is visible as NULL */ + smp_store_mb(scmi_vdev, NULL); +} + +static int scmi_vio_validate(struct virtio_device *vdev) +{ +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE + if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, + "device does not comply with spec version 1.x\n"); + return -EINVAL; + } +#endif + return 0; +} + +static unsigned int features[] = { + VIRTIO_SCMI_F_P2A_CHANNELS, +}; + +static const struct virtio_device_id id_table[] = { + { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID }, + { 0 } +}; + +static struct virtio_driver virtio_scmi_driver = { + .driver.name = "scmi-virtio", + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .id_table = id_table, + .probe = scmi_vio_probe, + .remove = scmi_vio_remove, + .validate = scmi_vio_validate, +}; + +module_virtio_driver(virtio_scmi_driver); + +MODULE_AUTHOR("Igor Skalkin "); +MODULE_AUTHOR("Peter Hilber "); +MODULE_AUTHOR("Cristian Marussi "); +MODULE_DESCRIPTION("SCMI VirtIO Transport driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig new file mode 100644 index 00000000000000..2883ed24a84d65 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "ARM SCMI NXP i.MX Vendor Protocols" + +config IMX_SCMI_BBM_EXT + tristate "i.MX SCMI BBM EXTENSION" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + default y if ARCH_MXC + help + This enables i.MX System BBM control logic which supports RTC + and BUTTON. + + To compile this driver as a module, choose M here: the + module will be called imx-sm-bbm. + +config IMX_SCMI_MISC_EXT + tristate "i.MX SCMI MISC EXTENSION" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + default y if ARCH_MXC + help + This enables i.MX System MISC control logic such as gpio expander + wakeup + + To compile this driver as a module, choose M here: the + module will be called imx-sm-misc. 
+endmenu diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile new file mode 100644 index 00000000000000..d3ee6d5449244a --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o +obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c new file mode 100644 index 00000000000000..17799eacf06c30 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) NXP BBM Protocol + * + * Copyright 2024 NXP + */ + +#define pr_fmt(fmt) "SCMI Notifications BBM - " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "../../protocols.h" +#include "../../notify.h" + +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +enum scmi_imx_bbm_protocol_cmd { + IMX_BBM_GPR_SET = 0x3, + IMX_BBM_GPR_GET = 0x4, + IMX_BBM_RTC_ATTRIBUTES = 0x5, + IMX_BBM_RTC_TIME_SET = 0x6, + IMX_BBM_RTC_TIME_GET = 0x7, + IMX_BBM_RTC_ALARM_SET = 0x8, + IMX_BBM_BUTTON_GET = 0x9, + IMX_BBM_RTC_NOTIFY = 0xA, + IMX_BBM_BUTTON_NOTIFY = 0xB, +}; + +#define GET_RTCS_NR(x) le32_get_bits((x), GENMASK(23, 16)) +#define GET_GPRS_NR(x) le32_get_bits((x), GENMASK(15, 0)) + +#define SCMI_IMX_BBM_NOTIFY_RTC_UPDATED BIT(2) +#define SCMI_IMX_BBM_NOTIFY_RTC_ROLLOVER BIT(1) +#define SCMI_IMX_BBM_NOTIFY_RTC_ALARM BIT(0) + +#define SCMI_IMX_BBM_RTC_ALARM_ENABLE_FLAG BIT(0) + +#define SCMI_IMX_BBM_NOTIFY_RTC_FLAG \ + (SCMI_IMX_BBM_NOTIFY_RTC_UPDATED | SCMI_IMX_BBM_NOTIFY_RTC_ROLLOVER | \ + SCMI_IMX_BBM_NOTIFY_RTC_ALARM) + +#define SCMI_IMX_BBM_EVENT_RTC_MASK GENMASK(31, 24) + +struct scmi_imx_bbm_info { + u32 version; + int nr_rtc; + int nr_gpr; +}; + +struct scmi_msg_imx_bbm_protocol_attributes { + __le32 attributes; +}; + +struct scmi_imx_bbm_set_time { + __le32 id; + __le32 flags; + __le32 value_low; + __le32 value_high; +}; + +struct scmi_imx_bbm_get_time { + __le32 id; + __le32 flags; +}; + +struct scmi_imx_bbm_alarm_time { + __le32 id; + __le32 flags; + __le32 value_low; + __le32 value_high; +}; + +struct scmi_msg_imx_bbm_rtc_notify { + __le32 rtc_id; + __le32 flags; +}; + +struct scmi_msg_imx_bbm_button_notify { + __le32 flags; +}; + +struct scmi_imx_bbm_notify_payld { + __le32 flags; +}; + +static int scmi_imx_bbm_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_imx_bbm_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_imx_bbm_protocol_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + pi->nr_rtc = GET_RTCS_NR(attr->attributes); + pi->nr_gpr = GET_GPRS_NR(attr->attributes); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_bbm_notify(const struct scmi_protocol_handle *ph, + u32 src_id, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + + if (message_id == IMX_BBM_RTC_NOTIFY) { + struct scmi_msg_imx_bbm_rtc_notify *rtc_notify; + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*rtc_notify), 0, &t); + if (ret) + return ret; + + rtc_notify = t->tx.buf; + rtc_notify->rtc_id = cpu_to_le32(0); + rtc_notify->flags = + cpu_to_le32(enable ? 
SCMI_IMX_BBM_NOTIFY_RTC_FLAG : 0); + } else if (message_id == IMX_BBM_BUTTON_NOTIFY) { + struct scmi_msg_imx_bbm_button_notify *button_notify; + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*button_notify), 0, &t); + if (ret) + return ret; + + button_notify = t->tx.buf; + button_notify->flags = cpu_to_le32(enable ? 1 : 0); + } else { + return -EINVAL; + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static enum scmi_imx_bbm_protocol_cmd evt_2_cmd[] = { + IMX_BBM_RTC_NOTIFY, + IMX_BBM_BUTTON_NOTIFY +}; + +static int scmi_imx_bbm_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_imx_bbm_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_imx_bbm_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_imx_bbm_notify_payld *p = payld; + struct scmi_imx_bbm_notif_report *r = report; + + if (sizeof(*p) != payld_sz) + return NULL; + + if (evt_id == SCMI_EVENT_IMX_BBM_RTC) { + r->is_rtc = true; + r->is_button = false; + r->timestamp = timestamp; + r->rtc_id = le32_get_bits(p->flags, SCMI_IMX_BBM_EVENT_RTC_MASK); + r->rtc_evt = le32_get_bits(p->flags, SCMI_IMX_BBM_NOTIFY_RTC_FLAG); + dev_dbg(ph->dev, "RTC: %d evt: %x\n", r->rtc_id, r->rtc_evt); + *src_id = r->rtc_evt; + } else if (evt_id == SCMI_EVENT_IMX_BBM_BUTTON) { + r->is_rtc = false; + r->is_button = true; + r->timestamp = timestamp; + dev_dbg(ph->dev, "BBM Button\n"); + *src_id = 0; + } else { + WARN_ON_ONCE(1); + return NULL; + } + + return r; +} + +static const struct scmi_event scmi_imx_bbm_events[] = { + { + .id = SCMI_EVENT_IMX_BBM_RTC, + .max_payld_sz = sizeof(struct scmi_imx_bbm_notify_payld), + .max_report_sz = sizeof(struct scmi_imx_bbm_notif_report), + }, + { + .id = SCMI_EVENT_IMX_BBM_BUTTON, + .max_payld_sz = sizeof(struct scmi_imx_bbm_notify_payld), + .max_report_sz = sizeof(struct scmi_imx_bbm_notif_report), + }, +}; + +static const struct scmi_event_ops scmi_imx_bbm_event_ops = { + .set_notify_enabled = scmi_imx_bbm_set_notify_enabled, + .fill_custom_report = scmi_imx_bbm_fill_custom_report, +}; + +static const struct scmi_protocol_events scmi_imx_bbm_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &scmi_imx_bbm_event_ops, + .evts = scmi_imx_bbm_events, + .num_events = ARRAY_SIZE(scmi_imx_bbm_events), + .num_sources = 1, +}; + +static int scmi_imx_bbm_rtc_time_set(const struct scmi_protocol_handle *ph, + u32 rtc_id, u64 sec) +{ + struct scmi_imx_bbm_info *pi = ph->get_priv(ph); + struct scmi_imx_bbm_set_time *cfg; + struct scmi_xfer *t; + int ret; + + if (rtc_id >= pi->nr_rtc) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_TIME_SET, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(rtc_id); + cfg->flags = 0; + cfg->value_low = cpu_to_le32(lower_32_bits(sec)); + cfg->value_high = cpu_to_le32(upper_32_bits(sec)); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_bbm_rtc_time_get(const struct scmi_protocol_handle *ph, + u32 rtc_id, u64 *value) +{ + struct scmi_imx_bbm_info *pi = ph->get_priv(ph); + struct scmi_imx_bbm_get_time *cfg; + 
struct scmi_xfer *t; + int ret; + + if (rtc_id >= pi->nr_rtc) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_TIME_GET, sizeof(*cfg), + sizeof(u64), &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(rtc_id); + cfg->flags = 0; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *value = get_unaligned_le64(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_bbm_rtc_alarm_set(const struct scmi_protocol_handle *ph, + u32 rtc_id, bool enable, u64 sec) +{ + struct scmi_imx_bbm_info *pi = ph->get_priv(ph); + struct scmi_imx_bbm_alarm_time *cfg; + struct scmi_xfer *t; + int ret; + + if (rtc_id >= pi->nr_rtc) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, IMX_BBM_RTC_ALARM_SET, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(rtc_id); + cfg->flags = enable ? + cpu_to_le32(SCMI_IMX_BBM_RTC_ALARM_ENABLE_FLAG) : 0; + cfg->value_low = cpu_to_le32(lower_32_bits(sec)); + cfg->value_high = cpu_to_le32(upper_32_bits(sec)); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_bbm_button_get(const struct scmi_protocol_handle *ph, u32 *state) +{ + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, IMX_BBM_BUTTON_GET, 0, sizeof(u32), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *state = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static const struct scmi_imx_bbm_proto_ops scmi_imx_bbm_proto_ops = { + .rtc_time_get = scmi_imx_bbm_rtc_time_get, + .rtc_time_set = scmi_imx_bbm_rtc_time_set, + .rtc_alarm_set = scmi_imx_bbm_rtc_alarm_set, + .button_get = scmi_imx_bbm_button_get, +}; + +static int scmi_imx_bbm_protocol_init(const struct scmi_protocol_handle *ph) +{ + u32 version; + int ret; + struct scmi_imx_bbm_info *binfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_info(ph->dev, "NXP SM BBM Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + binfo = devm_kzalloc(ph->dev, sizeof(*binfo), GFP_KERNEL); + if (!binfo) + return -ENOMEM; + + ret = scmi_imx_bbm_attributes_get(ph, binfo); + if (ret) + return ret; + + return ph->set_priv(ph, binfo, version); +} + +static const struct scmi_protocol scmi_imx_bbm = { + .id = SCMI_PROTOCOL_IMX_BBM, + .owner = THIS_MODULE, + .instance_init = &scmi_imx_bbm_protocol_init, + .ops = &scmi_imx_bbm_proto_ops, + .events = &scmi_imx_bbm_protocol_events, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, + .vendor_id = "NXP", + .sub_vendor_id = "IMX", +}; +module_scmi_protocol(scmi_imx_bbm); + +MODULE_DESCRIPTION("i.MX SCMI BBM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c new file mode 100644 index 00000000000000..a86ab9b35953f7 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System control and Management Interface (SCMI) NXP MISC Protocol + * + * Copyright 2024 NXP + */ + +#define pr_fmt(fmt) "SCMI Notifications MISC - " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "../../protocols.h" +#include "../../notify.h" + +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +#define MAX_MISC_CTRL_SOURCES GENMASK(15, 0) + +enum scmi_imx_misc_protocol_cmd { + SCMI_IMX_MISC_CTRL_SET = 0x3, + SCMI_IMX_MISC_CTRL_GET = 0x4, + 
SCMI_IMX_MISC_CTRL_NOTIFY = 0x8, +}; + +struct scmi_imx_misc_info { + u32 version; + u32 nr_dev_ctrl; + u32 nr_brd_ctrl; + u32 nr_reason; +}; + +struct scmi_msg_imx_misc_protocol_attributes { + __le32 attributes; +}; + +#define GET_BRD_CTRLS_NR(x) le32_get_bits((x), GENMASK(31, 24)) +#define GET_REASONS_NR(x) le32_get_bits((x), GENMASK(23, 16)) +#define GET_DEV_CTRLS_NR(x) le32_get_bits((x), GENMASK(15, 0)) +#define BRD_CTRL_START_ID BIT(15) + +struct scmi_imx_misc_ctrl_set_in { + __le32 id; + __le32 num; + __le32 value[]; +}; + +struct scmi_imx_misc_ctrl_notify_in { + __le32 ctrl_id; + __le32 flags; +}; + +struct scmi_imx_misc_ctrl_notify_payld { + __le32 ctrl_id; + __le32 flags; +}; + +struct scmi_imx_misc_ctrl_get_out { + __le32 num; + __le32 val[]; +}; + +static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_imx_misc_info *mi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_imx_misc_protocol_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + mi->nr_dev_ctrl = GET_DEV_CTRLS_NR(attr->attributes); + mi->nr_brd_ctrl = GET_BRD_CTRLS_NR(attr->attributes); + mi->nr_reason = GET_REASONS_NR(attr->attributes); + dev_info(ph->dev, "i.MX MISC NUM DEV CTRL: %d, NUM BRD CTRL: %d,NUM Reason: %d\n", + mi->nr_dev_ctrl, mi->nr_brd_ctrl, mi->nr_reason); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_misc_ctrl_validate_id(const struct scmi_protocol_handle *ph, + u32 ctrl_id) +{ + struct scmi_imx_misc_info *mi = ph->get_priv(ph); + + /* + * [0, BRD_CTRL_START_ID) is for Dev Ctrl which is SOC related + * [BRD_CTRL_START_ID, 0xffff) is for Board Ctrl which is board related + */ + if (ctrl_id < BRD_CTRL_START_ID && ctrl_id > mi->nr_dev_ctrl) + return -EINVAL; + if (ctrl_id >= BRD_CTRL_START_ID + mi->nr_brd_ctrl) + return -EINVAL; + + return 0; +} + +static int scmi_imx_misc_ctrl_notify(const struct scmi_protocol_handle *ph, + u32 ctrl_id, u32 evt_id, u32 flags) +{ + struct scmi_imx_misc_ctrl_notify_in *in; + struct scmi_xfer *t; + int ret; + + ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_NOTIFY, + sizeof(*in), 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->ctrl_id = cpu_to_le32(ctrl_id); + in->flags = cpu_to_le32(flags); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int +scmi_imx_misc_ctrl_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + /* misc_ctrl_req_notify is for enablement */ + if (enable) + return 0; + + ret = scmi_imx_misc_ctrl_notify(ph, src_id, evt_id, 0); + if (ret) + dev_err(ph->dev, "FAIL_ENABLED - evt[%X] src[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void * +scmi_imx_misc_ctrl_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_imx_misc_ctrl_notify_payld *p = payld; + struct scmi_imx_misc_ctrl_notify_report *r = report; + + if (sizeof(*p) != payld_sz) + return NULL; + + r->timestamp = timestamp; + r->ctrl_id = le32_to_cpu(p->ctrl_id); + r->flags = le32_to_cpu(p->flags); + if (src_id) + *src_id = r->ctrl_id; + dev_dbg(ph->dev, "%s: ctrl_id: %d flags: %d\n", __func__, + r->ctrl_id, r->flags); + + return r; +} + 
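To make the control-ID partitioning above concrete: scmi_imx_misc_attributes_get() unpacks the device/board control counts from the PROTOCOL_ATTRIBUTES word, and scmi_imx_misc_ctrl_validate_id() accepts only IDs at or below the advertised device-control count or in the board range starting at BRD_CTRL_START_ID (0x8000). The following standalone sketch mirrors that logic with made-up counts (16 device controls, 4 board controls) and plain host-order masks instead of le32_get_bits(); it is illustrative only and not part of the patch.

/* Illustrative sketch of the i.MX MISC attribute unpacking and ID check. */
#include <stdio.h>

#define BRD_CTRL_START_ID	0x8000u			/* BIT(15), as above */
#define GET_BRD_CTRLS_NR(x)	(((x) >> 24) & 0xffu)	/* Bits[31:24] */
#define GET_REASONS_NR(x)	(((x) >> 16) & 0xffu)	/* Bits[23:16] */
#define GET_DEV_CTRLS_NR(x)	((x) & 0xffffu)		/* Bits[15:0]  */

/* Same range test as scmi_imx_misc_ctrl_validate_id() above */
static int ctrl_id_valid(unsigned int id, unsigned int nr_dev, unsigned int nr_brd)
{
	if (id < BRD_CTRL_START_ID && id > nr_dev)
		return 0;
	if (id >= BRD_CTRL_START_ID + nr_brd)
		return 0;
	return 1;
}

int main(void)
{
	/* Hypothetical attributes word: 4 board controls, 3 reasons, 16 dev controls */
	unsigned int attributes = (4u << 24) | (3u << 16) | 16u;
	unsigned int nr_dev = GET_DEV_CTRLS_NR(attributes);
	unsigned int nr_brd = GET_BRD_CTRLS_NR(attributes);
	unsigned int ids[] = { 5, 0x200, 0x8002, 0x8005 };

	printf("dev ctrls: %u, board ctrls: %u, reasons: %u\n",
	       nr_dev, nr_brd, GET_REASONS_NR(attributes));
	for (unsigned int i = 0; i < 4; i++)
		printf("ctrl 0x%04x -> %s\n", ids[i],
		       ctrl_id_valid(ids[i], nr_dev, nr_brd) ? "ok" : "rejected");
	return 0;
}

With these example counts, IDs 5 and 0x8002 pass the check while 0x200 (above the device-control count) and 0x8005 (beyond the board-control range) are rejected, matching what the driver returns as -EINVAL.
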
+static const struct scmi_event_ops scmi_imx_misc_event_ops = { + .set_notify_enabled = scmi_imx_misc_ctrl_set_notify_enabled, + .fill_custom_report = scmi_imx_misc_ctrl_fill_custom_report, +}; + +static const struct scmi_event scmi_imx_misc_events[] = { + { + .id = SCMI_EVENT_IMX_MISC_CONTROL, + .max_payld_sz = sizeof(struct scmi_imx_misc_ctrl_notify_payld), + .max_report_sz = sizeof(struct scmi_imx_misc_ctrl_notify_report), + }, +}; + +static struct scmi_protocol_events scmi_imx_misc_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &scmi_imx_misc_event_ops, + .evts = scmi_imx_misc_events, + .num_events = ARRAY_SIZE(scmi_imx_misc_events), + .num_sources = MAX_MISC_CTRL_SOURCES, +}; + +static int scmi_imx_misc_ctrl_get(const struct scmi_protocol_handle *ph, + u32 ctrl_id, u32 *num, u32 *val) +{ + struct scmi_imx_misc_ctrl_get_out *out; + struct scmi_xfer *t; + int ret, i; + int max_msg_size = ph->hops->get_max_msg_size(ph); + int max_num = (max_msg_size - sizeof(*out)) / sizeof(__le32); + + ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_GET, sizeof(u32), + 0, &t); + if (ret) + return ret; + + put_unaligned_le32(ctrl_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + out = t->rx.buf; + *num = le32_to_cpu(out->num); + + if (*num >= max_num || + *num * sizeof(__le32) > t->rx.len - sizeof(__le32)) { + ph->xops->xfer_put(ph, t); + return -EINVAL; + } + + for (i = 0; i < *num; i++) + val[i] = le32_to_cpu(out->val[i]); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph, + u32 ctrl_id, u32 num, u32 *val) +{ + struct scmi_imx_misc_ctrl_set_in *in; + struct scmi_xfer *t; + int ret, i; + int max_msg_size = ph->hops->get_max_msg_size(ph); + int max_num = (max_msg_size - sizeof(*in)) / sizeof(__le32); + + ret = scmi_imx_misc_ctrl_validate_id(ph, ctrl_id); + if (ret) + return ret; + + if (num > max_num) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in), + 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->id = cpu_to_le32(ctrl_id); + in->num = cpu_to_le32(num); + for (i = 0; i < num; i++) + in->value[i] = cpu_to_le32(val[i]); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = { + .misc_ctrl_set = scmi_imx_misc_ctrl_set, + .misc_ctrl_get = scmi_imx_misc_ctrl_get, + .misc_ctrl_req_notify = scmi_imx_misc_ctrl_notify, +}; + +static int scmi_imx_misc_protocol_init(const struct scmi_protocol_handle *ph) +{ + struct scmi_imx_misc_info *minfo; + u32 version; + int ret; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_info(ph->dev, "NXP SM MISC Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + minfo = devm_kzalloc(ph->dev, sizeof(*minfo), GFP_KERNEL); + if (!minfo) + return -ENOMEM; + + ret = scmi_imx_misc_attributes_get(ph, minfo); + if (ret) + return ret; + + return ph->set_priv(ph, minfo, version); +} + +static const struct scmi_protocol scmi_imx_misc = { + .id = SCMI_PROTOCOL_IMX_MISC, + .owner = THIS_MODULE, + .instance_init = &scmi_imx_misc_protocol_init, + .ops = &scmi_imx_misc_proto_ops, + .events = &scmi_imx_misc_protocol_events, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, + .vendor_id = "NXP", + .sub_vendor_id = "IMX", +}; +module_scmi_protocol(scmi_imx_misc); + 
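As a usage note, the ops table registered via module_scmi_protocol() above is meant to be consumed through the generic SCMI client API. Below is a minimal sketch of such a client; the header path and the "imx-misc-demo" name string are assumptions made purely for illustration, while devm_protocol_get() and misc_ctrl_set() follow the generic SCMI handle interface and the scmi_imx_misc_proto_ops defined in this file.

// SPDX-License-Identifier: GPL-2.0
/*
 * Illustrative SCMI client sketch (not part of this patch): binds to the
 * NXP MISC vendor protocol and writes a single control value.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <linux/scmi_imx_protocol.h>	/* assumed location of the vendor ops/ids */

static int imx_misc_demo_probe(struct scmi_device *sdev)
{
	const struct scmi_imx_misc_proto_ops *ops;
	struct scmi_protocol_handle *ph;
	u32 val = 1;

	if (!sdev->handle)
		return -ENODEV;

	/* Acquire the vendor protocol; ops matches scmi_imx_misc_proto_ops above */
	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_MISC, &ph);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* Set hypothetical control 0 to 1; routed to SCMI_IMX_MISC_CTRL_SET */
	return ops->misc_ctrl_set(ph, 0, 1, &val);
}

static const struct scmi_device_id imx_misc_demo_ids[] = {
	{ SCMI_PROTOCOL_IMX_MISC, "imx-misc-demo" },	/* name string is illustrative */
	{ },
};
MODULE_DEVICE_TABLE(scmi, imx_misc_demo_ids);

static struct scmi_driver imx_misc_demo_driver = {
	.name = "imx-misc-demo",
	.probe = imx_misc_demo_probe,
	.id_table = imx_misc_demo_ids,
};
module_scmi_driver(imx_misc_demo_driver);

MODULE_DESCRIPTION("Illustrative consumer of the i.MX SCMI MISC protocol");
MODULE_LICENSE("GPL");
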
+MODULE_DESCRIPTION("i.MX SCMI MISC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst new file mode 100644 index 00000000000000..b2dfd6c46ca2f5 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst @@ -0,0 +1,886 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: + +=============================================================================== +i.MX95 System Control and Management Interface(SCMI) Vendor Protocols Extension +=============================================================================== + +:Copyright: |copy| 2024 NXP + +:Author: Peng Fan + +The System Manager (SM) is a low-level system function which runs on a System +Control Processor (SCP) to support isolation and management of power domains, +clocks, resets, sensors, pins, etc. on complex application processors. It often +runs on a Cortex-M processor and provides an abstraction to many of the +underlying features of the hardware. The primary purpose of the SM is to allow +isolation between software running on different cores in the SoC. It does this +by having exclusive access to critical resources such as those controlling +power, clocks, reset, PMIC, etc. and then providing an RPC interface to those +clients. This allows the SM to provide access control, arbitration, and +aggregation policies for those shared critical resources. + +SM introduces a concept Logic Machine(LM) which is analogous to VM and each has +its own instance of SCMI. All normal SCMI calls only apply to that LM. That +includes boot, shutdown, reset, suspend, wake, etc. Each LM (e.g. A55 and M7) +are completely isolated from the others and each LM has its own communication +channels talking to the same SCMI server. + +This document covers all the information necessary to understand, maintain, +port, and deploy the SM on supported processors. + +The SM implements an interface compliant with the Arm SCMI Specification +with additional vendor specific extensions. + +SCMI_BBM: System Control and Management BBM Vendor Protocol +============================================================== + +This protocol is intended provide access to the battery-backed module. This +contains persistent storage (GPR), an RTC, and the ON/OFF button. The protocol +can also provide access to similar functions implemented via external board +components. The BBM protocol provides functions to: + +- Describe the protocol version. +- Discover implementation attributes. +- Read/write GPR +- Discover the RTCs available in the system. +- Read/write the RTC time in seconds and ticks +- Set an alarm (per LM) in seconds +- Get notifications on RTC update, alarm, or rollover. +- Get notification on ON/OFF button activity. + +For most SoC, there is one on-chip RTC (e.g. in BBNSM) and this is RTC ID 0. +Board code can add additional GPR and RTC. + +GPR are not aggregated. The RTC time is also not aggregated. Setting these +sets for all so normally exclusive access would be granted to one agent for +each. However, RTC alarms are maintained for each LM and the hardware is +programmed with the next nearest alarm time. So only one agent in an LM should +be given access rights to set an RTC alarm. 
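The BBM driver earlier in this series packs RTC times exactly as the message tables below describe: a 64-bit seconds/ticks value split into low and high 32-bit words, with bit 0 of the flags word selecting ticks (1) or seconds (0). A minimal sketch of that packing, using an arbitrary value and ignoring the cpu_to_le32() byte-order conversion the kernel driver applies::

   /* Illustrative only: builds a BBM_RTC_TIME_SET payload as described below. */
   #include <stdint.h>
   #include <stdio.h>

   struct bbm_rtc_time_set {
           uint32_t index;         /* RTC index; 0 selects the on-chip (BBNSM) RTC */
           uint32_t flags;         /* bit 0: 1 = time in ticks, 0 = time in seconds */
           uint32_t time[2];       /* [0] = low 32 bits, [1] = high 32 bits */
   };

   int main(void)
   {
           uint64_t ticks = 0x123456789ULL;        /* arbitrary example value */
           struct bbm_rtc_time_set msg = {
                   .index = 0,
                   .flags = 1,                     /* value expressed in ticks */
                   .time  = {
                           (uint32_t)(ticks & 0xffffffffu),
                           (uint32_t)(ticks >> 32),
                   },
           };

           printf("index=%u flags=%u low=0x%08x high=0x%08x\n",
                  (unsigned int)msg.index, (unsigned int)msg.flags,
                  (unsigned int)msg.time[0], (unsigned int)msg.time[1]);
           return 0;
   }

Running this prints ``low=0x23456789 high=0x00000001``, i.e. the same split that ``lower_32_bits()``/``upper_32_bits()`` produce in scmi_imx_bbm_rtc_time_set().
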
+ +Commands: +_________ + +PROTOCOL_VERSION +~~~~~~~~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x81 + ++---------------+--------------------------------------------------------------+ +|Return values | ++---------------+--------------------------------------------------------------+ +|Name |Description | ++---------------+--------------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++---------------+--------------------------------------------------------------+ +|uint32 version | For this revision of the specification, this value must be | +| | 0x10000. | ++---------------+--------------------------------------------------------------+ + +PROTOCOL_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~ + +message_id: 0x1 +protocol_id: 0x81 + ++---------------+--------------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes | Bits[31:8] Number of RTCs. | +| | Bits[15:0] Number of persistent storage (GPR) words. | ++------------------+-----------------------------------------------------------+ + +PROTOCOL_MESSAGE_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x2 +protocol_id: 0x81 + ++---------------+--------------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: in case the message is implemented and available | +| |to use. | +| |NOT_FOUND: if the message identified by message_id is | +| |invalid or not implemented | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Flags that are associated with a specific function in the | +| |protocol. For all functions in this protocol, this | +| |parameter has a value of 0 | ++------------------+-----------------------------------------------------------+ + +BBM_GPR_SET +~~~~~~~~~~~ + +message_id: 0x3 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of GPR to write | ++------------------+-----------------------------------------------------------+ +|uint32 value |32-bit value to write to the GPR | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the GPR was successfully written. | +| |NOT_FOUND: if the index is not valid. 
| +| |DENIED: if the agent does not have permission to write | +| |the specified GPR | ++------------------+-----------------------------------------------------------+ + +BBM_GPR_GET +~~~~~~~~~~~ + +message_id: 0x4 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of GPR to read | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the GPR was successfully read. | +| |NOT_FOUND: if the index is not valid. | +| |DENIED: if the agent does not have permission to read | +| |the specified GPR. | ++------------------+-----------------------------------------------------------+ +|uint32 value |32-bit value read from the GPR | ++------------------+-----------------------------------------------------------+ + +BBM_RTC_ATTRIBUTES +~~~~~~~~~~~~~~~~~~ + +message_id: 0x5 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of RTC | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: returned the attributes. | +| |NOT_FOUND: Index is invalid. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Bit[31:24] Bit width of RTC seconds. | +| |Bit[23:16] Bit width of RTC ticks. | +| |Bits[15:0] RTC ticks per second | ++------------------+-----------------------------------------------------------+ +|uint8 name[16] |Null-terminated ASCII string of up to 16 bytes in length | +| |describing the RTC name | ++------------------+-----------------------------------------------------------+ + +BBM_RTC_TIME_SET +~~~~~~~~~~~~~~~~ + +message_id: 0x6 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of RTC | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Bits[31:1] Reserved, must be zero. | +| |Bit[0] RTC time format: | +| |Set to 1 if the time is in ticks. | +| |Set to 0 if the time is in seconds | ++------------------+-----------------------------------------------------------+ +|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds/ticks. | +| |Upper word: Upper 32 bits of the time in seconds/ticks. 
|
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: RTC time was successfully set. |
+| |NOT_FOUND: rtcId pertains to a non-existent RTC. |
+| |INVALID_PARAMETERS: time is not valid |
+| |(beyond the range of the RTC). |
+| |DENIED: the agent does not have permission to set the RTC. |
++------------------+-----------------------------------------------------------+
+
+BBM_RTC_TIME_GET
+~~~~~~~~~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of RTC |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] RTC time format: |
+| |Set to 1 if the time is in ticks. |
+| |Set to 0 if the time is in seconds |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: RTC time was successfully read. |
+| |NOT_FOUND: rtcId pertains to a non-existent RTC. |
++------------------+-----------------------------------------------------------+
+|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds/ticks. |
+| |Upper word: Upper 32 bits of the time in seconds/ticks. |
++------------------+-----------------------------------------------------------+
+
+BBM_RTC_ALARM_SET
+~~~~~~~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of RTC |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] RTC enable flag: |
+| |Set to 1 if the RTC alarm should be enabled. |
+| |Set to 0 if the RTC alarm should be disabled |
++------------------+-----------------------------------------------------------+
+|uint32 time[2] |Lower word: Lower 32 bits of the time in seconds. |
+| |Upper word: Upper 32 bits of the time in seconds. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: RTC alarm was successfully set. |
+| |NOT_FOUND: rtcId pertains to a non-existent RTC. |
+| |INVALID_PARAMETERS: time is not valid |
+| |(beyond the range of the RTC).
| +| |DENIED: the agent does not have permission to set the RTC | +| |alarm | ++------------------+-----------------------------------------------------------+ + +BBM_BUTTON_GET +~~~~~~~~~~~~~~ + +message_id: 0x9 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the button status was read. | +| |Other value: ARM SCMI Specification status code definitions| ++------------------+-----------------------------------------------------------+ +|uint32 state |State of the ON/OFF button. 1: ON, 0: OFF | ++------------------+-----------------------------------------------------------+ + +BBM_RTC_NOTIFY +~~~~~~~~~~~~~~ + +message_id: 0xA +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of RTC | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Notification flags | +| |Bits[31:3] Reserved, must be zero. | +| |Bit[2] Update enable: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[1] Rollover enable: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[0] Alarm enable: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: notification configuration was successfully | +| |updated. | +| |NOT_FOUND: rtcId pertains to a non-existent RTC. | +| |DENIED: the agent does not have permission to request RTC | +| |notifications. | ++------------------+-----------------------------------------------------------+ + +BBM_BUTTON_NOTIFY +~~~~~~~~~~~~~~~~~ + +message_id: 0xB +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Notification flags | +| |Bits[31:1] Reserved, must be zero. | +| |Bit[0] Enable button: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: notification configuration was successfully | +| |updated. | +| |DENIED: the agent does not have permission to request | +| |button notifications. 
|
++------------------+-----------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x81
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+BBM_RTC_EVENT
+~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 flags |RTC events: |
+| |Bits[31:2] Reserved, must be zero. |
+| |Bit[1] RTC rollover notification: |
+| |1 RTC rollover detected. |
+| |0 no RTC rollover detected. |
+| |Bit[0] RTC alarm notification: |
+| |1 RTC alarm generated. |
+| |0 no RTC alarm generated. |
++------------------+-----------------------------------------------------------+
+
+BBM_BUTTON_EVENT
+~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x81
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Button events: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Button notification: |
+| |1 button change detected. |
+| |0 no button change detected. |
++------------------+-----------------------------------------------------------+
+
+SCMI_MISC: System Control and Management MISC Vendor Protocol
+================================================================
+
+Provides miscellaneous functions. This includes controls for miscellaneous
+settings/actions that must be exposed from the SM to agents. They are device
+specific and are usually defined to access bit fields in various mix block
+control modules, IOMUX_GPR, and other GPR/CSR owned by the SM. This protocol
+supports the following functions:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Set/Get a control.
+- Initiate an action on a control.
+- Obtain platform (i.e. SM) build information.
+- Obtain ROM passover data.
+- Read boot/shutdown/reset information for the LM or the system.
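The tables that follow define the wire layout of each MISC command. As a
concrete illustration of the "Set/Get a control" function listed above, the
sketch below shows how an agent might describe the MISC_CONTROL_SET payload
(message_id 0x3, protocol_id 0x84) in C. The structure names, the __le32
representation and the MISC_MAX_VAL_WORDS macro are assumptions made for this
example only; they are not taken from the patch or from any in-tree header.

    #include <linux/types.h>

    /* Illustrative assumption: maximum number of value words per the table below. */
    #define MISC_MAX_VAL_WORDS 8

    /* Hypothetical agent-side view of the MISC_CONTROL_SET request payload. */
    struct scmi_imx_misc_ctrl_set_in {
            __le32 index;                   /* Index of the control */
            __le32 num;                     /* Number of valid words in val[] */
            __le32 val[MISC_MAX_VAL_WORDS]; /* Value data array */
    };

    /* The response carries only the int32 status word. */
    struct scmi_imx_misc_ctrl_set_out {
            __le32 status;
    };

A caller would fill index, num and the first num entries of val[], send the
message over the normal SCMI transport, and interpret status against the
SUCCESS/NOT_FOUND/DENIED codes given in the MISC_CONTROL_SET table below.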
+ +Commands: +_________ + +PROTOCOL_VERSION +~~~~~~~~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x84 + ++---------------+--------------------------------------------------------------+ +|Return values | ++---------------+--------------------------------------------------------------+ +|Name |Description | ++---------------+--------------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++---------------+--------------------------------------------------------------+ +|uint32 version | For this revision of the specification, this value must be | +| | 0x10000. | ++---------------+--------------------------------------------------------------+ + +PROTOCOL_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~ + +message_id: 0x1 +protocol_id: 0x84 + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Protocol attributes: | +| |Bits[31:24] Reserved, must be zero. | +| |Bits[23:16] Number of reset reasons. | +| |Bits[15:0] Number of controls | ++------------------+-----------------------------------------------------------+ + +PROTOCOL_MESSAGE_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x2 +protocol_id: 0x84 + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: in case the message is implemented and available | +| |to use. | +| |NOT_FOUND: if the message identified by message_id is | +| |invalid or not implemented | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Flags that are associated with a specific function in the | +| |protocol. For all functions in this protocol, this | +| |parameter has a value of 0 | ++------------------+-----------------------------------------------------------+ + +MISC_CONTROL_SET +~~~~~~~~~~~~~~~~ + +message_id: 0x3 +protocol_id: 0x84 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 index |Index of the control | ++------------------+-----------------------------------------------------------+ +|uint32 num |Size of the value data in words | ++------------------+-----------------------------------------------------------+ +|uint32 val[8] |value data array | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the control was set successfully. | +| |NOT_FOUND: if the index is not valid. 
|
+| |DENIED: if the agent does not have permission to set the |
+| |control |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_GET
+~~~~~~~~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of the control |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the control was read successfully. |
+| |NOT_FOUND: if the index is not valid. |
+| |DENIED: if the agent does not have permission to get the |
+| |control |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the return data in words, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|val[0, num - 1] |value data array |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_ACTION
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of the control |
++------------------+-----------------------------------------------------------+
+|uint32 action |Action for the control |
++------------------+-----------------------------------------------------------+
+|uint32 numarg |Size of the argument data, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|arg[0, numarg - 1]|Argument data array |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the action completed successfully. |
+| |NOT_FOUND: if the index is not valid. |
+| |DENIED: if the agent does not have permission to get the |
+| |control |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the return data in words, max 8 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|val[0, num - 1] |value data array |
++------------------+-----------------------------------------------------------+
+
+MISC_DISCOVER_BUILD_INFO
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This function is used to obtain the build commit, date, time, and number.
+
+message_id: 0x6
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the build info was returned successfully.
|
+| |NOT_SUPPORTED: if the data is not available. |
++------------------+-----------------------------------------------------------+
+|uint32 buildnum |Build number |
++------------------+-----------------------------------------------------------+
+|uint32 buildcommit|Most significant 32 bits of the git commit hash |
++------------------+-----------------------------------------------------------+
+|uint8 date[16] |Date of build. Null terminated ASCII string of up to 16 |
+| |bytes in length |
++------------------+-----------------------------------------------------------+
+|uint8 time[16] |Time of build. Null terminated ASCII string of up to 16 |
+| |bytes in length |
++------------------+-----------------------------------------------------------+
+
+MISC_ROM_PASSOVER_GET
+~~~~~~~~~~~~~~~~~~~~~
+
+ROM passover data is information exported by the ROM that can be used by
+others. It includes the boot device, instance, type, mode, etc. This function
+is used to obtain the ROM passover data. The returned block of words is
+structured as defined in the ROM passover section in the SoC RM.
+
+message_id: 0x7
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the data was returned successfully. |
+| |NOT_SUPPORTED: if the data is not available. |
++------------------+-----------------------------------------------------------+
+|uint32 num |Size of the passover data in words, max 13 |
++------------------+-----------------------------------------------------------+
+|uint32 | |
+|data[0, num - 1] |Passover data array |
++------------------+-----------------------------------------------------------+
+
+MISC_CONTROL_NOTIFY
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x84
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 index |Index of control |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags, varies by control |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: notification configuration was successfully |
+| |updated. |
+| |NOT_FOUND: if the control ID does not exist. |
+| |INVALID_PARAMETERS: if the input attributes flag specifies |
+| |unsupported or invalid configurations. |
+| |DENIED: if the calling agent is not permitted to request |
+| |the notification.
| ++------------------+-----------------------------------------------------------+ + +MISC_RESET_REASON_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x9 +protocol_id: 0x84 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 reasonid |Identifier for the reason | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if valid reason attributes are returned | +| |NOT_FOUND: if reasonId pertains to a non-existent reason. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Reason attributes. This parameter has the following | +| |format: Bits[31:0] Reserved, must be zero | +| |Bits[15:0] Number of persistent storage (GPR) words. | ++------------------+-----------------------------------------------------------+ +|uint8 name[16] |Null-terminated ASCII string of up to 16 bytes in length | +| |describing the reason | ++------------------+-----------------------------------------------------------+ + +MISC_RESET_REASON_GET +~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0xA +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 flags |Reason flags. This parameter has the following format: | +| |Bits[31:1] Reserved, must be zero. | +| |Bit[0] System: | +| |Set to 1 to return the system reason. | +| |Set to 0 to return the LM reason | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: reset reason return | ++--------------------+---------------------------------------------------------+ +|uint32 bootflags |Boot reason flags. This parameter has the format: | +| |Bits[31] Valid. | +| |Set to 1 if the entire reason is valid. | +| |Set to 0 if the entire reason is not valid. | +| |Bits[30:29] Reserved, must be zero. | +| |Bit[28] Valid origin: | +| |Set to 1 if the origin field is valid. | +| |Set to 0 if the origin field is not valid. | +| |Bits[27:24] Origin. | +| |Bit[23] Valid err ID: | +| |Set to 1 if the error ID field is valid. | +| |Set to 0 if the error ID field is not valid. | +| |Bits[22:8] Error ID. | +| |Bit[7:0] Reason | ++--------------------+---------------------------------------------------------+ +|uint32 shutdownflags|Shutdown reason flags. This parameter has the format: | +| |Bits[31] Valid. | +| |Set to 1 if the entire reason is valid. | +| |Set to 0 if the entire reason is not valid. | +| |Bits[30:29] Number of valid extended info words. | +| |Bit[28] Valid origin: | +| |Set to 1 if the origin field is valid. | +| |Set to 0 if the origin field is not valid. | +| |Bits[27:24] Origin. 
| +| |Bit[23] Valid err ID: | +| |Set to 1 if the error ID field is valid. | +| |Set to 0 if the error ID field is not valid. | +| |Bits[22:8] Error ID. | +| |Bit[7:0] Reason | ++--------------------+---------------------------------------------------------+ +|uint32 extinfo[8] |Array of extended info words | ++--------------------+---------------------------------------------------------+ + +MISC_SI_INFO_GET +~~~~~~~~~~~~~~~~ + +message_id: 0xB +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: silicon info return | ++--------------------+---------------------------------------------------------+ +|uint32 deviceid |Silicon specific device ID | ++--------------------+---------------------------------------------------------+ +|uint32 sirev |Silicon specific revision | ++--------------------+---------------------------------------------------------+ +|uint32 partnum |Silicon specific part number | ++--------------------+---------------------------------------------------------+ +|uint8 siname[16] |Silicon name/revision. Null terminated ASCII string of up| +| |to 16 bytes in length | ++--------------------+---------------------------------------------------------+ + +MISC_CFG_INFO_GET +~~~~~~~~~~~~~~~~~ + +message_id: 0xC +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: config name return | +| |NOT_SUPPORTED: name not available | ++--------------------+---------------------------------------------------------+ +|uint32 msel |Mode selector value | ++--------------------+---------------------------------------------------------+ +|uint8 cfgname[16] |config file basename. Null terminated ASCII string of up | +| |to 16 bytes in length | ++--------------------+---------------------------------------------------------+ + +MISC_SYSLOG_GET +~~~~~~~~~~~~~~~ + +message_id: 0xD +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 flags |Device specific flags that might impact the data returned| +| |or clearing of the data | ++--------------------+---------------------------------------------------------+ +|uint32 logindex |Index to the first log word. Will be the first element in| +| |the return array | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: system log return | ++--------------------+---------------------------------------------------------+ +|uint32 numLogflags |Descriptor for the log data returned by this call. | +| |Bits[31:20] Number of remaining log words. | +| |Bits[15:12] Reserved, must be zero. 
| +| |Bits[11:0] Number of log words that are returned by this | +| |call | ++--------------------+---------------------------------------------------------+ +|uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags| ++--------------------+---------------------------------------------------------+ + +NEGOTIATE_PROTOCOL_VERSION +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x10 +protocol_id: 0x84 + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 version |The negotiated protocol version the agent intends to use | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: if the negotiated protocol version is supported | +| |by the platform. All commands, responses, and | +| |notifications post successful return of this command must| +| |comply with the negotiated version. | +| |NOT_SUPPORTED: if the protocol version is not supported. | ++--------------------+---------------------------------------------------------+ + +Notifications +_____________ + +MISC_CONTROL_EVENT +~~~~~~~~~~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x81 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 ctrlid |Identifier for the control that caused the event. | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Event flags, varies by control. | ++------------------+-----------------------------------------------------------+ diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c deleted file mode 100644 index dd3459bdb9cb29..00000000000000 --- a/drivers/firmware/arm_scmi/virtio.c +++ /dev/null @@ -1,935 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Virtio Transport driver for Arm System Control and Management Interface - * (SCMI). - * - * Copyright (C) 2020-2022 OpenSynergy. - * Copyright (C) 2021-2022 ARM Ltd. - */ - -/** - * DOC: Theory of Operation - * - * The scmi-virtio transport implements a driver for the virtio SCMI device. - * - * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx - * channel (virtio eventq, P2A channel). Each channel is implemented through a - * virtqueue. Access to each virtqueue is protected by spinlocks. - */ - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "common.h" - -#define VIRTIO_MAX_RX_TIMEOUT_MS 60000 -#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */ -#define VIRTIO_SCMI_MAX_PDU_SIZE \ - (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) -#define DESCRIPTORS_PER_TX_MSG 2 - -/** - * struct scmi_vio_channel - Transport channel information - * - * @vqueue: Associated virtqueue - * @cinfo: SCMI Tx or Rx channel - * @free_lock: Protects access to the @free_list. 
- * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only - * @deferred_tx_work: Worker for TX deferred replies processing - * @deferred_tx_wq: Workqueue for TX deferred replies - * @pending_lock: Protects access to the @pending_cmds_list. - * @pending_cmds_list: List of pre-fetched commands queueud for later processing - * @is_rx: Whether channel is an Rx channel - * @max_msg: Maximum number of pending messages for this channel. - * @lock: Protects access to all members except users, free_list and - * pending_cmds_list. - * @shutdown_done: A reference to a completion used when freeing this channel. - * @users: A reference count to currently active users of this channel. - */ -struct scmi_vio_channel { - struct virtqueue *vqueue; - struct scmi_chan_info *cinfo; - /* lock to protect access to the free list. */ - spinlock_t free_lock; - struct list_head free_list; - /* lock to protect access to the pending list. */ - spinlock_t pending_lock; - struct list_head pending_cmds_list; - struct work_struct deferred_tx_work; - struct workqueue_struct *deferred_tx_wq; - bool is_rx; - unsigned int max_msg; - /* - * Lock to protect access to all members except users, free_list and - * pending_cmds_list - */ - spinlock_t lock; - struct completion *shutdown_done; - refcount_t users; -}; - -enum poll_states { - VIO_MSG_NOT_POLLED, - VIO_MSG_POLL_TIMEOUT, - VIO_MSG_POLLING, - VIO_MSG_POLL_DONE, -}; - -/** - * struct scmi_vio_msg - Transport PDU information - * - * @request: SDU used for commands - * @input: SDU used for (delayed) responses and notifications - * @list: List which scmi_vio_msg may be part of - * @rx_len: Input SDU size in bytes, once input has been received - * @poll_idx: Last used index registered for polling purposes if this message - * transaction reply was configured for polling. - * @poll_status: Polling state for this message. - * @poll_lock: A lock to protect @poll_status - * @users: A reference count to track this message users and avoid premature - * freeing (and reuse) when polling and IRQ execution paths interleave. 
- */ -struct scmi_vio_msg { - struct scmi_msg_payld *request; - struct scmi_msg_payld *input; - struct list_head list; - unsigned int rx_len; - unsigned int poll_idx; - enum poll_states poll_status; - /* Lock to protect access to poll_status */ - spinlock_t poll_lock; - refcount_t users; -}; - -/* Only one SCMI VirtIO device can possibly exist */ -static struct virtio_device *scmi_vdev; - -static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch, - struct scmi_chan_info *cinfo) -{ - unsigned long flags; - - spin_lock_irqsave(&vioch->lock, flags); - cinfo->transport_info = vioch; - /* Indirectly setting channel not available any more */ - vioch->cinfo = cinfo; - spin_unlock_irqrestore(&vioch->lock, flags); - - refcount_set(&vioch->users, 1); -} - -static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch) -{ - return refcount_inc_not_zero(&vioch->users); -} - -static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch) -{ - if (refcount_dec_and_test(&vioch->users)) { - unsigned long flags; - - spin_lock_irqsave(&vioch->lock, flags); - if (vioch->shutdown_done) { - vioch->cinfo = NULL; - complete(vioch->shutdown_done); - } - spin_unlock_irqrestore(&vioch->lock, flags); - } -} - -static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) -{ - unsigned long flags; - DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); - - /* - * Prepare to wait for the last release if not already released - * or in progress. - */ - spin_lock_irqsave(&vioch->lock, flags); - if (!vioch->cinfo || vioch->shutdown_done) { - spin_unlock_irqrestore(&vioch->lock, flags); - return; - } - - vioch->shutdown_done = &vioch_shutdown_done; - if (!vioch->is_rx && vioch->deferred_tx_wq) - /* Cannot be kicked anymore after this...*/ - vioch->deferred_tx_wq = NULL; - spin_unlock_irqrestore(&vioch->lock, flags); - - scmi_vio_channel_release(vioch); - - /* Let any possibly concurrent RX path release the channel */ - wait_for_completion(vioch->shutdown_done); -} - -/* Assumes to be called with vio channel acquired already */ -static struct scmi_vio_msg * -scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) -{ - unsigned long flags; - struct scmi_vio_msg *msg; - - spin_lock_irqsave(&vioch->free_lock, flags); - if (list_empty(&vioch->free_list)) { - spin_unlock_irqrestore(&vioch->free_lock, flags); - return NULL; - } - - msg = list_first_entry(&vioch->free_list, typeof(*msg), list); - list_del_init(&msg->list); - spin_unlock_irqrestore(&vioch->free_lock, flags); - - /* Still no users, no need to acquire poll_lock */ - msg->poll_status = VIO_MSG_NOT_POLLED; - refcount_set(&msg->users, 1); - - return msg; -} - -static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg) -{ - return refcount_inc_not_zero(&msg->users); -} - -/* Assumes to be called with vio channel acquired already */ -static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg) -{ - bool ret; - - ret = refcount_dec_and_test(&msg->users); - if (ret) { - unsigned long flags; - - spin_lock_irqsave(&vioch->free_lock, flags); - list_add_tail(&msg->list, &vioch->free_list); - spin_unlock_irqrestore(&vioch->free_lock, flags); - } - - return ret; -} - -static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) -{ - return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); -} - -static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg) -{ - struct scatterlist sg_in; - int rc; - unsigned long flags; - struct device *dev = 
&vioch->vqueue->vdev->dev; - - sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); - - spin_lock_irqsave(&vioch->lock, flags); - - rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); - if (rc) - dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc); - else - virtqueue_kick(vioch->vqueue); - - spin_unlock_irqrestore(&vioch->lock, flags); - - return rc; -} - -/* - * Assume to be called with channel already acquired or not ready at all; - * vioch->lock MUST NOT have been already acquired. - */ -static void scmi_finalize_message(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg) -{ - if (vioch->is_rx) - scmi_vio_feed_vq_rx(vioch, msg); - else - scmi_vio_msg_release(vioch, msg); -} - -static void scmi_vio_complete_cb(struct virtqueue *vqueue) -{ - unsigned long flags; - unsigned int length; - struct scmi_vio_channel *vioch; - struct scmi_vio_msg *msg; - bool cb_enabled = true; - - if (WARN_ON_ONCE(!vqueue->vdev->priv)) - return; - vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; - - for (;;) { - if (!scmi_vio_channel_acquire(vioch)) - return; - - spin_lock_irqsave(&vioch->lock, flags); - if (cb_enabled) { - virtqueue_disable_cb(vqueue); - cb_enabled = false; - } - - msg = virtqueue_get_buf(vqueue, &length); - if (!msg) { - if (virtqueue_enable_cb(vqueue)) { - spin_unlock_irqrestore(&vioch->lock, flags); - scmi_vio_channel_release(vioch); - return; - } - cb_enabled = true; - } - spin_unlock_irqrestore(&vioch->lock, flags); - - if (msg) { - msg->rx_len = length; - scmi_rx_callback(vioch->cinfo, - msg_read_header(msg->input), msg); - - scmi_finalize_message(vioch, msg); - } - - /* - * Release vio channel between loop iterations to allow - * virtio_chan_free() to eventually fully release it when - * shutting down; in such a case, any outstanding message will - * be ignored since this loop will bail out at the next - * iteration. - */ - scmi_vio_channel_release(vioch); - } -} - -static void scmi_vio_deferred_tx_worker(struct work_struct *work) -{ - unsigned long flags; - struct scmi_vio_channel *vioch; - struct scmi_vio_msg *msg, *tmp; - - vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work); - - if (!scmi_vio_channel_acquire(vioch)) - return; - - /* - * Process pre-fetched messages: these could be non-polled messages or - * late timed-out replies to polled messages dequeued by chance while - * polling for some other messages: this worker is in charge to process - * the valid non-expired messages and anyway finally free all of them. - */ - spin_lock_irqsave(&vioch->pending_lock, flags); - - /* Scan the list of possibly pre-fetched messages during polling. */ - list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) { - list_del(&msg->list); - - /* - * Channel is acquired here (cannot vanish) and this message - * is no more processed elsewhere so no poll_lock needed. 
- */ - if (msg->poll_status == VIO_MSG_NOT_POLLED) - scmi_rx_callback(vioch->cinfo, - msg_read_header(msg->input), msg); - - /* Free the processed message once done */ - scmi_vio_msg_release(vioch, msg); - } - - spin_unlock_irqrestore(&vioch->pending_lock, flags); - - /* Process possibly still pending messages */ - scmi_vio_complete_cb(vioch->vqueue); - - scmi_vio_channel_release(vioch); -} - -static struct virtqueue_info scmi_vio_vqs_info[] = { - { "tx", scmi_vio_complete_cb }, - { "rx", scmi_vio_complete_cb }, -}; - -static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) -{ - struct scmi_vio_channel *vioch = base_cinfo->transport_info; - - return vioch->max_msg; -} - -static int virtio_link_supplier(struct device *dev) -{ - if (!scmi_vdev) { - dev_notice(dev, - "Deferring probe after not finding a bound scmi-virtio device\n"); - return -EPROBE_DEFER; - } - - if (!device_link_add(dev, &scmi_vdev->dev, - DL_FLAG_AUTOREMOVE_CONSUMER)) { - dev_err(dev, "Adding link to supplier virtio device failed\n"); - return -ECANCELED; - } - - return 0; -} - -static bool virtio_chan_available(struct device_node *of_node, int idx) -{ - struct scmi_vio_channel *channels, *vioch = NULL; - - if (WARN_ON_ONCE(!scmi_vdev)) - return false; - - channels = (struct scmi_vio_channel *)scmi_vdev->priv; - - switch (idx) { - case VIRTIO_SCMI_VQ_TX: - vioch = &channels[VIRTIO_SCMI_VQ_TX]; - break; - case VIRTIO_SCMI_VQ_RX: - if (scmi_vio_have_vq_rx(scmi_vdev)) - vioch = &channels[VIRTIO_SCMI_VQ_RX]; - break; - default: - return false; - } - - return vioch && !vioch->cinfo; -} - -static void scmi_destroy_tx_workqueue(void *deferred_tx_wq) -{ - destroy_workqueue(deferred_tx_wq); -} - -static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, - bool tx) -{ - struct scmi_vio_channel *vioch; - int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; - int i; - - if (!scmi_vdev) - return -EPROBE_DEFER; - - vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; - - /* Setup a deferred worker for polling. */ - if (tx && !vioch->deferred_tx_wq) { - int ret; - - vioch->deferred_tx_wq = - alloc_workqueue(dev_name(&scmi_vdev->dev), - WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, - 0); - if (!vioch->deferred_tx_wq) - return -ENOMEM; - - ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue, - vioch->deferred_tx_wq); - if (ret) - return ret; - - INIT_WORK(&vioch->deferred_tx_work, - scmi_vio_deferred_tx_worker); - } - - for (i = 0; i < vioch->max_msg; i++) { - struct scmi_vio_msg *msg; - - msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - if (tx) { - msg->request = devm_kzalloc(dev, - VIRTIO_SCMI_MAX_PDU_SIZE, - GFP_KERNEL); - if (!msg->request) - return -ENOMEM; - spin_lock_init(&msg->poll_lock); - refcount_set(&msg->users, 1); - } - - msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, - GFP_KERNEL); - if (!msg->input) - return -ENOMEM; - - scmi_finalize_message(vioch, msg); - } - - scmi_vio_channel_ready(vioch, cinfo); - - return 0; -} - -static int virtio_chan_free(int id, void *p, void *data) -{ - struct scmi_chan_info *cinfo = p; - struct scmi_vio_channel *vioch = cinfo->transport_info; - - /* - * Break device to inhibit further traffic flowing while shutting down - * the channels: doing it later holding vioch->lock creates unsafe - * locking dependency chains as reported by LOCKDEP. 
- */ - virtio_break_device(vioch->vqueue->vdev); - scmi_vio_channel_cleanup_sync(vioch); - - return 0; -} - -static int virtio_send_message(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - struct scmi_vio_channel *vioch = cinfo->transport_info; - struct scatterlist sg_out; - struct scatterlist sg_in; - struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in }; - unsigned long flags; - int rc; - struct scmi_vio_msg *msg; - - if (!scmi_vio_channel_acquire(vioch)) - return -EINVAL; - - msg = scmi_virtio_get_free_msg(vioch); - if (!msg) { - scmi_vio_channel_release(vioch); - return -EBUSY; - } - - msg_tx_prepare(msg->request, xfer); - - sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); - sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); - - spin_lock_irqsave(&vioch->lock, flags); - - /* - * If polling was requested for this transaction: - * - retrieve last used index (will be used as polling reference) - * - bind the polled message to the xfer via .priv - * - grab an additional msg refcount for the poll-path - */ - if (xfer->hdr.poll_completion) { - msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); - /* Still no users, no need to acquire poll_lock */ - msg->poll_status = VIO_MSG_POLLING; - scmi_vio_msg_acquire(msg); - /* Ensure initialized msg is visibly bound to xfer */ - smp_store_mb(xfer->priv, msg); - } - - rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); - if (rc) - dev_err(vioch->cinfo->dev, - "failed to add to TX virtqueue (%d)\n", rc); - else - virtqueue_kick(vioch->vqueue); - - spin_unlock_irqrestore(&vioch->lock, flags); - - if (rc) { - /* Ensure order between xfer->priv clear and vq feeding */ - smp_store_mb(xfer->priv, NULL); - if (xfer->hdr.poll_completion) - scmi_vio_msg_release(vioch, msg); - scmi_vio_msg_release(vioch, msg); - } - - scmi_vio_channel_release(vioch); - - return rc; -} - -static void virtio_fetch_response(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - struct scmi_vio_msg *msg = xfer->priv; - - if (msg) - msg_fetch_response(msg->input, msg->rx_len, xfer); -} - -static void virtio_fetch_notification(struct scmi_chan_info *cinfo, - size_t max_len, struct scmi_xfer *xfer) -{ - struct scmi_vio_msg *msg = xfer->priv; - - if (msg) - msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); -} - -/** - * virtio_mark_txdone - Mark transmission done - * - * Free only completed polling transfer messages. - * - * Note that in the SCMI VirtIO transport we never explicitly release still - * outstanding but timed-out messages by forcibly re-adding them to the - * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the - * TX deferred worker, eventually clean up such messages once, finally, a late - * reply is received and discarded (if ever). - * - * This approach was deemed preferable since those pending timed-out buffers are - * still effectively owned by the SCMI platform VirtIO device even after timeout - * expiration: forcibly freeing and reusing them before they had been returned - * explicitly by the SCMI platform could lead to subtle bugs due to message - * corruption. - * An SCMI platform VirtIO device which never returns message buffers is - * anyway broken and it will quickly lead to exhaustion of available messages. 
- * - * For this same reason, here, we take care to free only the polled messages - * that had been somehow replied (only if not by chance already processed on the - * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also - * any timed-out polled message if that indeed appears to have been at least - * dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such - * messages won't be freed elsewhere. Any other polled message is marked as - * VIO_MSG_POLL_TIMEOUT. - * - * Possible late replies to timed-out polled messages will be eventually freed - * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if - * dequeued on some other polling path. - * - * @cinfo: SCMI channel info - * @ret: Transmission return code - * @xfer: Transfer descriptor - */ -static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret, - struct scmi_xfer *xfer) -{ - unsigned long flags; - struct scmi_vio_channel *vioch = cinfo->transport_info; - struct scmi_vio_msg *msg = xfer->priv; - - if (!msg || !scmi_vio_channel_acquire(vioch)) - return; - - /* Ensure msg is unbound from xfer anyway at this point */ - smp_store_mb(xfer->priv, NULL); - - /* Must be a polled xfer and not already freed on the IRQ path */ - if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) { - scmi_vio_channel_release(vioch); - return; - } - - spin_lock_irqsave(&msg->poll_lock, flags); - /* Do not free timedout polled messages only if still inflight */ - if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE) - scmi_vio_msg_release(vioch, msg); - else if (msg->poll_status == VIO_MSG_POLLING) - msg->poll_status = VIO_MSG_POLL_TIMEOUT; - spin_unlock_irqrestore(&msg->poll_lock, flags); - - scmi_vio_channel_release(vioch); -} - -/** - * virtio_poll_done - Provide polling support for VirtIO transport - * - * @cinfo: SCMI channel info - * @xfer: Reference to the transfer being poll for. - * - * VirtIO core provides a polling mechanism based only on last used indexes: - * this means that it is possible to poll the virtqueues waiting for something - * new to arrive from the host side, but the only way to check if the freshly - * arrived buffer was indeed what we were waiting for is to compare the newly - * arrived message descriptor with the one we are polling on. - * - * As a consequence it can happen to dequeue something different from the buffer - * we were poll-waiting for: if that is the case such early fetched buffers are - * then added to a the @pending_cmds_list list for later processing by a - * dedicated deferred worker. - * - * So, basically, once something new is spotted we proceed to de-queue all the - * freshly received used buffers until we found the one we were polling on, or, - * we have 'seemingly' emptied the virtqueue; if some buffers are still pending - * in the vqueue at the end of the polling loop (possible due to inherent races - * in virtqueues handling mechanisms), we similarly kick the deferred worker - * and let it process those, to avoid indefinitely looping in the .poll_done - * busy-waiting helper. - * - * Finally, we delegate to the deferred worker also the final free of any timed - * out reply to a polled message that we should dequeue. 
- * - * Note that, since we do NOT have per-message suppress notification mechanism, - * the message we are polling for could be alternatively delivered via usual - * IRQs callbacks on another core which happened to have IRQs enabled while we - * are actively polling for it here: in such a case it will be handled as such - * by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be - * transparently terminated anyway. - * - * Return: True once polling has successfully completed. - */ -static bool virtio_poll_done(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer) -{ - bool pending, found = false; - unsigned int length, any_prefetched = 0; - unsigned long flags; - struct scmi_vio_msg *next_msg, *msg = xfer->priv; - struct scmi_vio_channel *vioch = cinfo->transport_info; - - if (!msg) - return true; - - /* - * Processed already by other polling loop on another CPU ? - * - * Note that this message is acquired on the poll path so cannot vanish - * while inside this loop iteration even if concurrently processed on - * the IRQ path. - * - * Avoid to acquire poll_lock since polled_status can be changed - * in a relevant manner only later in this same thread of execution: - * any other possible changes made concurrently by other polling loops - * or by a reply delivered on the IRQ path have no meaningful impact on - * this loop iteration: in other words it is harmless to allow this - * possible race but let has avoid spinlocking with irqs off in this - * initial part of the polling loop. - */ - if (msg->poll_status == VIO_MSG_POLL_DONE) - return true; - - if (!scmi_vio_channel_acquire(vioch)) - return true; - - /* Has cmdq index moved at all ? */ - pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); - if (!pending) { - scmi_vio_channel_release(vioch); - return false; - } - - spin_lock_irqsave(&vioch->lock, flags); - virtqueue_disable_cb(vioch->vqueue); - - /* - * Process all new messages till the polled-for message is found OR - * the vqueue is empty. - */ - while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) { - bool next_msg_done = false; - - /* - * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so - * that can be properly freed even on timeout in mark_txdone. - */ - spin_lock(&next_msg->poll_lock); - if (next_msg->poll_status == VIO_MSG_POLLING) { - next_msg->poll_status = VIO_MSG_POLL_DONE; - next_msg_done = true; - } - spin_unlock(&next_msg->poll_lock); - - next_msg->rx_len = length; - /* Is the message we were polling for ? */ - if (next_msg == msg) { - found = true; - break; - } else if (next_msg_done) { - /* Skip the rest if this was another polled msg */ - continue; - } - - /* - * Enqueue for later processing any non-polled message and any - * timed-out polled one that we happen to have dequeued. - */ - spin_lock(&next_msg->poll_lock); - if (next_msg->poll_status == VIO_MSG_NOT_POLLED || - next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) { - spin_unlock(&next_msg->poll_lock); - - any_prefetched++; - spin_lock(&vioch->pending_lock); - list_add_tail(&next_msg->list, - &vioch->pending_cmds_list); - spin_unlock(&vioch->pending_lock); - } else { - spin_unlock(&next_msg->poll_lock); - } - } - - /* - * When the polling loop has successfully terminated if something - * else was queued in the meantime, it will be served by a deferred - * worker OR by the normal IRQ/callback OR by other poll loops. - * - * If we are still looking for the polled reply, the polling index has - * to be updated to the current vqueue last used index. 
- */ - if (found) { - pending = !virtqueue_enable_cb(vioch->vqueue); - } else { - msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); - pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); - } - - if (vioch->deferred_tx_wq && (any_prefetched || pending)) - queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work); - - spin_unlock_irqrestore(&vioch->lock, flags); - - scmi_vio_channel_release(vioch); - - return found; -} - -static const struct scmi_transport_ops scmi_virtio_ops = { - .link_supplier = virtio_link_supplier, - .chan_available = virtio_chan_available, - .chan_setup = virtio_chan_setup, - .chan_free = virtio_chan_free, - .get_max_msg = virtio_get_max_msg, - .send_message = virtio_send_message, - .fetch_response = virtio_fetch_response, - .fetch_notification = virtio_fetch_notification, - .mark_txdone = virtio_mark_txdone, - .poll_done = virtio_poll_done, -}; - -static int scmi_vio_probe(struct virtio_device *vdev) -{ - struct device *dev = &vdev->dev; - struct scmi_vio_channel *channels; - bool have_vq_rx; - int vq_cnt; - int i; - int ret; - struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT]; - - /* Only one SCMI VirtiO device allowed */ - if (scmi_vdev) { - dev_err(dev, - "One SCMI Virtio device was already initialized: only one allowed.\n"); - return -EBUSY; - } - - have_vq_rx = scmi_vio_have_vq_rx(vdev); - vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1; - - channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL); - if (!channels) - return -ENOMEM; - - if (have_vq_rx) - channels[VIRTIO_SCMI_VQ_RX].is_rx = true; - - ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_vqs_info, NULL); - if (ret) { - dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt); - return ret; - } - - for (i = 0; i < vq_cnt; i++) { - unsigned int sz; - - spin_lock_init(&channels[i].lock); - spin_lock_init(&channels[i].free_lock); - INIT_LIST_HEAD(&channels[i].free_list); - spin_lock_init(&channels[i].pending_lock); - INIT_LIST_HEAD(&channels[i].pending_cmds_list); - channels[i].vqueue = vqs[i]; - - sz = virtqueue_get_vring_size(channels[i].vqueue); - /* Tx messages need multiple descriptors. */ - if (!channels[i].is_rx) - sz /= DESCRIPTORS_PER_TX_MSG; - - if (sz > MSG_TOKEN_MAX) { - dev_info(dev, - "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", - channels[i].is_rx ? "rx" : "tx", - sz, MSG_TOKEN_MAX); - sz = MSG_TOKEN_MAX; - } - channels[i].max_msg = sz; - } - - vdev->priv = channels; - /* Ensure initialized scmi_vdev is visible */ - smp_store_mb(scmi_vdev, vdev); - - return 0; -} - -static void scmi_vio_remove(struct virtio_device *vdev) -{ - /* - * Once we get here, virtio_chan_free() will have already been called by - * the SCMI core for any existing channel and, as a consequence, all the - * virtio channels will have been already marked NOT ready, causing any - * outstanding message on any vqueue to be ignored by complete_cb: now - * we can just stop processing buffers and destroy the vqueues. 
- */ - virtio_reset_device(vdev); - vdev->config->del_vqs(vdev); - /* Ensure scmi_vdev is visible as NULL */ - smp_store_mb(scmi_vdev, NULL); -} - -static int scmi_vio_validate(struct virtio_device *vdev) -{ -#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE - if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { - dev_err(&vdev->dev, - "device does not comply with spec version 1.x\n"); - return -EINVAL; - } -#endif - return 0; -} - -static unsigned int features[] = { - VIRTIO_SCMI_F_P2A_CHANNELS, -}; - -static const struct virtio_device_id id_table[] = { - { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID }, - { 0 } -}; - -static struct virtio_driver virtio_scmi_driver = { - .driver.name = "scmi-virtio", - .feature_table = features, - .feature_table_size = ARRAY_SIZE(features), - .id_table = id_table, - .probe = scmi_vio_probe, - .remove = scmi_vio_remove, - .validate = scmi_vio_validate, -}; - -static int __init virtio_scmi_init(void) -{ - return register_virtio_driver(&virtio_scmi_driver); -} - -static void virtio_scmi_exit(void) -{ - unregister_virtio_driver(&virtio_scmi_driver); -} - -const struct scmi_desc scmi_virtio_desc = { - .transport_init = virtio_scmi_init, - .transport_exit = virtio_scmi_exit, - .ops = &scmi_virtio_ops, - /* for non-realtime virtio devices */ - .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, - .max_msg = 0, /* overridden by virtio_get_max_msg() */ - .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, - .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE), -}; diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c index 2175ffd6cef534..fda6a1573609b9 100644 --- a/drivers/firmware/arm_scmi/voltage.c +++ b/drivers/firmware/arm_scmi/voltage.c @@ -11,7 +11,7 @@ #include "protocols.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001 #define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0) #define REMAINING_LEVELS_MASK GENMASK(31, 16) @@ -229,8 +229,10 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, /* Retrieve domain attributes at first ... 
*/ put_unaligned_le32(dom, td->tx.buf); /* Skip domain on comms error */ - if (ph->xops->do_xfer(ph, td)) + if (ph->xops->do_xfer(ph, td)) { + ph->xops->reset_rx_to_maxsz(ph, td); continue; + } v = vinfo->domains + dom; v->id = dom; diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 23b002e4d4a0be..fde0656481cc95 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #ifndef SMBIOS_ENTRY_POINT_SCAN_START #define SMBIOS_ENTRY_POINT_SCAN_START 0xF0000 diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index 97bafb5f703892..0c17bdd388e122 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -309,7 +309,6 @@ static const struct file_operations efi_capsule_fops = { .open = efi_capsule_open, .write = efi_capsule_write, .release = efi_capsule_release, - .llseek = no_llseek, }; static struct miscdevice efi_capsule_misc = { diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 7d2cdd9e222740..b69e68ef3f02be 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -434,12 +434,17 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); - /* Fatal errors call __ghes_panic() before AER handler prints this */ - if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) && - (gdata->error_severity & CPER_SEV_FATAL)) { + /* + * Print all valid AER info. Record may be from BERT (boot-time) or GHES (run-time). + * + * Fatal errors call __ghes_panic() before AER handler prints this. + */ + if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) { struct aer_capability_regs *aer; aer = (struct aer_capability_regs *)pcie->aer_info; + printk("%saer_cor_status: 0x%08x, aer_cor_mask: 0x%08x\n", + pfx, aer->cor_status, aer->cor_mask); printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n", pfx, aer->uncor_status, aer->uncor_mask); printk("%saer_uncor_severity: 0x%08x\n", diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index fdf07dd6f4591d..70490bf2697b16 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -349,7 +349,7 @@ static void __init efi_debugfs_init(void) int i = 0; efi_debugfs = debugfs_create_dir("efi", NULL); - if (IS_ERR_OR_NULL(efi_debugfs)) + if (IS_ERR(efi_debugfs)) return; for_each_efi_memory_desc(md) { diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c index 0ec83ba580972f..b815d2a754eef3 100644 --- a/drivers/firmware/efi/fdtparams.c +++ b/drivers/firmware/efi/fdtparams.c @@ -8,7 +8,7 @@ #include #include -#include +#include enum { SYSTAB, diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index d33ccbc4a2c630..685098f9626f2b 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -1229,7 +1229,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab); efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc, struct efi_boot_memmap *map); void process_unaccepted_memory(u64 start, u64 end); -void accept_memory(phys_addr_t start, phys_addr_t end); +void accept_memory(phys_addr_t start, unsigned long size); void arch_accept_memory(phys_addr_t start, phys_addr_t end); #endif diff --git a/drivers/firmware/efi/libstub/riscv-stub.c 
b/drivers/firmware/efi/libstub/riscv-stub.c index c96d6dcee86c20..e7d9204baee312 100644 --- a/drivers/firmware/efi/libstub/riscv-stub.c +++ b/drivers/firmware/efi/libstub/riscv-stub.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include "efistub.h" diff --git a/drivers/firmware/efi/libstub/riscv.c b/drivers/firmware/efi/libstub/riscv.c index 8022b104c3e6ec..f66f33ceb99e30 100644 --- a/drivers/firmware/efi/libstub/riscv.c +++ b/drivers/firmware/efi/libstub/riscv.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include "efistub.h" diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index df3182f2e63a56..1fd6823248ab6e 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -96,7 +96,7 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca } /* Allocate space for the logs and copy them. */ - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, sizeof(*log_tbl) + log_size, (void **)&log_tbl); if (status != EFI_SUCCESS) { diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c index c295ea3a6efc37..757dbe734a47af 100644 --- a/drivers/firmware/efi/libstub/unaccepted_memory.c +++ b/drivers/firmware/efi/libstub/unaccepted_memory.c @@ -177,9 +177,10 @@ void process_unaccepted_memory(u64 start, u64 end) start / unit_size, (end - start) / unit_size); } -void accept_memory(phys_addr_t start, phys_addr_t end) +void accept_memory(phys_addr_t start, unsigned long size) { unsigned long range_start, range_end; + phys_addr_t end = start + size; unsigned long bitmap_size; u64 unit_size; diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c index 1ceace95675868..af23b3c502282f 100644 --- a/drivers/firmware/efi/libstub/zboot.c +++ b/drivers/firmware/efi/libstub/zboot.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include "efistub.h" diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index 47d67bb0a516d6..9e2628728aadc7 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -750,7 +750,6 @@ static const struct file_operations efi_test_fops = { .unlocked_ioctl = efi_test_ioctl, .open = efi_test_open, .release = efi_test_close, - .llseek = no_llseek, }; static struct miscdevice efi_test_dev = { diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c index 50f6503fe49f5e..c2c067eff63454 100644 --- a/drivers/firmware/efi/unaccepted_memory.c +++ b/drivers/firmware/efi/unaccepted_memory.c @@ -30,11 +30,12 @@ static LIST_HEAD(accepting_list); * - memory that is below phys_base; * - memory that is above the memory that addressable by the bitmap; */ -void accept_memory(phys_addr_t start, phys_addr_t end) +void accept_memory(phys_addr_t start, unsigned long size) { struct efi_unaccepted_memory *unaccepted; unsigned long range_start, range_end; struct accept_range range, *entry; + phys_addr_t end = start + size; unsigned long flags; u64 unit_size; @@ -74,13 +75,13 @@ void accept_memory(phys_addr_t start, phys_addr_t end) * "guard" page is accepted in addition to the memory that needs to be * used: * - * 1. Implicitly extend the range_contains_unaccepted_memory(start, end) - * checks up to end+unit_size if 'end' is aligned on a unit_size - * boundary. + * 1. 
Implicitly extend the range_contains_unaccepted_memory(start, size) + * checks up to the next unit_size if 'start+size' is aligned on a + * unit_size boundary. * - * 2. Implicitly extend accept_memory(start, end) to end+unit_size if - * 'end' is aligned on a unit_size boundary. (immediately following - * this comment) + * 2. Implicitly extend accept_memory(start, size) to the next unit_size + * if 'size+end' is aligned on a unit_size boundary. (immediately + * following this comment) */ if (!(end % unit_size)) end += unit_size; @@ -156,9 +157,10 @@ void accept_memory(phys_addr_t start, phys_addr_t end) spin_unlock_irqrestore(&unaccepted_memory_lock, flags); } -bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end) +bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size) { struct efi_unaccepted_memory *unaccepted; + phys_addr_t end = start + size; unsigned long flags; bool ret = false; u64 unit_size; diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 183613f82a1100..477d3f32d99a6b 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -22,3 +22,14 @@ config IMX_SCU This driver manages the IPC interface between host CPU and the SCU firmware running on M4. + +config IMX_SCMI_MISC_DRV + tristate "IMX SCMI MISC Protocol driver" + depends on IMX_SCMI_MISC_EXT || COMPILE_TEST + default y if ARCH_MXC + help + The System Controller Management Interface firmware (SCMI FW) is + a low-level system function which runs on a dedicated Cortex-M + core that could provide misc functions such as board control. + + This driver can also be built as a module. diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile index 8f9f04a513a8c4..8d046c341be878 100644 --- a/drivers/firmware/imx/Makefile +++ b/drivers/firmware/imx/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IMX_DSP) += imx-dsp.o obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o +obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o diff --git a/drivers/firmware/imx/sm-misc.c b/drivers/firmware/imx/sm-misc.c new file mode 100644 index 00000000000000..fc3ee12c2be878 --- /dev/null +++ b/drivers/firmware/imx/sm-misc.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2024 NXP + */ + +#include +#include +#include +#include +#include +#include + +static const struct scmi_imx_misc_proto_ops *imx_misc_ctrl_ops; +static struct scmi_protocol_handle *ph; +struct notifier_block scmi_imx_misc_ctrl_nb; + +int scmi_imx_misc_ctrl_set(u32 id, u32 val) +{ + if (!ph) + return -EPROBE_DEFER; + + return imx_misc_ctrl_ops->misc_ctrl_set(ph, id, 1, &val); +}; +EXPORT_SYMBOL(scmi_imx_misc_ctrl_set); + +int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val) +{ + if (!ph) + return -EPROBE_DEFER; + + return imx_misc_ctrl_ops->misc_ctrl_get(ph, id, num, val); +} +EXPORT_SYMBOL(scmi_imx_misc_ctrl_get); + +static int scmi_imx_misc_ctrl_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + /* + * notifier_chain_register requires a valid notifier_block and + * valid notifier_call. SCMI_EVENT_IMX_MISC_CONTROL is needed + * to let SCMI firmware enable control events, but the hook here + * is just a dummy function to avoid kernel panic as of now. 
+ */ + return 0; +} + +static int scmi_imx_misc_ctrl_probe(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + struct device_node *np = sdev->dev.of_node; + u32 src_id, flags; + int ret, i, num; + + if (!handle) + return -ENODEV; + + if (imx_misc_ctrl_ops) { + dev_err(&sdev->dev, "misc ctrl already initialized\n"); + return -EEXIST; + } + + imx_misc_ctrl_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_MISC, &ph); + if (IS_ERR(imx_misc_ctrl_ops)) + return PTR_ERR(imx_misc_ctrl_ops); + + num = of_property_count_u32_elems(np, "nxp,ctrl-ids"); + if (num % 2) { + dev_err(&sdev->dev, "Invalid wakeup-sources\n"); + return -EINVAL; + } + + scmi_imx_misc_ctrl_nb.notifier_call = &scmi_imx_misc_ctrl_notifier; + for (i = 0; i < num; i += 2) { + ret = of_property_read_u32_index(np, "nxp,ctrl-ids", i, &src_id); + if (ret) { + dev_err(&sdev->dev, "Failed to read ctrl-id: %i\n", i); + continue; + } + + ret = of_property_read_u32_index(np, "nxp,ctrl-ids", i + 1, &flags); + if (ret) { + dev_err(&sdev->dev, "Failed to read ctrl-id value: %d\n", i + 1); + continue; + } + + ret = handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_IMX_MISC, + SCMI_EVENT_IMX_MISC_CONTROL, + &src_id, + &scmi_imx_misc_ctrl_nb); + if (ret) { + dev_err(&sdev->dev, "Failed to register scmi misc event: %d\n", src_id); + } else { + ret = imx_misc_ctrl_ops->misc_ctrl_req_notify(ph, src_id, + SCMI_EVENT_IMX_MISC_CONTROL, + flags); + if (ret) + dev_err(&sdev->dev, "Failed to req notify: %d\n", src_id); + } + } + + return 0; +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_IMX_MISC, "imx-misc-ctrl" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_imx_misc_ctrl_driver = { + .name = "scmi-imx-misc-ctrl", + .probe = scmi_imx_misc_ctrl_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_imx_misc_ctrl_driver); + +MODULE_AUTHOR("Peng Fan "); +MODULE_DESCRIPTION("IMX SM MISC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig index 73a1a41bf92ddd..b477d54b495a62 100644 --- a/drivers/firmware/qcom/Kconfig +++ b/drivers/firmware/qcom/Kconfig @@ -41,17 +41,6 @@ config QCOM_TZMEM_MODE_SHMBRIDGE endchoice -config QCOM_SCM_DOWNLOAD_MODE_DEFAULT - bool "Qualcomm download mode enabled by default" - depends on QCOM_SCM - help - A device with "download mode" enabled will upon an unexpected - warm-restart enter a special debug mode that allows the user to - "download" memory content over USB for offline postmortem analysis. - The feature can be enabled/disabled on the kernel command line. - - Say Y here to enable "download mode" by default. 
- config QCOM_QSEECOM bool "Qualcomm QSEECOM interface driver" depends on QCOM_SCM=y diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 00c379a3ccebeb..10986cb11ec028 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -32,8 +33,7 @@ #include "qcom_scm.h" #include "qcom_tzmem.h" -static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); -module_param(download_mode, bool, 0); +static u32 download_mode; struct qcom_scm { struct device *dev; @@ -126,6 +126,8 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { #define QCOM_DLOAD_MASK GENMASK(5, 4) #define QCOM_DLOAD_NODUMP 0 #define QCOM_DLOAD_FULLDUMP 1 +#define QCOM_DLOAD_MINIDUMP 2 +#define QCOM_DLOAD_BOTHDUMP 3 static const char * const qcom_scm_convention_names[] = { [SMC_CONVENTION_UNKNOWN] = "unknown", @@ -134,6 +136,13 @@ static const char * const qcom_scm_convention_names[] = { [SMC_CONVENTION_LEGACY] = "smc legacy", }; +static const char * const download_mode_name[] = { + [QCOM_DLOAD_NODUMP] = "off", + [QCOM_DLOAD_FULLDUMP] = "full", + [QCOM_DLOAD_MINIDUMP] = "mini", + [QCOM_DLOAD_BOTHDUMP] = "full,mini", +}; + static struct qcom_scm *__scm; static int qcom_scm_clk_enable(void) @@ -526,17 +535,16 @@ static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val return qcom_scm_io_writel(addr, new); } -static void qcom_scm_set_download_mode(bool enable) +static void qcom_scm_set_download_mode(u32 dload_mode) { - u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP; int ret = 0; if (__scm->dload_mode_addr) { ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK, - FIELD_PREP(QCOM_DLOAD_MASK, val)); + FIELD_PREP(QCOM_DLOAD_MASK, dload_mode)); } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_SET_DLOAD_MODE)) { - ret = __qcom_scm_set_dload_mode(__scm->dev, enable); + ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode); } else { dev_err(__scm->dev, "No available mechanism for setting download mode\n"); @@ -1724,7 +1732,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); */ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { { .compatible = "lenovo,flex-5g" }, + { .compatible = "lenovo,thinkpad-t14s" }, { .compatible = "lenovo,thinkpad-x13s", }, + { .compatible = "microsoft,romulus13", }, + { .compatible = "microsoft,romulus15", }, { .compatible = "qcom,sc8180x-primus" }, { .compatible = "qcom,x1e80100-crd" }, { .compatible = "qcom,x1e80100-qcp" }, @@ -1886,6 +1897,45 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data) return IRQ_HANDLED; } +static int get_download_mode(char *buffer, const struct kernel_param *kp) +{ + if (download_mode >= ARRAY_SIZE(download_mode_name)) + return sysfs_emit(buffer, "unknown mode\n"); + + return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]); +} + +static int set_download_mode(const char *val, const struct kernel_param *kp) +{ + bool tmp; + int ret; + + ret = sysfs_match_string(download_mode_name, val); + if (ret < 0) { + ret = kstrtobool(val, &tmp); + if (ret < 0) { + pr_err("qcom_scm: err: %d\n", ret); + return ret; + } + + ret = tmp ? 
1 : 0; + } + + download_mode = ret; + if (__scm) + qcom_scm_set_download_mode(download_mode); + + return 0; +} + +static const struct kernel_param_ops download_mode_param_ops = { + .get = get_download_mode, + .set = set_download_mode, +}; + +module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644); +MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode and full,mini for both full and minidump mode together are acceptable values"); + static int qcom_scm_probe(struct platform_device *pdev) { struct qcom_tzmem_pool_config pool_config; @@ -1950,18 +2000,16 @@ static int qcom_scm_probe(struct platform_device *pdev) __get_convention(); /* - * If requested enable "download mode", from this point on warmboot + * If "download mode" is requested, from this point on warmboot * will cause the boot stages to enter download mode, unless * disabled below by a clean shutdown/reboot. */ - if (download_mode) - qcom_scm_set_download_mode(true); - + qcom_scm_set_download_mode(download_mode); /* * Disable SDI if indicated by DT that it is enabled by default. */ - if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled")) + if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode) qcom_scm_disable_sdi(); ret = of_reserved_mem_device_init(__scm->dev); @@ -2003,7 +2051,7 @@ static int qcom_scm_probe(struct platform_device *pdev) static void qcom_scm_shutdown(struct platform_device *pdev) { /* Clean shutdown, disable download mode to allow normal restart */ - qcom_scm_set_download_mode(false); + qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP); } static const struct of_device_id qcom_scm_dt_match[] = { diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 5f43dfa22f799c..85c525745b311f 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -452,7 +452,7 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj) } /* kobj_type: ties together all properties required to register an entry */ -static struct kobj_type fw_cfg_sysfs_entry_ktype = { +static const struct kobj_type fw_cfg_sysfs_entry_ktype = { .default_groups = fw_cfg_sysfs_entry_groups, .sysfs_ops = &fw_cfg_sysfs_attr_ops, .release = fw_cfg_sysfs_release_entry, diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index ac34876a97f8b0..18cc3498710853 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -62,7 +62,6 @@ rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data) ret = 0; } else { ret = -ETIMEDOUT; - WARN_ONCE(1, "Firmware transaction timeout"); } } else { dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret); @@ -125,6 +124,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw, dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n", buf[2], buf[1]); ret = -EINVAL; + } else if (ret == -ETIMEDOUT) { + WARN_ONCE(1, "Firmware transaction 0x%08x timeout", buf[2]); } dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr); diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c index 89a68e7eeaa6f5..f3319be20b3659 100644 --- a/drivers/firmware/smccc/kvm_guest.c +++ b/drivers/firmware/smccc/kvm_guest.c @@ -39,6 +39,8 @@ void __init kvm_init_hyp_services(void) pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n", res.a3, res.a2, res.a1, res.a0); + + kvm_arch_init_hyp_services(); } bool kvm_arm_hyp_service_available(u32 func_id) 
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index 02a07d3d0d40a9..a3df782fa687b0 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -67,9 +67,11 @@ static bool sysfb_unregister(void) void sysfb_disable(struct device *dev) { struct screen_info *si = &screen_info; + struct device *parent; mutex_lock(&disable_lock); - if (!dev || dev == sysfb_parent_dev(si)) { + parent = sysfb_parent_dev(si); + if (!dev || !parent || dev == parent) { sysfb_unregister(); disabled = true; } diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index c1590d3aa9cb78..2bee6e918f8117 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -3,6 +3,7 @@ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. */ +#include #include #include #include @@ -24,12 +25,6 @@ #define MSG_RING BIT(1) #define TAG_SZ 32 -static inline struct tegra_bpmp * -mbox_client_to_bpmp(struct mbox_client *client) -{ - return container_of(client, struct tegra_bpmp, mbox.client); -} - static inline const struct tegra_bpmp_ops * channel_to_ops(struct tegra_bpmp_channel *channel) { @@ -40,29 +35,24 @@ channel_to_ops(struct tegra_bpmp_channel *channel) struct tegra_bpmp *tegra_bpmp_get(struct device *dev) { + struct device_node *np __free(device_node); struct platform_device *pdev; struct tegra_bpmp *bpmp; - struct device_node *np; np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0); if (!np) return ERR_PTR(-ENOENT); pdev = of_find_device_by_node(np); - if (!pdev) { - bpmp = ERR_PTR(-ENODEV); - goto put; - } + if (!pdev) + return ERR_PTR(-ENODEV); bpmp = platform_get_drvdata(pdev); if (!bpmp) { - bpmp = ERR_PTR(-EPROBE_DEFER); put_device(&pdev->dev); - goto put; + return ERR_PTR(-EPROBE_DEFER); } -put: - of_node_put(np); return bpmp; } EXPORT_SYMBOL_GPL(tegra_bpmp_get); diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 3e7f186d239a2b..f3bc0d4278256b 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -5,26 +5,43 @@ * Copyright (C) 2019, 2024 Marek Behún */ +#include +#include #include #include +#include #include +#include #include +#include +#include #include +#include +#include #include +#include #include #include -#include #include -#include +#include +#include +#include #define DRIVER_NAME "turris-mox-rwtm" +#define RWTM_DMA_BUFFER_SIZE SZ_4K + /* * The macros and constants below come from Turris Mox's rWTM firmware code. * This firmware is open source and it's sources can be found at * https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi. 
*/ +#define MOX_ECC_NUMBER_WORDS 17 +#define MOX_ECC_NUMBER_LEN (MOX_ECC_NUMBER_WORDS * sizeof(u32)) + +#define MOX_ECC_SIGNATURE_WORDS (2 * MOX_ECC_NUMBER_WORDS) + #define MBOX_STS_SUCCESS (0 << 30) #define MBOX_STS_FAIL (1 << 30) #define MBOX_STS_BADCMD (2 << 30) @@ -44,13 +61,9 @@ enum mbox_cmd { MBOX_CMD_OTP_WRITE = 8, }; -struct mox_kobject; - struct mox_rwtm { - struct device *dev; struct mbox_client mbox_client; struct mbox_chan *mbox; - struct mox_kobject *kobj; struct hwrng hwrng; struct armada_37xx_rwtm_rx_msg reply; @@ -62,13 +75,13 @@ struct mox_rwtm { struct completion cmd_done; /* board information */ - int has_board_info; + bool has_board_info; u64 serial_number; int board_version, ram_size; - u8 mac_address1[6], mac_address2[6]; + u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN]; /* public key burned in eFuse */ - int has_pubkey; + bool has_pubkey; u8 pubkey[135]; #ifdef CONFIG_DEBUG_FS @@ -78,65 +91,27 @@ struct mox_rwtm { * It should be rewritten via crypto API once akcipher API is available * from userspace. */ - struct dentry *debugfs_root; - u32 last_sig[34]; - int last_sig_done; + u32 last_sig[MOX_ECC_SIGNATURE_WORDS]; + bool last_sig_done; #endif }; -struct mox_kobject { - struct kobject kobj; - struct mox_rwtm *rwtm; -}; - -static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm) -{ - return &rwtm->kobj->kobj; -} - -static inline struct mox_rwtm *to_rwtm(struct kobject *kobj) -{ - return container_of(kobj, struct mox_kobject, kobj)->rwtm; -} - -static void mox_kobj_release(struct kobject *kobj) -{ - kfree(to_rwtm(kobj)->kobj); -} - -static const struct kobj_type mox_kobj_ktype = { - .release = mox_kobj_release, - .sysfs_ops = &kobj_sysfs_ops, -}; - -static int mox_kobj_create(struct mox_rwtm *rwtm) +static inline struct device *rwtm_dev(struct mox_rwtm *rwtm) { - rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL); - if (!rwtm->kobj) - return -ENOMEM; - - kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype); - if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) { - kobject_put(rwtm_to_kobj(rwtm)); - return -ENXIO; - } - - rwtm->kobj->rwtm = rwtm; - - return 0; + return rwtm->mbox_client.dev; } #define MOX_ATTR_RO(name, format, cat) \ static ssize_t \ -name##_show(struct kobject *kobj, struct kobj_attribute *a, \ +name##_show(struct device *dev, struct device_attribute *a, \ char *buf) \ { \ - struct mox_rwtm *rwtm = to_rwtm(kobj); \ + struct mox_rwtm *rwtm = dev_get_drvdata(dev); \ if (!rwtm->has_##cat) \ return -ENODATA; \ - return sprintf(buf, format, rwtm->name); \ + return sysfs_emit(buf, format, rwtm->name); \ } \ -static struct kobj_attribute mox_attr_##name = __ATTR_RO(name) +static DEVICE_ATTR_RO(name) MOX_ATTR_RO(serial_number, "%016llX\n", board_info); MOX_ATTR_RO(board_version, "%i\n", board_info); @@ -145,6 +120,17 @@ MOX_ATTR_RO(mac_address1, "%pM\n", board_info); MOX_ATTR_RO(mac_address2, "%pM\n", board_info); MOX_ATTR_RO(pubkey, "%s\n", pubkey); +static struct attribute *turris_mox_rwtm_attrs[] = { + &dev_attr_serial_number.attr, + &dev_attr_board_version.attr, + &dev_attr_ram_size.attr, + &dev_attr_mac_address1.attr, + &dev_attr_mac_address2.attr, + &dev_attr_pubkey.attr, + NULL +}; +ATTRIBUTE_GROUPS(turris_mox_rwtm); + static int mox_get_status(enum mbox_cmd cmd, u32 retval) { if (MBOX_STS_CMD(retval) != cmd) @@ -152,23 +138,13 @@ static int mox_get_status(enum mbox_cmd cmd, u32 retval) else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL) return -(int)MBOX_STS_VALUE(retval); else if (MBOX_STS_ERROR(retval) == 
MBOX_STS_BADCMD) - return -ENOSYS; + return -EOPNOTSUPP; else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS) return -EIO; else return MBOX_STS_VALUE(retval); } -static const struct attribute *mox_rwtm_attrs[] = { - &mox_attr_serial_number.attr, - &mox_attr_board_version.attr, - &mox_attr_ram_size.attr, - &mox_attr_mac_address1.attr, - &mox_attr_mac_address2.attr, - &mox_attr_pubkey.attr, - NULL -}; - static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data) { struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev); @@ -181,6 +157,34 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data) complete(&rwtm->cmd_done); } +static int mox_rwtm_exec(struct mox_rwtm *rwtm, enum mbox_cmd cmd, + struct armada_37xx_rwtm_tx_msg *msg, + bool interruptible) +{ + struct armada_37xx_rwtm_tx_msg _msg = {}; + int ret; + + if (!msg) + msg = &_msg; + + msg->command = cmd; + + ret = mbox_send_message(rwtm->mbox, msg); + if (ret < 0) + return ret; + + if (interruptible) { + ret = wait_for_completion_interruptible(&rwtm->cmd_done); + if (ret < 0) + return ret; + } else { + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; + } + + return mox_get_status(cmd, rwtm->reply.retval); +} + static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2) { mac[0] = t1 >> 8; @@ -193,24 +197,16 @@ static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2) static int mox_get_board_info(struct mox_rwtm *rwtm) { - struct armada_37xx_rwtm_tx_msg msg; + struct device *dev = rwtm_dev(rwtm); struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply; int ret; - msg.command = MBOX_CMD_BOARD_INFO; - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - return ret; - - if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) - return -ETIMEDOUT; - - ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval); + ret = mox_rwtm_exec(rwtm, MBOX_CMD_BOARD_INFO, NULL, false); if (ret == -ENODATA) { - dev_warn(rwtm->dev, + dev_warn(dev, "Board does not have manufacturing information burned!\n"); - } else if (ret == -ENOSYS) { - dev_notice(rwtm->dev, + } else if (ret == -EOPNOTSUPP) { + dev_notice(dev, "Firmware does not support the BOARD_INFO command\n"); } else if (ret < 0) { return ret; @@ -224,7 +220,7 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) reply->status[5]); reply_to_mac_addr(rwtm->mac_address2, reply->status[6], reply->status[7]); - rwtm->has_board_info = 1; + rwtm->has_board_info = true; pr_info("Turris Mox serial number %016llX\n", rwtm->serial_number); @@ -232,26 +228,18 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) pr_info(" burned RAM size %i MiB\n", rwtm->ram_size); } - msg.command = MBOX_CMD_ECDSA_PUB_KEY; - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - return ret; - - if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) - return -ETIMEDOUT; - - ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval); + ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false); if (ret == -ENODATA) { - dev_warn(rwtm->dev, "Board has no public key burned!\n"); - } else if (ret == -ENOSYS) { - dev_notice(rwtm->dev, + dev_warn(dev, "Board has no public key burned!\n"); + } else if (ret == -EOPNOTSUPP) { + dev_notice(dev, "Firmware does not support the ECDSA_PUB_KEY command\n"); } else if (ret < 0) { return ret; } else { u32 *s = reply->status; - rwtm->has_pubkey = 1; + rwtm->has_pubkey = true; sprintf(rwtm->pubkey, "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x", ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], @@ -263,37 +251,22 
@@ static int mox_get_board_info(struct mox_rwtm *rwtm) static int check_get_random_support(struct mox_rwtm *rwtm) { - struct armada_37xx_rwtm_tx_msg msg; - int ret; + struct armada_37xx_rwtm_tx_msg msg = { + .args = { 1, rwtm->buf_phys, 4 }, + }; - msg.command = MBOX_CMD_GET_RANDOM; - msg.args[0] = 1; - msg.args[1] = rwtm->buf_phys; - msg.args[2] = 4; - - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - return ret; - - if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) - return -ETIMEDOUT; - - return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); + return mox_rwtm_exec(rwtm, MBOX_CMD_GET_RANDOM, &msg, false); } static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { - struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv; - struct armada_37xx_rwtm_tx_msg msg; + struct mox_rwtm *rwtm = container_of(rng, struct mox_rwtm, hwrng); + struct armada_37xx_rwtm_tx_msg msg = { + .args = { 1, rwtm->buf_phys, ALIGN(max, 4) }, + }; int ret; - if (max > 4096) - max = 4096; - - msg.command = MBOX_CMD_GET_RANDOM; - msg.args[0] = 1; - msg.args[1] = rwtm->buf_phys; - msg.args[2] = (max + 3) & ~3; + max = min(max, RWTM_DMA_BUFFER_SIZE); if (!wait) { if (!mutex_trylock(&rwtm->busy)) @@ -302,15 +275,7 @@ static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) mutex_lock(&rwtm->busy); } - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - goto unlock_mutex; - - ret = wait_for_completion_interruptible(&rwtm->cmd_done); - if (ret < 0) - goto unlock_mutex; - - ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); + ret = mox_rwtm_exec(rwtm, MBOX_CMD_GET_RANDOM, &msg, true); if (ret < 0) goto unlock_mutex; @@ -336,19 +301,19 @@ static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len, struct mox_rwtm *rwtm = file->private_data; ssize_t ret; - /* only allow one read, of 136 bytes, from position 0 */ + /* only allow one read, of whole signature, from position 0 */ if (*ppos != 0) return 0; - if (len < 136) + if (len < sizeof(rwtm->last_sig)) return -EINVAL; if (!rwtm->last_sig_done) return -ENODATA; - /* 2 arrays of 17 32-bit words are 136 bytes */ - ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, 136); - rwtm->last_sig_done = 0; + ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, + sizeof(rwtm->last_sig)); + rwtm->last_sig_done = false; return ret; } @@ -357,13 +322,11 @@ static ssize_t do_sign_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct mox_rwtm *rwtm = file->private_data; - struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply; struct armada_37xx_rwtm_tx_msg msg; loff_t dummy = 0; ssize_t ret; - /* the input is a SHA-512 hash, so exactly 64 bytes have to be read */ - if (len != 64) + if (len != SHA512_DIGEST_SIZE) return -EINVAL; /* if last result is not zero user has not read that information yet */ @@ -384,37 +347,32 @@ static ssize_t do_sign_write(struct file *file, const char __user *buf, * 3. Address of the buffer where ECDSA signature value S shall be * stored by the rWTM firmware. 
*/ - memset(rwtm->buf, 0, 4); - ret = simple_write_to_buffer(rwtm->buf + 4, 64, &dummy, buf, len); + memset(rwtm->buf, 0, sizeof(u32)); + ret = simple_write_to_buffer(rwtm->buf + sizeof(u32), + SHA512_DIGEST_SIZE, &dummy, buf, len); if (ret < 0) goto unlock_mutex; - be32_to_cpu_array(rwtm->buf, rwtm->buf, 17); + be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUMBER_WORDS); - msg.command = MBOX_CMD_SIGN; msg.args[0] = 1; msg.args[1] = rwtm->buf_phys; - msg.args[2] = rwtm->buf_phys + 68; - msg.args[3] = rwtm->buf_phys + 2 * 68; - ret = mbox_send_message(rwtm->mbox, &msg); - if (ret < 0) - goto unlock_mutex; + msg.args[2] = rwtm->buf_phys + MOX_ECC_NUMBER_LEN; + msg.args[3] = rwtm->buf_phys + 2 * MOX_ECC_NUMBER_LEN; - ret = wait_for_completion_interruptible(&rwtm->cmd_done); + ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true); if (ret < 0) goto unlock_mutex; - ret = MBOX_STS_VALUE(reply->retval); - if (MBOX_STS_ERROR(reply->retval) != MBOX_STS_SUCCESS) - goto unlock_mutex; - /* * Here we read the R and S values of the ECDSA signature * computed by the rWTM firmware and convert their words from * LE to BE. */ - memcpy(rwtm->last_sig, rwtm->buf + 68, 136); - cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, 34); - rwtm->last_sig_done = 1; + memcpy(rwtm->last_sig, rwtm->buf + MOX_ECC_NUMBER_LEN, + sizeof(rwtm->last_sig)); + cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, + MOX_ECC_SIGNATURE_WORDS); + rwtm->last_sig_done = true; mutex_unlock(&rwtm->busy); return len; @@ -428,45 +386,38 @@ static const struct file_operations do_sign_fops = { .open = rwtm_debug_open, .read = do_sign_read, .write = do_sign_write, - .llseek = no_llseek, }; -static int rwtm_register_debugfs(struct mox_rwtm *rwtm) +static void rwtm_debugfs_release(void *root) { - struct dentry *root, *entry; - - root = debugfs_create_dir("turris-mox-rwtm", NULL); + debugfs_remove_recursive(root); +} - if (IS_ERR(root)) - return PTR_ERR(root); +static void rwtm_register_debugfs(struct mox_rwtm *rwtm) +{ + struct dentry *root; - entry = debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, - &do_sign_fops); - if (IS_ERR(entry)) - goto err_remove; + root = debugfs_create_dir("turris-mox-rwtm", NULL); - rwtm->debugfs_root = root; + debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, &do_sign_fops); - return 0; -err_remove: - debugfs_remove_recursive(root); - return PTR_ERR(entry); + devm_add_action_or_reset(rwtm_dev(rwtm), rwtm_debugfs_release, root); } - -static void rwtm_unregister_debugfs(struct mox_rwtm *rwtm) +#else +static inline void rwtm_register_debugfs(struct mox_rwtm *rwtm) { - debugfs_remove_recursive(rwtm->debugfs_root); } -#else -static inline int rwtm_register_debugfs(struct mox_rwtm *rwtm) +#endif + +static void rwtm_devm_mbox_release(void *mbox) { - return 0; + mbox_free_channel(mbox); } -static inline void rwtm_unregister_debugfs(struct mox_rwtm *rwtm) +static void rwtm_firmware_symlink_drop(void *parent) { + sysfs_remove_link(parent, DRIVER_NAME); } -#endif static int turris_mox_rwtm_probe(struct platform_device *pdev) { @@ -478,40 +429,30 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) if (!rwtm) return -ENOMEM; - rwtm->dev = dev; - rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys, - GFP_KERNEL); + rwtm->buf = dmam_alloc_coherent(dev, RWTM_DMA_BUFFER_SIZE, + &rwtm->buf_phys, GFP_KERNEL); if (!rwtm->buf) return -ENOMEM; - ret = mox_kobj_create(rwtm); - if (ret < 0) { - dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n"); - return ret; - } - - ret = 
sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); - if (ret < 0) { - dev_err(dev, "Cannot create sysfs files!\n"); - goto put_kobj; - } - platform_set_drvdata(pdev, rwtm); - mutex_init(&rwtm->busy); + ret = devm_mutex_init(dev, &rwtm->busy); + if (ret) + return ret; + init_completion(&rwtm->cmd_done); rwtm->mbox_client.dev = dev; rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback; rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0); - if (IS_ERR(rwtm->mbox)) { - ret = PTR_ERR(rwtm->mbox); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Cannot request mailbox channel: %i\n", - ret); - goto remove_files; - } + if (IS_ERR(rwtm->mbox)) + return dev_err_probe(dev, PTR_ERR(rwtm->mbox), + "Cannot request mailbox channel!\n"); + + ret = devm_add_action_or_reset(dev, rwtm_devm_mbox_release, rwtm->mbox); + if (ret) + return ret; ret = mox_get_board_info(rwtm); if (ret < 0) @@ -521,46 +462,30 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) if (ret < 0) { dev_notice(dev, "Firmware does not support the GET_RANDOM command\n"); - goto free_channel; + return ret; } rwtm->hwrng.name = DRIVER_NAME "_hwrng"; rwtm->hwrng.read = mox_hwrng_read; - rwtm->hwrng.priv = (unsigned long) rwtm; ret = devm_hwrng_register(dev, &rwtm->hwrng); - if (ret < 0) { - dev_err(dev, "Cannot register HWRNG: %i\n", ret); - goto free_channel; - } + if (ret) + return dev_err_probe(dev, ret, "Cannot register HWRNG!\n"); - ret = rwtm_register_debugfs(rwtm); - if (ret < 0) { - dev_err(dev, "Failed creating debugfs entries: %i\n", ret); - goto free_channel; - } + rwtm_register_debugfs(rwtm); dev_info(dev, "HWRNG successfully registered\n"); - return 0; - -free_channel: - mbox_free_channel(rwtm->mbox); -remove_files: - sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); -put_kobj: - kobject_put(rwtm_to_kobj(rwtm)); - return ret; -} - -static void turris_mox_rwtm_remove(struct platform_device *pdev) -{ - struct mox_rwtm *rwtm = platform_get_drvdata(pdev); + /* + * For sysfs ABI compatibility, create symlink + * /sys/firmware/turris-mox-rwtm to this device's sysfs directory. + */ + ret = sysfs_create_link(firmware_kobj, &dev->kobj, DRIVER_NAME); + if (!ret) + devm_add_action_or_reset(dev, rwtm_firmware_symlink_drop, + firmware_kobj); - rwtm_unregister_debugfs(rwtm); - sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); - kobject_put(rwtm_to_kobj(rwtm)); - mbox_free_channel(rwtm->mbox); + return 0; } static const struct of_device_id turris_mox_rwtm_match[] = { @@ -573,10 +498,10 @@ MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match); static struct platform_driver turris_mox_rwtm_driver = { .probe = turris_mox_rwtm_probe, - .remove_new = turris_mox_rwtm_remove, .driver = { .name = DRIVER_NAME, .of_match_table = turris_mox_rwtm_match, + .dev_groups = turris_mox_rwtm_groups, }, }; module_platform_driver(turris_mox_rwtm_driver); diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c index 2a82c726d6e59c..6134cea86ac856 100644 --- a/drivers/fpga/microchip-spi.c +++ b/drivers/fpga/microchip-spi.c @@ -3,7 +3,7 @@ * Microchip Polarfire FPGA programming over slave SPI interface. 
*/ -#include +#include #include #include #include diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c index 723ea0ad3f09b6..b08b4bb8f6507d 100644 --- a/drivers/fpga/socfpga.c +++ b/drivers/fpga/socfpga.c @@ -301,16 +301,17 @@ static irqreturn_t socfpga_fpga_isr(int irq, void *dev_id) static int socfpga_fpga_wait_for_config_done(struct socfpga_fpga_priv *priv) { - int timeout, ret = 0; + int ret = 0; + long time_left; socfpga_fpga_disable_irqs(priv); init_completion(&priv->status_complete); socfpga_fpga_enable_irqs(priv, SOCFPGA_FPGMGR_MON_CONF_DONE); - timeout = wait_for_completion_interruptible_timeout( + time_left = wait_for_completion_interruptible_timeout( &priv->status_complete, msecs_to_jiffies(10)); - if (timeout == 0) + if (time_left == 0) ret = -ETIMEDOUT; socfpga_fpga_disable_irqs(priv); diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c index 2f7a24f238083c..b9ab29809e9619 100644 --- a/drivers/fpga/tests/fpga-bridge-test.c +++ b/drivers/fpga/tests/fpga-bridge-test.c @@ -23,6 +23,13 @@ struct bridge_ctx { struct bridge_stats stats; }; +/* + * Wrapper to avoid a cast warning when passing the action function directly + * to kunit_add_action(). + */ +KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister, + struct fpga_bridge *); + static int op_enable_set(struct fpga_bridge *bridge, bool enable) { struct bridge_stats *stats = bridge->priv; @@ -50,6 +57,7 @@ static const struct fpga_bridge_ops fake_bridge_ops = { static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *dev_name) { struct bridge_ctx *ctx; + int ret; ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); @@ -61,13 +69,10 @@ static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *d &ctx->stats); KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge)); - return ctx; -} + ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge); + KUNIT_ASSERT_EQ(test, ret, 0); -static void unregister_test_bridge(struct kunit *test, struct bridge_ctx *ctx) -{ - fpga_bridge_unregister(ctx->bridge); - kunit_device_unregister(test, ctx->dev); + return ctx; } static void fpga_bridge_test_get(struct kunit *test) @@ -141,8 +146,6 @@ static void fpga_bridge_test_get_put_list(struct kunit *test) fpga_bridges_put(&bridge_list); KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list)); - - unregister_test_bridge(test, ctx_1); } static int fpga_bridge_test_init(struct kunit *test) @@ -152,11 +155,6 @@ static int fpga_bridge_test_init(struct kunit *test) return 0; } -static void fpga_bridge_test_exit(struct kunit *test) -{ - unregister_test_bridge(test, test->priv); -} - static struct kunit_case fpga_bridge_test_cases[] = { KUNIT_CASE(fpga_bridge_test_get), KUNIT_CASE(fpga_bridge_test_toggle), @@ -167,7 +165,6 @@ static struct kunit_case fpga_bridge_test_cases[] = { static struct kunit_suite fpga_bridge_suite = { .name = "fpga_bridge", .init = fpga_bridge_test_init, - .exit = fpga_bridge_test_exit, .test_cases = fpga_bridge_test_cases, }; diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c index 125b3a4d43c6ab..9cb37aefbac4b2 100644 --- a/drivers/fpga/tests/fpga-mgr-test.c +++ b/drivers/fpga/tests/fpga-mgr-test.c @@ -44,6 +44,16 @@ struct mgr_ctx { struct mgr_stats stats; }; +/* + * Wrappers to avoid cast warnings when passing action functions directly + * to kunit_add_action(). 
+ */ +KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table, + struct sg_table *); + +KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free, + struct fpga_image_info *); + /** * init_test_buffer() - Allocate and initialize a test image in a buffer. * @test: KUnit test context object. @@ -257,6 +267,9 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test) KUNIT_ASSERT_EQ(test, ret, 0); sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE); + ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + ctx->img_info->sgt = sgt; ret = fpga_mgr_load(ctx->mgr, ctx->img_info); @@ -273,13 +286,12 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test) KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1); KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_seq, ctx->stats.op_parse_header_seq + 2); KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3); - - sg_free_table(ctx->img_info->sgt); } static int fpga_mgr_test_init(struct kunit *test) { struct mgr_ctx *ctx; + int ret; ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); @@ -294,19 +306,14 @@ static int fpga_mgr_test_init(struct kunit *test) ctx->img_info = fpga_image_info_alloc(ctx->dev); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->img_info); + ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, ctx->img_info); + KUNIT_ASSERT_EQ(test, ret, 0); + test->priv = ctx; return 0; } -static void fpga_mgr_test_exit(struct kunit *test) -{ - struct mgr_ctx *ctx = test->priv; - - fpga_image_info_free(ctx->img_info); - kunit_device_unregister(test, ctx->dev); -} - static struct kunit_case fpga_mgr_test_cases[] = { KUNIT_CASE(fpga_mgr_test_get), KUNIT_CASE(fpga_mgr_test_lock), @@ -318,7 +325,6 @@ static struct kunit_case fpga_mgr_test_cases[] = { static struct kunit_suite fpga_mgr_suite = { .name = "fpga_mgr", .init = fpga_mgr_test_init, - .exit = fpga_mgr_test_exit, .test_cases = fpga_mgr_test_cases, }; diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c index bcf0651df261e8..6a108cafded863 100644 --- a/drivers/fpga/tests/fpga-region-test.c +++ b/drivers/fpga/tests/fpga-region-test.c @@ -35,6 +35,19 @@ struct test_ctx { struct mgr_stats mgr_stats; }; +/* + * Wrappers to avoid cast warnings when passing action functions directly + * to kunit_add_action(). 
+ */ +KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free, + struct fpga_image_info *); + +KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister, + struct fpga_bridge *); + +KUNIT_DEFINE_ACTION_WRAPPER(fpga_region_unregister_wrapper, fpga_region_unregister, + struct fpga_region *); + static int op_write(struct fpga_manager *mgr, const char *buf, size_t count) { struct mgr_stats *stats = mgr->priv; @@ -111,6 +124,9 @@ static void fpga_region_test_program_fpga(struct kunit *test) img_info = fpga_image_info_alloc(ctx->mgr_dev); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info); + ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, img_info); + KUNIT_ASSERT_EQ(test, ret, 0); + img_info->buf = img_buf; img_info->count = sizeof(img_buf); @@ -130,8 +146,6 @@ static void fpga_region_test_program_fpga(struct kunit *test) KUNIT_EXPECT_EQ(test, 2, ctx->bridge_stats.cycles_count); fpga_bridges_put(&ctx->region->bridge_list); - - fpga_image_info_free(img_info); } /* @@ -144,6 +158,7 @@ static int fpga_region_test_init(struct kunit *test) { struct test_ctx *ctx; struct fpga_region_info region_info = { 0 }; + int ret; ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); @@ -164,6 +179,9 @@ static int fpga_region_test_init(struct kunit *test) ctx->bridge_stats.enable = true; + ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge); + KUNIT_ASSERT_EQ(test, ret, 0); + ctx->region_dev = kunit_device_register(test, "fpga-region-test-dev"); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_dev); @@ -174,24 +192,14 @@ static int fpga_region_test_init(struct kunit *test) ctx->region = fpga_region_register_full(ctx->region_dev, ®ion_info); KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region)); + ret = kunit_add_action_or_reset(test, fpga_region_unregister_wrapper, ctx->region); + KUNIT_ASSERT_EQ(test, ret, 0); + test->priv = ctx; return 0; } -static void fpga_region_test_exit(struct kunit *test) -{ - struct test_ctx *ctx = test->priv; - - fpga_region_unregister(ctx->region); - kunit_device_unregister(test, ctx->region_dev); - - fpga_bridge_unregister(ctx->bridge); - kunit_device_unregister(test, ctx->bridge_dev); - - kunit_device_unregister(test, ctx->mgr_dev); -} - static struct kunit_case fpga_region_test_cases[] = { KUNIT_CASE(fpga_region_test_class_find), KUNIT_CASE(fpga_region_test_program_fpga), @@ -199,9 +207,8 @@ static struct kunit_case fpga_region_test_cases[] = { }; static struct kunit_suite fpga_region_suite = { - .name = "fpga_mgr", + .name = "fpga_region", .init = fpga_region_test_init, - .exit = fpga_region_test_exit, .test_cases = fpga_region_test_cases, }; diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 0ac93183d20165..4db3d80e10b090 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -387,7 +387,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) const char *why; int err; u32 intr_status; - unsigned long timeout; + unsigned long time_left; unsigned long flags; struct scatterlist *sg; int i; @@ -427,8 +427,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) zynq_step_dma(priv); spin_unlock_irqrestore(&priv->dma_lock, flags); - timeout = wait_for_completion_timeout(&priv->dma_done, - msecs_to_jiffies(DMA_TIMEOUT_MS)); + time_left = wait_for_completion_timeout(&priv->dma_done, + msecs_to_jiffies(DMA_TIMEOUT_MS)); spin_lock_irqsave(&priv->dma_lock, flags); 
zynq_fpga_set_irq(priv, 0); @@ -452,7 +452,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) if (priv->cur_sg || !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) { - if (timeout == 0) + if (time_left == 0) why = "DMA timed out"; else why = "DMA did not complete"; diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c index f58b158d097ce5..a6d4c8f123a5a9 100644 --- a/drivers/fsi/fsi-occ.c +++ b/drivers/fsi/fsi-occ.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #define OCC_SRAM_BYTES 4096 #define OCC_CMD_DATA_BYTES 4090 diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c index 48f2ee0f78c4df..883ef86ad3fc28 100644 --- a/drivers/gnss/core.c +++ b/drivers/gnss/core.c @@ -206,7 +206,6 @@ static const struct file_operations gnss_fops = { .read = gnss_read, .write = gnss_write, .poll = gnss_poll, - .llseek = no_llseek, }; static struct class *gnss_class; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 58f43bcced7c1f..d93cd4f722b401 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -1233,6 +1233,13 @@ config GPIO_ADP5520 This option enables support for on-chip GPIO found on Analog Devices ADP5520 PMICs. +config GPIO_ADP5585 + tristate "GPIO Support for ADP5585" + depends on MFD_ADP5585 + help + This option enables support for the GPIO function found in the Analog + Devices ADP5585. + config GPIO_ALTERA_A10SR tristate "Altera Arria10 System Resource GPIO" depends on MFD_ALTERA_A10SR diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 64dd6d9d730d5a..1429e8c0229b92 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o +obj-$(CONFIG_GPIO_ADP5585) += gpio-adp5585.o obj-$(CONFIG_GPIO_AGGREGATOR) += gpio-aggregator.o obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c new file mode 100644 index 00000000000000..000d31f0967102 --- /dev/null +++ b/drivers/gpio/gpio-adp5585.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Analog Devices ADP5585 GPIO driver + * + * Copyright 2022 NXP + * Copyright 2024 Ideas on Board Oy + */ + +#include +#include +#include +#include +#include +#include +#include + +#define ADP5585_GPIO_MAX 11 + +struct adp5585_gpio_dev { + struct gpio_chip gpio_chip; + struct regmap *regmap; +}; + +static int adp5585_gpio_get_direction(struct gpio_chip *chip, unsigned int off) +{ + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); + unsigned int bank = ADP5585_BANK(off); + unsigned int bit = ADP5585_BIT(off); + unsigned int val; + + regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val); + + return val & bit ? 
GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; +} + +static int adp5585_gpio_direction_input(struct gpio_chip *chip, unsigned int off) +{ + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); + unsigned int bank = ADP5585_BANK(off); + unsigned int bit = ADP5585_BIT(off); + + return regmap_clear_bits(adp5585_gpio->regmap, + ADP5585_GPIO_DIRECTION_A + bank, bit); +} + +static int adp5585_gpio_direction_output(struct gpio_chip *chip, unsigned int off, int val) +{ + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); + unsigned int bank = ADP5585_BANK(off); + unsigned int bit = ADP5585_BIT(off); + int ret; + + ret = regmap_update_bits(adp5585_gpio->regmap, + ADP5585_GPO_DATA_OUT_A + bank, bit, + val ? bit : 0); + if (ret) + return ret; + + return regmap_set_bits(adp5585_gpio->regmap, + ADP5585_GPIO_DIRECTION_A + bank, bit); +} + +static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off) +{ + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); + unsigned int bank = ADP5585_BANK(off); + unsigned int bit = ADP5585_BIT(off); + unsigned int reg; + unsigned int val; + + /* + * The input status register doesn't reflect the pin state when the + * GPIO is configured as an output. Check the direction, and read the + * input status from GPI_STATUS or output value from GPO_DATA_OUT + * accordingly. + * + * We don't need any locking, as concurrent access to the same GPIO + * isn't allowed by the GPIO API, so there's no risk of the + * .direction_input(), .direction_output() or .set() operations racing + * with this. + */ + regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val); + reg = val & bit ? ADP5585_GPO_DATA_OUT_A : ADP5585_GPI_STATUS_A; + regmap_read(adp5585_gpio->regmap, reg + bank, &val); + + return !!(val & bit); +} + +static void adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off, int val) +{ + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); + unsigned int bank = ADP5585_BANK(off); + unsigned int bit = ADP5585_BIT(off); + + regmap_update_bits(adp5585_gpio->regmap, ADP5585_GPO_DATA_OUT_A + bank, + bit, val ? bit : 0); +} + +static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio, + unsigned int off, unsigned int bias) +{ + unsigned int bit, reg, mask, val; + + /* + * The bias configuration fields are 2 bits wide and laid down in + * consecutive registers ADP5585_RPULL_CONFIG_*, with a hole of 4 bits + * after R5. + */ + bit = off * 2 + (off > 5 ? 4 : 0); + reg = ADP5585_RPULL_CONFIG_A + bit / 8; + mask = ADP5585_Rx_PULL_CFG_MASK << (bit % 8); + val = bias << (bit % 8); + + return regmap_update_bits(adp5585_gpio->regmap, reg, mask, val); +} + +static int adp5585_gpio_set_drive(struct adp5585_gpio_dev *adp5585_gpio, + unsigned int off, enum pin_config_param drive) +{ + unsigned int bank = ADP5585_BANK(off); + unsigned int bit = ADP5585_BIT(off); + + return regmap_update_bits(adp5585_gpio->regmap, + ADP5585_GPO_OUT_MODE_A + bank, bit, + drive == PIN_CONFIG_DRIVE_OPEN_DRAIN ? bit : 0); +} + +static int adp5585_gpio_set_debounce(struct adp5585_gpio_dev *adp5585_gpio, + unsigned int off, unsigned int debounce) +{ + unsigned int bank = ADP5585_BANK(off); + unsigned int bit = ADP5585_BIT(off); + + return regmap_update_bits(adp5585_gpio->regmap, + ADP5585_DEBOUNCE_DIS_A + bank, bit, + debounce ? 
0 : bit); +} + +static int adp5585_gpio_set_config(struct gpio_chip *chip, unsigned int off, + unsigned long config) +{ + struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip); + enum pin_config_param param = pinconf_to_config_param(config); + u32 arg = pinconf_to_config_argument(config); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + return adp5585_gpio_set_bias(adp5585_gpio, off, + ADP5585_Rx_PULL_CFG_DISABLE); + + case PIN_CONFIG_BIAS_PULL_DOWN: + return adp5585_gpio_set_bias(adp5585_gpio, off, arg ? + ADP5585_Rx_PULL_CFG_PD_300K : + ADP5585_Rx_PULL_CFG_DISABLE); + + case PIN_CONFIG_BIAS_PULL_UP: + return adp5585_gpio_set_bias(adp5585_gpio, off, arg ? + ADP5585_Rx_PULL_CFG_PU_300K : + ADP5585_Rx_PULL_CFG_DISABLE); + + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + case PIN_CONFIG_DRIVE_PUSH_PULL: + return adp5585_gpio_set_drive(adp5585_gpio, off, param); + + case PIN_CONFIG_INPUT_DEBOUNCE: + return adp5585_gpio_set_debounce(adp5585_gpio, off, arg); + + default: + return -ENOTSUPP; + }; +} + +static int adp5585_gpio_probe(struct platform_device *pdev) +{ + struct adp5585_dev *adp5585 = dev_get_drvdata(pdev->dev.parent); + struct adp5585_gpio_dev *adp5585_gpio; + struct device *dev = &pdev->dev; + struct gpio_chip *gc; + int ret; + + adp5585_gpio = devm_kzalloc(dev, sizeof(*adp5585_gpio), GFP_KERNEL); + if (!adp5585_gpio) + return -ENOMEM; + + adp5585_gpio->regmap = adp5585->regmap; + + device_set_of_node_from_dev(dev, dev->parent); + + gc = &adp5585_gpio->gpio_chip; + gc->parent = dev; + gc->get_direction = adp5585_gpio_get_direction; + gc->direction_input = adp5585_gpio_direction_input; + gc->direction_output = adp5585_gpio_direction_output; + gc->get = adp5585_gpio_get_value; + gc->set = adp5585_gpio_set_value; + gc->set_config = adp5585_gpio_set_config; + gc->can_sleep = true; + + gc->base = -1; + gc->ngpio = ADP5585_GPIO_MAX; + gc->label = pdev->name; + gc->owner = THIS_MODULE; + + ret = devm_gpiochip_add_data(dev, &adp5585_gpio->gpio_chip, + adp5585_gpio); + if (ret) + return dev_err_probe(dev, ret, "failed to add GPIO chip\n"); + + return 0; +} + +static const struct platform_device_id adp5585_gpio_id_table[] = { + { "adp5585-gpio" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, adp5585_gpio_id_table); + +static struct platform_driver adp5585_gpio_driver = { + .driver = { + .name = "adp5585-gpio", + }, + .probe = adp5585_gpio_probe, + .id_table = adp5585_gpio_id_table, +}; +module_platform_driver(adp5585_gpio_driver); + +MODULE_AUTHOR("Haibo Chen "); +MODULE_DESCRIPTION("GPIO ADP5585 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index 6211d99a577090..de4cc12e5e0399 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -8,13 +8,13 @@ * Copyright (C) 2008 Imre Kaloz */ +#include #include -#include -#include -#include #include -#include #include +#include +#include +#include #define AR71XX_GPIO_REG_OE 0x00 #define AR71XX_GPIO_REG_IN 0x04 @@ -224,9 +224,7 @@ MODULE_DEVICE_TABLE(of, ath79_gpio_of_match); static int ath79_gpio_probe(struct platform_device *pdev) { - struct ath79_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; struct ath79_gpio_ctrl *ctrl; struct gpio_irq_chip *girq; u32 ath79_gpio_count; @@ -237,21 +235,14 @@ static int ath79_gpio_probe(struct platform_device *pdev) if (!ctrl) return -ENOMEM; - if (np) { - err = of_property_read_u32(np, "ngpios", &ath79_gpio_count); - if (err) { - dev_err(dev, 
"ngpios property is not valid\n"); - return err; - } - oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio"); - } else if (pdata) { - ath79_gpio_count = pdata->ngpios; - oe_inverted = pdata->oe_inverted; - } else { - dev_err(dev, "No DT node or platform data found\n"); - return -EINVAL; + err = device_property_read_u32(dev, "ngpios", &ath79_gpio_count); + if (err) { + dev_err(dev, "ngpios property is not valid\n"); + return err; } + oe_inverted = device_is_compatible(dev, "qca,ar9340-gpio"); + if (ath79_gpio_count >= 32) { dev_err(dev, "ngpios must be less than 32\n"); return -EINVAL; @@ -275,7 +266,7 @@ static int ath79_gpio_probe(struct platform_device *pdev) } /* Optional interrupt setup */ - if (!np || of_property_read_bool(np, "interrupt-controller")) { + if (device_property_read_bool(dev, "interrupt-controller")) { girq = &ctrl->gc.irq; gpio_irq_chip_set_chip(girq, &ath79_gpio_irqchip); girq->parent_handler = ath79_gpio_irq_handler; diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c index 6a439cf7845967..1b8ffd0ddab65b 100644 --- a/drivers/gpio/gpio-cadence.c +++ b/drivers/gpio/gpio-cadence.c @@ -31,7 +31,6 @@ struct cdns_gpio_chip { struct gpio_chip gc; - struct clk *pclk; void __iomem *regs; u32 bypass_orig; }; @@ -155,6 +154,7 @@ static int cdns_gpio_probe(struct platform_device *pdev) int ret, irq; u32 dir_prev; u32 num_gpios = 32; + struct clk *clk; cgpio = devm_kzalloc(&pdev->dev, sizeof(*cgpio), GFP_KERNEL); if (!cgpio) @@ -203,21 +203,14 @@ static int cdns_gpio_probe(struct platform_device *pdev) cgpio->gc.request = cdns_gpio_request; cgpio->gc.free = cdns_gpio_free; - cgpio->pclk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(cgpio->pclk)) { - ret = PTR_ERR(cgpio->pclk); + clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); dev_err(&pdev->dev, "Failed to retrieve peripheral clock, %d\n", ret); goto err_revert_dir; } - ret = clk_prepare_enable(cgpio->pclk); - if (ret) { - dev_err(&pdev->dev, - "Failed to enable the peripheral clock, %d\n", ret); - goto err_revert_dir; - } - /* * Optional irq_chip support */ @@ -234,7 +227,7 @@ static int cdns_gpio_probe(struct platform_device *pdev) GFP_KERNEL); if (!girq->parents) { ret = -ENOMEM; - goto err_disable_clk; + goto err_revert_dir; } girq->parents[0] = irq; girq->default_type = IRQ_TYPE_NONE; @@ -244,7 +237,7 @@ static int cdns_gpio_probe(struct platform_device *pdev) ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio); if (ret < 0) { dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret); - goto err_disable_clk; + goto err_revert_dir; } cgpio->bypass_orig = ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE); @@ -259,9 +252,6 @@ static int cdns_gpio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cgpio); return 0; -err_disable_clk: - clk_disable_unprepare(cgpio->pclk); - err_revert_dir: iowrite32(dir_prev, cgpio->regs + CDNS_GPIO_DIRECTION_MODE); @@ -273,7 +263,6 @@ static void cdns_gpio_remove(struct platform_device *pdev) struct cdns_gpio_chip *cgpio = platform_get_drvdata(pdev); iowrite32(cgpio->bypass_orig, cgpio->regs + CDNS_GPIO_BYPASS_MODE); - clk_disable_unprepare(cgpio->pclk); } static const struct of_device_id cdns_of_ids[] = { diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 1d0175d6350b78..76b58c70b2577f 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -154,74 +153,37 @@ davinci_gpio_set(struct 
gpio_chip *chip, unsigned offset, int value) value ? &g->set_data : &g->clr_data); } -static struct davinci_gpio_platform_data * -davinci_gpio_get_pdata(struct platform_device *pdev) -{ - struct device_node *dn = pdev->dev.of_node; - struct davinci_gpio_platform_data *pdata; - int ret; - u32 val; - - if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) - return dev_get_platdata(&pdev->dev); - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - ret = of_property_read_u32(dn, "ti,ngpio", &val); - if (ret) - goto of_err; - - pdata->ngpio = val; - - ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked", &val); - if (ret) - goto of_err; - - pdata->gpio_unbanked = val; - - return pdata; - -of_err: - dev_err(&pdev->dev, "Populating pdata from DT failed: err %d\n", ret); - return NULL; -} - static int davinci_gpio_probe(struct platform_device *pdev) { int bank, i, ret = 0; - unsigned int ngpio, nbank, nirq; + unsigned int ngpio, nbank, nirq, gpio_unbanked; struct davinci_gpio_controller *chips; - struct davinci_gpio_platform_data *pdata; struct device *dev = &pdev->dev; - - pdata = davinci_gpio_get_pdata(pdev); - if (!pdata) { - dev_err(dev, "No platform data found\n"); - return -EINVAL; - } - - dev->platform_data = pdata; + struct device_node *dn = dev_of_node(dev); /* * The gpio banks conceptually expose a segmented bitmap, * and "ngpio" is one more than the largest zero-based * bit index that's valid. */ - ngpio = pdata->ngpio; - if (ngpio == 0) { - dev_err(dev, "How many GPIOs?\n"); - return -EINVAL; - } + ret = of_property_read_u32(dn, "ti,ngpio", &ngpio); + if (ret) + return dev_err_probe(dev, ret, "Failed to get the number of GPIOs\n"); + if (ngpio == 0) + return dev_err_probe(dev, -EINVAL, "How many GPIOs?\n"); /* * If there are unbanked interrupts then the number of * interrupts is equal to number of gpios else all are banked so * number of interrupts is equal to number of banks(each with 16 gpios) */ - if (pdata->gpio_unbanked) - nirq = pdata->gpio_unbanked; + ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked", + &gpio_unbanked); + if (ret) + return dev_err_probe(dev, ret, "Failed to get the unbanked GPIOs property\n"); + + if (gpio_unbanked) + nirq = gpio_unbanked; else nirq = DIV_ROUND_UP(ngpio, 16); @@ -252,7 +214,7 @@ static int davinci_gpio_probe(struct platform_device *pdev) chips->chip.set = davinci_gpio_set; chips->chip.ngpio = ngpio; - chips->chip.base = pdata->no_auto_base ? pdata->base : -1; + chips->chip.base = -1; #ifdef CONFIG_OF_GPIO chips->chip.parent = dev; @@ -261,6 +223,8 @@ static int davinci_gpio_probe(struct platform_device *pdev) #endif spin_lock_init(&chips->lock); + chips->gpio_unbanked = gpio_unbanked; + nbank = DIV_ROUND_UP(ngpio, 32); for (bank = 0; bank < nbank; bank++) chips->regs[bank] = gpio_base + offset_array[bank]; @@ -289,7 +253,7 @@ static int davinci_gpio_probe(struct platform_device *pdev) * serve as EDMA event triggers. 
*/ -static void gpio_irq_disable(struct irq_data *d) +static void gpio_irq_mask(struct irq_data *d) { struct davinci_gpio_regs __iomem *g = irq2regs(d); uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d); @@ -298,7 +262,7 @@ static void gpio_irq_disable(struct irq_data *d) writel_relaxed(mask, &g->clr_rising); } -static void gpio_irq_enable(struct irq_data *d) +static void gpio_irq_unmask(struct irq_data *d) { struct davinci_gpio_regs __iomem *g = irq2regs(d); uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d); @@ -324,8 +288,8 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger) static struct irq_chip gpio_irqchip = { .name = "GPIO", - .irq_enable = gpio_irq_enable, - .irq_disable = gpio_irq_disable, + .irq_unmask = gpio_irq_unmask, + .irq_mask = gpio_irq_mask, .irq_set_type = gpio_irq_type, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE, }; @@ -482,13 +446,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) { unsigned gpio, bank; int irq; - int ret; struct clk *clk; u32 binten = 0; unsigned ngpio; struct device *dev = &pdev->dev; struct davinci_gpio_controller *chips = platform_get_drvdata(pdev); - struct davinci_gpio_platform_data *pdata = dev->platform_data; struct davinci_gpio_regs __iomem *g; struct irq_domain *irq_domain = NULL; struct irq_chip *irq_chip; @@ -502,23 +464,18 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) if (dev->of_node) gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)device_get_match_data(dev); - ngpio = pdata->ngpio; + ngpio = chips->chip.ngpio; - clk = devm_clk_get(dev, "gpio"); + clk = devm_clk_get_enabled(dev, "gpio"); if (IS_ERR(clk)) { dev_err(dev, "Error %ld getting gpio clock\n", PTR_ERR(clk)); return PTR_ERR(clk); } - ret = clk_prepare_enable(clk); - if (ret) - return ret; - - if (!pdata->gpio_unbanked) { + if (!chips->gpio_unbanked) { irq = devm_irq_alloc_descs(dev, -1, 0, ngpio, 0); if (irq < 0) { dev_err(dev, "Couldn't allocate IRQ numbers\n"); - clk_disable_unprepare(clk); return irq; } @@ -527,7 +484,6 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) chips); if (!irq_domain) { dev_err(dev, "Couldn't register an IRQ domain\n"); - clk_disable_unprepare(clk); return -ENODEV; } } @@ -546,11 +502,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) * controller only handling trigger modes. We currently assume no * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs. 
*/ - if (pdata->gpio_unbanked) { + if (chips->gpio_unbanked) { /* pass "bank 0" GPIO IRQs to AINTC */ chips->chip.to_irq = gpio_to_irq_unbanked; - chips->gpio_unbanked = pdata->gpio_unbanked; - binten = GENMASK(pdata->gpio_unbanked / 16, 0); + + binten = GENMASK(chips->gpio_unbanked / 16, 0); /* AINTC handles mask/unmask; GPIO handles triggering */ irq = chips->irqs[0]; @@ -564,7 +520,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) writel_relaxed(~0, &g->set_rising); /* set the direct IRQs up to use that irqchip */ - for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++) { + for (gpio = 0; gpio < chips->gpio_unbanked; gpio++) { irq_set_chip(chips->irqs[gpio], irq_chip); irq_set_handler_data(chips->irqs[gpio], chips); irq_set_status_flags(chips->irqs[gpio], @@ -596,10 +552,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) sizeof(struct davinci_gpio_irq_data), GFP_KERNEL); - if (!irqdata) { - clk_disable_unprepare(clk); + if (!irqdata) return -ENOMEM; - } irqdata->regs = g; irqdata->bank_num = bank; @@ -675,8 +629,7 @@ static void davinci_gpio_restore_context(struct davinci_gpio_controller *chips, static int davinci_gpio_suspend(struct device *dev) { struct davinci_gpio_controller *chips = dev_get_drvdata(dev); - struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev); - u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32); + u32 nbank = DIV_ROUND_UP(chips->chip.ngpio, 32); davinci_gpio_save_context(chips, nbank); @@ -686,8 +639,7 @@ static int davinci_gpio_suspend(struct device *dev) static int davinci_gpio_resume(struct device *dev) { struct davinci_gpio_controller *chips = dev_get_drvdata(dev); - struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev); - u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32); + u32 nbank = DIV_ROUND_UP(chips->chip.ngpio, 32); davinci_gpio_restore_context(chips, nbank); diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 6cedf46efec6cb..ab798c848215ad 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -19,29 +20,8 @@ #include #include -#define EP93XX_GPIO_F_INT_STATUS 0x5c -#define EP93XX_GPIO_A_INT_STATUS 0xa0 -#define EP93XX_GPIO_B_INT_STATUS 0xbc - -/* Maximum value for gpio line identifiers */ -#define EP93XX_GPIO_LINE_MAX 63 - -/* Number of GPIO chips in EP93XX */ -#define EP93XX_GPIO_CHIP_NUM 8 - -/* Maximum value for irq capable line identifiers */ -#define EP93XX_GPIO_LINE_MAX_IRQ 23 - -#define EP93XX_GPIO_A_IRQ_BASE 64 -#define EP93XX_GPIO_B_IRQ_BASE 72 -/* - * Static mapping of GPIO bank F IRQS: - * F0..F7 (16..24) to irq 80..87. 
- */ -#define EP93XX_GPIO_F_IRQ_BASE 80 - struct ep93xx_gpio_irq_chip { - u8 irq_offset; + void __iomem *base; u8 int_unmasked; u8 int_enabled; u8 int_type1; @@ -50,15 +30,11 @@ struct ep93xx_gpio_irq_chip { }; struct ep93xx_gpio_chip { + void __iomem *base; struct gpio_chip gc; struct ep93xx_gpio_irq_chip *eic; }; -struct ep93xx_gpio { - void __iomem *base; - struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM]; -}; - #define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc) static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc) @@ -79,25 +55,23 @@ static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc #define EP93XX_INT_RAW_STATUS_OFFSET 0x14 #define EP93XX_INT_DEBOUNCE_OFFSET 0x18 -static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, - struct ep93xx_gpio_irq_chip *eic) +static void ep93xx_gpio_update_int_params(struct ep93xx_gpio_irq_chip *eic) { - writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET); + writeb_relaxed(0, eic->base + EP93XX_INT_EN_OFFSET); writeb_relaxed(eic->int_type2, - epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET); + eic->base + EP93XX_INT_TYPE2_OFFSET); writeb_relaxed(eic->int_type1, - epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET); + eic->base + EP93XX_INT_TYPE1_OFFSET); writeb_relaxed(eic->int_unmasked & eic->int_enabled, - epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET); + eic->base + EP93XX_INT_EN_OFFSET); } static void ep93xx_gpio_int_debounce(struct gpio_chip *gc, unsigned int offset, bool enable) { - struct ep93xx_gpio *epg = gpiochip_get_data(gc); struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); int port_mask = BIT(offset); @@ -106,53 +80,43 @@ static void ep93xx_gpio_int_debounce(struct gpio_chip *gc, else eic->int_debounce &= ~port_mask; - writeb(eic->int_debounce, - epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET); + writeb(eic->int_debounce, eic->base + EP93XX_INT_DEBOUNCE_OFFSET); } -static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) +static u32 ep93xx_gpio_ab_irq_handler(struct gpio_chip *gc) { - struct gpio_chip *gc = irq_desc_get_handler_data(desc); - struct ep93xx_gpio *epg = gpiochip_get_data(gc); - struct irq_chip *irqchip = irq_desc_get_chip(desc); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); unsigned long stat; int offset; - chained_irq_enter(irqchip, desc); - - /* - * Dispatch the IRQs to the irqdomain of each A and B - * gpiochip irqdomains depending on what has fired. - * The tricky part is that the IRQ line is shared - * between bank A and B and each has their own gpiochip. 
- */ - stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS); + stat = readb(eic->base + EP93XX_INT_STATUS_OFFSET); for_each_set_bit(offset, &stat, 8) - generic_handle_domain_irq(epg->gc[0].gc.irq.domain, - offset); + generic_handle_domain_irq(gc->irq.domain, offset); - stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS); - for_each_set_bit(offset, &stat, 8) - generic_handle_domain_irq(epg->gc[1].gc.irq.domain, - offset); + return stat; +} - chained_irq_exit(irqchip, desc); +static irqreturn_t ep93xx_ab_irq_handler(int irq, void *dev_id) +{ + return IRQ_RETVAL(ep93xx_gpio_ab_irq_handler(dev_id)); } static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc) { - /* - * map discontiguous hw irq range to continuous sw irq range: - * - * IRQ_EP93XX_GPIO{0..7}MUX -> EP93XX_GPIO_LINE_F{0..7} - */ struct irq_chip *irqchip = irq_desc_get_chip(desc); - unsigned int irq = irq_desc_get_irq(desc); - int port_f_idx = (irq & 7) ^ 4; /* {20..23,48..51} -> {0..7} */ - int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx; + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct gpio_irq_chip *gic = &gc->irq; + unsigned int parent = irq_desc_get_irq(desc); + unsigned int i; chained_irq_enter(irqchip, desc); - generic_handle_irq(gpio_irq); + for (i = 0; i < gic->num_parents; i++) + if (gic->parents[i] == parent) + break; + + if (i < gic->num_parents) + generic_handle_domain_irq(gc->irq.domain, i); + chained_irq_exit(irqchip, desc); } @@ -160,54 +124,53 @@ static void ep93xx_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); - struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port_mask = BIT(d->irq & 7); + int port_mask = BIT(irqd_to_hwirq(d)); if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { eic->int_type2 ^= port_mask; /* switch edge direction */ - ep93xx_gpio_update_int_params(epg, eic); + ep93xx_gpio_update_int_params(eic); } - writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET); + writeb(port_mask, eic->base + EP93XX_INT_EOI_OFFSET); } static void ep93xx_gpio_irq_mask_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); - struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port_mask = BIT(d->irq & 7); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + int port_mask = BIT(hwirq); if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) eic->int_type2 ^= port_mask; /* switch edge direction */ eic->int_unmasked &= ~port_mask; - ep93xx_gpio_update_int_params(epg, eic); + ep93xx_gpio_update_int_params(eic); - writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET); - gpiochip_disable_irq(gc, irqd_to_hwirq(d)); + writeb(port_mask, eic->base + EP93XX_INT_EOI_OFFSET); + gpiochip_disable_irq(gc, hwirq); } static void ep93xx_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); - struct ep93xx_gpio *epg = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - eic->int_unmasked &= ~BIT(d->irq & 7); - ep93xx_gpio_update_int_params(epg, eic); - gpiochip_disable_irq(gc, irqd_to_hwirq(d)); + eic->int_unmasked &= ~BIT(hwirq); + ep93xx_gpio_update_int_params(eic); + gpiochip_disable_irq(gc, hwirq); } static void ep93xx_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); - 
struct ep93xx_gpio *epg = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - gpiochip_enable_irq(gc, irqd_to_hwirq(d)); - eic->int_unmasked |= BIT(d->irq & 7); - ep93xx_gpio_update_int_params(epg, eic); + gpiochip_enable_irq(gc, hwirq); + eic->int_unmasked |= BIT(hwirq); + ep93xx_gpio_update_int_params(eic); } /* @@ -219,12 +182,11 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); - struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int offset = d->irq & 7; - int port_mask = BIT(offset); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + int port_mask = BIT(hwirq); irq_flow_handler_t handler; - gc->direction_input(gc, offset); + gc->direction_input(gc, hwirq); switch (type) { case IRQ_TYPE_EDGE_RISING: @@ -250,7 +212,7 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) case IRQ_TYPE_EDGE_BOTH: eic->int_type1 |= port_mask; /* set initial polarity based on current input level */ - if (gc->get(gc, offset)) + if (gc->get(gc, hwirq)) eic->int_type2 &= ~port_mask; /* falling */ else eic->int_type2 |= port_mask; /* rising */ @@ -264,51 +226,11 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) eic->int_enabled |= port_mask; - ep93xx_gpio_update_int_params(epg, eic); + ep93xx_gpio_update_int_params(eic); return 0; } -/************************************************************************* - * gpiolib interface for EP93xx on-chip GPIOs - *************************************************************************/ -struct ep93xx_gpio_bank { - const char *label; - int data; - int dir; - int irq; - int base; - bool has_irq; - bool has_hierarchical_irq; - unsigned int irq_base; -}; - -#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \ - { \ - .label = _label, \ - .data = _data, \ - .dir = _dir, \ - .irq = _irq, \ - .base = _base, \ - .has_irq = _has_irq, \ - .has_hierarchical_irq = _has_hier, \ - .irq_base = _irq_base, \ - } - -static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = { - /* Bank A has 8 IRQs */ - EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, EP93XX_GPIO_A_IRQ_BASE), - /* Bank B has 8 IRQs */ - EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, EP93XX_GPIO_B_IRQ_BASE), - EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0), - EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0), - EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0), - /* Bank F has 8 IRQs */ - EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, EP93XX_GPIO_F_IRQ_BASE), - EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0), - EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0), -}; - static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset, unsigned long config) { @@ -342,115 +264,112 @@ static const struct irq_chip gpio_eic_irq_chip = { GPIOCHIP_IRQ_RESOURCE_HELPERS, }; -static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc, - struct platform_device *pdev, - struct ep93xx_gpio *epg, - struct ep93xx_gpio_bank *bank) +static int ep93xx_setup_irqs(struct platform_device *pdev, + struct ep93xx_gpio_chip *egc) { - void __iomem *data = epg->base + bank->data; - void __iomem *dir = epg->base + bank->dir; struct gpio_chip *gc = &egc->gc; struct device *dev = &pdev->dev; - struct gpio_irq_chip *girq; - int err; - - err = bgpio_init(gc, dev, 1, data, NULL, NULL, dir, NULL, 0); - if (err) - return err; - - 
gc->label = bank->label; - gc->base = bank->base; - - girq = &gc->irq; - if (bank->has_irq || bank->has_hierarchical_irq) { - gc->set_config = ep93xx_gpio_set_config; - egc->eic = devm_kcalloc(dev, 1, - sizeof(*egc->eic), - GFP_KERNEL); - if (!egc->eic) - return -ENOMEM; - egc->eic->irq_offset = bank->irq; - gpio_irq_chip_set_chip(girq, &gpio_eic_irq_chip); - } + struct gpio_irq_chip *girq = &gc->irq; + int ret, irq, i; + void __iomem *intr; - if (bank->has_irq) { - int ab_parent_irq = platform_get_irq(pdev, 0); - - girq->parent_handler = ep93xx_gpio_ab_irq_handler; - girq->num_parents = 1; - girq->parents = devm_kcalloc(dev, girq->num_parents, - sizeof(*girq->parents), - GFP_KERNEL); - if (!girq->parents) - return -ENOMEM; - girq->default_type = IRQ_TYPE_NONE; - girq->handler = handle_level_irq; - girq->parents[0] = ab_parent_irq; - girq->first = bank->irq_base; - } + intr = devm_platform_ioremap_resource_byname(pdev, "intr"); + if (IS_ERR(intr)) + return PTR_ERR(intr); + + gc->set_config = ep93xx_gpio_set_config; + egc->eic = devm_kzalloc(dev, sizeof(*egc->eic), GFP_KERNEL); + if (!egc->eic) + return -ENOMEM; + + egc->eic->base = intr; + gpio_irq_chip_set_chip(girq, &gpio_eic_irq_chip); + girq->num_parents = platform_irq_count(pdev); + if (girq->num_parents == 0) + return -EINVAL; + + girq->parents = devm_kcalloc(dev, girq->num_parents, sizeof(*girq->parents), + GFP_KERNEL); + if (!girq->parents) + return -ENOMEM; - /* Only bank F has especially funky IRQ handling */ - if (bank->has_hierarchical_irq) { - int gpio_irq; - int i; + if (girq->num_parents == 1) { /* A/B irqchips */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; - /* - * FIXME: convert this to use hierarchical IRQ support! - * this requires fixing the root irqchip to be hierarchical. 
- */ + ret = devm_request_irq(dev, irq, ep93xx_ab_irq_handler, + IRQF_SHARED, gc->label, gc); + if (ret) + return dev_err_probe(dev, ret, "requesting IRQ: %d\n", irq); + + girq->parents[0] = irq; + } else { /* F irqchip */ girq->parent_handler = ep93xx_gpio_f_irq_handler; - girq->num_parents = 8; - girq->parents = devm_kcalloc(dev, girq->num_parents, - sizeof(*girq->parents), - GFP_KERNEL); - if (!girq->parents) - return -ENOMEM; - /* Pick resources 1..8 for these IRQs */ + for (i = 0; i < girq->num_parents; i++) { - girq->parents[i] = platform_get_irq(pdev, i + 1); - gpio_irq = bank->irq_base + i; - irq_set_chip_data(gpio_irq, &epg->gc[5]); - irq_set_chip_and_handler(gpio_irq, - girq->chip, - handle_level_irq); - irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST); + irq = platform_get_irq_optional(pdev, i); + if (irq < 0) + continue; + + girq->parents[i] = irq; } - girq->default_type = IRQ_TYPE_NONE; - girq->handler = handle_level_irq; - girq->first = bank->irq_base; + + girq->map = girq->parents; } - return devm_gpiochip_add_data(dev, gc, epg); + girq->default_type = IRQ_TYPE_NONE; + /* TODO: replace with handle_bad_irq() once we are fully hierarchical */ + girq->handler = handle_simple_irq; + + return 0; } static int ep93xx_gpio_probe(struct platform_device *pdev) { - struct ep93xx_gpio *epg; - int i; - - epg = devm_kzalloc(&pdev->dev, sizeof(*epg), GFP_KERNEL); - if (!epg) + struct ep93xx_gpio_chip *egc; + struct gpio_chip *gc; + void __iomem *data; + void __iomem *dir; + int ret; + + egc = devm_kzalloc(&pdev->dev, sizeof(*egc), GFP_KERNEL); + if (!egc) return -ENOMEM; - epg->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(epg->base)) - return PTR_ERR(epg->base); - - for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) { - struct ep93xx_gpio_chip *gc = &epg->gc[i]; - struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i]; - - if (ep93xx_gpio_add_bank(gc, pdev, epg, bank)) - dev_warn(&pdev->dev, "Unable to add gpio bank %s\n", - bank->label); + data = devm_platform_ioremap_resource_byname(pdev, "data"); + if (IS_ERR(data)) + return PTR_ERR(data); + + dir = devm_platform_ioremap_resource_byname(pdev, "dir"); + if (IS_ERR(dir)) + return PTR_ERR(dir); + + gc = &egc->gc; + ret = bgpio_init(gc, &pdev->dev, 1, data, NULL, NULL, dir, NULL, 0); + if (ret) + return dev_err_probe(&pdev->dev, ret, "unable to init generic GPIO\n"); + + gc->label = dev_name(&pdev->dev); + if (platform_irq_count(pdev) > 0) { + dev_dbg(&pdev->dev, "setting up irqs for %s\n", dev_name(&pdev->dev)); + ret = ep93xx_setup_irqs(pdev, egc); + if (ret) + dev_err_probe(&pdev->dev, ret, "setup irqs failed"); } - return 0; + return devm_gpiochip_add_data(&pdev->dev, gc, egc); } +static const struct of_device_id ep93xx_gpio_match[] = { + { .compatible = "cirrus,ep9301-gpio" }, + { /* sentinel */ } +}; + static struct platform_driver ep93xx_gpio_driver = { .driver = { .name = "gpio-ep93xx", + .of_match_table = ep93xx_gpio_match, }, .probe = ep93xx_gpio_probe, }; diff --git a/drivers/gpio/gpio-fxl6408.c b/drivers/gpio/gpio-fxl6408.c index 991549888904b1..86ebc66b11041c 100644 --- a/drivers/gpio/gpio-fxl6408.c +++ b/drivers/gpio/gpio-fxl6408.c @@ -138,7 +138,7 @@ static const __maybe_unused struct of_device_id fxl6408_dt_ids[] = { MODULE_DEVICE_TABLE(of, fxl6408_dt_ids); static const struct i2c_device_id fxl6408_id[] = { - { "fxl6408", 0 }, + { "fxl6408" }, { } }; MODULE_DEVICE_TABLE(i2c, fxl6408_id); diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c index c5a9fa64056648..28a8a6a8f05fee 100644 --- 
a/drivers/gpio/gpio-ixp4xx.c +++ b/drivers/gpio/gpio-ixp4xx.c @@ -6,6 +6,7 @@ // based on previous work and know-how from: // Deepak Saxena +#include #include #include #include @@ -13,7 +14,7 @@ #include #include #include -#include +#include #define IXP4XX_REG_GPOUT 0x00 #define IXP4XX_REG_GPOE 0x04 @@ -53,16 +54,14 @@ /** * struct ixp4xx_gpio - IXP4 GPIO state container * @dev: containing device for this instance - * @fwnode: the fwnode for this GPIO chip * @gc: gpiochip for this instance * @base: remapped I/O-memory base * @irq_edge: Each bit represents an IRQ: 1: edge-triggered, * 0: level triggered */ struct ixp4xx_gpio { - struct device *dev; - struct fwnode_handle *fwnode; struct gpio_chip gc; + struct device *dev; void __iomem *base; unsigned long long irq_edge; }; @@ -237,7 +236,6 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev) dev_err(dev, "no IRQ parent domain\n"); return -ENODEV; } - g->fwnode = of_node_to_fwnode(np); /* * If either clock output is enabled explicitly in the device tree @@ -322,7 +320,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev) girq = &g->gc.irq; gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip); - girq->fwnode = g->fwnode; + girq->fwnode = dev_fwnode(dev); girq->parent_domain = parent; girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq; girq->handler = handle_bad_irq; diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c index 5c6bb57a8c99b9..e7c0ef6e54fabc 100644 --- a/drivers/gpio/gpio-lpc18xx.c +++ b/drivers/gpio/gpio-lpc18xx.c @@ -47,7 +47,6 @@ struct lpc18xx_gpio_pin_ic { struct lpc18xx_gpio_chip { struct gpio_chip gpio; void __iomem *base; - struct clk *clk; struct lpc18xx_gpio_pin_ic *pin_ic; spinlock_t lock; }; @@ -328,6 +327,7 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct lpc18xx_gpio_chip *gc; int index, ret; + struct clk *clk; gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL); if (!gc) @@ -352,16 +352,10 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev) if (IS_ERR(gc->base)) return PTR_ERR(gc->base); - gc->clk = devm_clk_get(dev, NULL); - if (IS_ERR(gc->clk)) { + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) { dev_err(dev, "input clock not found\n"); - return PTR_ERR(gc->clk); - } - - ret = clk_prepare_enable(gc->clk); - if (ret) { - dev_err(dev, "unable to enable clock\n"); - return ret; + return PTR_ERR(clk); } spin_lock_init(&gc->lock); @@ -369,11 +363,8 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev) gc->gpio.parent = dev; ret = devm_gpiochip_add_data(dev, &gc->gpio, gc); - if (ret) { - dev_err(dev, "failed to add gpio chip\n"); - clk_disable_unprepare(gc->clk); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to add gpio chip\n"); /* On error GPIO pin interrupt controller just won't be registered */ lpc18xx_gpio_pin_ic_probe(gc); @@ -387,8 +378,6 @@ static void lpc18xx_gpio_remove(struct platform_device *pdev) if (gc->pin_ic) irq_domain_remove(gc->pin_ic->domain); - - clk_disable_unprepare(gc->clk); } static const struct of_device_id lpc18xx_gpio_match[] = { diff --git a/drivers/gpio/gpio-max7300.c b/drivers/gpio/gpio-max7300.c index 31c2b95321cc20..621d609ece904a 100644 --- a/drivers/gpio/gpio-max7300.c +++ b/drivers/gpio/gpio-max7300.c @@ -53,7 +53,7 @@ static void max7300_remove(struct i2c_client *client) } static const struct i2c_device_id max7300_id[] = { - { "max7300", 0 }, + { "max7300" }, { } }; MODULE_DEVICE_TABLE(i2c, max7300_id); diff --git 
a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c index 7fb298b4571bd5..ccbb63c21d6f05 100644 --- a/drivers/gpio/gpio-mb86s7x.c +++ b/drivers/gpio/gpio-mb86s7x.c @@ -35,7 +35,6 @@ struct mb86s70_gpio_chip { struct gpio_chip gc; void __iomem *base; - struct clk *clk; spinlock_t lock; }; @@ -157,6 +156,7 @@ static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) static int mb86s70_gpio_probe(struct platform_device *pdev) { struct mb86s70_gpio_chip *gchip; + struct clk *clk; int ret; gchip = devm_kzalloc(&pdev->dev, sizeof(*gchip), GFP_KERNEL); @@ -169,13 +169,9 @@ static int mb86s70_gpio_probe(struct platform_device *pdev) if (IS_ERR(gchip->base)) return PTR_ERR(gchip->base); - gchip->clk = devm_clk_get_optional(&pdev->dev, NULL); - if (IS_ERR(gchip->clk)) - return PTR_ERR(gchip->clk); - - ret = clk_prepare_enable(gchip->clk); - if (ret) - return ret; + clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); spin_lock_init(&gchip->lock); @@ -193,11 +189,9 @@ static int mb86s70_gpio_probe(struct platform_device *pdev) gchip->gc.base = -1; ret = gpiochip_add_data(&gchip->gc, gchip); - if (ret) { - dev_err(&pdev->dev, "couldn't register gpio driver\n"); - clk_disable_unprepare(gchip->clk); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, + "couldn't register gpio driver\n"); acpi_gpiochip_request_interrupts(&gchip->gc); @@ -210,7 +204,6 @@ static void mb86s70_gpio_remove(struct platform_device *pdev) acpi_gpiochip_free_interrupts(&gchip->gc); gpiochip_remove(&gchip->gc); - clk_disable_unprepare(gchip->clk); } static const struct of_device_id mb86s70_gpio_dt_ids[] = { diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 455eecf6380e44..d39c6618bade2a 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -347,7 +347,6 @@ static const struct file_operations gpio_mockup_debugfs_ops = { .open = gpio_mockup_debugfs_open, .read = gpio_mockup_debugfs_read, .write = gpio_mockup_debugfs_write, - .llseek = no_llseek, .release = single_release, }; diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index c0125ac739066e..685ec31db409d4 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -7,19 +7,21 @@ */ #include -#include +#include +#include #include -#include -#include +#include #include +#include +#include +#include #include +#include +#include +#include #include -#include #include -#include -#include -#include -#include +#include #define MPC8XXX_GPIO_PINS 32 @@ -413,6 +415,8 @@ static int mpc8xxx_probe(struct platform_device *pdev) goto err; } + device_init_wakeup(&pdev->dev, true); + return 0; err: irq_domain_remove(mpc8xxx_gc->irq); @@ -429,6 +433,29 @@ static void mpc8xxx_remove(struct platform_device *pdev) } } +static int mpc8xxx_suspend(struct device *dev) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = dev_get_drvdata(dev); + + if (mpc8xxx_gc->irqn && device_may_wakeup(dev)) + enable_irq_wake(mpc8xxx_gc->irqn); + + return 0; +} + +static int mpc8xxx_resume(struct device *dev) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = dev_get_drvdata(dev); + + if (mpc8xxx_gc->irqn && device_may_wakeup(dev)) + disable_irq_wake(mpc8xxx_gc->irqn); + + return 0; +} + +static DEFINE_RUNTIME_DEV_PM_OPS(mpc8xx_pm_ops, + mpc8xxx_suspend, mpc8xxx_resume, NULL); + #ifdef CONFIG_ACPI static const struct acpi_device_id gpio_acpi_ids[] = { {"NXP0031",}, @@ -444,6 +471,7 @@ static struct platform_driver mpc8xxx_plat_driver = { .name = "gpio-mpc8xxx", 
.of_match_table = mpc8xxx_gpio_ids, .acpi_match_table = ACPI_PTR(gpio_acpi_ids), + .pm = pm_ptr(&mpc8xx_pm_ops), }, }; diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c index 2f448eb23abb10..6db9e469e0dc25 100644 --- a/drivers/gpio/gpio-msc313.c +++ b/drivers/gpio/gpio-msc313.c @@ -3,13 +3,14 @@ #include #include -#include #include #include #include #include #include #include +#include +#include #include #include @@ -662,7 +663,7 @@ static int msc313_gpio_probe(struct platform_device *pdev) gpioirqchip = &gpiochip->irq; gpio_irq_chip_set_chip(gpioirqchip, &msc313_gpio_irqchip); - gpioirqchip->fwnode = of_node_to_fwnode(dev->of_node); + gpioirqchip->fwnode = dev_fwnode(dev); gpioirqchip->parent_domain = parent_domain; gpioirqchip->child_to_parent_hwirq = msc313e_gpio_child_to_parent_hwirq; gpioirqchip->populate_parent_alloc_arg = msc313_gpio_populate_parent_fwspec; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 8baf3edd527411..3f2d33ee20cca9 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -498,7 +498,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long ret = regmap_bulk_write(chip->regmap, regaddr, value, NBANK(chip)); if (ret < 0) { - dev_err(&chip->client->dev, "failed writing register\n"); + dev_err(&chip->client->dev, "failed writing register: %d\n", ret); return ret; } @@ -513,7 +513,7 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long * ret = regmap_bulk_read(chip->regmap, regaddr, value, NBANK(chip)); if (ret < 0) { - dev_err(&chip->client->dev, "failed reading register\n"); + dev_err(&chip->client->dev, "failed reading register: %d\n", ret); return ret; } diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index ee37ecb615cb17..63f25c72eac2fb 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -84,7 +84,6 @@ struct pch_gpio_reg_data { * @gpio: Data for GPIO infrastructure. * @pch_gpio_reg: Memory mapped Register data is saved here * when suspend. 
- * @lock: Used for register access protection * @irq_base: Save base of IRQ number for interrupt * @ioh: IOH ID * @spinlock: Used for register access protection diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c index d89da7300ddd06..d770a6f3d84664 100644 --- a/drivers/gpio/gpio-sama5d2-piobu.c +++ b/drivers/gpio/gpio-sama5d2-piobu.c @@ -191,15 +191,15 @@ static int sama5d2_piobu_probe(struct platform_device *pdev) piobu->chip.label = pdev->name; piobu->chip.parent = &pdev->dev; - piobu->chip.owner = THIS_MODULE, - piobu->chip.get_direction = sama5d2_piobu_get_direction, - piobu->chip.direction_input = sama5d2_piobu_direction_input, - piobu->chip.direction_output = sama5d2_piobu_direction_output, - piobu->chip.get = sama5d2_piobu_get, - piobu->chip.set = sama5d2_piobu_set, - piobu->chip.base = -1, - piobu->chip.ngpio = PIOBU_NUM, - piobu->chip.can_sleep = 0, + piobu->chip.owner = THIS_MODULE; + piobu->chip.get_direction = sama5d2_piobu_get_direction; + piobu->chip.direction_input = sama5d2_piobu_direction_input; + piobu->chip.direction_output = sama5d2_piobu_direction_output; + piobu->chip.get = sama5d2_piobu_get; + piobu->chip.set = sama5d2_piobu_set; + piobu->chip.base = -1; + piobu->chip.ngpio = PIOBU_NUM; + piobu->chip.can_sleep = 0; piobu->regmap = syscon_node_to_regmap(pdev->dev.of_node); if (IS_ERR(piobu->regmap)) { diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c index aed6d1f6cfc308..07e0d7180579cc 100644 --- a/drivers/gpio/gpio-sloppy-logic-analyzer.c +++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c @@ -217,7 +217,6 @@ static const struct file_operations fops_trigger = { .owner = THIS_MODULE, .open = trigger_open, .write = trigger_write, - .llseek = no_llseek, .release = single_release, }; diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 6c5ee81d71b3fb..75a3633ceddbb8 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -5,16 +5,16 @@ * Author: Rabin Vincent for ST-Ericsson */ +#include #include -#include -#include -#include #include +#include #include -#include #include +#include +#include #include -#include +#include /* * These registers are modified under the irq bus lock and cached to avoid @@ -31,7 +31,6 @@ enum { LSB, CSB, MSB }; struct stmpe_gpio { struct gpio_chip chip; struct stmpe *stmpe; - struct device *dev; struct mutex irq_lock; u32 norequest_mask; /* Caches of interrupt control registers for bus_lock */ @@ -464,59 +463,49 @@ static void stmpe_gpio_disable(void *stmpe) static int stmpe_gpio_probe(struct platform_device *pdev) { - struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent); - struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct stmpe *stmpe = dev_get_drvdata(dev->parent); struct stmpe_gpio *stmpe_gpio; int ret, irq; if (stmpe->num_gpios > MAX_GPIOS) { - dev_err(&pdev->dev, "Need to increase maximum GPIO number\n"); + dev_err(dev, "Need to increase maximum GPIO number\n"); return -EINVAL; } - stmpe_gpio = devm_kzalloc(&pdev->dev, sizeof(*stmpe_gpio), GFP_KERNEL); + stmpe_gpio = devm_kzalloc(dev, sizeof(*stmpe_gpio), GFP_KERNEL); if (!stmpe_gpio) return -ENOMEM; mutex_init(&stmpe_gpio->irq_lock); - stmpe_gpio->dev = &pdev->dev; stmpe_gpio->stmpe = stmpe; stmpe_gpio->chip = template_chip; stmpe_gpio->chip.ngpio = stmpe->num_gpios; - stmpe_gpio->chip.parent = &pdev->dev; + stmpe_gpio->chip.parent = dev; stmpe_gpio->chip.base = -1; if (IS_ENABLED(CONFIG_DEBUG_FS)) stmpe_gpio->chip.dbg_show = 
stmpe_dbg_show; - of_property_read_u32(np, "st,norequest-mask", - &stmpe_gpio->norequest_mask); - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - dev_info(&pdev->dev, - "device configured in no-irq mode: " - "irqs are not available\n"); + device_property_read_u32(dev, "st,norequest-mask", &stmpe_gpio->norequest_mask); ret = stmpe_enable(stmpe, STMPE_BLOCK_GPIO); if (ret) return ret; - ret = devm_add_action_or_reset(&pdev->dev, stmpe_gpio_disable, stmpe); + ret = devm_add_action_or_reset(dev, stmpe_gpio_disable, stmpe); if (ret) return ret; + irq = platform_get_irq(pdev, 0); if (irq > 0) { struct gpio_irq_chip *girq; - ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, - stmpe_gpio_irq, IRQF_ONESHOT, - "stmpe-gpio", stmpe_gpio); - if (ret) { - dev_err(&pdev->dev, "unable to get irq: %d\n", ret); - return ret; - } + ret = devm_request_threaded_irq(dev, irq, NULL, stmpe_gpio_irq, + IRQF_ONESHOT, "stmpe-gpio", stmpe_gpio); + if (ret) + return dev_err_probe(dev, ret, "unable to register IRQ handler\n"); girq = &stmpe_gpio->chip.irq; gpio_irq_chip_set_chip(girq, &stmpe_gpio_irq_chip); @@ -530,7 +519,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) girq->init_valid_mask = stmpe_init_irq_valid_mask; } - return devm_gpiochip_add_data(&pdev->dev, &stmpe_gpio->chip, stmpe_gpio); + return devm_gpiochip_add_data(dev, &stmpe_gpio->chip, stmpe_gpio); } static struct platform_driver stmpe_gpio_driver = { diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c index 053d616f2e02a9..5a6406d1f03aa7 100644 --- a/drivers/gpio/gpio-stp-xway.c +++ b/drivers/gpio/gpio-stp-xway.c @@ -296,23 +296,17 @@ static int xway_stp_probe(struct platform_device *pdev) if (!of_property_read_bool(pdev->dev.of_node, "lantiq,rising")) chip->edge = XWAY_STP_FALLING; - clk = devm_clk_get(&pdev->dev, NULL); + clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Failed to get clock\n"); return PTR_ERR(clk); } - ret = clk_prepare_enable(clk); - if (ret) - return ret; - xway_stp_hw_init(chip); ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip); - if (ret) { - clk_disable_unprepare(clk); + if (ret) return ret; - } dev_info(&pdev->dev, "Init done\n"); diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 3a90a3a1caea1b..5ab394ec81e69b 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c @@ -23,7 +23,6 @@ /** * struct syscon_gpio_data - Configuration for the device. - * @compatible: SYSCON driver compatible string. 
* @flags: Set of GPIO_SYSCON_FEAT_ flags: * GPIO_SYSCON_FEAT_IN: GPIOs supports input, * GPIO_SYSCON_FEAT_OUT: GPIOs supports output, diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index ea5f9cc14bc486..6d3a39a03f58e2 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -18,11 +18,12 @@ #include #include #include -#include #include #include #include #include +#include +#include #define GPIO_BANK(x) ((x) >> 5) #define GPIO_PORT(x) (((x) >> 3) & 0x3) @@ -755,7 +756,7 @@ static int tegra_gpio_probe(struct platform_device *pdev) } irq = &tgi->gc.irq; - irq->fwnode = of_node_to_fwnode(pdev->dev.of_node); + irq->fwnode = dev_fwnode(&pdev->dev); irq->child_to_parent_hwirq = tegra_gpio_child_to_parent_hwirq; irq->populate_parent_alloc_arg = tegra_gpio_populate_parent_fwspec; irq->handler = handle_simple_irq; diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 9130c691a2dd32..1ecb733a5e88b4 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -928,7 +929,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev) irq = &gpio->gpio.irq; gpio_irq_chip_set_chip(irq, &tegra186_gpio_irq_chip); - irq->fwnode = of_node_to_fwnode(pdev->dev.of_node); + irq->fwnode = dev_fwnode(&pdev->dev); irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq; irq->populate_parent_alloc_arg = tegra186_gpio_populate_parent_fwspec; irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq; diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index 8521c6aacace1a..5b851e904c11f5 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #define GPIO_RX_DAT 0x0 @@ -533,7 +534,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, chip->set_config = thunderx_gpio_set_config; girq = &chip->irq; gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip); - girq->fwnode = of_node_to_fwnode(dev->of_node); + girq->fwnode = dev_fwnode(dev); girq->parent_domain = irq_get_irq_data(txgpio->msix_entries[0].vector)->domain; girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq; @@ -549,7 +550,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, for (i = 0; i < ngpio; i++) { struct irq_fwspec fwspec; - fwspec.fwnode = of_node_to_fwnode(dev->of_node); + fwspec.fwnode = dev_fwnode(dev); fwspec.param_count = 2; fwspec.param[0] = i; fwspec.param[1] = IRQ_TYPE_NONE; diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c index 1f440707f8f4a4..da99ba13e82d0c 100644 --- a/drivers/gpio/gpio-uniphier.c +++ b/drivers/gpio/gpio-uniphier.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -164,7 +165,7 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) if (offset < UNIPHIER_GPIO_IRQ_OFFSET) return -ENXIO; - fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node); + fwspec.fwnode = dev_fwnode(chip->parent); fwspec.param_count = 2; fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET; /* @@ -404,7 +405,7 @@ static int uniphier_gpio_probe(struct platform_device *pdev) priv->domain = irq_domain_create_hierarchy( parent_domain, 0, UNIPHIER_GPIO_IRQ_MAX_NUM, - of_node_to_fwnode(dev->of_node), + dev_fwnode(dev), &uniphier_gpio_irq_domain_ops, priv); if (!priv->domain) return -ENOMEM; diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 07e5e6323e86ad..27eff741fe9a2a 
100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -97,7 +97,7 @@ static inline u32 vf610_gpio_readl(void __iomem *reg) static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct vf610_gpio_port *port = gpiochip_get_data(gc); - unsigned long mask = BIT(gpio); + u32 mask = BIT(gpio); unsigned long offset = GPIO_PDIR; if (port->sdata->have_paddr) { @@ -112,16 +112,16 @@ static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio) static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct vf610_gpio_port *port = gpiochip_get_data(gc); - unsigned long mask = BIT(gpio); + u32 mask = BIT(gpio); unsigned long offset = val ? GPIO_PSOR : GPIO_PCOR; vf610_gpio_writel(mask, port->gpio_base + offset); } -static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) +static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio) { struct vf610_gpio_port *port = gpiochip_get_data(chip); - unsigned long mask = BIT(gpio); + u32 mask = BIT(gpio); u32 val; if (port->sdata->have_paddr) { @@ -133,11 +133,11 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) return pinctrl_gpio_direction_input(chip, gpio); } -static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, +static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio, int value) { struct vf610_gpio_port *port = gpiochip_get_data(chip); - unsigned long mask = BIT(gpio); + u32 mask = BIT(gpio); u32 val; vf610_gpio_set(chip, gpio, value); @@ -151,6 +151,19 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, return pinctrl_gpio_direction_output(chip, gpio); } +static int vf610_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio) +{ + struct vf610_gpio_port *port = gpiochip_get_data(gc); + u32 mask = BIT(gpio); + + mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR); + + if (mask) + return GPIO_LINE_DIRECTION_OUT; + + return GPIO_LINE_DIRECTION_IN; +} + static void vf610_gpio_irq_handler(struct irq_desc *desc) { struct vf610_gpio_port *port = @@ -362,6 +375,12 @@ static int vf610_gpio_probe(struct platform_device *pdev) gc->get = vf610_gpio_get; gc->direction_output = vf610_gpio_direction_output; gc->set = vf610_gpio_set; + /* + * only IP has Port Data Direction Register(PDDR) can + * support get direction + */ + if (port->sdata->have_paddr) + gc->get_direction = vf610_gpio_get_direction; /* Mask all GPIO interrupts */ for (i = 0; i < gc->ngpio; i++) diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c index ccc47ea0b3e122..91b6352c957cf9 100644 --- a/drivers/gpio/gpio-virtuser.c +++ b/drivers/gpio/gpio-virtuser.c @@ -1410,7 +1410,6 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev) size_t num_entries = gpio_virtuser_get_lookup_count(dev); struct gpio_virtuser_lookup_entry *entry; struct gpio_virtuser_lookup *lookup; - struct gpiod_lookup *curr; unsigned int i = 0; lockdep_assert_held(&dev->lock); @@ -1426,14 +1425,10 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev) list_for_each_entry(lookup, &dev->lookup_list, siblings) { list_for_each_entry(entry, &lookup->entry_list, siblings) { - curr = &table->table[i]; - - curr->con_id = lookup->con_id; - curr->idx = i; - curr->key = entry->key; - curr->chip_hwnum = entry->offset < 0 ? - U16_MAX : entry->offset; - curr->flags = entry->flags; + table->table[i] = + GPIO_LOOKUP_IDX(entry->key, + entry->offset < 0 ? 
U16_MAX : entry->offset, + lookup->con_id, i, entry->flags); i++; } } diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c index 6734e7e1e2a476..ebc71ecdb6cf5d 100644 --- a/drivers/gpio/gpio-visconti.c +++ b/drivers/gpio/gpio-visconti.c @@ -8,6 +8,7 @@ * Nobuhiro Iwamatsu */ +#include #include #include #include @@ -15,8 +16,8 @@ #include #include #include +#include #include -#include /* register offset */ #define GPIO_DIR 0x00 @@ -202,7 +203,7 @@ static int visconti_gpio_probe(struct platform_device *pdev) girq = &priv->gpio_chip.irq; gpio_irq_chip_set_chip(girq, &visconti_gpio_irq_chip); - girq->fwnode = of_node_to_fwnode(dev->of_node); + girq->fwnode = dev_fwnode(dev); girq->parent_domain = parent; girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq; girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec; diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index 7348df38519815..afcf432a1573ed 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -333,12 +333,9 @@ static int __maybe_unused xgpio_suspend(struct device *dev) */ static void xgpio_remove(struct platform_device *pdev) { - struct xgpio_instance *gpio = platform_get_drvdata(pdev); - pm_runtime_get_sync(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); - clk_disable_unprepare(gpio->clk); } /** @@ -644,15 +641,10 @@ static int xgpio_probe(struct platform_device *pdev) return PTR_ERR(chip->regs); } - chip->clk = devm_clk_get_optional(&pdev->dev, NULL); + chip->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); if (IS_ERR(chip->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(chip->clk), "input clock not found.\n"); - status = clk_prepare_enable(chip->clk); - if (status < 0) { - dev_err(&pdev->dev, "Failed to prepare clk\n"); - return status; - } pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); @@ -699,7 +691,6 @@ static int xgpio_probe(struct platform_device *pdev) err_pm_put: pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - clk_disable_unprepare(chip->clk); return status; } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 466e23031afccb..1a42336dfc1d4a 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -940,16 +940,10 @@ static int zynq_gpio_probe(struct platform_device *pdev) chip->ngpio = gpio->p_data->ngpio; /* Retrieve GPIO clock */ - gpio->clk = devm_clk_get(&pdev->dev, NULL); + gpio->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(gpio->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(gpio->clk), "input clock not found.\n"); - ret = clk_prepare_enable(gpio->clk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable clock.\n"); - return ret; - } - spin_lock_init(&gpio->dirlock); pm_runtime_set_active(&pdev->dev); @@ -999,7 +993,6 @@ static int zynq_gpio_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); err_pm_dis: pm_runtime_disable(&pdev->dev); - clk_disable_unprepare(gpio->clk); return ret; } @@ -1019,7 +1012,6 @@ static void zynq_gpio_remove(struct platform_device *pdev) if (ret < 0) dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n"); gpiochip_remove(&gpio->chip); - clk_disable_unprepare(gpio->clk); device_set_wakeup_capable(&pdev->dev, 0); pm_runtime_disable(&pdev->dev); } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 69cd2be9c7f3b3..78ecd56123a3b6 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -153,8 
+153,12 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data) * @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1") * @pin: ACPI GPIO pin number (0-based, controller-relative) * - * Return: GPIO descriptor to use with Linux generic GPIO API, or ERR_PTR - * error value. Specifically returns %-EPROBE_DEFER if the referenced GPIO + * Returns: + * GPIO descriptor to use with Linux generic GPIO API. + * If the GPIO cannot be translated or there is an error an ERR_PTR is + * returned. + * + * Specifically returns %-EPROBE_DEFER if the referenced GPIO * controller does not have GPIO chip registered at the moment. This is to * support probe deferral. */ @@ -224,6 +228,9 @@ EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource); * I/O resource or return False if not. * @ares: Pointer to the ACPI resource to fetch * @agpio: Pointer to a &struct acpi_resource_gpio to store the output pointer + * + * Returns: + * %true if GpioIo resource is found, %false otherwise. */ bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) @@ -876,7 +883,9 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode, * that case @index is used to select the GPIO entry in the property value * (in case of multiple). * - * If the GPIO cannot be translated or there is an error, an ERR_PTR is + * Returns: + * GPIO descriptor to use with Linux generic GPIO API. + * If the GPIO cannot be translated or there is an error an ERR_PTR is * returned. * * Note: if the GPIO resource has multiple entries in the pin list, this @@ -924,6 +933,8 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev, * resource with the relevant information from a data-only ACPI firmware node * and uses that to obtain the GPIO descriptor to return. * + * Returns: + * GPIO descriptor to use with Linux generic GPIO API. * If the GPIO cannot be translated or there is an error an ERR_PTR is * returned. */ @@ -973,18 +984,9 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int struct acpi_device *adev = to_acpi_device_node(fwnode); struct gpio_desc *desc; char propname[32]; - int i; /* Try first from _DSD */ - for (i = 0; i < gpio_suffix_count; i++) { - if (con_id) { - snprintf(propname, sizeof(propname), "%s-%s", - con_id, gpio_suffixes[i]); - } else { - snprintf(propname, sizeof(propname), "%s", - gpio_suffixes[i]); - } - + for_each_gpio_property_name(propname, con_id) { if (adev) desc = acpi_get_gpiod_by_index(adev, propname, idx, info); @@ -1051,7 +1053,8 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, * The GPIO is considered wake capable if the GpioInt resource specifies * SharedAndWake or ExclusiveAndWake. * - * Return: Linux IRQ number (> %0) on success, negative errno on failure. + * Returns: + * Linux IRQ number (> 0) on success, negative errno on failure. */ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable) @@ -1438,7 +1441,7 @@ static int acpi_find_gpio_count(struct acpi_resource *ares, void *data) * @fwnode: firmware node of the GPIO consumer * @con_id: function within the GPIO consumer * - * Return: + * Returns: * The number of GPIOs associated with a firmware node / function or %-ENOENT, * if no GPIO has been assigned to the requested function. 
*/ @@ -1450,17 +1453,9 @@ int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) int count = -ENOENT; int ret; char propname[32]; - unsigned int i; /* Try first from _DSD */ - for (i = 0; i < gpio_suffix_count; i++) { - if (con_id) - snprintf(propname, sizeof(propname), "%s-%s", - con_id, gpio_suffixes[i]); - else - snprintf(propname, sizeof(propname), "%s", - gpio_suffixes[i]); - + for_each_gpio_property_name(propname, con_id) { ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj); if (ret == 0) { if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index ef08b23a56e253..78c9d9ed3d687f 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -2748,7 +2748,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, * gpio_chrdev_open() - open the chardev for ioctl operations * @inode: inode for this chardev * @file: file struct for storing private data - * Returns 0 on success + * + * Returns: + * 0 on success, or negative errno on failure. */ static int gpio_chrdev_open(struct inode *inode, struct file *file) { @@ -2814,7 +2816,9 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) * gpio_chrdev_release() - close chardev after ioctl operations * @inode: inode for this chardev * @file: file struct for storing private data - * Returns 0 on success + * + * Returns: + * 0 on success, or negative errno on failure. */ static int gpio_chrdev_release(struct inode *inode, struct file *file) { @@ -2838,7 +2842,6 @@ static const struct file_operations gpio_fileops = { .poll = lineinfo_watch_poll, .read = lineinfo_watch_read, .owner = THIS_MODULE, - .llseek = no_llseek, .unlocked_ioctl = gpio_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = gpio_ioctl_compat, diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c index 4987e62dcb3d15..08205f355cebe5 100644 --- a/drivers/gpio/gpiolib-devres.c +++ b/drivers/gpio/gpiolib-devres.c @@ -6,15 +6,19 @@ * Copyright (c) 2011 John Crispin */ -#include -#include -#include -#include #include +#include +#include #include +#include + +#include #include "gpiolib.h" +struct fwnode_handle; +struct lock_class_key; + static void devm_gpiod_release(struct device *dev, void *res) { struct gpio_desc **desc = res; @@ -52,6 +56,11 @@ static int devm_gpiod_match_array(struct device *dev, void *res, void *data) * Managed gpiod_get(). GPIO descriptors returned from this function are * automatically disposed on driver detach. See gpiod_get() for detailed * information about behavior and return values. + * + * Returns: + * The GPIO descriptor corresponding to the function @con_id of device + * dev, %-ENOENT if no GPIO has been assigned to the requested function, or + * another IS_ERR() code if an error occurred while trying to acquire the GPIO. */ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, const char *con_id, @@ -70,6 +79,11 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get); * Managed gpiod_get_optional(). GPIO descriptors returned from this function * are automatically disposed on driver detach. See gpiod_get_optional() for * detailed information about behavior and return values. + * + * Returns: + * The GPIO descriptor corresponding to the function @con_id of device + * dev, NULL if no GPIO has been assigned to the requested function, or + * another IS_ERR() code if an error occurred while trying to acquire the GPIO. 
*/ struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, const char *con_id, @@ -89,6 +103,11 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_optional); * Managed gpiod_get_index(). GPIO descriptors returned from this function are * automatically disposed on driver detach. See gpiod_get_index() for detailed * information about behavior and return values. + * + * Returns: + * The GPIO descriptor corresponding to the function @con_id of device + * dev, %-ENOENT if no GPIO has been assigned to the requested function, or + * another IS_ERR() code if an error occurred while trying to acquire the GPIO. */ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, const char *con_id, @@ -141,8 +160,10 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_index); * GPIO descriptors returned from this function are automatically disposed on * driver detach. * - * On successful request the GPIO pin is configured in accordance with - * provided @flags. + * Returns: + * The GPIO descriptor corresponding to the function @con_id of device + * dev, %-ENOENT if no GPIO has been assigned to the requested function, or + * another IS_ERR() code if an error occurred while trying to acquire the GPIO. */ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev, struct fwnode_handle *fwnode, @@ -182,6 +203,11 @@ EXPORT_SYMBOL_GPL(devm_fwnode_gpiod_get_index); * function are automatically disposed on driver detach. See * gpiod_get_index_optional() for detailed information about behavior and * return values. + * + * Returns: + * The GPIO descriptor corresponding to the function @con_id of device + * dev, %NULL if no GPIO has been assigned to the requested function, or + * another IS_ERR() code if an error occurred while trying to acquire the GPIO. */ struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, const char *con_id, @@ -207,6 +233,12 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_index_optional); * Managed gpiod_get_array(). GPIO descriptors returned from this function are * automatically disposed on driver detach. See gpiod_get_array() for detailed * information about behavior and return values. + * + * Returns: + * The GPIO descriptors corresponding to the function @con_id of device + * dev, %-ENOENT if no GPIO has been assigned to the requested function, + * or another IS_ERR() code if an error occurred while trying to acquire + * the GPIOs. */ struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev, const char *con_id, @@ -243,6 +275,12 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_array); * function are automatically disposed on driver detach. * See gpiod_get_array_optional() for detailed information about behavior and * return values. + * + * Returns: + * The GPIO descriptors corresponding to the function @con_id of device + * dev, %NULL if no GPIO has been assigned to the requested function, + * or another IS_ERR() code if an error occurred while trying to acquire + * the GPIOs. 
*/ struct gpio_descs *__must_check devm_gpiod_get_array_optional(struct device *dev, const char *con_id, @@ -320,76 +358,6 @@ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs) } EXPORT_SYMBOL_GPL(devm_gpiod_put_array); -static void devm_gpio_release(struct device *dev, void *res) -{ - unsigned *gpio = res; - - gpio_free(*gpio); -} - -/** - * devm_gpio_request - request a GPIO for a managed device - * @dev: device to request the GPIO for - * @gpio: GPIO to allocate - * @label: the name of the requested GPIO - * - * Except for the extra @dev argument, this function takes the - * same arguments and performs the same function as - * gpio_request(). GPIOs requested with this function will be - * automatically freed on driver detach. - */ -int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) -{ - unsigned *dr; - int rc; - - dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); - if (!dr) - return -ENOMEM; - - rc = gpio_request(gpio, label); - if (rc) { - devres_free(dr); - return rc; - } - - *dr = gpio; - devres_add(dev, dr); - - return 0; -} -EXPORT_SYMBOL_GPL(devm_gpio_request); - -/** - * devm_gpio_request_one - request a single GPIO with initial setup - * @dev: device to request for - * @gpio: the GPIO number - * @flags: GPIO configuration as specified by GPIOF_* - * @label: a literal description string of this GPIO - */ -int devm_gpio_request_one(struct device *dev, unsigned gpio, - unsigned long flags, const char *label) -{ - unsigned *dr; - int rc; - - dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); - if (!dr) - return -ENOMEM; - - rc = gpio_request_one(gpio, flags, label); - if (rc) { - devres_free(dr); - return rc; - } - - *dr = gpio; - devres_add(dev, dr); - - return 0; -} -EXPORT_SYMBOL_GPL(devm_gpio_request_one); - static void devm_gpio_chip_release(void *data) { struct gpio_chip *gc = data; diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c index 5a9911ae912509..28f1046fb6704d 100644 --- a/drivers/gpio/gpiolib-legacy.c +++ b/drivers/gpio/gpiolib-legacy.c @@ -1,4 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + #include #include @@ -22,6 +28,9 @@ EXPORT_SYMBOL_GPL(gpio_free); * @label: a literal description string of this GPIO * * **DEPRECATED** This function is deprecated and must not be used in new code. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) { @@ -40,11 +49,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) if (flags & GPIOF_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); - if (flags & GPIOF_DIR_IN) + if (flags & GPIOF_IN) err = gpiod_direction_input(desc); else - err = gpiod_direction_output_raw(desc, - (flags & GPIOF_INIT_HIGH) ? 
1 : 0); + err = gpiod_direction_output_raw(desc, !!(flags & GPIOF_OUT_INIT_HIGH)); if (err) goto free_gpio; @@ -72,3 +80,83 @@ int gpio_request(unsigned gpio, const char *label) return gpiod_request(desc, label); } EXPORT_SYMBOL_GPL(gpio_request); + +static void devm_gpio_release(struct device *dev, void *res) +{ + unsigned *gpio = res; + + gpio_free(*gpio); +} + +/** + * devm_gpio_request - request a GPIO for a managed device + * @dev: device to request the GPIO for + * @gpio: GPIO to allocate + * @label: the name of the requested GPIO + * + * Except for the extra @dev argument, this function takes the + * same arguments and performs the same function as gpio_request(). + * GPIOs requested with this function will be automatically freed + * on driver detach. + * + * **DEPRECATED** This function is deprecated and must not be used in new code. + * + * Returns: + * 0 on success, or negative errno on failure. + */ +int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) +{ + unsigned *dr; + int rc; + + dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + rc = gpio_request(gpio, label); + if (rc) { + devres_free(dr); + return rc; + } + + *dr = gpio; + devres_add(dev, dr); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_gpio_request); + +/** + * devm_gpio_request_one - request a single GPIO with initial setup + * @dev: device to request for + * @gpio: the GPIO number + * @flags: GPIO configuration as specified by GPIOF_* + * @label: a literal description string of this GPIO + * + * **DEPRECATED** This function is deprecated and must not be used in new code. + * + * Returns: + * 0 on success, or negative errno on failure. + */ +int devm_gpio_request_one(struct device *dev, unsigned gpio, + unsigned long flags, const char *label) +{ + unsigned *dr; + int rc; + + dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + rc = gpio_request_one(gpio, flags, label); + if (rc) { + devres_free(dr); + return rc; + } + + *dr = gpio; + devres_add(dev, dr); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_gpio_request_one); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index f6af5e7be4d147..880f1efcaca534 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -46,16 +46,19 @@ enum of_gpio_flags { * @propname: property name containing gpio specifier(s) * * The function returns the count of GPIOs specified for a node. - * Note that the empty GPIO specifiers count too. Returns either - * Number of gpios defined in property, - * -EINVAL for an incorrectly formed gpios property, or - * -ENOENT for a missing gpios property + * NOTE: The empty GPIO specifiers count too. * - * Example: - * gpios = <0 - * &gpio1 1 2 - * 0 - * &gpio2 3 4>; + * Returns: + * Either number of GPIOs defined in the property, or + * * %-EINVAL for an incorrectly formed "gpios" property, or + * * %-ENOENT for a missing "gpios" property. + * + * Example:: + * + * gpios = <0 + * &gpio1 1 2 + * 0 + * &gpio2 3 4>; * * The above example defines four GPIOs, two of which are not specified. * This function will return '4' @@ -77,6 +80,11 @@ static int of_gpio_named_count(const struct device_node *np, * "gpios" for the chip select lines. If we detect this, we redirect * the counting of "cs-gpios" to count "gpios" transparent to the * driver. 
+ * + * Returns: + * Either number of GPIOs defined in the property, or + * * %-EINVAL for an incorrectly formed "gpios" property, or + * * %-ENOENT for a missing "gpios" property. */ static int of_gpio_spi_cs_get_count(const struct device_node *np, const char *con_id) @@ -97,20 +105,12 @@ int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) const struct device_node *np = to_of_node(fwnode); int ret; char propname[32]; - unsigned int i; ret = of_gpio_spi_cs_get_count(np, con_id); if (ret > 0) return ret; - for (i = 0; i < gpio_suffix_count; i++) { - if (con_id) - snprintf(propname, sizeof(propname), "%s-%s", - con_id, gpio_suffixes[i]); - else - snprintf(propname, sizeof(propname), "%s", - gpio_suffixes[i]); - + for_each_gpio_property_name(propname, con_id) { ret = of_gpio_named_count(np, propname); if (ret > 0) break; @@ -338,11 +338,10 @@ static void of_gpio_flags_quirks(const struct device_node *np, */ if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") && of_property_read_bool(np, "cs-gpios")) { - struct device_node *child; u32 cs; int ret; - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { ret = of_property_read_u32(child, "reg", &cs); if (ret) continue; @@ -363,7 +362,6 @@ static void of_gpio_flags_quirks(const struct device_node *np, "spi-cs-high"); of_gpio_quirk_polarity(child, active_high, flags); - of_node_put(child); break; } } @@ -383,7 +381,8 @@ static void of_gpio_flags_quirks(const struct device_node *np, * @index: index of the GPIO * @flags: a flags pointer to fill in * - * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno + * Returns: + * GPIO descriptor to use with Linux GPIO API, or one of the errno * value on the error condition. If @flags is not NULL the function also fills * in flags for the GPIO. */ @@ -435,7 +434,8 @@ static struct gpio_desc *of_get_named_gpiod_flags(const struct device_node *np, * * **DEPRECATED** This function is deprecated and must not be used in new code. * - * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * Returns: + * GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. 
*/ int of_get_named_gpio(const struct device_node *np, const char *propname, @@ -687,23 +687,14 @@ static const of_find_gpio_quirk of_find_gpio_quirks[] = { struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id, unsigned int idx, unsigned long *flags) { - char prop_name[32]; /* 32 is max size of property name */ + char propname[32]; /* 32 is max size of property name */ enum of_gpio_flags of_flags; const of_find_gpio_quirk *q; struct gpio_desc *desc; - unsigned int i; /* Try GPIO property "foo-gpios" and "foo-gpio" */ - for (i = 0; i < gpio_suffix_count; i++) { - if (con_id) - snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id, - gpio_suffixes[i]); - else - snprintf(prop_name, sizeof(prop_name), "%s", - gpio_suffixes[i]); - - desc = of_get_named_gpiod_flags(np, prop_name, idx, &of_flags); - + for_each_gpio_property_name(propname, con_id) { + desc = of_get_named_gpiod_flags(np, propname, idx, &of_flags); if (!gpiod_not_found(desc)) break; } @@ -730,7 +721,8 @@ struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id, * of_find_gpio() or of_parse_own_gpio() * @dflags: gpiod_flags - optional GPIO initialization flags * - * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno + * Returns: + * GPIO descriptor to use with Linux GPIO API, or one of the errno * value on the error condition. */ static struct gpio_desc *of_parse_own_gpio(struct device_node *np, @@ -798,7 +790,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np, * @chip: gpio chip to act on * @hog: device node describing the hogs * - * Returns error if it fails otherwise 0 on success. + * Returns: + * 0 on success, or negative errno on failure. */ static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog) { @@ -832,22 +825,21 @@ static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog) * * This is only used by of_gpiochip_add to request/set GPIO initial * configuration. - * It returns error if it fails otherwise 0 on success. + * + * Returns: + * 0 on success, or negative errno on failure. */ static int of_gpiochip_scan_gpios(struct gpio_chip *chip) { - struct device_node *np; int ret; - for_each_available_child_of_node(dev_of_node(&chip->gpiodev->dev), np) { + for_each_available_child_of_node_scoped(dev_of_node(&chip->gpiodev->dev), np) { if (!of_property_read_bool(np, "gpio-hog")) continue; ret = of_gpiochip_add_hog(chip, np); - if (ret < 0) { - of_node_put(np); + if (ret < 0) return ret; - } of_node_set_flag(np, OF_POPULATED); } @@ -945,6 +937,9 @@ struct notifier_block gpio_of_notifier = { * This is simple translation function, suitable for the most 1:1 mapped * GPIO chips. This function performs only one sanity check: whether GPIO * is less than ngpios (that is specified in the gpio_chip). + * + * Returns: + * GPIO number (>= 0) on success, negative errno on failure. */ static int of_gpio_simple_xlate(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, @@ -994,6 +989,9 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc, * If succeeded, this function will map bank's memory and will * do all necessary work for you. Then you'll able to use .regs * to manage GPIOs from the callbacks. + * + * Returns: + * 0 on success, or negative errno on failure. 
*/ int of_mm_gpiochip_add_data(struct device_node *np, struct of_mm_gpio_chip *mm_gc, @@ -1058,13 +1056,13 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) int index = 0, ret, trim; const char *name; static const char group_names_propname[] = "gpio-ranges-group-names"; - struct property *group_names; + bool has_group_names; np = dev_of_node(&chip->gpiodev->dev); if (!np) return 0; - group_names = of_find_property(np, group_names_propname, NULL); + has_group_names = of_property_present(np, group_names_propname); for (;; index++) { ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, @@ -1085,7 +1083,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) if (pinspec.args[2]) { /* npins != 0: linear range */ - if (group_names) { + if (has_group_names) { of_property_read_string_index(np, group_names_propname, index, &name); @@ -1123,7 +1121,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) break; } - if (!group_names) { + if (!has_group_names) { pr_err("%pOF: GPIO group range requested but no %s property.\n", np, group_names_propname); break; diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c index cec1ab878af866..2b2dd7e92211dc 100644 --- a/drivers/gpio/gpiolib-swnode.c +++ b/drivers/gpio/gpiolib-swnode.c @@ -24,20 +24,6 @@ #define GPIOLIB_SWNODE_UNDEFINED_NAME "swnode-gpio-undefined" -static void swnode_format_propname(const char *con_id, char *propname, - size_t max_size) -{ - /* - * Note we do not need to try both -gpios and -gpio suffixes, - * as, unlike OF and ACPI, we can fix software nodes to conform - * to the proper binding. - */ - if (con_id) - snprintf(propname, max_size, "%s-gpios", con_id); - else - strscpy(propname, "gpios", max_size); -} - static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode) { const struct software_node *gdev_node; @@ -59,6 +45,17 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode) return gdev ?: ERR_PTR(-EPROBE_DEFER); } +static int swnode_gpio_get_reference(const struct fwnode_handle *fwnode, + const char *propname, unsigned int idx, + struct fwnode_reference_args *args) +{ + /* + * We expect all swnode-described GPIOs have GPIO number and + * polarity arguments, hence nargs is set to 2. + */ + return fwnode_property_get_reference_args(fwnode, propname, NULL, 2, idx, args); +} + struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx, unsigned long *flags) @@ -67,23 +64,21 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode, struct fwnode_reference_args args; struct gpio_desc *desc; char propname[32]; /* 32 is max size of property name */ - int error; + int ret; swnode = to_software_node(fwnode); if (!swnode) return ERR_PTR(-EINVAL); - swnode_format_propname(con_id, propname, sizeof(propname)); - - /* - * We expect all swnode-described GPIOs have GPIO number and - * polarity arguments, hence nargs is set to 2. 
- */ - error = fwnode_property_get_reference_args(fwnode, propname, NULL, 2, idx, &args); - if (error) { + for_each_gpio_property_name(propname, con_id) { + ret = swnode_gpio_get_reference(fwnode, propname, idx, &args); + if (ret == 0) + break; + } + if (ret) { pr_debug("%s: can't parse '%s' property of node '%pfwP[%d]'\n", __func__, propname, fwnode, idx); - return ERR_PTR(error); + return ERR_PTR(ret); } struct gpio_device *gdev __free(gpio_device_put) = @@ -111,7 +106,7 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode, * system-global GPIOs * @con_id: function within the GPIO consumer * - * Return: + * Returns: * The number of GPIOs associated with a device / function or %-ENOENT, * if no GPIO has been assigned to the requested function. */ @@ -121,20 +116,21 @@ int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) char propname[32]; int count; - swnode_format_propname(con_id, propname, sizeof(propname)); - /* * This is not very efficient, but GPIO lists usually have only * 1 or 2 entries. */ - count = 0; - while (fwnode_property_get_reference_args(fwnode, propname, NULL, 0, - count, &args) == 0) { - fwnode_handle_put(args.fwnode); - count++; + for_each_gpio_property_name(propname, con_id) { + count = 0; + while (swnode_gpio_get_reference(fwnode, propname, count, &args) == 0) { + fwnode_handle_put(args.fwnode); + count++; + } + if (count) + return count; } - return count ?: -ENOENT; + return -ENOENT; } #if IS_ENABLED(CONFIG_GPIO_SWNODE_UNDEFINED) diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 26202586fd39de..17ed229412af99 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -568,7 +568,8 @@ static struct class gpio_class = { * will see "direction" sysfs attribute which may be used to change * the gpio's direction. A "value" attribute will always be provided. * - * Returns zero on success, else an error. + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) { @@ -667,7 +668,8 @@ static int match_export(struct device *dev, const void *desc) * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN * node. Caller is responsible for unlinking. * - * Returns zero on success, else an error. + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 3a9668cc100df5..d5952ab7752c2a 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -90,8 +90,7 @@ DEFINE_STATIC_SRCU(gpio_devices_srcu); static DEFINE_MUTEX(gpio_machine_hogs_mutex); static LIST_HEAD(gpio_machine_hogs); -const char *const gpio_suffixes[] = { "gpios", "gpio" }; -const size_t gpio_suffix_count = ARRAY_SIZE(gpio_suffixes); +const char *const gpio_suffixes[] = { "gpios", "gpio", NULL }; static void gpiochip_free_hogs(struct gpio_chip *gc); static int gpiochip_add_irqchip(struct gpio_chip *gc, @@ -115,12 +114,12 @@ const char *gpiod_get_label(struct gpio_desc *desc) srcu_read_lock_held(&desc->gdev->desc_srcu)); if (test_bit(FLAG_USED_AS_IRQ, &flags)) - return label->str ?: "interrupt"; + return label ? label->str : "interrupt"; if (!test_bit(FLAG_REQUESTED, &flags)) return NULL; - return label->str; + return label ? 
label->str : NULL; } static void desc_free_label(struct rcu_head *rh) @@ -231,6 +230,9 @@ EXPORT_SYMBOL_GPL(desc_to_gpio); * This function is unsafe and should not be used. Using the chip address * without taking the SRCU read lock may result in dereferencing a dangling * pointer. + * + * Returns: + * Address of the GPIO chip backing this device. */ struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) { @@ -343,7 +345,8 @@ static int gpiochip_find_base_unlocked(u16 ngpio) * gpiod_get_direction - return the current direction of a GPIO * @desc: GPIO to get the direction of * - * Returns 0 for output, 1 for input, or an error code in case of error. + * Returns: + * 0 for output, 1 for input, or an error code in case of error. * * This function may sleep if gpiod_cansleep() is true. */ @@ -357,7 +360,7 @@ int gpiod_get_direction(struct gpio_desc *desc) * We cannot use VALIDATE_DESC() as we must not return 0 for a NULL * descriptor like we usually do. */ - if (!desc || IS_ERR(desc)) + if (IS_ERR_OR_NULL(desc)) return -EINVAL; CLASS(gpio_chip_guard, guard)(desc); @@ -400,8 +403,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_direction); * Add a new chip to the global chips list, keeping the list of chips sorted * by range(means [base, base + ngpio - 1]) order. * - * Return -EBUSY if the new chip overlaps with some other chip's integer - * space. + * Returns: + * -EBUSY if the new chip overlaps with some other chip's integer space. */ static int gpiodev_add_to_list_unlocked(struct gpio_device *gdev) { @@ -1517,6 +1520,9 @@ static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *gc, * This function is a wrapper that calls gpiochip_lock_as_irq() and is to be * used as the activate function for the &struct irq_domain_ops. The host_data * for the IRQ domain must be the &struct gpio_chip. + * + * Returns: + * 0 on success, or negative errno on failure. */ static int gpiochip_irq_domain_activate(struct irq_domain *domain, struct irq_data *data, bool reserve) @@ -1661,6 +1667,9 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) * This function will set up the mapping for a certain IRQ line on a * gpiochip by assigning the gpiochip as chip data, and using the irqchip * stored inside the gpiochip. + * + * Returns: + * 0 on success, or negative errno on failure. */ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) @@ -1895,6 +1904,9 @@ static int gpiochip_irqchip_add_allocated_domain(struct gpio_chip *gc, * @gc: the GPIO chip to add the IRQ chip to * @lock_key: lockdep class for IRQ lock * @request_key: lockdep class for IRQ request + * + * Returns: + * 0 on success, or a negative errno on failure. */ static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, @@ -2030,6 +2042,9 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc) * @domain: the irqdomain to add to the gpiochip * * This function adds an IRQ domain to the gpiochip. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiochip_irqchip_add_domain(struct gpio_chip *gc, struct irq_domain *domain) @@ -2066,6 +2081,9 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc) * gpiochip_generic_request() - request the gpio function for a pin * @gc: the gpiochip owning the GPIO * @offset: the offset of the GPIO to request for GPIO function + * + * Returns: + * 0 on success, or negative errno on failure. 
*/ int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset) { @@ -2099,6 +2117,9 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free); * @gc: the gpiochip owning the GPIO * @offset: the offset of the GPIO to apply the configuration * @config: the configuration to be applied + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset, unsigned long config) @@ -2125,6 +2146,9 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config); * pinctrl driver is DEPRECATED. Please see Section 2.1 of * Documentation/devicetree/bindings/gpio/gpio.txt on how to * bind pinctrl and gpio drivers via the "gpio-ranges" property. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiochip_add_pingroup_range(struct gpio_chip *gc, struct pinctrl_dev *pctldev, @@ -2176,13 +2200,13 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range); * @npins: the number of pins from the offset of each pin space (GPIO and * pin controller) to accumulate in this range * - * Returns: - * 0 on success, or a negative error-code on failure. - * * Calling this function directly from a DeviceTree-supported * pinctrl driver is DEPRECATED. Please see Section 2.1 of * Documentation/devicetree/bindings/gpio/gpio.txt on how to * bind pinctrl and gpio drivers via the "gpio-ranges" property. + * + * Returns: + * 0 on success, or a negative errno on failure. */ int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, @@ -2586,7 +2610,8 @@ static int gpio_set_bias(struct gpio_desc *desc) * The function calls the certain GPIO driver to set debounce timeout * in the hardware. * - * Returns 0 on success, or negative error code otherwise. + * Returns: + * 0 on success, or negative errno on failure. */ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce) { @@ -2602,7 +2627,8 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce) * Set the direction of the passed GPIO to input, such as gpiod_get_value() can * be called safely on it. * - * Return 0 in case of success, else an error code. + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_direction_input(struct gpio_desc *desc) { @@ -2709,7 +2735,8 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value) * be called safely on it. The initial value of the output must be specified * as raw value on the physical line without regard for the ACTIVE_LOW status. * - * Return 0 in case of success, else an error code. + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_direction_output_raw(struct gpio_desc *desc, int value) { @@ -2728,7 +2755,8 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw); * as the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into * account. * - * Return 0 in case of success, else an error code. + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_direction_output(struct gpio_desc *desc, int value) { @@ -2801,7 +2829,8 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output); * @desc: GPIO to enable. * @flags: Flags related to GPIO edge. * - * Return 0 in case of success, else negative error code. + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags) { @@ -2833,7 +2862,8 @@ EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns); * @desc: GPIO to disable. 
* @flags: Flags related to GPIO edge, same value as used during enable call. * - * Return 0 in case of success, else negative error code. + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags) { @@ -2925,7 +2955,8 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) * gpiod_is_active_low - test whether a GPIO is active-low or not * @desc: the gpio descriptor to test * - * Returns 1 if the GPIO is active-low, 0 otherwise. + * Returns: + * 1 if the GPIO is active-low, 0 otherwise. */ int gpiod_is_active_low(const struct gpio_desc *desc) { @@ -3140,7 +3171,8 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, * gpiod_get_raw_value() - return a gpio's raw value * @desc: gpio whose value will be returned * - * Return the GPIO's raw value, i.e. the value of the physical line disregarding + * Returns: + * The GPIO's raw value, i.e. the value of the physical line disregarding * its ACTIVE_LOW status, or negative errno on failure. * * This function can be called from contexts where we cannot sleep, and will @@ -3159,7 +3191,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value); * gpiod_get_value() - return a gpio's value * @desc: gpio whose value will be returned * - * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into + * Returns: + * The GPIO's logical value, i.e. taking the ACTIVE_LOW status into * account, or negative errno on failure. * * This function can be called from contexts where we cannot sleep, and will @@ -3192,11 +3225,13 @@ EXPORT_SYMBOL_GPL(gpiod_get_value); * @value_bitmap: bitmap to store the read values * * Read the raw values of the GPIOs, i.e. the values of the physical lines - * without regard for their ACTIVE_LOW status. Return 0 in case of success, - * else an error code. + * without regard for their ACTIVE_LOW status. * * This function can be called from contexts where we cannot sleep, * and it will complain if the GPIO chip functions potentially sleep. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_get_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -3219,10 +3254,13 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value); * @value_bitmap: bitmap to store the read values * * Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status - * into account. Return 0 in case of success, else an error code. + * into account. * * This function can be called from contexts where we cannot sleep, * and it will complain if the GPIO chip functions potentially sleep. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_get_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -3510,6 +3548,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_value); * * This function can be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -3535,6 +3576,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value); * * This function can be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. + * + * Returns: + * 0 on success, or negative errno on failure. 
*/ int gpiod_set_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -3553,6 +3597,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value); * gpiod_cansleep() - report whether gpio value access may sleep * @desc: gpio to check * + * Returns: + * 0 for non-sleepable, 1 for sleepable, or an error code in case of error. */ int gpiod_cansleep(const struct gpio_desc *desc) { @@ -3565,6 +3611,9 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep); * gpiod_set_consumer_name() - set the consumer name for the descriptor * @desc: gpio to set the consumer name on * @name: the new consumer name + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) { @@ -3578,8 +3627,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_consumer_name); * gpiod_to_irq() - return the IRQ corresponding to a GPIO * @desc: gpio whose IRQ will be returned (already requested) * - * Return the IRQ corresponding to the passed GPIO, or an error code in case of - * error. + * Returns: + * The IRQ corresponding to the passed GPIO, or an error code in case of error. */ int gpiod_to_irq(const struct gpio_desc *desc) { @@ -3592,7 +3641,7 @@ int gpiod_to_irq(const struct gpio_desc *desc) * requires this function to not return zero on an invalid descriptor * but rather a negative error number. */ - if (!desc || IS_ERR(desc)) + if (IS_ERR_OR_NULL(desc)) return -EINVAL; gdev = desc->gdev; @@ -3633,6 +3682,9 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); * * This is used directly by GPIO drivers that want to lock down * a certain GPIO line to be used for IRQs. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) { @@ -3784,7 +3836,8 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent); * gpiod_get_raw_value_cansleep() - return a gpio's raw value * @desc: gpio whose value will be returned * - * Return the GPIO's raw value, i.e. the value of the physical line disregarding + * Returns: + * The GPIO's raw value, i.e. the value of the physical line disregarding * its ACTIVE_LOW status, or negative errno on failure. * * This function is to be called from contexts that can sleep. @@ -3801,7 +3854,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep); * gpiod_get_value_cansleep() - return a gpio's value * @desc: gpio whose value will be returned * - * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into + * Returns: + * The GPIO's logical value, i.e. taking the ACTIVE_LOW status into * account, or negative errno on failure. * * This function is to be called from contexts that can sleep. @@ -3831,10 +3885,12 @@ EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep); * @value_bitmap: bitmap to store the read values * * Read the raw values of the GPIOs, i.e. the values of the physical lines - * without regard for their ACTIVE_LOW status. Return 0 in case of success, - * else an error code. + * without regard for their ACTIVE_LOW status. * * This function is to be called from contexts that can sleep. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_get_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -3858,9 +3914,12 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value_cansleep); * @value_bitmap: bitmap to store the read values * * Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status - * into account. Return 0 in case of success, else an error code. + * into account. * * This function is to be called from contexts that can sleep. 
+ * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_get_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -3923,6 +3982,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep); * without regard for their ACTIVE_LOW status. * * This function is to be called from contexts that can sleep. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -3965,6 +4027,9 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) * into account. * * This function is to be called from contexts that can sleep. + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -4298,9 +4363,12 @@ EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index); /** * gpiod_count - return the number of GPIOs associated with a device / function - * or -ENOENT if no GPIO has been assigned to the requested function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer + * + * Returns: + * The number of GPIOs associated with a device / function or -ENOENT if no + * GPIO has been assigned to the requested function. */ int gpiod_count(struct device *dev, const char *con_id) { @@ -4327,7 +4395,8 @@ EXPORT_SYMBOL_GPL(gpiod_count); * @con_id: function within the GPIO consumer * @flags: optional GPIO initialization flags * - * Return the GPIO descriptor corresponding to the function con_id of device + * Returns: + * The GPIO descriptor corresponding to the function @con_id of device * dev, -ENOENT if no GPIO has been assigned to the requested function, or * another IS_ERR() code if an error occurred while trying to acquire the GPIO. */ @@ -4347,6 +4416,11 @@ EXPORT_SYMBOL_GPL(gpiod_get); * This is equivalent to gpiod_get(), except that when no GPIO was assigned to * the requested function it will return NULL. This is convenient for drivers * that need to handle optional GPIOs. + * + * Returns: + * The GPIO descriptor corresponding to the function @con_id of device + * dev, NULL if no GPIO has been assigned to the requested function, or + * another IS_ERR() code if an error occurred while trying to acquire the GPIO. */ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, const char *con_id, @@ -4365,7 +4439,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional); * of_find_gpio() or of_get_gpio_hog() * @dflags: gpiod_flags - optional GPIO initialization flags * - * Return 0 on success, -ENOENT if no GPIO has been assigned to the + * Returns: + * 0 on success, -ENOENT if no GPIO has been assigned to the * requested function and/or index, or another IS_ERR() code if an error * occurred while trying to acquire the GPIO. */ @@ -4440,7 +4515,8 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, * This variant of gpiod_get() allows to access GPIOs other than the first * defined one for functions that define several GPIOs. * - * Return a valid GPIO descriptor, -ENOENT if no GPIO has been assigned to the + * Returns: + * A valid GPIO descriptor, -ENOENT if no GPIO has been assigned to the * requested function and/or index, or another IS_ERR() code if an error * occurred while trying to acquire the GPIO. 
*/ @@ -4468,6 +4544,11 @@ EXPORT_SYMBOL_GPL(gpiod_get_index); * This is equivalent to gpiod_get_index(), except that when no GPIO with the * specified index was assigned to the requested function it will return NULL. * This is convenient for drivers that need to handle optional GPIOs. + * + * Returns: + * A valid GPIO descriptor, NULL if no GPIO has been assigned to the + * requested function and/or index, or another IS_ERR() code if an error + * occurred while trying to acquire the GPIO. */ struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, const char *con_id, @@ -4491,6 +4572,9 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional); * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from * of_find_gpio() or of_get_gpio_hog() * @dflags: gpiod_flags - optional GPIO initialization flags + * + * Returns: + * 0 on success, or negative errno on failure. */ int gpiod_hog(struct gpio_desc *desc, const char *name, unsigned long lflags, enum gpiod_flags dflags) @@ -4547,9 +4631,11 @@ static void gpiochip_free_hogs(struct gpio_chip *gc) * * This function acquires all the GPIOs defined under a given function. * - * Return a struct gpio_descs containing an array of descriptors, -ENOENT if - * no GPIO has been assigned to the requested function, or another IS_ERR() - * code if an error occurred while trying to acquire the GPIOs. + * Returns: + * The GPIO descriptors corresponding to the function @con_id of device + * dev, -ENOENT if no GPIO has been assigned to the requested function, + * or another IS_ERR() code if an error occurred while trying to acquire + * the GPIOs. */ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, const char *con_id, @@ -4675,6 +4761,12 @@ EXPORT_SYMBOL_GPL(gpiod_get_array); * * This is equivalent to gpiod_get_array(), except that when no GPIO was * assigned to the requested function it will return NULL. + * + * Returns: + * The GPIO descriptors corresponding to the function @con_id of device + * dev, NULL if no GPIO has been assigned to the requested function, + * or another IS_ERR() code if an error occurred while trying to acquire + * the GPIOs. 
*/ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev, const char *con_id, diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 4de0bf1a62d383..067197d61d57e4 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -89,9 +89,21 @@ static inline struct gpio_device *to_gpio_device(struct device *dev) return container_of(dev, struct gpio_device, dev); } -/* gpio suffixes used for ACPI and device tree lookup */ +/* GPIO suffixes used for ACPI and device tree lookup */ extern const char *const gpio_suffixes[]; -extern const size_t gpio_suffix_count; + +#define for_each_gpio_property_name(propname, con_id) \ + for (const char * const *__suffixes = gpio_suffixes; \ + *__suffixes && ({ \ + const char *__gs = *__suffixes; \ + \ + if (con_id) \ + snprintf(propname, sizeof(propname), "%s-%s", con_id, __gs); \ + else \ + snprintf(propname, sizeof(propname), "%s", __gs); \ + 1; \ + }); \ + __suffixes++) /** * struct gpio_array - Opaque descriptor for a structure of GPIO array attributes diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 6b2c6b91f96250..1cb5a4f1929335 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -107,7 +107,7 @@ config DRM_KMS_HELPER config DRM_PANIC bool "Display a user-friendly message when a kernel panic occurs" - depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE) + depends on DRM select FONT_SUPPORT help Enable a drm panic handler, which will display a user-friendly message @@ -149,6 +149,37 @@ config DRM_PANIC_SCREEN or by writing to /sys/module/drm/parameters/panic_screen sysfs entry Default is "user" +config DRM_PANIC_SCREEN_QR_CODE + bool "Add a panic screen with a QR code" + depends on DRM_PANIC && RUST + help + This option adds a QR code generator, and a panic screen with a QR + code. The QR code will contain the last lines of kmsg and other debug + information. This should be easier for the user to report a kernel + panic, with all debug information available. + To use this panic screen, also set DRM_PANIC_SCREEN to "qr_code" + +config DRM_PANIC_SCREEN_QR_CODE_URL + string "Base URL of the QR code in the panic screen" + depends on DRM_PANIC_SCREEN_QR_CODE + help + This option sets the base URL to report the kernel panic. If it's set + the QR code will contain the URL and the kmsg compressed with zlib as + a URL parameter. If it's empty, the QR code will contain the kmsg as + uncompressed text only. + There is a demo code in javascript, to decode and uncompress the kmsg + data from the URL parameter at https://github.com/kdj0c/panic_report + +config DRM_PANIC_SCREEN_QR_VERSION + int "Maximum version (size) of the QR code." + depends on DRM_PANIC_SCREEN_QR_CODE + default 40 + help + This option limits the version (or size) of the QR code. QR code + version ranges from Version 1 (21x21) to Version 40 (177x177). + Smaller QR code are easier to read, but will contain less debugging + data. Default is 40. 
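As a rough illustration (not part of this patch), a configuration fragment that opts in to the new QR-code panic screen could look like the following; CONFIG_RUST=y is needed because DRM_PANIC_SCREEN_QR_CODE depends on RUST, and the URL and version values here are placeholders:

    CONFIG_RUST=y
    CONFIG_DRM_PANIC=y
    CONFIG_DRM_PANIC_SCREEN="qr_code"
    CONFIG_DRM_PANIC_SCREEN_QR_CODE=y
    CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL="https://example.com/panic"
    CONFIG_DRM_PANIC_SCREEN_QR_VERSION=40

As the DRM_PANIC_SCREEN help text above notes, the active panic screen can also be switched at runtime by writing to /sys/module/drm/parameters/panic_screen.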
+ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS bool "Enable refcount backtrace history in the DP MST helpers" depends on STACKTRACE_SUPPORT diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index fa432a1ac9e2b7..784229d4504dcb 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -89,6 +89,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \ drm_privacy_screen_x86.o drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o drm-$(CONFIG_DRM_PANIC) += drm_panic.o +drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o obj-$(CONFIG_DRM) += drm.o obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 38408e4e158e57..c7b18c52825d67 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -39,23 +39,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \ -I$(FULL_AMD_PATH)/amdkfd -subdir-ccflags-y := -Wextra -subdir-ccflags-y += -Wunused -subdir-ccflags-y += -Wmissing-prototypes -subdir-ccflags-y += -Wmissing-declarations -subdir-ccflags-y += -Wmissing-include-dirs -subdir-ccflags-y += -Wold-style-definition -subdir-ccflags-y += -Wmissing-format-attribute -# Need this to avoid recursive variable evaluation issues -cond-flags := $(call cc-option, -Wunused-but-set-variable) \ - $(call cc-option, -Wunused-const-variable) \ - $(call cc-option, -Wstringop-truncation) \ - $(call cc-option, -Wpacked-not-aligned) -subdir-ccflags-y += $(cond-flags) -subdir-ccflags-y += -Wno-unused-parameter -subdir-ccflags-y += -Wno-type-limits -subdir-ccflags-y += -Wno-sign-compare -subdir-ccflags-y += -Wno-missing-field-initializers +# Locally disable W=1 warnings enabled in drm subsystem Makefile subdir-ccflags-y += -Wno-override-init subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 137a88b8de4536..9b1e0ede05a452 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -118,6 +118,8 @@ #define MAX_GPU_INSTANCE 64 +#define GFX_SLICE_PERIOD msecs_to_jiffies(250) + struct amdgpu_gpu_instance { struct amdgpu_device *adev; int mgpu_fan_enabled; @@ -235,6 +237,7 @@ extern int sched_policy; extern bool debug_evictions; extern bool no_system_mem_limit; extern int halt_if_hws_hang; +extern uint amdgpu_svm_default_granularity; #else static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS; static const bool __maybe_unused debug_evictions; /* = false */ @@ -347,9 +350,9 @@ enum amdgpu_kiq_irq { AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, AMDGPU_CP_KIQ_IRQ_LAST }; -#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */ -#define MAX_KIQ_REG_WAIT (amdgpu_sriov_vf(adev) ? 50000 : 5000) /* in usecs, extend for VF */ -#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ +#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */ +#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ +#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ #define MAX_KIQ_REG_TRY 1000 int amdgpu_device_ip_set_clockgating_state(void *dev, @@ -823,17 +826,6 @@ struct amdgpu_mqd { struct amdgpu_reset_domain; struct amdgpu_fru_info; -struct amdgpu_reset_info { - /* reset dump register */ - u32 *reset_dump_reg_list; - u32 *reset_dump_reg_value; - int num_regs; - -#ifdef CONFIG_DEV_COREDUMP - struct amdgpu_coredump_info *coredump_info; -#endif -}; - /* * Non-zero (true) if the GPU has VRAM. 
Zero (false) otherwise. */ @@ -1091,10 +1083,6 @@ struct amdgpu_device { struct amdgpu_virt virt; - /* link all shadow bo */ - struct list_head shadow_list; - struct mutex shadow_list_lock; - /* record hw reset is performed */ bool has_hw_reset; u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; @@ -1157,8 +1145,6 @@ struct amdgpu_device { struct mutex benchmark_mutex; - struct amdgpu_reset_info reset_info; - bool scpm_enabled; uint32_t scpm_status; @@ -1175,6 +1161,11 @@ struct amdgpu_device { bool debug_disable_soft_recovery; bool debug_use_vram_fw_buf; bool debug_enable_ras_aca; + bool debug_exp_resets; + + bool enforce_isolation[MAX_XCP]; + /* Added this mutex for cleaner shader isolation between GFX and compute processes */ + struct mutex enforce_isolation_mutex; }; static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, @@ -1484,7 +1475,6 @@ extern const int amdgpu_max_kms_ioctl; int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags); void amdgpu_driver_unload_kms(struct drm_device *dev); -void amdgpu_driver_lastclose_kms(struct drm_device *dev); int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); @@ -1588,13 +1578,6 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { } #endif -#if defined(CONFIG_DRM_AMD_DC) -int amdgpu_dm_display_resume(struct amdgpu_device *adev ); -#else -static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; } -#endif - - void amdgpu_register_gpu_instance(struct amdgpu_device *adev); void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 19158cc30f31f2..2ca12717313573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -80,6 +80,9 @@ static void aca_banks_release(struct aca_banks *banks) { struct aca_bank_node *node, *tmp; + if (list_empty(&banks->list)) + return; + list_for_each_entry_safe(node, tmp, &banks->list, node) { list_del(&node->node); kvfree(node); @@ -453,13 +456,13 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er switch (type) { case ACA_ERROR_TYPE_UE: - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, count); break; case ACA_ERROR_TYPE_CE: - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, count); break; case ACA_ERROR_TYPE_DEFERRED: - amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count); + amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, count); break; default: break; @@ -508,7 +511,7 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h return -EINVAL; } - /* udpate aca bank to aca source error_cache first */ + /* update aca bank to aca source error_cache first */ ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL); if (ret) return ret; @@ -562,9 +565,13 @@ static void aca_error_fini(struct aca_error *aerr) struct aca_bank_error *bank_error, *tmp; mutex_lock(&aerr->lock); + if (list_empty(&aerr->list)) + goto out_unlock; + list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) aca_bank_error_remove(aerr, bank_error); +out_unlock: 
mutex_destroy(&aerr->lock); } @@ -680,6 +687,9 @@ static void aca_manager_fini(struct aca_handle_manager *mgr) { struct aca_handle *handle, *tmp; + if (list_empty(&mgr->list)) + return; + list_for_each_entry_safe(handle, tmp, &mgr->list, node) amdgpu_aca_remove_handle(handle); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 03205e3c374638..4f08b153cb66d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -364,15 +364,15 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, return r; } -void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj) +void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj) { - struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj; + struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj; - amdgpu_bo_reserve(bo, true); - amdgpu_bo_kunmap(bo); - amdgpu_bo_unpin(bo); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&(bo)); + amdgpu_bo_reserve(*bo, true); + amdgpu_bo_kunmap(*bo); + amdgpu_bo_unpin(*bo); + amdgpu_bo_unreserve(*bo); + amdgpu_bo_unref(bo); } int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size, @@ -783,22 +783,6 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, return 0; } -bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev, - int hub_inst, int hub_type) -{ - if (!hub_type) { - if (adev->gfxhub.funcs->query_utcl2_poison_status) - return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst); - else - return false; - } else { - if (adev->mmhub.funcs->query_utcl2_poison_status) - return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst); - else - return false; - } -} - int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev) { return kgd2kfd_check_and_lock_kfd(); @@ -887,3 +871,21 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, return r; } + +/* Stop scheduling on KFD */ +int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id) +{ + if (!adev->kfd.init_complete) + return 0; + + return kgd2kfd_stop_sched(adev->kfd.dev, node_id); +} + +/* Start scheduling on KFD */ +int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id) +{ + if (!adev->kfd.init_complete) + return 0; + + return kgd2kfd_start_sched(adev->kfd.dev, node_id); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index e7bb1ca3580142..f9d1194484423a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -235,7 +235,7 @@ int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr, bool mqd_gfx9); -void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj); +void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj); int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size, void **mem_obj); void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj); @@ -264,6 +264,8 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, uint32_t *payload); int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, u32 inst); +int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id); +int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id); 
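The two scheduling helpers added above are thin wrappers: they return 0 when KFD never completed initialization and otherwise forward to kgd2kfd_start_sched()/kgd2kfd_stop_sched() for the given node. A minimal, hypothetical caller (not part of this patch) that needs a partition to itself might bracket its work like this:

	/* Hypothetical sketch: pause KFD user-queue scheduling on one node,
	 * run work that must not race with KFD queues, then resume.
	 */
	static int example_exclusive_work(struct amdgpu_device *adev, uint32_t node_id)
	{
		int r;

		/* Returns 0 immediately if KFD init never completed. */
		r = amdgpu_amdkfd_stop_sched(adev, node_id);
		if (r)
			return r;

		/* ... exclusive use of the partition ... */

		return amdgpu_amdkfd_start_sched(adev, node_id);
	}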
/* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when @@ -322,7 +324,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, void **kptr, uint64_t *size); void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem); -int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo); +int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart); int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info, struct dma_fence __rcu **ef); @@ -345,11 +347,9 @@ void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *ad pasid_notify pasid_fn, void *data, uint32_t reset); bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev); -bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem); +bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem); void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); -bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev, - int hub_inst, int hub_type); int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag, int8_t xcp_id); void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, @@ -426,6 +426,8 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask); int kgd2kfd_check_and_lock_kfd(void); void kgd2kfd_unlock_kfd(void); +int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id); +int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id); #else static inline int kgd2kfd_init(void) { @@ -496,5 +498,15 @@ static inline int kgd2kfd_check_and_lock_kfd(void) static inline void kgd2kfd_unlock_kfd(void) { } + +static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id) +{ + return 0; +} + +static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) +{ + return 0; +} #endif #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index aff08321e97639..8dfdb18197c497 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -191,4 +191,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, + .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr, + .hqd_reset = kgd_gfx_v9_hqd_reset, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 3a3f3ce09f00db..9435af2e6bdc01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -20,7 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include -#include #include #include #include "amdgpu.h" @@ -300,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus if (r) goto out; } else { - drm_sched_start(&ring->sched, false); + drm_sched_start(&ring->sched); } } @@ -418,5 +417,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, - .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings + .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, + .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr, + .hqd_reset = kgd_gfx_v9_hqd_reset }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index a5c7259cf2a3e8..e2ae714a700f85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -541,5 +541,7 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { kgd_gfx_v9_4_3_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode, .set_address_watch = kgd_gfx_v9_4_3_set_address_watch, - .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch + .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch, + .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr, + .hqd_reset = kgd_gfx_v9_hqd_reset }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 3ab6c3aa0ad1a9..62176d607befad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -1070,6 +1070,20 @@ static void program_trap_handler_settings(struct amdgpu_device *adev, unlock_srbm(adev); } +uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id, + uint32_t inst) +{ + return 0; +} + +uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id, + uint32_t inst, unsigned int utimeout) +{ + return 0; +} + const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, @@ -1097,4 +1111,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, + .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr, + .hqd_reset = kgd_gfx_v10_hqd_reset }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 67bcaa3d422641..9efd2dd4fdd703 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -56,3 +56,12 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t grace_period, uint32_t *reg_offset, uint32_t *reg_data); +uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev, + uint32_t pipe_id, + uint32_t queue_id, + uint32_t inst); +uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev, + uint32_t pipe_id, + uint32_t queue_id, + uint32_t inst, + unsigned int utimeout); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 8c8437a4383f7b..c718bedda0cacd 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -680,5 +680,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode, .set_address_watch = kgd_gfx_v10_set_address_watch, - .clear_address_watch = kgd_gfx_v10_clear_address_watch + .clear_address_watch = kgd_gfx_v10_clear_address_watch, + .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr, + .hqd_reset = kgd_gfx_v10_hqd_reset }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index b61a32d6af4b8a..a4ba49cb22db45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -786,6 +786,20 @@ static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev, return 0; } +static uint64_t kgd_gfx_v11_hqd_get_pq_addr(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id, + uint32_t inst) +{ + return 0; +} + +static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id, + uint32_t inst, unsigned int utimeout) +{ + return 0; +} + const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .program_sh_mem_settings = program_sh_mem_settings_v11, .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11, @@ -808,5 +822,7 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode, .set_address_watch = kgd_gfx_v11_set_address_watch, - .clear_address_watch = kgd_gfx_v11_clear_address_watch + .clear_address_watch = kgd_gfx_v11_clear_address_watch, + .hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr, + .hqd_reset = kgd_gfx_v11_hqd_reset }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 5a35a8ca89222b..3bc0cbf45bc59a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -950,28 +950,30 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) * @inst: xcc's instance number on a multi-XCC setup */ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, - int *wave_cnt, int *vmid, uint32_t inst) + struct kfd_cu_occupancy *queue_cnt, uint32_t inst) { int pipe_idx; int queue_slot; unsigned int reg_val; - + unsigned int wave_cnt; /* * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID * parameters to read out waves in flight. Get VMID if there are * non-zero waves in flight. 
*/ - *vmid = 0xFF; - *wave_cnt = 0; pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe; queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe; - soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst); - reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + - queue_slot); - *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; - if (*wave_cnt != 0) - *vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) & - CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT; + soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, GET_INST(GC, inst)); + reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot); + wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; + if (wave_cnt != 0) { + queue_cnt->wave_cnt += wave_cnt; + queue_cnt->doorbell_off = + (RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL) & + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK) >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + } } /** @@ -981,9 +983,8 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * or more queues running and submitting waves to compute units. * * @adev: Handle of device from which to get number of waves in flight - * @pasid: Identifies the process for which this query call is invoked - * @pasid_wave_cnt: Output parameter updated with number of waves in flight that - * belong to process with given pasid + * @cu_occupancy: Array that gets filled with wave_cnt and doorbell offset + * for comparison later. * @max_waves_per_cu: Output parameter updated with maximum number of waves * possible per Compute Unit * @inst: xcc's instance number on a multi-XCC setup @@ -1011,34 +1012,28 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * number of waves that are in flight for the queue at specified index. The * index ranges from 0 to 7. * - * If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID - * of the wave(s). + * If non-zero waves are in flight, store the corresponding doorbell offset + * of the queue, along with the wave count. * - * Determine if VMID from above step maps to pasid provided as parameter. If - * it matches agrregate the wave count. That the VMID will not match pasid is - * a normal condition i.e. a device is expected to support multiple queues - * from multiple proceses. + * Determine if the queue belongs to the process by comparing the doorbell + * offset against the process's queues. If it matches, aggregate the wave + * count for the process. 
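A minimal sketch of the matching step the kerneldoc above describes: a consumer on the KFD side (not part of this hunk) walks the filled array and only aggregates entries whose doorbell offset belongs to the process. The kfd_cu_occupancy fields wave_cnt and doorbell_off are the ones filled by get_wave_count() above; the helper itself and its parameters are assumptions for illustration:

static int example_sum_process_waves(const struct kfd_cu_occupancy *occ,
                                     int nr_entries,
                                     const uint32_t *proc_doorbells,
                                     int nr_doorbells)
{
        int i, j, waves = 0;

        for (i = 0; i < nr_entries; i++) {
                if (!occ[i].wave_cnt)
                        continue;
                /* Only count queues whose doorbell belongs to this process */
                for (j = 0; j < nr_doorbells; j++) {
                        if (occ[i].doorbell_off == proc_doorbells[j]) {
                                waves += occ[i].wave_cnt;
                                break;
                        }
                }
        }

        return waves;
}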
* * Reading registers referenced above involves programming GRBM appropriately */ -void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, - int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst) +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, + struct kfd_cu_occupancy *cu_occupancy, + int *max_waves_per_cu, uint32_t inst) { int qidx; - int vmid; int se_idx; - int sh_idx; int se_cnt; - int sh_cnt; - int wave_cnt; int queue_map; - int pasid_tmp; int max_queue_cnt; - int vmid_wave_cnt = 0; DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES); lock_spi_csq_mutexes(adev); - soc15_grbm_select(adev, 1, 0, 0, 0, inst); + soc15_grbm_select(adev, 1, 0, 0, 0, GET_INST(GC, inst)); /* * Iterate through the shader engines and arrays of the device @@ -1048,51 +1043,38 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, AMDGPU_MAX_QUEUES); max_queue_cnt = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; - sh_cnt = adev->gfx.config.max_sh_per_se; se_cnt = adev->gfx.config.max_shader_engines; for (se_idx = 0; se_idx < se_cnt; se_idx++) { - for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { + amdgpu_gfx_select_se_sh(adev, se_idx, 0, 0xffffffff, inst); + queue_map = RREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_CSQ_WF_ACTIVE_STATUS); + + /* + * Assumption: queue map encodes following schema: four + * pipes per each micro-engine, with each pipe mapping + * eight queues. This schema is true for GFX9 devices + * and must be verified for newer device families + */ + for (qidx = 0; qidx < max_queue_cnt; qidx++) { + /* Skip queues that are not associated with + * compute functions + */ + if (!test_bit(qidx, cp_queue_bitmap)) + continue; - amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst); - queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS); + if (!(queue_map & (1 << qidx))) + continue; - /* - * Assumption: queue map encodes following schema: four - * pipes per each micro-engine, with each pipe mapping - * eight queues.
This schema is true for GFX9 devices - * and must be verified for newer device families - */ - for (qidx = 0; qidx < max_queue_cnt; qidx++) { - - /* Skip qeueus that are not associated with - * compute functions - */ - if (!test_bit(qidx, cp_queue_bitmap)) - continue; - - if (!(queue_map & (1 << qidx))) - continue; - - /* Get number of waves in flight and aggregate them */ - get_wave_count(adev, qidx, &wave_cnt, &vmid, - inst); - if (wave_cnt != 0) { - pasid_tmp = - RREG32(SOC15_REG_OFFSET(OSSSYS, inst, - mmIH_VMID_0_LUT) + vmid); - if (pasid_tmp == pasid) - vmid_wave_cnt += wave_cnt; - } - } + /* Get number of waves in flight and aggregate them */ + get_wave_count(adev, qidx, &cu_occupancy[qidx], + inst); } } amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst); - soc15_grbm_select(adev, 0, 0, 0, 0, inst); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst)); unlock_spi_csq_mutexes(adev); /* Update the output parameters and return */ - *pasid_wave_cnt = vmid_wave_cnt; *max_waves_per_cu = adev->gfx.cu_info.simd_per_cu * adev->gfx.cu_info.max_waves_per_simd; } @@ -1144,6 +1126,109 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, kgd_gfx_v9_unlock_srbm(adev, inst); } +uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id, + uint32_t inst) +{ + uint32_t low, high; + uint64_t queue_addr = 0; + + if (!adev->debug_exp_resets && + !adev->gfx.num_gfx_rings) + return 0; + + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); + amdgpu_gfx_rlc_enter_safe_mode(adev, inst); + + if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE)) + goto unlock_out; + + low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE); + high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI); + + /* only concerned with user queues. */ + if (!high) + goto unlock_out; + + queue_addr = (((queue_addr | high) << 32) | low) << 8; + +unlock_out: + amdgpu_gfx_rlc_exit_safe_mode(adev, inst); + kgd_gfx_v9_release_queue(adev, inst); + + return queue_addr; +} + +/* assume queue acquired */ +static int kgd_gfx_v9_hqd_dequeue_wait(struct amdgpu_device *adev, uint32_t inst, + unsigned int utimeout) +{ + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + while (true) { + uint32_t temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE); + + if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) + return 0; + + if (time_after(jiffies, end_jiffies)) + return -ETIME; + + usleep_range(500, 1000); + } +} + +uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id, + uint32_t inst, unsigned int utimeout) +{ + uint32_t low, high, pipe_reset_data = 0; + uint64_t queue_addr = 0; + + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); + amdgpu_gfx_rlc_enter_safe_mode(adev, inst); + + if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE)) + goto unlock_out; + + low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE); + high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI); + + /* only concerned with user queues. 
*/ + if (!high) + goto unlock_out; + + queue_addr = (((queue_addr | high) << 32) | low) << 8; + + pr_debug("Attempting queue reset on XCC %i pipe id %i queue id %i\n", + inst, pipe_id, queue_id); + + /* assume previous dequeue request issued will take affect after reset */ + WREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_COMPUTE_QUEUE_RESET, 0x1); + + if (!kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout)) + goto unlock_out; + + pr_debug("Attempting pipe reset on XCC %i pipe id %i\n", inst, pipe_id); + + pipe_reset_data = REG_SET_FIELD(pipe_reset_data, CP_MEC_CNTL, MEC_ME1_PIPE0_RESET, 1); + pipe_reset_data = pipe_reset_data << pipe_id; + + WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, pipe_reset_data); + WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, 0); + + if (kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout)) + queue_addr = 0; + +unlock_out: + pr_debug("queue reset on XCC %i pipe id %i queue id %i %s\n", + inst, pipe_id, queue_id, !!queue_addr ? "succeeded!" : "failed!"); + amdgpu_gfx_rlc_exit_safe_mode(adev, inst); + kgd_gfx_v9_release_queue(adev, inst); + + return queue_addr; +} + const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, @@ -1172,4 +1257,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, + .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr, + .hqd_reset = kgd_gfx_v9_hqd_reset }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index ce424615f59b5b..b6a91a552aa431 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -52,8 +52,9 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); -void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, - int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst); +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, + struct kfd_cu_occupancy *cu_occupancy, + int *max_waves_per_cu, uint32_t inst); void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst); @@ -101,3 +102,12 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t grace_period, uint32_t *reg_offset, uint32_t *reg_data); +uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev, + uint32_t pipe_id, + uint32_t queue_id, + uint32_t inst); +uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev, + uint32_t pipe_id, + uint32_t queue_id, + uint32_t inst, + unsigned int utimeout); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 11672bfe4fad69..ce5ca304dba93b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include @@ -818,18 +817,13 @@ static int kfd_mem_export_dmabuf(struct kgd_mem *mem) if (!mem->dmabuf) { struct amdgpu_device *bo_adev; struct dma_buf *dmabuf; - int r, fd; bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); 
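The hqd_get_pq_addr and hqd_reset hooks just wired into gfx_v9_kfd2kgd both return the queue's PQ base address, or 0 when the queue is inactive, is a kernel queue, or the reset did not stick; the GFX10/GFX11 tables only carry stubs that always return 0. A sketch of how a KFD-side reset path might consume the pair; the wrapper below and its policy are assumptions:

static bool example_try_hqd_reset(struct amdgpu_device *adev,
                                  const struct kfd2kgd_calls *f,
                                  uint32_t pipe_id, uint32_t queue_id,
                                  uint32_t inst, unsigned int timeout_ms)
{
        /* 0 means there is no user queue to reset on this slot */
        if (!f->hqd_get_pq_addr(adev, pipe_id, queue_id, inst))
                return false;

        /* Non-zero return: the queue (or, as a fallback, its pipe) was reset */
        return f->hqd_reset(adev, pipe_id, queue_id, inst, timeout_ms) != 0;
}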
- r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file, + dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file, mem->gem_handle, mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? - DRM_RDWR : 0, &fd); - if (r) - return r; - dmabuf = dma_buf_get(fd); - close_fd(fd); - if (WARN_ON_ONCE(IS_ERR(dmabuf))) + DRM_RDWR : 0); + if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); mem->dmabuf = dmabuf; } @@ -1252,7 +1246,7 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, return ret; } -static void unmap_bo_from_gpuvm(struct kgd_mem *mem, +static int unmap_bo_from_gpuvm(struct kgd_mem *mem, struct kfd_mem_attachment *entry, struct amdgpu_sync *sync) { @@ -1260,11 +1254,18 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem, struct amdgpu_device *adev = entry->adev; struct amdgpu_vm *vm = bo_va->base.vm; + if (bo_va->queue_refcount) { + pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount); + return -EBUSY; + } + amdgpu_vm_bo_unmap(adev, bo_va, entry->va); amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); amdgpu_sync_fence(sync, bo_va->last_pt_update); + + return 0; } static int update_gpuvm_pte(struct kgd_mem *mem, @@ -1498,7 +1499,7 @@ static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain) } } - ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0); + ret = amdgpu_bo_pin(bo, domain); if (ret) pr_err("Error in Pinning BO to domain: %d\n", domain); @@ -2191,7 +2192,10 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", entry->va, entry->va + bo_size, entry); - unmap_bo_from_gpuvm(mem, entry, ctx.sync); + ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync); + if (ret) + goto unreserve_out; + entry->is_mapped = false; mem->mapped_to_gpu_memory--; @@ -2226,11 +2230,12 @@ int amdgpu_amdkfd_gpuvm_sync_memory( /** * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count * @bo: Buffer object to be mapped + * @bo_gart: Return bo reference * * Before return, bo reference count is incremented. To release the reference and unpin/ * unmap the BO, call amdgpu_amdkfd_free_gtt_mem. 
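As the kerneldoc above spells out, the helper takes an extra reference that is later dropped through amdgpu_amdkfd_free_gtt_mem(), which now receives a double pointer so it can clear the caller's handle. A minimal usage sketch under that contract; the surrounding caller is assumed:

        struct amdgpu_bo *bo_gart = NULL;
        int r;

        r = amdgpu_amdkfd_map_gtt_bo_to_gart(mem->bo, &bo_gart);
        if (r)
                return r;

        /* ... use the GART mapping held through bo_gart ... */

        /* Release path: unpins, unmaps and NULLs bo_gart via amdgpu_bo_unref() */
        amdgpu_amdkfd_free_gtt_mem(adev, (void **)&bo_gart);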
*/ -int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo) +int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart) { int ret; @@ -2257,7 +2262,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo) amdgpu_bo_unreserve(bo); - bo = amdgpu_bo_ref(bo); + *bo_gart = amdgpu_bo_ref(bo); return 0; @@ -3200,12 +3205,13 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, return 0; } -bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem) +bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem) { + struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv); struct kfd_mem_attachment *entry; list_for_each_entry(entry, &mem->attachments, list) { - if (entry->is_mapped && entry->adev == adev) + if (entry->is_mapped && entry->bo_va->base.vm == vm) return true; } return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 7dc102f0bc1d3c..0c8975ac5af9ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1018,8 +1018,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, if (clock_type == COMPUTE_ENGINE_PLL_PARAM) { args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock); - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + if (amdgpu_atom_execute_table(adev->mode_info.atom_context, + index, (uint32_t *)&args, sizeof(args))) + return -EINVAL; dividers->post_div = args.v3.ucPostDiv; dividers->enable_post_div = (args.v3.ucCntlFlag & @@ -1039,8 +1040,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, if (strobe_mode) args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + if (amdgpu_atom_execute_table(adev->mode_info.atom_context, + index, (uint32_t *)&args, sizeof(args))) + return -EINVAL; dividers->post_div = args.v5.ucPostDiv; dividers->enable_post_div = (args.v5.ucCntlFlag & @@ -1058,8 +1060,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, /* fusion */ args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */ - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + if (amdgpu_atom_execute_table(adev->mode_info.atom_context, + index, (uint32_t *)&args, sizeof(args))) + return -EINVAL; dividers->post_divider = dividers->post_div = args.v4.ucPostDiv; dividers->real_clock = le32_to_cpu(args.v4.ulClock); @@ -1070,8 +1073,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, args.v6_in.ulClock.ulComputeClockFlag = clock_type; args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */ - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + if (amdgpu_atom_execute_table(adev->mode_info.atom_context, + index, (uint32_t *)&args, sizeof(args))) + return -EINVAL; dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv); dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac); @@ -1113,8 +1117,9 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, if (strobe_mode) args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + if (amdgpu_atom_execute_table(adev->mode_info.atom_context, + index, (uint32_t *)&args, sizeof(args))) + return 
-EINVAL; mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac); mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv); @@ -1211,8 +1216,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, args.v2.ucVoltageMode = 0; args.v2.usVoltageLevel = 0; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + if (amdgpu_atom_execute_table(adev->mode_info.atom_context, + index, (uint32_t *)&args, sizeof(args))) + return -EINVAL; *voltage = le16_to_cpu(args.v2.usVoltageLevel); break; @@ -1221,8 +1227,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL; args.v3.usVoltageLevel = cpu_to_le16(voltage_id); - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + if (amdgpu_atom_execute_table(adev->mode_info.atom_context, + index, (uint32_t *)&args, sizeof(args))) + return -EINVAL; *voltage = le16_to_cpu(args.v3.usVoltageLevel); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 618e469e36222b..45affc02548c16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -87,8 +87,9 @@ static bool check_atom_bios(uint8_t *bios, size_t size) * part of the system bios. On boot, the system bios puts a * copy of the igp rom at the start of vram if a discrete card is * present. + * For SR-IOV, the vbios image is also put in VRAM in the VF. */ -static bool igp_read_bios_from_vram(struct amdgpu_device *adev) +static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev) { uint8_t __iomem *bios; resource_size_t vram_base; @@ -284,10 +285,6 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) acpi_status status; bool found = false; - /* ATRM is for the discrete card only */ - if (adev->flags & AMD_IS_APU) - return false; - /* ATRM is for on-platform devices only */ if (dev_is_removable(&adev->pdev->dev)) return false; @@ -343,11 +340,8 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev) { - if (adev->flags & AMD_IS_APU) - return igp_read_bios_from_vram(adev); - else - return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ? - false : amdgpu_asic_read_disabled_bios(adev); + return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ? 
+ false : amdgpu_asic_read_disabled_bios(adev); } #ifdef CONFIG_ACPI @@ -414,7 +408,36 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) } #endif -bool amdgpu_get_bios(struct amdgpu_device *adev) +static bool amdgpu_get_bios_apu(struct amdgpu_device *adev) +{ + if (amdgpu_acpi_vfct_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from VFCT\n"); + goto success; + } + + if (amdgpu_read_bios_from_vram(adev)) { + dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n"); + goto success; + } + + if (amdgpu_read_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); + goto success; + } + + if (amdgpu_read_platform_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from platform\n"); + goto success; + } + + dev_err(adev->dev, "Unable to locate a BIOS ROM\n"); + return false; + +success: + return true; +} + +static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev) { if (amdgpu_atrm_get_bios(adev)) { dev_info(adev->dev, "Fetched VBIOS from ATRM\n"); @@ -426,11 +449,17 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) goto success; } - if (igp_read_bios_from_vram(adev)) { + /* this is required for SR-IOV */ + if (amdgpu_read_bios_from_vram(adev)) { dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n"); goto success; } + if (amdgpu_read_platform_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from platform\n"); + goto success; + } + if (amdgpu_read_bios(adev)) { dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); goto success; @@ -446,19 +475,28 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) goto success; } - if (amdgpu_read_platform_bios(adev)) { - dev_info(adev->dev, "Fetched VBIOS from platform\n"); - goto success; - } - dev_err(adev->dev, "Unable to locate a BIOS ROM\n"); return false; success: - adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10; return true; } +bool amdgpu_get_bios(struct amdgpu_device *adev) +{ + bool found; + + if (adev->flags & AMD_IS_APU) + found = amdgpu_get_bios_apu(adev); + else + found = amdgpu_get_bios_dgpu(adev); + + if (found) + adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10; + + return found; +} + /* helper function for soc15 and onwards to read bios from rom */ bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev, u8 *bios, u32 length_bytes) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index c3d89088123dbd..16153d275d7ae5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -414,7 +414,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, return -EINVAL; } - err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name); if (err) { DRM_ERROR("Failed to load firmware \"%s\"", fw_name); amdgpu_ucode_release(&adev->pm.fw); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index cae7479c3ecf7e..344e0a9ee08a99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -249,11 +249,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector, static struct edid * amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev) { - if (adev->mode_info.bios_hardcoded_edid) { - return kmemdup((unsigned char *)adev->mode_info.bios_hardcoded_edid, - adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); - } - return NULL; + return drm_edid_duplicate(drm_edid_raw(adev->mode_info.bios_hardcoded_edid)); } static void 
amdgpu_connector_get_edid(struct drm_connector *connector) @@ -442,6 +438,9 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder, continue; mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); + if (!mode) + return; + drm_mode_probed_add(connector, mode); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 6dfdff58bffd1f..1e475eb01417ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -263,6 +263,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, if (size < sizeof(struct drm_amdgpu_bo_list_in)) goto free_partial_kdata; + /* Only a single BO list is allowed to simplify handling. */ + if (p->bo_list) + ret = -EINVAL; + ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata); if (ret) goto free_partial_kdata; @@ -292,6 +296,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, num_ibs[i], &p->jobs[i]); if (ret) goto free_all_kdata; + p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id]; } p->gang_leader = p->jobs[p->gang_leader_idx]; @@ -1106,7 +1111,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) struct drm_gpu_scheduler *sched = entity->rq->sched; struct amdgpu_ring *ring = to_amdgpu_ring(sched); - if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub)) + if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub)) return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 0e1a11b6b989d7..cbef720de77971 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -2026,100 +2026,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL, DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL, amdgpu_debugfs_sclk_set, "%llu\n"); -static ssize_t amdgpu_reset_dump_register_list_read(struct file *f, - char __user *buf, size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; - char reg_offset[12]; - int i, ret, len = 0; - - if (*pos) - return 0; - - memset(reg_offset, 0, 12); - ret = down_read_killable(&adev->reset_domain->sem); - if (ret) - return ret; - - for (i = 0; i < adev->reset_info.num_regs; i++) { - sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]); - up_read(&adev->reset_domain->sem); - if (copy_to_user(buf + len, reg_offset, strlen(reg_offset))) - return -EFAULT; - - len += strlen(reg_offset); - ret = down_read_killable(&adev->reset_domain->sem); - if (ret) - return ret; - } - - up_read(&adev->reset_domain->sem); - *pos += len; - - return len; -} - -static ssize_t amdgpu_reset_dump_register_list_write(struct file *f, - const char __user *buf, size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; - char reg_offset[11]; - uint32_t *new = NULL, *tmp = NULL; - unsigned int len = 0; - int ret, i = 0; - - do { - memset(reg_offset, 0, 11); - if (copy_from_user(reg_offset, buf + len, - min(10, (size-len)))) { - ret = -EFAULT; - goto error_free; - } - - new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL); - if (!new) { - ret = -ENOMEM; - goto error_free; - } - tmp = new; - if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) { - ret = -EINVAL; - goto error_free; - } - - len += ret; - i++; - } while (len < size); - - new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL); - if (!new) { - ret = -ENOMEM; - goto error_free; - } - ret = 
down_write_killable(&adev->reset_domain->sem); - if (ret) - goto error_free; - - swap(adev->reset_info.reset_dump_reg_list, tmp); - swap(adev->reset_info.reset_dump_reg_value, new); - adev->reset_info.num_regs = i; - up_write(&adev->reset_domain->sem); - ret = size; - -error_free: - if (tmp != new) - kfree(tmp); - kfree(new); - return ret; -} - -static const struct file_operations amdgpu_reset_dump_register_list = { - .owner = THIS_MODULE, - .read = amdgpu_reset_dump_register_list_read, - .write = amdgpu_reset_dump_register_list_write, - .llseek = default_llseek -}; - int amdgpu_debugfs_init(struct amdgpu_device *adev) { struct dentry *root = adev_to_drm(adev)->primary->debugfs_root; @@ -2204,8 +2110,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) &amdgpu_debugfs_vm_info_fops); debugfs_create_file("amdgpu_benchmark", 0200, root, adev, &amdgpu_benchmark_fops); - debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev, - &amdgpu_reset_dump_register_list); adev->debugfs_vbios_blob.data = adev->bios; adev->debugfs_vbios_blob.size = adev->bios_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index f0a44d0dec271c..5ac59b62020cf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -28,8 +28,8 @@ #include "atom.h" #ifndef CONFIG_DEV_COREDUMP -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context) +void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check, + bool vram_lost, struct amdgpu_job *job) { } #else @@ -203,7 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, struct amdgpu_coredump_info *coredump = data; struct drm_print_iterator iter; struct amdgpu_vm_fault_info *fault_info; - int i, ver; + int ver; iter.data = buffer; iter.offset = 0; @@ -236,7 +236,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, drm_printf(&p, "\nSOC Memory Information\n"); drm_printf(&p, "real vram size: %llu\n", coredump->adev->gmc.real_vram_size); drm_printf(&p, "visible vram size: %llu\n", coredump->adev->gmc.visible_vram_size); - drm_printf(&p, "visible vram size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size); + drm_printf(&p, "gtt size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size); /* GDS Config */ drm_printf(&p, "\nGDS Config\n"); @@ -315,16 +315,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, } } - if (coredump->reset_vram_lost) + if (coredump->skip_vram_check) + drm_printf(&p, "VRAM lost check is skipped!\n"); + else if (coredump->reset_vram_lost) drm_printf(&p, "VRAM is lost due to GPU reset!\n"); - if (coredump->adev->reset_info.num_regs) { - drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); - - for (i = 0; i < coredump->adev->reset_info.num_regs; i++) - drm_printf(&p, "0x%08x: 0x%08x\n", - coredump->adev->reset_info.reset_dump_reg_list[i], - coredump->adev->reset_info.reset_dump_reg_value[i]); - } return count - iter.remain; } @@ -334,12 +328,11 @@ static void amdgpu_devcoredump_free(void *data) kfree(data); } -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context) +void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check, + bool vram_lost, struct amdgpu_job *job) { - struct amdgpu_coredump_info *coredump; struct drm_device *dev = adev_to_drm(adev); - struct amdgpu_job *job = reset_context->job; + struct 
amdgpu_coredump_info *coredump; struct drm_sched_job *s_job; coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT); @@ -349,11 +342,12 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, return; } + coredump->skip_vram_check = skip_vram_check; coredump->reset_vram_lost = vram_lost; - if (reset_context->job && reset_context->job->vm) { + if (job && job->vm) { + struct amdgpu_vm *vm = job->vm; struct amdgpu_task_info *ti; - struct amdgpu_vm *vm = reset_context->job->vm; ti = amdgpu_vm_get_task_info_vm(vm); if (ti) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h index 52459512cb2b11..ef9772c6bcc9e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h @@ -26,7 +26,6 @@ #define __AMDGPU_DEV_COREDUMP_H__ #include "amdgpu.h" -#include "amdgpu_reset.h" #ifdef CONFIG_DEV_COREDUMP @@ -36,12 +35,12 @@ struct amdgpu_coredump_info { struct amdgpu_device *adev; struct amdgpu_task_info reset_task_info; struct timespec64 reset_time; + bool skip_vram_check; bool reset_vram_lost; struct amdgpu_ring *ring; }; #endif -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context); - +void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check, + bool vram_lost, struct amdgpu_job *job); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bcacf2e35eba08..c2394c8b4d6b21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1916,6 +1916,8 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev) */ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) { + int i; + if (amdgpu_sched_jobs < 4) { dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", amdgpu_sched_jobs); @@ -1970,6 +1972,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); + for (i = 0; i < MAX_XCP; i++) + adev->enforce_isolation[i] = !!enforce_isolation; + return 0; } @@ -2471,6 +2476,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) */ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; struct pci_dev *parent; int i, r; bool total; @@ -2608,7 +2614,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (!total) return -ENODEV; - amdgpu_amdkfd_device_probe(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); + if (ip_block->status.valid != false) + amdgpu_amdkfd_device_probe(adev); + adev->cg_flags &= amdgpu_cg_mask; adev->pg_flags &= amdgpu_pg_mask; @@ -3948,6 +3957,27 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) adev->ram_is_direct_mapped = true; } +#if defined(CONFIG_HSA_AMD_P2P) +/** + * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled. 
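With the reworked signature above, amdgpu_coredump() no longer needs a full reset context: the job is passed directly and the VRAM-lost check can be skipped for resets that never touch VRAM contents. A hypothetical call site, not taken from this patch:

        /* skip_vram_check = true makes the report print "VRAM lost check is skipped!" */
        amdgpu_coredump(adev, true, false, job);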
+ * + * @adev: amdgpu_device pointer + * + * return if IOMMU remapping bar address + */ +static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev) +{ + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(adev->dev); + if (domain && (domain->type == IOMMU_DOMAIN_DMA || + domain->type == IOMMU_DOMAIN_DMA_FQ)) + return true; + + return false; +} +#endif + static const struct attribute *amdgpu_dev_attributes[] = { &dev_attr_pcie_replay_count.attr, NULL @@ -4055,6 +4085,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->notifier_lock); mutex_init(&adev->pm.stable_pstate_ctx_lock); mutex_init(&adev->benchmark_mutex); + mutex_init(&adev->gfx.reset_sem_mutex); + /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */ + mutex_init(&adev->enforce_isolation_mutex); + mutex_init(&adev->gfx.kfd_sch_mutex); amdgpu_device_init_apu_flags(adev); @@ -4073,9 +4107,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->mm_stats.lock); spin_lock_init(&adev->wb.lock); - INIT_LIST_HEAD(&adev->shadow_list); - mutex_init(&adev->shadow_list_lock); - INIT_LIST_HEAD(&adev->reset_list); INIT_LIST_HEAD(&adev->ras_list); @@ -4086,6 +4117,21 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_device_delayed_init_work_handler); INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, amdgpu_device_delay_enable_gfx_off); + /* + * Initialize the enforce_isolation work structures for each XCP + * partition. This work handler is responsible for enforcing shader + * isolation on AMD GPUs. It counts the number of emitted fences for + * each GFX and compute ring. If there are any fences, it schedules + * the `enforce_isolation_work` to be run after a delay. If there are + * no fences, it signals the Kernel Fusion Driver (KFD) to resume the + * runqueue. + */ + for (i = 0; i < MAX_XCP; i++) { + INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work, + amdgpu_gfx_enforce_isolation_handler); + adev->gfx.enforce_isolation[i].adev = adev; + adev->gfx.enforce_isolation[i].xcp_id = i; + } INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); @@ -4482,6 +4528,9 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) { dev_info(adev->dev, "amdgpu: finishing device.\n"); flush_delayed_work(&adev->delayed_init_work); + + if (adev->mman.initialized) + drain_workqueue(adev->mman.bdev.wq); adev->shutdown = true; /* make sure IB test finished before entering exclusive mode @@ -4502,9 +4551,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) } amdgpu_fence_driver_hw_fini(adev); - if (adev->mman.initialized) - drain_workqueue(adev->mman.bdev.wq); - if (adev->pm.sysfs_initialized) amdgpu_pm_sysfs_fini(adev); if (adev->ucode_sysfs_en) @@ -4980,80 +5026,6 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) return 0; } -/** - * amdgpu_device_recover_vram - Recover some VRAM contents - * - * @adev: amdgpu_device pointer - * - * Restores the contents of VRAM buffers from the shadows in GTT. Used to - * restore things like GPUVM page tables after a GPU reset where - * the contents of VRAM might be lost. - * - * Returns: - * 0 on success, negative error code on failure. 
- */ -static int amdgpu_device_recover_vram(struct amdgpu_device *adev) -{ - struct dma_fence *fence = NULL, *next = NULL; - struct amdgpu_bo *shadow; - struct amdgpu_bo_vm *vmbo; - long r = 1, tmo; - - if (amdgpu_sriov_runtime(adev)) - tmo = msecs_to_jiffies(8000); - else - tmo = msecs_to_jiffies(100); - - dev_info(adev->dev, "recover vram bo from shadow start\n"); - mutex_lock(&adev->shadow_list_lock); - list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { - /* If vm is compute context or adev is APU, shadow will be NULL */ - if (!vmbo->shadow) - continue; - shadow = vmbo->shadow; - - /* No need to recover an evicted BO */ - if (!shadow->tbo.resource || - shadow->tbo.resource->mem_type != TTM_PL_TT || - shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || - shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) - continue; - - r = amdgpu_bo_restore_shadow(shadow, &next); - if (r) - break; - - if (fence) { - tmo = dma_fence_wait_timeout(fence, false, tmo); - dma_fence_put(fence); - fence = next; - if (tmo == 0) { - r = -ETIMEDOUT; - break; - } else if (tmo < 0) { - r = tmo; - break; - } - } else { - fence = next; - } - } - mutex_unlock(&adev->shadow_list_lock); - - if (fence) - tmo = dma_fence_wait_timeout(fence, false, tmo); - dma_fence_put(fence); - - if (r < 0 || tmo <= 0) { - dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); - return -EIO; - } - - dev_info(adev->dev, "recover vram bo from shadow done\n"); - return 0; -} - - /** * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf * @@ -5116,12 +5088,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; - if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { + if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) amdgpu_inc_vram_lost(adev); - r = amdgpu_device_recover_vram(adev); - } - if (r) - return r; /* need to be called during full access so we can't do it later like * bare-metal does. 
@@ -5278,16 +5246,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, { int i, r = 0; struct amdgpu_job *job = NULL; + struct amdgpu_device *tmp_adev = reset_context->reset_req_dev; bool need_full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); if (reset_context->reset_req_dev == adev) job = reset_context->job; - if (amdgpu_sriov_vf(adev)) { - /* stop the data exchange thread */ - amdgpu_virt_fini_data_exchange(adev); - } + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_pre_reset(adev); amdgpu_fence_driver_isr_toggle(adev, true); @@ -5336,6 +5303,16 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, } } + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) { + dev_info(tmp_adev->dev, "Dumping IP State\n"); + /* Trigger ip dump before we reset the asic */ + for (i = 0; i < tmp_adev->num_ip_blocks; i++) + if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) + tmp_adev->ip_blocks[i].version->funcs + ->dump_ip_state((void *)tmp_adev); + dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); + } + if (need_full_reset) r = amdgpu_device_ip_suspend(adev); if (need_full_reset) @@ -5348,47 +5325,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, return r; } -static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev) -{ - int i; - - lockdep_assert_held(&adev->reset_domain->sem); - - for (i = 0; i < adev->reset_info.num_regs; i++) { - adev->reset_info.reset_dump_reg_value[i] = - RREG32(adev->reset_info.reset_dump_reg_list[i]); - - trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i], - adev->reset_info.reset_dump_reg_value[i]); - } - - return 0; -} - int amdgpu_do_asic_reset(struct list_head *device_list_handle, struct amdgpu_reset_context *reset_context) { struct amdgpu_device *tmp_adev = NULL; bool need_full_reset, skip_hw_reset, vram_lost = false; int r = 0; - uint32_t i; /* Try reset handler method first */ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, reset_list); - if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) { - amdgpu_reset_reg_dumps(tmp_adev); - - dev_info(tmp_adev->dev, "Dumping IP State\n"); - /* Trigger ip dump before we reset the asic */ - for (i = 0; i < tmp_adev->num_ip_blocks; i++) - if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) - tmp_adev->ip_blocks[i].version->funcs - ->dump_ip_state((void *)tmp_adev); - dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); - } - reset_context->reset_device_list = device_list_handle; r = amdgpu_reset_perform_reset(tmp_adev, reset_context); /* If reset handler not implemented, continue; otherwise return */ @@ -5461,7 +5408,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, vram_lost = amdgpu_device_check_vram_lost(tmp_adev); if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) - amdgpu_coredump(tmp_adev, vram_lost, reset_context); + amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); if (vram_lost) { DRM_INFO("VRAM is lost due to GPU reset!\n"); @@ -5513,7 +5460,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, * bad_page_threshold value to fix this once * probing driver again. */ - if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) { + if (!amdgpu_ras_is_rma(tmp_adev)) { /* must succeed. 
*/ amdgpu_ras_resume(tmp_adev); } else { @@ -5541,9 +5488,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, } } - if (!r) - r = amdgpu_device_recover_vram(tmp_adev); - else + if (r) tmp_adev->asic_reset_res = r; } @@ -5879,7 +5824,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched, true); + drm_sched_start(&ring->sched); } if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) @@ -5891,8 +5836,14 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, tmp_adev->asic_reset_res = 0; if (r) { - /* bad news, how to tell it to userspace ? */ - dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); + /* bad news, how to tell it to userspace ? + * for ras error, we should report GPU bad status instead of + * reset failure + */ + if (reset_context->src != AMDGPU_RESET_SRC_RAS || + !amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) + dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", + atomic_read(&tmp_adev->gpu_reset_counter)); amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); @@ -6138,18 +6089,24 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev) { #ifdef CONFIG_HSA_AMD_P2P - uint64_t address_mask = peer_adev->dev->dma_mask ? - ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); - resource_size_t aper_limit = - adev->gmc.aper_base + adev->gmc.aper_size - 1; bool p2p_access = !adev->gmc.xgmi.connected_to_cpu && !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); - return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size && - adev->gmc.real_vram_size == adev->gmc.visible_vram_size && - !(adev->gmc.aper_base & address_mask || - aper_limit & address_mask)); + bool is_large_bar = adev->gmc.visible_vram_size && + adev->gmc.real_vram_size == adev->gmc.visible_vram_size; + bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev); + + if (!p2p_addressable) { + uint64_t address_mask = peer_adev->dev->dma_mask ? 
+ ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); + resource_size_t aper_limit = + adev->gmc.aper_base + adev->gmc.aper_size - 1; + + p2p_addressable = !(adev->gmc.aper_base & address_mask || + aper_limit & address_mask); + } + return pcie_p2p && is_large_bar && p2p_access && p2p_addressable; #else return false; #endif @@ -6374,7 +6331,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev) if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched, true); + drm_sched_start(&ring->sched); } amdgpu_device_unset_mp1_state(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 092ec11258cdd7..b119d27271c1a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -233,6 +233,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, } if (!adev->enable_virtual_display) { + new_abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev, new_abo->flags)); if (unlikely(r != 0)) { @@ -1474,7 +1475,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) || ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) && - connector->display_info.is_hdmi && + connector && connector->display_info.is_hdmi && amdgpu_display_is_hdtv_mode(mode)))) { if (amdgpu_encoder->underscan_hborder != 0) amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder; @@ -1759,6 +1760,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev) r = amdgpu_bo_reserve(aobj, true); if (r == 0) { + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); if (r != 0) dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 094498a0964b51..81d9877c87357d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -117,9 +117,10 @@ * - 3.56.0 - Update IB start address and size alignment for decode and encode * - 3.57.0 - Compute tunneling on GFX10+ * - 3.58.0 - Add GFX12 DCC support + * - 3.59.0 - Cleared VRAM */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 58 +#define KMS_DRIVER_MINOR 59 #define KMS_DRIVER_PATCHLEVEL 0 /* @@ -131,6 +132,7 @@ enum AMDGPU_DEBUG_MASK { AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2), AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3), AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4), + AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5), }; unsigned int amdgpu_vram_limit = UINT_MAX; @@ -168,6 +170,16 @@ uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu; char *amdgpu_virtual_display; bool enforce_isolation; + +/* Specifies the default granularity for SVM, used in buffer + * migration and restoration of backing memory when handling + * recoverable page faults. 
+ * + * The value is given as log(numPages(buffer)); for a 2 MiB + * buffer it computes to be 9 + */ +uint amdgpu_svm_default_granularity = 9; + /* * OverDrive(bit 14) disabled by default * GFX DCS(bit 19) disabled by default @@ -319,6 +331,13 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444); MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); module_param_named(msi, amdgpu_msi, int, 0444); +/** + * DOC: svm_default_granularity (uint) + * Used in buffer migration and handling of recoverable page faults + */ +MODULE_PARM_DESC(svm_default_granularity, "SVM's default granularity in log(2^Pages), default 9 = 2^9 = 2 MiB"); +module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint, 0644); + /** * DOC: lockup_timeout (string) * Set GPU scheduler timeout value in ms. @@ -2199,6 +2218,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev) pr_info("debug: enable RAS ACA\n"); adev->debug_enable_ras_aca = true; } + + if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_EXP_RESETS) { + pr_info("debug: enable experimental reset features\n"); + adev->debug_exp_resets = true; + } } static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) @@ -2908,6 +2932,7 @@ static const struct file_operations amdgpu_driver_kms_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = drm_show_fdinfo, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) @@ -2953,7 +2978,6 @@ static const struct drm_driver amdgpu_kms_driver = { DRIVER_SYNCOBJ_TIMELINE, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, - .lastclose = amdgpu_driver_lastclose_kms, .ioctls = amdgpu_ioctls_kms, .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, @@ -2980,7 +3004,6 @@ const struct drm_driver amdgpu_partition_driver = { DRIVER_SYNCOBJ_TIMELINE, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, - .lastclose = amdgpu_driver_lastclose_kms, .ioctls = amdgpu_ioctls_kms, .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index 8283d682f543b9..7cc980bf4725d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -55,8 +55,6 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); int amdgpu_gart_init(struct amdgpu_device *adev); void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev); void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 0e617dff8765e2..1a5df8b9466161 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -43,8 +43,6 @@ #include "amdgpu_hmm.h" #include "amdgpu_xgmi.h" -static const struct drm_gem_object_funcs amdgpu_gem_object_funcs; - static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) { struct ttm_buffer_object *bo = vmf->vma->vm_private_data; @@ -87,11 +85,11 @@ static const struct vm_operations_struct amdgpu_gem_vm_ops = { static void 
amdgpu_gem_object_free(struct drm_gem_object *gobj) { - struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj); - if (robj) { - amdgpu_hmm_unregister(robj); - amdgpu_bo_unref(&robj); + if (aobj) { + amdgpu_hmm_unregister(aobj); + ttm_bo_put(&aobj->tbo); } } @@ -126,7 +124,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bo = &ubo->bo; *obj = &bo->tbo.base; - (*obj)->funcs = &amdgpu_gem_object_funcs; return 0; } @@ -295,7 +292,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str return drm_gem_ttm_mmap(obj, vma); } -static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { +const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { .free = amdgpu_gem_object_free, .open = amdgpu_gem_object_open, .close = amdgpu_gem_object_close, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index f30264782ba27b..3a8f57900a3aaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -33,6 +33,8 @@ #define AMDGPU_GEM_DOMAIN_MAX 0x3 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base) +extern const struct drm_gem_object_funcs amdgpu_gem_object_funcs; + unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1849510a308add..83e54697f0ee83 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -24,10 +24,13 @@ */ #include +#include + #include "amdgpu.h" #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" #include "amdgpu_ras.h" +#include "amdgpu_reset.h" #include "amdgpu_xcp.h" #include "amdgpu_xgmi.h" @@ -882,8 +885,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r int r; if (amdgpu_ras_is_supported(adev, ras_block->block)) { - if (!amdgpu_persistent_edc_harvesting_supported(adev)) - amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); + if (!amdgpu_persistent_edc_harvesting_supported(adev)) { + r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); + if (r) + return r; + } r = amdgpu_ras_block_late_init(adev, ras_block); if (r) @@ -1027,7 +1033,10 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_ pr_err("critical bug! 
too many kiq readers\n"); goto failed_unlock; } - amdgpu_ring_alloc(ring, 32); + r = amdgpu_ring_alloc(ring, 32); + if (r) + goto failed_unlock; + amdgpu_ring_emit_rreg(ring, reg, reg_val_offs); r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) @@ -1093,7 +1102,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3 } spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); + r = amdgpu_ring_alloc(ring, 32); + if (r) + goto failed_unlock; + amdgpu_ring_emit_wreg(ring, reg, v); r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); if (r) @@ -1129,6 +1141,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3 failed_undo: amdgpu_ring_undo(ring); +failed_unlock: spin_unlock_irqrestore(&kiq->ring_lock, flags); failed_kiq_write: dev_err(adev->dev, "failed to write reg:%x\n", reg); @@ -1381,6 +1394,217 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, return sysfs_emit(buf, "%s\n", supported_partition); } +static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct drm_gpu_scheduler *sched = &ring->sched; + struct drm_sched_entity entity; + struct dma_fence *f; + struct amdgpu_job *job; + struct amdgpu_ib *ib; + int i, r; + + /* Initialize the scheduler entity */ + r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); + if (r) { + dev_err(adev->dev, "Failed setting up GFX kernel entity.\n"); + goto err; + } + + r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL, + 64, 0, + &job); + if (r) + goto err; + + job->enforce_isolation = true; + + ib = &job->ibs[0]; + for (i = 0; i <= ring->funcs->align_mask; ++i) + ib->ptr[i] = ring->funcs->nop; + ib->length_dw = ring->funcs->align_mask + 1; + + f = amdgpu_job_submit(job); + + r = dma_fence_wait(f, false); + if (r) + goto err; + + dma_fence_put(f); + + /* Clean up the scheduler entity */ + drm_sched_entity_destroy(&entity); + return 0; + +err: + return r; +} + +static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id) +{ + int num_xcc = NUM_XCC(adev->gfx.xcc_mask); + struct amdgpu_ring *ring; + int num_xcc_to_clear; + int i, r, xcc_id; + + if (adev->gfx.num_xcc_per_xcp) + num_xcc_to_clear = adev->gfx.num_xcc_per_xcp; + else + num_xcc_to_clear = 1; + + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; + if ((ring->xcp_id == xcp_id) && ring->sched.ready) { + r = amdgpu_gfx_run_cleaner_shader_job(ring); + if (r) + return r; + num_xcc_to_clear--; + break; + } + } + } + + if (num_xcc_to_clear) + return -ENOENT; + + return 0; +} + +static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + int ret; + long value; + + if (amdgpu_in_reset(adev)) + return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; + + ret = kstrtol(buf, 0, &value); + + if (ret) + return -EINVAL; + + if (value < 0) + return -EINVAL; + + if (adev->xcp_mgr) { + if (value >= adev->xcp_mgr->num_xcps) + return -EINVAL; + } else { + if (value > 1) + return -EINVAL; + } + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(ddev->dev); + return ret; + } + + ret = 
amdgpu_gfx_run_cleaner_shader(adev, value); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + if (ret) + return ret; + + return count; +} + +static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + int i; + ssize_t size = 0; + + if (adev->xcp_mgr) { + for (i = 0; i < adev->xcp_mgr->num_xcps; i++) { + size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]); + if (i < (adev->xcp_mgr->num_xcps - 1)) + size += sysfs_emit_at(buf, size, " "); + } + buf[size++] = '\n'; + } else { + size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]); + } + + return size; +} + +static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + long partition_values[MAX_XCP] = {0}; + int ret, i, num_partitions; + const char *input_buf = buf; + + for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) { + ret = sscanf(input_buf, "%ld", &partition_values[i]); + if (ret <= 0) + break; + + /* Move the pointer to the next value in the string */ + input_buf = strchr(input_buf, ' '); + if (input_buf) { + input_buf++; + } else { + i++; + break; + } + } + num_partitions = i; + + if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps) + return -EINVAL; + + if (!adev->xcp_mgr && num_partitions != 1) + return -EINVAL; + + for (i = 0; i < num_partitions; i++) { + if (partition_values[i] != 0 && partition_values[i] != 1) + return -EINVAL; + } + + mutex_lock(&adev->enforce_isolation_mutex); + + for (i = 0; i < num_partitions; i++) { + if (adev->enforce_isolation[i] && !partition_values[i]) { + /* Going from enabled to disabled */ + amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i)); + } else if (!adev->enforce_isolation[i] && partition_values[i]) { + /* Going from disabled to enabled */ + amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i)); + } + adev->enforce_isolation[i] = partition_values[i]; + } + + mutex_unlock(&adev->enforce_isolation_mutex); + + return count; +} + +static DEVICE_ATTR(run_cleaner_shader, 0200, + NULL, amdgpu_gfx_set_run_cleaner_shader); + +static DEVICE_ATTR(enforce_isolation, 0644, + amdgpu_gfx_get_enforce_isolation, + amdgpu_gfx_set_enforce_isolation); + static DEVICE_ATTR(current_compute_partition, 0644, amdgpu_gfx_get_current_compute_partition, amdgpu_gfx_set_compute_partition); @@ -1406,3 +1630,229 @@ void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_current_compute_partition); device_remove_file(adev->dev, &dev_attr_available_compute_partition); } + +int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) +{ + int r; + + if (!amdgpu_sriov_vf(adev)) { + r = device_create_file(adev->dev, &dev_attr_enforce_isolation); + if (r) + return r; + } + + r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); + if (r) + return r; + + return 0; +} + +void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_sriov_vf(adev)) + device_remove_file(adev->dev, &dev_attr_enforce_isolation); + device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); +} + +int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev, + unsigned int cleaner_shader_size) +{ + if (!adev->gfx.enable_cleaner_shader) + return 
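The two DEVICE_ATTR entries above expose enforce_isolation (0644) and run_cleaner_shader (0200) as per-device sysfs files: enforce_isolation accepts one space-separated 0/1 value per partition (a single value when no xcp_mgr is present), and run_cleaner_shader accepts the index of the partition whose compute ring should run the cleaner shader. A small userspace C sketch of driving them; the /sys paths are assumptions for illustration and depend on the card number:

#include <stdio.h>

/* Assumed paths; the actual card number varies per system. */
#define ISOLATION_ATTR "/sys/class/drm/card0/device/enforce_isolation"
#define CLEANER_ATTR   "/sys/class/drm/card0/device/run_cleaner_shader"

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* Enable isolation on a non-partitioned GPU (single 0/1 value). */
	write_attr(ISOLATION_ATTR, "1");
	/* Run the cleaner shader on partition/XCP 0. */
	write_attr(CLEANER_ATTR, "0");
	return 0;
}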
-EOPNOTSUPP; + + return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.cleaner_shader_obj, + &adev->gfx.cleaner_shader_gpu_addr, + (void **)&adev->gfx.cleaner_shader_cpu_ptr); +} + +void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev) +{ + if (!adev->gfx.enable_cleaner_shader) + return; + + amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj, + &adev->gfx.cleaner_shader_gpu_addr, + (void **)&adev->gfx.cleaner_shader_cpu_ptr); +} + +void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, + unsigned int cleaner_shader_size, + const void *cleaner_shader_ptr) +{ + if (!adev->gfx.enable_cleaner_shader) + return; + + if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr) + memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr, + cleaner_shader_size); +} + +/** + * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver) + * @adev: amdgpu_device pointer + * @idx: Index of the scheduler to control + * @enable: Whether to enable or disable the KFD scheduler + * + * This function is used to control the KFD (Kernel Fusion Driver) scheduler + * from the KGD. It is part of the cleaner shader feature. This function plays + * a key role in enforcing process isolation on the GPU. + * + * The function uses a reference count mechanism (kfd_sch_req_count) to keep + * track of the number of requests to enable the KFD scheduler. When a request + * to enable the KFD scheduler is made, the reference count is decremented. + * When the reference count reaches zero, a delayed work is scheduled to + * enforce isolation after a delay of GFX_SLICE_PERIOD. + * + * When a request to disable the KFD scheduler is made, the function first + * checks if the reference count is zero. If it is, it cancels the delayed work + * for enforcing isolation and checks if the KFD scheduler is active. If the + * KFD scheduler is active, it sends a request to stop the KFD scheduler and + * sets the KFD scheduler state to inactive. Then, it increments the reference + * count. + * + * The function is synchronized using the kfd_sch_mutex to ensure that the KFD + * scheduler state and reference count are updated atomically. + * + * Note: If the reference count is already zero when a request to enable the + * KFD scheduler is made, it means there's an imbalance bug somewhere. The + * function triggers a warning in this case. + */ +static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx, + bool enable) +{ + mutex_lock(&adev->gfx.kfd_sch_mutex); + + if (enable) { + /* If the count is already 0, it means there's an imbalance bug somewhere. + * Note that the bug may be in a different caller than the one which triggers the + * WARN_ON_ONCE. 
+ */ + if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) { + dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n"); + goto unlock; + } + + adev->gfx.kfd_sch_req_count[idx]--; + + if (adev->gfx.kfd_sch_req_count[idx] == 0 && + adev->gfx.kfd_sch_inactive[idx]) { + schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, + GFX_SLICE_PERIOD); + } + } else { + if (adev->gfx.kfd_sch_req_count[idx] == 0) { + cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work); + if (!adev->gfx.kfd_sch_inactive[idx]) { + amdgpu_amdkfd_stop_sched(adev, idx); + adev->gfx.kfd_sch_inactive[idx] = true; + } + } + + adev->gfx.kfd_sch_req_count[idx]++; + } + +unlock: + mutex_unlock(&adev->gfx.kfd_sch_mutex); +} + +/** + * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation + * + * @work: work_struct. + * + * This function is the work handler for enforcing shader isolation on AMD GPUs. + * It counts the number of emitted fences for each GFX and compute ring. If there + * are any fences, it schedules the `enforce_isolation_work` to be run after a + * delay of `GFX_SLICE_PERIOD`. If there are no fences, it signals the Kernel Fusion + * Driver (KFD) to resume the runqueue. The function is synchronized using the + * `enforce_isolation_mutex`. + */ +void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) +{ + struct amdgpu_isolation_work *isolation_work = + container_of(work, struct amdgpu_isolation_work, work.work); + struct amdgpu_device *adev = isolation_work->adev; + u32 i, idx, fences = 0; + + if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION) + idx = 0; + else + idx = isolation_work->xcp_id; + + if (idx >= MAX_XCP) + return; + + mutex_lock(&adev->enforce_isolation_mutex); + for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) { + if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id) + fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]); + } + for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) { + if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id) + fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]); + } + if (fences) { + schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, + GFX_SLICE_PERIOD); + } else { + /* Tell KFD to resume the runqueue */ + if (adev->kfd.init_complete) { + WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]); + WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]); + amdgpu_amdkfd_start_sched(adev, idx); + adev->gfx.kfd_sch_inactive[idx] = false; + } + } + mutex_unlock(&adev->enforce_isolation_mutex); +} + +void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 idx; + + if (!adev->gfx.enable_cleaner_shader) + return; + + if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION) + idx = 0; + else + idx = ring->xcp_id; + + if (idx >= MAX_XCP) + return; + + mutex_lock(&adev->enforce_isolation_mutex); + if (adev->enforce_isolation[idx]) { + if (adev->kfd.init_complete) + amdgpu_gfx_kfd_sch_ctrl(adev, idx, false); + } + mutex_unlock(&adev->enforce_isolation_mutex); +} + +void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 idx; + + if (!adev->gfx.enable_cleaner_shader) + return; + + if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION) + idx = 0; + else + idx = ring->xcp_id; + + if (idx >= MAX_XCP) + return; + + mutex_lock(&adev->enforce_isolation_mutex); + if (adev->enforce_isolation[idx]) { + if (adev->kfd.init_complete) 
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true); + } + mutex_unlock(&adev->enforce_isolation_mutex); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 56cc58edbb4e9a..5644e10a86a99c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -34,6 +34,7 @@ #include "soc15.h" #include "amdgpu_ras.h" #include "amdgpu_ring_mux.h" +#include "amdgpu_xcp.h" /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L @@ -138,6 +139,10 @@ struct kiq_pm4_funcs { void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring, uint16_t pasid, uint32_t flush_type, bool all_hub); + void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring, + uint32_t queue_type, uint32_t me_id, + uint32_t pipe_id, uint32_t queue_id, + uint32_t xcc_id, uint32_t vmid); /* Packet sizes */ int set_resources_size; int map_queues_size; @@ -345,6 +350,12 @@ struct amdgpu_me { DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES); }; +struct amdgpu_isolation_work { + struct amdgpu_device *adev; + u32 xcp_id; + struct delayed_work work; +}; + struct amdgpu_gfx { struct mutex gpu_clock_mutex; struct amdgpu_gfx_config config; @@ -397,6 +408,7 @@ struct amdgpu_gfx { struct amdgpu_irq_src eop_irq; struct amdgpu_irq_src priv_reg_irq; struct amdgpu_irq_src priv_inst_irq; + struct amdgpu_irq_src bad_op_irq; struct amdgpu_irq_src cp_ecc_error_irq; struct amdgpu_irq_src sq_irq; struct amdgpu_irq_src rlc_gc_fed_irq; @@ -445,6 +457,21 @@ struct amdgpu_gfx { uint32_t *ip_dump_core; uint32_t *ip_dump_compute_queues; uint32_t *ip_dump_gfx_queues; + + struct mutex reset_sem_mutex; + + /* cleaner shader */ + struct amdgpu_bo *cleaner_shader_obj; + unsigned int cleaner_shader_size; + u64 cleaner_shader_gpu_addr; + void *cleaner_shader_cpu_ptr; + const void *cleaner_shader_ptr; + bool enable_cleaner_shader; + struct amdgpu_isolation_work enforce_isolation[MAX_XCP]; + /* Mutex for synchronizing KFD scheduler operations */ + struct mutex kfd_sch_mutex; + u64 kfd_sch_req_count[MAX_XCP]; + bool kfd_sch_inactive[MAX_XCP]; }; struct amdgpu_gfx_ras_reg_entry { @@ -546,6 +573,17 @@ void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, void *ras_error_status, void (*func)(struct amdgpu_device *adev, void *ras_error_status, int xcc_id)); +int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev, + unsigned int cleaner_shader_size); +void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev); +void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, + unsigned int cleaner_shader_size, + const void *cleaner_shader_ptr); +int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev); +void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev); +void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work); +void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring); static inline const char *amdgpu_gfx_compute_mode_desc(int mode) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h index 103a837ccc712b..c7b44aeb671b09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h @@ -38,8 +38,6 @@ struct amdgpu_gfxhub_funcs { void (*mode2_save_regs)(struct amdgpu_device *adev); void (*mode2_restore_regs)(struct amdgpu_device *adev); void (*halt)(struct amdgpu_device *adev); - bool (*query_utcl2_poison_status)(struct amdgpu_device *adev, 
- int xcc_id); }; struct amdgpu_gfxhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index b49b3650fd6217..17a19d49d30a57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -786,7 +786,8 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev, goto failed_kiq; might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY && + !amdgpu_reset_pending(adev->reset_domain)) { msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index b6a8bddada4c30..92d27d32de41ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -424,7 +424,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r || !idle) goto error; - if (amdgpu_vmid_uses_reserved(vm, vmhub)) { + if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) { r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); if (r || !id) goto error; @@ -476,15 +476,19 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID + * @adev: amdgpu_device pointer * @vm: the VM to check * @vmhub: the VMHUB which will be used * * Returns: True if the VM will use a reserved VMID. */ -bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub) +bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, unsigned int vmhub) { return vm->reserved_vmid[vmhub] || - (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0))); + (adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ? + vm->root.bo->xcp_id : 0] && + AMDGPU_IS_GFXHUB(vmhub)); } int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, @@ -600,9 +604,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) } } /* alloc a default reserved vmid to enforce isolation */ - if (enforce_isolation) - amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0)); - + for (i = 0; i < (adev->xcp_mgr ? 
adev->xcp_mgr->num_xcps : 1); i++) { + if (adev->enforce_isolation[i]) + amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i)); + } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index 240fa675126029..4012fb2dd08a59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -78,7 +78,8 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, struct amdgpu_vmid *id); -bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub); +bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev, + struct amdgpu_vm *vm, unsigned int vmhub); int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, unsigned vmhub); void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h index 44e2ea8c972826..b03664c66dd617 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h @@ -49,6 +49,7 @@ struct amdgpu_isp { const struct isp_funcs *funcs; struct mfd_cell *isp_cell; struct resource *isp_res; + struct resource *isp_i2c_res; struct isp_platform_data *isp_pdata; unsigned int harvest_config; const struct firmware *fw; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 908e1345515233..16f2605ac50b99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -30,6 +30,60 @@ #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_reset.h" +#include "amdgpu_dev_coredump.h" +#include "amdgpu_xgmi.h" + +static void amdgpu_job_do_core_dump(struct amdgpu_device *adev, + struct amdgpu_job *job) +{ + int i; + + dev_info(adev->dev, "Dumping IP State\n"); + for (i = 0; i < adev->num_ip_blocks; i++) + if (adev->ip_blocks[i].version->funcs->dump_ip_state) + adev->ip_blocks[i].version->funcs + ->dump_ip_state((void *)adev); + dev_info(adev->dev, "Dumping IP State Completed\n"); + + amdgpu_coredump(adev, true, false, job); +} + +static void amdgpu_job_core_dump(struct amdgpu_device *adev, + struct amdgpu_job *job) +{ + struct list_head device_list, *device_list_handle = NULL; + struct amdgpu_device *tmp_adev = NULL; + struct amdgpu_hive_info *hive = NULL; + + if (!amdgpu_sriov_vf(adev)) + hive = amdgpu_get_xgmi_hive(adev); + if (hive) + mutex_lock(&hive->hive_lock); + /* + * Reuse the logic in amdgpu_device_gpu_recover() to build list of + * devices for code dump + */ + INIT_LIST_HEAD(&device_list); + if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) + list_add_tail(&tmp_adev->reset_list, &device_list); + if (!list_is_first(&adev->reset_list, &device_list)) + list_rotate_to_front(&adev->reset_list, &device_list); + device_list_handle = &device_list; + } else { + list_add_tail(&adev->reset_list, &device_list); + device_list_handle = &device_list; + } + + /* Do the coredump for each device */ + list_for_each_entry(tmp_adev, device_list_handle, reset_list) + amdgpu_job_do_core_dump(tmp_adev, job); + + if (hive) { + mutex_unlock(&hive->hive_lock); + amdgpu_put_xgmi_hive(hive); + } +} static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) { @@ -48,9 +102,17 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) return DRM_GPU_SCHED_STAT_ENODEV; } - adev->job_hang = true; + /* + * Do the 
coredump immediately after a job timeout to get a very + * close dump/snapshot/representation of GPU's current error status + * Skip it for SRIOV, since VF FLR will be triggered by host driver + * before job timeout + */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_job_core_dump(adev, job); + if (amdgpu_gpu_recovery && amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { dev_err(adev->dev, "ring %s timeout, but soft recovered\n", @@ -72,6 +134,26 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) dma_fence_set_error(&s_job->s_fence->finished, -ETIME); + /* attempt a per ring reset */ + if (amdgpu_gpu_recovery && + ring->funcs->reset) { + /* stop the scheduler, but don't mess with the + * bad job yet because if ring reset fails + * we'll fall back to full GPU reset. + */ + drm_sched_wqueue_stop(&ring->sched); + r = amdgpu_ring_reset(ring, job->vmid); + if (!r) { + if (amdgpu_ring_sched_ready(ring)) + drm_sched_stop(&ring->sched, s_job); + atomic_inc(&ring->adev->gpu_reset_counter); + amdgpu_fence_driver_force_completion(ring); + if (amdgpu_ring_sched_ready(ring)) + drm_sched_start(&ring->sched); + goto exit; + } + } + if (amdgpu_device_should_recover_gpu(ring->adev)) { struct amdgpu_reset_context reset_context; memset(&reset_context, 0, sizeof(reset_context)); @@ -81,6 +163,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) reset_context.src = AMDGPU_RESET_SRC_JOB; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + /* + * To avoid an unnecessary extra coredump, as we have already + * got the very close representation of GPU's error status + */ + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); + r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); if (r) dev_err(adev->dev, "GPU Recovery Failed: %d\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index a963a25ddd6209..ce6b9ba967fff0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -76,6 +76,9 @@ struct amdgpu_job { /* job_run_counter >= 1 means a resubmit job */ uint32_t job_run_counter; + /* enforce isolation */ + bool enforce_isolation; + uint32_t num_ibs; struct amdgpu_ib ibs[]; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 66782be5917b9e..016a6f6c4267b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -43,6 +43,7 @@ #include "amdgpu_gem.h" #include "amdgpu_display.h" #include "amdgpu_ras.h" +#include "amdgpu_reset.h" #include "amd_pcie.h" void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) @@ -778,6 +779,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ? 
-EFAULT : 0; } case AMDGPU_INFO_READ_MMR_REG: { + int ret = 0; unsigned int n, alloc_size; uint32_t *regs; unsigned int se_num = (info->read_mmr_reg.instance >> @@ -787,24 +789,37 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & AMDGPU_INFO_MMR_SH_INDEX_MASK; + if (!down_read_trylock(&adev->reset_domain->sem)) + return -ENOENT; + /* set full masks if the userspace set all bits * in the bitfields */ - if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) + if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) { se_num = 0xffffffff; - else if (se_num >= AMDGPU_GFX_MAX_SE) - return -EINVAL; - if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) + } else if (se_num >= AMDGPU_GFX_MAX_SE) { + ret = -EINVAL; + goto out; + } + + if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) { sh_num = 0xffffffff; - else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) - return -EINVAL; + } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) { + ret = -EINVAL; + goto out; + } - if (info->read_mmr_reg.count > 128) - return -EINVAL; + if (info->read_mmr_reg.count > 128) { + ret = -EINVAL; + goto out; + } regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); - if (!regs) - return -ENOMEM; + if (!regs) { + ret = -ENOMEM; + goto out; + } + alloc_size = info->read_mmr_reg.count * sizeof(*regs); amdgpu_gfx_off_ctrl(adev, false); @@ -816,13 +831,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) info->read_mmr_reg.dword_offset + i); kfree(regs); amdgpu_gfx_off_ctrl(adev, true); - return -EFAULT; + ret = -EFAULT; + goto out; } } amdgpu_gfx_off_ctrl(adev, true); n = copy_to_user(out, regs, min(size, alloc_size)); kfree(regs); - return n ? -EFAULT : 0; + ret = (n ? -EFAULT : 0); +out: + up_read(&adev->reset_domain->sem); + return ret; } case AMDGPU_INFO_DEV_INFO: { struct drm_amdgpu_info_device *dev_info; @@ -1269,23 +1288,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return 0; } - -/* - * Outdated mess for old drm with Xorg being in charge (void function now). - */ -/** - * amdgpu_driver_lastclose_kms - drm callback for last close - * - * @dev: drm dev pointer - * - * Switch vga_switcheroo state after last close (all asics). 
- */ -void amdgpu_driver_lastclose_kms(struct drm_device *dev) -{ - drm_fb_helper_lastclose(dev); - vga_switcheroo_process_delayed_switch(); -} - /** * amdgpu_driver_open_kms - drm callback for open * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 2542bd7aa7c77f..18ee60378727fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -396,7 +396,6 @@ static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set, struct ras_err_data *err_data) { - struct ras_err_addr err_addr; struct amdgpu_smuio_mcm_config_info mcm_info; struct mca_bank_node *node, *tmp; struct mca_bank_entry *entry; @@ -421,27 +420,20 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r continue; memset(&mcm_info, 0, sizeof(mcm_info)); - memset(&err_addr, 0, sizeof(err_addr)); mcm_info.socket_id = entry->info.socket_id; mcm_info.die_id = entry->info.aid; - if (blk == AMDGPU_RAS_BLOCK__UMC) { - err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS]; - err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID]; - err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR]; - } - if (type == AMDGPU_MCA_ERROR_TYPE_UE) { amdgpu_ras_error_statistic_ue_count(err_data, - &mcm_info, &err_addr, (uint64_t)count); + &mcm_info, (uint64_t)count); } else { if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS])) amdgpu_ras_error_statistic_de_count(err_data, - &mcm_info, &err_addr, (uint64_t)count); + &mcm_info, (uint64_t)count); else amdgpu_ras_error_statistic_ce_count(err_data, - &mcm_info, &err_addr, (uint64_t)count); + &mcm_info, (uint64_t)count); } amdgpu_mca_bank_set_remove_node(mca_set, node); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 1cb1ec7beefed3..10b61ff63802ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -501,60 +501,50 @@ int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id) int amdgpu_mes_suspend(struct amdgpu_device *adev) { - struct idr *idp; - struct amdgpu_mes_process *process; - struct amdgpu_mes_gang *gang; struct mes_suspend_gang_input input; - int r, pasid; + int r; + + if (!amdgpu_mes_suspend_resume_all_supported(adev)) + return 0; + + memset(&input, 0x0, sizeof(struct mes_suspend_gang_input)); + input.suspend_all_gangs = 1; /* * Avoid taking any other locks under MES lock to avoid circular * lock dependencies. 
*/ amdgpu_mes_lock(&adev->mes); - - idp = &adev->mes.pasid_idr; - - idr_for_each_entry(idp, process, pasid) { - list_for_each_entry(gang, &process->gang_list, list) { - r = adev->mes.funcs->suspend_gang(&adev->mes, &input); - if (r) - DRM_ERROR("failed to suspend pasid %d gangid %d", - pasid, gang->gang_id); - } - } - + r = adev->mes.funcs->suspend_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); - return 0; + if (r) + DRM_ERROR("failed to suspend all gangs"); + + return r; } int amdgpu_mes_resume(struct amdgpu_device *adev) { - struct idr *idp; - struct amdgpu_mes_process *process; - struct amdgpu_mes_gang *gang; struct mes_resume_gang_input input; - int r, pasid; + int r; + + if (!amdgpu_mes_suspend_resume_all_supported(adev)) + return 0; + + memset(&input, 0x0, sizeof(struct mes_resume_gang_input)); + input.resume_all_gangs = 1; /* * Avoid taking any other locks under MES lock to avoid circular * lock dependencies. */ amdgpu_mes_lock(&adev->mes); - - idp = &adev->mes.pasid_idr; - - idr_for_each_entry(idp, process, pasid) { - list_for_each_entry(gang, &process->gang_list, list) { - r = adev->mes.funcs->resume_gang(&adev->mes, &input); - if (r) - DRM_ERROR("failed to resume pasid %d gangid %d", - pasid, gang->gang_id); - } - } - + r = adev->mes.funcs->resume_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); - return 0; + if (r) + DRM_ERROR("failed to resume all gangs"); + + return r; } static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev, @@ -793,6 +783,68 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id) return 0; } +int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id) +{ + unsigned long flags; + struct amdgpu_mes_queue *queue; + struct amdgpu_mes_gang *gang; + struct mes_reset_queue_input queue_input; + int r; + + /* + * Avoid taking any other locks under MES lock to avoid circular + * lock dependencies. 
+ */ + amdgpu_mes_lock(&adev->mes); + + /* remove the mes gang from idr list */ + spin_lock_irqsave(&adev->mes.queue_id_lock, flags); + + queue = idr_find(&adev->mes.queue_id_idr, queue_id); + if (!queue) { + spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); + amdgpu_mes_unlock(&adev->mes); + DRM_ERROR("queue id %d doesn't exist\n", queue_id); + return -EINVAL; + } + spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); + + DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n", + queue->doorbell_off); + + gang = queue->gang; + queue_input.doorbell_offset = queue->doorbell_off; + queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; + + r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); + if (r) + DRM_ERROR("failed to reset hardware queue, queue id = %d\n", + queue_id); + + amdgpu_mes_unlock(&adev->mes); + + return 0; +} + +int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type, + int me_id, int pipe_id, int queue_id, int vmid) +{ + struct mes_reset_queue_input queue_input; + int r; + + queue_input.queue_type = queue_type; + queue_input.use_mmio = true; + queue_input.me_id = me_id; + queue_input.pipe_id = pipe_id; + queue_input.queue_id = queue_id; + queue_input.vmid = vmid; + r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); + if (r) + DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n", + queue_id); + return r; +} + int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring) { @@ -838,6 +890,33 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, return r; } +int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + unsigned int vmid, + bool use_mmio) +{ + struct mes_reset_legacy_queue_input queue_input; + int r; + + memset(&queue_input, 0, sizeof(queue_input)); + + queue_input.queue_type = ring->funcs->type; + queue_input.doorbell_offset = ring->doorbell_index; + queue_input.me_id = ring->me; + queue_input.pipe_id = ring->pipe; + queue_input.queue_id = ring->queue; + queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + queue_input.wptr_addr = ring->wptr_gpu_addr; + queue_input.vmid = vmid; + queue_input.use_mmio = use_mmio; + + r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input); + if (r) + DRM_ERROR("failed to reset legacy queue\n"); + + return r; +} + uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) { struct mes_misc_op_input op_input; @@ -1533,7 +1612,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) pipe == AMDGPU_MES_SCHED_PIPE ? 
"" : "1"); } - r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name); + r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name); if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) { dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix); r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], @@ -1584,6 +1663,19 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) return r; } +bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) +{ + uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; + bool is_supported = false; + + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && + amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && + mes_rev >= 0x63) + is_supported = true; + + return is_supported; +} + #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index bcce1add4ef68d..96788c0f42f1be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -249,6 +249,18 @@ struct mes_remove_queue_input { uint64_t gang_context_addr; }; +struct mes_reset_queue_input { + uint32_t doorbell_offset; + uint64_t gang_context_addr; + bool use_mmio; + uint32_t queue_type; + uint32_t me_id; + uint32_t pipe_id; + uint32_t queue_id; + uint32_t xcc_id; + uint32_t vmid; +}; + struct mes_map_legacy_queue_input { uint32_t queue_type; uint32_t doorbell_offset; @@ -280,6 +292,18 @@ struct mes_resume_gang_input { uint64_t gang_context_addr; }; +struct mes_reset_legacy_queue_input { + uint32_t queue_type; + uint32_t doorbell_offset; + bool use_mmio; + uint32_t me_id; + uint32_t pipe_id; + uint32_t queue_id; + uint64_t mqd_addr; + uint64_t wptr_addr; + uint32_t vmid; +}; + enum mes_misc_opcode { MES_MISC_OP_WRITE_REG, MES_MISC_OP_READ_REG, @@ -348,6 +372,12 @@ struct amdgpu_mes_funcs { int (*misc_op)(struct amdgpu_mes *mes, struct mes_misc_op_input *input); + + int (*reset_legacy_queue)(struct amdgpu_mes *mes, + struct mes_reset_legacy_queue_input *input); + + int (*reset_hw_queue)(struct amdgpu_mes *mes, + struct mes_reset_queue_input *input); }; #define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev)) @@ -375,6 +405,9 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, struct amdgpu_mes_queue_properties *qprops, int *queue_id); int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id); +int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id); +int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type, + int me_id, int pipe_id, int queue_id, int vmid); int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); @@ -382,6 +415,10 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring, enum amdgpu_unmap_queues_action action, u64 gpu_addr, u64 seq); +int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + unsigned int vmid, + bool use_mmio); uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg); int amdgpu_mes_wreg(struct amdgpu_device *adev, @@ -479,4 +516,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes) memalloc_noreclaim_restore(mes->saved_flags); mutex_unlock(&mes->mutex_hidden); } + +bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev); #endif /* __AMDGPU_MES_H__ */ diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 95d676ee207f3f..1ca9d4ed8063a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -63,8 +63,6 @@ struct amdgpu_mmhub_funcs { uint64_t page_table_base); void (*update_power_gating)(struct amdgpu_device *adev, bool enable); - bool (*query_utcl2_poison_status)(struct amdgpu_device *adev, - int hub_inst); }; struct amdgpu_mmhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index d002b845d8acc2..5e3faefc551091 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -51,6 +51,7 @@ struct amdgpu_encoder; struct amdgpu_router; struct amdgpu_hpd; struct edid; +struct drm_edid; #define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base) #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base) @@ -326,8 +327,7 @@ struct amdgpu_mode_info { /* FMT dithering */ struct drm_property *dither_property; /* hardcoded DFP edid from BIOS */ - struct edid *bios_hardcoded_edid; - int bios_hardcoded_edid_size; + const struct drm_edid *bios_hardcoded_edid; /* firmware flags */ u32 firmware_flags; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e32161f6b67a37..44819cdba7fbe8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -77,24 +77,6 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) amdgpu_bo_destroy(tbo); } -static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; - struct amdgpu_bo_vm *vmbo; - - bo = shadow_bo->parent; - vmbo = to_amdgpu_bo_vm(bo); - /* in case amdgpu_device_recover_vram got NULL of bo->parent */ - if (!list_empty(&vmbo->shadow_list)) { - mutex_lock(&adev->shadow_list_lock); - list_del_init(&vmbo->shadow_list); - mutex_unlock(&adev->shadow_list_lock); - } - - amdgpu_bo_destroy(tbo); -} - /** * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo * @bo: buffer object to be checked @@ -108,8 +90,7 @@ static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) { if (bo->destroy == &amdgpu_bo_destroy || - bo->destroy == &amdgpu_bo_user_destroy || - bo->destroy == &amdgpu_bo_vm_destroy) + bo->destroy == &amdgpu_bo_user_destroy) return true; return false; @@ -583,6 +564,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (bo == NULL) return -ENOMEM; drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size); + bo->tbo.base.funcs = &amdgpu_gem_object_funcs; bo->vm_bo = NULL; bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain : bp->domain; @@ -722,52 +704,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, return r; } -/** - * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list - * - * @vmbo: BO that will be inserted into the shadow list - * - * Insert a BO to the shadow list. 
- */ -void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev); - - mutex_lock(&adev->shadow_list_lock); - list_add_tail(&vmbo->shadow_list, &adev->shadow_list); - vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); - vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; - mutex_unlock(&adev->shadow_list_lock); -} - -/** - * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow - * - * @shadow: &amdgpu_bo shadow to be restored - * @fence: dma_fence associated with the operation - * - * Copies a buffer object's shadow content back to the object. - * This is used for recovering a buffer from its shadow in case of a gpu - * reset where vram context may be lost. - * - * Returns: - * 0 for success or a negative error code on failure. - */ -int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence) - -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev); - struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - uint64_t shadow_addr, parent_addr; - - shadow_addr = amdgpu_bo_gpu_offset(shadow); - parent_addr = amdgpu_bo_gpu_offset(shadow->parent); - - return amdgpu_copy_buffer(ring, shadow_addr, parent_addr, - amdgpu_bo_size(shadow), NULL, fence, - true, false, 0); -} - /** * amdgpu_bo_kmap - map an &amdgpu_bo buffer object * @bo: &amdgpu_bo buffer object to be mapped @@ -851,7 +787,7 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) if (bo == NULL) return NULL; - ttm_bo_get(&bo->tbo); + drm_gem_object_get(&bo->tbo.base); return bo; } @@ -863,40 +799,30 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) */ void amdgpu_bo_unref(struct amdgpu_bo **bo) { - struct ttm_buffer_object *tbo; - if ((*bo) == NULL) return; - tbo = &((*bo)->tbo); - ttm_bo_put(tbo); + drm_gem_object_put(&(*bo)->tbo.base); *bo = NULL; } /** - * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object + * amdgpu_bo_pin - pin an &amdgpu_bo buffer object * @bo: &amdgpu_bo buffer object to be pinned * @domain: domain to be pinned to - * @min_offset: the start of requested address range - * @max_offset: the end of requested address range * - * Pins the buffer object according to requested domain and address range. If - * the memory is unbound gart memory, binds the pages into gart table. Adjusts - * pin_count and pin_size accordingly. + * Pins the buffer object according to requested domain. If the memory is + * unbound gart memory, binds the pages into gart table. Adjusts pin_count and + * pin_size accordingly. * * Pinning means to lock pages in memory along with keeping them at a fixed * offset. It is required when a buffer can not be moved, for example, when * a display buffer is being scanned out. * - * Compared with amdgpu_bo_pin(), this function gives more flexibility on - * where to pin a buffer if there are specific restrictions on where a buffer - * must be located. - * * Returns: * 0 for success or a negative error code on failure. 
*/ -int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, - u64 min_offset, u64 max_offset) +int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_operation_ctx ctx = { false, false }; @@ -905,9 +831,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) return -EPERM; - if (WARN_ON_ONCE(min_offset > max_offset)) - return -EINVAL; - /* Check domain to be pinned to against preferred domains */ if (bo->preferred_domains & domain) domain = bo->preferred_domains & domain; @@ -933,14 +856,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; ttm_bo_pin(&bo->tbo); - - if (max_offset != 0) { - u64 domain_start = amdgpu_ttm_domain_start(adev, - mem_type); - WARN_ON_ONCE(max_offset < - (amdgpu_bo_gpu_offset(bo) - domain_start)); - } - return 0; } @@ -957,17 +872,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; amdgpu_bo_placement_from_domain(bo, domain); for (i = 0; i < bo->placement.num_placement; i++) { - unsigned int fpfn, lpfn; - - fpfn = min_offset >> PAGE_SHIFT; - lpfn = max_offset >> PAGE_SHIFT; - - if (fpfn > bo->placements[i].fpfn) - bo->placements[i].fpfn = fpfn; - if (!bo->placements[i].lpfn || - (lpfn && lpfn < bo->placements[i].lpfn)) - bo->placements[i].lpfn = lpfn; - if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && bo->placements[i].mem_type == TTM_PL_VRAM) bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; @@ -993,24 +897,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return r; } -/** - * amdgpu_bo_pin - pin an &amdgpu_bo buffer object - * @bo: &amdgpu_bo buffer object to be pinned - * @domain: domain to be pinned to - * - * A simple wrapper to amdgpu_bo_pin_restricted(). - * Provides a simpler API for buffers that do not have any strict restrictions - * on where a buffer must be located. - * - * Returns: - * 0 for success or a negative error code on failure. - */ -int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) -{ - bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - return amdgpu_bo_pin_restricted(bo, domain, 0, 0); -} - /** * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object * @bo: &amdgpu_bo buffer object to be unpinned diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index bc42ccbde659ac..717e47b46167a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -90,6 +90,12 @@ struct amdgpu_bo_va { bool cleared; bool is_xgmi; + + /* + * protected by vm reservation lock + * if non-zero, cannot unmap from GPU because user queues may still access it + */ + unsigned int queue_refcount; }; struct amdgpu_bo { @@ -130,8 +136,6 @@ struct amdgpu_bo_user { struct amdgpu_bo_vm { struct amdgpu_bo bo; - struct amdgpu_bo *shadow; - struct list_head shadow_list; struct amdgpu_vm_bo_base entries[]; }; @@ -269,22 +273,6 @@ static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo) return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED; } -/** - * amdgpu_bo_shadowed - check if the BO is shadowed - * - * @bo: BO to be tested. - * - * Returns: - * NULL if not shadowed or else return a BO pointer. 
- */ -static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo) -{ - if (bo->tbo.type == ttm_bo_type_kernel) - return to_amdgpu_bo_vm(bo)->shadow; - - return NULL; -} - bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain); @@ -316,8 +304,6 @@ void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); void amdgpu_bo_unref(struct amdgpu_bo **bo); int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain); -int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, - u64 min_offset, u64 max_offset); void amdgpu_bo_unpin(struct amdgpu_bo *bo); int amdgpu_bo_init(struct amdgpu_device *adev); void amdgpu_bo_fini(struct amdgpu_device *adev); @@ -343,9 +329,6 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); void amdgpu_bo_get_memory(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats); -void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); -int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, - struct dma_fence **fence); uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c index 0bb2466d539a91..675aa138ea1125 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c @@ -94,7 +94,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n ref_div_max = min(128 / post_div, ref_div_max); /* get matching reference and feedback divider */ - *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); + *ref_div = clamp(DIV_ROUND_CLOSEST(den, post_div), 1u, ref_div_max); *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); /* limit fb divider to its maximum */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 189574d53ebd30..0b28b2cf1517d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2853,7 +2853,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp) if (ret) return ret; - /* Start rlc autoload after psp recieved all the gfx firmware */ + /* Start rlc autoload after psp received all the gfx firmware */ if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 
adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { ret = psp_rlc_autoload_start(psp); @@ -3425,9 +3425,11 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; - int err = 0; + const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; + int fw_index, fw_bin_count, start_index = 0; + const struct psp_fw_bin_desc *fw_bin; uint8_t *ucode_array_start_addr; - int fw_index = 0; + int err = 0; err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name); if (err) @@ -3478,15 +3480,30 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) case 2: sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; - if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { + fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); + + if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); err = -EINVAL; goto out; } - for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { - err = parse_sos_bin_descriptor(psp, - &sos_hdr_v2_0->psp_fw_bin[fw_index], + if (sos_hdr_v2_0->header.header_version_minor == 1) { + sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; + + fw_bin = sos_hdr_v2_1->psp_fw_bin; + + if (psp_is_aux_sos_load_required(psp)) + start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); + else + fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); + + } else { + fw_bin = sos_hdr_v2_0->psp_fw_bin; + } + + for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { + err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, sos_hdr_v2_0); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 74a96516c91387..e8abbbcb432662 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -138,6 +138,7 @@ struct psp_funcs { int (*vbflash_stat)(struct psp_context *psp); int (*fatal_error_recovery_quirk)(struct psp_context *psp); bool (*get_ras_capability)(struct psp_context *psp); + bool (*is_aux_sos_load_required)(struct psp_context *psp); }; struct ta_funcs { @@ -464,6 +465,9 @@ struct amdgpu_psp_funcs { ((psp)->funcs->fatal_error_recovery_quirk ? \ (psp)->funcs->fatal_error_recovery_quirk((psp)) : 0) +#define psp_is_aux_sos_load_required(psp) \ + ((psp)->funcs->is_aux_sos_load_required ? 
(psp)->funcs->is_aux_sos_load_required((psp)) : 0) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index d0307c55da5092..1a1395c5fff15a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -882,7 +882,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (ret) return ret; - /* gfx block ras dsiable cmd must send to ras-ta */ + /* gfx block ras disable cmd must send to ras-ta */ if (head->block == AMDGPU_RAS_BLOCK__GFX) con->features |= BIT(head->block); @@ -1223,11 +1223,11 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; amdgpu_ras_error_statistic_de_count(&obj->err_data, - &err_info->mcm_info, NULL, err_info->de_count); + &err_info->mcm_info, err_info->de_count); amdgpu_ras_error_statistic_ce_count(&obj->err_data, - &err_info->mcm_info, NULL, err_info->ce_count); + &err_info->mcm_info, err_info->ce_count); amdgpu_ras_error_statistic_ue_count(&obj->err_data, - &err_info->mcm_info, NULL, err_info->ue_count); + &err_info->mcm_info, err_info->ue_count); } } else { /* for legacy asic path which doesn't has error source info */ @@ -2153,7 +2153,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * /* gpu reset is fallback for failed and default cases. * For RMA case, amdgpu_umc_poison_handler will handle gpu reset. */ - if (poison_stat && !con->is_rma) { + if (poison_stat && !amdgpu_ras_is_rma(adev)) { event_id = amdgpu_ras_acquire_event_id(adev, type); RAS_EVENT_LOG(adev, event_id, "GPU reset for %s RAS poison consumption is issued!\n", @@ -2881,9 +2881,6 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log) { mutex_init(&ecc_log->lock); - /* Set any value as siphash key */ - memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key)); - INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL); ecc_log->de_queried_count = 0; ecc_log->prev_de_queried_count = 0; @@ -2948,7 +2945,7 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work) amdgpu_ras_error_data_fini(&err_data); - if (err_cnt && con->is_rma) + if (err_cnt && amdgpu_ras_is_rma(adev)) amdgpu_ras_reset_gpu(adev); amdgpu_ras_schedule_retirement_dwork(con, @@ -3049,7 +3046,7 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev, } /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */ - if (reset_flags && !con->is_rma) { + if (reset_flags && !amdgpu_ras_is_rma(adev)) { if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) @@ -3195,7 +3192,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) * This calling fails when is_rma is true or * ret != 0. */ - if (con->is_rma || ret) + if (amdgpu_ras_is_rma(adev) || ret) goto free; if (con->eeprom_control.ras_num_recs) { @@ -3244,7 +3241,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) * Except error threshold exceeding case, other failure cases in this * function would not fail amdgpu driver init. 
*/ - if (!con->is_rma) + if (!amdgpu_ras_is_rma(adev)) ret = 0; else ret = -EINVAL; @@ -3471,6 +3468,11 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) /* aca is disabled by default */ adev->aca.is_enabled = false; + + /* bad page feature is not applicable to specific app platform */ + if (adev->gmc.is_app_apu && + amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0)) + amdgpu_bad_page_threshold = 0; } static void amdgpu_ras_counte_dw(struct work_struct *work) @@ -4287,7 +4289,7 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); /* mode1 is the only selection for RMA status */ - if (ras->is_rma) { + if (amdgpu_ras_is_rma(adev)) { ras->gpu_reset_flags = 0; ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; } @@ -4611,8 +4613,6 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d if (!err_node) return NULL; - INIT_LIST_HEAD(&err_node->err_info.err_addr_list); - memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info)); err_data->err_list_count++; @@ -4622,21 +4622,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d return &err_node->err_info; } -void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr) -{ - /* This function will be retired. */ - return; -} - -void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr) -{ - list_del(&mca_err_addr->node); - kfree(mca_err_addr); -} - int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, - struct ras_err_addr *err_addr, u64 count) + struct amdgpu_smuio_mcm_config_info *mcm_info, + u64 count) { struct ras_err_info *err_info; @@ -4650,9 +4638,6 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, if (!err_info) return -EINVAL; - if (err_addr && err_addr->err_status) - amdgpu_ras_add_mca_err_addr(err_info, err_addr); - err_info->ue_count += count; err_data->ue_count += count; @@ -4660,8 +4645,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, } int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, - struct ras_err_addr *err_addr, u64 count) + struct amdgpu_smuio_mcm_config_info *mcm_info, + u64 count) { struct ras_err_info *err_info; @@ -4682,8 +4667,8 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, } int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, - struct ras_err_addr *err_addr, u64 count) + struct amdgpu_smuio_mcm_config_info *mcm_info, + u64 count) { struct ras_err_info *err_info; @@ -4697,9 +4682,6 @@ int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data, if (!err_info) return -EINVAL; - if (err_addr && err_addr->err_status) - amdgpu_ras_add_mca_err_addr(err_info, err_addr); - err_info->de_count += count; err_data->de_count += count; @@ -4771,6 +4753,16 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev, dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n", socket_id, aid_id, hbm_id, fw_status); + + if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error)) + dev_info(adev->dev, + "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n", + socket_id, aid_id, fw_status); + + if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error)) + dev_info(adev->dev, + "socket: %d, 
aid: %d, fw_status: 0x%x, unknown boot time errors\n", + socket_id, aid_id, fw_status); } static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev, @@ -4837,3 +4829,13 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id, va_end(args); } + +bool amdgpu_ras_is_rma(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!con) + return false; + + return con->is_rma; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index dcf1f3dbb5c43e..669720a9c60afd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -28,7 +28,6 @@ #include #include #include -#include #include "ta_ras_if.h" #include "amdgpu_ras_eeprom.h" #include "amdgpu_smuio.h" @@ -47,6 +46,8 @@ struct amdgpu_iv_entry; #define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8) #define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11) #define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13) +#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29) +#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30) #define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100 #define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA @@ -476,16 +477,15 @@ struct ras_err_pages { }; struct ras_ecc_err { - u64 hash_index; uint64_t status; uint64_t ipid; uint64_t addr; + uint64_t pa_pfn; struct ras_err_pages err_pages; }; struct ras_ecc_log_info { struct mutex lock; - siphash_key_t ecc_key; struct radix_tree_root de_page_tree; uint64_t de_queried_count; uint64_t prev_de_queried_count; @@ -572,19 +572,11 @@ struct ras_fs_data { char debugfs_name[32]; }; -struct ras_err_addr { - struct list_head node; - uint64_t err_status; - uint64_t err_ipid; - uint64_t err_addr; -}; - struct ras_err_info { struct amdgpu_smuio_mcm_config_info mcm_info; u64 ce_count; u64 ue_count; u64 de_count; - struct list_head err_addr_list; }; struct ras_err_node { @@ -941,14 +933,14 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, int amdgpu_ras_error_data_init(struct ras_err_data *err_data); void amdgpu_ras_error_data_fini(struct ras_err_data *err_data); int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, - struct ras_err_addr *err_addr, u64 count); + struct amdgpu_smuio_mcm_config_info *mcm_info, + u64 count); int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, - struct ras_err_addr *err_addr, u64 count); + struct amdgpu_smuio_mcm_config_info *mcm_info, + u64 count); int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, - struct ras_err_addr *err_addr, u64 count); + struct amdgpu_smuio_mcm_config_info *mcm_info, + u64 count); void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances); int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk, const struct aca_info *aca_info, void *data); @@ -957,12 +949,6 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk) ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr, struct aca_handle *handle, char *buf, void *data); -void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, - struct ras_err_addr *err_addr); - -void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, - struct ras_err_addr 
*mca_err_addr); - void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status); bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev); @@ -982,4 +968,5 @@ __printf(3, 4) void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id, const char *fmt, ...); +bool amdgpu_ras_is_rma(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index aab8077e50988e..f28f6b4ba765d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -58,7 +58,7 @@ #define EEPROM_I2C_MADDR_4 0x40000 /* - * The 2 macros bellow represent the actual size in bytes that + * The 2 macros below represent the actual size in bytes that * those entities occupy in the EEPROM memory. * RAS_TABLE_RECORD_SIZE is different than sizeof(eeprom_table_record) which * uses uint64 to store 6b fields such as retired_page. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 4ae581f3fcb54b..1cb920abc2fe9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -136,6 +136,12 @@ static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *doma return queue_work(domain->wq, work); } +static inline bool amdgpu_reset_pending(struct amdgpu_reset_domain *domain) +{ + lockdep_assert_held(&domain->sem); + return rwsem_is_contended(&domain->sem); +} + void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain); void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index e6344a6b0a9f6a..690976665cf699 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -144,8 +144,10 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) /* We pad to match fetch size */ count = ring->funcs->align_mask + 1 - (ring->wptr & ring->funcs->align_mask); - count %= ring->funcs->align_mask + 1; - ring->funcs->insert_nop(ring, count); + count &= ring->funcs->align_mask; + + if (count != 0) + ring->funcs->insert_nop(ring, count); mb(); amdgpu_ring_set_wptr(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 582053f1cd5655..f93f5100220182 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -235,6 +235,8 @@ struct amdgpu_ring_funcs { void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset); void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset); void (*patch_de)(struct amdgpu_ring *ring, unsigned offset); + int (*reset)(struct amdgpu_ring *ring, unsigned int vmid); + void (*emit_cleaner_shader)(struct amdgpu_ring *ring); }; struct amdgpu_ring { @@ -334,6 +336,7 @@ struct amdgpu_ring { #define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o))) #define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o))) #define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o))) +#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v)) unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c index d234b7ccfaafcd..1c66da1c3fb42f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c @@ -410,7 +410,7 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring) struct amdgpu_ring_mux *mux = &adev->gfx.muxer; WARN_ON(!ring->is_sw_ring); - if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) + if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) return; amdgpu_ring_mux_end_ib(mux, ring); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 863b2a34b2d64a..b0a8abc7a8ecfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -22,7 +22,6 @@ * Authors: Andres Rodriguez */ -#include #include #include @@ -43,10 +42,10 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, uint32_t id; int r; - if (!f.file) + if (!fd_file(f)) return -EINVAL; - r = amdgpu_file_to_fpriv(f.file, &fpriv); + r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); if (r) { fdput(f); return r; @@ -72,10 +71,10 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev, struct amdgpu_ctx *ctx; int r; - if (!f.file) + if (!fd_file(f)) return -EINVAL; - r = amdgpu_file_to_fpriv(f.file, &fpriv); + r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); if (r) { fdput(f); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index d3706a48487022..087ce0f6fa0763 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -115,6 +115,7 @@ struct amdgpu_sdma { bool has_page_queue; struct ras_common_if *ras_if; struct amdgpu_sdma_ras *ras; + uint32_t *ip_dump; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index bdf1ef825d8963..c586ab4c911bfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -260,6 +260,36 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, return 0; } +/** + * amdgpu_sync_kfd - sync to KFD fences + * + * @sync: sync object to add KFD fences to + * @resv: reservation object with KFD fences + * + * Extract all KFD fences and add them to the sync object. 
+ */ +int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv) +{ + struct dma_resv_iter cursor; + struct dma_fence *f; + int r = 0; + + dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP); + dma_resv_for_each_fence_unlocked(&cursor, f) { + void *fence_owner = amdgpu_sync_get_owner(f); + + if (fence_owner != AMDGPU_FENCE_OWNER_KFD) + continue; + + r = amdgpu_sync_fence(sync, f); + if (r) + break; + } + dma_resv_iter_end(&cursor); + + return r; +} + /* Free the entry back to the slab */ static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index cf1e9e858efdc8..e3272dce798d79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -51,6 +51,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f); int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode, void *owner); +int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv); struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b8bc7fa8c37503..74adb983ab03e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1970,7 +1970,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned int)(gtt_size / (1024 * 1024))); - /* Initiailize doorbell pool on PCI BAR */ + /* Initialize doorbell pool on PCI BAR */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE); if (r) { DRM_ERROR("Failed initializing doorbell heap.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 5bc37acd398195..4e23419b92d4eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -136,6 +136,14 @@ struct psp_firmware_header_v2_0 { struct psp_fw_bin_desc psp_fw_bin[]; }; +/* version_major=2, version_minor=1 */ +struct psp_firmware_header_v2_1 { + struct common_firmware_header header; + uint32_t psp_fw_bin_count; + uint32_t psp_aux_fw_bin_index; + struct psp_fw_bin_desc psp_fw_bin[]; +}; + /* version_major=1, version_minor=0 */ struct ta_firmware_header_v1_0 { struct common_firmware_header header; @@ -426,6 +434,7 @@ union amdgpu_firmware_header { struct psp_firmware_header_v1_1 psp_v1_1; struct psp_firmware_header_v1_3 psp_v1_3; struct psp_firmware_header_v2_0 psp_v2_0; + struct psp_firmware_header_v2_0 psp_v2_1; struct ta_firmware_header_v1_0 ta; struct ta_firmware_header_v2_0 ta_v2_0; struct gfx_firmware_header_v1_0 gfx; @@ -447,7 +456,7 @@ union amdgpu_firmware_header { uint8_t raw[0x100]; }; -#define UCODE_MAX_PSP_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc)) +#define UCODE_MAX_PSP_PACKAGING (((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc)) * 2) /* * fw loading support diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 2f84bdb8c594d5..bb7b9b2eaac1a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -196,7 +196,7 
@@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, amdgpu_umc_handle_bad_pages(adev, ras_error_status); if ((err_data->ue_count || err_data->de_count) && - (reset || (con && con->is_rma))) { + (reset || amdgpu_ras_is_rma(adev))) { con->gpu_reset_flags |= reset; amdgpu_ras_reset_gpu(adev); } @@ -204,55 +204,6 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, return AMDGPU_RAS_SUCCESS; } -int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, - uint32_t reset, uint32_t timeout_ms) -{ - struct ras_err_data err_data; - struct ras_common_if head = { - .block = AMDGPU_RAS_BLOCK__UMC, - }; - struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - uint32_t timeout = timeout_ms; - - memset(&err_data, 0, sizeof(err_data)); - amdgpu_ras_error_data_init(&err_data); - - do { - - amdgpu_umc_handle_bad_pages(adev, &err_data); - - if (timeout && !err_data.de_count) { - msleep(1); - timeout--; - } - - } while (timeout && !err_data.de_count); - - if (!timeout) - dev_warn(adev->dev, "Can't find bad pages\n"); - - if (err_data.de_count) - dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count); - - if (obj) { - obj->err_data.ue_count += err_data.ue_count; - obj->err_data.ce_count += err_data.ce_count; - obj->err_data.de_count += err_data.de_count; - } - - amdgpu_ras_error_data_fini(&err_data); - - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - - if (reset || (err_data.err_addr_cnt && con && con->is_rma)) { - con->gpu_reset_flags |= reset; - amdgpu_ras_reset_gpu(adev); - } - - return 0; -} - int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev, enum amdgpu_ras_block block, uint16_t pasid, pasid_notify pasid_fn, void *data, uint32_t reset) @@ -472,43 +423,6 @@ int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev, return 0; } -static int amdgpu_umc_uint64_cmp(const void *a, const void *b) -{ - uint64_t *addr_a = (uint64_t *)a; - uint64_t *addr_b = (uint64_t *)b; - - if (*addr_a > *addr_b) - return 1; - else if (*addr_a < *addr_b) - return -1; - else - return 0; -} - -/* Use string hash to avoid logging the same bad pages repeatedly */ -int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev, - uint64_t *pfns, int len, uint64_t *val) -{ - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - char buf[MAX_UMC_HASH_STRING_SIZE] = {0}; - int offset = 0, i = 0; - uint64_t hash_val; - - if (!pfns || !len) - return -EINVAL; - - sort(pfns, len, sizeof(uint64_t), amdgpu_umc_uint64_cmp, NULL); - - for (i = 0; i < len; i++) - offset += snprintf(&buf[offset], sizeof(buf) - offset, "%llx", pfns[i]); - - hash_val = siphash(buf, offset, &con->umc_ecc_log.ecc_key); - - *val = hash_val; - - return 0; -} - int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev, struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err) { @@ -519,18 +433,10 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev, ecc_log = &con->umc_ecc_log; mutex_lock(&ecc_log->lock); - ret = radix_tree_insert(ecc_tree, ecc_err->hash_index, ecc_err); - if (!ret) { - struct ras_err_pages *err_pages = &ecc_err->err_pages; - int i; - - /* Reserve memory */ - for (i = 0; i < err_pages->count; i++) - amdgpu_ras_reserve_page(adev, err_pages->pfn[i]); - + ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err); + if (!ret) radix_tree_tag_set(ecc_tree, - ecc_err->hash_index, UMC_ECC_NEW_DETECTED_TAG); - } + ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG); mutex_unlock(&ecc_log->lock); return ret; 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 5f50c69c3cecc4..ce4179db2a6d13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -127,13 +127,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev, int amdgpu_umc_loop_channels(struct amdgpu_device *adev, umc_func func, void *data); -int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, - uint32_t reset, uint32_t timeout_ms); - int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev, uint64_t status, uint64_t ipid, uint64_t addr); -int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev, - uint64_t *pfns, int len, uint64_t *val); int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev, struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index fbc2852278e11c..6162582d0aa272 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -587,7 +587,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch) break; } - r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, fw_name); + r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name); if (r) { release_firmware(adev->umsch_mm.fw); adev->umsch_mm.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 07d930339b0781..31fd30dcd593ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) return -EINVAL; } - r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name); + r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name); if (r) { dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n", fw_name); @@ -1088,7 +1088,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, int r; job->vm = NULL; - ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); if (ib->length_dw % 16) { DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 968ca2c84ef7e5..74fdbf71d95b74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) return -EINVAL; } - r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name); + r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name); if (r) { dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n", fw_name); @@ -749,7 +749,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, int i, r = 0; job->vm = NULL; - ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); for (idx = 0; idx < ib->length_dw;) { uint32_t len = amdgpu_ib_get_value(ib, idx); @@ -1044,7 +1043,6 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, if (!r) { /* No error, free all destroyed handle slots */ tmp = destroyed; - amdgpu_ib_free(p->adev, ib, NULL); } else { /* Error during parsing, free all allocated handle slots */ tmp = allocated; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index c87d68d4be5365..2a1f3dbb14d3f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -330,6 +330,9 @@ struct amdgpu_vcn { uint16_t inst_mask; uint8_t num_inst_per_aid; bool 
using_unified_queue; + + /* IP reg dump */ + uint32_t *ip_dump; }; struct amdgpu_fw_shared_rb_ptrs_struct { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index b287a82e6177e9..b6397d3229e1ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -33,6 +33,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" #include "amdgpu_reset.h" +#include "amdgpu_dpm.h" #include "vi.h" #include "soc15.h" #include "nv.h" @@ -849,6 +850,13 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad return mode; } +void amdgpu_virt_pre_reset(struct amdgpu_device *adev) +{ + /* stop the data exchange thread */ + amdgpu_virt_fini_data_exchange(adev); + amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR); +} + void amdgpu_virt_post_reset(struct amdgpu_device *adev) { if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b42a8854dca0cb..b650a2032c42bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -376,6 +376,7 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id); bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id); +void amdgpu_virt_pre_reset(struct amdgpu_device *adev); void amdgpu_virt_post_reset(struct amdgpu_device *adev); bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev); bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 6415d0d039e1d1..d4c2afafbb73cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -338,6 +338,7 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, else domain = AMDGPU_GEM_DOMAIN_VRAM; + rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(rbo, domain); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) @@ -549,7 +550,7 @@ static int amdgpu_vkms_sw_fini(void *handle) adev->mode_info.mode_config_initialized = false; - kfree(adev->mode_info.bios_hardcoded_edid); + drm_edid_free(adev->mode_info.bios_hardcoded_edid); kfree(adev->amdgpu_vkms_output); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a060c28f0877cd..6005280f5f38f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -465,7 +465,6 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, { uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); struct amdgpu_vm_bo_base *bo_base; - struct amdgpu_bo *shadow; struct amdgpu_bo *bo; int r; @@ -486,16 +485,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_unlock(&vm->status_lock); bo = bo_base->bo; - shadow = amdgpu_bo_shadowed(bo); r = validate(param, bo); if (r) return r; - if (shadow) { - r = validate(param, shadow); - if (r) - return r; - } if (bo->tbo.type != ttm_bo_type_kernel) { amdgpu_vm_bo_moved(bo_base); @@ -681,6 +674,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; + if (adev->gfx.enable_cleaner_shader && + ring->funcs->emit_cleaner_shader && + job->enforce_isolation) + ring->funcs->emit_cleaner_shader(ring); + if 
(!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) return 0; @@ -742,6 +740,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, amdgpu_ring_emit_switch_buffer(ring); amdgpu_ring_emit_switch_buffer(ring); } + amdgpu_ring_ib_end(ring); return 0; } @@ -838,7 +837,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, params.vm = vm; params.immediate = immediate; - r = vm->update_funcs->prepare(¶ms, NULL, AMDGPU_SYNC_EXPLICIT); + r = vm->update_funcs->prepare(¶ms, NULL); if (r) goto error; @@ -902,10 +901,12 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params, { struct amdgpu_vm *vm = params->vm; - if (!fence || !*fence) + tlb_cb->vm = vm; + if (!fence || !*fence) { + amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb); return; + } - tlb_cb->vm = vm; if (!dma_fence_add_callback(*fence, &tlb_cb->cb, amdgpu_vm_tlb_seq_cb)) { dma_fence_put(vm->last_tlb_flush); @@ -933,7 +934,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params, * @unlocked: unlocked invalidation during MM callback * @flush_tlb: trigger tlb invalidation after update completed * @allow_override: change MTYPE for local NUMA nodes - * @resv: fences we need to sync to + * @sync: fences we need to sync to * @start: start of mapped range * @last: last mapped entry * @flags: flags for the entries @@ -949,16 +950,16 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params, * 0 for success, negative erro code for failure. */ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, - bool immediate, bool unlocked, bool flush_tlb, bool allow_override, - struct dma_resv *resv, uint64_t start, uint64_t last, - uint64_t flags, uint64_t offset, uint64_t vram_base, + bool immediate, bool unlocked, bool flush_tlb, + bool allow_override, struct amdgpu_sync *sync, + uint64_t start, uint64_t last, uint64_t flags, + uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, struct dma_fence **fence) { struct amdgpu_vm_tlb_seq_struct *tlb_cb; struct amdgpu_vm_update_params params; struct amdgpu_res_cursor cursor; - enum amdgpu_sync_mode sync_mode; int r, idx; if (!drm_dev_enter(adev_to_drm(adev), &idx)) @@ -991,14 +992,6 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.allow_override = allow_override; INIT_LIST_HEAD(¶ms.tlb_flush_waitlist); - /* Implicitly sync to command submissions in the same VM before - * unmapping. Sync to moving fences before mapping. 
- */ - if (!(flags & AMDGPU_PTE_VALID)) - sync_mode = AMDGPU_SYNC_EQ_OWNER; - else - sync_mode = AMDGPU_SYNC_EXPLICIT; - amdgpu_vm_eviction_lock(vm); if (vm->evicting) { r = -EBUSY; @@ -1013,7 +1006,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, dma_fence_put(tmp); } - r = vm->update_funcs->prepare(¶ms, resv, sync_mode); + r = vm->update_funcs->prepare(¶ms, sync); if (r) goto error_free; @@ -1155,23 +1148,36 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; + struct dma_fence **last_update; dma_addr_t *pages_addr = NULL; struct ttm_resource *mem; - struct dma_fence **last_update; + struct amdgpu_sync sync; bool flush_tlb = clear; - bool uncached; - struct dma_resv *resv; uint64_t vram_base; uint64_t flags; + bool uncached; int r; + amdgpu_sync_create(&sync); if (clear || !bo) { mem = NULL; - resv = vm->root.bo->tbo.base.resv; + + /* Implicitly sync to command submissions in the same VM before + * unmapping. + */ + r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, + AMDGPU_SYNC_EQ_OWNER, vm); + if (r) + goto error_free; + if (bo) { + r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv); + if (r) + goto error_free; + } + } else { struct drm_gem_object *obj = &bo->tbo.base; - resv = bo->tbo.base.resv; if (obj->import_attach && bo_va->is_xgmi) { struct dma_buf *dma_buf = obj->import_attach->dmabuf; struct drm_gem_object *gobj = dma_buf->priv; @@ -1185,6 +1191,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, if (mem && (mem->mem_type == TTM_PL_TT || mem->mem_type == AMDGPU_PL_PREEMPT)) pages_addr = bo->tbo.ttm->dma_address; + + /* Implicitly sync to moving fences before mapping anything */ + r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, + AMDGPU_SYNC_EXPLICIT, vm); + if (r) + goto error_free; } if (bo) { @@ -1234,12 +1246,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, trace_amdgpu_vm_bo_update(mapping); r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, - !uncached, resv, mapping->start, mapping->last, - update_flags, mapping->offset, - vram_base, mem, pages_addr, - last_update); + !uncached, &sync, mapping->start, + mapping->last, update_flags, + mapping->offset, vram_base, mem, + pages_addr, last_update); if (r) - return r; + goto error_free; } /* If the BO is not in its preferred location add it back to @@ -1267,7 +1279,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, trace_amdgpu_vm_bo_mapping(mapping); } - return 0; +error_free: + amdgpu_sync_free(&sync); + return r; } /** @@ -1414,25 +1428,34 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence) { - struct dma_resv *resv = vm->root.bo->tbo.base.resv; struct amdgpu_bo_va_mapping *mapping; - uint64_t init_pte_value = 0; struct dma_fence *f = NULL; + struct amdgpu_sync sync; int r; + + /* + * Implicitly sync to command submissions in the same VM before + * unmapping. 
+ */ + amdgpu_sync_create(&sync); + r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, + AMDGPU_SYNC_EQ_OWNER, vm); + if (r) + goto error_free; + while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); r = amdgpu_vm_update_range(adev, vm, false, false, true, false, - resv, mapping->start, mapping->last, - init_pte_value, 0, 0, NULL, NULL, - &f); + &sync, mapping->start, mapping->last, + 0, 0, 0, NULL, NULL, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); - return r; + goto error_free; } } @@ -1443,7 +1466,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, dma_fence_put(f); } - return 0; +error_free: + amdgpu_sync_free(&sync); + return r; } @@ -2123,10 +2148,6 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, { struct amdgpu_vm_bo_base *bo_base; - /* shadow bo doesn't have bo base, its validation needs its parent */ - if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo)) - bo = bo->parent; - for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; @@ -2218,7 +2239,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + (1 << 30) - 1) >> 30; vm_size = roundup_pow_of_two( - min(max(phys_ram_gb * 3, min_vm_size), max_size)); + clamp(phys_ram_gb * 3, min_vm_size, max_size)); } adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; @@ -2421,6 +2442,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) return r; + ttm_lru_bulk_move_init(&vm->lru_bulk_move); + vm->is_compute_context = false; vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & @@ -2454,7 +2477,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, root_bo = amdgpu_bo_ref(&root->bo); r = amdgpu_bo_reserve(root_bo, true); if (r) { - amdgpu_bo_unref(&root->shadow); amdgpu_bo_unref(&root_bo); goto error_free_delayed; } @@ -2485,6 +2507,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, error_free_delayed: dma_fence_put(vm->last_tlb_flush); dma_fence_put(vm->last_unlocked); + ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); amdgpu_vm_fini_entities(vm); return r; @@ -2546,11 +2569,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->last_update = dma_fence_get_stub(); vm->is_compute_context = true; - /* Free the shadow bo for compute VM */ - amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); - - goto unreserve_bo; - unreserve_bo: amdgpu_bo_unreserve(vm->root.bo); return r; @@ -2641,6 +2659,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } } + ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); } /** @@ -2754,6 +2773,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) * amdgpu_vm_handle_fault - graceful handling of VM faults. * @adev: amdgpu device pointer * @pasid: PASID of the VM + * @ts: Timestamp of the fault * @vmid: VMID, only used for GFX 9.4.3. * @node_id: Node_id received in IH cookie. Only applicable for * GFX 9.4.3. @@ -2764,7 +2784,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) * shouldn't be reported any more. 
*/ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - u32 vmid, u32 node_id, uint64_t addr, + u32 vmid, u32 node_id, uint64_t addr, uint64_t ts, bool write_fault) { bool is_compute_context = false; @@ -2790,7 +2810,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, addr /= AMDGPU_GPU_PAGE_SIZE; if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid, - node_id, addr, write_fault)) { + node_id, addr, ts, write_fault)) { amdgpu_bo_unref(&root); return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 046949c4b6959f..52dd7cdfdc8145 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -304,8 +304,8 @@ struct amdgpu_vm_update_params { struct amdgpu_vm_update_funcs { int (*map_table)(struct amdgpu_bo_vm *bo); - int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv, - enum amdgpu_sync_mode sync_mode); + int (*prepare)(struct amdgpu_vm_update_params *p, + struct amdgpu_sync *sync); int (*update)(struct amdgpu_vm_update_params *p, struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); @@ -505,9 +505,10 @@ int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev, void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, struct amdgpu_vm *vm, struct amdgpu_bo *bo); int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, - bool immediate, bool unlocked, bool flush_tlb, bool allow_override, - struct dma_resv *resv, uint64_t start, uint64_t last, - uint64_t flags, uint64_t offset, uint64_t vram_base, + bool immediate, bool unlocked, bool flush_tlb, + bool allow_override, struct amdgpu_sync *sync, + uint64_t start, uint64_t last, uint64_t flags, + uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, struct dma_fence **fence); int amdgpu_vm_bo_update(struct amdgpu_device *adev, @@ -558,7 +559,7 @@ amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm); void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info); bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - u32 vmid, u32 node_id, uint64_t addr, + u32 vmid, u32 node_id, uint64_t addr, uint64_t ts, bool write_fault); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 3895bd7d176a9d..0c1ef5850a5eba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -39,20 +39,18 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table) * amdgpu_vm_cpu_prepare - prepare page table update with the CPU * * @p: see amdgpu_vm_update_params definition - * @resv: reservation object with embedded fence - * @sync_mode: synchronization mode + * @sync: sync obj with fences to wait on * * Returns: * Negativ errno, 0 for success. 
*/ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, - struct dma_resv *resv, - enum amdgpu_sync_mode sync_mode) + struct amdgpu_sync *sync) { - if (!resv) + if (!sync) return 0; - return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true); + return amdgpu_sync_wait(sync, true); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index e39d6e7643bfbb..f78a0434a48fa2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -383,14 +383,6 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) return r; - if (vmbo->shadow) { - struct amdgpu_bo *shadow = vmbo->shadow; - - r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx); - if (r) - return r; - } - if (!drm_dev_enter(adev_to_drm(adev), &idx)) return -ENODEV; @@ -403,7 +395,7 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.vm = vm; params.immediate = immediate; - r = vm->update_funcs->prepare(¶ms, NULL, AMDGPU_SYNC_EXPLICIT); + r = vm->update_funcs->prepare(¶ms, NULL); if (r) goto exit; @@ -448,10 +440,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id) { struct amdgpu_bo_param bp; - struct amdgpu_bo *bo; - struct dma_resv *resv; unsigned int num_entries; - int r; memset(&bp, 0, sizeof(bp)); @@ -484,42 +473,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (vm->root.bo) bp.resv = vm->root.bo->tbo.base.resv; - r = amdgpu_bo_create_vm(adev, &bp, vmbo); - if (r) - return r; - - bo = &(*vmbo)->bo; - if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) { - (*vmbo)->shadow = NULL; - return 0; - } - - if (!bp.resv) - WARN_ON(dma_resv_lock(bo->tbo.base.resv, - NULL)); - resv = bp.resv; - memset(&bp, 0, sizeof(bp)); - bp.size = amdgpu_vm_pt_size(adev, level); - bp.domain = AMDGPU_GEM_DOMAIN_GTT; - bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; - bp.type = ttm_bo_type_kernel; - bp.resv = bo->tbo.base.resv; - bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.xcp_id_plus1 = xcp_id + 1; - - r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); - - if (!resv) - dma_resv_unlock(bo->tbo.base.resv); - - if (r) { - amdgpu_bo_unref(&bo); - return r; - } - - amdgpu_bo_add_to_shadow_list(*vmbo); - - return 0; + return amdgpu_bo_create_vm(adev, &bp, vmbo); } /** @@ -569,7 +523,6 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev, return 0; error_free_pt: - amdgpu_bo_unref(&pt->shadow); amdgpu_bo_unref(&pt_bo); return r; } @@ -581,17 +534,10 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev, */ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry) { - struct amdgpu_bo *shadow; - if (!entry->bo) return; entry->bo->vm_bo = NULL; - shadow = amdgpu_bo_shadowed(entry->bo); - if (shadow) { - ttm_bo_set_bulk_move(&shadow->tbo, NULL); - amdgpu_bo_unref(&shadow); - } ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); spin_lock(&entry->vm->status_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 9b748d7058b5c0..46d9fb433ab2a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -35,16 +35,7 @@ */ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table) { - int r; - - r = amdgpu_ttm_alloc_gart(&table->bo.tbo); - if (r) - return r; - - if (table->shadow) - r = amdgpu_ttm_alloc_gart(&table->shadow->tbo); - - return r; + return 
amdgpu_ttm_alloc_gart(&table->bo.tbo); } /* Allocate a new job for @count PTE updates */ @@ -77,32 +68,24 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, * amdgpu_vm_sdma_prepare - prepare SDMA command submission * * @p: see amdgpu_vm_update_params definition - * @resv: reservation object with embedded fence - * @sync_mode: synchronization mode + * @sync: amdgpu_sync object with fences to wait for * * Returns: * Negativ errno, 0 for success. */ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, - struct dma_resv *resv, - enum amdgpu_sync_mode sync_mode) + struct amdgpu_sync *sync) { - struct amdgpu_sync sync; int r; r = amdgpu_vm_sdma_alloc_job(p, 0); if (r) return r; - if (!resv) + if (!sync) return 0; - amdgpu_sync_create(&sync); - r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm); - if (!r) - r = amdgpu_sync_push_to_job(&sync, p->job); - amdgpu_sync_free(&sync); - + r = amdgpu_sync_push_to_job(sync, p->job); if (r) { p->num_dw_left = 0; amdgpu_job_free(p->job); @@ -273,17 +256,13 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, if (!p->pages_addr) { /* set page commands needed */ - if (vmbo->shadow) - amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr, - count, incr, flags); amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count, incr, flags); return 0; } /* copy commands needed */ - ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw * - (vmbo->shadow ? 2 : 1); + ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw; /* for padding */ ndw -= 7; @@ -298,8 +277,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, pte[i] |= flags; } - if (vmbo->shadow) - amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes); amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes); pe += nptes * 8; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 90138bc5f03d1c..32775260556f44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -180,6 +180,6 @@ amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from) #define for_each_xcp(xcp_mgr, xcp, i) \ for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \ - xcp = amdgpu_get_next_xcp(xcp_mgr, &i)) + ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i)) #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 821ba2309dec2b..7de449fae1e3ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1389,10 +1389,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) { case ACA_ERROR_TYPE_UE: - amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL); + amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL); break; case ACA_ERROR_TYPE_CE: - amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL); + amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index fb2b394bb9c555..6e9eeaeb3de1dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -213,7 +213,7 @@ struct amd_sriov_msg_pf2vf_info { uint32_t gpu_capacity; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE]; -}; +} __packed; struct amd_sriov_msg_vf2pf_info_header { /* the total structure size in byte */ @@ 
-273,7 +273,7 @@ struct amd_sriov_msg_vf2pf_info { uint32_t mes_info_size; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE]; -}; +} __packed; /* mailbox message send from guest to host */ enum amd_sriov_mailbox_request_message { diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 228fd4dd32f139..5e8833e4fed2a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -75,6 +75,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, uint32_t inst_mask; ring->xcp_id = AMDGPU_XCP_NO_PARTITION; + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id; if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) return; @@ -92,8 +94,6 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, case AMDGPU_RING_TYPE_VCN_ENC: case AMDGPU_RING_TYPE_VCN_JPEG: ip_blk = AMDGPU_XCP_VCN; - if (aqua_vanjaram_xcp_vcn_shared(adev)) - inst_mask = 1 << (inst_idx * 2); break; default: DRM_ERROR("Not support ring type %d!", ring->funcs->type); @@ -103,6 +103,10 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { ring->xcp_id = xcp_id; + dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name, + ring->xcp_id); + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id; break; } } @@ -390,38 +394,31 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x struct amdgpu_xcp_ip *ip) { struct amdgpu_device *adev = xcp_mgr->adev; + int num_sdma, num_vcn, num_shared_vcn, num_xcp; int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp; - int num_sdma, num_vcn; num_sdma = adev->sdma.num_instances; num_vcn = adev->vcn.num_vcn_inst; + num_shared_vcn = 1; + + num_xcc_xcp = adev->gfx.num_xcc_per_xcp; + num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp; switch (xcp_mgr->mode) { case AMDGPU_SPX_PARTITION_MODE: - num_sdma_xcp = num_sdma; - num_vcn_xcp = num_vcn; - break; case AMDGPU_DPX_PARTITION_MODE: - num_sdma_xcp = num_sdma / 2; - num_vcn_xcp = num_vcn / 2; - break; case AMDGPU_TPX_PARTITION_MODE: - num_sdma_xcp = num_sdma / 3; - num_vcn_xcp = num_vcn / 3; - break; case AMDGPU_QPX_PARTITION_MODE: - num_sdma_xcp = num_sdma / 4; - num_vcn_xcp = num_vcn / 4; - break; case AMDGPU_CPX_PARTITION_MODE: - num_sdma_xcp = 2; - num_vcn_xcp = num_vcn ? 
1 : 0; + num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp); + num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp); break; default: return -EINVAL; } - num_xcc_xcp = adev->gfx.num_xcc_per_xcp; + if (num_vcn && num_xcp > num_vcn) + num_shared_vcn = num_xcp / num_vcn; switch (ip_id) { case AMDGPU_XCP_GFXHUB: @@ -437,7 +434,8 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x ip->ip_funcs = &sdma_v4_4_2_xcp_funcs; break; case AMDGPU_XCP_VCN: - ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id); + ip->inst_mask = + XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn); /* TODO : Assign IP funcs */ break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 09715b506468df..81d195d366ceba 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 25feab188dfe69..a51f3414b65dd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -215,7 +215,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode dig->bl_dev = bd; bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd); - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); DRM_INFO("amdgpu atom DIG backlight initialized\n"); @@ -2064,27 +2064,25 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder) case LCD_FAKE_EDID_PATCH_RECORD_TYPE: fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { - struct edid *edid; - int edid_size = - max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); - edid = kmalloc(edid_size, GFP_KERNEL); - if (edid) { - memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], - fake_edid_record->ucFakeEDIDLength); - - if (drm_edid_is_valid(edid)) { - adev->mode_info.bios_hardcoded_edid = edid; - adev->mode_info.bios_hardcoded_edid_size = edid_size; - } else - kfree(edid); - } + const struct drm_edid *edid; + int edid_size; + + if (fake_edid_record->ucFakeEDIDLength == 128) + edid_size = fake_edid_record->ucFakeEDIDLength; + else + edid_size = fake_edid_record->ucFakeEDIDLength * 128; + edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size); + if (drm_edid_valid(edid)) + adev->mode_info.bios_hardcoded_edid = edid; + else + drm_edid_free(edid); + record += struct_size(fake_edid_record, + ucFakeEDIDString, + edid_size); + } else { + /* empty fake edid record must be 3 bytes long */ + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; } - record += fake_edid_record->ucFakeEDIDLength ? 
- struct_size(fake_edid_record, - ucFakeEDIDString, - fake_edid_record->ucFakeEDIDLength) : - /* empty fake edid record must be 3 bytes long */ - sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index 55982c0064b560..06088d52d81c4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -364,6 +364,7 @@ * 1 - Stream * 2 - Bypass */ +#define EOP_EXEC (1 << 28) /* For Trailing Fence */ #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index dddb5fe16f2c53..70c1399f738def 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1881,6 +1881,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, return r; if (!atomic) { + abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); if (unlikely(r != 0)) { amdgpu_bo_unreserve(abo); @@ -2401,6 +2402,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_bo_unreserve(aobj); if (ret) { @@ -2846,7 +2848,7 @@ static int dce_v10_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - kfree(adev->mode_info.bios_hardcoded_edid); + drm_edid_free(adev->mode_info.bios_hardcoded_edid); drm_kms_helper_poll_fini(adev_to_drm(adev)); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 11780e4d7e9f9d..f154c24499c8a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1931,6 +1931,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, return r; if (!atomic) { + abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); if (unlikely(r != 0)) { amdgpu_bo_unreserve(abo); @@ -2485,6 +2486,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_bo_unreserve(aobj); if (ret) { @@ -2973,7 +2975,7 @@ static int dce_v11_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - kfree(adev->mode_info.bios_hardcoded_edid); + drm_edid_free(adev->mode_info.bios_hardcoded_edid); drm_kms_helper_poll_fini(adev_to_drm(adev)); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 05c0df97f01d35..a7fcb135827f8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1861,6 +1861,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, return r; if (!atomic) { + abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); if (unlikely(r != 0)) { amdgpu_bo_unreserve(abo); @@ -2321,6 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_bo_unreserve(aobj); if (ret) { @@ -2745,7 +2747,7 @@ static int dce_v6_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - 
kfree(adev->mode_info.bios_hardcoded_edid); + drm_edid_free(adev->mode_info.bios_hardcoded_edid); drm_kms_helper_poll_fini(adev_to_drm(adev)); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index dc73e301d93700..77ac3f114d2411 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1828,6 +1828,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, return r; if (!atomic) { + abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); if (unlikely(r != 0)) { amdgpu_bo_unreserve(abo); @@ -2320,6 +2321,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_bo_unreserve(aobj); if (ret) { @@ -2766,7 +2768,7 @@ static int dce_v8_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - kfree(adev->mode_info.bios_hardcoded_edid); + drm_edid_free(adev->mode_info.bios_hardcoded_edid); drm_kms_helper_poll_fini(adev_to_drm(adev)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index e444e621ddaa01..45ed97038df0c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4649,7 +4649,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev) uint32_t inst; ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); adev->gfx.ip_dump_core = NULL; } else { @@ -4662,7 +4662,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev) adev->gfx.mec.num_queue_per_pipe; ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); adev->gfx.ip_dump_compute_queues = NULL; } else { @@ -4675,7 +4675,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev) adev->gfx.me.num_queue_per_pipe; ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); adev->gfx.ip_dump_gfx_queues = NULL; } else { @@ -4741,6 +4741,13 @@ static int gfx_v10_0_sw_init(void *handle) if (r) return r; + /* Bad opcode Event */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, + GFX_10_1__SRCID__CP_BAD_OPCODE_ERROR, + &adev->gfx.bad_op_irq); + if (r) + return r; + /* Privileged reg */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT, &adev->gfx.priv_reg_irq); @@ -5213,26 +5220,74 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) } +static u32 gfx_v10_0_get_cpg_int_cntl(struct amdgpu_device *adev, + int me, int pipe) +{ + if (me != 0) + return 0; + + switch (pipe) { + case 0: + return SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0); + case 1: + return SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1); + default: + return 0; + } +} + +static u32 gfx_v10_0_get_cpc_int_cntl(struct amdgpu_device *adev, + int me, int pipe) +{ + /* + * amdgpu controls only the first MEC. That's why this function only + * handles the setting of interrupts for this specific MEC. All other + * pipes' interrupts are set by amdkfd. 
+ */ + if (me != 1) + return 0; + + switch (pipe) { + case 0: + return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); + case 1: + return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL); + case 2: + return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL); + case 3: + return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL); + default: + return 0; + } +} + static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, bool enable) { - u32 tmp; + u32 tmp, cp_int_cntl_reg; + int i, j; if (amdgpu_sriov_vf(adev)) return; - tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); - - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, - enable ? 1 : 0); - - WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, + enable ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp); + } + } + } } static int gfx_v10_0_init_csb(struct amdgpu_device *adev) @@ -6637,13 +6692,13 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, return 0; } -static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) +static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) { struct amdgpu_device *adev = ring->adev; struct v10_gfx_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.gfx_ring[0]; - if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); mutex_lock(&adev->srbm_mutex); nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); @@ -6695,7 +6750,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v10_0_gfx_init_queue(ring); + r = gfx_v10_0_kgq_init_queue(ring, false); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -6975,13 +7030,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring) return 0; } -static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring) +static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore) { struct amdgpu_device *adev = ring->adev; struct v10_compute_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + if (!restore && !amdgpu_in_reset(adev) && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); mutex_lock(&adev->srbm_mutex); nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); @@ -7043,7 +7098,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev) goto done; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v10_0_kcq_init_queue(ring); + r = gfx_v10_0_kcq_init_queue(ring, false); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -7369,6 +7424,7 
@@ static int gfx_v10_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); /* WA added for Vangogh asic fixing the SMU suspend failure * It needs to set power gating again during gfxoff control @@ -7679,6 +7735,10 @@ static int gfx_v10_0_late_init(void *handle) if (r) return r; + r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); + if (r) + return r; + return 0; } @@ -8889,7 +8949,9 @@ static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); WREG32_SOC15(GC, 0, mmSQ_CMD, value); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static void @@ -9074,12 +9136,39 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned int type, enum amdgpu_interrupt_state state) { + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + /* MECs start at 1 */ + cp_int_cntl_reg = gfx_v10_0_get_cpc_int_cntl(adev, i + 1, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } break; default: break; @@ -9088,17 +9177,75 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev, return 0; } +static int gfx_v10_0_set_bad_op_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + /* MECs start at 1 */ + cp_int_cntl_reg = gfx_v10_0_get_cpc_int_cntl(adev, i + 1, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + break; + default: + break; + } + return 0; +} + static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, enum amdgpu_interrupt_state state) { + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } break; default: break; @@ -9122,8 +9269,8 @@ static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev, case 0: for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; - /* we only enabled 1 gfx queue per pipe for now */ - if (ring->me == me_id && ring->pipe == pipe_id) + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) drm_sched_fault(&ring->sched); } break; @@ -9150,6 +9297,15 @@ static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v10_0_bad_op_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal opcode in command stream \n"); + gfx_v10_0_handle_priv_fault(adev, entry); + return 0; +} + static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -9244,6 +9400,174 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ } +static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) +{ + int i; + + /* Header itself is a NOP packet */ + if (num_nop == 1) { + amdgpu_ring_write(ring, ring->funcs->nop); + return; + } + + /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ + amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); + + /* Header is at index 0, followed by num_nops - 1 NOP packet's */ + for (i = 1; i < num_nop; i++) + amdgpu_ring_write(ring, ring->funcs->nop); +} + +static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; + u32 tmp; + u64 addr; + int r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + + addr = amdgpu_bo_gpu_offset(ring->mqd_obj) + + offsetof(struct v10_gfx_mqd, cp_gfx_hqd_active); + tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); + if (ring->pipe == 0) + tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << ring->queue); + else + tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << ring->queue); + + gfx_v10_0_ring_emit_wreg(kiq_ring, + SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp); + 
gfx_v10_0_wait_reg_mem(kiq_ring, 0, 1, 0, + lower_32_bits(addr), upper_32_bits(addr), + 0, 1, 0x20); + gfx_v10_0_ring_emit_reg_wait(kiq_ring, + SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff); + kiq->pmf->kiq_map_queues(kiq_ring, ring); + amdgpu_ring_commit(kiq_ring); + + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + return r; + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) { + DRM_ERROR("fail to resv mqd_obj\n"); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v10_0_kgq_init_queue(ring, true); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) { + DRM_ERROR("fail to unresv mqd_obj\n"); + return r; + } + + return amdgpu_ring_test_ring(ring); +} + +static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; + int i, r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + + kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, + 0, 0); + amdgpu_ring_commit(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + return r; + + /* make sure dequeue is complete*/ + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); + nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + nv_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + if (r) { + dev_err(adev->dev, "fail to wait on hqd deactivate\n"); + return r; + } + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) { + dev_err(adev->dev, "fail to resv mqd_obj\n"); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v10_0_kcq_init_queue(ring, true); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) { + dev_err(adev->dev, "fail to unresv mqd_obj\n"); + return r; + } + + spin_lock_irqsave(&kiq->ring_lock, flags); + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + kiq->pmf->kiq_map_queues(kiq_ring, ring); + amdgpu_ring_commit(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + return r; + + return amdgpu_ring_test_ring(ring); +} + static void gfx_v10_ip_print(void *handle, struct drm_printer *p) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -9435,7 +9759,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush, .test_ring = gfx_v10_0_ring_test_ring, .test_ib = gfx_v10_0_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v10_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_switch_buffer = gfx_v10_0_ring_emit_sb, .emit_cntxcntl = 
gfx_v10_0_ring_emit_cntxcntl, @@ -9447,6 +9771,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, + .reset = gfx_v10_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { @@ -9476,12 +9801,14 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush, .test_ring = gfx_v10_0_ring_test_ring, .test_ib = gfx_v10_0_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v10_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, + .reset = gfx_v10_0_reset_kcq, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { @@ -9536,6 +9863,11 @@ static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = { .process = gfx_v10_0_priv_reg_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v10_0_bad_op_irq_funcs = { + .set = gfx_v10_0_set_bad_op_fault_state, + .process = gfx_v10_0_bad_op_irq, +}; + static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = { .set = gfx_v10_0_set_priv_inst_fault_state, .process = gfx_v10_0_priv_inst_irq, @@ -9557,6 +9889,9 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_reg_irq.num_types = 1; adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs; + adev->gfx.bad_op_irq.num_types = 1; + adev->gfx.bad_op_irq.funcs = &gfx_v10_0_bad_op_irq_funcs; + adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index dcef399074492d..d3e8be82a1727a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -481,6 +481,24 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, amdgpu_ring_write(ring, inv); /* poll interval */ } +static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) +{ + int i; + + /* Header itself is a NOP packet */ + if (num_nop == 1) { + amdgpu_ring_write(ring, ring->funcs->nop); + return; + } + + /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ + amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); + + /* Header is at index 0, followed by num_nops - 1 NOP packet's */ + for (i = 1; i < num_nop; i++) + amdgpu_ring_write(ring, ring->funcs->nop); +} + static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -1484,7 +1502,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) uint32_t inst; ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); adev->gfx.ip_dump_core = NULL; } else { @@ -1497,7 +1515,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) adev->gfx.mec.num_queue_per_pipe; ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); adev->gfx.ip_dump_compute_queues = NULL; } else { @@ -1510,7 +1528,7 @@ 
static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) adev->gfx.me.num_queue_per_pipe; ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); adev->gfx.ip_dump_gfx_queues = NULL; } else { @@ -1569,6 +1587,13 @@ static int gfx_v11_0_sw_init(void *handle) if (r) return r; + /* Bad opcode Event */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, + GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR, + &adev->gfx.bad_op_irq); + if (r) + return r; + /* Privileged reg */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, @@ -1953,26 +1978,74 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev) gfx_v11_0_init_gds_vmid(adev); } +static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev, + int me, int pipe) +{ + if (me != 0) + return 0; + + switch (pipe) { + case 0: + return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); + case 1: + return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); + default: + return 0; + } +} + +static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev, + int me, int pipe) +{ + /* + * amdgpu controls only the first MEC. That's why this function only + * handles the setting of interrupts for this specific MEC. All other + * pipes' interrupts are set by amdkfd. + */ + if (me != 1) + return 0; + + switch (pipe) { + case 0: + return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); + case 1: + return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); + case 2: + return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); + case 3: + return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); + default: + return 0; + } +} + static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, bool enable) { - u32 tmp; + u32 tmp, cp_int_cntl_reg; + int i, j; if (amdgpu_sriov_vf(adev)) return; - tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0); - - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, - enable ? 1 : 0); - - WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, + enable ? 
1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp); + } + } + } } static int gfx_v11_0_init_csb(struct amdgpu_device *adev) @@ -3911,13 +3984,13 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, return 0; } -static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) +static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) { struct amdgpu_device *adev = ring->adev; struct v11_gfx_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.gfx_ring[0]; - if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); mutex_lock(&adev->srbm_mutex); soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); @@ -3953,7 +4026,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v11_0_gfx_init_queue(ring); + r = gfx_v11_0_kgq_init_queue(ring, false); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -4248,13 +4321,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) return 0; } -static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring) +static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) { struct amdgpu_device *adev = ring->adev; struct v11_compute_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); mutex_lock(&adev->srbm_mutex); soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); @@ -4318,7 +4391,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) goto done; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v11_0_kcq_init_queue(ring); + r = gfx_v11_0_kcq_init_queue(ring, false); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -4598,6 +4671,7 @@ static int gfx_v11_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { @@ -4668,8 +4742,8 @@ static int gfx_v11_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, - int req) +int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, + bool req) { u32 i, tmp, val; @@ -4707,6 +4781,8 @@ static int gfx_v11_0_soft_reset(void *handle) int r, i, j, k; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0); @@ -4714,8 +4790,6 @@ static int gfx_v11_0_soft_reset(void *handle) tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0); WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); - gfx_v11_0_set_safe_mode(adev, 0); - mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { @@ -4740,8 +4814,10 @@ static int gfx_v11_0_soft_reset(void *handle) mutex_unlock(&adev->srbm_mutex); /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ - r = gfx_v11_0_request_gfx_index_mutex(adev, 1); + mutex_lock(&adev->gfx.reset_sem_mutex); + r = gfx_v11_0_request_gfx_index_mutex(adev, true); if (r) { + 
mutex_unlock(&adev->gfx.reset_sem_mutex); DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n"); return r; } @@ -4755,7 +4831,8 @@ static int gfx_v11_0_soft_reset(void *handle) RREG32_SOC15(GC, 0, regCP_VMID_RESET); /* release the gfx mutex */ - r = gfx_v11_0_request_gfx_index_mutex(adev, 0); + r = gfx_v11_0_request_gfx_index_mutex(adev, false); + mutex_unlock(&adev->gfx.reset_sem_mutex); if (r) { DRM_ERROR("Failed to release the gfx mutex during soft reset\n"); return r; @@ -4823,7 +4900,7 @@ static int gfx_v11_0_soft_reset(void *handle) tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); - gfx_v11_0_unset_safe_mode(adev, 0); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); return gfx_v11_0_cp_resume(adev); } @@ -4954,6 +5031,9 @@ static int gfx_v11_0_late_init(void *handle) if (r) return r; + r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); + if (r) + return r; return 0; } @@ -5843,6 +5923,9 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) struct amdgpu_ring *kiq_ring = &kiq->ring; unsigned long flags; + if (adev->enable_mes) + return -EINVAL; + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; @@ -6008,7 +6091,9 @@ static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); WREG32_SOC15(GC, 0, regSQ_CMD, value); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static void @@ -6201,15 +6286,42 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + /* MECs start at 1 */ + cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } break; default: break; @@ -6218,17 +6330,75 @@ static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, return 0; } +static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + /* MECs start at 1 */ + cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + break; + default: + break; + } + return 0; +} + static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } break; default: break; @@ -6252,8 +6422,8 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, case 0: for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; - /* we only enabled 1 gfx queue per pipe for now */ - if (ring->me == me_id && ring->pipe == pipe_id) + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) drm_sched_fault(&ring->sched); } break; @@ -6281,6 +6451,15 @@ static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal opcode in command stream \n"); + gfx_v11_0_handle_priv_fault(adev, entry); + return 0; +} + static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -6367,6 +6546,99 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ } +static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); + if (r) + return r; + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) { + dev_err(adev->dev, "fail to resv mqd_obj\n"); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v11_0_kgq_init_queue(ring, true); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) { + dev_err(adev->dev, "fail to unresv mqd_obj\n"); + return r; + } + + r = amdgpu_mes_map_legacy_queue(adev, ring); + if (r) { + dev_err(adev->dev, "failed to remap kgq\n"); + return r; + } + + return amdgpu_ring_test_ring(ring); +} + +static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, r = 0; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); + WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); + + /* make sure dequeue is complete*/ + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + if (r) { + dev_err(adev->dev, "fail to wait on hqd deactivate\n"); + return r; + } + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) { + dev_err(adev->dev, "fail to resv mqd_obj\n"); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v11_0_kcq_init_queue(ring, true); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) { + dev_err(adev->dev, "fail to unresv mqd_obj\n"); + return r; + } + r = amdgpu_mes_map_legacy_queue(adev, ring); + if (r) { + dev_err(adev->dev, "failed to remap kcq\n"); + return r; + } + + return amdgpu_ring_test_ring(ring); +} + static void gfx_v11_ip_print(void *handle, struct drm_printer *p) { struct amdgpu_device *adev = (struct amdgpu_device 
*)handle; @@ -6556,7 +6828,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, .test_ring = gfx_v11_0_ring_test_ring, .test_ib = gfx_v11_0_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v11_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl, .emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow, @@ -6568,6 +6840,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, + .reset = gfx_v11_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { @@ -6598,12 +6871,14 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, .test_ring = gfx_v11_0_ring_test_ring, .test_ib = gfx_v11_0_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v11_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, + .reset = gfx_v11_0_reset_kcq, }; static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { @@ -6658,6 +6933,11 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = { .process = gfx_v11_0_priv_reg_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = { + .set = gfx_v11_0_set_bad_op_fault_state, + .process = gfx_v11_0_bad_op_irq, +}; + static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { .set = gfx_v11_0_set_priv_inst_fault_state, .process = gfx_v11_0_priv_inst_irq, @@ -6675,6 +6955,9 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_reg_irq.num_types = 1; adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs; + adev->gfx.bad_op_irq.num_types = 1; + adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs; + adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h index 10cfc29c27c9a0..157a5c812259db 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h @@ -26,4 +26,7 @@ extern const struct amdgpu_ip_block_version gfx_v11_0_ip_block; +int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, + bool req); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c index 9cd221ed240c7c..999bb3cc88b70b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c @@ -97,7 +97,7 @@ static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev, ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; } - if (con && !con->is_rma) + if (con && !amdgpu_ras_is_rma(adev)) amdgpu_ras_reset_gpu(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index e45d23e8287888..47b47d21f46447 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -202,12 +202,16 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = { SOC15_REG_ENTRY_STR(GC, 0, 
regCP_IB1_BUFSZ) }; -static const struct soc15_reg_golden golden_settings_gc_12_0[] = { +static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f), SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000), SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020) }; +static const struct soc15_reg_golden golden_settings_gc_12_0[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x00008000, 0x00008000), +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -1281,7 +1285,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev) uint32_t inst; ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); adev->gfx.ip_dump_core = NULL; } else { @@ -1294,7 +1298,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev) adev->gfx.mec.num_queue_per_pipe; ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); adev->gfx.ip_dump_compute_queues = NULL; } else { @@ -1307,7 +1311,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev) adev->gfx.me.num_queue_per_pipe; ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); adev->gfx.ip_dump_gfx_queues = NULL; } else { @@ -1355,6 +1359,13 @@ static int gfx_v12_0_sw_init(void *handle) if (r) return r; + /* Bad opcode Event */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, + GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR, + &adev->gfx.bad_op_irq); + if (r) + return r; + /* Privileged reg */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, @@ -1686,26 +1697,68 @@ static void gfx_v12_0_constants_init(struct amdgpu_device *adev) gfx_v12_0_init_compute_vmid(adev); } +static u32 gfx_v12_0_get_cpg_int_cntl(struct amdgpu_device *adev, + int me, int pipe) +{ + if (me != 0) + return 0; + + switch (pipe) { + case 0: + return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); + default: + return 0; + } +} + +static u32 gfx_v12_0_get_cpc_int_cntl(struct amdgpu_device *adev, + int me, int pipe) +{ + /* + * amdgpu controls only the first MEC. That's why this function only + * handles the setting of interrupts for this specific MEC. All other + * pipes' interrupts are set by amdkfd. + */ + if (me != 1) + return 0; + + switch (pipe) { + case 0: + return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); + case 1: + return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); + default: + return 0; + } +} + static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, - bool enable) + bool enable) { - u32 tmp; + u32 tmp, cp_int_cntl_reg; + int i, j; if (amdgpu_sriov_vf(adev)) return; - tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0); - - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, - enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, - enable ? 
1 : 0); - - WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, + enable ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp); + } + } + } } static int gfx_v12_0_init_csb(struct amdgpu_device *adev) @@ -2867,13 +2920,13 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, return 0; } -static int gfx_v12_0_gfx_init_queue(struct amdgpu_ring *ring) +static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) { struct amdgpu_device *adev = ring->adev; struct v12_gfx_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.gfx_ring[0]; - if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); mutex_lock(&adev->srbm_mutex); soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); @@ -2909,7 +2962,7 @@ static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v12_0_gfx_init_queue(ring); + r = gfx_v12_0_kgq_init_queue(ring, false); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -3213,13 +3266,13 @@ static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring) return 0; } -static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring) +static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) { struct amdgpu_device *adev = ring->adev; struct v12_compute_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); mutex_lock(&adev->srbm_mutex); soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); @@ -3283,7 +3336,7 @@ static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev) goto done; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v12_0_kcq_init_queue(ring); + r = gfx_v12_0_kcq_init_queue(ring, false); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -3446,10 +3499,14 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): + soc15_program_register_sequence(adev, + golden_settings_gc_12_0, + (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); + if (adev->rev_id == 0) soc15_program_register_sequence(adev, - golden_settings_gc_12_0, - (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); + golden_settings_gc_12_0_rev0, + (const u32)ARRAY_SIZE(golden_settings_gc_12_0_rev0)); break; default: break; @@ -3553,6 +3610,7 @@ static int gfx_v12_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { @@ -3672,6 +3730,10 @@ static int gfx_v12_0_late_init(void *handle) if (r) return r; + r = 
amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); + if (r) + return r; + return 0; } @@ -4447,6 +4509,9 @@ static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring) struct amdgpu_ring *kiq_ring = &kiq->ring; unsigned long flags; + if (adev->enable_mes) + return -EINVAL; + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; @@ -4563,7 +4628,9 @@ static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring, value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); WREG32_SOC15(GC, 0, regSQ_CMD, value); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static void @@ -4747,15 +4814,42 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev, static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + /* MECs start at 1 */ + cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } break; default: break; @@ -4764,17 +4858,75 @@ static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev, return 0; } +static int gfx_v12_0_set_bad_op_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + /* MECs start at 1 */ + cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + break; + default: + break; + } + return 0; +} + static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { + cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } break; default: break; @@ -4798,8 +4950,8 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev, case 0: for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; - /* we only enabled 1 gfx queue per pipe for now */ - if (ring->me == me_id && ring->pipe == pipe_id) + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) drm_sched_fault(&ring->sched); } break; @@ -4827,6 +4979,15 @@ static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v12_0_bad_op_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal opcode in command stream \n"); + gfx_v12_0_handle_priv_fault(adev, entry); + return 0; +} + static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -4859,6 +5020,24 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ } +static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) +{ + int i; + + /* Header itself is a NOP packet */ + if (num_nop == 1) { + amdgpu_ring_write(ring, ring->funcs->nop); + return; + } + + /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ + amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); + + /* Header is at index 0, followed by num_nops - 1 NOP packet's */ + for (i = 1; i < num_nop; i++) + amdgpu_ring_write(ring, ring->funcs->nop); +} + static void gfx_v12_ip_print(void *handle, struct drm_printer *p) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -4989,6 +5168,93 @@ static void gfx_v12_ip_dump(void *handle) amdgpu_gfx_off_ctrl(adev, true); } +static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); + if (r) { + dev_err(adev->dev, "reset via MES failed %d\n", r); + return r; + } + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) { + dev_err(adev->dev, "fail to resv mqd_obj\n"); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v12_0_kgq_init_queue(ring, true); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) { + DRM_ERROR("fail to unresv mqd_obj\n"); + return r; + } + + r = 
amdgpu_mes_map_legacy_queue(adev, ring); + if (r) { + dev_err(adev->dev, "failed to remap kgq\n"); + return r; + } + + return amdgpu_ring_test_ring(ring); +} + +static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int r, i; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); + soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); + WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + soc24_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) { + DRM_ERROR("fail to resv mqd_obj\n"); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v12_0_kcq_init_queue(ring, true); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) { + DRM_ERROR("fail to unresv mqd_obj\n"); + return r; + } + r = amdgpu_mes_map_legacy_queue(adev, ring); + if (r) { + dev_err(adev->dev, "failed to remap kcq\n"); + return r; + } + + return amdgpu_ring_test_ring(ring); +} + static const struct amd_ip_funcs gfx_v12_0_ip_funcs = { .name = "gfx_v12_0", .early_init = gfx_v12_0_early_init, @@ -5040,7 +5306,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, .test_ring = gfx_v12_0_ring_test_ring, .test_ib = gfx_v12_0_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v12_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl, .init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec, @@ -5051,6 +5317,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, + .reset = gfx_v12_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { @@ -5078,12 +5345,14 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, .test_ring = gfx_v12_0_ring_test_ring, .test_ib = gfx_v12_0_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v12_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, + .reset = gfx_v12_0_reset_kcq, }; static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { @@ -5138,6 +5407,11 @@ static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = { .process = gfx_v12_0_priv_reg_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v12_0_bad_op_irq_funcs = { + .set = gfx_v12_0_set_bad_op_fault_state, + .process = gfx_v12_0_bad_op_irq, +}; + static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = { .set = gfx_v12_0_set_priv_inst_fault_state, .process = gfx_v12_0_priv_inst_irq, @@ -5151,6 +5425,9 @@ static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev) 
adev->gfx.priv_reg_irq.num_types = 1; adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs; + adev->gfx.bad_op_irq.num_types = 1; + adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs; + adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d84589137df912..f146806c4633ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2114,6 +2114,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, { bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; + bool exec = flags & AMDGPU_FENCE_FLAG_EXEC; + /* Workaround for cache flush problems. First send a dummy EOP * event down the pipe with seq one below. */ @@ -2133,7 +2135,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | - EVENT_INDEX(5))); + EVENT_INDEX(5) | + (exec ? EOP_EXEC : 0))); amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); @@ -4921,6 +4924,76 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ } +static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, + int mem_space, int opt, uint32_t addr0, + uint32_t addr1, uint32_t ref, uint32_t mask, + uint32_t inv) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, + /* memory (1) or register (0) */ + (WAIT_REG_MEM_MEM_SPACE(mem_space) | + WAIT_REG_MEM_OPERATION(opt) | /* wait */ + WAIT_REG_MEM_FUNCTION(3) | /* equal */ + WAIT_REG_MEM_ENGINE(eng_sel))); + + if (mem_space) + BUG_ON(addr0 & 0x3); /* Dword align */ + amdgpu_ring_write(ring, addr0); + amdgpu_ring_write(ring, addr1); + amdgpu_ring_write(ring, ref); + amdgpu_ring_write(ring, mask); + amdgpu_ring_write(ring, inv); /* poll interval */ +} + +static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); +} + +static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; + u32 tmp; + int r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, 5)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + + tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); + gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp); + amdgpu_ring_commit(kiq_ring); + + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + return r; + + if (amdgpu_ring_alloc(ring, 7 + 12 + 5)) + return -ENOMEM; + gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr, + ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); + gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff); + gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0); + + return amdgpu_ring_test_ring(ring); +} + static const struct amd_ip_funcs 
gfx_v7_0_ip_funcs = { .name = "gfx_v7_0", .early_init = gfx_v7_0_early_init, @@ -4972,6 +5045,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .emit_wreg = gfx_v7_0_ring_emit_wreg, .soft_recovery = gfx_v7_0_ring_soft_recovery, .emit_mem_sync = gfx_v7_0_emit_mem_sync, + .reset = gfx_v7_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { @@ -5002,6 +5076,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v7_0_ring_emit_wreg, + .soft_recovery = gfx_v7_0_ring_soft_recovery, .emit_mem_sync = gfx_v7_0_emit_mem_sync_compute, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b4658c7db0e167..bc8295812cc842 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6149,6 +6149,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, { bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; + bool exec = flags & AMDGPU_FENCE_FLAG_EXEC; /* Workaround for cache flush problems. First send a dummy EOP * event down the pipe with seq one below. @@ -6172,7 +6173,8 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, EOP_TC_ACTION_EN | EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | - EVENT_INDEX(5))); + EVENT_INDEX(5) | + (exec ? EOP_EXEC : 0))); amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); @@ -6380,6 +6382,34 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, amdgpu_ring_write(ring, val); } +static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, + int mem_space, int opt, uint32_t addr0, + uint32_t addr1, uint32_t ref, uint32_t mask, + uint32_t inv) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, + /* memory (1) or register (0) */ + (WAIT_REG_MEM_MEM_SPACE(mem_space) | + WAIT_REG_MEM_OPERATION(opt) | /* wait */ + WAIT_REG_MEM_FUNCTION(3) | /* equal */ + WAIT_REG_MEM_ENGINE(eng_sel))); + + if (mem_space) + BUG_ON(addr0 & 0x3); /* Dword align */ + amdgpu_ring_write(ring, addr0); + amdgpu_ring_write(ring, addr1); + amdgpu_ring_write(ring, ref); + amdgpu_ring_write(ring, mask); + amdgpu_ring_write(ring, inv); /* poll interval */ +} + +static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); +} + static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) { struct amdgpu_device *adev = ring->adev; @@ -6856,6 +6886,48 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) } +static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; + u32 tmp; + int r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, 5)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + + tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); + 
gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp); + amdgpu_ring_commit(kiq_ring); + + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + return r; + + if (amdgpu_ring_alloc(ring, 7 + 12 + 5)) + return -ENOMEM; + gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr, + ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); + gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff); + gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0); + + return amdgpu_ring_test_ring(ring); +} + static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -6923,6 +6995,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .emit_wreg = gfx_v8_0_ring_emit_wreg, .soft_recovery = gfx_v8_0_ring_soft_recovery, .emit_mem_sync = gfx_v8_0_emit_mem_sync, + .reset = gfx_v8_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { @@ -6955,6 +7028,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v8_0_ring_emit_wreg, + .soft_recovery = gfx_v8_0_ring_soft_recovery, .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute, .emit_wave_limit = gfx_v8_0_emit_wave_limit, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 2929c8972ea731..23f0573ae47b33 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -50,6 +50,7 @@ #include "amdgpu_ring_mux.h" #include "gfx_v9_4.h" #include "gfx_v9_0.h" +#include "gfx_v9_0_cleaner_shader.h" #include "gfx_v9_4_2.h" #include "asic_reg/pwr/pwr_10_0_offset.h" @@ -893,10 +894,18 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev); static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev, unsigned int vmid); +static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id); +static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { + struct amdgpu_device *adev = kiq_ring->adev; + u64 shader_mc_addr; + + /* Cleaner shader MC address */ + shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | @@ -906,8 +915,8 @@ static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ amdgpu_ring_write(kiq_ring, 0); /* oac mask */ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ } @@ -1004,12 +1013,47 @@ static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); } + +static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type, + uint32_t me_id, uint32_t pipe_id, uint32_t queue_id, + uint32_t xcc_id, uint32_t vmid) +{ + struct amdgpu_device *adev = kiq_ring->adev; + unsigned i; + + 
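/*
 * Editor sketch (hedged): the SET_RESOURCES change above passes the cleaner
 * shader address in 256-byte units (gpu_addr >> 8), split into low and high
 * dwords for the packet. Standalone restatement of that split; the example
 * address is made up.
 */
#include <stdint.h>

static inline void split_cleaner_shader_addr(uint64_t gpu_addr,
					     uint32_t *lo, uint32_t *hi)
{
	uint64_t mc = gpu_addr >> 8;		/* 256-byte aligned units */

	*lo = (uint32_t)(mc & 0xffffffffu);	/* what lower_32_bits() yields */
	*hi = (uint32_t)(mc >> 32);		/* what upper_32_bits() yields */
}

/* e.g. gpu_addr = 0x0000008012345600 -> mc = 0x80123456, lo = 0x80123456, hi = 0 */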
/* enter save mode */ + amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0); + + if (queue_type == AMDGPU_RING_TYPE_COMPUTE) { + WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2); + WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1); + /* wait till dequeue take effects */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) + dev_err(adev->dev, "fail to wait on hqd deactive\n"); + } else { + dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type); + } + + soc15_grbm_select(adev, 0, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + /* exit safe mode */ + amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); +} + static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = { .kiq_set_resources = gfx_v9_0_kiq_set_resources, .kiq_map_queues = gfx_v9_0_kiq_map_queues, .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues, .kiq_query_status = gfx_v9_0_kiq_query_status, .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs, + .kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue, .set_resources_size = 8, .map_queues_size = 7, .unmap_queues_size = 6, @@ -1301,6 +1345,10 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, + /* https://bbs.openkylin.top/t/topic/171497 */ + { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 }, + /* HP 705G4 DM with R5 2400G */ + { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 }, { 0, 0, 0, 0, 0 }, }; @@ -2129,7 +2177,7 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev) uint32_t inst; ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); adev->gfx.ip_dump_core = NULL; } else { @@ -2142,7 +2190,7 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev) adev->gfx.mec.num_queue_per_pipe; ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); - if (ptr == NULL) { + if (!ptr) { DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); adev->gfx.ip_dump_compute_queues = NULL; } else { @@ -2174,6 +2222,12 @@ static int gfx_v9_0_sw_init(void *handle) break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + default: + adev->gfx.enable_cleaner_shader = false; + break; + } + adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; @@ -2182,6 +2236,13 @@ static int gfx_v9_0_sw_init(void *handle) if (r) return r; + /* Bad opcode Event */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, + GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR, + &adev->gfx.bad_op_irq); + if (r) + return r; + /* Privileged reg */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT, &adev->gfx.priv_reg_irq); @@ -2329,6 +2390,10 @@ static int gfx_v9_0_sw_init(void *handle) gfx_v9_0_alloc_ip_dump(adev); + r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + if (r) + return r; + return 0; } @@ -2364,6 +2429,8 @@ static int gfx_v9_0_sw_fini(void *handle) } gfx_v9_0_free_microcode(adev); + amdgpu_gfx_sysfs_isolation_shader_fini(adev); + kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -2634,7 +2701,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 
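/*
 * Editor sketch (hedged): the dequeue wait in gfx_v9_0_kiq_reset_hw_queue()
 * above polls CP_HQD_ACTIVE once per microsecond for up to adev->usec_timeout
 * iterations before giving up. Generic shape of that pattern; read_reg() and
 * delay_us() stand in for the driver's RREG32_SOC15()/udelay() calls and are
 * not real APIs.
 */
#include <stdbool.h>
#include <stdint.h>

static bool poll_active_bit_clear(uint32_t (*read_reg)(void),
				  void (*delay_us)(unsigned int),
				  unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (!(read_reg() & 1))	/* bit 0: queue still active */
			return true;
		delay_us(1);
	}
	return false;			/* caller logs the deactivate timeout */
}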
1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0); - if(adev->gfx.num_gfx_rings) + if (adev->gfx.num_gfx_rings) tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0); WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); @@ -3735,7 +3802,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) return 0; } -static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) +static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -3747,8 +3814,8 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) */ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx]; - if (!tmp_mqd->cp_hqd_pq_control || - (!amdgpu_in_reset(adev) && !adev->in_suspend)) { + if (!restore && (!tmp_mqd->cp_hqd_pq_control || + (!amdgpu_in_reset(adev) && !adev->in_suspend))) { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -3812,7 +3879,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) goto done; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v9_0_kcq_init_queue(ring); + r = gfx_v9_0_kcq_init_queue(ring, false); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -3908,6 +3975,9 @@ static int gfx_v9_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, + adev->gfx.cleaner_shader_ptr); + if (!amdgpu_sriov_vf(adev)) gfx_v9_0_init_golden_registers(adev); @@ -3937,6 +4007,7 @@ static int gfx_v9_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); /* DF freeze and kcq disable will fail */ if (!amdgpu_ras_intr_triggered()) @@ -4747,6 +4818,10 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; + r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); + if (r) + return r; + r = gfx_v9_0_ecc_late_init(handle); if (r) return r; @@ -5858,7 +5933,9 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); WREG32_SOC15(GC, 0, mmSQ_CMD, value); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); } static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, @@ -5929,17 +6006,95 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, } } +static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev, + int me, int pipe) +{ + /* + * amdgpu controls only the first MEC. That's why this function only + * handles the setting of interrupts for this specific MEC. All other + * pipes' interrupts are set by amdkfd. 
+ */ + if (me != 1) + return 0; + + switch (pipe) { + case 0: + return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); + case 1: + return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL); + case 2: + return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL); + case 3: + return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL); + default: + return 0; + } +} + static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) { + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + /* MECs start at 1 */ + cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } + break; + default: + break; + } + + return 0; +} + +static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl_reg, cp_int_cntl; + int i, j; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + /* MECs start at 1 */ + cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j); + + if (cp_int_cntl_reg) { + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
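/*
 * Editor sketch (hedged): gfx_v9_0_get_cpc_int_cntl() above only resolves a
 * per-pipe interrupt control register for MEC1 (me == 1); the remaining MEC
 * pipes are programmed by amdkfd, so 0 is returned and the caller skips the
 * write. Standalone mirror of that selection; the offsets are placeholders,
 * not the real mmCP_ME1_PIPEn_INT_CNTL values.
 */
#include <stdint.h>

#define HYP_ME1_PIPE0_INT_CNTL 0x1080u	/* assumed offsets for illustration */
#define HYP_ME1_PIPE1_INT_CNTL 0x1081u
#define HYP_ME1_PIPE2_INT_CNTL 0x1082u
#define HYP_ME1_PIPE3_INT_CNTL 0x1083u

static uint32_t hyp_cpc_int_cntl_offset(int me, int pipe)
{
	if (me != 1)
		return 0;	/* not ours: other pipes belong to amdkfd */

	switch (pipe) {
	case 0: return HYP_ME1_PIPE0_INT_CNTL;
	case 1: return HYP_ME1_PIPE1_INT_CNTL;
	case 2: return HYP_ME1_PIPE2_INT_CNTL;
	case 3: return HYP_ME1_PIPE3_INT_CNTL;
	default: return 0;
	}
}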
1 : 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + } + } + } break; default: break; @@ -6121,6 +6276,15 @@ static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal opcode in command stream\n"); + gfx_v9_0_fault(adev, entry); + return 0; +} + static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -7001,6 +7165,157 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) } } +static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) +{ + int i; + + /* Header itself is a NOP packet */ + if (num_nop == 1) { + amdgpu_ring_write(ring, ring->funcs->nop); + return; + } + + /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ + amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); + + /* Header is at index 0, followed by num_nops - 1 NOP packet's */ + for (i = 1; i < num_nop; i++) + amdgpu_ring_write(ring, ring->funcs->nop); +} + +static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; + u32 tmp; + int r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, 5)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + + tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); + gfx_v9_0_ring_emit_wreg(kiq_ring, + SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp); + amdgpu_ring_commit(kiq_ring); + + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + return r; + + if (amdgpu_ring_alloc(ring, 7 + 7 + 5)) + return -ENOMEM; + gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr, + ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); + gfx_v9_0_ring_emit_reg_wait(ring, + SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff); + gfx_v9_0_ring_emit_wreg(ring, + SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0); + + return amdgpu_ring_test_ring(ring); +} + +static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; + int i, r; + + if (!adev->debug_exp_resets && + !adev->gfx.num_gfx_rings) + return -EINVAL; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + + kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, + 0, 0); + amdgpu_ring_commit(kiq_ring); + + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + return r; + + /* make sure dequeue is complete*/ + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + 
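/*
 * Editor sketch (hedged): gfx_v9_ring_insert_nop() above pads the ring with a
 * single PACKET3_NOP header whose count field covers up to 0x3ffe payload
 * dwords, then fills anything left with one-dword NOPs. Quick sizing check,
 * assuming the usual PM4 convention that count = N means N + 1 payload dwords
 * follow the header.
 */
static unsigned int hyp_nop_dwords_covered(unsigned int num_nop)
{
	unsigned int count = (num_nop - 2 < 0x3ffe) ? num_nop - 2 : 0x3ffe;

	return 1 /* header */ + (count + 1) /* payload dwords skipped by CP */;
}

/*
 * num_nop = 16    -> count = 14,     covers 16 dwords: the whole pad is one packet
 * num_nop = 65536 -> count = 0x3ffe, covers 0x4000;  the rest execute as 1-dword NOPs
 */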
if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + soc15_grbm_select(adev, 0, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + if (r) { + dev_err(adev->dev, "fail to wait on hqd deactive\n"); + return r; + } + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)){ + dev_err(adev->dev, "fail to resv mqd_obj\n"); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v9_0_kcq_init_queue(ring, true); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) { + dev_err(adev->dev, "fail to unresv mqd_obj\n"); + return r; + } + spin_lock_irqsave(&kiq->ring_lock, flags); + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); + if (r) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + kiq->pmf->kiq_map_queues(kiq_ring, ring); + amdgpu_ring_commit(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + r = amdgpu_ring_test_ring(kiq_ring); + if (r) { + DRM_ERROR("fail to remap queue\n"); + return r; + } + return amdgpu_ring_test_ring(ring); +} + static void gfx_v9_ip_print(void *handle, struct drm_printer *p) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -7083,6 +7398,13 @@ static void gfx_v9_ip_dump(void *handle) } +static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) +{ + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, @@ -7132,7 +7454,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ 2 + /* SWITCH_BUFFER */ - 7, /* gfx_v9_0_emit_mem_sync */ + 7 + /* gfx_v9_0_emit_mem_sync */ + 2, /* gfx_v9_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */ .emit_ib = gfx_v9_0_ring_emit_ib_gfx, .emit_fence = gfx_v9_0_ring_emit_fence, @@ -7141,7 +7464,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, .test_ring = gfx_v9_0_ring_test_ring, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v9_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_switch_buffer = gfx_v9_ring_emit_sb, .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl, @@ -7153,6 +7476,10 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v9_0_ring_soft_recovery, .emit_mem_sync = gfx_v9_0_emit_mem_sync, + .reset = gfx_v9_0_reset_kgq, + .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = { @@ -7185,7 +7512,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = { 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ 2 + /* SWITCH_BUFFER */ - 7, /* gfx_v9_0_emit_mem_sync */ + 7 + /* gfx_v9_0_emit_mem_sync */ + 2, /* gfx_v9_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */ .emit_ib = gfx_v9_0_ring_emit_ib_gfx, .emit_fence = gfx_v9_0_ring_emit_fence, @@ -7195,7 +7523,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = { .emit_hdp_flush 
= gfx_v9_0_ring_emit_hdp_flush, .test_ring = gfx_v9_0_ring_test_ring, .test_ib = gfx_v9_0_ring_test_ib, - .insert_nop = amdgpu_sw_ring_insert_nop, + .insert_nop = gfx_v9_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_switch_buffer = gfx_v9_ring_emit_sb, .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl, @@ -7209,6 +7537,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = { .patch_cntl = gfx_v9_0_ring_patch_cntl, .patch_de = gfx_v9_0_ring_patch_de_meta, .patch_ce = gfx_v9_0_ring_patch_ce_meta, + .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { @@ -7229,7 +7560,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ 7 + /* gfx_v9_0_emit_mem_sync */ 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */ - 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */ + 15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */ + 2, /* gfx_v9_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ .emit_ib = gfx_v9_0_ring_emit_ib_compute, .emit_fence = gfx_v9_0_ring_emit_fence, @@ -7239,13 +7571,18 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, .test_ring = gfx_v9_0_ring_test_ring, .test_ib = gfx_v9_0_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v9_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v9_0_ring_soft_recovery, .emit_mem_sync = gfx_v9_0_emit_mem_sync, .emit_wave_limit = gfx_v9_0_emit_wave_limit, + .reset = gfx_v9_0_reset_kcq, + .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { @@ -7303,6 +7640,11 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = { .process = gfx_v9_0_priv_reg_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = { + .set = gfx_v9_0_set_bad_op_fault_state, + .process = gfx_v9_0_bad_op_irq, +}; + static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = { .set = gfx_v9_0_set_priv_inst_fault_state, .process = gfx_v9_0_priv_inst_irq, @@ -7322,6 +7664,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_reg_irq.num_types = 1; adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs; + adev->gfx.bad_op_irq.num_types = 1; + adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs; + adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h new file mode 100644 index 00000000000000..36c0292b511067 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +/* Define the cleaner shader gfx_9_0 */ +static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = { + /* Add the cleaner shader code here */ +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 20ea6cb01edfda..c100845409f794 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -37,6 +37,7 @@ #include "gc/gc_9_4_3_sh_mask.h" #include "gfx_v9_4_3.h" +#include "gfx_v9_4_3_cleaner_shader.h" #include "amdgpu_xcp.h" #include "amdgpu_aca.h" @@ -63,6 +64,98 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin"); #define NORMALIZE_XCC_REG_OFFSET(offset) \ (offset & 0xFFFF) +static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = { + SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), + SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1), + SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2), + SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1), + SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1), + SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT), + SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT), + SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT), + SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR), + SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT), + SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE), + 
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2), + SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT), + SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6), + /* cp header registers */ + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP), + /* SE status registers */ + SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), + SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1), + SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2), + SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3) +}; + +static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = { + /* compute queue registers */ + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS), +}; + struct amdgpu_gfx_ras gfx_v9_4_3_ras; static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev); @@ -71,10 +164,18 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev); static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev); static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); +static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id); +static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { + struct amdgpu_device *adev = kiq_ring->adev; + u64 shader_mc_addr; + + /* Cleaner shader 
MC address */ + shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | @@ -84,8 +185,8 @@ static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ amdgpu_ring_write(kiq_ring, 0); /* oac mask */ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ } @@ -182,12 +283,46 @@ static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); } +static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type, + uint32_t me_id, uint32_t pipe_id, uint32_t queue_id, + uint32_t xcc_id, uint32_t vmid) +{ + struct amdgpu_device *adev = kiq_ring->adev; + unsigned i; + + /* enter save mode */ + amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id); + + if (queue_type == AMDGPU_RING_TYPE_COMPUTE) { + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1); + /* wait till dequeue take effects */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) + dev_err(adev->dev, "fail to wait on hqd deactive\n"); + } else { + dev_err(adev->dev, "reset queue_type(%d) not supported\n\n", queue_type); + } + + soc15_grbm_select(adev, 0, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + /* exit safe mode */ + amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); +} + static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = { .kiq_set_resources = gfx_v9_4_3_kiq_set_resources, .kiq_map_queues = gfx_v9_4_3_kiq_map_queues, .kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues, .kiq_query_status = gfx_v9_4_3_kiq_query_status, .kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs, + .kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue, .set_resources_size = 8, .map_queues_size = 7, .unmap_queues_size = 6, @@ -885,11 +1020,59 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, hw_prio, NULL); } +static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev) +{ + uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); + uint32_t *ptr, num_xcc, inst; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + + ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); + adev->gfx.ip_dump_core = NULL; + } else { + adev->gfx.ip_dump_core = ptr; + } + + /* Allocate memory for compute queue registers for all the instances */ + reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); + inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * + adev->gfx.mec.num_queue_per_pipe; + + ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); + adev->gfx.ip_dump_compute_queues = NULL; + } else { + 
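/*
 * Editor sketch (hedged): gfx_v9_4_3_alloc_ip_dump() above sizes the compute
 * queue dump as reg_count * (mec * pipes_per_mec * queues_per_pipe) * num_xcc
 * dwords; the print/dump helpers later in this patch index the flat buffer as
 * xcc_offset + inst_offset + reg. Standalone restatement of that layout; the
 * example reg_count is made up.
 */
static unsigned int hyp_ip_dump_index(unsigned int reg_count,
				      unsigned int queues_per_xcc,
				      unsigned int xcc, unsigned int queue,
				      unsigned int reg)
{
	unsigned int xcc_offset  = xcc * reg_count * queues_per_xcc;
	unsigned int inst_offset = queue * reg_count;

	return xcc_offset + inst_offset + reg;
}

/*
 * e.g. reg_count = 40 and queues_per_xcc = 2 * 4 * 8 = 64:
 * the buffer needs 40 * 64 * num_xcc entries, and (xcc 1, queue 0, reg 0)
 * lands at index 40 * 64 = 2560.
 */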
adev->gfx.ip_dump_compute_queues = ptr; + } +} + static int gfx_v9_4_3_sw_init(void *handle) { int i, j, k, r, ring_id, xcc_id, num_xcc; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex); + if (adev->gfx.mec_fw_version >= 153) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; + default: + adev->gfx.enable_cleaner_shader = false; + break; + } + adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; @@ -901,6 +1084,13 @@ static int gfx_v9_4_3_sw_init(void *handle) if (r) return r; + /* Bad opcode Event */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, + GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR, + &adev->gfx.bad_op_irq); + if (r) + return r; + /* Privileged reg */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT, &adev->gfx.priv_reg_irq); @@ -976,10 +1166,19 @@ static int gfx_v9_4_3_sw_init(void *handle) return r; - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev)) { r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + } - return r; + gfx_v9_4_3_alloc_ip_dump(adev); + + r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + if (r) + return r; + + return 0; } static int gfx_v9_4_3_sw_fini(void *handle) @@ -997,11 +1196,17 @@ static int gfx_v9_4_3_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev, i); } + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v9_4_3_mec_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); gfx_v9_4_3_free_microcode(adev); if (!amdgpu_sriov_vf(adev)) amdgpu_gfx_sysfs_fini(adev); + amdgpu_gfx_sysfs_isolation_shader_fini(adev); + + kfree(adev->gfx.ip_dump_core); + kfree(adev->gfx.ip_dump_compute_queues); return 0; } @@ -1496,7 +1701,15 @@ static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev, WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0); } else { WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, - (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); + (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_HALT_MASK | + CP_MEC_CNTL__MEC_ME2_HALT_MASK)); adev->gfx.kiq[xcc_id].ring.sched.ready = false; } udelay(50); @@ -1910,7 +2123,7 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) return 0; } -static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) +static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -1922,8 +2135,8 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) */ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx]; - if (!tmp_mqd->cp_hqd_pq_control || - (!amdgpu_in_reset(adev) && !adev->in_suspend)) { + if (!restore && (!tmp_mqd->cp_hqd_pq_control || + (!amdgpu_in_reset(adev) && 
!adev->in_suspend))) { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -2008,7 +2221,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) goto done; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); if (!r) { - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id); + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); amdgpu_bo_kunmap(ring->mqd_obj); ring->mqd_ptr = NULL; } @@ -2035,6 +2248,8 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id); if (r) return r; + } else { + gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); } r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id); @@ -2094,12 +2309,6 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) return 0; } -static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable, - int xcc_id) -{ - gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id); -} - static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) { if (amdgpu_gfx_disable_kcq(adev, xcc_id)) @@ -2131,7 +2340,7 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) } gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id); - gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id); + gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); } static int gfx_v9_4_3_hw_init(void *handle) @@ -2139,6 +2348,9 @@ static int gfx_v9_4_3_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, + adev->gfx.cleaner_shader_ptr); + if (!amdgpu_sriov_vf(adev)) gfx_v9_4_3_init_golden_registers(adev); @@ -2162,6 +2374,7 @@ static int gfx_v9_4_3_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { @@ -2327,6 +2540,10 @@ static int gfx_v9_4_3_late_init(void *handle) if (r) return r; + r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); + if (r) + return r; + if (adev->gfx.ras && adev->gfx.ras->enable_watchdog_timer) adev->gfx.ras->enable_watchdog_timer(adev); @@ -2833,6 +3050,24 @@ static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask); } +static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring, + unsigned vmid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t value = 0; + + if (!adev->debug_exp_resets) + return; + + value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id); + WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value); + amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id); +} + static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( struct amdgpu_device *adev, int me, int pipe, enum amdgpu_interrupt_state state, int xcc_id) @@ -2886,21 +3121,103 @@ static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( } } +static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev, + int xcc_id, int me, int pipe) +{ + /* + * amdgpu controls only the first MEC. 
That's why this function only + * handles the setting of interrupts for this specific MEC. All other + * pipes' interrupts are set by amdkfd. + */ + if (me != 1) + return 0; + + switch (pipe) { + case 0: + return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL); + case 1: + return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL); + case 2: + return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL); + case 3: + return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL); + default: + return 0; + } +} + static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, enum amdgpu_interrupt_state state) { - int i, num_xcc; + u32 mec_int_cntl_reg, mec_int_cntl; + int i, j, k, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - for (i = 0; i < num_xcc; i++) + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (j = 0; j < adev->gfx.mec.num_mec; j++) { + for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { + /* MECs start at 1 */ + mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k); + + if (mec_int_cntl_reg) { + mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i); + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? + 1 : 0); + WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i); + } + } + } + } + break; + default: + break; + } + + return 0; +} + +static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 mec_int_cntl_reg, mec_int_cntl; + int i, j, k, num_xcc; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + for (i = 0; i < num_xcc; i++) { + WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + for (j = 0; j < adev->gfx.mec.num_mec; j++) { + for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { + /* MECs start at 1 */ + mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k); + + if (mec_int_cntl_reg) { + mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i); + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, + OPCODE_ERROR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
+ 1 : 0); + WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i); + } + } + } + } break; default: break; @@ -3061,6 +3378,15 @@ static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal opcode in command stream\n"); + gfx_v9_4_3_fault(adev, entry); + return 0; +} + static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -3147,6 +3473,183 @@ static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable) } } +static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me, + uint32_t pipe, uint32_t queue, + uint32_t xcc_id) +{ + int i, r; + /* make sure dequeue is complete*/ + gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id); + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id)); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + else + r = 0; + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); + mutex_unlock(&adev->srbm_mutex); + gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id); + + return r; + +} + +static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev) +{ + /*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/ + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) && + adev->gfx.mec_fw_version >= 0x0000009b) + return true; + else + dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n"); + + return false; +} + +static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t reset_pipe, clean_pipe; + int r; + + if (!gfx_v9_4_3_pipe_reset_support(adev)) + return -EINVAL; + + gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id); + mutex_lock(&adev->srbm_mutex); + + reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL); + clean_pipe = reset_pipe; + + if (ring->me == 1) { + switch (ring->pipe) { + case 0: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE0_RESET, 1); + break; + case 1: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE1_RESET, 1); + break; + case 2: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE2_RESET, 1); + break; + case 3: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE3_RESET, 1); + break; + default: + break; + } + } else { + if (ring->pipe) + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE1_RESET, 1); + else + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE0_RESET, 1); + } + + WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe); + WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe); + mutex_unlock(&adev->srbm_mutex); + gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id); + + r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id); + return r; +} + +static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; + int r; + + if (!adev->debug_exp_resets) + return -EINVAL; + + if (amdgpu_sriov_vf(adev)) + return 
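/*
 * Editor sketch (hedged): gfx_v9_4_3_reset_hw_pipe() above picks one
 * MEC_MEx_PIPEy_RESET field in CP_MEC_CNTL from ring->me / ring->pipe (ME1
 * has four pipes, ME2 two), pulses it, then restores the previous value.
 * Standalone mirror of the selection; the bit numbers are placeholders, not
 * the real CP_MEC_CNTL layout.
 */
#include <stdint.h>

#define HYP_ME1_PIPE0_RESET_BIT 16	/* assumed bit positions */
#define HYP_ME2_PIPE0_RESET_BIT 20

static uint32_t hyp_mec_pipe_reset_mask(int me, int pipe)
{
	if (me == 1 && pipe >= 0 && pipe <= 3)
		return 1u << (HYP_ME1_PIPE0_RESET_BIT + pipe);
	if (me == 2 && (pipe == 0 || pipe == 1))
		return 1u << (HYP_ME2_PIPE0_RESET_BIT + pipe);
	return 0;	/* unknown combination: leave the register untouched */
}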
-EINVAL; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + + kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, + 0, 0); + amdgpu_ring_commit(kiq_ring); + + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) { + dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n", + ring->name); + goto pipe_reset; + } + + r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id); + if (r) + dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n"); + +pipe_reset: + if(r) { + r = gfx_v9_4_3_reset_hw_pipe(ring); + dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name, + r ? "failed" : "successfully"); + if (r) + return r; + } + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)){ + dev_err(adev->dev, "fail to resv mqd_obj\n"); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) { + dev_err(adev->dev, "fail to unresv mqd_obj\n"); + return r; + } + spin_lock_irqsave(&kiq->ring_lock, flags); + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); + if (r) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + kiq->pmf->kiq_map_queues(kiq_ring, ring); + amdgpu_ring_commit(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) { + dev_err(adev->dev, "fail to remap queue\n"); + return r; + } + return amdgpu_ring_test_ring(ring); +} + enum amdgpu_gfx_cp_ras_mem_id { AMDGPU_GFX_CP_MEM1 = 1, AMDGPU_GFX_CP_MEM2, @@ -3959,8 +4462,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, /* the caller should make sure initialize value of * err_data->ue_count and err_data->ce_count */ - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); } static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, @@ -4062,6 +4565,151 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer); } +static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) +{ + int i; + + /* Header itself is a NOP packet */ + if (num_nop == 1) { + amdgpu_ring_write(ring, ring->funcs->nop); + return; + } + + /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ + amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); + + /* Header is at index 0, followed by num_nops - 1 NOP packet's */ + for (i = 1; i < num_nop; i++) + amdgpu_ring_write(ring, ring->funcs->nop); +} + +static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t i, j, k; + uint32_t xcc_id, xcc_offset, inst_offset; + uint32_t num_xcc, reg, num_inst; + uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); + + if (!adev->gfx.ip_dump_core) + return; + + 
num_xcc = NUM_XCC(adev->gfx.xcc_mask); + drm_printf(p, "Number of Instances:%d\n", num_xcc); + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { + xcc_offset = xcc_id * reg_count; + drm_printf(p, "\nInstance id:%d\n", xcc_id); + for (i = 0; i < reg_count; i++) + drm_printf(p, "%-50s \t 0x%08x\n", + gc_reg_list_9_4_3[i].reg_name, + adev->gfx.ip_dump_core[xcc_offset + i]); + } + + /* print compute queue registers for all instances */ + if (!adev->gfx.ip_dump_compute_queues) + return; + + num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * + adev->gfx.mec.num_queue_per_pipe; + + reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); + drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n", + num_xcc, + adev->gfx.mec.num_mec, + adev->gfx.mec.num_pipe_per_mec, + adev->gfx.mec.num_queue_per_pipe); + + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { + xcc_offset = xcc_id * reg_count * num_inst; + inst_offset = 0; + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { + drm_printf(p, + "\nxcc:%d mec:%d, pipe:%d, queue:%d\n", + xcc_id, i, j, k); + for (reg = 0; reg < reg_count; reg++) { + drm_printf(p, + "%-50s \t 0x%08x\n", + gc_cp_reg_list_9_4_3[reg].reg_name, + adev->gfx.ip_dump_compute_queues + [xcc_offset + inst_offset + + reg]); + } + inst_offset += reg_count; + } + } + } + } +} + +static void gfx_v9_4_3_ip_dump(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t i, j, k; + uint32_t num_xcc, reg, num_inst; + uint32_t xcc_id, xcc_offset, inst_offset; + uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); + + if (!adev->gfx.ip_dump_core) + return; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + + amdgpu_gfx_off_ctrl(adev, false); + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { + xcc_offset = xcc_id * reg_count; + for (i = 0; i < reg_count; i++) + adev->gfx.ip_dump_core[xcc_offset + i] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i], + GET_INST(GC, xcc_id))); + } + amdgpu_gfx_off_ctrl(adev, true); + + /* dump compute queue registers for all instances */ + if (!adev->gfx.ip_dump_compute_queues) + return; + + num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * + adev->gfx.mec.num_queue_per_pipe; + reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->srbm_mutex); + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { + xcc_offset = xcc_id * reg_count * num_inst; + inst_offset = 0; + for (i = 0; i < adev->gfx.mec.num_mec; i++) { + for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { + for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { + /* ME0 is for GFX so start from 1 for CP */ + soc15_grbm_select(adev, 1 + i, j, k, 0, + GET_INST(GC, xcc_id)); + + for (reg = 0; reg < reg_count; reg++) { + adev->gfx.ip_dump_compute_queues + [xcc_offset + + inst_offset + reg] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST( + gc_cp_reg_list_9_4_3[reg], + GET_INST(GC, xcc_id))); + } + inst_offset += reg_count; + } + } + } + } + soc15_grbm_select(adev, 0, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + amdgpu_gfx_off_ctrl(adev, true); +} + +static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring) +{ + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { .name = "gfx_v9_4_3", .early_init = 
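/*
 * Editor sketch (hedged): gfx_v9_4_3_ring_emit_cleaner_shader() above emits
 * exactly two dwords, the PACKET3_RUN_CLEANER_SHADER header plus one reserved
 * dword programmed to zero, which is why every updated .emit_frame_size in
 * this patch grows by "+ 2". Minimal restatement of that accounting.
 */
static unsigned int hyp_cleaner_shader_dwords(void)
{
	return 1 /* PACKET3 header */ + 1 /* reserved dword, zero */;
}

/* new emit_frame_size = old emit_frame_size + hyp_cleaner_shader_dwords() == old + 2 */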
gfx_v9_4_3_early_init, @@ -4078,8 +4726,8 @@ static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { .set_clockgating_state = gfx_v9_4_3_set_clockgating_state, .set_powergating_state = gfx_v9_4_3_set_powergating_state, .get_clockgating_state = gfx_v9_4_3_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = gfx_v9_4_3_ip_dump, + .print_ip_state = gfx_v9_4_3_ip_print, }; static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { @@ -4101,7 +4749,8 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */ 7 + /* gfx_v9_4_3_emit_mem_sync */ 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */ - 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */ + 15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */ + 2, /* gfx_v9_4_3_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ .emit_ib = gfx_v9_4_3_ring_emit_ib_compute, .emit_fence = gfx_v9_4_3_ring_emit_fence, @@ -4111,13 +4760,18 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, .test_ring = gfx_v9_4_3_ring_test_ring, .test_ib = gfx_v9_4_3_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = gfx_v9_4_3_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v9_4_3_ring_emit_wreg, .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v9_4_3_ring_soft_recovery, .emit_mem_sync = gfx_v9_4_3_emit_mem_sync, .emit_wave_limit = gfx_v9_4_3_emit_wave_limit, + .reset = gfx_v9_4_3_reset_kcq, + .emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { @@ -4172,6 +4826,11 @@ static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = { .process = gfx_v9_4_3_priv_reg_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = { + .set = gfx_v9_4_3_set_bad_op_fault_state, + .process = gfx_v9_4_3_bad_op_irq, +}; + static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = { .set = gfx_v9_4_3_set_priv_inst_fault_state, .process = gfx_v9_4_3_priv_inst_irq, @@ -4185,6 +4844,9 @@ static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_reg_irq.num_types = 1; adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs; + adev->gfx.bad_op_irq.num_types = 1; + adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs; + adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm new file mode 100644 index 00000000000000..d5325ef80ab02f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader. +// To enable this shader program for compilation, rename this shader to main and rename the shader main below to main_1 + +// MI300 : Clear SGPRs, VGPRs and LDS +// Uses two kernels launched separately: +// 1. Clean VGPRs, LDS, and lower SGPRs +// Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU +// Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD +// Waves in the workgroup share the 64KB of LDS +// Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383 +// Each wave clears 128 VGPRs, so all 512 in the SIMD +// The first wave of the workgroup clears its 64KB of LDS +// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup +// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared. +// 2. Clean remaining SGPRs +// Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU +// Each wave allocates 96 SGPRs +// CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223. +// As such, these 6 waves per SIMD are allocated physical SGPRs 224-799 +// Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER +// Instead, the shader starts with an S_SETHALT 1.
Once all waves are launched, CP will send an unhalt command +// The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799 + +shader main + asic(MI300) + type(CS) + wave_size(64) +// Note: original source code from SQ team + +// (theoretical fastest = ~512clks vgpr + 1536 lds + ~128 sgpr = 2176 clks) + + s_cmp_eq_u32 s0, 1 // If bit 0 of sgpr0 is set, clear VGPRs and LDS, as FW sets COMPUTE_USER_DATA_3 + s_cbranch_scc0 label_0023 // Clean VGPRs and LDS if sgpr0 of the wave is set, scc = (s0 == 1) + S_BARRIER + + s_movk_i32 m0, 0x0000 + s_mov_b32 s2, 0x00000078 // Loop 128/8=16 times (loop unrolled for performance) + // + // CLEAR VGPRs + // + s_set_gpr_idx_on s2, 0x8 // enable Dest VGPR indexing +label_0005: + v_mov_b32 v0, 0 + v_mov_b32 v1, 0 + v_mov_b32 v2, 0 + v_mov_b32 v3, 0 + v_mov_b32 v4, 0 + v_mov_b32 v5, 0 + v_mov_b32 v6, 0 + v_mov_b32 v7, 0 + s_sub_u32 s2, s2, 8 + s_set_gpr_idx_idx s2 + s_cbranch_scc0 label_0005 + s_set_gpr_idx_off + + // + // + + s_mov_b32 s2, 0x80000000 // Bit31 is first_wave + s_and_b32 s2, s2, s1 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set + s_cbranch_scc0 label_clean_sgpr_1 // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup + // CLEAR LDS + // + s_mov_b32 exec_lo, 0xffffffff + s_mov_b32 exec_hi, 0xffffffff + v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63) + v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63) + v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8 bytes) + s_mov_b32 s2, 0x00000003f // 64 loop iterations + s_mov_b32 m0, 0xffffffff + // Clear all of LDS space + // Each FirstWave of a WorkGroup clears a 64KB block + +label_001F: + ds_write2_b64 v1, v[2:3], v[2:3] offset1:32 + ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96 + v_add_co_u32 v1, vcc, 0x00000400, v1 + s_sub_u32 s2, s2, 1 + s_cbranch_scc0 label_001F + // + // CLEAR SGPRs + // +label_clean_sgpr_1: + s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance) + s_nop 0 +label_sgpr_loop: + s_movreld_b32 s0, 0 + s_movreld_b32 s1, 0 + s_movreld_b32 s2, 0 + s_movreld_b32 s3, 0 + s_sub_u32 m0, m0, 4 + s_cbranch_scc0 label_sgpr_loop + + //clear vcc, flat scratch + s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR + s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR + s_mov_b64 vcc, 0 //clear vcc + s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1 + s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3 + s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5 + s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7 + s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9 + s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11 + s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13 + s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15 +s_endpgm + +label_0023: + + s_sethalt 1 + + s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance) + s_nop 0 +label_sgpr_loop1: + + s_movreld_b32 s0, 0 + s_movreld_b32 s1, 0 + s_movreld_b32 s2, 0 + s_movreld_b32 s3, 0 + s_sub_u32 m0, m0, 4 + s_cbranch_scc0 label_sgpr_loop1 + + //clear vcc, flat scratch + s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR + s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR + s_mov_b64 vcc, 0xee //clear vcc + +s_endpgm +end + diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h new file mode 100644 index 00000000000000..69aa567c6c1d10 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h @@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* Define the cleaner shader gfx_9_4_3 */ +static const u32 gfx_9_4_3_cleaner_shader_hex[] = { + 0xbf068100, 0xbf84003b, + 0xbf8a0000, 0xb07c0000, + 0xbe8200ff, 0x00000078, + 0xbf110802, 0x7e000280, + 0x7e020280, 0x7e040280, + 0x7e060280, 0x7e080280, + 0x7e0a0280, 0x7e0c0280, + 0x7e0e0280, 0x80828802, + 0xbe803202, 0xbf84fff5, + 0xbf9c0000, 0xbe8200ff, + 0x80000000, 0x86020102, + 0xbf840011, 0xbefe00c1, + 0xbeff00c1, 0xd28c0001, + 0x0001007f, 0xd28d0001, + 0x0002027e, 0x10020288, + 0xbe8200bf, 0xbefc00c1, + 0xd89c2000, 0x00020201, + 0xd89c6040, 0x00040401, + 0x320202ff, 0x00000400, + 0x80828102, 0xbf84fff8, + 0xbefc00ff, 0x0000005c, + 0xbf800000, 0xbe802c80, + 0xbe812c80, 0xbe822c80, + 0xbe832c80, 0x80fc847c, + 0xbf84fffa, 0xbee60080, + 0xbee70080, 0xbeea0180, + 0xbeec0180, 0xbeee0180, + 0xbef00180, 0xbef20180, + 0xbef40180, 0xbef60180, + 0xbef80180, 0xbefa0180, + 0xbf810000, 0xbf8d0001, + 0xbefc00ff, 0x0000005c, + 0xbf800000, 0xbe802c80, + 0xbe812c80, 0xbe822c80, + 0xbe832c80, 0x80fc847c, + 0xbf84fffa, 0xbee60080, + 0xbee70080, 0xbeea01ff, + 0x000000ee, 0xbf810000, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index d200310d17319a..0e3ddea7b8e0f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -443,23 +443,6 @@ static void gfxhub_v1_0_init(struct amdgpu_device *adev) mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; } -static bool gfxhub_v1_0_query_utcl2_poison_status(struct amdgpu_device *adev, - int xcc_id) -{ - u32 status = 0; - struct amdgpu_vmhub *hub; - - if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) - return false; - - hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - status = RREG32(hub->vm_l2_pro_fault_status); - /* reset page fault status */ - WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); - - return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); -} - const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = { .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset, .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs, @@ -468,5 +451,4 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = { .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default, .init = gfxhub_v1_0_init, .get_xgmi_info = gfxhub_v1_1_get_xgmi_info, - .query_utcl2_poison_status = gfxhub_v1_0_query_utcl2_poison_status, }; diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 72109abe7c86c8..ed8e130c7d195b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -622,22 +622,6 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev) return 0; } -static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev, - int xcc_id) -{ - u32 fed, status; - - status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS); - fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); - if (!amdgpu_sriov_vf(adev)) { - /* clear page fault status and address */ - WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), - regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1); - } - - return fed; -} - const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset, .setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs, @@ -646,7 +630,6 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default, .init = gfxhub_v1_2_init, .get_xgmi_info = gfxhub_v1_2_get_xgmi_info, - .query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status, }; static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index f0ceab3ce5bfad..9784a28921853f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -132,7 +132,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault)) + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, + entry->timestamp, write_fault)) return 1; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b73136d390cc03..c76ac0dfe572d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -595,7 +595,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, cam_index = entry->src_data[2] & 0x3ff; ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, - addr, write_fault); + addr, entry->timestamp, write_fault); WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); if (ret) return 1; @@ -618,7 +618,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, * tables */ if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, - addr, write_fault)) + addr, entry->timestamp, write_fault)) return 1; } } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index 077c6d920e27f2..e019249883fb2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -41,7 +41,7 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); else amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c index a9ea23fa0def7f..ed7facacf2fe30 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c @@ 
-32,7 +32,7 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); else amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c index ab06c2b4b20b2f..33736d361dd0bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c @@ -35,7 +35,7 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); else amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c index 8d7d0813e33154..1c99bb09e2a129 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c @@ -32,7 +32,7 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); else amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index 6c1891889c4da8..d4f72e47ae9e20 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -153,7 +153,7 @@ static void imu_v11_0_setup(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_16, imu_reg_val); } - //disble imu Rtavfs, SmsRepair, DfllBTC, and ClkB + //disable imu Rtavfs, SmsRepair, DfllBTC, and ClkB imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10); imu_reg_val |= 0x10007; WREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10, imu_reg_val); diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c index aac107898baecc..964c29ef25dccd 100644 --- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c @@ -42,23 +42,23 @@ static const unsigned int isp_4_1_0_int_srcid[MAX_ISP410_INT_SRC] = { static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp) { struct amdgpu_device *adev = isp->adev; + int idx, int_idx, num_res, r; u64 isp_base; - int int_idx; - int r; if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) return -EINVAL; isp_base = adev->rmmio_base; - isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL); + isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL); if (!isp->isp_cell) { r = -ENOMEM; DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__); goto failure; } - isp->isp_res = kcalloc(MAX_ISP410_INT_SRC + 1, sizeof(struct resource), + num_res = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES + MAX_ISP410_INT_SRC; + isp->isp_res = kcalloc(num_res, sizeof(struct resource), GFP_KERNEL); if (!isp->isp_res) { r = -ENOMEM; @@ -83,22 +83,53 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp) isp->isp_res[0].start = isp_base; 
isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END; - for (int_idx = 0; int_idx < MAX_ISP410_INT_SRC; int_idx++) { - isp->isp_res[int_idx + 1].name = "isp_4_1_0_irq"; - isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ; - isp->isp_res[int_idx + 1].start = + isp->isp_res[1].name = "isp_4_1_phy0_reg"; + isp->isp_res[1].flags = IORESOURCE_MEM; + isp->isp_res[1].start = isp_base + ISP410_PHY0_OFFSET; + isp->isp_res[1].end = isp_base + ISP410_PHY0_OFFSET + ISP410_PHY0_SIZE; + + isp->isp_res[2].name = "isp_gpio_sensor0_reg"; + isp->isp_res[2].flags = IORESOURCE_MEM; + isp->isp_res[2].start = isp_base + ISP410_GPIO_SENSOR0_OFFSET; + isp->isp_res[2].end = isp_base + ISP410_GPIO_SENSOR0_OFFSET + + ISP410_GPIO_SENSOR0_SIZE; + + for (idx = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES, int_idx = 0; + idx < num_res; idx++, int_idx++) { + isp->isp_res[idx].name = "isp_4_1_0_irq"; + isp->isp_res[idx].flags = IORESOURCE_IRQ; + isp->isp_res[idx].start = amdgpu_irq_create_mapping(adev, isp_4_1_0_int_srcid[int_idx]); - isp->isp_res[int_idx + 1].end = - isp->isp_res[int_idx + 1].start; + isp->isp_res[idx].end = + isp->isp_res[idx].start; } isp->isp_cell[0].name = "amd_isp_capture"; - isp->isp_cell[0].num_resources = MAX_ISP410_INT_SRC + 1; + isp->isp_cell[0].num_resources = num_res; isp->isp_cell[0].resources = &isp->isp_res[0]; isp->isp_cell[0].platform_data = isp->isp_pdata; isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data); - r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1); + isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), + GFP_KERNEL); + if (!isp->isp_i2c_res) { + r = -ENOMEM; + DRM_ERROR("%s: isp mfd res alloc failed\n", __func__); + goto failure; + } + + isp->isp_i2c_res[0].name = "isp_i2c0_reg"; + isp->isp_i2c_res[0].flags = IORESOURCE_MEM; + isp->isp_i2c_res[0].start = isp_base + ISP410_I2C0_OFFSET; + isp->isp_i2c_res[0].end = isp_base + ISP410_I2C0_OFFSET + ISP410_I2C0_SIZE; + + isp->isp_cell[1].name = "amd_isp_i2c_designware"; + isp->isp_cell[1].num_resources = 1; + isp->isp_cell[1].resources = &isp->isp_i2c_res[0]; + isp->isp_cell[1].platform_data = isp->isp_pdata; + isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data); + + r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2); if (r) { DRM_ERROR("%s: add mfd hotplug device failed\n", __func__); goto failure; @@ -111,6 +142,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp) kfree(isp->isp_pdata); kfree(isp->isp_res); kfree(isp->isp_cell); + kfree(isp->isp_i2c_res); return r; } @@ -122,6 +154,7 @@ static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp) kfree(isp->isp_res); kfree(isp->isp_cell); kfree(isp->isp_pdata); + kfree(isp->isp_i2c_res); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h index 315f2822410c0e..7db24c0f108080 100644 --- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h @@ -32,8 +32,19 @@ #include "ivsrcid/isp/irqsrcs_isp_4_1.h" +#define MAX_ISP410_MEM_RES 2 +#define MAX_ISP410_SENSOR_RES 1 #define MAX_ISP410_INT_SRC 8 +#define ISP410_PHY0_OFFSET 0x66700 +#define ISP410_PHY0_SIZE 0xD30 + +#define ISP410_I2C0_OFFSET 0x66400 +#define ISP410_I2C0_SIZE 0x100 + +#define ISP410_GPIO_SENSOR0_OFFSET 0x6613C +#define ISP410_GPIO_SENSOR0_SIZE 0x4 + void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c index 4e17fa03f7b5f2..b56f27295468f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c +++ 
b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c @@ -42,23 +42,24 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = { static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) { struct amdgpu_device *adev = isp->adev; + int idx, int_idx, num_res, r; u64 isp_base; - int int_idx; - int r; if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) return -EINVAL; isp_base = adev->rmmio_base; - isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL); + isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL); if (!isp->isp_cell) { r = -ENOMEM; DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__); goto failure; } - isp->isp_res = kcalloc(MAX_ISP411_INT_SRC + 1, sizeof(struct resource), + num_res = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES + MAX_ISP411_INT_SRC; + + isp->isp_res = kcalloc(num_res, sizeof(struct resource), GFP_KERNEL); if (!isp->isp_res) { r = -ENOMEM; @@ -83,22 +84,52 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_res[0].start = isp_base; isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END; - for (int_idx = 0; int_idx < MAX_ISP411_INT_SRC; int_idx++) { - isp->isp_res[int_idx + 1].name = "isp_4_1_1_irq"; - isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ; - isp->isp_res[int_idx + 1].start = + isp->isp_res[1].name = "isp_4_1_1_phy0_reg"; + isp->isp_res[1].flags = IORESOURCE_MEM; + isp->isp_res[1].start = isp_base + ISP411_PHY0_OFFSET; + isp->isp_res[1].end = isp_base + ISP411_PHY0_OFFSET + ISP411_PHY0_SIZE; + + isp->isp_res[2].name = "isp_4_1_1_sensor0_reg"; + isp->isp_res[2].flags = IORESOURCE_MEM; + isp->isp_res[2].start = isp_base + ISP411_GPIO_SENSOR0_OFFSET; + isp->isp_res[2].end = isp_base + ISP411_GPIO_SENSOR0_OFFSET + + ISP411_GPIO_SENSOR0_SIZE; + + for (idx = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES, int_idx = 0; + idx < num_res; idx++, int_idx++) { + isp->isp_res[idx].name = "isp_4_1_1_irq"; + isp->isp_res[idx].flags = IORESOURCE_IRQ; + isp->isp_res[idx].start = amdgpu_irq_create_mapping(adev, isp_4_1_1_int_srcid[int_idx]); - isp->isp_res[int_idx + 1].end = - isp->isp_res[int_idx + 1].start; + isp->isp_res[idx].end = + isp->isp_res[idx].start; } isp->isp_cell[0].name = "amd_isp_capture"; - isp->isp_cell[0].num_resources = MAX_ISP411_INT_SRC + 1; + isp->isp_cell[0].num_resources = num_res; isp->isp_cell[0].resources = &isp->isp_res[0]; isp->isp_cell[0].platform_data = isp->isp_pdata; isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data); - r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1); + isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL); + if (!isp->isp_i2c_res) { + r = -ENOMEM; + DRM_ERROR("%s: isp mfd res alloc failed\n", __func__); + goto failure; + } + + isp->isp_i2c_res[0].name = "isp_i2c0_reg"; + isp->isp_i2c_res[0].flags = IORESOURCE_MEM; + isp->isp_i2c_res[0].start = isp_base + ISP411_I2C0_OFFSET; + isp->isp_i2c_res[0].end = isp_base + ISP411_I2C0_OFFSET + ISP411_I2C0_SIZE; + + isp->isp_cell[1].name = "amd_isp_i2c_designware"; + isp->isp_cell[1].num_resources = 1; + isp->isp_cell[1].resources = &isp->isp_i2c_res[0]; + isp->isp_cell[1].platform_data = isp->isp_pdata; + isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data); + + r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2); if (r) { DRM_ERROR("%s: add mfd hotplug device failed\n", __func__); goto failure; @@ -111,6 +142,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) kfree(isp->isp_pdata); kfree(isp->isp_res); kfree(isp->isp_cell); + kfree(isp->isp_i2c_res); return r; } @@ -122,6 +154,7 @@ static int 
isp_v4_1_1_hw_fini(struct amdgpu_isp *isp) kfree(isp->isp_res); kfree(isp->isp_cell); kfree(isp->isp_pdata); + kfree(isp->isp_i2c_res); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h index dfb9522c9d6a2a..40887ddeb08c01 100644 --- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h @@ -32,8 +32,19 @@ #include "ivsrcid/isp/irqsrcs_isp_4_1.h" +#define MAX_ISP411_MEM_RES 2 +#define MAX_ISP411_SENSOR_RES 1 #define MAX_ISP411_INT_SRC 8 +#define ISP411_PHY0_OFFSET 0x66700 +#define ISP411_PHY0_SIZE 0xD30 + +#define ISP411_I2C0_OFFSET 0x66400 +#define ISP411_I2C0_SIZE 0x100 + +#define ISP411_GPIO_SENSOR0_OFFSET 0x6613C +#define ISP411_GPIO_SENSOR0_SIZE 0x4 + void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index b55041f38cec9b..86958cb2c2ab2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -59,6 +59,12 @@ static int amdgpu_ih_srcid_jpeg[] = { VCN_4_0__SRCID__JPEG7_DECODE }; +static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev) +{ + return amdgpu_sriov_vf(adev) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)); +} + /** * jpeg_v4_0_3_early_init - set function pointers * @@ -734,32 +740,20 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); amdgpu_ring_write(ring, 0); - if (ring->adev->jpeg.inst[ring->me].aid_id) { - amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET, - 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x4); - } else { - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); - amdgpu_ring_write(ring, 0); - } + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x3fbc); - if (ring->adev->jpeg.inst[ring->me].aid_id) { - amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET, - 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x0); - } else { - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); - amdgpu_ring_write(ring, 0); - } - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x1); + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); amdgpu_ring_write(ring, 0); } @@ -834,8 +828,8 @@ void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, { uint32_t reg_offset; - /* For VF, only local offsets should be used */ - if (amdgpu_sriov_vf(ring->adev)) + /* Use normalized offsets if required */ + if (jpeg_v4_0_3_normalizn_reqd(ring->adev)) reg = NORMALIZE_JPEG_REG_OFFSET(reg); reg_offset = (reg << 2); @@ -881,8 +875,8 @@ void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint { uint32_t reg_offset; - /* For VF, only local offsets should be used */ - if (amdgpu_sriov_vf(ring->adev)) + /* Use normalized offsets if required */ + if (jpeg_v4_0_3_normalizn_reqd(ring->adev)) reg = NORMALIZE_JPEG_REG_OFFSET(reg); reg_offset = (reg << 2); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 8aded0a67037b4..231a3d490ea8e3 
100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "soc15_common.h" #include "soc21.h" +#include "gfx_v11_0.h" #include "gc/gc_11_0_0_offset.h" #include "gc/gc_11_0_0_sh_mask.h" #include "gc/gc_11_0_0_default.h" @@ -160,7 +161,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, int api_status_off) { union MESAPI__QUERY_MES_STATUS mes_status_pkt; - signed long timeout = 3000000; /* 3000 ms */ + signed long timeout = 2100000; /* 2100 ms */ struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring[0]; struct MES_API_STATUS *api_status; @@ -360,6 +361,100 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes, offsetof(union MESAPI__REMOVE_QUEUE, api_status)); } +static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type, + uint32_t me_id, uint32_t pipe_id, + uint32_t queue_id, uint32_t vmid) +{ + struct amdgpu_device *adev = mes->adev; + uint32_t value; + int i, r = 0; + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + + if (queue_type == AMDGPU_RING_TYPE_GFX) { + dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n", + me_id, pipe_id, queue_id, vmid); + + mutex_lock(&adev->gfx.reset_sem_mutex); + gfx_v11_0_request_gfx_index_mutex(adev, true); + /* all se allow writes */ + WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, + (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT)); + value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); + if (pipe_id == 0) + value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id); + else + value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id); + WREG32_SOC15(GC, 0, regCP_VMID_RESET, value); + gfx_v11_0_request_gfx_index_mutex(adev, false); + mutex_unlock(&adev->gfx.reset_sem_mutex); + + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0); + /* wait till dequeue take effects */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) { + dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n"); + r = -ETIMEDOUT; + } + + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) { + dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n", + me_id, pipe_id, queue_id); + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0); + WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); + WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); + + /* wait till dequeue take effects */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) { + dev_err(adev->dev, "failed to wait on hqd deactivate\n"); + r = -ETIMEDOUT; + } + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } + + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} + +static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes, + struct mes_reset_queue_input *input) +{ + if (input->use_mmio) + return mes_v11_0_reset_queue_mmio(mes, input->queue_type, + input->me_id, input->pipe_id, + input->queue_id, input->vmid); + + union MESAPI__RESET mes_reset_queue_pkt; + + memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); + + mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; + 
mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset; + mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr; + /*mes_reset_queue_pkt.reset_queue_only = 1;*/ + + return mes_v11_0_submit_pkt_and_poll_completion(mes, + &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), + offsetof(union MESAPI__REMOVE_QUEUE, api_status)); +} + static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes, struct mes_map_legacy_queue_input *input) { @@ -421,13 +516,41 @@ static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes, static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes, struct mes_suspend_gang_input *input) { - return 0; + union MESAPI__SUSPEND mes_suspend_gang_pkt; + + memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt)); + + mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND; + mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs; + mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr; + mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr; + mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value; + + return mes_v11_0_submit_pkt_and_poll_completion(mes, + &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt), + offsetof(union MESAPI__SUSPEND, api_status)); } static int mes_v11_0_resume_gang(struct amdgpu_mes *mes, struct mes_resume_gang_input *input) { - return 0; + union MESAPI__RESUME mes_resume_gang_pkt; + + memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt)); + + mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME; + mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs; + mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr; + + return mes_v11_0_submit_pkt_and_poll_completion(mes, + &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt), + offsetof(union MESAPI__RESUME, api_status)); } static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes) @@ -595,6 +718,43 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes) offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status)); } +static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes, + struct mes_reset_legacy_queue_input *input) +{ + union MESAPI__RESET mes_reset_queue_pkt; + + if (input->use_mmio) + return mes_v11_0_reset_queue_mmio(mes, input->queue_type, + input->me_id, input->pipe_id, + input->queue_id, input->vmid); + + memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); + + mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; + mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_reset_queue_pkt.queue_type = + convert_to_mes_queue_type(input->queue_type); + + if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) { + mes_reset_queue_pkt.reset_legacy_gfx = 1; + mes_reset_queue_pkt.pipe_id_lp = input->pipe_id; + mes_reset_queue_pkt.queue_id_lp = input->queue_id; + mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr; + mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset; + mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr; + mes_reset_queue_pkt.vmid_id_lp = input->vmid; + } else { + mes_reset_queue_pkt.reset_queue_only = 1; + mes_reset_queue_pkt.doorbell_offset = 
input->doorbell_offset; + } + + return mes_v11_0_submit_pkt_and_poll_completion(mes, + &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), + offsetof(union MESAPI__RESET, api_status)); +} + static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .add_hw_queue = mes_v11_0_add_hw_queue, .remove_hw_queue = mes_v11_0_remove_hw_queue, @@ -603,6 +763,8 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .suspend_gang = mes_v11_0_suspend_gang, .resume_gang = mes_v11_0_resume_gang, .misc_op = mes_v11_0_misc_op, + .reset_legacy_queue = mes_v11_0_reset_legacy_queue, + .reset_hw_queue = mes_v11_0_reset_hw_queue, }; static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index a79a8adc3aa5b1..8d27421689c9d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -146,7 +146,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, int api_status_off) { union MESAPI__QUERY_MES_STATUS mes_status_pkt; - signed long timeout = 3000000; /* 3000 ms */ + signed long timeout = 2100000; /* 2100 ms */ struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring[pipe]; spinlock_t *ring_lock = &mes->ring_lock[pipe]; @@ -350,6 +350,32 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes, offsetof(union MESAPI__REMOVE_QUEUE, api_status)); } +static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes, + struct mes_reset_queue_input *input) +{ + union MESAPI__RESET mes_reset_queue_pkt; + int pipe; + + memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); + + mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; + mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset; + mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr; + /*mes_reset_queue_pkt.reset_queue_only = 1;*/ + + if (mes->adev->enable_uni_mes) + pipe = AMDGPU_MES_KIQ_PIPE; + else + pipe = AMDGPU_MES_SCHED_PIPE; + + return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, + &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), + offsetof(union MESAPI__REMOVE_QUEUE, api_status)); +} + static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes, struct mes_map_legacy_queue_input *input) { @@ -453,6 +479,11 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, union MESAPI__MISC misc_pkt; int pipe; + if (mes->adev->enable_uni_mes) + pipe = AMDGPU_MES_KIQ_PIPE; + else + pipe = AMDGPU_MES_SCHED_PIPE; + memset(&misc_pkt, 0, sizeof(misc_pkt)); misc_pkt.header.type = MES_API_TYPE_SCHEDULER; @@ -487,6 +518,7 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1; break; case MES_MISC_OP_SET_SHADER_DEBUGGER: + pipe = AMDGPU_MES_SCHED_PIPE; misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER; misc_pkt.set_shader_debugger.process_context_addr = input->set_shader_debugger.process_context_addr; @@ -504,11 +536,6 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, return -EINVAL; } - if (mes->adev->enable_uni_mes) - pipe = AMDGPU_MES_KIQ_PIPE; - else - pipe = AMDGPU_MES_SCHED_PIPE; - return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, &misc_pkt, sizeof(misc_pkt), offsetof(union MESAPI__MISC, api_status)); @@ -582,6 +609,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) mes_set_hw_res_pkt.disable_mes_log = 1; 
mes_set_hw_res_pkt.use_different_vmid_compute = 1; mes_set_hw_res_pkt.enable_reg_active_poll = 1; + mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; /* * Keep oversubscribe timer for sdma . When we have unmapped doorbell @@ -676,6 +704,44 @@ static void mes_v12_0_enable_unmapped_doorbell_handling( WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data); } +static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes, + struct mes_reset_legacy_queue_input *input) +{ + union MESAPI__RESET mes_reset_queue_pkt; + int pipe; + + memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); + + mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; + mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_reset_queue_pkt.queue_type = + convert_to_mes_queue_type(input->queue_type); + + if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) { + mes_reset_queue_pkt.reset_legacy_gfx = 1; + mes_reset_queue_pkt.pipe_id_lp = input->pipe_id; + mes_reset_queue_pkt.queue_id_lp = input->queue_id; + mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr; + mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset; + mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr; + mes_reset_queue_pkt.vmid_id_lp = input->vmid; + } else { + mes_reset_queue_pkt.reset_queue_only = 1; + mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset; + } + + if (mes->adev->enable_uni_mes) + pipe = AMDGPU_MES_KIQ_PIPE; + else + pipe = AMDGPU_MES_SCHED_PIPE; + + return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, + &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), + offsetof(union MESAPI__RESET, api_status)); +} + static const struct amdgpu_mes_funcs mes_v12_0_funcs = { .add_hw_queue = mes_v12_0_add_hw_queue, .remove_hw_queue = mes_v12_0_remove_hw_queue, @@ -684,6 +750,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = { .suspend_gang = mes_v12_0_suspend_gang, .resume_gang = mes_v12_0_resume_gang, .misc_op = mes_v12_0_misc_op, + .reset_legacy_queue = mes_v12_0_reset_legacy_queue, + .reset_hw_queue = mes_v12_0_reset_hw_queue, }; static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 621761a17ac746..b01bb759d0f4f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -559,22 +559,6 @@ static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags) } -static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev, - int hub_inst) -{ - u32 fed, status; - - status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS); - fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); - if (!amdgpu_sriov_vf(adev)) { - /* clear page fault status and address */ - WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst, - regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1); - } - - return fed; -} - const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = { .get_fb_location = mmhub_v1_8_get_fb_location, .init = mmhub_v1_8_init, @@ -584,7 +568,6 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = { .setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs, .set_clockgating = mmhub_v1_8_set_clockgating, .get_clockgating = mmhub_v1_8_get_clockgating, - .query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status, }; static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = { @@ -670,8 +653,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct 
amdgpu_device *adev, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, &ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); } static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index caf616a2c8a6c5..1d099ffb3a5a2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -25,7 +25,7 @@ #define __MXGPU_NV_H__ #define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 -#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000 +#define NV_MAILBOX_POLL_MSG_TIMEDOUT 15000 #define NV_MAILBOX_POLL_FLR_TIMEDOUT 10000 #define NV_MAILBOX_POLL_MSG_REP_MAX 11 diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index fa479dfa1ec155..739fce4fa8fdfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -365,7 +365,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; } else { - /* Disbale ASPM L1 */ + /* Disable ASPM L1 */ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; /* Disable ASPM TxL0s */ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 1251ee38a6764c..51e470e8d67d9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -81,6 +81,8 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin"); /* memory training timeout define */ #define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 +#define regMP1_PUB_SCRATCH0 0x3b10090 + static int psp_v13_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -807,6 +809,20 @@ static bool psp_v13_0_get_ras_capability(struct psp_context *psp) } } +static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + u32 pmfw_ver; + + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) + return false; + + /* load 4e version of sos if pmfw version less than 85.115.0 */ + pmfw_ver = RREG32(regMP1_PUB_SCRATCH0 / 4); + + return (pmfw_ver < 0x557300); +} + static const struct psp_funcs psp_v13_0_funcs = { .init_microcode = psp_v13_0_init_microcode, .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, @@ -830,6 +846,7 @@ static const struct psp_funcs psp_v13_0_funcs = { .vbflash_stat = psp_v13_0_vbflash_status, .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, .get_ras_capability = psp_v13_0_get_ras_capability, + .is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index aa637541da5844..e65194fe94af65 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -710,7 +710,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) upper_32_bits(wptr_gpu_addr)); wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]); if (ring->use_pollmem) { - /*wptr polling is not enogh fast, directly clean the wptr register */ + /*wptr polling is not enough fast, directly clean the wptr register */ WREG32(mmSDMA0_GFX_RB_WPTR + 
sdma_offsets[i], 0); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 772604feb6acdb..23ef4eb36b407a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -72,6 +72,53 @@ MODULE_FIRMWARE("amdgpu/renoir_sdma.bin"); MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin"); MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin"); +static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_0[] = { + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL) +}; + #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L @@ -1750,6 +1797,8 @@ static int sdma_v4_0_sw_init(void *handle) struct amdgpu_ring *ring; int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); + uint32_t *ptr; /* SDMA trap event */ for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1870,6 +1919,13 @@ static int sdma_v4_0_sw_init(void *handle) return -EINVAL; } + /* Allocate memory for SDMA IP Dump buffer */ + ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), 
GFP_KERNEL); + if (ptr) + adev->sdma.ip_dump = ptr; + else + DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + return r; } @@ -1890,6 +1946,8 @@ static int sdma_v4_0_sw_fini(void *handle) else amdgpu_sdma_destroy_inst_ctx(adev, false); + kfree(adev->sdma.ip_dump); + return 0; } @@ -2292,6 +2350,48 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } +static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); + uint32_t instance_offset; + + if (!adev->sdma.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + drm_printf(p, "\nInstance:%d\n", i); + + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_0[j].reg_name, + adev->sdma.ip_dump[instance_offset + j]); + } +} + +static void sdma_v4_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t instance_offset; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); + + if (!adev->sdma.ip_dump) + return; + + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + for (j = 0; j < reg_count; j++) + adev->sdma.ip_dump[instance_offset + j] = + RREG32(sdma_v4_0_get_reg_offset(adev, i, + sdma_reg_list_4_0[j].reg_offset)); + } + amdgpu_gfx_off_ctrl(adev, true); +} + const struct amd_ip_funcs sdma_v4_0_ip_funcs = { .name = "sdma_v4_0", .early_init = sdma_v4_0_early_init, @@ -2308,6 +2408,8 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = { .set_clockgating_state = sdma_v4_0_set_clockgating_state, .set_powergating_state = sdma_v4_0_set_powergating_state, .get_clockgating_state = sdma_v4_0_get_clockgating_state, + .dump_ip_state = sdma_v4_0_dump_ip_state, + .print_ip_state = sdma_v4_0_print_ip_state, }; static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 2c55bfd935bb37..c77889040760ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -46,6 +46,53 @@ MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin"); MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin"); +static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = { + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS1_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS2_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS3_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UCODE_CHECKSUM), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_OFFSET), + 
SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA_VM_CNTL) +}; + #define mmSMNAID_AID0_MCA_SMU 0x03b30400 #define WREG32_SDMA(instance, offset, value) \ @@ -1291,6 +1338,8 @@ static int sdma_v4_4_2_sw_init(void *handle) int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 aid_id; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); + uint32_t *ptr; /* SDMA trap event */ for (i = 0; i < adev->sdma.num_inst_per_aid; i++) { @@ -1386,6 +1435,13 @@ static int sdma_v4_4_2_sw_init(void *handle) return -EINVAL; } + /* Allocate memory for SDMA IP Dump buffer */ + ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (ptr) + adev->sdma.ip_dump = ptr; + else + DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + return r; } @@ -1406,6 +1462,8 @@ static int sdma_v4_4_2_sw_fini(void *handle) else amdgpu_sdma_destroy_inst_ctx(adev, false); + kfree(adev->sdma.ip_dump); + return 0; } @@ -1799,6 +1857,48 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } +static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); + uint32_t instance_offset; + + if (!adev->sdma.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + drm_printf(p, "\nInstance:%d\n", i); + + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_4_2[j].reg_name, + adev->sdma.ip_dump[instance_offset + j]); + } +} + +static void sdma_v4_4_2_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t instance_offset; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); + + if (!adev->sdma.ip_dump) + return; + + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + for (j = 0; j < reg_count; j++) + adev->sdma.ip_dump[instance_offset + j] = + RREG32(sdma_v4_4_2_get_reg_offset(adev, i, + sdma_reg_list_4_4_2[j].reg_offset)); + } + amdgpu_gfx_off_ctrl(adev, 
true); +} + const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = { .name = "sdma_v4_4_2", .early_init = sdma_v4_4_2_early_init, @@ -1815,6 +1915,8 @@ const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = { .set_clockgating_state = sdma_v4_4_2_set_clockgating_state, .set_powergating_state = sdma_v4_4_2_set_powergating_state, .get_clockgating_state = sdma_v4_4_2_get_clockgating_state, + .dump_ip_state = sdma_v4_4_2_dump_ip_state, + .print_ip_state = sdma_v4_4_2_print_ip_state, }; static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = { @@ -2141,7 +2243,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, &ue_count); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); } static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index b7d33d78bce04a..3e48ea38385de7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -59,6 +59,55 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin"); #define SDMA0_HYP_DEC_REG_END 0x5893 #define SDMA1_HYP_DEC_REG_OFFSET 0x20 +static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_0[] = { + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO), + 
SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2) +}; + static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev); @@ -1341,6 +1390,8 @@ static int sdma_v5_0_sw_init(void *handle) struct amdgpu_ring *ring; int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); + uint32_t *ptr; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, @@ -1378,6 +1429,13 @@ static int sdma_v5_0_sw_init(void *handle) return r; } + /* Allocate memory for SDMA IP Dump buffer */ + ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (ptr) + adev->sdma.ip_dump = ptr; + else + DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + return r; } @@ -1391,6 +1449,8 @@ static int sdma_v5_0_sw_fini(void *handle) amdgpu_sdma_destroy_inst_ctx(adev, false); + kfree(adev->sdma.ip_dump); + return 0; } @@ -1718,7 +1778,49 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -const struct amd_ip_funcs sdma_v5_0_ip_funcs = { +static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); + uint32_t instance_offset; + + if (!adev->sdma.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + drm_printf(p, "\nInstance:%d\n", i); + + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_0[j].reg_name, + adev->sdma.ip_dump[instance_offset + j]); + } +} + +static void sdma_v5_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t instance_offset; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); + + if (!adev->sdma.ip_dump) + return; + + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + for (j = 0; j < reg_count; j++) + adev->sdma.ip_dump[instance_offset + j] = + RREG32(sdma_v5_0_get_reg_offset(adev, i, + sdma_reg_list_5_0[j].reg_offset)); + } + amdgpu_gfx_off_ctrl(adev, true); +} + +static const struct amd_ip_funcs sdma_v5_0_ip_funcs = { .name = "sdma_v5_0", .early_init = sdma_v5_0_early_init, .late_init = NULL, @@ -1734,6 +1836,8 @@ const struct amd_ip_funcs sdma_v5_0_ip_funcs = { .set_clockgating_state = sdma_v5_0_set_clockgating_state, .set_powergating_state = sdma_v5_0_set_powergating_state, .get_clockgating_state = sdma_v5_0_get_clockgating_state, + .dump_ip_state = sdma_v5_0_dump_ip_state, + .print_ip_state = sdma_v5_0_print_ip_state, }; static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h index d4e3c2e696f62e..2ab71f21755a61 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h @@ -24,7 +24,6 @@ #ifndef __SDMA_V5_0_H__ #define __SDMA_V5_0_H__ -extern const struct amd_ip_funcs sdma_v5_0_ip_funcs; extern const struct amdgpu_ip_block_version 
sdma_v5_0_ip_block; #endif /* __SDMA_V5_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 2e72d445415f9e..bc9b240a3488e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -60,6 +60,55 @@ MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin"); #define SDMA0_HYP_DEC_REG_END 0x5893 #define SDMA1_HYP_DEC_REG_OFFSET 0x20 +static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_2[] = { + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2) +}; + static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev); @@ -1224,6 +1273,8 @@ static int sdma_v5_2_sw_init(void *handle) struct amdgpu_ring *ring; int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); + uint32_t *ptr; /* SDMA trap event */ for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1255,6 +1306,13 @@ static int sdma_v5_2_sw_init(void *handle) return r; } + /* Allocate memory for SDMA IP Dump buffer 
*/ + ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (ptr) + adev->sdma.ip_dump = ptr; + else + DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + return r; } @@ -1268,6 +1326,8 @@ static int sdma_v5_2_sw_fini(void *handle) amdgpu_sdma_destroy_inst_ctx(adev, true); + kfree(adev->sdma.ip_dump); + return 0; } @@ -1676,7 +1736,49 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring) amdgpu_gfx_off_ctrl(adev, true); } -const struct amd_ip_funcs sdma_v5_2_ip_funcs = { +static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); + uint32_t instance_offset; + + if (!adev->sdma.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + drm_printf(p, "\nInstance:%d\n", i); + + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_2[j].reg_name, + adev->sdma.ip_dump[instance_offset + j]); + } +} + +static void sdma_v5_2_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t instance_offset; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); + + if (!adev->sdma.ip_dump) + return; + + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + for (j = 0; j < reg_count; j++) + adev->sdma.ip_dump[instance_offset + j] = + RREG32(sdma_v5_2_get_reg_offset(adev, i, + sdma_reg_list_5_2[j].reg_offset)); + } + amdgpu_gfx_off_ctrl(adev, true); +} + +static const struct amd_ip_funcs sdma_v5_2_ip_funcs = { .name = "sdma_v5_2", .early_init = sdma_v5_2_early_init, .late_init = NULL, @@ -1692,6 +1794,8 @@ const struct amd_ip_funcs sdma_v5_2_ip_funcs = { .set_clockgating_state = sdma_v5_2_set_clockgating_state, .set_powergating_state = sdma_v5_2_set_powergating_state, .get_clockgating_state = sdma_v5_2_get_clockgating_state, + .dump_ip_state = sdma_v5_2_dump_ip_state, + .print_ip_state = sdma_v5_2_print_ip_state, }; static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h index b70414fef2a1b3..863145b3a77e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h @@ -24,7 +24,6 @@ #ifndef __SDMA_V5_2_H__ #define __SDMA_V5_2_H__ -extern const struct amd_ip_funcs sdma_v5_2_ip_funcs; extern const struct amdgpu_ip_block_version sdma_v5_2_ip_block; #endif /* __SDMA_V5_2_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index dab4c2db8c9d31..208a1fa9d4e7f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -57,6 +57,63 @@ MODULE_FIRMWARE("amdgpu/sdma_6_1_2.bin"); #define SDMA0_HYP_DEC_REG_END 0x589a #define SDMA1_HYP_DEC_REG_OFFSET 0x20 +static const struct amdgpu_hwip_reg_entry sdma_reg_list_6_0[] = { + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_CHECKSUM), + 
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS), +}; + static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev); @@ -1239,6 +1296,8 @@ static int sdma_v6_0_sw_init(void *handle) struct amdgpu_ring *ring; int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); + uint32_t *ptr; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, @@ -1274,6 +1333,13 @@ static int sdma_v6_0_sw_init(void *handle) return -EINVAL; } + /* Allocate memory for SDMA IP Dump buffer */ + ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (ptr) + adev->sdma.ip_dump = ptr; + else + DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + return r; } @@ -1287,6 +1353,8 @@ static int sdma_v6_0_sw_fini(void *handle) amdgpu_sdma_destroy_inst_ctx(adev, true); + 
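As an aside on the buffer layout used by the sw_init/sw_fini and dump/print helpers added in these SDMA hunks: each IP dump buffer is one flat array of uint32_t holding reg_count words per instance, indexed as instance * reg_count + register. The sketch below is a standalone userspace approximation with made-up names (ip_dump_buf, ip_dump_slot); it is not the amdgpu API, only the indexing scheme the patch relies on.

	#include <stdint.h>
	#include <stdlib.h>

	struct ip_dump_buf {
		uint32_t *regs;              /* num_instances * reg_count words */
		unsigned int num_instances;
		unsigned int reg_count;
	};

	/* mirrors the kcalloc() done in the sw_init hooks above */
	static int ip_dump_buf_init(struct ip_dump_buf *b,
				    unsigned int num_instances,
				    unsigned int reg_count)
	{
		b->regs = calloc((size_t)num_instances * reg_count, sizeof(uint32_t));
		if (!b->regs)
			return -1;
		b->num_instances = num_instances;
		b->reg_count = reg_count;
		return 0;
	}

	/* slot that dump_ip_state writes and print_ip_state later reads */
	static inline uint32_t *ip_dump_slot(struct ip_dump_buf *b,
					     unsigned int inst, unsigned int reg)
	{
		unsigned int instance_offset = inst * b->reg_count;

		return &b->regs[instance_offset + reg];
	}

	/* mirrors the kfree() done in the sw_fini hooks above */
	static void ip_dump_buf_fini(struct ip_dump_buf *b)
	{
		free(b->regs);
		b->regs = NULL;
	}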
kfree(adev->sdma.ip_dump); + return 0; } @@ -1488,6 +1556,48 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags) { } +static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); + uint32_t instance_offset; + + if (!adev->sdma.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + drm_printf(p, "\nInstance:%d\n", i); + + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_6_0[j].reg_name, + adev->sdma.ip_dump[instance_offset + j]); + } +} + +static void sdma_v6_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t instance_offset; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); + + if (!adev->sdma.ip_dump) + return; + + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + for (j = 0; j < reg_count; j++) + adev->sdma.ip_dump[instance_offset + j] = + RREG32(sdma_v6_0_get_reg_offset(adev, i, + sdma_reg_list_6_0[j].reg_offset)); + } + amdgpu_gfx_off_ctrl(adev, true); +} + const struct amd_ip_funcs sdma_v6_0_ip_funcs = { .name = "sdma_v6_0", .early_init = sdma_v6_0_early_init, @@ -1505,6 +1615,8 @@ const struct amd_ip_funcs sdma_v6_0_ip_funcs = { .set_clockgating_state = sdma_v6_0_set_clockgating_state, .set_powergating_state = sdma_v6_0_set_powergating_state, .get_clockgating_state = sdma_v6_0_get_clockgating_state, + .dump_ip_state = sdma_v6_0_dump_ip_state, + .print_ip_state = sdma_v6_0_print_ip_state, }; static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index ecee9e7d7e4c6b..a8763496aed315 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -51,6 +51,64 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin"); #define SDMA0_HYP_DEC_REG_END 0x589a #define SDMA1_HYP_DEC_REG_OFFSET 0x20 +static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = { + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_REV), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO), + 
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_VM_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), + SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS), +}; + static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev); @@ -1022,13 +1080,16 @@ static void sdma_v7_0_vm_copy_pte(struct amdgpu_ib *ib, unsigned bytes = count * 8; ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | + SDMA_PKT_COPY_LINEAR_HEADER_CPV(1); + ib->ptr[ib->length_dw++] = bytes - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src); ib->ptr[ib->length_dw++] = upper_32_bits(src); ib->ptr[ib->length_dw++] = lower_32_bits(pe); ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = 0; } @@ -1217,6 +1278,8 @@ static int sdma_v7_0_sw_init(void *handle) struct amdgpu_ring *ring; int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); + uint32_t *ptr; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, @@ -1247,6 +1310,13 @@ static int sdma_v7_0_sw_init(void *handle) return r; } + /* Allocate memory for SDMA IP Dump buffer */ + ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (ptr) + adev->sdma.ip_dump = ptr; + else + DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + return r; } @@ -1263,6 +1333,8 @@ static int sdma_v7_0_sw_fini(void *handle) if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) sdma_v12_0_free_ucode_buffer(adev); + kfree(adev->sdma.ip_dump); + return 0; } @@ -1466,6 +1538,48 @@ static void 
sdma_v7_0_get_clockgating_state(void *handle, u64 *flags) { } +static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); + uint32_t instance_offset; + + if (!adev->sdma.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + drm_printf(p, "\nInstance:%d\n", i); + + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_7_0[j].reg_name, + adev->sdma.ip_dump[instance_offset + j]); + } +} + +static void sdma_v7_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t instance_offset; + uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); + + if (!adev->sdma.ip_dump) + return; + + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < adev->sdma.num_instances; i++) { + instance_offset = i * reg_count; + for (j = 0; j < reg_count; j++) + adev->sdma.ip_dump[instance_offset + j] = + RREG32(sdma_v7_0_get_reg_offset(adev, i, + sdma_reg_list_7_0[j].reg_offset)); + } + amdgpu_gfx_off_ctrl(adev, true); +} + const struct amd_ip_funcs sdma_v7_0_ip_funcs = { .name = "sdma_v7_0", .early_init = sdma_v7_0_early_init, @@ -1483,6 +1597,8 @@ const struct amd_ip_funcs sdma_v7_0_ip_funcs = { .set_clockgating_state = sdma_v7_0_set_clockgating_state, .set_powergating_state = sdma_v7_0_set_powergating_state, .get_clockgating_state = sdma_v7_0_get_clockgating_state, + .dump_ip_state = sdma_v7_0_dump_ip_state, + .print_ip_state = sdma_v7_0_print_ip_state, }; static const struct amdgpu_ring_funcs sdma_v7_0_ring_funcs = { @@ -1631,7 +1747,7 @@ static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev) } static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = { - .copy_pte_num_dw = 7, + .copy_pte_num_dw = 8, .copy_pte = sdma_v7_0_vm_copy_pte, .write_pte = sdma_v7_0_vm_write_pte, .set_pte_pde = sdma_v7_0_vm_set_pte_pde, diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c index e4e30b9d481b4a..c04fdd2d5b3898 100644 --- a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c @@ -60,7 +60,7 @@ static void smuio_v9_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *f { u32 data; - /* CGTT_ROM_CLK_CTRL0 is not availabe for APUs */ + /* CGTT_ROM_CLK_CTRL0 is not available for APUs */ if (adev->flags & AMD_IS_APU) return; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 282584a48be095..ef7c603b50ae32 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -93,6 +93,10 @@ struct soc15_ras_field_entry { #define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset) +/* Over ride the instance id */ +#define SOC15_REG_ENTRY_OFFSET_INST(entry, inst) \ + (adev->reg_offset[entry.hwip][inst][entry.seg] + entry.reg_offset) + #define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \ { ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h index e74e1983da53aa..b9cbeb389edc1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15d.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h @@ -413,6 +413,10 @@ # define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2) # define 
PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25) +#define PACKET3_RUN_CLEANER_SHADER 0xD2 +/* 1. header + * 2. RESERVED [31:0] + */ #define VCE_CMD_NO_OP 0x00000000 #define VCE_CMD_END 0x00000001 diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c index b0c3678cfb31d8..fd4c3d4f838798 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc24.c +++ b/drivers/gpu/drm/amd/amdgpu/soc24.c @@ -250,13 +250,6 @@ static void soc24_program_aspm(struct amdgpu_device *adev) adev->nbio.funcs->program_aspm(adev); } -static void soc24_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version soc24_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, @@ -454,6 +447,11 @@ static int soc24_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_get_irq(adev); + /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init. + */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; } @@ -491,7 +489,7 @@ static int soc24_common_hw_init(void *handle) adev->df.funcs->hw_init(adev); /* enable the doorbell aperture */ - soc24_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true); return 0; } @@ -500,8 +498,13 @@ static int soc24_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* disable the doorbell aperture */ - soc24_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because soc21_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. 
+ */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_put_irq(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 9dbb13adb6613d..1a8ea834efa6bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -157,9 +157,9 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev, umc_v12_0_query_error_count_per_type(adev, umc_reg_offset, &de_count, umc_v12_0_is_deferred_error); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); - amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); + amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, de_count); return 0; } @@ -225,26 +225,16 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, } } -static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev, - struct ta_ras_query_address_input *addr_in, - uint64_t *pfns, int len) +static void umc_v12_0_dump_addr_info(struct amdgpu_device *adev, + struct ta_ras_query_address_output *addr_out, + uint64_t err_addr) { uint32_t col, row, row_xor, bank, channel_index; - uint64_t soc_pa, retired_page, column, err_addr; - struct ta_ras_query_address_output addr_out; - uint32_t pos = 0; - - err_addr = addr_in->ma.err_addr; - addr_in->addr_type = TA_RAS_MCA_TO_PA; - if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) { - dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx", - err_addr); - return 0; - } + uint64_t soc_pa, retired_page, column; - soc_pa = addr_out.pa.pa; - bank = addr_out.pa.bank; - channel_index = addr_out.pa.channel_idx; + soc_pa = addr_out->pa.pa; + bank = addr_out->pa.bank; + channel_index = addr_out->pa.channel_idx; col = (err_addr >> 1) & 0x1fULL; row = (err_addr >> 10) & 0x3fffULL; @@ -258,11 +248,6 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev, for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) { retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT); retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT); - - if (pos >= len) - return 0; - pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT; - /* include column bit 0 and 1 */ col &= 0x3; col |= (column << 2); @@ -270,6 +255,35 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev, "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", retired_page, row, col, bank, channel_index); + /* shift R13 bit */ + retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT); + dev_info(adev->dev, + "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", + retired_page, row_xor, col, bank, channel_index); + } +} + +static int umc_v12_0_lookup_bad_pages_in_a_row(struct amdgpu_device *adev, + uint64_t pa_addr, uint64_t *pfns, int len) +{ + uint64_t soc_pa, retired_page, column; + uint32_t pos = 0; + + soc_pa = pa_addr; + /* clear [C3 C2] in soc physical address */ + soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT); + /* clear [C4] in soc physical address */ + soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT); + + /* loop for all possibilities of [C4 C3 C2] */ + for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) { + retired_page = 
soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT); + retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT); + + if (pos >= len) + return 0; + pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + /* shift R13 bit */ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT); @@ -277,14 +291,40 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev, return 0; pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT; - dev_info(adev->dev, - "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", - retired_page, row_xor, col, bank, channel_index); } return pos; } +static int umc_v12_0_convert_mca_to_addr(struct amdgpu_device *adev, + uint64_t err_addr, uint32_t ch, uint32_t umc, + uint32_t node, uint32_t socket, + uint64_t *addr, bool dump_addr) +{ + struct ta_ras_query_address_input addr_in; + struct ta_ras_query_address_output addr_out; + + memset(&addr_in, 0, sizeof(addr_in)); + addr_in.ma.err_addr = err_addr; + addr_in.ma.ch_inst = ch; + addr_in.ma.umc_inst = umc; + addr_in.ma.node_inst = node; + addr_in.ma.socket_id = socket; + addr_in.addr_type = TA_RAS_MCA_TO_PA; + if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) { + dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx", + err_addr); + return -EINVAL; + } + + if (dump_addr) + umc_v12_0_dump_addr_info(adev, &addr_out, err_addr); + + *addr = addr_out.pa.pa; + + return 0; +} + static int umc_v12_0_query_error_address(struct amdgpu_device *adev, uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, void *data) @@ -483,12 +523,10 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); uint16_t hwid, mcatype; - struct ta_ras_query_address_input addr_in; uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL]; - uint64_t err_addr, hash_val = 0; + uint64_t err_addr, pa_addr = 0; struct ras_ecc_err *ecc_err; - int count; - int ret; + int count, ret, i; hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID); mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType); @@ -514,46 +552,25 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, MCA_IPID_2_UMC_CH(ipid), err_addr); - memset(page_pfn, 0, sizeof(page_pfn)); - - memset(&addr_in, 0, sizeof(addr_in)); - addr_in.ma.err_addr = err_addr; - addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid); - addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid); - addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid); - addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid); - - count = umc_v12_0_convert_err_addr(adev, - &addr_in, page_pfn, ARRAY_SIZE(page_pfn)); - if (count <= 0) { - dev_warn(adev->dev, "Fail to convert error address! 
count:%d\n", count); - return 0; - } - - ret = amdgpu_umc_build_pages_hash(adev, - page_pfn, count, &hash_val); - if (ret) { - dev_err(adev->dev, "Fail to build error pages hash\n"); + ret = umc_v12_0_convert_mca_to_addr(adev, + err_addr, MCA_IPID_2_UMC_CH(ipid), + MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid), + MCA_IPID_2_SOCKET_ID(ipid), &pa_addr, true); + if (ret) return ret; - } ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL); if (!ecc_err) return -ENOMEM; - ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL); - if (!ecc_err->err_pages.pfn) { - kfree(ecc_err); - return -ENOMEM; - } - - memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn)); - ecc_err->err_pages.count = count; - - ecc_err->hash_index = hash_val; ecc_err->status = status; ecc_err->ipid = ipid; ecc_err->addr = addr; + ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT; + + /* If converted pa_pfn is 0, use pa C4 pfn. */ + if (!ecc_err->pa_pfn) + ecc_err->pa_pfn = BIT_ULL(UMC_V12_0_PA_C4_BIT) >> AMDGPU_GPU_PAGE_SHIFT; ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err); if (ret) { @@ -562,13 +579,25 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, else dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret); - kfree(ecc_err->err_pages.pfn); kfree(ecc_err); return ret; } con->umc_ecc_log.de_queried_count++; + memset(page_pfn, 0, sizeof(page_pfn)); + count = umc_v12_0_lookup_bad_pages_in_a_row(adev, + pa_addr, + page_pfn, ARRAY_SIZE(page_pfn)); + if (count <= 0) { + dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count); + return 0; + } + + /* Reserve memory */ + for (i = 0; i < count; i++) + amdgpu_ras_reserve_page(adev, page_pfn[i]); + /* The problem case is as follows: * 1. GPU A triggers a gpu ras reset, and GPU A drives * GPU B to also perform a gpu ras reset. 
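To make the address expansion above concrete: umc_v12_0_lookup_bad_pages_in_a_row() clears the column bits [C4 C3 C2] out of the SoC physical address, regenerates every combination of those bits, and for each combination also records the R13-flipped ("row xor") variant, yielding up to 16 page frame numbers per faulty row. The standalone sketch below reproduces only that enumeration; the EX_* bit positions and page shift are invented example values, not the real UMC 12.0 address layout.

	#include <stdint.h>
	#include <stdio.h>

	#define EX_PA_C2_BIT     15   /* hypothetical position of column bit C2 */
	#define EX_PA_C4_BIT     21   /* hypothetical position of column bit C4 */
	#define EX_PA_R13_BIT    35   /* hypothetical position of row bit R13 */
	#define EX_PAGE_SHIFT    12   /* hypothetical GPU page shift */
	#define EX_COLUMN_COMBOS 8    /* all values of [C4 C3 C2] */

	static int lookup_pages_in_row(uint64_t pa, uint64_t *pfns, int len)
	{
		uint64_t base, page;
		int col, pos = 0;

		/* clear [C3 C2] and [C4] so every combination can be rebuilt */
		base = pa & ~(0x3ULL << EX_PA_C2_BIT);
		base &= ~(0x1ULL << EX_PA_C4_BIT);

		for (col = 0; col < EX_COLUMN_COMBOS; col++) {
			page = base | ((uint64_t)(col & 0x3) << EX_PA_C2_BIT);
			page |= ((uint64_t)((col & 0x4) >> 2) << EX_PA_C4_BIT);

			if (pos >= len)
				return pos;
			pfns[pos++] = page >> EX_PAGE_SHIFT;

			/* row-xor variant: same column bits, R13 flipped */
			page ^= 0x1ULL << EX_PA_R13_BIT;
			if (pos >= len)
				return pos;
			pfns[pos++] = page >> EX_PAGE_SHIFT;
		}
		return pos;   /* up to 16 candidate pages per row */
	}

	int main(void)
	{
		uint64_t pfns[16];
		int n = lookup_pages_in_row(0x123456789ULL, pfns, 16);

		for (int i = 0; i < n; i++)
			printf("pfn[%d] = 0x%llx\n", i, (unsigned long long)pfns[i]);
		return 0;
	}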
@@ -593,16 +622,21 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev, struct ras_ecc_err *ecc_err, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - uint32_t i = 0; - int ret = 0; + uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL]; + int ret, i, count; if (!err_data || !ecc_err) return -EINVAL; - for (i = 0; i < ecc_err->err_pages.count; i++) { + memset(page_pfn, 0, sizeof(page_pfn)); + count = umc_v12_0_lookup_bad_pages_in_a_row(adev, + ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT, + page_pfn, ARRAY_SIZE(page_pfn)); + + for (i = 0; i < count; i++) { ret = amdgpu_umc_fill_error_record(err_data, ecc_err->addr, - ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT, + page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT, MCA_IPID_2_UMC_CH(ecc_err->ipid), MCA_IPID_2_UMC_INST(ecc_err->ipid)); if (ret) @@ -636,7 +670,8 @@ static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev, dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret); break; } - radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG); + radix_tree_tag_clear(ecc_tree, + entries[i]->pa_pfn, UMC_ECC_NEW_DETECTED_TAG); } mutex_unlock(&con->umc_ecc_log.lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h index b4974793850b07..be5598d76c1db2 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -81,6 +81,11 @@ (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \ (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03)) +#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \ + ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \ + (0x1ULL << UMC_V12_0_PA_C4_BIT) | \ + (0x1ULL << UMC_V12_0_PA_R13_BIT))) + bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status); bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 32517c364cf7a1..4bfba2931b088c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -950,7 +950,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { .get_rptr = vce_v3_0_ring_get_rptr, .get_wptr = vce_v3_0_ring_get_wptr, .set_wptr = vce_v3_0_ring_set_wptr, - .parse_cs = amdgpu_vce_ring_parse_cs_vm, + .patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm, .emit_frame_size = 6 + /* vce_v3_0_emit_vm_flush */ 4 + /* vce_v3_0_emit_pipeline_sync */ diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 06d787385ad460..0748bf44c88086 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -1102,7 +1102,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = { .get_rptr = vce_v4_0_ring_get_rptr, .get_wptr = vce_v4_0_ring_get_wptr, .set_wptr = vce_v4_0_ring_set_wptr, - .parse_cs = amdgpu_vce_ring_parse_cs_vm, + .patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index a280b9fecb7732..ecdfbfefd66ad2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -45,6 +45,42 @@ #define mmUVD_REG_XX_MASK_1_0 0x05ac #define 
mmUVD_REG_XX_MASK_1_0_BASE_IDX 1 +static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = { + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE) +}; + static int vcn_v1_0_stop(struct amdgpu_device *adev); static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); @@ -90,6 +126,8 @@ static int vcn_v1_0_sw_init(void *handle) { struct amdgpu_ring *ring; int i, r; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); + uint32_t *ptr; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* VCN DEC TRAP */ @@ -161,6 +199,14 @@ static int vcn_v1_0_sw_init(void *handle) r = jpeg_v1_0_sw_init(handle); + /* Allocate memory for VCN IP Dump buffer */ + ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); + adev->vcn.ip_dump = NULL; + } else { + adev->vcn.ip_dump = ptr; + } return r; } @@ -184,6 +230,8 @@ static int vcn_v1_0_sw_fini(void *handle) r = amdgpu_vcn_sw_fini(adev); + kfree(adev->vcn.ip_dump); + return r; } @@ -1877,6 +1925,66 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring) mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround); } +static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); + uint32_t inst_off, is_powered; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * reg_count; + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < reg_count; 
j++) + drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} + +static void vcn_v1_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + bool is_powered; + uint32_t inst_off; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_off = i * reg_count; + /* mmUVD_POWER_STATUS is always readable and is first element of the array */ + adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) + for (j = 1; j < reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i)); + } +} + static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .name = "vcn_v1_0", .early_init = vcn_v1_0_early_init, @@ -1895,8 +2003,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */, .set_clockgating_state = vcn_v1_0_set_clockgating_state, .set_powergating_state = vcn_v1_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v1_0_dump_ip_state, + .print_ip_state = vcn_v1_0_print_ip_state, }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index d3d096909a7f46..bfd067e2d2f1d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -53,6 +53,42 @@ #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2 +static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = { + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE) +}; + static void 
vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev); @@ -96,6 +132,8 @@ static int vcn_v2_0_sw_init(void *handle) { struct amdgpu_ring *ring; int i, r; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); + uint32_t *ptr; struct amdgpu_device *adev = (struct amdgpu_device *)handle; volatile struct amdgpu_fw_shared *fw_shared; @@ -184,6 +222,15 @@ static int vcn_v2_0_sw_init(void *handle) if (amdgpu_vcnfw_log) amdgpu_vcn_fwlog_init(adev->vcn.inst); + /* Allocate memory for VCN IP Dump buffer */ + ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); + adev->vcn.ip_dump = NULL; + } else { + adev->vcn.ip_dump = ptr; + } + return 0; } @@ -213,6 +260,8 @@ static int vcn_v2_0_sw_fini(void *handle) r = amdgpu_vcn_sw_fini(adev); + kfree(adev->vcn.ip_dump); + return r; } @@ -1985,6 +2034,66 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); } +static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); + uint32_t inst_off, is_powered; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * reg_count; + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} + +static void vcn_v2_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + bool is_powered; + uint32_t inst_off; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_off = i * reg_count; + /* mmUVD_POWER_STATUS is always readable and is first element of the array */ + adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) + for (j = 1; j < reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i)); + } +} + static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, @@ -2003,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_0_set_clockgating_state, .set_powergating_state = vcn_v2_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v2_0_dump_ip_state, + .print_ip_state = vcn_v2_0_print_ip_state, }; static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 96f60c30316100..04e9e806e3187f 
100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -55,6 +55,43 @@ #define VCN25_MAX_HW_INSTANCES_ARCTURUS 2 +static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = { + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE) +}; + static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev); @@ -122,6 +159,8 @@ static int vcn_v2_5_sw_init(void *handle) { struct amdgpu_ring *ring; int i, j, r; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); + uint32_t *ptr; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { @@ -241,6 +280,15 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; + /* Allocate memory for VCN IP Dump buffer */ + ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); + adev->vcn.ip_dump = NULL; + } else { + adev->vcn.ip_dump = ptr; + } + return 0; } @@ -277,6 +325,8 @@ static int vcn_v2_5_sw_fini(void *handle) r = amdgpu_vcn_sw_fini(adev); + kfree(adev->vcn.ip_dump); + return r; } @@ -1876,6 +1926,66 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) } } +static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); + uint32_t inst_off, is_powered; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * reg_count; + is_powered = 
(adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} + +static void vcn_v2_5_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + bool is_powered; + uint32_t inst_off; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_off = i * reg_count; + /* mmUVD_POWER_STATUS is always readable and is first element of the array */ + adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) + for (j = 1; j < reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i)); + } +} + static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .name = "vcn_v2_5", .early_init = vcn_v2_5_early_init, @@ -1894,8 +2004,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v2_5_dump_ip_state, + .print_ip_state = vcn_v2_5_print_ip_state, }; static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { @@ -1916,8 +2026,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v2_5_dump_ip_state, + .print_ip_state = vcn_v2_5_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v2_5_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 24f947751c463b..65dd68b322806e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -60,6 +60,42 @@ #define RDECODE_MSG_CREATE 0x00000000 #define RDECODE_MESSAGE_CREATE 0x00000001 +static const struct amdgpu_hwip_reg_entry vcn_reg_list_3_0[] = { + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3), + 
SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE) +}; + static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN, SOC15_IH_CLIENTID_VCN1 @@ -126,6 +162,8 @@ static int vcn_v3_0_sw_init(void *handle) struct amdgpu_ring *ring; int i, j, r; int vcn_doorbell_index = 0; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); + uint32_t *ptr; struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_vcn_sw_init(adev); @@ -246,6 +284,15 @@ static int vcn_v3_0_sw_init(void *handle) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode; + /* Allocate memory for VCN IP Dump buffer */ + ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (ptr == NULL) { + DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); + adev->vcn.ip_dump = NULL; + } else { + adev->vcn.ip_dump = ptr; + } + return 0; } @@ -284,6 +331,7 @@ static int vcn_v3_0_sw_fini(void *handle) r = amdgpu_vcn_sw_fini(adev); + kfree(adev->vcn.ip_dump); return r; } @@ -2203,6 +2251,67 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev) } } +static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); + uint32_t inst_off; + bool is_powered; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * reg_count; + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} + +static void vcn_v3_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + bool is_powered; + uint32_t inst_off; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_off = i * reg_count; + /* mmUVD_POWER_STATUS is always readable and is first element of the array */ + adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) + for (j = 1; j < reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i)); + } +} + static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .name = "vcn_v3_0", .early_init = vcn_v3_0_early_init, @@ -2221,8 +2330,8 @@ static const struct amd_ip_funcs 
vcn_v3_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v3_0_set_clockgating_state, .set_powergating_state = vcn_v3_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v3_0_dump_ip_state, + .print_ip_state = vcn_v3_0_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v3_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 776c539bfddacb..26c6f10a8c8fae 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -52,6 +52,42 @@ #define RDECODE_MSG_CREATE 0x00000000 #define RDECODE_MESSAGE_CREATE 0x00000001 +static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0[] = { + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE) +}; + static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN, SOC15_IH_CLIENTID_VCN1 @@ -137,6 +173,8 @@ static int vcn_v4_0_sw_init(void *handle) struct amdgpu_ring *ring; struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i, r; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); + uint32_t *ptr; r = amdgpu_vcn_sw_init(adev); if (r) @@ -200,6 +238,15 @@ static int vcn_v4_0_sw_init(void *handle) if (r) return r; + /* Allocate memory for VCN IP Dump buffer */ + ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); + adev->vcn.ip_dump = NULL; + } else { + adev->vcn.ip_dump = ptr; + } + return 0; } @@ -239,6 +286,8 @@ static int vcn_v4_0_sw_fini(void *handle) r = amdgpu_vcn_sw_fini(adev); + kfree(adev->vcn.ip_dump); + return r; } @@ -2109,6 +2158,67 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) } } +static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = 
ARRAY_SIZE(vcn_reg_list_4_0); + uint32_t inst_off, is_powered; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * reg_count; + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} + +static void vcn_v4_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + bool is_powered; + uint32_t inst_off; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_off = i * reg_count; + /* mmUVD_POWER_STATUS is always readable and is first element of the array */ + adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) + for (j = 1; j < reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j], + i)); + } +} + static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .name = "vcn_v4_0", .early_init = vcn_v4_0_early_init, @@ -2127,8 +2237,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_set_clockgating_state, .set_powergating_state = vcn_v4_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v4_0_dump_ip_state, + .print_ip_state = vcn_v4_0_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 9bae95538b6287..0fda703363004f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -45,6 +45,42 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300 +static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = { + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3), + 
SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE) +}; + #define NORMALIZE_VCN_REG_OFFSET(offset) \ (offset & 0x1FFFF) @@ -92,6 +128,8 @@ static int vcn_v4_0_3_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; int i, r, vcn_inst; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); + uint32_t *ptr; r = amdgpu_vcn_sw_init(adev); if (r) @@ -159,6 +197,15 @@ static int vcn_v4_0_3_sw_init(void *handle) } } + /* Allocate memory for VCN IP Dump buffer */ + ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); + adev->vcn.ip_dump = NULL; + } else { + adev->vcn.ip_dump = ptr; + } + return 0; } @@ -194,6 +241,8 @@ static int vcn_v4_0_3_sw_fini(void *handle) r = amdgpu_vcn_sw_fini(adev); + kfree(adev->vcn.ip_dump); + return r; } @@ -1684,6 +1733,68 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; } +static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); + uint32_t inst_off, is_powered; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * reg_count; + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} + +static void vcn_v4_0_3_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + bool is_powered; + uint32_t inst_off, inst_id; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_id = GET_INST(VCN, i); + inst_off = i * reg_count; + /* mmUVD_POWER_STATUS is always readable and is first element of the array */ + adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) + for (j = 1; j < reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j], + inst_id)); + } +} + static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .name = "vcn_v4_0_3", .early_init = vcn_v4_0_3_early_init, @@ -1702,8 +1813,8 @@ static const struct amd_ip_funcs 
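/*
 * Editor's note, illustrative only: VCN 4.0.3 is the one variant above that
 * distinguishes the logical instance index from the physical one.  The dump
 * buffer is indexed by the logical index i (inst_off = i * reg_count), while
 * the MMIO read uses GET_INST(VCN, i) to translate to the physical instance.
 * A minimal stand-alone model of that split, with a made-up mapping table:
 */
#include <stdio.h>

/* hypothetical logical -> physical VCN instance map (GET_INST stand-in) */
static const int vcn_inst_map[] = { 0, 2, 4, 6 };

#define GET_VCN_INST(logical)  (vcn_inst_map[(logical)])

int main(void)
{
    const unsigned int reg_count = 8;   /* stands for ARRAY_SIZE(vcn_reg_list_4_0_3) */
    unsigned int i;

    for (i = 0; i < 4; i++) {
        unsigned int inst_off = i * reg_count;   /* buffer slot: logical index */
        int inst_id = GET_VCN_INST(i);           /* MMIO target: physical instance */

        printf("logical VCN%u -> buffer offset %u, physical instance %d\n",
               i, inst_off, inst_id);
    }
    return 0;
}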
vcn_v4_0_3_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_3_set_clockgating_state, .set_powergating_state = vcn_v4_0_3_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v4_0_3_dump_ip_state, + .print_ip_state = vcn_v4_0_3_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 8d75061f9f3847..9d4f5352a62c8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -52,6 +52,42 @@ #define RDECODE_MSG_CREATE 0x00000000 #define RDECODE_MESSAGE_CREATE 0x00000001 +static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_5[] = { + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE) +}; + static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN, SOC15_IH_CLIENTID_VCN1 @@ -97,6 +133,8 @@ static int vcn_v4_0_5_sw_init(void *handle) struct amdgpu_ring *ring; struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i, r; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); + uint32_t *ptr; r = amdgpu_vcn_sw_init(adev); if (r) @@ -168,6 +206,14 @@ static int vcn_v4_0_5_sw_init(void *handle) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode; + /* Allocate memory for VCN IP Dump buffer */ + ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); + adev->vcn.ip_dump = NULL; + } else { + adev->vcn.ip_dump = ptr; + } return 0; } @@ -207,6 +253,8 @@ static int vcn_v4_0_5_sw_fini(void *handle) r = amdgpu_vcn_sw_fini(adev); + kfree(adev->vcn.ip_dump); + return r; } @@ -1347,170 +1395,6 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring) } } -static int vcn_v4_0_5_limit_sched(struct amdgpu_cs_parser *p, - struct amdgpu_job 
*job) -{ - struct drm_gpu_scheduler **scheds; - - /* The create msg must be in the first IB submitted */ - if (atomic_read(&job->base.entity->fence_seq)) - return -EINVAL; - - /* if VCN0 is harvested, we can't support AV1 */ - if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) - return -EINVAL; - - scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC] - [AMDGPU_RING_PRIO_0].sched; - drm_sched_entity_modify_sched(job->base.entity, scheds, 1); - return 0; -} - -static int vcn_v4_0_5_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, - uint64_t addr) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_va_mapping *map; - uint32_t *msg, num_buffers; - struct amdgpu_bo *bo; - uint64_t start, end; - unsigned int i; - void *ptr; - int r; - - addr &= AMDGPU_GMC_HOLE_MASK; - r = amdgpu_cs_find_mapping(p, addr, &bo, &map); - if (r) { - DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr); - return r; - } - - start = map->start * AMDGPU_GPU_PAGE_SIZE; - end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE; - if (addr & 0x7) { - DRM_ERROR("VCN messages must be 8 byte aligned!\n"); - return -EINVAL; - } - - bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (r) { - DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r); - return r; - } - - r = amdgpu_bo_kmap(bo, &ptr); - if (r) { - DRM_ERROR("Failed mapping the VCN message (%d)!\n", r); - return r; - } - - msg = ptr + addr - start; - - /* Check length */ - if (msg[1] > end - addr) { - r = -EINVAL; - goto out; - } - - if (msg[3] != RDECODE_MSG_CREATE) - goto out; - - num_buffers = msg[2]; - for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { - uint32_t offset, size, *create; - - if (msg[0] != RDECODE_MESSAGE_CREATE) - continue; - - offset = msg[1]; - size = msg[2]; - - if (offset + size > end) { - r = -EINVAL; - goto out; - } - - create = ptr + addr + offset - start; - - /* H264, HEVC and VP9 can run on any instance */ - if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) - continue; - - r = vcn_v4_0_5_limit_sched(p, job); - if (r) - goto out; - } - -out: - amdgpu_bo_kunmap(bo); - return r; -} - -#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002) -#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003) - -#define RADEON_VCN_ENGINE_INFO (0x30000001) -#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16 - -#define RENCODE_ENCODE_STANDARD_AV1 2 -#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003 -#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64 - -/* return the offset in ib if id is found, -1 otherwise - * to speed up the searching we only search upto max_offset - */ -static int vcn_v4_0_5_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset) -{ - int i; - - for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) { - if (ib->ptr[i + 1] == id) - return i; - } - return -1; -} - -static int vcn_v4_0_5_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, - struct amdgpu_job *job, - struct amdgpu_ib *ib) -{ - struct amdgpu_ring *ring = amdgpu_job_ring(job); - struct amdgpu_vcn_decode_buffer *decode_buffer; - uint64_t addr; - uint32_t val; - int idx; - - /* The first instance can decode anything */ - if (!ring->me) - return 0; - - /* RADEON_VCN_ENGINE_INFO is at the top of ib block */ - idx = vcn_v4_0_5_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, - RADEON_VCN_ENGINE_INFO_MAX_OFFSET); - if (idx < 0) /* engine info is missing */ - return 0; - - val = 
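/*
 * Editor's sketch, not part of the patch: the vcn_v4_0_5_enc_find_ib_param()
 * helper being removed here walks the IB as a sequence of packets of the form
 * { size_in_bytes, id, payload... }, advancing by size/4 dwords until it
 * finds the requested id or runs past max_offset.  The same scan in
 * stand-alone form, over a synthetic dword stream:
 */
#include <stdint.h>
#include <stdio.h>

/* returns the dword index of the packet whose id matches, or -1 */
static int find_ib_param(const uint32_t *ptr, int length_dw, uint32_t id, int max_offset)
{
    int i;

    /* ptr[i] is the packet size in bytes; require at least 8 (size + id) */
    for (i = 0; i < length_dw && i < max_offset && ptr[i] >= 8; i += ptr[i] / 4) {
        if (ptr[i + 1] == id)
            return i;
    }
    return -1;
}

int main(void)
{
    /* two packets: a 12-byte packet with id 0x1, then an 8-byte packet with id 0x30000001 */
    const uint32_t ib[] = { 12, 0x1, 0xdead, 8, 0x30000001 };
    int idx = find_ib_param(ib, 5, 0x30000001, 16);

    printf("found at dword %d\n", idx);   /* prints 3 */
    return 0;
}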
amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */ - if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { - decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; - - if (!(decode_buffer->valid_buf_flag & 0x1)) - return 0; - - addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | - decode_buffer->msg_buffer_address_lo; - return vcn_v4_0_5_dec_msg(p, job, addr); - } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { - idx = vcn_v4_0_5_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, - RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET); - if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1) - return vcn_v4_0_5_limit_sched(p, job); - } - return 0; -} - static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, @@ -1518,7 +1402,6 @@ static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .get_rptr = vcn_v4_0_5_unified_ring_get_rptr, .get_wptr = vcn_v4_0_5_unified_ring_get_wptr, .set_wptr = vcn_v4_0_5_unified_ring_set_wptr, - .patch_cs_in_place = vcn_v4_0_5_ring_patch_cs_in_place, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + @@ -1733,6 +1616,67 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev) } } +static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); + uint32_t inst_off, is_powered; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * reg_count; + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} + +static void vcn_v4_0_5_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + bool is_powered; + uint32_t inst_off; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_off = i * reg_count; + /* mmUVD_POWER_STATUS is always readable and is first element of the array */ + adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) + for (j = 1; j < reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j], + i)); + } +} + static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .name = "vcn_v4_0_5", .early_init = vcn_v4_0_5_early_init, @@ -1751,8 +1695,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_5_set_clockgating_state, .set_powergating_state = vcn_v4_0_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v4_0_5_dump_ip_state, + .print_ip_state = vcn_v4_0_5_print_ip_state, }; const struct 
amdgpu_ip_block_version vcn_v4_0_5_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index 68c97fcd539b90..c305386358b4b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -37,6 +37,40 @@ #include +static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = { + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK), + SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE) +}; + static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN, SOC15_IH_CLIENTID_VCN1 @@ -83,6 +117,8 @@ static int vcn_v5_0_0_sw_init(void *handle) struct amdgpu_ring *ring; struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i, r; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); + uint32_t *ptr; r = amdgpu_vcn_sw_init(adev); if (r) @@ -137,6 +173,14 @@ static int vcn_v5_0_0_sw_init(void *handle) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode; + /* Allocate memory for VCN IP Dump buffer */ + ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); + if (!ptr) { + DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); + adev->vcn.ip_dump = NULL; + } else { + adev->vcn.ip_dump = ptr; + } return 0; } @@ -173,6 +217,8 @@ static int vcn_v5_0_0_sw_fini(void *handle) r = amdgpu_vcn_sw_fini(adev); + kfree(adev->vcn.ip_dump); + return r; } @@ -1297,6 +1343,66 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev) } } +static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); + uint32_t inst_off, is_powered; + + if (!adev->vcn.ip_dump) + return; + + drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) { + drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i); + continue; + } + + inst_off = i * reg_count; + is_powered = (adev->vcn.ip_dump[inst_off] & + 
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) { + drm_printf(p, "\nActive Instance:VCN%d\n", i); + for (j = 0; j < reg_count; j++) + drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name, + adev->vcn.ip_dump[inst_off + j]); + } else { + drm_printf(p, "\nInactive Instance:VCN%d\n", i); + } + } +} + +static void vcn_v5_0_dump_ip_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, j; + bool is_powered; + uint32_t inst_off; + uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); + + if (!adev->vcn.ip_dump) + return; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + inst_off = i * reg_count; + /* mmUVD_POWER_STATUS is always readable and is first element of the array */ + adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS); + is_powered = (adev->vcn.ip_dump[inst_off] & + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; + + if (is_powered) + for (j = 1; j < reg_count; j++) + adev->vcn.ip_dump[inst_off + j] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i)); + } +} + static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .name = "vcn_v5_0_0", .early_init = vcn_v5_0_0_early_init, @@ -1315,8 +1421,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v5_0_0_set_clockgating_state, .set_powergating_state = vcn_v5_0_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, + .dump_ip_state = vcn_v5_0_dump_ip_state, + .print_ip_state = vcn_v5_0_print_ip_state, }; const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index 80ce42aacc0cce..b61f6b838ec2c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -246,6 +246,7 @@ * 1 - Stream * 2 - Bypass */ +#define EOP_EXEC (1 << 28) /* For Trailing Fence */ #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 32e5db509560ee..9044bdb38cf4d5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include "kfd_priv.h" #include "kfd_device_queue_manager.h" @@ -247,14 +246,15 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, q_properties->priority = args->queue_priority; q_properties->queue_address = args->ring_base_address; q_properties->queue_size = args->ring_size; - q_properties->read_ptr = (uint32_t *) args->read_pointer_address; - q_properties->write_ptr = (uint32_t *) args->write_pointer_address; + q_properties->read_ptr = (void __user *)args->read_pointer_address; + q_properties->write_ptr = (void __user *)args->write_pointer_address; q_properties->eop_ring_buffer_address = args->eop_buffer_address; q_properties->eop_ring_buffer_size = args->eop_buffer_size; q_properties->ctx_save_restore_area_address = args->ctx_save_restore_address; q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size; q_properties->ctl_stack_size = args->ctl_stack_size; + q_properties->sdma_engine_id = args->sdma_engine_id; if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE || args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) q_properties->type = KFD_QUEUE_TYPE_COMPUTE; @@ -262,6 +262,8 @@ static int set_queue_properties_from_user(struct 
queue_properties *q_properties, q_properties->type = KFD_QUEUE_TYPE_SDMA; else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI) q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI; + else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID) + q_properties->type = KFD_QUEUE_TYPE_SDMA_BY_ENG_ID; else return -ENOTSUPP; @@ -306,7 +308,6 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, struct kfd_process_device *pdd; struct queue_properties q_properties; uint32_t doorbell_offset_in_process = 0; - struct amdgpu_bo *wptr_bo = NULL; memset(&q_properties, 0, sizeof(struct queue_properties)); @@ -334,6 +335,18 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, goto err_bind_process; } + if (q_properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) { + int max_sdma_eng_id = kfd_get_num_sdma_engines(dev) + + kfd_get_num_xgmi_sdma_engines(dev) - 1; + + if (q_properties.sdma_engine_id > max_sdma_eng_id) { + err = -EINVAL; + pr_err("sdma_engine_id %i exceeds maximum id of %i\n", + q_properties.sdma_engine_id, max_sdma_eng_id); + goto err_sdma_engine_id; + } + } + if (!pdd->qpd.proc_doorbells) { err = kfd_alloc_process_doorbells(dev->kfd, pdd); if (err) { @@ -342,53 +355,17 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, } } - /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work - * on unmapped queues for usermode queue oversubscription (no aggregated doorbell) - */ - if (dev->kfd->shared_resources.enable_mes && - ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) - >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) { - struct amdgpu_bo_va_mapping *wptr_mapping; - struct amdgpu_vm *wptr_vm; - - wptr_vm = drm_priv_to_vm(pdd->drm_priv); - err = amdgpu_bo_reserve(wptr_vm->root.bo, false); - if (err) - goto err_wptr_map_gart; - - wptr_mapping = amdgpu_vm_bo_lookup_mapping( - wptr_vm, args->write_pointer_address >> PAGE_SHIFT); - amdgpu_bo_unreserve(wptr_vm->root.bo); - if (!wptr_mapping) { - pr_err("Failed to lookup wptr bo\n"); - err = -EINVAL; - goto err_wptr_map_gart; - } - - wptr_bo = wptr_mapping->bo_va->base.bo; - if (wptr_bo->tbo.base.size > PAGE_SIZE) { - pr_err("Requested GART mapping for wptr bo larger than one page\n"); - err = -EINVAL; - goto err_wptr_map_gart; - } - if (dev->adev != amdgpu_ttm_adev(wptr_bo->tbo.bdev)) { - pr_err("Queue memory allocated to wrong device\n"); - err = -EINVAL; - goto err_wptr_map_gart; - } - - err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo); - if (err) { - pr_err("Failed to map wptr bo to GART\n"); - goto err_wptr_map_gart; - } + err = kfd_queue_acquire_buffers(pdd, &q_properties); + if (err) { + pr_debug("failed to acquire user queue buffers\n"); + goto err_acquire_queue_buf; } pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n", p->pasid, dev->id); - err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo, + err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, NULL, NULL, NULL, &doorbell_offset_in_process); if (err != 0) goto err_create_queue; @@ -422,9 +399,10 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, return 0; err_create_queue: - if (wptr_bo) - amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo); -err_wptr_map_gart: + kfd_queue_unref_bo_vas(pdd, &q_properties); + kfd_queue_release_buffers(pdd, &q_properties); +err_acquire_queue_buf: +err_sdma_engine_id: err_bind_process: err_pdd: mutex_unlock(&p->mutex); @@ -1422,8 +1400,7 @@ static int 
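/*
 * Editor's sketch, illustrative only: KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID lets
 * user space name a specific SDMA engine.  The ioctl path above rejects ids
 * past (num_sdma_engines + num_xgmi_sdma_engines - 1); allocate_sdma_queue()
 * later routes ids below num_sdma_engines to the PCIe SDMA pool and the rest
 * to the XGMI pool.  A compact model of that classification, with made-up
 * engine counts:
 */
#include <stdio.h>

enum sdma_pool { SDMA_POOL_INVALID, SDMA_POOL_PCIE, SDMA_POOL_XGMI };

static enum sdma_pool classify_sdma_engine(int eng_id, int num_sdma, int num_xgmi_sdma)
{
    int max_eng_id = num_sdma + num_xgmi_sdma - 1;

    if (eng_id < 0 || eng_id > max_eng_id)
        return SDMA_POOL_INVALID;
    return eng_id < num_sdma ? SDMA_POOL_PCIE : SDMA_POOL_XGMI;
}

int main(void)
{
    /* hypothetical device: 2 PCIe SDMA engines, 6 XGMI SDMA engines */
    int id;

    for (id = 0; id <= 8; id++)
        printf("engine %d -> pool %d\n", id, classify_sdma_engine(id, 2, 6));
    return 0;
}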
kfd_ioctl_unmap_memory_from_gpu(struct file *filep, err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv); if (err) { - pr_err("Failed to unmap from gpu %d/%d\n", - i, args->n_devices); + pr_debug("Failed to unmap from gpu %d/%d\n", i, args->n_devices); goto unmap_memory_from_gpu_failed; } args->n_success = i+1; @@ -1857,7 +1834,8 @@ static uint32_t get_process_num_bos(struct kfd_process *p) } static int criu_get_prime_handle(struct kgd_mem *mem, - int flags, u32 *shared_fd) + int flags, u32 *shared_fd, + struct file **file) { struct dma_buf *dmabuf; int ret; @@ -1868,13 +1846,14 @@ static int criu_get_prime_handle(struct kgd_mem *mem, return ret; } - ret = dma_buf_fd(dmabuf, flags); + ret = get_unused_fd_flags(flags); if (ret < 0) { pr_err("dmabuf create fd failed, ret:%d\n", ret); goto out_free_dmabuf; } *shared_fd = ret; + *file = dmabuf->file; return 0; out_free_dmabuf: @@ -1882,6 +1861,25 @@ static int criu_get_prime_handle(struct kgd_mem *mem, return ret; } +static void commit_files(struct file **files, + struct kfd_criu_bo_bucket *bo_buckets, + unsigned int count, + int err) +{ + while (count--) { + struct file *file = files[count]; + + if (!file) + continue; + if (err) { + fput(file); + put_unused_fd(bo_buckets[count].dmabuf_fd); + } else { + fd_install(bo_buckets[count].dmabuf_fd, file); + } + } +} + static int criu_checkpoint_bos(struct kfd_process *p, uint32_t num_bos, uint8_t __user *user_bos, @@ -1890,6 +1888,7 @@ static int criu_checkpoint_bos(struct kfd_process *p, { struct kfd_criu_bo_bucket *bo_buckets; struct kfd_criu_bo_priv_data *bo_privs; + struct file **files = NULL; int ret = 0, pdd_index, bo_index = 0, id; void *mem; @@ -1903,6 +1902,12 @@ static int criu_checkpoint_bos(struct kfd_process *p, goto exit; } + files = kvzalloc(num_bos * sizeof(struct file *), GFP_KERNEL); + if (!files) { + ret = -ENOMEM; + goto exit; + } + for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) { struct kfd_process_device *pdd = p->pdds[pdd_index]; struct amdgpu_bo *dumper_bo; @@ -1945,7 +1950,7 @@ static int criu_checkpoint_bos(struct kfd_process *p, ret = criu_get_prime_handle(kgd_mem, bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
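/*
 * Editor's sketch of the fd handling pattern introduced in the CRIU code
 * below (kernel context, not compilable stand-alone).  The old code called
 * dma_buf_fd(), which makes the fd visible to user space immediately; if a
 * later BO failed, close_fd() had to claw the fd back out of the task's fd
 * table.  The new code only reserves an fd number per BO and keeps the
 * struct file aside; commit_files() then either fd_install()s everything at
 * the end or, on error, drops the file reference and returns the unused fd
 * slot.  Condensed to the two halves of that pattern:
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/types.h>

/* reserve an fd number without exposing it to user space yet */
static int reserve_fd(int flags, u32 *out_fd)
{
    int fd = get_unused_fd_flags(flags);

    if (fd < 0)
        return fd;
    *out_fd = fd;
    return 0;
}

/* publish (err == 0) or roll back (err != 0) a batch of reserved fds */
static void commit_or_rollback(struct file **files, u32 *fds, unsigned int count, int err)
{
    while (count--) {
        if (!files[count])
            continue;
        if (err) {
            fput(files[count]);          /* drop the dmabuf file reference */
            put_unused_fd(fds[count]);   /* return the reserved fd slot */
        } else {
            fd_install(fds[count], files[count]);   /* now visible to user space */
        }
    }
}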
DRM_RDWR : 0, - &bo_bucket->dmabuf_fd); + &bo_bucket->dmabuf_fd, &files[bo_index]); if (ret) goto exit; } else { @@ -1963,7 +1968,7 @@ static int criu_checkpoint_bos(struct kfd_process *p, bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo); for (i = 0; i < p->n_pdds; i++) { - if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem)) + if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->drm_priv, kgd_mem)) bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id; } @@ -1996,12 +2001,8 @@ static int criu_checkpoint_bos(struct kfd_process *p, *priv_offset += num_bos * sizeof(*bo_privs); exit: - while (ret && bo_index--) { - if (bo_buckets[bo_index].alloc_flags - & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) - close_fd(bo_buckets[bo_index].dmabuf_fd); - } - + commit_files(files, bo_buckets, bo_index, ret); + kvfree(files); kvfree(bo_buckets); kvfree(bo_privs); return ret; @@ -2353,7 +2354,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, static int criu_restore_bo(struct kfd_process *p, struct kfd_criu_bo_bucket *bo_bucket, - struct kfd_criu_bo_priv_data *bo_priv) + struct kfd_criu_bo_priv_data *bo_priv, + struct file **file) { struct kfd_process_device *pdd; struct kgd_mem *kgd_mem; @@ -2405,7 +2407,7 @@ static int criu_restore_bo(struct kfd_process *p, if (bo_bucket->alloc_flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) { ret = criu_get_prime_handle(kgd_mem, DRM_RDWR, - &bo_bucket->dmabuf_fd); + &bo_bucket->dmabuf_fd, file); if (ret) return ret; } else { @@ -2422,6 +2424,7 @@ static int criu_restore_bos(struct kfd_process *p, { struct kfd_criu_bo_bucket *bo_buckets = NULL; struct kfd_criu_bo_priv_data *bo_privs = NULL; + struct file **files = NULL; int ret = 0; uint32_t i = 0; @@ -2435,6 +2438,12 @@ static int criu_restore_bos(struct kfd_process *p, if (!bo_buckets) return -ENOMEM; + files = kvzalloc(args->num_bos * sizeof(struct file *), GFP_KERNEL); + if (!files) { + ret = -ENOMEM; + goto exit; + } + ret = copy_from_user(bo_buckets, (void __user *)args->bos, args->num_bos * sizeof(*bo_buckets)); if (ret) { @@ -2460,7 +2469,7 @@ static int criu_restore_bos(struct kfd_process *p, /* Create and map new BOs */ for (; i < args->num_bos; i++) { - ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]); + ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i], &files[i]); if (ret) { pr_debug("Failed to restore BO[%d] ret%d\n", i, ret); goto exit; @@ -2475,11 +2484,8 @@ static int criu_restore_bos(struct kfd_process *p, ret = -EFAULT; exit: - while (ret && i--) { - if (bo_buckets[i].alloc_flags - & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) - close_fd(bo_buckets[i].dmabuf_fd); - } + commit_files(files, bo_buckets, i, ret); + kvfree(files); kvfree(bo_buckets); kvfree(bo_privs); return ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 34a282540c7e08..312dfa84f29f84 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -365,47 +365,47 @@ static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_i *watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID; - spin_lock(&pdd->dev->kfd->watch_points_lock); + spin_lock(&pdd->dev->watch_points_lock); for (i = 0; i < MAX_WATCH_ADDRESSES; i++) { /* device watchpoint in use so skip */ - if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1) + if ((pdd->dev->alloc_watch_ids >> i) & 0x1) continue; pdd->alloc_watch_ids |= 0x1 << i; - pdd->dev->kfd->alloc_watch_ids |= 
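/*
 * Editor's sketch, stand-alone model, not part of the patch: the debug code
 * below hands out hardware watchpoint slots from a small per-device bitmask
 * (MAX_WATCH_ADDRESSES bits), taking watch_points_lock around every
 * test/set/clear; the change in this hunk only moves that mask and lock from
 * the shared kfd_dev to the per-partition kfd_node.  The allocation scheme
 * itself, without the locking, looks like this:
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_WATCH_POINTS 4   /* stands in for MAX_WATCH_ADDRESSES */

/* returns a free slot index and marks it busy, or -1 if all are in use */
static int alloc_watch_id(uint32_t *alloc_mask)
{
    int i;

    for (i = 0; i < MAX_WATCH_POINTS; i++) {
        if ((*alloc_mask >> i) & 0x1)
            continue;               /* slot already in use */
        *alloc_mask |= 0x1u << i;
        return i;
    }
    return -1;
}

static void free_watch_id(uint32_t *alloc_mask, int id)
{
    if (id >= 0 && id < MAX_WATCH_POINTS)
        *alloc_mask &= ~(0x1u << id);
}

int main(void)
{
    uint32_t mask = 0;
    int a = alloc_watch_id(&mask);
    int b = alloc_watch_id(&mask);

    printf("allocated %d and %d, mask 0x%x\n", a, b, mask);
    free_watch_id(&mask, a);
    printf("freed %d, mask 0x%x\n", a, mask);
    return 0;
}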
0x1 << i; + pdd->dev->alloc_watch_ids |= 0x1 << i; *watch_id = i; - spin_unlock(&pdd->dev->kfd->watch_points_lock); + spin_unlock(&pdd->dev->watch_points_lock); return 0; } - spin_unlock(&pdd->dev->kfd->watch_points_lock); + spin_unlock(&pdd->dev->watch_points_lock); return -ENOMEM; } static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id) { - spin_lock(&pdd->dev->kfd->watch_points_lock); + spin_lock(&pdd->dev->watch_points_lock); /* process owns device watch point so safe to clear */ if ((pdd->alloc_watch_ids >> watch_id) & 0x1) { pdd->alloc_watch_ids &= ~(0x1 << watch_id); - pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id); + pdd->dev->alloc_watch_ids &= ~(0x1 << watch_id); } - spin_unlock(&pdd->dev->kfd->watch_points_lock); + spin_unlock(&pdd->dev->watch_points_lock); } static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id) { bool owns_watch_id = false; - spin_lock(&pdd->dev->kfd->watch_points_lock); + spin_lock(&pdd->dev->watch_points_lock); owns_watch_id = watch_id < MAX_WATCH_ADDRESSES && ((pdd->alloc_watch_ids >> watch_id) & 0x1); - spin_unlock(&pdd->dev->kfd->watch_points_lock); + spin_unlock(&pdd->dev->watch_points_lock); return owns_watch_id; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index f4d20adaa06899..fad1c8f2bc8334 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -884,13 +884,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, dev_err(kfd_device, "Error initializing KFD node\n"); goto node_init_error; } + + spin_lock_init(&node->watch_points_lock); + kfd->nodes[i] = node; } svm_range_set_max_pages(kfd->adev); - spin_lock_init(&kfd->watch_points_lock); - kfd->init_complete = true; dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); @@ -907,7 +908,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_doorbell_error: kfd_gtt_sa_fini(kfd); kfd_gtt_sa_init_error: - amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem); alloc_gtt_mem_failure: dev_err(kfd_device, "device %x:%x NOT added due to errors\n", @@ -925,7 +926,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); - amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem); } kfree(kfd); @@ -1445,6 +1446,45 @@ void kgd2kfd_unlock_kfd(void) mutex_unlock(&kfd_processes_mutex); } +int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id) +{ + struct kfd_node *node; + int ret; + + if (!kfd->init_complete) + return 0; + + if (node_id >= kfd->num_nodes) { + dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n", + node_id, kfd->num_nodes - 1); + return -EINVAL; + } + node = kfd->nodes[node_id]; + + ret = node->dqm->ops.unhalt(node->dqm); + if (ret) + dev_err(kfd_device, "Error in starting scheduler\n"); + + return ret; +} + +int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) +{ + struct kfd_node *node; + + if (!kfd->init_complete) + return 0; + + if (node_id >= kfd->num_nodes) { + dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n", + node_id, kfd->num_nodes - 1); + return -EINVAL; + } + + node = kfd->nodes[node_id]; + return node->dqm->ops.halt(node->dqm); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git 
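/*
 * Editor's note, illustrative only: amdgpu_amdkfd_free_gtt_mem() now receives
 * the address of the caller's pointer (&kfd->gtt_mem, &mqd->gtt_mem, ...)
 * rather than its value, presumably so the helper can clear the pointer after
 * freeing and leave nothing dangling.  The generic free-and-clear idiom, in
 * stand-alone form:
 */
#include <stdio.h>
#include <stdlib.h>

static void free_and_clear(void **obj)
{
    if (!obj || !*obj)
        return;
    free(*obj);     /* the real helper releases a pinned GTT BO here */
    *obj = NULL;    /* the caller's pointer can no longer be used after free */
}

int main(void)
{
    void *mem = malloc(64);

    free_and_clear(&mem);
    printf("mem is %s\n", mem ? "dangling" : "NULL");   /* prints "NULL" */
    return 0;
}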
a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 4f48507418d2f1..648f40091aa395 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -153,6 +153,20 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, static void kfd_hws_hang(struct device_queue_manager *dqm) { + struct device_process_node *cur; + struct qcm_process_device *qpd; + struct queue *q; + + /* Mark all device queues as reset. */ + list_for_each_entry(cur, &dqm->queues, list) { + qpd = cur->qpd; + list_for_each_entry(q, &qpd->queues_list, list) { + struct kfd_process_device *pdd = qpd_to_pdd(qpd); + + pdd->has_reset_queue = true; + } + } + /* * Issue a GPU reset if HWS is unresponsive */ @@ -208,10 +222,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, queue_input.mqd_addr = q->gart_mqd_addr; queue_input.wptr_addr = (uint64_t)q->properties.write_ptr; - if (q->wptr_bo) { - wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1); - queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off; - } + wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1); + queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->properties.wptr_bo) + wptr_addr_off; queue_input.is_kfd_process = 1; queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL); @@ -307,6 +319,46 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm) return retval; } +static int suspend_all_queues_mes(struct device_queue_manager *dqm) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev; + int r = 0; + + if (!down_read_trylock(&adev->reset_domain->sem)) + return -EIO; + + r = amdgpu_mes_suspend(adev); + up_read(&adev->reset_domain->sem); + + if (r) { + dev_err(adev->dev, "failed to suspend gangs from MES\n"); + dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n"); + kfd_hws_hang(dqm); + } + + return r; +} + +static int resume_all_queues_mes(struct device_queue_manager *dqm) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev; + int r = 0; + + if (!down_read_trylock(&adev->reset_domain->sem)) + return -EIO; + + r = amdgpu_mes_resume(adev); + up_read(&adev->reset_domain->sem); + + if (r) { + dev_err(adev->dev, "failed to resume gangs from MES\n"); + dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n"); + kfd_hws_hang(dqm); + } + + return r; +} + static void increment_queue_count(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) @@ -880,6 +932,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, else if (prev_active) retval = remove_queue_mes(dqm, q, &pdd->qpd); + /* queue is reset so inaccessable */ + if (pdd->has_reset_queue) { + retval = -EACCES; + goto out_unlock; + } + if (retval) { dev_err(dev, "unmap queue failed\n"); goto out_unlock; @@ -1534,6 +1592,41 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / kfd_get_num_xgmi_sdma_engines(dqm->dev); + } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) { + int i, num_queues, num_engines, eng_offset = 0, start_engine; + bool free_bit_found = false, is_xgmi = false; + + if (q->properties.sdma_engine_id < kfd_get_num_sdma_engines(dqm->dev)) { + num_queues = get_num_sdma_queues(dqm); + num_engines = 
kfd_get_num_sdma_engines(dqm->dev); + q->properties.type = KFD_QUEUE_TYPE_SDMA; + } else { + num_queues = get_num_xgmi_sdma_queues(dqm); + num_engines = kfd_get_num_xgmi_sdma_engines(dqm->dev); + eng_offset = kfd_get_num_sdma_engines(dqm->dev); + q->properties.type = KFD_QUEUE_TYPE_SDMA_XGMI; + is_xgmi = true; + } + + /* Scan available bit based on target engine ID. */ + start_engine = q->properties.sdma_engine_id - eng_offset; + for (i = start_engine; i < num_queues; i += num_engines) { + + if (!test_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap)) + continue; + + clear_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap); + q->sdma_id = i; + q->properties.sdma_queue_id = q->sdma_id / num_engines; + free_bit_found = true; + break; + } + + if (!free_bit_found) { + dev_err(dev, "No more SDMA queue to allocate for target ID %i\n", + q->properties.sdma_engine_id); + return -ENOMEM; + } } pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id); @@ -1626,10 +1719,64 @@ static int initialize_cpsch(struct device_queue_manager *dqm) return 0; } +/* halt_cpsch: + * Unmap queues so the schedule doesn't continue remaining jobs in the queue. + * Then set dqm->sched_halt so queues don't map to runlist until unhalt_cpsch + * is called. + */ +static int halt_cpsch(struct device_queue_manager *dqm) +{ + int ret = 0; + + dqm_lock(dqm); + if (!dqm->sched_running) { + dqm_unlock(dqm); + return 0; + } + + WARN_ONCE(dqm->sched_halt, "Scheduling is already on halt\n"); + + if (!dqm->is_hws_hang) { + if (!dqm->dev->kfd->shared_resources.enable_mes) + ret = unmap_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD, false); + else + ret = remove_all_queues_mes(dqm); + } + dqm->sched_halt = true; + dqm_unlock(dqm); + + return ret; +} + +/* unhalt_cpsch + * Unset dqm->sched_halt and map queues back to runlist + */ +static int unhalt_cpsch(struct device_queue_manager *dqm) +{ + int ret = 0; + + dqm_lock(dqm); + if (!dqm->sched_running || !dqm->sched_halt) { + WARN_ONCE(!dqm->sched_halt, "Scheduling is not on halt.\n"); + dqm_unlock(dqm); + return 0; + } + dqm->sched_halt = false; + if (!dqm->dev->kfd->shared_resources.enable_mes) + ret = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, + 0, USE_DEFAULT_GRACE_PERIOD); + dqm_unlock(dqm); + + return ret; +} + static int start_cpsch(struct device_queue_manager *dqm) { struct device *dev = dqm->dev->adev->dev; - int retval; + int retval, num_hw_queue_slots; retval = 0; @@ -1682,9 +1829,24 @@ static int start_cpsch(struct device_queue_manager *dqm) &dqm->wait_times); } + /* setup per-queue reset detection buffer */ + num_hw_queue_slots = dqm->dev->kfd->shared_resources.num_queue_per_pipe * + dqm->dev->kfd->shared_resources.num_pipe_per_mec * + NUM_XCC(dqm->dev->xcc_mask); + + dqm->detect_hang_info_size = num_hw_queue_slots * sizeof(struct dqm_detect_hang_info); + dqm->detect_hang_info = kzalloc(dqm->detect_hang_info_size, GFP_KERNEL); + + if (!dqm->detect_hang_info) { + retval = -ENOMEM; + goto fail_detect_hang_buffer; + } + dqm_unlock(dqm); return 0; +fail_detect_hang_buffer: + kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); fail_allocate_vidmem: fail_set_sched_resources: if (!dqm->dev->kfd->shared_resources.enable_mes) @@ -1715,6 +1877,8 @@ static int stop_cpsch(struct device_queue_manager *dqm) kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr); + kfree(dqm->detect_hang_info); + dqm->detect_hang_info = NULL; dqm_unlock(dqm); return 
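/*
 * Editor's sketch, stand-alone and simplified: in allocate_sdma_queue() above,
 * SDMA queue slots are engine-interleaved -- bit b of the bitmap belongs to
 * engine (b % num_engines) and is queue (b / num_engines) on that engine, so
 * picking a free queue on a specific engine means scanning bits start_engine,
 * start_engine + num_engines, and so on.  A minimal model (set bit == free,
 * as with the sdma/xgmi_sdma bitmaps):
 */
#include <stdint.h>
#include <stdio.h>

/* returns the chosen bitmap bit, or -1 if the engine has no free queue */
static int alloc_queue_on_engine(uint64_t *bitmap, int engine, int num_engines,
                                 int num_queues, int *queue_id)
{
    int bit;

    for (bit = engine; bit < num_queues; bit += num_engines) {
        if (!((*bitmap >> bit) & 1))
            continue;                    /* slot already taken */
        *bitmap &= ~(1ull << bit);       /* claim it */
        *queue_id = bit / num_engines;   /* per-engine queue index */
        return bit;
    }
    return -1;
}

int main(void)
{
    /* 2 engines x 4 queues = 8 slots, all free initially */
    uint64_t bitmap = 0xff;
    int queue_id, bit;

    bit = alloc_queue_on_engine(&bitmap, 1, 2, 8, &queue_id);
    printf("engine 1 -> bit %d, queue %d\n", bit, queue_id);   /* bit 1, queue 0 */
    bit = alloc_queue_on_engine(&bitmap, 1, 2, 8, &queue_id);
    printf("engine 1 -> bit %d, queue %d\n", bit, queue_id);   /* bit 3, queue 1 */
    return 0;
}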
0; @@ -1786,7 +1950,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, } if (q->properties.type == KFD_QUEUE_TYPE_SDMA || - q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { + q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI || + q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) { dqm_lock(dqm); retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL); dqm_unlock(dqm); @@ -1913,7 +2078,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) struct device *dev = dqm->dev->adev->dev; int retval; - if (!dqm->sched_running) + if (!dqm->sched_running || dqm->sched_halt) return 0; if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0) return 0; @@ -1931,6 +2096,135 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) return retval; } +static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q, + struct qcm_process_device *qpd) +{ + struct kfd_process_device *pdd = qpd_to_pdd(qpd); + + dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid 0x%0x is reset\n", + q->properties.queue_id, q->process->pasid); + + pdd->has_reset_queue = true; + if (q->properties.is_active) { + q->properties.is_active = false; + decrement_queue_count(dqm, qpd, q); + } +} + +static int detect_queue_hang(struct device_queue_manager *dqm) +{ + int i; + + /* detect should be used only in dqm locked queue reset */ + if (WARN_ON(dqm->detect_hang_count > 0)) + return 0; + + memset(dqm->detect_hang_info, 0, dqm->detect_hang_info_size); + + for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) { + uint32_t mec, pipe, queue; + int xcc_id; + + mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe) + / dqm->dev->kfd->shared_resources.num_pipe_per_mec; + + if (mec || !test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap)) + continue; + + amdgpu_queue_mask_bit_to_mec_queue(dqm->dev->adev, i, &mec, &pipe, &queue); + + for_each_inst(xcc_id, dqm->dev->xcc_mask) { + uint64_t queue_addr = dqm->dev->kfd2kgd->hqd_get_pq_addr( + dqm->dev->adev, pipe, queue, xcc_id); + struct dqm_detect_hang_info hang_info; + + if (!queue_addr) + continue; + + hang_info.pipe_id = pipe; + hang_info.queue_id = queue; + hang_info.xcc_id = xcc_id; + hang_info.queue_address = queue_addr; + + dqm->detect_hang_info[dqm->detect_hang_count] = hang_info; + dqm->detect_hang_count++; + } + } + + return dqm->detect_hang_count; +} + +static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uint64_t queue_address) +{ + struct device_process_node *cur; + struct qcm_process_device *qpd; + struct queue *q; + + list_for_each_entry(cur, &dqm->queues, list) { + qpd = cur->qpd; + list_for_each_entry(q, &qpd->queues_list, list) { + if (queue_address == q->properties.queue_address) + return q; + } + } + + return NULL; +} + +/* only for compute queue */ +static int reset_queues_on_hws_hang(struct device_queue_manager *dqm) +{ + int r = 0, reset_count = 0, i; + + if (!dqm->detect_hang_info || dqm->is_hws_hang) + return -EIO; + + /* assume dqm locked. 
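/*
 * Editor's sketch, illustrative only: detect_queue_hang() below walks every
 * HQD slot of MEC0 as a flat bit index and converts it back to
 * (mec, pipe, queue) before asking the hardware for that slot's ring buffer
 * address.  With the conventional layout of num_queue_per_pipe queues per
 * pipe and num_pipe_per_mec pipes per MEC, the decomposition is plain integer
 * division and modulo:
 */
#include <stdio.h>

struct hqd_slot { int mec, pipe, queue; };

static struct hqd_slot decode_slot(int bit, int num_queue_per_pipe, int num_pipe_per_mec)
{
    struct hqd_slot s;

    s.queue = bit % num_queue_per_pipe;
    s.pipe = (bit / num_queue_per_pipe) % num_pipe_per_mec;
    s.mec = (bit / num_queue_per_pipe) / num_pipe_per_mec;
    return s;
}

int main(void)
{
    /* e.g. 8 queues per pipe, 4 pipes per MEC */
    int bit;

    for (bit = 0; bit < 40; bit += 13) {
        struct hqd_slot s = decode_slot(bit, 8, 4);

        printf("slot %2d -> mec %d pipe %d queue %d\n", bit, s.mec, s.pipe, s.queue);
    }
    return 0;
}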
*/ + if (!detect_queue_hang(dqm)) + return -ENOTRECOVERABLE; + + for (i = 0; i < dqm->detect_hang_count; i++) { + struct dqm_detect_hang_info hang_info = dqm->detect_hang_info[i]; + struct queue *q = find_queue_by_address(dqm, hang_info.queue_address); + struct kfd_process_device *pdd; + uint64_t queue_addr = 0; + + if (!q) { + r = -ENOTRECOVERABLE; + goto reset_fail; + } + + pdd = kfd_get_process_device_data(dqm->dev, q->process); + if (!pdd) { + r = -ENOTRECOVERABLE; + goto reset_fail; + } + + queue_addr = dqm->dev->kfd2kgd->hqd_reset(dqm->dev->adev, + hang_info.pipe_id, hang_info.queue_id, hang_info.xcc_id, + KFD_UNMAP_LATENCY_MS); + + /* either reset failed or we reset an unexpected queue. */ + if (queue_addr != q->properties.queue_address) { + r = -ENOTRECOVERABLE; + goto reset_fail; + } + + set_queue_as_reset(dqm, q, &pdd->qpd); + reset_count++; + } + + if (reset_count == dqm->detect_hang_count) + kfd_signal_reset_event(dqm->dev); + else + r = -ENOTRECOVERABLE; + +reset_fail: + dqm->detect_hang_count = 0; + + return r; +} + /* dqm->lock mutex has to be locked before calling this function */ static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, @@ -1981,11 +2275,14 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, */ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) { - while (halt_if_hws_hang) - schedule(); - kfd_hws_hang(dqm); - retval = -ETIME; - goto out; + if (reset_queues_on_hws_hang(dqm)) { + while (halt_if_hws_hang) + schedule(); + dqm->is_hws_hang = true; + kfd_hws_hang(dqm); + retval = -ETIME; + goto out; + } } /* We need to reset the grace period value for this device */ @@ -2004,8 +2301,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, } /* only for compute queue */ -static int reset_queues_cpsch(struct device_queue_manager *dqm, - uint16_t pasid) +static int reset_queues_cpsch(struct device_queue_manager *dqm, uint16_t pasid) { int retval; @@ -2111,10 +2407,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, pdd->sdma_past_activity_counter += sdma_val; } - list_del(&q->list); - qpd->queue_count--; if (q->properties.is_active) { decrement_queue_count(dqm, qpd, q); + q->properties.is_active = false; if (!dqm->dev->kfd->shared_resources.enable_mes) { retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, @@ -2125,6 +2420,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, retval = remove_queue_mes(dqm, q, qpd); } } + list_del(&q->list); + qpd->queue_count--; /* * Unconditionally decrement this counter, regardless of the queue's @@ -2525,6 +2822,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) dqm->ops.initialize = initialize_cpsch; dqm->ops.start = start_cpsch; dqm->ops.stop = stop_cpsch; + dqm->ops.halt = halt_cpsch; + dqm->ops.unhalt = unhalt_cpsch; dqm->ops.destroy_queue = destroy_queue_cpsch; dqm->ops.update_queue = update_queue; dqm->ops.register_process = register_process; @@ -2621,7 +2920,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, { WARN(!mqd, "No hiq sdma mqd trunk to free"); - amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem); } void device_queue_manager_uninit(struct device_queue_manager *dqm) @@ -2633,6 +2932,95 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm) kfree(dqm); } +int kfd_dqm_suspend_bad_queue_mes(struct 
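/*
 * Editor's sketch, conceptual and stand-alone: reset_queues_on_hws_hang()
 * above trusts a per-HQD reset only if the hardware reports back the
 * ring-buffer address of the queue it actually reset; on any mismatch, or if
 * the hung slot cannot be matched to a known queue, it returns
 * -ENOTRECOVERABLE so the caller falls back to the full GPU reset path.  The
 * verification step boils down to:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for kfd2kgd->hqd_reset(): returns the PQ address it reset, 0 on failure */
static uint64_t fake_hqd_reset(uint64_t hung_addr)
{
    return hung_addr;   /* pretend the reset hit the expected queue */
}

/* true only if the reset landed on the queue we expected */
static bool reset_one_queue(uint64_t expected_pq_addr)
{
    return fake_hqd_reset(expected_pq_addr) == expected_pq_addr;
}

int main(void)
{
    printf("per-queue reset %s\n",
           reset_one_queue(0x1000) ? "verified" : "failed, escalate to GPU reset");
    return 0;
}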
kfd_node *knode, u32 pasid, u32 doorbell_id) +{ + struct kfd_process_device *pdd; + struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); + struct device_queue_manager *dqm = knode->dqm; + struct device *dev = dqm->dev->adev->dev; + struct qcm_process_device *qpd; + struct queue *q = NULL; + int ret = 0; + + if (!p) + return -EINVAL; + + dqm_lock(dqm); + + pdd = kfd_get_process_device_data(dqm->dev, p); + if (pdd) { + qpd = &pdd->qpd; + + list_for_each_entry(q, &qpd->queues_list, list) { + if (q->doorbell_id == doorbell_id && q->properties.is_active) { + ret = suspend_all_queues_mes(dqm); + if (ret) { + dev_err(dev, "Suspending all queues failed"); + goto out; + } + + q->properties.is_evicted = true; + q->properties.is_active = false; + decrement_queue_count(dqm, qpd, q); + + ret = remove_queue_mes(dqm, q, qpd); + if (ret) { + dev_err(dev, "Removing bad queue failed"); + goto out; + } + + ret = resume_all_queues_mes(dqm); + if (ret) + dev_err(dev, "Resuming all queues failed"); + + break; + } + } + } + +out: + dqm_unlock(dqm); + return ret; +} + +static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct device *dev = dqm->dev->adev->dev; + int ret = 0; + + /* Check if process is already evicted */ + dqm_lock(dqm); + if (qpd->evicted) { + /* Increment the evicted count to make sure the + * process stays evicted before its terminated. + */ + qpd->evicted++; + dqm_unlock(dqm); + goto out; + } + dqm_unlock(dqm); + + ret = suspend_all_queues_mes(dqm); + if (ret) { + dev_err(dev, "Suspending all queues failed"); + goto out; + } + + ret = dqm->ops.evict_process_queues(dqm, qpd); + if (ret) { + dev_err(dev, "Evicting process queues failed"); + goto out; + } + + ret = resume_all_queues_mes(dqm); + if (ret) + dev_err(dev, "Resuming all queues failed"); + +out: + return ret; +} + int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid) { struct kfd_process_device *pdd; @@ -2643,8 +3031,13 @@ int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid) return -EINVAL; WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); pdd = kfd_get_process_device_data(dqm->dev, p); - if (pdd) - ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); + if (pdd) { + if (dqm->dev->kfd->shared_resources.enable_mes) + ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd); + else + ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); + } + kfd_unref_process(p); return ret; @@ -3147,6 +3540,30 @@ int debug_refresh_runlist(struct device_queue_manager *dqm) return debug_map_and_unlock(dqm); } +bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + int doorbell_off, u32 *queue_format) +{ + struct queue *q; + bool r = false; + + if (!queue_format) + return r; + + dqm_lock(dqm); + + list_for_each_entry(q, &qpd->queues_list, list) { + if (q->properties.doorbell_off == doorbell_off) { + *queue_format = q->properties.format; + r = true; + goto out; + } + } + +out: + dqm_unlock(dqm); + return r; +} #if defined(CONFIG_DEBUG_FS) static void seq_reg_dump(struct seq_file *m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 3b9b8eabaaccfa..09ab36f8e8c69e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -106,6 +106,12 @@ union GRBM_GFX_INDEX_BITS { * @uninitialize: Destroys all the device queue manager resources allocated in * initialize routine. 
* + * @halt: This routine unmaps queues from runlist and set halt status to true + * so no more queues will be mapped to runlist until unhalt. + * + * @unhalt: This routine unset halt status to flase and maps queues back to + * runlist. + * * @create_kernel_queue: Creates kernel queue. Used for debug queue. * * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue. @@ -153,6 +159,8 @@ struct device_queue_manager_ops { int (*start)(struct device_queue_manager *dqm); int (*stop)(struct device_queue_manager *dqm); void (*uninitialize)(struct device_queue_manager *dqm); + int (*halt)(struct device_queue_manager *dqm); + int (*unhalt)(struct device_queue_manager *dqm); int (*create_kernel_queue)(struct device_queue_manager *dqm, struct kernel_queue *kq, struct qcm_process_device *qpd); @@ -210,6 +218,13 @@ struct device_queue_manager_asic_ops { struct kfd_node *dev); }; +struct dqm_detect_hang_info { + int pipe_id; + int queue_id; + int xcc_id; + uint64_t queue_address; +}; + /** * struct device_queue_manager * @@ -257,6 +272,7 @@ struct device_queue_manager { struct work_struct hw_exception_work; struct kfd_mem_obj hiq_sdma_mqd; bool sched_running; + bool sched_halt; /* used for GFX 9.4.3 only */ uint32_t current_logical_xcc_start; @@ -264,6 +280,11 @@ struct device_queue_manager { uint32_t wait_times; wait_queue_head_t destroy_wait; + + /* for per-queue reset support */ + struct dqm_detect_hang_info *detect_hang_info; + size_t detect_hang_info_size; + int detect_hang_count; }; void device_queue_manager_init_cik( @@ -303,6 +324,9 @@ void set_queue_snapshot_entry(struct queue *q, int debug_lock_and_unmap(struct device_queue_manager *dqm); int debug_map_and_unlock(struct device_queue_manager *dqm); int debug_refresh_runlist(struct device_queue_manager *dqm); +bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + int doorbell_off, u32 *queue_format); static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 9b33d9d2c9ad53..ea379224920935 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -31,6 +31,7 @@ #include #include "kfd_priv.h" #include "kfd_events.h" +#include "kfd_device_queue_manager.h" #include /* @@ -1244,12 +1245,33 @@ void kfd_signal_reset_event(struct kfd_node *dev) idx = srcu_read_lock(&kfd_processes_srcu); hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); + struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p); if (unlikely(user_gpu_id == -EINVAL)) { WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id); continue; } + if (unlikely(!pdd)) { + WARN_ONCE(1, "Could not get device data from pasid:0x%x\n", p->pasid); + continue; + } + + if (dev->dqm->detect_hang_count && !pdd->has_reset_queue) + continue; + + if (dev->dqm->detect_hang_count) { + struct amdgpu_task_info *ti; + + ti = amdgpu_vm_get_task_info_pasid(dev->adev, p->pasid); + if (ti) { + dev_err(dev->adev->dev, + "Queues reset on process %s tid %d thread %s pid %d\n", + ti->process_name, ti->tgid, ti->task_name, ti->pid); + amdgpu_vm_put_task_info(ti); + } + } + rcu_read_lock(); id = KFD_FIRST_NONSIGNAL_EVENT_ID; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index 8e0d0356e810c0..37b69fe0ede384 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -129,63 +129,6 @@ enum SQ_INTERRUPT_ERROR_TYPE { KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \ >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT) -static void event_interrupt_poison_consumption(struct kfd_node *dev, - uint16_t pasid, uint16_t client_id) -{ - enum amdgpu_ras_block block = 0; - int old_poison, ret = -EINVAL; - uint32_t reset = 0; - struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); - - if (!p) - return; - - /* all queues of a process will be unmapped in one time */ - old_poison = atomic_cmpxchg(&p->poison, 0, 1); - kfd_unref_process(p); - if (old_poison) - return; - - switch (client_id) { - case SOC15_IH_CLIENTID_SE0SH: - case SOC15_IH_CLIENTID_SE1SH: - case SOC15_IH_CLIENTID_SE2SH: - case SOC15_IH_CLIENTID_SE3SH: - case SOC15_IH_CLIENTID_UTCL2: - ret = kfd_dqm_evict_pasid(dev->dqm, pasid); - block = AMDGPU_RAS_BLOCK__GFX; - if (ret) - reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; - break; - case SOC15_IH_CLIENTID_SDMA0: - case SOC15_IH_CLIENTID_SDMA1: - case SOC15_IH_CLIENTID_SDMA2: - case SOC15_IH_CLIENTID_SDMA3: - case SOC15_IH_CLIENTID_SDMA4: - block = AMDGPU_RAS_BLOCK__SDMA; - reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; - break; - default: - break; - } - - kfd_signal_poison_consumed_event(dev, pasid); - - /* resetting queue passes, do page retirement without gpu reset - * resetting queue fails, fallback to gpu reset solution - */ - if (!ret) - dev_warn(dev->adev->dev, - "RAS poison consumption, unmap queue flow succeeded: client id %d\n", - client_id); - else - dev_warn(dev->adev->dev, - "RAS poison consumption, fall back to gpu reset flow: client id %d\n", - client_id); - - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); -} - static bool event_interrupt_isr_v10(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, @@ -332,11 +275,6 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID), sq_intr_err_type); - if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && - sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { - event_interrupt_poison_consumption(dev, pasid, source_id); - return; - } break; default: break; @@ -362,38 +300,14 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_SDMA7) { if (source_id == SOC15_INTSRC_SDMA_TRAP) { kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); - } else if (source_id == SOC15_INTSRC_SDMA_ECC) { - event_interrupt_poison_consumption(dev, pasid, source_id); - return; } } else if (client_id == SOC15_IH_CLIENTID_VMC || client_id == SOC15_IH_CLIENTID_VMC1 || client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); - uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); - uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); - int hub_inst = 0; struct kfd_hsa_memory_exception_data exception_data; - /* gfxhub */ - if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { - hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, - node_id); - if (hub_inst < 0) - hub_inst = 0; - } - - /* mmhub */ - if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) - hub_inst = node_id / 4; - - if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, - hub_inst, vmid_type)) { - event_interrupt_poison_consumption(dev, pasid, client_id); - return; - } - info.vmid = vmid; 
info.mc_id = client_id; info.page_addr = ih_ring_entry[4] | diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index f524a55eee1167..b3f988b275a888 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -330,11 +330,14 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) kfd_signal_event_interrupt(pasid, context_id0, 32); else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE && - KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0))) - kfd_set_dbg_ev_from_interrupt(dev, pasid, - KFD_CTXID0_DOORBELL_ID(context_id0), + KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0))) { + u32 doorbell_id = KFD_CTXID0_DOORBELL_ID(context_id0); + + kfd_set_dbg_ev_from_interrupt(dev, pasid, doorbell_id, KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)), NULL, 0); + kfd_dqm_suspend_bad_queue_mes(dev, pasid, doorbell_id); + } /* SDMA */ else if (source_id == SOC21_INTSRC_SDMA_TRAP) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index a9c3580be8c9b9..d46a13156ee9d7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -167,11 +167,23 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_SE3SH: case SOC15_IH_CLIENTID_UTCL2: block = AMDGPU_RAS_BLOCK__GFX; - if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || - amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) - reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; - else + if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) { + /* driver mode-2 for gfx poison is only supported by + * pmfw 0x00557300 and onwards */ + if (dev->adev->pm.fw_version < 0x00557300) + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; + else + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + } else if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) { + /* driver mode-2 for gfx poison is only supported by + * pmfw 0x05550C00 and onwards */ + if (dev->adev->pm.fw_version < 0x05550C00) + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; + else + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + } else { reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + } break; case SOC15_IH_CLIENTID_VMC: case SOC15_IH_CLIENTID_VMC1: @@ -184,11 +196,23 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_SDMA3: case SOC15_IH_CLIENTID_SDMA4: block = AMDGPU_RAS_BLOCK__SDMA; - if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || - amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) - reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; - else + if (amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2)) { + /* driver mode-2 for gfx poison is only supported by + * pmfw 0x00557300 and onwards */ + if (dev->adev->pm.fw_version < 0x00557300) + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; + else + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + } else if (amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) { + /* driver mode-2 for gfx poison is only supported by + * pmfw 0x05550C00 and onwards */ + if (dev->adev->pm.fw_version < 0x05550C00) + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; + else + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + } else { reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + } break; default: dev_warn(dev->adev->dev, @@ 
-431,25 +455,9 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); - uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); - uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); - int hub_inst = 0; struct kfd_hsa_memory_exception_data exception_data; - /* gfxhub */ - if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { - hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, - node_id); - if (hub_inst < 0) - hub_inst = 0; - } - - /* mmhub */ - if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) - hub_inst = node_id / 4; - - if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, - hub_inst, vmid_type)) { + if (source_id == SOC15_INTSRC_VMC_UTCL2_POISON) { event_interrupt_poison_consumption_v9(dev, pasid, client_id); return; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 50a81da43ce195..d9ae854b690849 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -225,7 +225,7 @@ void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj) { if (mqd_mem_obj->gtt_mem) { - amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, &mqd_mem_obj->gtt_mem); kfree(mqd_mem_obj); } else { kfd_gtt_sa_free(mm->dev, mqd_mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c index d163d92a692f67..2b72d5b4949b6c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c @@ -341,6 +341,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, m->sdmax_rlcx_doorbell_offset = q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; + m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum + << SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT) + & SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK; + m->sdma_engine_id = q->sdma_engine_id; m->sdma_queue_id = q->sdma_queue_id; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 66c73825c0a04e..84e8ea3a8a0c94 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -321,8 +321,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct v9_mqd *m = (struct v9_mqd *)mqd; + uint32_t doorbell_id = m->queue_doorbell_id0; - return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); + m->queue_doorbell_id0 = 0; + + return kfd_check_hiq_mqd_doorbell_id(mm->dev, doorbell_id, 0); } static int get_wave_state(struct mqd_manager *mm, void *mqd, @@ -624,6 +627,7 @@ static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd) m = get_mqd(mqd + hiq_mqd_size * inst); ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, inst); + m->queue_doorbell_id0 = 0; ++inst; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 00776f08351c30..1f9f5bfeaf8680 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -37,11 +37,14 @@ static int pm_map_process_v9(struct 
packet_manager *pm, struct kfd_node *kfd = pm->dqm->dev; struct kfd_process_device *pdd = container_of(qpd, struct kfd_process_device, qpd); + struct amdgpu_device *adev = kfd->adev; packet = (struct pm4_mes_map_process *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process)); packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, sizeof(struct pm4_mes_map_process)); + if (adev->enforce_isolation[kfd->node_id]) + packet->bitfields2.exec_cleaner_shader = 1; packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; packet->bitfields2.process_quantum = 10; packet->bitfields2.pasid = qpd->pqm->process->pasid; @@ -89,14 +92,18 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, struct pm4_mes_map_process_aldebaran *packet; uint64_t vm_page_table_base_addr = qpd->page_table_base; struct kfd_dev *kfd = pm->dqm->dev->kfd; + struct kfd_node *knode = pm->dqm->dev; struct kfd_process_device *pdd = container_of(qpd, struct kfd_process_device, qpd); int i; + struct amdgpu_device *adev = kfd->adev; packet = (struct pm4_mes_map_process_aldebaran *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran)); packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, sizeof(struct pm4_mes_map_process_aldebaran)); + if (adev->enforce_isolation[knode->node_id]) + packet->bitfields2.exec_cleaner_shader = 1; packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; packet->bitfields2.process_quantum = 10; packet->bitfields2.pasid = qpd->pqm->process->pasid; @@ -144,17 +151,22 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, int concurrent_proc_cnt = 0; struct kfd_node *kfd = pm->dqm->dev; + struct amdgpu_device *adev = kfd->adev; /* Determine the number of processes to map together to HW: * it can not exceed the number of VMIDs available to the * scheduler, and it is determined by the smaller of the number * of processes in the runlist and kfd module parameter * hws_max_conc_proc. + * However, if enforce_isolation is set (toggle LDS/VGPRs/SGPRs + * cleaner between process switch), enable single-process mode + * in HWS. * Note: the arbitration between the number of VMIDs and * hws_max_conc_proc has been done in * kgd2kfd_device_init(). */ - concurrent_proc_cnt = min(pm->dqm->processes_count, + concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ? 
+ 1 : min(pm->dqm->processes_count, kfd->max_proc_per_quantum); packet = (struct pm4_mes_runlist *)buffer; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index 8b6b2bd5c148fd..cd8611401a6641 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -145,8 +145,9 @@ struct pm4_mes_map_process { union { struct { - uint32_t pasid:16; - uint32_t reserved1:2; + uint32_t pasid:16; /* 0 - 15 */ + uint32_t reserved1:1; /* 16 */ + uint32_t exec_cleaner_shader:1; /* 17 */ uint32_t debug_vmid:4; uint32_t new_debug:1; uint32_t reserved2:1; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h index 38f5cb6a222ab1..e0ed62c4ade047 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h @@ -37,7 +37,7 @@ struct pm4_mes_map_process_aldebaran { struct { uint32_t pasid:16; /* 0 - 15 */ uint32_t single_memops:1; /* 16 */ - uint32_t reserved1:1; /* 17 */ + uint32_t exec_cleaner_shader:1; /* 17 */ uint32_t debug_vmid:4; /* 18 - 21 */ uint32_t new_debug:1; /* 22 */ uint32_t tmz:1; /* 23 */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 2b3ec92981e8f9..d6530febabad7f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -310,6 +310,10 @@ struct kfd_node { struct kfd_local_mem_info local_mem_info; struct kfd_dev *kfd; + + /* Track per device allocated watch points */ + uint32_t alloc_watch_ids; + spinlock_t watch_points_lock; }; struct kfd_dev { @@ -362,10 +366,6 @@ struct kfd_dev { struct kfd_node *nodes[MAX_KFD_NODES]; unsigned int num_nodes; - /* Track per device allocated watch points */ - uint32_t alloc_watch_ids; - spinlock_t watch_points_lock; - /* Kernel doorbells for KFD device */ struct amdgpu_bo *doorbells; @@ -414,13 +414,16 @@ enum kfd_unmap_queues_filter { * @KFD_QUEUE_TYPE_DIQ: DIQ queue type. * * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface. + * + * @KFD_QUEUE_TYPE_SDMA_BY_ENG_ID: SDMA user mode queue with target SDMA engine ID. 
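The packet-manager hunks above wire amdgpu's enforce_isolation setting into HWS: MAP_PROCESS gains an exec_cleaner_shader bit at dword2 bit 17, and the runlist is built with a single concurrent process so the LDS/VGPR/SGPR cleaner can run between process switches. Below is a toy model of that decision; the bitfield is collapsed past bit 17 and all types are simplified illustrations, not the real PM4 packet.

#include <stdint.h>
#include <stdio.h>

struct map_process_dword2 {
	uint32_t pasid:16;              /* bits 0 - 15 */
	uint32_t reserved1:1;           /* bit 16 */
	uint32_t exec_cleaner_shader:1; /* bit 17 */
	uint32_t rest:14;               /* bits 18 - 31, collapsed here */
};

static int pick_concurrent_procs(int enforce_isolation, int processes,
				 int max_per_quantum)
{
	/* With isolation enforced the runlist maps one process at a time. */
	if (enforce_isolation)
		return 1;
	return processes < max_per_quantum ? processes : max_per_quantum;
}

int main(void)
{
	struct map_process_dword2 dw2 = { .pasid = 0x1234 };
	int enforce_isolation = 1;

	if (enforce_isolation)
		dw2.exec_cleaner_shader = 1;

	printf("cleaner shader: %d, concurrent procs: %d\n",
	       (int)dw2.exec_cleaner_shader,
	       pick_concurrent_procs(enforce_isolation, 4, 8));
	return 0;
}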
*/ enum kfd_queue_type { KFD_QUEUE_TYPE_COMPUTE, KFD_QUEUE_TYPE_SDMA, KFD_QUEUE_TYPE_HIQ, KFD_QUEUE_TYPE_DIQ, - KFD_QUEUE_TYPE_SDMA_XGMI + KFD_QUEUE_TYPE_SDMA_XGMI, + KFD_QUEUE_TYPE_SDMA_BY_ENG_ID }; enum kfd_queue_format { @@ -494,8 +497,8 @@ struct queue_properties { uint64_t queue_size; uint32_t priority; uint32_t queue_percent; - uint32_t *read_ptr; - uint32_t *write_ptr; + void __user *read_ptr; + void __user *write_ptr; void __iomem *doorbell_ptr; uint32_t doorbell_off; bool is_interop; @@ -522,6 +525,12 @@ struct queue_properties { uint64_t tba_addr; uint64_t tma_addr; uint64_t exception_status; + + struct amdgpu_bo *wptr_bo; + struct amdgpu_bo *rptr_bo; + struct amdgpu_bo *ring_bo; + struct amdgpu_bo *eop_buf_bo; + struct amdgpu_bo *cwsr_bo; }; #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \ @@ -604,7 +613,7 @@ struct queue { uint64_t gang_ctx_gpu_addr; void *gang_ctx_cpu_ptr; - struct amdgpu_bo *wptr_bo; + struct amdgpu_bo *wptr_bo_gart; }; enum KFD_MQD_TYPE { @@ -837,6 +846,9 @@ struct kfd_process_device { void *proc_ctx_bo; uint64_t proc_ctx_gpu_addr; void *proc_ctx_cpu_ptr; + + /* Tracks queue reset status */ + bool has_reset_queue; }; #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd) @@ -854,6 +866,14 @@ struct svm_range_list { struct delayed_work restore_work; DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); struct task_struct *faulting_task; + /* check point ts decides if page fault recovery need be dropped */ + uint64_t checkpoint_ts[MAX_GPU_INSTANCE]; + + /* Default granularity to use in buffer migration + * and restoration of backing memory while handling + * recoverable page faults + */ + uint8_t default_granularity; }; /* Process data */ @@ -1284,6 +1304,15 @@ int init_queue(struct queue **q, const struct queue_properties *properties); void uninit_queue(struct queue *q); void print_queue_properties(struct queue_properties *q); void print_queue(struct queue *q); +int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo, + u64 expected_size); +void kfd_queue_buffer_put(struct amdgpu_bo **bo); +int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties); +int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties); +void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo); +int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd, + struct queue_properties *properties); +void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev); struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, struct kfd_node *dev); @@ -1303,6 +1332,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_node *dev, enum kfd_queue_type type); void kernel_queue_uninit(struct kernel_queue *kq); int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid); +int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id); /* Process Queue Manager */ struct process_queue_node { @@ -1320,7 +1350,6 @@ int pqm_create_queue(struct process_queue_manager *pqm, struct file *f, struct queue_properties *properties, unsigned int *qid, - struct amdgpu_bo *wptr_bo, const struct kfd_criu_queue_priv_data *q_data, const void *restore_mqd, const void *restore_ctl_stack, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 17e42161b01519..d07acf1b2f93c3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -270,6 +270,11 @@ 
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) struct kfd_node *dev = NULL; struct kfd_process *proc = NULL; struct kfd_process_device *pdd = NULL; + int i; + struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES]; + u32 queue_format; + + memset(cu_occupancy, 0x0, sizeof(cu_occupancy)); pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy); dev = pdd->dev; @@ -287,8 +292,29 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) /* Collect wave count from device if it supports */ wave_cnt = 0; max_waves_per_cu = 0; - dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt, - &max_waves_per_cu, 0); + + /* + * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition. + * For AQL queues, because of cooperative dispatch we multiply the wave count + * by number of XCCs in the partition to get the total wave counts across all + * XCCs in the partition. + * For PM4 queues, there is no cooperative dispatch so wave_cnt stay as it is. + */ + dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy, + &max_waves_per_cu, ffs(dev->xcc_mask) - 1); + + for (i = 0; i < AMDGPU_MAX_QUEUES; i++) { + if (cu_occupancy[i].wave_cnt != 0 && + kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd, + cu_occupancy[i].doorbell_off, + &queue_format)) { + if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4)) + wave_cnt += cu_occupancy[i].wave_cnt; + else + wave_cnt += (NUM_XCC(dev->xcc_mask) * + cu_occupancy[i].wave_cnt); + } + } /* Translate wave count to number of compute units */ cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; @@ -1048,7 +1074,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) if (pdd->dev->kfd->shared_resources.enable_mes) amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, - pdd->proc_ctx_bo); + &pdd->proc_ctx_bo); /* * before destroying pdd, make sure to report availability * for auto suspend @@ -1851,6 +1877,8 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger) goto fail; } n_evicted++; + + pdd->dev->dqm->is_hws_hang = false; } return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 21f5a1fb3bf88d..01b960b152743d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -204,19 +204,23 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm, } if (dev->kfd->shared_resources.enable_mes) { - amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo); - if (pqn->q->wptr_bo) - amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo); + amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo); + amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo_gart); } } void pqm_uninit(struct process_queue_manager *pqm) { struct process_queue_node *pqn, *next; + struct kfd_process_device *pdd; list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { - if (pqn->q) + if (pqn->q) { + pdd = kfd_get_process_device_data(pqn->q->device, pqm->process); + kfd_queue_unref_bo_vas(pdd, &pqn->q->properties); + kfd_queue_release_buffers(pdd, &pqn->q->properties); pqm_clean_queue_resource(pqm, pqn); + } kfd_procfs_del_queue(pqn->q); uninit_queue(pqn->q); @@ -231,8 +235,7 @@ void pqm_uninit(struct process_queue_manager *pqm) static int init_user_queue(struct process_queue_manager *pqm, struct kfd_node *dev, struct queue **q, struct queue_properties *q_properties, - struct file *f, struct amdgpu_bo *wptr_bo, - 
unsigned int qid) + struct file *f, unsigned int qid) { int retval; @@ -263,12 +266,32 @@ static int init_user_queue(struct process_queue_manager *pqm, goto cleanup; } memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE); - (*q)->wptr_bo = wptr_bo; + + /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work + * on unmapped queues for usermode queue oversubscription (no aggregated doorbell) + */ + if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) + >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) { + if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) { + pr_err("Queue memory allocated to wrong device\n"); + retval = -EINVAL; + goto free_gang_ctx_bo; + } + + retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo, + &(*q)->wptr_bo_gart); + if (retval) { + pr_err("Failed to map wptr bo to GART\n"); + goto free_gang_ctx_bo; + } + } } pr_debug("PQM After init queue"); return 0; +free_gang_ctx_bo: + amdgpu_amdkfd_free_gtt_mem(dev->adev, (*q)->gang_ctx_bo); cleanup: uninit_queue(*q); *q = NULL; @@ -280,7 +303,6 @@ int pqm_create_queue(struct process_queue_manager *pqm, struct file *f, struct queue_properties *properties, unsigned int *qid, - struct amdgpu_bo *wptr_bo, const struct kfd_criu_queue_priv_data *q_data, const void *restore_mqd, const void *restore_ctl_stack, @@ -345,13 +367,14 @@ int pqm_create_queue(struct process_queue_manager *pqm, switch (type) { case KFD_QUEUE_TYPE_SDMA: case KFD_QUEUE_TYPE_SDMA_XGMI: + case KFD_QUEUE_TYPE_SDMA_BY_ENG_ID: /* SDMA queues are always allocated statically no matter * which scheduler mode is used. We also do not need to * check whether a SDMA queue can be allocated here, because * allocate_sdma_queue() in create_queue() has the * corresponding check logic. 
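init_user_queue() above now maps the user wptr BO into GART whenever the MES scheduler firmware reports API version 2 or newer, so MES can observe the write pointer of unmapped queues without an aggregated doorbell. A minimal sketch of that version gate follows; the mask and shift values are placeholders, not the real AMDGPU_MES_API_VERSION_* definitions.

#include <stdint.h>
#include <stdio.h>

#define MES_API_VERSION_MASK  0x00fff000u	/* placeholder layout */
#define MES_API_VERSION_SHIFT 12		/* placeholder layout */

static int wptr_needs_gart_mapping(uint32_t sched_version)
{
	uint32_t api = (sched_version & MES_API_VERSION_MASK) >> MES_API_VERSION_SHIFT;

	/* From API v2 on, MES determines work on unmapped queues by reading
	 * the wptr through a GART mapping rather than a doorbell.
	 */
	return api >= 2;
}

int main(void)
{
	uint32_t sched_version = 3u << MES_API_VERSION_SHIFT;	/* pretend FW reports v3 */

	printf("map wptr to GART: %s\n",
	       wptr_needs_gart_mapping(sched_version) ? "yes" : "no");
	return 0;
}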
*/ - retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid); + retval = init_user_queue(pqm, dev, &q, properties, f, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -372,7 +395,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid); + retval = init_user_queue(pqm, dev, &q, properties, f, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -490,7 +513,10 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) } if (pqn->q) { - kfd_procfs_del_queue(pqn->q); + retval = kfd_queue_unref_bo_vas(pdd, &pqn->q->properties); + if (retval) + goto err_destroy_queue; + dqm = pqn->q->device->dqm; retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); if (retval) { @@ -500,7 +526,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) if (retval != -ETIME) goto err_destroy_queue; } - + kfd_procfs_del_queue(pqn->q); + kfd_queue_release_buffers(pdd, &pqn->q->properties); pqm_clean_queue_resource(pqm, pqn); uninit_queue(pqn->q); } @@ -524,11 +551,42 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm, struct process_queue_node *pqn; pqn = get_queue_by_qid(pqm, qid); - if (!pqn) { + if (!pqn || !pqn->q) { pr_debug("No queue %d exists for update operation\n", qid); return -EFAULT; } + /* + * Update with NULL ring address is used to disable the queue + */ + if (p->queue_address && p->queue_size) { + struct kfd_process_device *pdd; + struct amdgpu_vm *vm; + struct queue *q = pqn->q; + int err; + + pdd = kfd_get_process_device_data(q->device, q->process); + if (!pdd) + return -ENODEV; + vm = drm_priv_to_vm(pdd->drm_priv); + err = amdgpu_bo_reserve(vm->root.bo, false); + if (err) + return err; + + if (kfd_queue_buffer_get(vm, (void *)p->queue_address, &p->ring_bo, + p->queue_size)) { + pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n", + p->queue_address, p->queue_size); + return -EFAULT; + } + + kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo); + kfd_queue_buffer_put(&pqn->q->properties.ring_bo); + amdgpu_bo_unreserve(vm->root.bo); + + pqn->q->properties.ring_bo = p->ring_bo; + } + pqn->q->properties.queue_address = p->queue_address; pqn->q->properties.queue_size = p->queue_size; pqn->q->properties.queue_percent = p->queue_percent; @@ -971,7 +1029,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, print_queue_properties(&qp); - ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack, + ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack, NULL); if (ret) { pr_err("Failed to create new queue err:%d\n", ret); @@ -988,6 +1046,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, pr_debug("Queue id %d was restored successfully\n", queue_id); kfree(q_data); + kfree(q_extra_data); return ret; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c index 0f6992b1895c22..ad29634f8b44ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -24,6 +24,8 @@ #include #include "kfd_priv.h" +#include "kfd_topology.h" +#include "kfd_svm.h" void print_queue_properties(struct queue_properties *q) { @@ -82,3 +84,374 @@ void uninit_queue(struct queue *q) { kfree(q); } + +#if IS_ENABLED(CONFIG_HSA_AMD_SVM) + +static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size) +{ + struct kfd_process *p = pdd->process; + struct list_head 
update_list; + struct svm_range *prange; + int ret = -EINVAL; + + INIT_LIST_HEAD(&update_list); + addr >>= PAGE_SHIFT; + size >>= PAGE_SHIFT; + + mutex_lock(&p->svms.lock); + + /* + * range may split to multiple svm pranges aligned to granularity boundaery. + */ + while (size) { + uint32_t gpuid, gpuidx; + int r; + + prange = svm_range_from_addr(&p->svms, addr, NULL); + if (!prange) + break; + + if (!prange->mapped_to_gpu) + break; + + r = kfd_process_gpuid_from_node(p, pdd->dev, &gpuid, &gpuidx); + if (r < 0) + break; + if (!test_bit(gpuidx, prange->bitmap_access) && + !test_bit(gpuidx, prange->bitmap_aip)) + break; + + if (!(prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) + break; + + list_add(&prange->update_list, &update_list); + + if (prange->last - prange->start + 1 >= size) { + size = 0; + break; + } + + size -= prange->last - prange->start + 1; + addr += prange->last - prange->start + 1; + } + if (size) { + pr_debug("[0x%llx 0x%llx] not registered\n", addr, addr + size - 1); + goto out_unlock; + } + + list_for_each_entry(prange, &update_list, update_list) + atomic_inc(&prange->queue_refcount); + ret = 0; + +out_unlock: + mutex_unlock(&p->svms.lock); + return ret; +} + +static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size) +{ + struct kfd_process *p = pdd->process; + struct svm_range *prange, *pchild; + struct interval_tree_node *node; + unsigned long last; + + addr >>= PAGE_SHIFT; + last = addr + (size >> PAGE_SHIFT) - 1; + + mutex_lock(&p->svms.lock); + + node = interval_tree_iter_first(&p->svms.objects, addr, last); + while (node) { + struct interval_tree_node *next_node; + unsigned long next_start; + + prange = container_of(node, struct svm_range, it_node); + next_node = interval_tree_iter_next(node, addr, last); + next_start = min(node->last, last) + 1; + + if (atomic_add_unless(&prange->queue_refcount, -1, 0)) { + list_for_each_entry(pchild, &prange->child_list, child_list) + atomic_add_unless(&pchild->queue_refcount, -1, 0); + } + + node = next_node; + addr = next_start; + } + + mutex_unlock(&p->svms.lock); +} +#else + +static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size) +{ + return -EINVAL; +} + +static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size) +{ +} + +#endif + +int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo, + u64 expected_size) +{ + struct amdgpu_bo_va_mapping *mapping; + u64 user_addr; + u64 size; + + user_addr = (u64)addr >> AMDGPU_GPU_PAGE_SHIFT; + size = expected_size >> AMDGPU_GPU_PAGE_SHIFT; + + mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr); + if (!mapping) + goto out_err; + + if (user_addr != mapping->start || + (size != 0 && user_addr + size - 1 != mapping->last)) { + pr_debug("expected size 0x%llx not equal to mapping addr 0x%llx size 0x%llx\n", + expected_size, mapping->start << AMDGPU_GPU_PAGE_SHIFT, + (mapping->last - mapping->start + 1) << AMDGPU_GPU_PAGE_SHIFT); + goto out_err; + } + + *pbo = amdgpu_bo_ref(mapping->bo_va->base.bo); + mapping->bo_va->queue_refcount++; + return 0; + +out_err: + *pbo = NULL; + return -EINVAL; +} + +/* FIXME: remove this function, just call amdgpu_bo_unref directly */ +void kfd_queue_buffer_put(struct amdgpu_bo **bo) +{ + amdgpu_bo_unref(bo); +} + +int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties) +{ + struct kfd_topology_device *topo_dev; + struct amdgpu_vm *vm; + u32 total_cwsr_size; + int err; + + topo_dev = 
kfd_topology_device_by_id(pdd->dev->id); + if (!topo_dev) + return -EINVAL; + + vm = drm_priv_to_vm(pdd->drm_priv); + err = amdgpu_bo_reserve(vm->root.bo, false); + if (err) + return err; + + err = kfd_queue_buffer_get(vm, properties->write_ptr, &properties->wptr_bo, PAGE_SIZE); + if (err) + goto out_err_unreserve; + + err = kfd_queue_buffer_get(vm, properties->read_ptr, &properties->rptr_bo, PAGE_SIZE); + if (err) + goto out_err_unreserve; + + err = kfd_queue_buffer_get(vm, (void *)properties->queue_address, + &properties->ring_bo, properties->queue_size); + if (err) + goto out_err_unreserve; + + /* only compute queue requires EOP buffer and CWSR area */ + if (properties->type != KFD_QUEUE_TYPE_COMPUTE) + goto out_unreserve; + + /* EOP buffer is not required for all ASICs */ + if (properties->eop_ring_buffer_address) { + if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) { + pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n", + properties->eop_buf_bo->tbo.base.size, + topo_dev->node_props.eop_buffer_size); + err = -EINVAL; + goto out_err_unreserve; + } + err = kfd_queue_buffer_get(vm, (void *)properties->eop_ring_buffer_address, + &properties->eop_buf_bo, + properties->eop_ring_buffer_size); + if (err) + goto out_err_unreserve; + } + + if (properties->ctl_stack_size != topo_dev->node_props.ctl_stack_size) { + pr_debug("queue ctl stack size 0x%x not equal to node ctl stack size 0x%x\n", + properties->ctl_stack_size, + topo_dev->node_props.ctl_stack_size); + err = -EINVAL; + goto out_err_unreserve; + } + + if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) { + pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n", + properties->ctx_save_restore_area_size, + topo_dev->node_props.cwsr_size); + err = -EINVAL; + goto out_err_unreserve; + } + + total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size) + * NUM_XCC(pdd->dev->xcc_mask); + total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE); + + err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address, + &properties->cwsr_bo, total_cwsr_size); + if (!err) + goto out_unreserve; + + amdgpu_bo_unreserve(vm->root.bo); + + err = kfd_queue_buffer_svm_get(pdd, properties->ctx_save_restore_area_address, + total_cwsr_size); + if (err) + goto out_err_release; + + return 0; + +out_unreserve: + amdgpu_bo_unreserve(vm->root.bo); + return 0; + +out_err_unreserve: + amdgpu_bo_unreserve(vm->root.bo); +out_err_release: + /* FIXME: make a _locked version of this that can be called before + * dropping the VM reservation. 
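kfd_queue_acquire_buffers() above validates each user-supplied queue buffer with kfd_queue_buffer_get(): the address must land exactly on an existing GPUVM mapping and, when a size is expected, the mapping must be exactly that large before a reference is taken. A self-contained model of that range check, with simple page numbers standing in for real bo_va mappings:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12

struct va_mapping { uint64_t start, last; };	/* page numbers, inclusive */

static int queue_buffer_covers_mapping(const struct va_mapping *m,
				       uint64_t user_addr, uint64_t size)
{
	uint64_t first = user_addr >> GPU_PAGE_SHIFT;
	uint64_t npages = size >> GPU_PAGE_SHIFT;

	if (first != m->start)
		return 0;			/* must start at the mapping */
	if (npages && first + npages - 1 != m->last)
		return 0;			/* must end at its last page */
	return 1;
}

int main(void)
{
	struct va_mapping ring = { .start = 0x1000, .last = 0x100f };	/* 16 pages */

	printf("exact fit: %d\n",
	       queue_buffer_covers_mapping(&ring, 0x1000ull << GPU_PAGE_SHIFT,
					   16 << GPU_PAGE_SHIFT));
	printf("too small: %d\n",
	       queue_buffer_covers_mapping(&ring, 0x1000ull << GPU_PAGE_SHIFT,
					   8 << GPU_PAGE_SHIFT));
	return 0;
}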
+ */ + kfd_queue_unref_bo_vas(pdd, properties); + kfd_queue_release_buffers(pdd, properties); + return err; +} + +int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties) +{ + struct kfd_topology_device *topo_dev; + u32 total_cwsr_size; + + kfd_queue_buffer_put(&properties->wptr_bo); + kfd_queue_buffer_put(&properties->rptr_bo); + kfd_queue_buffer_put(&properties->ring_bo); + kfd_queue_buffer_put(&properties->eop_buf_bo); + kfd_queue_buffer_put(&properties->cwsr_bo); + + topo_dev = kfd_topology_device_by_id(pdd->dev->id); + if (!topo_dev) + return -EINVAL; + total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size) + * NUM_XCC(pdd->dev->xcc_mask); + total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE); + + kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size); + return 0; +} + +void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo) +{ + if (*bo) { + struct amdgpu_bo_va *bo_va; + + bo_va = amdgpu_vm_bo_find(vm, *bo); + if (bo_va && bo_va->queue_refcount) + bo_va->queue_refcount--; + } +} + +int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd, + struct queue_properties *properties) +{ + struct amdgpu_vm *vm; + int err; + + vm = drm_priv_to_vm(pdd->drm_priv); + err = amdgpu_bo_reserve(vm->root.bo, false); + if (err) + return err; + + kfd_queue_unref_bo_va(vm, &properties->wptr_bo); + kfd_queue_unref_bo_va(vm, &properties->rptr_bo); + kfd_queue_unref_bo_va(vm, &properties->ring_bo); + kfd_queue_unref_bo_va(vm, &properties->eop_buf_bo); + kfd_queue_unref_bo_va(vm, &properties->cwsr_bo); + + amdgpu_bo_unreserve(vm->root.bo); + return 0; +} + +#define SGPR_SIZE_PER_CU 0x4000 +#define LDS_SIZE_PER_CU 0x10000 +#define HWREG_SIZE_PER_CU 0x1000 +#define DEBUGGER_BYTES_ALIGN 64 +#define DEBUGGER_BYTES_PER_WAVE 32 + +static u32 kfd_get_vgpr_size_per_cu(u32 gfxv) +{ + u32 vgpr_size = 0x40000; + + if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */ + gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */ + gfxv == 90008) /* GFX_VERSION_ARCTURUS */ + vgpr_size = 0x80000; + else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */ + gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */ + gfxv == 120000 || /* GFX_VERSION_GFX1200 */ + gfxv == 120001) /* GFX_VERSION_GFX1201 */ + vgpr_size = 0x60000; + + return vgpr_size; +} + +#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv) \ + (kfd_get_vgpr_size_per_cu(gfxv) + SGPR_SIZE_PER_CU +\ + LDS_SIZE_PER_CU + HWREG_SIZE_PER_CU) + +#define CNTL_STACK_BYTES_PER_WAVE(gfxv) \ + ((gfxv) >= 100100 ? 12 : 8) /* GFX_VERSION_NAVI10*/ + +#define SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER 40 + +void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev) +{ + struct kfd_node_properties *props = &dev->node_props; + u32 gfxv = props->gfx_target_version; + u32 ctl_stack_size; + u32 wg_data_size; + u32 wave_num; + u32 cu_num; + + if (gfxv < 80001) /* GFX_VERSION_CARRIZO */ + return; + + cu_num = props->simd_count / props->simd_per_cu / NUM_XCC(dev->gpu->xcc_mask); + wave_num = (gfxv < 100100) ? /* GFX_VERSION_NAVI10 */ + min(cu_num * 40, props->array_count / props->simd_arrays_per_engine * 512) + : cu_num * 32; + + wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv), PAGE_SIZE); + ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8; + ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size, + PAGE_SIZE); + + if ((gfxv / 10000 * 10000) == 100000) { + /* HW design limits control stack size to 0x7000. 
+ * This is insufficient for theoretical PM4 cases + * but sufficient for AQL, limited by SPI events. + */ + ctl_stack_size = min(ctl_stack_size, 0x7000); + } + + props->ctl_stack_size = ctl_stack_size; + props->debug_memory_size = ALIGN(wave_num * DEBUGGER_BYTES_PER_WAVE, DEBUGGER_BYTES_ALIGN); + props->cwsr_size = ctl_stack_size + wg_data_size; + + if (gfxv == 80002) /* GFX_VERSION_TONGA */ + props->eop_buffer_size = 0x8000; + else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */ + props->eop_buffer_size = 4096; + else if (gfxv >= 80000) + props->eop_buffer_size = 4096; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index ea6a8e43bd5b28..de8b9abf7afcf3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -235,17 +235,16 @@ void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset, amdgpu_reset_get_desc(reset_context, reset_cause, sizeof(reset_cause)); - kfd_smi_event_add(0, dev, event, "%x %s\n", - dev->reset_seq_num, - reset_cause); + kfd_smi_event_add(0, dev, event, KFD_EVENT_FMT_UPDATE_GPU_RESET( + dev->reset_seq_num, reset_cause)); } void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask) { - kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n", + kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, KFD_EVENT_FMT_THERMAL_THROTTLING( throttle_bitmask, - amdgpu_dpm_get_thermal_throttling_counter(dev->adev)); + amdgpu_dpm_get_thermal_throttling_counter(dev->adev))); } void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) @@ -256,8 +255,8 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) if (task_info) { /* Report VM faults from user applications, not retry from kernel */ if (task_info->pid) - kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, "%x:%s\n", - task_info->pid, task_info->task_name); + kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT( + task_info->pid, task_info->task_name)); amdgpu_vm_put_task_info(task_info); } } @@ -267,16 +266,16 @@ void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid, ktime_t ts) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START, - "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, node->id, write_fault ? 'W' : 'R'); + KFD_EVENT_FMT_PAGEFAULT_START(ktime_to_ns(ts), pid, + address, node->id, write_fault ? 'W' : 'R')); } void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid, unsigned long address, bool migration) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END, - "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, node->id, migration ? 'M' : 'U'); + KFD_EVENT_FMT_PAGEFAULT_END(ktime_get_boottime_ns(), + pid, address, node->id, migration ? 
'M' : 'U')); } void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, @@ -286,9 +285,9 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, uint32_t trigger) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START, - "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", + KFD_EVENT_FMT_MIGRATE_START( ktime_get_boottime_ns(), pid, start, end - start, - from, to, prefetch_loc, preferred_loc, trigger); + from, to, prefetch_loc, preferred_loc, trigger)); } void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, @@ -296,24 +295,24 @@ void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, uint32_t from, uint32_t to, uint32_t trigger) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END, - "%lld -%d @%lx(%lx) %x->%x %d\n", + KFD_EVENT_FMT_MIGRATE_END( ktime_get_boottime_ns(), pid, start, end - start, - from, to, trigger); + from, to, trigger)); } void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, uint32_t trigger) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION, - "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - node->id, trigger); + KFD_EVENT_FMT_QUEUE_EVICTION(ktime_get_boottime_ns(), pid, + node->id, trigger)); } void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE, - "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - node->id); + KFD_EVENT_FMT_QUEUE_RESTORE(ktime_get_boottime_ns(), pid, + node->id, 0)); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -330,8 +329,8 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) kfd_smi_event_add(p->lead_thread->pid, pdd->dev, KFD_SMI_EVENT_QUEUE_RESTORE, - "%lld -%d %x %c\n", ktime_get_boottime_ns(), - p->lead_thread->pid, pdd->dev->id, 'R'); + KFD_EVENT_FMT_QUEUE_RESTORE(ktime_get_boottime_ns(), + p->lead_thread->pid, pdd->dev->id, 'R')); } kfd_unref_process(p); } @@ -341,8 +340,8 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid, uint32_t trigger) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU, - "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, node->id, trigger); + KFD_EVENT_FMT_UNMAP_FROM_GPU(ktime_get_boottime_ns(), + pid, address, last - address + 1, node->id, trigger)); } int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index bd9c2921e0dccc..04e74692369745 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -309,12 +309,13 @@ static void svm_range_free(struct svm_range *prange, bool do_unmap) } static void -svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc, - uint8_t *granularity, uint32_t *flags) +svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location, + int32_t *prefetch_loc, uint8_t *granularity, + uint32_t *flags) { *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED; *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED; - *granularity = 9; + *granularity = svms->default_granularity; *flags = KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT; } @@ -358,7 +359,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, bitmap_copy(prange->bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE); - svm_range_set_default_attributes(&prange->preferred_loc, + svm_range_set_default_attributes(svms, &prange->preferred_loc, &prange->prefetch_loc, &prange->granularity, &prange->flags); 
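This patch also replaces the hard-coded SVM granularity of 9 (seen in svm_range_set_default_attributes() above) with a per-process svms->default_granularity, clamped to 0x1B in svm_range_list_init() and reused for the fault-window alignment in svm_range_get_range_boundaries(). The sketch below models the clamp and the window arithmetic, assuming the usual ALIGN_DOWN()/ALIGN() semantics; helper names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MAX_SVM_GRANULARITY 0x1B	/* clamp used in svm_range_list_init() */

static unsigned int clamp_granularity(unsigned int requested)
{
	return requested > MAX_SVM_GRANULARITY ? MAX_SVM_GRANULARITY : requested;
}

/* Align a faulting page number to the granularity boundary, mirroring the
 * ALIGN_DOWN()/ALIGN() use when choosing range boundaries around a fault.
 */
static uint64_t window_start(uint64_t addr, unsigned int g)
{
	return addr & ~((1ull << g) - 1);
}

static uint64_t window_end(uint64_t addr, unsigned int g)
{
	return (addr + 1 + (1ull << g) - 1) & ~((1ull << g) - 1);
}

int main(void)
{
	unsigned int g = clamp_granularity(9);	/* the old hard-coded default */
	uint64_t fault_pfn = 0x12345;

	printf("granularity %u -> window [0x%llx, 0x%llx)\n", g,
	       (unsigned long long)window_start(fault_pfn, g),
	       (unsigned long long)window_end(fault_pfn, g));
	return 0;
}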
@@ -1051,6 +1052,7 @@ svm_range_split_adjust(struct svm_range *new, struct svm_range *old, new->mapped_to_gpu = old->mapped_to_gpu; bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); + atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount)); return 0; } @@ -1992,6 +1994,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old) new->vram_pages = old->vram_pages; bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); + atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount)); return new; } @@ -2260,16 +2263,10 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms) { struct kfd_process_device *pdd; struct kfd_process *p; - int drain; uint32_t i; p = container_of(svms, struct kfd_process, svms); -restart: - drain = atomic_read(&svms->drain_pagefaults); - if (!drain) - return; - for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { pdd = p->pdds[i]; if (!pdd) @@ -2289,8 +2286,6 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms) pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); } - if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) - goto restart; } static void svm_range_deferred_list_work(struct work_struct *work) @@ -2312,17 +2307,8 @@ static void svm_range_deferred_list_work(struct work_struct *work) prange->start, prange->last, prange->work_item.op); mm = prange->work_item.mm; -retry: - mmap_write_lock(mm); - /* Checking for the need to drain retry faults must be inside - * mmap write lock to serialize with munmap notifiers. - */ - if (unlikely(atomic_read(&svms->drain_pagefaults))) { - mmap_write_unlock(mm); - svm_range_drain_retry_fault(svms); - goto retry; - } + mmap_write_lock(mm); /* Remove from deferred_list must be inside mmap write lock, for * two race cases: @@ -2443,6 +2429,17 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, struct kfd_process *p; unsigned long s, l; bool unmap_parent; + uint32_t i; + + if (atomic_read(&prange->queue_refcount)) { + int r; + + pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n", + prange->start << PAGE_SHIFT); + r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM); + if (r) + pr_debug("failed %d to quiesce KFD queues\n", r); + } p = kfd_lookup_process_by_mm(mm); if (!p) @@ -2452,11 +2449,38 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, prange, prange->start, prange->last, start, last); - /* Make sure pending page faults are drained in the deferred worker - * before the range is freed to avoid straggler interrupts on - * unmapped memory causing "phantom faults". 
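svm_range_unmap_from_cpu() above now checks the new per-range queue_refcount before tearing a range down: if user queues still reference the backing memory, the process is quiesced via kgd2kfd_quiesce_mm() so the hardware stops touching it first. A standalone model of that guard, with the quiesce call stubbed out and the svm_range structure reduced to the two fields involved:

#include <stdatomic.h>
#include <stdio.h>

struct svm_range_model {
	unsigned long start;		/* first page number of the range */
	atomic_int queue_refcount;	/* queues pinning this range */
};

static int quiesce_queues(void)
{
	/* stand-in for kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM) */
	return 0;
}

static void unmap_from_cpu(struct svm_range_model *prange)
{
	if (atomic_load(&prange->queue_refcount)) {
		printf("freeing queue vital buffer 0x%lx, evicting queues\n",
		       prange->start << 12);
		if (quiesce_queues())
			printf("failed to quiesce queues\n");
	}
	/* ... proceed with splitting/unmapping the range ... */
}

int main(void)
{
	struct svm_range_model r = { .start = 0x400 };

	atomic_init(&r.queue_refcount, 1);	/* a queue ring buffer lives here */
	unmap_from_cpu(&r);
	return 0;
}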
+ /* calculate time stamps that are used to decide which page faults need be + * dropped or handled before unmap pages from gpu vm */ - atomic_inc(&svms->drain_pagefaults); + for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { + struct kfd_process_device *pdd; + struct amdgpu_device *adev; + struct amdgpu_ih_ring *ih; + uint32_t checkpoint_wptr; + + pdd = p->pdds[i]; + if (!pdd) + continue; + + adev = pdd->dev->adev; + + /* Check and drain ih1 ring if cam not available */ + if (adev->irq.ih1.ring_size) { + ih = &adev->irq.ih1; + checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih); + if (ih->rptr != checkpoint_wptr) { + svms->checkpoint_ts[i] = + amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1); + continue; + } + } + + /* check if dev->irq.ih_soft is not empty */ + ih = &adev->irq.ih_soft; + checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih); + if (ih->rptr != checkpoint_wptr) + svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1); + } unmap_parent = start <= prange->start && last >= prange->last; @@ -2680,9 +2704,10 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma); start_limit = max(vma->vm_start >> PAGE_SHIFT, - (unsigned long)ALIGN_DOWN(addr, 2UL << 8)); + (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity)); end_limit = min(vma->vm_end >> PAGE_SHIFT, - (unsigned long)ALIGN(addr + 1, 2UL << 8)); + (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity)); + /* First range that starts after the fault address */ node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX); if (node) { @@ -2897,7 +2922,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t vmid, uint32_t node_id, - uint64_t addr, bool write_fault) + uint64_t addr, uint64_t ts, bool write_fault) { unsigned long start, last, size; struct mm_struct *mm = NULL; @@ -2907,7 +2932,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, ktime_t timestamp = ktime_get_boottime(); struct kfd_node *node; int32_t best_loc; - int32_t gpuidx = MAX_GPU_INSTANCE; + int32_t gpuid, gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; struct vm_area_struct *vma; bool migration = false; @@ -2928,11 +2953,38 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); if (atomic_read(&svms->drain_pagefaults)) { - pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr); r = 0; goto out; } + node = kfd_node_by_irq_ids(adev, node_id, vmid); + if (!node) { + pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id, + vmid); + r = -EFAULT; + goto out; + } + + if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) { + pr_debug("failed to get gpuid/gpuidex for node_id: %d\n", node_id); + r = -EFAULT; + goto out; + } + + /* check if this page fault time stamp is before svms->checkpoint_ts */ + if (svms->checkpoint_ts[gpuidx] != 0) { + if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) { + pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + r = 0; + goto out; + } else + /* ts is after svms->checkpoint_ts now, reset svms->checkpoint_ts + * to zero to avoid following ts wrap around give wrong comparing + */ + svms->checkpoint_ts[gpuidx] = 0; + } + if (!p->xnack_enabled) { pr_debug("XNACK not enabled 
for pasid 0x%x\n", pasid); r = -EFAULT; @@ -2949,13 +3001,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, goto out; } - node = kfd_node_by_irq_ids(adev, node_id, vmid); - if (!node) { - pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id, - vmid); - r = -EFAULT; - goto out; - } mmap_read_lock(mm); retry_write_locked: mutex_lock(&svms->lock); @@ -3170,8 +3215,9 @@ void svm_range_list_fini(struct kfd_process *p) /* * Ensure no retry fault comes in afterwards, as page fault handler will * not find kfd process and take mm lock to recover fault. + * stop kfd page fault handing, then wait pending page faults got drained */ - atomic_inc(&p->svms.drain_pagefaults); + atomic_set(&p->svms.drain_pagefaults, 1); svm_range_drain_retry_fault(&p->svms); list_for_each_entry_safe(prange, next, &p->svms.list, list) { @@ -3205,6 +3251,12 @@ int svm_range_list_init(struct kfd_process *p) if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev)) bitmap_set(svms->bitmap_supported, i, 1); + /* Value of default granularity cannot exceed 0x1B, the + * number of pages supported by a 4-level paging table + */ + svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B); + pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity); + return 0; } @@ -3732,7 +3784,7 @@ svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm, node = interval_tree_iter_first(&svms->objects, start, last); if (!node) { pr_debug("range attrs not found return default values\n"); - svm_range_set_default_attributes(&location, &prefetch_loc, + svm_range_set_default_attributes(svms, &location, &prefetch_loc, &granularity, &flags_and); flags_or = flags_and; if (p->xnack_enabled) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 70c1776611c472..bddd24f04669e8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -137,6 +137,7 @@ struct svm_range { DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE); DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE); bool mapped_to_gpu; + atomic_t queue_refcount; }; static inline void svm_range_lock(struct svm_range *prange) @@ -173,7 +174,7 @@ int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear); void svm_range_vram_node_free(struct svm_range *prange); int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, - uint32_t vmid, uint32_t node_id, uint64_t addr, + uint32_t vmid, uint32_t node_id, uint64_t addr, uint64_t ts, bool write_fault); int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence); void svm_range_add_list_work(struct svm_range_list *svms, @@ -224,7 +225,7 @@ static inline void svm_range_list_fini(struct kfd_process *p) static inline int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t client_id, uint32_t node_id, - uint64_t addr, bool write_fault) + uint64_t addr, uint64_t ts, bool write_fault) { return -EFAULT; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 6f89b06f89d380..3871591c9aec98 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -292,6 +292,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr, iolink->max_bandwidth); sysfs_show_32bit_prop(buffer, offs, "recommended_transfer_size", iolink->rec_transfer_size); + sysfs_show_32bit_prop(buffer, offs, "recommended_sdma_engine_id_mask", + 
iolink->rec_sdma_eng_id_mask); sysfs_show_32bit_prop(buffer, offs, "flags", iolink->flags); return offs; @@ -1265,6 +1267,54 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev, } } +#define REC_SDMA_NUM_GPU 8 +static const int rec_sdma_eng_map[REC_SDMA_NUM_GPU][REC_SDMA_NUM_GPU] = { + { -1, 14, 12, 2, 4, 8, 10, 6 }, + { 14, -1, 2, 10, 8, 4, 6, 12 }, + { 10, 2, -1, 12, 14, 6, 4, 8 }, + { 2, 12, 10, -1, 6, 14, 8, 4 }, + { 4, 8, 14, 6, -1, 10, 12, 2 }, + { 8, 4, 6, 14, 12, -1, 2, 10 }, + { 10, 6, 4, 8, 12, 2, -1, 14 }, + { 6, 12, 8, 4, 2, 10, 14, -1 }}; + +static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev, + struct kfd_iolink_properties *outbound_link, + struct kfd_iolink_properties *inbound_link) +{ + struct kfd_node *gpu = outbound_link->gpu; + struct amdgpu_device *adev = gpu->adev; + int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes; + bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu && + adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 && + kfd_get_num_xgmi_sdma_engines(gpu) >= 14 && + (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8); + + if (support_rec_eng) { + int src_socket_id = adev->gmc.xgmi.physical_node_id; + int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id; + + outbound_link->rec_sdma_eng_id_mask = + 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id]; + inbound_link->rec_sdma_eng_id_mask = + 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id]; + } else { + int num_sdma_eng = kfd_get_num_sdma_engines(gpu); + int i, eng_offset = 0; + + if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI && + kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) { + eng_offset = num_sdma_eng; + num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu); + } + + for (i = 0; i < num_sdma_eng; i++) { + outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset)); + inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset)); + } + } +} + static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) { struct kfd_iolink_properties *link, *inbound_link; @@ -1303,6 +1353,7 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED; kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link); kfd_set_iolink_non_coherent(peer_dev, link, inbound_link); + kfd_set_recommended_sdma_engines(peer_dev, link, inbound_link); } } @@ -2027,7 +2078,7 @@ int kfd_topology_add_device(struct kfd_node *gpu) HSA_CAP_ASIC_REVISION_MASK); dev->node_props.location_id = pci_dev_id(gpu->adev->pdev); - if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3)) + if (gpu->kfd->num_nodes > 1) dev->node_props.location_id |= dev->gpu->node_id; dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus); @@ -2120,6 +2171,8 @@ int kfd_topology_add_device(struct kfd_node *gpu) dev->gpu->adev->gmc.xgmi.connected_to_cpu) dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS; + kfd_queue_ctx_save_restore_size(dev); + kfd_debug_print_topology(); kfd_notify_gpu_change(gpu_id, 1); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index 2d1c9d771bef2d..155b5c410af165 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -74,6 +74,10 @@ struct kfd_node_properties { uint32_t num_sdma_xgmi_engines; uint32_t num_sdma_queues_per_engine; uint32_t num_cp_queues; + uint32_t cwsr_size; + uint32_t ctl_stack_size; + uint32_t eop_buffer_size; + uint32_t debug_memory_size; 
char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; }; @@ -121,6 +125,7 @@ struct kfd_iolink_properties { uint32_t min_bandwidth; uint32_t max_bandwidth; uint32_t rec_transfer_size; + uint32_t rec_sdma_eng_id_mask; uint32_t flags; struct kfd_node *gpu; struct kobject *kobj; diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h index 10138676f27fd7..e5c0205f26181e 100644 --- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h +++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h @@ -29,6 +29,7 @@ #define SOC15_INTSRC_CP_BAD_OPCODE 183 #define SOC15_INTSRC_SQ_INTERRUPT_MSG 239 #define SOC15_INTSRC_VMC_FAULT 0 +#define SOC15_INTSRC_VMC_UTCL2_POISON 1 #define SOC15_INTSRC_SDMA_TRAP 224 #define SOC15_INTSRC_SDMA_ECC 220 #define SOC21_INTSRC_SDMA_TRAP 49 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1e069fa5211ee0..60c617fcc97ee7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -176,6 +176,7 @@ MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); +static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state); static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) { @@ -769,6 +770,12 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, return; } + /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */ + if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) { + DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n"); + return; + } + link_index = notify->link_index; link = adev->dm.dc->links[link_index]; dev = adev->dm.ddev; @@ -806,6 +813,20 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, } } +/** + * dmub_hpd_sense_callback - DMUB HPD sense processing callback. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * HPD sense changes can occur during low power states and need to be + * notified from firmware to driver. + */ +static void dmub_hpd_sense_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) +{ + DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n"); +} + /** * register_dmub_notify_callback - Sets callback for DMUB notify * @adev: amdgpu_device pointer @@ -877,6 +898,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) "HPD_IRQ", "SET_CONFIGC_REPLY", "DPIA_NOTIFICATION", + "HPD_SENSE_NOTIFY", }; do { @@ -1740,7 +1762,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * /* Send the chunk */ ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); if (ret != DMUB_STATUS_OK) - /* No need to free bb here since it shall be done unconditionally */ + /* No need to free bb here since it shall be done in dm_sw_fini() */ return NULL; } @@ -1755,25 +1777,41 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * static enum dmub_ips_disable_type dm_get_default_ips_mode( struct amdgpu_device *adev) { - /* - * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to - * cause a hard hang. A fix exists for newer PMFW. - * - * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest - * IPS state in all cases, except for s0ix and all displays off (DPMS), - * where IPS2 is allowed. 
- * - * When checking pmfw version, use the major and minor only. - */ - if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) && - (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) - return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE; - if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0)) - return DMUB_IPS_ENABLE; + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + /* + * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to + * cause a hard hang. A fix exists for newer PMFW. + * + * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest + * IPS state in all cases, except for s0ix and all displays off (DPMS), + * where IPS2 is allowed. + * + * When checking pmfw version, use the major and minor only. + */ + if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0)) + /* + * Other ASICs with DCN35 that have residency issues with + * IPS2 in idle. + * We want them to use IPS2 only in display off cases. + */ + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + break; + case IP_VERSION(3, 5, 1): + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + break; + default: + /* ASICs older than DCN35 do not have IPSs */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0)) + ret = DMUB_IPS_DISABLE_ALL; + break; + } - /* ASICs older than DCN35 do not have IPSs */ - return DMUB_IPS_DISABLE_ALL; + return ret; } static int amdgpu_dm_init(struct amdgpu_device *adev) @@ -1886,6 +1924,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; + else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC) + init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC; + else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC) + init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE) + init_data.flags.disable_ips = DMUB_IPS_ENABLE; else init_data.flags.disable_ips = dm_get_default_ips_mode(adev); @@ -1988,7 +2032,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); } - if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE) + if (adev->dm.dc->caps.ips_support && + adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) adev->dm.idle_workqueue = idle_create_workqueue(adev); if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { @@ -2242,7 +2287,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; } - r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); + r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu); if (r == -ENODEV) { /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); @@ -2489,8 +2534,17 @@ static int dm_sw_init(void *handle) static int dm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct dal_allocation *da; + + list_for_each_entry(da, &adev->dm.da_list, list) { + if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + } - kfree(adev->dm.bb_from_dmub); adev->dm.bb_from_dmub = NULL; kfree(adev->dm.dmub_fb_info); @@ -2592,9 +2646,9 @@ static int 
dm_late_init(void *handle) static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) { + u8 buf[UUID_SIZE]; + guid_t guid; int ret; - u8 guid[16]; - u64 tmp64; mutex_lock(&mgr->lock); if (!mgr->mst_primary) @@ -2615,26 +2669,27 @@ static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) } /* Some hubs forget their guids after they resume */ - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (ret != 16) { + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); + if (ret != sizeof(buf)) { drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } - if (memchr_inv(guid, 0, 16) == NULL) { - tmp64 = get_jiffies_64(); - memcpy(&guid[0], &tmp64, sizeof(u64)); - memcpy(&guid[8], &tmp64, sizeof(u64)); + import_guid(&guid, buf); + + if (guid_is_null(&guid)) { + guid_gen(&guid); + export_guid(buf, &guid); - ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16); + ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf)); - if (ret != 16) { + if (ret != sizeof(buf)) { drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); goto out_fail; } } - memcpy(mgr->mst_primary->guid, guid, 16); + guid_copy(&mgr->mst_primary->guid, &guid); out_fail: mutex_unlock(&mgr->lock); @@ -3230,8 +3285,11 @@ static int dm_resume(void *handle) drm_connector_list_iter_end(&iter); /* Force mode set in atomic commit */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { new_crtc_state->active_changed = true; + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + reset_freesync_config_for_crtc(dm_new_crtc_state); + } /* * atomic_check is expected to create the dc states. We need to release @@ -3787,6 +3845,12 @@ static int register_hpd_handlers(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub hpd callback"); return -EINVAL; } + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, + dmub_hpd_sense_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd sense callback"); + return -EINVAL; + } } list_for_each_entry(connector, @@ -4428,6 +4492,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 +#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, @@ -4442,6 +4507,21 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, return; amdgpu_acpi_get_backlight_caps(&caps); + + /* validate the firmware value is sane */ + if (caps.caps_valid) { + int spread = caps.max_input_signal - caps.min_input_signal; + + if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + caps.min_input_signal < 0 || + spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + spread < AMDGPU_DM_MIN_SPREAD) { + DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", + caps.min_input_signal, caps.max_input_signal); + caps.caps_valid = false; + } + } + if (caps.caps_valid) { dm->backlight_caps[bl_idx].caps_valid = true; if (caps.aux_support) @@ -4874,18 +4954,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) /* Determine whether to enable Replay support by default. 
*/ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { -/* - * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344 - * case IP_VERSION(3, 1, 4): - * case IP_VERSION(3, 1, 5): - * case IP_VERSION(3, 1, 6): - * case IP_VERSION(3, 2, 0): - * case IP_VERSION(3, 2, 1): - * case IP_VERSION(3, 5, 0): - * case IP_VERSION(3, 5, 1): - * replay_feature_enabled = true; - * break; - */ + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + replay_feature_enabled = true; + break; + default: replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; break; @@ -4972,12 +5048,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); - - /* TODO: Fix vblank control helpers to delay PSR entry to allow this when - * PSR is also supported. - */ - if (link->psr_settings.psr_feature_enabled) - adev_to_drm(adev)->vblank_disable_immediate = false; } } amdgpu_set_panel_orientation(&aconnector->base); @@ -5182,7 +5252,7 @@ static int dm_init_microcode(struct amdgpu_device *adev) /* ASIC doesn't support DMUB. */ return 0; } - r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); + r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub); return r; } @@ -6471,7 +6541,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); + DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n", + __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, @@ -6490,7 +6561,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", + DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", __func__, drm_connector->name); } } @@ -6671,12 +6742,21 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || stream->signal == SIGNAL_TYPE_EDP) { + const struct dc_edid_caps *edid_caps; + unsigned int disable_colorimetry = 0; + + if (aconnector->dc_sink) { + edid_caps = &aconnector->dc_sink->edid_caps; + disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; + } + // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && - stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && + !disable_colorimetry; if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; @@ -7233,6 +7313,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; + if (!dm_state) + return NULL; + do { stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, @@ -8270,7 +8353,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev, static void manage_dm_interrupts(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, - bool enable) + struct dm_crtc_state *acrtc_state) { /* * We have no guarantee that the frontend index maps to the same @@ -8282,9 +8365,31 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, amdgpu_display_crtc_idx_to_irq_type( adev, acrtc->crtc_id); + struct drm_vblank_crtc_config config = {0}; + struct dc_crtc_timing *timing; + int offdelay; + + if (acrtc_state) { + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < + IP_VERSION(3, 5, 0) || + acrtc_state->stream->link->psr_settings.psr_version < + DC_PSR_VERSION_UNSUPPORTED) { + timing = &acrtc_state->stream->timing; + + /* at least 2 frames */ + offdelay = DIV64_U64_ROUND_UP((u64)20 * + timing->v_total * + timing->h_total, + timing->pix_clk_100hz); + + config.offdelay_ms = offdelay ?: 30; + } else { + config.disable_immediate = true; + } + + drm_crtc_vblank_on_config(&acrtc->base, + &config); - if (enable) { - drm_crtc_vblank_on(&acrtc->base); amdgpu_irq_get( adev, &adev->pageflip_irq, @@ -8750,7 +8855,8 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane, adev->dm.dc->caps.color.dpp.gamma_corr) attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; - attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; + if (afb) + attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; if (crtc_state->stream) { if (!dc_stream_set_cursor_attributes(crtc_state->stream, @@ -9340,7 +9446,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, if (acrtc) old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); - if (!acrtc->wb_enabled) + if (!acrtc || !acrtc->wb_enabled) continue; dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -9358,7 +9464,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, if (old_crtc_state->active && (!new_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))) { - manage_dm_interrupts(adev, acrtc, false); + manage_dm_interrupts(adev, acrtc, NULL); dc_stream_release(dm_old_crtc_state->stream); } } @@ -9744,9 +9850,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); - hdcp_update_display( - adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, - new_con_state->hdcp_content_type, enable_encryption); + if (aconnector->dc_link) + hdcp_update_display( + adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, + new_con_state->hdcp_content_type, enable_encryption); } } @@ -9873,7 +9980,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_crtc_needs_modeset(new_crtc_state))) { dc_stream_retain(dm_new_crtc_state->stream); acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; - manage_dm_interrupts(adev, acrtc, true); + manage_dm_interrupts(adev, acrtc, dm_new_crtc_state); } /* Handle vrr on->off / off->on transitions */ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); @@ -11651,7 +11758,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (dc_resource_is_dsc_encoding_supported(dc)) { ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); if (ret) { 
- drm_dbg_atomic(dev, "compute_mst_dsc_configs_for_state() failed\n"); + drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n"); ret = -EINVAL; goto fail; } @@ -11672,7 +11779,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ ret = drm_dp_mst_atomic_check(state); if (ret) { - drm_dbg_atomic(dev, "drm_dp_mst_atomic_check() failed\n"); + drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); goto fail; } status = dc_validate_global_state(dc, dm_state->context, true); @@ -11766,25 +11873,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, return ret; } -static bool is_dp_capable_without_timing_msa(struct dc *dc, - struct amdgpu_dm_connector *amdgpu_dm_connector) -{ - u8 dpcd_data; - bool capable = false; - - if (amdgpu_dm_connector->dc_link && - dm_helpers_dp_read_dpcd( - NULL, - amdgpu_dm_connector->dc_link, - DP_DOWN_STREAM_PORT_COUNT, - &dpcd_data, - sizeof(dpcd_data))) { - capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false; - } - - return capable; -} - static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, unsigned int offset, unsigned int total_length, @@ -12087,8 +12175,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, sink->sink_signal == SIGNAL_TYPE_EDP)) { bool edid_check_required = false; - if (is_dp_capable_without_timing_msa(adev->dm.dc, - amdgpu_dm_connector)) { + if (amdgpu_dm_connector->dc_link && + amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) { if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; @@ -12170,7 +12258,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, } } - as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); + if (amdgpu_dm_connector->dc_link) + as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); @@ -12194,6 +12283,12 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (dm_con_state) dm_con_state->freesync_capable = freesync_capable; + if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable && + amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) { + amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false; + amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false; + } + if (connector->vrr_capable_property) drm_connector_set_vrr_capable_property(connector, freesync_capable); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 2d7755e2b6c320..15d4690c74d60b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -50,7 +50,7 @@ #define AMDGPU_DM_MAX_NUM_EDP 2 -#define AMDGPU_DMUB_NOTIFICATION_MAX 6 +#define AMDGPU_DMUB_NOTIFICATION_MAX 7 #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 99014339aaa390..a2cf2c066a76dd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ 
-251,9 +251,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; - dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); - - DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); + if (dm->active_vblank_irq_count > 0) { + DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n"); + dc_allow_idle_optimizations(dm->dc, false); + } /* * Control PSR based on vblank requirements from OS @@ -272,6 +273,11 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) vblank_work->stream->link->replay_settings.replay_feature_enabled); } + if (dm->active_vblank_irq_count == 0) { + DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n"); + dc_allow_idle_optimizations(dm->dc, true); + } + mutex_unlock(&dm->dc_lock); dc_stream_release(vblank_work->stream); @@ -286,11 +292,14 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); struct amdgpu_display_manager *dm = &adev->dm; struct vblank_control_work *work; + int irq_type; int rc = 0; if (acrtc->otg_inst == -1) goto skip; + irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); + if (enable) { /* vblank irq on -> Only need vupdate irq in vrr mode */ if (amdgpu_dm_crtc_vrr_active(acrtc_state)) @@ -303,13 +312,52 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) if (rc) return rc; - rc = (enable) - ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) - : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); + /* crtc vblank or vstartup interrupt */ + if (enable) { + rc = amdgpu_irq_get(adev, &adev->crtc_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Get crtc_irq ret=%d\n", rc); + } else { + rc = amdgpu_irq_put(adev, &adev->crtc_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Put crtc_irq ret=%d\n", rc); + } if (rc) return rc; + /* + * hubp surface flip interrupt + * + * We have no guarantee that the frontend index maps to the same + * backend index - some even map to more than one. + * + * TODO: Use a different interrupt or check DC itself for the mapping. 
+ */ + if (enable) { + rc = amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Get pageflip_irq ret=%d\n", rc); + } else { + rc = amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Put pageflip_irq ret=%d\n", rc); + } + + if (rc) + return rc; + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + /* crtc vline0 interrupt, only available on DCN+ */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) != 0) { + if (enable) { + rc = amdgpu_irq_get(adev, &adev->vline0_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Get vline0_irq ret=%d\n", rc); + } else { + rc = amdgpu_irq_put(adev, &adev->vline0_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Put vline0_irq ret=%d\n", rc); + } + + if (rc) + return rc; + } +#endif skip: if (amdgpu_in_reset(adev)) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 62cb59f00929b8..db56b0aa545454 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -3804,9 +3804,12 @@ static int trigger_hpd_mst_set(void *data, u64 val) if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { mutex_lock(&adev->dm.dc_lock); - dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); mutex_unlock(&adev->dm.dc_lock); + if (!ret) + DRM_ERROR("DM_MST: Failed to detect dc link!"); + ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) DRM_ERROR("DM_MST: Failed to set the device into MST mode!"); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index b490ae67b6beb1..069e0195e50a42 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -73,6 +73,10 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.remove_sink_ext_caps = true; break; + case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): + DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.disable_colorimetry = true; + break; default: return; } @@ -759,7 +763,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( uint8_t ret = 0; drm_dbg_dp(aux->drm_dev, - "Configure DSC to non-virtual dpcd synaptics\n"); + "MST_DSC Configure DSC to non-virtual dpcd synaptics\n"); if (enable) { /* When DSC is enabled on previous boot and reboot with the hub, @@ -772,7 +776,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( apply_synaptics_fifo_reset_wa(aux); ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); - DRM_INFO("Send DSC enable to synaptics\n"); + DRM_INFO("MST_DSC Send DSC enable to synaptics\n"); } else { /* Synaptics hub not support virtual dpcd, @@ -781,7 +785,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( */ if (!stream->link->link_status.link_active) { ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); - DRM_INFO("Send DSC disable to synaptics\n"); + DRM_INFO("MST_DSC Send DSC disable to synaptics\n"); } } @@ -823,14 +827,14 @@ bool dm_helpers_dp_write_dsc_enable( DP_DSC_ENABLE, &enable_passthrough, 1); drm_dbg_dp(dev, - "Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", + "MST_DSC Sent DSC 
pass-through enable to virtual dpcd port, ret = %u\n", ret); } ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); drm_dbg_dp(dev, - "Sent DSC decoding enable to %s port, ret = %u\n", + "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n", (port->passthrough_aux) ? "remote RX" : "virtual dpcd", ret); @@ -838,7 +842,7 @@ bool dm_helpers_dp_write_dsc_enable( ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); drm_dbg_dp(dev, - "Sent DSC decoding disable to %s port, ret = %u\n", + "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n", (port->passthrough_aux) ? "remote RX" : "virtual dpcd", ret); @@ -848,7 +852,7 @@ bool dm_helpers_dp_write_dsc_enable( DP_DSC_ENABLE, &enable_passthrough, 1); drm_dbg_dp(dev, - "Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", + "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", ret); } } @@ -858,12 +862,12 @@ bool dm_helpers_dp_write_dsc_enable( if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); drm_dbg_dp(dev, - "Send DSC %s to SST RX\n", + "SST_DSC Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); drm_dbg_dp(dev, - "Send DSC %s to DP-HDMI PCON\n", + "SST_DSC Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); } } @@ -1286,3 +1290,15 @@ enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link) return as_type; } + +bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream) +{ + // TODO + return false; +} + +bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream) +{ + // TODO + return false; +} \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 2e9f6da1acdcad..a08e8a0b696c60 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -253,7 +253,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux; /* synaptics cascaded MST hub case */ - if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port)) + if (is_synaptics_cascaded_panamera(aconnector->dc_link, port)) aconnector->dsc_aux = port->mgr->aux; if (!aconnector->dsc_aux) @@ -578,6 +578,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, if (!aconnector) return NULL; + DRM_DEBUG_DRIVER("%s: Create aconnector 0x%p for port 0x%p\n", __func__, aconnector, port); + connector = &aconnector->base; aconnector->mst_output_port = port; aconnector->mst_root = master; @@ -872,11 +874,11 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p if (params[i].sink) { if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && params[i].sink->sink_signal != SIGNAL_TYPE_NONE) - DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i, + DRM_DEBUG_DRIVER("MST_DSC %s i=%d dispname=%s\n", __func__, i, params[i].sink->edid_caps.display_name); } - DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n", + DRM_DEBUG_DRIVER("MST_DSC dsc=%d bits_per_pixel=%d pbn=%d\n", params[i].timing->flags.DSC, 
params[i].timing->dsc_cfg.bits_per_pixel, vars[i + k].pbn); @@ -1025,6 +1027,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, int remaining_to_try = 0; int ret; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + int var_pbn; for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled @@ -1054,26 +1057,37 @@ static int try_disable_dsc(struct drm_atomic_state *state, if (next_index == -1) break; + DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); + var_pbn = vars[next_index].pbn; vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); - if (ret < 0) + if (ret < 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n", + __func__, __LINE__, next_index, ret); + vars[next_index].pbn = var_pbn; return ret; + } ret = drm_dp_mst_atomic_check(state); if (ret == 0) { + DRM_DEBUG_DRIVER("MST_DSC index #%d, greedily disable dsc\n", next_index); vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index); + vars[next_index].pbn = var_pbn; ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); - if (ret < 0) + if (ret < 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n", + __func__, __LINE__, next_index, ret); return ret; + } } tried[next_index] = true; @@ -1082,6 +1096,15 @@ static int try_disable_dsc(struct drm_atomic_state *state, return 0; } +static void log_dsc_params(int count, struct dsc_mst_fairness_vars *vars, int k) +{ + int i; + + for (i = 0; i < count; i++) + DRM_DEBUG_DRIVER("MST_DSC DSC params: stream #%d --- dsc_enabled = %d, bpp_x16 = %d, pbn = %d\n", + i, vars[i + k].dsc_enabled, vars[i + k].bpp_x16, vars[i + k].pbn); +} + static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link, @@ -1104,6 +1127,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return PTR_ERR(mst_state); /* Set up params */ + DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count); for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; @@ -1132,7 +1156,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; - dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, @@ -1145,6 +1169,9 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(dc_link)); + DRM_DEBUG_DRIVER("MST_DSC #%d stream 0x%p - 
max_kbps = %u, min_kbps = %u, uncompressed_kbps = %u\n", + count, stream, params[count].bw_range.max_kbps, params[count].bw_range.min_kbps, + params[count].bw_range.stream_kbps); count++; } @@ -1159,6 +1186,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, *link_vars_start_index += count; /* Try no compression */ + DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); @@ -1177,7 +1205,10 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return ret; } + log_dsc_params(count, vars, k); + /* Try max compression */ + DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); @@ -1201,14 +1232,26 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (ret != 0) return ret; + log_dsc_params(count, vars, k); + /* Optimize degree of compression */ + DRM_DEBUG_DRIVER("MST_DSC Try optimize compression\n"); ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); - if (ret < 0) + if (ret < 0) { + DRM_DEBUG_DRIVER("MST_DSC Failed to optimize compression\n"); return ret; + } + + log_dsc_params(count, vars, k); + DRM_DEBUG_DRIVER("MST_DSC Try disable compression\n"); ret = try_disable_dsc(state, dc_link, params, vars, count, k); - if (ret < 0) + if (ret < 0) { + DRM_DEBUG_DRIVER("MST_DSC Failed to disable compression\n"); return ret; + } + + log_dsc_params(count, vars, k); set_dsc_configs_from_fairness_vars(params, vars, count, k); @@ -1230,17 +1273,19 @@ static bool is_dsc_need_re_compute( /* only check phy used by dsc mst branch */ if (dc_link->type != dc_connection_mst_branch) - return false; + goto out; /* add a check for older MST DSC with no virtual DPCDs */ if (needs_dsc_aux_workaround(dc_link) && (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))) - return false; + goto out; for (i = 0; i < MAX_PIPES; i++) stream_on_link[i] = NULL; + DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in new dc_state\n", __func__, dc_state->stream_count); + /* check if there is mode change in new request */ for (i = 0; i < dc_state->stream_count; i++) { struct drm_crtc_state *new_crtc_state; @@ -1250,6 +1295,8 @@ static bool is_dsc_need_re_compute( if (!stream) continue; + DRM_DEBUG_DRIVER("%s:%d MST_DSC checking #%d stream 0x%p\n", __func__, __LINE__, i, stream); + /* check if stream using the same link for mst */ if (stream->link != dc_link) continue; @@ -1262,8 +1309,11 @@ static bool is_dsc_need_re_compute( new_stream_on_link_num++; new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); - if (!new_conn_state) + if (!new_conn_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_conn_state for stream 0x%p, aconnector 0x%p\n", + __func__, __LINE__, stream, aconnector); continue; + } if (IS_ERR(new_conn_state)) continue; @@ -1272,21 +1322,36 @@ static bool is_dsc_need_re_compute( continue; new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); - if (!new_crtc_state) + if (!new_crtc_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_crtc_state for crtc of stream 0x%p, aconnector 
0x%p\n", + __func__, __LINE__, stream, aconnector); continue; + } if (IS_ERR(new_crtc_state)) continue; if (new_crtc_state->enable && new_crtc_state->active) { if (new_crtc_state->mode_changed || new_crtc_state->active_changed || - new_crtc_state->connectors_changed) - return true; + new_crtc_state->connectors_changed) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required." + "stream 0x%p in new dc_state\n", + __func__, __LINE__, stream); + is_dsc_need_re_compute = true; + goto out; + } } } - if (new_stream_on_link_num == 0) - return false; + if (new_stream_on_link_num == 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no mode change request for streams in new dc_state\n", + __func__, __LINE__); + is_dsc_need_re_compute = false; + goto out; + } + + DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in current dc_state\n", + __func__, dc->current_state->stream_count); /* check current_state if there stream on link but it is not in * new request state @@ -1310,11 +1375,18 @@ static bool is_dsc_need_re_compute( if (j == new_stream_on_link_num) { /* not in new state */ + DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required." + "stream 0x%p in current dc_state but not in new dc_state\n", + __func__, __LINE__, stream); is_dsc_need_re_compute = true; break; } } +out: + DRM_DEBUG_DRIVER("%s: MST_DSC dsc recompute %s\n", + __func__, is_dsc_need_re_compute ? "required" : "not required"); + return is_dsc_need_re_compute; } @@ -1343,6 +1415,9 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + DRM_DEBUG_DRIVER("%s: MST_DSC compute mst dsc configs for stream 0x%p, aconnector 0x%p\n", + __func__, stream, aconnector); + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue; @@ -1375,8 +1450,11 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, stream = dc_state->streams[i]; if (stream->timing.flags.DSC == 1) - if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) + if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC Failed to request dsc hw resource for stream 0x%p\n", + __func__, __LINE__, stream); return -EINVAL; + } } return ret; @@ -1405,6 +1483,9 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + DRM_DEBUG_DRIVER("MST_DSC pre compute mst dsc configs for #%d stream 0x%p, aconnector 0x%p\n", + i, stream, aconnector); + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue; @@ -1494,12 +1575,12 @@ int pre_validate_dsc(struct drm_atomic_state *state, int ret = 0; if (!is_dsc_precompute_needed(state)) { - DRM_INFO_ONCE("DSC precompute is not needed.\n"); + DRM_INFO_ONCE("%s:%d MST_DSC dsc precompute is not needed\n", __func__, __LINE__); return 0; } ret = dm_atomic_get_state(state, dm_state_ptr); if (ret != 0) { - DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); + DRM_INFO_ONCE("%s:%d MST_DSC dm_atomic_get_state() failed\n", __func__, __LINE__); return ret; } dm_state = *dm_state_ptr; @@ -1553,7 +1634,8 @@ int pre_validate_dsc(struct drm_atomic_state *state, ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars); if (ret != 0) { - DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); + DRM_INFO_ONCE("%s:%d MST_DSC dsc pre_compute_mst_dsc_configs_for_state() failed\n", + __func__, __LINE__); ret = -EINVAL; goto 
clean_exit; } @@ -1567,12 +1649,15 @@ int pre_validate_dsc(struct drm_atomic_state *state, if (local_dc_state->streams[i] && dc_is_timing_changed(stream, local_dc_state->streams[i])) { - DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i); + DRM_INFO_ONCE("%s:%d MST_DSC crtc[%d] needs mode_change\n", __func__, __LINE__, i); } else { int ind = find_crtc_index_in_state_by_stream(state, stream); - if (ind >= 0) + if (ind >= 0) { + DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n", + __func__, __LINE__, stream); state->crtcs[ind].new_state->mode_changed = 0; + } } } clean_exit: @@ -1605,7 +1690,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream, { struct dc_dsc_policy dsc_policy = {0}; - dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp * 16, @@ -1697,7 +1782,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); if (stream_kbps <= end_to_end_bw_in_kbps) { - DRM_DEBUG_DRIVER("No DSC needed. End-to-end bw sufficient."); + DRM_DEBUG_DRIVER("MST_DSC no dsc required. End-to-end bw sufficient\n"); return DC_OK; } @@ -1710,7 +1795,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( /*capable of dsc passthough. dsc bitstream along the entire path*/ if (aconnector->mst_output_port->passthrough_aux) { if (bw_range.min_kbps > end_to_end_bw_in_kbps) { - DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n"); + DRM_DEBUG_DRIVER("MST_DSC dsc passthrough and decode at endpoint" + "Max dsc compression bw can't fit into end-to-end bw\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } else { @@ -1721,7 +1807,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( /*Get last DP link BW capability*/ if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) { if (stream_kbps > end_link_bw) { - DRM_DEBUG_DRIVER("DSC decode at last link. Mode required bw can't fit into available bw\n"); + DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." + "Mode required bw can't fit into last link\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } @@ -1734,7 +1821,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); if (bw_range.min_kbps > virtual_channel_bw_in_kbps) { - DRM_DEBUG_DRIVER("DSC decode at last link. Max dsc compression can't fit into MST available bw\n"); + DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." 
+ "Max dsc compression can't fit into MST available bw\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } @@ -1751,9 +1839,9 @@ enum dc_status dm_dp_mst_is_port_support_mode( dc_link_get_highest_encoding_format(stream->link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("Require dsc and dsc config found\n"); + DRM_DEBUG_DRIVER("MST_DSC require dsc and dsc config found\n"); } else { - DRM_DEBUG_DRIVER("Require dsc but can't find appropriate dsc config\n"); + DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find appropriate dsc config\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } @@ -1775,11 +1863,11 @@ enum dc_status dm_dp_mst_is_port_support_mode( if (branch_max_throughput_mps != 0 && ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) { - DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails"); + DRM_DEBUG_DRIVER("MST_DSC require dsc but max throughput mps fails\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } else { - DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config."); + DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find common dsc config\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 5cb11cc2d06368..495e3cd70426db 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -961,6 +961,7 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, else domain = AMDGPU_GEM_DOMAIN_VRAM; + rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(rbo, domain); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) @@ -1283,6 +1284,7 @@ int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc struct dc_cursor_position *position) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = drm_to_adev(plane->dev); int x, y; int xorigin = 0, yorigin = 0; @@ -1314,12 +1316,14 @@ int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc y = 0; } position->enable = true; - position->translate_by_source = true; position->x = x; position->y = y; position->x_hotspot = xorigin; position->y_hotspot = yorigin; + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1)) + position->translate_by_source = true; + return 0; } @@ -1377,7 +1381,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, adev->dm.dc->caps.color.dpp.gamma_corr) attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; - attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; + if (afb) + attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; if (crtc_state->stream) { mutex_lock(&adev->dm.dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c index 08c494a7a21bad..0d5fefb0f5917c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c @@ -114,6 +114,7 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector domain = amdgpu_display_supported_domains(adev, rbo->flags); + rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(rbo, domain); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 80069651def3ea..8992e697759f90 100644 --- 
a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -35,7 +35,6 @@ DC_LIBS += dcn201 DC_LIBS += dcn30 DC_LIBS += dcn301 DC_LIBS += dcn31 -DC_LIBS += dcn314 DC_LIBS += dml DC_LIBS += dml2 endif diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index e47e9db062f44b..681799468487c7 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -569,7 +569,7 @@ static void calculate_bandwidth( break; } data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1)); - /*clamp the partitions to the maxium number supported by the lb*/ + /* clamp the partitions to the maximum number supported by the lb */ if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) { data->lb_partitions_max[i] = bw_int_to_fixed(10); } diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c index 506f82cd5cc67a..88d3f9d7dd556a 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c @@ -486,3 +486,30 @@ int dc_fixpt_s4d19(struct fixed31_32 arg) else return ux_dy(arg.value, 4, 19); } + +struct fixed31_32 dc_fixpt_from_ux_dy(unsigned int value, + unsigned int integer_bits, + unsigned int fractional_bits) +{ + struct fixed31_32 fixpt_value = dc_fixpt_zero; + struct fixed31_32 fixpt_int_value = dc_fixpt_zero; + long long frac_mask = ((long long)1 << (long long)integer_bits) - 1; + + fixpt_value.value = (long long)value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + frac_mask = frac_mask << fractional_bits; + fixpt_int_value.value = value & frac_mask; + fixpt_int_value.value <<= (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + fixpt_value.value |= fixpt_int_value.value; + return fixpt_value; +} + +struct fixed31_32 dc_fixpt_from_int_dy(unsigned int int_value, + unsigned int frac_value, + unsigned int integer_bits, + unsigned int fractional_bits) +{ + struct fixed31_32 fixpt_value = dc_fixpt_from_int(int_value); + + fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + return fixpt_value; +} diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 4254bdfefe38c2..7d18f372ce7ab4 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -227,7 +227,7 @@ static void init_transmitter_control(struct bios_parser *bp) uint8_t frev; uint8_t crev = 0; - if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev)) + if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) && (bp->base.ctx->dc->ctx->dce_version <= DCN_VERSION_2_0)) BREAK_TO_DEBUGGER(); switch (crev) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index f770828df14937..0e243f4344d050 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -59,6 +59,7 @@ int clk_mgr_helper_get_active_display_cnt( display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; + const struct dc_stream_status *stream_status = &context->stream_status[i]; /* Don't count SubVP phantom pipes as 
part of active * display count @@ -66,13 +67,7 @@ int clk_mgr_helper_get_active_display_cnt( if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; - /* - * Only notify active stream or virtual stream. - * Need to notify virtual stream to work around - * headless case. HPD does not fire when system is in - * S0i2. - */ - if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL) + if (!stream->dpms_off || (stream_status && stream_status->plane_count)) display_count++; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index 78df96882d6ec5..f8409453434c1c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -195,7 +195,7 @@ void dce11_pplib_apply_display_requirements( * , then change minimum memory clock based on real-time bandwidth * limitation. */ - if ((dc->ctx->asic_id.chip_family == FAMILY_AI) && + if (dc->bw_vbios && (dc->ctx->asic_id.chip_family == FAMILY_AI) && ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) { pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz, (uint32_t) div64_s64( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 70ee0089a20dfe..b46a3afe48ca7c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -120,25 +120,40 @@ static int dcn35_get_active_display_cnt_wa( return display_count; } - static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; + if (dc->ctx->dce_environment == DCE_ENV_DIAG) + return; + for (i = 0; i < dc->res_pool->pipe_count; ++i) { + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe = safe_to_lower ? 
&context->res_ctx.pipe_ctx[i] : &dc->current_state->res_ctx.pipe_ctx[i]; - + bool stream_changed_otg_dig_on = false; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; + stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream && + old_pipe->stream != new_pipe->stream && + old_pipe->stream_res.tg == new_pipe->stream_res.tg && + new_pipe->stream->link_enc && !new_pipe->stream->dpms_off && + new_pipe->stream->link_enc->funcs->is_dig_enabled && + new_pipe->stream->link_enc->funcs->is_dig_enabled( + new_pipe->stream->link_enc) && + new_pipe->stream_res.stream_enc && + new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled && + new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc); if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || - !pipe->stream->link_enc)) { + !pipe->stream->link_enc) && !stream_changed_otg_dig_on) { + /* This w/a should not trigger when we have a dig active */ if (disable) { - if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->disable_crtc) - pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); + if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); } else { @@ -367,6 +382,9 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); + if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz) + new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz; + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); @@ -1082,7 +1100,7 @@ void dcn35_clk_mgr_construct( clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, - DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + DC_MEM_ALLOC_TYPE_GART, sizeof(struct dcn35_watermarks), &clk_mgr->smu_wm_set.mc_address.quad_part); @@ -1094,7 +1112,7 @@ void dcn35_clk_mgr_construct( smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, - DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + DC_MEM_ALLOC_TYPE_GART, sizeof(DpmClocks_t_dcn35), &smu_dpm_clks.mc_address.quad_part); @@ -1191,7 +1209,7 @@ void dcn35_clk_mgr_construct( } if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) - dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, + dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART, smu_dpm_clks.dpm_clks); if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { @@ -1204,6 +1222,12 @@ void dcn35_clk_mgr_construct( ctx->dc->debug.disable_dpp_power_gate = false; ctx->dc->debug.disable_hubp_power_gate = false; ctx->dc->debug.disable_dsc_power_gate = false; + + /* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. 
*/ + if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE && + ctx->dce_version == DCN_VERSION_3_5 && + ((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00)) + ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; } else { /*let's reset the config control flag*/ ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index 45fe17a4689099..8cfc5f4359374d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -14,6 +14,7 @@ #include "core_types.h" #include "dm_helpers.h" #include "link.h" +#include "dc_state_priv.h" #include "atomfirmware.h" #include "dcn401_smu14_driver_if.h" @@ -29,6 +30,7 @@ #define mmCLK01_CLK0_CLK2_DFS_CNTL 0x16E6F #define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E72 #define mmCLK01_CLK0_CLK4_DFS_CNTL 0x16E75 +#define mmCLK20_CLK2_CLK2_DFS_CNTL 0x1B051 #define CLK0_CLK_PLL_REQ__FbMult_int_MASK 0x000001ffUL #define CLK0_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000f000UL @@ -302,6 +304,197 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) dcn401_build_wm_range_table(clk_mgr_base); } +static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, + struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + uint32_t dprefclk_did = 0; + uint32_t dcfclk_did = 0; + uint32_t dtbclk_did = 0; + uint32_t dispclk_did = 0; + uint32_t dppclk_did = 0; + uint32_t fclk_did = 0; + uint32_t target_div = 0; + + /* DFS Slice 0 is used for DISPCLK */ + dispclk_did = REG_READ(CLK0_CLK0_DFS_CNTL); + /* DFS Slice 1 is used for DPPCLK */ + dppclk_did = REG_READ(CLK0_CLK1_DFS_CNTL); + /* DFS Slice 2 is used for DPREFCLK */ + dprefclk_did = REG_READ(CLK0_CLK2_DFS_CNTL); + /* DFS Slice 3 is used for DCFCLK */ + dcfclk_did = REG_READ(CLK0_CLK3_DFS_CNTL); + /* DFS Slice 4 is used for DTBCLK */ + dtbclk_did = REG_READ(CLK0_CLK4_DFS_CNTL); + /* DFS Slice _ is used for FCLK */ + fclk_did = REG_READ(CLK2_CLK2_DFS_CNTL); + + /* Convert DISPCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dispclk_did); + //Get dispclk in khz + regs_and_bypass->dispclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DISPCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dppclk_did); + //Get dppclk in khz + regs_and_bypass->dppclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DPREFCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dprefclk_did); + //Get dprefclk in khz + regs_and_bypass->dprefclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DCFCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dcfclk_did); + //Get dcfclk in khz + regs_and_bypass->dcfclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DTBCLK DFS Slice DID to divider*/ + target_div = dentist_get_divider_from_did(dtbclk_did); + //Get dtbclk in khz + regs_and_bypass->dtbclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + /* Convert DTBCLK DFS Slice DID to divider*/ + target_div = 
dentist_get_divider_from_did(fclk_did); + //Get fclk in khz + regs_and_bypass->fclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; +} + +static bool dcn401_check_native_scaling(struct pipe_ctx *pipe) +{ + bool is_native_scaling = false; + int width = pipe->plane_state->src_rect.width; + int height = pipe->plane_state->src_rect.height; + + if (pipe->stream->timing.h_addressable == width && + pipe->stream->timing.v_addressable == height && + pipe->plane_state->dst_rect.width == width && + pipe->plane_state->dst_rect.height == height) + is_native_scaling = true; + + return is_native_scaling; +} + +static void dcn401_auto_dpm_test_log( + struct dc_clocks *new_clocks, + struct clk_mgr_internal *clk_mgr, + struct dc_state *context) +{ + unsigned int mall_ss_size_bytes; + int dramclk_khz_override, fclk_khz_override, num_fclk_levels; + + struct pipe_ctx *pipe_ctx_list[MAX_PIPES]; + int active_pipe_count = 0; + + for (int i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { + pipe_ctx_list[active_pipe_count] = pipe_ctx; + active_pipe_count++; + } + } + + msleep(5); + + mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes; + + struct clk_log_info log_info = {0}; + struct clk_state_registers_and_bypass clk_register_dump; + + dcn401_dump_clk_registers(&clk_register_dump, &clk_mgr->base, &log_info); + + // Overrides for these clocks in case there is no p_state change support + dramclk_khz_override = new_clocks->dramclk_khz; + fclk_khz_override = new_clocks->fclk_khz; + + num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; + + if (!new_clocks->p_state_change_support) + dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000; + + if (!new_clocks->fclk_p_state_change_support) + fclk_khz_override = clk_mgr->base.bw_params->clk_table.entries[num_fclk_levels].fclk_mhz * 1000; + + + //////////////////////////////////////////////////////////////////////////// + // IMPORTANT: When adding more clocks to these logs, do NOT put a newline + // anywhere other than at the very end of the string. 
+ // + // Formatting example (make sure to have " - " between each entry): + // + // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n" + //////////////////////////////////////////////////////////////////////////// + if (active_pipe_count > 0 && + new_clocks->dramclk_khz > 0 && + new_clocks->fclk_khz > 0 && + new_clocks->dcfclk_khz > 0 && + new_clocks->dppclk_khz > 0) { + + uint32_t pix_clk_list[MAX_PIPES] = {0}; + int p_state_list[MAX_PIPES] = {0}; + int disp_src_width_list[MAX_PIPES] = {0}; + int disp_src_height_list[MAX_PIPES] = {0}; + uint64_t disp_src_refresh_list[MAX_PIPES] = {0}; + bool is_scaled_list[MAX_PIPES] = {0}; + + for (int i = 0; i < active_pipe_count; i++) { + struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i]; + uint64_t refresh_rate; + + pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz; + p_state_list[i] = curr_pipe_ctx->p_state_type; + + refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 + + curr_pipe_ctx->stream->timing.v_total + * (uint64_t) curr_pipe_ctx->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total); + disp_src_refresh_list[i] = refresh_rate; + + if (curr_pipe_ctx->plane_state) { + is_scaled_list[i] = !(dcn401_check_native_scaling(curr_pipe_ctx)); + disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width; + disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height; + } + } + + DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - " + "dcfclk:%d - dppclk:%d - dispclk_hw:%d - " + "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - " + "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - " + "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - " + "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - " + "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - " + "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - " + "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - " + "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n", + dramclk_khz_override, + fclk_khz_override, + new_clocks->dcfclk_khz, + new_clocks->dppclk_khz, + clk_register_dump.dispclk, + clk_register_dump.dppclk, + clk_register_dump.dprefclk, + clk_register_dump.dcfclk, + clk_register_dump.dtbclk, + clk_register_dump.fclk, + pix_clk_list[0], pix_clk_list[1], pix_clk_list[3], pix_clk_list[2], + mall_ss_size_bytes, + p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3], + disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0], + disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1], + disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2], + disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]); + } +} + static void dcn401_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, int ref_dtbclk_khz) @@ -324,10 +517,12 @@ static void dcn401_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr if (!use_hpo_encoder) continue; - otg_master->clock_source->funcs->program_pix_clk( + if (otg_master->stream_res.pix_clk_params.controller_id > CONTROLLER_ID_UNDEFINED) + otg_master->clock_source->funcs->program_pix_clk( otg_master->clock_source, 
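	/*
	 * (On the refresh-rate math in dcn401_auto_dpm_test_log() above: adding
	 * v_total * h_total - 1 to pix_clk_100hz * 100 before the two div_u64()
	 * calls makes the result an integer round-up of
	 * pixel_clock_hz / (h_total * v_total). Illustrative numbers only: a
	 * 594 MHz pixel clock with h_total = 4400 and v_total = 2250 is logged
	 * as a refresh rate of 60.)
	 */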
&otg_master->stream_res.pix_clk_params, - dccg->ctx->dc->link_srv->dp_get_encoding_format(&otg_master->link_config.dp_link_settings), + dccg->ctx->dc->link_srv->dp_get_encoding_format( + &otg_master->link_config.dp_link_settings), &otg_master->pll_settings); } } @@ -738,12 +933,12 @@ static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned static unsigned int dcn401_build_update_bandwidth_clocks_sequence( struct clk_mgr *clk_mgr_base, struct dc_state *context, + struct dc_clocks *new_clocks, bool safe_to_lower) { struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal); struct dc *dc = clk_mgr_base->ctx->dc; - struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence; bool enter_display_off = false; bool update_active_fclk = false; @@ -1025,13 +1220,13 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence( static unsigned int dcn401_build_update_display_clocks_sequence( struct clk_mgr *clk_mgr_base, struct dc_state *context, + struct dc_clocks *new_clocks, bool safe_to_lower) { struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal); struct dc *dc = clk_mgr_base->ctx->dc; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; - struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence; bool force_reset = false; bool update_dispclk = false; @@ -1171,9 +1366,6 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base, unsigned int num_steps = 0; - if (dc->work_arounds.skip_clock_update) - return; - if (dc->debug.enable_legacy_clock_update) { dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower); return; @@ -1182,6 +1374,7 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base, /* build bandwidth related clocks update sequence */ num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base, context, + &context->bw_ctx.bw.dcn.clk, safe_to_lower); /* execute sequence */ @@ -1190,10 +1383,15 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base, /* build display related clocks update sequence */ num_steps = dcn401_build_update_display_clocks_sequence(clk_mgr_base, context, + &context->bw_ctx.bw.dcn.clk, safe_to_lower); /* execute sequence */ dcn401_execute_block_sequence(clk_mgr_base, num_steps); + + if (dc->config.enable_auto_dpm_test_logs) + dcn401_auto_dpm_test_log(&context->bw_ctx.bw.dcn.clk, TO_CLK_MGR_INTERNAL(clk_mgr_base), context); + } @@ -1218,59 +1416,6 @@ static uint32_t dcn401_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_m return dc_fixpt_floor(pll_req); } -static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, - struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - uint32_t dprefclk_did = 0; - uint32_t dcfclk_did = 0; - uint32_t dtbclk_did = 0; - uint32_t dispclk_did = 0; - uint32_t dppclk_did = 0; - uint32_t target_div = 0; - - /* DFS Slice 0 is used for DISPCLK */ - dispclk_did = REG_READ(CLK0_CLK0_DFS_CNTL); - /* DFS Slice 1 is used for DPPCLK */ - dppclk_did = REG_READ(CLK0_CLK1_DFS_CNTL); - /* DFS Slice 2 is used for DPREFCLK */ - dprefclk_did = REG_READ(CLK0_CLK2_DFS_CNTL); - /* DFS Slice 3 is used for 
DCFCLK */ - dcfclk_did = REG_READ(CLK0_CLK3_DFS_CNTL); - /* DFS Slice 4 is used for DTBCLK */ - dtbclk_did = REG_READ(CLK0_CLK4_DFS_CNTL); - - /* Convert DISPCLK DFS Slice DID to divider*/ - target_div = dentist_get_divider_from_did(dispclk_did); - //Get dispclk in khz - regs_and_bypass->dispclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; - - /* Convert DISPCLK DFS Slice DID to divider*/ - target_div = dentist_get_divider_from_did(dppclk_did); - //Get dppclk in khz - regs_and_bypass->dppclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; - - /* Convert DPREFCLK DFS Slice DID to divider*/ - target_div = dentist_get_divider_from_did(dprefclk_did); - //Get dprefclk in khz - regs_and_bypass->dprefclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; - - /* Convert DCFCLK DFS Slice DID to divider*/ - target_div = dentist_get_divider_from_did(dcfclk_did); - //Get dcfclk in khz - regs_and_bypass->dcfclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; - - /* Convert DTBCLK DFS Slice DID to divider*/ - target_div = dentist_get_divider_from_did(dtbclk_did); - //Get dtbclk in khz - regs_and_bypass->dtbclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; -} - static void dcn401_clock_read_ss_info(struct clk_mgr_internal *clk_mgr) { struct dc_bios *bp = clk_mgr->base.ctx->dc_bios; @@ -1330,33 +1475,34 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base) static void dcn401_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + const struct dc *dc = clk_mgr->base.ctx->dc; + struct dc_state *context = dc->current_state; + struct dc_clocks new_clocks; + int num_steps; if (!clk_mgr->smu_present || !dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK)) return; + /* build clock update */ + memcpy(&new_clocks, &clk_mgr_base->clks, sizeof(struct dc_clocks)); + if (current_mode) { - if (clk_mgr_base->clks.p_state_change_support) - dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); - else - dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->max_memclk_mhz); + new_clocks.dramclk_khz = context->bw_ctx.bw.dcn.clk.dramclk_khz; + new_clocks.idle_dramclk_khz = context->bw_ctx.bw.dcn.clk.idle_dramclk_khz; + new_clocks.p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; } else { - dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz); + new_clocks.dramclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz * 1000; + new_clocks.idle_dramclk_khz = new_clocks.dramclk_khz; + new_clocks.p_state_change_support = true; } -} - -/* Set max memclk to highest DPM value */ -static void dcn401_set_hard_max_memclk(struct clk_mgr *clk_mgr_base) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - if (!clk_mgr->smu_present || !dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK)) - return; + num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base, + context, + &new_clocks, + true); - dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->max_memclk_mhz); + /* execute sequence */ + dcn401_execute_block_sequence(clk_mgr_base, num_steps); } /* Get current memclk states, update bounding box */ 
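/*
 * (On the DFS read-back in dcn401_dump_clk_registers(): each clock is
 * recovered as DENTIST_DIVIDER_RANGE_SCALE_FACTOR * dentist_vco_freq_khz /
 * divider, with the divider taken from dentist_get_divider_from_did().
 * Sketch with assumed numbers: if the scale factor is 4, as it is elsewhere
 * in the dcn clk_mgr code, a 3.6 GHz DFS VCO and a DID mapping to divider 12
 * (divide-by-3) read back as 4 * 3600000 / 12 = 1200000 kHz.)
 */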
@@ -1487,7 +1633,6 @@ static struct clk_mgr_funcs dcn401_funcs = { .init_clocks = dcn401_init_clocks, .notify_wm_ranges = dcn401_notify_wm_ranges, .set_hard_min_memclk = dcn401_set_hard_min_memclk, - .set_hard_max_memclk = dcn401_set_hard_max_memclk, .get_memclk_states_from_smu = dcn401_get_memclk_states_from_smu, .are_clock_states_equal = dcn401_are_clock_states_equal, .enable_pme_wa = dcn401_enable_pme_wa, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 85a2ef82afa535..5c39390ecbd5c1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1254,7 +1254,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); if (pipe->stream && pipe->plane_state) { - set_p_state_switch_method(dc, context, pipe); + if (!dc->debug.using_dml2) + set_p_state_switch_method(dc, context, pipe); dc_update_visual_confirm_color(dc, context, pipe); } @@ -1351,80 +1352,6 @@ static void disable_vbios_mode_if_required( } } -/** - * wait_for_blank_complete - wait for all active OPPs to finish pending blank - * pattern updates - * - * @dc: [in] dc reference - * @context: [in] hardware context in use - */ -static void wait_for_blank_complete(struct dc *dc, - struct dc_state *context) -{ - struct pipe_ctx *opp_head; - struct dce_hwseq *hws = dc->hwseq; - int i; - - if (!hws->funcs.wait_for_blank_complete) - return; - - for (i = 0; i < MAX_PIPES; i++) { - opp_head = &context->res_ctx.pipe_ctx[i]; - - if (!resource_is_pipe_type(opp_head, OPP_HEAD) || - dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM) - continue; - - hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp); - } -} - -static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context) -{ - struct pipe_ctx *otg_master; - struct timing_generator *tg; - int i; - - for (i = 0; i < MAX_PIPES; i++) { - otg_master = &context->res_ctx.pipe_ctx[i]; - if (!resource_is_pipe_type(otg_master, OTG_MASTER) || - dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM) - continue; - tg = otg_master->stream_res.tg; - if (tg->funcs->wait_odm_doublebuffer_pending_clear) - tg->funcs->wait_odm_doublebuffer_pending_clear(tg); - } - - /* ODM update may require to reprogram blank pattern for each OPP */ - wait_for_blank_complete(dc, context); -} - -static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) -{ - int i; - PERF_TRACE(); - for (i = 0; i < MAX_PIPES; i++) { - int count = 0; - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) - continue; - - /* Timeout 100 ms */ - while (count < 100000) { - /* Must set to false to start with, due to OR in update function */ - pipe->plane_state->status.is_flip_pending = false; - dc->hwss.update_pending_status(pipe); - if (!pipe->plane_state->status.is_flip_pending) - break; - udelay(1); - count++; - } - ASSERT(!pipe->plane_state->status.is_flip_pending); - } - PERF_TRACE(); -} - /* Public functions */ struct dc *dc_create(const struct dc_init_data *init_params) @@ -1822,17 +1749,25 @@ bool dc_validate_boot_timing(const struct dc *dc, tg->funcs->get_optc_source(tg, &numOdmPipes, &id_src[0], &id_src[1]); - if (numOdmPipes == 2) + if (numOdmPipes == 2) { pix_clk_100hz *= 2; - if (numOdmPipes == 4) + } else if (numOdmPipes == 4) { pix_clk_100hz *= 4; + } else if (se && 
se->funcs->get_pixels_per_cycle) { + uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se); + + if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) + return false; + + pix_clk_100hz *= pixels_per_cycle; + } // Note: In rare cases, HW pixclk may differ from crtc's pixclk // slightly due to rounding issues in 10 kHz units. if (crtc_timing->pix_clk_100hz != pix_clk_100hz) return false; - if (!se->funcs->dp_get_pixel_format) + if (!se || !se->funcs->dp_get_pixel_format) return false; if (!se->funcs->dp_get_pixel_format( @@ -2100,12 +2035,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c if (context->stream_count > get_seamless_boot_stream_count(context) || context->stream_count == 0) { /* Must wait for no flips to be pending before doing optimize bw */ - wait_for_no_pipes_pending(dc, context); + hwss_wait_for_no_pipes_pending(dc, context); /* * optimized dispclk depends on ODM setup. Need to wait for ODM * update pending complete before optimizing bandwidth. */ - wait_for_odm_update_pending_complete(dc, context); + hwss_wait_for_odm_update_pending_complete(dc, context); /* pplib is notified if disp_num changed */ dc->hwss.optimize_bandwidth(dc, context); /* Need to do otg sync again as otg could be out of sync due to otg @@ -2441,7 +2376,7 @@ static bool is_surface_in_context( return false; } -static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) +static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; enum surface_update_type update_type = UPDATE_TYPE_FAST; @@ -2520,7 +2455,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa /* todo: below are HW dependent, we should add a hook to * DCE/N resource and validated there. 
*/ - if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { + if (!dc->debug.skip_full_updated_if_possible) { /* swizzled mode requires RQ to be setup properly, * thus need to run DML to calculate RQ settings */ @@ -2612,7 +2547,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc, update_flags->raw = 0; // Reset all flags - type = get_plane_info_update_type(u); + type = get_plane_info_update_type(dc, u); elevate_update_type(&overall_type, type); type = get_scaling_info_update_type(dc, u); @@ -2661,6 +2596,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, UPDATE_TYPE_MED); } + if (u->sdr_white_level_nits) + if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { + update_flags->bits.sdr_white_level_nits = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL); + } + if (u->cm2_params) { if ((u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting) @@ -2716,6 +2657,10 @@ static enum surface_update_type check_update_surfaces_for_stream( overall_type = UPDATE_TYPE_FULL; } + if (stream_update && stream_update->hw_cursor_req) { + overall_type = UPDATE_TYPE_FULL; + } + /* some stream updates require passive update */ if (stream_update) { union stream_update_flags *su_flags = &stream_update->stream->update_flags; @@ -2751,6 +2696,9 @@ static enum surface_update_type check_update_surfaces_for_stream( stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) su_flags->bits.fams_changed = 1; + if (stream_update->scaler_sharpener_update) + su_flags->bits.scaler_sharpener = 1; + if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -2934,6 +2882,10 @@ static void copy_surface_update_to_plane( surface->hdr_mult = srf_update->hdr_mult; + if (srf_update->sdr_white_level_nits) + surface->sdr_white_level_nits = + srf_update->sdr_white_level_nits; + if (srf_update->blend_tf) memcpy(&surface->blend_tf, srf_update->blend_tf, sizeof(surface->blend_tf)); @@ -3011,6 +2963,9 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vrr_infopacket) stream->vrr_infopacket = *update->vrr_infopacket; + if (update->hw_cursor_req) + stream->hw_cursor_req = *update->hw_cursor_req; + if (update->allow_freesync) stream->allow_freesync = *update->allow_freesync; @@ -3080,6 +3035,8 @@ static void copy_stream_update_to_stream(struct dc *dc, update->dsc_config = NULL; } } + if (update->scaler_sharpener_update) + stream->scaler_sharpener_update = *update->scaler_sharpener_update; } static void backup_planes_and_stream_state( @@ -3704,7 +3661,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->plane_state) { - set_p_state_switch_method(dc, context, pipe); + if (!dc->debug.using_dml2) + set_p_state_switch_method(dc, context, pipe); if (dc->debug.visual_confirm) dc_update_visual_confirm_color(dc, context, pipe); @@ -3739,7 +3697,7 @@ static void commit_planes_for_stream_fast(struct dc *dc, surface_count, stream, context); - } else { + } else if (stream_status) { build_dmub_cmd_list(dc, srf_updates, surface_count, @@ -3769,47 +3727,6 @@ static void commit_planes_for_stream_fast(struct dc *dc, top_pipe_to_program->stream->update_flags.raw = 0; } -static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context) -{ -/* - * This function calls HWSS to wait for any potentially double buffered - * operations to complete. 
It should be invoked as a pre-amble prior - * to full update programming before asserting any HW locks. - */ - int pipe_idx; - int opp_inst; - int opp_count = dc->res_pool->res_cap->num_opp; - struct hubp *hubp; - int mpcc_inst; - const struct pipe_ctx *pipe_ctx; - - for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { - pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx]; - - if (!pipe_ctx->stream) - continue; - - if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) - pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); - - hubp = pipe_ctx->plane_res.hubp; - if (!hubp) - continue; - - mpcc_inst = hubp->inst; - // MPCC inst is equal to pipe index in practice - for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { - if ((dc->res_pool->opps[opp_inst] != NULL) && - (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) { - dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); - dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; - break; - } - } - } - wait_for_odm_update_pending_complete(dc, dc_context); -} - static void commit_planes_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, @@ -3833,13 +3750,14 @@ static void commit_planes_for_stream(struct dc *dc, dc_z10_restore(dc); if (update_type == UPDATE_TYPE_FULL) - wait_for_outstanding_hw_updates(dc, context); + hwss_process_outstanding_hw_updates(dc, dc->current_state); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->plane_state) { - set_p_state_switch_method(dc, context, pipe); + if (!dc->debug.using_dml2) + set_p_state_switch_method(dc, context, pipe); if (dc->debug.visual_confirm) dc_update_visual_confirm_color(dc, context, pipe); @@ -4127,7 +4045,8 @@ static void commit_planes_for_stream(struct dc *dc, } if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) - if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { + if (top_pipe_to_program && + top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { top_pipe_to_program->stream_res.tg->funcs->wait_for_state( top_pipe_to_program->stream_res.tg, CRTC_STATE_VACTIVE); @@ -4335,7 +4254,8 @@ static void backup_and_set_minimal_pipe_split_policy(struct dc *dc, dc->debug.force_disable_subvp = true; for (i = 0; i < context->stream_count; i++) { policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments; - context->streams[i]->debug.force_odm_combine_segments = 0; + if (context->streams[i]->debug.allow_transition_for_forced_odm) + context->streams[i]->debug.force_odm_combine_segments = 0; } } @@ -4686,7 +4606,7 @@ static bool commit_minimal_transition_state(struct dc *dc, return true; } -static void populate_fast_updates(struct dc_fast_update *fast_update, +void populate_fast_updates(struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update) @@ -4696,6 +4616,9 @@ static void populate_fast_updates(struct dc_fast_update *fast_update, if (stream_update) { fast_update[0].out_transfer_func = stream_update->out_transfer_func; fast_update[0].output_csc_transform = stream_update->output_csc_transform; + } else { + fast_update[0].out_transfer_func = NULL; + fast_update[0].output_csc_transform = NULL; } for (i = 0; i < surface_count; i++) { @@ -4729,6 +4652,26 @@ static bool fast_updates_exist(struct dc_fast_update 
*fast_update, int surface_c return false; } +bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count) +{ + int i; + + if (fast_update[0].out_transfer_func || + fast_update[0].output_csc_transform) + return true; + + for (i = 0; i < surface_count; i++) { + if (fast_update[i].input_csc_color_matrix || + fast_update[i].gamma || + fast_update[i].gamut_remap_matrix || + fast_update[i].coeff_reduction_factor || + fast_update[i].cursor_csc_color_matrix) + return true; + } + + return false; +} + static bool full_update_required(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, @@ -4746,6 +4689,8 @@ static bool full_update_required(struct dc *dc, srf_updates[i].scaling_info || (srf_updates[i].hdr_mult.value && srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || + (srf_updates[i].sdr_white_level_nits && + srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) || srf_updates[i].in_transfer_func || srf_updates[i].func_shaper || srf_updates[i].lut3d_func || @@ -4785,7 +4730,8 @@ static bool full_update_required(struct dc *dc, stream_update->func_shaper || stream_update->lut3d_func || stream_update->pending_test_pattern || - stream_update->crtc_timing_adjust)) + stream_update->crtc_timing_adjust || + stream_update->scaler_sharpener_update)) return true; if (stream) { @@ -5233,6 +5179,8 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) dc_z10_restore(dc); + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); + dc->hwss.init_hw(dc); if (dc->hwss.init_sys_ctx != NULL && @@ -5244,6 +5192,8 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) default: ASSERT(dc->current_state->stream_count == 0); + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); + dc_state_destruct(dc->current_state); break; @@ -5382,7 +5332,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const if (allow == dc->idle_optimizations_allowed) return; - if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow)) + if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL && + dc->hwss.apply_idle_power_optimizations(dc, allow)) dc->idle_optimizations_allowed = allow; } @@ -5451,9 +5402,10 @@ static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memcl hubp->funcs->set_blank_regs(hubp, true); } } - - dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); - dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); + if (dc->clk_mgr->funcs->set_max_memclk) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); + if (dc->clk_mgr->funcs->set_min_memclk) + dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; @@ -5502,7 +5454,7 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { if (p_state_change_support) { - if (funcMin <= softMax) + if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); // else: No-Op } else { @@ -5512,7 +5464,7 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) } } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { if (p_state_change_support) { - if (funcMin <= softMax) + if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk) 
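			/*
			 * (The funcs->set_max_memclk / set_min_memclk checks added
			 * here and in blank_and_force_memclk() guard against clk_mgr
			 * implementations that do not provide these hooks; when the
			 * pointer is NULL the clamp is simply skipped.)
			 */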
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); // else: No-Op } else { @@ -5563,6 +5515,9 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) */ bool dc_is_dmub_outbox_supported(struct dc *dc) { + if (!dc->caps.dmcub_support) + return false; + switch (dc->ctx->asic_id.chip_family) { case FAMILY_YELLOW_CARP: @@ -5800,6 +5755,27 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, return DC_OK; } +/** + * dc_process_dmub_dpia_set_tps_notification - Submits tps notification + * + * @dc: [in] dc structure + * @link_index: [in] link index + * @tps: [in] request tps + * + * Submits set_tps_notification command to dmub via inbox message + */ +void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps) +{ + union dmub_rb_cmd cmd = {0}; + + cmd.set_tps_notification.header.type = DMUB_CMD__DPIA; + cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION; + cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst; + cmd.set_tps_notification.tps_notification.tps = tps; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + /** * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption * diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 87e36d51c56d80..7ee2be8f82c467 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -636,57 +636,59 @@ void hwss_build_fast_sequence(struct dc *dc, while (current_pipe) { current_mpc_pipe = current_pipe; while (current_mpc_pipe) { - if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) { - block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; - block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; - block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; - (*num_steps)++; - } - if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) { - block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc; - block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe; - block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips; - block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; - (*num_steps)++; - } - if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { - if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) && - stream_status->mall_stream_config.type == SUBVP_MAIN) { - block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; - block_sequence[*num_steps].params.subvp_save_surf_addr.addr = ¤t_mpc_pipe->plane_state->address; - block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index; - block_sequence[*num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR; + if (current_mpc_pipe->plane_state) { + if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = 
current_mpc_pipe->plane_state->flip_immediate; + block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; + (*num_steps)++; + } + if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) { + block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc; + block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips; + block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; + (*num_steps)++; + } + if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { + if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) && + stream_status->mall_stream_config.type == SUBVP_MAIN) { + block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; + block_sequence[*num_steps].params.subvp_save_surf_addr.addr = ¤t_mpc_pipe->plane_state->address; + block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index; + block_sequence[*num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR; + (*num_steps)++; + } + + block_sequence[*num_steps].params.update_plane_addr_params.dc = dc; + block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR; (*num_steps)++; } - block_sequence[*num_steps].params.update_plane_addr_params.dc = dc; - block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe; - block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR; - (*num_steps)++; - } - - if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) { - block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc; - block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe; - block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state; - block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; - (*num_steps)++; - } + if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) { + block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc; + block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state; + block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; + (*num_steps)++; + } - if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) { - block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe; - block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP; - (*num_steps)++; - } - if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) { - block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe; - block_sequence[*num_steps].func = DPP_SETUP_DPP; - (*num_steps)++; - } - if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) { - block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe; - block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; - (*num_steps)++; + if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) 
{ + block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) { + block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_SETUP_DPP; + (*num_steps)++; + } + if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) { + block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; + (*num_steps)++; + } } if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) { block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc; @@ -901,12 +903,12 @@ void hwss_program_bias_and_scale(union block_sequence_params *params) struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; - struct dc_bias_and_scale bns_params = {0}; + struct dc_bias_and_scale bns_params = plane_state->bias_and_scale; //TODO :for CNVC set scale and bias registers if necessary - build_prescale_params(&bns_params, plane_state); - if (dpp->funcs->dpp_program_bias_and_scale) + if (dpp->funcs->dpp_program_bias_and_scale) { dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); + } } void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params) @@ -976,3 +978,126 @@ void get_surface_tile_visual_confirm_color( break; } } + +/** + * hwss_wait_for_all_blank_complete - wait for all active OPPs to finish pending blank + * pattern updates + * + * @dc: [in] dc reference + * @context: [in] hardware context in use + */ +void hwss_wait_for_all_blank_complete(struct dc *dc, + struct dc_state *context) +{ + struct pipe_ctx *opp_head; + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (!hws->funcs.wait_for_blank_complete) + return; + + for (i = 0; i < MAX_PIPES; i++) { + opp_head = &context->res_ctx.pipe_ctx[i]; + + if (!resource_is_pipe_type(opp_head, OPP_HEAD) || + dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM) + continue; + + hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp); + } +} + +void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context) +{ + struct pipe_ctx *otg_master; + struct timing_generator *tg; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + otg_master = &context->res_ctx.pipe_ctx[i]; + if (!resource_is_pipe_type(otg_master, OTG_MASTER) || + dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM) + continue; + tg = otg_master->stream_res.tg; + if (tg->funcs->wait_odm_doublebuffer_pending_clear) + tg->funcs->wait_odm_doublebuffer_pending_clear(tg); + } + + /* ODM update may require to reprogram blank pattern for each OPP */ + hwss_wait_for_all_blank_complete(dc, context); +} + +void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) +{ + int i; + for (i = 0; i < MAX_PIPES; i++) { + int count = 0; + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) + continue; + + /* Timeout 100 ms */ + while (count < 100000) { + /* Must set to false to start with, due to OR in update function */ + pipe->plane_state->status.is_flip_pending = false; + dc->hwss.update_pending_status(pipe); + if 
(!pipe->plane_state->status.is_flip_pending) + break; + udelay(1); + count++; + } + ASSERT(!pipe->plane_state->status.is_flip_pending); + } +} + +void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context) +{ +/* + * This function calls HWSS to wait for any potentially double buffered + * operations to complete. It should be invoked as a pre-amble prior + * to full update programming before asserting any HW locks. + */ + int pipe_idx; + int opp_inst; + int opp_count = dc->res_pool->res_cap->num_opp; + struct hubp *hubp; + int mpcc_inst; + const struct pipe_ctx *pipe_ctx; + + for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { + pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx]; + + if (!pipe_ctx->stream) + continue; + + if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) + pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); + + hubp = pipe_ctx->plane_res.hubp; + if (!hubp) + continue; + + mpcc_inst = hubp->inst; + // MPCC inst is equal to pipe index in practice + for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { + if ((dc->res_pool->opps[opp_inst] != NULL) && + (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) { + dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); + dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; + break; + } + } + } + hwss_wait_for_odm_update_pending_complete(dc, dc_context); +} + +void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context) +{ + /* wait for outstanding updates */ + hwss_wait_for_outstanding_hw_updates(dc, dc_context); + + /* perform outstanding post update programming */ + if (dc->hwss.program_outstanding_updates) + dc->hwss.program_outstanding_updates(dc, dc_context); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index bcb5267b5a6bc2..c7599c40d4be38 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -342,11 +342,6 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, res_pool->ref_clocks.xtalin_clock_inKhz; res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; - if (dc->debug.using_dml2) - if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq) - res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); } else ASSERT_CRITICAL(false); } @@ -1511,8 +1506,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha; - spl_out->scl_data.h_active = pipe_ctx->plane_res.scl_data.h_active; - spl_out->scl_data.v_active = pipe_ctx->plane_res.scl_data.v_active; // Convert pipe_ctx to respective input params for SPL translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in); @@ -3241,6 +3234,8 @@ static bool are_stream_backends_same( bool dc_is_stream_unchanged( struct dc_stream_state *old_stream, struct dc_stream_state *stream) { + if (!old_stream || !stream) + return false; if (!are_stream_backends_same(old_stream, stream)) return false; @@ -3771,8 +3766,10 @@ static bool planes_changed_for_existing_stream(struct dc_state *context, } } - if (!stream_status) + if (!stream_status) { ASSERT(0); + return false; + } for (i = 0; i < 
set_count; i++) if (set[i].stream == stream) @@ -5164,7 +5161,7 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy( sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp; if (sec_pipe->stream->timing.flags.DSC == 1) { #if defined(CONFIG_DRM_AMD_DC_FP) - dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx); + dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, sec_pipe->stream_res.opp->inst); #endif ASSERT(sec_pipe->stream_res.dsc); if (sec_pipe->stream_res.dsc == NULL) @@ -5271,3 +5268,44 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes; dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes; } + +/* Returns number of DET segments allocated for a given OTG_MASTER pipe */ +int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master) +{ + struct pipe_ctx *opp_heads[MAX_PIPES]; + struct pipe_ctx *dpp_pipes[MAX_PIPES]; + + int dpp_count = 0; + int det_segments = 0; + + if (!otg_master->stream) + return 0; + + int slice_count = resource_get_opp_heads_for_otg_master(otg_master, + &state->res_ctx, opp_heads); + + for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) { + if (opp_heads[slice_idx]->plane_state) { + dpp_count = resource_get_dpp_pipes_for_opp_head( + opp_heads[slice_idx], + &state->res_ctx, + dpp_pipes); + for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) + det_segments += dpp_pipes[dpp_idx]->hubp_regs.det_size; + } + } + return det_segments; +} + +bool resource_is_hpo_acquired(struct dc_state *context) +{ + int i; + + for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) { + if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) { + return true; + } + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index cd6570a1e20ea5..fe9f99f1bdf9f8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -61,6 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification /* For HPD/HPD RX, convert dpia port index into link index */ if (notify->type == DMUB_NOTIFICATION_HPD || notify->type == DMUB_NOTIFICATION_HPD_IRQ || + notify->type == DMUB_NOTIFICATION_AUX_REPLY || notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { notify->link_index = diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index e990346e51f67e..2597e3fd562bbe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -211,10 +211,16 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p #ifdef CONFIG_DRM_AMD_DC_FP if (dc->debug.using_dml2) { dml2_opt->use_clock_dc_limits = false; - dml2_create(dc, dml2_opt, &state->bw_ctx.dml2); + if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) { + dc_state_release(state); + return NULL; + } dml2_opt->use_clock_dc_limits = true; - dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source); + if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) { + dc_state_release(state); + return NULL; + } } #endif @@ -961,10 +967,10 @@ bool dc_state_is_fams2_in_use( bool is_fams2_in_use = false; if (state) - is_fams2_in_use |= 
state->bw_ctx.bw.dcn.fams2_stream_count > 0; + is_fams2_in_use |= state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable; if (dc->current_state) - is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0; + is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable; return is_fams2_in_use; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 73cdebcd9f37cd..3992ad73165bc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -55,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.291" +#define DC_VER "3.2.301" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -261,10 +261,7 @@ struct dc_caps { bool zstate_support; bool ips_support; uint32_t num_of_internal_disp; - uint32_t max_dwb_htap; - uint32_t max_dwb_vtap; enum dp_protocol_version max_dp_protocol_version; - bool spdif_aud; unsigned int mall_size_per_mem_channel; unsigned int mall_size_total; unsigned int cursor_cache_size; @@ -309,8 +306,6 @@ struct dc_bug_wa { uint8_t dcfclk_ds: 1; } clock_update_disable_mask; bool skip_psr_ips_crtc_disable; - //Customer Specific WAs - uint32_t force_backlight_start_level; }; struct dc_dcc_surface_param { struct dc_size surface_size; @@ -466,6 +461,8 @@ struct dc_config { bool use_assr_psp_message; bool support_edp0_on_dp1; unsigned int enable_fpo_flicker_detection; + bool disable_hbr_audio_dp2; + bool consolidated_dpia_dp_lt; }; enum visual_confirm { @@ -513,6 +510,7 @@ enum in_game_fams_config { INGAME_FAMS_SINGLE_DISP_ENABLE, // enable in-game fams INGAME_FAMS_DISABLE, // disable in-game fams INGAME_FAMS_MULTI_DISP_ENABLE, //enable in-game fams for multi-display + INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY, //enable in-game fams for multi-display only for clamped RR strategies }; /** @@ -764,7 +762,9 @@ union dpia_debug_options { uint32_t extend_aux_rd_interval:1; /* bit 2 */ uint32_t disable_mst_dsc_work_around:1; /* bit 3 */ uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ - uint32_t reserved:27; + uint32_t disable_usb4_pm_support:1; /* bit 5 */ + uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */ + uint32_t reserved:25; } bits; uint32_t raw; }; @@ -981,6 +981,7 @@ struct dc_debug_options { bool disable_z10; bool enable_z9_disable_interface; bool psr_skip_crtc_disable; + uint32_t ips_skip_crtc_disable_mask; union dpia_debug_options dpia_debug; bool disable_fixed_vs_aux_timeout_wa; uint32_t fixed_vs_aux_delay_config_wa; @@ -1053,8 +1054,13 @@ struct dc_debug_options { unsigned int disable_spl; unsigned int force_easf; unsigned int force_sharpness; + unsigned int force_sharpness_level; unsigned int force_lls; bool notify_dpia_hr_bw; + bool enable_ips_visual_confirm; + unsigned int sharpen_policy; + unsigned int scale_to_sharpness_policy; + bool skip_full_updated_if_possible; }; @@ -1268,6 +1274,7 @@ union surface_update_flags { uint32_t tmz_changed:1; uint32_t mcm_transfer_function_enable_change:1; /* disable or enable MCM transfer func */ uint32_t full_update:1; + uint32_t sdr_white_level_nits:1; } bits; uint32_t raw; @@ -1291,7 +1298,7 @@ struct dc_plane_state { struct dc_gamma gamma_correction; struct dc_transfer_func in_transfer_func; - struct dc_bias_and_scale *bias_and_scale; + struct dc_bias_and_scale bias_and_scale; struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; struct fixed31_32 hdr_mult; @@ -1348,8 +1355,9 @@ struct dc_plane_state { enum 
mpcc_movable_cm_location mcm_location; struct dc_csc_transform cursor_csc_color_matrix; bool adaptive_sharpness_en; - unsigned int sharpnessX1000; + int sharpness_level; enum linear_light_scaling linear_light_scaling; + unsigned int sdr_white_level_nits; }; struct dc_plane_info { @@ -1368,7 +1376,6 @@ struct dc_plane_info { int global_alpha_value; bool input_csc_enabled; int layer_index; - bool front_buffer_rendering_active; enum chroma_cositing cositing; }; @@ -1508,6 +1515,7 @@ struct dc_surface_update { */ struct dc_cm2_parameters *cm2_params; const struct dc_csc_transform *cursor_csc_color_matrix; + unsigned int sdr_white_level_nits; }; /* @@ -1585,6 +1593,12 @@ bool dc_acquire_release_mpc_3dlut( bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); void get_audio_check(struct audio_info *aud_modes, struct audio_check *aud_chk); + +bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count); +void populate_fast_updates(struct dc_fast_update *fast_update, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_update *stream_update); /* * Set up streams and links associated to drive sinks * The streams parameter is an absolute set of all active streams. @@ -1756,6 +1770,7 @@ struct dc_link { bool dongle_mode_timing_override; bool blank_stream_on_ocs_change; bool read_dpcd204h_on_irq_hpd; + bool disable_assr_for_uhbr; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -2513,6 +2528,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, uint8_t mst_alloc_slots, uint8_t *mst_slots_in_use); +void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps); + void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, uint32_t hpd_int_enable); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index ded13026c8ff7c..1e7de0f03290a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -979,6 +979,9 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr); DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr); DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size); + DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr); + DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr); + DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size); DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled); DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset); DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset); @@ -1282,7 +1285,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) union dmub_shared_state_ips_driver_signals new_signals; DC_LOG_IPS( - "%s wait idle (ips1_commit=%d ips2_commit=%d)", + "%s wait idle (ips1_commit=%u ips2_commit=%u)", __func__, ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1328,7 +1331,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) } DC_LOG_IPS( - "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)", + "%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)", __func__, allow_idle, ips_fw->signals.bits.ips1_commit, @@ -1371,7 +1374,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) dc_dmub_srv->driver_signals = ips_driver->signals; DC_LOG_IPS( - "%s (allow ips1=%d ips2=%d) (commit ips1=%d 
ips2=%d) (count rcg=%d ips1=%d ips2=%d)", + "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)", __func__, ips_driver->signals.bits.allow_ips1, ips_driver->signals.bits.allow_ips2, @@ -1390,7 +1393,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) (!dc->debug.optimize_ips_handshake || ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { DC_LOG_IPS( - "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)", + "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1399,7 +1402,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) if (ips_fw->signals.bits.ips2_commit) { DC_LOG_IPS( - "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)", + "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1407,7 +1410,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); DC_LOG_IPS( - "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)", + "wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1415,14 +1418,14 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) udelay(dc->debug.ips2_entry_delay_us); DC_LOG_IPS( - "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)", + "exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); DC_LOG_IPS( - "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)", + "wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1430,7 +1433,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) udelay(1); DC_LOG_IPS( - "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)", + "wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1438,7 +1441,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) ASSERT(0); DC_LOG_IPS( - "resync inbox1 (ips1_commit=%d ips2_commit=%d)", + "resync inbox1 (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1449,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) dc_dmub_srv_notify_idle(dc, false); if (prev_driver_signals.bits.allow_ips1) { DC_LOG_IPS( - "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)", + "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1457,7 +1460,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) udelay(1); DC_LOG_IPS( - "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)", + "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); } @@ -1466,14 +1469,14 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)", + DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)", __func__, rcg_exit_count, ips1_exit_count, ips2_exit_count); } -void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState) +void dc_dmub_srv_set_power_state(struct 
dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) { struct dmub_srv *dmub; @@ -1482,12 +1485,38 @@ void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_c dmub = dc_dmub_srv->dmub; - if (powerState == DC_ACPI_CM_POWER_STATE_D0) + if (power_state == DC_ACPI_CM_POWER_STATE_D0) dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0); else dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3); } +void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv, + enum dc_acpi_cm_power_state power_state) +{ + union dmub_rb_cmd cmd; + + if (!dc_dmub_srv) + return; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT; + cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE; + cmd.idle_opt_set_dc_power_state.header.payload_bytes = + sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header); + + if (power_state == DC_ACPI_CM_POWER_STATE_D0) { + cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0; + } else if (power_state == DC_ACPI_CM_POWER_STATE_D3) { + cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3; + } else { + cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN; + } + + dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv) { volatile const struct dmub_shared_state_ips_fw *ips_fw; @@ -1672,22 +1701,17 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); - /* send global configuration parameters */ - global_cmd->config.global.max_allow_delay_us = 100 * 1000; //100ms - global_cmd->config.global.lock_wait_time_us = 5000; //5ms - global_cmd->config.global.recovery_timeout_us = 5000; //5ms - global_cmd->config.global.hwfq_flip_programming_delay_us = 100; //100us - - /* copy static feature configuration */ - global_cmd->config.global.features.all = dc->debug.fams2_config.all; + if (enable) { + /* send global configuration parameters */ + memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config)); - /* apply feature configuration based on current driver state */ - global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; - global_cmd->config.global.features.bits.enable = enable; + /* copy static feature configuration overrides */ + global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery; + global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug; + global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip; - /* construct per-stream configs */ - if (enable) { - for (i = 0; i < context->bw_ctx.bw.dcn.fams2_stream_count; i++) { + /* construct per-stream configs */ + for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) { struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config; /* configure command header */ @@ -1702,12 +1726,15 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, } } - if (enable && context->bw_ctx.bw.dcn.fams2_stream_count) { + /* apply feature configuration based on current driver state 
*/ + global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; + global_cmd->config.global.features.bits.enable = enable; + + if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) { /* set multi pending for global, and unset for last stream cmd */ - global_cmd->config.global.num_streams = context->bw_ctx.bw.dcn.fams2_stream_count; global_cmd->header.multi_cmd_pending = 1; - cmd[context->bw_ctx.bw.dcn.fams2_stream_count].fams2_config.header.multi_cmd_pending = 0; - num_cmds += context->bw_ctx.bw.dcn.fams2_stream_count; + cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0; + num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams; } dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 580940222777ea..42f0cb672d8bb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -109,7 +109,29 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait); void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle); -void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState); +/** + * dc_dmub_srv_set_power_state() - Sets the power state for DMUB service. + * + * Controls whether messaging the DMCUB or interfacing with it via HW register + * interaction is permissible. + * + * @dc_dmub_srv - The DC DMUB service pointer + * @power_state - the DC power state + */ +void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state); + +/** + * dc_dmub_srv_notify_fw_dc_power_state() - Notifies firmware of the DC power state. + * + * Differs from dc_dmub_srv_set_power_state in that it needs to access HW in order + * to message DMCUB of the state transition. Should come after the D0 exit and + * before D3 set power state. + * + * @dc_dmub_srv - The DC DMUB service pointer + * @power_state - the DC power state + */ +void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv, + enum dc_acpi_cm_power_state power_state); /** * @dc_dmub_srv_should_detect() - Checks if link detection is required.
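/*
 * Minimal usage sketch for the two declarations above, illustrating the
 * ordering described in the kernel-doc: notify the firmware while DMCUB
 * access is still permitted, before the D3 set-power-state call, and in the
 * reverse order on the way back to D0. This is an assumption-based sketch,
 * not code from this patch; the wrapper names example_dm_suspend() and
 * example_dm_resume() are hypothetical.
 */
static void example_dm_suspend(struct dc_dmub_srv *dc_dmub_srv)
{
	/* Message the firmware about the upcoming D3 entry while register and
	 * mailbox access to the DMCUB is still allowed...
	 */
	dc_dmub_srv_notify_fw_dc_power_state(dc_dmub_srv, DC_ACPI_CM_POWER_STATE_D3);

	/* ...then gate further DMCUB messaging for the D3 state. */
	dc_dmub_srv_set_power_state(dc_dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
}

static void example_dm_resume(struct dc_dmub_srv *dc_dmub_srv)
{
	/* On resume, re-enable DMCUB access first, then notify the firmware
	 * that DC is back in D0.
	 */
	dc_dmub_srv_set_power_state(dc_dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
	dc_dmub_srv_notify_fw_dc_power_state(dc_dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
}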
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 519c3df78ee5b2..41bd95e9177a41 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -969,6 +969,14 @@ union dp_sink_video_fallback_formats { uint8_t raw; }; +union dpcd_max_uncompressed_pixel_rate_cap { + struct { + uint16_t max_uncompressed_pixel_rate_cap :15; + uint16_t valid :1; + } bits; + uint8_t raw[2]; +}; + union dp_fec_capability1 { struct { uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1; @@ -1170,6 +1178,7 @@ struct dpcd_caps { struct dc_lttpr_caps lttpr_caps; struct adaptive_sync_caps adaptive_sync_caps; struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; + union dpcd_max_uncompressed_pixel_rate_cap max_uncompressed_pixel_rate_cap; union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; union dp_main_line_channel_coding_cap channel_coding_cap; @@ -1340,6 +1349,9 @@ struct dp_trace { #ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX #define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110 #endif +#ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP +#define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP 0x221c +#endif #ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index fe3078b8789ef1..9014c240981786 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -59,6 +59,7 @@ struct dc_dsc_config_options { uint32_t max_target_bpp_limit_override_x16; uint32_t slice_height_granularity; uint32_t dsc_force_odm_hslice_override; + bool force_dsc_when_not_needed; }; bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, @@ -100,7 +101,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( */ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, - struct dc_dsc_policy *policy); + struct dc_dsc_policy *policy, + const enum dc_link_encoding_format link_encoding); void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 959ae0df1e565b..c10567ec1c8199 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -763,13 +763,6 @@ enum scanning_type { SCANNING_TYPE_UNDEFINED }; -enum chroma_cositing { - CHROMA_COSITING_NONE, - CHROMA_COSITING_LEFT, - CHROMA_COSITING_TOPLEFT, - CHROMA_COSITING_COUNT -}; - struct dc_crtc_timing_flags { uint32_t INTERLACE :1; uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1, diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index 58260631976493..603552dbd77164 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -42,26 +42,26 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality, static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality, const struct spl_taps *spl_scaling_quality) { - scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c; - scaling_quality->h_taps = spl_scaling_quality->h_taps; - scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c; - scaling_quality->v_taps = spl_scaling_quality->v_taps; + scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c + 1; + scaling_quality->h_taps = 
spl_scaling_quality->h_taps + 1; + scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c + 1; + scaling_quality->v_taps = spl_scaling_quality->v_taps + 1; } static void populate_ratios_from_splratios(struct scaling_ratios *ratios, - const struct spl_ratios *spl_ratios) + const struct ratio *spl_ratios) { - ratios->horz = spl_ratios->horz; - ratios->vert = spl_ratios->vert; - ratios->horz_c = spl_ratios->horz_c; - ratios->vert_c = spl_ratios->vert_c; + ratios->horz = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio >> 5, 3, 19); + ratios->vert = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio >> 5, 3, 19); + ratios->horz_c = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio_c >> 5, 3, 19); + ratios->vert_c = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio_c >> 5, 3, 19); } static void populate_inits_from_splinits(struct scl_inits *inits, - const struct spl_inits *spl_inits) + const struct init *spl_inits) { - inits->h = spl_inits->h; - inits->v = spl_inits->v; - inits->h_c = spl_inits->h_c; - inits->v_c = spl_inits->v_c; + inits->h = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int, spl_inits->h_filter_init_frac >> 5, 0, 19); + inits->v = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int, spl_inits->v_filter_init_frac >> 5, 0, 19); + inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19); + inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19); } /// @brief Translate SPL input parameters from pipe context /// @param pipe_ctx @@ -128,6 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->basic_out.always_scale = pipe_ctx->stream->ctx->dc->debug.always_scale; // Make spl input basic output info alpha_en field point to plane res scl_data lb_params alpha_en spl_in->basic_out.alpha_en = pipe_ctx->plane_res.scl_data.lb_params.alpha_en; + spl_in->basic_out.use_two_pixels_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing); // Make spl input basic input info scaling quality field point to plane state scaling_quality populate_spltaps_from_taps(&spl_in->scaling_quality, &plane_state->scaling_quality); // Translate edge adaptive scaler preference @@ -138,24 +139,36 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2) spl_in->disable_easf = true; /* Translate adaptive sharpening preference */ - if (pipe_ctx->stream->ctx->dc->debug.force_sharpness > 0) { - spl_in->adaptive_sharpness.enable = (pipe_ctx->stream->ctx->dc->debug.force_sharpness > 1) ? 
true : false; - if (pipe_ctx->stream->ctx->dc->debug.force_sharpness == 2) - spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW; - else if (pipe_ctx->stream->ctx->dc->debug.force_sharpness == 3) - spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID; - else if (pipe_ctx->stream->ctx->dc->debug.force_sharpness >= 4) - spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH; - } else { - spl_in->adaptive_sharpness.enable = plane_state->adaptive_sharpness_en; - if (plane_state->sharpnessX1000 == 0) + unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness; + unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level; + if (sharpness_setting == SHARPNESS_HW_OFF) + spl_in->adaptive_sharpness.enable = false; + else if (sharpness_setting == SHARPNESS_ZERO) { + spl_in->adaptive_sharpness.enable = true; + spl_in->adaptive_sharpness.sharpness_level = 0; + } else if (sharpness_setting == SHARPNESS_CUSTOM) { + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500; + + if (force_sharpness_level > 0) { + if (force_sharpness_level > 10) + force_sharpness_level = 10; + spl_in->adaptive_sharpness.enable = true; + spl_in->adaptive_sharpness.sharpness_level = force_sharpness_level; + } else if (!plane_state->adaptive_sharpness_en) { spl_in->adaptive_sharpness.enable = false; - else if (plane_state->sharpnessX1000 < 999) - spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW; - else if (plane_state->sharpnessX1000 < 1999) - spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID; - else // Any other value is high sharpness - spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH; + spl_in->adaptive_sharpness.sharpness_level = 0; + } else { + spl_in->adaptive_sharpness.enable = true; + spl_in->adaptive_sharpness.sharpness_level = plane_state->sharpness_level; + } } // Translate linear light scaling preference if (pipe_ctx->stream->ctx->dc->debug.force_lls > 0) @@ -171,6 +184,19 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->basic_in.tf_type = (enum spl_transfer_func_type) plane_state->in_transfer_func.type; spl_in->basic_in.tf_predefined_type = (enum spl_transfer_func_predefined) plane_state->in_transfer_func.tf; + spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active; + spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active; + + spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy; + spl_in->debug.scale_to_sharpness_policy = + (enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy; + + /* Check if it is stream is in fullscreen and if its HDR. 
+ * Use this to determine sharpness levels + */ + spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream); + spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream); + spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits; } /// @brief Translate SPL output parameters to pipe context @@ -179,15 +205,15 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl void translate_SPL_out_params_to_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_out *spl_out) { // Make scaler data recout point to spl output field recout - populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->scl_data.recout); + populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->dscl_prog_data->recout); // Make scaler data ratios point to spl output field ratios - populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->scl_data.ratios); + populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->dscl_prog_data->ratios); // Make scaler data viewport point to spl output field viewport - populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->scl_data.viewport); + populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->dscl_prog_data->viewport); // Make scaler data viewport_c point to spl output field viewport_c - populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->scl_data.viewport_c); + populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->dscl_prog_data->viewport_c); // Make scaler data taps point to spl output field scaling taps - populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->scl_data.taps); + populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->dscl_prog_data->taps); // Make scaler data init point to spl output field init - populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->scl_data.inits); + populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->dscl_prog_data->init); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h index c73d640c3632f9..eaa5c5373b2849 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h @@ -6,6 +6,7 @@ #define __DC_SPL_TRANSLATE_H__ #include "dc.h" #include "resource.h" +#include "dm_helpers.h" /* Map SPL input parameters to pipe context * @pipe_ctx: pipe context diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 8ebd7e9e776e34..14ea47eda0c873 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -142,6 +142,7 @@ union stream_update_flags { uint32_t mst_bw : 1; uint32_t crtc_timing_adjust : 1; uint32_t fams_changed : 1; + uint32_t scaler_sharpener : 1; } bits; uint32_t raw; @@ -159,6 +160,12 @@ struct test_pattern { struct dc_stream_debug_options { char force_odm_combine_segments; + /* + * When force_odm_combine_segments is non zero, allow dc to + * temporarily transition to ODM bypass when minimal transition state + * is required to prevent visual glitches showing on the screen + */ + char allow_transition_for_forced_odm; }; #define LUMINANCE_DATA_TABLE_SIZE 10 @@ -260,6 +267,8 @@ struct dc_stream_state { struct dc_cursor_attributes cursor_attributes; struct dc_cursor_position cursor_position; + bool 
hw_cursor_req; + uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode /* from stream struct */ @@ -300,6 +309,7 @@ struct dc_stream_state { bool is_phantom; struct luminance_data lumin_data; + bool scaler_sharpener_update; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -344,6 +354,8 @@ struct dc_stream_update { struct dc_cursor_attributes *cursor_attributes; struct dc_cursor_position *cursor_position; + bool *hw_cursor_req; + bool *scaler_sharpener_update; }; bool dc_is_stream_unchanged( diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index c550e89970336a..6d7989b751e2ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -178,6 +178,7 @@ struct dc_panel_patch { unsigned int skip_avmute; unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; + unsigned int disable_colorimetry; }; struct dc_edid_caps { @@ -590,6 +591,7 @@ enum dc_psr_state { PSR_STATE5c, PSR_STATE_HWLOCK_MGR, PSR_STATE_POLLVUPDATE, + PSR_STATE_RELEASE_HWLOCK_MGR_FULL_FRAME, PSR_STATE_INVALID = 0xFF }; @@ -1049,6 +1051,23 @@ union replay_error_status { unsigned char raw; }; +union replay_low_refresh_rate_enable_options { + struct { + //BIT[0-3]: Replay Low Hz Support control + unsigned int ENABLE_LOW_RR_SUPPORT :1; + unsigned int RESERVED_1_3 :3; + //BIT[4-15]: Replay Low Hz Enable Scenarios + unsigned int ENABLE_STATIC_SCREEN :1; + unsigned int ENABLE_FULL_SCREEN_VIDEO :1; + unsigned int ENABLE_GENERAL_UI :1; + unsigned int RESERVED_7_15 :9; + //BIT[16-31]: Replay Low Hz Enable Check + unsigned int ENABLE_STATIC_FLICKER_CHECK :1; + unsigned int RESERVED_17_31 :15; + } bits; + unsigned int raw; +}; + struct replay_config { /* Replay feature is supported */ bool replay_supported; @@ -1072,6 +1091,8 @@ struct replay_config { bool replay_support_fast_resync_in_ultra_sleep_mode; /* Replay error status */ union replay_error_status replay_error_status; + /* Replay Low Hz enable Options */ + union replay_low_refresh_rate_enable_options low_rr_enable_options; }; /* Replay feature flags*/ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h index 1e02928612446d..160c299419b726 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h @@ -328,6 +328,17 @@ type DPSTREAMCLK1_GATE_DISABLE;\ type DPSTREAMCLK2_GATE_DISABLE;\ type DPSTREAMCLK3_GATE_DISABLE;\ + type SYMCLKA_FE_GATE_DISABLE;\ + type SYMCLKB_FE_GATE_DISABLE;\ + type SYMCLKC_FE_GATE_DISABLE;\ + type SYMCLKD_FE_GATE_DISABLE;\ + type SYMCLKE_FE_GATE_DISABLE;\ + type SYMCLKA_GATE_DISABLE;\ + type SYMCLKB_GATE_DISABLE;\ + type SYMCLKC_GATE_DISABLE;\ + type SYMCLKD_GATE_DISABLE;\ + type SYMCLKE_GATE_DISABLE;\ + #define DCCG401_REG_FIELD_LIST(type) \ type OTG0_TMDS_PIXEL_RATE_DIV;\ @@ -346,11 +357,7 @@ type SYMCLK32_LE3_SRC_SEL;\ type SYMCLK32_LE2_EN;\ type SYMCLK32_LE3_EN;\ - type DP_DTO_ENABLE[MAX_PIPES];\ - type DSCCLK0_DTO_DB_EN;\ - type DSCCLK1_DTO_DB_EN;\ - type DSCCLK2_DTO_DB_EN;\ - type DSCCLK3_DTO_DB_EN; + type DP_DTO_ENABLE[MAX_PIPES]; struct dccg_shift { DCCG_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c index 68cd3258f4a97a..838d72eaa87fbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c @@ -24,6 +24,7 @@ #include 
"reg_helper.h" #include "core_types.h" +#include "resource.h" #include "dcn35_dccg.h" #define TO_DCN_DCCG(dccg)\ @@ -41,13 +42,1069 @@ #define DC_LOGGER \ dccg->ctx->logger +enum symclk_fe_source { + SYMCLK_FE_SYMCLK_A = 0, // Select functional clock from backend symclk A + SYMCLK_FE_SYMCLK_B, + SYMCLK_FE_SYMCLK_C, + SYMCLK_FE_SYMCLK_D, + SYMCLK_FE_SYMCLK_E, + SYMCLK_FE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum symclk_be_source { + SYMCLK_BE_PHYCLK = 0, // Select phy clk when sym_clk_enable = 1 + SYMCLK_BE_DPIACLK_810 = 4, + SYMCLK_BE_DPIACLK_162 = 5, + SYMCLK_BE_DPIACLK_540 = 6, + SYMCLK_BE_DPIACLK_270 = 7, + SYMCLK_BE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum physymclk_source { + PHYSYMCLK_PHYCLK = 0, // Select symclk as source of clock which is output to PHY through DCIO. + PHYSYMCLK_PHYD18CLK, // Select phyd18clk as the source of clock which is output to PHY through DCIO. + PHYSYMCLK_PHYD32CLK, // Select phyd32clk as the source of clock which is output to PHY through DCIO. + PHYSYMCLK_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum dtbclk_source { + DTBCLK_DPREFCLK = 0, // Selects source for DTBCLK_P# as DPREFCLK (src sel 0 and 1 are same) + DTBCLK_DPREFCLK_0, // Selects source for DTBCLK_P# as DPREFCLK (src sel 0 and 1 are same) + DTBCLK_DTBCLK0, // Selects source for DTBCLK_P# as DTBCLK0 + DTBCLK_DTBCLK1, // Selects source for DTBCLK_P# as DTBCLK0 + DTBCLK_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum dppclk_clock_source { + DPP_REFCLK = 0, // refclk is selected + DPP_DCCG_DTO, // Functional clock selected is DTO tuned DPPCLK +}; + +enum dp_stream_clk_source { + DP_STREAM_DTBCLK_P0 = 0, // Selects functional for DP_STREAM_CLK as DTBCLK_P# + DP_STREAM_DTBCLK_P1, + DP_STREAM_DTBCLK_P2, + DP_STREAM_DTBCLK_P3, + DP_STREAM_DTBCLK_P4, + DP_STREAM_DTBCLK_P5, + DP_STREAM_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum hdmi_char_clk { + HDMI_CHAR_PHYAD18CLK = 0, // Selects functional for hdmi_char_clk as UNIPHYA PHYD18CLK + HDMI_CHAR_PHYBD18CLK, + HDMI_CHAR_PHYCD18CLK, + HDMI_CHAR_PHYDD18CLK, + HDMI_CHAR_PHYED18CLK, + HDMI_CHAR_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum hdmi_stream_clk_source { + HDMI_STREAM_DTBCLK_P0 = 0, // Selects functional for HDMI_STREAM_CLK as DTBCLK_P# + HDMI_STREAM_DTBCLK_P1, + HDMI_STREAM_DTBCLK_P2, + HDMI_STREAM_DTBCLK_P3, + HDMI_STREAM_DTBCLK_P4, + HDMI_STREAM_DTBCLK_P5, + HDMI_STREAM_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum symclk32_se_clk_source { + SYMCLK32_SE_PHYAD32CLK = 0, // Selects functional for SYMCLK32 as UNIPHYA PHYD32CLK + SYMCLK32_SE_PHYBD32CLK, + SYMCLK32_SE_PHYCD32CLK, + SYMCLK32_SE_PHYDD32CLK, + SYMCLK32_SE_PHYED32CLK, + SYMCLK32_SE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum symclk32_le_clk_source { + SYMCLK32_LE_PHYAD32CLK = 0, // Selects functional for SYMCLK32 as UNIPHYA PHYD32CLK + SYMCLK32_LE_PHYBD32CLK, + SYMCLK32_LE_PHYCD32CLK, + SYMCLK32_LE_PHYDD32CLK, + SYMCLK32_LE_PHYED32CLK, + SYMCLK32_LE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software +}; + +enum dsc_clk_source { + DSC_CLK_REF_CLK = 0, // Ref clock selected for DSC_CLK + DSC_DTO_TUNED_CK_GPU_DISCLK_3, // DTO divided clock selected as functional clock +}; + + +static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable) +{ + struct dcn_dccg 
*dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_symclk32_se_rcg( + struct dccg *dccg, + int inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se && enable) + return; + + /* SYMCLK32_ROOT_SE#_GATE_DISABLE will clock gate in DCCG */ + /* SYMCLK32_SE#_GATE_DISABLE will clock gate in HPO only */ + switch (inst) { + case 0: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_SE0_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_SE1_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_SE2_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_SE3_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_symclk32_le_rcg( + struct dccg *dccg, + int inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_physymclk_rcg( + struct dccg *dccg, + int inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 4: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_symclk_fe_rcg( + struct dccg *dccg, + int inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk_fe && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKA_FE_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKA_FE_ROOT_GATE_DISABLE, enable ? 
0 : 1); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKB_FE_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKB_FE_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKC_FE_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKC_FE_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKD_FE_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKD_FE_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 4: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKE_FE_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKE_FE_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_symclk_be_rcg( + struct dccg *dccg, + int inst, + bool enable) +{ + + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* TBD add symclk_be in rcg control bits */ + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk_fe && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKA_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKA_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKB_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKB_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKC_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKC_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKD_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKD_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 4: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + SYMCLKE_GATE_DISABLE, enable ? 0 : 1); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, + SYMCLKE_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_dtbclk_p_rcg(struct dccg *dccg, int inst, bool enable) +{ + + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } +} + +static void dccg35_set_dppclk_rcg(struct dccg *dccg, + int inst, bool enable) +{ + + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable ? 
0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } +} + +static void dccg35_set_dpstreamclk_rcg( + struct dccg *dccg, + int inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5, + DPSTREAMCLK0_GATE_DISABLE, enable ? 0 : 1, + DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5, + DPSTREAMCLK1_GATE_DISABLE, enable ? 0 : 1, + DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5, + DPSTREAMCLK2_GATE_DISABLE, enable ? 0 : 1, + DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5, + DPSTREAMCLK3_GATE_DISABLE, enable ? 0 : 1, + DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_smclk32_se_rcg( + struct dccg *dccg, + int inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se && enable) + return; + + switch (inst) { + case 0: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_SE0_GATE_DISABLE, enable ? 0 : 1); + break; + case 1: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_SE1_GATE_DISABLE, enable ? 0 : 1); + break; + case 2: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_SE2_GATE_DISABLE, enable ? 0 : 1); + break; + case 3: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, enable ? 0 : 1, + SYMCLK32_ROOT_SE3_GATE_DISABLE, enable ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_dsc_clk_src_new(struct dccg *dccg, int inst, enum dsc_clk_source src) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* DSCCLK#_EN=0 switches to refclock from functional clock */ + + switch (inst) { + case 0: + REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, src); + break; + case 1: + REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, src); + break; + case 2: + REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, src); + break; + case 3: + REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, src); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_symclk32_se_src_new( + struct dccg *dccg, + int inst, + enum symclk32_se_clk_source src + ) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE0_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src, + SYMCLK32_SE0_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1); + break; + case 1: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE1_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src, + SYMCLK32_SE1_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1); + break; + case 2: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE2_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src, + SYMCLK32_SE2_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1); + break; + case 3: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE3_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src, + SYMCLK32_SE3_EN, (src == SYMCLK32_SE_REFCLK) ? 
0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static int +dccg35_is_symclk32_se_src_functional_le_new(struct dccg *dccg, int symclk_32_se_inst, int symclk_32_le_inst) +{ + uint32_t en; + uint32_t src_sel; + + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + REG_GET_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, &src_sel, SYMCLK32_SE3_EN, &en); + + if (en == 1 && src_sel == symclk_32_le_inst) + return 1; + + return 0; +} + + +static void dccg35_set_symclk32_le_src_new( + struct dccg *dccg, + int inst, + enum symclk32_le_clk_source src) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE0_SRC_SEL, (src == SYMCLK32_LE_REFCLK) ? 0 : src, + SYMCLK32_LE0_EN, (src == SYMCLK32_LE_REFCLK) ? 0 : 1); + break; + case 1: + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE1_SRC_SEL, (src == SYMCLK32_LE_REFCLK) ? 0 : src, + SYMCLK32_LE1_EN, (src == SYMCLK32_LE_REFCLK) ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dcn35_set_dppclk_src_new(struct dccg *dccg, + int inst, enum dppclk_clock_source src) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, src); + break; + case 1: + REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, src); + break; + case 2: + REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, src); + break; + case 3: + REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, src); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } +} + +static void dccg35_set_dtbclk_p_src_new( + struct dccg *dccg, + enum dtbclk_source src, + int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* If DTBCLK_P#_EN is 0 refclock is selected as functional clock + * If DTBCLK_P#_EN is 1 functional clock is selected as DTBCLK_P#_SRC_SEL + */ + + switch (inst) { + case 0: + REG_UPDATE_2(DTBCLK_P_CNTL, + DTBCLK_P0_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src, + DTBCLK_P0_EN, (src == DTBCLK_REFCLK) ? 0 : 1); + break; + case 1: + REG_UPDATE_2(DTBCLK_P_CNTL, + DTBCLK_P1_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src, + DTBCLK_P1_EN, (src == DTBCLK_REFCLK) ? 0 : 1); + break; + case 2: + REG_UPDATE_2(DTBCLK_P_CNTL, + DTBCLK_P2_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src, + DTBCLK_P2_EN, (src == DTBCLK_REFCLK) ? 0 : 1); + break; + case 3: + REG_UPDATE_2(DTBCLK_P_CNTL, + DTBCLK_P3_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src, + DTBCLK_P3_EN, (src == DTBCLK_REFCLK) ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_dpstreamclk_src_new( + struct dccg *dccg, + enum dp_stream_clk_source src, + int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, + (src == DP_STREAM_REFCLK) ? 0 : 1, + DPSTREAMCLK0_SRC_SEL, + (src == DP_STREAM_REFCLK) ? 0 : src); + break; + case 1: + REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, + (src == DP_STREAM_REFCLK) ? 0 : 1, + DPSTREAMCLK1_SRC_SEL, + (src == DP_STREAM_REFCLK) ? 0 : src); + + break; + case 2: + REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, + (src == DP_STREAM_REFCLK) ? 0 : 1, + DPSTREAMCLK2_SRC_SEL, + (src == DP_STREAM_REFCLK) ? 0 : src); + + break; + case 3: + REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN, + (src == DP_STREAM_REFCLK) ? 0 : 1, + DPSTREAMCLK3_SRC_SEL, + (src == DP_STREAM_REFCLK) ? 
0 : src); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_physymclk_src_new( + struct dccg *dccg, + enum physymclk_source src, + int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, + (src == PHYSYMCLK_REFCLK) ? 0 : 1, + PHYASYMCLK_SRC_SEL, + (src == PHYSYMCLK_REFCLK) ? 0 : src); + break; + case 1: + REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, + (src == PHYSYMCLK_REFCLK) ? 0 : 1, + PHYBSYMCLK_SRC_SEL, + (src == PHYSYMCLK_REFCLK) ? 0 : src); + break; + case 2: + REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, + (src == PHYSYMCLK_REFCLK) ? 0 : 1, + PHYCSYMCLK_SRC_SEL, + (src == PHYSYMCLK_REFCLK) ? 0 : src); + break; + case 3: + REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, + (src == PHYSYMCLK_REFCLK) ? 0 : 1, + PHYDSYMCLK_SRC_SEL, + (src == PHYSYMCLK_REFCLK) ? 0 : src); + break; + case 4: + REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, + (src == PHYSYMCLK_REFCLK) ? 0 : 1, + PHYESYMCLK_SRC_SEL, + (src == PHYSYMCLK_REFCLK) ? 0 : src); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg35_set_symclk_be_src_new( + struct dccg *dccg, + enum symclk_be_source src, + int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, + SYMCLKA_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1, + SYMCLKA_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src); + break; + case 1: + REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE, + SYMCLKB_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1, + SYMCLKB_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src); + break; + case 2: + REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE, + SYMCLKC_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1, + SYMCLKC_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src); + break; + case 3: + REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE, + SYMCLKD_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1, + SYMCLKD_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src); + break; + case 4: + REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE, + SYMCLKE_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1, + SYMCLKE_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src); + break; + } +} + +static int dccg35_is_symclk_fe_src_functional_be(struct dccg *dccg, + int symclk_fe_inst, + int symclk_be_inst) +{ + + uint32_t en = 0; + uint32_t src_sel = 0; + + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (symclk_fe_inst) { + case 0: + REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_SRC_SEL, &src_sel, SYMCLKA_FE_EN, &en); + break; + case 1: + REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, &src_sel, SYMCLKB_FE_EN, &en); + break; + case 2: + REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, &src_sel, SYMCLKC_FE_EN, &en); + break; + case 3: + REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, &src_sel, SYMCLKD_FE_EN, &en); + break; + case 4: + REG_GET_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, &src_sel, SYMCLKE_FE_EN, &en); + break; + } + + if (en == 1 && src_sel == symclk_be_inst) + return 1; + + return 0; +} + +static void dccg35_set_symclk_fe_src_new(struct dccg *dccg, enum symclk_fe_source src, int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, + SYMCLKA_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1, + SYMCLKA_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src); + break; + case 1: + REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE, + SYMCLKB_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1, + SYMCLKB_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 
0 : src); + break; + case 2: + REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE, + SYMCLKC_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1, + SYMCLKC_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src); + break; + case 3: + REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE, + SYMCLKD_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1, + SYMCLKD_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src); + break; + case 4: + REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE, + SYMCLKE_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1, + SYMCLKE_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src); + break; + } +} + +static uint32_t dccg35_is_fe_rcg(struct dccg *dccg, int inst) +{ + uint32_t enable = 0; + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_GET(DCCG_GATE_DISABLE_CNTL5, + SYMCLKA_FE_ROOT_GATE_DISABLE, &enable); + break; + case 1: + REG_GET(DCCG_GATE_DISABLE_CNTL5, + SYMCLKB_FE_ROOT_GATE_DISABLE, &enable); + break; + case 2: + REG_GET(DCCG_GATE_DISABLE_CNTL5, + SYMCLKC_FE_ROOT_GATE_DISABLE, &enable); + break; + case 3: + REG_GET(DCCG_GATE_DISABLE_CNTL5, + SYMCLKD_FE_ROOT_GATE_DISABLE, &enable); + break; + case 4: + REG_GET(DCCG_GATE_DISABLE_CNTL5, + SYMCLKE_FE_ROOT_GATE_DISABLE, &enable); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + return enable; +} + +static uint32_t dccg35_is_symclk32_se_rcg(struct dccg *dccg, int inst) +{ + uint32_t disable_l1 = 0; + uint32_t disable_l2 = 0; + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (inst) { + case 0: + REG_GET_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, &disable_l1, + SYMCLK32_ROOT_SE0_GATE_DISABLE, &disable_l2); + break; + case 1: + REG_GET_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, &disable_l1, + SYMCLK32_ROOT_SE1_GATE_DISABLE, &disable_l2); + break; + case 2: + REG_GET_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, &disable_l1, + SYMCLK32_ROOT_SE2_GATE_DISABLE, &disable_l2); + break; + case 3: + REG_GET_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, &disable_l1, + SYMCLK32_ROOT_SE3_GATE_DISABLE, &disable_l2); + break; + default: + BREAK_TO_DEBUGGER(); + return 0; + } + + /* return true if either block level or DCCG level gating is active */ + return (disable_l1 | disable_l2); +} + +static void dccg35_enable_symclk_fe_new( + struct dccg *dccg, + int inst, + enum symclk_fe_source src) +{ + dccg35_set_symclk_fe_rcg(dccg, inst, false); + dccg35_set_symclk_fe_src_new(dccg, src, inst); +} + +static void dccg35_disable_symclk_fe_new( + struct dccg *dccg, + int inst) +{ + dccg35_set_symclk_fe_src_new(dccg, SYMCLK_FE_REFCLK, inst); + dccg35_set_symclk_fe_rcg(dccg, inst, true); +} + +static void dccg35_enable_symclk_be_new( + struct dccg *dccg, + int inst, + enum symclk_be_source src) +{ + dccg35_set_symclk_be_rcg(dccg, inst, false); + dccg35_set_symclk_be_src_new(dccg, inst, src); +} + +static void dccg35_disable_symclk_be_new( + struct dccg *dccg, + int inst) +{ + int i; + + /* Switch from functional clock to refclock */ + dccg35_set_symclk_be_src_new(dccg, inst, SYMCLK_BE_REFCLK); + + /* Check if any other SE connected LE and disable them */ + for (i = 0; i < 4; i++) { + /* Make sure FE is not already in RCG */ + if (dccg35_is_fe_rcg(dccg, i) == 0) { + if (dccg35_is_symclk_fe_src_functional_be(dccg, i, inst)) + dccg35_disable_symclk_fe_new(dccg, i); + } + } + /* Safe to RCG SYMCLK*/ + dccg35_set_symclk_be_rcg(dccg, inst, true); +} + +static void dccg35_enable_symclk32_se_new( + struct dccg *dccg, + int inst, + enum symclk32_se_clk_source src) +{ + dccg35_set_symclk32_se_rcg(dccg, inst, false); + 
dccg35_set_symclk32_se_src_new(dccg, inst, src); +} + +static void dccg35_disable_symclk32_se_new( + struct dccg *dccg, + int inst) +{ + dccg35_set_symclk32_se_src_new(dccg, SYMCLK32_SE_REFCLK, inst); + dccg35_set_symclk32_se_rcg(dccg, inst, true); +} + +static void dccg35_enable_symclk32_le_new( + struct dccg *dccg, + int inst, + enum symclk32_le_clk_source src) +{ + dccg35_set_symclk32_le_rcg(dccg, inst, false); + dccg35_set_symclk32_le_src_new(dccg, inst, src); +} + +static void dccg35_disable_symclk32_le_new( + struct dccg *dccg, + int inst) +{ + int i; + + /* Switch from functional clock to refclock */ + dccg35_set_symclk32_le_src_new(dccg, inst, SYMCLK32_LE_REFCLK); + + /* Check if any SE are connected and disable SE as well */ + for (i = 0; i < 4; i++) { + /* Make sure FE is not already in RCG */ + if (dccg35_is_symclk32_se_rcg(dccg, i) == 0) { + /* Disable and SE connected to this LE before RCG */ + if (dccg35_is_symclk32_se_src_functional_le_new(dccg, i, inst)) + dccg35_disable_symclk32_se_new(dccg, i); + } + } + /* Safe to RCG SYM32_LE*/ + dccg35_set_symclk32_le_rcg(dccg, inst, true); +} + +static void dccg35_enable_physymclk_new(struct dccg *dccg, + int inst, + enum physymclk_source src) +{ + dccg35_set_physymclk_rcg(dccg, inst, false); + dccg35_set_physymclk_src_new(dccg, src, inst); +} + +static void dccg35_disable_physymclk_new(struct dccg *dccg, + int inst) +{ + dccg35_set_physymclk_src_new(dccg, PHYSYMCLK_REFCLK, inst); + dccg35_set_physymclk_rcg(dccg, inst, true); +} + +static void dccg35_enable_dpp_clk_new( + struct dccg *dccg, + int inst, + enum dppclk_clock_source src) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + /* Sanitize inst before use in array de-ref */ + if (inst < 0) { + BREAK_TO_DEBUGGER(); + return; + } + dccg35_set_dppclk_rcg(dccg, inst, false); + dcn35_set_dppclk_src_new(dccg, inst, src); + /* Switch DPP clock to DTO */ + REG_SET_2(DPPCLK_DTO_PARAM[inst], 0, + DPPCLK0_DTO_PHASE, 0xFF, + DPPCLK0_DTO_MODULO, 0xFF); +} + +static void dccg35_disable_dpp_clk_new( + struct dccg *dccg, + int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + /* Sanitize inst before use in array de-ref */ + if (inst < 0) { + BREAK_TO_DEBUGGER(); + return; + } + dcn35_set_dppclk_src_new(dccg, inst, DPP_REFCLK); + REG_SET_2(DPPCLK_DTO_PARAM[inst], 0, + DPPCLK0_DTO_PHASE, 0, + DPPCLK0_DTO_MODULO, 1); + dccg35_set_dppclk_rcg(dccg, inst, true); +} + +static void dccg35_disable_dscclk_new(struct dccg *dccg, + int inst) +{ + dccg35_set_dsc_clk_src_new(dccg, inst, DSC_CLK_REF_CLK); + dccg35_set_dsc_clk_rcg(dccg, inst, true); +} + +static void dccg35_enable_dscclk_new(struct dccg *dccg, + int inst, + enum dsc_clk_source src) +{ + dccg35_set_dsc_clk_rcg(dccg, inst, false); + dccg35_set_dsc_clk_src_new(dccg, inst, src); +} + +static void dccg35_enable_dtbclk_p_new(struct dccg *dccg, + enum dtbclk_source src, + int inst) +{ + dccg35_set_dtbclk_p_rcg(dccg, inst, false); + dccg35_set_dtbclk_p_src_new(dccg, src, inst); +} + +static void dccg35_disable_dtbclk_p_new(struct dccg *dccg, + int inst) +{ + dccg35_set_dtbclk_p_src_new(dccg, DTBCLK_REFCLK, inst); + dccg35_set_dtbclk_p_rcg(dccg, inst, true); +} + +static void dccg35_disable_dpstreamclk_new(struct dccg *dccg, + int inst) +{ + dccg35_set_dpstreamclk_src_new(dccg, DP_STREAM_REFCLK, inst); + dccg35_set_dpstreamclk_rcg(dccg, inst, true); +} + +static void dccg35_enable_dpstreamclk_new(struct dccg *dccg, + enum dp_stream_clk_source src, + int inst) +{ + dccg35_set_dpstreamclk_rcg(dccg, inst, false); + 
dccg35_set_dpstreamclk_src_new(dccg, src, inst); +} + static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); uint32_t dispclk_rdivider_value = 0; REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); - REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); + if (dispclk_rdivider_value != 0) + REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } static void dcn35_set_dppclk_enable(struct dccg *dccg, @@ -657,6 +1714,12 @@ static void dccg35_disable_symclk32_se( } } +static void dccg35_init_cb(struct dccg *dccg) +{ + (void)dccg; + /* Any RCG should be done when driver enter low power mode*/ +} + void dccg35_init(struct dccg *dccg) { int otg_inst; @@ -685,10 +1748,6 @@ void dccg35_init(struct dccg *dccg) dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false); } - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) - for (otg_inst = 0; otg_inst < 4; otg_inst++) - dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0); - /* dccg35_enable_global_fgcg_rep( dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits @@ -869,47 +1928,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, } /*get other front end connected to this backend*/ -static uint8_t dccg35_get_other_enabled_symclk_fe(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) +static uint8_t dccg35_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst) { uint8_t num_enabled_symclk_fe = 0; - uint32_t be_clk_en = 0, fe_clk_en[5] = {0}, be_clk_sel[5] = {0}; + uint32_t fe_clk_en[5] = {0}, be_clk_sel[5] = {0}; struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - switch (link_enc_inst) { - case 0: - REG_GET_3(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, &be_clk_en, - SYMCLKA_FE_EN, &fe_clk_en[0], - SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]); - break; - case 1: - REG_GET_3(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, &be_clk_en, - SYMCLKB_FE_EN, &fe_clk_en[1], - SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]); - break; - case 2: - REG_GET_3(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, &be_clk_en, - SYMCLKC_FE_EN, &fe_clk_en[2], - SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]); - break; - case 3: - REG_GET_3(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, &be_clk_en, - SYMCLKD_FE_EN, &fe_clk_en[3], - SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]); - break; - case 4: - REG_GET_3(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, &be_clk_en, - SYMCLKE_FE_EN, &fe_clk_en[4], - SYMCLKE_FE_SRC_SEL, &be_clk_sel[4]); - break; - } - if (be_clk_en) { - /* for DPMST, this backend could be used by multiple front end. 
- only disable the backend if this stream_enc_ins is the last active stream enc connected to this back_end*/ - uint8_t i; - for (i = 0; i != link_enc_inst && i < ARRAY_SIZE(fe_clk_en); i++) { - if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst) - num_enabled_symclk_fe++; - } + REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0], + SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]); + + REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1], + SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]); + + REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2], + SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]); + + REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3], + SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]); + + REG_GET_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, &fe_clk_en[4], + SYMCLKE_FE_SRC_SEL, &be_clk_sel[4]); + + uint8_t i; + + for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) { + if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst) + num_enabled_symclk_fe++; } return num_enabled_symclk_fe; } @@ -957,9 +2001,9 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst break; } - /*check other enabled symclk fe */ - num_enabled_symclk_fe = dccg35_get_other_enabled_symclk_fe(dccg, stream_enc_inst, link_enc_inst); - /*only turn off backend clk if other front end attachecd to this backend are all off, + /*check other enabled symclk fe connected to this be */ + num_enabled_symclk_fe = dccg35_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst); + /*only turn off backend clk if other front end attached to this backend are all off, for mst, only turn off the backend if this is the last front end*/ if (num_enabled_symclk_fe == 0) { switch (link_enc_inst) { @@ -997,6 +2041,336 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst } } +static void dccg35_set_dpstreamclk_cb( + struct dccg *dccg, + enum streamclk_source src, + int otg_inst, + int dp_hpo_inst) +{ + + enum dtbclk_source dtb_clk_src; + enum dp_stream_clk_source dp_stream_clk_src; + + switch (src) { + case REFCLK: + dtb_clk_src = DTBCLK_REFCLK; + dp_stream_clk_src = DP_STREAM_REFCLK; + break; + case DPREFCLK: + dtb_clk_src = DTBCLK_DPREFCLK; + dp_stream_clk_src = (enum dp_stream_clk_source)otg_inst; + break; + case DTBCLK0: + dtb_clk_src = DTBCLK_DTBCLK0; + dp_stream_clk_src = (enum dp_stream_clk_source)otg_inst; + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + + if (dtb_clk_src == DTBCLK_REFCLK && + dp_stream_clk_src == DP_STREAM_REFCLK) { + dccg35_disable_dtbclk_p_new(dccg, otg_inst); + dccg35_disable_dpstreamclk_new(dccg, dp_hpo_inst); + } else { + dccg35_enable_dtbclk_p_new(dccg, dtb_clk_src, otg_inst); + dccg35_enable_dpstreamclk_new(dccg, + dp_stream_clk_src, + dp_hpo_inst); + } +} + +static void dccg35_set_dpstreamclk_root_clock_gating_cb( + struct dccg *dccg, + int dp_hpo_inst, + bool power_on) +{ + /* power_on set indicates we need to ungate + * Currently called from optimize_bandwidth and prepare_bandwidth calls + * Since clock source is not passed restore to refclock on ungate + * Instance 0 is implied here since only one streamclock resource + * Redundant as gating when enabled is acheived through set_dpstreamclk + */ + if (power_on) + dccg35_enable_dpstreamclk_new(dccg, + DP_STREAM_REFCLK, + dp_hpo_inst); + else + dccg35_disable_dpstreamclk_new(dccg, dp_hpo_inst); +} + +static void dccg35_update_dpp_dto_cb(struct dccg *dccg, int dpp_inst, + int req_dppclk) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (dccg->dpp_clock_gated[dpp_inst]) { + /* + * Do not update the 
DPPCLK DTO if the clock is stopped. + */ + return; + } + + if (dccg->ref_dppclk && req_dppclk) { + int ref_dppclk = dccg->ref_dppclk; + int modulo, phase; + + // phase / modulo = dpp pipe clk / dpp global clk + modulo = 0xff; // use FF at the end + phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; + + if (phase > 0xff) { + ASSERT(false); + phase = 0xff; + } + + /* Enable DPP CLK DTO output */ + dccg35_enable_dpp_clk_new(dccg, dpp_inst, DPP_DCCG_DTO); + + /* Program DTO */ + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, phase, + DPPCLK0_DTO_MODULO, modulo); + } else + dccg35_disable_dpp_clk_new(dccg, dpp_inst); + + dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; +} + +static void dccg35_dpp_root_clock_control_cb( + struct dccg *dccg, + unsigned int dpp_inst, + bool power_on) +{ + if (dccg->dpp_clock_gated[dpp_inst] == power_on) + return; + /* power_on set indicates we need to ungate + * Currently called from optimize_bandwidth and prepare_bandwidth calls + * Since clock source is not passed restore to refclock on ungate + * Redundant as gating when enabled is achieved through update_dpp_dto + */ + dccg35_set_dppclk_rcg(dccg, dpp_inst, !power_on); + + dccg->dpp_clock_gated[dpp_inst] = !power_on; +} + +static void dccg35_enable_symclk32_se_cb( + struct dccg *dccg, + int inst, + enum phyd32clk_clock_source phyd32clk) +{ + dccg35_enable_symclk32_se_new(dccg, inst, (enum symclk32_se_clk_source)phyd32clk); +} + +static void dccg35_disable_symclk32_se_cb(struct dccg *dccg, int inst) +{ + dccg35_disable_symclk32_se_new(dccg, inst); +} + +static void dccg35_enable_symclk32_le_cb( + struct dccg *dccg, + int inst, + enum phyd32clk_clock_source src) +{ + dccg35_enable_symclk32_le_new(dccg, inst, (enum symclk32_le_clk_source) src); +} + +static void dccg35_disable_symclk32_le_cb(struct dccg *dccg, int inst) +{ + dccg35_disable_symclk32_le_new(dccg, inst); +} + +static void dccg35_set_symclk32_le_root_clock_gating_cb( + struct dccg *dccg, + int inst, + bool power_on) +{ + /* power_on set indicates we need to ungate + * Currently called from optimize_bandwidth and prepare_bandwidth calls + * Since clock source is not passed restore to refclock on ungate + * Redundant as gating when enabled is achieved through disable_symclk32_le + */ + if (power_on) + dccg35_enable_symclk32_le_new(dccg, inst, SYMCLK32_LE_REFCLK); + else + dccg35_disable_symclk32_le_new(dccg, inst); +} + +static void dccg35_set_physymclk_cb( + struct dccg *dccg, + int inst, + enum physymclk_clock_source clk_src, + bool force_enable) +{ + /* force_enable = 0 indicates we can switch to ref clock */ + if (force_enable) + dccg35_enable_physymclk_new(dccg, inst, (enum physymclk_source)clk_src); + else + dccg35_disable_physymclk_new(dccg, inst); +} + +static void dccg35_set_physymclk_root_clock_gating_cb( + struct dccg *dccg, + int inst, + bool power_on) +{ + /* Redundant RCG already done in disable_physymclk + * power_on = 1 indicates we need to ungate + */ + if (power_on) + dccg35_enable_physymclk_new(dccg, inst, PHYSYMCLK_REFCLK); + else + dccg35_disable_physymclk_new(dccg, inst); +} + +static void dccg35_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int inst, + bool power_on) +{ + /* power_on set indicates we need to ungate + * Currently called from optimize_bandwidth and prepare_bandwidth calls + * Since clock source is not passed restore to refclock on ungate + * Redundant as gating when enabled is achieved through disable_symclk32_le + */ + if (power_on) + dccg35_enable_symclk32_le_new(dccg, inst,
SYMCLK32_LE_REFCLK); + else + dccg35_disable_symclk32_le_new(dccg, inst); +} + +static void dccg35_set_dtbclk_p_src_cb( + struct dccg *dccg, + enum streamclk_source src, + uint32_t inst) +{ + if (src == DTBCLK0) + dccg35_enable_dtbclk_p_new(dccg, DTBCLK_DTBCLK0, inst); + else + dccg35_disable_dtbclk_p_new(dccg, inst); +} + +static void dccg35_set_dtbclk_dto_cb( + struct dccg *dccg, + const struct dtbclk_dto_params *params) +{ + /* set_dtbclk_p_src typ called earlier to switch to DTBCLK + * if params->ref_dtbclk_khz and req_dtbclk_khz are 0 switch to ref-clock + */ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + /* DTO Output Rate / Pixel Rate = 1/4 */ + int req_dtbclk_khz = params->pixclk_khz / 4; + + if (params->ref_dtbclk_khz && req_dtbclk_khz) { + uint32_t modulo, phase; + + dccg35_enable_dtbclk_p_new(dccg, DTBCLK_DTBCLK0, params->otg_inst); + + // phase / modulo = dtbclk / dtbclk ref + modulo = params->ref_dtbclk_khz * 1000; + phase = req_dtbclk_khz * 1000; + + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase); + + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 1); + + REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1, + 1, 100); + + /* program OTG_PIXEL_RATE_DIV for DIVK1 and DIVK2 fields */ + dccg35_set_pixel_rate_div(dccg, params->otg_inst, PIXEL_RATE_DIV_BY_1, PIXEL_RATE_DIV_BY_1); + + /* The recommended programming sequence to enable DTBCLK DTO to generate + * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should + * be set only after DTO is enabled + */ + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + PIPE_DTO_SRC_SEL[params->otg_inst], 2); + } else { + dccg35_disable_dtbclk_p_new(dccg, params->otg_inst); + + REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 0, + PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 
0 : 1); + + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); + } +} + +static void dccg35_disable_dscclk_cb(struct dccg *dccg, + int inst) +{ + dccg35_disable_dscclk_new(dccg, inst); +} + +static void dccg35_enable_dscclk_cb(struct dccg *dccg, int inst) +{ + dccg35_enable_dscclk_new(dccg, inst, DSC_DTO_TUNED_CK_GPU_DISCLK_3); +} + +static void dccg35_enable_symclk_se_cb(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) +{ + /* Switch to functional clock if already not selected */ + dccg35_enable_symclk_be_new(dccg, SYMCLK_BE_PHYCLK, link_enc_inst); + + dccg35_enable_symclk_fe_new(dccg, stream_enc_inst, (enum symclk_fe_source) link_enc_inst); + +} + +static void dccg35_disable_symclk_se_cb( + struct dccg *dccg, + uint32_t stream_enc_inst, + uint32_t link_enc_inst) +{ + dccg35_disable_symclk_fe_new(dccg, stream_enc_inst); + + /* DMU PHY sequence switches SYMCLK_BE (link_enc_inst) to ref clock once PHY is turned off */ +} + +void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating) +{ + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) { + dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating); + } +} + +static const struct dccg_funcs dccg35_funcs_new = { + .update_dpp_dto = dccg35_update_dpp_dto_cb, + .dpp_root_clock_control = dccg35_dpp_root_clock_control_cb, + .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, + .dccg_init = dccg35_init_cb, + .set_dpstreamclk = dccg35_set_dpstreamclk_cb, + .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating_cb, + .enable_symclk32_se = dccg35_enable_symclk32_se_cb, + .disable_symclk32_se = dccg35_disable_symclk32_se_cb, + .enable_symclk32_le = dccg35_enable_symclk32_le_cb, + .disable_symclk32_le = dccg35_disable_symclk32_le_cb, + .set_symclk32_le_root_clock_gating = dccg35_set_symclk32_le_root_clock_gating_cb, + .set_physymclk = dccg35_set_physymclk_cb, + .set_physymclk_root_clock_gating = dccg35_set_physymclk_root_clock_gating_cb, + .set_dtbclk_dto = dccg35_set_dtbclk_dto_cb, + .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, + .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, + .otg_add_pixel = dccg31_otg_add_pixel, + .otg_drop_pixel = dccg31_otg_drop_pixel, + .set_dispclk_change_mode = dccg31_set_dispclk_change_mode, + .disable_dsc = dccg35_disable_dscclk_cb, + .enable_dsc = dccg35_enable_dscclk_cb, + .set_pixel_rate_div = dccg35_set_pixel_rate_div, + .get_pixel_rate_div = dccg35_get_pixel_rate_div, + .trigger_dio_fifo_resync = dccg35_trigger_dio_fifo_resync, + .set_valid_pixel_rate = dccg35_set_valid_pixel_rate, + .enable_symclk_se = dccg35_enable_symclk_se_cb, + .disable_symclk_se = dccg35_disable_symclk_se_cb, + .set_dtbclk_p_src = dccg35_set_dtbclk_p_src_cb, +}; + static const struct dccg_funcs dccg35_funcs = { .update_dpp_dto = dccg35_update_dpp_dto, .dpp_root_clock_control = dccg35_dpp_root_clock_control, @@ -1026,6 +2400,7 @@ static const struct dccg_funcs dccg35_funcs = { .enable_symclk_se = dccg35_enable_symclk_se, .disable_symclk_se = dccg35_disable_symclk_se, .set_dtbclk_p_src = dccg35_set_dtbclk_p_src, + .dccg_root_gate_disable_control = dccg35_root_gate_disable_control, }; struct dccg *dccg35_create( @@ -1041,6 +2416,10 @@ struct dccg *dccg35_create( BREAK_TO_DEBUGGER(); return NULL; } + (void)&dccg35_disable_symclk_be_new; + (void)&dccg35_set_symclk32_le_root_clock_gating; + (void)&dccg35_set_smclk32_se_rcg; + (void)&dccg35_funcs_new; base = &dccg_dcn->base; 
base->ctx = ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h index 1586a45ca3bd40..51f98c5c51c418 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h @@ -241,6 +241,7 @@ struct dccg *dccg35_create( void dccg35_init(struct dccg *dccg); void dccg35_enable_global_fgcg_rep(struct dccg *dccg, bool value); +void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); #endif //__DCN35_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 07f1f396ba52aa..0b889004509ad0 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -730,35 +730,35 @@ void dccg401_init(struct dccg *dccg) } } -static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, bool enable) +static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - uint32_t phase = enable ? 1 : 0; switch (inst) { case 0: - REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1, DSCCLK0_DTO_DB_EN, 1); REG_UPDATE_2(DSCCLK0_DTO_PARAM, - DSCCLK0_DTO_PHASE, phase, + DSCCLK0_DTO_PHASE, 1, DSCCLK0_DTO_MODULO, 1); + REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1); + break; case 1: - REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1, DSCCLK1_DTO_DB_EN, 1); REG_UPDATE_2(DSCCLK1_DTO_PARAM, - DSCCLK1_DTO_PHASE, phase, + DSCCLK1_DTO_PHASE, 1, DSCCLK1_DTO_MODULO, 1); + REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1); break; case 2: - REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1, DSCCLK2_DTO_DB_EN, 1); REG_UPDATE_2(DSCCLK2_DTO_PARAM, - DSCCLK2_DTO_PHASE, phase, + DSCCLK2_DTO_PHASE, 1, DSCCLK2_DTO_MODULO, 1); + REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1); break; case 3: - REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1, DSCCLK3_DTO_DB_EN, 1); REG_UPDATE_2(DSCCLK3_DTO_PARAM, - DSCCLK3_DTO_PHASE, phase, + DSCCLK3_DTO_PHASE, 1, DSCCLK3_DTO_MODULO, 1); + REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1); break; default: BREAK_TO_DEBUGGER(); @@ -774,15 +774,27 @@ static void dccg401_set_ref_dscclk(struct dccg *dccg, switch (dsc_inst) { case 0: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0); + REG_UPDATE_2(DSCCLK0_DTO_PARAM, + DSCCLK0_DTO_PHASE, 0, + DSCCLK0_DTO_MODULO, 0); break; case 1: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0); + REG_UPDATE_2(DSCCLK1_DTO_PARAM, + DSCCLK1_DTO_PHASE, 0, + DSCCLK1_DTO_MODULO, 0); break; case 2: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0); + REG_UPDATE_2(DSCCLK2_DTO_PARAM, + DSCCLK2_DTO_PHASE, 0, + DSCCLK2_DTO_MODULO, 0); break; case 3: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0); + REG_UPDATE_2(DSCCLK3_DTO_PARAM, + DSCCLK3_DTO_PHASE, 0, + DSCCLK3_DTO_MODULO, 0); break; default: return; diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h index 8bcddc8363472b..a196ce9e81279a 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h @@ -117,10 +117,6 @@ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_EN, mask_sh),\ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_EN, mask_sh),\ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK3_EN, mask_sh),\ - DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_DB_EN, mask_sh),\ - DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_DB_EN, mask_sh),\ - DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_DB_EN, mask_sh),\ - DCCG_SF(DSCCLK_DTO_CTRL, 
DSCCLK3_DTO_DB_EN, mask_sh),\ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index cf5f84fb9c69a1..eeed840073fe45 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -630,6 +630,11 @@ void dce_aud_az_enable(struct audio *audio) audio->inst, value); } +void dce_aud_az_disable_hbr_audio(struct audio *audio) +{ + set_high_bit_rate_capable(audio, false); +} + void dce_aud_az_disable(struct audio *audio) { uint32_t value; @@ -1293,6 +1298,7 @@ static const struct audio_funcs funcs = { .az_enable = dce_aud_az_enable, .az_disable = dce_aud_az_disable, .az_configure = dce_aud_az_configure, + .az_disable_hbr_audio = dce_aud_az_disable_hbr_audio, .destroy = dce_aud_destroy, }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index 539f881928d101..1b7b8b079af44c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -166,6 +166,7 @@ void dce_aud_hw_init(struct audio *audio); void dce_aud_az_enable(struct audio *audio); void dce_aud_az_disable(struct audio *audio); +void dce_aud_az_disable_hbr_audio(struct audio *audio); void dce_aud_az_configure(struct audio *audio, enum signal_type signal, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index b8996d285f003a..bb4ac5042c803a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -735,7 +735,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, (unsigned int) payload->mot); if (payload->write) dce_aux_log_payload(" write", payload->data, payload->length, 16); - ret = dce_aux_transfer_raw(ddc, payload, &operation_result); + + /* Check whether aux to be processed via dmub or dcn directly */ + if (ddc->ctx->dc->debug.enable_dmub_aux_for_legacy_ddc + || ddc->ddc_pin == NULL) { + ret = dce_aux_transfer_dmub_raw(ddc, payload, &operation_result); + } else { + ret = dce_aux_transfer_raw(ddc, payload, &operation_result); + } + DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, LOG_FLAG_I2cAux_DceAux, "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u", diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index ccf153b7a4673c..cae18f8c1c9a08 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -94,6 +94,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state) state = PSR_STATE_HWLOCK_MGR; else if (raw_state == 0x61) state = PSR_STATE_POLLVUPDATE; + else if (raw_state == 0x62) + state = PSR_STATE_RELEASE_HWLOCK_MGR_FULL_FRAME; else state = PSR_STATE_INVALID; @@ -363,6 +365,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR; copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1; copy_settings_data->debug.bitfields.force_full_frame_update = 0; + copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm; if (psr_context->su_granularity_required 
== 0) copy_settings_data->su_y_granularity = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 4d960dc5ce8935..c31e4f26a305b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -12,6 +12,8 @@ #define MAX_PIPES 6 +#define GPINT_RETRY_NUM 20 + static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3}; static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5}; @@ -167,6 +169,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable; copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported; + copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm; + copy_settings_data->flags.u32All = 0; copy_settings_data->flags.bitfields.fec_enable_status = (link->fec_state == dc_link_fec_enabled); copy_settings_data->flags.bitfields.dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); @@ -220,6 +224,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, uint32_t *residency, const bool is_start, enum pr_residency_mode mode) { uint16_t param = (uint16_t)(panel_inst << 8); + uint32_t i = 0; switch (mode) { case PR_RESIDENCY_MODE_PHY: @@ -247,10 +252,17 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, if (is_start) param |= REPLAY_RESIDENCY_ENABLE; - // Send gpint command and wait for ack - if (!dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param, - residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - *residency = 0; + for (i = 0; i < GPINT_RETRY_NUM; i++) { + // Send gpint command and wait for ack + if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param, + residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return; + + udelay(100); + } + + // gpint still failed after all retries + *residency = 0; } /* diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c index 49bcfe6ec999a5..fa422a8cbced5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c @@ -1955,6 +1955,7 @@ void dce110_tg_program_timing(struct timing_generator *tg, int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h index 28c58f1dff2d5a..ee4de740aceb3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h @@ -261,6 +261,7 @@ void dce110_tg_program_timing(struct timing_generator *tg, int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c index bf35dc65ca29ff..9837dec837ff27 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c @@ -438,6 +438,7 @@ static void dce110_timing_generator_v_program_timing(struct
timing_generator *tg int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios) { diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c index eb3557965781eb..fcf59348eb6249 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c @@ -697,6 +697,7 @@ static void dce120_tg_program_timing(struct timing_generator *tg, int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios) { diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c index c1a85ee374d9da..e5fb0e8333e43f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c @@ -111,13 +111,14 @@ static void program_timing(struct timing_generator *tg, int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios) { if (!use_vbios) program_pix_dur(tg, timing->pix_clk_100hz); - dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios); + dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, 0, use_vbios); } static void dce60_timing_generator_enable_advanced_request( diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c index 2df4654858bed2..003a9330c28691 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c @@ -111,13 +111,14 @@ static void program_timing(struct timing_generator *tg, int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios) { if (!use_vbios) program_pix_dur(tg, timing->pix_clk_100hz); - dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios); + dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, 0, use_vbios); } static void dce80_timing_generator_enable_advanced_request( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 9923d0d620d48c..e1f6623d493631 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -24,8 +24,6 @@ DCN10 = dcn10_ipp.o \ dcn10_hw_sequencer_debug.o \ - dcn10_opp.o \ - dcn10_mpc.o \ dcn10_cm_common.o \ AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 0b49362f71b06c..eaed5d1c398aa0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -591,6 +591,8 @@ bool cm_helper_translate_curve_to_degamma_hw_format( i += increment) { if (j == hw_points - 1) break; + if (i >= TRANSFER_FUNC_POINTS) + return false; rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c deleted file mode 100644 index f2f55565e98a4c..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ 
/dev/null @@ -1,537 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "dcn10_mpc.h" - -#define REG(reg)\ - mpc10->mpc_regs->reg - -#define CTX \ - mpc10->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name - - -void mpc1_set_bg_color(struct mpc *mpc, - struct tg_color *bg_color, - int mpcc_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id); - uint32_t bg_r_cr, bg_g_y, bg_b_cb; - - bottommost_mpcc->blnd_cfg.black_color = *bg_color; - - /* find bottommost mpcc. */ - while (bottommost_mpcc->mpcc_bot) { - /* avoid circular linked link */ - ASSERT(bottommost_mpcc != bottommost_mpcc->mpcc_bot); - if (bottommost_mpcc == bottommost_mpcc->mpcc_bot) - break; - - bottommost_mpcc = bottommost_mpcc->mpcc_bot; - } - - /* mpc color is 12 bit. tg_color is 10 bit */ - /* todo: might want to use 16 bit to represent color and have each - * hw block translate to correct color depth. 
- */ - bg_r_cr = bg_color->color_r_cr << 2; - bg_g_y = bg_color->color_g_y << 2; - bg_b_cb = bg_color->color_b_cb << 2; - - REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0, - MPCC_BG_R_CR, bg_r_cr); - REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0, - MPCC_BG_G_Y, bg_g_y); - REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0, - MPCC_BG_B_CB, bg_b_cb); -} - -static void mpc1_update_blending( - struct mpc *mpc, - struct mpcc_blnd_cfg *blnd_cfg, - int mpcc_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id); - - REG_UPDATE_5(MPCC_CONTROL[mpcc_id], - MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode, - MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha, - MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only, - MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha, - MPCC_GLOBAL_GAIN, blnd_cfg->global_gain); - - mpcc->blnd_cfg = *blnd_cfg; -} - -void mpc1_update_stereo_mix( - struct mpc *mpc, - struct mpcc_sm_cfg *sm_cfg, - int mpcc_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - - REG_UPDATE_6(MPCC_SM_CONTROL[mpcc_id], - MPCC_SM_EN, sm_cfg->enable, - MPCC_SM_MODE, sm_cfg->sm_mode, - MPCC_SM_FRAME_ALT, sm_cfg->frame_alt, - MPCC_SM_FIELD_ALT, sm_cfg->field_alt, - MPCC_SM_FORCE_NEXT_FRAME_POL, sm_cfg->force_next_frame_porlarity, - MPCC_SM_FORCE_NEXT_TOP_POL, sm_cfg->force_next_field_polarity); -} -void mpc1_assert_idle_mpcc(struct mpc *mpc, int id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - - ASSERT(!(mpc10->mpcc_in_use_mask & 1 << id)); - REG_WAIT(MPCC_STATUS[id], - MPCC_IDLE, 1, - 1, 100000); -} - -struct mpcc *mpc1_get_mpcc(struct mpc *mpc, int mpcc_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - - ASSERT(mpcc_id < mpc10->num_mpcc); - return &(mpc->mpcc_array[mpcc_id]); -} - -struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) -{ - struct mpcc *tmp_mpcc = tree->opp_list; - - while (tmp_mpcc != NULL) { - if (tmp_mpcc->dpp_id == dpp_id) - return tmp_mpcc; - - /* avoid circular linked list */ - ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot); - if (tmp_mpcc == tmp_mpcc->mpcc_bot) - break; - - tmp_mpcc = tmp_mpcc->mpcc_bot; - } - return NULL; -} - -bool mpc1_is_mpcc_idle(struct mpc *mpc, int mpcc_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - unsigned int top_sel; - unsigned int opp_id; - unsigned int idle; - - REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel); - REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id); - REG_GET(MPCC_STATUS[mpcc_id], MPCC_IDLE, &idle); - if (top_sel == 0xf && opp_id == 0xf && idle) - return true; - else - return false; -} - -void mpc1_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - unsigned int top_sel, mpc_busy, mpc_idle; - - REG_GET(MPCC_TOP_SEL[mpcc_id], - MPCC_TOP_SEL, &top_sel); - - if (top_sel == 0xf) { - REG_GET_2(MPCC_STATUS[mpcc_id], - MPCC_BUSY, &mpc_busy, - MPCC_IDLE, &mpc_idle); - - ASSERT(mpc_busy == 0); - ASSERT(mpc_idle == 1); - } -} - -/* - * Insert DPP into MPC tree based on specified blending position. - * Only used for planes that are part of blending chain for OPP output - * - * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be added to. - * [in] blnd_cfg - MPCC blending configuration for the new blending layer. - * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. - * stereo mix must disable for the very bottom layer of the tree config. - * [in] insert_above_mpcc - Insert new plane above this MPCC. 
If NULL, insert as bottom plane. - * [in] dpp_id - DPP instance for the plane to be added. - * [in] mpcc_id - The MPCC physical instance to use for blending. - * - * Return: struct mpcc* - MPCC that was added. - */ -struct mpcc *mpc1_insert_plane( - struct mpc *mpc, - struct mpc_tree *tree, - struct mpcc_blnd_cfg *blnd_cfg, - struct mpcc_sm_cfg *sm_cfg, - struct mpcc *insert_above_mpcc, - int dpp_id, - int mpcc_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - struct mpcc *new_mpcc = NULL; - - /* sanity check parameters */ - ASSERT(mpcc_id < mpc10->num_mpcc); - ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id)); - - if (insert_above_mpcc) { - /* check insert_above_mpcc exist in tree->opp_list */ - struct mpcc *temp_mpcc = tree->opp_list; - - if (temp_mpcc != insert_above_mpcc) - while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) - temp_mpcc = temp_mpcc->mpcc_bot; - if (temp_mpcc == NULL) - return NULL; - } - - /* Get and update MPCC struct parameters */ - new_mpcc = mpc1_get_mpcc(mpc, mpcc_id); - new_mpcc->dpp_id = dpp_id; - - /* program mux and MPCC_MODE */ - if (insert_above_mpcc) { - new_mpcc->mpcc_bot = insert_above_mpcc; - REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, insert_above_mpcc->mpcc_id); - REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING); - } else { - new_mpcc->mpcc_bot = NULL; - REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); - REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_ONLY); - } - REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id); - REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id); - - /* Configure VUPDATE lock set for this MPCC to map to the OPP */ - REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id); - - /* update mpc tree mux setting */ - if (tree->opp_list == insert_above_mpcc) { - /* insert the toppest mpcc */ - tree->opp_list = new_mpcc; - REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, mpcc_id); - } else { - /* find insert position */ - struct mpcc *temp_mpcc = tree->opp_list; - - while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) - temp_mpcc = temp_mpcc->mpcc_bot; - if (temp_mpcc && temp_mpcc->mpcc_bot == insert_above_mpcc) { - REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, MPCC_BOT_SEL, mpcc_id); - temp_mpcc->mpcc_bot = new_mpcc; - if (!insert_above_mpcc) - REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id], - MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING); - } - } - - /* update the blending configuration */ - mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id); - - /* update the stereo mix settings, if provided */ - if (sm_cfg != NULL) { - new_mpcc->sm_cfg = *sm_cfg; - mpc1_update_stereo_mix(mpc, sm_cfg, mpcc_id); - } - - /* mark this mpcc as in use */ - mpc10->mpcc_in_use_mask |= 1 << mpcc_id; - - return new_mpcc; -} - -/* - * Remove a specified MPCC from the MPC tree. - * - * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be removed from. - * [in/out] mpcc - MPCC to be removed from tree. 
- * - * Return: void - */ -void mpc1_remove_mpcc( - struct mpc *mpc, - struct mpc_tree *tree, - struct mpcc *mpcc_to_remove) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - bool found = false; - int mpcc_id = mpcc_to_remove->mpcc_id; - - if (tree->opp_list == mpcc_to_remove) { - found = true; - /* remove MPCC from top of tree */ - if (mpcc_to_remove->mpcc_bot) { - /* set the next MPCC in list to be the top MPCC */ - tree->opp_list = mpcc_to_remove->mpcc_bot; - REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, tree->opp_list->mpcc_id); - } else { - /* there are no other MPCC is list */ - tree->opp_list = NULL; - REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, 0xf); - } - } else { - /* find mpcc to remove MPCC list */ - struct mpcc *temp_mpcc = tree->opp_list; - - while (temp_mpcc && temp_mpcc->mpcc_bot != mpcc_to_remove) - temp_mpcc = temp_mpcc->mpcc_bot; - - if (temp_mpcc && temp_mpcc->mpcc_bot == mpcc_to_remove) { - found = true; - temp_mpcc->mpcc_bot = mpcc_to_remove->mpcc_bot; - if (mpcc_to_remove->mpcc_bot) { - /* remove MPCC in middle of list */ - REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, - MPCC_BOT_SEL, mpcc_to_remove->mpcc_bot->mpcc_id); - } else { - /* remove MPCC from bottom of list */ - REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, - MPCC_BOT_SEL, 0xf); - REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id], - MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH); - } - } - } - - if (found) { - /* turn off MPCC mux registers */ - REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); - REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); - REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); - REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); - - /* mark this mpcc as not in use */ - mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id); - mpcc_to_remove->dpp_id = 0xf; - mpcc_to_remove->mpcc_bot = NULL; - } else { - /* In case of resume from S3/S4, remove mpcc from bios left over */ - REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); - REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); - REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); - REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); - } -} - -static void mpc1_init_mpcc(struct mpcc *mpcc, int mpcc_inst) -{ - mpcc->mpcc_id = mpcc_inst; - mpcc->dpp_id = 0xf; - mpcc->mpcc_bot = NULL; - mpcc->blnd_cfg.overlap_only = false; - mpcc->blnd_cfg.global_alpha = 0xff; - mpcc->blnd_cfg.global_gain = 0xff; - mpcc->sm_cfg.enable = false; -} - -/* - * Reset the MPCC HW status by disconnecting all muxes. - * - * Parameters: - * [in/out] mpc - MPC context. 
- * - * Return: void - */ -void mpc1_mpc_init(struct mpc *mpc) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - int mpcc_id; - int opp_id; - - mpc10->mpcc_in_use_mask = 0; - for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) { - REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); - REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); - REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); - REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); - - mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); - } - - for (opp_id = 0; opp_id < MAX_OPP; opp_id++) { - if (REG(MUX[opp_id])) - REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf); - } -} - -void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - int opp_id; - - REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id); - - REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); - REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); - REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); - REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); - - mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); - - if (opp_id < MAX_OPP && REG(MUX[opp_id])) - REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf); -} - - -void mpc1_init_mpcc_list_from_hw( - struct mpc *mpc, - struct mpc_tree *tree) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - unsigned int opp_id; - unsigned int top_sel; - unsigned int bot_sel; - unsigned int out_mux; - struct mpcc *mpcc; - int mpcc_id; - int bot_mpcc_id; - - REG_GET(MUX[tree->opp_id], MPC_OUT_MUX, &out_mux); - - if (out_mux != 0xf) { - for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) { - REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id); - REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel); - REG_GET(MPCC_BOT_SEL[mpcc_id], MPCC_BOT_SEL, &bot_sel); - - if (bot_sel == mpcc_id) - bot_sel = 0xf; - - if ((opp_id == tree->opp_id) && (top_sel != 0xf)) { - mpcc = mpc1_get_mpcc(mpc, mpcc_id); - mpcc->dpp_id = top_sel; - mpc10->mpcc_in_use_mask |= 1 << mpcc_id; - - if (out_mux == mpcc_id) - tree->opp_list = mpcc; - if (bot_sel != 0xf && bot_sel < mpc10->num_mpcc) { - bot_mpcc_id = bot_sel; - REG_GET(MPCC_OPP_ID[bot_mpcc_id], MPCC_OPP_ID, &opp_id); - REG_GET(MPCC_TOP_SEL[bot_mpcc_id], MPCC_TOP_SEL, &top_sel); - if ((opp_id == tree->opp_id) && (top_sel != 0xf)) { - struct mpcc *mpcc_bottom = mpc1_get_mpcc(mpc, bot_mpcc_id); - - mpcc->mpcc_bot = mpcc_bottom; - } - } - } - } - } -} - -void mpc1_read_mpcc_state( - struct mpc *mpc, - int mpcc_inst, - struct mpcc_state *s) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - - REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); - REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); - REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); - REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, - MPCC_ALPHA_BLND_MODE, &s->alpha_mode, - MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, - MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); - REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, - MPCC_BUSY, &s->busy); -} - -void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - - REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 
1 : 0); -} - -unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id) -{ - struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); - uint32_t val = 0xf; - - if (opp_id < MAX_OPP && REG(MUX[opp_id])) - REG_GET(MUX[opp_id], MPC_OUT_MUX, &val); - - return val; -} - -static const struct mpc_funcs dcn10_mpc_funcs = { - .read_mpcc_state = mpc1_read_mpcc_state, - .insert_plane = mpc1_insert_plane, - .remove_mpcc = mpc1_remove_mpcc, - .mpc_init = mpc1_mpc_init, - .mpc_init_single_inst = mpc1_mpc_init_single_inst, - .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, - .wait_for_idle = mpc1_assert_idle_mpcc, - .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, - .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, - .update_blending = mpc1_update_blending, - .cursor_lock = mpc1_cursor_lock, - .set_denorm = NULL, - .set_denorm_clamp = NULL, - .set_output_csc = NULL, - .set_output_gamma = NULL, - .get_mpc_out_mux = mpc1_get_mpc_out_mux, - .set_bg_color = mpc1_set_bg_color, -}; - -void dcn10_mpc_construct(struct dcn10_mpc *mpc10, - struct dc_context *ctx, - const struct dcn_mpc_registers *mpc_regs, - const struct dcn_mpc_shift *mpc_shift, - const struct dcn_mpc_mask *mpc_mask, - int num_mpcc) -{ - int i; - - mpc10->base.ctx = ctx; - - mpc10->base.funcs = &dcn10_mpc_funcs; - - mpc10->mpc_regs = mpc_regs; - mpc10->mpc_shift = mpc_shift; - mpc10->mpc_mask = mpc_mask; - - mpc10->mpcc_in_use_mask = 0; - mpc10->num_mpcc = num_mpcc; - - for (i = 0; i < MAX_MPCC; i++) - mpc1_init_mpcc(&mpc10->base.mpcc_array[i], i); -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h deleted file mode 100644 index dbfffc6383dcbc..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h +++ /dev/null @@ -1,204 +0,0 @@ -/* Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_MPCC_DCN10_H__ -#define __DC_MPCC_DCN10_H__ - -#include "mpc.h" - -#define TO_DCN10_MPC(mpc_base) \ - container_of(mpc_base, struct dcn10_mpc, base) - -#define MPC_COMMON_REG_LIST_DCN1_0(inst) \ - SRII(MPCC_TOP_SEL, MPCC, inst),\ - SRII(MPCC_BOT_SEL, MPCC, inst),\ - SRII(MPCC_CONTROL, MPCC, inst),\ - SRII(MPCC_STATUS, MPCC, inst),\ - SRII(MPCC_OPP_ID, MPCC, inst),\ - SRII(MPCC_BG_G_Y, MPCC, inst),\ - SRII(MPCC_BG_R_CR, MPCC, inst),\ - SRII(MPCC_BG_B_CB, MPCC, inst),\ - SRII(MPCC_SM_CONTROL, MPCC, inst),\ - SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) - -#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \ - SRII(MUX, MPC_OUT, inst),\ - VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) - -#define MPC_COMMON_REG_VARIABLE_LIST \ - uint32_t MPCC_TOP_SEL[MAX_MPCC]; \ - uint32_t MPCC_BOT_SEL[MAX_MPCC]; \ - uint32_t MPCC_CONTROL[MAX_MPCC]; \ - uint32_t MPCC_STATUS[MAX_MPCC]; \ - uint32_t MPCC_OPP_ID[MAX_MPCC]; \ - uint32_t MPCC_BG_G_Y[MAX_MPCC]; \ - uint32_t MPCC_BG_R_CR[MAX_MPCC]; \ - uint32_t MPCC_BG_B_CB[MAX_MPCC]; \ - uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \ - uint32_t MUX[MAX_OPP]; \ - uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \ - uint32_t CUR[MAX_OPP]; - -#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ - SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\ - SF(MPCC0_MPCC_BOT_SEL, MPCC_BOT_SEL, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_MODE, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_BLND_MODE, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_MULTIPLIED_MODE, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BLND_ACTIVE_OVERLAP_ONLY, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_ALPHA, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_GAIN, mask_sh),\ - SF(MPCC0_MPCC_STATUS, MPCC_IDLE, mask_sh),\ - SF(MPCC0_MPCC_STATUS, MPCC_BUSY, mask_sh),\ - SF(MPCC0_MPCC_OPP_ID, MPCC_OPP_ID, mask_sh),\ - SF(MPCC0_MPCC_BG_G_Y, MPCC_BG_G_Y, mask_sh),\ - SF(MPCC0_MPCC_BG_R_CR, MPCC_BG_R_CR, mask_sh),\ - SF(MPCC0_MPCC_BG_B_CB, MPCC_BG_B_CB, mask_sh),\ - SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_EN, mask_sh),\ - SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_MODE, mask_sh),\ - SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FRAME_ALT, mask_sh),\ - SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\ - SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\ - SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\ - SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh) - -#define MPC_REG_FIELD_LIST(type) \ - type MPCC_TOP_SEL;\ - type MPCC_BOT_SEL;\ - type MPCC_MODE;\ - type MPCC_ALPHA_BLND_MODE;\ - type MPCC_ALPHA_MULTIPLIED_MODE;\ - type MPCC_BLND_ACTIVE_OVERLAP_ONLY;\ - type MPCC_GLOBAL_ALPHA;\ - type MPCC_GLOBAL_GAIN;\ - type MPCC_IDLE;\ - type MPCC_BUSY;\ - type MPCC_OPP_ID;\ - type MPCC_BG_G_Y;\ - type MPCC_BG_R_CR;\ - type MPCC_BG_B_CB;\ - type MPCC_SM_EN;\ - type MPCC_SM_MODE;\ - type MPCC_SM_FRAME_ALT;\ - type MPCC_SM_FIELD_ALT;\ - type MPCC_SM_FORCE_NEXT_FRAME_POL;\ - type MPCC_SM_FORCE_NEXT_TOP_POL;\ - type MPC_OUT_MUX;\ - type MPCC_UPDATE_LOCK_SEL;\ - type CUR_VUPDATE_LOCK_SET; - -struct dcn_mpc_registers { - MPC_COMMON_REG_VARIABLE_LIST -}; - -struct dcn_mpc_shift { - MPC_REG_FIELD_LIST(uint8_t) -}; - -struct dcn_mpc_mask { - MPC_REG_FIELD_LIST(uint32_t) -}; - -struct dcn10_mpc { - struct mpc base; - - int mpcc_in_use_mask; - int num_mpcc; - const struct dcn_mpc_registers *mpc_regs; - const struct dcn_mpc_shift *mpc_shift; - const struct dcn_mpc_mask *mpc_mask; -}; - -void dcn10_mpc_construct(struct dcn10_mpc *mpcc10, - struct dc_context *ctx, - 
const struct dcn_mpc_registers *mpc_regs, - const struct dcn_mpc_shift *mpc_shift, - const struct dcn_mpc_mask *mpc_mask, - int num_mpcc); - -struct mpcc *mpc1_insert_plane( - struct mpc *mpc, - struct mpc_tree *tree, - struct mpcc_blnd_cfg *blnd_cfg, - struct mpcc_sm_cfg *sm_cfg, - struct mpcc *insert_above_mpcc, - int dpp_id, - int mpcc_id); - -void mpc1_remove_mpcc( - struct mpc *mpc, - struct mpc_tree *tree, - struct mpcc *mpcc); - -void mpc1_mpc_init( - struct mpc *mpc); - -void mpc1_mpc_init_single_inst( - struct mpc *mpc, - unsigned int mpcc_id); - -void mpc1_assert_idle_mpcc( - struct mpc *mpc, - int id); - -void mpc1_set_bg_color( - struct mpc *mpc, - struct tg_color *bg_color, - int id); - -void mpc1_update_stereo_mix( - struct mpc *mpc, - struct mpcc_sm_cfg *sm_cfg, - int mpcc_id); - -bool mpc1_is_mpcc_idle( - struct mpc *mpc, - int mpcc_id); - -void mpc1_assert_mpcc_idle_before_connect( - struct mpc *mpc, - int mpcc_id); - -void mpc1_init_mpcc_list_from_hw( - struct mpc *mpc, - struct mpc_tree *tree); - -struct mpcc *mpc1_get_mpcc( - struct mpc *mpc, - int mpcc_id); - -struct mpcc *mpc1_get_mpcc_for_dpp( - struct mpc_tree *tree, - int dpp_id); - -void mpc1_read_mpcc_state( - struct mpc *mpc, - int mpcc_inst, - struct mpcc_state *s); - -void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock); - -unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id); -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c deleted file mode 100644 index 71e9288d60ed7c..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "core_types.h" -#include "dm_services.h" -#include "dcn10_opp.h" -#include "reg_helper.h" - -#define REG(reg) \ - (oppn10->regs->reg) - -#undef FN -#define FN(reg_name, field_name) \ - oppn10->opp_shift->field_name, oppn10->opp_mask->field_name - -#define CTX \ - oppn10->base.ctx - -/** - * opp1_set_truncation(): - * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp - * 2) enable truncation - * 3) HW remove 12bit FMT support for DCE11 power saving reason. - * - * @oppn10: output_pixel_processor struct instance for dcn10. - * @params: pointer to bit_depth_reduction_params. 
- */ -static void opp1_set_truncation( - struct dcn10_opp *oppn10, - const struct bit_depth_reduction_params *params) -{ - REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, - FMT_TRUNCATE_EN, params->flags.TRUNCATE_ENABLED, - FMT_TRUNCATE_DEPTH, params->flags.TRUNCATE_DEPTH, - FMT_TRUNCATE_MODE, params->flags.TRUNCATE_MODE); -} - -static void opp1_set_spatial_dither( - struct dcn10_opp *oppn10, - const struct bit_depth_reduction_params *params) -{ - /*Disable spatial (random) dithering*/ - REG_UPDATE_7(FMT_BIT_DEPTH_CONTROL, - FMT_SPATIAL_DITHER_EN, 0, - FMT_SPATIAL_DITHER_MODE, 0, - FMT_SPATIAL_DITHER_DEPTH, 0, - FMT_TEMPORAL_DITHER_EN, 0, - FMT_HIGHPASS_RANDOM_ENABLE, 0, - FMT_FRAME_RANDOM_ENABLE, 0, - FMT_RGB_RANDOM_ENABLE, 0); - - - /* only use FRAME_COUNTER_MAX if frameRandom == 1*/ - if (params->flags.FRAME_RANDOM == 1) { - if (params->flags.SPATIAL_DITHER_DEPTH == 0 || params->flags.SPATIAL_DITHER_DEPTH == 1) { - REG_UPDATE_2(FMT_CONTROL, - FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15, - FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2); - } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) { - REG_UPDATE_2(FMT_CONTROL, - FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3, - FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1); - } else { - return; - } - } else { - REG_UPDATE_2(FMT_CONTROL, - FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0, - FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0); - } - - /*Set seed for random values for - * spatial dithering for R,G,B channels*/ - - REG_SET(FMT_DITHER_RAND_R_SEED, 0, - FMT_RAND_R_SEED, params->r_seed_value); - - REG_SET(FMT_DITHER_RAND_G_SEED, 0, - FMT_RAND_G_SEED, params->g_seed_value); - - REG_SET(FMT_DITHER_RAND_B_SEED, 0, - FMT_RAND_B_SEED, params->b_seed_value); - - /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero - * offset for the R/Cr channel, lower 4LSB - * is forced to zeros. Typically set to 0 - * RGB and 0x80000 YCbCr. - */ - /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero - * offset for the G/Y channel, lower 4LSB is - * forced to zeros. Typically set to 0 RGB - * and 0x80000 YCbCr. - */ - /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero - * offset for the B/Cb channel, lower 4LSB is - * forced to zeros. Typically set to 0 RGB and - * 0x80000 YCbCr. - */ - - REG_UPDATE_6(FMT_BIT_DEPTH_CONTROL, - /*Enable spatial dithering*/ - FMT_SPATIAL_DITHER_EN, params->flags.SPATIAL_DITHER_ENABLED, - /* Set spatial dithering mode - * (default is Seed patterrn AAAA...) - */ - FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE, - /*Set spatial dithering bit depth*/ - FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH, - /*Disable High pass filter*/ - FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM, - /*Reset only at startup*/ - FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM, - /*Set RGB data dithered with x^28+x^3+1*/ - FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM); -} - -void opp1_program_bit_depth_reduction( - struct output_pixel_processor *opp, - const struct bit_depth_reduction_params *params) -{ - struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - - opp1_set_truncation(oppn10, params); - opp1_set_spatial_dither(oppn10, params); - /* TODO - * set_temporal_dither(oppn10, params); - */ -} - -/** - * opp1_set_pixel_encoding(): - * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly - * 1: YCbCr 4:2:2 - * - * @oppn10: output_pixel_processor struct instance for dcn10. - * @params: pointer to clamping_and_pixel_encoding_params. 
- */ -static void opp1_set_pixel_encoding( - struct dcn10_opp *oppn10, - const struct clamping_and_pixel_encoding_params *params) -{ - bool force_chroma_subsampling_1tap = - oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap; - - switch (params->pixel_encoding) { - - case PIXEL_ENCODING_RGB: - case PIXEL_ENCODING_YCBCR444: - REG_UPDATE_3(FMT_CONTROL, - FMT_PIXEL_ENCODING, 0, - FMT_SUBSAMPLING_MODE, 0, - FMT_CBCR_BIT_REDUCTION_BYPASS, 0); - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); - break; - case PIXEL_ENCODING_YCBCR422: - REG_UPDATE_3(FMT_CONTROL, - FMT_PIXEL_ENCODING, 1, - FMT_SUBSAMPLING_MODE, 2, - FMT_CBCR_BIT_REDUCTION_BYPASS, 0); - break; - case PIXEL_ENCODING_YCBCR420: - REG_UPDATE_3(FMT_CONTROL, - FMT_PIXEL_ENCODING, 2, - FMT_SUBSAMPLING_MODE, 2, - FMT_CBCR_BIT_REDUCTION_BYPASS, 1); - break; - default: - break; - } - - if (force_chroma_subsampling_1tap) - REG_UPDATE(FMT_CONTROL, FMT_SUBSAMPLING_MODE, 0); -} - -/** - * opp1_set_clamping(): - * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping) - * 1 for 8 bpc - * 2 for 10 bpc - * 3 for 12 bpc - * 7 for programable - * 2) Enable clamp if Limited range requested - * - * @oppn10: output_pixel_processor struct instance for dcn10. - * @params: pointer to clamping_and_pixel_encoding_params. - */ -static void opp1_set_clamping( - struct dcn10_opp *oppn10, - const struct clamping_and_pixel_encoding_params *params) -{ - REG_UPDATE_2(FMT_CLAMP_CNTL, - FMT_CLAMP_DATA_EN, 0, - FMT_CLAMP_COLOR_FORMAT, 0); - - switch (params->clamping_level) { - case CLAMPING_FULL_RANGE: - REG_UPDATE_2(FMT_CLAMP_CNTL, - FMT_CLAMP_DATA_EN, 1, - FMT_CLAMP_COLOR_FORMAT, 0); - break; - case CLAMPING_LIMITED_RANGE_8BPC: - REG_UPDATE_2(FMT_CLAMP_CNTL, - FMT_CLAMP_DATA_EN, 1, - FMT_CLAMP_COLOR_FORMAT, 1); - break; - case CLAMPING_LIMITED_RANGE_10BPC: - REG_UPDATE_2(FMT_CLAMP_CNTL, - FMT_CLAMP_DATA_EN, 1, - FMT_CLAMP_COLOR_FORMAT, 2); - - break; - case CLAMPING_LIMITED_RANGE_12BPC: - REG_UPDATE_2(FMT_CLAMP_CNTL, - FMT_CLAMP_DATA_EN, 1, - FMT_CLAMP_COLOR_FORMAT, 3); - break; - case CLAMPING_LIMITED_RANGE_PROGRAMMABLE: - /* TODO */ - default: - break; - } - -} - -void opp1_set_dyn_expansion( - struct output_pixel_processor *opp, - enum dc_color_space color_sp, - enum dc_color_depth color_dpth, - enum signal_type signal) -{ - struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - - REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, - FMT_DYNAMIC_EXP_EN, 0, - FMT_DYNAMIC_EXP_MODE, 0); - - if (opp->dyn_expansion == DYN_EXPANSION_DISABLE) - return; - - /*00 - 10-bit -> 12-bit dynamic expansion*/ - /*01 - 8-bit -> 12-bit dynamic expansion*/ - if (signal == SIGNAL_TYPE_HDMI_TYPE_A || - signal == SIGNAL_TYPE_DISPLAY_PORT || - signal == SIGNAL_TYPE_DISPLAY_PORT_MST || - signal == SIGNAL_TYPE_VIRTUAL) { - switch (color_dpth) { - case COLOR_DEPTH_888: - REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, - FMT_DYNAMIC_EXP_EN, 1, - FMT_DYNAMIC_EXP_MODE, 1); - break; - case COLOR_DEPTH_101010: - REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, - FMT_DYNAMIC_EXP_EN, 1, - FMT_DYNAMIC_EXP_MODE, 0); - break; - case COLOR_DEPTH_121212: - REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, - FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/ - FMT_DYNAMIC_EXP_MODE, 0); - break; - default: - break; - } - } -} - -static void opp1_program_clamping_and_pixel_encoding( - struct output_pixel_processor *opp, - const struct clamping_and_pixel_encoding_params *params) -{ - struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - - opp1_set_clamping(oppn10, params); - opp1_set_pixel_encoding(oppn10, params); -} - -void opp1_program_fmt( - struct 
output_pixel_processor *opp, - struct bit_depth_reduction_params *fmt_bit_depth, - struct clamping_and_pixel_encoding_params *clamping) -{ - struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - - if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420) - REG_UPDATE(FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, 0); - - /* dithering is affected by , hence should be - * programmed afterwards */ - opp1_program_bit_depth_reduction( - opp, - fmt_bit_depth); - - opp1_program_clamping_and_pixel_encoding( - opp, - clamping); - - return; -} - -void opp1_program_stereo( - struct output_pixel_processor *opp, - bool enable, - const struct dc_crtc_timing *timing) -{ - struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - - uint32_t active_width = timing->h_addressable - timing->h_border_right - timing->h_border_right; - uint32_t space1_size = timing->v_total - timing->v_addressable; - /* TODO: confirm computation of space2_size */ - uint32_t space2_size = timing->v_total - timing->v_addressable; - - if (!enable) { - active_width = 0; - space1_size = 0; - space2_size = 0; - } - - /* TODO: for which cases should FMT_STEREOSYNC_OVERRIDE be set? */ - REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, 0); - - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, active_width); - - /* Program OPPBUF_3D_VACT_SPACE1_SIZE and OPPBUF_VACT_SPACE2_SIZE registers - * In 3D progressive frames, Vactive space happens only in between the 2 frames, - * so only need to program OPPBUF_3D_VACT_SPACE1_SIZE - * In 3D alternative frames, left and right frames, top and bottom field. - */ - if (timing->timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE) - REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, space2_size); - else - REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, space1_size); - - /* TODO: Is programming of OPPBUF_DUMMY_DATA_R/G/B needed? */ - /* - REG_UPDATE(OPPBUF_3D_PARAMETERS_0, - OPPBUF_DUMMY_DATA_R, data_r); - REG_UPDATE(OPPBUF_3D_PARAMETERS_1, - OPPBUF_DUMMY_DATA_G, data_g); - REG_UPDATE(OPPBUF_3D_PARAMETERS_1, - OPPBUF_DUMMY_DATA_B, _data_b); - */ -} - -void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) -{ - struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - uint32_t regval = enable ? 
1 : 0; - - REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval); -} - -/*****************************************/ -/* Constructor, Destructor */ -/*****************************************/ - -void opp1_destroy(struct output_pixel_processor **opp) -{ - kfree(TO_DCN10_OPP(*opp)); - *opp = NULL; -} - -static const struct opp_funcs dcn10_opp_funcs = { - .opp_set_dyn_expansion = opp1_set_dyn_expansion, - .opp_program_fmt = opp1_program_fmt, - .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, - .opp_program_stereo = opp1_program_stereo, - .opp_pipe_clock_control = opp1_pipe_clock_control, - .opp_set_disp_pattern_generator = NULL, - .opp_program_dpg_dimensions = NULL, - .dpg_is_blanked = NULL, - .dpg_is_pending = NULL, - .opp_destroy = opp1_destroy -}; - -void dcn10_opp_construct(struct dcn10_opp *oppn10, - struct dc_context *ctx, - uint32_t inst, - const struct dcn10_opp_registers *regs, - const struct dcn10_opp_shift *opp_shift, - const struct dcn10_opp_mask *opp_mask) -{ - - oppn10->base.ctx = ctx; - oppn10->base.inst = inst; - oppn10->base.funcs = &dcn10_opp_funcs; - - oppn10->regs = regs; - oppn10->opp_shift = opp_shift; - oppn10->opp_mask = opp_mask; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h deleted file mode 100644 index c87de68a509e49..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ /dev/null @@ -1,191 +0,0 @@ -/* Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_OPP_DCN10_H__ -#define __DC_OPP_DCN10_H__ - -#include "opp.h" - -#define TO_DCN10_OPP(opp)\ - container_of(opp, struct dcn10_opp, base) - -#define OPP_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define OPP_REG_LIST_DCN(id) \ - SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \ - SRI(FMT_CONTROL, FMT, id), \ - SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \ - SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \ - SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \ - SRI(FMT_CLAMP_CNTL, FMT, id), \ - SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \ - SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id), \ - SRI(OPPBUF_CONTROL, OPPBUF, id),\ - SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \ - SRI(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \ - SRI(OPP_PIPE_CONTROL, OPP_PIPE, id) - -#define OPP_REG_LIST_DCN10(id) \ - OPP_REG_LIST_DCN(id) - -#define OPP_COMMON_REG_VARIABLE_LIST \ - uint32_t FMT_BIT_DEPTH_CONTROL; \ - uint32_t FMT_CONTROL; \ - uint32_t FMT_DITHER_RAND_R_SEED; \ - uint32_t FMT_DITHER_RAND_G_SEED; \ - uint32_t FMT_DITHER_RAND_B_SEED; \ - uint32_t FMT_CLAMP_CNTL; \ - uint32_t FMT_DYNAMIC_EXP_CNTL; \ - uint32_t FMT_MAP420_MEMORY_CONTROL; \ - uint32_t OPPBUF_CONTROL; \ - uint32_t OPPBUF_CONTROL1; \ - uint32_t OPPBUF_3D_PARAMETERS_0; \ - uint32_t OPPBUF_3D_PARAMETERS_1; \ - uint32_t OPP_PIPE_CONTROL - -#define OPP_MASK_SH_LIST_DCN(mask_sh) \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh), \ - OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh), \ - OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \ - OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \ - OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \ - OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \ - OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \ - OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \ - OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \ - OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \ - OPP_SF(FMT0_FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh), \ - OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh), \ - OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh), \ - OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh), \ - OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh), \ - OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh), \ - OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\ - OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, mask_sh),\ - OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh), \ - OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, mask_sh), \ - OPP_SF(OPP_PIPE0_OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh) - -#define OPP_MASK_SH_LIST_DCN10(mask_sh) \ - OPP_MASK_SH_LIST_DCN(mask_sh), \ - OPP_SF(OPPBUF0_OPPBUF_CONTROL, 
OPPBUF_DISPLAY_SEGMENTATION, mask_sh),\ - OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, mask_sh) - -#define OPP_DCN10_REG_FIELD_LIST(type) \ - type FMT_TRUNCATE_EN; \ - type FMT_TRUNCATE_DEPTH; \ - type FMT_TRUNCATE_MODE; \ - type FMT_SPATIAL_DITHER_EN; \ - type FMT_SPATIAL_DITHER_MODE; \ - type FMT_SPATIAL_DITHER_DEPTH; \ - type FMT_TEMPORAL_DITHER_EN; \ - type FMT_HIGHPASS_RANDOM_ENABLE; \ - type FMT_FRAME_RANDOM_ENABLE; \ - type FMT_RGB_RANDOM_ENABLE; \ - type FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX; \ - type FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP; \ - type FMT_RAND_R_SEED; \ - type FMT_RAND_G_SEED; \ - type FMT_RAND_B_SEED; \ - type FMT_PIXEL_ENCODING; \ - type FMT_SUBSAMPLING_MODE; \ - type FMT_CBCR_BIT_REDUCTION_BYPASS; \ - type FMT_CLAMP_DATA_EN; \ - type FMT_CLAMP_COLOR_FORMAT; \ - type FMT_DYNAMIC_EXP_EN; \ - type FMT_DYNAMIC_EXP_MODE; \ - type FMT_MAP420MEM_PWR_FORCE; \ - type FMT_STEREOSYNC_OVERRIDE; \ - type OPPBUF_ACTIVE_WIDTH;\ - type OPPBUF_PIXEL_REPETITION;\ - type OPPBUF_DISPLAY_SEGMENTATION;\ - type OPPBUF_OVERLAP_PIXEL_NUM;\ - type OPPBUF_NUM_SEGMENT_PADDED_PIXELS; \ - type OPPBUF_3D_VACT_SPACE1_SIZE; \ - type OPPBUF_3D_VACT_SPACE2_SIZE; \ - type OPP_PIPE_CLOCK_EN - -struct dcn10_opp_registers { - OPP_COMMON_REG_VARIABLE_LIST; -}; - -struct dcn10_opp_shift { - OPP_DCN10_REG_FIELD_LIST(uint8_t); -}; - -struct dcn10_opp_mask { - OPP_DCN10_REG_FIELD_LIST(uint32_t); -}; - -struct dcn10_opp { - struct output_pixel_processor base; - - const struct dcn10_opp_registers *regs; - const struct dcn10_opp_shift *opp_shift; - const struct dcn10_opp_mask *opp_mask; - - bool is_write_to_ram_a_safe; -}; - -void dcn10_opp_construct(struct dcn10_opp *oppn10, - struct dc_context *ctx, - uint32_t inst, - const struct dcn10_opp_registers *regs, - const struct dcn10_opp_shift *opp_shift, - const struct dcn10_opp_mask *opp_mask); - -void opp1_set_dyn_expansion( - struct output_pixel_processor *opp, - enum dc_color_space color_sp, - enum dc_color_depth color_dpth, - enum signal_type signal); - -void opp1_program_fmt( - struct output_pixel_processor *opp, - struct bit_depth_reduction_params *fmt_bit_depth, - struct clamping_and_pixel_encoding_params *clamping); - -void opp1_program_bit_depth_reduction( - struct output_pixel_processor *opp, - const struct bit_depth_reduction_params *params); - -void opp1_program_stereo( - struct output_pixel_processor *opp, - bool enable, - const struct dc_crtc_timing *timing); - -void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable); - -void opp1_destroy(struct output_pixel_processor **opp); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index b3aeabc4d60518..25ba0d310d46da 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: MIT # Copyright © 2019-2024 Advanced Micro Devices, Inc. All rights reserved. -DCN20 = dcn20_mpc.o dcn20_opp.o dcn20_mmhubbub.o \ - dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o +DCN20 = dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c deleted file mode 100644 index 259a98e4ee2c28..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#include "reg_helper.h" -#include "resource.h" -#include "mcif_wb.h" -#include "dcn20_mmhubbub.h" - - -#define REG(reg)\ - mcif_wb20->mcif_wb_regs->reg - -#define CTX \ - mcif_wb20->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - mcif_wb20->mcif_wb_shift->field_name, mcif_wb20->mcif_wb_mask->field_name - -#define MCIF_ADDR(addr) (((unsigned long long)addr & 0xffffffffff) + 0xFE) >> 8 -#define MCIF_ADDR_HIGH(addr) (unsigned long long)addr >> 40 - -/* wbif programming guide: - * 1. set up wbif parameter: - * unsigned long long luma_address[4]; //4 frame buffer - * unsigned long long chroma_address[4]; - * unsigned int luma_pitch; - * unsigned int chroma_pitch; - * unsigned int warmup_pitch=0x10; //256B align, the page size is 4KB when it is 0x10 - * unsigned int slice_lines; //slice size - * unsigned int time_per_pixel; // time per pixel, in ns - * unsigned int arbitration_slice; // 0: 512 bytes 1: 1024 bytes 2: 2048 Bytes - * unsigned int max_scaled_time; // used for QOS generation - * unsigned int swlock=0x0; - * unsigned int cli_watermark[4]; //4 group urgent watermark - * unsigned int pstate_watermark[4]; //4 group pstate watermark - * unsigned int sw_int_en; // Software interrupt enable, frame end and overflow - * unsigned int sw_slice_int_en; // slice end interrupt enable - * unsigned int sw_overrun_int_en; // overrun error interrupt enable - * unsigned int vce_int_en; // VCE interrupt enable, frame end and overflow - * unsigned int vce_slice_int_en; // VCE slice end interrupt enable, frame end and overflow - * - * 2. configure wbif register - * a. call mmhubbub_config_wbif() - * - * 3. Enable wbif - * call set_wbif_bufmgr_enable(); - * - * 4. 
wbif_dump_status(), option, for debug purpose - * the bufmgr status can show the progress of write back, can be used for debug purpose - */ - -static void mmhubbub2_config_mcif_buf(struct mcif_wb *mcif_wb, - struct mcif_buf_params *params, - unsigned int dest_height) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - - /* sw lock buffer0~buffer3, default is 0 */ - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, params->swlock); - - /* buffer address for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, MCIF_ADDR(params->luma_address[0])); - REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[0])); - /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB_BUF_1_ADDR_Y_OFFSET, 0); - - /* buffer address for Chroma in planar mode (unused in packing mode) */ - REG_UPDATE(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, MCIF_ADDR(params->chroma_address[0])); - REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[0])); - /* right eye offset for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB_BUF_1_ADDR_C_OFFSET, 0); - - /* buffer address for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, MCIF_ADDR(params->luma_address[1])); - REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[1])); - /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB_BUF_2_ADDR_Y_OFFSET, 0); - - /* buffer address for Chroma in planar mode (unused in packing mode) */ - REG_UPDATE(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, MCIF_ADDR(params->chroma_address[1])); - REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[1])); - /* right eye offset for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB_BUF_2_ADDR_C_OFFSET, 0); - - /* buffer address for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, MCIF_ADDR(params->luma_address[2])); - REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[2])); - /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB_BUF_3_ADDR_Y_OFFSET, 0); - - /* buffer address for Chroma in planar mode (unused in packing mode) */ - REG_UPDATE(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, MCIF_ADDR(params->chroma_address[2])); - REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[2])); - /* right eye offset for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB_BUF_3_ADDR_C_OFFSET, 0); - - /* buffer address for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, MCIF_ADDR(params->luma_address[3])); - REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[3])); - /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB_BUF_4_ADDR_Y_OFFSET, 0); - - /* buffer address for Chroma in planar mode (unused in packing mode) */ - REG_UPDATE(MCIF_WB_BUF_4_ADDR_C, 
MCIF_WB_BUF_4_ADDR_C, MCIF_ADDR(params->chroma_address[3])); - REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[3])); - /* right eye offset for packing mode or Luma in planar mode */ - REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB_BUF_4_ADDR_C_OFFSET, 0); - - /* setup luma & chroma size - * should be enough to contain a whole frame Luma data, - * the programmed value is frame buffer size [27:8], 256-byte aligned - */ - REG_UPDATE(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, (params->luma_pitch>>8) * dest_height); - REG_UPDATE(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, (params->chroma_pitch>>8) * dest_height); - - /* enable address fence */ - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, 1); - - /* setup pitch, the programmed value is [15:8], 256B align */ - REG_UPDATE_2(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, params->luma_pitch >> 8, - MCIF_WB_BUF_CHROMA_PITCH, params->chroma_pitch >> 8); - - /* Set pitch for MC cache warm up mode */ - /* Pitch is 256 bytes aligned. The default pitch is 4K */ - /* default is 0x10 */ - REG_UPDATE(MCIF_WB_WARM_UP_CNTL, MCIF_WB_PITCH_SIZE_WARMUP, params->warmup_pitch); -} - -static void mmhubbub2_config_mcif_arb(struct mcif_wb *mcif_wb, - struct mcif_arb_params *params) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - - /* Programmed by the video driver based on the CRTC timing (for DWB) */ - REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, params->time_per_pixel); - - /* Programming dwb watermark */ - /* Watermark to generate urgent in MCIF_WB_CLI, value is determined by MCIF_WB_CLI_WATERMARK_MASK. */ - /* Program in ns. A formula will be provided in the pseudo code to calculate the value. */ - REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x0); - /* urgent_watermarkA */ - REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[0]); - REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x1); - /* urgent_watermarkB */ - REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[1]); - REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x2); - /* urgent_watermarkC */ - REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[2]); - REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x3); - /* urgent_watermarkD */ - REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[3]); - - /* Programming nb pstate watermark */ - /* nbp_state_change_watermarkA */ - REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x0); - REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, - NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[0]); - /* nbp_state_change_watermarkB */ - REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x1); - REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, - NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[1]); - /* nbp_state_change_watermarkC */ - REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x2); - REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, - NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[2]); - /* nbp_state_change_watermarkD */ - REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x3); - REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, - NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[3]); - - /* max_scaled_time */ - REG_UPDATE(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, 
params->max_scaled_time); - - /* slice_lines */ - REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, params->slice_lines-1); - - /* Set arbitration unit for Luma/Chroma */ - /* arb_unit=2 should be chosen for more efficiency */ - /* Arbitration size, 0: 512 bytes 1: 1024 bytes 2: 2048 Bytes */ - REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice); -} - -void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb, - struct mcif_irq_params *params) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - - /* Set interrupt mask */ - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, params->sw_int_en); - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, params->sw_slice_int_en); - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, params->sw_overrun_int_en); - - REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, params->vce_int_en); - if (mcif_wb20->mcif_wb_mask->MCIF_WB_BUFMGR_VCE_SLICE_INT_EN) - REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en); -} - -void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - - /* Enable Mcifwb */ - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, 1); -} - -void mmhubbub2_disable_mcif(struct mcif_wb *mcif_wb) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - - /* disable buffer manager */ - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, 0); -} - -/* set which group of pstate watermark to use and set wbif watermark change request */ -/* -static void mmhubbub2_wbif_watermark_change_req(struct mcif_wb *mcif_wb, unsigned int wm_set) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - uint32_t change_req; - - REG_GET(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, &change_req); - change_req = (change_req == 0) ? 
1 : 0; - REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_SEL, wm_set); - REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, change_req); -} -*/ -/* Set watermark change interrupt disable bit */ -/* -static void mmhubbub2_set_wbif_watermark_change_int_disable(struct mcif_wb *mcif_wb, unsigned int ack_int_dis) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - - REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_DIS, ack_int_dis); -} -*/ -/* Read watermark change interrupt status */ -/* -unsigned int mmhubbub2_get_wbif_watermark_change_int_status(struct mcif_wb *mcif_wb) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - uint32_t irq_status; - - REG_GET(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_STATUS, &irq_status); - return irq_status; -} -*/ - -void mcifwb2_dump_frame(struct mcif_wb *mcif_wb, - struct mcif_buf_params *mcif_params, - enum dwb_scaler_mode out_format, - unsigned int dest_width, - unsigned int dest_height, - struct mcif_wb_frame_dump_info *dump_info, - unsigned char *luma_buffer, - unsigned char *chroma_buffer, - unsigned char *dest_luma_buffer, - unsigned char *dest_chroma_buffer) -{ - struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); - - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf); - - memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height); - memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2); - - REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0); - - dump_info->format = out_format; - dump_info->width = dest_width; - dump_info->height = dest_height; - dump_info->luma_pitch = mcif_params->luma_pitch; - dump_info->chroma_pitch = mcif_params->chroma_pitch; - dump_info->size = dest_height * (mcif_params->luma_pitch + mcif_params->chroma_pitch); -} - -static const struct mcif_wb_funcs dcn20_mmhubbub_funcs = { - .enable_mcif = mmhubbub2_enable_mcif, - .disable_mcif = mmhubbub2_disable_mcif, - .config_mcif_buf = mmhubbub2_config_mcif_buf, - .config_mcif_arb = mmhubbub2_config_mcif_arb, - .config_mcif_irq = mmhubbub2_config_mcif_irq, - .dump_frame = mcifwb2_dump_frame, -}; - -void dcn20_mmhubbub_construct(struct dcn20_mmhubbub *mcif_wb20, - struct dc_context *ctx, - const struct dcn20_mmhubbub_registers *mcif_wb_regs, - const struct dcn20_mmhubbub_shift *mcif_wb_shift, - const struct dcn20_mmhubbub_mask *mcif_wb_mask, - int inst) -{ - mcif_wb20->base.ctx = ctx; - - mcif_wb20->base.inst = inst; - mcif_wb20->base.funcs = &dcn20_mmhubbub_funcs; - - mcif_wb20->mcif_wb_regs = mcif_wb_regs; - mcif_wb20->mcif_wb_shift = mcif_wb_shift; - mcif_wb20->mcif_wb_mask = mcif_wb_mask; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h deleted file mode 100644 index 5ab32aa51e1375..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h +++ /dev/null @@ -1,517 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_MCIF_WB_DCN20_H__ -#define __DC_MCIF_WB_DCN20_H__ - -#define TO_DCN20_MMHUBBUB(mcif_wb_base) \ - container_of(mcif_wb_base, struct dcn20_mmhubbub, base) - -#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \ - SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\ - SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\ - SRI(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_PITCH, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst),\ - SRI(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst),\ - SRI(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst),\ - SRI(MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB, inst),\ - SRI(MCIF_WB_TEST_DEBUG_DATA, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB, inst),\ - SRI(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst),\ - SRI(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MCIF_WB, inst),\ - SRI(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst),\ - SRI(MCIF_WB_WATERMARK, MCIF_WB, inst),\ - SRI(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst),\ - SRI(MCIF_WB_WARM_UP_CNTL, MCIF_WB, inst),\ - SRI(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst),\ - SRI(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst),\ - SRI(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, 
inst),\ - SRI(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst),\ - SRI(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst),\ - SRI(SMU_WM_CONTROL, WBIF, inst) - -#define MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \ - SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_ACK, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_P_VMID, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB_BUFMGR_CUR_LINE_R, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_BUFTAG, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_LINE_L, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_NEXT_BUF, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_BUFTAG, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_NXT_BUF, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_FIELD, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_CUR_LINE_L, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_LONG_LINE_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SHORT_LINE_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_FRAME_LENGTH_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_CUR_LINE_R, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_NEW_CONTENT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_COLOR_DEPTH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ_BLACK_PIXEL, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_Y_OVERRUN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, 
MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_BUFTAG, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_NXT_BUF, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_FIELD, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_CUR_LINE_L, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_LONG_LINE_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SHORT_LINE_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_FRAME_LENGTH_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_CUR_LINE_R, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_NEW_CONTENT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_COLOR_DEPTH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ_BLACK_PIXEL, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_Y_OVERRUN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_BUFTAG, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_NXT_BUF, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_FIELD, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_CUR_LINE_L, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_LONG_LINE_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SHORT_LINE_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_FRAME_LENGTH_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_CUR_LINE_R, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_NEW_CONTENT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_COLOR_DEPTH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ_BLACK_PIXEL, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_Y_OVERRUN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_BUFTAG, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_NXT_BUF, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_FIELD, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_CUR_LINE_L, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_LONG_LINE_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, 
MCIF_WB_BUF_4_SHORT_LINE_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_FRAME_LENGTH_ERROR, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_CUR_LINE_R, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_NEW_CONTENT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_COLOR_DEPTH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ_BLACK_PIXEL, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_Y_OVERRUN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_C_OVERRUN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, WM_CHANGE_ACK_FORCE_ON, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB_TEST_DEBUG_INDEX, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_TEST_DEBUG_DATA, MCIF_WB_TEST_DEBUG_DATA, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB_BUF_1_ADDR_Y_OFFSET, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB_BUF_1_ADDR_C_OFFSET, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB_BUF_2_ADDR_Y_OFFSET, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB_BUF_2_ADDR_C_OFFSET, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB_BUF_3_ADDR_Y_OFFSET, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB_BUF_3_ADDR_C_OFFSET, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB_BUF_4_ADDR_Y_OFFSET, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB_BUF_4_ADDR_C_OFFSET, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_FORCE_ON, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_ALLOW_FOR_URGENT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB_CLI_CLOCK_GATER_OVERRIDE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_WARM_UP_CNTL, MCIF_WB_PITCH_SIZE_WARMUP, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, DIS_REFRESH_UNDER_NBPREQ, mask_sh),\ - 
SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, PERFRAME_SELF_REFRESH, mask_sh),\ - SF(MCIF_WB0_MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_SECURITY_LEVEL, MCIF_WB_SECURITY_LEVEL, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_WIDTH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_HEIGHT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_WIDTH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_HEIGHT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_WIDTH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_HEIGHT, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_WIDTH, mask_sh),\ - SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_HEIGHT, mask_sh),\ - SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_SEL, mask_sh),\ - SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, mask_sh),\ - SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_DIS, mask_sh),\ - SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_STATUS, mask_sh) - -#define MCIF_WB_REG_FIELD_LIST_DCN2_0(type) \ - type MCIF_WB_BUFMGR_ENABLE;\ - type MCIF_WB_BUFMGR_SW_INT_EN;\ - type MCIF_WB_BUFMGR_SW_INT_ACK;\ - type MCIF_WB_BUFMGR_SW_SLICE_INT_EN;\ - type MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN;\ - type MCIF_WB_BUFMGR_SW_LOCK;\ - type MCIF_WB_P_VMID;\ - type MCIF_WB_BUF_ADDR_FENCE_EN;\ - type MCIF_WB_BUFMGR_CUR_LINE_R;\ - type MCIF_WB_BUFMGR_VCE_INT_STATUS;\ - type MCIF_WB_BUFMGR_SW_INT_STATUS;\ - type MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS;\ - type MCIF_WB_BUFMGR_CUR_BUF;\ - type MCIF_WB_BUFMGR_BUFTAG;\ - type MCIF_WB_BUFMGR_CUR_LINE_L;\ - type MCIF_WB_BUFMGR_NEXT_BUF;\ - type MCIF_WB_BUF_LUMA_PITCH;\ - type MCIF_WB_BUF_CHROMA_PITCH;\ - type MCIF_WB_BUF_1_ACTIVE;\ - type MCIF_WB_BUF_1_SW_LOCKED;\ - type MCIF_WB_BUF_1_VCE_LOCKED;\ - type MCIF_WB_BUF_1_OVERFLOW;\ - type MCIF_WB_BUF_1_DISABLE;\ - type MCIF_WB_BUF_1_MODE;\ - type MCIF_WB_BUF_1_BUFTAG;\ - type MCIF_WB_BUF_1_NXT_BUF;\ - type MCIF_WB_BUF_1_FIELD;\ - type MCIF_WB_BUF_1_CUR_LINE_L;\ - type MCIF_WB_BUF_1_LONG_LINE_ERROR;\ - type MCIF_WB_BUF_1_SHORT_LINE_ERROR;\ - type MCIF_WB_BUF_1_FRAME_LENGTH_ERROR;\ - type MCIF_WB_BUF_1_CUR_LINE_R;\ - type MCIF_WB_BUF_1_NEW_CONTENT;\ - type MCIF_WB_BUF_1_COLOR_DEPTH;\ - type MCIF_WB_BUF_1_TMZ_BLACK_PIXEL;\ - type MCIF_WB_BUF_1_TMZ;\ - type MCIF_WB_BUF_1_Y_OVERRUN;\ - type MCIF_WB_BUF_1_C_OVERRUN;\ - type MCIF_WB_BUF_2_ACTIVE;\ - type MCIF_WB_BUF_2_SW_LOCKED;\ - type MCIF_WB_BUF_2_VCE_LOCKED;\ - type MCIF_WB_BUF_2_OVERFLOW;\ - type MCIF_WB_BUF_2_DISABLE;\ - type MCIF_WB_BUF_2_MODE;\ - type MCIF_WB_BUF_2_BUFTAG;\ - type MCIF_WB_BUF_2_NXT_BUF;\ - type MCIF_WB_BUF_2_FIELD;\ - type 
MCIF_WB_BUF_2_CUR_LINE_L;\ - type MCIF_WB_BUF_2_LONG_LINE_ERROR;\ - type MCIF_WB_BUF_2_SHORT_LINE_ERROR;\ - type MCIF_WB_BUF_2_FRAME_LENGTH_ERROR;\ - type MCIF_WB_BUF_2_CUR_LINE_R;\ - type MCIF_WB_BUF_2_NEW_CONTENT;\ - type MCIF_WB_BUF_2_COLOR_DEPTH;\ - type MCIF_WB_BUF_2_TMZ_BLACK_PIXEL;\ - type MCIF_WB_BUF_2_TMZ;\ - type MCIF_WB_BUF_2_Y_OVERRUN;\ - type MCIF_WB_BUF_2_C_OVERRUN;\ - type MCIF_WB_BUF_3_ACTIVE;\ - type MCIF_WB_BUF_3_SW_LOCKED;\ - type MCIF_WB_BUF_3_VCE_LOCKED;\ - type MCIF_WB_BUF_3_OVERFLOW;\ - type MCIF_WB_BUF_3_DISABLE;\ - type MCIF_WB_BUF_3_MODE;\ - type MCIF_WB_BUF_3_BUFTAG;\ - type MCIF_WB_BUF_3_NXT_BUF;\ - type MCIF_WB_BUF_3_FIELD;\ - type MCIF_WB_BUF_3_CUR_LINE_L;\ - type MCIF_WB_BUF_3_LONG_LINE_ERROR;\ - type MCIF_WB_BUF_3_SHORT_LINE_ERROR;\ - type MCIF_WB_BUF_3_FRAME_LENGTH_ERROR;\ - type MCIF_WB_BUF_3_CUR_LINE_R;\ - type MCIF_WB_BUF_3_NEW_CONTENT;\ - type MCIF_WB_BUF_3_COLOR_DEPTH;\ - type MCIF_WB_BUF_3_TMZ_BLACK_PIXEL;\ - type MCIF_WB_BUF_3_TMZ;\ - type MCIF_WB_BUF_3_Y_OVERRUN;\ - type MCIF_WB_BUF_3_C_OVERRUN;\ - type MCIF_WB_BUF_4_ACTIVE;\ - type MCIF_WB_BUF_4_SW_LOCKED;\ - type MCIF_WB_BUF_4_VCE_LOCKED;\ - type MCIF_WB_BUF_4_OVERFLOW;\ - type MCIF_WB_BUF_4_DISABLE;\ - type MCIF_WB_BUF_4_MODE;\ - type MCIF_WB_BUF_4_BUFTAG;\ - type MCIF_WB_BUF_4_NXT_BUF;\ - type MCIF_WB_BUF_4_FIELD;\ - type MCIF_WB_BUF_4_CUR_LINE_L;\ - type MCIF_WB_BUF_4_LONG_LINE_ERROR;\ - type MCIF_WB_BUF_4_SHORT_LINE_ERROR;\ - type MCIF_WB_BUF_4_FRAME_LENGTH_ERROR;\ - type MCIF_WB_BUF_4_CUR_LINE_R;\ - type MCIF_WB_BUF_4_NEW_CONTENT;\ - type MCIF_WB_BUF_4_COLOR_DEPTH;\ - type MCIF_WB_BUF_4_TMZ_BLACK_PIXEL;\ - type MCIF_WB_BUF_4_TMZ;\ - type MCIF_WB_BUF_4_Y_OVERRUN;\ - type MCIF_WB_BUF_4_C_OVERRUN;\ - type MCIF_WB_CLIENT_ARBITRATION_SLICE;\ - type MCIF_WB_TIME_PER_PIXEL;\ - type WM_CHANGE_ACK_FORCE_ON;\ - type MCIF_WB_CLI_WATERMARK_MASK;\ - type MCIF_WB_TEST_DEBUG_INDEX;\ - type MCIF_WB_TEST_DEBUG_DATA;\ - type MCIF_WB_BUF_1_ADDR_Y;\ - type MCIF_WB_BUF_1_ADDR_Y_OFFSET;\ - type MCIF_WB_BUF_1_ADDR_C;\ - type MCIF_WB_BUF_1_ADDR_C_OFFSET;\ - type MCIF_WB_BUF_2_ADDR_Y;\ - type MCIF_WB_BUF_2_ADDR_Y_OFFSET;\ - type MCIF_WB_BUF_2_ADDR_C;\ - type MCIF_WB_BUF_2_ADDR_C_OFFSET;\ - type MCIF_WB_BUF_3_ADDR_Y;\ - type MCIF_WB_BUF_3_ADDR_Y_OFFSET;\ - type MCIF_WB_BUF_3_ADDR_C;\ - type MCIF_WB_BUF_3_ADDR_C_OFFSET;\ - type MCIF_WB_BUF_4_ADDR_Y;\ - type MCIF_WB_BUF_4_ADDR_Y_OFFSET;\ - type MCIF_WB_BUF_4_ADDR_C;\ - type MCIF_WB_BUF_4_ADDR_C_OFFSET;\ - type MCIF_WB_BUFMGR_VCE_LOCK_IGNORE;\ - type MCIF_WB_BUFMGR_VCE_INT_EN;\ - type MCIF_WB_BUFMGR_VCE_INT_ACK;\ - type MCIF_WB_BUFMGR_VCE_SLICE_INT_EN;\ - type MCIF_WB_BUFMGR_VCE_LOCK;\ - type MCIF_WB_BUFMGR_SLICE_SIZE;\ - type NB_PSTATE_CHANGE_REFRESH_WATERMARK;\ - type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST;\ - type NB_PSTATE_CHANGE_FORCE_ON;\ - type NB_PSTATE_ALLOW_FOR_URGENT;\ - type NB_PSTATE_CHANGE_WATERMARK_MASK;\ - type MCIF_WB_CLI_WATERMARK;\ - type MCIF_WB_CLI_CLOCK_GATER_OVERRIDE;\ - type MCIF_WB_PITCH_SIZE_WARMUP;\ - type DIS_REFRESH_UNDER_NBPREQ;\ - type PERFRAME_SELF_REFRESH;\ - type MAX_SCALED_TIME_TO_URGENT;\ - type MCIF_WB_SECURITY_LEVEL;\ - type MCIF_WB_BUF_LUMA_SIZE;\ - type MCIF_WB_BUF_CHROMA_SIZE;\ - type MCIF_WB_BUF_1_ADDR_Y_HIGH;\ - type MCIF_WB_BUF_1_ADDR_C_HIGH;\ - type MCIF_WB_BUF_2_ADDR_Y_HIGH;\ - type MCIF_WB_BUF_2_ADDR_C_HIGH;\ - type MCIF_WB_BUF_3_ADDR_Y_HIGH;\ - type MCIF_WB_BUF_3_ADDR_C_HIGH;\ - type MCIF_WB_BUF_4_ADDR_Y_HIGH;\ - type MCIF_WB_BUF_4_ADDR_C_HIGH;\ - type MCIF_WB_BUF_1_RESOLUTION_WIDTH;\ - type 
MCIF_WB_BUF_1_RESOLUTION_HEIGHT;\ - type MCIF_WB_BUF_2_RESOLUTION_WIDTH;\ - type MCIF_WB_BUF_2_RESOLUTION_HEIGHT;\ - type MCIF_WB_BUF_3_RESOLUTION_WIDTH;\ - type MCIF_WB_BUF_3_RESOLUTION_HEIGHT;\ - type MCIF_WB_BUF_4_RESOLUTION_WIDTH;\ - type MCIF_WB_BUF_4_RESOLUTION_HEIGHT;\ - type MCIF_WB0_WM_CHG_SEL;\ - type MCIF_WB0_WM_CHG_REQ;\ - type MCIF_WB0_WM_CHG_ACK_INT_DIS;\ - type MCIF_WB0_WM_CHG_ACK_INT_STATUS - -#define MCIF_WB_REG_VARIABLE_LIST_DCN2_0 \ - uint32_t MCIF_WB_BUFMGR_SW_CONTROL;\ - uint32_t MCIF_WB_BUFMGR_CUR_LINE_R;\ - uint32_t MCIF_WB_BUFMGR_STATUS;\ - uint32_t MCIF_WB_BUF_PITCH;\ - uint32_t MCIF_WB_BUF_1_STATUS;\ - uint32_t MCIF_WB_BUF_1_STATUS2;\ - uint32_t MCIF_WB_BUF_2_STATUS;\ - uint32_t MCIF_WB_BUF_2_STATUS2;\ - uint32_t MCIF_WB_BUF_3_STATUS;\ - uint32_t MCIF_WB_BUF_3_STATUS2;\ - uint32_t MCIF_WB_BUF_4_STATUS;\ - uint32_t MCIF_WB_BUF_4_STATUS2;\ - uint32_t MCIF_WB_ARBITRATION_CONTROL;\ - uint32_t MCIF_WB_SCLK_CHANGE;\ - uint32_t MCIF_WB_TEST_DEBUG_INDEX;\ - uint32_t MCIF_WB_TEST_DEBUG_DATA;\ - uint32_t MCIF_WB_BUF_1_ADDR_Y;\ - uint32_t MCIF_WB_BUF_1_ADDR_Y_OFFSET;\ - uint32_t MCIF_WB_BUF_1_ADDR_C;\ - uint32_t MCIF_WB_BUF_1_ADDR_C_OFFSET;\ - uint32_t MCIF_WB_BUF_2_ADDR_Y;\ - uint32_t MCIF_WB_BUF_2_ADDR_Y_OFFSET;\ - uint32_t MCIF_WB_BUF_2_ADDR_C;\ - uint32_t MCIF_WB_BUF_2_ADDR_C_OFFSET;\ - uint32_t MCIF_WB_BUF_3_ADDR_Y;\ - uint32_t MCIF_WB_BUF_3_ADDR_Y_OFFSET;\ - uint32_t MCIF_WB_BUF_3_ADDR_C;\ - uint32_t MCIF_WB_BUF_3_ADDR_C_OFFSET;\ - uint32_t MCIF_WB_BUF_4_ADDR_Y;\ - uint32_t MCIF_WB_BUF_4_ADDR_Y_OFFSET;\ - uint32_t MCIF_WB_BUF_4_ADDR_C;\ - uint32_t MCIF_WB_BUF_4_ADDR_C_OFFSET;\ - uint32_t MCIF_WB_BUFMGR_VCE_CONTROL;\ - uint32_t MCIF_WB_NB_PSTATE_LATENCY_WATERMARK;\ - uint32_t MCIF_WB_NB_PSTATE_CONTROL;\ - uint32_t MCIF_WB_WATERMARK;\ - uint32_t MCIF_WB_CLOCK_GATER_CONTROL;\ - uint32_t MCIF_WB_WARM_UP_CNTL;\ - uint32_t MCIF_WB_SELF_REFRESH_CONTROL;\ - uint32_t MULTI_LEVEL_QOS_CTRL;\ - uint32_t MCIF_WB_SECURITY_LEVEL;\ - uint32_t MCIF_WB_BUF_LUMA_SIZE;\ - uint32_t MCIF_WB_BUF_CHROMA_SIZE;\ - uint32_t MCIF_WB_BUF_1_ADDR_Y_HIGH;\ - uint32_t MCIF_WB_BUF_1_ADDR_C_HIGH;\ - uint32_t MCIF_WB_BUF_2_ADDR_Y_HIGH;\ - uint32_t MCIF_WB_BUF_2_ADDR_C_HIGH;\ - uint32_t MCIF_WB_BUF_3_ADDR_Y_HIGH;\ - uint32_t MCIF_WB_BUF_3_ADDR_C_HIGH;\ - uint32_t MCIF_WB_BUF_4_ADDR_Y_HIGH;\ - uint32_t MCIF_WB_BUF_4_ADDR_C_HIGH;\ - uint32_t MCIF_WB_BUF_1_RESOLUTION;\ - uint32_t MCIF_WB_BUF_2_RESOLUTION;\ - uint32_t MCIF_WB_BUF_3_RESOLUTION;\ - uint32_t MCIF_WB_BUF_4_RESOLUTION;\ - uint32_t SMU_WM_CONTROL - -struct dcn20_mmhubbub_registers { - MCIF_WB_REG_VARIABLE_LIST_DCN2_0; -}; - - -struct dcn20_mmhubbub_mask { - MCIF_WB_REG_FIELD_LIST_DCN2_0(uint32_t); -}; - -struct dcn20_mmhubbub_shift { - MCIF_WB_REG_FIELD_LIST_DCN2_0(uint8_t); -}; - -struct dcn20_mmhubbub { - struct mcif_wb base; - const struct dcn20_mmhubbub_registers *mcif_wb_regs; - const struct dcn20_mmhubbub_shift *mcif_wb_shift; - const struct dcn20_mmhubbub_mask *mcif_wb_mask; -}; - -void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb, - struct mcif_irq_params *params); - -void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb); - -void mmhubbub2_disable_mcif(struct mcif_wb *mcif_wb); - -void mcifwb2_dump_frame(struct mcif_wb *mcif_wb, - struct mcif_buf_params *mcif_params, - enum dwb_scaler_mode out_format, - unsigned int dest_width, - unsigned int dest_height, - struct mcif_wb_frame_dump_info *dump_info, - unsigned char *luma_buffer, - unsigned char *chroma_buffer, - unsigned char *dest_luma_buffer, - unsigned char *dest_chroma_buffer); - 
-void dcn20_mmhubbub_construct(struct dcn20_mmhubbub *mcif_wb20, - struct dc_context *ctx, - const struct dcn20_mmhubbub_registers *mcif_wb_regs, - const struct dcn20_mmhubbub_shift *mcif_wb_shift, - const struct dcn20_mmhubbub_mask *mcif_wb_mask, - int inst); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c deleted file mode 100644 index ea73473b970a6e..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ /dev/null @@ -1,610 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dcn20_mpc.h" - -#include "reg_helper.h" -#include "dc.h" -#include "mem_input.h" -#include "dcn10/dcn10_cm_common.h" - -#define REG(reg)\ - mpc20->mpc_regs->reg - -#define IND_REG(index) \ - (index) - -#define CTX \ - mpc20->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - mpc20->mpc_shift->field_name, mpc20->mpc_mask->field_name - -#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) - -void mpc2_update_blending( - struct mpc *mpc, - struct mpcc_blnd_cfg *blnd_cfg, - int mpcc_id) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id); - - REG_UPDATE_7(MPCC_CONTROL[mpcc_id], - MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode, - MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha, - MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only, - MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha, - MPCC_GLOBAL_GAIN, blnd_cfg->global_gain, - MPCC_BG_BPC, blnd_cfg->background_color_bpc, - MPCC_BOT_GAIN_MODE, blnd_cfg->bottom_gain_mode); - - REG_SET(MPCC_TOP_GAIN[mpcc_id], 0, MPCC_TOP_GAIN, blnd_cfg->top_gain); - REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain); - REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain); - - mpcc->blnd_cfg = *blnd_cfg; -} - -void mpc2_set_denorm( - struct mpc *mpc, - int opp_id, - enum dc_color_depth output_depth) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - int denorm_mode = 0; - - switch (output_depth) { - case COLOR_DEPTH_666: - denorm_mode = 1; - break; - case COLOR_DEPTH_888: - denorm_mode = 2; - break; - case COLOR_DEPTH_999: - denorm_mode = 3; - break; - case COLOR_DEPTH_101010: - denorm_mode = 4; - break; - case COLOR_DEPTH_111111: - denorm_mode = 5; - break; - case COLOR_DEPTH_121212: - denorm_mode = 6; - break; - case 
COLOR_DEPTH_141414: - case COLOR_DEPTH_161616: - default: - /* not valid used case! */ - break; - } - - REG_UPDATE(DENORM_CONTROL[opp_id], - MPC_OUT_DENORM_MODE, denorm_mode); -} - -void mpc2_set_denorm_clamp( - struct mpc *mpc, - int opp_id, - struct mpc_denorm_clamp denorm_clamp) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - REG_UPDATE_2(DENORM_CONTROL[opp_id], - MPC_OUT_DENORM_CLAMP_MAX_R_CR, denorm_clamp.clamp_max_r_cr, - MPC_OUT_DENORM_CLAMP_MIN_R_CR, denorm_clamp.clamp_min_r_cr); - REG_UPDATE_2(DENORM_CLAMP_G_Y[opp_id], - MPC_OUT_DENORM_CLAMP_MAX_G_Y, denorm_clamp.clamp_max_g_y, - MPC_OUT_DENORM_CLAMP_MIN_G_Y, denorm_clamp.clamp_min_g_y); - REG_UPDATE_2(DENORM_CLAMP_B_CB[opp_id], - MPC_OUT_DENORM_CLAMP_MAX_B_CB, denorm_clamp.clamp_max_b_cb, - MPC_OUT_DENORM_CLAMP_MIN_B_CB, denorm_clamp.clamp_min_b_cb); -} - - - -void mpc2_set_output_csc( - struct mpc *mpc, - int opp_id, - const uint16_t *regval, - enum mpc_output_csc_mode ocsc_mode) -{ - uint32_t cur_mode; - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - struct color_matrices_reg ocsc_regs; - - if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - return; - } - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - - /* determine which CSC coefficients (A or B) we are using - * currently. select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, - MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, - MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); - - if (cur_mode != MPC_OUTPUT_CSC_COEF_A) - ocsc_mode = MPC_OUTPUT_CSC_COEF_A; - else - ocsc_mode = MPC_OUTPUT_CSC_COEF_B; - - ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A; - ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A; - ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; - ocsc_regs.masks.csc_c12 = mpc20->mpc_mask->MPC_OCSC_C12_A; - - if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) { - ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]); - ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]); - } else { - ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); - ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); - } - - cm_helper_program_color_matrices( - mpc20->base.ctx, - regval, - &ocsc_regs); - - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); -} - -void mpc2_set_ocsc_default( - struct mpc *mpc, - int opp_id, - enum dc_color_space color_space, - enum mpc_output_csc_mode ocsc_mode) -{ - uint32_t cur_mode; - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - uint32_t arr_size; - struct color_matrices_reg ocsc_regs; - const uint16_t *regval = NULL; - - if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - return; - } - - regval = find_color_matrix(color_space, &arr_size); - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - - /* determine which CSC coefficients (A or B) we are using - * currently. 
select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, - MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, - MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); - - if (cur_mode != MPC_OUTPUT_CSC_COEF_A) - ocsc_mode = MPC_OUTPUT_CSC_COEF_A; - else - ocsc_mode = MPC_OUTPUT_CSC_COEF_B; - - ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A; - ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A; - ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; - ocsc_regs.masks.csc_c12 = mpc20->mpc_mask->MPC_OCSC_C12_A; - - - if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) { - ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]); - ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]); - } else { - ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); - ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); - } - - cm_helper_program_color_matrices( - mpc20->base.ctx, - regval, - &ocsc_regs); - - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); -} - -static void mpc2_ogam_get_reg_field( - struct mpc *mpc, - struct xfer_func_reg *reg) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - reg->shifts.exp_region0_lut_offset = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->shifts.field_region_end = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; - reg->masks.field_region_linear_slope = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; - reg->shifts.exp_region_start = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; -} - -void mpc20_power_on_ogam_lut( - struct mpc *mpc, int mpcc_id, - bool power_on) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0, - MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 
1:0); - -} - -static void mpc20_configure_ogam_lut( - struct mpc *mpc, int mpcc_id, - bool is_ram_a) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - REG_UPDATE_2(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id], - MPCC_OGAM_LUT_WRITE_EN_MASK, 7, - MPCC_OGAM_LUT_RAM_SEL, is_ram_a == true ? 0:1); - - REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0); -} - -static enum dc_lut_mode mpc20_get_ogam_current(struct mpc *mpc, int mpcc_id) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id], MPCC_OGAM_CONFIG_STATUS, &state_mode); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -static void mpc2_program_lutb(struct mpc *mpc, int mpcc_id, - const struct pwl_params *params) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - struct xfer_func_reg gam_regs; - - mpc2_ogam_get_reg_field(mpc, &gam_regs); - - gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMB_START_CNTL_B[mpcc_id]); - gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMB_START_CNTL_G[mpcc_id]); - gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMB_START_CNTL_R[mpcc_id]); - gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_B[mpcc_id]); - gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_G[mpcc_id]); - gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_R[mpcc_id]); - gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMB_END_CNTL1_B[mpcc_id]); - gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMB_END_CNTL2_B[mpcc_id]); - gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMB_END_CNTL1_G[mpcc_id]); - gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMB_END_CNTL2_G[mpcc_id]); - gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMB_END_CNTL1_R[mpcc_id]); - gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMB_END_CNTL2_R[mpcc_id]); - gam_regs.region_start = REG(MPCC_OGAM_RAMB_REGION_0_1[mpcc_id]); - gam_regs.region_end = REG(MPCC_OGAM_RAMB_REGION_32_33[mpcc_id]); - - cm_helper_program_xfer_func(mpc20->base.ctx, params, &gam_regs); - -} - -static void mpc2_program_luta(struct mpc *mpc, int mpcc_id, - const struct pwl_params *params) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - struct xfer_func_reg gam_regs; - - mpc2_ogam_get_reg_field(mpc, &gam_regs); - - gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMA_START_CNTL_B[mpcc_id]); - gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMA_START_CNTL_G[mpcc_id]); - gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMA_START_CNTL_R[mpcc_id]); - gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_B[mpcc_id]); - gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_G[mpcc_id]); - gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_R[mpcc_id]); - gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMA_END_CNTL1_B[mpcc_id]); - gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMA_END_CNTL2_B[mpcc_id]); - gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMA_END_CNTL1_G[mpcc_id]); - gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMA_END_CNTL2_G[mpcc_id]); - gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMA_END_CNTL1_R[mpcc_id]); - gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMA_END_CNTL2_R[mpcc_id]); - gam_regs.region_start = REG(MPCC_OGAM_RAMA_REGION_0_1[mpcc_id]); - gam_regs.region_end = REG(MPCC_OGAM_RAMA_REGION_32_33[mpcc_id]); - - cm_helper_program_xfer_func(mpc20->base.ctx, params, &gam_regs); - -} - -static void mpc20_program_ogam_pwl( - struct mpc *mpc, int mpcc_id, - const struct pwl_result_data *rgb, 
- uint32_t num) -{ - uint32_t i; - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - PERF_TRACE(); - REG_SEQ_START(); - - for (i = 0 ; i < num; i++) { - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, - MPCC_OGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, - MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, - MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg); - } - - REG_SEQ_SUBMIT(); - PERF_TRACE(); - REG_SEQ_WAIT_DONE(); - PERF_TRACE(); -} - -static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, - enum dc_lut_mode current_mode, - enum dc_lut_mode next_mode) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - if (mpc->ctx->dc->debug.cm_in_bypass) { - REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); - return; - } - - if (mpc->ctx->dc->work_arounds.dedcn20_305_wa == false) { - /*hw fixed in new review*/ - return; - } - if (current_mode == LUT_BYPASS) - /*this will only work if OTG is locked. - *if we were to support OTG unlock case, - *the workaround will be more complex - */ - REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, - next_mode == LUT_RAM_A ? 1:2); -} - -void mpc2_set_output_gamma( - struct mpc *mpc, - int mpcc_id, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - if (mpc->ctx->dc->debug.cm_in_bypass) { - REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); - return; - } - - if (params == NULL) { - REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); - return; - } - - current_mode = mpc20_get_ogam_current(mpc, mpcc_id); - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - mpc20_power_on_ogam_lut(mpc, mpcc_id, true); - mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - mpc2_program_luta(mpc, mpcc_id, params); - else - mpc2_program_lutb(mpc, mpcc_id, params); - - apply_DEDCN20_305_wa(mpc, mpcc_id, current_mode, next_mode); - - mpc20_program_ogam_pwl( - mpc, mpcc_id, params->rgb_resulted, params->hw_points_num); - - REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, - next_mode == LUT_RAM_A ? 
1:2); -} -void mpc2_assert_idle_mpcc(struct mpc *mpc, int id) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - unsigned int mpc_disabled; - - ASSERT(!(mpc20->mpcc_in_use_mask & 1 << id)); - REG_GET(MPCC_STATUS[id], MPCC_DISABLED, &mpc_disabled); - if (mpc_disabled) - return; - - REG_WAIT(MPCC_STATUS[id], - MPCC_IDLE, 1, - 1, 100000); -} - -void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - unsigned int top_sel, mpc_busy, mpc_idle, mpc_disabled; - - REG_GET(MPCC_TOP_SEL[mpcc_id], - MPCC_TOP_SEL, &top_sel); - - REG_GET_3(MPCC_STATUS[mpcc_id], - MPCC_BUSY, &mpc_busy, - MPCC_IDLE, &mpc_idle, - MPCC_DISABLED, &mpc_disabled); - - if (top_sel == 0xf) { - ASSERT(!mpc_busy); - ASSERT(mpc_idle); - ASSERT(mpc_disabled); - } else { - ASSERT(!mpc_disabled); - ASSERT(!mpc_idle); - } -} - -static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) -{ - mpcc->mpcc_id = mpcc_inst; - mpcc->dpp_id = 0xf; - mpcc->mpcc_bot = NULL; - mpcc->blnd_cfg.overlap_only = false; - mpcc->blnd_cfg.global_alpha = 0xff; - mpcc->blnd_cfg.global_gain = 0xff; - mpcc->blnd_cfg.background_color_bpc = 4; - mpcc->blnd_cfg.bottom_gain_mode = 0; - mpcc->blnd_cfg.top_gain = 0x1f000; - mpcc->blnd_cfg.bottom_inside_gain = 0x1f000; - mpcc->blnd_cfg.bottom_outside_gain = 0x1f000; - mpcc->sm_cfg.enable = false; -} - -static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) -{ - struct mpcc *tmp_mpcc = tree->opp_list; - - while (tmp_mpcc != NULL) { - if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id) - return tmp_mpcc; - - /* avoid circular linked list */ - ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot); - if (tmp_mpcc == tmp_mpcc->mpcc_bot) - break; - - tmp_mpcc = tmp_mpcc->mpcc_bot; - } - return NULL; -} - -static void mpc2_read_mpcc_state( - struct mpc *mpc, - int mpcc_inst, - struct mpcc_state *s) -{ - struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); - - REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); - REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); - REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); - REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, - MPCC_ALPHA_BLND_MODE, &s->alpha_mode, - MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, - MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); - REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, - MPCC_BUSY, &s->busy); - - /* Gamma block state */ - REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst], - MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode); -} - -static const struct mpc_funcs dcn20_mpc_funcs = { - .read_mpcc_state = mpc2_read_mpcc_state, - .insert_plane = mpc1_insert_plane, - .remove_mpcc = mpc1_remove_mpcc, - .mpc_init = mpc1_mpc_init, - .mpc_init_single_inst = mpc1_mpc_init_single_inst, - .update_blending = mpc2_update_blending, - .cursor_lock = mpc1_cursor_lock, - .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp, - .wait_for_idle = mpc2_assert_idle_mpcc, - .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect, - .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, - .set_denorm = mpc2_set_denorm, - .set_denorm_clamp = mpc2_set_denorm_clamp, - .set_output_csc = mpc2_set_output_csc, - .set_ocsc_default = mpc2_set_ocsc_default, - .set_output_gamma = mpc2_set_output_gamma, - .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut, - .get_mpc_out_mux = mpc1_get_mpc_out_mux, - .set_bg_color = mpc1_set_bg_color, -}; - -void dcn20_mpc_construct(struct dcn20_mpc *mpc20, - struct dc_context *ctx, - const struct dcn20_mpc_registers *mpc_regs, - const 
struct dcn20_mpc_shift *mpc_shift, - const struct dcn20_mpc_mask *mpc_mask, - int num_mpcc) -{ - int i; - - mpc20->base.ctx = ctx; - - mpc20->base.funcs = &dcn20_mpc_funcs; - - mpc20->mpc_regs = mpc_regs; - mpc20->mpc_shift = mpc_shift; - mpc20->mpc_mask = mpc_mask; - - mpc20->mpcc_in_use_mask = 0; - mpc20->num_mpcc = num_mpcc; - - for (i = 0; i < MAX_MPCC; i++) - mpc2_init_mpcc(&mpc20->base.mpcc_array[i], i); -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h deleted file mode 100644 index 496658f420dbdb..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h +++ /dev/null @@ -1,312 +0,0 @@ -/* Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_MPCC_DCN20_H__ -#define __DC_MPCC_DCN20_H__ - -#include "dcn10/dcn10_mpc.h" - -#define TO_DCN20_MPC(mpc_base) \ - container_of(mpc_base, struct dcn20_mpc, base) - -#define MPC_REG_LIST_DCN2_0(inst)\ - MPC_COMMON_REG_LIST_DCN1_0(inst),\ - SRII(MPCC_TOP_GAIN, MPCC, inst),\ - SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst),\ - SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst),\ - SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst),\ - SRII(MPCC_MEM_PWR_CTRL, MPCC, inst),\ - SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_MODE, MPCC_OGAM, inst) - -#define MPC_OUT_MUX_REG_LIST_DCN2_0(inst) \ - MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst),\ - SRII(CSC_MODE, MPC_OUT, inst),\ - SRII(CSC_C11_C12_A, MPC_OUT, inst),\ - SRII(CSC_C33_C34_A, MPC_OUT, inst),\ - SRII(CSC_C11_C12_B, MPC_OUT, inst),\ - SRII(CSC_C33_C34_B, MPC_OUT, inst),\ - SRII(DENORM_CONTROL, MPC_OUT, inst),\ - SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\ - SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst) - -#define MPC_DBG_REG_LIST_DCN2_0() \ - SR(MPC_OCSC_TEST_DEBUG_DATA),\ - SR(MPC_OCSC_TEST_DEBUG_INDEX) - -#define MPC_REG_VARIABLE_LIST_DCN2_0 \ - MPC_COMMON_REG_VARIABLE_LIST \ - uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \ - uint32_t MPCC_BOT_GAIN_INSIDE[MAX_MPCC]; \ - uint32_t MPCC_BOT_GAIN_OUTSIDE[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_START_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_START_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_START_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_END_CNTL1_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_END_CNTL2_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_END_CNTL1_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_END_CNTL2_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_END_CNTL1_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_END_CNTL2_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_REGION_0_1[MAX_MPCC]; \ - uint32_t 
MPCC_OGAM_RAMA_REGION_32_33[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_START_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_START_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_START_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_END_CNTL1_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_END_CNTL2_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_END_CNTL1_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_END_CNTL2_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_END_CNTL1_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_END_CNTL2_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_REGION_0_1[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_REGION_32_33[MAX_MPCC];\ - uint32_t MPCC_MEM_PWR_CTRL[MAX_MPCC];\ - uint32_t MPCC_OGAM_LUT_INDEX[MAX_MPCC];\ - uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\ - uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\ - uint32_t MPCC_OGAM_MODE[MAX_MPCC];\ - uint32_t MPC_OCSC_TEST_DEBUG_DATA;\ - uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\ - uint32_t CSC_MODE[MAX_OPP]; \ - uint32_t CSC_C11_C12_A[MAX_OPP]; \ - uint32_t CSC_C33_C34_A[MAX_OPP]; \ - uint32_t CSC_C11_C12_B[MAX_OPP]; \ - uint32_t CSC_C33_C34_B[MAX_OPP]; \ - uint32_t DENORM_CONTROL[MAX_OPP]; \ - uint32_t DENORM_CLAMP_G_Y[MAX_OPP]; \ - uint32_t DENORM_CLAMP_B_CB[MAX_OPP]; - -#define MPC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \ - MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ - SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ - SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ - SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ - SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\ - SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ - SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ - SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ - SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM_RAMB_EXP_REGION_END_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh),\ - 
SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_WRITE_EN_MASK, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_RAM_SEL, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_CONFIG_STATUS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_MODE, MPCC_OGAM_MODE, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ - SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) - -/* - * DCN2 MPC_OCSC debug status register: - * - * Status index including current OCSC Mode is 1 - * OCSC Mode: [1..0] - */ -#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1 - -#define MPC_DEBUG_REG_LIST_SH_DCN20 \ - .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0 - -#define MPC_DEBUG_REG_LIST_MASK_DCN20 \ - .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3 - -#define MPC_REG_FIELD_LIST_DCN2_0(type) \ - MPC_REG_FIELD_LIST(type)\ - type MPCC_BG_BPC;\ - type MPCC_BOT_GAIN_MODE;\ - type MPCC_TOP_GAIN;\ - type MPCC_BOT_GAIN_INSIDE;\ - type MPCC_BOT_GAIN_OUTSIDE;\ - type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\ - type MPC_OCSC_TEST_DEBUG_INDEX;\ - type MPC_OCSC_MODE;\ - type MPC_OCSC_C11_A;\ - type MPC_OCSC_C12_A;\ - type MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;\ - type MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;\ - type MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;\ - type MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;\ - type MPCC_OGAM_RAMA_EXP_REGION_END_B;\ - type MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;\ - type MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;\ - type MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;\ - type MPCC_OGAM_RAMA_EXP_REGION_START_B;\ - type MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;\ - type MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET;\ - type MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS;\ - type MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET;\ - type MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS;\ - type MPCC_OGAM_RAMB_EXP_REGION_END_B;\ - type MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B;\ - type MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B;\ - type MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;\ - type MPCC_OGAM_RAMB_EXP_REGION_START_B;\ - type MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B;\ - type MPCC_OGAM_MEM_PWR_FORCE;\ - type MPCC_OGAM_LUT_INDEX;\ - type MPCC_OGAM_LUT_WRITE_EN_MASK;\ - type MPCC_OGAM_LUT_RAM_SEL;\ - type MPCC_OGAM_CONFIG_STATUS;\ - type MPCC_OGAM_LUT_DATA;\ - type MPCC_OGAM_MODE;\ - type MPC_OUT_DENORM_MODE;\ - type MPC_OUT_DENORM_CLAMP_MAX_R_CR;\ - type MPC_OUT_DENORM_CLAMP_MIN_R_CR;\ - type MPC_OUT_DENORM_CLAMP_MAX_G_Y;\ - type MPC_OUT_DENORM_CLAMP_MIN_G_Y;\ - type 
MPC_OUT_DENORM_CLAMP_MAX_B_CB;\ - type MPC_OUT_DENORM_CLAMP_MIN_B_CB;\ - type MPCC_DISABLED;\ - type MPCC_OGAM_MEM_PWR_DIS; - -struct dcn20_mpc_registers { - MPC_REG_VARIABLE_LIST_DCN2_0 -}; - -struct dcn20_mpc_shift { - MPC_REG_FIELD_LIST_DCN2_0(uint8_t) -}; - -struct dcn20_mpc_mask { - MPC_REG_FIELD_LIST_DCN2_0(uint32_t) -}; - -struct dcn20_mpc { - struct mpc base; - - int mpcc_in_use_mask; - int num_mpcc; - const struct dcn20_mpc_registers *mpc_regs; - const struct dcn20_mpc_shift *mpc_shift; - const struct dcn20_mpc_mask *mpc_mask; -}; - -void dcn20_mpc_construct(struct dcn20_mpc *mpcc20, - struct dc_context *ctx, - const struct dcn20_mpc_registers *mpc_regs, - const struct dcn20_mpc_shift *mpc_shift, - const struct dcn20_mpc_mask *mpc_mask, - int num_mpcc); - -void mpc2_update_blending( - struct mpc *mpc, - struct mpcc_blnd_cfg *blnd_cfg, - int mpcc_id); - -void mpc2_set_denorm( - struct mpc *mpc, - int opp_id, - enum dc_color_depth output_depth); - -void mpc2_set_denorm_clamp( - struct mpc *mpc, - int opp_id, - struct mpc_denorm_clamp denorm_clamp); - -void mpc2_set_output_csc( - struct mpc *mpc, - int opp_id, - const uint16_t *regval, - enum mpc_output_csc_mode ocsc_mode); - -void mpc2_set_ocsc_default( - struct mpc *mpc, - int opp_id, - enum dc_color_space color_space, - enum mpc_output_csc_mode ocsc_mode); - -void mpc2_set_output_gamma( - struct mpc *mpc, - int mpcc_id, - const struct pwl_params *params); - -void mpc2_assert_idle_mpcc(struct mpc *mpc, int id); -void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id); -void mpc20_power_on_ogam_lut(struct mpc *mpc, int mpcc_id, bool power_on); -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c deleted file mode 100644 index f5fe0cac7cb064..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ /dev/null @@ -1,415 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "core_types.h" -#include "dm_services.h" -#include "dcn20_opp.h" -#include "reg_helper.h" - -#define REG(reg) \ - (oppn20->regs->reg) - -#undef FN -#define FN(reg_name, field_name) \ - oppn20->opp_shift->field_name, oppn20->opp_mask->field_name - -#define CTX \ - oppn20->base.ctx - - -void opp2_set_disp_pattern_generator( - struct output_pixel_processor *opp, - enum controller_dp_test_pattern test_pattern, - enum controller_dp_color_space color_space, - enum dc_color_depth color_depth, - const struct tg_color *solid_color, - int width, - int height, - int offset) -{ - struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); - enum test_pattern_color_format bit_depth; - enum test_pattern_dyn_range dyn_range; - enum test_pattern_mode mode; - - /* color ramp generator mixes 16-bits color */ - uint32_t src_bpc = 16; - /* requested bpc */ - uint32_t dst_bpc; - uint32_t index; - /* RGB values of the color bars. - * Produce two RGB colors: RGB0 - white (all Fs) - * and RGB1 - black (all 0s) - * (three RGB components for two colors) - */ - uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000, - 0x0000, 0x0000}; - /* dest color (converted to the specified color format) */ - uint16_t dst_color[6]; - uint32_t inc_base; - - /* translate to bit depth */ - switch (color_depth) { - case COLOR_DEPTH_666: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6; - break; - case COLOR_DEPTH_888: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; - break; - case COLOR_DEPTH_101010: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10; - break; - case COLOR_DEPTH_121212: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12; - break; - default: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; - break; - } - - /* set DPG dimentions */ - REG_SET_2(DPG_DIMENSIONS, 0, - DPG_ACTIVE_WIDTH, width, - DPG_ACTIVE_HEIGHT, height); - - /* set DPG offset */ - REG_SET_2(DPG_OFFSET_SEGMENT, 0, - DPG_X_OFFSET, offset, - DPG_SEGMENT_WIDTH, 0); - - switch (test_pattern) { - case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES: - case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA: - { - dyn_range = (test_pattern == - CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ? - TEST_PATTERN_DYN_RANGE_CEA : - TEST_PATTERN_DYN_RANGE_VESA); - - switch (color_space) { - case CONTROLLER_DP_COLOR_SPACE_YCBCR601: - mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601; - break; - case CONTROLLER_DP_COLOR_SPACE_YCBCR709: - mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709; - break; - case CONTROLLER_DP_COLOR_SPACE_RGB: - default: - mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; - break; - } - - REG_UPDATE_6(DPG_CONTROL, - DPG_EN, 1, - DPG_MODE, mode, - DPG_DYNAMIC_RANGE, dyn_range, - DPG_BIT_DEPTH, bit_depth, - DPG_VRES, 6, - DPG_HRES, 6); - } - break; - - case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS: - case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS: - { - mode = (test_pattern == - CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ? - TEST_PATTERN_MODE_VERTICALBARS : - TEST_PATTERN_MODE_HORIZONTALBARS); - - switch (bit_depth) { - case TEST_PATTERN_COLOR_FORMAT_BPC_6: - dst_bpc = 6; - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_8: - dst_bpc = 8; - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_10: - dst_bpc = 10; - break; - default: - dst_bpc = 8; - break; - } - - /* adjust color to the required colorFormat */ - for (index = 0; index < 6; index++) { - /* dst = 2^dstBpc * src / 2^srcBpc = src >> - * (srcBpc - dstBpc); - */ - dst_color[index] = - src_color[index] >> (src_bpc - dst_bpc); - /* DPG_COLOUR registers are 16-bit MSB aligned value with bits 3:0 hardwired to ZERO. 
- * XXXXXXXXXX000000 for 10 bit, - * XXXXXXXX00000000 for 8 bit, - * XXXXXX0000000000 for 6 bits - */ - dst_color[index] <<= (16 - dst_bpc); - } - - REG_SET_2(DPG_COLOUR_R_CR, 0, - DPG_COLOUR1_R_CR, dst_color[0], - DPG_COLOUR0_R_CR, dst_color[3]); - REG_SET_2(DPG_COLOUR_G_Y, 0, - DPG_COLOUR1_G_Y, dst_color[1], - DPG_COLOUR0_G_Y, dst_color[4]); - REG_SET_2(DPG_COLOUR_B_CB, 0, - DPG_COLOUR1_B_CB, dst_color[2], - DPG_COLOUR0_B_CB, dst_color[5]); - - /* enable test pattern */ - REG_UPDATE_6(DPG_CONTROL, - DPG_EN, 1, - DPG_MODE, mode, - DPG_DYNAMIC_RANGE, 0, - DPG_BIT_DEPTH, bit_depth, - DPG_VRES, 0, - DPG_HRES, 0); - } - break; - - case CONTROLLER_DP_TEST_PATTERN_COLORRAMP: - { - mode = (bit_depth == - TEST_PATTERN_COLOR_FORMAT_BPC_10 ? - TEST_PATTERN_MODE_DUALRAMP_RGB : - TEST_PATTERN_MODE_SINGLERAMP_RGB); - - switch (bit_depth) { - case TEST_PATTERN_COLOR_FORMAT_BPC_6: - dst_bpc = 6; - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_8: - dst_bpc = 8; - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_10: - dst_bpc = 10; - break; - default: - dst_bpc = 8; - break; - } - - /* increment for the first ramp for one color gradation - * 1 gradation for 6-bit color is 2^10 - * gradations in 16-bit color - */ - inc_base = (src_bpc - dst_bpc); - - switch (bit_depth) { - case TEST_PATTERN_COLOR_FORMAT_BPC_6: - { - REG_SET_3(DPG_RAMP_CONTROL, 0, - DPG_RAMP0_OFFSET, 0, - DPG_INC0, inc_base, - DPG_INC1, 0); - REG_UPDATE_2(DPG_CONTROL, - DPG_VRES, 6, - DPG_HRES, 6); - } - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_8: - { - REG_SET_3(DPG_RAMP_CONTROL, 0, - DPG_RAMP0_OFFSET, 0, - DPG_INC0, inc_base, - DPG_INC1, 0); - REG_UPDATE_2(DPG_CONTROL, - DPG_VRES, 6, - DPG_HRES, 8); - } - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_10: - { - REG_SET_3(DPG_RAMP_CONTROL, 0, - DPG_RAMP0_OFFSET, 384 << 6, - DPG_INC0, inc_base, - DPG_INC1, inc_base + 2); - REG_UPDATE_2(DPG_CONTROL, - DPG_VRES, 5, - DPG_HRES, 8); - } - break; - default: - break; - } - - /* enable test pattern */ - REG_UPDATE_4(DPG_CONTROL, - DPG_EN, 1, - DPG_MODE, mode, - DPG_DYNAMIC_RANGE, 0, - DPG_BIT_DEPTH, bit_depth); - } - break; - case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE: - { - REG_WRITE(DPG_CONTROL, 0); - REG_WRITE(DPG_COLOUR_R_CR, 0); - REG_WRITE(DPG_COLOUR_G_Y, 0); - REG_WRITE(DPG_COLOUR_B_CB, 0); - REG_WRITE(DPG_RAMP_CONTROL, 0); - } - break; - case CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR: - { - opp2_dpg_set_blank_color(opp, solid_color); - REG_UPDATE_2(DPG_CONTROL, - DPG_EN, 1, - DPG_MODE, TEST_PATTERN_MODE_HORIZONTALBARS); - - REG_SET_2(DPG_DIMENSIONS, 0, - DPG_ACTIVE_WIDTH, width, - DPG_ACTIVE_HEIGHT, height); - } - break; - default: - break; - - } -} - -void opp2_program_dpg_dimensions( - struct output_pixel_processor *opp, - int width, int height) -{ - struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); - - REG_SET_2(DPG_DIMENSIONS, 0, - DPG_ACTIVE_WIDTH, width, - DPG_ACTIVE_HEIGHT, height); -} - -void opp2_dpg_set_blank_color( - struct output_pixel_processor *opp, - const struct tg_color *color) -{ - struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); - - /* 16-bit MSB aligned value. 
Bits 3:0 of this field are hardwired to ZERO */ - ASSERT(color); - REG_SET_2(DPG_COLOUR_B_CB, 0, - DPG_COLOUR1_B_CB, color->color_b_cb << 6, - DPG_COLOUR0_B_CB, color->color_b_cb << 6); - REG_SET_2(DPG_COLOUR_G_Y, 0, - DPG_COLOUR1_G_Y, color->color_g_y << 6, - DPG_COLOUR0_G_Y, color->color_g_y << 6); - REG_SET_2(DPG_COLOUR_R_CR, 0, - DPG_COLOUR1_R_CR, color->color_r_cr << 6, - DPG_COLOUR0_R_CR, color->color_r_cr << 6); -} - -bool opp2_dpg_is_blanked(struct output_pixel_processor *opp) -{ - struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); - uint32_t dpg_en, dpg_mode; - uint32_t double_buffer_pending; - - REG_GET_2(DPG_CONTROL, - DPG_EN, &dpg_en, - DPG_MODE, &dpg_mode); - - REG_GET(DPG_STATUS, - DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); - - return (dpg_en == 1) && - (double_buffer_pending == 0); -} - -bool opp2_dpg_is_pending(struct output_pixel_processor *opp) -{ - struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); - uint32_t double_buffer_pending; - uint32_t dpg_en; - - REG_GET(DPG_CONTROL, DPG_EN, &dpg_en); - - REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); - - return (dpg_en == 1 && double_buffer_pending == 1); -} - -void opp2_program_left_edge_extra_pixel( - struct output_pixel_processor *opp, - enum dc_pixel_encoding pixel_encoding, - bool is_primary) -{ - struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); - uint32_t count = opp2_get_left_edge_extra_pixel_count(opp, pixel_encoding, is_primary); - - /* - * Specifies the number of extra left edge pixels that are supplied to - * the 422 horizontal chroma sub-sample filter. - */ - REG_UPDATE(FMT_422_CONTROL, FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT, count); -} - -uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp, - enum dc_pixel_encoding pixel_encoding, bool is_primary) -{ - if ((pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420) && - !opp->ctx->dc->debug.force_chroma_subsampling_1tap && - !is_primary) - return 1; - else - return 0; -} - -/*****************************************/ -/* Constructor, Destructor */ -/*****************************************/ - -static struct opp_funcs dcn20_opp_funcs = { - .opp_set_dyn_expansion = opp1_set_dyn_expansion, - .opp_program_fmt = opp1_program_fmt, - .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, - .opp_program_stereo = opp1_program_stereo, - .opp_pipe_clock_control = opp1_pipe_clock_control, - .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, - .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, - .dpg_is_blanked = opp2_dpg_is_blanked, - .dpg_is_pending = opp2_dpg_is_pending, - .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, - .opp_destroy = opp1_destroy, - .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, - .opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count, -}; - -void dcn20_opp_construct(struct dcn20_opp *oppn20, - struct dc_context *ctx, - uint32_t inst, - const struct dcn20_opp_registers *regs, - const struct dcn20_opp_shift *opp_shift, - const struct dcn20_opp_mask *opp_mask) -{ - oppn20->base.ctx = ctx; - oppn20->base.inst = inst; - oppn20->base.funcs = &dcn20_opp_funcs; - - oppn20->regs = regs; - oppn20->opp_shift = opp_shift; - oppn20->opp_mask = opp_mask; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h deleted file mode 100644 index 34936e6c49f3fb..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ /dev/null @@ -1,174 
+0,0 @@ -/* Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPP_DCN20_H__ -#define __DC_OPP_DCN20_H__ - -#include "dcn10/dcn10_opp.h" - -#define TO_DCN20_OPP(opp)\ - container_of(opp, struct dcn20_opp, base) - -#define OPP_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define OPP_DPG_REG_LIST(id) \ - SRI(DPG_CONTROL, DPG, id), \ - SRI(DPG_DIMENSIONS, DPG, id), \ - SRI(DPG_OFFSET_SEGMENT, DPG, id), \ - SRI(DPG_COLOUR_B_CB, DPG, id), \ - SRI(DPG_COLOUR_G_Y, DPG, id), \ - SRI(DPG_COLOUR_R_CR, DPG, id), \ - SRI(DPG_RAMP_CONTROL, DPG, id), \ - SRI(DPG_STATUS, DPG, id) - -#define OPP_REG_LIST_DCN20(id) \ - OPP_REG_LIST_DCN10(id), \ - OPP_DPG_REG_LIST(id), \ - SRI(FMT_422_CONTROL, FMT, id), \ - SRI(OPPBUF_CONTROL1, OPPBUF, id) - -#define OPP_REG_VARIABLE_LIST_DCN2_0 \ - OPP_COMMON_REG_VARIABLE_LIST; \ - uint32_t FMT_422_CONTROL; \ - uint32_t DPG_CONTROL; \ - uint32_t DPG_DIMENSIONS; \ - uint32_t DPG_OFFSET_SEGMENT; \ - uint32_t DPG_COLOUR_B_CB; \ - uint32_t DPG_COLOUR_G_Y; \ - uint32_t DPG_COLOUR_R_CR; \ - uint32_t DPG_RAMP_CONTROL; \ - uint32_t DPG_STATUS - -#define OPP_DPG_MASK_SH_LIST(mask_sh) \ - OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \ - OPP_SF(DPG0_DPG_CONTROL, DPG_MODE, mask_sh), \ - OPP_SF(DPG0_DPG_CONTROL, DPG_DYNAMIC_RANGE, mask_sh), \ - OPP_SF(DPG0_DPG_CONTROL, DPG_BIT_DEPTH, mask_sh), \ - OPP_SF(DPG0_DPG_CONTROL, DPG_VRES, mask_sh), \ - OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \ - OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \ - OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \ - OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \ - OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \ - OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \ - OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \ - OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \ - OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR1_B_CB, mask_sh), \ - OPP_SF(DPG0_DPG_COLOUR_G_Y, DPG_COLOUR0_G_Y, mask_sh), \ - OPP_SF(DPG0_DPG_COLOUR_G_Y, DPG_COLOUR1_G_Y, mask_sh), \ - OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_RAMP0_OFFSET, mask_sh), \ - OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_INC0, mask_sh), \ - OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_INC1, mask_sh), \ - OPP_SF(DPG0_DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, mask_sh) - -#define OPP_MASK_SH_LIST_DCN20(mask_sh) \ - OPP_MASK_SH_LIST_DCN(mask_sh), \ - 
OPP_DPG_MASK_SH_LIST(mask_sh), \ - OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, mask_sh),\ - OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, mask_sh), \ - OPP_SF(FMT0_FMT_422_CONTROL, FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT, mask_sh) - -#define OPP_DCN20_REG_FIELD_LIST(type) \ - OPP_DCN10_REG_FIELD_LIST(type); \ - type FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT; \ - type DPG_EN; \ - type DPG_MODE; \ - type DPG_DYNAMIC_RANGE; \ - type DPG_BIT_DEPTH; \ - type DPG_VRES; \ - type DPG_HRES; \ - type DPG_ACTIVE_WIDTH; \ - type DPG_ACTIVE_HEIGHT; \ - type DPG_X_OFFSET; \ - type DPG_SEGMENT_WIDTH; \ - type DPG_COLOUR0_R_CR; \ - type DPG_COLOUR1_R_CR; \ - type DPG_COLOUR0_B_CB; \ - type DPG_COLOUR1_B_CB; \ - type DPG_COLOUR0_G_Y; \ - type DPG_COLOUR1_G_Y; \ - type DPG_RAMP0_OFFSET; \ - type DPG_INC0; \ - type DPG_INC1; \ - type DPG_DOUBLE_BUFFER_PENDING - -struct dcn20_opp_registers { - OPP_REG_VARIABLE_LIST_DCN2_0; -}; - -struct dcn20_opp_shift { - OPP_DCN20_REG_FIELD_LIST(uint8_t); -}; - -struct dcn20_opp_mask { - OPP_DCN20_REG_FIELD_LIST(uint32_t); -}; - -struct dcn20_opp { - struct output_pixel_processor base; - - const struct dcn20_opp_registers *regs; - const struct dcn20_opp_shift *opp_shift; - const struct dcn20_opp_mask *opp_mask; - - bool is_write_to_ram_a_safe; -}; - -void dcn20_opp_construct(struct dcn20_opp *oppn20, - struct dc_context *ctx, - uint32_t inst, - const struct dcn20_opp_registers *regs, - const struct dcn20_opp_shift *opp_shift, - const struct dcn20_opp_mask *opp_mask); - -void opp2_set_disp_pattern_generator( - struct output_pixel_processor *opp, - enum controller_dp_test_pattern test_pattern, - enum controller_dp_color_space color_space, - enum dc_color_depth color_depth, - const struct tg_color *solid_color, - int width, - int height, - int offset); - -void opp2_program_dpg_dimensions( - struct output_pixel_processor *opp, - int width, int height); - -bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); - -bool opp2_dpg_is_pending(struct output_pixel_processor *opp); - -void opp2_dpg_set_blank_color( - struct output_pixel_processor *opp, - const struct tg_color *color); - -void opp2_program_left_edge_extra_pixel ( - struct output_pixel_processor *opp, - enum dc_pixel_encoding pixel_encoding, bool is_primary); - -uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp, - enum dc_pixel_encoding pixel_encoding, bool is_primary); -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index 4c43af867d86ed..b17277de0340fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -23,15 +23,11 @@ # # -DCN30 := dcn30_mpc.o dcn30_vpg.o \ +DCN30 := dcn30_vpg.o \ dcn30_afmt.o \ - dcn30_dwb.o \ - dcn30_dwb_cm.o \ dcn30_cm_common.o \ dcn30_mmhubbub.o \ - - AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN30) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index b8327237ed4418..f31f0e3abfc0fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -28,7 +28,7 @@ #include "reg_helper.h" #include "dcn30/dcn30_dpp.h" #include "basics/conversion.h" -#include "dcn30_cm_common.h" +#include "dcn30/dcn30_cm_common.h" #include "custom_float.h" #define REG(reg) reg @@ -177,6 +177,8 @@ bool cm3_helper_translate_curve_to_hw_format( i += increment) 
{ if (j == hw_points) break; + if (i >= TRANSFER_FUNC_POINTS) + return false; rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; @@ -335,6 +337,8 @@ bool cm3_helper_translate_curve_to_degamma_hw_format( i += increment) { if (j == hw_points - 1) break; + if (i >= TRANSFER_FUNC_POINTS) + return false; rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h deleted file mode 100644 index bd98b327a6c70e..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dcn10/dcn10_cm_common.h" - -#ifndef __DAL_DCN30_CM_COMMON_H__ -#define __DAL_DCN30_CM_COMMON_H__ - -#define TF_HELPER_REG_FIELD_LIST_DCN3(type) \ - TF_HELPER_REG_FIELD_LIST(type);\ - type field_region_start_base;\ - type field_offset - -struct DCN3_xfer_func_shift { - TF_HELPER_REG_FIELD_LIST_DCN3(uint8_t); -}; - -struct DCN3_xfer_func_mask { - TF_HELPER_REG_FIELD_LIST_DCN3(uint32_t); -}; - -struct dcn3_xfer_func_reg { - struct DCN3_xfer_func_shift shifts; - struct DCN3_xfer_func_mask masks; - - TF_HELPER_REG_LIST; - uint32_t offset_b; - uint32_t offset_g; - uint32_t offset_r; - uint32_t start_base_cntl_b; - uint32_t start_base_cntl_g; - uint32_t start_base_cntl_r; -}; - -void cm_helper_program_gamcor_xfer_func( - struct dc_context *ctx, - const struct pwl_params *params, - const struct dcn3_xfer_func_reg *reg); - -bool cm3_helper_translate_curve_to_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params, bool fixpoint); - -bool cm3_helper_translate_curve_to_degamma_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params); - -bool cm3_helper_convert_to_custom_float( - struct pwl_result_data *rgb_resulted, - struct curve_points3 *corner_points, - uint32_t hw_points_num, - bool fixpoint); - -bool is_rgb_equal(const struct pwl_result_data *rgb, uint32_t num); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c deleted file mode 100644 index fae98cf520201d..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c +++ /dev/null @@ -1,285 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - - -#include "reg_helper.h" -#include "resource.h" -#include "dwb.h" -#include "dcn30_dwb.h" - - -#define REG(reg)\ - dwbc30->dwbc_regs->reg - -#define CTX \ - dwbc30->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name - -#define DC_LOGGER \ - dwbc30->base.ctx->logger - -static bool dwb3_get_caps(struct dwbc *dwbc, struct dwb_caps *caps) -{ - if (caps) { - caps->adapter_id = 0; /* we only support 1 adapter currently */ - caps->hw_version = DCN_VERSION_3_0; - caps->num_pipes = 2; - memset(&caps->reserved, 0, sizeof(caps->reserved)); - memset(&caps->reserved2, 0, sizeof(caps->reserved2)); - caps->sw_version = dwb_ver_2_0; - caps->caps.support_dwb = true; - caps->caps.support_ogam = true; - caps->caps.support_wbscl = true; - caps->caps.support_ocsc = false; - caps->caps.support_stereo = true; - return true; - } else { - return false; - } -} - -void dwb3_config_fc(struct dwbc *dwbc, struct dc_dwb_params *params) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - - /* Set DWB source size */ - REG_UPDATE_2(FC_SOURCE_SIZE, FC_SOURCE_WIDTH, params->cnv_params.src_width, - FC_SOURCE_HEIGHT, params->cnv_params.src_height); - - /* source size is not equal the source size, then enable cropping. */ - if (params->cnv_params.crop_en) { - REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 1); - REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_X, params->cnv_params.crop_x); - REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_Y, params->cnv_params.crop_y); - REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_WIDTH, params->cnv_params.crop_width); - REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_HEIGHT, params->cnv_params.crop_height); - } else { - REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 0); - } - - /* Set CAPTURE_RATE */ - REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_RATE, params->capture_rate); - - dwb3_set_stereo(dwbc, ¶ms->stereo_params); -} - -bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - DC_LOG_DWB("%s dwb3_enabled at inst = %d", __func__, dwbc->inst); - - /* Set WB_ENABLE (not double buffered; capture not enabled) */ - REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 1); - - /* Set FC parameters */ - dwb3_config_fc(dwbc, params); - - /* Program color processing unit */ - dwb3_program_hdr_mult(dwbc, params); - dwb3_set_gamut_remap(dwbc, params); - dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func); - - /* Program output denorm */ - dwb3_set_denorm(dwbc, params); - - /* Enable DWB capture enable (double buffered) */ - REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_ENABLE); - - /* First pixel count */ - REG_UPDATE(FC_FLOW_CTRL, FC_FIRST_PIXEL_DELAY_COUNT, 96); - - return true; -} - -bool dwb3_disable(struct dwbc *dwbc) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - - /* disable FC */ - REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_DISABLE); - - /* disable WB */ - REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 0); - - DC_LOG_DWB("%s dwb3_disabled at inst = %d", __func__, dwbc->inst); - return true; -} - -void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - unsigned int pre_locked; - - REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked); - - /* Lock DWB registers */ - if (pre_locked == 0) - REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1); - - /* Disable FC */ - REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable); - - /* Unlock 
DWB registers */ - if (pre_locked == 0) - REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0); - - DC_LOG_DWB("%s dwb3_fc_disabled at inst = %d", __func__, dwbc->inst); -} - - -bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - unsigned int pre_locked; - - /* - * Check if the caller has already locked DWB registers. - * If so: assume the caller will unlock, so don't touch the lock. - * If not: lock them for this update, then unlock after the - * update is complete. - */ - REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked); - DC_LOG_DWB("%s dwb update, inst = %d", __func__, dwbc->inst); - - if (pre_locked == 0) { - /* Lock DWB registers */ - REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1); - } - - /* Set FC parameters */ - dwb3_config_fc(dwbc, params); - - /* Program color processing unit */ - dwb3_program_hdr_mult(dwbc, params); - dwb3_set_gamut_remap(dwbc, params); - dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func); - - /* Program output denorm */ - dwb3_set_denorm(dwbc, params); - - if (pre_locked == 0) { - /* Unlock DWB registers */ - REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0); - } - - return true; -} - -bool dwb3_is_enabled(struct dwbc *dwbc) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - unsigned int dwb_enabled = 0; - unsigned int fc_frame_capture_en = 0; - - REG_GET(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, &dwb_enabled); - REG_GET(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, &fc_frame_capture_en); - - return ((dwb_enabled != 0) && (fc_frame_capture_en != 0)); -} - -void dwb3_set_stereo(struct dwbc *dwbc, - struct dwb_stereo_params *stereo_params) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - - if (stereo_params->stereo_enabled) { - REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, stereo_params->stereo_eye_select); - REG_UPDATE(FC_MODE_CTRL, FC_STEREO_EYE_POLARITY, stereo_params->stereo_polarity); - DC_LOG_DWB("%s dwb stereo enabled", __func__); - } else { - REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, 0); - DC_LOG_DWB("%s dwb stereo disabled", __func__); - } -} - -void dwb3_set_new_content(struct dwbc *dwbc, - bool is_new_content) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - - REG_UPDATE(FC_MODE_CTRL, FC_NEW_CONTENT, is_new_content); -} - -void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - - /* Set output format*/ - REG_UPDATE(DWB_OUT_CTRL, OUT_FORMAT, params->cnv_params.fc_out_format); - - /* Set output denorm */ - if (params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_ARGB || - params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_RGBA) { - REG_UPDATE(DWB_OUT_CTRL, OUT_DENORM, params->cnv_params.out_denorm_mode); - REG_UPDATE(DWB_OUT_CTRL, OUT_MAX, params->cnv_params.out_max_pix_val); - REG_UPDATE(DWB_OUT_CTRL, OUT_MIN, params->cnv_params.out_min_pix_val); - } -} - - -static const struct dwbc_funcs dcn30_dwbc_funcs = { - .get_caps = dwb3_get_caps, - .enable = dwb3_enable, - .disable = dwb3_disable, - .update = dwb3_update, - .is_enabled = dwb3_is_enabled, - .set_fc_enable = dwb3_set_fc_enable, - .set_stereo = dwb3_set_stereo, - .set_new_content = dwb3_set_new_content, - .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename -}; - -void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30, - struct dc_context *ctx, - const struct dcn30_dwbc_registers *dwbc_regs, - const struct dcn30_dwbc_shift *dwbc_shift, - const struct dcn30_dwbc_mask *dwbc_mask, - int inst) -{ - 
dwbc30->base.ctx = ctx; - - dwbc30->base.inst = inst; - dwbc30->base.funcs = &dcn30_dwbc_funcs; - - dwbc30->dwbc_regs = dwbc_regs; - dwbc30->dwbc_shift = dwbc_shift; - dwbc30->dwbc_mask = dwbc_mask; -} - -void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - - /* - * Set maximum delay of host read access to DWBSCL LUT or OGAM LUT if there are no - * idle cycles in HW pipeline (in number of clock cycles times 4) - */ - REG_UPDATE(DWB_HOST_READ_CONTROL, DWB_HOST_READ_RATE_CONTROL, host_read_delay); - - DC_LOG_DWB("%s dwb3_rate_control at inst = %d", __func__, dwbc->inst); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h deleted file mode 100644 index 0f3f7c5fbaecf9..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h +++ /dev/null @@ -1,920 +0,0 @@ -/* Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ -#ifndef __DC_DWBC_DCN30_H__ -#define __DC_DWBC_DCN30_H__ - -#define TO_DCN30_DWBC(dwbc_base) \ - container_of(dwbc_base, struct dcn30_dwbc, base) - -#define DWBC_COMMON_REG_LIST_DCN30(inst) \ - SR(DWB_ENABLE_CLK_CTRL),\ - SR(DWB_MEM_PWR_CTRL),\ - SR(FC_MODE_CTRL),\ - SR(FC_FLOW_CTRL),\ - SR(FC_WINDOW_START),\ - SR(FC_WINDOW_SIZE),\ - SR(FC_SOURCE_SIZE),\ - SR(DWB_UPDATE_CTRL),\ - SR(DWB_CRC_CTRL),\ - SR(DWB_CRC_MASK_R_G),\ - SR(DWB_CRC_MASK_B_A),\ - SR(DWB_CRC_VAL_R_G),\ - SR(DWB_CRC_VAL_B_A),\ - SR(DWB_OUT_CTRL),\ - SR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN),\ - SR(DWB_MMHUBBUB_BACKPRESSURE_CNT),\ - SR(DWB_HOST_READ_CONTROL),\ - SR(DWB_SOFT_RESET),\ - SR(DWB_HDR_MULT_COEF),\ - SR(DWB_GAMUT_REMAP_MODE),\ - SR(DWB_GAMUT_REMAP_COEF_FORMAT),\ - SR(DWB_GAMUT_REMAPA_C11_C12),\ - SR(DWB_GAMUT_REMAPA_C13_C14),\ - SR(DWB_GAMUT_REMAPA_C21_C22),\ - SR(DWB_GAMUT_REMAPA_C23_C24),\ - SR(DWB_GAMUT_REMAPA_C31_C32),\ - SR(DWB_GAMUT_REMAPA_C33_C34),\ - SR(DWB_GAMUT_REMAPB_C11_C12),\ - SR(DWB_GAMUT_REMAPB_C13_C14),\ - SR(DWB_GAMUT_REMAPB_C21_C22),\ - SR(DWB_GAMUT_REMAPB_C23_C24),\ - SR(DWB_GAMUT_REMAPB_C31_C32),\ - SR(DWB_GAMUT_REMAPB_C33_C34),\ - SR(DWB_OGAM_CONTROL),\ - SR(DWB_OGAM_LUT_INDEX),\ - SR(DWB_OGAM_LUT_DATA),\ - SR(DWB_OGAM_LUT_CONTROL),\ - SR(DWB_OGAM_RAMA_START_CNTL_B),\ - SR(DWB_OGAM_RAMA_START_CNTL_G),\ - SR(DWB_OGAM_RAMA_START_CNTL_R),\ - SR(DWB_OGAM_RAMA_START_BASE_CNTL_B),\ - SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B),\ - SR(DWB_OGAM_RAMA_START_BASE_CNTL_G),\ - SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G),\ - SR(DWB_OGAM_RAMA_START_BASE_CNTL_R),\ - SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R),\ - SR(DWB_OGAM_RAMA_END_CNTL1_B),\ - SR(DWB_OGAM_RAMA_END_CNTL2_B),\ - SR(DWB_OGAM_RAMA_END_CNTL1_G),\ - SR(DWB_OGAM_RAMA_END_CNTL2_G),\ - SR(DWB_OGAM_RAMA_END_CNTL1_R),\ - SR(DWB_OGAM_RAMA_END_CNTL2_R),\ - SR(DWB_OGAM_RAMA_OFFSET_B),\ - SR(DWB_OGAM_RAMA_OFFSET_G),\ - SR(DWB_OGAM_RAMA_OFFSET_R),\ - SR(DWB_OGAM_RAMA_REGION_0_1),\ - SR(DWB_OGAM_RAMA_REGION_2_3),\ - SR(DWB_OGAM_RAMA_REGION_4_5),\ - SR(DWB_OGAM_RAMA_REGION_6_7),\ - SR(DWB_OGAM_RAMA_REGION_8_9),\ - SR(DWB_OGAM_RAMA_REGION_10_11),\ - SR(DWB_OGAM_RAMA_REGION_12_13),\ - SR(DWB_OGAM_RAMA_REGION_14_15),\ - SR(DWB_OGAM_RAMA_REGION_16_17),\ - SR(DWB_OGAM_RAMA_REGION_18_19),\ - SR(DWB_OGAM_RAMA_REGION_20_21),\ - SR(DWB_OGAM_RAMA_REGION_22_23),\ - SR(DWB_OGAM_RAMA_REGION_24_25),\ - SR(DWB_OGAM_RAMA_REGION_26_27),\ - SR(DWB_OGAM_RAMA_REGION_28_29),\ - SR(DWB_OGAM_RAMA_REGION_30_31),\ - SR(DWB_OGAM_RAMA_REGION_32_33),\ - SR(DWB_OGAM_RAMB_START_CNTL_B),\ - SR(DWB_OGAM_RAMB_START_CNTL_G),\ - SR(DWB_OGAM_RAMB_START_CNTL_R),\ - SR(DWB_OGAM_RAMB_START_BASE_CNTL_B),\ - SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B),\ - SR(DWB_OGAM_RAMB_START_BASE_CNTL_G),\ - SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G),\ - SR(DWB_OGAM_RAMB_START_BASE_CNTL_R),\ - SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R),\ - SR(DWB_OGAM_RAMB_END_CNTL1_B),\ - SR(DWB_OGAM_RAMB_END_CNTL2_B),\ - SR(DWB_OGAM_RAMB_END_CNTL1_G),\ - SR(DWB_OGAM_RAMB_END_CNTL2_G),\ - SR(DWB_OGAM_RAMB_END_CNTL1_R),\ - SR(DWB_OGAM_RAMB_END_CNTL2_R),\ - SR(DWB_OGAM_RAMB_OFFSET_B),\ - SR(DWB_OGAM_RAMB_OFFSET_G),\ - SR(DWB_OGAM_RAMB_OFFSET_R),\ - SR(DWB_OGAM_RAMB_REGION_0_1),\ - SR(DWB_OGAM_RAMB_REGION_2_3),\ - SR(DWB_OGAM_RAMB_REGION_4_5),\ - SR(DWB_OGAM_RAMB_REGION_6_7),\ - SR(DWB_OGAM_RAMB_REGION_8_9),\ - SR(DWB_OGAM_RAMB_REGION_10_11),\ - SR(DWB_OGAM_RAMB_REGION_12_13),\ - SR(DWB_OGAM_RAMB_REGION_14_15),\ - SR(DWB_OGAM_RAMB_REGION_16_17),\ - SR(DWB_OGAM_RAMB_REGION_18_19),\ - SR(DWB_OGAM_RAMB_REGION_20_21),\ - 
SR(DWB_OGAM_RAMB_REGION_22_23),\ - SR(DWB_OGAM_RAMB_REGION_24_25),\ - SR(DWB_OGAM_RAMB_REGION_26_27),\ - SR(DWB_OGAM_RAMB_REGION_28_29),\ - SR(DWB_OGAM_RAMB_REGION_30_31),\ - SR(DWB_OGAM_RAMB_REGION_32_33) - - -#define DWBC_COMMON_MASK_SH_LIST_DCN30(mask_sh) \ - SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DWB_ENABLE, mask_sh),\ - SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DISPCLK_R_DWB_GATE_DIS, mask_sh),\ - SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DISPCLK_G_DWB_GATE_DIS, mask_sh),\ - SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DWB_TEST_CLK_SEL, mask_sh),\ - SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_FORCE, mask_sh),\ - SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_DIS, mask_sh),\ - SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_STATE, mask_sh),\ - SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_EN, mask_sh),\ - SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_RATE, mask_sh),\ - SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_WINDOW_CROP_EN, mask_sh),\ - SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_EYE_SELECTION, mask_sh),\ - SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_STEREO_EYE_POLARITY, mask_sh),\ - SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_NEW_CONTENT, mask_sh),\ - SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_EN_CURRENT, mask_sh),\ - SF_DWB2(FC_FLOW_CTRL, DWB_TOP, 0, FC_FIRST_PIXEL_DELAY_COUNT, mask_sh),\ - SF_DWB2(FC_WINDOW_START, DWB_TOP, 0, FC_WINDOW_START_X, mask_sh),\ - SF_DWB2(FC_WINDOW_START, DWB_TOP, 0, FC_WINDOW_START_Y, mask_sh),\ - SF_DWB2(FC_WINDOW_SIZE, DWB_TOP, 0, FC_WINDOW_WIDTH, mask_sh),\ - SF_DWB2(FC_WINDOW_SIZE, DWB_TOP, 0, FC_WINDOW_HEIGHT, mask_sh),\ - SF_DWB2(FC_SOURCE_SIZE, DWB_TOP, 0, FC_SOURCE_WIDTH, mask_sh),\ - SF_DWB2(FC_SOURCE_SIZE, DWB_TOP, 0, FC_SOURCE_HEIGHT, mask_sh),\ - SF_DWB2(DWB_UPDATE_CTRL, DWB_TOP, 0, DWB_UPDATE_LOCK, mask_sh),\ - SF_DWB2(DWB_UPDATE_CTRL, DWB_TOP, 0, DWB_UPDATE_PENDING, mask_sh),\ - SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_EN, mask_sh),\ - SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_CONT_EN, mask_sh),\ - SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_SRC_SEL, mask_sh),\ - SF_DWB2(DWB_CRC_MASK_R_G, DWB_TOP, 0, DWB_CRC_RED_MASK, mask_sh),\ - SF_DWB2(DWB_CRC_MASK_R_G, DWB_TOP, 0, DWB_CRC_GREEN_MASK, mask_sh),\ - SF_DWB2(DWB_CRC_MASK_B_A, DWB_TOP, 0, DWB_CRC_BLUE_MASK, mask_sh),\ - SF_DWB2(DWB_CRC_MASK_B_A, DWB_TOP, 0, DWB_CRC_A_MASK, mask_sh),\ - SF_DWB2(DWB_CRC_VAL_R_G, DWB_TOP, 0, DWB_CRC_SIG_RED, mask_sh),\ - SF_DWB2(DWB_CRC_VAL_R_G, DWB_TOP, 0, DWB_CRC_SIG_GREEN, mask_sh),\ - SF_DWB2(DWB_CRC_VAL_B_A, DWB_TOP, 0, DWB_CRC_SIG_BLUE, mask_sh),\ - SF_DWB2(DWB_CRC_VAL_B_A, DWB_TOP, 0, DWB_CRC_SIG_A, mask_sh),\ - SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_FORMAT, mask_sh),\ - SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_DENORM, mask_sh),\ - SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_MAX, mask_sh),\ - SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_MIN, mask_sh),\ - SF_DWB2(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, DWB_TOP, 0, DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, mask_sh),\ - SF_DWB2(DWB_MMHUBBUB_BACKPRESSURE_CNT, DWB_TOP, 0, DWB_MMHUBBUB_MAX_BACKPRESSURE, mask_sh),\ - SF_DWB2(DWB_HOST_READ_CONTROL, DWB_TOP, 0, DWB_HOST_READ_RATE_CONTROL, mask_sh),\ - SF_DWB2(DWB_SOFT_RESET, DWB_TOP, 0, DWB_SOFT_RESET, mask_sh),\ - SF_DWB2(DWB_HDR_MULT_COEF, DWBCP, 0, DWB_HDR_MULT_COEF, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAP_MODE, DWBCP, 0, DWB_GAMUT_REMAP_MODE, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAP_MODE, DWBCP, 0, DWB_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAP_COEF_FORMAT, DWBCP, 0, DWB_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C11_C12, DWBCP, 0, 
DWB_GAMUT_REMAPA_C11, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPA_C12, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPA_C13, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPA_C14, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPA_C21, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPA_C22, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPA_C23, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPA_C24, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPA_C31, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPA_C32, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPA_C33, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPA_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPA_C34, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPB_C11, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPB_C12, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPB_C13, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPB_C14, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPB_C21, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPB_C22, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPB_C23, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPB_C24, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPB_C31, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPB_C32, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPB_C33, mask_sh),\ - SF_DWB2(DWB_GAMUT_REMAPB_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPB_C34, mask_sh),\ - SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_MODE, mask_sh),\ - SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_SELECT, mask_sh),\ - SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_PWL_DISABLE, mask_sh),\ - SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_MODE_CURRENT, mask_sh),\ - SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_SELECT_CURRENT, mask_sh),\ - SF_DWB2(DWB_OGAM_LUT_INDEX, DWBCP, 0, DWB_OGAM_LUT_INDEX, mask_sh),\ - SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\ - SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ - SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ - SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\ - SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\ - SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ - 
SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_OFFSET_B, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_OFFSET_G, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_OFFSET_R, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, 
DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh),\ - 
SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_OFFSET_B, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_B, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_OFFSET_G, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_G, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_OFFSET_R, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_R, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, 
mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh),\ - 
SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh),\ - SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh) - - -#define DWBC_REG_FIELD_LIST_DCN3_0(type) \ - type DWB_ENABLE;\ - type DISPCLK_R_DWB_GATE_DIS;\ - type DISPCLK_G_DWB_GATE_DIS;\ - type DWB_TEST_CLK_SEL;\ - type DWBSCL_LUT_MEM_PWR_FORCE;\ - type DWBSCL_LUT_MEM_PWR_DIS;\ - type DWBSCL_LUT_MEM_PWR_STATE;\ - type DWBSCL_LB_MEM_PWR_FORCE;\ - type DWBSCL_LB_MEM_PWR_DIS;\ - type DWBSCL_LB_MEM_PWR_STATE;\ - type DWB_OGAM_LUT_MEM_PWR_FORCE;\ - type DWB_OGAM_LUT_MEM_PWR_DIS;\ - type DWB_OGAM_LUT_MEM_PWR_STATE;\ - type FC_FRAME_CAPTURE_EN;\ - type FC_FRAME_CAPTURE_RATE;\ - type FC_WINDOW_CROP_EN;\ - type FC_EYE_SELECTION;\ - type 
FC_STEREO_EYE_POLARITY;\ - type FC_NEW_CONTENT;\ - type FC_FI_EN;\ - type FC_FI_PHASE;\ - type FC_FRAME_CAPTURE_EN_CURRENT;\ - type FC_FIRST_PIXEL_DELAY_COUNT;\ - type FC_WINDOW_START_X;\ - type FC_WINDOW_START_Y;\ - type FC_WINDOW_WIDTH;\ - type FC_WINDOW_HEIGHT;\ - type FC_SOURCE_WIDTH;\ - type FC_SOURCE_HEIGHT;\ - type DWB_UPDATE_LOCK;\ - type DWB_UPDATE_PENDING;\ - type DWB_CRC_EN;\ - type DWB_CRC_CONT_EN;\ - type DWB_CRC_SRC_SEL;\ - type DWB_CRC_RED_MASK;\ - type DWB_CRC_GREEN_MASK;\ - type DWB_CRC_BLUE_MASK;\ - type DWB_CRC_A_MASK;\ - type DWB_CRC_SIG_RED;\ - type DWB_CRC_SIG_GREEN;\ - type DWB_CRC_SIG_BLUE;\ - type DWB_CRC_SIG_A;\ - type OUT_FORMAT;\ - type OUT_DENORM;\ - type OUT_MAX;\ - type OUT_MIN;\ - type DWB_MMHUBBUB_BACKPRESSURE_CNT_EN;\ - type DWB_MMHUBBUB_MAX_BACKPRESSURE;\ - type DWB_HOST_READ_RATE_CONTROL;\ - type DWBSCL_DATA_OVERFLOW_FLAG;\ - type DWBSCL_DATA_OVERFLOW_ACK;\ - type DWBSCL_DATA_OVERFLOW_MASK;\ - type DWBSCL_DATA_OVERFLOW_INT_STATUS;\ - type DWBSCL_DATA_OVERFLOW_INT_TYPE;\ - type DWBSCL_DATA_OVERFLOW_TYPE;\ - type DWBSCL_DATA_OVERFLOW_OUT_X_CNT;\ - type DWBSCL_DATA_OVERFLOW_OUT_Y_CNT;\ - type DWB_SOFT_RESET;\ - type DWBSCL_COEF_RAM_TAP_PAIR_IDX;\ - type DWBSCL_COEF_RAM_PHASE;\ - type DWBSCL_COEF_RAM_FILTER_TYPE;\ - type DWBSCL_COEF_RAM_SELECT_RD;\ - type DWBSCL_COEF_RAM_EVEN_TAP_COEF;\ - type DWBSCL_COEF_RAM_EVEN_TAP_COEF_EN;\ - type DWBSCL_COEF_RAM_ODD_TAP_COEF;\ - type DWBSCL_COEF_RAM_ODD_TAP_COEF_EN;\ - type DWBSCL_MODE;\ - type DWBSCL_COEF_RAM_SELECT;\ - type DWBSCL_COEF_RAM_SELECT_CURRENT;\ - type DWBSCL_H_NUM_OF_TAPS;\ - type DWBSCL_V_NUM_OF_TAPS;\ - type DWBSCL_H_SCALE_RATIO;\ - type DWBSCL_H_INIT_FRAC;\ - type DWBSCL_H_INIT_INT;\ - type DWBSCL_V_SCALE_RATIO;\ - type DWBSCL_V_INIT_FRAC;\ - type DWBSCL_V_INIT_INT;\ - type DWBSCL_BOUNDARY_MODE;\ - type DWBSCL_BLACK_COLOR_RGB;\ - type DWBSCL_DEST_WIDTH;\ - type DWBSCL_DEST_HEIGHT;\ - type DWB_HDR_MULT_COEF;\ - type DWB_GAMUT_REMAP_MODE;\ - type DWB_GAMUT_REMAP_MODE_CURRENT;\ - type DWB_GAMUT_REMAP_COEF_FORMAT;\ - type DWB_GAMUT_REMAPA_C11;\ - type DWB_GAMUT_REMAPA_C12;\ - type DWB_GAMUT_REMAPA_C13;\ - type DWB_GAMUT_REMAPA_C14;\ - type DWB_GAMUT_REMAPA_C21;\ - type DWB_GAMUT_REMAPA_C22;\ - type DWB_GAMUT_REMAPA_C23;\ - type DWB_GAMUT_REMAPA_C24;\ - type DWB_GAMUT_REMAPA_C31;\ - type DWB_GAMUT_REMAPA_C32;\ - type DWB_GAMUT_REMAPA_C33;\ - type DWB_GAMUT_REMAPA_C34;\ - type DWB_GAMUT_REMAPB_C11;\ - type DWB_GAMUT_REMAPB_C12;\ - type DWB_GAMUT_REMAPB_C13;\ - type DWB_GAMUT_REMAPB_C14;\ - type DWB_GAMUT_REMAPB_C21;\ - type DWB_GAMUT_REMAPB_C22;\ - type DWB_GAMUT_REMAPB_C23;\ - type DWB_GAMUT_REMAPB_C24;\ - type DWB_GAMUT_REMAPB_C31;\ - type DWB_GAMUT_REMAPB_C32;\ - type DWB_GAMUT_REMAPB_C33;\ - type DWB_GAMUT_REMAPB_C34;\ - type DWB_OGAM_MODE;\ - type DWB_OGAM_SELECT;\ - type DWB_OGAM_PWL_DISABLE;\ - type DWB_OGAM_MODE_CURRENT;\ - type DWB_OGAM_SELECT_CURRENT;\ - type DWB_OGAM_LUT_INDEX;\ - type DWB_OGAM_LUT_DATA;\ - type DWB_OGAM_LUT_WRITE_COLOR_MASK;\ - type DWB_OGAM_LUT_READ_COLOR_SEL;\ - type DWB_OGAM_LUT_READ_DBG;\ - type DWB_OGAM_LUT_HOST_SEL;\ - type DWB_OGAM_LUT_CONFIG_MODE;\ - type DWB_OGAM_LUT_STATUS;\ - type DWB_OGAM_RAMA_EXP_REGION_START_B;\ - type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;\ - type DWB_OGAM_RAMA_EXP_REGION_START_G;\ - type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G;\ - type DWB_OGAM_RAMA_EXP_REGION_START_R;\ - type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R;\ - type DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;\ - type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;\ - type 
DWB_OGAM_RAMA_EXP_REGION_START_BASE_G;\ - type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G;\ - type DWB_OGAM_RAMA_EXP_REGION_START_BASE_R;\ - type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R;\ - type DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;\ - type DWB_OGAM_RAMA_EXP_REGION_END_B;\ - type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;\ - type DWB_OGAM_RAMA_EXP_REGION_END_BASE_G;\ - type DWB_OGAM_RAMA_EXP_REGION_END_G;\ - type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G;\ - type DWB_OGAM_RAMA_EXP_REGION_END_BASE_R;\ - type DWB_OGAM_RAMA_EXP_REGION_END_R;\ - type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R;\ - type DWB_OGAM_RAMA_OFFSET_B;\ - type DWB_OGAM_RAMA_OFFSET_G;\ - type DWB_OGAM_RAMA_OFFSET_R;\ - type DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS;\ - type 
DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS;\ - type DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET;\ - type DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION_START_B;\ - type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B;\ - type DWB_OGAM_RAMB_EXP_REGION_START_G;\ - type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G;\ - type DWB_OGAM_RAMB_EXP_REGION_START_R;\ - type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R;\ - type DWB_OGAM_RAMB_EXP_REGION_START_BASE_B;\ - type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B;\ - type DWB_OGAM_RAMB_EXP_REGION_START_BASE_G;\ - type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G;\ - type DWB_OGAM_RAMB_EXP_REGION_START_BASE_R;\ - type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R;\ - type DWB_OGAM_RAMB_EXP_REGION_END_BASE_B;\ - type DWB_OGAM_RAMB_EXP_REGION_END_B;\ - type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B;\ - type DWB_OGAM_RAMB_EXP_REGION_END_BASE_G;\ - type DWB_OGAM_RAMB_EXP_REGION_END_G;\ - type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G;\ - type DWB_OGAM_RAMB_EXP_REGION_END_BASE_R;\ - type DWB_OGAM_RAMB_EXP_REGION_END_R;\ - type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R;\ - type DWB_OGAM_RAMB_OFFSET_B;\ - type DWB_OGAM_RAMB_OFFSET_G;\ - type DWB_OGAM_RAMB_OFFSET_R;\ - type DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS;\ - type 
DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\ - type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS - -struct dcn30_dwbc_registers { - /* DCN3AG */ - /* DWB_TOP */ - uint32_t DWB_ENABLE_CLK_CTRL; - uint32_t DWB_MEM_PWR_CTRL; - uint32_t FC_MODE_CTRL; - uint32_t FC_FLOW_CTRL; - uint32_t FC_WINDOW_START; - uint32_t FC_WINDOW_SIZE; - uint32_t FC_SOURCE_SIZE; - uint32_t DWB_UPDATE_CTRL; - uint32_t DWB_CRC_CTRL; - uint32_t DWB_CRC_MASK_R_G; - uint32_t DWB_CRC_MASK_B_A; - uint32_t DWB_CRC_VAL_R_G; - uint32_t DWB_CRC_VAL_B_A; - uint32_t DWB_OUT_CTRL; - uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT_EN; - uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT; - uint32_t DWB_HOST_READ_CONTROL; - uint32_t DWB_SOFT_RESET; - uint32_t DWB_DEBUG_CTRL; - uint32_t DWB_DEBUG; - uint32_t DWB_TEST_DEBUG_INDEX; - uint32_t DWB_TEST_DEBUG_DATA; - - /* DWBSCL */ - uint32_t DWBSCL_COEF_RAM_TAP_SELECT; - uint32_t DWBSCL_COEF_RAM_TAP_DATA; - uint32_t DWBSCL_MODE; - uint32_t DWBSCL_TAP_CONTROL; - uint32_t DWBSCL_HORZ_FILTER_SCALE_RATIO; - uint32_t DWBSCL_HORZ_FILTER_INIT; - uint32_t DWBSCL_VERT_FILTER_SCALE_RATIO; - uint32_t DWBSCL_VERT_FILTER_INIT; - uint32_t DWBSCL_BOUNDARY_CTRL; - uint32_t DWBSCL_DEST_SIZE; - uint32_t DWBSCL_OVERFLOW_STATUS; - uint32_t DWBSCL_OVERFLOW_COUNTER; - uint32_t DWBSCL_DEBUG; - uint32_t DWBSCL_TEST_DEBUG_INDEX; - uint32_t DWBSCL_TEST_DEBUG_DATA; - - /* DWBCP */ - uint32_t DWB_HDR_MULT_COEF; - uint32_t DWB_GAMUT_REMAP_MODE; - uint32_t DWB_GAMUT_REMAP_COEF_FORMAT; - uint32_t DWB_GAMUT_REMAPA_C11_C12; - uint32_t DWB_GAMUT_REMAPA_C13_C14; - uint32_t DWB_GAMUT_REMAPA_C21_C22; - uint32_t DWB_GAMUT_REMAPA_C23_C24; - uint32_t DWB_GAMUT_REMAPA_C31_C32; - uint32_t DWB_GAMUT_REMAPA_C33_C34; - uint32_t DWB_GAMUT_REMAPB_C11_C12; - uint32_t DWB_GAMUT_REMAPB_C13_C14; - uint32_t DWB_GAMUT_REMAPB_C21_C22; - uint32_t DWB_GAMUT_REMAPB_C23_C24; - uint32_t DWB_GAMUT_REMAPB_C31_C32; - uint32_t DWB_GAMUT_REMAPB_C33_C34; - uint32_t DWB_OGAM_CONTROL; - uint32_t DWB_OGAM_LUT_INDEX; - uint32_t DWB_OGAM_LUT_DATA; - uint32_t DWB_OGAM_LUT_CONTROL; - uint32_t DWB_OGAM_RAMA_START_CNTL_B; - uint32_t DWB_OGAM_RAMA_START_CNTL_G; - uint32_t DWB_OGAM_RAMA_START_CNTL_R; - uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_B; - uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_B; - uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_G; - uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_G; - uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_R; - uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_R; - uint32_t DWB_OGAM_RAMA_END_CNTL1_B; - uint32_t DWB_OGAM_RAMA_END_CNTL2_B; - uint32_t 
DWB_OGAM_RAMA_END_CNTL1_G; - uint32_t DWB_OGAM_RAMA_END_CNTL2_G; - uint32_t DWB_OGAM_RAMA_END_CNTL1_R; - uint32_t DWB_OGAM_RAMA_END_CNTL2_R; - uint32_t DWB_OGAM_RAMA_OFFSET_B; - uint32_t DWB_OGAM_RAMA_OFFSET_G; - uint32_t DWB_OGAM_RAMA_OFFSET_R; - uint32_t DWB_OGAM_RAMA_REGION_0_1; - uint32_t DWB_OGAM_RAMA_REGION_2_3; - uint32_t DWB_OGAM_RAMA_REGION_4_5; - uint32_t DWB_OGAM_RAMA_REGION_6_7; - uint32_t DWB_OGAM_RAMA_REGION_8_9; - uint32_t DWB_OGAM_RAMA_REGION_10_11; - uint32_t DWB_OGAM_RAMA_REGION_12_13; - uint32_t DWB_OGAM_RAMA_REGION_14_15; - uint32_t DWB_OGAM_RAMA_REGION_16_17; - uint32_t DWB_OGAM_RAMA_REGION_18_19; - uint32_t DWB_OGAM_RAMA_REGION_20_21; - uint32_t DWB_OGAM_RAMA_REGION_22_23; - uint32_t DWB_OGAM_RAMA_REGION_24_25; - uint32_t DWB_OGAM_RAMA_REGION_26_27; - uint32_t DWB_OGAM_RAMA_REGION_28_29; - uint32_t DWB_OGAM_RAMA_REGION_30_31; - uint32_t DWB_OGAM_RAMA_REGION_32_33; - uint32_t DWB_OGAM_RAMB_START_CNTL_B; - uint32_t DWB_OGAM_RAMB_START_CNTL_G; - uint32_t DWB_OGAM_RAMB_START_CNTL_R; - uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_B; - uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_B; - uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_G; - uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_G; - uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_R; - uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_R; - uint32_t DWB_OGAM_RAMB_END_CNTL1_B; - uint32_t DWB_OGAM_RAMB_END_CNTL2_B; - uint32_t DWB_OGAM_RAMB_END_CNTL1_G; - uint32_t DWB_OGAM_RAMB_END_CNTL2_G; - uint32_t DWB_OGAM_RAMB_END_CNTL1_R; - uint32_t DWB_OGAM_RAMB_END_CNTL2_R; - uint32_t DWB_OGAM_RAMB_OFFSET_B; - uint32_t DWB_OGAM_RAMB_OFFSET_G; - uint32_t DWB_OGAM_RAMB_OFFSET_R; - uint32_t DWB_OGAM_RAMB_REGION_0_1; - uint32_t DWB_OGAM_RAMB_REGION_2_3; - uint32_t DWB_OGAM_RAMB_REGION_4_5; - uint32_t DWB_OGAM_RAMB_REGION_6_7; - uint32_t DWB_OGAM_RAMB_REGION_8_9; - uint32_t DWB_OGAM_RAMB_REGION_10_11; - uint32_t DWB_OGAM_RAMB_REGION_12_13; - uint32_t DWB_OGAM_RAMB_REGION_14_15; - uint32_t DWB_OGAM_RAMB_REGION_16_17; - uint32_t DWB_OGAM_RAMB_REGION_18_19; - uint32_t DWB_OGAM_RAMB_REGION_20_21; - uint32_t DWB_OGAM_RAMB_REGION_22_23; - uint32_t DWB_OGAM_RAMB_REGION_24_25; - uint32_t DWB_OGAM_RAMB_REGION_26_27; - uint32_t DWB_OGAM_RAMB_REGION_28_29; - uint32_t DWB_OGAM_RAMB_REGION_30_31; - uint32_t DWB_OGAM_RAMB_REGION_32_33; - uint32_t DWBCP_DEBUG; - uint32_t DWBCP_TEST_DEBUG_INDEX; - uint32_t DWBCP_TEST_DEBUG_DATA; -}; - -/* Internal enums / structs */ -enum dwbscl_coef_filter_type_sel { - DWBSCL_COEF_RAM_FILTER_TYPE_VERT_RGB = 0, - DWBSCL_COEF_RAM_FILTER_TYPE_HORZ_RGB = 1 -}; - - -struct dcn30_dwbc_mask { - DWBC_REG_FIELD_LIST_DCN3_0(uint32_t); -}; - -struct dcn30_dwbc_shift { - DWBC_REG_FIELD_LIST_DCN3_0(uint8_t); -}; - -struct dcn30_dwbc { - struct dwbc base; - const struct dcn30_dwbc_registers *dwbc_regs; - const struct dcn30_dwbc_shift *dwbc_shift; - const struct dcn30_dwbc_mask *dwbc_mask; -}; - -void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30, - struct dc_context *ctx, - const struct dcn30_dwbc_registers *dwbc_regs, - const struct dcn30_dwbc_shift *dwbc_shift, - const struct dcn30_dwbc_mask *dwbc_mask, - int inst); - -bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params); - -bool dwb3_disable(struct dwbc *dwbc); - -bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params); - -bool dwb3_is_enabled(struct dwbc *dwbc); - -void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable); - -void dwb3_set_stereo(struct dwbc *dwbc, - struct dwb_stereo_params *stereo_params); - -void dwb3_set_new_content(struct dwbc *dwbc, - bool 
is_new_content); - -void dwb3_config_fc(struct dwbc *dwbc, - struct dc_dwb_params *params); - -void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params); - -void dwb3_program_hdr_mult( - struct dwbc *dwbc, - const struct dc_dwb_params *params); - -void dwb3_set_gamut_remap( - struct dwbc *dwbc, - const struct dc_dwb_params *params); - -bool dwb3_ogam_set_input_transfer_func( - struct dwbc *dwbc, - const struct dc_transfer_func *in_transfer_func_dwb_ogam); - -void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay); -#endif - - diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c deleted file mode 100644 index 03a50c32fcfe1f..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c +++ /dev/null @@ -1,395 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
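The dcn30_dwb_cm.c hunks below remove the DWB output-gamma (OGAM) color-management code. Its LUT programming is double buffered: software fills whichever of RAM A/RAM B the hardware is not currently reading and then flips DWB_OGAM_SELECT, so an in-flight frame never sees a half-written table. A minimal sketch of that bank-selection policy (the enum and function names here are illustrative, not taken from the driver):

/* Illustrative sketch of the RAM A/B ping-pong used for LUT updates:
 * when RAM B is active, program RAM A next; in every other case
 * (RAM A active, or the LUT bypassed) program RAM B next.
 */
enum lut_bank { LUT_BANK_NONE, LUT_BANK_A, LUT_BANK_B };

static enum lut_bank next_bank_to_program(enum lut_bank active)
{
	return (active == LUT_BANK_B) ? LUT_BANK_A : LUT_BANK_B;
}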
- * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "fixed31_32.h" -#include "resource.h" -#include "basics/conversion.h" -#include "dwb.h" -#include "dcn30_dwb.h" -#include "dcn30_cm_common.h" -#include "dcn10/dcn10_cm_common.h" - - -#define REG(reg)\ - dwbc30->dwbc_regs->reg - -#define CTX \ - dwbc30->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name - -#define TO_DCN30_DWBC(dwbc_base) \ - container_of(dwbc_base, struct dcn30_dwbc, base) - -static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30, - struct dcn3_xfer_func_reg *reg) -{ - reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B; - reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B; - reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B; - reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B; - - reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B; - reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B; - reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; -} - -/*program dwb ogam RAM A*/ -static void dwb3_program_ogam_luta_settings( - struct dcn30_dwbc *dwbc30, - const struct pwl_params *params) -{ - struct dcn3_xfer_func_reg gam_regs; - - dwb3_get_reg_field_ogam(dwbc30, &gam_regs); - - gam_regs.start_cntl_b = REG(DWB_OGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(DWB_OGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(DWB_OGAM_RAMA_START_CNTL_R); - gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMA_START_BASE_CNTL_B); - gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMA_START_BASE_CNTL_G); - gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMA_START_BASE_CNTL_R); - gam_regs.start_slope_cntl_b = 
REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMA_END_CNTL2_R); - gam_regs.offset_b = REG(DWB_OGAM_RAMA_OFFSET_B); - gam_regs.offset_g = REG(DWB_OGAM_RAMA_OFFSET_G); - gam_regs.offset_r = REG(DWB_OGAM_RAMA_OFFSET_R); - gam_regs.region_start = REG(DWB_OGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(DWB_OGAM_RAMA_REGION_32_33); - /*todo*/ - cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs); -} - -/*program dwb ogam RAM B*/ -static void dwb3_program_ogam_lutb_settings( - struct dcn30_dwbc *dwbc30, - const struct pwl_params *params) -{ - struct dcn3_xfer_func_reg gam_regs; - - dwb3_get_reg_field_ogam(dwbc30, &gam_regs); - - gam_regs.start_cntl_b = REG(DWB_OGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(DWB_OGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(DWB_OGAM_RAMB_START_CNTL_R); - gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMB_START_BASE_CNTL_B); - gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMB_START_BASE_CNTL_G); - gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMB_START_BASE_CNTL_R); - gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMB_END_CNTL2_R); - gam_regs.offset_b = REG(DWB_OGAM_RAMB_OFFSET_B); - gam_regs.offset_g = REG(DWB_OGAM_RAMB_OFFSET_G); - gam_regs.offset_r = REG(DWB_OGAM_RAMB_OFFSET_R); - gam_regs.region_start = REG(DWB_OGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(DWB_OGAM_RAMB_REGION_32_33); - - cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs); -} - -static enum dc_lut_mode dwb3_get_ogam_current( - struct dcn30_dwbc *dwbc30) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - uint32_t ram_select; - - REG_GET_2(DWB_OGAM_CONTROL, - DWB_OGAM_MODE_CURRENT, &state_mode, - DWB_OGAM_SELECT_CURRENT, &ram_select); - - if (state_mode == 0) { - mode = LUT_BYPASS; - } else if (state_mode == 2) { - if (ram_select == 0) - mode = LUT_RAM_A; - else if (ram_select == 1) - mode = LUT_RAM_B; - else - mode = LUT_BYPASS; - } else { - // Reserved value - mode = LUT_BYPASS; - BREAK_TO_DEBUGGER(); - return mode; - } - return mode; -} - -static void dwb3_configure_ogam_lut( - struct dcn30_dwbc *dwbc30, - bool is_ram_a) -{ - REG_UPDATE_2(DWB_OGAM_LUT_CONTROL, - DWB_OGAM_LUT_WRITE_COLOR_MASK, 7, - DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 
0 : 1); - - REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0); -} - -static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i; - - uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; - uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; - uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; - - if (is_rgb_equal(rgb, num)) { - for (i = 0 ; i < num; i++) - REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg); - - REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red); - - } else { - - REG_UPDATE(DWB_OGAM_LUT_CONTROL, - DWB_OGAM_LUT_WRITE_COLOR_MASK, 4); - - for (i = 0 ; i < num; i++) - REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg); - - REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red); - - REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0); - - REG_UPDATE(DWB_OGAM_LUT_CONTROL, - DWB_OGAM_LUT_WRITE_COLOR_MASK, 2); - - for (i = 0 ; i < num; i++) - REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg); - - REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green); - - REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0); - - REG_UPDATE(DWB_OGAM_LUT_CONTROL, - DWB_OGAM_LUT_WRITE_COLOR_MASK, 1); - - for (i = 0 ; i < num; i++) - REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue); - } -} - -static bool dwb3_program_ogam_lut( - struct dcn30_dwbc *dwbc30, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - - if (params == NULL) { - REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 0); - return false; - } - - if (params->hw_points_num == 0) - return false; - - REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2); - - current_mode = dwb3_get_ogam_current(dwbc30); - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dwb3_configure_ogam_lut(dwbc30, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dwb3_program_ogam_luta_settings(dwbc30, params); - else - dwb3_program_ogam_lutb_settings(dwbc30, params); - - dwb3_program_ogam_pwl( - dwbc30, params->rgb_resulted, params->hw_points_num); - - REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 
0 : 1); - - return true; -} - -bool dwb3_ogam_set_input_transfer_func( - struct dwbc *dwbc, - const struct dc_transfer_func *in_transfer_func_dwb_ogam) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - bool result = false; - struct pwl_params *dwb_ogam_lut = NULL; - - if (in_transfer_func_dwb_ogam == NULL) - return result; - - dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL); - - if (dwb_ogam_lut) { - cm_helper_translate_curve_to_hw_format(dwbc->ctx, - in_transfer_func_dwb_ogam, - dwb_ogam_lut, false); - - result = dwb3_program_ogam_lut( - dwbc30, - dwb_ogam_lut); - kfree(dwb_ogam_lut); - dwb_ogam_lut = NULL; - } - - return result; -} - -static void dwb3_program_gamut_remap( - struct dwbc *dwbc, - const uint16_t *regval, - enum cm_gamut_coef_format coef_format, - enum cm_gamut_remap_select select) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - - struct color_matrices_reg gam_regs; - - if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) { - REG_SET(DWB_GAMUT_REMAP_MODE, 0, - DWB_GAMUT_REMAP_MODE, 0); - return; - } - - REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format); - - gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11; - gam_regs.masks.csc_c11 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11; - gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12; - gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12; - - switch (select) { - case CM_GAMUT_REMAP_MODE_RAMA_COEFF: - gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12); - gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPA_C33_C34); - - cm_helper_program_color_matrices( - dwbc30->base.ctx, - regval, - &gam_regs); - break; - case CM_GAMUT_REMAP_MODE_RAMB_COEFF: - gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPB_C11_C12); - gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPB_C33_C34); - - cm_helper_program_color_matrices( - dwbc30->base.ctx, - regval, - &gam_regs); - break; - case CM_GAMUT_REMAP_MODE_RESERVED: - /* should never happen, bug */ - BREAK_TO_DEBUGGER(); - return; - default: - break; - } - - REG_SET(DWB_GAMUT_REMAP_MODE, 0, - DWB_GAMUT_REMAP_MODE, select); - -} - -void dwb3_set_gamut_remap( - struct dwbc *dwbc, - const struct dc_dwb_params *params) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - struct cm_grph_csc_adjustment adjust = params->csc_params; - int i = 0; - - if (adjust.gamut_adjust_type != CM_GAMUT_ADJUST_TYPE_SW) { - /* Bypass if type is bypass or hw */ - dwb3_program_gamut_remap(dwbc, NULL, adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_BYPASS); - } else { - struct fixed31_32 arr_matrix[12]; - uint16_t arr_reg_val[12]; - unsigned int current_mode; - - for (i = 0; i < 12; i++) - arr_matrix[i] = adjust.temperature_matrix[i]; - - convert_float_matrix(arr_reg_val, arr_matrix, 12); - - REG_GET(DWB_GAMUT_REMAP_MODE, DWB_GAMUT_REMAP_MODE_CURRENT, ¤t_mode); - - if (current_mode == CM_GAMUT_REMAP_MODE_RAMA_COEFF) { - dwb3_program_gamut_remap(dwbc, arr_reg_val, - adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMB_COEFF); - } else { - dwb3_program_gamut_remap(dwbc, arr_reg_val, - adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMA_COEFF); - } - } -} - -void dwb3_program_hdr_mult( - struct dwbc *dwbc, - const struct dc_dwb_params *params) -{ - struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); - - REG_UPDATE(DWB_HDR_MULT_COEF, DWB_HDR_MULT_COEF, params->hdr_mult); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c deleted file mode 100644 index 3aeb85ec40b022..00000000000000 --- 
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ /dev/null @@ -1,1558 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "dcn30_mpc.h" -#include "dcn30_cm_common.h" -#include "basics/conversion.h" -#include "dcn10/dcn10_cm_common.h" -#include "dc.h" - -#define REG(reg)\ - mpc30->mpc_regs->reg - -#define CTX \ - mpc30->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - mpc30->mpc_shift->field_name, mpc30->mpc_mask->field_name - - -#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) - - -void mpc3_mpc_init(struct mpc *mpc) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - int opp_id; - - mpc1_mpc_init(mpc); - - for (opp_id = 0; opp_id < MAX_OPP; opp_id++) { - if (REG(MUX[opp_id])) - /* disable mpc out rate and flow control */ - REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE, - 1, MPC_OUT_FLOW_CONTROL_COUNT, 0); - } -} - -void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - mpc1_mpc_init_single_inst(mpc, mpcc_id); - - /* assuming mpc out mux is connected to opp with the same index at this - * point in time (e.g. 
transitioning from vbios to driver) - */ - if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id])) - /* disable mpc out rate and flow control */ - REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE, - 1, MPC_OUT_FLOW_CONTROL_COUNT, 0); -} - -bool mpc3_is_dwb_idle( - struct mpc *mpc, - int dwb_id) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - unsigned int status; - - REG_GET(DWB_MUX[dwb_id], MPC_DWB0_MUX_STATUS, &status); - - if (status == 0xf) - return true; - else - return false; -} - -void mpc3_set_dwb_mux( - struct mpc *mpc, - int dwb_id, - int mpcc_id) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_SET(DWB_MUX[dwb_id], 0, - MPC_DWB0_MUX, mpcc_id); -} - -void mpc3_disable_dwb_mux( - struct mpc *mpc, - int dwb_id) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_SET(DWB_MUX[dwb_id], 0, - MPC_DWB0_MUX, 0xf); -} - -enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id) -{ - /*Contrary to DCN2 and DCN1 wherein a single status register field holds this info; - *in DCN3/3AG, we need to read two separate fields to retrieve the same info - */ - enum dc_lut_mode mode; - uint32_t state_mode; - uint32_t state_ram_lut_in_use; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_GET_2(MPCC_OGAM_CONTROL[mpcc_id], MPCC_OGAM_MODE_CURRENT, &state_mode, - MPCC_OGAM_SELECT_CURRENT, &state_ram_lut_in_use); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 2: - switch (state_ram_lut_in_use) { - case 0: - mode = LUT_RAM_A; - break; - case 1: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -void mpc3_power_on_ogam_lut( - struct mpc *mpc, int mpcc_id, - bool power_on) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - /* - * Powering on: force memory active so the LUT can be updated. - * Powering off: allow entering memory low power mode - * - * Memory low power mode is controlled during MPC OGAM LUT init. - */ - REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], - MPCC_OGAM_MEM_PWR_DIS, power_on != 0); - - /* Wait for memory to be powered on - we won't be able to write to it otherwise. */ - if (power_on) - REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10); -} - -static void mpc3_configure_ogam_lut( - struct mpc *mpc, int mpcc_id, - bool is_ram_a) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_UPDATE_2(MPCC_OGAM_LUT_CONTROL[mpcc_id], - MPCC_OGAM_LUT_WRITE_COLOR_MASK, 7, - MPCC_OGAM_LUT_HOST_SEL, is_ram_a == true ? 
0:1); - - REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0); -} - -static void mpc3_ogam_get_reg_field( - struct mpc *mpc, - struct dcn3_xfer_func_reg *reg) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - reg->shifts.field_region_start_base = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B; - reg->masks.field_region_start_base = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B; - reg->shifts.field_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_OFFSET_B; - reg->masks.field_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_OFFSET_B; - - reg->shifts.exp_region0_lut_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; - reg->masks.field_region_linear_slope = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; - reg->shifts.exp_region_start = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; -} - -static void mpc3_program_luta(struct mpc *mpc, int mpcc_id, - const struct pwl_params *params) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - struct dcn3_xfer_func_reg gam_regs; - - mpc3_ogam_get_reg_field(mpc, &gam_regs); - - gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMA_START_CNTL_B[mpcc_id]); - gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMA_START_CNTL_G[mpcc_id]); - gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMA_START_CNTL_R[mpcc_id]); - gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B[mpcc_id]); - gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G[mpcc_id]); - gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R[mpcc_id]); - gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMA_END_CNTL1_B[mpcc_id]); - gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMA_END_CNTL2_B[mpcc_id]); - gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMA_END_CNTL1_G[mpcc_id]); - gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMA_END_CNTL2_G[mpcc_id]); - gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMA_END_CNTL1_R[mpcc_id]); - gam_regs.start_end_cntl2_r = 
REG(MPCC_OGAM_RAMA_END_CNTL2_R[mpcc_id]); - gam_regs.region_start = REG(MPCC_OGAM_RAMA_REGION_0_1[mpcc_id]); - gam_regs.region_end = REG(MPCC_OGAM_RAMA_REGION_32_33[mpcc_id]); - //New registers in DCN3AG/DCN OGAM block - gam_regs.offset_b = REG(MPCC_OGAM_RAMA_OFFSET_B[mpcc_id]); - gam_regs.offset_g = REG(MPCC_OGAM_RAMA_OFFSET_G[mpcc_id]); - gam_regs.offset_r = REG(MPCC_OGAM_RAMA_OFFSET_R[mpcc_id]); - gam_regs.start_base_cntl_b = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_B[mpcc_id]); - gam_regs.start_base_cntl_g = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_G[mpcc_id]); - gam_regs.start_base_cntl_r = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_R[mpcc_id]); - - cm_helper_program_gamcor_xfer_func(mpc30->base.ctx, params, &gam_regs); -} - -static void mpc3_program_lutb(struct mpc *mpc, int mpcc_id, - const struct pwl_params *params) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - struct dcn3_xfer_func_reg gam_regs; - - mpc3_ogam_get_reg_field(mpc, &gam_regs); - - gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMB_START_CNTL_B[mpcc_id]); - gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMB_START_CNTL_G[mpcc_id]); - gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMB_START_CNTL_R[mpcc_id]); - gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B[mpcc_id]); - gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G[mpcc_id]); - gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R[mpcc_id]); - gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMB_END_CNTL1_B[mpcc_id]); - gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMB_END_CNTL2_B[mpcc_id]); - gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMB_END_CNTL1_G[mpcc_id]); - gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMB_END_CNTL2_G[mpcc_id]); - gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMB_END_CNTL1_R[mpcc_id]); - gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMB_END_CNTL2_R[mpcc_id]); - gam_regs.region_start = REG(MPCC_OGAM_RAMB_REGION_0_1[mpcc_id]); - gam_regs.region_end = REG(MPCC_OGAM_RAMB_REGION_32_33[mpcc_id]); - //New registers in DCN3AG/DCN OGAM block - gam_regs.offset_b = REG(MPCC_OGAM_RAMB_OFFSET_B[mpcc_id]); - gam_regs.offset_g = REG(MPCC_OGAM_RAMB_OFFSET_G[mpcc_id]); - gam_regs.offset_r = REG(MPCC_OGAM_RAMB_OFFSET_R[mpcc_id]); - gam_regs.start_base_cntl_b = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_B[mpcc_id]); - gam_regs.start_base_cntl_g = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_G[mpcc_id]); - gam_regs.start_base_cntl_r = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_R[mpcc_id]); - - cm_helper_program_gamcor_xfer_func(mpc30->base.ctx, params, &gam_regs); -} - - -static void mpc3_program_ogam_pwl( - struct mpc *mpc, int mpcc_id, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - if (is_rgb_equal(rgb, num)) { - for (i = 0 ; i < num; i++) - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); - } else { - - REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id], - MPCC_OGAM_LUT_WRITE_COLOR_MASK, 4); - - for (i = 0 ; i < num; i++) - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); - - REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0); - - REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id], - MPCC_OGAM_LUT_WRITE_COLOR_MASK, 2); - - for (i = 0 ; i < num; i++) - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg); - - REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0); - - REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id], - MPCC_OGAM_LUT_WRITE_COLOR_MASK, 1); - - for (i = 0 ; i < num; i++) - REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, 
MPCC_OGAM_LUT_DATA, rgb[i].blue_reg); - - } - -} - -void mpc3_set_output_gamma( - struct mpc *mpc, - int mpcc_id, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - if (mpc->ctx->dc->debug.cm_in_bypass) { - REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); - return; - } - - if (params == NULL) { //disable OGAM - REG_SET(MPCC_OGAM_CONTROL[mpcc_id], 0, MPCC_OGAM_MODE, 0); - return; - } - //enable OGAM - REG_SET(MPCC_OGAM_CONTROL[mpcc_id], 0, MPCC_OGAM_MODE, 2); - - current_mode = mpc3_get_ogam_current(mpc, mpcc_id); - if (current_mode == LUT_BYPASS) - next_mode = LUT_RAM_A; - else if (current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - mpc3_power_on_ogam_lut(mpc, mpcc_id, true); - mpc3_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - mpc3_program_luta(mpc, mpcc_id, params); - else - mpc3_program_lutb(mpc, mpcc_id, params); - - mpc3_program_ogam_pwl( - mpc, mpcc_id, params->rgb_resulted, params->hw_points_num); - - /*we need to program 2 fields here as apposed to 1*/ - REG_UPDATE(MPCC_OGAM_CONTROL[mpcc_id], - MPCC_OGAM_SELECT, next_mode == LUT_RAM_A ? 0:1); - - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) - mpc3_power_on_ogam_lut(mpc, mpcc_id, false); -} - -void mpc3_set_denorm( - struct mpc *mpc, - int opp_id, - enum dc_color_depth output_depth) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - /* De-normalize Fixed U1.13 color data to different target bit depths. 0 is bypass*/ - int denorm_mode = 0; - - switch (output_depth) { - case COLOR_DEPTH_666: - denorm_mode = 1; - break; - case COLOR_DEPTH_888: - denorm_mode = 2; - break; - case COLOR_DEPTH_999: - denorm_mode = 3; - break; - case COLOR_DEPTH_101010: - denorm_mode = 4; - break; - case COLOR_DEPTH_111111: - denorm_mode = 5; - break; - case COLOR_DEPTH_121212: - denorm_mode = 6; - break; - case COLOR_DEPTH_141414: - case COLOR_DEPTH_161616: - default: - /* not valid used case! 
*/ - break; - } - - REG_UPDATE(DENORM_CONTROL[opp_id], - MPC_OUT_DENORM_MODE, denorm_mode); -} - -void mpc3_set_denorm_clamp( - struct mpc *mpc, - int opp_id, - struct mpc_denorm_clamp denorm_clamp) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - /*program min and max clamp values for the pixel components*/ - REG_UPDATE_2(DENORM_CONTROL[opp_id], - MPC_OUT_DENORM_CLAMP_MAX_R_CR, denorm_clamp.clamp_max_r_cr, - MPC_OUT_DENORM_CLAMP_MIN_R_CR, denorm_clamp.clamp_min_r_cr); - REG_UPDATE_2(DENORM_CLAMP_G_Y[opp_id], - MPC_OUT_DENORM_CLAMP_MAX_G_Y, denorm_clamp.clamp_max_g_y, - MPC_OUT_DENORM_CLAMP_MIN_G_Y, denorm_clamp.clamp_min_g_y); - REG_UPDATE_2(DENORM_CLAMP_B_CB[opp_id], - MPC_OUT_DENORM_CLAMP_MAX_B_CB, denorm_clamp.clamp_max_b_cb, - MPC_OUT_DENORM_CLAMP_MIN_B_CB, denorm_clamp.clamp_min_b_cb); -} - -static enum dc_lut_mode mpc3_get_shaper_current(struct mpc *mpc, uint32_t rmu_idx) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_GET(SHAPER_CONTROL[rmu_idx], MPC_RMU_SHAPER_LUT_MODE_CURRENT, &state_mode); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -static void mpc3_configure_shaper_lut( - struct mpc *mpc, - bool is_ram_a, - uint32_t rmu_idx) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_UPDATE(SHAPER_LUT_WRITE_EN_MASK[rmu_idx], - MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(SHAPER_LUT_WRITE_EN_MASK[rmu_idx], - MPC_RMU_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); - REG_SET(SHAPER_LUT_INDEX[rmu_idx], 0, MPC_RMU_SHAPER_LUT_INDEX, 0); -} - -static void mpc3_program_shaper_luta_settings( - struct mpc *mpc, - const struct pwl_params *params, - uint32_t rmu_idx) -{ - const struct gamma_curve *curve; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_SET_2(SHAPER_RAMA_START_CNTL_B[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(SHAPER_RAMA_START_CNTL_G[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(SHAPER_RAMA_START_CNTL_R[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - - REG_SET_2(SHAPER_RAMA_END_CNTL_B[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - REG_SET_2(SHAPER_RAMA_END_CNTL_G[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y); - REG_SET_2(SHAPER_RAMA_END_CNTL_R[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(SHAPER_RAMA_REGION_0_1[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - 
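Each SHAPER_RAMA_REGION_<n>_<n+1> register written in this sequence carries the curve data for two consecutive regions (a LUT offset and a segment count per region), which is why curve advances by two entries after every REG_SET_4. A minimal standalone sketch of that pairing, using hypothetical field positions since the real ones come from the MPC_RMU_SHAPER_RAMA_EXP_REGION* shift/mask definitions:

#include <stdint.h>

struct region_pt {
	uint32_t offset;        /* LUT offset for this region */
	uint32_t segments_num;  /* number-of-segments code for this region */
};

/* Field positions below are illustrative only. */
static uint32_t pack_region_pair(const struct region_pt *c)
{
	return  (c[0].offset        & 0x1ff)        |
		((c[0].segments_num & 0x7)   << 12) |
		((c[1].offset       & 0x1ff) << 16) |
		((c[1].segments_num & 0x7)   << 28);
}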
REG_SET_4(SHAPER_RAMA_REGION_2_3[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_4_5[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_6_7[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_8_9[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_10_11[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_12_13[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_14_15[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_16_17[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_18_19[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_20_21[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_22_23[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - 
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_24_25[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_26_27[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_28_29[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_30_31[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMA_REGION_32_33[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); -} - -static void mpc3_program_shaper_lutb_settings( - struct mpc *mpc, - const struct pwl_params *params, - uint32_t rmu_idx) -{ - const struct gamma_curve *curve; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_SET_2(SHAPER_RAMB_START_CNTL_B[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(SHAPER_RAMB_START_CNTL_G[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(SHAPER_RAMB_START_CNTL_R[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - - REG_SET_2(SHAPER_RAMB_END_CNTL_B[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - REG_SET_2(SHAPER_RAMB_END_CNTL_G[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y); - REG_SET_2(SHAPER_RAMB_END_CNTL_R[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x, - MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(SHAPER_RAMB_REGION_0_1[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, 
curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_2_3[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_4_5[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_6_7[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_8_9[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_10_11[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_12_13[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_14_15[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_16_17[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_18_19[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_20_21[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_22_23[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, 
curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_24_25[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_26_27[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_28_29[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_30_31[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(SHAPER_RAMB_REGION_32_33[rmu_idx], 0, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); -} - - -static void mpc3_program_shaper_lut( - struct mpc *mpc, - const struct pwl_result_data *rgb, - uint32_t num, - uint32_t rmu_idx) -{ - uint32_t i, red, green, blue; - uint32_t red_delta, green_delta, blue_delta; - uint32_t red_value, green_value, blue_value; - - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - for (i = 0 ; i < num; i++) { - - red = rgb[i].red_reg; - green = rgb[i].green_reg; - blue = rgb[i].blue_reg; - - red_delta = rgb[i].delta_red_reg; - green_delta = rgb[i].delta_green_reg; - blue_delta = rgb[i].delta_blue_reg; - - red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); - green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); - blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); - - REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, red_value); - REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, green_value); - REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, blue_value); - } - -} - -static void mpc3_power_on_shaper_3dlut( - struct mpc *mpc, - uint32_t rmu_idx, - bool power_on) -{ - uint32_t power_status_shaper = 2; - uint32_t power_status_3dlut = 2; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - int max_retries = 10; - - if (rmu_idx == 0) { - REG_SET(MPC_RMU_MEM_PWR_CTRL, 0, - MPC_RMU0_MEM_PWR_DIS, power_on == true ? 
1:0); - /* wait for memory to fully power up */ - if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { - REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, 0, 1, max_retries); - REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, 0, 1, max_retries); - } - - /*read status is not mandatory, it is just for debugging*/ - REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, &power_status_shaper); - REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, &power_status_3dlut); - } else if (rmu_idx == 1) { - REG_SET(MPC_RMU_MEM_PWR_CTRL, 0, - MPC_RMU1_MEM_PWR_DIS, power_on == true ? 1:0); - if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { - REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, 0, 1, max_retries); - REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, 0, 1, max_retries); - } - - REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, &power_status_shaper); - REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, &power_status_3dlut); - } - /*TODO Add rmu_idx == 2 for SIENNA_CICHLID */ - if (power_status_shaper != 0 && power_on == true) - BREAK_TO_DEBUGGER(); - - if (power_status_3dlut != 0 && power_on == true) - BREAK_TO_DEBUGGER(); -} - - - -bool mpc3_program_shaper( - struct mpc *mpc, - const struct pwl_params *params, - uint32_t rmu_idx) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - if (params == NULL) { - REG_SET(SHAPER_CONTROL[rmu_idx], 0, MPC_RMU_SHAPER_LUT_MODE, 0); - return false; - } - - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) - mpc3_power_on_shaper_3dlut(mpc, rmu_idx, true); - - current_mode = mpc3_get_shaper_current(mpc, rmu_idx); - - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - mpc3_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, rmu_idx); - - if (next_mode == LUT_RAM_A) - mpc3_program_shaper_luta_settings(mpc, params, rmu_idx); - else - mpc3_program_shaper_lutb_settings(mpc, params, rmu_idx); - - mpc3_program_shaper_lut( - mpc, params->rgb_resulted, params->hw_points_num, rmu_idx); - - REG_SET(SHAPER_CONTROL[rmu_idx], 0, MPC_RMU_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); - mpc3_power_on_shaper_3dlut(mpc, rmu_idx, false); - - return true; -} - -static void mpc3_set_3dlut_mode( - struct mpc *mpc, - enum dc_lut_mode mode, - bool is_color_channel_12bits, - bool is_lut_size17x17x17, - uint32_t rmu_idx) -{ - uint32_t lut_mode; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - if (mode == LUT_BYPASS) - lut_mode = 0; - else if (mode == LUT_RAM_A) - lut_mode = 1; - else - lut_mode = 2; - - REG_UPDATE_2(RMU_3DLUT_MODE[rmu_idx], - MPC_RMU_3DLUT_MODE, lut_mode, - MPC_RMU_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); -} - -static enum dc_lut_mode get3dlut_config( - struct mpc *mpc, - bool *is_17x17x17, - bool *is_12bits_color_channel, - int rmu_idx) -{ - uint32_t i_mode, i_enable_10bits, lut_size; - enum dc_lut_mode mode; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_GET(RMU_3DLUT_MODE[rmu_idx], - MPC_RMU_3DLUT_MODE_CURRENT, &i_mode); - - REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx], - MPC_RMU_3DLUT_30BIT_EN, &i_enable_10bits); - - switch (i_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - if (i_enable_10bits > 0) - *is_12bits_color_channel = false; - else - *is_12bits_color_channel = true; - - REG_GET(RMU_3DLUT_MODE[rmu_idx], MPC_RMU_3DLUT_SIZE, &lut_size); - - if (lut_size == 0) - *is_17x17x17 = true; - else - *is_17x17x17 = false; - - return mode; -} - -static void mpc3_select_3dlut_ram( - struct mpc *mpc, - enum dc_lut_mode mode, - bool is_color_channel_12bits, - uint32_t rmu_idx) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_UPDATE_2(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx], - MPC_RMU_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, - MPC_RMU_3DLUT_30BIT_EN, is_color_channel_12bits == true ? 0:1); -} - -static void mpc3_select_3dlut_ram_mask( - struct mpc *mpc, - uint32_t ram_selection_mask, - uint32_t rmu_idx) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_UPDATE(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx], MPC_RMU_3DLUT_WRITE_EN_MASK, - ram_selection_mask); - REG_SET(RMU_3DLUT_INDEX[rmu_idx], 0, MPC_RMU_3DLUT_INDEX, 0); -} - -static void mpc3_set3dlut_ram12( - struct mpc *mpc, - const struct dc_rgb *lut, - uint32_t entries, - uint32_t rmu_idx) -{ - uint32_t i, red, green, blue, red1, green1, blue1; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - for (i = 0 ; i < entries; i += 2) { - red = lut[i].red<<4; - green = lut[i].green<<4; - blue = lut[i].blue<<4; - red1 = lut[i+1].red<<4; - green1 = lut[i+1].green<<4; - blue1 = lut[i+1].blue<<4; - - REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0, - MPC_RMU_3DLUT_DATA0, red, - MPC_RMU_3DLUT_DATA1, red1); - - REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0, - MPC_RMU_3DLUT_DATA0, green, - MPC_RMU_3DLUT_DATA1, green1); - - REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0, - MPC_RMU_3DLUT_DATA0, blue, - MPC_RMU_3DLUT_DATA1, blue1); - } -} - -static void mpc3_set3dlut_ram10( - struct mpc *mpc, - const struct dc_rgb *lut, - uint32_t entries, - uint32_t rmu_idx) -{ - uint32_t i, red, green, blue, value; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - for (i = 0; i < entries; i++) { - red = lut[i].red; - green = lut[i].green; - blue = lut[i].blue; - //should we shift red 22bit and green 12? 
ask Nvenko - value = (red<<20) | (green<<10) | blue; - - REG_SET(RMU_3DLUT_DATA_30BIT[rmu_idx], 0, MPC_RMU_3DLUT_DATA_30BIT, value); - } - -} - - -void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst) -{ - mpcc->mpcc_id = mpcc_inst; - mpcc->dpp_id = 0xf; - mpcc->mpcc_bot = NULL; - mpcc->blnd_cfg.overlap_only = false; - mpcc->blnd_cfg.global_alpha = 0xff; - mpcc->blnd_cfg.global_gain = 0xff; - mpcc->blnd_cfg.background_color_bpc = 4; - mpcc->blnd_cfg.bottom_gain_mode = 0; - mpcc->blnd_cfg.top_gain = 0x1f000; - mpcc->blnd_cfg.bottom_inside_gain = 0x1f000; - mpcc->blnd_cfg.bottom_outside_gain = 0x1f000; - mpcc->sm_cfg.enable = false; - mpcc->shared_bottom = false; -} - -static void program_gamut_remap( - struct dcn30_mpc *mpc30, - int mpcc_id, - const uint16_t *regval, - int select) -{ - uint16_t selection = 0; - struct color_matrices_reg gam_regs; - - if (regval == NULL || select == GAMUT_REMAP_BYPASS) { - REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0, - MPCC_GAMUT_REMAP_MODE, GAMUT_REMAP_BYPASS); - return; - } - switch (select) { - case GAMUT_REMAP_COEFF: - selection = 1; - break; - /*this corresponds to GAMUT_REMAP coefficients set B - * we don't have common coefficient sets in dcn3ag/dcn3 - */ - case GAMUT_REMAP_COMA_COEFF: - selection = 2; - break; - default: - break; - } - - gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A; - gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A; - gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A; - gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A; - - - if (select == GAMUT_REMAP_COEFF) { - gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]); - gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]); - - cm_helper_program_color_matrices( - mpc30->base.ctx, - regval, - &gam_regs); - - } else if (select == GAMUT_REMAP_COMA_COEFF) { - - gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]); - gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]); - - cm_helper_program_color_matrices( - mpc30->base.ctx, - regval, - &gam_regs); - - } - //select coefficient set to use - REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0, - MPCC_GAMUT_REMAP_MODE, selection); -} - -void mpc3_set_gamut_remap( - struct mpc *mpc, - int mpcc_id, - const struct mpc_grph_gamut_adjustment *adjust) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - int i = 0; - int gamut_mode; - - if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) - program_gamut_remap(mpc30, mpcc_id, NULL, GAMUT_REMAP_BYPASS); - else { - struct fixed31_32 arr_matrix[12]; - uint16_t arr_reg_val[12]; - - for (i = 0; i < 12; i++) - arr_matrix[i] = adjust->temperature_matrix[i]; - - convert_float_matrix( - arr_reg_val, arr_matrix, 12); - - //current coefficient set in use - REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, &gamut_mode); - - if (gamut_mode == 0) - gamut_mode = 1; //use coefficient set A - else if (gamut_mode == 1) - gamut_mode = 2; - else - gamut_mode = 1; - - program_gamut_remap(mpc30, mpcc_id, arr_reg_val, gamut_mode); - } -} - -static void read_gamut_remap(struct dcn30_mpc *mpc30, - int mpcc_id, - uint16_t *regval, - uint32_t *select) -{ - struct color_matrices_reg gam_regs; - - //current coefficient set in use - REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select); - - gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A; - gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A; - gam_regs.shifts.csc_c12 = 
mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A; - gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A; - - if (*select == GAMUT_REMAP_COEFF) { - gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]); - gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]); - - cm_helper_read_color_matrices( - mpc30->base.ctx, - regval, - &gam_regs); - - } else if (*select == GAMUT_REMAP_COMA_COEFF) { - - gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]); - gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]); - - cm_helper_read_color_matrices( - mpc30->base.ctx, - regval, - &gam_regs); - - } - -} - -void mpc3_get_gamut_remap(struct mpc *mpc, - int mpcc_id, - struct mpc_grph_gamut_adjustment *adjust) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - uint16_t arr_reg_val[12] = {0}; - int select; - - read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select); - - if (select == GAMUT_REMAP_BYPASS) { - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - return; - } - - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - convert_hw_matrix(adjust->temperature_matrix, - arr_reg_val, ARRAY_SIZE(arr_reg_val)); -} - -bool mpc3_program_3dlut( - struct mpc *mpc, - const struct tetrahedral_params *params, - int rmu_idx) -{ - enum dc_lut_mode mode; - bool is_17x17x17; - bool is_12bits_color_channel; - const struct dc_rgb *lut0; - const struct dc_rgb *lut1; - const struct dc_rgb *lut2; - const struct dc_rgb *lut3; - int lut_size0; - int lut_size; - - if (params == NULL) { - mpc3_set_3dlut_mode(mpc, LUT_BYPASS, false, false, rmu_idx); - return false; - } - mpc3_power_on_shaper_3dlut(mpc, rmu_idx, true); - - mode = get3dlut_config(mpc, &is_17x17x17, &is_12bits_color_channel, rmu_idx); - - if (mode == LUT_BYPASS || mode == LUT_RAM_B) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - - is_17x17x17 = !params->use_tetrahedral_9; - is_12bits_color_channel = params->use_12bits; - if (is_17x17x17) { - lut0 = params->tetrahedral_17.lut0; - lut1 = params->tetrahedral_17.lut1; - lut2 = params->tetrahedral_17.lut2; - lut3 = params->tetrahedral_17.lut3; - lut_size0 = sizeof(params->tetrahedral_17.lut0)/ - sizeof(params->tetrahedral_17.lut0[0]); - lut_size = sizeof(params->tetrahedral_17.lut1)/ - sizeof(params->tetrahedral_17.lut1[0]); - } else { - lut0 = params->tetrahedral_9.lut0; - lut1 = params->tetrahedral_9.lut1; - lut2 = params->tetrahedral_9.lut2; - lut3 = params->tetrahedral_9.lut3; - lut_size0 = sizeof(params->tetrahedral_9.lut0)/ - sizeof(params->tetrahedral_9.lut0[0]); - lut_size = sizeof(params->tetrahedral_9.lut1)/ - sizeof(params->tetrahedral_9.lut1[0]); - } - - mpc3_select_3dlut_ram(mpc, mode, - is_12bits_color_channel, rmu_idx); - mpc3_select_3dlut_ram_mask(mpc, 0x1, rmu_idx); - if (is_12bits_color_channel) - mpc3_set3dlut_ram12(mpc, lut0, lut_size0, rmu_idx); - else - mpc3_set3dlut_ram10(mpc, lut0, lut_size0, rmu_idx); - - mpc3_select_3dlut_ram_mask(mpc, 0x2, rmu_idx); - if (is_12bits_color_channel) - mpc3_set3dlut_ram12(mpc, lut1, lut_size, rmu_idx); - else - mpc3_set3dlut_ram10(mpc, lut1, lut_size, rmu_idx); - - mpc3_select_3dlut_ram_mask(mpc, 0x4, rmu_idx); - if (is_12bits_color_channel) - mpc3_set3dlut_ram12(mpc, lut2, lut_size, rmu_idx); - else - mpc3_set3dlut_ram10(mpc, lut2, lut_size, rmu_idx); - - mpc3_select_3dlut_ram_mask(mpc, 0x8, rmu_idx); - if (is_12bits_color_channel) - mpc3_set3dlut_ram12(mpc, lut3, lut_size, rmu_idx); - else - mpc3_set3dlut_ram10(mpc, lut3, lut_size, rmu_idx); - - mpc3_set_3dlut_mode(mpc, mode, is_12bits_color_channel, - 
is_17x17x17, rmu_idx); - - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) - mpc3_power_on_shaper_3dlut(mpc, rmu_idx, false); - - return true; -} - -void mpc3_set_output_csc( - struct mpc *mpc, - int opp_id, - const uint16_t *regval, - enum mpc_output_csc_mode ocsc_mode) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - struct color_matrices_reg ocsc_regs; - - REG_WRITE(MPC_OUT_CSC_COEF_FORMAT, 0); - - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - - if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) - return; - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - - ocsc_regs.shifts.csc_c11 = mpc30->mpc_shift->MPC_OCSC_C11_A; - ocsc_regs.masks.csc_c11 = mpc30->mpc_mask->MPC_OCSC_C11_A; - ocsc_regs.shifts.csc_c12 = mpc30->mpc_shift->MPC_OCSC_C12_A; - ocsc_regs.masks.csc_c12 = mpc30->mpc_mask->MPC_OCSC_C12_A; - - if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) { - ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]); - ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]); - } else { - ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); - ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); - } - cm_helper_program_color_matrices( - mpc30->base.ctx, - regval, - &ocsc_regs); -} - -void mpc3_set_ocsc_default( - struct mpc *mpc, - int opp_id, - enum dc_color_space color_space, - enum mpc_output_csc_mode ocsc_mode) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - uint32_t arr_size; - struct color_matrices_reg ocsc_regs; - const uint16_t *regval = NULL; - - REG_WRITE(MPC_OUT_CSC_COEF_FORMAT, 0); - - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) - return; - - regval = find_color_matrix(color_space, &arr_size); - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - - ocsc_regs.shifts.csc_c11 = mpc30->mpc_shift->MPC_OCSC_C11_A; - ocsc_regs.masks.csc_c11 = mpc30->mpc_mask->MPC_OCSC_C11_A; - ocsc_regs.shifts.csc_c12 = mpc30->mpc_shift->MPC_OCSC_C12_A; - ocsc_regs.masks.csc_c12 = mpc30->mpc_mask->MPC_OCSC_C12_A; - - - if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) { - ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]); - ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]); - } else { - ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); - ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); - } - - cm_helper_program_color_matrices( - mpc30->base.ctx, - regval, - &ocsc_regs); -} - -void mpc3_set_rmu_mux( - struct mpc *mpc, - int rmu_idx, - int value) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - if (rmu_idx == 0) - REG_UPDATE(MPC_RMU_CONTROL, MPC_RMU0_MUX, value); - else if (rmu_idx == 1) - REG_UPDATE(MPC_RMU_CONTROL, MPC_RMU1_MUX, value); - -} - -uint32_t mpc3_get_rmu_mux_status( - struct mpc *mpc, - int rmu_idx) -{ - uint32_t status = 0xf; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - if (rmu_idx == 0) - REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &status); - else if (rmu_idx == 1) - REG_GET(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, &status); - - return status; -} - -uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx) -{ - uint32_t rmu_status; - - //determine if this mpcc is already multiplexed to an RMU unit - rmu_status = mpc3_get_rmu_mux_status(mpc, rmu_idx); - if (rmu_status == mpcc_id) - //return rmu_idx of pre_acquired rmu unit - return rmu_idx; - - if (rmu_status == 0xf) {//rmu unit is disabled - mpc3_set_rmu_mux(mpc, rmu_idx, mpcc_id); - return rmu_idx; - } - - //no vacant RMU units or invalid parameters acquire_post_bldn_3dlut - return -1; -} - -static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) 
-{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - int rmu_idx; - uint32_t rmu_status; - int released_rmu = -1; - - for (rmu_idx = 0; rmu_idx < mpc30->num_rmu; rmu_idx++) { - rmu_status = mpc3_get_rmu_mux_status(mpc, rmu_idx); - if (rmu_status == mpcc_id) { - mpc3_set_rmu_mux(mpc, rmu_idx, 0xf); - released_rmu = rmu_idx; - break; - } - } - return released_rmu; - -} - -static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - int mpcc_id; - - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { - if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) { - REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3); - REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_LOW_PWR_MODE, 3); - } - - if (mpc30->mpc_mask->MPCC_OGAM_MEM_LOW_PWR_MODE) { - for (mpcc_id = 0; mpcc_id < mpc30->num_mpcc; mpcc_id++) - REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_LOW_PWR_MODE, 3); - } - } -} - -static void mpc3_read_mpcc_state( - struct mpc *mpc, - int mpcc_inst, - struct mpcc_state *s) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - uint32_t rmu_status = 0xf; - - REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); - REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); - REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); - REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, - MPCC_ALPHA_BLND_MODE, &s->alpha_mode, - MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, - MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); - REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, - MPCC_BUSY, &s->busy); - - /* Color blocks state */ - REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &rmu_status); - - if (rmu_status == mpcc_inst) { - REG_GET(SHAPER_CONTROL[0], - MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode); - REG_GET(RMU_3DLUT_MODE[0], - MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode); - REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0], - MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth); - REG_GET(RMU_3DLUT_MODE[0], - MPC_RMU_3DLUT_SIZE, &s->lut3d_size); - } else { - REG_GET(SHAPER_CONTROL[1], - MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode); - REG_GET(RMU_3DLUT_MODE[1], - MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode); - REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1], - MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth); - REG_GET(RMU_3DLUT_MODE[1], - MPC_RMU_3DLUT_SIZE, &s->lut3d_size); - } - - REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst], - MPCC_OGAM_MODE_CURRENT, &s->rgam_mode, - MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut); -} - -static const struct mpc_funcs dcn30_mpc_funcs = { - .read_mpcc_state = mpc3_read_mpcc_state, - .insert_plane = mpc1_insert_plane, - .remove_mpcc = mpc1_remove_mpcc, - .mpc_init = mpc3_mpc_init, - .mpc_init_single_inst = mpc3_mpc_init_single_inst, - .update_blending = mpc2_update_blending, - .cursor_lock = mpc1_cursor_lock, - .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, - .wait_for_idle = mpc2_assert_idle_mpcc, - .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect, - .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, - .set_denorm = mpc3_set_denorm, - .set_denorm_clamp = mpc3_set_denorm_clamp, - .set_output_csc = mpc3_set_output_csc, - .set_ocsc_default = mpc3_set_ocsc_default, - .set_output_gamma = mpc3_set_output_gamma, - .insert_plane_to_secondary = NULL, - .remove_mpcc_from_secondary = NULL, - .set_dwb_mux = mpc3_set_dwb_mux, - .disable_dwb_mux = mpc3_disable_dwb_mux, - .is_dwb_idle = mpc3_is_dwb_idle, - .set_gamut_remap = mpc3_set_gamut_remap, - 
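This dcn30_mpc_funcs table is how the rest of DC reaches the DCN3.0 routines: callers go through the struct mpc function pointers (installed by dcn30_mpc_construct below) rather than calling mpc3_* directly. An illustrative caller, not taken from this patch, assuming only the hooks named in this table:

/* Sketch of a caller driving the MPC color blocks through the ops table. */
static bool example_apply_mpc_color(struct mpc *mpc, int mpcc_id,
		const struct pwl_params *ogam_params,
		const struct pwl_params *shaper_params,
		uint32_t rmu_idx)
{
	bool shaper_ok = true;

	if (mpc->funcs->set_output_gamma)
		mpc->funcs->set_output_gamma(mpc, mpcc_id, ogam_params);

	if (mpc->funcs->program_shaper)
		shaper_ok = mpc->funcs->program_shaper(mpc, shaper_params, rmu_idx);

	return shaper_ok;
}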
.program_shaper = mpc3_program_shaper, - .acquire_rmu = mpcc3_acquire_rmu, - .program_3dlut = mpc3_program_3dlut, - .release_rmu = mpcc3_release_rmu, - .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, - .get_mpc_out_mux = mpc1_get_mpc_out_mux, - .set_bg_color = mpc1_set_bg_color, - .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, -}; - -void dcn30_mpc_construct(struct dcn30_mpc *mpc30, - struct dc_context *ctx, - const struct dcn30_mpc_registers *mpc_regs, - const struct dcn30_mpc_shift *mpc_shift, - const struct dcn30_mpc_mask *mpc_mask, - int num_mpcc, - int num_rmu) -{ - int i; - - mpc30->base.ctx = ctx; - - mpc30->base.funcs = &dcn30_mpc_funcs; - - mpc30->mpc_regs = mpc_regs; - mpc30->mpc_shift = mpc_shift; - mpc30->mpc_mask = mpc_mask; - - mpc30->mpcc_in_use_mask = 0; - mpc30->num_mpcc = num_mpcc; - mpc30->num_rmu = num_rmu; - - for (i = 0; i < MAX_MPCC; i++) - mpc3_init_mpcc(&mpc30->base.mpcc_array[i], i); -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h deleted file mode 100644 index ce93003dae0113..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h +++ /dev/null @@ -1,1098 +0,0 @@ -/* Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_MPCC_DCN30_H__ -#define __DC_MPCC_DCN30_H__ - -#include "dcn20/dcn20_mpc.h" - -#define MAX_RMU 3 - -#define TO_DCN30_MPC(mpc_base) \ - container_of(mpc_base, struct dcn30_mpc, base) - -#ifdef SRII_MPC_RMU -#undef SRII_MPC_RMU - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#endif - - -#define MPC_REG_LIST_DCN3_0(inst)\ - MPC_COMMON_REG_LIST_DCN1_0(inst),\ - SRII(MPCC_TOP_GAIN, MPCC, inst),\ - SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst),\ - SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst),\ - SRII(MPCC_MEM_PWR_CTRL, MPCC, inst),\ - SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst), \ - SRII(MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_OGAM, inst),\ - SRII(MPCC_GAMUT_REMAP_MODE, MPCC_OGAM, inst),\ - SRII(MPC_GAMUT_REMAP_C11_C12_A, MPCC_OGAM, inst),\ - SRII(MPC_GAMUT_REMAP_C33_C34_A, MPCC_OGAM, inst),\ - SRII(MPC_GAMUT_REMAP_C11_C12_B, MPCC_OGAM, inst),\ - SRII(MPC_GAMUT_REMAP_C33_C34_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_OFFSET_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_OFFSET_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_OFFSET_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_B, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst),\ - SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) - -#define MPC_OUT_MUX_REG_LIST_DCN3_0(inst) \ - MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst),\ - SRII(CSC_MODE, MPC_OUT, inst),\ - 
SRII(CSC_C11_C12_A, MPC_OUT, inst),\ - SRII(CSC_C33_C34_A, MPC_OUT, inst),\ - SRII(CSC_C11_C12_B, MPC_OUT, inst),\ - SRII(CSC_C33_C34_B, MPC_OUT, inst),\ - SRII(DENORM_CONTROL, MPC_OUT, inst),\ - SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\ - SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), \ - SR(MPC_OUT_CSC_COEF_FORMAT) - -#define MPC_RMU_GLOBAL_REG_LIST_DCN3AG \ - SR(MPC_RMU_CONTROL),\ - SR(MPC_RMU_MEM_PWR_CTRL) - -#define MPC_RMU_REG_LIST_DCN3AG(inst) \ - SRII(SHAPER_CONTROL, MPC_RMU, inst),\ - SRII(SHAPER_OFFSET_R, MPC_RMU, inst),\ - SRII(SHAPER_OFFSET_G, MPC_RMU, inst),\ - SRII(SHAPER_OFFSET_B, MPC_RMU, inst),\ - SRII(SHAPER_SCALE_R, MPC_RMU, inst),\ - SRII(SHAPER_SCALE_G_B, MPC_RMU, inst),\ - SRII(SHAPER_LUT_INDEX, MPC_RMU, inst),\ - SRII(SHAPER_LUT_DATA, MPC_RMU, inst),\ - SRII(SHAPER_LUT_WRITE_EN_MASK, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_START_CNTL_B, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_START_CNTL_G, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_START_CNTL_R, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_END_CNTL_B, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_END_CNTL_G, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_END_CNTL_R, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_0_1, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_2_3, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_4_5, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_6_7, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_8_9, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_10_11, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_12_13, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_14_15, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_16_17, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_18_19, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_20_21, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_22_23, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_24_25, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_26_27, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_28_29, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_30_31, MPC_RMU, inst),\ - SRII(SHAPER_RAMA_REGION_32_33, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_START_CNTL_B, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_START_CNTL_G, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_START_CNTL_R, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_END_CNTL_B, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_END_CNTL_G, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_END_CNTL_R, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_0_1, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_2_3, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_4_5, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_6_7, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_8_9, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_10_11, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_12_13, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_14_15, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_16_17, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_18_19, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_20_21, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_22_23, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_24_25, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_26_27, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_28_29, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_30_31, MPC_RMU, inst),\ - SRII(SHAPER_RAMB_REGION_32_33, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_MODE, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_INDEX, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_DATA, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_DATA_30BIT, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_READ_WRITE_CONTROL, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_OUT_NORM_FACTOR, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_OUT_OFFSET_R, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_OUT_OFFSET_G, MPC_RMU, inst),\ - SRII_MPC_RMU(3DLUT_OUT_OFFSET_B, MPC_RMU, inst) - - 
-#define MPC_DWB_MUX_REG_LIST_DCN3_0(inst) \ - SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst) - -#define MPC_REG_VARIABLE_LIST_DCN3_0 \ - MPC_REG_VARIABLE_LIST_DCN2_0 \ - uint32_t DWB_MUX[MAX_DWB]; \ - uint32_t MPCC_GAMUT_REMAP_COEF_FORMAT[MAX_MPCC]; \ - uint32_t MPCC_GAMUT_REMAP_MODE[MAX_MPCC]; \ - uint32_t MPC_GAMUT_REMAP_C11_C12_A[MAX_MPCC]; \ - uint32_t MPC_GAMUT_REMAP_C33_C34_A[MAX_MPCC]; \ - uint32_t MPC_GAMUT_REMAP_C11_C12_B[MAX_MPCC]; \ - uint32_t MPC_GAMUT_REMAP_C33_C34_B[MAX_MPCC]; \ - uint32_t MPC_RMU_CONTROL; \ - uint32_t MPC_RMU_MEM_PWR_CTRL; \ - uint32_t SHAPER_CONTROL[MAX_RMU]; \ - uint32_t SHAPER_OFFSET_R[MAX_RMU]; \ - uint32_t SHAPER_OFFSET_G[MAX_RMU]; \ - uint32_t SHAPER_OFFSET_B[MAX_RMU]; \ - uint32_t SHAPER_SCALE_R[MAX_RMU]; \ - uint32_t SHAPER_SCALE_G_B[MAX_RMU]; \ - uint32_t SHAPER_LUT_INDEX[MAX_RMU]; \ - uint32_t SHAPER_LUT_DATA[MAX_RMU]; \ - uint32_t SHAPER_LUT_WRITE_EN_MASK[MAX_RMU]; \ - uint32_t SHAPER_RAMA_START_CNTL_B[MAX_RMU]; \ - uint32_t SHAPER_RAMA_START_CNTL_G[MAX_RMU]; \ - uint32_t SHAPER_RAMA_START_CNTL_R[MAX_RMU]; \ - uint32_t SHAPER_RAMA_END_CNTL_B[MAX_RMU]; \ - uint32_t SHAPER_RAMA_END_CNTL_G[MAX_RMU]; \ - uint32_t SHAPER_RAMA_END_CNTL_R[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_0_1[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_2_3[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_4_5[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_6_7[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_8_9[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_10_11[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_12_13[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_14_15[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_16_17[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_18_19[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_20_21[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_22_23[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_24_25[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_26_27[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_28_29[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_30_31[MAX_RMU]; \ - uint32_t SHAPER_RAMA_REGION_32_33[MAX_RMU]; \ - uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_OFFSET_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_OFFSET_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_OFFSET_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_R[MAX_MPCC];\ - uint32_t SHAPER_RAMB_START_CNTL_B[MAX_RMU]; \ - uint32_t SHAPER_RAMB_START_CNTL_G[MAX_RMU]; \ - uint32_t SHAPER_RAMB_START_CNTL_R[MAX_RMU]; \ - uint32_t SHAPER_RAMB_END_CNTL_B[MAX_RMU]; \ - uint32_t SHAPER_RAMB_END_CNTL_G[MAX_RMU]; \ - uint32_t SHAPER_RAMB_END_CNTL_R[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_0_1[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_2_3[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_4_5[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_6_7[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_8_9[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_10_11[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_12_13[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_14_15[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_16_17[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_18_19[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_20_21[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_22_23[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_24_25[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_26_27[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_28_29[MAX_RMU]; \ - uint32_t SHAPER_RAMB_REGION_30_31[MAX_RMU]; \ - uint32_t 
SHAPER_RAMB_REGION_32_33[MAX_RMU]; \ - uint32_t RMU_3DLUT_MODE[MAX_RMU]; \ - uint32_t RMU_3DLUT_INDEX[MAX_RMU]; \ - uint32_t RMU_3DLUT_DATA[MAX_RMU]; \ - uint32_t RMU_3DLUT_DATA_30BIT[MAX_RMU]; \ - uint32_t RMU_3DLUT_READ_WRITE_CONTROL[MAX_RMU]; \ - uint32_t RMU_3DLUT_OUT_NORM_FACTOR[MAX_RMU]; \ - uint32_t RMU_3DLUT_OUT_OFFSET_R[MAX_RMU]; \ - uint32_t RMU_3DLUT_OUT_OFFSET_G[MAX_RMU]; \ - uint32_t RMU_3DLUT_OUT_OFFSET_B[MAX_RMU]; \ - uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_CONTROL[MAX_MPCC]; \ - uint32_t MPCC_OGAM_LUT_CONTROL[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_OFFSET_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_OFFSET_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_OFFSET_R[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_R[MAX_MPCC]; \ - uint32_t MPC_OUT_CSC_COEF_FORMAT - -#define MPC_REG_VARIABLE_LIST_DCN32 \ - uint32_t MPCC_MOVABLE_CM_LOCATION_CONTROL[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_CONTROL[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_OFFSET_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_OFFSET_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_OFFSET_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_SCALE_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_SCALE_G_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_LUT_INDEX[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_LUT_DATA[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_START_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_START_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_START_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_END_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_END_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_END_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_0_1[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_2_3[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_4_5[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_6_7[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_8_9[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_10_11[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_12_13[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_14_15[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_16_17[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_18_19[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_20_21[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_22_23[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_24_25[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_26_27[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_28_29[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_30_31[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMA_REGION_32_33[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_START_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_START_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_START_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_END_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_END_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_END_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_0_1[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_2_3[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_4_5[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_6_7[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_8_9[MAX_MPCC]; \ - uint32_t 
MPCC_MCM_SHAPER_RAMB_REGION_10_11[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_12_13[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_14_15[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_16_17[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_18_19[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_20_21[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_22_23[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_24_25[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_26_27[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_28_29[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_30_31[MAX_MPCC]; \ - uint32_t MPCC_MCM_SHAPER_RAMB_REGION_32_33[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_MODE[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_INDEX[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_DATA[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_DATA_30BIT[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_READ_WRITE_CONTROL[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_OUT_NORM_FACTOR[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_OUT_OFFSET_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_OUT_OFFSET_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_3DLUT_OUT_OFFSET_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_CONTROL[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_LUT_INDEX[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_LUT_DATA[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_LUT_CONTROL[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL1_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL2_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL1_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL2_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL1_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL2_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_OFFSET_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_OFFSET_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_OFFSET_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_0_1[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_2_3[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_4_5[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_6_7[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_8_9[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_10_11[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_12_13[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_14_15[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_16_17[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_18_19[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_20_21[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_22_23[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_24_25[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_26_27[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_28_29[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_30_31[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMA_REGION_32_33[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_START_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_START_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_START_CNTL_R[MAX_MPCC]; \ - uint32_t 
MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL1_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL2_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL1_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL2_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL1_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL2_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_OFFSET_B[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_OFFSET_G[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_OFFSET_R[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_0_1[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_2_3[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_4_5[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_6_7[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_8_9[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_10_11[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_12_13[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_14_15[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_16_17[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_18_19[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_20_21[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_22_23[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_24_25[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_26_27[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_28_29[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_30_31[MAX_MPCC]; \ - uint32_t MPCC_MCM_1DLUT_RAMB_REGION_32_33[MAX_MPCC]; \ - uint32_t MPCC_MCM_MEM_PWR_CTRL[MAX_MPCC] - -#define MPC_COMMON_MASK_SH_LIST_DCN3_0(mask_sh) \ - MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ - SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ - SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ - SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ - SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ - SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ - SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ - SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_STATE, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ - SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\ - SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, 
mask_sh),\ - SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\ - SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU1_MUX, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, mask_sh), \ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ - SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\ - SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\ - SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\ - SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\ 
- SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\ - SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),\ - SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\ - SF(MPC_RMU0_SHAPER_OFFSET_G, MPC_RMU_SHAPER_OFFSET_G, mask_sh),\ - SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_FORCE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_DIS, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, mask_sh),\ - SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) - - -#define MPC_COMMON_MASK_SH_LIST_DCN30(mask_sh) \ - MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ - SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ - SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ - SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ - SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ - SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ - SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ - SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_LOW_PWR_MODE, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_STATE, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ - 
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ - SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\ - SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, mask_sh),\ - SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\ - SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU1_MUX, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, mask_sh), \ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\ - /*SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),*/\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ - SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\ - SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\ - /*SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),*/\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\ - 
SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\ - /*SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),*/\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\ - SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\ - SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\ - /*SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),*/\ - SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\ - SF(MPC_RMU0_SHAPER_OFFSET_G, MPC_RMU_SHAPER_OFFSET_G, mask_sh),\ - SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\ - /*SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),*/\ - SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_FORCE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_DIS, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_LOW_PWR_MODE, mask_sh),\ - SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_MODE_CURRENT, mask_sh),\ - SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) - - -#define MPC_REG_FIELD_LIST_DCN3_0(type) \ - MPC_REG_FIELD_LIST_DCN2_0(type) \ - type MPC_DWB0_MUX;\ - type MPC_DWB0_MUX_STATUS;\ - type MPC_OUT_RATE_CONTROL;\ - type MPC_OUT_RATE_CONTROL_DISABLE;\ - type MPC_OUT_FLOW_CONTROL_MODE;\ - type MPC_OUT_FLOW_CONTROL_COUNT; \ - type MPCC_GAMUT_REMAP_MODE; \ - type MPCC_GAMUT_REMAP_MODE_CURRENT;\ - type MPCC_GAMUT_REMAP_COEF_FORMAT; \ - type MPCC_GAMUT_REMAP_C11_A; \ - type MPCC_GAMUT_REMAP_C12_A; \ - type MPC_RMU0_MUX; \ - type MPC_RMU1_MUX; \ - type MPC_RMU0_MUX_STATUS; \ - type MPC_RMU1_MUX_STATUS; \ - type MPC_RMU0_MEM_PWR_FORCE;\ - type MPC_RMU0_MEM_PWR_DIS;\ - type 
MPC_RMU0_MEM_LOW_PWR_MODE;\ - type MPC_RMU0_SHAPER_MEM_PWR_STATE;\ - type MPC_RMU0_3DLUT_MEM_PWR_STATE;\ - type MPC_RMU1_MEM_PWR_FORCE;\ - type MPC_RMU1_MEM_PWR_DIS;\ - type MPC_RMU1_MEM_LOW_PWR_MODE;\ - type MPC_RMU1_SHAPER_MEM_PWR_STATE;\ - type MPC_RMU1_3DLUT_MEM_PWR_STATE;\ - type MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; \ - type MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;\ - type MPCC_OGAM_RAMA_OFFSET_B;\ - type MPCC_OGAM_RAMA_OFFSET_G;\ - type MPCC_OGAM_RAMA_OFFSET_R;\ - type MPCC_OGAM_SELECT; \ - type MPCC_OGAM_PWL_DISABLE; \ - type MPCC_OGAM_MODE_CURRENT; \ - type MPCC_OGAM_SELECT_CURRENT; \ - type MPCC_OGAM_LUT_WRITE_COLOR_MASK; \ - type MPCC_OGAM_LUT_READ_COLOR_SEL; \ - type MPCC_OGAM_LUT_READ_DBG; \ - type MPCC_OGAM_LUT_HOST_SEL; \ - type MPCC_OGAM_LUT_CONFIG_MODE; \ - type MPCC_OGAM_LUT_STATUS; \ - type MPCC_OGAM_RAMA_START_BASE_CNTL_B;\ - type MPCC_OGAM_MEM_LOW_PWR_MODE;\ - type MPCC_OGAM_MEM_PWR_STATE;\ - type MPC_RMU_3DLUT_MODE; \ - type MPC_RMU_3DLUT_SIZE; \ - type MPC_RMU_3DLUT_MODE_CURRENT; \ - type MPC_RMU_3DLUT_WRITE_EN_MASK;\ - type MPC_RMU_3DLUT_RAM_SEL;\ - type MPC_RMU_3DLUT_30BIT_EN;\ - type MPC_RMU_3DLUT_CONFIG_STATUS;\ - type MPC_RMU_3DLUT_READ_SEL;\ - type MPC_RMU_3DLUT_INDEX;\ - type MPC_RMU_3DLUT_DATA0;\ - type MPC_RMU_3DLUT_DATA1;\ - type MPC_RMU_3DLUT_DATA_30BIT;\ - type MPC_RMU_SHAPER_LUT_MODE;\ - type MPC_RMU_SHAPER_LUT_MODE_CURRENT;\ - type MPC_RMU_SHAPER_OFFSET_R;\ - type MPC_RMU_SHAPER_OFFSET_G;\ - type MPC_RMU_SHAPER_OFFSET_B;\ - type MPC_RMU_SHAPER_SCALE_R;\ - type MPC_RMU_SHAPER_SCALE_G;\ - type MPC_RMU_SHAPER_SCALE_B;\ - type MPC_RMU_SHAPER_LUT_INDEX;\ - type MPC_RMU_SHAPER_LUT_DATA;\ - type MPC_RMU_SHAPER_LUT_WRITE_EN_MASK;\ - type MPC_RMU_SHAPER_LUT_WRITE_SEL;\ - type MPC_RMU_SHAPER_CONFIG_STATUS;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\ - type MPC_RMU_SHAPER_MODE_CURRENT - -#define MPC_REG_FIELD_LIST_DCN32(type) \ - type MPCC_MOVABLE_CM_LOCATION_CNTL;\ - type MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT;\ - type MPCC_MCM_SHAPER_MEM_PWR_FORCE;\ - type MPCC_MCM_SHAPER_MEM_PWR_DIS;\ - type MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE;\ - type MPCC_MCM_3DLUT_MEM_PWR_FORCE;\ - type MPCC_MCM_3DLUT_MEM_PWR_DIS;\ - type MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE;\ - type MPCC_MCM_1DLUT_MEM_PWR_FORCE;\ - type MPCC_MCM_1DLUT_MEM_PWR_DIS;\ - type MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE;\ - type MPCC_MCM_SHAPER_MEM_PWR_STATE;\ - type MPCC_MCM_3DLUT_MEM_PWR_STATE;\ - type MPCC_MCM_1DLUT_MEM_PWR_STATE;\ - type MPCC_MCM_3DLUT_MODE; \ - type MPCC_MCM_3DLUT_SIZE; \ - type MPCC_MCM_3DLUT_MODE_CURRENT; \ - type MPCC_MCM_3DLUT_WRITE_EN_MASK;\ - type MPCC_MCM_3DLUT_RAM_SEL;\ - type MPCC_MCM_3DLUT_30BIT_EN;\ - type MPCC_MCM_3DLUT_CONFIG_STATUS;\ - type MPCC_MCM_3DLUT_READ_SEL;\ - type MPCC_MCM_3DLUT_INDEX;\ - type MPCC_MCM_3DLUT_DATA0;\ - type MPCC_MCM_3DLUT_DATA1;\ - type MPCC_MCM_3DLUT_DATA_30BIT;\ - type MPCC_MCM_SHAPER_LUT_MODE;\ - type MPCC_MCM_SHAPER_MODE_CURRENT;\ - type MPCC_MCM_SHAPER_OFFSET_R;\ - type MPCC_MCM_SHAPER_OFFSET_G;\ - type MPCC_MCM_SHAPER_OFFSET_B;\ - type MPCC_MCM_SHAPER_SCALE_R;\ - type MPCC_MCM_SHAPER_SCALE_G;\ - type MPCC_MCM_SHAPER_SCALE_B;\ - type MPCC_MCM_SHAPER_LUT_INDEX;\ - type MPCC_MCM_SHAPER_LUT_DATA;\ - type 
MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK;\ - type MPCC_MCM_SHAPER_LUT_WRITE_SEL;\ - type MPCC_MCM_SHAPER_CONFIG_STATUS;\ - type MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B;\ - type MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\ - type MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B;\ - type MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B;\ - type MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\ - type MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\ - type MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\ - type MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type MPCC_MCM_1DLUT_MODE;\ - type MPCC_MCM_1DLUT_SELECT;\ - type MPCC_MCM_1DLUT_PWL_DISABLE;\ - type MPCC_MCM_1DLUT_MODE_CURRENT;\ - type MPCC_MCM_1DLUT_SELECT_CURRENT;\ - type MPCC_MCM_1DLUT_LUT_INDEX;\ - type MPCC_MCM_1DLUT_LUT_DATA;\ - type MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK;\ - type MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL;\ - type MPCC_MCM_1DLUT_LUT_HOST_SEL;\ - type MPCC_MCM_1DLUT_LUT_CONFIG_MODE;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B;\ - type MPCC_MCM_1DLUT_RAMA_OFFSET_B;\ - type MPCC_MCM_1DLUT_RAMA_OFFSET_G;\ - type MPCC_MCM_1DLUT_RAMA_OFFSET_R;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET;\ - type MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS - - -#define MPC_COMMON_MASK_SH_LIST_DCN303(mask_sh) \ - MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ - SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ - SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ - SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ - SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ - SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ - SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ - SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ - SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_LOW_PWR_MODE, mask_sh),\ - SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_STATE, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ - SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ - SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\ - SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, mask_sh),\ - SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\ - SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\ - 
SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \ - SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\ - /*SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),*/\ - SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ - SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\ - SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\ - /*SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),*/\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\ - /*SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),*/\ - SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\ - SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\ - SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\ - SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\ - /*SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),*/\ - SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\ - SF(MPC_RMU0_SHAPER_OFFSET_G, 
MPC_RMU_SHAPER_OFFSET_G, mask_sh),\ - SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\ - SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\ - SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\ - /*SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),*/\ - SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\ - SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, mask_sh),\ - SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_MODE_CURRENT, mask_sh),\ - SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) - -#define MPC_REG_FIELD_LIST_DCN3_03(type) \ - MPC_REG_FIELD_LIST_DCN2_0(type) \ - type MPC_DWB0_MUX;\ - type MPC_DWB0_MUX_STATUS;\ - type MPC_OUT_RATE_CONTROL;\ - type MPC_OUT_RATE_CONTROL_DISABLE;\ - type MPC_OUT_FLOW_CONTROL_MODE;\ - type MPC_OUT_FLOW_CONTROL_COUNT; \ - type MPCC_GAMUT_REMAP_MODE; \ - type MPCC_GAMUT_REMAP_MODE_CURRENT;\ - type MPCC_GAMUT_REMAP_COEF_FORMAT; \ - type MPCC_GAMUT_REMAP_C11_A; \ - type MPCC_GAMUT_REMAP_C12_A; \ - type MPC_RMU0_MUX; \ - type MPC_RMU0_MUX_STATUS; \ - type MPC_RMU0_MEM_PWR_FORCE;\ - type MPC_RMU0_MEM_PWR_DIS;\ - type MPC_RMU0_MEM_LOW_PWR_MODE;\ - type MPC_RMU0_SHAPER_MEM_PWR_STATE;\ - type MPC_RMU0_3DLUT_MEM_PWR_STATE;\ - type MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; \ - type MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;\ - type MPCC_OGAM_RAMA_OFFSET_B;\ - type MPCC_OGAM_RAMA_OFFSET_G;\ - type MPCC_OGAM_RAMA_OFFSET_R;\ - type MPCC_OGAM_SELECT; \ - type MPCC_OGAM_PWL_DISABLE; \ - type MPCC_OGAM_MODE_CURRENT; \ - type MPCC_OGAM_SELECT_CURRENT; \ - type MPCC_OGAM_LUT_WRITE_COLOR_MASK; \ - type MPCC_OGAM_LUT_READ_COLOR_SEL; \ - type MPCC_OGAM_LUT_READ_DBG; \ - type MPCC_OGAM_LUT_HOST_SEL; \ - type MPCC_OGAM_LUT_CONFIG_MODE; \ - type MPCC_OGAM_LUT_STATUS; \ - type MPCC_OGAM_RAMA_START_BASE_CNTL_B;\ - type MPCC_OGAM_MEM_LOW_PWR_MODE;\ - type MPCC_OGAM_MEM_PWR_STATE;\ - type MPC_RMU_3DLUT_MODE; \ - type MPC_RMU_3DLUT_SIZE; \ - type MPC_RMU_3DLUT_MODE_CURRENT; \ - type MPC_RMU_3DLUT_WRITE_EN_MASK;\ - type MPC_RMU_3DLUT_RAM_SEL;\ - type MPC_RMU_3DLUT_30BIT_EN;\ - type MPC_RMU_3DLUT_CONFIG_STATUS;\ - type MPC_RMU_3DLUT_READ_SEL;\ - type MPC_RMU_3DLUT_INDEX;\ - type MPC_RMU_3DLUT_DATA0;\ - type 
MPC_RMU_3DLUT_DATA1;\ - type MPC_RMU_3DLUT_DATA_30BIT;\ - type MPC_RMU_SHAPER_LUT_MODE;\ - type MPC_RMU_SHAPER_LUT_MODE_CURRENT;\ - type MPC_RMU_SHAPER_OFFSET_R;\ - type MPC_RMU_SHAPER_OFFSET_G;\ - type MPC_RMU_SHAPER_OFFSET_B;\ - type MPC_RMU_SHAPER_SCALE_R;\ - type MPC_RMU_SHAPER_SCALE_G;\ - type MPC_RMU_SHAPER_SCALE_B;\ - type MPC_RMU_SHAPER_LUT_INDEX;\ - type MPC_RMU_SHAPER_LUT_DATA;\ - type MPC_RMU_SHAPER_LUT_WRITE_EN_MASK;\ - type MPC_RMU_SHAPER_LUT_WRITE_SEL;\ - type MPC_RMU_SHAPER_CONFIG_STATUS;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\ - type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\ - type MPC_RMU_SHAPER_MODE_CURRENT - -struct dcn30_mpc_registers { - MPC_REG_VARIABLE_LIST_DCN3_0; - MPC_REG_VARIABLE_LIST_DCN32; -}; - -struct dcn30_mpc_shift { - MPC_REG_FIELD_LIST_DCN3_0(uint8_t); - MPC_REG_FIELD_LIST_DCN32(uint8_t); -}; - -struct dcn30_mpc_mask { - MPC_REG_FIELD_LIST_DCN3_0(uint32_t); - MPC_REG_FIELD_LIST_DCN32(uint32_t); -}; - -struct dcn30_mpc { - struct mpc base; - - int mpcc_in_use_mask; - int num_mpcc; - const struct dcn30_mpc_registers *mpc_regs; - const struct dcn30_mpc_shift *mpc_shift; - const struct dcn30_mpc_mask *mpc_mask; - int num_rmu; -}; - -void dcn30_mpc_construct(struct dcn30_mpc *mpc30, - struct dc_context *ctx, - const struct dcn30_mpc_registers *mpc_regs, - const struct dcn30_mpc_shift *mpc_shift, - const struct dcn30_mpc_mask *mpc_mask, - int num_mpcc, - int num_rmu); - -void mpc3_mpc_init( - struct mpc *mpc); - -void mpc3_mpc_init_single_inst( - struct mpc *mpc, - unsigned int mpcc_id); - -bool mpc3_program_shaper( - struct mpc *mpc, - const struct pwl_params *params, - uint32_t rmu_idx); - -bool mpc3_program_3dlut( - struct mpc *mpc, - const struct tetrahedral_params *params, - int rmu_idx); - -uint32_t mpcc3_acquire_rmu(struct mpc *mpc, - int mpcc_id, int rmu_idx); - -void mpc3_set_denorm( - struct mpc *mpc, - int opp_id, - enum dc_color_depth output_depth); - -void mpc3_set_denorm_clamp( - struct mpc *mpc, - int opp_id, - struct mpc_denorm_clamp denorm_clamp); - -void mpc3_set_output_csc( - struct mpc *mpc, - int opp_id, - const uint16_t *regval, - enum mpc_output_csc_mode ocsc_mode); - -void mpc3_set_ocsc_default( - struct mpc *mpc, - int opp_id, - enum dc_color_space color_space, - enum mpc_output_csc_mode ocsc_mode); - -void mpc3_set_output_gamma( - struct mpc *mpc, - int mpcc_id, - const struct pwl_params *params); - -uint32_t mpc3_get_rmu_mux_status( - struct mpc *mpc, - int rmu_idx); - -void mpc3_set_gamut_remap( - struct mpc *mpc, - int mpcc_id, - const struct mpc_grph_gamut_adjustment *adjust); - -void mpc3_get_gamut_remap(struct mpc *mpc, - int mpcc_id, - struct mpc_grph_gamut_adjustment *adjust); - -void mpc3_set_rmu_mux( - struct mpc *mpc, - int rmu_idx, - int value); - -void mpc3_set_dwb_mux( - struct mpc *mpc, - int dwb_id, - int mpcc_id); - -void mpc3_disable_dwb_mux( - struct mpc *mpc, - int dwb_id); - -bool mpc3_is_dwb_idle( - struct mpc *mpc, - int dwb_id); - -void mpc3_power_on_ogam_lut( - struct mpc *mpc, int mpcc_id, - bool power_on); - -void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst); - -enum dc_lut_mode mpc3_get_ogam_current( - struct mpc *mpc, - int mpcc_id); - -#endif diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index dc37dbf870dfdc..fb4814ab3f05ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -3,7 +3,7 @@ # # Makefile for dcn30. -DCN301 = dcn301_dio_link_encoder.o dcn301_panel_cntl.o +DCN301 = dcn301_panel_cntl.o AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c deleted file mode 100644 index 1b39a6e8a1ac5a..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "reg_helper.h" - -#include "core_types.h" -#include "link_encoder.h" -#include "dcn301_dio_link_encoder.h" -#include "stream_encoder.h" -#include "dc_bios_types.h" -#include "gpio_service_interface.h" - -#define CTX \ - enc10->base.ctx -#define DC_LOGGER \ - enc10->base.ctx->logger - -#define REG(reg)\ - (enc10->link_regs->reg) - -#undef FN -#define FN(reg_name, field_name) \ - enc10->link_shift->field_name, enc10->link_mask->field_name - -#define IND_REG(index) \ - (enc10->link_regs->index) - -static const struct link_encoder_funcs dcn301_link_enc_funcs = { - .read_state = link_enc2_read_state, - .validate_output_with_stream = dcn10_link_encoder_validate_output_with_stream, - .hw_init = enc3_hw_init, - .setup = dcn10_link_encoder_setup, - .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, - .enable_dp_output = dcn20_link_encoder_enable_dp_output, - .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output, - .disable_output = dcn10_link_encoder_disable_output, - .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, - .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, - .update_mst_stream_allocation_table = dcn10_link_encoder_update_mst_stream_allocation_table, - .psr_program_dp_dphy_fast_training = dcn10_psr_program_dp_dphy_fast_training, - .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, - .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, - .enable_hpd = dcn10_link_encoder_enable_hpd, - .disable_hpd = dcn10_link_encoder_disable_hpd, - .is_dig_enabled = dcn10_is_dig_enabled, - .destroy = dcn10_link_encoder_destroy, - .fec_set_enable = enc2_fec_set_enable, - .fec_set_ready = enc2_fec_set_ready, - .fec_is_active = enc2_fec_is_active, - .get_dig_frontend = dcn10_get_dig_frontend, - .get_dig_mode = dcn10_get_dig_mode, - .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode, - .get_max_link_cap = dcn20_link_encoder_get_max_link_cap, -}; - -void dcn301_link_encoder_construct( - struct dcn20_link_encoder *enc20, - const struct encoder_init_data *init_data, - const struct encoder_feature_support *enc_features, - const struct dcn10_link_enc_registers *link_regs, - const struct dcn10_link_enc_aux_registers *aux_regs, - const struct dcn10_link_enc_hpd_registers *hpd_regs, - const struct dcn10_link_enc_shift *link_shift, - const struct dcn10_link_enc_mask *link_mask) -{ - struct bp_encoder_cap_info bp_cap_info = {0}; - const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; - enum bp_result result = BP_RESULT_OK; - struct dcn10_link_encoder *enc10 = &enc20->enc10; - - enc10->base.funcs = &dcn301_link_enc_funcs; - enc10->base.ctx = init_data->ctx; - enc10->base.id = init_data->encoder; - - enc10->base.hpd_source = init_data->hpd_source; - enc10->base.connector = init_data->connector; - - enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; - - enc10->base.features = *enc_features; - - enc10->base.transmitter = init_data->transmitter; - - /* set the flag to indicate whether driver poll the I2C data pin - * while doing the DP sink detect - */ - -/* if (dal_adapter_service_is_feature_supported(as, - FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) - enc10->base.features.flags.bits. 
- DP_SINK_DETECT_POLL_DATA_PIN = true;*/ - - enc10->base.output_signals = - SIGNAL_TYPE_DVI_SINGLE_LINK | - SIGNAL_TYPE_DVI_DUAL_LINK | - SIGNAL_TYPE_LVDS | - SIGNAL_TYPE_DISPLAY_PORT | - SIGNAL_TYPE_DISPLAY_PORT_MST | - SIGNAL_TYPE_EDP | - SIGNAL_TYPE_HDMI_TYPE_A; - - /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. - * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. - * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer - * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. - * Prefer DIG assignment is decided by board design. - * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design - * and VBIOS will filter out 7 UNIPHY for DCE 8.0. - * By this, adding DIGG should not hurt DCE 8.0. - * This will let DCE 8.1 share DCE 8.0 as much as possible - */ - - enc10->link_regs = link_regs; - enc10->aux_regs = aux_regs; - enc10->hpd_regs = hpd_regs; - enc10->link_shift = link_shift; - enc10->link_mask = link_mask; - - switch (enc10->base.transmitter) { - case TRANSMITTER_UNIPHY_A: - enc10->base.preferred_engine = ENGINE_ID_DIGA; - break; - case TRANSMITTER_UNIPHY_B: - enc10->base.preferred_engine = ENGINE_ID_DIGB; - break; - case TRANSMITTER_UNIPHY_C: - enc10->base.preferred_engine = ENGINE_ID_DIGC; - break; - case TRANSMITTER_UNIPHY_D: - enc10->base.preferred_engine = ENGINE_ID_DIGD; - break; - case TRANSMITTER_UNIPHY_E: - enc10->base.preferred_engine = ENGINE_ID_DIGE; - break; - case TRANSMITTER_UNIPHY_F: - enc10->base.preferred_engine = ENGINE_ID_DIGF; - break; - case TRANSMITTER_UNIPHY_G: - enc10->base.preferred_engine = ENGINE_ID_DIGG; - break; - default: - ASSERT_CRITICAL(false); - enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; - } - - /* default to one to mirror Windows behavior */ - enc10->base.features.flags.bits.HDMI_6GB_EN = 1; - - result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, - enc10->base.id, &bp_cap_info); - - /* Override features with DCE-specific values */ - if (result == BP_RESULT_OK) { - enc10->base.features.flags.bits.IS_HBR2_CAPABLE = - bp_cap_info.DP_HBR2_EN; - enc10->base.features.flags.bits.IS_HBR3_CAPABLE = - bp_cap_info.DP_HBR3_EN; - enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - enc10->base.features.flags.bits.DP_IS_USB_C = - bp_cap_info.DP_IS_USB_C; - } else { - DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", - __func__, - result); - } - if (enc10->base.ctx->dc->debug.hdmi20_disable) { - enc10->base.features.flags.bits.HDMI_6GB_EN = 0; - } -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h deleted file mode 100644 index 49f8d91d495131..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_LINK_ENCODER__DCN301_H__ -#define __DC_LINK_ENCODER__DCN301_H__ - -#include "dcn20/dcn20_link_encoder.h" - - -#define LE_DCN301_REG_LIST(id)\ - SRI(DIG_BE_CNTL, DIG, id), \ - SRI(DIG_BE_EN_CNTL, DIG, id), \ - SRI(TMDS_CTL_BITS, DIG, id), \ - SRI(TMDS_DCBALANCER_CONTROL, DIG, id), \ - SRI(DP_CONFIG, DP, id), \ - SRI(DP_DPHY_CNTL, DP, id), \ - SRI(DP_DPHY_PRBS_CNTL, DP, id), \ - SRI(DP_DPHY_SCRAM_CNTL, DP, id),\ - SRI(DP_DPHY_SYM0, DP, id), \ - SRI(DP_DPHY_SYM1, DP, id), \ - SRI(DP_DPHY_SYM2, DP, id), \ - SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \ - SRI(DP_LINK_CNTL, DP, id), \ - SRI(DP_LINK_FRAMING_CNTL, DP, id), \ - SRI(DP_MSE_SAT0, DP, id), \ - SRI(DP_MSE_SAT1, DP, id), \ - SRI(DP_MSE_SAT2, DP, id), \ - SRI(DP_MSE_SAT_UPDATE, DP, id), \ - SRI(DP_SEC_CNTL, DP, id), \ - SRI(DP_VID_STREAM_CNTL, DP, id), \ - SRI(DP_DPHY_FAST_TRAINING, DP, id), \ - SRI(DP_SEC_CNTL1, DP, id), \ - SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ - SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) - -#define LINK_ENCODER_MASK_SH_LIST_DCN301(mask_sh) \ - LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ - LE_SF(DIG0_TMDS_DCBALANCER_CONTROL, TMDS_SYNC_DCBAL_EN, mask_sh) - -#define DPCS_DCN301_MASK_SH_LIST(mask_sh)\ - DPCS_DCN2_MASK_SH_LIST(mask_sh),\ - LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_HDMI_FRL_MODE, mask_sh),\ - LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP_10_BIT, mask_sh),\ - LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\ - LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\ - LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh) - -void dcn301_link_encoder_construct( - struct dcn20_link_encoder *enc20, - const struct encoder_init_data *init_data, - const struct encoder_feature_support *enc_features, - const struct dcn10_link_enc_registers *link_regs, - const struct dcn10_link_enc_aux_registers *aux_regs, - const struct dcn10_link_enc_hpd_registers *hpd_regs, - const struct dcn10_link_enc_shift *link_shift, - const struct dcn10_link_enc_mask *link_mask); - -void enc3_hw_init(struct link_encoder *enc); - -#endif /* __DC_LINK_ENCODER__DCN301_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile deleted file mode 100644 index a954e316aca254..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: MIT -# -# Copyright (C) 2021 Advanced Micro Devices, Inc. All the rights reserved -# -# Authors: AMD -# -# Makefile for dcn303. - -DCN3_03 = dcn303_init.o - -AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_03) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index e2601d0aba415c..d510e4652c18bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -5,7 +5,7 @@ # Makefile for dcn31. 
DCN31 = dcn31_panel_cntl.o \ - dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \ + dcn31_apg.o \ dcn31_afmt.o dcn31_vpg.o AMD_DAL_DCN31 = $(addprefix $(AMDDALPATH)/dc/dcn31/,$(DCN31)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c deleted file mode 100644 index 03b4ac2f1991a7..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c +++ /dev/null @@ -1,628 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dc_bios_types.h" -#include "dcn31_hpo_dp_link_encoder.h" -#include "reg_helper.h" -#include "stream_encoder.h" - -#define DC_LOGGER \ - enc3->base.ctx->logger - -#define REG(reg)\ - (enc3->regs->reg) - -#undef FN -#define FN(reg_name, field_name) \ - enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name - - -#define CTX \ - enc3->base.ctx - -enum { - DP_SAT_UPDATE_MAX_RETRY = 200 -}; - -void dcn31_hpo_dp_link_enc_enable( - struct hpo_dp_link_encoder *enc, - enum dc_lane_count num_lanes) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - uint32_t dp_link_enabled; - - /* get current status of link enabled */ - REG_GET(DP_DPHY_SYM32_STATUS, - STATUS, &dp_link_enabled); - - /* Enable clocks first */ - REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 1); - - /* Reset DPHY. Only reset if going from disable to enable */ - if (!dp_link_enabled) { - REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 1); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 0); - } - - /* Configure DPHY settings */ - REG_UPDATE_3(DP_DPHY_SYM32_CONTROL, - DPHY_ENABLE, 1, - PRECODER_ENABLE, 1, - NUM_LANES, num_lanes == LANE_COUNT_ONE ? 0 : num_lanes == LANE_COUNT_TWO ? 
1 : 3); -} - -void dcn31_hpo_dp_link_enc_disable( - struct hpo_dp_link_encoder *enc) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - - /* Configure DPHY settings */ - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - DPHY_ENABLE, 0); - - /* Shut down clock last */ - REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 0); -} - -void dcn31_hpo_dp_link_enc_set_link_test_pattern( - struct hpo_dp_link_encoder *enc, - struct encoder_set_dp_phy_pattern_param *tp_params) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - uint32_t tp_custom; - - switch (tp_params->dp_phy_pattern) { - case DP_TEST_PATTERN_VIDEO_MODE: - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_LINK_ACTIVE); - break; - case DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE: - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_LINK_TRAINING_TPS1); - break; - case DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE: - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_LINK_TRAINING_TPS2); - break; - case DP_TEST_PATTERN_128b_132b_TPS1: - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_TPS1, - TP_SELECT1, DP_DPHY_TP_SELECT_TPS1, - TP_SELECT2, DP_DPHY_TP_SELECT_TPS1, - TP_SELECT3, DP_DPHY_TP_SELECT_TPS1); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case DP_TEST_PATTERN_128b_132b_TPS2: - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_TPS2, - TP_SELECT1, DP_DPHY_TP_SELECT_TPS2, - TP_SELECT2, DP_DPHY_TP_SELECT_TPS2, - TP_SELECT3, DP_DPHY_TP_SELECT_TPS2); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case DP_TEST_PATTERN_PRBS7: - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_PRBS_SEL0, DP_DPHY_TP_PRBS7, - TP_PRBS_SEL1, DP_DPHY_TP_PRBS7, - TP_PRBS_SEL2, DP_DPHY_TP_PRBS7, - TP_PRBS_SEL3, DP_DPHY_TP_PRBS7); - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case DP_TEST_PATTERN_PRBS9: - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_PRBS_SEL0, DP_DPHY_TP_PRBS9, - TP_PRBS_SEL1, DP_DPHY_TP_PRBS9, - TP_PRBS_SEL2, DP_DPHY_TP_PRBS9, - TP_PRBS_SEL3, DP_DPHY_TP_PRBS9); - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case DP_TEST_PATTERN_PRBS11: - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_PRBS_SEL0, DP_DPHY_TP_PRBS11, - TP_PRBS_SEL1, DP_DPHY_TP_PRBS11, - TP_PRBS_SEL2, DP_DPHY_TP_PRBS11, - TP_PRBS_SEL3, DP_DPHY_TP_PRBS11); - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case DP_TEST_PATTERN_PRBS15: - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_PRBS_SEL0, DP_DPHY_TP_PRBS15, - TP_PRBS_SEL1, DP_DPHY_TP_PRBS15, - TP_PRBS_SEL2, DP_DPHY_TP_PRBS15, - TP_PRBS_SEL3, DP_DPHY_TP_PRBS15); - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case 
DP_TEST_PATTERN_PRBS23: - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_PRBS_SEL0, DP_DPHY_TP_PRBS23, - TP_PRBS_SEL1, DP_DPHY_TP_PRBS23, - TP_PRBS_SEL2, DP_DPHY_TP_PRBS23, - TP_PRBS_SEL3, DP_DPHY_TP_PRBS23); - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case DP_TEST_PATTERN_PRBS31: - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_PRBS_SEL0, DP_DPHY_TP_PRBS31, - TP_PRBS_SEL1, DP_DPHY_TP_PRBS31, - TP_PRBS_SEL2, DP_DPHY_TP_PRBS31, - TP_PRBS_SEL3, DP_DPHY_TP_PRBS31); - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, - TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case DP_TEST_PATTERN_264BIT_CUSTOM: - tp_custom = (tp_params->custom_pattern[2] << 16) | (tp_params->custom_pattern[1] << 8) | tp_params->custom_pattern[0]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM0, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[5] << 16) | (tp_params->custom_pattern[4] << 8) | tp_params->custom_pattern[3]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM1, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[8] << 16) | (tp_params->custom_pattern[7] << 8) | tp_params->custom_pattern[6]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM2, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[11] << 16) | (tp_params->custom_pattern[10] << 8) | tp_params->custom_pattern[9]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM3, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[14] << 16) | (tp_params->custom_pattern[13] << 8) | tp_params->custom_pattern[12]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM4, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[17] << 16) | (tp_params->custom_pattern[16] << 8) | tp_params->custom_pattern[15]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM5, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[20] << 16) | (tp_params->custom_pattern[19] << 8) | tp_params->custom_pattern[18]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM6, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[23] << 16) | (tp_params->custom_pattern[22] << 8) | tp_params->custom_pattern[21]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM7, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[26] << 16) | (tp_params->custom_pattern[25] << 8) | tp_params->custom_pattern[24]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM8, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[29] << 16) | (tp_params->custom_pattern[28] << 8) | tp_params->custom_pattern[27]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM9, 0, TP_CUSTOM, tp_custom); - tp_custom = (tp_params->custom_pattern[32] << 16) | (tp_params->custom_pattern[31] << 8) | tp_params->custom_pattern[30]; - REG_SET(DP_DPHY_SYM32_TP_CUSTOM10, 0, TP_CUSTOM, tp_custom); - - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_CUSTOM, - TP_SELECT1, DP_DPHY_TP_SELECT_CUSTOM, - TP_SELECT2, DP_DPHY_TP_SELECT_CUSTOM, - TP_SELECT3, DP_DPHY_TP_SELECT_CUSTOM); - - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - case DP_TEST_PATTERN_SQUARE: - case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: - case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: - case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: - REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0, - TP_SQ_PULSE_WIDTH, 
tp_params->custom_pattern[0]); - - REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, - TP_SELECT0, DP_DPHY_TP_SELECT_SQUARE, - TP_SELECT1, DP_DPHY_TP_SELECT_SQUARE, - TP_SELECT2, DP_DPHY_TP_SELECT_SQUARE, - TP_SELECT3, DP_DPHY_TP_SELECT_SQUARE); - - REG_UPDATE(DP_DPHY_SYM32_CONTROL, - MODE, DP2_TEST_PATTERN); - break; - default: - break; - } -} - -static void fill_stream_allocation_row_info( - const struct link_mst_stream_allocation *stream_allocation, - uint32_t *src, - uint32_t *slots) -{ - const struct hpo_dp_stream_encoder *stream_enc = stream_allocation->hpo_dp_stream_enc; - - if (stream_enc && (stream_enc->id >= ENGINE_ID_HPO_DP_0)) { - *src = stream_enc->id - ENGINE_ID_HPO_DP_0; - *slots = stream_allocation->slot_count; - } else { - *src = 0; - *slots = 0; - } -} - -/* programs DP VC payload allocation */ -void dcn31_hpo_dp_link_enc_update_stream_allocation_table( - struct hpo_dp_link_encoder *enc, - const struct link_mst_stream_allocation_table *table) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - uint32_t slots = 0; - uint32_t src = 0; - - /* --- Set MSE Stream Attribute - - * Setup VC Payload Table on Tx Side, - * Issue allocation change trigger - * to commit payload on both tx and rx side - */ - - /* we should clean-up table each time */ - - if (table->stream_count >= 1) { - fill_stream_allocation_row_info( - &table->stream_allocations[0], - &src, - &slots); - } else { - src = 0; - slots = 0; - } - - REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC0, - SAT_STREAM_SOURCE, src, - SAT_SLOT_COUNT, slots); - - if (table->stream_count >= 2) { - fill_stream_allocation_row_info( - &table->stream_allocations[1], - &src, - &slots); - } else { - src = 0; - slots = 0; - } - - REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC1, - SAT_STREAM_SOURCE, src, - SAT_SLOT_COUNT, slots); - - if (table->stream_count >= 3) { - fill_stream_allocation_row_info( - &table->stream_allocations[2], - &src, - &slots); - } else { - src = 0; - slots = 0; - } - - REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC2, - SAT_STREAM_SOURCE, src, - SAT_SLOT_COUNT, slots); - - if (table->stream_count >= 4) { - fill_stream_allocation_row_info( - &table->stream_allocations[3], - &src, - &slots); - } else { - src = 0; - slots = 0; - } - - REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC3, - SAT_STREAM_SOURCE, src, - SAT_SLOT_COUNT, slots); - - /* --- wait for transaction finish */ - - /* send allocation change trigger (ACT) - * this step first sends the ACT, - * then double buffers the SAT into the hardware - * making the new allocation active on the DP MST mode link - */ - - /* SAT_UPDATE: - * 0 - No Action - * 1 - Update SAT with trigger - * 2 - Update SAT without trigger - */ - REG_UPDATE(DP_DPHY_SYM32_SAT_UPDATE, - SAT_UPDATE, 1); - - /* wait for update to complete - * (i.e. SAT_UPDATE_PENDING field is set to 0) - * No need for HW to enforce keepout. - */ - /* Best case and worst case wait time for SAT_UPDATE_PENDING - * best: 109 us - * worst: 868 us - */ - REG_WAIT(DP_DPHY_SYM32_STATUS, - SAT_UPDATE_PENDING, 0, - 100, DP_SAT_UPDATE_MAX_RETRY); -} - -void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( - struct hpo_dp_link_encoder *enc, - uint32_t stream_encoder_inst, - struct fixed31_32 avg_time_slots_per_mtp) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - uint32_t x = dc_fixpt_floor( - avg_time_slots_per_mtp); - uint32_t y = dc_fixpt_ceil( - dc_fixpt_shl( - dc_fixpt_sub_int( - avg_time_slots_per_mtp, - x), - 25)); - - // If y rounds up to integer, carry it over to x. 
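As a hedged, standalone illustration (not part of the patch; plain floating-point math stands in for the driver's fixed31_32 helpers) of how the average time slots per MTP is split into the integer X and the 25-bit fractional Y written to DP_DPHY_SYM32_VC_RATE_CNTLn, including the carry described in the comment above:

#include <stdint.h>
#include <math.h>

static void vc_rate_split(double avg_time_slots_per_mtp,
                          uint32_t *x, uint32_t *y)
{
        *x = (uint32_t)floor(avg_time_slots_per_mtp);
        /* fractional part scaled by 2^25, rounded up as in the driver */
        *y = (uint32_t)ceil((avg_time_slots_per_mtp - *x) * (double)(1u << 25));

        /* if Y rounded up to a whole time slot, carry it into X */
        if (*y >> 25) {
                *x += 1;
                *y = 0;
        }
}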
- if (y >> 25) { - x += 1; - y = 0; - } - - switch (stream_encoder_inst) { - case 0: - REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0, - STREAM_VC_RATE_X, x, - STREAM_VC_RATE_Y, y); - break; - case 1: - REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, 0, - STREAM_VC_RATE_X, x, - STREAM_VC_RATE_Y, y); - break; - case 2: - REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, 0, - STREAM_VC_RATE_X, x, - STREAM_VC_RATE_Y, y); - break; - case 3: - REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, 0, - STREAM_VC_RATE_X, x, - STREAM_VC_RATE_Y, y); - break; - default: - ASSERT(0); - } - - /* Best case and worst case wait time for RATE_UPDATE_PENDING - * best: 116 ns - * worst: 903 ns - */ - /* wait for update to be completed on the link */ - REG_WAIT(DP_DPHY_SYM32_STATUS, - RATE_UPDATE_PENDING, 0, - 1, 10); -} - -static bool dcn31_hpo_dp_link_enc_is_in_alt_mode( - struct hpo_dp_link_encoder *enc) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - uint32_t dp_alt_mode_disable = 0; - - ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E)); - - /* if value == 1 alt mode is disabled, otherwise it is enabled */ - REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - return (dp_alt_mode_disable == 0); -} - -void dcn31_hpo_dp_link_enc_read_state( - struct hpo_dp_link_encoder *enc, - struct hpo_dp_link_enc_state *state) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - - ASSERT(state); - - REG_GET(DP_DPHY_SYM32_STATUS, - STATUS, &state->link_enc_enabled); - REG_GET(DP_DPHY_SYM32_CONTROL, - NUM_LANES, &state->lane_count); - REG_GET(DP_DPHY_SYM32_CONTROL, - MODE, (uint32_t *)&state->link_mode); - - REG_GET_2(DP_DPHY_SYM32_SAT_VC0, - SAT_STREAM_SOURCE, &state->stream_src[0], - SAT_SLOT_COUNT, &state->slot_count[0]); - REG_GET_2(DP_DPHY_SYM32_SAT_VC1, - SAT_STREAM_SOURCE, &state->stream_src[1], - SAT_SLOT_COUNT, &state->slot_count[1]); - REG_GET_2(DP_DPHY_SYM32_SAT_VC2, - SAT_STREAM_SOURCE, &state->stream_src[2], - SAT_SLOT_COUNT, &state->slot_count[2]); - REG_GET_2(DP_DPHY_SYM32_SAT_VC3, - SAT_STREAM_SOURCE, &state->stream_src[3], - SAT_SLOT_COUNT, &state->slot_count[3]); - - REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, - STREAM_VC_RATE_X, &state->vc_rate_x[0], - STREAM_VC_RATE_Y, &state->vc_rate_y[0]); - REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, - STREAM_VC_RATE_X, &state->vc_rate_x[1], - STREAM_VC_RATE_Y, &state->vc_rate_y[1]); - REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, - STREAM_VC_RATE_X, &state->vc_rate_x[2], - STREAM_VC_RATE_Y, &state->vc_rate_y[2]); - REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, - STREAM_VC_RATE_X, &state->vc_rate_x[3], - STREAM_VC_RATE_Y, &state->vc_rate_y[3]); -} - -static enum bp_result link_transmitter_control( - struct dcn31_hpo_dp_link_encoder *enc3, - struct bp_transmitter_control *cntl) -{ - enum bp_result result; - struct dc_bios *bp = enc3->base.ctx->dc_bios; - - result = bp->funcs->transmitter_control(bp, cntl); - - return result; -} - -/* enables DP PHY output for 128b132b encoding */ -void dcn31_hpo_dp_link_enc_enable_dp_output( - struct hpo_dp_link_encoder *enc, - const struct dc_link_settings *link_settings, - enum transmitter transmitter, - enum hpd_source_id hpd_source) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - struct bp_transmitter_control cntl = { 0 }; - enum bp_result result; - - /* Set the transmitter */ - enc3->base.transmitter = transmitter; - - /* Set the hpd source */ - enc3->base.hpd_source = 
hpd_source; - - /* Enable the PHY */ - cntl.action = TRANSMITTER_CONTROL_ENABLE; - cntl.engine_id = ENGINE_ID_UNKNOWN; - cntl.transmitter = enc3->base.transmitter; - //cntl.pll_id = clock_source; - cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; - cntl.lanes_number = link_settings->lane_count; - cntl.hpd_sel = enc3->base.hpd_source; - cntl.pixel_clock = link_settings->link_rate * 1000; - cntl.color_depth = COLOR_DEPTH_UNDEFINED; - cntl.hpo_engine_id = enc->inst + ENGINE_ID_HPO_DP_0; - - result = link_transmitter_control(enc3, &cntl); - - if (result != BP_RESULT_OK) { - DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", - __func__); - BREAK_TO_DEBUGGER(); - } -} - -void dcn31_hpo_dp_link_enc_disable_output( - struct hpo_dp_link_encoder *enc, - enum signal_type signal) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - struct bp_transmitter_control cntl = { 0 }; - enum bp_result result; - - /* disable transmitter */ - cntl.action = TRANSMITTER_CONTROL_DISABLE; - cntl.transmitter = enc3->base.transmitter; - cntl.hpd_sel = enc3->base.hpd_source; - cntl.signal = signal; - - result = link_transmitter_control(enc3, &cntl); - - if (result != BP_RESULT_OK) { - DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", - __func__); - BREAK_TO_DEBUGGER(); - return; - } - - /* disable encoder */ - dcn31_hpo_dp_link_enc_disable(enc); -} - -void dcn31_hpo_dp_link_enc_set_ffe( - struct hpo_dp_link_encoder *enc, - const struct dc_link_settings *link_settings, - uint8_t ffe_preset) -{ - struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); - struct bp_transmitter_control cntl = { 0 }; - enum bp_result result; - - /* disable transmitter */ - cntl.transmitter = enc3->base.transmitter; - cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS; - cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; - cntl.lanes_number = link_settings->lane_count; - cntl.pixel_clock = link_settings->link_rate * 1000; - cntl.lane_settings = ffe_preset; - - result = link_transmitter_control(enc3, &cntl); - - if (result != BP_RESULT_OK) { - DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", - __func__); - BREAK_TO_DEBUGGER(); - return; - } -} - -static struct hpo_dp_link_encoder_funcs dcn31_hpo_dp_link_encoder_funcs = { - .enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output, - .disable_link_phy = dcn31_hpo_dp_link_enc_disable_output, - .link_enable = dcn31_hpo_dp_link_enc_enable, - .link_disable = dcn31_hpo_dp_link_enc_disable, - .set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern, - .update_stream_allocation_table = dcn31_hpo_dp_link_enc_update_stream_allocation_table, - .set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size, - .is_in_alt_mode = dcn31_hpo_dp_link_enc_is_in_alt_mode, - .read_state = dcn31_hpo_dp_link_enc_read_state, - .set_ffe = dcn31_hpo_dp_link_enc_set_ffe, -}; - -void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, - struct dc_context *ctx, - uint32_t inst, - const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, - const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, - const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask) -{ - enc31->base.ctx = ctx; - - enc31->base.inst = inst; - enc31->base.funcs = &dcn31_hpo_dp_link_encoder_funcs; - enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN; - enc31->base.transmitter = TRANSMITTER_UNKNOWN; - - enc31->regs = hpo_le_regs; - enc31->hpo_le_shift = hpo_le_shift; - enc31->hpo_le_mask = hpo_le_mask; -} diff 
--git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h deleted file mode 100644 index 51f5781325e895..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ -#define __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ - -#include "link_encoder.h" - - -#define DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(hpo_dp_link_encoder)\ - container_of(hpo_dp_link_encoder, struct dcn31_hpo_dp_link_encoder, base) - - -#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id) \ - SRI(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \ - SRI(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \ - SRI(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) - -#define DCN3_1_RDPCSTX_REG_LIST(id) \ - SRII(RDPCSTX_PHY_CNTL6, RDPCSTX, id) - - -#define DCN3_1_HPO_DP_LINK_ENC_REGS \ - uint32_t DP_LINK_ENC_CLOCK_CONTROL;\ - uint32_t 
DP_DPHY_SYM32_CONTROL;\ - uint32_t DP_DPHY_SYM32_STATUS;\ - uint32_t DP_DPHY_SYM32_TP_CONFIG;\ - uint32_t DP_DPHY_SYM32_TP_PRBS_SEED0;\ - uint32_t DP_DPHY_SYM32_TP_PRBS_SEED1;\ - uint32_t DP_DPHY_SYM32_TP_PRBS_SEED2;\ - uint32_t DP_DPHY_SYM32_TP_PRBS_SEED3;\ - uint32_t DP_DPHY_SYM32_TP_SQ_PULSE;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM0;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM1;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM2;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM3;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM4;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM5;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM6;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM7;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM8;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM9;\ - uint32_t DP_DPHY_SYM32_TP_CUSTOM10;\ - uint32_t DP_DPHY_SYM32_SAT_VC0;\ - uint32_t DP_DPHY_SYM32_SAT_VC1;\ - uint32_t DP_DPHY_SYM32_SAT_VC2;\ - uint32_t DP_DPHY_SYM32_SAT_VC3;\ - uint32_t DP_DPHY_SYM32_VC_RATE_CNTL0;\ - uint32_t DP_DPHY_SYM32_VC_RATE_CNTL1;\ - uint32_t DP_DPHY_SYM32_VC_RATE_CNTL2;\ - uint32_t DP_DPHY_SYM32_VC_RATE_CNTL3;\ - uint32_t DP_DPHY_SYM32_SAT_UPDATE - -struct dcn31_hpo_dp_link_encoder_registers { - DCN3_1_HPO_DP_LINK_ENC_REGS; - uint32_t RDPCSTX_PHY_CNTL6[5]; -}; - -#define DCN3_1_HPO_DP_LINK_ENC_RDPCSTX_MASK_SH_LIST(mask_sh)\ - SE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh) - -#define DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(mask_sh)\ - SE_SF(DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_RESET, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, PRECODER_ENABLE, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, MODE, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, NUM_LANES, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, STATUS, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, RATE_UPDATE_PENDING, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0, TP_CUSTOM, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT1, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT2, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT3, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\ - SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh) - -#define DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(mask_sh)\ - DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(mask_sh),\ - DCN3_1_HPO_DP_LINK_ENC_RDPCSTX_MASK_SH_LIST(mask_sh)\ - -#define DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(type) \ - type DP_LINK_ENC_CLOCK_EN;\ - type DPHY_RESET;\ - type DPHY_ENABLE;\ - type PRECODER_ENABLE;\ - type NUM_LANES;\ - type MODE;\ - type STATUS;\ - type SAT_UPDATE_PENDING;\ - type RATE_UPDATE_PENDING;\ - 
type TP_CUSTOM;\ - type TP_SELECT0;\ - type TP_SELECT1;\ - type TP_SELECT2;\ - type TP_SELECT3;\ - type TP_PRBS_SEL0;\ - type TP_PRBS_SEL1;\ - type TP_PRBS_SEL2;\ - type TP_PRBS_SEL3;\ - type TP_SQ_PULSE_WIDTH;\ - type SAT_STREAM_SOURCE;\ - type SAT_SLOT_COUNT;\ - type STREAM_VC_RATE_X;\ - type STREAM_VC_RATE_Y;\ - type SAT_UPDATE;\ - type RDPCS_PHY_DPALT_DISABLE - - -struct dcn31_hpo_dp_link_encoder_shift { - DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint8_t); -}; - -struct dcn31_hpo_dp_link_encoder_mask { - DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint32_t); -}; - -struct dcn31_hpo_dp_link_encoder { - struct hpo_dp_link_encoder base; - const struct dcn31_hpo_dp_link_encoder_registers *regs; - const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift; - const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask; -}; - -void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, - struct dc_context *ctx, - uint32_t inst, - const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, - const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, - const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask); - -void dcn31_hpo_dp_link_enc_enable_dp_output( - struct hpo_dp_link_encoder *enc, - const struct dc_link_settings *link_settings, - enum transmitter transmitter, - enum hpd_source_id hpd_source); - -void dcn31_hpo_dp_link_enc_disable_output( - struct hpo_dp_link_encoder *enc, - enum signal_type signal); - -void dcn31_hpo_dp_link_enc_enable( - struct hpo_dp_link_encoder *enc, - enum dc_lane_count num_lanes); - -void dcn31_hpo_dp_link_enc_disable( - struct hpo_dp_link_encoder *enc); - -void dcn31_hpo_dp_link_enc_set_link_test_pattern( - struct hpo_dp_link_encoder *enc, - struct encoder_set_dp_phy_pattern_param *tp_params); - -void dcn31_hpo_dp_link_enc_update_stream_allocation_table( - struct hpo_dp_link_encoder *enc, - const struct link_mst_stream_allocation_table *table); - -void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( - struct hpo_dp_link_encoder *enc, - uint32_t stream_encoder_inst, - struct fixed31_32 avg_time_slots_per_mtp); - -void dcn31_hpo_dp_link_enc_read_state( - struct hpo_dp_link_encoder *enc, - struct hpo_dp_link_enc_state *state); - -void dcn31_hpo_dp_link_enc_set_ffe( - struct hpo_dp_link_encoder *enc, - const struct dc_link_settings *link_settings, - uint8_t ffe_preset); - -#endif // __DAL_DCN31_HPO_LINK_ENCODER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c deleted file mode 100644 index 678db949cfe3ce..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ /dev/null @@ -1,779 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
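The DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(type) macro above is instantiated twice, once with uint8_t for the shift table and once with uint32_t for the mask table, so the two structs always carry the same set of fields. A minimal sketch of the pattern, using example field names rather than the full list:

#include <stdint.h>

#define EXAMPLE_REG_FIELD_LIST(type) \
        type DPHY_ENABLE;            \
        type NUM_LANES

/* One field list, two parallel tables: per-field shifts and per-field masks. */
struct example_shift { EXAMPLE_REG_FIELD_LIST(uint8_t); };
struct example_mask  { EXAMPLE_REG_FIELD_LIST(uint32_t); };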
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dc_bios_types.h" -#include "dcn31_hpo_dp_stream_encoder.h" -#include "reg_helper.h" -#include "dc.h" - -#define DC_LOGGER \ - enc3->base.ctx->logger - -#define REG(reg)\ - (enc3->regs->reg) - -#undef FN -#define FN(reg_name, field_name) \ - enc3->hpo_se_shift->field_name, enc3->hpo_se_mask->field_name - -#define CTX \ - enc3->base.ctx - - -enum dp2_pixel_encoding { - DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444, - DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422, - DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420, - DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY -}; - -enum dp2_uncompressed_component_depth { - DP_SYM32_ENC_COMPONENT_DEPTH_6BPC, - DP_SYM32_ENC_COMPONENT_DEPTH_8BPC, - DP_SYM32_ENC_COMPONENT_DEPTH_10BPC, - DP_SYM32_ENC_COMPONENT_DEPTH_12BPC -}; - - -static void dcn31_hpo_dp_stream_enc_enable_stream( - struct hpo_dp_stream_encoder *enc) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - /* Enable all clocks in the DP_STREAM_ENC */ - REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, - DP_STREAM_ENC_CLOCK_EN, 1); - - /* Assert reset to the DP_SYM32_ENC logic */ - REG_UPDATE(DP_SYM32_ENC_CONTROL, - DP_SYM32_ENC_RESET, 1); - /* Wait for reset to complete (to assert) */ - REG_WAIT(DP_SYM32_ENC_CONTROL, - DP_SYM32_ENC_RESET_DONE, 1, - 1, 10); - - /* De-assert reset to the DP_SYM32_ENC logic */ - REG_UPDATE(DP_SYM32_ENC_CONTROL, - DP_SYM32_ENC_RESET, 0); - /* Wait for reset to de-assert */ - REG_WAIT(DP_SYM32_ENC_CONTROL, - DP_SYM32_ENC_RESET_DONE, 0, - 1, 10); - - /* Enable idle pattern generation */ - REG_UPDATE(DP_SYM32_ENC_CONTROL, - DP_SYM32_ENC_ENABLE, 1); -} - -static void dcn31_hpo_dp_stream_enc_dp_unblank( - struct hpo_dp_stream_encoder *enc, - uint32_t stream_source) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - /* Set the input mux for video stream source */ - REG_UPDATE(DP_STREAM_ENC_INPUT_MUX_CONTROL, - DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, stream_source); - - /* Enable video transmission in main framer */ - REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, - VID_STREAM_ENABLE, 1); - - /* Reset and Enable Pixel to Symbol FIFO */ - REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, - PIXEL_TO_SYMBOL_FIFO_RESET, 1); - REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, - PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 1, - 1, 10); - REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, - PIXEL_TO_SYMBOL_FIFO_RESET, 0); - REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, /* Disable Clock Ramp Adjuster FIFO */ - PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 0, - 1, 10); - REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, - PIXEL_TO_SYMBOL_FIFO_ENABLE, 1); - - /* Reset and Enable Clock Ramp Adjuster FIFO */ - REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, - FIFO_RESET, 1); - REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, - FIFO_RESET_DONE, 1, - 1, 10); - REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, - FIFO_RESET, 0); - REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, - FIFO_RESET_DONE, 
0, - 1, 10); - - /* For Debug -- Enable CRC */ - REG_UPDATE_2(DP_SYM32_ENC_VID_CRC_CONTROL, - CRC_ENABLE, 1, - CRC_CONT_MODE_ENABLE, 1); - - REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, - FIFO_ENABLE, 1); -} - -static void dcn31_hpo_dp_stream_enc_dp_blank( - struct hpo_dp_stream_encoder *enc) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - /* Disable video transmission */ - REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, - VID_STREAM_ENABLE, 0); - - /* Wait for video stream transmission disabled - * Larger delay to wait until VBLANK - use max retry of - * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode + - * a little more because we may not trust delay accuracy. - */ - REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL, - VID_STREAM_STATUS, 0, - 10, 5000); - - /* Disable SDP transmission */ - REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, - SDP_STREAM_ENABLE, 0); - - /* Disable Pixel to Symbol FIFO */ - REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, - PIXEL_TO_SYMBOL_FIFO_ENABLE, 0); - - /* Disable Clock Ramp Adjuster FIFO */ - REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, - FIFO_ENABLE, 0); -} - -static void dcn31_hpo_dp_stream_enc_disable( - struct hpo_dp_stream_encoder *enc) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - /* Disable DP_SYM32_ENC */ - REG_UPDATE(DP_SYM32_ENC_CONTROL, - DP_SYM32_ENC_ENABLE, 0); - - /* Disable clocks in the DP_STREAM_ENC */ - REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, - DP_STREAM_ENC_CLOCK_EN, 0); -} - -static void dcn31_hpo_dp_stream_enc_set_stream_attribute( - struct hpo_dp_stream_encoder *enc, - struct dc_crtc_timing *crtc_timing, - enum dc_color_space output_color_space, - bool use_vsc_sdp_for_colorimetry, - bool compressed_format, - bool double_buffer_en) -{ - enum dp2_pixel_encoding pixel_encoding; - enum dp2_uncompressed_component_depth component_depth; - uint32_t h_active_start; - uint32_t v_active_start; - uint32_t h_blank; - uint32_t h_back_porch; - uint32_t h_width; - uint32_t v_height; - uint64_t v_freq; - uint8_t misc0 = 0; - uint8_t misc1 = 0; - uint8_t hsp; - uint8_t vsp; - - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - struct dc_crtc_timing hw_crtc_timing = *crtc_timing; - - /* MISC0[0] = 0 video and link clocks are asynchronous - * MISC1[0] = 0 interlace not supported - * MISC1[2:1] = 0 stereo field is handled by hardware - * MISC1[5:3] = 0 Reserved - */ - - /* Interlaced not supported */ - if (hw_crtc_timing.flags.INTERLACE) { - BREAK_TO_DEBUGGER(); - } - - /* Double buffer enable for MSA and pixel format registers - * Only double buffer for changing stream attributes for active streams - * Do not double buffer when initially enabling a stream - */ - REG_UPDATE(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, - MSA_DOUBLE_BUFFER_ENABLE, double_buffer_en); - REG_UPDATE(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, - PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, double_buffer_en); - - /* Pixel Encoding */ - switch (hw_crtc_timing.pixel_encoding) { - case PIXEL_ENCODING_YCBCR422: - pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422; - misc0 = misc0 | 0x2; // MISC0[2:1] = 01 - break; - case PIXEL_ENCODING_YCBCR444: - pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; - misc0 = misc0 | 0x4; // MISC0[2:1] = 10 - - if (hw_crtc_timing.flags.Y_ONLY) { - pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY; - if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) { 
- /* HW testing only, no use case yet. - * Color depth of Y-only could be - * 8, 10, 12, 16 bits - */ - misc1 = misc1 | 0x80; // MISC1[7] = 1 - } - } - break; - case PIXEL_ENCODING_YCBCR420: - pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420; - misc1 = misc1 | 0x40; // MISC1[6] = 1 - break; - case PIXEL_ENCODING_RGB: - default: - pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; - break; - } - - /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used. - * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the - * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, - * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). - */ - if (use_vsc_sdp_for_colorimetry) - misc1 = misc1 | 0x40; - else - misc1 = misc1 & ~0x40; - - /* Color depth */ - switch (hw_crtc_timing.display_color_depth) { - case COLOR_DEPTH_666: - component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; - // MISC0[7:5] = 000 - break; - case COLOR_DEPTH_888: - component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_8BPC; - misc0 = misc0 | 0x20; // MISC0[7:5] = 001 - break; - case COLOR_DEPTH_101010: - component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_10BPC; - misc0 = misc0 | 0x40; // MISC0[7:5] = 010 - break; - case COLOR_DEPTH_121212: - component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_12BPC; - misc0 = misc0 | 0x60; // MISC0[7:5] = 011 - break; - default: - component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; - break; - } - - REG_UPDATE_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, - PIXEL_ENCODING_TYPE, compressed_format, - UNCOMPRESSED_PIXEL_ENCODING, pixel_encoding, - UNCOMPRESSED_COMPONENT_DEPTH, component_depth); - - switch (output_color_space) { - case COLOR_SPACE_SRGB: - misc1 = misc1 & ~0x80; /* bit7 = 0*/ - break; - case COLOR_SPACE_SRGB_LIMITED: - misc0 = misc0 | 0x8; /* bit3=1 */ - misc1 = misc1 & ~0x80; /* bit7 = 0*/ - break; - case COLOR_SPACE_YCBCR601: - case COLOR_SPACE_YCBCR601_LIMITED: - misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ - misc1 = misc1 & ~0x80; /* bit7 = 0*/ - if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) - misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ - else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) - misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ - break; - case COLOR_SPACE_YCBCR709: - case COLOR_SPACE_YCBCR709_LIMITED: - misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ - misc1 = misc1 & ~0x80; /* bit7 = 0*/ - if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) - misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ - else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) - misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ - break; - case COLOR_SPACE_2020_RGB_LIMITEDRANGE: - case COLOR_SPACE_2020_RGB_FULLRANGE: - case COLOR_SPACE_2020_YCBCR: - case COLOR_SPACE_XR_RGB: - case COLOR_SPACE_MSREF_SCRGB: - case COLOR_SPACE_ADOBERGB: - case COLOR_SPACE_DCIP3: - case COLOR_SPACE_XV_YCC_709: - case COLOR_SPACE_XV_YCC_601: - case COLOR_SPACE_DISPLAYNATIVE: - case COLOR_SPACE_DOLBYVISION: - case COLOR_SPACE_APPCTRL: - case COLOR_SPACE_CUSTOMPOINTS: - case COLOR_SPACE_UNKNOWN: - case COLOR_SPACE_YCBCR709_BLACK: - /* do nothing */ - break; - } - - /* calculate from vesa timing parameters - * h_active_start related to leading edge of sync - */ - h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left - - hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right; - - h_back_porch = h_blank - hw_crtc_timing.h_front_porch - - hw_crtc_timing.h_sync_width; - - /* start at beginning of left border */ - 
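The MISC0/MISC1 values assembled above follow the DP MSA layout as programmed here: MISC0[2:1] carries the pixel encoding, MISC0[3] limited range or YCbCr, MISC0[4] BT.709 colorimetry, MISC0[7:5] the component depth, and MISC1[6] flags that colorimetry is conveyed by the VSC SDP instead. A hypothetical helper mirroring that packing (bit values taken from the switch statements above; the function itself is illustrative only):

#include <stdbool.h>
#include <stdint.h>

static uint8_t build_msa_misc0(bool ycbcr422, bool ycbcr444,
                               bool limited_or_ycbcr, bool bt709,
                               unsigned int bpc)
{
        uint8_t misc0 = 0;

        if (ycbcr422)
                misc0 |= 0x2;   /* MISC0[2:1] = 01 */
        else if (ycbcr444)
                misc0 |= 0x4;   /* MISC0[2:1] = 10 */
        if (limited_or_ycbcr)
                misc0 |= 0x8;   /* MISC0[3] = 1 */
        if (bt709)
                misc0 |= 0x10;  /* MISC0[4] = 1 */

        if (bpc == 8)
                misc0 |= 0x20;  /* MISC0[7:5] = 001 */
        else if (bpc == 10)
                misc0 |= 0x40;  /* MISC0[7:5] = 010 */
        else if (bpc == 12)
                misc0 |= 0x60;  /* MISC0[7:5] = 011 */
        /* 6 bpc leaves MISC0[7:5] = 000 */

        return misc0;
}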
h_active_start = hw_crtc_timing.h_sync_width + h_back_porch; - - v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top - - hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom - - hw_crtc_timing.v_front_porch; - - h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right; - v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom; - hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80; - vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80; - v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100; - - /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1 - * - * Lane 0 Lane 1 Lane 2 Lane 3 - * MSA[0] = { 0, 0, 0, VFREQ[47:40]} - * MSA[1] = { 0, 0, 0, VFREQ[39:32]} - * MSA[2] = { 0, 0, 0, VFREQ[31:24]} - * MSA[3] = { HTotal[15:8], HStart[15:8], HWidth[15:8], VFREQ[23:16]} - * MSA[4] = { HTotal[ 7:0], HStart[ 7:0], HWidth[ 7:0], VFREQ[15: 8]} - * MSA[5] = { VTotal[15:8], VStart[15:8], VHeight[15:8], VFREQ[ 7: 0]} - * MSA[6] = { VTotal[ 7:0], VStart[ 7:0], VHeight[ 7:0], MISC0[ 7: 0]} - * MSA[7] = { HSP|HSW[14:8], VSP|VSW[14:8], 0, MISC1[ 7: 0]} - * MSA[8] = { HSW[ 7:0], VSW[ 7:0], 0, 0} - */ - REG_SET_4(DP_SYM32_ENC_VID_MSA0, 0, - MSA_DATA_LANE_0, 0, - MSA_DATA_LANE_1, 0, - MSA_DATA_LANE_2, 0, - MSA_DATA_LANE_3, v_freq >> 40); - - REG_SET_4(DP_SYM32_ENC_VID_MSA1, 0, - MSA_DATA_LANE_0, 0, - MSA_DATA_LANE_1, 0, - MSA_DATA_LANE_2, 0, - MSA_DATA_LANE_3, (v_freq >> 32) & 0xff); - - REG_SET_4(DP_SYM32_ENC_VID_MSA2, 0, - MSA_DATA_LANE_0, 0, - MSA_DATA_LANE_1, 0, - MSA_DATA_LANE_2, 0, - MSA_DATA_LANE_3, (v_freq >> 24) & 0xff); - - REG_SET_4(DP_SYM32_ENC_VID_MSA3, 0, - MSA_DATA_LANE_0, hw_crtc_timing.h_total >> 8, - MSA_DATA_LANE_1, h_active_start >> 8, - MSA_DATA_LANE_2, h_width >> 8, - MSA_DATA_LANE_3, (v_freq >> 16) & 0xff); - - REG_SET_4(DP_SYM32_ENC_VID_MSA4, 0, - MSA_DATA_LANE_0, hw_crtc_timing.h_total & 0xff, - MSA_DATA_LANE_1, h_active_start & 0xff, - MSA_DATA_LANE_2, h_width & 0xff, - MSA_DATA_LANE_3, (v_freq >> 8) & 0xff); - - REG_SET_4(DP_SYM32_ENC_VID_MSA5, 0, - MSA_DATA_LANE_0, hw_crtc_timing.v_total >> 8, - MSA_DATA_LANE_1, v_active_start >> 8, - MSA_DATA_LANE_2, v_height >> 8, - MSA_DATA_LANE_3, v_freq & 0xff); - - REG_SET_4(DP_SYM32_ENC_VID_MSA6, 0, - MSA_DATA_LANE_0, hw_crtc_timing.v_total & 0xff, - MSA_DATA_LANE_1, v_active_start & 0xff, - MSA_DATA_LANE_2, v_height & 0xff, - MSA_DATA_LANE_3, misc0); - - REG_SET_4(DP_SYM32_ENC_VID_MSA7, 0, - MSA_DATA_LANE_0, hsp | (hw_crtc_timing.h_sync_width >> 8), - MSA_DATA_LANE_1, vsp | (hw_crtc_timing.v_sync_width >> 8), - MSA_DATA_LANE_2, 0, - MSA_DATA_LANE_3, misc1); - - REG_SET_4(DP_SYM32_ENC_VID_MSA8, 0, - MSA_DATA_LANE_0, hw_crtc_timing.h_sync_width & 0xff, - MSA_DATA_LANE_1, hw_crtc_timing.v_sync_width & 0xff, - MSA_DATA_LANE_2, 0, - MSA_DATA_LANE_3, 0); -} - -static void dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num( - struct hpo_dp_stream_encoder *enc, - struct encoder_info_frame *info_frame) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - if (info_frame->adaptive_sync.valid == true && - info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { - //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, 1); - - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, - info_frame->sdp_line_num.adaptive_sync_line_num); - } -} - -static void 
dcn31_hpo_dp_stream_enc_update_dp_info_packets( - struct hpo_dp_stream_encoder *enc, - const struct encoder_info_frame *info_frame) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - uint32_t dmdata_packet_enabled = 0; - - if (info_frame->vsc.valid) - enc->vpg->funcs->update_generic_info_packet( - enc->vpg, - 0, /* packetIndex */ - &info_frame->vsc, - true); - - if (info_frame->spd.valid) - enc->vpg->funcs->update_generic_info_packet( - enc->vpg, - 2, /* packetIndex */ - &info_frame->spd, - true); - - if (info_frame->hdrsmd.valid) - enc->vpg->funcs->update_generic_info_packet( - enc->vpg, - 3, /* packetIndex */ - &info_frame->hdrsmd, - true); - - /* packetIndex 4 is used for send immediate sdp message, and please - * use other packetIndex (such as 5,6) for other info packet - */ - - if (info_frame->adaptive_sync.valid) - enc->vpg->funcs->update_generic_info_packet( - enc->vpg, - 5, /* packetIndex */ - &info_frame->adaptive_sync, - true); - - /* enable/disable transmission of packet(s). - * If enabled, packet transmission begins on the next frame - */ - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid); - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid); - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid); - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->adaptive_sync.valid); - - /* check if dynamic metadata packet transmission is enabled */ - REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, - METADATA_PACKET_ENABLE, &dmdata_packet_enabled); - - /* Enable secondary data path */ - REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, - SDP_STREAM_ENABLE, 1); -} - -static void dcn31_hpo_dp_stream_enc_stop_dp_info_packets( - struct hpo_dp_stream_encoder *enc) -{ - /* stop generic packets on DP */ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - uint32_t asp_enable = 0; - uint32_t atp_enable = 0; - uint32_t aip_enable = 0; - uint32_t acm_enable = 0; - - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); - - /* Disable secondary data path if audio is also disabled */ - REG_GET_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, - ASP_ENABLE, &asp_enable, - ATP_ENABLE, &atp_enable, - AIP_ENABLE, &aip_enable, - ACM_ENABLE, &acm_enable); - if (!(asp_enable || atp_enable || aip_enable || acm_enable)) - REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, - SDP_STREAM_ENABLE, 0); -} - -static uint32_t hpo_dp_is_gsp_enabled( - struct hpo_dp_stream_encoder *enc) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - uint32_t gsp0_enabled = 0; - uint32_t gsp2_enabled = 0; - uint32_t gsp3_enabled = 0; - uint32_t gsp11_enabled = 0; - - REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp0_enabled); - REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp2_enabled); - REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp3_enabled); - REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp11_enabled); - - return (gsp0_enabled || gsp2_enabled || 
gsp3_enabled || gsp11_enabled); -} - -static void dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet( - struct hpo_dp_stream_encoder *enc, - bool enable, - uint8_t *dsc_packed_pps, - bool immediate_update) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - if (enable) { - struct dc_info_packet pps_sdp; - int i; - - /* Configure for PPS packet size (128 bytes) */ - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, - GSP_PAYLOAD_SIZE, 3); - - /* Load PPS into infoframe (SDP) registers */ - pps_sdp.valid = true; - pps_sdp.hb0 = 0; - pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS; - pps_sdp.hb2 = 127; - pps_sdp.hb3 = 0; - - for (i = 0; i < 4; i++) { - memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32); - enc3->base.vpg->funcs->update_generic_info_packet( - enc3->base.vpg, - 11 + i, - &pps_sdp, - immediate_update); - } - - /* SW should make sure VBID[6] update line number is bigger - * than PPS transmit line number - */ - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, - GSP_TRANSMISSION_LINE_NUMBER, 2); - - REG_UPDATE_2(DP_SYM32_ENC_VID_VBID_CONTROL, - VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, 0, - VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, 3); - - /* Send PPS data at the line number specified above. */ - REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, - GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 1); - REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, - SDP_STREAM_ENABLE, 1); - } else { - /* Disable Generic Stream Packet 11 (GSP) transmission */ - REG_UPDATE_2(DP_SYM32_ENC_SDP_GSP_CONTROL11, - GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0, - GSP_PAYLOAD_SIZE, 0); - } -} - -static void dcn31_hpo_dp_stream_enc_map_stream_to_link( - struct hpo_dp_stream_encoder *enc, - uint32_t stream_enc_inst, - uint32_t link_enc_inst) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - ASSERT(stream_enc_inst < 4 && link_enc_inst < 2); - - switch (stream_enc_inst) { - case 0: - REG_UPDATE(DP_STREAM_MAPPER_CONTROL0, - DP_STREAM_LINK_TARGET, link_enc_inst); - break; - case 1: - REG_UPDATE(DP_STREAM_MAPPER_CONTROL1, - DP_STREAM_LINK_TARGET, link_enc_inst); - break; - case 2: - REG_UPDATE(DP_STREAM_MAPPER_CONTROL2, - DP_STREAM_LINK_TARGET, link_enc_inst); - break; - case 3: - REG_UPDATE(DP_STREAM_MAPPER_CONTROL3, - DP_STREAM_LINK_TARGET, link_enc_inst); - break; - } -} - -static void dcn31_hpo_dp_stream_enc_audio_setup( - struct hpo_dp_stream_encoder *enc, - unsigned int az_inst, - struct audio_info *info) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - /* Set the input mux for video stream source */ - REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL, - DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst); - - ASSERT(enc->apg); - enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info); -} - -static void dcn31_hpo_dp_stream_enc_audio_enable( - struct hpo_dp_stream_encoder *enc) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - /* Enable Audio packets */ - REG_UPDATE(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 1); - - /* Program the ATP and AIP next */ - REG_UPDATE_2(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, - ATP_ENABLE, 1, - AIP_ENABLE, 1); - - /* Enable secondary data path */ - REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, - SDP_STREAM_ENABLE, 1); - - /* Enable APG block */ - enc->apg->funcs->enable_apg(enc->apg); -} - -static void dcn31_hpo_dp_stream_enc_audio_disable( - struct hpo_dp_stream_encoder *enc) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = 
DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - /* Disable Audio packets */ - REG_UPDATE_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, - ASP_ENABLE, 0, - ATP_ENABLE, 0, - AIP_ENABLE, 0, - ACM_ENABLE, 0); - - /* Disable STP Stream Enable if other SDP GSP are also disabled */ - if (!(hpo_dp_is_gsp_enabled(enc))) - REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, - SDP_STREAM_ENABLE, 0); - - /* Disable APG block */ - enc->apg->funcs->disable_apg(enc->apg); -} - -static void dcn31_hpo_dp_stream_enc_read_state( - struct hpo_dp_stream_encoder *enc, - struct hpo_dp_stream_encoder_state *s) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - REG_GET(DP_SYM32_ENC_CONTROL, - DP_SYM32_ENC_ENABLE, &s->stream_enc_enabled); - REG_GET(DP_SYM32_ENC_VID_STREAM_CONTROL, - VID_STREAM_ENABLE, &s->vid_stream_enabled); - REG_GET(DP_STREAM_ENC_INPUT_MUX_CONTROL, - DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, &s->otg_inst); - - REG_GET_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, - PIXEL_ENCODING_TYPE, &s->compressed_format, - UNCOMPRESSED_PIXEL_ENCODING, &s->pixel_encoding, - UNCOMPRESSED_COMPONENT_DEPTH, &s->component_depth); - - REG_GET(DP_SYM32_ENC_SDP_CONTROL, - SDP_STREAM_ENABLE, &s->sdp_enabled); - - switch (enc->inst) { - case 0: - REG_GET(DP_STREAM_MAPPER_CONTROL0, - DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); - break; - case 1: - REG_GET(DP_STREAM_MAPPER_CONTROL1, - DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); - break; - case 2: - REG_GET(DP_STREAM_MAPPER_CONTROL2, - DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); - break; - case 3: - REG_GET(DP_STREAM_MAPPER_CONTROL3, - DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); - break; - } -} - -static void dcn31_set_hblank_min_symbol_width( - struct hpo_dp_stream_encoder *enc, - uint16_t width) -{ - struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); - - REG_SET(DP_SYM32_ENC_HBLANK_CONTROL, 0, - HBLANK_MINIMUM_SYMBOL_WIDTH, width); -} - -static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { - .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream, - .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank, - .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank, - .disable = dcn31_hpo_dp_stream_enc_disable, - .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute, - .update_dp_info_packets_sdp_line_num = dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num, - .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets, - .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets, - .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet, - .map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link, - .dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup, - .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable, - .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable, - .read_state = dcn31_hpo_dp_stream_enc_read_state, - .set_hblank_min_symbol_width = dcn31_set_hblank_min_symbol_width, -}; - -void dcn31_hpo_dp_stream_encoder_construct( - struct dcn31_hpo_dp_stream_encoder *enc3, - struct dc_context *ctx, - struct dc_bios *bp, - uint32_t inst, - enum engine_id eng_id, - struct vpg *vpg, - struct apg *apg, - const struct dcn31_hpo_dp_stream_encoder_registers *regs, - const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, - const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask) -{ - enc3->base.funcs = &dcn30_str_enc_funcs; - enc3->base.ctx = ctx; - enc3->base.inst = inst; - enc3->base.id = eng_id; - 
enc3->base.bp = bp; - enc3->base.vpg = vpg; - enc3->base.apg = apg; - enc3->regs = regs; - enc3->hpo_se_shift = hpo_se_shift; - enc3->hpo_se_mask = hpo_se_mask; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h deleted file mode 100644 index 82c3b3ac1f0d01..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__ -#define __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__ - -#include "dcn30/dcn30_vpg.h" -#include "dcn31/dcn31_apg.h" -#include "stream_encoder.h" - - -#define DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(hpo_dp_stream_encoder)\ - container_of(hpo_dp_stream_encoder, struct dcn31_hpo_dp_stream_encoder, base) - - -/* Define MSA_DATA_LANE_[0-3] fields to make programming easier */ -#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0__SHIFT 0x0 -#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1__SHIFT 0x8 -#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2__SHIFT 0x10 -#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3__SHIFT 0x18 -#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0_MASK 0x000000FFL -#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1_MASK 0x0000FF00L -#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2_MASK 0x00FF0000L -#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3_MASK 0xFF000000L - - -#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id) \ - SR(DP_STREAM_MAPPER_CONTROL0),\ - SR(DP_STREAM_MAPPER_CONTROL1),\ - SR(DP_STREAM_MAPPER_CONTROL2),\ - SR(DP_STREAM_MAPPER_CONTROL3),\ - SRI(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id),\ - SRI(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id),\ - SRI(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id),\ - SRI(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id),\ - SRI(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id),\ - 
SRI(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \ - SRI(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) - -#define DCN3_1_HPO_DP_STREAM_ENC_REGS \ - uint32_t DP_STREAM_MAPPER_CONTROL0;\ - uint32_t DP_STREAM_MAPPER_CONTROL1;\ - uint32_t DP_STREAM_MAPPER_CONTROL2;\ - uint32_t DP_STREAM_MAPPER_CONTROL3;\ - uint32_t DP_STREAM_ENC_CLOCK_CONTROL;\ - uint32_t DP_STREAM_ENC_INPUT_MUX_CONTROL;\ - uint32_t DP_STREAM_ENC_AUDIO_CONTROL;\ - uint32_t DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0;\ - uint32_t DP_SYM32_ENC_CONTROL;\ - uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT;\ - uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL;\ - uint32_t DP_SYM32_ENC_VID_MSA0;\ - uint32_t DP_SYM32_ENC_VID_MSA1;\ - uint32_t DP_SYM32_ENC_VID_MSA2;\ - uint32_t DP_SYM32_ENC_VID_MSA3;\ - uint32_t DP_SYM32_ENC_VID_MSA4;\ - uint32_t DP_SYM32_ENC_VID_MSA5;\ - uint32_t DP_SYM32_ENC_VID_MSA6;\ - uint32_t DP_SYM32_ENC_VID_MSA7;\ - uint32_t DP_SYM32_ENC_VID_MSA8;\ - uint32_t DP_SYM32_ENC_VID_MSA_CONTROL;\ - uint32_t DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL;\ - uint32_t DP_SYM32_ENC_VID_FIFO_CONTROL;\ - uint32_t DP_SYM32_ENC_VID_STREAM_CONTROL;\ - uint32_t DP_SYM32_ENC_VID_VBID_CONTROL;\ - uint32_t DP_SYM32_ENC_SDP_CONTROL;\ - uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL0;\ - uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL2;\ - uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL3;\ - uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL5;\ - uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\ - uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\ - uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\ - uint32_t DP_SYM32_ENC_VID_CRC_CONTROL;\ - uint32_t DP_SYM32_ENC_HBLANK_CONTROL - - -#define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\ - SE_SF(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, mask_sh),\ - SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, mask_sh),\ - SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, mask_sh),\ - SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, mask_sh),\ - SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, mask_sh),\ - SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, mask_sh),\ - SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, 
mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_PIXEL_ENCODING, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_COMPONENT_DEPTH, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, MSA_DOUBLE_BUFFER_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_0, mask_sh),\ - SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_1, mask_sh),\ - SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_2, mask_sh),\ - SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_3, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET_DONE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_STATUS, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_PAYLOAD_SIZE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, METADATA_PACKET_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AUDIO_MUTE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ATP_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\ - SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh) - - -#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \ - type DP_STREAM_LINK_TARGET;\ - type DP_STREAM_ENC_CLOCK_EN;\ - type DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL;\ - type DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL;\ - type FIFO_RESET;\ - type FIFO_RESET_DONE;\ - type FIFO_ENABLE;\ - type DP_SYM32_ENC_RESET;\ - type DP_SYM32_ENC_RESET_DONE;\ - type DP_SYM32_ENC_ENABLE;\ - type PIXEL_ENCODING_TYPE;\ - type UNCOMPRESSED_PIXEL_ENCODING;\ - type UNCOMPRESSED_COMPONENT_DEPTH;\ - type PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE;\ - type MSA_DOUBLE_BUFFER_ENABLE;\ - type MSA_DATA_LANE_0;\ - type MSA_DATA_LANE_1;\ - type MSA_DATA_LANE_2;\ - type MSA_DATA_LANE_3;\ - type PIXEL_TO_SYMBOL_FIFO_RESET;\ - type PIXEL_TO_SYMBOL_FIFO_RESET_DONE;\ - type PIXEL_TO_SYMBOL_FIFO_ENABLE;\ - type VID_STREAM_ENABLE;\ - type VID_STREAM_STATUS;\ - type 
VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE;\ - type VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER;\ - type SDP_STREAM_ENABLE;\ - type AUDIO_MUTE;\ - type ASP_ENABLE;\ - type ATP_ENABLE;\ - type AIP_ENABLE;\ - type ACM_ENABLE;\ - type GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE;\ - type GSP_PAYLOAD_SIZE;\ - type GSP_TRANSMISSION_LINE_NUMBER;\ - type GSP_SOF_REFERENCE;\ - type METADATA_PACKET_ENABLE;\ - type CRC_ENABLE;\ - type CRC_CONT_MODE_ENABLE;\ - type HBLANK_MINIMUM_SYMBOL_WIDTH - - -struct dcn31_hpo_dp_stream_encoder_registers { - DCN3_1_HPO_DP_STREAM_ENC_REGS; -}; - -struct dcn31_hpo_dp_stream_encoder_shift { - DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint8_t); -}; - -struct dcn31_hpo_dp_stream_encoder_mask { - DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint32_t); -}; - -struct dcn31_hpo_dp_stream_encoder { - struct hpo_dp_stream_encoder base; - const struct dcn31_hpo_dp_stream_encoder_registers *regs; - const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift; - const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask; -}; - - -void dcn31_hpo_dp_stream_encoder_construct( - struct dcn31_hpo_dp_stream_encoder *enc3, - struct dc_context *ctx, - struct dc_bios *bp, - uint32_t inst, - enum engine_id eng_id, - struct vpg *vpg, - struct apg *apg, - const struct dcn31_hpo_dp_stream_encoder_registers *regs, - const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, - const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask); - - -#endif // __DAL_DCN31_HPO_STREAM_ENCODER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile deleted file mode 100644 index 15fdcf7c64667d..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved. -# -# Makefile for dcn314. - -DCN314 = dcn314_dio_stream_encoder.o - -AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN314) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c deleted file mode 100644 index 5b343f745cf333..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c +++ /dev/null @@ -1,496 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright 2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
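A minimal, standalone sketch of what the removed HPO encoder's dp_set_dsc_pps_info_packet loop does: a 128-byte packed DSC PPS is split into four 32-byte payloads, one per generic SDP (GSP 11 through 14). The pps_sdp_chunk struct and the 0x10 type value below are stand-ins for illustration only, not the driver's dc_info_packet definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pps_sdp_chunk {
	uint8_t hb1;    /* SDP packet type byte (PPS infoframe; value is an assumption here) */
	uint8_t hb2;    /* payload size minus one: 127 for a 128-byte PPS */
	uint8_t sb[32]; /* the 32 PPS bytes carried by this particular GSP */
};

int main(void)
{
	uint8_t dsc_packed_pps[128];
	struct pps_sdp_chunk chunk;
	int i;

	for (i = 0; i < 128; i++)	/* stand-in PPS contents */
		dsc_packed_pps[i] = (uint8_t)i;

	for (i = 0; i < 4; i++) {
		chunk.hb1 = 0x10;	/* illustrative PPS SDP type */
		chunk.hb2 = 127;
		memcpy(chunk.sb, &dsc_packed_pps[i * 32], 32);
		printf("GSP %d: type 0x%02x, total PPS len %u, carries bytes %d..%d\n",
		       11 + i, chunk.hb1, chunk.hb2 + 1, i * 32, i * 32 + 31);
	}
	return 0;
}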
- * - * Authors: AMD - * - */ - - -#include "dc_bios_types.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn314_dio_stream_encoder.h" -#include "reg_helper.h" -#include "hw_shared.h" -#include "link.h" -#include "dpcd_defs.h" - -#define DC_LOGGER \ - enc1->base.ctx->logger - -#define REG(reg)\ - (enc1->regs->reg) - -#undef FN -#define FN(reg_name, field_name) \ - enc1->se_shift->field_name, enc1->se_mask->field_name - -#define VBI_LINE_0 0 -#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000 - -#define CTX \ - enc1->base.ctx - -void enc314_reset_fifo(struct stream_encoder *enc, bool reset) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - uint32_t reset_val = reset ? 1 : 0; - uint32_t is_symclk_on; - - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val); - REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on); - - if (is_symclk_on) - REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000); - else - udelay(10); -} - -void enc314_enable_fifo(struct stream_encoder *enc) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); - - enc314_reset_fifo(enc, true); - enc314_reset_fifo(enc, false); - - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1); -} - -void enc314_disable_fifo(struct stream_encoder *enc) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0); -} - -void enc314_dp_set_odm_combine( - struct stream_encoder *enc, - bool odm_combine) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, odm_combine); -} - -/* setup stream encoder in dvi mode */ -void enc314_stream_encoder_dvi_set_stream_attribute( - struct stream_encoder *enc, - struct dc_crtc_timing *crtc_timing, - bool is_dual_link) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { - struct bp_encoder_control cntl = {0}; - - cntl.action = ENCODER_CONTROL_SETUP; - cntl.engine_id = enc1->base.id; - cntl.signal = is_dual_link ? - SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; - cntl.enable_dp_audio = false; - cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10; - cntl.lanes_number = (is_dual_link) ? 
LANE_COUNT_EIGHT : LANE_COUNT_FOUR; - - if (enc1->base.bp->funcs->encoder_control( - enc1->base.bp, &cntl) != BP_RESULT_OK) - return; - - } else { - - //Set pattern for clock channel, default vlue 0x63 does not work - REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F); - - //DIG_BE_TMDS_DVI_MODE : TMDS-DVI mode is already set in link_encoder_setup - - //DIG_SOURCE_SELECT is already set in dig_connect_to_otg - - enc314_enable_fifo(enc); - } - - ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB); - ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888); - enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); -} - -/* setup stream encoder in hdmi mode */ -void enc314_stream_encoder_hdmi_set_stream_attribute( - struct stream_encoder *enc, - struct dc_crtc_timing *crtc_timing, - int actual_pix_clk_khz, - bool enable_audio) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { - struct bp_encoder_control cntl = {0}; - - cntl.action = ENCODER_CONTROL_SETUP; - cntl.engine_id = enc1->base.id; - cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; - cntl.enable_dp_audio = enable_audio; - cntl.pixel_clock = actual_pix_clk_khz; - cntl.lanes_number = LANE_COUNT_FOUR; - - if (enc1->base.bp->funcs->encoder_control( - enc1->base.bp, &cntl) != BP_RESULT_OK) - return; - - } else { - - //Set pattern for clock channel, default vlue 0x63 does not work - REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F); - - //DIG_BE_TMDS_HDMI_MODE : TMDS-HDMI mode is already set in link_encoder_setup - - //DIG_SOURCE_SELECT is already set in dig_connect_to_otg - - enc314_enable_fifo(enc); - } - - /* Configure pixel encoding */ - enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); - - /* setup HDMI engine */ - REG_UPDATE_6(HDMI_CONTROL, - HDMI_PACKET_GEN_VERSION, 1, - HDMI_KEEPOUT_MODE, 1, - HDMI_DEEP_COLOR_ENABLE, 0, - HDMI_DATA_SCRAMBLE_EN, 0, - HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1, - HDMI_CLOCK_CHANNEL_RATE, 0); - - /* Configure color depth */ - switch (crtc_timing->display_color_depth) { - case COLOR_DEPTH_888: - REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); - break; - case COLOR_DEPTH_101010: - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { - REG_UPDATE_2(HDMI_CONTROL, - HDMI_DEEP_COLOR_DEPTH, 1, - HDMI_DEEP_COLOR_ENABLE, 0); - } else { - REG_UPDATE_2(HDMI_CONTROL, - HDMI_DEEP_COLOR_DEPTH, 1, - HDMI_DEEP_COLOR_ENABLE, 1); - } - break; - case COLOR_DEPTH_121212: - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { - REG_UPDATE_2(HDMI_CONTROL, - HDMI_DEEP_COLOR_DEPTH, 2, - HDMI_DEEP_COLOR_ENABLE, 0); - } else { - REG_UPDATE_2(HDMI_CONTROL, - HDMI_DEEP_COLOR_DEPTH, 2, - HDMI_DEEP_COLOR_ENABLE, 1); - } - break; - case COLOR_DEPTH_161616: - REG_UPDATE_2(HDMI_CONTROL, - HDMI_DEEP_COLOR_DEPTH, 3, - HDMI_DEEP_COLOR_ENABLE, 1); - break; - default: - break; - } - - if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) { - /* enable HDMI data scrambler - * HDMI_CLOCK_CHANNEL_RATE_MORE_340M - * Clock channel frequency is 1/4 of character rate. 
- */ - REG_UPDATE_2(HDMI_CONTROL, - HDMI_DATA_SCRAMBLE_EN, 1, - HDMI_CLOCK_CHANNEL_RATE, 1); - } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) { - - /* TODO: New feature for DCE11, still need to implement */ - - /* enable HDMI data scrambler - * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE - * Clock channel frequency is the same - * as character rate - */ - REG_UPDATE_2(HDMI_CONTROL, - HDMI_DATA_SCRAMBLE_EN, 1, - HDMI_CLOCK_CHANNEL_RATE, 0); - } - - - /* Enable transmission of General Control packet on every frame */ - REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL, - HDMI_GC_CONT, 1, - HDMI_GC_SEND, 1, - HDMI_NULL_SEND, 1); - - /* Disable Audio Content Protection packet transmission */ - REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0); - - /* following belongs to audio */ - /* Enable Audio InfoFrame packet transmission. */ - REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); - - /* update double-buffered AUDIO_INFO registers immediately */ - ASSERT(enc->afmt); - enc->afmt->funcs->audio_info_immediate_update(enc->afmt); - - /* Select line number on which to send Audio InfoFrame packets */ - REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, - VBI_LINE_0 + 2); - - /* set HDMI GC AVMUTE */ - REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0); -} - - - -static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing) -{ - bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; - - two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 - && !timing->dsc_cfg.ycbcr422_simple); - return two_pix; -} - -void enc314_stream_encoder_dp_blank( - struct dc_link *link, - struct stream_encoder *enc) -{ - enc1_stream_encoder_dp_blank(link, enc); - - /* Disable FIFO after the DP vid stream is disabled to avoid corruption. 
*/ - if (enc->ctx->dc->debug.dig_fifo_off_in_blank) - enc314_disable_fifo(enc); -} - -void enc314_stream_encoder_dp_unblank( - struct dc_link *link, - struct stream_encoder *enc, - const struct encoder_unblank_param *param) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) { - uint32_t n_vid = 0x8000; - uint32_t m_vid; - uint32_t n_multiply = 0; - uint32_t pix_per_cycle = 0; - uint64_t m_vid_l = n_vid; - - /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */ - if (is_two_pixels_per_containter(¶m->timing) || param->opp_cnt > 1) { - /*this logic should be the same in get_pixel_clock_parameters() */ - n_multiply = 1; - pix_per_cycle = 1; - } - /* M / N = Fstream / Flink - * m_vid / n_vid = pixel rate / link rate - */ - - m_vid_l *= param->timing.pix_clk_100hz / 10; - m_vid_l = div_u64(m_vid_l, - param->link_settings.link_rate - * LINK_RATE_REF_FREQ_IN_KHZ); - - m_vid = (uint32_t) m_vid_l; - - /* enable auto measurement */ - - REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0); - - /* auto measurement need 1 full 0x8000 symbol cycle to kick in, - * therefore program initial value for Mvid and Nvid - */ - - REG_UPDATE(DP_VID_N, DP_VID_N, n_vid); - - REG_UPDATE(DP_VID_M, DP_VID_M, m_vid); - - REG_UPDATE_2(DP_VID_TIMING, - DP_VID_M_N_GEN_EN, 1, - DP_VID_N_MUL, n_multiply); - - REG_UPDATE(DP_PIXEL_FORMAT, - DP_PIXEL_PER_CYCLE_PROCESSING_MODE, - pix_per_cycle); - } - - /* make sure stream is disabled before resetting steer fifo */ - REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false); - REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000); - - /* DIG_START is removed from the register spec */ - - /* switch DP encoder to CRTC data, but reset it the fifo first. It may happen - * that it overflows during mode transition, and sometimes doesn't recover. - */ - REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); - udelay(10); - - REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); - - /* wait 100us for DIG/DP logic to prime - * (i.e. a few video lines) - */ - udelay(100); - - /* the hardware would start sending video at the start of the next DP - * frame (i.e. rising edge of the vblank). - * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this - * register has no effect on enable transition! HW always guarantees - * VID_STREAM enable at start of next frame, and this is not - * programmable - */ - - REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); - - /* - * DIG Resync FIFO now needs to be explicitly enabled. - * This should come after DP_VID_STREAM_ENABLE per HW docs. - */ - enc314_enable_fifo(enc); - - link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); -} - -/* Set DSC-related configuration. - * dsc_mode: 0 disables DSC, other values enable DSC in specified format - * sc_bytes_per_pixel: DP_DSC_BYTES_PER_PIXEL removed in DCN32 - * dsc_slice_width: DP_DSC_SLICE_WIDTH removed in DCN32 - */ -void enc314_dp_set_dsc_config(struct stream_encoder *enc, - enum optc_dsc_mode dsc_mode, - uint32_t dsc_bytes_per_pixel, - uint32_t dsc_slice_width) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1); -} - -/* this function read dsc related register fields to be logged later in dcn10_log_hw_state - * into a dcn_dsc_state struct. 
- */ -void enc314_read_state(struct stream_encoder *enc, struct enc_state *s) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - //if dsc is enabled, continue to read - REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode); - if (s->dsc_mode) { - REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, &s->sec_gsp_pps_line_num); - - REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference); - REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num); - - REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, &s->sec_gsp_pps_enable); - REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable); - } -} - -void enc314_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - // The naming of this field is confusing, what it means is the output mode of otg, which - // is the input mode of the dig - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0); -} - - -static const struct stream_encoder_funcs dcn314_str_enc_funcs = { - .dp_set_odm_combine = - enc314_dp_set_odm_combine, - .dp_set_stream_attribute = - enc2_stream_encoder_dp_set_stream_attribute, - .hdmi_set_stream_attribute = - enc314_stream_encoder_hdmi_set_stream_attribute, - .dvi_set_stream_attribute = - enc314_stream_encoder_dvi_set_stream_attribute, - .set_throttled_vcp_size = - enc1_stream_encoder_set_throttled_vcp_size, - .update_hdmi_info_packets = - enc3_stream_encoder_update_hdmi_info_packets, - .stop_hdmi_info_packets = - enc3_stream_encoder_stop_hdmi_info_packets, - .update_dp_info_packets_sdp_line_num = - enc3_stream_encoder_update_dp_info_packets_sdp_line_num, - .update_dp_info_packets = - enc3_stream_encoder_update_dp_info_packets, - .stop_dp_info_packets = - enc1_stream_encoder_stop_dp_info_packets, - .dp_blank = - enc314_stream_encoder_dp_blank, - .dp_unblank = - enc314_stream_encoder_dp_unblank, - .audio_mute_control = enc3_audio_mute_control, - - .dp_audio_setup = enc3_se_dp_audio_setup, - .dp_audio_enable = enc3_se_dp_audio_enable, - .dp_audio_disable = enc1_se_dp_audio_disable, - - .hdmi_audio_setup = enc3_se_hdmi_audio_setup, - .hdmi_audio_disable = enc1_se_hdmi_audio_disable, - .setup_stereo_sync = enc1_setup_stereo_sync, - .set_avmute = enc1_stream_encoder_set_avmute, - .dig_connect_to_otg = enc1_dig_connect_to_otg, - .dig_source_otg = enc1_dig_source_otg, - - .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, - - .enc_read_state = enc314_read_state, - .dp_set_dsc_config = enc314_dp_set_dsc_config, - .dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet, - .set_dynamic_metadata = enc2_set_dynamic_metadata, - .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, - - .enable_fifo = enc314_enable_fifo, - .disable_fifo = enc314_disable_fifo, - .set_input_mode = enc314_set_dig_input_mode, -}; - -void dcn314_dio_stream_encoder_construct( - struct dcn10_stream_encoder *enc1, - struct dc_context *ctx, - struct dc_bios *bp, - enum engine_id eng_id, - struct vpg *vpg, - struct afmt *afmt, - const struct dcn10_stream_enc_registers *regs, - const struct dcn10_stream_encoder_shift *se_shift, - const struct dcn10_stream_encoder_mask *se_mask) -{ - enc1->base.funcs = &dcn314_str_enc_funcs; - enc1->base.ctx = ctx; - enc1->base.id = eng_id; - enc1->base.bp = bp; - enc1->base.vpg = vpg; - enc1->base.afmt = afmt; - enc1->regs = regs; - enc1->se_shift = se_shift; - enc1->se_mask = se_mask; - enc1->base.stream_enc_inst = vpg->inst; -} - diff 
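A worked sketch of the M/N arithmetic that enc314_stream_encoder_dp_unblank() programs: with N fixed at 0x8000, M/N equals pixel rate over link symbol rate. The pixel clock and HBR2 figures below are illustrative, and the 270000 kHz per-unit link rate mirrors the driver's LINK_RATE_REF_FREQ_IN_KHZ convention as an assumption.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t n_vid = 0x8000;          /* fixed N, as in the unblank path */
	const uint64_t pix_clk_100hz = 5940000; /* 594.0 MHz pixel clock, in 100 Hz units */
	const uint64_t link_rate = 20;          /* HBR2: 20 x 0.27 Gbps per lane (example) */
	const uint64_t link_ref_khz = 270000;   /* assumed LINK_RATE_REF_FREQ_IN_KHZ */

	/* m_vid = n_vid * pixel_clock_khz / link_symbol_rate_khz */
	uint64_t m_vid = (uint64_t)n_vid * (pix_clk_100hz / 10);

	m_vid /= link_rate * link_ref_khz;

	printf("M = %llu, N = %u, M/N = %.4f (pixel 594000 kHz / link 5400000 kHz = 0.1100)\n",
	       (unsigned long long)m_vid, n_vid, (double)m_vid / n_vid);
	return 0;
}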
--git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h deleted file mode 100644 index 86548be591be09..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h +++ /dev/null @@ -1,355 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_DIO_STREAM_ENCODER_DCN314_H__ -#define __DC_DIO_STREAM_ENCODER_DCN314_H__ - -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "stream_encoder.h" -#include "dcn20/dcn20_stream_encoder.h" - -/* Register bit field name change */ -#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 -#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 -#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa -#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe -#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf - -#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L -#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L -#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L -#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L -#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L - - -#define SE_DCN314_REG_LIST(id)\ - SRI(AFMT_CNTL, DIG, id), \ - SRI(DIG_FE_CNTL, DIG, id), \ - SRI(HDMI_CONTROL, DIG, id), \ - SRI(HDMI_DB_CONTROL, DIG, id), \ - SRI(HDMI_GC, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \ - SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \ - SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \ - SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \ - SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\ - SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\ - SRI(HDMI_ACR_32_0, DIG, id),\ - SRI(HDMI_ACR_32_1, DIG, id),\ - SRI(HDMI_ACR_44_0, DIG, 
id),\ - SRI(HDMI_ACR_44_1, DIG, id),\ - SRI(HDMI_ACR_48_0, DIG, id),\ - SRI(HDMI_ACR_48_1, DIG, id),\ - SRI(DP_DB_CNTL, DP, id), \ - SRI(DP_MSA_MISC, DP, id), \ - SRI(DP_MSA_VBID_MISC, DP, id), \ - SRI(DP_MSA_COLORIMETRY, DP, id), \ - SRI(DP_MSA_TIMING_PARAM1, DP, id), \ - SRI(DP_MSA_TIMING_PARAM2, DP, id), \ - SRI(DP_MSA_TIMING_PARAM3, DP, id), \ - SRI(DP_MSA_TIMING_PARAM4, DP, id), \ - SRI(DP_MSE_RATE_CNTL, DP, id), \ - SRI(DP_MSE_RATE_UPDATE, DP, id), \ - SRI(DP_PIXEL_FORMAT, DP, id), \ - SRI(DP_SEC_CNTL, DP, id), \ - SRI(DP_SEC_CNTL1, DP, id), \ - SRI(DP_SEC_CNTL2, DP, id), \ - SRI(DP_SEC_CNTL5, DP, id), \ - SRI(DP_SEC_CNTL6, DP, id), \ - SRI(DP_STEER_FIFO, DP, id), \ - SRI(DP_VID_M, DP, id), \ - SRI(DP_VID_N, DP, id), \ - SRI(DP_VID_STREAM_CNTL, DP, id), \ - SRI(DP_VID_TIMING, DP, id), \ - SRI(DP_SEC_AUD_N, DP, id), \ - SRI(DP_SEC_TIMESTAMP, DP, id), \ - SRI(DP_DSC_CNTL, DP, id), \ - SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \ - SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ - SRI(DP_SEC_FRAMING4, DP, id), \ - SRI(DP_GSP11_CNTL, DP, id), \ - SRI(DME_CONTROL, DME, id),\ - SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \ - SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ - SRI(DIG_FE_CNTL, DIG, id), \ - SRI(DIG_CLOCK_PATTERN, DIG, id), \ - SRI(DIG_FIFO_CTRL0, DIG, id) - - -#define SE_COMMON_MASK_SH_LIST_DCN314(mask_sh)\ - SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\ - SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\ - SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, mask_sh),\ - SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\ - SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\ - SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\ - SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\ - SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\ - SE_SF(DIG0_HDMI_CONTROL, HDMI_NO_EXTRA_NULL_PACKET_FILLED, mask_sh),\ - SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\ - SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\ - SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\ - SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL4, DP_SEC_GSP4_LINE_NUM, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, mask_sh),\ - SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\ - SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\ - SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\ - SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\ - SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\ 
- SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\ - SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\ - SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\ - SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\ - SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\ - SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\ - SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\ - SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, mask_sh),\ - SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP11_PPS, mask_sh),\ - SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, mask_sh),\ - SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, mask_sh),\ - SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\ - SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\ - SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\ - SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\ - SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh), \ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_SEND, mask_sh),\ - 
SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_CONT, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_SEND, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC0_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC1_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC2_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC3_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC4_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC5_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC6_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC7_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC8_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC9_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC10_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC11_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC12_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC13_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL10, HDMI_GENERIC14_LINE, mask_sh),\ - SE_SF(DP0_DP_DSC_CNTL, DP_DSC_MODE, mask_sh),\ - SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, mask_sh),\ - SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, mask_sh),\ - SE_SF(DME0_DME_CONTROL, METADATA_ENGINE_EN, mask_sh),\ - SE_SF(DME0_DME_CONTROL, METADATA_HUBP_REQUESTOR_ID, mask_sh),\ - SE_SF(DME0_DME_CONTROL, METADATA_STREAM_TYPE, mask_sh),\ - SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_ENABLE, mask_sh),\ - SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE_REFERENCE, mask_sh),\ - SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE, mask_sh),\ - SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_ENABLE, mask_sh),\ - SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE_REFERENCE, mask_sh),\ - SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, DOLBY_VISION_EN, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, 
DIG_SYMCLK_FE_ON, mask_sh),\ - SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\ - SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh) - -void dcn314_dio_stream_encoder_construct( - struct dcn10_stream_encoder *enc1, - struct dc_context *ctx, - struct dc_bios *bp, - enum engine_id eng_id, - struct vpg *vpg, - struct afmt *afmt, - const struct dcn10_stream_enc_registers *regs, - const struct dcn10_stream_encoder_shift *se_shift, - const struct dcn10_stream_encoder_mask *se_mask); - -void enc3_stream_encoder_update_hdmi_info_packets( - struct stream_encoder *enc, - const struct encoder_info_frame *info_frame); - -void enc3_stream_encoder_stop_hdmi_info_packets( - struct stream_encoder *enc); - -void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( - struct stream_encoder *enc, - struct encoder_info_frame *info_frame); - -void enc3_stream_encoder_update_dp_info_packets( - struct stream_encoder *enc, - const struct encoder_info_frame *info_frame); - -void enc3_audio_mute_control( - struct stream_encoder *enc, - bool mute); - -void enc3_se_dp_audio_setup( - struct stream_encoder *enc, - unsigned int az_inst, - struct audio_info *info); - -void enc3_se_dp_audio_enable( - struct stream_encoder *enc); - -void enc3_se_hdmi_audio_setup( - struct stream_encoder *enc, - unsigned int az_inst, - struct audio_info *info, - struct audio_crtc_info *audio_crtc_info); - -void enc3_dp_set_dsc_pps_info_packet( - struct stream_encoder *enc, - bool enable, - uint8_t *dsc_packed_pps, - bool immediate_update); - -void enc314_stream_encoder_dvi_set_stream_attribute( - struct stream_encoder *enc, - struct dc_crtc_timing *crtc_timing, - bool is_dual_link); - -void enc314_stream_encoder_hdmi_set_stream_attribute( - struct stream_encoder *enc, - struct dc_crtc_timing *crtc_timing, - int actual_pix_clk_khz, - bool enable_audio); - -void enc314_stream_encoder_dp_blank( - struct dc_link *link, - struct stream_encoder *enc); - -void enc314_stream_encoder_dp_unblank( - struct dc_link *link, - struct stream_encoder *enc, - const struct encoder_unblank_param *param); - -void enc314_reset_fifo(struct stream_encoder *enc, bool reset); - -void enc314_enable_fifo(struct stream_encoder *enc); - -void enc314_disable_fifo(struct stream_encoder *enc); - -void enc314_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container); - -void enc314_read_state(struct stream_encoder *enc, struct enc_state *s); - -void enc314_dp_set_odm_combine( - struct stream_encoder *enc, - bool odm_combine); - -void enc314_dp_set_dsc_config( - struct stream_encoder *enc, - enum optc_dsc_mode dsc_mode, - uint32_t dsc_bytes_per_pixel, - uint32_t dsc_slice_width); - -#endif /* __DC_DIO_STREAM_ENCODER_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/Makefile b/drivers/gpu/drm/amd/display/dc/dcn401/Makefile deleted file mode 100644 index ded1f3140beb7f..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn401/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved. 
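A small stand-in for the shift/mask scheme that the SE_SF()/mask_sh lists above feed: each register field is described by a shift and a mask, and an update is a read-modify-write of only that field. This is a generic illustration, not the driver's reg_helper implementation, and the bit position used below is made up.

#include <stdint.h>
#include <stdio.h>

struct reg_field {
	uint32_t shift;
	uint32_t mask;
};

/* Write 'val' into the field described by 'f', leaving the other bits intact. */
static uint32_t field_update(uint32_t reg_val, const struct reg_field *f, uint32_t val)
{
	return (reg_val & ~f->mask) | ((val << f->shift) & f->mask);
}

/* Read back just the field described by 'f'. */
static uint32_t field_get(uint32_t reg_val, const struct reg_field *f)
{
	return (reg_val & f->mask) >> f->shift;
}

int main(void)
{
	/* hypothetical single-bit enable field at bit 4 */
	const struct reg_field fifo_enable = { .shift = 4, .mask = 0x00000010 };
	uint32_t reg = 0xA0A0A0A0;

	reg = field_update(reg, &fifo_enable, 1);
	printf("reg = 0x%08x, enable field = %u\n", reg, field_get(reg, &fifo_enable));
	return 0;
}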
- -DCN401 += dcn401_dio_link_encoder.o -DCN401 += dcn401_dio_stream_encoder.o -DCN401 += dcn401_mpc.o - -AMD_DAL_DCN401 = $(addprefix $(AMDDALPATH)/dc/dcn401/,$(DCN401)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN401) diff --git a/drivers/gpu/drm/amd/display/dc/dio/Makefile b/drivers/gpu/drm/amd/display/dc/dio/Makefile index 67840e474d7a53..0dfd480976f774 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dio/Makefile @@ -51,6 +51,15 @@ AMD_DAL_DIO_DCN30 = $(addprefix $(AMDDALPATH)/dc/dio/dcn30/,$(DIO_DCN30)) AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN30) +############################################################################### +# DCN301 +############################################################################### +DIO_DCN301 = dcn301_dio_link_encoder.o + +AMD_DAL_DIO_DCN301 = $(addprefix $(AMDDALPATH)/dc/dio/dcn301/,$(DIO_DCN301)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN301) + ############################################################################### # DCN31 ############################################################################### @@ -60,6 +69,15 @@ AMD_DAL_DIO_DCN31 = $(addprefix $(AMDDALPATH)/dc/dio/dcn31/,$(DIO_DCN31)) AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN31) +############################################################################### +# DCN314 +############################################################################### +DIO_DCN314 = dcn314_dio_stream_encoder.o + +AMD_DAL_DIO_DCN314 = $(addprefix $(AMDDALPATH)/dc/dio/dcn314/,$(DIO_DCN314)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN314) + ############################################################################### # DCN32 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c new file mode 100644 index 00000000000000..1b39a6e8a1ac5a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c @@ -0,0 +1,191 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "reg_helper.h" + +#include "core_types.h" +#include "link_encoder.h" +#include "dcn301_dio_link_encoder.h" +#include "stream_encoder.h" +#include "dc_bios_types.h" +#include "gpio_service_interface.h" + +#define CTX \ + enc10->base.ctx +#define DC_LOGGER \ + enc10->base.ctx->logger + +#define REG(reg)\ + (enc10->link_regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc10->link_shift->field_name, enc10->link_mask->field_name + +#define IND_REG(index) \ + (enc10->link_regs->index) + +static const struct link_encoder_funcs dcn301_link_enc_funcs = { + .read_state = link_enc2_read_state, + .validate_output_with_stream = dcn10_link_encoder_validate_output_with_stream, + .hw_init = enc3_hw_init, + .setup = dcn10_link_encoder_setup, + .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, + .enable_dp_output = dcn20_link_encoder_enable_dp_output, + .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output, + .disable_output = dcn10_link_encoder_disable_output, + .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, + .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, + .update_mst_stream_allocation_table = dcn10_link_encoder_update_mst_stream_allocation_table, + .psr_program_dp_dphy_fast_training = dcn10_psr_program_dp_dphy_fast_training, + .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, + .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, + .enable_hpd = dcn10_link_encoder_enable_hpd, + .disable_hpd = dcn10_link_encoder_disable_hpd, + .is_dig_enabled = dcn10_is_dig_enabled, + .destroy = dcn10_link_encoder_destroy, + .fec_set_enable = enc2_fec_set_enable, + .fec_set_ready = enc2_fec_set_ready, + .fec_is_active = enc2_fec_is_active, + .get_dig_frontend = dcn10_get_dig_frontend, + .get_dig_mode = dcn10_get_dig_mode, + .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn20_link_encoder_get_max_link_cap, +}; + +void dcn301_link_encoder_construct( + struct dcn20_link_encoder *enc20, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask) +{ + struct bp_encoder_cap_info bp_cap_info = {0}; + const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; + enum bp_result result = BP_RESULT_OK; + struct dcn10_link_encoder *enc10 = &enc20->enc10; + + enc10->base.funcs = &dcn301_link_enc_funcs; + enc10->base.ctx = init_data->ctx; + enc10->base.id = init_data->encoder; + + enc10->base.hpd_source = init_data->hpd_source; + enc10->base.connector = init_data->connector; + + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; + + enc10->base.transmitter = init_data->transmitter; + + /* set the flag to indicate whether driver poll the I2C data pin + * while doing the DP sink detect + */ + +/* if (dal_adapter_service_is_feature_supported(as, + FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) + enc10->base.features.flags.bits. 
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/ + + enc10->base.output_signals = + SIGNAL_TYPE_DVI_SINGLE_LINK | + SIGNAL_TYPE_DVI_DUAL_LINK | + SIGNAL_TYPE_LVDS | + SIGNAL_TYPE_DISPLAY_PORT | + SIGNAL_TYPE_DISPLAY_PORT_MST | + SIGNAL_TYPE_EDP | + SIGNAL_TYPE_HDMI_TYPE_A; + + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. + * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. + * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer + * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. + * Prefer DIG assignment is decided by board design. + * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design + * and VBIOS will filter out 7 UNIPHY for DCE 8.0. + * By this, adding DIGG should not hurt DCE 8.0. + * This will let DCE 8.1 share DCE 8.0 as much as possible + */ + + enc10->link_regs = link_regs; + enc10->aux_regs = aux_regs; + enc10->hpd_regs = hpd_regs; + enc10->link_shift = link_shift; + enc10->link_mask = link_mask; + + switch (enc10->base.transmitter) { + case TRANSMITTER_UNIPHY_A: + enc10->base.preferred_engine = ENGINE_ID_DIGA; + break; + case TRANSMITTER_UNIPHY_B: + enc10->base.preferred_engine = ENGINE_ID_DIGB; + break; + case TRANSMITTER_UNIPHY_C: + enc10->base.preferred_engine = ENGINE_ID_DIGC; + break; + case TRANSMITTER_UNIPHY_D: + enc10->base.preferred_engine = ENGINE_ID_DIGD; + break; + case TRANSMITTER_UNIPHY_E: + enc10->base.preferred_engine = ENGINE_ID_DIGE; + break; + case TRANSMITTER_UNIPHY_F: + enc10->base.preferred_engine = ENGINE_ID_DIGF; + break; + case TRANSMITTER_UNIPHY_G: + enc10->base.preferred_engine = ENGINE_ID_DIGG; + break; + default: + ASSERT_CRITICAL(false); + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + } + + /* default to one to mirror Windows behavior */ + enc10->base.features.flags.bits.HDMI_6GB_EN = 1; + + result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, + enc10->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ + if (result == BP_RESULT_OK) { + enc10->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc10->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.DP_IS_USB_C = + bp_cap_info.DP_IS_USB_C; + } else { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); + } + if (enc10->base.ctx->dc->debug.hdmi20_disable) { + enc10->base.features.flags.bits.HDMI_6GB_EN = 0; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h new file mode 100644 index 00000000000000..49f8d91d495131 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h @@ -0,0 +1,82 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_ENCODER__DCN301_H__ +#define __DC_LINK_ENCODER__DCN301_H__ + +#include "dcn20/dcn20_link_encoder.h" + + +#define LE_DCN301_REG_LIST(id)\ + SRI(DIG_BE_CNTL, DIG, id), \ + SRI(DIG_BE_EN_CNTL, DIG, id), \ + SRI(TMDS_CTL_BITS, DIG, id), \ + SRI(TMDS_DCBALANCER_CONTROL, DIG, id), \ + SRI(DP_CONFIG, DP, id), \ + SRI(DP_DPHY_CNTL, DP, id), \ + SRI(DP_DPHY_PRBS_CNTL, DP, id), \ + SRI(DP_DPHY_SCRAM_CNTL, DP, id),\ + SRI(DP_DPHY_SYM0, DP, id), \ + SRI(DP_DPHY_SYM1, DP, id), \ + SRI(DP_DPHY_SYM2, DP, id), \ + SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \ + SRI(DP_LINK_CNTL, DP, id), \ + SRI(DP_LINK_FRAMING_CNTL, DP, id), \ + SRI(DP_MSE_SAT0, DP, id), \ + SRI(DP_MSE_SAT1, DP, id), \ + SRI(DP_MSE_SAT2, DP, id), \ + SRI(DP_MSE_SAT_UPDATE, DP, id), \ + SRI(DP_SEC_CNTL, DP, id), \ + SRI(DP_VID_STREAM_CNTL, DP, id), \ + SRI(DP_DPHY_FAST_TRAINING, DP, id), \ + SRI(DP_SEC_CNTL1, DP, id), \ + SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ + SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) + +#define LINK_ENCODER_MASK_SH_LIST_DCN301(mask_sh) \ + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ + LE_SF(DIG0_TMDS_DCBALANCER_CONTROL, TMDS_SYNC_DCBAL_EN, mask_sh) + +#define DPCS_DCN301_MASK_SH_LIST(mask_sh)\ + DPCS_DCN2_MASK_SH_LIST(mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_HDMI_FRL_MODE, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP_10_BIT, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh) + +void dcn301_link_encoder_construct( + struct dcn20_link_encoder *enc20, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask); + +void enc3_hw_init(struct link_encoder *enc); + +#endif /* __DC_LINK_ENCODER__DCN301_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c new file mode 100644 index 00000000000000..5b343f745cf333 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c @@ -0,0 +1,496 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dc_bios_types.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn314_dio_stream_encoder.h" +#include "reg_helper.h" +#include "hw_shared.h" +#include "link.h" +#include "dpcd_defs.h" + +#define DC_LOGGER \ + enc1->base.ctx->logger + +#define REG(reg)\ + (enc1->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc1->se_shift->field_name, enc1->se_mask->field_name + +#define VBI_LINE_0 0 +#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000 + +#define CTX \ + enc1->base.ctx + +void enc314_reset_fifo(struct stream_encoder *enc, bool reset) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val = reset ? 1 : 0; + uint32_t is_symclk_on; + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val); + REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on); + + if (is_symclk_on) + REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000); + else + udelay(10); +} + +void enc314_enable_fifo(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); + + enc314_reset_fifo(enc, true); + enc314_reset_fifo(enc, false); + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1); +} + +void enc314_disable_fifo(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0); +} + +void enc314_dp_set_odm_combine( + struct stream_encoder *enc, + bool odm_combine) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, odm_combine); +} + +/* setup stream encoder in dvi mode */ +void enc314_stream_encoder_dvi_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + bool is_dual_link) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { + struct bp_encoder_control cntl = {0}; + + cntl.action = ENCODER_CONTROL_SETUP; + cntl.engine_id = enc1->base.id; + cntl.signal = is_dual_link ? + SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; + cntl.enable_dp_audio = false; + cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10; + cntl.lanes_number = (is_dual_link) ? 
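enc314_reset_fifo above only waits on DIG_FIFO_RESET_DONE when the FE symbol clock is reported on, falling back to a fixed delay otherwise, and enc314_enable_fifo pulses the reset before setting the enable bit. A stand-alone sketch of that poll-or-delay handshake; poll_until and the fake register variable are illustrative helpers, not driver APIs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Fake "register" for the sketch; the real code reads DIG_FIFO_CTRL0 via REG_GET/REG_WAIT. */
static volatile uint32_t fifo_reset_done;

static bool poll_until(volatile uint32_t *reg, uint32_t want,
                       unsigned delay_us, unsigned max_tries)
{
    for (unsigned i = 0; i < max_tries; i++) {
        if (*reg == want)
            return true;
        usleep(delay_us);       /* stand-in for udelay() in the driver */
    }
    return false;               /* timed out */
}

/* Model of the reset handshake: only poll for RESET_DONE when the FE symbol
 * clock is running, otherwise just give the hardware a fixed settle time. */
static void reset_fifo(bool assert_reset, bool symclk_on)
{
    fifo_reset_done = assert_reset;     /* pretend hardware mirrors the request */

    if (symclk_on)
        (void)poll_until(&fifo_reset_done, assert_reset, 10, 5000);
    else
        usleep(10);
}

int main(void)
{
    /* Enable sequence from the patch: assert reset, release reset, then enable. */
    reset_fifo(true, true);
    reset_fifo(false, true);
    printf("FIFO reset pulse complete\n");
    return 0;
}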
LANE_COUNT_EIGHT : LANE_COUNT_FOUR; + + if (enc1->base.bp->funcs->encoder_control( + enc1->base.bp, &cntl) != BP_RESULT_OK) + return; + + } else { + + //Set pattern for clock channel, default vlue 0x63 does not work + REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F); + + //DIG_BE_TMDS_DVI_MODE : TMDS-DVI mode is already set in link_encoder_setup + + //DIG_SOURCE_SELECT is already set in dig_connect_to_otg + + enc314_enable_fifo(enc); + } + + ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB); + ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888); + enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); +} + +/* setup stream encoder in hdmi mode */ +void enc314_stream_encoder_hdmi_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + int actual_pix_clk_khz, + bool enable_audio) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { + struct bp_encoder_control cntl = {0}; + + cntl.action = ENCODER_CONTROL_SETUP; + cntl.engine_id = enc1->base.id; + cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; + cntl.enable_dp_audio = enable_audio; + cntl.pixel_clock = actual_pix_clk_khz; + cntl.lanes_number = LANE_COUNT_FOUR; + + if (enc1->base.bp->funcs->encoder_control( + enc1->base.bp, &cntl) != BP_RESULT_OK) + return; + + } else { + + //Set pattern for clock channel, default vlue 0x63 does not work + REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F); + + //DIG_BE_TMDS_HDMI_MODE : TMDS-HDMI mode is already set in link_encoder_setup + + //DIG_SOURCE_SELECT is already set in dig_connect_to_otg + + enc314_enable_fifo(enc); + } + + /* Configure pixel encoding */ + enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); + + /* setup HDMI engine */ + REG_UPDATE_6(HDMI_CONTROL, + HDMI_PACKET_GEN_VERSION, 1, + HDMI_KEEPOUT_MODE, 1, + HDMI_DEEP_COLOR_ENABLE, 0, + HDMI_DATA_SCRAMBLE_EN, 0, + HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1, + HDMI_CLOCK_CHANNEL_RATE, 0); + + /* Configure color depth */ + switch (crtc_timing->display_color_depth) { + case COLOR_DEPTH_888: + REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); + break; + case COLOR_DEPTH_101010: + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 1, + HDMI_DEEP_COLOR_ENABLE, 0); + } else { + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 1, + HDMI_DEEP_COLOR_ENABLE, 1); + } + break; + case COLOR_DEPTH_121212: + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 2, + HDMI_DEEP_COLOR_ENABLE, 0); + } else { + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 2, + HDMI_DEEP_COLOR_ENABLE, 1); + } + break; + case COLOR_DEPTH_161616: + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 3, + HDMI_DEEP_COLOR_ENABLE, 1); + break; + default: + break; + } + + if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) { + /* enable HDMI data scrambler + * HDMI_CLOCK_CHANNEL_RATE_MORE_340M + * Clock channel frequency is 1/4 of character rate. 
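The HDMI deep-color programming above boils down to a small mapping: the HDMI_DEEP_COLOR_DEPTH field encodes the bit depth, and HDMI_DEEP_COLOR_ENABLE stays off for 8 bpc and for YCbCr 4:2:2, which carries the extra bits in its standard container instead. A stand-alone sketch of that mapping, with illustrative enum names:

#include <stdio.h>

enum color_depth { DEPTH_888, DEPTH_101010, DEPTH_121212, DEPTH_161616 };
enum pixel_encoding { ENC_RGB, ENC_YCBCR444, ENC_YCBCR422 };

/* Depth field encodes the bpc; deep color is left disabled for 8 bpc and
 * for YCbCr 4:2:2, matching the switch in the stream encoder above. */
static void map_deep_color(enum color_depth d, enum pixel_encoding e,
                           unsigned *depth_field, unsigned *enable)
{
    switch (d) {
    case DEPTH_888:     *depth_field = 0; *enable = 0; break;
    case DEPTH_101010:  *depth_field = 1; *enable = (e != ENC_YCBCR422); break;
    case DEPTH_121212:  *depth_field = 2; *enable = (e != ENC_YCBCR422); break;
    case DEPTH_161616:  *depth_field = 3; *enable = 1; break;
    }
}

int main(void)
{
    unsigned depth, en;

    map_deep_color(DEPTH_101010, ENC_YCBCR422, &depth, &en);
    printf("10 bpc over YCbCr422 -> depth field %u, deep color enable %u\n", depth, en);
    return 0;
}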
+ */ + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DATA_SCRAMBLE_EN, 1, + HDMI_CLOCK_CHANNEL_RATE, 1); + } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) { + + /* TODO: New feature for DCE11, still need to implement */ + + /* enable HDMI data scrambler + * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE + * Clock channel frequency is the same + * as character rate + */ + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DATA_SCRAMBLE_EN, 1, + HDMI_CLOCK_CHANNEL_RATE, 0); + } + + + /* Enable transmission of General Control packet on every frame */ + REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL, + HDMI_GC_CONT, 1, + HDMI_GC_SEND, 1, + HDMI_NULL_SEND, 1); + + /* Disable Audio Content Protection packet transmission */ + REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0); + + /* following belongs to audio */ + /* Enable Audio InfoFrame packet transmission. */ + REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); + + /* update double-buffered AUDIO_INFO registers immediately */ + ASSERT(enc->afmt); + enc->afmt->funcs->audio_info_immediate_update(enc->afmt); + + /* Select line number on which to send Audio InfoFrame packets */ + REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, + VBI_LINE_0 + 2); + + /* set HDMI GC AVMUTE */ + REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0); +} + + + +static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; + + two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 + && !timing->dsc_cfg.ycbcr422_simple); + return two_pix; +} + +void enc314_stream_encoder_dp_blank( + struct dc_link *link, + struct stream_encoder *enc) +{ + enc1_stream_encoder_dp_blank(link, enc); + + /* Disable FIFO after the DP vid stream is disabled to avoid corruption. 
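The scrambler decision above follows HDMI 2.0: at or above a 340 MHz character rate the scrambler must be enabled and the TMDS clock channel drops to 1/4 rate, while below that it is only enabled when the sink advertises LTE_340Mcsc scrambling. A small stand-alone version of that decision; the config struct is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define TMDS_CHAR_RATE_340M_KHZ 340000   /* threshold used in the patch */

struct hdmi_clock_cfg { bool scramble_en; unsigned clock_channel_rate; };

static struct hdmi_clock_cfg pick_hdmi_clock_cfg(unsigned pix_clk_khz,
                                                 bool lte_340mcsc_scramble)
{
    struct hdmi_clock_cfg cfg = { false, 0 };

    if (pix_clk_khz >= TMDS_CHAR_RATE_340M_KHZ) {
        cfg.scramble_en = true;
        cfg.clock_channel_rate = 1;      /* clock channel at 1/4 of character rate */
    } else if (lte_340mcsc_scramble) {
        cfg.scramble_en = true;
        cfg.clock_channel_rate = 0;      /* clock channel at the character rate */
    }
    return cfg;
}

int main(void)
{
    struct hdmi_clock_cfg cfg = pick_hdmi_clock_cfg(594000, false); /* 4K60 RGB */

    printf("scramble=%d clock_rate_field=%u\n", cfg.scramble_en, cfg.clock_channel_rate);
    return 0;
}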
*/ + if (enc->ctx->dc->debug.dig_fifo_off_in_blank) + enc314_disable_fifo(enc); +} + +void enc314_stream_encoder_dp_unblank( + struct dc_link *link, + struct stream_encoder *enc, + const struct encoder_unblank_param *param) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) { + uint32_t n_vid = 0x8000; + uint32_t m_vid; + uint32_t n_multiply = 0; + uint32_t pix_per_cycle = 0; + uint64_t m_vid_l = n_vid; + + /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */ + if (is_two_pixels_per_containter(¶m->timing) || param->opp_cnt > 1) { + /*this logic should be the same in get_pixel_clock_parameters() */ + n_multiply = 1; + pix_per_cycle = 1; + } + /* M / N = Fstream / Flink + * m_vid / n_vid = pixel rate / link rate + */ + + m_vid_l *= param->timing.pix_clk_100hz / 10; + m_vid_l = div_u64(m_vid_l, + param->link_settings.link_rate + * LINK_RATE_REF_FREQ_IN_KHZ); + + m_vid = (uint32_t) m_vid_l; + + /* enable auto measurement */ + + REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0); + + /* auto measurement need 1 full 0x8000 symbol cycle to kick in, + * therefore program initial value for Mvid and Nvid + */ + + REG_UPDATE(DP_VID_N, DP_VID_N, n_vid); + + REG_UPDATE(DP_VID_M, DP_VID_M, m_vid); + + REG_UPDATE_2(DP_VID_TIMING, + DP_VID_M_N_GEN_EN, 1, + DP_VID_N_MUL, n_multiply); + + REG_UPDATE(DP_PIXEL_FORMAT, + DP_PIXEL_PER_CYCLE_PROCESSING_MODE, + pix_per_cycle); + } + + /* make sure stream is disabled before resetting steer fifo */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false); + REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000); + + /* DIG_START is removed from the register spec */ + + /* switch DP encoder to CRTC data, but reset it the fifo first. It may happen + * that it overflows during mode transition, and sometimes doesn't recover. + */ + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); + udelay(10); + + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); + + /* wait 100us for DIG/DP logic to prime + * (i.e. a few video lines) + */ + udelay(100); + + /* the hardware would start sending video at the start of the next DP + * frame (i.e. rising edge of the vblank). + * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this + * register has no effect on enable transition! HW always guarantees + * VID_STREAM enable at start of next frame, and this is not + * programmable + */ + + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); + + /* + * DIG Resync FIFO now needs to be explicitly enabled. + * This should come after DP_VID_STREAM_ENABLE per HW docs. + */ + enc314_enable_fifo(enc); + + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); +} + +/* Set DSC-related configuration. + * dsc_mode: 0 disables DSC, other values enable DSC in specified format + * sc_bytes_per_pixel: DP_DSC_BYTES_PER_PIXEL removed in DCN32 + * dsc_slice_width: DP_DSC_SLICE_WIDTH removed in DCN32 + */ +void enc314_dp_set_dsc_config(struct stream_encoder *enc, + enum optc_dsc_mode dsc_mode, + uint32_t dsc_bytes_per_pixel, + uint32_t dsc_slice_width) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1); +} + +/* this function read dsc related register fields to be logged later in dcn10_log_hw_state + * into a dcn_dsc_state struct. 
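The unblank path above seeds the DP Mvid/Nvid pair with N fixed at 0x8000 and M derived from the pixel-clock-to-link-symbol-clock ratio before auto measurement takes over. A stand-alone version of that arithmetic, assuming LINK_RATE_REF_FREQ_IN_KHZ is 27000 so that link_rate is the DPCD rate code in 0.27 Gb/s units; the 4:2:0 / two-pixels-per-container doubling handled via DP_VID_N_MUL is not modeled here:

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000u  /* assumed: link_rate is in 0.27 Gb/s units */

/* M / N = pixel clock / link symbol clock, with N fixed at 0x8000 so M can be
 * derived by one multiply and one divide, as in the unblank code above. */
static uint32_t compute_m_vid(uint32_t pix_clk_100hz, uint32_t link_rate)
{
    const uint32_t n_vid = 0x8000;
    uint64_t m_vid_l = n_vid;

    m_vid_l *= pix_clk_100hz / 10;                               /* pixel clock in kHz */
    m_vid_l /= (uint64_t)link_rate * LINK_RATE_REF_FREQ_IN_KHZ;  /* symbol clock in kHz */

    return (uint32_t)m_vid_l;
}

int main(void)
{
    /* 4K60 (593.407 MHz pixel clock) over HBR2, whose DPCD rate code is 0x14. */
    uint32_t m = compute_m_vid(5934070, 0x14);

    printf("M = 0x%x, N = 0x8000\n", m);
    return 0;
}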
+ */ +void enc314_read_state(struct stream_encoder *enc, struct enc_state *s) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + //if dsc is enabled, continue to read + REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode); + if (s->dsc_mode) { + REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, &s->sec_gsp_pps_line_num); + + REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference); + REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num); + + REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, &s->sec_gsp_pps_enable); + REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable); + } +} + +void enc314_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + // The naming of this field is confusing, what it means is the output mode of otg, which + // is the input mode of the dig + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0); +} + + +static const struct stream_encoder_funcs dcn314_str_enc_funcs = { + .dp_set_odm_combine = + enc314_dp_set_odm_combine, + .dp_set_stream_attribute = + enc2_stream_encoder_dp_set_stream_attribute, + .hdmi_set_stream_attribute = + enc314_stream_encoder_hdmi_set_stream_attribute, + .dvi_set_stream_attribute = + enc314_stream_encoder_dvi_set_stream_attribute, + .set_throttled_vcp_size = + enc1_stream_encoder_set_throttled_vcp_size, + .update_hdmi_info_packets = + enc3_stream_encoder_update_hdmi_info_packets, + .stop_hdmi_info_packets = + enc3_stream_encoder_stop_hdmi_info_packets, + .update_dp_info_packets_sdp_line_num = + enc3_stream_encoder_update_dp_info_packets_sdp_line_num, + .update_dp_info_packets = + enc3_stream_encoder_update_dp_info_packets, + .stop_dp_info_packets = + enc1_stream_encoder_stop_dp_info_packets, + .dp_blank = + enc314_stream_encoder_dp_blank, + .dp_unblank = + enc314_stream_encoder_dp_unblank, + .audio_mute_control = enc3_audio_mute_control, + + .dp_audio_setup = enc3_se_dp_audio_setup, + .dp_audio_enable = enc3_se_dp_audio_enable, + .dp_audio_disable = enc1_se_dp_audio_disable, + + .hdmi_audio_setup = enc3_se_hdmi_audio_setup, + .hdmi_audio_disable = enc1_se_hdmi_audio_disable, + .setup_stereo_sync = enc1_setup_stereo_sync, + .set_avmute = enc1_stream_encoder_set_avmute, + .dig_connect_to_otg = enc1_dig_connect_to_otg, + .dig_source_otg = enc1_dig_source_otg, + + .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, + + .enc_read_state = enc314_read_state, + .dp_set_dsc_config = enc314_dp_set_dsc_config, + .dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet, + .set_dynamic_metadata = enc2_set_dynamic_metadata, + .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, + + .enable_fifo = enc314_enable_fifo, + .disable_fifo = enc314_disable_fifo, + .set_input_mode = enc314_set_dig_input_mode, +}; + +void dcn314_dio_stream_encoder_construct( + struct dcn10_stream_encoder *enc1, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id, + struct vpg *vpg, + struct afmt *afmt, + const struct dcn10_stream_enc_registers *regs, + const struct dcn10_stream_encoder_shift *se_shift, + const struct dcn10_stream_encoder_mask *se_mask) +{ + enc1->base.funcs = &dcn314_str_enc_funcs; + enc1->base.ctx = ctx; + enc1->base.id = eng_id; + enc1->base.bp = bp; + enc1->base.vpg = vpg; + enc1->base.afmt = afmt; + enc1->regs = regs; + enc1->se_shift = se_shift; + enc1->se_mask = se_mask; + enc1->base.stream_enc_inst = vpg->inst; +} + diff 
--git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h new file mode 100644 index 00000000000000..86548be591be09 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DIO_STREAM_ENCODER_DCN314_H__ +#define __DC_DIO_STREAM_ENCODER_DCN314_H__ + +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "stream_encoder.h" +#include "dcn20/dcn20_stream_encoder.h" + +/* Register bit field name change */ +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf + +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L + + +#define SE_DCN314_REG_LIST(id)\ + SRI(AFMT_CNTL, DIG, id), \ + SRI(DIG_FE_CNTL, DIG, id), \ + SRI(HDMI_CONTROL, DIG, id), \ + SRI(HDMI_DB_CONTROL, DIG, id), \ + SRI(HDMI_GC, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \ + SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \ + SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \ + SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \ + SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\ + SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\ + SRI(HDMI_ACR_32_0, DIG, id),\ + SRI(HDMI_ACR_32_1, DIG, id),\ + SRI(HDMI_ACR_44_0, 
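The __SHIFT/_MASK pairs defined above (for example DPCS_TX_DATA_SWAP at bit 14, mask 0x00004000) feed the REG_UPDATE-style helpers used throughout these encoders. A toy version of the underlying read-modify-write on a single field, with the register value held in a plain variable rather than MMIO:

#include <stdint.h>
#include <stdio.h>

/* Clear the field, then OR in the new value at the field's shift. */
static uint32_t field_update(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
    return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
    /* e.g. DPCS_TX_DATA_SWAP at bit 14 (mask 0x00004000), per the header above */
    uint32_t reg = 0;

    reg = field_update(reg, 0x00004000, 14, 1);
    printf("reg = 0x%08x\n", reg);
    return 0;
}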
DIG, id),\ + SRI(HDMI_ACR_44_1, DIG, id),\ + SRI(HDMI_ACR_48_0, DIG, id),\ + SRI(HDMI_ACR_48_1, DIG, id),\ + SRI(DP_DB_CNTL, DP, id), \ + SRI(DP_MSA_MISC, DP, id), \ + SRI(DP_MSA_VBID_MISC, DP, id), \ + SRI(DP_MSA_COLORIMETRY, DP, id), \ + SRI(DP_MSA_TIMING_PARAM1, DP, id), \ + SRI(DP_MSA_TIMING_PARAM2, DP, id), \ + SRI(DP_MSA_TIMING_PARAM3, DP, id), \ + SRI(DP_MSA_TIMING_PARAM4, DP, id), \ + SRI(DP_MSE_RATE_CNTL, DP, id), \ + SRI(DP_MSE_RATE_UPDATE, DP, id), \ + SRI(DP_PIXEL_FORMAT, DP, id), \ + SRI(DP_SEC_CNTL, DP, id), \ + SRI(DP_SEC_CNTL1, DP, id), \ + SRI(DP_SEC_CNTL2, DP, id), \ + SRI(DP_SEC_CNTL5, DP, id), \ + SRI(DP_SEC_CNTL6, DP, id), \ + SRI(DP_STEER_FIFO, DP, id), \ + SRI(DP_VID_M, DP, id), \ + SRI(DP_VID_N, DP, id), \ + SRI(DP_VID_STREAM_CNTL, DP, id), \ + SRI(DP_VID_TIMING, DP, id), \ + SRI(DP_SEC_AUD_N, DP, id), \ + SRI(DP_SEC_TIMESTAMP, DP, id), \ + SRI(DP_DSC_CNTL, DP, id), \ + SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \ + SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ + SRI(DP_SEC_FRAMING4, DP, id), \ + SRI(DP_GSP11_CNTL, DP, id), \ + SRI(DME_CONTROL, DME, id),\ + SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \ + SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ + SRI(DIG_FE_CNTL, DIG, id), \ + SRI(DIG_CLOCK_PATTERN, DIG, id), \ + SRI(DIG_FIFO_CTRL0, DIG, id) + + +#define SE_COMMON_MASK_SH_LIST_DCN314(mask_sh)\ + SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\ + SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\ + SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_NO_EXTRA_NULL_PACKET_FILLED, mask_sh),\ + SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\ + SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\ + SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\ + SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL4, DP_SEC_GSP4_LINE_NUM, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, mask_sh),\ + SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\ + SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\ + SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\ + SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\ + SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, 
mask_sh),\ + SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\ + SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\ + SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\ + SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\ + SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\ + SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP11_PPS, mask_sh),\ + SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, mask_sh),\ + SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, mask_sh),\ + SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\ + SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\ + SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\ + SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh), \ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_SEND, mask_sh),\ + 
SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC0_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC1_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC2_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC3_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC4_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC5_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC6_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC7_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC8_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC9_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC10_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC11_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC12_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC13_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL10, HDMI_GENERIC14_LINE, mask_sh),\ + SE_SF(DP0_DP_DSC_CNTL, DP_DSC_MODE, mask_sh),\ + SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, mask_sh),\ + SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, mask_sh),\ + SE_SF(DME0_DME_CONTROL, METADATA_ENGINE_EN, mask_sh),\ + SE_SF(DME0_DME_CONTROL, METADATA_HUBP_REQUESTOR_ID, mask_sh),\ + SE_SF(DME0_DME_CONTROL, METADATA_STREAM_TYPE, mask_sh),\ + SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE_REFERENCE, mask_sh),\ + SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_ENABLE, mask_sh),\ + SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE_REFERENCE, mask_sh),\ + SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, DOLBY_VISION_EN, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, 
DIG_SYMCLK_FE_ON, mask_sh),\ + SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\ + SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh) + +void dcn314_dio_stream_encoder_construct( + struct dcn10_stream_encoder *enc1, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id, + struct vpg *vpg, + struct afmt *afmt, + const struct dcn10_stream_enc_registers *regs, + const struct dcn10_stream_encoder_shift *se_shift, + const struct dcn10_stream_encoder_mask *se_mask); + +void enc3_stream_encoder_update_hdmi_info_packets( + struct stream_encoder *enc, + const struct encoder_info_frame *info_frame); + +void enc3_stream_encoder_stop_hdmi_info_packets( + struct stream_encoder *enc); + +void enc3_stream_encoder_update_dp_info_packets_sdp_line_num( + struct stream_encoder *enc, + struct encoder_info_frame *info_frame); + +void enc3_stream_encoder_update_dp_info_packets( + struct stream_encoder *enc, + const struct encoder_info_frame *info_frame); + +void enc3_audio_mute_control( + struct stream_encoder *enc, + bool mute); + +void enc3_se_dp_audio_setup( + struct stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info); + +void enc3_se_dp_audio_enable( + struct stream_encoder *enc); + +void enc3_se_hdmi_audio_setup( + struct stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info, + struct audio_crtc_info *audio_crtc_info); + +void enc3_dp_set_dsc_pps_info_packet( + struct stream_encoder *enc, + bool enable, + uint8_t *dsc_packed_pps, + bool immediate_update); + +void enc314_stream_encoder_dvi_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + bool is_dual_link); + +void enc314_stream_encoder_hdmi_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + int actual_pix_clk_khz, + bool enable_audio); + +void enc314_stream_encoder_dp_blank( + struct dc_link *link, + struct stream_encoder *enc); + +void enc314_stream_encoder_dp_unblank( + struct dc_link *link, + struct stream_encoder *enc, + const struct encoder_unblank_param *param); + +void enc314_reset_fifo(struct stream_encoder *enc, bool reset); + +void enc314_enable_fifo(struct stream_encoder *enc); + +void enc314_disable_fifo(struct stream_encoder *enc); + +void enc314_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container); + +void enc314_read_state(struct stream_encoder *enc, struct enc_state *s); + +void enc314_dp_set_odm_combine( + struct stream_encoder *enc, + bool odm_combine); + +void enc314_dp_set_dsc_config( + struct stream_encoder *enc, + enum optc_dsc_mode dsc_mode, + uint32_t dsc_bytes_per_pixel, + uint32_t dsc_slice_width); + +#endif /* __DC_DIO_STREAM_ENCODER_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c index 05783daa62ac2a..2ed382a8e79c66 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c @@ -23,7 +23,6 @@ * */ - #include "reg_helper.h" #include "core_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c index 6a179e5ab41740..6ab2a218b7694e 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c @@ -22,7 +22,6 @@ * */ - #include "dc_bios_types.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn314/dcn314_dio_stream_encoder.h" @@ -392,6 +391,14 @@ static void enc35_reset_fifo(struct stream_encoder *enc, bool reset) udelay(10); } +static bool enc35_is_fifo_enabled(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val; + + REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val); + return (reset_val == 0) ? false : true; +} void enc35_disable_fifo(struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); @@ -415,6 +422,24 @@ void enc35_enable_fifo(struct stream_encoder *enc) REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1); } +static uint32_t enc35_get_pixels_per_cycle(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t value; + + REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, &value); + + switch (value) { + case 0: + return 1; + case 1: + return 2; + default: + ASSERT_CRITICAL(false); + return 1; + } +} + static const struct stream_encoder_funcs dcn35_str_enc_funcs = { .dp_set_odm_combine = enc314_dp_set_odm_combine, @@ -465,7 +490,9 @@ static const struct stream_encoder_funcs dcn35_str_enc_funcs = { .set_input_mode = enc314_set_dig_input_mode, .enable_fifo = enc35_enable_fifo, .disable_fifo = enc35_disable_fifo, + .is_fifo_enabled = enc35_is_fifo_enabled, .map_stream_to_link = enc35_stream_encoder_map_to_link, + .get_pixels_per_cycle = enc35_get_pixels_per_cycle, }; void dcn35_dio_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 34adae7ab6e86d..2e4a46f1b499d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -210,4 +210,7 @@ enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link); enum dc_edid_status dm_helpers_get_sbios_edid(struct dc_link *link, struct dc_edid *edid); +bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream); +bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream); + #endif /* __DM_HELPERS__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 8a8efe408a9d92..e9fea9c2162e80 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1132,7 +1132,8 @@ static void dcn20_adjust_freesync_v_startup( patched_crtc_timing.v_addressable - patched_crtc_timing.v_border_top; - newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); + /* The newVStartUp is 1 line before vsync point */ + newVstartup = asic_blank_end + 1; *vstartup_start = ((newVstartup > *vstartup_start) ? 
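The new enc35_get_pixels_per_cycle helper above decodes the one-bit DIG_FIFO_OUTPUT_PIXEL_MODE field as 1 or 2 pixels per cycle and asserts on anything else. A trivial stand-alone equivalent of that decode:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 0 -> 1 pixel per cycle, 1 -> 2 pixels per cycle; any other value is a bug. */
static uint32_t pixels_per_cycle_from_field(uint32_t field_val)
{
    switch (field_val) {
    case 0:
        return 1;
    case 1:
        return 2;
    default:
        assert(0 && "unexpected DIG_FIFO_OUTPUT_PIXEL_MODE value");
        return 1;   /* fall back to the single-pixel mode */
    }
}

int main(void)
{
    printf("field 1 -> %u pixels per cycle\n", pixels_per_cycle_from_field(1));
    return 0;
}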
newVstartup : *vstartup_start); } @@ -1562,6 +1563,8 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc, pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width; pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256; pipes[pipe_cnt].pipe.src.source_format = dm_444_32; + pipes[pipe_cnt].pipe.src.cur0_src_width = 0; + pipes[pipe_cnt].pipe.src.cur1_src_width = 0; pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/ pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 7c56ad0f881224..4fce64a030b60a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) { - unsigned int ret_val = 0; + unsigned int ret_val = 1; if (source_format == dm_444_16) { if (!is_chroma) @@ -313,9 +313,6 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, if (swath_height_c > 0) log2_swath_height_c = dml_log2(swath_height_c); - - if (req128_c && log2_swath_height_c > 0) - log2_swath_height_c -= 1; } rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 3d95bfa5aca23b..3fa9a5da02f6a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) { - unsigned int ret_val = 0; + unsigned int ret_val = 1; if (source_format == dm_444_16) { if (!is_chroma) @@ -313,9 +313,6 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, if (swath_height_c > 0) log2_swath_height_c = dml_log2(swath_height_c); - - if (req128_c && log2_swath_height_c > 0) - log2_swath_height_c -= 1; } rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 98502a4f056726..9e1c18b90805d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -53,7 +53,7 @@ static void calculate_ttu_cursor( static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) { - unsigned int ret_val = 0; + unsigned int ret_val = 1; if (source_format == dm_444_16) { if (!is_chroma) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 0b132ce1d2cdcd..2b275e6803797a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -1924,15 +1924,6 @@ 
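The get_bytes_per_element hunks above change the fallback from 0 to 1; the result is used in divisions and log2 computations downstream, so an unrecognized source format most likely needs a non-zero default. A sketch of the shape of that helper; the real one covers many more dm_* formats, and the byte sizes below are only illustrative:

#include <stdio.h>

enum source_format { FMT_444_16, FMT_444_32, FMT_420_8, FMT_UNKNOWN };

static unsigned get_bytes_per_element(enum source_format fmt, int is_chroma)
{
    unsigned ret_val = 1;   /* was 0; 1 keeps later divisions and log2 well-defined */

    switch (fmt) {
    case FMT_444_16: ret_val = 2; break;
    case FMT_444_32: ret_val = 4; break;
    case FMT_420_8:  ret_val = is_chroma ? 2 : 1; break;
    default:         break; /* unknown formats fall back to 1 */
    }
    return ret_val;
}

int main(void)
{
    printf("bytes per element (unknown fmt) = %u\n",
           get_bytes_per_element(FMT_UNKNOWN, 0));
    return 0;
}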
static unsigned int CalculateVMAndRowBytes( *PixelPTEReqWidth = 32768.0 / BytePerPixel; *PTERequestSize = 64; FractionOfPTEReturnDrop = 0; - } else if (MacroTileSizeBytes == 4096) { - PixelPTEReqHeightPTEs = 1; - *PixelPTEReqHeight = MacroTileHeight; - *PixelPTEReqWidth = 8 * *MacroTileWidth; - *PTERequestSize = 64; - if (ScanDirection != dm_vert) - FractionOfPTEReturnDrop = 0; - else - FractionOfPTEReturnDrop = 7.0 / 8; } else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) { PixelPTEReqHeightPTEs = 16; *PixelPTEReqHeight = 16 * BlockHeight256Bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 9d399c4ce957d5..6f490d8d7038c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -160,8 +160,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { .pct_ideal_sdp_bw_after_urgent = 90.0, .pct_ideal_fabric_bw_after_urgent = 67.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, .pct_ideal_dram_bw_after_urgent_strobe = 67.0, .max_avg_sdp_bw_use_normal_percent = 80.0, .max_avg_fabric_bw_use_normal_percent = 60.0, @@ -871,8 +871,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time, * and the max of (VBLANK blanking time, MALL region)). */ - if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 && - subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0) + if (drr_timing && + stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 && + subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0) schedulable = true; return schedulable; @@ -937,7 +938,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) subvp_pipe = pipe; } - if (found) { + if (found && subvp_pipe) { phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); main_timing = &subvp_pipe->stream->timing; phantom_timing = &phantom_stream->timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 4297402bdab393..8839faf42207b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -139,8 +139,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { .pct_ideal_sdp_bw_after_urgent = 90.0, .pct_ideal_fabric_bw_after_urgent = 67.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, .pct_ideal_dram_bw_after_urgent_strobe = 67.0, .max_avg_sdp_bw_use_normal_percent = 80.0, .max_avg_fabric_bw_use_normal_percent = 60.0, diff --git 
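The subvp_drr_schedulable fix above adds a drr_timing null check in front of the same two-part test: the stretched DRR frame has to fit within the panel's minimum refresh period, and the SubVP active region has to cover prefetch, the stretched frame, and the larger of the VBLANK and MALL regions. A toy version of that test with made-up microsecond numbers:

#include <stdbool.h>
#include <stdio.h>

/* min_refresh_uhz being NULL stands in for drr_timing == NULL in the driver:
 * without a DRR timing we cannot claim schedulability. */
static bool drr_schedulable(const double *min_refresh_uhz,
                            double stretched_drr_us, double subvp_active_us,
                            double prefetch_us, double max_vblank_mallregion_us)
{
    if (!min_refresh_uhz)
        return false;

    double min_refresh_period_us = (1.0 / *min_refresh_uhz) * 1000000.0 * 1000000.0;

    return stretched_drr_us < min_refresh_period_us &&
           subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion_us > 0;
}

int main(void)
{
    double min_refresh_uhz = 48000000;   /* 48 Hz expressed in micro-Hz */

    printf("schedulable: %d\n",
           drr_schedulable(&min_refresh_uhz, 12000.0, 14000.0, 400.0, 500.0));
    return 0;
}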
a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 410e4b67122810..641a8cd019cd52 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -523,6 +523,7 @@ struct _vcs_dpi_display_pipe_dest_params_st { unsigned int vupdate_offset; unsigned int vupdate_width; unsigned int vready_offset; + unsigned int pstate_keepout; unsigned char interlaced; double pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index dae13f202220e3..d8bfc85e5dcd0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -39,7 +39,7 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) { - unsigned int ret_val = 0; + unsigned int ret_val = 1; if (source_format == dm_444_16) { if (!is_chroma) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index fea857214c0fc7..c4378e620cbf91 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -35,8 +35,6 @@ frame_warn_flag := -Wframe-larger-than=2048 endif endif -# DRIVER_BUILD is mostly used in DML2.1 source -subdir-ccflags-y += -DDRIVER_BUILD=1 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/ @@ -81,13 +79,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization := $(dml2_ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) @@ -104,13 +100,11 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o : CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_rcflags) 
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) @@ -126,13 +120,11 @@ DML21 += src/inc/dml2_debug.o DML21 += src/dml2_core/dml2_core_dcn4.o DML21 += src/dml2_core/dml2_core_factory.o DML21 += src/dml2_core/dml2_core_dcn4_calcs.o -DML21 += src/dml2_core/dml2_core_shared.o DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o DML21 += src/dml2_dpmm/dml2_dpmm_factory.o DML21 += src/dml2_mcg/dml2_mcg_dcn4.o DML21 += src/dml2_mcg/dml2_mcg_factory.o DML21 += src/dml2_pmo/dml2_pmo_dcn3.o -DML21 += src/dml2_pmo/dml2_pmo_dcn4.o DML21 += src/dml2_pmo/dml2_pmo_factory.o DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o DML21 += src/dml2_standalone_libraries/lib_float_math.o diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 547dfcc80fde47..d851c081e3768a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -8926,7 +8926,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc // The prefetch scheduling should only be calculated once as per AllowForPStateChangeOrStutterInVBlank requirement // If the AllowForPStateChangeOrStutterInVBlank requirement is not strict (i.e. only try those power saving feature - // if possible, then will try to program for the best power saving features in order of diffculty (dram, fclk, stutter) + // if possible, then will try to program for the best power saving features in order of difficulty (dram, fclk, stutter) s->iteration = 0; s->MaxTotalRDBandwidth = 0; s->AllPrefetchModeTested = false; @@ -9977,7 +9977,7 @@ void dml_core_get_row_heights( dml_print("DML_DLG: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes); #endif - // just suppluy with enough parameters to calculate meta and dte + // just supply with enough parameters to calculate meta and dte CalculateVMAndRowBytes( 0, // dml_bool_t ViewportStationary, 1, // dml_bool_t DCCEnable, @@ -10110,7 +10110,7 @@ dml_bool_t dml_mode_support( /// Note: In this function, it is assumed that DCFCLK, SOCCLK freq are the state values, and mode_program will just use the DML calculated DPPCLK and DISPCLK /// @param mode_lib mode_lib data struct that house all the input/output/bbox and calculation values. /// @param state_idx Power state idx chosen -/// @param display_cfg Display Congiuration +/// @param display_cfg Display Configuration /// @param call_standalone Calling mode_programming without calling mode support. 
Some of the "support" struct member will be pre-calculated before doing mode programming /// TODO: Add clk_cfg input, could be useful for standalone mode dml_bool_t dml_mode_programming( diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 06387b8b0aee5e..8697eac1e1f7e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -31,7 +31,7 @@ static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_ else soc_bb = &dml2_socbb_dcn401; - qos_params = &dml_dcn401_soc_qos_params; + qos_params = &dml_dcn4_variant_a_soc_qos_params; } /* patch soc bb */ @@ -516,7 +516,7 @@ static void populate_dml21_stream_overrides_from_stream_state( if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy || stream->debug.force_odm_combine_segments > 0) stream_desc->overrides.disable_dynamic_odm = true; - stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp; + stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req; } static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode) @@ -725,18 +725,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm const struct scaler_data *scaler_data = get_scaler_data_for_plane(dml_ctx, plane_state, context); struct dc_stream_state *stream = context->streams[stream_index]; - if (stream->cursor_attributes.color_format == CURSOR_MODE_MONO) - plane->cursor.cursor_bpp = 2; - else if (stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_1BIT_AND - || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA - || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - plane->cursor.cursor_bpp = 32; - } else if (stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED - || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED) { - plane->cursor.cursor_bpp = 64; - } else - plane->cursor.cursor_bpp = 32; - + plane->cursor.cursor_bpp = 32; plane->cursor.cursor_width = 256; plane->cursor.num_cursors = 1; @@ -788,6 +777,14 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm * certain cases. Hence do corrective active and disable scaling. */ plane->composition.scaler_info.enabled = false; + } else if ((plane_state->ctx->dc->config.use_spl == true) && + (plane->composition.scaler_info.enabled == false)) { + /* To enable sharpener for 1:1, scaler must be enabled. 
If use_spl is set, then + * allow case where ratio is 1 but taps > 1 + */ + if ((scaler_data->taps.h_taps > 1) || (scaler_data->taps.v_taps > 1) || + (scaler_data->taps.h_taps_c > 1) || (scaler_data->taps.v_taps_c > 1)) + plane->composition.scaler_info.enabled = true; } /* always_scale is only used for debug purposes not used in production but has to be @@ -827,6 +824,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { plane->tdlut.setup_for_tdlut = true; + switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.layout) { case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR: @@ -836,6 +834,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->tdlut.tdlut_addressing_mode = dml2_tdlut_simple_linear; break; } + switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.size) { case DC_CM2_GPU_MEM_SIZE_171717: plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube; @@ -844,8 +843,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined break; } - } else - plane->tdlut.setup_for_tdlut = false; + } + plane->tdlut.setup_for_tdlut |= dml_ctx->config.force_tdlut_enable; plane->dynamic_meta_data.enable = false; plane->dynamic_meta_data.lines_before_active_required = 0; @@ -859,7 +858,9 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->immediate_flip = plane_state->flip_immediate; - plane->composition.rect_out_height_spans_vactive = plane_state->dst_rect.height >= stream->timing.v_addressable; + plane->composition.rect_out_height_spans_vactive = + plane_state->dst_rect.height >= stream->timing.v_addressable && + stream->dst.height >= stream->timing.v_addressable; } //TODO : Could be possibly moved to a common helper layer. 
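The new SPL branch above keeps the scaler enabled for 1:1 ratios whenever any tap count exceeds 1, since sharpening needs the scaler path even without resizing. A compact stand-alone model of that decision; struct and parameter names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct taps { unsigned h, v, h_c, v_c; };

/* Scaler is normally off for 1:1, but with SPL in use it stays on when any
 * luma or chroma tap count is greater than 1, mirroring the branch above. */
static bool scaler_enabled(bool ratio_is_1_to_1, bool use_spl, struct taps t)
{
    bool enabled = !ratio_is_1_to_1;

    if (!enabled && use_spl &&
        (t.h > 1 || t.v > 1 || t.h_c > 1 || t.v_c > 1))
        enabled = true;

    return enabled;
}

int main(void)
{
    struct taps t = { .h = 4, .v = 4, .h_c = 2, .v_c = 2 };

    printf("scaler enabled for 1:1 with SPL: %d\n", scaler_enabled(true, true, t));
    return 0;
}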
@@ -949,6 +950,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s int stream_index, plane_index; int disp_cfg_stream_location, disp_cfg_plane_location; struct dml2_display_cfg *dml_dispcfg = &dml_ctx->v21.display_config; + unsigned int plane_count = 0; memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping)); @@ -958,6 +960,11 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s dml_dispcfg->minimize_det_reallocation = true; dml_dispcfg->overrides.enable_subvp_implicit_pmo = true; + if (in_dc->debug.disable_unbounded_requesting) { + dml_dispcfg->overrides.hw.force_unbounded_requesting.enable = true; + dml_dispcfg->overrides.hw.force_unbounded_requesting.value = false; + } + for (stream_index = 0; stream_index < context->stream_count; stream_index++) { disp_cfg_stream_location = map_stream_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]); @@ -1002,33 +1009,39 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s dml_dispcfg->plane_descriptors[disp_cfg_plane_location].overrides.uclk_pstate_change_strategy = dml21_force_pstate_method_to_uclk_state_change_strategy(dml_ctx->config.pmo.force_pstate_method_values[stream_index]); } + + plane_count++; } } } + if (plane_count == 0) { + dml_dispcfg->overrides.all_streams_blanked = true; + } + return true; } void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context) { /* TODO these should be the max of active, svp prefetch and idle should be tracked seperately */ - context->bw_ctx.bw.dcn.clk.dispclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dispclk_khz; - context->bw_ctx.bw.dcn.clk.dcfclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.dcfclk_khz; - context->bw_ctx.bw.dcn.clk.dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.uclk_khz; - context->bw_ctx.bw.dcn.clk.fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.fclk_khz; - context->bw_ctx.bw.dcn.clk.idle_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.idle.uclk_khz; - context->bw_ctx.bw.dcn.clk.idle_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.idle.fclk_khz; - context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.deepsleep_dcfclk_khz; + context->bw_ctx.bw.dcn.clk.dispclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dispclk_khz; + context->bw_ctx.bw.dcn.clk.dcfclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.dcfclk_khz; + context->bw_ctx.bw.dcn.clk.dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.uclk_khz; + context->bw_ctx.bw.dcn.clk.fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.fclk_khz; + context->bw_ctx.bw.dcn.clk.idle_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.idle.uclk_khz; + context->bw_ctx.bw.dcn.clk.idle_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.idle.fclk_khz; + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.deepsleep_dcfclk_khz; context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = in_ctx->v21.mode_programming.programming->fclk_pstate_supported; context->bw_ctx.bw.dcn.clk.p_state_change_support = in_ctx->v21.mode_programming.programming->uclk_pstate_supported; - 
context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dtbrefclk_khz > 0; - context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dtbrefclk_khz; + context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0; + context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz; } void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx) { struct dml2_core_internal_display_mode_lib *mode_lib = &in_ctx->v21.dml_init.dml2_instance->core_instance.clean_me_up.mode_lib; - double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;; + double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz; if (reg_set_idx >= DML2_DCHUB_WATERMARK_SET_NUM) { /* invalid register set index */ @@ -1053,16 +1066,16 @@ static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_wat switch (wm_index) { case DML2_DCHUB_WATERMARK_SET_A: - wm_regs = &watermarks->dcn4.a; + wm_regs = &watermarks->dcn4x.a; break; case DML2_DCHUB_WATERMARK_SET_B: - wm_regs = &watermarks->dcn4.b; + wm_regs = &watermarks->dcn4x.b; break; case DML2_DCHUB_WATERMARK_SET_C: - wm_regs = &watermarks->dcn4.c; + wm_regs = &watermarks->dcn4x.c; break; case DML2_DCHUB_WATERMARK_SET_D: - wm_regs = &watermarks->dcn4.d; + wm_regs = &watermarks->dcn4x.d; break; case DML2_DCHUB_WATERMARK_SET_NUM: default: @@ -1110,10 +1123,11 @@ void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_ global_sync = &stream_programming->phantom_stream.global_sync; } - pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4.vstartup_lines; - pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4.vupdate_offset_pixels; - pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4.vupdate_vupdate_width_pixels; - pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4.vready_offset_pixels; + pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4x.vstartup_lines; + pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4x.vupdate_offset_pixels; + pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4x.vupdate_vupdate_width_pixels; + pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4x.vready_offset_pixels; + pipe_ctx->pipe_dlg_param.pstate_keepout = global_sync->dcn4x.pstate_keepout_start_lines; pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst; @@ -1164,3 +1178,37 @@ void dml21_get_pipe_mcache_config( mcache_pipe_config->plane1_enabled = dml21_is_plane1_enabled(pln_prog->plane_descriptor->pixel_format); } + +void dml21_set_dc_p_state_type( + struct pipe_ctx *pipe_ctx, + struct dml2_per_stream_programming *stream_programming, + bool sub_vp_enabled) +{ + switch (stream_programming->uclk_pstate_method) { + case dml2_uclk_pstate_support_method_vactive: + case dml2_uclk_pstate_support_method_fw_vactive_drr: + pipe_ctx->p_state_type = P_STATE_V_ACTIVE; + break; + case dml2_uclk_pstate_support_method_vblank: + case dml2_uclk_pstate_support_method_fw_vblank_drr: + if (sub_vp_enabled) + pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP; + 
else + pipe_ctx->p_state_type = P_STATE_V_BLANK; + break; + case dml2_uclk_pstate_support_method_fw_subvp_phantom: + case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr: + pipe_ctx->p_state_type = P_STATE_SUB_VP; + break; + case dml2_uclk_pstate_support_method_fw_drr: + if (sub_vp_enabled) + pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP; + else + pipe_ctx->p_state_type = P_STATE_FPO; + break; + default: + pipe_ctx->p_state_type = P_STATE_UNKNOWN; + break; + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h index 4cc0a1fbb93d7e..476a7f6e48757b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h @@ -26,4 +26,5 @@ void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_water void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx); void dml21_map_hw_resources(struct dml2_context *dml_ctx); void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config); +void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c index d276458e50fdef..51d491bffa3241 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c @@ -11,7 +11,6 @@ #include "dml2_core_dcn4_calcs.h" - int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id) { int i; @@ -280,6 +279,23 @@ bool check_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx) dc_is_dp_signal(pipe_ctx->stream->signal)); } + +static bool is_sub_vp_enabled(struct dc *dc, struct dc_state *context) +{ + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream && dc_state_get_paired_subvp_stream(context, pipe_ctx->stream) && + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { + return true; + } + } + return false; +} + + void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_per_stream_programming *stream_prog) { @@ -310,12 +326,16 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex pipe_ctx->det_buffer_size_kb = pln_prog->pipe_regs[pipe_reg_index]->det_size * 64; } - pipe_ctx->plane_res.bw.dppclk_khz = pln_prog->min_clocks.dcn4.dppclk_khz; + pipe_ctx->plane_res.bw.dppclk_khz = pln_prog->min_clocks.dcn4x.dppclk_khz; if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipe_ctx->plane_res.bw.dppclk_khz) context->bw_ctx.bw.dcn.clk.dppclk_khz = pipe_ctx->plane_res.bw.dppclk_khz; dml21_populate_mall_allocation_size(context, dml_ctx, pln_prog, pipe_ctx); memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[pipe_ctx->pipe_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation)); + + bool sub_vp_enabled = is_sub_vp_enabled(pipe_ctx->stream->ctx->dc, context); + + dml21_set_dc_p_state_type(pipe_ctx, stream_prog, sub_vp_enabled); } static struct dc_stream_state 
*dml21_add_phantom_stream(struct dml2_context *dml_ctx, @@ -459,94 +479,103 @@ void dml21_build_fams2_programming(const struct dc *dc, struct dml2_context *dml_ctx) { int i, j, k; + unsigned int num_fams2_streams = 0; /* reset fams2 data */ - context->bw_ctx.bw.dcn.fams2_stream_count = 0; memset(&context->bw_ctx.bw.dcn.fams2_stream_params, 0, sizeof(struct dmub_fams2_stream_static_state) * DML2_MAX_PLANES); + memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config)); - if (!dml_ctx->v21.mode_programming.programming->fams2_required) - return; + if (dml_ctx->v21.mode_programming.programming->fams2_required) { + for (i = 0; i < context->stream_count; i++) { + int dml_stream_idx; + struct dc_stream_state *phantom_stream; + struct dc_stream_status *phantom_status; - for (i = 0; i < context->stream_count; i++) { - int dml_stream_idx; - struct dc_stream_state *phantom_stream; - struct dc_stream_status *phantom_status; - - struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[context->bw_ctx.bw.dcn.fams2_stream_count]; - - struct dc_stream_state *stream = context->streams[i]; - - if (context->stream_status[i].plane_count == 0 || - dml_ctx->config.svp_pstate.callbacks.get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) { - /* can ignore blanked or phantom streams */ - continue; - } + struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[num_fams2_streams]; - dml_stream_idx = dml21_helper_find_dml_pipe_idx_by_stream_id(dml_ctx, stream->stream_id); - if (dml_stream_idx < 0) { - ASSERT(dml_stream_idx >= 0); - continue; - } + struct dc_stream_state *stream = context->streams[i]; - /* copy static state from PMO */ - memcpy(static_state, - &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params, - sizeof(struct dmub_fams2_stream_static_state)); - - /* get information from context */ - static_state->num_planes = context->stream_status[i].plane_count; - static_state->otg_inst = context->stream_status[i].primary_otg_inst; - - /* populate pipe masks for planes */ - for (j = 0; j < context->stream_status[i].plane_count; j++) { - for (k = 0; k < dc->res_pool->pipe_count; k++) { - if (context->res_ctx.pipe_ctx[k].stream && - context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id && - context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) { - static_state->pipe_mask |= (1 << k); - static_state->plane_pipe_masks[j] |= (1 << k); - } + if (context->stream_status[i].plane_count == 0 || + dml_ctx->config.svp_pstate.callbacks.get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) { + /* can ignore blanked or phantom streams */ + continue; } - } - /* get per method programming */ - switch (static_state->type) { - case FAMS2_STREAM_TYPE_VBLANK: - case FAMS2_STREAM_TYPE_VACTIVE: - case FAMS2_STREAM_TYPE_DRR: - break; - case FAMS2_STREAM_TYPE_SUBVP: - phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, stream); - if (!phantom_stream) - break; + dml_stream_idx = dml21_helper_find_dml_pipe_idx_by_stream_id(dml_ctx, stream->stream_id); + if (dml_stream_idx < 0) { + ASSERT(dml_stream_idx >= 0); + continue; + } - phantom_status = dml_ctx->config.callbacks.get_stream_status(context, phantom_stream); + /* copy static state from PMO */ + memcpy(static_state, + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params, + sizeof(struct 
dmub_fams2_stream_static_state)); - /* phantom status should always be present */ - ASSERT(phantom_status); - static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst; + /* get information from context */ + static_state->num_planes = context->stream_status[i].plane_count; + static_state->otg_inst = context->stream_status[i].primary_otg_inst; - /* populate pipe masks for phantom planes */ - for (j = 0; j < phantom_status->plane_count; j++) { + /* populate pipe masks for planes */ + for (j = 0; j < context->stream_status[i].plane_count; j++) { for (k = 0; k < dc->res_pool->pipe_count; k++) { if (context->res_ctx.pipe_ctx[k].stream && - context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id && - context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) { - static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k); - static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k); + context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id && + context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) { + static_state->pipe_mask |= (1 << k); + static_state->plane_pipe_masks[j] |= (1 << k); + } + } + } + + /* get per method programming */ + switch (static_state->type) { + case FAMS2_STREAM_TYPE_VBLANK: + case FAMS2_STREAM_TYPE_VACTIVE: + case FAMS2_STREAM_TYPE_DRR: + break; + case FAMS2_STREAM_TYPE_SUBVP: + phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, stream); + if (!phantom_stream) + break; + + phantom_status = dml_ctx->config.callbacks.get_stream_status(context, phantom_stream); + + /* phantom status should always be present */ + ASSERT(phantom_status); + static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst; + + /* populate pipe masks for phantom planes */ + for (j = 0; j < phantom_status->plane_count; j++) { + for (k = 0; k < dc->res_pool->pipe_count; k++) { + if (context->res_ctx.pipe_ctx[k].stream && + context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id && + context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) { + static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k); + static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k); + } } } + break; + default: + ASSERT(false); + break; } - break; - default: - ASSERT(false); - break; + + num_fams2_streams++; } + } + + if (num_fams2_streams > 0) { + /* copy FAMS2 configuration */ + memcpy(&context->bw_ctx.bw.dcn.fams2_global_config, + &dml_ctx->v21.mode_programming.programming->fams2_global_config, + sizeof(struct dmub_cmd_fams2_global_config)); - context->bw_ctx.bw.dcn.fams2_stream_count++; + context->bw_ctx.bw.dcn.fams2_global_config.num_streams = num_fams2_streams; } - context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_stream_count > 0; + context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable; } bool dml21_is_plane1_enabled(enum dml2_source_format_class source_format) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index 41ecf00ed19659..d35dd507cb9f85 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -66,7 +66,9 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex disable_fams2; pmo_options->disable_fams2 = 
disable_fams2; - pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming; + pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || + in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; + pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h index 521f77b8ac445e..d82c681a540286 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h @@ -72,7 +72,7 @@ static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = { .scaling_factor_mhz = 0, }, .qos_params = { - .dcn4 = { + .dcn4x = { .df_qos_response_time_fclk_cycles = 300, .max_round_trip_to_furthest_cs_fclk_cycles = 350, .mall_overhead_fclk_cycles = 50, @@ -128,7 +128,7 @@ static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = { }, }, }, - .qos_type = dml2_qos_param_type_dcn4, + .qos_type = dml2_qos_param_type_dcn4x, }; static const struct dml2_soc_bb dml2_socbb_dcn31 = { @@ -228,7 +228,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn31 = { .scaling_factor_mhz = 0, }, .qos_params = { - .dcn4 = { + .dcn4x = { .df_qos_response_time_fclk_cycles = 300, .max_round_trip_to_furthest_cs_fclk_cycles = 350, .mall_overhead_fclk_cycles = 50, @@ -332,7 +332,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn31 = { }, }, }, - .qos_type = dml2_qos_param_type_dcn4, + .qos_type = dml2_qos_param_type_dcn4x, }, .power_management_parameters = { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h index fe07fcc3d0d551..8ef7977841de00 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h @@ -8,7 +8,7 @@ #include "dml_top_soc_parameter_types.h" -static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = { +static const struct dml2_soc_qos_parameters dml_dcn4_variant_a_soc_qos_params = { .derate_table = { .system_active_urgent = { .dram_derate_percent_pixel = 22, @@ -52,7 +52,7 @@ static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = { .scaling_factor_mhz = 0, }, .qos_params = { - .dcn4 = { + .dcn4x = { .df_qos_response_time_fclk_cycles = 300, .max_round_trip_to_furthest_cs_fclk_cycles = 350, .mall_overhead_fclk_cycles = 50, @@ -78,7 +78,7 @@ static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = { }, }, }, - .qos_type = dml2_qos_param_type_dcn4, + .qos_type = dml2_qos_param_type_dcn4x, }; static const struct dml2_soc_bb dml2_socbb_dcn401 = { @@ -178,7 +178,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn401 = { .scaling_factor_mhz = 0, }, .qos_params = { - .dcn4 = { + .dcn4x = { .df_qos_response_time_fclk_cycles = 300, .max_round_trip_to_furthest_cs_fclk_cycles = 350, .mall_overhead_fclk_cycles = 50, @@ -282,7 +282,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn401 = { }, }, }, - .qos_type = dml2_qos_param_type_dcn4, + .qos_type = dml2_qos_param_type_dcn4x, }, .power_management_parameters = { @@ -344,6 +344,9 @@ 
static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = { .config_return_buffer_segment_size_in_kbytes = 64, .meta_fifo_size_in_kentries = 22, .compressed_buffer_segment_size_in_kbytes = 64, + .max_flip_time_us = 80, + .max_flip_time_lines = 32, + .hostvm_mode = 0, .subvp_drr_scheduling_margin_us = 100, .subvp_prefetch_end_to_mall_start_us = 15, .subvp_fw_processing_delay = 15, @@ -351,14 +354,18 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = { .fams2 = { .max_allow_delay_us = 100 * 1000, - .scheduling_delay_us = 50, - .vertical_interrupt_ack_delay_us = 18, + .scheduling_delay_us = 125, + .vertical_interrupt_ack_delay_us = 40, .allow_programming_delay_us = 18, .min_allow_width_us = 20, .subvp_df_throttle_delay_us = 100, - .subvp_programming_delay_us = 18, + .subvp_programming_delay_us = 200, .subvp_prefetch_to_mall_delay_us = 18, - .drr_programming_delay_us = 18, + .drr_programming_delay_us = 35, + + .lock_timeout_us = 5000, + .recovery_timeout_us = 5000, + .flip_programming_delay_us = 300, }, }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h index a25f4e5977cfc6..a64ec4dcf11abe 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML_TOP_H__ #define __DML_TOP_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h index 8247289ce7d373..83fc15bf13cf7c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __dml2_TOP_DCHUB_REGISTERS_H__ #define __dml2_TOP_DCHUB_REGISTERS_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h index daae77f2672bff..b132f676a68dc9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML_TOP_DISPLAY_CFG_TYPES_H__ #define __DML_TOP_DISPLAY_CFG_TYPES_H__ @@ -411,7 +410,6 @@ struct dml2_stream_parameters { enum dml2_odm_mode odm_mode; bool disable_dynamic_odm; bool disable_subvp; - bool disable_fams2_drr; int minimum_vblank_idle_requirement_us; bool minimize_active_latency_hiding; @@ -478,6 +476,7 @@ struct dml2_display_cfg { bool max_outstanding_when_urgent_expected_disable; bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming unsigned int best_effort_min_active_latency_hiding_us; + bool all_streams_blanked; } overrides; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h index 2f444f44877016..8f624a912e78d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #ifndef __DML_TOP_POLICY_TYPES_H__ #define __DML_TOP_POLICY_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h index 065b2afab6fbb8..ebd8abe894a9a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML_TOP_SOC_PARAMETER_TYPES_H__ #define __DML_TOP_SOC_PARAMETER_TYPES_H__ @@ -27,7 +26,7 @@ struct dml2_soc_derates { struct dml2_soc_derate_values system_idle_average; }; -struct dml2_dcn3_soc_qos_params { +struct dml2_dcn32x_soc_qos_params { struct { unsigned int base_latency_us; unsigned int base_latency_pixel_vm_us; @@ -53,7 +52,7 @@ struct dml2_dcn4_uclk_dpm_dependent_qos_params { unsigned int average_latency_when_non_urgent_uclk_cycles; }; -struct dml2_dcn4_soc_qos_params { +struct dml2_dcn4x_soc_qos_params { unsigned int df_qos_response_time_fclk_cycles; unsigned int max_round_trip_to_furthest_cs_fclk_cycles; unsigned int mall_overhead_fclk_cycles; @@ -69,7 +68,7 @@ struct dml2_dcn4_soc_qos_params { enum dml2_qos_param_type { dml2_qos_param_type_dcn3, - dml2_qos_param_type_dcn4 + dml2_qos_param_type_dcn4x }; struct dml2_soc_qos_parameters { @@ -81,8 +80,8 @@ struct dml2_soc_qos_parameters { } writeback; union { - struct dml2_dcn3_soc_qos_params dcn3; - struct dml2_dcn4_soc_qos_params dcn4; + struct dml2_dcn32x_soc_qos_params dcn32x; + struct dml2_dcn4x_soc_qos_params dcn4x; } qos_params; enum dml2_qos_param_type qos_type; @@ -152,6 +151,7 @@ struct dml2_soc_bb { double phy_downspread_percent; double dcn_downspread_percent; double dispclk_dppclk_vco_speed_mhz; + bool no_dfs; bool do_urgent_latency_adjustment; unsigned int mem_word_bytes; unsigned int num_dcc_mcaches; @@ -173,6 +173,7 @@ struct dml2_ip_capabilities { unsigned int meta_fifo_size_in_kentries; unsigned int compressed_buffer_segment_size_in_kbytes; unsigned int max_flip_time_us; + unsigned int max_flip_time_lines; unsigned int hostvm_mode; unsigned int subvp_drr_scheduling_margin_us; unsigned int subvp_prefetch_end_to_mall_start_us; @@ -190,6 +191,10 @@ struct dml2_ip_capabilities { unsigned int subvp_programming_delay_us; unsigned int subvp_prefetch_to_mall_delay_us; unsigned int drr_programming_delay_us; + + unsigned int lock_timeout_us; + unsigned int recovery_timeout_us; + unsigned int flip_programming_delay_us; } fams2; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h index 8aa77bb190eaeb..eeb96c45565841 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h @@ -5,7 +5,6 @@ #ifndef __DML_TOP_TYPES_H__ #define __DML_TOP_TYPES_H__ -#include "dml_top_types.h" #include "dml_top_display_cfg_types.h" #include "dml_top_soc_parameter_types.h" #include "dml_top_policy_types.h" @@ -74,6 +73,7 @@ struct dml2_pmo_options { bool disable_drr_var; bool disable_drr_clamped; bool disable_drr_var_when_var_active; + bool disable_drr_clamped_when_var_active; bool disable_fams2; bool disable_vactive_det_fill_bw_pad; /* dml2_project_dcn4x_stage2_auto_drr_svp and above only */ bool disable_dyn_odm; @@ -228,7 +228,7 @@ struct dml2_per_plane_programming { union { struct { unsigned long dppclk_khz; - } dcn4; + } dcn4x; } min_clocks; struct 
dml2_mcache_surface_allocation mcache_allocation; @@ -262,7 +262,8 @@ union dml2_global_sync_programming { unsigned int vupdate_offset_pixels; unsigned int vupdate_vupdate_width_pixels; unsigned int vready_offset_pixels; - } dcn4; + unsigned int pstate_keepout_start_lines; + } dcn4x; }; struct dml2_per_stream_programming { @@ -273,7 +274,7 @@ struct dml2_per_stream_programming { unsigned long dscclk_khz; unsigned long dtbclk_khz; unsigned long phyclk_khz; - } dcn4; + } dcn4x; } min_clocks; union dml2_global_sync_programming global_sync; @@ -374,7 +375,7 @@ struct dml2_display_cfg_programming { unsigned long dispclk_khz; unsigned long dcfclk_deepsleep_khz; unsigned long dpp_ref_khz; - } dcn3; + } dcn32x; struct { struct { unsigned long uclk_khz; @@ -403,7 +404,7 @@ struct dml2_display_cfg_programming { uint32_t dpprefclk_did; uint32_t dtbrefclk_did; } divider_ids; - } dcn4; + } dcn4x; } min_clocks; bool uclk_pstate_supported; @@ -411,6 +412,7 @@ struct dml2_display_cfg_programming { /* indicates this configuration requires FW to support */ bool fams2_required; + struct dmub_cmd_fams2_global_config fams2_global_config; struct { bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c index 04edcde423a978..0aa4e4d343b04e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "dml2_internal_shared_types.h" #include "dml2_core_shared_types.h" #include "dml2_core_dcn4.h" @@ -10,7 +9,7 @@ #include "dml2_debug.h" #include "lib_float_math.h" -struct dml2_core_ip_params core_dcn4_ip_caps_base = { +static const struct dml2_core_ip_params core_dcn4_ip_caps_base = { // Hardcoded values for DCN3x .vblank_nom_default_us = 668, .remote_iommu_outstanding_translations = 256, @@ -70,6 +69,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = { .max_num_dp2p0_streams = 4, .imall_supported = 1, .max_flip_time_us = 80, + .max_flip_time_lines = 32, .words_per_channel = 16, .subvp_fw_processing_delay_us = 15, @@ -77,84 +77,6 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = { .subvp_swath_height_margin_lines = 16, }; -struct dml2_core_ip_params core_dcn4sw_ip_caps_base = { - .vblank_nom_default_us = 668, - .remote_iommu_outstanding_translations = 256, - .rob_buffer_size_kbytes = 192, - .config_return_buffer_size_in_kbytes = 1280, - .config_return_buffer_segment_size_in_kbytes = 64, - .compressed_buffer_segment_size_in_kbytes = 64, - .dpte_buffer_size_in_pte_reqs_luma = 68, - .dpte_buffer_size_in_pte_reqs_chroma = 36, - .pixel_chunk_size_kbytes = 8, - .alpha_pixel_chunk_size_kbytes = 4, - .min_pixel_chunk_size_bytes = 1024, - .writeback_chunk_size_kbytes = 8, - .line_buffer_size_bits = 1171920, - .max_line_buffer_lines = 32, - .writeback_interface_buffer_size_kbytes = 90, - - //Number of pipes after DCN Pipe harvesting - .max_num_dpp = 4, - .max_num_otg = 4, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 6, - .max_vscl_ratio = 6, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .dppclk_delay_subtotal = 47, - .dppclk_delay_scl = 
50, - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_cnvc_formatter = 28, - .dppclk_delay_cnvc_cursor = 6, - .cursor_buffer_size = 24, - .cursor_chunk_size = 2, - .dispclk_delay_subtotal = 125, - .max_inter_dcn_tile_repeaters = 8, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 1, - .writeback_max_vscl_taps = 1, - .writeback_line_buffer_buffer_size = 0, - .num_dsc = 4, - .maximum_dsc_bits_per_component = 12, - .maximum_pixels_per_line_per_dsc_unit = 5760, - .dsc422_native_support = true, - .dcc_supported = true, - .ptoi_supported = false, - - .cursor_64bpp_support = true, - .dynamic_metadata_vm_enabled = false, - - .max_num_hdmi_frl_outputs = 1, - .max_num_dp2p0_outputs = 4, - .max_num_dp2p0_streams = 4, - .imall_supported = 1, - .max_flip_time_us = 80, - .words_per_channel = 16, - - .subvp_fw_processing_delay_us = 15, - .subvp_pstate_allow_width_us = 20, - .subvp_swath_height_margin_lines = 16, - - .dcn_mrq_present = 1, - .zero_size_buffer_entries = 512, - .compbuf_reserved_space_zs = 64, - .dcc_meta_buffer_size_bytes = 6272, - .meta_chunk_size_kbytes = 2, - .min_meta_chunk_size_bytes = 256, - - .dchub_arb_to_ret_delay = 102, - .hostvm_mode = 1, -}; - static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *ip_caps, const struct dml2_core_ip_params *ip_params) { ip_caps->pipe_count = ip_params->max_num_dpp; @@ -169,6 +91,7 @@ static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *i ip_caps->meta_fifo_size_in_kentries = ip_params->meta_fifo_size_in_kentries; ip_caps->compressed_buffer_segment_size_in_kbytes = ip_params->compressed_buffer_segment_size_in_kbytes; ip_caps->max_flip_time_us = ip_params->max_flip_time_us; + ip_caps->max_flip_time_lines = ip_params->max_flip_time_lines; ip_caps->hostvm_mode = ip_params->hostvm_mode; // FIXME_STAGE2: cleanup after adding all dv override to ip_caps @@ -192,6 +115,7 @@ static void patch_ip_params_with_ip_caps(struct dml2_core_ip_params *ip_params, ip_params->meta_fifo_size_in_kentries = ip_caps->meta_fifo_size_in_kentries; ip_params->compressed_buffer_segment_size_in_kbytes = ip_caps->compressed_buffer_segment_size_in_kbytes; ip_params->max_flip_time_us = ip_caps->max_flip_time_us; + ip_params->max_flip_time_lines = ip_caps->max_flip_time_lines; ip_params->hostvm_mode = ip_caps->hostvm_mode; } @@ -222,6 +146,7 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out) } memcpy(&core->clean_me_up.mode_lib.soc, in_out->soc_bb, sizeof(struct dml2_soc_bb)); + memcpy(&core->clean_me_up.mode_lib.ip_caps, in_out->ip_caps, sizeof(struct dml2_ip_capabilities)); return true; } @@ -246,10 +171,12 @@ static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *p phantom->stream_index = phantom_stream_index; phantom->overrides.refresh_from_mall = dml2_refresh_from_mall_mode_override_force_disable; phantom->overrides.legacy_svp_config = dml2_svp_mode_override_phantom_pipe_no_data_return; - phantom->composition.viewport.plane0.height = (long int unsigned) math_ceil2( - (double)phantom->composition.viewport.plane0.height * (double)phantom_stream->timing.v_active / (double)main_stream->timing.v_active, 16.0); - phantom->composition.viewport.plane1.height = (long int unsigned) math_ceil2( - (double)phantom->composition.viewport.plane1.height * (double)phantom_stream->timing.v_active / (double)main_stream->timing.v_active, 16.0); + phantom->composition.viewport.plane0.height = 
(long int unsigned) math_min2(math_ceil2( + (double)main->composition.scaler_info.plane0.v_ratio * (double)phantom_stream->timing.v_active, 16.0), + (double)main->composition.viewport.plane0.height); + phantom->composition.viewport.plane1.height = (long int unsigned) math_min2(math_ceil2( + (double)main->composition.scaler_info.plane1.v_ratio * (double)phantom_stream->timing.v_active, 16.0), + (double)main->composition.viewport.plane1.height); phantom->immediate_flip = false; phantom->dynamic_meta_data.enable = false; phantom->cursor.num_cursors = 0; @@ -344,6 +271,8 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in // Check if FAMS2 is required if (display_cfg->stage3.performed && display_cfg->stage3.success) { programming->fams2_required = display_cfg->stage3.fams2_required; + + dml2_core_calcs_get_global_fams2_programming(&core->clean_me_up.mode_lib, display_cfg, &programming->fams2_global_config); } // Only loop over all the main streams (the implicit svp streams will be packed as part of the main stream) @@ -621,7 +550,7 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out l->mode_programming_ex_params.min_clk_table = in_out->instance->minimum_clock_table; l->mode_programming_ex_params.cfg_support_info = in_out->cfg_support_info; l->mode_programming_ex_params.programming = in_out->programming; - l->mode_programming_ex_params.min_clk_index = lookup_uclk_dpm_index_by_freq(in_out->programming->min_clocks.dcn4.active.uclk_khz, + l->mode_programming_ex_params.min_clk_index = lookup_uclk_dpm_index_by_freq(in_out->programming->min_clocks.dcn4x.active.uclk_khz, &core->clean_me_up.mode_lib.soc); result = dml2_core_calcs_mode_programming_ex(&l->mode_programming_ex_params); @@ -641,20 +570,20 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out for (plane_index = 0; plane_index < in_out->programming->display_config.num_planes; plane_index++) { in_out->programming->plane_programming[plane_index].num_dpps_required = core->clean_me_up.mode_lib.mp.NoOfDPP[plane_index]; - if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) - in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom; - else if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe) - in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom; - else if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return) - in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom; - else { - if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us) - in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive; - else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us) - in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank; - else - 
in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported; - } + if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) + in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom; + else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe) + in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom; + else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return) + in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom; + else { + if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us) + in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive; + else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us) + in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank; + else + in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported; + } dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h index 235280c6dcf576..e62b2d3eeee653 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #ifndef __DML2_CORE_DCN4_H__ #define __DML2_CORE_DCN4_H__ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 6f4026e396e098..3ea54fd52e4683 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -8,35 +8,55 @@ #include "dml2_debug.h" #include "lib_float_math.h" #include "dml_top_types.h" -#include "dml2_core_shared.h" -#define DML_VM_PTE_ADL_PATCH_EN -//#define DML_TVM_UPDATE_EN -#define DML_TDLUT_ROW_BYTES_FIX_EN -#define DML_REG_LIMIT_CLAMP_EN #define DML2_MAX_FMT_420_BUFFER_WIDTH 4096 #define DML_MAX_NUM_OF_SLICES_PER_DSC 4 -static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only) +const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type) +{ + switch (bw_type) { + case (dml2_core_internal_bw_sdp): + return("dml2_core_internal_bw_sdp"); + case (dml2_core_internal_bw_dram): + return("dml2_core_internal_bw_dram"); + case (dml2_core_internal_bw_max): + return("dml2_core_internal_bw_max"); + default: + return("dml2_core_internal_bw_unknown"); + } +} + +const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type) +{ + switch (dml2_core_internal_soc_state_type) { + case (dml2_core_internal_soc_state_sys_idle): + return("dml2_core_internal_soc_state_sys_idle"); + case (dml2_core_internal_soc_state_sys_active): + return("dml2_core_internal_soc_state_sys_active"); + case (dml2_core_internal_soc_state_svp_prefetch): + return("dml2_core_internal_soc_state_svp_prefetch"); + case dml2_core_internal_soc_state_max: + default: + return("dml2_core_internal_soc_state_unknown"); + } +} + +static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned int *remainder) +{ + *remainder = ((dividend / divisor) - (int)(dividend / divisor) > 0); + return dividend / divisor; +} + +static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only) { dml2_printf("DML: ===================================== \n"); dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n"); - if (!fail_only || support->ImmediateFlipSupport == 0) - dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); - if (!fail_only || support->WritebackLatencySupport == 0) - dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport); if (!fail_only || support->ScaleRatioAndTapsSupport == 0) dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport); if (!fail_only || support->SourceFormatPixelAndScanSupport == 0) dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport); - if (!fail_only || support->P2IWith420 == 1) - dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420); - if (!fail_only || support->DSCOnlyIfNecessaryWithBPP == 1) - dml2_printf("DML: support: DSCOnlyIfNecessaryWithBPP = %d\n", support->DSCOnlyIfNecessaryWithBPP); - if (!fail_only || support->DSC422NativeNotSupported == 1) - dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported); - if (!fail_only || support->DSCSlicesODMModeSupported == 0) - 
dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported); + if (!fail_only || support->ViewportSizeSupport == 0) + dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport); if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1) dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion); if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1) @@ -45,74 +65,87 @@ static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mod dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated); if (!fail_only || support->MultistreamWithHDMIOreDP == 1) dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP); + if (!fail_only || support->ExceededMultistreamSlots == 1) + dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots); if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1) dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink); if (!fail_only || support->NotEnoughLanesForMSO == 1) dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO); - if (!fail_only || support->NumberOfOTGSupport == 0) - dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport); - if (!fail_only || support->NumberOfHDMIFRLSupport == 0) - dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport); - if (!fail_only || support->NumberOfDP2p0Support == 0) - dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support); - if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0) - dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport); - if (!fail_only || support->CursorSupport == 0) - dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport); - if (!fail_only || support->PitchSupport == 0) - dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport); - if (!fail_only || support->ViewportExceedsSurface == 1) - dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface); - if (!fail_only || support->ExceededMALLSize == 1) - dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); - if (!fail_only || support->EnoughWritebackUnits == 0) - dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits); - if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1) - dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe); - if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1) - dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen); - if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1) - dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState); - if (!fail_only || support->ExceededMultistreamSlots == 1) - dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots); + if (!fail_only || support->P2IWith420 == 1) + dml2_printf("DML: support: 
P2IWith420 = %d\n", support->P2IWith420); + if (!fail_only || support->DSC422NativeNotSupported == 1) + dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported); + if (!fail_only || support->DSCSlicesODMModeSupported == 0) + dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported); if (!fail_only || support->NotEnoughDSCUnits == 1) dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits); if (!fail_only || support->NotEnoughDSCSlices == 1) dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices); - if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0) - dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport); + if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1) + dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe); + if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1) + dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen); if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1) dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported); + if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0) + dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport); if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1) dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported); - if (!fail_only || support->LinkCapacitySupport == 0) - dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport); + if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1) + dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState); if (!fail_only || support->ROBSupport == 0) dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport); if (!fail_only || support->OutstandingRequestsSupport == 0) dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport); if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0) dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance); - if (!fail_only || support->PTEBufferSizeNotExceeded == 0) - dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded); - if (!fail_only || support->AvgBandwidthSupport == 0) - dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport); - if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0) - dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport); + if (!fail_only || support->DISPCLK_DPPCLK_Support == 0) + dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support); + if (!fail_only || support->TotalAvailablePipesSupport == 0) + dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport); + if (!fail_only || support->NumberOfOTGSupport == 0) + dml2_printf("DML: support: NumberOfOTGSupport 
= %d\n", support->NumberOfOTGSupport); + if (!fail_only || support->NumberOfHDMIFRLSupport == 0) + dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport); + if (!fail_only || support->NumberOfDP2p0Support == 0) + dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support); + if (!fail_only || support->EnoughWritebackUnits == 0) + dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits); + if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0) + dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport); + if (!fail_only || support->WritebackLatencySupport == 0) + dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport); + if (!fail_only || support->CursorSupport == 0) + dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport); + if (!fail_only || support->PitchSupport == 0) + dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport); + if (!fail_only || support->ViewportExceedsSurface == 1) + dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface); if (!fail_only || support->PrefetchSupported == 0) dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported); + if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0) + dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport); + if (!fail_only || support->AvgBandwidthSupport == 0) + dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport); if (!fail_only || support->DynamicMetadataSupported == 0) dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported); if (!fail_only || support->VRatioInPrefetchSupported == 0) dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported); - if (!fail_only || support->DISPCLK_DPPCLK_Support == 0) - dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support); - if (!fail_only || support->TotalAvailablePipesSupport == 0) - dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport); + if (!fail_only || support->PTEBufferSizeNotExceeded == 1) + dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded); + if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1) + dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded); + if (!fail_only || support->ExceededMALLSize == 1) + dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); + if (!fail_only || support->g6_temp_read_support == 0) + dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support); + if (!fail_only || support->ImmediateFlipSupport == 0) + dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); + if (!fail_only || support->LinkCapacitySupport == 0) + dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport); + if (!fail_only || support->ModeSupport == 0) dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport); - if (!fail_only || support->ViewportSizeSupport == 0) - dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport); dml2_printf("DML: ===================================== \n"); } @@ 
-235,6 +268,7 @@ dml_get_per_pipe_var_func(vstartup_calculated, unsigned int, mode_lib->mp.VStart dml_get_per_pipe_var_func(vupdate_offset, unsigned int, mode_lib->mp.VUpdateOffsetPix); dml_get_per_pipe_var_func(vupdate_width, unsigned int, mode_lib->mp.VUpdateWidthPix); dml_get_per_pipe_var_func(vready_offset, unsigned int, mode_lib->mp.VReadyOffsetPix); +dml_get_per_pipe_var_func(pstate_keepout_dst_lines, unsigned int, mode_lib->mp.pstate_keepout_dst_lines); dml_get_per_pipe_var_func(det_stored_buffer_size_l_bytes, unsigned int, mode_lib->mp.DETBufferSizeY); dml_get_per_pipe_var_func(det_stored_buffer_size_c_bytes, unsigned int, mode_lib->mp.DETBufferSizeC); dml_get_per_pipe_var_func(det_buffer_size_kbytes, unsigned int, mode_lib->mp.DETBufferSizeInKByte); @@ -480,7 +514,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode default: DML2_ASSERT(0); return 256; - }; + } } static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan) @@ -2051,7 +2085,11 @@ static void CalculateDCCConfiguration( unsigned int full_swath_bytes_vert_wc_l; unsigned int full_swath_bytes_vert_wc_c; - yuv420 = dml_is_420(SourcePixelFormat); + if (dml_is_420(SourcePixelFormat)) + yuv420 = 1; + else + yuv420 = 0; + horz_div_l = 1; horz_div_c = 1; vert_div_l = 1; @@ -2343,16 +2381,16 @@ static void calculate_mcache_row_bytes( } if (p->gpuvm_enable) { - meta_per_mvmpg_per_channel = (float)vmpg_bytes / 256 / p->num_chans; + meta_per_mvmpg_per_channel = (float)vmpg_bytes / (float)256 / p->num_chans; //but using the est_blk_per_vmpg between 2 and 4, to be not as pessimestic if (p->surf_vert && vmpg_bytes > blk_bytes) { - meta_per_mvmpg_per_channel = (float)est_blk_per_vmpg * blk_bytes / 256 / p->num_chans; + meta_per_mvmpg_per_channel = (float)est_blk_per_vmpg * blk_bytes / (float)256 / p->num_chans; } *p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel)); // dcc_dr_oh_nom } else { - meta_per_mvmpg_per_channel = (float) blk_bytes / 256 / p->num_chans; + meta_per_mvmpg_per_channel = (float) blk_bytes / (float)256 / p->num_chans; if (!p->surf_vert) *p->dcc_dram_bw_nom_overhead_factor = 1 + 1.0 / 256.0; @@ -2519,8 +2557,11 @@ static void calculate_mcache_setting( l->luma_time_factor = (double)l->mvmpg_width_c / l->mvmpg_width_l * 2; // The algorithm starts with computing a non-integer, avg_mcache_element_size_l/c: - l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l; - if (l->is_dual_plane) { + if (*p->num_mcaches_l) { + l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l; + } + + if (l->is_dual_plane && *p->num_mcaches_c) { l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c; if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) { @@ -2649,9 +2690,9 @@ static double dml_get_return_bandwidth_available( double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes; double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes; - double derate_sdp_factor = 1; - double derate_fabric_factor = 1; - double derate_dram_factor = 1; + double derate_sdp_factor; + double derate_fabric_factor; + double derate_dram_factor; double derate_sdp_bandwidth; double derate_fabric_bandwidth; @@ -2851,16 +2892,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, 
s->HostVMDynamicLevels = CalculateHostVMDynamicLevels(p->display_cfg->gpuvm_enable, p->display_cfg->hostvm_enable, p->HostVMMinPageSize, p->display_cfg->hostvm_max_non_cached_page_table_levels); for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) { - if (p->display_cfg->hostvm_enable == true) { + if (p->display_cfg->gpuvm_enable == true) { p->vm_group_bytes[k] = 512; p->dpte_group_bytes[k] = 512; - } else if (p->display_cfg->gpuvm_enable == true) { - p->vm_group_bytes[k] = 2048; - if (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes >= 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle)) { - p->dpte_group_bytes[k] = 512; - } else { - p->dpte_group_bytes[k] = 2048; - } } else { p->vm_group_bytes[k] = 0; p->dpte_group_bytes[k] = 0; @@ -3185,7 +3219,7 @@ static double CalculateUrgentLatency( double fabric_max_transport_latency_margin) { double urgent_latency = 0; - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { urgent_latency = (df_qos_response_time_fclk_cycles + mall_overhead_fclk_cycles) / FabricClock + max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1 + fabric_max_transport_latency_margin / 100.0) + urgent_ramp_uclk_cycles / uclk_freq_mhz * (1 + umc_urgent_ramp_latency_margin / 100.0); @@ -3196,7 +3230,7 @@ static double CalculateUrgentLatency( } } #ifdef __DML_VBA_DEBUG__ - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles); dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz); @@ -3226,7 +3260,7 @@ static double CalculateTripToMemory( double fabric_max_transport_latency_margin) { double trip_to_memory_us; - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { trip_to_memory_us = mall_overhead_fclk_cycles / FabricClock + max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0) + trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0); @@ -3235,7 +3269,7 @@ static double CalculateTripToMemory( } #ifdef __DML_VBA_DEBUG__ - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles); dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles); @@ -3265,7 +3299,7 @@ static double CalculateMetaTripToMemory( double fabric_max_transport_latency_margin) { double meta_trip_to_memory_us; - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { meta_trip_to_memory_us = meta_trip_to_memory_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0) + meta_trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0); } else { @@ -3273,7 +3307,7 @@ static double CalculateMetaTripToMemory( } #ifdef __DML_VBA_DEBUG__ - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles); dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, 
meta_trip_to_memory_uclk_cycles); @@ -3781,8 +3815,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch p->SwathHeightC[k] = MaximumSwathHeightC[k] / 2; RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2; RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2; - p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;; - p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;; + p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64; + p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64; } if (p->SwathHeightC[k] == 0) @@ -3841,7 +3875,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch *p->compbuf_reserved_space_64b = 2 * p->pixel_chunk_size_kbytes * 1024 / 64; if (*p->UnboundedRequestEnabled) { *p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b, - (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / 64)), 1.0); + (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? MAXIMUMCOMPRESSION : 1) / 64)), 1.0); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]); dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes); @@ -3852,21 +3886,20 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch #endif *p->hw_debug5 = false; - if (!p->mrq_present) { - for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) { - if (!(*p->UnboundedRequestEnabled) - && p->display_cfg->plane_descriptors[k].surface.dcc.enable - && ((p->rob_buffer_size_kbytes * 1024 + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k]))) - *p->hw_debug5 = true; -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled); - dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION); - dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH); - dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte); - dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]); - dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5); + for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) { + if (!(p->mrq_present) && (!(*p->UnboundedRequestEnabled)) && (TotalActiveDPP == 1) + && p->display_cfg->plane_descriptors[k].surface.dcc.enable + && ((p->rob_buffer_size_kbytes * 1024 * (p->mrq_present ? 
MAXIMUMCOMPRESSION : 1) + + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k]))) + *p->hw_debug5 = true; +#ifdef __DML_VBA_DEBUG__ + dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled); + dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION); + dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH); + dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte); + dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]); + dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5); #endif - } } } @@ -4559,15 +4592,6 @@ static void calculate_tdlut_setting( return; } - - if (!p->setup_for_tdlut) { - *p->tdlut_groups_per_2row_ub = 0; - *p->tdlut_opt_time = 0; - *p->tdlut_drain_time = 0; - *p->tdlut_bytes_per_group = 0; - return; - } - if (p->tdlut_mpc_width_flag) { tdlut_mpc_width = 33; tdlut_bytes_per_group_simple = 39*256; @@ -4616,7 +4640,7 @@ static void calculate_tdlut_setting( *p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width; //the delivery cycles is DispClk cycles per line * number of lines * number of slices tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width; - tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / 9.0; + tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1); } else { //tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements *p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256); @@ -4627,7 +4651,7 @@ static void calculate_tdlut_setting( //the tdlut is fetched during the 2 row times of prefetch. if (p->setup_for_tdlut) { - *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2(*p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1); + *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1); *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate; *p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate; } @@ -4640,7 +4664,7 @@ static void calculate_tdlut_setting( dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz); dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width); - dml2_printf("DML::%s: tdlut_addressing_mode = %u\n", __func__, p->tdlut_addressing_mode); + dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? 
"sw_linear" : "simple_linear"); dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes); dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes); dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame); @@ -4706,11 +4730,12 @@ static void CalculateTarb( static double CalculateTWait( long reserved_vblank_time_ns, double UrgentLatency, - double Ttrip) + double Ttrip, + double g6_temp_read_blackout_us) { double TWait; double t_urg_trip = math_max2(UrgentLatency, Ttrip); - TWait = reserved_vblank_time_ns/1000.0 + t_urg_trip; + TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip; #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns); @@ -4858,13 +4883,23 @@ static double get_urgent_bandwidth_required( } if (!exclude_this_plane) { - surface_required_bw[k] = math_max4(NumberOfDPP[k] * prefetch_vmrow_bw[k], - l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur, - l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre, - (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k]); + l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k]; + l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur; + l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; + l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k]; + surface_required_bw[k] = math_max4(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw); /* export peak required bandwidth for the surface */ surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]); + +#ifdef __DML_VBA_DEBUG__ + dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw); + dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw); + dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw); + dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw); + dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]); + dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]); +#endif } else { surface_required_bw[k] = 0.0; } @@ -4873,6 +4908,8 @@ static double get_urgent_bandwidth_required( #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]); + dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw); + dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip); dml2_printf("DML::%s: k=%d, 
mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor); dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0); dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1); @@ -4886,6 +4923,8 @@ static double get_urgent_bandwidth_required( dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]); dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]); dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]); + dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]); + dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]); dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]); dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]); @@ -4964,7 +5003,7 @@ static void CalculateExtraLatency( max_request_size_bytes = request_size_bytes_chroma[k]; } - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { *ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK; *ExtraLatency = *ExtraLatency_sr; if (max_oustanding_when_urgent_expected) @@ -4980,11 +5019,14 @@ static void CalculateExtraLatency( #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type); + dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode); + dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips); dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected); dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock); dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK); dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW); dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles); + dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes); dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb); dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency); dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr); @@ -5021,6 +5063,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->trip_to_mem = 0.0; *p->Tvm_trips = 0.0; *p->Tr0_trips = 0.0; + s->Tvm_no_trip_oto = 0.0; + s->Tr0_no_trip_oto = 0.0; s->Tvm_trips_rounded = 0.0; s->Tr0_trips_rounded = 0.0; s->max_Tsw = 0.0; @@ -5037,7 +5081,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->bytes_pp = 0.0; s->dep_bytes = 0.0; s->min_Lsw_oto = 0.0; + s->min_Lsw_equ = 0.0; s->Tsw_est1 = 0.0; + s->Tsw_est2 = 0.0; s->Tsw_est3 = 0.0; s->cursor_prefetch_bytes = 0; *p->prefetch_cursor_bw = 0; @@ -5059,7 +5105,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels); dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable); dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup); - dml2_printf("DML::%s: MaxVStartup = %u\n", __func__, p->MaxVStartup); dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable); dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor); dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait); @@ -5092,21 +5137,15 @@ static bool CalculatePrefetchSchedule(struct 
dml2_core_internal_scratch *scratch s->LineTime = p->myPipe->HTotal / p->myPipe->PixelClock; s->trip_to_mem = p->Ttrip; -#ifdef DML_TVM_UPDATE_EN *p->Tvm_trips = p->ExtraLatencyPrefetch + math_max2(s->trip_to_mem * (p->display_cfg->gpuvm_max_page_table_levels * (s->HostVMDynamicLevelsTrips + 1)), p->Turg); if (dcc_mrq_enable) *p->Tvm_trips_flip = *p->Tvm_trips; else *p->Tvm_trips_flip = *p->Tvm_trips - s->trip_to_mem; -#else - *p->Tvm_trips = p->ExtraLatencyPrefetch + s->trip_to_mem * (p->display_cfg->gpuvm_max_page_table_levels * (s->HostVMDynamicLevelsTrips + 1)); - *p->Tvm_trips_flip = *p->Tvm_trips - s->trip_to_mem; -#endif *p->Tr0_trips_flip = s->trip_to_mem * (s->HostVMDynamicLevelsTrips + 1); *p->Tr0_trips = math_max2(*p->Tr0_trips_flip, p->tdlut_opt_time / 2); -#ifdef DML_TVM_UPDATE_EN if (p->DynamicMetadataVMEnabled == true) { *p->Tdmdl_vm = s->TWait_p + *p->Tvm_trips; *p->Tdmdl = *p->Tdmdl_vm + p->Ttrip; @@ -5114,15 +5153,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->Tdmdl_vm = 0; *p->Tdmdl = s->TWait_p + p->ExtraLatencyPrefetch + p->Ttrip; // Tex } -#else - if (p->DynamicMetadataVMEnabled == true) { - *p->Tdmdl_vm = s->TWait_p + *p->Tvm_trips; - *p->Tdmdl = *p->Tdmdl_vm + p->Ttrip; - } else { - *p->Tdmdl_vm = 0; - *p->Tdmdl = p->TWait + p->ExtraLatencyPrefetch; // Tex - } -#endif if (p->DynamicMetadataEnable == true) { if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) { @@ -5186,7 +5216,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler); #endif - s->NoTimeToPrefetch = false; #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips); dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips); @@ -5199,14 +5228,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime; *p->Tvm_trips_flip_rounded = math_ceil2(4.0 * *p->Tvm_trips_flip / s->LineTime, 1.0) / 4.0 * s->LineTime; } else { -#ifdef DML_TVM_UPDATE_EN if (p->DynamicMetadataEnable || dcc_mrq_enable || p->setup_for_tdlut) s->Tvm_trips_rounded = math_max2(s->LineTime * math_ceil2(4.0*math_max3(p->ExtraLatencyPrefetch, p->Turg, s->trip_to_mem)/s->LineTime, 1)/4, s->LineTime/4.0); else - s->Tvm_trips_rounded = s->LineTime / 4.0; -#else - s->Tvm_trips_rounded = s->LineTime / 4.0; -#endif + s->Tvm_trips_rounded = s->LineTime / 4.0; *p->Tvm_trips_flip_rounded = s->LineTime / 4.0; } @@ -5235,16 +5260,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->Tno_bw = 0; } -#ifdef DML_TVM_UPDATE_EN if (p->mrq_present || p->display_cfg->gpuvm_max_page_table_levels >= 3) *p->Tno_bw_flip = *p->Tno_bw; else *p->Tno_bw_flip = 0; //because there is no 3DLUT for iFlip -#else - *p->Tno_bw_flip = 0; - if (p->display_cfg->gpuvm_enable == true) - *p->Tno_bw_flip = *p->Tno_bw; -#endif if (dml_is_420(p->myPipe->SourcePixelFormat)) { s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC / 4.0; @@ -5258,64 +5277,63 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime); s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * 
p->myPipe->BytePerPixelC; -#ifdef DML_TDLUT_ROW_BYTES_FIX_EN s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor; s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor; -#endif s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw); s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__; s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0); s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime); + s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__; + s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0); + s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime); + vm_bytes = p->vm_bytes; // vm_bytes is dpde0_bytes_per_frame_ub_l + dpde0_bytes_per_frame_ub_c + 2*extra_dpde_bytes; extra_tdpe_bytes = (unsigned int)math_max2(0, (p->display_cfg->gpuvm_max_page_table_levels - 1) * 128); if (p->setup_for_tdlut) vm_bytes = vm_bytes + p->tdlut_pte_bytes_per_frame + (p->display_cfg->gpuvm_enable ? extra_tdpe_bytes : 0); -#ifdef DML_TDLUT_ROW_BYTES_FIX_EN tdlut_row_bytes = (unsigned long) math_ceil2(p->tdlut_bytes_per_frame/2.0, 1.0); -#else - tdlut_row_bytes = p->tdlut_pte_bytes_per_frame; -#endif -#ifdef DML_REG_LIMIT_CLAMP_EN s->prefetch_bw_oto = math_max3(s->prefetch_bw_oto, p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime)); -#endif s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0; if (p->display_cfg->gpuvm_enable == true) { - s->Tvm_oto = math_max3( - *p->Tvm_trips, + s->Tvm_no_trip_oto = math_max2( *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto, s->LineTime / 4.0); + s->Tvm_oto = math_max2( + *p->Tvm_trips, + s->Tvm_no_trip_oto); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips); dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto); dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0); #endif } else { -#ifdef DML_TVM_UPDATE_EN + s->Tvm_no_trip_oto = s->Tvm_trips_rounded; s->Tvm_oto = s->Tvm_trips_rounded; -#else - s->Tvm_oto = s->LineTime / 4.0; -#endif } if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) { - s->Tr0_oto = math_max3( - *p->Tr0_trips, + s->Tr0_no_trip_oto = math_max2( (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto, s->LineTime / 4.0); + s->Tr0_oto = math_max2( + *p->Tr0_trips, + s->Tr0_no_trip_oto); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips); dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto); dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4); #endif - } else - s->Tr0_oto = (s->LineTime - s->Tvm_oto) / 4.0; + } else { + s->Tr0_no_trip_oto = (s->LineTime - s->Tvm_oto) / 4.0; + s->Tr0_oto = s->Tr0_no_trip_oto; + } s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0; s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0; @@ -5325,19 +5343,16 @@ static 
bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch Lo = (unsigned int)(*p->DSTYAfterScaler + (double)*p->DSTXAfterScaler / (double)p->myPipe->HTotal); //Tpre_equ in line time -#ifdef DML_TVM_UPDATE_EN if (p->DynamicMetadataVMEnabled && p->DynamicMetadataEnable) s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, *p->Tvm_trips) + s->TWait_p) / s->LineTime - Lo; else s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, p->ExtraLatencyPrefetch) + s->TWait_p) / s->LineTime - Lo; -#else - s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(s->TWait_p + p->TCalc, *p->Tdmdl - p->Ttrip)) / s->LineTime - Lo; -#endif s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal); dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto); + dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ); dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw); dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip); dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch); @@ -5375,6 +5390,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0; s->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime; +#ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ); dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime); dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup); @@ -5395,18 +5411,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip); dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler); dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler); - - s->dep_bytes = math_max2(vm_bytes * p->HostVMInefficiencyFactor, p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes); - - dml2_printf("DML::%s: dep_bytes: %f\n", __func__, s->dep_bytes); - dml2_printf("DML::%s: prefetch_sw_bytes: %f\n", __func__, s->prefetch_sw_bytes); dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor); dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes); - - if (s->prefetch_sw_bytes < s->dep_bytes) { - s->prefetch_sw_bytes = 2 * s->dep_bytes; - dml2_printf("DML::%s: bump prefetch_sw_bytes to %f\n", __func__, s->prefetch_sw_bytes); - } + dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw); + dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre)); + dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips)); +#endif *p->dst_y_per_vm_vblank = 0; *p->dst_y_per_row_vblank = 0; @@ -5419,7 +5429,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch // Tvm_trips_rounded is Tvm_trips ceiling to 
1/4 line time // Tr0_trips_rounded is Tr0_trips ceiling to 1/4 line time // So that means prefetch bw calculated can be higher since the total time availabe for prefetch is less - if (s->dst_y_prefetch_equ > 1) { + bool min_Lsw_equ_ok = s->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime; + + if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok) { s->prefetch_bw1 = 0.; s->prefetch_bw2 = 0.; s->prefetch_bw3 = 0.; @@ -5436,28 +5448,35 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->prefetch_bw1 = 0; dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1); - if ((p->VStartup == p->MaxVStartup) && (s->Tsw_est1 / s->LineTime < s->min_Lsw_oto) && (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) { + if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) { s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / - (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw); + (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes))); dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, s->Tpre_rounded); - dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_oto * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw); - dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto); + dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw); + dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ); dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime); dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw); - dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw)); + dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw)); dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1); #endif } // prefetch_bw2: VM + SW - if (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded > 0) + if (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) { s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + s->prefetch_sw_bytes) / - (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded); - else + (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded); + s->Tsw_est2 = s->prefetch_sw_bytes / s->prefetch_bw2; + } else s->prefetch_bw2 = 0; + dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2); + if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) { + s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime); + dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2); + } + // prefetch_bw3: 2*R0 + SW if (s->Tpre_rounded - 
s->Tvm_trips_rounded > 0) { s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + s->prefetch_sw_bytes) / @@ -5467,8 +5486,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->prefetch_bw3 = 0; dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3); - if (p->VStartup == p->MaxVStartup && (s->Tsw_est3 / s->LineTime < s->min_Lsw_oto) && ((s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) { - s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded); + if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) { + s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded); dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3); } @@ -5484,6 +5503,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips)); dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips)); dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1); + dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2); dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3); dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1); dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2); @@ -5504,9 +5524,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch // here is to make sure equ bw wont be more agressive than the latency-based requirement. 
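The prefetch bandwidth candidates computed above (prefetch_bw1, prefetch_bw2, prefetch_bw3) share one pattern: derive a bandwidth from the bytes that must move divided by the time left after the fixed terms, then, if the swath time implied by that bandwidth falls below min_Lsw_equ line times, recompute with the minimum swath time carved out of the window. The sketch below is an illustrative reduction of that pattern with hypothetical names, ignoring the extra fractional line-time terms; it is not the DML routine itself.

static double prefetch_bw_candidate(double fixed_bytes, double sw_bytes,
				    double window_us, double min_sw_time_us)
{
	double bw, sw_time_est;

	if (window_us <= 0.0)
		return 0.0;

	/* First pass: fixed bytes and swath bytes share the whole window. */
	bw = (fixed_bytes + sw_bytes) / window_us;
	sw_time_est = sw_bytes / bw;

	/* If the swath would finish faster than the allowed minimum, reserve the
	 * minimum swath time and size the bandwidth for the fixed bytes alone. */
	if (sw_time_est < min_sw_time_us && window_us - min_sw_time_us > 0.0)
		bw = fixed_bytes / (window_us - min_sw_time_us);

	return bw;
}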
// check vm time >= vm_trips // check r0 time >= r0_trips + + double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes); + + dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded); + dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded); + if (s->prefetch_bw1 > 0) { - if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1 >= s->Tvm_trips_rounded && - (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw1 >= s->Tr0_trips_rounded) { + double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1; + double row_transfer_time = total_row_bytes / s->prefetch_bw1; + dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time); + dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time); + if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) { Case1OK = true; } } @@ -5516,8 +5545,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch // check vm time >= vm_trips // check r0 time < r0_trips if (s->prefetch_bw2 > 0) { - if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2 >= s->Tvm_trips_rounded && - (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw2 < s->Tr0_trips_rounded) { + double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2; + double row_transfer_time = total_row_bytes / s->prefetch_bw2; + dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time); + dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time); + if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) { Case2OK = true; } } @@ -5526,8 +5558,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch // check vm time < vm_trips // check r0 time >= r0_trips if (s->prefetch_bw3 > 0) { - if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3 < s->Tvm_trips_rounded && - (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw3 >= s->Tr0_trips_rounded) { + double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3; + double row_transfer_time = total_row_bytes / s->prefetch_bw3; + dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time); + dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time); + if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) { Case3OK = true; } } @@ -5542,11 +5577,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->prefetch_bw_equ = s->prefetch_bw4; } -#ifdef DML_REG_LIMIT_CLAMP_EN s->prefetch_bw_equ = math_max3(s->prefetch_bw_equ, p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime)); -#endif #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK); dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK); @@ -5578,6 +5611,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch dml2_printf("DML::%s: Tvm_equ = %f\n", 
__func__, s->Tvm_equ); dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ); #endif + // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank) + s->Lsw_equ = s->dst_y_prefetch_equ - math_ceil2(4.0 * (s->Tvm_equ + 2 * s->Tr0_equ) / s->LineTime, 1.0) / 4.0; + // Use the more stressful prefetch schedule if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) { *p->dst_y_prefetch = s->dst_y_prefetch_oto; @@ -5586,29 +5622,28 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + s->dst_y_per_vm_no_trip_vblank = math_ceil2(4.0 * s->Tvm_no_trip_oto / s->LineTime, 1.0) / 4.0; + s->dst_y_per_row_no_trip_vblank = math_ceil2(4.0 * s->Tr0_no_trip_oto / s->LineTime, 1.0) / 4.0; #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__); #endif - } else { *p->dst_y_prefetch = s->dst_y_prefetch_equ; s->TimeForFetchingVM = s->Tvm_equ; s->TimeForFetchingRowInVBlank = s->Tr0_equ; - if (p->VStartup == p->MaxVStartup) { - *p->dst_y_per_vm_vblank = math_floor2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; - *p->dst_y_per_row_vblank = math_floor2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; - } else { - *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; - *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; - } + *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + s->dst_y_per_vm_no_trip_vblank = *p->dst_y_per_vm_vblank; + s->dst_y_per_row_no_trip_vblank = *p->dst_y_per_row_vblank; + #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__); #endif } - // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank) - s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw + /* take worst case Lsw to calculate bandwidth requirement regardless of schedule */ + s->LinesToRequestPrefetchPixelData = math_min2(s->Lsw_equ, s->Lsw_oto); // Lsw s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line); *p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime); @@ -5645,7 +5680,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch (double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0)); } else { s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY); + dml2_printf("DML::%s: No time to prefetch!. 
LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY); *p->VRatioPrefetchY = 0; } #ifdef __DML_VBA_DEBUG__ @@ -5668,7 +5703,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0)); } else { s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC); + dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC); *p->VRatioPrefetchC = 0; } #ifdef __DML_VBA_DEBUG__ @@ -5690,14 +5725,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch #endif } else { s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: MyErr set, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required); - dml2_printf("DML::%s: MyErr set, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ); + dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required); + dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ); *p->VRatioPrefetchY = 0; *p->VRatioPrefetchC = 0; *p->RequiredPrefetchPixelDataBWLuma = 0; *p->RequiredPrefetchPixelDataBWChroma = 0; } - dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM); dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM); dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank); @@ -5708,7 +5742,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow); } else { - dml2_printf("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ); + dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ); + dml2_printf("DML::%s: No time to prefetch! 
min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n", + __func__, min_Lsw_equ_ok, s->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime); s->NoTimeToPrefetch = true; s->TimeForFetchingVM = 0; s->TimeForFetchingRowInVBlank = 0; @@ -5727,26 +5763,26 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch if (vm_bytes == 0) { prefetch_vm_bw = 0; - } else if (*p->dst_y_per_vm_vblank > 0) { + } else if (s->dst_y_per_vm_no_trip_vblank > 0) { #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor); dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank); dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime); #endif - prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime); + prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (s->dst_y_per_vm_no_trip_vblank * s->LineTime); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw); #endif } else { prefetch_vm_bw = 0; s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: MyErr set. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank); + dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank); } if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) { prefetch_row_bw = 0; - } else if (*p->dst_y_per_row_vblank > 0) { - prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime); + } else if (s->dst_y_per_row_no_trip_vblank > 0) { + prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (s->dst_y_per_row_no_trip_vblank * s->LineTime); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow); @@ -5756,7 +5792,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch } else { prefetch_row_bw = 0; s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: MyErr set. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank); + dml2_printf("DML::%s: No time to prefetch!. 
dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank); } *p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw); @@ -5773,11 +5809,16 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->VRatioPrefetchC = 0; *p->RequiredPrefetchPixelDataBWLuma = 0; *p->RequiredPrefetchPixelDataBWChroma = 0; + *p->prefetch_vmrow_bw = 0; } dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank); dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank); + dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw); + dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma); + dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma); dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch); + return s->NoTimeToPrefetch; } @@ -6169,6 +6210,7 @@ static void CalculateFlipSchedule( unsigned int dpte_row_height_chroma, bool use_one_row_for_frame_flip, unsigned int max_flip_time_us, + unsigned int max_flip_time_lines, unsigned int per_pipe_flip_bytes, unsigned int meta_row_bytes, unsigned int meta_row_height, @@ -6183,12 +6225,13 @@ static void CalculateFlipSchedule( { struct dml2_core_shared_CalculateFlipSchedule_locals *l = &s->CalculateFlipSchedule_locals; - l->dual_plane = dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha; + l->dual_plane = dml_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha; l->dpte_row_bytes = DPTEBytesPerRow; #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable); dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us); + dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines); dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip); dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes); dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw); @@ -6239,7 +6282,8 @@ static void CalculateFlipSchedule( if (use_lb_flip_bw) { // For mode check, calculation the flip bw requirement with worst case flip time - l->max_flip_time = math_min2(l->min_row_time, math_max2(Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded, (double)max_flip_time_us)); + l->max_flip_time = math_min2(math_min2(l->min_row_time, (double)max_flip_time_lines * LineTime / VRatio), + math_max2(Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded, (double)max_flip_time_us)); //The lower bound on flip bandwidth // Note: The get_urgent_bandwidth_required already consider dpte_row_bw and meta_row_bw in bandwidth calculation, so leave final_flip_bw = 0 if iflip not required @@ -6257,7 +6301,7 @@ static void CalculateFlipSchedule( #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time); dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes); - dml2_printf("DML::%s: total row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_row_bytes); + dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes); dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes); dml2_printf("DML::%s: 
lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip)); dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded)); @@ -6268,6 +6312,7 @@ static void CalculateFlipSchedule( dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows); dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime); dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows); + dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded)); } #endif l->lb_flip_bw = math_max3(l->lb_flip_bw, @@ -6284,7 +6329,7 @@ static void CalculateFlipSchedule( *dst_y_per_vm_flip = 1; // not used *dst_y_per_row_flip = 1; // not used - *ImmediateFlipSupportedForPipe = true; + *ImmediateFlipSupportedForPipe = l->min_row_time >= (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded); } else { if (iflip_enable) { l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i) @@ -6350,6 +6395,7 @@ static void CalculateFlipSchedule( dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip); dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip); dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip); + dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time); } dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw); dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe); @@ -6380,6 +6426,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( p->Watermark->StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitTime + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep; p->Watermark->Z8StutterExitWatermark = p->mmSOCParameters.SRExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep; p->Watermark->Z8StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep; + if (p->mmSOCParameters.qos_type == dml2_qos_param_type_dcn4x) { + p->Watermark->StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us; + p->Watermark->StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us; + p->Watermark->Z8StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us; + p->Watermark->Z8StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us; + } p->Watermark->g6_temp_read_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark; #ifdef __DML_VBA_DEBUG__ @@ -6541,7 +6593,8 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_unsupported; if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_auto) { - if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0 
&& reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency) + if (p->display_cfg->overrides.all_streams_blanked || + (s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)) p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank_and_vactive; else if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0) p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive; @@ -6585,13 +6638,13 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( s->src_y_ahead_c = (unsigned int)(math_floor2(p->DETBufferSizeC[k] / p->BytePerPixelDETC[k] / p->SwathWidthC[k], p->SwathHeightC[k]) + s->LBLatencyHidingSourceLinesC[k]); s->sub_vp_lines_c = s->src_y_pstate_c + s->src_y_ahead_c + p->meta_row_height_c[k]; - if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format)) + if (dml_is_420(p->display_cfg->plane_descriptors[k].pixel_format)) p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, 2 * s->sub_vp_lines_c)); else p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, p->meta_row_height_c[k]); + dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]); dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c); dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c); dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c); @@ -6856,7 +6909,8 @@ struct dml2_core_internal_g6_temp_read_blackouts_table { } entries[DML_MAX_CLK_TABLE_SIZE]; }; -struct dml2_core_internal_g6_temp_read_blackouts_table core_dcn4_g6_temp_read_blackout_table = { +static const struct dml2_core_internal_g6_temp_read_blackouts_table + core_dcn4_g6_temp_read_blackout_table = { .entries = { { .uclk_khz = 96000, @@ -6921,6 +6975,43 @@ static double get_g6_temp_read_blackout_us( return (double)blackout_us; } +static double get_max_urgent_latency_us( + struct dml2_dcn4x_soc_qos_params *dcn4x, + double uclk_freq_mhz, + double FabricClock, + unsigned int min_clk_index) +{ + double latency; + latency = dcn4x->per_uclk_dpm_params[min_clk_index].maximum_latency_when_urgent_uclk_cycles / uclk_freq_mhz + * (1 + dcn4x->umc_max_latency_margin / 100.0) + + dcn4x->mall_overhead_fclk_cycles / FabricClock + + dcn4x->max_round_trip_to_furthest_cs_fclk_cycles / FabricClock + * (1 + dcn4x->fabric_max_transport_latency_margin / 100.0); + return latency; +} + +static void calculate_pstate_keepout_dst_lines( + const struct dml2_display_cfg *display_cfg, + const struct dml2_core_internal_watermarks *watermarks, + unsigned int pstate_keepout_dst_lines[]) +{ + const struct dml2_stream_parameters *stream_descriptor; + unsigned int i; + + for (i = 0; i < display_cfg->num_planes; i++) { + if (!dml_is_phantom_pipe(&display_cfg->plane_descriptors[i])) { + stream_descriptor = &display_cfg->stream_descriptors[display_cfg->plane_descriptors[i].stream_index]; + + pstate_keepout_dst_lines[i] = + (unsigned int)math_ceil(watermarks->DRAMClockChangeWatermark / ((double)stream_descriptor->timing.h_total * 1000.0 / (double)stream_descriptor->timing.pixel_clock_khz)); + + if (pstate_keepout_dst_lines[i] > stream_descriptor->timing.v_total - 1) { + pstate_keepout_dst_lines[i] = stream_descriptor->timing.v_total - 1; + } + } + } +} + static bool dml_core_mode_support(struct 
dml2_core_calcs_mode_support_ex *in_out_params) { struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib; @@ -6963,7 +7054,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config); mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000); mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000); - mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params); + mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params); mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table); #if defined(__DML_VBA_DEBUG__) @@ -6981,7 +7072,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz); dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz); dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock); - dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz); dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes); dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present); @@ -7126,7 +7216,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.support.WritebackLatencySupport = true; for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && - (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024.0 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) { + (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) { mode_lib->ms.support.WritebackLatencySupport = false; } } @@ -7202,17 +7292,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out #if defined(DV_BUILD) // Assume a memory config setting of 3 in 420 mode or get a new ip parameter that reflects the programming. 
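A few hunks above, the writeback latency check compares each stream's writeback bandwidth against the interface buffer size drained over the base latency, with the division now carried out explicitly in double precision. The predicate below is a small illustrative restatement of that check with hypothetical parameter names, not the DML structures themselves.

#include <stdbool.h>

static bool writeback_latency_supported(double write_bw_bytes_per_us,
					unsigned int buffer_size_kbytes,
					double base_latency_us)
{
	/* Bytes the writeback interface buffer can absorb per microsecond of base latency. */
	double drain_bw = buffer_size_kbytes * 1024 / base_latency_us;

	return write_bw_bytes_per_us <= drain_bw;
}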
if (mode_lib->ms.BytePerPixelC[k] != 0.0 && display_cfg->plane_descriptors[k].pixel_format != dml2_rgbe_alpha) { - lb_buffer_size_bits_luma = 34620 * 57;; + lb_buffer_size_bits_luma = 34620 * 57; lb_buffer_size_bits_chroma = 13560 * 57; } #endif */ - mode_lib->ms.MaximumSwathWidthInLineBufferLuma = lb_buffer_size_bits_luma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ / + mode_lib->ms.MaximumSwathWidthInLineBufferLuma = lb_buffer_size_bits_luma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, 1.0) / 57 / (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio, 1.0) - 2, 0.0)); if (mode_lib->ms.BytePerPixelC[k] == 0.0) { mode_lib->ms.MaximumSwathWidthInLineBufferChroma = 0; } else { - mode_lib->ms.MaximumSwathWidthInLineBufferChroma = lb_buffer_size_bits_chroma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ / + mode_lib->ms.MaximumSwathWidthInLineBufferChroma = lb_buffer_size_bits_chroma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, 1.0) / 57 / (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio, 1.0) - 2, 0.0)); } @@ -7231,10 +7321,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out /* Cursor Support Check */ mode_lib->ms.support.CursorSupport = true; for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - if (display_cfg->plane_descriptors[k].cursor.cursor_width > 0.0) { - if (display_cfg->plane_descriptors[k].cursor.cursor_bpp == 64 && mode_lib->ip.cursor_64bpp_support == false) { + if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) { + if (display_cfg->plane_descriptors[k].cursor.cursor_bpp == 64 && mode_lib->ip.cursor_64bpp_support == false) mode_lib->ms.support.CursorSupport = false; - } } } @@ -7295,7 +7384,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.support.ViewportExceedsSurface = false; if (!display_cfg->overrides.hw.surface_viewport_size_check_disable) { for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - if (display_cfg->plane_descriptors[k].composition.viewport.plane0.width > display_cfg->plane_descriptors[k].surface.plane0.width || display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) { + if (display_cfg->plane_descriptors[k].composition.viewport.plane0.width > display_cfg->plane_descriptors[k].surface.plane0.width || + display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) { mode_lib->ms.support.ViewportExceedsSurface = true; #if defined(__DML_VBA_DEBUG__) dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width); @@ -7304,11 +7394,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height); dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface); #endif - if 
(dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) { - if (display_cfg->plane_descriptors[k].composition.viewport.plane1.width > display_cfg->plane_descriptors[k].surface.plane1.width || - display_cfg->plane_descriptors[k].composition.viewport.plane1.height > display_cfg->plane_descriptors[k].surface.plane1.height) { - mode_lib->ms.support.ViewportExceedsSurface = true; - } + } + if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) { + if (display_cfg->plane_descriptors[k].composition.viewport.plane1.width > display_cfg->plane_descriptors[k].surface.plane1.width || + display_cfg->plane_descriptors[k].composition.viewport.plane1.height > display_cfg->plane_descriptors[k].surface.plane1.height) { + mode_lib->ms.support.ViewportExceedsSurface = true; } } } @@ -7466,6 +7556,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out &mode_lib->ms.OutputRate[k], &mode_lib->ms.RequiredSlots[k]); + if (s->OutputBpp[k] == 0.0) { + s->OutputBpp[k] = mode_lib->ms.OutputBpp[k]; + } + if (mode_lib->ms.RequiresDSC[k] == false) { mode_lib->ms.ODMMode[k] = s->ODMModeNoDSC; mode_lib->ms.RequiredDISPCLKPerSurface[k] = s->RequiredDISPCLKPerSurfaceNoDSC; @@ -7580,7 +7674,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_width, - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height, + display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total, mode_lib->ip.writeback_line_buffer_buffer_size)); } @@ -7665,8 +7759,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced == 1 && mode_lib->ip.ptoi_supported == true) mode_lib->ms.support.P2IWith420 = true; - if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary && s->OutputBpp[k] != 0) - mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP = true; if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 && !mode_lib->ip.dsc422_native_support) mode_lib->ms.support.DSC422NativeNotSupported = true; @@ -7819,7 +7911,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.DSCDelay[k] = DSCDelayRequirement(mode_lib->ms.RequiresDSC[k], mode_lib->ms.ODMMode[k], mode_lib->ip.maximum_dsc_bits_per_component, - mode_lib->ms.OutputBpp[k], + 
s->OutputBpp[k], display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total, mode_lib->ms.support.NumberOfDSCSlices[k], @@ -8059,59 +8151,63 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.excess_vactive_fill_bw_c); mode_lib->ms.UrgLatency = CalculateUrgentLatency( - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us, mode_lib->soc.do_urgent_latency_adjustment, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz, mode_lib->ms.FabricClock, mode_lib->ms.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); mode_lib->ms.TripToMemory = CalculateTripToMemory( mode_lib->ms.UrgLatency, mode_lib->ms.FabricClock, mode_lib->ms.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin, + 
mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); mode_lib->ms.TripToMemory = math_max2(mode_lib->ms.UrgLatency, mode_lib->ms.TripToMemory); for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); bool cursor_not_enough_urgent_latency_hiding = 0; - calculate_cursor_req_attributes( - display_cfg->plane_descriptors[k].cursor.cursor_width, - display_cfg->plane_descriptors[k].cursor.cursor_bpp, - // output - &s->cursor_lines_per_chunk[k], - &s->cursor_bytes_per_line[k], - &s->cursor_bytes_per_chunk[k], - &s->cursor_bytes[k]); - - calculate_cursor_urgent_burst_factor( - mode_lib->ip.cursor_buffer_size, - display_cfg->plane_descriptors[k].cursor.cursor_width, - s->cursor_bytes_per_chunk[k], - s->cursor_lines_per_chunk[k], - line_time_us, - mode_lib->ms.UrgLatency, + if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) { + calculate_cursor_req_attributes( + display_cfg->plane_descriptors[k].cursor.cursor_width, + display_cfg->plane_descriptors[k].cursor.cursor_bpp, + + // output + &s->cursor_lines_per_chunk[k], + &s->cursor_bytes_per_line[k], + &s->cursor_bytes_per_chunk[k], + &s->cursor_bytes[k]); + + calculate_cursor_urgent_burst_factor( + mode_lib->ip.cursor_buffer_size, + display_cfg->plane_descriptors[k].cursor.cursor_width, + s->cursor_bytes_per_chunk[k], + s->cursor_lines_per_chunk[k], + line_time_us, + mode_lib->ms.UrgLatency, + + // output + &mode_lib->ms.UrgentBurstFactorCursor[k], + &cursor_not_enough_urgent_latency_hiding); + } - // output - &mode_lib->ms.UrgentBurstFactorCursor[k], - &cursor_not_enough_urgent_latency_hiding); mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k]; #ifdef __DML_VBA_DEBUG__ @@ -8254,20 +8350,20 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = true; mode_lib->ms.support.avg_urgent_latency_us - = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0) - + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock) - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0); + = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0) + + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock) + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0); mode_lib->ms.support.avg_non_urgent_latency_us - = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0) - + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / 
mode_lib->ms.FabricClock) - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0); + = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0) + + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock) + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0); for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) { + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) { outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_luma[k] / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)); @@ -8287,7 +8383,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out #endif } - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4 && mode_lib->ms.BytePerPixelC[k] > 0) { + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x && mode_lib->ms.BytePerPixelC[k] > 0) { outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_chroma[k] / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)); @@ -8460,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out { mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep; - calculate_hostvm_inefficiency_factor( &s->HostVMInefficiencyFactor, &s->HostVMInefficiencyFactorPrefetch, @@ -8501,10 +8596,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active]; + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3) + s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes)); + CalculateExtraLatency( display_cfg, mode_lib->ip.rob_buffer_size_kbytes, - 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles, s->ReorderingBytes, mode_lib->ms.DCFCLK, mode_lib->ms.FabricClock, @@ -8540,7 +8640,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.TWait[k] = CalculateTWait( display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns, mode_lib->ms.UrgLatency, - mode_lib->ms.TripToMemory); + mode_lib->ms.TripToMemory, + !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ? 
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0); myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k]; myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK; @@ -8587,7 +8689,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format; CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters; CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k]; - CalculatePrefetchSchedule_params->MaxVStartup = s->MaximumVStartup[k]; CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes; CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable; CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled; @@ -8669,8 +8770,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]); dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]); dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]); - dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]); dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]); + dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]); } } @@ -8683,20 +8784,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.support.VRatioInPrefetchSupported = true; for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { - if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ || - mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__) { + if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ || + mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) { mode_lib->ms.support.VRatioInPrefetchSupported = false; + dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__); + dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__); dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported); } } - s->AnyLinesForVMOrRowTooLarge = false; - for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { - if (mode_lib->ms.LinesForDPTERow[k] >= 16 || mode_lib->ms.LinesForVM[k] >= 32) { - s->AnyLinesForVMOrRowTooLarge = true; - } - } - // Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok if (mode_lib->ms.support.PrefetchSupported) { for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { @@ -8845,6 +8941,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.dpte_row_height_chroma[k], mode_lib->ms.use_one_row_for_frame_flip[k], mode_lib->ip.max_flip_time_us, + mode_lib->ip.max_flip_time_lines, s->per_pipe_flip_bytes[k], 
mode_lib->ms.meta_row_bytes[k], s->meta_row_height_luma[k], @@ -8932,6 +9029,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out s->mSOCParameters.USRRetrainingLatency = 0; s->mSOCParameters.SMNLatency = 0; s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index); + s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index); + s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock; + s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type; CalculateWatermarks_params->display_cfg = display_cfg; CalculateWatermarks_params->USRRetrainingRequired = false; @@ -8951,7 +9051,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC; CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY; CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC; - //CalculateWatermarks_params->LBBitPerPixel = 57; // FIXME_STAGE2, need a new ip param? CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY; CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC; CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP; @@ -8979,29 +9078,24 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs; CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params); - } + calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]); + } + dml2_printf("DML::%s: Done prefetch calculation\n", __func__); // End of Prefetch Check - dml2_printf("DML::%s: Done prefetch calculation\n", __func__); + mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us; //Re-ordering Buffer Support Check - mode_lib->ms.support.max_urgent_latency_us - = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0) - + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock - + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0); - - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) { + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) { if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 - / mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= mode_lib->ms.support.max_urgent_latency_us) { + / mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= s->mSOCParameters.max_urgent_latency_us) { mode_lib->ms.support.ROBSupport = true; } else { mode_lib->ms.support.ROBSupport = false; } } 
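/*
 * Editor's aside (illustrative sketch, not part of the patch): the DCN4x
 * branch just above sizes the return-order buffer (ROB) against the maximum
 * urgent latency: the ROB space in excess of one pixel chunk, filled at the
 * non-urgent flip bandwidth, must cover at least max_urgent_latency_us.  A
 * hypothetical standalone form of that check, with placeholder names and the
 * bandwidth in bytes per microsecond:
 */
#include <stdbool.h>

bool rob_hides_max_urgent_latency(unsigned int rob_buffer_size_kbytes,
				  unsigned int pixel_chunk_size_kbytes,
				  double non_urg_flip_bw_bytes_per_us,
				  double max_urgent_latency_us)
{
	/* ROB space in excess of one pixel chunk. */
	double spare_rob_bytes = (rob_buffer_size_kbytes - pixel_chunk_size_kbytes) * 1024.0;

	/* Time that spare space can absorb traffic arriving at the flip bandwidth. */
	double hideable_latency_us = spare_rob_bytes / non_urg_flip_bw_bytes_per_us;

	return hideable_latency_us >= max_urgent_latency_us;
}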
else { - if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn3.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) { + if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) { mode_lib->ms.support.ROBSupport = true; } else { mode_lib->ms.support.ROBSupport = false; @@ -9024,15 +9118,12 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.dram_change_vactive_det_fill_delay_us); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.max_urgent_latency_us); + dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us); dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport); #endif /*Mode Support, Voltage State and SOC Configuration*/ { - // s->dram_clock_change_support = 1; - // s->f_clock_change_support = 1; - if (mode_lib->ms.support.ScaleRatioAndTapsSupport && mode_lib->ms.support.SourceFormatPixelAndScanSupport && mode_lib->ms.support.ViewportSizeSupport @@ -9043,9 +9134,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out && !mode_lib->ms.support.ExceededMultistreamSlots && !mode_lib->ms.support.MSOOrODMSplitWithNonDPLink && !mode_lib->ms.support.NotEnoughLanesForMSO - //&& mode_lib->ms.support.LinkCapacitySupport == true // FIXME_STAGE2 && !mode_lib->ms.support.P2IWith420 - && !mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP && !mode_lib->ms.support.DSC422NativeNotSupported && mode_lib->ms.support.DSCSlicesODMModeSupported && !mode_lib->ms.support.NotEnoughDSCUnits @@ -9113,7 +9202,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out #if defined(__DML_VBA_DEBUG__) if (!mode_lib->ms.support.ModeSupport) - dml2_print_dml_mode_support_info(&mode_lib->ms.support, true); + dml2_print_mode_support_info(&mode_lib->ms.support, true); dml2_printf("DML::%s: --- DONE --- \n", __func__); #endif @@ -9132,6 +9221,10 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support *in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support; dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index); + + for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++) + dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns); + dml2_printf("DML::%s: ------------- DONE ----------\n", __func__); return result; @@ -9373,11 +9466,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE } else { dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma, 1.0)); } -#ifdef DML_VM_PTE_ADL_PATCH_EN if (dpte_groups_per_row_luma_ub <= 2) { dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1; } -#endif dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]); dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]); dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]); @@ -9406,11 +9497,9 @@ static void CalculateMetaAndPTETimes(struct 
dml2_core_shared_CalculateMetaAndPTE } else { dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma, 1.0)); } -#ifdef DML_VM_PTE_ADL_PATCH_EN if (dpte_groups_per_row_chroma_ub <= 2) { dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1; } -#endif dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]); dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma); dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub); @@ -9535,17 +9624,16 @@ static void CalculateVMGroupAndRequestTimes( line_time = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz; -#ifdef DML_VM_PTE_ADL_PATCH_EN - if (num_group_per_lower_vm_stage_flip <= 2) { - num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage_flip + 1; - } + if (num_group_per_lower_vm_stage_pref > 0) + TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref; + else + TimePerVMGroupVBlank[k] = 0; + + if (num_group_per_lower_vm_stage_flip > 0) + TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip; + else + TimePerVMGroupFlip[k] = 0; - if (num_group_per_lower_vm_stage_pref <= 2) { - num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage_pref + 1; - } -#endif - TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref; - TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip; if (num_req_per_lower_vm_stage_pref > 0) TimePerVMRequestVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_req_per_lower_vm_stage_pref; else @@ -9599,10 +9687,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc bool FoundCriticalSurface = false; double LastZ8StutterPeriod = 0; - unsigned int SwathSizeCriticalSurface; - unsigned int LastChunkOfSwathSize; - unsigned int MissingPartOfLastSwathOfDETSize; - memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals)); for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) { @@ -9777,7 +9861,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / (p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) + (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) - / math_max2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) + + / math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) + *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW; #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 
1 : l->AverageDCCCompressionRate)); @@ -9871,19 +9955,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame); #endif - SwathSizeCriticalSurface = (unsigned int)(l->BytePerPixelYCriticalSurface * l->SwathHeightYCriticalSurface * math_ceil2(l->SwathWidthYCriticalSurface, l->BlockWidth256BytesYCriticalSurface)); - LastChunkOfSwathSize = SwathSizeCriticalSurface % (p->PixelChunkSizeInKByte * 1024); - MissingPartOfLastSwathOfDETSize = (unsigned int)(math_ceil2(l->DETBufferSizeYCriticalSurface, SwathSizeCriticalSurface) - l->DETBufferSizeYCriticalSurface); - - *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface && (LastChunkOfSwathSize > 0) && - (LastChunkOfSwathSize <= 4096) && (MissingPartOfLastSwathOfDETSize > 0) && (MissingPartOfLastSwathOfDETSize <= LastChunkOfSwathSize)); + *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: SwathSizeCriticalSurface = %u\n", __func__, SwathSizeCriticalSurface); dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface); dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte); - dml2_printf("DML::%s: LastChunkOfSwathSize = %u\n", __func__, LastChunkOfSwathSize); - dml2_printf("DML::%s: MissingPartOfLastSwathOfDETSize = %u\n", __func__, MissingPartOfLastSwathOfDETSize); dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE); #endif } @@ -9928,14 +10004,14 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.num_active_pipes = dml_get_num_active_pipes(display_cfg->num_planes, cfg_support_info); dml_calc_pipe_plane_mapping(cfg_support_info, mode_lib->mp.pipe_plane); - mode_lib->mp.Dcfclk = programming->min_clocks.dcn4.active.dcfclk_khz / 1000.0; - mode_lib->mp.FabricClock = programming->min_clocks.dcn4.active.fclk_khz / 1000.0; - mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table.dram_config); - mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4.active.uclk_khz / 1000.0; - mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4.dpprefclk_khz / 1000.0; - s->SOCCLK = (double)programming->min_clocks.dcn4.socclk_khz / 1000; - mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params); - mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table); + mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0; + mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0; + mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config); + mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0; + mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0; + s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000; + 
mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4x.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params); + mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table); for (k = 0; k < s->num_active_planes; ++k) { unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index; @@ -9970,18 +10046,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex for (k = 0; k < s->num_active_planes; ++k) { mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used; - mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4.dppclk_khz / 1000.0; + mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0; dml2_assert(mode_lib->mp.Dppclk[k] > 0); } for (k = 0; k < s->num_active_planes; ++k) { unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index; - mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4.dscclk_khz / 1000.0; + mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0; dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]); } - mode_lib->mp.Dispclk = programming->min_clocks.dcn4.dispclk_khz / 1000.0; - mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4.deepsleep_dcfclk_khz / 1000.0; + mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0; + mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0; dml2_assert(mode_lib->mp.Dcfclk > 0); dml2_assert(mode_lib->mp.FabricClock > 0); @@ -10462,11 +10538,16 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params); } + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3) + s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes)); + CalculateExtraLatency( display_cfg, mode_lib->ip.rob_buffer_size_kbytes, - 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles, - s->ReorderBytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles, + s->ReorderingBytes, mode_lib->mp.Dcfclk, mode_lib->mp.FabricClock, mode_lib->ip.pixel_chunk_size_kbytes, @@ -10551,32 +10632,32 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.excess_vactive_fill_bw_c); mode_lib->mp.UrgentLatency = CalculateUrgentLatency( - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us, 
mode_lib->soc.do_urgent_latency_adjustment, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz, mode_lib->mp.FabricClock, mode_lib->mp.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); mode_lib->mp.TripToMemory = CalculateTripToMemory( mode_lib->mp.UrgentLatency, mode_lib->mp.FabricClock, mode_lib->mp.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); mode_lib->mp.TripToMemory = math_max2(mode_lib->mp.UrgentLatency, mode_lib->mp.TripToMemory); @@ -10585,38 +10666,40 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.FabricClock, mode_lib->mp.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.meta_trip_adder_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.meta_trip_adder_fclk_cycles, + 
mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); for (k = 0; k < s->num_active_planes; ++k) { bool cursor_not_enough_urgent_latency_hiding = 0; - double line_time_us; + double line_time_us = 0.0; - calculate_cursor_req_attributes( - display_cfg->plane_descriptors[k].cursor.cursor_width, - display_cfg->plane_descriptors[k].cursor.cursor_bpp, + line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / + ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); + if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) { + calculate_cursor_req_attributes( + display_cfg->plane_descriptors[k].cursor.cursor_width, + display_cfg->plane_descriptors[k].cursor.cursor_bpp, - // output - &s->cursor_lines_per_chunk[k], - &s->cursor_bytes_per_line[k], - &s->cursor_bytes_per_chunk[k], - &s->cursor_bytes[k]); - - line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); - - calculate_cursor_urgent_burst_factor( - mode_lib->ip.cursor_buffer_size, - display_cfg->plane_descriptors[k].cursor.cursor_width, - s->cursor_bytes_per_chunk[k], - s->cursor_lines_per_chunk[k], - line_time_us, - mode_lib->mp.UrgentLatency, + // output + &s->cursor_lines_per_chunk[k], + &s->cursor_bytes_per_line[k], + &s->cursor_bytes_per_chunk[k], + &s->cursor_bytes[k]); + + calculate_cursor_urgent_burst_factor( + mode_lib->ip.cursor_buffer_size, + display_cfg->plane_descriptors[k].cursor.cursor_width, + s->cursor_bytes_per_chunk[k], + s->cursor_lines_per_chunk[k], + line_time_us, + mode_lib->mp.UrgentLatency, - // output - &mode_lib->mp.UrgentBurstFactorCursor[k], - &cursor_not_enough_urgent_latency_hiding); + // output + &mode_lib->mp.UrgentBurstFactorCursor[k], + &cursor_not_enough_urgent_latency_hiding); + } mode_lib->mp.UrgentBurstFactorCursorPre[k] = mode_lib->mp.UrgentBurstFactorCursor[k]; CalculateUrgentBurstFactor( @@ -10676,7 +10759,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.TWait[k] = CalculateTWait( display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns, mode_lib->mp.UrgentLatency, - mode_lib->mp.TripToMemory); + mode_lib->mp.TripToMemory, + !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ? 
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0); myPipe->Dppclk = mode_lib->mp.Dppclk[k]; myPipe->Dispclk = mode_lib->mp.Dispclk; @@ -10722,7 +10807,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format; CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters; CalculatePrefetchSchedule_params->VStartup = s->MaxVStartupLines[k]; - CalculatePrefetchSchedule_params->MaxVStartup = s->MaxVStartupLines[k]; CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes; CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable; CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled; @@ -10808,9 +10892,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex if (mode_lib->mp.dst_y_prefetch[k] < 2) s->DestinationLineTimesForPrefetchLessThan2 = true; - if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ || - mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__) + if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ || + mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) { s->VRatioPrefetchMoreThanMax = true; + dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__); + dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__); + dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax); + } if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) { dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]); @@ -10994,6 +11082,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.dpte_row_height_chroma[k], mode_lib->mp.use_one_row_for_frame_flip[k], mode_lib->ip.max_flip_time_us, + mode_lib->ip.max_flip_time_lines, s->per_pipe_flip_bytes[k], mode_lib->mp.meta_row_bytes[k], mode_lib->mp.meta_row_height[k], @@ -11143,6 +11232,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex s->mmSOCParameters.USRRetrainingLatency = 0; s->mmSOCParameters.SMNLatency = 0; s->mmSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index); + s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index); + s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock; + s->mmSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type; CalculateWatermarks_params->display_cfg = display_cfg; CalculateWatermarks_params->USRRetrainingRequired = false; @@ -11162,7 +11254,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex 
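/*
 * Editor's aside (illustrative sketch, not part of the patch): the
 * mSOCParameters hunks above derive several latencies by converting a cycle
 * count into microseconds with a clock expressed in MHz (1 MHz is one cycle
 * per microsecond); for example, df_response_time_us is
 * df_qos_response_time_fclk_cycles divided by the fabric clock.  A
 * hypothetical helper capturing that conversion:
 */
double cycles_to_us(double cycles, double clock_mhz)
{
	/* clock_mhz cycles elapse each microsecond, so cycles / clock_mhz is a time in us. */
	return cycles / clock_mhz;
}

/* e.g.: df_response_time_us = cycles_to_us(df_qos_response_time_fclk_cycles, fabric_clock_mhz); */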
CalculateWatermarks_params->DETBufferSizeC = mode_lib->mp.DETBufferSizeC; CalculateWatermarks_params->SwathHeightY = mode_lib->mp.SwathHeightY; CalculateWatermarks_params->SwathHeightC = mode_lib->mp.SwathHeightC; - //CalculateWatermarks_params->LBBitPerPixel = 57; //FIXME_STAGE2 CalculateWatermarks_params->SwathWidthY = mode_lib->mp.SwathWidthY; CalculateWatermarks_params->SwathWidthC = mode_lib->mp.SwathWidthC; CalculateWatermarks_params->BytePerPixelDETY = mode_lib->mp.BytePerPixelInDETY; @@ -11203,6 +11294,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex } } + calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines); + dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index); dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz)); @@ -11491,9 +11584,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params) { + dml2_printf("DML::%s: ------------- START ----------\n", __func__); bool result = dml_core_mode_programming(in_out_params); - dml2_printf("DML::%s: ------------- START ----------\n", __func__); dml2_printf("DML::%s: result = %0d\n", __func__, result); dml2_printf("DML::%s: ------------- DONE ----------\n", __func__); return result; @@ -12186,10 +12279,11 @@ void dml2_core_calcs_get_pipe_regs(const struct dml2_display_cfg *display_cfg, void dml2_core_calcs_get_global_sync_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, union dml2_global_sync_programming *out, int pipe_index) { - out->dcn4.vready_offset_pixels = dml_get_vready_offset(mode_lib, pipe_index); - out->dcn4.vstartup_lines = dml_get_vstartup_calculated(mode_lib, pipe_index); - out->dcn4.vupdate_offset_pixels = dml_get_vupdate_offset(mode_lib, pipe_index); - out->dcn4.vupdate_vupdate_width_pixels = dml_get_vupdate_width(mode_lib, pipe_index); + out->dcn4x.vready_offset_pixels = dml_get_vready_offset(mode_lib, pipe_index); + out->dcn4x.vstartup_lines = dml_get_vstartup_calculated(mode_lib, pipe_index); + out->dcn4x.vupdate_offset_pixels = dml_get_vupdate_offset(mode_lib, pipe_index); + out->dcn4x.vupdate_vupdate_width_pixels = dml_get_vupdate_width(mode_lib, pipe_index); + out->dcn4x.pstate_keepout_start_lines = dml_get_pstate_keepout_dst_lines(mode_lib, pipe_index); } void dml2_core_calcs_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index) @@ -12197,6 +12291,25 @@ void dml2_core_calcs_get_stream_programming(const struct dml2_core_internal_disp dml2_core_calcs_get_global_sync_programming(mode_lib, &out->global_sync, pipe_index); } +void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, + const struct display_configuation_with_meta *display_cfg, + struct dmub_cmd_fams2_global_config *fams2_global_config) +{ + fams2_global_config->features.bits.enable = display_cfg->stage3.fams2_required; + + if (fams2_global_config->features.bits.enable) { + fams2_global_config->features.bits.enable_stall_recovery = true; + fams2_global_config->features.bits.allow_delay_check_mode = FAMS2_ALLOW_DELAY_CHECK_FROM_START; + + fams2_global_config->max_allow_delay_us = 
mode_lib->ip_caps.fams2.max_allow_delay_us; + fams2_global_config->lock_wait_time_us = mode_lib->ip_caps.fams2.lock_timeout_us; + fams2_global_config->recovery_timeout_us = mode_lib->ip_caps.fams2.recovery_timeout_us; + fams2_global_config->hwfq_flip_programming_delay_us = mode_lib->ip_caps.fams2.flip_programming_delay_us; + + fams2_global_config->num_streams = display_cfg->display_config.num_streams; + } +} + void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_fams2_stream_static_state *fams2_programming, @@ -12209,6 +12322,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna unsigned int i; + if (display_cfg->display_config.overrides.all_streams_blanked) { + /* stream is blanked, so do nothing */ + return; + } + /* from display configuration */ fams2_programming->htotal = (uint16_t)stream_descriptor->timing.h_total; fams2_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total; @@ -12368,6 +12486,7 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp { double phantom_processing_delay_pix; unsigned int phantom_processing_delay_lines; + unsigned int phantom_min_v_active_lines; unsigned int phantom_v_active_lines; unsigned int phantom_v_startup_lines; unsigned int phantom_v_blank_lines; @@ -12377,14 +12496,16 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp phantom_processing_delay_pix = (double)((mode_lib->ip.subvp_fw_processing_delay_us + mode_lib->ip.subvp_pstate_allow_width_us) * ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.pixel_clock_khz / 1000)); phantom_processing_delay_lines = (unsigned int)(phantom_processing_delay_pix / (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total); - dml2_core_shared_div_rem(phantom_processing_delay_pix, + dml2_core_div_rem(phantom_processing_delay_pix, display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total, &rem); if (rem) phantom_processing_delay_lines++; phantom_v_startup_lines = dml_get_plane_max_vstartup_lines(mode_lib, plane_index); - phantom_v_active_lines = phantom_processing_delay_lines + dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index) + mode_lib->ip.subvp_swath_height_margin_lines; + phantom_min_v_active_lines = (unsigned int)math_ceil((double)dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index) / + display_cfg->plane_descriptors[plane_index].composition.scaler_info.plane0.v_ratio); + phantom_v_active_lines = phantom_processing_delay_lines + phantom_min_v_active_lines + mode_lib->ip.subvp_swath_height_margin_lines; // phantom_vblank = max(vbp(vstartup) + vactive + vfp(always 1) + vsync(can be 1), main_vblank) phantom_v_blank_lines = phantom_v_startup_lines + 1 + 1; @@ -12396,8 +12517,8 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp // phantom_vtotal = vactive + vblank out->phantom_v_total = phantom_v_active_lines + phantom_v_blank_lines; - out->phantom_min_v_active = dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index); - out->phantom_v_startup = dml_get_plane_max_vstartup_lines(mode_lib, plane_index); + out->phantom_min_v_active = phantom_min_v_active_lines; + out->phantom_v_startup = phantom_v_startup_lines; out->vblank_reserved_time_us = 
display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000; #if defined(__DML_VBA_DEBUG__) @@ -12418,7 +12539,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.ScaleRatioAndTapsSupport = mode_lib->ms.support.ScaleRatioAndTapsSupport; out->informative.mode_support_info.SourceFormatPixelAndScanSupport = mode_lib->ms.support.SourceFormatPixelAndScanSupport; out->informative.mode_support_info.P2IWith420 = mode_lib->ms.support.P2IWith420; - out->informative.mode_support_info.DSCOnlyIfNecessaryWithBPP = mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP; + out->informative.mode_support_info.DSCOnlyIfNecessaryWithBPP = false; out->informative.mode_support_info.DSC422NativeNotSupported = mode_lib->ms.support.DSC422NativeNotSupported; out->informative.mode_support_info.LinkRateDoesNotMatchDPVersion = mode_lib->ms.support.LinkRateDoesNotMatchDPVersion; out->informative.mode_support_info.LinkRateForMultistreamNotIndicated = mode_lib->ms.support.LinkRateForMultistreamNotIndicated; @@ -12611,7 +12732,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.misc.cstate_max_cap_mode = dml_get_cstate_max_cap_mode(mode_lib); - out->min_clocks.dcn4.dpprefclk_khz = (int unsigned)dml_get_global_dppclk_khz(mode_lib); + out->min_clocks.dcn4x.dpprefclk_khz = (int unsigned)dml_get_global_dppclk_khz(mode_lib); out->informative.qos.max_active_fclk_change_latency_supported = dml_get_fclk_change_latency(mode_lib); @@ -12724,13 +12845,13 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod } } - out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles - / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0) - + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock - + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0); + out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles + / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0) + + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock + + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0); - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) { + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) { if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) { out->informative.misc.ROBUrgencyAvoidance = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h index 
b280ab573fbb29..df2d1550a14b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_CORE_DCN4_CALCS_H__ #define __DML2_CORE_DCN4_CALCS_H__ @@ -30,6 +29,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index); void dml2_core_calcs_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index); void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_fams2_stream_static_state *fams2_programming, enum dml2_uclk_pstate_support_method pstate_method, int plane_index); +void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_cmd_fams2_global_config *fams2_global_config); void dml2_core_calcs_get_dpte_row_height(unsigned int *dpte_row_height, struct dml2_core_internal_display_mode_lib *mode_lib, bool is_plane1, enum dml2_source_format_class SourcePixelFormat, enum dml2_swizzle_mode SurfaceTiling, enum dml2_rotation_angle ScanDirection, unsigned int pitch, unsigned int GPUVMMinPageSizeKBytes); void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c index f56abe9ab9196b..28394de028855e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "dml2_core_factory.h" #include "dml2_core_dcn4.h" #include "dml2_external_lib_deps.h" @@ -11,7 +10,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance { bool result = false; - if (!out) + if (out == 0) return false; memset(out, 0, sizeof(struct dml2_core_instance)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h index 53636a8f52aa9d..411c514fe65c7e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
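/*
 * Editor's aside (illustrative sketch, not part of the patch): the
 * get_stream_support_info() hunk a few files above now derives the phantom
 * (SubVP) stream's minimum active lines by dividing the subviewport lines
 * held in MALL by the plane's vertical scaling ratio before adding the
 * firmware processing delay and the swath-height margin.  A hypothetical
 * standalone form, with placeholder names:
 */
#include <math.h>

unsigned int phantom_v_active_lines(unsigned int subviewport_lines_in_mall,
				    double v_ratio,
				    unsigned int processing_delay_lines,
				    unsigned int swath_height_margin_lines)
{
	/*
	 * Dividing by v_ratio converts subviewport (source) lines into the
	 * active (output) lines needed to cover them.
	 */
	unsigned int min_v_active = (unsigned int)ceil(subviewport_lines_in_mall / v_ratio);

	return processing_delay_lines + min_v_active + swath_height_margin_lines;
}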
- #ifndef __DML2_CORE_FACTORY_H__ #define __DML2_CORE_FACTORY_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c index 81f0a6f19f87b7..8f3c1c0b1cc103 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c @@ -779,7 +779,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000; mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config); mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000); - mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params); + mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params); mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table); #if defined(__DML_VBA_DEBUG__) @@ -1776,32 +1776,32 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou #endif mode_lib->ms.UrgLatency = CalculateUrgentLatency( - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us, mode_lib->soc.do_urgent_latency_adjustment, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz, mode_lib->ms.FabricClock, mode_lib->ms.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles, + 
mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); mode_lib->ms.TripToMemory = CalculateTripToMemory( mode_lib->ms.UrgLatency, mode_lib->ms.FabricClock, mode_lib->ms.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); mode_lib->ms.TripToMemory = math_max2(mode_lib->ms.UrgLatency, mode_lib->ms.TripToMemory); @@ -1995,21 +1995,21 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = true; mode_lib->ms.support.avg_urgent_latency_us - = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0) - + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock) - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0); + = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0) + + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock) + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0); mode_lib->ms.support.avg_non_urgent_latency_us - = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0) - + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock) - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0); + = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0) + + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock) + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0); double outstanding_latency_us = 0; for (k = 
0; k < mode_lib->ms.num_active_planes; k++) { - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) { + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) { outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_luma[k] / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)); @@ -2029,7 +2029,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou #endif } - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4 && mode_lib->ms.BytePerPixelC[k] > 0) { + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x && mode_lib->ms.BytePerPixelC[k] > 0) { outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_chroma[k] / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)); @@ -2242,11 +2242,15 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou } double min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active]; + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3) + s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes)); CalculateExtraLatency( display_cfg, mode_lib->ip.rob_buffer_size_kbytes, - 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles, s->ReorderingBytes, mode_lib->ms.DCFCLK, mode_lib->ms.FabricClock, @@ -2713,13 +2717,13 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou //Re-ordering Buffer Support Check mode_lib->ms.support.max_urgent_latency_us - = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0) - + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock - + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0); + = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0) + + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock + + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0); - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) { + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) { if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / 
mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= mode_lib->ms.support.max_urgent_latency_us) { mode_lib->ms.support.ROBSupport = true; @@ -2727,7 +2731,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou mode_lib->ms.support.ROBSupport = false; } } else { - if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn3.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) { + if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) { mode_lib->ms.support.ROBSupport = true; } else { mode_lib->ms.support.ROBSupport = false; @@ -5050,7 +5054,7 @@ static void calculate_mcache_row_bytes( unsigned int meta_per_mvmpg_per_channel_ub = 0; if (p->gpuvm_enable) { - meta_per_mvmpg_per_channel = (float)vmpg_bytes / 256 / p->num_chans; + meta_per_mvmpg_per_channel = (float)vmpg_bytes / (float)256 / p->num_chans; //but using the est_blk_per_vmpg between 2 and 4, to be not as pessimestic if (p->surf_vert && vmpg_bytes > blk_bytes) { @@ -5059,7 +5063,7 @@ static void calculate_mcache_row_bytes( *p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel)); // dcc_dr_oh_nom } else { - meta_per_mvmpg_per_channel = (float)blk_bytes / 256 / p->num_chans; + meta_per_mvmpg_per_channel = (float)blk_bytes / (float)256 / p->num_chans; if (!p->surf_vert) *p->dcc_dram_bw_nom_overhead_factor = 1 + 1.0 / 256.0; @@ -5881,7 +5885,7 @@ static double CalculateUrgentLatency( double fabric_max_transport_latency_margin) { double urgent_latency = 0; - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { urgent_latency = (df_qos_response_time_fclk_cycles + mall_overhead_fclk_cycles) / FabricClock + max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1 + fabric_max_transport_latency_margin / 100.0) + urgent_ramp_uclk_cycles / uclk_freq_mhz * (1 + umc_urgent_ramp_latency_margin / 100.0); @@ -5892,7 +5896,7 @@ static double CalculateUrgentLatency( } } #ifdef __DML_VBA_DEBUG__ - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles); dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz); @@ -5922,7 +5926,7 @@ static double CalculateTripToMemory( double fabric_max_transport_latency_margin) { double trip_to_memory_us; - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { trip_to_memory_us = mall_overhead_fclk_cycles / FabricClock + max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0) + trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0); @@ -5931,7 +5935,7 @@ static double CalculateTripToMemory( } #ifdef __DML_VBA_DEBUG__ - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles); dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", 
__func__, mall_overhead_fclk_cycles); @@ -5961,7 +5965,7 @@ static double CalculateMetaTripToMemory( double fabric_max_transport_latency_margin) { double meta_trip_to_memory_us; - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { meta_trip_to_memory_us = meta_trip_to_memory_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0) + meta_trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0); } else { @@ -5969,7 +5973,7 @@ static double CalculateMetaTripToMemory( } #ifdef __DML_VBA_DEBUG__ - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles); dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles); @@ -6460,8 +6464,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch p->SwathHeightC[k] = l->MaximumSwathHeightC[k] / 2; l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2; l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2; - p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;; - p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;; + p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64; + p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 
128 : 64; } if (p->SwathHeightC[k] == 0) @@ -7165,7 +7169,7 @@ static void calculate_tdlut_setting( *p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width; //the delivery cycles is DispClk cycles per line * number of lines * number of slices tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width / 2.0, 1) * tdlut_mpc_width * tdlut_mpc_width; - tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / 9.0; + tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1); } else { //tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements *p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256); @@ -7485,7 +7489,7 @@ static void CalculateExtraLatency( max_request_size_bytes = request_size_bytes_chroma[k]; } - if (qos_type == dml2_qos_param_type_dcn4) { + if (qos_type == dml2_qos_param_type_dcn4x) { *ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK; *ExtraLatency = *ExtraLatency_sr; if (max_oustanding_when_urgent_expected) @@ -7501,11 +7505,14 @@ static void CalculateExtraLatency( #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type); + dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode); + dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips); dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected); dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock); dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK); dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW); dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles); + dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes); dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb); dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency); dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr); @@ -7739,7 +7746,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime); s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC; - s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor; s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor; s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw); @@ -9304,6 +9310,10 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma, 1.0)); } + if (dpte_groups_per_row_luma_ub <= 2) { + dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1; + } + dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]); dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]); dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]); @@ -9332,6 +9342,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE } else { dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma, 1.0)); } + if (dpte_groups_per_row_chroma_ub <= 2) { + 
dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1; + } dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]); dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma); dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub); @@ -9386,8 +9399,8 @@ static void CalculateVMGroupAndRequestTimes( double TimePerVMRequestVBlank[], double TimePerVMRequestFlip[]) { - unsigned int num_group_per_lower_vm_stage = 0; - unsigned int num_req_per_lower_vm_stage = 0; + unsigned int num_group_per_lower_vm_stage = 1; + unsigned int num_req_per_lower_vm_stage = 1; #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces); @@ -9451,6 +9464,14 @@ static void CalculateVMGroupAndRequestTimes( double line_time = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz; + if (num_group_per_lower_vm_stage_flip <= 2) { + num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage_flip + 1; + } + + if (num_group_per_lower_vm_stage_pref <= 2) { + num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage_pref + 1; + } + TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref; TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip; TimePerVMRequestVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_req_per_lower_vm_stage_pref; @@ -9814,14 +9835,14 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e mode_lib->mp.num_active_pipes = dml_get_num_active_pipes(display_cfg->num_planes, cfg_support_info); dml_calc_pipe_plane_mapping(cfg_support_info, mode_lib->mp.pipe_plane); - mode_lib->mp.Dcfclk = programming->min_clocks.dcn4.active.dcfclk_khz / 1000.0; - mode_lib->mp.FabricClock = programming->min_clocks.dcn4.active.fclk_khz / 1000.0; - mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table.dram_config); - mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4.active.uclk_khz / 1000.0; - mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4.dpprefclk_khz / 1000.0; - s->SOCCLK = (double)programming->min_clocks.dcn4.socclk_khz / 1000; - mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params); - mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table); + mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0; + mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0; + mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config); + mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0; + mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0; + s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000; + mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4x.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params); + mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4x.active.uclk_khz, 
&mode_lib->soc.clk_table); for (k = 0; k < s->num_active_planes; ++k) { unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index; @@ -9856,18 +9877,18 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e for (k = 0; k < s->num_active_planes; ++k) { mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used; - mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4.dppclk_khz / 1000.0; + mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0; dml2_assert(mode_lib->mp.Dppclk[k] > 0); } for (k = 0; k < s->num_active_planes; ++k) { unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index; - mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4.dscclk_khz / 1000.0; + mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0; dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]); } - mode_lib->mp.Dispclk = programming->min_clocks.dcn4.dispclk_khz / 1000.0; - mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4.deepsleep_dcfclk_khz / 1000.0; + mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0; + mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0; dml2_assert(mode_lib->mp.Dcfclk > 0); dml2_assert(mode_lib->mp.FabricClock > 0); @@ -10388,11 +10409,16 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params); } + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3) + s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes)); + CalculateExtraLatency( display_cfg, mode_lib->ip.rob_buffer_size_kbytes, - 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles, - s->ReorderBytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles, + s->ReorderingBytes, mode_lib->mp.Dcfclk, mode_lib->mp.FabricClock, mode_lib->ip.pixel_chunk_size_kbytes, @@ -10465,32 +10491,32 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e mode_lib->mp.WritebackDelay[k] = mode_lib->mp.WritebackDelay[j]; mode_lib->mp.UrgentLatency = CalculateUrgentLatency( - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us, mode_lib->soc.do_urgent_latency_adjustment, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us, - mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz, + 
mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz, mode_lib->mp.FabricClock, mode_lib->mp.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); mode_lib->mp.TripToMemory = CalculateTripToMemory( mode_lib->mp.UrgentLatency, mode_lib->mp.FabricClock, mode_lib->mp.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); mode_lib->mp.TripToMemory = math_max2(mode_lib->mp.UrgentLatency, mode_lib->mp.TripToMemory); @@ -10499,10 +10525,10 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e mode_lib->mp.FabricClock, mode_lib->mp.uclk_freq_mhz, mode_lib->soc.qos_parameters.qos_type, - mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.meta_trip_adder_fclk_cycles, - mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin, - mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin); + mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.meta_trip_adder_fclk_cycles, + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin, + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); for (k = 0; k < s->num_active_planes; ++k) { calculate_cursor_req_attributes( @@ -11945,14 +11971,14 @@ void 
dml2_core_shared_get_pipe_regs(const struct dml2_display_cfg *display_cfg, void dml2_core_shared_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index) { - // out->min_clocks.dcn4.dscclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); // FIXME_STAGE2 - // out->min_clocks.dcn4.dtbclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); - // out->min_clocks.dcn4.phyclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); - - out->global_sync.dcn4.vready_offset_pixels = mode_lib->mp.VReadyOffsetPix[mode_lib->mp.pipe_plane[pipe_index]]; - out->global_sync.dcn4.vstartup_lines = mode_lib->mp.VStartup[mode_lib->mp.pipe_plane[pipe_index]]; - out->global_sync.dcn4.vupdate_offset_pixels = mode_lib->mp.VUpdateOffsetPix[mode_lib->mp.pipe_plane[pipe_index]]; - out->global_sync.dcn4.vupdate_vupdate_width_pixels = mode_lib->mp.VUpdateWidthPix[mode_lib->mp.pipe_plane[pipe_index]]; + // out->min_clocks.dcn4x.dscclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); // FIXME_STAGE2 + // out->min_clocks.dcn4x.dtbclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); + // out->min_clocks.dcn4x.phyclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); + + out->global_sync.dcn4x.vready_offset_pixels = mode_lib->mp.VReadyOffsetPix[mode_lib->mp.pipe_plane[pipe_index]]; + out->global_sync.dcn4x.vstartup_lines = mode_lib->mp.VStartup[mode_lib->mp.pipe_plane[pipe_index]]; + out->global_sync.dcn4x.vupdate_offset_pixels = mode_lib->mp.VUpdateOffsetPix[mode_lib->mp.pipe_plane[pipe_index]]; + out->global_sync.dcn4x.vupdate_vupdate_width_pixels = mode_lib->mp.VUpdateWidthPix[mode_lib->mp.pipe_plane[pipe_index]]; } void dml2_core_shared_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_idx) @@ -12255,7 +12281,7 @@ void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mo out->informative.misc.cstate_max_cap_mode = mode_lib->mp.DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE; - out->min_clocks.dcn4.dpprefclk_khz = (int unsigned)(mode_lib->mp.GlobalDPPCLK * 1000.0); + out->min_clocks.dcn4x.dpprefclk_khz = (int unsigned)(mode_lib->mp.GlobalDPPCLK * 1000.0); out->informative.qos.max_active_fclk_change_latency_supported = mode_lib->mp.MaxActiveFCLKChangeLatencySupported; @@ -12368,13 +12394,13 @@ void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mo } } - out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles - / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0) - + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock - + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock - * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0); + out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles + / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0) 
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock + + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock + * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0); - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) { + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) { if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) { out->informative.misc.ROBUrgencyAvoidance = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h deleted file mode 100644 index d76bda907ec8f2..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. - - -#ifndef __DML2_CORE_SHARED_H__ -#define __DML2_CORE_SHARED_H__ - -#define __DML_VBA_DEBUG__ -#define __DML2_CALCS_MAX_VRATIO_PRE_OTO__ 4.0 // 0); + return dividend / divisor; + +} + +const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type) +{ + switch (bw_type) { + case (dml2_core_internal_bw_sdp): + return("dml2_core_internal_bw_sdp"); + case (dml2_core_internal_bw_dram): + return("dml2_core_internal_bw_dram"); + case (dml2_core_internal_bw_max): + return("dml2_core_internal_bw_max"); + default: + return("dml2_core_internal_bw_unknown"); + } +} + +bool dml2_core_utils_is_420(enum dml2_source_format_class source_format) +{ + bool val = false; + + switch (source_format) { + case dml2_444_8: + val = 0; + break; + case dml2_444_16: + val = 0; + break; + case dml2_444_32: + val = 0; + break; + case dml2_444_64: + val = 0; + break; + case dml2_420_8: + val = 1; + break; + case dml2_420_10: + val = 1; + break; + case dml2_420_12: + val = 1; + break; + case dml2_rgbe_alpha: + val = 0; + break; + case dml2_rgbe: + val = 0; + break; + case dml2_mono_8: + val = 0; + break; + case dml2_mono_16: + val = 0; + break; + default: + DML2_ASSERT(0); + break; + } + return val; +} + +void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only) +{ + dml2_printf("DML: ===================================== \n"); + dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n"); + if (!fail_only || support->ScaleRatioAndTapsSupport == 0) + dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport); + if (!fail_only || support->SourceFormatPixelAndScanSupport == 0) + dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport); + if (!fail_only || support->ViewportSizeSupport == 0) + dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport); + if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1) + dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion); + if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1) + dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated); + if (!fail_only || 
support->BPPForMultistreamNotIndicated == 1) + dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated); + if (!fail_only || support->MultistreamWithHDMIOreDP == 1) + dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP); + if (!fail_only || support->ExceededMultistreamSlots == 1) + dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots); + if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1) + dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink); + if (!fail_only || support->NotEnoughLanesForMSO == 1) + dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO); + if (!fail_only || support->P2IWith420 == 1) + dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420); + if (!fail_only || support->DSC422NativeNotSupported == 1) + dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported); + if (!fail_only || support->DSCSlicesODMModeSupported == 0) + dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported); + if (!fail_only || support->NotEnoughDSCUnits == 1) + dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits); + if (!fail_only || support->NotEnoughDSCSlices == 1) + dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices); + if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1) + dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe); + if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1) + dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen); + if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1) + dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported); + if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0) + dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport); + if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1) + dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported); + if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1) + dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState); + if (!fail_only || support->ROBSupport == 0) + dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport); + if (!fail_only || support->OutstandingRequestsSupport == 0) + dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport); + if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0) + dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance); + if (!fail_only || support->DISPCLK_DPPCLK_Support == 0) + dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support); + if (!fail_only || support->TotalAvailablePipesSupport == 0) + dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport); + if (!fail_only || 
support->NumberOfOTGSupport == 0) + dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport); + if (!fail_only || support->NumberOfHDMIFRLSupport == 0) + dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport); + if (!fail_only || support->NumberOfDP2p0Support == 0) + dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support); + if (!fail_only || support->EnoughWritebackUnits == 0) + dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits); + if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0) + dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport); + if (!fail_only || support->WritebackLatencySupport == 0) + dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport); + if (!fail_only || support->CursorSupport == 0) + dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport); + if (!fail_only || support->PitchSupport == 0) + dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport); + if (!fail_only || support->ViewportExceedsSurface == 1) + dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface); + if (!fail_only || support->PrefetchSupported == 0) + dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported); + if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0) + dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport); + if (!fail_only || support->AvgBandwidthSupport == 0) + dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport); + if (!fail_only || support->DynamicMetadataSupported == 0) + dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported); + if (!fail_only || support->VRatioInPrefetchSupported == 0) + dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported); + if (!fail_only || support->PTEBufferSizeNotExceeded == 1) + dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded); + if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1) + dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded); + if (!fail_only || support->ExceededMALLSize == 1) + dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); + if (!fail_only || support->g6_temp_read_support == 0) + dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support); + if (!fail_only || support->ImmediateFlipSupport == 0) + dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); + if (!fail_only || support->LinkCapacitySupport == 0) + dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport); + + if (!fail_only || support->ModeSupport == 0) + dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport); + dml2_printf("DML: ===================================== \n"); +} + +const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type) +{ + switch (dml2_core_internal_soc_state_type) { + case (dml2_core_internal_soc_state_sys_idle): + return("dml2_core_internal_soc_state_sys_idle"); + case (dml2_core_internal_soc_state_sys_active): + 
return("dml2_core_internal_soc_state_sys_active"); + case (dml2_core_internal_soc_state_svp_prefetch): + return("dml2_core_internal_soc_state_svp_prefetch"); + case dml2_core_internal_soc_state_max: + default: + return("dml2_core_internal_soc_state_unknown"); + } +} + + +void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg) +{ + for (unsigned int k = 0; k < display_cfg->num_planes; k++) { + double bpc = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.bpc; + if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_disable) { + switch (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format) { + case dml2_444: + out_bpp[k] = bpc * 3; + break; + case dml2_s422: + out_bpp[k] = bpc * 2; + break; + case dml2_n422: + out_bpp[k] = bpc * 2; + break; + case dml2_420: + default: + out_bpp[k] = bpc * 1.5; + break; + } + } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable) { + out_bpp[k] = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.dsc_compressed_bpp_x16 / 16; + } else { + out_bpp[k] = 0; + } +#ifdef __DML_VBA_DEBUG__ + dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc); + dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable); + dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]); +#endif + } +} + +unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int multiple, bool up) +{ + unsigned int remainder; + + if (multiple == 0) + return num; + + remainder = num % multiple; + if (remainder == 0) + return num; + + if (up) + return (num + multiple - remainder); + else + return (num - remainder); +} + +unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info) +{ + unsigned int num_active_pipes = 0; + + for (unsigned int k = 0; k < num_planes; k++) { + num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used; + } + +#ifdef __DML_VBA_DEBUG__ + dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes); +#endif + return num_active_pipes; +} + +void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane) +{ + unsigned int pipe_idx = 0; + + for (unsigned int k = 0; k < DML2_MAX_PLANES; ++k) { + pipe_plane[k] = __DML2_CALCS_PIPE_NO_PLANE__; + } + + for (unsigned int plane_idx = 0; plane_idx < DML2_MAX_PLANES; plane_idx++) { + for (int i = 0; i < cfg_support_info->plane_support_info[plane_idx].dpps_used; i++) { + pipe_plane[pipe_idx] = plane_idx; + pipe_idx++; + } + } +} + +bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg) +{ + bool is_phantom = false; + + if (plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe || + plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return) { + is_phantom = true; + } + + return is_phantom; +} + +unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode) +{ + switch (sw_mode) { + case (dml2_sw_linear): + return 256; break; + case (dml2_sw_256b_2d): + return 256; break; + case (dml2_sw_4kb_2d): + 
return 4096; break; + case (dml2_sw_64kb_2d): + return 65536; break; + case (dml2_sw_256kb_2d): + return 262144; break; + case (dml2_gfx11_sw_linear): + return 256; break; + case (dml2_gfx11_sw_64kb_d): + return 65536; break; + case (dml2_gfx11_sw_64kb_d_t): + return 65536; break; + case (dml2_gfx11_sw_64kb_d_x): + return 65536; break; + case (dml2_gfx11_sw_64kb_r_x): + return 65536; break; + case (dml2_gfx11_sw_256kb_d_x): + return 262144; break; + case (dml2_gfx11_sw_256kb_r_x): + return 262144; break; + default: + DML2_ASSERT(0); + return 256; + }; +} + + +bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan) +{ + bool is_vert = false; + if (Scan == dml2_rotation_90 || Scan == dml2_rotation_270) { + is_vert = true; + } else { + is_vert = false; + } + return is_vert; +} + + +int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode) +{ + int unsigned version = 0; + + if (sw_mode == dml2_sw_linear || + sw_mode == dml2_sw_256b_2d || + sw_mode == dml2_sw_4kb_2d || + sw_mode == dml2_sw_64kb_2d || + sw_mode == dml2_sw_256kb_2d) { + version = 12; + } else if (sw_mode == dml2_gfx11_sw_linear || + sw_mode == dml2_gfx11_sw_64kb_d || + sw_mode == dml2_gfx11_sw_64kb_d_t || + sw_mode == dml2_gfx11_sw_64kb_d_x || + sw_mode == dml2_gfx11_sw_64kb_r_x || + sw_mode == dml2_gfx11_sw_256kb_d_x || + sw_mode == dml2_gfx11_sw_256kb_r_x) { + version = 11; + } else { + dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode); + DML2_ASSERT(0); + } + + return version; +} + +unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params) +{ + unsigned int i; + unsigned int index = 0; + + for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { + dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz); + + if (i == 0) + index = 0; + else + index = i - 1; + + if (uclk_freq_khz < per_uclk_dpm_params[i].minimum_uclk_khz || + per_uclk_dpm_params[i].minimum_uclk_khz == 0) { + break; + } + } +#if defined(__DML_VBA_DEBUG__) + dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz); + dml2_printf("DML::%s: index = %d\n", __func__, index); +#endif + return index; +} + +unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table) +{ + unsigned int i; + bool clk_entry_found = 0; + + for (i = 0; i < clk_table->uclk.num_clk_values; i++) { + dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]); + + if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) { + clk_entry_found = 1; + break; + } + } + + dml2_assert(clk_entry_found); +#if defined(__DML_VBA_DEBUG__) + dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz); + dml2_printf("DML::%s: index = %d\n", __func__, i); +#endif + return i; +} + +bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format) +{ + bool ret_val = 0; + + if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha)) + ret_val = 1; + + return ret_val; +} + +unsigned int dml2_core_utils_log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend) +{ + if (a == 0) + return 0; + + return (math_log2_approx(a) - subtrahend); +} + +static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters *phantom, const struct 
dml2_stream_parameters *main,
+	const struct dml2_implicit_svp_meta *meta)
+{
+	memcpy(phantom, main, sizeof(struct dml2_stream_parameters));
+
+	phantom->timing.v_total = meta->v_total;
+	phantom->timing.v_active = meta->v_active;
+	phantom->timing.v_front_porch = meta->v_front_porch;
+	phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
+	phantom->timing.drr_config.enabled = false;
+}
+
+static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *phantom, const struct dml2_plane_parameters *main,
+	const struct dml2_stream_parameters *phantom_stream, int phantom_stream_index, const struct dml2_stream_parameters *main_stream)
+{
+	memcpy(phantom, main, sizeof(struct dml2_plane_parameters));
+
+	phantom->stream_index = phantom_stream_index;
+	phantom->overrides.refresh_from_mall = dml2_refresh_from_mall_mode_override_force_disable;
+	phantom->overrides.legacy_svp_config = dml2_svp_mode_override_phantom_pipe_no_data_return;
+	phantom->composition.viewport.plane0.height = (long int unsigned) math_min2(math_ceil2(
+		(double)main->composition.scaler_info.plane0.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+		(double)main->composition.viewport.plane0.height);
+	phantom->composition.viewport.plane1.height = (long int unsigned) math_min2(math_ceil2(
+		(double)main->composition.scaler_info.plane1.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+		(double)main->composition.viewport.plane1.height);
+	phantom->immediate_flip = false;
+	phantom->dynamic_meta_data.enable = false;
+	phantom->cursor.num_cursors = 0;
+	phantom->cursor.cursor_width = 0;
+	phantom->tdlut.setup_for_tdlut = false;
+}
+
+void dml2_core_utils_expand_implict_subvp(const struct display_configuation_with_meta *display_cfg, struct dml2_display_cfg *svp_expanded_display_cfg,
+	struct dml2_core_scratch *scratch)
+{
+	unsigned int stream_index, plane_index;
+	const struct dml2_plane_parameters *main_plane;
+	const struct dml2_stream_parameters *main_stream;
+	const struct dml2_stream_parameters *phantom_stream;
+
+	memcpy(svp_expanded_display_cfg, &display_cfg->display_config, sizeof(struct dml2_display_cfg));
+	memset(scratch->main_stream_index_from_svp_stream_index, 0, sizeof(int) * DML2_MAX_PLANES);
+	memset(scratch->svp_stream_index_from_main_stream_index, 0, sizeof(int) * DML2_MAX_PLANES);
+	memset(scratch->main_plane_index_to_phantom_plane_index, 0, sizeof(int) * DML2_MAX_PLANES);
+
+	if (!display_cfg->display_config.overrides.enable_subvp_implicit_pmo)
+		return;
+
+	/* disable unbounded requesting for all planes until stage 3 has been performed */
+	if (!display_cfg->stage3.performed) {
+		svp_expanded_display_cfg->overrides.hw.force_unbounded_requesting.enable = true;
+		svp_expanded_display_cfg->overrides.hw.force_unbounded_requesting.value = false;
+	}
+	// Create the phantom streams
+	for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
+		main_stream = &display_cfg->display_config.stream_descriptors[stream_index];
+		scratch->main_stream_index_from_svp_stream_index[stream_index] = stream_index;
+		scratch->svp_stream_index_from_main_stream_index[stream_index] = stream_index;
+
+		if (display_cfg->stage3.stream_svp_meta[stream_index].valid) {
+			// Create the phantom stream
+			create_phantom_stream_from_main_stream(&svp_expanded_display_cfg->stream_descriptors[svp_expanded_display_cfg->num_streams],
+				main_stream, &display_cfg->stage3.stream_svp_meta[stream_index]);
+
+			// Associate this phantom stream to the main stream
+			scratch->main_stream_index_from_svp_stream_index[svp_expanded_display_cfg->num_streams] = stream_index;
+			scratch->svp_stream_index_from_main_stream_index[stream_index] = svp_expanded_display_cfg->num_streams;
+
+			// Increment num streams
+			svp_expanded_display_cfg->num_streams++;
+		}
+	}
+
+	// Create the phantom planes
+	for (plane_index = 0; plane_index < display_cfg->display_config.num_planes; plane_index++) {
+		main_plane = &display_cfg->display_config.plane_descriptors[plane_index];
+
+		if (display_cfg->stage3.stream_svp_meta[main_plane->stream_index].valid) {
+			main_stream = &display_cfg->display_config.stream_descriptors[main_plane->stream_index];
+			phantom_stream = &svp_expanded_display_cfg->stream_descriptors[scratch->svp_stream_index_from_main_stream_index[main_plane->stream_index]];
+			create_phantom_plane_from_main_plane(&svp_expanded_display_cfg->plane_descriptors[svp_expanded_display_cfg->num_planes],
+				main_plane, phantom_stream, scratch->svp_stream_index_from_main_stream_index[main_plane->stream_index], main_stream);
+
+			// Associate this phantom plane to the main plane
+			scratch->phantom_plane_index_to_main_plane_index[svp_expanded_display_cfg->num_planes] = plane_index;
+			scratch->main_plane_index_to_phantom_plane_index[plane_index] = svp_expanded_display_cfg->num_planes;
+
+			// Increment num planes
+			svp_expanded_display_cfg->num_planes++;
+
+			// Adjust the main plane settings
+			svp_expanded_display_cfg->plane_descriptors[plane_index].overrides.legacy_svp_config = dml2_svp_mode_override_main_pipe;
+		}
+	}
+}
+
+bool dml2_core_utils_is_stream_encoder_required(const struct dml2_stream_parameters *stream_descriptor)
+{
+	switch (stream_descriptor->output.output_encoder) {
+	case dml2_dp:
+	case dml2_dp2p0:
+	case dml2_edp:
+	case dml2_hdmi:
+	case dml2_hdmifrl:
+		return true;
+	case dml2_none:
+	default:
+		return false;
+	}
+}
+bool dml2_core_utils_is_encoder_dsc_capable(const struct dml2_stream_parameters *stream_descriptor)
+{
+	switch (stream_descriptor->output.output_encoder) {
+	case dml2_dp:
+	case dml2_dp2p0:
+	case dml2_edp:
+	case dml2_hdmifrl:
+		return true;
+	case dml2_hdmi:
+	case dml2_none:
+	default:
+		return false;
+	}
+}
+
+
+bool dml2_core_utils_is_dio_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+	switch (stream_descriptor->output.output_encoder) {
+	case dml2_dp:
+	case dml2_edp:
+		return true;
+	case dml2_dp2p0:
+	case dml2_hdmi:
+	case dml2_hdmifrl:
+	case dml2_none:
+	default:
+		return false;
+	}
+}
+
+bool dml2_core_utils_is_hpo_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+	switch (stream_descriptor->output.output_encoder) {
+	case dml2_dp2p0:
+		return true;
+	case dml2_dp:
+	case dml2_edp:
+	case dml2_hdmi:
+	case dml2_hdmifrl:
+	case dml2_none:
+	default:
+		return false;
+	}
+}
+
+bool dml2_core_utils_is_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+	return dml2_core_utils_is_dio_dp_encoder(stream_descriptor)
+			|| dml2_core_utils_is_hpo_dp_encoder(stream_descriptor);
+}
+
+
+bool dml2_core_utils_is_dp_8b_10b_link_rate(enum dml2_output_link_dp_rate rate)
+{
+	switch (rate) {
+	case dml2_dp_rate_hbr:
+	case dml2_dp_rate_hbr2:
+	case dml2_dp_rate_hbr3:
+		return true;
+	case dml2_dp_rate_na:
+	case dml2_dp_rate_uhbr10:
+	case dml2_dp_rate_uhbr13p5:
+	case dml2_dp_rate_uhbr20:
+	default:
+		return false;
+	}
+}
+
+bool dml2_core_utils_is_dp_128b_132b_link_rate(enum dml2_output_link_dp_rate rate)
+{
+	switch (rate) {
+	case dml2_dp_rate_uhbr10:
+	case dml2_dp_rate_uhbr13p5:
+	case dml2_dp_rate_uhbr20:
+		return true;
+	case dml2_dp_rate_hbr:
+	case dml2_dp_rate_hbr2:
+	case dml2_dp_rate_hbr3:
+	case dml2_dp_rate_na:
+	default:
+		return false;
+	}
+}
+
+bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode)
+{
+	switch (odm_mode) {
+	case dml2_odm_mode_split_1to2:
+	case dml2_odm_mode_mso_1to2:
+	case dml2_odm_mode_mso_1to4:
+		return true;
+	case dml2_odm_mode_auto:
+	case dml2_odm_mode_bypass:
+	case dml2_odm_mode_combine_2to1:
+	case dml2_odm_mode_combine_3to1:
+	case dml2_odm_mode_combine_4to1:
+	default:
+		return false;
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
new file mode 100644
index 00000000000000..a5cc6a07167aed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_CORE_UTILS_H__
+#define __DML2_CORE_UTILS_H__
+#include "dml2_internal_shared_types.h"
+#include "dml2_debug.h"
+#include "lib_float_math.h"
+
+double dml2_core_utils_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
+const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
+bool dml2_core_utils_is_420(enum dml2_source_format_class source_format);
+void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only);
+const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
+void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg);
+unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int multiple, bool up);
+unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info);
+void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane);
+bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode);
+bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
+int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
+unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params);
+unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table);
+bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format);
+unsigned int dml2_core_utils_log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend);
+void dml2_core_utils_expand_implict_subvp(const struct display_configuation_with_meta *display_cfg, struct dml2_display_cfg *svp_expanded_display_cfg,
+	struct dml2_core_scratch *scratch);
+bool dml2_core_utils_is_stream_encoder_required(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_encoder_dsc_capable(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_dp_encoder(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_dio_dp_encoder(const struct dml2_stream_parameters *stream_descriptor);
+bool 
dml2_core_utils_is_hpo_dp_encoder(const struct dml2_stream_parameters *stream_descriptor); +bool dml2_core_utils_is_dp_8b_10b_link_rate(enum dml2_output_link_dp_rate rate); +bool dml2_core_utils_is_dp_128b_132b_link_rate(enum dml2_output_link_dp_rate rate); +bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode); + +#endif /* __DML2_CORE_UTILS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c index c94c4f32c957f4..8869ea0893128d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "dml2_dpmm_dcn4.h" #include "dml2_internal_shared_types.h" #include "dml_top_types.h" @@ -83,9 +82,9 @@ static void calculate_system_active_minimums(struct dml2_dpmm_map_mode_to_soc_dp get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency); - in_out->programming->min_clocks.dcn4.active.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency); - in_out->programming->min_clocks.dcn4.active.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency); - in_out->programming->min_clocks.dcn4.active.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency); + in_out->programming->min_clocks.dcn4x.active.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency); + in_out->programming->min_clocks.dcn4x.active.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency); + in_out->programming->min_clocks.dcn4x.active.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency); } static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out) @@ -123,9 +122,9 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency); - in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency); - in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency); - in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency); + in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency); + in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency); + in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency); } static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out) @@ -147,9 +146,9 @@ static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_ get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency); - in_out->programming->min_clocks.dcn4.idle.uclk_khz = dml_round_up(min_uclk_avg > min_uclk_latency ? 
min_uclk_avg : min_uclk_latency); - in_out->programming->min_clocks.dcn4.idle.fclk_khz = dml_round_up(min_fclk_avg > min_fclk_latency ? min_fclk_avg : min_fclk_latency); - in_out->programming->min_clocks.dcn4.idle.dcfclk_khz = dml_round_up(min_dcfclk_avg > min_dcfclk_latency ? min_dcfclk_avg : min_dcfclk_latency); + in_out->programming->min_clocks.dcn4x.idle.uclk_khz = dml_round_up(min_uclk_avg > min_uclk_latency ? min_uclk_avg : min_uclk_latency); + in_out->programming->min_clocks.dcn4x.idle.fclk_khz = dml_round_up(min_fclk_avg > min_fclk_latency ? min_fclk_avg : min_fclk_latency); + in_out->programming->min_clocks.dcn4x.idle.dcfclk_khz = dml_round_up(min_dcfclk_avg > min_dcfclk_latency ? min_dcfclk_avg : min_dcfclk_latency); } static bool add_margin_and_round_to_dfs_grainularity(double clock_khz, double margin, unsigned long vco_freq_khz, unsigned long *rounded_khz, uint32_t *divider_id) @@ -204,6 +203,26 @@ static bool add_margin_and_round_to_dfs_grainularity(double clock_khz, double ma return true; } +static bool round_to_non_dfs_granularity(unsigned long dispclk_khz, unsigned long dpprefclk_khz, unsigned long dtbrefclk_khz, + unsigned long *rounded_dispclk_khz, unsigned long *rounded_dpprefclk_khz, unsigned long *rounded_dtbrefclk_khz) +{ + unsigned long pll_frequency_khz; + + pll_frequency_khz = (unsigned long) math_max2(600000, math_ceil2(math_max3(dispclk_khz, dpprefclk_khz, dtbrefclk_khz), 1000)); + + *rounded_dispclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dispclk_khz, 32); + + *rounded_dpprefclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dpprefclk_khz, 32); + + if (dtbrefclk_khz > 0) { + *rounded_dtbrefclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dtbrefclk_khz, 32); + } else { + *rounded_dtbrefclk_khz = 0; + } + + return true; +} + static bool round_up_and_copy_to_next_dpm(unsigned long min_value, unsigned long *rounded_value, const struct dml2_clk_table *clock_table) { bool result = false; @@ -233,25 +252,25 @@ static bool map_soc_min_clocks_to_dpm_fine_grained(struct dml2_display_cfg_progr { bool result; - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.dcfclk_khz, &state_table->dcfclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.dcfclk_khz, &state_table->dcfclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.fclk_khz, &state_table->fclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.fclk_khz, &state_table->fclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.uclk_khz, &state_table->uclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.uclk_khz, &state_table->uclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.dcfclk_khz, &state_table->dcfclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.dcfclk_khz, &state_table->dcfclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.fclk_khz, &state_table->fclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.fclk_khz, &state_table->fclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.uclk_khz, &state_table->uclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.uclk_khz, &state_table->uclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.dcfclk_khz, 
&state_table->dcfclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.dcfclk_khz, &state_table->dcfclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.fclk_khz, &state_table->fclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.fclk_khz, &state_table->fclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.uclk_khz, &state_table->uclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.uclk_khz, &state_table->uclk); return result; } @@ -263,12 +282,12 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro result = false; for (index = 0; index < state_table->uclk.num_clk_values; index++) { - if (display_cfg->min_clocks.dcn4.active.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] && - display_cfg->min_clocks.dcn4.active.fclk_khz <= state_table->fclk.clk_values_khz[index] && - display_cfg->min_clocks.dcn4.active.uclk_khz <= state_table->uclk.clk_values_khz[index]) { - display_cfg->min_clocks.dcn4.active.dcfclk_khz = state_table->dcfclk.clk_values_khz[index]; - display_cfg->min_clocks.dcn4.active.fclk_khz = state_table->fclk.clk_values_khz[index]; - display_cfg->min_clocks.dcn4.active.uclk_khz = state_table->uclk.clk_values_khz[index]; + if (display_cfg->min_clocks.dcn4x.active.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] && + display_cfg->min_clocks.dcn4x.active.fclk_khz <= state_table->fclk.clk_values_khz[index] && + display_cfg->min_clocks.dcn4x.active.uclk_khz <= state_table->uclk.clk_values_khz[index]) { + display_cfg->min_clocks.dcn4x.active.dcfclk_khz = state_table->dcfclk.clk_values_khz[index]; + display_cfg->min_clocks.dcn4x.active.fclk_khz = state_table->fclk.clk_values_khz[index]; + display_cfg->min_clocks.dcn4x.active.uclk_khz = state_table->uclk.clk_values_khz[index]; result = true; break; } @@ -277,12 +296,12 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro if (result) { result = false; for (index = 0; index < state_table->uclk.num_clk_values; index++) { - if (display_cfg->min_clocks.dcn4.idle.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] && - display_cfg->min_clocks.dcn4.idle.fclk_khz <= state_table->fclk.clk_values_khz[index] && - display_cfg->min_clocks.dcn4.idle.uclk_khz <= state_table->uclk.clk_values_khz[index]) { - display_cfg->min_clocks.dcn4.idle.dcfclk_khz = state_table->dcfclk.clk_values_khz[index]; - display_cfg->min_clocks.dcn4.idle.fclk_khz = state_table->fclk.clk_values_khz[index]; - display_cfg->min_clocks.dcn4.idle.uclk_khz = state_table->uclk.clk_values_khz[index]; + if (display_cfg->min_clocks.dcn4x.idle.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] && + display_cfg->min_clocks.dcn4x.idle.fclk_khz <= state_table->fclk.clk_values_khz[index] && + display_cfg->min_clocks.dcn4x.idle.uclk_khz <= state_table->uclk.clk_values_khz[index]) { + display_cfg->min_clocks.dcn4x.idle.dcfclk_khz = state_table->dcfclk.clk_values_khz[index]; + display_cfg->min_clocks.dcn4x.idle.fclk_khz = state_table->fclk.clk_values_khz[index]; + display_cfg->min_clocks.dcn4x.idle.uclk_khz = state_table->uclk.clk_values_khz[index]; result = true; break; } @@ -290,9 +309,9 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro } // SVP is not supported on any coarse grained SoCs - display_cfg->min_clocks.dcn4.svp_prefetch.dcfclk_khz = 0; - display_cfg->min_clocks.dcn4.svp_prefetch.fclk_khz = 0; - 
display_cfg->min_clocks.dcn4.svp_prefetch.uclk_khz = 0; + display_cfg->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = 0; + display_cfg->min_clocks.dcn4x.svp_prefetch.fclk_khz = 0; + display_cfg->min_clocks.dcn4x.svp_prefetch.uclk_khz = 0; return result; } @@ -325,30 +344,30 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo result = map_soc_min_clocks_to_dpm_coarse_grained(display_cfg, state_table); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dispclk_khz, &state_table->dispclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.deepsleep_dcfclk_khz, &state_table->dcfclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk); for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { if (result) - result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4.dppclk_khz, &state_table->dppclk); + result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk); } for (i = 0; i < display_cfg->display_config.num_streams; i++) { if (result) - result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dscclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.dscclk_khz, &state_table->dscclk); + result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dscclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.dscclk_khz, &state_table->dscclk); if (result) - result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dtbclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.dtbclk_khz, &state_table->dtbclk); + result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dtbclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.dtbclk_khz, &state_table->dtbclk); if (result) - result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].phyclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.phyclk_khz, &state_table->phyclk); + result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].phyclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.phyclk_khz, &state_table->phyclk); } if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dpprefclk_khz, &state_table->dppclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dpprefclk_khz, &state_table->dppclk); if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dtbrefclk_khz, &state_table->dtbclk); + result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dtbrefclk_khz, &state_table->dtbclk); return result; } @@ -516,15 +535,15 @@ static bool determine_power_management_features_with_fams(struct dml2_dpmm_map_m static void clamp_uclk_to_max(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out) { - in_out->programming->min_clocks.dcn4.active.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1]; - in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1]; - in_out->programming->min_clocks.dcn4.idle.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1]; + in_out->programming->min_clocks.dcn4x.active.uclk_khz = 
in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1]; + in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1]; + in_out->programming->min_clocks.dcn4x.idle.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1]; } static void clamp_fclk_to_max(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out) { - in_out->programming->min_clocks.dcn4.active.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1]; - in_out->programming->min_clocks.dcn4.idle.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1]; + in_out->programming->min_clocks.dcn4x.active.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1]; + in_out->programming->min_clocks.dcn4x.idle.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1]; } static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out) @@ -540,14 +559,14 @@ static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_o // In NV4, there's no support for FCLK or DCFCLK DPM change before SVP prefetch starts, therefore // active minimums must be boosted to prefetch minimums - if (in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz > in_out->programming->min_clocks.dcn4.active.uclk_khz) - in_out->programming->min_clocks.dcn4.active.uclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz; + if (in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz > in_out->programming->min_clocks.dcn4x.active.uclk_khz) + in_out->programming->min_clocks.dcn4x.active.uclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz; - if (in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz > in_out->programming->min_clocks.dcn4.active.fclk_khz) - in_out->programming->min_clocks.dcn4.active.fclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz; + if (in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz > in_out->programming->min_clocks.dcn4x.active.fclk_khz) + in_out->programming->min_clocks.dcn4x.active.fclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz; - if (in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz > in_out->programming->min_clocks.dcn4.active.dcfclk_khz) - in_out->programming->min_clocks.dcn4.active.dcfclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz; + if (in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz > in_out->programming->min_clocks.dcn4x.active.dcfclk_khz) + in_out->programming->min_clocks.dcn4x.active.dcfclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz; // need some massaging for the dispclk ramping cases: dispclk_khz = mode_support_result->global.dispclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0) * (1.0 + in_out->ip->dispclk_ramp_margin_percent / 100.0); @@ -556,34 +575,42 @@ static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_o // but still the required dispclk can be more than the maximum dispclk speed: dispclk_khz = math_max2(dispclk_khz, mode_support_result->global.dispclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0)); - add_margin_and_round_to_dfs_grainularity(dispclk_khz, 0.0, - (unsigned 
long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dispclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dispclk_did); - // DPP Ref is always set to max of all DPP clocks for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { - if (in_out->programming->min_clocks.dcn4.dpprefclk_khz < mode_support_result->per_plane[i].dppclk_khz) - in_out->programming->min_clocks.dcn4.dpprefclk_khz = mode_support_result->per_plane[i].dppclk_khz; + if (in_out->programming->min_clocks.dcn4x.dpprefclk_khz < mode_support_result->per_plane[i].dppclk_khz) + in_out->programming->min_clocks.dcn4x.dpprefclk_khz = mode_support_result->per_plane[i].dppclk_khz; } + in_out->programming->min_clocks.dcn4x.dpprefclk_khz = (unsigned long) (in_out->programming->min_clocks.dcn4x.dpprefclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0)); - add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4.dpprefclk_khz, in_out->soc_bb->dcn_downspread_percent / 100.0, - (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dpprefclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dpprefclk_did); - + // DTB Ref is always set to max of all DTB clocks for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { - in_out->programming->plane_programming[i].min_clocks.dcn4.dppclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4.dpprefclk_khz / 255.0 - * math_ceil2(in_out->display_cfg->mode_support_result.per_plane[i].dppclk_khz * (1.0 + in_out->soc_bb->dcn_downspread_percent / 100.0) * 255.0 / in_out->programming->min_clocks.dcn4.dpprefclk_khz, 1.0)); + if (in_out->programming->min_clocks.dcn4x.dtbrefclk_khz < mode_support_result->per_stream[i].dtbclk_khz) + in_out->programming->min_clocks.dcn4x.dtbrefclk_khz = mode_support_result->per_stream[i].dtbclk_khz; } + in_out->programming->min_clocks.dcn4x.dtbrefclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4x.dtbrefclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0)); - // DTB Ref is always set to max of all DTB clocks - for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { - if (in_out->programming->min_clocks.dcn4.dtbrefclk_khz < mode_support_result->per_stream[i].dtbclk_khz) - in_out->programming->min_clocks.dcn4.dtbrefclk_khz = mode_support_result->per_stream[i].dtbclk_khz; + if (in_out->soc_bb->no_dfs) { + round_to_non_dfs_granularity((unsigned long)dispclk_khz, in_out->programming->min_clocks.dcn4x.dpprefclk_khz, in_out->programming->min_clocks.dcn4x.dtbrefclk_khz, + &in_out->programming->min_clocks.dcn4x.dispclk_khz, &in_out->programming->min_clocks.dcn4x.dpprefclk_khz, &in_out->programming->min_clocks.dcn4x.dtbrefclk_khz); + } else { + add_margin_and_round_to_dfs_grainularity(dispclk_khz, 0.0, + (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dispclk_khz, &in_out->programming->min_clocks.dcn4x.divider_ids.dispclk_did); + + add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4x.dpprefclk_khz, 0.0, + (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dpprefclk_khz, &in_out->programming->min_clocks.dcn4x.divider_ids.dpprefclk_did); + + add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4x.dtbrefclk_khz, 0.0, + (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dtbrefclk_khz, 
&in_out->programming->min_clocks.dcn4x.divider_ids.dtbrefclk_did); } - add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4.dtbrefclk_khz, in_out->soc_bb->dcn_downspread_percent / 100.0, - (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dtbrefclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dtbrefclk_did); - in_out->programming->min_clocks.dcn4.deepsleep_dcfclk_khz = mode_support_result->global.dcfclk_deepsleep_khz; - in_out->programming->min_clocks.dcn4.socclk_khz = mode_support_result->global.socclk_khz; + for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { + in_out->programming->plane_programming[i].min_clocks.dcn4x.dppclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4x.dpprefclk_khz / 255.0 + * math_ceil2(in_out->display_cfg->mode_support_result.per_plane[i].dppclk_khz * (1.0 + in_out->soc_bb->dcn_downspread_percent / 100.0) * 255.0 / in_out->programming->min_clocks.dcn4x.dpprefclk_khz, 1.0)); + } + + in_out->programming->min_clocks.dcn4x.deepsleep_dcfclk_khz = mode_support_result->global.dcfclk_deepsleep_khz; + in_out->programming->min_clocks.dcn4x.socclk_khz = mode_support_result->global.socclk_khz; result = map_min_clocks_to_dpm(mode_support_result, in_out->programming, &in_out->soc_bb->clk_table); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h index 3afb69dfd040a1..b165c58dfd1125 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_DPMM_DCN4_H__ #define __DML2_DPMM_DCN4_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c index 2c983daf2dadf3..3861bc6c96219c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "dml2_dpmm_factory.h" #include "dml2_dpmm_dcn4.h" #include "dml2_external_lib_deps.h" @@ -21,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance { bool result = false; - if (!out) + if (out == 0) return false; memset(out, 0, sizeof(struct dml2_dpmm_instance)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h index 80b44b4c2e68af..20ba2e446f1d58 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_DPMM_FACTORY_H__ #define __DML2_DPMM_FACTORY_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c index 5d8887ac766d7e..f4b1a7d02d4266 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #include "dml2_mcg_dcn4.h" #include "dml_top_soc_parameter_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h index 19d17865143579..02da6f45cbf75c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_MCG_DCN4_H__ #define __DML2_MCG_DCN4_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c index 55085b85f8ed78..c60b8fe90819d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "dml2_mcg_factory.h" #include "dml2_mcg_dcn4.h" #include "dml2_external_lib_deps.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h index 5dfdfed04e22c2..ad307deca3b0ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_MCG_FACTORY_H__ #define __DML2_MCG_FACTORY_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c index 671f9ac2627cc8..a31db5742675d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c @@ -2,22 +2,17 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "dml2_pmo_factory.h" #include "dml2_pmo_dcn3.h" static void sort(double *list_a, int list_a_size) { - double temp; // For all elements b[i] in list_b[] for (int i = 0; i < list_a_size - 1; i++) { // Find the first element of list_a that's larger than b[i] for (int j = i; j < list_a_size - 1; j++) { - if (list_a[j] > list_a[j + 1]) { - temp = list_a[j]; - list_a[j] = list_a[j + 1]; - list_a[j + 1] = temp; - } + if (list_a[j] > list_a[j + 1]) + swap(list_a[j], list_a[j + 1]); } } } @@ -502,7 +497,6 @@ bool pmo_dcn3_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in in_out->cfg_support_info->plane_support_info[i].dpps_used)) { result = false; } else { - free_pipes -= planes_on_stream; break; } } else { @@ -671,7 +665,7 @@ bool pmo_dcn3_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_su struct dml2_pmo_instance *pmo = in_out->instance; unsigned int stream_index; bool success = false; - bool reached_end = true; + bool reached_end; memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h index cc350f88d4d2fc..f00bd9e72a8689 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #ifndef __DML2_PMO_DCN3_H__ #define __DML2_PMO_DCN3_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c deleted file mode 100644 index 8952dd7e36cbec..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c +++ /dev/null @@ -1,1250 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. - - -#include "dml2_pmo_factory.h" -#include "dml2_pmo_dcn4.h" - -static const int MIN_VACTIVE_MARGIN_US = 100; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding -static const int SUBVP_DRR_MARGIN_US = 100; - -static const enum dml2_pmo_pstate_strategy full_strategy_list_1_display[][4] = { - // VActive Preferred - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Then SVP - { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Then VBlank - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Finally DRR - { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, -}; - -static const int full_strategy_list_1_display_size = sizeof(full_strategy_list_1_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4); - -static const enum dml2_pmo_pstate_strategy full_strategy_list_2_display[][4] = { - // VActive only is preferred - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Then VActive + VBlank - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Then VBlank only - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Then SVP + VBlank - { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Then SVP + SVP - { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Finally DRR + DRR - { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, -}; - -static const int full_strategy_list_2_display_size = sizeof(full_strategy_list_2_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4); - -static const enum dml2_pmo_pstate_strategy full_strategy_list_3_display[][4] = { - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // All VActive - - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 1 VBlank - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, - { dml2_pmo_pstate_strategy_vactive, 
dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, - -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 2 VBlank -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, -// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, - -// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // VActive + 3 VBlank -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, - - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // All VBlank - - { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // All DRR -}; - -static const int full_strategy_list_3_display_size = sizeof(full_strategy_list_3_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4); - -static const enum dml2_pmo_pstate_strategy full_strategy_list_4_display[][4] = { - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // All VActive - - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // VActive + 1 VBlank - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive }, - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank }, - -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // VActive + 2 VBlank -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive }, -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank }, -// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive }, -// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank }, -// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, - -// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 3 VBlank -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, 
dml2_pmo_pstate_strategy_vblank }, -// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive }, - - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // All Vblank - - { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // All DRR -}; - -static const int full_strategy_list_4_display_size = sizeof(full_strategy_list_4_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4); - -static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated) -{ - bool result = true; - - if (*odm_mode == dml2_odm_mode_auto) { - switch (odms_calculated) { - case 1: - *odm_mode = dml2_odm_mode_bypass; - break; - case 2: - *odm_mode = dml2_odm_mode_combine_2to1; - break; - case 3: - *odm_mode = dml2_odm_mode_combine_3to1; - break; - case 4: - *odm_mode = dml2_odm_mode_combine_4to1; - break; - default: - result = false; - break; - } - } - - if (result) { - if (*odm_mode == dml2_odm_mode_bypass) { - *odm_mode = dml2_odm_mode_combine_2to1; - } else if (*odm_mode == dml2_odm_mode_combine_2to1) { - *odm_mode = dml2_odm_mode_combine_3to1; - } else if (*odm_mode == dml2_odm_mode_combine_3to1) { - *odm_mode = dml2_odm_mode_combine_4to1; - } else { - result = false; - } - } - - return result; -} - -static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit) -{ - if (*mpc_combine_factor < limit) { - (*mpc_combine_factor)++; - return true; - } - - return false; -} - -static int count_planes_with_stream_index(const struct dml2_display_cfg *display_cfg, unsigned int stream_index) -{ - unsigned int i; - int count; - - count = 0; - for (i = 0; i < display_cfg->num_planes; i++) { - if (display_cfg->plane_descriptors[i].stream_index == stream_index) - count++; - } - - return count; -} - -static bool optimize_dcc_mcache_no_odm(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out, - int free_pipes) -{ - struct dml2_pmo_instance *pmo = in_out->instance; - - unsigned int i; - bool result = true; - - for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) { - // For pipes that failed dcc mcache check, we want to increase the pipe count. - // The logic for doing this depends on how many pipes is already being used, - // and whether it's mpcc or odm combine. - if (!in_out->dcc_mcache_supported[i]) { - // For the general case of "n displays", we can only optimize streams with an ODM combine factor of 1 - if (in_out->cfg_support_info->stream_support_info[in_out->optimized_display_cfg->plane_descriptors[i].stream_index].odms_used == 1) { - in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor = - in_out->cfg_support_info->plane_support_info[i].dpps_used; - // For each plane that is not passing mcache validation, just add another pipe to it, up to the limit. - if (free_pipes > 0) { - if (!increase_mpc_combine_factor(&in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor, - pmo->mpc_combine_limit)) { - // We've reached max pipes allocatable to a single plane, so we fail. - result = false; - break; - } else { - // Successfully added another pipe to this failing plane. - free_pipes--; - } - } else { - // No free pipes to add. - result = false; - break; - } - } else { - // If the stream of this plane needs ODM combine, no further optimization can be done. 
- result = false; - break; - } - } - } - - return result; -} - -bool pmo_dcn4_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out) -{ - struct dml2_pmo_instance *pmo = in_out->instance; - - unsigned int i, used_pipes, free_pipes, planes_on_stream; - bool result; - - if (in_out->display_config != in_out->optimized_display_cfg) { - memcpy(in_out->optimized_display_cfg, in_out->display_config, sizeof(struct dml2_display_cfg)); - } - - //Count number of free pipes, and check if any odm combine is in use. - used_pipes = 0; - for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) { - used_pipes += in_out->cfg_support_info->plane_support_info[i].dpps_used; - } - free_pipes = pmo->ip_caps->pipe_count - used_pipes; - - // Optimization loop - // The goal here is to add more pipes to any planes - // which are failing mcache admissibility - result = true; - - // The optimization logic depends on whether ODM combine is enabled, and the stream count. - if (in_out->optimized_display_cfg->num_streams > 1) { - // If there are multiple streams, we are limited to only be able to optimize mcache failures on planes - // which are not ODM combined. - - result = optimize_dcc_mcache_no_odm(in_out, free_pipes); - } else if (in_out->optimized_display_cfg->num_streams == 1) { - // In single stream cases, we still optimize mcache failures when there's ODM combine with some - // additional logic. - - if (in_out->cfg_support_info->stream_support_info[0].odms_used > 1) { - // If ODM combine is enabled, then the logic is to increase ODM combine factor. - - // Optimization for streams with > 1 ODM combine factor is only supported for single display. - planes_on_stream = count_planes_with_stream_index(in_out->optimized_display_cfg, 0); - - for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) { - // For pipes that failed dcc mcache check, we want to increase the pipe count. - // The logic for doing this depends on how many pipes is already being used, - // and whether it's mpcc or odm combine. - if (!in_out->dcc_mcache_supported[i]) { - // Increasing ODM combine factor on a stream requires a free pipe for each plane on the stream. - if (free_pipes >= planes_on_stream) { - if (!increase_odm_combine_factor(&in_out->optimized_display_cfg->stream_descriptors[i].overrides.odm_mode, - in_out->cfg_support_info->plane_support_info[i].dpps_used)) { - result = false; - } else { - free_pipes -= planes_on_stream; - break; - } - } else { - result = false; - break; - } - } - } - } else { - // If ODM combine is not enabled, then we can actually use the same logic as before. 
- - result = optimize_dcc_mcache_no_odm(in_out, free_pipes); - } - } else { - result = true; - } - - return result; -} - -bool pmo_dcn4_initialize(struct dml2_pmo_initialize_in_out *in_out) -{ - struct dml2_pmo_instance *pmo = in_out->instance; - - pmo->soc_bb = in_out->soc_bb; - pmo->ip_caps = in_out->ip_caps; - pmo->mpc_combine_limit = 2; - pmo->odm_combine_limit = 4; - pmo->mcg_clock_table_size = in_out->mcg_clock_table_size; - - pmo->fams_params.v1.subvp.fw_processing_delay_us = 10; - pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us = 50; - pmo->fams_params.v1.subvp.refresh_rate_limit_max = 175; - pmo->fams_params.v1.subvp.refresh_rate_limit_min = 0; - - pmo->options = in_out->options; - - return true; -} - -static bool is_h_timing_divisible_by(const struct dml2_timing_cfg *timing, unsigned char denominator) -{ - /* - * Htotal, Hblank start/end, and Hsync start/end all must be divisible - * in order for the horizontal timing params to be considered divisible - * by 2. Hsync start is always 0. - */ - unsigned long h_blank_start = timing->h_total - timing->h_front_porch; - - return (timing->h_total % denominator == 0) && - (h_blank_start % denominator == 0) && - (timing->h_blank_end % denominator == 0) && - (timing->h_sync_width % denominator == 0); -} - -static bool is_dp_encoder(enum dml2_output_encoder_class encoder_type) -{ - switch (encoder_type) { - case dml2_dp: - case dml2_edp: - case dml2_dp2p0: - case dml2_none: - return true; - case dml2_hdmi: - case dml2_hdmifrl: - default: - return false; - } -} - -bool pmo_dcn4_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out) -{ - unsigned int i; - const struct dml2_display_cfg *display_config = - &in_out->base_display_config->display_config; - const struct dml2_core_mode_support_result *mode_support_result = - &in_out->base_display_config->mode_support_result; - - if (in_out->instance->options->disable_dyn_odm || - (in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1)) - return false; - - for (i = 0; i < display_config->num_planes; i++) - /* - * vmin optimization is required to be seamlessly switched off - * at any time when the new configuration is no longer - * supported. However switching from ODM combine to MPC combine - * is not always seamless. When there not enough free pipes, we - * will have to use the same secondary OPP heads as secondary - * DPP pipes in MPC combine in new state. This transition is - * expected to cause glitches. To avoid the transition, we only - * allow vmin optimization if the stream's base configuration - * doesn't require MPC combine. This condition checks if MPC - * combine is enabled. If so do not optimize the stream. 
- */ - if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 && - mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1) - in_out->base_display_config->stage4.unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true; - - for (i = 0; i < display_config->num_streams; i++) { - if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm) - in_out->base_display_config->stage4.unoptimizable_streams[i] = true; - else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid && - in_out->instance->options->disable_dyn_odm_for_stream_with_svp) - in_out->base_display_config->stage4.unoptimizable_streams[i] = true; - /* - * ODM Combine requires horizontal timing divisible by 2 so each - * ODM segment has the same size. - */ - else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2)) - in_out->base_display_config->stage4.unoptimizable_streams[i] = true; - /* - * Our hardware support seamless ODM transitions for DP encoders - * only. - */ - else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder)) - in_out->base_display_config->stage4.unoptimizable_streams[i] = true; - } - - return true; -} - -bool pmo_dcn4_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out) -{ - bool is_vmin = true; - - if (in_out->vmin_limits->dispclk_khz > 0 && - in_out->display_config->mode_support_result.global.dispclk_khz > in_out->vmin_limits->dispclk_khz) - is_vmin = false; - - return is_vmin; -} - -static int find_highest_odm_load_stream_index( - const struct dml2_display_cfg *display_config, - const struct dml2_core_mode_support_result *mode_support_result) -{ - unsigned int i; - int odm_load, highest_odm_load = -1, highest_odm_load_index = -1; - - for (i = 0; i < display_config->num_streams; i++) { - odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz - / mode_support_result->cfg_support_info.stream_support_info[i].odms_used; - if (odm_load > highest_odm_load) { - highest_odm_load_index = i; - highest_odm_load = odm_load; - } - } - - return highest_odm_load_index; -} - -bool pmo_dcn4_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out) -{ - int stream_index; - const struct dml2_display_cfg *display_config = - &in_out->base_display_config->display_config; - const struct dml2_core_mode_support_result *mode_support_result = - &in_out->base_display_config->mode_support_result; - unsigned int odms_used; - struct dml2_stream_parameters *stream_descriptor; - bool optimizable = false; - - /* - * highest odm load stream must be optimizable to continue as dispclk is - * bounded by it. 
- */ - stream_index = find_highest_odm_load_stream_index(display_config, - mode_support_result); - - if (stream_index < 0 || - in_out->base_display_config->stage4.unoptimizable_streams[stream_index]) - return false; - - odms_used = mode_support_result->cfg_support_info.stream_support_info[stream_index].odms_used; - if ((int)odms_used >= in_out->instance->odm_combine_limit) - return false; - - memcpy(in_out->optimized_display_config, - in_out->base_display_config, - sizeof(struct display_configuation_with_meta)); - - stream_descriptor = &in_out->optimized_display_config->display_config.stream_descriptors[stream_index]; - while (!optimizable && increase_odm_combine_factor( - &stream_descriptor->overrides.odm_mode, - odms_used)) { - switch (stream_descriptor->overrides.odm_mode) { - case dml2_odm_mode_combine_2to1: - optimizable = true; - break; - case dml2_odm_mode_combine_3to1: - /* - * In ODM Combine 3:1 OTG_valid_pixel rate is 1/4 of - * actual pixel rate. Therefore horizontal timing must - * be divisible by 4. - */ - if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) { - if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) { - /* - * DSC h slice count must be divisible - * by 3. - */ - if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 3 == 0) - optimizable = true; - } else { - optimizable = true; - } - } - break; - case dml2_odm_mode_combine_4to1: - /* - * In ODM Combine 4:1 OTG_valid_pixel rate is 1/4 of - * actual pixel rate. Therefore horizontal timing must - * be divisible by 4. - */ - if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) { - if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) { - /* - * DSC h slice count must be divisible - * by 4. 
- */ - if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 4 == 0) - optimizable = true; - } else { - optimizable = true; - } - } - break; - case dml2_odm_mode_auto: - case dml2_odm_mode_bypass: - case dml2_odm_mode_split_1to2: - case dml2_odm_mode_mso_1to2: - case dml2_odm_mode_mso_1to4: - default: - break; - } - } - - return optimizable; -} - -static bool are_timings_trivially_synchronizable(const struct display_configuation_with_meta *display_config, int mask) -{ - unsigned char i; - bool identical = true; - bool contains_drr = false; - unsigned char remap_array[DML2_MAX_PLANES]; - unsigned char remap_array_size = 0; - - // Create a remap array to enable simple iteration through only masked stream indicies - for (i = 0; i < display_config->display_config.num_streams; i++) { - if (mask & (0x1 << i)) { - remap_array[remap_array_size++] = i; - } - } - - // 0 or 1 display is always trivially synchronizable - if (remap_array_size <= 1) - return true; - - for (i = 1; i < remap_array_size; i++) { - if (memcmp(&display_config->display_config.stream_descriptors[remap_array[i - 1]].timing, - &display_config->display_config.stream_descriptors[remap_array[i]].timing, - sizeof(struct dml2_timing_cfg))) { - identical = false; - break; - } - } - - for (i = 0; i < remap_array_size; i++) { - if (display_config->display_config.stream_descriptors[remap_array[i]].timing.drr_config.enabled) { - contains_drr = true; - break; - } - } - - return !contains_drr && identical; -} - -static void set_bit_in_bitfield(unsigned int *bit_field, unsigned int bit_offset) -{ - *bit_field = *bit_field | (0x1 << bit_offset); -} - -static bool is_bit_set_in_bitfield(unsigned int bit_field, unsigned int bit_offset) -{ - if (bit_field & (0x1 << bit_offset)) - return true; - - return false; -} - -static bool are_all_timings_drr_enabled(const struct display_configuation_with_meta *display_config, int mask) -{ - unsigned char i; - for (i = 0; i < DML2_MAX_PLANES; i++) { - if (is_bit_set_in_bitfield(mask, i)) { - if (!display_config->display_config.stream_descriptors[i].timing.drr_config.enabled) - return false; - } - } - - return true; -} - -static void insert_into_candidate_list(const enum dml2_pmo_pstate_strategy *per_stream_pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch) -{ - int stream_index; - - scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = true; - - for (stream_index = 0; stream_index < stream_count; stream_index++) { - scratch->pmo_dcn4.per_stream_pstate_strategy[scratch->pmo_dcn4.num_pstate_candidates][stream_index] = per_stream_pstate_strategy[stream_index]; - - if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank) - scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = false; - } - - scratch->pmo_dcn4.num_pstate_candidates++; -} - -static bool all_planes_match_strategy(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_strategy strategy) -{ - unsigned char i; - enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na; - - if (strategy == dml2_pmo_pstate_strategy_vactive) - matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive; - else if (strategy == dml2_pmo_pstate_strategy_vblank) - matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank; - else if (strategy == dml2_pmo_pstate_strategy_fw_svp) - 
matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp; - else if (strategy == dml2_pmo_pstate_strategy_fw_drr) - matching_strategy = dml2_uclk_pstate_change_strategy_force_drr; - - for (i = 0; i < DML2_MAX_PLANES; i++) { - if (is_bit_set_in_bitfield(plane_mask, i)) { - if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto && - display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != matching_strategy) - return false; - } - } - - return true; -} - -static bool subvp_subvp_schedulable(struct dml2_pmo_instance *pmo, const struct display_configuation_with_meta *display_cfg, - unsigned char *svp_stream_indicies, char svp_stream_count) -{ - struct dml2_pmo_scratch *s = &pmo->scratch; - int i; - int microschedule_lines, time_us, refresh_hz; - int max_microschedule_us = 0; - int vactive1_us, vactive2_us, vblank1_us, vblank2_us; - - const struct dml2_timing_cfg *svp_timing1 = 0; - const struct dml2_implicit_svp_meta *svp_meta1 = 0; - - const struct dml2_timing_cfg *svp_timing2 = 0; - - if (svp_stream_count <= 1) - return true; - else if (svp_stream_count > 2) - return false; - - /* Loop to calculate the maximum microschedule time between the two SubVP pipes, - * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. - */ - for (i = 0; i < svp_stream_count; i++) { - svp_timing1 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[i]].timing; - svp_meta1 = &s->pmo_dcn4.stream_svp_meta[svp_stream_indicies[i]]; - - microschedule_lines = svp_meta1->v_active; - - // Round up when calculating microschedule time (+ 1 at the end) - time_us = (int)((microschedule_lines * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000 + - pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us + pmo->fams_params.v1.subvp.fw_processing_delay_us + 1); - - if (time_us > max_microschedule_us) - max_microschedule_us = time_us; - - refresh_hz = (int)((double)(svp_timing1->pixel_clock_khz * 1000) / (svp_timing1->v_total * svp_timing1->h_total)); - - if (refresh_hz < pmo->fams_params.v1.subvp.refresh_rate_limit_min || - refresh_hz > pmo->fams_params.v1.subvp.refresh_rate_limit_max) { - return false; - } - } - - svp_timing1 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[0]].timing; - svp_meta1 = &s->pmo_dcn4.stream_svp_meta[svp_stream_indicies[0]]; - - vactive1_us = (int)((svp_timing1->v_active * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000); - - vblank1_us = (int)(((svp_timing1->v_total - svp_timing1->v_active) * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000); - - svp_timing2 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[1]].timing; - - vactive2_us = (int)((svp_timing2->v_active * svp_timing2->h_total) / (double)(svp_timing2->pixel_clock_khz * 1000) * 1000000); - - vblank2_us = (int)(((svp_timing2->v_total - svp_timing2->v_active) * svp_timing2->h_total) / (double)(svp_timing2->pixel_clock_khz * 1000) * 1000000); - - if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us && - (vactive2_us - vblank1_us) / 2 > max_microschedule_us) - return true; - - return false; -} - -static bool validate_svp_cofunctionality(struct dml2_pmo_instance *pmo, - const struct display_configuation_with_meta *display_cfg, int svp_stream_mask) -{ - bool result = false; - unsigned char stream_index; - - unsigned char svp_stream_indicies[2] = { 0 }; - 
unsigned char svp_stream_count = 0; - - // Find the SVP streams, store only the first 2, but count all of them - for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) { - if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) { - if (svp_stream_count < 2) - svp_stream_indicies[svp_stream_count] = stream_index; - - svp_stream_count++; - } - } - - if (svp_stream_count == 1) { - result = true; // 1 SVP is always co_functional - } else if (svp_stream_count == 2) { - result = subvp_subvp_schedulable(pmo, display_cfg, svp_stream_indicies, svp_stream_count); - } - - return result; -} - -static bool validate_drr_cofunctionality(struct dml2_pmo_instance *pmo, - const struct display_configuation_with_meta *display_cfg, int drr_stream_mask) -{ - unsigned char stream_index; - int drr_stream_count = 0; - - // Find the SVP streams and count all of them - for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) { - if (is_bit_set_in_bitfield(drr_stream_mask, stream_index)) { - drr_stream_count++; - } - } - - return drr_stream_count <= 4; -} - -static bool validate_svp_drr_cofunctionality(struct dml2_pmo_instance *pmo, - const struct display_configuation_with_meta *display_cfg, int svp_stream_mask, int drr_stream_mask) -{ - unsigned char stream_index; - int drr_stream_count = 0; - int svp_stream_count = 0; - - int prefetch_us = 0; - int mall_region_us = 0; - int drr_frame_us = 0; // nominal frame time - int subvp_active_us = 0; - int stretched_drr_us = 0; - int drr_stretched_vblank_us = 0; - int max_vblank_mallregion = 0; - - const struct dml2_timing_cfg *svp_timing = 0; - const struct dml2_timing_cfg *drr_timing = 0; - const struct dml2_implicit_svp_meta *svp_meta = 0; - - bool schedulable = false; - - // Find the SVP streams and count all of them - for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) { - if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) { - svp_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing; - svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index]; - svp_stream_count++; - } - if (is_bit_set_in_bitfield(drr_stream_mask, stream_index)) { - drr_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing; - drr_stream_count++; - } - } - - if (svp_stream_count == 1 && drr_stream_count == 1 && svp_timing != drr_timing) { - prefetch_us = (int)((svp_meta->v_total - svp_meta->v_front_porch) - * svp_timing->h_total / (double)(svp_timing->pixel_clock_khz * 1000) * 1000000 + - pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us); - - subvp_active_us = (int)(svp_timing->v_active * svp_timing->h_total / - (double)(svp_timing->pixel_clock_khz * 1000) * 1000000); - - drr_frame_us = (int)(drr_timing->v_total * drr_timing->h_total / - (double)(drr_timing->pixel_clock_khz * 1000) * 1000000); - - // P-State allow width and FW delays already included phantom_timing->v_addressable - mall_region_us = (int)(svp_meta->v_active * svp_timing->h_total / - (double)(svp_timing->pixel_clock_khz * 1000) * 1000000); - - stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; - - drr_stretched_vblank_us = (int)((drr_timing->v_total - drr_timing->v_active) * drr_timing->h_total / - (double)(drr_timing->pixel_clock_khz * 1000) * 1000000 + (stretched_drr_us - drr_frame_us)); - - max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? 
drr_stretched_vblank_us : mall_region_us; - - /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the - * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis - * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time, - * and the max of (VBLANK blanking time, MALL region)). - */ - if (stretched_drr_us < (1 / (double)drr_timing->drr_config.min_refresh_uhz) * 1000000 * 1000000 && - subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0) - schedulable = true; - } - - return schedulable; -} - -static bool validate_svp_vblank_cofunctionality(struct dml2_pmo_instance *pmo, - const struct display_configuation_with_meta *display_cfg, int svp_stream_mask, int vblank_stream_mask) -{ - unsigned char stream_index; - int vblank_stream_count = 0; - int svp_stream_count = 0; - - const struct dml2_timing_cfg *svp_timing = 0; - const struct dml2_timing_cfg *vblank_timing = 0; - const struct dml2_implicit_svp_meta *svp_meta = 0; - - int prefetch_us = 0; - int mall_region_us = 0; - int vblank_frame_us = 0; - int subvp_active_us = 0; - int vblank_blank_us = 0; - int max_vblank_mallregion = 0; - - bool schedulable = false; - - // Find the SVP streams and count all of them - for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) { - if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) { - svp_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing; - svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index]; - svp_stream_count++; - } - if (is_bit_set_in_bitfield(vblank_stream_mask, stream_index)) { - vblank_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing; - vblank_stream_count++; - } - } - - if (svp_stream_count == 1 && vblank_stream_count > 0) { - // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe - // Also include the prefetch end to mallstart delay time - prefetch_us = (int)((svp_meta->v_total - svp_meta->v_front_porch) * svp_timing->h_total - / (double)(svp_timing->pixel_clock_khz * 1000) * 1000000 + - pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us); - - // P-State allow width and FW delays already included phantom_timing->v_addressable - mall_region_us = (int)(svp_meta->v_active * svp_timing->h_total / - (double)(svp_timing->pixel_clock_khz * 1000) * 1000000); - - vblank_frame_us = (int)(vblank_timing->v_total * vblank_timing->h_total / - (double)(vblank_timing->pixel_clock_khz * 1000) * 1000000); - - vblank_blank_us = (int)((vblank_timing->v_total - vblank_timing->v_active) * vblank_timing->h_total / - (double)(vblank_timing->pixel_clock_khz * 1000) * 1000000); - - subvp_active_us = (int)(svp_timing->v_active * svp_timing->h_total / - (double)(svp_timing->pixel_clock_khz * 1000) * 1000000); - - max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us; - - // Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time, - // and the max of (VBLANK blanking time, MALL region) - // TODO: Possibly add some margin (i.e. the below conditions should be [...] > X instead of [...] 
> 0) - if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0) - schedulable = true; - } - return schedulable; -} - -static bool validate_drr_vblank_cofunctionality(struct dml2_pmo_instance *pmo, - const struct display_configuation_with_meta *display_cfg, int drr_stream_mask, int vblank_stream_mask) -{ - return false; -} - -static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo, - const struct display_configuation_with_meta *display_cfg, const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[4]) -{ - struct dml2_pmo_scratch *s = &pmo->scratch; - - unsigned char stream_index = 0; - - unsigned int svp_count = 0; - unsigned int svp_stream_mask = 0; - unsigned int drr_count = 0; - unsigned int drr_stream_mask = 0; - unsigned int vactive_count = 0; - unsigned int vactive_stream_mask = 0; - unsigned int vblank_count = 0; - unsigned int vblank_stream_mask = 0; - - bool strategy_matches_forced_requirements = true; - - bool admissible = false; - - // Tabulate everything - for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) { - - if (!all_planes_match_strategy(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index], - per_stream_pstate_strategy[stream_index])) { - strategy_matches_forced_requirements = false; - break; - } - - if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp) { - svp_count++; - set_bit_in_bitfield(&svp_stream_mask, stream_index); - } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_drr) { - drr_count++; - set_bit_in_bitfield(&drr_stream_mask, stream_index); - } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vactive) { - vactive_count++; - set_bit_in_bitfield(&vactive_stream_mask, stream_index); - } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank) { - vblank_count++; - set_bit_in_bitfield(&vblank_stream_mask, stream_index); - } - } - - if (!strategy_matches_forced_requirements) - return false; - - // Check for trivial synchronization for vblank - if (vblank_count > 0 && (pmo->options->disable_vblank || !are_timings_trivially_synchronizable(display_cfg, vblank_stream_mask))) - return false; - - if (svp_count > 0 && pmo->options->disable_svp) - return false; - - if (drr_count > 0 && (pmo->options->disable_drr_var || !are_all_timings_drr_enabled(display_cfg, drr_stream_mask))) - return false; - - // Validate for FAMS admissibiliy - if (svp_count == 0 && drr_count == 0) { - // No FAMS - admissible = true; - } else { - admissible = false; - if (svp_count > 0 && drr_count == 0 && vactive_count == 0 && vblank_count == 0) { - // All SVP - admissible = validate_svp_cofunctionality(pmo, display_cfg, svp_stream_mask); - } else if (svp_count == 0 && drr_count > 0 && vactive_count == 0 && vblank_count == 0) { - // All DRR - admissible = validate_drr_cofunctionality(pmo, display_cfg, drr_stream_mask); - } else if (svp_count > 0 && drr_count > 0 && vactive_count == 0 && vblank_count == 0) { - // SVP + DRR - admissible = validate_svp_drr_cofunctionality(pmo, display_cfg, svp_stream_mask, drr_stream_mask); - } else if (svp_count > 0 && drr_count == 0 && vactive_count == 0 && vblank_count > 0) { - // SVP + VBlank - admissible = validate_svp_vblank_cofunctionality(pmo, display_cfg, svp_stream_mask, vblank_stream_mask); - } else if (svp_count == 0 && drr_count > 0 && vactive_count == 0 && vblank_count > 0) { - // DRR + VBlank - admissible = 
validate_drr_vblank_cofunctionality(pmo, display_cfg, drr_stream_mask, vblank_stream_mask); - } - } - - return admissible; -} - -static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask) -{ - unsigned char i; - int min_vactive_margin_us = 0xFFFFFFF; - - for (i = 0; i < DML2_MAX_PLANES; i++) { - if (is_bit_set_in_bitfield(plane_mask, i)) { - if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active < min_vactive_margin_us) - min_vactive_margin_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active; - } - } - - return min_vactive_margin_us; -} - -bool pmo_dcn4_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out) -{ - struct dml2_pmo_instance *pmo = in_out->instance; - struct dml2_optimization_stage3_state *state = &in_out->base_display_config->stage3; - struct dml2_pmo_scratch *s = &pmo->scratch; - - struct display_configuation_with_meta *display_config; - const struct dml2_plane_parameters *plane_descriptor; - const enum dml2_pmo_pstate_strategy (*strategy_list)[4] = 0; - unsigned int strategy_list_size = 0; - unsigned int plane_index, stream_index, i; - - state->performed = true; - - display_config = in_out->base_display_config; - display_config->display_config.overrides.enable_subvp_implicit_pmo = true; - - memset(s, 0, sizeof(struct dml2_pmo_scratch)); - - pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency; - pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size - 1; - pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency; - - // First build the stream plane mask (array of bitfields indexed by stream, indicating plane mapping) - for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { - plane_descriptor = &display_config->display_config.plane_descriptors[plane_index]; - - set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index); - - state->pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive; - } - - // Figure out which streams can do vactive, and also build up implicit SVP meta - for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) { - if (get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >= - MIN_VACTIVE_MARGIN_US) - set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index); - - s->pmo_dcn4.stream_svp_meta[stream_index].valid = true; - s->pmo_dcn4.stream_svp_meta[stream_index].v_active = - display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_active; - s->pmo_dcn4.stream_svp_meta[stream_index].v_total = - display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_total; - s->pmo_dcn4.stream_svp_meta[stream_index].v_front_porch = 1; - } - - switch (display_config->display_config.num_streams) { - case 1: - strategy_list = full_strategy_list_1_display; - strategy_list_size = full_strategy_list_1_display_size; - break; - case 2: - strategy_list = full_strategy_list_2_display; - strategy_list_size = full_strategy_list_2_display_size; - break; - case 3: - strategy_list = full_strategy_list_3_display; - strategy_list_size = full_strategy_list_3_display_size; - break; - case 4: - strategy_list = 
full_strategy_list_4_display; - strategy_list_size = full_strategy_list_4_display_size; - break; - default: - strategy_list_size = 0; - break; - } - - if (strategy_list_size == 0) - return false; - - s->pmo_dcn4.num_pstate_candidates = 0; - - for (i = 0; i < strategy_list_size && i < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) { - if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, strategy_list[i])) { - insert_into_candidate_list(strategy_list[i], display_config->display_config.num_streams, s); - } - } - - if (s->pmo_dcn4.num_pstate_candidates > 0) { - // There's this funny case... - // If the first entry in the candidate list is all vactive, then we can consider it "tested", so the current index is 0 - // Otherwise the current index should be -1 because we run the optimization at least once - s->pmo_dcn4.cur_pstate_candidate = 0; - for (i = 0; i < display_config->display_config.num_streams; i++) { - if (s->pmo_dcn4.per_stream_pstate_strategy[0][i] != dml2_pmo_pstate_strategy_vactive) { - s->pmo_dcn4.cur_pstate_candidate = -1; - break; - } - } - return true; - } else { - return false; - } -} - -static void reset_display_configuration(struct display_configuation_with_meta *display_config) -{ - unsigned int plane_index; - unsigned int stream_index; - struct dml2_plane_parameters *plane; - - for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) { - display_config->stage3.stream_svp_meta[stream_index].valid = false; - } - - for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { - plane = &display_config->display_config.plane_descriptors[plane_index]; - - // Unset SubVP - plane->overrides.legacy_svp_config = dml2_svp_mode_override_auto; - - // Remove reserve time - plane->overrides.reserved_vblank_time_ns = 0; - - // Reset strategy to auto - plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto; - - display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_not_supported; - } -} - -static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *display_config, int plane_mask) -{ - unsigned char plane_index; - struct dml2_plane_parameters *plane; - - for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { - if (is_bit_set_in_bitfield(plane_mask, plane_index)) { - plane = &display_config->display_config.plane_descriptors[plane_index]; - - // Setup DRR - plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr; - - display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_drr; - } - } -} - -static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *display_config, int plane_mask) -{ - unsigned char plane_index; - int stream_index = -1; - - for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { - if (is_bit_set_in_bitfield(plane_mask, plane_index)) { - stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index; - display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom; - } - } - - if (stream_index >= 0) { - display_config->stage3.stream_svp_meta[stream_index].valid = true; - display_config->stage3.stream_svp_meta[stream_index].v_active = - display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_active; - 
display_config->stage3.stream_svp_meta[stream_index].v_total = - display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_total; - display_config->stage3.stream_svp_meta[stream_index].v_front_porch = 1; - } -} - -static void setup_planes_for_vblank_by_mask(struct display_configuation_with_meta *display_config, int plane_mask) -{ - unsigned char plane_index; - struct dml2_plane_parameters *plane; - - for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { - if (is_bit_set_in_bitfield(plane_mask, plane_index)) { - plane = &display_config->display_config.plane_descriptors[plane_index]; - - // Setup reserve time - plane->overrides.reserved_vblank_time_ns = 400 * 1000; - - display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank; - } - } -} - -static void setup_planes_for_vactive_by_mask(struct display_configuation_with_meta *display_config, int plane_mask) -{ - unsigned char plane_index; - - for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { - if (is_bit_set_in_bitfield(plane_mask, plane_index)) { - display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive; - } - } -} - -static bool setup_display_config(struct display_configuation_with_meta *display_config, struct dml2_pmo_scratch *scratch, int strategy_index) -{ - bool success = true; - unsigned char stream_index; - - reset_display_configuration(display_config); - - for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) { - if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_na) { - success = false; - break; - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vblank) { - setup_planes_for_vblank_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_svp) { - setup_planes_for_svp_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_drr) { - setup_planes_for_drr_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vactive) { - setup_planes_for_vactive_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } - } - - return success; -} - -static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask) -{ - int min_time_us = 0xFFFFFF; - unsigned char plane_index = 0; - - for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { - if (is_bit_set_in_bitfield(plane_mask, plane_index)) { - if (min_time_us > (display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000)) - min_time_us = display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000; - } - } - return min_time_us; -} - -bool pmo_dcn4_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out) -{ - bool p_state_supported = true; - unsigned int stream_index; - struct dml2_pmo_scratch *s = 
&in_out->instance->scratch; - - if (s->pmo_dcn4.cur_pstate_candidate < 0) - return false; - - for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) { - - if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vactive) { - if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_US) { - p_state_supported = false; - break; - } - } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vblank) { - if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < - in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) { - p_state_supported = false; - break; - } - } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp) { - if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) { - p_state_supported = false; - break; - } - } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_drr) { - if (!all_planes_match_strategy(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr)) { - p_state_supported = false; - break; - } - } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_na) { - p_state_supported = false; - break; - } - } - - return p_state_supported; -} - -bool pmo_dcn4_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out) -{ - bool success = false; - struct dml2_pmo_scratch *s = &in_out->instance->scratch; - - memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta)); - - if (in_out->last_candidate_failed) { - if (s->pmo_dcn4.allow_state_increase_for_strategy[s->pmo_dcn4.cur_pstate_candidate] && - s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index) { - s->pmo_dcn4.cur_latency_index++; - - success = true; - } - } - - if (!success) { - s->pmo_dcn4.cur_latency_index = s->pmo_dcn4.min_latency_index; - s->pmo_dcn4.cur_pstate_candidate++; - - if (s->pmo_dcn4.cur_pstate_candidate < s->pmo_dcn4.num_pstate_candidates) { - success = true; - } - } - - if (success) { - in_out->optimized_display_config->stage3.min_clk_index_for_latency = s->pmo_dcn4.cur_latency_index; - setup_display_config(in_out->optimized_display_config, &in_out->instance->scratch, in_out->instance->scratch.pmo_dcn4.cur_pstate_candidate); - } - - return success; -} diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h deleted file mode 100644 index 09cacc933d2131..00000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. 
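/*
 * [Editor's sketch, not part of the patch] The dml2_pmo_dcn4.c code removed
 * above keeps per-stream plane membership in small bitmasks and walks them
 * with set_bit_in_bitfield()/is_bit_set_in_bitfield(). The standalone program
 * below illustrates only that bookkeeping pattern; MAX_PLANES, MAX_STREAMS and
 * the stream_of_plane[] mapping are made-up inputs for illustration and do not
 * come from the driver.
 */
#include <stdio.h>

#define MAX_PLANES  8
#define MAX_STREAMS 4

static void set_bit_in_bitfield(unsigned int *bit_field, unsigned int bit_offset)
{
	*bit_field |= (0x1u << bit_offset);
}

static int is_bit_set_in_bitfield(unsigned int bit_field, unsigned int bit_offset)
{
	return (bit_field & (0x1u << bit_offset)) != 0;
}

int main(void)
{
	/* hypothetical plane -> stream mapping, for illustration only */
	const unsigned int stream_of_plane[MAX_PLANES] = { 0, 0, 1, 1, 2, 3, 3, 3 };
	unsigned int stream_plane_mask[MAX_STREAMS] = { 0 };
	unsigned int plane, stream;

	/* build one bitmask of planes per stream, as the PMO scratch state does */
	for (plane = 0; plane < MAX_PLANES; plane++)
		set_bit_in_bitfield(&stream_plane_mask[stream_of_plane[plane]], plane);

	/* walk the masks back, the same way the per-plane strategy checks iterate */
	for (stream = 0; stream < MAX_STREAMS; stream++) {
		printf("stream %u planes:", stream);
		for (plane = 0; plane < MAX_PLANES; plane++)
			if (is_bit_set_in_bitfield(stream_plane_mask[stream], plane))
				printf(" %u", plane);
		printf("\n");
	}
	return 0;
}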
- - -#ifndef __DML2_PMO_DCN4_H__ -#define __DML2_PMO_DCN4_H__ - -#include "dml2_internal_shared_types.h" - -bool pmo_dcn4_initialize(struct dml2_pmo_initialize_in_out *in_out); - -bool pmo_dcn4_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out); - -bool pmo_dcn4_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out); -bool pmo_dcn4_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out); -bool pmo_dcn4_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out); - -bool pmo_dcn4_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out); -bool pmo_dcn4_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out); -bool pmo_dcn4_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out); - -bool pmo_dcn4_unit_test(void); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index 6547cc2c2a773b..1cf9015e854a96 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -1,122 +1,181 @@ -/* -* Copyright 2022 Advanced Micro Devices, Inc. -* -* Permission is hereby granted, free of charge, to any person obtaining a -* copy of this software and associated documentation files (the "Software"), -* to deal in the Software without restriction, including without limitation -* the rights to use, copy, modify, merge, publish, distribute, sublicense, -* and/or sell copies of the Software, and to permit persons to whom the -* Software is furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -* OTHER DEALINGS IN THE SOFTWARE. -* -* Authors: AMD -* -*/ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. 
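/*
 * [Editor's sketch, not part of the patch] The rewritten fams2 file below
 * replaces the old two-dimensional arrays of enum dml2_pmo_pstate_strategy
 * with tables of struct dml2_pmo_pstate_strategy, so each candidate carries
 * both its per-stream method list and its allow_state_increase flag, and the
 * table size falls out of sizeof(table)/sizeof(table[0]). The toy program
 * below mirrors that layout with simplified stand-in names (pstate_method,
 * strategy_entry); only the table/sizing idiom matches the driver code.
 */
#include <stdio.h>

#define MAX_DISPLAYS 4

enum pstate_method { METHOD_NA, METHOD_VACTIVE, METHOD_VBLANK, METHOD_SVP, METHOD_DRR };

struct strategy_entry {
	enum pstate_method per_stream_method[MAX_DISPLAYS];
	int allow_state_increase;	/* stand-in for the bool in the real struct */
};

static const struct strategy_entry strategy_list_2_display[] = {
	/* all-vactive is preferred and may raise the latency clock index */
	{ { METHOD_VACTIVE, METHOD_VACTIVE, METHOD_NA, METHOD_NA }, 1 },
	/* vactive + vblank must hold the clock index */
	{ { METHOD_VACTIVE, METHOD_VBLANK,  METHOD_NA, METHOD_NA }, 0 },
	/* all-DRR may raise the clock index again */
	{ { METHOD_DRR,     METHOD_DRR,     METHOD_NA, METHOD_NA }, 1 },
};

/* size derived from the table itself, one constant per display count */
static const int strategy_list_2_display_size =
	sizeof(strategy_list_2_display) / sizeof(strategy_list_2_display[0]);

int main(void)
{
	int i, s;

	for (i = 0; i < strategy_list_2_display_size; i++) {
		printf("candidate %d (allow_state_increase=%d):", i,
		       strategy_list_2_display[i].allow_state_increase);
		for (s = 0; s < MAX_DISPLAYS; s++)
			printf(" %d", strategy_list_2_display[i].per_stream_method[s]);
		printf("\n");
	}
	return 0;
}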
#include "dml2_pmo_factory.h" -#include "dml2_pmo_dcn4.h" #include "dml2_debug.h" #include "lib_float_math.h" #include "dml2_pmo_dcn4_fams2.h" static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding -static const enum dml2_pmo_pstate_strategy base_strategy_list_1_display[][PMO_DCN4_MAX_DISPLAYS] = { +static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = { // VActive Preferred - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, // Then SVP - { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, // Then VBlank - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Finally DRR - { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = false, + }, + + // Then DRR + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, + + // Finally VBlank, but allow base clocks for latency to increase + /* + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, + */ }; -static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS); +static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / sizeof(struct dml2_pmo_pstate_strategy); -static const enum dml2_pmo_pstate_strategy base_strategy_list_2_display[][PMO_DCN4_MAX_DISPLAYS] = { +static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = { // VActive only is preferred - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, // Then VActive + VBlank - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = false, + }, // Then VBlank only - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, 
dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = false, + }, // Then SVP + VBlank - { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = false, + }, // Then SVP + DRR - { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, // Then SVP + SVP - { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, // Then DRR + VActive - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Then DRR + VBlank - //{ dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, - - // Finally DRR + DRR - { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, + + // Then DRR + DRR + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, + + // Finally VBlank, but allow base clocks for latency to increase + /* + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, + */ }; -static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS); - -static const enum dml2_pmo_pstate_strategy base_strategy_list_3_display[][PMO_DCN4_MAX_DISPLAYS] = { - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // All VActive - - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // VActive + 1 VBlank - - //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 2 VBlank - - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // All VBlank - - //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // VBlank + 1 DRR - - //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // VBlank + 2 DRR - - { 
dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // All DRR +static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / sizeof(struct dml2_pmo_pstate_strategy); + +static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = { + // All VActive + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, + + // VActive + 1 VBlank + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = false, + }, + + // All VBlank + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = false, + }, + + // All DRR + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, + + // All VBlank, with state increase allowed + /* + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, + .allow_state_increase = true, + }, + */ }; -static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS); - -static const enum dml2_pmo_pstate_strategy base_strategy_list_4_display[][PMO_DCN4_MAX_DISPLAYS] = { - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // All VActive - - { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank }, // VActive + 1 VBlank - - //{ dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 2 VBlank - - //{ dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 3 VBlank - - { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // All Vblank - - //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 1 DRR - - //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 2 DRR - - //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 3 DRR - - { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // All DRR +static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / sizeof(struct dml2_pmo_pstate_strategy); + +static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = { + // All VActive + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, 
dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, + .allow_state_increase = true, + }, + + // VActive + 1 VBlank + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank }, + .allow_state_increase = false, + }, + + // All Vblank + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, + .allow_state_increase = false, + }, + + // All DRR + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, + .allow_state_increase = true, + }, + + // All VBlank, with state increase allowed + /* + { + .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, + .allow_state_increase = true, + }, + */ }; -static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS); +static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / sizeof(struct dml2_pmo_pstate_strategy); static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated) @@ -275,7 +334,6 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o in_out->cfg_support_info->plane_support_info[i].dpps_used)) { result = false; } else { - free_pipes -= planes_on_stream; break; } } else { @@ -296,9 +354,9 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o return result; } -static enum dml2_pmo_pstate_strategy convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_strategy base_strategy) +static enum dml2_pmo_pstate_method convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_method base_strategy) { - enum dml2_pmo_pstate_strategy variant_strategy = 0; + enum dml2_pmo_pstate_method variant_strategy = 0; switch (base_strategy) { case dml2_pmo_pstate_strategy_vactive: @@ -327,11 +385,9 @@ static enum dml2_pmo_pstate_strategy convert_strategy_to_drr_variant(const enum return variant_strategy; } -static enum dml2_pmo_pstate_strategy(*get_expanded_strategy_list( - struct dml2_pmo_init_data *init_data, - int stream_count))[PMO_DCN4_MAX_DISPLAYS] +static struct dml2_pmo_pstate_strategy *get_expanded_strategy_list(struct dml2_pmo_init_data *init_data, int stream_count) { - enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL; + struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL; switch (stream_count) { case 1: @@ -361,23 +417,23 @@ static unsigned int get_num_expanded_strategies( } static void insert_strategy_into_expanded_list( - const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS], + const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy, int stream_count, struct dml2_pmo_init_data *init_data) { - enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL; + struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL; expanded_strategy_list = get_expanded_strategy_list(init_data, stream_count); if (expanded_strategy_list) { - memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++], - per_stream_pstate_strategy, - sizeof(enum 
dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS); + memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy)); + + init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++; } } static void expand_base_strategy(struct dml2_pmo_instance *pmo, - const enum dml2_pmo_pstate_strategy base_strategy_list[PMO_DCN4_MAX_DISPLAYS], + const struct dml2_pmo_pstate_strategy *base_strategy, unsigned int stream_count) { bool skip_to_next_stream; @@ -386,19 +442,21 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo, unsigned int i, j; unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 }; unsigned int stream_iteration_indices[PMO_DCN4_MAX_DISPLAYS] = { 0 }; - enum dml2_pmo_pstate_strategy cur_strategy_list[PMO_DCN4_MAX_DISPLAYS] = { 0 }; + struct dml2_pmo_pstate_strategy cur_strategy_list = { 0 }; /* determine number of displays per method */ for (i = 0; i < stream_count; i++) { /* increment the count of the earliest index with the same method */ for (j = 0; j < stream_count; j++) { - if (base_strategy_list[i] == base_strategy_list[j]) { + if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) { num_streams_per_method[j] = num_streams_per_method[j] + 1; break; } } } + cur_strategy_list.allow_state_increase = base_strategy->allow_state_increase; + i = 0; /* uses a while loop instead of recursion to build permutations of base strategy */ while (stream_iteration_indices[0] < stream_count) { @@ -409,12 +467,12 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo, /* determine what to do for this iteration */ if (stream_iteration_indices[i] < stream_count && num_streams_per_method[stream_iteration_indices[i]] != 0) { /* decrement count and assign method */ - cur_strategy_list[i] = base_strategy_list[stream_iteration_indices[i]]; + cur_strategy_list.per_stream_pstate_method[i] = base_strategy->per_stream_pstate_method[stream_iteration_indices[i]]; num_streams_per_method[stream_iteration_indices[i]] -= 1; if (i >= stream_count - 1) { /* insert into strategy list */ - insert_strategy_into_expanded_list(cur_strategy_list, stream_count, &pmo->init_data); + insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, &pmo->init_data); expanded_strategy_added = true; } else { /* skip to next stream */ @@ -450,55 +508,122 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo, } } -static void expand_variant_strategy(struct dml2_pmo_instance *pmo, - const enum dml2_pmo_pstate_strategy base_strategy_list[PMO_DCN4_MAX_DISPLAYS], + +static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy, + const struct dml2_pmo_pstate_strategy *variant_strategy, + unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS], + unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS], unsigned int stream_count) { + bool valid = true; unsigned int i; - bool variant_found = false; - enum dml2_pmo_pstate_strategy cur_strategy_list[PMO_DCN4_MAX_DISPLAYS] = { 0 }; + /* check all restrictions are met */ + for (i = 0; i < stream_count; i++) { + /* vblank + vblank_drr variants are invalid */ + if (base_strategy->per_stream_pstate_method[i] == dml2_pmo_pstate_strategy_vblank && + ((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) || + num_streams_per_variant_method[i] > 1)) { + valid = false; + break; + } + } - /* setup variant list as base to 
start */ - memcpy(cur_strategy_list, base_strategy_list, sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS); + return valid; +} + +static void expand_variant_strategy(struct dml2_pmo_instance *pmo, + const struct dml2_pmo_pstate_strategy *base_strategy, + unsigned int stream_count) +{ + bool variant_found; + unsigned int i, j; + unsigned int method_index; + unsigned int stream_index; + unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 }; + unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 }; + unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 }; + enum dml2_pmo_pstate_method per_stream_variant_method[DML2_MAX_PLANES]; + struct dml2_pmo_pstate_strategy variant_strategy = { 0 }; + /* determine number of displays per method */ for (i = 0; i < stream_count; i++) { - cur_strategy_list[i] = convert_strategy_to_drr_variant(base_strategy_list[i]); + /* increment the count of the earliest index with the same method */ + for (j = 0; j < stream_count; j++) { + if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) { + num_streams_per_method[j] = num_streams_per_method[j] + 1; + break; + } + } + + per_stream_variant_method[i] = convert_strategy_to_drr_variant(base_strategy->per_stream_pstate_method[i]); + } + memcpy(num_streams_per_base_method, num_streams_per_method, sizeof(unsigned int) * PMO_DCN4_MAX_DISPLAYS); + + memcpy(&variant_strategy, base_strategy, sizeof(struct dml2_pmo_pstate_strategy)); + + method_index = 0; + /* uses a while loop instead of recursion to build permutations of base strategy */ + while (num_streams_per_base_method[0] > 0 || method_index != 0) { + if (method_index == stream_count) { + /* construct variant strategy */ + variant_found = false; + stream_index = 0; + + for (i = 0; i < stream_count; i++) { + for (j = 0; j < num_streams_per_base_method[i]; j++) { + variant_strategy.per_stream_pstate_method[stream_index++] = base_strategy->per_stream_pstate_method[i]; + } + + for (j = 0; j < num_streams_per_variant_method[i]; j++) { + variant_strategy.per_stream_pstate_method[stream_index++] = per_stream_variant_method[i]; + if (base_strategy->per_stream_pstate_method[i] != per_stream_variant_method[i]) { + variant_found = true; + } + } + } - if (cur_strategy_list[i] != base_strategy_list[i]) { - variant_found = true; + if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) { + expand_base_strategy(pmo, &variant_strategy, stream_count); + } + + /* rollback to earliest method with bases remaining */ + for (method_index = stream_count - 1; method_index > 0; method_index--) { + if (num_streams_per_base_method[method_index]) { + /* bases remaining */ + break; + } else { + /* reset counters */ + num_streams_per_base_method[method_index] = num_streams_per_method[method_index]; + num_streams_per_variant_method[method_index] = 0; + } + } } - if (i == stream_count - 1 && variant_found) { - insert_strategy_into_expanded_list(cur_strategy_list, stream_count, &pmo->init_data); + if (num_streams_per_base_method[method_index]) { + num_streams_per_base_method[method_index]--; + num_streams_per_variant_method[method_index]++; + + method_index++; + } else if (method_index != 0) { + method_index++; } } } static void expand_base_strategies( struct dml2_pmo_instance *pmo, - const enum dml2_pmo_pstate_strategy(*base_strategies_list)[PMO_DCN4_MAX_DISPLAYS], + const struct dml2_pmo_pstate_strategy 
*base_strategies_list, const unsigned int num_base_strategies, unsigned int stream_count) { unsigned int i; - unsigned int num_pre_variant_strategies; - enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS]; /* expand every explicit base strategy (except all DRR) */ - for (i = 0; i < num_base_strategies - 1; i++) { - expand_base_strategy(pmo, base_strategies_list[i], stream_count); - } - - /* expand base strategies to DRR variants */ - num_pre_variant_strategies = get_num_expanded_strategies(&pmo->init_data, stream_count); - expanded_strategy_list = get_expanded_strategy_list(&pmo->init_data, stream_count); - for (i = 0; i < num_pre_variant_strategies; i++) { - expand_variant_strategy(pmo, expanded_strategy_list[i], stream_count); + for (i = 0; i < num_base_strategies; i++) { + expand_base_strategy(pmo, &base_strategies_list[i], stream_count); + expand_variant_strategy(pmo, &base_strategies_list[i], stream_count); } - - /* add back all DRR */ - insert_strategy_into_expanded_list(base_strategies_list[num_base_strategies - 1], stream_count, &pmo->init_data); } bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) @@ -546,8 +671,6 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) /* populate list */ expand_base_strategies(pmo, base_strategy_list_4_display, base_strategy_list_4_display_size, 4); break; - default: - break; } } @@ -591,6 +714,8 @@ bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out) &in_out->base_display_config->display_config; const struct dml2_core_mode_support_result *mode_support_result = &in_out->base_display_config->mode_support_result; + struct dml2_optimization_stage4_state *state = + &in_out->base_display_config->stage4; if (in_out->instance->options->disable_dyn_odm || (in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1)) @@ -611,28 +736,30 @@ bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out) */ if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 && mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1) - in_out->base_display_config->stage4.unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true; + state->unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true; for (i = 0; i < display_config->num_streams; i++) { if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm) - in_out->base_display_config->stage4.unoptimizable_streams[i] = true; + state->unoptimizable_streams[i] = true; else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid && in_out->instance->options->disable_dyn_odm_for_stream_with_svp) - in_out->base_display_config->stage4.unoptimizable_streams[i] = true; + state->unoptimizable_streams[i] = true; /* * ODM Combine requires horizontal timing divisible by 2 so each * ODM segment has the same size. */ else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2)) - in_out->base_display_config->stage4.unoptimizable_streams[i] = true; + state->unoptimizable_streams[i] = true; /* * Our hardware support seamless ODM transitions for DP encoders * only. 
*/ else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder)) - in_out->base_display_config->stage4.unoptimizable_streams[i] = true; + state->unoptimizable_streams[i] = true; } + state->performed = true; + return true; } @@ -783,6 +910,7 @@ static void build_synchronized_timing_groups( /* clear all group masks */ memset(s->pmo_dcn4.synchronized_timing_group_masks, 0, sizeof(s->pmo_dcn4.synchronized_timing_group_masks)); memset(s->pmo_dcn4.group_is_drr_enabled, 0, sizeof(s->pmo_dcn4.group_is_drr_enabled)); + memset(s->pmo_dcn4.group_is_drr_active, 0, sizeof(s->pmo_dcn4.group_is_drr_active)); memset(s->pmo_dcn4.group_line_time_us, 0, sizeof(s->pmo_dcn4.group_line_time_us)); s->pmo_dcn4.num_timing_groups = 0; @@ -804,15 +932,19 @@ static void build_synchronized_timing_groups( /* if drr is in use, timing is not sychnronizable */ if (master_timing->drr_config.enabled) { s->pmo_dcn4.group_is_drr_enabled[timing_group_idx] = true; + s->pmo_dcn4.group_is_drr_active[timing_group_idx] = !master_timing->drr_config.disallowed && + (master_timing->drr_config.drr_active_fixed || master_timing->drr_config.drr_active_variable); continue; } /* find synchronizable timing groups */ for (j = i + 1; j < display_config->display_config.num_streams; j++) { if (memcmp(master_timing, - &display_config->display_config.stream_descriptors[j].timing, - sizeof(struct dml2_timing_cfg)) == 0 && - display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) { + &display_config->display_config.stream_descriptors[j].timing, + sizeof(struct dml2_timing_cfg)) == 0 && + display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder && + (display_config->display_config.stream_descriptors[i].output.output_encoder != dml2_hdmi || //hdmi requires formats match + display_config->display_config.stream_descriptors[i].output.output_format == display_config->display_config.stream_descriptors[j].output.output_format)) { set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j); set_bit_in_bitfield(&stream_mapped_mask, j); } @@ -884,7 +1016,7 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo, stream_descriptor = &display_config->display_config.stream_descriptors[i]; stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i]; - if (!stream_descriptor->timing.drr_config.enabled || stream_descriptor->overrides.disable_fams2_drr) + if (!stream_descriptor->timing.drr_config.enabled) return false; /* cannot support required vtotal */ @@ -967,35 +1099,24 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, return true; } -static void insert_into_candidate_list(const enum dml2_pmo_pstate_strategy *per_stream_pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch) +static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch) { - int stream_index; - - scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = true; - - for (stream_index = 0; stream_index < stream_count; stream_index++) { - scratch->pmo_dcn4.per_stream_pstate_strategy[scratch->pmo_dcn4.num_pstate_candidates][stream_index] = per_stream_pstate_strategy[stream_index]; - - if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank || - 
per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) - scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = false; - } - + scratch->pmo_dcn4.pstate_strategy_candidates[scratch->pmo_dcn4.num_pstate_candidates] = *pstate_strategy; scratch->pmo_dcn4.num_pstate_candidates++; } -static bool all_planes_match_strategy(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_strategy strategy) +static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_method method) { unsigned char i; enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na; - if (strategy == dml2_pmo_pstate_strategy_vactive || strategy == dml2_pmo_pstate_strategy_fw_vactive_drr) + if (method == dml2_pmo_pstate_strategy_vactive || method == dml2_pmo_pstate_strategy_fw_vactive_drr) matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive; - else if (strategy == dml2_pmo_pstate_strategy_vblank || strategy == dml2_pmo_pstate_strategy_fw_vblank_drr) + else if (method == dml2_pmo_pstate_strategy_vblank || method == dml2_pmo_pstate_strategy_fw_vblank_drr) matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank; - else if (strategy == dml2_pmo_pstate_strategy_fw_svp) + else if (method == dml2_pmo_pstate_strategy_fw_svp) matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp; - else if (strategy == dml2_pmo_pstate_strategy_fw_drr) + else if (method == dml2_pmo_pstate_strategy_fw_drr) matching_strategy = dml2_uclk_pstate_change_strategy_force_drr; for (i = 0; i < DML2_MAX_PLANES; i++) { @@ -1027,12 +1148,12 @@ static void build_method_scheduling_params( static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta( struct dml2_pmo_instance *pmo, - enum dml2_pmo_pstate_strategy stream_pstate_strategy, + enum dml2_pmo_pstate_method stream_pstate_method, int stream_idx) { struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL; - switch (stream_pstate_strategy) { + switch (stream_pstate_method) { case dml2_pmo_pstate_strategy_vactive: case dml2_pmo_pstate_strategy_fw_vactive_drr: stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common; @@ -1063,7 +1184,7 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta( static bool is_timing_group_schedulable( struct dml2_pmo_instance *pmo, const struct display_configuation_with_meta *display_cfg, - const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS], + const struct dml2_pmo_pstate_strategy *pstate_strategy, const unsigned int timing_group_idx, struct dml2_fams2_per_method_common_meta *group_fams2_meta) { @@ -1082,7 +1203,7 @@ static bool is_timing_group_schedulable( } /* init allow start and end lines for timing group */ - stream_method_fams2_meta = get_per_method_common_meta(pmo, per_stream_pstate_strategy[base_stream_idx], base_stream_idx); + stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx); if (!stream_method_fams2_meta) return false; @@ -1091,9 +1212,9 @@ static bool is_timing_group_schedulable( group_fams2_meta->period_us = stream_method_fams2_meta->period_us; for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) { if 
(is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) { - stream_method_fams2_meta = get_per_method_common_meta(pmo, per_stream_pstate_strategy[i], i); + stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i); if (!stream_method_fams2_meta) - continue; + return false; if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) { /* set group allow start to larger otg vline */ @@ -1123,7 +1244,7 @@ static bool is_timing_group_schedulable( static bool is_config_schedulable( struct dml2_pmo_instance *pmo, const struct display_configuation_with_meta *display_cfg, - const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS]) + const struct dml2_pmo_pstate_strategy *pstate_strategy) { unsigned int i, j; bool schedulable; @@ -1146,7 +1267,7 @@ static bool is_config_schedulable( for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) { s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i; s->pmo_dcn4.sorted_group_gtl_period_index[i] = i; - if (!is_timing_group_schedulable(pmo, display_cfg, per_stream_pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) { + if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) { /* synchronized timing group was not schedulable */ schedulable = false; break; @@ -1248,7 +1369,7 @@ static bool is_config_schedulable( unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1]; if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us || - s->pmo_dcn4.group_is_drr_enabled[sorted_ip1]) { + (s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) { schedulable = false; break; } @@ -1260,8 +1381,8 @@ static bool is_config_schedulable( /* STAGE 4: When using HW exclusive modes, check disallow alignments are within allowed threshold */ if (s->pmo_dcn4.num_timing_groups == 2 && - !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, per_stream_pstate_strategy[0]) && - !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, per_stream_pstate_strategy[1])) { + !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[0]) && + !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[1])) { double period_ratio; double max_shift_us; double shift_per_period; @@ -1290,44 +1411,48 @@ static bool is_config_schedulable( } static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo, - const struct display_configuation_with_meta *display_cfg, - const enum dml2_pmo_pstate_strategy stream_pstate_strategy, - unsigned int stream_index) + const struct display_configuation_with_meta *display_cfg, + const enum dml2_pmo_pstate_method stream_pstate_method, + unsigned int stream_index) { const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index]; bool strategy_matches_drr_requirements = true; /* check if strategy is compatible with stream drr capability and strategy */ - if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_strategy) && + if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) && display_cfg->display_config.num_streams > 1 && stream_descriptor->timing.drr_config.enabled && (stream_descriptor->timing.drr_config.drr_active_fixed || stream_descriptor->timing.drr_config.drr_active_variable)) { /* DRR is 
active, so config may become unschedulable */ strategy_matches_drr_requirements = false; - } else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_strategy) && - is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_strategy) && + } else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) && + is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) && stream_descriptor->timing.drr_config.enabled && stream_descriptor->timing.drr_config.drr_active_variable) { /* DRR is variable, fw exclusive methods require DRR to be clamped */ strategy_matches_drr_requirements = false; - } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_strategy) && + } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) && pmo->options->disable_drr_var_when_var_active && stream_descriptor->timing.drr_config.enabled && stream_descriptor->timing.drr_config.drr_active_variable) { /* DRR variable is active, but policy blocks DRR for p-state when this happens */ strategy_matches_drr_requirements = false; - } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_strategy) && + } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) && (pmo->options->disable_drr_var || !stream_descriptor->timing.drr_config.enabled || stream_descriptor->timing.drr_config.disallowed)) { /* DRR variable strategies are disallowed due to settings or policy */ strategy_matches_drr_requirements = false; - } else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_strategy) && - (pmo->options->disable_drr_clamped || - !stream_descriptor->timing.drr_config.enabled)) { + } else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_method) && + (pmo->options->disable_drr_clamped || + (!stream_descriptor->timing.drr_config.enabled || + (!stream_descriptor->timing.drr_config.drr_active_fixed && !stream_descriptor->timing.drr_config.drr_active_variable)) || + (pmo->options->disable_drr_clamped_when_var_active && + stream_descriptor->timing.drr_config.enabled && + stream_descriptor->timing.drr_config.drr_active_variable))) { /* DRR fixed strategies are disallowed due to settings or policy */ strategy_matches_drr_requirements = false; - } else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_strategy) && + } else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) && pmo->options->disable_fams2) { /* FW modes require FAMS2 */ strategy_matches_drr_requirements = false; @@ -1338,7 +1463,7 @@ static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo, static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo, const struct display_configuation_with_meta *display_cfg, - const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS]) + const struct dml2_pmo_pstate_strategy *pstate_strategy) { struct dml2_pmo_scratch *s = &pmo->scratch; @@ -1359,28 +1484,28 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins // Tabulate everything for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) { - if (!all_planes_match_strategy(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index], - per_stream_pstate_strategy[stream_index])) { + if (!all_planes_match_method(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index], + pstate_strategy->per_stream_pstate_method[stream_index])) { strategy_matches_forced_requirements = 
false; break; } strategy_matches_drr_requirements &= - stream_matches_drr_policy(pmo, display_cfg, per_stream_pstate_strategy[stream_index], stream_index); + stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index); - if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp || - per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) { + if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp || + pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) { svp_count++; set_bit_in_bitfield(&svp_stream_mask, stream_index); - } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_drr) { + } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) { drr_count++; set_bit_in_bitfield(&drr_stream_mask, stream_index); - } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vactive || - per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) { + } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive || + pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) { vactive_count++; set_bit_in_bitfield(&vactive_stream_mask, stream_index); - } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank || - per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) { + } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank || + pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) { vblank_count++; set_bit_in_bitfield(&vblank_stream_mask, stream_index); } @@ -1389,7 +1514,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins if (!strategy_matches_forced_requirements || !strategy_matches_drr_requirements) return false; - if (vactive_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask))) + if (vactive_count > 0 && !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask)) return false; if (vblank_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vblank(pmo, display_cfg, vblank_stream_mask))) @@ -1401,7 +1526,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins if (svp_count > 0 && (pmo->options->disable_svp || !all_timings_support_svp(pmo, display_cfg, svp_stream_mask))) return false; - return is_config_schedulable(pmo, display_cfg, per_stream_pstate_strategy); + return is_config_schedulable(pmo, display_cfg, pstate_strategy); } static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask) @@ -1457,6 +1582,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, (stream_fams2_meta->nom_vtotal * timing->h_total); stream_fams2_meta->nom_frame_time_us = (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us; + stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active; if (stream_descriptor->timing.drr_config.enabled == true) { if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) { @@ -1510,7 +1636,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, 
stream_fams2_meta->method_vactive.common.allow_start_otg_vline = timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; stream_fams2_meta->method_vactive.common.allow_end_otg_vline = - timing->v_blank_end + timing->v_active - + stream_fams2_meta->vblank_start - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; } else { stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0; @@ -1520,8 +1646,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta); /* vblank */ - stream_fams2_meta->method_vblank.common.allow_start_otg_vline = - timing->v_blank_end + timing->v_active; + stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start; stream_fams2_meta->method_vblank.common.allow_end_otg_vline = stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1; stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us; @@ -1555,8 +1680,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + stream_fams2_meta->allow_to_target_delay_otg_vlines; stream_fams2_meta->method_subvp.common.allow_end_otg_vline = - stream_fams2_meta->nom_vtotal - - timing->v_front_porch - + stream_fams2_meta->vblank_start - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us; build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta); @@ -1565,20 +1689,21 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, stream_fams2_meta->method_drr.programming_delay_otg_vlines = (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us); stream_fams2_meta->method_drr.common.allow_start_otg_vline = - stream_fams2_meta->nom_vtotal + + stream_fams2_meta->vblank_start + stream_fams2_meta->allow_to_target_delay_otg_vlines; stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us; if (display_config->display_config.num_streams <= 1) { /* only need to stretch vblank for blackout time */ stream_fams2_meta->method_drr.stretched_vtotal = - stream_fams2_meta->method_drr.common.allow_start_otg_vline + + stream_fams2_meta->nom_vtotal + + stream_fams2_meta->allow_to_target_delay_otg_vlines + stream_fams2_meta->min_allow_width_otg_vlines + stream_fams2_meta->dram_clk_change_blackout_otg_vlines; } else { /* multi display needs to always be schedulable */ stream_fams2_meta->method_drr.stretched_vtotal = - stream_fams2_meta->method_drr.common.allow_start_otg_vline + - stream_fams2_meta->nom_vtotal + + stream_fams2_meta->nom_vtotal * 2 + + stream_fams2_meta->allow_to_target_delay_otg_vlines + stream_fams2_meta->min_allow_width_otg_vlines + stream_fams2_meta->dram_clk_change_blackout_otg_vlines; } @@ -1611,7 +1736,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp struct display_configuation_with_meta *display_config; const struct dml2_plane_parameters *plane_descriptor; - const enum dml2_pmo_pstate_strategy(*strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL; + const struct dml2_pmo_pstate_strategy *strategy_list = NULL; unsigned int strategy_list_size = 0; unsigned char plane_index, stream_index, i; @@ -1623,6 +1748,10 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct 
dml2_pmo_init_for_pstate_supp memset(s, 0, sizeof(struct dml2_pmo_scratch)); + if (display_config->display_config.overrides.all_streams_blanked) { + return true; + } + pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency; pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size; pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency; @@ -1652,6 +1781,9 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp build_synchronized_timing_groups(pmo, display_config); strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams); + if (!strategy_list) + return false; + strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams); if (strategy_list_size == 0) @@ -1660,8 +1792,8 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp s->pmo_dcn4.num_pstate_candidates = 0; for (i = 0; i < strategy_list_size && s->pmo_dcn4.num_pstate_candidates < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) { - if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, strategy_list[i])) { - insert_into_candidate_list(strategy_list[i], display_config->display_config.num_streams, s); + if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, &strategy_list[i])) { + insert_into_candidate_list(&strategy_list[i], display_config->display_config.num_streams, s); } } @@ -1778,7 +1910,8 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met if (is_bit_set_in_bitfield(plane_mask, plane_index)) { plane = &display_config->display_config.plane_descriptors[plane_index]; - plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000); + plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0, + plane->overrides.reserved_vblank_time_ns); display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank; @@ -1857,26 +1990,26 @@ static bool setup_display_config(struct display_configuation_with_meta *display_ for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) { - if (pmo->scratch.pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_na) { + if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) { success = false; break; - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vactive) { + } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive) { setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vblank) { + } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank) { setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == 
dml2_pmo_pstate_strategy_fw_svp) { + } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp) { fams2_required = true; setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) { + } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) { fams2_required = true; setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) { + } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) { fams2_required = true; setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) { + } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) { fams2_required = true; setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]); - } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_drr) { + } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) { fams2_required = true; setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]); } @@ -1917,6 +2050,10 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp int MIN_VACTIVE_MARGIN_DRR = 0; int REQUIRED_RESERVED_TIME = 0; + if (in_out->base_display_config->display_config.overrides.all_streams_blanked) { + return true; + } + MIN_VACTIVE_MARGIN_VBLANK = INT_MIN; MIN_VACTIVE_MARGIN_DRR = INT_MIN; REQUIRED_RESERVED_TIME = (int)in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us; @@ -1927,34 +2064,34 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) { struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index]; - if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vactive || - s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) { + if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive || + s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) { if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) || 
get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) { p_state_supported = false; break; } - } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vblank || - s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) { + } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank || + s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) { if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < REQUIRED_RESERVED_TIME || get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) { p_state_supported = false; break; } - } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp || - s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) { + } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp || + s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) { if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) { p_state_supported = false; break; } - } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_drr) { - if (!all_planes_match_strategy(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) || + } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) { + if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) || get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) { p_state_supported = false; break; } - } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_na) { + } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) { p_state_supported = false; break; } @@ -1971,8 +2108,8 @@ bool pmo_dcn4_fams2_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pst memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta)); if (in_out->last_candidate_failed) { - if (s->pmo_dcn4.allow_state_increase_for_strategy[s->pmo_dcn4.cur_pstate_candidate] && - s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index) { + if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].allow_state_increase && + s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index - 1) { s->pmo_dcn4.cur_latency_index++; success = true; @@ 
-2060,15 +2197,15 @@ bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in unsigned int i; - for (i = 0; i < in_out->base_display_config->display_config.num_streams; i++) { + for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) { if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 && pmo->scratch.pmo_dcn4.z8_vblank_optimizable && - in_out->base_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) { + in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * 1000) { success = false; break; } if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 && - in_out->base_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us) { + in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * 1000) { success = false; break; } @@ -2087,8 +2224,11 @@ bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in if (!in_out->last_candidate_failed) { if (pmo->scratch.pmo_dcn4.cur_stutter_candidate < pmo->scratch.pmo_dcn4.num_stutter_candidates) { - for (i = 0; i < in_out->optimized_display_config->display_config.num_streams; i++) { - in_out->optimized_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us = pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate]; + for (i = 0; i < in_out->optimized_display_config->display_config.num_planes; i++) { + /* take the max of the current and the optimal reserved time */ + in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns = + (long)math_max2(pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate] * 1000, + in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns); } success = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h index 75175d93add40e..0c25bd3e9ac021 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_PMO_FAMS2_DCN4_H__ #define __DML2_PMO_FAMS2_DCN4_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c index e0b9ece7901da9..add51d41a51581 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c @@ -2,10 +2,8 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
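For readers following the dml2_pmo_dcn4_fams2.c hunks above: the recurring change is that the parallel arrays per_stream_pstate_strategy[][] and allow_state_increase_for_strategy[] are replaced by a single struct dml2_pmo_pstate_strategy (defined in dml2_internal_shared_types.h further down in this diff) that bundles a per-stream method array with its allow_state_increase flag. A minimal standalone sketch of that data-structure shape, using simplified, illustrative names and sizes rather than the driver's actual definitions:

	/* Illustrative sketch only; MAX_STREAMS, MAX_CANDIDATES and the enum
	 * values are placeholders, not the driver's real constants. */
	#include <stdbool.h>

	#define MAX_STREAMS 4
	#define MAX_CANDIDATES 8

	enum pstate_method { METHOD_NA = 0, METHOD_VACTIVE, METHOD_VBLANK, METHOD_FW_DRR };

	struct pstate_strategy {
		enum pstate_method per_stream_method[MAX_STREAMS];
		bool allow_state_increase;
	};

	struct candidate_list {
		struct pstate_strategy candidates[MAX_CANDIDATES];
		int num_candidates;
	};

	/* With the bundled struct, inserting a validated strategy is a single
	 * struct copy; the allow_state_increase flag travels with its per-stream
	 * methods instead of living in a parallel array that must stay in sync. */
	static void insert_candidate(struct candidate_list *list, const struct pstate_strategy *s)
	{
		if (list->num_candidates < MAX_CANDIDATES)
			list->candidates[list->num_candidates++] = *s;
	}

This mirrors why insert_into_candidate_list() above collapses to a one-line assignment once the strategy is a value type.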
- #include "dml2_pmo_factory.h" #include "dml2_pmo_dcn4_fams2.h" -#include "dml2_pmo_dcn4.h" #include "dml2_pmo_dcn3.h" #include "dml2_external_lib_deps.h" @@ -28,15 +26,15 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance * { bool result = false; - if (!out) + if (out == 0) return false; memset(out, 0, sizeof(struct dml2_pmo_instance)); switch (project_id) { case dml2_project_dcn4x_stage1: - out->initialize = pmo_dcn4_initialize; - out->optimize_dcc_mcache = pmo_dcn4_optimize_dcc_mcache; + out->initialize = pmo_dcn4_fams2_initialize; + out->optimize_dcc_mcache = pmo_dcn4_fams2_optimize_dcc_mcache; result = true; break; case dml2_project_dcn4x_stage2: diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h index 9d3dc5e94be12d..7218de1824cca5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_PMO_FACTORY_H__ #define __DML2_PMO_FACTORY_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c index e73579f1a88e50..e17b5ceba4471e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "lib_float_math.h" #define ASSERT(condition) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h index 537cf6fd4c1513..e13b0c5939b019 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __LIB_FLOAT_MATH_H__ #define __LIB_FLOAT_MATH_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c index 1b6dbfaa7ae8ae..d0e026d981b503 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #include "dml2_top_optimization.h" #include "dml2_internal_shared_types.h" #include "dml_top_mcache.h" @@ -220,7 +219,6 @@ bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config); highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency; lowest_state = 0; - cur_state = 0; while (highest_state > lowest_state) { cur_state = (highest_state + lowest_state) / 2; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h index 1536afcbf73a44..9f22ab33eab126 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_TOP_OPTIMIZATION_H__ #define __DML2_TOP_OPTIMIZATION_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c index 2fb3e2f45e0768..f9f8869cd8b839 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "dml2_internal_shared_types.h" #include "dml_top.h" #include "dml2_mcg_factory.h" @@ -28,6 +27,7 @@ bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out) bool result = false; memset(l, 0, sizeof(struct dml2_initialize_instance_locals)); + memset(dml, 0, sizeof(struct dml2_instance)); memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities)); memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb)); @@ -96,14 +96,12 @@ bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out) { struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance; struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals; - /* Borrow the build_mode_programming_locals programming struct for DPMM call. 
*/ - struct dml2_display_cfg_programming *dpmm_programming = dml->scratch.build_mode_programming_locals.mode_programming_params.programming; + struct dml2_display_cfg_programming *dpmm_programming = &dml->dpmm_instance.dpmm_scratch.programming; bool result = false; bool mcache_success = false; - if (dpmm_programming) - memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming)); + memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming)); setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config); @@ -130,7 +128,7 @@ bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out) /* * Call DPMM to map all requirements to minimum clock state */ - if (result && dpmm_programming) { + if (result) { l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table; l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta; l->dppm_map_mode_params.programming = dpmm_programming; @@ -268,9 +266,18 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase); - if (vmin_success) { + if (l->optimized_display_config_with_meta.stage4.performed) { + /* + * when performed is true, optimization has applied to + * optimized_display_config_with_meta and it has passed mode + * support. However it may or may not pass the test function to + * reach actual Vmin. As long as voltage is optimized even if it + * doesn't reach Vmin level, there is still power benefit so in + * this case we will still copy this optimization into base + * display config. + */ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta)); - l->base_display_config_with_meta.stage4.success = true; + l->base_display_config_with_meta.stage4.success = vmin_success; } /* diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c index 7afd417071a51e..a342ebfbe4e7fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
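The dml_top.c hunk above separates "the Vmin optimization was performed and passed mode support" (stage4.performed) from "it actually reached the Vmin target" (stage4.success), keeping the partially optimized config either way. A minimal sketch of that control flow, with hypothetical names standing in for the real display-config-with-meta state and copy step:

	#include <stdbool.h>

	/* Illustrative stand-ins, not the driver's real types. */
	struct stage4_state {
		bool performed;   /* optimization ran and the result passed mode support */
		bool success;     /* the result also reached the Vmin target */
	};

	struct display_cfg {
		struct stage4_state stage4;
		/* ... */
	};

	static void adopt_vmin_result(struct display_cfg *base, const struct display_cfg *optimized,
				      bool vmin_target_reached)
	{
		/* Even when the Vmin target is not reached, a voltage-optimized config
		 * that passed mode support still saves power, so it replaces the base
		 * config; only the success flag records whether the target was met. */
		if (optimized->stage4.performed) {
			*base = *optimized;
			base->stage4.success = vmin_target_reached;
		}
	}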
- #include "dml2_debug.h" #include "dml_top_mcache.h" @@ -143,12 +142,12 @@ static unsigned int count_elements_in_span(int *array, unsigned int array_size, while (span_start_index < array_size) { for (i = span_start_index; i < array_size; i++) { - if (array[i] - span_start_value > span) { + if (array[i] - span_start_value <= span) { if (i - span_start_index + 1 > greatest_element_count) { greatest_element_count = i - span_start_index + 1; } + } else break; - } } span_start_index++; @@ -208,9 +207,9 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi int temp, p0shift, p1shift; unsigned int plane_index = 0; unsigned int i; - char odm_combine_factor = 1; - char mpc_combine_factor = 1; - char num_dpps; + unsigned int odm_combine_factor; + unsigned int mpc_combine_factor; + unsigned int num_dpps; unsigned int num_boundaries; enum dml2_scaling_transform scaling_transform; const struct dml2_plane_parameters *plane; @@ -227,10 +226,10 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi plane = ¶ms->display_cfg->plane_descriptors[plane_index]; stream = ¶ms->display_cfg->stream_descriptors[plane->stream_index]; - odm_combine_factor = (char)params->cfg_support_info->stream_support_info[plane->stream_index].odms_used; + num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used; if (odm_combine_factor == 1) - mpc_combine_factor = (char)params->cfg_support_info->plane_support_info[plane_index].dpps_used; + num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used; else mpc_combine_factor = 1; @@ -260,13 +259,13 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi // The last element in the unshifted boundary array will always be the first pixel outside the // plane, which means theres no mcache associated with it, so -1 num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1; - if (count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0, - num_boundaries, max_per_pipe_vp_p0) <= 1) { + if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0, + num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) { p0pass = true; } num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1; - if (count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1, - num_boundaries, max_per_pipe_vp_p1) <= 1) { + if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1, + num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) { p1pass = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h index bb12e4c306908e..7b1f6f7143d07c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #ifndef __DML_TOP_MCACHE_H__ #define __DML_TOP_MCACHE_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c index de7d8a6a2d3d92..e9b8e10695ae0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #include "dml2_debug.h" int dml2_printf(const char *format, ...) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h index 0403238df10725..d51a1b6c62f263 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_DEBUG_H__ #define __DML2_DEBUG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h index 5632cdacb7f4f7..aeac9f159fa5cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. - #ifndef __DML2_INTERNAL_SHARED_TYPES_H__ #define __DML2_INTERNAL_SHARED_TYPES_H__ @@ -107,10 +106,16 @@ struct dml2_dpmm_map_watermarks_params_in_out { struct dml2_display_cfg_programming *programming; }; +struct dml2_dpmm_scratch { + struct dml2_display_cfg_programming programming; +}; + struct dml2_dpmm_instance { bool (*map_mode_to_soc_dpm)(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out); bool (*map_watermarks)(struct dml2_dpmm_map_watermarks_params_in_out *in_out); bool (*unit_test)(void); + + struct dml2_dpmm_scratch dpmm_scratch; }; /* @@ -266,6 +271,7 @@ struct dml2_fams2_meta { unsigned int contention_delay_otg_vlines; unsigned int min_allow_width_otg_vlines; unsigned int nom_vtotal; + unsigned int vblank_start; double nom_refresh_rate_hz; double nom_frame_time_us; unsigned int max_vtotal; @@ -594,7 +600,7 @@ struct dml2_pmo_optimize_for_stutter_in_out { struct display_configuation_with_meta *optimized_display_config; }; -enum dml2_pmo_pstate_strategy { +enum dml2_pmo_pstate_method { dml2_pmo_pstate_strategy_na = 0, /* hw exclusive modes */ dml2_pmo_pstate_strategy_vactive = 1, @@ -612,6 +618,11 @@ enum dml2_pmo_pstate_strategy { dml2_pmo_pstate_strategy_reserved_fw_drr_var = 22, }; +struct dml2_pmo_pstate_strategy { + enum dml2_pmo_pstate_method per_stream_pstate_method[DML2_MAX_PLANES]; + bool allow_state_increase; +}; + #define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw - dml2_pmo_pstate_strategy_na + 1)) - 1) << dml2_pmo_pstate_strategy_na) #define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr) #define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_clamped - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr) @@ -634,8 +645,7 @@ struct dml2_pmo_scratch { int stream_mask; } pmo_dcn3; struct { - enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[DML2_MAX_PLANES][DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE]; - bool 
allow_state_increase_for_strategy[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE]; + struct dml2_pmo_pstate_strategy pstate_strategy_candidates[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE]; int num_pstate_candidates; int cur_pstate_candidate; @@ -661,6 +671,7 @@ struct dml2_pmo_scratch { unsigned int num_timing_groups; unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES]; bool group_is_drr_enabled[DML2_MAX_PLANES]; + bool group_is_drr_active[DML2_MAX_PLANES]; double group_line_time_us[DML2_MAX_PLANES]; /* scheduling check locals */ @@ -676,10 +687,10 @@ struct dml2_pmo_init_data { union { struct { /* populated once during initialization */ - enum dml2_pmo_pstate_strategy expanded_strategy_list_1_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2][PMO_DCN4_MAX_DISPLAYS]; - enum dml2_pmo_pstate_strategy expanded_strategy_list_2_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2 * 2][PMO_DCN4_MAX_DISPLAYS]; - enum dml2_pmo_pstate_strategy expanded_strategy_list_3_display[PMO_DCN4_MAX_BASE_STRATEGIES * 6 * 2][PMO_DCN4_MAX_DISPLAYS]; - enum dml2_pmo_pstate_strategy expanded_strategy_list_4_display[PMO_DCN4_MAX_BASE_STRATEGIES * 24 * 2][PMO_DCN4_MAX_DISPLAYS]; + struct dml2_pmo_pstate_strategy expanded_strategy_list_1_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2]; + struct dml2_pmo_pstate_strategy expanded_strategy_list_2_display[PMO_DCN4_MAX_BASE_STRATEGIES * 4 * 4]; + struct dml2_pmo_pstate_strategy expanded_strategy_list_3_display[PMO_DCN4_MAX_BASE_STRATEGIES * 6 * 6 * 6]; + struct dml2_pmo_pstate_strategy expanded_strategy_list_4_display[PMO_DCN4_MAX_BASE_STRATEGIES * 8 * 8 * 8 * 8]; unsigned int num_expanded_strategies_per_list[PMO_DCN4_MAX_DISPLAYS]; } pmo_dcn4; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h index b566f53608c6d4..140ec01545db8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h @@ -101,6 +101,7 @@ struct dml2_wrapper_scratch { struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping; bool enable_flexible_pipe_mapping; bool plane_duplicate_exists; + int hpo_stream_to_link_encoder_mapping[MAX_HPO_DP2_ENCODERS]; }; struct dml2_helper_det_policy_scratch { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c index c4c52173ef2240..11c904ae29586d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c @@ -303,7 +303,6 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m if (project == dml_project_dcn35 || project == dml_project_dcn351) { policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false; - policy->EnhancedPrefetchScheduleAccelerationFinal = 0; policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/ policy->UseOnlyMaxPrefetchModes = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 8b9dcee772660c..bde4250853b10c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -733,7 +733,7 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st * } static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *out, unsigned int location, - const struct dc_stream_state *in, const struct pipe_ctx 
*pipe) + const struct dc_stream_state *in, const struct pipe_ctx *pipe, struct dml2_context *dml2) { unsigned int output_bpc; @@ -746,8 +746,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st * case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_DISPLAY_PORT: out->OutputEncoder[location] = dml_dp; - if (is_dp2p0_output_encoder(pipe)) - out->OutputEncoder[location] = dml_dp2p0; + if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1) + out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0; break; case SIGNAL_TYPE_EDP: out->OutputEncoder[location] = dml_edp; @@ -953,7 +953,9 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out)); } -static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in) +static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, + const struct dc_stream_state *in, + const struct soc_bounding_box_st *soc) { dml_uint_t width, height; @@ -970,7 +972,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; - out->GPUVMMinPageSizeKBytes[location] = 256; + out->GPUVMMinPageSizeKBytes[location] = soc->gpuvm_min_page_size_kbytes; out->ViewportWidth[location] = width; out->ViewportHeight[location] = height; @@ -1007,7 +1009,9 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned out->ScalerEnabled[location] = false; } -static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context) +static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, + const struct dc_plane_state *in, struct dc_state *context, + const struct soc_bounding_box_st *soc) { struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL); if (!scaler_data) @@ -1018,7 +1022,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; - out->GPUVMMinPageSizeKBytes[location] = 256; + out->GPUVMMinPageSizeKBytes[location] = soc->gpuvm_min_page_size_kbytes; out->ViewportWidth[location] = scaler_data->viewport.width; out->ViewportHeight[location] = scaler_data->viewport.height; @@ -1193,6 +1197,7 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, plane_index = 0; } } + static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out, unsigned int location, const struct dc_stream_state *in) { @@ -1233,6 +1238,30 @@ static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cf } } } + +static void dml2_map_hpo_stream_encoder_to_hpo_link_encoder_index(struct dml2_context *dml2, struct dc_state *context) +{ + int i; + struct pipe_ctx *current_pipe_context; + + /* Scratch gets reset to zero in dml, but link encoder instance can be zero, so reset to -1 */ + for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) { + dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[i] = -1; + } + + /* If an HPO stream encoder is allocated to a pipe, get the instance of it's allocated HPO Link encoder */ + for (i = 0; i < MAX_PIPES; i++) { + current_pipe_context = &context->res_ctx.pipe_ctx[i]; + if 
(current_pipe_context->stream && + current_pipe_context->stream_res.hpo_dp_stream_enc && + current_pipe_context->link_res.hpo_dp_link_enc && + dc_is_dp_signal(current_pipe_context->stream->signal)) { + dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[current_pipe_context->stream_res.hpo_dp_stream_enc->inst] = + current_pipe_context->link_res.hpo_dp_link_enc->inst; + } + } +} + void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) { int i = 0, j = 0, k = 0; @@ -1256,6 +1285,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat dml2->v20.dml_core_ctx.policy.AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter; dml2_populate_pipe_to_plane_index_mapping(dml2, context); + dml2_map_hpo_stream_encoder_to_hpo_link_encoder_index(dml2, context); for (i = 0; i < context->stream_count; i++) { current_pipe_context = NULL; @@ -1276,7 +1306,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]); - populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context); + populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2); /*Call site for populate_dml_writeback_cfg_from_stream_state*/ populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback, disp_cfg_stream_location, context->streams[i]); @@ -1299,7 +1329,8 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat disp_cfg_plane_location = dml_dispcfg->num_surfaces++; populate_dummy_dml_surface_cfg(&dml_dispcfg->surface, disp_cfg_plane_location, context->streams[i]); - populate_dummy_dml_plane_cfg(&dml_dispcfg->plane, disp_cfg_plane_location, context->streams[i]); + populate_dummy_dml_plane_cfg(&dml_dispcfg->plane, disp_cfg_plane_location, + context->streams[i], &dml2->v20.dml_core_ctx.soc); dml_dispcfg->plane.BlendingAndTiming[disp_cfg_plane_location] = disp_cfg_stream_location; @@ -1315,7 +1346,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]); - populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context); + populate_dml_plane_cfg_from_plane_state( + &dml_dispcfg->plane, disp_cfg_plane_location, + context->stream_status[i].plane_states[j], context, + &dml2->v20.dml_core_ctx.soc); if (stream_mall_type == SUBVP_MAIN) { dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; @@ -1337,7 +1371,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat if (j >= 1) { populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]); - populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context); + 
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context, dml2); switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index 92238ff333a45d..9a33158b63bf8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -161,14 +161,6 @@ bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx) /* If this assert is hit then we have a link encoder dynamic management issue */ ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); - /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */ - if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) { - return (pipe_ctx->stream_res.hpo_dp_stream_enc && - pipe_ctx->link_res.hpo_dp_link_enc && - dc_is_dp_signal(pipe_ctx->stream->signal) && - (pipe_ctx->stream->link->remote_sinks[0]->sink_id == pipe_ctx->stream->sink->sink_id)); - } - return (pipe_ctx->stream_res.hpo_dp_stream_enc && pipe_ctx->link_res.hpo_dp_link_enc && dc_is_dp_signal(pipe_ctx->stream->signal)); @@ -421,7 +413,7 @@ unsigned int dml2_calc_max_scaled_time( void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx) { - int i, j = 0;; + int i, j = 0; struct mcif_arb_params *wb_arb_params = NULL; struct dcn_bw_writeback *bw_writeback = NULL; enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /*for now*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index d5dcc8b7728169..866b0abcff1bad 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -575,7 +575,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s unsigned int lowest_state_idx = 0; out_clks.p_state_supported = true; - out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dispclk_mhz * 1000; + out_clks.dispclk_khz = 0; /* No requirement, and lowest index will generally be maximum dispclk. 
*/ out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000; out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000; out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 023325e8f6e22f..0f944fcfd5a5bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -236,6 +236,7 @@ struct dml2_configuration_options { bool use_clock_dc_limits; bool gpuvm_enable; + bool force_tdlut_enable; struct dml2_soc_bb *bb_from_dmub; }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c index f2a2d53e968948..f8f6019d8304bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c @@ -684,9 +684,6 @@ void dpp1_set_degamma( BREAK_TO_DEBUGGER(); break; } - - REG_SEQ_SUBMIT(); - REG_SEQ_WAIT_DONE(); } void dpp1_degamma_ram_select( diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index e16274fee31d50..8473c694bfdc2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -59,6 +59,31 @@ void dpp35_dppclk_control( DISPCLK_R_GATE_DISABLE, 0); } +void dpp35_program_bias_and_scale_fcnv( + struct dpp *dpp_base, + struct dc_bias_and_scale *params) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (!params->bias_and_scale_valid) { + REG_SET(FCNV_FP_BIAS_R, 0, FCNV_FP_BIAS_R, 0); + REG_SET(FCNV_FP_BIAS_G, 0, FCNV_FP_BIAS_G, 0); + REG_SET(FCNV_FP_BIAS_B, 0, FCNV_FP_BIAS_B, 0); + + REG_SET(FCNV_FP_SCALE_R, 0, FCNV_FP_SCALE_R, 0x1F000); + REG_SET(FCNV_FP_SCALE_G, 0, FCNV_FP_SCALE_G, 0x1F000); + REG_SET(FCNV_FP_SCALE_B, 0, FCNV_FP_SCALE_B, 0x1F000); + } else { + REG_SET(FCNV_FP_BIAS_R, 0, FCNV_FP_BIAS_R, params->bias_red); + REG_SET(FCNV_FP_BIAS_G, 0, FCNV_FP_BIAS_G, params->bias_green); + REG_SET(FCNV_FP_BIAS_B, 0, FCNV_FP_BIAS_B, params->bias_blue); + + REG_SET(FCNV_FP_SCALE_R, 0, FCNV_FP_SCALE_R, params->scale_red); + REG_SET(FCNV_FP_SCALE_G, 0, FCNV_FP_SCALE_G, params->scale_green); + REG_SET(FCNV_FP_SCALE_B, 0, FCNV_FP_SCALE_B, params->scale_blue); + } +} + static struct dpp_funcs dcn35_dpp_funcs = { .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, .dpp_read_state = dpp30_read_state, @@ -81,7 +106,7 @@ static struct dpp_funcs dcn35_dpp_funcs = { .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) - .dpp_program_bias_and_scale = NULL, + .dpp_program_bias_and_scale = dpp35_program_bias_and_scale_fcnv, .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, .set_cursor_attributes = dpp3_set_cursor_attributes, .set_cursor_position = dpp1_set_cursor_position, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h index 135872d88219d8..3ca339a16e5b03 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h @@ -61,4 +61,7 @@ bool 
dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx, void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable); +void dpp35_program_bias_and_scale_fcnv(struct dpp *dpp_base, + struct dc_bias_and_scale *bias_and_scale); + #endif // __DCN35_DPP_H diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 7cae18fd7be9c4..97bf26fa357389 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -30,6 +30,7 @@ #include "basics/conversion.h" #include "dcn30/dcn30_cm_common.h" #include "dcn32/dcn32_dpp.h" +#include "dcn35/dcn35_dpp.h" #define REG(reg)\ dpp->tf_regs->reg @@ -240,7 +241,7 @@ static struct dpp_funcs dcn401_dpp_funcs = { .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) - .dpp_program_bias_and_scale = NULL, + .dpp_program_bias_and_scale = dpp35_program_bias_and_scale_fcnv, .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, .set_cursor_attributes = dpp401_set_cursor_attributes, .set_cursor_position = dpp401_set_cursor_position, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index d0f8c9ff523255..3b6ca7974e188d 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -120,11 +120,10 @@ void dpp401_set_cursor_attributes( enum dc_cursor_color_format color_format = cursor_attributes->color_format; int cur_rom_en = 0; + // DCN4 should always do Cursor degamma for Cursor Color modes if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { - cur_rom_en = 1; - } + cur_rom_en = 1; } REG_UPDATE_3(CURSOR0_CONTROL, @@ -246,16 +245,6 @@ void dpp401_set_cursor_matrix( enum dc_color_space color_space, struct dc_csc_transform cursor_csc_color_matrix) { - struct dpp_input_csc_matrix cursor_tbl_entry; - unsigned int i; - - if (cursor_csc_color_matrix.enable_adjustment == true) { - for (i = 0; i < 12; i++) - cursor_tbl_entry.regval[i] = cursor_csc_color_matrix.matrix[i]; - - cursor_tbl_entry.color_space = color_space; - dpp401_program_cursor_csc(dpp_base, color_space, &cursor_tbl_entry); - } else { - dpp401_program_cursor_csc(dpp_base, color_space, NULL); - } + //Since we don't have cursor matrix information, force bypass mode by passing in unknown color space + dpp401_program_cursor_csc(dpp_base, COLOR_SPACE_UNKNOWN, NULL); } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c index 505929800426d2..5105fd580017c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c @@ -280,7 +280,8 @@ static void dpp401_dscl_set_scaler_filter( static void dpp401_dscl_set_scl_filter( struct dcn401_dpp *dpp, const struct scaler_data *scl_data, - bool chroma_coef_mode) + bool chroma_coef_mode, + bool force_coeffs_update) { bool h_2tap_hardcode_coef_en = false; bool v_2tap_hardcode_coef_en = false; @@ -343,7 +344,7 @@ static void dpp401_dscl_set_scl_filter( || (filter_v_c && (filter_v_c != dpp->filter_v_c)); } - 
if (filter_updated) { + if ((filter_updated) || (force_coeffs_update)) { uint32_t scl_mode = REG_READ(SCL_MODE); if (!h_2tap_hardcode_coef_en && filter_h) { @@ -656,274 +657,252 @@ static void dpp401_dscl_set_recout(struct dcn401_dpp *dpp, RECOUT_HEIGHT, recout->height); } /** - * dpp401_dscl_program_easf - Program EASF + * dpp401_dscl_program_easf_v - Program EASF_V * * @dpp_base: High level DPP struct * @scl_data: scalaer_data info * - * This is the primary function to program EASF + * This is the primary function to program vertical EASF registers * */ -static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data) +static void dpp401_dscl_program_easf_v(struct dpp *dpp_base, const struct scaler_data *scl_data) { struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); PERF_TRACE(); - REG_UPDATE(DSCL_SC_MODE, - SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode); - REG_UPDATE(DSCL_SC_MODE, - SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en); /* DSCL_EASF_V_MODE */ - REG_UPDATE(DSCL_EASF_V_MODE, - SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en); - REG_UPDATE(DSCL_EASF_V_MODE, - SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor); - REG_UPDATE(DSCL_EASF_V_MODE, + REG_SET_3(DSCL_EASF_V_MODE, 0, + SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en, + SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor, SCL_EASF_V_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_v_ring); - REG_UPDATE(DSCL_EASF_V_BF_CNTL, - SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en); - REG_UPDATE(DSCL_EASF_V_BF_CNTL, - SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode); - REG_UPDATE(DSCL_EASF_V_BF_CNTL, - SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode); - REG_UPDATE(DSCL_EASF_V_BF_CNTL, - SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain); - REG_UPDATE(DSCL_EASF_V_BF_CNTL, - SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain); - REG_UPDATE(DSCL_EASF_V_BF_CNTL, + + if (!scl_data->dscl_prog_data.easf_v_en) { + PERF_TRACE(); + return; + } + + /* DSCL_EASF_V_BF_CNTL */ + REG_SET_6(DSCL_EASF_V_BF_CNTL, 0, + SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en, + SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode, + SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode, + SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain, + SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain, SCL_EASF_V_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_v_bf2_roc_gain); - REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1, - SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt); - REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1, + /* DSCL_EASF_V_RINGEST_3TAP_CNTLn */ + REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL1, 0, + SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt, SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt_max); - REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2, - SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope); - REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2, + REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL2, 0, + SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope, SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt1_slope); - REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3, 
- SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope); - REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3, + REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL3, 0, + SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope, SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_offset); - REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, - SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1); - REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, + /* DSCL_EASF_V_RINGEST_EVENTAP_REDUCE */ + REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, 0, + SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1, SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg2); - REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN, - SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1); - REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN, + /* DSCL_EASF_V_RINGEST_EVENTAP_GAIN */ + REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_GAIN, 0, + SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1, SCL_EASF_V_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain2); - REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN, - SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa); - REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN, - SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb); - REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN, - SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina); - REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN, + /* DSCL_EASF_V_BF_FINAL_MAX_MIN */ + REG_SET_4(DSCL_EASF_V_BF_FINAL_MAX_MIN, 0, + SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa, + SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb, + SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina, SCL_EASF_V_BF_MINB, scl_data->dscl_prog_data.easf_v_bf_minb); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0, - SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0, - SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0, + /* DSCL_EASF_V_BF1_PWL_SEGn */ + REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG0, 0, + SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0, + SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0, SCL_EASF_V_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg0); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1, - SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1, - SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1, + REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG1, 0, + SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1, + SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1, SCL_EASF_V_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg1); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2, - SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2, - SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2, + REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG2, 0, + SCL_EASF_V_BF1_PWL_IN_SEG2, 
scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2, + SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2, SCL_EASF_V_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg2); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3, - SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3, - SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3, + REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG3, 0, + SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3, + SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3, SCL_EASF_V_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg3); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4, - SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4, - SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4, + REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG4, 0, + SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4, + SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4, SCL_EASF_V_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg4); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5, - SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5, - SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5, + REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG5, 0, + SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5, + SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5, SCL_EASF_V_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg5); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6, - SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6, - SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6, + REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG6, 0, + SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6, + SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6, SCL_EASF_V_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg6); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7, - SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7); - REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7, + REG_SET_2(DSCL_EASF_V_BF1_PWL_SEG7, 0, + SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7, SCL_EASF_V_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg7); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0, - SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0, - SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0, + /* DSCL_EASF_V_BF3_PWL_SEGn */ + REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG0, 0, + SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0, + SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0, SCL_EASF_V_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set0); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1, - SCL_EASF_V_BF3_PWL_IN_SEG1, 
scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1, - SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1, + REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG1, 0, + SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1, + SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1, SCL_EASF_V_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set1); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2, - SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2, - SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2, + REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG2, 0, + SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2, + SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2, SCL_EASF_V_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set2); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3, - SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3, - SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3, + REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG3, 0, + SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3, + SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3, SCL_EASF_V_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set3); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4, - SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4, - SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4, + REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG4, 0, + SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4, + SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4, SCL_EASF_V_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set4); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5, - SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5); - REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5, + REG_SET_2(DSCL_EASF_V_BF3_PWL_SEG5, 0, + SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5, SCL_EASF_V_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set5); + PERF_TRACE(); +} +/** + * dpp401_dscl_program_easf_h - Program EASF_H + * + * @dpp_base: High level DPP struct + * @scl_data: scalaer_data info + * + * This is the primary function to program horizontal EASF registers + * + */ +static void dpp401_dscl_program_easf_h(struct dpp *dpp_base, const struct scaler_data *scl_data) +{ + struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); + + PERF_TRACE(); /* DSCL_EASF_H_MODE */ - REG_UPDATE(DSCL_EASF_H_MODE, - SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en); - REG_UPDATE(DSCL_EASF_H_MODE, - SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor); - REG_UPDATE(DSCL_EASF_H_MODE, + REG_SET_3(DSCL_EASF_H_MODE, 0, + SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en, + SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor, SCL_EASF_H_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_h_ring); - REG_UPDATE(DSCL_EASF_H_BF_CNTL, - SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en); - 
REG_UPDATE(DSCL_EASF_H_BF_CNTL, - SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode); - REG_UPDATE(DSCL_EASF_H_BF_CNTL, - SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode); - REG_UPDATE(DSCL_EASF_H_BF_CNTL, - SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain); - REG_UPDATE(DSCL_EASF_H_BF_CNTL, - SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain); - REG_UPDATE(DSCL_EASF_H_BF_CNTL, + + if (!scl_data->dscl_prog_data.easf_h_en) { + PERF_TRACE(); + return; + } + + /* DSCL_EASF_H_BF_CNTL */ + REG_SET_6(DSCL_EASF_H_BF_CNTL, 0, + SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en, + SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode, + SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode, + SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain, + SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain, SCL_EASF_H_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_h_bf2_roc_gain); - REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, - SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1); - REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, + /* DSCL_EASF_H_RINGEST_EVENTAP_REDUCE */ + REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, 0, + SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1, SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg2); - REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN, - SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1); - REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN, + /* DSCL_EASF_H_RINGEST_EVENTAP_GAIN */ + REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_GAIN, 0, + SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1, SCL_EASF_H_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain2); - REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN, - SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa); - REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN, - SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb); - REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN, - SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina); - REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN, + /* DSCL_EASF_H_BF_FINAL_MAX_MIN */ + REG_SET_4(DSCL_EASF_H_BF_FINAL_MAX_MIN, 0, + SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa, + SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb, + SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina, SCL_EASF_H_BF_MINB, scl_data->dscl_prog_data.easf_h_bf_minb); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0, - SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0, - SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0, + /* DSCL_EASF_H_BF1_PWL_SEGn */ + REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG0, 0, + SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0, + SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0, SCL_EASF_H_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg0); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1, - SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1, - SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1, + 
REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG1, 0, + SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1, + SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1, SCL_EASF_H_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg1); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2, - SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2, - SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2, + REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG2, 0, + SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2, + SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2, SCL_EASF_H_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg2); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3, - SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3, - SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3, + REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG3, 0, + SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3, + SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3, SCL_EASF_H_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg3); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4, - SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4, - SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4, + REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG4, 0, + SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4, + SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4, SCL_EASF_H_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg4); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5, - SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5, - SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5, + REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG5, 0, + SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5, + SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5, SCL_EASF_H_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg5); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6, - SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6, - SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6, + REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG6, 0, + SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6, + SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6, SCL_EASF_H_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg6); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7, - SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7); - REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7, + REG_SET_2(DSCL_EASF_H_BF1_PWL_SEG7, 0, + SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7, SCL_EASF_H_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg7); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0, - SCL_EASF_H_BF3_PWL_IN_SEG0, 
scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0, - SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0, + /* DSCL_EASF_H_BF3_PWL_SEGn */ + REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG0, 0, + SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0, + SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0, SCL_EASF_H_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set0); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1, - SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1, - SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1, + REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG1, 0, + SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1, + SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1, SCL_EASF_H_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set1); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2, - SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2, - SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2, + REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG2, 0, + SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2, + SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2, SCL_EASF_H_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set2); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3, - SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3, - SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3, + REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG3, 0, + SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3, + SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3, SCL_EASF_H_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set3); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4, - SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4, - SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4, + REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG4, 0, + SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4, + SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4, SCL_EASF_H_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set4); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5, - SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5); - REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5, + REG_SET_2(DSCL_EASF_H_BF3_PWL_SEG5, 0, + SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5, SCL_EASF_H_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set5); + PERF_TRACE(); +} +/** + * dpp401_dscl_program_easf - Program EASF + * + * @dpp_base: High level DPP struct + * @scl_data: scalaer_data info + * + * This is the primary function to program EASF + * + */ +static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data) +{ + struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); + + PERF_TRACE(); + /* DSCL_SC_MODE 
*/ + REG_SET_2(DSCL_SC_MODE, 0, + SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode, + SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en); /* DSCL_EASF_SC_MATRIX_C0C1, DSCL_EASF_SC_MATRIX_C2C3 */ - REG_UPDATE(DSCL_SC_MATRIX_C0C1, - SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0); - REG_UPDATE(DSCL_SC_MATRIX_C0C1, + REG_SET_2(DSCL_SC_MATRIX_C0C1, 0, + SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0, SCL_SC_MATRIX_C1, scl_data->dscl_prog_data.easf_matrix_c1); - REG_UPDATE(DSCL_SC_MATRIX_C2C3, - SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2); - REG_UPDATE(DSCL_SC_MATRIX_C2C3, + REG_SET_2(DSCL_SC_MATRIX_C2C3, 0, + SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2, SCL_SC_MATRIX_C3, scl_data->dscl_prog_data.easf_matrix_c3); + dpp401_dscl_program_easf_v(dpp_base, scl_data); + dpp401_dscl_program_easf_h(dpp_base, scl_data); PERF_TRACE(); } /** @@ -958,10 +937,11 @@ static void dpp401_dscl_set_isharp_filter( REG_UPDATE(ISHARP_DELTA_CTRL, ISHARP_DELTA_LUT_HOST_SELECT, 0); + /* LUT data write is auto-indexed. Write index once */ + REG_SET(ISHARP_DELTA_INDEX, 0, + ISHARP_DELTA_INDEX, 0); for (level = 0; level < NUM_LEVELS; level++) { filter_data = filter[level]; - REG_SET(ISHARP_DELTA_INDEX, 0, - ISHARP_DELTA_INDEX, level); REG_SET(ISHARP_DELTA_DATA, 0, ISHARP_DELTA_DATA, filter_data); } @@ -971,112 +951,83 @@ static void dpp401_dscl_set_isharp_filter( * * @dpp_base: High level DPP struct * @scl_data: scalaer_data info + * @program_isharp_1dlut: flag to program isharp 1D LUT + * @bs_coeffs_updated: Blur and Scale Coefficients update flag * * This is the primary function to program isharp * */ static void dpp401_dscl_program_isharp(struct dpp *dpp_base, - const struct scaler_data *scl_data) + const struct scaler_data *scl_data, + bool program_isharp_1dlut, + bool *bs_coeffs_updated) { struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); + *bs_coeffs_updated = false; PERF_TRACE(); - /* ISHARP_EN */ - REG_UPDATE(ISHARP_MODE, - ISHARP_EN, scl_data->dscl_prog_data.isharp_en); - /* ISHARP_NOISEDET_EN */ - REG_UPDATE(ISHARP_MODE, - ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable); - /* ISHARP_NOISEDET_MODE */ - REG_UPDATE(ISHARP_MODE, - ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode); - /* ISHARP_NOISEDET_UTHRE */ - REG_UPDATE(ISHARP_NOISEDET_THRESHOLD, - ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold); - /* ISHARP_NOISEDET_DTHRE */ - REG_UPDATE(ISHARP_NOISEDET_THRESHOLD, - ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold); - REG_UPDATE(ISHARP_MODE, - ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode); - /* ISHARP_NOISEDET_UTHRE */ - REG_UPDATE(ISHARP_NOISEDET_THRESHOLD, - ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold); - /* ISHARP_NOISEDET_DTHRE */ - REG_UPDATE(ISHARP_NOISEDET_THRESHOLD, + /* ISHARP_MODE */ + REG_SET_6(ISHARP_MODE, 0, + ISHARP_EN, scl_data->dscl_prog_data.isharp_en, + ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable, + ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode, + ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode, + ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode, + ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm); + + /* Skip remaining register programming if ISHARP is disabled */ + if (!scl_data->dscl_prog_data.isharp_en) { + PERF_TRACE(); + return; + } + + /* ISHARP_NOISEDET_THRESHOLD */ + 
REG_SET_2(ISHARP_NOISEDET_THRESHOLD, 0, + ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold, ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold); - /* ISHARP_NOISEDET_PWL_START_IN */ - REG_UPDATE(ISHARP_NOISE_GAIN_PWL, - ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in); - /* ISHARP_NOISEDET_PWL_END_IN */ - REG_UPDATE(ISHARP_NOISE_GAIN_PWL, - ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in); - /* ISHARP_NOISEDET_PWL_SLOPE */ - REG_UPDATE(ISHARP_NOISE_GAIN_PWL, + + /* ISHARP_NOISE_GAIN_PWL */ + REG_SET_3(ISHARP_NOISE_GAIN_PWL, 0, + ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in, + ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in, ISHARP_NOISEDET_PWL_SLOPE, scl_data->dscl_prog_data.isharp_noise_det.pwl_slope); - /* ISHARP_LBA_MODE */ - REG_UPDATE(ISHARP_MODE, - ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode); + /* ISHARP_LBA: IN_SEG, BASE_SEG, SLOPE_SEG */ - REG_UPDATE(ISHARP_LBA_PWL_SEG0, - ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0]); - REG_UPDATE(ISHARP_LBA_PWL_SEG0, - ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0]); - REG_UPDATE(ISHARP_LBA_PWL_SEG0, + REG_SET_3(ISHARP_LBA_PWL_SEG0, 0, + ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0], + ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0], ISHARP_LBA_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.isharp_lba.slope_seg[0]); - REG_UPDATE(ISHARP_LBA_PWL_SEG1, - ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1]); - REG_UPDATE(ISHARP_LBA_PWL_SEG1, - ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1]); - REG_UPDATE(ISHARP_LBA_PWL_SEG1, + REG_SET_3(ISHARP_LBA_PWL_SEG1, 0, + ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1], + ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1], ISHARP_LBA_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.isharp_lba.slope_seg[1]); - REG_UPDATE(ISHARP_LBA_PWL_SEG2, - ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2]); - REG_UPDATE(ISHARP_LBA_PWL_SEG2, - ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2]); - REG_UPDATE(ISHARP_LBA_PWL_SEG2, + REG_SET_3(ISHARP_LBA_PWL_SEG2, 0, + ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2], + ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2], ISHARP_LBA_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.isharp_lba.slope_seg[2]); - REG_UPDATE(ISHARP_LBA_PWL_SEG3, - ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3]); - REG_UPDATE(ISHARP_LBA_PWL_SEG3, - ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3]); - REG_UPDATE(ISHARP_LBA_PWL_SEG3, + REG_SET_3(ISHARP_LBA_PWL_SEG3, 0, + ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3], + ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3], ISHARP_LBA_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.isharp_lba.slope_seg[3]); - REG_UPDATE(ISHARP_LBA_PWL_SEG4, - ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4]); - REG_UPDATE(ISHARP_LBA_PWL_SEG4, - ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4]); - REG_UPDATE(ISHARP_LBA_PWL_SEG4, + REG_SET_3(ISHARP_LBA_PWL_SEG4, 0, + ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4], + ISHARP_LBA_PWL_BASE_SEG4, 
scl_data->dscl_prog_data.isharp_lba.base_seg[4], ISHARP_LBA_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.isharp_lba.slope_seg[4]); - REG_UPDATE(ISHARP_LBA_PWL_SEG5, - ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5]); - REG_UPDATE(ISHARP_LBA_PWL_SEG5, + REG_SET_2(ISHARP_LBA_PWL_SEG5, 0, + ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5], ISHARP_LBA_PWL_BASE_SEG5, scl_data->dscl_prog_data.isharp_lba.base_seg[5]); - /* ISHARP_FMT_MODE */ - REG_UPDATE(ISHARP_MODE, - ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode); - /* ISHARP_FMT_NORM */ - REG_UPDATE(ISHARP_MODE, - ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm); /* ISHARP_DELTA_LUT */ - dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta); - /* ISHARP_NLDELTA_SCLIP_EN_P */ - REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP, - ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p); - /* ISHARP_NLDELTA_SCLIP_PIVOT_P */ - REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP, - ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p); - /* ISHARP_NLDELTA_SCLIP_SLOPE_P */ - REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP, - ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p); - /* ISHARP_NLDELTA_SCLIP_EN_N */ - REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP, - ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n); - /* ISHARP_NLDELTA_SCLIP_PIVOT_N */ - REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP, - ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n); - /* ISHARP_NLDELTA_SCLIP_SLOPE_N */ - REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP, + if (!program_isharp_1dlut) + dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta); + + /* ISHARP_NLDELTA_SOFT_CLIP */ + REG_SET_6(ISHARP_NLDELTA_SOFT_CLIP, 0, + ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p, + ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p, + ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p, + ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n, + ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n, ISHARP_NLDELTA_SCLIP_SLOPE_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_n); /* Blur and Scale Coefficients - SCL_COEF_RAM_TAP_SELECT */ @@ -1086,12 +1037,14 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base, dpp, scl_data->taps.v_taps, SCL_COEF_VERTICAL_BLUR_SCALE, scl_data->dscl_prog_data.filter_blur_scale_v); + *bs_coeffs_updated = true; } if (scl_data->dscl_prog_data.filter_blur_scale_h) { dpp401_dscl_set_scaler_filter( dpp, scl_data->taps.h_taps, SCL_COEF_HORIZONTAL_BLUR_SCALE, scl_data->dscl_prog_data.filter_blur_scale_h); + *bs_coeffs_updated = true; } } PERF_TRACE(); @@ -1122,12 +1075,29 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN && scl_data->format <= PIXEL_FORMAT_VIDEO_END; + bool program_isharp_1dlut = false; + bool bs_coeffs_updated = false; + if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) return; PERF_TRACE(); + /* If only sharpness has changed, then only update 1dlut, then return */ + if (scl_data->dscl_prog_data.isharp_en && + (dpp->scl_data.dscl_prog_data.sharpness_level + != scl_data->dscl_prog_data.sharpness_level)) { + /* ISHARP_DELTA_LUT */ + 
dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta); + dpp->scl_data.dscl_prog_data.sharpness_level = scl_data->dscl_prog_data.sharpness_level; + dpp->scl_data.dscl_prog_data.isharp_delta = scl_data->dscl_prog_data.isharp_delta; + + if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) + return; + program_isharp_1dlut = true; + } + dpp->scl_data = *scl_data; if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) { @@ -1181,7 +1151,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) { if (dpp->base.ctx->dc->config.prefer_easf) dpp401_dscl_disable_easf(dpp_base, scl_data); - dpp401_dscl_program_isharp(dpp_base, scl_data); + dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated); return; } @@ -1208,12 +1178,18 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, SCL_V_NUM_TAPS_C, v_num_taps_c, SCL_H_NUM_TAPS_C, h_num_taps_c); - dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr); + /* ISharp configuration + * - B&S coeffs are written to same coeff RAM as WB scaler coeffs + * - coeff RAM toggle is in EASF programming + * - if we are only programming B&S coeffs, then need to reprogram + * WB scaler coeffs and toggle coeff RAM together + */ + //if (dpp->base.ctx->dc->config.prefer_easf) + dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated); + + dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr, bs_coeffs_updated); /* Edge adaptive scaler function configuration */ if (dpp->base.ctx->dc->config.prefer_easf) dpp401_dscl_program_easf(dpp_base, scl_data); - /* isharp configuration */ - //if (dpp->base.ctx->dc->config.prefer_easf) - dpp401_dscl_program_isharp(dpp_base, scl_data); PERF_TRACE(); } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index a1727e5bf02479..ebd5df1a36e8bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -668,6 +668,7 @@ static bool decide_dsc_bandwidth_range( */ static bool decide_dsc_target_bpp_x16( const struct dc_dsc_policy *policy, + const struct dc_dsc_config_options *options, const struct dsc_enc_caps *dsc_common_caps, const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, @@ -682,7 +683,7 @@ static bool decide_dsc_target_bpp_x16( if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, num_slices_h, dsc_common_caps, timing, link_encoding, &range)) { if (target_bandwidth_kbps >= range.stream_kbps) { - if (policy->enable_dsc_when_not_needed) + if (policy->enable_dsc_when_not_needed || options->force_dsc_when_not_needed) /* enable max bpp even dsc is not needed */ *target_bpp_x16 = range.max_target_bpp_x16; } else if (target_bandwidth_kbps >= range.max_kbps) { @@ -882,7 +883,7 @@ static bool setup_dsc_config( memset(dsc_cfg, 0, sizeof(struct dc_dsc_config)); - dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy); + dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy, link_encoding); pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right; pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; @@ -1080,6 +1081,7 @@ static bool setup_dsc_config( if (target_bandwidth_kbps > 0) { is_dsc_possible = decide_dsc_target_bpp_x16( &policy, + options, &dsc_common_caps, target_bandwidth_kbps, timing, 
@@ -1171,7 +1173,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, - struct dc_dsc_policy *policy) + struct dc_dsc_policy *policy, + const enum dc_link_encoding_format link_encoding) { uint32_t bpc = 0; @@ -1235,10 +1238,7 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, policy->max_target_bpp = max_target_bpp_limit_override_x16 / 16; /* enable DSC when not needed, default false */ - if (dsc_policy_enable_dsc_when_not_needed) - policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed; - else - policy->enable_dsc_when_not_needed = false; + policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed; } void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit) @@ -1267,4 +1267,5 @@ void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_ options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine; options->max_target_bpp_limit_override_x16 = 0; options->slice_height_granularity = 1; + options->force_dsc_when_not_needed = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c index 6acb6699f146ec..61678b0a5a1e7d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c @@ -27,7 +27,7 @@ static void dsc401_disconnect(struct display_stream_compressor *dsc); static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); -const struct dsc_funcs dcn401_dsc_funcs = { +static const struct dsc_funcs dcn401_dsc_funcs = { .dsc_get_enc_caps = dsc401_get_enc_caps, .dsc_read_state = dsc401_read_state, .dsc_validate_stream = dsc401_validate_stream, diff --git a/drivers/gpu/drm/amd/display/dc/dwb/Makefile b/drivers/gpu/drm/amd/display/dc/dwb/Makefile index 16f7a454fed9a0..3952ba4cd5083f 100644 --- a/drivers/gpu/drm/amd/display/dc/dwb/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dwb/Makefile @@ -24,6 +24,15 @@ # ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN30 +############################################################################### +DWB_DCN30 = dcn30_dwb.o dcn30_dwb_cm.o + +AMD_DAL_DWB_DCN30 = $(addprefix $(AMDDALPATH)/dc/dwb/dcn30/,$(DWB_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DWB_DCN30) + ############################################################################### # DCN35 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h new file mode 100644 index 00000000000000..bd98b327a6c70e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h @@ -0,0 +1,78 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn10/dcn10_cm_common.h" + +#ifndef __DAL_DCN30_CM_COMMON_H__ +#define __DAL_DCN30_CM_COMMON_H__ + +#define TF_HELPER_REG_FIELD_LIST_DCN3(type) \ + TF_HELPER_REG_FIELD_LIST(type);\ + type field_region_start_base;\ + type field_offset + +struct DCN3_xfer_func_shift { + TF_HELPER_REG_FIELD_LIST_DCN3(uint8_t); +}; + +struct DCN3_xfer_func_mask { + TF_HELPER_REG_FIELD_LIST_DCN3(uint32_t); +}; + +struct dcn3_xfer_func_reg { + struct DCN3_xfer_func_shift shifts; + struct DCN3_xfer_func_mask masks; + + TF_HELPER_REG_LIST; + uint32_t offset_b; + uint32_t offset_g; + uint32_t offset_r; + uint32_t start_base_cntl_b; + uint32_t start_base_cntl_g; + uint32_t start_base_cntl_r; +}; + +void cm_helper_program_gamcor_xfer_func( + struct dc_context *ctx, + const struct pwl_params *params, + const struct dcn3_xfer_func_reg *reg); + +bool cm3_helper_translate_curve_to_hw_format( + const struct dc_transfer_func *output_tf, + struct pwl_params *lut_params, bool fixpoint); + +bool cm3_helper_translate_curve_to_degamma_hw_format( + const struct dc_transfer_func *output_tf, + struct pwl_params *lut_params); + +bool cm3_helper_convert_to_custom_float( + struct pwl_result_data *rgb_resulted, + struct curve_points3 *corner_points, + uint32_t hw_points_num, + bool fixpoint); + +bool is_rgb_equal(const struct pwl_result_data *rgb, uint32_t num); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c new file mode 100644 index 00000000000000..fae98cf520201d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c @@ -0,0 +1,285 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "reg_helper.h" +#include "resource.h" +#include "dwb.h" +#include "dcn30_dwb.h" + + +#define REG(reg)\ + dwbc30->dwbc_regs->reg + +#define CTX \ + dwbc30->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name + +#define DC_LOGGER \ + dwbc30->base.ctx->logger + +static bool dwb3_get_caps(struct dwbc *dwbc, struct dwb_caps *caps) +{ + if (caps) { + caps->adapter_id = 0; /* we only support 1 adapter currently */ + caps->hw_version = DCN_VERSION_3_0; + caps->num_pipes = 2; + memset(&caps->reserved, 0, sizeof(caps->reserved)); + memset(&caps->reserved2, 0, sizeof(caps->reserved2)); + caps->sw_version = dwb_ver_2_0; + caps->caps.support_dwb = true; + caps->caps.support_ogam = true; + caps->caps.support_wbscl = true; + caps->caps.support_ocsc = false; + caps->caps.support_stereo = true; + return true; + } else { + return false; + } +} + +void dwb3_config_fc(struct dwbc *dwbc, struct dc_dwb_params *params) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + + /* Set DWB source size */ + REG_UPDATE_2(FC_SOURCE_SIZE, FC_SOURCE_WIDTH, params->cnv_params.src_width, + FC_SOURCE_HEIGHT, params->cnv_params.src_height); + + /* If the crop window size is not equal to the source size, then enable cropping. 
*/ + if (params->cnv_params.crop_en) { + REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 1); + REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_X, params->cnv_params.crop_x); + REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_Y, params->cnv_params.crop_y); + REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_WIDTH, params->cnv_params.crop_width); + REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_HEIGHT, params->cnv_params.crop_height); + } else { + REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 0); + } + + /* Set CAPTURE_RATE */ + REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_RATE, params->capture_rate); + + dwb3_set_stereo(dwbc, ¶ms->stereo_params); +} + +bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + DC_LOG_DWB("%s dwb3_enabled at inst = %d", __func__, dwbc->inst); + + /* Set WB_ENABLE (not double buffered; capture not enabled) */ + REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 1); + + /* Set FC parameters */ + dwb3_config_fc(dwbc, params); + + /* Program color processing unit */ + dwb3_program_hdr_mult(dwbc, params); + dwb3_set_gamut_remap(dwbc, params); + dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func); + + /* Program output denorm */ + dwb3_set_denorm(dwbc, params); + + /* Enable DWB capture enable (double buffered) */ + REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_ENABLE); + + /* First pixel count */ + REG_UPDATE(FC_FLOW_CTRL, FC_FIRST_PIXEL_DELAY_COUNT, 96); + + return true; +} + +bool dwb3_disable(struct dwbc *dwbc) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + + /* disable FC */ + REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_DISABLE); + + /* disable WB */ + REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 0); + + DC_LOG_DWB("%s dwb3_disabled at inst = %d", __func__, dwbc->inst); + return true; +} + +void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + unsigned int pre_locked; + + REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked); + + /* Lock DWB registers */ + if (pre_locked == 0) + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1); + + /* Disable FC */ + REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable); + + /* Unlock DWB registers */ + if (pre_locked == 0) + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0); + + DC_LOG_DWB("%s dwb3_fc_disabled at inst = %d", __func__, dwbc->inst); +} + + +bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + unsigned int pre_locked; + + /* + * Check if the caller has already locked DWB registers. + * If so: assume the caller will unlock, so don't touch the lock. + * If not: lock them for this update, then unlock after the + * update is complete. 
+ */ + REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked); + DC_LOG_DWB("%s dwb update, inst = %d", __func__, dwbc->inst); + + if (pre_locked == 0) { + /* Lock DWB registers */ + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1); + } + + /* Set FC parameters */ + dwb3_config_fc(dwbc, params); + + /* Program color processing unit */ + dwb3_program_hdr_mult(dwbc, params); + dwb3_set_gamut_remap(dwbc, params); + dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func); + + /* Program output denorm */ + dwb3_set_denorm(dwbc, params); + + if (pre_locked == 0) { + /* Unlock DWB registers */ + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0); + } + + return true; +} + +bool dwb3_is_enabled(struct dwbc *dwbc) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + unsigned int dwb_enabled = 0; + unsigned int fc_frame_capture_en = 0; + + REG_GET(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, &dwb_enabled); + REG_GET(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, &fc_frame_capture_en); + + return ((dwb_enabled != 0) && (fc_frame_capture_en != 0)); +} + +void dwb3_set_stereo(struct dwbc *dwbc, + struct dwb_stereo_params *stereo_params) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + + if (stereo_params->stereo_enabled) { + REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, stereo_params->stereo_eye_select); + REG_UPDATE(FC_MODE_CTRL, FC_STEREO_EYE_POLARITY, stereo_params->stereo_polarity); + DC_LOG_DWB("%s dwb stereo enabled", __func__); + } else { + REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, 0); + DC_LOG_DWB("%s dwb stereo disabled", __func__); + } +} + +void dwb3_set_new_content(struct dwbc *dwbc, + bool is_new_content) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + + REG_UPDATE(FC_MODE_CTRL, FC_NEW_CONTENT, is_new_content); +} + +void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + + /* Set output format*/ + REG_UPDATE(DWB_OUT_CTRL, OUT_FORMAT, params->cnv_params.fc_out_format); + + /* Set output denorm */ + if (params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_ARGB || + params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_RGBA) { + REG_UPDATE(DWB_OUT_CTRL, OUT_DENORM, params->cnv_params.out_denorm_mode); + REG_UPDATE(DWB_OUT_CTRL, OUT_MAX, params->cnv_params.out_max_pix_val); + REG_UPDATE(DWB_OUT_CTRL, OUT_MIN, params->cnv_params.out_min_pix_val); + } +} + + +static const struct dwbc_funcs dcn30_dwbc_funcs = { + .get_caps = dwb3_get_caps, + .enable = dwb3_enable, + .disable = dwb3_disable, + .update = dwb3_update, + .is_enabled = dwb3_is_enabled, + .set_fc_enable = dwb3_set_fc_enable, + .set_stereo = dwb3_set_stereo, + .set_new_content = dwb3_set_new_content, + .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename +}; + +void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30, + struct dc_context *ctx, + const struct dcn30_dwbc_registers *dwbc_regs, + const struct dcn30_dwbc_shift *dwbc_shift, + const struct dcn30_dwbc_mask *dwbc_mask, + int inst) +{ + dwbc30->base.ctx = ctx; + + dwbc30->base.inst = inst; + dwbc30->base.funcs = &dcn30_dwbc_funcs; + + dwbc30->dwbc_regs = dwbc_regs; + dwbc30->dwbc_shift = dwbc_shift; + dwbc30->dwbc_mask = dwbc_mask; +} + +void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + + /* + * Set maximum delay of host read access to DWBSCL LUT or OGAM LUT if there are no + * idle cycles in HW pipeline (in number of clock cycles times 4) + */ + REG_UPDATE(DWB_HOST_READ_CONTROL, 
DWB_HOST_READ_RATE_CONTROL, host_read_delay); + + DC_LOG_DWB("%s dwb3_rate_control at inst = %d", __func__, dwbc->inst); +} diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h new file mode 100644 index 00000000000000..0f3f7c5fbaecf9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h @@ -0,0 +1,920 @@ +/* Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __DC_DWBC_DCN30_H__ +#define __DC_DWBC_DCN30_H__ + +#define TO_DCN30_DWBC(dwbc_base) \ + container_of(dwbc_base, struct dcn30_dwbc, base) + +#define DWBC_COMMON_REG_LIST_DCN30(inst) \ + SR(DWB_ENABLE_CLK_CTRL),\ + SR(DWB_MEM_PWR_CTRL),\ + SR(FC_MODE_CTRL),\ + SR(FC_FLOW_CTRL),\ + SR(FC_WINDOW_START),\ + SR(FC_WINDOW_SIZE),\ + SR(FC_SOURCE_SIZE),\ + SR(DWB_UPDATE_CTRL),\ + SR(DWB_CRC_CTRL),\ + SR(DWB_CRC_MASK_R_G),\ + SR(DWB_CRC_MASK_B_A),\ + SR(DWB_CRC_VAL_R_G),\ + SR(DWB_CRC_VAL_B_A),\ + SR(DWB_OUT_CTRL),\ + SR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN),\ + SR(DWB_MMHUBBUB_BACKPRESSURE_CNT),\ + SR(DWB_HOST_READ_CONTROL),\ + SR(DWB_SOFT_RESET),\ + SR(DWB_HDR_MULT_COEF),\ + SR(DWB_GAMUT_REMAP_MODE),\ + SR(DWB_GAMUT_REMAP_COEF_FORMAT),\ + SR(DWB_GAMUT_REMAPA_C11_C12),\ + SR(DWB_GAMUT_REMAPA_C13_C14),\ + SR(DWB_GAMUT_REMAPA_C21_C22),\ + SR(DWB_GAMUT_REMAPA_C23_C24),\ + SR(DWB_GAMUT_REMAPA_C31_C32),\ + SR(DWB_GAMUT_REMAPA_C33_C34),\ + SR(DWB_GAMUT_REMAPB_C11_C12),\ + SR(DWB_GAMUT_REMAPB_C13_C14),\ + SR(DWB_GAMUT_REMAPB_C21_C22),\ + SR(DWB_GAMUT_REMAPB_C23_C24),\ + SR(DWB_GAMUT_REMAPB_C31_C32),\ + SR(DWB_GAMUT_REMAPB_C33_C34),\ + SR(DWB_OGAM_CONTROL),\ + SR(DWB_OGAM_LUT_INDEX),\ + SR(DWB_OGAM_LUT_DATA),\ + SR(DWB_OGAM_LUT_CONTROL),\ + SR(DWB_OGAM_RAMA_START_CNTL_B),\ + SR(DWB_OGAM_RAMA_START_CNTL_G),\ + SR(DWB_OGAM_RAMA_START_CNTL_R),\ + SR(DWB_OGAM_RAMA_START_BASE_CNTL_B),\ + SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B),\ + SR(DWB_OGAM_RAMA_START_BASE_CNTL_G),\ + SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G),\ + SR(DWB_OGAM_RAMA_START_BASE_CNTL_R),\ + SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R),\ + SR(DWB_OGAM_RAMA_END_CNTL1_B),\ + SR(DWB_OGAM_RAMA_END_CNTL2_B),\ + SR(DWB_OGAM_RAMA_END_CNTL1_G),\ + SR(DWB_OGAM_RAMA_END_CNTL2_G),\ + SR(DWB_OGAM_RAMA_END_CNTL1_R),\ + SR(DWB_OGAM_RAMA_END_CNTL2_R),\ + SR(DWB_OGAM_RAMA_OFFSET_B),\ + SR(DWB_OGAM_RAMA_OFFSET_G),\ + SR(DWB_OGAM_RAMA_OFFSET_R),\ + SR(DWB_OGAM_RAMA_REGION_0_1),\ + SR(DWB_OGAM_RAMA_REGION_2_3),\ + 
SR(DWB_OGAM_RAMA_REGION_4_5),\ + SR(DWB_OGAM_RAMA_REGION_6_7),\ + SR(DWB_OGAM_RAMA_REGION_8_9),\ + SR(DWB_OGAM_RAMA_REGION_10_11),\ + SR(DWB_OGAM_RAMA_REGION_12_13),\ + SR(DWB_OGAM_RAMA_REGION_14_15),\ + SR(DWB_OGAM_RAMA_REGION_16_17),\ + SR(DWB_OGAM_RAMA_REGION_18_19),\ + SR(DWB_OGAM_RAMA_REGION_20_21),\ + SR(DWB_OGAM_RAMA_REGION_22_23),\ + SR(DWB_OGAM_RAMA_REGION_24_25),\ + SR(DWB_OGAM_RAMA_REGION_26_27),\ + SR(DWB_OGAM_RAMA_REGION_28_29),\ + SR(DWB_OGAM_RAMA_REGION_30_31),\ + SR(DWB_OGAM_RAMA_REGION_32_33),\ + SR(DWB_OGAM_RAMB_START_CNTL_B),\ + SR(DWB_OGAM_RAMB_START_CNTL_G),\ + SR(DWB_OGAM_RAMB_START_CNTL_R),\ + SR(DWB_OGAM_RAMB_START_BASE_CNTL_B),\ + SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B),\ + SR(DWB_OGAM_RAMB_START_BASE_CNTL_G),\ + SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G),\ + SR(DWB_OGAM_RAMB_START_BASE_CNTL_R),\ + SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R),\ + SR(DWB_OGAM_RAMB_END_CNTL1_B),\ + SR(DWB_OGAM_RAMB_END_CNTL2_B),\ + SR(DWB_OGAM_RAMB_END_CNTL1_G),\ + SR(DWB_OGAM_RAMB_END_CNTL2_G),\ + SR(DWB_OGAM_RAMB_END_CNTL1_R),\ + SR(DWB_OGAM_RAMB_END_CNTL2_R),\ + SR(DWB_OGAM_RAMB_OFFSET_B),\ + SR(DWB_OGAM_RAMB_OFFSET_G),\ + SR(DWB_OGAM_RAMB_OFFSET_R),\ + SR(DWB_OGAM_RAMB_REGION_0_1),\ + SR(DWB_OGAM_RAMB_REGION_2_3),\ + SR(DWB_OGAM_RAMB_REGION_4_5),\ + SR(DWB_OGAM_RAMB_REGION_6_7),\ + SR(DWB_OGAM_RAMB_REGION_8_9),\ + SR(DWB_OGAM_RAMB_REGION_10_11),\ + SR(DWB_OGAM_RAMB_REGION_12_13),\ + SR(DWB_OGAM_RAMB_REGION_14_15),\ + SR(DWB_OGAM_RAMB_REGION_16_17),\ + SR(DWB_OGAM_RAMB_REGION_18_19),\ + SR(DWB_OGAM_RAMB_REGION_20_21),\ + SR(DWB_OGAM_RAMB_REGION_22_23),\ + SR(DWB_OGAM_RAMB_REGION_24_25),\ + SR(DWB_OGAM_RAMB_REGION_26_27),\ + SR(DWB_OGAM_RAMB_REGION_28_29),\ + SR(DWB_OGAM_RAMB_REGION_30_31),\ + SR(DWB_OGAM_RAMB_REGION_32_33) + + +#define DWBC_COMMON_MASK_SH_LIST_DCN30(mask_sh) \ + SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DWB_ENABLE, mask_sh),\ + SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DISPCLK_R_DWB_GATE_DIS, mask_sh),\ + SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DISPCLK_G_DWB_GATE_DIS, mask_sh),\ + SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DWB_TEST_CLK_SEL, mask_sh),\ + SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_FORCE, mask_sh),\ + SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_DIS, mask_sh),\ + SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_STATE, mask_sh),\ + SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_EN, mask_sh),\ + SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_RATE, mask_sh),\ + SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_WINDOW_CROP_EN, mask_sh),\ + SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_EYE_SELECTION, mask_sh),\ + SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_STEREO_EYE_POLARITY, mask_sh),\ + SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_NEW_CONTENT, mask_sh),\ + SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_EN_CURRENT, mask_sh),\ + SF_DWB2(FC_FLOW_CTRL, DWB_TOP, 0, FC_FIRST_PIXEL_DELAY_COUNT, mask_sh),\ + SF_DWB2(FC_WINDOW_START, DWB_TOP, 0, FC_WINDOW_START_X, mask_sh),\ + SF_DWB2(FC_WINDOW_START, DWB_TOP, 0, FC_WINDOW_START_Y, mask_sh),\ + SF_DWB2(FC_WINDOW_SIZE, DWB_TOP, 0, FC_WINDOW_WIDTH, mask_sh),\ + SF_DWB2(FC_WINDOW_SIZE, DWB_TOP, 0, FC_WINDOW_HEIGHT, mask_sh),\ + SF_DWB2(FC_SOURCE_SIZE, DWB_TOP, 0, FC_SOURCE_WIDTH, mask_sh),\ + SF_DWB2(FC_SOURCE_SIZE, DWB_TOP, 0, FC_SOURCE_HEIGHT, mask_sh),\ + SF_DWB2(DWB_UPDATE_CTRL, DWB_TOP, 0, DWB_UPDATE_LOCK, mask_sh),\ + SF_DWB2(DWB_UPDATE_CTRL, DWB_TOP, 0, DWB_UPDATE_PENDING, mask_sh),\ + SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_EN, mask_sh),\ + SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_CONT_EN, mask_sh),\ + 
SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_SRC_SEL, mask_sh),\ + SF_DWB2(DWB_CRC_MASK_R_G, DWB_TOP, 0, DWB_CRC_RED_MASK, mask_sh),\ + SF_DWB2(DWB_CRC_MASK_R_G, DWB_TOP, 0, DWB_CRC_GREEN_MASK, mask_sh),\ + SF_DWB2(DWB_CRC_MASK_B_A, DWB_TOP, 0, DWB_CRC_BLUE_MASK, mask_sh),\ + SF_DWB2(DWB_CRC_MASK_B_A, DWB_TOP, 0, DWB_CRC_A_MASK, mask_sh),\ + SF_DWB2(DWB_CRC_VAL_R_G, DWB_TOP, 0, DWB_CRC_SIG_RED, mask_sh),\ + SF_DWB2(DWB_CRC_VAL_R_G, DWB_TOP, 0, DWB_CRC_SIG_GREEN, mask_sh),\ + SF_DWB2(DWB_CRC_VAL_B_A, DWB_TOP, 0, DWB_CRC_SIG_BLUE, mask_sh),\ + SF_DWB2(DWB_CRC_VAL_B_A, DWB_TOP, 0, DWB_CRC_SIG_A, mask_sh),\ + SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_FORMAT, mask_sh),\ + SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_DENORM, mask_sh),\ + SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_MAX, mask_sh),\ + SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_MIN, mask_sh),\ + SF_DWB2(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, DWB_TOP, 0, DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, mask_sh),\ + SF_DWB2(DWB_MMHUBBUB_BACKPRESSURE_CNT, DWB_TOP, 0, DWB_MMHUBBUB_MAX_BACKPRESSURE, mask_sh),\ + SF_DWB2(DWB_HOST_READ_CONTROL, DWB_TOP, 0, DWB_HOST_READ_RATE_CONTROL, mask_sh),\ + SF_DWB2(DWB_SOFT_RESET, DWB_TOP, 0, DWB_SOFT_RESET, mask_sh),\ + SF_DWB2(DWB_HDR_MULT_COEF, DWBCP, 0, DWB_HDR_MULT_COEF, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAP_MODE, DWBCP, 0, DWB_GAMUT_REMAP_MODE, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAP_MODE, DWBCP, 0, DWB_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAP_COEF_FORMAT, DWBCP, 0, DWB_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPA_C11, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPA_C12, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPA_C13, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPA_C14, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPA_C21, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPA_C22, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPA_C23, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPA_C24, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPA_C31, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPA_C32, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPA_C33, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPA_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPA_C34, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPB_C11, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPB_C12, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPB_C13, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPB_C14, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPB_C21, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPB_C22, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPB_C23, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPB_C24, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPB_C31, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPB_C32, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPB_C33, mask_sh),\ + SF_DWB2(DWB_GAMUT_REMAPB_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPB_C34, mask_sh),\ + SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_MODE, mask_sh),\ + SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_SELECT, mask_sh),\ + SF_DWB2(DWB_OGAM_CONTROL, 
DWBCP, 0, DWB_OGAM_PWL_DISABLE, mask_sh),\ + SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_MODE_CURRENT, mask_sh),\ + SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_SELECT_CURRENT, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_INDEX, DWBCP, 0, DWB_OGAM_LUT_INDEX, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_OFFSET_B, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_OFFSET_G, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_OFFSET_R, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, 
DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, 
DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_G, mask_sh),\ + 
SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_OFFSET_B, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_B, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_OFFSET_G, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_G, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_OFFSET_R, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_R, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 
0, DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh),\ + 
SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh),\ + SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh) + + +#define DWBC_REG_FIELD_LIST_DCN3_0(type) \ + type DWB_ENABLE;\ + type DISPCLK_R_DWB_GATE_DIS;\ + type DISPCLK_G_DWB_GATE_DIS;\ + type DWB_TEST_CLK_SEL;\ + type DWBSCL_LUT_MEM_PWR_FORCE;\ + type DWBSCL_LUT_MEM_PWR_DIS;\ + type DWBSCL_LUT_MEM_PWR_STATE;\ + type DWBSCL_LB_MEM_PWR_FORCE;\ + type DWBSCL_LB_MEM_PWR_DIS;\ + type DWBSCL_LB_MEM_PWR_STATE;\ + type DWB_OGAM_LUT_MEM_PWR_FORCE;\ + type DWB_OGAM_LUT_MEM_PWR_DIS;\ + type DWB_OGAM_LUT_MEM_PWR_STATE;\ + type FC_FRAME_CAPTURE_EN;\ + type FC_FRAME_CAPTURE_RATE;\ + type FC_WINDOW_CROP_EN;\ + type FC_EYE_SELECTION;\ + type FC_STEREO_EYE_POLARITY;\ + type FC_NEW_CONTENT;\ + type FC_FI_EN;\ + type FC_FI_PHASE;\ + type FC_FRAME_CAPTURE_EN_CURRENT;\ + type FC_FIRST_PIXEL_DELAY_COUNT;\ + type FC_WINDOW_START_X;\ + type FC_WINDOW_START_Y;\ + type FC_WINDOW_WIDTH;\ + type FC_WINDOW_HEIGHT;\ + type FC_SOURCE_WIDTH;\ + type FC_SOURCE_HEIGHT;\ + type DWB_UPDATE_LOCK;\ + type DWB_UPDATE_PENDING;\ + type DWB_CRC_EN;\ + type DWB_CRC_CONT_EN;\ + type DWB_CRC_SRC_SEL;\ + type DWB_CRC_RED_MASK;\ + type DWB_CRC_GREEN_MASK;\ + type DWB_CRC_BLUE_MASK;\ + type DWB_CRC_A_MASK;\ + type DWB_CRC_SIG_RED;\ + type DWB_CRC_SIG_GREEN;\ + type DWB_CRC_SIG_BLUE;\ + type DWB_CRC_SIG_A;\ + type OUT_FORMAT;\ + type OUT_DENORM;\ + type OUT_MAX;\ + type OUT_MIN;\ + type DWB_MMHUBBUB_BACKPRESSURE_CNT_EN;\ + type DWB_MMHUBBUB_MAX_BACKPRESSURE;\ + type DWB_HOST_READ_RATE_CONTROL;\ + type DWBSCL_DATA_OVERFLOW_FLAG;\ + type DWBSCL_DATA_OVERFLOW_ACK;\ + type DWBSCL_DATA_OVERFLOW_MASK;\ + type DWBSCL_DATA_OVERFLOW_INT_STATUS;\ + type DWBSCL_DATA_OVERFLOW_INT_TYPE;\ + type DWBSCL_DATA_OVERFLOW_TYPE;\ + type DWBSCL_DATA_OVERFLOW_OUT_X_CNT;\ + type DWBSCL_DATA_OVERFLOW_OUT_Y_CNT;\ + type DWB_SOFT_RESET;\ + type DWBSCL_COEF_RAM_TAP_PAIR_IDX;\ + type DWBSCL_COEF_RAM_PHASE;\ + type DWBSCL_COEF_RAM_FILTER_TYPE;\ + type DWBSCL_COEF_RAM_SELECT_RD;\ + type DWBSCL_COEF_RAM_EVEN_TAP_COEF;\ + type DWBSCL_COEF_RAM_EVEN_TAP_COEF_EN;\ + type DWBSCL_COEF_RAM_ODD_TAP_COEF;\ + type DWBSCL_COEF_RAM_ODD_TAP_COEF_EN;\ + type DWBSCL_MODE;\ + type DWBSCL_COEF_RAM_SELECT;\ + type DWBSCL_COEF_RAM_SELECT_CURRENT;\ + type DWBSCL_H_NUM_OF_TAPS;\ + type DWBSCL_V_NUM_OF_TAPS;\ + type DWBSCL_H_SCALE_RATIO;\ + type DWBSCL_H_INIT_FRAC;\ + type DWBSCL_H_INIT_INT;\ + type DWBSCL_V_SCALE_RATIO;\ + type DWBSCL_V_INIT_FRAC;\ + type DWBSCL_V_INIT_INT;\ + type DWBSCL_BOUNDARY_MODE;\ + type DWBSCL_BLACK_COLOR_RGB;\ + type DWBSCL_DEST_WIDTH;\ + type DWBSCL_DEST_HEIGHT;\ + type DWB_HDR_MULT_COEF;\ + type DWB_GAMUT_REMAP_MODE;\ + type 
DWB_GAMUT_REMAP_MODE_CURRENT;\ + type DWB_GAMUT_REMAP_COEF_FORMAT;\ + type DWB_GAMUT_REMAPA_C11;\ + type DWB_GAMUT_REMAPA_C12;\ + type DWB_GAMUT_REMAPA_C13;\ + type DWB_GAMUT_REMAPA_C14;\ + type DWB_GAMUT_REMAPA_C21;\ + type DWB_GAMUT_REMAPA_C22;\ + type DWB_GAMUT_REMAPA_C23;\ + type DWB_GAMUT_REMAPA_C24;\ + type DWB_GAMUT_REMAPA_C31;\ + type DWB_GAMUT_REMAPA_C32;\ + type DWB_GAMUT_REMAPA_C33;\ + type DWB_GAMUT_REMAPA_C34;\ + type DWB_GAMUT_REMAPB_C11;\ + type DWB_GAMUT_REMAPB_C12;\ + type DWB_GAMUT_REMAPB_C13;\ + type DWB_GAMUT_REMAPB_C14;\ + type DWB_GAMUT_REMAPB_C21;\ + type DWB_GAMUT_REMAPB_C22;\ + type DWB_GAMUT_REMAPB_C23;\ + type DWB_GAMUT_REMAPB_C24;\ + type DWB_GAMUT_REMAPB_C31;\ + type DWB_GAMUT_REMAPB_C32;\ + type DWB_GAMUT_REMAPB_C33;\ + type DWB_GAMUT_REMAPB_C34;\ + type DWB_OGAM_MODE;\ + type DWB_OGAM_SELECT;\ + type DWB_OGAM_PWL_DISABLE;\ + type DWB_OGAM_MODE_CURRENT;\ + type DWB_OGAM_SELECT_CURRENT;\ + type DWB_OGAM_LUT_INDEX;\ + type DWB_OGAM_LUT_DATA;\ + type DWB_OGAM_LUT_WRITE_COLOR_MASK;\ + type DWB_OGAM_LUT_READ_COLOR_SEL;\ + type DWB_OGAM_LUT_READ_DBG;\ + type DWB_OGAM_LUT_HOST_SEL;\ + type DWB_OGAM_LUT_CONFIG_MODE;\ + type DWB_OGAM_LUT_STATUS;\ + type DWB_OGAM_RAMA_EXP_REGION_START_B;\ + type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;\ + type DWB_OGAM_RAMA_EXP_REGION_START_G;\ + type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G;\ + type DWB_OGAM_RAMA_EXP_REGION_START_R;\ + type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R;\ + type DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;\ + type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;\ + type DWB_OGAM_RAMA_EXP_REGION_START_BASE_G;\ + type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G;\ + type DWB_OGAM_RAMA_EXP_REGION_START_BASE_R;\ + type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R;\ + type DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;\ + type DWB_OGAM_RAMA_EXP_REGION_END_B;\ + type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;\ + type DWB_OGAM_RAMA_EXP_REGION_END_BASE_G;\ + type DWB_OGAM_RAMA_EXP_REGION_END_G;\ + type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G;\ + type DWB_OGAM_RAMA_EXP_REGION_END_BASE_R;\ + type DWB_OGAM_RAMA_EXP_REGION_END_R;\ + type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R;\ + type DWB_OGAM_RAMA_OFFSET_B;\ + type DWB_OGAM_RAMA_OFFSET_G;\ + type DWB_OGAM_RAMA_OFFSET_R;\ + type DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET;\ + type 
DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS;\ + type DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET;\ + type DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION_START_B;\ + type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B;\ + type DWB_OGAM_RAMB_EXP_REGION_START_G;\ + type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G;\ + type DWB_OGAM_RAMB_EXP_REGION_START_R;\ + type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R;\ + type DWB_OGAM_RAMB_EXP_REGION_START_BASE_B;\ + type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B;\ + type DWB_OGAM_RAMB_EXP_REGION_START_BASE_G;\ + type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G;\ + type DWB_OGAM_RAMB_EXP_REGION_START_BASE_R;\ + type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R;\ + type DWB_OGAM_RAMB_EXP_REGION_END_BASE_B;\ + type DWB_OGAM_RAMB_EXP_REGION_END_B;\ + type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B;\ + type DWB_OGAM_RAMB_EXP_REGION_END_BASE_G;\ + type DWB_OGAM_RAMB_EXP_REGION_END_G;\ + type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G;\ + type DWB_OGAM_RAMB_EXP_REGION_END_BASE_R;\ + type DWB_OGAM_RAMB_EXP_REGION_END_R;\ + type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R;\ + type DWB_OGAM_RAMB_OFFSET_B;\ + type DWB_OGAM_RAMB_OFFSET_G;\ + type DWB_OGAM_RAMB_OFFSET_R;\ + type DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET;\ + type 
DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\ + type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\ + type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS + +struct dcn30_dwbc_registers { + /* DCN3AG */ + /* DWB_TOP */ + uint32_t DWB_ENABLE_CLK_CTRL; + uint32_t DWB_MEM_PWR_CTRL; + uint32_t FC_MODE_CTRL; + uint32_t FC_FLOW_CTRL; + uint32_t FC_WINDOW_START; + uint32_t FC_WINDOW_SIZE; + uint32_t FC_SOURCE_SIZE; + uint32_t DWB_UPDATE_CTRL; + uint32_t DWB_CRC_CTRL; + uint32_t DWB_CRC_MASK_R_G; + uint32_t DWB_CRC_MASK_B_A; + uint32_t DWB_CRC_VAL_R_G; + uint32_t DWB_CRC_VAL_B_A; + uint32_t DWB_OUT_CTRL; + uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT_EN; + uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT; + uint32_t DWB_HOST_READ_CONTROL; + uint32_t DWB_SOFT_RESET; + uint32_t DWB_DEBUG_CTRL; + uint32_t DWB_DEBUG; + uint32_t DWB_TEST_DEBUG_INDEX; + uint32_t DWB_TEST_DEBUG_DATA; + + /* DWBSCL */ + uint32_t DWBSCL_COEF_RAM_TAP_SELECT; + uint32_t DWBSCL_COEF_RAM_TAP_DATA; + uint32_t DWBSCL_MODE; + 
uint32_t DWBSCL_TAP_CONTROL; + uint32_t DWBSCL_HORZ_FILTER_SCALE_RATIO; + uint32_t DWBSCL_HORZ_FILTER_INIT; + uint32_t DWBSCL_VERT_FILTER_SCALE_RATIO; + uint32_t DWBSCL_VERT_FILTER_INIT; + uint32_t DWBSCL_BOUNDARY_CTRL; + uint32_t DWBSCL_DEST_SIZE; + uint32_t DWBSCL_OVERFLOW_STATUS; + uint32_t DWBSCL_OVERFLOW_COUNTER; + uint32_t DWBSCL_DEBUG; + uint32_t DWBSCL_TEST_DEBUG_INDEX; + uint32_t DWBSCL_TEST_DEBUG_DATA; + + /* DWBCP */ + uint32_t DWB_HDR_MULT_COEF; + uint32_t DWB_GAMUT_REMAP_MODE; + uint32_t DWB_GAMUT_REMAP_COEF_FORMAT; + uint32_t DWB_GAMUT_REMAPA_C11_C12; + uint32_t DWB_GAMUT_REMAPA_C13_C14; + uint32_t DWB_GAMUT_REMAPA_C21_C22; + uint32_t DWB_GAMUT_REMAPA_C23_C24; + uint32_t DWB_GAMUT_REMAPA_C31_C32; + uint32_t DWB_GAMUT_REMAPA_C33_C34; + uint32_t DWB_GAMUT_REMAPB_C11_C12; + uint32_t DWB_GAMUT_REMAPB_C13_C14; + uint32_t DWB_GAMUT_REMAPB_C21_C22; + uint32_t DWB_GAMUT_REMAPB_C23_C24; + uint32_t DWB_GAMUT_REMAPB_C31_C32; + uint32_t DWB_GAMUT_REMAPB_C33_C34; + uint32_t DWB_OGAM_CONTROL; + uint32_t DWB_OGAM_LUT_INDEX; + uint32_t DWB_OGAM_LUT_DATA; + uint32_t DWB_OGAM_LUT_CONTROL; + uint32_t DWB_OGAM_RAMA_START_CNTL_B; + uint32_t DWB_OGAM_RAMA_START_CNTL_G; + uint32_t DWB_OGAM_RAMA_START_CNTL_R; + uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_B; + uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_B; + uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_G; + uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_G; + uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_R; + uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_R; + uint32_t DWB_OGAM_RAMA_END_CNTL1_B; + uint32_t DWB_OGAM_RAMA_END_CNTL2_B; + uint32_t DWB_OGAM_RAMA_END_CNTL1_G; + uint32_t DWB_OGAM_RAMA_END_CNTL2_G; + uint32_t DWB_OGAM_RAMA_END_CNTL1_R; + uint32_t DWB_OGAM_RAMA_END_CNTL2_R; + uint32_t DWB_OGAM_RAMA_OFFSET_B; + uint32_t DWB_OGAM_RAMA_OFFSET_G; + uint32_t DWB_OGAM_RAMA_OFFSET_R; + uint32_t DWB_OGAM_RAMA_REGION_0_1; + uint32_t DWB_OGAM_RAMA_REGION_2_3; + uint32_t DWB_OGAM_RAMA_REGION_4_5; + uint32_t DWB_OGAM_RAMA_REGION_6_7; + uint32_t DWB_OGAM_RAMA_REGION_8_9; + uint32_t DWB_OGAM_RAMA_REGION_10_11; + uint32_t DWB_OGAM_RAMA_REGION_12_13; + uint32_t DWB_OGAM_RAMA_REGION_14_15; + uint32_t DWB_OGAM_RAMA_REGION_16_17; + uint32_t DWB_OGAM_RAMA_REGION_18_19; + uint32_t DWB_OGAM_RAMA_REGION_20_21; + uint32_t DWB_OGAM_RAMA_REGION_22_23; + uint32_t DWB_OGAM_RAMA_REGION_24_25; + uint32_t DWB_OGAM_RAMA_REGION_26_27; + uint32_t DWB_OGAM_RAMA_REGION_28_29; + uint32_t DWB_OGAM_RAMA_REGION_30_31; + uint32_t DWB_OGAM_RAMA_REGION_32_33; + uint32_t DWB_OGAM_RAMB_START_CNTL_B; + uint32_t DWB_OGAM_RAMB_START_CNTL_G; + uint32_t DWB_OGAM_RAMB_START_CNTL_R; + uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_B; + uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_B; + uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_G; + uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_G; + uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_R; + uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_R; + uint32_t DWB_OGAM_RAMB_END_CNTL1_B; + uint32_t DWB_OGAM_RAMB_END_CNTL2_B; + uint32_t DWB_OGAM_RAMB_END_CNTL1_G; + uint32_t DWB_OGAM_RAMB_END_CNTL2_G; + uint32_t DWB_OGAM_RAMB_END_CNTL1_R; + uint32_t DWB_OGAM_RAMB_END_CNTL2_R; + uint32_t DWB_OGAM_RAMB_OFFSET_B; + uint32_t DWB_OGAM_RAMB_OFFSET_G; + uint32_t DWB_OGAM_RAMB_OFFSET_R; + uint32_t DWB_OGAM_RAMB_REGION_0_1; + uint32_t DWB_OGAM_RAMB_REGION_2_3; + uint32_t DWB_OGAM_RAMB_REGION_4_5; + uint32_t DWB_OGAM_RAMB_REGION_6_7; + uint32_t DWB_OGAM_RAMB_REGION_8_9; + uint32_t DWB_OGAM_RAMB_REGION_10_11; + uint32_t DWB_OGAM_RAMB_REGION_12_13; + uint32_t DWB_OGAM_RAMB_REGION_14_15; + uint32_t DWB_OGAM_RAMB_REGION_16_17; + uint32_t 
DWB_OGAM_RAMB_REGION_18_19; + uint32_t DWB_OGAM_RAMB_REGION_20_21; + uint32_t DWB_OGAM_RAMB_REGION_22_23; + uint32_t DWB_OGAM_RAMB_REGION_24_25; + uint32_t DWB_OGAM_RAMB_REGION_26_27; + uint32_t DWB_OGAM_RAMB_REGION_28_29; + uint32_t DWB_OGAM_RAMB_REGION_30_31; + uint32_t DWB_OGAM_RAMB_REGION_32_33; + uint32_t DWBCP_DEBUG; + uint32_t DWBCP_TEST_DEBUG_INDEX; + uint32_t DWBCP_TEST_DEBUG_DATA; +}; + +/* Internal enums / structs */ +enum dwbscl_coef_filter_type_sel { + DWBSCL_COEF_RAM_FILTER_TYPE_VERT_RGB = 0, + DWBSCL_COEF_RAM_FILTER_TYPE_HORZ_RGB = 1 +}; + + +struct dcn30_dwbc_mask { + DWBC_REG_FIELD_LIST_DCN3_0(uint32_t); +}; + +struct dcn30_dwbc_shift { + DWBC_REG_FIELD_LIST_DCN3_0(uint8_t); +}; + +struct dcn30_dwbc { + struct dwbc base; + const struct dcn30_dwbc_registers *dwbc_regs; + const struct dcn30_dwbc_shift *dwbc_shift; + const struct dcn30_dwbc_mask *dwbc_mask; +}; + +void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30, + struct dc_context *ctx, + const struct dcn30_dwbc_registers *dwbc_regs, + const struct dcn30_dwbc_shift *dwbc_shift, + const struct dcn30_dwbc_mask *dwbc_mask, + int inst); + +bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params); + +bool dwb3_disable(struct dwbc *dwbc); + +bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params); + +bool dwb3_is_enabled(struct dwbc *dwbc); + +void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable); + +void dwb3_set_stereo(struct dwbc *dwbc, + struct dwb_stereo_params *stereo_params); + +void dwb3_set_new_content(struct dwbc *dwbc, + bool is_new_content); + +void dwb3_config_fc(struct dwbc *dwbc, + struct dc_dwb_params *params); + +void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params); + +void dwb3_program_hdr_mult( + struct dwbc *dwbc, + const struct dc_dwb_params *params); + +void dwb3_set_gamut_remap( + struct dwbc *dwbc, + const struct dc_dwb_params *params); + +bool dwb3_ogam_set_input_transfer_func( + struct dwbc *dwbc, + const struct dc_transfer_func *in_transfer_func_dwb_ogam); + +void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay); +#endif + + diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c new file mode 100644 index 00000000000000..03a50c32fcfe1f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c @@ -0,0 +1,395 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "fixed31_32.h" +#include "resource.h" +#include "basics/conversion.h" +#include "dwb.h" +#include "dcn30_dwb.h" +#include "dcn30_cm_common.h" +#include "dcn10/dcn10_cm_common.h" + + +#define REG(reg)\ + dwbc30->dwbc_regs->reg + +#define CTX \ + dwbc30->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name + +#define TO_DCN30_DWBC(dwbc_base) \ + container_of(dwbc_base, struct dcn30_dwbc, base) + +static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30, + struct dcn3_xfer_func_reg *reg) +{ + reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B; + reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B; + reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B; + reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B; + + reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B; + reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B; + reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; +} + +/*program dwb ogam RAM A*/ +static void dwb3_program_ogam_luta_settings( + struct dcn30_dwbc *dwbc30, + const struct pwl_params *params) +{ + struct dcn3_xfer_func_reg gam_regs; + + dwb3_get_reg_field_ogam(dwbc30, &gam_regs); + + gam_regs.start_cntl_b = REG(DWB_OGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(DWB_OGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = 
REG(DWB_OGAM_RAMA_START_CNTL_R); + gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMA_START_BASE_CNTL_B); + gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMA_START_BASE_CNTL_G); + gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMA_START_BASE_CNTL_R); + gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMA_END_CNTL2_R); + gam_regs.offset_b = REG(DWB_OGAM_RAMA_OFFSET_B); + gam_regs.offset_g = REG(DWB_OGAM_RAMA_OFFSET_G); + gam_regs.offset_r = REG(DWB_OGAM_RAMA_OFFSET_R); + gam_regs.region_start = REG(DWB_OGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(DWB_OGAM_RAMA_REGION_32_33); + /*todo*/ + cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs); +} + +/*program dwb ogam RAM B*/ +static void dwb3_program_ogam_lutb_settings( + struct dcn30_dwbc *dwbc30, + const struct pwl_params *params) +{ + struct dcn3_xfer_func_reg gam_regs; + + dwb3_get_reg_field_ogam(dwbc30, &gam_regs); + + gam_regs.start_cntl_b = REG(DWB_OGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(DWB_OGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(DWB_OGAM_RAMB_START_CNTL_R); + gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMB_START_BASE_CNTL_B); + gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMB_START_BASE_CNTL_G); + gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMB_START_BASE_CNTL_R); + gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMB_END_CNTL2_R); + gam_regs.offset_b = REG(DWB_OGAM_RAMB_OFFSET_B); + gam_regs.offset_g = REG(DWB_OGAM_RAMB_OFFSET_G); + gam_regs.offset_r = REG(DWB_OGAM_RAMB_OFFSET_R); + gam_regs.region_start = REG(DWB_OGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(DWB_OGAM_RAMB_REGION_32_33); + + cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs); +} + +static enum dc_lut_mode dwb3_get_ogam_current( + struct dcn30_dwbc *dwbc30) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + uint32_t ram_select; + + REG_GET_2(DWB_OGAM_CONTROL, + DWB_OGAM_MODE_CURRENT, &state_mode, + DWB_OGAM_SELECT_CURRENT, &ram_select); + + if (state_mode == 0) { + mode = LUT_BYPASS; + } else if (state_mode == 2) { + if (ram_select == 0) + mode = LUT_RAM_A; + else if (ram_select == 1) + mode = LUT_RAM_B; + else + mode = LUT_BYPASS; + } else { + // Reserved value + mode = LUT_BYPASS; + BREAK_TO_DEBUGGER(); + return mode; + } + return mode; +} + +static void dwb3_configure_ogam_lut( + struct dcn30_dwbc *dwbc30, + bool is_ram_a) +{ + REG_UPDATE_2(DWB_OGAM_LUT_CONTROL, + DWB_OGAM_LUT_WRITE_COLOR_MASK, 7, + DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 
0 : 1); + + REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0); +} + +static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + + uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; + uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; + uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; + + if (is_rgb_equal(rgb, num)) { + for (i = 0 ; i < num; i++) + REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg); + + REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red); + + } else { + + REG_UPDATE(DWB_OGAM_LUT_CONTROL, + DWB_OGAM_LUT_WRITE_COLOR_MASK, 4); + + for (i = 0 ; i < num; i++) + REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg); + + REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red); + + REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0); + + REG_UPDATE(DWB_OGAM_LUT_CONTROL, + DWB_OGAM_LUT_WRITE_COLOR_MASK, 2); + + for (i = 0 ; i < num; i++) + REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg); + + REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green); + + REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0); + + REG_UPDATE(DWB_OGAM_LUT_CONTROL, + DWB_OGAM_LUT_WRITE_COLOR_MASK, 1); + + for (i = 0 ; i < num; i++) + REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg); + + REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue); + } +} + +static bool dwb3_program_ogam_lut( + struct dcn30_dwbc *dwbc30, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + + if (params == NULL) { + REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 0); + return false; + } + + if (params->hw_points_num == 0) + return false; + + REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2); + + current_mode = dwb3_get_ogam_current(dwbc30); + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dwb3_configure_ogam_lut(dwbc30, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dwb3_program_ogam_luta_settings(dwbc30, params); + else + dwb3_program_ogam_lutb_settings(dwbc30, params); + + dwb3_program_ogam_pwl( + dwbc30, params->rgb_resulted, params->hw_points_num); + + REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 
0 : 1); + + return true; +} + +bool dwb3_ogam_set_input_transfer_func( + struct dwbc *dwbc, + const struct dc_transfer_func *in_transfer_func_dwb_ogam) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + bool result = false; + struct pwl_params *dwb_ogam_lut = NULL; + + if (in_transfer_func_dwb_ogam == NULL) + return result; + + dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL); + + if (dwb_ogam_lut) { + cm_helper_translate_curve_to_hw_format(dwbc->ctx, + in_transfer_func_dwb_ogam, + dwb_ogam_lut, false); + + result = dwb3_program_ogam_lut( + dwbc30, + dwb_ogam_lut); + kfree(dwb_ogam_lut); + dwb_ogam_lut = NULL; + } + + return result; +} + +static void dwb3_program_gamut_remap( + struct dwbc *dwbc, + const uint16_t *regval, + enum cm_gamut_coef_format coef_format, + enum cm_gamut_remap_select select) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + + struct color_matrices_reg gam_regs; + + if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) { + REG_SET(DWB_GAMUT_REMAP_MODE, 0, + DWB_GAMUT_REMAP_MODE, 0); + return; + } + + REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format); + + gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11; + gam_regs.masks.csc_c11 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11; + gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12; + gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12; + + switch (select) { + case CM_GAMUT_REMAP_MODE_RAMA_COEFF: + gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12); + gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPA_C33_C34); + + cm_helper_program_color_matrices( + dwbc30->base.ctx, + regval, + &gam_regs); + break; + case CM_GAMUT_REMAP_MODE_RAMB_COEFF: + gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPB_C11_C12); + gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPB_C33_C34); + + cm_helper_program_color_matrices( + dwbc30->base.ctx, + regval, + &gam_regs); + break; + case CM_GAMUT_REMAP_MODE_RESERVED: + /* should never happen, bug */ + BREAK_TO_DEBUGGER(); + return; + default: + break; + } + + REG_SET(DWB_GAMUT_REMAP_MODE, 0, + DWB_GAMUT_REMAP_MODE, select); + +} + +void dwb3_set_gamut_remap( + struct dwbc *dwbc, + const struct dc_dwb_params *params) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + struct cm_grph_csc_adjustment adjust = params->csc_params; + int i = 0; + + if (adjust.gamut_adjust_type != CM_GAMUT_ADJUST_TYPE_SW) { + /* Bypass if type is bypass or hw */ + dwb3_program_gamut_remap(dwbc, NULL, adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_BYPASS); + } else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + unsigned int current_mode; + + for (i = 0; i < 12; i++) + arr_matrix[i] = adjust.temperature_matrix[i]; + + convert_float_matrix(arr_reg_val, arr_matrix, 12); + + REG_GET(DWB_GAMUT_REMAP_MODE, DWB_GAMUT_REMAP_MODE_CURRENT, &current_mode); + + if (current_mode == CM_GAMUT_REMAP_MODE_RAMA_COEFF) { + dwb3_program_gamut_remap(dwbc, arr_reg_val, + adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMB_COEFF); + } else { + dwb3_program_gamut_remap(dwbc, arr_reg_val, + adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMA_COEFF); + } + } +} + +void dwb3_program_hdr_mult( + struct dwbc *dwbc, + const struct dc_dwb_params *params) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + + REG_UPDATE(DWB_HDR_MULT_COEF, DWB_HDR_MULT_COEF, params->hdr_mult); +} diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c index b23a809999edcd..d5e8294f5a1686 100644 ---
a/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c @@ -21,7 +21,6 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ - #include "reg_helper.h" #include "dcn35_dwb.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c index 46415cab23ab20..928abca18a1811 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c @@ -86,7 +86,13 @@ static const struct ddc_registers ddc_data_regs_dcn[] = { ddc_data_regs_dcn2(2), ddc_data_regs_dcn2(3), ddc_data_regs_dcn2(4), -// ddc_data_regs_dcn2(5), + { + // add a dummy entry for cases no such port + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + }, { // add a dummy entry for cases no such port {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, @@ -107,7 +113,13 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = { ddc_clk_regs_dcn2(2), ddc_clk_regs_dcn2(3), ddc_clk_regs_dcn2(4), -// ddc_clk_regs_dcn2(5), + { + // add a dummy entry for cases no such port + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + }, { // add a dummy entry for cases no such port {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, diff --git a/drivers/gpu/drm/amd/display/dc/hpo/Makefile b/drivers/gpu/drm/amd/display/dc/hpo/Makefile index c248bd86b477ac..7f2c9ee0dff1d5 100644 --- a/drivers/gpu/drm/amd/display/dc/hpo/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hpo/Makefile @@ -25,6 +25,21 @@ ifdef CONFIG_DRM_AMD_DC_FP ############################################################################### +# DCN30 +############################################################################### + +AMD_DAL_HPO_DCN30 = $(addprefix $(AMDDALPATH)/dc/hpo/dcn30/,$(HPO_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HPO_DCN30) +############################################################################### +# DCN31 +############################################################################### +HPO_DCN31 = dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o + +AMD_DAL_HPO_DCN31 = $(addprefix $(AMDDALPATH)/dc/hpo/dcn31/,$(HPO_DCN31)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HPO_DCN31) +############################################################################### # DCN32 ############################################################################### HPO_DCN32 = dcn32_hpo_dp_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c new file mode 100644 index 00000000000000..03b4ac2f1991a7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c @@ -0,0 +1,628 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "dcn31_hpo_dp_link_encoder.h" +#include "reg_helper.h" +#include "stream_encoder.h" + +#define DC_LOGGER \ + enc3->base.ctx->logger + +#define REG(reg)\ + (enc3->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name + + +#define CTX \ + enc3->base.ctx + +enum { + DP_SAT_UPDATE_MAX_RETRY = 200 +}; + +void dcn31_hpo_dp_link_enc_enable( + struct hpo_dp_link_encoder *enc, + enum dc_lane_count num_lanes) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t dp_link_enabled; + + /* get current status of link enabled */ + REG_GET(DP_DPHY_SYM32_STATUS, + STATUS, &dp_link_enabled); + + /* Enable clocks first */ + REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 1); + + /* Reset DPHY. Only reset if going from disable to enable */ + if (!dp_link_enabled) { + REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 1); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 0); + } + + /* Configure DPHY settings */ + REG_UPDATE_3(DP_DPHY_SYM32_CONTROL, + DPHY_ENABLE, 1, + PRECODER_ENABLE, 1, + NUM_LANES, num_lanes == LANE_COUNT_ONE ? 0 : num_lanes == LANE_COUNT_TWO ? 1 : 3); +} + +void dcn31_hpo_dp_link_enc_disable( + struct hpo_dp_link_encoder *enc) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + + /* Configure DPHY settings */ + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + DPHY_ENABLE, 0); + + /* Shut down clock last */ + REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 0); +} + +void dcn31_hpo_dp_link_enc_set_link_test_pattern( + struct hpo_dp_link_encoder *enc, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t tp_custom; + + switch (tp_params->dp_phy_pattern) { + case DP_TEST_PATTERN_VIDEO_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_ACTIVE); + break; + case DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_TRAINING_TPS1); + break; + case DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_TRAINING_TPS2); + break; + case DP_TEST_PATTERN_128b_132b_TPS1: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT1, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT2, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT3, DP_DPHY_TP_SELECT_TPS1); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_128b_132b_TPS2: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT1, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT2, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT3, DP_DPHY_TP_SELECT_TPS2); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS7: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS7, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS7, + TP_PRBS_SEL2, 
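/* Note on the test-pattern cases below (descriptive annotation, not part of
 * the patch): each PRBS case writes the same polynomial selection into all
 * four per-lane TP_PRBS_SELx fields and selects the PRBS generator on all
 * four TP_SELECTx fields before switching the DPHY into DP2_TEST_PATTERN
 * mode, so the requested pattern is emitted identically on every lane.
 */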
DP_DPHY_TP_PRBS7, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS7); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS9: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS9, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS9, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS9, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS9); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS11: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS11); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS15: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS15); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS23: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS23); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS31: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS31); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_264BIT_CUSTOM: + tp_custom = (tp_params->custom_pattern[2] << 16) | (tp_params->custom_pattern[1] << 8) | tp_params->custom_pattern[0]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM0, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[5] << 16) | (tp_params->custom_pattern[4] << 8) | tp_params->custom_pattern[3]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM1, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[8] << 16) | (tp_params->custom_pattern[7] << 8) | tp_params->custom_pattern[6]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM2, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[11] << 16) | (tp_params->custom_pattern[10] << 8) | tp_params->custom_pattern[9]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM3, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[14] << 16) | (tp_params->custom_pattern[13] 
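/* Annotation on the custom-pattern packing below (derived from the code, not
 * part of the patch): the 264-bit custom pattern arrives as 33 bytes in
 * custom_pattern[0..32]; each DP_DPHY_SYM32_TP_CUSTOMn register takes three
 * of those bytes, lowest-index byte in the least significant bits of its
 * 24-bit TP_CUSTOM field, so TP_CUSTOM0..TP_CUSTOM10 together carry the full
 * pattern (11 * 24 = 264 bits).
 */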
<< 8) | tp_params->custom_pattern[12]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM4, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[17] << 16) | (tp_params->custom_pattern[16] << 8) | tp_params->custom_pattern[15]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM5, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[20] << 16) | (tp_params->custom_pattern[19] << 8) | tp_params->custom_pattern[18]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM6, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[23] << 16) | (tp_params->custom_pattern[22] << 8) | tp_params->custom_pattern[21]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM7, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[26] << 16) | (tp_params->custom_pattern[25] << 8) | tp_params->custom_pattern[24]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM8, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[29] << 16) | (tp_params->custom_pattern[28] << 8) | tp_params->custom_pattern[27]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM9, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[32] << 16) | (tp_params->custom_pattern[31] << 8) | tp_params->custom_pattern[30]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM10, 0, TP_CUSTOM, tp_custom); + + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT1, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT2, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT3, DP_DPHY_TP_SELECT_CUSTOM); + + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_SQUARE: + case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: + case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: + case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: + REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0, + TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]); + + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_SQUARE, + TP_SELECT1, DP_DPHY_TP_SELECT_SQUARE, + TP_SELECT2, DP_DPHY_TP_SELECT_SQUARE, + TP_SELECT3, DP_DPHY_TP_SELECT_SQUARE); + + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + default: + break; + } +} + +static void fill_stream_allocation_row_info( + const struct link_mst_stream_allocation *stream_allocation, + uint32_t *src, + uint32_t *slots) +{ + const struct hpo_dp_stream_encoder *stream_enc = stream_allocation->hpo_dp_stream_enc; + + if (stream_enc && (stream_enc->id >= ENGINE_ID_HPO_DP_0)) { + *src = stream_enc->id - ENGINE_ID_HPO_DP_0; + *slots = stream_allocation->slot_count; + } else { + *src = 0; + *slots = 0; + } +} + +/* programs DP VC payload allocation */ +void dcn31_hpo_dp_link_enc_update_stream_allocation_table( + struct hpo_dp_link_encoder *enc, + const struct link_mst_stream_allocation_table *table) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t slots = 0; + uint32_t src = 0; + + /* --- Set MSE Stream Attribute - + * Setup VC Payload Table on Tx Side, + * Issue allocation change trigger + * to commit payload on both tx and rx side + */ + + /* we should clean-up table each time */ + + if (table->stream_count >= 1) { + fill_stream_allocation_row_info( + &table->stream_allocations[0], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC0, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + if (table->stream_count >= 2) { + fill_stream_allocation_row_info( + &table->stream_allocations[1], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC1, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, 
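/* Annotation on the SAT programming in this function (descriptive note, not
 * part of the patch): rows 0..3 of the MST stream allocation table map to
 * SAT_VC0..SAT_VC3. Any row at or beyond table->stream_count is written back
 * as stream source 0 with a slot count of 0, so stale allocations are cleared
 * on every update before SAT_UPDATE is set to 1 to trigger the allocation
 * change and SAT_UPDATE_PENDING is polled until the hardware commits it.
 */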
slots); + + if (table->stream_count >= 3) { + fill_stream_allocation_row_info( + &table->stream_allocations[2], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC2, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + if (table->stream_count >= 4) { + fill_stream_allocation_row_info( + &table->stream_allocations[3], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC3, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + /* --- wait for transaction finish */ + + /* send allocation change trigger (ACT) + * this step first sends the ACT, + * then double buffers the SAT into the hardware + * making the new allocation active on the DP MST mode link + */ + + /* SAT_UPDATE: + * 0 - No Action + * 1 - Update SAT with trigger + * 2 - Update SAT without trigger + */ + REG_UPDATE(DP_DPHY_SYM32_SAT_UPDATE, + SAT_UPDATE, 1); + + /* wait for update to complete + * (i.e. SAT_UPDATE_PENDING field is set to 0) + * No need for HW to enforce keepout. + */ + /* Best case and worst case wait time for SAT_UPDATE_PENDING + * best: 109 us + * worst: 868 us + */ + REG_WAIT(DP_DPHY_SYM32_STATUS, + SAT_UPDATE_PENDING, 0, + 100, DP_SAT_UPDATE_MAX_RETRY); +} + +void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( + struct hpo_dp_link_encoder *enc, + uint32_t stream_encoder_inst, + struct fixed31_32 avg_time_slots_per_mtp) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t x = dc_fixpt_floor( + avg_time_slots_per_mtp); + uint32_t y = dc_fixpt_ceil( + dc_fixpt_shl( + dc_fixpt_sub_int( + avg_time_slots_per_mtp, + x), + 25)); + + // If y rounds up to integer, carry it over to x. + if (y >> 25) { + x += 1; + y = 0; + } + + switch (stream_encoder_inst) { + case 0: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 1: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 2: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 3: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + default: + ASSERT(0); + } + + /* Best case and worst case wait time for RATE_UPDATE_PENDING + * best: 116 ns + * worst: 903 ns + */ + /* wait for update to be completed on the link */ + REG_WAIT(DP_DPHY_SYM32_STATUS, + RATE_UPDATE_PENDING, 0, + 1, 10); +} + +static bool dcn31_hpo_dp_link_enc_is_in_alt_mode( + struct hpo_dp_link_encoder *enc) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t dp_alt_mode_disable = 0; + + ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E)); + + /* if value == 1 alt mode is disabled, otherwise it is enabled */ + REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + return (dp_alt_mode_disable == 0); +} + +void dcn31_hpo_dp_link_enc_read_state( + struct hpo_dp_link_encoder *enc, + struct hpo_dp_link_enc_state *state) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + + ASSERT(state); + + REG_GET(DP_DPHY_SYM32_STATUS, + STATUS, &state->link_enc_enabled); + REG_GET(DP_DPHY_SYM32_CONTROL, + NUM_LANES, &state->lane_count); + REG_GET(DP_DPHY_SYM32_CONTROL, + MODE, (uint32_t *)&state->link_mode); + + REG_GET_2(DP_DPHY_SYM32_SAT_VC0, + SAT_STREAM_SOURCE, &state->stream_src[0], + SAT_SLOT_COUNT, 
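/* For reference (worked example, not part of the patch): the X.Y value
 * programmed by dcn31_hpo_dp_link_enc_set_throttled_vcp_size() is plain
 * fixed point, with X the whole number of time slots per MTP and Y the
 * fractional part in units of 1/2^25. For example,
 * avg_time_slots_per_mtp = 3.5 gives X = 3 and
 * Y = ceil(0.5 * 2^25) = 0x1000000; only when ceil() rounds the fraction
 * all the way up to 2^25 does the carry bump X by one and clear Y.
 */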
&state->slot_count[0]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC1, + SAT_STREAM_SOURCE, &state->stream_src[1], + SAT_SLOT_COUNT, &state->slot_count[1]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC2, + SAT_STREAM_SOURCE, &state->stream_src[2], + SAT_SLOT_COUNT, &state->slot_count[2]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC3, + SAT_STREAM_SOURCE, &state->stream_src[3], + SAT_SLOT_COUNT, &state->slot_count[3]); + + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, + STREAM_VC_RATE_X, &state->vc_rate_x[0], + STREAM_VC_RATE_Y, &state->vc_rate_y[0]); + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, + STREAM_VC_RATE_X, &state->vc_rate_x[1], + STREAM_VC_RATE_Y, &state->vc_rate_y[1]); + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, + STREAM_VC_RATE_X, &state->vc_rate_x[2], + STREAM_VC_RATE_Y, &state->vc_rate_y[2]); + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, + STREAM_VC_RATE_X, &state->vc_rate_x[3], + STREAM_VC_RATE_Y, &state->vc_rate_y[3]); +} + +static enum bp_result link_transmitter_control( + struct dcn31_hpo_dp_link_encoder *enc3, + struct bp_transmitter_control *cntl) +{ + enum bp_result result; + struct dc_bios *bp = enc3->base.ctx->dc_bios; + + result = bp->funcs->transmitter_control(bp, cntl); + + return result; +} + +/* enables DP PHY output for 128b132b encoding */ +void dcn31_hpo_dp_link_enc_enable_dp_output( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + enum transmitter transmitter, + enum hpd_source_id hpd_source) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* Set the transmitter */ + enc3->base.transmitter = transmitter; + + /* Set the hpd source */ + enc3->base.hpd_source = hpd_source; + + /* Enable the PHY */ + cntl.action = TRANSMITTER_CONTROL_ENABLE; + cntl.engine_id = ENGINE_ID_UNKNOWN; + cntl.transmitter = enc3->base.transmitter; + //cntl.pll_id = clock_source; + cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; + cntl.lanes_number = link_settings->lane_count; + cntl.hpd_sel = enc3->base.hpd_source; + cntl.pixel_clock = link_settings->link_rate * 1000; + cntl.color_depth = COLOR_DEPTH_UNDEFINED; + cntl.hpo_engine_id = enc->inst + ENGINE_ID_HPO_DP_0; + + result = link_transmitter_control(enc3, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} + +void dcn31_hpo_dp_link_enc_disable_output( + struct hpo_dp_link_encoder *enc, + enum signal_type signal) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* disable transmitter */ + cntl.action = TRANSMITTER_CONTROL_DISABLE; + cntl.transmitter = enc3->base.transmitter; + cntl.hpd_sel = enc3->base.hpd_source; + cntl.signal = signal; + + result = link_transmitter_control(enc3, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + return; + } + + /* disable encoder */ + dcn31_hpo_dp_link_enc_disable(enc); +} + +void dcn31_hpo_dp_link_enc_set_ffe( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + uint8_t ffe_preset) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* disable transmitter */ + cntl.transmitter = enc3->base.transmitter; + cntl.action = 
TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS; + cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; + cntl.lanes_number = link_settings->lane_count; + cntl.pixel_clock = link_settings->link_rate * 1000; + cntl.lane_settings = ffe_preset; + + result = link_transmitter_control(enc3, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + return; + } +} + +static struct hpo_dp_link_encoder_funcs dcn31_hpo_dp_link_encoder_funcs = { + .enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output, + .disable_link_phy = dcn31_hpo_dp_link_enc_disable_output, + .link_enable = dcn31_hpo_dp_link_enc_enable, + .link_disable = dcn31_hpo_dp_link_enc_disable, + .set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern, + .update_stream_allocation_table = dcn31_hpo_dp_link_enc_update_stream_allocation_table, + .set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size, + .is_in_alt_mode = dcn31_hpo_dp_link_enc_is_in_alt_mode, + .read_state = dcn31_hpo_dp_link_enc_read_state, + .set_ffe = dcn31_hpo_dp_link_enc_set_ffe, +}; + +void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask) +{ + enc31->base.ctx = ctx; + + enc31->base.inst = inst; + enc31->base.funcs = &dcn31_hpo_dp_link_encoder_funcs; + enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN; + enc31->base.transmitter = TRANSMITTER_UNKNOWN; + + enc31->regs = hpo_le_regs; + enc31->hpo_le_shift = hpo_le_shift; + enc31->hpo_le_mask = hpo_le_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h new file mode 100644 index 00000000000000..51f5781325e895 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h @@ -0,0 +1,229 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ +#define __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ + +#include "link_encoder.h" + + +#define DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(hpo_dp_link_encoder)\ + container_of(hpo_dp_link_encoder, struct dcn31_hpo_dp_link_encoder, base) + + +#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id) \ + SRI(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \ + SRI(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) + +#define DCN3_1_RDPCSTX_REG_LIST(id) \ + SRII(RDPCSTX_PHY_CNTL6, RDPCSTX, id) + + +#define DCN3_1_HPO_DP_LINK_ENC_REGS \ + uint32_t DP_LINK_ENC_CLOCK_CONTROL;\ + uint32_t DP_DPHY_SYM32_CONTROL;\ + uint32_t DP_DPHY_SYM32_STATUS;\ + uint32_t DP_DPHY_SYM32_TP_CONFIG;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED0;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED1;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED2;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED3;\ + uint32_t DP_DPHY_SYM32_TP_SQ_PULSE;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM0;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM1;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM2;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM3;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM4;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM5;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM6;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM7;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM8;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM9;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM10;\ + uint32_t DP_DPHY_SYM32_SAT_VC0;\ + uint32_t DP_DPHY_SYM32_SAT_VC1;\ + uint32_t DP_DPHY_SYM32_SAT_VC2;\ + uint32_t DP_DPHY_SYM32_SAT_VC3;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL0;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL1;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL2;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL3;\ + uint32_t DP_DPHY_SYM32_SAT_UPDATE + +struct dcn31_hpo_dp_link_encoder_registers { + DCN3_1_HPO_DP_LINK_ENC_REGS; + uint32_t RDPCSTX_PHY_CNTL6[5]; +}; + +#define DCN3_1_HPO_DP_LINK_ENC_RDPCSTX_MASK_SH_LIST(mask_sh)\ + SE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh) + +#define DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(mask_sh)\ + SE_SF(DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, mask_sh),\ + 
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_RESET, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, PRECODER_ENABLE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, MODE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, NUM_LANES, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, STATUS, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, RATE_UPDATE_PENDING, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0, TP_CUSTOM, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT1, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT2, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT3, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh) + +#define DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(mask_sh)\ + DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(mask_sh),\ + DCN3_1_HPO_DP_LINK_ENC_RDPCSTX_MASK_SH_LIST(mask_sh)\ + +#define DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(type) \ + type DP_LINK_ENC_CLOCK_EN;\ + type DPHY_RESET;\ + type DPHY_ENABLE;\ + type PRECODER_ENABLE;\ + type NUM_LANES;\ + type MODE;\ + type STATUS;\ + type SAT_UPDATE_PENDING;\ + type RATE_UPDATE_PENDING;\ + type TP_CUSTOM;\ + type TP_SELECT0;\ + type TP_SELECT1;\ + type TP_SELECT2;\ + type TP_SELECT3;\ + type TP_PRBS_SEL0;\ + type TP_PRBS_SEL1;\ + type TP_PRBS_SEL2;\ + type TP_PRBS_SEL3;\ + type TP_SQ_PULSE_WIDTH;\ + type SAT_STREAM_SOURCE;\ + type SAT_SLOT_COUNT;\ + type STREAM_VC_RATE_X;\ + type STREAM_VC_RATE_Y;\ + type SAT_UPDATE;\ + type RDPCS_PHY_DPALT_DISABLE + + +struct dcn31_hpo_dp_link_encoder_shift { + DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_hpo_dp_link_encoder_mask { + DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_hpo_dp_link_encoder { + struct hpo_dp_link_encoder base; + const struct dcn31_hpo_dp_link_encoder_registers *regs; + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift; + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask; +}; + +void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask); + +void dcn31_hpo_dp_link_enc_enable_dp_output( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + enum transmitter transmitter, + enum hpd_source_id hpd_source); + +void dcn31_hpo_dp_link_enc_disable_output( + struct hpo_dp_link_encoder *enc, + enum signal_type signal); + +void 
dcn31_hpo_dp_link_enc_enable( + struct hpo_dp_link_encoder *enc, + enum dc_lane_count num_lanes); + +void dcn31_hpo_dp_link_enc_disable( + struct hpo_dp_link_encoder *enc); + +void dcn31_hpo_dp_link_enc_set_link_test_pattern( + struct hpo_dp_link_encoder *enc, + struct encoder_set_dp_phy_pattern_param *tp_params); + +void dcn31_hpo_dp_link_enc_update_stream_allocation_table( + struct hpo_dp_link_encoder *enc, + const struct link_mst_stream_allocation_table *table); + +void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( + struct hpo_dp_link_encoder *enc, + uint32_t stream_encoder_inst, + struct fixed31_32 avg_time_slots_per_mtp); + +void dcn31_hpo_dp_link_enc_read_state( + struct hpo_dp_link_encoder *enc, + struct hpo_dp_link_enc_state *state); + +void dcn31_hpo_dp_link_enc_set_ffe( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + uint8_t ffe_preset); + +#endif // __DAL_DCN31_HPO_LINK_ENCODER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c new file mode 100644 index 00000000000000..678db949cfe3ce --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -0,0 +1,779 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "dcn31_hpo_dp_stream_encoder.h" +#include "reg_helper.h" +#include "dc.h" + +#define DC_LOGGER \ + enc3->base.ctx->logger + +#define REG(reg)\ + (enc3->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc3->hpo_se_shift->field_name, enc3->hpo_se_mask->field_name + +#define CTX \ + enc3->base.ctx + + +enum dp2_pixel_encoding { + DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444, + DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422, + DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420, + DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY +}; + +enum dp2_uncompressed_component_depth { + DP_SYM32_ENC_COMPONENT_DEPTH_6BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_8BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_10BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_12BPC +}; + + +static void dcn31_hpo_dp_stream_enc_enable_stream( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Enable all clocks in the DP_STREAM_ENC */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, + DP_STREAM_ENC_CLOCK_EN, 1); + + /* Assert reset to the DP_SYM32_ENC logic */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET, 1); + /* Wait for reset to complete (to assert) */ + REG_WAIT(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET_DONE, 1, + 1, 10); + + /* De-assert reset to the DP_SYM32_ENC logic */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET, 0); + /* Wait for reset to de-assert */ + REG_WAIT(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET_DONE, 0, + 1, 10); + + /* Enable idle pattern generation */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_dp_unblank( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_source) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Set the input mux for video stream source */ + REG_UPDATE(DP_STREAM_ENC_INPUT_MUX_CONTROL, + DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, stream_source); + + /* Enable video transmission in main framer */ + REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, + VID_STREAM_ENABLE, 1); + + /* Reset and Enable Pixel to Symbol FIFO */ + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET, 1); + REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 1, + 1, 10); + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET, 0); + REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, /* Disable Clock Ramp Adjuster FIFO */ + PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 0, + 1, 10); + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_ENABLE, 1); + + /* Reset and Enable Clock Ramp Adjuster FIFO */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET, 1); + REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET_DONE, 1, + 1, 10); + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET, 0); + REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET_DONE, 0, + 1, 10); + + /* For Debug -- Enable CRC */ + REG_UPDATE_2(DP_SYM32_ENC_VID_CRC_CONTROL, + CRC_ENABLE, 1, + CRC_CONT_MODE_ENABLE, 1); + + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_dp_blank( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable video transmission */ + REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, 
+ VID_STREAM_ENABLE, 0); + + /* Wait for video stream transmission disabled + * Larger delay to wait until VBLANK - use max retry of + * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode + + * a little more because we may not trust delay accuracy. + */ + REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL, + VID_STREAM_STATUS, 0, + 10, 5000); + + /* Disable SDP transmission */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); + + /* Disable Pixel to Symbol FIFO */ + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_ENABLE, 0); + + /* Disable Clock Ramp Adjuster FIFO */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_ENABLE, 0); +} + +static void dcn31_hpo_dp_stream_enc_disable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable DP_SYM32_ENC */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, 0); + + /* Disable clocks in the DP_STREAM_ENC */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, + DP_STREAM_ENC_CLOCK_EN, 0); +} + +static void dcn31_hpo_dp_stream_enc_set_stream_attribute( + struct hpo_dp_stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, + bool compressed_format, + bool double_buffer_en) +{ + enum dp2_pixel_encoding pixel_encoding; + enum dp2_uncompressed_component_depth component_depth; + uint32_t h_active_start; + uint32_t v_active_start; + uint32_t h_blank; + uint32_t h_back_porch; + uint32_t h_width; + uint32_t v_height; + uint64_t v_freq; + uint8_t misc0 = 0; + uint8_t misc1 = 0; + uint8_t hsp; + uint8_t vsp; + + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + struct dc_crtc_timing hw_crtc_timing = *crtc_timing; + + /* MISC0[0] = 0 video and link clocks are asynchronous + * MISC1[0] = 0 interlace not supported + * MISC1[2:1] = 0 stereo field is handled by hardware + * MISC1[5:3] = 0 Reserved + */ + + /* Interlaced not supported */ + if (hw_crtc_timing.flags.INTERLACE) { + BREAK_TO_DEBUGGER(); + } + + /* Double buffer enable for MSA and pixel format registers + * Only double buffer for changing stream attributes for active streams + * Do not double buffer when initially enabling a stream + */ + REG_UPDATE(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, + MSA_DOUBLE_BUFFER_ENABLE, double_buffer_en); + REG_UPDATE(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, + PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, double_buffer_en); + + /* Pixel Encoding */ + switch (hw_crtc_timing.pixel_encoding) { + case PIXEL_ENCODING_YCBCR422: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422; + misc0 = misc0 | 0x2; // MISC0[2:1] = 01 + break; + case PIXEL_ENCODING_YCBCR444: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; + misc0 = misc0 | 0x4; // MISC0[2:1] = 10 + + if (hw_crtc_timing.flags.Y_ONLY) { + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY; + if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) { + /* HW testing only, no use case yet. + * Color depth of Y-only could be + * 8, 10, 12, 16 bits + */ + misc1 = misc1 | 0x80; // MISC1[7] = 1 + } + } + break; + case PIXEL_ENCODING_YCBCR420: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420; + misc1 = misc1 | 0x40; // MISC1[6] = 1 + break; + case PIXEL_ENCODING_RGB: + default: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; + break; + } + + /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used. 
+ * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the + * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, + * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). + */ + if (use_vsc_sdp_for_colorimetry) + misc1 = misc1 | 0x40; + else + misc1 = misc1 & ~0x40; + + /* Color depth */ + switch (hw_crtc_timing.display_color_depth) { + case COLOR_DEPTH_666: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; + // MISC0[7:5] = 000 + break; + case COLOR_DEPTH_888: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_8BPC; + misc0 = misc0 | 0x20; // MISC0[7:5] = 001 + break; + case COLOR_DEPTH_101010: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_10BPC; + misc0 = misc0 | 0x40; // MISC0[7:5] = 010 + break; + case COLOR_DEPTH_121212: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_12BPC; + misc0 = misc0 | 0x60; // MISC0[7:5] = 011 + break; + default: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; + break; + } + + REG_UPDATE_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, + PIXEL_ENCODING_TYPE, compressed_format, + UNCOMPRESSED_PIXEL_ENCODING, pixel_encoding, + UNCOMPRESSED_COMPONENT_DEPTH, component_depth); + + switch (output_color_space) { + case COLOR_SPACE_SRGB: + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + break; + case COLOR_SPACE_SRGB_LIMITED: + misc0 = misc0 | 0x8; /* bit3=1 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: + misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) + misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ + break; + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: + misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) + misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ + break; + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_XR_RGB: + case COLOR_SPACE_MSREF_SCRGB: + case COLOR_SPACE_ADOBERGB: + case COLOR_SPACE_DCIP3: + case COLOR_SPACE_XV_YCC_709: + case COLOR_SPACE_XV_YCC_601: + case COLOR_SPACE_DISPLAYNATIVE: + case COLOR_SPACE_DOLBYVISION: + case COLOR_SPACE_APPCTRL: + case COLOR_SPACE_CUSTOMPOINTS: + case COLOR_SPACE_UNKNOWN: + case COLOR_SPACE_YCBCR709_BLACK: + /* do nothing */ + break; + } + + /* calculate from vesa timing parameters + * h_active_start related to leading edge of sync + */ + h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left - + hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right; + + h_back_porch = h_blank - hw_crtc_timing.h_front_porch - + hw_crtc_timing.h_sync_width; + + /* start at beginning of left border */ + h_active_start = hw_crtc_timing.h_sync_width + h_back_porch; + + v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top - + hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom - + hw_crtc_timing.v_front_porch; + + h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right; + v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom; + hsp = 
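/* Worked example of the MISC0 byte assembled above (derived from the
 * switches in this function, not part of the patch): bits [2:1] carry the
 * pixel encoding (00 RGB, 01 YCbCr 4:2:2, 10 YCbCr 4:4:4), bit 3 marks
 * limited range / YCbCr colorimetry, bit 4 distinguishes BT.709 from BT.601
 * and bits [7:5] carry the component depth. So RGB 8 bpc full-range sRGB
 * ends up as misc0 = 0x20, while YCbCr 4:4:4 BT.709 10 bpc ends up as
 * 0x04 | 0x18 | 0x40 = 0x5c.
 */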
hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80; + vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80; + v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100; + + /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1 + * + * Lane 0 Lane 1 Lane 2 Lane 3 + * MSA[0] = { 0, 0, 0, VFREQ[47:40]} + * MSA[1] = { 0, 0, 0, VFREQ[39:32]} + * MSA[2] = { 0, 0, 0, VFREQ[31:24]} + * MSA[3] = { HTotal[15:8], HStart[15:8], HWidth[15:8], VFREQ[23:16]} + * MSA[4] = { HTotal[ 7:0], HStart[ 7:0], HWidth[ 7:0], VFREQ[15: 8]} + * MSA[5] = { VTotal[15:8], VStart[15:8], VHeight[15:8], VFREQ[ 7: 0]} + * MSA[6] = { VTotal[ 7:0], VStart[ 7:0], VHeight[ 7:0], MISC0[ 7: 0]} + * MSA[7] = { HSP|HSW[14:8], VSP|VSW[14:8], 0, MISC1[ 7: 0]} + * MSA[8] = { HSW[ 7:0], VSW[ 7:0], 0, 0} + */ + REG_SET_4(DP_SYM32_ENC_VID_MSA0, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, v_freq >> 40); + + REG_SET_4(DP_SYM32_ENC_VID_MSA1, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, (v_freq >> 32) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA2, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, (v_freq >> 24) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA3, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_total >> 8, + MSA_DATA_LANE_1, h_active_start >> 8, + MSA_DATA_LANE_2, h_width >> 8, + MSA_DATA_LANE_3, (v_freq >> 16) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA4, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_total & 0xff, + MSA_DATA_LANE_1, h_active_start & 0xff, + MSA_DATA_LANE_2, h_width & 0xff, + MSA_DATA_LANE_3, (v_freq >> 8) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA5, 0, + MSA_DATA_LANE_0, hw_crtc_timing.v_total >> 8, + MSA_DATA_LANE_1, v_active_start >> 8, + MSA_DATA_LANE_2, v_height >> 8, + MSA_DATA_LANE_3, v_freq & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA6, 0, + MSA_DATA_LANE_0, hw_crtc_timing.v_total & 0xff, + MSA_DATA_LANE_1, v_active_start & 0xff, + MSA_DATA_LANE_2, v_height & 0xff, + MSA_DATA_LANE_3, misc0); + + REG_SET_4(DP_SYM32_ENC_VID_MSA7, 0, + MSA_DATA_LANE_0, hsp | (hw_crtc_timing.h_sync_width >> 8), + MSA_DATA_LANE_1, vsp | (hw_crtc_timing.v_sync_width >> 8), + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, misc1); + + REG_SET_4(DP_SYM32_ENC_VID_MSA8, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_sync_width & 0xff, + MSA_DATA_LANE_1, hw_crtc_timing.v_sync_width & 0xff, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, 0); +} + +static void dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num( + struct hpo_dp_stream_encoder *enc, + struct encoder_info_frame *info_frame) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + if (info_frame->adaptive_sync.valid == true && + info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) { + //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, 1); + + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, + info_frame->sdp_line_num.adaptive_sync_line_num); + } +} + +static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( + struct hpo_dp_stream_encoder *enc, + const struct encoder_info_frame *info_frame) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t dmdata_packet_enabled = 0; + + if (info_frame->vsc.valid) + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 0, /* packetIndex */ + &info_frame->vsc, + true); + + if (info_frame->spd.valid) + 
enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 2, /* packetIndex */ + &info_frame->spd, + true); + + if (info_frame->hdrsmd.valid) + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 3, /* packetIndex */ + &info_frame->hdrsmd, + true); + + /* packetIndex 4 is used for send immediate sdp message, and please + * use other packetIndex (such as 5,6) for other info packet + */ + + if (info_frame->adaptive_sync.valid) + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 5, /* packetIndex */ + &info_frame->adaptive_sync, + true); + + /* enable/disable transmission of packet(s). + * If enabled, packet transmission begins on the next frame + */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->adaptive_sync.valid); + + /* check if dynamic metadata packet transmission is enabled */ + REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, + METADATA_PACKET_ENABLE, &dmdata_packet_enabled); + + /* Enable secondary data path */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_stop_dp_info_packets( + struct hpo_dp_stream_encoder *enc) +{ + /* stop generic packets on DP */ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t asp_enable = 0; + uint32_t atp_enable = 0; + uint32_t aip_enable = 0; + uint32_t acm_enable = 0; + + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + + /* Disable secondary data path if audio is also disabled */ + REG_GET_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ASP_ENABLE, &asp_enable, + ATP_ENABLE, &atp_enable, + AIP_ENABLE, &aip_enable, + ACM_ENABLE, &acm_enable); + if (!(asp_enable || atp_enable || aip_enable || acm_enable)) + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); +} + +static uint32_t hpo_dp_is_gsp_enabled( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t gsp0_enabled = 0; + uint32_t gsp2_enabled = 0; + uint32_t gsp3_enabled = 0; + uint32_t gsp11_enabled = 0; + + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp0_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp2_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp3_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp11_enabled); + + return (gsp0_enabled || gsp2_enabled || gsp3_enabled || gsp11_enabled); +} + +static void dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet( + struct hpo_dp_stream_encoder *enc, + bool enable, + uint8_t *dsc_packed_pps, + bool immediate_update) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + if (enable) { + struct dc_info_packet pps_sdp; + int i; + + /* Configure for PPS packet size (128 bytes) */ + 
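/* Descriptive note on the PPS path below (annotation, not part of the
 * patch): the 128-byte packed DSC PPS is carried in an SDP whose header
 * byte HB1 is DC_DP_INFOFRAME_TYPE_PPS. The loop splits it into four
 * 32-byte chunks loaded into generic packets 11..14, then GSP11 is enabled
 * for continuous transmission at line 2, ahead of the VBID[6]
 * compressed-stream flag which is set to update at line 3, matching the
 * requirement stated below that the VBID[6] update line be larger than the
 * PPS transmit line.
 */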
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_PAYLOAD_SIZE, 3); + + /* Load PPS into infoframe (SDP) registers */ + pps_sdp.valid = true; + pps_sdp.hb0 = 0; + pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS; + pps_sdp.hb2 = 127; + pps_sdp.hb3 = 0; + + for (i = 0; i < 4; i++) { + memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32); + enc3->base.vpg->funcs->update_generic_info_packet( + enc3->base.vpg, + 11 + i, + &pps_sdp, + immediate_update); + } + + /* SW should make sure VBID[6] update line number is bigger + * than PPS transmit line number + */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_TRANSMISSION_LINE_NUMBER, 2); + + REG_UPDATE_2(DP_SYM32_ENC_VID_VBID_CONTROL, + VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, 0, + VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, 3); + + /* Send PPS data at the line number specified above. */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 1); + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); + } else { + /* Disable Generic Stream Packet 11 (GSP) transmission */ + REG_UPDATE_2(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0, + GSP_PAYLOAD_SIZE, 0); + } +} + +static void dcn31_hpo_dp_stream_enc_map_stream_to_link( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_enc_inst, + uint32_t link_enc_inst) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + ASSERT(stream_enc_inst < 4 && link_enc_inst < 2); + + switch (stream_enc_inst) { + case 0: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL0, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 1: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL1, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 2: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL2, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 3: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL3, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + } +} + +static void dcn31_hpo_dp_stream_enc_audio_setup( + struct hpo_dp_stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Set the input mux for video stream source */ + REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL, + DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst); + + ASSERT(enc->apg); + enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info); +} + +static void dcn31_hpo_dp_stream_enc_audio_enable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Enable Audio packets */ + REG_UPDATE(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 1); + + /* Program the ATP and AIP next */ + REG_UPDATE_2(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ATP_ENABLE, 1, + AIP_ENABLE, 1); + + /* Enable secondary data path */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); + + /* Enable APG block */ + enc->apg->funcs->enable_apg(enc->apg); +} + +static void dcn31_hpo_dp_stream_enc_audio_disable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable Audio packets */ + REG_UPDATE_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ASP_ENABLE, 0, + ATP_ENABLE, 0, + AIP_ENABLE, 0, + ACM_ENABLE, 0); + + /* Disable STP Stream Enable if other SDP GSP are also disabled */ + if (!(hpo_dp_is_gsp_enabled(enc))) + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); + + /* Disable APG block */ + 
enc->apg->funcs->disable_apg(enc->apg); +} + +static void dcn31_hpo_dp_stream_enc_read_state( + struct hpo_dp_stream_encoder *enc, + struct hpo_dp_stream_encoder_state *s) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + REG_GET(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, &s->stream_enc_enabled); + REG_GET(DP_SYM32_ENC_VID_STREAM_CONTROL, + VID_STREAM_ENABLE, &s->vid_stream_enabled); + REG_GET(DP_STREAM_ENC_INPUT_MUX_CONTROL, + DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, &s->otg_inst); + + REG_GET_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, + PIXEL_ENCODING_TYPE, &s->compressed_format, + UNCOMPRESSED_PIXEL_ENCODING, &s->pixel_encoding, + UNCOMPRESSED_COMPONENT_DEPTH, &s->component_depth); + + REG_GET(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, &s->sdp_enabled); + + switch (enc->inst) { + case 0: + REG_GET(DP_STREAM_MAPPER_CONTROL0, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 1: + REG_GET(DP_STREAM_MAPPER_CONTROL1, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 2: + REG_GET(DP_STREAM_MAPPER_CONTROL2, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 3: + REG_GET(DP_STREAM_MAPPER_CONTROL3, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + } +} + +static void dcn31_set_hblank_min_symbol_width( + struct hpo_dp_stream_encoder *enc, + uint16_t width) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + REG_SET(DP_SYM32_ENC_HBLANK_CONTROL, 0, + HBLANK_MINIMUM_SYMBOL_WIDTH, width); +} + +static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { + .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream, + .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank, + .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank, + .disable = dcn31_hpo_dp_stream_enc_disable, + .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute, + .update_dp_info_packets_sdp_line_num = dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num, + .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets, + .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets, + .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet, + .map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link, + .dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup, + .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable, + .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable, + .read_state = dcn31_hpo_dp_stream_enc_read_state, + .set_hblank_min_symbol_width = dcn31_set_hblank_min_symbol_width, +}; + +void dcn31_hpo_dp_stream_encoder_construct( + struct dcn31_hpo_dp_stream_encoder *enc3, + struct dc_context *ctx, + struct dc_bios *bp, + uint32_t inst, + enum engine_id eng_id, + struct vpg *vpg, + struct apg *apg, + const struct dcn31_hpo_dp_stream_encoder_registers *regs, + const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask) +{ + enc3->base.funcs = &dcn30_str_enc_funcs; + enc3->base.ctx = ctx; + enc3->base.inst = inst; + enc3->base.id = eng_id; + enc3->base.bp = bp; + enc3->base.vpg = vpg; + enc3->base.apg = apg; + enc3->regs = regs; + enc3->hpo_se_shift = hpo_se_shift; + enc3->hpo_se_mask = hpo_se_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h new file mode 100644 index 00000000000000..82c3b3ac1f0d01 --- /dev/null +++ 
b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h @@ -0,0 +1,245 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__ +#define __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__ + +#include "dcn30/dcn30_vpg.h" +#include "dcn31/dcn31_apg.h" +#include "stream_encoder.h" + + +#define DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(hpo_dp_stream_encoder)\ + container_of(hpo_dp_stream_encoder, struct dcn31_hpo_dp_stream_encoder, base) + + +/* Define MSA_DATA_LANE_[0-3] fields to make programming easier */ +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0__SHIFT 0x0 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1__SHIFT 0x8 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2__SHIFT 0x10 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3__SHIFT 0x18 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0_MASK 0x000000FFL +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1_MASK 0x0000FF00L +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2_MASK 0x00FF0000L +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3_MASK 0xFF000000L + + +#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id) \ + SR(DP_STREAM_MAPPER_CONTROL0),\ + SR(DP_STREAM_MAPPER_CONTROL1),\ + SR(DP_STREAM_MAPPER_CONTROL2),\ + SR(DP_STREAM_MAPPER_CONTROL3),\ + SRI(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id),\ + SRI(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id),\ + 
SRI(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \ + SRI(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) + +#define DCN3_1_HPO_DP_STREAM_ENC_REGS \ + uint32_t DP_STREAM_MAPPER_CONTROL0;\ + uint32_t DP_STREAM_MAPPER_CONTROL1;\ + uint32_t DP_STREAM_MAPPER_CONTROL2;\ + uint32_t DP_STREAM_MAPPER_CONTROL3;\ + uint32_t DP_STREAM_ENC_CLOCK_CONTROL;\ + uint32_t DP_STREAM_ENC_INPUT_MUX_CONTROL;\ + uint32_t DP_STREAM_ENC_AUDIO_CONTROL;\ + uint32_t DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0;\ + uint32_t DP_SYM32_ENC_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT;\ + uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_MSA0;\ + uint32_t DP_SYM32_ENC_VID_MSA1;\ + uint32_t DP_SYM32_ENC_VID_MSA2;\ + uint32_t DP_SYM32_ENC_VID_MSA3;\ + uint32_t DP_SYM32_ENC_VID_MSA4;\ + uint32_t DP_SYM32_ENC_VID_MSA5;\ + uint32_t DP_SYM32_ENC_VID_MSA6;\ + uint32_t DP_SYM32_ENC_VID_MSA7;\ + uint32_t DP_SYM32_ENC_VID_MSA8;\ + uint32_t DP_SYM32_ENC_VID_MSA_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_FIFO_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_STREAM_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_VBID_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL0;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL2;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL3;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL5;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\ + uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\ + uint32_t DP_SYM32_ENC_VID_CRC_CONTROL;\ + uint32_t DP_SYM32_ENC_HBLANK_CONTROL + + +#define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\ + SE_SF(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_PIXEL_ENCODING, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_COMPONENT_DEPTH, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, 
MSA_DOUBLE_BUFFER_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_0, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_1, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_2, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_3, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET_DONE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_STATUS, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_PAYLOAD_SIZE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, METADATA_PACKET_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AUDIO_MUTE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ATP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh) + + +#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \ + type DP_STREAM_LINK_TARGET;\ + type DP_STREAM_ENC_CLOCK_EN;\ + type DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL;\ + type DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL;\ + type FIFO_RESET;\ + type FIFO_RESET_DONE;\ + type FIFO_ENABLE;\ + type DP_SYM32_ENC_RESET;\ + type DP_SYM32_ENC_RESET_DONE;\ + type DP_SYM32_ENC_ENABLE;\ + type PIXEL_ENCODING_TYPE;\ + type UNCOMPRESSED_PIXEL_ENCODING;\ + type UNCOMPRESSED_COMPONENT_DEPTH;\ + type PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE;\ + type MSA_DOUBLE_BUFFER_ENABLE;\ + type MSA_DATA_LANE_0;\ + type MSA_DATA_LANE_1;\ + type MSA_DATA_LANE_2;\ + type MSA_DATA_LANE_3;\ + type PIXEL_TO_SYMBOL_FIFO_RESET;\ + type PIXEL_TO_SYMBOL_FIFO_RESET_DONE;\ + type PIXEL_TO_SYMBOL_FIFO_ENABLE;\ + type VID_STREAM_ENABLE;\ + type VID_STREAM_STATUS;\ + type VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE;\ + type VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER;\ + type SDP_STREAM_ENABLE;\ + type AUDIO_MUTE;\ + type ASP_ENABLE;\ + type ATP_ENABLE;\ + type AIP_ENABLE;\ + type ACM_ENABLE;\ + type GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE;\ + type GSP_PAYLOAD_SIZE;\ + type GSP_TRANSMISSION_LINE_NUMBER;\ + type GSP_SOF_REFERENCE;\ + type 
METADATA_PACKET_ENABLE;\ + type CRC_ENABLE;\ + type CRC_CONT_MODE_ENABLE;\ + type HBLANK_MINIMUM_SYMBOL_WIDTH + + +struct dcn31_hpo_dp_stream_encoder_registers { + DCN3_1_HPO_DP_STREAM_ENC_REGS; +}; + +struct dcn31_hpo_dp_stream_encoder_shift { + DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_hpo_dp_stream_encoder_mask { + DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_hpo_dp_stream_encoder { + struct hpo_dp_stream_encoder base; + const struct dcn31_hpo_dp_stream_encoder_registers *regs; + const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift; + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask; +}; + + +void dcn31_hpo_dp_stream_encoder_construct( + struct dcn31_hpo_dp_stream_encoder *enc3, + struct dc_context *ctx, + struct dc_bios *bp, + uint32_t inst, + enum engine_id eng_id, + struct vpg *vpg, + struct apg *apg, + const struct dcn31_hpo_dp_stream_encoder_registers *regs, + const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask); + + +#endif // __DAL_DCN31_HPO_STREAM_ENCODER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c index 6293173ba2b9d5..5eb3da8d5206e9 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c @@ -545,6 +545,7 @@ static void hubbub35_init(struct hubbub *hubbub) DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 256, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 256); + memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate)); } /*static void hubbub35_set_request_limit(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c index 181041d6d177c0..37d26fa0b6fbb9 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c @@ -75,108 +75,108 @@ bool hubbub401_program_urgent_watermarks( /* Repeat for water mark set A and B */ /* clock state A */ - if (safe_to_lower || watermarks->dcn4.a.urgent > hubbub2->watermarks.dcn4.a.urgent) { - hubbub2->watermarks.dcn4.a.urgent = watermarks->dcn4.a.urgent; + if (safe_to_lower || watermarks->dcn4x.a.urgent > hubbub2->watermarks.dcn4x.a.urgent) { + hubbub2->watermarks.dcn4x.a.urgent = watermarks->dcn4x.a.urgent; REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, watermarks->dcn4.a.urgent); + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, watermarks->dcn4x.a.urgent); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", - watermarks->dcn4.a.urgent, watermarks->dcn4.a.urgent); - } else if (watermarks->dcn4.a.urgent < hubbub2->watermarks.dcn4.a.urgent) + watermarks->dcn4x.a.urgent, watermarks->dcn4x.a.urgent); + } else if (watermarks->dcn4x.a.urgent < hubbub2->watermarks.dcn4x.a.urgent) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_flip - > hubbub2->watermarks.dcn4.a.frac_urg_bw_flip) { - hubbub2->watermarks.dcn4.a.frac_urg_bw_flip = watermarks->dcn4.a.frac_urg_bw_flip; + if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_flip + > hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip) { + hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip = watermarks->dcn4x.a.frac_urg_bw_flip; 
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->dcn4.a.frac_urg_bw_flip); - } else if (watermarks->dcn4.a.frac_urg_bw_flip - < hubbub2->watermarks.dcn4.a.frac_urg_bw_flip) + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->dcn4x.a.frac_urg_bw_flip); + } else if (watermarks->dcn4x.a.frac_urg_bw_flip + < hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_nom - > hubbub2->watermarks.dcn4.a.frac_urg_bw_nom) { - hubbub2->watermarks.dcn4.a.frac_urg_bw_nom = watermarks->dcn4.a.frac_urg_bw_nom; + if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_nom + > hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom) { + hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom = watermarks->dcn4x.a.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->dcn4.a.frac_urg_bw_nom); - } else if (watermarks->dcn4.a.frac_urg_bw_nom - < hubbub2->watermarks.dcn4.a.frac_urg_bw_nom) + DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->dcn4x.a.frac_urg_bw_nom); + } else if (watermarks->dcn4x.a.frac_urg_bw_nom + < hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_mall - > hubbub2->watermarks.dcn4.a.frac_urg_bw_mall) { - hubbub2->watermarks.dcn4.a.frac_urg_bw_mall = watermarks->dcn4.a.frac_urg_bw_mall; + if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_mall + > hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall) { + hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall = watermarks->dcn4x.a.frac_urg_bw_mall; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, watermarks->dcn4.a.frac_urg_bw_mall); - } else if (watermarks->dcn4.a.frac_urg_bw_mall < hubbub2->watermarks.dcn4.a.frac_urg_bw_mall) + DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, watermarks->dcn4x.a.frac_urg_bw_mall); + } else if (watermarks->dcn4x.a.frac_urg_bw_mall < hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.a.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem) { - hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem = watermarks->dcn4.a.refcyc_per_trip_to_mem; + if (safe_to_lower || watermarks->dcn4x.a.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem) { + hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem = watermarks->dcn4x.a.refcyc_per_trip_to_mem; REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, watermarks->dcn4.a.refcyc_per_trip_to_mem); - } else if (watermarks->dcn4.a.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem) + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, watermarks->dcn4x.a.refcyc_per_trip_to_mem); + } else if (watermarks->dcn4x.a.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.a.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem) { - hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem = watermarks->dcn4.a.refcyc_per_meta_trip_to_mem; + if (safe_to_lower || watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem) { + hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem = watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem; REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, 0, - DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, watermarks->dcn4.a.refcyc_per_meta_trip_to_mem); - } else if 
(watermarks->dcn4.a.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem) + DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem); + } else if (watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem) wm_pending = true; /* clock state B */ - if (safe_to_lower || watermarks->dcn4.b.urgent > hubbub2->watermarks.dcn4.b.urgent) { - hubbub2->watermarks.dcn4.b.urgent = watermarks->dcn4.b.urgent; + if (safe_to_lower || watermarks->dcn4x.b.urgent > hubbub2->watermarks.dcn4x.b.urgent) { + hubbub2->watermarks.dcn4x.b.urgent = watermarks->dcn4x.b.urgent; REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, watermarks->dcn4.b.urgent); + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, watermarks->dcn4x.b.urgent); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", - watermarks->dcn4.b.urgent, watermarks->dcn4.b.urgent); - } else if (watermarks->dcn4.b.urgent < hubbub2->watermarks.dcn4.b.urgent) + watermarks->dcn4x.b.urgent, watermarks->dcn4x.b.urgent); + } else if (watermarks->dcn4x.b.urgent < hubbub2->watermarks.dcn4x.b.urgent) wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_flip - > hubbub2->watermarks.dcn4.b.frac_urg_bw_flip) { - hubbub2->watermarks.dcn4.b.frac_urg_bw_flip = watermarks->dcn4.b.frac_urg_bw_flip; + if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_flip + > hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip) { + hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip = watermarks->dcn4x.b.frac_urg_bw_flip; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->dcn4.b.frac_urg_bw_flip); - } else if (watermarks->dcn4.b.frac_urg_bw_flip - < hubbub2->watermarks.dcn4.b.frac_urg_bw_flip) + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->dcn4x.b.frac_urg_bw_flip); + } else if (watermarks->dcn4x.b.frac_urg_bw_flip + < hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_nom - > hubbub2->watermarks.dcn4.b.frac_urg_bw_nom) { - hubbub2->watermarks.dcn4.b.frac_urg_bw_nom = watermarks->dcn4.b.frac_urg_bw_nom; + if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_nom + > hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom) { + hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom = watermarks->dcn4x.b.frac_urg_bw_nom; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->dcn4.b.frac_urg_bw_nom); - } else if (watermarks->dcn4.b.frac_urg_bw_nom - < hubbub2->watermarks.dcn4.b.frac_urg_bw_nom) + DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->dcn4x.b.frac_urg_bw_nom); + } else if (watermarks->dcn4x.b.frac_urg_bw_nom + < hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_mall - > hubbub2->watermarks.dcn4.b.frac_urg_bw_mall) { - hubbub2->watermarks.dcn4.b.frac_urg_bw_mall = watermarks->dcn4.b.frac_urg_bw_mall; + if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_mall + > hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall) { + hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall = watermarks->dcn4x.b.frac_urg_bw_mall; REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, watermarks->dcn4.b.frac_urg_bw_mall); - } else if (watermarks->dcn4.b.frac_urg_bw_mall < hubbub2->watermarks.dcn4.b.frac_urg_bw_mall) + DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, 
watermarks->dcn4x.b.frac_urg_bw_mall); + } else if (watermarks->dcn4x.b.frac_urg_bw_mall < hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.b.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem) { - hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem = watermarks->dcn4.b.refcyc_per_trip_to_mem; + if (safe_to_lower || watermarks->dcn4x.b.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem) { + hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem = watermarks->dcn4x.b.refcyc_per_trip_to_mem; REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, watermarks->dcn4.b.refcyc_per_trip_to_mem); - } else if (watermarks->dcn4.b.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem) + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, watermarks->dcn4x.b.refcyc_per_trip_to_mem); + } else if (watermarks->dcn4x.b.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.b.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem) { - hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem = watermarks->dcn4.b.refcyc_per_meta_trip_to_mem; + if (safe_to_lower || watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem) { + hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem = watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem; REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, 0, - DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, watermarks->dcn4.b.refcyc_per_meta_trip_to_mem); - } else if (watermarks->dcn4.b.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem) + DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem); + } else if (watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem) wm_pending = true; return wm_pending; @@ -192,89 +192,89 @@ bool hubbub401_program_stutter_watermarks( bool wm_pending = false; /* clock state A */ - if (safe_to_lower || watermarks->dcn4.a.sr_enter - > hubbub2->watermarks.dcn4.a.sr_enter) { - hubbub2->watermarks.dcn4.a.sr_enter = - watermarks->dcn4.a.sr_enter; + if (safe_to_lower || watermarks->dcn4x.a.sr_enter + > hubbub2->watermarks.dcn4x.a.sr_enter) { + hubbub2->watermarks.dcn4x.a.sr_enter = + watermarks->dcn4x.a.sr_enter; REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, watermarks->dcn4.a.sr_enter); + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, watermarks->dcn4x.a.sr_enter); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", - watermarks->dcn4.a.sr_enter, watermarks->dcn4.a.sr_enter); + watermarks->dcn4x.a.sr_enter, watermarks->dcn4x.a.sr_enter); // On dGPU Z states are N/A, so program all other 3 Stutter Enter wm A with the same value REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, watermarks->dcn4.a.sr_enter); + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, watermarks->dcn4x.a.sr_enter); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, watermarks->dcn4.a.sr_enter); + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, watermarks->dcn4x.a.sr_enter); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, watermarks->dcn4.a.sr_enter); + 
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, watermarks->dcn4x.a.sr_enter); - } else if (watermarks->dcn4.a.sr_enter - < hubbub2->watermarks.dcn4.a.sr_enter) + } else if (watermarks->dcn4x.a.sr_enter + < hubbub2->watermarks.dcn4x.a.sr_enter) wm_pending = true; - if (safe_to_lower || watermarks->dcn4.a.sr_exit - > hubbub2->watermarks.dcn4.a.sr_exit) { - hubbub2->watermarks.dcn4.a.sr_exit = - watermarks->dcn4.a.sr_exit; + if (safe_to_lower || watermarks->dcn4x.a.sr_exit + > hubbub2->watermarks.dcn4x.a.sr_exit) { + hubbub2->watermarks.dcn4x.a.sr_exit = + watermarks->dcn4x.a.sr_exit; REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, watermarks->dcn4.a.sr_exit); + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, watermarks->dcn4x.a.sr_exit); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", - watermarks->dcn4.a.sr_exit, watermarks->dcn4.a.sr_exit); + watermarks->dcn4x.a.sr_exit, watermarks->dcn4x.a.sr_exit); // On dGPU Z states are N/A, so program all other 3 Stutter Exit wm A with the same value REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, watermarks->dcn4.a.sr_exit); + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, watermarks->dcn4x.a.sr_exit); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, watermarks->dcn4.a.sr_exit); + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, watermarks->dcn4x.a.sr_exit); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, watermarks->dcn4.a.sr_exit); + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, watermarks->dcn4x.a.sr_exit); - } else if (watermarks->dcn4.a.sr_exit - < hubbub2->watermarks.dcn4.a.sr_exit) + } else if (watermarks->dcn4x.a.sr_exit + < hubbub2->watermarks.dcn4x.a.sr_exit) wm_pending = true; /* clock state B */ - if (safe_to_lower || watermarks->dcn4.b.sr_enter - > hubbub2->watermarks.dcn4.b.sr_enter) { - hubbub2->watermarks.dcn4.b.sr_enter = - watermarks->dcn4.b.sr_enter; + if (safe_to_lower || watermarks->dcn4x.b.sr_enter + > hubbub2->watermarks.dcn4x.b.sr_enter) { + hubbub2->watermarks.dcn4x.b.sr_enter = + watermarks->dcn4x.b.sr_enter; REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, watermarks->dcn4.b.sr_enter); + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, watermarks->dcn4x.b.sr_enter); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", - watermarks->dcn4.b.sr_enter, watermarks->dcn4.b.sr_enter); + watermarks->dcn4x.b.sr_enter, watermarks->dcn4x.b.sr_enter); // On dGPU Z states are N/A, so program all other 3 Stutter Enter wm A with the same value REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, watermarks->dcn4.b.sr_enter); + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, watermarks->dcn4x.b.sr_enter); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, watermarks->dcn4.b.sr_enter); + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, watermarks->dcn4x.b.sr_enter); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, watermarks->dcn4.b.sr_enter); + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, watermarks->dcn4x.b.sr_enter); - } else if (watermarks->dcn4.b.sr_enter - < hubbub2->watermarks.dcn4.b.sr_enter) + } else if (watermarks->dcn4x.b.sr_enter + < hubbub2->watermarks.dcn4x.b.sr_enter) wm_pending = true; - if (safe_to_lower || 
watermarks->dcn4.b.sr_exit - > hubbub2->watermarks.dcn4.b.sr_exit) { - hubbub2->watermarks.dcn4.b.sr_exit = - watermarks->dcn4.b.sr_exit; + if (safe_to_lower || watermarks->dcn4x.b.sr_exit + > hubbub2->watermarks.dcn4x.b.sr_exit) { + hubbub2->watermarks.dcn4x.b.sr_exit = + watermarks->dcn4x.b.sr_exit; REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, watermarks->dcn4.b.sr_exit); + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, watermarks->dcn4x.b.sr_exit); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", - watermarks->dcn4.b.sr_exit, watermarks->dcn4.b.sr_exit); + watermarks->dcn4x.b.sr_exit, watermarks->dcn4x.b.sr_exit); // On dGPU Z states are N/A, so program all other 3 Stutter Exit wm A with the same value REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, watermarks->dcn4.b.sr_exit); + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, watermarks->dcn4x.b.sr_exit); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, watermarks->dcn4.b.sr_exit); + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, watermarks->dcn4x.b.sr_exit); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, watermarks->dcn4.b.sr_exit); + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, watermarks->dcn4x.b.sr_exit); - } else if (watermarks->dcn4.b.sr_exit - < hubbub2->watermarks.dcn4.b.sr_exit) + } else if (watermarks->dcn4x.b.sr_exit + < hubbub2->watermarks.dcn4x.b.sr_exit) wm_pending = true; return wm_pending; @@ -292,116 +292,116 @@ bool hubbub401_program_pstate_watermarks( /* Section for UCLK_PSTATE_CHANGE_WATERMARKS */ /* clock state A */ - if (safe_to_lower || watermarks->dcn4.a.uclk_pstate - > hubbub2->watermarks.dcn4.a.uclk_pstate) { - hubbub2->watermarks.dcn4.a.uclk_pstate = - watermarks->dcn4.a.uclk_pstate; + if (safe_to_lower || watermarks->dcn4x.a.uclk_pstate + > hubbub2->watermarks.dcn4x.a.uclk_pstate) { + hubbub2->watermarks.dcn4x.a.uclk_pstate = + watermarks->dcn4x.a.uclk_pstate; REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4.a.uclk_pstate); + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4x.a.uclk_pstate); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.a.uclk_pstate, watermarks->dcn4.a.uclk_pstate); - } else if (watermarks->dcn4.a.uclk_pstate - < hubbub2->watermarks.dcn4.a.uclk_pstate) + watermarks->dcn4x.a.uclk_pstate, watermarks->dcn4x.a.uclk_pstate); + } else if (watermarks->dcn4x.a.uclk_pstate + < hubbub2->watermarks.dcn4x.a.uclk_pstate) wm_pending = true; /* clock state B */ - if (safe_to_lower || watermarks->dcn4.b.uclk_pstate - > hubbub2->watermarks.dcn4.b.uclk_pstate) { - hubbub2->watermarks.dcn4.b.uclk_pstate = - watermarks->dcn4.b.uclk_pstate; + if (safe_to_lower || watermarks->dcn4x.b.uclk_pstate + > hubbub2->watermarks.dcn4x.b.uclk_pstate) { + hubbub2->watermarks.dcn4x.b.uclk_pstate = + watermarks->dcn4x.b.uclk_pstate; REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4.b.uclk_pstate); + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4x.b.uclk_pstate); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.b.uclk_pstate, watermarks->dcn4.b.uclk_pstate); - } else if (watermarks->dcn4.b.uclk_pstate - < 
hubbub2->watermarks.dcn4.b.uclk_pstate) + watermarks->dcn4x.b.uclk_pstate, watermarks->dcn4x.b.uclk_pstate); + } else if (watermarks->dcn4x.b.uclk_pstate + < hubbub2->watermarks.dcn4x.b.uclk_pstate) wm_pending = true; /* Section for UCLK_PSTATE_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */ - if (safe_to_lower || watermarks->dcn4.a.temp_read_or_ppt - > hubbub2->watermarks.dcn4.a.temp_read_or_ppt) { - hubbub2->watermarks.dcn4.a.temp_read_or_ppt = - watermarks->dcn4.a.temp_read_or_ppt; + if (safe_to_lower || watermarks->dcn4x.a.temp_read_or_ppt + > hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) { + hubbub2->watermarks.dcn4x.a.temp_read_or_ppt = + watermarks->dcn4x.a.temp_read_or_ppt; REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, 0, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4.a.temp_read_or_ppt); + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4x.a.temp_read_or_ppt); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_A calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.a.temp_read_or_ppt, watermarks->dcn4.a.temp_read_or_ppt); - } else if (watermarks->dcn4.a.temp_read_or_ppt - < hubbub2->watermarks.dcn4.a.temp_read_or_ppt) + watermarks->dcn4x.a.temp_read_or_ppt, watermarks->dcn4x.a.temp_read_or_ppt); + } else if (watermarks->dcn4x.a.temp_read_or_ppt + < hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) wm_pending = true; /* clock state B */ - if (safe_to_lower || watermarks->dcn4.b.temp_read_or_ppt - > hubbub2->watermarks.dcn4.b.temp_read_or_ppt) { - hubbub2->watermarks.dcn4.b.temp_read_or_ppt = - watermarks->dcn4.b.temp_read_or_ppt; + if (safe_to_lower || watermarks->dcn4x.b.temp_read_or_ppt + > hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) { + hubbub2->watermarks.dcn4x.b.temp_read_or_ppt = + watermarks->dcn4x.b.temp_read_or_ppt; REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, 0, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4.b.temp_read_or_ppt); + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4x.b.temp_read_or_ppt); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_B calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.b.temp_read_or_ppt, watermarks->dcn4.b.temp_read_or_ppt); - } else if (watermarks->dcn4.b.temp_read_or_ppt - < hubbub2->watermarks.dcn4.b.temp_read_or_ppt) + watermarks->dcn4x.b.temp_read_or_ppt, watermarks->dcn4x.b.temp_read_or_ppt); + } else if (watermarks->dcn4x.b.temp_read_or_ppt + < hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) wm_pending = true; /* Section for FCLK_PSTATE_CHANGE_WATERMARKS */ /* clock state A */ - if (safe_to_lower || watermarks->dcn4.a.fclk_pstate - > hubbub2->watermarks.dcn4.a.fclk_pstate) { - hubbub2->watermarks.dcn4.a.fclk_pstate = - watermarks->dcn4.a.fclk_pstate; + if (safe_to_lower || watermarks->dcn4x.a.fclk_pstate + > hubbub2->watermarks.dcn4x.a.fclk_pstate) { + hubbub2->watermarks.dcn4x.a.fclk_pstate = + watermarks->dcn4x.a.fclk_pstate; REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4.a.fclk_pstate); + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4x.a.fclk_pstate); DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.a.fclk_pstate, watermarks->dcn4.a.fclk_pstate); - } else if (watermarks->dcn4.a.fclk_pstate - < hubbub2->watermarks.dcn4.a.fclk_pstate) + watermarks->dcn4x.a.fclk_pstate, watermarks->dcn4x.a.fclk_pstate); + } else if (watermarks->dcn4x.a.fclk_pstate + < 
hubbub2->watermarks.dcn4x.a.fclk_pstate) wm_pending = true; /* clock state B */ - if (safe_to_lower || watermarks->dcn4.b.fclk_pstate - > hubbub2->watermarks.dcn4.b.fclk_pstate) { - hubbub2->watermarks.dcn4.b.fclk_pstate = - watermarks->dcn4.b.fclk_pstate; + if (safe_to_lower || watermarks->dcn4x.b.fclk_pstate + > hubbub2->watermarks.dcn4x.b.fclk_pstate) { + hubbub2->watermarks.dcn4x.b.fclk_pstate = + watermarks->dcn4x.b.fclk_pstate; REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4.b.fclk_pstate); + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4x.b.fclk_pstate); DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.b.fclk_pstate, watermarks->dcn4.b.fclk_pstate); - } else if (watermarks->dcn4.b.fclk_pstate - < hubbub2->watermarks.dcn4.b.fclk_pstate) + watermarks->dcn4x.b.fclk_pstate, watermarks->dcn4x.b.fclk_pstate); + } else if (watermarks->dcn4x.b.fclk_pstate + < hubbub2->watermarks.dcn4x.b.fclk_pstate) wm_pending = true; /* Section for FCLK_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */ - if (safe_to_lower || watermarks->dcn4.a.temp_read_or_ppt - > hubbub2->watermarks.dcn4.a.temp_read_or_ppt) { - hubbub2->watermarks.dcn4.a.temp_read_or_ppt = - watermarks->dcn4.a.temp_read_or_ppt; + if (safe_to_lower || watermarks->dcn4x.a.temp_read_or_ppt + > hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) { + hubbub2->watermarks.dcn4x.a.temp_read_or_ppt = + watermarks->dcn4x.a.temp_read_or_ppt; REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, 0, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4.a.temp_read_or_ppt); + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4x.a.temp_read_or_ppt); DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_A calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.a.temp_read_or_ppt, watermarks->dcn4.a.temp_read_or_ppt); - } else if (watermarks->dcn4.a.temp_read_or_ppt - < hubbub2->watermarks.dcn4.a.temp_read_or_ppt) + watermarks->dcn4x.a.temp_read_or_ppt, watermarks->dcn4x.a.temp_read_or_ppt); + } else if (watermarks->dcn4x.a.temp_read_or_ppt + < hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) wm_pending = true; /* clock state B */ - if (safe_to_lower || watermarks->dcn4.b.temp_read_or_ppt - > hubbub2->watermarks.dcn4.b.temp_read_or_ppt) { - hubbub2->watermarks.dcn4.b.temp_read_or_ppt = - watermarks->dcn4.b.temp_read_or_ppt; + if (safe_to_lower || watermarks->dcn4x.b.temp_read_or_ppt + > hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) { + hubbub2->watermarks.dcn4x.b.temp_read_or_ppt = + watermarks->dcn4x.b.temp_read_or_ppt; REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, 0, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4.b.temp_read_or_ppt); + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4x.b.temp_read_or_ppt); DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_B calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.b.temp_read_or_ppt, watermarks->dcn4.b.temp_read_or_ppt); - } else if (watermarks->dcn4.b.temp_read_or_ppt - < hubbub2->watermarks.dcn4.b.temp_read_or_ppt) + watermarks->dcn4x.b.temp_read_or_ppt, watermarks->dcn4x.b.temp_read_or_ppt); + } else if (watermarks->dcn4x.b.temp_read_or_ppt + < hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) wm_pending = true; return wm_pending; @@ -418,29 +418,29 @@ bool hubbub401_program_usr_watermarks( bool wm_pending = false; /* clock state A */ - if (safe_to_lower || watermarks->dcn4.a.usr - > 
hubbub2->watermarks.dcn4.a.usr) { - hubbub2->watermarks.dcn4.a.usr = watermarks->dcn4.a.usr; + if (safe_to_lower || watermarks->dcn4x.a.usr + > hubbub2->watermarks.dcn4x.a.usr) { + hubbub2->watermarks.dcn4x.a.usr = watermarks->dcn4x.a.usr; REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, watermarks->dcn4.a.usr); + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, watermarks->dcn4x.a.usr); DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.a.usr, watermarks->dcn4.a.usr); - } else if (watermarks->dcn4.a.usr - < hubbub2->watermarks.dcn4.a.usr) + watermarks->dcn4x.a.usr, watermarks->dcn4x.a.usr); + } else if (watermarks->dcn4x.a.usr + < hubbub2->watermarks.dcn4x.a.usr) wm_pending = true; /* clock state B */ - if (safe_to_lower || watermarks->dcn4.b.usr - > hubbub2->watermarks.dcn4.b.usr) { - hubbub2->watermarks.dcn4.b.usr = watermarks->dcn4.b.usr; + if (safe_to_lower || watermarks->dcn4x.b.usr + > hubbub2->watermarks.dcn4x.b.usr) { + hubbub2->watermarks.dcn4x.b.usr = watermarks->dcn4x.b.usr; REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, watermarks->dcn4.b.usr); + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, watermarks->dcn4x.b.usr); DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", - watermarks->dcn4.b.usr, watermarks->dcn4.b.usr); - } else if (watermarks->dcn4.b.usr - < hubbub2->watermarks.dcn4.b.usr) + watermarks->dcn4x.b.usr, watermarks->dcn4x.b.usr); + } else if (watermarks->dcn4x.b.usr + < hubbub2->watermarks.dcn4x.b.usr) wm_pending = true; return wm_pending; @@ -1170,6 +1170,28 @@ static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned comp } } +static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + switch (hubp_inst) { + case 0: + REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100000); /* 1 vupdate at 10hz */ + break; + case 1: + REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100000); + break; + case 2: + REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100000); + break; + case 3: + REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100000); + break; + default: + break; + } +} + static const struct hubbub_funcs hubbub4_01_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, @@ -1192,6 +1214,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = { .set_request_limit = hubbub32_set_request_limit, .program_det_segments = dcn401_program_det_segments, .program_compbuf_segments = dcn401_program_compbuf_segments, + .wait_for_det_update = dcn401_wait_for_det_update, }; void hubbub401_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c index bf399819ca800e..22ac2b7e49aeae 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c @@ -749,7 +749,8 @@ bool hubp1_is_flip_pending(struct hubp *hubp) if (flip_pending) return true; - if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part) + if (hubp && + earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part) return true; return false; diff --git 
a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c index 6bba020ad6fbfe..0637e4c552d8a2 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c @@ -927,7 +927,8 @@ bool hubp2_is_flip_pending(struct hubp *hubp) if (flip_pending) return true; - if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part) + if (hubp && + earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part) return true; return false; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c index 771fcd0d3b9911..d1f05b82b3dd5c 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c @@ -188,7 +188,7 @@ void hubp35_program_surface_config( hubp35_program_pixel_format(hubp, format); } -struct hubp_funcs dcn35_hubp_funcs = { +static struct hubp_funcs dcn35_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index eb0da6c6b87ca6..b1ebf5053b4fc3 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -725,8 +725,8 @@ void hubp401_cursor_set_position( CURSOR_ENABLE, cur_en); REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, pos->y); + CURSOR_X_POSITION, x_pos, + CURSOR_Y_POSITION, y_pos); REG_SET_2(CURSOR_HOT_SPOT, 0, CURSOR_HOT_SPOT_X, pos->x_hotspot, @@ -990,7 +990,6 @@ static struct hubp_funcs dcn401_hubp_funcs = { .hubp_soft_reset = hubp31_soft_reset, .hubp_set_flip_int = hubp401_set_flip_int, .hubp_in_blank = hubp401_in_blank, - .hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow, .phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable, .hubp_update_mall_sel = hubp401_update_mall_sel, .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 1f2eb2f727dc1e..4fbed0298adfa7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -57,6 +57,7 @@ #include "panel_cntl.h" #include "dc_state_priv.h" #include "dpcd_defs.h" +#include "dsc.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" @@ -949,7 +950,7 @@ void dce110_edp_backlight_control( { struct dc_context *ctx = link->ctx; struct bp_transmitter_control cntl = { 0 }; - uint8_t pwrseq_instance; + uint8_t pwrseq_instance = 0; unsigned int pre_T11_delay = OLED_PRE_T11_DELAY; unsigned int post_T7_delay = OLED_POST_T7_DELAY; @@ -1002,7 +1003,8 @@ void dce110_edp_backlight_control( */ /* dc_service_sleep_in_milliseconds(50); */ /*edp 1.2*/ - pwrseq_instance = link->panel_cntl->pwrseq_inst; + if (link->panel_cntl) + pwrseq_instance = link->panel_cntl->pwrseq_inst; if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) { if (!link->dc->config.edp_no_power_sequencing) @@ -1231,20 +1233,21 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) * has changed or they enter 
protection state and hang. */ msleep(60); - } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { - if (!link->dc->config.edp_no_power_sequencing) { - /* - * Sometimes, DP receiver chip power-controlled externally by an - * Embedded Controller could be treated and used as eDP, - * if it drives mobile display. In this case, - * we shouldn't be doing power-sequencing, hence we can skip - * waiting for T9-ready. - */ - link->dc->link_srv->edp_receiver_ready_T9(link); - } } } + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + !link->dc->config.edp_no_power_sequencing) { + /* + * Sometimes, DP receiver chip power-controlled externally by an + * Embedded Controller could be treated and used as eDP, + * if it drives mobile display. In this case, + * we shouldn't be doing power-sequencing, hence we can skip + * waiting for T9-ready. + */ + link->dc->link_srv->edp_receiver_ready_T9(link); + } + } @@ -1549,6 +1552,7 @@ static enum dc_status dce110_enable_stream_timing( 0, 0, 0, + 0, pipe_ctx->stream->signal, true); } @@ -1597,6 +1601,11 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( &audio_output.crtc_info, &pipe_ctx->stream->audio_info, &audio_output.dp_link_info); + + if (dc->config.disable_hbr_audio_dp2) + if (pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio && + dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio(pipe_ctx->stream_res.audio); } /* make sure no pipes syncd to the pipe being enabled */ @@ -1815,6 +1824,48 @@ static void get_edp_links_with_sink( } } +static void clean_up_dsc_blocks(struct dc *dc) +{ + struct display_stream_compressor *dsc = NULL; + struct timing_generator *tg = NULL; + struct stream_encoder *se = NULL; + struct dccg *dccg = dc->res_pool->dccg; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + int i; + + if (dc->ctx->dce_version != DCN_VERSION_3_5 && + dc->ctx->dce_version != DCN_VERSION_3_51) + return; + + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { + struct dcn_dsc_state s = {0}; + + dsc = dc->res_pool->dscs[i]; + dsc->funcs->dsc_read_state(dsc, &s); + if (s.dsc_fw_en) { + /* disable DSC in OPTC */ + if (i < dc->res_pool->timing_generator_count) { + tg = dc->res_pool->timing_generators[i]; + tg->funcs->set_dsc_config(tg, OPTC_DSC_DISABLED, 0, 0); + } + /* disable DSC in stream encoder */ + if (i < dc->res_pool->stream_enc_count) { + se = dc->res_pool->stream_enc[i]; + se->funcs->dp_set_dsc_config(se, OPTC_DSC_DISABLED, 0, 0); + se->funcs->dp_set_dsc_pps_info_packet(se, false, NULL, true); + } + /* disable DSC block */ + if (dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, dsc->inst); + dsc->funcs->dsc_disable(dsc); + + /* power down DSC */ + if (pg_cntl != NULL) + pg_cntl->funcs->dsc_pg_control(pg_cntl, dsc->inst, false); + } + } +} + /* * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need: * 1. 
Power down all DC HW blocks @@ -1838,6 +1889,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; + struct dc_bios *dcb = dc->ctx->dc_bios; DC_LOGGER_INIT(); @@ -1914,13 +1966,22 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) hws->funcs.edp_backlight_control(edp_link_with_sink, false); } /*resume from S3, no vbios posting, no need to power down again*/ - clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + if (dcb && dcb->funcs && !dcb->funcs->is_accelerated_mode(dcb)) + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); power_down_all_hw_blocks(dc); + + /* DSC could be enabled on eDP during VBIOS post. + * To clean up dsc blocks if eDP is in link but not active. + */ + if (edp_link_with_sink && (edp_stream_num == 0)) + clean_up_dsc_blocks(dc); + disable_vga_and_power_gate_all_controllers(dc); if (edp_link_with_sink && !keep_edp_vdd_on) dc->hwss.edp_power_control(edp_link_with_sink, false); - clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); + if (dcb && dcb->funcs && !dcb->funcs->is_accelerated_mode(dcb)) + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); } bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); } @@ -2035,13 +2096,20 @@ static void set_drr(struct pipe_ctx **pipe_ctx, * as well. */ for (i = 0; i < num_pipes; i++) { - pipe_ctx[i]->stream_res.tg->funcs->set_drr( - pipe_ctx[i]->stream_res.tg, ¶ms); - - if (adjust.v_total_max != 0 && adjust.v_total_min != 0) - pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx[i]->stream_res.tg, - event_triggers, num_frames); + /* dc_state_destruct() might null the stream resources, so fetch tg + * here first to avoid a race condition. The lifetime of the pointee + * itself (the timing_generator object) is not a problem here. 
+ */ + struct timing_generator *tg = pipe_ctx[i]->stream_res.tg; + + if ((tg != NULL) && tg->funcs) { + if (tg->funcs->set_drr) + tg->funcs->set_drr(tg, ¶ms); + if (adjust.v_total_max != 0 && adjust.v_total_min != 0) + if (tg->funcs->set_static_screen_control) + tg->funcs->set_static_screen_control( + tg, event_triggers, num_frames); + } } } @@ -2340,19 +2408,6 @@ static void dce110_setup_audio_dto( } } -static bool dce110_is_hpo_enabled(struct dc_state *context) -{ - int i; - - for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) { - if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) { - return true; - } - } - - return false; -} - enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context) @@ -2361,8 +2416,8 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status status; int i; - bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state); - bool is_hpo_enabled = dce110_is_hpo_enabled(context); + bool was_hpo_acquired = resource_is_hpo_acquired(dc->current_state); + bool is_hpo_acquired = resource_is_hpo_acquired(context); /* reset syncd pipes from disabled pipes */ if (dc->config.use_pipe_ctx_sync_logic) @@ -2405,8 +2460,8 @@ enum dc_status dce110_apply_ctx_to_hw( dce110_setup_audio_dto(dc, context); - if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled) { - dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled); + if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_acquired != is_hpo_acquired) { + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_acquired); } for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2438,7 +2493,7 @@ enum dc_status dce110_apply_ctx_to_hw( #ifdef CONFIG_DRM_AMD_DC_FP if (hws->funcs.resync_fifo_dccg_dio) - hws->funcs.resync_fifo_dccg_dio(hws, dc, context); + hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i); #endif } @@ -3312,7 +3367,6 @@ static const struct hw_sequencer_funcs dce110_funcs = { static const struct hwseq_private_funcs dce110_private_funcs = { .init_pipes = init_pipes, - .update_plane_addr = update_plane_addr, .set_input_transfer_func = dce110_set_input_transfer_func, .set_output_transfer_func = dce110_set_output_transfer_func, .power_down = dce110_power_down, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 1d2be574f66809..a6a1db5ba8bad1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -1005,6 +1005,7 @@ enum dc_status dcn10_enable_stream_timing( pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width, + pipe_ctx->pipe_dlg_param.pstate_keepout, pipe_ctx->stream->signal, true); @@ -1554,7 +1555,7 @@ void dcn10_init_hw(struct dc *dc) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); /* Align bw context with hw config when system resume. 
*/ - if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) { + if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) { dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz; dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz; } @@ -1674,7 +1675,7 @@ void dcn10_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (dc->clk_mgr->funcs->notify_wm_ranges) + if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); } @@ -1697,10 +1698,10 @@ void dcn10_power_down_on_boot(struct dc *dc) if (edp_link && edp_link->link_enc->funcs->is_dig_enabled && edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && dc->hwseq->funcs.edp_backlight_control && - dc->hwss.power_down && + dc->hwseq->funcs.power_down && dc->hwss.edp_power_control) { dc->hwseq->funcs.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); + dc->hwseq->funcs.power_down(dc); dc->hwss.edp_power_control(edp_link, false); } else { for (i = 0; i < dc->link_count; i++) { @@ -1708,8 +1709,8 @@ void dcn10_power_down_on_boot(struct dc *dc) if (link->link_enc && link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); + dc->hwseq->funcs.power_down) { + dc->hwseq->funcs.power_down(dc); break; } @@ -2585,8 +2586,11 @@ static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_co while (top->top_pipe) top = top->top_pipe; // Traverse to top pipe_ctx - if (top->plane_state && top->plane_state->layer_index == 0) - return true; // Front MPO plane not hidden + if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha) + // Global alpha used by top plane for PIP overlay + // Pre-multiplied/per-pixel alpha used by MPO + // Check top plane's global alpha to ensure layer_index > 0 not caused by PIP + return true; // MPO in use and front plane not hidden } } return false; @@ -2914,7 +2918,7 @@ static void dcn10_update_dchubp_dpp( hubp->power_gated = false; - hws->funcs.update_plane_addr(dc, pipe_ctx); + dc->hwss.update_plane_addr(dc, pipe_ctx); if (is_pipe_tree_visible(pipe_ctx)) hubp->funcs->set_blank(hubp, false); @@ -2997,7 +3001,8 @@ void dcn10_program_pipe( calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); + pipe_ctx->pipe_dlg_param.vupdate_width, + pipe_ctx->pipe_dlg_param.pstate_keepout); pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c index a5bdac79a744eb..5e51e1761707de 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c @@ -78,7 +78,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .get_clock = dcn10_get_clock, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, .set_backlight_level = dce110_set_backlight_level, .set_abm_immediate_disable = dce110_set_abm_immediate_disable, .set_pipe = dce110_set_pipe, @@ -92,7 +91,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = { static const struct 
hwseq_private_funcs dcn10_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn10_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .program_pipe = dcn10_program_pipe, .update_mpcc = dcn10_update_mpcc, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 2532ad410cb566..a80c0858293207 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -909,6 +909,7 @@ enum dc_status dcn20_enable_stream_timing( pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width, + pipe_ctx->pipe_dlg_param.pstate_keepout, pipe_ctx->stream->signal, true); @@ -1044,7 +1045,8 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, /* * if above if is not executed then 'params' equal to 0 and set in bypass */ - mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + if (mpc->funcs->set_output_gamma) + mpc->funcs->set_output_gamma(mpc, mpcc_id, params); return true; } @@ -1698,7 +1700,7 @@ static void dcn20_update_dchubp_dpp( plane_state->update_flags.bits.input_csc_change || plane_state->update_flags.bits.color_space_change || plane_state->update_flags.bits.coeff_reduction_change) { - struct dc_bias_and_scale bns_params = {0}; + struct dc_bias_and_scale bns_params = plane_state->bias_and_scale; // program the input csc dpp->funcs->dpp_setup(dpp, @@ -1715,7 +1717,6 @@ static void dcn20_update_dchubp_dpp( } if (dpp->funcs->dpp_program_bias_and_scale) { //TODO :for CNVC set scale and bias registers if necessary - build_prescale_params(&bns_params, plane_state); dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } } @@ -1825,7 +1826,7 @@ static void dcn20_update_dchubp_dpp( params.subvp_save_surf_addr.subvp_index = pipe_ctx->subvp_index; hwss_subvp_save_surf_addr(¶ms); } - hws->funcs.update_plane_addr(dc, pipe_ctx); + dc->hwss.update_plane_addr(dc, pipe_ctx); } if (pipe_ctx->update_flags.bits.enable) @@ -1886,7 +1887,8 @@ static void dcn20_program_pipe( calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); + pipe_ctx->pipe_dlg_param.vupdate_width, + pipe_ctx->pipe_dlg_param.pstate_keepout); if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); @@ -1921,22 +1923,28 @@ static void dcn20_program_pipe( dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); } - if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) + if (pipe_ctx->update_flags.raw || + (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) || + pipe_ctx->stream->update_flags.raw) dcn20_update_dchubp_dpp(dc, pipe_ctx, context); - if (pipe_ctx->update_flags.bits.enable - || pipe_ctx->plane_state->update_flags.bits.hdr_mult) + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || + pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); if (hws->funcs.populate_mcm_luts) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; + if (pipe_ctx->plane_state) { + 
hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, + pipe_ctx->plane_state->lut_bank_a); + pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; + } } - if (pipe_ctx->update_flags.bits.enable || - pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + + if (pipe_ctx->plane_state && + (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || - pipe_ctx->plane_state->update_flags.bits.lut_3d) + pipe_ctx->plane_state->update_flags.bits.lut_3d || + pipe_ctx->update_flags.bits.enable)) hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish @@ -1946,7 +1954,8 @@ static void dcn20_program_pipe( if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.plane_changed || pipe_ctx->stream->update_flags.bits.out_tf || - pipe_ctx->plane_state->update_flags.bits.output_tf_change) + (pipe_ctx->plane_state && + pipe_ctx->plane_state->update_flags.bits.output_tf_change)) hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); /* If the pipe has been enabled or has a different opp, we @@ -1970,7 +1979,7 @@ static void dcn20_program_pipe( } /* Set ABM pipe after other pipe configurations done */ - if (pipe_ctx->plane_state->visible) { + if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) { if (pipe_ctx->stream_res.abm) { dc->hwss.set_pipe(pipe_ctx); pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm, @@ -2186,9 +2195,9 @@ static void post_unlock_reset_opp(struct dc *dc, * yet power gated. */ dsc->funcs->dsc_wait_disconnect_pending_clear(dsc); + dsc->funcs->dsc_disable(dsc); if (dccg->funcs->set_ref_dscclk) dccg->funcs->set_ref_dscclk(dccg, dsc->inst); - dsc->funcs->dsc_disable(dsc); } } } @@ -2283,6 +2292,9 @@ void dcn20_post_unlock_program_front_end( } } + if (!hwseq) + return; + /* P-State support transitions: * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally) @@ -2290,7 +2302,7 @@ void dcn20_post_unlock_program_front_end( * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes */ - if (hwseq && hwseq->funcs.update_force_pstate) + if (hwseq->funcs.update_force_pstate) dc->hwseq->funcs.update_force_pstate(dc, context); /* Only program the MALL registers after all the main and phantom pipes @@ -2459,7 +2471,8 @@ bool dcn20_update_bandwidth( calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); + pipe_ctx->pipe_dlg_param.vupdate_width, + pipe_ctx->pipe_dlg_param.pstate_keepout); pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false); @@ -2529,6 +2542,9 @@ bool dcn20_wait_for_blank_complete( { int counter; + if (!opp) + return false; + for (counter = 0; counter < 1000; counter++) { if (!opp->funcs->dpg_is_pending(opp)) break; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c index ef6488165b8fc8..32707b344f0b68 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c @@ -105,7 +105,6 @@ static const struct hw_sequencer_funcs 
dcn20_funcs = { static const struct hwseq_private_funcs dcn20_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn20_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c index a13bf6c9386e0a..78351408e86422 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c @@ -96,7 +96,6 @@ static const struct hw_sequencer_funcs dcn201_funcs = { static const struct hwseq_private_funcs dcn201_private_funcs = { .init_pipes = NULL, - .update_plane_addr = dcn201_update_plane_addr, .plane_atomic_disconnect = dcn201_plane_atomic_disconnect, .program_pipe = dcn10_program_pipe, .update_mpcc = dcn201_update_mpcc, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c index 3dfac372d1654b..e044e9e0a3a17a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c @@ -93,7 +93,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, @@ -109,7 +108,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = { static const struct hwseq_private_funcs dcn21_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn20_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index eaeeade31ed74f..bded33575493b6 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -398,7 +398,11 @@ bool dcn30_set_output_transfer_func(struct dc *dc, } } - mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + if (mpc->funcs->set_output_gamma) + mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + else + DC_LOG_ERROR("%s: set_output_gamma function pointer is NULL.\n", __func__); + return ret; } @@ -451,7 +455,7 @@ bool dcn30_mmhubbub_warmup( struct mcif_wb *mcif_wb; struct mcif_warmup_params warmup_params = {0}; unsigned int i, i_buf; - /*make sure there is no active DWB eanbled */ + /* make sure there is no active DWB enabled */ for (i = 0; i < num_dwb; i++) { dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst]; if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) { @@ -625,7 +629,7 @@ void dcn30_init_hw(struct dc *dc) uint32_t backlight = MAX_BACKLIGHT_LEVEL; uint32_t user_level = MAX_BACKLIGHT_LEVEL; - if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // Initialize the dccg @@ -731,10 +735,10 @@ void dcn30_init_hw(struct dc *dc) if (edp_link && edp_link->link_enc->funcs->is_dig_enabled && edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && 
dc->hwss.edp_backlight_control && - dc->hwss.power_down && + hws->funcs.power_down && dc->hwss.edp_power_control) { dc->hwss.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); + hws->funcs.power_down(dc); dc->hwss.edp_power_control(edp_link, false); } else { for (i = 0; i < dc->link_count; i++) { @@ -742,8 +746,8 @@ void dcn30_init_hw(struct dc *dc) if (link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); + hws->funcs.power_down) { + hws->funcs.power_down(dc); break; } @@ -786,11 +790,12 @@ void dcn30_init_hw(struct dc *dc) if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); - if (dc->clk_mgr->funcs->notify_wm_ranges) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); //if softmax is enabled then hardmax will be set by a different call - if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk && + !dc->clk_mgr->dc_mode_softmax_enabled) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); if (dc->res_pool->hubbub->funcs->force_pstate_change_control) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 4b32497c09d08a..2a8dc40d28477b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -113,7 +113,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = { static const struct hwseq_private_funcs dcn30_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn30_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c index 97e33eb7ac5a96..93e49d87a67ce0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c @@ -111,7 +111,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = { static const struct hwseq_private_funcs dcn301_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn30_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 746c522adf84ca..3d4b31bd994691 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -256,10 +256,10 @@ void dcn31_init_hw(struct dc *dc) if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); - if (dc->clk_mgr->funcs->notify_wm_ranges) + if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); - if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && 
!dc->clk_mgr->dc_mode_softmax_enabled) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); if (dc->res_pool->hubbub->funcs->force_pstate_change_control) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 9cb7afe0e731e2..56f3c70d4b5548 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -98,7 +98,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, @@ -112,11 +111,11 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, }; static const struct hwseq_private_funcs dcn31_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn30_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index 388404cdeeaaea..4e93eeedfc1bbd 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -355,14 +355,18 @@ void dcn314_calculate_pix_rate_divider( } } -void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context) +void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx) { unsigned int i; struct pipe_ctx *pipe = NULL; bool otg_disabled[MAX_PIPES] = {false}; for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (i <= current_pipe_idx) { + pipe = &context->res_ctx.pipe_ctx[i]; + } else { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + } if (pipe->top_pipe || pipe->prev_odm_pipe) continue; @@ -377,7 +381,10 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg); for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (i <= current_pipe_idx) + pipe = &context->res_ctx.pipe_ctx[i]; + else + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (otg_disabled[i]) { int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h index fb4f90f61b22d6..2305ad282f218b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h @@ -41,7 +41,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig void dcn314_calculate_pix_rate_divider(struct dc *dc, struct dc_state *context, const struct dc_stream_state *stream); -void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state 
*context); +void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx); void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index 7a8db4b81471e9..68e6de6b5758d5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -100,7 +100,6 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, @@ -115,11 +114,11 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider, + .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, }; static const struct hwseq_private_funcs dcn314_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn30_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 05d8f81daa064d..2e8c9f73825968 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -582,7 +582,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc, } } - mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + if (mpc->funcs->set_output_gamma) + mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + return ret; } @@ -779,7 +781,7 @@ void dcn32_init_hw(struct dc *dc) uint32_t backlight = MAX_BACKLIGHT_LEVEL; uint32_t user_level = MAX_BACKLIGHT_LEVEL; - if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // Initialize the dccg @@ -901,10 +903,10 @@ void dcn32_init_hw(struct dc *dc) if (edp_link->link_enc->funcs->is_dig_enabled && edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && dc->hwss.edp_backlight_control && - dc->hwss.power_down && + hws->funcs.power_down && dc->hwss.edp_power_control) { dc->hwss.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); + hws->funcs.power_down(dc); dc->hwss.edp_power_control(edp_link, false); } } @@ -914,8 +916,8 @@ void dcn32_init_hw(struct dc *dc) if (link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); + hws->funcs.power_down) { + hws->funcs.power_down(dc); break; } @@ -958,10 +960,11 @@ void dcn32_init_hw(struct dc *dc) if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); - if (dc->clk_mgr->funcs->notify_wm_ranges) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges) 
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); - if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk && + !dc->clk_mgr->dc_mode_softmax_enabled) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); if (dc->res_pool->hubbub->funcs->force_pstate_change_control) @@ -982,8 +985,19 @@ void dcn32_init_hw(struct dc *dc) dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; - if (dc->ctx->dmub_srv->dmub->fw_version < + /* for DCN401 testing only */ + dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; + if (dc->caps.dmub_caps.fams_ver == 2) { + /* FAMS2 is enabled */ + dc->debug.fams2_config.bits.enable &= true; + } else if (dc->ctx->dmub_srv->dmub->fw_version < DMUB_FW_VERSION(7, 0, 35)) { + /* FAMS2 is disabled */ + dc->debug.fams2_config.bits.enable = false; + if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box) { + /* update bounding box if FAMS2 disabled */ + dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); + } dc->debug.force_disable_subvp = true; dc->debug.disable_fpo_optimizations = true; } @@ -1018,6 +1032,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; + struct dcn_dsc_state dsc_state = {0}; + + if (!dsc) { + DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + + if (dsc->funcs->dsc_read_state) { + dsc->funcs->dsc_read_state(dsc, &dsc_state); + if (!dsc_state.dsc_fw_en) { + DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + } /* Enable DSC hw block */ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; @@ -1029,24 +1057,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + if (should_use_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, dsc->inst); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); - if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst, true); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; ASSERT(odm_dsc); + if (should_use_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); - if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, true); } - dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; - dsc_cfg.pic_width *= opp_cnt; - optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; - /* Enable DSC in OPTC */ DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst); pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, @@ -1060,13 +1084,9 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) OPTC_DSC_DISABLED, 0, 0); /* only disconnect DSC block, DSC is disabled when OPP head pipe is reset */ - if (dccg->funcs->set_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, pipe_ctx->stream_res.dsc->inst, false); - dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); + dsc->funcs->dsc_disconnect(pipe_ctx->stream_res.dsc); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { ASSERT(odm_pipe->stream_res.dsc); - if (dccg->funcs->set_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_pipe->stream_res.dsc->inst, false); odm_pipe->stream_res.dsc->funcs->dsc_disconnect(odm_pipe->stream_res.dsc); } } @@ -1137,10 +1157,7 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe->stream_res.dsc) { struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc; - struct dccg *dccg = dc->res_pool->dccg; - if (dccg->funcs->set_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst, false); /* disconnect DSC block from stream */ dsc->funcs->dsc_disconnect(dsc); } @@ -1212,20 +1229,27 @@ void dcn32_calculate_pix_rate_divider( } } -void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context) +void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx) { unsigned int i; struct pipe_ctx *pipe = NULL; bool otg_disabled[MAX_PIPES] = {false}; + struct dc_state *dc_state = NULL; for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (i <= current_pipe_idx) { + pipe = &context->res_ctx.pipe_ctx[i]; + dc_state = context; + } else { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + dc_state = dc->current_state; + } if (!resource_is_pipe_type(pipe, OTG_MASTER)) continue; if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) - && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) { + && dc_state_get_pipe_subvp_type(dc_state, pipe) != SUBVP_PHANTOM) { pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); otg_disabled[i] = true; @@ -1235,7 +1259,10 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_ hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg); for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (i <= current_pipe_idx) + pipe = &context->res_ctx.pipe_ctx[i]; + else + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (otg_disabled[i]) { int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst }; @@ -1583,7 +1610,7 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) #ifdef CONFIG_DRM_AMD_DC_FP if (hws->funcs.resync_fifo_dccg_dio) - hws->funcs.resync_fifo_dccg_dio(hws, dc, context); + hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i); #endif } } @@ -1717,6 +1744,28 @@ void dcn32_blank_phantom(struct dc *dc, hws->funcs.wait_for_blank_complete(opp); } +/* phantom stream id's can change often, but can 
be identical between contexts. +* This function checks for the condition the streams are identical to avoid +* redundant pipe transitions. +*/ +static bool is_subvp_phantom_topology_transition_seamless( + const struct dc_state *cur_ctx, + const struct dc_state *new_ctx, + const struct pipe_ctx *cur_pipe, + const struct pipe_ctx *new_pipe) +{ + enum mall_stream_type cur_pipe_type = dc_state_get_pipe_subvp_type(cur_ctx, cur_pipe); + enum mall_stream_type new_pipe_type = dc_state_get_pipe_subvp_type(new_ctx, new_pipe); + + const struct dc_stream_state *cur_paired_stream = dc_state_get_paired_subvp_stream(cur_ctx, cur_pipe->stream); + const struct dc_stream_state *new_paired_stream = dc_state_get_paired_subvp_stream(new_ctx, new_pipe->stream); + + return cur_pipe_type == SUBVP_PHANTOM && + cur_pipe_type == new_pipe_type && + cur_paired_stream && new_paired_stream && + cur_paired_stream->stream_id == new_paired_stream->stream_id; +} + bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx) @@ -1735,7 +1784,8 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, continue; else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) { if (resource_is_pipe_type(new_pipe, OTG_MASTER)) - if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id) + if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id || + is_subvp_phantom_topology_transition_seamless(cur_ctx, new_ctx, cur_pipe, new_pipe)) /* OTG master with the same stream is seamless */ continue; } else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) { @@ -1821,3 +1871,13 @@ void dcn32_interdependent_update_lock(struct dc *dc, dc->hwss.pipe_control_lock(dc, pipe, false); } } + +void dcn32_program_outstanding_updates(struct dc *dc, + struct dc_state *context) +{ + struct hubbub *hubbub = dc->res_pool->hubbub; + + /* update compbuf if required */ + if (hubbub->funcs->program_compbuf_size) + hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index db562e45d6ffeb..cac4a08b92a4d3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -75,7 +75,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div); -void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); +void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx); void dcn32_subvp_pipe_control_lock(struct dc *dc, struct dc_state *context, @@ -133,4 +133,8 @@ void dcn32_prepare_bandwidth(struct dc *dc, void dcn32_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); + +void dcn32_program_outstanding_updates(struct dc *dc, + struct dc_state *context); + #endif /* __DC_HWSS_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 5c50458b12cb1f..3422b564ae9847 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -120,11 +120,11 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .blank_phantom = dcn32_blank_phantom, 
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, + .program_outstanding_updates = dcn32_program_outstanding_updates, }; static const struct hwseq_private_funcs dcn32_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn32_set_input_transfer_func, @@ -163,7 +163,6 @@ static const struct hwseq_private_funcs dcn32_private_funcs = { .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, - .populate_mcm_luts = dcn401_populate_mcm_luts, }; void dcn32_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index d5e9aec52a0597..bd309dbdf7b2a7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -147,37 +147,6 @@ void dcn35_init_hw(struct dc *dc) hws->funcs.bios_golden_init(dc); } - if (!dc->debug.disable_clock_gate) { - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */ - REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1, - PHYBSYMCLK_ROOT_GATE_DISABLE, 1, - PHYCSYMCLK_ROOT_GATE_DISABLE, 1, - PHYDSYMCLK_ROOT_GATE_DISABLE, 1, - PHYESYMCLK_ROOT_GATE_DISABLE, 1); - - REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4, - DPIASYMCLK0_GATE_DISABLE, 0, - DPIASYMCLK1_GATE_DISABLE, 0, - DPIASYMCLK2_GATE_DISABLE, 0, - DPIASYMCLK3_GATE_DISABLE, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF); - REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5, - DTBCLK_P0_GATE_DISABLE, 0, - DTBCLK_P1_GATE_DISABLE, 0, - DTBCLK_P2_GATE_DISABLE, 0, - DTBCLK_P3_GATE_DISABLE, 0); - REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5, - DPSTREAMCLK0_GATE_DISABLE, 0, - DPSTREAMCLK1_GATE_DISABLE, 0, - DPSTREAMCLK2_GATE_DISABLE, 0, - DPSTREAMCLK3_GATE_DISABLE, 0); - - } - // Initialize the dccg if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); @@ -235,7 +204,7 @@ void dcn35_init_hw(struct dc *dc) if (hws->funcs.enable_power_gating_plane) hws->funcs.enable_power_gating_plane(dc->hwseq, true); */ - if (res_pool->hubbub->funcs->dchubbub_init) + if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init) res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -271,6 +240,10 @@ void dcn35_init_hw(struct dc *dc) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } + if (res_pool->dccg->funcs->dccg_root_gate_disable_control) { + for (i = 0; i < res_pool->pipe_count; i++) + res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0); + } for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; @@ -305,20 +278,6 @@ void dcn35_init_hw(struct dc *dc) if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0, 
- SYMCLKB_FE_GATE_DISABLE, 0, - SYMCLKC_FE_GATE_DISABLE, 0, - SYMCLKD_FE_GATE_DISABLE, 0, - SYMCLKE_FE_GATE_DISABLE, 0); - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0); - REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0, - SYMCLKB_GATE_DISABLE, 0, - SYMCLKC_GATE_DISABLE, 0, - SYMCLKD_GATE_DISABLE, 0, - SYMCLKE_GATE_DISABLE, 0); - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } @@ -328,10 +287,10 @@ void dcn35_init_hw(struct dc *dc) if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); - if (dc->clk_mgr->funcs->notify_wm_ranges) + if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); - if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); @@ -375,7 +334,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; + struct dcn_dsc_state dsc_state = {0}; + + if (!dsc) { + DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + if (dsc->funcs->dsc_read_state) { + dsc->funcs->dsc_read_state(dsc, &dsc_state); + if (!dsc_state.dsc_fw_en) { + DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + } /* Enable DSC hw block */ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; @@ -629,10 +601,10 @@ void dcn35_power_down_on_boot(struct dc *dc) if (edp_link && edp_link->link_enc->funcs->is_dig_enabled && edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && dc->hwseq->funcs.edp_backlight_control && - dc->hwss.power_down && + dc->hwseq->funcs.power_down && dc->hwss.edp_power_control) { dc->hwseq->funcs.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); + dc->hwseq->funcs.power_down(dc); dc->hwss.edp_power_control(edp_link, false); } else { for (i = 0; i < dc->link_count; i++) { @@ -640,8 +612,8 @@ void dcn35_power_down_on_boot(struct dc *dc) if (link->link_enc && link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); + dc->hwseq->funcs.power_down) { + dc->hwseq->funcs.power_down(dc); break; } @@ -1024,9 +996,6 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired) update_state->pg_res_update[PG_HPO] = true; - if (hpo_frl_stream_enc_acquired) - update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; - update_state->pg_res_update[PG_DWB] = true; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1041,7 +1010,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->plane_res.hubp) update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false; - if (pipe_ctx->plane_res.dpp) + if (pipe_ctx->plane_res.dpp && pipe_ctx->plane_res.hubp) update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false; if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) @@ -1469,10 
+1438,9 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx, struct timing_generator *tg = pipe_ctx[i]->stream_res.tg; if ((tg != NULL) && tg->funcs) { - struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing; - struct dc *dc = pipe_ctx[i]->stream->ctx->dc; - - if (dc->debug.static_screen_wait_frames) { + if (pipe_ctx[i]->stream && pipe_ctx[i]->stream->ctx->dc->debug.static_screen_wait_frames) { + struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing; + struct dc *dc = pipe_ctx[i]->stream->ctx->dc; unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total); if (frame_rate >= 120 && dc->caps.ips_support && diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 428912f371291b..2bbf1fef94fd25 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -101,7 +101,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, @@ -124,11 +123,11 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .root_clock_control = dcn35_root_clock_control, .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, + .program_outstanding_updates = dcn32_program_outstanding_updates, }; static const struct hwseq_private_funcs dcn35_private_funcs = { .init_pipes = dcn35_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn32_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index 55e791552bca86..d00822e8daa52e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -100,7 +100,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, @@ -123,11 +122,12 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .root_clock_control = dcn35_root_clock_control, .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, + .program_outstanding_updates = dcn32_program_outstanding_updates, + .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, }; static const struct hwseq_private_funcs dcn351_private_funcs = { .init_pipes = dcn35_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn32_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 2c50c0f745a0be..0b743669f23b44 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -221,8 +221,9 @@ void dcn401_init_hw(struct dc *dc) int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; uint32_t user_level = MAX_BACKLIGHT_LEVEL; + int current_dchub_ref_freq = 0; - if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) { + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) { dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // mark dcmode limits present if any clock has distinct AC and DC values from SMU @@ -264,6 +265,8 @@ void dcn401_init_hw(struct dc *dc) dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, &res_pool->ref_clocks.dccg_ref_clock_inKhz); + current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, res_pool->ref_clocks.dccg_ref_clock_inKhz, &res_pool->ref_clocks.dchub_ref_clock_inKhz); @@ -354,10 +357,10 @@ void dcn401_init_hw(struct dc *dc) if (edp_link->link_enc->funcs->is_dig_enabled && edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && dc->hwss.edp_backlight_control && - dc->hwss.power_down && + hws->funcs.power_down && dc->hwss.edp_power_control) { dc->hwss.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); + hws->funcs.power_down(dc); dc->hwss.edp_power_control(edp_link, false); } } @@ -367,8 +370,8 @@ void dcn401_init_hw(struct dc *dc) if (link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); + hws->funcs.power_down) { + hws->funcs.power_down(dc); break; } @@ -413,12 +416,9 @@ void dcn401_init_hw(struct dc *dc) if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); - if (dc->clk_mgr->funcs->notify_wm_ranges) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); - if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) - dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); - if (dc->res_pool->hubbub->funcs->force_pstate_change_control) dc->res_pool->hubbub->funcs->force_pstate_change_control( dc->res_pool->hubbub, false, false); @@ -436,9 +436,12 @@ void dcn401_init_hw(struct dc *dc) dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0; dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; dc->debug.fams2_config.bits.enable &= dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver == 2; - if (!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box) { - /* update bounding box if FAMS2 disabled */ - dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); + if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box) + || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) { + /* update bounding box if FAMS2 disabled, or if dchub clk has changed */ + if (dc->clk_mgr) + dc->res_pool->funcs->update_bw_bounding_box(dc, + dc->clk_mgr->bw_params); } } } @@ -498,6 +501,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE; enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE; bool is_17x17x17 = true; + bool rval; dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, 
&shaper_xable, &lut3d_xable, &lut1d_xable); @@ -507,11 +511,10 @@ void dcn401_populate_mcm_luts(struct dc *dc, if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format( - dc->ctx, + rval = cm3_helper_translate_curve_to_hw_format( mcm_luts.lut1d_func, &dpp_base->regamma_params, false); - m_lut_params.pwl = &dpp_base->regamma_params; + m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; } if (m_lut_params.pwl) { if (mpc->funcs->populate_lut) @@ -528,11 +531,10 @@ void dcn401_populate_mcm_luts(struct dc *dc, m_lut_params.pwl = &mcm_luts.shaper->pwl; else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { ASSERT(false); - cm_helper_translate_curve_to_hw_format( - dc->ctx, + rval = cm3_helper_translate_curve_to_hw_format( mcm_luts.shaper, &dpp_base->regamma_params, true); - m_lut_params.pwl = &dpp_base->regamma_params; + m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; } if (m_lut_params.pwl) { if (mpc->funcs->populate_lut) @@ -668,47 +670,40 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx, struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - bool result = true; + bool result; const struct pwl_params *lut_params = NULL; + bool rval; mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE; // 1D LUT - if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) { - if (plane_state->blend_tf.type == TF_TYPE_HWPWL) - lut_params = &plane_state->blend_tf.pwl; - else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - &plane_state->blend_tf, - &dpp_base->regamma_params, false); - lut_params = &dpp_base->regamma_params; - } - result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); - lut_params = NULL; + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + lut_params = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, + &dpp_base->regamma_params, false); + lut_params = rval ? &dpp_base->regamma_params : NULL; } + result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); + lut_params = NULL; // Shaper - if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) { - if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) - lut_params = &plane_state->in_shaper_func.pwl; - else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { - // TODO: dpp_base replace - ASSERT(false); - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - &plane_state->in_shaper_func, - &dpp_base->shaper_params, true); - lut_params = &dpp_base->shaper_params; - } - - result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); + if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) + lut_params = &plane_state->in_shaper_func.pwl; + else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { + // TODO: dpp_base replace + rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, + &dpp_base->shaper_params, true); + lut_params = rval ? 
&dpp_base->shaper_params : NULL; } + result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); // 3D - if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) { + if (mpc->funcs->program_3dlut) { if (plane_state->lut3d_func.state.bits.initialized == 1) - result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id); + result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id); else - result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); + result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); } return result; @@ -742,7 +737,9 @@ bool dcn401_set_output_transfer_func(struct dc *dc, } } - mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + if (mpc->funcs->set_output_gamma) + mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + return ret; } @@ -871,6 +868,7 @@ enum dc_status dcn401_enable_stream_timing( pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width, + pipe_ctx->pipe_dlg_param.pstate_keepout, pipe_ctx->stream->signal, true); @@ -1115,10 +1113,10 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx) .mirror = pipe_ctx->plane_state->horizontal_mirror, .stream = pipe_ctx->stream }; + struct rect odm_slice_src = { 0 }; bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) || (pipe_ctx->prev_odm_pipe != NULL); int prev_odm_width = 0; - int prev_odm_offset = 0; struct pipe_ctx *prev_odm_pipe = NULL; bool mpc_combine_on = false; int bottom_pipe_x_pos = 0; @@ -1183,12 +1181,12 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx) prev_odm_pipe = pipe_ctx->prev_odm_pipe; while (prev_odm_pipe != NULL) { - prev_odm_width += prev_odm_pipe->plane_res.scl_data.recout.width; - prev_odm_offset += prev_odm_pipe->plane_res.scl_data.recout.x; + odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe); + prev_odm_width += odm_slice_src.width; prev_odm_pipe = prev_odm_pipe->prev_odm_pipe; } - x_pos -= (prev_odm_width + prev_odm_offset); + x_pos -= (prev_odm_width); } /* If the position is negative then we need to add to the hotspot @@ -1311,8 +1309,10 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable) for (i = 0; i < dc->current_state->stream_count; i++) { /* MALL SS messaging is not supported with PSR at this time */ if (dc->current_state->streams[i] != NULL && - dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) + dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { + DC_LOG_MALL("MALL SS not supported with PSR at this time\n"); return false; + } } memset(&cmd, 0, sizeof(cmd)); @@ -1322,8 +1322,9 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable) if (enable) { if (dcn401_check_no_memory_request_for_cab(dc)) { /* 1. Check no memory request case for CAB. - * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message + * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message */ + DC_LOG_MALL("sending CAB action NO_DCN_REQ\n"); cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; } else { /* 2. Check if all surfaces can fit in CAB. @@ -1351,13 +1352,16 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable) if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) { cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB; cmd.cab.cab_alloc_ways = ways; + DC_LOG_MALL("cab allocation: %d ways. 
CAB action: DCN_SS_FIT_IN_CAB\n", ways); } else { cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB; + DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways); } } } else { /* Disable CAB */ cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION; + DC_LOG_MALL("idle optimization disabled\n"); } dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); @@ -1395,10 +1399,10 @@ void dcn401_prepare_bandwidth(struct dc *dc, { struct hubbub *hubbub = dc->res_pool->hubbub; bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; - unsigned int compbuf_size_kb = 0; + unsigned int compbuf_size = 0; - /* Any transition into or out of a FAMS config should disable MCLK switching first to avoid hangs */ - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + /* Any transition into P-State support should disable MCLK switching first to avoid hangs */ + if (p_state_change_support) { dc->optimized_required = true; context->bw_ctx.bw.dcn.clk.p_state_change_support = false; } @@ -1425,10 +1429,10 @@ void dcn401_prepare_bandwidth(struct dc *dc, /* decrease compbuf size */ if (hubbub->funcs->program_compbuf_segments) { - compbuf_size_kb = context->bw_ctx.bw.dcn.arb_regs.compbuf_size; - dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size); + compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size; + dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size); - hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size_kb, false); + hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false); } if (dc->debug.fams2_config.bits.enable) { @@ -1437,7 +1441,7 @@ void dcn401_prepare_bandwidth(struct dc *dc, dcn401_fams2_global_control_lock(dc, context, false); } - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) { /* After disabling P-State, restore the original value to ensure we get the correct P-State * on the next optimize. 
*/ context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; @@ -1530,7 +1534,7 @@ void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool en if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable) return; - fams2_required = context->bw_ctx.bw.dcn.fams2_stream_count > 0; + fams2_required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable; dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_required); } @@ -1542,7 +1546,6 @@ static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context, struct pipe_ctx *old_pipe; struct pipe_ctx *new_pipe; struct pipe_ctx *old_opp_heads[MAX_PIPES]; - struct dccg *dccg = dc->res_pool->dccg; struct pipe_ctx *old_otg_master; int old_opp_head_count = 0; @@ -1568,12 +1571,9 @@ static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context, for (i = 0; i < old_opp_head_count; i++) { old_pipe = old_opp_heads[i]; new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx]; - if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) { - dccg->funcs->set_dto_dscclk(dccg, - old_pipe->stream_res.dsc->inst, false); + if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) old_pipe->stream_res.dsc->funcs->dsc_disconnect( old_pipe->stream_res.dsc); - } } } } @@ -1659,7 +1659,7 @@ void dcn401_hardware_release(struct dc *dc) */ if (dc->current_state) { if ((!dc->clk_mgr->clks.p_state_change_support || - dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0) && + dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) && dc->res_pool->hubbub->funcs->force_pstate_change_control) dc->res_pool->hubbub->funcs->force_pstate_change_control( dc->res_pool->hubbub, true, true); @@ -1669,3 +1669,104 @@ void dcn401_hardware_release(struct dc *dc) } } +void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master) +{ + struct pipe_ctx *opp_heads[MAX_PIPES]; + struct pipe_ctx *dpp_pipes[MAX_PIPES]; + struct hubbub *hubbub = dc->res_pool->hubbub; + int dpp_count = 0; + + if (!otg_master->stream) + return; + + int slice_count = resource_get_opp_heads_for_otg_master(otg_master, + &context->res_ctx, opp_heads); + + for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) { + if (opp_heads[slice_idx]->plane_state) { + dpp_count = resource_get_dpp_pipes_for_opp_head( + opp_heads[slice_idx], + &context->res_ctx, + dpp_pipes); + for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) { + struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx]; + if (dpp_pipe && hubbub && + dpp_pipe->plane_res.hubp && + hubbub->funcs->wait_for_det_update) + hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst); + } + } + } +} + +void dcn401_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock) +{ + unsigned int i = 0; + struct pipe_ctx *pipe = NULL; + struct timing_generator *tg = NULL; + bool pipe_unlocked[MAX_PIPES] = {0}; + + if (lock) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) + continue; + dc->hwss.pipe_control_lock(dc, pipe, true); + } + } else { + /* Unlock pipes based on the change in DET allocation instead of pipe index + * Prevents over allocation of DET during unlock process + * e.g. 
2 pipe config with different streams with a max of 20 DET segments + * Before: After: + * - Pipe0: 10 DET segments - Pipe0: 12 DET segments + * - Pipe1: 10 DET segments - Pipe1: 8 DET segments + * If Pipe0 gets updated first, 22 DET segments will be allocated + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + int current_pipe_idx = i; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + pipe_unlocked[i] = true; + continue; + } + + // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared + struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream); + + if (old_otg_master) + current_pipe_idx = old_otg_master->pipe_idx; + if (resource_calculate_det_for_stream(context, pipe) < + resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) { + dc->hwss.pipe_control_lock(dc, pipe, false); + pipe_unlocked[i] = true; + dcn401_wait_for_det_buffer_update(dc, context, pipe); + } + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (pipe_unlocked[i]) + continue; + pipe = &context->res_ctx.pipe_ctx[i]; + dc->hwss.pipe_control_lock(dc, pipe, false); + } + } +} + +void dcn401_program_outstanding_updates(struct dc *dc, + struct dc_state *context) +{ + struct hubbub *hubbub = dc->res_pool->hubbub; + + /* update compbuf if required */ + if (hubbub->funcs->program_compbuf_segments) + hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index 8e9c1c17aa6627..a27e62081685d2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -81,4 +81,7 @@ void dcn401_hardware_release(struct dc *dc); void dcn401_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy); +void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); +void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); +void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context); #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index 6a768702c7bdec..a2ca07235c83d9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -38,7 +38,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn32_interdependent_update_lock, + .interdependent_update_lock = dcn401_interdependent_update_lock, .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn401_prepare_bandwidth, .optimize_bandwidth = dcn401_optimize_bandwidth, @@ -99,12 +99,11 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .fams2_global_control_lock = dcn401_fams2_global_control_lock, .fams2_update_config = 
dcn401_fams2_update_config, .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast, - .power_down = dce110_power_down, + .program_outstanding_updates = dcn401_program_outstanding_updates, }; static const struct hwseq_private_funcs dcn401_private_funcs = { .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, .update_mpcc = dcn20_update_mpcc, .set_input_transfer_func = dcn32_set_input_transfer_func, @@ -115,8 +114,6 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, .enable_stream_timing = dcn401_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, - .disable_stream_gating = dcn20_disable_stream_gating, - .enable_stream_gating = dcn20_enable_stream_gating, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = dcn32_init_blank, @@ -136,12 +133,11 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .dccg_init = dcn20_dccg_init, .set_mcm_luts = dcn401_set_mcm_luts, .program_mall_pipe_config = dcn32_program_mall_pipe_config, - .update_force_pstate = dcn32_update_force_pstate, .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = NULL, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, - .populate_mcm_luts = dcn401_populate_mcm_luts, + .populate_mcm_luts = NULL, }; void dcn401_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index d05be65a2256c9..ac920562562336 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -240,7 +240,6 @@ struct hw_sequencer_funcs { void (*program_triplebuffer)(const struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); void (*update_pending_status)(struct pipe_ctx *pipe_ctx); - void (*power_down)(struct dc *dc); void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable); /* Pipe Lock Related */ @@ -460,6 +459,9 @@ struct hw_sequencer_funcs { bool enable); void (*fams2_global_control_lock_fast)(union block_sequence_params *params); void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); + void (*program_outstanding_updates)(struct dc *dc, + struct dc_state *context); + void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); }; void color_space_to_black_color( @@ -520,6 +522,21 @@ void hwss_build_fast_sequence(struct dc *dc, struct dc_stream_status *stream_status, struct dc_state *context); +void hwss_wait_for_all_blank_complete(struct dc *dc, + struct dc_state *context); + +void hwss_wait_for_odm_update_pending_complete(struct dc *dc, + struct dc_state *context); + +void hwss_wait_for_no_pipes_pending(struct dc *dc, + struct dc_state *context); + +void hwss_wait_for_outstanding_hw_updates(struct dc *dc, + struct dc_state *dc_context); + +void hwss_process_outstanding_hw_updates(struct dc *dc, + struct dc_state *dc_context); + void hwss_send_dmcub_cmd(union block_sequence_params *params); void hwss_program_manual_trigger(union block_sequence_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 
7ac3f2a0948701..0ac67545697923 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -76,8 +76,6 @@ struct hwseq_private_funcs { void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); void (*init_pipes)(struct dc *dc, struct dc_state *context); void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context); - void (*update_plane_addr)(const struct dc *dc, - struct pipe_ctx *pipe_ctx); void (*plane_atomic_disconnect)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); @@ -170,7 +168,8 @@ struct hwseq_private_funcs { unsigned int *k1_div, unsigned int *k2_div); void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, - struct dc_state *context); + struct dc_state *context, + unsigned int current_pipe_idx); enum dc_status (*apply_single_controller_ctx_to_hw)( struct pipe_ctx *pipe_ctx, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 4c8e6436c7e1c9..bfb8b8502d2026 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -534,8 +534,8 @@ struct dcn_bw_output { unsigned int legacy_svp_drr_stream_index; bool legacy_svp_drr_stream_index_valid; struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES]; + struct dmub_cmd_fams2_global_config fams2_global_config; struct dmub_fams2_stream_static_state fams2_stream_params[DML2_MAX_PLANES]; - unsigned fams2_stream_count; struct dml2_display_arb_regs arb_regs; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h index b6203253111cab..8c18efc2aa70ac 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h @@ -46,6 +46,8 @@ struct audio_funcs { const struct audio_info *audio_info, const struct audio_dp_link_info *dp_link_info); + void (*az_disable_hbr_audio)(struct audio *audio); + void (*wall_dto_setup)(struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index d5fefce3e74bf5..2d06067ff36def 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -29,9 +29,6 @@ #include "dc.h" #include "dm_pp_smu.h" -#define DCN_MINIMUM_DISPCLK_Khz 100000 -#define DCN_MINIMUM_DPPCLK_Khz 100000 - /* Constants */ #define DDR4_DRAM_WIDTH 64 #define WM_A 0 @@ -180,6 +177,7 @@ struct clk_state_registers_and_bypass { uint32_t dispclk; uint32_t dppclk; uint32_t dtbclk; + uint32_t fclk; uint32_t dppclk_bypass; uint32_t dcfclk_bypass; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 12282f96dfe13c..c2dd061892f4d9 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -191,7 +191,8 @@ enum dentist_divider_range { CLK_SR_DCN401(CLK0_CLK1_DFS_CNTL, CLK01, 0), \ CLK_SR_DCN401(CLK0_CLK2_DFS_CNTL, CLK01, 0), \ CLK_SR_DCN401(CLK0_CLK3_DFS_CNTL, CLK01, 0), \ - CLK_SR_DCN401(CLK0_CLK4_DFS_CNTL, CLK01, 0) + CLK_SR_DCN401(CLK0_CLK4_DFS_CNTL, CLK01, 0), \ + CLK_SR_DCN401(CLK2_CLK2_DFS_CNTL, CLK20, 0) #define CLK_COMMON_MASK_SH_LIST_DCN401(mask_sh) \ CLK_COMMON_MASK_SH_LIST_DCN321(mask_sh) @@ -235,6 +236,7 @@ struct 
clk_mgr_registers { uint32_t CLK1_CLK2_DFS_CNTL; uint32_t CLK1_CLK3_DFS_CNTL; uint32_t CLK1_CLK4_DFS_CNTL; + uint32_t CLK2_CLK2_DFS_CNTL; uint32_t CLK1_CLK0_CURRENT_CNT; uint32_t CLK1_CLK1_CURRENT_CNT; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 4fb1aacee894be..e94e9ba60f55a6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -211,11 +211,9 @@ struct dccg_funcs { struct dccg *dccg, enum streamclk_source src, uint32_t otg_inst); - void (*set_dto_dscclk)( - struct dccg *dccg, - uint32_t dsc_inst, - bool enable); + void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst); void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); + void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); }; #endif //__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index dd2b2864876c79..67c32401893e86 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -227,6 +227,7 @@ struct hubbub_funcs { void (*get_mall_en)(struct hubbub *hubbub, unsigned int *mall_in_use); void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg); void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); + void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 27bba47186e921..41c76ba9ba569d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -217,12 +217,13 @@ enum optc_dsc_mode { }; struct dc_bias_and_scale { - uint16_t scale_red; - uint16_t bias_red; - uint16_t scale_green; - uint16_t bias_green; - uint16_t scale_blue; - uint16_t bias_blue; + uint32_t scale_red; + uint32_t bias_red; + uint32_t scale_green; + uint32_t bias_green; + uint32_t scale_blue; + uint32_t bias_blue; + bool bias_and_scale_valid; }; enum test_pattern_dyn_range { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 5f6c7daa14d9f9..a8b44f398ce688 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -63,7 +63,7 @@ union dcn_watermark_set { struct dml2_dchub_watermark_regs b; struct dml2_dchub_watermark_regs c; struct dml2_dchub_watermark_regs d; - } dcn4; //dcn4+ + } dcn4x; //dcn4+ }; struct dce_watermarks { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h index 287bf8a90ff661..03cbcbb36f1c16 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h @@ -65,6 +65,7 @@ struct optc { int vupdate_offset; int vupdate_width; int vready_offset; + int pstate_keepout; struct dc_crtc_timing orginal_patched_timing; enum signal_type signal; }; @@ -110,6 +111,7 @@ void optc1_program_timing(struct timing_generator *optc, int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios); @@ -127,7 +129,8 @@ void optc1_program_global_sync(struct timing_generator *optc, int vready_offset, int vstartup_start, int vupdate_offset, - int vupdate_width); + 
int vupdate_width, + int pstate_keepout); bool optc1_disable_crtc(struct timing_generator *optc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index e5e11c84e9e281..fe7f3137f2285e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -271,7 +271,9 @@ struct stream_encoder_funcs { struct stream_encoder *enc, unsigned int pix_per_container); void (*enable_fifo)(struct stream_encoder *enc); void (*disable_fifo)(struct stream_encoder *enc); + bool (*is_fifo_enabled)(struct stream_encoder *enc); void (*map_stream_to_link)(struct stream_encoder *enc, uint32_t stream_enc_inst, uint32_t link_enc_inst); + uint32_t (*get_pixels_per_cycle)(struct stream_encoder *enc); }; struct hpo_dp_stream_encoder_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 0f453452234cef..3d4c8bd42b4920 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -172,6 +172,7 @@ struct timing_generator_funcs { int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios ); @@ -256,7 +257,8 @@ struct timing_generator_funcs { int vready_offset, int vstartup_start, int vupdate_offset, - int vupdate_width); + int vupdate_width, + int pstate_keepout); void (*enable_optc_clock)(struct timing_generator *tg, bool enable); void (*program_stereo)(struct timing_generator *tg, const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h index 28da1dddf0a017..45262cba675e51 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h @@ -245,16 +245,6 @@ struct transform_funcs { void (*set_cursor_attributes)( struct transform *xfm_base, const struct dc_cursor_attributes *attr); - - bool (*transform_program_blnd_lut)( - struct transform *xfm, - const struct pwl_params *params); - bool (*transform_program_shaper_lut)( - struct transform *xfm, - const struct pwl_params *params); - bool (*transform_program_3dlut)( - struct transform *xfm, - struct tetrahedral_params *params); }; const uint16_t *get_filter_2tap_16p(void); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 96d40d33a1f99e..cd1157d225abe7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -639,4 +639,11 @@ struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx); * @dml2_options: struct to hold callbacks */ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options); + +/* + *Calculate total DET allocated for all pipes for a given OTG_MASTER pipe + */ +int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master); + +bool resource_is_hpo_acquired(struct dc_state *context); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 555c1c484cfddc..ff8fe1a94965b9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ 
b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -67,6 +67,8 @@ static void dp_retrain_link_dp_test(struct dc_link *link, { struct pipe_ctx *pipes[MAX_PIPES]; struct dc_state *state = link->dc->current_state; + bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state); + bool is_hpo_acquired; uint8_t count; int i; @@ -83,6 +85,12 @@ static void dp_retrain_link_dp_test(struct dc_link *link, pipes[i]); } + if (link->dc->hwss.setup_hpo_hw_control) { + is_hpo_acquired = resource_is_hpo_acquired(state); + if (was_hpo_acquired != is_hpo_acquired) + link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + } + for (i = count-1; i >= 0; i--) link_set_dpms_on(state, pipes[i]); } @@ -804,8 +812,11 @@ bool dp_set_test_pattern( break; } + if (!pipe_ctx->stream) + return false; + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { + if (should_use_dmub_lock(pipe_ctx->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index b76737b7b9e41b..3e47a6735912a5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -74,7 +74,10 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx) struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; - if (stream_enc && stream_enc->funcs->disable_fifo) + if (!stream_enc) + return; + + if (stream_enc->funcs->disable_fifo) stream_enc->funcs->disable_fifo(stream_enc); if (stream_enc->funcs->set_input_mode) stream_enc->funcs->set_input_mode(stream_enc, 0); diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c index 46fb3649bc86a7..6499807af72a19 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c @@ -50,8 +50,31 @@ static void update_dpia_stream_allocation_table(struct dc_link *link, DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n", status, mst_alloc_slots, prev_mst_slots_in_use); - ASSERT(link_enc); - link_enc->funcs->update_mst_stream_allocation_table(link_enc, table); + if (link_enc) + link_enc->funcs->update_mst_stream_allocation_table(link_enc, table); +} + +static void set_dio_dpia_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + if (tp_params->dp_phy_pattern != DP_TEST_PATTERN_VIDEO_MODE) + return; + + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + if (!link_enc) + return; + + link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +static void set_dio_dpia_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ } static const struct link_hwss dpia_link_hwss = { @@ -65,8 +88,8 @@ static const struct link_hwss dpia_link_hwss = { .ext = { .set_throttled_vcp_size = set_dio_throttled_vcp_size, .enable_dp_link_output = enable_dio_dp_link_output, - 
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern, - .set_dp_lane_settings = set_dio_dp_lane_settings, + .set_dp_link_test_pattern = set_dio_dpia_link_test_pattern, + .set_dp_lane_settings = set_dio_dpia_lane_settings, .update_stream_allocation_table = update_dpia_stream_allocation_table, }, }; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index e1257404357b11..cec68c5dba1322 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -28,6 +28,8 @@ #include "dccg.h" #include "clk_mgr.h" +#define DC_LOGGER link->ctx->logger + void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size) { @@ -108,6 +110,11 @@ void enable_hpo_dp_link_output(struct dc_link *link, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) { + if (!link_res->hpo_dp_link_enc) { + DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__); + return; + } + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( link->dc->res_pool->dccg, @@ -124,6 +131,11 @@ void disable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { + if (!link_res->hpo_dp_link_enc) { + DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__); + return; + } + link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); link_res->hpo_dp_link_enc->funcs->disable_link_phy( link_res->hpo_dp_link_enc, signal); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index bba644024780ad..d21ee9d12d269b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -863,7 +863,6 @@ static bool detect_link_and_local_sink(struct dc_link *link, struct dc_sink *prev_sink = NULL; struct dpcd_caps prev_dpcd_caps; enum dc_connection_type new_connection_type = dc_connection_none; - enum dc_connection_type pre_connection_type = link->type; const uint32_t post_oui_delay = 30; // 30ms DC_LOGGER_INIT(link->ctx->logger); @@ -965,7 +964,6 @@ static bool detect_link_and_local_sink(struct dc_link *link, } if (!detect_dp(link, &sink_caps, reason)) { - link->type = pre_connection_type; if (prev_sink) dc_sink_release(prev_sink); @@ -1191,8 +1189,7 @@ static bool detect_link_and_local_sink(struct dc_link *link, //sink only can use supported link rate table, we are foreced to enable it if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN) link->panel_config.ilr.optimize_edp_link_rate = true; - if (edp_is_ilr_optimization_enabled(link)) - link->reported_link_cap.link_rate = get_max_link_rate_from_ilr_table(link); + link->reported_link_cap.link_rate = get_max_edp_link_rate(link); } } else { @@ -1299,8 +1296,7 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason) link->dpcd_caps.is_mst_capable) is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); - if (is_local_sink_detect_success && - pre_link_type == dc_connection_mst_branch && + if (pre_link_type == dc_connection_mst_branch && link->type != dc_connection_mst_branch) is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 
65607589495f44..c4e03482ba9ae4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -817,17 +817,17 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + if (should_use_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, dsc->inst); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); - if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst, true); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; + if (should_use_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); - if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, true); } dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; dsc_cfg.pic_width *= opp_cnt; @@ -879,19 +879,32 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) } /* disable DSC block */ - if (dccg->funcs->set_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, pipe_ctx->stream_res.dsc->inst, false); - pipe_ctx->stream_res.dsc->funcs->dsc_disconnect(pipe_ctx->stream_res.dsc); - if (dccg->funcs->set_ref_dscclk) - dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst); - pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - if (dccg->funcs->set_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_pipe->stream_res.dsc->inst, false); + for (odm_pipe = pipe_ctx; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.dsc->funcs->dsc_disconnect(odm_pipe->stream_res.dsc); + /* + * TODO - dsc_disconnect is a double buffered register. + * by the time we call dsc_disable, dsc may still remain + * connected to OPP. In this case OPTC will no longer + * get correct pixel data because DSCC is off. However + * we also can't wait for the disconnect pending + * complete, because this function can be called + * with/without OTG master lock acquired. When the lock + * is acquired we will never get pending complete until + * we release the lock later. So there is no easy way to + * solve this problem especially when the lock is + * acquired. DSC is a front end hw block it should be + * programmed as part of front end sequence, where the + * commit sequence without lock and update sequence + * with lock are completely separated. However because + * we are programming dsc as part of back end link + * programming sequence, we don't know if front end OPTC + * master lock is acquired. The back end should be + * agnostic to front end lock. DSC programming shouldn't + * belong to this sequence. 
+ */ + odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); if (dccg->funcs->set_ref_dscclk) dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst); - odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); } } } @@ -2345,7 +2358,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); - else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + else if (dc_is_dp_sst_signal(pipe_ctx->stream->signal) && dp_is_128b_132b_signal(pipe_ctx)) update_sst_payload(pipe_ctx, false); @@ -2578,7 +2591,7 @@ void link_set_dpms_on( if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) allocate_mst_payload(pipe_ctx); - else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + else if (dc_is_dp_sst_signal(pipe_ctx->stream->signal) && dp_is_128b_132b_signal(pipe_ctx)) update_sst_payload(pipe_ctx, true); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 8246006857b30b..5e1b5ab9fbc635 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -385,7 +385,7 @@ static void link_destruct(struct dc_link *link) if (link->panel_cntl) link->panel_cntl->funcs->destroy(&link->panel_cntl); - if (link->link_enc) { + if (link->link_enc && !link->is_dig_mapping_flexible) { /* Update link encoder resource tracking variables. These are used for * the dynamic assignment of link encoders to streams. Virtual links * are not assigned encoder resources on creation. @@ -524,6 +524,7 @@ static bool construct_phy(struct dc_link *link, link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; break; case CONNECTOR_ID_DISPLAY_PORT: + case CONNECTOR_ID_MXM: case CONNECTOR_ID_USBC: link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 1aed55b0ab6a09..60f15a9ba7a5e1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -287,6 +287,13 @@ static bool dp_validate_mode_timing( req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link)); max_bw = dp_link_bandwidth_kbps(link, link_setting); + bool is_max_uncompressed_pixel_rate_exceeded = link->dpcd_caps.max_uncompressed_pixel_rate_cap.bits.valid && + timing->pix_clk_100hz > link->dpcd_caps.max_uncompressed_pixel_rate_cap.bits.max_uncompressed_pixel_rate_cap * 10000; + + if (is_max_uncompressed_pixel_rate_exceeded && !timing->flags.DSC) { + return false; + } + if (req_bw <= max_bw) { /* remember the biggest mode here, during * initial link training (to get diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 46bb7a855bc218..d78c8ec4de79e7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -212,6 +212,13 @@ static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in case 10000000: link_rate = LINK_RATE_UHBR10; // UHBR10 - 10.0 Gbps/Lane break; + case 13500000: + link_rate = LINK_RATE_UHBR13_5; // UHBR13.5 - 13.5 Gbps/Lane + break; + case 20000000: + link_rate = LINK_RATE_UHBR20; // UHBR20 - 20.0 Gbps/Lane + break; + default: link_rate = 
LINK_RATE_UNKNOWN; break; @@ -541,6 +548,23 @@ static enum dc_link_rate increase_link_rate(struct dc_link *link, } } +static void increase_edp_link_rate(struct dc_link *link, + struct dc_link_settings *current_link_setting) +{ + if (current_link_setting->use_link_rate_set) { + if (current_link_setting->link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + current_link_setting->link_rate_set++; + current_link_setting->link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting->link_rate_set]; + } else { + current_link_setting->use_link_rate_set = false; + current_link_setting->link_rate = LINK_RATE_UHBR10; + } + } else { + current_link_setting->link_rate = increase_link_rate(link, current_link_setting->link_rate); + } +} + static bool decide_fallback_link_setting_max_bw_policy( struct dc_link *link, const struct dc_link_settings *max, @@ -759,14 +783,7 @@ bool edp_decide_link_settings(struct dc_link *link, increase_lane_count( current_link_setting.lane_count); } else { - if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - current_link_setting.lane_count = - initial_link_setting.lane_count; - } else - break; + increase_edp_link_rate(link, &current_link_setting); } } return false; @@ -818,9 +835,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link, if (policy) { /* minimize lane */ if (current_link_setting.link_rate < max_link_rate) { - current_link_setting.link_rate = - increase_link_rate(link, - current_link_setting.link_rate); + increase_edp_link_rate(link, &current_link_setting); } else { if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { @@ -839,9 +854,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link, increase_lane_count( current_link_setting.lane_count); } else { - current_link_setting.link_rate = - increase_link_rate(link, - current_link_setting.link_rate); + increase_edp_link_rate(link, &current_link_setting); current_link_setting.lane_count = initial_link_setting.lane_count; } @@ -874,18 +887,15 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link, } if (policy) { /* minimize lane */ - if (current_link_setting.link_rate_set < - link->dpcd_caps.edp_supported_link_rates_count - && current_link_setting.link_rate < max_link_rate) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + if (current_link_setting.link_rate < max_link_rate) { + increase_edp_link_rate(link, &current_link_setting); } else { if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { current_link_setting.lane_count = increase_lane_count( current_link_setting.lane_count); current_link_setting.link_rate_set = initial_link_setting.link_rate_set; + current_link_setting.use_link_rate_set = initial_link_setting.use_link_rate_set; current_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; } else @@ -899,13 +909,8 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link, increase_lane_count( current_link_setting.lane_count); } else { - if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - current_link_setting.link_rate_set++; - current_link_setting.link_rate = - link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; - 
current_link_setting.lane_count = - initial_link_setting.lane_count; - } else + increase_edp_link_rate(link, &current_link_setting); + if (current_link_setting.link_rate == LINK_RATE_UNKNOWN) break; } } @@ -1166,6 +1171,8 @@ static void get_active_converter_info( link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, hdmi_encoded_link_bw); + DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__, + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps); } if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) @@ -1541,7 +1548,11 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) * Override count to 1 if we receive a known bad count (0 or an invalid value) */ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { - ASSERT(0); + /* If you see this message consistently, either the host platform has FIXED_VS flag + * incorrectly configured or the sink device is returning an invalid count. + */ + DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.", + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); } @@ -1931,6 +1942,11 @@ static bool retrieve_link_cap(struct dc_link *link) DC_LOG_DP2("\tFEC aggregated error counters are supported"); } + core_link_read_dpcd(link, + DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP, + link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw, + sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw)); + retrieve_cable_id(link); dpcd_write_cable_id_to_dprx(link); @@ -2254,7 +2270,7 @@ bool dp_verify_link_cap_with_retries( memset(&link->verified_link_cap, 0, sizeof(struct dc_link_settings)); - if (!link_detect_connection_type(link, &type) || type == dc_connection_none) { + if (link->link_enc && (!link_detect_connection_type(link, &type) || type == dc_connection_none)) { link->verified_link_cap = fail_safe_link_settings; break; } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 988999c4447540..27b881f947e8b8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -515,6 +515,41 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status) return align_status.bits.INTERLANE_ALIGN_DONE == 1; } +bool dp_check_interlane_aligned(union lane_align_status_updated align_status, + struct dc_link *link, + uint8_t retries) +{ + /* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4 + * has to share encoders unlike DP and USBC + */ + return (dp_is_interlane_aligned(align_status) || + (link->skip_fallback_on_link_loss && retries)); +} + +uint32_t dp_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t offset, + uint8_t retries) +{ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + if (offset == 0 && retries == 1 && lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + return max(lt_settings->eq_pattern_time, (uint32_t) DPIA_CLK_SYNC_DELAY); + else + return dpia_get_eq_aux_rd_interval(link, lt_settings, offset); + } else if 
(is_repeater(lt_settings, offset)) + return dp_translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + else + return lt_settings->eq_pattern_time; +} + +bool dp_check_dpcd_reqeust_status(const struct dc_link *link, + enum dc_status status) +{ + return (status != DC_OK && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA); +} + enum link_training_result dp_check_link_loss_status( struct dc_link *link, const struct link_training_settings *link_training_setting) @@ -973,13 +1008,17 @@ void repeater_training_done(struct dc_link *link, uint32_t offset) dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } -static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) +static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) { + enum dc_status status; uint8_t sink_status = 0; uint8_t i; /* clear training pattern set */ - dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + + if (dp_check_dpcd_reqeust_status(link, status)) + return LINK_TRAINING_ABORT; if (encoding == DP_128b_132b_ENCODING) { /* poll for intra-hop disable */ @@ -990,6 +1029,8 @@ static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding fsleep(1000); } } + + return LINK_TRAINING_SUCCESS; } enum dc_status dpcd_configure_channel_coding(struct dc_link *link, @@ -1013,17 +1054,18 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link, return status; } -void dpcd_set_training_pattern( +enum dc_status dpcd_set_training_pattern( struct dc_link *link, enum dc_dp_training_pattern training_pattern) { + enum dc_status status; union dpcd_training_pattern dpcd_pattern = {0}; dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dp_training_pattern_to_dpcd_training_pattern( link, training_pattern); - core_link_write_dpcd( + status = core_link_write_dpcd( link, DP_TRAINING_PATTERN_SET, &dpcd_pattern.raw, @@ -1033,6 +1075,8 @@ void dpcd_set_training_pattern( __func__, DP_TRAINING_PATTERN_SET, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + + return status; } enum dc_status dpcd_set_link_settings( @@ -1185,6 +1229,13 @@ void dpcd_set_lt_pattern_and_lane_settings( dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + dpia_set_tps_notification( + link, + lt_settings, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET, + offset); + if (is_repeater(lt_settings, offset)) { DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", __func__, @@ -1455,7 +1506,8 @@ static enum link_training_result dp_transition_to_video_idle( */ if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { msleep(5); - status = dp_check_link_loss_status(link, lt_settings); + if (!link->skip_fallback_on_link_loss) + status = dp_check_link_loss_status(link, lt_settings); } return status; } @@ -1521,7 +1573,9 @@ enum link_training_result dp_perform_link_training( ASSERT(0); /* exit training mode */ - dpcd_exit_training_mode(link, encoding); + if ((dpcd_exit_training_mode(link, encoding) != LINK_TRAINING_SUCCESS || status == LINK_TRAINING_ABORT) && + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + dpia_training_abort(link, &lt_settings, 0); /* switch to video idle */ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) @@ -1599,8 +1653,7 @@ bool perform_link_training_with_retries( dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, 
&cur_link_settings); return true; } else { - /** @todo Consolidate USB4 DP and DPx.x training. */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + if (!link->dc->config.consolidated_dpia_dp_lt && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { status = dpia_perform_link_training( link, &pipe_ctx->link_res, @@ -1629,8 +1682,17 @@ bool perform_link_training_with_retries( dp_trace_lt_total_count_increment(link, false); dp_trace_lt_result_update(link, status, false); dp_trace_set_lt_end_timestamp(link, false); - if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) + if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) { + // Update verified link settings to current one + // Because DPIA LT might fallback to lower link setting. + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + link->verified_link_cap.link_rate = link->cur_link_settings.link_rate; + link->verified_link_cap.lane_count = link->cur_link_settings.lane_count; + dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link); + } return true; + } } fail_count++; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h index 851bd17317a0c4..0b18aa35c33cb7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h @@ -55,7 +55,7 @@ void dp_set_hw_test_pattern( uint8_t *custom_pattern, uint32_t custom_pattern_size); -void dpcd_set_training_pattern( +enum dc_status dpcd_set_training_pattern( struct dc_link *link, enum dc_dp_training_pattern training_pattern); @@ -182,4 +182,18 @@ uint32_t dp_translate_training_aux_read_interval( uint8_t dp_get_nibble_at_index(const uint8_t *buf, uint32_t index); + +bool dp_check_interlane_aligned(union lane_align_status_updated align_status, + struct dc_link *link, + uint8_t retries); + +uint32_t dp_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t offset, + uint8_t retries); + +bool dp_check_dpcd_reqeust_status(const struct dc_link *link, + enum dc_status status); + #endif /* __DC_LINK_DP_TRAINING_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c index 2b4c15b0b40708..3bdce32a85e3c7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -157,6 +157,7 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence( struct link_training_settings *lt_settings, uint32_t offset) { + enum dc_status status; uint32_t retries_cr; uint32_t retry_count; uint32_t wait_time_microsec; @@ -216,7 +217,7 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence( /* 4. Read lane status and requested drive * settings as set by the sink */ - dp_get_lane_status_and_lane_adjust( + status = dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, @@ -224,6 +225,9 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence( dpcd_lane_adjust, offset); + if (dp_check_dpcd_reqeust_status(link, status)) + return LINK_TRAINING_ABORT; + /* 5. 
check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) { DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__); @@ -273,6 +277,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( struct link_training_settings *lt_settings, uint32_t offset) { + enum dc_status status; enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; uint32_t wait_time_microsec; @@ -308,12 +313,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( dpcd_set_lane_settings(link, lt_settings, offset); /* 3. wait for receiver to lock-on*/ - wait_time_microsec = lt_settings->eq_pattern_time; - - if (is_repeater(lt_settings, offset)) - wait_time_microsec = - dp_translate_training_aux_read_interval( - link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + wait_time_microsec = dp_get_eq_aux_rd_interval(link, lt_settings, offset, retries_ch_eq); dp_wait_for_training_aux_rd_interval( link, @@ -322,7 +322,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( /* 4. Read lane status and requested * drive settings as set by the sink*/ - dp_get_lane_status_and_lane_adjust( + status = dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, @@ -330,6 +330,9 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( dpcd_lane_adjust, offset); + if (dp_check_dpcd_reqeust_status(link, status)) + return LINK_TRAINING_ABORT; + /* 5. check CR done*/ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) return dpcd_lane_status[0].bits.CR_DONE_0 ? @@ -339,7 +342,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( /* 6. check CHEQ done*/ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && dp_is_symbol_locked(lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) + dp_check_interlane_aligned(dpcd_lane_status_updated, link, retries_ch_eq)) return LINK_TRAINING_SUCCESS; /* 7. update VS/PE/PC2 in lt_settings*/ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index cd1975c03f38d5..39e4b7dc9588f4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -43,9 +43,6 @@ #define DC_LOGGER \ link->ctx->logger -/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ -#define DPIA_CLK_SYNC_DELAY 16000 - /* Extend interval between training status checks for manual testing. */ #define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 @@ -566,28 +563,6 @@ static enum link_training_result dpia_training_cr_phase( return result; } -/* Return status read interval during equalization phase. */ -static uint32_t dpia_get_eq_aux_rd_interval( - const struct dc_link *link, - const struct link_training_settings *lt_settings, - uint32_t hop) -{ - uint32_t wait_time_microsec; - - if (hop == DPRX) - wait_time_microsec = lt_settings->eq_pattern_time; - else - wait_time_microsec = - dp_translate_training_aux_read_interval( - link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]); - - /* Check debug option for extending aux read interval. */ - if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval) - wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US; - - return wait_time_microsec; -} - /* Execute equalization phase of link training for specified hop in display * path in non-transparent mode: * - driver issues both DPCD and SET_CONFIG transactions. 
@@ -936,6 +911,22 @@ static enum link_training_result dpia_training_end( return result; } +/* Return status read interval during equalization phase. */ +uint32_t dpia_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t hop) +{ + /* Check debug option for extending aux read interval. */ + if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval) + return DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US; + else if (hop == DPRX) + return lt_settings->eq_pattern_time; + else + return dp_translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]); +} + /* When aborting training of specified hop in display path, clean up by: * - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET. * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0. @@ -943,7 +934,7 @@ static enum link_training_result dpia_training_end( * @param link DPIA link being trained. * @param hop Hop in display path. DPRX = 0. */ -static void dpia_training_abort( +void dpia_training_abort( struct dc_link *link, struct link_training_settings *lt_settings, uint32_t hop) @@ -968,7 +959,26 @@ static void dpia_training_abort( core_link_write_dpcd(link, dpcd_tps_offset, &data, 1); core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1); core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1); - core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data); + + if (!link->dc->config.consolidated_dpia_dp_lt) + core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data); +} + +void dpia_set_tps_notification( + struct dc_link *link, + const struct link_training_settings *lt_settings, + uint8_t pattern, + uint32_t hop) +{ + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + + if (lt_settings->lttpr_mode != LTTPR_MODE_NON_TRANSPARENT || pattern == DPCD_TRAINING_PATTERN_VIDEOIDLE) + return; + + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + if (hop != repeater_cnt) + dc_process_dmub_dpia_set_tps_notification(link->ctx->dc, link->link_index, pattern); } enum link_training_result dpia_perform_link_training( diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h index b39fb9faf1c2cd..9f4eceb494c2d2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h @@ -28,6 +28,9 @@ #define __DC_LINK_DP_TRAINING_DPIA_H__ #include "link_dp_training.h" +/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ +#define DPIA_CLK_SYNC_DELAY 16000 + /* Train DP tunneling link for USB4 DPIA display endpoint. * DPIA equivalent of dc_link_dp_perfrorm_link_training. * Aborts link training upon detection of sink unplug. 
@@ -38,4 +41,20 @@ enum link_training_result dpia_perform_link_training( const struct dc_link_settings *link_setting, bool skip_video_pattern); +void dpia_training_abort( + struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop); + +uint32_t dpia_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t hop); + +void dpia_set_tps_notification( + struct dc_link *link, + const struct link_training_settings *lt_settings, + uint8_t pattern, + uint32_t offset); + #endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index bf820d2b4dc4ad..3aa05a2be6c09f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -305,16 +305,17 @@ bool edp_is_ilr_optimization_enabled(struct dc_link *link) return true; } -enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link) +enum dc_link_rate get_max_edp_link_rate(struct dc_link *link) { - enum dc_link_rate link_rate = link->reported_link_cap.link_rate; + enum dc_link_rate max_ilr_rate = LINK_RATE_UNKNOWN; + enum dc_link_rate max_non_ilr_rate = dp_get_max_link_cap(link).link_rate; for (int i = 0; i < link->dpcd_caps.edp_supported_link_rates_count; i++) { - if (link_rate < link->dpcd_caps.edp_supported_link_rates[i]) - link_rate = link->dpcd_caps.edp_supported_link_rates[i]; + if (max_ilr_rate < link->dpcd_caps.edp_supported_link_rates[i]) + max_ilr_rate = link->dpcd_caps.edp_supported_link_rates[i]; } - return link_rate; + return (max_ilr_rate > max_non_ilr_rate ? max_ilr_rate : max_non_ilr_rate); } bool edp_is_ilr_optimization_required(struct dc_link *link, @@ -1167,6 +1168,9 @@ static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; if (link_res->hpo_dp_link_enc) { + if (link->wa_flags.disable_assr_for_uhbr) + return; + link_enc_index = link_res->hpo_dp_link_enc->inst; use_hpo_dp_link_enc = true; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 8df8ac5bde5b16..30dc8c24c008c4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -69,7 +69,7 @@ bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); bool edp_is_ilr_optimization_enabled(struct dc_link *link); -enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link); +enum dc_link_rate get_max_edp_link_rate(struct dc_link *link); bool edp_backlight_enable_aux(struct dc_link *link, bool enable); void edp_add_delay_for_T9(struct dc_link *link); bool edp_receiver_ready_T9(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile index 505bc0517e082a..eab196c57c6cad 100644 --- a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile +++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile @@ -24,6 +24,15 @@ # ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN20 
+############################################################################### +MMHUBBUB_DCN20 = dcn20_mmhubbub.o + +AMD_DAL_MMHUBBUB_DCN20 = $(addprefix $(AMDDALPATH)/dc/mmhubbub/dcn20/,$(MMHUBBUB_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_MMHUBBUB_DCN20) + ############################################################################### # DCN32 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c new file mode 100644 index 00000000000000..259a98e4ee2c28 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c @@ -0,0 +1,324 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "reg_helper.h" +#include "resource.h" +#include "mcif_wb.h" +#include "dcn20_mmhubbub.h" + + +#define REG(reg)\ + mcif_wb20->mcif_wb_regs->reg + +#define CTX \ + mcif_wb20->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + mcif_wb20->mcif_wb_shift->field_name, mcif_wb20->mcif_wb_mask->field_name + +#define MCIF_ADDR(addr) (((unsigned long long)addr & 0xffffffffff) + 0xFE) >> 8 +#define MCIF_ADDR_HIGH(addr) (unsigned long long)addr >> 40 + +/* wbif programming guide: + * 1. set up wbif parameter: + * unsigned long long luma_address[4]; //4 frame buffer + * unsigned long long chroma_address[4]; + * unsigned int luma_pitch; + * unsigned int chroma_pitch; + * unsigned int warmup_pitch=0x10; //256B align, the page size is 4KB when it is 0x10 + * unsigned int slice_lines; //slice size + * unsigned int time_per_pixel; // time per pixel, in ns + * unsigned int arbitration_slice; // 0: 512 bytes 1: 1024 bytes 2: 2048 Bytes + * unsigned int max_scaled_time; // used for QOS generation + * unsigned int swlock=0x0; + * unsigned int cli_watermark[4]; //4 group urgent watermark + * unsigned int pstate_watermark[4]; //4 group pstate watermark + * unsigned int sw_int_en; // Software interrupt enable, frame end and overflow + * unsigned int sw_slice_int_en; // slice end interrupt enable + * unsigned int sw_overrun_int_en; // overrun error interrupt enable + * unsigned int vce_int_en; // VCE interrupt enable, frame end and overflow + * unsigned int vce_slice_int_en; // VCE slice end interrupt enable, frame end and overflow + * + * 2. configure wbif register + * a. call mmhubbub_config_wbif() + * + * 3. 
Enable wbif + * call set_wbif_bufmgr_enable(); + * + * 4. wbif_dump_status(), option, for debug purpose + * the bufmgr status can show the progress of write back, can be used for debug purpose + */ + +static void mmhubbub2_config_mcif_buf(struct mcif_wb *mcif_wb, + struct mcif_buf_params *params, + unsigned int dest_height) +{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + + /* sw lock buffer0~buffer3, default is 0 */ + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, params->swlock); + + /* buffer address for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, MCIF_ADDR(params->luma_address[0])); + REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[0])); + /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB_BUF_1_ADDR_Y_OFFSET, 0); + + /* buffer address for Chroma in planar mode (unused in packing mode) */ + REG_UPDATE(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, MCIF_ADDR(params->chroma_address[0])); + REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[0])); + /* right eye offset for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB_BUF_1_ADDR_C_OFFSET, 0); + + /* buffer address for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, MCIF_ADDR(params->luma_address[1])); + REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[1])); + /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB_BUF_2_ADDR_Y_OFFSET, 0); + + /* buffer address for Chroma in planar mode (unused in packing mode) */ + REG_UPDATE(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, MCIF_ADDR(params->chroma_address[1])); + REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[1])); + /* right eye offset for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB_BUF_2_ADDR_C_OFFSET, 0); + + /* buffer address for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, MCIF_ADDR(params->luma_address[2])); + REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[2])); + /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB_BUF_3_ADDR_Y_OFFSET, 0); + + /* buffer address for Chroma in planar mode (unused in packing mode) */ + REG_UPDATE(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, MCIF_ADDR(params->chroma_address[2])); + REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[2])); + /* right eye offset for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB_BUF_3_ADDR_C_OFFSET, 0); + + /* buffer address for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, MCIF_ADDR(params->luma_address[3])); + REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[3])); + /* right eye sub-buffer address offset for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB_BUF_4_ADDR_Y_OFFSET, 0); + + /* buffer address for Chroma in planar mode 
(unused in packing mode) */ + REG_UPDATE(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, MCIF_ADDR(params->chroma_address[3])); + REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[3])); + /* right eye offset for packing mode or Luma in planar mode */ + REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB_BUF_4_ADDR_C_OFFSET, 0); + + /* setup luma & chroma size + * should be enough to contain a whole frame Luma data, + * the programmed value is frame buffer size [27:8], 256-byte aligned + */ + REG_UPDATE(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, (params->luma_pitch>>8) * dest_height); + REG_UPDATE(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, (params->chroma_pitch>>8) * dest_height); + + /* enable address fence */ + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, 1); + + /* setup pitch, the programmed value is [15:8], 256B align */ + REG_UPDATE_2(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, params->luma_pitch >> 8, + MCIF_WB_BUF_CHROMA_PITCH, params->chroma_pitch >> 8); + + /* Set pitch for MC cache warm up mode */ + /* Pitch is 256 bytes aligned. The default pitch is 4K */ + /* default is 0x10 */ + REG_UPDATE(MCIF_WB_WARM_UP_CNTL, MCIF_WB_PITCH_SIZE_WARMUP, params->warmup_pitch); +} + +static void mmhubbub2_config_mcif_arb(struct mcif_wb *mcif_wb, + struct mcif_arb_params *params) +{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + + /* Programmed by the video driver based on the CRTC timing (for DWB) */ + REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, params->time_per_pixel); + + /* Programming dwb watermark */ + /* Watermark to generate urgent in MCIF_WB_CLI, value is determined by MCIF_WB_CLI_WATERMARK_MASK. */ + /* Program in ns. A formula will be provided in the pseudo code to calculate the value. 
*/ + REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x0); + /* urgent_watermarkA */ + REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[0]); + REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x1); + /* urgent_watermarkB */ + REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[1]); + REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x2); + /* urgent_watermarkC */ + REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[2]); + REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x3); + /* urgent_watermarkD */ + REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[3]); + + /* Programming nb pstate watermark */ + /* nbp_state_change_watermarkA */ + REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x0); + REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, + NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[0]); + /* nbp_state_change_watermarkB */ + REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x1); + REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, + NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[1]); + /* nbp_state_change_watermarkC */ + REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x2); + REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, + NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[2]); + /* nbp_state_change_watermarkD */ + REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x3); + REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, + NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[3]); + + /* max_scaled_time */ + REG_UPDATE(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, params->max_scaled_time); + + /* slice_lines */ + REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, params->slice_lines-1); + + /* Set arbitration unit for Luma/Chroma */ + /* arb_unit=2 should be chosen for more efficiency */ + /* Arbitration size, 0: 512 bytes 1: 1024 bytes 2: 2048 Bytes */ + REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice); +} + +void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb, + struct mcif_irq_params *params) +{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + + /* Set interrupt mask */ + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, params->sw_int_en); + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, params->sw_slice_int_en); + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, params->sw_overrun_int_en); + + REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, params->vce_int_en); + if (mcif_wb20->mcif_wb_mask->MCIF_WB_BUFMGR_VCE_SLICE_INT_EN) + REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en); +} + +void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb) +{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + + /* Enable Mcifwb */ + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, 1); +} + +void mmhubbub2_disable_mcif(struct mcif_wb *mcif_wb) +{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + + /* disable buffer manager */ + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, 0); +} + +/* set which group of pstate watermark to use and set wbif watermark change request */ +/* +static void mmhubbub2_wbif_watermark_change_req(struct mcif_wb *mcif_wb, unsigned int wm_set) 
+{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + uint32_t change_req; + + REG_GET(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, &change_req); + change_req = (change_req == 0) ? 1 : 0; + REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_SEL, wm_set); + REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, change_req); +} +*/ +/* Set watermark change interrupt disable bit */ +/* +static void mmhubbub2_set_wbif_watermark_change_int_disable(struct mcif_wb *mcif_wb, unsigned int ack_int_dis) +{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + + REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_DIS, ack_int_dis); +} +*/ +/* Read watermark change interrupt status */ +/* +unsigned int mmhubbub2_get_wbif_watermark_change_int_status(struct mcif_wb *mcif_wb) +{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + uint32_t irq_status; + + REG_GET(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_STATUS, &irq_status); + return irq_status; +} +*/ + +void mcifwb2_dump_frame(struct mcif_wb *mcif_wb, + struct mcif_buf_params *mcif_params, + enum dwb_scaler_mode out_format, + unsigned int dest_width, + unsigned int dest_height, + struct mcif_wb_frame_dump_info *dump_info, + unsigned char *luma_buffer, + unsigned char *chroma_buffer, + unsigned char *dest_luma_buffer, + unsigned char *dest_chroma_buffer) +{ + struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb); + + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf); + + memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height); + memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2); + + REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0); + + dump_info->format = out_format; + dump_info->width = dest_width; + dump_info->height = dest_height; + dump_info->luma_pitch = mcif_params->luma_pitch; + dump_info->chroma_pitch = mcif_params->chroma_pitch; + dump_info->size = dest_height * (mcif_params->luma_pitch + mcif_params->chroma_pitch); +} + +static const struct mcif_wb_funcs dcn20_mmhubbub_funcs = { + .enable_mcif = mmhubbub2_enable_mcif, + .disable_mcif = mmhubbub2_disable_mcif, + .config_mcif_buf = mmhubbub2_config_mcif_buf, + .config_mcif_arb = mmhubbub2_config_mcif_arb, + .config_mcif_irq = mmhubbub2_config_mcif_irq, + .dump_frame = mcifwb2_dump_frame, +}; + +void dcn20_mmhubbub_construct(struct dcn20_mmhubbub *mcif_wb20, + struct dc_context *ctx, + const struct dcn20_mmhubbub_registers *mcif_wb_regs, + const struct dcn20_mmhubbub_shift *mcif_wb_shift, + const struct dcn20_mmhubbub_mask *mcif_wb_mask, + int inst) +{ + mcif_wb20->base.ctx = ctx; + + mcif_wb20->base.inst = inst; + mcif_wb20->base.funcs = &dcn20_mmhubbub_funcs; + + mcif_wb20->mcif_wb_regs = mcif_wb_regs; + mcif_wb20->mcif_wb_shift = mcif_wb_shift; + mcif_wb20->mcif_wb_mask = mcif_wb_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h new file mode 100644 index 00000000000000..5ab32aa51e1375 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h @@ -0,0 +1,517 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_MCIF_WB_DCN20_H__ +#define __DC_MCIF_WB_DCN20_H__ + +#define TO_DCN20_MMHUBBUB(mcif_wb_base) \ + container_of(mcif_wb_base, struct dcn20_mmhubbub, base) + +#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \ + SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\ + SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\ + SRI(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_PITCH, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst),\ + SRI(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst),\ + SRI(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst),\ + SRI(MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB, inst),\ + SRI(MCIF_WB_TEST_DEBUG_DATA, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB, inst),\ + SRI(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst),\ + SRI(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MCIF_WB, inst),\ + SRI(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst),\ + SRI(MCIF_WB_WATERMARK, MCIF_WB, inst),\ + SRI(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst),\ + SRI(MCIF_WB_WARM_UP_CNTL, MCIF_WB, inst),\ + SRI(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst),\ + SRI(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst),\ + SRI(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, 
inst),\ + SRI(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst),\ + SRI(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst),\ + SRI(SMU_WM_CONTROL, WBIF, inst) + +#define MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \ + SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_ACK, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_P_VMID, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB_BUFMGR_CUR_LINE_R, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_BUFTAG, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_LINE_L, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_NEXT_BUF, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_BUFTAG, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_NXT_BUF, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_FIELD, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_CUR_LINE_L, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_LONG_LINE_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SHORT_LINE_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_FRAME_LENGTH_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_CUR_LINE_R, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_NEW_CONTENT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_COLOR_DEPTH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ_BLACK_PIXEL, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_Y_OVERRUN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, 
MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_BUFTAG, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_NXT_BUF, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_FIELD, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_CUR_LINE_L, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_LONG_LINE_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SHORT_LINE_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_FRAME_LENGTH_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_CUR_LINE_R, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_NEW_CONTENT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_COLOR_DEPTH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ_BLACK_PIXEL, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_Y_OVERRUN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_BUFTAG, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_NXT_BUF, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_FIELD, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_CUR_LINE_L, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_LONG_LINE_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SHORT_LINE_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_FRAME_LENGTH_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_CUR_LINE_R, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_NEW_CONTENT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_COLOR_DEPTH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ_BLACK_PIXEL, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_Y_OVERRUN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_BUFTAG, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_NXT_BUF, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_FIELD, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_CUR_LINE_L, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_LONG_LINE_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, 
MCIF_WB_BUF_4_SHORT_LINE_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_FRAME_LENGTH_ERROR, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_CUR_LINE_R, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_NEW_CONTENT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_COLOR_DEPTH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ_BLACK_PIXEL, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_Y_OVERRUN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_C_OVERRUN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, WM_CHANGE_ACK_FORCE_ON, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB_TEST_DEBUG_INDEX, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_TEST_DEBUG_DATA, MCIF_WB_TEST_DEBUG_DATA, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB_BUF_1_ADDR_Y_OFFSET, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB_BUF_1_ADDR_C_OFFSET, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB_BUF_2_ADDR_Y_OFFSET, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB_BUF_2_ADDR_C_OFFSET, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB_BUF_3_ADDR_Y_OFFSET, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB_BUF_3_ADDR_C_OFFSET, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB_BUF_4_ADDR_Y_OFFSET, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB_BUF_4_ADDR_C_OFFSET, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_FORCE_ON, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_ALLOW_FOR_URGENT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB_CLI_CLOCK_GATER_OVERRIDE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_WARM_UP_CNTL, MCIF_WB_PITCH_SIZE_WARMUP, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, DIS_REFRESH_UNDER_NBPREQ, mask_sh),\ + 
SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, PERFRAME_SELF_REFRESH, mask_sh),\ + SF(MCIF_WB0_MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_SECURITY_LEVEL, MCIF_WB_SECURITY_LEVEL, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_WIDTH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_HEIGHT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_WIDTH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_HEIGHT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_WIDTH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_HEIGHT, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_WIDTH, mask_sh),\ + SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_HEIGHT, mask_sh),\ + SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_SEL, mask_sh),\ + SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, mask_sh),\ + SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_DIS, mask_sh),\ + SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_STATUS, mask_sh) + +#define MCIF_WB_REG_FIELD_LIST_DCN2_0(type) \ + type MCIF_WB_BUFMGR_ENABLE;\ + type MCIF_WB_BUFMGR_SW_INT_EN;\ + type MCIF_WB_BUFMGR_SW_INT_ACK;\ + type MCIF_WB_BUFMGR_SW_SLICE_INT_EN;\ + type MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN;\ + type MCIF_WB_BUFMGR_SW_LOCK;\ + type MCIF_WB_P_VMID;\ + type MCIF_WB_BUF_ADDR_FENCE_EN;\ + type MCIF_WB_BUFMGR_CUR_LINE_R;\ + type MCIF_WB_BUFMGR_VCE_INT_STATUS;\ + type MCIF_WB_BUFMGR_SW_INT_STATUS;\ + type MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS;\ + type MCIF_WB_BUFMGR_CUR_BUF;\ + type MCIF_WB_BUFMGR_BUFTAG;\ + type MCIF_WB_BUFMGR_CUR_LINE_L;\ + type MCIF_WB_BUFMGR_NEXT_BUF;\ + type MCIF_WB_BUF_LUMA_PITCH;\ + type MCIF_WB_BUF_CHROMA_PITCH;\ + type MCIF_WB_BUF_1_ACTIVE;\ + type MCIF_WB_BUF_1_SW_LOCKED;\ + type MCIF_WB_BUF_1_VCE_LOCKED;\ + type MCIF_WB_BUF_1_OVERFLOW;\ + type MCIF_WB_BUF_1_DISABLE;\ + type MCIF_WB_BUF_1_MODE;\ + type MCIF_WB_BUF_1_BUFTAG;\ + type MCIF_WB_BUF_1_NXT_BUF;\ + type MCIF_WB_BUF_1_FIELD;\ + type MCIF_WB_BUF_1_CUR_LINE_L;\ + type MCIF_WB_BUF_1_LONG_LINE_ERROR;\ + type MCIF_WB_BUF_1_SHORT_LINE_ERROR;\ + type MCIF_WB_BUF_1_FRAME_LENGTH_ERROR;\ + type MCIF_WB_BUF_1_CUR_LINE_R;\ + type MCIF_WB_BUF_1_NEW_CONTENT;\ + type MCIF_WB_BUF_1_COLOR_DEPTH;\ + type MCIF_WB_BUF_1_TMZ_BLACK_PIXEL;\ + type MCIF_WB_BUF_1_TMZ;\ + type MCIF_WB_BUF_1_Y_OVERRUN;\ + type MCIF_WB_BUF_1_C_OVERRUN;\ + type MCIF_WB_BUF_2_ACTIVE;\ + type MCIF_WB_BUF_2_SW_LOCKED;\ + type MCIF_WB_BUF_2_VCE_LOCKED;\ + type MCIF_WB_BUF_2_OVERFLOW;\ + type MCIF_WB_BUF_2_DISABLE;\ + type MCIF_WB_BUF_2_MODE;\ + type MCIF_WB_BUF_2_BUFTAG;\ + type MCIF_WB_BUF_2_NXT_BUF;\ + type MCIF_WB_BUF_2_FIELD;\ + type 
MCIF_WB_BUF_2_CUR_LINE_L;\ + type MCIF_WB_BUF_2_LONG_LINE_ERROR;\ + type MCIF_WB_BUF_2_SHORT_LINE_ERROR;\ + type MCIF_WB_BUF_2_FRAME_LENGTH_ERROR;\ + type MCIF_WB_BUF_2_CUR_LINE_R;\ + type MCIF_WB_BUF_2_NEW_CONTENT;\ + type MCIF_WB_BUF_2_COLOR_DEPTH;\ + type MCIF_WB_BUF_2_TMZ_BLACK_PIXEL;\ + type MCIF_WB_BUF_2_TMZ;\ + type MCIF_WB_BUF_2_Y_OVERRUN;\ + type MCIF_WB_BUF_2_C_OVERRUN;\ + type MCIF_WB_BUF_3_ACTIVE;\ + type MCIF_WB_BUF_3_SW_LOCKED;\ + type MCIF_WB_BUF_3_VCE_LOCKED;\ + type MCIF_WB_BUF_3_OVERFLOW;\ + type MCIF_WB_BUF_3_DISABLE;\ + type MCIF_WB_BUF_3_MODE;\ + type MCIF_WB_BUF_3_BUFTAG;\ + type MCIF_WB_BUF_3_NXT_BUF;\ + type MCIF_WB_BUF_3_FIELD;\ + type MCIF_WB_BUF_3_CUR_LINE_L;\ + type MCIF_WB_BUF_3_LONG_LINE_ERROR;\ + type MCIF_WB_BUF_3_SHORT_LINE_ERROR;\ + type MCIF_WB_BUF_3_FRAME_LENGTH_ERROR;\ + type MCIF_WB_BUF_3_CUR_LINE_R;\ + type MCIF_WB_BUF_3_NEW_CONTENT;\ + type MCIF_WB_BUF_3_COLOR_DEPTH;\ + type MCIF_WB_BUF_3_TMZ_BLACK_PIXEL;\ + type MCIF_WB_BUF_3_TMZ;\ + type MCIF_WB_BUF_3_Y_OVERRUN;\ + type MCIF_WB_BUF_3_C_OVERRUN;\ + type MCIF_WB_BUF_4_ACTIVE;\ + type MCIF_WB_BUF_4_SW_LOCKED;\ + type MCIF_WB_BUF_4_VCE_LOCKED;\ + type MCIF_WB_BUF_4_OVERFLOW;\ + type MCIF_WB_BUF_4_DISABLE;\ + type MCIF_WB_BUF_4_MODE;\ + type MCIF_WB_BUF_4_BUFTAG;\ + type MCIF_WB_BUF_4_NXT_BUF;\ + type MCIF_WB_BUF_4_FIELD;\ + type MCIF_WB_BUF_4_CUR_LINE_L;\ + type MCIF_WB_BUF_4_LONG_LINE_ERROR;\ + type MCIF_WB_BUF_4_SHORT_LINE_ERROR;\ + type MCIF_WB_BUF_4_FRAME_LENGTH_ERROR;\ + type MCIF_WB_BUF_4_CUR_LINE_R;\ + type MCIF_WB_BUF_4_NEW_CONTENT;\ + type MCIF_WB_BUF_4_COLOR_DEPTH;\ + type MCIF_WB_BUF_4_TMZ_BLACK_PIXEL;\ + type MCIF_WB_BUF_4_TMZ;\ + type MCIF_WB_BUF_4_Y_OVERRUN;\ + type MCIF_WB_BUF_4_C_OVERRUN;\ + type MCIF_WB_CLIENT_ARBITRATION_SLICE;\ + type MCIF_WB_TIME_PER_PIXEL;\ + type WM_CHANGE_ACK_FORCE_ON;\ + type MCIF_WB_CLI_WATERMARK_MASK;\ + type MCIF_WB_TEST_DEBUG_INDEX;\ + type MCIF_WB_TEST_DEBUG_DATA;\ + type MCIF_WB_BUF_1_ADDR_Y;\ + type MCIF_WB_BUF_1_ADDR_Y_OFFSET;\ + type MCIF_WB_BUF_1_ADDR_C;\ + type MCIF_WB_BUF_1_ADDR_C_OFFSET;\ + type MCIF_WB_BUF_2_ADDR_Y;\ + type MCIF_WB_BUF_2_ADDR_Y_OFFSET;\ + type MCIF_WB_BUF_2_ADDR_C;\ + type MCIF_WB_BUF_2_ADDR_C_OFFSET;\ + type MCIF_WB_BUF_3_ADDR_Y;\ + type MCIF_WB_BUF_3_ADDR_Y_OFFSET;\ + type MCIF_WB_BUF_3_ADDR_C;\ + type MCIF_WB_BUF_3_ADDR_C_OFFSET;\ + type MCIF_WB_BUF_4_ADDR_Y;\ + type MCIF_WB_BUF_4_ADDR_Y_OFFSET;\ + type MCIF_WB_BUF_4_ADDR_C;\ + type MCIF_WB_BUF_4_ADDR_C_OFFSET;\ + type MCIF_WB_BUFMGR_VCE_LOCK_IGNORE;\ + type MCIF_WB_BUFMGR_VCE_INT_EN;\ + type MCIF_WB_BUFMGR_VCE_INT_ACK;\ + type MCIF_WB_BUFMGR_VCE_SLICE_INT_EN;\ + type MCIF_WB_BUFMGR_VCE_LOCK;\ + type MCIF_WB_BUFMGR_SLICE_SIZE;\ + type NB_PSTATE_CHANGE_REFRESH_WATERMARK;\ + type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST;\ + type NB_PSTATE_CHANGE_FORCE_ON;\ + type NB_PSTATE_ALLOW_FOR_URGENT;\ + type NB_PSTATE_CHANGE_WATERMARK_MASK;\ + type MCIF_WB_CLI_WATERMARK;\ + type MCIF_WB_CLI_CLOCK_GATER_OVERRIDE;\ + type MCIF_WB_PITCH_SIZE_WARMUP;\ + type DIS_REFRESH_UNDER_NBPREQ;\ + type PERFRAME_SELF_REFRESH;\ + type MAX_SCALED_TIME_TO_URGENT;\ + type MCIF_WB_SECURITY_LEVEL;\ + type MCIF_WB_BUF_LUMA_SIZE;\ + type MCIF_WB_BUF_CHROMA_SIZE;\ + type MCIF_WB_BUF_1_ADDR_Y_HIGH;\ + type MCIF_WB_BUF_1_ADDR_C_HIGH;\ + type MCIF_WB_BUF_2_ADDR_Y_HIGH;\ + type MCIF_WB_BUF_2_ADDR_C_HIGH;\ + type MCIF_WB_BUF_3_ADDR_Y_HIGH;\ + type MCIF_WB_BUF_3_ADDR_C_HIGH;\ + type MCIF_WB_BUF_4_ADDR_Y_HIGH;\ + type MCIF_WB_BUF_4_ADDR_C_HIGH;\ + type MCIF_WB_BUF_1_RESOLUTION_WIDTH;\ + type 
MCIF_WB_BUF_1_RESOLUTION_HEIGHT;\ + type MCIF_WB_BUF_2_RESOLUTION_WIDTH;\ + type MCIF_WB_BUF_2_RESOLUTION_HEIGHT;\ + type MCIF_WB_BUF_3_RESOLUTION_WIDTH;\ + type MCIF_WB_BUF_3_RESOLUTION_HEIGHT;\ + type MCIF_WB_BUF_4_RESOLUTION_WIDTH;\ + type MCIF_WB_BUF_4_RESOLUTION_HEIGHT;\ + type MCIF_WB0_WM_CHG_SEL;\ + type MCIF_WB0_WM_CHG_REQ;\ + type MCIF_WB0_WM_CHG_ACK_INT_DIS;\ + type MCIF_WB0_WM_CHG_ACK_INT_STATUS + +#define MCIF_WB_REG_VARIABLE_LIST_DCN2_0 \ + uint32_t MCIF_WB_BUFMGR_SW_CONTROL;\ + uint32_t MCIF_WB_BUFMGR_CUR_LINE_R;\ + uint32_t MCIF_WB_BUFMGR_STATUS;\ + uint32_t MCIF_WB_BUF_PITCH;\ + uint32_t MCIF_WB_BUF_1_STATUS;\ + uint32_t MCIF_WB_BUF_1_STATUS2;\ + uint32_t MCIF_WB_BUF_2_STATUS;\ + uint32_t MCIF_WB_BUF_2_STATUS2;\ + uint32_t MCIF_WB_BUF_3_STATUS;\ + uint32_t MCIF_WB_BUF_3_STATUS2;\ + uint32_t MCIF_WB_BUF_4_STATUS;\ + uint32_t MCIF_WB_BUF_4_STATUS2;\ + uint32_t MCIF_WB_ARBITRATION_CONTROL;\ + uint32_t MCIF_WB_SCLK_CHANGE;\ + uint32_t MCIF_WB_TEST_DEBUG_INDEX;\ + uint32_t MCIF_WB_TEST_DEBUG_DATA;\ + uint32_t MCIF_WB_BUF_1_ADDR_Y;\ + uint32_t MCIF_WB_BUF_1_ADDR_Y_OFFSET;\ + uint32_t MCIF_WB_BUF_1_ADDR_C;\ + uint32_t MCIF_WB_BUF_1_ADDR_C_OFFSET;\ + uint32_t MCIF_WB_BUF_2_ADDR_Y;\ + uint32_t MCIF_WB_BUF_2_ADDR_Y_OFFSET;\ + uint32_t MCIF_WB_BUF_2_ADDR_C;\ + uint32_t MCIF_WB_BUF_2_ADDR_C_OFFSET;\ + uint32_t MCIF_WB_BUF_3_ADDR_Y;\ + uint32_t MCIF_WB_BUF_3_ADDR_Y_OFFSET;\ + uint32_t MCIF_WB_BUF_3_ADDR_C;\ + uint32_t MCIF_WB_BUF_3_ADDR_C_OFFSET;\ + uint32_t MCIF_WB_BUF_4_ADDR_Y;\ + uint32_t MCIF_WB_BUF_4_ADDR_Y_OFFSET;\ + uint32_t MCIF_WB_BUF_4_ADDR_C;\ + uint32_t MCIF_WB_BUF_4_ADDR_C_OFFSET;\ + uint32_t MCIF_WB_BUFMGR_VCE_CONTROL;\ + uint32_t MCIF_WB_NB_PSTATE_LATENCY_WATERMARK;\ + uint32_t MCIF_WB_NB_PSTATE_CONTROL;\ + uint32_t MCIF_WB_WATERMARK;\ + uint32_t MCIF_WB_CLOCK_GATER_CONTROL;\ + uint32_t MCIF_WB_WARM_UP_CNTL;\ + uint32_t MCIF_WB_SELF_REFRESH_CONTROL;\ + uint32_t MULTI_LEVEL_QOS_CTRL;\ + uint32_t MCIF_WB_SECURITY_LEVEL;\ + uint32_t MCIF_WB_BUF_LUMA_SIZE;\ + uint32_t MCIF_WB_BUF_CHROMA_SIZE;\ + uint32_t MCIF_WB_BUF_1_ADDR_Y_HIGH;\ + uint32_t MCIF_WB_BUF_1_ADDR_C_HIGH;\ + uint32_t MCIF_WB_BUF_2_ADDR_Y_HIGH;\ + uint32_t MCIF_WB_BUF_2_ADDR_C_HIGH;\ + uint32_t MCIF_WB_BUF_3_ADDR_Y_HIGH;\ + uint32_t MCIF_WB_BUF_3_ADDR_C_HIGH;\ + uint32_t MCIF_WB_BUF_4_ADDR_Y_HIGH;\ + uint32_t MCIF_WB_BUF_4_ADDR_C_HIGH;\ + uint32_t MCIF_WB_BUF_1_RESOLUTION;\ + uint32_t MCIF_WB_BUF_2_RESOLUTION;\ + uint32_t MCIF_WB_BUF_3_RESOLUTION;\ + uint32_t MCIF_WB_BUF_4_RESOLUTION;\ + uint32_t SMU_WM_CONTROL + +struct dcn20_mmhubbub_registers { + MCIF_WB_REG_VARIABLE_LIST_DCN2_0; +}; + + +struct dcn20_mmhubbub_mask { + MCIF_WB_REG_FIELD_LIST_DCN2_0(uint32_t); +}; + +struct dcn20_mmhubbub_shift { + MCIF_WB_REG_FIELD_LIST_DCN2_0(uint8_t); +}; + +struct dcn20_mmhubbub { + struct mcif_wb base; + const struct dcn20_mmhubbub_registers *mcif_wb_regs; + const struct dcn20_mmhubbub_shift *mcif_wb_shift; + const struct dcn20_mmhubbub_mask *mcif_wb_mask; +}; + +void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb, + struct mcif_irq_params *params); + +void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb); + +void mmhubbub2_disable_mcif(struct mcif_wb *mcif_wb); + +void mcifwb2_dump_frame(struct mcif_wb *mcif_wb, + struct mcif_buf_params *mcif_params, + enum dwb_scaler_mode out_format, + unsigned int dest_width, + unsigned int dest_height, + struct mcif_wb_frame_dump_info *dump_info, + unsigned char *luma_buffer, + unsigned char *chroma_buffer, + unsigned char *dest_luma_buffer, + unsigned char *dest_chroma_buffer); + 
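/*
 * Editor's illustrative sketch -- not part of this patch. It restates how the
 * MCIF_ADDR()/MCIF_ADDR_HIGH() macros in dcn20_mmhubbub.c split a writeback
 * buffer address across the MCIF_WB_BUF_*_ADDR_* and *_ADDR_*_HIGH registers;
 * mcif_split_addr_example() is a hypothetical helper used only for illustration.
 */
static inline void mcif_split_addr_example(unsigned long long addr,
		unsigned int *addr_lo, unsigned int *addr_hi)
{
	/* Low 40 bits, biased by 0xFE and shifted right by 8, so the register
	 * carries the buffer address at 256-byte granularity (MCIF_ADDR()).
	 */
	*addr_lo = (unsigned int)(((addr & 0xffffffffffULL) + 0xFE) >> 8);

	/* Bits above bit 39 are programmed separately (MCIF_ADDR_HIGH()). */
	*addr_hi = (unsigned int)(addr >> 40);
}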
+void dcn20_mmhubbub_construct(struct dcn20_mmhubbub *mcif_wb20, + struct dc_context *ctx, + const struct dcn20_mmhubbub_registers *mcif_wb_regs, + const struct dcn20_mmhubbub_shift *mcif_wb_shift, + const struct dcn20_mmhubbub_mask *mcif_wb_mask, + int inst); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile index 7f7458c07e2a5b..5402c3529f5eee 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile @@ -24,6 +24,33 @@ # ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN10 +############################################################################### +MPC_DCN10 = dcn10_mpc.o + +AMD_DAL_MPC_DCN10 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn10/,$(MPC_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN10) + +############################################################################### +# DCN20 +############################################################################### +MPC_DCN20 = dcn20_mpc.o + +AMD_DAL_MPC_DCN20 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn20/,$(MPC_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN20) + +############################################################################### +# DCN30 +############################################################################### +MPC_DCN30 = dcn30_mpc.o + +AMD_DAL_MPC_DCN30 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn30/,$(MPC_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN30) + ############################################################################### # DCN32 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c new file mode 100644 index 00000000000000..f2f55565e98a4c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c @@ -0,0 +1,537 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn10_mpc.h" + +#define REG(reg)\ + mpc10->mpc_regs->reg + +#define CTX \ + mpc10->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name + + +void mpc1_set_bg_color(struct mpc *mpc, + struct tg_color *bg_color, + int mpcc_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id); + uint32_t bg_r_cr, bg_g_y, bg_b_cb; + + bottommost_mpcc->blnd_cfg.black_color = *bg_color; + + /* find bottommost mpcc. */ + while (bottommost_mpcc->mpcc_bot) { + /* avoid circular linked link */ + ASSERT(bottommost_mpcc != bottommost_mpcc->mpcc_bot); + if (bottommost_mpcc == bottommost_mpcc->mpcc_bot) + break; + + bottommost_mpcc = bottommost_mpcc->mpcc_bot; + } + + /* mpc color is 12 bit. tg_color is 10 bit */ + /* todo: might want to use 16 bit to represent color and have each + * hw block translate to correct color depth. + */ + bg_r_cr = bg_color->color_r_cr << 2; + bg_g_y = bg_color->color_g_y << 2; + bg_b_cb = bg_color->color_b_cb << 2; + + REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0, + MPCC_BG_R_CR, bg_r_cr); + REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0, + MPCC_BG_G_Y, bg_g_y); + REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0, + MPCC_BG_B_CB, bg_b_cb); +} + +static void mpc1_update_blending( + struct mpc *mpc, + struct mpcc_blnd_cfg *blnd_cfg, + int mpcc_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id); + + REG_UPDATE_5(MPCC_CONTROL[mpcc_id], + MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only, + MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha, + MPCC_GLOBAL_GAIN, blnd_cfg->global_gain); + + mpcc->blnd_cfg = *blnd_cfg; +} + +void mpc1_update_stereo_mix( + struct mpc *mpc, + struct mpcc_sm_cfg *sm_cfg, + int mpcc_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + + REG_UPDATE_6(MPCC_SM_CONTROL[mpcc_id], + MPCC_SM_EN, sm_cfg->enable, + MPCC_SM_MODE, sm_cfg->sm_mode, + MPCC_SM_FRAME_ALT, sm_cfg->frame_alt, + MPCC_SM_FIELD_ALT, sm_cfg->field_alt, + MPCC_SM_FORCE_NEXT_FRAME_POL, sm_cfg->force_next_frame_porlarity, + MPCC_SM_FORCE_NEXT_TOP_POL, sm_cfg->force_next_field_polarity); +} +void mpc1_assert_idle_mpcc(struct mpc *mpc, int id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + + ASSERT(!(mpc10->mpcc_in_use_mask & 1 << id)); + REG_WAIT(MPCC_STATUS[id], + MPCC_IDLE, 1, + 1, 100000); +} + +struct mpcc *mpc1_get_mpcc(struct mpc *mpc, int mpcc_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + + ASSERT(mpcc_id < mpc10->num_mpcc); + return &(mpc->mpcc_array[mpcc_id]); +} + +struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) +{ + struct mpcc *tmp_mpcc = tree->opp_list; + + while (tmp_mpcc != NULL) { + if (tmp_mpcc->dpp_id == dpp_id) + return tmp_mpcc; + + /* avoid circular linked list */ + ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot); + if (tmp_mpcc == tmp_mpcc->mpcc_bot) + break; + + tmp_mpcc = tmp_mpcc->mpcc_bot; + } + return NULL; +} + +bool mpc1_is_mpcc_idle(struct mpc *mpc, int mpcc_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + unsigned int top_sel; + unsigned int opp_id; + unsigned int idle; + + REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel); + REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id); + REG_GET(MPCC_STATUS[mpcc_id], MPCC_IDLE, &idle); + if (top_sel == 0xf && opp_id == 0xf && idle) + return 
true; + else + return false; +} + +void mpc1_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + unsigned int top_sel, mpc_busy, mpc_idle; + + REG_GET(MPCC_TOP_SEL[mpcc_id], + MPCC_TOP_SEL, &top_sel); + + if (top_sel == 0xf) { + REG_GET_2(MPCC_STATUS[mpcc_id], + MPCC_BUSY, &mpc_busy, + MPCC_IDLE, &mpc_idle); + + ASSERT(mpc_busy == 0); + ASSERT(mpc_idle == 1); + } +} + +/* + * Insert DPP into MPC tree based on specified blending position. + * Only used for planes that are part of blending chain for OPP output + * + * Parameters: + * [in/out] mpc - MPC context. + * [in/out] tree - MPC tree structure that plane will be added to. + * [in] blnd_cfg - MPCC blending configuration for the new blending layer. + * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. + * stereo mix must disable for the very bottom layer of the tree config. + * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane. + * [in] dpp_id - DPP instance for the plane to be added. + * [in] mpcc_id - The MPCC physical instance to use for blending. + * + * Return: struct mpcc* - MPCC that was added. + */ +struct mpcc *mpc1_insert_plane( + struct mpc *mpc, + struct mpc_tree *tree, + struct mpcc_blnd_cfg *blnd_cfg, + struct mpcc_sm_cfg *sm_cfg, + struct mpcc *insert_above_mpcc, + int dpp_id, + int mpcc_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + struct mpcc *new_mpcc = NULL; + + /* sanity check parameters */ + ASSERT(mpcc_id < mpc10->num_mpcc); + ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id)); + + if (insert_above_mpcc) { + /* check insert_above_mpcc exist in tree->opp_list */ + struct mpcc *temp_mpcc = tree->opp_list; + + if (temp_mpcc != insert_above_mpcc) + while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) + temp_mpcc = temp_mpcc->mpcc_bot; + if (temp_mpcc == NULL) + return NULL; + } + + /* Get and update MPCC struct parameters */ + new_mpcc = mpc1_get_mpcc(mpc, mpcc_id); + new_mpcc->dpp_id = dpp_id; + + /* program mux and MPCC_MODE */ + if (insert_above_mpcc) { + new_mpcc->mpcc_bot = insert_above_mpcc; + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, insert_above_mpcc->mpcc_id); + REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING); + } else { + new_mpcc->mpcc_bot = NULL; + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); + REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_ONLY); + } + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id); + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id); + + /* Configure VUPDATE lock set for this MPCC to map to the OPP */ + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id); + + /* update mpc tree mux setting */ + if (tree->opp_list == insert_above_mpcc) { + /* insert the toppest mpcc */ + tree->opp_list = new_mpcc; + REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, mpcc_id); + } else { + /* find insert position */ + struct mpcc *temp_mpcc = tree->opp_list; + + while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) + temp_mpcc = temp_mpcc->mpcc_bot; + if (temp_mpcc && temp_mpcc->mpcc_bot == insert_above_mpcc) { + REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, MPCC_BOT_SEL, mpcc_id); + temp_mpcc->mpcc_bot = new_mpcc; + if (!insert_above_mpcc) + REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id], + MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING); + } + } + + /* update the blending configuration */ + mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id); + + /* update the 
stereo mix settings, if provided */ + if (sm_cfg != NULL) { + new_mpcc->sm_cfg = *sm_cfg; + mpc1_update_stereo_mix(mpc, sm_cfg, mpcc_id); + } + + /* mark this mpcc as in use */ + mpc10->mpcc_in_use_mask |= 1 << mpcc_id; + + return new_mpcc; +} + +/* + * Remove a specified MPCC from the MPC tree. + * + * Parameters: + * [in/out] mpc - MPC context. + * [in/out] tree - MPC tree structure that plane will be removed from. + * [in/out] mpcc - MPCC to be removed from tree. + * + * Return: void + */ +void mpc1_remove_mpcc( + struct mpc *mpc, + struct mpc_tree *tree, + struct mpcc *mpcc_to_remove) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + bool found = false; + int mpcc_id = mpcc_to_remove->mpcc_id; + + if (tree->opp_list == mpcc_to_remove) { + found = true; + /* remove MPCC from top of tree */ + if (mpcc_to_remove->mpcc_bot) { + /* set the next MPCC in list to be the top MPCC */ + tree->opp_list = mpcc_to_remove->mpcc_bot; + REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, tree->opp_list->mpcc_id); + } else { + /* there are no other MPCC is list */ + tree->opp_list = NULL; + REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, 0xf); + } + } else { + /* find mpcc to remove MPCC list */ + struct mpcc *temp_mpcc = tree->opp_list; + + while (temp_mpcc && temp_mpcc->mpcc_bot != mpcc_to_remove) + temp_mpcc = temp_mpcc->mpcc_bot; + + if (temp_mpcc && temp_mpcc->mpcc_bot == mpcc_to_remove) { + found = true; + temp_mpcc->mpcc_bot = mpcc_to_remove->mpcc_bot; + if (mpcc_to_remove->mpcc_bot) { + /* remove MPCC in middle of list */ + REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, + MPCC_BOT_SEL, mpcc_to_remove->mpcc_bot->mpcc_id); + } else { + /* remove MPCC from bottom of list */ + REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, + MPCC_BOT_SEL, 0xf); + REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id], + MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH); + } + } + } + + if (found) { + /* turn off MPCC mux registers */ + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); + + /* mark this mpcc as not in use */ + mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id); + mpcc_to_remove->dpp_id = 0xf; + mpcc_to_remove->mpcc_bot = NULL; + } else { + /* In case of resume from S3/S4, remove mpcc from bios left over */ + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); + } +} + +static void mpc1_init_mpcc(struct mpcc *mpcc, int mpcc_inst) +{ + mpcc->mpcc_id = mpcc_inst; + mpcc->dpp_id = 0xf; + mpcc->mpcc_bot = NULL; + mpcc->blnd_cfg.overlap_only = false; + mpcc->blnd_cfg.global_alpha = 0xff; + mpcc->blnd_cfg.global_gain = 0xff; + mpcc->sm_cfg.enable = false; +} + +/* + * Reset the MPCC HW status by disconnecting all muxes. + * + * Parameters: + * [in/out] mpc - MPC context. 
+ * + * Return: void + */ +void mpc1_mpc_init(struct mpc *mpc) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + int mpcc_id; + int opp_id; + + mpc10->mpcc_in_use_mask = 0; + for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) { + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); + + mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); + } + + for (opp_id = 0; opp_id < MAX_OPP; opp_id++) { + if (REG(MUX[opp_id])) + REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf); + } +} + +void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + int opp_id; + + REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id); + + REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); + REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); + REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); + + mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); + + if (opp_id < MAX_OPP && REG(MUX[opp_id])) + REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf); +} + + +void mpc1_init_mpcc_list_from_hw( + struct mpc *mpc, + struct mpc_tree *tree) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + unsigned int opp_id; + unsigned int top_sel; + unsigned int bot_sel; + unsigned int out_mux; + struct mpcc *mpcc; + int mpcc_id; + int bot_mpcc_id; + + REG_GET(MUX[tree->opp_id], MPC_OUT_MUX, &out_mux); + + if (out_mux != 0xf) { + for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) { + REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel); + REG_GET(MPCC_BOT_SEL[mpcc_id], MPCC_BOT_SEL, &bot_sel); + + if (bot_sel == mpcc_id) + bot_sel = 0xf; + + if ((opp_id == tree->opp_id) && (top_sel != 0xf)) { + mpcc = mpc1_get_mpcc(mpc, mpcc_id); + mpcc->dpp_id = top_sel; + mpc10->mpcc_in_use_mask |= 1 << mpcc_id; + + if (out_mux == mpcc_id) + tree->opp_list = mpcc; + if (bot_sel != 0xf && bot_sel < mpc10->num_mpcc) { + bot_mpcc_id = bot_sel; + REG_GET(MPCC_OPP_ID[bot_mpcc_id], MPCC_OPP_ID, &opp_id); + REG_GET(MPCC_TOP_SEL[bot_mpcc_id], MPCC_TOP_SEL, &top_sel); + if ((opp_id == tree->opp_id) && (top_sel != 0xf)) { + struct mpcc *mpcc_bottom = mpc1_get_mpcc(mpc, bot_mpcc_id); + + mpcc->mpcc_bot = mpcc_bottom; + } + } + } + } + } +} + +void mpc1_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + + REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); + REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); + REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, + MPCC_ALPHA_BLND_MODE, &s->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); + REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, + MPCC_BUSY, &s->busy); +} + +void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + + REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 
1 : 0); +} + +unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + uint32_t val = 0xf; + + if (opp_id < MAX_OPP && REG(MUX[opp_id])) + REG_GET(MUX[opp_id], MPC_OUT_MUX, &val); + + return val; +} + +static const struct mpc_funcs dcn10_mpc_funcs = { + .read_mpcc_state = mpc1_read_mpcc_state, + .insert_plane = mpc1_insert_plane, + .remove_mpcc = mpc1_remove_mpcc, + .mpc_init = mpc1_mpc_init, + .mpc_init_single_inst = mpc1_mpc_init_single_inst, + .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, + .wait_for_idle = mpc1_assert_idle_mpcc, + .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, + .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, + .update_blending = mpc1_update_blending, + .cursor_lock = mpc1_cursor_lock, + .set_denorm = NULL, + .set_denorm_clamp = NULL, + .set_output_csc = NULL, + .set_output_gamma = NULL, + .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .set_bg_color = mpc1_set_bg_color, +}; + +void dcn10_mpc_construct(struct dcn10_mpc *mpc10, + struct dc_context *ctx, + const struct dcn_mpc_registers *mpc_regs, + const struct dcn_mpc_shift *mpc_shift, + const struct dcn_mpc_mask *mpc_mask, + int num_mpcc) +{ + int i; + + mpc10->base.ctx = ctx; + + mpc10->base.funcs = &dcn10_mpc_funcs; + + mpc10->mpc_regs = mpc_regs; + mpc10->mpc_shift = mpc_shift; + mpc10->mpc_mask = mpc_mask; + + mpc10->mpcc_in_use_mask = 0; + mpc10->num_mpcc = num_mpcc; + + for (i = 0; i < MAX_MPCC; i++) + mpc1_init_mpcc(&mpc10->base.mpcc_array[i], i); +} + diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h new file mode 100644 index 00000000000000..dbfffc6383dcbc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h @@ -0,0 +1,204 @@ +/* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_MPCC_DCN10_H__ +#define __DC_MPCC_DCN10_H__ + +#include "mpc.h" + +#define TO_DCN10_MPC(mpc_base) \ + container_of(mpc_base, struct dcn10_mpc, base) + +#define MPC_COMMON_REG_LIST_DCN1_0(inst) \ + SRII(MPCC_TOP_SEL, MPCC, inst),\ + SRII(MPCC_BOT_SEL, MPCC, inst),\ + SRII(MPCC_CONTROL, MPCC, inst),\ + SRII(MPCC_STATUS, MPCC, inst),\ + SRII(MPCC_OPP_ID, MPCC, inst),\ + SRII(MPCC_BG_G_Y, MPCC, inst),\ + SRII(MPCC_BG_R_CR, MPCC, inst),\ + SRII(MPCC_BG_B_CB, MPCC, inst),\ + SRII(MPCC_SM_CONTROL, MPCC, inst),\ + SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) + +#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \ + SRII(MUX, MPC_OUT, inst),\ + VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) + +#define MPC_COMMON_REG_VARIABLE_LIST \ + uint32_t MPCC_TOP_SEL[MAX_MPCC]; \ + uint32_t MPCC_BOT_SEL[MAX_MPCC]; \ + uint32_t MPCC_CONTROL[MAX_MPCC]; \ + uint32_t MPCC_STATUS[MAX_MPCC]; \ + uint32_t MPCC_OPP_ID[MAX_MPCC]; \ + uint32_t MPCC_BG_G_Y[MAX_MPCC]; \ + uint32_t MPCC_BG_R_CR[MAX_MPCC]; \ + uint32_t MPCC_BG_B_CB[MAX_MPCC]; \ + uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \ + uint32_t MUX[MAX_OPP]; \ + uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \ + uint32_t CUR[MAX_OPP]; + +#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ + SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\ + SF(MPCC0_MPCC_BOT_SEL, MPCC_BOT_SEL, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_MODE, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_BLND_MODE, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_MULTIPLIED_MODE, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BLND_ACTIVE_OVERLAP_ONLY, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_ALPHA, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_GAIN, mask_sh),\ + SF(MPCC0_MPCC_STATUS, MPCC_IDLE, mask_sh),\ + SF(MPCC0_MPCC_STATUS, MPCC_BUSY, mask_sh),\ + SF(MPCC0_MPCC_OPP_ID, MPCC_OPP_ID, mask_sh),\ + SF(MPCC0_MPCC_BG_G_Y, MPCC_BG_G_Y, mask_sh),\ + SF(MPCC0_MPCC_BG_R_CR, MPCC_BG_R_CR, mask_sh),\ + SF(MPCC0_MPCC_BG_B_CB, MPCC_BG_B_CB, mask_sh),\ + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_EN, mask_sh),\ + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_MODE, mask_sh),\ + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FRAME_ALT, mask_sh),\ + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\ + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\ + SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\ + SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh) + +#define MPC_REG_FIELD_LIST(type) \ + type MPCC_TOP_SEL;\ + type MPCC_BOT_SEL;\ + type MPCC_MODE;\ + type MPCC_ALPHA_BLND_MODE;\ + type MPCC_ALPHA_MULTIPLIED_MODE;\ + type MPCC_BLND_ACTIVE_OVERLAP_ONLY;\ + type MPCC_GLOBAL_ALPHA;\ + type MPCC_GLOBAL_GAIN;\ + type MPCC_IDLE;\ + type MPCC_BUSY;\ + type MPCC_OPP_ID;\ + type MPCC_BG_G_Y;\ + type MPCC_BG_R_CR;\ + type MPCC_BG_B_CB;\ + type MPCC_SM_EN;\ + type MPCC_SM_MODE;\ + type MPCC_SM_FRAME_ALT;\ + type MPCC_SM_FIELD_ALT;\ + type MPCC_SM_FORCE_NEXT_FRAME_POL;\ + type MPCC_SM_FORCE_NEXT_TOP_POL;\ + type MPC_OUT_MUX;\ + type MPCC_UPDATE_LOCK_SEL;\ + type CUR_VUPDATE_LOCK_SET; + +struct dcn_mpc_registers { + MPC_COMMON_REG_VARIABLE_LIST +}; + +struct dcn_mpc_shift { + MPC_REG_FIELD_LIST(uint8_t) +}; + +struct dcn_mpc_mask { + MPC_REG_FIELD_LIST(uint32_t) +}; + +struct dcn10_mpc { + struct mpc base; + + int mpcc_in_use_mask; + int num_mpcc; + const struct dcn_mpc_registers *mpc_regs; + const struct dcn_mpc_shift *mpc_shift; + const struct dcn_mpc_mask *mpc_mask; +}; + +void dcn10_mpc_construct(struct dcn10_mpc *mpcc10, + struct dc_context *ctx, + 
const struct dcn_mpc_registers *mpc_regs, + const struct dcn_mpc_shift *mpc_shift, + const struct dcn_mpc_mask *mpc_mask, + int num_mpcc); + +struct mpcc *mpc1_insert_plane( + struct mpc *mpc, + struct mpc_tree *tree, + struct mpcc_blnd_cfg *blnd_cfg, + struct mpcc_sm_cfg *sm_cfg, + struct mpcc *insert_above_mpcc, + int dpp_id, + int mpcc_id); + +void mpc1_remove_mpcc( + struct mpc *mpc, + struct mpc_tree *tree, + struct mpcc *mpcc); + +void mpc1_mpc_init( + struct mpc *mpc); + +void mpc1_mpc_init_single_inst( + struct mpc *mpc, + unsigned int mpcc_id); + +void mpc1_assert_idle_mpcc( + struct mpc *mpc, + int id); + +void mpc1_set_bg_color( + struct mpc *mpc, + struct tg_color *bg_color, + int id); + +void mpc1_update_stereo_mix( + struct mpc *mpc, + struct mpcc_sm_cfg *sm_cfg, + int mpcc_id); + +bool mpc1_is_mpcc_idle( + struct mpc *mpc, + int mpcc_id); + +void mpc1_assert_mpcc_idle_before_connect( + struct mpc *mpc, + int mpcc_id); + +void mpc1_init_mpcc_list_from_hw( + struct mpc *mpc, + struct mpc_tree *tree); + +struct mpcc *mpc1_get_mpcc( + struct mpc *mpc, + int mpcc_id); + +struct mpcc *mpc1_get_mpcc_for_dpp( + struct mpc_tree *tree, + int dpp_id); + +void mpc1_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s); + +void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock); + +unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c new file mode 100644 index 00000000000000..ea73473b970a6e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c @@ -0,0 +1,610 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dcn20_mpc.h" + +#include "reg_helper.h" +#include "dc.h" +#include "mem_input.h" +#include "dcn10/dcn10_cm_common.h" + +#define REG(reg)\ + mpc20->mpc_regs->reg + +#define IND_REG(index) \ + (index) + +#define CTX \ + mpc20->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + mpc20->mpc_shift->field_name, mpc20->mpc_mask->field_name + +#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) + +void mpc2_update_blending( + struct mpc *mpc, + struct mpcc_blnd_cfg *blnd_cfg, + int mpcc_id) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id); + + REG_UPDATE_7(MPCC_CONTROL[mpcc_id], + MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only, + MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha, + MPCC_GLOBAL_GAIN, blnd_cfg->global_gain, + MPCC_BG_BPC, blnd_cfg->background_color_bpc, + MPCC_BOT_GAIN_MODE, blnd_cfg->bottom_gain_mode); + + REG_SET(MPCC_TOP_GAIN[mpcc_id], 0, MPCC_TOP_GAIN, blnd_cfg->top_gain); + REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain); + REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain); + + mpcc->blnd_cfg = *blnd_cfg; +} + +void mpc2_set_denorm( + struct mpc *mpc, + int opp_id, + enum dc_color_depth output_depth) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + int denorm_mode = 0; + + switch (output_depth) { + case COLOR_DEPTH_666: + denorm_mode = 1; + break; + case COLOR_DEPTH_888: + denorm_mode = 2; + break; + case COLOR_DEPTH_999: + denorm_mode = 3; + break; + case COLOR_DEPTH_101010: + denorm_mode = 4; + break; + case COLOR_DEPTH_111111: + denorm_mode = 5; + break; + case COLOR_DEPTH_121212: + denorm_mode = 6; + break; + case COLOR_DEPTH_141414: + case COLOR_DEPTH_161616: + default: + /* not valid used case! */ + break; + } + + REG_UPDATE(DENORM_CONTROL[opp_id], + MPC_OUT_DENORM_MODE, denorm_mode); +} + +void mpc2_set_denorm_clamp( + struct mpc *mpc, + int opp_id, + struct mpc_denorm_clamp denorm_clamp) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + REG_UPDATE_2(DENORM_CONTROL[opp_id], + MPC_OUT_DENORM_CLAMP_MAX_R_CR, denorm_clamp.clamp_max_r_cr, + MPC_OUT_DENORM_CLAMP_MIN_R_CR, denorm_clamp.clamp_min_r_cr); + REG_UPDATE_2(DENORM_CLAMP_G_Y[opp_id], + MPC_OUT_DENORM_CLAMP_MAX_G_Y, denorm_clamp.clamp_max_g_y, + MPC_OUT_DENORM_CLAMP_MIN_G_Y, denorm_clamp.clamp_min_g_y); + REG_UPDATE_2(DENORM_CLAMP_B_CB[opp_id], + MPC_OUT_DENORM_CLAMP_MAX_B_CB, denorm_clamp.clamp_max_b_cb, + MPC_OUT_DENORM_CLAMP_MIN_B_CB, denorm_clamp.clamp_min_b_cb); +} + + + +void mpc2_set_output_csc( + struct mpc *mpc, + int opp_id, + const uint16_t *regval, + enum mpc_output_csc_mode ocsc_mode) +{ + uint32_t cur_mode; + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + struct color_matrices_reg ocsc_regs; + + if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); + return; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + + /* determine which CSC coefficients (A or B) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, + MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, + MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); + + if (cur_mode != MPC_OUTPUT_CSC_COEF_A) + ocsc_mode = MPC_OUTPUT_CSC_COEF_A; + else + ocsc_mode = MPC_OUTPUT_CSC_COEF_B; + + ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A; + ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A; + ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; + ocsc_regs.masks.csc_c12 = mpc20->mpc_mask->MPC_OCSC_C12_A; + + if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) { + ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]); + ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]); + } else { + ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); + ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); + } + + cm_helper_program_color_matrices( + mpc20->base.ctx, + regval, + &ocsc_regs); + + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); +} + +void mpc2_set_ocsc_default( + struct mpc *mpc, + int opp_id, + enum dc_color_space color_space, + enum mpc_output_csc_mode ocsc_mode) +{ + uint32_t cur_mode; + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + uint32_t arr_size; + struct color_matrices_reg ocsc_regs; + const uint16_t *regval = NULL; + + if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); + return; + } + + regval = find_color_matrix(color_space, &arr_size); + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + + /* determine which CSC coefficients (A or B) we are using + * currently. select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, + MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, + MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); + + if (cur_mode != MPC_OUTPUT_CSC_COEF_A) + ocsc_mode = MPC_OUTPUT_CSC_COEF_A; + else + ocsc_mode = MPC_OUTPUT_CSC_COEF_B; + + ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A; + ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A; + ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; + ocsc_regs.masks.csc_c12 = mpc20->mpc_mask->MPC_OCSC_C12_A; + + + if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) { + ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]); + ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]); + } else { + ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); + ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); + } + + cm_helper_program_color_matrices( + mpc20->base.ctx, + regval, + &ocsc_regs); + + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); +} + +static void mpc2_ogam_get_reg_field( + struct mpc *mpc, + struct xfer_func_reg *reg) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + reg->shifts.exp_region0_lut_offset = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + 
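/*
 * Editor's sketch, not part of the patch above: mpc2_set_output_csc() and
 * mpc2_set_ocsc_default() both read back which coefficient set (A or B) the
 * hardware is currently using and then program the *other* one before
 * switching MPC_OCSC_MODE to it, so the new matrix only takes effect on a
 * frame boundary. The enum and helper below are hypothetical names that
 * restate that double-buffer selection rule in isolation.
 */
enum toy_csc_set { TOY_CSC_COEF_A, TOY_CSC_COEF_B };

static enum toy_csc_set toy_pick_inactive_csc_set(enum toy_csc_set active)
{
	/* Always write the set the hardware is not reading, then flip to it. */
	return (active == TOY_CSC_COEF_A) ? TOY_CSC_COEF_B : TOY_CSC_COEF_A;
}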
reg->masks.exp_region1_num_segments = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->shifts.field_region_end = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; + reg->masks.field_region_linear_slope = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; + reg->shifts.exp_region_start = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; +} + +void mpc20_power_on_ogam_lut( + struct mpc *mpc, int mpcc_id, + bool power_on) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0, + MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0); + +} + +static void mpc20_configure_ogam_lut( + struct mpc *mpc, int mpcc_id, + bool is_ram_a) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + REG_UPDATE_2(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id], + MPCC_OGAM_LUT_WRITE_EN_MASK, 7, + MPCC_OGAM_LUT_RAM_SEL, is_ram_a == true ? 0:1); + + REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0); +} + +static enum dc_lut_mode mpc20_get_ogam_current(struct mpc *mpc, int mpcc_id) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id], MPCC_OGAM_CONFIG_STATUS, &state_mode); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +static void mpc2_program_lutb(struct mpc *mpc, int mpcc_id, + const struct pwl_params *params) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + struct xfer_func_reg gam_regs; + + mpc2_ogam_get_reg_field(mpc, &gam_regs); + + gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMB_START_CNTL_B[mpcc_id]); + gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMB_START_CNTL_G[mpcc_id]); + gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMB_START_CNTL_R[mpcc_id]); + gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_B[mpcc_id]); + gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_G[mpcc_id]); + gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_R[mpcc_id]); + gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMB_END_CNTL1_B[mpcc_id]); + gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMB_END_CNTL2_B[mpcc_id]); + gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMB_END_CNTL1_G[mpcc_id]); + gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMB_END_CNTL2_G[mpcc_id]); + gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMB_END_CNTL1_R[mpcc_id]); + gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMB_END_CNTL2_R[mpcc_id]); + gam_regs.region_start = REG(MPCC_OGAM_RAMB_REGION_0_1[mpcc_id]); + gam_regs.region_end = REG(MPCC_OGAM_RAMB_REGION_32_33[mpcc_id]); + + cm_helper_program_xfer_func(mpc20->base.ctx, 
params, &gam_regs); + +} + +static void mpc2_program_luta(struct mpc *mpc, int mpcc_id, + const struct pwl_params *params) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + struct xfer_func_reg gam_regs; + + mpc2_ogam_get_reg_field(mpc, &gam_regs); + + gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMA_START_CNTL_B[mpcc_id]); + gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMA_START_CNTL_G[mpcc_id]); + gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMA_START_CNTL_R[mpcc_id]); + gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_B[mpcc_id]); + gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_G[mpcc_id]); + gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_R[mpcc_id]); + gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMA_END_CNTL1_B[mpcc_id]); + gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMA_END_CNTL2_B[mpcc_id]); + gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMA_END_CNTL1_G[mpcc_id]); + gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMA_END_CNTL2_G[mpcc_id]); + gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMA_END_CNTL1_R[mpcc_id]); + gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMA_END_CNTL2_R[mpcc_id]); + gam_regs.region_start = REG(MPCC_OGAM_RAMA_REGION_0_1[mpcc_id]); + gam_regs.region_end = REG(MPCC_OGAM_RAMA_REGION_32_33[mpcc_id]); + + cm_helper_program_xfer_func(mpc20->base.ctx, params, &gam_regs); + +} + +static void mpc20_program_ogam_pwl( + struct mpc *mpc, int mpcc_id, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + PERF_TRACE(); + REG_SEQ_START(); + + for (i = 0 ; i < num; i++) { + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].blue_reg); + + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, + MPCC_OGAM_LUT_DATA, rgb[i].delta_red_reg); + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, + MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg); + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, + MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg); + } + + REG_SEQ_SUBMIT(); + PERF_TRACE(); + REG_SEQ_WAIT_DONE(); + PERF_TRACE(); +} + +static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, + enum dc_lut_mode current_mode, + enum dc_lut_mode next_mode) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + if (mpc->ctx->dc->debug.cm_in_bypass) { + REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); + return; + } + + if (mpc->ctx->dc->work_arounds.dedcn20_305_wa == false) { + /*hw fixed in new review*/ + return; + } + if (current_mode == LUT_BYPASS) + /*this will only work if OTG is locked. + *if we were to support OTG unlock case, + *the workaround will be more complex + */ + REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, + next_mode == LUT_RAM_A ? 
1:2); +} + +void mpc2_set_output_gamma( + struct mpc *mpc, + int mpcc_id, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + if (mpc->ctx->dc->debug.cm_in_bypass) { + REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); + return; + } + + if (params == NULL) { + REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); + return; + } + + current_mode = mpc20_get_ogam_current(mpc, mpcc_id); + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + mpc20_power_on_ogam_lut(mpc, mpcc_id, true); + mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + mpc2_program_luta(mpc, mpcc_id, params); + else + mpc2_program_lutb(mpc, mpcc_id, params); + + apply_DEDCN20_305_wa(mpc, mpcc_id, current_mode, next_mode); + + mpc20_program_ogam_pwl( + mpc, mpcc_id, params->rgb_resulted, params->hw_points_num); + + REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, + next_mode == LUT_RAM_A ? 1:2); +} +void mpc2_assert_idle_mpcc(struct mpc *mpc, int id) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + unsigned int mpc_disabled; + + ASSERT(!(mpc20->mpcc_in_use_mask & 1 << id)); + REG_GET(MPCC_STATUS[id], MPCC_DISABLED, &mpc_disabled); + if (mpc_disabled) + return; + + REG_WAIT(MPCC_STATUS[id], + MPCC_IDLE, 1, + 1, 100000); +} + +void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + unsigned int top_sel, mpc_busy, mpc_idle, mpc_disabled; + + REG_GET(MPCC_TOP_SEL[mpcc_id], + MPCC_TOP_SEL, &top_sel); + + REG_GET_3(MPCC_STATUS[mpcc_id], + MPCC_BUSY, &mpc_busy, + MPCC_IDLE, &mpc_idle, + MPCC_DISABLED, &mpc_disabled); + + if (top_sel == 0xf) { + ASSERT(!mpc_busy); + ASSERT(mpc_idle); + ASSERT(mpc_disabled); + } else { + ASSERT(!mpc_disabled); + ASSERT(!mpc_idle); + } +} + +static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) +{ + mpcc->mpcc_id = mpcc_inst; + mpcc->dpp_id = 0xf; + mpcc->mpcc_bot = NULL; + mpcc->blnd_cfg.overlap_only = false; + mpcc->blnd_cfg.global_alpha = 0xff; + mpcc->blnd_cfg.global_gain = 0xff; + mpcc->blnd_cfg.background_color_bpc = 4; + mpcc->blnd_cfg.bottom_gain_mode = 0; + mpcc->blnd_cfg.top_gain = 0x1f000; + mpcc->blnd_cfg.bottom_inside_gain = 0x1f000; + mpcc->blnd_cfg.bottom_outside_gain = 0x1f000; + mpcc->sm_cfg.enable = false; +} + +static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) +{ + struct mpcc *tmp_mpcc = tree->opp_list; + + while (tmp_mpcc != NULL) { + if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id) + return tmp_mpcc; + + /* avoid circular linked list */ + ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot); + if (tmp_mpcc == tmp_mpcc->mpcc_bot) + break; + + tmp_mpcc = tmp_mpcc->mpcc_bot; + } + return NULL; +} + +static void mpc2_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); + REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); + REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, + MPCC_ALPHA_BLND_MODE, &s->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); + REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, + MPCC_BUSY, &s->busy); + + /* Gamma block state */ + 
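/*
 * Editor's sketch, not part of the patch above: mpc2_set_output_gamma()
 * ping-pongs between the two OGAM LUT RAMs so a new curve is never written
 * into the RAM the hardware is currently reading from. The helper below is a
 * hypothetical name that mirrors that selection rule using the driver's
 * enum dc_lut_mode values.
 */
static enum dc_lut_mode toy_next_ogam_ram(enum dc_lut_mode current_mode)
{
	/* Bypass or RAM A active -> program RAM B next; otherwise RAM A. */
	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
		return LUT_RAM_B;
	return LUT_RAM_A;
}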
REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst], + MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode); +} + +static const struct mpc_funcs dcn20_mpc_funcs = { + .read_mpcc_state = mpc2_read_mpcc_state, + .insert_plane = mpc1_insert_plane, + .remove_mpcc = mpc1_remove_mpcc, + .mpc_init = mpc1_mpc_init, + .mpc_init_single_inst = mpc1_mpc_init_single_inst, + .update_blending = mpc2_update_blending, + .cursor_lock = mpc1_cursor_lock, + .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp, + .wait_for_idle = mpc2_assert_idle_mpcc, + .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect, + .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, + .set_denorm = mpc2_set_denorm, + .set_denorm_clamp = mpc2_set_denorm_clamp, + .set_output_csc = mpc2_set_output_csc, + .set_ocsc_default = mpc2_set_ocsc_default, + .set_output_gamma = mpc2_set_output_gamma, + .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut, + .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .set_bg_color = mpc1_set_bg_color, +}; + +void dcn20_mpc_construct(struct dcn20_mpc *mpc20, + struct dc_context *ctx, + const struct dcn20_mpc_registers *mpc_regs, + const struct dcn20_mpc_shift *mpc_shift, + const struct dcn20_mpc_mask *mpc_mask, + int num_mpcc) +{ + int i; + + mpc20->base.ctx = ctx; + + mpc20->base.funcs = &dcn20_mpc_funcs; + + mpc20->mpc_regs = mpc_regs; + mpc20->mpc_shift = mpc_shift; + mpc20->mpc_mask = mpc_mask; + + mpc20->mpcc_in_use_mask = 0; + mpc20->num_mpcc = num_mpcc; + + for (i = 0; i < MAX_MPCC; i++) + mpc2_init_mpcc(&mpc20->base.mpcc_array[i], i); +} + diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h new file mode 100644 index 00000000000000..496658f420dbdb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h @@ -0,0 +1,312 @@ +/* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_MPCC_DCN20_H__ +#define __DC_MPCC_DCN20_H__ + +#include "dcn10/dcn10_mpc.h" + +#define TO_DCN20_MPC(mpc_base) \ + container_of(mpc_base, struct dcn20_mpc, base) + +#define MPC_REG_LIST_DCN2_0(inst)\ + MPC_COMMON_REG_LIST_DCN1_0(inst),\ + SRII(MPCC_TOP_GAIN, MPCC, inst),\ + SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst),\ + SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst),\ + SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst),\ + SRII(MPCC_MEM_PWR_CTRL, MPCC, inst),\ + SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_MODE, MPCC_OGAM, inst) + +#define MPC_OUT_MUX_REG_LIST_DCN2_0(inst) \ + MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst),\ + SRII(CSC_MODE, MPC_OUT, inst),\ + SRII(CSC_C11_C12_A, MPC_OUT, inst),\ + SRII(CSC_C33_C34_A, MPC_OUT, inst),\ + SRII(CSC_C11_C12_B, MPC_OUT, inst),\ + SRII(CSC_C33_C34_B, MPC_OUT, inst),\ + SRII(DENORM_CONTROL, MPC_OUT, inst),\ + SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\ + SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst) + +#define MPC_DBG_REG_LIST_DCN2_0() \ + SR(MPC_OCSC_TEST_DEBUG_DATA),\ + SR(MPC_OCSC_TEST_DEBUG_INDEX) + +#define MPC_REG_VARIABLE_LIST_DCN2_0 \ + MPC_COMMON_REG_VARIABLE_LIST \ + uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \ + uint32_t MPCC_BOT_GAIN_INSIDE[MAX_MPCC]; \ + uint32_t MPCC_BOT_GAIN_OUTSIDE[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_START_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_START_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_START_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_END_CNTL1_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_END_CNTL2_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_END_CNTL1_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_END_CNTL2_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_END_CNTL1_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_END_CNTL2_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_REGION_0_1[MAX_MPCC]; \ + uint32_t 
MPCC_OGAM_RAMA_REGION_32_33[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_START_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_START_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_START_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_END_CNTL1_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_END_CNTL2_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_END_CNTL1_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_END_CNTL2_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_END_CNTL1_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_END_CNTL2_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_REGION_0_1[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_REGION_32_33[MAX_MPCC];\ + uint32_t MPCC_MEM_PWR_CTRL[MAX_MPCC];\ + uint32_t MPCC_OGAM_LUT_INDEX[MAX_MPCC];\ + uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\ + uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\ + uint32_t MPCC_OGAM_MODE[MAX_MPCC];\ + uint32_t MPC_OCSC_TEST_DEBUG_DATA;\ + uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\ + uint32_t CSC_MODE[MAX_OPP]; \ + uint32_t CSC_C11_C12_A[MAX_OPP]; \ + uint32_t CSC_C33_C34_A[MAX_OPP]; \ + uint32_t CSC_C11_C12_B[MAX_OPP]; \ + uint32_t CSC_C33_C34_B[MAX_OPP]; \ + uint32_t DENORM_CONTROL[MAX_OPP]; \ + uint32_t DENORM_CLAMP_G_Y[MAX_OPP]; \ + uint32_t DENORM_CLAMP_B_CB[MAX_OPP]; + +#define MPC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \ + MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ + SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ + SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\ + SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ + SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM_RAMB_EXP_REGION_END_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh),\ + 
SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_WRITE_EN_MASK, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_RAM_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_CONFIG_STATUS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_MODE, MPCC_OGAM_MODE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ + SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) + +/* + * DCN2 MPC_OCSC debug status register: + * + * Status index including current OCSC Mode is 1 + * OCSC Mode: [1..0] + */ +#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1 + +#define MPC_DEBUG_REG_LIST_SH_DCN20 \ + .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0 + +#define MPC_DEBUG_REG_LIST_MASK_DCN20 \ + .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3 + +#define MPC_REG_FIELD_LIST_DCN2_0(type) \ + MPC_REG_FIELD_LIST(type)\ + type MPCC_BG_BPC;\ + type MPCC_BOT_GAIN_MODE;\ + type MPCC_TOP_GAIN;\ + type MPCC_BOT_GAIN_INSIDE;\ + type MPCC_BOT_GAIN_OUTSIDE;\ + type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\ + type MPC_OCSC_TEST_DEBUG_INDEX;\ + type MPC_OCSC_MODE;\ + type MPC_OCSC_C11_A;\ + type MPC_OCSC_C12_A;\ + type MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;\ + type MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;\ + type MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;\ + type MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;\ + type MPCC_OGAM_RAMA_EXP_REGION_END_B;\ + type MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;\ + type MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;\ + type MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;\ + type MPCC_OGAM_RAMA_EXP_REGION_START_B;\ + type MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;\ + type MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET;\ + type MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS;\ + type MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET;\ + type MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS;\ + type MPCC_OGAM_RAMB_EXP_REGION_END_B;\ + type MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B;\ + type MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B;\ + type MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;\ + type MPCC_OGAM_RAMB_EXP_REGION_START_B;\ + type MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B;\ + type MPCC_OGAM_MEM_PWR_FORCE;\ + type MPCC_OGAM_LUT_INDEX;\ + type MPCC_OGAM_LUT_WRITE_EN_MASK;\ + type MPCC_OGAM_LUT_RAM_SEL;\ + type MPCC_OGAM_CONFIG_STATUS;\ + type MPCC_OGAM_LUT_DATA;\ + type MPCC_OGAM_MODE;\ + type MPC_OUT_DENORM_MODE;\ + type MPC_OUT_DENORM_CLAMP_MAX_R_CR;\ + type MPC_OUT_DENORM_CLAMP_MIN_R_CR;\ + type MPC_OUT_DENORM_CLAMP_MAX_G_Y;\ + type MPC_OUT_DENORM_CLAMP_MIN_G_Y;\ + type 
MPC_OUT_DENORM_CLAMP_MAX_B_CB;\ + type MPC_OUT_DENORM_CLAMP_MIN_B_CB;\ + type MPCC_DISABLED;\ + type MPCC_OGAM_MEM_PWR_DIS; + +struct dcn20_mpc_registers { + MPC_REG_VARIABLE_LIST_DCN2_0 +}; + +struct dcn20_mpc_shift { + MPC_REG_FIELD_LIST_DCN2_0(uint8_t) +}; + +struct dcn20_mpc_mask { + MPC_REG_FIELD_LIST_DCN2_0(uint32_t) +}; + +struct dcn20_mpc { + struct mpc base; + + int mpcc_in_use_mask; + int num_mpcc; + const struct dcn20_mpc_registers *mpc_regs; + const struct dcn20_mpc_shift *mpc_shift; + const struct dcn20_mpc_mask *mpc_mask; +}; + +void dcn20_mpc_construct(struct dcn20_mpc *mpcc20, + struct dc_context *ctx, + const struct dcn20_mpc_registers *mpc_regs, + const struct dcn20_mpc_shift *mpc_shift, + const struct dcn20_mpc_mask *mpc_mask, + int num_mpcc); + +void mpc2_update_blending( + struct mpc *mpc, + struct mpcc_blnd_cfg *blnd_cfg, + int mpcc_id); + +void mpc2_set_denorm( + struct mpc *mpc, + int opp_id, + enum dc_color_depth output_depth); + +void mpc2_set_denorm_clamp( + struct mpc *mpc, + int opp_id, + struct mpc_denorm_clamp denorm_clamp); + +void mpc2_set_output_csc( + struct mpc *mpc, + int opp_id, + const uint16_t *regval, + enum mpc_output_csc_mode ocsc_mode); + +void mpc2_set_ocsc_default( + struct mpc *mpc, + int opp_id, + enum dc_color_space color_space, + enum mpc_output_csc_mode ocsc_mode); + +void mpc2_set_output_gamma( + struct mpc *mpc, + int mpcc_id, + const struct pwl_params *params); + +void mpc2_assert_idle_mpcc(struct mpc *mpc, int id); +void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id); +void mpc20_power_on_ogam_lut(struct mpc *mpc, int mpcc_id, bool power_on); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c new file mode 100644 index 00000000000000..fe26fde12eeb3c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c @@ -0,0 +1,1558 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn30_mpc.h" +#include "dcn30/dcn30_cm_common.h" +#include "basics/conversion.h" +#include "dcn10/dcn10_cm_common.h" +#include "dc.h" + +#define REG(reg)\ + mpc30->mpc_regs->reg + +#define CTX \ + mpc30->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + mpc30->mpc_shift->field_name, mpc30->mpc_mask->field_name + + +#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) + + +void mpc3_mpc_init(struct mpc *mpc) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + int opp_id; + + mpc1_mpc_init(mpc); + + for (opp_id = 0; opp_id < MAX_OPP; opp_id++) { + if (REG(MUX[opp_id])) + /* disable mpc out rate and flow control */ + REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE, + 1, MPC_OUT_FLOW_CONTROL_COUNT, 0); + } +} + +void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + mpc1_mpc_init_single_inst(mpc, mpcc_id); + + /* assuming mpc out mux is connected to opp with the same index at this + * point in time (e.g. transitioning from vbios to driver) + */ + if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id])) + /* disable mpc out rate and flow control */ + REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE, + 1, MPC_OUT_FLOW_CONTROL_COUNT, 0); +} + +bool mpc3_is_dwb_idle( + struct mpc *mpc, + int dwb_id) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + unsigned int status; + + REG_GET(DWB_MUX[dwb_id], MPC_DWB0_MUX_STATUS, &status); + + if (status == 0xf) + return true; + else + return false; +} + +void mpc3_set_dwb_mux( + struct mpc *mpc, + int dwb_id, + int mpcc_id) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_SET(DWB_MUX[dwb_id], 0, + MPC_DWB0_MUX, mpcc_id); +} + +void mpc3_disable_dwb_mux( + struct mpc *mpc, + int dwb_id) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_SET(DWB_MUX[dwb_id], 0, + MPC_DWB0_MUX, 0xf); +} + +enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id) +{ + /*Contrary to DCN2 and DCN1 wherein a single status register field holds this info; + *in DCN3/3AG, we need to read two separate fields to retrieve the same info + */ + enum dc_lut_mode mode; + uint32_t state_mode; + uint32_t state_ram_lut_in_use; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_GET_2(MPCC_OGAM_CONTROL[mpcc_id], MPCC_OGAM_MODE_CURRENT, &state_mode, + MPCC_OGAM_SELECT_CURRENT, &state_ram_lut_in_use); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 2: + switch (state_ram_lut_in_use) { + case 0: + mode = LUT_RAM_A; + break; + case 1: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +void mpc3_power_on_ogam_lut( + struct mpc *mpc, int mpcc_id, + bool power_on) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + /* + * Powering on: force memory active so the LUT can be updated. + * Powering off: allow entering memory low power mode + * + * Memory low power mode is controlled during MPC OGAM LUT init. + */ + REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], + MPCC_OGAM_MEM_PWR_DIS, power_on != 0); + + /* Wait for memory to be powered on - we won't be able to write to it otherwise. 
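/*
 * Editor's sketch, not part of the patch above: on DCN3 the current OGAM
 * state lives in two fields instead of one, so mpc3_get_ogam_current() has
 * to combine "is the block in LUT mode" (MPCC_OGAM_MODE_CURRENT == 2) with
 * "which RAM is selected" (MPCC_OGAM_SELECT_CURRENT, 0 = A, 1 = B). The
 * helper below is a hypothetical name that restates that decode as pure
 * logic.
 */
static enum dc_lut_mode toy_decode_dcn3_ogam_state(uint32_t mode_current,
						   uint32_t select_current)
{
	if (mode_current != 2)
		return LUT_BYPASS;	/* mode 0 (and anything unexpected) is bypass */
	if (select_current == 0)
		return LUT_RAM_A;
	if (select_current == 1)
		return LUT_RAM_B;
	return LUT_BYPASS;
}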
*/ + if (power_on) + REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10); +} + +static void mpc3_configure_ogam_lut( + struct mpc *mpc, int mpcc_id, + bool is_ram_a) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_UPDATE_2(MPCC_OGAM_LUT_CONTROL[mpcc_id], + MPCC_OGAM_LUT_WRITE_COLOR_MASK, 7, + MPCC_OGAM_LUT_HOST_SEL, is_ram_a == true ? 0:1); + + REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0); +} + +static void mpc3_ogam_get_reg_field( + struct mpc *mpc, + struct dcn3_xfer_func_reg *reg) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + reg->shifts.field_region_start_base = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B; + reg->masks.field_region_start_base = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B; + reg->shifts.field_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_OFFSET_B; + reg->masks.field_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_OFFSET_B; + + reg->shifts.exp_region0_lut_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; + reg->masks.field_region_linear_slope = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; + reg->shifts.exp_region_start = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; +} + +static void mpc3_program_luta(struct mpc *mpc, int mpcc_id, + const struct pwl_params *params) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + struct dcn3_xfer_func_reg gam_regs; + + mpc3_ogam_get_reg_field(mpc, &gam_regs); + + gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMA_START_CNTL_B[mpcc_id]); + gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMA_START_CNTL_G[mpcc_id]); + gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMA_START_CNTL_R[mpcc_id]); + gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B[mpcc_id]); + gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G[mpcc_id]); + gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R[mpcc_id]); + gam_regs.start_end_cntl1_b = 
REG(MPCC_OGAM_RAMA_END_CNTL1_B[mpcc_id]); + gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMA_END_CNTL2_B[mpcc_id]); + gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMA_END_CNTL1_G[mpcc_id]); + gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMA_END_CNTL2_G[mpcc_id]); + gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMA_END_CNTL1_R[mpcc_id]); + gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMA_END_CNTL2_R[mpcc_id]); + gam_regs.region_start = REG(MPCC_OGAM_RAMA_REGION_0_1[mpcc_id]); + gam_regs.region_end = REG(MPCC_OGAM_RAMA_REGION_32_33[mpcc_id]); + //New registers in DCN3AG/DCN OGAM block + gam_regs.offset_b = REG(MPCC_OGAM_RAMA_OFFSET_B[mpcc_id]); + gam_regs.offset_g = REG(MPCC_OGAM_RAMA_OFFSET_G[mpcc_id]); + gam_regs.offset_r = REG(MPCC_OGAM_RAMA_OFFSET_R[mpcc_id]); + gam_regs.start_base_cntl_b = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_B[mpcc_id]); + gam_regs.start_base_cntl_g = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_G[mpcc_id]); + gam_regs.start_base_cntl_r = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_R[mpcc_id]); + + cm_helper_program_gamcor_xfer_func(mpc30->base.ctx, params, &gam_regs); +} + +static void mpc3_program_lutb(struct mpc *mpc, int mpcc_id, + const struct pwl_params *params) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + struct dcn3_xfer_func_reg gam_regs; + + mpc3_ogam_get_reg_field(mpc, &gam_regs); + + gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMB_START_CNTL_B[mpcc_id]); + gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMB_START_CNTL_G[mpcc_id]); + gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMB_START_CNTL_R[mpcc_id]); + gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B[mpcc_id]); + gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G[mpcc_id]); + gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R[mpcc_id]); + gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMB_END_CNTL1_B[mpcc_id]); + gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMB_END_CNTL2_B[mpcc_id]); + gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMB_END_CNTL1_G[mpcc_id]); + gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMB_END_CNTL2_G[mpcc_id]); + gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMB_END_CNTL1_R[mpcc_id]); + gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMB_END_CNTL2_R[mpcc_id]); + gam_regs.region_start = REG(MPCC_OGAM_RAMB_REGION_0_1[mpcc_id]); + gam_regs.region_end = REG(MPCC_OGAM_RAMB_REGION_32_33[mpcc_id]); + //New registers in DCN3AG/DCN OGAM block + gam_regs.offset_b = REG(MPCC_OGAM_RAMB_OFFSET_B[mpcc_id]); + gam_regs.offset_g = REG(MPCC_OGAM_RAMB_OFFSET_G[mpcc_id]); + gam_regs.offset_r = REG(MPCC_OGAM_RAMB_OFFSET_R[mpcc_id]); + gam_regs.start_base_cntl_b = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_B[mpcc_id]); + gam_regs.start_base_cntl_g = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_G[mpcc_id]); + gam_regs.start_base_cntl_r = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_R[mpcc_id]); + + cm_helper_program_gamcor_xfer_func(mpc30->base.ctx, params, &gam_regs); +} + + +static void mpc3_program_ogam_pwl( + struct mpc *mpc, int mpcc_id, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + if (is_rgb_equal(rgb, num)) { + for (i = 0 ; i < num; i++) + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); + } else { + + REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id], + MPCC_OGAM_LUT_WRITE_COLOR_MASK, 4); + + for (i = 0 ; i < num; i++) + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); + + REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0); + + REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id], + 
MPCC_OGAM_LUT_WRITE_COLOR_MASK, 2); + + for (i = 0 ; i < num; i++) + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg); + + REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0); + + REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id], + MPCC_OGAM_LUT_WRITE_COLOR_MASK, 1); + + for (i = 0 ; i < num; i++) + REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].blue_reg); + + } + +} + +void mpc3_set_output_gamma( + struct mpc *mpc, + int mpcc_id, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + if (mpc->ctx->dc->debug.cm_in_bypass) { + REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); + return; + } + + if (params == NULL) { //disable OGAM + REG_SET(MPCC_OGAM_CONTROL[mpcc_id], 0, MPCC_OGAM_MODE, 0); + return; + } + //enable OGAM + REG_SET(MPCC_OGAM_CONTROL[mpcc_id], 0, MPCC_OGAM_MODE, 2); + + current_mode = mpc3_get_ogam_current(mpc, mpcc_id); + if (current_mode == LUT_BYPASS) + next_mode = LUT_RAM_A; + else if (current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + mpc3_power_on_ogam_lut(mpc, mpcc_id, true); + mpc3_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + mpc3_program_luta(mpc, mpcc_id, params); + else + mpc3_program_lutb(mpc, mpcc_id, params); + + mpc3_program_ogam_pwl( + mpc, mpcc_id, params->rgb_resulted, params->hw_points_num); + + /*we need to program 2 fields here as apposed to 1*/ + REG_UPDATE(MPCC_OGAM_CONTROL[mpcc_id], + MPCC_OGAM_SELECT, next_mode == LUT_RAM_A ? 0:1); + + if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) + mpc3_power_on_ogam_lut(mpc, mpcc_id, false); +} + +void mpc3_set_denorm( + struct mpc *mpc, + int opp_id, + enum dc_color_depth output_depth) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + /* De-normalize Fixed U1.13 color data to different target bit depths. 0 is bypass*/ + int denorm_mode = 0; + + switch (output_depth) { + case COLOR_DEPTH_666: + denorm_mode = 1; + break; + case COLOR_DEPTH_888: + denorm_mode = 2; + break; + case COLOR_DEPTH_999: + denorm_mode = 3; + break; + case COLOR_DEPTH_101010: + denorm_mode = 4; + break; + case COLOR_DEPTH_111111: + denorm_mode = 5; + break; + case COLOR_DEPTH_121212: + denorm_mode = 6; + break; + case COLOR_DEPTH_141414: + case COLOR_DEPTH_161616: + default: + /* not valid used case! 
*/ + break; + } + + REG_UPDATE(DENORM_CONTROL[opp_id], + MPC_OUT_DENORM_MODE, denorm_mode); +} + +void mpc3_set_denorm_clamp( + struct mpc *mpc, + int opp_id, + struct mpc_denorm_clamp denorm_clamp) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + /*program min and max clamp values for the pixel components*/ + REG_UPDATE_2(DENORM_CONTROL[opp_id], + MPC_OUT_DENORM_CLAMP_MAX_R_CR, denorm_clamp.clamp_max_r_cr, + MPC_OUT_DENORM_CLAMP_MIN_R_CR, denorm_clamp.clamp_min_r_cr); + REG_UPDATE_2(DENORM_CLAMP_G_Y[opp_id], + MPC_OUT_DENORM_CLAMP_MAX_G_Y, denorm_clamp.clamp_max_g_y, + MPC_OUT_DENORM_CLAMP_MIN_G_Y, denorm_clamp.clamp_min_g_y); + REG_UPDATE_2(DENORM_CLAMP_B_CB[opp_id], + MPC_OUT_DENORM_CLAMP_MAX_B_CB, denorm_clamp.clamp_max_b_cb, + MPC_OUT_DENORM_CLAMP_MIN_B_CB, denorm_clamp.clamp_min_b_cb); +} + +static enum dc_lut_mode mpc3_get_shaper_current(struct mpc *mpc, uint32_t rmu_idx) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_GET(SHAPER_CONTROL[rmu_idx], MPC_RMU_SHAPER_LUT_MODE_CURRENT, &state_mode); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +static void mpc3_configure_shaper_lut( + struct mpc *mpc, + bool is_ram_a, + uint32_t rmu_idx) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_UPDATE(SHAPER_LUT_WRITE_EN_MASK[rmu_idx], + MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(SHAPER_LUT_WRITE_EN_MASK[rmu_idx], + MPC_RMU_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); + REG_SET(SHAPER_LUT_INDEX[rmu_idx], 0, MPC_RMU_SHAPER_LUT_INDEX, 0); +} + +static void mpc3_program_shaper_luta_settings( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t rmu_idx) +{ + const struct gamma_curve *curve; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_SET_2(SHAPER_RAMA_START_CNTL_B[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(SHAPER_RAMA_START_CNTL_G[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(SHAPER_RAMA_START_CNTL_R[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + + REG_SET_2(SHAPER_RAMA_END_CNTL_B[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + REG_SET_2(SHAPER_RAMA_END_CNTL_G[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y); + REG_SET_2(SHAPER_RAMA_END_CNTL_R[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(SHAPER_RAMA_REGION_0_1[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + 
REG_SET_4(SHAPER_RAMA_REGION_2_3[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_4_5[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_6_7[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_8_9[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_10_11[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_12_13[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_14_15[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_16_17[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_18_19[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_20_21[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_22_23[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + 
MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_24_25[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_26_27[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_28_29[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_30_31[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMA_REGION_32_33[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); +} + +static void mpc3_program_shaper_lutb_settings( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t rmu_idx) +{ + const struct gamma_curve *curve; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_SET_2(SHAPER_RAMB_START_CNTL_B[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(SHAPER_RAMB_START_CNTL_G[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(SHAPER_RAMB_START_CNTL_R[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + + REG_SET_2(SHAPER_RAMB_END_CNTL_B[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + REG_SET_2(SHAPER_RAMB_END_CNTL_G[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y); + REG_SET_2(SHAPER_RAMB_END_CNTL_R[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x, + MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(SHAPER_RAMB_REGION_0_1[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, 
curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_2_3[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_4_5[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_6_7[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_8_9[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_10_11[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_12_13[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_14_15[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_16_17[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_18_19[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_20_21[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_22_23[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, 
curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_24_25[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_26_27[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_28_29[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_30_31[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(SHAPER_RAMB_REGION_32_33[rmu_idx], 0, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); +} + + +static void mpc3_program_shaper_lut( + struct mpc *mpc, + const struct pwl_result_data *rgb, + uint32_t num, + uint32_t rmu_idx) +{ + uint32_t i, red, green, blue; + uint32_t red_delta, green_delta, blue_delta; + uint32_t red_value, green_value, blue_value; + + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + for (i = 0 ; i < num; i++) { + + red = rgb[i].red_reg; + green = rgb[i].green_reg; + blue = rgb[i].blue_reg; + + red_delta = rgb[i].delta_red_reg; + green_delta = rgb[i].delta_green_reg; + blue_delta = rgb[i].delta_blue_reg; + + red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); + green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); + blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); + + REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, red_value); + REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, green_value); + REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, blue_value); + } + +} + +static void mpc3_power_on_shaper_3dlut( + struct mpc *mpc, + uint32_t rmu_idx, + bool power_on) +{ + uint32_t power_status_shaper = 2; + uint32_t power_status_3dlut = 2; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + int max_retries = 10; + + if (rmu_idx == 0) { + REG_SET(MPC_RMU_MEM_PWR_CTRL, 0, + MPC_RMU0_MEM_PWR_DIS, power_on == true ? 
1:0); + /* wait for memory to fully power up */ + if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { + REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, 0, 1, max_retries); + REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, 0, 1, max_retries); + } + + /*read status is not mandatory, it is just for debugging*/ + REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, &power_status_shaper); + REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, &power_status_3dlut); + } else if (rmu_idx == 1) { + REG_SET(MPC_RMU_MEM_PWR_CTRL, 0, + MPC_RMU1_MEM_PWR_DIS, power_on == true ? 1:0); + if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { + REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, 0, 1, max_retries); + REG_WAIT(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, 0, 1, max_retries); + } + + REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, &power_status_shaper); + REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, &power_status_3dlut); + } + /*TODO Add rmu_idx == 2 for SIENNA_CICHLID */ + if (power_status_shaper != 0 && power_on == true) + BREAK_TO_DEBUGGER(); + + if (power_status_3dlut != 0 && power_on == true) + BREAK_TO_DEBUGGER(); +} + + + +bool mpc3_program_shaper( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t rmu_idx) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + if (params == NULL) { + REG_SET(SHAPER_CONTROL[rmu_idx], 0, MPC_RMU_SHAPER_LUT_MODE, 0); + return false; + } + + if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) + mpc3_power_on_shaper_3dlut(mpc, rmu_idx, true); + + current_mode = mpc3_get_shaper_current(mpc, rmu_idx); + + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + mpc3_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, rmu_idx); + + if (next_mode == LUT_RAM_A) + mpc3_program_shaper_luta_settings(mpc, params, rmu_idx); + else + mpc3_program_shaper_lutb_settings(mpc, params, rmu_idx); + + mpc3_program_shaper_lut( + mpc, params->rgb_resulted, params->hw_points_num, rmu_idx); + + REG_SET(SHAPER_CONTROL[rmu_idx], 0, MPC_RMU_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); + mpc3_power_on_shaper_3dlut(mpc, rmu_idx, false); + + return true; +} + +static void mpc3_set_3dlut_mode( + struct mpc *mpc, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17, + uint32_t rmu_idx) +{ + uint32_t lut_mode; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + if (mode == LUT_BYPASS) + lut_mode = 0; + else if (mode == LUT_RAM_A) + lut_mode = 1; + else + lut_mode = 2; + + REG_UPDATE_2(RMU_3DLUT_MODE[rmu_idx], + MPC_RMU_3DLUT_MODE, lut_mode, + MPC_RMU_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); +} + +static enum dc_lut_mode get3dlut_config( + struct mpc *mpc, + bool *is_17x17x17, + bool *is_12bits_color_channel, + int rmu_idx) +{ + uint32_t i_mode, i_enable_10bits, lut_size; + enum dc_lut_mode mode; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_GET(RMU_3DLUT_MODE[rmu_idx], + MPC_RMU_3DLUT_MODE_CURRENT, &i_mode); + + REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx], + MPC_RMU_3DLUT_30BIT_EN, &i_enable_10bits); + + switch (i_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + if (i_enable_10bits > 0) + *is_12bits_color_channel = false; + else + *is_12bits_color_channel = true; + + REG_GET(RMU_3DLUT_MODE[rmu_idx], MPC_RMU_3DLUT_SIZE, &lut_size); + + if (lut_size == 0) + *is_17x17x17 = true; + else + *is_17x17x17 = false; + + return mode; +} + +static void mpc3_select_3dlut_ram( + struct mpc *mpc, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + uint32_t rmu_idx) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_UPDATE_2(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx], + MPC_RMU_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, + MPC_RMU_3DLUT_30BIT_EN, is_color_channel_12bits == true ? 0:1); +} + +static void mpc3_select_3dlut_ram_mask( + struct mpc *mpc, + uint32_t ram_selection_mask, + uint32_t rmu_idx) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_UPDATE(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx], MPC_RMU_3DLUT_WRITE_EN_MASK, + ram_selection_mask); + REG_SET(RMU_3DLUT_INDEX[rmu_idx], 0, MPC_RMU_3DLUT_INDEX, 0); +} + +static void mpc3_set3dlut_ram12( + struct mpc *mpc, + const struct dc_rgb *lut, + uint32_t entries, + uint32_t rmu_idx) +{ + uint32_t i, red, green, blue, red1, green1, blue1; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + for (i = 0 ; i < entries; i += 2) { + red = lut[i].red<<4; + green = lut[i].green<<4; + blue = lut[i].blue<<4; + red1 = lut[i+1].red<<4; + green1 = lut[i+1].green<<4; + blue1 = lut[i+1].blue<<4; + + REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0, + MPC_RMU_3DLUT_DATA0, red, + MPC_RMU_3DLUT_DATA1, red1); + + REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0, + MPC_RMU_3DLUT_DATA0, green, + MPC_RMU_3DLUT_DATA1, green1); + + REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0, + MPC_RMU_3DLUT_DATA0, blue, + MPC_RMU_3DLUT_DATA1, blue1); + } +} + +static void mpc3_set3dlut_ram10( + struct mpc *mpc, + const struct dc_rgb *lut, + uint32_t entries, + uint32_t rmu_idx) +{ + uint32_t i, red, green, blue, value; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + for (i = 0; i < entries; i++) { + red = lut[i].red; + green = lut[i].green; + blue = lut[i].blue; + //should we shift red 22bit and green 12? 
ask Nvenko + value = (red<<20) | (green<<10) | blue; + + REG_SET(RMU_3DLUT_DATA_30BIT[rmu_idx], 0, MPC_RMU_3DLUT_DATA_30BIT, value); + } + +} + + +void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst) +{ + mpcc->mpcc_id = mpcc_inst; + mpcc->dpp_id = 0xf; + mpcc->mpcc_bot = NULL; + mpcc->blnd_cfg.overlap_only = false; + mpcc->blnd_cfg.global_alpha = 0xff; + mpcc->blnd_cfg.global_gain = 0xff; + mpcc->blnd_cfg.background_color_bpc = 4; + mpcc->blnd_cfg.bottom_gain_mode = 0; + mpcc->blnd_cfg.top_gain = 0x1f000; + mpcc->blnd_cfg.bottom_inside_gain = 0x1f000; + mpcc->blnd_cfg.bottom_outside_gain = 0x1f000; + mpcc->sm_cfg.enable = false; + mpcc->shared_bottom = false; +} + +static void program_gamut_remap( + struct dcn30_mpc *mpc30, + int mpcc_id, + const uint16_t *regval, + int select) +{ + uint16_t selection = 0; + struct color_matrices_reg gam_regs; + + if (regval == NULL || select == GAMUT_REMAP_BYPASS) { + REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0, + MPCC_GAMUT_REMAP_MODE, GAMUT_REMAP_BYPASS); + return; + } + switch (select) { + case GAMUT_REMAP_COEFF: + selection = 1; + break; + /*this corresponds to GAMUT_REMAP coefficients set B + * we don't have common coefficient sets in dcn3ag/dcn3 + */ + case GAMUT_REMAP_COMA_COEFF: + selection = 2; + break; + default: + break; + } + + gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A; + gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A; + gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A; + gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A; + + + if (select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]); + gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]); + + cm_helper_program_color_matrices( + mpc30->base.ctx, + regval, + &gam_regs); + + } else if (select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]); + gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]); + + cm_helper_program_color_matrices( + mpc30->base.ctx, + regval, + &gam_regs); + + } + //select coefficient set to use + REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0, + MPCC_GAMUT_REMAP_MODE, selection); +} + +void mpc3_set_gamut_remap( + struct mpc *mpc, + int mpcc_id, + const struct mpc_grph_gamut_adjustment *adjust) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + int i = 0; + int gamut_mode; + + if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) + program_gamut_remap(mpc30, mpcc_id, NULL, GAMUT_REMAP_BYPASS); + else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + + for (i = 0; i < 12; i++) + arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + //current coefficient set in use + REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, &gamut_mode); + + if (gamut_mode == 0) + gamut_mode = 1; //use coefficient set A + else if (gamut_mode == 1) + gamut_mode = 2; + else + gamut_mode = 1; + + program_gamut_remap(mpc30, mpcc_id, arr_reg_val, gamut_mode); + } +} + +static void read_gamut_remap(struct dcn30_mpc *mpc30, + int mpcc_id, + uint16_t *regval, + uint32_t *select) +{ + struct color_matrices_reg gam_regs; + + //current coefficient set in use + REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select); + + gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A; + gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A; + gam_regs.shifts.csc_c12 = 
mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A; + gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A; + + if (*select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]); + gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]); + + cm_helper_read_color_matrices( + mpc30->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]); + gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]); + + cm_helper_read_color_matrices( + mpc30->base.ctx, + regval, + &gam_regs); + + } + +} + +void mpc3_get_gamut_remap(struct mpc *mpc, + int mpcc_id, + struct mpc_grph_gamut_adjustment *adjust) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + uint16_t arr_reg_val[12] = {0}; + int select; + + read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + +bool mpc3_program_3dlut( + struct mpc *mpc, + const struct tetrahedral_params *params, + int rmu_idx) +{ + enum dc_lut_mode mode; + bool is_17x17x17; + bool is_12bits_color_channel; + const struct dc_rgb *lut0; + const struct dc_rgb *lut1; + const struct dc_rgb *lut2; + const struct dc_rgb *lut3; + int lut_size0; + int lut_size; + + if (params == NULL) { + mpc3_set_3dlut_mode(mpc, LUT_BYPASS, false, false, rmu_idx); + return false; + } + mpc3_power_on_shaper_3dlut(mpc, rmu_idx, true); + + mode = get3dlut_config(mpc, &is_17x17x17, &is_12bits_color_channel, rmu_idx); + + if (mode == LUT_BYPASS || mode == LUT_RAM_B) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + + is_17x17x17 = !params->use_tetrahedral_9; + is_12bits_color_channel = params->use_12bits; + if (is_17x17x17) { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + lut_size0 = sizeof(params->tetrahedral_17.lut0)/ + sizeof(params->tetrahedral_17.lut0[0]); + lut_size = sizeof(params->tetrahedral_17.lut1)/ + sizeof(params->tetrahedral_17.lut1[0]); + } else { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + lut_size0 = sizeof(params->tetrahedral_9.lut0)/ + sizeof(params->tetrahedral_9.lut0[0]); + lut_size = sizeof(params->tetrahedral_9.lut1)/ + sizeof(params->tetrahedral_9.lut1[0]); + } + + mpc3_select_3dlut_ram(mpc, mode, + is_12bits_color_channel, rmu_idx); + mpc3_select_3dlut_ram_mask(mpc, 0x1, rmu_idx); + if (is_12bits_color_channel) + mpc3_set3dlut_ram12(mpc, lut0, lut_size0, rmu_idx); + else + mpc3_set3dlut_ram10(mpc, lut0, lut_size0, rmu_idx); + + mpc3_select_3dlut_ram_mask(mpc, 0x2, rmu_idx); + if (is_12bits_color_channel) + mpc3_set3dlut_ram12(mpc, lut1, lut_size, rmu_idx); + else + mpc3_set3dlut_ram10(mpc, lut1, lut_size, rmu_idx); + + mpc3_select_3dlut_ram_mask(mpc, 0x4, rmu_idx); + if (is_12bits_color_channel) + mpc3_set3dlut_ram12(mpc, lut2, lut_size, rmu_idx); + else + mpc3_set3dlut_ram10(mpc, lut2, lut_size, rmu_idx); + + mpc3_select_3dlut_ram_mask(mpc, 0x8, rmu_idx); + if (is_12bits_color_channel) + mpc3_set3dlut_ram12(mpc, lut3, lut_size, rmu_idx); + else + mpc3_set3dlut_ram10(mpc, lut3, lut_size, rmu_idx); + + mpc3_set_3dlut_mode(mpc, mode, is_12bits_color_channel, + 
is_17x17x17, rmu_idx); + + if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) + mpc3_power_on_shaper_3dlut(mpc, rmu_idx, false); + + return true; +} + +void mpc3_set_output_csc( + struct mpc *mpc, + int opp_id, + const uint16_t *regval, + enum mpc_output_csc_mode ocsc_mode) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + struct color_matrices_reg ocsc_regs; + + REG_WRITE(MPC_OUT_CSC_COEF_FORMAT, 0); + + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); + + if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) + return; + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + + ocsc_regs.shifts.csc_c11 = mpc30->mpc_shift->MPC_OCSC_C11_A; + ocsc_regs.masks.csc_c11 = mpc30->mpc_mask->MPC_OCSC_C11_A; + ocsc_regs.shifts.csc_c12 = mpc30->mpc_shift->MPC_OCSC_C12_A; + ocsc_regs.masks.csc_c12 = mpc30->mpc_mask->MPC_OCSC_C12_A; + + if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) { + ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]); + ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]); + } else { + ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); + ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); + } + cm_helper_program_color_matrices( + mpc30->base.ctx, + regval, + &ocsc_regs); +} + +void mpc3_set_ocsc_default( + struct mpc *mpc, + int opp_id, + enum dc_color_space color_space, + enum mpc_output_csc_mode ocsc_mode) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + uint32_t arr_size; + struct color_matrices_reg ocsc_regs; + const uint16_t *regval = NULL; + + REG_WRITE(MPC_OUT_CSC_COEF_FORMAT, 0); + + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); + if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) + return; + + regval = find_color_matrix(color_space, &arr_size); + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + + ocsc_regs.shifts.csc_c11 = mpc30->mpc_shift->MPC_OCSC_C11_A; + ocsc_regs.masks.csc_c11 = mpc30->mpc_mask->MPC_OCSC_C11_A; + ocsc_regs.shifts.csc_c12 = mpc30->mpc_shift->MPC_OCSC_C12_A; + ocsc_regs.masks.csc_c12 = mpc30->mpc_mask->MPC_OCSC_C12_A; + + + if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) { + ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]); + ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]); + } else { + ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); + ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); + } + + cm_helper_program_color_matrices( + mpc30->base.ctx, + regval, + &ocsc_regs); +} + +void mpc3_set_rmu_mux( + struct mpc *mpc, + int rmu_idx, + int value) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + if (rmu_idx == 0) + REG_UPDATE(MPC_RMU_CONTROL, MPC_RMU0_MUX, value); + else if (rmu_idx == 1) + REG_UPDATE(MPC_RMU_CONTROL, MPC_RMU1_MUX, value); + +} + +uint32_t mpc3_get_rmu_mux_status( + struct mpc *mpc, + int rmu_idx) +{ + uint32_t status = 0xf; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + if (rmu_idx == 0) + REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &status); + else if (rmu_idx == 1) + REG_GET(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, &status); + + return status; +} + +uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx) +{ + uint32_t rmu_status; + + //determine if this mpcc is already multiplexed to an RMU unit + rmu_status = mpc3_get_rmu_mux_status(mpc, rmu_idx); + if (rmu_status == mpcc_id) + //return rmu_idx of pre_acquired rmu unit + return rmu_idx; + + if (rmu_status == 0xf) {//rmu unit is disabled + mpc3_set_rmu_mux(mpc, rmu_idx, mpcc_id); + return rmu_idx; + } + + //no vacant RMU units or invalid parameters acquire_post_bldn_3dlut + return -1; +} + +static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) 
+{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + int rmu_idx; + uint32_t rmu_status; + int released_rmu = -1; + + for (rmu_idx = 0; rmu_idx < mpc30->num_rmu; rmu_idx++) { + rmu_status = mpc3_get_rmu_mux_status(mpc, rmu_idx); + if (rmu_status == mpcc_id) { + mpc3_set_rmu_mux(mpc, rmu_idx, 0xf); + released_rmu = rmu_idx; + break; + } + } + return released_rmu; + +} + +static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + int mpcc_id; + + if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { + if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) { + REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3); + REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_LOW_PWR_MODE, 3); + } + + if (mpc30->mpc_mask->MPCC_OGAM_MEM_LOW_PWR_MODE) { + for (mpcc_id = 0; mpcc_id < mpc30->num_mpcc; mpcc_id++) + REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_LOW_PWR_MODE, 3); + } + } +} + +static void mpc3_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + uint32_t rmu_status = 0xf; + + REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); + REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); + REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, + MPCC_ALPHA_BLND_MODE, &s->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); + REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, + MPCC_BUSY, &s->busy); + + /* Color blocks state */ + REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &rmu_status); + + if (rmu_status == mpcc_inst) { + REG_GET(SHAPER_CONTROL[0], + MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode); + REG_GET(RMU_3DLUT_MODE[0], + MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0], + MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(RMU_3DLUT_MODE[0], + MPC_RMU_3DLUT_SIZE, &s->lut3d_size); + } else { + REG_GET(SHAPER_CONTROL[1], + MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode); + REG_GET(RMU_3DLUT_MODE[1], + MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1], + MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(RMU_3DLUT_MODE[1], + MPC_RMU_3DLUT_SIZE, &s->lut3d_size); + } + + REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst], + MPCC_OGAM_MODE_CURRENT, &s->rgam_mode, + MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut); +} + +static const struct mpc_funcs dcn30_mpc_funcs = { + .read_mpcc_state = mpc3_read_mpcc_state, + .insert_plane = mpc1_insert_plane, + .remove_mpcc = mpc1_remove_mpcc, + .mpc_init = mpc3_mpc_init, + .mpc_init_single_inst = mpc3_mpc_init_single_inst, + .update_blending = mpc2_update_blending, + .cursor_lock = mpc1_cursor_lock, + .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, + .wait_for_idle = mpc2_assert_idle_mpcc, + .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect, + .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, + .set_denorm = mpc3_set_denorm, + .set_denorm_clamp = mpc3_set_denorm_clamp, + .set_output_csc = mpc3_set_output_csc, + .set_ocsc_default = mpc3_set_ocsc_default, + .set_output_gamma = mpc3_set_output_gamma, + .insert_plane_to_secondary = NULL, + .remove_mpcc_from_secondary = NULL, + .set_dwb_mux = mpc3_set_dwb_mux, + .disable_dwb_mux = mpc3_disable_dwb_mux, + .is_dwb_idle = mpc3_is_dwb_idle, + .set_gamut_remap = mpc3_set_gamut_remap, + 
.program_shaper = mpc3_program_shaper, + .acquire_rmu = mpcc3_acquire_rmu, + .program_3dlut = mpc3_program_3dlut, + .release_rmu = mpcc3_release_rmu, + .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, + .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .set_bg_color = mpc1_set_bg_color, + .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, +}; + +void dcn30_mpc_construct(struct dcn30_mpc *mpc30, + struct dc_context *ctx, + const struct dcn30_mpc_registers *mpc_regs, + const struct dcn30_mpc_shift *mpc_shift, + const struct dcn30_mpc_mask *mpc_mask, + int num_mpcc, + int num_rmu) +{ + int i; + + mpc30->base.ctx = ctx; + + mpc30->base.funcs = &dcn30_mpc_funcs; + + mpc30->mpc_regs = mpc_regs; + mpc30->mpc_shift = mpc_shift; + mpc30->mpc_mask = mpc_mask; + + mpc30->mpcc_in_use_mask = 0; + mpc30->num_mpcc = num_mpcc; + mpc30->num_rmu = num_rmu; + + for (i = 0; i < MAX_MPCC; i++) + mpc3_init_mpcc(&mpc30->base.mpcc_array[i], i); +} + diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h new file mode 100644 index 00000000000000..ce93003dae0113 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h @@ -0,0 +1,1098 @@ +/* Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_MPCC_DCN30_H__ +#define __DC_MPCC_DCN30_H__ + +#include "dcn20/dcn20_mpc.h" + +#define MAX_RMU 3 + +#define TO_DCN30_MPC(mpc_base) \ + container_of(mpc_base, struct dcn30_mpc, base) + +#ifdef SRII_MPC_RMU +#undef SRII_MPC_RMU + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#endif + + +#define MPC_REG_LIST_DCN3_0(inst)\ + MPC_COMMON_REG_LIST_DCN1_0(inst),\ + SRII(MPCC_TOP_GAIN, MPCC, inst),\ + SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst),\ + SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst),\ + SRII(MPCC_MEM_PWR_CTRL, MPCC, inst),\ + SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst), \ + SRII(MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_OGAM, inst),\ + SRII(MPCC_GAMUT_REMAP_MODE, MPCC_OGAM, inst),\ + SRII(MPC_GAMUT_REMAP_C11_C12_A, MPCC_OGAM, inst),\ + SRII(MPC_GAMUT_REMAP_C33_C34_A, MPCC_OGAM, inst),\ + SRII(MPC_GAMUT_REMAP_C11_C12_B, MPCC_OGAM, inst),\ + SRII(MPC_GAMUT_REMAP_C33_C34_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_OFFSET_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_OFFSET_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_OFFSET_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_B, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst),\ + SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) + +#define MPC_OUT_MUX_REG_LIST_DCN3_0(inst) \ + MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst),\ + SRII(CSC_MODE, MPC_OUT, inst),\ + 
SRII(CSC_C11_C12_A, MPC_OUT, inst),\ + SRII(CSC_C33_C34_A, MPC_OUT, inst),\ + SRII(CSC_C11_C12_B, MPC_OUT, inst),\ + SRII(CSC_C33_C34_B, MPC_OUT, inst),\ + SRII(DENORM_CONTROL, MPC_OUT, inst),\ + SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\ + SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), \ + SR(MPC_OUT_CSC_COEF_FORMAT) + +#define MPC_RMU_GLOBAL_REG_LIST_DCN3AG \ + SR(MPC_RMU_CONTROL),\ + SR(MPC_RMU_MEM_PWR_CTRL) + +#define MPC_RMU_REG_LIST_DCN3AG(inst) \ + SRII(SHAPER_CONTROL, MPC_RMU, inst),\ + SRII(SHAPER_OFFSET_R, MPC_RMU, inst),\ + SRII(SHAPER_OFFSET_G, MPC_RMU, inst),\ + SRII(SHAPER_OFFSET_B, MPC_RMU, inst),\ + SRII(SHAPER_SCALE_R, MPC_RMU, inst),\ + SRII(SHAPER_SCALE_G_B, MPC_RMU, inst),\ + SRII(SHAPER_LUT_INDEX, MPC_RMU, inst),\ + SRII(SHAPER_LUT_DATA, MPC_RMU, inst),\ + SRII(SHAPER_LUT_WRITE_EN_MASK, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_START_CNTL_B, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_START_CNTL_G, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_START_CNTL_R, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_END_CNTL_B, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_END_CNTL_G, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_END_CNTL_R, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_0_1, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_2_3, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_4_5, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_6_7, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_8_9, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_10_11, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_12_13, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_14_15, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_16_17, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_18_19, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_20_21, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_22_23, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_24_25, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_26_27, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_28_29, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_30_31, MPC_RMU, inst),\ + SRII(SHAPER_RAMA_REGION_32_33, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_START_CNTL_B, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_START_CNTL_G, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_START_CNTL_R, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_END_CNTL_B, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_END_CNTL_G, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_END_CNTL_R, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_0_1, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_2_3, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_4_5, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_6_7, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_8_9, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_10_11, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_12_13, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_14_15, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_16_17, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_18_19, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_20_21, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_22_23, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_24_25, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_26_27, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_28_29, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_30_31, MPC_RMU, inst),\ + SRII(SHAPER_RAMB_REGION_32_33, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_MODE, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_INDEX, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_DATA, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_DATA_30BIT, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_READ_WRITE_CONTROL, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_OUT_NORM_FACTOR, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_OUT_OFFSET_R, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_OUT_OFFSET_G, MPC_RMU, inst),\ + SRII_MPC_RMU(3DLUT_OUT_OFFSET_B, MPC_RMU, inst) + + 
+#define MPC_DWB_MUX_REG_LIST_DCN3_0(inst) \ + SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst) + +#define MPC_REG_VARIABLE_LIST_DCN3_0 \ + MPC_REG_VARIABLE_LIST_DCN2_0 \ + uint32_t DWB_MUX[MAX_DWB]; \ + uint32_t MPCC_GAMUT_REMAP_COEF_FORMAT[MAX_MPCC]; \ + uint32_t MPCC_GAMUT_REMAP_MODE[MAX_MPCC]; \ + uint32_t MPC_GAMUT_REMAP_C11_C12_A[MAX_MPCC]; \ + uint32_t MPC_GAMUT_REMAP_C33_C34_A[MAX_MPCC]; \ + uint32_t MPC_GAMUT_REMAP_C11_C12_B[MAX_MPCC]; \ + uint32_t MPC_GAMUT_REMAP_C33_C34_B[MAX_MPCC]; \ + uint32_t MPC_RMU_CONTROL; \ + uint32_t MPC_RMU_MEM_PWR_CTRL; \ + uint32_t SHAPER_CONTROL[MAX_RMU]; \ + uint32_t SHAPER_OFFSET_R[MAX_RMU]; \ + uint32_t SHAPER_OFFSET_G[MAX_RMU]; \ + uint32_t SHAPER_OFFSET_B[MAX_RMU]; \ + uint32_t SHAPER_SCALE_R[MAX_RMU]; \ + uint32_t SHAPER_SCALE_G_B[MAX_RMU]; \ + uint32_t SHAPER_LUT_INDEX[MAX_RMU]; \ + uint32_t SHAPER_LUT_DATA[MAX_RMU]; \ + uint32_t SHAPER_LUT_WRITE_EN_MASK[MAX_RMU]; \ + uint32_t SHAPER_RAMA_START_CNTL_B[MAX_RMU]; \ + uint32_t SHAPER_RAMA_START_CNTL_G[MAX_RMU]; \ + uint32_t SHAPER_RAMA_START_CNTL_R[MAX_RMU]; \ + uint32_t SHAPER_RAMA_END_CNTL_B[MAX_RMU]; \ + uint32_t SHAPER_RAMA_END_CNTL_G[MAX_RMU]; \ + uint32_t SHAPER_RAMA_END_CNTL_R[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_0_1[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_2_3[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_4_5[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_6_7[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_8_9[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_10_11[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_12_13[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_14_15[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_16_17[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_18_19[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_20_21[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_22_23[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_24_25[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_26_27[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_28_29[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_30_31[MAX_RMU]; \ + uint32_t SHAPER_RAMA_REGION_32_33[MAX_RMU]; \ + uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_OFFSET_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_OFFSET_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_OFFSET_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_R[MAX_MPCC];\ + uint32_t SHAPER_RAMB_START_CNTL_B[MAX_RMU]; \ + uint32_t SHAPER_RAMB_START_CNTL_G[MAX_RMU]; \ + uint32_t SHAPER_RAMB_START_CNTL_R[MAX_RMU]; \ + uint32_t SHAPER_RAMB_END_CNTL_B[MAX_RMU]; \ + uint32_t SHAPER_RAMB_END_CNTL_G[MAX_RMU]; \ + uint32_t SHAPER_RAMB_END_CNTL_R[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_0_1[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_2_3[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_4_5[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_6_7[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_8_9[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_10_11[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_12_13[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_14_15[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_16_17[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_18_19[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_20_21[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_22_23[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_24_25[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_26_27[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_28_29[MAX_RMU]; \ + uint32_t SHAPER_RAMB_REGION_30_31[MAX_RMU]; \ + uint32_t 
SHAPER_RAMB_REGION_32_33[MAX_RMU]; \ + uint32_t RMU_3DLUT_MODE[MAX_RMU]; \ + uint32_t RMU_3DLUT_INDEX[MAX_RMU]; \ + uint32_t RMU_3DLUT_DATA[MAX_RMU]; \ + uint32_t RMU_3DLUT_DATA_30BIT[MAX_RMU]; \ + uint32_t RMU_3DLUT_READ_WRITE_CONTROL[MAX_RMU]; \ + uint32_t RMU_3DLUT_OUT_NORM_FACTOR[MAX_RMU]; \ + uint32_t RMU_3DLUT_OUT_OFFSET_R[MAX_RMU]; \ + uint32_t RMU_3DLUT_OUT_OFFSET_G[MAX_RMU]; \ + uint32_t RMU_3DLUT_OUT_OFFSET_B[MAX_RMU]; \ + uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_CONTROL[MAX_MPCC]; \ + uint32_t MPCC_OGAM_LUT_CONTROL[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_OFFSET_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_OFFSET_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_OFFSET_R[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_R[MAX_MPCC]; \ + uint32_t MPC_OUT_CSC_COEF_FORMAT + +#define MPC_REG_VARIABLE_LIST_DCN32 \ + uint32_t MPCC_MOVABLE_CM_LOCATION_CONTROL[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_CONTROL[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_OFFSET_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_OFFSET_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_OFFSET_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_SCALE_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_SCALE_G_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_LUT_INDEX[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_LUT_DATA[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_START_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_START_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_START_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_END_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_END_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_END_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_0_1[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_2_3[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_4_5[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_6_7[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_8_9[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_10_11[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_12_13[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_14_15[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_16_17[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_18_19[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_20_21[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_22_23[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_24_25[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_26_27[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_28_29[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_30_31[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMA_REGION_32_33[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_START_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_START_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_START_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_END_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_END_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_END_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_0_1[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_2_3[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_4_5[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_6_7[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_8_9[MAX_MPCC]; \ + uint32_t 
MPCC_MCM_SHAPER_RAMB_REGION_10_11[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_12_13[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_14_15[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_16_17[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_18_19[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_20_21[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_22_23[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_24_25[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_26_27[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_28_29[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_30_31[MAX_MPCC]; \ + uint32_t MPCC_MCM_SHAPER_RAMB_REGION_32_33[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_MODE[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_INDEX[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_DATA[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_DATA_30BIT[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_READ_WRITE_CONTROL[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_OUT_NORM_FACTOR[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_OUT_OFFSET_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_OUT_OFFSET_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_3DLUT_OUT_OFFSET_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_CONTROL[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_LUT_INDEX[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_LUT_DATA[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_LUT_CONTROL[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL1_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL2_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL1_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL2_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL1_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_END_CNTL2_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_OFFSET_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_OFFSET_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_OFFSET_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_0_1[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_2_3[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_4_5[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_6_7[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_8_9[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_10_11[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_12_13[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_14_15[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_16_17[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_18_19[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_20_21[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_22_23[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_24_25[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_26_27[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_28_29[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_30_31[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMA_REGION_32_33[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_START_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_START_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_START_CNTL_R[MAX_MPCC]; \ + uint32_t 
MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL1_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL2_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL1_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL2_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL1_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_END_CNTL2_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_OFFSET_B[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_OFFSET_G[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_OFFSET_R[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_0_1[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_2_3[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_4_5[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_6_7[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_8_9[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_10_11[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_12_13[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_14_15[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_16_17[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_18_19[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_20_21[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_22_23[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_24_25[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_26_27[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_28_29[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_30_31[MAX_MPCC]; \ + uint32_t MPCC_MCM_1DLUT_RAMB_REGION_32_33[MAX_MPCC]; \ + uint32_t MPCC_MCM_MEM_PWR_CTRL[MAX_MPCC] + +#define MPC_COMMON_MASK_SH_LIST_DCN3_0(mask_sh) \ + MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ + SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ + SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ + SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_STATE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ + SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\ + SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, 
mask_sh),\ + SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\ + SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU1_MUX, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, mask_sh), \ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\ + SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\ 
+ SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\ + SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_G, MPC_RMU_SHAPER_OFFSET_G, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_FORCE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_DIS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, mask_sh),\ + SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) + + +#define MPC_COMMON_MASK_SH_LIST_DCN30(mask_sh) \ + MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ + SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ + SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ + SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_LOW_PWR_MODE, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_STATE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ + 
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ + SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\ + SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, mask_sh),\ + SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\ + SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU1_MUX, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, mask_sh), \ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\ + /*SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),*/\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\ + /*SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),*/\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\ + 
SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\ + /*SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),*/\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\ + SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\ + SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\ + /*SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),*/\ + SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_G, MPC_RMU_SHAPER_OFFSET_G, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\ + /*SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),*/\ + SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_FORCE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_DIS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_LOW_PWR_MODE, mask_sh),\ + SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_MODE_CURRENT, mask_sh),\ + SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) + + +#define MPC_REG_FIELD_LIST_DCN3_0(type) \ + MPC_REG_FIELD_LIST_DCN2_0(type) \ + type MPC_DWB0_MUX;\ + type MPC_DWB0_MUX_STATUS;\ + type MPC_OUT_RATE_CONTROL;\ + type MPC_OUT_RATE_CONTROL_DISABLE;\ + type MPC_OUT_FLOW_CONTROL_MODE;\ + type MPC_OUT_FLOW_CONTROL_COUNT; \ + type MPCC_GAMUT_REMAP_MODE; \ + type MPCC_GAMUT_REMAP_MODE_CURRENT;\ + type MPCC_GAMUT_REMAP_COEF_FORMAT; \ + type MPCC_GAMUT_REMAP_C11_A; \ + type MPCC_GAMUT_REMAP_C12_A; \ + type MPC_RMU0_MUX; \ + type MPC_RMU1_MUX; \ + type MPC_RMU0_MUX_STATUS; \ + type MPC_RMU1_MUX_STATUS; \ + type MPC_RMU0_MEM_PWR_FORCE;\ + type MPC_RMU0_MEM_PWR_DIS;\ + type 
MPC_RMU0_MEM_LOW_PWR_MODE;\ + type MPC_RMU0_SHAPER_MEM_PWR_STATE;\ + type MPC_RMU0_3DLUT_MEM_PWR_STATE;\ + type MPC_RMU1_MEM_PWR_FORCE;\ + type MPC_RMU1_MEM_PWR_DIS;\ + type MPC_RMU1_MEM_LOW_PWR_MODE;\ + type MPC_RMU1_SHAPER_MEM_PWR_STATE;\ + type MPC_RMU1_3DLUT_MEM_PWR_STATE;\ + type MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; \ + type MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;\ + type MPCC_OGAM_RAMA_OFFSET_B;\ + type MPCC_OGAM_RAMA_OFFSET_G;\ + type MPCC_OGAM_RAMA_OFFSET_R;\ + type MPCC_OGAM_SELECT; \ + type MPCC_OGAM_PWL_DISABLE; \ + type MPCC_OGAM_MODE_CURRENT; \ + type MPCC_OGAM_SELECT_CURRENT; \ + type MPCC_OGAM_LUT_WRITE_COLOR_MASK; \ + type MPCC_OGAM_LUT_READ_COLOR_SEL; \ + type MPCC_OGAM_LUT_READ_DBG; \ + type MPCC_OGAM_LUT_HOST_SEL; \ + type MPCC_OGAM_LUT_CONFIG_MODE; \ + type MPCC_OGAM_LUT_STATUS; \ + type MPCC_OGAM_RAMA_START_BASE_CNTL_B;\ + type MPCC_OGAM_MEM_LOW_PWR_MODE;\ + type MPCC_OGAM_MEM_PWR_STATE;\ + type MPC_RMU_3DLUT_MODE; \ + type MPC_RMU_3DLUT_SIZE; \ + type MPC_RMU_3DLUT_MODE_CURRENT; \ + type MPC_RMU_3DLUT_WRITE_EN_MASK;\ + type MPC_RMU_3DLUT_RAM_SEL;\ + type MPC_RMU_3DLUT_30BIT_EN;\ + type MPC_RMU_3DLUT_CONFIG_STATUS;\ + type MPC_RMU_3DLUT_READ_SEL;\ + type MPC_RMU_3DLUT_INDEX;\ + type MPC_RMU_3DLUT_DATA0;\ + type MPC_RMU_3DLUT_DATA1;\ + type MPC_RMU_3DLUT_DATA_30BIT;\ + type MPC_RMU_SHAPER_LUT_MODE;\ + type MPC_RMU_SHAPER_LUT_MODE_CURRENT;\ + type MPC_RMU_SHAPER_OFFSET_R;\ + type MPC_RMU_SHAPER_OFFSET_G;\ + type MPC_RMU_SHAPER_OFFSET_B;\ + type MPC_RMU_SHAPER_SCALE_R;\ + type MPC_RMU_SHAPER_SCALE_G;\ + type MPC_RMU_SHAPER_SCALE_B;\ + type MPC_RMU_SHAPER_LUT_INDEX;\ + type MPC_RMU_SHAPER_LUT_DATA;\ + type MPC_RMU_SHAPER_LUT_WRITE_EN_MASK;\ + type MPC_RMU_SHAPER_LUT_WRITE_SEL;\ + type MPC_RMU_SHAPER_CONFIG_STATUS;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\ + type MPC_RMU_SHAPER_MODE_CURRENT + +#define MPC_REG_FIELD_LIST_DCN32(type) \ + type MPCC_MOVABLE_CM_LOCATION_CNTL;\ + type MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT;\ + type MPCC_MCM_SHAPER_MEM_PWR_FORCE;\ + type MPCC_MCM_SHAPER_MEM_PWR_DIS;\ + type MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE;\ + type MPCC_MCM_3DLUT_MEM_PWR_FORCE;\ + type MPCC_MCM_3DLUT_MEM_PWR_DIS;\ + type MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE;\ + type MPCC_MCM_1DLUT_MEM_PWR_FORCE;\ + type MPCC_MCM_1DLUT_MEM_PWR_DIS;\ + type MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE;\ + type MPCC_MCM_SHAPER_MEM_PWR_STATE;\ + type MPCC_MCM_3DLUT_MEM_PWR_STATE;\ + type MPCC_MCM_1DLUT_MEM_PWR_STATE;\ + type MPCC_MCM_3DLUT_MODE; \ + type MPCC_MCM_3DLUT_SIZE; \ + type MPCC_MCM_3DLUT_MODE_CURRENT; \ + type MPCC_MCM_3DLUT_WRITE_EN_MASK;\ + type MPCC_MCM_3DLUT_RAM_SEL;\ + type MPCC_MCM_3DLUT_30BIT_EN;\ + type MPCC_MCM_3DLUT_CONFIG_STATUS;\ + type MPCC_MCM_3DLUT_READ_SEL;\ + type MPCC_MCM_3DLUT_INDEX;\ + type MPCC_MCM_3DLUT_DATA0;\ + type MPCC_MCM_3DLUT_DATA1;\ + type MPCC_MCM_3DLUT_DATA_30BIT;\ + type MPCC_MCM_SHAPER_LUT_MODE;\ + type MPCC_MCM_SHAPER_MODE_CURRENT;\ + type MPCC_MCM_SHAPER_OFFSET_R;\ + type MPCC_MCM_SHAPER_OFFSET_G;\ + type MPCC_MCM_SHAPER_OFFSET_B;\ + type MPCC_MCM_SHAPER_SCALE_R;\ + type MPCC_MCM_SHAPER_SCALE_G;\ + type MPCC_MCM_SHAPER_SCALE_B;\ + type MPCC_MCM_SHAPER_LUT_INDEX;\ + type MPCC_MCM_SHAPER_LUT_DATA;\ + type 
MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK;\ + type MPCC_MCM_SHAPER_LUT_WRITE_SEL;\ + type MPCC_MCM_SHAPER_CONFIG_STATUS;\ + type MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B;\ + type MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\ + type MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B;\ + type MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B;\ + type MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\ + type MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\ + type MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\ + type MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type MPCC_MCM_1DLUT_MODE;\ + type MPCC_MCM_1DLUT_SELECT;\ + type MPCC_MCM_1DLUT_PWL_DISABLE;\ + type MPCC_MCM_1DLUT_MODE_CURRENT;\ + type MPCC_MCM_1DLUT_SELECT_CURRENT;\ + type MPCC_MCM_1DLUT_LUT_INDEX;\ + type MPCC_MCM_1DLUT_LUT_DATA;\ + type MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK;\ + type MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL;\ + type MPCC_MCM_1DLUT_LUT_HOST_SEL;\ + type MPCC_MCM_1DLUT_LUT_CONFIG_MODE;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B;\ + type MPCC_MCM_1DLUT_RAMA_OFFSET_B;\ + type MPCC_MCM_1DLUT_RAMA_OFFSET_G;\ + type MPCC_MCM_1DLUT_RAMA_OFFSET_R;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET;\ + type MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS + + +#define MPC_COMMON_MASK_SH_LIST_DCN303(mask_sh) \ + MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ + SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ + SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ + SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_LOW_PWR_MODE, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_STATE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ + SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\ + SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, mask_sh),\ + SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\ + SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\ + 
SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\ + /*SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),*/\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\ + /*SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),*/\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\ + /*SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),*/\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\ + SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\ + SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\ + /*SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),*/\ + SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_G, 
MPC_RMU_SHAPER_OFFSET_G, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\ + /*SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),*/\ + SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, mask_sh),\ + SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_MODE_CURRENT, mask_sh),\ + SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) + +#define MPC_REG_FIELD_LIST_DCN3_03(type) \ + MPC_REG_FIELD_LIST_DCN2_0(type) \ + type MPC_DWB0_MUX;\ + type MPC_DWB0_MUX_STATUS;\ + type MPC_OUT_RATE_CONTROL;\ + type MPC_OUT_RATE_CONTROL_DISABLE;\ + type MPC_OUT_FLOW_CONTROL_MODE;\ + type MPC_OUT_FLOW_CONTROL_COUNT; \ + type MPCC_GAMUT_REMAP_MODE; \ + type MPCC_GAMUT_REMAP_MODE_CURRENT;\ + type MPCC_GAMUT_REMAP_COEF_FORMAT; \ + type MPCC_GAMUT_REMAP_C11_A; \ + type MPCC_GAMUT_REMAP_C12_A; \ + type MPC_RMU0_MUX; \ + type MPC_RMU0_MUX_STATUS; \ + type MPC_RMU0_MEM_PWR_FORCE;\ + type MPC_RMU0_MEM_PWR_DIS;\ + type MPC_RMU0_MEM_LOW_PWR_MODE;\ + type MPC_RMU0_SHAPER_MEM_PWR_STATE;\ + type MPC_RMU0_3DLUT_MEM_PWR_STATE;\ + type MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; \ + type MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;\ + type MPCC_OGAM_RAMA_OFFSET_B;\ + type MPCC_OGAM_RAMA_OFFSET_G;\ + type MPCC_OGAM_RAMA_OFFSET_R;\ + type MPCC_OGAM_SELECT; \ + type MPCC_OGAM_PWL_DISABLE; \ + type MPCC_OGAM_MODE_CURRENT; \ + type MPCC_OGAM_SELECT_CURRENT; \ + type MPCC_OGAM_LUT_WRITE_COLOR_MASK; \ + type MPCC_OGAM_LUT_READ_COLOR_SEL; \ + type MPCC_OGAM_LUT_READ_DBG; \ + type MPCC_OGAM_LUT_HOST_SEL; \ + type MPCC_OGAM_LUT_CONFIG_MODE; \ + type MPCC_OGAM_LUT_STATUS; \ + type MPCC_OGAM_RAMA_START_BASE_CNTL_B;\ + type MPCC_OGAM_MEM_LOW_PWR_MODE;\ + type MPCC_OGAM_MEM_PWR_STATE;\ + type MPC_RMU_3DLUT_MODE; \ + type MPC_RMU_3DLUT_SIZE; \ + type MPC_RMU_3DLUT_MODE_CURRENT; \ + type MPC_RMU_3DLUT_WRITE_EN_MASK;\ + type MPC_RMU_3DLUT_RAM_SEL;\ + type MPC_RMU_3DLUT_30BIT_EN;\ + type MPC_RMU_3DLUT_CONFIG_STATUS;\ + type MPC_RMU_3DLUT_READ_SEL;\ + type MPC_RMU_3DLUT_INDEX;\ + type MPC_RMU_3DLUT_DATA0;\ + type 
MPC_RMU_3DLUT_DATA1;\ + type MPC_RMU_3DLUT_DATA_30BIT;\ + type MPC_RMU_SHAPER_LUT_MODE;\ + type MPC_RMU_SHAPER_LUT_MODE_CURRENT;\ + type MPC_RMU_SHAPER_OFFSET_R;\ + type MPC_RMU_SHAPER_OFFSET_G;\ + type MPC_RMU_SHAPER_OFFSET_B;\ + type MPC_RMU_SHAPER_SCALE_R;\ + type MPC_RMU_SHAPER_SCALE_G;\ + type MPC_RMU_SHAPER_SCALE_B;\ + type MPC_RMU_SHAPER_LUT_INDEX;\ + type MPC_RMU_SHAPER_LUT_DATA;\ + type MPC_RMU_SHAPER_LUT_WRITE_EN_MASK;\ + type MPC_RMU_SHAPER_LUT_WRITE_SEL;\ + type MPC_RMU_SHAPER_CONFIG_STATUS;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\ + type MPC_RMU_SHAPER_MODE_CURRENT + +struct dcn30_mpc_registers { + MPC_REG_VARIABLE_LIST_DCN3_0; + MPC_REG_VARIABLE_LIST_DCN32; +}; + +struct dcn30_mpc_shift { + MPC_REG_FIELD_LIST_DCN3_0(uint8_t); + MPC_REG_FIELD_LIST_DCN32(uint8_t); +}; + +struct dcn30_mpc_mask { + MPC_REG_FIELD_LIST_DCN3_0(uint32_t); + MPC_REG_FIELD_LIST_DCN32(uint32_t); +}; + +struct dcn30_mpc { + struct mpc base; + + int mpcc_in_use_mask; + int num_mpcc; + const struct dcn30_mpc_registers *mpc_regs; + const struct dcn30_mpc_shift *mpc_shift; + const struct dcn30_mpc_mask *mpc_mask; + int num_rmu; +}; + +void dcn30_mpc_construct(struct dcn30_mpc *mpc30, + struct dc_context *ctx, + const struct dcn30_mpc_registers *mpc_regs, + const struct dcn30_mpc_shift *mpc_shift, + const struct dcn30_mpc_mask *mpc_mask, + int num_mpcc, + int num_rmu); + +void mpc3_mpc_init( + struct mpc *mpc); + +void mpc3_mpc_init_single_inst( + struct mpc *mpc, + unsigned int mpcc_id); + +bool mpc3_program_shaper( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t rmu_idx); + +bool mpc3_program_3dlut( + struct mpc *mpc, + const struct tetrahedral_params *params, + int rmu_idx); + +uint32_t mpcc3_acquire_rmu(struct mpc *mpc, + int mpcc_id, int rmu_idx); + +void mpc3_set_denorm( + struct mpc *mpc, + int opp_id, + enum dc_color_depth output_depth); + +void mpc3_set_denorm_clamp( + struct mpc *mpc, + int opp_id, + struct mpc_denorm_clamp denorm_clamp); + +void mpc3_set_output_csc( + struct mpc *mpc, + int opp_id, + const uint16_t *regval, + enum mpc_output_csc_mode ocsc_mode); + +void mpc3_set_ocsc_default( + struct mpc *mpc, + int opp_id, + enum dc_color_space color_space, + enum mpc_output_csc_mode ocsc_mode); + +void mpc3_set_output_gamma( + struct mpc *mpc, + int mpcc_id, + const struct pwl_params *params); + +uint32_t mpc3_get_rmu_mux_status( + struct mpc *mpc, + int rmu_idx); + +void mpc3_set_gamut_remap( + struct mpc *mpc, + int mpcc_id, + const struct mpc_grph_gamut_adjustment *adjust); + +void mpc3_get_gamut_remap(struct mpc *mpc, + int mpcc_id, + struct mpc_grph_gamut_adjustment *adjust); + +void mpc3_set_rmu_mux( + struct mpc *mpc, + int rmu_idx, + int value); + +void mpc3_set_dwb_mux( + struct mpc *mpc, + int dwb_id, + int mpcc_id); + +void mpc3_disable_dwb_mux( + struct mpc *mpc, + int dwb_id); + +bool mpc3_is_dwb_idle( + struct mpc *mpc, + int dwb_id); + +void mpc3_power_on_ogam_lut( + struct mpc *mpc, int mpcc_id, + bool power_on); + +void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst); + +enum dc_lut_mode mpc3_get_ogam_current( + struct mpc *mpc, + int mpcc_id); + +#endif diff --git 
a/drivers/gpu/drm/amd/display/dc/opp/Makefile b/drivers/gpu/drm/amd/display/dc/opp/Makefile index fbfb3c3ad81923..1be76754db3000 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/Makefile +++ b/drivers/gpu/drm/amd/display/dc/opp/Makefile @@ -25,6 +25,22 @@ ifdef CONFIG_DRM_AMD_DC_FP ############################################################################### +# DCN10 +############################################################################### +OPP_DCN10 = dcn10_opp.o + +AMD_DAL_OPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/opp/dcn10/,$(OPP_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPP_DCN10) +############################################################################### +# DCN20 +############################################################################### +OPP_DCN20 = dcn20_opp.o + +AMD_DAL_OPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/opp/dcn20/,$(OPP_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPP_DCN20) +############################################################################### # DCN35 ############################################################################### OPP_DCN35 = dcn35_opp.o diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c new file mode 100644 index 00000000000000..71e9288d60ed7c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c @@ -0,0 +1,413 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dm_services.h" +#include "dcn10_opp.h" +#include "reg_helper.h" + +#define REG(reg) \ + (oppn10->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + oppn10->opp_shift->field_name, oppn10->opp_mask->field_name + +#define CTX \ + oppn10->base.ctx + +/** + * opp1_set_truncation(): + * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp + * 2) enable truncation + * 3) HW remove 12bit FMT support for DCE11 power saving reason. + * + * @oppn10: output_pixel_processor struct instance for dcn10. + * @params: pointer to bit_depth_reduction_params. 
+ */ +static void opp1_set_truncation( + struct dcn10_opp *oppn10, + const struct bit_depth_reduction_params *params) +{ + REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, + FMT_TRUNCATE_EN, params->flags.TRUNCATE_ENABLED, + FMT_TRUNCATE_DEPTH, params->flags.TRUNCATE_DEPTH, + FMT_TRUNCATE_MODE, params->flags.TRUNCATE_MODE); +} + +static void opp1_set_spatial_dither( + struct dcn10_opp *oppn10, + const struct bit_depth_reduction_params *params) +{ + /*Disable spatial (random) dithering*/ + REG_UPDATE_7(FMT_BIT_DEPTH_CONTROL, + FMT_SPATIAL_DITHER_EN, 0, + FMT_SPATIAL_DITHER_MODE, 0, + FMT_SPATIAL_DITHER_DEPTH, 0, + FMT_TEMPORAL_DITHER_EN, 0, + FMT_HIGHPASS_RANDOM_ENABLE, 0, + FMT_FRAME_RANDOM_ENABLE, 0, + FMT_RGB_RANDOM_ENABLE, 0); + + + /* only use FRAME_COUNTER_MAX if frameRandom == 1*/ + if (params->flags.FRAME_RANDOM == 1) { + if (params->flags.SPATIAL_DITHER_DEPTH == 0 || params->flags.SPATIAL_DITHER_DEPTH == 1) { + REG_UPDATE_2(FMT_CONTROL, + FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15, + FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2); + } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) { + REG_UPDATE_2(FMT_CONTROL, + FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3, + FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1); + } else { + return; + } + } else { + REG_UPDATE_2(FMT_CONTROL, + FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0, + FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0); + } + + /*Set seed for random values for + * spatial dithering for R,G,B channels*/ + + REG_SET(FMT_DITHER_RAND_R_SEED, 0, + FMT_RAND_R_SEED, params->r_seed_value); + + REG_SET(FMT_DITHER_RAND_G_SEED, 0, + FMT_RAND_G_SEED, params->g_seed_value); + + REG_SET(FMT_DITHER_RAND_B_SEED, 0, + FMT_RAND_B_SEED, params->b_seed_value); + + /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero + * offset for the R/Cr channel, lower 4LSB + * is forced to zeros. Typically set to 0 + * RGB and 0x80000 YCbCr. + */ + /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero + * offset for the G/Y channel, lower 4LSB is + * forced to zeros. Typically set to 0 RGB + * and 0x80000 YCbCr. + */ + /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero + * offset for the B/Cb channel, lower 4LSB is + * forced to zeros. Typically set to 0 RGB and + * 0x80000 YCbCr. + */ + + REG_UPDATE_6(FMT_BIT_DEPTH_CONTROL, + /*Enable spatial dithering*/ + FMT_SPATIAL_DITHER_EN, params->flags.SPATIAL_DITHER_ENABLED, + /* Set spatial dithering mode + * (default is Seed patterrn AAAA...) + */ + FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE, + /*Set spatial dithering bit depth*/ + FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH, + /*Disable High pass filter*/ + FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM, + /*Reset only at startup*/ + FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM, + /*Set RGB data dithered with x^28+x^3+1*/ + FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM); +} + +void opp1_program_bit_depth_reduction( + struct output_pixel_processor *opp, + const struct bit_depth_reduction_params *params) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + + opp1_set_truncation(oppn10, params); + opp1_set_spatial_dither(oppn10, params); + /* TODO + * set_temporal_dither(oppn10, params); + */ +} + +/** + * opp1_set_pixel_encoding(): + * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly + * 1: YCbCr 4:2:2 + * + * @oppn10: output_pixel_processor struct instance for dcn10. + * @params: pointer to clamping_and_pixel_encoding_params. 
+ */ +static void opp1_set_pixel_encoding( + struct dcn10_opp *oppn10, + const struct clamping_and_pixel_encoding_params *params) +{ + bool force_chroma_subsampling_1tap = + oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap; + + switch (params->pixel_encoding) { + + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 0, + FMT_SUBSAMPLING_MODE, 0, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); + REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); + break; + case PIXEL_ENCODING_YCBCR422: + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 1, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); + break; + case PIXEL_ENCODING_YCBCR420: + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 2, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 1); + break; + default: + break; + } + + if (force_chroma_subsampling_1tap) + REG_UPDATE(FMT_CONTROL, FMT_SUBSAMPLING_MODE, 0); +} + +/** + * opp1_set_clamping(): + * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping) + * 1 for 8 bpc + * 2 for 10 bpc + * 3 for 12 bpc + * 7 for programable + * 2) Enable clamp if Limited range requested + * + * @oppn10: output_pixel_processor struct instance for dcn10. + * @params: pointer to clamping_and_pixel_encoding_params. + */ +static void opp1_set_clamping( + struct dcn10_opp *oppn10, + const struct clamping_and_pixel_encoding_params *params) +{ + REG_UPDATE_2(FMT_CLAMP_CNTL, + FMT_CLAMP_DATA_EN, 0, + FMT_CLAMP_COLOR_FORMAT, 0); + + switch (params->clamping_level) { + case CLAMPING_FULL_RANGE: + REG_UPDATE_2(FMT_CLAMP_CNTL, + FMT_CLAMP_DATA_EN, 1, + FMT_CLAMP_COLOR_FORMAT, 0); + break; + case CLAMPING_LIMITED_RANGE_8BPC: + REG_UPDATE_2(FMT_CLAMP_CNTL, + FMT_CLAMP_DATA_EN, 1, + FMT_CLAMP_COLOR_FORMAT, 1); + break; + case CLAMPING_LIMITED_RANGE_10BPC: + REG_UPDATE_2(FMT_CLAMP_CNTL, + FMT_CLAMP_DATA_EN, 1, + FMT_CLAMP_COLOR_FORMAT, 2); + + break; + case CLAMPING_LIMITED_RANGE_12BPC: + REG_UPDATE_2(FMT_CLAMP_CNTL, + FMT_CLAMP_DATA_EN, 1, + FMT_CLAMP_COLOR_FORMAT, 3); + break; + case CLAMPING_LIMITED_RANGE_PROGRAMMABLE: + /* TODO */ + default: + break; + } + +} + +void opp1_set_dyn_expansion( + struct output_pixel_processor *opp, + enum dc_color_space color_sp, + enum dc_color_depth color_dpth, + enum signal_type signal) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + + REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, + FMT_DYNAMIC_EXP_EN, 0, + FMT_DYNAMIC_EXP_MODE, 0); + + if (opp->dyn_expansion == DYN_EXPANSION_DISABLE) + return; + + /*00 - 10-bit -> 12-bit dynamic expansion*/ + /*01 - 8-bit -> 12-bit dynamic expansion*/ + if (signal == SIGNAL_TYPE_HDMI_TYPE_A || + signal == SIGNAL_TYPE_DISPLAY_PORT || + signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + signal == SIGNAL_TYPE_VIRTUAL) { + switch (color_dpth) { + case COLOR_DEPTH_888: + REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, + FMT_DYNAMIC_EXP_EN, 1, + FMT_DYNAMIC_EXP_MODE, 1); + break; + case COLOR_DEPTH_101010: + REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, + FMT_DYNAMIC_EXP_EN, 1, + FMT_DYNAMIC_EXP_MODE, 0); + break; + case COLOR_DEPTH_121212: + REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL, + FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/ + FMT_DYNAMIC_EXP_MODE, 0); + break; + default: + break; + } + } +} + +static void opp1_program_clamping_and_pixel_encoding( + struct output_pixel_processor *opp, + const struct clamping_and_pixel_encoding_params *params) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + + opp1_set_clamping(oppn10, params); + opp1_set_pixel_encoding(oppn10, params); +} + +void opp1_program_fmt( + struct 
output_pixel_processor *opp, + struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + + if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420) + REG_UPDATE(FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, 0); + + /* dithering is affected by , hence should be + * programmed afterwards */ + opp1_program_bit_depth_reduction( + opp, + fmt_bit_depth); + + opp1_program_clamping_and_pixel_encoding( + opp, + clamping); + + return; +} + +void opp1_program_stereo( + struct output_pixel_processor *opp, + bool enable, + const struct dc_crtc_timing *timing) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + + uint32_t active_width = timing->h_addressable - timing->h_border_right - timing->h_border_right; + uint32_t space1_size = timing->v_total - timing->v_addressable; + /* TODO: confirm computation of space2_size */ + uint32_t space2_size = timing->v_total - timing->v_addressable; + + if (!enable) { + active_width = 0; + space1_size = 0; + space2_size = 0; + } + + /* TODO: for which cases should FMT_STEREOSYNC_OVERRIDE be set? */ + REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, 0); + + REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, active_width); + + /* Program OPPBUF_3D_VACT_SPACE1_SIZE and OPPBUF_VACT_SPACE2_SIZE registers + * In 3D progressive frames, Vactive space happens only in between the 2 frames, + * so only need to program OPPBUF_3D_VACT_SPACE1_SIZE + * In 3D alternative frames, left and right frames, top and bottom field. + */ + if (timing->timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE) + REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, space2_size); + else + REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, space1_size); + + /* TODO: Is programming of OPPBUF_DUMMY_DATA_R/G/B needed? */ + /* + REG_UPDATE(OPPBUF_3D_PARAMETERS_0, + OPPBUF_DUMMY_DATA_R, data_r); + REG_UPDATE(OPPBUF_3D_PARAMETERS_1, + OPPBUF_DUMMY_DATA_G, data_g); + REG_UPDATE(OPPBUF_3D_PARAMETERS_1, + OPPBUF_DUMMY_DATA_B, _data_b); + */ +} + +void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + uint32_t regval = enable ? 
1 : 0; + + REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval); +} + +/*****************************************/ +/* Constructor, Destructor */ +/*****************************************/ + +void opp1_destroy(struct output_pixel_processor **opp) +{ + kfree(TO_DCN10_OPP(*opp)); + *opp = NULL; +} + +static const struct opp_funcs dcn10_opp_funcs = { + .opp_set_dyn_expansion = opp1_set_dyn_expansion, + .opp_program_fmt = opp1_program_fmt, + .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, + .opp_program_stereo = opp1_program_stereo, + .opp_pipe_clock_control = opp1_pipe_clock_control, + .opp_set_disp_pattern_generator = NULL, + .opp_program_dpg_dimensions = NULL, + .dpg_is_blanked = NULL, + .dpg_is_pending = NULL, + .opp_destroy = opp1_destroy +}; + +void dcn10_opp_construct(struct dcn10_opp *oppn10, + struct dc_context *ctx, + uint32_t inst, + const struct dcn10_opp_registers *regs, + const struct dcn10_opp_shift *opp_shift, + const struct dcn10_opp_mask *opp_mask) +{ + + oppn10->base.ctx = ctx; + oppn10->base.inst = inst; + oppn10->base.funcs = &dcn10_opp_funcs; + + oppn10->regs = regs; + oppn10->opp_shift = opp_shift; + oppn10->opp_mask = opp_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h new file mode 100644 index 00000000000000..c87de68a509e49 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h @@ -0,0 +1,191 @@ +/* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_OPP_DCN10_H__ +#define __DC_OPP_DCN10_H__ + +#include "opp.h" + +#define TO_DCN10_OPP(opp)\ + container_of(opp, struct dcn10_opp, base) + +#define OPP_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define OPP_REG_LIST_DCN(id) \ + SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \ + SRI(FMT_CONTROL, FMT, id), \ + SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \ + SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \ + SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \ + SRI(FMT_CLAMP_CNTL, FMT, id), \ + SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \ + SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id), \ + SRI(OPPBUF_CONTROL, OPPBUF, id),\ + SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \ + SRI(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \ + SRI(OPP_PIPE_CONTROL, OPP_PIPE, id) + +#define OPP_REG_LIST_DCN10(id) \ + OPP_REG_LIST_DCN(id) + +#define OPP_COMMON_REG_VARIABLE_LIST \ + uint32_t FMT_BIT_DEPTH_CONTROL; \ + uint32_t FMT_CONTROL; \ + uint32_t FMT_DITHER_RAND_R_SEED; \ + uint32_t FMT_DITHER_RAND_G_SEED; \ + uint32_t FMT_DITHER_RAND_B_SEED; \ + uint32_t FMT_CLAMP_CNTL; \ + uint32_t FMT_DYNAMIC_EXP_CNTL; \ + uint32_t FMT_MAP420_MEMORY_CONTROL; \ + uint32_t OPPBUF_CONTROL; \ + uint32_t OPPBUF_CONTROL1; \ + uint32_t OPPBUF_3D_PARAMETERS_0; \ + uint32_t OPPBUF_3D_PARAMETERS_1; \ + uint32_t OPP_PIPE_CONTROL + +#define OPP_MASK_SH_LIST_DCN(mask_sh) \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh), \ + OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \ + OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \ + OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \ + OPP_SF(FMT0_FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh), \ + OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh), \ + OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh), \ + OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh), \ + OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh), \ + OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh), \ + OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\ + OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, mask_sh),\ + OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh), \ + OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, mask_sh), \ + OPP_SF(OPP_PIPE0_OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh) + +#define OPP_MASK_SH_LIST_DCN10(mask_sh) \ + OPP_MASK_SH_LIST_DCN(mask_sh), \ + OPP_SF(OPPBUF0_OPPBUF_CONTROL, 
OPPBUF_DISPLAY_SEGMENTATION, mask_sh),\ + OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, mask_sh) + +#define OPP_DCN10_REG_FIELD_LIST(type) \ + type FMT_TRUNCATE_EN; \ + type FMT_TRUNCATE_DEPTH; \ + type FMT_TRUNCATE_MODE; \ + type FMT_SPATIAL_DITHER_EN; \ + type FMT_SPATIAL_DITHER_MODE; \ + type FMT_SPATIAL_DITHER_DEPTH; \ + type FMT_TEMPORAL_DITHER_EN; \ + type FMT_HIGHPASS_RANDOM_ENABLE; \ + type FMT_FRAME_RANDOM_ENABLE; \ + type FMT_RGB_RANDOM_ENABLE; \ + type FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX; \ + type FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP; \ + type FMT_RAND_R_SEED; \ + type FMT_RAND_G_SEED; \ + type FMT_RAND_B_SEED; \ + type FMT_PIXEL_ENCODING; \ + type FMT_SUBSAMPLING_MODE; \ + type FMT_CBCR_BIT_REDUCTION_BYPASS; \ + type FMT_CLAMP_DATA_EN; \ + type FMT_CLAMP_COLOR_FORMAT; \ + type FMT_DYNAMIC_EXP_EN; \ + type FMT_DYNAMIC_EXP_MODE; \ + type FMT_MAP420MEM_PWR_FORCE; \ + type FMT_STEREOSYNC_OVERRIDE; \ + type OPPBUF_ACTIVE_WIDTH;\ + type OPPBUF_PIXEL_REPETITION;\ + type OPPBUF_DISPLAY_SEGMENTATION;\ + type OPPBUF_OVERLAP_PIXEL_NUM;\ + type OPPBUF_NUM_SEGMENT_PADDED_PIXELS; \ + type OPPBUF_3D_VACT_SPACE1_SIZE; \ + type OPPBUF_3D_VACT_SPACE2_SIZE; \ + type OPP_PIPE_CLOCK_EN + +struct dcn10_opp_registers { + OPP_COMMON_REG_VARIABLE_LIST; +}; + +struct dcn10_opp_shift { + OPP_DCN10_REG_FIELD_LIST(uint8_t); +}; + +struct dcn10_opp_mask { + OPP_DCN10_REG_FIELD_LIST(uint32_t); +}; + +struct dcn10_opp { + struct output_pixel_processor base; + + const struct dcn10_opp_registers *regs; + const struct dcn10_opp_shift *opp_shift; + const struct dcn10_opp_mask *opp_mask; + + bool is_write_to_ram_a_safe; +}; + +void dcn10_opp_construct(struct dcn10_opp *oppn10, + struct dc_context *ctx, + uint32_t inst, + const struct dcn10_opp_registers *regs, + const struct dcn10_opp_shift *opp_shift, + const struct dcn10_opp_mask *opp_mask); + +void opp1_set_dyn_expansion( + struct output_pixel_processor *opp, + enum dc_color_space color_sp, + enum dc_color_depth color_dpth, + enum signal_type signal); + +void opp1_program_fmt( + struct output_pixel_processor *opp, + struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping); + +void opp1_program_bit_depth_reduction( + struct output_pixel_processor *opp, + const struct bit_depth_reduction_params *params); + +void opp1_program_stereo( + struct output_pixel_processor *opp, + bool enable, + const struct dc_crtc_timing *timing); + +void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable); + +void opp1_destroy(struct output_pixel_processor **opp); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c new file mode 100644 index 00000000000000..f5fe0cac7cb064 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c @@ -0,0 +1,415 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dm_services.h" +#include "dcn20_opp.h" +#include "reg_helper.h" + +#define REG(reg) \ + (oppn20->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + oppn20->opp_shift->field_name, oppn20->opp_mask->field_name + +#define CTX \ + oppn20->base.ctx + + +void opp2_set_disp_pattern_generator( + struct output_pixel_processor *opp, + enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, + enum dc_color_depth color_depth, + const struct tg_color *solid_color, + int width, + int height, + int offset) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + enum test_pattern_color_format bit_depth; + enum test_pattern_dyn_range dyn_range; + enum test_pattern_mode mode; + + /* color ramp generator mixes 16-bits color */ + uint32_t src_bpc = 16; + /* requested bpc */ + uint32_t dst_bpc; + uint32_t index; + /* RGB values of the color bars. + * Produce two RGB colors: RGB0 - white (all Fs) + * and RGB1 - black (all 0s) + * (three RGB components for two colors) + */ + uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000, + 0x0000, 0x0000}; + /* dest color (converted to the specified color format) */ + uint16_t dst_color[6]; + uint32_t inc_base; + + /* translate to bit depth */ + switch (color_depth) { + case COLOR_DEPTH_666: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6; + break; + case COLOR_DEPTH_888: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; + break; + case COLOR_DEPTH_101010: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10; + break; + case COLOR_DEPTH_121212: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12; + break; + default: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; + break; + } + + /* set DPG dimentions */ + REG_SET_2(DPG_DIMENSIONS, 0, + DPG_ACTIVE_WIDTH, width, + DPG_ACTIVE_HEIGHT, height); + + /* set DPG offset */ + REG_SET_2(DPG_OFFSET_SEGMENT, 0, + DPG_X_OFFSET, offset, + DPG_SEGMENT_WIDTH, 0); + + switch (test_pattern) { + case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES: + case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA: + { + dyn_range = (test_pattern == + CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ? + TEST_PATTERN_DYN_RANGE_CEA : + TEST_PATTERN_DYN_RANGE_VESA); + + switch (color_space) { + case CONTROLLER_DP_COLOR_SPACE_YCBCR601: + mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601; + break; + case CONTROLLER_DP_COLOR_SPACE_YCBCR709: + mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709; + break; + case CONTROLLER_DP_COLOR_SPACE_RGB: + default: + mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; + break; + } + + REG_UPDATE_6(DPG_CONTROL, + DPG_EN, 1, + DPG_MODE, mode, + DPG_DYNAMIC_RANGE, dyn_range, + DPG_BIT_DEPTH, bit_depth, + DPG_VRES, 6, + DPG_HRES, 6); + } + break; + + case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS: + case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS: + { + mode = (test_pattern == + CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ? 
+ TEST_PATTERN_MODE_VERTICALBARS : + TEST_PATTERN_MODE_HORIZONTALBARS); + + switch (bit_depth) { + case TEST_PATTERN_COLOR_FORMAT_BPC_6: + dst_bpc = 6; + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_8: + dst_bpc = 8; + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_10: + dst_bpc = 10; + break; + default: + dst_bpc = 8; + break; + } + + /* adjust color to the required colorFormat */ + for (index = 0; index < 6; index++) { + /* dst = 2^dstBpc * src / 2^srcBpc = src >> + * (srcBpc - dstBpc); + */ + dst_color[index] = + src_color[index] >> (src_bpc - dst_bpc); + /* DPG_COLOUR registers are 16-bit MSB aligned value with bits 3:0 hardwired to ZERO. + * XXXXXXXXXX000000 for 10 bit, + * XXXXXXXX00000000 for 8 bit, + * XXXXXX0000000000 for 6 bits + */ + dst_color[index] <<= (16 - dst_bpc); + } + + REG_SET_2(DPG_COLOUR_R_CR, 0, + DPG_COLOUR1_R_CR, dst_color[0], + DPG_COLOUR0_R_CR, dst_color[3]); + REG_SET_2(DPG_COLOUR_G_Y, 0, + DPG_COLOUR1_G_Y, dst_color[1], + DPG_COLOUR0_G_Y, dst_color[4]); + REG_SET_2(DPG_COLOUR_B_CB, 0, + DPG_COLOUR1_B_CB, dst_color[2], + DPG_COLOUR0_B_CB, dst_color[5]); + + /* enable test pattern */ + REG_UPDATE_6(DPG_CONTROL, + DPG_EN, 1, + DPG_MODE, mode, + DPG_DYNAMIC_RANGE, 0, + DPG_BIT_DEPTH, bit_depth, + DPG_VRES, 0, + DPG_HRES, 0); + } + break; + + case CONTROLLER_DP_TEST_PATTERN_COLORRAMP: + { + mode = (bit_depth == + TEST_PATTERN_COLOR_FORMAT_BPC_10 ? + TEST_PATTERN_MODE_DUALRAMP_RGB : + TEST_PATTERN_MODE_SINGLERAMP_RGB); + + switch (bit_depth) { + case TEST_PATTERN_COLOR_FORMAT_BPC_6: + dst_bpc = 6; + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_8: + dst_bpc = 8; + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_10: + dst_bpc = 10; + break; + default: + dst_bpc = 8; + break; + } + + /* increment for the first ramp for one color gradation + * 1 gradation for 6-bit color is 2^10 + * gradations in 16-bit color + */ + inc_base = (src_bpc - dst_bpc); + + switch (bit_depth) { + case TEST_PATTERN_COLOR_FORMAT_BPC_6: + { + REG_SET_3(DPG_RAMP_CONTROL, 0, + DPG_RAMP0_OFFSET, 0, + DPG_INC0, inc_base, + DPG_INC1, 0); + REG_UPDATE_2(DPG_CONTROL, + DPG_VRES, 6, + DPG_HRES, 6); + } + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_8: + { + REG_SET_3(DPG_RAMP_CONTROL, 0, + DPG_RAMP0_OFFSET, 0, + DPG_INC0, inc_base, + DPG_INC1, 0); + REG_UPDATE_2(DPG_CONTROL, + DPG_VRES, 6, + DPG_HRES, 8); + } + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_10: + { + REG_SET_3(DPG_RAMP_CONTROL, 0, + DPG_RAMP0_OFFSET, 384 << 6, + DPG_INC0, inc_base, + DPG_INC1, inc_base + 2); + REG_UPDATE_2(DPG_CONTROL, + DPG_VRES, 5, + DPG_HRES, 8); + } + break; + default: + break; + } + + /* enable test pattern */ + REG_UPDATE_4(DPG_CONTROL, + DPG_EN, 1, + DPG_MODE, mode, + DPG_DYNAMIC_RANGE, 0, + DPG_BIT_DEPTH, bit_depth); + } + break; + case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE: + { + REG_WRITE(DPG_CONTROL, 0); + REG_WRITE(DPG_COLOUR_R_CR, 0); + REG_WRITE(DPG_COLOUR_G_Y, 0); + REG_WRITE(DPG_COLOUR_B_CB, 0); + REG_WRITE(DPG_RAMP_CONTROL, 0); + } + break; + case CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR: + { + opp2_dpg_set_blank_color(opp, solid_color); + REG_UPDATE_2(DPG_CONTROL, + DPG_EN, 1, + DPG_MODE, TEST_PATTERN_MODE_HORIZONTALBARS); + + REG_SET_2(DPG_DIMENSIONS, 0, + DPG_ACTIVE_WIDTH, width, + DPG_ACTIVE_HEIGHT, height); + } + break; + default: + break; + + } +} + +void opp2_program_dpg_dimensions( + struct output_pixel_processor *opp, + int width, int height) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + + REG_SET_2(DPG_DIMENSIONS, 0, + DPG_ACTIVE_WIDTH, width, + DPG_ACTIVE_HEIGHT, height); +} + +void 
opp2_dpg_set_blank_color( + struct output_pixel_processor *opp, + const struct tg_color *color) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + + /* 16-bit MSB aligned value. Bits 3:0 of this field are hardwired to ZERO */ + ASSERT(color); + REG_SET_2(DPG_COLOUR_B_CB, 0, + DPG_COLOUR1_B_CB, color->color_b_cb << 6, + DPG_COLOUR0_B_CB, color->color_b_cb << 6); + REG_SET_2(DPG_COLOUR_G_Y, 0, + DPG_COLOUR1_G_Y, color->color_g_y << 6, + DPG_COLOUR0_G_Y, color->color_g_y << 6); + REG_SET_2(DPG_COLOUR_R_CR, 0, + DPG_COLOUR1_R_CR, color->color_r_cr << 6, + DPG_COLOUR0_R_CR, color->color_r_cr << 6); +} + +bool opp2_dpg_is_blanked(struct output_pixel_processor *opp) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + uint32_t dpg_en, dpg_mode; + uint32_t double_buffer_pending; + + REG_GET_2(DPG_CONTROL, + DPG_EN, &dpg_en, + DPG_MODE, &dpg_mode); + + REG_GET(DPG_STATUS, + DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); + + return (dpg_en == 1) && + (double_buffer_pending == 0); +} + +bool opp2_dpg_is_pending(struct output_pixel_processor *opp) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + uint32_t double_buffer_pending; + uint32_t dpg_en; + + REG_GET(DPG_CONTROL, DPG_EN, &dpg_en); + + REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); + + return (dpg_en == 1 && double_buffer_pending == 1); +} + +void opp2_program_left_edge_extra_pixel( + struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, + bool is_primary) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + uint32_t count = opp2_get_left_edge_extra_pixel_count(opp, pixel_encoding, is_primary); + + /* + * Specifies the number of extra left edge pixels that are supplied to + * the 422 horizontal chroma sub-sample filter. + */ + REG_UPDATE(FMT_422_CONTROL, FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT, count); +} + +uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, bool is_primary) +{ + if ((pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420) && + !opp->ctx->dc->debug.force_chroma_subsampling_1tap && + !is_primary) + return 1; + else + return 0; +} + +/*****************************************/ +/* Constructor, Destructor */ +/*****************************************/ + +static struct opp_funcs dcn20_opp_funcs = { + .opp_set_dyn_expansion = opp1_set_dyn_expansion, + .opp_program_fmt = opp1_program_fmt, + .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, + .opp_program_stereo = opp1_program_stereo, + .opp_pipe_clock_control = opp1_pipe_clock_control, + .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, + .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, + .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, + .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, + .opp_destroy = opp1_destroy, + .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, + .opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count, +}; + +void dcn20_opp_construct(struct dcn20_opp *oppn20, + struct dc_context *ctx, + uint32_t inst, + const struct dcn20_opp_registers *regs, + const struct dcn20_opp_shift *opp_shift, + const struct dcn20_opp_mask *opp_mask) +{ + oppn20->base.ctx = ctx; + oppn20->base.inst = inst; + oppn20->base.funcs = &dcn20_opp_funcs; + + oppn20->regs = regs; + oppn20->opp_shift = opp_shift; + oppn20->opp_mask = opp_mask; +} + diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h 
b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h new file mode 100644 index 00000000000000..34936e6c49f3fb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h @@ -0,0 +1,174 @@ +/* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPP_DCN20_H__ +#define __DC_OPP_DCN20_H__ + +#include "dcn10/dcn10_opp.h" + +#define TO_DCN20_OPP(opp)\ + container_of(opp, struct dcn20_opp, base) + +#define OPP_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define OPP_DPG_REG_LIST(id) \ + SRI(DPG_CONTROL, DPG, id), \ + SRI(DPG_DIMENSIONS, DPG, id), \ + SRI(DPG_OFFSET_SEGMENT, DPG, id), \ + SRI(DPG_COLOUR_B_CB, DPG, id), \ + SRI(DPG_COLOUR_G_Y, DPG, id), \ + SRI(DPG_COLOUR_R_CR, DPG, id), \ + SRI(DPG_RAMP_CONTROL, DPG, id), \ + SRI(DPG_STATUS, DPG, id) + +#define OPP_REG_LIST_DCN20(id) \ + OPP_REG_LIST_DCN10(id), \ + OPP_DPG_REG_LIST(id), \ + SRI(FMT_422_CONTROL, FMT, id), \ + SRI(OPPBUF_CONTROL1, OPPBUF, id) + +#define OPP_REG_VARIABLE_LIST_DCN2_0 \ + OPP_COMMON_REG_VARIABLE_LIST; \ + uint32_t FMT_422_CONTROL; \ + uint32_t DPG_CONTROL; \ + uint32_t DPG_DIMENSIONS; \ + uint32_t DPG_OFFSET_SEGMENT; \ + uint32_t DPG_COLOUR_B_CB; \ + uint32_t DPG_COLOUR_G_Y; \ + uint32_t DPG_COLOUR_R_CR; \ + uint32_t DPG_RAMP_CONTROL; \ + uint32_t DPG_STATUS + +#define OPP_DPG_MASK_SH_LIST(mask_sh) \ + OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \ + OPP_SF(DPG0_DPG_CONTROL, DPG_MODE, mask_sh), \ + OPP_SF(DPG0_DPG_CONTROL, DPG_DYNAMIC_RANGE, mask_sh), \ + OPP_SF(DPG0_DPG_CONTROL, DPG_BIT_DEPTH, mask_sh), \ + OPP_SF(DPG0_DPG_CONTROL, DPG_VRES, mask_sh), \ + OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \ + OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \ + OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \ + OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \ + OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \ + OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \ + OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \ + OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \ + OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR1_B_CB, mask_sh), \ + OPP_SF(DPG0_DPG_COLOUR_G_Y, DPG_COLOUR0_G_Y, mask_sh), \ + OPP_SF(DPG0_DPG_COLOUR_G_Y, DPG_COLOUR1_G_Y, mask_sh), \ + OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_RAMP0_OFFSET, mask_sh), \ + OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_INC0, mask_sh), \ + 
OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_INC1, mask_sh), \ + OPP_SF(DPG0_DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, mask_sh) + +#define OPP_MASK_SH_LIST_DCN20(mask_sh) \ + OPP_MASK_SH_LIST_DCN(mask_sh), \ + OPP_DPG_MASK_SH_LIST(mask_sh), \ + OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, mask_sh),\ + OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, mask_sh), \ + OPP_SF(FMT0_FMT_422_CONTROL, FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT, mask_sh) + +#define OPP_DCN20_REG_FIELD_LIST(type) \ + OPP_DCN10_REG_FIELD_LIST(type); \ + type FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT; \ + type DPG_EN; \ + type DPG_MODE; \ + type DPG_DYNAMIC_RANGE; \ + type DPG_BIT_DEPTH; \ + type DPG_VRES; \ + type DPG_HRES; \ + type DPG_ACTIVE_WIDTH; \ + type DPG_ACTIVE_HEIGHT; \ + type DPG_X_OFFSET; \ + type DPG_SEGMENT_WIDTH; \ + type DPG_COLOUR0_R_CR; \ + type DPG_COLOUR1_R_CR; \ + type DPG_COLOUR0_B_CB; \ + type DPG_COLOUR1_B_CB; \ + type DPG_COLOUR0_G_Y; \ + type DPG_COLOUR1_G_Y; \ + type DPG_RAMP0_OFFSET; \ + type DPG_INC0; \ + type DPG_INC1; \ + type DPG_DOUBLE_BUFFER_PENDING + +struct dcn20_opp_registers { + OPP_REG_VARIABLE_LIST_DCN2_0; +}; + +struct dcn20_opp_shift { + OPP_DCN20_REG_FIELD_LIST(uint8_t); +}; + +struct dcn20_opp_mask { + OPP_DCN20_REG_FIELD_LIST(uint32_t); +}; + +struct dcn20_opp { + struct output_pixel_processor base; + + const struct dcn20_opp_registers *regs; + const struct dcn20_opp_shift *opp_shift; + const struct dcn20_opp_mask *opp_mask; + + bool is_write_to_ram_a_safe; +}; + +void dcn20_opp_construct(struct dcn20_opp *oppn20, + struct dc_context *ctx, + uint32_t inst, + const struct dcn20_opp_registers *regs, + const struct dcn20_opp_shift *opp_shift, + const struct dcn20_opp_mask *opp_mask); + +void opp2_set_disp_pattern_generator( + struct output_pixel_processor *opp, + enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, + enum dc_color_depth color_depth, + const struct tg_color *solid_color, + int width, + int height, + int offset); + +void opp2_program_dpg_dimensions( + struct output_pixel_processor *opp, + int width, int height); + +bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); + +bool opp2_dpg_is_pending(struct output_pixel_processor *opp); + +void opp2_dpg_set_blank_color( + struct output_pixel_processor *opp, + const struct tg_color *color); + +void opp2_program_left_edge_extra_pixel ( + struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, bool is_primary); + +uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, bool is_primary); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c index 94427875bcdd70..097d06023e6444 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c @@ -65,7 +65,8 @@ void optc1_program_global_sync( int vready_offset, int vstartup_start, int vupdate_offset, - int vupdate_width) + int vupdate_width, + int pstate_keepout) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -73,6 +74,7 @@ void optc1_program_global_sync( optc1->vstartup_start = vstartup_start; optc1->vupdate_offset = vupdate_offset; optc1->vupdate_width = vupdate_width; + optc1->pstate_keepout = pstate_keepout; if (optc1->vstartup_start == 0) { BREAK_TO_DEBUGGER(); @@ -146,6 +148,7 @@ void optc1_setup_vertical_interrupt2( * @vstartup_start: Vstartup period. * @vupdate_offset: Vupdate starting position. 
* @vupdate_width: Vupdate duration. + * @pstate_keepout: determines low power mode timing during refresh * @signal: DC signal types. * @use_vbios: to program timings from BIOS command table. * @@ -157,6 +160,7 @@ void optc1_program_timing( int vstartup_start, int vupdate_offset, int vupdate_width, + int pstate_keepout, const enum signal_type signal, bool use_vbios) { @@ -177,6 +181,7 @@ void optc1_program_timing( optc1->vstartup_start = vstartup_start; optc1->vupdate_offset = vupdate_offset; optc1->vupdate_width = vupdate_width; + optc1->pstate_keepout = pstate_keepout; patched_crtc_timing = *dc_crtc_timing; apply_front_porch_workaround(&patched_crtc_timing); optc1->orginal_patched_timing = patched_crtc_timing; @@ -282,7 +287,8 @@ void optc1_program_timing( vready_offset, vstartup_start, vupdate_offset, - vupdate_width); + vupdate_width, + pstate_keepout); optc->funcs->set_vtg_params(optc, dc_crtc_timing, true); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index 369a13244e5ecc..b7a57f98553d78 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -201,6 +201,7 @@ struct dcn_optc_registers { uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK; uint32_t OPTC_CLOCK_CONTROL; uint32_t OPTC_WIDTH_CONTROL2; + uint32_t OTG_PSTATE_REGISTER; }; #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ @@ -590,7 +591,11 @@ struct dcn_optc_registers { type OTG_V_COUNT_STOP_TIMER; #define TG_REG_FIELD_LIST_DCN401(type) \ - type OPTC_SEGMENT_WIDTH_LAST; + type OPTC_SEGMENT_WIDTH_LAST;\ + type OTG_PSTATE_KEEPOUT_START;\ + type OTG_PSTATE_EXTEND;\ + type OTG_UNBLANK;\ + type OTG_PSTATE_ALLOW_WIDTH_MIN; struct dcn_optc_shift { diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c index 6bbbf313b2bbf6..4b6446ed4ce471 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c @@ -149,7 +149,9 @@ static bool optc31_disable_crtc(struct timing_generator *optc) return true; } - +/* + * Immediate_Disable_Crtc - this is to temp disable Timing generator without reset ODM. + */ bool optc31_immediate_disable_crtc(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -162,10 +164,12 @@ bool optc31_immediate_disable_crtc(struct timing_generator *optc) VTG0_ENABLE, 0); /* CRTC disabled, so disable clock. 
*/ - REG_WAIT(OTG_CLOCK_CONTROL, + if (optc->ctx->dce_environment != DCE_ENV_DIAG) + REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); + /* clear the false state */ optc1_clear_optc_underflow(optc); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index 9f5c2efa7560b8..a5d6a7dca554c3 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -396,13 +396,47 @@ void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, i } } +static void optc401_program_global_sync( + struct timing_generator *optc, + int vready_offset, + int vstartup_start, + int vupdate_offset, + int vupdate_width, + int pstate_keepout) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + optc1->vready_offset = vready_offset; + optc1->vstartup_start = vstartup_start; + optc1->vupdate_offset = vupdate_offset; + optc1->vupdate_width = vupdate_width; + optc1->pstate_keepout = pstate_keepout; + + if (optc1->vstartup_start == 0) { + BREAK_TO_DEBUGGER(); + return; + } + + REG_SET(OTG_VSTARTUP_PARAM, 0, + VSTARTUP_START, optc1->vstartup_start); + + REG_SET_2(OTG_VUPDATE_PARAM, 0, + VUPDATE_OFFSET, optc1->vupdate_offset, + VUPDATE_WIDTH, optc1->vupdate_width); + + REG_SET(OTG_VREADY_PARAM, 0, + VREADY_OFFSET, optc1->vready_offset); + + REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout); +} + static struct timing_generator_funcs dcn401_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, - .program_global_sync = optc1_program_global_sync, + .program_global_sync = optc401_program_global_sync, .enable_crtc = optc401_enable_crtc, .disable_crtc = optc401_disable_crtc, .phantom_crtc_post_enable = optc401_phantom_crtc_post_enable, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h index 3114ecef332a5c..bb13a645802d0e 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h @@ -155,7 +155,11 @@ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, mask_sh),\ + SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_EXTEND, mask_sh),\ + SF(OTG0_OTG_PSTATE_REGISTER, OTG_UNBLANK, mask_sh),\ + SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh) void dcn401_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile index 4860bb2531a13d..09320344d8e961 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/Makefile +++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile @@ -198,8 +198,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351) ############################################################################### -############################################################################### - RESOURCE_DCN401 = dcn401_resource.o 
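The optc401_program_global_sync() hunk above ends with REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout), which changes one field while leaving the rest of the register untouched. A minimal sketch of that read-modify-write idea follows, with a fake register and made-up mask/shift values; the driver's real REG_UPDATE goes through its reg_helper machinery rather than this simplified form.

#include <stdint.h>
#include <stdio.h>

/* stand-in for MMIO access; in the driver these are real register accesses */
static uint32_t fake_reg = 0xffff0000;
static uint32_t reg_read(void)            { return fake_reg; }
static void     reg_write(uint32_t value) { fake_reg = value; }

/* read-modify-write of a single field, given its mask and shift */
static void reg_update_field(uint32_t mask, uint8_t shift, uint32_t field_value)
{
	uint32_t reg = reg_read();

	reg &= ~mask;                         /* clear the field            */
	reg |= (field_value << shift) & mask; /* insert the new field value */
	reg_write(reg);
}

int main(void)
{
	/* e.g. a keepout field occupying bits 13:0 (mask and shift are made up) */
	reg_update_field(0x3fff, 0, 123);
	printf("register now 0x%08x\n", fake_reg); /* 0xffff007b */
	return 0;
}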
AMD_DAL_RESOURCE_DCN401 = $(addprefix $(AMDDALPATH)/dc/resource/dcn401/,$(RESOURCE_DCN401)) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index fe518fd27b083d..91da5cf85b69fa 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -1163,6 +1163,7 @@ static struct pipe_ctx *dce110_acquire_underlay( 0, 0, 0, + 0, pipe_ctx->stream->signal, false); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 88afb2a30eef50..162856c523e40c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -1067,7 +1067,10 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) struct dm_pp_clock_levels clks = {0}; int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; - if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm) + if (!dc->bw_vbios) + return; + + if (dc->bw_vbios->memory_type == bw_def_hbm) memory_type_multiplier = MEMORY_TYPE_HBM; /*do system clock TODO PPLIB: after PPLIB implement, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 5e7cfa8e8ec93d..eea2b3b307cd5f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -2040,6 +2040,7 @@ bool dcn20_fast_validate_bw( { bool out = false; int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; int pipe_cnt, i, pipe_idx, vlevel; ASSERT(pipes); @@ -2064,7 +2065,7 @@ bool dcn20_fast_validate_bw( if (vlevel > context->bw_ctx.dml.soc.num_states) goto validate_fail; - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL); + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); /*initialize pipe_just_split_from to invalid idx*/ for (i = 0; i < MAX_PIPES; i++) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 131d98025bd475..fc54483b91047a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -1007,8 +1007,10 @@ static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer( struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); - if (!head_pipe) + if (!head_pipe) { ASSERT(0); + return NULL; + } if (!idle_pipe) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 8663cbc3d1cf5e..347e6aaea582fb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -774,6 +774,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, { bool out = false; int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; int pipe_cnt, i, pipe_idx, vlevel; ASSERT(pipes); @@ -816,7 +817,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, goto validate_fail; } - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, 
split, NULL); + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 5d1801dce2730e..ac8cb20e2e3b64 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1948,6 +1948,7 @@ static bool dcn31_resource_construct( /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; + dc->config.disable_hbr_audio_dp2 = true; /* read VBIOS LTTPR caps */ { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 969658313fd65a..a124ad9bd108c8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1651,6 +1651,9 @@ static void dcn32_enable_phantom_plane(struct dc *dc, else phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state); + if (!phantom_plane) + continue; + memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, sizeof(phantom_plane->scaling_quality)); @@ -1717,6 +1720,9 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, // be a valid candidate for SubVP (i.e. has a plane, stream, doesn't // already have phantom pipe assigned, etc.) by previous checks. phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index); + if (!phantom_stream) + return; + dcn32_enable_phantom_plane(dc, context, phantom_stream, index); for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2220,6 +2226,7 @@ static bool dcn32_resource_construct( dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; + dc->config.disable_hbr_audio_dp2 = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -2671,8 +2678,10 @@ static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx; int head_index; - if (!head_pipe) + if (!head_pipe) { ASSERT(0); + return NULL; + } /* * Modified from dcn20_acquire_idle_pipe_for_layer diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index fee67fbab8e212..7901792afb7b3a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -505,6 +505,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \ SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \ SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \ + SRI_ARR(CM_TEST_DEBUG_INDEX, CM, id), \ + SRI_ARR(CM_TEST_DEBUG_DATA, CM, id), \ SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \ SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ SRI_ARR(CURSOR0_CONTROL, CNVC_CUR, id), \ @@ -761,6 +763,7 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned SRI_ARR(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \ SRI_ARR(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, 
id), \ SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCC_TEST_DEBUG_BUS_ROTATE, DSCC, id), \ SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \ SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \ SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) @@ -1185,6 +1188,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), SR(DCHUBBUB_ARB_SAT_LEVEL), \ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_TEST_DEBUG_INDEX), \ + SR(DCHUBBUB_TEST_DEBUG_DATA), \ SR(DCHUBBUB_SOFT_RESET), SR(DCHUBBUB_CRC_CTRL), \ SR(DCN_VM_FB_LOCATION_BASE), SR(DCN_VM_FB_LOCATION_TOP), \ SR(DCN_VM_FB_OFFSET), SR(DCN_VM_AGP_BOT), SR(DCN_VM_AGP_TOP), \ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c index d184105ce2b3e6..f5a4e97c40ced2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c @@ -218,12 +218,12 @@ bool dcn32_is_center_timing(struct pipe_ctx *pipe) pipe->stream->timing.v_addressable != pipe->stream->src.height) { is_center_timing = true; } - } - if (pipe->plane_state) { - if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height && - pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) { - is_center_timing = true; + if (pipe->plane_state) { + if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height && + pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) { + is_center_timing = true; + } } } @@ -663,7 +663,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + pipe->stream->timing.v_total * (unsigned long long)pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); } @@ -724,7 +724,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + pipe->stream->timing.v_total * (unsigned long long)pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 8e0588b1cf3054..827a94f84f1001 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1783,6 +1783,7 @@ static bool dcn321_resource_construct( dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; + dc->config.disable_hbr_audio_dp2 = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 
ddf251901fb331..893a9d9ee870df 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -786,6 +786,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmub_reallow_idle = false, .static_screen_wait_frames = 2, .disable_timeout = true, + .min_disp_clk_khz = 50000, }; static const struct dc_panel_config panel_config_defaults = { @@ -1899,6 +1900,7 @@ static bool dcn35_resource_construct( /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; + dc->config.disable_hbr_audio_dp2 = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -2153,6 +2155,7 @@ static bool dcn35_resource_construct( dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + dc->dml2_options.override_det_buffer_size_kbytes = true; if (dc->config.sdpif_request_limit_words_per_umc == 0) dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 4c5e722baa3a68..70abd32ce2ad18 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -736,7 +736,7 @@ static const struct dc_debug_options debug_defaults_drv = { .hdmichar = true, .dpstream = true, .symclk32_se = true, - .symclk32_le = true, + .symclk32_le = false, .symclk_fe = true, .physymclk = false, .dpiasymclk = true, @@ -766,6 +766,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmub_reallow_idle = false, .static_screen_wait_frames = 2, .notify_dpia_hr_bw = true, + .min_disp_clk_khz = 50000, }; static const struct dc_panel_config panel_config_defaults = { @@ -2133,6 +2134,7 @@ static bool dcn351_resource_construct( dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + dc->dml2_options.override_det_buffer_size_kbytes = true; if (dc->config.sdpif_request_limit_words_per_umc == 0) dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 34b02147881ddb..9d56fbdcd06afd 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -76,6 +76,9 @@ #include "dml2/dml2_wrapper.h" +#include "spl/dc_spl_scl_easf_filters.h" +#include "spl/dc_spl_isharp_filters.h" + #define DC_LOGGER_INIT(logger) enum dcn401_clk_src_array_id { @@ -1188,7 +1191,7 @@ static struct stream_encoder *dcn401_stream_encoder_create( vpg = dcn401_vpg_create(ctx, vpg_inst); afmt = dcn401_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) { + if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) { kfree(enc1); kfree(vpg); kfree(afmt); @@ -1822,6 +1825,7 @@ static bool dcn401_resource_construct( dc->caps.edp_dsc_support = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; + dc->caps.max_v_total = (1 << 15) - 1; if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) dc->caps.dcc_plane_width_limit = 7680; @@ -2099,6 +2103,7 @@ static bool dcn401_resource_construct( dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; 
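The dcn32_subvp_drr_admissable()/dcn32_subvp_vblank_admissable() hunks above add a 64-bit cast so that the v_total * h_total product in the refresh-rate derivation cannot overflow. The calculation adds (v_total * h_total - 1) before dividing, giving a rounded-up rate. A small worked example of that arithmetic with made-up timing numbers; the driver itself uses div_u64() on u64 values.

#include <stdint.h>
#include <stdio.h>

/* same staged computation as the code above: pix_clk_100hz * 100 plus
 * (v_total * h_total - 1), then divide by v_total and h_total in turn
 */
static uint64_t refresh_rate_hz(uint32_t pix_clk_100hz, uint32_t h_total, uint32_t v_total)
{
	uint64_t rate = (uint64_t)pix_clk_100hz * 100 +
			(uint64_t)v_total * h_total - 1;

	rate /= v_total;
	rate /= h_total;
	return rate;
}

int main(void)
{
	/* made-up 1080p-like timing: 148.5 MHz pixel clock, 2200 x 1125 total */
	printf("%llu Hz\n",
	       (unsigned long long)refresh_rate_hz(1485000, 2200, 1125)); /* 60 Hz */
	return 0;
}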
dc->dml2_options.map_dc_pipes_with_callbacks = true; + dc->dml2_options.force_tdlut_enable = true; resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; @@ -2124,6 +2129,10 @@ static bool dcn401_resource_construct( dc->dml2_options.max_segments_per_hubp = 20; dc->dml2_options.det_segment_size = DCN4_01_CRB_SEGMENT_SIZE_KB; + /* SPL */ + spl_init_easf_filter_coeffs(); + spl_init_blur_scale_coeffs(); + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index bb46f30d11d0a1..514d1ce20df9ef 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -536,7 +536,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, inst), \ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ - SRI_ARR(OTG_DRR_CONTROL, OTG, inst) + SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ + SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst) /* HUBBUB */ #define HUBBUB_REG_LIST_DCN4_01_RI(id) \ diff --git a/drivers/gpu/drm/amd/display/dc/spl/Makefile b/drivers/gpu/drm/amd/display/dc/spl/Makefile index 89cad60b1a10f0..5edf3c6cf3e2dd 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/Makefile +++ b/drivers/gpu/drm/amd/display/dc/spl/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'spl' sub-component of DAL. # It provides the scaling library interface. -SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_isharp_filters.o +SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_easf_filters.o dc_spl_isharp_filters.o dc_spl_filters.o spl_fixpt31_32.o spl_custom_float.o AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/spl/,$(SPL)) diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c index e3e20cd86af62a..014e8a296f0c78 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c @@ -4,9 +4,11 @@ #include "dc_spl.h" #include "dc_spl_scl_filters.h" +#include "dc_spl_scl_easf_filters.h" #include "dc_spl_isharp_filters.h" +#include "spl_debug.h" -#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) +#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19)) #define MIN_VIEWPORT_SIZE 12 static struct spl_rect intersect_rec(const struct spl_rect *r0, const struct spl_rect *r1) @@ -107,26 +109,26 @@ static struct spl_rect calculate_plane_rec_in_timing_active( const struct spl_rect *stream_src = &spl_in->basic_out.src_rect; const struct spl_rect *stream_dst = &spl_in->basic_out.dst_rect; struct spl_rect rec_out = {0}; - struct fixed31_32 temp; + struct spl_fixed31_32 temp; - temp = dc_fixpt_from_fraction(rec_in->x * (long long)stream_dst->width, + temp = spl_fixpt_from_fraction(rec_in->x * (long long)stream_dst->width, stream_src->width); - rec_out.x = stream_dst->x + dc_fixpt_round(temp); + rec_out.x = stream_dst->x + spl_fixpt_round(temp); - temp = dc_fixpt_from_fraction( + temp = spl_fixpt_from_fraction( (rec_in->x + rec_in->width) * (long long)stream_dst->width, stream_src->width); - rec_out.width = stream_dst->x + dc_fixpt_round(temp) - rec_out.x; + rec_out.width = stream_dst->x + spl_fixpt_round(temp) - rec_out.x; - temp = dc_fixpt_from_fraction(rec_in->y * (long long)stream_dst->height, + temp = 
spl_fixpt_from_fraction(rec_in->y * (long long)stream_dst->height, stream_src->height); - rec_out.y = stream_dst->y + dc_fixpt_round(temp); + rec_out.y = stream_dst->y + spl_fixpt_round(temp); - temp = dc_fixpt_from_fraction( + temp = spl_fixpt_from_fraction( (rec_in->y + rec_in->height) * (long long)stream_dst->height, stream_src->height); - rec_out.height = stream_dst->y + dc_fixpt_round(temp) - rec_out.y; + rec_out.height = stream_dst->y + spl_fixpt_round(temp) - rec_out.y; return rec_out; } @@ -144,7 +146,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx; mpc_rec.height = plane_clip_rec->height; mpc_rec.y = plane_clip_rec->y; - ASSERT(mpc_slice_count == 1 || + SPL_ASSERT(mpc_slice_count == 1 || spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE || mpc_rec.width % 2 == 0); @@ -157,7 +159,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( } if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM) { - ASSERT(mpc_rec.height % 2 == 0); + SPL_ASSERT(mpc_rec.height % 2 == 0); mpc_rec.height /= 2; } return mpc_rec; @@ -197,7 +199,7 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i return spl_in->basic_out.odm_slice_rect; } -static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out) +static void spl_calculate_recout(struct spl_in *spl_in, struct spl_scratch *spl_scratch, struct spl_out *spl_out) { /* * A plane clip represents the desired plane size and position in Stream @@ -340,20 +342,23 @@ static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out) /* shift the overlapping area so it is with respect to current * ODM slice's position */ - spl_out->scl_data.recout = shift_rec( + spl_scratch->scl_data.recout = shift_rec( &overlapping_area, -odm_slice.x, -odm_slice.y); - spl_out->scl_data.recout.height -= + spl_scratch->scl_data.recout.height -= spl_in->debug.visual_confirm_base_offset; - spl_out->scl_data.recout.height -= + spl_scratch->scl_data.recout.height -= spl_in->debug.visual_confirm_dpp_offset; } else /* if there is no overlap, zero recout */ - memset(&spl_out->scl_data.recout, 0, + memset(&spl_scratch->scl_data.recout, 0, sizeof(struct spl_rect)); } + /* Calculate scaling ratios */ -static void spl_calculate_scaling_ratios(struct spl_in *spl_in, struct spl_out *spl_out) +static void spl_calculate_scaling_ratios(struct spl_in *spl_in, + struct spl_scratch *spl_scratch, + struct spl_out *spl_out) { const int in_w = spl_in->basic_out.src_rect.width; const int in_h = spl_in->basic_out.src_rect.height; @@ -364,59 +369,75 @@ static void spl_calculate_scaling_ratios(struct spl_in *spl_in, struct spl_out * /*Swap surf_src height and width since scaling ratios are in recout rotation*/ if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 || spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270) - swap(surf_src.height, surf_src.width); + spl_swap(surf_src.height, surf_src.width); - spl_out->scl_data.ratios.horz = dc_fixpt_from_fraction( + spl_scratch->scl_data.ratios.horz = spl_fixpt_from_fraction( surf_src.width, spl_in->basic_in.dst_rect.width); - spl_out->scl_data.ratios.vert = dc_fixpt_from_fraction( + spl_scratch->scl_data.ratios.vert = spl_fixpt_from_fraction( surf_src.height, spl_in->basic_in.dst_rect.height); if (spl_in->basic_out.view_format == SPL_VIEW_3D_SIDE_BY_SIDE) - spl_out->scl_data.ratios.horz.value *= 2; + spl_scratch->scl_data.ratios.horz.value *= 2; else if (spl_in->basic_out.view_format == 
SPL_VIEW_3D_TOP_AND_BOTTOM) - spl_out->scl_data.ratios.vert.value *= 2; + spl_scratch->scl_data.ratios.vert.value *= 2; - spl_out->scl_data.ratios.vert.value = div64_s64( - spl_out->scl_data.ratios.vert.value * in_h, out_h); - spl_out->scl_data.ratios.horz.value = div64_s64( - spl_out->scl_data.ratios.horz.value * in_w, out_w); + spl_scratch->scl_data.ratios.vert.value = spl_div64_s64( + spl_scratch->scl_data.ratios.vert.value * in_h, out_h); + spl_scratch->scl_data.ratios.horz.value = spl_div64_s64( + spl_scratch->scl_data.ratios.horz.value * in_w, out_w); - spl_out->scl_data.ratios.horz_c = spl_out->scl_data.ratios.horz; - spl_out->scl_data.ratios.vert_c = spl_out->scl_data.ratios.vert; + spl_scratch->scl_data.ratios.horz_c = spl_scratch->scl_data.ratios.horz; + spl_scratch->scl_data.ratios.vert_c = spl_scratch->scl_data.ratios.vert; if (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8 || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) { - spl_out->scl_data.ratios.horz_c.value /= 2; - spl_out->scl_data.ratios.vert_c.value /= 2; + spl_scratch->scl_data.ratios.horz_c.value /= 2; + spl_scratch->scl_data.ratios.vert_c.value /= 2; } - spl_out->scl_data.ratios.horz = dc_fixpt_truncate( - spl_out->scl_data.ratios.horz, 19); - spl_out->scl_data.ratios.vert = dc_fixpt_truncate( - spl_out->scl_data.ratios.vert, 19); - spl_out->scl_data.ratios.horz_c = dc_fixpt_truncate( - spl_out->scl_data.ratios.horz_c, 19); - spl_out->scl_data.ratios.vert_c = dc_fixpt_truncate( - spl_out->scl_data.ratios.vert_c, 19); + spl_scratch->scl_data.ratios.horz = spl_fixpt_truncate( + spl_scratch->scl_data.ratios.horz, 19); + spl_scratch->scl_data.ratios.vert = spl_fixpt_truncate( + spl_scratch->scl_data.ratios.vert, 19); + spl_scratch->scl_data.ratios.horz_c = spl_fixpt_truncate( + spl_scratch->scl_data.ratios.horz_c, 19); + spl_scratch->scl_data.ratios.vert_c = spl_fixpt_truncate( + spl_scratch->scl_data.ratios.vert_c, 19); + + /* + * Coefficient table and some registers are different based on ratio + * that is output/input. 
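The ratios above are truncated to 19 fractional bits, and the IDENTITY_RATIO() macro defined earlier in dc_spl.c compares the u2.19 encoding of a ratio against 1 << 19 to detect 1:1 scaling. The snippet below sketches that encoding with plain doubles and made-up ratios; the SPL code itself converts from its 31.32 fixed-point type rather than from floating point.

#include <stdint.h>
#include <stdio.h>

/* one way to form an unsigned 2.19 value (2 integer bits, 19 fraction bits);
 * 1.0 maps to 1 << 19 = 524288, which is what the identity check looks for
 */
static uint32_t to_u2d19(double ratio)
{
	return (uint32_t)(ratio * (1 << 19) + 0.5);
}

int main(void)
{
	printf("1.0  -> %u (identity: %d)\n", to_u2d19(1.0), to_u2d19(1.0) == (1 << 19));
	printf("1.5  -> %u\n", to_u2d19(1.5));  /* 786432 */
	printf("0.75 -> %u\n", to_u2d19(0.75)); /* 393216 */
	return 0;
}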
Currently we calculate input/output + * Store 1/ratio in recip_ratio for those lookups + */ + spl_scratch->scl_data.recip_ratios.horz = spl_fixpt_recip( + spl_scratch->scl_data.ratios.horz); + spl_scratch->scl_data.recip_ratios.vert = spl_fixpt_recip( + spl_scratch->scl_data.ratios.vert); + spl_scratch->scl_data.recip_ratios.horz_c = spl_fixpt_recip( + spl_scratch->scl_data.ratios.horz_c); + spl_scratch->scl_data.recip_ratios.vert_c = spl_fixpt_recip( + spl_scratch->scl_data.ratios.vert_c); } + /* Calculate Viewport size */ -static void spl_calculate_viewport_size(struct spl_in *spl_in, struct spl_out *spl_out) +static void spl_calculate_viewport_size(struct spl_in *spl_in, struct spl_scratch *spl_scratch) { - spl_out->scl_data.viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.horz, - spl_out->scl_data.recout.width)); - spl_out->scl_data.viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.vert, - spl_out->scl_data.recout.height)); - spl_out->scl_data.viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.horz_c, - spl_out->scl_data.recout.width)); - spl_out->scl_data.viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.vert_c, - spl_out->scl_data.recout.height)); + spl_scratch->scl_data.viewport.width = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.horz, + spl_scratch->scl_data.recout.width)); + spl_scratch->scl_data.viewport.height = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.vert, + spl_scratch->scl_data.recout.height)); + spl_scratch->scl_data.viewport_c.width = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.horz_c, + spl_scratch->scl_data.recout.width)); + spl_scratch->scl_data.viewport_c.height = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.vert_c, + spl_scratch->scl_data.recout.height)); if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 || spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270) { - swap(spl_out->scl_data.viewport.width, spl_out->scl_data.viewport.height); - swap(spl_out->scl_data.viewport_c.width, spl_out->scl_data.viewport_c.height); + spl_swap(spl_scratch->scl_data.viewport.width, spl_scratch->scl_data.viewport.height); + spl_swap(spl_scratch->scl_data.viewport_c.width, spl_scratch->scl_data.viewport_c.height); } } + static void spl_get_vp_scan_direction(enum spl_rotation_angle rotation, bool horizontal_mirror, bool *orthogonal_rotation, @@ -440,6 +461,7 @@ static void spl_get_vp_scan_direction(enum spl_rotation_angle rotation, if (horizontal_mirror) *flip_horz_scan_dir = !*flip_horz_scan_dir; } + /* * We completely calculate vp offset, size and inits here based entirely on scaling * ratios and recout for pixel perfect pipe combine. 
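The function below derives the viewport offset as floor(ratio * recout_offset) and the filter init phase as (ratio + taps + 1) / 2 plus the leftover fraction from that offset, so adjacent pipes sample seamlessly when combined. A small worked example of those two steps with plain doubles and made-up numbers; the SPL code keeps everything in its 31.32 fixed-point type and applies further clamping afterwards.

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* made-up values: 1.5x downscale, 3 taps, second pipe starts
	 * 960 recout pixels into the full recout
	 */
	double ratio = 1.5;
	int taps = 3;
	int recout_offset = 960;

	double t = ratio * recout_offset;
	int vp_offset = (int)floor(t);                  /* viewport offset in source pixels */
	double frac = t - vp_offset;                    /* leftover fraction                */
	double init = (ratio + taps + 1) / 2.0 + frac;  /* first-phase filter init          */

	printf("vp_offset=%d init=%f\n", vp_offset, init); /* 1440, 2.75 */
	return 0;
}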
@@ -449,13 +471,13 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir, int recout_size, int src_size, int taps, - struct fixed31_32 ratio, - struct fixed31_32 init_adj, - struct fixed31_32 *init, + struct spl_fixed31_32 ratio, + struct spl_fixed31_32 init_adj, + struct spl_fixed31_32 *init, int *vp_offset, int *vp_size) { - struct fixed31_32 temp; + struct spl_fixed31_32 temp; int int_part; /* @@ -468,33 +490,33 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir, * init_bot = init + scaling_ratio * to get pixel perfect combine add the fraction from calculating vp offset */ - temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full); - *vp_offset = dc_fixpt_floor(temp); + temp = spl_fixpt_mul_int(ratio, recout_offset_within_recout_full); + *vp_offset = spl_fixpt_floor(temp); temp.value &= 0xffffffff; - *init = dc_fixpt_add(dc_fixpt_div_int(dc_fixpt_add_int(ratio, taps + 1), 2), temp); - *init = dc_fixpt_add(*init, init_adj); - *init = dc_fixpt_truncate(*init, 19); + *init = spl_fixpt_add(spl_fixpt_div_int(spl_fixpt_add_int(ratio, taps + 1), 2), temp); + *init = spl_fixpt_add(*init, init_adj); + *init = spl_fixpt_truncate(*init, 19); /* * If viewport has non 0 offset and there are more taps than covered by init then * we should decrease the offset and increase init so we are never sampling * outside of viewport. */ - int_part = dc_fixpt_floor(*init); + int_part = spl_fixpt_floor(*init); if (int_part < taps) { int_part = taps - int_part; if (int_part > *vp_offset) int_part = *vp_offset; *vp_offset -= int_part; - *init = dc_fixpt_add_int(*init, int_part); + *init = spl_fixpt_add_int(*init, int_part); } /* * If taps are sampling outside of viewport at end of recout and there are more pixels * available in the surface we should increase the viewport size, regardless set vp to * only what is used. */ - temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1)); - *vp_size = dc_fixpt_floor(temp); + temp = spl_fixpt_add(*init, spl_fixpt_mul_int(ratio, recout_size - 1)); + *vp_size = spl_fixpt_floor(temp); if (*vp_size + *vp_offset > src_size) *vp_size = src_size - *vp_offset; @@ -509,15 +531,24 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir, static bool spl_is_yuv420(enum spl_pixel_format format) { - if ((format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN) && - (format <= SPL_PIXEL_FORMAT_VIDEO_END)) + if ((format >= SPL_PIXEL_FORMAT_420BPP8) && + (format <= SPL_PIXEL_FORMAT_420BPP10)) + return true; + + return false; +} + +static bool spl_is_rgb8(enum spl_pixel_format format) +{ + if (format == SPL_PIXEL_FORMAT_ARGB8888) return true; return false; } /*Calculate inits and viewport */ -static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_out *spl_out) +static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, + struct spl_scratch *spl_scratch) { struct spl_rect src = spl_in->basic_in.src_rect; struct spl_rect recout_dst_in_active_timing; @@ -528,11 +559,11 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_ int vpc_div = (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8 || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) ? 
2 : 1; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; - struct fixed31_32 init_adj_h = dc_fixpt_zero; - struct fixed31_32 init_adj_v = dc_fixpt_zero; + struct spl_fixed31_32 init_adj_h = spl_fixpt_zero; + struct spl_fixed31_32 init_adj_v = spl_fixpt_zero; recout_clip_in_active_timing = shift_rec( - &spl_out->scl_data.recout, odm_slice.x, odm_slice.y); + &spl_scratch->scl_data.recout, odm_slice.x, odm_slice.y); recout_dst_in_active_timing = calculate_plane_rec_in_timing_active( spl_in, &spl_in->basic_in.dst_rect); overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing, @@ -555,8 +586,8 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_ &flip_horz_scan_dir); if (orthogonal_rotation) { - swap(src.width, src.height); - swap(flip_vert_scan_dir, flip_horz_scan_dir); + spl_swap(src.width, src.height); + spl_swap(flip_vert_scan_dir, flip_horz_scan_dir); } if (spl_is_yuv420(spl_in->basic_in.format)) { @@ -568,17 +599,17 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_ switch (spl_in->basic_in.cositing) { case CHROMA_COSITING_LEFT: - init_adj_h = dc_fixpt_zero; - init_adj_v = dc_fixpt_from_fraction(sign, 2); + init_adj_h = spl_fixpt_zero; + init_adj_v = spl_fixpt_from_fraction(sign, 4); break; case CHROMA_COSITING_NONE: - init_adj_h = dc_fixpt_from_fraction(sign, 2); - init_adj_v = dc_fixpt_from_fraction(sign, 2); + init_adj_h = spl_fixpt_from_fraction(sign, 4); + init_adj_v = spl_fixpt_from_fraction(sign, 4); break; case CHROMA_COSITING_TOPLEFT: default: - init_adj_h = dc_fixpt_zero; - init_adj_v = dc_fixpt_zero; + init_adj_h = spl_fixpt_zero; + init_adj_v = spl_fixpt_zero; break; } } @@ -586,59 +617,60 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_ spl_calculate_init_and_vp( flip_horz_scan_dir, recout_clip_in_recout_dst.x, - spl_out->scl_data.recout.width, + spl_scratch->scl_data.recout.width, src.width, - spl_out->scl_data.taps.h_taps, - spl_out->scl_data.ratios.horz, - dc_fixpt_zero, - &spl_out->scl_data.inits.h, - &spl_out->scl_data.viewport.x, - &spl_out->scl_data.viewport.width); + spl_scratch->scl_data.taps.h_taps, + spl_scratch->scl_data.ratios.horz, + spl_fixpt_zero, + &spl_scratch->scl_data.inits.h, + &spl_scratch->scl_data.viewport.x, + &spl_scratch->scl_data.viewport.width); spl_calculate_init_and_vp( flip_horz_scan_dir, recout_clip_in_recout_dst.x, - spl_out->scl_data.recout.width, + spl_scratch->scl_data.recout.width, src.width / vpc_div, - spl_out->scl_data.taps.h_taps_c, - spl_out->scl_data.ratios.horz_c, + spl_scratch->scl_data.taps.h_taps_c, + spl_scratch->scl_data.ratios.horz_c, init_adj_h, - &spl_out->scl_data.inits.h_c, - &spl_out->scl_data.viewport_c.x, - &spl_out->scl_data.viewport_c.width); + &spl_scratch->scl_data.inits.h_c, + &spl_scratch->scl_data.viewport_c.x, + &spl_scratch->scl_data.viewport_c.width); spl_calculate_init_and_vp( flip_vert_scan_dir, recout_clip_in_recout_dst.y, - spl_out->scl_data.recout.height, + spl_scratch->scl_data.recout.height, src.height, - spl_out->scl_data.taps.v_taps, - spl_out->scl_data.ratios.vert, - dc_fixpt_zero, - &spl_out->scl_data.inits.v, - &spl_out->scl_data.viewport.y, - &spl_out->scl_data.viewport.height); + spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.ratios.vert, + spl_fixpt_zero, + &spl_scratch->scl_data.inits.v, + &spl_scratch->scl_data.viewport.y, + &spl_scratch->scl_data.viewport.height); spl_calculate_init_and_vp( flip_vert_scan_dir, recout_clip_in_recout_dst.y, - 
spl_out->scl_data.recout.height, + spl_scratch->scl_data.recout.height, src.height / vpc_div, - spl_out->scl_data.taps.v_taps_c, - spl_out->scl_data.ratios.vert_c, + spl_scratch->scl_data.taps.v_taps_c, + spl_scratch->scl_data.ratios.vert_c, init_adj_v, - &spl_out->scl_data.inits.v_c, - &spl_out->scl_data.viewport_c.y, - &spl_out->scl_data.viewport_c.height); + &spl_scratch->scl_data.inits.v_c, + &spl_scratch->scl_data.viewport_c.y, + &spl_scratch->scl_data.viewport_c.height); if (orthogonal_rotation) { - swap(spl_out->scl_data.viewport.x, spl_out->scl_data.viewport.y); - swap(spl_out->scl_data.viewport.width, spl_out->scl_data.viewport.height); - swap(spl_out->scl_data.viewport_c.x, spl_out->scl_data.viewport_c.y); - swap(spl_out->scl_data.viewport_c.width, spl_out->scl_data.viewport_c.height); + spl_swap(spl_scratch->scl_data.viewport.x, spl_scratch->scl_data.viewport.y); + spl_swap(spl_scratch->scl_data.viewport.width, spl_scratch->scl_data.viewport.height); + spl_swap(spl_scratch->scl_data.viewport_c.x, spl_scratch->scl_data.viewport_c.y); + spl_swap(spl_scratch->scl_data.viewport_c.width, spl_scratch->scl_data.viewport_c.height); } - spl_out->scl_data.viewport.x += src.x; - spl_out->scl_data.viewport.y += src.y; - ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0); - spl_out->scl_data.viewport_c.x += src.x / vpc_div; - spl_out->scl_data.viewport_c.y += src.y / vpc_div; + spl_scratch->scl_data.viewport.x += src.x; + spl_scratch->scl_data.viewport.y += src.y; + SPL_ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0); + spl_scratch->scl_data.viewport_c.x += src.x / vpc_div; + spl_scratch->scl_data.viewport_c.y += src.y / vpc_div; } + static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout) { /* @@ -647,7 +679,7 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout) * This may break with rotation, good thing we aren't mixing hw rotation and 3d */ if (spl_in->basic_in.mpc_combine_v) { - ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 || + SPL_ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 || (spl_in->basic_out.view_format != SPL_VIEW_3D_TOP_AND_BOTTOM && spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE)); if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM) @@ -665,6 +697,7 @@ static void spl_clamp_viewport(struct spl_rect *viewport) if (viewport->width < MIN_VIEWPORT_SIZE) viewport->width = MIN_VIEWPORT_SIZE; } + static bool spl_dscl_is_420_format(enum spl_pixel_format format) { if (format == SPL_PIXEL_FORMAT_420BPP8 || @@ -673,6 +706,7 @@ static bool spl_dscl_is_420_format(enum spl_pixel_format format) else return false; } + static bool spl_dscl_is_video_format(enum spl_pixel_format format) { if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN @@ -681,17 +715,21 @@ static bool spl_dscl_is_video_format(enum spl_pixel_format format) else return false; } + static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in, - const struct spl_scaler_data *data) + const struct spl_scaler_data *data, + bool enable_isharp, bool enable_easf) { - const long long one = dc_fixpt_one.value; + const long long one = spl_fixpt_one.value; enum spl_pixel_format pixel_format = spl_in->basic_in.format; + /* Bypass if ratio is 1:1 with no ISHARP or force scale on */ if (data->ratios.horz.value == one && data->ratios.vert.value == one && data->ratios.horz_c.value == one && data->ratios.vert_c.value == one - && !spl_in->basic_out.always_scale) + && !spl_in->basic_out.always_scale + && !enable_isharp) return 
SCL_MODE_SCALING_444_BYPASS; if (!spl_dscl_is_420_format(pixel_format)) { @@ -700,69 +738,214 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in, else return SCL_MODE_SCALING_444_RGB_ENABLE; } - if (data->ratios.horz.value == one && data->ratios.vert.value == one) - return SCL_MODE_SCALING_420_LUMA_BYPASS; - if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one) - return SCL_MODE_SCALING_420_CHROMA_BYPASS; + + /* Bypass YUV if at 1:1 with no ISHARP or if doing 2:1 YUV + * downscale without EASF + */ + if ((!enable_isharp) && (!enable_easf)) { + if (data->ratios.horz.value == one && data->ratios.vert.value == one) + return SCL_MODE_SCALING_420_LUMA_BYPASS; + if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one) + return SCL_MODE_SCALING_420_CHROMA_BYPASS; + } return SCL_MODE_SCALING_420_YCBCR_ENABLE; } + +static bool spl_choose_lls_policy(enum spl_pixel_format format, + enum spl_transfer_func_type tf_type, + enum spl_transfer_func_predefined tf_predefined_type, + enum linear_light_scaling *lls_pref) +{ + if (spl_is_yuv420(format)) { + *lls_pref = LLS_PREF_NO; + if ((tf_type == SPL_TF_TYPE_PREDEFINED) || + (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS)) + return true; + } else { /* RGB or YUV444 */ + if ((tf_type == SPL_TF_TYPE_PREDEFINED) || + (tf_type == SPL_TF_TYPE_BYPASS)) { + *lls_pref = LLS_PREF_YES; + return true; + } + } + *lls_pref = LLS_PREF_NO; + return false; +} + +/* Enable EASF ?*/ +static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch) +{ + int vratio = 0; + int hratio = 0; + bool skip_easf = false; + bool lls_enable_easf = true; + + if (spl_in->disable_easf) + skip_easf = true; + + vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert); + hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz); + + /* + * No EASF support for downscaling > 2:1 + * EASF support for upscaling or downscaling up to 2:1 + */ + if ((vratio > 2) || (hratio > 2)) + skip_easf = true; + + /* + * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer + * function to determine whether to use LINEAR or NONLINEAR scaling + */ + if (spl_in->lls_pref == LLS_PREF_DONT_CARE) + lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format, + spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type, + &spl_in->lls_pref); + + if (!lls_enable_easf) + skip_easf = true; + + /* Check for linear scaling or EASF preferred */ + if (spl_in->lls_pref != LLS_PREF_YES && !spl_in->prefer_easf) + skip_easf = true; + + return skip_easf; +} + +/* Check if video is in fullscreen mode */ +static bool spl_is_video_fullscreen(struct spl_in *spl_in) +{ + if (spl_is_yuv420(spl_in->basic_in.format) && spl_in->is_fullscreen) + return true; + return false; +} + +static bool spl_get_isharp_en(struct spl_in *spl_in, + struct spl_scratch *spl_scratch) +{ + bool enable_isharp = false; + int vratio = 0; + int hratio = 0; + struct spl_taps taps = spl_scratch->scl_data.taps; + bool fullscreen = spl_is_video_fullscreen(spl_in); + + /* Return if adaptive sharpness is disabled */ + if (spl_in->adaptive_sharpness.enable == false) + return enable_isharp; + + vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert); + hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz); + + /* No iSHARP support for downscaling */ + if (vratio > 1 || hratio > 1) + return enable_isharp; + + // Scaling is up to 1:1 (no scaling) or upscaling + + /* + * Apply sharpness to RGB and YUV (NV12/P010) + * surfaces based on policy setting + */ + if 
(!spl_is_yuv420(spl_in->basic_in.format) && + (spl_in->debug.sharpen_policy == SHARPEN_YUV)) + return enable_isharp; + else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) && + (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) + return enable_isharp; + else if (!spl_in->is_fullscreen && + spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL) + return enable_isharp; + + /* + * Apply sharpness if supports horizontal taps 4,6 AND + * vertical taps 3, 4, 6 + */ + if ((taps.h_taps == 4 || taps.h_taps == 6) && + (taps.v_taps == 3 || taps.v_taps == 4 || taps.v_taps == 6)) + enable_isharp = true; + + return enable_isharp; +} + /* Calculate optimal number of taps */ static bool spl_get_optimal_number_of_taps( - int max_downscale_src_width, struct spl_in *spl_in, struct spl_out *spl_out, - const struct spl_taps *in_taps) + int max_downscale_src_width, struct spl_in *spl_in, struct spl_scratch *spl_scratch, + const struct spl_taps *in_taps, bool *enable_easf_v, bool *enable_easf_h, + bool *enable_isharp) { int num_part_y, num_part_c; int max_taps_y, max_taps_c; int min_taps_y, min_taps_c; enum lb_memory_config lb_config; + bool skip_easf = false; - if (spl_out->scl_data.viewport.width > spl_out->scl_data.h_active && + if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active && max_downscale_src_width != 0 && - spl_out->scl_data.viewport.width > max_downscale_src_width) + spl_scratch->scl_data.viewport.width > max_downscale_src_width) return false; + + /* Check if we are using EASF or not */ + skip_easf = enable_easf(spl_in, spl_scratch); + /* * Set default taps if none are provided * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ - if (in_taps->h_taps == 0) { - if (dc_fixpt_ceil(spl_out->scl_data.ratios.horz) > 1) - spl_out->scl_data.taps.h_taps = min(2 * dc_fixpt_ceil(spl_out->scl_data.ratios.horz), 8); - else - spl_out->scl_data.taps.h_taps = 4; - } else - spl_out->scl_data.taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) > 1) - spl_out->scl_data.taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int( - spl_out->scl_data.ratios.vert, 2)), 8); - else - spl_out->scl_data.taps.v_taps = 4; - } else - spl_out->scl_data.taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) > 1) - spl_out->scl_data.taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int( - spl_out->scl_data.ratios.vert_c, 2)), 8); - else - spl_out->scl_data.taps.v_taps_c = 4; - } else - spl_out->scl_data.taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (dc_fixpt_ceil(spl_out->scl_data.ratios.horz_c) > 1) - spl_out->scl_data.taps.h_taps_c = min(2 * dc_fixpt_ceil(spl_out->scl_data.ratios.horz_c), 8); + if (skip_easf) { + if (in_taps->h_taps == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1) + spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil( + spl_scratch->scl_data.ratios.horz), 8); + else + spl_scratch->scl_data.taps.h_taps = 4; + } else + spl_scratch->scl_data.taps.h_taps = in_taps->h_taps; + if (in_taps->v_taps == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1) + spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( + spl_scratch->scl_data.ratios.vert, 2)), 8); + else + spl_scratch->scl_data.taps.v_taps = 4; + } else + spl_scratch->scl_data.taps.v_taps = in_taps->v_taps; + if (in_taps->v_taps_c == 0) { + if 
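
The default-tap rule quoted in the comment above can be sanity-checked with a standalone sketch; double arithmetic stands in for the spl_fixpt helpers and the function name is illustrative.

#include <math.h>
#include <stdio.h>

/* Default tap count per the programming-guide rule used above:
 * downscaling: taps = min(ceil(2 * ratio), 8); upscaling / 1:1: taps = 4.
 * (The horizontal path in the patch uses 2 * ceil(ratio) rather than
 *  ceil(2 * ratio); the two agree for integral ratios and the former is
 *  never smaller otherwise.)
 */
static int default_taps(double ratio)
{
	if (ceil(ratio) > 1.0)
		return (int)fmin(ceil(2.0 * ratio), 8.0);
	return 4;
}

int main(void)
{
	printf("%d %d %d\n", default_taps(0.5), default_taps(1.5), default_taps(3.0));
	/* expected: 4 3 6 */
	return 0;
}
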
(spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1) + spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( + spl_scratch->scl_data.ratios.vert_c, 2)), 8); + else + spl_scratch->scl_data.taps.v_taps_c = 4; + } else + spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c; + if (in_taps->h_taps_c == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1) + spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil( + spl_scratch->scl_data.ratios.horz_c), 8); + else + spl_scratch->scl_data.taps.h_taps_c = 4; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + /* Only 1 and even h_taps_c are supported by hw */ + spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1; else - spl_out->scl_data.taps.h_taps_c = 4; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - spl_out->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1; - else - spl_out->scl_data.taps.h_taps_c = in_taps->h_taps_c; + spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c; + } else { + if (spl_is_yuv420(spl_in->basic_in.format)) { + spl_scratch->scl_data.taps.h_taps = 6; + spl_scratch->scl_data.taps.v_taps = 6; + spl_scratch->scl_data.taps.h_taps_c = 4; + spl_scratch->scl_data.taps.v_taps_c = 4; + } else { /* RGB */ + spl_scratch->scl_data.taps.h_taps = 6; + spl_scratch->scl_data.taps.v_taps = 6; + spl_scratch->scl_data.taps.h_taps_c = 6; + spl_scratch->scl_data.taps.v_taps_c = 6; + } + } /*Ensure we can support the requested number of vtaps*/ - min_taps_y = dc_fixpt_ceil(spl_out->scl_data.ratios.vert); - min_taps_c = dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c); + min_taps_y = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert); + min_taps_c = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c); /* Use LB_MEMORY_CONFIG_3 for 4:2:0 */ if ((spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8) @@ -771,16 +954,16 @@ static bool spl_get_optimal_number_of_taps( else lb_config = LB_MEMORY_CONFIG_0; // Determine max vtap support by calculating how much line buffer can fit - spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_out->scl_data, + spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data, lb_config, &num_part_y, &num_part_c); /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */ - if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) > 2) - max_taps_y = num_part_y - (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) - 2); + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2) + max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2); else max_taps_y = num_part_y; - if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) > 2) - max_taps_c = num_part_c - (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) - 2); + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 2) + max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2); else max_taps_c = num_part_c; @@ -789,48 +972,108 @@ static bool spl_get_optimal_number_of_taps( else if (max_taps_c < min_taps_c) return false; - if (spl_out->scl_data.taps.v_taps > max_taps_y) - spl_out->scl_data.taps.v_taps = max_taps_y; - - if (spl_out->scl_data.taps.v_taps_c > max_taps_c) - spl_out->scl_data.taps.v_taps_c = max_taps_c; - if (spl_in->prefer_easf) { - // EASF can be enabled only for taps 3,4,6 - // If optimal no of taps is 5, then set it to 4 - // If optimal no of taps is 7 or 8, then set it to 6 - if 
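
A compact restatement of the line-buffer constraint applied above; num_part stands for the partition count returned by the spl_calc_lb_num_partitions() callback, and both helpers are illustrative only.

#include <stdbool.h>

/* Max vertical taps once the line buffer is split into num_part partitions,
 * per MAX_V_TAPS = NUM_LINES - MAX(CEIL(V_RATIO) - 2, 0).
 */
static int lb_max_taps(int num_part, int ratio_ceil)
{
	int used_for_ratio = (ratio_ceil > 2) ? (ratio_ceil - 2) : 0;

	return num_part - used_for_ratio;
}

/* The request is only viable if the max still covers the minimum tap count,
 * which is ceil(ratio) - mirrors the max_taps_y/min_taps_y check above.
 */
static bool taps_supported(int num_part, int ratio_ceil)
{
	return lb_max_taps(num_part, ratio_ceil) >= ratio_ceil;
}
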
(spl_out->scl_data.taps.v_taps == 5) - spl_out->scl_data.taps.v_taps = 4; - if (spl_out->scl_data.taps.v_taps == 7 || spl_out->scl_data.taps.v_taps == 8) - spl_out->scl_data.taps.v_taps = 6; - - if (spl_out->scl_data.taps.v_taps_c == 5) - spl_out->scl_data.taps.v_taps_c = 4; - if (spl_out->scl_data.taps.v_taps_c == 7 || spl_out->scl_data.taps.v_taps_c == 8) - spl_out->scl_data.taps.v_taps_c = 6; - - if (spl_out->scl_data.taps.h_taps == 5) - spl_out->scl_data.taps.h_taps = 4; - if (spl_out->scl_data.taps.h_taps == 7 || spl_out->scl_data.taps.h_taps == 8) - spl_out->scl_data.taps.h_taps = 6; - - if (spl_out->scl_data.taps.h_taps_c == 5) - spl_out->scl_data.taps.h_taps_c = 4; - if (spl_out->scl_data.taps.h_taps_c == 7 || spl_out->scl_data.taps.h_taps_c == 8) - spl_out->scl_data.taps.h_taps_c = 6; + if (spl_scratch->scl_data.taps.v_taps > max_taps_y) + spl_scratch->scl_data.taps.v_taps = max_taps_y; + + if (spl_scratch->scl_data.taps.v_taps_c > max_taps_c) + spl_scratch->scl_data.taps.v_taps_c = max_taps_c; + if (!skip_easf) { + /* + * RGB ( L + NL ) and Linear HDR support 6x6, 6x4, 6x3, 4x4, 4x3 + * NL YUV420 only supports 6x6, 6x4 for Y and 4x4 for UV + * + * If LB does not support 3, 4, or 6 taps, then disable EASF_V + * and only enable EASF_H. So for RGB, support 6x2, 4x2 + * and for NL YUV420, support 6x2 for Y and 4x2 for UV + * + * All other cases, have to disable EASF_V and EASF_H + * + * If optimal no of taps is 5, then set it to 4 + * If optimal no of taps is 7 or 8, then fine since max tap is 6 + * + */ + if (spl_scratch->scl_data.taps.v_taps == 5) + spl_scratch->scl_data.taps.v_taps = 4; + + if (spl_scratch->scl_data.taps.v_taps_c == 5) + spl_scratch->scl_data.taps.v_taps_c = 4; + + if (spl_scratch->scl_data.taps.h_taps == 5) + spl_scratch->scl_data.taps.h_taps = 4; + + if (spl_scratch->scl_data.taps.h_taps_c == 5) + spl_scratch->scl_data.taps.h_taps_c = 4; + + if (spl_is_yuv420(spl_in->basic_in.format)) { + if ((spl_scratch->scl_data.taps.h_taps <= 4) || + (spl_scratch->scl_data.taps.h_taps_c <= 3)) { + *enable_easf_v = false; + *enable_easf_h = false; + } else if ((spl_scratch->scl_data.taps.v_taps <= 3) || + (spl_scratch->scl_data.taps.v_taps_c <= 3)) { + *enable_easf_v = false; + *enable_easf_h = true; + } else { + *enable_easf_v = true; + *enable_easf_h = true; + } + SPL_ASSERT((spl_scratch->scl_data.taps.v_taps > 1) && + (spl_scratch->scl_data.taps.v_taps_c > 1)); + } else { /* RGB */ + if (spl_scratch->scl_data.taps.h_taps <= 3) { + *enable_easf_v = false; + *enable_easf_h = false; + } else if (spl_scratch->scl_data.taps.v_taps < 3) { + *enable_easf_v = false; + *enable_easf_h = true; + } else { + *enable_easf_v = true; + *enable_easf_h = true; + } + SPL_ASSERT(spl_scratch->scl_data.taps.v_taps > 1); + } + } else { + *enable_easf_v = false; + *enable_easf_h = false; } // end of if prefer_easf - if (!spl_in->basic_out.always_scale) { - if (IDENTITY_RATIO(spl_out->scl_data.ratios.horz)) - spl_out->scl_data.taps.h_taps = 1; - if (IDENTITY_RATIO(spl_out->scl_data.ratios.vert)) - spl_out->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_out->scl_data.ratios.horz_c)) - spl_out->scl_data.taps.h_taps_c = 1; - if (IDENTITY_RATIO(spl_out->scl_data.ratios.vert_c)) - spl_out->scl_data.taps.v_taps_c = 1; + + /* Sharpener requires scaler to be enabled, including for 1:1 + * Check if ISHARP can be enabled + * If ISHARP is not enabled, for 1:1, set taps to 1 and disable + * EASF + * For case of 2:1 YUV where chroma is 1:1, set taps to 1 if + * EASF is not enabled + */ + + *enable_isharp = 
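
After the 5-to-4 rounding above, the EASF enables reduce to a per-format tap matrix; a standalone sketch of that decision (parameter names are illustrative, the thresholds follow the hunk).

#include <stdbool.h>

/* Vertical EASF needs enough vertical taps, horizontal EASF enough horizontal
 * taps, and YUV420 additionally constrains the chroma tap counts.
 */
static void pick_easf(bool is_yuv420, int h, int v, int h_c, int v_c,
		      bool *easf_v, bool *easf_h)
{
	if (is_yuv420) {
		if (h <= 4 || h_c <= 3) {
			*easf_v = false; *easf_h = false;
		} else if (v <= 3 || v_c <= 3) {
			*easf_v = false; *easf_h = true;
		} else {
			*easf_v = true; *easf_h = true;
		}
	} else {			/* RGB / YUV444 */
		if (h <= 3) {
			*easf_v = false; *easf_h = false;
		} else if (v < 3) {
			*easf_v = false; *easf_h = true;
		} else {
			*easf_v = true; *easf_h = true;
		}
	}
}
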
spl_get_isharp_en(spl_in, spl_scratch); + if (!*enable_isharp && !spl_in->basic_out.always_scale) { + if ((IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) && + (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) { + spl_scratch->scl_data.taps.h_taps = 1; + spl_scratch->scl_data.taps.v_taps = 1; + + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)) + spl_scratch->scl_data.taps.h_taps_c = 1; + + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)) + spl_scratch->scl_data.taps.v_taps_c = 1; + + *enable_easf_v = false; + *enable_easf_h = false; + } else { + if ((!*enable_easf_h) && + (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))) + spl_scratch->scl_data.taps.h_taps_c = 1; + + if ((!*enable_easf_v) && + (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))) + spl_scratch->scl_data.taps.v_taps_c = 1; + } } return true; } + static void spl_set_black_color_data(enum spl_pixel_format format, struct scl_black_color *scl_black_color) { @@ -848,38 +1091,38 @@ static void spl_set_black_color_data(enum spl_pixel_format format, static void spl_set_manual_ratio_init_data(struct dscl_prog_data *dscl_prog_data, const struct spl_scaler_data *scl_data) { - struct fixed31_32 bot; + struct spl_fixed31_32 bot; - dscl_prog_data->ratios.h_scale_ratio = dc_fixpt_u3d19(scl_data->ratios.horz) << 5; - dscl_prog_data->ratios.v_scale_ratio = dc_fixpt_u3d19(scl_data->ratios.vert) << 5; - dscl_prog_data->ratios.h_scale_ratio_c = dc_fixpt_u3d19(scl_data->ratios.horz_c) << 5; - dscl_prog_data->ratios.v_scale_ratio_c = dc_fixpt_u3d19(scl_data->ratios.vert_c) << 5; + dscl_prog_data->ratios.h_scale_ratio = spl_fixpt_u3d19(scl_data->ratios.horz) << 5; + dscl_prog_data->ratios.v_scale_ratio = spl_fixpt_u3d19(scl_data->ratios.vert) << 5; + dscl_prog_data->ratios.h_scale_ratio_c = spl_fixpt_u3d19(scl_data->ratios.horz_c) << 5; + dscl_prog_data->ratios.v_scale_ratio_c = spl_fixpt_u3d19(scl_data->ratios.vert_c) << 5; /* * 0.24 format for fraction, first five bits zeroed */ dscl_prog_data->init.h_filter_init_frac = - dc_fixpt_u0d19(scl_data->inits.h) << 5; + spl_fixpt_u0d19(scl_data->inits.h) << 5; dscl_prog_data->init.h_filter_init_int = - dc_fixpt_floor(scl_data->inits.h); + spl_fixpt_floor(scl_data->inits.h); dscl_prog_data->init.h_filter_init_frac_c = - dc_fixpt_u0d19(scl_data->inits.h_c) << 5; + spl_fixpt_u0d19(scl_data->inits.h_c) << 5; dscl_prog_data->init.h_filter_init_int_c = - dc_fixpt_floor(scl_data->inits.h_c); + spl_fixpt_floor(scl_data->inits.h_c); dscl_prog_data->init.v_filter_init_frac = - dc_fixpt_u0d19(scl_data->inits.v) << 5; + spl_fixpt_u0d19(scl_data->inits.v) << 5; dscl_prog_data->init.v_filter_init_int = - dc_fixpt_floor(scl_data->inits.v); + spl_fixpt_floor(scl_data->inits.v); dscl_prog_data->init.v_filter_init_frac_c = - dc_fixpt_u0d19(scl_data->inits.v_c) << 5; + spl_fixpt_u0d19(scl_data->inits.v_c) << 5; dscl_prog_data->init.v_filter_init_int_c = - dc_fixpt_floor(scl_data->inits.v_c); - - bot = dc_fixpt_add(scl_data->inits.v, scl_data->ratios.vert); - dscl_prog_data->init.v_filter_init_bot_frac = dc_fixpt_u0d19(bot) << 5; - dscl_prog_data->init.v_filter_init_bot_int = dc_fixpt_floor(bot); - bot = dc_fixpt_add(scl_data->inits.v_c, scl_data->ratios.vert_c); - dscl_prog_data->init.v_filter_init_bot_frac_c = dc_fixpt_u0d19(bot) << 5; - dscl_prog_data->init.v_filter_init_bot_int_c = dc_fixpt_floor(bot); + spl_fixpt_floor(scl_data->inits.v_c); + + bot = spl_fixpt_add(scl_data->inits.v, scl_data->ratios.vert); + dscl_prog_data->init.v_filter_init_bot_frac = spl_fixpt_u0d19(bot) << 5; + 
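
The shifts above pack ratios and filter-phase inits into the DSCL register formats; a small sketch of the ratio packing, using a double and plain truncation purely as an illustration of the bit layout (the exact rounding of spl_fixpt_u3d19() is not reproduced here).

#include <stdint.h>
#include <stdio.h>

/* Pack a ratio into the 3.24-style register value used above: take the upper
 * 3.19 bits of the fixed-point ratio, then shift left by 5 so the bottom five
 * fractional bits are zero (matching the "first five bits zeroed" comment).
 */
static uint32_t pack_ratio_3d19_shl5(double ratio)
{
	uint32_t u3d19 = (uint32_t)(ratio * (1 << 19));	/* truncate to 3.19 */

	return u3d19 << 5;
}

int main(void)
{
	printf("0x%08X\n", pack_ratio_3d19_shl5(1.5));	/* 0x01800000 = 1.5 in 3.24 */
	return 0;
}
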
dscl_prog_data->init.v_filter_init_bot_int = spl_fixpt_floor(bot); + bot = spl_fixpt_add(scl_data->inits.v_c, scl_data->ratios.vert_c); + dscl_prog_data->init.v_filter_init_bot_frac_c = spl_fixpt_u0d19(bot) << 5; + dscl_prog_data->init.v_filter_init_bot_int_c = spl_fixpt_floor(bot); } static void spl_set_taps_data(struct dscl_prog_data *dscl_prog_data, @@ -890,77 +1133,28 @@ static void spl_set_taps_data(struct dscl_prog_data *dscl_prog_data, dscl_prog_data->taps.v_taps_c = scl_data->taps.v_taps_c - 1; dscl_prog_data->taps.h_taps_c = scl_data->taps.h_taps_c - 1; } -static const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio) -{ - if (taps == 8) - return spl_get_filter_8tap_64p(ratio); - else if (taps == 7) - return spl_get_filter_7tap_64p(ratio); - else if (taps == 6) - return spl_get_filter_6tap_64p(ratio); - else if (taps == 5) - return spl_get_filter_5tap_64p(ratio); - else if (taps == 4) - return spl_get_filter_4tap_64p(ratio); - else if (taps == 3) - return spl_get_filter_3tap_64p(ratio); - else if (taps == 2) - return spl_get_filter_2tap_64p(); - else if (taps == 1) - return NULL; - else { - /* should never happen, bug */ - return NULL; - } -} -static void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data, - const struct spl_scaler_data *data) -{ - dscl_prog_data->filter_h = spl_dscl_get_filter_coeffs_64p( - data->taps.h_taps, data->ratios.horz); - dscl_prog_data->filter_v = spl_dscl_get_filter_coeffs_64p( - data->taps.v_taps, data->ratios.vert); - dscl_prog_data->filter_h_c = spl_dscl_get_filter_coeffs_64p( - data->taps.h_taps_c, data->ratios.horz_c); - dscl_prog_data->filter_v_c = spl_dscl_get_filter_coeffs_64p( - data->taps.v_taps_c, data->ratios.vert_c); -} - -static const uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps) -{ - if ((taps == 3) || (taps == 4) || (taps == 6)) - return spl_get_filter_isharp_bs_4tap_64p(); - else { - /* should never happen, bug */ - return NULL; - } -} -static void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data, - const struct spl_scaler_data *data) -{ - dscl_prog_data->filter_blur_scale_h = spl_dscl_get_blur_scale_coeffs_64p( - data->taps.h_taps); - dscl_prog_data->filter_blur_scale_v = spl_dscl_get_blur_scale_coeffs_64p( - data->taps.v_taps); -} /* Populate dscl prog data structure from scaler data calculated by SPL */ -static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_out *spl_out) +static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_scratch *spl_scratch, + struct spl_out *spl_out, bool enable_easf_v, bool enable_easf_h, bool enable_isharp) { struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data; - const struct spl_scaler_data *data = &spl_out->scl_data; + const struct spl_scaler_data *data = &spl_scratch->scl_data; struct scl_black_color *scl_black_color = &dscl_prog_data->scl_black_color; + bool enable_easf = enable_easf_v || enable_easf_h; + // Set values for recout - dscl_prog_data->recout = spl_out->scl_data.recout; + dscl_prog_data->recout = spl_scratch->scl_data.recout; // Set values for MPC Size - dscl_prog_data->mpc_size.width = spl_out->scl_data.h_active; - dscl_prog_data->mpc_size.height = spl_out->scl_data.v_active; + dscl_prog_data->mpc_size.width = spl_scratch->scl_data.h_active; + dscl_prog_data->mpc_size.height = spl_scratch->scl_data.v_active; // SCL_MODE - Set SCL_MODE data - dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data); + dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data, enable_isharp, + enable_easf); // 
SCL_BLACK_COLOR spl_set_black_color_data(spl_in->basic_in.format, scl_black_color); @@ -971,103 +1165,140 @@ static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_out *spl_ou // Set HTaps/VTaps spl_set_taps_data(dscl_prog_data, data); // Set viewport - dscl_prog_data->viewport = spl_out->scl_data.viewport; + dscl_prog_data->viewport = spl_scratch->scl_data.viewport; // Set viewport_c - dscl_prog_data->viewport_c = spl_out->scl_data.viewport_c; + dscl_prog_data->viewport_c = spl_scratch->scl_data.viewport_c; // Set filters data - spl_set_filters_data(dscl_prog_data, data); + spl_set_filters_data(dscl_prog_data, data, enable_easf_v, enable_easf_h); } -/* Enable EASF ?*/ -static bool enable_easf(int scale_ratio, int taps, - enum linear_light_scaling lls_pref, bool prefer_easf) + +/* Calculate C0-C3 coefficients based on HDR_mult */ +static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint32_t sdr_white_level_nits) { - // Is downscaling > 6:1 ? - if (scale_ratio > 6) { - // END - No EASF support for downscaling > 6:1 - return false; - } - // Is upscaling or downscaling up to 2:1? - if (scale_ratio <= 2) { - // Is linear scaling or EASF preferred? - if (lls_pref == LLS_PREF_YES || prefer_easf) { - // LB support taps 3, 4, 6 - if (taps == 3 || taps == 4 || taps == 6) { - // END - EASF supported - return true; - } - } - } - // END - EASF not supported - return false; + struct spl_fixed31_32 hdr_mult, c0_mult, c1_mult, c2_mult; + struct spl_fixed31_32 c0_calc, c1_calc, c2_calc; + struct spl_custom_float_format fmt; + uint32_t hdr_multx100_int; + + if ((sdr_white_level_nits >= 80) && (sdr_white_level_nits <= 480)) + hdr_multx100_int = sdr_white_level_nits * 100 / 80; + else + hdr_multx100_int = 100; /* default for 80 nits otherwise */ + + hdr_mult = spl_fixpt_from_fraction((long long)hdr_multx100_int, 100LL); + c0_mult = spl_fixpt_from_fraction(2126LL, 10000LL); + c1_mult = spl_fixpt_from_fraction(7152LL, 10000LL); + c2_mult = spl_fixpt_from_fraction(722LL, 10000LL); + + c0_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c0_mult, spl_fixpt_from_fraction( + 16384LL, 125LL))); + c1_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c1_mult, spl_fixpt_from_fraction( + 16384LL, 125LL))); + c2_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c2_mult, spl_fixpt_from_fraction( + 16384LL, 125LL))); + + fmt.exponenta_bits = 5; + fmt.mantissa_bits = 10; + fmt.sign = true; + + // fp1.5.10, C0 coefficient (LN_rec709: HDR_MULT * 0.212600 * 2^14/125) + spl_convert_to_custom_float_format(c0_calc, &fmt, &dscl_prog_data->easf_matrix_c0); + // fp1.5.10, C1 coefficient (LN_rec709: HDR_MULT * 0.715200 * 2^14/125) + spl_convert_to_custom_float_format(c1_calc, &fmt, &dscl_prog_data->easf_matrix_c1); + // fp1.5.10, C2 coefficient (LN_rec709: HDR_MULT * 0.072200 * 2^14/125) + spl_convert_to_custom_float_format(c2_calc, &fmt, &dscl_prog_data->easf_matrix_c2); + dscl_prog_data->easf_matrix_c3 = 0x0; // fp1.5.10, C3 coefficient } + /* Set EASF data */ -static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data, - bool enable_easf_v, bool enable_easf_h, enum linear_light_scaling lls_pref, - enum spl_pixel_format format) +static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *spl_out, bool enable_easf_v, + bool enable_easf_h, enum linear_light_scaling lls_pref, + enum spl_pixel_format format, enum system_setup setup, + uint32_t sdr_white_level_nits) { - if (spl_is_yuv420(format)) /* TODO: 0 = RGB, 1 = YUV */ - dscl_prog_data->easf_matrix_mode = 1; - else - 
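
The C0..C2 math above is a handful of multiplications; a double-precision sketch (the final fp1.5.10 conversion done by spl_convert_to_custom_float_format() is not reproduced) that lands on the Rec.709 constants quoted further down in this patch for the 80-nit default.

#include <stdio.h>

int main(void)
{
	unsigned int nits = 80;	/* sdr_white_level_nits; 80 is the default case */
	unsigned int multx100 = (nits >= 80 && nits <= 480) ? nits * 100 / 80 : 100;
	double mult = multx100 / 100.0;		/* HDR_MULT */
	double scale = 16384.0 / 125.0;		/* 2^14 / 125 = 131.072 */

	printf("c0=%.8f c1=%.8f c2=%.8f\n",
	       mult * 0.2126 * scale,		/* 27.86590720 at 80 nits */
	       mult * 0.7152 * scale,		/* 93.74269440 */
	       mult * 0.0722 * scale);		/* 9.46339840 */
	return 0;
}
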
dscl_prog_data->easf_matrix_mode = 0; - + struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data; if (enable_easf_v) { dscl_prog_data->easf_v_en = true; dscl_prog_data->easf_v_ring = 0; - dscl_prog_data->easf_v_sharp_factor = 1; + dscl_prog_data->easf_v_sharp_factor = 0; dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode - dscl_prog_data->easf_v_bf3_mode = 2; // 2-bit, BF3 chroma mode correction calculation mode - dscl_prog_data->easf_v_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control - dscl_prog_data->easf_v_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control - dscl_prog_data->easf_v_bf2_roc_gain = 4; // U2.2, Rate Of Change control + /* 2-bit, BF3 chroma mode correction calculation mode */ + dscl_prog_data->easf_v_bf3_mode = spl_get_v_bf3_mode( + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10 [ minCoef ]*/ dscl_prog_data->easf_v_ringest_3tap_dntilt_uptilt = - 0x9F00;// FP1.5.10 [minCoef] (-0.036109167214271) + spl_get_3tap_dntilt_uptilt_offset(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10 [ upTiltMaxVal ]*/ dscl_prog_data->easf_v_ringest_3tap_uptilt_max = - 0x24FE; // FP1.5.10 [upTiltMaxVal] ( 0.904556445553545) + spl_get_3tap_uptilt_maxval(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10 [ dnTiltSlope ]*/ dscl_prog_data->easf_v_ringest_3tap_dntilt_slope = - 0x3940; // FP1.5.10 [dnTiltSlope] ( 0.910488988173371) + spl_get_3tap_dntilt_slope(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10 [ upTilt1Slope ]*/ dscl_prog_data->easf_v_ringest_3tap_uptilt1_slope = - 0x359C; // FP1.5.10 [upTilt1Slope] ( 0.125620179040899) + spl_get_3tap_uptilt1_slope(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10 [ upTilt2Slope ]*/ dscl_prog_data->easf_v_ringest_3tap_uptilt2_slope = - 0x359C; // FP1.5.10 [upTilt2Slope] ( 0.006786817723568) + spl_get_3tap_uptilt2_slope(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10 [ upTilt2Offset ]*/ dscl_prog_data->easf_v_ringest_3tap_uptilt2_offset = - 0x9F00; // FP1.5.10 [upTilt2Offset] (-0.006139059716651) + spl_get_3tap_uptilt2_offset(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] */ dscl_prog_data->easf_v_ringest_eventap_reduceg1 = - 0x4000; // FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] + spl_get_reducer_gain4(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] */ dscl_prog_data->easf_v_ringest_eventap_reduceg2 = - 0x4100; // FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] + spl_get_reducer_gain6(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */ dscl_prog_data->easf_v_ringest_eventap_gain1 = - 0xB058; // FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 + spl_get_gainRing4(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); + /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */ dscl_prog_data->easf_v_ringest_eventap_gain2 = - 0xA640; // FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 + 
spl_get_gainRing6(spl_scratch->scl_data.taps.v_taps, + spl_scratch->scl_data.recip_ratios.vert); dscl_prog_data->easf_v_bf_maxa = 63; //Vertical Max BF value A in U0.6 format.Selected if V_FCNTL == 0 dscl_prog_data->easf_v_bf_maxb = 63; //Vertical Max BF value A in U0.6 format.Selected if V_FCNTL == 1 dscl_prog_data->easf_v_bf_mina = 0; //Vertical Min BF value A in U0.6 format.Selected if V_FCNTL == 0 dscl_prog_data->easf_v_bf_minb = 0; //Vertical Min BF value A in U0.6 format.Selected if V_FCNTL == 1 - dscl_prog_data->easf_v_bf1_pwl_in_seg0 = -512; // S0.10, BF1 PWL Segment 0 - dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0 - dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0 - dscl_prog_data->easf_v_bf1_pwl_in_seg1 = -20; // S0.10, BF1 PWL Segment 1 - dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1 - dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1 - dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2 - dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2 - dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2 - dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3 - dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3 - dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = -56; // S7.3, BF1 Slope PWL Segment 3 - dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4 - dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4 - dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = -48; // S7.3, BF1 Slope PWL Segment 4 - dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5 - dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5 - dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = -240; // S7.3, BF1 Slope PWL Segment 5 - dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6 - dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6 - dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = -160; // S7.3, BF1 Slope PWL Segment 6 - dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7 - dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7 if (lls_pref == LLS_PREF_YES) { + dscl_prog_data->easf_v_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control + dscl_prog_data->easf_v_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control + dscl_prog_data->easf_v_bf2_roc_gain = 4; // U2.2, Rate Of Change control + + dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512 + dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0 + dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0 + dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20 + dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1 + dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1 + dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2 + dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2 + dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2 + dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3 + dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3 + dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = 0x7C8; // S7.3, 
BF1 Slope PWL Segment 3 = -56 + dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4 + dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4 + dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = 0x7D0; // S7.3, BF1 Slope PWL Segment 4 = -48 + dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5 + dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5 + dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = 0x710; // S7.3, BF1 Slope PWL Segment 5 = -240 + dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6 + dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6 + dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = 0x760; // S7.3, BF1 Slope PWL Segment 6 = -160 + dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7 + dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7 + dscl_prog_data->easf_v_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0 dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0 dscl_prog_data->easf_v_bf3_pwl_slope_set0 = 0x12C5; // FP1.6.6, BF3 Slope PWL Segment 0 @@ -1088,13 +1319,41 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data, 0x136B; // FP1.6.6, BF3 Slope PWL Segment 3 dscl_prog_data->easf_v_bf3_pwl_in_set4 = 0x0C37; // FP0.6.6, BF3 Input value PWL Segment 4 (0.125 * 125^3) - dscl_prog_data->easf_v_bf3_pwl_base_set4 = -50; // S0.6, BF3 Base PWL Segment 4 + dscl_prog_data->easf_v_bf3_pwl_base_set4 = 0x4E; // S0.6, BF3 Base PWL Segment 4 = -50 dscl_prog_data->easf_v_bf3_pwl_slope_set4 = 0x1200; // FP1.6.6, BF3 Slope PWL Segment 4 dscl_prog_data->easf_v_bf3_pwl_in_set5 = 0x0CF7; // FP0.6.6, BF3 Input value PWL Segment 5 (1.0 * 125^3) - dscl_prog_data->easf_v_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5 + dscl_prog_data->easf_v_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63 } else { + dscl_prog_data->easf_v_bf2_flat1_gain = 13; // U1.3, BF2 Flat1 Gain control + dscl_prog_data->easf_v_bf2_flat2_gain = 15; // U4.0, BF2 Flat2 Gain control + dscl_prog_data->easf_v_bf2_roc_gain = 14; // U2.2, Rate Of Change control + + dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x440; // S0.10, BF1 PWL Segment 0 = -960 + dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0 + dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 2; // S7.3, BF1 Slope PWL Segment 0 + dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7C4; // S0.10, BF1 PWL Segment 1 = -60 + dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1 + dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 109; // S7.3, BF1 Slope PWL Segment 1 + dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2 + dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2 + dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2 + dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 48; // S0.10, BF1 PWL Segment 3 + dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3 + dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = 0x7ED; // S7.3, BF1 Slope PWL Segment 3 = -19 + dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 96; // S0.10, BF1 PWL Segment 4 + dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4 + dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = 0x7F0; // S7.3, BF1 Slope PWL Segment 4 = -16 + dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 144; // S0.10, BF1 PWL Segment 5 + 
dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5 + dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = 0x7B0; // S7.3, BF1 Slope PWL Segment 5 = -80 + dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 192; // S0.10, BF1 PWL Segment 6 + dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6 + dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = 0x7CB; // S7.3, BF1 Slope PWL Segment 6 = -53 + dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 240; // S0.10, BF1 PWL Segment 7 + dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7 + dscl_prog_data->easf_v_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0 dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0 dscl_prog_data->easf_v_bf3_pwl_slope_set0 = 0x0000; // FP1.6.6, BF3 Slope PWL Segment 0 @@ -1113,11 +1372,11 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data, 0x1878; // FP1.6.6, BF3 Slope PWL Segment 3 dscl_prog_data->easf_v_bf3_pwl_in_set4 = 0x0761; // FP0.6.6, BF3 Input value PWL Segment 4 (0.375) - dscl_prog_data->easf_v_bf3_pwl_base_set4 = -60; // S0.6, BF3 Base PWL Segment 4 + dscl_prog_data->easf_v_bf3_pwl_base_set4 = 0x44; // S0.6, BF3 Base PWL Segment 4 = -60 dscl_prog_data->easf_v_bf3_pwl_slope_set4 = 0x1760; // FP1.6.6, BF3 Slope PWL Segment 4 dscl_prog_data->easf_v_bf3_pwl_in_set5 = 0x0780; // FP0.6.6, BF3 Input value PWL Segment 5 (0.5) - dscl_prog_data->easf_v_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5 + dscl_prog_data->easf_v_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63 } } else dscl_prog_data->easf_v_en = false; @@ -1125,52 +1384,63 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data, if (enable_easf_h) { dscl_prog_data->easf_h_en = true; dscl_prog_data->easf_h_ring = 0; - dscl_prog_data->easf_h_sharp_factor = 1; + dscl_prog_data->easf_h_sharp_factor = 0; dscl_prog_data->easf_h_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable dscl_prog_data->easf_h_bf2_mode = 0xF; // 4-bit, BF2 calculation mode - dscl_prog_data->easf_h_bf3_mode = - 2; // 2-bit, BF3 chroma mode correction calculation mode - dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control - dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control - dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control + /* 2-bit, BF3 chroma mode correction calculation mode */ + dscl_prog_data->easf_h_bf3_mode = spl_get_h_bf3_mode( + spl_scratch->scl_data.recip_ratios.horz); + /* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] */ dscl_prog_data->easf_h_ringest_eventap_reduceg1 = - 0x4000; // FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] + spl_get_reducer_gain4(spl_scratch->scl_data.taps.h_taps, + spl_scratch->scl_data.recip_ratios.horz); + /* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] */ dscl_prog_data->easf_h_ringest_eventap_reduceg2 = - 0x4100; // FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] + spl_get_reducer_gain6(spl_scratch->scl_data.taps.h_taps, + spl_scratch->scl_data.recip_ratios.horz); + /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */ dscl_prog_data->easf_h_ringest_eventap_gain1 = - 0xB058; // FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 + spl_get_gainRing4(spl_scratch->scl_data.taps.h_taps, + spl_scratch->scl_data.recip_ratios.horz); + /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */ 
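
Many signed PWL constants in this hunk changed from plain decimals (-512, -56, -50, ...) to their raw two's-complement field encodings (0x600, 0x7C8, 0x4E, ...). A small sketch of that encoding with a few of the values used above as checks; the field widths are an assumption read off the S0.10 / S7.3 / S0.6 comments as 1 + integer + fraction bits.

#include <assert.h>
#include <stdint.h>

/* Encode a signed integer code into an n-bit register field (two's complement). */
static uint32_t field_encode(int value, unsigned int bits)
{
	return (uint32_t)value & ((1u << bits) - 1u);
}

int main(void)
{
	assert(field_encode(-512, 11) == 0x600);	/* S0.10 BF1 PWL in_seg0 */
	assert(field_encode(-20,  11) == 0x7EC);	/* S0.10 BF1 PWL in_seg1 */
	assert(field_encode(-56,  11) == 0x7C8);	/* S7.3  BF1 PWL slope_seg3 */
	assert(field_encode(-240, 11) == 0x710);	/* S7.3  BF1 PWL slope_seg5 */
	assert(field_encode(-50,   7) == 0x4E);		/* S0.6  BF3 PWL base_set4 */
	assert(field_encode(-63,   7) == 0x41);		/* S0.6  BF3 PWL base_set5 */
	return 0;
}
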
dscl_prog_data->easf_h_ringest_eventap_gain2 = - 0xA640; // FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 + spl_get_gainRing6(spl_scratch->scl_data.taps.h_taps, + spl_scratch->scl_data.recip_ratios.horz); dscl_prog_data->easf_h_bf_maxa = 63; //Horz Max BF value A in U0.6 format.Selected if H_FCNTL==0 dscl_prog_data->easf_h_bf_maxb = 63; //Horz Max BF value B in U0.6 format.Selected if H_FCNTL==1 dscl_prog_data->easf_h_bf_mina = 0; //Horz Min BF value B in U0.6 format.Selected if H_FCNTL==0 dscl_prog_data->easf_h_bf_minb = 0; //Horz Min BF value B in U0.6 format.Selected if H_FCNTL==1 - dscl_prog_data->easf_h_bf1_pwl_in_seg0 = -512; // S0.10, BF1 PWL Segment 0 - dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0 - dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0 - dscl_prog_data->easf_h_bf1_pwl_in_seg1 = -20; // S0.10, BF1 PWL Segment 1 - dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1 - dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1 - dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2 - dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2 - dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2 - dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3 - dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3 - dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = -56; // S7.3, BF1 Slope PWL Segment 3 - dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4 - dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4 - dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = -48; // S7.3, BF1 Slope PWL Segment 4 - dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5 - dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5 - dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = -240; // S7.3, BF1 Slope PWL Segment 5 - dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6 - dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6 - dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = -160; // S7.3, BF1 Slope PWL Segment 6 - dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7 - dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7 if (lls_pref == LLS_PREF_YES) { + dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control + dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control + dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control + + dscl_prog_data->easf_h_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512 + dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0 + dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0 + dscl_prog_data->easf_h_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20 + dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1 + dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1 + dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2 + dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2 + dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2 + dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3 + dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 
63; // U0.6, BF1 Base PWL Segment 3 + dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = 0x7C8; // S7.3, BF1 Slope PWL Segment 3 = -56 + dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4 + dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4 + dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = 0x7D0; // S7.3, BF1 Slope PWL Segment 4 = -48 + dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5 + dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5 + dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = 0x710; // S7.3, BF1 Slope PWL Segment 5 = -240 + dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6 + dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6 + dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = 0x760; // S7.3, BF1 Slope PWL Segment 6 = -160 + dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7 + dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7 + dscl_prog_data->easf_h_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0 dscl_prog_data->easf_h_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0 dscl_prog_data->easf_h_bf3_pwl_slope_set0 = 0x12C5; // FP1.6.6, BF3 Slope PWL Segment 0 @@ -1188,12 +1458,40 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data, dscl_prog_data->easf_h_bf3_pwl_slope_set3 = 0x136B; // FP1.6.6, BF3 Slope PWL Segment 3 dscl_prog_data->easf_h_bf3_pwl_in_set4 = 0x0C37; // FP0.6.6, BF3 Input value PWL Segment 4 (0.125 * 125^3) - dscl_prog_data->easf_h_bf3_pwl_base_set4 = -50; // S0.6, BF3 Base PWL Segment 4 + dscl_prog_data->easf_h_bf3_pwl_base_set4 = 0x4E; // S0.6, BF3 Base PWL Segment 4 = -50 dscl_prog_data->easf_h_bf3_pwl_slope_set4 = 0x1200; // FP1.6.6, BF3 Slope PWL Segment 4 dscl_prog_data->easf_h_bf3_pwl_in_set5 = 0x0CF7; // FP0.6.6, BF3 Input value PWL Segment 5 (1.0 * 125^3) - dscl_prog_data->easf_h_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5 + dscl_prog_data->easf_h_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63 } else { + dscl_prog_data->easf_h_bf2_flat1_gain = 13; // U1.3, BF2 Flat1 Gain control + dscl_prog_data->easf_h_bf2_flat2_gain = 15; // U4.0, BF2 Flat2 Gain control + dscl_prog_data->easf_h_bf2_roc_gain = 14; // U2.2, Rate Of Change control + + dscl_prog_data->easf_h_bf1_pwl_in_seg0 = 0x440; // S0.10, BF1 PWL Segment 0 = -960 + dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0 + dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 2; // S7.3, BF1 Slope PWL Segment 0 + dscl_prog_data->easf_h_bf1_pwl_in_seg1 = 0x7C4; // S0.10, BF1 PWL Segment 1 = -60 + dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1 + dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 109; // S7.3, BF1 Slope PWL Segment 1 + dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2 + dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2 + dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2 + dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 48; // S0.10, BF1 PWL Segment 3 + dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3 + dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = 0x7ED; // S7.3, BF1 Slope PWL Segment 3 = -19 + dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 96; // S0.10, BF1 PWL Segment 4 + dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4 + dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = 0x7F0; // S7.3, 
BF1 Slope PWL Segment 4 = -16 + dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 144; // S0.10, BF1 PWL Segment 5 + dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5 + dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = 0x7B0; // S7.3, BF1 Slope PWL Segment 5 = -80 + dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 192; // S0.10, BF1 PWL Segment 6 + dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6 + dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = 0x7CB; // S7.3, BF1 Slope PWL Segment 6 = -53 + dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 240; // S0.10, BF1 PWL Segment 7 + dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7 + dscl_prog_data->easf_h_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0 dscl_prog_data->easf_h_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0 dscl_prog_data->easf_h_bf3_pwl_slope_set0 = 0x0000; // FP1.6.6, BF3 Slope PWL Segment 0 @@ -1211,25 +1509,30 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data, dscl_prog_data->easf_h_bf3_pwl_slope_set3 = 0x1878; // FP1.6.6, BF3 Slope PWL Segment 3 dscl_prog_data->easf_h_bf3_pwl_in_set4 = 0x0761; // FP0.6.6, BF3 Input value PWL Segment 4 (0.375) - dscl_prog_data->easf_h_bf3_pwl_base_set4 = -60; // S0.6, BF3 Base PWL Segment 4 + dscl_prog_data->easf_h_bf3_pwl_base_set4 = 0x44; // S0.6, BF3 Base PWL Segment 4 = -60 dscl_prog_data->easf_h_bf3_pwl_slope_set4 = 0x1760; // FP1.6.6, BF3 Slope PWL Segment 4 dscl_prog_data->easf_h_bf3_pwl_in_set5 = 0x0780; // FP0.6.6, BF3 Input value PWL Segment 5 (0.5) - dscl_prog_data->easf_h_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5 + dscl_prog_data->easf_h_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63 } // if (lls_pref == LLS_PREF_YES) } else dscl_prog_data->easf_h_en = false; if (lls_pref == LLS_PREF_YES) { dscl_prog_data->easf_ltonl_en = 1; // Linear input - dscl_prog_data->easf_matrix_c0 = - 0x504E; // fp1.5.10, C0 coefficient (LN_BT2020: 0.2627 * (2^14)/125 = 34.43750000) - dscl_prog_data->easf_matrix_c1 = - 0x558E; // fp1.5.10, C1 coefficient (LN_BT2020: 0.6780 * (2^14)/125 = 88.87500000) - dscl_prog_data->easf_matrix_c2 = - 0x47C6; // fp1.5.10, C2 coefficient (LN_BT2020: 0.0593 * (2^14)/125 = 7.77343750) - dscl_prog_data->easf_matrix_c3 = - 0x0; // fp1.5.10, C3 coefficient + if ((setup == HDR_L) && (spl_is_rgb8(format))) { + /* Calculate C0-C3 coefficients based on HDR multiplier */ + spl_calculate_c0_c3_hdr(dscl_prog_data, sdr_white_level_nits); + } else { // HDR_L ( DWM ) and SDR_L + dscl_prog_data->easf_matrix_c0 = + 0x4EF7; // fp1.5.10, C0 coefficient (LN_rec709: 0.2126 * (2^14)/125 = 27.86590720) + dscl_prog_data->easf_matrix_c1 = + 0x55DC; // fp1.5.10, C1 coefficient (LN_rec709: 0.7152 * (2^14)/125 = 93.74269440) + dscl_prog_data->easf_matrix_c2 = + 0x48BB; // fp1.5.10, C2 coefficient (LN_rec709: 0.0722 * (2^14)/125 = 9.46339840) + dscl_prog_data->easf_matrix_c3 = + 0x0; // fp1.5.10, C3 coefficient + } } else { dscl_prog_data->easf_ltonl_en = 0; // Non-Linear input dscl_prog_data->easf_matrix_c0 = @@ -1241,27 +1544,43 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data, dscl_prog_data->easf_matrix_c3 = 0x0; // fp1.5.10, C3 coefficient } + + if (spl_is_yuv420(format)) { /* TODO: 0 = RGB, 1 = YUV */ + dscl_prog_data->easf_matrix_mode = 1; + /* + * 2-bit, BF3 chroma mode correction calculation mode + * Needs to be disabled for YUV420 mode + * Override lookup value + */ + dscl_prog_data->easf_v_bf3_mode = 0; + dscl_prog_data->easf_h_bf3_mode = 
0; + } else + dscl_prog_data->easf_matrix_mode = 0; + } + /*Set isharp noise detection */ -static void spl_set_isharp_noise_det_mode(struct dscl_prog_data *dscl_prog_data) +static void spl_set_isharp_noise_det_mode(struct dscl_prog_data *dscl_prog_data, + const struct spl_scaler_data *data) { // ISHARP_NOISEDET_MODE // 0: 3x5 as VxH // 1: 4x5 as VxH // 2: // 3: 5x5 as VxH - if (dscl_prog_data->taps.v_taps == 6) - dscl_prog_data->isharp_noise_det.mode = 3; // ISHARP_NOISEDET_MODE - else if (dscl_prog_data->taps.h_taps == 4) - dscl_prog_data->isharp_noise_det.mode = 1; // ISHARP_NOISEDET_MODE - else if (dscl_prog_data->taps.h_taps == 3) - dscl_prog_data->isharp_noise_det.mode = 0; // ISHARP_NOISEDET_MODE + if (data->taps.v_taps == 6) + dscl_prog_data->isharp_noise_det.mode = 3; + else if (data->taps.v_taps == 4) + dscl_prog_data->isharp_noise_det.mode = 1; + else if (data->taps.v_taps == 3) + dscl_prog_data->isharp_noise_det.mode = 0; }; /* Set Sharpener data */ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, struct adaptive_sharpness adp_sharpness, bool enable_isharp, enum linear_light_scaling lls_pref, enum spl_pixel_format format, - const struct spl_scaler_data *data) + const struct spl_scaler_data *data, struct spl_fixed31_32 ratio, + enum system_setup setup, enum scale_to_sharpness_policy scale_to_sharpness_policy) { /* Turn off sharpener if not required */ if (!enable_isharp) { @@ -1269,11 +1588,18 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, return; } + spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness, + scale_to_sharpness_policy); + dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup); + dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level; + dscl_prog_data->isharp_en = 1; // ISHARP_EN - dscl_prog_data->isharp_noise_det.enable = 1; // ISHARP_NOISEDET_EN // Set ISHARP_NOISEDET_MODE if htaps = 6-tap - if (dscl_prog_data->taps.h_taps == 6) - spl_set_isharp_noise_det_mode(dscl_prog_data); // ISHARP_NOISEDET_MODE + if (data->taps.h_taps == 6) { + dscl_prog_data->isharp_noise_det.enable = 1; /* ISHARP_NOISEDET_EN */ + spl_set_isharp_noise_det_mode(dscl_prog_data, data); /* ISHARP_NOISEDET_MODE */ + } else + dscl_prog_data->isharp_noise_det.enable = 0; // ISHARP_NOISEDET_EN // Program noise detection threshold dscl_prog_data->isharp_noise_det.uthreshold = 24; // ISHARP_NOISEDET_UTHRE dscl_prog_data->isharp_noise_det.dthreshold = 4; // ISHARP_NOISEDET_DTHRE @@ -1282,48 +1608,86 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, dscl_prog_data->isharp_noise_det.pwl_end_in = 13; // ISHARP_NOISEDET_PWL_END_IN dscl_prog_data->isharp_noise_det.pwl_slope = 1623; // ISHARP_NOISEDET_PWL_SLOPE - if ((lls_pref == LLS_PREF_NO) && !spl_is_yuv420(format)) /* ISHARP_FMT_MODE */ + if (lls_pref == LLS_PREF_NO) /* ISHARP_FMT_MODE */ dscl_prog_data->isharp_fmt.mode = 1; else dscl_prog_data->isharp_fmt.mode = 0; dscl_prog_data->isharp_fmt.norm = 0x3C00; // ISHARP_FMT_NORM dscl_prog_data->isharp_lba.mode = 0; // ISHARP_LBA_MODE - // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0 - dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format - dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format - dscl_prog_data->isharp_lba.slope_seg[0] = 32; // ISHARP LBA for Seg 0. 
SLOPE value in S5.3 format - // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1 - dscl_prog_data->isharp_lba.in_seg[1] = 256; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format - dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format - dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format - // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2 - dscl_prog_data->isharp_lba.in_seg[2] = 614; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format - dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format - dscl_prog_data->isharp_lba.slope_seg[2] = -20; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format - // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3 - dscl_prog_data->isharp_lba.in_seg[3] = 1023; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format - dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format - dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format - // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4 - dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4.INPUT value in U0.10 format - dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format - dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format - // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5 - dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5.INPUT value in U0.10 format - dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format - switch (adp_sharpness.sharpness) { - case SHARPNESS_LOW: - dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_0p5x(); - break; - case SHARPNESS_MID: - dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_1p0x(); - break; - case SHARPNESS_HIGH: - dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_2p0x(); - break; - default: - BREAK_TO_DEBUGGER(); + + if (setup == SDR_L) { + // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0 + dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[0] = 62; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1 + dscl_prog_data->isharp_lba.in_seg[1] = 130; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2 + dscl_prog_data->isharp_lba.in_seg[2] = 450; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[2] = 0x18D; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -115 + // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3 + dscl_prog_data->isharp_lba.in_seg[3] = 520; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. 
SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4 + dscl_prog_data->isharp_lba.in_seg[4] = 520; // ISHARP LBA PWL for Seg 4.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5 + dscl_prog_data->isharp_lba.in_seg[5] = 520; // ISHARP LBA PWL for Seg 5.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format + } else if (setup == HDR_L) { + // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0 + dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[0] = 32; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1 + dscl_prog_data->isharp_lba.in_seg[1] = 254; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2 + dscl_prog_data->isharp_lba.in_seg[2] = 559; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[2] = 0x10C; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -244 + // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3 + dscl_prog_data->isharp_lba.in_seg[3] = 592; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4 + dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5 + dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format + } else { + // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0 + dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[0] = 40; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1 + dscl_prog_data->isharp_lba.in_seg[1] = 204; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. 
SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2 + dscl_prog_data->isharp_lba.in_seg[2] = 818; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[2] = 0x1D9; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -39 + // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3 + dscl_prog_data->isharp_lba.in_seg[3] = 1023; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4 + dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format + dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format + // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5 + dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5.INPUT value in U0.10 format + dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format } // Program the nldelta soft clip values @@ -1346,59 +1710,6 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, // Set the values as per lookup table spl_set_blur_scale_data(dscl_prog_data, data); } -static bool spl_get_isharp_en(struct adaptive_sharpness adp_sharpness, - int vscale_ratio, int hscale_ratio, struct spl_taps taps, - enum spl_pixel_format format) -{ - bool enable_isharp = false; - - if (adp_sharpness.enable == false) - return enable_isharp; // Return if adaptive sharpness is disabled - // Is downscaling ? 
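
The new per-setup LBA tables above store negative slopes the same raw way (0x18D, 0x10C, 0x1D9); decoding one of them as a check, under the assumption that S5.3 means a 9-bit field with three fractional bits.

#include <stdio.h>

/* Decode a 9-bit S5.3 register code into a signed value in eighths. */
static int s5d3_decode(unsigned int code)
{
	int v = (int)(code & 0x1FF);

	return (v & 0x100) ? v - 0x200 : v;	/* sign-extend bit 8 */
}

int main(void)
{
	/* 0x18D -> -115 raw -> -115/8 = -14.375, matching the "= -115" note above */
	printf("%d (%.3f)\n", s5d3_decode(0x18D), s5d3_decode(0x18D) / 8.0);
	return 0;
}
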
- if (vscale_ratio > 1 || hscale_ratio > 1) { - // END - No iSHARP support for downscaling - return enable_isharp; - } - // Scaling is up to 1:1 (no scaling) or upscaling - - /* Only apply sharpness to NV12 and not P010 */ - if (format != SPL_PIXEL_FORMAT_420BPP8) - return enable_isharp; - - // LB support horizontal taps 4,6 or vertical taps 3, 4, 6 - if (taps.h_taps == 4 || taps.h_taps == 6 || - taps.v_taps == 3 || taps.v_taps == 4 || taps.v_taps == 6) { - // END - iSHARP supported - enable_isharp = true; - } - return enable_isharp; -} - -static bool spl_choose_lls_policy(enum spl_pixel_format format, - enum spl_transfer_func_type tf_type, - enum spl_transfer_func_predefined tf_predefined_type, - enum linear_light_scaling *lls_pref) -{ - if (spl_is_yuv420(format)) { - *lls_pref = LLS_PREF_NO; - if ((tf_type == SPL_TF_TYPE_PREDEFINED) || (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS)) - return true; - } else { /* RGB or YUV444 */ - if (tf_type == SPL_TF_TYPE_PREDEFINED) { - if ((tf_predefined_type == SPL_TRANSFER_FUNCTION_HLG) || - (tf_predefined_type == SPL_TRANSFER_FUNCTION_HLG12)) - *lls_pref = LLS_PREF_NO; - else - *lls_pref = LLS_PREF_YES; - return true; - } else if (tf_type == SPL_TF_TYPE_BYPASS) { - *lls_pref = LLS_PREF_YES; - return true; - } - } - *lls_pref = LLS_PREF_NO; - return false; -} /* Calculate scaler parameters */ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) @@ -1406,65 +1717,75 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) bool res = false; bool enable_easf_v = false; bool enable_easf_h = false; - bool lls_enable_easf = true; - const struct spl_scaler_data *data = &spl_out->scl_data; + int vratio = 0; + int hratio = 0; + struct spl_scratch spl_scratch; + struct spl_fixed31_32 isharp_scale_ratio; + enum system_setup setup; + bool enable_isharp = false; + const struct spl_scaler_data *data = &spl_scratch.scl_data; + + memset(&spl_scratch, 0, sizeof(struct spl_scratch)); + spl_scratch.scl_data.h_active = spl_in->h_active; + spl_scratch.scl_data.v_active = spl_in->v_active; + // All SPL calls /* recout calculation */ /* depends on h_active */ - spl_calculate_recout(spl_in, spl_out); + spl_calculate_recout(spl_in, &spl_scratch, spl_out); /* depends on pixel format */ - spl_calculate_scaling_ratios(spl_in, spl_out); + spl_calculate_scaling_ratios(spl_in, &spl_scratch, spl_out); /* depends on scaling ratios and recout, does not calculate offset yet */ - spl_calculate_viewport_size(spl_in, spl_out); + spl_calculate_viewport_size(spl_in, &spl_scratch); res = spl_get_optimal_number_of_taps( spl_in->basic_out.max_downscale_src_width, spl_in, - spl_out, &spl_in->scaling_quality); + &spl_scratch, &spl_in->scaling_quality, &enable_easf_v, + &enable_easf_h, &enable_isharp); /* * Depends on recout, scaling ratios, h_active and taps * May need to re-check lb size after this in some obscure scenario */ if (res) - spl_calculate_inits_and_viewports(spl_in, spl_out); + spl_calculate_inits_and_viewports(spl_in, &spl_scratch); // Handle 3d recout - spl_handle_3d_recout(spl_in, &spl_out->scl_data.recout); + spl_handle_3d_recout(spl_in, &spl_scratch.scl_data.recout); // Clamp - spl_clamp_viewport(&spl_out->scl_data.viewport); + spl_clamp_viewport(&spl_scratch.scl_data.viewport); if (!res) return res; - /* - * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer - * function to determine whether to use LINEAR or NONLINEAR scaling - */ - if (spl_in->lls_pref == LLS_PREF_DONT_CARE) - lls_enable_easf = 
spl_choose_lls_policy(spl_in->basic_in.format, - spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type, - &spl_in->lls_pref); - // Save all calculated parameters in dscl_prog_data structure to program hw registers - spl_set_dscl_prog_data(spl_in, spl_out); + spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp); - int vratio = dc_fixpt_ceil(spl_out->scl_data.ratios.vert); - int hratio = dc_fixpt_ceil(spl_out->scl_data.ratios.horz); - if (!lls_enable_easf || spl_in->disable_easf) { - enable_easf_v = false; - enable_easf_h = false; + if (spl_in->lls_pref == LLS_PREF_YES) { + if (spl_in->is_hdr_on) + setup = HDR_L; + else + setup = SDR_L; } else { - /* Enable EASF on vertical? */ - enable_easf_v = enable_easf(vratio, spl_out->scl_data.taps.v_taps, spl_in->lls_pref, spl_in->prefer_easf); - /* Enable EASF on horizontal? */ - enable_easf_h = enable_easf(hratio, spl_out->scl_data.taps.h_taps, spl_in->lls_pref, spl_in->prefer_easf); + if (spl_in->is_hdr_on) + setup = HDR_NL; + else + setup = SDR_NL; } + // Set EASF - spl_set_easf_data(spl_out->dscl_prog_data, enable_easf_v, enable_easf_h, spl_in->lls_pref, - spl_in->basic_in.format); + spl_set_easf_data(&spl_scratch, spl_out, enable_easf_v, enable_easf_h, spl_in->lls_pref, + spl_in->basic_in.format, setup, spl_in->sdr_white_level_nits); + // Set iSHARP - bool enable_isharp = spl_get_isharp_en(spl_in->adaptive_sharpness, vratio, hratio, - spl_out->scl_data.taps, spl_in->basic_in.format); + vratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.vert); + hratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.horz); + if (vratio <= hratio) + isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.vert; + else + isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.horz; + spl_set_isharp_data(spl_out->dscl_prog_data, spl_in->adaptive_sharpness, enable_isharp, - spl_in->lls_pref, spl_in->basic_in.format, data); + spl_in->lls_pref, spl_in->basic_in.format, data, isharp_scale_ratio, setup, + spl_in->debug.scale_to_sharpness_policy); return res; } diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h index f1fd3eb92f8a4b..205e59a2a8ee8f 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h @@ -9,16 +9,8 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 -#ifdef __cplusplus -extern "C" { -#endif - /* SPL interfaces */ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out); -#ifdef __cplusplus -} -#endif - #endif /* __DC_SPL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c new file mode 100644 index 00000000000000..99238644e0a117 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. 
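
The reworked spl_calculate_scaler_params() above derives the system_setup value from the linear-light preference and the HDR flag. A condensed model of that selection, written as a standalone sketch with illustrative names (the real enum lives in dc_spl_isharp_filters.h):

/* Condensed model of the setup selection; the _model names are illustrative only. */
enum system_setup_model { SDR_NL_M, SDR_L_M, HDR_NL_M, HDR_L_M };

static enum system_setup_model pick_setup_model(int lls_pref_yes, int is_hdr_on)
{
	if (lls_pref_yes)			/* LLS_PREF_YES: linear-light scaling */
		return is_hdr_on ? HDR_L_M : SDR_L_M;
	return is_hdr_on ? HDR_NL_M : SDR_NL_M;	/* otherwise the non-linear variants */
}

The iSHARP scale ratio handed to spl_set_isharp_data() is then recip_ratios.vert when the ceiled vertical ratio does not exceed the horizontal one, and recip_ratios.horz otherwise, per the hunk above.
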
+ +#include "dc_spl_filters.h" + +void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter, + uint16_t *s1_12_filter, int num_taps) +{ + int num_entries = NUM_PHASES_COEFF * num_taps; + int i; + + for (i = 0; i < num_entries; i++) + *(s1_12_filter + i) = *(s1_10_filter + i) * 4; +} diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h new file mode 100644 index 00000000000000..20439cdbdb105d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright 2024 Advanced Micro Devices, Inc. */ + +#ifndef __DC_SPL_FILTERS_H__ +#define __DC_SPL_FILTERS_H__ + +#include "dc_spl_types.h" + +#define NUM_PHASES_COEFF 33 + +void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter, + uint16_t *s1_12_filter, int num_taps); + +#endif /* __DC_SPL_FILTERS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c index 8bc838c7c3c535..e0572252c64046 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c @@ -2,7 +2,8 @@ // // Copyright 2024 Advanced Micro Devices, Inc. -#include "dc_spl_types.h" +#include "spl_debug.h" +#include "dc_spl_filters.h" #include "dc_spl_isharp_filters.h" //======================================== @@ -16,7 +17,7 @@ // C_start = 40.000000 // C_end = 64.000000 //======================================== -static const uint32_t filter_isharp_1D_lut_0[32] = { +static const uint32_t filter_isharp_1D_lut_0[ISHARP_LUT_TABLE_SIZE] = { 0x02010000, 0x0A070503, 0x1614100D, @@ -62,7 +63,7 @@ static const uint32_t filter_isharp_1D_lut_0[32] = { // C_end = 127.000000 //======================================== -static const uint32_t filter_isharp_1D_lut_0p5x[32] = { +static const uint32_t filter_isharp_1D_lut_0p5x[ISHARP_LUT_TABLE_SIZE] = { 0x00000000, 0x02020101, 0x06050403, @@ -107,7 +108,7 @@ static const uint32_t filter_isharp_1D_lut_0p5x[32] = { // C_start = 96.000000 // C_end = 127.000000 //======================================== -static const uint32_t filter_isharp_1D_lut_1p0x[32] = { +static const uint32_t filter_isharp_1D_lut_1p0x[ISHARP_LUT_TABLE_SIZE] = { 0x01000000, 0x05040302, 0x0B0A0806, @@ -152,7 +153,7 @@ static const uint32_t filter_isharp_1D_lut_1p0x[32] = { // C_start = 96.000000 // C_end = 127.000000 //======================================== -static const uint32_t filter_isharp_1D_lut_1p5x[32] = { +static const uint32_t filter_isharp_1D_lut_1p5x[ISHARP_LUT_TABLE_SIZE] = { 0x01010000, 0x07050402, 0x110F0C0A, @@ -197,7 +198,7 @@ static const uint32_t filter_isharp_1D_lut_1p5x[32] = { // C_start = 40.000000 // C_end = 127.000000 //======================================== -static const uint32_t filter_isharp_1D_lut_2p0x[32] = { +static const uint32_t filter_isharp_1D_lut_2p0x[ISHARP_LUT_TABLE_SIZE] = { 0x02010000, 0x0A070503, 0x1614100D, @@ -231,6 +232,53 @@ static const uint32_t filter_isharp_1D_lut_2p0x[32] = { 0x080B0D0E, 0x00020406, }; +//======================================== +// Delta Gain 1DLUT +// LUT content is packed as 4-bytes into one DWORD/entry +// A_start = 0.000000 +// A_end = 10.000000 +// A_gain = 3.000000 +// B_start = 11.000000 +// B_end = 127.000000 +// C_start = 40.000000 +// C_end = 127.000000 +//======================================== +static const uint32_t filter_isharp_1D_lut_3p0x[ISHARP_LUT_TABLE_SIZE] = { +0x03010000, +0x0F0B0805, 
+0x211E1813, +0x2B292624, +0x3533302E, +0x3E3C3A37, +0x46444240, +0x4D4B4A48, +0x5352504F, +0x59575655, +0x5D5C5B5A, +0x61605F5E, +0x64646362, +0x66666565, +0x68686767, +0x68686868, +0x68686868, +0x67676868, +0x65656666, +0x62636464, +0x5E5F6061, +0x5A5B5C5D, +0x55565759, +0x4F505253, +0x484A4B4D, +0x40424446, +0x373A3C3E, +0x2E303335, +0x2426292B, +0x191B1E21, +0x0D101316, +0x0003060A, +}; + +//======================================== // Wide scaler coefficients //======================================================== // gen_scaler_coeffs.m @@ -285,7 +333,7 @@ static const uint16_t filter_isharp_wide_6tap_64p[198] = { // Blur & Scale LPF // S1.10 //======================================================== -static const uint16_t filter_isharp_bs_4tap_64p[198] = { +static const uint16_t filter_isharp_bs_4tap_in_6_64p[198] = { 0x0000, 0x00E5, 0x0237, 0x00E4, 0x0000, 0x0000, 0x0000, 0x00DE, 0x0237, 0x00EB, 0x0000, 0x0000, 0x0000, 0x00D7, 0x0236, 0x00F2, 0x0001, 0x0000, @@ -320,6 +368,147 @@ static const uint16_t filter_isharp_bs_4tap_64p[198] = { 0x0000, 0x003B, 0x01CF, 0x01C2, 0x0034, 0x0000, 0x0000, 0x0037, 0x01C9, 0x01C9, 0x0037, 0x0000 }; +//======================================================== +// gen_BlurScale_coeffs.m +// 25-Apr-2022 +// 4 +// 64 +// Blur & Scale LPF +// S1.10 +//======================================================== +static const uint16_t filter_isharp_bs_4tap_64p[132] = { +0x00E5, 0x0237, 0x00E4, 0x0000, +0x00DE, 0x0237, 0x00EB, 0x0000, +0x00D7, 0x0236, 0x00F2, 0x0001, +0x00D0, 0x0235, 0x00FA, 0x0001, +0x00C9, 0x0234, 0x0101, 0x0002, +0x00C2, 0x0233, 0x0108, 0x0003, +0x00BB, 0x0232, 0x0110, 0x0003, +0x00B5, 0x0230, 0x0117, 0x0004, +0x00AE, 0x022E, 0x011F, 0x0005, +0x00A8, 0x022C, 0x0126, 0x0006, +0x00A2, 0x022A, 0x012D, 0x0007, +0x009C, 0x0228, 0x0134, 0x0008, +0x0096, 0x0225, 0x013C, 0x0009, +0x0090, 0x0222, 0x0143, 0x000B, +0x008A, 0x021F, 0x014B, 0x000C, +0x0085, 0x021C, 0x0151, 0x000E, +0x007F, 0x0218, 0x015A, 0x000F, +0x007A, 0x0215, 0x0160, 0x0011, +0x0074, 0x0211, 0x0168, 0x0013, +0x006F, 0x020D, 0x016F, 0x0015, +0x006A, 0x0209, 0x0176, 0x0017, +0x0065, 0x0204, 0x017E, 0x0019, +0x0060, 0x0200, 0x0185, 0x001B, +0x005C, 0x01FB, 0x018C, 0x001D, +0x0057, 0x01F6, 0x0193, 0x0020, +0x0053, 0x01F1, 0x019A, 0x0022, +0x004E, 0x01EC, 0x01A1, 0x0025, +0x004A, 0x01E6, 0x01A8, 0x0028, +0x0046, 0x01E1, 0x01AF, 0x002A, +0x0042, 0x01DB, 0x01B6, 0x002D, +0x003F, 0x01D5, 0x01BB, 0x0031, +0x003B, 0x01CF, 0x01C2, 0x0034, +0x0037, 0x01C9, 0x01C9, 0x0037, +}; +//======================================================== +// gen_BlurScale_coeffs.m +// 09-Jun-2022 +// 3 +// 64 +// Blur & Scale LPF +// S1.10 +//======================================================== +static const uint16_t filter_isharp_bs_3tap_64p[99] = { +0x0200, 0x0200, 0x0000, +0x01F6, 0x0206, 0x0004, +0x01EC, 0x020B, 0x0009, +0x01E2, 0x0211, 0x000D, +0x01D8, 0x0216, 0x0012, +0x01CE, 0x021C, 0x0016, +0x01C4, 0x0221, 0x001B, +0x01BA, 0x0226, 0x0020, +0x01B0, 0x022A, 0x0026, +0x01A6, 0x022F, 0x002B, +0x019C, 0x0233, 0x0031, +0x0192, 0x0238, 0x0036, +0x0188, 0x023C, 0x003C, +0x017E, 0x0240, 0x0042, +0x0174, 0x0244, 0x0048, +0x016A, 0x0248, 0x004E, +0x0161, 0x024A, 0x0055, +0x0157, 0x024E, 0x005B, +0x014D, 0x0251, 0x0062, +0x0144, 0x0253, 0x0069, +0x013A, 0x0256, 0x0070, +0x0131, 0x0258, 0x0077, +0x0127, 0x025B, 0x007E, +0x011E, 0x025C, 0x0086, +0x0115, 0x025E, 0x008D, +0x010B, 0x0260, 0x0095, +0x0102, 0x0262, 0x009C, +0x00F9, 0x0263, 0x00A4, +0x00F0, 0x0264, 0x00AC, +0x00E7, 0x0265, 0x00B4, +0x00DF, 0x0264, 
0x00BD, +0x00D6, 0x0265, 0x00C5, +0x00CD, 0x0266, 0x00CD, +}; + +/* Converted Blur & Scale coeff tables from S1.10 to S1.12 */ +static uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198]; +static uint16_t filter_isharp_bs_4tap_64p_s1_12[132]; +static uint16_t filter_isharp_bs_3tap_64p_s1_12[99]; + +/* Pre-generated 1DLUT for given setup and sharpness level */ +struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = { + { + 0, 0, + { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + } + }, + { + 0, 0, + { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + } + }, + { + 0, 0, + { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + } + }, + { + 0, 0, + { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + } + }, +}; + +struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = { + {1125, 1000, 0}, + {11, 10, 1}, + {1075, 1000, 2}, + {105, 100, 3}, + {1025, 1000, 4}, + {1, 1, 5}, +}; + const uint32_t *spl_get_filter_isharp_1D_lut_0(void) { return filter_isharp_1D_lut_0; @@ -340,11 +529,229 @@ const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void) { return filter_isharp_1D_lut_2p0x; } +const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void) +{ + return filter_isharp_1D_lut_3p0x; +} const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void) { return filter_isharp_wide_6tap_64p; } -const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void) +uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void) +{ + return filter_isharp_bs_4tap_in_6_64p_s1_12; +} +uint16_t *spl_get_filter_isharp_bs_4tap_64p(void) { - return filter_isharp_bs_4tap_64p; + return filter_isharp_bs_4tap_64p_s1_12; } +uint16_t *spl_get_filter_isharp_bs_3tap_64p(void) +{ + return filter_isharp_bs_3tap_64p_s1_12; +} + +static unsigned int spl_calculate_sharpness_level_adj(struct spl_fixed31_32 ratio) +{ + int j; + struct spl_fixed31_32 ratio_level; + struct scale_ratio_to_sharpness_level_adj *lookup_ptr; + unsigned int sharpness_level_down_adj; + + /* + * Adjust sharpness level based on current scaling ratio + * + * We have 5 discrete scaling ratios which we will use to adjust the + * sharpness level down by 1 as we pass each ratio. The ratios + * are + * + * 1.125 upscale and higher - no adj + * 1.100 - under 1.125 - adj level down 1 + * 1.075 - under 1.100 - adj level down 2 + * 1.050 - under 1.075 - adj level down 3 + * 1.025 - under 1.050 - adj level down 4 + * 1.000 - under 1.025 - adj level down 5 + * + */ + j = 0; + sharpness_level_down_adj = 0; + lookup_ptr = sharpness_level_adj; + while (j < NUM_SHARPNESS_ADJ_LEVELS) { + ratio_level = spl_fixpt_from_fraction(lookup_ptr->ratio_numer, + lookup_ptr->ratio_denom); + if (ratio.value >= ratio_level.value) { + sharpness_level_down_adj = lookup_ptr->level_down_adj; + break; + } + lookup_ptr++; + j++; + } + return sharpness_level_down_adj; +} + +static unsigned int spl_calculate_sharpness_level(struct spl_fixed31_32 ratio, + int discrete_sharpness_level, enum system_setup setup, + struct spl_sharpness_range sharpness_range, + enum scale_to_sharpness_policy scale_to_sharpness_policy) +{ + unsigned int sharpness_level = 0; + unsigned int sharpness_level_down_adj = 0; + + int min_sharpness, max_sharpness, mid_sharpness; + + /* + * Adjust sharpness level if policy requires we adjust it based on + * scale ratio. 
Based on scale ratio, we may adjust the sharpness + * level down by a certain number of steps. We will not select + * a sharpness value of 0 so the lowest sharpness level will be + * 0 or 1 depending on what the min_sharpness is + * + * If the policy is no required, this code maybe removed at a later + * date + */ + switch (setup) { + + case HDR_L: + min_sharpness = sharpness_range.hdr_rgb_min; + max_sharpness = sharpness_range.hdr_rgb_max; + mid_sharpness = sharpness_range.hdr_rgb_mid; + if (scale_to_sharpness_policy == SCALE_TO_SHARPNESS_ADJ_ALL) + sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio); + break; + case HDR_NL: + /* currently no use case, use Non-linear SDR values for now */ + case SDR_NL: + min_sharpness = sharpness_range.sdr_yuv_min; + max_sharpness = sharpness_range.sdr_yuv_max; + mid_sharpness = sharpness_range.sdr_yuv_mid; + if (scale_to_sharpness_policy >= SCALE_TO_SHARPNESS_ADJ_YUV) + sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio); + break; + case SDR_L: + default: + min_sharpness = sharpness_range.sdr_rgb_min; + max_sharpness = sharpness_range.sdr_rgb_max; + mid_sharpness = sharpness_range.sdr_rgb_mid; + if (scale_to_sharpness_policy == SCALE_TO_SHARPNESS_ADJ_ALL) + sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio); + break; + } + + if ((min_sharpness == 0) && (sharpness_level_down_adj >= discrete_sharpness_level)) + discrete_sharpness_level = 1; + else if (sharpness_level_down_adj >= discrete_sharpness_level) + discrete_sharpness_level = 0; + else + discrete_sharpness_level -= sharpness_level_down_adj; + + int lower_half_step_size = (mid_sharpness - min_sharpness) / 5; + int upper_half_step_size = (max_sharpness - mid_sharpness) / 5; + + // lower half linear approximation + if (discrete_sharpness_level < 5) + sharpness_level = min_sharpness + (lower_half_step_size * discrete_sharpness_level); + // upper half linear approximation + else + sharpness_level = mid_sharpness + (upper_half_step_size * (discrete_sharpness_level - 5)); + + return sharpness_level; +} + +void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup, + struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy) +{ + uint8_t *byte_ptr_1dlut_src, *byte_ptr_1dlut_dst; + struct spl_fixed31_32 sharp_base, sharp_calc, sharp_level; + int j; + int size_1dlut; + int sharp_calc_int; + uint32_t filter_pregen_store[ISHARP_LUT_TABLE_SIZE]; + + /* Custom sharpnessX1000 value */ + unsigned int sharpnessX1000 = spl_calculate_sharpness_level(ratio, + sharpness.sharpness_level, setup, + sharpness.sharpness_range, scale_to_sharpness_policy); + sharp_level = spl_fixpt_from_fraction(sharpnessX1000, 1000); + + /* + * Check if pregen 1dlut table is already precalculated + * If numer/denom is different, then recalculate + */ + if ((filter_isharp_1D_lut_pregen[setup].sharpness_numer == sharpnessX1000) && + (filter_isharp_1D_lut_pregen[setup].sharpness_denom == 1000)) + return; + + /* + * Calculate LUT_128_gained with this equation: + * + * LUT_128_gained[i] = (uint8)(0.5 + min(255,(double)(LUT_128[i])*sharpLevel/iGain)) + * where LUT_128[i] is contents of 3p0x isharp 1dlut + * where sharpLevel is desired sharpness level + * where iGain is base sharpness level 3.0 + * where LUT_128_gained[i] is adjusted 1dlut value based on desired sharpness level + */ + byte_ptr_1dlut_src = (uint8_t *)filter_isharp_1D_lut_3p0x; + byte_ptr_1dlut_dst = (uint8_t *)filter_pregen_store; + size_1dlut = 
sizeof(filter_isharp_1D_lut_3p0x); + memset(byte_ptr_1dlut_dst, 0, size_1dlut); + for (j = 0; j < size_1dlut; j++) { + sharp_base = spl_fixpt_from_int((int)*byte_ptr_1dlut_src); + sharp_calc = spl_fixpt_mul(sharp_base, sharp_level); + sharp_calc = spl_fixpt_div(sharp_calc, spl_fixpt_from_int(3)); + sharp_calc = spl_fixpt_min(spl_fixpt_from_int(255), sharp_calc); + sharp_calc = spl_fixpt_add(sharp_calc, spl_fixpt_from_fraction(1, 2)); + sharp_calc_int = spl_fixpt_floor(sharp_calc); + /* Clamp it at 0x7F so it doesn't wrap */ + if (sharp_calc_int > 127) + sharp_calc_int = 127; + *byte_ptr_1dlut_dst = (uint8_t)sharp_calc_int; + + byte_ptr_1dlut_src++; + byte_ptr_1dlut_dst++; + } + + /* Update 1dlut table and sharpness level */ + memcpy((void *)filter_isharp_1D_lut_pregen[setup].value, (void *)filter_pregen_store, size_1dlut); + filter_isharp_1D_lut_pregen[setup].sharpness_numer = sharpnessX1000; + filter_isharp_1D_lut_pregen[setup].sharpness_denom = 1000; +} + +uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup) +{ + return filter_isharp_1D_lut_pregen[setup].value; +} + +void spl_init_blur_scale_coeffs(void) +{ + convert_filter_s1_10_to_s1_12(filter_isharp_bs_3tap_64p, + filter_isharp_bs_3tap_64p_s1_12, 3); + convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_64p, + filter_isharp_bs_4tap_64p_s1_12, 4); + convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_in_6_64p, + filter_isharp_bs_4tap_in_6_64p_s1_12, 6); +} + +uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps) +{ + if (taps == 3) + return spl_get_filter_isharp_bs_3tap_64p(); + else if (taps == 4) + return spl_get_filter_isharp_bs_4tap_64p(); + else if (taps == 6) + return spl_get_filter_isharp_bs_4tap_in_6_64p(); + else { + /* should never happen, bug */ + SPL_BREAK_TO_DEBUGGER(); + return NULL; + } +} + +void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data, + const struct spl_scaler_data *data) +{ + dscl_prog_data->filter_blur_scale_h = + spl_dscl_get_blur_scale_coeffs_64p(data->taps.h_taps); + + dscl_prog_data->filter_blur_scale_v = + spl_dscl_get_blur_scale_coeffs_64p(data->taps.v_taps); +} + diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h index 1aaf4c50c1bc2e..afcc66206ca2a7 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h @@ -7,11 +7,45 @@ #include "dc_spl_types.h" +#define ISHARP_LUT_TABLE_SIZE 32 const uint32_t *spl_get_filter_isharp_1D_lut_0(void); const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void); const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void); const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void); const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void); -const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void); +const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void); +uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void); +uint16_t *spl_get_filter_isharp_bs_4tap_64p(void); +uint16_t *spl_get_filter_isharp_bs_3tap_64p(void); const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void); +uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps); + +#define NUM_SHARPNESS_ADJ_LEVELS 6 +struct scale_ratio_to_sharpness_level_adj { + unsigned int ratio_numer; + unsigned int ratio_denom; + unsigned int level_down_adj; /* adjust sharpness level down */ +}; + +struct isharp_1D_lut_pregen { + unsigned int sharpness_numer; + unsigned int sharpness_denom; + uint32_t value[ISHARP_LUT_TABLE_SIZE]; +}; + +enum system_setup { 
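
The pre-generated 1DLUT built above re-gains the 3.0x reference curve: each byte is scaled by sharpLevel/3.0, rounded, and clamped at 0x7F. As a worked example with a hypothetical sharpnessX1000 of 1500 (sharpLevel = 1.5), a reference entry of 0x68 (104) becomes round(104 * 1.5 / 3.0) = 52. A per-byte sketch of that step, assuming the same clamp order as the patch:

#include <stdint.h>

/*
 * Re-gain one byte of the 3.0x reference 1DLUT for a given sharp level; mirrors the
 * min(255) / +0.5 / clamp-at-0x7F sequence in spl_build_isharp_1dlut_from_reference_curve().
 */
static uint8_t regain_lut_byte(uint8_t ref_3p0x, double sharp_level)
{
	double v = ref_3p0x * sharp_level / 3.0;	/* scale relative to the 3.0 base gain */
	int out;

	if (v > 255.0)
		v = 255.0;				/* min(255, ...) */
	out = (int)(v + 0.5);				/* round to nearest, then floor */
	return out > 127 ? 127 : (uint8_t)out;		/* clamp at 0x7F so it doesn't wrap */
}
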
+ SDR_NL = 0, + SDR_L, + HDR_NL, + HDR_L, + NUM_SHARPNESS_SETUPS +}; + +void spl_init_blur_scale_coeffs(void); +void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data, + const struct spl_scaler_data *data); + +void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup, + struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy); +uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup); #endif /* __DC_SPL_ISHARP_FILTERS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c new file mode 100644 index 00000000000000..09bf82f7d4688a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c @@ -0,0 +1,1726 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. + +#include "spl_debug.h" +#include "dc_spl_filters.h" +#include "dc_spl_scl_filters.h" +#include "dc_spl_scl_easf_filters.h" + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 3t_64p_LanczosEd_p_0.3_p_10qb_ +// 3 +// 64 +// input/output = 0.300000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_3tap_64p_ratio_0_30[99] = { + 0x0200, 0x0200, 0x0000, + 0x01F6, 0x0206, 0x0004, + 0x01EC, 0x020B, 0x0009, + 0x01E2, 0x0211, 0x000D, + 0x01D8, 0x0216, 0x0012, + 0x01CE, 0x021C, 0x0016, + 0x01C4, 0x0221, 0x001B, + 0x01BA, 0x0226, 0x0020, + 0x01B0, 0x022A, 0x0026, + 0x01A6, 0x022F, 0x002B, + 0x019C, 0x0233, 0x0031, + 0x0192, 0x0238, 0x0036, + 0x0188, 0x023C, 0x003C, + 0x017E, 0x0240, 0x0042, + 0x0174, 0x0244, 0x0048, + 0x016A, 0x0248, 0x004E, + 0x0161, 0x024A, 0x0055, + 0x0157, 0x024E, 0x005B, + 0x014D, 0x0251, 0x0062, + 0x0144, 0x0253, 0x0069, + 0x013A, 0x0256, 0x0070, + 0x0131, 0x0258, 0x0077, + 0x0127, 0x025B, 0x007E, + 0x011E, 0x025C, 0x0086, + 0x0115, 0x025E, 0x008D, + 0x010B, 0x0260, 0x0095, + 0x0102, 0x0262, 0x009C, + 0x00F9, 0x0263, 0x00A4, + 0x00F0, 0x0264, 0x00AC, + 0x00E7, 0x0265, 0x00B4, + 0x00DF, 0x0264, 0x00BD, + 0x00D6, 0x0265, 0x00C5, + 0x00CD, 0x0266, 0x00CD, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 3t_64p_LanczosEd_p_0.4_p_10qb_ +// 3 +// 64 +// input/output = 0.400000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_3tap_64p_ratio_0_40[99] = { + 0x0200, 0x0200, 0x0000, + 0x01F6, 0x0206, 0x0004, + 0x01EB, 0x020E, 0x0007, + 0x01E1, 0x0214, 0x000B, + 0x01D7, 0x021A, 0x000F, + 0x01CD, 0x0220, 0x0013, + 0x01C2, 0x0226, 0x0018, + 0x01B8, 0x022C, 0x001C, + 0x01AE, 0x0231, 0x0021, + 0x01A3, 0x0237, 0x0026, + 0x0199, 0x023C, 0x002B, + 0x018F, 0x0240, 0x0031, + 0x0185, 0x0245, 0x0036, + 0x017A, 0x024A, 0x003C, + 0x0170, 0x024F, 0x0041, + 0x0166, 0x0253, 0x0047, + 0x015C, 0x0257, 0x004D, + 0x0152, 0x025A, 0x0054, + 0x0148, 0x025E, 0x005A, + 0x013E, 0x0261, 0x0061, + 0x0134, 0x0264, 0x0068, + 0x012B, 0x0266, 0x006F, + 0x0121, 0x0269, 0x0076, + 0x0117, 0x026C, 0x007D, + 0x010E, 0x026E, 0x0084, + 0x0104, 0x0270, 0x008C, + 0x00FB, 0x0271, 0x0094, + 0x00F2, 0x0272, 0x009C, + 0x00E9, 0x0273, 0x00A4, + 0x00E0, 0x0274, 0x00AC, + 0x00D7, 0x0275, 0x00B4, + 0x00CE, 0x0275, 0x00BD, + 0x00C5, 0x0276, 0x00C5, +}; + +//======================================================== 
+// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 3t_64p_LanczosEd_p_0.5_p_10qb_ +// 3 +// 64 +// input/output = 0.500000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_3tap_64p_ratio_0_50[99] = { + 0x0200, 0x0200, 0x0000, + 0x01F5, 0x0209, 0x0002, + 0x01EA, 0x0211, 0x0005, + 0x01DF, 0x021A, 0x0007, + 0x01D4, 0x0222, 0x000A, + 0x01C9, 0x022A, 0x000D, + 0x01BE, 0x0232, 0x0010, + 0x01B3, 0x0239, 0x0014, + 0x01A8, 0x0241, 0x0017, + 0x019D, 0x0248, 0x001B, + 0x0192, 0x024F, 0x001F, + 0x0187, 0x0255, 0x0024, + 0x017C, 0x025C, 0x0028, + 0x0171, 0x0262, 0x002D, + 0x0166, 0x0268, 0x0032, + 0x015B, 0x026E, 0x0037, + 0x0150, 0x0273, 0x003D, + 0x0146, 0x0278, 0x0042, + 0x013B, 0x027D, 0x0048, + 0x0130, 0x0282, 0x004E, + 0x0126, 0x0286, 0x0054, + 0x011B, 0x028A, 0x005B, + 0x0111, 0x028D, 0x0062, + 0x0107, 0x0290, 0x0069, + 0x00FD, 0x0293, 0x0070, + 0x00F3, 0x0296, 0x0077, + 0x00E9, 0x0298, 0x007F, + 0x00DF, 0x029A, 0x0087, + 0x00D5, 0x029C, 0x008F, + 0x00CC, 0x029D, 0x0097, + 0x00C3, 0x029E, 0x009F, + 0x00BA, 0x029E, 0x00A8, + 0x00B1, 0x029E, 0x00B1, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 3t_64p_LanczosEd_p_0.6_p_10qb_ +// 3 +// 64 +// input/output = 0.600000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_3tap_64p_ratio_0_60[99] = { + 0x0200, 0x0200, 0x0000, + 0x01F4, 0x020B, 0x0001, + 0x01E8, 0x0216, 0x0002, + 0x01DC, 0x0221, 0x0003, + 0x01D0, 0x022B, 0x0005, + 0x01C4, 0x0235, 0x0007, + 0x01B8, 0x0240, 0x0008, + 0x01AC, 0x0249, 0x000B, + 0x01A0, 0x0253, 0x000D, + 0x0194, 0x025C, 0x0010, + 0x0188, 0x0265, 0x0013, + 0x017C, 0x026E, 0x0016, + 0x0170, 0x0277, 0x0019, + 0x0164, 0x027F, 0x001D, + 0x0158, 0x0287, 0x0021, + 0x014C, 0x028F, 0x0025, + 0x0140, 0x0297, 0x0029, + 0x0135, 0x029D, 0x002E, + 0x0129, 0x02A4, 0x0033, + 0x011D, 0x02AB, 0x0038, + 0x0112, 0x02B0, 0x003E, + 0x0107, 0x02B5, 0x0044, + 0x00FC, 0x02BA, 0x004A, + 0x00F1, 0x02BF, 0x0050, + 0x00E6, 0x02C3, 0x0057, + 0x00DB, 0x02C7, 0x005E, + 0x00D1, 0x02CA, 0x0065, + 0x00C7, 0x02CC, 0x006D, + 0x00BD, 0x02CE, 0x0075, + 0x00B3, 0x02D0, 0x007D, + 0x00A9, 0x02D2, 0x0085, + 0x00A0, 0x02D2, 0x008E, + 0x0097, 0x02D2, 0x0097, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 3t_64p_LanczosEd_p_0.7_p_10qb_ +// 3 +// 64 +// input/output = 0.700000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_3tap_64p_ratio_0_70[99] = { + 0x0200, 0x0200, 0x0000, + 0x01F3, 0x020D, 0x0000, + 0x01E5, 0x021B, 0x0000, + 0x01D8, 0x0228, 0x0000, + 0x01CB, 0x0235, 0x0000, + 0x01BD, 0x0243, 0x0000, + 0x01B0, 0x024F, 0x0001, + 0x01A2, 0x025C, 0x0002, + 0x0195, 0x0268, 0x0003, + 0x0187, 0x0275, 0x0004, + 0x017A, 0x0280, 0x0006, + 0x016D, 0x028C, 0x0007, + 0x015F, 0x0298, 0x0009, + 0x0152, 0x02A2, 0x000C, + 0x0145, 0x02AD, 0x000E, + 0x0138, 0x02B7, 0x0011, + 0x012B, 0x02C0, 0x0015, + 0x011E, 0x02CA, 0x0018, + 0x0111, 0x02D3, 0x001C, + 0x0105, 0x02DB, 0x0020, + 0x00F8, 0x02E3, 0x0025, + 0x00EC, 0x02EA, 0x002A, + 0x00E0, 0x02F1, 0x002F, + 0x00D5, 0x02F6, 0x0035, + 0x00C9, 0x02FC, 0x003B, + 0x00BE, 0x0301, 0x0041, + 0x00B3, 0x0305, 0x0048, + 0x00A8, 0x0309, 0x004F, + 0x009E, 0x030C, 0x0056, + 0x0094, 0x030E, 0x005E, + 0x008A, 0x0310, 
0x0066, + 0x0081, 0x0310, 0x006F, + 0x0077, 0x0312, 0x0077, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 3t_64p_LanczosEd_p_0.8_p_10qb_ +// 3 +// 64 +// input/output = 0.800000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_3tap_64p_ratio_0_80[99] = { + 0x0200, 0x0200, 0x0000, + 0x01F1, 0x0210, 0x0FFF, + 0x01E2, 0x0220, 0x0FFE, + 0x01D2, 0x0232, 0x0FFC, + 0x01C3, 0x0241, 0x0FFC, + 0x01B4, 0x0251, 0x0FFB, + 0x01A4, 0x0262, 0x0FFA, + 0x0195, 0x0271, 0x0FFA, + 0x0186, 0x0281, 0x0FF9, + 0x0176, 0x0291, 0x0FF9, + 0x0167, 0x02A0, 0x0FF9, + 0x0158, 0x02AE, 0x0FFA, + 0x0149, 0x02BD, 0x0FFA, + 0x013A, 0x02CB, 0x0FFB, + 0x012C, 0x02D7, 0x0FFD, + 0x011D, 0x02E5, 0x0FFE, + 0x010F, 0x02F1, 0x0000, + 0x0101, 0x02FD, 0x0002, + 0x00F3, 0x0308, 0x0005, + 0x00E5, 0x0313, 0x0008, + 0x00D8, 0x031D, 0x000B, + 0x00CB, 0x0326, 0x000F, + 0x00BE, 0x032F, 0x0013, + 0x00B2, 0x0337, 0x0017, + 0x00A6, 0x033E, 0x001C, + 0x009A, 0x0345, 0x0021, + 0x008F, 0x034A, 0x0027, + 0x0084, 0x034F, 0x002D, + 0x0079, 0x0353, 0x0034, + 0x006F, 0x0356, 0x003B, + 0x0065, 0x0358, 0x0043, + 0x005C, 0x0359, 0x004B, + 0x0053, 0x035A, 0x0053, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 3t_64p_LanczosEd_p_0.9_p_10qb_ +// 3 +// 64 +// input/output = 0.900000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_3tap_64p_ratio_0_90[99] = { + 0x0200, 0x0200, 0x0000, + 0x01EE, 0x0214, 0x0FFE, + 0x01DC, 0x0228, 0x0FFC, + 0x01CA, 0x023C, 0x0FFA, + 0x01B9, 0x024F, 0x0FF8, + 0x01A7, 0x0262, 0x0FF7, + 0x0195, 0x0276, 0x0FF5, + 0x0183, 0x028A, 0x0FF3, + 0x0172, 0x029C, 0x0FF2, + 0x0160, 0x02AF, 0x0FF1, + 0x014F, 0x02C2, 0x0FEF, + 0x013E, 0x02D4, 0x0FEE, + 0x012D, 0x02E5, 0x0FEE, + 0x011C, 0x02F7, 0x0FED, + 0x010C, 0x0307, 0x0FED, + 0x00FB, 0x0318, 0x0FED, + 0x00EC, 0x0327, 0x0FED, + 0x00DC, 0x0336, 0x0FEE, + 0x00CD, 0x0344, 0x0FEF, + 0x00BE, 0x0352, 0x0FF0, + 0x00B0, 0x035E, 0x0FF2, + 0x00A2, 0x036A, 0x0FF4, + 0x0095, 0x0375, 0x0FF6, + 0x0088, 0x037F, 0x0FF9, + 0x007B, 0x0388, 0x0FFD, + 0x006F, 0x0391, 0x0000, + 0x0064, 0x0397, 0x0005, + 0x0059, 0x039D, 0x000A, + 0x004E, 0x03A3, 0x000F, + 0x0045, 0x03A6, 0x0015, + 0x003B, 0x03A9, 0x001C, + 0x0033, 0x03AA, 0x0023, + 0x002A, 0x03AC, 0x002A, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 3t_64p_LanczosEd_p_1_p_10qb_ +// 3 +// 64 +// input/output = 1.000000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_3tap_64p_ratio_1_00[99] = { + 0x0200, 0x0200, 0x0000, + 0x01EB, 0x0217, 0x0FFE, + 0x01D5, 0x022F, 0x0FFC, + 0x01C0, 0x0247, 0x0FF9, + 0x01AB, 0x025E, 0x0FF7, + 0x0196, 0x0276, 0x0FF4, + 0x0181, 0x028D, 0x0FF2, + 0x016C, 0x02A5, 0x0FEF, + 0x0158, 0x02BB, 0x0FED, + 0x0144, 0x02D1, 0x0FEB, + 0x0130, 0x02E8, 0x0FE8, + 0x011C, 0x02FE, 0x0FE6, + 0x0109, 0x0313, 0x0FE4, + 0x00F6, 0x0328, 0x0FE2, + 0x00E4, 0x033C, 0x0FE0, + 0x00D2, 0x034F, 0x0FDF, + 0x00C0, 0x0363, 0x0FDD, + 0x00B0, 0x0374, 0x0FDC, + 0x009F, 0x0385, 0x0FDC, + 0x0090, 0x0395, 0x0FDB, + 0x0081, 0x03A4, 0x0FDB, + 0x0072, 0x03B3, 0x0FDB, + 0x0064, 0x03C0, 0x0FDC, + 0x0057, 0x03CC, 0x0FDD, + 0x004B, 0x03D6, 0x0FDF, + 0x003F, 0x03E0, 0x0FE1, + 
0x0034, 0x03E8, 0x0FE4, + 0x002A, 0x03EF, 0x0FE7, + 0x0020, 0x03F5, 0x0FEB, + 0x0017, 0x03FA, 0x0FEF, + 0x000F, 0x03FD, 0x0FF4, + 0x0007, 0x03FF, 0x0FFA, + 0x0000, 0x0400, 0x0000, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 4t_64p_LanczosEd_p_0.3_p_10qb_ +// 4 +// 64 +// input/output = 0.300000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_4tap_64p_ratio_0_30[132] = { + 0x0104, 0x01F8, 0x0104, 0x0000, + 0x00FE, 0x01F7, 0x010A, 0x0001, + 0x00F8, 0x01F6, 0x010F, 0x0003, + 0x00F2, 0x01F5, 0x0114, 0x0005, + 0x00EB, 0x01F4, 0x011B, 0x0006, + 0x00E5, 0x01F3, 0x0120, 0x0008, + 0x00DF, 0x01F2, 0x0125, 0x000A, + 0x00DA, 0x01F0, 0x012A, 0x000C, + 0x00D4, 0x01EE, 0x0130, 0x000E, + 0x00CE, 0x01ED, 0x0135, 0x0010, + 0x00C8, 0x01EB, 0x013A, 0x0013, + 0x00C2, 0x01E9, 0x0140, 0x0015, + 0x00BD, 0x01E7, 0x0145, 0x0017, + 0x00B7, 0x01E5, 0x014A, 0x001A, + 0x00B1, 0x01E2, 0x0151, 0x001C, + 0x00AC, 0x01E0, 0x0155, 0x001F, + 0x00A7, 0x01DD, 0x015A, 0x0022, + 0x00A1, 0x01DB, 0x015F, 0x0025, + 0x009C, 0x01D8, 0x0165, 0x0027, + 0x0097, 0x01D5, 0x016A, 0x002A, + 0x0092, 0x01D2, 0x016E, 0x002E, + 0x008C, 0x01CF, 0x0174, 0x0031, + 0x0087, 0x01CC, 0x0179, 0x0034, + 0x0083, 0x01C9, 0x017D, 0x0037, + 0x007E, 0x01C5, 0x0182, 0x003B, + 0x0079, 0x01C2, 0x0187, 0x003E, + 0x0074, 0x01BE, 0x018C, 0x0042, + 0x0070, 0x01BA, 0x0190, 0x0046, + 0x006B, 0x01B7, 0x0195, 0x0049, + 0x0066, 0x01B3, 0x019A, 0x004D, + 0x0062, 0x01AF, 0x019E, 0x0051, + 0x005E, 0x01AB, 0x01A2, 0x0055, + 0x005A, 0x01A6, 0x01A6, 0x005A, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 4t_64p_LanczosEd_p_0.4_p_10qb_ +// 4 +// 64 +// input/output = 0.400000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_4tap_64p_ratio_0_40[132] = { + 0x00FB, 0x0209, 0x00FC, 0x0000, + 0x00F5, 0x0209, 0x0101, 0x0001, + 0x00EE, 0x0208, 0x0108, 0x0002, + 0x00E8, 0x0207, 0x010E, 0x0003, + 0x00E2, 0x0206, 0x0114, 0x0004, + 0x00DB, 0x0205, 0x011A, 0x0006, + 0x00D5, 0x0204, 0x0120, 0x0007, + 0x00CF, 0x0203, 0x0125, 0x0009, + 0x00C9, 0x0201, 0x012C, 0x000A, + 0x00C3, 0x01FF, 0x0132, 0x000C, + 0x00BD, 0x01FD, 0x0138, 0x000E, + 0x00B7, 0x01FB, 0x013E, 0x0010, + 0x00B1, 0x01F9, 0x0144, 0x0012, + 0x00AC, 0x01F7, 0x0149, 0x0014, + 0x00A6, 0x01F4, 0x0150, 0x0016, + 0x00A0, 0x01F2, 0x0156, 0x0018, + 0x009B, 0x01EF, 0x015C, 0x001A, + 0x0095, 0x01EC, 0x0162, 0x001D, + 0x0090, 0x01E9, 0x0168, 0x001F, + 0x008B, 0x01E6, 0x016D, 0x0022, + 0x0085, 0x01E3, 0x0173, 0x0025, + 0x0080, 0x01DF, 0x0179, 0x0028, + 0x007B, 0x01DC, 0x017E, 0x002B, + 0x0076, 0x01D8, 0x0184, 0x002E, + 0x0071, 0x01D4, 0x018A, 0x0031, + 0x006D, 0x01D1, 0x018E, 0x0034, + 0x0068, 0x01CD, 0x0193, 0x0038, + 0x0063, 0x01C8, 0x019A, 0x003B, + 0x005F, 0x01C4, 0x019E, 0x003F, + 0x005B, 0x01C0, 0x01A3, 0x0042, + 0x0056, 0x01BB, 0x01A9, 0x0046, + 0x0052, 0x01B7, 0x01AD, 0x004A, + 0x004E, 0x01B2, 0x01B2, 0x004E, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 4t_64p_LanczosEd_p_0.5_p_10qb_ +// 4 +// 64 +// input/output = 0.500000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_4tap_64p_ratio_0_50[132] = { + 0x00E5, 0x0236, 0x00E5, 
0x0000, + 0x00DE, 0x0235, 0x00ED, 0x0000, + 0x00D7, 0x0235, 0x00F4, 0x0000, + 0x00D0, 0x0235, 0x00FB, 0x0000, + 0x00C9, 0x0234, 0x0102, 0x0001, + 0x00C2, 0x0233, 0x010A, 0x0001, + 0x00BC, 0x0232, 0x0111, 0x0001, + 0x00B5, 0x0230, 0x0119, 0x0002, + 0x00AE, 0x022F, 0x0121, 0x0002, + 0x00A8, 0x022D, 0x0128, 0x0003, + 0x00A2, 0x022B, 0x012F, 0x0004, + 0x009B, 0x0229, 0x0137, 0x0005, + 0x0095, 0x0226, 0x013F, 0x0006, + 0x008F, 0x0224, 0x0146, 0x0007, + 0x0089, 0x0221, 0x014E, 0x0008, + 0x0083, 0x021E, 0x0155, 0x000A, + 0x007E, 0x021B, 0x015C, 0x000B, + 0x0078, 0x0217, 0x0164, 0x000D, + 0x0072, 0x0213, 0x016D, 0x000E, + 0x006D, 0x0210, 0x0173, 0x0010, + 0x0068, 0x020C, 0x017A, 0x0012, + 0x0063, 0x0207, 0x0182, 0x0014, + 0x005E, 0x0203, 0x0189, 0x0016, + 0x0059, 0x01FE, 0x0191, 0x0018, + 0x0054, 0x01F9, 0x0198, 0x001B, + 0x0050, 0x01F4, 0x019F, 0x001D, + 0x004B, 0x01EF, 0x01A6, 0x0020, + 0x0047, 0x01EA, 0x01AC, 0x0023, + 0x0043, 0x01E4, 0x01B3, 0x0026, + 0x003F, 0x01DF, 0x01B9, 0x0029, + 0x003B, 0x01D9, 0x01C0, 0x002C, + 0x0037, 0x01D3, 0x01C6, 0x0030, + 0x0033, 0x01CD, 0x01CD, 0x0033, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 4t_64p_LanczosEd_p_0.6_p_10qb_ +// 4 +// 64 +// input/output = 0.600000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_4tap_64p_ratio_0_60[132] = { + 0x00C8, 0x026F, 0x00C9, 0x0000, + 0x00C0, 0x0270, 0x00D1, 0x0FFF, + 0x00B8, 0x0270, 0x00D9, 0x0FFF, + 0x00B1, 0x0270, 0x00E1, 0x0FFE, + 0x00A9, 0x026F, 0x00EB, 0x0FFD, + 0x00A2, 0x026E, 0x00F3, 0x0FFD, + 0x009A, 0x026D, 0x00FD, 0x0FFC, + 0x0093, 0x026C, 0x0105, 0x0FFC, + 0x008C, 0x026A, 0x010F, 0x0FFB, + 0x0085, 0x0268, 0x0118, 0x0FFB, + 0x007E, 0x0265, 0x0122, 0x0FFB, + 0x0078, 0x0263, 0x012A, 0x0FFB, + 0x0071, 0x0260, 0x0134, 0x0FFB, + 0x006B, 0x025C, 0x013E, 0x0FFB, + 0x0065, 0x0259, 0x0147, 0x0FFB, + 0x005F, 0x0255, 0x0151, 0x0FFB, + 0x0059, 0x0251, 0x015A, 0x0FFC, + 0x0054, 0x024D, 0x0163, 0x0FFC, + 0x004E, 0x0248, 0x016D, 0x0FFD, + 0x0049, 0x0243, 0x0176, 0x0FFE, + 0x0044, 0x023E, 0x017F, 0x0FFF, + 0x003F, 0x0238, 0x0189, 0x0000, + 0x003A, 0x0232, 0x0193, 0x0001, + 0x0036, 0x022C, 0x019C, 0x0002, + 0x0031, 0x0226, 0x01A5, 0x0004, + 0x002D, 0x021F, 0x01AF, 0x0005, + 0x0029, 0x0218, 0x01B8, 0x0007, + 0x0025, 0x0211, 0x01C1, 0x0009, + 0x0022, 0x020A, 0x01C9, 0x000B, + 0x001E, 0x0203, 0x01D2, 0x000D, + 0x001B, 0x01FB, 0x01DA, 0x0010, + 0x0018, 0x01F3, 0x01E3, 0x0012, + 0x0015, 0x01EB, 0x01EB, 0x0015, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 4t_64p_LanczosEd_p_0.7_p_10qb_ +// 4 +// 64 +// input/output = 0.700000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_4tap_64p_ratio_0_70[132] = { + 0x00A3, 0x02B9, 0x00A4, 0x0000, + 0x009A, 0x02BA, 0x00AD, 0x0FFF, + 0x0092, 0x02BA, 0x00B6, 0x0FFE, + 0x0089, 0x02BA, 0x00C1, 0x0FFC, + 0x0081, 0x02B9, 0x00CB, 0x0FFB, + 0x0079, 0x02B8, 0x00D5, 0x0FFA, + 0x0071, 0x02B7, 0x00DF, 0x0FF9, + 0x0069, 0x02B5, 0x00EA, 0x0FF8, + 0x0062, 0x02B3, 0x00F4, 0x0FF7, + 0x005B, 0x02B0, 0x00FF, 0x0FF6, + 0x0054, 0x02AD, 0x010B, 0x0FF4, + 0x004D, 0x02A9, 0x0117, 0x0FF3, + 0x0046, 0x02A5, 0x0123, 0x0FF2, + 0x0040, 0x02A1, 0x012D, 0x0FF2, + 0x003A, 0x029C, 0x0139, 0x0FF1, + 0x0034, 0x0297, 0x0145, 0x0FF0, + 0x002F, 0x0292, 0x0150, 0x0FEF, + 0x0029, 0x028C, 
0x015C, 0x0FEF, + 0x0024, 0x0285, 0x0169, 0x0FEE, + 0x001F, 0x027F, 0x0174, 0x0FEE, + 0x001B, 0x0278, 0x017F, 0x0FEE, + 0x0016, 0x0270, 0x018D, 0x0FED, + 0x0012, 0x0268, 0x0199, 0x0FED, + 0x000E, 0x0260, 0x01A4, 0x0FEE, + 0x000B, 0x0258, 0x01AF, 0x0FEE, + 0x0007, 0x024F, 0x01BC, 0x0FEE, + 0x0004, 0x0246, 0x01C7, 0x0FEF, + 0x0001, 0x023D, 0x01D3, 0x0FEF, + 0x0FFE, 0x0233, 0x01DF, 0x0FF0, + 0x0FFC, 0x0229, 0x01EA, 0x0FF1, + 0x0FFA, 0x021F, 0x01F4, 0x0FF3, + 0x0FF8, 0x0215, 0x01FF, 0x0FF4, + 0x0FF6, 0x020A, 0x020A, 0x0FF6, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 4t_64p_LanczosEd_p_0.8_p_10qb_ +// 4 +// 64 +// input/output = 0.800000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_4tap_64p_ratio_0_80[132] = { + 0x0075, 0x0315, 0x0076, 0x0000, + 0x006C, 0x0316, 0x007F, 0x0FFF, + 0x0062, 0x0316, 0x008A, 0x0FFE, + 0x0059, 0x0315, 0x0096, 0x0FFC, + 0x0050, 0x0314, 0x00A1, 0x0FFB, + 0x0048, 0x0312, 0x00AD, 0x0FF9, + 0x0040, 0x0310, 0x00B8, 0x0FF8, + 0x0038, 0x030D, 0x00C5, 0x0FF6, + 0x0030, 0x030A, 0x00D1, 0x0FF5, + 0x0029, 0x0306, 0x00DE, 0x0FF3, + 0x0022, 0x0301, 0x00EB, 0x0FF2, + 0x001C, 0x02FC, 0x00F8, 0x0FF0, + 0x0015, 0x02F7, 0x0106, 0x0FEE, + 0x0010, 0x02F1, 0x0112, 0x0FED, + 0x000A, 0x02EA, 0x0121, 0x0FEB, + 0x0005, 0x02E3, 0x012F, 0x0FE9, + 0x0000, 0x02DB, 0x013D, 0x0FE8, + 0x0FFB, 0x02D3, 0x014C, 0x0FE6, + 0x0FF7, 0x02CA, 0x015A, 0x0FE5, + 0x0FF3, 0x02C1, 0x0169, 0x0FE3, + 0x0FF0, 0x02B7, 0x0177, 0x0FE2, + 0x0FEC, 0x02AD, 0x0186, 0x0FE1, + 0x0FE9, 0x02A2, 0x0196, 0x0FDF, + 0x0FE7, 0x0297, 0x01A4, 0x0FDE, + 0x0FE4, 0x028C, 0x01B3, 0x0FDD, + 0x0FE2, 0x0280, 0x01C2, 0x0FDC, + 0x0FE0, 0x0274, 0x01D0, 0x0FDC, + 0x0FDF, 0x0268, 0x01DE, 0x0FDB, + 0x0FDD, 0x025B, 0x01EE, 0x0FDA, + 0x0FDC, 0x024E, 0x01FC, 0x0FDA, + 0x0FDB, 0x0241, 0x020A, 0x0FDA, + 0x0FDB, 0x0233, 0x0218, 0x0FDA, + 0x0FDA, 0x0226, 0x0226, 0x0FDA, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 4t_64p_LanczosEd_p_0.9_p_10qb_ +// 4 +// 64 +// input/output = 0.900000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_4tap_64p_ratio_0_90[132] = { + 0x003F, 0x0383, 0x003E, 0x0000, + 0x0034, 0x0383, 0x004A, 0x0FFF, + 0x002B, 0x0383, 0x0054, 0x0FFE, + 0x0021, 0x0381, 0x0061, 0x0FFD, + 0x0019, 0x037F, 0x006C, 0x0FFC, + 0x0010, 0x037C, 0x0079, 0x0FFB, + 0x0008, 0x0378, 0x0086, 0x0FFA, + 0x0001, 0x0374, 0x0093, 0x0FF8, + 0x0FFA, 0x036E, 0x00A1, 0x0FF7, + 0x0FF3, 0x0368, 0x00B0, 0x0FF5, + 0x0FED, 0x0361, 0x00BF, 0x0FF3, + 0x0FE8, 0x035A, 0x00CD, 0x0FF1, + 0x0FE2, 0x0352, 0x00DC, 0x0FF0, + 0x0FDE, 0x0349, 0x00EB, 0x0FEE, + 0x0FD9, 0x033F, 0x00FC, 0x0FEC, + 0x0FD5, 0x0335, 0x010D, 0x0FE9, + 0x0FD2, 0x032A, 0x011D, 0x0FE7, + 0x0FCF, 0x031E, 0x012E, 0x0FE5, + 0x0FCC, 0x0312, 0x013F, 0x0FE3, + 0x0FCA, 0x0305, 0x0150, 0x0FE1, + 0x0FC8, 0x02F8, 0x0162, 0x0FDE, + 0x0FC6, 0x02EA, 0x0174, 0x0FDC, + 0x0FC5, 0x02DC, 0x0185, 0x0FDA, + 0x0FC4, 0x02CD, 0x0197, 0x0FD8, + 0x0FC3, 0x02BE, 0x01AA, 0x0FD5, + 0x0FC3, 0x02AF, 0x01BB, 0x0FD3, + 0x0FC3, 0x029F, 0x01CD, 0x0FD1, + 0x0FC3, 0x028E, 0x01E0, 0x0FCF, + 0x0FC3, 0x027E, 0x01F2, 0x0FCD, + 0x0FC4, 0x026D, 0x0203, 0x0FCC, + 0x0FC5, 0x025C, 0x0215, 0x0FCA, + 0x0FC6, 0x024B, 0x0227, 0x0FC8, + 0x0FC7, 0x0239, 0x0239, 0x0FC7, +}; + 
+//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 03-Apr-2024 +// 4t_64p_LanczosEd_p_1_p_10qb_ +// 4 +// 64 +// input/output = 1.000000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_4tap_64p_ratio_1_00[132] = { + 0x0000, 0x0400, 0x0000, 0x0000, + 0x0FF6, 0x03FF, 0x000B, 0x0000, + 0x0FED, 0x03FE, 0x0015, 0x0000, + 0x0FE4, 0x03FB, 0x0022, 0x0FFF, + 0x0FDC, 0x03F7, 0x002E, 0x0FFF, + 0x0FD5, 0x03F2, 0x003B, 0x0FFE, + 0x0FCE, 0x03EC, 0x0048, 0x0FFE, + 0x0FC8, 0x03E5, 0x0056, 0x0FFD, + 0x0FC3, 0x03DC, 0x0065, 0x0FFC, + 0x0FBE, 0x03D3, 0x0075, 0x0FFA, + 0x0FB9, 0x03C9, 0x0085, 0x0FF9, + 0x0FB6, 0x03BE, 0x0094, 0x0FF8, + 0x0FB2, 0x03B2, 0x00A6, 0x0FF6, + 0x0FB0, 0x03A5, 0x00B7, 0x0FF4, + 0x0FAD, 0x0397, 0x00CA, 0x0FF2, + 0x0FAB, 0x0389, 0x00DC, 0x0FF0, + 0x0FAA, 0x0379, 0x00EF, 0x0FEE, + 0x0FA9, 0x0369, 0x0102, 0x0FEC, + 0x0FA9, 0x0359, 0x0115, 0x0FE9, + 0x0FA9, 0x0348, 0x0129, 0x0FE6, + 0x0FA9, 0x0336, 0x013D, 0x0FE4, + 0x0FA9, 0x0323, 0x0153, 0x0FE1, + 0x0FAA, 0x0310, 0x0168, 0x0FDE, + 0x0FAC, 0x02FD, 0x017C, 0x0FDB, + 0x0FAD, 0x02E9, 0x0192, 0x0FD8, + 0x0FAF, 0x02D5, 0x01A7, 0x0FD5, + 0x0FB1, 0x02C0, 0x01BD, 0x0FD2, + 0x0FB3, 0x02AC, 0x01D2, 0x0FCF, + 0x0FB5, 0x0296, 0x01E9, 0x0FCC, + 0x0FB8, 0x0281, 0x01FE, 0x0FC9, + 0x0FBA, 0x026C, 0x0214, 0x0FC6, + 0x0FBD, 0x0256, 0x022A, 0x0FC3, + 0x0FC0, 0x0240, 0x0240, 0x0FC0, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 02-Apr-2024 +// 6t_64p_LanczosEd_p_0.3_p_10qb_ +// 6 +// 64 +// input/output = 0.300000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_6tap_64p_ratio_0_30[198] = { + 0x004B, 0x0100, 0x0169, 0x0101, 0x004B, 0x0000, + 0x0049, 0x00FD, 0x0169, 0x0103, 0x004E, 0x0000, + 0x0047, 0x00FA, 0x0169, 0x0106, 0x0050, 0x0000, + 0x0045, 0x00F7, 0x0168, 0x0109, 0x0052, 0x0001, + 0x0043, 0x00F5, 0x0168, 0x010B, 0x0054, 0x0001, + 0x0040, 0x00F2, 0x0168, 0x010E, 0x0057, 0x0001, + 0x003E, 0x00EF, 0x0168, 0x0110, 0x0059, 0x0002, + 0x003C, 0x00EC, 0x0167, 0x0113, 0x005C, 0x0002, + 0x003A, 0x00E9, 0x0167, 0x0116, 0x005E, 0x0002, + 0x0038, 0x00E6, 0x0166, 0x0118, 0x0061, 0x0003, + 0x0036, 0x00E3, 0x0165, 0x011C, 0x0063, 0x0003, + 0x0034, 0x00E0, 0x0165, 0x011D, 0x0066, 0x0004, + 0x0033, 0x00DD, 0x0164, 0x0120, 0x0068, 0x0004, + 0x0031, 0x00DA, 0x0163, 0x0122, 0x006B, 0x0005, + 0x002F, 0x00D7, 0x0163, 0x0125, 0x006D, 0x0005, + 0x002D, 0x00D3, 0x0162, 0x0128, 0x0070, 0x0006, + 0x002B, 0x00D0, 0x0161, 0x012A, 0x0073, 0x0007, + 0x002A, 0x00CD, 0x0160, 0x012D, 0x0075, 0x0007, + 0x0028, 0x00CA, 0x015F, 0x012F, 0x0078, 0x0008, + 0x0026, 0x00C7, 0x015E, 0x0131, 0x007B, 0x0009, + 0x0025, 0x00C4, 0x015D, 0x0133, 0x007E, 0x0009, + 0x0023, 0x00C1, 0x015C, 0x0136, 0x0080, 0x000A, + 0x0022, 0x00BE, 0x015A, 0x0138, 0x0083, 0x000B, + 0x0020, 0x00BB, 0x0159, 0x013A, 0x0086, 0x000C, + 0x001F, 0x00B8, 0x0158, 0x013B, 0x0089, 0x000D, + 0x001E, 0x00B5, 0x0156, 0x013E, 0x008C, 0x000D, + 0x001C, 0x00B2, 0x0155, 0x0140, 0x008F, 0x000E, + 0x001B, 0x00AF, 0x0153, 0x0143, 0x0091, 0x000F, + 0x0019, 0x00AC, 0x0152, 0x0145, 0x0094, 0x0010, + 0x0018, 0x00A9, 0x0150, 0x0147, 0x0097, 0x0011, + 0x0017, 0x00A6, 0x014F, 0x0148, 0x009A, 0x0012, + 0x0016, 0x00A3, 0x014D, 0x0149, 0x009D, 0x0014, + 0x0015, 0x00A0, 0x014B, 0x014B, 0x00A0, 0x0015, +}; + 
+//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 02-Apr-2024 +// 6t_64p_LanczosEd_p_0.4_p_10qb_ +// 6 +// 64 +// input/output = 0.400000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_6tap_64p_ratio_0_40[198] = { + 0x0028, 0x0106, 0x01A3, 0x0107, 0x0028, 0x0000, + 0x0026, 0x0102, 0x01A3, 0x010A, 0x002B, 0x0000, + 0x0024, 0x00FE, 0x01A3, 0x010F, 0x002D, 0x0FFF, + 0x0022, 0x00FA, 0x01A3, 0x0113, 0x002F, 0x0FFF, + 0x0021, 0x00F6, 0x01A3, 0x0116, 0x0031, 0x0FFF, + 0x001F, 0x00F2, 0x01A2, 0x011B, 0x0034, 0x0FFE, + 0x001D, 0x00EE, 0x01A2, 0x011F, 0x0036, 0x0FFE, + 0x001B, 0x00EA, 0x01A1, 0x0123, 0x0039, 0x0FFE, + 0x0019, 0x00E6, 0x01A1, 0x0127, 0x003B, 0x0FFE, + 0x0018, 0x00E2, 0x01A0, 0x012A, 0x003E, 0x0FFE, + 0x0016, 0x00DE, 0x01A0, 0x012E, 0x0041, 0x0FFD, + 0x0015, 0x00DA, 0x019F, 0x0132, 0x0043, 0x0FFD, + 0x0013, 0x00D6, 0x019E, 0x0136, 0x0046, 0x0FFD, + 0x0012, 0x00D2, 0x019D, 0x0139, 0x0049, 0x0FFD, + 0x0010, 0x00CE, 0x019C, 0x013D, 0x004C, 0x0FFD, + 0x000F, 0x00CA, 0x019A, 0x0141, 0x004F, 0x0FFD, + 0x000E, 0x00C6, 0x0199, 0x0144, 0x0052, 0x0FFD, + 0x000D, 0x00C2, 0x0197, 0x0148, 0x0055, 0x0FFD, + 0x000B, 0x00BE, 0x0196, 0x014C, 0x0058, 0x0FFD, + 0x000A, 0x00BA, 0x0195, 0x014F, 0x005B, 0x0FFD, + 0x0009, 0x00B6, 0x0193, 0x0153, 0x005E, 0x0FFD, + 0x0008, 0x00B2, 0x0191, 0x0157, 0x0061, 0x0FFD, + 0x0007, 0x00AE, 0x0190, 0x015A, 0x0064, 0x0FFD, + 0x0006, 0x00AA, 0x018E, 0x015D, 0x0068, 0x0FFD, + 0x0005, 0x00A6, 0x018C, 0x0161, 0x006B, 0x0FFD, + 0x0005, 0x00A2, 0x0189, 0x0164, 0x006F, 0x0FFD, + 0x0004, 0x009E, 0x0187, 0x0167, 0x0072, 0x0FFE, + 0x0003, 0x009A, 0x0185, 0x016B, 0x0075, 0x0FFE, + 0x0002, 0x0096, 0x0183, 0x016E, 0x0079, 0x0FFE, + 0x0002, 0x0093, 0x0180, 0x016F, 0x007D, 0x0FFF, + 0x0001, 0x008F, 0x017E, 0x0173, 0x0080, 0x0FFF, + 0x0001, 0x008B, 0x017B, 0x0175, 0x0084, 0x0000, + 0x0000, 0x0087, 0x0179, 0x0179, 0x0087, 0x0000, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 02-Apr-2024 +// 6t_64p_LanczosEd_p_0.5_p_10qb_ +// 6 +// 64 +// input/output = 0.500000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_6tap_64p_ratio_0_50[198] = { + 0x0000, 0x0107, 0x01F3, 0x0106, 0x0000, 0x0000, + 0x0FFE, 0x0101, 0x01F3, 0x010D, 0x0002, 0x0FFF, + 0x0FFD, 0x00FB, 0x01F3, 0x0113, 0x0003, 0x0FFF, + 0x0FFC, 0x00F6, 0x01F3, 0x0118, 0x0005, 0x0FFE, + 0x0FFA, 0x00F0, 0x01F3, 0x011E, 0x0007, 0x0FFE, + 0x0FF9, 0x00EB, 0x01F2, 0x0124, 0x0009, 0x0FFD, + 0x0FF8, 0x00E5, 0x01F2, 0x0129, 0x000B, 0x0FFD, + 0x0FF7, 0x00E0, 0x01F1, 0x012F, 0x000D, 0x0FFC, + 0x0FF6, 0x00DA, 0x01F0, 0x0135, 0x0010, 0x0FFB, + 0x0FF5, 0x00D4, 0x01EF, 0x013B, 0x0012, 0x0FFB, + 0x0FF4, 0x00CF, 0x01EE, 0x0141, 0x0014, 0x0FFA, + 0x0FF3, 0x00C9, 0x01ED, 0x0147, 0x0017, 0x0FF9, + 0x0FF2, 0x00C4, 0x01EB, 0x014C, 0x001A, 0x0FF9, + 0x0FF1, 0x00BF, 0x01EA, 0x0152, 0x001C, 0x0FF8, + 0x0FF1, 0x00B9, 0x01E8, 0x0157, 0x001F, 0x0FF8, + 0x0FF0, 0x00B4, 0x01E6, 0x015D, 0x0022, 0x0FF7, + 0x0FF0, 0x00AE, 0x01E4, 0x0163, 0x0025, 0x0FF6, + 0x0FEF, 0x00A9, 0x01E2, 0x0168, 0x0028, 0x0FF6, + 0x0FEF, 0x00A4, 0x01DF, 0x016E, 0x002B, 0x0FF5, + 0x0FEF, 0x009F, 0x01DD, 0x0172, 0x002E, 0x0FF5, + 0x0FEE, 0x009A, 0x01DA, 0x0178, 0x0032, 0x0FF4, + 0x0FEE, 0x0094, 0x01D8, 0x017E, 0x0035, 0x0FF3, + 0x0FEE, 0x008F, 0x01D5, 0x0182, 0x0039, 0x0FF3, + 0x0FEE, 0x008A, 
0x01D2, 0x0188, 0x003C, 0x0FF2, + 0x0FEE, 0x0085, 0x01CF, 0x018C, 0x0040, 0x0FF2, + 0x0FEE, 0x0081, 0x01CB, 0x0191, 0x0044, 0x0FF1, + 0x0FEE, 0x007C, 0x01C8, 0x0196, 0x0047, 0x0FF1, + 0x0FEE, 0x0077, 0x01C4, 0x019C, 0x004B, 0x0FF0, + 0x0FEE, 0x0072, 0x01C1, 0x01A0, 0x004F, 0x0FF0, + 0x0FEE, 0x006E, 0x01BD, 0x01A4, 0x0053, 0x0FF0, + 0x0FEE, 0x0069, 0x01B9, 0x01A9, 0x0058, 0x0FEF, + 0x0FEE, 0x0065, 0x01B5, 0x01AD, 0x005C, 0x0FEF, + 0x0FEF, 0x0060, 0x01B1, 0x01B1, 0x0060, 0x0FEF, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 02-Apr-2024 +// 6t_64p_LanczosEd_p_0.6_p_10qb_ +// 6 +// 64 +// input/output = 0.600000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_6tap_64p_ratio_0_60[198] = { + 0x0FD9, 0x00FB, 0x0258, 0x00FB, 0x0FD9, 0x0000, + 0x0FD9, 0x00F3, 0x0258, 0x0102, 0x0FDA, 0x0000, + 0x0FD8, 0x00EB, 0x0258, 0x010B, 0x0FDB, 0x0FFF, + 0x0FD8, 0x00E3, 0x0258, 0x0112, 0x0FDC, 0x0FFF, + 0x0FD8, 0x00DC, 0x0257, 0x011B, 0x0FDC, 0x0FFE, + 0x0FD7, 0x00D4, 0x0256, 0x0123, 0x0FDE, 0x0FFE, + 0x0FD7, 0x00CD, 0x0255, 0x012B, 0x0FDF, 0x0FFD, + 0x0FD7, 0x00C5, 0x0254, 0x0133, 0x0FE0, 0x0FFD, + 0x0FD7, 0x00BE, 0x0252, 0x013C, 0x0FE1, 0x0FFC, + 0x0FD7, 0x00B6, 0x0251, 0x0143, 0x0FE3, 0x0FFC, + 0x0FD8, 0x00AF, 0x024F, 0x014B, 0x0FE4, 0x0FFB, + 0x0FD8, 0x00A8, 0x024C, 0x0154, 0x0FE6, 0x0FFA, + 0x0FD8, 0x00A1, 0x024A, 0x015B, 0x0FE8, 0x0FFA, + 0x0FD9, 0x009A, 0x0247, 0x0163, 0x0FEA, 0x0FF9, + 0x0FD9, 0x0093, 0x0244, 0x016C, 0x0FEC, 0x0FF8, + 0x0FD9, 0x008C, 0x0241, 0x0174, 0x0FEF, 0x0FF7, + 0x0FDA, 0x0085, 0x023E, 0x017B, 0x0FF1, 0x0FF7, + 0x0FDB, 0x007F, 0x023A, 0x0183, 0x0FF3, 0x0FF6, + 0x0FDB, 0x0078, 0x0237, 0x018B, 0x0FF6, 0x0FF5, + 0x0FDC, 0x0072, 0x0233, 0x0192, 0x0FF9, 0x0FF4, + 0x0FDD, 0x006C, 0x022F, 0x0199, 0x0FFC, 0x0FF3, + 0x0FDD, 0x0065, 0x022A, 0x01A3, 0x0FFF, 0x0FF2, + 0x0FDE, 0x005F, 0x0226, 0x01AA, 0x0002, 0x0FF1, + 0x0FDF, 0x005A, 0x0221, 0x01B0, 0x0006, 0x0FF0, + 0x0FE0, 0x0054, 0x021C, 0x01B7, 0x0009, 0x0FF0, + 0x0FE1, 0x004E, 0x0217, 0x01BE, 0x000D, 0x0FEF, + 0x0FE2, 0x0048, 0x0212, 0x01C6, 0x0010, 0x0FEE, + 0x0FE3, 0x0043, 0x020C, 0x01CD, 0x0014, 0x0FED, + 0x0FE4, 0x003E, 0x0207, 0x01D3, 0x0018, 0x0FEC, + 0x0FE5, 0x0039, 0x0200, 0x01DA, 0x001D, 0x0FEB, + 0x0FE6, 0x0034, 0x01FA, 0x01E1, 0x0021, 0x0FEA, + 0x0FE7, 0x002F, 0x01F5, 0x01E7, 0x0025, 0x0FE9, + 0x0FE8, 0x002A, 0x01EE, 0x01EE, 0x002A, 0x0FE8, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 02-Apr-2024 +// 6t_64p_LanczosEd_p_0.7_p_10qb_ +// 6 +// 64 +// input/output = 0.700000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_6tap_64p_ratio_0_70[198] = { + 0x0FC0, 0x00DA, 0x02CC, 0x00DA, 0x0FC0, 0x0000, + 0x0FC1, 0x00D0, 0x02CC, 0x00E4, 0x0FBF, 0x0000, + 0x0FC2, 0x00C6, 0x02CB, 0x00EF, 0x0FBE, 0x0000, + 0x0FC3, 0x00BC, 0x02CA, 0x00F9, 0x0FBE, 0x0000, + 0x0FC4, 0x00B2, 0x02C9, 0x0104, 0x0FBD, 0x0000, + 0x0FC5, 0x00A8, 0x02C7, 0x010F, 0x0FBD, 0x0000, + 0x0FC7, 0x009F, 0x02C5, 0x0119, 0x0FBC, 0x0000, + 0x0FC8, 0x0095, 0x02C3, 0x0124, 0x0FBC, 0x0000, + 0x0FC9, 0x008C, 0x02C0, 0x012F, 0x0FBC, 0x0000, + 0x0FCB, 0x0083, 0x02BD, 0x0139, 0x0FBC, 0x0000, + 0x0FCC, 0x007A, 0x02BA, 0x0144, 0x0FBC, 0x0000, + 0x0FCE, 0x0072, 0x02B6, 0x014D, 0x0FBD, 0x0000, + 0x0FD0, 0x0069, 0x02B2, 0x0159, 0x0FBD, 0x0FFF, + 0x0FD1, 0x0061, 0x02AD, 0x0164, 
0x0FBE, 0x0FFF, + 0x0FD3, 0x0059, 0x02A9, 0x016E, 0x0FBF, 0x0FFE, + 0x0FD4, 0x0051, 0x02A4, 0x017A, 0x0FBF, 0x0FFE, + 0x0FD6, 0x0049, 0x029E, 0x0184, 0x0FC1, 0x0FFE, + 0x0FD8, 0x0042, 0x0299, 0x018E, 0x0FC2, 0x0FFD, + 0x0FD9, 0x003A, 0x0293, 0x019B, 0x0FC3, 0x0FFC, + 0x0FDB, 0x0033, 0x028D, 0x01A4, 0x0FC5, 0x0FFC, + 0x0FDC, 0x002D, 0x0286, 0x01AF, 0x0FC7, 0x0FFB, + 0x0FDE, 0x0026, 0x0280, 0x01BA, 0x0FC8, 0x0FFA, + 0x0FE0, 0x001F, 0x0279, 0x01C4, 0x0FCB, 0x0FF9, + 0x0FE1, 0x0019, 0x0272, 0x01CE, 0x0FCD, 0x0FF9, + 0x0FE3, 0x0013, 0x026A, 0x01D9, 0x0FCF, 0x0FF8, + 0x0FE4, 0x000D, 0x0263, 0x01E3, 0x0FD2, 0x0FF7, + 0x0FE6, 0x0008, 0x025B, 0x01EC, 0x0FD5, 0x0FF6, + 0x0FE7, 0x0002, 0x0253, 0x01F7, 0x0FD8, 0x0FF5, + 0x0FE9, 0x0FFD, 0x024A, 0x0202, 0x0FDB, 0x0FF3, + 0x0FEA, 0x0FF8, 0x0242, 0x020B, 0x0FDF, 0x0FF2, + 0x0FEC, 0x0FF3, 0x0239, 0x0215, 0x0FE2, 0x0FF1, + 0x0FED, 0x0FEF, 0x0230, 0x021E, 0x0FE6, 0x0FF0, + 0x0FEF, 0x0FEB, 0x0226, 0x0226, 0x0FEB, 0x0FEF, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 02-Apr-2024 +// 6t_64p_LanczosEd_p_0.8_p_10qb_ +// 6 +// 64 +// input/output = 0.800000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_6tap_64p_ratio_0_80[198] = { + 0x0FBF, 0x00A1, 0x0340, 0x00A1, 0x0FBF, 0x0000, + 0x0FC1, 0x0095, 0x0340, 0x00AD, 0x0FBC, 0x0001, + 0x0FC4, 0x0089, 0x033E, 0x00BA, 0x0FBA, 0x0001, + 0x0FC6, 0x007D, 0x033D, 0x00C6, 0x0FB8, 0x0002, + 0x0FC9, 0x0072, 0x033A, 0x00D3, 0x0FB6, 0x0002, + 0x0FCC, 0x0067, 0x0338, 0x00DF, 0x0FB3, 0x0003, + 0x0FCE, 0x005C, 0x0334, 0x00EE, 0x0FB1, 0x0003, + 0x0FD1, 0x0051, 0x0331, 0x00FA, 0x0FAF, 0x0004, + 0x0FD3, 0x0047, 0x032D, 0x0108, 0x0FAD, 0x0004, + 0x0FD6, 0x003D, 0x0328, 0x0116, 0x0FAB, 0x0004, + 0x0FD8, 0x0033, 0x0323, 0x0123, 0x0FAA, 0x0005, + 0x0FDB, 0x002A, 0x031D, 0x0131, 0x0FA8, 0x0005, + 0x0FDD, 0x0021, 0x0317, 0x013F, 0x0FA7, 0x0005, + 0x0FDF, 0x0018, 0x0311, 0x014D, 0x0FA5, 0x0006, + 0x0FE2, 0x0010, 0x030A, 0x015A, 0x0FA4, 0x0006, + 0x0FE4, 0x0008, 0x0302, 0x0169, 0x0FA3, 0x0006, + 0x0FE6, 0x0000, 0x02FB, 0x0177, 0x0FA2, 0x0006, + 0x0FE8, 0x0FF9, 0x02F3, 0x0185, 0x0FA1, 0x0006, + 0x0FEB, 0x0FF1, 0x02EA, 0x0193, 0x0FA1, 0x0006, + 0x0FED, 0x0FEB, 0x02E1, 0x01A1, 0x0FA0, 0x0006, + 0x0FEE, 0x0FE4, 0x02D8, 0x01B0, 0x0FA0, 0x0006, + 0x0FF0, 0x0FDE, 0x02CE, 0x01BE, 0x0FA0, 0x0006, + 0x0FF2, 0x0FD8, 0x02C5, 0x01CB, 0x0FA0, 0x0006, + 0x0FF4, 0x0FD3, 0x02BA, 0x01D8, 0x0FA1, 0x0006, + 0x0FF6, 0x0FCD, 0x02B0, 0x01E7, 0x0FA1, 0x0005, + 0x0FF7, 0x0FC8, 0x02A5, 0x01F5, 0x0FA2, 0x0005, + 0x0FF9, 0x0FC4, 0x029A, 0x0202, 0x0FA3, 0x0004, + 0x0FFA, 0x0FC0, 0x028E, 0x0210, 0x0FA4, 0x0004, + 0x0FFB, 0x0FBC, 0x0283, 0x021D, 0x0FA6, 0x0003, + 0x0FFD, 0x0FB8, 0x0276, 0x022A, 0x0FA8, 0x0003, + 0x0FFE, 0x0FB4, 0x026B, 0x0237, 0x0FAA, 0x0002, + 0x0FFF, 0x0FB1, 0x025E, 0x0245, 0x0FAC, 0x0001, + 0x0000, 0x0FAE, 0x0252, 0x0252, 0x0FAE, 0x0000, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 02-Apr-2024 +// 6t_64p_LanczosEd_p_0.9_p_10qb_ +// 6 +// 64 +// input/output = 0.900000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_6tap_64p_ratio_0_90[198] = { + 0x0FD8, 0x0055, 0x03A7, 0x0054, 0x0FD8, 0x0000, + 0x0FDB, 0x0047, 0x03A7, 0x0063, 0x0FD4, 0x0000, + 0x0FDF, 0x003B, 0x03A5, 0x006F, 0x0FD1, 0x0001, + 0x0FE2, 0x002E, 0x03A3, 0x007E, 0x0FCD, 0x0002, + 
0x0FE5, 0x0022, 0x03A0, 0x008D, 0x0FCA, 0x0002, + 0x0FE8, 0x0017, 0x039D, 0x009B, 0x0FC6, 0x0003, + 0x0FEB, 0x000C, 0x0398, 0x00AC, 0x0FC2, 0x0003, + 0x0FEE, 0x0001, 0x0394, 0x00BA, 0x0FBF, 0x0004, + 0x0FF1, 0x0FF7, 0x038E, 0x00CA, 0x0FBB, 0x0005, + 0x0FF4, 0x0FED, 0x0388, 0x00DA, 0x0FB8, 0x0005, + 0x0FF6, 0x0FE4, 0x0381, 0x00EB, 0x0FB4, 0x0006, + 0x0FF9, 0x0FDB, 0x037A, 0x00FA, 0x0FB1, 0x0007, + 0x0FFB, 0x0FD3, 0x0372, 0x010B, 0x0FAD, 0x0008, + 0x0FFD, 0x0FCB, 0x0369, 0x011D, 0x0FAA, 0x0008, + 0x0000, 0x0FC3, 0x0360, 0x012E, 0x0FA6, 0x0009, + 0x0002, 0x0FBC, 0x0356, 0x013F, 0x0FA3, 0x000A, + 0x0003, 0x0FB6, 0x034C, 0x0150, 0x0FA0, 0x000B, + 0x0005, 0x0FB0, 0x0341, 0x0162, 0x0F9D, 0x000B, + 0x0007, 0x0FAA, 0x0336, 0x0173, 0x0F9A, 0x000C, + 0x0008, 0x0FA5, 0x032A, 0x0185, 0x0F97, 0x000D, + 0x000A, 0x0FA0, 0x031E, 0x0197, 0x0F94, 0x000D, + 0x000B, 0x0F9B, 0x0311, 0x01A9, 0x0F92, 0x000E, + 0x000C, 0x0F97, 0x0303, 0x01BC, 0x0F8F, 0x000F, + 0x000D, 0x0F94, 0x02F6, 0x01CD, 0x0F8D, 0x000F, + 0x000E, 0x0F91, 0x02E8, 0x01DE, 0x0F8B, 0x0010, + 0x000F, 0x0F8E, 0x02D9, 0x01F1, 0x0F89, 0x0010, + 0x0010, 0x0F8B, 0x02CA, 0x0202, 0x0F88, 0x0011, + 0x0010, 0x0F89, 0x02BB, 0x0214, 0x0F87, 0x0011, + 0x0011, 0x0F87, 0x02AB, 0x0226, 0x0F86, 0x0011, + 0x0011, 0x0F86, 0x029C, 0x0236, 0x0F85, 0x0012, + 0x0011, 0x0F85, 0x028B, 0x0249, 0x0F84, 0x0012, + 0x0012, 0x0F84, 0x027B, 0x0259, 0x0F84, 0x0012, + 0x0012, 0x0F84, 0x026A, 0x026A, 0x0F84, 0x0012, +}; + +//======================================================== +// gen_scaler_coeffs_cnf_file.m +// make_test_script.m +// 02-Apr-2024 +// 6t_64p_LanczosEd_p_1_p_10qb_ +// 6 +// 64 +// input/output = 1.000000000000 +// LanczosEd +// S1.10 +//======================================================== +static const uint16_t easf_filter_6tap_64p_ratio_1_00[198] = { + 0x0000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000, + 0x0003, 0x0FF3, 0x0400, 0x000D, 0x0FFD, 0x0000, + 0x0006, 0x0FE7, 0x03FE, 0x001C, 0x0FF9, 0x0000, + 0x0009, 0x0FDB, 0x03FC, 0x002B, 0x0FF5, 0x0000, + 0x000C, 0x0FD0, 0x03F9, 0x003A, 0x0FF1, 0x0000, + 0x000E, 0x0FC5, 0x03F5, 0x004A, 0x0FED, 0x0001, + 0x0011, 0x0FBB, 0x03F0, 0x005A, 0x0FE9, 0x0001, + 0x0013, 0x0FB2, 0x03EB, 0x006A, 0x0FE5, 0x0001, + 0x0015, 0x0FA9, 0x03E4, 0x007B, 0x0FE1, 0x0002, + 0x0017, 0x0FA1, 0x03DD, 0x008D, 0x0FDC, 0x0002, + 0x0018, 0x0F99, 0x03D4, 0x00A0, 0x0FD8, 0x0003, + 0x001A, 0x0F92, 0x03CB, 0x00B2, 0x0FD3, 0x0004, + 0x001B, 0x0F8C, 0x03C1, 0x00C6, 0x0FCE, 0x0004, + 0x001C, 0x0F86, 0x03B7, 0x00D9, 0x0FC9, 0x0005, + 0x001D, 0x0F80, 0x03AB, 0x00EE, 0x0FC4, 0x0006, + 0x001E, 0x0F7C, 0x039F, 0x0101, 0x0FBF, 0x0007, + 0x001F, 0x0F78, 0x0392, 0x0115, 0x0FBA, 0x0008, + 0x001F, 0x0F74, 0x0385, 0x012B, 0x0FB5, 0x0008, + 0x0020, 0x0F71, 0x0376, 0x0140, 0x0FB0, 0x0009, + 0x0020, 0x0F6E, 0x0367, 0x0155, 0x0FAB, 0x000B, + 0x0020, 0x0F6C, 0x0357, 0x016B, 0x0FA6, 0x000C, + 0x0020, 0x0F6A, 0x0347, 0x0180, 0x0FA2, 0x000D, + 0x0020, 0x0F69, 0x0336, 0x0196, 0x0F9D, 0x000E, + 0x0020, 0x0F69, 0x0325, 0x01AB, 0x0F98, 0x000F, + 0x001F, 0x0F68, 0x0313, 0x01C3, 0x0F93, 0x0010, + 0x001F, 0x0F69, 0x0300, 0x01D8, 0x0F8F, 0x0011, + 0x001E, 0x0F69, 0x02ED, 0x01EF, 0x0F8B, 0x0012, + 0x001D, 0x0F6A, 0x02D9, 0x0205, 0x0F87, 0x0014, + 0x001D, 0x0F6C, 0x02C5, 0x021A, 0x0F83, 0x0015, + 0x001C, 0x0F6E, 0x02B1, 0x0230, 0x0F7F, 0x0016, + 0x001B, 0x0F70, 0x029C, 0x0247, 0x0F7B, 0x0017, + 0x001A, 0x0F72, 0x0287, 0x025D, 0x0F78, 0x0018, + 0x0019, 0x0F75, 0x0272, 0x0272, 0x0F75, 0x0019, +}; + +/* Converted scaler coeff tables from S1.10 to S1.12 */ +static uint16_t 
easf_filter_3tap_64p_ratio_0_30_s1_12[99]; +static uint16_t easf_filter_3tap_64p_ratio_0_40_s1_12[99]; +static uint16_t easf_filter_3tap_64p_ratio_0_50_s1_12[99]; +static uint16_t easf_filter_3tap_64p_ratio_0_60_s1_12[99]; +static uint16_t easf_filter_3tap_64p_ratio_0_70_s1_12[99]; +static uint16_t easf_filter_3tap_64p_ratio_0_80_s1_12[99]; +static uint16_t easf_filter_3tap_64p_ratio_0_90_s1_12[99]; +static uint16_t easf_filter_3tap_64p_ratio_1_00_s1_12[99]; +static uint16_t easf_filter_4tap_64p_ratio_0_30_s1_12[132]; +static uint16_t easf_filter_4tap_64p_ratio_0_40_s1_12[132]; +static uint16_t easf_filter_4tap_64p_ratio_0_50_s1_12[132]; +static uint16_t easf_filter_4tap_64p_ratio_0_60_s1_12[132]; +static uint16_t easf_filter_4tap_64p_ratio_0_70_s1_12[132]; +static uint16_t easf_filter_4tap_64p_ratio_0_80_s1_12[132]; +static uint16_t easf_filter_4tap_64p_ratio_0_90_s1_12[132]; +static uint16_t easf_filter_4tap_64p_ratio_1_00_s1_12[132]; +static uint16_t easf_filter_6tap_64p_ratio_0_30_s1_12[198]; +static uint16_t easf_filter_6tap_64p_ratio_0_40_s1_12[198]; +static uint16_t easf_filter_6tap_64p_ratio_0_50_s1_12[198]; +static uint16_t easf_filter_6tap_64p_ratio_0_60_s1_12[198]; +static uint16_t easf_filter_6tap_64p_ratio_0_70_s1_12[198]; +static uint16_t easf_filter_6tap_64p_ratio_0_80_s1_12[198]; +static uint16_t easf_filter_6tap_64p_ratio_0_90_s1_12[198]; +static uint16_t easf_filter_6tap_64p_ratio_1_00_s1_12[198]; + +struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x0000}, + {5, 10, 0x0000}, + {6, 10, 0x0000}, + {7, 10, 0x0000}, + {8, 10, 0x0000}, + {9, 10, 0x0000}, + {1, 1, 0x0000}, + {-1, -1, 0x0002}, +}; + +struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x0000}, + {5, 10, 0x0000}, + {6, 10, 0x0000}, + {7, 10, 0x0000}, + {8, 10, 0x0000}, + {9, 10, 0x0000}, + {1, 1, 0x0000}, + {-1, -1, 0x0002}, +}; + +struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = { + {3, 10, 0x4100}, + {4, 10, 0x4100}, + {5, 10, 0x4100}, + {6, 10, 0x4100}, + {7, 10, 0x4100}, + {8, 10, 0x4100}, + {9, 10, 0x4100}, + {1, 1, 0x4100}, + {-1, -1, 0x4100}, +}; + +struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = { + {3, 10, 0x4000}, + {4, 10, 0x4000}, + {5, 10, 0x4000}, + {6, 10, 0x4000}, + {7, 10, 0x4000}, + {8, 10, 0x4000}, + {9, 10, 0x4000}, + {1, 1, 0x4000}, + {-1, -1, 0x4000}, +}; + +struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x251F}, + {5, 10, 0x291F}, + {6, 10, 0xA51F}, + {7, 10, 0xA51F}, + {8, 10, 0xAA66}, + {9, 10, 0xA51F}, + {1, 1, 0xA640}, + {-1, -1, 0xA640}, +}; + +struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x9600}, + {5, 10, 0xA460}, + {6, 10, 0xA8E0}, + {7, 10, 0xAC00}, + {8, 10, 0xAD20}, + {9, 10, 0xAFC0}, + {1, 1, 0xB058}, + {-1, -1, 0xB058}, +}; + +struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = { + {3, 10, 0x4100}, + {4, 10, 0x4100}, + {5, 10, 0x4100}, + {6, 10, 0x4100}, + {7, 10, 0x4100}, + {8, 10, 0x4100}, + {9, 10, 0x4100}, + {1, 1, 0x4100}, + {-1, -1, 0x4100}, +}; + +struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = { + {3, 10, 0x4000}, + {4, 10, 0x4000}, + {5, 10, 0x4000}, + {6, 10, 0x4000}, + {7, 10, 0x4000}, + {8, 10, 0x4000}, + {9, 10, 0x4000}, + {1, 1, 0x4000}, + {-1, -1, 0x4000}, +}; + +struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 
0x0000}, + {5, 10, 0x0000}, + {6, 10, 0x0000}, + {7, 10, 0x0000}, + {8, 10, 0x0000}, + {9, 10, 0x0000}, + {1, 1, 0x0000}, + {-1, -1, 0x0000}, +}; + +struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x0000}, + {5, 10, 0x0000}, + {6, 10, 0x9900}, + {7, 10, 0xA100}, + {8, 10, 0xA8C0}, + {9, 10, 0xAB20}, + {1, 1, 0xAC00}, + {-1, -1, 0xAC00}, +}; + +struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x0000}, + {5, 10, 0x0000}, + {6, 10, 0x0000}, + {7, 10, 0x0000}, + {8, 10, 0x4100}, + {9, 10, 0x9F00}, + {1, 1, 0xA4C0}, + {-1, -1, 0xA8D8}, +}; + +struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x0000}, + {5, 10, 0x0000}, + {6, 10, 0x0000}, + {7, 10, 0x0000}, + {8, 10, 0x4000}, + {9, 10, 0x24FE}, + {1, 1, 0x2D64}, + {-1, -1, 0x3ADB}, +}; + +struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = { + {3, 10, 0x3800}, + {4, 10, 0x3800}, + {5, 10, 0x3800}, + {6, 10, 0x3800}, + {7, 10, 0x3800}, + {8, 10, 0x3886}, + {9, 10, 0x3940}, + {1, 1, 0x3A4E}, + {-1, -1, 0x3B66}, +}; + +struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = { + {3, 10, 0x3800}, + {4, 10, 0x3800}, + {5, 10, 0x3800}, + {6, 10, 0x3800}, + {7, 10, 0x3800}, + {8, 10, 0x36F4}, + {9, 10, 0x359C}, + {1, 1, 0x3360}, + {-1, -1, 0x2F20}, +}; + +struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x0000}, + {5, 10, 0x0000}, + {6, 10, 0x0000}, + {7, 10, 0x0000}, + {8, 10, 0x0000}, + {9, 10, 0x359C}, + {1, 1, 0x31F0}, + {-1, -1, 0x1F00}, +}; + +struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = { + {3, 10, 0x0000}, + {4, 10, 0x0000}, + {5, 10, 0x0000}, + {6, 10, 0x0000}, + {7, 10, 0x0000}, + {8, 10, 0x0000}, + {9, 10, 0x9F00}, + {1, 1, 0xA400}, + {-1, -1, 0x9E00}, +}; + +void spl_init_easf_filter_coeffs(void) +{ + convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_30, + easf_filter_3tap_64p_ratio_0_30_s1_12, 3); + convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_40, + easf_filter_3tap_64p_ratio_0_40_s1_12, 3); + convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_50, + easf_filter_3tap_64p_ratio_0_50_s1_12, 3); + convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_60, + easf_filter_3tap_64p_ratio_0_60_s1_12, 3); + convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_70, + easf_filter_3tap_64p_ratio_0_70_s1_12, 3); + convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_80, + easf_filter_3tap_64p_ratio_0_80_s1_12, 3); + convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_90, + easf_filter_3tap_64p_ratio_0_90_s1_12, 3); + convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_1_00, + easf_filter_3tap_64p_ratio_1_00_s1_12, 3); + + convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_30, + easf_filter_4tap_64p_ratio_0_30_s1_12, 4); + convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_40, + easf_filter_4tap_64p_ratio_0_40_s1_12, 4); + convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_50, + easf_filter_4tap_64p_ratio_0_50_s1_12, 4); + convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_60, + easf_filter_4tap_64p_ratio_0_60_s1_12, 4); + convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_70, + easf_filter_4tap_64p_ratio_0_70_s1_12, 4); + convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_80, + easf_filter_4tap_64p_ratio_0_80_s1_12, 4); + 
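The easf_filter_*_s1_12 arrays declared above are filled once at init by convert_filter_s1_10_to_s1_12(), whose body is not part of this hunk. As a rough sketch of what that widening amounts to, based only on the S1.10 format noted in the table headers (the helper name and loop bounds here are the editor's assumptions, not the patch's implementation):

/*
 * Editor's sketch, not part of the patch: widen 12-bit S1.10 coefficients
 * (sign + 1 integer bit + 10 fractional bits, as described in the table
 * headers above) into S1.12. Each 64-phase table stores 33 rows of <taps>
 * entries, matching the 99/132/198 array sizes; keeping 12 instead of 10
 * fractional bits is a left shift by two on the sign-extended value.
 */
static void example_widen_s1_10_to_s1_12(const uint16_t *src, uint16_t *dst, int taps)
{
        int i;

        for (i = 0; i < 33 * taps; i++) {
                int32_t v = src[i] & 0x0FFF;    /* S1.10 lives in the low 12 bits */

                if (v & 0x0800)                 /* sign-extend bit 11 */
                        v -= 0x1000;
                dst[i] = (uint16_t)((v << 2) & 0x3FFF); /* repack as 14-bit S1.12 */
        }
}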
convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_90, + easf_filter_4tap_64p_ratio_0_90_s1_12, 4); + convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_1_00, + easf_filter_4tap_64p_ratio_1_00_s1_12, 4); + + convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_30, + easf_filter_6tap_64p_ratio_0_30_s1_12, 6); + convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_40, + easf_filter_6tap_64p_ratio_0_40_s1_12, 6); + convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_50, + easf_filter_6tap_64p_ratio_0_50_s1_12, 6); + convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_60, + easf_filter_6tap_64p_ratio_0_60_s1_12, 6); + convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_70, + easf_filter_6tap_64p_ratio_0_70_s1_12, 6); + convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_80, + easf_filter_6tap_64p_ratio_0_80_s1_12, 6); + convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_90, + easf_filter_6tap_64p_ratio_0_90_s1_12, 6); + convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_1_00, + easf_filter_6tap_64p_ratio_1_00_s1_12, 6); +} + +uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio) +{ + if (ratio.value < spl_fixpt_from_fraction(3, 10).value) + return easf_filter_3tap_64p_ratio_0_30_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(4, 10).value) + return easf_filter_3tap_64p_ratio_0_40_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(5, 10).value) + return easf_filter_3tap_64p_ratio_0_50_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(6, 10).value) + return easf_filter_3tap_64p_ratio_0_60_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(7, 10).value) + return easf_filter_3tap_64p_ratio_0_70_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(8, 10).value) + return easf_filter_3tap_64p_ratio_0_80_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(9, 10).value) + return easf_filter_3tap_64p_ratio_0_90_s1_12; + else + return easf_filter_3tap_64p_ratio_1_00_s1_12; +} + +uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio) +{ + if (ratio.value < spl_fixpt_from_fraction(3, 10).value) + return easf_filter_4tap_64p_ratio_0_30_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(4, 10).value) + return easf_filter_4tap_64p_ratio_0_40_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(5, 10).value) + return easf_filter_4tap_64p_ratio_0_50_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(6, 10).value) + return easf_filter_4tap_64p_ratio_0_60_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(7, 10).value) + return easf_filter_4tap_64p_ratio_0_70_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(8, 10).value) + return easf_filter_4tap_64p_ratio_0_80_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(9, 10).value) + return easf_filter_4tap_64p_ratio_0_90_s1_12; + else + return easf_filter_4tap_64p_ratio_1_00_s1_12; +} + +uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio) +{ + if (ratio.value < spl_fixpt_from_fraction(3, 10).value) + return easf_filter_6tap_64p_ratio_0_30_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(4, 10).value) + return easf_filter_6tap_64p_ratio_0_40_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(5, 10).value) + return easf_filter_6tap_64p_ratio_0_50_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(6, 10).value) + return easf_filter_6tap_64p_ratio_0_60_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(7, 10).value) + return easf_filter_6tap_64p_ratio_0_70_s1_12; + else if 
(ratio.value < spl_fixpt_from_fraction(8, 10).value) + return easf_filter_6tap_64p_ratio_0_80_s1_12; + else if (ratio.value < spl_fixpt_from_fraction(9, 10).value) + return easf_filter_6tap_64p_ratio_0_90_s1_12; + else + return easf_filter_6tap_64p_ratio_1_00_s1_12; +} + +uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio) +{ + if (taps == 6) + return spl_get_easf_filter_6tap_64p(ratio); + else if (taps == 4) + return spl_get_easf_filter_4tap_64p(ratio); + else if (taps == 3) + return spl_get_easf_filter_3tap_64p(ratio); + else { + /* should never happen, bug */ + SPL_BREAK_TO_DEBUGGER(); + return NULL; + } +} + +void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data, + const struct spl_scaler_data *data, bool enable_easf_v, + bool enable_easf_h) +{ + /* + * Old coefficients calculated scaling ratio = input / output + * New coefficients are calculated based on = output / input + */ + if (enable_easf_h) { + dscl_prog_data->filter_h = spl_dscl_get_easf_filter_coeffs_64p( + data->taps.h_taps, data->recip_ratios.horz); + + dscl_prog_data->filter_h_c = spl_dscl_get_easf_filter_coeffs_64p( + data->taps.h_taps_c, data->recip_ratios.horz_c); + } else { + dscl_prog_data->filter_h = spl_dscl_get_filter_coeffs_64p( + data->taps.h_taps, data->ratios.horz); + + dscl_prog_data->filter_h_c = spl_dscl_get_filter_coeffs_64p( + data->taps.h_taps_c, data->ratios.horz_c); + } + if (enable_easf_v) { + dscl_prog_data->filter_v = spl_dscl_get_easf_filter_coeffs_64p( + data->taps.v_taps, data->recip_ratios.vert); + + dscl_prog_data->filter_v_c = spl_dscl_get_easf_filter_coeffs_64p( + data->taps.v_taps_c, data->recip_ratios.vert_c); + } else { + dscl_prog_data->filter_v = spl_dscl_get_filter_coeffs_64p( + data->taps.v_taps, data->ratios.vert); + + dscl_prog_data->filter_v_c = spl_dscl_get_filter_coeffs_64p( + data->taps.v_taps_c, data->ratios.vert_c); + } +} + +static uint32_t spl_easf_get_scale_ratio_to_reg_value(struct spl_fixed31_32 ratio, + struct scale_ratio_to_reg_value_lookup *lookup_table_base_ptr, + unsigned int num_entries) +{ + unsigned int count = 0; + uint32_t value = 0; + struct scale_ratio_to_reg_value_lookup *lookup_table_index_ptr; + + lookup_table_index_ptr = (lookup_table_base_ptr + num_entries - 1); + value = lookup_table_index_ptr->reg_value; + + while (count < num_entries) { + + lookup_table_index_ptr = (lookup_table_base_ptr + count); + if (lookup_table_index_ptr->numer < 0) + break; + + if (ratio.value < spl_fixpt_from_fraction( + lookup_table_index_ptr->numer, + lookup_table_index_ptr->denom).value) { + value = lookup_table_index_ptr->reg_value; + break; + } + + count++; + } + return value; +} +uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries = sizeof(easf_v_bf3_mode_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_v_bf3_mode_lookup, num_entries); + return value; +} +uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries = sizeof(easf_h_bf3_mode_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_h_bf3_mode_lookup, num_entries); + return value; +} +uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 4) { + num_entries = sizeof(easf_reducer_gain6_4tap_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = 
spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_reducer_gain6_4tap_lookup, num_entries); + } else if (taps == 6) { + num_entries = sizeof(easf_reducer_gain6_6tap_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_reducer_gain6_6tap_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 4) { + num_entries = sizeof(easf_reducer_gain4_4tap_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_reducer_gain4_4tap_lookup, num_entries); + } else if (taps == 6) { + num_entries = sizeof(easf_reducer_gain4_6tap_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_reducer_gain4_6tap_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 4) { + num_entries = sizeof(easf_gain_ring6_4tap_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_gain_ring6_4tap_lookup, num_entries); + } else if (taps == 6) { + num_entries = sizeof(easf_gain_ring6_6tap_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_gain_ring6_6tap_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 4) { + num_entries = sizeof(easf_gain_ring4_4tap_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_gain_ring4_4tap_lookup, num_entries); + } else if (taps == 6) { + num_entries = sizeof(easf_gain_ring4_6tap_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_gain_ring4_6tap_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 3) { + num_entries = sizeof(easf_3tap_dntilt_uptilt_offset_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_3tap_dntilt_uptilt_offset_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 3) { + num_entries = sizeof(easf_3tap_uptilt_maxval_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_3tap_uptilt_maxval_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 3) { + num_entries = sizeof(easf_3tap_dntilt_slope_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_3tap_dntilt_slope_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int 
num_entries; + + if (taps == 3) { + num_entries = sizeof(easf_3tap_uptilt1_slope_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_3tap_uptilt1_slope_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 3) { + num_entries = sizeof(easf_3tap_uptilt2_slope_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_3tap_uptilt2_slope_lookup, num_entries); + } else + value = 0; + return value; +} +uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio) +{ + uint32_t value; + unsigned int num_entries; + + if (taps == 3) { + num_entries = sizeof(easf_3tap_uptilt2_offset_lookup) / + sizeof(struct scale_ratio_to_reg_value_lookup); + value = spl_easf_get_scale_ratio_to_reg_value(ratio, + easf_3tap_uptilt2_offset_lookup, num_entries); + } else + value = 0; + return value; +} diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h new file mode 100644 index 00000000000000..8bb2b8108e38a1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright 2024 Advanced Micro Devices, Inc. */ + +#ifndef __DC_SPL_SCL_EASF_FILTERS_H__ +#define __DC_SPL_SCL_EASF_FILTERS_H__ + +#include "dc_spl_types.h" + +struct scale_ratio_to_reg_value_lookup { + int numer; + int denom; + const uint32_t reg_value; +}; + +void spl_init_easf_filter_coeffs(void); +uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio); +uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio); +uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio); +uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio); +void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data, + const struct spl_scaler_data *data, bool enable_easf_v, + bool enable_easf_h); + +uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio); +uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio); +uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio); +uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio); + +#endif /* __DC_SPL_SCL_EASF_FILTERS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c index c174b2e8a1508c..b02c7b0b262b8a 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c @@ -2,7 +2,7 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
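The ratio-to-register helpers added in dc_spl_scl_easf_filters.c above all funnel through spl_easf_get_scale_ratio_to_reg_value(): each scale_ratio_to_reg_value_lookup table lists ascending numer/denom thresholds and ends with a {-1, -1, default} terminator, and the first threshold the scale ratio falls below supplies the register value. A worked example against easf_gain_ring4_4tap_lookup, purely illustrative and assuming the patch's headers are included:

/*
 * Editor's example, not part of the patch: a 4-tap ring gain at a 0.65
 * scale ratio. The scan skips 3/10..6/10 (0.65 is not below them), stops
 * at the {7, 10, 0xA100} row and returns its value; ratios at or above
 * 1/1 fall through to the {-1, -1, ...} default.
 */
static uint32_t example_ring4_gain_for_0_65(void)
{
        struct spl_fixed31_32 ratio = spl_fixpt_from_fraction(65, 100);

        return spl_get_gainRing4(4, ratio);     /* == 0xA100 */
}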
-#include "dc_spl_types.h" +#include "spl_debug.h" #include "dc_spl_scl_filters.h" //========================================= // = 2 @@ -1318,97 +1318,97 @@ static const uint16_t filter_8tap_64p_183[264] = { 0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4 }; -const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio) +const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio) { - if (ratio.value < dc_fixpt_one.value) + if (ratio.value < spl_fixpt_one.value) return filter_3tap_16p_upscale; - else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(4, 3).value) return filter_3tap_16p_116; - else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(5, 3).value) return filter_3tap_16p_149; else return filter_3tap_16p_183; } -const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio) +const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio) { - if (ratio.value < dc_fixpt_one.value) + if (ratio.value < spl_fixpt_one.value) return filter_3tap_64p_upscale; - else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(4, 3).value) return filter_3tap_64p_116; - else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(5, 3).value) return filter_3tap_64p_149; else return filter_3tap_64p_183; } -const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio) +const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio) { - if (ratio.value < dc_fixpt_one.value) + if (ratio.value < spl_fixpt_one.value) return filter_4tap_16p_upscale; - else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(4, 3).value) return filter_4tap_16p_116; - else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(5, 3).value) return filter_4tap_16p_149; else return filter_4tap_16p_183; } -const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio) +const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio) { - if (ratio.value < dc_fixpt_one.value) + if (ratio.value < spl_fixpt_one.value) return filter_4tap_64p_upscale; - else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(4, 3).value) return filter_4tap_64p_116; - else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(5, 3).value) return filter_4tap_64p_149; else return filter_4tap_64p_183; } -const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio) +const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio) { - if (ratio.value < dc_fixpt_one.value) + if (ratio.value < spl_fixpt_one.value) return filter_5tap_64p_upscale; - else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(4, 3).value) return filter_5tap_64p_116; - else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(5, 3).value) return filter_5tap_64p_149; else return filter_5tap_64p_183; } -const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio) +const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio) { - if (ratio.value < dc_fixpt_one.value) + if (ratio.value < spl_fixpt_one.value) return filter_6tap_64p_upscale; - else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) + else if (ratio.value < 
spl_fixpt_from_fraction(4, 3).value) return filter_6tap_64p_116; - else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(5, 3).value) return filter_6tap_64p_149; else return filter_6tap_64p_183; } -const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio) +const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio) { - if (ratio.value < dc_fixpt_one.value) + if (ratio.value < spl_fixpt_one.value) return filter_7tap_64p_upscale; - else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(4, 3).value) return filter_7tap_64p_116; - else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(5, 3).value) return filter_7tap_64p_149; else return filter_7tap_64p_183; } -const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio) +const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio) { - if (ratio.value < dc_fixpt_one.value) + if (ratio.value < spl_fixpt_one.value) return filter_8tap_64p_upscale; - else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(4, 3).value) return filter_8tap_64p_116; - else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) + else if (ratio.value < spl_fixpt_from_fraction(5, 3).value) return filter_8tap_64p_149; else return filter_8tap_64p_183; @@ -1423,3 +1423,29 @@ const uint16_t *spl_get_filter_2tap_64p(void) { return filter_2tap_64p; } + +const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio) +{ + if (taps == 8) + return spl_get_filter_8tap_64p(ratio); + else if (taps == 7) + return spl_get_filter_7tap_64p(ratio); + else if (taps == 6) + return spl_get_filter_6tap_64p(ratio); + else if (taps == 5) + return spl_get_filter_5tap_64p(ratio); + else if (taps == 4) + return spl_get_filter_4tap_64p(ratio); + else if (taps == 3) + return spl_get_filter_3tap_64p(ratio); + else if (taps == 2) + return spl_get_filter_2tap_64p(); + else if (taps == 1) + return NULL; + else { + /* should never happen, bug */ + SPL_BREAK_TO_DEBUGGER(); + return NULL; + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h index 6d96aca53b24db..48202bc4f81e81 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h @@ -7,53 +7,16 @@ #include "dc_spl_types.h" -const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio); -const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio); -const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio); -const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio); -const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio); -const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio); -const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio); -const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio); +const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio); +const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio); +const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio); +const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio); +const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio); +const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio); +const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio); +const 
uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_2tap_16p(void); const uint16_t *spl_get_filter_2tap_64p(void); -const uint16_t *spl_get_filter_3tap_16p_upscale(void); -const uint16_t *spl_get_filter_3tap_16p_116(void); -const uint16_t *spl_get_filter_3tap_16p_149(void); -const uint16_t *spl_get_filter_3tap_16p_183(void); +const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio); -const uint16_t *spl_get_filter_4tap_16p_upscale(void); -const uint16_t *spl_get_filter_4tap_16p_116(void); -const uint16_t *spl_get_filter_4tap_16p_149(void); -const uint16_t *spl_get_filter_4tap_16p_183(void); - -const uint16_t *spl_get_filter_3tap_64p_upscale(void); -const uint16_t *spl_get_filter_3tap_64p_116(void); -const uint16_t *spl_get_filter_3tap_64p_149(void); -const uint16_t *spl_get_filter_3tap_64p_183(void); - -const uint16_t *spl_get_filter_4tap_64p_upscale(void); -const uint16_t *spl_get_filter_4tap_64p_116(void); -const uint16_t *spl_get_filter_4tap_64p_149(void); -const uint16_t *spl_get_filter_4tap_64p_183(void); - -const uint16_t *spl_get_filter_5tap_64p_upscale(void); -const uint16_t *spl_get_filter_5tap_64p_116(void); -const uint16_t *spl_get_filter_5tap_64p_149(void); -const uint16_t *spl_get_filter_5tap_64p_183(void); - -const uint16_t *spl_get_filter_6tap_64p_upscale(void); -const uint16_t *spl_get_filter_6tap_64p_116(void); -const uint16_t *spl_get_filter_6tap_64p_149(void); -const uint16_t *spl_get_filter_6tap_64p_183(void); - -const uint16_t *spl_get_filter_7tap_64p_upscale(void); -const uint16_t *spl_get_filter_7tap_64p_116(void); -const uint16_t *spl_get_filter_7tap_64p_149(void); -const uint16_t *spl_get_filter_7tap_64p_183(void); - -const uint16_t *spl_get_filter_8tap_64p_upscale(void); -const uint16_t *spl_get_filter_8tap_64p_116(void); -const uint16_t *spl_get_filter_8tap_64p_149(void); -const uint16_t *spl_get_filter_8tap_64p_183(void); #endif /* __DC_SPL_SCL_FILTERS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h index 201201d3f55be2..2a74ff5fdfdbc6 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h @@ -2,30 +2,15 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
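The reworked selectors in dc_spl_scl_filters.c keep the original ratio cut-offs (1.0, 4/3, 5/3); only the fixed-point type changes, and the new spl_dscl_get_filter_coeffs_64p() folds the per-tap-count choice into one call. A usage sketch with values chosen by the editor for illustration:

/*
 * Editor's example, not part of the patch: a 4-tap, 1.5x downscale.
 * 4/3 <= 1.5 < 5/3, so the "149" table is selected for that tap count.
 */
static const uint16_t *example_pick_legacy_coeffs(void)
{
        struct spl_fixed31_32 ratio = spl_fixpt_from_fraction(3, 2);

        return spl_dscl_get_filter_coeffs_64p(4, ratio);        /* filter_4tap_64p_149 */
}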
-#include "os_types.h" -#include "dc_hw_types.h" -#ifndef ASSERT -#define ASSERT(_bool) (void *)0 -#endif -#include "include/fixed31_32.h" // fixed31_32 and related functions #ifndef __DC_SPL_TYPES_H__ #define __DC_SPL_TYPES_H__ -enum lb_memory_config { - /* Enable all 3 pieces of memory */ - LB_MEMORY_CONFIG_0 = 0, - - /* Enable only the first piece of memory */ - LB_MEMORY_CONFIG_1 = 1, - - /* Enable only the second piece of memory */ - LB_MEMORY_CONFIG_2 = 2, - - /* Only applicable in 4:2:0 mode, enable all 3 pieces of memory and the - * last piece of chroma memory used for the luma storage - */ - LB_MEMORY_CONFIG_3 = 3 -}; +#include "spl_os_types.h" // swap +#ifndef SPL_ASSERT +#define SPL_ASSERT(_bool) ((void *)0) +#endif +#include "spl_fixpt31_32.h" // fixed31_32 and related functions +#include "spl_custom_float.h" // custom float and related functions struct spl_size { uint32_t width; @@ -39,16 +24,16 @@ struct spl_rect { }; struct spl_ratios { - struct fixed31_32 horz; - struct fixed31_32 vert; - struct fixed31_32 horz_c; - struct fixed31_32 vert_c; + struct spl_fixed31_32 horz; + struct spl_fixed31_32 vert; + struct spl_fixed31_32 horz_c; + struct spl_fixed31_32 vert_c; }; struct spl_inits { - struct fixed31_32 h; - struct fixed31_32 h_c; - struct fixed31_32 v; - struct fixed31_32 v_c; + struct spl_fixed31_32 h; + struct spl_fixed31_32 h_c; + struct spl_fixed31_32 v; + struct spl_fixed31_32 v_c; }; struct spl_taps { @@ -81,6 +66,8 @@ enum spl_pixel_format { SPL_PIXEL_FORMAT_420BPP10, /*end of pixel format definition*/ SPL_PIXEL_FORMAT_INVALID, + SPL_PIXEL_FORMAT_422BPP8, + SPL_PIXEL_FORMAT_422BPP10, SPL_PIXEL_FORMAT_GRPH_BEGIN = SPL_PIXEL_FORMAT_INDEX8, SPL_PIXEL_FORMAT_GRPH_END = SPL_PIXEL_FORMAT_FP16, SPL_PIXEL_FORMAT_VIDEO_BEGIN = SPL_PIXEL_FORMAT_420BPP8, @@ -88,6 +75,22 @@ enum spl_pixel_format { SPL_PIXEL_FORMAT_UNKNOWN }; +enum lb_memory_config { + /* Enable all 3 pieces of memory */ + LB_MEMORY_CONFIG_0 = 0, + + /* Enable only the first piece of memory */ + LB_MEMORY_CONFIG_1 = 1, + + /* Enable only the second piece of memory */ + LB_MEMORY_CONFIG_2 = 2, + + /* Only applicable in 4:2:0 mode, enable all 3 pieces of memory and the + * last piece of chroma memory used for the luma storage + */ + LB_MEMORY_CONFIG_3 = 3 +}; + /* Rotation angle */ enum spl_rotation_angle { SPL_ROTATION_ANGLE_0 = 0, @@ -120,6 +123,13 @@ enum spl_color_space { SPL_COLOR_SPACE_YCBCR709_BLACK, }; +enum chroma_cositing { + CHROMA_COSITING_NONE, + CHROMA_COSITING_LEFT, + CHROMA_COSITING_TOPLEFT, + CHROMA_COSITING_COUNT +}; + // Scratch space for calculating scaler params struct spl_scaler_data { int h_active; @@ -129,6 +139,7 @@ struct spl_scaler_data { struct spl_rect viewport_c; struct spl_rect recout; struct spl_ratios ratios; + struct spl_ratios recip_ratios; struct spl_inits inits; }; @@ -396,13 +407,19 @@ struct dscl_prog_data { /* blur and scale filter */ const uint16_t *filter_blur_scale_v; const uint16_t *filter_blur_scale_h; + int sharpness_level; /* Track sharpness level */ }; /* SPL input and output definitions */ -// SPL outputs struct -struct spl_out { +// SPL scratch struct +struct spl_scratch { // Pack all SPL outputs in scl_data struct spl_scaler_data scl_data; +}; + +/* SPL input and output definitions */ +// SPL outputs struct +struct spl_out { // Pack all output need to program hw registers struct dscl_prog_data *dscl_prog_data; }; @@ -444,20 +461,43 @@ struct basic_out { bool alpha_en; bool use_two_pixels_per_container; }; -enum explicit_sharpness { - SHARPNESS_LOW = 0, - 
SHARPNESS_MID, - SHARPNESS_HIGH -}; -struct adaptive_sharpness { +enum sharpness_setting { + SHARPNESS_HW_OFF = 0, + SHARPNESS_ZERO, + SHARPNESS_CUSTOM +}; +struct spl_sharpness_range { + int sdr_rgb_min; + int sdr_rgb_max; + int sdr_rgb_mid; + int sdr_yuv_min; + int sdr_yuv_max; + int sdr_yuv_mid; + int hdr_rgb_min; + int hdr_rgb_max; + int hdr_rgb_mid; +}; +struct adaptive_sharpness { bool enable; - enum explicit_sharpness sharpness; + int sharpness_level; + struct spl_sharpness_range sharpness_range; }; enum linear_light_scaling { // convert it in translation logic LLS_PREF_DONT_CARE = 0, LLS_PREF_YES, LLS_PREF_NO }; +enum sharpen_policy { + SHARPEN_ALWAYS = 0, + SHARPEN_YUV = 1, + SHARPEN_RGB_FULLSCREEN_YUV = 2, + SHARPEN_FULLSCREEN_ALL = 3 +}; +enum scale_to_sharpness_policy { + NO_SCALE_TO_SHARPNESS_ADJ = 0, + SCALE_TO_SHARPNESS_ADJ_YUV = 1, + SCALE_TO_SHARPNESS_ADJ_ALL = 2 +}; struct spl_funcs { void (*spl_calc_lb_num_partitions) (bool alpha_en, @@ -470,6 +510,8 @@ struct spl_funcs { struct spl_debug { int visual_confirm_base_offset; int visual_confirm_dpp_offset; + enum sharpen_policy sharpen_policy; + enum scale_to_sharpness_policy scale_to_sharpness_policy; }; struct spl_in { @@ -485,6 +527,11 @@ struct spl_in { bool prefer_easf; bool disable_easf; struct spl_debug debug; + bool is_fullscreen; + bool is_hdr_on; + int h_active; + int v_active; + int sdr_white_level_nits; }; // end of SPL inputs diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c new file mode 100644 index 00000000000000..be2f34d034c5c9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. 
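The new spl_custom_float.c below appears to mirror DC's existing custom-float helper: it splits a spl_fixed31_32 value into sign, biased exponent and mantissa for a caller-specified register format. A worked example of the conversion; the 12-bit mantissa / 6-bit exponent / signed format is the editor's choice for illustration, not something the patch mandates:

/*
 * Editor's example, not part of the patch: pack 0.75 into a field with a
 * 12-bit mantissa, 6-bit exponent and a sign bit. 0.75 = 1.5 * 2^-1, so
 * the biased exponent is 31 - 1 = 30 and the mantissa is 0.5 * 2^12 =
 * 0x800, giving (30 << 12) | 0x800 == 0x1E800.
 */
static bool example_pack_custom_float(uint32_t *reg)
{
        struct spl_custom_float_format fmt = {
                .mantissa_bits = 12,
                .exponenta_bits = 6,
                .sign = true,
        };

        return spl_convert_to_custom_float_format(spl_fixpt_from_fraction(3, 4),
                                                  &fmt, reg);
}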
+ +#include "spl_debug.h" +#include "spl_custom_float.h" + +static bool spl_build_custom_float(struct spl_fixed31_32 value, + const struct spl_custom_float_format *format, + bool *negative, + uint32_t *mantissa, + uint32_t *exponenta) +{ + uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1; + + const struct spl_fixed31_32 mantissa_constant_plus_max_fraction = + spl_fixpt_from_fraction((1LL << (format->mantissa_bits + 1)) - 1, + 1LL << format->mantissa_bits); + + struct spl_fixed31_32 mantiss; + + if (spl_fixpt_eq(value, spl_fixpt_zero)) { + *negative = false; + *mantissa = 0; + *exponenta = 0; + return true; + } + + if (spl_fixpt_lt(value, spl_fixpt_zero)) { + *negative = format->sign; + value = spl_fixpt_neg(value); + } else { + *negative = false; + } + + if (spl_fixpt_lt(value, spl_fixpt_one)) { + uint32_t i = 1; + + do { + value = spl_fixpt_shl(value, 1); + ++i; + } while (spl_fixpt_lt(value, spl_fixpt_one)); + + --i; + + if (exp_offset <= i) { + *mantissa = 0; + *exponenta = 0; + return true; + } + + *exponenta = exp_offset - i; + } else if (spl_fixpt_le(mantissa_constant_plus_max_fraction, value)) { + uint32_t i = 1; + + do { + value = spl_fixpt_shr(value, 1); + ++i; + } while (spl_fixpt_lt(mantissa_constant_plus_max_fraction, value)); + + *exponenta = exp_offset + i - 1; + } else { + *exponenta = exp_offset; + } + + mantiss = spl_fixpt_sub(value, spl_fixpt_one); + + if (spl_fixpt_lt(mantiss, spl_fixpt_zero) || + spl_fixpt_lt(spl_fixpt_one, mantiss)) + mantiss = spl_fixpt_zero; + else + mantiss = spl_fixpt_shl(mantiss, format->mantissa_bits); + + *mantissa = spl_fixpt_floor(mantiss); + + return true; +} + +static bool spl_setup_custom_float(const struct spl_custom_float_format *format, + bool negative, + uint32_t mantissa, + uint32_t exponenta, + uint32_t *result) +{ + uint32_t i = 0; + uint32_t j = 0; + uint32_t value = 0; + + /* verification code: + * once calculation is ok we can remove it + */ + + const uint32_t mantissa_mask = + (1 << (format->mantissa_bits + 1)) - 1; + + const uint32_t exponenta_mask = + (1 << (format->exponenta_bits + 1)) - 1; + + if (mantissa & ~mantissa_mask) { + SPL_BREAK_TO_DEBUGGER(); + mantissa = mantissa_mask; + } + + if (exponenta & ~exponenta_mask) { + SPL_BREAK_TO_DEBUGGER(); + exponenta = exponenta_mask; + } + + /* end of verification code */ + + while (i < format->mantissa_bits) { + uint32_t mask = 1 << i; + + if (mantissa & mask) + value |= mask; + + ++i; + } + + while (j < format->exponenta_bits) { + uint32_t mask = 1 << j; + + if (exponenta & mask) + value |= mask << i; + + ++j; + } + + if (negative && format->sign) + value |= 1 << (i + j); + + *result = value; + + return true; +} + +bool spl_convert_to_custom_float_format(struct spl_fixed31_32 value, + const struct spl_custom_float_format *format, + uint32_t *result) +{ + uint32_t mantissa; + uint32_t exponenta; + bool negative; + + return spl_build_custom_float(value, format, &negative, &mantissa, &exponenta) && + spl_setup_custom_float(format, + negative, + mantissa, + exponenta, + result); +} diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h new file mode 100644 index 00000000000000..cdc4e107b9de4b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright 2024 Advanced Micro Devices, Inc. 
*/ + +#ifndef SPL_CUSTOM_FLOAT_H_ +#define SPL_CUSTOM_FLOAT_H_ + +#include "spl_os_types.h" +#include "spl_fixpt31_32.h" + +struct spl_custom_float_format { + uint32_t mantissa_bits; + uint32_t exponenta_bits; + bool sign; +}; + +struct spl_custom_float_value { + uint32_t mantissa; + uint32_t exponenta; + uint32_t value; + bool negative; +}; + +bool spl_convert_to_custom_float_format( + struct spl_fixed31_32 value, + const struct spl_custom_float_format *format, + uint32_t *result); + +#endif //SPL_CUSTOM_FLOAT_H_ diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h new file mode 100644 index 00000000000000..5696dafd0894d5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright 2024 Advanced Micro Devices, Inc. */ + +#ifndef SPL_DEBUG_H +#define SPL_DEBUG_H + +#ifdef SPL_ASSERT +#undef SPL_ASSERT +#endif +#define SPL_ASSERT(b) + +#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0) + +#ifdef SPL_DALMSG +#undef SPL_DALMSG +#endif +#define SPL_DALMSG(b) + +#ifdef SPL_DAL_ASSERT_MSG +#undef SPL_DAL_ASSERT_MSG +#endif +#define SPL_DAL_ASSERT_MSG(b, m) + +#endif // SPL_DEBUG_H diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c new file mode 100644 index 00000000000000..a95565df5487cf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2024 Advanced Micro Devices, Inc. + +#include "spl_fixpt31_32.h" + +static const struct spl_fixed31_32 spl_fixpt_two_pi = { 26986075409LL }; +static const struct spl_fixed31_32 spl_fixpt_ln2 = { 2977044471LL }; +static const struct spl_fixed31_32 spl_fixpt_ln2_div_2 = { 1488522236LL }; + +static inline unsigned long long abs_i64( + long long arg) +{ + if (arg > 0) + return (unsigned long long)arg; + else + return (unsigned long long)(-arg); +} + +/* + * @brief + * result = dividend / divisor + * *remainder = dividend % divisor + */ +static inline unsigned long long complete_integer_division_u64( + unsigned long long dividend, + unsigned long long divisor, + unsigned long long *remainder) +{ + unsigned long long result; + + ASSERT(divisor); + + result = spl_div64_u64_rem(dividend, divisor, remainder); + + return result; +} + + +#define FRACTIONAL_PART_MASK \ + ((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1) + +#define GET_INTEGER_PART(x) \ + ((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + +#define GET_FRACTIONAL_PART(x) \ + (FRACTIONAL_PART_MASK & (x)) + +struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator) +{ + struct spl_fixed31_32 res; + + bool arg1_negative = numerator < 0; + bool arg2_negative = denominator < 0; + + unsigned long long arg1_value = arg1_negative ? -numerator : numerator; + unsigned long long arg2_value = arg2_negative ? 
-denominator : denominator; + + unsigned long long remainder; + + /* determine integer part */ + + unsigned long long res_value = complete_integer_division_u64( + arg1_value, arg2_value, &remainder); + + ASSERT(res_value <= LONG_MAX); + + /* determine fractional part */ + { + unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART; + + do { + remainder <<= 1; + + res_value <<= 1; + + if (remainder >= arg2_value) { + res_value |= 1; + remainder -= arg2_value; + } + } while (--i != 0); + } + + /* round up LSB */ + { + unsigned long long summand = (remainder << 1) >= arg2_value; + + ASSERT(res_value <= LLONG_MAX - summand); + + res_value += summand; + } + + res.value = (long long)res_value; + + if (arg1_negative ^ arg2_negative) + res.value = -res.value; + + return res; +} + +struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + struct spl_fixed31_32 res; + + bool arg1_negative = arg1.value < 0; + bool arg2_negative = arg2.value < 0; + + unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value; + unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value; + + unsigned long long arg1_int = GET_INTEGER_PART(arg1_value); + unsigned long long arg2_int = GET_INTEGER_PART(arg2_value); + + unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value); + unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value); + + unsigned long long tmp; + + res.value = arg1_int * arg2_int; + + ASSERT(res.value <= (long long)LONG_MAX); + + res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; + + tmp = arg1_int * arg2_fra; + + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + + res.value += tmp; + + tmp = arg2_int * arg1_fra; + + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + + res.value += tmp; + + tmp = arg1_fra * arg2_fra; + + tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + + (tmp >= (unsigned long long)spl_fixpt_half.value); + + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + + res.value += tmp; + + if (arg1_negative ^ arg2_negative) + res.value = -res.value; + + return res; +} + +struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg) +{ + struct spl_fixed31_32 res; + + unsigned long long arg_value = abs_i64(arg.value); + + unsigned long long arg_int = GET_INTEGER_PART(arg_value); + + unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value); + + unsigned long long tmp; + + res.value = arg_int * arg_int; + + ASSERT(res.value <= (long long)LONG_MAX); + + res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; + + tmp = arg_int * arg_fra; + + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + + res.value += tmp; + + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + + res.value += tmp; + + tmp = arg_fra * arg_fra; + + tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + + (tmp >= (unsigned long long)spl_fixpt_half.value); + + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + + res.value += tmp; + + return res; +} + +struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg) +{ + /* + * @note + * Good idea to use Newton's method + */ + + ASSERT(arg.value); + + return spl_fixpt_from_fraction( + spl_fixpt_one.value, + arg.value); +} + +struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg) +{ + struct spl_fixed31_32 square; + + struct spl_fixed31_32 res = spl_fixpt_one; + + int n = 27; + + struct spl_fixed31_32 arg_norm = arg; + + if (spl_fixpt_le( + spl_fixpt_two_pi, + spl_fixpt_abs(arg))) { + arg_norm = spl_fixpt_sub( + arg_norm, + 
spl_fixpt_mul_int( + spl_fixpt_two_pi, + (int)spl_div64_s64( + arg_norm.value, + spl_fixpt_two_pi.value))); + } + + square = spl_fixpt_sqr(arg_norm); + + do { + res = spl_fixpt_sub( + spl_fixpt_one, + spl_fixpt_div_int( + spl_fixpt_mul( + square, + res), + n * (n - 1))); + + n -= 2; + } while (n > 2); + + if (arg.value != arg_norm.value) + res = spl_fixpt_div( + spl_fixpt_mul(res, arg_norm), + arg); + + return res; +} + +struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg) +{ + return spl_fixpt_mul( + arg, + spl_fixpt_sinc(arg)); +} + +struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg) +{ + /* TODO implement argument normalization */ + + const struct spl_fixed31_32 square = spl_fixpt_sqr(arg); + + struct spl_fixed31_32 res = spl_fixpt_one; + + int n = 26; + + do { + res = spl_fixpt_sub( + spl_fixpt_one, + spl_fixpt_div_int( + spl_fixpt_mul( + square, + res), + n * (n - 1))); + + n -= 2; + } while (n != 0); + + return res; +} + +/* + * @brief + * result = exp(arg), + * where abs(arg) < 1 + * + * Calculated as Taylor series. + */ +static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg) +{ + unsigned int n = 9; + + struct spl_fixed31_32 res = spl_fixpt_from_fraction( + n + 2, + n + 1); + /* TODO find correct res */ + + ASSERT(spl_fixpt_lt(arg, spl_fixpt_one)); + + do + res = spl_fixpt_add( + spl_fixpt_one, + spl_fixpt_div_int( + spl_fixpt_mul( + arg, + res), + n)); + while (--n != 1); + + return spl_fixpt_add( + spl_fixpt_one, + spl_fixpt_mul( + arg, + res)); +} + +struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg) +{ + /* + * @brief + * Main equation is: + * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r), + * where m = round(x / ln(2)), r = x - m * ln(2) + */ + + if (spl_fixpt_le( + spl_fixpt_ln2_div_2, + spl_fixpt_abs(arg))) { + int m = spl_fixpt_round( + spl_fixpt_div( + arg, + spl_fixpt_ln2)); + + struct spl_fixed31_32 r = spl_fixpt_sub( + arg, + spl_fixpt_mul_int( + spl_fixpt_ln2, + m)); + + ASSERT(m != 0); + + ASSERT(spl_fixpt_lt( + spl_fixpt_abs(r), + spl_fixpt_one)); + + if (m > 0) + return spl_fixpt_shl( + fixed31_32_exp_from_taylor_series(r), + (unsigned char)m); + else + return spl_fixpt_div_int( + fixed31_32_exp_from_taylor_series(r), + 1LL << -m); + } else if (arg.value != 0) + return fixed31_32_exp_from_taylor_series(arg); + else + return spl_fixpt_one; +} + +struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg) +{ + struct spl_fixed31_32 res = spl_fixpt_neg(spl_fixpt_one); + /* TODO improve 1st estimation */ + + struct spl_fixed31_32 error; + + ASSERT(arg.value > 0); + /* TODO if arg is negative, return NaN */ + /* TODO if arg is zero, return -INF */ + + do { + struct spl_fixed31_32 res1 = spl_fixpt_add( + spl_fixpt_sub( + res, + spl_fixpt_one), + spl_fixpt_div( + arg, + spl_fixpt_exp(res))); + + error = spl_fixpt_sub( + res, + res1); + + res = res1; + /* TODO determine max_allowed_error based on quality of exp() */ + } while (abs_i64(error.value) > 100ULL); + + return res; +} + + +/* this function is a generic helper to translate fixed point value to + * specified integer format that will consist of integer_bits integer part and + * fractional_bits fractional part. For example it is used in + * spl_fixpt_u2d19 to receive 2 bits integer part and 19 bits fractional + * part in 32 bits. It is used in hw programming (scaler) + */ + +static inline unsigned int ux_dy( + long long value, + unsigned int integer_bits, + unsigned int fractional_bits) +{ + /* 1. 
create mask of integer part */ + unsigned int result = (1 << integer_bits) - 1; + /* 2. mask out fractional part */ + unsigned int fractional_part = FRACTIONAL_PART_MASK & value; + /* 3. shrink fixed point integer part to be of integer_bits width*/ + result &= GET_INTEGER_PART(value); + /* 4. make space for fractional part to be filled in after integer */ + result <<= fractional_bits; + /* 5. shrink fixed point fractional part to of fractional_bits width*/ + fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits; + /* 6. merge the result */ + return result | fractional_part; +} + +static inline unsigned int clamp_ux_dy( + long long value, + unsigned int integer_bits, + unsigned int fractional_bits, + unsigned int min_clamp) +{ + unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits); + + if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART))) + return (1 << (integer_bits + fractional_bits)) - 1; + else if (truncated_val > min_clamp) + return truncated_val; + else + return min_clamp; +} + +unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg) +{ + return ux_dy(arg.value, 4, 19); +} + +unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg) +{ + return ux_dy(arg.value, 3, 19); +} + +unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg) +{ + return ux_dy(arg.value, 2, 19); +} + +unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg) +{ + return ux_dy(arg.value, 0, 19); +} + +unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg) +{ + return clamp_ux_dy(arg.value, 0, 14, 1); +} + +unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg) +{ + return clamp_ux_dy(arg.value, 0, 10, 1); +} + +int spl_fixpt_s4d19(struct spl_fixed31_32 arg) +{ + if (arg.value < 0) + return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19); + else + return ux_dy(arg.value, 4, 19); +} + +struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, + unsigned int integer_bits, + unsigned int fractional_bits) +{ + struct spl_fixed31_32 fixpt_value = spl_fixpt_zero; + struct spl_fixed31_32 fixpt_int_value = spl_fixpt_zero; + long long frac_mask = ((long long)1 << (long long)integer_bits) - 1; + + fixpt_value.value = (long long)value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + frac_mask = frac_mask << fractional_bits; + fixpt_int_value.value = value & frac_mask; + fixpt_int_value.value <<= (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + fixpt_value.value |= fixpt_int_value.value; + return fixpt_value; +} + +struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value, + unsigned int frac_value, + unsigned int integer_bits, + unsigned int fractional_bits) +{ + struct spl_fixed31_32 fixpt_value = spl_fixpt_from_int(int_value); + + fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + return fixpt_value; +} diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h new file mode 100644 index 00000000000000..8a045e2f8699a4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h @@ -0,0 +1,525 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright 2024 Advanced Micro Devices, Inc. 
*/ + +#ifndef __SPL_FIXED31_32_H__ +#define __SPL_FIXED31_32_H__ + +#include "os_types.h" +#include "spl_os_types.h" // swap +#ifndef ASSERT +#define ASSERT(_bool) ((void *)0) +#endif + +#ifndef LLONG_MAX +#define LLONG_MAX 9223372036854775807ll +#endif +#ifndef LLONG_MIN +#define LLONG_MIN (-LLONG_MAX - 1ll) +#endif + +#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32 +#ifndef LLONG_MIN +#define LLONG_MIN (1LL<<63) +#endif +#ifndef LLONG_MAX +#define LLONG_MAX (-1LL>>1) +#endif + +/* + * @brief + * Arithmetic operations on real numbers + * represented as fixed-point numbers. + * There are: 1 bit for sign, + * 31 bit for integer part, + * 32 bits for fractional part. + * + * @note + * Currently, overflows and underflows are asserted; + * no special result returned. + */ + +struct spl_fixed31_32 { + long long value; +}; + + +/* + * @brief + * Useful constants + */ + +static const struct spl_fixed31_32 spl_fixpt_zero = { 0 }; +static const struct spl_fixed31_32 spl_fixpt_epsilon = { 1LL }; +static const struct spl_fixed31_32 spl_fixpt_half = { 0x80000000LL }; +static const struct spl_fixed31_32 spl_fixpt_one = { 0x100000000LL }; + +/* + * @brief + * Initialization routines + */ + +/* + * @brief + * result = numerator / denominator + */ +struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator); + +/* + * @brief + * result = arg + */ +static inline struct spl_fixed31_32 spl_fixpt_from_int(int arg) +{ + struct spl_fixed31_32 res; + + res.value = (long long) arg << FIXED31_32_BITS_PER_FRACTIONAL_PART; + + return res; +} + +/* + * @brief + * Unary operators + */ + +/* + * @brief + * result = -arg + */ +static inline struct spl_fixed31_32 spl_fixpt_neg(struct spl_fixed31_32 arg) +{ + struct spl_fixed31_32 res; + + res.value = -arg.value; + + return res; +} + +/* + * @brief + * result = abs(arg) := (arg >= 0) ? arg : -arg + */ +static inline struct spl_fixed31_32 spl_fixpt_abs(struct spl_fixed31_32 arg) +{ + if (arg.value < 0) + return spl_fixpt_neg(arg); + else + return arg; +} + +/* + * @brief + * Binary relational operators + */ + +/* + * @brief + * result = arg1 < arg2 + */ +static inline bool spl_fixpt_lt(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + return arg1.value < arg2.value; +} + +/* + * @brief + * result = arg1 <= arg2 + */ +static inline bool spl_fixpt_le(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + return arg1.value <= arg2.value; +} + +/* + * @brief + * result = arg1 == arg2 + */ +static inline bool spl_fixpt_eq(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + return arg1.value == arg2.value; +} + +/* + * @brief + * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2 + */ +static inline struct spl_fixed31_32 spl_fixpt_min(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + if (arg1.value <= arg2.value) + return arg1; + else + return arg2; +} + +/* + * @brief + * result = max(arg1, arg2) := (arg1 <= arg2) ? 
arg2 : arg1 + */ +static inline struct spl_fixed31_32 spl_fixpt_max(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + if (arg1.value <= arg2.value) + return arg2; + else + return arg1; +} + +/* + * @brief + * | min_value, when arg <= min_value + * result = | arg, when min_value < arg < max_value + * | max_value, when arg >= max_value + */ +static inline struct spl_fixed31_32 spl_fixpt_clamp( + struct spl_fixed31_32 arg, + struct spl_fixed31_32 min_value, + struct spl_fixed31_32 max_value) +{ + if (spl_fixpt_le(arg, min_value)) + return min_value; + else if (spl_fixpt_le(max_value, arg)) + return max_value; + else + return arg; +} + +/* + * @brief + * Binary shift operators + */ + +/* + * @brief + * result = arg << shift + */ +static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift) +{ + ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || + ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift)))); + + arg.value = arg.value << shift; + + return arg; +} + +/* + * @brief + * result = arg >> shift + */ +static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift) +{ + bool negative = arg.value < 0; + + if (negative) + arg.value = -arg.value; + arg.value = arg.value >> shift; + if (negative) + arg.value = -arg.value; + return arg; +} + +/* + * @brief + * Binary additive operators + */ + +/* + * @brief + * result = arg1 + arg2 + */ +static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + struct spl_fixed31_32 res; + + ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || + ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value))); + + res.value = arg1.value + arg2.value; + + return res; +} + +/* + * @brief + * result = arg1 + arg2 + */ +static inline struct spl_fixed31_32 spl_fixpt_add_int(struct spl_fixed31_32 arg1, int arg2) +{ + return spl_fixpt_add(arg1, spl_fixpt_from_int(arg2)); +} + +/* + * @brief + * result = arg1 - arg2 + */ +static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + struct spl_fixed31_32 res; + + ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || + ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value))); + + res.value = arg1.value - arg2.value; + + return res; +} + +/* + * @brief + * result = arg1 - arg2 + */ +static inline struct spl_fixed31_32 spl_fixpt_sub_int(struct spl_fixed31_32 arg1, int arg2) +{ + return spl_fixpt_sub(arg1, spl_fixpt_from_int(arg2)); +} + + +/* + * @brief + * Binary multiplicative operators + */ + +/* + * @brief + * result = arg1 * arg2 + */ +struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2); + + +/* + * @brief + * result = arg1 * arg2 + */ +static inline struct spl_fixed31_32 spl_fixpt_mul_int(struct spl_fixed31_32 arg1, int arg2) +{ + return spl_fixpt_mul(arg1, spl_fixpt_from_int(arg2)); +} + +/* + * @brief + * result = square(arg) := arg * arg + */ +struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg); + +/* + * @brief + * result = arg1 / arg2 + */ +static inline struct spl_fixed31_32 spl_fixpt_div_int(struct spl_fixed31_32 arg1, long long arg2) +{ + return spl_fixpt_from_fraction(arg1.value, spl_fixpt_from_int((int)arg2).value); +} + +/* + * @brief + * result = arg1 / arg2 + */ +static inline struct spl_fixed31_32 spl_fixpt_div(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + return spl_fixpt_from_fraction(arg1.value, 
arg2.value); +} + +/* + * @brief + * Reciprocal function + */ + +/* + * @brief + * result = reciprocal(arg) := 1 / arg + * + * @note + * No special actions taken in case argument is zero. + */ +struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg); + +/* + * @brief + * Trigonometric functions + */ + +/* + * @brief + * result = sinc(arg) := sin(arg) / arg + * + * @note + * Argument specified in radians, + * internally it's normalized to [-2pi...2pi] range. + */ +struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg); + +/* + * @brief + * result = sin(arg) + * + * @note + * Argument specified in radians, + * internally it's normalized to [-2pi...2pi] range. + */ +struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg); + +/* + * @brief + * result = cos(arg) + * + * @note + * Argument specified in radians + * and should be in [-2pi...2pi] range - + * passing arguments outside that range + * will cause incorrect result! + */ +struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg); + +/* + * @brief + * Transcendent functions + */ + +/* + * @brief + * result = exp(arg) + * + * @note + * Currently, function is verified for abs(arg) <= 1. + */ +struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg); + +/* + * @brief + * result = log(arg) + * + * @note + * Currently, abs(arg) should be less than 1. + * No normalization is done. + * Currently, no special actions taken + * in case of invalid argument(s). Take care! + */ +struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg); + +/* + * @brief + * Power function + */ + +/* + * @brief + * result = pow(arg1, arg2) + * + * @note + * Currently, abs(arg1) should be less than 1. Take care! + */ +static inline struct spl_fixed31_32 spl_fixpt_pow(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2) +{ + if (arg1.value == 0) + return arg2.value == 0 ? spl_fixpt_one : spl_fixpt_zero; + + return spl_fixpt_exp( + spl_fixpt_mul( + spl_fixpt_log(arg1), + arg2)); +} + +/* + * @brief + * Rounding functions + */ + +/* + * @brief + * result = floor(arg) := greatest integer lower than or equal to arg + */ +static inline int spl_fixpt_floor(struct spl_fixed31_32 arg) +{ + unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value; + + if (arg.value >= 0) + return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); + else + return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); +} + +/* + * @brief + * result = round(arg) := integer nearest to arg + */ +static inline int spl_fixpt_round(struct spl_fixed31_32 arg) +{ + unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value; + + const long long summand = spl_fixpt_half.value; + + ASSERT(LLONG_MAX - (long long)arg_value >= summand); + + arg_value += summand; + + if (arg.value >= 0) + return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); + else + return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); +} + +/* + * @brief + * result = ceil(arg) := lowest integer greater than or equal to arg + */ +static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg) +{ + unsigned long long arg_value = arg.value > 0 ? 
arg.value : -arg.value; + + const long long summand = spl_fixpt_one.value - + spl_fixpt_epsilon.value; + + ASSERT(LLONG_MAX - (long long)arg_value >= summand); + + arg_value += summand; + + if (arg.value >= 0) + return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); + else + return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); +} + +/* the following two functions are used in scaler hw programming to convert fixed + * point value to format 2 bits from integer part and 19 bits from fractional + * part. The same applies for u0d19, 0 bits from integer part and 19 bits from + * fractional part + */ + +unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg); + +unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg); + +unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg); + +unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg); + +unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg); + +unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg); + +int spl_fixpt_s4d19(struct spl_fixed31_32 arg); + +static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg, unsigned int frac_bits) +{ + bool negative = arg.value < 0; + + if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) { + ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART); + return arg; + } + + if (negative) + arg.value = -arg.value; + arg.value &= (~0ULL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits); + if (negative) + arg.value = -arg.value; + return arg; +} + +struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, unsigned int integer_bits, unsigned int fractional_bits); +struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value, + unsigned int frac_value, + unsigned int integer_bits, + unsigned int fractional_bits); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h new file mode 100644 index 00000000000000..709706ed4f2c9e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright 2024 Advanced Micro Devices, Inc. */ +/* Copyright 2019 Raptor Engineering, LLC */ + +#ifndef _SPL_OS_TYPES_H_ +#define _SPL_OS_TYPES_H_ + +#include +#include +#include +#include +#include +#include + +/* + * + * general debug capabilities + * + */ +#define SPL_BREAK_TO_DEBUGGER() ASSERT(0) + +static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder) +{ + return div_u64_rem(dividend, divisor, remainder); +} + +static inline uint64_t spl_div_u64(uint64_t dividend, uint32_t divisor) +{ + return div_u64(dividend, divisor); +} + +static inline uint64_t spl_div64_u64(uint64_t dividend, uint64_t divisor) +{ + return div64_u64(dividend, divisor); +} + +static inline uint64_t spl_div64_u64_rem(uint64_t dividend, uint64_t divisor, uint64_t *remainder) +{ + return div64_u64_rem(dividend, divisor, remainder); +} + +static inline int64_t spl_div64_s64(int64_t dividend, int64_t divisor) +{ + return div64_s64(dividend, divisor); +} + +#define spl_swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) + +#ifndef spl_min +#define spl_min(a, b) (((a) < (b)) ?
(a):(b)) +#endif + +#endif /* _SPL_OS_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 6589bb9aea6bc1..fe5b6f7a3eb1ee 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -300,6 +300,7 @@ struct dmub_srv_hw_params { enum dmub_ips_disable_type disable_ips; bool disallow_phy_access; bool disable_sldo_opt; + bool enable_non_transparent_setconfig; }; /** @@ -330,6 +331,9 @@ struct dmub_diagnostic_data { uint32_t inbox0_rptr; uint32_t inbox0_wptr; uint32_t inbox0_size; + uint32_t outbox1_rptr; + uint32_t outbox1_wptr; + uint32_t outbox1_size; uint32_t gpint_datain0; struct dmub_srv_debug timeout_info; uint8_t is_dmcub_enabled : 1; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 5ff0a865705f50..ebcf68bfae2b32 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -111,7 +111,7 @@ #define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2) /* Trace buffer offset for entry */ -#define TRACE_BUFFER_ENTRY_OFFSET 16 +#define TRACE_BUFFER_ENTRY_OFFSET 16 /** * Maximum number of dirty rects supported by FW. @@ -336,6 +336,10 @@ union dmub_psr_debug_flags { */ uint32_t back_to_back_flip : 1; + /** + * Enable visual confirm for IPS + */ + uint32_t enable_ips_visual_confirm : 1; } bitfields; /** @@ -678,7 +682,7 @@ union dmub_fw_boot_options { uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/ uint32_t usb4_cm_version: 1; /**< 1 CM support */ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */ - uint32_t reserved0: 1; + uint32_t enable_non_transparent_setconfig: 1; /* 1 if dpia use conventional dp lt flow*/ uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/ uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ @@ -1304,6 +1308,7 @@ enum dmub_cmd_dpia_type { DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0, DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, + DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3, }; /* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */ @@ -1875,7 +1880,12 @@ enum dmub_cmd_idle_opt_type { /** * DCN hardware notify idle. */ - DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2 + DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2, + + /** + * DCN hardware notify power state. + */ + DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3, }; /** @@ -1902,6 +1912,33 @@ struct dmub_rb_cmd_idle_opt_dcn_notify_idle { struct dmub_dcn_notify_idle_cntl_data cntl_data; }; +/** + * enum dmub_idle_opt_dc_power_state - DC power states. + */ +enum dmub_idle_opt_dc_power_state { + DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN = 0, + DMUB_IDLE_OPT_DC_POWER_STATE_D0 = 1, + DMUB_IDLE_OPT_DC_POWER_STATE_D1 = 2, + DMUB_IDLE_OPT_DC_POWER_STATE_D2 = 4, + DMUB_IDLE_OPT_DC_POWER_STATE_D3 = 8, +}; + +/** + * struct dmub_idle_opt_set_dc_power_state_data - Data passed to FW in a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command. + */ +struct dmub_idle_opt_set_dc_power_state_data { + uint8_t power_state; /**< power state */ + uint8_t pad[3]; /**< padding */ +}; + +/** + * struct dmub_rb_cmd_idle_opt_set_dc_power_state - Data passed to FW in a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command. 
+ */ +struct dmub_rb_cmd_idle_opt_set_dc_power_state { + struct dmub_cmd_header header; /**< header */ + struct dmub_idle_opt_set_dc_power_state_data data; +}; + /** * struct dmub_clocks - Clock update notification. */ @@ -2102,6 +2139,24 @@ struct dmub_rb_cmd_set_mst_alloc_slots { struct dmub_cmd_mst_alloc_slots_control_data mst_slots_control; /* mst slots control */ }; +/** + * Data passed from driver to FW in a DMUB_CMD__SET_TPS_NOTIFICATION command. + */ +struct dmub_cmd_tps_notification_data { + uint8_t instance; /* DPIA instance */ + uint8_t tps; /* requested training pattern */ + uint8_t reserved1; + uint8_t reserved2; +}; + +/** + * DMUB command structure for SET_TPS_NOTIFICATION command. + */ +struct dmub_rb_cmd_set_tps_notification { + struct dmub_cmd_header header; /* header */ + struct dmub_cmd_tps_notification_data tps_notification; /* set tps_notification data */ +}; + /** * DMUB command structure for DPIA HPD int enable control. */ @@ -3024,14 +3079,6 @@ struct dmub_cmd_update_dirty_rect_data { * Currently the support is only for 0 or 1 */ uint8_t panel_inst; - /** - * 16-bit value dicated by driver that indicates the coasting vtotal high byte part. - */ - uint16_t coasting_vtotal_high; - /** - * Explicit padding to 4 byte boundary. - */ - uint8_t pad[2]; }; /** @@ -5276,6 +5323,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. */ struct dmub_rb_cmd_set_mst_alloc_slots set_mst_alloc_slots; + /** + * Definition of a DMUB_CMD__DPIA_SET_TPS_NOTIFICATION command. + */ + struct dmub_rb_cmd_set_tps_notification set_tps_notification; /** * Definition of a DMUB_CMD__EDID_CEA command. */ @@ -5302,6 +5353,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command. */ struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle; + /** + * Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command. + */ + struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state; /* * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command. 
*/ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 662c34e9495ccb..d9f31b191c693d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -449,6 +449,10 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); + diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); + diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); diag_data->is_dmcub_enabled = is_dmub_enabled; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index e1da270502cc92..9600b7f858b049 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -459,6 +459,10 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); + diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); + diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); diag_data->is_dmcub_enabled = is_dmub_enabled; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 916ed022e96b4e..2ccad79053c586 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -425,6 +425,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.ips_disable = params->disable_ips; boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; boot_options.bits.disable_sldo_opt = params->disable_sldo_opt; + boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -502,6 +503,10 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); + diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); + diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); diag_data->is_dmcub_enabled = is_dmub_enabled; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index cf139e9cc20e83..39a8cb6d7523c3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -444,6 +444,10 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnost diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); + diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR); + diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR); + diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); diag_data->is_dmcub_enabled = is_dmub_enabled; diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h 
b/drivers/gpu/drm/amd/display/include/fixed31_32.h index d4cf7ead1d877e..990fa1f19c2299 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -531,4 +531,10 @@ static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigne return arg; } +struct fixed31_32 dc_fixpt_from_ux_dy(unsigned int value, unsigned int integer_bits, unsigned int fractional_bits); +struct fixed31_32 dc_fixpt_from_int_dy(unsigned int int_value, + unsigned int frac_value, + unsigned int integer_bits, + unsigned int fractional_bits); + #endif diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 83479951732ace..a48d564d1660c3 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -61,6 +61,7 @@ #define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__) #define DC_LOG_DSC(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) +#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__) #define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index a40e6590215a64..bbd259cea4f4f6 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -134,7 +134,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh( v_total = div64_u64(div64_u64(((unsigned long long)( frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total), 1000000); + stream->timing.h_total) + 500000, 1000000); /* v_total cannot be less than nominal */ if (v_total < stream->timing.v_total) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 1e495e88448421..8bc377560787bf 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -432,18 +432,18 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, goto out; } - if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, &input->bstatus_read, &status, - hdcp, "bstatus_read")) - goto out; - if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + hdcp, "bstatus_read"); + + mod_hdcp_execute_and_set(check_link_integrity_dp, &input->link_integrity_check, &status, - hdcp, "link_integrity_check")) - goto out; - if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + hdcp, "link_integrity_check"); + + mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, &input->reauth_request_check, &status, - hdcp, "reauth_request_check")) - goto out; + hdcp, "reauth_request_check"); + out: return status; } diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f5b725f10a7cee..3f91926a50e99b 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -61,7 +61,7 @@ enum amd_apu_flags { * acquires the list of IP blocks for the GPU in use on initialization. 
* It can then operate on this list to perform standard driver operations * such as: init, fini, suspend, resume, etc. -* +* * * IP block implementations are named using the following convention: * _v (E.g.: gfx_v6_0). @@ -85,7 +85,7 @@ enum amd_apu_flags { * @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler * @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine * @AMD_IP_BLOCK_TYPE_VPE: Video Processing Engine -* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Schduler for Multimedia +* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Scheduler for Multimedia * @AMD_IP_BLOCK_TYPE_ISP: Image Signal Processor * @AMD_IP_BLOCK_TYPE_NUM: Total number of IP block types */ @@ -251,19 +251,92 @@ enum DC_FEATURE_MASK { DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4 }; +/** + * enum DC_DEBUG_MASK - Bits that are useful for debugging the Display Core IP + */ enum DC_DEBUG_MASK { + /** + * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting + */ DC_DISABLE_PIPE_SPLIT = 0x1, + + /** + * @DC_DISABLE_STUTTER: If set, disable memory stutter mode + */ DC_DISABLE_STUTTER = 0x2, + + /** + * @DC_DISABLE_DSC: If set, disable display stream compression + */ DC_DISABLE_DSC = 0x4, + + /** + * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations + */ DC_DISABLE_CLOCK_GATING = 0x8, + + /** + * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU + */ DC_DISABLE_PSR = 0x10, + + /** + * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even + * if mclk switch in vblank is possible + */ DC_FORCE_SUBVP_MCLK_SWITCH = 0x20, + + /** + * @DC_DISABLE_MPO: If set, disable multi-plane offloading + */ DC_DISABLE_MPO = 0x40, + + /** + * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA + */ DC_ENABLE_DPIA_TRACE = 0x80, + + /** + * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version + * does not default to it. + */ DC_ENABLE_DML2 = 0x100, + + /** + * @DC_DISABLE_PSR_SU: If set, disable PSR SU + */ DC_DISABLE_PSR_SU = 0x200, + + /** + * @DC_DISABLE_REPLAY: If set, disable Panel Replay + */ DC_DISABLE_REPLAY = 0x400, + + /** + * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time. + * If more than one IPS debug bit is set, the lowest bit takes + * precedence. For example, if DC_FORCE_IPS_ENABLE and + * DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes + * precedence. + */ DC_DISABLE_IPS = 0x800, + + /** + * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time, + * *except* when driver goes into suspend. + */ + DC_DISABLE_IPS_DYNAMIC = 0x1000, + + /** + * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if + * there is an enabled display. Otherwise, enable all IPS. + */ + DC_DISABLE_IPS2_DYNAMIC = 0x2000, + + /** + * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time. 
+ */ + DC_FORCE_IPS_ENABLE = 0x4000, }; enum amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h index 8ee3149df5b71d..2ef1273e65abe6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h @@ -340,8 +340,6 @@ #define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L #define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x00000009 #define UVD_LMI_CTRL__RFU_MASK 0xf8000000L -#define UVD_LMI_CTRL__RFU_MASK 0xfc000000L -#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001a #define UVD_LMI_CTRL__RFU__SHIFT 0x0000001b #define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L #define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015 diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 6d094cf3587d64..e3e635a31b8a41 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -71,6 +71,11 @@ enum kgd_memory_pool { KGD_POOL_FRAMEBUFFER = 3, }; +struct kfd_cu_occupancy { + u32 wave_cnt; + u32 doorbell_off; +}; + /** * enum kfd_sched_policy * @@ -313,11 +318,18 @@ struct kfd2kgd_calls { uint32_t grace_period, uint32_t *reg_offset, uint32_t *reg_data); - void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, - int *wave_cnt, int *max_waves_per_cu, uint32_t inst); + void (*get_cu_occupancy)(struct amdgpu_device *adev, + struct kfd_cu_occupancy *cu_occupancy, + int *max_waves_per_cu, uint32_t inst); void (*program_trap_handler_settings)(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst); + uint64_t (*hqd_get_pq_addr)(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id, + uint32_t inst); + uint64_t (*hqd_reset)(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id, + uint32_t inst, unsigned int utimeout); }; #endif /* KGD_KFD_INTERFACE_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 4b20e227431354..19a48d98830a39 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -218,6 +218,7 @@ enum pp_mp1_state { PP_MP1_STATE_SHUTDOWN, PP_MP1_STATE_UNLOAD, PP_MP1_STATE_RESET, + PP_MP1_STATE_FLR, }; enum pp_df_cstate { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 8b7d6ed7e2ed23..9dc82f4d7c937a 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -168,7 +168,11 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, int ret = 0; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && pp_funcs->set_mp1_state) { + if (mp1_state == PP_MP1_STATE_FLR) { + /* VF lost access to SMU */ + if (amdgpu_sriov_vf(adev)) + adev->pm.dpm_enabled = false; + } else if (pp_funcs && pp_funcs->set_mp1_state) { mutex_lock(&adev->pm.mutex); ret = pp_funcs->set_mp1_state( diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c index ca1c7ae8d146d5..f06b29e33ba452 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c @@ -1183,6 +1183,8 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, fw_info = smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, FirmwareInfo), 
&size, &frev, &crev); + PP_ASSERT_WITH_CODE(fw_info != NULL, + "Missing firmware info!", return -EINVAL); if ((fw_info->ucTableFormatRevision == 1) && (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V1_4))) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 6e717ddbb0296e..9ace863792d484 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -2934,9 +2934,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) } } - vega10_enable_smc_features(hwmgr, false, feature_mask); - - return 0; + return vega10_enable_smc_features(hwmgr, false, feature_mask); } /** diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 2cf95118456182..bb3bc68dfc3978 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1257,7 +1257,6 @@ static int smu_sw_init(void *handle) atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; @@ -1265,6 +1264,7 @@ static int smu_sw_init(void *handle) smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; + smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 0b3c2f54a34339..822c6425d90e0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xC +#define SMU_METRICS_TABLE_VERSION 0xD typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -227,6 +227,10 @@ typedef struct __attribute__((packed, aligned(4))) { // PCIE LINK Speed and width uint32_t PCIeLinkSpeed; uint32_t PCIeLinkWidth; + + // PER XCD ACTIVITY + uint32_t GfxBusy[8]; + uint64_t GfxBusyAcc[8]; } MetricsTableX_t; typedef struct __attribute__((packed, aligned(4))) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 076620fa3ef5a8..16af1a329621f1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1989,7 +1989,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", 2, - "MEMLK", + "MEMCLK", activity_monitor.Mem_FPS, activity_monitor.Mem_MinFreqStep, activity_monitor.Mem_MinActiveFreqType, @@ -2051,7 +2051,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u activity_monitor.Soc_PD_Data_error_coeff = input[8]; activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; break; - case 2: /* Memlk */ + case 2: 
/* Memclk */ activity_monitor.Mem_FPS = input[1]; activity_monitor.Mem_MinFreqStep = input[2]; activity_monitor.Mem_MinActiveFreqType = input[3]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 0d3e1a121b670a..9c3c48297cba03 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1691,7 +1691,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", 2, - "MEMLK", + "MEMCLK", activity_monitor->Mem_FPS, activity_monitor->Mem_MinFreqStep, activity_monitor->Mem_MinActiveFreqType, @@ -1756,7 +1756,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * activity_monitor->Fclk_PD_Data_error_coeff = input[8]; activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9]; break; - case 2: /* Memlk */ + case 2: /* Memclk */ activity_monitor->Mem_FPS = input[1]; activity_monitor->Mem_MinFreqStep = input[2]; activity_monitor->Mem_MinActiveFreqType = input[3]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index a887ab945dfa2f..1d024b122b0c02 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2569,10 +2569,14 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, } } - return smu_cmn_send_smc_msg_with_param(smu, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, workload_mask, NULL); + if (!ret) + smu->workload_mask = workload_mask; + + return ret; } static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 9974c9f8135e99..55ed6247eb61f7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2107,8 +2107,12 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap, } mutex_lock(&adev->pm.mutex); r = smu_v13_0_6_request_i2c_xfer(smu, req); - if (r) - goto fail; + if (r) { + /* Retry once, in case of an i2c collision */ + r = smu_v13_0_6_request_i2c_xfer(smu, req); + if (r) + goto fail; + } for (c = i = 0; i < num_msgs; i++) { if (!(msg[i].flags & I2C_M_RD)) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 7bc95c4043778d..b891a5e0a3969a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2501,8 +2501,11 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp return -EINVAL; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type, NULL); + if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu->workload_mask = (1 << workload_type); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 09973615f210e1..865e916fc42544 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -452,17 +452,26 @@ int smu_v14_0_init_smc_tables(struct smu_context *smu) ret = -ENOMEM; goto err3_out; } + + smu_table->user_overdrive_table = 
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); + if (!smu_table->user_overdrive_table) { + ret = -ENOMEM; + goto err4_out; + } } smu_table->combo_pptable = kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL); if (!smu_table->combo_pptable) { ret = -ENOMEM; - goto err4_out; + goto err5_out; } return 0; +err5_out: + kfree(smu_table->user_overdrive_table); err4_out: kfree(smu_table->boot_overdrive_table); err3_out: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 2b45adecbed2e5..5899d01fa73d36 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -68,6 +68,18 @@ #define DEBUGSMC_MSG_Mode1Reset 2 #define LINK_SPEED_MAX 3 +#define PP_OD_FEATURE_GFXCLK_FMIN 0 +#define PP_OD_FEATURE_GFXCLK_FMAX 1 +#define PP_OD_FEATURE_UCLK_FMIN 2 +#define PP_OD_FEATURE_UCLK_FMAX 3 +#define PP_OD_FEATURE_GFX_VF_CURVE 4 +#define PP_OD_FEATURE_FAN_CURVE_TEMP 5 +#define PP_OD_FEATURE_FAN_CURVE_PWM 6 +#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7 +#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 +#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 +#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 + static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -212,6 +224,7 @@ static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = { [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, TAB_MAP(I2C_COMMANDS), TAB_MAP(ECCINFO), + TAB_MAP(OVERDRIVE), }; static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -1040,16 +1053,97 @@ static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu, value); } +static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu, + int od_feature_bit) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + + return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit); +} + +static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu, + int od_feature_bit, + int32_t *min, + int32_t *max) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + int32_t od_min_setting, od_max_setting; + + switch (od_feature_bit) { + case PP_OD_FEATURE_GFXCLK_FMIN: + od_min_setting = overdrive_lowerlimits->GfxclkFmin; + od_max_setting = overdrive_upperlimits->GfxclkFmin; + break; + case PP_OD_FEATURE_GFXCLK_FMAX: + od_min_setting = overdrive_lowerlimits->GfxclkFmax; + od_max_setting = overdrive_upperlimits->GfxclkFmax; + break; + case PP_OD_FEATURE_UCLK_FMIN: + od_min_setting = overdrive_lowerlimits->UclkFmin; + od_max_setting = overdrive_upperlimits->UclkFmin; + break; + case PP_OD_FEATURE_UCLK_FMAX: + od_min_setting = overdrive_lowerlimits->UclkFmax; + od_max_setting = overdrive_upperlimits->UclkFmax; + break; + case PP_OD_FEATURE_GFX_VF_CURVE: + od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary[0]; + od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary[0]; + break; + case PP_OD_FEATURE_FAN_CURVE_TEMP: + od_min_setting = overdrive_lowerlimits->FanLinearTempPoints[0]; + od_max_setting = 
overdrive_upperlimits->FanLinearTempPoints[0]; + break; + case PP_OD_FEATURE_FAN_CURVE_PWM: + od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints[0]; + od_max_setting = overdrive_upperlimits->FanLinearPwmPoints[0]; + break; + case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT: + od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold; + od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold; + break; + case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET: + od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold; + od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold; + break; + case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE: + od_min_setting = overdrive_lowerlimits->FanTargetTemperature; + od_max_setting = overdrive_upperlimits->FanTargetTemperature; + break; + case PP_OD_FEATURE_FAN_MINIMUM_PWM: + od_min_setting = overdrive_lowerlimits->FanMinimumPwm; + od_max_setting = overdrive_upperlimits->FanMinimumPwm; + break; + default: + od_min_setting = od_max_setting = INT_MAX; + break; + } + + if (min) + *min = od_min_setting; + if (max) + *max = od_max_setting; +} + static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; struct smu_14_0_dpm_table *single_dpm_table; struct smu_14_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; int i, curr_freq, size = 0; + int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1170,6 +1264,183 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, "*" : ""); break; + case SMU_OD_SCLK: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_GFXCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_SCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + break; + + case SMU_OD_MCLK: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_UCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_MCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n", + od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); + break; + + case SMU_OD_VDDGFX_OFFSET: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "%dmV\n", + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]); + break; + + case SMU_OD_FAN_CURVE: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n"); + for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) + size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n", + i, + (int)od_table->OverDriveTable.FanLinearTempPoints[i], + (int)od_table->OverDriveTable.FanLinearPwmPoints[i]); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n", + min_value, max_value); + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_PWM, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n", + min_value, max_value); + + break; + + case SMU_OD_ACOUSTIC_LIMIT: 
+ if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.AcousticLimitRpmThreshold); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_ACOUSTIC_TARGET: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.AcousticTargetRpmThreshold); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_TARGET, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_TARGET_TEMPERATURE: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanTargetTemperature); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_TARGET_TEMPERATURE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_MINIMUM_PWM: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanMinimumPwm); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_MINIMUM_PWM, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_RANGE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && + !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && + !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMIN, + &min_value, + NULL); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMAX, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMIN, + &min_value, + NULL); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMAX, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n", + min_value, max_value); + } + break; + default: break; } @@ -1411,7 +1682,27 @@ static int smu_v14_0_2_get_power_limit(struct 
smu_context *smu, uint32_t *max_power_limit, uint32_t *min_power_limit) { - // TODO + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + CustomSkuTable_t *skutable = &pptable->CustomSkuTable; + uint32_t power_limit; + uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; + + if (smu_v14_0_get_current_power_limit(smu, &power_limit)) + power_limit = smu->adev->pm.ac_power ? + skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] : + skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0]; + + if (current_power_limit) + *current_power_limit = power_limit; + if (default_power_limit) + *default_power_limit = power_limit; + + if (max_power_limit) + *max_power_limit = msg_limit; + + if (min_power_limit) + *min_power_limit = 0; return 0; } @@ -1570,10 +1861,14 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - return smu_cmn_send_smc_msg_with_param(smu, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type, NULL); + if (!ret) + smu->workload_mask = 1 << workload_type; + + return ret; } static int smu_v14_0_2_baco_enter(struct smu_context *smu) @@ -1917,6 +2212,594 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); } +static void smu_v14_0_2_dump_od_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + struct amdgpu_device *adev = smu->adev; + + dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); +} + +static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret; + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + true); + if (ret) + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + + return ret; +} + +static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE | + OD_OPS_SUPPORT_FAN_CURVE_SET | + OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE | + OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET | + OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE | + OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET | + OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | + OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; +} + +static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret; + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + false); + if (ret) + dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); + + return ret; +} + +static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu) +{ + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; + OverDriveTableExternal_t *boot_od_table = + (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table; + OverDriveTableExternal_t *user_od_table = + (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table; + OverDriveTableExternal_t user_od_table_bak; + int ret; + int i; + + 
ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table); + if (ret) + return ret; + + smu_v14_0_2_dump_od_table(smu, boot_od_table); + + memcpy(od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + + /* + * For S3/S4/Runpm resume, we need to setup those overdrive tables again, + * but we have to preserve user defined values in "user_od_table". + */ + if (!smu->adev->in_suspend) { + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + smu->user_dpm_profile.user_od = false; + } else if (smu->user_dpm_profile.user_od) { + memcpy(&user_od_table_bak, + user_od_table, + sizeof(OverDriveTableExternal_t)); + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + user_od_table->OverDriveTable.GfxclkFmin = + user_od_table_bak.OverDriveTable.GfxclkFmin; + user_od_table->OverDriveTable.GfxclkFmax = + user_od_table_bak.OverDriveTable.GfxclkFmax; + user_od_table->OverDriveTable.UclkFmin = + user_od_table_bak.OverDriveTable.UclkFmin; + user_od_table->OverDriveTable.UclkFmax = + user_od_table_bak.OverDriveTable.UclkFmax; + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = + user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i]; + for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) { + user_od_table->OverDriveTable.FanLinearTempPoints[i] = + user_od_table_bak.OverDriveTable.FanLinearTempPoints[i]; + user_od_table->OverDriveTable.FanLinearPwmPoints[i] = + user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i]; + } + user_od_table->OverDriveTable.AcousticLimitRpmThreshold = + user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold; + user_od_table->OverDriveTable.AcousticTargetRpmThreshold = + user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold; + user_od_table->OverDriveTable.FanTargetTemperature = + user_od_table_bak.OverDriveTable.FanTargetTemperature; + user_od_table->OverDriveTable.FanMinimumPwm = + user_od_table_bak.OverDriveTable.FanMinimumPwm; + } + + smu_v14_0_2_set_supported_od_feature_mask(smu); + + return 0; +} + +static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = table_context->overdrive_table; + OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table; + int res; + + user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | + BIT(PP_OD_FEATURE_UCLK_BIT) | + BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | + BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table); + user_od_table->OverDriveTable.FeatureCtrlMask = 0; + if (res == 0) + memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t)); + + return res; +} + +static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *boot_overdrive_table = + (OverDriveTableExternal_t *)table_context->boot_overdrive_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + struct amdgpu_device *adev = smu->adev; + int i; + + switch (input) { + case PP_OD_EDIT_FAN_CURVE: + for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) { + od_table->OverDriveTable.FanLinearTempPoints[i] = + boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i]; + od_table->OverDriveTable.FanLinearPwmPoints[i] = + boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i]; + } + 
od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_ACOUSTIC_LIMIT: + od_table->OverDriveTable.AcousticLimitRpmThreshold = + boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_ACOUSTIC_TARGET: + od_table->OverDriveTable.AcousticTargetRpmThreshold = + boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_FAN_TARGET_TEMPERATURE: + od_table->OverDriveTable.FanTargetTemperature = + boot_overdrive_table->OverDriveTable.FanTargetTemperature; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_FAN_MINIMUM_PWM: + od_table->OverDriveTable.FanMinimumPwm = + boot_overdrive_table->OverDriveTable.FanMinimumPwm; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + default: + dev_info(adev->dev, "Invalid table index: %ld\n", input); + return -EINVAL; + } + + return 0; +} + +static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long input[], + uint32_t size) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + struct amdgpu_device *adev = smu->adev; + uint32_t offset_of_voltageoffset; + int32_t minimum, maximum; + uint32_t feature_ctrlmask; + int i, ret = 0; + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMIN, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + case 1: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMAX, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { + dev_err(adev->dev, + "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.GfxclkFmin, + 
(uint32_t)od_table->OverDriveTable.GfxclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMIN, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + case 1: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMAX, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) { + dev_err(adev->dev, + "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.UclkFmin, + (uint32_t)od_table->OverDriveTable.UclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_VDDGFX_OFFSET: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + dev_warn(adev->dev, "Gfx offset setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_CURVE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 || + input[0] < 0) + return -EINVAL; + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_TEMP, + &minimum, + &maximum); + if (input[1] < minimum || + input[1] > maximum) { + dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n", + input[1], minimum, maximum); + return -EINVAL; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_PWM, + &minimum, + &maximum); + if (input[2] < minimum || + input[2] > maximum) { + dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n", + input[2], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1]; + od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2]; + od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR; + 
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_ACOUSTIC_LIMIT: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_ACOUSTIC_TARGET: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_TARGET, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_TARGET_TEMPERATURE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_TARGET_TEMPERATURE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanTargetTemperature = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_MINIMUM_PWM: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_MINIMUM_PWM, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanMinimumPwm = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_RESTORE_DEFAULT_TABLE: + if (size == 1) { + ret = smu_v14_0_2_od_restore_table_single(smu, input[0]); + if (ret) + return ret; + } else { + feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask; + memcpy(od_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t)); + od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask; + } + fallthrough; + case PP_OD_COMMIT_DPM_TABLE: + /* + * The member below instructs PMFW the settings focused in + * this single operation. + * `uint32_t FeatureCtrlMask;` + * It does not contain actual informations about user's custom + * settings. 
Thus we do not cache it. + */ + offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary); + if (memcmp((u8 *)od_table + offset_of_voltageoffset, + table_context->user_overdrive_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) { + smu_v14_0_2_dump_od_table(smu, od_table); + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + + od_table->OverDriveTable.FeatureCtrlMask = 0; + memcpy(table_context->user_overdrive_table + offset_of_voltageoffset, + (u8 *)od_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset); + + if (!memcmp(table_context->user_overdrive_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t))) + smu->user_dpm_profile.user_od = false; + else + smu->user_dpm_profile.user_od = true; + } + break; + + default: + return -ENOSYS; + } + + return ret; +} + +static int smu_v14_0_2_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + int ret = 0; + + if (limit_type != SMU_DEFAULT_PPT_LIMIT) + return -EINVAL; + + if (limit <= msg_limit) { + if (smu->current_power_limit > msg_limit) { + od_table->OverDriveTable.Ppt = 0; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT; + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + } + return smu_v14_0_set_power_limit(smu, limit_type, limit); + } else if (smu->od_enabled) { + ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit); + if (ret) + return ret; + + od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT; + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + + smu->current_power_limit = limit; + } else { + return -EINVAL; + } + + return 0; +} + static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, @@ -1955,13 +2838,16 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, .get_gpu_metrics = smu_v14_0_2_get_gpu_metrics, .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, + .set_default_od_settings = smu_v14_0_2_set_default_od_settings, + .restore_user_od_settings = smu_v14_0_2_restore_user_od_settings, + .od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table, .init_pptable_microcode = smu_v14_0_init_pptable_microcode, .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk, .set_performance_level = smu_v14_0_set_performance_level, .gfx_off_control = smu_v14_0_gfx_off_control, .get_unique_id = smu_v14_0_2_get_unique_id, .get_power_limit = smu_v14_0_2_get_power_limit, - .set_power_limit = smu_v14_0_set_power_limit, + .set_power_limit = smu_v14_0_2_set_power_limit, .set_power_source = 
smu_v14_0_set_power_source, .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index d794c076bc2424..47da848fa3fc1d 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -11,6 +11,8 @@ ast-y := \ ast_main.o \ ast_mm.o \ ast_mode.o \ - ast_post.o + ast_post.o \ + ast_sil164.o \ + ast_vga.o obj-$(CONFIG_DRM_AST) := ast.o diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index e6c7f0d64e9959..00b364f9a71e54 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -4,87 +4,94 @@ #include #include + +#include +#include +#include #include +#include + #include "ast_drv.h" -bool ast_astdp_is_connected(struct ast_device *ast) +static bool ast_astdp_is_connected(struct ast_device *ast) { - if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING)) - return false; - if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)) - return false; - if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS)) + if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD)) return false; return true; } -int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata) +static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { - struct ast_device *ast = to_ast_device(dev); - u8 i = 0, j = 0; + struct ast_device *ast = data; + size_t rdlen = round_up(len, 4); + int ret = 0; + unsigned int i; + + if (block > 0) + return -EIO; /* extension headers not supported */ /* - * CRD1[b5]: DP MCU FW is executing - * CRDC[b0]: DP link success - * CRDF[b0]: DP HPD - * CRE5[b0]: Host reading EDID process is done + * Protect access to I/O registers from concurrent modesetting + * by acquiring the I/O-register lock. */ - if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) && - ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) && - ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD) && - ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, - ASTDP_HOST_EDID_READ_DONE_MASK))) { - goto err_astdp_edid_not_ready; - } + mutex_lock(&ast->modeset_lock); + + /* Start reading EDID data */ + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, (u8)~AST_IO_VGACRE5_EDID_READ_DONE, 0x00); + + for (i = 0; i < rdlen; i += 4) { + unsigned int offset; + unsigned int j; + u8 ediddata[4]; + u8 vgacre4; - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, - 0x00); + offset = (i + block * EDID_LENGTH) / 4; + if (offset >= 64) { + ret = -EIO; + goto out; + } + vgacre4 = offset; - for (i = 0; i < 32; i++) { /* * CRE4[7:0]: Read-Pointer for EDID (Unit: 4bytes); valid range: 0~64 */ - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE4, - ASTDP_AND_CLEAR_MASK, (u8)i); - j = 0; + ast_set_index_reg(ast, AST_IO_VGACRI, 0xe4, vgacre4); /* * CRD7[b0]: valid flag for EDID * CRD6[b0]: mirror read pointer for EDID */ - while ((ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD7, - ASTDP_EDID_VALID_FLAG_MASK) != 0x01) || - (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD6, - ASTDP_EDID_READ_POINTER_MASK) != i)) { + for (j = 0; j < 200; ++j) { + u8 vgacrd7, vgacrd6; + /* * Delay are getting longer with each retry. - * 1. The Delays are often 2 loops when users request "Display Settings" + * + * 1. No delay on first try + * 2. 
The Delays are often 2 loops when users request "Display Settings" * of right-click of mouse. - * 2. The Delays are often longer a lot when system resume from S3/S4. + * 3. The Delays are often longer a lot when system resume from S3/S4. */ - mdelay(j+1); - - if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, - ASTDP_MCU_FW_EXECUTING) && - ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, - ASTDP_LINK_SUCCESS) && - ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))) { - goto err_astdp_jump_out_loop_of_edid; + if (j) + mdelay(j + 1); + + /* Wait for EDID offset to show up in mirror register */ + vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7); + if (vgacrd7 & AST_IO_VGACRD7_EDID_VALID_FLAG) { + vgacrd6 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd6); + if (vgacrd6 == offset) + break; } - - j++; - if (j > 200) - goto err_astdp_jump_out_loop_of_edid; + } + if (j == 200) { + ret = -EBUSY; + goto out; } - *(ediddata) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, - 0xD8, ASTDP_EDID_READ_DATA_MASK); - *(ediddata + 1) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD9, - ASTDP_EDID_READ_DATA_MASK); - *(ediddata + 2) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDA, - ASTDP_EDID_READ_DATA_MASK); - *(ediddata + 3) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDB, - ASTDP_EDID_READ_DATA_MASK); + ediddata[0] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd8); + ediddata[1] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd9); + ediddata[2] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xda); + ediddata[3] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xdb); if (i == 31) { /* @@ -96,69 +103,53 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata) * The Bytes-126 indicates the Number of extensions to * follow. 0 represents noextensions. */ - *(ediddata + 3) = *(ediddata + 3) + *(ediddata + 2); - *(ediddata + 2) = 0; + ediddata[3] = ediddata[3] + ediddata[2]; + ediddata[2] = 0; } - ediddata += 4; + memcpy(buf, ediddata, min((len - i), 4)); + buf += 4; } - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, - ASTDP_HOST_EDID_READ_DONE); +out: + /* Signal end of reading */ + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, (u8)~AST_IO_VGACRE5_EDID_READ_DONE, + AST_IO_VGACRE5_EDID_READ_DONE); - return 0; + mutex_unlock(&ast->modeset_lock); -err_astdp_jump_out_loop_of_edid: - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, - (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, - ASTDP_HOST_EDID_READ_DONE); - return (~(j+256) + 1); - -err_astdp_edid_not_ready: - if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING))) - return (~0xD1 + 1); - if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS))) - return (~0xDC + 1); - if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))) - return (~0xDF + 1); - if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, ASTDP_HOST_EDID_READ_DONE_MASK))) - return (~0xE5 + 1); - - return 0; + return ret; } /* * Launch Aspeed DP */ -void ast_dp_launch(struct drm_device *dev) +int ast_dp_launch(struct ast_device *ast) { - u32 i = 0; - u8 bDPExecute = 1; - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; + unsigned int i = 10; - // Wait one second then timeout. - while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) != - ASTDP_MCU_FW_EXECUTING) { - i++; - // wait 100 ms - msleep(100); + while (i) { + u8 vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); - if (i >= 10) { - // DP would not be ready. 
- bDPExecute = 0; + if (vgacrd1 & AST_IO_VGACRD1_MCU_FW_EXECUTING) break; - } + --i; + msleep(100); } - - if (!bDPExecute) + if (!i) { drm_err(dev, "Wait DPMCU executing timeout\n"); + return -ENODEV; + } - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, - (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, - ASTDP_HOST_EDID_READ_DONE); + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, + (u8) ~AST_IO_VGACRE5_EDID_READ_DONE, + AST_IO_VGACRE5_EDID_READ_DONE); + + return 0; } -bool ast_dp_power_is_on(struct ast_device *ast) +static bool ast_dp_power_is_on(struct ast_device *ast) { u8 vgacre3; @@ -167,7 +158,7 @@ bool ast_dp_power_is_on(struct ast_device *ast) return !(vgacre3 & AST_DP_PHY_SLEEP); } -void ast_dp_power_on_off(struct drm_device *dev, bool on) +static void ast_dp_power_on_off(struct drm_device *dev, bool on) { struct ast_device *ast = to_ast_device(dev); // Read and Turn off DP PHY sleep @@ -179,11 +170,29 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on) // DP Power on/off ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3); + + msleep(50); } +static void ast_dp_link_training(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + int i; + + for (i = 0; i < 10; i++) { + u8 vgacrdc; + + if (i) + msleep(100); + vgacrdc = ast_get_index_reg(ast, AST_IO_VGACRI, 0xdc); + if (vgacrdc & AST_IO_VGACRDC_LINK_SUCCESS) + return; + } + drm_err(dev, "Link training failed\n"); +} -void ast_dp_set_on_off(struct drm_device *dev, bool on) +static void ast_dp_set_on_off(struct drm_device *dev, bool on) { struct ast_device *ast = to_ast_device(dev); u8 video_on_off = on; @@ -192,21 +201,17 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on) // Video On/Off ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on); - // If DP plug in and link successful then check video on / off status - if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) && - ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)) { - video_on_off <<= 4; - while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, + video_on_off <<= 4; + while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) { - // wait 1 ms - mdelay(1); - if (++i > 200) - break; - } + // wait 1 ms + mdelay(1); + if (++i > 200) + break; } } -void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode) +static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode) { struct ast_device *ast = to_ast_device(crtc->dev); @@ -279,3 +284,188 @@ void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mo ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx); } + +static void ast_wait_for_vretrace(struct ast_device *ast) +{ + unsigned long timeout = jiffies + HZ; + u8 vgair1; + + do { + vgair1 = ast_io_read8(ast, AST_IO_VGAIR1_R); + } while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout)); +} + +/* + * Encoder + */ + +static const struct drm_encoder_funcs ast_astdp_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_crtc *crtc = crtc_state->crtc; + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info 
*vbios_mode_info = &ast_crtc_state->vbios_mode_info; + + ast_dp_set_mode(crtc, vbios_mode_info); +} + +static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct drm_device *dev = encoder->dev; + struct ast_device *ast = to_ast_device(dev); + struct ast_connector *ast_connector = &ast->output.astdp.connector; + + if (ast_connector->physical_status == connector_status_connected) { + ast_dp_power_on_off(dev, AST_DP_POWER_ON); + ast_dp_link_training(ast); + + ast_wait_for_vretrace(ast); + ast_dp_set_on_off(dev, 1); + } +} + +static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct drm_device *dev = encoder->dev; + + ast_dp_set_on_off(dev, 0); + ast_dp_power_on_off(dev, AST_DP_POWER_OFF); +} + +static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = { + .atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set, + .atomic_enable = ast_astdp_encoder_helper_atomic_enable, + .atomic_disable = ast_astdp_encoder_helper_atomic_disable, +}; + +/* + * Connector + */ + +static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + int count; + + if (ast_connector->physical_status == connector_status_connected) { + struct ast_device *ast = to_ast_device(connector->dev); + const struct drm_edid *drm_edid; + + drm_edid = drm_edid_read_custom(connector, ast_astdp_read_edid_block, ast); + drm_edid_connector_update(connector, drm_edid); + count = drm_edid_connector_add_modes(connector); + drm_edid_free(drm_edid); + } else { + drm_edid_connector_update(connector, NULL); + + /* + * There's no EDID data without a connected monitor. Set BMC- + * compatible modes in this case. The XGA default resolution + * should work well for all BMCs. 
+ */ + count = drm_add_modes_noedid(connector, 4096, 4096); + if (count) + drm_set_preferred_mode(connector, 1024, 768); + } + + return count; +} + +static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + struct drm_device *dev = connector->dev; + struct ast_device *ast = to_ast_device(connector->dev); + enum drm_connector_status status = connector_status_disconnected; + bool power_is_on; + + mutex_lock(&ast->modeset_lock); + + power_is_on = ast_dp_power_is_on(ast); + if (!power_is_on) + ast_dp_power_on_off(dev, true); + + if (ast_astdp_is_connected(ast)) + status = connector_status_connected; + + if (!power_is_on && status == connector_status_disconnected) + ast_dp_power_on_off(dev, false); + + mutex_unlock(&ast->modeset_lock); + + if (status != ast_connector->physical_status) + ++connector->epoch_counter; + ast_connector->physical_status = status; + + return connector_status_connected; +} + +static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = { + .get_modes = ast_astdp_connector_helper_get_modes, + .detect_ctx = ast_astdp_connector_helper_detect_ctx, +}; + +static const struct drm_connector_funcs ast_astdp_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector) +{ + int ret; + + ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + + return 0; +} + +int ast_astdp_output_init(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.astdp.encoder; + struct ast_connector *ast_connector = &ast->output.astdp.connector; + struct drm_connector *connector = &ast_connector->base; + int ret; + + ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + if (ret) + return ret; + drm_encoder_helper_add(encoder, &ast_astdp_encoder_helper_funcs); + + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_astdp_connector_init(dev, connector); + if (ret) + return ret; + ast_connector->physical_status = connector->status; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index 9a4c3a0963f996..e4c636f4508207 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -4,6 +4,11 @@ #include #include +#include +#include +#include +#include + #include "ast_drv.h" MODULE_FIRMWARE("ast_dp501_fw.bin"); @@ -170,7 +175,7 @@ static void clear_cmd(struct ast_device *ast) } #endif -void ast_set_dp501_video_output(struct drm_device *dev, u8 mode) +static void ast_set_dp501_video_output(struct drm_device *dev, u8 mode) { ast_write_cmd(dev, 0x40); ast_write_data(dev, mode); @@ -272,7 +277,7 @@ static bool 
ast_launch_m68k(struct drm_device *dev) return true; } -bool ast_dp501_is_connected(struct ast_device *ast) +static bool ast_dp501_is_connected(struct ast_device *ast) { u32 boot_address, offset, data; @@ -313,32 +318,30 @@ bool ast_dp501_is_connected(struct ast_device *ast) return true; } -bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) +static int ast_dp512_read_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { - struct ast_device *ast = to_ast_device(dev); - u32 i, boot_address, offset, data; - u32 *pEDIDidx; + struct ast_device *ast = data; + size_t rdlen = round_up(len, 4); + u32 i, boot_address, offset, ediddata; - if (!ast_dp501_is_connected(ast)) - return false; + if (block > (512 / EDID_LENGTH)) + return -EIO; + + offset = AST_DP501_EDID_DATA + block * EDID_LENGTH; if (ast->config_mode == ast_use_p2a) { boot_address = get_fw_base(ast); - /* Read EDID */ - offset = AST_DP501_EDID_DATA; - for (i = 0; i < 128; i += 4) { - data = ast_mindwm(ast, boot_address + offset + i); - pEDIDidx = (u32 *)(ediddata + i); - *pEDIDidx = data; + for (i = 0; i < rdlen; i += 4) { + ediddata = ast_mindwm(ast, boot_address + offset + i); + memcpy(buf, &ediddata, min((len - i), 4)); + buf += 4; } } else { - /* Read EDID */ - offset = AST_DP501_EDID_DATA; - for (i = 0; i < 128; i += 4) { - data = readl(ast->dp501_fw_buf + offset + i); - pEDIDidx = (u32 *)(ediddata + i); - *pEDIDidx = data; + for (i = 0; i < rdlen; i += 4) { + ediddata = readl(ast->dp501_fw_buf + offset + i); + memcpy(buf, &ediddata, min((len - i), 4)); + buf += 4; } } @@ -470,3 +473,144 @@ void ast_init_3rdtx(struct drm_device *dev) } } } + +/* + * Encoder + */ + +static const struct drm_encoder_funcs ast_dp501_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct drm_device *dev = encoder->dev; + + ast_set_dp501_video_output(dev, 1); +} + +static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct drm_device *dev = encoder->dev; + + ast_set_dp501_video_output(dev, 0); +} + +static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = { + .atomic_enable = ast_dp501_encoder_helper_atomic_enable, + .atomic_disable = ast_dp501_encoder_helper_atomic_disable, +}; + +/* + * Connector + */ + +static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + int count; + + if (ast_connector->physical_status == connector_status_connected) { + struct ast_device *ast = to_ast_device(connector->dev); + const struct drm_edid *drm_edid; + + drm_edid = drm_edid_read_custom(connector, ast_dp512_read_edid_block, ast); + drm_edid_connector_update(connector, drm_edid); + count = drm_edid_connector_add_modes(connector); + drm_edid_free(drm_edid); + } else { + drm_edid_connector_update(connector, NULL); + + /* + * There's no EDID data without a connected monitor. Set BMC- + * compatible modes in this case. The XGA default resolution + * should work well for all BMCs. 
+ */ + count = drm_add_modes_noedid(connector, 4096, 4096); + if (count) + drm_set_preferred_mode(connector, 1024, 768); + } + + return count; +} + +static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + struct ast_device *ast = to_ast_device(connector->dev); + enum drm_connector_status status = connector_status_disconnected; + + if (ast_dp501_is_connected(ast)) + status = connector_status_connected; + + if (status != ast_connector->physical_status) + ++connector->epoch_counter; + ast_connector->physical_status = status; + + return connector_status_connected; +} + +static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = { + .get_modes = ast_dp501_connector_helper_get_modes, + .detect_ctx = ast_dp501_connector_helper_detect_ctx, +}; + +static const struct drm_connector_funcs ast_dp501_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector) +{ + int ret; + + ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + + return 0; +} + +int ast_dp501_output_init(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.dp501.encoder; + struct ast_connector *ast_connector = &ast->output.dp501.connector; + struct drm_connector *connector = &ast_connector->base; + int ret; + + ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + if (ret) + return ret; + drm_encoder_helper_add(encoder, &ast_dp501_encoder_helper_funcs); + + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_dp501_connector_init(dev, connector); + if (ret) + return ret; + ast_connector->physical_status = connector->status; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 225817087b4d2f..3a908bb015fe33 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -287,9 +287,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - regs = pcim_iomap(pdev, 1, 0); - if (!regs) - return -EIO; + regs = pcim_iomap_region(pdev, 1, "ast"); + if (IS_ERR(regs)) + return PTR_ERR(regs); if (pdev->revision >= 0x40) { /* @@ -311,9 +311,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (len < AST_IO_MM_LENGTH) return -EIO; - ioregs = pcim_iomap(pdev, 2, 0); - if (!ioregs) - return -EIO; + ioregs = pcim_iomap_region(pdev, 2, "ast"); + if (IS_ERR(ioregs)) + return PTR_ERR(ioregs); } else { /* * Anything else is best effort. 
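
[Editor's aside, not part of the patch: the DP501 and ASTDP changes above replace one-shot 128-byte EDID reads with drm_edid_read_custom(), which calls a driver-supplied block-read callback. A minimal sketch of that callback contract follows; the names my_device and my_edid_read_block are placeholders invented for illustration, and the cached-EDID context is an assumption, not how the AST driver sources its data. The callback gets an opaque context pointer, a destination buffer, the EDID block index, and the requested length, and returns 0 on success or a negative errno.]

	#include <linux/errno.h>
	#include <linux/minmax.h>
	#include <linux/string.h>
	#include <drm/drm_edid.h>

	/* Hypothetical device context; stands in for struct ast_device above. */
	struct my_device {
		u8 edid_shadow[EDID_LENGTH];	/* base EDID block cached elsewhere */
	};

	/* Block-read callback with the signature expected by drm_edid_read_custom(). */
	static int my_edid_read_block(void *context, u8 *buf, unsigned int block, size_t len)
	{
		struct my_device *mydev = context;

		if (block > 0)
			return -EIO;	/* this sketch serves only the base block */

		memcpy(buf, mydev->edid_shadow, min_t(size_t, len, EDID_LENGTH));

		return 0;	/* 0 on success, negative errno on failure */
	}

	/*
	 * Typical use from a connector's .get_modes helper (sketch):
	 *
	 *	drm_edid = drm_edid_read_custom(connector, my_edid_read_block, mydev);
	 *	drm_edid_connector_update(connector, drm_edid);
	 *	count = drm_edid_connector_add_modes(connector);
	 *	drm_edid_free(drm_edid);
	 */

[End of aside; the patch resumes below.]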
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 47bab5596c16ed..91fe07cf7b07e3 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -147,18 +147,19 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane) } /* - * BMC + * Connector */ -struct ast_bmc_connector { +struct ast_connector { struct drm_connector base; - struct drm_connector *physical_connector; + + enum drm_connector_status physical_status; }; -static inline struct ast_bmc_connector * -to_ast_bmc_connector(struct drm_connector *connector) +static inline struct ast_connector * +to_ast_connector(struct drm_connector *connector) { - return container_of(connector, struct ast_bmc_connector, base); + return container_of(connector, struct ast_connector, base); } /* @@ -192,24 +193,20 @@ struct ast_device { struct { struct { struct drm_encoder encoder; - struct drm_connector connector; + struct ast_connector connector; } vga; struct { struct drm_encoder encoder; - struct drm_connector connector; + struct ast_connector connector; } sil164; struct { struct drm_encoder encoder; - struct drm_connector connector; + struct ast_connector connector; } dp501; struct { struct drm_encoder encoder; - struct drm_connector connector; + struct ast_connector connector; } astdp; - struct { - struct drm_encoder encoder; - struct ast_bmc_connector bmc_connector; - } bmc; } output; bool support_wide_screen; @@ -460,21 +457,17 @@ void ast_post_gpu(struct drm_device *dev); u32 ast_mindwm(struct ast_device *ast, u32 r); void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); void ast_patch_ahb_2500(void __iomem *regs); + +int ast_vga_output_init(struct ast_device *ast); +int ast_sil164_output_init(struct ast_device *ast); + /* ast dp501 */ -void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); -bool ast_dp501_is_connected(struct ast_device *ast); -bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); -u8 ast_get_dp501_max_clk(struct drm_device *dev); void ast_init_3rdtx(struct drm_device *dev); +int ast_dp501_output_init(struct ast_device *ast); /* aspeed DP */ -bool ast_astdp_is_connected(struct ast_device *ast); -int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata); -void ast_dp_launch(struct drm_device *dev); -bool ast_dp_power_is_on(struct ast_device *ast); -void ast_dp_power_on_off(struct drm_device *dev, bool no); -void ast_dp_set_on_off(struct drm_device *dev, bool no); -void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode); +int ast_dp_launch(struct ast_device *ast); +int ast_astdp_output_init(struct ast_device *ast); #endif diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 0637abb70361c9..d836f2a4f9f3e0 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -115,8 +115,10 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) } else if (IS_AST_GEN7(ast)) { if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) == ASTDP_DPMCU_TX) { - ast->tx_chip_types = AST_TX_ASTDP_BIT; - ast_dp_launch(&ast->base); + int ret = ast_dp_launch(ast); + + if (!ret) + ast->tx_chip_types = AST_TX_ASTDP_BIT; } } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 049ee1477c3375..ed496fb32bf349 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -34,10 +34,8 @@ #include #include -#include #include #include -#include 
#include #include #include @@ -47,7 +45,6 @@ #include #include -#include "ast_ddc.h" #include "ast_drv.h" #include "ast_tables.h" @@ -1310,571 +1307,6 @@ static int ast_crtc_init(struct drm_device *dev) return 0; } -/* - * VGA Encoder - */ - -static const struct drm_encoder_funcs ast_vga_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -/* - * VGA Connector - */ - -static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = { - .get_modes = drm_connector_helper_get_modes, - .detect_ctx = drm_connector_helper_detect_from_ddc, -}; - -static const struct drm_connector_funcs ast_vga_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector) -{ - struct ast_device *ast = to_ast_device(dev); - struct i2c_adapter *ddc; - int ret; - - ddc = ast_ddc_create(ast); - if (IS_ERR(ddc)) { - ret = PTR_ERR(ddc); - drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); - return ret; - } - - ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs, - DRM_MODE_CONNECTOR_VGA, ddc); - if (ret) - return ret; - - drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs); - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - - return 0; -} - -static int ast_vga_output_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.vga.encoder; - struct drm_connector *connector = &ast->output.vga.connector; - int ret; - - ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - if (ret) - return ret; - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_vga_connector_init(dev, connector); - if (ret) - return ret; - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) - return ret; - - return 0; -} - -/* - * SIL164 Encoder - */ - -static const struct drm_encoder_funcs ast_sil164_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -/* - * SIL164 Connector - */ - -static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = { - .get_modes = drm_connector_helper_get_modes, - .detect_ctx = drm_connector_helper_detect_from_ddc, -}; - -static const struct drm_connector_funcs ast_sil164_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector) -{ - struct ast_device *ast = to_ast_device(dev); - struct i2c_adapter *ddc; - int ret; - - ddc = ast_ddc_create(ast); - if (IS_ERR(ddc)) { - ret = PTR_ERR(ddc); - drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); - return ret; - } - - ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs, - DRM_MODE_CONNECTOR_DVII, ddc); - if (ret) - return ret; - - drm_connector_helper_add(connector, 
&ast_sil164_connector_helper_funcs); - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - - return 0; -} - -static int ast_sil164_output_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.sil164.encoder; - struct drm_connector *connector = &ast->output.sil164.connector; - int ret; - - ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - if (ret) - return ret; - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_sil164_connector_init(dev, connector); - if (ret) - return ret; - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) - return ret; - - return 0; -} - -/* - * DP501 Encoder - */ - -static const struct drm_encoder_funcs ast_dp501_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder, - struct drm_atomic_state *state) -{ - struct drm_device *dev = encoder->dev; - - ast_set_dp501_video_output(dev, 1); -} - -static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder, - struct drm_atomic_state *state) -{ - struct drm_device *dev = encoder->dev; - - ast_set_dp501_video_output(dev, 0); -} - -static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = { - .atomic_enable = ast_dp501_encoder_helper_atomic_enable, - .atomic_disable = ast_dp501_encoder_helper_atomic_disable, -}; - -/* - * DP501 Connector - */ - -static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector) -{ - void *edid; - bool succ; - int count; - - edid = kmalloc(EDID_LENGTH, GFP_KERNEL); - if (!edid) - goto err_drm_connector_update_edid_property; - - succ = ast_dp501_read_edid(connector->dev, edid); - if (!succ) - goto err_kfree; - - drm_connector_update_edid_property(connector, edid); - count = drm_add_edid_modes(connector, edid); - kfree(edid); - - return count; - -err_kfree: - kfree(edid); -err_drm_connector_update_edid_property: - drm_connector_update_edid_property(connector, NULL); - return 0; -} - -static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector, - struct drm_modeset_acquire_ctx *ctx, - bool force) -{ - struct ast_device *ast = to_ast_device(connector->dev); - - if (ast_dp501_is_connected(ast)) - return connector_status_connected; - return connector_status_disconnected; -} - -static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = { - .get_modes = ast_dp501_connector_helper_get_modes, - .detect_ctx = ast_dp501_connector_helper_detect_ctx, -}; - -static const struct drm_connector_funcs ast_dp501_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector) -{ - int ret; - - ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); - if (ret) - return ret; - - drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | 
DRM_CONNECTOR_POLL_DISCONNECT; - - return 0; -} - -static int ast_dp501_output_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.dp501.encoder; - struct drm_connector *connector = &ast->output.dp501.connector; - int ret; - - ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - if (ret) - return ret; - drm_encoder_helper_add(encoder, &ast_dp501_encoder_helper_funcs); - - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_dp501_connector_init(dev, connector); - if (ret) - return ret; - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) - return ret; - - return 0; -} - -/* - * ASPEED Display-Port Encoder - */ - -static const struct drm_encoder_funcs ast_astdp_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct drm_crtc *crtc = crtc_state->crtc; - struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); - struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info; - - ast_dp_set_mode(crtc, vbios_mode_info); -} - -static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder, - struct drm_atomic_state *state) -{ - struct drm_device *dev = encoder->dev; - struct ast_device *ast = to_ast_device(dev); - - ast_dp_power_on_off(dev, AST_DP_POWER_ON); - ast_wait_for_vretrace(ast); - ast_dp_set_on_off(dev, 1); -} - -static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder, - struct drm_atomic_state *state) -{ - struct drm_device *dev = encoder->dev; - - ast_dp_set_on_off(dev, 0); - ast_dp_power_on_off(dev, AST_DP_POWER_OFF); -} - -static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = { - .atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set, - .atomic_enable = ast_astdp_encoder_helper_atomic_enable, - .atomic_disable = ast_astdp_encoder_helper_atomic_disable, -}; - -/* - * ASPEED Display-Port Connector - */ - -static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) -{ - void *edid; - struct drm_device *dev = connector->dev; - struct ast_device *ast = to_ast_device(dev); - - int succ; - int count; - - edid = kmalloc(EDID_LENGTH, GFP_KERNEL); - if (!edid) - goto err_drm_connector_update_edid_property; - - /* - * Protect access to I/O registers from concurrent modesetting - * by acquiring the I/O-register lock. 
- */ - mutex_lock(&ast->modeset_lock); - - succ = ast_astdp_read_edid(connector->dev, edid); - if (succ < 0) - goto err_mutex_unlock; - - mutex_unlock(&ast->modeset_lock); - - drm_connector_update_edid_property(connector, edid); - count = drm_add_edid_modes(connector, edid); - kfree(edid); - - return count; - -err_mutex_unlock: - mutex_unlock(&ast->modeset_lock); - kfree(edid); -err_drm_connector_update_edid_property: - drm_connector_update_edid_property(connector, NULL); - return 0; -} - -static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector, - struct drm_modeset_acquire_ctx *ctx, - bool force) -{ - struct drm_device *dev = connector->dev; - struct ast_device *ast = to_ast_device(connector->dev); - enum drm_connector_status status = connector_status_disconnected; - struct drm_connector_state *connector_state = connector->state; - bool is_active = false; - - mutex_lock(&ast->modeset_lock); - - if (connector_state && connector_state->crtc) { - struct drm_crtc_state *crtc_state = connector_state->crtc->state; - - if (crtc_state && crtc_state->active) - is_active = true; - } - - if (!is_active && !ast_dp_power_is_on(ast)) { - ast_dp_power_on_off(dev, true); - msleep(50); - } - - if (ast_astdp_is_connected(ast)) - status = connector_status_connected; - - if (!is_active && status == connector_status_disconnected) - ast_dp_power_on_off(dev, false); - - mutex_unlock(&ast->modeset_lock); - - return status; -} - -static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = { - .get_modes = ast_astdp_connector_helper_get_modes, - .detect_ctx = ast_astdp_connector_helper_detect_ctx, -}; - -static const struct drm_connector_funcs ast_astdp_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector) -{ - int ret; - - ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); - if (ret) - return ret; - - drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs); - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - - return 0; -} - -static int ast_astdp_output_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.astdp.encoder; - struct drm_connector *connector = &ast->output.astdp.connector; - int ret; - - ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - if (ret) - return ret; - drm_encoder_helper_add(encoder, &ast_astdp_encoder_helper_funcs); - - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_astdp_connector_init(dev, connector); - if (ret) - return ret; - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) - return ret; - - return 0; -} - -/* - * BMC virtual Connector - */ - -static const struct drm_encoder_funcs ast_bmc_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector, - struct drm_modeset_acquire_ctx *ctx, - bool force) -{ - struct ast_bmc_connector *bmc_connector = 
to_ast_bmc_connector(connector); - struct drm_connector *physical_connector = bmc_connector->physical_connector; - - /* - * Most user-space compositors cannot handle more than one connected - * connector per CRTC. Hence, we only mark the BMC as connected if the - * physical connector is disconnected. If the physical connector's status - * is connected or unknown, the BMC remains disconnected. This has no - * effect on the output of the BMC. - * - * FIXME: Remove this logic once user-space compositors can handle more - * than one connector per CRTC. The BMC should always be connected. - */ - - if (physical_connector && physical_connector->status == connector_status_disconnected) - return connector_status_connected; - - return connector_status_disconnected; -} - -static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector) -{ - return drm_add_modes_noedid(connector, 4096, 4096); -} - -static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = { - .get_modes = ast_bmc_connector_helper_get_modes, - .detect_ctx = ast_bmc_connector_helper_detect_ctx, -}; - -static const struct drm_connector_funcs ast_bmc_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int ast_bmc_connector_init(struct drm_device *dev, - struct ast_bmc_connector *bmc_connector, - struct drm_connector *physical_connector) -{ - struct drm_connector *connector = &bmc_connector->base; - int ret; - - ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL); - if (ret) - return ret; - - drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs); - - bmc_connector->physical_connector = physical_connector; - - return 0; -} - -static int ast_bmc_output_init(struct ast_device *ast, - struct drm_connector *physical_connector) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.bmc.encoder; - struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector; - struct drm_connector *connector = &bmc_connector->base; - int ret; - - ret = drm_encoder_init(dev, encoder, - &ast_bmc_encoder_funcs, - DRM_MODE_ENCODER_VIRTUAL, "ast_bmc"); - if (ret) - return ret; - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector); - if (ret) - return ret; - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) - return ret; - - return 0; -} - /* * Mode config */ @@ -1926,7 +1358,6 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = { int ast_mode_config_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; - struct drm_connector *physical_connector = NULL; int ret; ret = drmm_mutex_init(dev, &ast->modeset_lock); @@ -1971,29 +1402,22 @@ int ast_mode_config_init(struct ast_device *ast) ret = ast_vga_output_init(ast); if (ret) return ret; - physical_connector = &ast->output.vga.connector; } if (ast->tx_chip_types & AST_TX_SIL164_BIT) { ret = ast_sil164_output_init(ast); if (ret) return ret; - physical_connector = &ast->output.sil164.connector; } if (ast->tx_chip_types & AST_TX_DP501_BIT) { ret = ast_dp501_output_init(ast); if (ret) return ret; - physical_connector = &ast->output.dp501.connector; } if (ast->tx_chip_types & 
AST_TX_ASTDP_BIT) { ret = ast_astdp_output_init(ast); if (ret) return ret; - physical_connector = &ast->output.astdp.connector; } - ret = ast_bmc_output_init(ast, physical_connector); - if (ret) - return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 22f548805dfb05..65755798ab94a5 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -351,7 +351,7 @@ void ast_post_gpu(struct drm_device *dev) if (IS_AST_GEN7(ast)) { if (ast->tx_chip_types & AST_TX_ASTDP_BIT) - ast_dp_launch(dev); + ast_dp_launch(ast); } else if (ast->config_mode == ast_use_p2a) { if (IS_AST_GEN6(ast)) ast_post_chip_2500(dev); diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 75671d345057e4..040961cc1a198d 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -37,6 +37,12 @@ #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ #define AST_IO_VGACRCB_HWC_ENABLED BIT(1) +#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5) +#define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0) +#define AST_IO_VGACRDC_LINK_SUCCESS BIT(0) +#define AST_IO_VGACRDF_HPD BIT(0) +#define AST_IO_VGACRE5_EDID_READ_DONE BIT(0) + #define AST_IO_VGAIR1_R (0x5A) #define AST_IO_VGAIR1_VREFRESH BIT(3) @@ -66,18 +72,6 @@ #define AST_DP_PHY_SLEEP BIT(4) #define AST_DP_VIDEO_ENABLE BIT(0) -/* - * CRD1[b5]: DP MCU FW is executing - * CRDC[b0]: DP link success - * CRDF[b0]: DP HPD - * CRE5[b0]: Host reading EDID process is done - */ -#define ASTDP_MCU_FW_EXECUTING BIT(5) -#define ASTDP_LINK_SUCCESS BIT(0) -#define ASTDP_HPD BIT(0) -#define ASTDP_HOST_EDID_READ_DONE BIT(0) -#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0) - /* * CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE * Precondition: A. ~AST_DP_PHY_SLEEP && @@ -86,10 +80,6 @@ */ #define ASTDP_MIRROR_VIDEO_ENABLE BIT(4) -#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0) -#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0) -#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0) - /* * ASTDP setmode registers: * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp) diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c new file mode 100644 index 00000000000000..496c7120e51553 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_sil164.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: MIT + +#include +#include +#include +#include +#include + +#include "ast_ddc.h" +#include "ast_drv.h" + +/* + * Encoder + */ + +static const struct drm_encoder_funcs ast_sil164_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +/* + * Connector + */ + +static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + int count; + + if (ast_connector->physical_status == connector_status_connected) { + count = drm_connector_helper_get_modes(connector); + } else { + /* + * There's no EDID data without a connected monitor. Set BMC- + * compatible modes in this case. The XGA default resolution + * should work well for all BMCs. 
+ */ + count = drm_add_modes_noedid(connector, 4096, 4096); + if (count) + drm_set_preferred_mode(connector, 1024, 768); + } + + return count; +} + +static int ast_sil164_connector_helper_detect_ctx(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + enum drm_connector_status status; + + status = drm_connector_helper_detect_from_ddc(connector, ctx, force); + + if (status != ast_connector->physical_status) + ++connector->epoch_counter; + ast_connector->physical_status = status; + + return connector_status_connected; +} + +static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = { + .get_modes = ast_sil164_connector_helper_get_modes, + .detect_ctx = ast_sil164_connector_helper_detect_ctx, +}; + +static const struct drm_connector_funcs ast_sil164_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector) +{ + struct ast_device *ast = to_ast_device(dev); + struct i2c_adapter *ddc; + int ret; + + ddc = ast_ddc_create(ast); + if (IS_ERR(ddc)) { + ret = PTR_ERR(ddc); + drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); + return ret; + } + + ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs, + DRM_MODE_CONNECTOR_DVII, ddc); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + + return 0; +} + +int ast_sil164_output_init(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.sil164.encoder; + struct ast_connector *ast_connector = &ast->output.sil164.connector; + struct drm_connector *connector = &ast_connector->base; + int ret; + + ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_sil164_connector_init(dev, connector); + if (ret) + return ret; + ast_connector->physical_status = connector->status; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c new file mode 100644 index 00000000000000..3e815da43fbd63 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_vga.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: MIT + +#include +#include +#include +#include +#include + +#include "ast_ddc.h" +#include "ast_drv.h" + +/* + * Encoder + */ + +static const struct drm_encoder_funcs ast_vga_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +/* + * Connector + */ + +static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + int count; + + if (ast_connector->physical_status == connector_status_connected) { + count = drm_connector_helper_get_modes(connector); + } else { + /* + * There's no EDID data without a connected monitor. 
Set BMC- + * compatible modes in this case. The XGA default resolution + * should work well for all BMCs. + */ + count = drm_add_modes_noedid(connector, 4096, 4096); + if (count) + drm_set_preferred_mode(connector, 1024, 768); + } + + return count; +} + +static int ast_vga_connector_helper_detect_ctx(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct ast_connector *ast_connector = to_ast_connector(connector); + enum drm_connector_status status; + + status = drm_connector_helper_detect_from_ddc(connector, ctx, force); + + if (status != ast_connector->physical_status) + ++connector->epoch_counter; + ast_connector->physical_status = status; + + return connector_status_connected; +} + +static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = { + .get_modes = ast_vga_connector_helper_get_modes, + .detect_ctx = ast_vga_connector_helper_detect_ctx, +}; + +static const struct drm_connector_funcs ast_vga_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector) +{ + struct ast_device *ast = to_ast_device(dev); + struct i2c_adapter *ddc; + int ret; + + ddc = ast_ddc_create(ast); + if (IS_ERR(ddc)) { + ret = PTR_ERR(ddc); + drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); + return ret; + } + + ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA, ddc); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + + return 0; +} + +int ast_vga_output_init(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.vga.encoder; + struct ast_connector *ast_connector = &ast->output.vga.connector; + struct drm_connector *connector = &ast_connector->base; + int ret; + + ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_vga_connector_init(dev, connector); + if (ret) + return ret; + ast_connector->physical_status = connector->status; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index ddf1e4424ffd5f..bfa88409a7ff0f 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -36,11 +36,6 @@ static const bool verify_fast_training; -struct bridge_init { - struct i2c_client *client; - struct device_node *node; -}; - static void analogix_dp_init_dp(struct analogix_dp_device *dp) { analogix_dp_reset(dp); diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 88e4aa5830f3c7..a2e9bb485c366e 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1647,25 +1647,15 @@ static int 
anx7625_get_swing_setting(struct device *dev, { int num_regs; - if (of_get_property(dev->of_node, - "analogix,lane0-swing", &num_regs)) { - if (num_regs > DP_TX_SWING_REG_CNT) - num_regs = DP_TX_SWING_REG_CNT; - + num_regs = of_property_read_variable_u8_array(dev->of_node, "analogix,lane0-swing", + pdata->lane0_reg_data, 1, DP_TX_SWING_REG_CNT); + if (num_regs > 0) pdata->dp_lane0_swing_reg_cnt = num_regs; - of_property_read_u8_array(dev->of_node, "analogix,lane0-swing", - pdata->lane0_reg_data, num_regs); - } - - if (of_get_property(dev->of_node, - "analogix,lane1-swing", &num_regs)) { - if (num_regs > DP_TX_SWING_REG_CNT) - num_regs = DP_TX_SWING_REG_CNT; + num_regs = of_property_read_variable_u8_array(dev->of_node, "analogix,lane1-swing", + pdata->lane1_reg_data, 1, DP_TX_SWING_REG_CNT); + if (num_regs > 0) pdata->dp_lane1_swing_reg_cnt = num_regs; - of_property_read_u8_array(dev->of_node, "analogix,lane1-swing", - pdata->lane1_reg_data, num_regs); - } return 0; } diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index dee640ab1d3ad5..41f72d458487fb 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -47,7 +47,7 @@ #include #include -#include +#include #include "cdns-mhdp8546-core.h" #include "cdns-mhdp8546-hdcp.h" diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c index 5e3b8edcf79487..31832ba4017f1a 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 1e1c06fdf20645..87b8545fccc0af 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -460,6 +460,8 @@ struct it6505 { bool enable_drv_hold; const struct drm_edid *cached_edid; + + int irq; }; struct it6505_step_train_para { @@ -2624,6 +2626,8 @@ static int it6505_poweron(struct it6505 *it6505) it6505_init(it6505); it6505_lane_off(it6505); + enable_irq(it6505->irq); + return 0; } @@ -2640,6 +2644,8 @@ static int it6505_poweroff(struct it6505 *it6505) return 0; } + disable_irq_nosync(it6505->irq); + if (pdata->gpiod_reset) gpiod_set_value_cansleep(pdata->gpiod_reset, 0); @@ -3389,7 +3395,7 @@ static int it6505_i2c_probe(struct i2c_client *client) struct it6505 *it6505; struct device *dev = &client->dev; struct extcon_dev *extcon; - int err, intp_irq; + int err; it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL); if (!it6505) @@ -3430,17 +3436,18 @@ static int it6505_i2c_probe(struct i2c_client *client) it6505_parse_dt(it6505); - intp_irq = client->irq; + it6505->irq = client->irq; - if (!intp_irq) { + if (!it6505->irq) { dev_err(dev, "Failed to get INTP IRQ"); err = -ENODEV; return err; } - err = devm_request_threaded_irq(&client->dev, intp_irq, NULL, + err = devm_request_threaded_irq(&client->dev, it6505->irq, NULL, it6505_int_threaded_handler, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, + IRQF_TRIGGER_LOW | IRQF_ONESHOT | + IRQF_NO_AUTOEN, "it6505-intp", it6505); if (err) { dev_err(dev, "Failed to request INTP threaded IRQ: %d", err); diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 1a9defa15663cf..e265ab3c8c9293 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c 
@@ -422,22 +422,6 @@ static const struct drm_connector_funcs lt8912_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static enum drm_mode_status -lt8912_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - if (mode->clock > 150000) - return MODE_CLOCK_HIGH; - - if (mode->hdisplay > 1920) - return MODE_BAD_HVALUE; - - if (mode->vdisplay > 1080) - return MODE_BAD_VVALUE; - - return MODE_OK; -} - static int lt8912_connector_get_modes(struct drm_connector *connector) { const struct drm_edid *drm_edid; @@ -463,7 +447,6 @@ static int lt8912_connector_get_modes(struct drm_connector *connector) static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = { .get_modes = lt8912_connector_get_modes, - .mode_valid = lt8912_connector_mode_valid, }; static void lt8912_bridge_mode_set(struct drm_bridge *bridge, @@ -605,6 +588,23 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge) drm_bridge_hpd_disable(lt->hdmi_port); } +static enum drm_mode_status +lt8912_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + if (mode->clock > 150000) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > 1920) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > 1080) + return MODE_BAD_VVALUE; + + return MODE_OK; +} + static enum drm_connector_status lt8912_bridge_detect(struct drm_bridge *bridge) { @@ -635,6 +635,7 @@ static const struct drm_edid *lt8912_bridge_edid_read(struct drm_bridge *bridge, static const struct drm_bridge_funcs lt8912_bridge_funcs = { .attach = lt8912_bridge_attach, .detach = lt8912_bridge_detach, + .mode_valid = lt8912_bridge_mode_valid, .mode_set = lt8912_bridge_mode_set, .enable = lt8912_bridge_enable, .detect = lt8912_bridge_detect, diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 4e802b54a1cb09..4d1d40e1f1b4d1 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -34,7 +35,7 @@ struct lt9611uxc { struct device *dev; struct drm_bridge bridge; - struct drm_connector connector; + struct drm_bridge *next_bridge; struct regmap *regmap; /* Protects all accesses to registers by stopping the on-chip MCU */ @@ -120,11 +121,6 @@ static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge) return container_of(bridge, struct lt9611uxc, bridge); } -static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector) -{ - return container_of(connector, struct lt9611uxc, connector); -} - static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc) { mutex_lock(&lt9611uxc->ocm_lock); @@ -171,20 +167,14 @@ static void lt9611uxc_hpd_work(struct work_struct *work) struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work); bool connected; - if (lt9611uxc->connector.dev) { - if (lt9611uxc->connector.dev->mode_config.funcs) - drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); - } else { - - mutex_lock(&lt9611uxc->ocm_lock); - connected = lt9611uxc->hdmi_connected; - mutex_unlock(&lt9611uxc->ocm_lock); + mutex_lock(&lt9611uxc->ocm_lock); + connected = lt9611uxc->hdmi_connected; + mutex_unlock(&lt9611uxc->ocm_lock); - drm_bridge_hpd_notify(&lt9611uxc->bridge, - connected ? - connector_status_connected : - connector_status_disconnected); - } + drm_bridge_hpd_notify(&lt9611uxc->bridge, + connected ?
+ connector_status_connected : + connector_status_disconnected); } static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc) @@ -289,82 +279,13 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, return dsi; } -static int lt9611uxc_connector_get_modes(struct drm_connector *connector) -{ - struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); - const struct drm_edid *drm_edid; - int count; - - drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector); - drm_edid_connector_update(connector, drm_edid); - count = drm_edid_connector_add_modes(connector); - drm_edid_free(drm_edid); - - return count; -} - -static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector, - bool force) -{ - struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); - - return lt9611uxc->bridge.funcs->detect(&lt9611uxc->bridge); -} - -static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode); - - return lt9611uxc_mode ? MODE_OK : MODE_BAD; -} - -static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = { - .get_modes = lt9611uxc_connector_get_modes, - .mode_valid = lt9611uxc_connector_mode_valid, -}; - -static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = lt9611uxc_connector_detect, - .destroy = drm_connector_cleanup, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc) -{ - int ret; - - lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD; - - drm_connector_helper_add(&lt9611uxc->connector, - &lt9611uxc_bridge_connector_helper_funcs); - ret = drm_connector_init(bridge->dev, &lt9611uxc->connector, - &lt9611uxc_bridge_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); - if (ret) { - DRM_ERROR("Failed to initialize connector with drm\n"); - return ret; - } - - return drm_connector_attach_encoder(&lt9611uxc->connector, bridge->encoder); -} - static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); - int ret; - - if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { - ret = lt9611uxc_connector_init(bridge, lt9611uxc); - if (ret < 0) - return ret; - } - return 0; + return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge, + bridge, flags); } static enum drm_mode_status @@ -525,7 +446,7 @@ static int lt9611uxc_parse_dt(struct device *dev, lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1); - return 0; + return drm_of_find_panel_or_bridge(dev->of_node, 2, -1, NULL, &lt9611uxc->next_bridge); } static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc) diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 8d54091ec66e4e..5f05647a3beabc 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -289,13 +289,13 @@ static int nwl_dsi_config_dpi(struct nwl_dsi *dsi) nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT); nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format); - /* - * Adjusting input polarity based on the video mode results in - * a black screen so always pick active
low: - */ nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY, + dsi->mode.flags & DRM_MODE_FLAG_PVSYNC ? + NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH : NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW); nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY, + dsi->mode.flags & DRM_MODE_FLAG_PHSYNC ? + NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH : NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW); burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) && diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h index a247a8a11c7cbf..61e7d65cb1eb25 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.h +++ b/drivers/gpu/drm/bridge/nwl-dsi.h @@ -30,11 +30,11 @@ #define NWL_DSI_PIXEL_FORMAT 0x20c #define NWL_DSI_VSYNC_POLARITY 0x210 #define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0 -#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1) +#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(0) #define NWL_DSI_HSYNC_POLARITY 0x214 #define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0 -#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1) +#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(0) #define NWL_DSI_VIDEO_MODE 0x218 #define NWL_DSI_HFP 0x21c diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index e7e53a9e42afbd..430f8adebf9c44 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -10,7 +10,7 @@ * Tomasz Figa */ -#include +#include #include #include diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 6bb755e9f0a539..26b8d137bce096 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -6,7 +6,7 @@ * Andrzej Hajda */ -#include +#include #include #include diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index 67b8d17a722ad1..221e9a4edb40b6 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -388,15 +389,36 @@ static int dw_hdmi_close(struct snd_pcm_substream *substream) static int dw_hdmi_hw_free(struct snd_pcm_substream *substream) { - return snd_pcm_lib_free_vmalloc_buffer(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + + vfree(runtime->dma_area); + runtime->dma_area = NULL; + return 0; } static int dw_hdmi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { + struct snd_pcm_runtime *runtime = substream->runtime; + size_t size = params_buffer_bytes(params); + /* Allocate the PCM runtime buffer, which is exposed to userspace. 
*/ - return snd_pcm_lib_alloc_vmalloc_buffer(substream, - params_buffer_bytes(params)); + if (runtime->dma_area) { + if (runtime->dma_bytes >= size) + return 0; /* already large enough */ + vfree(runtime->dma_area); + } + runtime->dma_area = vzalloc(size); + if (!runtime->dma_area) + return -ENOMEM; + runtime->dma_bytes = size; + return 1; +} + +static struct page *dw_hdmi_get_page(struct snd_pcm_substream *substream, + unsigned long offset) +{ + return vmalloc_to_page(substream->runtime->dma_area + offset); } static int dw_hdmi_prepare(struct snd_pcm_substream *substream) @@ -515,7 +537,7 @@ static const struct snd_pcm_ops snd_dw_hdmi_ops = { .prepare = dw_hdmi_prepare, .trigger = dw_hdmi_trigger, .pointer = dw_hdmi_pointer, - .page = snd_pcm_lib_get_vmalloc_page, + .page = dw_hdmi_get_page, }; static int snd_dw_hdmi_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 9f2bc932c37107..0031f3c54882cf 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -138,9 +138,6 @@ struct dw_hdmi { struct platform_device *audio; struct platform_device *cec; struct device *dev; - struct clk *isfr_clk; - struct clk *iahb_clk; - struct clk *cec_clk; struct dw_hdmi_i2c *i2c; struct hdmi_data_info hdmi_data; @@ -3326,6 +3323,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, struct device_node *ddc_node; struct dw_hdmi_cec_data cec; struct dw_hdmi *hdmi; + struct clk *clk; struct resource *iores = NULL; int irq; int ret; @@ -3405,50 +3403,27 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, hdmi->regm = plat_data->regm; } - hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr"); - if (IS_ERR(hdmi->isfr_clk)) { - ret = PTR_ERR(hdmi->isfr_clk); + clk = devm_clk_get_enabled(hdmi->dev, "isfr"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret); goto err_res; } - ret = clk_prepare_enable(hdmi->isfr_clk); - if (ret) { - dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret); - goto err_res; - } - - hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb"); - if (IS_ERR(hdmi->iahb_clk)) { - ret = PTR_ERR(hdmi->iahb_clk); + clk = devm_clk_get_enabled(hdmi->dev, "iahb"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); dev_err(hdmi->dev, "Unable to get HDMI iahb clk: %d\n", ret); - goto err_isfr; - } - - ret = clk_prepare_enable(hdmi->iahb_clk); - if (ret) { - dev_err(hdmi->dev, "Cannot enable HDMI iahb clock: %d\n", ret); - goto err_isfr; + goto err_res; } - hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec"); - if (PTR_ERR(hdmi->cec_clk) == -ENOENT) { - hdmi->cec_clk = NULL; - } else if (IS_ERR(hdmi->cec_clk)) { - ret = PTR_ERR(hdmi->cec_clk); + clk = devm_clk_get_optional_enabled(hdmi->dev, "cec"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); if (ret != -EPROBE_DEFER) dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n", ret); - - hdmi->cec_clk = NULL; - goto err_iahb; - } else { - ret = clk_prepare_enable(hdmi->cec_clk); - if (ret) { - dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n", - ret); - goto err_iahb; - } + goto err_res; } /* Product and revision IDs */ @@ -3462,12 +3437,12 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, dev_err(dev, "Unsupported HDMI controller (%04x:%02x:%02x)\n", hdmi->version, prod_id0, prod_id1); ret = -ENODEV; - goto err_iahb; + goto err_res; } ret = dw_hdmi_detect_phy(hdmi); if (ret < 0) - goto err_iahb; + goto err_res; dev_info(dev, "Detected HDMI TX 
controller v%x.%03x %s HDCP (%s)\n", hdmi->version >> 12, hdmi->version & 0xfff, @@ -3479,14 +3454,14 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; - goto err_iahb; + goto err_res; } ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq, dw_hdmi_irq, IRQF_SHARED, dev_name(dev), hdmi); if (ret) - goto err_iahb; + goto err_res; /* * To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator @@ -3603,11 +3578,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, return hdmi; -err_iahb: - clk_disable_unprepare(hdmi->iahb_clk); - clk_disable_unprepare(hdmi->cec_clk); -err_isfr: - clk_disable_unprepare(hdmi->isfr_clk); err_res: i2c_put_adapter(hdmi->ddc); @@ -3627,10 +3597,6 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi) /* Disable all interrupts */ hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0); - clk_disable_unprepare(hdmi->iahb_clk); - clk_disable_unprepare(hdmi->isfr_clk); - clk_disable_unprepare(hdmi->cec_clk); - if (hdmi->i2c) i2c_del_adapter(&hdmi->i2c->adap); else diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index c4e9d96933dced..0fb02e4e7f4e5f 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -722,7 +722,12 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi, static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi) { - dsi_write(dsi, DSI_PCKHDL_CFG, CRC_RX_EN | ECC_RX_EN | BTA_EN); + u32 val = CRC_RX_EN | ECC_RX_EN | BTA_EN | EOTP_TX_EN; + + if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) + val &= ~EOTP_TX_EN; + + dsi_write(dsi, DSI_PCKHDL_CFG, val); } static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi, diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index b8b7a227addfb3..290e2532fab191 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -241,6 +241,10 @@ /* Link Training */ #define DP0_SRCCTRL 0x06a0 +#define DP0_SRCCTRL_PRE1 GENMASK(29, 28) +#define DP0_SRCCTRL_SWG1 GENMASK(25, 24) +#define DP0_SRCCTRL_PRE0 GENMASK(21, 20) +#define DP0_SRCCTRL_SWG0 GENMASK(17, 16) #define DP0_SRCCTRL_SCRMBLDIS BIT(13) #define DP0_SRCCTRL_EN810B BIT(12) #define DP0_SRCCTRL_NOTP (0 << 8) @@ -278,6 +282,8 @@ #define AUDIFDATA6 0x0720 /* DP0 Audio Info Frame Bytes 27 to 24 */ #define DP1_SRCCTRL 0x07a0 /* DP1 Control Register */ +#define DP1_SRCCTRL_PRE GENMASK(21, 20) +#define DP1_SRCCTRL_SWG GENMASK(17, 16) /* PHY */ #define DP_PHY_CTRL 0x0800 @@ -369,6 +375,7 @@ struct tc_data { u32 rev; u8 assr; + u8 pre_emphasis[2]; struct gpio_desc *sd_gpio; struct gpio_desc *reset_gpio; @@ -1090,13 +1097,17 @@ static int tc_main_link_enable(struct tc_data *tc) return ret; } - ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc)); + ret = regmap_write(tc->regmap, DP0_SRCCTRL, + tc_srcctrl(tc) | + FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) | + FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1])); if (ret) return ret; /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ ret = regmap_write(tc->regmap, DP1_SRCCTRL, (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | - ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); + ((tc->link.rate != 162000) ? 
DP0_SRCCTRL_BW27 : 0) | + FIELD_PREP(DP1_SRCCTRL_PRE, tc->pre_emphasis[1])); if (ret) return ret; @@ -1188,8 +1199,10 @@ static int tc_main_link_enable(struct tc_data *tc) goto err_dpcd_write; /* Reset voltage-swing & pre-emphasis */ - tmp[0] = tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | - DP_TRAIN_PRE_EMPH_LEVEL_0; + tmp[0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | + FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, tc->pre_emphasis[0]); + tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | + FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, tc->pre_emphasis[1]); ret = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, tmp, 2); if (ret < 0) goto err_dpcd_write; @@ -1213,7 +1226,9 @@ static int tc_main_link_enable(struct tc_data *tc) ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_AUTOCORRECT | - DP0_SRCCTRL_TP1); + DP0_SRCCTRL_TP1 | + FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) | + FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1])); if (ret) return ret; @@ -1248,7 +1263,9 @@ static int tc_main_link_enable(struct tc_data *tc) ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_AUTOCORRECT | - DP0_SRCCTRL_TP2); + DP0_SRCCTRL_TP2 | + FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) | + FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1])); if (ret) return ret; @@ -1274,7 +1291,9 @@ static int tc_main_link_enable(struct tc_data *tc) /* Clear Training Pattern, set AutoCorrect Mode = 1 */ ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) | - DP0_SRCCTRL_AUTOCORRECT); + DP0_SRCCTRL_AUTOCORRECT | + FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) | + FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1])); if (ret) return ret; @@ -2363,6 +2382,18 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc) return -EINVAL; } mode |= BIT(endpoint.port); + + if (endpoint.port == 2) { + of_property_read_u8_array(node, "toshiba,pre-emphasis", + tc->pre_emphasis, + ARRAY_SIZE(tc->pre_emphasis)); + + if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 || + tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) { + dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n"); + return -EINVAL; + } + } } if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp) { diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index 3b7cc3be2ccdbe..0b4efaca6d6823 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 84698a0b27a8ee..582cf4f73a74c7 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config index 4140303d6260ae..66e70ced796fa7 100644 --- a/drivers/gpu/drm/ci/arm64.config +++ b/drivers/gpu/drm/ci/arm64.config @@ -187,6 +187,7 @@ CONFIG_MTK_DEVAPC=y CONFIG_PWM_MTK_DISP=y CONFIG_MTK_CMDQ=y CONFIG_REGULATOR_DA9211=y +CONFIG_DRM_ANALOGIX_ANX7625=y # For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware. 
CONFIG_ARCH_TEGRA=y diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml index d6edf3635b231b..2a94f54ce4cfe0 100644 --- a/drivers/gpu/drm/ci/container.yml +++ b/drivers/gpu/drm/ci/container.yml @@ -28,6 +28,14 @@ debian/x86_64_test-vk: rules: - when: never +debian/arm64_test-vk: + rules: + - when: never + +debian/arm64_test-gl: + rules: + - when: never + fedora/x86_64_build: rules: - when: never diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index 80fb0f57ae4690..eca47d4f816ffd 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -1,13 +1,13 @@ variables: DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa - DRM_CI_COMMIT_SHA: &drm-ci-commit-sha e2b9c5a9e3e4f9b532067af8022eaef8d6fc6c00 + DRM_CI_COMMIT_SHA: &drm-ci-commit-sha d9849ac46623797a9f56fb9d46dc52460ac477de - UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm + UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git TARGET_BRANCH: drm-next - IGT_VERSION: 0df7b9b97f9da0e364f5ee30fe331004b8c86b56 + IGT_VERSION: f13702b8e4e847c56da3ef6f0969065d686049c5 - DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git + DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git DEQP_RUNNER_GIT_TAG: v0.15.0 FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs @@ -85,22 +85,24 @@ include: - project: *drm-ci-project-path ref: *drm-ci-commit-sha file: + - '/.gitlab-ci/container/gitlab-ci.yml' - '/.gitlab-ci/farm-rules.yml' + - '/.gitlab-ci/lava/lava-gitlab-ci.yml' - '/.gitlab-ci/test-source-dep.yml' - - '/.gitlab-ci/container/gitlab-ci.yml' - '/.gitlab-ci/test/gitlab-ci.yml' - - '/.gitlab-ci/lava/lava-gitlab-ci.yml' - - '/src/microsoft/ci/gitlab-ci-inc.yml' - - '/src/gallium/drivers/zink/ci/gitlab-ci-inc.yml' + - '/src/amd/ci/gitlab-ci-inc.yml' + - '/src/freedreno/ci/gitlab-ci-inc.yml' - '/src/gallium/drivers/crocus/ci/gitlab-ci-inc.yml' - - '/src/gallium/drivers/softpipe/ci/gitlab-ci-inc.yml' - '/src/gallium/drivers/llvmpipe/ci/gitlab-ci-inc.yml' - - '/src/gallium/drivers/virgl/ci/gitlab-ci-inc.yml' - '/src/gallium/drivers/nouveau/ci/gitlab-ci-inc.yml' + - '/src/gallium/drivers/softpipe/ci/gitlab-ci-inc.yml' + - '/src/gallium/drivers/virgl/ci/gitlab-ci-inc.yml' + - '/src/gallium/drivers/zink/ci/gitlab-ci-inc.yml' - '/src/gallium/frontends/lavapipe/ci/gitlab-ci-inc.yml' + - '/src/gallium/frontends/rusticl/ci/gitlab-ci.yml' - '/src/intel/ci/gitlab-ci-inc.yml' - - '/src/freedreno/ci/gitlab-ci-inc.yml' - - '/src/amd/ci/gitlab-ci-inc.yml' + - '/src/microsoft/ci/gitlab-ci-inc.yml' + - '/src/nouveau/ci/gitlab-ci-inc.yml' - '/src/virtio/ci/gitlab-ci-inc.yml' - drivers/gpu/drm/ci/image-tags.yml - drivers/gpu/drm/ci/container.yml @@ -121,8 +123,9 @@ stages: - mediatek - meson - msm + - panfrost + - powervr - rockchip - - virtio-gpu - software-driver # YAML anchors for rule conditions diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh index 79f41d7da772ab..f38836ec837c6c 100755 --- a/drivers/gpu/drm/ci/igt_runner.sh +++ b/drivers/gpu/drm/ci/igt_runner.sh @@ -20,16 +20,6 @@ cat /sys/kernel/debug/dri/*/state set -e case "$DRIVER_NAME" in - rockchip|meson) - export IGT_FORCE_DRIVER="panfrost" - ;; - mediatek) - if [ "$GPU_VERSION" = "mt8173" ]; then - export IGT_FORCE_DRIVER=${DRIVER_NAME} - elif [ "$GPU_VERSION" = "mt8183" ]; then - export IGT_FORCE_DRIVER="panfrost" - fi - ;; amdgpu|vkms) # Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module 
in /lib mv /install/modules/lib/modules/* /lib/modules/. || true @@ -80,6 +70,7 @@ igt-runner \ --igt-folder /igt/libexec/igt-gpu-tools \ --caselist $TESTLIST \ --output /results \ + -vvvv \ $IGT_SKIPS \ $IGT_FLAKES \ $IGT_FAILS \ diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml index 13eda37bdf05e0..2c340d063a9607 100644 --- a/drivers/gpu/drm/ci/image-tags.yml +++ b/drivers/gpu/drm/ci/image-tags.yml @@ -1,15 +1,15 @@ variables: - CONTAINER_TAG: "2024-05-09-mesa-uprev" + CONTAINER_TAG: "2024-08-07-mesa-uprev" DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base" DEBIAN_BASE_TAG: "${CONTAINER_TAG}" DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build" - DEBIAN_BUILD_TAG: "2024-06-10-vkms" + DEBIAN_BUILD_TAG: "${CONTAINER_TAG}" - KERNEL_ROOTFS_TAG: "2023-10-06-amd" + KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}" DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base" DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl" - DEBIAN_X86_64_TEST_GL_TAG: "${CONTAINER_TAG}" + DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}" ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}" \ No newline at end of file diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh index 0707fa706a4856..6add15083c78ad 100755 --- a/drivers/gpu/drm/ci/lava-submit.sh +++ b/drivers/gpu/drm/ci/lava-submit.sh @@ -44,6 +44,7 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \ --first-stage-init artifacts/ci-common/init-stage1.sh \ --ci-project-dir "${CI_PROJECT_DIR}" \ --device-type "${DEVICE_TYPE}" \ + --farm "${FARM}" \ --dtb-filename "${DTB}" \ --jwt-file "${S3_JWT_FILE}" \ --kernel-image-name "${KERNEL_IMAGE_NAME}" \ diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index ee908b66aad2fa..09d8447840e94f 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -10,6 +10,7 @@ .lava-test: extends: - .test-rules + timeout: "1h30m" script: # Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY - rm -rf install @@ -69,8 +70,9 @@ .baremetal-igt-arm64: extends: - .baremetal-test-arm64 - - .use-debian/arm64_test + - .use-debian/baremetal_arm64_test - .test-rules + timeout: "1h30m" variables: FDO_CI_CONCURRENT: 10 HWCI_TEST_SCRIPT: "/install/igt_runner.sh" @@ -79,7 +81,7 @@ BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS" FARM: google needs: - - debian/arm64_test + - debian/baremetal_arm64_test - job: testing:arm64 artifacts: false - igt:arm64 @@ -160,38 +162,61 @@ msm:sdm845: script: - ./install/bare-metal/cros-servo.sh -rockchip:rk3288: - extends: - - .lava-igt:arm32 +.rockchip-device: + variables: + DTB: ${DEVICE_TYPE} + BOOT_METHOD: depthcharge + +.rockchip-display: stage: rockchip variables: DRIVER_NAME: rockchip + +.rk3288: + extends: + - .lava-igt:arm32 + - .rockchip-device + variables: DEVICE_TYPE: rk3288-veyron-jaq - DTB: ${DEVICE_TYPE} - BOOT_METHOD: depthcharge - KERNEL_IMAGE_TYPE: "zimage" GPU_VERSION: rk3288 + KERNEL_IMAGE_TYPE: "zimage" RUNNER_TAG: mesa-ci-x86-64-lava-rk3288-veyron-jaq -rockchip:rk3399: +.rk3399: extends: - .lava-igt:arm64 - stage: rockchip + - .rockchip-device parallel: 2 variables: - DRIVER_NAME: rockchip DEVICE_TYPE: rk3399-gru-kevin - DTB: ${DEVICE_TYPE} - BOOT_METHOD: depthcharge - KERNEL_IMAGE_TYPE: "" GPU_VERSION: rk3399 + KERNEL_IMAGE_TYPE: "" RUNNER_TAG: mesa-ci-x86-64-lava-rk3399-gru-kevin +rockchip:rk3288: + extends: + - .rk3288 + - .rockchip-display + +panfrost:rk3288: + extends: 
+ - .rk3288 + - .panfrost-gpu + +rockchip:rk3399: + extends: + - .rk3399 + - .rockchip-display + +panfrost:rk3399: + extends: + - .rk3399 + - .panfrost-gpu + .i915: extends: - .lava-igt:x86_64 stage: i915 - timeout: "1h30m" variables: DRIVER_NAME: i915 DTB: "" @@ -280,65 +305,117 @@ amdgpu:stoney: GPU_VERSION: stoney RUNNER_TAG: mesa-ci-x86-64-lava-hp-11A-G6-EE-grunt -.mediatek: +.mediatek-device: extends: - .lava-igt:arm64 stage: mediatek variables: - DRIVER_NAME: mediatek DTB: ${DEVICE_TYPE} BOOT_METHOD: depthcharge KERNEL_IMAGE_TYPE: "" -mediatek:mt8173: +.mediatek-display: + stage: mediatek + variables: + DRIVER_NAME: mediatek + +.powervr-gpu: + stage: powervr + variables: + DRIVER_NAME: powervr + +.panfrost-gpu: + stage: panfrost + variables: + DRIVER_NAME: panfrost + +.mt8173: extends: - - .mediatek + - .mediatek-device parallel: 4 variables: DEVICE_TYPE: mt8173-elm-hana GPU_VERSION: mt8173 RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana -mediatek:mt8183: +.mt8183: extends: - - .mediatek + - .mediatek-device parallel: 3 variables: DEVICE_TYPE: mt8183-kukui-jacuzzi-juniper-sku16 GPU_VERSION: mt8183 RUNNER_TAG: mesa-ci-x86-64-lava-mt8183-kukui-jacuzzi-juniper-sku16 +mediatek:mt8173: + extends: + - .mt8173 + - .mediatek-display + +powervr:mt8173: + extends: + - .mt8173 + - .powervr-gpu + rules: + # TODO: powervr driver was merged in linux kernel, but there's no mediatek support yet + # Remove the rule once mediatek support is added for powervr + - when: never + +mediatek:mt8183: + extends: + - .mt8183 + - .mediatek-display + +panfrost:mt8183: + extends: + - .mt8183 + - .panfrost-gpu + # drm-mtk doesn't even probe yet in mainline for mt8192 .mediatek:mt8192: extends: - - .mediatek + - .mediatek-device parallel: 3 variables: DEVICE_TYPE: mt8192-asurada-spherion-r0 GPU_VERSION: mt8192 RUNNER_TAG: mesa-ci-x86-64-lava-mt8192-asurada-spherion-r0 -.meson: +.meson-device: extends: - .lava-igt:arm64 - stage: meson variables: - DRIVER_NAME: meson DTB: ${DEVICE_TYPE} BOOT_METHOD: u-boot KERNEL_IMAGE_TYPE: "image" -meson:g12b: +.meson-display: + stage: meson + variables: + DRIVER_NAME: meson + +.g12b: extends: - - .meson + - .meson-device parallel: 3 variables: DEVICE_TYPE: meson-g12b-a311d-khadas-vim3 GPU_VERSION: g12b RUNNER_TAG: mesa-ci-x86-64-lava-meson-g12b-a311d-khadas-vim3 +meson:g12b: + extends: + - .g12b + - .meson-display + +panfrost:g12b: + extends: + - .g12b + - .panfrost-gpu + virtio_gpu:none: stage: software-driver + timeout: "1h30m" variables: CROSVM_GALLIUM_DRIVER: llvmpipe DRIVER_NAME: virtio_gpu @@ -361,6 +438,7 @@ virtio_gpu:none: vkms:none: stage: software-driver + timeout: "1h30m" variables: DRIVER_NAME: vkms GPU_VERSION: none diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt index e8c2f4044a9222..8e2fed6d76a36f 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt @@ -30,6 +30,7 @@ kms_cursor_crc@cursor-random-64x64,Fail kms_cursor_crc@cursor-size-change,Fail kms_cursor_crc@cursor-sliding-64x21,Fail kms_cursor_crc@cursor-sliding-64x64,Fail +kms_cursor_edge_walk@64x64-left-edge,Fail kms_flip@flip-vs-modeset-vs-hang,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_lease@lease-uevent,Fail @@ -37,4 +38,3 @@ kms_plane@pixel-format,Fail kms_plane_cursor@primary,Fail kms_rotation_crc@primary-rotation-180,Fail perf@i915-ref-count,Fail -tools_test@tools_test,Fail diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt 
b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt index ea512ff8c352ed..e4faa96fa0003e 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt @@ -1,8 +1,20 @@ # Board Name: hp-11A-G6-EE-grunt # Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 kms_async_flips@async-flip-with-page-flip-events + +# Board Name: hp-11A-G6-EE-grunt +# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_async_flips@crc + +# Board Name: hp-11A-G6-EE-grunt +# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_plane@pixel-format-source-clamping diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt index 3a2ce45d3cb9cd..f41b3e112976ed 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt @@ -2,9 +2,9 @@ .*suspend.* # Skip driver specific tests -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -13,6 +13,7 @@ panfrost_.* gem_.* i915_.* xe_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt index 6641520ac58734..9b84f68a51229b 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt @@ -6,11 +6,11 @@ i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail -kms_async_flips@invalid-async-flip,Timeout -kms_atomic_transition@modeset-transition-fencing,Timeout kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout +kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout kms_fb_coherency@memset-crc,Crash -kms_flip@flip-vs-dpms-off-vs-modeset,Timeout +kms_flip@busy-flip,Timeout +kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail @@ -33,16 +33,20 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout kms_lease@lease-uevent,Fail kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout -kms_pm_rpm@modeset-lpsp-stress,Timeout +kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_pm_rpm@universal-planes,Timeout kms_pm_rpm@universal-planes-dpms,Timeout +kms_prop_blob@invalid-set-prop,Fail 
+kms_rotation_crc@primary-rotation-180,Timeout +kms_vblank@query-forked-hang,Timeout perf@i915-ref-count,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt index 0a76547a103d02..581f0da4d0f200 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt @@ -1,9 +1,48 @@ # Board Name: asus-C433TA-AJ0005-rammus # Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 i915_hangman@engine-engine-error + +# Board Name: asus-C433TA-AJ0005-rammus +# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 i915_hangman@gt-engine-hang + +# Board Name: asus-C433TA-AJ0005-rammus +# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_async_flips@crc + +# Board Name: asus-C433TA-AJ0005-rammus +# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_universal_plane@cursor-fb-leak + +# Board Name: asus-C433TA-AJ0005-rammus +# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_sysfs_edid_timing + +# Board Name: asus-C433TA-AJ0005-rammus +# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +i915_hangman@engine-engine-hang + +# Board Name: asus-C433TA-AJ0005-rammus +# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_pm_rpm@modeset-lpsp-stress diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt index 5663ed0420a703..5186ba3dbbc6e6 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt @@ -5,9 +5,9 @@ kms_plane_scaling@invalid-parameters # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -19,6 +19,7 @@ gem_.* i915_pm_rc6_residency.* i915_suspend.* kms_scaling_modes.* +i915_pm_rpm.* # Kernel panic drm_fdinfo.* diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt index cb010c153a6a19..4663d4d13f358b 100644 --- a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt @@ -1,6 +1,6 @@ # Board Name: asus-C523NA-A20057-coral # Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 kms_fb_coherency@memset-crc diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt index ab588e7a447ca6..4f50e0240ff4cd 100644 --- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt +++ 
b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt @@ -7,9 +7,9 @@ kms_3d # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt index 26cd62bbf30ab5..2723e283279778 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt @@ -9,11 +9,10 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail i915_pm_rpm@system-suspend-execbuf,Timeout -kms_async_flips@invalid-async-flip,Timeout -kms_atomic_transition@modeset-transition-fencing,Timeout kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout kms_fb_coherency@memset-crc,Crash -kms_flip@flip-vs-dpms-off-vs-modeset,Timeout +kms_flip@busy-flip,Timeout +kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail @@ -41,20 +40,25 @@ kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail -kms_plane_alpha_blend@constant-alpha-min,Fail kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout +kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_pm_rpm@universal-planes,Timeout kms_pm_rpm@universal-planes-dpms,Timeout +kms_prop_blob@invalid-set-prop,Fail +kms_psr2_sf@cursor-plane-update-sf,Fail kms_psr2_sf@fbc-plane-move-sf-dmg-area,Timeout kms_psr2_sf@overlay-plane-update-continuous-sf,Fail kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail +kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail +kms_psr2_sf@plane-move-sf-dmg-area,Fail kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail kms_psr2_su@page_flip-NV12,Fail kms_psr2_su@page_flip-P010,Fail -kms_psr@psr-sprite-render,Timeout +kms_rotation_crc@primary-rotation-180,Timeout kms_setmode@basic,Fail +kms_vblank@query-forked-hang,Timeout perf@i915-ref-count,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt index bb560ff1e2cd45..58a6001abb28db 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt @@ -1,6 +1,13 @@ # Board Name: asus-C436FA-Flip-hatch # Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 kms_plane_alpha_blend@constant-alpha-min + +# Board Name: asus-C436FA-Flip-hatch +# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_atomic_transition@plane-all-modeset-transition-internal-panels diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt index 93b7736fffbb53..9d753d97c9ab8d 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt @@ -3,9 +3,9 @@ kms_plane_scaling@invalid-parameters # Skip driver specific tests 
^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -19,6 +19,7 @@ i915_suspend.* xe_module_load.* api_intel_allocator.* kms_cursor_legacy.* +i915_pm_rpm.* # Kernel panic drm_fdinfo.* diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt index fca15b4879296f..4821c9adefd14e 100644 --- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt @@ -1,20 +1,16 @@ core_setmaster@master-drop-set-user,Fail +core_setmaster_vs_auth,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail -kms_async_flips@invalid-async-flip,Timeout -kms_atomic_transition@modeset-transition-fencing,Timeout -kms_big_fb@linear-16bpp-rotate-0,Fail -kms_big_fb@linear-16bpp-rotate-180,Fail -kms_big_fb@linear-32bpp-rotate-0,Fail -kms_big_fb@linear-32bpp-rotate-180,Fail -kms_big_fb@linear-8bpp-rotate-0,Fail -kms_big_fb@linear-8bpp-rotate-180,Fail -kms_big_fb@linear-max-hw-stride-32bpp-rotate-0,Fail +kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout kms_dirtyfb@default-dirtyfb-ioctl,Fail -kms_draw_crc@draw-method-render,Fail -kms_flip@flip-vs-dpms-off-vs-modeset,Timeout +kms_dirtyfb@drrs-dirtyfb-ioctl,Fail +kms_dirtyfb@fbc-dirtyfb-ioctl,Fail +kms_flip@blocking-wf_vblank,Fail +kms_flip@busy-flip,Timeout +kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail kms_flip@wf_vblank-ts-check,Fail kms_flip@wf_vblank-ts-check-interruptible,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail @@ -26,6 +22,7 @@ kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail @@ -38,19 +35,24 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout kms_frontbuffer_tracking@fbc-tiling-linear,Fail kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail kms_lease@lease-uevent,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout +kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout kms_pm_rpm@legacy-planes,Timeout kms_pm_rpm@legacy-planes-dpms,Timeout kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_pm_rpm@universal-planes,Timeout kms_pm_rpm@universal-planes-dpms,Timeout +kms_prop_blob@invalid-set-prop,Fail kms_rotation_crc@multiplane-rotation,Fail kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail kms_rotation_crc@multiplane-rotation-cropping-top,Fail +kms_rotation_crc@primary-rotation-180,Timeout +kms_vblank@query-forked-hang,Timeout perf@non-zero-reason,Timeout sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt 
b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt index 58fc424f8a4260..077886b76093e7 100644 --- a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt @@ -1,7 +1,13 @@ # Board Name: hp-x360-12b-ca0010nr-n4020-octopus # Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 core_hotunplug@unplug-rescan + +# Board Name: hp-x360-12b-ca0010nr-n4020-octopus +# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_fb_coherency@memset-crc diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt index b3226b2d9ba1a7..9c64146aed9048 100644 --- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt @@ -6,9 +6,9 @@ kms_plane_scaling@invalid-parameters # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt index d4fba4f55ec1a0..1de04a3308c45d 100644 --- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt @@ -17,10 +17,12 @@ perf@i915-ref-count,Fail perf_pmu@busy-accuracy-50,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash +prime_busy@after,Fail sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout +testdisplay,Timeout xe_module_load@force-load,Fail xe_module_load@load,Fail xe_module_load@many-reload,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt index 6cf1fed2e57511..549501e4046190 100644 --- a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt @@ -1,6 +1,6 @@ # Board Name: hp-x360-14-G1-sona # Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 prime_busy@hang diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt index f0cf8a6dda252f..6ec2f83ffe1329 100644 --- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt @@ -6,9 +6,9 @@ kms_plane_scaling@invalid-parameters # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt index 9a50e894c3e7ed..e728ccc6232619 100644 --- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt @@ -1,34 +1,39 @@ -api_intel_bb@blit-noreloc-keep-cache,Timeout +api_intel_allocator@simple-allocator,Timeout +api_intel_bb@object-reloc-keep-cache,Timeout api_intel_bb@offset-control,Timeout -api_intel_bb@render-ccs,Timeout -core_getclient,Timeout -core_hotunplug@hotreplug-lateclose,Timeout -drm_read@short-buffer-block,Timeout +core_auth@getclient-simple,Timeout +core_hotunplug@hotunbind-rebind,Timeout +debugfs_test@read_all_entries_display_on,Timeout +drm_read@invalid-buffer,Timeout drm_read@short-buffer-nonblock,Timeout 
-dumb_buffer@map-uaf,Timeout gen3_render_tiledx_blits,Timeout gen7_exec_parse@basic-allocation,Timeout gen7_exec_parse@batch-without-end,Timeout gen9_exec_parse@batch-invalid-length,Timeout gen9_exec_parse@bb-secure,Timeout +gen9_exec_parse@secure-batches,Timeout +gen9_exec_parse@shadow-peek,Timeout +gen9_exec_parse@unaligned-jump,Timeout i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail -i915_pciid,Timeout i915_query@engine-info,Timeout +i915_query@query-topology-kernel-writes,Timeout +i915_query@test-query-geometry-subslices,Timeout kms_lease@lease-uevent,Fail kms_rotation_crc@multiplane-rotation,Fail perf@i915-ref-count,Fail -perf_pmu@busy,Timeout perf_pmu@enable-race,Timeout perf_pmu@event-wait,Timeout perf_pmu@gt-awake,Timeout +perf_pmu@interrupts,Timeout perf_pmu@module-unload,Fail perf_pmu@rc6,Crash prime_mmap@test_map_unmap,Timeout +prime_mmap@test_refcounting,Timeout prime_self_import@basic-with_one_bo,Timeout -syncobj_basic@bad-destroy,Timeout +syncobj_basic@bad-flags-fd-to-handle,Timeout syncobj_eventfd@invalid-bad-pad,Timeout syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout syncobj_wait@invalid-signal-illegal-handle,Timeout @@ -37,7 +42,9 @@ syncobj_wait@multi-wait-all-submitted,Timeout syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout syncobj_wait@wait-any-complex,Timeout syncobj_wait@wait-delayed-signal,Timeout +template@A,Timeout xe_module_load@force-load,Fail xe_module_load@load,Fail +xe_module_load@many-reload,Fail xe_module_load@reload,Fail xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt index e600782ef96ac0..b47df5855e8dff 100644 --- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt @@ -12,9 +12,9 @@ kms_plane_scaling@invalid-parameters # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt index 7582d313dd9bf2..2adae2175501eb 100644 --- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt @@ -7,18 +7,10 @@ i915_module_load@resize-bar,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail i915_pm_rpm@system-suspend-execbuf,Timeout -kms_async_flips@invalid-async-flip,Timeout -kms_atomic_transition@modeset-transition-fencing,Timeout -kms_big_fb@linear-16bpp-rotate-0,Fail -kms_big_fb@linear-16bpp-rotate-180,Fail -kms_big_fb@linear-32bpp-rotate-0,Fail -kms_big_fb@linear-32bpp-rotate-180,Fail -kms_big_fb@linear-8bpp-rotate-0,Fail -kms_big_fb@linear-8bpp-rotate-180,Fail -kms_big_fb@linear-max-hw-stride-32bpp-rotate-0,Fail kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout +kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout kms_dirtyfb@default-dirtyfb-ioctl,Fail -kms_draw_crc@draw-method-render,Fail +kms_dirtyfb@fbc-dirtyfb-ioctl,Fail kms_fb_coherency@memset-crc,Crash kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail @@ -40,6 +32,7 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail 
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout kms_frontbuffer_tracking@fbc-tiling-linear,Fail kms_lease@lease-uevent,Fail kms_plane_alpha_blend@alpha-basic,Fail @@ -47,9 +40,13 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout +kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_pm_rpm@universal-planes,Timeout kms_pm_rpm@universal-planes-dpms,Timeout +kms_prop_blob@invalid-set-prop,Fail +kms_rotation_crc@primary-rotation-180,Timeout +kms_vblank@query-forked-hang,Timeout perf@i915-ref-count,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt index 1167a58c7dd10f..60b8d1c64e70b0 100644 --- a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt @@ -1,6 +1,6 @@ # Board Name: dell-latitude-5400-8665U-sarien # Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 kms_pm_rpm@modeset-lpsp-stress diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt index 20bd91525f4593..29bff8922ae1d1 100644 --- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt @@ -3,9 +3,9 @@ kms_plane_scaling@invalid-parameters # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -17,6 +17,7 @@ gem_.* i915_pm_rc6_residency.* i915_suspend.* kms_flip.* +i915_pm_rpm.* # Kernel panic drm_fdinfo.* diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt index cc5e9c1c2d5787..a14349a1967fdd 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt @@ -5,8 +5,15 @@ device_reset@unbind-reset-rebind,Fail dumb_buffer@invalid-bpp,Fail fbdev@eof,Fail fbdev@read,Fail -fbdev@unaligned-write,Fail kms_3d,Fail +kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-2-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-2-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-2-displays-3840x2160p,Fail kms_bw@linear-tiling-1-displays-1920x1080p,Fail kms_bw@linear-tiling-1-displays-2160x1440p,Fail kms_bw@linear-tiling-1-displays-2560x1440p,Fail @@ -27,4 +34,3 @@ kms_properties@get_properties-sanity-atomic,Fail kms_properties@plane-properties-atomic,Fail kms_properties@plane-properties-legacy,Fail kms_rmfb@close-fd,Fail -tools_test@tools_test,Fail diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt index 395ac046340403..2e5bf6ae25f2cd 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt @@ -1,11 +1,41 @@ # Board Name: mt8173-elm-hana # Bug Report: 
https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 core_setmaster_vs_auth + +# Board Name: mt8173-elm-hana +# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 dumb_buffer@create-clear + +# Board Name: mt8173-elm-hana +# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 fbdev@unaligned-write + +# Board Name: mt8173-elm-hana +# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 fbdev@write + +# Board Name: mt8173-elm-hana +# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@cursor-vs-flip-atomic-transitions + +# Board Name: mt8173-elm-hana +# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_prop_blob@invalid-set-prop diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt index 0c6108392140fe..8198e06344a31c 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt @@ -1,8 +1,8 @@ # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -10,6 +10,7 @@ panfrost_.* # Skip intel specific tests gem_.* i915_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt index 9ef460646d76cc..8cb2cb67853dff 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt @@ -1,8 +1,22 @@ -dumb_buffer@create-clear,Fail -dumb_buffer@create-valid-dumb,Fail +core_setmaster@master-drop-set-shared-fd,Fail +device_reset@cold-reset-bound,Fail +device_reset@reset-bound,Fail +device_reset@unbind-cold-reset-rebind,Fail +device_reset@unbind-reset-rebind,Fail +dumb_buffer@create-clear,Crash dumb_buffer@invalid-bpp,Fail -dumb_buffer@map-invalid-size,Fail -dumb_buffer@map-uaf,Fail -dumb_buffer@map-valid,Fail -panfrost_prime@gem-prime-import,Fail -tools_test@tools_test,Fail +fbdev@eof,Fail +fbdev@pan,Fail +fbdev@read,Fail +fbdev@unaligned-read,Fail +kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail +kms_bw@linear-tiling-1-displays-1920x1080p,Fail +kms_bw@linear-tiling-1-displays-3840x2160p,Fail +kms_color@invalid-gamma-lut-sizes,Fail +kms_flip@flip-vs-panning-vs-hang,Fail +kms_flip@flip-vs-suspend,Fail +kms_lease@lease-uevent,Fail +kms_properties@plane-properties-atomic,Fail +kms_rmfb@close-fd,Fail diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt new file mode 100644 index 00000000000000..df7e5ce7a03626 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt @@ -0,0 +1,20 @@ +# Board Name: mt8183-kukui-jacuzzi-juniper-sku16 +# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 100 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_bw@linear-tiling-1-displays-2560x1440p + +# Board Name: mt8183-kukui-jacuzzi-juniper-sku16 +# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 100 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_cursor_legacy@cursor-vs-flip-atomic-transitions + +# Board Name: mt8183-kukui-jacuzzi-juniper-sku16 +# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u +# Failure Rate: 100 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +fbdev@write diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt index 715b9a8f49975e..8198e06344a31c 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt @@ -1,7 +1,8 @@ # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -9,9 +10,7 @@ nouveau_.* # Skip intel specific tests gem_.* i915_.* - -# Panfrost is not a KMS driver, so skip the KMS tests -kms_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt index 9ef460646d76cc..328967d3e23d99 100644 --- a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt +++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt @@ -1,8 +1,13 @@ -dumb_buffer@create-clear,Fail -dumb_buffer@create-valid-dumb,Fail dumb_buffer@invalid-bpp,Fail -dumb_buffer@map-invalid-size,Fail -dumb_buffer@map-uaf,Fail -dumb_buffer@map-valid,Fail -panfrost_prime@gem-prime-import,Fail -tools_test@tools_test,Fail +kms_3d,Fail +kms_cursor_legacy@forked-bo,Fail +kms_cursor_legacy@forked-move,Fail +kms_cursor_legacy@single-bo,Fail +kms_cursor_legacy@single-move,Fail +kms_cursor_legacy@torture-bo,Fail +kms_cursor_legacy@torture-move,Fail +kms_lease@lease-uevent,Fail +kms_properties@connector-properties-atomic,Fail +kms_properties@connector-properties-legacy,Fail +kms_properties@get_properties-sanity-atomic,Fail +kms_properties@get_properties-sanity-non-atomic,Fail diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt index 715b9a8f49975e..8198e06344a31c 100644 --- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt +++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt @@ -1,7 +1,8 @@ # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -9,9 +10,7 @@ nouveau_.* # Skip intel specific tests gem_.* i915_.* - -# Panfrost is not a KMS driver, so skip the KMS tests -kms_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt index 6e7fd1ccd1e3a1..4ac46168eff31f 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt @@ -4,12 +4,8 @@ device_reset@unbind-cold-reset-rebind,Fail device_reset@unbind-reset-rebind,Fail dumb_buffer@invalid-bpp,Fail kms_3d,Fail -kms_cursor_legacy@forked-move,Fail -kms_cursor_legacy@single-bo,Fail kms_cursor_legacy@torture-bo,Fail -kms_cursor_legacy@torture-move,Fail kms_force_connector_basic@force-edid,Fail kms_hdmi_inject@inject-4k,Fail kms_lease@lease-uevent,Fail -msm_mapping@ring,Fail -tools_test@tools_test,Fail +msm/msm_mapping@ring,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt index ff12202abb6eed..1674c8e214d688 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt @@ -1,7 +1,7 @@ # Skip driver specific tests ^amdgpu.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -9,6 +9,7 @@ panfrost_.* # Skip intel specific tests gem_.* i915_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt index 46ca69ce2ffe8f..bd0653caf7a0bd 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt @@ -5,4 +5,3 @@ device_reset@unbind-reset-rebind,Fail dumb_buffer@invalid-bpp,Fail kms_3d,Fail kms_lease@lease-uevent,Fail -tools_test@tools_test,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt index a275584c8bbbd2..123d92cb447034 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt @@ -1,6 +1,6 @@ # Board Name: apq8096-db820c # Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 dumb_buffer@create-clear diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt index 1c45fc6c512d7d..5550be5486ed59 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt @@ -4,7 +4,7 @@ kms_cursor_legacy@all-pipes-torture-move # Skip driver specific tests ^amdgpu.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -12,6 +12,7 @@ panfrost_.* # Skip intel specific tests gem_.* i915_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. @@ -23,4 +24,4 @@ core_hotunplug.* # *** gpu fault: ttbr0=00000001030ea000 iova=0000000001074000 dir=WRITE type=PERMISSION source=1f030000 (0,0,0,0) # msm_mdp 901000.display-controller: RBBM | ME master split | status=0x701000B0 # watchdog: BUG: soft lockup - CPU#0 stuck for 26s! 
[kworker/u16:3:46] -msm_mapping@shadow +msm/msm_mapping@shadow diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt index eb7a3886d3976d..d42004cd69772e 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt @@ -3,13 +3,11 @@ device_reset@reset-bound,Fail device_reset@unbind-cold-reset-rebind,Fail device_reset@unbind-reset-rebind,Fail dumb_buffer@invalid-bpp,Fail -kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail kms_color@ctm-0-25,Fail kms_color@ctm-0-50,Fail kms_color@ctm-0-75,Fail kms_color@ctm-blue-to-red,Fail kms_color@ctm-green-to-red,Fail -kms_color@ctm-max,Fail kms_color@ctm-negative,Fail kms_color@ctm-red-to-blue,Fail kms_color@ctm-signed,Fail @@ -21,72 +19,6 @@ kms_content_protection@lic-type-1,Crash kms_content_protection@srm,Crash kms_content_protection@type1,Crash kms_content_protection@uevent,Crash -kms_cursor_crc@cursor-alpha-opaque,Fail -kms_cursor_crc@cursor-alpha-transparent,Fail -kms_cursor_crc@cursor-dpms,Fail -kms_cursor_crc@cursor-offscreen-128x128,Fail -kms_cursor_crc@cursor-offscreen-128x42,Fail -kms_cursor_crc@cursor-offscreen-256x256,Fail -kms_cursor_crc@cursor-offscreen-256x85,Fail -kms_cursor_crc@cursor-offscreen-32x10,Fail -kms_cursor_crc@cursor-offscreen-32x32,Fail -kms_cursor_crc@cursor-offscreen-512x170,Fail -kms_cursor_crc@cursor-offscreen-512x512,Fail -kms_cursor_crc@cursor-offscreen-64x21,Fail -kms_cursor_crc@cursor-offscreen-64x64,Fail -kms_cursor_crc@cursor-onscreen-128x128,Fail -kms_cursor_crc@cursor-onscreen-128x42,Fail -kms_cursor_crc@cursor-onscreen-256x256,Fail -kms_cursor_crc@cursor-onscreen-256x85,Fail -kms_cursor_crc@cursor-onscreen-32x10,Fail -kms_cursor_crc@cursor-onscreen-32x32,Fail -kms_cursor_crc@cursor-onscreen-512x170,Fail -kms_cursor_crc@cursor-onscreen-512x512,Fail -kms_cursor_crc@cursor-onscreen-64x21,Fail -kms_cursor_crc@cursor-onscreen-64x64,Fail -kms_cursor_crc@cursor-random-128x128,Fail -kms_cursor_crc@cursor-random-128x42,Fail -kms_cursor_crc@cursor-random-256x256,Fail -kms_cursor_crc@cursor-random-256x85,Fail -kms_cursor_crc@cursor-random-32x10,Fail -kms_cursor_crc@cursor-random-32x32,Fail -kms_cursor_crc@cursor-random-512x170,Fail -kms_cursor_crc@cursor-random-512x512,Fail -kms_cursor_crc@cursor-random-64x21,Fail -kms_cursor_crc@cursor-random-64x64,Fail -kms_cursor_crc@cursor-rapid-movement-128x128,Fail -kms_cursor_crc@cursor-rapid-movement-128x42,Fail -kms_cursor_crc@cursor-rapid-movement-256x256,Fail -kms_cursor_crc@cursor-rapid-movement-256x85,Fail -kms_cursor_crc@cursor-rapid-movement-32x10,Fail -kms_cursor_crc@cursor-rapid-movement-32x32,Fail -kms_cursor_crc@cursor-rapid-movement-512x170,Fail -kms_cursor_crc@cursor-rapid-movement-512x512,Fail -kms_cursor_crc@cursor-rapid-movement-64x21,Fail -kms_cursor_crc@cursor-rapid-movement-64x64,Fail -kms_cursor_crc@cursor-size-change,Fail -kms_cursor_crc@cursor-sliding-128x128,Fail -kms_cursor_crc@cursor-sliding-128x42,Fail -kms_cursor_crc@cursor-sliding-256x256,Fail -kms_cursor_crc@cursor-sliding-256x85,Fail -kms_cursor_crc@cursor-sliding-32x10,Fail -kms_cursor_crc@cursor-sliding-32x32,Fail -kms_cursor_crc@cursor-sliding-512x170,Fail -kms_cursor_crc@cursor-sliding-512x512,Fail -kms_cursor_crc@cursor-sliding-64x21,Fail -kms_cursor_crc@cursor-sliding-64x64,Fail -kms_cursor_edge_walk@128x128-left-edge,Fail -kms_cursor_edge_walk@128x128-right-edge,Fail 
-kms_cursor_edge_walk@128x128-top-bottom,Fail -kms_cursor_edge_walk@128x128-top-edge,Fail -kms_cursor_edge_walk@256x256-left-edge,Fail -kms_cursor_edge_walk@256x256-right-edge,Fail -kms_cursor_edge_walk@256x256-top-bottom,Fail -kms_cursor_edge_walk@256x256-top-edge,Fail -kms_cursor_edge_walk@64x64-left-edge,Fail -kms_cursor_edge_walk@64x64-right-edge,Fail -kms_cursor_edge_walk@64x64-top-bottom,Fail -kms_cursor_edge_walk@64x64-top-edge,Fail kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail @@ -100,92 +32,14 @@ kms_cursor_legacy@cursor-vs-flip-varying-size,Fail kms_display_modes@extended-mode-basic,Fail kms_flip@2x-flip-vs-modeset-vs-hang,Fail kms_flip@2x-flip-vs-panning-vs-hang,Fail -kms_flip@absolute-wf_vblank,Fail -kms_flip@absolute-wf_vblank-interruptible,Fail -kms_flip@basic-flip-vs-wf_vblank,Fail -kms_flip@basic-plain-flip,Fail -kms_flip@blocking-absolute-wf_vblank,Fail -kms_flip@blocking-absolute-wf_vblank-interruptible,Fail -kms_flip@blocking-wf_vblank,Fail -kms_flip@busy-flip,Fail -kms_flip@dpms-off-confusion,Fail -kms_flip@dpms-off-confusion-interruptible,Fail -kms_flip@dpms-vs-vblank-race,Fail -kms_flip@dpms-vs-vblank-race-interruptible,Fail -kms_flip@flip-vs-absolute-wf_vblank,Fail -kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail -kms_flip@flip-vs-blocking-wf-vblank,Fail -kms_flip@flip-vs-expired-vblank,Fail -kms_flip@flip-vs-expired-vblank-interruptible,Fail kms_flip@flip-vs-modeset-vs-hang,Fail -kms_flip@flip-vs-panning,Fail -kms_flip@flip-vs-panning-interruptible,Fail kms_flip@flip-vs-panning-vs-hang,Fail -kms_flip@flip-vs-rmfb,Fail -kms_flip@flip-vs-rmfb-interruptible,Fail -kms_flip@flip-vs-wf_vblank-interruptible,Fail -kms_flip@modeset-vs-vblank-race,Fail -kms_flip@modeset-vs-vblank-race-interruptible,Fail -kms_flip@plain-flip-fb-recreate,Fail -kms_flip@plain-flip-fb-recreate-interruptible,Fail -kms_flip@plain-flip-interruptible,Fail -kms_flip@plain-flip-ts-check,Fail -kms_flip@plain-flip-ts-check-interruptible,Fail -kms_flip@wf_vblank-ts-check,Fail -kms_flip@wf_vblank-ts-check-interruptible,Fail -kms_lease@cursor-implicit-plane,Fail kms_lease@lease-uevent,Fail -kms_lease@page-flip-implicit-plane,Fail -kms_lease@setcrtc-implicit-plane,Fail -kms_lease@simple-lease,Fail kms_multipipe_modeset@basic-max-pipe-crc-check,Fail kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail -kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail -kms_pipe_crc_basic@disable-crc-after-crtc,Fail -kms_pipe_crc_basic@nonblocking-crc,Fail -kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail -kms_pipe_crc_basic@read-crc,Fail -kms_pipe_crc_basic@read-crc-frame-sequence,Fail -kms_plane@pixel-format,Fail -kms_plane@pixel-format-source-clamping,Fail -kms_plane@plane-panning-bottom-right,Fail -kms_plane@plane-panning-top-left,Fail -kms_plane@plane-position-covered,Fail -kms_plane@plane-position-hole,Fail -kms_plane@plane-position-hole-dpms,Fail kms_plane_alpha_blend@alpha-7efc,Fail -kms_plane_alpha_blend@alpha-basic,Fail -kms_plane_alpha_blend@alpha-opaque-fb,Fail -kms_plane_alpha_blend@alpha-transparent-fb,Fail -kms_plane_alpha_blend@constant-alpha-max,Fail -kms_plane_alpha_blend@constant-alpha-mid,Fail -kms_plane_alpha_blend@constant-alpha-min,Fail kms_plane_alpha_blend@coverage-7efc,Fail kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail -kms_plane_cursor@primary,Fail kms_plane_lowres@tiling-none,Fail -kms_plane_multiple@tiling-none,Fail kms_rmfb@close-fd,Fail 
-kms_rotation_crc@cursor-rotation-180,Fail -kms_rotation_crc@primary-rotation-180,Fail -kms_sequence@get-busy,Fail -kms_sequence@get-forked,Fail -kms_sequence@get-forked-busy,Fail -kms_sequence@get-idle,Fail -kms_sequence@queue-busy,Fail -kms_sequence@queue-idle,Fail -kms_vblank@accuracy-idle,Fail -kms_vblank@crtc-id,Fail -kms_vblank@query-busy,Fail -kms_vblank@query-forked,Fail -kms_vblank@query-forked-busy,Fail -kms_vblank@query-idle,Fail kms_vblank@ts-continuation-dpms-rpm,Fail -kms_vblank@ts-continuation-idle,Fail -kms_vblank@ts-continuation-modeset,Fail -kms_vblank@ts-continuation-modeset-rpm,Fail -kms_vblank@wait-busy,Fail -kms_vblank@wait-forked,Fail -kms_vblank@wait-forked-busy,Fail -kms_vblank@wait-idle,Fail -tools_test@tools_test,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt index 6dec63d48cfbea..d74e04405e6559 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt @@ -1,8 +1,20 @@ # Board Name: sc7180-trogdor-kingoftown # Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 +msm/msm_mapping@shadow + +# Board Name: sc7180-trogdor-kingoftown +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 +msm/msm_shrink@copy-gpu-oom-32 + +# Board Name: sc7180-trogdor-kingoftown +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u # Failure Rate: 50 -msm_mapping@shadow -msm_shrink@copy-gpu-oom-32 -msm_shrink@copy-gpu-oom-8 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 +msm/msm_shrink@copy-gpu-oom-8 diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt index 68c96005ba540b..c2833eee1c4bca 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt @@ -4,7 +4,7 @@ # Skip driver specific tests ^amdgpu.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -12,6 +12,7 @@ panfrost_.* # Skip intel specific tests gem_.* i915_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
@@ -19,3 +20,6 @@ core_hotunplug.* # Timeout occurs kms_flip@2x-wf_vblank-ts-check + +# Hangs the machine +kms_cursor_crc@cursor-random-max-size diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt index eb7a3886d3976d..d42004cd69772e 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt @@ -3,13 +3,11 @@ device_reset@reset-bound,Fail device_reset@unbind-cold-reset-rebind,Fail device_reset@unbind-reset-rebind,Fail dumb_buffer@invalid-bpp,Fail -kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail kms_color@ctm-0-25,Fail kms_color@ctm-0-50,Fail kms_color@ctm-0-75,Fail kms_color@ctm-blue-to-red,Fail kms_color@ctm-green-to-red,Fail -kms_color@ctm-max,Fail kms_color@ctm-negative,Fail kms_color@ctm-red-to-blue,Fail kms_color@ctm-signed,Fail @@ -21,72 +19,6 @@ kms_content_protection@lic-type-1,Crash kms_content_protection@srm,Crash kms_content_protection@type1,Crash kms_content_protection@uevent,Crash -kms_cursor_crc@cursor-alpha-opaque,Fail -kms_cursor_crc@cursor-alpha-transparent,Fail -kms_cursor_crc@cursor-dpms,Fail -kms_cursor_crc@cursor-offscreen-128x128,Fail -kms_cursor_crc@cursor-offscreen-128x42,Fail -kms_cursor_crc@cursor-offscreen-256x256,Fail -kms_cursor_crc@cursor-offscreen-256x85,Fail -kms_cursor_crc@cursor-offscreen-32x10,Fail -kms_cursor_crc@cursor-offscreen-32x32,Fail -kms_cursor_crc@cursor-offscreen-512x170,Fail -kms_cursor_crc@cursor-offscreen-512x512,Fail -kms_cursor_crc@cursor-offscreen-64x21,Fail -kms_cursor_crc@cursor-offscreen-64x64,Fail -kms_cursor_crc@cursor-onscreen-128x128,Fail -kms_cursor_crc@cursor-onscreen-128x42,Fail -kms_cursor_crc@cursor-onscreen-256x256,Fail -kms_cursor_crc@cursor-onscreen-256x85,Fail -kms_cursor_crc@cursor-onscreen-32x10,Fail -kms_cursor_crc@cursor-onscreen-32x32,Fail -kms_cursor_crc@cursor-onscreen-512x170,Fail -kms_cursor_crc@cursor-onscreen-512x512,Fail -kms_cursor_crc@cursor-onscreen-64x21,Fail -kms_cursor_crc@cursor-onscreen-64x64,Fail -kms_cursor_crc@cursor-random-128x128,Fail -kms_cursor_crc@cursor-random-128x42,Fail -kms_cursor_crc@cursor-random-256x256,Fail -kms_cursor_crc@cursor-random-256x85,Fail -kms_cursor_crc@cursor-random-32x10,Fail -kms_cursor_crc@cursor-random-32x32,Fail -kms_cursor_crc@cursor-random-512x170,Fail -kms_cursor_crc@cursor-random-512x512,Fail -kms_cursor_crc@cursor-random-64x21,Fail -kms_cursor_crc@cursor-random-64x64,Fail -kms_cursor_crc@cursor-rapid-movement-128x128,Fail -kms_cursor_crc@cursor-rapid-movement-128x42,Fail -kms_cursor_crc@cursor-rapid-movement-256x256,Fail -kms_cursor_crc@cursor-rapid-movement-256x85,Fail -kms_cursor_crc@cursor-rapid-movement-32x10,Fail -kms_cursor_crc@cursor-rapid-movement-32x32,Fail -kms_cursor_crc@cursor-rapid-movement-512x170,Fail -kms_cursor_crc@cursor-rapid-movement-512x512,Fail -kms_cursor_crc@cursor-rapid-movement-64x21,Fail -kms_cursor_crc@cursor-rapid-movement-64x64,Fail -kms_cursor_crc@cursor-size-change,Fail -kms_cursor_crc@cursor-sliding-128x128,Fail -kms_cursor_crc@cursor-sliding-128x42,Fail -kms_cursor_crc@cursor-sliding-256x256,Fail -kms_cursor_crc@cursor-sliding-256x85,Fail -kms_cursor_crc@cursor-sliding-32x10,Fail -kms_cursor_crc@cursor-sliding-32x32,Fail -kms_cursor_crc@cursor-sliding-512x170,Fail -kms_cursor_crc@cursor-sliding-512x512,Fail -kms_cursor_crc@cursor-sliding-64x21,Fail -kms_cursor_crc@cursor-sliding-64x64,Fail 
-kms_cursor_edge_walk@128x128-left-edge,Fail -kms_cursor_edge_walk@128x128-right-edge,Fail -kms_cursor_edge_walk@128x128-top-bottom,Fail -kms_cursor_edge_walk@128x128-top-edge,Fail -kms_cursor_edge_walk@256x256-left-edge,Fail -kms_cursor_edge_walk@256x256-right-edge,Fail -kms_cursor_edge_walk@256x256-top-bottom,Fail -kms_cursor_edge_walk@256x256-top-edge,Fail -kms_cursor_edge_walk@64x64-left-edge,Fail -kms_cursor_edge_walk@64x64-right-edge,Fail -kms_cursor_edge_walk@64x64-top-bottom,Fail -kms_cursor_edge_walk@64x64-top-edge,Fail kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail @@ -100,92 +32,14 @@ kms_cursor_legacy@cursor-vs-flip-varying-size,Fail kms_display_modes@extended-mode-basic,Fail kms_flip@2x-flip-vs-modeset-vs-hang,Fail kms_flip@2x-flip-vs-panning-vs-hang,Fail -kms_flip@absolute-wf_vblank,Fail -kms_flip@absolute-wf_vblank-interruptible,Fail -kms_flip@basic-flip-vs-wf_vblank,Fail -kms_flip@basic-plain-flip,Fail -kms_flip@blocking-absolute-wf_vblank,Fail -kms_flip@blocking-absolute-wf_vblank-interruptible,Fail -kms_flip@blocking-wf_vblank,Fail -kms_flip@busy-flip,Fail -kms_flip@dpms-off-confusion,Fail -kms_flip@dpms-off-confusion-interruptible,Fail -kms_flip@dpms-vs-vblank-race,Fail -kms_flip@dpms-vs-vblank-race-interruptible,Fail -kms_flip@flip-vs-absolute-wf_vblank,Fail -kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail -kms_flip@flip-vs-blocking-wf-vblank,Fail -kms_flip@flip-vs-expired-vblank,Fail -kms_flip@flip-vs-expired-vblank-interruptible,Fail kms_flip@flip-vs-modeset-vs-hang,Fail -kms_flip@flip-vs-panning,Fail -kms_flip@flip-vs-panning-interruptible,Fail kms_flip@flip-vs-panning-vs-hang,Fail -kms_flip@flip-vs-rmfb,Fail -kms_flip@flip-vs-rmfb-interruptible,Fail -kms_flip@flip-vs-wf_vblank-interruptible,Fail -kms_flip@modeset-vs-vblank-race,Fail -kms_flip@modeset-vs-vblank-race-interruptible,Fail -kms_flip@plain-flip-fb-recreate,Fail -kms_flip@plain-flip-fb-recreate-interruptible,Fail -kms_flip@plain-flip-interruptible,Fail -kms_flip@plain-flip-ts-check,Fail -kms_flip@plain-flip-ts-check-interruptible,Fail -kms_flip@wf_vblank-ts-check,Fail -kms_flip@wf_vblank-ts-check-interruptible,Fail -kms_lease@cursor-implicit-plane,Fail kms_lease@lease-uevent,Fail -kms_lease@page-flip-implicit-plane,Fail -kms_lease@setcrtc-implicit-plane,Fail -kms_lease@simple-lease,Fail kms_multipipe_modeset@basic-max-pipe-crc-check,Fail kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail -kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail -kms_pipe_crc_basic@disable-crc-after-crtc,Fail -kms_pipe_crc_basic@nonblocking-crc,Fail -kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail -kms_pipe_crc_basic@read-crc,Fail -kms_pipe_crc_basic@read-crc-frame-sequence,Fail -kms_plane@pixel-format,Fail -kms_plane@pixel-format-source-clamping,Fail -kms_plane@plane-panning-bottom-right,Fail -kms_plane@plane-panning-top-left,Fail -kms_plane@plane-position-covered,Fail -kms_plane@plane-position-hole,Fail -kms_plane@plane-position-hole-dpms,Fail kms_plane_alpha_blend@alpha-7efc,Fail -kms_plane_alpha_blend@alpha-basic,Fail -kms_plane_alpha_blend@alpha-opaque-fb,Fail -kms_plane_alpha_blend@alpha-transparent-fb,Fail -kms_plane_alpha_blend@constant-alpha-max,Fail -kms_plane_alpha_blend@constant-alpha-mid,Fail -kms_plane_alpha_blend@constant-alpha-min,Fail kms_plane_alpha_blend@coverage-7efc,Fail kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail -kms_plane_cursor@primary,Fail kms_plane_lowres@tiling-none,Fail 
-kms_plane_multiple@tiling-none,Fail kms_rmfb@close-fd,Fail -kms_rotation_crc@cursor-rotation-180,Fail -kms_rotation_crc@primary-rotation-180,Fail -kms_sequence@get-busy,Fail -kms_sequence@get-forked,Fail -kms_sequence@get-forked-busy,Fail -kms_sequence@get-idle,Fail -kms_sequence@queue-busy,Fail -kms_sequence@queue-idle,Fail -kms_vblank@accuracy-idle,Fail -kms_vblank@crtc-id,Fail -kms_vblank@query-busy,Fail -kms_vblank@query-forked,Fail -kms_vblank@query-forked-busy,Fail -kms_vblank@query-idle,Fail kms_vblank@ts-continuation-dpms-rpm,Fail -kms_vblank@ts-continuation-idle,Fail -kms_vblank@ts-continuation-modeset,Fail -kms_vblank@ts-continuation-modeset-rpm,Fail -kms_vblank@wait-busy,Fail -kms_vblank@wait-forked,Fail -kms_vblank@wait-forked-busy,Fail -kms_vblank@wait-idle,Fail -tools_test@tools_test,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt index dcb24b835dc386..cd3d3b0befe4dc 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt @@ -1,6 +1,13 @@ # Board Name: sc7180-trogdor-lazor-limozeen-nots-r5 # Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 -msm_mapping@shadow +msm/msm_mapping@shadow + +# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 100 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_lease@page-flip-implicit-plane diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt index 1168c53acd2d45..7c69c1f1d55b84 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt @@ -4,7 +4,7 @@ # Skip driver specific tests ^amdgpu.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -12,6 +12,7 @@ panfrost_.* # Skip intel specific tests gem_.* i915_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt index 8f010c8a9c4f00..770a1c685fdea4 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt @@ -33,4 +33,3 @@ kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail kms_plane_cursor@overlay,Fail kms_plane_cursor@viewport,Fail kms_rmfb@close-fd,Fail -tools_test@tools_test,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt index 2c5f62b07632d8..2aa96b1241c3d4 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt @@ -1,19 +1,118 @@ # Board Name: sdm845-cheza-r3 # Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 -# Failure Rate: 50 kms_cursor_legacy@basic-flip-after-cursor-atomic + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@basic-flip-after-cursor-legacy + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@basic-flip-after-cursor-varying-size + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@basic-flip-before-cursor-varying-size + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@flip-vs-cursor-atomic-transitions + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@flip-vs-cursor-varying-size + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@short-flip-after-cursor-atomic-transitions + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@short-flip-after-cursor-toggle + +# Board Name: sdm845-cheza-r3 
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@short-flip-before-cursor-atomic-transitions + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size -msm_shrink@copy-gpu-32 -msm_shrink@copy-gpu-oom-32 + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 +msm/msm_shrink@copy-gpu-32 + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 +msm/msm_shrink@copy-gpu-oom-32 + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_cursor_legacy@short-flip-before-cursor-toggle + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_cursor_legacy@flip-vs-cursor-toggle + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +msm/msm_shrink@copy-mmap-oom-8 diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt index 5185212c8fb21f..90651048ab6100 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt @@ -4,12 +4,12 @@ kms_bw.* # Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches # https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e # https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1 -msm_mapping@* +msm/msm_mapping@* # Skip driver specific tests ^amdgpu.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -17,6 +17,7 @@ panfrost_.* # Skip intel specific tests gem_.* i915_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt new file mode 100644 index 00000000000000..fe8ce2ce33e691 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt @@ -0,0 +1 @@ +panfrost/panfrost_prime@gem-prime-import,Fail diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt new file mode 100644 index 00000000000000..3c7e494857b558 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt @@ -0,0 +1,23 @@ +# Skip driver specific tests +^amdgpu.* +^msm.* +nouveau_.* +^v3d.* +^vc4.* +^vmwgfx* + +# Skip intel specific tests +gem_.* +i915_.* +tools_test.* + +# Panfrost is not a KMS driver, so skip the KMS tests +kms_.* + +# Skip display functionality tests for GPU-only drivers +dumb_buffer.* +fbdev.* + +# Currently fails and causes coverage loss for other tests +# since core_getversion also fails. +core_hotunplug.* diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt new file mode 100644 index 00000000000000..fe8ce2ce33e691 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt @@ -0,0 +1 @@ +panfrost/panfrost_prime@gem-prime-import,Fail diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt new file mode 100644 index 00000000000000..3c7e494857b558 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt @@ -0,0 +1,23 @@ +# Skip driver specific tests +^amdgpu.* +^msm.* +nouveau_.* +^v3d.* +^vc4.* +^vmwgfx* + +# Skip intel specific tests +gem_.* +i915_.* +tools_test.* + +# Panfrost is not a KMS driver, so skip the KMS tests +kms_.* + +# Skip display functionality tests for GPU-only drivers +dumb_buffer.* +fbdev.* + +# Currently fails and causes coverage loss for other tests +# since core_getversion also fails. +core_hotunplug.* diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt new file mode 100644 index 00000000000000..4a2f4b6b14c122 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt @@ -0,0 +1 @@ +panfrost/panfrost_prime@gem-prime-import,Crash diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt new file mode 100644 index 00000000000000..feeed89b6c3f7f --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt @@ -0,0 +1,26 @@ +# Suspend to RAM seems to be broken on this machine +.*suspend.* + +# Skip driver specific tests +^amdgpu.* +^msm.* +nouveau_.* +^v3d.* +^vc4.* +^vmwgfx* + +# Skip intel specific tests +gem_.* +i915_.* +tools_test.* + +# Panfrost is not a KMS driver, so skip the KMS tests +kms_.* + +# Skip display functionality tests for GPU-only drivers +dumb_buffer.* +fbdev.* + +# Currently fails and causes coverage loss for other tests +# since core_getversion also fails. 
+core_hotunplug.* diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt new file mode 100644 index 00000000000000..fe8ce2ce33e691 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt @@ -0,0 +1 @@ +panfrost/panfrost_prime@gem-prime-import,Fail diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt new file mode 100644 index 00000000000000..ac4f8f7244d4f4 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt @@ -0,0 +1,6 @@ +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/dri-devel/5cc34a8b-c1fa-4744-9031-2d33ecf41011@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-g0df7b9b97 +# Linux Version: 6.9.0-rc7 +panfrost/panfrost_submit@pan-unhandled-pagefault diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt new file mode 100644 index 00000000000000..feeed89b6c3f7f --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt @@ -0,0 +1,26 @@ +# Suspend to RAM seems to be broken on this machine +.*suspend.* + +# Skip driver specific tests +^amdgpu.* +^msm.* +nouveau_.* +^v3d.* +^vc4.* +^vmwgfx* + +# Skip intel specific tests +gem_.* +i915_.* +tools_test.* + +# Panfrost is not a KMS driver, so skip the KMS tests +kms_.* + +# Skip display functionality tests for GPU-only drivers +dumb_buffer.* +fbdev.* + +# Currently fails and causes coverage loss for other tests +# since core_getversion also fails. +core_hotunplug.* diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt index e9994c9db799bf..5e6d48d98e4e16 100644 --- a/drivers/gpu/drm/ci/xfails/requirements.txt +++ b/drivers/gpu/drm/ci/xfails/requirements.txt @@ -11,7 +11,7 @@ requests==2.31.0 requests-toolbelt==1.0.0 ruamel.yaml==0.17.32 ruamel.yaml.clib==0.2.7 -setuptools==68.0.0 +setuptools==70.0.0 tenacity==8.2.3 urllib3==2.0.7 wheel==0.41.1 diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt index f9b99bf27105ca..ea7b2ceb95b910 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt @@ -1,8 +1,18 @@ +core_setmaster@master-drop-set-root,Crash +core_setmaster@master-drop-set-user,Crash +core_setmaster_vs_auth,Crash +device_reset@cold-reset-bound,Crash +device_reset@reset-bound,Crash +device_reset@unbind-cold-reset-rebind,Crash +device_reset@unbind-reset-rebind,Crash dumb_buffer@create-clear,Crash -dumb_buffer@create-valid-dumb,Crash dumb_buffer@invalid-bpp,Crash -dumb_buffer@map-invalid-size,Crash -dumb_buffer@map-uaf,Crash -dumb_buffer@map-valid,Crash -panfrost_prime@gem-prime-import,Crash -tools_test@tools_test,Crash +fbdev@pan,Crash +kms_cursor_crc@cursor-onscreen-32x10,Crash +kms_cursor_crc@cursor-onscreen-32x32,Crash +kms_cursor_crc@cursor-random-32x10,Crash +kms_cursor_crc@cursor-sliding-32x32,Crash +kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail +kms_cursor_legacy@cursor-vs-flip-legacy,Fail +kms_prop_blob@invalid-set-prop,Crash +kms_prop_blob@invalid-set-prop-any,Crash diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt new file mode 100644 index 00000000000000..7ede273aab2074 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt @@ -0,0 +1,6 @@ +# Board Name: rk3288-veyron-jaq +# Bug 
Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u +# Failure Rate: 100 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_cursor_legacy@flip-vs-cursor-atomic diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt index 6d3757dca83ba4..eb16b29dee48e7 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt @@ -1,60 +1,11 @@ # Suspend to RAM seems to be broken on this machine .*suspend.* -# Too unstable, machine ends up hanging after lots of Oopses -kms_cursor_legacy.* - -# Started hanging the machine on Linux 5.19-rc2: -# -# [IGT] kms_plane_lowres: executing -# [IGT] kms_plane_lowres: starting subtest pipe-F-tiling-y -# [IGT] kms_plane_lowres: exiting, ret=77 -# Console: switching to colour frame buffer device 170x48 -# rockchip-drm display-subsystem: [drm] *ERROR* flip_done timed out -# rockchip-drm display-subsystem: [drm] *ERROR* [CRTC:35:crtc-0] commit wait timed out -# BUG: spinlock bad magic on CPU#3, kms_plane_lowre/482 -# 8<--- cut here --- -# Unable to handle kernel paging request at virtual address 7812078e -# [7812078e] *pgd=00000000 -# Internal error: Oops: 5 [#1] SMP ARM -# Modules linked in: -# CPU: 3 PID: 482 Comm: kms_plane_lowre Tainted: G W 5.19.0-rc2-323596-g00535de92171 #1 -# Hardware name: Rockchip (Device Tree) -# Process kms_plane_lowre (pid: 482, stack limit = 0x1193ac2b) -# spin_dump from do_raw_spin_lock+0xa4/0xe8 -# do_raw_spin_lock from wait_for_completion_timeout+0x2c/0x120 -# wait_for_completion_timeout from drm_crtc_commit_wait+0x18/0x7c -# drm_crtc_commit_wait from drm_atomic_helper_wait_for_dependencies+0x44/0x168 -# drm_atomic_helper_wait_for_dependencies from commit_tail+0x34/0x180 -# commit_tail from drm_atomic_helper_commit+0x164/0x18c -# drm_atomic_helper_commit from drm_atomic_commit+0xac/0xe4 -# drm_atomic_commit from drm_client_modeset_commit_atomic+0x23c/0x284 -# drm_client_modeset_commit_atomic from drm_client_modeset_commit_locked+0x60/0x1c8 -# drm_client_modeset_commit_locked from drm_client_modeset_commit+0x24/0x40 -# drm_client_modeset_commit from drm_fbdev_client_restore+0x58/0x94 -# drm_fbdev_client_restore from drm_client_dev_restore+0x70/0xbc -# drm_client_dev_restore from drm_release+0xf4/0x114 -# drm_release from __fput+0x74/0x240 -# __fput from task_work_run+0x84/0xb4 -# task_work_run from do_exit+0x34c/0xa20 -# do_exit from do_group_exit+0x34/0x98 -# do_group_exit from __wake_up_parent+0x0/0x18 -# Code: e595c008 12843d19 03e00000 03093168 (15940508) -# ---[ end trace 0000000000000000 ]--- -# note: kms_plane_lowre[482] exited with preempt_count 1 -# Fixing recursive fault but reboot is needed! -kms_plane_lowres@pipe-F-tiling-y - -# Take too long, we have only two machines, and these are very flaky -kms_cursor_crc.* - -# Machine is hanging in this test, so skip it -kms_pipe_crc_basic@disable-crc-after-crtc - # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -62,9 +13,7 @@ nouveau_.* # Skip intel specific tests gem_.* i915_.* - -# Panfrost is not a KMS driver, so skip the KMS tests -kms_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt index 9ef460646d76cc..9309ff15e23a47 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt @@ -1,8 +1,84 @@ -dumb_buffer@create-clear,Fail -dumb_buffer@create-valid-dumb,Fail +device_reset@cold-reset-bound,Fail +device_reset@reset-bound,Fail +device_reset@unbind-cold-reset-rebind,Fail +device_reset@unbind-reset-rebind,Fail +dumb_buffer@create-clear,Crash dumb_buffer@invalid-bpp,Fail -dumb_buffer@map-invalid-size,Fail -dumb_buffer@map-uaf,Fail -dumb_buffer@map-valid,Fail -panfrost_prime@gem-prime-import,Fail -tools_test@tools_test,Fail +kms_atomic_transition@modeset-transition,Fail +kms_atomic_transition@modeset-transition-fencing,Fail +kms_atomic_transition@plane-toggle-modeset-transition,Fail +kms_color@gamma,Fail +kms_color@legacy-gamma,Fail +kms_cursor_crc@cursor-alpha-opaque,Fail +kms_cursor_crc@cursor-alpha-transparent,Fail +kms_cursor_crc@cursor-dpms,Fail +kms_cursor_crc@cursor-offscreen-32x10,Fail +kms_cursor_crc@cursor-offscreen-32x32,Fail +kms_cursor_crc@cursor-offscreen-64x21,Fail +kms_cursor_crc@cursor-offscreen-64x64,Fail +kms_cursor_crc@cursor-onscreen-32x10,Fail +kms_cursor_crc@cursor-onscreen-32x32,Fail +kms_cursor_crc@cursor-onscreen-64x21,Fail +kms_cursor_crc@cursor-onscreen-64x64,Fail +kms_cursor_crc@cursor-random-32x10,Fail +kms_cursor_crc@cursor-random-32x32,Fail +kms_cursor_crc@cursor-random-64x21,Fail +kms_cursor_crc@cursor-random-64x64,Fail +kms_cursor_crc@cursor-rapid-movement-32x10,Fail +kms_cursor_crc@cursor-rapid-movement-32x32,Fail +kms_cursor_crc@cursor-rapid-movement-64x21,Fail +kms_cursor_crc@cursor-rapid-movement-64x64,Fail +kms_cursor_crc@cursor-size-change,Fail +kms_cursor_crc@cursor-sliding-32x10,Fail +kms_cursor_crc@cursor-sliding-32x32,Fail +kms_cursor_crc@cursor-sliding-64x21,Fail +kms_cursor_crc@cursor-sliding-64x64,Fail +kms_cursor_edge_walk@64x64-left-edge,Fail +kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail +kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail +kms_cursor_legacy@cursor-vs-flip-atomic,Fail +kms_cursor_legacy@cursor-vs-flip-legacy,Fail +kms_cursor_legacy@cursor-vs-flip-toggle,Fail +kms_cursor_legacy@flip-vs-cursor-atomic,Fail +kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail +kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail +kms_cursor_legacy@flip-vs-cursor-legacy,Fail +kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail +kms_flip@basic-flip-vs-wf_vblank,Fail +kms_flip@blocking-wf_vblank,Fail +kms_flip@dpms-vs-vblank-race,Fail +kms_flip@flip-vs-absolute-wf_vblank,Fail +kms_flip@flip-vs-blocking-wf-vblank,Fail +kms_flip@flip-vs-modeset-vs-hang,Fail +kms_flip@flip-vs-panning,Fail +kms_flip@flip-vs-panning-interruptible,Fail +kms_flip@flip-vs-panning-vs-hang,Fail +kms_flip@modeset-vs-vblank-race,Fail +kms_flip@modeset-vs-vblank-race-interruptible,Fail +kms_flip@plain-flip-fb-recreate,Fail +kms_flip@plain-flip-fb-recreate-interruptible,Fail +kms_flip@plain-flip-ts-check,Fail +kms_flip@plain-flip-ts-check-interruptible,Fail +kms_flip@wf_vblank-ts-check,Fail +kms_flip@wf_vblank-ts-check-interruptible,Fail +kms_invalid_mode@int-max-clock,Fail +kms_lease@lease-uevent,Fail +kms_lease@page-flip-implicit-plane,Fail +kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail +kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail +kms_pipe_crc_basic@disable-crc-after-crtc,Fail +kms_pipe_crc_basic@nonblocking-crc,Fail 
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail +kms_pipe_crc_basic@read-crc,Fail +kms_pipe_crc_basic@read-crc-frame-sequence,Fail +kms_plane@pixel-format,Crash +kms_plane@pixel-format-source-clamping,Crash +kms_plane@plane-panning-bottom-right,Fail +kms_plane@plane-panning-top-left,Fail +kms_plane@plane-position-covered,Fail +kms_plane@plane-position-hole,Fail +kms_plane@plane-position-hole-dpms,Fail +kms_plane_cursor@primary,Fail +kms_plane_multiple@tiling-none,Fail +kms_rmfb@close-fd,Fail +kms_universal_plane@universal-plane-functional,Fail diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt index 742c27d9a598ee..d98f6a17343cbd 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt @@ -1,6 +1,48 @@ # Board Name: rk3399-gru-kevin -# Bug Report: https://lore.kernel.org/dri-devel/5cc34a8b-c1fa-4744-9031-2d33ecf41011@collabora.com/T/#u -# IGT Version: 1.28-g0df7b9b97 -# Linux Version: 6.9.0-rc7 +# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u # Failure Rate: 50 -panfrost_submit@pan-unhandled-pagefault +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_bw@linear-tiling-1-displays-2560x1440p + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_cursor_legacy@nonblocking-modeset-vs-cursor-atomic + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_flip@dpms-vs-vblank-race-interruptible + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_flip@flip-vs-absolute-wf_vblank-interruptible + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_flip@flip-vs-wf_vblank-interruptible + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_setmode@basic + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_bw@connected-linear-tiling-1-displays-2560x1440p diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt index 5c52b25b421396..eb16b29dee48e7 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt @@ -1,13 +1,11 @@ # Suspend to RAM seems to be broken on this machine .*suspend.* -# Too unstable, machine ends up hanging after lots of Oopses -kms_cursor_legacy.* - # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -15,9 +13,7 @@ nouveau_.* # Skip intel specific tests 
gem_.* i915_.* - -# Panfrost is not a KMS driver, so skip the KMS tests -kms_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt index fdf09fe1156607..c72fee70e739ff 100644 --- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt +++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt @@ -3,6 +3,70 @@ kms_addfb_basic@bo-too-small,Fail kms_addfb_basic@size-max,Fail kms_addfb_basic@too-high,Fail kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail +kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-10-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-10-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-10-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-10-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-11-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-11-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-11-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-11-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-12-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-12-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-12-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-12-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-13-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-13-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-13-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-13-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-14-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-14-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-14-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-14-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-15-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-15-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-15-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-15-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-16-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-16-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-16-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-16-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-2-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-2-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-2-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-3-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-3-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-3-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-3-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-4-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-4-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-4-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-4-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-5-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-5-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-5-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-5-displays-3840x2160p,Fail 
+kms_bw@connected-linear-tiling-6-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-6-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-6-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-6-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-7-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-7-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-7-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-7-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-8-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-8-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-8-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-8-displays-3840x2160p,Fail +kms_bw@connected-linear-tiling-9-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-9-displays-2160x1440p,Fail +kms_bw@connected-linear-tiling-9-displays-2560x1440p,Fail +kms_bw@connected-linear-tiling-9-displays-3840x2160p,Fail kms_bw@linear-tiling-1-displays-1920x1080p,Fail kms_bw@linear-tiling-1-displays-2160x1440p,Fail kms_bw@linear-tiling-1-displays-2560x1440p,Fail @@ -123,4 +187,3 @@ kms_vblank@wait-forked,Fail kms_vblank@wait-forked-busy,Fail kms_vblank@wait-idle,Fail perf@i915-ref-count,Fail -tools_test@tools_test,Fail diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt index e0ca4fadb84f24..9c9e048725f8be 100644 --- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt +++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt @@ -7,9 +7,9 @@ kms_flip@flip-vs-suspend.* # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -18,6 +18,7 @@ panfrost_.* gem_.* i915_.* xe_.* +tools_test.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt index 691c383b21a05a..5408110f4c6095 100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt @@ -41,12 +41,8 @@ kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail kms_cursor_legacy@flip-vs-cursor-legacy,Fail kms_flip@flip-vs-modeset-vs-hang,Fail kms_flip@flip-vs-panning-vs-hang,Fail -kms_flip@flip-vs-suspend,Timeout -kms_flip@flip-vs-suspend-interruptible,Timeout -kms_flip@plain-flip-fb-recreate,Fail kms_lease@lease-uevent,Fail kms_pipe_crc_basic@nonblocking-crc,Fail -kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail kms_writeback@writeback-check-output,Fail kms_writeback@writeback-check-output-XRGB2101010,Fail kms_writeback@writeback-fb-id,Fail @@ -54,4 +50,3 @@ kms_writeback@writeback-fb-id-XRGB2101010,Fail kms_writeback@writeback-invalid-parameters,Fail kms_writeback@writeback-pixel-formats,Fail perf@i915-ref-count,Fail -tools_test@tools_test,Fail diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt index eeaa1d5825afef..62428f3c8f3164 100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt @@ -67,3 +67,24 @@ kms_flip@flip-vs-absolute-wf_vblank-interruptible # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 kms_flip@flip-vs-blocking-wf-vblank + +# Board Name: vkms +# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_cursor_legacy@flip-vs-cursor-varying-size + +# Board Name: vkms +# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_flip@flip-vs-expired-vblank + +# Board Name: vkms +# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u +# Failure Rate: 50 +# IGT Version: 1.28-gf13702b8e +# Linux Version: 6.10.0-rc5 +kms_pipe_crc_basic@nonblocking-crc-frame-sequence diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt index fd5d1271115fea..5ccc771fbb36b9 100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt @@ -104,11 +104,112 @@ kms_cursor_crc@cursor-rapid-movement-256x85 # CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 # CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0 +kms_cursor_crc@cursor-onscreen-256x256 +# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI +# CPU: 1 PID: 1913 Comm: kworker/u8:6 Not tainted 6.10.0-rc5-g8a28e73ebead #1 +# Hardware name: ChromiumOS crosvm, BIOS 0 +# Workqueue: vkms_composer vkms_composer_worker [vkms] +# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms] +# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48 +# RSP: 0018:ffffb477409fbd58 EFLAGS: 00010282 +# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffff8b124a242000 +# RDX: 00000000000000ff RSI: ffff8b124a243ff8 RDI: ffff8b124a244000 +# RBP: 0000000000000002 R08: 0000000000000000 R09: 00000000000003ff +# R10: ffff8b124a244000 R11: 0000000000000000 R12: ffff8b1249282f30 +# R13: 0000000000000002 R14: 0000000000000002 R15: 
0000000000000000 +# FS: 0000000000000000(0000) GS:ffff8b126bd00000(0000) knlGS:0000000000000000 +# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +# CR2: 0000000000000018 CR3: 0000000107a86000 CR4: 0000000000350ef0 +# Call Trace: +# +# ? __die+0x1e/0x60 +# ? page_fault_oops+0x17b/0x4a0 +# ? exc_page_fault+0x6d/0x230 +# ? asm_exc_page_fault+0x26/0x30 +# ? compose_active_planes+0x344/0x4e0 [vkms] +# ? compose_active_planes+0x32f/0x4e0 [vkms] +# ? srso_return_thunk+0x5/0x5f +# vkms_composer_worker+0x205/0x240 [vkms] +# process_one_work+0x201/0x6c0 +# ? lock_is_held_type+0x9e/0x110 +# worker_thread+0x17e/0x350 +# ? __pfx_worker_thread+0x10/0x10 +# kthread+0xce/0x100 +# ? __pfx_kthread+0x10/0x10 +# ret_from_fork+0x2f/0x50 +# ? __pfx_kthread+0x10/0x10 +# ret_from_fork_asm+0x1a/0x30 +# +# Modules linked in: vkms +# CR2: 0000000000000018 +# ---[ end trace 0000000000000000 ]--- +# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms] +# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48 +# RSP: 0018:ffffb477409fbd58 EFLAGS: 00010282 +# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffff8b124a242000 +# RDX: 00000000000000ff RSI: ffff8b124a243ff8 RDI: ffff8b124a244000 +# RBP: 0000000000000002 R08: 0000000000000000 R09: 00000000000003ff +# R10: ffff8b124a244000 R11: 0000000000000000 R12: ffff8b1249282f30 +# R13: 0000000000000002 R14: 0000000000000002 R15: 0000000000000000 +# FS: 0000000000000000(0000) GS:ffff8b126bd00000(0000) knlGS:0000000000000000 +# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +# CR2: 0000000000000018 CR3: 0000000107a86000 CR4: 0000000000350ef0 + +kms_cursor_edge_walk@128x128-right-edge +# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI +# CPU: 0 PID: 1911 Comm: kworker/u8:3 Not tainted 6.10.0-rc5-g5e7a002eefe5 #1 +# Hardware name: ChromiumOS crosvm, BIOS 0 +# Workqueue: vkms_composer vkms_composer_worker [vkms] +# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms] +# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48 +# RSP: 0018:ffffb2f040a43d58 EFLAGS: 00010282 +# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa2c181792000 +# RDX: 0000000000000000 RSI: ffffa2c181793ff8 RDI: ffffa2c181790000 +# RBP: 0000000000000031 R08: 0000000000000000 R09: 00000000000003ff +# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810 +# R13: 0000000000000031 R14: 0000000000000031 R15: 0000000000000000 +# FS: 0000000000000000(0000) GS:ffffa2c1abc00000(0000) knlGS:0000000000000000 +# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +# CR2: 0000000000000018 CR3: 0000000106768000 CR4: 0000000000350ef0 +# Call Trace: +# +# ? __die+0x1e/0x60 +# ? page_fault_oops+0x17b/0x4a0 +# ? srso_return_thunk+0x5/0x5f +# ? mark_held_locks+0x49/0x80 +# ? exc_page_fault+0x6d/0x230 +# ? asm_exc_page_fault+0x26/0x30 +# ? compose_active_planes+0x344/0x4e0 [vkms] +# ? compose_active_planes+0x32f/0x4e0 [vkms] +# ? srso_return_thunk+0x5/0x5f +# vkms_composer_worker+0x205/0x240 [vkms] +# process_one_work+0x201/0x6c0 +# ? lock_is_held_type+0x9e/0x110 +# worker_thread+0x17e/0x350 +# ? __pfx_worker_thread+0x10/0x10 +# kthread+0xce/0x100 +# ? __pfx_kthread+0x10/0x10 +# ret_from_fork+0x2f/0x50 +# ? 
__pfx_kthread+0x10/0x10 +# ret_from_fork_asm+0x1a/0x30 +# +# Modules linked in: vkms +# CR2: 0000000000000018 +# ---[ end trace 0000000000000000 ]--- +# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms] +# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48 +# RSP: 0018:ffffb2f040a43d58 EFLAGS: 00010282 +# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa2c181792000 +# RDX: 0000000000000000 RSI: ffffa2c181793ff8 RDI: ffffa2c181790000 +# RBP: 0000000000000031 R08: 0000000000000000 R09: 00000000000003ff +# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810 +# R13: 0000000000000031 R14: 0000000000000031 R15: 000000000000 + # Skip driver specific tests ^amdgpu.* -msm_.* +^msm.* nouveau_.* -panfrost_.* +^panfrost.* ^v3d.* ^vc4.* ^vmwgfx* @@ -117,3 +218,4 @@ panfrost_.* gem_.* i915_.* xe_.* +tools_test.* diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index d4c34f36414006..6ee51003de3ce6 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -2328,6 +2328,31 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) #undef DEVICE_ID_ANY #undef DEVICE_ID +static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset, + struct drm_dp_dpcd_ident *ident) +{ + int ret; + + ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); + + return ret < 0 ? ret : 0; +} + +static void drm_dp_dump_desc(struct drm_dp_aux *aux, + const char *device_name, const struct drm_dp_desc *desc) +{ + const struct drm_dp_dpcd_ident *ident = &desc->ident; + + drm_dbg_kms(aux->drm_dev, + "%s: %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n", + aux->name, device_name, + (int)sizeof(ident->oui), ident->oui, + (int)strnlen(ident->device_id, sizeof(ident->device_id)), ident->device_id, + ident->hw_rev >> 4, ident->hw_rev & 0xf, + ident->sw_major_rev, ident->sw_minor_rev, + desc->quirks); +} + /** * drm_dp_read_desc - read sink/branch descriptor from DPCD * @aux: DisplayPort AUX channel @@ -2344,27 +2369,48 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, { struct drm_dp_dpcd_ident *ident = &desc->ident; unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI; - int ret, dev_id_len; + int ret; - ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); + ret = drm_dp_read_ident(aux, offset, ident); if (ret < 0) return ret; desc->quirks = drm_dp_get_quirks(ident, is_branch); - dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id)); - - drm_dbg_kms(aux->drm_dev, - "%s: DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n", - aux->name, is_branch ? "branch" : "sink", - (int)sizeof(ident->oui), ident->oui, dev_id_len, - ident->device_id, ident->hw_rev >> 4, ident->hw_rev & 0xf, - ident->sw_major_rev, ident->sw_minor_rev, desc->quirks); + drm_dp_dump_desc(aux, is_branch ? "DP branch" : "DP sink", desc); return 0; } EXPORT_SYMBOL(drm_dp_read_desc); +/** + * drm_dp_dump_lttpr_desc - read and dump the DPCD descriptor for an LTTPR PHY + * @aux: DisplayPort AUX channel + * @dp_phy: LTTPR PHY instance + * + * Read the DPCD LTTPR PHY descriptor for @dp_phy and print a debug message + * with its details to dmesg. + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy) +{ + struct drm_dp_desc desc = {}; + int ret; + + if (drm_WARN_ON(aux->drm_dev, dp_phy < DP_PHY_LTTPR1 || dp_phy > DP_MAX_LTTPR_COUNT)) + return -EINVAL; + + ret = drm_dp_read_ident(aux, DP_OUI_PHY_REPEATER(dp_phy), &desc.ident); + if (ret < 0) + return ret; + + drm_dp_dump_desc(aux, drm_dp_phy_name(dp_phy), &desc); + + return 0; +} +EXPORT_SYMBOL(drm_dp_dump_lttpr_desc); + /** * drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment * @dsc_dpcd: DSC capabilities from DPCD diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index fc2ceae61db2d0..ac90118b9e7a81 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -89,7 +89,7 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, - u8 *guid); + guid_t *guid); static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port); static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port); @@ -801,7 +801,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_ int idx = 1; int i; - memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); + import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]); idx += 16; repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; idx++; @@ -829,7 +829,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_ idx++; if (idx > raw->curlen) goto fail_len; - memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); + import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]); idx += 16; if (idx > raw->curlen) goto fail_len; @@ -1029,7 +1029,7 @@ static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mg msg->req_type = (raw->msg[0] & 0x7f); if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) { - memcpy(msg->u.nak.guid, &raw->msg[1], 16); + import_guid(&msg->u.nak.guid, &raw->msg[1]); msg->u.nak.reason = raw->msg[17]; msg->u.nak.nak_data = raw->msg[18]; return false; @@ -1078,7 +1078,7 @@ drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_ if (idx > raw->curlen) goto fail_len; - memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); + import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]); idx += 16; if (idx > raw->curlen) goto fail_len; @@ -1107,7 +1107,7 @@ static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst if (idx > raw->curlen) goto fail_len; - memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); + import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]); idx += 16; if (idx > raw->curlen) goto fail_len; @@ -2174,20 +2174,24 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, offset, size, buffer); } -static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) +static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid) { int ret = 0; - memcpy(mstb->guid, guid, 16); + guid_copy(&mstb->guid, guid); + + if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) { + u8 buf[UUID_SIZE]; + + export_guid(buf, &mstb->guid); - if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { if (mstb->port_parent) { ret = drm_dp_send_dpcd_write(mstb->mgr, mstb->port_parent, - DP_GUID, 16, mstb->guid); + DP_GUID, sizeof(buf), buf); } else { ret = drm_dp_dpcd_write(mstb->mgr->aux, - DP_GUID, mstb->guid, 
16); + DP_GUID, buf, sizeof(buf)); } } @@ -2339,7 +2343,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, { struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_ddps = 0, ret; + int ret; u8 new_pdt = DP_PEER_DEVICE_NONE; bool new_mcs = 0; bool created = false, send_link_addr = false, changed = false; @@ -2372,7 +2376,6 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, */ drm_modeset_lock(&mgr->base.lock, NULL); - old_ddps = port->ddps; changed = port->ddps != port_msg->ddps || (port->ddps && (port->ldps != port_msg->legacy_device_plug_status || @@ -2407,15 +2410,13 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, * Reprobe PBN caps on both hotplug, and when re-probing the link * for our parent mstb */ - if (old_ddps != port->ddps || !created) { - if (port->ddps && !port->input) { - ret = drm_dp_send_enum_path_resources(mgr, mstb, - port); - if (ret == 1) - changed = true; - } else { - port->full_pbn = 0; - } + if (port->ddps && !port->input) { + ret = drm_dp_send_enum_path_resources(mgr, mstb, + port); + if (ret == 1) + changed = true; + } else { + port->full_pbn = 0; } ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); @@ -2570,9 +2571,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ return mstb; } -static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( - struct drm_dp_mst_branch *mstb, - const uint8_t *guid) +static struct drm_dp_mst_branch * +get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb, + const guid_t *guid) { struct drm_dp_mst_branch *found_mstb; struct drm_dp_mst_port *port; @@ -2580,10 +2581,9 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( if (!mstb) return NULL; - if (memcmp(mstb->guid, guid, 16) == 0) + if (guid_equal(&mstb->guid, guid)) return mstb; - list_for_each_entry(port, &mstb->ports, next) { found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); @@ -2596,7 +2596,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( static struct drm_dp_mst_branch * drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, - const uint8_t *guid) + const guid_t *guid) { struct drm_dp_mst_branch *mstb; int ret; @@ -2692,18 +2692,18 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } -static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, - u8 *guid) +static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr) { - u64 salt; + queue_work(system_long_wq, &mgr->work); +} - if (memchr_inv(guid, 0, 16)) +static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, + guid_t *guid) +{ + if (!guid_is_null(guid)) return true; - salt = get_jiffies_64(); - - memcpy(&guid[0], &salt, sizeof(u64)); - memcpy(&guid[8], &salt, sizeof(u64)); + guid_gen(guid); return false; } @@ -2943,7 +2943,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports); drm_dp_dump_link_address(mgr, reply); - ret = drm_dp_check_mstb_guid(mstb, reply->guid); + ret = drm_dp_check_mstb_guid(mstb, &reply->guid); if (ret) { char buf[64]; @@ -3685,7 +3685,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms /* Write reset payload */ drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f); - queue_work(system_long_wq, &mgr->work); + drm_dp_mst_queue_probe_work(mgr); ret = 0; } else { @@ 
-3723,6 +3723,33 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); } +/** + * drm_dp_mst_topology_queue_probe - Queue a topology probe + * @mgr: manager to probe + * + * Queue a work to probe the MST topology. Driver's should call this only to + * sync the topology's HW->SW state after the MST link's parameters have + * changed in a way the state could've become out-of-sync. This is the case + * for instance when the link rate between the source and first downstream + * branch device has switched between UHBR and non-UHBR rates. Except of those + * cases - for instance when a sink gets plugged/unplugged to a port - the SW + * state will get updated automatically via MST UP message notifications. + */ +void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->lock); + + if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary)) + goto out_unlock; + + drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); + drm_dp_mst_queue_probe_work(mgr); + +out_unlock: + mutex_unlock(&mgr->lock); +} +EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe); + /** * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager * @mgr: manager to suspend @@ -3770,8 +3797,9 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, bool sync) { + u8 buf[UUID_SIZE]; + guid_t guid; int ret; - u8 guid[16]; mutex_lock(&mgr->lock); if (!mgr->mst_primary) @@ -3792,13 +3820,15 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, } /* Some hubs forget their guids after they resume */ - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (ret != 16) { + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); + if (ret != sizeof(buf)) { drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } - ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid); + import_guid(&guid, buf); + + ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid); if (ret) { drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n"); goto out_fail; @@ -3809,7 +3839,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, * state of our in-memory topology back into sync with reality. 
So, * restart the probing process as if we're probing a new hub */ - queue_work(system_long_wq, &mgr->work); + drm_dp_mst_queue_probe_work(mgr); mutex_unlock(&mgr->lock); if (sync) { @@ -3976,12 +4006,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, bool hotplug = false, dowork = false; if (hdr->broadcast) { - const u8 *guid = NULL; + const guid_t *guid = NULL; if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) - guid = msg->u.conn_stat.guid; + guid = &msg->u.conn_stat.guid; else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) - guid = msg->u.resource_stat.guid; + guid = &msg->u.resource_stat.guid; if (guid) mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); @@ -4963,7 +4993,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, seq_printf(m, "branch oui: %*phN devid: ", 3, buf); for (i = 0x3; i < 0x8 && buf[i]; i++) - seq_printf(m, "%c", buf[i]); + seq_putc(m, buf[i]); seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); if (dump_dp_payload_table(mgr, buf)) @@ -5569,7 +5599,6 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr); * drm_dp_atomic_release_time_slots() * * Returns: - * * 0 if the new state is valid, negative error code otherwise. */ int drm_dp_mst_atomic_check(struct drm_atomic_state *state) @@ -5606,7 +5635,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); * topology object. * * RETURNS: - * * The MST topology state or error pointer. */ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, @@ -5626,7 +5654,6 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); * topology object. * * Returns: - * * The old MST topology state, or NULL if there's no topology state for this MST mgr * in the global atomic state */ @@ -5651,7 +5678,6 @@ EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state); * topology object. 
* * Returns: - * * The new MST topology state, or NULL if there's no topology state for this MST mgr * in the global atomic state */ @@ -6057,6 +6083,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) struct drm_dp_aux *immediate_upstream_aux; struct drm_dp_mst_port *fec_port; struct drm_dp_desc desc = {}; + u8 upstream_dsc; u8 endpoint_fec; u8 endpoint_dsc; @@ -6083,8 +6110,6 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) /* DP-to-DP peer device */ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) { - u8 upstream_dsc; - if (drm_dp_dpcd_read(&port->aux, DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) return NULL; @@ -6130,6 +6155,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) { u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; + if (drm_dp_dpcd_read(immediate_upstream_aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) + return NULL; + + if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED)) + return NULL; + if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0) return NULL; diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c index 7854820089ec6e..feb7a3a759811a 100644 --- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c @@ -521,8 +521,6 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector, } EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check); -#define HDMI_MAX_INFOFRAME_SIZE 29 - static int clear_device_infoframe(struct drm_connector *connector, enum hdmi_infoframe_type type) { @@ -563,7 +561,7 @@ static int write_device_infoframe(struct drm_connector *connector, { const struct drm_connector_hdmi_funcs *funcs = connector->hdmi.funcs; struct drm_device *dev = connector->dev; - u8 buffer[HDMI_MAX_INFOFRAME_SIZE]; + u8 buffer[HDMI_INFOFRAME_SIZE(MAX)]; int ret; int len; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 6e516c39a372fb..0fc99da93afe1d 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -63,7 +63,6 @@ EXPORT_SYMBOL(__drm_crtc_commit_free); * hardware and flipped to. * * Returns: - * * 0 on success, a negative error code otherwise. */ int drm_crtc_commit_wait(struct drm_crtc_commit *commit) @@ -337,7 +336,6 @@ EXPORT_SYMBOL(__drm_atomic_state_free); * not created by userspace through an IOCTL call. * * Returns: - * * Either the allocated state or the error code encoded into the pointer. When * the error is EDEADLK then the w/w mutex code has detected a deadlock and the * entire atomic sequence must be restarted. All other errors are fatal. @@ -518,7 +516,6 @@ static int drm_atomic_connector_check(struct drm_connector *connector, * is consistent. * * Returns: - * * Either the allocated state or the error code encoded into the pointer. When * the error is EDEADLK then the w/w mutex code has detected a deadlock and the * entire atomic sequence must be restarted. All other errors are fatal. @@ -828,7 +825,6 @@ EXPORT_SYMBOL(drm_atomic_private_obj_fini); * object lock to make sure that the state is consistent. * * RETURNS: - * * Either the allocated state or the error code encoded into a pointer. */ struct drm_private_state * @@ -1061,7 +1057,6 @@ EXPORT_SYMBOL(drm_atomic_get_new_crtc_for_encoder); * make sure that the state is consistent. * * Returns: - * * Either the allocated state or the error code encoded into the pointer. 
When * the error is EDEADLK then the w/w mutex code has detected a deadlock and the * entire atomic sequence must be restarted. All other errors are fatal. @@ -1169,7 +1164,6 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, * state is consistent. * * Returns: - * * Either the allocated state or the error code encoded into the pointer. When * the error is EDEADLK then the w/w mutex code has detected a deadlock and the * entire atomic sequence must be restarted. diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index fb97b51b38f153..43cdf39019a445 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2266,7 +2266,6 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc) * automatically. * * Returns: - * * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast, * -ENOMEM on allocation failures and -EINTR when a signal is pending. */ @@ -3009,7 +3008,6 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); * don't pass the right state structures to the callbacks. * * Returns: - * * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the * waiting for the previous commits has been interrupted. */ diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 7936c202395518..370dc676e3aa54 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -543,7 +543,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, &state->fb_damage_clips, val, -1, - sizeof(struct drm_rect), + sizeof(struct drm_mode_rect), &replaced); return ret; } else if (property == plane->scaling_filter_property) { diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index d44f055dbe3e78..c6af46dd02bfa9 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -353,8 +353,13 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, bridge->encoder = NULL; list_del(&bridge->chain_node); - DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n", - bridge->of_node, encoder->name, ret); + if (ret != -EPROBE_DEFER) + DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n", + bridge->of_node, encoder->name, ret); + else + dev_err_probe(encoder->dev->dev, -EPROBE_DEFER, + "failed to attach bridge %pOF to encoder %s\n", + bridge->of_node, encoder->name); return ret; } diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index ab6ab7ff7ea8e9..fc35f47e2849ed 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -426,6 +426,8 @@ static void drm_connector_cleanup_action(struct drm_device *dev, * * The connector structure should be allocated with drmm_kzalloc(). * + * The @drm_connector_funcs.destroy hook must be NULL. + * * Returns: * Zero on success, error code on failure. */ @@ -474,6 +476,8 @@ EXPORT_SYMBOL(drmm_connector_init); * * The connector structure should be allocated with drmm_kzalloc(). * + * The @drm_connector_funcs.destroy hook must be NULL. + * * Returns: * Zero on success, error code on failure. */ @@ -2315,24 +2319,71 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); * DOC: standard connector properties * * Colorspace: - * This property helps select a suitable colorspace based on the sink - * capability. Modern sink devices support wider gamut like BT2020. 
- * This helps switch to BT2020 mode if the BT2020 encoded video stream - * is being played by the user, same for any other colorspace. Thereby - * giving a good visual experience to users. - * - * The expectation from userspace is that it should parse the EDID - * and get supported colorspaces. Use this property and switch to the - * one supported. Sink supported colorspaces should be retrieved by - * userspace from EDID and driver will not explicitly expose them. - * - * Basically the expectation from userspace is: - * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink - * colorspace - * - Set this new property to let the sink know what it - * converted the CRTC output to. - * - This property is just to inform sink what colorspace - * source is trying to drive. + * This property is used to inform the driver about the color encoding + * user space configured the pixel operation properties to produce. + * The variants set the colorimetry, transfer characteristics, and which + * YCbCr conversion should be used when necessary. + * The transfer characteristics from HDR_OUTPUT_METADATA takes precedence + * over this property. + * User space always configures the pixel operation properties to produce + * full quantization range data (see the Broadcast RGB property). + * + * Drivers inform the sink about what colorimetry, transfer + * characteristics, YCbCr conversion, and quantization range to expect + * (this can depend on the output mode, output format and other + * properties). Drivers also convert the user space provided data to what + * the sink expects. + * + * User space has to check if the sink supports all of the possible + * colorimetries that the driver is allowed to pick by parsing the EDID. + * + * For historical reasons this property exposes a number of variants which + * result in undefined behavior. + * + * Default: + * The behavior is driver-specific. + * + * BT2020_RGB: + * + * BT2020_YCC: + * User space configures the pixel operation properties to produce + * RGB content with Rec. ITU-R BT.2020 colorimetry, Rec. + * ITU-R BT.2020 (Table 4, RGB) transfer characteristics and full + * quantization range. + * User space can use the HDR_OUTPUT_METADATA property to set the + * transfer characteristics to PQ (Rec. ITU-R BT.2100 Table 4) or + * HLG (Rec. ITU-R BT.2100 Table 5) in which case, user space + * configures pixel operation properties to produce content with + * the respective transfer characteristics. + * User space has to make sure the sink supports Rec. + * ITU-R BT.2020 R'G'B' and Rec. ITU-R BT.2020 Y'C'BC'R + * colorimetry. + * Drivers can configure the sink to use an RGB format, tell the + * sink to expect Rec. ITU-R BT.2020 R'G'B' colorimetry and convert + * to the appropriate quantization range. + * Drivers can configure the sink to use a YCbCr format, tell the + * sink to expect Rec. ITU-R BT.2020 Y'C'BC'R colorimetry, convert + * to YCbCr using the Rec. ITU-R BT.2020 non-constant luminance + * conversion matrix and convert to the appropriate quantization + * range. + * The variants BT2020_RGB and BT2020_YCC are equivalent and the + * driver chooses between RGB and YCbCr on its own. + * + * SMPTE_170M_YCC: + * BT709_YCC: + * XVYCC_601: + * XVYCC_709: + * SYCC_601: + * opYCC_601: + * opRGB: + * BT2020_CYCC: + * DCI-P3_RGB_D65: + * DCI-P3_RGB_Theater: + * RGB_WIDE_FIXED: + * RGB_WIDE_FLOAT: + * + * BT601_YCC: + * The behavior is undefined. 
* * Because between HDMI and DP have different colorspaces, * drm_mode_create_hdmi_colorspace_property() is used for HDMI connector and diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 1f73b8d6d75057..89706aa8232fc0 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -315,4 +315,19 @@ drm_edid_load_firmware(struct drm_connector *connector) } #endif +/* drm_panic.c */ +#ifdef CONFIG_DRM_PANIC +bool drm_panic_is_enabled(struct drm_device *dev); +void drm_panic_register(struct drm_device *dev); +void drm_panic_unregister(struct drm_device *dev); +void drm_panic_init(void); +void drm_panic_exit(void); +#else +static inline bool drm_panic_is_enabled(struct drm_device *dev) { return false; } +static inline void drm_panic_register(struct drm_device *dev) {} +static inline void drm_panic_unregister(struct drm_device *dev) {} +static inline void drm_panic_init(void) {} +static inline void drm_panic_exit(void) {} +#endif + #endif /* __DRM_CRTC_INTERNAL_H__ */ diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6b239a24f1dff3..9d3e6dd68810e3 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -520,8 +520,6 @@ static const struct file_operations drm_connector_fops = { .write = connector_write }; -#define HDMI_MAX_INFOFRAME_SIZE 29 - static ssize_t audio_infoframe_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { @@ -579,7 +577,7 @@ static ssize_t _f##_read_infoframe(struct file *filp, \ struct drm_connector *connector; \ union hdmi_infoframe *frame; \ struct drm_device *dev; \ - u8 buf[HDMI_MAX_INFOFRAME_SIZE]; \ + u8 buf[HDMI_INFOFRAME_SIZE(MAX)]; \ ssize_t len = 0; \ \ connector = filp->private_data; \ diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c index 9d01d762801ff2..b4fd43783c5096 100644 --- a/drivers/gpu/drm/drm_displayid.c +++ b/drivers/gpu/drm/drm_displayid.c @@ -33,9 +33,6 @@ validate_displayid(const u8 *displayid, int length, int idx) if (IS_ERR(base)) return base; - DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", - base->rev, base->bytes, base->prod_id, base->ext_count); - /* +1 for DispID checksum */ dispid_length = sizeof(*base) + base->bytes + 1; if (dispid_length > length - idx) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 93543071a5008c..ac30b0ec9d9312 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -54,8 +55,7 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); MODULE_DESCRIPTION("DRM shared core routines"); MODULE_LICENSE("GPL and additional rights"); -static DEFINE_SPINLOCK(drm_minor_lock); -static struct idr drm_minors_idr; +DEFINE_XARRAY_ALLOC(drm_minors_xa); /* * If the drm core fails to init for whatever reason, @@ -83,6 +83,18 @@ DEFINE_STATIC_SRCU(drm_unplug_srcu); * registered and unregistered dynamically according to device-state. 
*/ +static struct xarray *drm_minor_get_xa(enum drm_minor_type type) +{ + if (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER) + return &drm_minors_xa; +#if IS_ENABLED(CONFIG_DRM_ACCEL) + else if (type == DRM_MINOR_ACCEL) + return &accel_minors_xa; +#endif + else + return ERR_PTR(-EOPNOTSUPP); +} + static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, enum drm_minor_type type) { @@ -101,25 +113,31 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, static void drm_minor_alloc_release(struct drm_device *dev, void *data) { struct drm_minor *minor = data; - unsigned long flags; WARN_ON(dev != minor->dev); put_device(minor->kdev); - if (minor->type == DRM_MINOR_ACCEL) { - accel_minor_remove(minor->index); - } else { - spin_lock_irqsave(&drm_minor_lock, flags); - idr_remove(&drm_minors_idr, minor->index); - spin_unlock_irqrestore(&drm_minor_lock, flags); - } + xa_erase(drm_minor_get_xa(minor->type), minor->index); } +/* + * DRM used to support 64 devices, for backwards compatibility we need to maintain the + * minor allocation scheme where minors 0-63 are primary nodes, 64-127 are control nodes, + * and 128-191 are render nodes. + * After reaching the limit, we're allocating minors dynamically - first-come, first-serve. + * Accel nodes are using a distinct major, so the minors are allocated in continuous 0-MAX + * range. + */ +#define DRM_MINOR_LIMIT(t) ({ \ + typeof(t) _t = (t); \ + _t == DRM_MINOR_ACCEL ? XA_LIMIT(0, ACCEL_MAX_MINORS) : XA_LIMIT(64 * _t, 64 * _t + 63); \ +}) +#define DRM_EXTENDED_MINOR_LIMIT XA_LIMIT(192, (1 << MINORBITS) - 1) + static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type) { struct drm_minor *minor; - unsigned long flags; int r; minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL); @@ -129,25 +147,14 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type) minor->type = type; minor->dev = dev; - idr_preload(GFP_KERNEL); - if (type == DRM_MINOR_ACCEL) { - r = accel_minor_alloc(); - } else { - spin_lock_irqsave(&drm_minor_lock, flags); - r = idr_alloc(&drm_minors_idr, - NULL, - 64 * type, - 64 * (type + 1), - GFP_NOWAIT); - spin_unlock_irqrestore(&drm_minor_lock, flags); - } - idr_preload_end(); - + r = xa_alloc(drm_minor_get_xa(type), &minor->index, + NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL); + if (r == -EBUSY && (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER)) + r = xa_alloc(&drm_minors_xa, &minor->index, + NULL, DRM_EXTENDED_MINOR_LIMIT, GFP_KERNEL); if (r < 0) return r; - minor->index = r; - r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor); if (r) return r; @@ -163,7 +170,7 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type) static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type) { struct drm_minor *minor; - unsigned long flags; + void *entry; int ret; DRM_DEBUG("\n"); @@ -186,13 +193,12 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type) goto err_debugfs; /* replace NULL with @minor so lookups will succeed from now on */ - if (minor->type == DRM_MINOR_ACCEL) { - accel_minor_replace(minor, minor->index); - } else { - spin_lock_irqsave(&drm_minor_lock, flags); - idr_replace(&drm_minors_idr, minor, minor->index); - spin_unlock_irqrestore(&drm_minor_lock, flags); + entry = xa_store(drm_minor_get_xa(type), minor->index, minor, GFP_KERNEL); + if (xa_is_err(entry)) { + ret = xa_err(entry); + goto err_debugfs; } + WARN_ON(entry); DRM_DEBUG("new minor registered %d\n", 
minor->index); return 0; @@ -205,20 +211,13 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type) static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type) { struct drm_minor *minor; - unsigned long flags; minor = *drm_minor_get_slot(dev, type); if (!minor || !device_is_registered(minor->kdev)) return; /* replace @minor with NULL so lookups will fail from now on */ - if (minor->type == DRM_MINOR_ACCEL) { - accel_minor_replace(NULL, minor->index); - } else { - spin_lock_irqsave(&drm_minor_lock, flags); - idr_replace(&drm_minors_idr, NULL, minor->index); - spin_unlock_irqrestore(&drm_minor_lock, flags); - } + xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL); device_del(minor->kdev); dev_set_drvdata(minor->kdev, NULL); /* safety belt */ @@ -234,16 +233,15 @@ static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type typ * minor->dev pointer will stay valid! However, the device may get unplugged and * unregistered while you hold the minor. */ -struct drm_minor *drm_minor_acquire(unsigned int minor_id) +struct drm_minor *drm_minor_acquire(struct xarray *minor_xa, unsigned int minor_id) { struct drm_minor *minor; - unsigned long flags; - spin_lock_irqsave(&drm_minor_lock, flags); - minor = idr_find(&drm_minors_idr, minor_id); + xa_lock(minor_xa); + minor = xa_load(minor_xa, minor_id); if (minor) drm_dev_get(minor->dev); - spin_unlock_irqrestore(&drm_minor_lock, flags); + xa_unlock(minor_xa); if (!minor) { return ERR_PTR(-ENODEV); @@ -1036,7 +1034,7 @@ static int drm_stub_open(struct inode *inode, struct file *filp) DRM_DEBUG("\n"); - minor = drm_minor_acquire(iminor(inode)); + minor = drm_minor_acquire(&drm_minors_xa, iminor(inode)); if (IS_ERR(minor)) return PTR_ERR(minor); @@ -1067,11 +1065,12 @@ static const struct file_operations drm_stub_fops = { static void drm_core_exit(void) { drm_privacy_screen_lookup_exit(); + drm_panic_exit(); accel_core_exit(); unregister_chrdev(DRM_MAJOR, "drm"); debugfs_remove(drm_debugfs_root); drm_sysfs_destroy(); - idr_destroy(&drm_minors_idr); + WARN_ON(!xa_empty(&drm_minors_xa)); drm_connector_ida_destroy(); } @@ -1080,7 +1079,6 @@ static int __init drm_core_init(void) int ret; drm_connector_ida_init(); - idr_init(&drm_minors_idr); drm_memcpy_init_early(); ret = drm_sysfs_init(); @@ -1099,6 +1097,8 @@ static int __init drm_core_init(void) if (ret < 0) goto error; + drm_panic_init(); + drm_privacy_screen_lookup_init(); drm_core_init_complete = true; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f68a41eeb1fa80..855beafb76ffbe 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1817,7 +1817,7 @@ static int edid_block_tag(const void *_block) static bool edid_block_is_zero(const void *edid) { - return !memchr_inv(edid, 0, EDID_LENGTH); + return mem_is_zero(edid, EDID_LENGTH); } static bool drm_edid_eq(const struct drm_edid *drm_edid, @@ -1966,22 +1966,14 @@ static void edid_block_dump(const char *level, const void *block, int block_num) block, EDID_LENGTH, false); } -/** - * drm_edid_block_valid - Sanity check the EDID block (base or extension) - * @_block: pointer to raw EDID block - * @block_num: type of block to validate (0 for base, extension otherwise) - * @print_bad_edid: if true, dump bad EDID blocks to the console - * @edid_corrupt: if true, the header or checksum is invalid - * +/* * Validate a base or extension EDID block and optionally dump bad blocks to * the console. 
- * - * Return: True if the block is valid, false otherwise. */ -bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid, - bool *edid_corrupt) +static bool drm_edid_block_valid(void *_block, int block_num, bool print_bad_edid, + bool *edid_corrupt) { - struct edid *block = (struct edid *)_block; + struct edid *block = _block; enum edid_block_status status; bool is_base_block = block_num == 0; bool valid; @@ -2024,7 +2016,6 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid, return valid; } -EXPORT_SYMBOL(drm_edid_block_valid); /** * drm_edid_is_valid - sanity check EDID data @@ -6629,6 +6620,11 @@ static void update_displayid_info(struct drm_connector *connector, displayid_iter_edid_begin(drm_edid, &iter); displayid_iter_for_each(block, &iter) { + drm_dbg_kms(connector->dev, + "[CONNECTOR:%d:%s] DisplayID extension version 0x%02x, primary use 0x%02x\n", + connector->base.id, connector->name, + displayid_version(&iter), + displayid_primary_use(&iter)); if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 && (displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR || displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR)) diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c index 2da094bdf8a4d2..18e366cc4993b7 100644 --- a/drivers/gpu/drm/drm_exec.c +++ b/drivers/gpu/drm/drm_exec.c @@ -145,8 +145,7 @@ static int drm_exec_obj_locked(struct drm_exec *exec, size_t size = exec->max_objects * sizeof(void *); void *tmp; - tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE, - GFP_KERNEL); + tmp = kvrealloc(exec->objects, size + PAGE_SIZE, GFP_KERNEL); if (!tmp) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 56ac37ea2f2748..29c53f9f449ca8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -44,6 +44,7 @@ #include #include "drm_internal.h" +#include "drm_crtc_internal.h" static bool drm_fbdev_emulation = true; module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600); @@ -88,14 +89,6 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); * interfaces. Drivers that use one of the shared memory managers, TTM, SHMEM, * DMA, should instead use the corresponding fbdev emulation. * - * Existing fbdev implementations should restore the fbdev console by using - * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback. - * They should also notify the fb helper code from updates to the output - * configuration by using drm_fb_helper_output_poll_changed() as their - * &drm_mode_config_funcs.output_poll_changed callback. New implementations - * of fbdev should be build on top of struct &drm_client_funcs, which handles - * this automatically. Setting the old callbacks should be avoided. - * * For suspend/resume consider using drm_mode_config_helper_suspend() and * drm_mode_config_helper_resume() which takes care of fbdev as well. * @@ -259,12 +252,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration * @fb_helper: driver-allocated fbdev helper, can be NULL * - * This should be called from driver's drm &drm_driver.lastclose callback - * when implementing an fbcon on top of kms using this helper. This ensures that - * the user isn't greeted with a black screen when e.g. X dies. + * This helper should be called from fbdev emulation's &drm_client_funcs.restore + * callback. 
It ensures that the user isn't greeted with a black screen when the + * userspace compositor releases the display device. * - * RETURNS: - * Zero if everything went ok, negative error code otherwise. + * Returns: + * 0 on success, or a negative errno code otherwise. */ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) { @@ -527,6 +520,7 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) fb_helper->info = info; info->skip_vt_switch = true; + info->skip_panic = drm_panic_is_enabled(fb_helper->dev); return info; err_release: @@ -2001,26 +1995,11 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event); * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation * @dev: DRM device * - * This function can be used as the &drm_driver->lastclose callback for drivers - * that only need to call drm_fb_helper_restore_fbdev_mode_unlocked(). + * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked() + * instead. */ void drm_fb_helper_lastclose(struct drm_device *dev) { drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper); } EXPORT_SYMBOL(drm_fb_helper_lastclose); - -/** - * drm_fb_helper_output_poll_changed - DRM mode config \.output_poll_changed - * helper for fbdev emulation - * @dev: DRM device - * - * This function can be used as the - * &drm_mode_config_funcs.output_poll_changed callback for drivers that only - * need to call drm_fbdev.hotplug_event(). - */ -void drm_fb_helper_output_poll_changed(struct drm_device *dev) -{ - drm_fb_helper_hotplug_event(dev->fb_helper); -} -EXPORT_SYMBOL(drm_fb_helper_output_poll_changed); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 714e42b0510800..ad1dc638c83bb1 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -62,15 +63,6 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) if (dev->driver->load || dev->driver->unload) return true; - /* - * Drivers with the lastclose callback assume that it's synchronized - * against concurrent opens, which again needs the BKL. The proper fix - * is to use the drm_client infrastructure with proper locking for each - * client. - */ - if (dev->driver->lastclose) - return true; - return false; } @@ -111,7 +103,6 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n * .poll = drm_poll, * .read = drm_read, - * .llseek = no_llseek, * .mmap = drm_gem_mmap, * }; * @@ -318,6 +309,8 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor) if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) return -EINVAL; + if (WARN_ON_ONCE(!(filp->f_op->fop_flags & FOP_UNSIGNED_OFFSET))) + return -EINVAL; drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n", current->comm, task_pid_nr(current), minor->index); @@ -335,7 +328,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor) } filp->private_data = priv; - filp->f_mode |= FMODE_UNSIGNED_OFFSET; priv->filp = filp; mutex_lock(&dev->filelist_mutex); @@ -355,7 +347,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor) * resources for it. It also calls the &drm_driver.open driver callback. * * RETURNS: - * * 0 on success or negative errno value on failure. 
*/ int drm_open(struct inode *inode, struct file *filp) @@ -364,7 +355,7 @@ int drm_open(struct inode *inode, struct file *filp) struct drm_minor *minor; int retcode; - minor = drm_minor_acquire(iminor(inode)); + minor = drm_minor_acquire(&drm_minors_xa, iminor(inode)); if (IS_ERR(minor)) return PTR_ERR(minor); @@ -395,15 +386,12 @@ int drm_open(struct inode *inode, struct file *filp) } EXPORT_SYMBOL(drm_open); -void drm_lastclose(struct drm_device * dev) +static void drm_lastclose(struct drm_device *dev) { - drm_dbg_core(dev, "\n"); - - if (dev->driver->lastclose) - dev->driver->lastclose(dev); - drm_dbg_core(dev, "driver lastclose completed\n"); - drm_client_dev_restore(dev); + + if (dev_is_pci(dev->dev)) + vga_switcheroo_process_delayed_switch(); } /** @@ -412,12 +400,11 @@ void drm_lastclose(struct drm_device * dev) * @filp: file pointer. * * This function must be used by drivers as their &file_operations.release - * method. It frees any resources associated with the open file, and calls the - * &drm_driver.postclose driver callback. If this is the last open file for the - * DRM device also proceeds to call the &drm_driver.lastclose driver callback. + * method. It frees any resources associated with the open file. If this + * is the last open file for the DRM device, it also restores the active + * in-kernel DRM client. * * RETURNS: - * * Always succeeds and returns 0. */ int drm_release(struct inode *inode, struct file *filp) @@ -484,12 +471,10 @@ void drm_file_update_pid(struct drm_file *filp) * * This function may be used by drivers as their &file_operations.release * method. It frees any resources associated with the open file prior to taking - * the drm_global_mutex, which then calls the &drm_driver.postclose driver - * callback. If this is the last open file for the DRM device also proceeds to - * call the &drm_driver.lastclose driver callback. + * the drm_global_mutex. If this is the last open file for the DRM device, it + * then restores the active in-kernel DRM client. * * RETURNS: - * * Always succeeds and returns 0. */ int drm_release_noglobal(struct inode *inode, struct file *filp) @@ -532,7 +517,6 @@ EXPORT_SYMBOL(drm_release_noglobal); * safety. * * RETURNS: - * * Number of bytes read (always aligned to full events, and can be 0) or a * negative error code on failure. */ @@ -618,7 +602,6 @@ EXPORT_SYMBOL(drm_read); * See also drm_read(). * * RETURNS: - * * Mask of POLL flags indicating the current status of the file. */ __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait) @@ -656,7 +639,6 @@ EXPORT_SYMBOL(drm_poll); * already hold &drm_device.event_lock. * * RETURNS: - * * 0 on success or a negative error code on failure. */ int drm_event_reserve_init_locked(struct drm_device *dev, @@ -698,7 +680,6 @@ EXPORT_SYMBOL(drm_event_reserve_init_locked); * drm_event_reserve_init_locked() instead. * * RETURNS: - * * 0 on success or a negative error code on failure. */ int drm_event_reserve_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d4bbc5d109c8bc..149b8e25da5bbf 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -689,7 +689,6 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count, * For a single handle lookup, use drm_gem_object_lookup(). * * Returns: - * * @objs filled in with GEM object pointers. Returned GEM objects need to be * released with drm_gem_object_put(). -ENOENT is returned on a lookup * failure. 0 is returned on success. 
@@ -737,12 +736,11 @@ EXPORT_SYMBOL(drm_gem_objects_lookup); * @filp: DRM file private date * @handle: userspace handle * - * Returns: + * If looking up an array of handles, use drm_gem_objects_lookup(). * + * Returns: * A reference to the object named by the handle if such exists on @filp, NULL * otherwise. - * - * If looking up an array of handles, use drm_gem_objects_lookup(). */ struct drm_gem_object * drm_gem_object_lookup(struct drm_file *filp, u32 handle) @@ -763,7 +761,6 @@ EXPORT_SYMBOL(drm_gem_object_lookup); * @timeout: timeout value in jiffies or zero to return immediately * * Returns: - * * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or * greater than 0 on success. */ diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 690505a1f7a5db..1705bfc90b1e7f 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -53,7 +53,6 @@ extern struct mutex drm_global_mutex; bool drm_dev_needs_global_mutex(struct drm_device *dev); struct drm_file *drm_file_alloc(struct drm_minor *minor); void drm_file_free(struct drm_file *file); -void drm_lastclose(struct drm_device *dev); #ifdef CONFIG_PCI @@ -81,10 +80,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle); -/* drm_drv.c */ -struct drm_minor *drm_minor_acquire(unsigned int minor_id); -void drm_minor_release(struct drm_minor *minor); - /* drm_managed.c */ void drm_managed_release(struct drm_device *dev); void drmm_add_final_kfree(struct drm_device *dev, void *container); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 969cfd5a01aeac..2bc3973d35a19f 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -603,6 +603,8 @@ EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral); * mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command * @dsi: DSI peripheral device * + * This function is deprecated. Use mipi_dsi_turn_on_peripheral_multi() instead. + * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi) @@ -652,6 +654,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size); * @pps_selector: Select PPS from the table of pre-stored or uploaded PPS entries * * Enable or disable Display Stream Compression on the peripheral. + * This function is deprecated. Use mipi_dsi_compression_mode_ext_multi() instead. * * Return: 0 on success or a negative error code on failure. */ @@ -703,6 +706,7 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode); * @pps: VESA DSC 1.1 Picture Parameter Set * * Transmit the VESA DSC 1.1 Picture Parameter Set to the peripheral. + * This function is deprecated. Use mipi_dsi_picture_parameter_set_multi() instead. * * Return: 0 on success or a negative error code on failure. */ @@ -1037,6 +1041,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_read); * mipi_dsi_dcs_nop() - send DCS nop packet * @dsi: DSI peripheral device * + * This function is deprecated. Use mipi_dsi_dcs_nop_multi() instead. + * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi) @@ -1055,6 +1061,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_nop); * mipi_dsi_dcs_soft_reset() - perform a software reset of the display module * @dsi: DSI peripheral device * + * This function is deprecated. Use mipi_dsi_dcs_soft_reset_multi() instead. + * * Return: 0 on success or a negative error code on failure. 
*/ int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi) @@ -1124,6 +1132,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format); * display module except interface communication * @dsi: DSI peripheral device * + * This function is deprecated. Use mipi_dsi_dcs_enter_sleep_mode_multi() instead. + * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi) @@ -1143,6 +1153,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode); * module * @dsi: DSI peripheral device * + * This function is deprecated. Use mipi_dsi_dcs_exit_sleep_mode_multi() instead. + * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi) @@ -1162,6 +1174,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode); * display device * @dsi: DSI peripheral device * + * This function is deprecated. Use mipi_dsi_dcs_set_display_off_multi() instead. + * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi) @@ -1181,6 +1195,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off); * display device * @dsi: DSI peripheral device * + * This function is deprecated. Use mipi_dsi_dcs_set_display_on_multi() instead. + * * Return: 0 on success or a negative error code on failure */ int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi) @@ -1202,6 +1218,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on); * @start: first column of frame memory * @end: last column of frame memory * + * This function is deprecated. Use mipi_dsi_dcs_set_column_address_multi() + * instead. + * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, @@ -1226,6 +1245,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address); * @start: first page of frame memory * @end: last page of frame memory * + * This function is deprecated. Use mipi_dsi_dcs_set_page_address_multi() + * instead. + * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, @@ -1268,6 +1290,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off); * @dsi: DSI peripheral device * @mode: the Tearing Effect Output Line mode * + * This function is deprecated. Use mipi_dsi_dcs_set_tear_on_multi() instead. + * * Return: 0 on success or a negative error code on failure */ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, @@ -1291,6 +1315,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on); * @dsi: DSI peripheral device * @format: pixel format * + * This function is deprecated. Use mipi_dsi_dcs_set_pixel_format_multi() + * instead. + * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format) @@ -1312,6 +1339,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format); * @dsi: DSI peripheral device * @scanline: scanline to use as trigger * + * This function is deprecated. Use mipi_dsi_dcs_set_tear_scanline_multi() + * instead. + * * Return: 0 on success or a negative error code on failure */ int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline) @@ -1334,6 +1364,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline); * @dsi: DSI peripheral device * @brightness: brightness value * + * This function is deprecated. Use mipi_dsi_dcs_set_display_brightness_multi() + * instead. + * * Return: 0 on success or a negative error code on failure. 
*/ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi, @@ -1639,6 +1672,198 @@ void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx, } EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on_multi); +/** + * mipi_dsi_turn_on_peripheral_multi() - sends a Turn On Peripheral command + * @ctx: Context for multiple DSI transactions + * + * Like mipi_dsi_turn_on_peripheral() but deals with errors in a way that + * makes it convenient to make several calls in a row. + */ +void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_turn_on_peripheral(dsi); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "Failed to turn on peripheral: %d\n", + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral_multi); + +/** + * mipi_dsi_dcs_soft_reset_multi() - perform a software reset of the display module + * @ctx: Context for multiple DSI transactions + * + * Like mipi_dsi_dcs_soft_reset() but deals with errors in a way that + * makes it convenient to make several calls in a row. + */ +void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_dcs_soft_reset(dsi); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "Failed to mipi_dsi_dcs_soft_reset: %d\n", + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset_multi); + +/** + * mipi_dsi_dcs_set_display_brightness_multi() - sets the brightness value of + * the display + * @ctx: Context for multiple DSI transactions + * @brightness: brightness value + * + * Like mipi_dsi_dcs_set_display_brightness() but deals with errors in a way that + * makes it convenient to make several calls in a row. + */ +void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx, + u16 brightness) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "Failed to write display brightness: %d\n", + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_multi); + +/** + * mipi_dsi_dcs_set_pixel_format_multi() - sets the pixel format for the RGB image + * data used by the interface + * @ctx: Context for multiple DSI transactions + * @format: pixel format + * + * Like mipi_dsi_dcs_set_pixel_format() but deals with errors in a way that + * makes it convenient to make several calls in a row. 
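+ *
+ * As a rough usage sketch (not taken from an in-tree panel driver, and the
+ * exact command sequence is panel specific), an enable path built on these
+ * *_multi() helpers only needs to check the accumulated error once, at the
+ * end of the sequence:
+ *
+ *	struct mipi_dsi_multi_context ctx = { .dsi = dsi };
+ *
+ *	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ *	mipi_dsi_dcs_set_tear_on_multi(&ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ *	mipi_dsi_dcs_set_display_on_multi(&ctx);
+ *
+ *	return ctx.accum_err;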
+ */ +void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx, + u8 format) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_dcs_set_pixel_format(dsi, format); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "Failed to set pixel format: %d\n", + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format_multi); + +/** + * mipi_dsi_dcs_set_column_address_multi() - define the column extent of the + * frame memory accessed by the host processor + * @ctx: Context for multiple DSI transactions + * @start: first column of frame memory + * @end: last column of frame memory + * + * Like mipi_dsi_dcs_set_column_address() but deals with errors in a way that + * makes it convenient to make several calls in a row. + */ +void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx, + u16 start, u16 end) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_dcs_set_column_address(dsi, start, end); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "Failed to set column address: %d\n", + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address_multi); + +/** + * mipi_dsi_dcs_set_page_address_multi() - define the page extent of the + * frame memory accessed by the host processor + * @ctx: Context for multiple DSI transactions + * @start: first page of frame memory + * @end: last page of frame memory + * + * Like mipi_dsi_dcs_set_page_address() but deals with errors in a way that + * makes it convenient to make several calls in a row. + */ +void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx, + u16 start, u16 end) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_dcs_set_page_address(dsi, start, end); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "Failed to set page address: %d\n", + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address_multi); + +/** + * mipi_dsi_dcs_set_tear_scanline_multi() - set the scanline to use as trigger for + * the Tearing Effect output signal of the display module + * @ctx: Context for multiple DSI transactions + * @scanline: scanline to use as trigger + * + * Like mipi_dsi_dcs_set_tear_scanline() but deals with errors in a way that + * makes it convenient to make several calls in a row. 
+ */ +void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx, + u16 scanline) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_dcs_set_tear_scanline(dsi, scanline); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "Failed to set tear scanline: %d\n", + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline_multi); + static int mipi_dsi_drv_probe(struct device *dev) { struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 568972258222af..37d2e0a4ef4be2 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -456,6 +456,8 @@ int drmm_mode_config_init(struct drm_device *dev) if (ret == -EDEADLK) ret = drm_modeset_backoff(&modeset_ctx); + might_fault(); + ww_acquire_init(&resv_ctx, &reservation_ww_class); ret = dma_resv_lock(&resv, &resv_ctx); if (ret == -EDEADLK) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 1a0890083aee56..6ba167a3346134 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -539,7 +539,6 @@ static int fill_analog_mode(struct drm_device *dev, * to reach those resolutions. * * Returns: - * * A pointer to the mode, allocated with drm_mode_create(). Returns NULL * on error. */ diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index cfbe020de54e01..19ab0a794add31 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -161,6 +161,15 @@ int drm_panel_unprepare(struct drm_panel *panel) if (!panel) return -EINVAL; + /* + * If you are seeing the warning below it likely means one of two things: + * - Your panel driver incorrectly calls drm_panel_unprepare() in its + * shutdown routine. You should delete this. + * - You are using panel-edp or panel-simple and your DRM modeset + * driver's shutdown() callback happened after the panel's shutdown(). + * In this case the warning is harmless though ideally you should + * figure out how to reverse the order of the shutdown() callbacks. + */ if (!panel->prepared) { dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n"); return 0; @@ -245,6 +254,15 @@ int drm_panel_disable(struct drm_panel *panel) if (!panel) return -EINVAL; + /* + * If you are seeing the warning below it likely means one of two things: + * - Your panel driver incorrectly calls drm_panel_disable() in its + * shutdown routine. You should delete this. + * - You are using panel-edp or panel-simple and your DRM modeset + * driver's shutdown() callback happened after the panel's shutdown(). + * In this case the warning is harmless though ideally you should + * figure out how to reverse the order of the shutdown() callbacks. 
+ */ if (!panel->enabled) { dev_warn(panel->dev, "Skipping disable of already disabled panel\n"); return 0; diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index 948aed00595eb6..74412b7bf936c2 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include @@ -26,6 +28,9 @@ #include #include #include +#include + +#include "drm_crtc_internal.h" MODULE_AUTHOR("Jocelyn Falempe"); MODULE_DESCRIPTION("DRM panic handler"); @@ -76,11 +81,15 @@ struct drm_panic_line { #define PANIC_LINE(s) {.len = sizeof(s) - 1, .txt = s} static struct drm_panic_line panic_msg[] = { - PANIC_LINE("KERNEL PANIC !"), + PANIC_LINE("KERNEL PANIC!"), PANIC_LINE(""), PANIC_LINE("Please reboot your computer."), + PANIC_LINE(""), + PANIC_LINE(""), /* will be replaced by the panic description */ }; +static const size_t panic_msg_lines = ARRAY_SIZE(panic_msg); + static const struct drm_panic_line logo_ascii[] = { PANIC_LINE(" .--. _"), PANIC_LINE(" |o_o | | |"), @@ -91,6 +100,8 @@ static const struct drm_panic_line logo_ascii[] = { PANIC_LINE(" \\___)=(___/"), }; +static const size_t logo_ascii_lines = ARRAY_SIZE(logo_ascii); + #if defined(CONFIG_LOGO) && !defined(MODULE) static const struct linux_logo *logo_mono; @@ -249,20 +260,20 @@ static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, i static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch, const u8 *sbuf8, unsigned int spitch, unsigned int height, unsigned int width, - u16 fg16) + unsigned int scale, u16 fg16) { unsigned int y, x; for (y = 0; y < height; y++) for (x = 0; x < width; x++) - if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) + if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16); } static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch, const u8 *sbuf8, unsigned int spitch, unsigned int height, unsigned int width, - u32 fg32) + unsigned int scale, u32 fg32) { unsigned int y, x; @@ -270,7 +281,7 @@ static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch, for (x = 0; x < width; x++) { u32 off = y * dpitch + x * 3; - if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) { + if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) { /* write blue-green-red to output in little endianness */ iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0); iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8); @@ -283,24 +294,25 @@ static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch, static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch, const u8 *sbuf8, unsigned int spitch, unsigned int height, unsigned int width, - u32 fg32) + unsigned int scale, u32 fg32) { unsigned int y, x; for (y = 0; y < height; y++) for (x = 0; x < width; x++) - if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) + if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32); } static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip, - const u8 *sbuf8, unsigned int spitch, u32 fg_color) + const u8 *sbuf8, unsigned int spitch, unsigned int scale, + u32 fg_color) { unsigned int y, x; for (y = 0; y < drm_rect_height(clip); y++) for (x = 0; x < drm_rect_width(clip); x++) - if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) + if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) sb->set_pixel(sb, clip->x1 
+ x, clip->y1 + y, fg_color); } @@ -310,18 +322,22 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect * @clip: destination rectangle * @sbuf8: source buffer, in monochrome format, 8 pixels per byte. * @spitch: source pitch in bytes + * @scale: integer scale, source buffer is scale time smaller than destination + * rectangle * @fg_color: foreground color, in destination format * * This can be used to draw a font character, which is a monochrome image, to a * framebuffer in other supported format. */ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip, - const u8 *sbuf8, unsigned int spitch, u32 fg_color) + const u8 *sbuf8, unsigned int spitch, + unsigned int scale, u32 fg_color) + { struct iosys_map map; if (sb->set_pixel) - return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, fg_color); + return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color); map = sb->map[0]; iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]); @@ -329,15 +345,15 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip, switch (sb->format->cpp[0]) { case 2: drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch, - drm_rect_height(clip), drm_rect_width(clip), fg_color); + drm_rect_height(clip), drm_rect_width(clip), scale, fg_color); break; case 3: drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch, - drm_rect_height(clip), drm_rect_width(clip), fg_color); + drm_rect_height(clip), drm_rect_width(clip), scale, fg_color); break; case 4: drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch, - drm_rect_height(clip), drm_rect_width(clip), fg_color); + drm_rect_height(clip), drm_rect_width(clip), scale, fg_color); break; default: WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]); @@ -477,39 +493,51 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb, for (j = 0; j < line_len; j++) { src = get_char_bitmap(font, msg[i].txt[j], font_pitch); rec.x2 = rec.x1 + font->width; - drm_panic_blit(sb, &rec, src, font_pitch, color); + drm_panic_blit(sb, &rec, src, font_pitch, 1, color); rec.x1 += font->width; } } } +static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *font) +{ + if (logo_mono) { + drm_rect_init(rect, 0, 0, logo_mono->width, logo_mono->height); + } else { + int logo_width = get_max_line_len(logo_ascii, logo_ascii_lines) * font->width; + + drm_rect_init(rect, 0, 0, logo_width, logo_ascii_lines * font->height); + } +} + +static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect, + const struct font_desc *font, u32 fg_color) +{ + if (logo_mono) + drm_panic_blit(sb, rect, logo_mono->data, + DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color); + else + draw_txt_rectangle(sb, font, logo_ascii, logo_ascii_lines, false, rect, + fg_color); +} + static void draw_panic_static_user(struct drm_scanout_buffer *sb) { - size_t msg_lines = ARRAY_SIZE(panic_msg); - size_t logo_ascii_lines = ARRAY_SIZE(logo_ascii); u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format); u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format); const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL); struct drm_rect r_screen, r_logo, r_msg; - unsigned int logo_width, logo_height; + unsigned int msg_width, msg_height; if (!font) return; r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height); + drm_panic_logo_rect(&r_logo, font); - if (logo_mono) { - logo_width = 
logo_mono->width; - logo_height = logo_mono->height; - } else { - logo_width = get_max_line_len(logo_ascii, logo_ascii_lines) * font->width; - logo_height = logo_ascii_lines * font->height; - } - - r_logo = DRM_RECT_INIT(0, 0, logo_width, logo_height); - r_msg = DRM_RECT_INIT(0, 0, - min(get_max_line_len(panic_msg, msg_lines) * font->width, sb->width), - min(msg_lines * font->height, sb->height)); + msg_width = min(get_max_line_len(panic_msg, panic_msg_lines) * font->width, sb->width); + msg_height = min(panic_msg_lines * font->height, sb->height); + r_msg = DRM_RECT_INIT(0, 0, msg_width, msg_height); /* Center the panic message */ drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2, (sb->height - r_msg.y2) / 2); @@ -517,16 +545,10 @@ static void draw_panic_static_user(struct drm_scanout_buffer *sb) /* Fill with the background color, and draw text on top */ drm_panic_fill(sb, &r_screen, bg_color); - if ((r_msg.x1 >= logo_width || r_msg.y1 >= logo_height) && - logo_width <= sb->width && logo_height <= sb->height) { - if (logo_mono) - drm_panic_blit(sb, &r_logo, logo_mono->data, DIV_ROUND_UP(logo_width, 8), - fg_color); - else - draw_txt_rectangle(sb, font, logo_ascii, logo_ascii_lines, false, &r_logo, - fg_color); - } - draw_txt_rectangle(sb, font, panic_msg, msg_lines, true, &r_msg, fg_color); + if (!drm_rect_overlap(&r_logo, &r_msg)) + drm_panic_logo_draw(sb, &r_logo, font, fg_color); + + draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color); } /* @@ -608,6 +630,233 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb) } } +#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE) +/* + * It is unwise to allocate memory in the panic callback, so the buffers are + * pre-allocated. Only 2 buffers and the zlib workspace are needed. + * Two buffers are enough, using the following buffer usage: + * 1) kmsg messages are dumped in buffer1 + * 2) kmsg is zlib-compressed into buffer2 + * 3) compressed kmsg is encoded as QR-code Numeric stream in buffer1 + * 4) QR-code image is generated in buffer2 + * The Max QR code size is V40, 177x177, 4071 bytes for image, 2956 bytes for + * data segments. + * + * Typically, ~7500 bytes of kmsg, are compressed into 2800 bytes, which fits in + * a V40 QR-code (177x177). + * + * If CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL is not set, the kmsg data will be put + * directly in the QR code. 
+ * 1) kmsg messages are dumped in buffer1 + * 2) kmsg message is encoded as byte stream in buffer2 + * 3) QR-code image is generated in buffer1 + */ + +static uint panic_qr_version = CONFIG_DRM_PANIC_SCREEN_QR_VERSION; +module_param(panic_qr_version, uint, 0644); +MODULE_PARM_DESC(panic_qr_version, "maximum version (size) of the QR code"); + +#define MAX_QR_DATA 2956 +#define MAX_ZLIB_RATIO 3 +#define QR_BUFFER1_SIZE (MAX_ZLIB_RATIO * MAX_QR_DATA) /* Must also be > 4071 */ +#define QR_BUFFER2_SIZE 4096 +#define QR_MARGIN 4 /* 4 modules of foreground color around the qr code */ + +/* Compression parameters */ +#define COMPR_LEVEL 6 +#define WINDOW_BITS 12 +#define MEM_LEVEL 4 + +static char *qrbuf1; +static char *qrbuf2; +static struct z_stream_s stream; + +static void __init drm_panic_qr_init(void) +{ + qrbuf1 = kmalloc(QR_BUFFER1_SIZE, GFP_KERNEL); + qrbuf2 = kmalloc(QR_BUFFER2_SIZE, GFP_KERNEL); + stream.workspace = kmalloc(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL), + GFP_KERNEL); +} + +static void drm_panic_qr_exit(void) +{ + kfree(qrbuf1); + qrbuf1 = NULL; + kfree(qrbuf2); + qrbuf2 = NULL; + kfree(stream.workspace); + stream.workspace = NULL; +} + +extern size_t drm_panic_qr_max_data_size(u8 version, size_t url_len); + +extern u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size, + u8 *tmp, size_t tmp_size); + +static int drm_panic_get_qr_code_url(u8 **qr_image) +{ + struct kmsg_dump_iter iter; + char url[256]; + size_t kmsg_len, max_kmsg_size; + char *kmsg; + int max_qr_data_size, url_len; + + url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&zl=", + utsname()->machine, utsname()->release); + + max_qr_data_size = drm_panic_qr_max_data_size(panic_qr_version, url_len); + max_kmsg_size = min(MAX_ZLIB_RATIO * max_qr_data_size, QR_BUFFER1_SIZE); + + /* get kmsg to buffer 1 */ + kmsg_dump_rewind(&iter); + kmsg_dump_get_buffer(&iter, false, qrbuf1, max_kmsg_size, &kmsg_len); + + if (!kmsg_len) + return -ENODATA; + kmsg = qrbuf1; + +try_again: + if (zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS, + MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK) + return -EINVAL; + + stream.next_in = kmsg; + stream.avail_in = kmsg_len; + stream.total_in = 0; + stream.next_out = qrbuf2; + stream.avail_out = QR_BUFFER2_SIZE; + stream.total_out = 0; + + if (zlib_deflate(&stream, Z_FINISH) != Z_STREAM_END) + return -EINVAL; + + if (zlib_deflateEnd(&stream) != Z_OK) + return -EINVAL; + + if (stream.total_out > max_qr_data_size) { + /* too much data for the QR code, so skip the first line and try again */ + kmsg = strchr(kmsg, '\n'); + if (!kmsg) + return -EINVAL; + /* skip the first \n */ + kmsg += 1; + kmsg_len = strlen(kmsg); + goto try_again; + } + *qr_image = qrbuf2; + + /* generate qr code image in buffer2 */ + return drm_panic_qr_generate(url, qrbuf2, stream.total_out, QR_BUFFER2_SIZE, + qrbuf1, QR_BUFFER1_SIZE); +} + +static int drm_panic_get_qr_code_raw(u8 **qr_image) +{ + struct kmsg_dump_iter iter; + size_t kmsg_len; + size_t max_kmsg_size = min(drm_panic_qr_max_data_size(panic_qr_version, 0), + QR_BUFFER1_SIZE); + + kmsg_dump_rewind(&iter); + kmsg_dump_get_buffer(&iter, false, qrbuf1, max_kmsg_size, &kmsg_len); + if (!kmsg_len) + return -ENODATA; + + *qr_image = qrbuf1; + return drm_panic_qr_generate(NULL, qrbuf1, kmsg_len, QR_BUFFER1_SIZE, + qrbuf2, QR_BUFFER2_SIZE); +} + +static int drm_panic_get_qr_code(u8 **qr_image) +{ + if (strlen(CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL) > 0) + return 
drm_panic_get_qr_code_url(qr_image); + else + return drm_panic_get_qr_code_raw(qr_image); +} + +/* + * Draw the panic message at the center of the screen, with a QR Code + */ +static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb) +{ + u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format); + u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format); + const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL); + struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas; + unsigned int max_qr_size, scale; + unsigned int msg_width, msg_height; + int qr_width, qr_canvas_width, qr_pitch, v_margin; + u8 *qr_image; + + if (!font || !qrbuf1 || !qrbuf2 || !stream.workspace) + return -ENOMEM; + + r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height); + + drm_panic_logo_rect(&r_logo, font); + + msg_width = min(get_max_line_len(panic_msg, panic_msg_lines) * font->width, sb->width); + msg_height = min(panic_msg_lines * font->height, sb->height); + r_msg = DRM_RECT_INIT(0, 0, msg_width, msg_height); + + max_qr_size = min(3 * sb->width / 4, 3 * sb->height / 4); + + qr_width = drm_panic_get_qr_code(&qr_image); + if (qr_width <= 0) + return -ENOSPC; + + qr_canvas_width = qr_width + QR_MARGIN * 2; + scale = max_qr_size / qr_canvas_width; + /* QR code is not readable if not scaled at least by 2 */ + if (scale < 2) + return -ENOSPC; + + pr_debug("QR width %d and scale %d\n", qr_width, scale); + r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale); + + v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5; + + drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin); + r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale, + qr_width * scale, qr_width * scale); + + /* Center the panic message */ + drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2, + 3 * v_margin + drm_rect_height(&r_qr_canvas)); + + /* Fill with the background color, and draw text on top */ + drm_panic_fill(sb, &r_screen, bg_color); + + if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr)) + drm_panic_logo_draw(sb, &r_logo, font, fg_color); + + draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color); + + /* Draw the qr code */ + qr_pitch = DIV_ROUND_UP(qr_width, 8); + drm_panic_fill(sb, &r_qr_canvas, fg_color); + drm_panic_fill(sb, &r_qr, bg_color); + drm_panic_blit(sb, &r_qr, qr_image, qr_pitch, scale, fg_color); + return 0; +} + +static void draw_panic_static_qr_code(struct drm_scanout_buffer *sb) +{ + if (_draw_panic_static_qr_code(sb)) + draw_panic_static_user(sb); +} +#else +static void draw_panic_static_qr_code(struct drm_scanout_buffer *sb) +{ + draw_panic_static_user(sb); +} + +static void drm_panic_qr_init(void) {}; +static void drm_panic_qr_exit(void) {}; +#endif + /* * drm_panic_is_format_supported() * @format: a fourcc color code @@ -626,12 +875,38 @@ static void draw_panic_dispatch(struct drm_scanout_buffer *sb) { if (!strcmp(drm_panic_screen, "kmsg")) { draw_panic_static_kmsg(sb); + } else if (!strcmp(drm_panic_screen, "qr_code")) { + draw_panic_static_qr_code(sb); } else { draw_panic_static_user(sb); } } -static void draw_panic_plane(struct drm_plane *plane) +static void drm_panic_set_description(const char *description) +{ + u32 len; + + if (description) { + struct drm_panic_line *desc_line = &panic_msg[panic_msg_lines - 1]; + + desc_line->txt 
= description; + len = strlen(description); + /* ignore the last newline character */ + if (len && description[len - 1] == '\n') + len -= 1; + desc_line->len = len; + } +} + +static void drm_panic_clear_description(void) +{ + struct drm_panic_line *desc_line = &panic_msg[panic_msg_lines - 1]; + + desc_line->len = 0; + desc_line->txt = NULL; +} + +static void draw_panic_plane(struct drm_plane *plane, const char *description) { struct drm_scanout_buffer sb = { }; int ret; @@ -640,6 +915,8 @@ static void draw_panic_plane(struct drm_plane *plane) if (!drm_panic_trylock(plane->dev, flags)) return; + drm_panic_set_description(description); + ret = plane->helper_private->get_scanout_buffer(plane, &sb); if (!ret && drm_panic_is_format_supported(sb.format)) { @@ -647,6 +924,7 @@ static void draw_panic_plane(struct drm_plane *plane) if (plane->helper_private->panic_flush) plane->helper_private->panic_flush(plane); } + drm_panic_clear_description(); drm_panic_unlock(plane->dev, flags); } @@ -655,12 +933,12 @@ static struct drm_plane *to_drm_plane(struct kmsg_dumper *kd) return container_of(kd, struct drm_plane, kmsg_panic); } -static void drm_panic(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) +static void drm_panic(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail) { struct drm_plane *plane = to_drm_plane(dumper); - if (reason == KMSG_DUMP_PANIC) - draw_panic_plane(plane); + if (detail->reason == KMSG_DUMP_PANIC) + draw_panic_plane(plane, detail->description); } @@ -680,7 +958,7 @@ static ssize_t debugfs_trigger_write(struct file *file, const char __user *user_ if (kstrtobool_from_user(user_buf, count, &run) == 0 && run) { struct drm_plane *plane = file->private_data; - draw_panic_plane(plane); + draw_panic_plane(plane, "Test from debugfs"); } return count; } @@ -703,6 +981,26 @@ static void debugfs_register_plane(struct drm_plane *plane, int index) static void debugfs_register_plane(struct drm_plane *plane, int index) {} #endif /* CONFIG_DRM_PANIC_DEBUG */ +/** + * drm_panic_is_enabled + * @dev: the drm device that may supports drm_panic + * + * returns true if the drm device supports drm_panic + */ +bool drm_panic_is_enabled(struct drm_device *dev) +{ + struct drm_plane *plane; + + if (!dev->mode_config.num_total_plane) + return false; + + drm_for_each_plane(plane, dev) + if (plane->helper_private && plane->helper_private->get_scanout_buffer) + return true; + return false; +} +EXPORT_SYMBOL(drm_panic_is_enabled); + /** * drm_panic_register() - Initialize DRM panic for a device * @dev: the drm device on which the panic screen will be displayed. @@ -730,7 +1028,6 @@ void drm_panic_register(struct drm_device *dev) if (registered_plane) drm_info(dev, "Registered %d planes with drm panic\n", registered_plane); } -EXPORT_SYMBOL(drm_panic_register); /** * drm_panic_unregister() @@ -749,4 +1046,19 @@ void drm_panic_unregister(struct drm_device *dev) kmsg_dump_unregister(&plane->kmsg_panic); } } -EXPORT_SYMBOL(drm_panic_unregister); + +/** + * drm_panic_init() - initialize DRM panic. + */ +void __init drm_panic_init(void) +{ + drm_panic_qr_init(); +} + +/** + * drm_panic_exit() - Free the resources taken by drm_panic_exit() + */ +void drm_panic_exit(void) +{ + drm_panic_qr_exit(); +} diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs new file mode 100644 index 00000000000000..1ef56cb07dfbd2 --- /dev/null +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -0,0 +1,1003 @@ +// SPDX-License-Identifier: MIT + +//! This is a simple QR encoder for DRM panic. +//! +//! 
It is called from a panic handler, so it should't allocate memory and +//! does all the work on the stack or on the provided buffers. For +//! simplification, it only supports low error correction, and applies the +//! first mask (checkerboard). It will draw the smallest QRcode that can +//! contain the string passed as parameter. To get the most compact +//! QR code, the start of the URL is encoded as binary, and the +//! compressed kmsg is encoded as numeric. +//! +//! The binary data must be a valid URL parameter, so the easiest way is +//! to use base64 encoding. But this wastes 25% of data space, so the +//! whole stack trace won't fit in the QR code. So instead it encodes +//! every 13bits of input into 4 decimal digits, and then uses the +//! efficient numeric encoding, that encode 3 decimal digits into +//! 10bits. This makes 39bits of compressed data into 12 decimal digits, +//! into 40bits in the QR code, so wasting only 2.5%. And the numbers are +//! valid URL parameter, so the website can do the reverse, to get the +//! binary data. +//! +//! Inspired by these 3 projects, all under MIT license: +//! +//! * +//! * +//! * + +use core::cmp; +use kernel::str::CStr; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)] +struct Version(usize); + +// Generator polynomials for ECC, only those that are needed for low quality. +const P7: [u8; 7] = [87, 229, 146, 149, 238, 102, 21]; +const P10: [u8; 10] = [251, 67, 46, 61, 118, 70, 64, 94, 32, 45]; +const P15: [u8; 15] = [ + 8, 183, 61, 91, 202, 37, 51, 58, 58, 237, 140, 124, 5, 99, 105, +]; +const P18: [u8; 18] = [ + 215, 234, 158, 94, 184, 97, 118, 170, 79, 187, 152, 148, 252, 179, 5, 98, 96, 153, +]; +const P20: [u8; 20] = [ + 17, 60, 79, 50, 61, 163, 26, 187, 202, 180, 221, 225, 83, 239, 156, 164, 212, 212, 188, 190, +]; +const P22: [u8; 22] = [ + 210, 171, 247, 242, 93, 230, 14, 109, 221, 53, 200, 74, 8, 172, 98, 80, 219, 134, 160, 105, + 165, 231, +]; +const P24: [u8; 24] = [ + 229, 121, 135, 48, 211, 117, 251, 126, 159, 180, 169, 152, 192, 226, 228, 218, 111, 0, 117, + 232, 87, 96, 227, 21, +]; +const P26: [u8; 26] = [ + 173, 125, 158, 2, 103, 182, 118, 17, 145, 201, 111, 28, 165, 53, 161, 21, 245, 142, 13, 102, + 48, 227, 153, 145, 218, 70, +]; +const P28: [u8; 28] = [ + 168, 223, 200, 104, 224, 234, 108, 180, 110, 190, 195, 147, 205, 27, 232, 201, 21, 43, 245, 87, + 42, 195, 212, 119, 242, 37, 9, 123, +]; +const P30: [u8; 30] = [ + 41, 173, 145, 152, 216, 31, 179, 182, 50, 48, 110, 86, 239, 96, 222, 125, 42, 173, 226, 193, + 224, 130, 156, 37, 251, 216, 238, 40, 192, 180, +]; + +/// QR Code parameters for Low quality ECC: +/// - Error Correction polynomial. +/// - Number of blocks in group 1. +/// - Number of blocks in group 2. +/// - Block size in group 1. +/// +/// (Block size in group 2 is one more than group 1). 
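+///
+/// As a worked example from the table below: V10 = (&P18, 2, 2, 68) means two
+/// blocks of 68 data bytes plus two blocks of 69 data bytes (274 data bytes in
+/// total), and each block gets 18 error-correction bytes (the length of P18).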
+struct VersionParameter(&'static [u8], u8, u8, u8); +const VPARAM: [VersionParameter; 40] = [ + VersionParameter(&P7, 1, 0, 19), // V1 + VersionParameter(&P10, 1, 0, 34), // V2 + VersionParameter(&P15, 1, 0, 55), // V3 + VersionParameter(&P20, 1, 0, 80), // V4 + VersionParameter(&P26, 1, 0, 108), // V5 + VersionParameter(&P18, 2, 0, 68), // V6 + VersionParameter(&P20, 2, 0, 78), // V7 + VersionParameter(&P24, 2, 0, 97), // V8 + VersionParameter(&P30, 2, 0, 116), // V9 + VersionParameter(&P18, 2, 2, 68), // V10 + VersionParameter(&P20, 4, 0, 81), // V11 + VersionParameter(&P24, 2, 2, 92), // V12 + VersionParameter(&P26, 4, 0, 107), // V13 + VersionParameter(&P30, 3, 1, 115), // V14 + VersionParameter(&P22, 5, 1, 87), // V15 + VersionParameter(&P24, 5, 1, 98), // V16 + VersionParameter(&P28, 1, 5, 107), // V17 + VersionParameter(&P30, 5, 1, 120), // V18 + VersionParameter(&P28, 3, 4, 113), // V19 + VersionParameter(&P28, 3, 5, 107), // V20 + VersionParameter(&P28, 4, 4, 116), // V21 + VersionParameter(&P28, 2, 7, 111), // V22 + VersionParameter(&P30, 4, 5, 121), // V23 + VersionParameter(&P30, 6, 4, 117), // V24 + VersionParameter(&P26, 8, 4, 106), // V25 + VersionParameter(&P28, 10, 2, 114), // V26 + VersionParameter(&P30, 8, 4, 122), // V27 + VersionParameter(&P30, 3, 10, 117), // V28 + VersionParameter(&P30, 7, 7, 116), // V29 + VersionParameter(&P30, 5, 10, 115), // V30 + VersionParameter(&P30, 13, 3, 115), // V31 + VersionParameter(&P30, 17, 0, 115), // V32 + VersionParameter(&P30, 17, 1, 115), // V33 + VersionParameter(&P30, 13, 6, 115), // V34 + VersionParameter(&P30, 12, 7, 121), // V35 + VersionParameter(&P30, 6, 14, 121), // V36 + VersionParameter(&P30, 17, 4, 122), // V37 + VersionParameter(&P30, 4, 18, 122), // V38 + VersionParameter(&P30, 20, 4, 117), // V39 + VersionParameter(&P30, 19, 6, 118), // V40 +]; + +const MAX_EC_SIZE: usize = 30; +const MAX_BLK_SIZE: usize = 123; + +/// Position of the alignment pattern grid. +const ALIGNMENT_PATTERNS: [&[u8]; 40] = [ + &[], + &[6, 18], + &[6, 22], + &[6, 26], + &[6, 30], + &[6, 34], + &[6, 22, 38], + &[6, 24, 42], + &[6, 26, 46], + &[6, 28, 50], + &[6, 30, 54], + &[6, 32, 58], + &[6, 34, 62], + &[6, 26, 46, 66], + &[6, 26, 48, 70], + &[6, 26, 50, 74], + &[6, 30, 54, 78], + &[6, 30, 56, 82], + &[6, 30, 58, 86], + &[6, 34, 62, 90], + &[6, 28, 50, 72, 94], + &[6, 26, 50, 74, 98], + &[6, 30, 54, 78, 102], + &[6, 28, 54, 80, 106], + &[6, 32, 58, 84, 110], + &[6, 30, 58, 86, 114], + &[6, 34, 62, 90, 118], + &[6, 26, 50, 74, 98, 122], + &[6, 30, 54, 78, 102, 126], + &[6, 26, 52, 78, 104, 130], + &[6, 30, 56, 82, 108, 134], + &[6, 34, 60, 86, 112, 138], + &[6, 30, 58, 86, 114, 142], + &[6, 34, 62, 90, 118, 146], + &[6, 30, 54, 78, 102, 126, 150], + &[6, 24, 50, 76, 102, 128, 154], + &[6, 28, 54, 80, 106, 132, 158], + &[6, 32, 58, 84, 110, 136, 162], + &[6, 26, 54, 82, 110, 138, 166], + &[6, 30, 58, 86, 114, 142, 170], +]; + +/// Version information for format V7-V40. 
+const VERSION_INFORMATION: [u32; 34] = [ + 0b00_0111_1100_1001_0100, + 0b00_1000_0101_1011_1100, + 0b00_1001_1010_1001_1001, + 0b00_1010_0100_1101_0011, + 0b00_1011_1011_1111_0110, + 0b00_1100_0111_0110_0010, + 0b00_1101_1000_0100_0111, + 0b00_1110_0110_0000_1101, + 0b00_1111_1001_0010_1000, + 0b01_0000_1011_0111_1000, + 0b01_0001_0100_0101_1101, + 0b01_0010_1010_0001_0111, + 0b01_0011_0101_0011_0010, + 0b01_0100_1001_1010_0110, + 0b01_0101_0110_1000_0011, + 0b01_0110_1000_1100_1001, + 0b01_0111_0111_1110_1100, + 0b01_1000_1110_1100_0100, + 0b01_1001_0001_1110_0001, + 0b01_1010_1111_1010_1011, + 0b01_1011_0000_1000_1110, + 0b01_1100_1100_0001_1010, + 0b01_1101_0011_0011_1111, + 0b01_1110_1101_0111_0101, + 0b01_1111_0010_0101_0000, + 0b10_0000_1001_1101_0101, + 0b10_0001_0110_1111_0000, + 0b10_0010_1000_1011_1010, + 0b10_0011_0111_1001_1111, + 0b10_0100_1011_0000_1011, + 0b10_0101_0100_0010_1110, + 0b10_0110_1010_0110_0100, + 0b10_0111_0101_0100_0001, + 0b10_1000_1100_0110_1001, +]; + +/// Format info for low quality ECC. +const FORMAT_INFOS_QR_L: [u16; 8] = [ + 0x77c4, 0x72f3, 0x7daa, 0x789d, 0x662f, 0x6318, 0x6c41, 0x6976, +]; + +impl Version { + /// Returns the smallest QR version than can hold these segments. + fn from_segments(segments: &[&Segment<'_>]) -> Option { + for v in (1..=40).map(|k| Version(k)) { + if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() { + return Some(v); + } + } + None + } + + fn width(&self) -> u8 { + (self.0 as u8) * 4 + 17 + } + + fn max_data(&self) -> usize { + self.g1_blk_size() * self.g1_blocks() + (self.g1_blk_size() + 1) * self.g2_blocks() + } + + fn ec_size(&self) -> usize { + VPARAM[self.0 - 1].0.len() + } + + fn g1_blocks(&self) -> usize { + VPARAM[self.0 - 1].1 as usize + } + + fn g2_blocks(&self) -> usize { + VPARAM[self.0 - 1].2 as usize + } + + fn g1_blk_size(&self) -> usize { + VPARAM[self.0 - 1].3 as usize + } + + fn alignment_pattern(&self) -> &'static [u8] { + &ALIGNMENT_PATTERNS[self.0 - 1] + } + + fn poly(&self) -> &'static [u8] { + VPARAM[self.0 - 1].0 + } + + fn version_info(&self) -> u32 { + if *self >= Version(7) { + VERSION_INFORMATION[self.0 - 7] + } else { + 0 + } + } +} + +/// Exponential table for Galois Field GF(256). +const EXP_TABLE: [u8; 256] = [ + 1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38, 76, 152, 45, 90, 180, 117, + 234, 201, 143, 3, 6, 12, 24, 48, 96, 192, 157, 39, 78, 156, 37, 74, 148, 53, 106, 212, 181, + 119, 238, 193, 159, 35, 70, 140, 5, 10, 20, 40, 80, 160, 93, 186, 105, 210, 185, 111, 222, 161, + 95, 190, 97, 194, 153, 47, 94, 188, 101, 202, 137, 15, 30, 60, 120, 240, 253, 231, 211, 187, + 107, 214, 177, 127, 254, 225, 223, 163, 91, 182, 113, 226, 217, 175, 67, 134, 17, 34, 68, 136, + 13, 26, 52, 104, 208, 189, 103, 206, 129, 31, 62, 124, 248, 237, 199, 147, 59, 118, 236, 197, + 151, 51, 102, 204, 133, 23, 46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168, + 77, 154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183, 115, 230, 209, 191, 99, 198, + 145, 63, 126, 252, 229, 215, 179, 123, 246, 241, 255, 227, 219, 171, 75, 150, 49, 98, 196, 149, + 55, 110, 220, 165, 87, 174, 65, 130, 25, 50, 100, 200, 141, 7, 14, 28, 56, 112, 224, 221, 167, + 83, 166, 81, 162, 89, 178, 121, 242, 249, 239, 195, 155, 43, 86, 172, 69, 138, 9, 18, 36, 72, + 144, 61, 122, 244, 245, 247, 243, 251, 235, 203, 139, 11, 22, 44, 88, 176, 125, 250, 233, 207, + 131, 27, 54, 108, 216, 173, 71, 142, 1, +]; + +/// Reverse exponential table for Galois Field GF(256). 
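+///
+/// LOG_TABLE[EXP_TABLE[i]] == i for i < 255, so error_code_for_blocks() can
+/// multiply in GF(256) by adding logarithms modulo 255. Entry 0 is a filler:
+/// zero coefficients are skipped before this table is consulted.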
+const LOG_TABLE: [u8; 256] = [ + 175, 0, 1, 25, 2, 50, 26, 198, 3, 223, 51, 238, 27, 104, 199, 75, 4, 100, 224, 14, 52, 141, + 239, 129, 28, 193, 105, 248, 200, 8, 76, 113, 5, 138, 101, 47, 225, 36, 15, 33, 53, 147, 142, + 218, 240, 18, 130, 69, 29, 181, 194, 125, 106, 39, 249, 185, 201, 154, 9, 120, 77, 228, 114, + 166, 6, 191, 139, 98, 102, 221, 48, 253, 226, 152, 37, 179, 16, 145, 34, 136, 54, 208, 148, + 206, 143, 150, 219, 189, 241, 210, 19, 92, 131, 56, 70, 64, 30, 66, 182, 163, 195, 72, 126, + 110, 107, 58, 40, 84, 250, 133, 186, 61, 202, 94, 155, 159, 10, 21, 121, 43, 78, 212, 229, 172, + 115, 243, 167, 87, 7, 112, 192, 247, 140, 128, 99, 13, 103, 74, 222, 237, 49, 197, 254, 24, + 227, 165, 153, 119, 38, 184, 180, 124, 17, 68, 146, 217, 35, 32, 137, 46, 55, 63, 209, 91, 149, + 188, 207, 205, 144, 135, 151, 178, 220, 252, 190, 97, 242, 86, 211, 171, 20, 42, 93, 158, 132, + 60, 57, 83, 71, 109, 65, 162, 31, 45, 67, 216, 183, 123, 164, 118, 196, 23, 73, 236, 127, 12, + 111, 246, 108, 161, 59, 82, 41, 157, 85, 170, 251, 96, 134, 177, 187, 204, 62, 90, 203, 89, 95, + 176, 156, 169, 160, 81, 11, 245, 22, 235, 122, 117, 44, 215, 79, 174, 213, 233, 230, 231, 173, + 232, 116, 214, 244, 234, 168, 80, 88, 175, +]; + +// 4 bits segment header. +const MODE_STOP: u16 = 0; +const MODE_NUMERIC: u16 = 1; +const MODE_BINARY: u16 = 4; +/// Padding bytes. +const PADDING: [u8; 2] = [236, 17]; + +/// Get the next 13 bits of data, starting at specified offset (in bits). +fn get_next_13b(data: &[u8], offset: usize) -> Option<(u16, usize)> { + if offset < data.len() * 8 { + let size = cmp::min(13, data.len() * 8 - offset); + let byte_off = offset / 8; + let bit_off = offset % 8; + // `b` is 20 at max (`bit_off` <= 7 and `size` <= 13). + let b = (bit_off + size) as u16; + + let first_byte = (data[byte_off] << bit_off >> bit_off) as u16; + + let number = match b { + 0..=8 => first_byte >> (8 - b), + 9..=16 => (first_byte << (b - 8)) + (data[byte_off + 1] >> (16 - b)) as u16, + _ => { + (first_byte << (b - 8)) + + ((data[byte_off + 1] as u16) << (b - 16)) + + (data[byte_off + 2] >> (24 - b)) as u16 + } + }; + Some((number, size)) + } else { + None + } +} + +/// Number of bits to encode characters in numeric mode. +const NUM_CHARS_BITS: [usize; 4] = [0, 4, 7, 10]; +const POW10: [u16; 4] = [1, 10, 100, 1000]; + +enum Segment<'a> { + Numeric(&'a [u8]), + Binary(&'a [u8]), +} + +impl Segment<'_> { + fn get_header(&self) -> (u16, usize) { + match self { + Segment::Binary(_) => (MODE_BINARY, 4), + Segment::Numeric(_) => (MODE_NUMERIC, 4), + } + } + + // Returns the size of the length field in bits, depending on QR Version. + fn length_bits_count(&self, version: Version) -> usize { + let Version(v) = version; + match self { + Segment::Binary(_) => match v { + 1..=9 => 8, + _ => 16, + }, + Segment::Numeric(_) => match v { + 1..=9 => 10, + 10..=26 => 12, + _ => 14, + }, + } + } + + // Number of characters in the segment. + fn character_count(&self) -> usize { + match self { + Segment::Binary(data) => data.len(), + Segment::Numeric(data) => { + let data_bits = data.len() * 8; + let last_chars = match data_bits % 13 { + 1 => 1, + k => (k + 1) / 3, + }; + // 4 decimal numbers per 13bits + remainder. 
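+ // For example, 32 bytes of input are 256 bits: 19 full 13-bit groups
+ // give 4 * 19 = 76 digits and the 9 remaining bits give 3 more, so
+ // the segment counts 79 numeric characters.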
+ 4 * (data_bits / 13) + last_chars + } + } + } + + fn get_length_field(&self, version: Version) -> (u16, usize) { + ( + self.character_count() as u16, + self.length_bits_count(version), + ) + } + + fn total_size_bits(&self, version: Version) -> usize { + let data_size = match self { + Segment::Binary(data) => data.len() * 8, + Segment::Numeric(_) => { + let digits = self.character_count(); + 10 * (digits / 3) + NUM_CHARS_BITS[digits % 3] + } + }; + // header + length + data. + 4 + self.length_bits_count(version) + data_size + } + + fn iter(&self) -> SegmentIterator<'_> { + SegmentIterator { + segment: self, + offset: 0, + carry: 0, + carry_len: 0, + } + } +} + +struct SegmentIterator<'a> { + segment: &'a Segment<'a>, + offset: usize, + carry: u16, + carry_len: usize, +} + +impl Iterator for SegmentIterator<'_> { + type Item = (u16, usize); + + fn next(&mut self) -> Option { + match self.segment { + Segment::Binary(data) => { + if self.offset < data.len() { + let byte = data[self.offset] as u16; + self.offset += 1; + Some((byte, 8)) + } else { + None + } + } + Segment::Numeric(data) => { + if self.carry_len == 3 { + let out = (self.carry, NUM_CHARS_BITS[self.carry_len]); + self.carry_len = 0; + self.carry = 0; + Some(out) + } else if let Some((bits, size)) = get_next_13b(data, self.offset) { + self.offset += size; + let new_chars = match size { + 1 => 1, + k => (k + 1) / 3, + }; + if self.carry_len + new_chars > 3 { + self.carry_len = new_chars + self.carry_len - 3; + let out = ( + self.carry * POW10[new_chars - self.carry_len] + + bits / POW10[self.carry_len], + NUM_CHARS_BITS[3], + ); + self.carry = bits % POW10[self.carry_len]; + Some(out) + } else { + let out = ( + self.carry * POW10[new_chars] + bits, + NUM_CHARS_BITS[self.carry_len + new_chars], + ); + self.carry_len = 0; + Some(out) + } + } else if self.carry_len > 0 { + let out = (self.carry, NUM_CHARS_BITS[self.carry_len]); + self.carry_len = 0; + Some(out) + } else { + None + } + } + } + } +} + +struct EncodedMsg<'a> { + data: &'a mut [u8], + ec_size: usize, + g1_blocks: usize, + g2_blocks: usize, + g1_blk_size: usize, + g2_blk_size: usize, + poly: &'static [u8], + version: Version, +} + +/// Data to be put in the QR code, with correct segment encoding, padding, and +/// Error Code Correction. +impl EncodedMsg<'_> { + fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option> { + let version = Version::from_segments(segments)?; + let ec_size = version.ec_size(); + let g1_blocks = version.g1_blocks(); + let g2_blocks = version.g2_blocks(); + let g1_blk_size = version.g1_blk_size(); + let g2_blk_size = g1_blk_size + 1; + let poly = version.poly(); + + // clear the output. + data.fill(0); + + let mut em = EncodedMsg { + data: data, + ec_size, + g1_blocks, + g2_blocks, + g1_blk_size, + g2_blk_size, + poly, + version, + }; + em.encode(segments); + Some(em) + } + + /// Push bits of data at an offset (in bits). 
+ fn push(&mut self, offset: &mut usize, bits: (u16, usize)) { + let (number, len_bits) = bits; + let byte_off = *offset / 8; + let bit_off = *offset % 8; + let b = bit_off + len_bits; + + match (bit_off, b) { + (0, 0..=8) => { + self.data[byte_off] = (number << (8 - b)) as u8; + } + (0, _) => { + self.data[byte_off] = (number >> (b - 8)) as u8; + self.data[byte_off + 1] = (number << (16 - b)) as u8; + } + (_, 0..=8) => { + self.data[byte_off] |= (number << (8 - b)) as u8; + } + (_, 9..=16) => { + self.data[byte_off] |= (number >> (b - 8)) as u8; + self.data[byte_off + 1] = (number << (16 - b)) as u8; + } + _ => { + self.data[byte_off] |= (number >> (b - 8)) as u8; + self.data[byte_off + 1] = (number >> (b - 16)) as u8; + self.data[byte_off + 2] = (number << (24 - b)) as u8; + } + } + *offset += len_bits; + } + + fn add_segments(&mut self, segments: &[&Segment<'_>]) { + let mut offset: usize = 0; + + for s in segments.iter() { + self.push(&mut offset, s.get_header()); + self.push(&mut offset, s.get_length_field(self.version)); + for bits in s.iter() { + self.push(&mut offset, bits); + } + } + self.push(&mut offset, (MODE_STOP, 4)); + + let pad_offset = (offset + 7) / 8; + for i in pad_offset..self.version.max_data() { + self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)]; + } + } + + fn error_code_for_blocks(&mut self, offset: usize, size: usize, ec_offset: usize) { + let mut tmp: [u8; MAX_BLK_SIZE + MAX_EC_SIZE] = [0; MAX_BLK_SIZE + MAX_EC_SIZE]; + + tmp[0..size].copy_from_slice(&self.data[offset..offset + size]); + for i in 0..size { + let lead_coeff = tmp[i] as usize; + if lead_coeff == 0 { + continue; + } + let log_lead_coeff = usize::from(LOG_TABLE[lead_coeff]); + for (u, &v) in tmp[i + 1..].iter_mut().zip(self.poly.iter()) { + *u ^= EXP_TABLE[(usize::from(v) + log_lead_coeff) % 255]; + } + } + self.data[ec_offset..ec_offset + self.ec_size] + .copy_from_slice(&tmp[size..size + self.ec_size]); + } + + fn compute_error_code(&mut self) { + let mut offset = 0; + let mut ec_offset = self.g1_blocks * self.g1_blk_size + self.g2_blocks * self.g2_blk_size; + + for _ in 0..self.g1_blocks { + self.error_code_for_blocks(offset, self.g1_blk_size, ec_offset); + offset += self.g1_blk_size; + ec_offset += self.ec_size; + } + for _ in 0..self.g2_blocks { + self.error_code_for_blocks(offset, self.g2_blk_size, ec_offset); + offset += self.g2_blk_size; + ec_offset += self.ec_size; + } + } + + fn encode(&mut self, segments: &[&Segment<'_>]) { + self.add_segments(segments); + self.compute_error_code(); + } + + fn iter(&self) -> EncodedMsgIterator<'_> { + EncodedMsgIterator { + em: self, + offset: 0, + } + } +} + +/// Iterator, to retrieve the data in the interleaved order needed by QR code. +struct EncodedMsgIterator<'a> { + em: &'a EncodedMsg<'a>, + offset: usize, +} + +impl Iterator for EncodedMsgIterator<'_> { + type Item = u8; + + // Send the bytes in interleaved mode, first byte of first block of group1, + // then first byte of second block of group1, ... 
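+ // For example, with two blocks of three bytes [a1, a2, a3] and [b1, b2, b3],
+ // the emitted order is a1, b1, a2, b2, a3, b3, followed by the error
+ // correction bytes interleaved the same way.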
+ fn next(&mut self) -> Option { + let em = self.em; + let blocks = em.g1_blocks + em.g2_blocks; + let g1_end = em.g1_blocks * em.g1_blk_size; + let g2_end = g1_end + em.g2_blocks * em.g2_blk_size; + let ec_end = g2_end + em.ec_size * blocks; + + if self.offset >= ec_end { + return None; + } + + let offset = if self.offset < em.g1_blk_size * blocks { + // group1 and group2 interleaved + let blk = self.offset % blocks; + let blk_off = self.offset / blocks; + if blk < em.g1_blocks { + blk * em.g1_blk_size + blk_off + } else { + g1_end + em.g2_blk_size * (blk - em.g1_blocks) + blk_off + } + } else if self.offset < g2_end { + // last byte of group2 blocks + let blk2 = self.offset - blocks * em.g1_blk_size; + em.g1_blk_size * em.g1_blocks + blk2 * em.g2_blk_size + em.g2_blk_size - 1 + } else { + // EC blocks + let ec_offset = self.offset - g2_end; + let blk = ec_offset % blocks; + let blk_off = ec_offset / blocks; + + g2_end + blk * em.ec_size + blk_off + }; + self.offset += 1; + Some(em.data[offset]) + } +} + +/// A QR code image, encoded as a linear binary framebuffer. +/// 1 bit per module (pixel), each new line start at next byte boundary. +/// Max width is 177 for V40 QR code, so `u8` is enough for coordinate. +struct QrImage<'a> { + data: &'a mut [u8], + width: u8, + stride: u8, + version: Version, +} + +impl QrImage<'_> { + fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> { + let width = em.version.width(); + let stride = (width + 7) / 8; + let data = qrdata; + + let mut qr_image = QrImage { + data, + width, + stride, + version: em.version, + }; + qr_image.draw_all(em.iter()); + qr_image + } + + fn clear(&mut self) { + self.data.fill(0); + } + + // Set pixel to light color. + fn set(&mut self, x: u8, y: u8) { + let off = y as usize * self.stride as usize + x as usize / 8; + let mut v = self.data[off]; + v |= 0x80 >> (x % 8); + self.data[off] = v; + } + + // Invert a module color. + fn xor(&mut self, x: u8, y: u8) { + let off = y as usize * self.stride as usize + x as usize / 8; + self.data[off] ^= 0x80 >> (x % 8); + } + + // Draw a light square at (x, y) top left corner. + fn draw_square(&mut self, x: u8, y: u8, size: u8) { + for k in 0..size { + self.set(x + k, y); + self.set(x, y + k + 1); + self.set(x + size, y + k); + self.set(x + k + 1, y + size); + } + } + + // Finder pattern: 3 8x8 square at the corners. + fn draw_finders(&mut self) { + self.draw_square(1, 1, 4); + self.draw_square(self.width - 6, 1, 4); + self.draw_square(1, self.width - 6, 4); + for k in 0..8 { + self.set(k, 7); + self.set(self.width - k - 1, 7); + self.set(k, self.width - 8); + } + for k in 0..7 { + self.set(7, k); + self.set(self.width - 8, k); + self.set(7, self.width - 1 - k); + } + } + + fn is_finder(&self, x: u8, y: u8) -> bool { + let end = self.width - 8; + (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8) + } + + // Alignment pattern: 5x5 squares in a grid. + fn draw_alignments(&mut self) { + let positions = self.version.alignment_pattern(); + for &x in positions.iter() { + for &y in positions.iter() { + if !self.is_finder(x, y) { + self.draw_square(x - 1, y - 1, 2); + } + } + } + } + + fn is_alignment(&self, x: u8, y: u8) -> bool { + let positions = self.version.alignment_pattern(); + for &ax in positions.iter() { + for &ay in positions.iter() { + if self.is_finder(ax, ay) { + continue; + } + if x >= ax - 2 && x <= ax + 2 && y >= ay - 2 && y <= ay + 2 { + return true; + } + } + } + false + } + + // Timing pattern: 2 dotted line between the finder patterns. 
+ fn draw_timing_patterns(&mut self) { + let end = self.width - 8; + + for x in (9..end).step_by(2) { + self.set(x, 6); + self.set(6, x); + } + } + + fn is_timing(&self, x: u8, y: u8) -> bool { + x == 6 || y == 6 + } + + // Mask info: 15 bits around the finders, written twice for redundancy. + fn draw_maskinfo(&mut self) { + let info: u16 = FORMAT_INFOS_QR_L[0]; + let mut skip = 0; + + for k in 0..7 { + if k == 6 { + skip = 1; + } + if info & (1 << (14 - k)) == 0 { + self.set(k + skip, 8); + self.set(8, self.width - 1 - k); + } + } + skip = 0; + for k in 0..8 { + if k == 2 { + skip = 1; + } + if info & (1 << (7 - k)) == 0 { + self.set(8, 8 - skip - k); + self.set(self.width - 8 + k, 8); + } + } + } + + fn is_maskinfo(&self, x: u8, y: u8) -> bool { + let end = self.width - 8; + // Count the dark module as mask info. + (x <= 8 && y == 8) || (y <= 8 && x == 8) || (x == 8 && y >= end) || (x >= end && y == 8) + } + + // Version info: 18bits written twice, close to the finders. + fn draw_version_info(&mut self) { + let vinfo = self.version.version_info(); + let pos = self.width - 11; + + if vinfo != 0 { + for x in 0..3 { + for y in 0..6 { + if vinfo & (1 << (x + y * 3)) == 0 { + self.set(x + pos, y); + self.set(y, x + pos); + } + } + } + } + } + + fn is_version_info(&self, x: u8, y: u8) -> bool { + let vinfo = self.version.version_info(); + let pos = self.width - 11; + + vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6)) + } + + // Returns true if the module is reserved (Not usable for data and EC). + fn is_reserved(&self, x: u8, y: u8) -> bool { + self.is_alignment(x, y) + || self.is_finder(x, y) + || self.is_timing(x, y) + || self.is_maskinfo(x, y) + || self.is_version_info(x, y) + } + + // Last module to draw, at bottom left corner. + fn is_last(&self, x: u8, y: u8) -> bool { + x == 0 && y == self.width - 1 + } + + // Move to the next module according to QR code order. + // From bottom right corner, to bottom left corner. + fn next(&self, x: u8, y: u8) -> (u8, u8) { + let x_adj = if x <= 6 { x + 1 } else { x }; + let column_type = (self.width - x_adj) % 4; + + match column_type { + 2 if y > 0 => (x + 1, y - 1), + 0 if y < self.width - 1 => (x + 1, y + 1), + 0 | 2 if x == 7 => (x - 2, y), + _ => (x - 1, y), + } + } + + // Find next module that can hold data. + fn next_available(&self, x: u8, y: u8) -> (u8, u8) { + let (mut x, mut y) = self.next(x, y); + while self.is_reserved(x, y) && !self.is_last(x, y) { + (x, y) = self.next(x, y); + } + (x, y) + } + + fn draw_data(&mut self, data: impl Iterator) { + let (mut x, mut y) = (self.width - 1, self.width - 1); + for byte in data { + for s in 0..8 { + if byte & (0x80 >> s) == 0 { + self.set(x, y); + } + (x, y) = self.next_available(x, y); + } + } + // Set the remaining modules (0, 3 or 7 depending on version). + // because 0 correspond to a light module. + while !self.is_last(x, y) { + if !self.is_reserved(x, y) { + self.set(x, y); + } + (x, y) = self.next(x, y); + } + } + + // Apply checkerboard mask to all non-reserved modules. + fn apply_mask(&mut self) { + for x in 0..self.width { + for y in 0..self.width { + if (x ^ y) % 2 == 0 && !self.is_reserved(x, y) { + self.xor(x, y); + } + } + } + } + + // Draw the QR code with the provided data iterator. + fn draw_all(&mut self, data: impl Iterator) { + // First clear the table, as it may have already some data. 
+ self.clear(); + self.draw_finders(); + self.draw_alignments(); + self.draw_timing_patterns(); + self.draw_version_info(); + self.draw_data(data); + self.draw_maskinfo(); + self.apply_mask(); + } +} + +/// C entry point for the rust QR Code generator. +/// +/// Write the QR code image in the data buffer, and return the QR code width, +/// or 0, if the data doesn't fit in a QR code. +/// +/// * `url`: The base URL of the QR code. It will be encoded as Binary segment. +/// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it +/// will be encoded as binary segment, otherwise it will be encoded +/// efficiently as a numeric segment, and appended to the URL. +/// * `data_len`: Length of the data, that needs to be encoded, must be less +/// than data_size. +/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold +/// a V40 QR code. It will then be overwritten with the QR code image. +/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the +/// segments and ECC. +/// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes +/// long for V40. +/// +/// # Safety +/// +/// * `url` must be null or point at a nul-terminated string. +/// * `data` must be valid for reading and writing for `data_size` bytes. +/// * `tmp` must be valid for reading and writing for `tmp_size` bytes. +/// +/// They must remain valid for the duration of the function call. + +#[no_mangle] +pub unsafe extern "C" fn drm_panic_qr_generate( + url: *const i8, + data: *mut u8, + data_len: usize, + data_size: usize, + tmp: *mut u8, + tmp_size: usize, +) -> u8 { + if data_size < 4071 || tmp_size < 3706 || data_len > data_size { + return 0; + } + // SAFETY: The caller ensures that `data` is a valid pointer for reading and + // writing `data_size` bytes. + let data_slice: &mut [u8] = unsafe { core::slice::from_raw_parts_mut(data, data_size) }; + // SAFETY: The caller ensures that `tmp` is a valid pointer for reading and + // writing `tmp_size` bytes. + let tmp_slice: &mut [u8] = unsafe { core::slice::from_raw_parts_mut(tmp, tmp_size) }; + if url.is_null() { + match EncodedMsg::new(&[&Segment::Binary(&data_slice[0..data_len])], tmp_slice) { + None => 0, + Some(em) => { + let qr_image = QrImage::new(&em, data_slice); + qr_image.width + } + } + } else { + // SAFETY: The caller ensures that `url` is a valid pointer to a + // nul-terminated string. + let url_cstr: &CStr = unsafe { CStr::from_char_ptr(url) }; + let segments = &[ + &Segment::Binary(url_cstr.as_bytes()), + &Segment::Numeric(&data_slice[0..data_len]), + ]; + match EncodedMsg::new(segments, tmp_slice) { + None => 0, + Some(em) => { + let qr_image = QrImage::new(&em, data_slice); + qr_image.width + } + } + } +} + +/// Returns the maximum data size that can fit in a QR code of this version. +/// * `version`: QR code version, between 1-40. +/// * `url_len`: Length of the URL. +/// +/// * If `url_len` > 0, remove the 2 segments header/length and also count the +/// conversion to numeric segments. +/// * If `url_len` = 0, only removes 3 bytes for 1 binary segment. +#[no_mangle] +pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize { + if version < 1 || version > 40 { + return 0; + } + let max_data = Version(version as usize).max_data(); + + if url_len > 0 { + // Binary segment (URL) 4 + 16 bits, numeric segment (kmsg) 4 + 12 bits => 5 bytes. 
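+        // Worked example (illustrative only; assumes Version(40).max_data()
+        // is 2956 bytes, the V40 low-ECC capacity): with a 31 byte URL,
+        // max = 2956 - 31 - 5 = 2920, and the function returns
+        // 2920 * 39 / 40 = 2847 bytes, the 39/40 factor covering the
+        // expansion from re-encoding the binary kmsg as a numeric segment.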
+ if url_len + 5 >= max_data { + 0 + } else { + let max = max_data - url_len - 5; + (max * 39) / 40 + } + } else { + // Remove 3 bytes for the binary segment (header 4 bits, length 16 bits, stop 4bits). + max_data - 3 + } +} diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 03bd3c7bd0dc2c..0e3f8adf162f67 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -410,22 +410,30 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, } /** - * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers + * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure * @handle: buffer handle to export * @flags: flags like DRM_CLOEXEC - * @prime_fd: pointer to storage for the fd id of the create dma-buf * * This is the PRIME export function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. + * + * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it + * has created, without attaching it to any file descriptors. The difference + * between those two is similar to that between anon_inode_getfile() and + * anon_inode_getfd(); insertion into descriptor table is something you + * can not revert if any cleanup is needed, so the descriptor-returning + * variants should only be used when you are past the last failure exit + * and the only thing left is passing the new file descriptor to userland. + * When all you need is the object itself or when you need to do something + * else that might fail, use that one instead. */ -int drm_gem_prime_handle_to_fd(struct drm_device *dev, +struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, - uint32_t flags, - int *prime_fd) + uint32_t flags) { struct drm_gem_object *obj; int ret = 0; @@ -434,14 +442,14 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, mutex_lock(&file_priv->prime.lock); obj = drm_gem_object_lookup(file_priv, handle); if (!obj) { - ret = -ENOENT; + dmabuf = ERR_PTR(-ENOENT); goto out_unlock; } dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle); if (dmabuf) { get_dma_buf(dmabuf); - goto out_have_handle; + goto out; } mutex_lock(&dev->object_name_lock); @@ -463,7 +471,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ - ret = PTR_ERR(dmabuf); mutex_unlock(&dev->object_name_lock); goto out; } @@ -478,34 +485,51 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, ret = drm_prime_add_buf_handle(&file_priv->prime, dmabuf, handle); mutex_unlock(&dev->object_name_lock); - if (ret) - goto fail_put_dmabuf; - -out_have_handle: - ret = dma_buf_fd(dmabuf, flags); - /* - * We must _not_ remove the buffer from the handle cache since the newly - * created dma buf is already linked in the global obj->dma_buf pointer, - * and that is invariant as long as a userspace gem handle exists. - * Closing the handle will clean out the cache anyway, so we don't leak. 
- */ - if (ret < 0) { - goto fail_put_dmabuf; - } else { - *prime_fd = ret; - ret = 0; + if (ret) { + dma_buf_put(dmabuf); + dmabuf = ERR_PTR(ret); } - - goto out; - -fail_put_dmabuf: - dma_buf_put(dmabuf); out: drm_gem_object_put(obj); out_unlock: mutex_unlock(&file_priv->prime.lock); + return dmabuf; +} +EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf); - return ret; +/** + * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers + * @dev: dev to export the buffer from + * @file_priv: drm file-private structure + * @handle: buffer handle to export + * @flags: flags like DRM_CLOEXEC + * @prime_fd: pointer to storage for the fd id of the create dma-buf + * + * This is the PRIME export function which must be used mandatorily by GEM + * drivers to ensure correct lifetime management of the underlying GEM object. + * The actual exporting from GEM object to a dma-buf is done through the + * &drm_gem_object_funcs.export callback. + */ +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags, + int *prime_fd) +{ + struct dma_buf *dmabuf; + int fd = get_unused_fd_flags(flags); + + if (fd < 0) + return fd; + + dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags); + if (IS_ERR(dmabuf)) { + put_unused_fd(fd); + return PTR_ERR(dmabuf); + } + + fd_install(fd, dmabuf->file); + *prime_fd = fd; + return 0; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c index cf24dfdeb6b274..0081190201a7f9 100644 --- a/drivers/gpu/drm/drm_print.c +++ b/drivers/gpu/drm/drm_print.c @@ -100,8 +100,9 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str) copy = iterator->remain; /* Copy out the bit of the string that we need */ - memcpy(iterator->data, - str + (iterator->start - iterator->offset), copy); + if (iterator->data) + memcpy(iterator->data, + str + (iterator->start - iterator->offset), copy); iterator->offset = iterator->start + copy; iterator->remain -= copy; @@ -110,7 +111,8 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str) len = min_t(ssize_t, strlen(str), iterator->remain); - memcpy(iterator->data + pos, str, len); + if (iterator->data) + memcpy(iterator->data + pos, str, len); iterator->offset += len; iterator->remain -= len; @@ -140,8 +142,9 @@ void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf) if ((iterator->offset >= iterator->start) && (len < iterator->remain)) { ssize_t pos = iterator->offset - iterator->start; - snprintf(((char *) iterator->data) + pos, - iterator->remain, "%pV", vaf); + if (iterator->data) + snprintf(((char *) iterator->data) + pos, + iterator->remain, "%pV", vaf); iterator->offset += len; iterator->remain -= len; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index bb49d552e671f1..92f21764246f8c 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -714,7 +714,7 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); * @dev: drm_device whose connector state changed * * This function fires off the uevent for userspace and also calls the - * output_poll_changed function, which is most commonly used to inform the fbdev + * client hotplug function, which is most commonly used to inform the fbdev * emulation code and allow it to update the fbcon output configuration. 
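+ * (With this change the fbdev emulation is reached through the generic
+ * client hotplug path, drm_client_dev_hotplug(), rather than through the
+ * old &drm_mode_config_funcs.output_poll_changed hook.)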
* * Drivers should call this from their hotplug handling code when a change is @@ -730,11 +730,7 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); */ void drm_kms_helper_hotplug_event(struct drm_device *dev) { - /* send a uevent + call fbdev */ drm_sysfs_hotplug_event(dev); - if (dev->mode_config.funcs->output_poll_changed) - dev->mode_config.funcs->output_poll_changed(dev); - drm_client_dev_hotplug(dev); } EXPORT_SYMBOL(drm_kms_helper_hotplug_event); @@ -750,11 +746,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - /* send a uevent + call fbdev */ drm_sysfs_connector_hotplug_event(connector); - if (dev->mode_config.funcs->output_poll_changed) - dev->mode_config.funcs->output_poll_changed(dev); - drm_client_dev_hotplug(dev); } EXPORT_SYMBOL(drm_kms_helper_connector_hotplug_event); @@ -888,7 +880,7 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable(). * * If however, the polling was never initialized, this call will trigger a - * warning and return + * warning and return. * * Note that calls to enable and disable polling must be strictly ordered, which * is automatically the case when they're only call from suspend/resume diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index 85c79a38c13aca..492acce0516fa8 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -85,7 +85,6 @@ static u32 clip_scaled(int src, int dst, int *clip) * factors from @src to @dst. * * RETURNS: - * * %true if rectangle @dst is still visible after being clipped, * %false otherwise. */ diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 4fcfc0b9b386cf..8e3d2d7060f80e 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -715,16 +715,16 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, struct fd f = fdget(fd); int ret; - if (!f.file) + if (!fd_file(f)) return -EINVAL; - if (f.file->f_op != &drm_syncobj_file_fops) { + if (fd_file(f)->f_op != &drm_syncobj_file_fops) { fdput(f); return -EINVAL; } /* take a reference to put in the idr */ - syncobj = f.file->private_data; + syncobj = fd_file(f)->private_data; drm_syncobj_get(syncobj); idr_preload(GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index cc3571e25a9acd..94e45ed6869d01 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -131,7 +131,7 @@ * guaranteed to be enabled. * * On many hardware disabling the vblank interrupt cannot be done in a race-free - * manner, see &drm_driver.vblank_disable_immediate and + * manner, see &drm_vblank_crtc_config.disable_immediate and * &drm_driver.max_vblank_count. In that case the vblank core only disables the * vblanks after a timer has expired, which can be configured through the * ``vblankoffdelay`` module parameter. @@ -686,7 +686,6 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); * drm_atomic_helper_calc_timestamping_constants(). * * Returns: - * * Returns true on success, and false on failure, i.e. when no accurate * timestamp could be acquired. */ @@ -831,7 +830,6 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal); * drm_atomic_helper_calc_timestamping_constants(). * * Returns: - * * Returns true on success, and false on failure, i.e. when no accurate * timestamp could be acquired. 
*/ @@ -1241,6 +1239,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get); void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); + int vblank_offdelay = vblank->config.offdelay_ms; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; @@ -1250,13 +1249,13 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe) /* Last user schedules interrupt disable */ if (atomic_dec_and_test(&vblank->refcount)) { - if (drm_vblank_offdelay == 0) + if (!vblank_offdelay) return; - else if (drm_vblank_offdelay < 0) + else if (vblank_offdelay < 0) vblank_disable_fn(&vblank->disable_timer); - else if (!dev->vblank_disable_immediate) + else if (!vblank->config.disable_immediate) mod_timer(&vblank->disable_timer, - jiffies + ((drm_vblank_offdelay * HZ)/1000)); + jiffies + ((vblank_offdelay * HZ) / 1000)); } } @@ -1265,7 +1264,8 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe) * @crtc: which counter to give up * * Release ownership of a given vblank counter, turning off interrupts - * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. + * if possible. Disable interrupts after &drm_vblank_crtc_config.offdelay_ms + * milliseconds. */ void drm_crtc_vblank_put(struct drm_crtc *crtc) { @@ -1466,16 +1466,20 @@ void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, EXPORT_SYMBOL(drm_crtc_set_max_vblank_count); /** - * drm_crtc_vblank_on - enable vblank events on a CRTC + * drm_crtc_vblank_on_config - enable vblank events on a CRTC with custom + * configuration options * @crtc: CRTC in question + * @config: Vblank configuration value * - * This functions restores the vblank interrupt state captured with - * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note - * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be - * unbalanced and so can also be unconditionally called in driver load code to - * reflect the current hardware state of the crtc. + * See drm_crtc_vblank_on(). In addition, this function allows you to provide a + * custom vblank configuration for a given CRTC. + * + * Note that @config is copied, the pointer does not need to stay valid beyond + * this function call. For details of the parameters see + * struct drm_vblank_crtc_config. */ -void drm_crtc_vblank_on(struct drm_crtc *crtc) +void drm_crtc_vblank_on_config(struct drm_crtc *crtc, + const struct drm_vblank_crtc_config *config) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); @@ -1488,6 +1492,8 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc) drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n", pipe, vblank->enabled, vblank->inmodeset); + vblank->config = *config; + /* Drop our private "prevent drm_vblank_get" refcount */ if (vblank->inmodeset) { atomic_dec(&vblank->refcount); @@ -1500,10 +1506,33 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc) * re-enable interrupts if there are users left, or the * user wishes vblank interrupts to be enabled all the time. */ - if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0) + if (atomic_read(&vblank->refcount) != 0 || !vblank->config.offdelay_ms) drm_WARN_ON(dev, drm_vblank_enable(dev, pipe)); spin_unlock_irq(&dev->vbl_lock); } +EXPORT_SYMBOL(drm_crtc_vblank_on_config); + +/** + * drm_crtc_vblank_on - enable vblank events on a CRTC + * @crtc: CRTC in question + * + * This functions restores the vblank interrupt state captured with + * drm_crtc_vblank_off() again and is generally called when enabling @crtc. 
Note + * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be + * unbalanced and so can also be unconditionally called in driver load code to + * reflect the current hardware state of the crtc. + * + * Note that unlike in drm_crtc_vblank_on_config(), default values are used. + */ +void drm_crtc_vblank_on(struct drm_crtc *crtc) +{ + const struct drm_vblank_crtc_config config = { + .offdelay_ms = drm_vblank_offdelay, + .disable_immediate = crtc->dev->vblank_disable_immediate + }; + + drm_crtc_vblank_on_config(crtc, &config); +} EXPORT_SYMBOL(drm_crtc_vblank_on); static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe) @@ -1556,16 +1585,21 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe) * * Note that drivers must have race-free high-precision timestamping support, * i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and - * &drm_driver.vblank_disable_immediate must be set to indicate the + * &drm_vblank_crtc_config.disable_immediate must be set to indicate the * time-stamping functions are race-free against vblank hardware counter * increments. */ void drm_crtc_vblank_restore(struct drm_crtc *crtc) { - WARN_ON_ONCE(!crtc->funcs->get_vblank_timestamp); - WARN_ON_ONCE(!crtc->dev->vblank_disable_immediate); + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); + + drm_WARN_ON_ONCE(dev, !crtc->funcs->get_vblank_timestamp); + drm_WARN_ON_ONCE(dev, vblank->inmodeset); + drm_WARN_ON_ONCE(dev, !vblank->config.disable_immediate); - drm_vblank_restore(crtc->dev, drm_crtc_index(crtc)); + drm_vblank_restore(dev, pipe); } EXPORT_SYMBOL(drm_crtc_vblank_restore); @@ -1754,7 +1788,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, /* If the counter is currently enabled and accurate, short-circuit * queries to return the cached timestamp of the last vblank. */ - if (dev->vblank_disable_immediate && + if (vblank->config.disable_immediate && drm_wait_vblank_is_query(vblwait) && READ_ONCE(vblank->enabled)) { drm_wait_vblank_reply(dev, pipe, &vblwait->reply); @@ -1918,8 +1952,8 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) * been signaled. The disable has to be last (after * drm_handle_vblank_events) so that the timestamp is always accurate. 
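+	 * With the per-CRTC vblank configuration the immediate disable is
+	 * also gated on the CRTC's own settings: it only happens when
+	 * disable_immediate is set and offdelay_ms is positive; the other
+	 * offdelay_ms cases are handled from drm_vblank_put() instead.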
*/ - disable_irq = (dev->vblank_disable_immediate && - drm_vblank_offdelay > 0 && + disable_irq = (vblank->config.disable_immediate && + vblank->config.offdelay_ms > 0 && !atomic_read(&vblank->refcount)); drm_handle_vblank_events(dev, pipe); @@ -1992,7 +2026,8 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, pipe = drm_crtc_index(crtc); vblank = drm_crtc_vblank_crtc(crtc); - vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled); + vblank_enabled = READ_ONCE(vblank->config.disable_immediate) && + READ_ONCE(vblank->enabled); if (!vblank_enabled) { ret = drm_crtc_vblank_get(crtc); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 62dcfdc7894dd8..ab9ca4824b62e1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -72,7 +72,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job drm_sched_resubmit_jobs(&gpu->sched); - drm_sched_start(&gpu->sched, true); + drm_sched_start(&gpu->sched); return DRM_GPU_SCHED_STAT_NOMINAL; out_no_timeout: diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index e2c7373f20c6b7..6a6761935224ce 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -110,7 +110,7 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, void *mapping = NULL; if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) - mapping = arm_iommu_create_mapping(&platform_bus_type, + mapping = arm_iommu_create_mapping(dev, EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE); else if (IS_ENABLED(CONFIG_IOMMU_DMA)) mapping = iommu_get_domain_for_dev(priv->dma_dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 81d501efd01330..23646e55f142c3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -254,10 +254,6 @@ static inline int exynos_drm_check_fimc_device(struct device *dev) } #endif -int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, - bool nonblock); - - extern struct platform_driver fimd_driver; extern struct platform_driver exynos5433_decon_driver; extern struct platform_driver decon_driver; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 142184c8c3bc55..4d7ea65b7dd836 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1125,7 +1125,7 @@ static void fimc_abort(struct exynos_drm_ipp *ipp, } } -static struct exynos_drm_ipp_funcs ipp_funcs = { +static const struct exynos_drm_ipp_funcs ipp_funcs = { .commit = fimc_commit, .abort = fimc_abort, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 1b111e2c33472b..59fa2205071710 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1162,7 +1162,7 @@ static void gsc_abort(struct exynos_drm_ipp *ipp, } } -static struct exynos_drm_ipp_funcs ipp_funcs = { +static const struct exynos_drm_ipp_funcs ipp_funcs = { .commit = gsc_commit, .abort = gsc_abort, }; @@ -1174,7 +1174,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data) struct exynos_drm_ipp *ipp = &ctx->ipp; ctx->drm_dev = drm_dev; - ctx->drm_dev = drm_dev; + ipp->drm_dev = drm_dev; exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, diff 
--git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index a9d46989682420..2788105ac780bc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -403,7 +403,7 @@ static int scaler_commit(struct exynos_drm_ipp *ipp, return 0; } -static struct exynos_drm_ipp_funcs ipp_funcs = { +static const struct exynos_drm_ipp_funcs ipp_funcs = { .commit = scaler_commit, }; diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 3adc2c9ab72da0..f3a4517bdf27ce 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -568,7 +568,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, dev->dev, "I2C bus registration failed.\n"); goto err_encoder_cleanup; } - gma_encoder->i2c_bus->slave_addr = 0x2C; + gma_encoder->i2c_bus->target_addr = 0x2C; dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus; /* diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c index 8245b5603d2c06..d5924ca3ed0500 100644 --- a/drivers/gpu/drm/gma500/intel_bios.c +++ b/drivers/gpu/drm/gma500/intel_bios.c @@ -14,8 +14,8 @@ #include "psb_intel_drv.h" #include "psb_intel_reg.h" -#define SLAVE_ADDR1 0x70 -#define SLAVE_ADDR2 0x72 +#define TARGET_ADDR1 0x70 +#define TARGET_ADDR2 0x72 static void *find_section(struct bdb_header *bdb, int section_id) { @@ -357,10 +357,10 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv, /* skip the device block if device type is invalid */ continue; } - if (p_child->slave_addr != SLAVE_ADDR1 && - p_child->slave_addr != SLAVE_ADDR2) { + if (p_child->target_addr != TARGET_ADDR1 && + p_child->target_addr != TARGET_ADDR2) { /* - * If the slave address is neither 0x70 nor 0x72, + * If the target address is neither 0x70 nor 0x72, * it is not a SDVO device. Skip it. */ continue; @@ -371,22 +371,22 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv, DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); continue; } - DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" + DRM_DEBUG_KMS("the SDVO device with target addr %2x is found on" " %s port\n", - p_child->slave_addr, + p_child->target_addr, (p_child->dvo_port == DEVICE_PORT_DVOB) ? "SDVOB" : "SDVOC"); p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); if (!p_mapping->initialized) { p_mapping->dvo_port = p_child->dvo_port; - p_mapping->slave_addr = p_child->slave_addr; + p_mapping->target_addr = p_child->target_addr; p_mapping->dvo_wiring = p_child->dvo_wiring; p_mapping->ddc_pin = p_child->ddc_pin; p_mapping->i2c_pin = p_child->i2c_pin; p_mapping->initialized = 1; DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", p_mapping->dvo_port, - p_mapping->slave_addr, + p_mapping->target_addr, p_mapping->dvo_wiring, p_mapping->ddc_pin, p_mapping->i2c_pin); @@ -394,10 +394,10 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv, DRM_DEBUG_KMS("Maybe one SDVO port is shared by " "two SDVO device.\n"); } - if (p_child->slave2_addr) { + if (p_child->target2_addr) { /* Maybe this is a SDVO device with multiple inputs */ /* And the mapping info is not added */ - DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" + DRM_DEBUG_KMS("there exists the target2_addr. 
Maybe this" " is a SDVO device with multiple inputs.\n"); } count++; diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h index 0e6facf21e332c..b5adea2a20c389 100644 --- a/drivers/gpu/drm/gma500/intel_bios.h +++ b/drivers/gpu/drm/gma500/intel_bios.h @@ -186,13 +186,13 @@ struct child_device_config { u16 addin_offset; u8 dvo_port; /* See Device_PORT_* above */ u8 i2c_pin; - u8 slave_addr; + u8 target_addr; u8 ddc_pin; u16 edid_ptr; u8 dvo_cfg; /* See DEVICE_CFG_* above */ u8 dvo2_port; u8 i2c2_pin; - u8 slave2_addr; + u8 target2_addr; u8 ddc2_pin; u8 capabilities; u8 dvo_wiring;/* See DEVICE_WIRE_* above */ diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c index aa45509859f21a..ee8b047587f254 100644 --- a/drivers/gpu/drm/gma500/intel_gmbus.c +++ b/drivers/gpu/drm/gma500/intel_gmbus.c @@ -333,7 +333,7 @@ gmbus_xfer(struct i2c_adapter *adapter, clear_err: /* Toggle the Software Clear Interrupt bit. This has the effect * of resetting the GMBUS controller and so clearing the - * BUS_ERROR raised by the slave's NAK. + * BUS_ERROR raised by the target's NAK. */ GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 8b64f61ffaf9d5..d67c2b3ad90116 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -498,6 +498,7 @@ static const struct file_operations psb_gem_fops = { .mmap = drm_gem_mmap, .poll = drm_poll, .read = drm_read, + .fop_flags = FOP_UNSIGNED_OFFSET, }; static const struct drm_driver driver = { diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 83c17689c454f7..bddf89b82fecc2 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -202,7 +202,7 @@ struct psb_intel_opregion { struct sdvo_device_mapping { u8 initialized; u8 dvo_port; - u8 slave_addr; + u8 target_addr; u8 dvo_wiring; u8 i2c_pin; u8 i2c_speed; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index c111e933e1ed20..2499fd6a80c9d8 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -80,7 +80,7 @@ struct psb_intel_mode_device { struct gma_i2c_chan { struct i2c_adapter base; struct i2c_algo_bit_data algo; - u8 slave_addr; + u8 target_addr; /* for getting at dev. private (mmio etc.) 
*/ struct drm_device *drm_dev; diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 8d1be94a443b24..138f153d38ba35 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -97,7 +97,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev, struct i2c_msg msgs[] = { { - .addr = lvds_i2c_bus->slave_addr, + .addr = lvds_i2c_bus->target_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -710,7 +710,7 @@ void psb_intel_lvds_init(struct drm_device *dev, dev->dev, "I2C bus registration failed.\n"); goto err_encoder_cleanup; } - lvds_priv->i2c_bus->slave_addr = 0x2C; + lvds_priv->i2c_bus->target_addr = 0x2C; dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus; /* diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index e4f914decebaec..8dafff963ca8bc 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -70,7 +70,7 @@ struct psb_intel_sdvo { struct gma_encoder base; struct i2c_adapter *i2c; - u8 slave_addr; + u8 target_addr; struct i2c_adapter ddc; @@ -259,13 +259,13 @@ static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 a { struct i2c_msg msgs[] = { { - .addr = psb_intel_sdvo->slave_addr, + .addr = psb_intel_sdvo->target_addr, .flags = 0, .len = 1, .buf = &addr, }, { - .addr = psb_intel_sdvo->slave_addr, + .addr = psb_intel_sdvo->target_addr, .flags = I2C_M_RD, .len = 1, .buf = ch, @@ -463,14 +463,14 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len); for (i = 0; i < args_len; i++) { - msgs[i].addr = psb_intel_sdvo->slave_addr; + msgs[i].addr = psb_intel_sdvo->target_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2 *i; buf[2*i + 0] = SDVO_I2C_ARG_0 - i; buf[2*i + 1] = ((u8*)args)[i]; } - msgs[i].addr = psb_intel_sdvo->slave_addr; + msgs[i].addr = psb_intel_sdvo->target_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2*i; @@ -479,12 +479,12 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c /* the following two are to read the response */ status = SDVO_I2C_CMD_STATUS; - msgs[i+1].addr = psb_intel_sdvo->slave_addr; + msgs[i+1].addr = psb_intel_sdvo->target_addr; msgs[i+1].flags = 0; msgs[i+1].len = 1; msgs[i+1].buf = &status; - msgs[i+2].addr = psb_intel_sdvo->slave_addr; + msgs[i+2].addr = psb_intel_sdvo->target_addr; msgs[i+2].flags = I2C_M_RD; msgs[i+2].len = 1; msgs[i+2].buf = &status; @@ -1899,7 +1899,7 @@ psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int devi } static u8 -psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) +psb_intel_sdvo_get_target_addr(struct drm_device *dev, int sdvo_reg) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct sdvo_device_mapping *my_mapping, *other_mapping; @@ -1913,14 +1913,14 @@ psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) } /* If the BIOS described our SDVO device, take advantage of it. */ - if (my_mapping->slave_addr) - return my_mapping->slave_addr; + if (my_mapping->target_addr) + return my_mapping->target_addr; /* If the BIOS only described a different SDVO device, use the * address that it isn't using. 
*/ - if (other_mapping->slave_addr) { - if (other_mapping->slave_addr == 0x70) + if (other_mapping->target_addr) { + if (other_mapping->target_addr == 0x70) return 0x72; else return 0x70; @@ -2446,7 +2446,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) return false; psb_intel_sdvo->sdvo_reg = sdvo_reg; - psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; + psb_intel_sdvo->target_addr = psb_intel_sdvo_get_target_addr(dev, sdvo_reg) >> 1; psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg); if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) { kfree(psb_intel_sdvo); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index 207aa3f660b030..6b566f3aeecbca 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -57,7 +57,6 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv, int hibmc_de_init(struct hibmc_drm_private *priv); int hibmc_vdac_init(struct hibmc_drm_private *priv); -int hibmc_mm_init(struct hibmc_drm_private *hibmc); int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector); #endif diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c index d0c3880d7f80f8..493e730c685b80 100644 --- a/drivers/gpu/drm/i915/display/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c @@ -170,13 +170,13 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) { struct i2c_msg msgs[] = { { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 1, .buf = &addr, }, { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = I2C_M_RD, .len = 1, .buf = val, @@ -189,7 +189,7 @@ static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val) { u8 buf[2] = { addr, val }; struct i2c_msg msg = { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 2, .buf = buf, @@ -197,7 +197,7 @@ static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val) return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1; } -/** Probes for a CH7017 on the given bus and slave address. */ +/** Probes for a CH7017 on the given bus and target address. 
*/ static bool ch7017_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { @@ -227,13 +227,13 @@ static bool ch7017_init(struct intel_dvo_device *dvo, break; default: DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " - "slave %d.\n", - val, adapter->name, dvo->slave_addr); + "target %d.\n", + val, adapter->name, dvo->target_addr); goto fail; } DRM_DEBUG_KMS("%s detected on %s, addr %d\n", - str, adapter->name, dvo->slave_addr); + str, adapter->name, dvo->target_addr); return true; fail: diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c index 2e8e85da5a4094..534b8544e0a41d 100644 --- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c @@ -153,13 +153,13 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) struct i2c_msg msgs[] = { { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -176,7 +176,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) if (!ch7xxx->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, dvo->target_addr); } return false; } @@ -188,7 +188,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; struct i2c_msg msg = { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -202,7 +202,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) if (!ch7xxx->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, dvo->target_addr); } return false; @@ -229,8 +229,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, name = ch7xxx_get_id(vendor); if (!name) { - DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n", - vendor, adapter->name, dvo->slave_addr); + DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s target %d.\n", + vendor, adapter->name, dvo->target_addr); goto out; } @@ -240,8 +240,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, devid = ch7xxx_get_did(device); if (!devid) { - DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n", - device, adapter->name, dvo->slave_addr); + DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s target %d.\n", + device, adapter->name, dvo->target_addr); goto out; } diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c index eef72bb3b767cd..0d5cce6051b1b8 100644 --- a/drivers/gpu/drm/i915/display/dvo_ivch.c +++ b/drivers/gpu/drm/i915/display/dvo_ivch.c @@ -198,7 +198,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data) struct i2c_msg msgs[] = { { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = I2C_M_RD, .len = 0, }, @@ -209,7 +209,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data) .buf = out_buf, }, { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = I2C_M_RD | I2C_M_NOSTART, .len = 2, .buf = in_buf, @@ -226,7 +226,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data) if (!priv->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from " "%s:%02x.\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, 
dvo->target_addr); } return false; } @@ -238,7 +238,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data) struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[3]; struct i2c_msg msg = { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 3, .buf = out_buf, @@ -253,13 +253,13 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data) if (!priv->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, dvo->target_addr); } return false; } -/* Probes the given bus and slave address for an ivch */ +/* Probes the given bus and target address for an ivch */ static bool ivch_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { @@ -283,10 +283,10 @@ static bool ivch_init(struct intel_dvo_device *dvo, * very unique, check that the value in the base address field matches * the address it's responding on. */ - if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { + if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->target_addr) { DRM_DEBUG_KMS("ivch detect failed due to address mismatch " "(%d vs %d)\n", - (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); + (temp & VR00_BASE_ADDRESS_MASK), dvo->target_addr); goto out; } diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c index 21486008dae982..9d47f8a93e94b5 100644 --- a/drivers/gpu/drm/i915/display/dvo_ns2501.c +++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c @@ -398,13 +398,13 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) struct i2c_msg msgs[] = { { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -422,7 +422,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) if (!ns->quiet) { DRM_DEBUG_KMS ("Unable to read register 0x%02x from %s:0x%02x.\n", addr, - adapter->name, dvo->slave_addr); + adapter->name, dvo->target_addr); } return false; @@ -441,7 +441,7 @@ static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) u8 out_buf[2]; struct i2c_msg msg = { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -456,7 +456,7 @@ static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) if (!ns->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, dvo->target_addr); } return false; @@ -487,8 +487,8 @@ static bool ns2501_init(struct intel_dvo_device *dvo, goto out; if (ch != (NS2501_VID & 0xff)) { - DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", - ch, adapter->name, dvo->slave_addr); + DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Target %d.\n", + ch, adapter->name, dvo->target_addr); goto out; } @@ -496,8 +496,8 @@ static bool ns2501_init(struct intel_dvo_device *dvo, goto out; if (ch != (NS2501_DID & 0xff)) { - DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", - ch, adapter->name, dvo->slave_addr); + DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Target %d.\n", + ch, adapter->name, dvo->target_addr); goto out; } ns->quiet = false; diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c index 6c461024c8e395..a8dd40c0099703 100644 --- a/drivers/gpu/drm/i915/display/dvo_sil164.c +++ b/drivers/gpu/drm/i915/display/dvo_sil164.c @@ 
-79,13 +79,13 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) struct i2c_msg msgs[] = { { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -102,7 +102,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) if (!sil->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, dvo->target_addr); } return false; } @@ -113,7 +113,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; struct i2c_msg msg = { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -127,7 +127,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) if (!sil->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, dvo->target_addr); } return false; @@ -153,8 +153,8 @@ static bool sil164_init(struct intel_dvo_device *dvo, goto out; if (ch != (SIL164_VID & 0xff)) { - DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", - ch, adapter->name, dvo->slave_addr); + DRM_DEBUG_KMS("sil164 not detected got %d: from %s Target %d.\n", + ch, adapter->name, dvo->target_addr); goto out; } @@ -162,8 +162,8 @@ static bool sil164_init(struct intel_dvo_device *dvo, goto out; if (ch != (SIL164_DID & 0xff)) { - DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", - ch, adapter->name, dvo->slave_addr); + DRM_DEBUG_KMS("sil164 not detected got %d: from %s Target %d.\n", + ch, adapter->name, dvo->target_addr); goto out; } sil->quiet = false; diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c index 0939e097f4f97b..d9a0cd753a878d 100644 --- a/drivers/gpu/drm/i915/display/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c @@ -100,13 +100,13 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) struct i2c_msg msgs[] = { { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -123,7 +123,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, dvo->target_addr); } return false; } @@ -134,7 +134,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; struct i2c_msg msg = { - .addr = dvo->slave_addr, + .addr = dvo->target_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -148,7 +148,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, adapter->name, dvo->slave_addr); + addr, adapter->name, dvo->target_addr); } return false; @@ -183,15 +183,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo, if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s " - "Slave %d.\n", - id, adapter->name, dvo->slave_addr); + "Target %d.\n", + id, adapter->name, dvo->target_addr); goto out; } if ((id = 
tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s " - "Slave %d.\n", - id, adapter->name, dvo->slave_addr); + "Target %d.\n", + id, adapter->name, dvo->target_addr); goto out; } tfp->quiet = false; diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index a8e746a0f6703e..526c8c4d7b53f0 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -89,6 +89,7 @@ void g4x_dp_set_clock(struct intel_encoder *encoder, static void intel_dp_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; @@ -118,7 +119,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. */ - intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; + intel_dp->DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED; /* Handle DP bits in common between all three register formats */ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; @@ -140,7 +141,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; - intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe), + intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe), TRANS_DP_ENH_FRAMING, pipe_config->enhanced_framing ? TRANS_DP_ENH_FRAMING : 0); @@ -166,9 +167,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder, static void assert_dp_port(struct intel_dp *intel_dp, bool state) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; + bool cur_state = intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN; I915_STATE_WARN(dev_priv, cur_state != state, "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", @@ -179,7 +181,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) { - bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; + struct intel_display *display = &dev_priv->display; + bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE; I915_STATE_WARN(dev_priv, cur_state != state, "eDP PLL state assertion failure (expected %s, current %s)\n", @@ -191,6 +194,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) static void ilk_edp_pll_on(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -198,7 +202,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp, assert_dp_port_disabled(intel_dp); assert_edp_pll_disabled(dev_priv); - drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", + drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n", pipe_config->port_clock); intel_dp->DP &= ~DP_PLL_FREQ_MASK; @@ -208,8 +212,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp, else intel_dp->DP |= 
DP_PLL_FREQ_270MHZ; - intel_de_write(dev_priv, DP_A, intel_dp->DP); - intel_de_posting_read(dev_priv, DP_A); + intel_de_write(display, DP_A, intel_dp->DP); + intel_de_posting_read(display, DP_A); udelay(500); /* @@ -223,14 +227,15 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp, intel_dp->DP |= DP_PLL_ENABLE; - intel_de_write(dev_priv, DP_A, intel_dp->DP); - intel_de_posting_read(dev_priv, DP_A); + intel_de_write(display, DP_A, intel_dp->DP); + intel_de_posting_read(display, DP_A); udelay(200); } static void ilk_edp_pll_off(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -238,22 +243,23 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp, assert_dp_port_disabled(intel_dp); assert_edp_pll_enabled(dev_priv); - drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); + drm_dbg_kms(display->drm, "disabling eDP PLL\n"); intel_dp->DP &= ~DP_PLL_ENABLE; - intel_de_write(dev_priv, DP_A, intel_dp->DP); - intel_de_posting_read(dev_priv, DP_A); + intel_de_write(display, DP_A, intel_dp->DP); + intel_de_posting_read(display, DP_A); udelay(200); } static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, enum port port, enum pipe *pipe) { + struct intel_display *display = &dev_priv->display; enum pipe p; - for_each_pipe(dev_priv, p) { - u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); + for_each_pipe(display, p) { + u32 val = intel_de_read(display, TRANS_DP_CTL(p)); if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { *pipe = p; @@ -261,7 +267,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, } } - drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", + drm_dbg_kms(display->drm, "No pipe for DP port %c found\n", port_name(port)); /* must initialize pipe to something for the asserts */ @@ -274,10 +280,11 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t dp_reg, enum port port, enum pipe *pipe) { + struct intel_display *display = &dev_priv->display; bool ret; u32 val; - val = intel_de_read(dev_priv, dp_reg); + val = intel_de_read(display, dp_reg); ret = val & DP_PORT_EN; @@ -333,6 +340,7 @@ static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state) static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 tmp, flags = 0; @@ -344,12 +352,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder, else pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); - tmp = intel_de_read(dev_priv, intel_dp->output_reg); + tmp = intel_de_read(display, intel_dp->output_reg); pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { - u32 trans_dp = intel_de_read(dev_priv, + u32 trans_dp = intel_de_read(display, TRANS_DP_CTL(crtc->pipe)); if (trans_dp & TRANS_DP_ENH_FRAMING) @@ -390,7 +398,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, g4x_dp_get_m_n(pipe_config); if (port == PORT_A) { - if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) + if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) pipe_config->port_clock = 162000; else 
pipe_config->port_clock = 270000; @@ -410,17 +418,18 @@ static void intel_dp_link_down(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; - if (drm_WARN_ON(&dev_priv->drm, - (intel_de_read(dev_priv, intel_dp->output_reg) & + if (drm_WARN_ON(display->drm, + (intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN) == 0)) return; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { @@ -430,12 +439,12 @@ intel_dp_link_down(struct intel_encoder *encoder, intel_dp->DP &= ~DP_LINK_TRAIN_MASK; intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE; } - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); intel_dp->DP &= ~DP_PORT_EN; - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); /* * HW workaround for IBX, we need to move the port @@ -454,12 +463,12 @@ intel_dp_link_down(struct intel_encoder *encoder, intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | DP_LINK_TRAIN_PAT_1; - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); intel_dp->DP &= ~DP_PORT_EN; - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); intel_wait_for_vblank_if_active(dev_priv, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); @@ -480,7 +489,7 @@ static void g4x_dp_audio_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (!crtc_state->has_audio) @@ -488,7 +497,7 @@ static void g4x_dp_audio_enable(struct intel_encoder *encoder, /* Enable audio presence detect */ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; - intel_de_write(i915, intel_dp->output_reg, intel_dp->DP); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); intel_audio_codec_enable(encoder, crtc_state, conn_state); } @@ -497,7 +506,7 @@ static void g4x_dp_audio_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (!old_crtc_state->has_audio) @@ -507,7 +516,7 @@ static void g4x_dp_audio_disable(struct intel_encoder *encoder, /* Disable 
audio presence detect */ intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE; - intel_de_write(i915, intel_dp->output_reg, intel_dp->DP); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); } static void intel_disable_dp(struct intel_atomic_state *state, @@ -596,7 +605,7 @@ cpt_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT; @@ -615,8 +624,8 @@ cpt_set_link_train(struct intel_dp *intel_dp, return; } - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); } static void @@ -624,7 +633,7 @@ g4x_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); intel_dp->DP &= ~DP_LINK_TRAIN_MASK; @@ -643,14 +652,14 @@ g4x_set_link_train(struct intel_dp *intel_dp, return; } - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); } static void intel_dp_enable_port(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); /* enable with pattern 1 (as per spec) */ @@ -665,8 +674,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, */ intel_dp->DP |= DP_PORT_EN; - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); } static void intel_enable_dp(struct intel_atomic_state *state, @@ -674,12 +683,13 @@ static void intel_enable_dp(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); + u32 dp_reg = intel_de_read(display, intel_dp->output_reg); intel_wakeref_t wakeref; - if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) + if (drm_WARN_ON(display->drm, dp_reg & DP_PORT_EN)) return; with_intel_pps_lock(intel_dp, wakeref) { @@ -1026,21 +1036,21 @@ static void g4x_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; signal_levels = g4x_signal_levels(train_set); - drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + drm_dbg_kms(display->drm, "Using signal levels %08x\n", signal_levels); intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); intel_dp->DP |= signal_levels; - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - 
intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); } /* SNB CPU eDP voltage swing and pre-emphasis control */ @@ -1074,21 +1084,21 @@ static void snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; signal_levels = snb_cpu_edp_signal_levels(train_set); - drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + drm_dbg_kms(display->drm, "Using signal levels %08x\n", signal_levels); intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; intel_dp->DP |= signal_levels; - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); } /* IVB CPU eDP voltage swing and pre-emphasis control */ @@ -1126,21 +1136,21 @@ static void ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; signal_levels = ivb_cpu_edp_signal_levels(train_set); - drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + drm_dbg_kms(display->drm, "Using signal levels %08x\n", signal_levels); intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; intel_dp->DP |= signal_levels; - intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); } /* @@ -1185,15 +1195,15 @@ intel_dp_hotplug(struct intel_encoder *encoder, static bool ibx_digital_port_connected(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin]; + struct intel_display *display = to_intel_display(encoder); + u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin]; - return intel_de_read(dev_priv, SDEISR) & bit; + return intel_de_read(display, SDEISR) & bit; } static bool g4x_digital_port_connected(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 bit; switch (encoder->hpd_pin) { @@ -1211,15 +1221,15 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder) return false; } - return intel_de_read(dev_priv, PORT_HOTPLUG_STAT(dev_priv)) & bit; + return intel_de_read(display, PORT_HOTPLUG_STAT(display)) & bit; } static bool ilk_digital_port_connected(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; + struct intel_display *display = to_intel_display(encoder); + u32 bit = display->hotplug.hpd[encoder->hpd_pin]; - return intel_de_read(dev_priv, DEISR) & bit; + return intel_de_read(display, DEISR) & bit; } static void g4x_dp_suspend_complete(struct intel_encoder *encoder) @@ -1241,7 +1251,8 @@ static 
void intel_dp_encoder_destroy(struct drm_encoder *encoder) enum pipe vlv_active_pipe(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum pipe pipe; @@ -1254,10 +1265,11 @@ enum pipe vlv_active_pipe(struct intel_dp *intel_dp) static void intel_dp_encoder_reset(struct drm_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder->dev); struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); - intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); + intel_dp->DP = intel_de_read(display, intel_dp->output_reg); intel_dp->reset_link_params = true; @@ -1279,6 +1291,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { bool g4x_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port) { + struct intel_display *display = &dev_priv->display; const struct intel_bios_encoder_data *devdata; struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; @@ -1288,11 +1301,11 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, if (!assert_port_valid(dev_priv, port)) return false; - devdata = intel_bios_encoder_data_lookup(dev_priv, port); + devdata = intel_bios_encoder_data_lookup(display, port); /* FIXME bail? */ if (!devdata) - drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n", + drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n", port_name(port)); dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); @@ -1312,7 +1325,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, mutex_init(&dig_port->hdcp_mutex); - if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, + if (drm_encoder_init(display->drm, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) goto err_encoder_init; @@ -1396,7 +1409,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, dig_port->hpd_pulse = intel_dp_hpd_pulse; - if (HAS_GMCH(dev_priv)) { + if (HAS_GMCH(display)) { dig_port->connected = g4x_digital_port_connected; } else { if (port == PORT_A) diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 8096492b3faded..46f23bdb4c176e 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -686,6 +686,7 @@ static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port void g4x_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { + struct intel_display *display = &dev_priv->display; const struct intel_bios_encoder_data *devdata; struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; @@ -697,7 +698,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, if (!assert_hdmi_port_valid(dev_priv, port)) return; - devdata = intel_bios_encoder_data_lookup(dev_priv, port); + devdata = intel_bios_encoder_data_lookup(display, port); /* FIXME bail? 
*/ if (!devdata) diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index 2b7c3d270b17cd..15cda57fbc912f 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -4028,7 +4028,7 @@ void i9xx_wm_init(struct drm_i915_private *dev_priv) dev_priv->display.funcs.wm = &g4x_wm_funcs; } else if (IS_PINEVIEW(dev_priv)) { if (!pnv_get_cxsr_latency(dev_priv)) { - drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n"); + drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n"); /* Disable CxSR and never update its watermark again */ intel_set_memory_cxsr(dev_priv, false); dev_priv->display.funcs.wm = &nop_funcs; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index ae8f6617aa70ca..293efc1f841dff 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -27,6 +27,7 @@ #include #include +#include #include #include "i915_reg.h" @@ -330,7 +331,7 @@ static int afe_clk(struct intel_encoder *encoder, int bpp; if (crtc_state->dsc.compression_enable) - bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); + bpp = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16); else bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -863,7 +864,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * compressed and non-compressed bpp. */ if (crtc_state->dsc.compression_enable) { - mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); + mul = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16); div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); } @@ -887,7 +888,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, int bpp, line_time_us, byte_clk_period_ns; if (crtc_state->dsc.compression_enable) - bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); + bpp = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16); else bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -1470,7 +1471,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; if (pipe_config->dsc.compressed_bpp_x16) { - int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16); + int div = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16); int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); adjusted_mode->crtc_htotal = @@ -1944,6 +1945,7 @@ static void icl_dsi_add_properties(struct intel_connector *connector) void icl_dsi_init(struct drm_i915_private *dev_priv, const struct intel_bios_encoder_data *devdata) { + struct intel_display *display = &dev_priv->display; struct intel_dsi *intel_dsi; struct intel_encoder *encoder; struct intel_connector *intel_connector; @@ -2007,7 +2009,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, intel_dsi->panel_power_off_time = ktime_get_boottime(); - intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL); + intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, NULL); mutex_lock(&dev_priv->drm.mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c index 0aa3999374e255..c3b29a331d725b 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -183,9 +183,9 @@ void intel_unregister_dsm_handler(void) { } -void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) +void 
intel_dsm_get_bios_data_funcs_supported(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); acpi_handle dhandle; union acpi_object *obj; @@ -263,15 +263,14 @@ static u32 acpi_display_type(struct intel_connector *connector) return display_type; } -void intel_acpi_device_id_update(struct drm_i915_private *dev_priv) +void intel_acpi_device_id_update(struct intel_display *display) { - struct drm_device *drm_dev = &dev_priv->drm; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; u8 display_index[16] = {}; /* Populate the ACPI IDs for all connectors for a given drm_device */ - drm_connector_list_iter_begin(drm_dev, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { u32 device_id, type; @@ -288,10 +287,10 @@ void intel_acpi_device_id_update(struct drm_i915_private *dev_priv) } /* NOTE: The connector order must be final before this is called. */ -void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) +void intel_acpi_assign_connector_fwnodes(struct intel_display *display) { + struct drm_device *drm_dev = display->drm; struct drm_connector_list_iter conn_iter; - struct drm_device *drm_dev = &i915->drm; struct fwnode_handle *fwnode = NULL; struct drm_connector *connector; struct acpi_device *adev; @@ -333,7 +332,7 @@ void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) fwnode_handle_put(fwnode); } -void intel_acpi_video_register(struct drm_i915_private *i915) +void intel_acpi_video_register(struct intel_display *display) { struct drm_connector_list_iter conn_iter; struct drm_connector *connector; @@ -347,7 +346,7 @@ void intel_acpi_video_register(struct drm_i915_private *i915) * a native backlight later and acpi_video_register_backlight() should * only be called after any native backlights have been registered. 
*/ - drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct intel_panel *panel = &to_intel_connector(connector)->panel; diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h index 6a0007452f95cd..788a63071661a7 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.h +++ b/drivers/gpu/drm/i915/display/intel_acpi.h @@ -6,26 +6,26 @@ #ifndef __INTEL_ACPI_H__ #define __INTEL_ACPI_H__ -struct drm_i915_private; +struct intel_display; #ifdef CONFIG_ACPI void intel_register_dsm_handler(void); void intel_unregister_dsm_handler(void); -void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915); -void intel_acpi_device_id_update(struct drm_i915_private *i915); -void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915); -void intel_acpi_video_register(struct drm_i915_private *i915); +void intel_dsm_get_bios_data_funcs_supported(struct intel_display *display); +void intel_acpi_device_id_update(struct intel_display *display); +void intel_acpi_assign_connector_fwnodes(struct intel_display *display); +void intel_acpi_video_register(struct intel_display *display); #else static inline void intel_register_dsm_handler(void) { return; } static inline void intel_unregister_dsm_handler(void) { return; } static inline -void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { return; } +void intel_dsm_get_bios_data_funcs_supported(struct intel_display *display) { return; } static inline -void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; } +void intel_acpi_device_id_update(struct intel_display *display) { return; } static inline -void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; } +void intel_acpi_assign_connector_fwnodes(struct intel_display *display) { return; } static inline -void intel_acpi_video_register(struct drm_i915_private *i915) { return; } +void intel_acpi_video_register(struct intel_display *display) { return; } #endif /* CONFIG_ACPI */ #endif /* __INTEL_ACPI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c index 10689480338eb2..186cf4833f716b 100644 --- a/drivers/gpu/drm/i915/display/intel_alpm.c +++ b/drivers/gpu/drm/i915/display/intel_alpm.c @@ -139,7 +139,7 @@ static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int aux_less_wake_time, aux_less_wake_lines, silence_period, lfps_half_cycle; @@ -158,7 +158,7 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK) return false; - if (i915->display.params.psr_safest_params) + if (display->params.psr_safest_params) aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK; intel_dp->alpm_parameters.aux_less_wake_lines = aux_less_wake_lines; @@ -171,10 +171,10 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int check_entry_lines; - if (DISPLAY_VER(i915) < 20) + if (DISPLAY_VER(display) < 20) return true; /* ALPM Entry Check = 2 + CEILING( 
5us /tline ) */ @@ -187,7 +187,7 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp, if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state)) return false; - if (i915->display.params.psr_safest_params) + if (display->params.psr_safest_params) check_entry_lines = 15; intel_dp->alpm_parameters.check_entry_lines = check_entry_lines; @@ -212,9 +212,9 @@ static int tgl_io_buffer_wake_time(void) static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - if (DISPLAY_VER(i915) >= 12) + if (DISPLAY_VER(display) >= 12) return tgl_io_buffer_wake_time(); else return skl_io_buffer_wake_time(); @@ -223,7 +223,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state) bool intel_alpm_compute_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; int tfw_exit_latency = 20; /* eDP spec */ int phy_wake = 4; /* eDP spec */ @@ -236,9 +236,9 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp, fast_wake_time = precharge + preamble + phy_wake + tfw_exit_latency; - if (DISPLAY_VER(i915) >= 20) + if (DISPLAY_VER(display) >= 20) max_wake_lines = 68; - else if (DISPLAY_VER(i915) >= 12) + else if (DISPLAY_VER(display) >= 12) max_wake_lines = 12; else max_wake_lines = 8; @@ -255,7 +255,7 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp, if (!_lnl_compute_alpm_params(intel_dp, crtc_state)) return false; - if (i915->display.params.psr_safest_params) + if (display->params.psr_safest_params) io_wake_lines = fast_wake_lines = max_wake_lines; /* According to Bspec lower limit should be set as 7 lines. 
*/ @@ -269,7 +269,7 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int waketime_in_lines, first_sdp_position; int context_latency, guardband; @@ -277,10 +277,10 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp, if (!intel_dp_is_edp(intel_dp)) return; - if (DISPLAY_VER(i915) < 20) + if (DISPLAY_VER(display) < 20) return; - if (!intel_dp_as_sdp_supported(intel_dp)) + if (!intel_dp->as_sdp_supported) return; if (crtc_state->has_psr) @@ -309,13 +309,13 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp, static void lnl_alpm_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = dp_to_dig_port(intel_dp)->base.port; u32 alpm_ctl; - if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.sel_update_enabled && - !intel_dp_is_edp(intel_dp))) + if (DISPLAY_VER(display) < 20 || + (!intel_dp->psr.sel_update_enabled && !intel_dp_is_edp(intel_dp))) return; /* @@ -329,16 +329,16 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS | ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines); - intel_de_write(dev_priv, - PORT_ALPM_CTL(dev_priv, port), + intel_de_write(display, + PORT_ALPM_CTL(display, port), PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE | PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) | PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) | PORT_ALPM_CTL_SILENCE_PERIOD( intel_dp->alpm_parameters.silence_period_sym_clocks)); - intel_de_write(dev_priv, - PORT_ALPM_LFPS_CTL(dev_priv, port), + intel_de_write(display, + PORT_ALPM_LFPS_CTL(display, port), PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) | PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION( intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) | @@ -356,7 +356,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines); - intel_de_write(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder), alpm_ctl); + intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl); } void intel_alpm_configure(struct intel_dp *intel_dp, @@ -368,14 +368,14 @@ void intel_alpm_configure(struct intel_dp *intel_dp, static int i915_edp_lobf_info_show(struct seq_file *m, void *data) { struct intel_connector *connector = m->private; - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_crtc *crtc; struct intel_crtc_state *crtc_state; enum transcoder cpu_transcoder; u32 alpm_ctl; int ret; - ret = drm_modeset_lock_single_interruptible(&dev_priv->drm.mode_config.connection_mutex); + ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (ret) return ret; @@ -387,14 +387,14 @@ static int i915_edp_lobf_info_show(struct seq_file *m, void *data) crtc_state = to_intel_crtc_state(crtc->state); cpu_transcoder = crtc_state->cpu_transcoder; - alpm_ctl = intel_de_read(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder)); + alpm_ctl = intel_de_read(display, ALPM_CTL(display, cpu_transcoder)); seq_printf(m, "LOBF status: 
%s\n", str_enabled_disabled(alpm_ctl & ALPM_CTL_LOBF_ENABLE)); seq_printf(m, "Aux-wake alpm status: %s\n", str_enabled_disabled(!(alpm_ctl & ALPM_CTL_ALPM_AUX_LESS_ENABLE))); seq_printf(m, "Aux-less alpm status: %s\n", str_enabled_disabled(alpm_ctl & ALPM_CTL_ALPM_AUX_LESS_ENABLE)); out: - drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); return ret; } @@ -403,10 +403,10 @@ DEFINE_SHOW_ATTRIBUTE(i915_edp_lobf_info); void intel_alpm_lobf_debugfs_add(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct dentry *root = connector->base.debugfs_entry; - if (DISPLAY_VER(i915) < 20 || + if (DISPLAY_VER(display) < 20 || connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) return; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 76aa10b6f64783..12d6ed94075142 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -276,7 +276,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->do_async_flip = false; crtc_state->fb_bits = 0; crtc_state->update_planes = 0; - crtc_state->dsb = NULL; + crtc_state->dsb_color_vblank = NULL; + crtc_state->dsb_color_commit = NULL; return &crtc_state->uapi; } @@ -310,7 +311,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, { struct intel_crtc_state *crtc_state = to_intel_crtc_state(state); - drm_WARN_ON(crtc->dev, crtc_state->dsb); + drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank); + drm_WARN_ON(crtc->dev, crtc_state->dsb_color_commit); __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index b9bafec06fb8b5..f5e7eefab2f160 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -26,6 +26,7 @@ #include #include +#include #include #include "i915_drv.h" @@ -452,8 +453,8 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, lanes = crtc_state->lane_count; drm_dbg_kms(&i915->drm, - "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n", - h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk); + "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " FXP_Q4_FMT " cdclk = %u\n", + h_active, link_clk, lanes, FXP_Q4_ARGS(vdsc_bppx16), cdclk); if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk)) return 0; @@ -979,7 +980,8 @@ static void glk_force_audio_cdclk(struct drm_i915_private *i915, static unsigned long i915_audio_component_get_power(struct device *kdev) { - struct drm_i915_private *i915 = kdev_to_i915(kdev); + struct intel_display *display = to_intel_display(kdev); + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t ret; /* Catch potential impedance mismatches before they occur! */ @@ -1011,7 +1013,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) static void i915_audio_component_put_power(struct device *kdev, unsigned long cookie) { - struct drm_i915_private *i915 = kdev_to_i915(kdev); + struct intel_display *display = to_intel_display(kdev); + struct drm_i915_private *i915 = to_i915(display->drm); /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. 
*/ if (--i915->display.audio.power_refcount == 0) @@ -1024,7 +1027,8 @@ static void i915_audio_component_put_power(struct device *kdev, static void i915_audio_component_codec_wake_override(struct device *kdev, bool enable) { - struct drm_i915_private *i915 = kdev_to_i915(kdev); + struct intel_display *display = to_intel_display(kdev); + struct drm_i915_private *i915 = to_i915(display->drm); unsigned long cookie; if (DISPLAY_VER(i915) < 9) @@ -1052,7 +1056,8 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, /* Get CDCLK in kHz */ static int i915_audio_component_get_cdclk_freq(struct device *kdev) { - struct drm_i915_private *i915 = kdev_to_i915(kdev); + struct intel_display *display = to_intel_display(kdev); + struct drm_i915_private *i915 = to_i915(display->drm); if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915))) return -ENODEV; @@ -1111,7 +1116,8 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915, static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, int cpu_transcoder, int rate) { - struct drm_i915_private *i915 = kdev_to_i915(kdev); + struct intel_display *display = to_intel_display(kdev); + struct drm_i915_private *i915 = to_i915(display->drm); struct i915_audio_component *acomp = i915->display.audio.component; const struct intel_audio_state *audio_state; struct intel_encoder *encoder; @@ -1153,7 +1159,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, int cpu_transcoder, bool *enabled, unsigned char *buf, int max_bytes) { - struct drm_i915_private *i915 = kdev_to_i915(kdev); + struct intel_display *display = to_intel_display(kdev); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_audio_state *audio_state; int ret = 0; @@ -1188,24 +1195,25 @@ static const struct drm_audio_component_ops i915_audio_component_ops = { .get_eld = i915_audio_component_get_eld, }; -static int i915_audio_component_bind(struct device *i915_kdev, +static int i915_audio_component_bind(struct device *drv_kdev, struct device *hda_kdev, void *data) { + struct intel_display *display = to_intel_display(drv_kdev); + struct drm_i915_private *i915 = to_i915(display->drm); struct i915_audio_component *acomp = data; - struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); int i; if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev)) return -EEXIST; if (drm_WARN_ON(&i915->drm, - !device_link_add(hda_kdev, i915_kdev, + !device_link_add(hda_kdev, drv_kdev, DL_FLAG_STATELESS))) return -ENOMEM; drm_modeset_lock_all(&i915->drm); acomp->base.ops = &i915_audio_component_ops; - acomp->base.dev = i915_kdev; + acomp->base.dev = drv_kdev; BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; @@ -1215,11 +1223,12 @@ static int i915_audio_component_bind(struct device *i915_kdev, return 0; } -static void i915_audio_component_unbind(struct device *i915_kdev, +static void i915_audio_component_unbind(struct device *drv_kdev, struct device *hda_kdev, void *data) { + struct intel_display *display = to_intel_display(drv_kdev); + struct drm_i915_private *i915 = to_i915(display->drm); struct i915_audio_component *acomp = data; - struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); drm_modeset_lock_all(&i915->drm); acomp->base.ops = NULL; @@ -1227,7 +1236,7 @@ static void i915_audio_component_unbind(struct device *i915_kdev, i915->display.audio.component = NULL; drm_modeset_unlock_all(&i915->drm); - 
device_link_remove(hda_kdev, i915_kdev); + device_link_remove(hda_kdev, drv_kdev); if (i915->display.audio.power_refcount) drm_err(&i915->drm, "audio power refcount %d after unbind\n", diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 6c3333136737e7..9e05745d797d19 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -455,7 +455,7 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state) mutex_lock(&i915->display.backlight.lock); if (panel->backlight.device) - panel->backlight.device->props.power = FB_BLANK_POWERDOWN; + panel->backlight.device->props.power = BACKLIGHT_POWER_OFF; panel->backlight.enabled = false; panel->backlight.funcs->disable(old_conn_state, 0); @@ -773,7 +773,7 @@ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state, panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level); panel->backlight.enabled = true; if (panel->backlight.device) - panel->backlight.device->props.power = FB_BLANK_UNBLANK; + panel->backlight.device->props.power = BACKLIGHT_POWER_ON; } void intel_backlight_enable(const struct intel_crtc_state *crtc_state, @@ -870,12 +870,12 @@ static int intel_backlight_device_update_status(struct backlight_device *bd) */ if (panel->backlight.enabled) { if (panel->backlight.power) { - bool enable = bd->props.power == FB_BLANK_UNBLANK && + bool enable = bd->props.power == BACKLIGHT_POWER_ON && bd->props.brightness != 0; panel->backlight.power(connector, enable); } } else { - bd->props.power = FB_BLANK_POWERDOWN; + bd->props.power = BACKLIGHT_POWER_OFF; } drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); @@ -945,9 +945,9 @@ int intel_backlight_device_register(struct intel_connector *connector) props.max_brightness); if (panel->backlight.enabled) - props.power = FB_BLANK_UNBLANK; + props.power = BACKLIGHT_POWER_ON; else - props.power = FB_BLANK_POWERDOWN; + props.power = BACKLIGHT_POWER_OFF; name = kstrdup_const("intel_backlight", GFP_KERNEL); if (!name) @@ -1011,7 +1011,7 @@ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *i915 = to_i915(connector->base.dev); - return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq), + return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq), pwm_freq_hz); } @@ -1073,7 +1073,7 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *i915 = to_i915(connector->base.dev); - return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq), + return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq), pwm_freq_hz * 128); } @@ -1091,7 +1091,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) int clock; if (IS_PINEVIEW(i915)) - clock = KHz(RUNTIME_INFO(i915)->rawclk_freq); + clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq); else clock = KHz(i915->display.cdclk.hw.cdclk); @@ -1109,7 +1109,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) int clock; if (IS_G4X(i915)) - clock = KHz(RUNTIME_INFO(i915)->rawclk_freq); + clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq); else clock = KHz(i915->display.cdclk.hw.cdclk); @@ -1133,7 +1133,7 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) clock = MHz(25); mul = 16; } else { - clock = KHz(RUNTIME_INFO(i915)->rawclk_freq); + clock = 
KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq); mul = 128; } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index ec1e3a38036013..bed485374ab0b5 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "i915_drv.h" #include "i915_reg.h" @@ -65,15 +66,15 @@ /* Wrapper for VBT child device config */ struct intel_bios_encoder_data { - struct drm_i915_private *i915; + struct intel_display *display; struct child_device_config child; struct dsc_compression_parameters_entry *dsc; struct list_head node; }; -#define SLAVE_ADDR1 0x70 -#define SLAVE_ADDR2 0x72 +#define TARGET_ADDR1 0x70 +#define TARGET_ADDR2 0x72 /* Get BDB block size given a pointer to Block ID. */ static u32 _get_blocksize(const u8 *block_base) @@ -144,12 +145,12 @@ struct bdb_block_entry { }; static const void * -bdb_find_section(struct drm_i915_private *i915, +bdb_find_section(struct intel_display *display, enum bdb_block_id section_id) { struct bdb_block_entry *entry; - list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) { + list_for_each_entry(entry, &display->vbt.bdb_blocks, node) { if (entry->section_id == section_id) return entry->data + 3; } @@ -199,12 +200,12 @@ static const struct { .min_size = sizeof(struct bdb_generic_dtd), }, }; -static size_t lfp_data_min_size(struct drm_i915_private *i915) +static size_t lfp_data_min_size(struct intel_display *display) { const struct bdb_lfp_data_ptrs *ptrs; size_t size; - ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS); + ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS); if (!ptrs) return 0; @@ -359,7 +360,7 @@ static void next_lfp_data_ptr(struct lfp_data_ptr_table *next, next->offset = prev->offset + size; } -static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, +static void *generate_lfp_data_ptrs(struct intel_display *display, const void *bdb) { int i, size, table_size, block_size, offset, fp_timing_size; @@ -373,7 +374,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, * include block 41 and thus we don't need to * generate one. 
*/ - if (i915->display.vbt.version < 155) + if (display->vbt.version < 155) return NULL; fp_timing_size = 38; @@ -382,7 +383,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, if (!block) return NULL; - drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n"); + drm_dbg_kms(display->drm, "Generating LFP data table pointers\n"); block_size = get_blocksize(block); @@ -450,7 +451,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, } static void -init_bdb_block(struct drm_i915_private *i915, +init_bdb_block(struct intel_display *display, const void *bdb, enum bdb_block_id section_id, size_t min_size) { @@ -463,14 +464,14 @@ init_bdb_block(struct drm_i915_private *i915, /* Modern VBTs lack the LFP data table pointers block, make one up */ if (!block && section_id == BDB_LFP_DATA_PTRS) { - temp_block = generate_lfp_data_ptrs(i915, bdb); + temp_block = generate_lfp_data_ptrs(display, bdb); if (temp_block) block = temp_block + 3; } if (!block) return; - drm_WARN(&i915->drm, min_size == 0, + drm_WARN(display->drm, min_size == 0, "Block %d min_size is zero\n", section_id); block_size = get_blocksize(block); @@ -494,20 +495,22 @@ init_bdb_block(struct drm_i915_private *i915, kfree(temp_block); - drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n", + drm_dbg_kms(display->drm, + "Found BDB block %d (size %zu, min size %zu)\n", section_id, block_size, min_size); if (section_id == BDB_LFP_DATA_PTRS && !fixup_lfp_data_ptrs(bdb, entry->data + 3)) { - drm_err(&i915->drm, "VBT has malformed LFP data table pointers\n"); + drm_err(display->drm, + "VBT has malformed LFP data table pointers\n"); kfree(entry); return; } - list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks); + list_add_tail(&entry->node, &display->vbt.bdb_blocks); } -static void init_bdb_blocks(struct drm_i915_private *i915, +static void init_bdb_blocks(struct intel_display *display, const void *bdb) { int i; @@ -517,14 +520,14 @@ static void init_bdb_blocks(struct drm_i915_private *i915, size_t min_size = bdb_blocks[i].min_size; if (section_id == BDB_LFP_DATA) - min_size = lfp_data_min_size(i915); + min_size = lfp_data_min_size(display); - init_bdb_block(i915, bdb, section_id, min_size); + init_bdb_block(display, bdb, section_id, min_size); } } static void -fill_detail_timing_data(struct drm_i915_private *i915, +fill_detail_timing_data(struct intel_display *display, struct drm_display_mode *panel_fixed_mode, const struct bdb_edid_dtd *dvo_timing) { @@ -567,12 +570,12 @@ fill_detail_timing_data(struct drm_i915_private *i915, /* Some VBTs have bogus h/vsync_end values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) { - drm_dbg_kms(&i915->drm, "reducing hsync_end %d->%d\n", + drm_dbg_kms(display->drm, "reducing hsync_end %d->%d\n", panel_fixed_mode->hsync_end, panel_fixed_mode->htotal); panel_fixed_mode->hsync_end = panel_fixed_mode->htotal; } if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) { - drm_dbg_kms(&i915->drm, "reducing vsync_end %d->%d\n", + drm_dbg_kms(display->drm, "reducing vsync_end %d->%d\n", panel_fixed_mode->vsync_end, panel_fixed_mode->vtotal); panel_fixed_mode->vsync_end = panel_fixed_mode->vtotal; } @@ -617,26 +620,26 @@ get_lfp_data_tail(const struct bdb_lfp_data *data, return NULL; } -static int opregion_get_panel_type(struct drm_i915_private *i915, +static int opregion_get_panel_type(struct intel_display *display, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { - return 
intel_opregion_get_panel_type(i915); + return intel_opregion_get_panel_type(display); } -static int vbt_get_panel_type(struct drm_i915_private *i915, +static int vbt_get_panel_type(struct intel_display *display, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { const struct bdb_lfp_options *lfp_options; - lfp_options = bdb_find_section(i915, BDB_LFP_OPTIONS); + lfp_options = bdb_find_section(display, BDB_LFP_OPTIONS); if (!lfp_options) return -1; if (lfp_options->panel_type > 0xf && lfp_options->panel_type != 0xff) { - drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n", + drm_dbg_kms(display->drm, "Invalid VBT panel type 0x%x\n", lfp_options->panel_type); return -1; } @@ -644,12 +647,13 @@ static int vbt_get_panel_type(struct drm_i915_private *i915, if (devdata && devdata->child.handle == DEVICE_HANDLE_LFP2) return lfp_options->panel_type2; - drm_WARN_ON(&i915->drm, devdata && devdata->child.handle != DEVICE_HANDLE_LFP1); + drm_WARN_ON(display->drm, + devdata && devdata->child.handle != DEVICE_HANDLE_LFP1); return lfp_options->panel_type; } -static int pnpid_get_panel_type(struct drm_i915_private *i915, +static int pnpid_get_panel_type(struct intel_display *display, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { @@ -668,14 +672,14 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, product_id_nodate.week_of_manufacture = 0; product_id_nodate.year_of_manufacture = 0; - p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "EDID"); + p = drm_dbg_printer(display->drm, DRM_UT_KMS, "EDID"); drm_edid_print_product_id(&p, &product_id, true); - ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS); + ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS); if (!ptrs) return -1; - data = bdb_find_section(i915, BDB_LFP_DATA); + data = bdb_find_section(display, BDB_LFP_DATA); if (!data) return -1; @@ -699,7 +703,7 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, return best; } -static int fallback_get_panel_type(struct drm_i915_private *i915, +static int fallback_get_panel_type(struct intel_display *display, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { @@ -713,13 +717,13 @@ enum panel_type { PANEL_TYPE_FALLBACK, }; -static int get_panel_type(struct drm_i915_private *i915, +static int get_panel_type(struct intel_display *display, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { struct { const char *name; - int (*get_panel_type)(struct drm_i915_private *i915, + int (*get_panel_type)(struct intel_display *display, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback); int panel_type; @@ -744,14 +748,14 @@ static int get_panel_type(struct drm_i915_private *i915, int i; for (i = 0; i < ARRAY_SIZE(panel_types); i++) { - panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, + panel_types[i].panel_type = panel_types[i].get_panel_type(display, devdata, drm_edid, use_fallback); - drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf && + drm_WARN_ON(display->drm, panel_types[i].panel_type > 0xf && panel_types[i].panel_type != 0xff); if (panel_types[i].panel_type >= 0) - drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n", + drm_dbg_kms(display->drm, "Panel type (%s): %d\n", panel_types[i].name, panel_types[i].panel_type); } @@ -766,7 +770,7 @@ static int get_panel_type(struct drm_i915_private *i915, else i = 
PANEL_TYPE_FALLBACK; - drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n", + drm_dbg_kms(display->drm, "Selected panel type (%s): %d\n", panel_types[i].name, panel_types[i].panel_type); return panel_types[i].panel_type; @@ -784,14 +788,14 @@ static bool panel_bool(unsigned int value, int panel_type) /* Parse general panel options */ static void -parse_panel_options(struct drm_i915_private *i915, +parse_panel_options(struct intel_display *display, struct intel_panel *panel) { const struct bdb_lfp_options *lfp_options; int panel_type = panel->vbt.panel_type; int drrs_mode; - lfp_options = bdb_find_section(i915, BDB_LFP_OPTIONS); + lfp_options = bdb_find_section(display, BDB_LFP_OPTIONS); if (!lfp_options) return; @@ -815,23 +819,23 @@ parse_panel_options(struct drm_i915_private *i915, switch (drrs_mode) { case 0: panel->vbt.drrs_type = DRRS_TYPE_STATIC; - drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n"); + drm_dbg_kms(display->drm, "DRRS supported mode is static\n"); break; case 2: panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "DRRS supported mode is seamless\n"); break; default: panel->vbt.drrs_type = DRRS_TYPE_NONE; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "DRRS not supported (VBT input)\n"); break; } } static void -parse_lfp_panel_dtd(struct drm_i915_private *i915, +parse_lfp_panel_dtd(struct intel_display *display, struct intel_panel *panel, const struct bdb_lfp_data *lfp_data, const struct bdb_lfp_data_ptrs *lfp_data_ptrs) @@ -849,11 +853,11 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915, if (!panel_fixed_mode) return; - fill_detail_timing_data(i915, panel_fixed_mode, panel_dvo_timing); + fill_detail_timing_data(display, panel_fixed_mode, panel_dvo_timing); panel->vbt.lfp_vbt_mode = panel_fixed_mode; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n", DRM_MODE_ARG(panel_fixed_mode)); @@ -865,14 +869,14 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915, if (fp_timing->x_res == panel_fixed_mode->hdisplay && fp_timing->y_res == panel_fixed_mode->vdisplay) { panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT initial LVDS value %x\n", panel->vbt.bios_lvds_val); } } static void -parse_lfp_data(struct drm_i915_private *i915, +parse_lfp_data(struct intel_display *display, struct intel_panel *panel) { const struct bdb_lfp_data *data; @@ -882,41 +886,41 @@ parse_lfp_data(struct drm_i915_private *i915, struct drm_printer p; int panel_type = panel->vbt.panel_type; - ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS); + ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS); if (!ptrs) return; - data = bdb_find_section(i915, BDB_LFP_DATA); + data = bdb_find_section(display, BDB_LFP_DATA); if (!data) return; if (!panel->vbt.lfp_vbt_mode) - parse_lfp_panel_dtd(i915, panel, data, ptrs); + parse_lfp_panel_dtd(display, panel, data, ptrs); pnp_id = get_lfp_pnp_id(data, ptrs, panel_type); - p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "Panel"); + p = drm_dbg_printer(display->drm, DRM_UT_KMS, "Panel"); drm_edid_print_product_id(&p, pnp_id, false); tail = get_lfp_data_tail(data, ptrs); if (!tail) return; - drm_dbg_kms(&i915->drm, "Panel name: %.*s\n", + drm_dbg_kms(display->drm, "Panel name: %.*s\n", (int)sizeof(tail->panel_name[0].name), tail->panel_name[panel_type].name); - if (i915->display.vbt.version >= 188) { + if (display->vbt.version >= 188) { panel->vbt.seamless_drrs_min_refresh_rate = 
tail->seamless_drrs_min_refresh_rate[panel_type]; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Seamless DRRS min refresh rate: %d Hz\n", panel->vbt.seamless_drrs_min_refresh_rate); } } static void -parse_generic_dtd(struct drm_i915_private *i915, +parse_generic_dtd(struct intel_display *display, struct intel_panel *panel) { const struct bdb_generic_dtd *generic_dtd; @@ -932,20 +936,20 @@ parse_generic_dtd(struct drm_i915_private *i915, * first on VBT >= 229, but still fall back to trying the old LFP * block if that fails. */ - if (i915->display.vbt.version < 229) + if (display->vbt.version < 229) return; - generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD); + generic_dtd = bdb_find_section(display, BDB_GENERIC_DTD); if (!generic_dtd) return; if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) { - drm_err(&i915->drm, "GDTD size %u is too small.\n", + drm_err(display->drm, "GDTD size %u is too small.\n", generic_dtd->gdtd_size); return; } else if (generic_dtd->gdtd_size != sizeof(struct generic_dtd_entry)) { - drm_err(&i915->drm, "Unexpected GDTD size %u\n", + drm_err(display->drm, "Unexpected GDTD size %u\n", generic_dtd->gdtd_size); /* DTD has unknown fields, but keep going */ } @@ -953,7 +957,7 @@ parse_generic_dtd(struct drm_i915_private *i915, num_dtd = (get_blocksize(generic_dtd) - sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size; if (panel->vbt.panel_type >= num_dtd) { - drm_err(&i915->drm, + drm_err(display->drm, "Panel type %d not found in table of %d DTD's\n", panel->vbt.panel_type, num_dtd); return; @@ -998,7 +1002,7 @@ parse_generic_dtd(struct drm_i915_private *i915, else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n", DRM_MODE_ARG(panel_fixed_mode)); @@ -1006,7 +1010,7 @@ parse_generic_dtd(struct drm_i915_private *i915, } static void -parse_lfp_backlight(struct drm_i915_private *i915, +parse_lfp_backlight(struct intel_display *display, struct intel_panel *panel) { const struct bdb_lfp_backlight *backlight_data; @@ -1014,12 +1018,12 @@ parse_lfp_backlight(struct drm_i915_private *i915, int panel_type = panel->vbt.panel_type; u16 level; - backlight_data = bdb_find_section(i915, BDB_LFP_BACKLIGHT); + backlight_data = bdb_find_section(display, BDB_LFP_BACKLIGHT); if (!backlight_data) return; if (backlight_data->entry_size != sizeof(backlight_data->data[0])) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Unsupported backlight data entry size %u\n", backlight_data->entry_size); return; @@ -1029,7 +1033,7 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM; if (!panel->vbt.backlight.present) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PWM backlight not present in VBT (type %u)\n", entry->type); return; @@ -1037,7 +1041,7 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; panel->vbt.backlight.controller = 0; - if (i915->display.vbt.version >= 191) { + if (display->vbt.version >= 191) { const struct lfp_backlight_control_method *method; method = &backlight_data->backlight_control[panel_type]; @@ -1048,14 +1052,14 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; panel->vbt.backlight.active_low_pwm = entry->active_low_pwm; - if (i915->display.vbt.version >= 234) { + if (display->vbt.version >= 234) { u16 min_level; bool 
scale; level = backlight_data->brightness_level[panel_type].level; min_level = backlight_data->brightness_min_level[panel_type].level; - if (i915->display.vbt.version >= 236) + if (display->vbt.version >= 236) scale = backlight_data->brightness_precision_bits[panel_type] == 16; else scale = level > 255; @@ -1064,7 +1068,7 @@ parse_lfp_backlight(struct drm_i915_private *i915, min_level = min_level / 255; if (min_level > 255) { - drm_warn(&i915->drm, "Brightness min level > 255\n"); + drm_warn(display->drm, "Brightness min level > 255\n"); level = 255; } panel->vbt.backlight.min_brightness = min_level; @@ -1076,13 +1080,13 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.min_brightness = entry->min_brightness; } - if (i915->display.vbt.version >= 239) + if (display->vbt.version >= 239) panel->vbt.backlight.hdr_dpcd_refresh_timeout = DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100); else panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT backlight PWM modulation frequency %u Hz, " "active %s, min brightness %u, level %u, controller %u\n", panel->vbt.backlight.pwm_freq_hz, @@ -1093,16 +1097,16 @@ parse_lfp_backlight(struct drm_i915_private *i915, } static void -parse_sdvo_lvds_data(struct drm_i915_private *i915, +parse_sdvo_lvds_data(struct intel_display *display, struct intel_panel *panel) { const struct bdb_sdvo_lvds_dtd *dtd; struct drm_display_mode *panel_fixed_mode; int index; - index = i915->display.params.vbt_sdvo_panel_type; + index = display->params.vbt_sdvo_panel_type; if (index == -2) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Ignore SDVO LVDS mode from BIOS VBT tables.\n"); return; } @@ -1110,14 +1114,14 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915, if (index == -1) { const struct bdb_sdvo_lvds_options *sdvo_lvds_options; - sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS); + sdvo_lvds_options = bdb_find_section(display, BDB_SDVO_LVDS_OPTIONS); if (!sdvo_lvds_options) return; index = sdvo_lvds_options->panel_type; } - dtd = bdb_find_section(i915, BDB_SDVO_LVDS_DTD); + dtd = bdb_find_section(display, BDB_SDVO_LVDS_DTD); if (!dtd) return; @@ -1128,7 +1132,8 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915, * it here to be sure. */ if (index >= ARRAY_SIZE(dtd->dtd)) { - drm_err(&i915->drm, "index %d is larger than dtd->dtd[4] array\n", + drm_err(display->drm, + "index %d is larger than dtd->dtd[4] array\n", index); return; } @@ -1137,19 +1142,19 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915, if (!panel_fixed_mode) return; - fill_detail_timing_data(i915, panel_fixed_mode, &dtd->dtd[index]); + fill_detail_timing_data(display, panel_fixed_mode, &dtd->dtd[index]); panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Found SDVO LVDS mode in BIOS VBT tables: " DRM_MODE_FMT "\n", DRM_MODE_ARG(panel_fixed_mode)); } -static int intel_bios_ssc_frequency(struct drm_i915_private *i915, +static int intel_bios_ssc_frequency(struct intel_display *display, bool alternate) { - switch (DISPLAY_VER(i915)) { + switch (DISPLAY_VER(display)) { case 2: return alternate ? 
66667 : 48000; case 3: @@ -1161,45 +1166,46 @@ static int intel_bios_ssc_frequency(struct drm_i915_private *i915, } static void -parse_general_features(struct drm_i915_private *i915) +parse_general_features(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_general_features *general; - general = bdb_find_section(i915, BDB_GENERAL_FEATURES); + general = bdb_find_section(display, BDB_GENERAL_FEATURES); if (!general) return; - i915->display.vbt.int_tv_support = general->int_tv_support; + display->vbt.int_tv_support = general->int_tv_support; /* int_crt_support can't be trusted on earlier platforms */ - if (i915->display.vbt.version >= 155 && - (HAS_DDI(i915) || IS_VALLEYVIEW(i915))) - i915->display.vbt.int_crt_support = general->int_crt_support; - i915->display.vbt.lvds_use_ssc = general->enable_ssc; - i915->display.vbt.lvds_ssc_freq = - intel_bios_ssc_frequency(i915, general->ssc_freq); - i915->display.vbt.display_clock_mode = general->display_clock_mode; - i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; - if (i915->display.vbt.version >= 181) { - i915->display.vbt.orientation = general->rotate_180 ? + if (display->vbt.version >= 155 && + (HAS_DDI(display) || IS_VALLEYVIEW(i915))) + display->vbt.int_crt_support = general->int_crt_support; + display->vbt.lvds_use_ssc = general->enable_ssc; + display->vbt.lvds_ssc_freq = + intel_bios_ssc_frequency(display, general->ssc_freq); + display->vbt.display_clock_mode = general->display_clock_mode; + display->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; + if (display->vbt.version >= 181) { + display->vbt.orientation = general->rotate_180 ? DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP : DRM_MODE_PANEL_ORIENTATION_NORMAL; } else { - i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; + display->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; } - if (i915->display.vbt.version >= 249 && general->afc_startup_config) { - i915->display.vbt.override_afc_startup = true; - i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7; + if (display->vbt.version >= 249 && general->afc_startup_config) { + display->vbt.override_afc_startup = true; + display->vbt.override_afc_startup_val = general->afc_startup_config == 1 ? 0 : 7; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", - i915->display.vbt.int_tv_support, - i915->display.vbt.int_crt_support, - i915->display.vbt.lvds_use_ssc, - i915->display.vbt.lvds_ssc_freq, - i915->display.vbt.display_clock_mode, - i915->display.vbt.fdi_rx_polarity_inverted); + display->vbt.int_tv_support, + display->vbt.int_crt_support, + display->vbt.lvds_use_ssc, + display->vbt.lvds_ssc_freq, + display->vbt.display_clock_mode, + display->vbt.fdi_rx_polarity_inverted); } static const struct child_device_config * @@ -1209,7 +1215,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i) } static void -parse_sdvo_device_mapping(struct drm_i915_private *i915) +parse_sdvo_device_mapping(struct intel_display *display) { const struct intel_bios_encoder_data *devdata; int count = 0; @@ -1218,19 +1224,19 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) * Only parse SDVO mappings on gens that could have SDVO. This isn't * accurate and doesn't have to be, as long as it's not too strict. 
*/ - if (!IS_DISPLAY_VER(i915, 3, 7)) { - drm_dbg_kms(&i915->drm, "Skipping SDVO device mapping\n"); + if (!IS_DISPLAY_VER(display, 3, 7)) { + drm_dbg_kms(display->drm, "Skipping SDVO device mapping\n"); return; } - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; struct sdvo_device_mapping *mapping; - if (child->slave_addr != SLAVE_ADDR1 && - child->slave_addr != SLAVE_ADDR2) { + if (child->target_addr != TARGET_ADDR1 && + child->target_addr != TARGET_ADDR2) { /* - * If the slave address is neither 0x70 nor 0x72, + * If the target address is neither 0x70 nor 0x72, * it is not a SDVO device. Skip it. */ continue; @@ -1238,39 +1244,39 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) if (child->dvo_port != DEVICE_PORT_DVOB && child->dvo_port != DEVICE_PORT_DVOC) { /* skip the incorrect SDVO port */ - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Incorrect SDVO port. Skip it\n"); continue; } - drm_dbg_kms(&i915->drm, - "the SDVO device with slave addr %2x is found on" + drm_dbg_kms(display->drm, + "the SDVO device with target addr %2x is found on" " %s port\n", - child->slave_addr, + child->target_addr, (child->dvo_port == DEVICE_PORT_DVOB) ? "SDVOB" : "SDVOC"); - mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1]; + mapping = &display->vbt.sdvo_mappings[child->dvo_port - 1]; if (!mapping->initialized) { mapping->dvo_port = child->dvo_port; - mapping->slave_addr = child->slave_addr; + mapping->target_addr = child->target_addr; mapping->dvo_wiring = child->dvo_wiring; mapping->ddc_pin = child->ddc_pin; mapping->i2c_pin = child->i2c_pin; mapping->initialized = 1; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", - mapping->dvo_port, mapping->slave_addr, + mapping->dvo_port, mapping->target_addr, mapping->dvo_wiring, mapping->ddc_pin, mapping->i2c_pin); } else { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Maybe one SDVO port is shared by " "two SDVO device.\n"); } - if (child->slave2_addr) { + if (child->target2_addr) { /* Maybe this is a SDVO device with multiple inputs */ /* And the mapping info is not added */ - drm_dbg_kms(&i915->drm, - "there exists the slave2_addr. Maybe this" + drm_dbg_kms(display->drm, + "there exists the target2_addr. Maybe this" " is a SDVO device with multiple inputs.\n"); } count++; @@ -1278,28 +1284,28 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) if (!count) { /* No SDVO device info is found */ - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "No SDVO device info is found in VBT\n"); } } static void -parse_driver_features(struct drm_i915_private *i915) +parse_driver_features(struct intel_display *display) { const struct bdb_driver_features *driver; - driver = bdb_find_section(i915, BDB_DRIVER_FEATURES); + driver = bdb_find_section(display, BDB_DRIVER_FEATURES); if (!driver) return; - if (DISPLAY_VER(i915) >= 5) { + if (DISPLAY_VER(display) >= 5) { /* * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS * to mean "eDP". The VBT spec doesn't agree with that * interpretation, but real world VBTs seem to. 
*/ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS) - i915->display.vbt.int_lvds_support = 0; + display->vbt.int_lvds_support = 0; } else { /* * FIXME it's not clear which BDB version has the LVDS config @@ -1312,25 +1318,25 @@ parse_driver_features(struct drm_i915_private *i915) * in the wild with the bits correctly populated. Version * 108 (on i85x) does not have the bits correctly populated. */ - if (i915->display.vbt.version >= 134 && + if (display->vbt.version >= 134 && driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS && driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS) - i915->display.vbt.int_lvds_support = 0; + display->vbt.int_lvds_support = 0; } } static void -parse_panel_driver_features(struct drm_i915_private *i915, +parse_panel_driver_features(struct intel_display *display, struct intel_panel *panel) { const struct bdb_driver_features *driver; - driver = bdb_find_section(i915, BDB_DRIVER_FEATURES); + driver = bdb_find_section(display, BDB_DRIVER_FEATURES); if (!driver) return; - if (i915->display.vbt.version < 228) { - drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n", + if (display->vbt.version < 228) { + drm_dbg_kms(display->drm, "DRRS State Enabled:%d\n", driver->drrs_enabled); /* * If DRRS is not supported, drrs_type has to be set to 0. @@ -1354,7 +1360,7 @@ parse_panel_driver_features(struct drm_i915_private *i915, } static void -parse_power_conservation_features(struct drm_i915_private *i915, +parse_power_conservation_features(struct intel_display *display, struct intel_panel *panel) { const struct bdb_lfp_power *power; @@ -1362,10 +1368,10 @@ parse_power_conservation_features(struct drm_i915_private *i915, panel->vbt.vrr = true; /* matches Windows behaviour */ - if (i915->display.vbt.version < 228) + if (display->vbt.version < 228) return; - power = bdb_find_section(i915, BDB_LFP_POWER); + power = bdb_find_section(display, BDB_LFP_POWER); if (!power) return; @@ -1388,16 +1394,16 @@ parse_power_conservation_features(struct drm_i915_private *i915, panel->vbt.drrs_type = DRRS_TYPE_NONE; } - if (i915->display.vbt.version >= 232) + if (display->vbt.version >= 232) panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type); - if (i915->display.vbt.version >= 233) + if (display->vbt.version >= 233) panel->vbt.vrr = panel_bool(power->vrr_feature_enabled, panel_type); } static void -parse_edp(struct drm_i915_private *i915, +parse_edp(struct intel_display *display, struct intel_panel *panel) { const struct bdb_edp *edp; @@ -1405,7 +1411,7 @@ parse_edp(struct drm_i915_private *i915, const struct edp_fast_link_params *edp_link_params; int panel_type = panel->vbt.panel_type; - edp = bdb_find_section(i915, BDB_EDP); + edp = bdb_find_section(display, BDB_EDP); if (!edp) return; @@ -1427,7 +1433,7 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.pps = *edp_pps; - if (i915->display.vbt.version >= 224) { + if (display->vbt.version >= 224) { panel->vbt.edp.rate = edp->edp_fast_link_training_rate[panel_type] * 20; } else { @@ -1442,7 +1448,7 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.rate = 540000; break; default: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT has unknown eDP link rate value %u\n", edp_link_params->rate); break; @@ -1460,7 +1466,7 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.lanes = 4; break; default: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT has unknown eDP lane count value %u\n", edp_link_params->lanes); break; @@ -1480,7 +1486,7 @@ parse_edp(struct drm_i915_private *i915, 
panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3; break; default: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT has unknown eDP pre-emphasis value %u\n", edp_link_params->preemphasis); break; @@ -1500,19 +1506,19 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; break; default: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT has unknown eDP voltage swing value %u\n", edp_link_params->vswing); break; } - if (i915->display.vbt.version >= 173) { + if (display->vbt.version >= 173) { u8 vswing; /* Don't read from VBT if module parameter has valid value*/ - if (i915->display.params.edp_vswing) { + if (display->params.edp_vswing) { panel->vbt.edp.low_vswing = - i915->display.params.edp_vswing == 1; + display->params.edp_vswing == 1; } else { vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; panel->vbt.edp.low_vswing = vswing == 0; @@ -1522,26 +1528,27 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.drrs_msa_timing_delay = panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2); - if (i915->display.vbt.version >= 244) + if (display->vbt.version >= 244) panel->vbt.edp.max_link_rate = edp->edp_max_port_link_rate[panel_type] * 20; - if (i915->display.vbt.version >= 251) + if (display->vbt.version >= 251) panel->vbt.edp.dsc_disable = panel_bool(edp->edp_dsc_disable, panel_type); } static void -parse_psr(struct drm_i915_private *i915, +parse_psr(struct intel_display *display, struct intel_panel *panel) { + struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_psr *psr; const struct psr_table *psr_table; int panel_type = panel->vbt.panel_type; - psr = bdb_find_section(i915, BDB_PSR); + psr = bdb_find_section(display, BDB_PSR); if (!psr) { - drm_dbg_kms(&i915->drm, "No PSR BDB found.\n"); + drm_dbg_kms(display->drm, "No PSR BDB found.\n"); return; } @@ -1558,8 +1565,8 @@ parse_psr(struct drm_i915_private *i915, * New psr options 0=500us, 1=100us, 2=2500us, 3=0us * Old decimal value is wake up time in multiples of 100 us. 
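The PSR hunks around this point decode the wakeup times two ways: on VBT >= 205 with DISPLAY_VER >= 9 (Broxton excluded) the field is a 2-bit code (0 = 500 us, 1 = 100 us, 2 = 2500 us, 3 = 0 us), on older VBTs it is a plain multiple of 100 us, and unknown codes fall back to the 2500 us worst case. A standalone sketch of that decode; decode_psr_wakeup_us() is a hypothetical helper, not part of the patch:

#include <stdbool.h>

/* New-style 2-bit encoding, per the comment in parse_psr() above. */
static const unsigned int psr_wakeup_code_us[] = { 500, 100, 2500, 0 };

static unsigned int decode_psr_wakeup_us(unsigned int raw, bool new_encoding)
{
        if (!new_encoding)
                return raw * 100;       /* old VBTs: multiples of 100 us */

        /* Out-of-range codes default to the worst case, as the driver does. */
        if (raw >= 4)
                return 2500;

        return psr_wakeup_code_us[raw];
}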
*/ - if (i915->display.vbt.version >= 205 && - (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) { + if (display->vbt.version >= 205 && + (DISPLAY_VER(display) >= 9 && !IS_BROXTON(i915))) { switch (psr_table->tp1_wakeup_time) { case 0: panel->vbt.psr.tp1_wakeup_time_us = 500; @@ -1571,7 +1578,7 @@ parse_psr(struct drm_i915_private *i915, panel->vbt.psr.tp1_wakeup_time_us = 0; break; default: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", psr_table->tp1_wakeup_time); fallthrough; @@ -1591,7 +1598,7 @@ parse_psr(struct drm_i915_private *i915, panel->vbt.psr.tp2_tp3_wakeup_time_us = 0; break; default: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", psr_table->tp2_tp3_wakeup_time); fallthrough; @@ -1604,7 +1611,7 @@ parse_psr(struct drm_i915_private *i915, panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100; } - if (i915->display.vbt.version >= 226) { + if (display->vbt.version >= 226) { u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time; wakeup_time = panel_bits(wakeup_time, panel_type, 2); @@ -1630,13 +1637,13 @@ parse_psr(struct drm_i915_private *i915, } } -static void parse_dsi_backlight_ports(struct drm_i915_private *i915, +static void parse_dsi_backlight_ports(struct intel_display *display, struct intel_panel *panel, enum port port) { - enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C; + enum port port_bc = DISPLAY_VER(display) >= 11 ? PORT_B : PORT_C; - if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) { + if (!panel->vbt.dsi.config->dual_link || display->vbt.version < 197) { panel->vbt.dsi.bl_ports = BIT(port); if (panel->vbt.dsi.config->cabc_supported) panel->vbt.dsi.cabc_ports = BIT(port); @@ -1676,7 +1683,7 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915, } static void -parse_mipi_config(struct drm_i915_private *i915, +parse_mipi_config(struct intel_display *display, struct intel_panel *panel) { const struct bdb_mipi_config *start; @@ -1686,27 +1693,19 @@ parse_mipi_config(struct drm_i915_private *i915, enum port port; /* parse MIPI blocks only if LFP type is MIPI */ - if (!intel_bios_is_dsi_present(i915, &port)) + if (!intel_bios_is_dsi_present(display, &port)) return; /* Initialize this to undefined indicating no generic MIPI support */ panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID; - /* Block #40 is already parsed and panel_fixed_mode is - * stored in i915->lfp_vbt_mode - * resuse this when needed - */ - - /* Parse #52 for panel index used from panel_type already - * parsed - */ - start = bdb_find_section(i915, BDB_MIPI_CONFIG); + start = bdb_find_section(display, BDB_MIPI_CONFIG); if (!start) { - drm_dbg_kms(&i915->drm, "No MIPI config BDB found"); + drm_dbg_kms(display->drm, "No MIPI config BDB found"); return; } - drm_dbg(&i915->drm, "Found MIPI Config block, panel index = %d\n", + drm_dbg(display->drm, "Found MIPI Config block, panel index = %d\n", panel_type); /* @@ -1727,7 +1726,7 @@ parse_mipi_config(struct drm_i915_private *i915, return; } - parse_dsi_backlight_ports(i915, panel, port); + parse_dsi_backlight_ports(display, panel, port); /* FIXME is the 90 vs. 270 correct? */ switch (config->rotation) { @@ -1759,7 +1758,7 @@ parse_mipi_config(struct drm_i915_private *i915, /* Find the sequence block and size for the given panel. 
*/ static const u8 * -find_panel_sequence_block(struct drm_i915_private *i915, +find_panel_sequence_block(struct intel_display *display, const struct bdb_mipi_sequence *sequence, u16 panel_id, u32 *seq_size) { @@ -1777,7 +1776,8 @@ find_panel_sequence_block(struct drm_i915_private *i915, for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) { if (index + header_size > total) { - drm_err(&i915->drm, "Invalid sequence block (header)\n"); + drm_err(display->drm, + "Invalid sequence block (header)\n"); return NULL; } @@ -1790,7 +1790,7 @@ find_panel_sequence_block(struct drm_i915_private *i915, index += header_size; if (index + current_size > total) { - drm_err(&i915->drm, "Invalid sequence block\n"); + drm_err(display->drm, "Invalid sequence block\n"); return NULL; } @@ -1802,12 +1802,13 @@ find_panel_sequence_block(struct drm_i915_private *i915, index += current_size; } - drm_err(&i915->drm, "Sequence block detected but no valid configuration\n"); + drm_err(display->drm, + "Sequence block detected but no valid configuration\n"); return NULL; } -static int goto_next_sequence(struct drm_i915_private *i915, +static int goto_next_sequence(struct intel_display *display, const u8 *data, int index, int total) { u16 len; @@ -1838,7 +1839,7 @@ static int goto_next_sequence(struct drm_i915_private *i915, len = *(data + index + 6) + 7; break; default: - drm_err(&i915->drm, "Unknown operation byte\n"); + drm_err(display->drm, "Unknown operation byte\n"); return 0; } } @@ -1846,7 +1847,7 @@ static int goto_next_sequence(struct drm_i915_private *i915, return 0; } -static int goto_next_sequence_v3(struct drm_i915_private *i915, +static int goto_next_sequence_v3(struct intel_display *display, const u8 *data, int index, int total) { int seq_end; @@ -1858,7 +1859,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915, * checking on the structure. */ if (total < 5) { - drm_err(&i915->drm, "Too small sequence size\n"); + drm_err(display->drm, "Too small sequence size\n"); return 0; } @@ -1875,7 +1876,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915, seq_end = index + size_of_sequence; if (seq_end > total) { - drm_err(&i915->drm, "Invalid sequence size\n"); + drm_err(display->drm, "Invalid sequence size\n"); return 0; } @@ -1885,7 +1886,8 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915, if (operation_byte == MIPI_SEQ_ELEM_END) { if (index != seq_end) { - drm_err(&i915->drm, "Invalid element structure\n"); + drm_err(display->drm, + "Invalid element structure\n"); return 0; } return index; @@ -1907,7 +1909,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915, case MIPI_SEQ_ELEM_PMIC: break; default: - drm_err(&i915->drm, "Unknown operation byte %u\n", + drm_err(display->drm, "Unknown operation byte %u\n", operation_byte); break; } @@ -1920,13 +1922,13 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915, * Get len of pre-fixed deassert fragment from a v1 init OTP sequence, * skip all delay + gpio operands and stop at the first DSI packet op. 
*/ -static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915, +static int get_init_otp_deassert_fragment_len(struct intel_display *display, struct intel_panel *panel) { const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; int index, len; - if (drm_WARN_ON(&i915->drm, + if (drm_WARN_ON(display->drm, !data || panel->vbt.dsi.seq_version != 1)) return 0; @@ -1955,7 +1957,7 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915, * these devices we split the init OTP sequence into a deassert sequence and * the actual init OTP part. */ -static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915, +static void vlv_fixup_mipi_sequences(struct intel_display *display, struct intel_panel *panel) { u8 *init_otp; @@ -1973,11 +1975,11 @@ static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915, return; /* The deassert-sequence ends at the first DSI packet */ - len = get_init_otp_deassert_fragment_len(i915, panel); + len = get_init_otp_deassert_fragment_len(display, panel); if (!len) return; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Using init OTP fragment to deassert reset\n"); /* Copy the fragment, update seq byte and terminate it */ @@ -2010,29 +2012,32 @@ static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915, * or examine the contents of the sequences to * avoid false positives? */ -static void icl_fixup_mipi_sequences(struct drm_i915_private *i915, +static void icl_fixup_mipi_sequences(struct intel_display *display, struct intel_panel *panel) { if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] && panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) { - drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n"); + drm_dbg_kms(display->drm, + "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n"); swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP], panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]); } } -static void fixup_mipi_sequences(struct drm_i915_private *i915, +static void fixup_mipi_sequences(struct intel_display *display, struct intel_panel *panel) { - if (DISPLAY_VER(i915) >= 11) - icl_fixup_mipi_sequences(i915, panel); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 11) + icl_fixup_mipi_sequences(display, panel); else if (IS_VALLEYVIEW(i915)) - vlv_fixup_mipi_sequences(i915, panel); + vlv_fixup_mipi_sequences(display, panel); } static void -parse_mipi_sequence(struct drm_i915_private *i915, +parse_mipi_sequence(struct intel_display *display, struct intel_panel *panel) { int panel_type = panel->vbt.panel_type; @@ -2046,25 +2051,25 @@ parse_mipi_sequence(struct drm_i915_private *i915, if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID) return; - sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE); + sequence = bdb_find_section(display, BDB_MIPI_SEQUENCE); if (!sequence) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "No MIPI Sequence found, parsing complete\n"); return; } /* Fail gracefully for forward incompatible sequence block. 
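parse_mipi_sequence() just below walks the copied sequence data with a simple cursor: remember where each sequence starts, let goto_next_sequence() or goto_next_sequence_v3() advance the cursor depending on the block version, and treat a return of 0 as a hard parse error. A hedged sketch of that loop shape; walk_sequences() is illustrative, the end-of-table handling is omitted, and the way seq_id is read from the stream is simplified:

static int walk_sequences(struct intel_display *display,
                          struct intel_panel *panel,
                          const u8 *data, int seq_size, u8 seq_version)
{
        int index = 0;

        while (index < seq_size) {
                u8 seq_id = data[index];        /* simplified stream read */

                if (seq_id >= MIPI_SEQ_MAX)
                        return -EINVAL;

                /* Record where this sequence starts; it is executed later. */
                panel->vbt.dsi.sequence[seq_id] = data + index;

                index = seq_version >= 3 ?
                        goto_next_sequence_v3(display, data, index, seq_size) :
                        goto_next_sequence(display, data, index, seq_size);
                if (!index)
                        return -EINVAL; /* malformed element, abort */
        }

        return 0;
}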
*/ if (sequence->version >= 4) { - drm_err(&i915->drm, + drm_err(display->drm, "Unable to parse MIPI Sequence Block v%u\n", sequence->version); return; } - drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n", + drm_dbg(display->drm, "Found MIPI sequence block v%u\n", sequence->version); - seq_data = find_panel_sequence_block(i915, sequence, panel_type, &seq_size); + seq_data = find_panel_sequence_block(display, sequence, panel_type, &seq_size); if (!seq_data) return; @@ -2079,24 +2084,24 @@ parse_mipi_sequence(struct drm_i915_private *i915, break; if (seq_id >= MIPI_SEQ_MAX) { - drm_err(&i915->drm, "Unknown sequence %u\n", + drm_err(display->drm, "Unknown sequence %u\n", seq_id); goto err; } /* Log about presence of sequences we won't run. */ if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Unsupported sequence %u\n", seq_id); panel->vbt.dsi.sequence[seq_id] = data + index; if (sequence->version >= 3) - index = goto_next_sequence_v3(i915, data, index, seq_size); + index = goto_next_sequence_v3(display, data, index, seq_size); else - index = goto_next_sequence(i915, data, index, seq_size); + index = goto_next_sequence(display, data, index, seq_size); if (!index) { - drm_err(&i915->drm, "Invalid sequence %u\n", + drm_err(display->drm, "Invalid sequence %u\n", seq_id); goto err; } @@ -2106,9 +2111,9 @@ parse_mipi_sequence(struct drm_i915_private *i915, panel->vbt.dsi.size = seq_size; panel->vbt.dsi.seq_version = sequence->version; - fixup_mipi_sequences(i915, panel); + fixup_mipi_sequences(display, panel); - drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n"); + drm_dbg(display->drm, "MIPI related VBT parsing complete\n"); return; err: @@ -2117,47 +2122,47 @@ parse_mipi_sequence(struct drm_i915_private *i915, } static void -parse_compression_parameters(struct drm_i915_private *i915) +parse_compression_parameters(struct intel_display *display) { const struct bdb_compression_parameters *params; struct intel_bios_encoder_data *devdata; u16 block_size; int index; - if (i915->display.vbt.version < 198) + if (display->vbt.version < 198) return; - params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS); + params = bdb_find_section(display, BDB_COMPRESSION_PARAMETERS); if (params) { /* Sanity checks */ if (params->entry_size != sizeof(params->data[0])) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT: unsupported compression param entry size\n"); return; } block_size = get_blocksize(params); if (block_size < sizeof(*params)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT: expected 16 compression param entries\n"); return; } } - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; if (!child->compression_enable) continue; if (!params) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT: compression params not available\n"); continue; } if (child->compression_method_cps) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT: CPS compression not supported\n"); continue; } @@ -2169,12 +2174,12 @@ parse_compression_parameters(struct drm_i915_private *i915) } } -static u8 translate_iboost(struct drm_i915_private *i915, u8 val) +static u8 translate_iboost(struct intel_display *display, u8 val) { static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */ if (val >= ARRAY_SIZE(mapping)) { - drm_dbg_kms(&i915->drm, + 
drm_dbg_kms(display->drm, "Unsupported I_boost value found in VBT (%d), display may not work properly\n", val); return 0; } @@ -2231,8 +2236,9 @@ static const u8 adlp_ddc_pin_map[] = { [GMBUS_PIN_12_TC4_ICP] = ADLP_DDC_BUS_PORT_TC4, }; -static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) +static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin) { + struct drm_i915_private *i915 = to_i915(display->drm); const u8 *ddc_pin_map; int i, n_entries; @@ -2247,7 +2253,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) { ddc_pin_map = rkl_pch_tgp_ddc_pin_map; n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); - } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) { + } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) { ddc_pin_map = gen9bc_tgp_ddc_pin_map; n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map); } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) { @@ -2266,7 +2272,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) return i; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin); return 0; @@ -2324,9 +2330,10 @@ static enum port __dvo_port_to_port(int n_ports, int n_dvo, return PORT_NONE; } -static enum port dvo_port_to_port(struct drm_i915_private *i915, +static enum port dvo_port_to_port(struct intel_display *display, u8 dvo_port) { + struct drm_i915_private *i915 = to_i915(display->drm); /* * Each DDI port can have more than one value on the "DVO Port" field, * so look for all the possible values for each port. @@ -2378,7 +2385,7 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915, [PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 }, }; - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(display) >= 13) return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping), ARRAY_SIZE(xelpd_port_mapping[0]), xelpd_port_mapping, @@ -2401,13 +2408,13 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915, } static enum port -dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port) +dsi_dvo_port_to_port(struct intel_display *display, u8 dvo_port) { switch (dvo_port) { case DVO_PORT_MIPIA: return PORT_A; case DVO_PORT_MIPIC: - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) return PORT_B; else return PORT_C; @@ -2418,13 +2425,13 @@ dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port) enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *i915 = devdata->i915; + struct intel_display *display = devdata->display; const struct child_device_config *child = &devdata->child; enum port port; - port = dvo_port_to_port(i915, child->dvo_port); - if (port == PORT_NONE && DISPLAY_VER(i915) >= 11) - port = dsi_dvo_port_to_port(i915, child->dvo_port); + port = dvo_port_to_port(display, child->dvo_port); + if (port == PORT_NONE && DISPLAY_VER(display) >= 11) + port = dsi_dvo_port_to_port(display, child->dvo_port); return port; } @@ -2469,10 +2476,10 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->display.vbt.version < 216) + if (!devdata || devdata->display->vbt.version < 216) return 0; - if (devdata->i915->display.vbt.version >= 230) + if (devdata->display->vbt.version >= 230) return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate); else 
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate); @@ -2480,7 +2487,7 @@ int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->display.vbt.version < 244) + if (!devdata || devdata->display->vbt.version < 244) return 0; return devdata->child.dp_max_lane_count + 1; @@ -2489,10 +2496,10 @@ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) static void sanitize_device_type(struct intel_bios_encoder_data *devdata, enum port port) { - struct drm_i915_private *i915 = devdata->i915; + struct intel_display *display = devdata->display; bool is_hdmi; - if (port != PORT_A || DISPLAY_VER(i915) >= 12) + if (port != PORT_A || DISPLAY_VER(display) >= 12) return; if (!intel_bios_encoder_supports_dvi(devdata)) @@ -2500,7 +2507,7 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata, is_hdmi = intel_bios_encoder_supports_hdmi(devdata); - drm_dbg_kms(&i915->drm, "VBT claims port A supports DVI%s, ignoring\n", + drm_dbg_kms(display->drm, "VBT claims port A supports DVI%s, ignoring\n", is_hdmi ? "/HDMI" : ""); devdata->child.device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING; @@ -2510,7 +2517,8 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata, static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata, enum port port) { - struct drm_i915_private *i915 = devdata->i915; + struct intel_display *display = devdata->display; + struct drm_i915_private *i915 = to_i915(display->drm); if (!intel_bios_encoder_supports_dvi(devdata)) return; @@ -2521,7 +2529,8 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata, * up to 11, whereas the BDW max is 9. 
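The per-encoder getters above and below settle into a single accessor pattern after the conversion: devdata carries a struct intel_display back-pointer, so each getter NULL-checks devdata, gates on devdata->display->vbt.version, and only then trusts the child-device field. A minimal sketch; example_get_field() is an illustrative name, while the 244 threshold and the lane-count-minus-one encoding mirror intel_bios_dp_max_lane_count():

static int example_get_field(const struct intel_bios_encoder_data *devdata)
{
        /* No child device, or a VBT too old to define this field. */
        if (!devdata || devdata->display->vbt.version < 244)
                return 0;

        /* The VBT stores the lane count minus one. */
        return devdata->child.dp_max_lane_count + 1;
}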
*/ if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) { - drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n", + drm_dbg_kms(display->drm, + "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n", port_name(port), devdata->child.hdmi_level_shifter_value, 9); devdata->child.hdmi_level_shifter_value = 9; @@ -2569,14 +2578,14 @@ intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata) bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata) { - return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon; + return devdata && HAS_LSPCON(devdata->display) && devdata->child.lspcon; } /* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->display.vbt.version < 158 || - DISPLAY_VER(devdata->i915) >= 14) + if (!devdata || devdata->display->vbt.version < 158 || + DISPLAY_VER(devdata->display) >= 14) return -1; return devdata->child.hdmi_level_shifter_value; @@ -2584,7 +2593,7 @@ int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->display.vbt.version < 204) + if (!devdata || devdata->display->vbt.version < 204) return 0; switch (devdata->child.hdmi_max_data_rate) { @@ -2606,8 +2615,9 @@ int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata } } -static bool is_port_valid(struct drm_i915_private *i915, enum port port) +static bool is_port_valid(struct intel_display *display, enum port port) { + struct drm_i915_private *i915 = to_i915(display->drm); /* * On some ICL SKUs port F is not present, but broken VBTs mark * the port as present. 
Only try to initialize port F for the @@ -2621,7 +2631,7 @@ static bool is_port_valid(struct drm_i915_private *i915, enum port port) static void print_ddi_port(const struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *i915 = devdata->i915; + struct intel_display *display = devdata->display; const struct child_device_config *child = &devdata->child; bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt; int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock; @@ -2641,7 +2651,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata) supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata); supports_tbt = intel_bios_encoder_supports_tbt(devdata); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d DP++:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi, intel_bios_encoder_supports_dp_dual_mode(devdata), @@ -2651,33 +2661,33 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata) hdmi_level_shift = intel_bios_hdmi_level_shift(devdata); if (hdmi_level_shift >= 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %c VBT HDMI level shift: %d\n", port_name(port), hdmi_level_shift); } max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata); if (max_tmds_clock) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %c VBT HDMI max TMDS clock: %d kHz\n", port_name(port), max_tmds_clock); /* I_boost config for SKL and above */ dp_boost_level = intel_bios_dp_boost_level(devdata); if (dp_boost_level) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %c VBT (e)DP boost level: %d\n", port_name(port), dp_boost_level); hdmi_boost_level = intel_bios_hdmi_boost_level(devdata); if (hdmi_boost_level) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %c VBT HDMI boost level: %d\n", port_name(port), hdmi_boost_level); dp_max_link_rate = intel_bios_dp_max_link_rate(devdata); if (dp_max_link_rate) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %c VBT DP max link rate: %d\n", port_name(port), dp_max_link_rate); @@ -2685,22 +2695,22 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata) * FIXME need to implement support for VBT * vswing/preemph tables should this ever trigger. 
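print_ddi_port() above already exercises most of the public per-encoder queries; a consumer such as an encoder init path uses the same getters against a devdata looked up by port. A hedged usage sketch; probe_port_caps() is illustrative and not a function in this patch:

static void probe_port_caps(struct intel_display *display, enum port port)
{
        const struct intel_bios_encoder_data *devdata =
                intel_bios_encoder_data_lookup(display, port);

        if (!devdata)
                return;

        if (intel_bios_encoder_supports_hdmi(devdata))
                drm_dbg_kms(display->drm, "port %c: HDMI, max TMDS %d kHz\n",
                            port_name(port),
                            intel_bios_hdmi_max_tmds_clock(devdata));

        if (intel_bios_encoder_supports_dp(devdata))
                drm_dbg_kms(display->drm, "port %c: DP, max link rate %d\n",
                            port_name(port),
                            intel_bios_dp_max_link_rate(devdata));
}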
*/ - drm_WARN(&i915->drm, child->use_vbt_vswing, + drm_WARN(display->drm, child->use_vbt_vswing, "Port %c asks to use VBT vswing/preemph tables\n", port_name(port)); } static void parse_ddi_port(struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *i915 = devdata->i915; + struct intel_display *display = devdata->display; enum port port; port = intel_bios_encoder_port(devdata); if (port == PORT_NONE) return; - if (!is_port_valid(i915, port)) { - drm_dbg_kms(&i915->drm, + if (!is_port_valid(display, port)) { + drm_dbg_kms(display->drm, "VBT reports port %c as supported, but that can't be true: skipping\n", port_name(port)); return; @@ -2710,22 +2720,24 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) sanitize_hdmi_level_shift(devdata, port); } -static bool has_ddi_port_info(struct drm_i915_private *i915) +static bool has_ddi_port_info(struct intel_display *display) { - return DISPLAY_VER(i915) >= 5 || IS_G4X(i915); + struct drm_i915_private *i915 = to_i915(display->drm); + + return DISPLAY_VER(display) >= 5 || IS_G4X(i915); } -static void parse_ddi_ports(struct drm_i915_private *i915) +static void parse_ddi_ports(struct intel_display *display) { struct intel_bios_encoder_data *devdata; - if (!has_ddi_port_info(i915)) + if (!has_ddi_port_info(display)) return; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) + list_for_each_entry(devdata, &display->vbt.display_devices, node) parse_ddi_port(devdata); - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) + list_for_each_entry(devdata, &display->vbt.display_devices, node) print_ddi_port(devdata); } @@ -2751,27 +2763,27 @@ static int child_device_expected_size(u16 version) return 22; } -static bool child_device_size_valid(struct drm_i915_private *i915, int size) +static bool child_device_size_valid(struct intel_display *display, int size) { int expected_size; - expected_size = child_device_expected_size(i915->display.vbt.version); + expected_size = child_device_expected_size(display->vbt.version); if (expected_size < 0) { expected_size = sizeof(struct child_device_config); - drm_dbg(&i915->drm, + drm_dbg(display->drm, "Expected child device config size for VBT version %u not known; assuming %d\n", - i915->display.vbt.version, expected_size); + display->vbt.version, expected_size); } /* Flag an error for unexpected size, but continue anyway. */ if (size != expected_size) - drm_err(&i915->drm, + drm_err(display->drm, "Unexpected child device config size %d (expected %d for VBT version %u)\n", - size, expected_size, i915->display.vbt.version); + size, expected_size, display->vbt.version); /* The legacy sized child device config is the minimum we need. 
*/ if (size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Child device config size %d is too small.\n", size); return false; @@ -2781,8 +2793,9 @@ static bool child_device_size_valid(struct drm_i915_private *i915, int size) } static void -parse_general_definitions(struct drm_i915_private *i915) +parse_general_definitions(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_general_definitions *defs; struct intel_bios_encoder_data *devdata; const struct child_device_config *child; @@ -2790,27 +2803,27 @@ parse_general_definitions(struct drm_i915_private *i915) u16 block_size; int bus_pin; - defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS); + defs = bdb_find_section(display, BDB_GENERAL_DEFINITIONS); if (!defs) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "No general definition block is found, no devices defined.\n"); return; } block_size = get_blocksize(defs); if (block_size < sizeof(*defs)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "General definitions block too small (%u)\n", block_size); return; } bus_pin = defs->crt_ddc_gmbus_pin; - drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin); + drm_dbg_kms(display->drm, "crt_ddc_bus_pin: %d\n", bus_pin); if (intel_gmbus_is_valid_pin(i915, bus_pin)) - i915->display.vbt.crt_ddc_pin = bus_pin; + display->vbt.crt_ddc_pin = bus_pin; - if (!child_device_size_valid(i915, defs->child_dev_size)) + if (!child_device_size_valid(display, defs->child_dev_size)) return; /* get the number of child device */ @@ -2821,7 +2834,7 @@ parse_general_definitions(struct drm_i915_private *i915) if (!child->device_type) continue; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Found VBT child device with type 0x%x\n", child->device_type); @@ -2829,7 +2842,7 @@ parse_general_definitions(struct drm_i915_private *i915) if (!devdata) break; - devdata->i915 = i915; + devdata->display = display; /* * Copy as much as we know (sizeof) and is available @@ -2839,37 +2852,39 @@ parse_general_definitions(struct drm_i915_private *i915) memcpy(&devdata->child, child, min_t(size_t, defs->child_dev_size, sizeof(*child))); - list_add_tail(&devdata->node, &i915->display.vbt.display_devices); + list_add_tail(&devdata->node, &display->vbt.display_devices); } - if (list_empty(&i915->display.vbt.display_devices)) - drm_dbg_kms(&i915->drm, + if (list_empty(&display->vbt.display_devices)) + drm_dbg_kms(display->drm, "no child dev is parsed from VBT\n"); } /* Common defaults which may be overridden by VBT. */ static void -init_vbt_defaults(struct drm_i915_private *i915) +init_vbt_defaults(struct intel_display *display) { - i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; + struct drm_i915_private *i915 = to_i915(display->drm); + + display->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; /* general features */ - i915->display.vbt.int_tv_support = 1; - i915->display.vbt.int_crt_support = 1; + display->vbt.int_tv_support = 1; + display->vbt.int_crt_support = 1; /* driver features */ - i915->display.vbt.int_lvds_support = 1; + display->vbt.int_lvds_support = 1; /* Default to using SSC */ - i915->display.vbt.lvds_use_ssc = 1; + display->vbt.lvds_use_ssc = 1; /* * Core/SandyBridge/IvyBridge use alternative (120MHz) reference * clock for LVDS. 
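parse_general_definitions() above copies each child device with min_t(size_t, defs->child_dev_size, sizeof(*child)): the per-entry size declared by the VBT may be smaller or larger than the struct the driver knows, so only the overlapping bytes are copied and newer fields simply stay zero when the table is old. A standalone sketch of that technique; the struct and field names here are illustrative, not the real VBT layout:

#include <stdio.h>
#include <string.h>

struct known_config {                   /* what this driver version understands */
        unsigned short device_type;
        unsigned char dvo_port;
        unsigned char extra_new_field;  /* absent from older tables */
};

static void copy_versioned_entry(struct known_config *dst,
                                 const void *src, size_t entry_size)
{
        /* Zero first so fields missing from an older table read as 0. */
        memset(dst, 0, sizeof(*dst));
        /* Copy only what both sides agree on. */
        memcpy(dst, src, entry_size < sizeof(*dst) ? entry_size : sizeof(*dst));
}

int main(void)
{
        unsigned char legacy_entry[3] = { 0x68, 0x10, 0x02 };   /* 3-byte entry */
        struct known_config cfg;

        copy_versioned_entry(&cfg, legacy_entry, sizeof(legacy_entry));
        printf("type=0x%x port=%d new=%d\n",
               cfg.device_type, cfg.dvo_port, cfg.extra_new_field);
        return 0;
}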
*/ - i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915, - !HAS_PCH_SPLIT(i915)); - drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n", - i915->display.vbt.lvds_ssc_freq); + display->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(display, + !HAS_PCH_SPLIT(i915)); + drm_dbg_kms(display->drm, "Set default to SSC at %d kHz\n", + display->vbt.lvds_ssc_freq); } /* Common defaults which may be overridden by VBT. */ @@ -2885,12 +2900,13 @@ init_vbt_panel_defaults(struct intel_panel *panel) /* Defaults to initialize only if there is no VBT. */ static void -init_vbt_missing_defaults(struct drm_i915_private *i915) +init_vbt_missing_defaults(struct intel_display *display) { - unsigned int ports = DISPLAY_RUNTIME_INFO(i915)->port_mask; + struct drm_i915_private *i915 = to_i915(display->drm); + unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask; enum port port; - if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915)) + if (!HAS_DDI(display) && !IS_CHERRYVIEW(i915)) return; for_each_port_masked(port, ports) { @@ -2910,7 +2926,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915) if (!devdata) break; - devdata->i915 = i915; + devdata->display = display; child = &devdata->child; if (port == PORT_F) @@ -2929,15 +2945,15 @@ init_vbt_missing_defaults(struct drm_i915_private *i915) if (port == PORT_A) child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR; - list_add_tail(&devdata->node, &i915->display.vbt.display_devices); + list_add_tail(&devdata->node, &display->vbt.display_devices); - drm_dbg_kms(&i915->drm, - "Generating default VBT child device with type 0x04%x on port %c\n", + drm_dbg_kms(display->drm, + "Generating default VBT child device with type 0x%04x on port %c\n", child->device_type, port_name(port)); } /* Bypass some minimum baseline VBT version checks */ - i915->display.vbt.version = 155; + display->vbt.version = 155; } static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) @@ -2949,13 +2965,13 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) /** * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT - * @i915: the device + * @display: display device * @buf: pointer to a buffer to validate * @size: size of the buffer * * Returns true on valid VBT. 
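intel_bios_is_valid_vbt() just below runs a chain of cheap structural checks before anything trusts the buffer: the header must fit in the buffer, the signature must read "$VBT", the declared vbt_size must not exceed the buffer, and the BDB header plus bdb_size must stay in bounds. A standalone sketch of the first three checks; vbt_header_sketch and vbt_header_plausible() are illustrative and the BDB bounds checks are left out:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct vbt_header_sketch {      /* only the fields the checks below need */
        uint8_t signature[20];  /* starts with "$VBT" */
        uint16_t version;
        uint16_t header_size;
        uint16_t vbt_size;
};

static bool vbt_header_plausible(const void *buf, size_t size)
{
        const struct vbt_header_sketch *vbt = buf;

        if (sizeof(*vbt) > size)
                return false;   /* header itself incomplete */

        if (memcmp(vbt->signature, "$VBT", 4))
                return false;   /* wrong magic */

        if (vbt->vbt_size > size)
                return false;   /* declared size overflows the buffer */

        return true;
}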
*/ -bool intel_bios_is_valid_vbt(struct drm_i915_private *i915, +bool intel_bios_is_valid_vbt(struct intel_display *display, const void *buf, size_t size) { const struct vbt_header *vbt = buf; @@ -2965,17 +2981,18 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915, return false; if (sizeof(struct vbt_header) > size) { - drm_dbg_kms(&i915->drm, "VBT header incomplete\n"); + drm_dbg_kms(display->drm, "VBT header incomplete\n"); return false; } if (memcmp(vbt->signature, "$VBT", 4)) { - drm_dbg_kms(&i915->drm, "VBT invalid signature\n"); + drm_dbg_kms(display->drm, "VBT invalid signature\n"); return false; } if (vbt->vbt_size > size) { - drm_dbg_kms(&i915->drm, "VBT incomplete (vbt_size overflows)\n"); + drm_dbg_kms(display->drm, + "VBT incomplete (vbt_size overflows)\n"); return false; } @@ -2985,48 +3002,48 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915, vbt->bdb_offset, sizeof(struct bdb_header), size)) { - drm_dbg_kms(&i915->drm, "BDB header incomplete\n"); + drm_dbg_kms(display->drm, "BDB header incomplete\n"); return false; } bdb = get_bdb_header(vbt); if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) { - drm_dbg_kms(&i915->drm, "BDB incomplete\n"); + drm_dbg_kms(display->drm, "BDB incomplete\n"); return false; } return vbt; } -static struct vbt_header *firmware_get_vbt(struct drm_i915_private *i915, +static struct vbt_header *firmware_get_vbt(struct intel_display *display, size_t *size) { struct vbt_header *vbt = NULL; const struct firmware *fw = NULL; - const char *name = i915->display.params.vbt_firmware; + const char *name = display->params.vbt_firmware; int ret; if (!name || !*name) return NULL; - ret = request_firmware(&fw, name, i915->drm.dev); + ret = request_firmware(&fw, name, display->drm->dev); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "Requesting VBT firmware \"%s\" failed (%d)\n", name, ret); return NULL; } - if (intel_bios_is_valid_vbt(i915, fw->data, fw->size)) { + if (intel_bios_is_valid_vbt(display, fw->data, fw->size)) { vbt = kmemdup(fw->data, fw->size, GFP_KERNEL); if (vbt) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Found valid VBT firmware \"%s\"\n", name); if (size) *size = fw->size; } } else { - drm_dbg_kms(&i915->drm, "Invalid VBT firmware \"%s\"\n", + drm_dbg_kms(display->drm, "Invalid VBT firmware \"%s\"\n", name); } @@ -3042,9 +3059,10 @@ static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset) return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER); } -static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915, +static struct vbt_header *spi_oprom_get_vbt(struct intel_display *display, size_t *size) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 count, data, found, store = 0; u32 static_region, oprom_offset; u32 oprom_size = 0x200000; @@ -3081,10 +3099,10 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915, for (count = 0; count < vbt_size; count += 4) *(vbt + store++) = intel_spi_read(&i915->uncore, found + count); - if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size)) + if (!intel_bios_is_valid_vbt(display, vbt, vbt_size)) goto err_free_vbt; - drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n"); + drm_dbg_kms(display->drm, "Found valid VBT in SPI flash\n"); if (size) *size = vbt_size; @@ -3097,10 +3115,10 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915, return NULL; } -static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915, +static struct vbt_header *oprom_get_vbt(struct 
intel_display *display, size_t *sizep) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); void __iomem *p = NULL, *oprom; struct vbt_header *vbt; u16 vbt_size; @@ -3124,13 +3142,13 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915, goto err_unmap_oprom; if (sizeof(struct vbt_header) > size) { - drm_dbg(&i915->drm, "VBT header incomplete\n"); + drm_dbg(display->drm, "VBT header incomplete\n"); goto err_unmap_oprom; } vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size)); if (vbt_size > size) { - drm_dbg(&i915->drm, + drm_dbg(display->drm, "VBT incomplete (vbt_size overflows)\n"); goto err_unmap_oprom; } @@ -3142,7 +3160,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915, memcpy_fromio(vbt, p, vbt_size); - if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size)) + if (!intel_bios_is_valid_vbt(display, vbt, vbt_size)) goto err_free_vbt; pci_unmap_rom(pdev, oprom); @@ -3150,7 +3168,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915, if (sizep) *sizep = vbt_size; - drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n"); + drm_dbg_kms(display->drm, "Found valid VBT in PCI ROM\n"); return vbt; @@ -3162,16 +3180,17 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915, return NULL; } -static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915, +static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display, size_t *sizep) { + struct drm_i915_private *i915 = to_i915(display->drm); const struct vbt_header *vbt = NULL; intel_wakeref_t wakeref; - vbt = firmware_get_vbt(i915, sizep); + vbt = firmware_get_vbt(display, sizep); if (!vbt) - vbt = intel_opregion_get_vbt(i915, sizep); + vbt = intel_opregion_get_vbt(display, sizep); /* * If the OpRegion does not have VBT, look in SPI flash @@ -3179,76 +3198,77 @@ static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915 */ if (!vbt && IS_DGFX(i915)) with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vbt = spi_oprom_get_vbt(i915, sizep); + vbt = spi_oprom_get_vbt(display, sizep); if (!vbt) with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vbt = oprom_get_vbt(i915, sizep); + vbt = oprom_get_vbt(display, sizep); return vbt; } /** * intel_bios_init - find VBT and initialize settings from the BIOS - * @i915: i915 device instance + * @display: display device instance * * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also * initialize some defaults if the VBT is not present at all. 
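intel_bios_get_vbt() above tries the possible VBT sources in a fixed order: the vbt_firmware module parameter, then the ACPI OpRegion, then SPI flash on discrete GPUs, and finally the PCI option ROM, taking a runtime-PM wakeref for the hardware-backed reads. A condensed sketch of that fallback chain with early returns; get_vbt_sketch() is illustrative:

static const struct vbt_header *get_vbt_sketch(struct intel_display *display,
                                               size_t *sizep)
{
        struct drm_i915_private *i915 = to_i915(display->drm);
        const struct vbt_header *vbt;
        intel_wakeref_t wakeref;

        vbt = firmware_get_vbt(display, sizep);         /* vbt_firmware= override */
        if (vbt)
                return vbt;

        vbt = intel_opregion_get_vbt(display, sizep);   /* ACPI OpRegion */
        if (vbt)
                return vbt;

        /* Hardware-backed sources need the device awake. */
        if (IS_DGFX(i915))
                with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                        vbt = spi_oprom_get_vbt(display, sizep);

        if (!vbt)
                with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                        vbt = oprom_get_vbt(display, sizep);

        return vbt;
}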
*/ -void intel_bios_init(struct drm_i915_private *i915) +void intel_bios_init(struct intel_display *display) { const struct vbt_header *vbt; const struct bdb_header *bdb; - INIT_LIST_HEAD(&i915->display.vbt.display_devices); - INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks); + INIT_LIST_HEAD(&display->vbt.display_devices); + INIT_LIST_HEAD(&display->vbt.bdb_blocks); - if (!HAS_DISPLAY(i915)) { - drm_dbg_kms(&i915->drm, + if (!HAS_DISPLAY(display)) { + drm_dbg_kms(display->drm, "Skipping VBT init due to disabled display.\n"); return; } - init_vbt_defaults(i915); + init_vbt_defaults(display); - vbt = intel_bios_get_vbt(i915, NULL); + vbt = intel_bios_get_vbt(display, NULL); if (!vbt) goto out; bdb = get_bdb_header(vbt); - i915->display.vbt.version = bdb->version; + display->vbt.version = bdb->version; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "VBT signature \"%.*s\", BDB version %d\n", - (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version); + (int)sizeof(vbt->signature), vbt->signature, + display->vbt.version); - init_bdb_blocks(i915, bdb); + init_bdb_blocks(display, bdb); /* Grab useful general definitions */ - parse_general_features(i915); - parse_general_definitions(i915); - parse_driver_features(i915); + parse_general_features(display); + parse_general_definitions(display); + parse_driver_features(display); /* Depends on child device list */ - parse_compression_parameters(i915); + parse_compression_parameters(display); out: if (!vbt) { - drm_info(&i915->drm, + drm_info(display->drm, "Failed to find VBIOS tables (VBT)\n"); - init_vbt_missing_defaults(i915); + init_vbt_missing_defaults(display); } /* Further processing on pre-parsed or generated child device data */ - parse_sdvo_device_mapping(i915); - parse_ddi_ports(i915); + parse_sdvo_device_mapping(display); + parse_ddi_ports(display); kfree(vbt); } -static void intel_bios_init_panel(struct drm_i915_private *i915, +static void intel_bios_init_panel(struct intel_display *display, struct intel_panel *panel, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, @@ -3256,63 +3276,64 @@ static void intel_bios_init_panel(struct drm_i915_private *i915, { /* already have it? 
*/ if (panel->vbt.panel_type >= 0) { - drm_WARN_ON(&i915->drm, !use_fallback); + drm_WARN_ON(display->drm, !use_fallback); return; } - panel->vbt.panel_type = get_panel_type(i915, devdata, + panel->vbt.panel_type = get_panel_type(display, devdata, drm_edid, use_fallback); if (panel->vbt.panel_type < 0) { - drm_WARN_ON(&i915->drm, use_fallback); + drm_WARN_ON(display->drm, use_fallback); return; } init_vbt_panel_defaults(panel); - parse_panel_options(i915, panel); - parse_generic_dtd(i915, panel); - parse_lfp_data(i915, panel); - parse_lfp_backlight(i915, panel); - parse_sdvo_lvds_data(i915, panel); - parse_panel_driver_features(i915, panel); - parse_power_conservation_features(i915, panel); - parse_edp(i915, panel); - parse_psr(i915, panel); - parse_mipi_config(i915, panel); - parse_mipi_sequence(i915, panel); + parse_panel_options(display, panel); + parse_generic_dtd(display, panel); + parse_lfp_data(display, panel); + parse_lfp_backlight(display, panel); + parse_sdvo_lvds_data(display, panel); + parse_panel_driver_features(display, panel); + parse_power_conservation_features(display, panel); + parse_edp(display, panel); + parse_psr(display, panel); + parse_mipi_config(display, panel); + parse_mipi_sequence(display, panel); } -void intel_bios_init_panel_early(struct drm_i915_private *i915, +void intel_bios_init_panel_early(struct intel_display *display, struct intel_panel *panel, const struct intel_bios_encoder_data *devdata) { - intel_bios_init_panel(i915, panel, devdata, NULL, false); + intel_bios_init_panel(display, panel, devdata, NULL, false); } -void intel_bios_init_panel_late(struct drm_i915_private *i915, +void intel_bios_init_panel_late(struct intel_display *display, struct intel_panel *panel, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid) { - intel_bios_init_panel(i915, panel, devdata, drm_edid, true); + intel_bios_init_panel(display, panel, devdata, drm_edid, true); } /** * intel_bios_driver_remove - Free any resources allocated by intel_bios_init() - * @i915: i915 device instance + * @display: display device instance */ -void intel_bios_driver_remove(struct drm_i915_private *i915) +void intel_bios_driver_remove(struct intel_display *display) { struct intel_bios_encoder_data *devdata, *nd; struct bdb_block_entry *entry, *ne; - list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) { + list_for_each_entry_safe(devdata, nd, &display->vbt.display_devices, + node) { list_del(&devdata->node); kfree(devdata->dsc); kfree(devdata); } - list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) { + list_for_each_entry_safe(entry, ne, &display->vbt.bdb_blocks, node) { list_del(&entry->node); kfree(entry); } @@ -3336,22 +3357,22 @@ void intel_bios_fini_panel(struct intel_panel *panel) /** * intel_bios_is_tv_present - is integrated TV present in VBT - * @i915: i915 device instance + * @display: display device instance * * Return true if TV is present. If no child devices were parsed from VBT, * assume TV is present. 
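intel_bios_init_panel() above is built to be called twice: the _early variant runs before an EDID is available and may leave the panel type undetermined, while the _late variant retries with the EDID and is allowed to fall back; the panel_type >= 0 guard turns the second call into a no-op when the first one already succeeded. A hedged sketch of a caller honouring that order; setup_panel_sketch() is illustrative and the two calls normally happen at different stages of encoder setup:

static void setup_panel_sketch(struct intel_display *display,
                               struct intel_panel *panel,
                               const struct intel_bios_encoder_data *devdata,
                               const struct drm_edid *drm_edid)
{
        /* Before the EDID is read: panel type may stay undetermined. */
        intel_bios_init_panel_early(display, panel, devdata);

        /* Once the EDID is known: retry, with fallback allowed. */
        intel_bios_init_panel_late(display, panel, devdata, drm_edid);

        /* ... consume panel->vbt.* ... */

        /* Per-panel VBT data is freed separately from the device-level VBT. */
        intel_bios_fini_panel(panel);
}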
*/ -bool intel_bios_is_tv_present(struct drm_i915_private *i915) +bool intel_bios_is_tv_present(struct intel_display *display) { const struct intel_bios_encoder_data *devdata; - if (!i915->display.vbt.int_tv_support) + if (!display->vbt.int_tv_support) return false; - if (list_empty(&i915->display.vbt.display_devices)) + if (list_empty(&display->vbt.display_devices)) return true; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; /* @@ -3377,20 +3398,21 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915) /** * intel_bios_is_lvds_present - is LVDS present in VBT - * @i915: i915 device instance + * @display: display device instance * @i2c_pin: i2c pin for LVDS if present * * Return true if LVDS is present. If no child devices were parsed from VBT, * assume LVDS is present. */ -bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) +bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin) { + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_bios_encoder_data *devdata; - if (list_empty(&i915->display.vbt.display_devices)) + if (list_empty(&display->vbt.display_devices)) return true; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; /* If the device type is not LFP, continue. @@ -3417,7 +3439,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) * additional data. Trust that if the VBT was written into * the OpRegion then they have validated the LVDS's existence. */ - return intel_opregion_vbt_present(i915); + return intel_opregion_vbt_present(display); } return false; @@ -3425,25 +3447,25 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) /** * intel_bios_is_port_present - is the specified digital port present - * @i915: i915 device instance + * @display: display device instance * @port: port to check * * Return true if the device in %port is present. */ -bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) +bool intel_bios_is_port_present(struct intel_display *display, enum port port) { const struct intel_bios_encoder_data *devdata; - if (WARN_ON(!has_ddi_port_info(i915))) + if (WARN_ON(!has_ddi_port_info(display))) return true; - if (!is_port_valid(i915, port)) + if (!is_port_valid(display, port)) return false; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; - if (dvo_port_to_port(i915, child->dvo_port) == port) + if (dvo_port_to_port(display, child->dvo_port) == port) return true; } @@ -3474,32 +3496,32 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da /** * intel_bios_is_dsi_present - is DSI present in VBT - * @i915: i915 device instance + * @display: display device instance * @port: port for DSI if present * * Return true if DSI is present, and return the port in %port. 
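intel_bios_is_dsi_present() below both answers the yes/no question and reports which port the DSI panel sits on, so a caller typically probes once and keys further setup off the returned port. A minimal usage sketch; dsi_probe_sketch() is illustrative:

static void dsi_probe_sketch(struct intel_display *display)
{
        enum port port;

        if (!intel_bios_is_dsi_present(display, &port))
                return;

        drm_dbg_kms(display->drm, "VBT reports DSI on port %c\n",
                    port_name(port));

        /* Child device data for that port, if the VBT provides any. */
        if (intel_bios_encoder_data_lookup(display, port))
                drm_dbg_kms(display->drm, "DSI child device found\n");
}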
*/ -bool intel_bios_is_dsi_present(struct drm_i915_private *i915, +bool intel_bios_is_dsi_present(struct intel_display *display, enum port *port) { const struct intel_bios_encoder_data *devdata; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; u8 dvo_port = child->dvo_port; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; - if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) { - drm_dbg_kms(&i915->drm, + if (dsi_dvo_port_to_port(display, dvo_port) == PORT_NONE) { + drm_dbg_kms(display->drm, "VBT has unsupported DSI port %c\n", port_name(dvo_port - DVO_PORT_MIPIA)); continue; } if (port) - *port = dsi_dvo_port_to_port(i915, dvo_port); + *port = dsi_dvo_port_to_port(display, dvo_port); return true; } @@ -3510,7 +3532,7 @@ static void fill_dsc(struct intel_crtc_state *crtc_state, struct dsc_compression_parameters_entry *dsc, int dsc_max_bpc) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; int bpc = 8; @@ -3524,13 +3546,13 @@ static void fill_dsc(struct intel_crtc_state *crtc_state, else if (dsc->support_8bpc && dsc_max_bpc >= 8) bpc = 8; else - drm_dbg_kms(&i915->drm, "VBT: Unsupported BPC %d for DCS\n", + drm_dbg_kms(display->drm, "VBT: Unsupported BPC %d for DCS\n", dsc_max_bpc); crtc_state->pipe_bpp = bpc * 3; - crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp, - VBT_DSC_MAX_BPP(dsc->max_bpp))); + crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(min(crtc_state->pipe_bpp, + VBT_DSC_MAX_BPP(dsc->max_bpp))); /* * FIXME: This is ugly, and slice count should take DSC engine @@ -3545,14 +3567,16 @@ static void fill_dsc(struct intel_crtc_state *crtc_state, } else { /* FIXME */ if (!(dsc->slices_per_line & BIT(0))) - drm_dbg_kms(&i915->drm, "VBT: Unsupported DSC slice count for DSI\n"); + drm_dbg_kms(display->drm, + "VBT: Unsupported DSC slice count for DSI\n"); crtc_state->dsc.slice_count = 1; } if (crtc_state->hw.adjusted_mode.crtc_hdisplay % crtc_state->dsc.slice_count != 0) - drm_dbg_kms(&i915->drm, "VBT: DSC hdisplay %d not divisible by slice count %d\n", + drm_dbg_kms(display->drm, + "VBT: DSC hdisplay %d not divisible by slice count %d\n", crtc_state->hw.adjusted_mode.crtc_hdisplay, crtc_state->dsc.slice_count); @@ -3576,16 +3600,16 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int dsc_max_bpc) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_bios_encoder_data *devdata; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; - if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) { + if (dsi_dvo_port_to_port(display, child->dvo_port) == encoder->port) { if (!devdata->dsc) return false; @@ -3645,12 +3669,13 @@ static const u8 direct_aux_ch_map[] = { [AUX_CH_I] = DP_AUX_I, /* aka AUX_CH_USBC6 */ }; -static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel) +static enum aux_ch map_aux_ch(struct intel_display *display, u8 aux_channel) { + struct drm_i915_private *i915 
= to_i915(display->drm); const u8 *aux_ch_map; int i, n_entries; - if (DISPLAY_VER(i915) >= 13) { + if (DISPLAY_VER(display) >= 13) { aux_ch_map = adlp_aux_ch_map; n_entries = ARRAY_SIZE(adlp_aux_ch_map); } else if (IS_ALDERLAKE_S(i915)) { @@ -3669,7 +3694,7 @@ static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel) return i; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Ignoring alternate AUX CH: VBT claims AUX 0x%x, which is not valid for this platform\n", aux_channel); @@ -3681,22 +3706,22 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata) if (!devdata || !devdata->child.aux_channel) return AUX_CH_NONE; - return map_aux_ch(devdata->i915, devdata->child.aux_channel); + return map_aux_ch(devdata->display, devdata->child.aux_channel); } bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *i915; + struct intel_display *display; u8 aux_channel; int count = 0; if (!devdata || !devdata->child.aux_channel) return false; - i915 = devdata->i915; + display = devdata->display; aux_channel = devdata->child.aux_channel; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { if (intel_bios_encoder_supports_dp(devdata) && aux_channel == devdata->child.aux_channel) count++; @@ -3707,18 +3732,18 @@ bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devda int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) + if (!devdata || devdata->display->vbt.version < 196 || !devdata->child.iboost) return 0; - return translate_iboost(devdata->i915, devdata->child.dp_iboost_level); + return translate_iboost(devdata->display, devdata->child.dp_iboost_level); } int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) + if (!devdata || devdata->display->vbt.version < 196 || !devdata->child.iboost) return 0; - return translate_iboost(devdata->i915, devdata->child.hdmi_iboost_level); + return translate_iboost(devdata->display, devdata->child.hdmi_iboost_level); } int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata) @@ -3726,17 +3751,17 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata) if (!devdata || !devdata->child.ddc_pin) return 0; - return map_ddc_pin(devdata->i915, devdata->child.ddc_pin); + return map_ddc_pin(devdata->display, devdata->child.ddc_pin); } bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata) { - return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c; + return devdata->display->vbt.version >= 195 && devdata->child.dp_usb_type_c; } bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata) { - return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt; + return devdata->display->vbt.version >= 209 && devdata->child.tbt; } bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata) @@ -3750,11 +3775,11 @@ bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata } const struct intel_bios_encoder_data * -intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) +intel_bios_encoder_data_lookup(struct intel_display *display, enum port port) { 
struct intel_bios_encoder_data *devdata; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + list_for_each_entry(devdata, &display->vbt.display_devices, node) { if (intel_bios_encoder_port(devdata) == port) return devdata; } @@ -3762,23 +3787,23 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) return NULL; } -void intel_bios_for_each_encoder(struct drm_i915_private *i915, - void (*func)(struct drm_i915_private *i915, +void intel_bios_for_each_encoder(struct intel_display *display, + void (*func)(struct intel_display *display, const struct intel_bios_encoder_data *devdata)) { struct intel_bios_encoder_data *devdata; - list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) - func(i915, devdata); + list_for_each_entry(devdata, &display->vbt.display_devices, node) + func(display, devdata); } static int intel_bios_vbt_show(struct seq_file *m, void *unused) { - struct drm_i915_private *i915 = m->private; + struct intel_display *display = m->private; const void *vbt; size_t vbt_size; - vbt = intel_bios_get_vbt(i915, &vbt_size); + vbt = intel_bios_get_vbt(display, &vbt_size); if (vbt) { seq_write(m, vbt, vbt_size); @@ -3790,10 +3815,10 @@ static int intel_bios_vbt_show(struct seq_file *m, void *unused) DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt); -void intel_bios_debugfs_register(struct drm_i915_private *i915) +void intel_bios_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_vbt", 0444, minor->debugfs_root, - i915, &intel_bios_vbt_fops); + display, &intel_bios_vbt_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 06a51be4afd89b..8b703f6cfe17e2 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -33,9 +33,9 @@ #include struct drm_edid; -struct drm_i915_private; struct intel_bios_encoder_data; struct intel_crtc_state; +struct intel_display; struct intel_encoder; struct intel_panel; enum aux_ch; @@ -232,28 +232,28 @@ struct mipi_pps_data { u16 panel_power_cycle_delay; } __packed; -void intel_bios_init(struct drm_i915_private *dev_priv); -void intel_bios_init_panel_early(struct drm_i915_private *dev_priv, +void intel_bios_init(struct intel_display *display); +void intel_bios_init_panel_early(struct intel_display *display, struct intel_panel *panel, const struct intel_bios_encoder_data *devdata); -void intel_bios_init_panel_late(struct drm_i915_private *dev_priv, +void intel_bios_init_panel_late(struct intel_display *display, struct intel_panel *panel, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid); void intel_bios_fini_panel(struct intel_panel *panel); -void intel_bios_driver_remove(struct drm_i915_private *dev_priv); -bool intel_bios_is_valid_vbt(struct drm_i915_private *i915, +void intel_bios_driver_remove(struct intel_display *display); +bool intel_bios_is_valid_vbt(struct intel_display *display, const void *buf, size_t size); -bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); -bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); -bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); -bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); +bool intel_bios_is_tv_present(struct intel_display *display); +bool intel_bios_is_lvds_present(struct intel_display 
*display, u8 *i2c_pin); +bool intel_bios_is_port_present(struct intel_display *display, enum port port); +bool intel_bios_is_dsi_present(struct intel_display *display, enum port *port); bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int dsc_max_bpc); const struct intel_bios_encoder_data * -intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port); +intel_bios_encoder_data_lookup(struct intel_display *display, enum port port); bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata); @@ -277,10 +277,10 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata); int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata); int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata); -void intel_bios_for_each_encoder(struct drm_i915_private *i915, - void (*func)(struct drm_i915_private *i915, +void intel_bios_for_each_encoder(struct intel_display *display, + void (*func)(struct intel_display *display, const struct intel_bios_encoder_data *devdata)); -void intel_bios_debugfs_register(struct drm_i915_private *i915); +void intel_bios_debugfs_register(struct intel_display *display); #endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 16d5550f7e5e56..aa3ba66c5307f6 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -23,7 +23,10 @@ #include +#include + #include "soc/intel_dram.h" + #include "hsw_ips.h" #include "i915_reg.h" #include "intel_atomic.h" @@ -2750,7 +2753,7 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) */ int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 
36 : 24; int min_cdclk_bj = - (to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) * + (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) * pixel_clock) / (2 * bigjoiner_interface_bits); min_cdclk = max(min_cdclk, min_cdclk_bj); diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 7ac50aacec73c0..5d701f48351b96 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1313,8 +1313,8 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state, { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - if (crtc_state->dsb) - intel_dsb_reg_write(crtc_state->dsb, reg, val); + if (crtc_state->dsb_color_vblank) + intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val); else intel_de_write_fw(i915, reg, val); } @@ -1337,15 +1337,15 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state, * unless we either write each entry twice, * or use non-posted writes */ - if (crtc_state->dsb) - intel_dsb_nonpost_start(crtc_state->dsb); + if (crtc_state->dsb_color_vblank) + intel_dsb_nonpost_start(crtc_state->dsb_color_vblank); for (i = 0; i < 256; i++) ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i), i9xx_lut_8(&lut[i])); - if (crtc_state->dsb) - intel_dsb_nonpost_end(crtc_state->dsb); + if (crtc_state->dsb_color_vblank) + intel_dsb_nonpost_end(crtc_state->dsb_color_vblank); } static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state, @@ -1870,7 +1870,7 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - if (crtc_state->dsb) + if (crtc_state->dsb_color_vblank) return; i915->display.funcs.color->load_luts(crtc_state); @@ -1890,8 +1890,8 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state) i915->display.funcs.color->color_commit_arm(crtc_state); - if (crtc_state->dsb) - intel_dsb_commit(crtc_state->dsb, true); + if (crtc_state->dsb_color_commit) + intel_dsb_commit(crtc_state->dsb_color_commit, false); } void intel_color_post_update(const struct intel_crtc_state *crtc_state) @@ -1919,33 +1919,51 @@ void intel_color_prepare_commit(struct intel_atomic_state *state, if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut) return; - crtc_state->dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024); - if (!crtc_state->dsb) + crtc_state->dsb_color_vblank = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024); + if (!crtc_state->dsb_color_vblank) return; i915->display.funcs.color->load_luts(crtc_state); - intel_dsb_finish(crtc_state->dsb); + intel_dsb_finish(crtc_state->dsb_color_vblank); + + crtc_state->dsb_color_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 16); + if (!crtc_state->dsb_color_commit) { + intel_dsb_cleanup(crtc_state->dsb_color_vblank); + crtc_state->dsb_color_vblank = NULL; + return; + } + + intel_dsb_chain(state, crtc_state->dsb_color_commit, + crtc_state->dsb_color_vblank, true); + + intel_dsb_finish(crtc_state->dsb_color_commit); } void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state) { - if (!crtc_state->dsb) - return; + if (crtc_state->dsb_color_commit) { + intel_dsb_cleanup(crtc_state->dsb_color_commit); + crtc_state->dsb_color_commit = NULL; + } - intel_dsb_cleanup(crtc_state->dsb); - crtc_state->dsb = NULL; + if (crtc_state->dsb_color_vblank) { + intel_dsb_cleanup(crtc_state->dsb_color_vblank); + crtc_state->dsb_color_vblank = NULL; + } } void intel_color_wait_commit(const struct 
intel_crtc_state *crtc_state) { - if (crtc_state->dsb) - intel_dsb_wait(crtc_state->dsb); + if (crtc_state->dsb_color_commit) + intel_dsb_wait(crtc_state->dsb_color_commit); + if (crtc_state->dsb_color_vblank) + intel_dsb_wait(crtc_state->dsb_color_vblank); } bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state) { - return crtc_state->dsb; + return crtc_state->dsb_color_vblank; } static bool intel_can_preload_luts(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 143d66951631f1..3252dab5643097 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -159,9 +159,11 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915) { - bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A); - bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D); - bool dsi_present = intel_bios_is_dsi_present(i915, NULL); + struct intel_display *display = &i915->display; + + bool ddi_a_present = intel_bios_is_port_present(display, PORT_A); + bool ddi_d_present = intel_bios_is_port_present(display, PORT_D); + bool dsi_present = intel_bios_is_dsi_present(display, NULL); /* * VBT's 'dvo port' field for child devices references the DDI, not diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 6df526e189b5b8..705ec5ad385c83 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -10,6 +10,7 @@ #include "intel_crtc_state_dump.h" #include "intel_display_types.h" #include "intel_hdmi.h" +#include "intel_vdsc.h" #include "intel_vrr.h" static void intel_dump_crtc_timings(struct drm_printer *p, @@ -369,6 +370,8 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, else if (IS_VALLEYVIEW(i915)) vlv_dump_csc(&p, "wgc csc", &pipe_config->csc); + intel_vdsc_state_dump(&p, 0, pipe_config); + dump_planes: if (!state) return; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index a07aca96e55177..b1c294236cc878 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -916,7 +916,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port, * instead of a specific AUX_IO_ reference without powering up any * extra wells. 
*/ - if (intel_encoder_can_psr(&dig_port->base)) + if (intel_psr_needs_aux_io_power(&dig_port->base, crtc_state)) return intel_display_power_aux_io_domain(i915, dig_port->aux_ch); else if (DISPLAY_VER(i915) < 14 && (intel_crtc_has_dp_encoder(crtc_state) || @@ -1400,7 +1400,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, static int translate_signal_level(struct intel_dp *intel_dp, u8 signal_levels) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int i; for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) { @@ -1408,7 +1408,7 @@ static int translate_signal_level(struct intel_dp *intel_dp, return i; } - drm_WARN(&i915->drm, 1, + drm_WARN(display->drm, 1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n", signal_levels); @@ -2211,14 +2211,14 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel const struct intel_crtc_state *crtc_state, bool enable) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); if (!crtc_state->vrr.enable) return; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL, enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Failed to %s MSA_TIMING_PAR_IGNORE in the sink\n", str_enable_disable(enable)); } @@ -2227,20 +2227,20 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool enable) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); if (!crtc_state->fec_enable) return; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, enable ? DP_FEC_READY : 0) <= 0) - drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n", + drm_dbg_kms(display->drm, "Failed to set FEC_READY to %s in the sink\n", enable ? "enabled" : "disabled"); if (enable && drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS, DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0) - drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n"); + drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n"); } static int read_fec_detected_status(struct drm_dp_aux *aux) @@ -4172,7 +4172,8 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder, intel_tc_port_sanitize_mode(enc_to_dig_port(encoder), crtc_state); - if (intel_encoder_is_dp(encoder)) + if ((crtc_state && intel_crtc_has_dp_encoder(crtc_state)) || + (!crtc_state && intel_encoder_is_dp(encoder))) intel_dp_sync_state(encoder, crtc_state); } @@ -4853,9 +4854,10 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port) return false; } -void intel_ddi_init(struct drm_i915_private *dev_priv, +void intel_ddi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port; struct intel_encoder *encoder; bool init_hdmi, init_dp; @@ -4898,7 +4900,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, * driver. In that case we should skip initializing the corresponding * outputs. 
*/ - if (intel_hti_uses_phy(dev_priv, phy)) { + if (intel_hti_uses_phy(display, phy)) { drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n", port_name(port), phy_name(phy)); return; @@ -4972,7 +4974,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, } else { drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, - "DDI %c/PHY %c", port_name(port), phy_name(phy)); + "DDI %c/PHY %c", port_name(port), phy_name(phy)); } intel_encoder_link_check_init(encoder, intel_ddi_link_check); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 434de7196875ae..6d85422bdefef3 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -15,6 +15,7 @@ struct intel_bios_encoder_data; struct intel_connector; struct intel_crtc; struct intel_crtc_state; +struct intel_display; struct intel_dp; struct intel_dpll_hw_state; struct intel_encoder; @@ -53,7 +54,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port); -void intel_ddi_init(struct drm_i915_private *dev_priv, +void intel_ddi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index c2c388212e2eaa..b4ef4d59da1ace 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -1014,9 +1015,14 @@ static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state, old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n; } -static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state, - const struct intel_crtc_state *new_crtc_state) +static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + if (!new_crtc_state->hw.active) return false; @@ -1026,9 +1032,14 @@ static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state, vrr_params_changed(old_crtc_state, new_crtc_state))); } -static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state, - const struct intel_crtc_state *new_crtc_state) +bool intel_crtc_vrr_disabling(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + if (!old_crtc_state->hw.active) return false; @@ -1181,7 +1192,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; - if (vrr_disabling(old_crtc_state, new_crtc_state)) { + if (intel_crtc_vrr_disabling(state, crtc)) { intel_vrr_disable(old_crtc_state); intel_crtc_update_active_timings(old_crtc_state, false); } @@ -4669,11 +4680,11 @@ intel_modeset_pipe_config(struct intel_atomic_state *state, crtc_state->fec_enable = 
limits->force_fec_pipes & BIT(crtc->pipe); crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe]; - if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) { + if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) { drm_dbg_kms(&i915->drm, - "[CRTC:%d:%s] Link bpp limited to " BPP_X16_FMT "\n", + "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n", crtc->base.base.id, crtc->base.name, - BPP_X16_ARGS(crtc_state->max_link_bpp_x16)); + FXP_Q4_ARGS(crtc_state->max_link_bpp_x16)); crtc_state->bw_constrained = true; } @@ -5100,7 +5111,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (current_config->name != pipe_config->name) { \ BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \ __stringify(name) " is not bool"); \ - pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ "(expected %s, found %s)", \ str_yes_no(current_config->name), \ str_yes_no(pipe_config->name)); \ @@ -6249,6 +6260,8 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: case I915_FORMAT_MOD_4_TILED: + case I915_FORMAT_MOD_4_TILED_BMG_CCS: + case I915_FORMAT_MOD_4_TILED_LNL_CCS: break; default: drm_dbg_kms(&i915->drm, @@ -6830,8 +6843,6 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -6844,7 +6855,7 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state, !intel_crtc_needs_modeset(new_crtc_state)) skl_detach_scalers(new_crtc_state); - if (vrr_enabling(old_crtc_state, new_crtc_state)) + if (intel_crtc_vrr_enabling(state, crtc)) intel_vrr_enable(new_crtc_state); } @@ -6944,7 +6955,7 @@ static void intel_update_crtc(struct intel_atomic_state *state, * * FIXME Should be synchronized with the start of vblank somehow... 
*/ - if (vrr_enabling(old_crtc_state, new_crtc_state) || + if (intel_crtc_vrr_enabling(state, crtc) || new_crtc_state->update_m_n || new_crtc_state->update_lrr) intel_crtc_update_active_timings(new_crtc_state, new_crtc_state->vrr.enable); @@ -7502,7 +7513,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) * * FIXME get rid of this funny new->old swapping */ - old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb); + old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank); + old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit); } /* Underruns don't always raise interrupts, so check manually */ @@ -7777,10 +7789,11 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port) void intel_setup_outputs(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_encoder *encoder; bool dpd_is_edp = false; - intel_pps_unlock_regs_wa(dev_priv); + intel_pps_unlock_regs_wa(display); if (!HAS_DISPLAY(dev_priv)) return; @@ -7789,7 +7802,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) if (intel_ddi_crt_present(dev_priv)) intel_crt_init(dev_priv); - intel_bios_for_each_encoder(dev_priv, intel_ddi_init); + intel_bios_for_each_encoder(display, intel_ddi_init); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) vlv_dsi_init(dev_priv); @@ -7851,14 +7864,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) * HDMI ports that the VBT claim are DP or eDP. */ has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); - has_port = intel_bios_is_port_present(dev_priv, PORT_B); + has_port = intel_bios_is_port_present(display, PORT_B); if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B); if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); - has_port = intel_bios_is_port_present(dev_priv, PORT_C); + has_port = intel_bios_is_port_present(display, PORT_C); if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C); if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) @@ -7869,7 +7882,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) * eDP not supported on port D, * so no need to worry about it */ - has_port = intel_bios_is_port_present(dev_priv, PORT_D); + has_port = intel_bios_is_port_present(display, PORT_D); if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) g4x_dp_init(dev_priv, CHV_DP_D, PORT_D); if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) @@ -7923,7 +7936,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) g4x_dp_init(dev_priv, DP_D, PORT_D); if (SUPPORTS_TV(dev_priv)) - intel_tv_init(dev_priv); + intel_tv_init(display); } else if (DISPLAY_VER(dev_priv) == 2) { if (IS_I85X(dev_priv)) intel_lvds_init(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index b0cf6ca70952e1..b21d9578d5db4d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -532,6 +532,9 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state); void intel_update_watermarks(struct drm_i915_private *i915); +bool intel_crtc_vrr_disabling(struct intel_atomic_state *state, + struct intel_crtc *crtc); + /* modesetting */ 
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state, const char *reason, u8 pipe_mask); diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 7715fc329057aa..0a711114ff2b4c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -237,7 +237,7 @@ struct intel_vbt_data { struct sdvo_device_mapping { u8 initialized; u8 dvo_port; - u8 slave_addr; + u8 target_addr; u8 dvo_wiring; u8 i2c_pin; u8 ddc_pin; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 91757fed9c6d28..f5f618199d3980 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -36,6 +36,7 @@ #include "intel_pps.h" #include "intel_psr.h" #include "intel_psr_regs.h" +#include "intel_vdsc.h" #include "intel_wm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) @@ -492,7 +493,7 @@ static void crtc_updates_info(struct seq_file *m, seq_printf(m, "%sMax update: %lluns\n", hdr, crtc->debug.vbl.max); seq_printf(m, "%sAverage update: %lluns\n", - hdr, div64_u64(crtc->debug.vbl.sum, count)); + hdr, div64_u64(crtc->debug.vbl.sum, count)); seq_printf(m, "%sOverruns > %uus: %u\n", hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over); } @@ -551,6 +552,7 @@ static void crtc_updates_add(struct intel_crtc *crtc) static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_printer p = drm_seq_file_printer(m); const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_encoder *encoder; @@ -581,6 +583,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) crtc_state->joiner_pipes, intel_crtc_is_joiner_secondary(crtc_state) ? 
"slave" : "master"); + intel_vdsc_state_dump(&p, 1, crtc_state); + for_each_intel_encoder_mask(&dev_priv->drm, encoder, crtc_state->uapi.encoder_mask) intel_encoder_info(m, crtc, encoder); @@ -1008,7 +1012,7 @@ i915_fifo_underrun_reset_write(struct file *filp, return ret; } - intel_fbc_reset_underrun(dev_priv); + intel_fbc_reset_underrun(&dev_priv->display); return cnt; } @@ -1045,6 +1049,7 @@ static const struct { void intel_display_debugfs_register(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct drm_minor *minor = i915->drm.primary; int i; @@ -1060,15 +1065,15 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); - intel_bios_debugfs_register(i915); + intel_bios_debugfs_register(display); intel_cdclk_debugfs_register(i915); intel_dmc_debugfs_register(i915); - intel_fbc_debugfs_register(i915); + intel_fbc_debugfs_register(display); intel_hpd_debugfs_register(i915); - intel_opregion_debugfs_register(i915); - intel_psr_debugfs_register(i915); + intel_opregion_debugfs_register(display); + intel_psr_debugfs_register(display); intel_wm_debugfs_register(i915); - intel_display_debugfs_params(i915); + intel_display_debugfs_params(display); } static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c index f3571874855551..ec3ed29a83c935 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c @@ -151,13 +151,13 @@ intel_display_debugfs_create_uint(const char *name, umode_t mode, } while (0) /* add a subdirectory with files for each intel display param */ -void intel_display_debugfs_params(struct drm_i915_private *i915) +void intel_display_debugfs_params(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; struct dentry *dir; char dirname[16]; - snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name); + snprintf(dirname, sizeof(dirname), "%s_params", display->drm->driver->name); dir = debugfs_lookup(dirname, minor->debugfs_root); if (!dir) dir = debugfs_create_dir(dirname, minor->debugfs_root); @@ -171,7 +171,7 @@ void intel_display_debugfs_params(struct drm_i915_private *i915) */ #define REGISTER(T, x, unused, mode, ...) 
_intel_display_param_create_file( \ - dir, #x, mode, &i915->display.params.x); + dir, #x, mode, &display->params.x); INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER); #undef REGISTER } diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h index 1e9945a4044c07..a1120915a5a8ef 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h @@ -6,8 +6,8 @@ #ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__ #define __INTEL_DISPLAY_DEBUGFS_PARAMS__ -struct drm_i915_private; +struct intel_display; -void intel_display_debugfs_params(struct drm_i915_private *i915); +void intel_display_debugfs_params(struct intel_display *display); #endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index dd7dce4b0e7a1e..1b46ba98558099 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -16,14 +16,25 @@ #include "intel_display_power.h" #include "intel_display_reg_defs.h" #include "intel_fbc.h" +#include "intel_step.h" __diag_push(); __diag_ignore_all("-Woverride-init", "Allow field initialization overrides for display info"); +struct stepping_desc { + const enum intel_step *map; /* revid to step map */ + size_t size; /* map size */ +}; + +#define STEP_INFO(_map) \ + .step_info.map = _map, \ + .step_info.size = ARRAY_SIZE(_map) + struct subplatform_desc { enum intel_display_subplatform subplatform; const char *name; const u16 *pciidlist; + struct stepping_desc step_info; }; struct platform_desc { @@ -31,6 +42,7 @@ struct platform_desc { const char *name; const struct subplatform_desc *subplatforms; const struct intel_display_device_info *info; /* NULL for GMD ID */ + struct stepping_desc step_info; }; #define PLATFORM(_platform) \ @@ -610,6 +622,13 @@ static const u16 skl_ulx_ids[] = { 0 }; +static const enum intel_step skl_steppings[] = { + [0x6] = STEP_G0, + [0x7] = STEP_H0, + [0x9] = STEP_J0, + [0xA] = STEP_I1, +}; + static const struct platform_desc skl_desc = { PLATFORM(SKYLAKE), .subplatforms = (const struct subplatform_desc[]) { @@ -618,6 +637,7 @@ static const struct platform_desc skl_desc = { {}, }, .info = &skl_display, + STEP_INFO(skl_steppings), }; static const u16 kbl_ult_ids[] = { @@ -634,6 +654,16 @@ static const u16 kbl_ulx_ids[] = { 0 }; +static const enum intel_step kbl_steppings[] = { + [1] = STEP_B0, + [2] = STEP_B0, + [3] = STEP_B0, + [4] = STEP_C0, + [5] = STEP_B1, + [6] = STEP_B1, + [7] = STEP_C0, +}; + static const struct platform_desc kbl_desc = { PLATFORM(KABYLAKE), .subplatforms = (const struct subplatform_desc[]) { @@ -642,6 +672,7 @@ static const struct platform_desc kbl_desc = { {}, }, .info = &skl_display, + STEP_INFO(kbl_steppings), }; static const u16 cfl_ult_ids[] = { @@ -706,6 +737,13 @@ static const struct platform_desc cml_desc = { BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) +static const enum intel_step bxt_steppings[] = { + [0xA] = STEP_C0, + [0xB] = STEP_C0, + [0xC] = STEP_D0, + [0xD] = STEP_E0, +}; + static const struct platform_desc bxt_desc = { PLATFORM(BROXTON), .info = &(const struct intel_display_device_info) { @@ -714,6 +752,11 @@ static const struct platform_desc bxt_desc = { .__runtime_defaults.ip.ver = 9, }, + STEP_INFO(bxt_steppings), +}; + +static const enum intel_step 
glk_steppings[] = { + [3] = STEP_B0, }; static const struct platform_desc glk_desc = { @@ -725,6 +768,7 @@ static const struct platform_desc glk_desc = { .__runtime_defaults.ip.ver = 10, }, + STEP_INFO(glk_steppings), }; #define ICL_DISPLAY \ @@ -773,6 +817,10 @@ static const u16 icl_port_f_ids[] = { 0 }; +static const enum intel_step icl_steppings[] = { + [7] = STEP_D0, +}; + static const struct platform_desc icl_desc = { PLATFORM(ICELAKE), .subplatforms = (const struct subplatform_desc[]) { @@ -784,6 +832,7 @@ static const struct platform_desc icl_desc = { .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), }, + STEP_INFO(icl_steppings), }; static const struct intel_display_device_info jsl_ehl_display = { @@ -792,14 +841,21 @@ static const struct intel_display_device_info jsl_ehl_display = { .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), }; +static const enum intel_step jsl_ehl_steppings[] = { + [0] = STEP_A0, + [1] = STEP_B0, +}; + static const struct platform_desc jsl_desc = { PLATFORM(JASPERLAKE), .info = &jsl_ehl_display, + STEP_INFO(jsl_ehl_steppings), }; static const struct platform_desc ehl_desc = { PLATFORM(ELKHARTLAKE), .info = &jsl_ehl_display, + STEP_INFO(jsl_ehl_steppings), }; #define XE_D_DISPLAY \ @@ -850,10 +906,23 @@ static const u16 tgl_uy_ids[] = { 0 }; +static const enum intel_step tgl_steppings[] = { + [0] = STEP_B0, + [1] = STEP_D0, +}; + +static const enum intel_step tgl_uy_steppings[] = { + [0] = STEP_A0, + [1] = STEP_C0, + [2] = STEP_C0, + [3] = STEP_D0, +}; + static const struct platform_desc tgl_desc = { PLATFORM(TIGERLAKE), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids }, + { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids, + STEP_INFO(tgl_uy_steppings) }, {}, }, .info = &(const struct intel_display_device_info) { @@ -866,6 +935,12 @@ static const struct platform_desc tgl_desc = { .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4) | BIT(PORT_TC5) | BIT(PORT_TC6), }, + STEP_INFO(tgl_steppings), +}; + +static const enum intel_step dg1_steppings[] = { + [0] = STEP_A0, + [1] = STEP_B0, }; static const struct platform_desc dg1_desc = { @@ -876,6 +951,13 @@ static const struct platform_desc dg1_desc = { .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2), }, + STEP_INFO(dg1_steppings), +}; + +static const enum intel_step rkl_steppings[] = { + [0] = STEP_A0, + [1] = STEP_B0, + [4] = STEP_C0, }; static const struct platform_desc rkl_desc = { @@ -892,6 +974,7 @@ static const struct platform_desc rkl_desc = { .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2), }, + STEP_INFO(rkl_steppings), }; static const u16 adls_rpls_ids[] = { @@ -899,10 +982,24 @@ static const u16 adls_rpls_ids[] = { 0 }; +static const enum intel_step adl_s_steppings[] = { + [0x0] = STEP_A0, + [0x1] = STEP_A2, + [0x4] = STEP_B0, + [0x8] = STEP_B0, + [0xC] = STEP_C0, +}; + +static const enum intel_step adl_s_rpl_s_steppings[] = { + [0x4] = STEP_D0, + [0xC] = STEP_C0, +}; + static const struct platform_desc adl_s_desc = { PLATFORM(ALDERLAKE_S), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids }, + { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids, + STEP_INFO(adl_s_rpl_s_steppings) }, {}, }, .info = &(const struct 
intel_display_device_info) { @@ -913,6 +1010,7 @@ static const struct platform_desc adl_s_desc = { .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4), }, + STEP_INFO(adl_s_steppings), }; #define XE_LPD_FEATURES \ @@ -986,15 +1084,34 @@ static const u16 adlp_rplp_ids[] = { 0 }; +static const enum intel_step adl_p_steppings[] = { + [0x0] = STEP_A0, + [0x4] = STEP_B0, + [0x8] = STEP_C0, + [0xC] = STEP_D0, +}; + +static const enum intel_step adl_p_adl_n_steppings[] = { + [0x0] = STEP_D0, +}; + +static const enum intel_step adl_p_rpl_pu_steppings[] = { + [0x4] = STEP_E0, +}; + static const struct platform_desc adl_p_desc = { PLATFORM(ALDERLAKE_P), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids }, - { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids }, - { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids }, + { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids, + STEP_INFO(adl_p_adl_n_steppings) }, + { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids, + STEP_INFO(adl_p_rpl_pu_steppings) }, + { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids, + STEP_INFO(adl_p_rpl_pu_steppings) }, {}, }, .info = &xe_lpd_display, + STEP_INFO(adl_p_steppings), }; static const struct intel_display_device_info xe_hpd_display = { @@ -1023,12 +1140,33 @@ static const u16 dg2_g12_ids[] = { 0 }; +static const enum intel_step dg2_g10_steppings[] = { + [0x0] = STEP_A0, + [0x1] = STEP_A0, + [0x4] = STEP_B0, + [0x8] = STEP_C0, +}; + +static const enum intel_step dg2_g11_steppings[] = { + [0x0] = STEP_B0, + [0x4] = STEP_C0, + [0x5] = STEP_C0, +}; + +static const enum intel_step dg2_g12_steppings[] = { + [0x0] = STEP_C0, + [0x1] = STEP_C0, +}; + static const struct platform_desc dg2_desc = { PLATFORM(DG2), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids }, - { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids }, - { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids }, + { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids, + STEP_INFO(dg2_g10_steppings) }, + { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids, + STEP_INFO(dg2_g11_steppings) }, + { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids, + STEP_INFO(dg2_g12_steppings) }, {}, }, .info = &xe_hpd_display, @@ -1261,13 +1399,66 @@ find_subplatform_desc(struct pci_dev *pdev, const struct platform_desc *desc) return NULL; } +static enum intel_step get_pre_gmdid_step(struct intel_display *display, + const struct stepping_desc *main, + const struct stepping_desc *sub) +{ + struct pci_dev *pdev = to_pci_dev(display->drm->dev); + const enum intel_step *map = main->map; + int size = main->size; + int revision = pdev->revision; + enum intel_step step; + + /* subplatform stepping info trumps main platform info */ + if (sub && sub->map && sub->size) { + map = sub->map; + size = sub->size; + } + + /* not all platforms define steppings, and it's fine */ + if (!map || !size) + return STEP_NONE; + + if (revision < size && map[revision] != STEP_NONE) { + step = map[revision]; + } else { + drm_warn(display->drm, "Unknown revision 0x%02x\n", revision); + + /* + * If we hit a gap in the revision to step map, use the information + * for the next revision. + * + * This may be wrong in all sorts of ways, especially if the + * steppings in the array are not monotonically increasing, but + * it's better than defaulting to 0. 
+ */ + while (revision < size && map[revision] == STEP_NONE) + revision++; + + if (revision < size) { + drm_dbg_kms(display->drm, "Using display stepping for revision 0x%02x\n", + revision); + step = map[revision]; + } else { + drm_dbg_kms(display->drm, "Using future display stepping\n"); + step = STEP_FUTURE; + } + } + + drm_WARN_ON(display->drm, step == STEP_NONE); + + return step; +} + void intel_display_device_probe(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); const struct intel_display_device_info *info; struct intel_display_ip_ver ip_ver = {}; const struct platform_desc *desc; const struct subplatform_desc *subdesc; + enum intel_step step; /* Add drm device backpointer as early as possible. */ i915->display.drm = &i915->drm; @@ -1307,13 +1498,25 @@ void intel_display_device_probe(struct drm_i915_private *i915) DISPLAY_RUNTIME_INFO(i915)->subplatform = subdesc->subplatform; } - if (ip_ver.ver || ip_ver.rel || ip_ver.step) + if (ip_ver.ver || ip_ver.rel || ip_ver.step) { DISPLAY_RUNTIME_INFO(i915)->ip = ip_ver; + step = STEP_A0 + ip_ver.step; + if (step > STEP_FUTURE) { + drm_dbg_kms(display->drm, "Using future display stepping\n"); + step = STEP_FUTURE; + } + } else { + step = get_pre_gmdid_step(display, &desc->step_info, + subdesc ? &subdesc->step_info : NULL); + } - drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u\n", + DISPLAY_RUNTIME_INFO(i915)->step = step; + + drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u stepping %s\n", desc->name, subdesc ? "/" : "", subdesc ? subdesc->name : "", pdev->device, DISPLAY_RUNTIME_INFO(i915)->ip.ver, - DISPLAY_RUNTIME_INFO(i915)->ip.rel); + DISPLAY_RUNTIME_INFO(i915)->ip.rel, + step != STEP_NONE ? 
intel_step_name(step) : "N/A"); return; @@ -1474,6 +1677,9 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9 } } + display_runtime->rawclk_freq = intel_read_rawclk(i915); + drm_dbg_kms(&i915->drm, "rawclk rate: %d kHz\n", display_runtime->rawclk_freq); + return; display_fused_off: @@ -1509,6 +1715,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf drm_printf(p, "display version: %u\n", runtime->ip.ver); + drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step)); + #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name)) DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG @@ -1516,6 +1724,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp)); drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc)); drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc)); + + drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq); } /* @@ -1529,9 +1739,11 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf */ bool intel_display_device_enabled(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + /* Only valid when HAS_DISPLAY() is true */ - drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915)); + drm_WARN_ON(display->drm, !HAS_DISPLAY(display)); - return !i915->display.params.disable_display && - !intel_opregion_headless_sku(i915); + return !display->params.disable_display && + !intel_opregion_headless_sku(display); } diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 13453ea4daea09..dfb0c8bf5ca25f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -161,7 +161,7 @@ enum intel_display_subplatform { #define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv) /* Check that device has a display IP version within the specific range. */ -#define IS_DISPLAY_IP_RANGE(__i915, from, until) ( \ +#define IS_DISPLAY_VER_FULL(__i915, from, until) ( \ BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \ (DISPLAY_VER_FULL(__i915) >= (from) && \ DISPLAY_VER_FULL(__i915) <= (until))) @@ -175,14 +175,14 @@ enum intel_display_subplatform { * hardware fix is present and the software workaround is no longer necessary. * E.g., * - * IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2) - * IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER) + * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2) + * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER) * * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper * stepping bound for the specified IP version. 
*/ -#define IS_DISPLAY_IP_STEP(__i915, ipver, from, until) \ - (IS_DISPLAY_IP_RANGE((__i915), (ipver), (ipver)) && \ +#define IS_DISPLAY_VER_STEP(__i915, ipver, from, until) \ + (IS_DISPLAY_VER_FULL((__i915), (ipver), (ipver)) && \ IS_DISPLAY_STEP((__i915), (from), (until))) #define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info) @@ -194,6 +194,12 @@ enum intel_display_subplatform { #define IS_DISPLAY_VER(i915, from, until) \ (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) +#define INTEL_DISPLAY_STEP(__i915) (DISPLAY_RUNTIME_INFO(__i915)->step) + +#define IS_DISPLAY_STEP(__i915, since, until) \ + (drm_WARN_ON(__to_intel_display(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \ + INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) + struct intel_display_runtime_info { enum intel_display_platform platform; enum intel_display_subplatform subplatform; @@ -201,8 +207,11 @@ struct intel_display_runtime_info { struct intel_display_ip_ver { u16 ver; u16 rel; - u16 step; + u16 step; /* hardware */ } ip; + int step; /* symbolic */ + + u32 rawclk_freq; u8 pipe_mask; u8 cpu_transcoder_mask; diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 794b4af380558d..069426d9260b19 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -217,7 +217,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) return ret; } - intel_bios_init(i915); + intel_bios_init(display); ret = intel_vga_register(i915); if (ret) @@ -265,7 +265,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) intel_init_quirks(display); - intel_fbc_init(i915); + intel_fbc_init(display); return 0; @@ -275,7 +275,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) cleanup_vga: intel_vga_unregister(i915); cleanup_bios: - intel_bios_driver_remove(i915); + intel_bios_driver_remove(display); return ret; } @@ -416,7 +416,8 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915) /* part #2: call after irq install, but before gem init */ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) { - struct drm_device *dev = &i915->drm; + struct intel_display *display = &i915->display; + struct drm_device *dev = display->drm; enum pipe pipe; int ret; @@ -427,7 +428,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_panel_sanitize_ssc(i915); - intel_pps_setup(i915); + intel_pps_setup(display); intel_gmbus_setup(i915); @@ -452,13 +453,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) if (i915->display.cdclk.max_cdclk_freq == 0) intel_update_max_cdclk(i915); - intel_hti_init(i915); + intel_hti_init(display); /* Just disable it once at startup */ intel_vga_disable(i915); intel_setup_outputs(i915); - ret = intel_dp_tunnel_mgr_init(i915); + ret = intel_dp_tunnel_mgr_init(display); if (ret) goto err_hdcp; @@ -466,7 +467,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx); - intel_acpi_assign_connector_fwnodes(i915); + intel_acpi_assign_connector_fwnodes(display); drm_modeset_unlock_all(dev); intel_initial_plane_config(i915); @@ -526,6 +527,7 @@ int intel_display_driver_probe(struct drm_i915_private *i915) void intel_display_driver_register(struct drm_i915_private *i915) { + struct intel_display *display = 
&i915->display; struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "i915 display info:"); @@ -533,8 +535,8 @@ void intel_display_driver_register(struct drm_i915_private *i915) return; /* Must be done after probing outputs */ - intel_opregion_register(i915); - intel_acpi_video_register(i915); + intel_opregion_register(display); + intel_acpi_video_register(display); intel_audio_init(i915); @@ -578,6 +580,8 @@ void intel_display_driver_remove(struct drm_i915_private *i915) /* part #2: call after irq uninstall */ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (!HAS_DISPLAY(i915)) return; @@ -598,7 +602,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) intel_mode_config_cleanup(i915); - intel_dp_tunnel_mgr_cleanup(i915); + intel_dp_tunnel_mgr_cleanup(display); intel_overlay_cleanup(i915); @@ -607,23 +611,27 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) destroy_workqueue(i915->display.wq.flip); destroy_workqueue(i915->display.wq.modeset); - intel_fbc_cleanup(i915); + intel_fbc_cleanup(&i915->display); } /* part #3: call after gem init */ void intel_display_driver_remove_nogem(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + intel_dmc_fini(i915); intel_power_domains_driver_remove(i915); intel_vga_unregister(i915); - intel_bios_driver_remove(i915); + intel_bios_driver_remove(display); } void intel_display_driver_unregister(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (!HAS_DISPLAY(i915)) return; @@ -643,7 +651,7 @@ void intel_display_driver_unregister(struct drm_i915_private *i915) drm_atomic_helper_shutdown(&i915->drm); acpi_video_unregister(); - intel_opregion_unregister(i915); + intel_opregion_unregister(display); } /* diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 5219ba295c74ab..73369847ed66f9 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -14,6 +14,7 @@ #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dp_aux.h" +#include "intel_dsb.h" #include "intel_fdi_regs.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" @@ -270,10 +271,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, static bool i915_has_asle(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915)) return false; - return intel_opregion_asle_present(i915); + return intel_opregion_asle_present(display); } /** @@ -497,6 +500,8 @@ void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { + struct intel_display *display = &dev_priv->display; + bool blc_event = false; enum pipe pipe; @@ -515,12 +520,13 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, } if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev_priv); + intel_opregion_asle_intr(display); } void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { + struct intel_display *display = &dev_priv->display; bool blc_event = false; enum pipe pipe; @@ -539,7 +545,7 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, } if (blc_event || (iir & I915_ASLE_INTERRUPT)) - 
intel_opregion_asle_intr(dev_priv); + intel_opregion_asle_intr(display); if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) intel_gmbus_irq_handler(dev_priv); @@ -570,6 +576,7 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; @@ -583,7 +590,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) } if (pch_iir & SDE_AUX_MASK) - intel_dp_aux_irq_handler(dev_priv); + intel_dp_aux_irq_handler(display); if (pch_iir & SDE_GMBUS) intel_gmbus_irq_handler(dev_priv); @@ -658,6 +665,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; @@ -671,7 +679,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) } if (pch_iir & SDE_AUX_MASK_CPT) - intel_dp_aux_irq_handler(dev_priv); + intel_dp_aux_irq_handler(display); if (pch_iir & SDE_GMBUS_CPT) intel_gmbus_irq_handler(dev_priv); @@ -695,6 +703,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; @@ -702,10 +711,10 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) ilk_hpd_irq_handler(dev_priv, hotplug_trigger); if (de_iir & DE_AUX_CHANNEL_A) - intel_dp_aux_irq_handler(dev_priv); + intel_dp_aux_irq_handler(display); if (de_iir & DE_GSE) - intel_opregion_asle_intr(dev_priv); + intel_opregion_asle_intr(display); if (de_iir & DE_POISON) drm_err(&dev_priv->drm, "Poison interrupt\n"); @@ -743,6 +752,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; @@ -767,10 +777,10 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) } if (de_iir & DE_AUX_CHANNEL_A_IVB) - intel_dp_aux_irq_handler(dev_priv); + intel_dp_aux_irq_handler(display); if (de_iir & DE_GSE_IVB) - intel_opregion_asle_intr(dev_priv); + intel_opregion_asle_intr(display); for_each_pipe(dev_priv, pipe) { if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) @@ -894,6 +904,7 @@ static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv) static void gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) { + struct intel_display *display = &dev_priv->display; bool found = false; if (DISPLAY_VER(dev_priv) >= 14) { @@ -906,8 +917,15 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_pmdemand_irq_handler(dev_priv); found = true; } + + if (iir & XELPDP_RM_TIMEOUT) { + u32 val = intel_uncore_read(&dev_priv->uncore, + RM_TIMEOUT_REG_CAPTURE); + drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val); + found = true; + } } else if (iir & GEN8_DE_MISC_GSE) { - intel_opregion_asle_intr(dev_priv); + intel_opregion_asle_intr(display); found = true; } @@ -1049,6 +1067,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i void gen8_de_irq_handler(struct 
drm_i915_private *dev_priv, u32 master_ctl) { + struct intel_display *display = &dev_priv->display; u32 iir; enum pipe pipe; @@ -1084,7 +1103,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); if (iir & gen8_de_port_aux_mask(dev_priv)) { - intel_dp_aux_irq_handler(dev_priv); + intel_dp_aux_irq_handler(display); found = true; } @@ -1149,6 +1168,17 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) flip_done_handler(dev_priv, pipe); + if (HAS_DSB(dev_priv)) { + if (iir & GEN12_DSB_INT(INTEL_DSB_0)) + intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0); + + if (iir & GEN12_DSB_INT(INTEL_DSB_1)) + intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1); + + if (iir & GEN12_DSB_INT(INTEL_DSB_2)) + intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2); + } + if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); @@ -1211,8 +1241,10 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) { + struct intel_display *display = &i915->display; + if (iir & GEN11_GU_MISC_GSE) - intel_opregion_asle_intr(i915); + intel_opregion_asle_intr(display); } void gen11_display_irq_handler(struct drm_i915_private *i915) @@ -1680,6 +1712,7 @@ static void icp_irq_postinstall(struct drm_i915_private *i915); void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | @@ -1710,14 +1743,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (DISPLAY_VER(dev_priv) >= 14) { de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR | - XELPDP_PMDEMAND_RSP; + XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT; } else if (DISPLAY_VER(dev_priv) >= 11) { enum port port; - if (intel_bios_is_dsi_present(dev_priv, &port)) + if (intel_bios_is_dsi_present(display, &port)) de_port_masked |= DSI0_TE | DSI1_TE; } + if (HAS_DSB(dev_priv)) + de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) | + GEN12_DSB_INT(INTEL_DSB_1) | + GEN12_DSB_INT(INTEL_DSB_2); + de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | gen8_de_pipe_underrun_mask(dev_priv) | diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c index e82bd72d32faf4..1a45d300b6f0fa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_params.c @@ -173,14 +173,14 @@ static void _param_print_charp(struct drm_printer *p, const char *driver_name, /** * intel_display_params_dump - dump intel display modparams - * @i915: i915 device + * @display: display device * @p: the &drm_printer * * Pretty printer for i915 modparams. */ -void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p) +void intel_display_params_dump(struct intel_display *display, struct drm_printer *p) { -#define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x); +#define PRINT(T, x, ...) 
_param_print(p, display->drm->driver->name, #x, display->params.x); INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT); #undef PRINT } diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h index 48c29c55c939bc..da8dc943234b77 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.h +++ b/drivers/gpu/drm/i915/display/intel_display_params.h @@ -9,7 +9,7 @@ #include struct drm_printer; -struct drm_i915_private; +struct intel_display; /* * Invoke param, a function-like macro, for each intel display param, with @@ -56,7 +56,7 @@ struct intel_display_params { }; #undef MEMBER -void intel_display_params_dump(struct drm_i915_private *i915, +void intel_display_params_dump(struct intel_display *display, struct drm_printer *p); void intel_display_params_copy(struct intel_display_params *dest); void intel_display_params_free(struct intel_display_params *params); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index e288a1b21d7e60..ef2fdbf973460d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -36,7 +36,7 @@ for_each_power_well_reverse(__dev_priv, __power_well) \ for_each_if(test_bit((__domain), (__power_well)->domains.bits)) -const char * +static const char * intel_display_power_domain_str(enum intel_display_power_domain domain) { switch (domain) { @@ -198,20 +198,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) } } -/** - * __intel_display_power_is_enabled - unlocked check for a power domain - * @dev_priv: i915 device instance - * @domain: power domain to check - * - * This is the unlocked version of intel_display_power_is_enabled() and should - * only be used from error capture and recovery code where deadlocks are - * possible. - * - * Returns: - * True when the power domain is enabled, false otherwise. 
- */ -bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) +static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) { struct i915_power_well *power_well; bool is_enabled; @@ -1696,7 +1684,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, intel_dmc_load_program(dev_priv); /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */ - if (IS_DISPLAY_IP_RANGE(dev_priv, IP_VER(12, 0), IP_VER(13, 0))) + if (IS_DISPLAY_VER_FULL(dev_priv, IP_VER(12, 0), IP_VER(13, 0))) intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); @@ -1704,6 +1692,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, /* Wa_14011503030:xelpd */ if (DISPLAY_VER(dev_priv) == 13) intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0); + + /* Wa_15013987218 */ + if (DISPLAY_VER(dev_priv) == 20) { + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, + 0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE); + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, + PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0); + } } static void icl_display_core_uninit(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index d6c2a5846bdc91..425452c5a469b1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -183,13 +183,8 @@ void intel_display_power_resume(struct drm_i915_private *i915); void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, u32 state); -const char * -intel_display_power_domain_str(enum intel_display_power_domain domain); - bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); intel_wakeref_t diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 919f712fef131c..46e9eff12c2344 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -861,6 +861,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv) void bxt_enable_dc9(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + assert_can_enable_dc9(dev_priv); drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); @@ -870,19 +872,21 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv) * because PPS registers are always on. 
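* Hence the intel_pps_reset_all() call just below is limited to platforms without a PCH south display engine (!HAS_PCH_SPLIT).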
*/ if (!HAS_PCH_SPLIT(dev_priv)) - intel_pps_reset_all(dev_priv); + intel_pps_reset_all(display); gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); } void bxt_disable_dc9(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + assert_can_disable_dc9(dev_priv); drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - intel_pps_unlock_regs_wa(dev_priv); + intel_pps_unlock_regs_wa(display); } static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -1176,14 +1180,15 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); intel_de_write(dev_priv, CBR1_VLV, 0); - drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); + drm_WARN_ON(&dev_priv->drm, DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq == 0); intel_de_write(dev_priv, RAWCLK_FREQ_VLV, - DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, + DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq, 1000)); } static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_encoder *encoder; enum pipe pipe; @@ -1229,11 +1234,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) intel_vga_redisable_power_on(dev_priv); - intel_pps_unlock_regs_wa(dev_priv); + intel_pps_unlock_regs_wa(display); } static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + spin_lock_irq(&dev_priv->irq_lock); valleyview_disable_display_irqs(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); @@ -1241,7 +1248,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) /* make sure we're done processing display irqs */ intel_synchronize_irq(dev_priv); - intel_pps_reset_all(dev_priv); + intel_pps_reset_all(display); /* Prevent us from re-enabling polling on accident in late suspend */ if (!dev_priv->drm.dev->power.is_suspended) diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c index c2c347b22448ac..49e2e650ebcd1a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_reset.c +++ b/drivers/gpu/drm/i915/display/intel_display_reset.c @@ -83,7 +83,8 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv) void intel_display_reset_finish(struct drm_i915_private *i915) { - struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx; + struct intel_display *display = &i915->display; + struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx; struct drm_atomic_state *state; int ret; @@ -94,7 +95,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915) if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags)) return; - state = fetch_and_zero(&i915->display.restore.modeset_state); + state = fetch_and_zero(&display->restore.modeset_state); if (!state) goto unlock; @@ -112,7 +113,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915) * The display has been reset as well, * so need a full re-initialization. 
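* (The PPS register unlock workaround, display HW state, clock gating and HPD are all re-initialized below.)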
*/ - intel_pps_unlock_regs_wa(i915); + intel_pps_unlock_regs_wa(display); intel_display_driver_init_hw(i915); intel_clock_gating_init(i915); intel_hpd_init(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index f9d3cc3c342bb6..f29e5dc3db910c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1396,8 +1396,8 @@ struct intel_crtc_state { /* Only valid on TGL+ */ enum transcoder mst_master_transcoder; - /* For DSB related info */ - struct intel_dsb *dsb; + /* For DSB based color LUT updates */ + struct intel_dsb *dsb_color_vblank, *dsb_color_commit; u32 psr2_man_track_ctl; @@ -1754,6 +1754,7 @@ struct intel_dp { u8 lane_count; u8 sink_count; bool link_trained; + bool needs_modeset_retry; bool use_max_params; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; @@ -1777,10 +1778,30 @@ struct intel_dp { int common_rates[DP_MAX_SUPPORTED_RATES]; struct { /* TODO: move the rest of link specific fields to here */ + /* common rate,lane_count configs in bw order */ + int num_configs; +#define INTEL_DP_MAX_LANE_COUNT 4 +#define INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS (ilog2(INTEL_DP_MAX_LANE_COUNT) + 1) +#define INTEL_DP_LANE_COUNT_EXP_BITS order_base_2(INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS) +#define INTEL_DP_LINK_RATE_IDX_BITS (BITS_PER_TYPE(u8) - INTEL_DP_LANE_COUNT_EXP_BITS) +#define INTEL_DP_MAX_LINK_CONFIGS (DP_MAX_SUPPORTED_RATES * \ + INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS) + struct intel_dp_link_config { + u8 link_rate_idx:INTEL_DP_LINK_RATE_IDX_BITS; + u8 lane_count_exp:INTEL_DP_LANE_COUNT_EXP_BITS; + } configs[INTEL_DP_MAX_LINK_CONFIGS]; /* Max lane count for the current link */ int max_lane_count; /* Max rate for the current link */ int max_rate; + /* + * Link parameters for which the MST topology was probed. + * Tracking these ensures that the MST path resources are + * re-enumerated whenever the link is retrained with new link + * parameters, as required by the DP standard. + */ + int mst_probed_lane_count; + int mst_probed_rate; int force_lane_count; int force_rate; bool retrain_disabled; @@ -1806,6 +1827,7 @@ struct intel_dp { /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; + bool as_sdp_supported; struct drm_dp_tunnel *tunnel; bool tunnel_suspended:1; @@ -2063,8 +2085,6 @@ dp_to_lspcon(struct intel_dp *intel_dp) return &dp_to_dig_port(intel_dp)->lspcon; } -#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev) - static inline struct intel_digital_port * hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) { @@ -2182,35 +2202,18 @@ to_intel_frontbuffer(struct drm_framebuffer *fb) return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; } -static inline int to_bpp_int(int bpp_x16) -{ - return bpp_x16 >> 4; -} - -static inline int to_bpp_frac(int bpp_x16) -{ - return bpp_x16 & 0xf; -} - -#define BPP_X16_FMT "%d.%04d" -#define BPP_X16_ARGS(bpp_x16) to_bpp_int(bpp_x16), (to_bpp_frac(bpp_x16) * 625) - -static inline int to_bpp_int_roundup(int bpp_x16) -{ - return (bpp_x16 + 0xf) >> 4; -} - -static inline int to_bpp_x16(int bpp) -{ - return bpp << 4; -} - /* * Conversion functions/macros from various pointer types to struct * intel_display pointer. */ #define __drm_device_to_intel_display(p) \ - (&to_i915(p)->display) + ((p) ? 
&to_i915(p)->display : NULL) +#define __device_to_intel_display(p) \ + __drm_device_to_intel_display(dev_get_drvdata(p)) +#define __pci_dev_to_intel_display(p) \ + __drm_device_to_intel_display(pci_get_drvdata(p)) +#define __intel_atomic_state_to_intel_display(p) \ + __drm_device_to_intel_display((p)->base.dev) #define __intel_connector_to_intel_display(p) \ __drm_device_to_intel_display((p)->base.dev) #define __intel_crtc_to_intel_display(p) \ @@ -2234,6 +2237,9 @@ static inline int to_bpp_x16(int bpp) #define to_intel_display(p) \ _Generic(*p, \ __assoc(drm_device, p), \ + __assoc(device, p), \ + __assoc(pci_dev, p), \ + __assoc(intel_atomic_state, p), \ __assoc(intel_connector, p), \ __assoc(intel_crtc, p), \ __assoc(intel_crtc_state, p), \ diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h index 63201d09852c56..be644ab6ae0061 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.h +++ b/drivers/gpu/drm/i915/display/intel_display_wa.h @@ -6,8 +6,16 @@ #ifndef __INTEL_DISPLAY_WA_H__ #define __INTEL_DISPLAY_WA_H__ +#include + struct drm_i915_private; void intel_display_wa_apply(struct drm_i915_private *i915); +#ifdef I915 +static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; } +#else +bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915); +#endif + #endif diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 73977b173898c4..7c756d5ba2a221 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -391,7 +391,7 @@ static const struct stepping_info * intel_get_stepping_info(struct drm_i915_private *i915, struct stepping_info *si) { - const char *step_name = intel_display_step_name(i915); + const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(i915)); si->stepping = step_name[0]; si->substepping = step_name[1]; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index ebe7fe5417ae40..90fa73575feb13 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +43,7 @@ #include #include #include +#include #include #include "g4x_dp.h" @@ -88,6 +90,8 @@ #include "intel_vrr.h" #include "intel_crtc_state_dump.h" +#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev) + /* DP DSC throughput values used for slice count calculations KPixels/s */ #define DP_DSC_PEAK_PIXEL_RATE 2720000 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 @@ -130,14 +134,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp) return dig_port->base.type == INTEL_OUTPUT_EDP; } -bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - - return HAS_AS_SDP(i915) && - drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd); -} - static void intel_dp_unset_edid(struct intel_dp *intel_dp); /* Is link rate UHBR and thus 128b/132b? 
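* (UHBR link rates, 10 Gbps per lane and above, use 128b/132b channel coding instead of the 8b/10b coding used by the RBR/HBR rates.)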
*/ @@ -535,6 +531,10 @@ static void intel_dp_set_source_rates(struct intel_dp *intel_dp) { /* The values must be in increasing order */ + static const int bmg_rates[] = { + 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, + 810000, 1000000, 1350000, + }; static const int mtl_rates[] = { 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, 810000, 1000000, 2000000, @@ -565,8 +565,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) intel_dp->source_rates || intel_dp->num_source_rates); if (DISPLAY_VER(dev_priv) >= 14) { - source_rates = mtl_rates; - size = ARRAY_SIZE(mtl_rates); + if (IS_BATTLEMAGE(dev_priv)) { + source_rates = bmg_rates; + size = ARRAY_SIZE(bmg_rates); + } else { + source_rates = mtl_rates; + size = ARRAY_SIZE(mtl_rates); + } max_rate = mtl_max_source_rate(intel_dp); } else if (DISPLAY_VER(dev_priv) >= 11) { source_rates = icl_rates; @@ -643,6 +648,106 @@ int intel_dp_rate_index(const int *rates, int len, int rate) return -1; } +static int intel_dp_link_config_rate(struct intel_dp *intel_dp, + const struct intel_dp_link_config *lc) +{ + return intel_dp_common_rate(intel_dp, lc->link_rate_idx); +} + +static int intel_dp_link_config_lane_count(const struct intel_dp_link_config *lc) +{ + return 1 << lc->lane_count_exp; +} + +static int intel_dp_link_config_bw(struct intel_dp *intel_dp, + const struct intel_dp_link_config *lc) +{ + return drm_dp_max_dprx_data_rate(intel_dp_link_config_rate(intel_dp, lc), + intel_dp_link_config_lane_count(lc)); +} + +static int link_config_cmp_by_bw(const void *a, const void *b, const void *p) +{ + struct intel_dp *intel_dp = (struct intel_dp *)p; /* remove const */ + const struct intel_dp_link_config *lc_a = a; + const struct intel_dp_link_config *lc_b = b; + int bw_a = intel_dp_link_config_bw(intel_dp, lc_a); + int bw_b = intel_dp_link_config_bw(intel_dp, lc_b); + + if (bw_a != bw_b) + return bw_a - bw_b; + + return intel_dp_link_config_rate(intel_dp, lc_a) - + intel_dp_link_config_rate(intel_dp, lc_b); +} + +static void intel_dp_link_config_init(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_dp_link_config *lc; + int num_common_lane_configs; + int i; + int j; + + if (drm_WARN_ON(&i915->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp)))) + return; + + num_common_lane_configs = ilog2(intel_dp_max_common_lane_count(intel_dp)) + 1; + + if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates * num_common_lane_configs > + ARRAY_SIZE(intel_dp->link.configs))) + return; + + intel_dp->link.num_configs = intel_dp->num_common_rates * num_common_lane_configs; + + lc = &intel_dp->link.configs[0]; + for (i = 0; i < intel_dp->num_common_rates; i++) { + for (j = 0; j < num_common_lane_configs; j++) { + lc->lane_count_exp = j; + lc->link_rate_idx = i; + + lc++; + } + } + + sort_r(intel_dp->link.configs, intel_dp->link.num_configs, + sizeof(intel_dp->link.configs[0]), + link_config_cmp_by_bw, NULL, + intel_dp); +} + +void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + const struct intel_dp_link_config *lc; + + if (drm_WARN_ON(&i915->drm, idx < 0 || idx >= intel_dp->link.num_configs)) + idx = 0; + + lc = &intel_dp->link.configs[idx]; + + *link_rate = intel_dp_link_config_rate(intel_dp, lc); + *lane_count = intel_dp_link_config_lane_count(lc); +} + +int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lane_count) +{ + int link_rate_idx 
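/* Note: link.configs[] is filled by intel_dp_link_config_init() above with every common (rate, lane count) pair and sorted by data rate; the lane count is stored as its log2, so 1/2/4 lanes map to lane_count_exp 0/1/2. */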
= intel_dp_rate_index(intel_dp->common_rates, intel_dp->num_common_rates, + link_rate); + int lane_count_exp = ilog2(lane_count); + int i; + + for (i = 0; i < intel_dp->link.num_configs; i++) { + const struct intel_dp_link_config *lc = &intel_dp->link.configs[i]; + + if (lc->lane_count_exp == lane_count_exp && + lc->link_rate_idx == link_rate_idx) + return i; + } + + return -1; +} + static void intel_dp_set_common_rates(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -661,6 +766,8 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp) intel_dp->common_rates[0] = 162000; intel_dp->num_common_rates = 1; } + + intel_dp_link_config_init(intel_dp); } static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, @@ -1599,8 +1706,8 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state); int mode_rate, link_rate, link_avail; - for (bpp = to_bpp_int(limits->link.max_bpp_x16); - bpp >= to_bpp_int(limits->link.min_bpp_x16); + for (bpp = fxp_q4_to_int(limits->link.max_bpp_x16); + bpp >= fxp_q4_to_int(limits->link.min_bpp_x16); bpp -= 2 * 3) { int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); @@ -1928,7 +2035,7 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp, timeslots); if (ret == 0) { pipe_config->dsc.compressed_bpp_x16 = - to_bpp_x16(valid_dsc_bpp[i]); + fxp_q4_from_int(valid_dsc_bpp[i]); return 0; } } @@ -1971,7 +2078,7 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp, compressed_bppx16 >= dsc_min_bpp; compressed_bppx16 -= bppx16_step) { if (intel_dp->force_dsc_fractional_bpp_en && - !to_bpp_frac(compressed_bppx16)) + !fxp_q4_to_frac(compressed_bppx16)) continue; ret = dsc_compute_link_config(intel_dp, pipe_config, @@ -1981,7 +2088,7 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp, if (ret == 0) { pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16; if (intel_dp->force_dsc_fractional_bpp_en && - to_bpp_frac(compressed_bppx16)) + fxp_q4_to_frac(compressed_bppx16)) drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n"); return 0; @@ -2006,7 +2113,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, dsc_src_min_bpp = dsc_src_min_compressed_bpp(); dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); - dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); + dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16)); dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp); dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, @@ -2018,7 +2125,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, adjusted_mode->hdisplay, pipe_config->joiner_pipes); dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp); - dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); + dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16)); if (DISPLAY_VER(i915) >= 13) return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits, @@ -2168,20 +2275,20 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp, dsc_src_min_bpp = dsc_src_min_compressed_bpp(); dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); - dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); + dsc_min_bpp = max(dsc_min_bpp, 
fxp_q4_to_int_roundup(limits->link.min_bpp_x16)); dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp); dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3); dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; - dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); + dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16)); /* Compressed BPP should be less than the Input DSC bpp */ dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); pipe_config->dsc.compressed_bpp_x16 = - to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp)); + fxp_q4_from_int(max(dsc_min_bpp, dsc_max_bpp)); pipe_config->pipe_bpp = pipe_bpp; @@ -2271,17 +2378,17 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, if (ret < 0) { drm_dbg_kms(&dev_priv->drm, "Cannot compute valid DSC parameters for Input Bpp = %d" - "Compressed BPP = " BPP_X16_FMT "\n", + "Compressed BPP = " FXP_Q4_FMT "\n", pipe_config->pipe_bpp, - BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16)); + FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16)); return ret; } pipe_config->dsc.compression_enable = true; drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " - "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n", + "Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n", pipe_config->pipe_bpp, - BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), + FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16), pipe_config->dsc.slice_count); return 0; @@ -2313,15 +2420,15 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, int max_link_bpp_x16; max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16, - to_bpp_x16(limits->pipe.max_bpp)); + fxp_q4_from_int(limits->pipe.max_bpp)); if (!dsc) { - max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3)); + max_link_bpp_x16 = rounddown(max_link_bpp_x16, fxp_q4_from_int(2 * 3)); - if (max_link_bpp_x16 < to_bpp_x16(limits->pipe.min_bpp)) + if (max_link_bpp_x16 < fxp_q4_from_int(limits->pipe.min_bpp)) return false; - limits->link.min_bpp_x16 = to_bpp_x16(limits->pipe.min_bpp); + limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp); } else { /* * TODO: set the DSC link limits already here, atm these are @@ -2334,7 +2441,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, limits->link.max_bpp_x16 = max_link_bpp_x16; drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n", + "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " FXP_Q4_FMT "\n", encoder->base.base.id, encoder->base.name, crtc->base.base.id, crtc->base.name, adjusted_mode->crtc_clock, @@ -2342,7 +2449,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, limits->max_lane_count, limits->max_rate, limits->pipe.max_bpp, - BPP_X16_ARGS(limits->link.max_bpp_x16)); + FXP_Q4_ARGS(limits->link.max_bpp_x16)); return true; } @@ -2394,7 +2501,7 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state) const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpp = crtc_state->dsc.compression_enable ? 
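/* compressed_bpp_x16 is a .4 binary fixed point value (bpp * 16); the fxp_q4_*() helpers used below replace the driver-local to_bpp_*() ones removed above with the same semantics, e.g. rounding up to a whole bpp is still (x + 0xf) >> 4. */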
- to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) : + fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) : crtc_state->pipe_bpp; return intel_dp_link_required(adjusted_mode->crtc_clock, bpp); @@ -2473,10 +2580,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, } drm_dbg_kms(&i915->drm, - "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n", + "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available %d\n", pipe_config->lane_count, pipe_config->port_clock, pipe_config->pipe_bpp, - BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), + FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16), intel_dp_config_required_rate(pipe_config), intel_dp_max_link_data_rate(intel_dp, pipe_config->port_clock, @@ -2626,8 +2733,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - if (!crtc_state->vrr.enable || - !intel_dp_as_sdp_supported(intel_dp)) + if (!crtc_state->vrr.enable || !intel_dp->as_sdp_supported) return; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC); @@ -2876,7 +2982,6 @@ static void intel_dp_queue_modeset_retry_work(struct intel_connector *connector) drm_connector_put(&connector->base); } -/* NOTE: @state is only valid for MST links and can be %NULL for SST. */ void intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state, struct intel_encoder *encoder, @@ -2885,18 +2990,19 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state, struct intel_connector *connector; struct intel_digital_connector_state *conn_state; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); int i; + if (intel_dp->needs_modeset_retry) + return; + + intel_dp->needs_modeset_retry = true; + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { intel_dp_queue_modeset_retry_work(intel_dp->attached_connector); return; } - if (drm_WARN_ON(&i915->drm, !state)) - return; - for_each_new_intel_connector_in_state(state, connector, conn_state, i) { if (!conn_state->base.crtc) continue; @@ -2968,8 +3074,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (pipe_config->dsc.compression_enable) link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16; else - link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format, - pipe_config->pipe_bpp)); + link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(pipe_config->output_format, + pipe_config->pipe_bpp)); if (intel_dp->mso_link_count) { int n = intel_dp->mso_link_count; @@ -3024,6 +3130,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp->link_trained = false; + intel_dp->needs_modeset_retry = false; intel_dp->link_rate = link_rate; intel_dp->lane_count = lane_count; } @@ -3032,6 +3139,8 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp) { intel_dp->link.max_lane_count = intel_dp_max_common_lane_count(intel_dp); intel_dp->link.max_rate = intel_dp_max_common_rate(intel_dp); + intel_dp->link.mst_probed_lane_count = 0; + intel_dp->link.mst_probed_rate = 0; intel_dp->link.retrain_disabled = false; intel_dp->link.seq_train_failures = 0; } @@ -3367,8 +3476,11 @@ void intel_dp_sync_state(struct intel_encoder *encoder, intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated); - if (crtc_state) + if (crtc_state) { intel_dp_reset_link_params(intel_dp); + 
intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count); + intel_dp->link_trained = true; + } } bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, @@ -3435,7 +3547,7 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) { - int bw_gbps[] = {9, 18, 24, 32, 40, 48}; + static const int bw_gbps[] = {9, 18, 24, 32, 40, 48}; int i; for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { @@ -3955,6 +4067,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector drm_dp_is_branch(intel_dp->dpcd)); intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident); + intel_dp->colorimetry_support = + intel_dp_get_colorimetry_status(intel_dp); + /* * Read the eDP display control registers. * @@ -4068,6 +4183,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident); + intel_dp->colorimetry_support = + intel_dp_get_colorimetry_status(intel_dp); + intel_dp_update_sink_caps(intel_dp); } @@ -4158,6 +4276,9 @@ intel_dp_mst_configure(struct intel_dp *intel_dp) intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST; + if (intel_dp->is_mst) + intel_dp_mst_prepare_probe(intel_dp); + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); /* Avoid stale info on the next detect cycle. */ @@ -4387,8 +4508,11 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder, if (!enable && HAS_DSC(dev_priv)) val &= ~VDIP_ENABLE_PPS; - /* When PSR is enabled, this routine doesn't disable VSC DIP */ - if (!crtc_state->has_psr) + /* + * This routine disables VSC DIP if the function is called + * to disable SDP or if it does not have PSR + */ + if (!enable || !crtc_state->has_psr) val &= ~VIDEO_DIP_ENABLE_VSC_HSW; intel_de_write(dev_priv, reg, val); @@ -5081,7 +5205,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) ack[3] |= DP_TUNNELING_IRQ; } - if (!memchr_inv(ack, 0, sizeof(ack))) + if (mem_is_zero(ack, sizeof(ack))) break; if (!intel_dp_ack_sink_irq_esi(intel_dp, ack)) @@ -5255,8 +5379,6 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_crtc *crtc; - bool mst_output = false; u8 pipe_mask; int ret; @@ -5285,78 +5407,28 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder, encoder->base.base.id, encoder->base.name, str_yes_no(intel_dp->link.force_retrain)); - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { - mst_output = true; - break; - } - - /* Suppress underruns caused by re-training */ - intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - if (crtc_state->has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev_priv, - intel_crtc_pch_transcoder(crtc), false); - } - - /* TODO: use a modeset for SST as well. 
*/ - if (mst_output) { - ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx); - - if (ret && ret != -EDEADLK) - drm_dbg_kms(&dev_priv->drm, - "[ENCODER:%d:%s] link retraining failed: %pe\n", - encoder->base.base.id, encoder->base.name, - ERR_PTR(ret)); - - goto out; - } - - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - intel_dp->link_trained = false; - - intel_dp_check_frl_training(intel_dp); - intel_dp_pcon_dsc_configure(intel_dp, crtc_state); - intel_dp_start_link_train(NULL, intel_dp, crtc_state); - intel_dp_stop_link_train(intel_dp, crtc_state); - break; - } - - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - /* Keep underrun reporting disabled until things are stable */ - intel_crtc_wait_for_next_vblank(crtc); + ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx); + if (ret == -EDEADLK) + return ret; - intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); - if (crtc_state->has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev_priv, - intel_crtc_pch_transcoder(crtc), true); - } + intel_dp->link.force_retrain = false; -out: - if (ret != -EDEADLK) - intel_dp->link.force_retrain = false; + if (ret) + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] link retraining failed: %pe\n", + encoder->base.base.id, encoder->base.name, + ERR_PTR(ret)); return ret; } void intel_dp_link_check(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_modeset_acquire_ctx ctx; int ret; intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret) ret = intel_dp_retrain_link(encoder, &ctx); - - drm_WARN_ON(&i915->drm, ret); } void intel_dp_check_link_state(struct intel_dp *intel_dp) @@ -5906,6 +5978,15 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn connector); } +static void +intel_dp_detect_sdp_caps(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + intel_dp->as_sdp_supported = HAS_AS_SDP(i915) && + drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd); +} + static int intel_dp_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, @@ -5976,13 +6057,15 @@ intel_dp_detect(struct drm_connector *connector, intel_dp_detect_dsc_caps(intel_dp, intel_connector); - intel_dp_mst_configure(intel_dp); + intel_dp_detect_sdp_caps(intel_dp); if (intel_dp->reset_link_params) { intel_dp_reset_link_params(intel_dp); intel_dp->reset_link_params = false; } + intel_dp_mst_configure(intel_dp); + intel_dp_print_rates(intel_dp); if (intel_dp->is_mst) { @@ -6453,8 +6536,9 @@ static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv, bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port) { + struct intel_display *display = &i915->display; const struct intel_bios_encoder_data *devdata = - intel_bios_encoder_data_lookup(i915, port); + intel_bios_encoder_data_lookup(display, port); return _intel_dp_is_port_edp(i915, devdata, port); } @@ -6557,6 +6641,7 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp, static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector) { + struct intel_display *display = to_intel_display(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct drm_connector *connector = &intel_connector->base; struct drm_display_mode 
*fixed_mode; @@ -6582,7 +6667,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, return false; } - intel_bios_init_panel_early(dev_priv, &intel_connector->panel, + intel_bios_init_panel_early(display, &intel_connector->panel, encoder->devdata); if (!intel_pps_init(intel_dp)) { @@ -6679,7 +6764,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, drm_edid = ERR_PTR(-ENOENT); } - intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, + intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, IS_ERR(drm_edid) ? NULL : drm_edid); intel_panel_add_edid_fixed_modes(intel_connector, true); @@ -6852,9 +6937,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, "HDCP init failed, skipping.\n"); } - intel_dp->colorimetry_support = - intel_dp_get_colorimetry_status(intel_dp); - intel_dp->frl.is_trained = false; intel_dp->frl.trained_rate_gbps = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index a0f990a95ecca3..1b9aaddd8c35c4 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -85,7 +85,6 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state); bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp); bool intel_dp_is_edp(struct intel_dp *intel_dp); -bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp); bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state); bool intel_dp_has_dsc(const struct intel_connector *connector); int intel_dp_link_symbol_size(int rate); @@ -108,6 +107,8 @@ int intel_dp_max_common_rate(struct intel_dp *intel_dp); int intel_dp_max_common_lane_count(struct intel_dp *intel_dp); int intel_dp_common_rate(struct intel_dp *intel_dp, int index); int intel_dp_rate_index(const int *rates, int len, int rate); +int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lane_count); +void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count); void intel_dp_update_sink_caps(struct intel_dp *intel_dp); void intel_dp_reset_link_params(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index be58185a77c019..04a7acd7f73cca 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -18,12 +18,12 @@ #define AUX_CH_NAME_BUFSIZE 6 -static const char *aux_ch_name(struct drm_i915_private *i915, +static const char *aux_ch_name(struct intel_display *display, char *buf, int size, enum aux_ch aux_ch) { - if (DISPLAY_VER(i915) >= 13 && aux_ch >= AUX_CH_D_XELPD) + if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD) snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D); - else if (DISPLAY_VER(i915) >= 12 && aux_ch >= AUX_CH_USBC1) + else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1) snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1); else snprintf(buf, size, "%c", 'A' + aux_ch); @@ -56,17 +56,18 @@ static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes) static u32 intel_dp_aux_wait_done(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); const unsigned int timeout_ms = 10; u32 status; int ret; - ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0, + ret 
= intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, + 0, 2, timeout_ms, &status); if (ret == -ETIMEDOUT) - drm_err(&i915->drm, + drm_err(display->drm, "%s: did not complete or timeout within %ums (status 0x%08x)\n", intel_dp->aux.name, timeout_ms, status); @@ -75,7 +76,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); if (index) return 0; @@ -84,12 +85,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * The clock divider is based off the hrawclk, and would like to run at * 2MHz. So, take the hrawclk value and divide by 2000 and use that */ - return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000); + return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000); } static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 freq; @@ -102,15 +103,16 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * divide by 2000 and use that */ if (dig_port->aux_ch == AUX_CH_A) - freq = i915->display.cdclk.hw.cdclk; + freq = display->cdclk.hw.cdclk; else - freq = RUNTIME_INFO(i915)->rawclk_freq; + freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq; return DIV_ROUND_CLOSEST(freq, 2000); } static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) { @@ -201,8 +203,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, int send_bytes, u32 unused) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u32 ret; /* @@ -227,7 +229,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, * Power request bit is already set during aux power well enable. * Preserve the bit across aux transactions. 
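* (This applies to the Xe_LPD+ style AUX registers only, hence the display version 14+ check below.)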
*/ - if (DISPLAY_VER(i915) >= 14) + if (DISPLAY_VER(display) >= 14) ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST; return ret; @@ -239,6 +241,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, u8 *recv, int recv_size, u32 aux_send_ctl_flags) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -297,7 +300,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { - status = intel_de_read_notrace(i915, ch_ctl); + status = intel_de_read_notrace(display, ch_ctl); if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) break; msleep(1); @@ -306,10 +309,10 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); if (try == 3) { - const u32 status = intel_de_read(i915, ch_ctl); + const u32 status = intel_de_read(display, ch_ctl); if (status != intel_dp->aux_busy_last_status) { - drm_WARN(&i915->drm, 1, + drm_WARN(display->drm, 1, "%s: not started (status 0x%08x)\n", intel_dp->aux.name, status); intel_dp->aux_busy_last_status = status; @@ -320,7 +323,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, } /* Only 5 data registers! */ - if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) { + if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) { ret = -E2BIG; goto out; } @@ -336,17 +339,17 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) - intel_de_write(i915, ch_data[i >> 2], + intel_de_write(display, ch_data[i >> 2], intel_dp_aux_pack(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ - intel_de_write(i915, ch_ctl, send_ctl); + intel_de_write(display, ch_ctl, send_ctl); status = intel_dp_aux_wait_done(intel_dp); /* Clear done status and any errors */ - intel_de_write(i915, ch_ctl, + intel_de_write(display, ch_ctl, status | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR); @@ -370,7 +373,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, } if ((status & DP_AUX_CH_CTL_DONE) == 0) { - drm_err(&i915->drm, "%s: not done (status 0x%08x)\n", + drm_err(display->drm, "%s: not done (status 0x%08x)\n", intel_dp->aux.name, status); ret = -EBUSY; goto out; @@ -382,7 +385,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, * not connected. */ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { - drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n", + drm_err(display->drm, "%s: receive error (status 0x%08x)\n", intel_dp->aux.name, status); ret = -EIO; goto out; @@ -393,7 +396,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, * -- don't fill the kernel log with these */ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { - drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n", + drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n", intel_dp->aux.name, status); ret = -ETIMEDOUT; goto out; @@ -408,7 +411,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, * drm layer takes care for the necessary retries. 
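* (The sink can return at most 20 bytes here, matching the five 4-byte AUX data registers noted earlier in this function.)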
*/ if (recv_bytes == 0 || recv_bytes > 20) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "%s: Forbidden recv_bytes = %d on aux transaction\n", intel_dp->aux.name, recv_bytes); ret = -EBUSY; @@ -419,7 +422,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) - intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]), + intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]), recv + i, recv_bytes - i); ret = recv_bytes; @@ -468,7 +471,7 @@ static ssize_t intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u8 txbuf[20], rxbuf[20]; size_t txsize, rxsize; u32 flags = intel_dp_aux_xfer_flags(msg); @@ -483,10 +486,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; rxsize = 2; /* 0 or 1 data bytes */ - if (drm_WARN_ON(&i915->drm, txsize > 20)) + if (drm_WARN_ON(display->drm, txsize > 20)) return -E2BIG; - drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size); + drm_WARN_ON(display->drm, !msg->buffer != !msg->size); if (msg->buffer) memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); @@ -511,7 +514,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; rxsize = msg->size + 1; - if (drm_WARN_ON(&i915->drm, rxsize > 20)) + if (drm_WARN_ON(display->drm, rxsize > 20)) return -E2BIG; ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, @@ -721,7 +724,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -732,16 +735,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp) case AUX_CH_USBC2: case AUX_CH_USBC3: case AUX_CH_USBC4: - return XELPDP_DP_AUX_CH_CTL(i915, aux_ch); + return XELPDP_DP_AUX_CH_CTL(display, aux_ch); default: MISSING_CASE(aux_ch); - return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A); + return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A); } } static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -752,10 +755,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index) case AUX_CH_USBC2: case AUX_CH_USBC3: case AUX_CH_USBC4: - return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index); + return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index); default: MISSING_CASE(aux_ch); - return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index); + return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index); } } @@ -769,19 +772,20 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp) void intel_dp_aux_init(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = 
&dig_port->base; enum aux_ch aux_ch = dig_port->aux_ch; char buf[AUX_CH_NAME_BUFSIZE]; - if (DISPLAY_VER(i915) >= 14) { + if (DISPLAY_VER(display) >= 14) { intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg; intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg; - } else if (DISPLAY_VER(i915) >= 12) { + } else if (DISPLAY_VER(display) >= 12) { intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg; intel_dp->aux_ch_data_reg = tgl_aux_data_reg; - } else if (DISPLAY_VER(i915) >= 9) { + } else if (DISPLAY_VER(display) >= 9) { intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; intel_dp->aux_ch_data_reg = skl_aux_data_reg; } else if (HAS_PCH_SPLIT(i915)) { @@ -795,7 +799,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp) intel_dp->aux_ch_data_reg = g4x_aux_data_reg; } - if (DISPLAY_VER(i915) >= 9) + if (DISPLAY_VER(display) >= 9) intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; @@ -804,17 +808,17 @@ void intel_dp_aux_init(struct intel_dp *intel_dp) else intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; - if (DISPLAY_VER(i915) >= 9) + if (DISPLAY_VER(display) >= 9) intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; else intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; - intel_dp->aux.drm_dev = &i915->drm; + intel_dp->aux.drm_dev = display->drm; drm_dp_aux_init(&intel_dp->aux); /* Failure to allocate our preferred name is not critical */ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s", - aux_ch_name(i915, buf, sizeof(buf), aux_ch), + aux_ch_name(display, buf, sizeof(buf), aux_ch), encoder->base.name); intel_dp->aux.transfer = intel_dp_aux_transfer; @@ -823,10 +827,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp) static enum aux_ch default_aux_ch(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); /* SKL has DDI E but no AUX E */ - if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E) + if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E) return AUX_CH_A; return (enum aux_ch)encoder->port; @@ -836,10 +840,10 @@ static struct intel_encoder * get_encoder_by_aux_ch(struct intel_encoder *encoder, enum aux_ch aux_ch) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_encoder *other; - for_each_intel_encoder(&i915->drm, other) { + for_each_intel_encoder(display->drm, other) { if (other == encoder) continue; @@ -855,7 +859,7 @@ get_encoder_by_aux_ch(struct intel_encoder *encoder, enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_encoder *other; const char *source; enum aux_ch aux_ch; @@ -876,23 +880,23 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder) other = get_encoder_by_aux_ch(encoder, aux_ch); if (other) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name, - aux_ch_name(i915, buf, sizeof(buf), aux_ch), + aux_ch_name(display, buf, sizeof(buf), aux_ch), other->base.base.id, other->base.name); return AUX_CH_NONE; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Using AUX CH %s (%s)\n", encoder->base.base.id, encoder->base.name, - aux_ch_name(i915, buf, sizeof(buf), aux_ch), source); + 
aux_ch_name(display, buf, sizeof(buf), aux_ch), source); return aux_ch; } -void intel_dp_aux_irq_handler(struct drm_i915_private *i915) +void intel_dp_aux_irq_handler(struct intel_display *display) { - wake_up_all(&i915->display.gmbus.wait_queue); + wake_up_all(&display->gmbus.wait_queue); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 593f58fafab716..90ee1c5fae28d0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -9,7 +9,7 @@ #include enum aux_ch; -struct drm_i915_private; +struct intel_display; struct intel_dp; struct intel_encoder; @@ -18,7 +18,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp); enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder); -void intel_dp_aux_irq_handler(struct drm_i915_private *i915); +void intel_dp_aux_irq_handler(struct intel_display *display); u32 intel_dp_aux_pack(const u8 *src, int src_bytes); int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 8ce60d53dcde05..33f72db99b58d5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -109,7 +109,7 @@ static bool is_intel_tcon_cap(const u8 tcon_cap[4]) static bool intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); struct drm_dp_aux *aux = &intel_dp->aux; struct intel_panel *panel = &connector->panel; @@ -122,7 +122,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) if (ret != sizeof(tcon_cap)) return false; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n", + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n", connector->base.base.id, connector->base.name, is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]); @@ -141,10 +142,10 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) * HDR static metadata we need to start maintaining table of * ranges for such panels. */ - if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && + if (display->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1))) { - drm_info(&i915->drm, + drm_info(display->drm, "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. 
needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", connector->base.base.id, connector->base.name, INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL); @@ -170,14 +171,15 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) static u32 intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 tmp; u8 buf[2] = {}; if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n", + drm_err(display->drm, + "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n", connector->base.base.id, connector->base.name); return 0; } @@ -195,7 +197,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) != sizeof(buf)) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n", + drm_err(display->drm, + "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n", connector->base.base.id, connector->base.name); return 0; } @@ -253,8 +256,8 @@ static void intel_dp_aux_write_content_luminance(struct intel_connector *connector, struct hdr_output_metadata *hdr_metadata) { + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - struct drm_i915_private *i915 = to_i915(connector->base.dev); int ret; u8 buf[4]; @@ -270,7 +273,7 @@ intel_dp_aux_write_content_luminance(struct intel_connector *connector, INTEL_EDP_HDR_CONTENT_LUMINANCE, buf, sizeof(buf)); if (ret < 0) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Content Luminance DPCD reg write failed, err:-%d\n", ret); } @@ -280,7 +283,7 @@ intel_dp_aux_fill_hdr_tcon_params(const struct drm_connector_state *conn_state, { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); /* * According to spec segmented backlight needs to be set whenever panel is in @@ -291,7 +294,7 @@ intel_dp_aux_fill_hdr_tcon_params(const struct drm_connector_state *conn_state, *ctrl |= INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE; } - if (DISPLAY_VER(i915) < 11) + if (DISPLAY_VER(display) < 11) *ctrl &= ~INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE; if (panel->backlight.edp.intel_cap.supports_2020_gamut && @@ -311,9 +314,9 @@ static void intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); struct hdr_output_metadata *hdr_metadata; int ret; @@ -323,7 +326,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); if (ret != 1) { - drm_err(&i915->drm, 
"[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n", + drm_err(display->drm, + "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n", connector->base.base.id, connector->base.name, ret); return; } @@ -346,7 +350,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, if (ctrl != old_ctrl && drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n", + drm_err(display->drm, + "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n", connector->base.base.id, connector->base.name); if (intel_dp_in_hdr_mode(conn_state)) { @@ -377,7 +382,7 @@ static const char *dpcd_vs_pwm_str(bool aux) static void intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); int ret; @@ -392,7 +397,7 @@ intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector) INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE, buf, sizeof(buf)); if (ret < 0) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel Luminance DPCD reg write failed, err:-%d\n", ret); } @@ -400,20 +405,21 @@ intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector) static int intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_panel *panel = &connector->panel; struct drm_luminance_range_info *luminance_range = &connector->base.display_info.luminance_range; int ret; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n", + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n", connector->base.base.id, connector->base.name, dpcd_vs_pwm_str(panel->backlight.edp.intel_cap.sdr_uses_aux)); if (!panel->backlight.edp.intel_cap.sdr_uses_aux) { ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) { - drm_err(&i915->drm, + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n", connector->base.base.id, connector->base.name, ret); return ret; @@ -430,7 +436,8 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi intel_dp_aux_write_panel_luminance_override(connector); - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n", + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n", connector->base.base.id, connector->base.name, panel->backlight.min, panel->backlight.max); @@ -501,9 +508,9 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe) { + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_panel *panel = &connector->panel; - struct drm_i915_private *i915 = dp_to_i915(intel_dp); u16 current_level; u8 current_mode; int ret; @@ -514,17 +521,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, if (ret < 0) return ret; - 
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n", + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n", connector->base.base.id, connector->base.name, dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable)); - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n", + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n", connector->base.base.id, connector->base.name, dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set)); if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) { ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) { - drm_err(&i915->drm, + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n", connector->base.base.id, connector->base.name, ret); return ret; @@ -553,7 +562,8 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, } } - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n", + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n", connector->base.base.id, connector->base.name); return 0; @@ -562,11 +572,12 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, static bool intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_attached_dp(connector); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) { - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n", + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n", connector->base.base.id, connector->base.name); return true; } @@ -591,16 +602,15 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = { int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct drm_device *dev = connector->base.dev; struct intel_panel *panel = &connector->panel; - struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool try_intel_interface = false, try_vesa_interface = false; /* Check the VBT and user's module parameters to figure out which * interfaces to probe */ - switch (i915->display.params.enable_dpcd_backlight) { + switch (display->params.enable_dpcd_backlight) { case INTEL_DP_AUX_BACKLIGHT_OFF: return -ENODEV; case INTEL_DP_AUX_BACKLIGHT_AUTO: diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index b0101d72b9c1ae..3425b364314314 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -152,7 +152,7 @@ int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps); + ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps); if (ret) return ret; @@ -677,8 +677,15 @@ static int intel_dp_hdcp2_get_capability(struct intel_connector *connector, bool *capable) { - struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_dp_aux *aux = &dig_port->dp.aux; + struct 
intel_digital_port *dig_port; + struct drm_dp_aux *aux; + + *capable = false; + if (!intel_attached_encoder(connector)) + return -EINVAL; + + dig_port = intel_attached_dig_port(connector); + aux = &dig_port->dp.aux; return _intel_dp_hdcp2_get_capability(aux, capable); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index d044c8e36bb3d6..40bedc31d6bf2f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -21,6 +21,8 @@ * IN THE SOFTWARE. */ +#include + #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dp.h" @@ -37,13 +39,13 @@ drm_dp_phy_name(_dp_phy) #define lt_dbg(_intel_dp, _dp_phy, _format, ...) \ - drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \ + drm_dbg_kms(to_intel_display(_intel_dp)->drm, \ LT_MSG_PREFIX _format, \ LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__) #define lt_err(_intel_dp, _dp_phy, _format, ...) do { \ if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \ - drm_err(&dp_to_i915(_intel_dp)->drm, \ + drm_err(to_intel_display(_intel_dp)->drm, \ LT_MSG_PREFIX _format, \ LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \ else \ @@ -114,7 +116,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT : DP_PHY_REPEATER_MODE_NON_TRANSPARENT; - return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1; + if (drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) != 1) + return false; + + intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = val; + + return true; } static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp) @@ -174,7 +182,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_ * still taking into account any LTTPR common lane- rate/count limits. */ if (lttpr_count < 0) - return 0; + goto out_reset_lttpr_count; if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) { lt_dbg(intel_dp, DP_PHY_DPRX, @@ -208,7 +216,8 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); if (intel_dp_is_edp(intel_dp)) return 0; @@ -217,7 +226,7 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S * Detecting LTTPRs must be avoided on platforms with an AUX timeout * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). */ - if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915)) + if (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915)) if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)) return -EIO; @@ -248,7 +257,8 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S */ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); int lttpr_count = 0; /* @@ -256,7 +266,7 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). 
*/ if (!intel_dp_is_edp(intel_dp) && - (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) { + (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))) { u8 dpcd[DP_RECEIVER_CAP_SIZE]; int err = intel_dp_read_dprx_caps(intel_dp, dpcd); @@ -319,10 +329,11 @@ static bool intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps); - drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX); + drm_WARN_ON_ONCE(display->drm, + lttpr_count <= 0 && dp_phy != DP_PHY_DPRX); return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1); } @@ -331,7 +342,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u8 voltage_max; /* @@ -343,7 +354,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp, else voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1); - drm_WARN_ON_ONCE(&i915->drm, + drm_WARN_ON_ONCE(display->drm, voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 && voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3); @@ -353,7 +364,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp, static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u8 preemph_max; /* @@ -365,7 +376,7 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp, else preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1); - drm_WARN_ON_ONCE(&i915->drm, + drm_WARN_ON_ONCE(display->drm, preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 && preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3); @@ -375,10 +386,11 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp, static bool has_per_lane_signal_levels(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) || - DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915); + DISPLAY_VER(display) >= 10 || IS_BROXTON(i915); } /* 128b/132b */ @@ -697,26 +709,28 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp, return true; } -static void -intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) +void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, bool is_vrr) { u8 link_config[2]; - link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0; - link_config[1] = intel_dp_is_uhbr(crtc_state) ? + link_config[0] = is_vrr ? DP_MSA_TIMING_PAR_IGNORE_EN : 0; + link_config[1] = drm_dp_is_uhbr_rate(link_rate) ? 
DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B; drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); } -static void -intel_dp_update_link_bw_set(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - u8 link_bw, u8 rate_select) +static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { - u8 lane_count = crtc_state->lane_count; + intel_dp_link_training_set_mode(intel_dp, + crtc_state->port_clock, crtc_state->vrr.flipline); +} - if (crtc_state->enhanced_framing) +void intel_dp_link_training_set_bw(struct intel_dp *intel_dp, + int link_bw, int rate_select, int lane_count, + bool enhanced_framing) +{ + if (enhanced_framing) lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN; if (link_bw) { @@ -740,6 +754,14 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp, } } +static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + u8 link_bw, u8 rate_select) +{ + intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, crtc_state->lane_count, + crtc_state->enhanced_framing); +} + /* * Prepare link training by configuring the link parameters. On DDI platforms * also enable the port here. @@ -932,7 +954,8 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); bool source_tps3, sink_tps3, source_tps4, sink_tps4; /* UHBR+ use separate 128b/132b TPS2 */ @@ -1152,6 +1175,36 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, return true; } +static bool reduce_link_params_in_bw_order(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int *new_link_rate, int *new_lane_count) +{ + int link_rate; + int lane_count; + int i; + + i = intel_dp_link_config_index(intel_dp, crtc_state->port_clock, crtc_state->lane_count); + for (i--; i >= 0; i--) { + intel_dp_link_config_get(intel_dp, i, &link_rate, &lane_count); + + if ((intel_dp->link.force_rate && + intel_dp->link.force_rate != link_rate) || + (intel_dp->link.force_lane_count && + intel_dp->link.force_lane_count != lane_count)) + continue; + + break; + } + + if (i < 0) + return false; + + *new_link_rate = link_rate; + *new_lane_count = lane_count; + + return true; +} + static int reduce_link_rate(struct intel_dp *intel_dp, int current_rate) { int rate_index; @@ -1187,6 +1240,41 @@ static int reduce_lane_count(struct intel_dp *intel_dp, int current_lane_count) return current_lane_count >> 1; } +static bool reduce_link_params_in_rate_lane_order(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int *new_link_rate, int *new_lane_count) +{ + int link_rate; + int lane_count; + + lane_count = crtc_state->lane_count; + link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock); + if (link_rate < 0) { + lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count); + link_rate = intel_dp_max_common_rate(intel_dp); + } + + if (lane_count < 0) + return false; + + *new_link_rate = link_rate; + *new_lane_count = lane_count; + + return true; +} + +static bool reduce_link_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, + int *new_link_rate, int *new_lane_count) +{ + /* TODO: Use the same fallback logic on SST as on MST. 
*/ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) + return reduce_link_params_in_bw_order(intel_dp, crtc_state, + new_link_rate, new_lane_count); + else + return reduce_link_params_in_rate_lane_order(intel_dp, crtc_state, + new_link_rate, new_lane_count); +} + static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -1200,14 +1288,7 @@ static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, return 0; } - new_lane_count = crtc_state->lane_count; - new_link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock); - if (new_link_rate < 0) { - new_lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count); - new_link_rate = intel_dp_max_common_rate(intel_dp); - } - - if (new_lane_count < 0) + if (!reduce_link_params(intel_dp, crtc_state, &new_link_rate, &new_lane_count)) return -1; if (intel_dp_is_edp(intel_dp) && @@ -1228,12 +1309,10 @@ static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, return 0; } -/* NOTE: @state is only valid for MST links and can be %NULL for SST. */ static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *state, struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) { @@ -1249,11 +1328,6 @@ static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state * return false; } - if (drm_WARN_ON(&i915->drm, - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && - !state)) - return false; - /* Schedule a Hotplug Uevent to userspace to start modeset */ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state); @@ -1512,14 +1586,12 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, * retraining with reduced link rate/lane parameters if the link training * fails. * After calling this function intel_dp_stop_link_train() must be called. - * - * NOTE: @state is only valid for MST links and can be %NULL for SST. */ void intel_dp_start_link_train(struct intel_atomic_state *state, struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(state); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; bool passed; @@ -1530,11 +1602,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state, */ int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); - if (drm_WARN_ON(&i915->drm, - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && - !state)) - return; - if (lttpr_count < 0) /* Still continue with enabling the port and link training. */ lttpr_count = 0; @@ -1569,7 +1636,7 @@ void intel_dp_start_link_train(struct intel_atomic_state *state, * For test cases which rely on the link training or processing of HPDs * ignore_long_hpd flag can unset from the testcase. 
*/ - if (i915->display.hotplug.ignore_long_hpd) { + if (display->hotplug.ignore_long_hpd) { lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n"); return; } @@ -1621,14 +1688,14 @@ static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *conn static int i915_dp_force_link_rate_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int current_rate = -1; int force_rate; int err; int i; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; @@ -1636,7 +1703,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data) current_rate = intel_dp->link_rate; force_rate = intel_dp->link.force_rate; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); seq_printf(m, "%sauto%s", force_rate == 0 ? "[" : "", @@ -1692,7 +1759,7 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file, { struct seq_file *m = file->private_data; struct intel_connector *connector = to_intel_connector(m->private); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int rate; int err; @@ -1701,14 +1768,14 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file, if (rate < 0) return rate; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; intel_dp_reset_link_params(intel_dp); intel_dp->link.force_rate = rate; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); *offp += len; @@ -1719,14 +1786,14 @@ DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_link_rate); static int i915_dp_force_lane_count_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int current_lane_count = -1; int force_lane_count; int err; int i; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; @@ -1734,7 +1801,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data) current_lane_count = intel_dp->lane_count; force_lane_count = intel_dp->link.force_lane_count; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); seq_printf(m, "%sauto%s", force_lane_count == 0 ? 
"[" : "", @@ -1794,7 +1861,7 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file, { struct seq_file *m = file->private_data; struct intel_connector *connector = to_intel_connector(m->private); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int lane_count; int err; @@ -1803,14 +1870,14 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file, if (lane_count < 0) return lane_count; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; intel_dp_reset_link_params(intel_dp); intel_dp->link.force_lane_count = lane_count; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); *offp += len; @@ -1821,17 +1888,17 @@ DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_lane_count); static int i915_dp_max_link_rate_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int err; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; *val = intel_dp->link.max_rate; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); return 0; } @@ -1840,17 +1907,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_link_rate_fops, i915_dp_max_link_rate_show, static int i915_dp_max_lane_count_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int err; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; *val = intel_dp->link.max_lane_count; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); return 0; } @@ -1859,17 +1926,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_lane_count_fops, i915_dp_max_lane_count_sho static int i915_dp_force_link_training_failure_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int err; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; *val = intel_dp->link.force_train_failure; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); return 0; } @@ -1877,20 +1944,20 @@ static int i915_dp_force_link_training_failure_show(void *data, u64 *val) static int 
i915_dp_force_link_training_failure_write(void *data, u64 val) { struct intel_connector *connector = to_intel_connector(data); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int err; if (val > 2) return -EINVAL; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; intel_dp->link.force_train_failure = val; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); return 0; } @@ -1901,17 +1968,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_training_failure_fops, static int i915_dp_force_link_retrain_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int err; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; *val = intel_dp->link.force_retrain; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); return 0; } @@ -1919,17 +1986,17 @@ static int i915_dp_force_link_retrain_show(void *data, u64 *val) static int i915_dp_force_link_retrain_write(void *data, u64 val) { struct intel_connector *connector = to_intel_connector(data); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int err; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; intel_dp->link.force_retrain = val; - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); intel_hpd_trigger_irq(dp_to_dig_port(intel_dp)); @@ -1942,17 +2009,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_retrain_fops, static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); int err; - err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); if (err) return err; seq_printf(m, "%s\n", str_yes_no(intel_dp->link.retrain_disabled)); - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 42e7fc6cb171a2..2066b914676229 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -16,6 +16,12 @@ struct intel_dp; 
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]); int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp); +void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, + int link_rate, bool is_vrr); +void intel_dp_link_training_set_bw(struct intel_dp *intel_dp, + int link_bw, int rate_select, int lane_count, + bool enhanced_framing); + void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 17978a1f9ab0a0..15541932b809e8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -43,6 +43,7 @@ #include "intel_dp_hdcp.h" #include "intel_dp_mst.h" #include "intel_dp_tunnel.h" +#include "intel_dp_link_training.h" #include "intel_dpio_phy.h" #include "intel_hdcp.h" #include "intel_hotplug.h" @@ -211,8 +212,8 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp); - link_bpp_x16 = to_bpp_x16(dsc ? bpp : - intel_dp_output_bpp(crtc_state->output_format, bpp)); + link_bpp_x16 = fxp_q4_from_int(dsc ? bpp : + intel_dp_output_bpp(crtc_state->output_format, bpp)); local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, false, dsc, link_bpp_x16); @@ -289,7 +290,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, if (!dsc) crtc_state->pipe_bpp = bpp; else - crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp); + crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp); drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc); } @@ -308,8 +309,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, * YUV420 is only half of the pipe bpp value. 
*/ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, - to_bpp_int(limits->link.max_bpp_x16), - to_bpp_int(limits->link.min_bpp_x16), + fxp_q4_to_int(limits->link.max_bpp_x16), + fxp_q4_to_int(limits->link.min_bpp_x16), limits, conn_state, 2 * 3, false); @@ -374,11 +375,11 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder, crtc_state, max_bpp / 3); max_compressed_bpp = min(max_compressed_bpp, - to_bpp_int(limits->link.max_bpp_x16)); + fxp_q4_to_int(limits->link.max_bpp_x16)); min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state); min_compressed_bpp = max(min_compressed_bpp, - to_bpp_int_roundup(limits->link.min_bpp_x16)); + fxp_q4_to_int_roundup(limits->link.min_bpp_x16)); drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n", min_compressed_bpp, max_compressed_bpp); @@ -478,10 +479,10 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne crtc->base.base.id, crtc->base.name, connector->base.base.id, connector->base.name); - if (limits->link.max_bpp_x16 < to_bpp_x16(24)) + if (limits->link.max_bpp_x16 < fxp_q4_from_int(24)) return false; - limits->link.min_bpp_x16 = to_bpp_x16(24); + limits->link.min_bpp_x16 = fxp_q4_from_int(24); return true; } @@ -489,18 +490,18 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate); if (limits->max_rate < 540000) - min_bpp_x16 = to_bpp_x16(13); + min_bpp_x16 = fxp_q4_from_int(13); else if (limits->max_rate < 810000) - min_bpp_x16 = to_bpp_x16(10); + min_bpp_x16 = fxp_q4_from_int(10); if (limits->link.min_bpp_x16 >= min_bpp_x16) return true; drm_dbg_kms(&i915->drm, - "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n", + "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n", crtc->base.base.id, crtc->base.name, connector->base.base.id, connector->base.name, - BPP_X16_ARGS(min_bpp_x16)); + FXP_Q4_ARGS(min_bpp_x16)); if (limits->link.max_bpp_x16 < min_bpp_x16) return false; @@ -1113,6 +1114,33 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state, to_intel_crtc(pipe_config->uapi.crtc)); } +static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp, + int link_rate, int lane_count) +{ + return intel_dp->link.mst_probed_rate == link_rate && + intel_dp->link.mst_probed_lane_count == lane_count; +} + +static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp, + int link_rate, int lane_count) +{ + intel_dp->link.mst_probed_rate = link_rate; + intel_dp->link.mst_probed_lane_count = lane_count; +} + +static void intel_mst_reprobe_topology(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + if (intel_mst_probed_link_params_valid(intel_dp, + crtc_state->port_clock, crtc_state->lane_count)) + return; + + drm_dp_mst_topology_queue_probe(&intel_dp->mst_mgr); + + intel_mst_set_probed_link_params(intel_dp, + crtc_state->port_clock, crtc_state->lane_count); +} + static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, @@ -1149,17 +1177,19 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_dp_sink_enable_decompression(state, connector, pipe_config); - if (first_mst_stream) + if (first_mst_stream) { dig_port->base.pre_enable(state, 
&dig_port->base, pipe_config, NULL); + intel_mst_reprobe_topology(intel_dp, pipe_config); + } + intel_dp->active_mst_links++; ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state, drm_atomic_get_mst_payload_state(mst_state, connector->port)); if (ret < 0) - drm_dbg_kms(&dev_priv->drm, "Failed to create MST payload for %s: %d\n", - connector->base.name, ret); + intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config); /* * Before Gen 12 this is not done as part of @@ -1223,6 +1253,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, enum transcoder trans = pipe_config->cpu_transcoder; bool first_mst_stream = intel_dp->active_mst_links == 1; struct intel_crtc *pipe_crtc; + int ret; drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); @@ -1254,8 +1285,11 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, if (first_mst_stream) intel_ddi_wait_for_fec_status(encoder, pipe_config, true); - drm_dp_add_payload_part2(&intel_dp->mst_mgr, - drm_atomic_get_mst_payload_state(mst_state, connector->port)); + ret = drm_dp_add_payload_part2(&intel_dp->mst_mgr, + drm_atomic_get_mst_payload_state(mst_state, + connector->port)); + if (ret < 0) + intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config); if (DISPLAY_VER(dev_priv) >= 12) intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans), @@ -1999,6 +2033,36 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state, return false; } +/** + * intel_dp_mst_prepare_probe - Prepare an MST link for topology probing + * @intel_dp: DP port object + * + * Prepare an MST link for topology probing, programming the target + * link parameters to DPCD. This step is a requirement of the enumeration + * of path resources during probing. + */ +void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp) +{ + int link_rate = intel_dp_max_link_rate(intel_dp); + int lane_count = intel_dp_max_lane_count(intel_dp); + u8 rate_select; + u8 link_bw; + + if (intel_dp->link_trained) + return; + + if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count)) + return; + + intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select); + + intel_dp_link_training_set_mode(intel_dp, link_rate, false); + intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count, + drm_dp_enhanced_frame_cap(intel_dp->dpcd)); + + intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count); +} + /* * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt.
the DPCD * @intel_dp: DP port object diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index 9e4c7679f1c3a3..8343804ce3f8d1 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -27,6 +27,7 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state, struct intel_link_bw_limits *limits); bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state, struct intel_crtc *crtc); +void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp); bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp); #endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c index 6503abdc2b9885..94198bc0493973 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c @@ -69,7 +69,7 @@ static int get_current_link_bw(struct intel_dp *intel_dp, static int update_tunnel_state(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; bool old_bw_below_dprx; bool new_bw_below_dprx; @@ -81,7 +81,7 @@ static int update_tunnel_state(struct intel_dp *intel_dp) ret = drm_dp_tunnel_update_state(intel_dp->tunnel); if (ret < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n", drm_dp_tunnel_name(intel_dp->tunnel), encoder->base.base.id, encoder->base.name, @@ -103,7 +103,7 @@ static int update_tunnel_state(struct intel_dp *intel_dp) !new_bw_below_dprx) return 0; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n", drm_dp_tunnel_name(intel_dp->tunnel), encoder->base.base.id, encoder->base.name, @@ -121,20 +121,20 @@ static int update_tunnel_state(struct intel_dp *intel_dp) */ static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_crtc *crtc; int tunnel_bw = 0; int err; - for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { + for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); int stream_bw = intel_dp_config_required_rate(crtc_state); tunnel_bw += stream_bw; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n", drm_dp_tunnel_name(intel_dp->tunnel), encoder->base.base.id, encoder->base.name, @@ -145,7 +145,7 @@ static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pi err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw); if (err) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n", drm_dp_tunnel_name(intel_dp->tunnel), encoder->base.base.id, encoder->base.name, @@ -172,12 +172,12 @@ static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp, static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct 
intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_dp_tunnel *tunnel; int ret; - tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr, + tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr, &intel_dp->aux); if (IS_ERR(tunnel)) return PTR_ERR(tunnel); @@ -189,7 +189,7 @@ static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acqui if (ret == -EOPNOTSUPP) return 0; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n", drm_dp_tunnel_name(intel_dp->tunnel), encoder->base.base.id, encoder->base.name, @@ -266,14 +266,15 @@ bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp) */ void intel_dp_tunnel_suspend(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) return; - drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n", + drm_dbg_kms(display->drm, + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n", drm_dp_tunnel_name(intel_dp->tunnel), connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name); @@ -295,7 +296,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool dpcd_updated) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u8 dpcd[DP_RECEIVER_CAP_SIZE]; @@ -307,7 +308,8 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp, intel_dp->tunnel_suspended = false; - drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n", + drm_dbg_kms(display->drm, + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n", drm_dp_tunnel_name(intel_dp->tunnel), connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name); @@ -347,7 +349,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp, return; out_err: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n", drm_dp_tunnel_name(intel_dp->tunnel), connector->base.base.id, connector->base.name, @@ -369,12 +371,12 @@ add_inherited_tunnel(struct intel_atomic_state *state, struct drm_dp_tunnel *tunnel, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct drm_dp_tunnel *old_tunnel; old_tunnel = get_inherited_tunnel(state, crtc); if (old_tunnel) { - drm_WARN_ON(&i915->drm, old_tunnel != tunnel); + drm_WARN_ON(display->drm, old_tunnel != tunnel); return 0; } @@ -394,7 +396,7 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state, struct intel_dp *intel_dp, const struct intel_digital_connector_state *old_conn_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(state); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = to_intel_connector(old_conn_state->base.connector); @@ -422,7 +424,7 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state, 
old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel) return 0; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n", drm_dp_tunnel_name(intel_dp->tunnel), connector->base.base.id, connector->base.name, @@ -441,12 +443,13 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state, */ void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); enum pipe pipe; if (!state->inherited_dp_tunnels) return; - for_each_pipe(to_i915(state->base.dev), pipe) + for_each_pipe(display, pipe) if (state->inherited_dp_tunnels->ref[pipe].tunnel) drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]); @@ -457,7 +460,7 @@ void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *s static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state, struct drm_dp_tunnel *tunnel) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); u32 pipe_mask; int err; @@ -466,7 +469,7 @@ static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *sta if (err) return err; - drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1)); + drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1)); return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask); } @@ -504,7 +507,7 @@ static int check_group_state(struct intel_atomic_state *state, struct intel_connector *connector, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -512,7 +515,7 @@ static int check_group_state(struct intel_atomic_state *state, if (!crtc_state->dp_tunnel_ref.tunnel) return 0; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n", drm_dp_tunnel_name(intel_dp->tunnel), connector->base.base.id, connector->base.name, @@ -583,7 +586,7 @@ int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state, const struct intel_connector *connector, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); int required_rate = intel_dp_config_required_rate(crtc_state); @@ -592,7 +595,7 @@ int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state, if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) return 0; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n", drm_dp_tunnel_name(intel_dp->tunnel), connector->base.base.id, connector->base.name, @@ -708,7 +711,7 @@ static void queue_retry_work(struct intel_atomic_state *state, struct drm_dp_tunnel *tunnel, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_encoder *encoder; encoder = intel_get_crtc_new_encoder(state, crtc_state); @@ -716,7 +719,7 @@ static void 
queue_retry_work(struct intel_atomic_state *state, if (!intel_digital_port_connected(encoder)) return; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n", drm_dp_tunnel_name(tunnel), encoder->base.base.id, @@ -765,7 +768,7 @@ void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state) /** * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager - * @i915: i915 device object + * @display: display device * * Initialize the DP tunnel manager. The tunnel manager will support the * detection/management of DP tunnels on all DP connectors, so the function @@ -773,14 +776,14 @@ void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state) * * Return 0 in case of success, a negative error code otherwise. */ -int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915) +int intel_dp_tunnel_mgr_init(struct intel_display *display) { struct drm_dp_tunnel_mgr *tunnel_mgr; struct drm_connector_list_iter connector_list_iter; struct intel_connector *connector; int dp_connectors = 0; - drm_connector_list_iter_begin(&i915->drm, &connector_list_iter); + drm_connector_list_iter_begin(display->drm, &connector_list_iter); for_each_intel_connector_iter(connector, &connector_list_iter) { if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; @@ -789,23 +792,23 @@ int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915) } drm_connector_list_iter_end(&connector_list_iter); - tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors); + tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors); if (IS_ERR(tunnel_mgr)) return PTR_ERR(tunnel_mgr); - i915->display.dp_tunnel_mgr = tunnel_mgr; + display->dp_tunnel_mgr = tunnel_mgr; return 0; } /** * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state - * @i915: i915 device object + * @display: display device * * Clean up the DP tunnel manager state. 
*/ -void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) +void intel_dp_tunnel_mgr_cleanup(struct intel_display *display) { - drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr); - i915->display.dp_tunnel_mgr = NULL; + drm_dp_tunnel_mgr_destroy(display->dp_tunnel_mgr); + display->dp_tunnel_mgr = NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h index 08b2cba84af2b4..a0c00b7d330360 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h @@ -9,14 +9,13 @@ #include #include -struct drm_i915_private; struct drm_connector_state; struct drm_modeset_acquire_ctx; - struct intel_atomic_state; struct intel_connector; struct intel_crtc; struct intel_crtc_state; +struct intel_display; struct intel_dp; struct intel_encoder; struct intel_link_bw_limits; @@ -53,8 +52,8 @@ int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state, void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state); -int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915); -void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915); +int intel_dp_tunnel_mgr_init(struct intel_display *display); +void intel_dp_tunnel_mgr_cleanup(struct intel_display *display); #else @@ -121,12 +120,12 @@ intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state) } static inline int -intel_dp_tunnel_mgr_init(struct drm_i915_private *i915) +intel_dp_tunnel_mgr_init(struct intel_display *display) { return 0; } -static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {} +static inline void intel_dp_tunnel_mgr_cleanup(struct intel_display *display) {} #endif /* CONFIG_DRM_I915_DP_TUNNEL */ diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index d67d5e2fd570c4..340dfce480b83d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -1823,6 +1823,7 @@ static bool i9xx_has_pps(struct drm_i915_private *dev_priv) void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; @@ -1833,7 +1834,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) /* PLL is protected by panel, make sure we can write it */ if (i9xx_has_pps(dev_priv)) - assert_pps_unlocked(dev_priv, pipe); + assert_pps_unlocked(display, pipe); intel_de_write(dev_priv, FP0(pipe), hw_state->fp0); intel_de_write(dev_priv, FP1(pipe), hw_state->fp1); @@ -2004,6 +2005,7 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state) void vlv_enable_pll(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; @@ -2012,7 +2014,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state) assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ - assert_pps_unlocked(dev_priv, pipe); + assert_pps_unlocked(display, pipe); /* Enable Refclk */ intel_de_write(dev_priv, DPLL(dev_priv, pipe), @@ -2150,6 
+2152,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) void chv_enable_pll(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; @@ -2158,7 +2161,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ - assert_pps_unlocked(dev_priv, pipe); + assert_pps_unlocked(display, pipe); /* Enable Refclk and SSC */ intel_de_write(dev_priv, DPLL(dev_priv, pipe), diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 292d163036b12a..f490b2157828d9 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -3339,6 +3339,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -3379,7 +3380,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, } /* Eliminate DPLLs from consideration if reserved by HTI */ - dpll_mask &= ~intel_hti_dpll_mask(i915); + dpll_mask &= ~intel_hti_dpll_mask(display); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 73a1918e2537ae..3a6d9904482894 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -317,3 +317,7 @@ void intel_dpt_destroy(struct i915_address_space *vm) i915_vm_put(&dpt->vm); } +u64 intel_dpt_offset(struct i915_vma *dpt_vma) +{ + return dpt_vma->node.start; +} diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h index ff18a525bfbe61..1f88b0ee17e7ee 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.h +++ b/drivers/gpu/drm/i915/display/intel_dpt.h @@ -6,6 +6,8 @@ #ifndef __INTEL_DPT_H__ #define __INTEL_DPT_H__ +#include + struct drm_i915_private; struct i915_address_space; @@ -20,5 +22,6 @@ void intel_dpt_suspend(struct drm_i915_private *i915); void intel_dpt_resume(struct drm_i915_private *i915); struct i915_address_space * intel_dpt_create(struct intel_framebuffer *fb); +u64 intel_dpt_offset(struct i915_vma *dpt_vma); #endif /* __INTEL_DPT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 2ab3765f6c0623..da24e041d26994 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "i915_reg.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" @@ -42,7 +43,8 @@ struct intel_dsb { */ unsigned int ins_start_offset; - int dewake_scanline; + u32 chicken; + int hw_dewake_scanline; }; /** @@ -82,6 +84,93 @@ struct intel_dsb { #define DSB_OPCODE_POLL 0xA /* see DSB_REG_VALUE_MASK */ +static bool pre_commit_is_vrr_active(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + 
intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + /* VRR will be enabled afterwards, if necessary */ + if (intel_crtc_needs_modeset(new_crtc_state)) + return false; + + /* VRR will have been disabled during intel_pre_plane_update() */ + return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc); +} + +static const struct intel_crtc_state * +pre_commit_crtc_state(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + /* + * During fastsets/etc. the transcoder is still + * running with the old timings at this point. + */ + if (intel_crtc_needs_modeset(new_crtc_state)) + return new_crtc_state; + else + return old_crtc_state; +} + +static int dsb_vtotal(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc); + + if (pre_commit_is_vrr_active(state, crtc)) + return crtc_state->vrr.vmax; + else + return intel_mode_vtotal(&crtc_state->hw.adjusted_mode); +} + +static int dsb_dewake_scanline_start(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc); + struct drm_i915_private *i915 = to_i915(state->base.dev); + unsigned int latency = skl_watermark_max_latency(i915, 0); + + return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) - + intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency); +} + +static int dsb_dewake_scanline_end(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc); + + return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode); +} + +static int dsb_scanline_to_hw(struct intel_atomic_state *state, + struct intel_crtc *crtc, int scanline) +{ + const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc); + int vtotal = dsb_vtotal(state, crtc); + + return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal; +} + +static u32 dsb_chicken(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + if (pre_commit_is_vrr_active(state, crtc)) + return DSB_SKIP_WAITS_EN | + DSB_CTRL_WAIT_SAFE_WINDOW | + DSB_CTRL_NO_WAIT_VBLANK | + DSB_INST_WAIT_SAFE_WINDOW | + DSB_INST_NO_WAIT_VBLANK; + else + return DSB_SKIP_WAITS_EN; +} + static bool assert_dsb_has_room(struct intel_dsb *dsb) { struct intel_crtc *crtc = dsb->crtc; @@ -281,6 +370,79 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb) intel_dsb_noop(dsb, 4); } +static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb, + u32 opcode, int lower, int upper) +{ + u64 window = ((u64)upper << DSB_SCANLINE_UPPER_SHIFT) | + ((u64)lower << DSB_SCANLINE_LOWER_SHIFT); + + intel_dsb_emit(dsb, lower_32_bits(window), + (opcode << DSB_OPCODE_SHIFT) | + upper_32_bits(window)); +} + +static void intel_dsb_wait_dsl(struct intel_atomic_state *state, + struct intel_dsb *dsb, + int lower_in, int upper_in, + int lower_out, int upper_out) +{ + struct intel_crtc *crtc = dsb->crtc; + + lower_in = dsb_scanline_to_hw(state, crtc, lower_in); + upper_in = dsb_scanline_to_hw(state, crtc, upper_in); + + lower_out = dsb_scanline_to_hw(state, crtc, lower_out); + upper_out = dsb_scanline_to_hw(state, crtc, upper_out); + + if (upper_in >= 
lower_in) + intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_IN, + lower_in, upper_in); + else if (upper_out >= lower_out) + intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT, + lower_out, upper_out); + else + drm_WARN_ON(crtc->base.dev, 1); /* assert_dsl_ok() should have caught it already */ +} + +static void assert_dsl_ok(struct intel_atomic_state *state, + struct intel_dsb *dsb, + int start, int end) +{ + struct intel_crtc *crtc = dsb->crtc; + int vtotal = dsb_vtotal(state, crtc); + + /* + * Waiting for the entire frame doesn't make sense, + * (IN==don't wait, OUT=wait forever). + */ + drm_WARN(crtc->base.dev, (end - start + vtotal) % vtotal == vtotal - 1, + "[CRTC:%d:%s] DSB %d bad scanline window wait: %d-%d (vt=%d)\n", + crtc->base.base.id, crtc->base.name, dsb->id, + start, end, vtotal); +} + +void intel_dsb_wait_scanline_in(struct intel_atomic_state *state, + struct intel_dsb *dsb, + int start, int end) +{ + assert_dsl_ok(state, dsb, start, end); + + intel_dsb_wait_dsl(state, dsb, + start, end, + end + 1, start - 1); +} + +void intel_dsb_wait_scanline_out(struct intel_atomic_state *state, + struct intel_dsb *dsb, + int start, int end) +{ + assert_dsl_ok(state, dsb, start, end); + + intel_dsb_wait_dsl(state, dsb, + end + 1, start - 1, + start, end); +} + static void intel_dsb_align_tail(struct intel_dsb *dsb) { u32 aligned_tail, tail; @@ -302,8 +464,10 @@ void intel_dsb_finish(struct intel_dsb *dsb) /* * DSB_FORCE_DEWAKE remains active even after DSB is * disabled, so make sure to clear it (if set during - * intel_dsb_commit()). + * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as + * well for good measure. */ + intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0); intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id), DSB_FORCE_DEWAKE, 0); @@ -312,35 +476,109 @@ void intel_dsb_finish(struct intel_dsb *dsb) intel_dsb_buffer_flush_map(&dsb->dsb_buf); } -static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state) +static u32 dsb_error_int_status(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - unsigned int latency = skl_watermark_max_latency(i915, 0); - int vblank_start; + u32 errors; - if (crtc_state->vrr.enable) - vblank_start = intel_vrr_vmin_vblank_start(crtc_state); - else - vblank_start = intel_mode_vblank_start(adjusted_mode); + errors = DSB_GTT_FAULT_INT_STATUS | + DSB_RSPTIMEOUT_INT_STATUS | + DSB_POLL_ERR_INT_STATUS; + + /* + * All the non-existing status bits operate as + * normal r/w bits, so any attempt to clear them + * will just end up setting them. Never do that so + * we won't mistake them for actual error interrupts. 
+ */ + if (DISPLAY_VER(display) >= 14) + errors |= DSB_ATS_FAULT_INT_STATUS; - return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency)); + return errors; } -static u32 dsb_chicken(struct intel_crtc *crtc) +static u32 dsb_error_int_en(struct intel_display *display) { - if (crtc->mode_flags & I915_MODE_FLAG_VRR) - return DSB_SKIP_WAITS_EN | - DSB_CTRL_WAIT_SAFE_WINDOW | - DSB_CTRL_NO_WAIT_VBLANK | - DSB_INST_WAIT_SAFE_WINDOW | - DSB_INST_NO_WAIT_VBLANK; - else - return DSB_SKIP_WAITS_EN; + u32 errors; + + errors = DSB_GTT_FAULT_INT_EN | + DSB_RSPTIMEOUT_INT_EN | + DSB_POLL_ERR_INT_EN; + + if (DISPLAY_VER(display) >= 14) + errors |= DSB_ATS_FAULT_INT_EN; + + return errors; +} + +static void _intel_dsb_chain(struct intel_atomic_state *state, + struct intel_dsb *dsb, + struct intel_dsb *chained_dsb, + u32 ctrl) +{ + struct intel_display *display = to_intel_display(state->base.dev); + struct intel_crtc *crtc = dsb->crtc; + enum pipe pipe = crtc->pipe; + u32 tail; + + if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id)) + return; + + tail = chained_dsb->free_pos * 4; + if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES))) + return; + + intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id), + ctrl | DSB_ENABLE); + + intel_dsb_reg_write(dsb, DSB_CHICKEN(pipe, chained_dsb->id), + dsb_chicken(state, crtc)); + + intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id), + dsb_error_int_status(display) | DSB_PROG_INT_STATUS | + dsb_error_int_en(display)); + + if (ctrl & DSB_WAIT_FOR_VBLANK) { + int dewake_scanline = dsb_dewake_scanline_start(state, crtc); + int hw_dewake_scanline = dsb_scanline_to_hw(state, crtc, dewake_scanline); + + intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id), + DSB_ENABLE_DEWAKE | + DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline)); + } + + intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id), + intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf)); + + intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id), + intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail); + + if (ctrl & DSB_WAIT_FOR_VBLANK) { + /* + * Keep DEwake alive via the first DSB, in + * case we're already past dewake_scanline, + * and thus DSB_ENABLE_DEWAKE on the second + * DSB won't do its job. + */ + intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(pipe, dsb->id), + DSB_FORCE_DEWAKE, DSB_FORCE_DEWAKE); + + intel_dsb_wait_scanline_out(state, dsb, + dsb_dewake_scanline_start(state, crtc), + dsb_dewake_scanline_end(state, crtc)); + } +} + +void intel_dsb_chain(struct intel_atomic_state *state, + struct intel_dsb *dsb, + struct intel_dsb *chained_dsb, + bool wait_for_vblank) +{ + _intel_dsb_chain(state, dsb, chained_dsb, + wait_for_vblank ? 
DSB_WAIT_FOR_VBLANK : 0); } static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl, - int dewake_scanline) + int hw_dewake_scanline) { struct intel_crtc *crtc = dsb->crtc; struct intel_display *display = to_intel_display(crtc->base.dev); @@ -361,15 +599,17 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl, ctrl | DSB_ENABLE); intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id), - dsb_chicken(crtc)); + dsb->chicken); + + intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id), + dsb_error_int_status(display) | DSB_PROG_INT_STATUS | + dsb_error_int_en(display)); intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id), intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf)); - if (dewake_scanline >= 0) { - int diff, hw_dewake_scanline; - - hw_dewake_scanline = intel_crtc_scanline_to_hw(crtc, dewake_scanline); + if (hw_dewake_scanline >= 0) { + int diff, position; intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id), DSB_ENABLE_DEWAKE | @@ -379,7 +619,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl, * Force DEwake immediately if we're already past * or close to racing past the target scanline. */ - diff = dewake_scanline - intel_get_crtc_scanline(crtc); + position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK; + + diff = hw_dewake_scanline - position; intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id), (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) | DSB_BLOCK_DEWAKE_EXTENSION); @@ -401,7 +643,7 @@ void intel_dsb_commit(struct intel_dsb *dsb, { _intel_dsb_commit(dsb, wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0, - wait_for_vblank ? dsb->dewake_scanline : -1); + wait_for_vblank ? dsb->hw_dewake_scanline : -1); } void intel_dsb_wait(struct intel_dsb *dsb) @@ -430,6 +672,9 @@ void intel_dsb_wait(struct intel_dsb *dsb) dsb->free_pos = 0; dsb->ins_start_offset = 0; intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0); + + intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id), + dsb_error_int_status(display) | DSB_PROG_INT_STATUS); } /** @@ -451,8 +696,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, unsigned int max_cmds) { struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); intel_wakeref_t wakeref; struct intel_dsb *dsb; unsigned int size; @@ -486,7 +729,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, dsb->size = size / 4; /* in dwords */ dsb->free_pos = 0; dsb->ins_start_offset = 0; - dsb->dewake_scanline = intel_dsb_dewake_scanline(crtc_state); + + dsb->chicken = dsb_chicken(state, crtc); + dsb->hw_dewake_scanline = + dsb_scanline_to_hw(state, crtc, dsb_dewake_scanline_start(state, crtc)); return dsb; @@ -513,3 +759,18 @@ void intel_dsb_cleanup(struct intel_dsb *dsb) intel_dsb_buffer_cleanup(&dsb->dsb_buf); kfree(dsb); } + +void intel_dsb_irq_handler(struct intel_display *display, + enum pipe pipe, enum intel_dsb_id dsb_id) +{ + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(display->drm), pipe); + u32 tmp, errors; + + tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id)); + intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp); + + errors = tmp & dsb_error_int_status(display); + if (errors) + drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n", + crtc->base.base.id, crtc->base.name, dsb_id, errors); +} diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index bb42749f2ea43a..c352c12aa59f95 100644 --- 
a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -13,8 +13,11 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_display; struct intel_dsb; +enum pipe; + enum intel_dsb_id { INTEL_DSB_0, INTEL_DSB_1, @@ -36,9 +39,22 @@ void intel_dsb_reg_write_masked(struct intel_dsb *dsb, void intel_dsb_noop(struct intel_dsb *dsb, int count); void intel_dsb_nonpost_start(struct intel_dsb *dsb); void intel_dsb_nonpost_end(struct intel_dsb *dsb); +void intel_dsb_wait_scanline_in(struct intel_atomic_state *state, + struct intel_dsb *dsb, + int lower, int upper); +void intel_dsb_wait_scanline_out(struct intel_atomic_state *state, + struct intel_dsb *dsb, + int lower, int upper); +void intel_dsb_chain(struct intel_atomic_state *state, + struct intel_dsb *dsb, + struct intel_dsb *chained_dsb, + bool wait_for_vblank); void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank); void intel_dsb_wait(struct intel_dsb *dsb); +void intel_dsb_irq_handler(struct intel_display *display, + enum pipe pipe, enum intel_dsb_id dsb_id); + #endif diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index e99c94edfaaebc..e8ba4ccd99d3c7 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -66,7 +66,7 @@ struct intel_dsi { /* number of DSI lanes */ unsigned int lane_count; - /* i2c bus associated with the slave device */ + /* i2c bus associated with the target device */ int i2c_bus_num; /* diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 072ef1d62bdaa8..f0e3be0fe42082 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -32,7 +32,7 @@ #include #include -#include +#include #include #include @@ -56,7 +56,7 @@ #define MIPI_PORT_SHIFT 3 struct i2c_adapter_lookup { - u16 slave_addr; + u16 target_addr; struct intel_dsi *intel_dsi; acpi_handle dev_handle; }; @@ -443,7 +443,7 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data) if (!i2c_acpi_get_i2c_resource(ares, &sb)) return 1; - if (lookup->slave_addr != sb->slave_address) + if (lookup->target_addr != sb->slave_address) return 1; status = acpi_get_handle(lookup->dev_handle, @@ -460,12 +460,12 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data) } static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, - const u16 slave_addr) + const u16 target_addr) { struct drm_device *drm_dev = intel_dsi->base.base.dev; struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev); struct i2c_adapter_lookup lookup = { - .slave_addr = slave_addr, + .target_addr = target_addr, .intel_dsi = intel_dsi, .dev_handle = acpi_device_handle(adev), }; @@ -476,7 +476,7 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, } #else static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, - const u16 slave_addr) + const u16 target_addr) { } #endif @@ -488,17 +488,17 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) struct i2c_msg msg; int ret; u8 vbt_i2c_bus_num = *(data + 2); - u16 slave_addr = *(u16 *)(data + 3); + u16 target_addr = *(u16 *)(data + 3); u8 reg_offset = *(data + 5); u8 payload_size = *(data + 6); u8 *payload_data; - drm_dbg_kms(&i915->drm, "bus %d client-addr 0x%02x reg 0x%02x data %*ph\n", - vbt_i2c_bus_num, slave_addr, reg_offset, payload_size, data + 7); + drm_dbg_kms(&i915->drm, "bus %d target-addr 
0x%02x reg 0x%02x data %*ph\n", + vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7); if (intel_dsi->i2c_bus_num < 0) { intel_dsi->i2c_bus_num = vbt_i2c_bus_num; - i2c_acpi_find_adapter(intel_dsi, slave_addr); + i2c_acpi_find_adapter(intel_dsi, target_addr); } adapter = i2c_get_adapter(intel_dsi->i2c_bus_num); @@ -514,7 +514,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) payload_data[0] = reg_offset; memcpy(&payload_data[1], (data + 7), payload_size); - msg.addr = slave_addr; + msg.addr = target_addr; msg.flags = 0; msg.len = payload_size + 1; msg.buf = payload_data; diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 091824334f26b1..12e7628cbecfb1 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -60,42 +60,42 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_TMDS, .name = "sil164", .port = PORT_C, - .slave_addr = SIL164_ADDR, + .target_addr = SIL164_ADDR, .dev_ops = &sil164_ops, }, { .type = INTEL_DVO_CHIP_TMDS, .name = "ch7xxx", .port = PORT_C, - .slave_addr = CH7xxx_ADDR, + .target_addr = CH7xxx_ADDR, .dev_ops = &ch7xxx_ops, }, { .type = INTEL_DVO_CHIP_TMDS, .name = "ch7xxx", .port = PORT_C, - .slave_addr = 0x75, /* For some ch7010 */ + .target_addr = 0x75, /* For some ch7010 */ .dev_ops = &ch7xxx_ops, }, { .type = INTEL_DVO_CHIP_LVDS, .name = "ivch", .port = PORT_A, - .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ + .target_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ .dev_ops = &ivch_ops, }, { .type = INTEL_DVO_CHIP_TMDS, .name = "tfp410", .port = PORT_C, - .slave_addr = TFP410_ADDR, + .target_addr = TFP410_ADDR, .dev_ops = &tfp410_ops, }, { .type = INTEL_DVO_CHIP_LVDS, .name = "ch7017", .port = PORT_C, - .slave_addr = 0x75, + .target_addr = 0x75, .gpio = GMBUS_PIN_DPB, .dev_ops = &ch7017_ops, }, @@ -103,7 +103,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .type = INTEL_DVO_CHIP_LVDS_NO_FIXED, .name = "ns2501", .port = PORT_B, - .slave_addr = NS2501_ADDR, + .target_addr = NS2501_ADDR, .dev_ops = &ns2501_ops, }, }; diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h index af7b04539b939f..4bf476656b8cd2 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h +++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h @@ -38,7 +38,7 @@ struct intel_dvo_device { enum port port; /* GPIO register used for i2c bus to control this device */ u32 gpio; - int slave_addr; + int target_addr; const struct intel_dvo_dev_ops *dev_ops; void *dev_priv; diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index f23547a88b1fb0..5be7bb43e2e0d0 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -163,6 +163,14 @@ struct intel_modifier_desc { static const struct intel_modifier_desc intel_modifiers[] = { { + .modifier = I915_FORMAT_MOD_4_TILED_LNL_CCS, + .display_ver = { 20, -1 }, + .plane_caps = INTEL_PLANE_CAP_TILING_4, + }, { + .modifier = I915_FORMAT_MOD_4_TILED_BMG_CCS, + .display_ver = { 14, -1 }, + .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_NEED64K_PHYS, + }, { .modifier = I915_FORMAT_MOD_4_TILED_MTL_MC_CCS, .display_ver = { 14, 14 }, .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC, @@ -412,6 +420,24 @@ bool intel_fb_is_mc_ccs_modifier(u64 modifier) INTEL_PLANE_CAP_CCS_MC); } +/** + * intel_fb_needs_64k_phys: 
Check if modifier requires 64k physical placement. + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier requires 64k aligned physical pages. + */ +bool intel_fb_needs_64k_phys(u64 modifier) +{ + const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier); + + if (!md) + return false; + + return plane_caps_contain_any(md->plane_caps, + INTEL_PLANE_CAP_NEED64K_PHYS); +} + static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md, u8 display_ver_from, u8 display_ver_until) { @@ -437,6 +463,14 @@ static bool plane_has_modifier(struct drm_i915_private *i915, HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes) return false; + if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS && + (GRAPHICS_VER(i915) < 20 || !IS_DGFX(i915))) + return false; + + if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS && + (GRAPHICS_VER(i915) < 20 || IS_DGFX(i915))) + return false; + return true; } @@ -653,6 +687,8 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) return 128; else return 512; + case I915_FORMAT_MOD_4_TILED_BMG_CCS: + case I915_FORMAT_MOD_4_TILED_LNL_CCS: case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS: case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC: case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS: diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 6dee0c8b7f2264..10de437e8ef845 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -28,11 +28,13 @@ struct intel_plane_state; #define INTEL_PLANE_CAP_TILING_Y BIT(4) #define INTEL_PLANE_CAP_TILING_Yf BIT(5) #define INTEL_PLANE_CAP_TILING_4 BIT(6) +#define INTEL_PLANE_CAP_NEED64K_PHYS BIT(7) bool intel_fb_is_tiled_modifier(u64 modifier); bool intel_fb_is_ccs_modifier(u64 modifier); bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier); bool intel_fb_is_mc_ccs_modifier(u64 modifier); +bool intel_fb_needs_64k_phys(u64 modifier); bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane); int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 67116c9f14643b..52b79bacef4d3c 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -56,17 +56,18 @@ #include "intel_display_device.h" #include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_display_wa.h" #include "intel_fbc.h" #include "intel_fbc_regs.h" #include "intel_frontbuffer.h" -#define for_each_fbc_id(__dev_priv, __fbc_id) \ +#define for_each_fbc_id(__display, __fbc_id) \ for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \ - for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id)) + for_each_if(DISPLAY_RUNTIME_INFO(__display)->fbc_mask & BIT(__fbc_id)) -#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \ - for_each_fbc_id((__dev_priv), (__fbc_id)) \ - for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)]) +#define for_each_intel_fbc(__display, __fbc, __fbc_id) \ + for_each_fbc_id((__display), (__fbc_id)) \ + for_each_if((__fbc) = (__display)->fbc[(__fbc_id)]) struct intel_fbc_funcs { void (*activate)(struct intel_fbc *fbc); @@ -89,7 +90,7 @@ struct intel_fbc_state { }; struct intel_fbc { - struct drm_i915_private *i915; + struct intel_display *display; const struct intel_fbc_funcs *funcs; /* @@ -139,21 +140,24 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane 
return stride; } +static unsigned int intel_fbc_cfb_cpp(void) +{ + return 4; /* FBC always 4 bytes per pixel */ +} + /* plane stride based cfb stride in bytes, assuming 1:1 compression limit */ -static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) +static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state) { - unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ + unsigned int cpp = intel_fbc_cfb_cpp(); return intel_fbc_plane_stride(plane_state) * cpp; } /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */ -static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state) +static unsigned int skl_fbc_min_cfb_stride(struct intel_display *display, + unsigned int cpp, unsigned int width) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int limit = 4; /* 1:4 compression limit is the worst case */ - unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ - unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16; unsigned int height = 4; /* FBC segment is 4 lines */ unsigned int stride; @@ -164,7 +168,7 @@ static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane * Wa_16011863758: icl+ * Avoid some hardware segment address miscalculation. */ - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) stride += 64; /* @@ -178,40 +182,67 @@ static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane } /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */ -static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) +static unsigned int _intel_fbc_cfb_stride(struct intel_display *display, + unsigned int cpp, unsigned int width, + unsigned int stride) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); - unsigned int stride = _intel_fbc_cfb_stride(plane_state); - /* * At least some of the platforms require each 4 line segment to * be 512 byte aligned. Aligning each line to 512 bytes guarantees * that regardless of the compression limit we choose later. */ - if (DISPLAY_VER(i915) >= 9) - return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state)); + if (DISPLAY_VER(display) >= 9) + return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(display, cpp, width)); else return stride; } -static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state) +static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); - int lines = drm_rect_height(&plane_state->uapi.src) >> 16; + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); + unsigned int stride = intel_fbc_plane_cfb_stride(plane_state); + unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16; + unsigned int cpp = intel_fbc_cfb_cpp(); - if (DISPLAY_VER(i915) == 7) - lines = min(lines, 2048); - else if (DISPLAY_VER(i915) >= 8) - lines = min(lines, 2560); + return _intel_fbc_cfb_stride(display, cpp, width, stride); +} - return lines * intel_fbc_cfb_stride(plane_state); +/* + * Maximum height the hardware will compress, on HSW+ + * additional lines (up to the actual plane height) will + * remain uncompressed. 
+ */ +static unsigned int intel_fbc_max_cfb_height(struct intel_display *display) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 8) + return 2560; + else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) + return 2048; + else + return 1536; +} + +static unsigned int _intel_fbc_cfb_size(struct intel_display *display, + unsigned int height, unsigned int stride) +{ + return min(height, intel_fbc_max_cfb_height(display)) * stride; +} + +static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state) +{ + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); + unsigned int height = drm_rect_height(&plane_state->uapi.src) >> 16; + + return _intel_fbc_cfb_size(display, height, intel_fbc_cfb_stride(plane_state)); } static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state); - unsigned int stride = _intel_fbc_cfb_stride(plane_state); + unsigned int stride = intel_fbc_plane_cfb_stride(plane_state); const struct drm_framebuffer *fb = plane_state->hw.fb; /* @@ -222,23 +253,31 @@ static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_s * we always need to use the override there. */ if (stride != stride_aligned || - (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR)) + (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR)) return stride_aligned * 4 / 64; return 0; } +static bool intel_fbc_has_fences(struct intel_display *display) +{ + struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm); + + return intel_gt_support_legacy_fencing(to_gt(i915)); +} + static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); unsigned int cfb_stride; u32 fbc_ctl; cfb_stride = fbc_state->cfb_stride / fbc->limit; /* FBC_CTL wants 32B or 64B units */ - if (DISPLAY_VER(i915) == 2) + if (DISPLAY_VER(display) == 2) cfb_stride = (cfb_stride / 32) - 1; else cfb_stride = (cfb_stride / 64) - 1; @@ -272,21 +311,21 @@ static u32 i965_fbc_ctl2(struct intel_fbc *fbc) static void i8xx_fbc_deactivate(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; u32 fbc_ctl; /* Disable compression */ - fbc_ctl = intel_de_read(i915, FBC_CONTROL); + fbc_ctl = intel_de_read(display, FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) return; fbc_ctl &= ~FBC_CTL_EN; - intel_de_write(i915, FBC_CONTROL, fbc_ctl); + intel_de_write(display, FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (intel_de_wait_for_clear(i915, FBC_STATUS, + if (intel_de_wait_for_clear(display, FBC_STATUS, FBC_STAT_COMPRESSING, 10)) { - drm_dbg_kms(&i915->drm, "FBC idle timed out\n"); + drm_dbg_kms(display->drm, "FBC idle timed out\n"); return; } } @@ -294,32 +333,32 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc) static void i8xx_fbc_activate(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; int i; /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) - 
intel_de_write(i915, FBC_TAG(i), 0); + intel_de_write(display, FBC_TAG(i), 0); - if (DISPLAY_VER(i915) == 4) { - intel_de_write(i915, FBC_CONTROL2, + if (DISPLAY_VER(display) == 4) { + intel_de_write(display, FBC_CONTROL2, i965_fbc_ctl2(fbc)); - intel_de_write(i915, FBC_FENCE_OFF, + intel_de_write(display, FBC_FENCE_OFF, fbc_state->fence_y_offset); } - intel_de_write(i915, FBC_CONTROL, + intel_de_write(display, FBC_CONTROL, FBC_CTL_EN | i8xx_fbc_ctl(fbc)); } static bool i8xx_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN; + return intel_de_read(fbc->display, FBC_CONTROL) & FBC_CTL_EN; } static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, FBC_STATUS) & + return intel_de_read(fbc->display, FBC_STATUS) & (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); } @@ -327,7 +366,7 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc) { struct intel_fbc_state *fbc_state = &fbc->state; enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; - struct drm_i915_private *dev_priv = fbc->i915; + struct drm_i915_private *dev_priv = to_i915(fbc->display->drm); intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane), intel_de_read_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane))); @@ -335,13 +374,14 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc) static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, range_overflows_end_t(u64, i915_gem_stolen_area_address(i915), i915_gem_stolen_node_offset(&fbc->compressed_fb), U32_MAX)); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, range_overflows_end_t(u64, i915_gem_stolen_area_address(i915), i915_gem_stolen_node_offset(&fbc->compressed_llb), U32_MAX)); @@ -364,7 +404,7 @@ static void i965_fbc_nuke(struct intel_fbc *fbc) { struct intel_fbc_state *fbc_state = &fbc->state; enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; - struct drm_i915_private *dev_priv = fbc->i915; + struct drm_i915_private *dev_priv = to_i915(fbc->display->drm); intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane), intel_de_read_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane))); @@ -397,7 +437,8 @@ static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc) static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); u32 dpfc_ctl; dpfc_ctl = g4x_dpfc_ctl_limit(fbc) | @@ -409,7 +450,7 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) if (fbc_state->fence_id >= 0) { dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X; - if (DISPLAY_VER(i915) < 6) + if (DISPLAY_VER(display) < 6) dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id); } @@ -419,43 +460,43 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) static void g4x_fbc_activate(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; - intel_de_write(i915, DPFC_FENCE_YOFF, + intel_de_write(display, DPFC_FENCE_YOFF, fbc_state->fence_y_offset); - intel_de_write(i915, DPFC_CONTROL, + intel_de_write(display, DPFC_CONTROL, DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } static void g4x_fbc_deactivate(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct 
intel_display *display = fbc->display; u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = intel_de_read(i915, DPFC_CONTROL); + dpfc_ctl = intel_de_read(display, DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(i915, DPFC_CONTROL, dpfc_ctl); + intel_de_write(display, DPFC_CONTROL, dpfc_ctl); } } static bool g4x_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN; + return intel_de_read(fbc->display, DPFC_CONTROL) & DPFC_CTL_EN; } static bool g4x_fbc_is_compressing(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK; + return intel_de_read(fbc->display, DPFC_STATUS) & DPFC_COMP_SEG_MASK; } static void g4x_fbc_program_cfb(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; - intel_de_write(i915, DPFC_CB_BASE, + intel_de_write(display, DPFC_CB_BASE, i915_gem_stolen_node_offset(&fbc->compressed_fb)); } @@ -471,43 +512,43 @@ static const struct intel_fbc_funcs g4x_fbc_funcs = { static void ilk_fbc_activate(struct intel_fbc *fbc) { struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; - intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id), + intel_de_write(display, ILK_DPFC_FENCE_YOFF(fbc->id), fbc_state->fence_y_offset); - intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), + intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } static void ilk_fbc_deactivate(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id)); + dpfc_ctl = intel_de_read(display, ILK_DPFC_CONTROL(fbc->id)); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); + intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); } } static bool ilk_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN; + return intel_de_read(fbc->display, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN; } static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK; + return intel_de_read(fbc->display, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK; } static void ilk_fbc_program_cfb(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; - intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), + intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id), i915_gem_stolen_node_offset(&fbc->compressed_fb)); } @@ -523,14 +564,14 @@ static const struct intel_fbc_funcs ilk_fbc_funcs = { static void snb_fbc_program_fence(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; u32 ctl = 0; if (fbc_state->fence_id >= 0) ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id); - intel_de_write(i915, SNB_DPFC_CTL_SA, ctl); - intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset); + intel_de_write(display, SNB_DPFC_CTL_SA, ctl); + intel_de_write(display, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset); } static void snb_fbc_activate(struct intel_fbc *fbc) @@ -542,10 +583,10 @@ static void 
snb_fbc_activate(struct intel_fbc *fbc) static void snb_fbc_nuke(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; - intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE); - intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id)); + intel_de_write(display, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE); + intel_de_posting_read(display, MSG_FBC_REND_STATE(fbc->id)); } static const struct intel_fbc_funcs snb_fbc_funcs = { @@ -560,20 +601,20 @@ static const struct intel_fbc_funcs snb_fbc_funcs = { static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; u32 val = 0; if (fbc_state->override_cfb_stride) val |= FBC_STRIDE_OVERRIDE | FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); - intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val); + intel_de_write(display, GLK_FBC_STRIDE(fbc->id), val); } static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; u32 val = 0; /* Display WA #0529: skl, kbl, bxt. */ @@ -581,7 +622,7 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) val |= CHICKEN_FBC_STRIDE_OVERRIDE | CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); - intel_de_rmw(i915, CHICKEN_MISC_4, + intel_de_rmw(display, CHICKEN_MISC_4, CHICKEN_FBC_STRIDE_OVERRIDE | CHICKEN_FBC_STRIDE_MASK, val); } @@ -589,7 +630,8 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); u32 dpfc_ctl; dpfc_ctl = g4x_dpfc_ctl_limit(fbc); @@ -597,7 +639,7 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) if (IS_IVYBRIDGE(i915)) dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane); - if (DISPLAY_VER(i915) >= 20) + if (DISPLAY_VER(display) >= 20) dpfc_ctl |= DPFC_CTL_PLANE_BINDING(fbc_state->plane->id); if (fbc_state->fence_id >= 0) @@ -611,35 +653,35 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) static void ivb_fbc_activate(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; u32 dpfc_ctl; - if (DISPLAY_VER(i915) >= 10) + if (DISPLAY_VER(display) >= 10) glk_fbc_program_cfb_stride(fbc); - else if (DISPLAY_VER(i915) == 9) + else if (DISPLAY_VER(display) == 9) skl_fbc_program_cfb_stride(fbc); - if (intel_gt_support_legacy_fencing(to_gt(i915))) + if (intel_fbc_has_fences(display)) snb_fbc_program_fence(fbc); /* wa_14019417088 Alternative WA*/ dpfc_ctl = ivb_dpfc_ctl(fbc); - if (DISPLAY_VER(i915) >= 20) - intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); + if (DISPLAY_VER(display) >= 20) + intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); - intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), + intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_EN | dpfc_ctl); } static bool ivb_fbc_is_compressing(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB; + return intel_de_read(fbc->display, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB; } static void ivb_fbc_set_false_color(struct intel_fbc *fbc, 
bool enable) { - intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id), + intel_de_rmw(fbc->display, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0); } @@ -684,10 +726,10 @@ static bool intel_fbc_is_compressing(struct intel_fbc *fbc) static void intel_fbc_nuke(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; lockdep_assert_held(&fbc->lock); - drm_WARN_ON(&i915->drm, fbc->flip_pending); + drm_WARN_ON(display->drm, fbc->flip_pending); trace_intel_fbc_nuke(fbc->state.plane); @@ -714,16 +756,19 @@ static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason) fbc->no_fbc_reason = reason; } -static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915) +static u64 intel_fbc_cfb_base_max(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) return BIT_ULL(28); else return BIT_ULL(32); } -static u64 intel_fbc_stolen_end(struct drm_i915_private *i915) +static u64 intel_fbc_stolen_end(struct intel_display *display) { + struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm); u64 end; /* The FBC hardware for BDW/SKL doesn't have access to the stolen @@ -731,12 +776,12 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915) * If we enable FBC using a CFB on that memory range we'll get FIFO * underruns, even if that range is not reserved by the BIOS. */ if (IS_BROADWELL(i915) || - (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915))) + (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915))) end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024; else end = U64_MAX; - return min(end, intel_fbc_cfb_base_max(i915)); + return min(end, intel_fbc_cfb_base_max(display)); } static int intel_fbc_min_limit(const struct intel_plane_state *plane_state) @@ -744,8 +789,10 @@ static int intel_fbc_min_limit(const struct intel_plane_state *plane_state) return plane_state->hw.fb->format->cpp[0] == 2 ? 
2 : 1; } -static int intel_fbc_max_limit(struct drm_i915_private *i915) +static int intel_fbc_max_limit(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + /* WaFbcOnly1to1Ratio:ctg */ if (IS_G4X(i915)) return 1; @@ -760,8 +807,9 @@ static int intel_fbc_max_limit(struct drm_i915_private *i915) static int find_compression_limit(struct intel_fbc *fbc, unsigned int size, int min_limit) { - struct drm_i915_private *i915 = fbc->i915; - u64 end = intel_fbc_stolen_end(i915); + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); + u64 end = intel_fbc_stolen_end(display); int ret, limit = min_limit; size /= limit; @@ -772,7 +820,7 @@ static int find_compression_limit(struct intel_fbc *fbc, if (ret == 0) return limit; - for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) { + for (; limit <= intel_fbc_max_limit(display); limit <<= 1) { ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size >>= 1, 4096, 0, end); if (ret == 0) @@ -785,15 +833,16 @@ static int find_compression_limit(struct intel_fbc *fbc, static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, unsigned int size, int min_limit) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); int ret; - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, i915_gem_stolen_node_allocated(&fbc->compressed_fb)); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, i915_gem_stolen_node_allocated(&fbc->compressed_llb)); - if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) { + if (DISPLAY_VER(display) < 5 && !IS_G4X(i915)) { ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb, 4096, 4096); if (ret) @@ -804,12 +853,12 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, if (!ret) goto err_llb; else if (ret > min_limit) - drm_info_once(&i915->drm, + drm_info_once(display->drm, "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); fbc->limit = ret; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit); return 0; @@ -819,7 +868,8 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); err: if (i915_gem_stolen_initialized(i915)) - drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + drm_info_once(display->drm, + "not enough stolen space for compressed buffer (need %d more bytes), disabling. 
Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } @@ -830,14 +880,15 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc) static void intel_fbc_program_workarounds(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) { /* * WaFbcHighMemBwCorruptionAvoidance:skl,bxt * Display WA #0883: skl,bxt */ - intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id), + intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 0, DPFC_DISABLE_DUMMY0); } @@ -847,24 +898,25 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc) * WaFbcNukeOnHostModify:skl,kbl,cfl * Display WA #0873: skl,kbl,cfl */ - intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id), + intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 0, DPFC_NUKE_ON_ANY_MODIFICATION); } /* Wa_1409120013:icl,jsl,tgl,dg1 */ - if (IS_DISPLAY_VER(i915, 11, 12)) - intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id), + if (IS_DISPLAY_VER(display, 11, 12)) + intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 0, DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */ - if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915)) - intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id), + if (DISPLAY_VER(display) >= 11 && !IS_DG2(i915)) + intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION); } static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); if (WARN_ON(intel_fbc_hw_is_active(fbc))) return; @@ -875,12 +927,12 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) i915_gem_stolen_remove_node(i915, &fbc->compressed_fb); } -void intel_fbc_cleanup(struct drm_i915_private *i915) +void intel_fbc_cleanup(struct intel_display *display) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; - for_each_intel_fbc(i915, fbc, fbc_id) { + for_each_intel_fbc(display, fbc, fbc_id) { mutex_lock(&fbc->lock); __intel_fbc_cleanup_cfb(fbc); mutex_unlock(&fbc->lock); @@ -932,15 +984,16 @@ static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state) static bool stride_is_valid(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); + struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) return icl_fbc_stride_is_valid(plane_state); - else if (DISPLAY_VER(i915) >= 9) + else if (DISPLAY_VER(display) >= 9) return skl_fbc_stride_is_valid(plane_state); - else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) + else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) return g4x_fbc_stride_is_valid(plane_state); - else if (DISPLAY_VER(i915) == 4) + else if (DISPLAY_VER(display) == 4) return i965_fbc_stride_is_valid(plane_state); else return i8xx_fbc_stride_is_valid(plane_state); @@ -948,7 +1001,7 @@ static bool stride_is_valid(const struct intel_plane_state *plane_state) static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; switch 
(fb->format->format) { @@ -958,7 +1011,7 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane case DRM_FORMAT_XRGB1555: case DRM_FORMAT_RGB565: /* 16bpp not supported on gen2 */ - if (DISPLAY_VER(i915) == 2) + if (DISPLAY_VER(display) == 2) return false; return true; default: @@ -968,7 +1021,8 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); + struct drm_i915_private *i915 = to_i915(display->drm); const struct drm_framebuffer *fb = plane_state->hw.fb; switch (fb->format->format) { @@ -1003,11 +1057,12 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_ static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); + struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(i915) >= 20) + if (DISPLAY_VER(display) >= 20) return lnl_fbc_pixel_format_is_valid(plane_state); - else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) + else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) return g4x_fbc_pixel_format_is_valid(plane_state); else return i8xx_fbc_pixel_format_is_valid(plane_state); @@ -1037,43 +1092,52 @@ static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_stat static bool rotation_is_valid(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); + struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(i915) >= 9) + if (DISPLAY_VER(display) >= 9) return skl_fbc_rotation_is_valid(plane_state); - else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) + else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) return g4x_fbc_rotation_is_valid(plane_state); else return i8xx_fbc_rotation_is_valid(plane_state); } +static void intel_fbc_max_surface_size(struct intel_display *display, + unsigned int *w, unsigned int *h) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 11) { + *w = 8192; + *h = 4096; + } else if (DISPLAY_VER(display) >= 10) { + *w = 5120; + *h = 4096; + } else if (DISPLAY_VER(display) >= 7) { + *w = 4096; + *h = 4096; + } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) { + *w = 4096; + *h = 2048; + } else { + *w = 2048; + *h = 1536; + } +} + /* * For some reason, the hardware tracking starts looking at whatever we * programmed as the display plane base address register. It does not look at * the X and Y offset registers. That's why we include the src x/y offsets * instead of just looking at the plane size. 
*/ -static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state) +static bool intel_fbc_surface_size_ok(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); unsigned int effective_w, effective_h, max_w, max_h; - if (DISPLAY_VER(i915) >= 11) { - max_w = 8192; - max_h = 4096; - } else if (DISPLAY_VER(i915) >= 10) { - max_w = 5120; - max_h = 4096; - } else if (DISPLAY_VER(i915) >= 7) { - max_w = 4096; - max_h = 4096; - } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { - max_w = 4096; - max_h = 2048; - } else { - max_w = 2048; - max_h = 1536; - } + intel_fbc_max_surface_size(display, &max_w, &max_h); effective_w = plane_state->view.color_plane[0].x + (drm_rect_width(&plane_state->uapi.src) >> 16); @@ -1083,24 +1147,32 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state * return effective_w <= max_w && effective_h <= max_h; } -static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state) +static void intel_fbc_max_plane_size(struct intel_display *display, + unsigned int *w, unsigned int *h) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); - unsigned int w, h, max_w, max_h; + struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(i915) >= 10) { - max_w = 5120; - max_h = 4096; - } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) { - max_w = 4096; - max_h = 4096; - } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { - max_w = 4096; - max_h = 2048; + if (DISPLAY_VER(display) >= 10) { + *w = 5120; + *h = 4096; + } else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(i915)) { + *w = 4096; + *h = 4096; + } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) { + *w = 4096; + *h = 2048; } else { - max_w = 2048; - max_h = 1536; + *w = 2048; + *h = 1536; } +} + +static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state) +{ + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); + unsigned int w, h, max_w, max_h; + + intel_fbc_max_plane_size(display, &max_w, &max_h); w = drm_rect_width(&plane_state->uapi.src) >> 16; h = drm_rect_height(&plane_state->uapi.src) >> 16; @@ -1122,9 +1194,9 @@ static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state) static bool tiling_is_valid(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); - if (DISPLAY_VER(i915) >= 9) + if (DISPLAY_VER(display) >= 9) return skl_fbc_tiling_valid(plane_state); else return i8xx_fbc_tiling_valid(plane_state); @@ -1134,7 +1206,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state = @@ -1152,8 +1224,8 @@ static void intel_fbc_update_state(struct intel_atomic_state *state, fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state); - drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE && - !intel_gt_support_legacy_fencing(to_gt(i915))); + drm_WARN_ON(display->drm, plane_state->flags & PLANE_HAS_FENCE && + 
!intel_fbc_has_fences(display)); if (plane_state->flags & PLANE_HAS_FENCE) fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma); @@ -1167,7 +1239,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state, static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); /* * The use of a CPU fence is one of two ways to detect writes by the @@ -1181,7 +1253,7 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state) * so have no fence associated with it) due to aperture constraints * at the time of pinning. */ - return DISPLAY_VER(i915) >= 9 || + return DISPLAY_VER(display) >= 9 || (plane_state->flags & PLANE_HAS_FENCE && i915_vma_fence_id(plane_state->ggtt_vma) != -1); } @@ -1206,7 +1278,8 @@ static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state) static int intel_fbc_check_plane(struct intel_atomic_state *state, struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state->base.dev); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); const struct drm_framebuffer *fb = plane_state->hw.fb; @@ -1227,7 +1300,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } - if (!i915->display.params.enable_fbc) { + if (!display->params.enable_fbc) { plane_state->no_fbc_reason = "disabled per module param or by default"; return 0; } @@ -1237,6 +1310,11 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } + if (intel_display_needs_wa_16023588340(i915)) { + plane_state->no_fbc_reason = "Wa_16023588340"; + return 0; + } + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) { plane_state->no_fbc_reason = "VT-d enabled"; @@ -1260,15 +1338,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, * Recommendation is to keep this combination disabled * Bspec: 50422 HSD: 14010260002 */ - if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_sel_update && + if (IS_DISPLAY_VER(display, 12, 14) && crtc_state->has_sel_update && !crtc_state->has_panel_replay) { plane_state->no_fbc_reason = "PSR2 enabled"; return 0; } /* Wa_14016291713 */ - if ((IS_DISPLAY_VER(i915, 12, 13) || - IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) && + if ((IS_DISPLAY_VER(display, 12, 13) || + IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) && crtc_state->has_psr && !crtc_state->has_panel_replay) { plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)"; return 0; @@ -1294,7 +1372,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } - if (DISPLAY_VER(i915) < 20 && + if (DISPLAY_VER(display) < 20 && plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && fb->format->has_alpha) { plane_state->no_fbc_reason = "per-pixel alpha not supported"; @@ -1306,7 +1384,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } - if (!intel_fbc_hw_tracking_covers_screen(plane_state)) { + if (!intel_fbc_surface_size_ok(plane_state)) { plane_state->no_fbc_reason = "surface size too big"; return 0; } @@ -1316,14 +1394,14 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, * having a Y offset that isn't 
divisible by 4 causes FIFO underrun * and screen flicker. */ - if (DISPLAY_VER(i915) >= 9 && + if (DISPLAY_VER(display) >= 9 && plane_state->view.color_plane[0].y & 3) { plane_state->no_fbc_reason = "plane start Y offset misaligned"; return 0; } /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ - if (DISPLAY_VER(i915) >= 11 && + if (DISPLAY_VER(display) >= 11 && (plane_state->view.color_plane[0].y + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { plane_state->no_fbc_reason = "plane end Y offset misaligned"; @@ -1399,7 +1477,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state->base.dev); struct intel_fbc *fbc = plane->fbc; bool need_vblank_wait = false; @@ -1425,7 +1503,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state, * and skipping the extra vblank wait before the plane update * if at least one frame has already passed. */ - if (fbc->activated && DISPLAY_VER(i915) >= 10) + if (fbc->activated && DISPLAY_VER(display) >= 10) need_vblank_wait = true; fbc->activated = false; @@ -1459,13 +1537,13 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state, static void __intel_fbc_disable(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; struct intel_plane *plane = fbc->state.plane; lockdep_assert_held(&fbc->lock); - drm_WARN_ON(&i915->drm, fbc->active); + drm_WARN_ON(display->drm, fbc->active); - drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n", + drm_dbg_kms(display->drm, "Disabling FBC on [PLANE:%d:%s]\n", plane->base.base.id, plane->base.name); __intel_fbc_cleanup_cfb(fbc); @@ -1542,7 +1620,7 @@ void intel_fbc_invalidate(struct drm_i915_private *i915, struct intel_fbc *fbc; enum intel_fbc_id fbc_id; - for_each_intel_fbc(i915, fbc, fbc_id) + for_each_intel_fbc(&i915->display, fbc, fbc_id) __intel_fbc_invalidate(fbc, frontbuffer_bits, origin); } @@ -1581,7 +1659,7 @@ void intel_fbc_flush(struct drm_i915_private *i915, struct intel_fbc *fbc; enum intel_fbc_id fbc_id; - for_each_intel_fbc(i915, fbc, fbc_id) + for_each_intel_fbc(&i915->display, fbc, fbc_id) __intel_fbc_flush(fbc, frontbuffer_bits, origin); } @@ -1606,7 +1684,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state->base.dev); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); struct intel_fbc *fbc = plane->fbc; @@ -1625,7 +1703,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state, __intel_fbc_disable(fbc); } - drm_WARN_ON(&i915->drm, fbc->active); + drm_WARN_ON(display->drm, fbc->active); fbc->no_fbc_reason = plane_state->no_fbc_reason; if (fbc->no_fbc_reason) @@ -1647,7 +1725,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state, return; } - drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n", + drm_dbg_kms(display->drm, "Enabling FBC on [PLANE:%d:%s]\n", plane->base.base.id, plane->base.name); fbc->no_fbc_reason = "FBC enabled but not active yet\n"; @@ -1665,10 +1743,10 @@ static void __intel_fbc_enable(struct intel_atomic_state *state, */ void intel_fbc_disable(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display 
*display = to_intel_display(crtc->base.dev); struct intel_plane *plane; - for_each_intel_plane(&i915->drm, plane) { + for_each_intel_plane(display->drm, plane) { struct intel_fbc *fbc = plane->fbc; if (!fbc || plane->pipe != crtc->pipe) @@ -1713,7 +1791,8 @@ void intel_fbc_update(struct intel_atomic_state *state, static void intel_fbc_underrun_work_fn(struct work_struct *work) { struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work); - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); mutex_lock(&fbc->lock); @@ -1721,7 +1800,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) if (fbc->underrun_detected || !fbc->state.plane) goto out; - drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n"); + drm_dbg_kms(display->drm, "Disabling FBC due to FIFO underrun.\n"); fbc->underrun_detected = true; intel_fbc_deactivate(fbc, "FIFO underrun"); @@ -1734,14 +1813,14 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) static void __intel_fbc_reset_underrun(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; cancel_work_sync(&fbc->underrun_work); mutex_lock(&fbc->lock); if (fbc->underrun_detected) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Re-allowing FBC after fifo underrun\n"); fbc->no_fbc_reason = "FIFO underrun cleared"; } @@ -1752,22 +1831,24 @@ static void __intel_fbc_reset_underrun(struct intel_fbc *fbc) /* * intel_fbc_reset_underrun - reset FBC fifo underrun status. - * @i915: the i915 device + * @display: display * * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we * want to re-enable FBC after an underrun to increase test coverage. */ -void intel_fbc_reset_underrun(struct drm_i915_private *i915) +void intel_fbc_reset_underrun(struct intel_display *display) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; - for_each_intel_fbc(i915, fbc, fbc_id) + for_each_intel_fbc(display, fbc, fbc_id) __intel_fbc_reset_underrun(fbc); } static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) { + struct drm_i915_private *i915 = to_i915(fbc->display->drm); + /* * There's no guarantee that underrun_detected won't be set to true * right after this check and before the work is scheduled, but that's @@ -1779,12 +1860,12 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) if (READ_ONCE(fbc->underrun_detected)) return; - queue_work(fbc->i915->unordered_wq, &fbc->underrun_work); + queue_work(i915->unordered_wq, &fbc->underrun_work); } /** * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun - * @i915: i915 device + * @display: display * * Without FBC, most underruns are harmless and don't really cause too many * problems, except for an annoying message on dmesg. With FBC, underruns can @@ -1796,12 +1877,12 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) * * This function is called from the IRQ handler. 
*/ -void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915) +void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; - for_each_intel_fbc(i915, fbc, fbc_id) + for_each_intel_fbc(display, fbc, fbc_id) __intel_fbc_handle_fifo_underrun_irq(fbc); } @@ -1814,15 +1895,17 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915) * space to change the value during runtime without sanitizing it again. IGT * relies on being able to change i915.enable_fbc at runtime. */ -static int intel_sanitize_fbc_option(struct drm_i915_private *i915) +static int intel_sanitize_fbc_option(struct intel_display *display) { - if (i915->display.params.enable_fbc >= 0) - return !!i915->display.params.enable_fbc; + struct drm_i915_private *i915 = to_i915(display->drm); + + if (display->params.enable_fbc >= 0) + return !!display->params.enable_fbc; - if (!HAS_FBC(i915)) + if (!HAS_FBC(display)) return 0; - if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9) + if (IS_BROADWELL(i915) || DISPLAY_VER(display) >= 9) return 1; return 0; @@ -1833,9 +1916,10 @@ void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane) plane->fbc = fbc; } -static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915, +static struct intel_fbc *intel_fbc_create(struct intel_display *display, enum intel_fbc_id fbc_id) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_fbc *fbc; fbc = kzalloc(sizeof(*fbc), GFP_KERNEL); @@ -1843,19 +1927,19 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915, return NULL; fbc->id = fbc_id; - fbc->i915 = i915; + fbc->display = display; INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); mutex_init(&fbc->lock); - if (DISPLAY_VER(i915) >= 7) + if (DISPLAY_VER(display) >= 7) fbc->funcs = &ivb_fbc_funcs; - else if (DISPLAY_VER(i915) == 6) + else if (DISPLAY_VER(display) == 6) fbc->funcs = &snb_fbc_funcs; - else if (DISPLAY_VER(i915) == 5) + else if (DISPLAY_VER(display) == 5) fbc->funcs = &ilk_fbc_funcs; else if (IS_G4X(i915)) fbc->funcs = &g4x_fbc_funcs; - else if (DISPLAY_VER(i915) == 4) + else if (DISPLAY_VER(display) == 4) fbc->funcs = &i965_fbc_funcs; else fbc->funcs = &i8xx_fbc_funcs; @@ -1865,36 +1949,36 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915, /** * intel_fbc_init - Initialize FBC - * @i915: the i915 device + * @display: display * * This function might be called during PM init process. */ -void intel_fbc_init(struct drm_i915_private *i915) +void intel_fbc_init(struct intel_display *display) { enum intel_fbc_id fbc_id; - i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915); - drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", - i915->display.params.enable_fbc); + display->params.enable_fbc = intel_sanitize_fbc_option(display); + drm_dbg_kms(display->drm, "Sanitized enable_fbc value: %d\n", + display->params.enable_fbc); - for_each_fbc_id(i915, fbc_id) - i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id); + for_each_fbc_id(display, fbc_id) + display->fbc[fbc_id] = intel_fbc_create(display, fbc_id); } /** * intel_fbc_sanitize - Sanitize FBC - * @i915: the i915 device + * @display: display * * Make sure FBC is initially disabled since we have no * idea eg. into which parts of stolen it might be scribbling * into. 
*/ -void intel_fbc_sanitize(struct drm_i915_private *i915) +void intel_fbc_sanitize(struct intel_display *display) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; - for_each_intel_fbc(i915, fbc, fbc_id) { + for_each_intel_fbc(display, fbc, fbc_id) { if (intel_fbc_hw_is_active(fbc)) intel_fbc_hw_deactivate(fbc); } @@ -1903,11 +1987,12 @@ void intel_fbc_sanitize(struct drm_i915_private *i915) static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_fbc *fbc = m->private; - struct drm_i915_private *i915 = fbc->i915; + struct intel_display *display = fbc->display; + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_plane *plane; intel_wakeref_t wakeref; - drm_modeset_lock_all(&i915->drm); + drm_modeset_lock_all(display->drm); wakeref = intel_runtime_pm_get(&i915->runtime_pm); mutex_lock(&fbc->lock); @@ -1920,7 +2005,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); } - for_each_intel_plane(&i915->drm, plane) { + for_each_intel_plane(display->drm, plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); @@ -1936,7 +2021,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) mutex_unlock(&fbc->lock); intel_runtime_pm_put(&i915->runtime_pm, wakeref); - drm_modeset_unlock_all(&i915->drm); + drm_modeset_unlock_all(display->drm); return 0; } @@ -1993,12 +2078,12 @@ void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc) } /* FIXME: remove this once igt is on board with per-crtc stuff */ -void intel_fbc_debugfs_register(struct drm_i915_private *i915) +void intel_fbc_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; struct intel_fbc *fbc; - fbc = i915->display.fbc[INTEL_FBC_A]; + fbc = display->fbc[INTEL_FBC_A]; if (fbc) intel_fbc_debugfs_add(fbc, minor->debugfs_root); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index 6720ec8ee8a2be..ceae55458e1449 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -13,6 +13,7 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_display; struct intel_fbc; struct intel_plane; struct intel_plane_state; @@ -31,9 +32,9 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc); -void intel_fbc_init(struct drm_i915_private *dev_priv); -void intel_fbc_cleanup(struct drm_i915_private *dev_priv); -void intel_fbc_sanitize(struct drm_i915_private *dev_priv); +void intel_fbc_init(struct intel_display *display); +void intel_fbc_cleanup(struct intel_display *display); +void intel_fbc_sanitize(struct intel_display *display); void intel_fbc_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_disable(struct intel_crtc *crtc); @@ -43,9 +44,9 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, void intel_fbc_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin); void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane); -void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915); -void intel_fbc_reset_underrun(struct drm_i915_private *i915); +void intel_fbc_handle_fifo_underrun_irq(struct 
intel_display *display); +void intel_fbc_reset_underrun(struct intel_display *display); void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc); -void intel_fbc_debugfs_register(struct drm_i915_private *i915); +void intel_fbc_debugfs_register(struct intel_display *display); #endif /* __INTEL_FBC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index d33befd7994d5d..222cd0e1a2bc85 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -5,6 +5,8 @@ #include +#include + #include "i915_reg.h" #include "intel_atomic.h" #include "intel_crtc.h" @@ -304,7 +306,7 @@ int intel_fdi_link_freq(struct drm_i915_private *i915, bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state) { int pipe_bpp = min(crtc_state->pipe_bpp, - to_bpp_int(crtc_state->max_link_bpp_x16)); + fxp_q4_to_int(crtc_state->max_link_bpp_x16)); pipe_bpp = rounddown(pipe_bpp, 2 * 3); @@ -340,7 +342,7 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc, pipe_config->fdi_lanes = lane; - intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp), + intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp), lane, fdi_dotclock, link_bw, intel_dp_bw_fec_overhead(false), diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index e5e4ca7cc499d0..8949fbb1cc60c9 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -440,7 +440,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); } - intel_fbc_handle_fifo_underrun_irq(dev_priv); + intel_fbc_handle_fifo_underrun_irq(&dev_priv->display); } /** diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 4923c340a0b648..af4576dee92a66 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -83,6 +83,8 @@ static void frontbuffer_flush(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { + struct intel_display *display = &i915->display; + /* Delay flushing when rings are still busy.*/ spin_lock(&i915->display.fb_tracking.lock); frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits; @@ -96,7 +98,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915, might_sleep(); intel_td_flush(i915); intel_drrs_flush(i915, frontbuffer_bits); - intel_psr_flush(i915, frontbuffer_bits, origin); + intel_psr_flush(display, frontbuffer_bits, origin); intel_fbc_flush(i915, frontbuffer_bits, origin); } @@ -172,6 +174,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, unsigned int frontbuffer_bits) { struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); + struct intel_display *display = &i915->display; if (origin == ORIGIN_CS) { spin_lock(&i915->display.fb_tracking.lock); @@ -183,7 +186,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, trace_intel_frontbuffer_invalidate(i915, frontbuffer_bits, origin); might_sleep(); - intel_psr_invalidate(i915, frontbuffer_bits, origin); + intel_psr_invalidate(display, frontbuffer_bits, origin); intel_drrs_invalidate(i915, frontbuffer_bits); intel_fbc_invalidate(i915, frontbuffer_bits, origin); } diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 9c8e1e91ff1c46..6470f75106bd4a 
100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -478,7 +478,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915, /* * HW spec says that 512Bytes in Burst read need special treatment. * But it doesn't talk about other multiple of 256Bytes. And couldn't locate - * an I2C slave, which supports such a lengthy burst read too for experiments. + * an I2C target, which supports such a lengthy burst read too for experiments. * * So until things get clarified on HW support, to avoid the burst read length * in fold of 256Bytes except 512, max burst read length is fixed at 767Bytes. @@ -701,7 +701,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, /* Toggle the Software Clear Interrupt bit. This has the effect * of resetting the GMBUS controller and so clearing the - * BUS_ERROR raised by the slave's NAK. + * BUS_ERROR raised by the target's NAK. */ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT); intel_de_write_fw(i915, GMBUS1(i915), 0); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 3ebe035f382ec9..6980b98792c212 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -42,11 +42,11 @@ intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder, return; if (DISPLAY_VER(dev_priv) >= 14) { - if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER)) + if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER)) intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder), 0, HDCP_LINE_REKEY_DISABLE); - else if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) || - IS_DISPLAY_IP_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER)) + else if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) || + IS_DISPLAY_VER_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER)) intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder), 0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE); @@ -203,11 +203,16 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, /* Is HDCP1.4 capable on Platform and Sink */ bool intel_hdcp_get_capability(struct intel_connector *connector) { - struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port; const struct intel_hdcp_shim *shim = connector->hdcp.shim; bool capable = false; u8 bksv[5]; + if (!intel_attached_encoder(connector)) + return capable; + + dig_port = intel_attached_dig_port(connector); + if (!shim) return capable; @@ -2176,10 +2181,11 @@ static void intel_hdcp_check_work(struct work_struct *work) DRM_HDCP_CHECK_PERIOD_MS); } -static int i915_hdcp_component_bind(struct device *i915_kdev, +static int i915_hdcp_component_bind(struct device *drv_kdev, struct device *mei_kdev, void *data) { - struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); + struct intel_display *display = to_intel_display(drv_kdev); + struct drm_i915_private *i915 = to_i915(display->drm); drm_dbg(&i915->drm, "I915 HDCP comp bind\n"); mutex_lock(&i915->display.hdcp.hdcp_mutex); @@ -2190,10 +2196,11 @@ static int i915_hdcp_component_bind(struct device *i915_kdev, return 0; } -static void i915_hdcp_component_unbind(struct device *i915_kdev, +static void i915_hdcp_component_unbind(struct device *drv_kdev, struct device *mei_kdev, void *data) { - struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); + struct intel_display *display = 
to_intel_display(drv_kdev); + struct drm_i915_private *i915 = to_i915(display->drm); drm_dbg(&i915->drm, "I915 HDCP comp unbind\n"); mutex_lock(&i915->display.hdcp.hdcp_mutex); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c index 6548e71b4c4934..35bdb532bbb3e9 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c @@ -7,6 +7,7 @@ #include #include "i915_drv.h" +#include "intel_display_types.h" #include "intel_hdcp_gsc_message.h" int @@ -15,17 +16,19 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data, { struct wired_cmd_initiate_hdcp2_session_in session_init_in = {}; struct wired_cmd_initiate_hdcp2_session_out session_init_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !ake_data) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); session_init_in.header.api_version = HDCP_API_VERSION; session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; @@ -72,17 +75,19 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev, { struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = {}; struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); verify_rxcert_in.header.api_version = HDCP_API_VERSION; verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; @@ -135,17 +140,19 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data, { struct wired_cmd_ake_send_hprime_in send_hprime_in = {}; struct wired_cmd_ake_send_hprime_out send_hprime_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !rx_hprime) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); send_hprime_in.header.api_version = HDCP_API_VERSION; send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; @@ -183,17 +190,19 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat { struct wired_cmd_ake_send_pairing_info_in pairing_info_in = {}; struct wired_cmd_ake_send_pairing_info_out pairing_info_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !pairing_info) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); pairing_info_in.header.api_version = HDCP_API_VERSION; pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; @@ -234,17 +243,19 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev, { struct wired_cmd_init_locality_check_in lc_init_in = {}; struct wired_cmd_init_locality_check_out lc_init_out = {}; + struct intel_display *display; 
struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !lc_init_data) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); lc_init_in.header.api_version = HDCP_API_VERSION; lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; @@ -280,17 +291,19 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data, { struct wired_cmd_validate_locality_in verify_lprime_in = {}; struct wired_cmd_validate_locality_out verify_lprime_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !rx_lprime) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); verify_lprime_in.header.api_version = HDCP_API_VERSION; verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; @@ -330,17 +343,19 @@ int intel_hdcp_gsc_get_session_key(struct device *dev, { struct wired_cmd_get_session_key_in get_skey_in = {}; struct wired_cmd_get_session_key_out get_skey_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !ske_data) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); get_skey_in.header.api_version = HDCP_API_VERSION; get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; @@ -382,17 +397,19 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev, { struct wired_cmd_verify_repeater_in verify_repeater_in = {}; struct wired_cmd_verify_repeater_out verify_repeater_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !rep_topology || !rep_send_ack || !data) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); verify_repeater_in.header.api_version = HDCP_API_VERSION; verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; @@ -442,6 +459,7 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev, { struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in; struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; size_t cmd_size; @@ -449,11 +467,12 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev, if (!dev || !stream_ready || !data) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); cmd_size = struct_size(verify_mprime_in, streams, data->k); if (cmd_size == SIZE_MAX) @@ -504,17 +523,19 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev, { struct wired_cmd_enable_auth_in enable_auth_in = {}; struct wired_cmd_enable_auth_out enable_auth_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting 
HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); enable_auth_in.header.api_version = HDCP_API_VERSION; enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; @@ -549,17 +570,19 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data) { struct wired_cmd_close_session_in session_close_in = {}; struct wired_cmd_close_session_out session_close_out = {}; + struct intel_display *display; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data) return -EINVAL; - i915 = kdev_to_i915(dev); - if (!i915) { + display = to_intel_display(dev); + if (!display) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } + i915 = to_i915(display->drm); session_close_in.header.api_version = HDCP_API_VERSION; session_close_in.header.command_id = WIRED_CLOSE_SESSION; diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 19498ee455fa34..cd9ee171e0df3d 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -60,30 +60,25 @@ #include "intel_panel.h" #include "intel_snps_phy.h" -inline struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi) -{ - return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev); -} - static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { - struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi); + struct intel_display *display = to_intel_display(intel_hdmi); u32 enabled_bits; - enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; + enabled_bits = HAS_DDI(display) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; - drm_WARN(&dev_priv->drm, - intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits, + drm_WARN(display->drm, + intel_de_read(display, intel_hdmi->hdmi_reg) & enabled_bits, "HDMI port enabled, expecting disabled\n"); } static void -assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv, +assert_hdmi_transcoder_func_disabled(struct intel_display *display, enum transcoder cpu_transcoder) { - drm_WARN(&dev_priv->drm, - intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) & + drm_WARN(display->drm, + intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE, "HDMI transcoder function enabled, expecting disabled\n"); } @@ -158,35 +153,35 @@ static u32 hsw_infoframe_enable(unsigned int type) } static i915_reg_t -hsw_dip_data_reg(struct drm_i915_private *dev_priv, +hsw_dip_data_reg(struct intel_display *display, enum transcoder cpu_transcoder, unsigned int type, int i) { switch (type) { case HDMI_PACKET_TYPE_GAMUT_METADATA: - return HSW_TVIDEO_DIP_GMP_DATA(dev_priv, cpu_transcoder, i); + return HSW_TVIDEO_DIP_GMP_DATA(display, cpu_transcoder, i); case DP_SDP_VSC: - return HSW_TVIDEO_DIP_VSC_DATA(dev_priv, cpu_transcoder, i); + return HSW_TVIDEO_DIP_VSC_DATA(display, cpu_transcoder, i); case DP_SDP_ADAPTIVE_SYNC: - return ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, cpu_transcoder, i); + return ADL_TVIDEO_DIP_AS_SDP_DATA(display, cpu_transcoder, i); case DP_SDP_PPS: - return ICL_VIDEO_DIP_PPS_DATA(dev_priv, cpu_transcoder, i); + return ICL_VIDEO_DIP_PPS_DATA(display, cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_AVI: - return HSW_TVIDEO_DIP_AVI_DATA(dev_priv, cpu_transcoder, i); + return HSW_TVIDEO_DIP_AVI_DATA(display, cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_SPD: - return HSW_TVIDEO_DIP_SPD_DATA(dev_priv, cpu_transcoder, i); + return HSW_TVIDEO_DIP_SPD_DATA(display, cpu_transcoder, i); case 
HDMI_INFOFRAME_TYPE_VENDOR: - return HSW_TVIDEO_DIP_VS_DATA(dev_priv, cpu_transcoder, i); + return HSW_TVIDEO_DIP_VS_DATA(display, cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_DRM: - return GLK_TVIDEO_DIP_DRM_DATA(dev_priv, cpu_transcoder, i); + return GLK_TVIDEO_DIP_DRM_DATA(display, cpu_transcoder, i); default: MISSING_CASE(type); return INVALID_MMIO_REG; } } -static int hsw_dip_data_size(struct drm_i915_private *dev_priv, +static int hsw_dip_data_size(struct intel_display *display, unsigned int type) { switch (type) { @@ -197,7 +192,7 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv, case DP_SDP_PPS: return VIDEO_DIP_PPS_DATA_SIZE; case HDMI_PACKET_TYPE_GAMUT_METADATA: - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) return VIDEO_DIP_GMP_DATA_SIZE; else return VIDEO_DIP_DATA_SIZE; @@ -211,12 +206,12 @@ static void g4x_write_infoframe(struct intel_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { + struct intel_display *display = to_intel_display(encoder); const u32 *data = frame; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL); + u32 val = intel_de_read(display, VIDEO_DIP_CTL); int i; - drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), + drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ @@ -224,22 +219,22 @@ static void g4x_write_infoframe(struct intel_encoder *encoder, val &= ~g4x_infoframe_enable(type); - intel_de_write(dev_priv, VIDEO_DIP_CTL, val); + intel_de_write(display, VIDEO_DIP_CTL, val); for (i = 0; i < len; i += 4) { - intel_de_write(dev_priv, VIDEO_DIP_DATA, *data); + intel_de_write(display, VIDEO_DIP_DATA, *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - intel_de_write(dev_priv, VIDEO_DIP_DATA, 0); + intel_de_write(display, VIDEO_DIP_DATA, 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; - intel_de_write(dev_priv, VIDEO_DIP_CTL, val); - intel_de_posting_read(dev_priv, VIDEO_DIP_CTL); + intel_de_write(display, VIDEO_DIP_CTL, val); + intel_de_posting_read(display, VIDEO_DIP_CTL); } static void g4x_read_infoframe(struct intel_encoder *encoder, @@ -247,22 +242,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder, unsigned int type, void *frame, ssize_t len) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 *data = frame; int i; - intel_de_rmw(dev_priv, VIDEO_DIP_CTL, + intel_de_rmw(display, VIDEO_DIP_CTL, VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) - *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA); + *data++ = intel_de_read(display, VIDEO_DIP_DATA); } static u32 g4x_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL); + struct intel_display *display = to_intel_display(encoder); + u32 val = intel_de_read(display, VIDEO_DIP_CTL); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; @@ -279,14 +274,14 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { + struct intel_display *display = to_intel_display(encoder); const u32 *data = frame; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); int i; - drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), + drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ @@ -294,23 +289,23 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, val &= ~g4x_infoframe_enable(type); - intel_de_write(dev_priv, reg, val); + intel_de_write(display, reg, val); for (i = 0; i < len; i += 4) { - intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), + intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0); + intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); } static void ibx_read_infoframe(struct intel_encoder *encoder, @@ -318,25 +313,25 @@ static void ibx_read_infoframe(struct intel_encoder *encoder, unsigned int type, void *frame, ssize_t len) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 *data = frame; int i; - intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), + intel_de_rmw(display, TVIDEO_DIP_CTL(crtc->pipe), VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) - *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); + *data++ = intel_de_read(display, TVIDEO_DIP_DATA(crtc->pipe)); } static u32 ibx_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; i915_reg_t reg = TVIDEO_DIP_CTL(pipe); - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; @@ -354,14 +349,14 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { + struct intel_display *display = to_intel_display(encoder); const u32 *data = frame; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); int i; - drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), + drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ @@ -372,23 +367,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, if (type != HDMI_INFOFRAME_TYPE_AVI) val &= ~g4x_infoframe_enable(type); - intel_de_write(dev_priv, reg, val); + intel_de_write(display, reg, val); for (i = 0; i < len; i += 4) { - intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), + intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0); + intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); } static void cpt_read_infoframe(struct intel_encoder *encoder, @@ -396,24 +391,24 @@ static void cpt_read_infoframe(struct intel_encoder *encoder, unsigned int type, void *frame, ssize_t len) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 *data = frame; int i; - intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), + intel_de_rmw(display, TVIDEO_DIP_CTL(crtc->pipe), VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) - *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); + *data++ = intel_de_read(display, TVIDEO_DIP_DATA(crtc->pipe)); } static u32 cpt_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; - u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe)); + u32 val = intel_de_read(display, TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; @@ -428,14 +423,14 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { + struct intel_display *display = to_intel_display(encoder); const u32 *data = frame; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe); - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); int i; - drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), + drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ @@ -443,24 +438,24 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, val &= ~g4x_infoframe_enable(type); - intel_de_write(dev_priv, reg, val); + intel_de_write(display, reg, val); for (i = 0; i < len; i += 4) { - intel_de_write(dev_priv, + intel_de_write(display, VLV_TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - intel_de_write(dev_priv, + intel_de_write(display, VLV_TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); } static void vlv_read_infoframe(struct intel_encoder *encoder, @@ -468,25 +463,25 @@ static void vlv_read_infoframe(struct intel_encoder *encoder, unsigned int type, void *frame, ssize_t len) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 *data = frame; int i; - intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), + intel_de_rmw(display, VLV_TVIDEO_DIP_CTL(crtc->pipe), VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) - *data++ = intel_de_read(dev_priv, + *data++ = intel_de_read(display, VLV_TVIDEO_DIP_DATA(crtc->pipe)); } static u32 vlv_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; - u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe)); + u32 val = intel_de_read(display, VLV_TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; @@ -504,75 +499,75 @@ void hsw_write_infoframe(struct intel_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { + struct intel_display *display = to_intel_display(encoder); const u32 *data = frame; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(dev_priv, cpu_transcoder); + i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(display, cpu_transcoder); int data_size; int i; - u32 val = intel_de_read(dev_priv, ctl_reg); + u32 val = intel_de_read(display, ctl_reg); - data_size = hsw_dip_data_size(dev_priv, type); + data_size = hsw_dip_data_size(display, type); - drm_WARN_ON(&dev_priv->drm, len > data_size); + drm_WARN_ON(display->drm, len > data_size); val &= ~hsw_infoframe_enable(type); - intel_de_write(dev_priv, ctl_reg, val); + intel_de_write(display, ctl_reg, val); for (i = 0; i < len; i += 4) { - intel_de_write(dev_priv, - hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2), + intel_de_write(display, + hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < data_size; i += 4) - intel_de_write(dev_priv, - hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2), + intel_de_write(display, + hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2), 0); /* Wa_14013475917 */ - if (!(IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && + if (!(IS_DISPLAY_VER(display, 13, 14) && crtc_state->has_psr && !crtc_state->has_panel_replay && type == DP_SDP_VSC)) val |= hsw_infoframe_enable(type); if (type == DP_SDP_VSC) val |= VSC_DIP_HW_DATA_SW_HEA; - intel_de_write(dev_priv, ctl_reg, val); - intel_de_posting_read(dev_priv, ctl_reg); + intel_de_write(display, ctl_reg, val); + intel_de_posting_read(display, ctl_reg); } void hsw_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, void *frame, ssize_t len) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 *data = frame; int i; for (i = 0; i < len; i += 4) - *data++ = intel_de_read(dev_priv, - hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2)); + *data++ = intel_de_read(display, + hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2)); } static u32 hsw_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val = intel_de_read(dev_priv, - HSW_TVIDEO_DIP_CTL(dev_priv, pipe_config->cpu_transcoder)); + struct intel_display *display = to_intel_display(encoder); + u32 val = intel_de_read(display, + HSW_TVIDEO_DIP_CTL(display, pipe_config->cpu_transcoder)); u32 mask; mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); - if (DISPLAY_VER(dev_priv) >= 10) + if (DISPLAY_VER(display) >= 10) mask |= VIDEO_DIP_ENABLE_DRM_GLK; - if (HAS_AS_SDP(dev_priv)) + if (HAS_AS_SDP(display)) mask |= VIDEO_DIP_ENABLE_AS_ADL; return val & mask; @@ -604,7 +599,7 @@ u32 intel_hdmi_infoframe_enable(unsigned int type) u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 val, ret = 0; int i; @@ -615,7 +610,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) { unsigned int type = infoframe_type_to_idx[i]; - if (HAS_DDI(dev_priv)) { + if (HAS_DDI(display)) { if (val & hsw_infoframe_enable(type)) ret |= BIT(i); } else { @@ -830,11 +825,11 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int ret; - if (DISPLAY_VER(dev_priv) < 10) + if (DISPLAY_VER(display) < 10) return true; if (!crtc_state->has_infoframe) @@ -848,13 +843,13 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder, ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state); if (ret < 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "couldn't set HDR metadata in infoframe\n"); return false; } ret = 
hdmi_drm_infoframe_check(frame); - if (drm_WARN_ON(&dev_priv->drm, ret)) + if (drm_WARN_ON(display->drm, ret)) return false; return true; @@ -865,11 +860,11 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -889,21 +884,21 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, if (!(val & VIDEO_DIP_ENABLE)) return; if (port != (val & VIDEO_DIP_PORT_MASK)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "video DIP still enabled on port %c\n", (val & VIDEO_DIP_PORT_MASK) >> 29); return; } val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); return; } if (port != (val & VIDEO_DIP_PORT_MASK)) { if (val & VIDEO_DIP_ENABLE) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "video DIP already enabled on port %c\n", (val & VIDEO_DIP_PORT_MASK) >> 29); return; @@ -916,8 +911,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -977,6 +972,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg; @@ -985,8 +981,8 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0) return false; - if (HAS_DDI(dev_priv)) - reg = HSW_TVIDEO_DIP_GCP(dev_priv, crtc_state->cpu_transcoder); + if (HAS_DDI(display)) + reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); else if (HAS_PCH_SPLIT(dev_priv)) @@ -994,7 +990,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, else return false; - intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp); + intel_de_write(display, reg, crtc_state->infoframes.gcp); return true; } @@ -1002,6 +998,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg; @@ -1010,8 +1007,8 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, 
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0) return; - if (HAS_DDI(dev_priv)) - reg = HSW_TVIDEO_DIP_GCP(dev_priv, crtc_state->cpu_transcoder); + if (HAS_DDI(display)) + reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); else if (HAS_PCH_SPLIT(dev_priv)) @@ -1019,7 +1016,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, else return; - crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg); + crtc_state->infoframes.gcp = intel_de_read(display, reg); } static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder, @@ -1049,12 +1046,12 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -1068,13 +1065,13 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); return; } if (port != (val & VIDEO_DIP_PORT_MASK)) { - drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE, + drm_WARN(display->drm, val & VIDEO_DIP_ENABLE, "DIP already enabled on port %c\n", (val & VIDEO_DIP_PORT_MASK) >> 29); val &= ~VIDEO_DIP_PORT_MASK; @@ -1089,8 +1086,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -1108,11 +1105,11 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); assert_hdmi_port_disabled(intel_hdmi); @@ -1125,8 +1122,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); return; } @@ -1138,8 +1135,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, 
conn_state)) val |= VIDEO_DIP_ENABLE_GCP; - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -1157,11 +1154,11 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe); - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -1175,13 +1172,13 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); return; } if (port != (val & VIDEO_DIP_PORT_MASK)) { - drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE, + drm_WARN(display->drm, val & VIDEO_DIP_ENABLE, "DIP already enabled on port %c\n", (val & VIDEO_DIP_PORT_MASK) >> 29); val &= ~VIDEO_DIP_PORT_MASK; @@ -1196,8 +1193,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -1215,12 +1212,12 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv, + struct intel_display *display = to_intel_display(encoder); + i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display, crtc_state->cpu_transcoder); - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); - assert_hdmi_transcoder_func_disabled(dev_priv, + assert_hdmi_transcoder_func_disabled(display, crtc_state->cpu_transcoder); val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | @@ -1229,16 +1226,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL); if (!enable) { - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); return; } if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP_HSW; - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -1256,16 +1253,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) { - struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); + struct intel_display *display = 
to_intel_display(hdmi); struct i2c_adapter *ddc = hdmi->attached_connector->base.ddc; if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) return; - drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n", + drm_dbg_kms(display->drm, "%s DP dual mode adaptor TMDS output\n", enable ? "Enabling" : "Disabling"); - drm_dp_dual_mode_set_tmds_output(&dev_priv->drm, + drm_dp_dual_mode_set_tmds_output(display->drm, hdmi->dp_dual_mode.type, ddc, enable); } @@ -1331,7 +1328,7 @@ static int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port, u8 *an) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_hdmi *hdmi = &dig_port->hdmi; struct i2c_adapter *ddc = hdmi->attached_connector->base.ddc; int ret; @@ -1339,14 +1336,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port, ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an, DRM_HDCP_AN_LEN); if (ret) { - drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n", + drm_dbg_kms(display->drm, "Write An over DDC failed (%d)\n", ret); return ret; } ret = intel_gmbus_output_aksv(ddc); if (ret < 0) { - drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret); + drm_dbg_kms(display->drm, "Failed to output aksv (%d)\n", ret); return ret; } return 0; @@ -1355,13 +1352,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port, static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port, u8 *bksv) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret) - drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n", + drm_dbg_kms(display->drm, "Read Bksv over DDC failed (%d)\n", ret); return ret; } @@ -1370,13 +1367,14 @@ static int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port, u8 *bstatus) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret) - drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n", + drm_dbg_kms(display->drm, + "Read bstatus over DDC failed (%d)\n", ret); return ret; } @@ -1385,13 +1383,13 @@ static int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port, bool *repeater_present) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret; u8 val; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { - drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n", + drm_dbg_kms(display->drm, "Read bcaps over DDC failed (%d)\n", ret); return ret; } @@ -1403,13 +1401,13 @@ static int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port, u8 *ri_prime) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret) - drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n", + drm_dbg_kms(display->drm, "Read Ri' over DDC failed (%d)\n", ret); return ret; } @@ -1418,13 +1416,13 @@ static int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, bool *ksv_ready) { - struct 
drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret; u8 val; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { - drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n", + drm_dbg_kms(display->drm, "Read bcaps over DDC failed (%d)\n", ret); return ret; } @@ -1436,12 +1434,12 @@ static int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, int num_downstream, u8 *ksv_fifo) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO, ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read ksv fifo over DDC failed (%d)\n", ret); return ret; } @@ -1452,7 +1450,7 @@ static int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, int i, u32 *part) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) @@ -1461,7 +1459,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret) - drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n", + drm_dbg_kms(display->drm, + "Read V'[%d] over DDC failed (%d)\n", i, ret); return ret; } @@ -1469,15 +1468,15 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, static int kbl_repositioning_enc_en_signal(struct intel_connector *connector, enum transcoder cpu_transcoder) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc); u32 scanline; int ret; for (;;) { - scanline = intel_de_read(dev_priv, - PIPEDSL(dev_priv, crtc->pipe)); + scanline = intel_de_read(display, + PIPEDSL(display, crtc->pipe)); if (scanline > 100 && scanline < 200) break; usleep_range(25, 50); @@ -1486,7 +1485,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector, ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder, false, TRANS_DDI_HDCP_SIGNALLING); if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Disable HDCP signalling failed (%d)\n", ret); return ret; } @@ -1494,7 +1493,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector, ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder, true, TRANS_DDI_HDCP_SIGNALLING); if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Enable HDCP signalling failed (%d)\n", ret); return ret; } @@ -1507,6 +1506,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, enum transcoder cpu_transcoder, bool enable) { + struct intel_display *display = to_intel_display(dig_port); struct intel_hdmi *hdmi = &dig_port->hdmi; struct intel_connector *connector = hdmi->attached_connector; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1519,7 +1519,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, cpu_transcoder, enable, TRANS_DDI_HDCP_SIGNALLING); if (ret) { - drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n", + drm_err(display->drm, "%s HDCP signalling failed 
(%d)\n", enable ? "Enable" : "Disable", ret); return ret; } @@ -1539,6 +1539,7 @@ static bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port, struct intel_connector *connector) { + struct intel_display *display = to_intel_display(dig_port); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; @@ -1558,9 +1559,9 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port, if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) == (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { - drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n", - intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, - port))); + drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n", + intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, + port))); return false; } return true; @@ -1570,14 +1571,14 @@ static bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int retry; for (retry = 0; retry < 3; retry++) if (intel_hdmi_hdcp_check_link_once(dig_port, connector)) return true; - drm_err(&i915->drm, "Link check failed\n"); + drm_err(display->drm, "Link check failed\n"); return false; } @@ -1628,13 +1629,13 @@ hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, u8 msg_id, bool *msg_ready, ssize_t *msg_sz) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; int ret; ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status); if (ret < 0) { - drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n", + drm_dbg_kms(display->drm, "rx_status read failed. Err %d\n", ret); return ret; } @@ -1655,7 +1656,7 @@ static ssize_t intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, u8 msg_id, bool paired) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); bool msg_ready = false; int timeout, ret; ssize_t msg_sz = 0; @@ -1670,7 +1671,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, !ret && msg_ready && msg_sz, timeout * 1000, 1000, 5 * 1000); if (ret) - drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n", + drm_dbg_kms(display->drm, + "msg_id: %d, ret: %d, timeout: %d\n", msg_id, ret, timeout); return ret ? ret : msg_sz; @@ -1691,8 +1693,8 @@ static int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector, u8 msg_id, void *buf, size_t size) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_hdmi *hdmi = &dig_port->hdmi; struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp; unsigned int offset; @@ -1708,7 +1710,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector, * available buffer. 
*/ if (ret > size) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "msg_sz(%zd) is more than exp size(%zu)\n", ret, size); return -EINVAL; @@ -1717,7 +1719,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector, offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET; ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret); if (ret) - drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n", + drm_dbg_kms(display->drm, "Failed to read msg_id: %d(%zd)\n", msg_id, ret); return ret; @@ -1783,16 +1785,17 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int max_tmds_clock, vbt_max_tmds_clock; - if (DISPLAY_VER(dev_priv) >= 13 || IS_ALDERLAKE_S(dev_priv)) + if (DISPLAY_VER(display) >= 13 || IS_ALDERLAKE_S(dev_priv)) max_tmds_clock = 600000; - else if (DISPLAY_VER(dev_priv) >= 10) + else if (DISPLAY_VER(display) >= 10) max_tmds_clock = 594000; - else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) + else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) max_tmds_clock = 300000; - else if (DISPLAY_VER(dev_priv) >= 5) + else if (DISPLAY_VER(display) >= 5) max_tmds_clock = 225000; else max_tmds_clock = 165000; @@ -1848,7 +1851,8 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, int clock, bool respect_downstream_limits, bool has_hdmi_sink) { - struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); + struct intel_display *display = to_intel_display(hdmi); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base; if (clock < 25000) @@ -1885,7 +1889,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, * FIXME: We will hopefully get an algorithmic way of programming * the MPLLB for HDMI in the future. 
*/ - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock); else if (IS_DG2(dev_priv)) return intel_snps_phy_check_hdmi_link_rate(clock); @@ -1908,13 +1912,13 @@ int intel_hdmi_tmds_clock(int clock, int bpc, return DIV_ROUND_CLOSEST(clock * bpc, 8); } -static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc) +static bool intel_hdmi_source_bpc_possible(struct intel_display *display, int bpc) { switch (bpc) { case 12: - return !HAS_GMCH(i915); + return !HAS_GMCH(display); case 10: - return DISPLAY_VER(i915) >= 11; + return DISPLAY_VER(display) >= 11; case 8: return true; default: @@ -1960,7 +1964,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, bool has_hdmi_sink, enum intel_output_format sink_format) { - struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_display *display = to_intel_display(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); enum drm_mode_status status = MODE_OK; int bpc; @@ -1973,7 +1977,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, for (bpc = 12; bpc >= 8; bpc -= 2) { int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); - if (!intel_hdmi_source_bpc_possible(i915, bpc)) + if (!intel_hdmi_source_bpc_possible(display, bpc)) continue; if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format)) @@ -1985,7 +1989,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, } /* can never happen */ - drm_WARN_ON(&i915->drm, status == MODE_OK); + drm_WARN_ON(display->drm, status == MODE_OK); return status; } @@ -1994,8 +1998,9 @@ static enum drm_mode_status intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct intel_display *display = to_intel_display(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); - struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum drm_mode_status status; int clock = mode->clock; int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq; @@ -2073,17 +2078,16 @@ bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc) { - struct drm_i915_private *dev_priv = - to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - if (!intel_hdmi_source_bpc_possible(dev_priv, bpc)) + if (!intel_hdmi_source_bpc_possible(display, bpc)) return false; /* Display Wa_1405510057:icl,ehl */ if (intel_hdmi_is_ycbcr420(crtc_state) && - bpc == 10 && DISPLAY_VER(dev_priv) == 11 && + bpc == 10 && DISPLAY_VER(display) == 11 && (adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start) % 8 == 2) return false; @@ -2130,7 +2134,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, bool respect_downstream_limits) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpc, clock = adjusted_mode->crtc_clock; @@ -2153,7 +2157,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, */ crtc_state->pipe_bpp = 
min(crtc_state->pipe_bpp, bpc * 3); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "picking %d bpc for HDMI output (pipe bpp: %d)\n", bpc, crtc_state->pipe_bpp); @@ -2230,10 +2234,10 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, const struct drm_connector_state *conn_state, bool respect_downstream_limits) { + struct intel_display *display = to_intel_display(encoder); struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; const struct drm_display_info *info = &connector->base.display_info; - struct drm_i915_private *i915 = to_i915(connector->base.dev); bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); int ret; @@ -2241,7 +2245,7 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, intel_hdmi_sink_format(crtc_state, connector, ycbcr_420_only); if (ycbcr_420_only && crtc_state->sink_format != INTEL_OUTPUT_FORMAT_YCBCR420) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; } @@ -2302,7 +2306,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; struct drm_scdc *scdc = &connector->display_info.hdmi.scdc; @@ -2335,7 +2339,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (ret) ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false); if (ret) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "unsupported HDMI clock (%d kHz), rejecting mode\n", pipe_config->hw.adjusted_mode.crtc_clock); return ret; @@ -2370,22 +2374,22 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, conn_state); if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) { - drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n"); + drm_dbg_kms(display->drm, "bad AVI infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) { - drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n"); + drm_dbg_kms(display->drm, "bad SPD infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) { - drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n"); + drm_dbg_kms(display->drm, "bad HDMI infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) { - drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n"); + drm_dbg_kms(display->drm, "bad DRM infoframe\n"); return -EINVAL; } @@ -2418,13 +2422,14 @@ intel_hdmi_unset_edid(struct drm_connector *connector) static void intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base; struct i2c_adapter *ddc = connector->ddc; enum drm_dp_dual_mode_type type; - type = drm_dp_dual_mode_detect(&dev_priv->drm, ddc); + type = drm_dp_dual_mode_detect(display->drm, ddc); /* * Type 1 DVI 
adaptors are not required to implement any @@ -2438,7 +2443,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector) if (type == DRM_DP_DUAL_MODE_UNKNOWN) { if (!connector->force && intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Assuming DP dual mode adaptor presence based on VBT\n"); type = DRM_DP_DUAL_MODE_TYPE1_DVI; } else { @@ -2451,17 +2456,17 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector) hdmi->dp_dual_mode.type = type; hdmi->dp_dual_mode.max_tmds_clock = - drm_dp_dual_mode_max_tmds_clock(&dev_priv->drm, type, ddc); + drm_dp_dual_mode_max_tmds_clock(display->drm, type, ddc); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", drm_dp_get_dual_mode_type_name(type), hdmi->dp_dual_mode.max_tmds_clock); /* Older VBTs are often buggy and can't be trusted :( Play it safe. */ - if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && + if ((DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) && !intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n"); hdmi->dp_dual_mode.max_tmds_clock = 0; } @@ -2470,6 +2475,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector) static bool intel_hdmi_set_edid(struct drm_connector *connector) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct i2c_adapter *ddc = connector->ddc; @@ -2482,7 +2488,7 @@ intel_hdmi_set_edid(struct drm_connector *connector) drm_edid = drm_edid_read_ddc(connector, ddc); if (!drm_edid && !intel_gmbus_is_forced_bit(ddc)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n"); intel_gmbus_force_bit(ddc, true); drm_edid = drm_edid_read_ddc(connector, ddc); @@ -2511,13 +2517,14 @@ intel_hdmi_set_edid(struct drm_connector *connector) static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { + struct intel_display *display = to_intel_display(connector->dev); enum drm_connector_status status = connector_status_disconnected; struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; intel_wakeref_t wakeref; - drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); if (!intel_display_device_enabled(dev_priv)) @@ -2528,7 +2535,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - if (DISPLAY_VER(dev_priv) >= 11 && + if (DISPLAY_VER(display) >= 11 && !intel_digital_port_connected(encoder)) goto out; @@ -2549,9 +2556,10 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) static void intel_hdmi_force(struct drm_connector *connector) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_i915_private *i915 = to_i915(connector->dev); - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); if 
(!intel_display_driver_check_access(i915)) @@ -2608,9 +2616,9 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { static int intel_hdmi_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->dev); + struct intel_display *display = to_intel_display(connector->dev); - if (HAS_DDI(i915)) + if (HAS_DDI(display)) return intel_digital_connector_atomic_check(connector, state); else return g4x_hdmi_connector_atomic_check(connector, state); @@ -2625,7 +2633,7 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_display *display = to_intel_display(intel_hdmi); intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); @@ -2634,10 +2642,10 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_attach_hdmi_colorspace_property(connector); drm_connector_attach_content_type_property(connector); - if (DISPLAY_VER(dev_priv) >= 10) + if (DISPLAY_VER(display) >= 10) drm_connector_attach_hdr_output_metadata_property(connector); - if (!HAS_GMCH(dev_priv)) + if (!HAS_GMCH(display)) drm_connector_attach_max_bpc_property(connector, 8, 12); } @@ -2664,14 +2672,14 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, bool high_tmds_clock_ratio, bool scrambling) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct drm_scrambling *sink_scrambling = &connector->display_info.hdmi.scdc.scrambling; if (!sink_scrambling->supported) return true; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n", connector->base.id, connector->name, str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10); @@ -2752,7 +2760,7 @@ static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder) static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; if (intel_encoder_is_combo(encoder)) @@ -2760,7 +2768,7 @@ static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder) else if (intel_encoder_is_tc(encoder)) return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder); - drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port)); + drm_WARN(display->drm, 1, "Unknown port:%c\n", port_name(port)); return GMBUS_PIN_2_BXT; } @@ -2808,10 +2816,11 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder) static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_encoder_to_phy(encoder); - drm_WARN_ON(&i915->drm, encoder->port == PORT_A); + drm_WARN_ON(display->drm, encoder->port == PORT_A); /* * Pin mapping for GEN9 BC depends on which PCH is present. 
With TGP, @@ -2871,6 +2880,7 @@ static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder) static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u8 ddc_pin; @@ -2880,7 +2890,7 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder) ddc_pin = dg1_encoder_to_ddc_pin(encoder); else if (IS_ROCKETLAKE(dev_priv)) ddc_pin = rkl_encoder_to_ddc_pin(encoder); - else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv)) + else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(dev_priv)) ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder); else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && HAS_PCH_TGP(dev_priv)) @@ -2902,10 +2912,11 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder) static struct intel_encoder * get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; - for_each_intel_encoder(&i915->drm, other) { + for_each_intel_encoder(display->drm, other) { struct intel_connector *connector; if (other == encoder) @@ -2925,6 +2936,7 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; const char *source; @@ -2939,20 +2951,22 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) } if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n", + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] Invalid DDC pin %d\n", encoder->base.base.id, encoder->base.name, ddc_pin); return 0; } other = get_encoder_by_ddc_pin(encoder, ddc_pin); if (other) { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n", + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name, ddc_pin, other->base.base.id, other->base.name); return 0; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Using DDC pin 0x%x (%s)\n", encoder->base.base.id, encoder->base.name, ddc_pin, source); @@ -2962,6 +2976,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) void intel_infoframe_init(struct intel_digital_port *dig_port) { + struct intel_display *display = to_intel_display(dig_port); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); @@ -2975,7 +2990,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port) dig_port->read_infoframe = g4x_read_infoframe; dig_port->set_infoframes = g4x_set_infoframes; dig_port->infoframes_enabled = g4x_infoframes_enabled; - } else if (HAS_DDI(dev_priv)) { + } else if (HAS_DDI(display)) { if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) { dig_port->write_infoframe = lspcon_write_infoframe; dig_port->read_infoframe = lspcon_read_infoframe; @@ -3003,6 +3018,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port) void intel_hdmi_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { + struct intel_display *display = to_intel_display(dig_port); struct drm_connector *connector = &intel_connector->base; struct intel_hdmi *intel_hdmi = &dig_port->hdmi; 
struct intel_encoder *intel_encoder = &dig_port->base; @@ -3012,11 +3028,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, struct cec_connector_info conn_info; u8 ddc_pin; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Adding HDMI connector on [ENCODER:%d:%s]\n", intel_encoder->base.base.id, intel_encoder->base.name); - if (DISPLAY_VER(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A)) + if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A)) return; if (drm_WARN(dev, dig_port->max_lanes < 4, @@ -3036,18 +3052,18 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); - if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(display) < 12) connector->interlace_allowed = true; connector->stereo_allowed = true; - if (DISPLAY_VER(dev_priv) >= 10) + if (DISPLAY_VER(display) >= 10) connector->ycbcr_420_allowed = true; intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_connector->base.polled = intel_connector->polled; - if (HAS_DDI(dev_priv)) + if (HAS_DDI(display)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; @@ -3061,7 +3077,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, int ret = intel_hdcp_init(intel_connector, dig_port, &intel_hdmi_hdcp_shim); if (ret) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "HDCP init failed, skipping.\n"); } @@ -3071,7 +3087,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, cec_notifier_conn_register(dev->dev, port_identifier(port), &conn_info); if (!intel_hdmi->cec_notifier) - drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n"); + drm_dbg_kms(display->drm, "CEC notifier get failed\n"); } /* diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 6b39df38d57ab6..9b97623665c51b 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -58,6 +58,5 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state, int src_max_slices, int src_max_slice_width, int hdmi_max_slices, int hdmi_throughput); int intel_hdmi_dsc_get_slice_height(int vactive); -struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi); #endif /* __INTEL_HDMI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index a1f07ee69a8639..2c4e946d557542 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -456,6 +456,7 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) { + struct intel_display *display = &dev_priv->display; u32 pin_mask = 0, long_mask = 0; u32 hotplug_trigger; @@ -477,7 +478,7 @@ void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) - intel_dp_aux_irq_handler(dev_priv); + intel_dp_aux_irq_handler(display); } void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) @@ -513,6 +514,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) { + struct intel_display *display = 
&i915->display; enum hpd_pin pin; u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK); u32 trigger_aux = iir & XELPDP_AUX_TC_MASK; @@ -545,7 +547,7 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) } if (trigger_aux) - intel_dp_aux_irq_handler(i915); + intel_dp_aux_irq_handler(display); if (!pin_mask && !trigger_aux) drm_err(&i915->drm, diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c index a92d008d4e6e59..19d1f196d9fb05 100644 --- a/drivers/gpu/drm/i915/display/intel_hti.c +++ b/drivers/gpu/drm/i915/display/intel_hti.c @@ -9,33 +9,33 @@ #include "intel_hti.h" #include "intel_hti_regs.h" -void intel_hti_init(struct drm_i915_private *i915) +void intel_hti_init(struct intel_display *display) { /* * If the platform has HTI, we need to find out whether it has reserved * any display resources before we create our display outputs. */ - if (DISPLAY_INFO(i915)->has_hti) - i915->display.hti.state = intel_de_read(i915, HDPORT_STATE); + if (DISPLAY_INFO(display)->has_hti) + display->hti.state = intel_de_read(display, HDPORT_STATE); } -bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy) +bool intel_hti_uses_phy(struct intel_display *display, enum phy phy) { - if (drm_WARN_ON(&i915->drm, phy == PHY_NONE)) + if (drm_WARN_ON(display->drm, phy == PHY_NONE)) return false; - return i915->display.hti.state & HDPORT_ENABLED && - i915->display.hti.state & HDPORT_DDI_USED(phy); + return display->hti.state & HDPORT_ENABLED && + display->hti.state & HDPORT_DDI_USED(phy); } -u32 intel_hti_dpll_mask(struct drm_i915_private *i915) +u32 intel_hti_dpll_mask(struct intel_display *display) { - if (!(i915->display.hti.state & HDPORT_ENABLED)) + if (!(display->hti.state & HDPORT_ENABLED)) return 0; /* * Note: This is subtle. The values must coincide with what's defined * for the platform. 
*/ - return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->display.hti.state); + return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, display->hti.state); } diff --git a/drivers/gpu/drm/i915/display/intel_hti.h b/drivers/gpu/drm/i915/display/intel_hti.h index 2893d666865718..b692571c5558d5 100644 --- a/drivers/gpu/drm/i915/display/intel_hti.h +++ b/drivers/gpu/drm/i915/display/intel_hti.h @@ -8,11 +8,11 @@ #include <linux/types.h> -struct drm_i915_private; +struct intel_display; enum phy; -void intel_hti_init(struct drm_i915_private *i915); -bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy); -u32 intel_hti_dpll_mask(struct drm_i915_private *i915); +void intel_hti_init(struct intel_display *display); +bool intel_hti_uses_phy(struct intel_display *display, enum phy phy); +u32 intel_hti_dpll_mask(struct intel_display *display); #endif /* __INTEL_HTI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c index dfd7d5e23f3fa4..e7a9b860fac6e2 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.c +++ b/drivers/gpu/drm/i915/display/intel_link_bw.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <drm/drm_fixed.h> + #include "i915_drv.h" #include "intel_atomic.h" @@ -23,12 +25,13 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state, struct intel_link_bw_limits *limits) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); enum pipe pipe; limits->force_fec_pipes = 0; limits->bpp_limit_reached_pipes = 0; - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, intel_crtc_for_pipe(i915, pipe)); @@ -67,12 +70,12 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, u8 pipe_mask, const char *reason) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); enum pipe max_bpp_pipe = INVALID_PIPE; struct intel_crtc *crtc; int max_bpp_x16 = 0; - for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { + for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) { struct intel_crtc_state *crtc_state; int link_bpp_x16; @@ -93,7 +96,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, * is based on the pipe bpp value, set the actual link bpp * limit here once the MST BW allocation is fixed.
*/ - link_bpp_x16 = to_bpp_x16(crtc_state->pipe_bpp); + link_bpp_x16 = fxp_q4_from_int(crtc_state->pipe_bpp); if (link_bpp_x16 > max_bpp_x16) { max_bpp_x16 = link_bpp_x16; @@ -134,7 +137,7 @@ intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state, struct intel_link_bw_limits *new_limits, enum pipe pipe) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); if (pipe == INVALID_PIPE) return false; @@ -143,7 +146,7 @@ intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state, old_limits->max_bpp_x16[pipe]) return false; - if (drm_WARN_ON(&i915->drm, + if (drm_WARN_ON(display->drm, new_limits->bpp_limit_reached_pipes & BIT(pipe))) return false; @@ -176,7 +179,7 @@ static int check_all_link_config(struct intel_atomic_state *state, } static bool -assert_link_limit_change_valid(struct drm_i915_private *i915, +assert_link_limit_change_valid(struct intel_display *display, const struct intel_link_bw_limits *old_limits, const struct intel_link_bw_limits *new_limits) { @@ -184,14 +187,14 @@ assert_link_limit_change_valid(struct drm_i915_private *i915, enum pipe pipe; /* FEC can't be forced off after it was forced on. */ - if (drm_WARN_ON(&i915->drm, + if (drm_WARN_ON(display->drm, (old_limits->force_fec_pipes & new_limits->force_fec_pipes) != old_limits->force_fec_pipes)) return false; - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { /* The bpp limit can only decrease. */ - if (drm_WARN_ON(&i915->drm, + if (drm_WARN_ON(display->drm, new_limits->max_bpp_x16[pipe] > old_limits->max_bpp_x16[pipe])) return false; @@ -202,7 +205,7 @@ assert_link_limit_change_valid(struct drm_i915_private *i915, } /* At least one limit must change. */ - if (drm_WARN_ON(&i915->drm, + if (drm_WARN_ON(display->drm, !bpps_changed && new_limits->force_fec_pipes == old_limits->force_fec_pipes)) @@ -230,7 +233,7 @@ assert_link_limit_change_valid(struct drm_i915_private *i915, int intel_link_bw_atomic_check(struct intel_atomic_state *state, struct intel_link_bw_limits *new_limits) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_link_bw_limits old_limits = *new_limits; int ret; @@ -238,7 +241,7 @@ int intel_link_bw_atomic_check(struct intel_atomic_state *state, if (ret != -EAGAIN) return ret; - if (!assert_link_limit_change_valid(i915, &old_limits, new_limits)) + if (!assert_link_limit_change_valid(display, &old_limits, new_limits)) return -EINVAL; return -EAGAIN; diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h index 6b0ccfff59dab4..e69049cf178f67 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.h +++ b/drivers/gpu/drm/i915/display/intel_link_bw.h @@ -10,8 +10,6 @@ #include "intel_display_limits.h" -struct drm_i915_private; - struct intel_atomic_state; struct intel_crtc_state; diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c index d5a0aecf3e8ffc..b457c69dc0beb9 100644 --- a/drivers/gpu/drm/i915/display/intel_load_detect.c +++ b/drivers/gpu/drm/i915/display/intel_load_detect.c @@ -48,23 +48,22 @@ struct drm_atomic_state * intel_load_detect_get_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx) { + struct intel_display *display = to_intel_display(connector->dev); struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct intel_crtc *possible_crtc; struct 
intel_crtc *crtc = NULL; - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_mode_config *config = &dev->mode_config; + struct drm_mode_config *config = &display->drm->mode_config; struct drm_atomic_state *state = NULL, *restore_state = NULL; struct drm_connector_state *connector_state; struct intel_crtc_state *crtc_state; int ret; - drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.base.id, encoder->base.name); - drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); + drm_WARN_ON(display->drm, !drm_modeset_is_locked(&config->connection_mutex)); /* * Algorithm gets a little messy: @@ -89,7 +88,7 @@ intel_load_detect_get_pipe(struct drm_connector *connector, } /* Find an unused one (if possible) */ - for_each_intel_crtc(dev, possible_crtc) { + for_each_intel_crtc(display->drm, possible_crtc) { if (!(encoder->base.possible_crtcs & drm_crtc_mask(&possible_crtc->base))) continue; @@ -111,15 +110,15 @@ intel_load_detect_get_pipe(struct drm_connector *connector, * If we didn't find an unused CRTC, don't use any. */ if (!crtc) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "no pipe available for load-detect\n"); ret = -ENODEV; goto fail; } found: - state = drm_atomic_state_alloc(dev); - restore_state = drm_atomic_state_alloc(dev); + state = drm_atomic_state_alloc(display->drm); + restore_state = drm_atomic_state_alloc(display->drm); if (!state || !restore_state) { ret = -ENOMEM; goto fail; @@ -164,7 +163,7 @@ intel_load_detect_get_pipe(struct drm_connector *connector, if (!ret) ret = drm_atomic_add_affected_planes(restore_state, &crtc->base); if (ret) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Failed to create a copy of old state to restore: %i\n", ret); goto fail; @@ -172,7 +171,7 @@ intel_load_detect_get_pipe(struct drm_connector *connector, ret = drm_atomic_commit(state); if (ret) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "failed to set mode on load-detect pipe\n"); goto fail; } @@ -204,13 +203,13 @@ void intel_load_detect_release_pipe(struct drm_connector *connector, struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx) { + struct intel_display *display = to_intel_display(connector->dev); struct intel_encoder *intel_encoder = intel_attached_encoder(to_intel_connector(connector)); - struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); struct drm_encoder *encoder = &intel_encoder->base; int ret; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.id, encoder->name); @@ -219,7 +218,7 @@ void intel_load_detect_release_pipe(struct drm_connector *connector, ret = drm_atomic_helper_commit_duplicated_state(state, ctx); if (ret) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Couldn't release load detect pipe: %i\n", ret); drm_atomic_state_put(state); } diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 8b26354d6e538e..f9db867fae8918 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -79,33 +79,33 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode) static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) { - struct intel_dp *dp = 
lspcon_to_intel_dp(lspcon); - struct drm_i915_private *i915 = dp_to_i915(dp); + struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct intel_display *display = to_intel_display(intel_dp); struct drm_dp_dpcd_ident *ident; u32 vendor_oui; - if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) { - drm_err(&i915->drm, "Can't read description\n"); + if (drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd))) { + drm_err(display->drm, "Can't read description\n"); return false; } - ident = &dp->desc.ident; + ident = &intel_dp->desc.ident; vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) | ident->oui[2]; switch (vendor_oui) { case LSPCON_VENDOR_MCA_OUI: lspcon->vendor = LSPCON_VENDOR_MCA; - drm_dbg_kms(&i915->drm, "Vendor: Mega Chips\n"); + drm_dbg_kms(display->drm, "Vendor: Mega Chips\n"); break; case LSPCON_VENDOR_PARADE_OUI: lspcon->vendor = LSPCON_VENDOR_PARADE; - drm_dbg_kms(&i915->drm, "Vendor: Parade Tech\n"); + drm_dbg_kms(display->drm, "Vendor: Parade Tech\n"); break; default: - drm_err(&i915->drm, "Invalid/Unknown vendor OUI\n"); + drm_err(display->drm, "Invalid/Unknown vendor OUI\n"); return false; } @@ -123,7 +123,7 @@ static u32 get_hdr_status_reg(struct intel_lspcon *lspcon) void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u8 hdr_caps; int ret; @@ -131,10 +131,10 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) &hdr_caps, 1); if (ret < 0) { - drm_dbg_kms(&i915->drm, "HDR capability detection failed\n"); + drm_dbg_kms(display->drm, "HDR capability detection failed\n"); lspcon->hdr_supported = false; } else if (hdr_caps & 0x1) { - drm_dbg_kms(&i915->drm, "LSPCON capable of HDR\n"); + drm_dbg_kms(display->drm, "LSPCON capable of HDR\n"); lspcon->hdr_supported = true; } } @@ -142,12 +142,12 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum drm_lspcon_mode current_mode; struct i2c_adapter *ddc = &intel_dp->aux.ddc; if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, ddc, &current_mode)) { - drm_dbg_kms(&i915->drm, "Error reading LSPCON mode\n"); + drm_dbg_kms(display->drm, "Error reading LSPCON mode\n"); return DRM_LSPCON_MODE_INVALID; } return current_mode; } @@ -169,23 +169,23 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum drm_lspcon_mode current_mode; current_mode = lspcon_get_current_mode(lspcon); if (current_mode == mode) goto out; - drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n", + drm_dbg_kms(display->drm, "Waiting for LSPCON mode %s to settle\n", lspcon_mode_name(mode)); wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, lspcon_get_mode_settle_timeout(lspcon)); if (current_mode != mode) - drm_err(&i915->drm, "LSPCON mode hasn't settled\n"); + drm_err(display->drm, "LSPCON mode hasn't settled\n"); out: - drm_dbg_kms(&i915->drm, "Current LSPCON mode %s\n", + drm_dbg_kms(display->drm,
"Current LSPCON mode %s\n", lspcon_mode_name(current_mode)); return current_mode; @@ -195,46 +195,46 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int err; enum drm_lspcon_mode current_mode; struct i2c_adapter *ddc = &intel_dp->aux.ddc; err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, ddc, &current_mode); if (err) { - drm_err(&i915->drm, "Error reading LSPCON mode\n"); + drm_err(display->drm, "Error reading LSPCON mode\n"); return err; } if (current_mode == mode) { - drm_dbg_kms(&i915->drm, "Current mode = desired LSPCON mode\n"); + drm_dbg_kms(display->drm, "Current mode = desired LSPCON mode\n"); return 0; } err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, ddc, mode); if (err < 0) { - drm_err(&i915->drm, "LSPCON mode change failed\n"); + drm_err(display->drm, "LSPCON mode change failed\n"); return err; } lspcon->mode = mode; - drm_dbg_kms(&i915->drm, "LSPCON mode changed done\n"); + drm_dbg_kms(display->drm, "LSPCON mode changed done\n"); return 0; } static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u8 rev; if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV, &rev) != 1) { - drm_dbg_kms(&i915->drm, "Native AUX CH down\n"); + drm_dbg_kms(display->drm, "Native AUX CH down\n"); return false; } - drm_dbg_kms(&i915->drm, "Native AUX CH up, DPCD version: %d.%d\n", + drm_dbg_kms(display->drm, "Native AUX CH up, DPCD version: %d.%d\n", rev >> 4, rev & 0xf); return true; @@ -242,12 +242,12 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) static bool lspcon_probe(struct intel_lspcon *lspcon) { - int retry; - enum drm_dp_dual_mode_type adaptor_type; struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct i2c_adapter *ddc = &intel_dp->aux.ddc; + enum drm_dp_dual_mode_type adaptor_type; enum drm_lspcon_mode expected_mode; + int retry; expected_mode = lspcon_wake_native_aux_ch(lspcon) ? DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS; @@ -263,13 +263,13 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) } if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) { - drm_dbg_kms(&i915->drm, "No LSPCON detected, found %s\n", + drm_dbg_kms(display->drm, "No LSPCON detected, found %s\n", drm_dp_get_dual_mode_type_name(adaptor_type)); return false; } /* Yay ...
got a LSPCON device */ - drm_dbg_kms(&i915->drm, "LSPCON detected\n"); + drm_dbg_kms(display->drm, "LSPCON detected\n"); lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); /* @@ -279,7 +279,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) */ if (lspcon->mode != DRM_LSPCON_MODE_PCON) { if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { - drm_err(&i915->drm, "LSPCON mode change to PCON failed\n"); + drm_err(display->drm, "LSPCON mode change to PCON failed\n"); return false; } } @@ -289,13 +289,13 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); unsigned long start = jiffies; while (1) { if (intel_digital_port_connected(&dig_port->base)) { - drm_dbg_kms(&i915->drm, "LSPCON recovering in PCON mode after %u ms\n", + drm_dbg_kms(display->drm, "LSPCON recovering in PCON mode after %u ms\n", jiffies_to_msecs(jiffies - start)); return; } @@ -306,7 +306,7 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) usleep_range(10000, 15000); } - drm_dbg_kms(&i915->drm, "LSPCON DP descriptor mismatch after resume\n"); + drm_dbg_kms(display->drm, "LSPCON DP descriptor mismatch after resume\n"); } static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) @@ -477,10 +477,10 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { - bool ret = true; + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); + bool ret = true; switch (type) { case HDMI_INFOFRAME_TYPE_AVI: @@ -492,7 +492,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, frame, len); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: - drm_dbg_kms(&i915->drm, "Update HDR metadata for lspcon\n"); + drm_dbg_kms(display->drm, "Update HDR metadata for lspcon\n"); /* It uses the legacy hsw implementation for the same */ hsw_write_infoframe(encoder, crtc_state, type, frame, len); break; @@ -501,7 +501,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, } if (!ret) { - drm_err(&i915->drm, "Failed to write infoframes\n"); + drm_err(display->drm, "Failed to write infoframes\n"); return; } } @@ -522,17 +522,17 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - ssize_t ret; - union hdmi_infoframe frame; - u8 buf[VIDEO_DIP_DATA_SIZE]; + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_lspcon *lspcon = &dig_port->lspcon; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + union hdmi_infoframe frame; + u8 buf[VIDEO_DIP_DATA_SIZE]; + ssize_t ret; if (!lspcon->active) { - drm_err(&i915->drm, "Writing infoframes while LSPCON disabled ?\n"); + drm_err(display->drm, "Writing infoframes while LSPCON disabled ?\n"); return; } @@ -542,7 +542,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, conn_state->connector, adjusted_mode); if (ret < 0) { - drm_err(&i915->drm, "couldn't fill AVI 
infoframe\n"); + drm_err(display->drm, "couldn't fill AVI infoframe\n"); return; } @@ -583,7 +583,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); if (ret < 0) { - drm_err(&i915->drm, "Failed to pack AVI IF\n"); + drm_err(display->drm, "Failed to pack AVI IF\n"); return; } @@ -624,9 +624,9 @@ static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux) u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); bool infoframes_enabled; u32 val = 0; u32 mask, tmp; @@ -640,8 +640,8 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); if (lspcon->hdr_supported) { - tmp = intel_de_read(dev_priv, - HSW_TVIDEO_DIP_CTL(dev_priv, pipe_config->cpu_transcoder)); + tmp = intel_de_read(display, + HSW_TVIDEO_DIP_CTL(display, pipe_config->cpu_transcoder)); mask = VIDEO_DIP_ENABLE_GMP_HSW; if (tmp & mask) @@ -658,32 +658,32 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) bool lspcon_init(struct intel_digital_port *dig_port) { + struct intel_display *display = to_intel_display(dig_port); struct intel_dp *intel_dp = &dig_port->dp; struct intel_lspcon *lspcon = &dig_port->lspcon; - struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector *connector = &intel_dp->attached_connector->base; lspcon->active = false; lspcon->mode = DRM_LSPCON_MODE_INVALID; if (!lspcon_probe(lspcon)) { - drm_err(&i915->drm, "Failed to probe lspcon\n"); + drm_err(display->drm, "Failed to probe lspcon\n"); return false; } if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) { - drm_err(&i915->drm, "LSPCON DPCD read failed\n"); + drm_err(display->drm, "LSPCON DPCD read failed\n"); return false; } if (!lspcon_detect_vendor(lspcon)) { - drm_err(&i915->drm, "LSPCON vendor detection failed\n"); + drm_err(display->drm, "LSPCON vendor detection failed\n"); return false; } connector->ycbcr_420_allowed = true; lspcon->active = true; - drm_dbg_kms(&i915->drm, "Success: LSPCON init\n"); + drm_dbg_kms(display->drm, "Success: LSPCON init\n"); return true; } @@ -697,9 +697,8 @@ u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder, void lspcon_resume(struct intel_digital_port *dig_port) { + struct intel_display *display = to_intel_display(dig_port); struct intel_lspcon *lspcon = &dig_port->lspcon; - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *i915 = to_i915(dev); enum drm_lspcon_mode expected_mode; if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) @@ -707,7 +706,7 @@ void lspcon_resume(struct intel_digital_port *dig_port) if (!lspcon->active) { if (!lspcon_init(dig_port)) { - drm_err(&i915->drm, "LSPCON init failed on port %c\n", + drm_err(display->drm, "LSPCON init failed on port %c\n", port_name(dig_port->base.port)); return; } @@ -724,7 +723,7 @@ void lspcon_resume(struct intel_digital_port *dig_port) return; if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON)) - drm_err(&i915->drm, "LSPCON resume failed\n"); + drm_err(display->drm, "LSPCON resume failed\n"); else - drm_dbg_kms(&i915->drm, "LSPCON resume success\n"); + drm_dbg_kms(display->drm, "LSPCON resume success\n"); } diff --git 
a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 9f018503d4fd6e..fb4ed9f7855b6c 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -838,6 +838,7 @@ static void intel_lvds_add_properties(struct drm_connector *connector) */ void intel_lvds_init(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_lvds_encoder *lvds_encoder; struct intel_connector *connector; const struct drm_edid *drm_edid; @@ -872,7 +873,7 @@ void intel_lvds_init(struct drm_i915_private *i915) } ddc_pin = GMBUS_PIN_PANEL; - if (!intel_bios_is_lvds_present(i915, &ddc_pin)) { + if (!intel_bios_is_lvds_present(display, &ddc_pin)) { if ((lvds & LVDS_PORT_EN) == 0) { drm_dbg_kms(&i915->drm, "LVDS is not present in VBT\n"); @@ -966,7 +967,7 @@ void intel_lvds_init(struct drm_i915_private *i915) } else { drm_edid = ERR_PTR(-ENOENT); } - intel_bios_init_panel_late(i915, &connector->panel, NULL, + intel_bios_init_panel_late(display, &connector->panel, NULL, IS_ERR(drm_edid) ? NULL : drm_edid); /* Try EDID first */ diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index e1213f3d93cca9..72694dde3c2256 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -987,7 +987,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915, } } - intel_fbc_sanitize(i915); + intel_fbc_sanitize(&i915->display); intel_sanitize_plane_mapping(i915); diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 68bd5101ec89c0..ff11836459def8 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -252,7 +252,7 @@ struct opregion_asle_ext { #define OPREGION_SIZE (8 * 1024) struct intel_opregion { - struct drm_i915_private *i915; + struct intel_display *display; struct opregion_header *header; struct opregion_acpi *acpi; @@ -268,9 +268,9 @@ struct intel_opregion { struct notifier_block acpi_notifier; }; -static int check_swsci_function(struct drm_i915_private *i915, u32 function) +static int check_swsci_function(struct intel_display *display, u32 function) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; struct opregion_swsci *swsci; u32 main_function, sub_function; @@ -300,20 +300,20 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function) return 0; } -static int swsci(struct drm_i915_private *dev_priv, +static int swsci(struct intel_display *display, u32 function, u32 parm, u32 *parm_out) { struct opregion_swsci *swsci; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u32 scic, dslp; u16 swsci_val; int ret; - ret = check_swsci_function(dev_priv, function); + ret = check_swsci_function(display, function); if (ret) return ret; - swsci = dev_priv->display.opregion->swsci; + swsci = display->opregion->swsci; /* Driver sleep timeout in ms. */ dslp = swsci->dslp; @@ -331,7 +331,7 @@ static int swsci(struct drm_i915_private *dev_priv, /* The spec tells us to do this, but we are the only user... 
*/ scic = swsci->scic; if (scic & SWSCI_SCIC_INDICATOR) { - drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n"); + drm_dbg(display->drm, "SWSCI request already in progress\n"); return -EBUSY; } @@ -355,7 +355,7 @@ static int swsci(struct drm_i915_private *dev_priv, /* Poll for the result. */ #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) if (wait_for(C, dslp)) { - drm_dbg(&dev_priv->drm, "SWSCI request timed out\n"); + drm_dbg(display->drm, "SWSCI request timed out\n"); return -ETIMEDOUT; } @@ -364,7 +364,7 @@ static int swsci(struct drm_i915_private *dev_priv, /* Note: scic == 0 is an error! */ if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) { - drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic); + drm_dbg(display->drm, "SWSCI request error %u\n", scic); return -EIO; } @@ -381,28 +381,28 @@ static int swsci(struct drm_i915_private *dev_priv, #define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2 #define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3 -int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, +int intel_opregion_notify_encoder(struct intel_encoder *encoder, bool enable) { - struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 parm = 0; u32 type = 0; u32 port; int ret; /* don't care about old stuff for now */ - if (!HAS_DDI(dev_priv)) + if (!HAS_DDI(display)) return 0; /* Avoid port out of bounds checks if SWSCI isn't there. */ - ret = check_swsci_function(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE); + ret = check_swsci_function(display, SWSCI_SBCB_DISPLAY_POWER_STATE); if (ret) return ret; - if (intel_encoder->type == INTEL_OUTPUT_DSI) + if (encoder->type == INTEL_OUTPUT_DSI) port = 0; else - port = intel_encoder->port; + port = encoder->port; if (port == PORT_E) { port = 0; @@ -419,17 +419,17 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, * number is out of bounds after mapping. 
*/ if (port > 4) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", - intel_encoder->base.base.id, intel_encoder->base.name, - port_name(intel_encoder->port), port); + encoder->base.base.id, encoder->base.name, + port_name(encoder->port), port); return -EINVAL; } if (!enable) parm |= 4 << 8; - switch (intel_encoder->type) { + switch (encoder->type) { case INTEL_OUTPUT_ANALOG: type = DISPLAY_TYPE_CRT; break; @@ -444,15 +444,15 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL; break; default: - drm_WARN_ONCE(&dev_priv->drm, 1, + drm_WARN_ONCE(display->drm, 1, "unsupported intel_encoder type %d\n", - intel_encoder->type); + encoder->type); return -EINVAL; } parm |= type << (16 + port * 3); - return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); + return swsci(display, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); } static const struct { @@ -466,33 +466,33 @@ static const struct { { PCI_D3cold, 0x04 }, }; -int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, +int intel_opregion_notify_adapter(struct intel_display *display, pci_power_t state) { int i; - if (!HAS_DDI(dev_priv)) + if (!HAS_DDI(display)) return 0; for (i = 0; i < ARRAY_SIZE(power_state_map); i++) { if (state == power_state_map[i].pci_power_state) - return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE, + return swsci(display, SWSCI_SBCB_ADAPTER_POWER_STATE, power_state_map[i].parm, NULL); } return -EINVAL; } -static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) +static u32 asle_set_backlight(struct intel_display *display, u32 bclp) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; - struct opregion_asle *asle = dev_priv->display.opregion->asle; + struct opregion_asle *asle = display->opregion->asle; - drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp); + drm_dbg(display->drm, "bclp = 0x%08x\n", bclp); if (acpi_video_get_backlight_type() == acpi_backlight_native) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "opregion backlight request ignored\n"); return 0; } @@ -504,104 +504,104 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) if (bclp > 255) return ASLC_BACKLIGHT_FAILED; - drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL); + drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL); /* * Update backlight on all connectors that support backlight (usually * only one). */ - drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n", + drm_dbg_kms(display->drm, "updating opregion backlight %d/255\n", bclp); - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) intel_backlight_set_acpi(connector->base.state, bclp, 255); drm_connector_list_iter_end(&conn_iter); asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; - drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); return 0; } -static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi) +static u32 asle_set_als_illum(struct intel_display *display, u32 alsi) { /* alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 
1-0xfffe are valid */ - drm_dbg(&dev_priv->drm, "Illum is not supported\n"); + drm_dbg(display->drm, "Illum is not supported\n"); return ASLC_ALS_ILLUM_FAILED; } -static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb) +static u32 asle_set_pwm_freq(struct intel_display *display, u32 pfmb) { - drm_dbg(&dev_priv->drm, "PWM freq is not supported\n"); + drm_dbg(display->drm, "PWM freq is not supported\n"); return ASLC_PWM_FREQ_FAILED; } -static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit) +static u32 asle_set_pfit(struct intel_display *display, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ - drm_dbg(&dev_priv->drm, "Pfit is not supported\n"); + drm_dbg(display->drm, "Pfit is not supported\n"); return ASLC_PFIT_FAILED; } -static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot) +static u32 asle_set_supported_rotation_angles(struct intel_display *display, u32 srot) { - drm_dbg(&dev_priv->drm, "SROT is not supported\n"); + drm_dbg(display->drm, "SROT is not supported\n"); return ASLC_ROTATION_ANGLES_FAILED; } -static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer) +static u32 asle_set_button_array(struct intel_display *display, u32 iuer) { if (!iuer) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Button array event is not supported (nothing)\n"); if (iuer & ASLE_IUER_ROTATION_LOCK_BTN) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Button array event is not supported (rotation lock)\n"); if (iuer & ASLE_IUER_VOLUME_DOWN_BTN) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Button array event is not supported (volume down)\n"); if (iuer & ASLE_IUER_VOLUME_UP_BTN) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Button array event is not supported (volume up)\n"); if (iuer & ASLE_IUER_WINDOWS_BTN) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Button array event is not supported (windows)\n"); if (iuer & ASLE_IUER_POWER_BTN) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Button array event is not supported (power)\n"); return ASLC_BUTTON_ARRAY_FAILED; } -static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer) +static u32 asle_set_convertible(struct intel_display *display, u32 iuer) { if (iuer & ASLE_IUER_CONVERTIBLE) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Convertible is not supported (clamshell)\n"); else - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Convertible is not supported (slate)\n"); return ASLC_CONVERTIBLE_FAILED; } -static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer) +static u32 asle_set_docking(struct intel_display *display, u32 iuer) { if (iuer & ASLE_IUER_DOCKING) - drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n"); + drm_dbg(display->drm, "Docking is not supported (docked)\n"); else - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Docking is not supported (undocked)\n"); return ASLC_DOCKING_FAILED; } -static u32 asle_isct_state(struct drm_i915_private *dev_priv) +static u32 asle_isct_state(struct intel_display *display) { - drm_dbg(&dev_priv->drm, "ISCT is not supported\n"); + drm_dbg(display->drm, "ISCT is not supported\n"); return ASLC_ISCT_STATE_FAILED; } @@ -609,7 +609,7 @@ static void asle_work(struct work_struct *work) { struct intel_opregion *opregion = container_of(work, struct intel_opregion, asle_work); - struct drm_i915_private *dev_priv = opregion->i915; + struct intel_display *display = opregion->display; 
struct opregion_asle *asle = opregion->asle; u32 aslc_stat = 0; u32 aslc_req; @@ -620,50 +620,51 @@ static void asle_work(struct work_struct *work) aslc_req = asle->aslc; if (!(aslc_req & ASLC_REQ_MSK)) { - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "No request on ASLC interrupt 0x%08x\n", aslc_req); return; } if (aslc_req & ASLC_SET_ALS_ILLUM) - aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi); + aslc_stat |= asle_set_als_illum(display, asle->alsi); if (aslc_req & ASLC_SET_BACKLIGHT) - aslc_stat |= asle_set_backlight(dev_priv, asle->bclp); + aslc_stat |= asle_set_backlight(display, asle->bclp); if (aslc_req & ASLC_SET_PFIT) - aslc_stat |= asle_set_pfit(dev_priv, asle->pfit); + aslc_stat |= asle_set_pfit(display, asle->pfit); if (aslc_req & ASLC_SET_PWM_FREQ) - aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb); + aslc_stat |= asle_set_pwm_freq(display, asle->pfmb); if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) - aslc_stat |= asle_set_supported_rotation_angles(dev_priv, + aslc_stat |= asle_set_supported_rotation_angles(display, asle->srot); if (aslc_req & ASLC_BUTTON_ARRAY) - aslc_stat |= asle_set_button_array(dev_priv, asle->iuer); + aslc_stat |= asle_set_button_array(display, asle->iuer); if (aslc_req & ASLC_CONVERTIBLE_INDICATOR) - aslc_stat |= asle_set_convertible(dev_priv, asle->iuer); + aslc_stat |= asle_set_convertible(display, asle->iuer); if (aslc_req & ASLC_DOCKING_INDICATOR) - aslc_stat |= asle_set_docking(dev_priv, asle->iuer); + aslc_stat |= asle_set_docking(display, asle->iuer); if (aslc_req & ASLC_ISCT_STATE_CHANGE) - aslc_stat |= asle_isct_state(dev_priv); + aslc_stat |= asle_isct_state(display); asle->aslc = aslc_stat; } -bool intel_opregion_asle_present(struct drm_i915_private *i915) +bool intel_opregion_asle_present(struct intel_display *display) { - return i915->display.opregion && i915->display.opregion->asle; + return display->opregion && display->opregion->asle; } -void intel_opregion_asle_intr(struct drm_i915_private *i915) +void intel_opregion_asle_intr(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_opregion *opregion = display->opregion; if (opregion && opregion->asle) queue_work(i915->unordered_wq, &opregion->asle_work); @@ -720,9 +721,9 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) } } -static void intel_didl_outputs(struct drm_i915_private *dev_priv) +static void intel_didl_outputs(struct intel_display *display) { - struct intel_opregion *opregion = dev_priv->display.opregion; + struct intel_opregion *opregion = display->opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0, max_outputs; @@ -737,9 +738,9 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv) max_outputs = ARRAY_SIZE(opregion->acpi->didl) + ARRAY_SIZE(opregion->acpi->did2); - intel_acpi_device_id_update(dev_priv); + intel_acpi_device_id_update(display); - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (i < max_outputs) set_did(opregion, i, connector->acpi_device_id); @@ -747,10 +748,10 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv) } drm_connector_list_iter_end(&conn_iter); - drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i); + drm_dbg_kms(display->drm, "%d outputs detected\n", i); if (i > max_outputs) - drm_err(&dev_priv->drm, + 
drm_err(display->drm, "More than %d outputs in connector list\n", max_outputs); @@ -759,9 +760,9 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv) set_did(opregion, i, 0); } -static void intel_setup_cadls(struct drm_i915_private *dev_priv) +static void intel_setup_cadls(struct intel_display *display) { - struct intel_opregion *opregion = dev_priv->display.opregion; + struct intel_opregion *opregion = display->opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0; @@ -776,7 +777,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv) * Note that internal panels should be at the front of the connector * list already, ensuring they're not left out. */ - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (i >= ARRAY_SIZE(opregion->acpi->cadl)) break; @@ -789,9 +790,9 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv) opregion->acpi->cadl[i] = 0; } -static void swsci_setup(struct drm_i915_private *dev_priv) +static void swsci_setup(struct intel_display *display) { - struct intel_opregion *opregion = dev_priv->display.opregion; + struct intel_opregion *opregion = display->opregion; bool requested_callbacks = false; u32 tmp; @@ -800,7 +801,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv) opregion->swsci_sbcb_sub_functions = 1; /* We use GBDA to ask for supported GBDA calls. */ - if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { + if (swsci(display, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ tmp <<= 1; opregion->swsci_gbda_sub_functions |= tmp; @@ -811,7 +812,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv) * must not call interfaces that are not specifically requested by the * bios. */ - if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { + if (swsci(display, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { /* here, the bits already match sub-function codes */ opregion->swsci_sbcb_sub_functions |= tmp; requested_callbacks = true; @@ -822,7 +823,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv) * the callback is _requested_. But we still can't call interfaces that * are not requested. 
*/ - if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { + if (swsci(display, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ u32 low = tmp & 0x7ff; u32 high = tmp & ~0xfff; /* bit 11 is reserved */ @@ -832,7 +833,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv) if (requested_callbacks) { u32 req = opregion->swsci_sbcb_sub_functions; if ((req & tmp) != req) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp); /* XXX: for now, trust the requested callbacks */ @@ -842,7 +843,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv) } } - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n", opregion->swsci_gbda_sub_functions, opregion->swsci_sbcb_sub_functions); @@ -867,10 +868,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { { } }; -int intel_opregion_setup(struct drm_i915_private *dev_priv) +int intel_opregion_setup(struct intel_display *display) { struct intel_opregion *opregion; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; @@ -885,10 +886,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); pci_read_config_dword(pdev, ASLS, &asls); - drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n", + drm_dbg(display->drm, "graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { - drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n"); + drm_dbg(display->drm, "ACPI OpRegion not supported!\n"); return -ENOTSUPP; } @@ -896,8 +897,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) if (!opregion) return -ENOMEM; - opregion->i915 = dev_priv; - dev_priv->display.opregion = opregion; + opregion->display = display; + display->opregion = opregion; INIT_WORK(&opregion->asle_work, asle_work); @@ -910,20 +911,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) memcpy(buf, base, sizeof(buf)); if (memcmp(buf, OPREGION_SIGNATURE, 16)) { - drm_dbg(&dev_priv->drm, "opregion signature mismatch\n"); + drm_dbg(display->drm, "opregion signature mismatch\n"); err = -EINVAL; goto err_out; } opregion->header = base; - drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n", + drm_dbg(display->drm, "ACPI OpRegion version %u.%u.%u\n", opregion->header->over.major, opregion->header->over.minor, opregion->header->over.revision); mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { - drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n"); + drm_dbg(display->drm, "Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; /* * Indicate we handle monitor hotplug events ourselves so we do @@ -938,30 +939,30 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) u8 major = opregion->header->over.major; if (major >= 3) { - drm_err(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n"); + drm_err(display->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n"); } else { if (major >= 2) - drm_dbg(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v2.x\n"); - drm_dbg(&dev_priv->drm, "SWSCI supported\n"); + drm_dbg(display->drm, "SWSCI Mailbox #2 present for opregion v2.x\n"); + drm_dbg(display->drm, "SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; - 
swsci_setup(dev_priv); + swsci_setup(display); } } if (mboxes & MBOX_ASLE) { - drm_dbg(&dev_priv->drm, "ASLE supported\n"); + drm_dbg(display->drm, "ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; opregion->asle->ardy = ASLE_ARDY_NOT_READY; } if (mboxes & MBOX_ASLE_EXT) { - drm_dbg(&dev_priv->drm, "ASLE extension supported\n"); + drm_dbg(display->drm, "ASLE extension supported\n"); opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET; } if (mboxes & MBOX_BACKLIGHT) { - drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n"); + drm_dbg(display->drm, "Mailbox #2 for backlight present\n"); } if (dmi_check_system(intel_no_opregion_vbt)) @@ -979,7 +980,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) */ if (opregion->header->over.major > 2 || opregion->header->over.minor >= 1) { - drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE); + drm_WARN_ON(display->drm, rvda < OPREGION_SIZE); rvda += asls; } @@ -989,14 +990,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) vbt = opregion->rvda; vbt_size = opregion->asle->rvds; - if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) { - drm_dbg_kms(&dev_priv->drm, + if (intel_bios_is_valid_vbt(display, vbt, vbt_size)) { + drm_dbg_kms(display->drm, "Found valid VBT in ACPI OpRegion (RVDA)\n"); opregion->vbt = vbt; opregion->vbt_size = vbt_size; goto out; } else { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Invalid VBT in ACPI OpRegion (RVDA)\n"); memunmap(opregion->rvda); opregion->rvda = NULL; @@ -1014,13 +1015,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) vbt_size = (mboxes & MBOX_ASLE_EXT) ? OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; vbt_size -= OPREGION_VBT_OFFSET; - if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) { - drm_dbg_kms(&dev_priv->drm, + if (intel_bios_is_valid_vbt(display, vbt, vbt_size)) { + drm_dbg_kms(display->drm, "Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); opregion->vbt = vbt; opregion->vbt_size = vbt_size; } else { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Invalid VBT in ACPI OpRegion (Mailbox #4)\n"); } @@ -1031,7 +1032,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) memunmap(base); err_memremap: kfree(opregion); - dev_priv->display.opregion = NULL; + display->opregion = NULL; return err; } @@ -1054,25 +1055,25 @@ static const struct dmi_system_id intel_use_opregion_panel_type[] = { }; int -intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) +intel_opregion_get_panel_type(struct intel_display *display) { u32 panel_details; int ret; - ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); + ret = swsci(display, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); if (ret) return ret; ret = (panel_details >> 8) & 0xff; if (ret > 0x10) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Invalid OpRegion panel type 0x%x\n", ret); return -EINVAL; } /* fall back to VBT panel type? 
*/ if (ret == 0x0) { - drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n"); + drm_dbg_kms(display->drm, "No panel type in OpRegion\n"); return -ENODEV; } @@ -1082,7 +1083,7 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) * via a quirk list :( */ if (!dmi_check_system(intel_use_opregion_panel_type)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Ignoring OpRegion panel type (%d)\n", ret - 1); return -ENODEV; } @@ -1092,7 +1093,7 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) /** * intel_opregion_get_edid - Fetch EDID from ACPI OpRegion mailbox #5 - * @intel_connector: eDP connector + * @connector: eDP connector * * This reads the ACPI Opregion mailbox #5 to extract the EDID that is passed * to it. @@ -1101,11 +1102,10 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) * The EDID in the OpRegion, or NULL if there is none or it's invalid. * */ -const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector) +const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector) { - struct drm_connector *connector = &intel_connector->base; - struct drm_i915_private *i915 = to_i915(connector->dev); - struct intel_opregion *opregion = i915->display.opregion; + struct intel_display *display = to_intel_display(connector); + struct intel_opregion *opregion = display->opregion; const struct drm_edid *drm_edid; const void *edid; int len; @@ -1117,13 +1117,13 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con /* Validity corresponds to number of 128-byte blocks */ len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128; - if (!len || !memchr_inv(edid, 0, len)) + if (!len || mem_is_zero(edid, len)) return NULL; drm_edid = drm_edid_alloc(edid, len); if (!drm_edid_valid(drm_edid)) { - drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n"); + drm_dbg_kms(display->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n"); drm_edid_free(drm_edid); drm_edid = NULL; } @@ -1131,9 +1131,9 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con return drm_edid; } -bool intel_opregion_vbt_present(struct drm_i915_private *i915) +bool intel_opregion_vbt_present(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; if (!opregion || !opregion->vbt) return false; @@ -1141,9 +1141,9 @@ bool intel_opregion_vbt_present(struct drm_i915_private *i915) return true; } -const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size) +const void *intel_opregion_get_vbt(struct intel_display *display, size_t *size) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; if (!opregion || !opregion->vbt) return NULL; @@ -1154,9 +1154,9 @@ const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size) return kmemdup(opregion->vbt, opregion->vbt_size, GFP_KERNEL); } -bool intel_opregion_headless_sku(struct drm_i915_private *i915) +bool intel_opregion_headless_sku(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; struct opregion_header *header; if (!opregion) @@ -1171,9 +1171,9 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915) return opregion->header->pcon & PCON_HEADLESS_SKU; } -void intel_opregion_register(struct drm_i915_private *i915) 
+void intel_opregion_register(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; if (!opregion) return; @@ -1184,16 +1184,16 @@ void intel_opregion_register(struct drm_i915_private *i915) register_acpi_notifier(&opregion->acpi_notifier); } - intel_opregion_resume(i915); + intel_opregion_resume(display); } -static void intel_opregion_resume_display(struct drm_i915_private *i915) +static void intel_opregion_resume_display(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; if (opregion->acpi) { - intel_didl_outputs(i915); - intel_setup_cadls(i915); + intel_didl_outputs(display); + intel_setup_cadls(display); /* * Notify BIOS we are ready to handle ACPI video ext notifs. @@ -1210,25 +1210,25 @@ static void intel_opregion_resume_display(struct drm_i915_private *i915) } /* Some platforms abuse the _DSM to enable MUX */ - intel_dsm_get_bios_data_funcs_supported(i915); + intel_dsm_get_bios_data_funcs_supported(display); } -void intel_opregion_resume(struct drm_i915_private *i915) +void intel_opregion_resume(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; if (!opregion) return; - if (HAS_DISPLAY(i915)) - intel_opregion_resume_display(i915); + if (HAS_DISPLAY(display)) + intel_opregion_resume_display(display); - intel_opregion_notify_adapter(i915, PCI_D0); + intel_opregion_notify_adapter(display, PCI_D0); } -static void intel_opregion_suspend_display(struct drm_i915_private *i915) +static void intel_opregion_suspend_display(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; if (opregion->asle) opregion->asle->ardy = ASLE_ARDY_NOT_READY; @@ -1239,24 +1239,24 @@ static void intel_opregion_suspend_display(struct drm_i915_private *i915) opregion->acpi->drdy = 0; } -void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) +void intel_opregion_suspend(struct intel_display *display, pci_power_t state) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; if (!opregion) return; - intel_opregion_notify_adapter(i915, state); + intel_opregion_notify_adapter(display, state); - if (HAS_DISPLAY(i915)) - intel_opregion_suspend_display(i915); + if (HAS_DISPLAY(display)) + intel_opregion_suspend_display(display); } -void intel_opregion_unregister(struct drm_i915_private *i915) +void intel_opregion_unregister(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; - intel_opregion_suspend(i915, PCI_D1); + intel_opregion_suspend(display, PCI_D1); if (!opregion) return; @@ -1267,9 +1267,9 @@ void intel_opregion_unregister(struct drm_i915_private *i915) } } -void intel_opregion_cleanup(struct drm_i915_private *i915) +void intel_opregion_cleanup(struct intel_display *display) { - struct intel_opregion *opregion = i915->display.opregion; + struct intel_opregion *opregion = display->opregion; if (!opregion) return; @@ -1278,13 +1278,13 @@ void intel_opregion_cleanup(struct drm_i915_private *i915) if (opregion->rvda) memunmap(opregion->rvda); kfree(opregion); - i915->display.opregion = NULL; + display->opregion = NULL; } static int intel_opregion_show(struct seq_file 
*m, void *unused) { - struct drm_i915_private *i915 = m->private; - struct intel_opregion *opregion = i915->display.opregion; + struct intel_display *display = m->private; + struct intel_opregion *opregion = display->opregion; if (opregion) seq_write(m, opregion->header, OPREGION_SIZE); @@ -1294,10 +1294,10 @@ static int intel_opregion_show(struct seq_file *m, void *unused) DEFINE_SHOW_ATTRIBUTE(intel_opregion); -void intel_opregion_debugfs_register(struct drm_i915_private *i915) +void intel_opregion_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_opregion", 0444, minor->debugfs_root, - i915, &intel_opregion_fops); + display, &intel_opregion_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h index 4b2b8e752632ad..8101eeebfd8b7d 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.h +++ b/drivers/gpu/drm/i915/display/intel_opregion.h @@ -28,88 +28,88 @@ #include #include -struct drm_i915_private; struct intel_connector; +struct intel_display; struct intel_encoder; #ifdef CONFIG_ACPI -int intel_opregion_setup(struct drm_i915_private *dev_priv); -void intel_opregion_cleanup(struct drm_i915_private *i915); +int intel_opregion_setup(struct intel_display *display); +void intel_opregion_cleanup(struct intel_display *display); -void intel_opregion_register(struct drm_i915_private *dev_priv); -void intel_opregion_unregister(struct drm_i915_private *dev_priv); +void intel_opregion_register(struct intel_display *display); +void intel_opregion_unregister(struct intel_display *display); -void intel_opregion_resume(struct drm_i915_private *dev_priv); -void intel_opregion_suspend(struct drm_i915_private *dev_priv, +void intel_opregion_resume(struct intel_display *display); +void intel_opregion_suspend(struct intel_display *display, pci_power_t state); -bool intel_opregion_asle_present(struct drm_i915_private *i915); -void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); -int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, +bool intel_opregion_asle_present(struct intel_display *display); +void intel_opregion_asle_intr(struct intel_display *display); +int intel_opregion_notify_encoder(struct intel_encoder *encoder, bool enable); -int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, +int intel_opregion_notify_adapter(struct intel_display *display, pci_power_t state); -int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); +int intel_opregion_get_panel_type(struct intel_display *display); const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector); -bool intel_opregion_vbt_present(struct drm_i915_private *i915); -const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size); +bool intel_opregion_vbt_present(struct intel_display *display); +const void *intel_opregion_get_vbt(struct intel_display *display, size_t *size); -bool intel_opregion_headless_sku(struct drm_i915_private *i915); +bool intel_opregion_headless_sku(struct intel_display *display); -void intel_opregion_debugfs_register(struct drm_i915_private *i915); +void intel_opregion_debugfs_register(struct intel_display *display); #else /* CONFIG_ACPI*/ -static inline int intel_opregion_setup(struct drm_i915_private *dev_priv) +static inline int intel_opregion_setup(struct intel_display *display) { return 0; } -static inline void intel_opregion_cleanup(struct 
drm_i915_private *i915) +static inline void intel_opregion_cleanup(struct intel_display *display) { } -static inline void intel_opregion_register(struct drm_i915_private *dev_priv) +static inline void intel_opregion_register(struct intel_display *display) { } -static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) +static inline void intel_opregion_unregister(struct intel_display *display) { } -static inline void intel_opregion_resume(struct drm_i915_private *dev_priv) +static inline void intel_opregion_resume(struct intel_display *display) { } -static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv, +static inline void intel_opregion_suspend(struct intel_display *display, pci_power_t state) { } -static inline bool intel_opregion_asle_present(struct drm_i915_private *i915) +static inline bool intel_opregion_asle_present(struct intel_display *display) { return false; } -static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) +static inline void intel_opregion_asle_intr(struct intel_display *display) { } static inline int -intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) +intel_opregion_notify_encoder(struct intel_encoder *encoder, bool enable) { return 0; } static inline int -intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state) +intel_opregion_notify_adapter(struct intel_display *display, pci_power_t state) { return 0; } -static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev) +static inline int intel_opregion_get_panel_type(struct intel_display *display) { return -ENODEV; } @@ -120,23 +120,23 @@ intel_opregion_get_edid(struct intel_connector *connector) return NULL; } -static inline bool intel_opregion_vbt_present(struct drm_i915_private *i915) +static inline bool intel_opregion_vbt_present(struct intel_display *display) { return false; } static inline const void * -intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size) +intel_opregion_get_vbt(struct intel_display *display, size_t *size) { return NULL; } -static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915) +static inline bool intel_opregion_headless_sku(struct intel_display *display) { return false; } -static inline void intel_opregion_debugfs_register(struct drm_i915_private *i915) +static inline void intel_opregion_debugfs_register(struct intel_display *display) { } diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 0d48b9bec29c52..f13ab680c2cf40 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -358,6 +358,7 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state, void ilk_pch_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -399,7 +400,7 @@ void ilk_pch_enable(struct intel_atomic_state *state, intel_enable_shared_dpll(crtc_state); /* set transcoder timing, panel must allow it */ - assert_pps_unlocked(dev_priv, pipe); + assert_pps_unlocked(display, pipe); if (intel_crtc_has_dp_encoder(crtc_state)) { intel_pch_transcoder_set_m1_n1(crtc, &crtc_state->dp_m_n); intel_pch_transcoder_set_m2_n2(crtc, &crtc_state->dp_m2_n2); diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c 
b/drivers/gpu/drm/i915/display/intel_pmdemand.c index 9ca981b7a12c12..ceaf9e3147dad4 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.c +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c @@ -92,7 +92,7 @@ int intel_pmdemand_init(struct drm_i915_private *i915) &pmdemand_state->base, &intel_pmdemand_funcs); - if (IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) + if (IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) /* Wa_14016740474 */ intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE); diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 7ce926241e83a2..feddc30e3375ea 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -18,15 +18,18 @@ #include "intel_pps_regs.h" #include "intel_quirks.h" -static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, +static void vlv_steal_power_sequencer(struct intel_display *display, enum pipe pipe); static void pps_init_delays(struct intel_dp *intel_dp); static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd); -static const char *pps_name(struct drm_i915_private *i915, - struct intel_pps *pps) +static const char *pps_name(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_pps *pps = &intel_dp->pps; + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { switch (pps->pps_pipe) { case INVALID_PIPE: @@ -60,14 +63,15 @@ static const char *pps_name(struct drm_i915_private *i915, intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); intel_wakeref_t wakeref; /* * See intel_pps_reset_all() why we need a power domain reference here. 
*/ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); - mutex_lock(&dev_priv->display.pps.mutex); + mutex_lock(&display->pps.mutex); return wakeref; } @@ -75,9 +79,10 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp) intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); - mutex_unlock(&dev_priv->display.pps.mutex); + mutex_unlock(&display->pps.mutex); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return 0; @@ -86,7 +91,8 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, static void vlv_power_sequencer_kick(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = intel_dp->pps.pps_pipe; bool pll_enabled, release_cl_override = false; @@ -94,22 +100,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) enum dpio_channel ch = vlv_pipe_to_channel(pipe); u32 DP; - if (drm_WARN(&dev_priv->drm, - intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN, + if (drm_WARN(display->drm, + intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN, "skipping %s kick due to [ENCODER:%d:%s] being active\n", - pps_name(dev_priv, &intel_dp->pps), + pps_name(intel_dp), dig_port->base.base.base.id, dig_port->base.base.name)) return; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "kicking %s for [ENCODER:%d:%s]\n", - pps_name(dev_priv, &intel_dp->pps), + pps_name(intel_dp), dig_port->base.base.base.id, dig_port->base.base.name); /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. */ - DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; + DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED; DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; DP |= DP_PORT_WIDTH(1); DP |= DP_LINK_TRAIN_PAT_1; @@ -119,7 +125,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) else DP |= DP_PIPE_SEL(pipe); - pll_enabled = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE; + pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE; /* * The DPLL for the pipe must be enabled for this to work. @@ -130,7 +136,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) !chv_phy_powergate_ch(dev_priv, phy, ch, true); if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to force on PLL for pipe %c!\n", pipe_name(pipe)); return; @@ -143,14 +149,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) * to make this power sequencer lock onto the port. * Otherwise even VDD force bit won't work. 
*/ - intel_de_write(dev_priv, intel_dp->output_reg, DP); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, DP); + intel_de_posting_read(display, intel_dp->output_reg); - intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN); + intel_de_posting_read(display, intel_dp->output_reg); - intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN); - intel_de_posting_read(dev_priv, intel_dp->output_reg); + intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN); + intel_de_posting_read(display, intel_dp->output_reg); if (!pll_enabled) { vlv_force_pll_off(dev_priv, pipe); @@ -160,7 +166,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) } } -static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) +static enum pipe vlv_find_free_pps(struct intel_display *display) { struct intel_encoder *encoder; unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); @@ -169,11 +175,11 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) * We don't have power sequencer currently. * Pick one that's not used by other ports. */ - for_each_intel_dp(&dev_priv->drm, encoder) { + for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (encoder->type == INTEL_OUTPUT_EDP) { - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE && intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe); @@ -181,7 +187,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) if (intel_dp->pps.pps_pipe != INVALID_PIPE) pipes &= ~(1 << intel_dp->pps.pps_pipe); } else { - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, intel_dp->pps.pps_pipe != INVALID_PIPE); if (intel_dp->pps.active_pipe != INVALID_PIPE) @@ -198,36 +204,36 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) static enum pipe vlv_power_sequencer_pipe(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); /* We should never land here with regular DP ports */ - drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); + drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp)); - drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE && + drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE && intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe); if (intel_dp->pps.pps_pipe != INVALID_PIPE) return intel_dp->pps.pps_pipe; - pipe = vlv_find_free_pps(dev_priv); + pipe = vlv_find_free_pps(display); /* * Didn't find one. This should not happen since there * are two power sequencers and up to two eDP ports. 
*/ - if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE)) + if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE)) pipe = PIPE_A; - vlv_steal_power_sequencer(dev_priv, pipe); + vlv_steal_power_sequencer(display, pipe); intel_dp->pps.pps_pipe = pipe; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "picked %s for [ENCODER:%d:%s]\n", - pps_name(dev_priv, &intel_dp->pps), + pps_name(intel_dp), dig_port->base.base.base.id, dig_port->base.base.name); /* init power sequencer on this pipe and port */ @@ -246,13 +252,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) static int bxt_power_sequencer_idx(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int pps_idx = intel_dp->pps.pps_idx; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); /* We should never land here with regular DP ports */ - drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); + drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp)); if (!intel_dp->pps.pps_reset) return pps_idx; @@ -268,37 +274,38 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) return pps_idx; } -typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx); +typedef bool (*pps_check)(struct intel_display *display, int pps_idx); -static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx) +static bool pps_has_pp_on(struct intel_display *display, int pps_idx) { - return intel_de_read(dev_priv, PP_STATUS(dev_priv, pps_idx)) & PP_ON; + return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON; } -static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx) +static bool pps_has_vdd_on(struct intel_display *display, int pps_idx) { - return intel_de_read(dev_priv, PP_CONTROL(dev_priv, pps_idx)) & EDP_FORCE_VDD; + return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD; } -static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx) +static bool pps_any(struct intel_display *display, int pps_idx) { return true; } static enum pipe -vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, +vlv_initial_pps_pipe(struct intel_display *display, enum port port, pps_check check) { enum pipe pipe; for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { - u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, pipe)) & + u32 port_sel = intel_de_read(display, + PP_ON_DELAYS(display, pipe)) & PANEL_PORT_SELECT_MASK; if (port_sel != PANEL_PORT_SELECT_VLV(port)) continue; - if (!check(dev_priv, pipe)) + if (!check(display, pipe)) continue; return pipe; @@ -310,41 +317,43 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, static void vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum port port = dig_port->base.port; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); /* try to find a pipe with this port selected */ /* first pick one where the panel is on */ - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port, + intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, pps_has_pp_on); /* didn't find one? 
pick one where vdd is on */ if (intel_dp->pps.pps_pipe == INVALID_PIPE) - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port, + intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, pps_has_vdd_on); /* didn't find one? pick one with just the correct port */ if (intel_dp->pps.pps_pipe == INVALID_PIPE) - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port, + intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, pps_any); /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */ if (intel_dp->pps.pps_pipe == INVALID_PIPE) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] no initial power sequencer\n", dig_port->base.base.base.id, dig_port->base.base.name); return; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] initial power sequencer: %s\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); + pps_name(intel_dp)); } -static int intel_num_pps(struct drm_i915_private *i915) +static int intel_num_pps(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) return 2; @@ -365,23 +374,24 @@ static int intel_num_pps(struct drm_i915_private *i915) static bool intel_pps_is_valid(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); if (intel_dp->pps.pps_idx == 1 && INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_ADP) - return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT; + return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT; return true; } static int -bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check) +bxt_initial_pps_idx(struct intel_display *display, pps_check check) { - int pps_idx, pps_num = intel_num_pps(i915); + int pps_idx, pps_num = intel_num_pps(display); for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { - if (check(i915, pps_idx)) + if (check(display, pps_idx)) return pps_idx; } @@ -391,11 +401,12 @@ bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check) static bool pps_initial_setup(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *i915 = to_i915(encoder->base.dev); - lockdep_assert_held(&i915->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { vlv_initial_power_sequencer_setup(intel_dp); @@ -403,46 +414,47 @@ pps_initial_setup(struct intel_dp *intel_dp) } /* first ask the VBT */ - if (intel_num_pps(i915) > 1) + if (intel_num_pps(display) > 1) intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller; else intel_dp->pps.pps_idx = 0; - if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= intel_num_pps(i915))) + if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display))) intel_dp->pps.pps_idx = -1; /* VBT wasn't parsed yet? pick one where the panel is on */ if (intel_dp->pps.pps_idx < 0) - intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on); + intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on); /* didn't find one? 
pick one where vdd is on */ if (intel_dp->pps.pps_idx < 0) - intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on); + intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on); /* didn't find one? pick any */ if (intel_dp->pps.pps_idx < 0) { - intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any); + intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n", encoder->base.base.id, encoder->base.name, - pps_name(i915, &intel_dp->pps)); + pps_name(intel_dp)); } else { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] initial power sequencer: %s\n", encoder->base.base.id, encoder->base.name, - pps_name(i915, &intel_dp->pps)); + pps_name(intel_dp)); } return intel_pps_is_valid(intel_dp); } -void intel_pps_reset_all(struct drm_i915_private *dev_priv) +void intel_pps_reset_all(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; - if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv))) + if (drm_WARN_ON(display->drm, !IS_LP(dev_priv))) return; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; /* @@ -455,16 +467,16 @@ void intel_pps_reset_all(struct drm_i915_private *dev_priv) * should use them always. */ - for_each_intel_dp(&dev_priv->drm, encoder) { + for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE); if (encoder->type != INTEL_OUTPUT_EDP) continue; - if (DISPLAY_VER(dev_priv) >= 9) + if (DISPLAY_VER(display) >= 9) intel_dp->pps.pps_reset = true; else intel_dp->pps.pps_pipe = INVALID_PIPE; @@ -482,7 +494,8 @@ struct pps_registers { static void intel_pps_get_registers(struct intel_dp *intel_dp, struct pps_registers *regs) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); int pps_idx; memset(regs, 0, sizeof(*regs)); @@ -494,17 +507,17 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp, else pps_idx = intel_dp->pps.pps_idx; - regs->pp_ctrl = PP_CONTROL(dev_priv, pps_idx); - regs->pp_stat = PP_STATUS(dev_priv, pps_idx); - regs->pp_on = PP_ON_DELAYS(dev_priv, pps_idx); - regs->pp_off = PP_OFF_DELAYS(dev_priv, pps_idx); + regs->pp_ctrl = PP_CONTROL(display, pps_idx); + regs->pp_stat = PP_STATUS(display, pps_idx); + regs->pp_on = PP_ON_DELAYS(display, pps_idx); + regs->pp_off = PP_OFF_DELAYS(display, pps_idx); /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) regs->pp_div = INVALID_MMIO_REG; else - regs->pp_div = PP_DIVISOR(dev_priv, pps_idx); + regs->pp_div = PP_DIVISOR(display, pps_idx); } static i915_reg_t @@ -529,49 +542,51 @@ _pp_stat_reg(struct intel_dp *intel_dp) static bool edp_have_panel_power(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_dp->pps.pps_pipe == INVALID_PIPE) return false; - return (intel_de_read(dev_priv, 
_pp_stat_reg(intel_dp)) & PP_ON) != 0; + return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0; } static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_dp->pps.pps_pipe == INVALID_PIPE) return false; - return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; + return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; } void intel_pps_check_power_unlocked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); if (!intel_dp_is_edp(intel_dp)) return; if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { - drm_WARN(&dev_priv->drm, 1, + drm_WARN(display->drm, 1, "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); - drm_dbg_kms(&dev_priv->drm, + pps_name(intel_dp)); + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps), - intel_de_read(dev_priv, _pp_stat_reg(intel_dp)), - intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp))); + pps_name(intel_dp), + intel_de_read(display, _pp_stat_reg(intel_dp)), + intel_de_read(display, _pp_ctrl_reg(intel_dp))); } } @@ -589,68 +604,71 @@ static void intel_pps_verify_state(struct intel_dp *intel_dp); static void wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); i915_reg_t pp_stat_reg, pp_ctrl_reg; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); intel_pps_verify_state(intel_dp); pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps), + pps_name(intel_dp), mask, value, - intel_de_read(dev_priv, pp_stat_reg), - intel_de_read(dev_priv, pp_ctrl_reg)); + intel_de_read(display, pp_stat_reg), + intel_de_read(display, pp_ctrl_reg)); - if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000)) - drm_err(&dev_priv->drm, + if (intel_de_wait(display, pp_stat_reg, mask, value, 5000)) + drm_err(display->drm, "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps), - intel_de_read(dev_priv, pp_stat_reg), - intel_de_read(dev_priv, pp_ctrl_reg)); + pps_name(intel_dp), + intel_de_read(display, pp_stat_reg), + intel_de_read(display, pp_ctrl_reg)); - drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); + drm_dbg_kms(display->drm, "Wait complete\n"); } static void wait_panel_on(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct 
intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n", + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] %s wait for panel power on\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(i915, &intel_dp->pps)); + pps_name(intel_dp)); wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); } static void wait_panel_off(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n", + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] %s wait for panel power off time\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(i915, &intel_dp->pps)); + pps_name(intel_dp)); wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); } static void wait_panel_power_cycle(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); ktime_t panel_power_on_time; s64 panel_power_off_duration; - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n", + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] %s wait for panel power cycle\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(i915, &intel_dp->pps)); + pps_name(intel_dp)); /* take the difference of current time and panel power off time * and then make panel wait for t11_t12 if needed. */ @@ -695,13 +713,13 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp) static u32 ilk_get_pp_control(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u32 control; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); - control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); - if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && + control = intel_de_read(display, _pp_ctrl_reg(intel_dp)); + if (drm_WARN_ON(display->drm, !HAS_DDI(display) && (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { control &= ~PANEL_UNLOCK_MASK; control |= PANEL_UNLOCK_REGS; @@ -716,13 +734,14 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp) */ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; bool need_to_disable = !intel_dp->pps.want_panel_vdd; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if (!intel_dp_is_edp(intel_dp)) return false; @@ -733,16 +752,16 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) if (edp_have_panel_vdd(intel_dp)) return need_to_disable; - drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref); + drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref); intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD 
on\n", + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); + pps_name(intel_dp)); if (!edp_have_panel_power(intel_dp)) wait_panel_power_cycle(intel_dp); @@ -750,21 +769,22 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) pp = ilk_get_pp_control(intel_dp); pp |= EDP_FORCE_VDD; - intel_de_write(dev_priv, pp_ctrl_reg, pp); - intel_de_posting_read(dev_priv, pp_ctrl_reg); - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", + intel_de_write(display, pp_ctrl_reg, pp); + intel_de_posting_read(display, pp_ctrl_reg); + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps), - intel_de_read(dev_priv, pp_stat_reg), - intel_de_read(dev_priv, pp_ctrl_reg)); + pps_name(intel_dp), + intel_de_read(display, pp_stat_reg), + intel_de_read(display, pp_ctrl_reg)); /* * If the panel wasn't on, delay before accessing aux channel */ if (!edp_have_panel_power(intel_dp)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s panel power wasn't enabled\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); + pps_name(intel_dp)); msleep(intel_dp->pps.panel_power_up_delay); } @@ -779,7 +799,8 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) */ void intel_pps_vdd_on(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; bool vdd; @@ -792,27 +813,27 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp) I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, - pps_name(i915, &intel_dp->pps)); + pps_name(intel_dp)); } static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port *dig_port = - dp_to_dig_port(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); - drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd); + drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd); if (!edp_have_panel_vdd(intel_dp)) return; - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n", + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); + pps_name(intel_dp)); pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; @@ -820,15 +841,16 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp_stat_reg = _pp_stat_reg(intel_dp); - intel_de_write(dev_priv, pp_ctrl_reg, pp); - intel_de_posting_read(dev_priv, pp_ctrl_reg); + intel_de_write(display, pp_ctrl_reg, pp); + intel_de_posting_read(display, pp_ctrl_reg); /* Make sure sequencer is idle before allowing subsequent activity */ - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x 
PP_CONTROL: 0x%08x\n", + drm_dbg_kms(display->drm, + "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps), - intel_de_read(dev_priv, pp_stat_reg), - intel_de_read(dev_priv, pp_ctrl_reg)); + pps_name(intel_dp), + intel_de_read(display, pp_stat_reg), + intel_de_read(display, pp_ctrl_reg)); if ((pp & PANEL_POWER_ON) == 0) intel_dp->pps.panel_power_off_time = ktime_get_boottime(); @@ -869,7 +891,8 @@ static void edp_panel_vdd_work(struct work_struct *__work) static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); unsigned long delay; /* @@ -896,9 +919,10 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) */ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; @@ -907,7 +931,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) "[ENCODER:%d:%s] %s VDD not forced on", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); + pps_name(intel_dp)); intel_dp->pps.want_panel_vdd = false; @@ -919,25 +943,26 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) void intel_pps_on_unlocked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 pp; i915_reg_t pp_ctrl_reg; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n", + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); + pps_name(intel_dp)); - if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), + if (drm_WARN(display->drm, edp_have_panel_power(intel_dp), "[ENCODER:%d:%s] %s panel power already on\n", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, - pps_name(dev_priv, &intel_dp->pps))) + pps_name(intel_dp))) return; wait_panel_power_cycle(intel_dp); @@ -947,24 +972,36 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) if (IS_IRONLAKE(dev_priv)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; - intel_de_write(dev_priv, pp_ctrl_reg, pp); - intel_de_posting_read(dev_priv, pp_ctrl_reg); + intel_de_write(display, pp_ctrl_reg, pp); + intel_de_posting_read(display, pp_ctrl_reg); } + /* + * WA: 22019252566 + * Disable DPLS gating around power sequence. 
+ */ + if (IS_DISPLAY_VER(display, 13, 14)) + intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, + 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + pp |= PANEL_POWER_ON; if (!IS_IRONLAKE(dev_priv)) pp |= PANEL_POWER_RESET; - intel_de_write(dev_priv, pp_ctrl_reg, pp); - intel_de_posting_read(dev_priv, pp_ctrl_reg); + intel_de_write(display, pp_ctrl_reg, pp); + intel_de_posting_read(display, pp_ctrl_reg); wait_panel_on(intel_dp); intel_dp->pps.last_power_on = jiffies; + if (IS_DISPLAY_VER(display, 13, 14)) + intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, + PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0); + if (IS_IRONLAKE(dev_priv)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ - intel_de_write(dev_priv, pp_ctrl_reg, pp); - intel_de_posting_read(dev_priv, pp_ctrl_reg); + intel_de_write(display, pp_ctrl_reg, pp); + intel_de_posting_read(display, pp_ctrl_reg); } } @@ -981,24 +1018,25 @@ void intel_pps_on(struct intel_dp *intel_dp) void intel_pps_off_unlocked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n", + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); + pps_name(intel_dp)); - drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd, + drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] %s need VDD to turn off panel\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); + pps_name(intel_dp)); pp = ilk_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some @@ -1010,8 +1048,8 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp) intel_dp->pps.want_panel_vdd = false; - intel_de_write(dev_priv, pp_ctrl_reg, pp); - intel_de_posting_read(dev_priv, pp_ctrl_reg); + intel_de_write(display, pp_ctrl_reg, pp); + intel_de_posting_read(display, pp_ctrl_reg); wait_panel_off(intel_dp); intel_dp->pps.panel_power_off_time = ktime_get_boottime(); @@ -1036,7 +1074,7 @@ void intel_pps_off(struct intel_dp *intel_dp) /* Enable backlight in the panel power control. */ void intel_pps_backlight_on(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); intel_wakeref_t wakeref; /* @@ -1054,15 +1092,15 @@ void intel_pps_backlight_on(struct intel_dp *intel_dp) pp = ilk_get_pp_control(intel_dp); pp |= EDP_BLC_ENABLE; - intel_de_write(dev_priv, pp_ctrl_reg, pp); - intel_de_posting_read(dev_priv, pp_ctrl_reg); + intel_de_write(display, pp_ctrl_reg, pp); + intel_de_posting_read(display, pp_ctrl_reg); } } /* Disable backlight in the panel power control. 
*/ void intel_pps_backlight_off(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) @@ -1075,8 +1113,8 @@ void intel_pps_backlight_off(struct intel_dp *intel_dp) pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_BLC_ENABLE; - intel_de_write(dev_priv, pp_ctrl_reg, pp); - intel_de_posting_read(dev_priv, pp_ctrl_reg); + intel_de_write(display, pp_ctrl_reg, pp); + intel_de_posting_read(display, pp_ctrl_reg); } intel_dp->pps.last_backlight_off = jiffies; @@ -1089,7 +1127,7 @@ void intel_pps_backlight_off(struct intel_dp *intel_dp) */ void intel_pps_backlight_power(struct intel_connector *connector, bool enable) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_dp *intel_dp = intel_attached_dp(connector); intel_wakeref_t wakeref; bool is_enabled; @@ -1100,7 +1138,7 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable) if (is_enabled == enable) return; - drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", + drm_dbg_kms(display->drm, "panel power control backlight %s\n", enable ? "enable" : "disable"); if (enable) @@ -1111,14 +1149,14 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable) static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum pipe pipe = intel_dp->pps.pps_pipe; - i915_reg_t pp_on_reg = PP_ON_DELAYS(dev_priv, pipe); + i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe); - drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE); + drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE); - if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) + if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B)) return; intel_pps_vdd_off_sync_unlocked(intel_dp); @@ -1132,27 +1170,27 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) * port select always when logically disconnecting a power sequencer * from a port. 
*/ - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "detaching %s from [ENCODER:%d:%s]\n", - pps_name(dev_priv, &intel_dp->pps), + pps_name(intel_dp), dig_port->base.base.base.id, dig_port->base.base.name); - intel_de_write(dev_priv, pp_on_reg, 0); - intel_de_posting_read(dev_priv, pp_on_reg); + intel_de_write(display, pp_on_reg, 0); + intel_de_posting_read(display, pp_on_reg); intel_dp->pps.pps_pipe = INVALID_PIPE; } -static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, +static void vlv_steal_power_sequencer(struct intel_display *display, enum pipe pipe) { struct intel_encoder *encoder; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); - for_each_intel_dp(&dev_priv->drm, encoder) { + for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe, + drm_WARN(display->drm, intel_dp->pps.active_pipe == pipe, "stealing PPS %c from active [ENCODER:%d:%s]\n", pipe_name(pipe), encoder->base.base.id, encoder->base.name); @@ -1160,7 +1198,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, if (intel_dp->pps.pps_pipe != pipe) continue; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "stealing PPS %c from [ENCODER:%d:%s]\n", pipe_name(pipe), encoder->base.base.id, encoder->base.name); @@ -1173,13 +1211,13 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, void vlv_pps_init(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); - drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE); + drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE); if (intel_dp->pps.pps_pipe != INVALID_PIPE && intel_dp->pps.pps_pipe != crtc->pipe) { @@ -1195,7 +1233,7 @@ void vlv_pps_init(struct intel_encoder *encoder, * We may be stealing the power * sequencer from another port. */ - vlv_steal_power_sequencer(dev_priv, crtc->pipe); + vlv_steal_power_sequencer(display, crtc->pipe); intel_dp->pps.active_pipe = crtc->pipe; @@ -1205,9 +1243,9 @@ void vlv_pps_init(struct intel_encoder *encoder, /* now it's all ours */ intel_dp->pps.pps_pipe = crtc->pipe; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "initializing %s for [ENCODER:%d:%s]\n", - pps_name(dev_priv, &intel_dp->pps), + pps_name(intel_dp), encoder->base.base.id, encoder->base.name); /* init power sequencer on this pipe and port */ @@ -1217,10 +1255,11 @@ void vlv_pps_init(struct intel_encoder *encoder, static void pps_vdd_init(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if (!edp_have_panel_vdd(intel_dp)) return; @@ -1231,11 +1270,11 @@ static void pps_vdd_init(struct intel_dp *intel_dp) * schedule a vdd off, so we don't hold on to the reference * indefinitely. 
*/ - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n", dig_port->base.base.base.id, dig_port->base.base.name, - pps_name(dev_priv, &intel_dp->pps)); - drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref); + pps_name(intel_dp)); + drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref); intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); } @@ -1269,7 +1308,7 @@ static void pps_init_timestamps(struct intel_dp *intel_dp) static void intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u32 pp_on, pp_off, pp_ctl; struct pps_registers regs; @@ -1278,11 +1317,11 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) pp_ctl = ilk_get_pp_control(intel_dp); /* Ensure PPS is unlocked */ - if (!HAS_DDI(dev_priv)) - intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); + if (!HAS_DDI(display)) + intel_de_write(display, regs.pp_ctrl, pp_ctl); - pp_on = intel_de_read(dev_priv, regs.pp_on); - pp_off = intel_de_read(dev_priv, regs.pp_off); + pp_on = intel_de_read(display, regs.pp_on); + pp_off = intel_de_read(display, regs.pp_off); /* Pull timing values out of registers */ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); @@ -1293,7 +1332,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) if (i915_mmio_reg_valid(regs.pp_div)) { u32 pp_div; - pp_div = intel_de_read(dev_priv, regs.pp_div); + pp_div = intel_de_read(display, regs.pp_div); seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; } else { @@ -1305,9 +1344,10 @@ static void intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name, const struct edp_power_seq *seq) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + drm_dbg_kms(display->drm, + "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", state_name, seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); } @@ -1315,7 +1355,7 @@ intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name, static void intel_pps_verify_state(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct edp_power_seq hw; struct edp_power_seq *sw = &intel_dp->pps.pps_delays; @@ -1323,7 +1363,7 @@ intel_pps_verify_state(struct intel_dp *intel_dp) if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { - drm_err(&i915->drm, "PPS state mismatch\n"); + drm_err(display->drm, "PPS state mismatch\n"); intel_pps_dump_state(intel_dp, "sw", sw); intel_pps_dump_state(intel_dp, "hw", &hw); } @@ -1338,9 +1378,9 @@ static bool pps_delays_valid(struct edp_power_seq *delays) static void pps_init_delays_bios(struct intel_dp *intel_dp, struct edp_power_seq *bios) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays)) intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays); @@ -1385,9 +1425,9 @@ static void pps_init_delays_vbt(struct 
intel_dp *intel_dp, static void pps_init_delays_spec(struct intel_dp *intel_dp, struct edp_power_seq *spec) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of * our hw here, which are all in 100usec. */ @@ -1406,11 +1446,11 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp, static void pps_init_delays(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct edp_power_seq cur, vbt, spec, *final = &intel_dp->pps.pps_delays; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); /* already initialized? */ if (pps_delays_valid(final)) @@ -1440,13 +1480,13 @@ static void pps_init_delays(struct intel_dp *intel_dp) intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12); #undef get_delay - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "panel power up delay %d, power down delay %d, power cycle delay %d\n", intel_dp->pps.panel_power_up_delay, intel_dp->pps.panel_power_down_delay, intel_dp->pps.panel_power_cycle_delay); - drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", + drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n", intel_dp->pps.backlight_on_delay, intel_dp->pps.backlight_off_delay); @@ -1469,14 +1509,15 @@ static void pps_init_delays(struct intel_dp *intel_dp) static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 pp_on, pp_off, port_sel = 0; - int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; + int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000; struct pps_registers regs; enum port port = dp_to_dig_port(intel_dp)->base.port; const struct edp_power_seq *seq = &intel_dp->pps.pps_delays; - lockdep_assert_held(&dev_priv->display.pps.mutex); + lockdep_assert_held(&display->pps.mutex); intel_pps_get_registers(intel_dp, ®s); @@ -1495,16 +1536,16 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd if (force_disable_vdd) { u32 pp = ilk_get_pp_control(intel_dp); - drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, + drm_WARN(display->drm, pp & PANEL_POWER_ON, "Panel power already on\n"); if (pp & EDP_FORCE_VDD) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "VDD already on, disabling first\n"); pp &= ~EDP_FORCE_VDD; - intel_de_write(dev_priv, regs.pp_ctrl, pp); + intel_de_write(display, regs.pp_ctrl, pp); } pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | @@ -1535,32 +1576,33 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd pp_on |= port_sel; - intel_de_write(dev_priv, regs.pp_on, pp_on); - intel_de_write(dev_priv, regs.pp_off, pp_off); + intel_de_write(display, regs.pp_on, pp_on); + intel_de_write(display, regs.pp_off, pp_off); /* * Compute the divisor for the pp clock, simply match the Bspec formula. 
*/ if (i915_mmio_reg_valid(regs.pp_div)) - intel_de_write(dev_priv, regs.pp_div, + intel_de_write(display, regs.pp_div, REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); else - intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK, + intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK, REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", - intel_de_read(dev_priv, regs.pp_on), - intel_de_read(dev_priv, regs.pp_off), + intel_de_read(display, regs.pp_on), + intel_de_read(display, regs.pp_off), i915_mmio_reg_valid(regs.pp_div) ? - intel_de_read(dev_priv, regs.pp_div) : - (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); + intel_de_read(display, regs.pp_div) : + (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); } void intel_pps_encoder_reset(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) @@ -1606,17 +1648,19 @@ bool intel_pps_init(struct intel_dp *intel_dp) static void pps_init_late(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = intel_dp->attached_connector; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) return; - if (intel_num_pps(i915) < 2) + if (intel_num_pps(display) < 2) return; - drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 && + drm_WARN(display->drm, + connector->panel.vbt.backlight.controller >= 0 && intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller, "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n", encoder->base.base.id, encoder->base.name, @@ -1645,32 +1689,34 @@ void intel_pps_init_late(struct intel_dp *intel_dp) } } -void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) +void intel_pps_unlock_regs_wa(struct intel_display *display) { int pps_num; int pps_idx; - if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv)) + if (!HAS_DISPLAY(display) || HAS_DDI(display)) return; /* * This w/a is needed at least on CPT/PPT, but to be sure apply it * everywhere where registers can be write protected. 
*/ - pps_num = intel_num_pps(dev_priv); + pps_num = intel_num_pps(display); for (pps_idx = 0; pps_idx < pps_num; pps_idx++) - intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, pps_idx), + intel_de_rmw(display, PP_CONTROL(display, pps_idx), PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS); } -void intel_pps_setup(struct drm_i915_private *i915) +void intel_pps_setup(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - i915->display.pps.mmio_base = PCH_PPS_BASE; + display->pps.mmio_base = PCH_PPS_BASE; else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) - i915->display.pps.mmio_base = VLV_PPS_BASE; + display->pps.mmio_base = VLV_PPS_BASE; else - i915->display.pps.mmio_base = PPS_BASE; + display->pps.mmio_base = PPS_BASE; } static int intel_pps_show(struct seq_file *m, void *data) @@ -1704,21 +1750,23 @@ void intel_pps_connector_debugfs_add(struct intel_connector *connector) connector, &intel_pps_fops); } -void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) +void assert_pps_unlocked(struct intel_display *display, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); i915_reg_t pp_reg; u32 val; enum pipe panel_pipe = INVALID_PIPE; bool locked = true; - if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv))) + if (drm_WARN_ON(display->drm, HAS_DDI(display))) return; if (HAS_PCH_SPLIT(dev_priv)) { u32 port_sel; - pp_reg = PP_CONTROL(dev_priv, 0); - port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)) & PANEL_PORT_SELECT_MASK; + pp_reg = PP_CONTROL(display, 0); + port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) & + PANEL_PORT_SELECT_MASK; switch (port_sel) { case PANEL_PORT_SELECT_LVDS: @@ -1739,20 +1787,21 @@ void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* presumably write lock depends on pipe, not port select */ - pp_reg = PP_CONTROL(dev_priv, pipe); + pp_reg = PP_CONTROL(display, pipe); panel_pipe = pipe; } else { u32 port_sel; - pp_reg = PP_CONTROL(dev_priv, 0); - port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)) & PANEL_PORT_SELECT_MASK; + pp_reg = PP_CONTROL(display, 0); + port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) & + PANEL_PORT_SELECT_MASK; - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, port_sel != PANEL_PORT_SELECT_LVDS); intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe); } - val = intel_de_read(dev_priv, pp_reg); + val = intel_de_read(display, pp_reg); if (!(val & PANEL_POWER_ON) || ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) locked = false; diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h index 07ef96ca8da20a..0c5da83a559ef6 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.h +++ b/drivers/gpu/drm/i915/display/intel_pps.h @@ -11,9 +11,9 @@ #include "intel_wakeref.h" enum pipe; -struct drm_i915_private; struct intel_connector; struct intel_crtc_state; +struct intel_display; struct intel_dp; struct intel_encoder; @@ -43,16 +43,16 @@ void intel_pps_wait_power_cycle(struct intel_dp *intel_dp); bool intel_pps_init(struct intel_dp *intel_dp); void intel_pps_init_late(struct intel_dp *intel_dp); void intel_pps_encoder_reset(struct intel_dp *intel_dp); -void intel_pps_reset_all(struct drm_i915_private *i915); +void intel_pps_reset_all(struct intel_display *display); void vlv_pps_init(struct intel_encoder *encoder, const struct intel_crtc_state 
*crtc_state); -void intel_pps_unlock_regs_wa(struct drm_i915_private *i915); -void intel_pps_setup(struct drm_i915_private *i915); +void intel_pps_unlock_regs_wa(struct intel_display *display); +void intel_pps_setup(struct intel_display *display); void intel_pps_connector_debugfs_add(struct intel_connector *connector); -void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe); +void assert_pps_unlocked(struct intel_display *display, enum pipe pipe); #endif /* __INTEL_PPS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 9cb1cdaaeefa7d..136a0d6ca97072 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -203,16 +203,35 @@ bool intel_encoder_can_psr(struct intel_encoder *encoder) return false; } +bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + /* + * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever + * the output is enabled. For non-eDP outputs the main link is always + * on, hence it doesn't require the HW initiated AUX wake-up signaling used + * for eDP. + * + * TODO: + * - Consider leaving AUX IO disabled for eDP / PR as well, in case + * the ALPM with main-link off mode is not enabled. + * - Leave AUX IO enabled for DP / PR, once support for ALPM with + * main-link off mode is added for it and this mode gets enabled. + */ + return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + intel_encoder_can_psr(encoder); +} + static bool psr_global_enabled(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *i915 = dp_to_i915(intel_dp); switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: - if (i915->display.params.enable_psr == -1) + if (display->params.enable_psr == -1) return connector->panel.vbt.psr.enable; - return i915->display.params.enable_psr; + return display->params.enable_psr; case I915_PSR_DEBUG_DISABLE: return false; default: @@ -222,14 +241,14 @@ static bool psr_global_enabled(struct intel_dp *intel_dp) static bool psr2_global_enabled(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DISABLE: case I915_PSR_DEBUG_FORCE_PSR1: return false; default: - if (i915->display.params.enable_psr == 1) + if (display->params.enable_psr == 1) return false; return true; } @@ -237,9 +256,9 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp) static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - if (i915->display.params.enable_psr != -1) + if (display->params.enable_psr != -1) return false; return true; @@ -247,9 +266,9 @@ static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp) static bool panel_replay_global_enabled(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - if ((i915->display.params.enable_psr != -1) || + if ((display->params.enable_psr != -1) || (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE)) return false; return true; @@ -257,111 +276,111 @@ static bool 
panel_replay_global_enabled(struct intel_dp *intel_dp) static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR : + return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR : EDP_PSR_ERROR(intel_dp->psr.transcoder); } static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT : + return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT : EDP_PSR_POST_EXIT(intel_dp->psr.transcoder); } static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY : + return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY : EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder); } static u32 psr_irq_mask_get(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK : + return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK : EDP_PSR_MASK(intel_dp->psr.transcoder); } -static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv, +static i915_reg_t psr_ctl_reg(struct intel_display *display, enum transcoder cpu_transcoder) { - if (DISPLAY_VER(dev_priv) >= 8) - return EDP_PSR_CTL(dev_priv, cpu_transcoder); + if (DISPLAY_VER(display) >= 8) + return EDP_PSR_CTL(display, cpu_transcoder); else return HSW_SRD_CTL; } -static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv, +static i915_reg_t psr_debug_reg(struct intel_display *display, enum transcoder cpu_transcoder) { - if (DISPLAY_VER(dev_priv) >= 8) - return EDP_PSR_DEBUG(dev_priv, cpu_transcoder); + if (DISPLAY_VER(display) >= 8) + return EDP_PSR_DEBUG(display, cpu_transcoder); else return HSW_SRD_DEBUG; } -static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv, +static i915_reg_t psr_perf_cnt_reg(struct intel_display *display, enum transcoder cpu_transcoder) { - if (DISPLAY_VER(dev_priv) >= 8) - return EDP_PSR_PERF_CNT(dev_priv, cpu_transcoder); + if (DISPLAY_VER(display) >= 8) + return EDP_PSR_PERF_CNT(display, cpu_transcoder); else return HSW_SRD_PERF_CNT; } -static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv, +static i915_reg_t psr_status_reg(struct intel_display *display, enum transcoder cpu_transcoder) { - if (DISPLAY_VER(dev_priv) >= 8) - return EDP_PSR_STATUS(dev_priv, cpu_transcoder); + if (DISPLAY_VER(display) >= 8) + return EDP_PSR_STATUS(display, cpu_transcoder); else return HSW_SRD_STATUS; } -static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv, +static i915_reg_t psr_imr_reg(struct intel_display *display, enum transcoder cpu_transcoder) { - if (DISPLAY_VER(dev_priv) >= 12) - return TRANS_PSR_IMR(dev_priv, cpu_transcoder); + if (DISPLAY_VER(display) >= 12) + return TRANS_PSR_IMR(display, cpu_transcoder); else return EDP_PSR_IMR; } -static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv, +static i915_reg_t psr_iir_reg(struct intel_display *display, enum transcoder cpu_transcoder) { - if (DISPLAY_VER(dev_priv) >= 12) - return TRANS_PSR_IIR(dev_priv, cpu_transcoder); + if (DISPLAY_VER(display) >= 12) + return TRANS_PSR_IIR(display, 
cpu_transcoder); else return EDP_PSR_IIR; } -static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv, +static i915_reg_t psr_aux_ctl_reg(struct intel_display *display, enum transcoder cpu_transcoder) { - if (DISPLAY_VER(dev_priv) >= 8) - return EDP_PSR_AUX_CTL(dev_priv, cpu_transcoder); + if (DISPLAY_VER(display) >= 8) + return EDP_PSR_AUX_CTL(display, cpu_transcoder); else return HSW_SRD_AUX_CTL; } -static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv, +static i915_reg_t psr_aux_data_reg(struct intel_display *display, enum transcoder cpu_transcoder, int i) { - if (DISPLAY_VER(dev_priv) >= 8) - return EDP_PSR_AUX_DATA(dev_priv, cpu_transcoder, i); + if (DISPLAY_VER(display) >= 8) + return EDP_PSR_AUX_DATA(display, cpu_transcoder, i); else return HSW_SRD_AUX_DATA(i); } static void psr_irq_control(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 mask; @@ -373,80 +392,81 @@ static void psr_irq_control(struct intel_dp *intel_dp) mask |= psr_irq_post_exit_bit_get(intel_dp) | psr_irq_pre_entry_bit_get(intel_dp); - intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder), + intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder), psr_irq_mask_get(intel_dp), ~mask); } -static void psr_event_print(struct drm_i915_private *i915, +static void psr_event_print(struct intel_display *display, u32 val, bool sel_update_enabled) { - drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val); + drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val); if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE) - drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n"); + drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n"); if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled) - drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n"); + drm_dbg_kms(display->drm, "\tPSR2 disabled\n"); if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN) - drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n"); + drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n"); if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN) - drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n"); + drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n"); if (val & PSR_EVENT_GRAPHICS_RESET) - drm_dbg_kms(&i915->drm, "\tGraphics reset\n"); + drm_dbg_kms(display->drm, "\tGraphics reset\n"); if (val & PSR_EVENT_PCH_INTERRUPT) - drm_dbg_kms(&i915->drm, "\tPCH interrupt\n"); + drm_dbg_kms(display->drm, "\tPCH interrupt\n"); if (val & PSR_EVENT_MEMORY_UP) - drm_dbg_kms(&i915->drm, "\tMemory up\n"); + drm_dbg_kms(display->drm, "\tMemory up\n"); if (val & PSR_EVENT_FRONT_BUFFER_MODIFY) - drm_dbg_kms(&i915->drm, "\tFront buffer modification\n"); + drm_dbg_kms(display->drm, "\tFront buffer modification\n"); if (val & PSR_EVENT_WD_TIMER_EXPIRE) - drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n"); + drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n"); if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE) - drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n"); + drm_dbg_kms(display->drm, "\tPIPE registers updated\n"); if (val & PSR_EVENT_REGISTER_UPDATE) - drm_dbg_kms(&i915->drm, "\tRegister updated\n"); + drm_dbg_kms(display->drm, "\tRegister updated\n"); if (val & PSR_EVENT_HDCP_ENABLE) - drm_dbg_kms(&i915->drm, "\tHDCP enabled\n"); + drm_dbg_kms(display->drm, "\tHDCP enabled\n"); if (val & PSR_EVENT_KVMR_SESSION_ENABLE) - drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n"); + 
drm_dbg_kms(display->drm, "\tKVMR session enabled\n"); if (val & PSR_EVENT_VBI_ENABLE) - drm_dbg_kms(&i915->drm, "\tVBI enabled\n"); + drm_dbg_kms(display->drm, "\tVBI enabled\n"); if (val & PSR_EVENT_LPSP_MODE_EXIT) - drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n"); + drm_dbg_kms(display->drm, "\tLPSP mode exited\n"); if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled) - drm_dbg_kms(&i915->drm, "\tPSR disabled\n"); + drm_dbg_kms(display->drm, "\tPSR disabled\n"); } void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; ktime_t time_ns = ktime_get(); if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) { intel_dp->psr.last_entry_attempt = time_ns; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[transcoder %s] PSR entry attempt in 2 vblanks\n", transcoder_name(cpu_transcoder)); } if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) { intel_dp->psr.last_exit = time_ns; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[transcoder %s] PSR exit completed\n", transcoder_name(cpu_transcoder)); - if (DISPLAY_VER(dev_priv) >= 9) { + if (DISPLAY_VER(display) >= 9) { u32 val; val = intel_de_rmw(dev_priv, PSR_EVENT(dev_priv, cpu_transcoder), 0, 0); - psr_event_print(dev_priv, val, intel_dp->psr.sel_update_enabled); + psr_event_print(display, val, intel_dp->psr.sel_update_enabled); } } if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) { - drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", + drm_warn(display->drm, "[transcoder %s] PSR aux error\n", transcoder_name(cpu_transcoder)); intel_dp->psr.irq_aux_error = true; @@ -459,7 +479,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) * again so we don't care about unmask the interruption * or unset irq_aux_error. 
*/ - intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder), + intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder), 0, psr_irq_psr_error_bit_get(intel_dp)); queue_work(dev_priv->unordered_wq, &intel_dp->psr.work); @@ -468,14 +488,14 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u8 val = 8; /* assume the worst if we can't read the value */ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; else - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Unable to get sink synchronization latency, assuming 8 frames\n"); return val; } @@ -516,7 +536,7 @@ intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp) */ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); ssize_t r; u16 w; u8 y; @@ -542,7 +562,7 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp) intel_dp_get_su_x_granularity_offset(intel_dp), &w, 2); if (r != 2) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Unable to read selective update x granularity\n"); /* * Spec says that if the value read is 0 the default granularity should @@ -555,7 +575,7 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp) intel_dp_get_su_y_granularity_offset(intel_dp), &y, 1); if (r != 1) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Unable to read selective update y granularity\n"); y = 4; } @@ -569,17 +589,17 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp) static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); if (intel_dp_is_edp(intel_dp)) { if (!intel_alpm_aux_less_wake_supported(intel_dp)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n"); return; } if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel doesn't support early transport, eDP Panel Replay not possible\n"); return; } @@ -590,7 +610,7 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT) intel_dp->psr.sink_panel_replay_su_support = true; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel replay %sis supported by panel\n", intel_dp->psr.sink_panel_replay_su_support ? 
"selective_update " : ""); @@ -598,20 +618,19 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) static void _psr_init_dpcd(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = - to_i915(dp_to_dig_port(intel_dp)->base.base.dev); + struct intel_display *display = to_intel_display(intel_dp); - drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n", + drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n", intel_dp->psr_dpcd[0]); if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PSR support not currently available for this panel\n"); return; } if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel lacks power state control, PSR cannot be enabled\n"); return; } @@ -620,7 +639,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp) intel_dp->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); - if (DISPLAY_VER(i915) >= 9 && + if (DISPLAY_VER(display) >= 9 && intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) { bool y_req = intel_dp->psr_dpcd[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED; @@ -638,7 +657,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp) */ intel_dp->psr.sink_psr2_support = y_req && intel_alpm_aux_wake_supported(intel_dp); - drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n", + drm_dbg_kms(display->drm, "PSR2 %ssupported\n", intel_dp->psr.sink_psr2_support ? "" : "not "); } } @@ -663,7 +682,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) static void hsw_psr_setup_aux(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 aux_clock_divider, aux_ctl; /* write DP_SET_POWER=D0 */ @@ -679,7 +699,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) BUILD_BUG_ON(sizeof(aux_msg) > 20); for (i = 0; i < sizeof(aux_msg); i += 4) intel_de_write(dev_priv, - psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2), + psr_aux_data_reg(display, cpu_transcoder, i >> 2), intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i)); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); @@ -694,15 +714,15 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; - intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder), + intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder), aux_ctl); } static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - if (DISPLAY_VER(i915) < 20 || !intel_dp_is_edp(intel_dp) || + if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) || intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE) return false; @@ -741,7 +761,7 @@ static void _panel_replay_enable_sink(struct intel_dp *intel_dp, static void _psr_enable_sink(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); u8 val = DP_PSR_ENABLE; if (crtc_state->has_sel_update) { @@ -750,7 +770,7 @@ static void _psr_enable_sink(struct intel_dp *intel_dp, if (intel_dp->psr.link_standby) val |= DP_PSR_MAIN_LINK_ACTIVE; - if 
(DISPLAY_VER(i915) >= 8) + if (DISPLAY_VER(display) >= 8) val |= DP_PSR_CRC_VERIFICATION; } @@ -802,14 +822,15 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp, static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val = 0; - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) val |= EDP_PSR_TP4_TIME_0us; - if (dev_priv->display.params.psr_safest_params) { + if (display->params.psr_safest_params) { val |= EDP_PSR_TP1_TIME_2500us; val |= EDP_PSR_TP2_TP3_TIME_2500us; goto check_tp3_sel; @@ -854,8 +875,8 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int idle_frames; /* Let's use 6 as the minimum to cover all known cases including the @@ -864,7 +885,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) idle_frames = max(6, connector->panel.vbt.psr.idle_frames); idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1); - if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf)) + if (drm_WARN_ON(display->drm, idle_frames > 0xf)) idle_frames = 0xf; return idle_frames; @@ -872,14 +893,15 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) static void hsw_activate_psr1(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 max_sleep_time = 0x1f; u32 val = EDP_PSR_ENABLE; val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); - if (DISPLAY_VER(dev_priv) < 20) + if (DISPLAY_VER(display) < 20) val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time); if (IS_HASWELL(dev_priv)) @@ -890,23 +912,23 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) val |= intel_psr1_get_tp_time(intel_dp); - if (DISPLAY_VER(dev_priv) >= 8) + if (DISPLAY_VER(display) >= 8) val |= EDP_PSR_CRC_ENABLE; - if (DISPLAY_VER(dev_priv) >= 20) + if (DISPLAY_VER(display) >= 20) val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); - intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), + intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder), ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val); } static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val = 0; - if (dev_priv->display.params.psr_safest_params) + if (display->params.psr_safest_params) return EDP_PSR2_TP2_TIME_2500us; if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && @@ -950,7 +972,7 @@ static u8 frames_before_su_entry(struct intel_dp *intel_dp) static void dg2_activate_panel_replay(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_psr *psr = &intel_dp->psr; enum transcoder cpu_transcoder = intel_dp->psr.transcoder; @@ -961,38 +983,39 
@@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp) if (intel_dp->psr.req_psr2_sdp_prior_scanline) val |= EDP_PSR2_SU_SDP_SCANLINE; - intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val); } - intel_de_rmw(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder), + intel_de_rmw(display, + PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder), 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME); - intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0, + intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0, TRANS_DP2_PANEL_REPLAY_ENABLE); } static void hsw_activate_psr2(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val = EDP_PSR2_ENABLE; u32 psr_val = 0; val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); - if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv)) + if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv)) val |= EDP_SU_TRACK_ENABLE; - if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13) + if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13) val |= EDP_Y_COORDINATE_ENABLE; val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp)); val |= intel_psr2_get_tp_time(intel_dp); - if (DISPLAY_VER(dev_priv) >= 12 && DISPLAY_VER(dev_priv) < 20) { + if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) { if (psr2_block_count(intel_dp) > 2) val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; else @@ -1000,7 +1023,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) } /* Wa_22012278275:adl-p */ - if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) { + if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) { static const u8 map[] = { 2, /* 5 lines */ 1, /* 6 lines */ @@ -1023,12 +1046,12 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES); - } else if (DISPLAY_VER(dev_priv) >= 20) { + } else if (DISPLAY_VER(display) >= 20) { val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines); - } else if (DISPLAY_VER(dev_priv) >= 12) { + } else if (DISPLAY_VER(display) >= 12) { val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines); val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines); - } else if (DISPLAY_VER(dev_priv) >= 9) { + } else if (DISPLAY_VER(display) >= 9) { val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines); val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines); } @@ -1036,18 +1059,18 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (intel_dp->psr.req_psr2_sdp_prior_scanline) val |= EDP_PSR2_SU_SDP_SCANLINE; - if (DISPLAY_VER(dev_priv) >= 20) + if (DISPLAY_VER(display) >= 20) psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 tmp; - tmp = intel_de_read(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder)); - drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); - } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { - intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), 0); + tmp = 
intel_de_read(display, + PSR2_MAN_TRK_CTL(display, cpu_transcoder)); + drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); + } else if (HAS_PSR2_SEL_FETCH(display)) { + intel_de_write(display, + PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0); } if (intel_dp->psr.su_region_et_enabled) @@ -1057,19 +1080,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. */ - intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val); + intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val); - intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), val); + intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val); } static bool -transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) +transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder) { - if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B; - else if (DISPLAY_VER(dev_priv) >= 12) + else if (DISPLAY_VER(display) >= 12) return cpu_transcoder == TRANSCODER_A; - else if (DISPLAY_VER(dev_priv) >= 9) + else if (DISPLAY_VER(display) >= 9) return cpu_transcoder == TRANSCODER_EDP; else return false; @@ -1087,17 +1112,18 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state) static void psr2_program_idle_frames(struct intel_dp *intel_dp, u32 idle_frames) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; - intel_de_rmw(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), + intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder), EDP_PSR2_IDLE_FRAMES_MASK, EDP_PSR2_IDLE_FRAMES(idle_frames)); } static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); psr2_program_idle_frames(intel_dp, 0); intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO); @@ -1105,7 +1131,8 @@ static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp)); @@ -1140,12 +1167,13 @@ static bool dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum port port = dig_port->base.port; - if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) + if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) return pipe <= PIPE_B && port <= PORT_B; else return pipe == 
PIPE_A && port == PORT_A; @@ -1155,9 +1183,10 @@ static void tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct i915_power_domains *power_domains = &display->power.domains; u32 exit_scanlines; /* @@ -1181,7 +1210,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, return; /* Wa_16011303918:adl-p */ - if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) + if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) return; /* @@ -1191,7 +1220,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, exit_scanlines = intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1; - if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay)) + if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay)) return; crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines; @@ -1200,17 +1229,17 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - if (!dev_priv->display.params.enable_psr2_sel_fetch && + if (!display->params.enable_psr2_sel_fetch && intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR2 sel fetch not enabled, disabled by parameter\n"); return false; } if (crtc_state->uapi.async_flip) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR2 sel fetch not enabled, async flip enabled\n"); return false; } @@ -1221,7 +1250,8 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, static bool psr2_granularity_check(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; @@ -1243,7 +1273,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp, * For other platforms with SW tracking we can adjust the y coordinates * to match sink requirement if multiple of 4. 
*/ - if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) + if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) y_granularity = intel_dp->psr.su_y_granularity; else if (intel_dp->psr.su_y_granularity <= 2) y_granularity = 4; @@ -1264,8 +1294,8 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp, static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(intel_dp); const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 hblank_total, hblank_ns, req_ns; hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; @@ -1278,7 +1308,7 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d return true; /* Not supported <13 / Wa_22012279113:adl-p */ - if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b) + if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b) return false; crtc_state->req_psr2_sdp_prior_scanline = true; @@ -1288,12 +1318,12 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); int entry_setup_frames = 0; if (psr_setup_time < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PSR condition failed: Invalid PSR setup time (0x%02x)\n", intel_dp->psr_dpcd[1]); return -ETIME; @@ -1301,14 +1331,14 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp, if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { - if (DISPLAY_VER(i915) >= 20) { + if (DISPLAY_VER(display) >= 20) { /* setup entry frames can be up to 3 frames */ entry_setup_frames = 1; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PSR setup entry frames %d\n", entry_setup_frames); } else { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PSR condition failed: PSR setup time (%d us) too long\n", psr_setup_time); return -ETIME; @@ -1322,7 +1352,7 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool aux_less) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end - crtc_state->hw.adjusted_mode.crtc_vblank_start; int wake_lines; @@ -1330,7 +1360,7 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp, if (aux_less) wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines; else - wake_lines = DISPLAY_VER(i915) < 20 ? + wake_lines = DISPLAY_VER(display) < 20 ? 
psr2_block_count_lines(intel_dp) : intel_dp->alpm_parameters.io_wake_lines; @@ -1348,16 +1378,16 @@ static bool alpm_config_valid(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool aux_less) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); if (!intel_alpm_compute_params(intel_dp, crtc_state)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PSR2/Panel Replay not enabled, Unable to use long enough wake times\n"); return false; } if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PSR2/Panel Replay not enabled, too short vblank time\n"); return false; } @@ -1368,7 +1398,8 @@ static bool alpm_config_valid(struct intel_dp *intel_dp, static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; @@ -1378,24 +1409,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, /* JSL and EHL only supports eDP 1.3 */ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n"); + drm_dbg_kms(display->drm, "PSR2 not supported by phy\n"); return false; } /* Wa_16011181250 */ if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG2(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n"); + drm_dbg_kms(display->drm, + "PSR2 is defeatured for this platform\n"); return false; } - if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { - drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n"); + if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) { + drm_dbg_kms(display->drm, + "PSR2 not completely functional in this stepping\n"); return false; } - if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { - drm_dbg_kms(&dev_priv->drm, + if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) { + drm_dbg_kms(display->drm, "PSR2 not supported in transcoder %s\n", transcoder_name(crtc_state->cpu_transcoder)); return false; @@ -1407,28 +1440,28 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, * over PSR2. 
*/ if (crtc_state->dsc.compression_enable && - (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) { - drm_dbg_kms(&dev_priv->drm, + (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) { + drm_dbg_kms(display->drm, "PSR2 cannot be enabled since DSC is enabled\n"); return false; } - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { psr_max_h = 5120; psr_max_v = 3200; max_bpp = 30; - } else if (DISPLAY_VER(dev_priv) >= 10) { + } else if (DISPLAY_VER(display) >= 10) { psr_max_h = 4096; psr_max_v = 2304; max_bpp = 24; - } else if (DISPLAY_VER(dev_priv) == 9) { + } else if (DISPLAY_VER(display) == 9) { psr_max_h = 3640; psr_max_v = 2304; max_bpp = 24; } if (crtc_state->pipe_bpp > max_bpp) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR2 not enabled, pipe bpp %d > max supported %d\n", crtc_state->pipe_bpp, max_bpp); return false; @@ -1436,8 +1469,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, /* Wa_16011303918:adl-p */ if (crtc_state->vrr.enable && - IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { - drm_dbg_kms(&dev_priv->drm, + IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) { + drm_dbg_kms(display->drm, "PSR2 not enabled, not compatible with HW stepping + VRR\n"); return false; } @@ -1447,7 +1480,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (!crtc_state->enable_psr2_sel_fetch && (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", crtc_hdisplay, crtc_vdisplay, psr_max_h, psr_max_v); @@ -1462,18 +1495,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, static bool intel_sel_update_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); - if (HAS_PSR2_SEL_FETCH(dev_priv) && + if (HAS_PSR2_SEL_FETCH(display) && !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && - !HAS_PSR_HW_TRACKING(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, + !HAS_PSR_HW_TRACKING(display)) { + drm_dbg_kms(display->drm, "Selective update not enabled, selective fetch not valid and no HW tracking available\n"); goto unsupported; } if (!psr2_global_enabled(intel_dp)) { - drm_dbg_kms(&dev_priv->drm, "Selective update disabled by flag\n"); + drm_dbg_kms(display->drm, + "Selective update disabled by flag\n"); goto unsupported; } @@ -1481,23 +1515,23 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp, goto unsupported; if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Selective update not enabled, SDP indication do not fit in hblank\n"); goto unsupported; } - if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 || + if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 || !intel_dp->psr.sink_panel_replay_su_support)) goto unsupported; if (crtc_state->crc_enabled) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Selective update not enabled because it would inhibit pipe CRC calculation\n"); goto unsupported; } if (!psr2_granularity_check(intel_dp, crtc_state)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Selective update not enabled, SU granularity not compatible\n"); goto unsupported; } @@ -1515,7 +1549,7 @@ static bool 
intel_sel_update_config_valid(struct intel_dp *intel_dp, static bool _psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int entry_setup_frames; @@ -1534,7 +1568,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp, if (entry_setup_frames >= 0) { intel_dp->psr.entry_setup_frames = entry_setup_frames; } else { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR condition failed: PSR setup timing not met\n"); return false; } @@ -1547,7 +1581,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_hdcp *hdcp = &connector->hdcp; @@ -1556,7 +1590,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, return false; if (!panel_replay_global_enabled(intel_dp)) { - drm_dbg_kms(&i915->drm, "Panel Replay disabled by flag\n"); + drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n"); return false; } @@ -1567,7 +1601,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, /* 128b/132b Panel Replay is not supported on eDP */ if (intel_dp_is_uhbr(crtc_state)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel Replay is not supported with 128b/132b\n"); return false; } @@ -1578,7 +1612,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel Replay is not supported with HDCP\n"); return false; } @@ -1586,6 +1620,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, if (!alpm_config_valid(intel_dp, crtc_state, true)) return false; + if (crtc_state->crc_enabled) { + drm_dbg_kms(display->drm, + "Panel Replay not enabled because it would inhibit pipe CRC calculation\n"); + return false; + } + return true; } @@ -1593,22 +1633,22 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; if (!psr_global_enabled(intel_dp)) { - drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); + drm_dbg_kms(display->drm, "PSR disabled by flag\n"); return; } if (intel_dp->psr.sink_not_reliable) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR sink implementation is not reliable\n"); return; } if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR condition failed: Interlaced mode enabled\n"); return; } @@ -1619,7 +1659,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, * PSR is a transcoder level feature. 
*/ if (crtc_state->joiner_pipes) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR disabled due to joiner\n"); return; } @@ -1640,7 +1680,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, void intel_psr_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_dp *intel_dp; @@ -1673,18 +1713,18 @@ void intel_psr_get_config(struct intel_encoder *encoder, if (!intel_dp->psr.sel_update_enabled) goto unlock; - if (HAS_PSR2_SEL_FETCH(dev_priv)) { - val = intel_de_read(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder)); + if (HAS_PSR2_SEL_FETCH(display)) { + val = intel_de_read(display, + PSR2_MAN_TRK_CTL(display, cpu_transcoder)); if (val & PSR2_MAN_TRK_CTL_ENABLE) pipe_config->enable_psr2_sel_fetch = true; } pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled; - if (DISPLAY_VER(dev_priv) >= 12) { - val = intel_de_read(dev_priv, - TRANS_EXITLINE(dev_priv, cpu_transcoder)); + if (DISPLAY_VER(display) >= 12) { + val = intel_de_read(display, + TRANS_EXITLINE(display, cpu_transcoder)); pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val); } unlock: @@ -1693,17 +1733,17 @@ void intel_psr_get_config(struct intel_encoder *encoder, static void intel_psr_activate(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; - drm_WARN_ON(&dev_priv->drm, - transcoder_has_psr2(dev_priv, cpu_transcoder) && - intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder)) & EDP_PSR2_ENABLE); + drm_WARN_ON(display->drm, + transcoder_has_psr2(display, cpu_transcoder) && + intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE); + drm_WARN_ON(display->drm, + intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE); - drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active); + drm_WARN_ON(display->drm, intel_dp->psr.active); lockdep_assert_held(&intel_dp->psr.lock); @@ -1742,30 +1782,31 @@ static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp) static void wm_optimization_wa(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); bool set_wa_bit = false; /* Wa_14015648006 */ - if (IS_DISPLAY_VER(dev_priv, 11, 14)) + if (IS_DISPLAY_VER(display, 11, 14)) set_wa_bit |= crtc_state->wm_level_disabled; /* Wa_16013835468 */ - if (DISPLAY_VER(dev_priv) == 12) + if (DISPLAY_VER(display) == 12) set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start != crtc_state->hw.adjusted_mode.crtc_vdisplay; if (set_wa_bit) - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, wa_16013835468_bit_get(intel_dp)); else - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, wa_16013835468_bit_get(intel_dp), 0); } static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = 
dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 mask = 0; @@ -1773,7 +1814,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * Only HSW and BDW have PSR AUX registers that need to be setup. * SKL+ use hardcoded values PSR AUX transactions */ - if (DISPLAY_VER(dev_priv) < 9) + if (DISPLAY_VER(display) < 9) hsw_psr_setup_aux(intel_dp); /* @@ -1790,7 +1831,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * Panel Replay on DP: No bits are applicable * Panel Replay on eDP: All bits are applicable */ - if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp)) + if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp)) mask = EDP_PSR_DEBUG_MASK_HPD; if (intel_dp_is_edp(intel_dp)) { @@ -1804,17 +1845,17 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * As a workaround leave LPSP unmasked to prevent PSR entry * when external displays are active. */ - if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv)) + if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv)) mask |= EDP_PSR_DEBUG_MASK_LPSP; - if (DISPLAY_VER(dev_priv) < 20) + if (DISPLAY_VER(display) < 20) mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP; /* * No separate pipe reg write mask on hsw/bdw, so have to unmask all * registers in order to keep the CURSURFLIVE tricks working :( */ - if (IS_DISPLAY_VER(dev_priv, 9, 10)) + if (IS_DISPLAY_VER(display, 9, 10)) mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; /* allow PSR with sprite enabled */ @@ -1822,7 +1863,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE; } - intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask); + intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask); psr_irq_control(intel_dp); @@ -1831,13 +1872,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * transcoder, EXITLINE will need to be unset when disabling PSR */ if (intel_dp->psr.dc3co_exitline) - intel_de_rmw(dev_priv, - TRANS_EXITLINE(dev_priv, cpu_transcoder), + intel_de_rmw(display, + TRANS_EXITLINE(display, cpu_transcoder), EXITLINE_MASK, intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE); - if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, + if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display)) + intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, intel_dp->psr.psr2_sel_fetch_enabled ? IGNORE_PSR2_HW_TRACKING : 0); @@ -1851,8 +1892,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, wm_optimization_wa(intel_dp, crtc_state); if (intel_dp->psr.sel_update_enabled) { - if (DISPLAY_VER(dev_priv) == 9) - intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, + if (DISPLAY_VER(display) == 9) + intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0, PSR2_VSC_ENABLE_PROG_HEADER | PSR2_ADD_VERTICAL_LINE_COUNT); @@ -1862,27 +1903,27 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * cause issues if non-supported panels are used. 
*/ if (!intel_dp->psr.panel_replay_enabled && - (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) || + (IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) || IS_ALDERLAKE_P(dev_priv))) - intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder), + intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder), 0, ADLP_1_BASED_X_GRANULARITY); /* Wa_16012604467:adlp,mtl[a0,b0] */ if (!intel_dp->psr.panel_replay_enabled && - IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, - MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder), + IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0)) + intel_de_rmw(display, + MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), 0, MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS); else if (IS_ALDERLAKE_P(dev_priv)) - intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0, + intel_de_rmw(display, CLKGATE_DIS_MISC, 0, CLKGATE_DIS_MISC_DMASC_GATING_DIS); } } static bool psr_interrupt_error_check(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val; @@ -1897,11 +1938,11 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp) * first time that PSR HW tries to activate so lets keep PSR disabled * to avoid any rendering problems. */ - val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder)); + val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder)); val &= psr_irq_psr_error_bit_get(intel_dp); if (val) { intel_dp->psr.sink_not_reliable = true; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR interruption error set, not enabling PSR\n"); return false; } @@ -1913,11 +1954,11 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp) static void intel_psr_enable_locked(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val; - drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); + drm_WARN_ON(display->drm, intel_dp->psr.enabled); intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update; intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay; @@ -1938,9 +1979,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, return; if (intel_dp->psr.panel_replay_enabled) { - drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n"); + drm_dbg_kms(display->drm, "Enabling Panel Replay\n"); } else { - drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", + drm_dbg_kms(display->drm, "Enabling PSR%s\n", intel_dp->psr.sel_update_enabled ? 
"2" : "1"); /* @@ -1962,68 +2003,71 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, static void intel_psr_exit(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val; if (!intel_dp->psr.active) { - if (transcoder_has_psr2(dev_priv, cpu_transcoder)) { - val = intel_de_read(dev_priv, - EDP_PSR2_CTL(dev_priv, cpu_transcoder)); - drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); + if (transcoder_has_psr2(display, cpu_transcoder)) { + val = intel_de_read(display, + EDP_PSR2_CTL(display, cpu_transcoder)); + drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE); } - val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)); - drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); + val = intel_de_read(display, + psr_ctl_reg(display, cpu_transcoder)); + drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE); return; } if (intel_dp->psr.panel_replay_enabled) { - intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), + intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), TRANS_DP2_PANEL_REPLAY_ENABLE, 0); } else if (intel_dp->psr.sel_update_enabled) { tgl_disallow_dc3co_on_psr2_exit(intel_dp); - val = intel_de_rmw(dev_priv, - EDP_PSR2_CTL(dev_priv, cpu_transcoder), + val = intel_de_rmw(display, + EDP_PSR2_CTL(display, cpu_transcoder), EDP_PSR2_ENABLE, 0); - drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); + drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE)); } else { - val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), + val = intel_de_rmw(display, + psr_ctl_reg(display, cpu_transcoder), EDP_PSR_ENABLE, 0); - drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); + drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE)); } intel_dp->psr.active = false; } static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; i915_reg_t psr_status; u32 psr_status_mask; if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) { - psr_status = EDP_PSR2_STATUS(dev_priv, cpu_transcoder); + psr_status = EDP_PSR2_STATUS(display, cpu_transcoder); psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; } else { - psr_status = psr_status_reg(dev_priv, cpu_transcoder); + psr_status = psr_status_reg(display, cpu_transcoder); psr_status_mask = EDP_PSR_STATUS_STATE_MASK; } /* Wait till PSR is idle */ - if (intel_de_wait_for_clear(dev_priv, psr_status, + if (intel_de_wait_for_clear(display, psr_status, psr_status_mask, 2000)) - drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n"); + drm_err(display->drm, "Timed out waiting PSR idle state\n"); } static void intel_psr_disable_locked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; lockdep_assert_held(&intel_dp->psr.lock); @@ -2032,9 +2076,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) return; if (intel_dp->psr.panel_replay_enabled) - drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n"); + drm_dbg_kms(display->drm, "Disabling Panel Replay\n"); else - drm_dbg_kms(&dev_priv->drm, "Disabling 
PSR%s\n", + drm_dbg_kms(display->drm, "Disabling PSR%s\n", intel_dp->psr.sel_update_enabled ? "2" : "1"); intel_psr_exit(intel_dp); @@ -2044,19 +2088,19 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) * Wa_16013835468 * Wa_14015648006 */ - if (DISPLAY_VER(dev_priv) >= 11) - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + if (DISPLAY_VER(display) >= 11) + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, wa_16013835468_bit_get(intel_dp), 0); if (intel_dp->psr.sel_update_enabled) { /* Wa_16012604467:adlp,mtl[a0,b0] */ if (!intel_dp->psr.panel_replay_enabled && - IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, - MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder), + IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0)) + intel_de_rmw(display, + MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0); else if (IS_ALDERLAKE_P(dev_priv)) - intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, + intel_de_rmw(display, CLKGATE_DIS_MISC, CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0); } @@ -2065,12 +2109,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) /* Panel Replay on eDP is always using ALPM aux less. */ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) { - intel_de_rmw(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder), + intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder), ALPM_CTL_ALPM_ENABLE | ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); - intel_de_rmw(dev_priv, - PORT_ALPM_CTL(dev_priv, cpu_transcoder), + intel_de_rmw(display, + PORT_ALPM_CTL(display, cpu_transcoder), PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); } @@ -2101,12 +2145,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); if (!old_crtc_state->has_psr) return; - if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp))) + if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp))) return; mutex_lock(&intel_dp->psr.lock); @@ -2126,7 +2170,7 @@ void intel_psr_disable(struct intel_dp *intel_dp, */ void intel_psr_pause(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_psr *psr = &intel_dp->psr; if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) @@ -2140,7 +2184,7 @@ void intel_psr_pause(struct intel_dp *intel_dp) } /* If we ever hit this, we will need to add refcount to pause/resume */ - drm_WARN_ON(&dev_priv->drm, psr->paused); + drm_WARN_ON(display->drm, psr->paused); intel_psr_exit(intel_dp); intel_psr_wait_exit_locked(intel_dp); @@ -2177,45 +2221,53 @@ void intel_psr_resume(struct intel_dp *intel_dp) mutex_unlock(&psr->lock); } -static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv) +static u32 man_trk_ctl_enable_bit_get(struct intel_display *display) { - return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 : + struct drm_i915_private *dev_priv = to_i915(display->drm); + + return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 : PSR2_MAN_TRK_CTL_ENABLE; } -static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv) +static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display) { - return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 
+ struct drm_i915_private *dev_priv = to_i915(display->drm); + + return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME : PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; } -static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv) +static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display) { - return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? + struct drm_i915_private *dev_priv = to_i915(display->drm); + + return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE : PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; } -static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv) +static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display) { - return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? + struct drm_i915_private *dev_priv = to_i915(display->drm); + + return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME : PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME; } static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) - intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), - man_trk_ctl_enable_bit_get(dev_priv) | - man_trk_ctl_partial_frame_bit_get(dev_priv) | - man_trk_ctl_single_full_frame_bit_get(dev_priv) | - man_trk_ctl_continuos_full_frame(dev_priv)); + intel_de_write(display, + PSR2_MAN_TRK_CTL(display, cpu_transcoder), + man_trk_ctl_enable_bit_get(display) | + man_trk_ctl_partial_frame_bit_get(display) | + man_trk_ctl_single_full_frame_bit_get(display) | + man_trk_ctl_continuos_full_frame(display)); /* * Display WA #0884: skl+ @@ -2230,20 +2282,20 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) * but testing proved that it works for up display 13, for newer * than that testing will be needed. 
*/ - intel_de_write(dev_priv, CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0); + intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0); } void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_encoder *encoder; if (!crtc_state->enable_psr2_sel_fetch) return; - for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, + for_each_intel_encoder_mask_with_psr(display->drm, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2253,36 +2305,37 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st break; } - intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), crtc_state->psr2_man_track_ctl); if (!crtc_state->enable_psr2_su_region_et) return; - intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), + intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), crtc_state->pipe_srcsz_early_tpt); } static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, bool full_update) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 val = man_trk_ctl_enable_bit_get(dev_priv); + u32 val = man_trk_ctl_enable_bit_get(display); /* SF partial frame enable has to be set even on full update */ - val |= man_trk_ctl_partial_frame_bit_get(dev_priv); + val |= man_trk_ctl_partial_frame_bit_get(display); if (full_update) { - val |= man_trk_ctl_single_full_frame_bit_get(dev_priv); - val |= man_trk_ctl_continuos_full_frame(dev_priv); + val |= man_trk_ctl_single_full_frame_bit_get(display); + val |= man_trk_ctl_continuos_full_frame(display); goto exit; } if (crtc_state->psr2_su_area.y1 == -1) goto exit; - if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) { + if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) { val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1); val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1); } else { @@ -2335,13 +2388,14 @@ static void clip_area_update(struct drm_rect *overlap_damage_area, static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; u16 y_alignment; /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */ if (crtc_state->dsc.compression_enable && - (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)) + (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)) y_alignment = vdsc_cfg->slice_height; else y_alignment = crtc_state->su_y_granularity; @@ -2429,6 +2483,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = 
intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane_state *new_plane_state, *old_plane_state; @@ -2525,7 +2580,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, * calculation for those. */ if (crtc_state->psr2_su_area.y1 == -1) { - drm_info_once(&dev_priv->drm, + drm_info_once(display->drm, "Selective fetch area calculation failed in pipe %c\n", pipe_name(crtc->pipe)); full_update = true; @@ -2536,7 +2591,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, /* Wa_14014971492 */ if (!crtc_state->has_panel_replay && - ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) || + ((IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) || IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) && crtc_state->splitter.enable) crtc_state->psr2_su_area.y1 = 0; @@ -2622,6 +2677,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -2629,7 +2685,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; - if (!HAS_PSR(i915)) + if (!HAS_PSR(display)) return; for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, @@ -2670,7 +2726,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, void intel_psr_post_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; @@ -2686,13 +2742,14 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state, mutex_lock(&psr->lock); - drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes); + drm_WARN_ON(display->drm, + psr->enabled && !crtc_state->active_planes); keep_disabled |= psr->sink_not_reliable; keep_disabled |= !crtc_state->active_planes; /* Display WA #1136: skl, bxt */ - keep_disabled |= DISPLAY_VER(dev_priv) < 11 && + keep_disabled |= DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled; if (!psr->enabled && !keep_disabled) @@ -2717,7 +2774,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state, static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; /* @@ -2725,14 +2782,14 @@ static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) * As all higher states has bit 4 of PSR2 state set we can just wait for * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared. 
*/ - return intel_de_wait_for_clear(dev_priv, - EDP_PSR2_STATUS(dev_priv, cpu_transcoder), + return intel_de_wait_for_clear(display, + EDP_PSR2_STATUS(display, cpu_transcoder), EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50); } static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; /* @@ -2741,18 +2798,11 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) * exit training time + 1.5 ms of aux channel handshake. 50 ms is * defensive enough to cover everything. */ - return intel_de_wait_for_clear(dev_priv, - psr_status_reg(dev_priv, cpu_transcoder), + return intel_de_wait_for_clear(display, + psr_status_reg(display, cpu_transcoder), EDP_PSR_STATUS_STATE_MASK, 50); } -static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp) -{ - return intel_dp_is_edp(intel_dp) ? - _psr2_ready_for_pipe_update_locked(intel_dp) : - _psr1_ready_for_pipe_update_locked(intel_dp); -} - /** * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update * @new_crtc_state: new CRTC state @@ -2762,37 +2812,36 @@ static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp) */ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(new_crtc_state); struct intel_encoder *encoder; if (!new_crtc_state->has_psr) return; - for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, + for_each_intel_encoder_mask_with_psr(display->drm, encoder, new_crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int ret; lockdep_assert_held(&intel_dp->psr.lock); - if (!intel_dp->psr.enabled) + if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled) continue; - if (intel_dp->psr.panel_replay_enabled) - ret = _panel_replay_ready_for_pipe_update_locked(intel_dp); - else if (intel_dp->psr.sel_update_enabled) + if (intel_dp->psr.sel_update_enabled) ret = _psr2_ready_for_pipe_update_locked(intel_dp); else ret = _psr1_ready_for_pipe_update_locked(intel_dp); if (ret) - drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n"); + drm_err(display->drm, + "PSR wait timed out, atomic update may fail\n"); } } static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; i915_reg_t reg; u32 mask; @@ -2803,18 +2852,18 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) { - reg = EDP_PSR2_STATUS(dev_priv, cpu_transcoder); + reg = EDP_PSR2_STATUS(display, cpu_transcoder); mask = EDP_PSR2_STATUS_STATE_MASK; } else { - reg = psr_status_reg(dev_priv, cpu_transcoder); + reg = psr_status_reg(display, cpu_transcoder); mask = EDP_PSR_STATUS_STATE_MASK; } mutex_unlock(&intel_dp->psr.lock); - err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); + err = intel_de_wait_for_clear(display, reg, mask, 50); if (err) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timed out waiting for PSR Idle for re-enable\n"); /* After the unlocked wait, verify that PSR is still wanted! 
*/ @@ -2822,7 +2871,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) return err == 0 && intel_dp->psr.enabled; } -static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) +static int intel_psr_fastset_force(struct intel_display *display) { struct drm_connector_list_iter conn_iter; struct drm_modeset_acquire_ctx ctx; @@ -2830,7 +2879,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) struct drm_connector *conn; int err = 0; - state = drm_atomic_state_alloc(&dev_priv->drm); + state = drm_atomic_state_alloc(display->drm); if (!state) return -ENOMEM; @@ -2840,7 +2889,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) to_intel_atomic_state(state)->internal = true; retry: - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); drm_for_each_connector_iter(conn, &conn_iter) { struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; @@ -2887,7 +2936,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE | I915_PSR_DEBUG_PANEL_REPLAY_DISABLE); @@ -2898,7 +2947,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val) I915_PSR_DEBUG_PANEL_REPLAY_DISABLE | I915_PSR_DEBUG_MODE_MASK) || mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) { - drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val); + drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val); return -EINVAL; } @@ -2923,7 +2972,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val) mutex_unlock(&intel_dp->psr.lock); if (old_mode != mode || old_disable_bits != disable_bits) - ret = intel_psr_fastset_force(dev_priv); + ret = intel_psr_fastset_force(display); return ret; } @@ -2975,7 +3024,7 @@ static void intel_psr_work(struct work_struct *work) static void _psr_invalidate_handle(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) { @@ -2983,20 +3032,20 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp) if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { /* Send one update otherwise lag is observed in screen */ - intel_de_write(dev_priv, - CURSURFLIVE(dev_priv, intel_dp->psr.pipe), + intel_de_write(display, + CURSURFLIVE(display, intel_dp->psr.pipe), 0); return; } - val = man_trk_ctl_enable_bit_get(dev_priv) | - man_trk_ctl_partial_frame_bit_get(dev_priv) | - man_trk_ctl_continuos_full_frame(dev_priv); - intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), + val = man_trk_ctl_enable_bit_get(display) | + man_trk_ctl_partial_frame_bit_get(display) | + man_trk_ctl_continuos_full_frame(display); + intel_de_write(display, + PSR2_MAN_TRK_CTL(display, cpu_transcoder), val); - intel_de_write(dev_priv, - CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0); + intel_de_write(display, + CURSURFLIVE(display, intel_dp->psr.pipe), 0); intel_dp->psr.psr2_sel_fetch_cff_enabled = true; } else { intel_psr_exit(intel_dp); @@ -3005,7 +3054,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp) /** * intel_psr_invalidate - Invalidate PSR - * 
@dev_priv: i915 device + * @display: display device * @frontbuffer_bits: frontbuffer plane tracking bits * @origin: which operation caused the invalidate * @@ -3016,7 +3065,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp) * * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits." */ -void intel_psr_invalidate(struct drm_i915_private *dev_priv, +void intel_psr_invalidate(struct intel_display *display, unsigned frontbuffer_bits, enum fb_op_origin origin) { struct intel_encoder *encoder; @@ -3024,7 +3073,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, if (origin == ORIGIN_FLIP) return; - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { unsigned int pipe_frontbuffer_bits = frontbuffer_bits; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -3054,7 +3103,8 @@ static void tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled || !intel_dp->psr.active) @@ -3075,17 +3125,18 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, static void _psr_flush_handle(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) { if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { /* can we turn CFF off? */ if (intel_dp->psr.busy_frontbuffer_bits == 0) { - u32 val = man_trk_ctl_enable_bit_get(dev_priv) | - man_trk_ctl_partial_frame_bit_get(dev_priv) | - man_trk_ctl_single_full_frame_bit_get(dev_priv) | - man_trk_ctl_continuos_full_frame(dev_priv); + u32 val = man_trk_ctl_enable_bit_get(display) | + man_trk_ctl_partial_frame_bit_get(display) | + man_trk_ctl_single_full_frame_bit_get(display) | + man_trk_ctl_continuos_full_frame(display); /* * Set psr2_sel_fetch_cff_enabled as false to allow selective @@ -3093,11 +3144,11 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) * SU configuration in case update is sent for any reason after * sff bit gets cleared by the HW on next vblank. */ - intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, + PSR2_MAN_TRK_CTL(display, cpu_transcoder), val); - intel_de_write(dev_priv, - CURSURFLIVE(dev_priv, intel_dp->psr.pipe), + intel_de_write(display, + CURSURFLIVE(display, intel_dp->psr.pipe), 0); intel_dp->psr.psr2_sel_fetch_cff_enabled = false; } @@ -3118,7 +3169,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) /** * intel_psr_flush - Flush PSR - * @dev_priv: i915 device + * @display: display device * @frontbuffer_bits: frontbuffer plane tracking bits * @origin: which operation caused the flush * @@ -3129,12 +3180,12 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) * * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. 
*/ -void intel_psr_flush(struct drm_i915_private *dev_priv, +void intel_psr_flush(struct intel_display *display, unsigned frontbuffer_bits, enum fb_op_origin origin) { struct intel_encoder *encoder; - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { unsigned int pipe_frontbuffer_bits = frontbuffer_bits; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -3183,11 +3234,12 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, */ void intel_psr_init(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_connector *connector = intel_dp->attached_connector; struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv))) + if (!(HAS_PSR(display) || HAS_DP20(dev_priv))) return; /* @@ -3199,21 +3251,21 @@ void intel_psr_init(struct intel_dp *intel_dp) * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11. * But GEN12 supports a instance of PSR registers per transcoder. */ - if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) { - drm_dbg_kms(&dev_priv->drm, + if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) { + drm_dbg_kms(display->drm, "PSR condition failed: Port not supported\n"); return; } if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) || - DISPLAY_VER(dev_priv) >= 20) + DISPLAY_VER(display) >= 20) intel_dp->psr.source_panel_replay_support = true; - if (HAS_PSR(dev_priv) && intel_dp_is_edp(intel_dp)) + if (HAS_PSR(display) && intel_dp_is_edp(intel_dp)) intel_dp->psr.source_support = true; /* Set link_standby x link_off defaults */ - if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(display) < 12) /* For new platforms up to TGL let's respect VBT back again */ intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link; @@ -3250,7 +3302,7 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp, static void psr_alpm_check(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct drm_dp_aux *aux = &intel_dp->aux; struct intel_psr *psr = &intel_dp->psr; u8 val; @@ -3261,14 +3313,14 @@ static void psr_alpm_check(struct intel_dp *intel_dp) r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val); if (r != 1) { - drm_err(&dev_priv->drm, "Error reading ALPM status\n"); + drm_err(display->drm, "Error reading ALPM status\n"); return; } if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "ALPM lock timeout error, disabling PSR\n"); /* Clearing error */ @@ -3278,21 +3330,21 @@ static void psr_alpm_check(struct intel_dp *intel_dp) static void psr_capability_changed_check(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_psr *psr = &intel_dp->psr; u8 val; int r; r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); if (r != 1) { - drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n"); + drm_err(display->drm, "Error reading DP_PSR_ESI\n"); return; } if (val & DP_PSR_CAPS_CHANGE) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Sink PSR capability 
changed, disabling PSR\n"); /* Clearing it */ @@ -3309,7 +3361,7 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp) */ void intel_psr_short_pulse(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_psr *psr = &intel_dp->psr; u8 status, error_status; const u8 errors = DP_PSR_RFB_STORAGE_ERROR | @@ -3325,7 +3377,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) goto exit; if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Error reading PSR status or error status\n"); goto exit; } @@ -3338,20 +3390,20 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR sink internal error, disabling PSR\n"); if (error_status & DP_PSR_RFB_STORAGE_ERROR) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR RFB storage error, disabling PSR\n"); if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR VSC SDP uncorrectable error, disabling PSR\n"); if (error_status & DP_PSR_LINK_CRC_ERROR) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSR Link CRC error, disabling PSR\n"); if (error_status & ~errors) - drm_err(&dev_priv->drm, + drm_err(display->drm, "PSR_ERROR_STATUS unhandled errors %x\n", error_status & ~errors); /* clear status register */ @@ -3390,13 +3442,13 @@ bool intel_psr_enabled(struct intel_dp *intel_dp) */ void intel_psr_lock(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); struct intel_encoder *encoder; if (!crtc_state->has_psr) return; - for_each_intel_encoder_mask_with_psr(&i915->drm, encoder, + for_each_intel_encoder_mask_with_psr(display->drm, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -3413,13 +3465,13 @@ void intel_psr_lock(const struct intel_crtc_state *crtc_state) */ void intel_psr_unlock(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); struct intel_encoder *encoder; if (!crtc_state->has_psr) return; - for_each_intel_encoder_mask_with_psr(&i915->drm, encoder, + for_each_intel_encoder_mask_with_psr(display->drm, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -3431,7 +3483,7 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state) static void psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; const char *status = "unknown"; u32 val, status_val; @@ -3451,8 +3503,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) "BUF_ON", "TG_ON" }; - val = intel_de_read(dev_priv, - EDP_PSR2_STATUS(dev_priv, cpu_transcoder)); + val = intel_de_read(display, + EDP_PSR2_STATUS(display, cpu_transcoder)); status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; @@ -3467,7 +3519,8 @@ psr_source_status(struct 
intel_dp *intel_dp, struct seq_file *m) "SRDOFFACK", "SRDENT_ON", }; - val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder)); + val = intel_de_read(display, + psr_status_reg(display, cpu_transcoder)); status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; @@ -3528,7 +3581,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp, static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; struct intel_psr *psr = &intel_dp->psr; intel_wakeref_t wakeref; @@ -3553,20 +3607,20 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) } if (psr->panel_replay_enabled) { - val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder)); + val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder)); if (intel_dp_is_edp(intel_dp)) - psr2_ctl = intel_de_read(dev_priv, - EDP_PSR2_CTL(dev_priv, + psr2_ctl = intel_de_read(display, + EDP_PSR2_CTL(display, cpu_transcoder)); enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE; } else if (psr->sel_update_enabled) { - val = intel_de_read(dev_priv, - EDP_PSR2_CTL(dev_priv, cpu_transcoder)); + val = intel_de_read(display, + EDP_PSR2_CTL(display, cpu_transcoder)); enabled = val & EDP_PSR2_ENABLE; } else { - val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)); + val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)); enabled = val & EDP_PSR_ENABLE; } seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n", @@ -3581,7 +3635,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) /* * SKL+ Perf counter is reset to 0 everytime DC state is entered */ - val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder)); + val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder)); seq_printf(m, "Performance counter: %u\n", REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val)); @@ -3600,8 +3654,8 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) * frame boundary between register reads */ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { - val = intel_de_read(dev_priv, - PSR2_SU_STATUS(dev_priv, cpu_transcoder, frame)); + val = intel_de_read(display, + PSR2_SU_STATUS(display, cpu_transcoder, frame)); su_frames_val[frame / 3] = val; } @@ -3629,15 +3683,15 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) static int i915_edp_psr_status_show(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = m->private; + struct intel_display *display = m->private; struct intel_dp *intel_dp = NULL; struct intel_encoder *encoder; - if (!HAS_PSR(dev_priv)) + if (!HAS_PSR(display)) return -ENODEV; /* Find the first EDP which supports PSR */ - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { intel_dp = enc_to_intel_dp(encoder); break; } @@ -3652,18 +3706,19 @@ DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status); static int i915_edp_psr_debug_set(void *data, u64 val) { - struct drm_i915_private *dev_priv = data; + struct intel_display *display = data; + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; intel_wakeref_t wakeref; int ret = -ENODEV; - if (!HAS_PSR(dev_priv)) + if 
(!HAS_PSR(display)) return ret; - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); + drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); @@ -3679,13 +3734,13 @@ i915_edp_psr_debug_set(void *data, u64 val) static int i915_edp_psr_debug_get(void *data, u64 *val) { - struct drm_i915_private *dev_priv = data; + struct intel_display *display = data; struct intel_encoder *encoder; - if (!HAS_PSR(dev_priv)) + if (!HAS_PSR(display)) return -ENODEV; - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); // TODO: split to each transcoder's PSR debug state @@ -3700,15 +3755,15 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, i915_edp_psr_debug_get, i915_edp_psr_debug_set, "%llu\n"); -void intel_psr_debugfs_register(struct drm_i915_private *i915) +void intel_psr_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root, - i915, &i915_edp_psr_debug_fops); + display, &i915_edp_psr_debug_fops); debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root, - i915, &i915_edp_psr_status_fops); + display, &i915_edp_psr_status_fops); } static const char *psr_mode_str(struct intel_dp *intel_dp) @@ -3789,6 +3844,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_status); void intel_psr_connector_debugfs_add(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct dentry *root = connector->base.debugfs_entry; @@ -3801,7 +3857,7 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector) debugfs_create_file("i915_psr_sink_status", 0444, root, connector, &i915_psr_sink_status_fops); - if (HAS_PSR(i915) || HAS_DP20(i915)) + if (HAS_PSR(display) || HAS_DP20(i915)) debugfs_create_file("i915_psr_status", 0444, root, connector, &i915_psr_status_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index d483c85870e1db..6eb5f15f674faa 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -11,11 +11,11 @@ enum fb_op_origin; struct drm_connector; struct drm_connector_state; -struct drm_i915_private; struct intel_atomic_state; struct intel_connector; struct intel_crtc; struct intel_crtc_state; +struct intel_display; struct intel_dp; struct intel_encoder; struct intel_plane; @@ -25,6 +25,8 @@ struct intel_plane_state; (intel_dp)->psr.source_panel_replay_support) bool intel_encoder_can_psr(struct intel_encoder *encoder); +bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void intel_psr_init_dpcd(struct intel_dp *intel_dp); void intel_psr_enable_sink(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); @@ -35,10 +37,10 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state, void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value); -void intel_psr_invalidate(struct drm_i915_private 
*dev_priv, +void intel_psr_invalidate(struct intel_display *display, unsigned frontbuffer_bits, enum fb_op_origin origin); -void intel_psr_flush(struct drm_i915_private *dev_priv, +void intel_psr_flush(struct intel_display *display, unsigned frontbuffer_bits, enum fb_op_origin origin); void intel_psr_init(struct intel_dp *intel_dp); @@ -60,6 +62,6 @@ void intel_psr_resume(struct intel_dp *intel_dp); void intel_psr_lock(const struct intel_crtc_state *crtc_state); void intel_psr_unlock(const struct intel_crtc_state *crtc_state); void intel_psr_connector_debugfs_add(struct intel_connector *connector); -void intel_psr_debugfs_register(struct drm_i915_private *i915); +void intel_psr_debugfs_register(struct intel_display *display); #endif /* __INTEL_PSR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index dfd8b4960e6d68..29b56d53a340a0 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -282,7 +282,7 @@ void intel_init_dpcd_quirks(struct intel_dp *intel_dp, !memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) && (!memcmp(q->sink_device_id, ident->device_id, sizeof(ident->device_id)) || - !memchr_inv(q->sink_device_id, 0, sizeof(q->sink_device_id)))) + mem_is_zero(q->sink_device_id, sizeof(q->sink_device_id)))) q->hook(intel_dp); } } diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index d0d712405129bd..7cc519b402e90f 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -95,7 +95,7 @@ struct intel_sdvo { struct intel_encoder base; struct i2c_adapter *i2c; - u8 slave_addr; + u8 target_addr; struct intel_sdvo_ddc ddc[3]; @@ -255,13 +255,13 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); struct i2c_msg msgs[] = { { - .addr = intel_sdvo->slave_addr, + .addr = intel_sdvo->target_addr, .flags = 0, .len = 1, .buf = &addr, }, { - .addr = intel_sdvo->slave_addr, + .addr = intel_sdvo->target_addr, .flags = I2C_M_RD, .len = 1, .buf = ch, @@ -483,14 +483,14 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); for (i = 0; i < args_len; i++) { - msgs[i].addr = intel_sdvo->slave_addr; + msgs[i].addr = intel_sdvo->target_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2 *i; buf[2*i + 0] = SDVO_I2C_ARG_0 - i; buf[2*i + 1] = ((u8*)args)[i]; } - msgs[i].addr = intel_sdvo->slave_addr; + msgs[i].addr = intel_sdvo->target_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2*i; @@ -499,12 +499,12 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, /* the following two are to read the response */ status = SDVO_I2C_CMD_STATUS; - msgs[i+1].addr = intel_sdvo->slave_addr; + msgs[i+1].addr = intel_sdvo->target_addr; msgs[i+1].flags = 0; msgs[i+1].len = 1; msgs[i+1].buf = &status; - msgs[i+2].addr = intel_sdvo->slave_addr; + msgs[i+2].addr = intel_sdvo->target_addr; msgs[i+2].flags = I2C_M_RD; msgs[i+2].len = 1; msgs[i+2].buf = &status; @@ -2652,9 +2652,9 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo) else pin = GMBUS_PIN_DPB; - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] I2C pin %d, slave addr 0x%x\n", + drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] I2C pin %d, target addr 0x%x\n", sdvo->base.base.base.id, sdvo->base.base.name, - pin, sdvo->slave_addr); + pin, 
sdvo->target_addr); sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); @@ -2680,7 +2680,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo) } static u8 -intel_sdvo_get_slave_addr(struct intel_sdvo *sdvo) +intel_sdvo_get_target_addr(struct intel_sdvo *sdvo) { struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev); const struct sdvo_device_mapping *my_mapping, *other_mapping; @@ -2694,15 +2694,15 @@ intel_sdvo_get_slave_addr(struct intel_sdvo *sdvo) } /* If the BIOS described our SDVO device, take advantage of it. */ - if (my_mapping->slave_addr) - return my_mapping->slave_addr; + if (my_mapping->target_addr) + return my_mapping->target_addr; /* * If the BIOS only described a different SDVO device, use the * address that it isn't using. */ - if (other_mapping->slave_addr) { - if (other_mapping->slave_addr == 0x70) + if (other_mapping->target_addr) { + if (other_mapping->target_addr == 0x70) return 0x72; else return 0x70; @@ -2919,6 +2919,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type) static bool intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type) { + struct intel_display *display = to_intel_display(&intel_sdvo->base); struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_i915_private *i915 = to_i915(encoder->dev); struct drm_connector *connector; @@ -2946,7 +2947,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type) if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; - intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL); + intel_bios_init_panel_late(display, &intel_connector->panel, NULL, NULL); /* * Fetch modes from VBT. For SDVO prefer the VBT mode since some @@ -3405,7 +3406,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, "SDVO %c", port_name(port)); intel_sdvo->sdvo_reg = sdvo_reg; - intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(intel_sdvo) >> 1; + intel_sdvo->target_addr = intel_sdvo_get_target_addr(intel_sdvo) >> 1; intel_sdvo_select_i2c_bus(intel_sdvo); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index f8cceb3e5d8e51..e657b09ede999b 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -48,9 +48,9 @@ #include "intel_sprite.h" #include "intel_sprite_regs.h" -static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite) +static char sprite_name(struct intel_display *display, enum pipe pipe, int sprite) { - return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A'; + return pipe * DISPLAY_RUNTIME_INFO(display)->num_sprites[pipe] + sprite + 'A'; } static void i9xx_plane_linear_gamma(u16 gamma[8]) @@ -67,7 +67,7 @@ static void chv_sprite_update_csc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum plane_id plane_id = plane->id; /* @@ -100,35 +100,35 @@ chv_sprite_update_csc(const struct intel_plane_state *plane_state) if (!fb->format->is_yuv) return; - intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id), + intel_de_write_fw(display, SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id), + intel_de_write_fw(display, SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - 
intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id), + intel_de_write_fw(display, SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - intel_de_write_fw(dev_priv, SPCSCC01(plane_id), + intel_de_write_fw(display, SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0])); - intel_de_write_fw(dev_priv, SPCSCC23(plane_id), + intel_de_write_fw(display, SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2])); - intel_de_write_fw(dev_priv, SPCSCC45(plane_id), + intel_de_write_fw(display, SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4])); - intel_de_write_fw(dev_priv, SPCSCC67(plane_id), + intel_de_write_fw(display, SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6])); - intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8])); + intel_de_write_fw(display, SPCSCC8(plane_id), SPCSC_C0(csc[8])); - intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id), + intel_de_write_fw(display, SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0)); - intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id), + intel_de_write_fw(display, SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512)); - intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id), + intel_de_write_fw(display, SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512)); - intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id), + intel_de_write_fw(display, SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); - intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id), + intel_de_write_fw(display, SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); - intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id), + intel_de_write_fw(display, SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); } @@ -139,7 +139,7 @@ static void vlv_sprite_update_clrc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; @@ -168,9 +168,9 @@ vlv_sprite_update_clrc(const struct intel_plane_state *plane_state) } /* FIXME these register are single buffered :( */ - intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id), + intel_de_write_fw(display, SPCLRC0(pipe, plane_id), SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness)); - intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id), + intel_de_write_fw(display, SPCLRC1(pipe, plane_id), SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos)); } @@ -357,7 +357,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; @@ -373,7 +373,7 @@ static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) /* FIXME these register are single buffered :( */ /* The two end points are implicit (0.0 and 1.0) */ for (i = 1; i < 8 - 1; i++) - intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1), + intel_de_write_fw(display, SPGAMC(pipe, plane_id, i - 1), gamma[i] << 16 | gamma[i] << 8 | gamma[i]); } @@ -382,7 +382,7 @@ vlv_sprite_update_noarm(struct intel_plane *plane, const struct 
intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; int crtc_x = plane_state->uapi.dst.x1; @@ -390,11 +390,11 @@ vlv_sprite_update_noarm(struct intel_plane *plane, u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), + intel_de_write_fw(display, SPSTRIDE(pipe, plane_id), plane_state->view.color_plane[0].mapping_stride); - intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), + intel_de_write_fw(display, SPPOS(pipe, plane_id), SP_POS_Y(crtc_y) | SP_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), + intel_de_write_fw(display, SPSIZE(pipe, plane_id), SP_HEIGHT(crtc_h - 1) | SP_WIDTH(crtc_w - 1)); } @@ -403,6 +403,7 @@ vlv_sprite_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; @@ -420,18 +421,18 @@ vlv_sprite_update_arm(struct intel_plane *plane, chv_sprite_update_csc(plane_state); if (key->flags) { - intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id), + intel_de_write_fw(display, SPKEYMINVAL(pipe, plane_id), key->min_value); - intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id), + intel_de_write_fw(display, SPKEYMSK(pipe, plane_id), key->channel_mask); - intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id), + intel_de_write_fw(display, SPKEYMAXVAL(pipe, plane_id), key->max_value); } - intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); + intel_de_write_fw(display, SPCONSTALPHA(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset); - intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), + intel_de_write_fw(display, SPLINOFF(pipe, plane_id), linear_offset); + intel_de_write_fw(display, SPTILEOFF(pipe, plane_id), SP_OFFSET_Y(y) | SP_OFFSET_X(x)); /* @@ -439,8 +440,8 @@ vlv_sprite_update_arm(struct intel_plane *plane, * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ - intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl); - intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), + intel_de_write_fw(display, SPCNTR(pipe, plane_id), sprctl); + intel_de_write_fw(display, SPSURF(pipe, plane_id), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); vlv_sprite_update_clrc(plane_state); @@ -451,18 +452,19 @@ static void vlv_sprite_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; - intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0); + intel_de_write_fw(display, SPCNTR(pipe, plane_id), 0); + intel_de_write_fw(display, SPSURF(pipe, plane_id), 0); } static bool vlv_sprite_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; @@ -474,7 +476,7 @@ vlv_sprite_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE; + ret = intel_de_read(display, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE; *pipe = plane->pipe; @@ -766,7 +768,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state, static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; u16 gamma[18]; int i; @@ -778,17 +780,17 @@ static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) /* FIXME these register are single buffered :( */ for (i = 0; i < 16; i++) - intel_de_write_fw(dev_priv, SPRGAMC(pipe, i), + intel_de_write_fw(display, SPRGAMC(pipe, i), gamma[i] << 20 | gamma[i] << 10 | gamma[i]); - intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]); - intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]); - intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]); + intel_de_write_fw(display, SPRGAMC16(pipe, 0), gamma[i]); + intel_de_write_fw(display, SPRGAMC16(pipe, 1), gamma[i]); + intel_de_write_fw(display, SPRGAMC16(pipe, 2), gamma[i]); i++; - intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]); - intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]); - intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]); + intel_de_write_fw(display, SPRGAMC17(pipe, 0), gamma[i]); + intel_de_write_fw(display, SPRGAMC17(pipe, 1), gamma[i]); + intel_de_write_fw(display, SPRGAMC17(pipe, 2), gamma[i]); i++; } @@ -797,6 +799,7 @@ ivb_sprite_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; int crtc_x = plane_state->uapi.dst.x1; @@ -812,14 +815,14 @@ ivb_sprite_update_noarm(struct intel_plane *plane, SPRITE_SRC_WIDTH(src_w - 1) | SPRITE_SRC_HEIGHT(src_h - 1); - intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), + intel_de_write_fw(display, SPRSTRIDE(pipe), 
plane_state->view.color_plane[0].mapping_stride); - intel_de_write_fw(dev_priv, SPRPOS(pipe), + intel_de_write_fw(display, SPRPOS(pipe), SPRITE_POS_Y(crtc_y) | SPRITE_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, SPRSIZE(pipe), + intel_de_write_fw(display, SPRSIZE(pipe), SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1)); if (IS_IVYBRIDGE(dev_priv)) - intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); + intel_de_write_fw(display, SPRSCALE(pipe), sprscale); } static void @@ -827,6 +830,7 @@ ivb_sprite_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; @@ -840,20 +844,20 @@ ivb_sprite_update_arm(struct intel_plane *plane, linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); if (key->flags) { - intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value); - intel_de_write_fw(dev_priv, SPRKEYMSK(pipe), + intel_de_write_fw(display, SPRKEYVAL(pipe), key->min_value); + intel_de_write_fw(display, SPRKEYMSK(pipe), key->channel_mask); - intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value); + intel_de_write_fw(display, SPRKEYMAX(pipe), key->max_value); } /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET * register */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - intel_de_write_fw(dev_priv, SPROFFSET(pipe), + intel_de_write_fw(display, SPROFFSET(pipe), SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } else { - intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset); - intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), + intel_de_write_fw(display, SPRLINOFF(pipe), linear_offset); + intel_de_write_fw(display, SPRTILEOFF(pipe), SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } @@ -862,8 +866,8 @@ ivb_sprite_update_arm(struct intel_plane *plane, * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ - intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl); - intel_de_write_fw(dev_priv, SPRSURF(pipe), + intel_de_write_fw(display, SPRCTL(pipe), sprctl); + intel_de_write_fw(display, SPRSURF(pipe), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); ivb_sprite_update_gamma(plane_state); @@ -873,20 +877,22 @@ static void ivb_sprite_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - intel_de_write_fw(dev_priv, SPRCTL(pipe), 0); + intel_de_write_fw(display, SPRCTL(pipe), 0); /* Disable the scaler */ if (IS_IVYBRIDGE(dev_priv)) - intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0); - intel_de_write_fw(dev_priv, SPRSURF(pipe), 0); + intel_de_write_fw(display, SPRSCALE(pipe), 0); + intel_de_write_fw(display, SPRSURF(pipe), 0); } static bool ivb_sprite_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; @@ -897,7 +903,7 @@ ivb_sprite_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE; + ret = intel_de_read(display, SPRCTL(plane->pipe)) & SPRITE_ENABLE; *pipe = plane->pipe; @@ -1073,7 +1079,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; u16 gamma[8]; @@ -1088,7 +1094,7 @@ static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state) /* FIXME these register are single buffered :( */ /* The two end points are implicit (0.0 and 1.0) */ for (i = 1; i < 8 - 1; i++) - intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1), + intel_de_write_fw(display, DVSGAMC_G4X(pipe, i - 1), gamma[i] << 16 | gamma[i] << 8 | gamma[i]); } @@ -1103,7 +1109,7 @@ static void ilk_sprite_linear_gamma(u16 gamma[17]) static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; u16 gamma[17]; @@ -1117,12 +1123,12 @@ static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) /* FIXME these register are single buffered :( */ for (i = 0; i < 16; i++) - intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i), + intel_de_write_fw(display, DVSGAMC_ILK(pipe, i), gamma[i] << 20 | gamma[i] << 10 | gamma[i]); - intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]); - intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]); - intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]); + intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 0), gamma[i]); + intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 1), gamma[i]); + intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 2), gamma[i]); i++; } @@ -1131,7 +1137,7 @@ 
g4x_sprite_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; @@ -1146,13 +1152,13 @@ g4x_sprite_update_noarm(struct intel_plane *plane, DVS_SRC_WIDTH(src_w - 1) | DVS_SRC_HEIGHT(src_h - 1); - intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), + intel_de_write_fw(display, DVSSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); - intel_de_write_fw(dev_priv, DVSPOS(pipe), + intel_de_write_fw(display, DVSPOS(pipe), DVS_POS_Y(crtc_y) | DVS_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, DVSSIZE(pipe), + intel_de_write_fw(display, DVSSIZE(pipe), DVS_HEIGHT(crtc_h - 1) | DVS_WIDTH(crtc_w - 1)); - intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); + intel_de_write_fw(display, DVSSCALE(pipe), dvsscale); } static void @@ -1160,6 +1166,7 @@ g4x_sprite_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; @@ -1173,14 +1180,14 @@ g4x_sprite_update_arm(struct intel_plane *plane, linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); if (key->flags) { - intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value); - intel_de_write_fw(dev_priv, DVSKEYMSK(pipe), + intel_de_write_fw(display, DVSKEYVAL(pipe), key->min_value); + intel_de_write_fw(display, DVSKEYMSK(pipe), key->channel_mask); - intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value); + intel_de_write_fw(display, DVSKEYMAX(pipe), key->max_value); } - intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset); - intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), + intel_de_write_fw(display, DVSLINOFF(pipe), linear_offset); + intel_de_write_fw(display, DVSTILEOFF(pipe), DVS_OFFSET_Y(y) | DVS_OFFSET_X(x)); /* @@ -1188,8 +1195,8 @@ g4x_sprite_update_arm(struct intel_plane *plane, * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ - intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr); - intel_de_write_fw(dev_priv, DVSSURF(pipe), + intel_de_write_fw(display, DVSCNTR(pipe), dvscntr); + intel_de_write_fw(display, DVSSURF(pipe), intel_plane_ggtt_offset(plane_state) + dvssurf_offset); if (IS_G4X(dev_priv)) @@ -1202,19 +1209,20 @@ static void g4x_sprite_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; - intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0); + intel_de_write_fw(display, DVSCNTR(pipe), 0); /* Disable the scaler */ - intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0); - intel_de_write_fw(dev_priv, DVSSURF(pipe), 0); + intel_de_write_fw(display, DVSSCALE(pipe), 0); + intel_de_write_fw(display, DVSSURF(pipe), 0); } static bool g4x_sprite_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; @@ -1225,7 +1233,7 @@ g4x_sprite_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE; + ret = intel_de_read(display, DVSCNTR(plane->pipe)) & DVS_ENABLE; *pipe = plane->pipe; @@ -1255,7 +1263,7 @@ static int g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_display *display = to_intel_display(crtc_state); const struct drm_framebuffer *fb = plane_state->hw.fb; const struct drm_rect *src = &plane_state->uapi.src; const struct drm_rect *dst = &plane_state->uapi.dst; @@ -1281,7 +1289,8 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { if (src_h & 1) { - drm_dbg_kms(&i915->drm, "Source height must be even with interlaced modes\n"); + drm_dbg_kms(display->drm, + "Source height must be even with interlaced modes\n"); return -EINVAL; } min_height = 6; @@ -1293,19 +1302,22 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, if (src_w < min_width || src_h < min_height || src_w > 2048 || src_h > 2048) { - drm_dbg_kms(&i915->drm, "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n", + drm_dbg_kms(display->drm, + "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n", src_w, src_h, min_width, min_height, 2048, 2048); return -EINVAL; } if (width_bytes > 4096) { - drm_dbg_kms(&i915->drm, "Fetch width (%d) exceeds hardware max with scaling (%u)\n", + drm_dbg_kms(display->drm, + "Fetch width (%d) exceeds hardware max with scaling (%u)\n", width_bytes, 4096); return -EINVAL; } if (stride > 4096) { - drm_dbg_kms(&i915->drm, "Stride (%u) exceeds hardware max with scaling (%u)\n", + drm_dbg_kms(display->drm, + "Stride (%u) exceeds hardware max with scaling (%u)\n", stride, 4096); return -EINVAL; } @@ -1317,6 +1329,7 @@ static int g4x_sprite_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); int min_scale = DRM_PLANE_NO_SCALING; @@ -1324,7 +1337,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, 
int ret; if (g4x_fb_scalable(plane_state->hw.fb)) { - if (DISPLAY_VER(dev_priv) < 7) { + if (DISPLAY_VER(display) < 7) { min_scale = 1; max_scale = 16 << 16; } else if (IS_IVYBRIDGE(dev_priv)) { @@ -1353,7 +1366,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - if (DISPLAY_VER(dev_priv) >= 7) + if (DISPLAY_VER(display) >= 7) plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state); else plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state); @@ -1364,6 +1377,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, int chv_plane_check_rotation(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); unsigned int rotation = plane_state->hw.rotation; @@ -1371,7 +1385,7 @@ int chv_plane_check_rotation(const struct intel_plane_state *plane_state) if (IS_CHERRYVIEW(dev_priv) && rotation & DRM_MODE_ROTATE_180 && rotation & DRM_MODE_REFLECT_X) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Cannot rotate and reflect at the same time\n"); return -EINVAL; } @@ -1573,6 +1587,7 @@ struct intel_plane * intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int sprite) { + struct intel_display *display = &dev_priv->display; struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; @@ -1604,7 +1619,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, } plane_funcs = &vlv_sprite_funcs; - } else if (DISPLAY_VER(dev_priv) >= 7) { + } else if (DISPLAY_VER(display) >= 7) { plane->update_noarm = ivb_sprite_update_noarm; plane->update_arm = ivb_sprite_update_arm; plane->disable_arm = ivb_sprite_disable_arm; @@ -1663,11 +1678,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); - ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, + ret = drm_universal_plane_init(display->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, - "sprite %c", sprite_name(dev_priv, pipe, sprite)); + "sprite %c", sprite_name(display, pipe, sprite)); kfree(modifiers); if (ret) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 9887967b2ca5c5..6f2ee7dbc43b35 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -393,6 +393,9 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; u32 val; + if (DISPLAY_VER(i915) >= 14) + return; + drm_WARN_ON(&i915->drm, lane_reversal && tc->mode != TC_PORT_LEGACY); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 9df0f12639135e..581844d1db9ae5 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -914,8 +914,8 @@ static struct intel_tv *intel_attached_tv(struct intel_connector *connector) static bool intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 tmp = intel_de_read(dev_priv, TV_CTL); + struct intel_display *display = to_intel_display(encoder); + u32 tmp = intel_de_read(display, TV_CTL); *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT; @@ -928,13 
+928,12 @@ intel_enable_tv(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(state); /* Prevents vblank waits from timing out in intel_tv_detect_type() */ intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc)); - intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE); + intel_de_rmw(display, TV_CTL, 0, TV_ENC_ENABLE); } static void @@ -943,10 +942,9 @@ intel_disable_tv(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(state); - intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0); + intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0); } static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state) @@ -960,9 +958,10 @@ static enum drm_mode_status intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_i915_private *i915 = to_i915(connector->dev); const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); - int max_dotclk = i915->display.cdclk.max_dotclk_freq; + int max_dotclk = display->cdclk.max_dotclk_freq; enum drm_mode_status status; status = intel_cpu_transcoder_mode_valid(i915, mode); @@ -1092,6 +1091,7 @@ static void intel_tv_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -1104,11 +1104,11 @@ intel_tv_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT); - tv_ctl = intel_de_read(dev_priv, TV_CTL); - hctl1 = intel_de_read(dev_priv, TV_H_CTL_1); - hctl3 = intel_de_read(dev_priv, TV_H_CTL_3); - vctl1 = intel_de_read(dev_priv, TV_V_CTL_1); - vctl2 = intel_de_read(dev_priv, TV_V_CTL_2); + tv_ctl = intel_de_read(display, TV_CTL); + hctl1 = intel_de_read(display, TV_H_CTL_1); + hctl3 = intel_de_read(display, TV_H_CTL_3); + vctl1 = intel_de_read(display, TV_V_CTL_1); + vctl2 = intel_de_read(display, TV_V_CTL_2); tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT; tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT; @@ -1143,17 +1143,17 @@ intel_tv_get_config(struct intel_encoder *encoder, break; } - tmp = intel_de_read(dev_priv, TV_WIN_POS); + tmp = intel_de_read(display, TV_WIN_POS); xpos = tmp >> 16; ypos = tmp & 0xffff; - tmp = intel_de_read(dev_priv, TV_WIN_SIZE); + tmp = intel_de_read(display, TV_WIN_SIZE); xsize = tmp >> 16; ysize = tmp & 0xffff; intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock); - drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", + drm_dbg_kms(display->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&mode)); intel_tv_scale_mode_horiz(&mode, hdisplay, @@ -1171,10 +1171,10 @@ intel_tv_get_config(struct intel_encoder *encoder, I915_MODE_FLAG_USE_SCANLINE_COUNTER; } -static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv, +static bool intel_tv_source_too_wide(struct intel_display *display, int hdisplay) { - return 
DISPLAY_VER(dev_priv) == 3 && hdisplay > 1024; + return DISPLAY_VER(display) == 3 && hdisplay > 1024; } static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode, @@ -1192,6 +1192,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_atomic_state *state = to_intel_atomic_state(pipe_config->uapi.state); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); @@ -1214,7 +1215,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n"); + drm_dbg_kms(display->drm, "forcing bpc to 8 for TV\n"); pipe_config->pipe_bpp = 8*3; pipe_config->port_clock = tv_mode->clock; @@ -1228,14 +1229,14 @@ intel_tv_compute_config(struct intel_encoder *encoder, intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock); drm_mode_set_crtcinfo(adjusted_mode, 0); - if (intel_tv_source_too_wide(dev_priv, hdisplay) || + if (intel_tv_source_too_wide(display, hdisplay) || !intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) { int extra, top, bottom; extra = adjusted_mode->crtc_vdisplay - vdisplay; if (extra < 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "No vertical scaling for >1024 pixel wide modes\n"); return -EINVAL; } @@ -1269,7 +1270,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, tv_conn_state->bypass_vfilter = false; } - drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", + drm_dbg_kms(display->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(adjusted_mode)); /* @@ -1355,7 +1356,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, } static void -set_tv_mode_timings(struct drm_i915_private *dev_priv, +set_tv_mode_timings(struct intel_display *display, const struct tv_mode *tv_mode, bool burst_ena) { @@ -1401,32 +1402,32 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv, vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) | (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT); - intel_de_write(dev_priv, TV_H_CTL_1, hctl1); - intel_de_write(dev_priv, TV_H_CTL_2, hctl2); - intel_de_write(dev_priv, TV_H_CTL_3, hctl3); - intel_de_write(dev_priv, TV_V_CTL_1, vctl1); - intel_de_write(dev_priv, TV_V_CTL_2, vctl2); - intel_de_write(dev_priv, TV_V_CTL_3, vctl3); - intel_de_write(dev_priv, TV_V_CTL_4, vctl4); - intel_de_write(dev_priv, TV_V_CTL_5, vctl5); - intel_de_write(dev_priv, TV_V_CTL_6, vctl6); - intel_de_write(dev_priv, TV_V_CTL_7, vctl7); + intel_de_write(display, TV_H_CTL_1, hctl1); + intel_de_write(display, TV_H_CTL_2, hctl2); + intel_de_write(display, TV_H_CTL_3, hctl3); + intel_de_write(display, TV_V_CTL_1, vctl1); + intel_de_write(display, TV_V_CTL_2, vctl2); + intel_de_write(display, TV_V_CTL_3, vctl3); + intel_de_write(display, TV_V_CTL_4, vctl4); + intel_de_write(display, TV_V_CTL_5, vctl5); + intel_de_write(display, TV_V_CTL_6, vctl6); + intel_de_write(display, TV_V_CTL_7, vctl7); } -static void set_color_conversion(struct drm_i915_private *dev_priv, +static void set_color_conversion(struct intel_display *display, const struct color_conversion *color_conversion) { - intel_de_write(dev_priv, TV_CSC_Y, + intel_de_write(display, TV_CSC_Y, (color_conversion->ry << 16) | color_conversion->gy); - intel_de_write(dev_priv, TV_CSC_Y2, + intel_de_write(display, TV_CSC_Y2, (color_conversion->by 
<< 16) | color_conversion->ay); - intel_de_write(dev_priv, TV_CSC_U, + intel_de_write(display, TV_CSC_U, (color_conversion->ru << 16) | color_conversion->gu); - intel_de_write(dev_priv, TV_CSC_U2, + intel_de_write(display, TV_CSC_U2, (color_conversion->bu << 16) | color_conversion->au); - intel_de_write(dev_priv, TV_CSC_V, + intel_de_write(display, TV_CSC_V, (color_conversion->rv << 16) | color_conversion->gv); - intel_de_write(dev_priv, TV_CSC_V2, + intel_de_write(display, TV_CSC_V2, (color_conversion->bv << 16) | color_conversion->av); } @@ -1435,6 +1436,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_tv *intel_tv = enc_to_tv(encoder); @@ -1450,7 +1452,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, int xpos, ypos; unsigned int xsize, ysize; - tv_ctl = intel_de_read(dev_priv, TV_CTL); + tv_ctl = intel_de_read(display, TV_CTL); tv_ctl &= TV_CTL_SAVE; switch (intel_tv->type) { @@ -1525,21 +1527,21 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, if (IS_I915GM(dev_priv)) tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; - set_tv_mode_timings(dev_priv, tv_mode, burst_ena); + set_tv_mode_timings(display, tv_mode, burst_ena); - intel_de_write(dev_priv, TV_SC_CTL_1, scctl1); - intel_de_write(dev_priv, TV_SC_CTL_2, scctl2); - intel_de_write(dev_priv, TV_SC_CTL_3, scctl3); + intel_de_write(display, TV_SC_CTL_1, scctl1); + intel_de_write(display, TV_SC_CTL_2, scctl2); + intel_de_write(display, TV_SC_CTL_3, scctl3); - set_color_conversion(dev_priv, color_conversion); + set_color_conversion(display, color_conversion); - if (DISPLAY_VER(dev_priv) >= 4) - intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000); + if (DISPLAY_VER(display) >= 4) + intel_de_write(display, TV_CLR_KNOBS, 0x00404000); else - intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000); + intel_de_write(display, TV_CLR_KNOBS, 0x00606000); if (video_levels) - intel_de_write(dev_priv, TV_CLR_LEVEL, + intel_de_write(display, TV_CLR_LEVEL, ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder); @@ -1548,7 +1550,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, tv_filter_ctl = TV_AUTO_SCALE; if (tv_conn_state->bypass_vfilter) tv_filter_ctl |= TV_V_FILTER_BYPASS; - intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl); + intel_de_write(display, TV_FILTER_CTL_1, tv_filter_ctl); xsize = tv_mode->hblank_start - tv_mode->hblank_end; ysize = intel_tv_mode_vdisplay(tv_mode); @@ -1559,31 +1561,32 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, conn_state->tv.margins.right); ysize -= (tv_conn_state->margins.top + tv_conn_state->margins.bottom); - intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos); - intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize); + intel_de_write(display, TV_WIN_POS, (xpos << 16) | ypos); + intel_de_write(display, TV_WIN_SIZE, (xsize << 16) | ysize); j = 0; for (i = 0; i < 60; i++) - intel_de_write(dev_priv, TV_H_LUMA(i), + intel_de_write(display, TV_H_LUMA(i), tv_mode->filter_table[j++]); for (i = 0; i < 60; i++) - intel_de_write(dev_priv, TV_H_CHROMA(i), + intel_de_write(display, TV_H_CHROMA(i), 
tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) - intel_de_write(dev_priv, TV_V_LUMA(i), + intel_de_write(display, TV_V_LUMA(i), tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) - intel_de_write(dev_priv, TV_V_CHROMA(i), + intel_de_write(display, TV_V_CHROMA(i), tv_mode->filter_table[j++]); - intel_de_write(dev_priv, TV_DAC, - intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE); - intel_de_write(dev_priv, TV_CTL, tv_ctl); + intel_de_write(display, TV_DAC, + intel_de_read(display, TV_DAC) & TV_DAC_SAVE); + intel_de_write(display, TV_CTL, tv_ctl); } static int intel_tv_detect_type(struct intel_tv *intel_tv, struct drm_connector *connector) { + struct intel_display *display = to_intel_display(connector->dev); struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1600,8 +1603,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv, spin_unlock_irq(&dev_priv->irq_lock); } - save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC); - save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL); + save_tv_dac = tv_dac = intel_de_read(display, TV_DAC); + save_tv_ctl = tv_ctl = intel_de_read(display, TV_CTL); /* Poll for TV detection */ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK); @@ -1627,15 +1630,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv, tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); - intel_de_write(dev_priv, TV_CTL, tv_ctl); - intel_de_write(dev_priv, TV_DAC, tv_dac); - intel_de_posting_read(dev_priv, TV_DAC); + intel_de_write(display, TV_CTL, tv_ctl); + intel_de_write(display, TV_DAC, tv_dac); + intel_de_posting_read(display, TV_DAC); intel_crtc_wait_for_next_vblank(crtc); type = -1; - tv_dac = intel_de_read(dev_priv, TV_DAC); - drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac); + tv_dac = intel_de_read(display, TV_DAC); + drm_dbg_kms(display->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac); /* * A B C * 0 1 1 Composite @@ -1643,25 +1646,25 @@ intel_tv_detect_type(struct intel_tv *intel_tv, * 0 0 0 Component */ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Detected Composite TV connection\n"); type = DRM_MODE_CONNECTOR_Composite; } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Detected S-Video TV connection\n"); type = DRM_MODE_CONNECTOR_SVIDEO; } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Detected Component TV connection\n"); type = DRM_MODE_CONNECTOR_Component; } else { - drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n"); + drm_dbg_kms(display->drm, "Unrecognised TV connection\n"); type = -1; } - intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); - intel_de_write(dev_priv, TV_CTL, save_tv_ctl); - intel_de_posting_read(dev_priv, TV_CTL); + intel_de_write(display, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); + intel_de_write(display, TV_CTL, save_tv_ctl); + intel_de_posting_read(display, TV_CTL); /* For unknown reasons the hw barfs if we don't do this vblank wait. 
*/ intel_crtc_wait_for_next_vblank(crtc); @@ -1711,12 +1714,13 @@ intel_tv_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector)); enum drm_connector_status status; int type; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, force); if (!intel_display_device_enabled(i915)) @@ -1791,7 +1795,7 @@ intel_tv_set_mode_type(struct drm_display_mode *mode, static int intel_tv_get_modes(struct drm_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_display *display = to_intel_display(connector->dev); const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); int i, count = 0; @@ -1805,7 +1809,7 @@ intel_tv_get_modes(struct drm_connector *connector) continue; /* no vertical scaling with wide sources on gen3 */ - if (DISPLAY_VER(dev_priv) == 3 && input->w > 1024 && + if (DISPLAY_VER(display) == 3 && input->w > 1024 && input->h > intel_tv_mode_vdisplay(tv_mode)) continue; @@ -1822,7 +1826,8 @@ intel_tv_get_modes(struct drm_connector *connector) */ intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock); if (count == 0) { - drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", + drm_dbg_kms(display->drm, + "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); } intel_tv_scale_mode_horiz(mode, input->w, 0, 0); @@ -1887,7 +1892,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = { static void intel_tv_add_properties(struct drm_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_display *display = to_intel_display(connector->dev); struct drm_connector_state *conn_state = connector->state; const char *tv_format_names[ARRAY_SIZE(tv_modes)]; int i; @@ -1903,32 +1908,32 @@ static void intel_tv_add_properties(struct drm_connector *connector) /* Create TV properties then attach current values */ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { /* 1080p50/1080p60 not supported on gen3 */ - if (DISPLAY_VER(i915) == 3 && tv_modes[i].oversample == 1) + if (DISPLAY_VER(display) == 3 && tv_modes[i].oversample == 1) break; tv_format_names[i] = tv_modes[i].name; } - drm_mode_create_tv_properties_legacy(&i915->drm, i, tv_format_names); + drm_mode_create_tv_properties_legacy(display->drm, i, tv_format_names); drm_object_attach_property(&connector->base, - i915->drm.mode_config.legacy_tv_mode_property, + display->drm->mode_config.legacy_tv_mode_property, conn_state->tv.legacy_mode); drm_object_attach_property(&connector->base, - i915->drm.mode_config.tv_left_margin_property, + display->drm->mode_config.tv_left_margin_property, conn_state->tv.margins.left); drm_object_attach_property(&connector->base, - i915->drm.mode_config.tv_top_margin_property, + display->drm->mode_config.tv_top_margin_property, conn_state->tv.margins.top); drm_object_attach_property(&connector->base, - i915->drm.mode_config.tv_right_margin_property, + display->drm->mode_config.tv_right_margin_property, conn_state->tv.margins.right); drm_object_attach_property(&connector->base, - i915->drm.mode_config.tv_bottom_margin_property, + display->drm->mode_config.tv_bottom_margin_property, conn_state->tv.margins.bottom); } void -intel_tv_init(struct drm_i915_private *dev_priv) +intel_tv_init(struct intel_display 
*display) { struct drm_connector *connector; struct intel_tv *intel_tv; @@ -1936,11 +1941,11 @@ intel_tv_init(struct drm_i915_private *dev_priv) struct intel_connector *intel_connector; u32 tv_dac_on, tv_dac_off, save_tv_dac; - if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) + if ((intel_de_read(display, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) return; - if (!intel_bios_is_tv_present(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n"); + if (!intel_bios_is_tv_present(display)) { + drm_dbg_kms(display->drm, "Integrated TV is not present.\n"); return; } @@ -1948,15 +1953,15 @@ intel_tv_init(struct drm_i915_private *dev_priv) * Sanity check the TV output by checking to see if the * DAC register holds a value */ - save_tv_dac = intel_de_read(dev_priv, TV_DAC); + save_tv_dac = intel_de_read(display, TV_DAC); - intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); - tv_dac_on = intel_de_read(dev_priv, TV_DAC); + intel_de_write(display, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); + tv_dac_on = intel_de_read(display, TV_DAC); - intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); - tv_dac_off = intel_de_read(dev_priv, TV_DAC); + intel_de_write(display, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); + tv_dac_off = intel_de_read(display, TV_DAC); - intel_de_write(dev_priv, TV_DAC, save_tv_dac); + intel_de_write(display, TV_DAC, save_tv_dac); /* * If the register does not hold the state change enable @@ -1994,10 +1999,11 @@ intel_tv_init(struct drm_i915_private *dev_priv) intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; intel_connector->base.polled = intel_connector->polled; - drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs, + drm_connector_init(display->drm, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); - drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_tv_enc_funcs, + drm_encoder_init(display->drm, &intel_encoder->base, + &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC, "TV"); intel_encoder->compute_config = intel_tv_compute_config; diff --git a/drivers/gpu/drm/i915/display/intel_tv.h b/drivers/gpu/drm/i915/display/intel_tv.h index f08827b8bf2bea..0f280f69e73c93 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.h +++ b/drivers/gpu/drm/i915/display/intel_tv.h @@ -6,12 +6,12 @@ #ifndef __INTEL_TV_H__ #define __INTEL_TV_H__ -struct drm_i915_private; +struct intel_display; #ifdef I915 -void intel_tv_init(struct drm_i915_private *dev_priv); +void intel_tv_init(struct intel_display *display); #else -static inline void intel_tv_init(struct drm_i915_private *dev_priv) +static inline void intel_tv_init(struct intel_display *display) { } #endif diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 5b065e1cd4e44c..0b7f2134e44131 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -67,8 +67,8 @@ */ u32 i915_get_vblank_counter(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; + struct intel_display *display = to_intel_display(crtc->dev); + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); const struct drm_display_mode *mode = &vblank->hwmode; enum pipe pipe = to_intel_crtc(crtc)->pipe; u32 pixel, vbl_start, hsync_start, htotal; @@ -103,8 +103,8 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc) * we get a low 
value that's stable across two reads of the high * register. */ - frame = intel_de_read64_2x32(dev_priv, PIPEFRAMEPIXEL(dev_priv, pipe), - PIPEFRAME(dev_priv, pipe)); + frame = intel_de_read64_2x32(display, PIPEFRAMEPIXEL(display, pipe), + PIPEFRAME(display, pipe)); pixel = frame & PIPE_PIXEL_MASK; frame = (frame >> PIPE_FRAME_LOW_SHIFT) & 0xffffff; @@ -119,19 +119,19 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc) u32 g4x_get_vblank_counter(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; + struct intel_display *display = to_intel_display(crtc->dev); + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); enum pipe pipe = to_intel_crtc(crtc)->pipe; if (!vblank->max_vblank_count) return 0; - return intel_de_read(dev_priv, PIPE_FRMCOUNT_G4X(dev_priv, pipe)); + return intel_de_read(display, PIPE_FRMCOUNT_G4X(display, pipe)); } static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base); const struct drm_display_mode *mode = &vblank->hwmode; u32 htotal = mode->crtc_htotal; @@ -150,16 +150,16 @@ static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc) * pipe frame time stamp. The time stamp value * is sampled at every start of vertical blank. */ - scan_prev_time = intel_de_read_fw(dev_priv, + scan_prev_time = intel_de_read_fw(display, PIPE_FRMTMSTMP(crtc->pipe)); /* * The TIMESTAMP_CTR register has the current * time stamp value. */ - scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR); + scan_curr_time = intel_de_read_fw(display, IVB_TIMESTAMP_CTR); - scan_post_time = intel_de_read_fw(dev_priv, + scan_post_time = intel_de_read_fw(display, PIPE_FRMTMSTMP(crtc->pipe)); } while (scan_post_time != scan_prev_time); @@ -190,8 +190,9 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) return scanline; } -static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) +int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* @@ -220,7 +221,7 @@ static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) * However if queried just before the start of vblank we'll get an * answer that's slightly in the future. 
*/ - if (DISPLAY_VER(i915) == 2) + if (DISPLAY_VER(display) == 2) return -1; else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return 2; @@ -234,8 +235,7 @@ static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) */ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(crtc); struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base); const struct drm_display_mode *mode = &vblank->hwmode; enum pipe pipe = crtc->pipe; @@ -249,7 +249,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) vtotal = intel_mode_vtotal(mode); - position = intel_de_read_fw(dev_priv, PIPEDSL(dev_priv, pipe)) & PIPEDSL_LINE_MASK; + position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK; /* * On HSW, the DSL reg (0x70000) appears to return 0 if we @@ -263,13 +263,13 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) * problem. We may need to extend this to include other platforms, * but so far testing only shows the problem on HSW. */ - if (HAS_DDI(dev_priv) && !position) { + if (HAS_DDI(display) && !position) { int i, temp; for (i = 0; i < 100; i++) { udelay(1); - temp = intel_de_read_fw(dev_priv, - PIPEDSL(dev_priv, pipe)) & PIPEDSL_LINE_MASK; + temp = intel_de_read_fw(display, + PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK; if (temp != position) { position = temp; break; @@ -284,15 +284,6 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) return (position + vtotal + crtc->scanline_offset) % vtotal; } -int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline) -{ - const struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base); - const struct drm_display_mode *mode = &vblank->hwmode; - int vtotal = intel_mode_vtotal(mode); - - return (scanline + vtotal - crtc->scanline_offset) % vtotal; -} - /* * The uncore version of the spin lock functions is used to decide * whether we need to lock the uncore lock or not. This is only @@ -303,41 +294,49 @@ int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline) * all register accesses to the same cacheline to be serialized, * otherwise they may hang. 
*/ -static void intel_vblank_section_enter(struct drm_i915_private *i915) +#ifdef I915 +static void intel_vblank_section_enter(struct intel_display *display) __acquires(i915->uncore.lock) { -#ifdef I915 + struct drm_i915_private *i915 = to_i915(display->drm); spin_lock(&i915->uncore.lock); -#endif } -static void intel_vblank_section_exit(struct drm_i915_private *i915) +static void intel_vblank_section_exit(struct intel_display *display) __releases(i915->uncore.lock) { -#ifdef I915 + struct drm_i915_private *i915 = to_i915(display->drm); spin_unlock(&i915->uncore.lock); -#endif +} +#else +static void intel_vblank_section_enter(struct intel_display *display) +{ } +static void intel_vblank_section_exit(struct intel_display *display) +{ +} +#endif + static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, bool in_vblank_irq, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode) { - struct drm_device *dev = _crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(_crtc->dev); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_crtc *crtc = to_intel_crtc(_crtc); enum pipe pipe = crtc->pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; - bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 || - IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 || + bool use_scanline_counter = DISPLAY_VER(display) >= 5 || + IS_G4X(dev_priv) || DISPLAY_VER(display) == 2 || crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; - if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) { - drm_dbg(&dev_priv->drm, + if (drm_WARN_ON(display->drm, !mode->crtc_clock)) { + drm_dbg(display->drm, "trying to get scanoutpos for disabled pipe %c\n", pipe_name(pipe)); return false; @@ -355,7 +354,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, * preemption disabled, so the following code must not block. */ local_irq_save(irqflags); - intel_vblank_section_enter(dev_priv); + intel_vblank_section_enter(display); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ @@ -387,7 +386,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, * We can split this into vertical and horizontal * scanout position. */ - position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(dev_priv, pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; + position = (intel_de_read_fw(display, PIPEFRAMEPIXEL(display, pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; /* convert to pixel counts */ vbl_start *= htotal; @@ -423,7 +422,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. 
*/ - intel_vblank_section_exit(dev_priv); + intel_vblank_section_exit(display); local_irq_restore(irqflags); /* @@ -458,42 +457,42 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, int intel_get_crtc_scanline(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); unsigned long irqflags; int position; local_irq_save(irqflags); - intel_vblank_section_enter(dev_priv); + intel_vblank_section_enter(display); position = __intel_get_crtc_scanline(crtc); - intel_vblank_section_exit(dev_priv); + intel_vblank_section_exit(display); local_irq_restore(irqflags); return position; } -static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, +static bool pipe_scanline_is_moving(struct intel_display *display, enum pipe pipe) { - i915_reg_t reg = PIPEDSL(dev_priv, pipe); + i915_reg_t reg = PIPEDSL(display, pipe); u32 line1, line2; - line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; + line1 = intel_de_read(display, reg) & PIPEDSL_LINE_MASK; msleep(5); - line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; + line2 = intel_de_read(display, reg) & PIPEDSL_LINE_MASK; return line1 != line2; } static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; /* Wait for the display line to settle/start moving */ - if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) - drm_err(&dev_priv->drm, + if (wait_for(pipe_scanline_is_moving(display, pipe) == state, 100)) + drm_err(display->drm, "pipe %c scanline %s wait timed out\n", pipe_name(pipe), str_on_off(state)); } @@ -511,8 +510,8 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, bool vrr_enable) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); u8 mode_flags = crtc_state->mode_flags; struct drm_display_mode adjusted_mode; int vmax_vblank_start = 0; @@ -521,7 +520,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); if (vrr_enable) { - drm_WARN_ON(&i915->drm, (mode_flags & I915_MODE_FLAG_VRR) == 0); + drm_WARN_ON(display->drm, + (mode_flags & I915_MODE_FLAG_VRR) == 0); adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax; @@ -543,8 +543,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, * __intel_get_crtc_scanline()) with vblank_time_lock? * Need to audit everything to make sure it's safe. 
*/ - spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags); - intel_vblank_section_enter(i915); + spin_lock_irqsave(&display->drm->vblank_time_lock, irqflags); + intel_vblank_section_enter(display); drm_calc_timestamping_constants(&crtc->base, &adjusted_mode); @@ -553,8 +553,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, crtc->mode_flags = mode_flags; crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state); - intel_vblank_section_exit(i915); - spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags); + intel_vblank_section_exit(display); + spin_unlock_irqrestore(&display->drm->vblank_time_lock, irqflags); } int intel_mode_vdisplay(const struct drm_display_mode *mode) @@ -652,14 +652,15 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state, */ if (intel_color_uses_dsb(new_crtc_state) || new_crtc_state->update_m_n || new_crtc_state->update_lrr) - evade->min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay; + evade->min -= intel_mode_vblank_start(adjusted_mode) - + intel_mode_vdisplay(adjusted_mode); } /* must be called with vblank interrupt already enabled! */ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade) { struct intel_crtc *crtc = evade->crtc; - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); long timeout = msecs_to_jiffies_timeout(1); wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); DEFINE_WAIT(wait); @@ -681,7 +682,7 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade) break; if (!timeout) { - drm_err(&i915->drm, + drm_err(display->drm, "Potential atomic update failure on pipe %c\n", pipe_name(crtc->pipe)); break; diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h index 7e526f6861e450..6d733625698261 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.h +++ b/drivers/gpu/drm/i915/display/intel_vblank.h @@ -40,6 +40,6 @@ void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc); void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc); void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, bool vrr_enable); -int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline); +int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VBLANK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 1af8407e2081d2..42022756bbd59e 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -493,7 +493,7 @@ struct child_device_config { u16 addin_offset; u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */ u8 i2c_pin; - u8 slave_addr; + u8 target_addr; u8 ddc_pin; u16 edid_ptr; u8 dvo_cfg; /* See DEVICE_CFG_* above */ @@ -502,7 +502,7 @@ struct child_device_config { struct { u8 dvo2_port; u8 i2c2_pin; - u8 slave2_addr; + u8 target2_addr; u8 ddc2_pin; } __packed; struct { @@ -1080,6 +1080,8 @@ struct bdb_edp { u16 edp_fast_link_training_rate[16]; /* 224+ */ u16 edp_max_port_link_rate[16]; /* 244+ */ u16 edp_dsc_disable; /* 251+ */ + u16 t6_delay_support; /* 260+ */ + u16 link_idle_time[16]; /* 260+ */ } __packed; /* @@ -1321,7 +1323,7 @@ struct als_data_entry { } __packed; struct aggressiveness_profile_entry { - u8 dpst_aggressiveness : 4; + u8 dpst_aggressiveness : 4; /* (228/252)-256 */ u8 lace_aggressiveness : 4; } __packed; @@ -1330,12 
+1332,27 @@ struct aggressiveness_profile2_entry { u8 elp_aggressiveness : 4; } __packed; +struct aggressiveness_profile3_entry { + u8 apd_aggressiveness:4; + u8 pixoptix_aggressiveness:4; +} __packed; + +struct aggressiveness_profile4_entry { + u8 xpst_aggressiveness:4; + u8 tcon_aggressiveness:4; +} __packed; + +struct panel_identification { + u8 panel_technology:4; + u8 reserved:4; +} __packed; + struct bdb_lfp_power { struct lfp_power_features features; /* ???-227 */ struct als_data_entry als[5]; u8 lace_aggressiveness_profile:3; /* 210-227 */ u8 reserved1:5; - u16 dpst; /* 228+ */ + u16 dpst; /* 228-256 */ u16 psr; /* 228+ */ u16 drrs; /* 228+ */ u16 lace_support; /* 228+ */ @@ -1343,12 +1360,20 @@ struct bdb_lfp_power { u16 dmrrs; /* 228+ */ u16 adb; /* 228+ */ u16 lace_enabled_status; /* 228+ */ - struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */ + struct aggressiveness_profile_entry aggressiveness[16]; u16 hobl; /* 232+ */ u16 vrr_feature_enabled; /* 233+ */ - u16 elp; /* 247+ */ - u16 opst; /* 247+ */ - struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */ + u16 elp; /* 247-256 */ + u16 opst; /* 247-256 */ + struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247-256 */ + u16 apd; /* 253-256 */ + u16 pixoptix; /* 253-256 */ + struct aggressiveness_profile3_entry aggressiveness3[16]; /* 253-256 */ + struct panel_identification panel_identification[16]; /* 257+ */ + u16 xpst_support; /* 257+ */ + u16 tcon_based_backlight_optimization; /* 257+ */ + struct aggressiveness_profile4_entry aggressiveness4[16]; /* 257+ */ + u16 tcon_backlight_xpst_coexistence; /* 257+ */ } __packed; /* diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index b9687b7692b8b4..2e849b015e748b 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -8,6 +8,7 @@ #include #include +#include #include "i915_drv.h" #include "intel_crtc.h" @@ -76,7 +77,7 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf, static void calculate_rc_params(struct drm_dsc_config *vdsc_cfg) { - int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel); + int bpp = fxp_q4_to_int(vdsc_cfg->bits_per_pixel); int bpc = vdsc_cfg->bits_per_component; int qp_bpc_modifier = (bpc - 8) * 2; int uncompressed_bpg_rate; @@ -184,7 +185,7 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg) } } else { /* fractional bpp part * 10000 (for precision up to 4 decimal places) */ - int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel); + int fractional_bits = fxp_q4_to_frac(vdsc_cfg->bits_per_pixel); static const s8 ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 @@ -263,7 +264,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; - u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16); + u16 compressed_bpp = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16); int err; int ret; @@ -456,36 +457,30 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val |= DSC_PPS0_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_PPS0_VBR_ENABLE; - drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 0, pps_val); /* PPS 1 */ pps_val = DSC_PPS1_BPP(vdsc_cfg->bits_per_pixel); - drm_dbg_kms(&dev_priv->drm, "PPS1 
= 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 1, pps_val); /* PPS 2 */ pps_val = DSC_PPS2_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PPS2_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); - drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 2, pps_val); /* PPS 3 */ pps_val = DSC_PPS3_SLICE_HEIGHT(vdsc_cfg->slice_height) | DSC_PPS3_SLICE_WIDTH(vdsc_cfg->slice_width); - drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 3, pps_val); /* PPS 4 */ pps_val = DSC_PPS4_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | DSC_PPS4_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); - drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 4, pps_val); /* PPS 5 */ pps_val = DSC_PPS5_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | DSC_PPS5_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); - drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 5, pps_val); /* PPS 6 */ @@ -493,25 +488,21 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_PPS6_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | DSC_PPS6_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | DSC_PPS6_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); - drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 6, pps_val); /* PPS 7 */ pps_val = DSC_PPS7_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | DSC_PPS7_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); - drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 7, pps_val); /* PPS 8 */ pps_val = DSC_PPS8_FINAL_OFFSET(vdsc_cfg->final_offset) | DSC_PPS8_INITIAL_OFFSET(vdsc_cfg->initial_offset); - drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 8, pps_val); /* PPS 9 */ pps_val = DSC_PPS9_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) | DSC_PPS9_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); - drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 9, pps_val); /* PPS 10 */ @@ -519,7 +510,6 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_PPS10_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | DSC_PPS10_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | DSC_PPS10_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); - drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 10, pps_val); /* PPS 16 */ @@ -528,31 +518,25 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) vdsc_cfg->slice_width) | DSC_PPS16_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / vdsc_cfg->slice_height); - drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 16, pps_val); if (DISPLAY_VER(dev_priv) >= 14) { /* PPS 17 */ pps_val = DSC_PPS17_SL_BPG_OFFSET(vdsc_cfg->second_line_bpg_offset); - drm_dbg_kms(&dev_priv->drm, "PPS17 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 17, pps_val); /* PPS 18 */ pps_val = DSC_PPS18_NSL_BPG_OFFSET(vdsc_cfg->nsl_bpg_offset) | DSC_PPS18_SL_OFFSET_ADJ(vdsc_cfg->second_line_offset_adj); - drm_dbg_kms(&dev_priv->drm, "PPS18 = 0x%08x\n", pps_val); intel_dsc_pps_write(crtc_state, 18, pps_val); } /* Populate the RC_BUF_THRESH registers */ memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword)); - for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { + for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) rc_buf_thresh_dword[i / 4] |= (u32)(vdsc_cfg->rc_buf_thresh[i] << BITS_PER_BYTE * 
(i % 4)); - drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i, - rc_buf_thresh_dword[i / 4]); - } if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); @@ -599,7 +583,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) /* Populate the RC_RANGE_PARAMETERS registers */ memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword)); - for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { + for (i = 0; i < DSC_NUM_BUF_RANGES; i++) rc_range_params_dword[i / 2] |= (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset << RC_BPG_OFFSET_SHIFT) | @@ -607,9 +591,6 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) RC_MAX_QP_SHIFT) | (vdsc_cfg->rc_range_params[i].range_min_qp << RC_MIN_QP_SHIFT)) << 16 * (i % 2)); - drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i, - rc_range_params_dword[i / 2]); - } if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0, rc_range_params_dword[0]); @@ -989,3 +970,23 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state) out: intel_display_power_put(dev_priv, power_domain, wakeref); } + +static void intel_vdsc_dump_state(struct drm_printer *p, int indent, + const struct intel_crtc_state *crtc_state) +{ + drm_printf_indent(p, indent, + "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, split: %s\n", + FXP_Q4_ARGS(crtc_state->dsc.compressed_bpp_x16), + crtc_state->dsc.slice_count, + str_yes_no(crtc_state->dsc.dsc_split)); +} + +void intel_vdsc_state_dump(struct drm_printer *p, int indent, + const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->dsc.compression_enable) + return; + + intel_vdsc_dump_state(p, indent, crtc_state); + drm_dsc_dump_config(p, indent, &crtc_state->dsc.config); +} diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index 2cc41ff0890942..290b2e9b3482e7 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -8,6 +8,8 @@ #include +struct drm_printer; + enum transcoder; struct intel_crtc; struct intel_crtc_state; @@ -27,5 +29,7 @@ void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_dsc_dp_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void intel_vdsc_state_dump(struct drm_printer *p, int indent, + const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VDSC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 5a0da64c7db33e..9a51f5bac3071a 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -17,8 +17,8 @@ bool intel_vrr_is_capable(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); const struct drm_display_info *info = &connector->base.display_info; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dp *intel_dp; /* @@ -43,7 +43,7 @@ bool intel_vrr_is_capable(struct intel_connector *connector) return false; } - return HAS_VRR(i915) && + return HAS_VRR(display) && info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10; } @@ -89,10 +89,9 @@ intel_vrr_check_modeset(struct intel_atomic_state *state) */ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct 
drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(display) >= 13) return crtc_state->vrr.guardband; else /* The hw imposes the extra scanline before frame start */ @@ -113,11 +112,11 @@ int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state) static bool is_cmrr_frac_required(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line; struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - if (!HAS_CMRR(i915)) + if (!HAS_CMRR(display)) return false; actual_refresh_k = @@ -161,8 +160,7 @@ void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_dp *intel_dp = intel_attached_dp(connector); @@ -186,7 +184,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, if (!crtc_state->vrr.in_range) return; - if (HAS_LRR(i915)) + if (HAS_LRR(display)) crtc_state->update_lrr = true; vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000, @@ -233,8 +231,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } - if (intel_dp_as_sdp_supported(intel_dp) && - crtc_state->vrr.enable) { + if (intel_dp->as_sdp_supported && crtc_state->vrr.enable) { crtc_state->vrr.vsync_start = (crtc_state->hw.adjusted_mode.crtc_vtotal - crtc_state->hw.adjusted_mode.vsync_start); @@ -247,7 +244,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, * For XE_LPD+, we use guardband and pipeline override * is deprecated. 
*/ - if (DISPLAY_VER(i915) >= 13) { + if (DISPLAY_VER(display) >= 13) { crtc_state->vrr.guardband = crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start; } else { @@ -259,9 +256,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(display) >= 13) return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN | XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband); else @@ -272,7 +269,7 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state) void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; /* @@ -280,133 +277,130 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) * TGL: generate VRR "safe window" for DSB vblank waits * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR */ - if (IS_DISPLAY_VER(dev_priv, 12, 13)) - intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), + if (IS_DISPLAY_VER(display, 12, 13)) + intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0, PIPE_VBLANK_WITH_DELAY); if (!crtc_state->vrr.flipline) { - intel_de_write(dev_priv, - TRANS_VRR_CTL(dev_priv, cpu_transcoder), 0); + intel_de_write(display, + TRANS_VRR_CTL(display, cpu_transcoder), 0); return; } if (crtc_state->cmrr.enable) { - intel_de_write(dev_priv, TRANS_CMRR_M_HI(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_CMRR_M_HI(display, cpu_transcoder), upper_32_bits(crtc_state->cmrr.cmrr_m)); - intel_de_write(dev_priv, TRANS_CMRR_M_LO(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_CMRR_M_LO(display, cpu_transcoder), lower_32_bits(crtc_state->cmrr.cmrr_m)); - intel_de_write(dev_priv, TRANS_CMRR_N_HI(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_CMRR_N_HI(display, cpu_transcoder), upper_32_bits(crtc_state->cmrr.cmrr_n)); - intel_de_write(dev_priv, TRANS_CMRR_N_LO(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_CMRR_N_LO(display, cpu_transcoder), lower_32_bits(crtc_state->cmrr.cmrr_n)); } - intel_de_write(dev_priv, TRANS_VRR_VMIN(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), crtc_state->vrr.vmin - 1); - intel_de_write(dev_priv, TRANS_VRR_VMAX(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), crtc_state->vrr.vmax - 1); - intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), trans_vrr_ctl(crtc_state)); - intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), crtc_state->vrr.flipline - 1); } void intel_vrr_send_push(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (!crtc_state->vrr.enable) return; - intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 
TRANS_PUSH_EN | TRANS_PUSH_SEND); } bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (!crtc_state->vrr.enable) return false; - return intel_de_read(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder)) & TRANS_PUSH_SEND; + return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND; } void intel_vrr_enable(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (!crtc_state->vrr.enable) return; - intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN); - if (HAS_AS_SDP(dev_priv)) - intel_de_write(dev_priv, - TRANS_VRR_VSYNC(dev_priv, cpu_transcoder), + if (HAS_AS_SDP(display)) + intel_de_write(display, + TRANS_VRR_VSYNC(display, cpu_transcoder), VRR_VSYNC_END(crtc_state->vrr.vsync_end) | VRR_VSYNC_START(crtc_state->vrr.vsync_start)); if (crtc_state->cmrr.enable) { - intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE | trans_vrr_ctl(crtc_state)); } else { - intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); } } void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(old_crtc_state); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; if (!old_crtc_state->vrr.enable) return; - intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), trans_vrr_ctl(old_crtc_state)); - intel_de_wait_for_clear(dev_priv, - TRANS_VRR_STATUS(dev_priv, cpu_transcoder), + intel_de_wait_for_clear(display, + TRANS_VRR_STATUS(display, cpu_transcoder), VRR_STATUS_VRR_EN_LIVE, 1000); - intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder), 0); + intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0); - if (HAS_AS_SDP(dev_priv)) - intel_de_write(dev_priv, - TRANS_VRR_VSYNC(dev_priv, cpu_transcoder), 0); + if (HAS_AS_SDP(display)) + intel_de_write(display, + TRANS_VRR_VSYNC(display, cpu_transcoder), 0); } void intel_vrr_get_config(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 trans_vrr_ctl, trans_vrr_vsync; - trans_vrr_ctl = intel_de_read(dev_priv, - TRANS_VRR_CTL(dev_priv, cpu_transcoder)); + trans_vrr_ctl = intel_de_read(display, + TRANS_VRR_CTL(display, cpu_transcoder)); crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE; - if (HAS_CMRR(dev_priv)) + if (HAS_CMRR(display)) crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE); if (crtc_state->cmrr.enable) { crtc_state->cmrr.cmrr_n = - 
intel_de_read64_2x32(dev_priv, TRANS_CMRR_N_LO(dev_priv, cpu_transcoder), - TRANS_CMRR_N_HI(dev_priv, cpu_transcoder)); + intel_de_read64_2x32(display, TRANS_CMRR_N_LO(display, cpu_transcoder), + TRANS_CMRR_N_HI(display, cpu_transcoder)); crtc_state->cmrr.cmrr_m = - intel_de_read64_2x32(dev_priv, TRANS_CMRR_M_LO(dev_priv, cpu_transcoder), - TRANS_CMRR_M_HI(dev_priv, cpu_transcoder)); + intel_de_read64_2x32(display, TRANS_CMRR_M_LO(display, cpu_transcoder), + TRANS_CMRR_M_HI(display, cpu_transcoder)); } - if (DISPLAY_VER(dev_priv) >= 13) + if (DISPLAY_VER(display) >= 13) crtc_state->vrr.guardband = REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl); else @@ -415,21 +409,21 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl); if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) { - crtc_state->vrr.flipline = intel_de_read(dev_priv, - TRANS_VRR_FLIPLINE(dev_priv, cpu_transcoder)) + 1; - crtc_state->vrr.vmax = intel_de_read(dev_priv, - TRANS_VRR_VMAX(dev_priv, cpu_transcoder)) + 1; - crtc_state->vrr.vmin = intel_de_read(dev_priv, - TRANS_VRR_VMIN(dev_priv, cpu_transcoder)) + 1; + crtc_state->vrr.flipline = intel_de_read(display, + TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1; + crtc_state->vrr.vmax = intel_de_read(display, + TRANS_VRR_VMAX(display, cpu_transcoder)) + 1; + crtc_state->vrr.vmin = intel_de_read(display, + TRANS_VRR_VMIN(display, cpu_transcoder)) + 1; } if (crtc_state->vrr.enable) { crtc_state->mode_flags |= I915_MODE_FLAG_VRR; - if (HAS_AS_SDP(dev_priv)) { + if (HAS_AS_SDP(display)) { trans_vrr_vsync = - intel_de_read(dev_priv, - TRANS_VRR_VSYNC(dev_priv, cpu_transcoder)); + intel_de_read(display, + TRANS_VRR_VSYNC(display, cpu_transcoder)); crtc_state->vrr.vsync_start = REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync); crtc_state->vrr.vsync_end = diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index ba5a628b4757c1..17d4c880ecc402 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -14,6 +14,7 @@ #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_types.h" +#include "intel_dpt.h" #include "intel_fb.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" @@ -537,6 +538,8 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane, case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS: case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC: case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS: + case I915_FORMAT_MOD_4_TILED_BMG_CCS: + case I915_FORMAT_MOD_4_TILED_LNL_CCS: /* * Align to at least 4x1 main surface * tiles (16K) to match 64B of AUX. 
@@ -948,6 +951,9 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier) return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS: return PLANE_CTL_TILED_4 | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; + case I915_FORMAT_MOD_4_TILED_BMG_CCS: + case I915_FORMAT_MOD_4_TILED_LNL_CCS: + return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; @@ -1085,11 +1091,6 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, if (DISPLAY_VER(dev_priv) == 13) plane_ctl |= adlp_plane_ctl_arb_slots(plane_state); - if (GRAPHICS_VER(dev_priv) >= 20 && - fb->modifier == I915_FORMAT_MOD_4_TILED) { - plane_ctl |= PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; - } - return plane_ctl; } @@ -1162,7 +1163,7 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, * within the DPT is always 0. */ drm_WARN_ON(&i915->drm, plane_state->dpt_vma && - plane_state->dpt_vma->node.start); + intel_dpt_offset(plane_state->dpt_vma)); drm_WARN_ON(&i915->drm, offset & 0x1fffff); return offset >> 9; } else { @@ -2452,6 +2453,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915, if (gen12_plane_has_mc_ccs(i915, plane_id)) caps |= INTEL_PLANE_CAP_CCS_MC; + if (DISPLAY_VER(i915) >= 14 && IS_DGFX(i915)) + caps |= INTEL_PLANE_CAP_NEED64K_PHYS; + return caps; } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index a2726364b34d75..045c7cac166bb4 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -2830,17 +2830,17 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, } /* - * If Fixed Refresh Rate: + * If Fixed Refresh Rate or For VRR case Vmin = Vmax = Flipline: * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from * watermark level1 and up and above. If watermark level 1 is * invalid program it with all 1's. * Program PKG_C_LATENCY Added Wake Time = DSB execution time - * If Variable Refresh Rate: + * If Variable Refresh Rate where Vmin != Vmax != Flipline: * Program DEEP PKG_C_LATENCY Pkg C with all 1's. 
* Program PKG_C_LATENCY Added Wake Time = 0 */ static void -skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled) +skl_program_dpkgc_latency(struct drm_i915_private *i915, bool enable_dpkgc) { u32 max_latency = 0; u32 clear = 0, val = 0; @@ -2849,15 +2849,15 @@ skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled) if (DISPLAY_VER(i915) < 20) return; - if (vrr_enabled) { - max_latency = LNL_PKG_C_LATENCY_MASK; - added_wake_time = 0; - } else { + if (enable_dpkgc) { max_latency = skl_watermark_max_latency(i915, 1); if (max_latency == 0) max_latency = LNL_PKG_C_LATENCY_MASK; added_wake_time = DSB_EXE_TIME + i915->display.sagv.block_time_us; + } else { + max_latency = LNL_PKG_C_LATENCY_MASK; + added_wake_time = 0; } clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK; @@ -2873,7 +2873,7 @@ skl_compute_wm(struct intel_atomic_state *state) struct intel_crtc *crtc; struct intel_crtc_state __maybe_unused *new_crtc_state; int ret, i; - bool vrr_enabled = false; + bool enable_dpkgc = false; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { ret = skl_build_pipe_wm(state, crtc); @@ -2899,11 +2899,13 @@ skl_compute_wm(struct intel_atomic_state *state) if (ret) return ret; - if (new_crtc_state->vrr.enable) - vrr_enabled = true; + if ((new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax && + new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline) || + !new_crtc_state->vrr.enable) + enable_dpkgc = true; } - skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled); + skl_program_dpkgc_latency(to_i915(state->base.dev), enable_dpkgc); skl_print_wm_changes(state); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 931d2cf74ed859..d21f3fb397060c 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -1879,6 +1879,7 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = { void vlv_dsi_init(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_dsi *intel_dsi; struct intel_encoder *encoder; struct intel_connector *connector; @@ -1890,7 +1891,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "\n"); /* There is no detection method for MIPI so rely on VBT */ - if (!intel_bios_is_dsi_present(dev_priv, &port)) + if (!intel_bios_is_dsi_present(display, &port)) return; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) @@ -1945,7 +1946,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_dsi->panel_power_off_time = ktime_get_boottime(); - intel_bios_init_panel_late(dev_priv, &connector->panel, NULL, NULL); + intel_bios_init_panel_late(display, &connector->panel, NULL, NULL); if (connector->panel.vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index d54162ce0f99a0..a3b83cfe172677 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -12,8 +12,6 @@ #include #include -#include "display/intel_frontbuffer.h" - #include "gem/i915_gem_ioctls.h" #include "gt/intel_context.h" #include "gt/intel_gpu_commands.h" @@ -827,7 +825,7 @@ static int eb_select_context(struct i915_execbuffer *eb) struct i915_gem_context *ctx; ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); - if (unlikely(IS_ERR(ctx))) + if (IS_ERR(ctx)) return PTR_ERR(ctx); 
eb->gem_context = ctx; @@ -1533,7 +1531,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) u64_to_user_ptr(entry->relocs_ptr); unsigned long remain = entry->relocation_count; - if (unlikely(remain > N_RELOC(ULONG_MAX))) + if (unlikely(remain > N_RELOC(INT_MAX))) return -EINVAL; /* @@ -1641,7 +1639,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) if (size == 0) return 0; - if (size > N_RELOC(ULONG_MAX)) + if (size > N_RELOC(INT_MAX)) return -EINVAL; addr = u64_to_user_ptr(entry->relocs_ptr); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index cac6d4184506cc..21274aa9bdddc1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -252,6 +252,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) struct vm_area_struct *area = vmf->vma; struct i915_mmap_offset *mmo = area->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; + unsigned long obj_offset; resource_size_t iomap; int err; @@ -273,10 +274,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) iomap -= obj->mm.region->region.start; } + obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node); /* PTEs are revoked in obj->ops->put_pages() */ err = remap_io_sg(area, area->vm_start, area->vm_end - area->vm_start, - obj->mm.pages->sgl, iomap); + obj->mm.pages->sgl, obj_offset, iomap); if (area->vm_flags & VM_WRITE) { GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); @@ -293,8 +295,10 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) static void set_address_limits(struct vm_area_struct *area, struct i915_vma *vma, unsigned long obj_offset, + resource_size_t gmadr_start, unsigned long *start_vaddr, - unsigned long *end_vaddr) + unsigned long *end_vaddr, + unsigned long *pfn) { unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */ long start, end; /* memory boundaries */ @@ -323,6 +327,10 @@ static void set_address_limits(struct vm_area_struct *area, /* Let's move back into the "<< PAGE_SHIFT" domain */ *start_vaddr = (unsigned long)start << PAGE_SHIFT; *end_vaddr = (unsigned long)end << PAGE_SHIFT; + + *pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; + *pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT; + *pfn += obj_offset - vma->gtt_view.partial.offset; } static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) @@ -441,11 +449,13 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) if (ret) goto err_unpin; - set_address_limits(area, vma, obj_offset, &start, &end); - - pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; - pfn += (start - area->vm_start) >> PAGE_SHIFT; - pfn += obj_offset - vma->gtt_view.partial.offset; + /* + * Dump all the necessary parameters in this function to perform the + * arithmetic calculation for the virtual address start and end and + * the PFN (Page Frame Number). 
+ */ + set_address_limits(area, vma, obj_offset, ggtt->gmadr.start, + &start, &end, &pfn); /* Finally, remap it using the new GTT offset */ ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap); @@ -1071,9 +1081,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) rcu_read_lock(); drm_vma_offset_lock_lookup(dev->vma_offset_manager); - node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, - vma->vm_pgoff, - vma_pages(vma)); + node = drm_vma_offset_lookup_locked(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); if (node && drm_vma_node_is_allowed(node, priv)) { /* * Skip 0-refcnted objects as it is in the process of being diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 5d7446a48ae790..3dc61cbd2e11fe 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -89,7 +89,6 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj); * @handle: userspace handle * * Returns: - * * A pointer to the object named by the handle if such exists on @filp, NULL * otherwise. This object is only valid whilst under the RCU read lock, and * note carefully the object may be in the process of being destroyed. diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 1495b607449287..68413c05c81229 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -535,7 +535,7 @@ struct drm_i915_gem_object { * I915_CACHE_NONE. The only exception is userptr objects, where we * instead force I915_CACHE_LLC, but we also don't allow userspace to * ever change the @cache_level for such objects. Another special case - * is dma-buf, which doesn't rely on @cache_dirty, but there we + * is dma-buf, which doesn't rely on @cache_dirty, but there we * always do a forced flush when acquiring the pages, if there is a * chance that the pages can be read directly from main memory with * the GPU. 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index c5e1c718a6d262..fe69f2c8527d79 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -424,7 +424,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj, struct address_space *mapping = obj->base.filp->f_mapping; const struct address_space_operations *aops = mapping->a_ops; char __user *user_data = u64_to_user_ptr(arg->data_ptr); - u64 remain, offset; + u64 remain; + loff_t pos; unsigned int pg; /* Caller already validated user args */ @@ -457,12 +458,12 @@ shmem_pwrite(struct drm_i915_gem_object *obj, */ remain = arg->size; - offset = arg->offset; - pg = offset_in_page(offset); + pos = arg->offset; + pg = offset_in_page(pos); do { unsigned int len, unwritten; - struct page *page; + struct folio *folio; void *data, *vaddr; int err; char __maybe_unused c; @@ -480,21 +481,19 @@ shmem_pwrite(struct drm_i915_gem_object *obj, if (err) return err; - err = aops->write_begin(obj->base.filp, mapping, offset, len, - &page, &data); + err = aops->write_begin(obj->base.filp, mapping, pos, len, + &folio, &data); if (err < 0) return err; - vaddr = kmap_local_page(page); + vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos)); pagefault_disable(); - unwritten = __copy_from_user_inatomic(vaddr + pg, - user_data, - len); + unwritten = __copy_from_user_inatomic(vaddr, user_data, len); pagefault_enable(); kunmap_local(vaddr); - err = aops->write_end(obj->base.filp, mapping, offset, len, - len - unwritten, page, data); + err = aops->write_end(obj->base.filp, mapping, pos, len, + len - unwritten, folio, data); if (err < 0) return err; @@ -504,7 +503,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj, remain -= len; user_data += len; - offset += len; + pos += len; pg = 0; } while (remain); @@ -660,7 +659,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, struct drm_i915_gem_object *obj; struct file *file; const struct address_space_operations *aops; - resource_size_t offset; + loff_t pos; int err; GEM_WARN_ON(IS_DGFX(i915)); @@ -672,29 +671,27 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, file = obj->base.filp; aops = file->f_mapping->a_ops; - offset = 0; + pos = 0; do { unsigned int len = min_t(typeof(size), size, PAGE_SIZE); - struct page *page; - void *pgdata, *vaddr; + struct folio *folio; + void *fsdata; - err = aops->write_begin(file, file->f_mapping, offset, len, - &page, &pgdata); + err = aops->write_begin(file, file->f_mapping, pos, len, + &folio, &fsdata); if (err < 0) goto fail; - vaddr = kmap(page); - memcpy(vaddr, data, len); - kunmap(page); + memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len); - err = aops->write_end(file, file->f_mapping, offset, len, len, - page, pgdata); + err = aops->write_end(file, file->f_mapping, pos, len, len, + folio, fsdata); if (err < 0) goto fail; size -= len; data += len; - offset += len; + pos += len; } while (size); return obj; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 5c72462d1f57e3..b22e2019768f04 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -1131,7 +1131,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource)); } - if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) + if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0) 
intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index d4b918fb11cebd..1f55e62044a4be 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -266,7 +266,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) args->timeout_ns = 0; - /* Asked to wait beyond the jiffie/scheduler precision? */ + /* Asked to wait beyond the jiffy/scheduler precision? */ if (ret == -ETIME && args->timeout_ns) ret = -EAGAIN; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index 3527b8f446fe3b..2fda549dd82d2b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -506,7 +506,7 @@ static int igt_dmabuf_export_vmap(void *arg) goto out; } - if (memchr_inv(ptr, 0, dmabuf->size)) { + if (!mem_is_zero(ptr, dmabuf->size)) { pr_err("Exported object not initialised to zero!\n"); err = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 3b740ca2500091..4d30a86016f241 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -693,6 +693,8 @@ void intel_engines_release(struct intel_gt *gt) memset(&engine->reset, 0, sizeof(engine->reset)); } + + llist_del_all(&gt->i915->uabi_engines_llist); } void intel_engine_free_request_pool(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 206a5e0fedf1b7..d60a6ca0cae5e7 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -12,7 +12,6 @@ #include #include -#include "display/intel_display.h" #include "gem/i915_gem_lmem.h" #include "intel_context.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 93bc1cc1ee7e64..0ffba50981e3ba 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -418,7 +418,6 @@ int __i915_vma_pin_fence(struct i915_vma *vma) * For an untiled surface, this removes any existing fence. * * Returns: - * * 0 on success, negative error code on failure.
*/ int i915_vma_pin_fence(struct i915_vma *vma) diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 2bd8d98d211023..5394bc7d4daf81 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -220,6 +220,7 @@ #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) +#define CMD_3DSTATE_MESH_CONTROL ((0x3 << 29) | (0x3 << 27) | (0x0 << 24) | (0x77 << 16) | (0x3)) #define XY_CTRL_SURF_INSTR_SIZE 5 #define MI_FLUSH_DW_SIZE 3 diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index b5e114d284ad3f..998ca029b73a10 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -174,7 +174,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt) int intel_gt_probe_all(struct drm_i915_private *i915); int intel_gt_tiles_init(struct drm_i915_private *i915); -void intel_gt_release_all(struct drm_i915_private *i915); #define for_each_gt(gt__, i915__, id__) \ for ((id__) = 0; \ @@ -208,4 +207,10 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt, void intel_gt_bind_context_set_ready(struct intel_gt *gt); void intel_gt_bind_context_set_unready(struct intel_gt *gt); bool intel_gt_is_bind_context_ready(struct intel_gt *gt); + +static inline void intel_gt_set_wedged_async(struct intel_gt *gt) +{ + queue_work(system_highpri_wq, &gt->wedge); +} + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index e42b3a5d4e63dd..57a3c83d365537 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -1553,6 +1553,8 @@ #define VLV_RENDER_C0_COUNT _MMIO(0x138118) #define VLV_MEDIA_C0_COUNT _MMIO(0x13811c) +#define PCU_PWM_FAN_SPEED _MMIO(0x138140) + #define GEN12_RPSTAT1 _MMIO(0x1381b4) #define GEN12_VOLTAGE_MASK REG_GENMASK(10, 0) #define GEN12_CAGF_MASK REG_GENMASK(19, 11) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index cfdd2ad5e9549c..bcee084b1f272b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -292,6 +292,8 @@ struct intel_gt { struct gt_defaults defaults; struct kobject *sysfs_defaults; + struct work_struct wedge; + struct i915_perf_gt perf; /** link: &ggtt.gt_list */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 735cd23a43c6e3..8f1ea95471efcb 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1013,6 +1013,15 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) GT_TRACE(gt, "end\n"); } +static void set_wedged_work(struct work_struct *w) +{ + struct intel_gt *gt = container_of(w, struct intel_gt, wedge); + intel_wakeref_t wf; + + with_intel_runtime_pm(gt->uncore->rpm, wf) + __intel_gt_set_wedged(gt); +} + void intel_gt_set_wedged(struct intel_gt *gt) { intel_wakeref_t wakeref; @@ -1614,6 +1623,7 @@ void intel_gt_init_reset(struct intel_gt *gt) init_waitqueue_head(&gt->reset.queue); mutex_init(&gt->reset.mutex); init_srcu_struct(&gt->reset.backoff_srcu); + INIT_WORK(&gt->wedge, set_wedged_work); /* * While undesirable to wait inside the shrinker, complain anyway.
@@ -1640,7 +1650,7 @@ static void intel_wedge_me(struct work_struct *work) struct intel_wedge_me *w = container_of(work, typeof(*w), work.work); gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name); - intel_gt_set_wedged(w->gt); + set_wedged_work(&w->gt->wedge); } void __intel_init_wedge(struct intel_wedge_me *w, diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 09a287c1aedd67..e539a656cfc3ae 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -111,9 +111,8 @@ static void wa_init_finish(struct i915_wa_list *wal) { /* Trim unused entries. */ if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) { - struct i915_wa *list = kmemdup(wal->list, - wal->count * sizeof(*list), - GFP_KERNEL); + struct i915_wa *list = kmemdup_array(wal->list, wal->count, + sizeof(*list), GFP_KERNEL); if (list) { kfree(wal->list); @@ -974,7 +973,12 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq) if (ret) return ret; - cs = intel_ring_begin(rq, (wal->count * 2 + 2)); + if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) || + IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS) + cs = intel_ring_begin(rq, (wal->count * 2 + 6)); + else + cs = intel_ring_begin(rq, (wal->count * 2 + 2)); + if (IS_ERR(cs)) return PTR_ERR(cs); @@ -1004,6 +1008,15 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq) } *cs++ = MI_NOOP; + /* Wa_14019789679 */ + if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) || + IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS) { + *cs++ = CMD_3DSTATE_MESH_CONTROL; + *cs++ = 0; + *cs++ = 0; + *cs++ = MI_NOOP; + } + intel_uncore_forcewake_put__locked(uncore, fw); spin_unlock(&uncore->lock); intel_gt_mcr_unlock(wal->gt, flags); @@ -2058,7 +2071,7 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine) case RENDER_CLASS: /* Required by recommended tuning setting (not a workaround) */ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3); - + whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1); break; default: break; @@ -2073,7 +2086,7 @@ static void xelpg_whitelist_build(struct intel_engine_cs *engine) case RENDER_CLASS: /* Required by recommended tuning setting (not a workaround) */ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3); - + whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1); break; default: break; diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index 4202df5b8c122f..222ca7c4495196 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -93,7 +93,7 @@ static int wait_for_reset(struct intel_engine_cs *engine, return -EINVAL; } - /* Give the request a jiffie to complete after flushing the worker */ + /* Give the request a jiffy to complete after flushing the worker */ if (i915_request_wait(rq, 0, max(0l, (long)(timeout - jiffies)) + 1) < 0) { pr_err("%s: hanging request %llx:%lld did not complete\n", @@ -3426,7 +3426,7 @@ static int live_preempt_timeout(void *arg) cpu_relax(); saved_timeout = engine->props.preempt_timeout_ms; - engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ + engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */ i915_request_get(rq); i915_request_add(rq); diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c index 3eff364ccf3ac7..ca460cee4f8bc5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_migrate.c +++ 
b/drivers/gpu/drm/i915/gt/selftest_migrate.c @@ -336,7 +336,7 @@ static int clear(struct intel_migrate *migrate, if (vaddr[x] != val) { pr_err("%ps failed, (%u != %u), offset: %zu\n", - fn, vaddr[x], val, x * sizeof(u32)); + fn, vaddr[x], val, x * sizeof(u32)); igt_hexdump(vaddr + i * 1024, 4096); err = -EINVAL; } diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c index 021f51d9b45681..aab2759067d2ec 100644 --- a/drivers/gpu/drm/i915/gt/sysfs_engines.c +++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c @@ -530,9 +530,8 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915) err_object: kobject_put(kobj); err_engine: - dev_err(kdev, "Failed to add sysfs engine '%s'\n", - engine->name); - break; + dev_warn(kdev, "Failed to add sysfs engine '%s'\n", + engine->name); } } } diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h index 37ff539a6963d7..0c709e6c15be7a 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h @@ -107,6 +107,7 @@ enum { enum { GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001, GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002, + GUC_WORKAROUND_KLV_AVOID_GFX_CLEAR_WHILE_ACTIVE = 0x9006, }; #endif /* _ABI_GUC_KLVS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 5e60a34692af80..097fc6bd1285e4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -296,7 +296,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) /* Wa_16019325821 */ /* Wa_14019159160 */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) flags |= GUC_WA_RCS_CCS_SWITCHOUT; /* diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 7995f059f30df9..46fabbfc775e07 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -815,8 +815,7 @@ guc_capture_prep_lists(struct intel_guc *guc) return PAGE_ALIGN(total_size); } -static void guc_waklv_enable_simple(struct intel_guc *guc, - u32 klv_id, u32 *offset, u32 *remain) +static void guc_waklv_enable_simple(struct intel_guc *guc, u32 *offset, u32 *remain, u32 klv_id) { u32 size; u32 klv_entry[] = { @@ -850,19 +849,20 @@ static void guc_waklv_init(struct intel_guc *guc) remain = guc_ads_waklv_size(guc); /* Wa_14019159160 */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) - guc_waklv_enable_simple(guc, - GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE, - &offset, &remain); + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) { + guc_waklv_enable_simple(guc, &offset, &remain, + GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE); + guc_waklv_enable_simple(guc, &offset, &remain, + GUC_WORKAROUND_KLV_AVOID_GFX_CLEAR_WHILE_ACTIVE); + } /* Wa_16021333562 */ if ((GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 21, 1)) && (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_MEDIA_GT_IP_RANGE(gt, IP_VER(13, 0), IP_VER(13, 0)) || IS_DG2(gt->i915))) - guc_waklv_enable_simple(guc, - GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED, - &offset, &remain); + guc_waklv_enable_simple(guc, &offset, &remain, + GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED); size = guc_ads_waklv_size(guc) - remain; if (!size) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 
908ebfa2293335..ed979847187f53 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2014,11 +2014,12 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc) /* * Technically possible for either of these values to be non-zero here, - * but very unlikely + harmless. Regardless let's add a warn so we can + * but very unlikely + harmless. Regardless let's add an error so we can * see in CI if this happens frequently / a precursor to taking down the * machine. */ - GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h)); + if (atomic_read(&guc->outstanding_submission_g2h)) + guc_err(guc, "Unexpected outstanding GuC to Host in reset finish\n"); atomic_set(&guc->outstanding_submission_g2h, 0); intel_guc_global_policies_update(guc); @@ -4506,7 +4507,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) /* Wa_16019325821 */ /* Wa_14019159160 */ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) && - IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71))) + IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT; /* diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 7a63abf8f644c8..5b8080ec5315b6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -99,7 +99,7 @@ static void __confirm_options(struct intel_uc *uc) } if (!intel_uc_supports_guc(uc)) - gt_info(gt, "Incompatible option enable_guc=%d - %s\n", + gt_info(gt, "Incompatible option enable_guc=%d - %s\n", i915->params.enable_guc, "GuC is not supported!"); if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION && diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index af9afdb53c7f5a..c022dc73604568 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -42,8 +42,8 @@ #define GMBUS1_TOTAL_BYTES_MASK 0x1ff #define gmbus1_total_byte_count(v) (((v) >> \ GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK) -#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1) -#define gmbus1_slave_index(v) (((v) >> 8) & 0xff) +#define gmbus1_target_addr(v) (((v) & 0xff) >> 1) +#define gmbus1_target_index(v) (((v) >> 8) & 0xff) #define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7) /* GMBUS0 bits definitions */ @@ -54,7 +54,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid; unsigned char chr = 0; - if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { + if (edid->state == I2C_NOT_SPECIFIED || !edid->target_selected) { gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); return 0; } @@ -179,7 +179,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid; - u32 slave_addr; + u32 target_addr; u32 wvalue = *(u32 *)p_data; if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) { @@ -210,21 +210,21 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, i2c_edid->gmbus.total_byte_count = gmbus1_total_byte_count(wvalue); - slave_addr = gmbus1_slave_addr(wvalue); + target_addr = gmbus1_target_addr(wvalue); /* vgpu gmbus only support EDID */ - if (slave_addr == EDID_ADDR) { - i2c_edid->slave_selected = true; - } else if (slave_addr != 0) { + if (target_addr == EDID_ADDR) { + i2c_edid->target_selected = true; + } else if 
(target_addr != 0) { gvt_dbg_dpy( - "vgpu%d: unsupported gmbus slave addr(0x%x)\n" + "vgpu%d: unsupported gmbus target addr(0x%x)\n" " gmbus operations will be ignored.\n", - vgpu->id, slave_addr); + vgpu->id, target_addr); } if (wvalue & GMBUS_CYCLE_INDEX) i2c_edid->current_edid_read = - gmbus1_slave_index(wvalue); + gmbus1_target_index(wvalue); i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue); switch (gmbus1_bus_cycle(wvalue)) { @@ -523,7 +523,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } else if (addr == EDID_ADDR) { i2c_edid->state = I2C_AUX_CH; i2c_edid->port = port_idx; - i2c_edid->slave_selected = true; + i2c_edid->target_selected = true; if (intel_vgpu_has_monitor_on_port(vgpu, port_idx) && intel_vgpu_port_is_dp(vgpu, port_idx)) @@ -542,7 +542,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, return; if (drm_WARN_ON(&i915->drm, msg_length != 4)) return; - if (i2c_edid->edid_available && i2c_edid->slave_selected) { + if (i2c_edid->edid_available && i2c_edid->target_selected) { unsigned char val = edid_get_byte(vgpu); aux_data_for_write = (val << 16); @@ -571,7 +571,7 @@ void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu) edid->state = I2C_NOT_SPECIFIED; edid->port = -1; - edid->slave_selected = false; + edid->target_selected = false; edid->edid_available = false; edid->current_edid_read = 0; diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index dfe0cbc6aad85c..c3b5a55aecb341 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -80,7 +80,7 @@ enum gmbus_cycle_type { * R/W Protect * Command and Status. * bit0 is the direction bit: 1 is read; 0 is write. - * bit1 - bit7 is slave 7-bit address. + * bit1 - bit7 is target 7-bit address. * bit16 - bit24 total byte count (ignore?) 
* * GMBUS2: @@ -130,7 +130,7 @@ struct intel_vgpu_i2c_edid { enum i2c_state state; unsigned int port; - bool slave_selected; + bool target_selected; bool edid_available; unsigned int current_edid_read; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 38830818c12020..ca0fb126b02d65 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -425,6 +425,18 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = { .release = intel_vgpu_reg_release_opregion, }; +static bool edid_valid(const void *edid, size_t size) +{ + const struct drm_edid *drm_edid; + bool is_valid; + + drm_edid = drm_edid_alloc(edid, size); + is_valid = drm_edid_valid(drm_edid); + drm_edid_free(drm_edid); + + return is_valid; +} + static int handle_edid_regs(struct intel_vgpu *vgpu, struct vfio_edid_region *region, char *buf, size_t count, u16 offset, bool is_write) @@ -443,11 +455,7 @@ static int handle_edid_regs(struct intel_vgpu *vgpu, switch (offset) { case offsetof(struct vfio_region_gfx_edid, link_state): if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) { - if (!drm_edid_block_valid( - (u8 *)region->edid_blob, - 0, - true, - NULL)) { + if (!edid_valid(region->edid_blob, EDID_SIZE)) { gvt_vgpu_err("invalid EDID blob\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index d2bed466540ab2..908f910420c20c 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -86,7 +86,7 @@ struct efp_child_device_config { u8 skip2; u8 dvo_port; u8 i2c_pin; /* for add-in card */ - u8 slave_addr; /* for add-in card */ + u8 target_addr; /* for add-in card */ u8 ddc_pin; u16 edid_ptr; u8 dvo_config; diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h index 020f1aa28322b2..63874d385c6f1b 100644 --- a/drivers/gpu/drm/i915/gvt/trace.h +++ b/drivers/gpu/drm/i915/gvt/trace.h @@ -227,7 +227,7 @@ TRACE_EVENT(oos_sync, #define GVT_CMD_STR_LEN 40 TRACE_EVENT(gvt_command, TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, - u32 cmd_len, u32 buf_type, u32 buf_addr_type, + u32 cmd_len, u32 buf_type, u32 buf_addr_type, void *workload, const char *cmd_name), TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type, diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index bc717cf544e429..f969f585d07b98 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -66,6 +66,7 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) static int i915_capabilities(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_display *display = &i915->display; struct drm_printer p = drm_seq_file_printer(m); seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); @@ -77,7 +78,7 @@ static int i915_capabilities(struct seq_file *m, void *data) kernel_param_lock(THIS_MODULE); i915_params_dump(&i915->params, &p); - intel_display_params_dump(i915, &p); + intel_display_params_dump(display, &p); kernel_param_unlock(THIS_MODULE); return 0; diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index fb8e9c2fcea531..a40f05b993dad9 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -49,7 +49,7 @@ #include "display/intel_bw.h" #include "display/intel_cdclk.h" #include "display/intel_display_driver.h" -#include "display/intel_display_types.h" +#include 
"display/intel_display.h" #include "display/intel_dmc.h" #include "display/intel_dp.h" #include "display/intel_dpt.h" @@ -58,10 +58,8 @@ #include "display/intel_hotplug.h" #include "display/intel_overlay.h" #include "display/intel_pch_refclk.h" -#include "display/intel_pipe_crc.h" #include "display/intel_pps.h" #include "display/intel_sprite.h" -#include "display/intel_vga.h" #include "display/skl_watermark.h" #include "gem/i915_gem_context.h" @@ -442,6 +440,7 @@ static int i915_pcode_init(struct drm_i915_private *i915) */ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; @@ -451,8 +450,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (HAS_PPGTT(dev_priv)) { if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_full_ppgtt(dev_priv)) { - i915_report_error(dev_priv, - "incompatible vGPU found, support for isolated ppGTT required\n"); + drm_err(&dev_priv->drm, + "incompatible vGPU found, support for isolated ppGTT required\n"); return -ENXIO; } } @@ -465,8 +464,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) */ if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_hwsp_emulation(dev_priv)) { - i915_report_error(dev_priv, - "old vGPU host found, support for HWSP emulation required\n"); + drm_err(&dev_priv->drm, + "old vGPU host found, support for HWSP emulation required\n"); return -ENXIO; } } @@ -542,7 +541,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_msi; - intel_opregion_setup(dev_priv); + intel_opregion_setup(display); ret = i915_pcode_init(dev_priv); if (ret) @@ -559,7 +558,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) return 0; err_opregion: - intel_opregion_cleanup(dev_priv); + intel_opregion_cleanup(display); err_msi: if (pdev->msi_enabled) pci_disable_msi(pdev); @@ -580,11 +579,12 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) */ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); i915_perf_fini(dev_priv); - intel_opregion_cleanup(dev_priv); + intel_opregion_cleanup(display); if (pdev->msi_enabled) pci_disable_msi(pdev); @@ -723,7 +723,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) if (IS_ERR(i915)) return i915; - pci_set_drvdata(pdev, i915); + pci_set_drvdata(pdev, &i915->drm); /* Device parameters start as a copy of module parameters. */ i915_params_copy(&i915->params, &i915_modparams); @@ -1014,6 +1014,7 @@ static int i915_drm_prepare(struct drm_device *dev) static int i915_drm_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = &dev_priv->display; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); pci_power_t opregion_target_state; @@ -1049,7 +1050,7 @@ static int i915_drm_suspend(struct drm_device *dev) i915_save_display(dev_priv); opregion_target_state = suspend_to_idle(dev_priv) ? 
PCI_D1 : PCI_D3cold; - intel_opregion_suspend(dev_priv, opregion_target_state); + intel_opregion_suspend(display, opregion_target_state); dev_priv->suspend_count++; @@ -1138,6 +1139,7 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, static int i915_drm_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = &dev_priv->display; struct intel_gt *gt; int ret, i; @@ -1165,7 +1167,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_dmc_resume(dev_priv); i915_restore_display(dev_priv); - intel_pps_unlock_regs_wa(dev_priv); + intel_pps_unlock_regs_wa(display); intel_init_pch_refclk(dev_priv); @@ -1205,7 +1207,7 @@ static int i915_drm_resume(struct drm_device *dev) } intel_hpd_poll_disable(dev_priv); - intel_opregion_resume(dev_priv); + intel_opregion_resume(display); intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); @@ -1454,6 +1456,7 @@ static int i915_pm_restore(struct device *kdev) static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); + struct intel_display *display = &dev_priv->display; struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct pci_dev *root_pdev; @@ -1528,7 +1531,7 @@ static int intel_runtime_suspend(struct device *kdev) * won't be able to restore them. Since PCI_D3hot matches the * actual specification and appears to be working, use it. */ - intel_opregion_notify_adapter(dev_priv, PCI_D3hot); + intel_opregion_notify_adapter(display, PCI_D3hot); } else { /* * current versions of firmware which depend on this opregion @@ -1537,7 +1540,7 @@ static int intel_runtime_suspend(struct device *kdev) * to distinguish it from notifications that might be sent via * the suspend path. 
*/ - intel_opregion_notify_adapter(dev_priv, PCI_D1); + intel_opregion_notify_adapter(display, PCI_D1); } assert_forcewakes_inactive(&dev_priv->uncore); @@ -1552,6 +1555,7 @@ static int intel_runtime_suspend(struct device *kdev) static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); + struct intel_display *display = &dev_priv->display; struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct pci_dev *root_pdev; @@ -1566,7 +1570,7 @@ static int intel_runtime_resume(struct device *kdev) drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count)); disable_rpm_wakeref_asserts(rpm); - intel_opregion_notify_adapter(dev_priv, PCI_D0); + intel_opregion_notify_adapter(display, PCI_D0); root_pdev = pcie_find_root_port(pdev); if (root_pdev) @@ -1671,6 +1675,7 @@ static const struct file_operations i915_driver_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = drm_show_fdinfo, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; static int @@ -1693,9 +1698,9 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 110340e02a0213..39f6614a0a99a5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -365,12 +365,16 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev) static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) { - return dev_get_drvdata(kdev); + struct drm_device *drm = dev_get_drvdata(kdev); + + return drm ? to_i915(drm) : NULL; } static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) { - return pci_get_drvdata(pdev); + struct drm_device *drm = pci_get_drvdata(pdev); + + return drm ? 
to_i915(drm) : NULL; } static inline struct intel_gt *to_gt(const struct drm_i915_private *i915) @@ -408,14 +412,8 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915) #define INTEL_REVID(i915) (to_pci_dev((i915)->drm.dev)->revision) -#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step) #define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step) #define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step) -#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step) - -#define IS_DISPLAY_STEP(__i915, since, until) \ - (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \ - INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) #define IS_GRAPHICS_STEP(__i915, since, until) \ (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \ @@ -425,10 +423,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915) (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \ INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until)) -#define IS_BASEDIE_STEP(__i915, since, until) \ - (drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \ - INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until)) - static __always_inline unsigned int __platform_mask_index(const struct intel_runtime_info *info, enum intel_platform p) @@ -680,9 +674,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, ((sizes) & ~RUNTIME_INFO(i915)->page_sizes) == 0; \ }) -/* Early gen2 have a totally busted CS tlb and require pinned batches. */ -#define HAS_BROKEN_CS_TLB(i915) (IS_I830(i915) || IS_I845G(i915)) - #define NEEDS_RC6_CTX_CORRUPTION_WA(i915) \ (IS_BROADWELL(i915) || GRAPHICS_VER(i915) == 9) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1391c01d7663ee..070ab654698798 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -39,8 +39,6 @@ #include #include -#include "display/intel_display.h" - #include "gem/i915_gem_clflush.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7bd1861ddbdfbd..a9662cc6ed1e39 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -15,7 +15,6 @@ #include #include -#include "display/intel_frontbuffer.h" #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 96c6cafd5b9e4f..6469b9bcf2ec44 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -660,9 +660,10 @@ static void err_print_params(struct drm_i915_error_state_buf *m, const struct i915_params *params) { struct drm_printer p = i915_error_printer(m); + struct intel_display *display = &m->i915->display; i915_params_dump(params, &p); - intel_display_params_dump(m->i915, &p); + intel_display_params_dump(display, &p); } static void err_print_pciid(struct drm_i915_error_state_buf *m, diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index 49db3e09826c70..17d30f6b84b031 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -5,6 +5,7 @@ #include #include +#include #include #include "i915_drv.h" @@ -36,6 +37,7 @@ struct hwm_reg { i915_reg_t pkg_rapl_limit; i915_reg_t energy_status_all; 
i915_reg_t energy_status_tile; + i915_reg_t fan_speed; }; struct hwm_energy_info { @@ -43,11 +45,17 @@ struct hwm_energy_info { long accum_energy; /* Accumulated energy for energy1_input */ }; +struct hwm_fan_info { + u32 reg_val_prev; + u64 time_prev; +}; + struct hwm_drvdata { struct i915_hwmon *hwmon; struct intel_uncore *uncore; struct device *hwmon_dev; struct hwm_energy_info ei; /* Energy info for energy1_input */ + struct hwm_fan_info fi; /* Fan info for fan1_input */ char name[12]; int gt_n; bool reset_in_progress; @@ -276,6 +284,7 @@ static const struct hwmon_channel_info * const hwm_info[] = { HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT), HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT), HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT), + HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT), NULL }; @@ -613,6 +622,69 @@ hwm_curr_write(struct hwm_drvdata *ddat, u32 attr, long val) } } +static umode_t +hwm_fan_is_visible(const struct hwm_drvdata *ddat, u32 attr) +{ + struct i915_hwmon *hwmon = ddat->hwmon; + + if (attr == hwmon_fan_input && i915_mmio_reg_valid(hwmon->rg.fan_speed)) + return 0444; + + return 0; +} + +static int +hwm_fan_input_read(struct hwm_drvdata *ddat, long *val) +{ + struct i915_hwmon *hwmon = ddat->hwmon; + struct hwm_fan_info *fi = &ddat->fi; + u64 rotations, time_now, time; + intel_wakeref_t wakeref; + u32 reg_val; + int ret = 0; + + wakeref = intel_runtime_pm_get(ddat->uncore->rpm); + mutex_lock(&hwmon->hwmon_lock); + + reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.fan_speed); + time_now = get_jiffies_64(); + + /* + * HW register value is accumulated count of pulses from + * PWM fan with the scale of 2 pulses per rotation. + */ + rotations = (reg_val - fi->reg_val_prev) / 2; + + time = jiffies_delta_to_msecs(time_now - fi->time_prev); + if (unlikely(!time)) { + ret = -EAGAIN; + goto exit; + } + + /* + * Calculate fan speed in RPM by time averaging two subsequent + * readings in minutes. 
+ * RPM = number of rotations * msecs per minute / time in msecs + */ + *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time); + + fi->reg_val_prev = reg_val; + fi->time_prev = time_now; +exit: + mutex_unlock(&hwmon->hwmon_lock); + intel_runtime_pm_put(ddat->uncore->rpm, wakeref); + return ret; +} + +static int +hwm_fan_read(struct hwm_drvdata *ddat, u32 attr, long *val) +{ + if (attr == hwmon_fan_input) + return hwm_fan_input_read(ddat, val); + + return -EOPNOTSUPP; +} + static umode_t hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel) @@ -628,6 +700,8 @@ hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type, return hwm_energy_is_visible(ddat, attr); case hwmon_curr: return hwm_curr_is_visible(ddat, attr); + case hwmon_fan: + return hwm_fan_is_visible(ddat, attr); default: return 0; } @@ -648,6 +722,8 @@ hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, return hwm_energy_read(ddat, attr, val); case hwmon_curr: return hwm_curr_read(ddat, attr, val); + case hwmon_fan: + return hwm_fan_read(ddat, attr, val); default: return -EOPNOTSUPP; } @@ -739,12 +815,14 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT; hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS; hwmon->rg.energy_status_tile = INVALID_MMIO_REG; + hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED; } else { hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG; hwmon->rg.pkg_power_sku = INVALID_MMIO_REG; hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG; hwmon->rg.energy_status_all = INVALID_MMIO_REG; hwmon->rg.energy_status_tile = INVALID_MMIO_REG; + hwmon->rg.fan_speed = INVALID_MMIO_REG; } with_intel_runtime_pm(uncore->rpm, wakeref) { @@ -755,6 +833,16 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) if (i915_mmio_reg_valid(hwmon->rg.pkg_power_sku_unit)) val_sku_unit = intel_uncore_read(uncore, hwmon->rg.pkg_power_sku_unit); + + /* + * Store the initial fan register value, so that we can use it for + * initial fan speed calculation. + */ + if (i915_mmio_reg_valid(hwmon->rg.fan_speed)) { + ddat->fi.reg_val_prev = intel_uncore_read(uncore, + hwmon->rg.fan_speed); + ddat->fi.time_prev = get_jiffies_64(); + } } hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8059ac7e15fe27..2321de48d169db 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -34,7 +34,6 @@ #include #include "display/intel_display_irq.h" -#include "display/intel_display_types.h" #include "display/intel_hotplug.h" #include "display/intel_hotplug_irq.h" #include "display/intel_lpe_audio.h" diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 7998bc74ab49d4..f5c97a620962b6 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -122,13 +122,15 @@ int remap_io_mapping(struct vm_area_struct *vma, * @addr: target user address to start at * @size: size of map area * @sgl: Start sg entry + * @offset: offset from the start of the page * @iobase: Use stored dma address offset by this address or pfn if -1 * * Note: this is only safe if the mm semaphore is held when called. 
*/ int remap_io_sg(struct vm_area_struct *vma, unsigned long addr, unsigned long size, - struct scatterlist *sgl, resource_size_t iobase) + struct scatterlist *sgl, unsigned long offset, + resource_size_t iobase) { struct remap_pfn r = { .mm = vma->vm_mm, @@ -141,6 +143,14 @@ int remap_io_sg(struct vm_area_struct *vma, /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); + while (offset >= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT) { + offset -= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT; + r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase)); + if (!r.sgt.sgp) + return -EINVAL; + } + r.sgt.curr = offset << PAGE_SHIFT; + if (!use_dma(iobase)) flush_cache_range(vma, addr, size); diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h index 04c8974d822bd1..69f9351b1a1cd4 100644 --- a/drivers/gpu/drm/i915/i915_mm.h +++ b/drivers/gpu/drm/i915/i915_mm.h @@ -30,6 +30,7 @@ int remap_io_mapping(struct vm_area_struct *vma, int remap_io_sg(struct vm_area_struct *vma, unsigned long addr, unsigned long size, - struct scatterlist *sgl, resource_size_t iobase); + struct scatterlist *sgl, unsigned long offset, + resource_size_t iobase); #endif /* __I915_MM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c index 65acd7bf75d083..7ed6d70389af97 100644 --- a/drivers/gpu/drm/i915/i915_module.c +++ b/drivers/gpu/drm/i915/i915_module.c @@ -29,6 +29,12 @@ static int i915_check_nomodeset(void) * nomodeset boot option. */ + if (i915_modparams.modeset == 0) + pr_warn("i915.modeset=0 is deprecated. Please use the 'nomodeset' kernel parameter instead.\n"); + else if (i915_modparams.modeset != -1) + pr_warn("i915.modeset=%d is deprecated. Please remove it and the 'nomodeset' kernel parameter instead.\n", + i915_modparams.modeset); + if (i915_modparams.modeset == 0) use_kms = false; @@ -36,9 +42,8 @@ static int i915_check_nomodeset(void) use_kms = false; if (!use_kms) { - /* Silently fail loading to not upset userspace. */ DRM_DEBUG_DRIVER("KMS disabled.\n"); - return 1; + return -ENODEV; } return 0; diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 316e55f3e87bd7..37746dd619fd8a 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -64,8 +64,7 @@ struct i915_params i915_modparams __read_mostly = { */ i915_param_named(modeset, int, 0400, - "Use kernel modesetting [KMS] (0=disable, " - "1=on, -1=force vga console preference [default])"); + "Deprecated. 
Use the 'nomodeset' kernel parameter instead."); i915_param_named_unsafe(reset, uint, 0400, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index ce4dfd65fafa58..d37bb3a704d083 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -26,7 +26,6 @@ #include #include -#include "display/intel_display.h" #include "display/intel_display_driver.h" #include "gt/intel_gt_regs.h" #include "gt/intel_sa_media.h" @@ -880,7 +879,7 @@ static void i915_pci_remove(struct pci_dev *pdev) { struct drm_i915_private *i915; - i915 = pci_get_drvdata(pdev); + i915 = pdev_to_i915(pdev); if (!i915) /* driver load aborted, nothing to cleanup */ return; @@ -1003,7 +1002,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - if (i915_inject_probe_failure(pci_get_drvdata(pdev))) { + if (i915_inject_probe_failure(pdev_to_i915(pdev))) { i915_pci_remove(pdev); return -ENODEV; } @@ -1025,7 +1024,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static void i915_pci_shutdown(struct pci_dev *pdev) { - struct drm_i915_private *i915 = pci_get_drvdata(pdev); + struct drm_i915_private *i915 = pdev_to_i915(pdev); i915_driver_shutdown(i915); } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 025a79fe5920e4..2406cda75b7b4b 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3751,7 +3751,6 @@ static int i915_perf_release(struct inode *inode, struct file *file) static const struct file_operations fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .release = i915_perf_release, .poll = i915_perf_poll, .read = i915_perf_read, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0e3d79227e3cc4..41f4350a7c6c58 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2396,6 +2396,7 @@ /* Display Internal Timeout Register */ #define RM_TIMEOUT _MMIO(0x42060) +#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0) #define MMIO_TIMEOUT_US(us) ((us) << 0) /* interrupts */ @@ -2515,6 +2516,10 @@ #define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */ #define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */ #define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */ +#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */ +#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */ +#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */ +#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id)) #define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */ #define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */ #define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */ @@ -2574,6 +2579,7 @@ #define GEN8_DE_MISC_IMR _MMIO(0x44464) #define GEN8_DE_MISC_IIR _MMIO(0x44468) #define GEN8_DE_MISC_IER _MMIO(0x4446c) +#define XELPDP_RM_TIMEOUT REG_BIT(29) #define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27) #define GEN8_DE_MISC_GSE REG_BIT(27) #define GEN8_DE_EDP_PSR REG_BIT(19) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 613decd4776053..8775beab9cb843 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -191,8 +191,8 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv) i915_gpu_error_sysfs_teardown(dev_priv); - device_remove_bin_file(kdev, &dpf_attrs_1); - device_remove_bin_file(kdev, &dpf_attrs); + device_remove_bin_file(kdev, &dpf_attrs_1); + 
device_remove_bin_file(kdev, &dpf_attrs); kobject_put(dev_priv->sysfs_gt); } diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 6f9e7b354b5441..2576f8f6c0f693 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -11,51 +11,10 @@ #include "i915_reg.h" #include "i915_utils.h" -#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details." - -void -__i915_printk(struct drm_i915_private *dev_priv, const char *level, - const char *fmt, ...) -{ - static bool shown_bug_once; - struct device *kdev = dev_priv->drm.dev; - bool is_error = level[1] <= KERN_ERR[1]; - bool is_debug = level[1] == KERN_DEBUG[1]; - struct va_format vaf; - va_list args; - - if (is_debug && !drm_debug_enabled(DRM_UT_DRIVER)) - return; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - if (is_error) - dev_printk(level, kdev, "%pV", &vaf); - else - dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", - __builtin_return_address(0), &vaf); - - va_end(args); - - if (is_error && !shown_bug_once) { - /* - * Ask the user to file a bug report for the error, except - * if they may have caused the bug by fiddling with unsafe - * module parameters. - */ - if (!test_taint(TAINT_USER)) - dev_notice(kdev, "%s", FDO_BUG_MSG); - shown_bug_once = true; - } -} - void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint) { - __i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n", - taint, (void *)_RET_IP_); + drm_notice(&i915->drm, "CI tainted: %#x by %pS\n", + taint, __builtin_return_address(0)); /* Failures that occur during fault injection testing are expected */ if (!i915_error_injected()) @@ -74,9 +33,9 @@ int __i915_inject_probe_error(struct drm_i915_private *i915, int err, if (++i915_probe_fail_count < i915_modparams.inject_probe_failure) return 0; - __i915_printk(i915, KERN_INFO, - "Injecting failure %d at checkpoint %u [%s:%d]\n", - err, i915_modparams.inject_probe_failure, func, line); + drm_info(&i915->drm, "Injecting failure %d at checkpoint %u [%s:%d]\n", + err, i915_modparams.inject_probe_failure, func, line); + i915_modparams.inject_probe_failure = 0; return err; } @@ -110,7 +69,7 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout) * Paranoia to make sure the compiler computes the timeout before * loading 'jiffies' as jiffies is volatile and may be updated in * the background by a timer tick. All to reduce the complexity - * of the addition and reduce the risk of losing a jiffie. + * of the addition and reduce the risk of losing a jiffy. */ barrier(); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 06ec6ceb61d579..71bdc89bd6215d 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -45,13 +45,6 @@ struct timer_list; #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ __stringify(x), (long)(x)) -void __printf(3, 4) -__i915_printk(struct drm_i915_private *dev_priv, const char *level, - const char *fmt, ...); - -#define i915_report_error(dev_priv, fmt, ...) \ - __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) - #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) int __i915_inject_probe_error(struct drm_i915_private *i915, int err, @@ -69,9 +62,12 @@ bool i915_error_injected(void); #define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV) -#define i915_probe_error(i915, fmt, ...) \ - __i915_printk(i915, i915_error_injected() ? 
KERN_DEBUG : KERN_ERR, \ - fmt, ##__VA_ARGS__) +#define i915_probe_error(i915, fmt, ...) ({ \ + if (i915_error_injected()) \ + drm_dbg(&(i915)->drm, fmt, ##__VA_ARGS__); \ + else \ + drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \ +}) #define range_overflows(start, size, max) ({ \ typeof(start) start__ = (start); \ diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index e356dfb883d343..6a6be8048aa83f 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -389,7 +389,6 @@ void i915_vma_unpin_iomap(struct i915_vma *vma); * i915_vma_unpin_fence(). * * Returns: - * * True if the vma has a fence, false otherwise. */ int __must_check i915_vma_pin_fence(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index eede5417cb3fed..3c47c625993e42 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -108,8 +108,6 @@ void intel_device_info_print(const struct intel_device_info *info, drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step)); drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step)); - drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step)); - drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step)); drm_printf(p, "gt: %d\n", info->gt); drm_printf(p, "memory-regions: 0x%x\n", info->memory_regions); @@ -124,7 +122,6 @@ void intel_device_info_print(const struct intel_device_info *info, #undef PRINT_FLAG drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu)); - drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq); } #define ID(id) (id) @@ -377,10 +374,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) "Disabling ppGTT for VT-d support\n"); runtime->ppgtt_type = INTEL_PPGTT_NONE; } - - runtime->rawclk_freq = intel_read_rawclk(dev_priv); - drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq); - } /* diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index df73ef94615dd8..643ff1bf74eeb0 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -207,8 +207,6 @@ struct intel_runtime_info { u16 device_id; - u32 rawclk_freq; - struct intel_step_info step; unsigned int page_sizes; /* page sizes supported by the HW */ diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c index a5adfb5d8fd2ac..285b96fadfd5f1 100644 --- a/drivers/gpu/drm/i915/intel_step.c +++ b/drivers/gpu/drm/i915/intel_step.c @@ -23,8 +23,7 @@ * use a macro to define these to make it easier to identify the platforms * where the two steppings can deviate. 
*/ -#define COMMON_STEP(x) .graphics_step = STEP_##x, .display_step = STEP_##x, .media_step = STEP_##x -#define COMMON_GT_MEDIA_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x +#define COMMON_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x static const struct intel_step_info skl_revids[] = { [0x6] = { COMMON_STEP(G0) }, @@ -34,13 +33,13 @@ static const struct intel_step_info skl_revids[] = { }; static const struct intel_step_info kbl_revids[] = { - [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, - [2] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 }, - [3] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_B0 }, - [4] = { COMMON_GT_MEDIA_STEP(F0), .display_step = STEP_C0 }, - [5] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B1 }, - [6] = { COMMON_GT_MEDIA_STEP(D1), .display_step = STEP_B1 }, - [7] = { COMMON_GT_MEDIA_STEP(G0), .display_step = STEP_C0 }, + [1] = { COMMON_STEP(B0) }, + [2] = { COMMON_STEP(C0) }, + [3] = { COMMON_STEP(D0) }, + [4] = { COMMON_STEP(F0) }, + [5] = { COMMON_STEP(C0) }, + [6] = { COMMON_STEP(D1) }, + [7] = { COMMON_STEP(G0) }, }; static const struct intel_step_info bxt_revids[] = { @@ -64,16 +63,16 @@ static const struct intel_step_info jsl_ehl_revids[] = { }; static const struct intel_step_info tgl_uy_revids[] = { - [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, - [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 }, - [2] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 }, - [3] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, + [0] = { COMMON_STEP(A0) }, + [1] = { COMMON_STEP(B0) }, + [2] = { COMMON_STEP(B1) }, + [3] = { COMMON_STEP(C0) }, }; /* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */ static const struct intel_step_info tgl_revids[] = { - [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 }, - [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_D0 }, + [0] = { COMMON_STEP(A0) }, + [1] = { COMMON_STEP(B0) }, }; static const struct intel_step_info rkl_revids[] = { @@ -88,49 +87,49 @@ static const struct intel_step_info dg1_revids[] = { }; static const struct intel_step_info adls_revids[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, - [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A2 }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, - [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 }, - [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x1] = { COMMON_STEP(A0) }, + [0x4] = { COMMON_STEP(B0) }, + [0x8] = { COMMON_STEP(C0) }, + [0xC] = { COMMON_STEP(D0) }, }; static const struct intel_step_info adlp_revids[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, - [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 }, - [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x4] = { COMMON_STEP(B0) }, + [0x8] = { COMMON_STEP(C0) }, + [0xC] = { COMMON_STEP(C0) }, }; static const struct intel_step_info dg2_g10_revid_step_tbl[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, - [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, - [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x1] = { COMMON_STEP(A1) }, + [0x4] = { COMMON_STEP(B0) }, + [0x8] = { COMMON_STEP(C0) 
}, }; static const struct intel_step_info dg2_g11_revid_step_tbl[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 }, - [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x4] = { COMMON_STEP(B0) }, + [0x5] = { COMMON_STEP(B1) }, }; static const struct intel_step_info dg2_g12_revid_step_tbl[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_C0 }, - [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_C0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x1] = { COMMON_STEP(A1) }, }; static const struct intel_step_info adls_rpls_revids[] = { - [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_D0 }, - [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 }, + [0x4] = { COMMON_STEP(D0) }, + [0xC] = { COMMON_STEP(D0) }, }; static const struct intel_step_info adlp_rplp_revids[] = { - [0x4] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_E0 }, + [0x4] = { COMMON_STEP(C0) }, }; static const struct intel_step_info adlp_n_revids[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_D0 }, + [0x0] = { COMMON_STEP(A0) }, }; static u8 gmd_to_intel_step(struct drm_i915_private *i915, @@ -158,11 +157,6 @@ void intel_step_init(struct drm_i915_private *i915) &RUNTIME_INFO(i915)->graphics.ip); step.media_step = gmd_to_intel_step(i915, &RUNTIME_INFO(i915)->media.ip); - step.display_step = STEP_A0 + DISPLAY_RUNTIME_INFO(i915)->ip.step; - if (step.display_step >= STEP_FUTURE) { - drm_dbg(&i915->drm, "Using future display steppings\n"); - step.display_step = STEP_FUTURE; - } RUNTIME_INFO(i915)->step = step; @@ -252,7 +246,6 @@ void intel_step_init(struct drm_i915_private *i915) } else { drm_dbg(&i915->drm, "Using future steppings\n"); step.graphics_step = STEP_FUTURE; - step.display_step = STEP_FUTURE; } } @@ -275,8 +268,3 @@ const char *intel_step_name(enum intel_step step) return "**"; } } - -const char *intel_display_step_name(struct drm_i915_private *i915) -{ - return intel_step_name(RUNTIME_INFO(i915)->step.display_step); -} diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h index b6f43b62477487..22f1d690516005 100644 --- a/drivers/gpu/drm/i915/intel_step.h +++ b/drivers/gpu/drm/i915/intel_step.h @@ -16,9 +16,7 @@ struct intel_step_info { * the expectation breaks gmd_to_intel_step(). 
*/ u8 graphics_step; /* Represents the compute tile on Xe_HPC */ - u8 display_step; u8 media_step; - u8 basedie_step; }; #define STEP_ENUM_VAL(name) STEP_##name, @@ -78,6 +76,5 @@ enum intel_step { void intel_step_init(struct drm_i915_private *i915); const char *intel_step_name(enum intel_step step); -const char *intel_display_step_name(struct drm_i915_private *i915); #endif /* __INTEL_STEP_H__ */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 2eba289d88ad11..6aa179a3e92aac 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -24,6 +24,7 @@ #include #include +#include "gt/intel_gt.h" #include "gt/intel_engine_regs.h" #include "gt/intel_gt_regs.h" @@ -180,14 +181,16 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) if (!wait_ack_clear(d, FORCEWAKE_KERNEL)) return; - if (fw_ack(d) == ~0) + if (fw_ack(d) == ~0) { drm_err(&d->uncore->i915->drm, "%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n", intel_uncore_forcewake_domain_to_str(d->id)); - else + intel_gt_set_wedged_async(d->uncore->gt); + } else { drm_err(&d->uncore->i915->drm, "%s: timed out waiting for forcewake ack to clear.\n", intel_uncore_forcewake_domain_to_str(d->id)); + } add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */ } diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index ae6070b5bf07a3..f08f6674911eec 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -517,7 +517,7 @@ static int igt_mock_max_segment(void *arg) if (!IS_ALIGNED(daddr, ps)) { pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n", - __func__, &daddr, ps); + __func__, &daddr, ps); err = -EINVAL; goto out_close; } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 0bd29846873b39..91794ca17a588c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -172,7 +172,7 @@ struct drm_i915_private *mock_gem_device(void) return NULL; } - pci_set_drvdata(pdev, i915); + pci_set_drvdata(pdev, &i915->drm); /* Device parameters start as a copy of module parameters. 
*/ i915_params_copy(&i915->params, &i915_modparams); diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index ecdd5767d8ef53..b574e23d484ba8 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -668,7 +668,7 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset, void *padding_start = ((u8 *)instance) + union_offset + member_size; size_t padding_size = union_size - member_size; - return !memchr_inv(padding_start, 0, padding_size); + return mem_is_zero(padding_start, padding_size); } /** diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index 5ed9c98fb599c8..20cb4601208214 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -782,7 +782,7 @@ static void pvr_queue_start(struct pvr_queue *queue) } } - drm_sched_start(&queue->scheduler, true); + drm_sched_start(&queue->scheduler); } /** @@ -842,7 +842,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job) } mutex_unlock(&pvr_dev->queues.lock); - drm_sched_start(sched, true); + drm_sched_start(sched); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 55dedd73f528c8..91d7808a2d8d37 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -34,7 +34,7 @@ struct imx_parallel_display_encoder { struct imx_parallel_display { struct device *dev; - void *edid; + const struct drm_edid *drm_edid; u32 bus_format; u32 bus_flags; struct drm_display_mode mode; @@ -62,9 +62,9 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) if (num_modes > 0) return num_modes; - if (imxpd->edid) { - drm_connector_update_edid_property(connector, imxpd->edid); - num_modes = drm_add_edid_modes(connector, imxpd->edid); + if (imxpd->drm_edid) { + drm_edid_connector_update(connector, imxpd->drm_edid); + num_modes = drm_edid_connector_add_modes(connector); } if (np) { @@ -331,7 +331,7 @@ static int imx_pd_probe(struct platform_device *pdev) edidp = of_get_property(np, "edid", &edid_len); if (edidp) - imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL); + imxpd->drm_edid = drm_edid_alloc(edidp, edid_len); ret = of_property_read_string(np, "interface-pix-fmt", &fmt); if (!ret) { @@ -355,7 +355,11 @@ static int imx_pd_probe(struct platform_device *pdev) static void imx_pd_remove(struct platform_device *pdev) { + struct imx_parallel_display *imxpd = platform_get_drvdata(pdev); + component_del(&pdev->dev, &imx_pd_ops); + + drm_edid_free(imxpd->drm_edid); } static const struct of_device_id imx_pd_dt_ids[] = { diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index bbf3f8feab9449..1a944edb6ddc6c 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -463,7 +463,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job lima_pm_idle(ldev); drm_sched_resubmit_jobs(&pipe->base); - drm_sched_start(&pipe->base, true); + drm_sched_start(&pipe->base); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c index 465f622ac05db7..2e42c6970c9fdc 100644 --- a/drivers/gpu/drm/loongson/lsdc_ttm.c +++ b/drivers/gpu/drm/loongson/lsdc_ttm.c @@ -341,16 +341,12 @@ void lsdc_bo_unpin(struct lsdc_bo *lbo) void lsdc_bo_ref(struct lsdc_bo *lbo) { - struct 
ttm_buffer_object *tbo = &lbo->tbo; - - ttm_bo_get(tbo); + drm_gem_object_get(&lbo->tbo.base); } void lsdc_bo_unref(struct lsdc_bo *lbo) { - struct ttm_buffer_object *tbo = &lbo->tbo; - - ttm_bo_put(tbo); + drm_gem_object_put(&lbo->tbo.base); } int lsdc_bo_kmap(struct lsdc_bo *lbo) diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 6f34f573e127ec..175b00e5a25354 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -69,6 +69,8 @@ struct mtk_crtc { /* lock for display hardware access */ struct mutex hw_lock; bool config_updating; + /* lock for config_updating to cmd buffer */ + spinlock_t config_lock; }; struct mtk_crtc_state { @@ -106,51 +108,18 @@ static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc) static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc) { + unsigned long flags; + drm_crtc_handle_vblank(&mtk_crtc->base); + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) { mtk_crtc_finish_page_flip(mtk_crtc); mtk_crtc->pending_needs_vblank = false; } + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); } -#if IS_REACHABLE(CONFIG_MTK_CMDQ) -static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, - size_t size) -{ - struct device *dev; - dma_addr_t dma_addr; - - pkt->va_base = kzalloc(size, GFP_KERNEL); - if (!pkt->va_base) - return -ENOMEM; - - pkt->buf_size = size; - pkt->cl = (void *)client; - - dev = client->chan->mbox->dev; - dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_addr)) { - dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); - kfree(pkt->va_base); - return -ENOMEM; - } - - pkt->pa_base = dma_addr; - - return 0; -} - -static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt) -{ - struct cmdq_client *client = (struct cmdq_client *)pkt->cl; - - dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size, - DMA_TO_DEVICE); - kfree(pkt->va_base); -} -#endif - static void mtk_crtc_destroy(struct drm_crtc *crtc) { struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); @@ -158,7 +127,7 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc) mtk_mutex_put(mtk_crtc->mutex); #if IS_REACHABLE(CONFIG_MTK_CMDQ) - mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle); + cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); if (mtk_crtc->cmdq_client.chan) { mbox_free_channel(mtk_crtc->cmdq_client.chan); @@ -308,12 +277,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client); struct mtk_crtc_state *state; unsigned int i; + unsigned long flags; if (data->sta < 0) return; state = to_mtk_crtc_state(mtk_crtc->base.state); + spin_lock_irqsave(&mtk_crtc->config_lock, flags); + if (mtk_crtc->config_updating) { + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + goto ddp_cmdq_cb_out; + } + state->pending_config = false; if (mtk_crtc->pending_planes) { @@ -340,6 +316,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) mtk_crtc->pending_async_planes = false; } + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + +ddp_cmdq_cb_out: + mtk_crtc->cmdq_vblank_cnt = 0; wake_up(&mtk_crtc->cb_blocking_queue); } @@ -449,6 +429,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc) { struct drm_device *drm = mtk_crtc->base.dev; struct drm_crtc *crtc = &mtk_crtc->base; + unsigned long flags; int i; for (i = 0; i 
< mtk_crtc->ddp_comp_nr; i++) { @@ -480,10 +461,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc) pm_runtime_put(drm->dev); if (crtc->state->event && !crtc->state->active) { - spin_lock_irq(&crtc->dev->event_lock); + spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; - spin_unlock_irq(&crtc->dev->event_lock); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } } @@ -569,9 +550,14 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) struct mtk_drm_private *priv = crtc->dev->dev_private; unsigned int pending_planes = 0, pending_async_planes = 0; int i; + unsigned long flags; mutex_lock(&mtk_crtc->hw_lock); + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); mtk_crtc->config_updating = true; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + if (needs_vblank) mtk_crtc->pending_needs_vblank = true; @@ -607,7 +593,7 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); mtk_crtc_ddp_config(crtc, cmdq_handle); - cmdq_pkt_finalize(cmdq_handle); + cmdq_pkt_eoc(cmdq_handle); dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev, cmdq_handle->pa_base, cmdq_handle->cmd_buf_size, @@ -625,7 +611,10 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); } #endif + spin_lock_irqsave(&mtk_crtc->config_lock, flags); mtk_crtc->config_updating = false; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + mutex_unlock(&mtk_crtc->hw_lock); } @@ -925,7 +914,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes), mtk_ddp_comp_supported_rotations(comp), mtk_ddp_comp_get_formats(comp), - mtk_ddp_comp_get_num_formats(comp)); + mtk_ddp_comp_get_num_formats(comp), i); if (ret) return ret; @@ -1068,6 +1057,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); mutex_init(&mtk_crtc->hw_lock); + spin_lock_init(&mtk_crtc->config_lock); #if IS_REACHABLE(CONFIG_MTK_CMDQ) i = priv->mbox_index++; @@ -1094,9 +1084,9 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, mbox_free_channel(mtk_crtc->cmdq_client.chan); mtk_crtc->cmdq_client.chan = NULL; } else { - ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client, - &mtk_crtc->cmdq_handle, - PAGE_SIZE); + ret = cmdq_pkt_create(&mtk_crtc->cmdq_client, + &mtk_crtc->cmdq_handle, + PAGE_SIZE); if (ret) { dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n", drm_crtc_index(&mtk_crtc->base)); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 9d6d9fd8342e41..89b439dcf3a6af 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -56,8 +56,12 @@ #define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4) #define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8) +#define OVL_CON_CLRFMT_MAN BIT(23) #define OVL_CON_BYTE_SWAP BIT(24) -#define OVL_CON_MTX_YUV_TO_RGB (6 << 16) + +/* OVL_CON_RGB_SWAP works only if OVL_CON_CLRFMT_MAN is enabled */ +#define OVL_CON_RGB_SWAP BIT(25) + #define OVL_CON_CLRFMT_RGB (1 << 12) #define OVL_CON_CLRFMT_ARGB8888 (2 << 12) #define 
OVL_CON_CLRFMT_RGBA8888 (3 << 12) @@ -65,6 +69,11 @@ #define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) #define OVL_CON_CLRFMT_UYVY (4 << 12) #define OVL_CON_CLRFMT_YUYV (5 << 12) +#define OVL_CON_MTX_YUV_TO_RGB (6 << 16) +#define OVL_CON_CLRFMT_PARGB8888 ((3 << 12) | OVL_CON_CLRFMT_MAN) +#define OVL_CON_CLRFMT_PABGR8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_RGB_SWAP) +#define OVL_CON_CLRFMT_PBGRA8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_PRGBA8888 (OVL_CON_CLRFMT_PABGR8888 | OVL_CON_BYTE_SWAP) #define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \ 0 : OVL_CON_CLRFMT_RGB) #define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \ @@ -377,7 +386,8 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx, DISP_REG_OVL_RDMA_CTRL(idx)); } -static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) +static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt, + unsigned int blend_mode) { /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" * is defined in mediatek HW data sheet. @@ -398,22 +408,30 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) case DRM_FORMAT_RGBA8888: case DRM_FORMAT_RGBX1010102: case DRM_FORMAT_RGBA1010102: - return OVL_CON_CLRFMT_RGBA8888; + return blend_mode == DRM_MODE_BLEND_COVERAGE ? + OVL_CON_CLRFMT_RGBA8888 : + OVL_CON_CLRFMT_PRGBA8888; case DRM_FORMAT_BGRX8888: case DRM_FORMAT_BGRA8888: case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_BGRA1010102: - return OVL_CON_CLRFMT_BGRA8888; + return blend_mode == DRM_MODE_BLEND_COVERAGE ? + OVL_CON_CLRFMT_BGRA8888 : + OVL_CON_CLRFMT_PBGRA8888; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: - return OVL_CON_CLRFMT_ARGB8888; + return blend_mode == DRM_MODE_BLEND_COVERAGE ? + OVL_CON_CLRFMT_ARGB8888 : + OVL_CON_CLRFMT_PARGB8888; case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ABGR2101010: - return OVL_CON_CLRFMT_ABGR8888; + return blend_mode == DRM_MODE_BLEND_COVERAGE ? + OVL_CON_CLRFMT_ABGR8888 : + OVL_CON_CLRFMT_PABGR8888; case DRM_FORMAT_UYVY: return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB; case DRM_FORMAT_YUYV: @@ -434,6 +452,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, unsigned int fmt = pending->format; unsigned int offset = (pending->y << 16) | pending->x; unsigned int src_size = (pending->height << 16) | pending->width; + unsigned int blend_mode = state->base.pixel_blend_mode; unsigned int ignore_pixel_alpha = 0; unsigned int con; bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR; @@ -452,7 +471,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, return; } - con = ovl_fmt_convert(ovl, fmt); + con = ovl_fmt_convert(ovl, fmt, blend_mode); if (state->base.fb) { con |= OVL_CON_AEN; con |= state->base.alpha & OVL_CON_ALPHA; @@ -463,7 +482,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, * For RGB888 related formats, whether CONST_BLD is enabled or not won't * affect the result. Therefore we use !has_alpha as the condition. 
*/ - if (state->base.fb && !state->base.fb->format->has_alpha) + if ((state->base.fb && !state->base.fb->format->has_alpha) || + blend_mode == DRM_MODE_BLEND_PIXEL_NONE) ignore_pixel_alpha = OVL_CONST_BLEND; if (pending->rotation & DRM_MODE_REFLECT_Y) { diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index 1a2a73757370bc..c6768210b08b87 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -17,7 +17,6 @@ #include #include -#include "mtk_crtc.h" #include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" #include "mtk_drm_drv.h" @@ -494,12 +493,12 @@ static int compare_of(struct device *dev, void *data) static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match) { struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); - struct device_node *node, *parent; + struct device_node *parent; struct platform_device *comp_pdev; parent = dev->parent->parent->of_node->parent; - for_each_child_of_node(parent, node) { + for_each_child_of_node_scoped(parent, node) { const struct of_device_id *of_id; enum mtk_ovl_adaptor_comp_type type; int id; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 634bbba5d43f21..07243f3722604a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -341,14 +341,11 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev) dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); #endif - if (of_find_property(dev->of_node, "mediatek,rdma-fifo-size", &ret)) { - ret = of_property_read_u32(dev->of_node, - "mediatek,rdma-fifo-size", - &priv->fifo_size); - if (ret) - return dev_err_probe(dev, ret, - "Failed to get rdma fifo size\n"); - } + ret = of_property_read_u32(dev->of_node, + "mediatek,rdma-fifo-size", + &priv->fifo_size); + if (ret && (ret != -EINVAL)) + return dev_err_probe(dev, ret, "Failed to get rdma fifo size\n"); /* Disable and clear pending interrupts */ writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 77b50c56c124ce..3e807195a0d03a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -559,11 +559,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) * Configure the DMA segment size to make sure we get contiguous IOVA * when importing PRIME buffers. 
*/ - ret = dma_set_max_seg_size(dma_dev, UINT_MAX); - if (ret) { - dev_err(dma_dev, "Failed to set DMA segment size\n"); - goto err_component_unbind; - } + dma_set_max_seg_size(dma_dev, UINT_MAX); ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index b6e3c011a12d88..eeec641cab60db 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -88,12 +88,15 @@ #define DSI_HSA_WC 0x50 #define DSI_HBP_WC 0x54 #define DSI_HFP_WC 0x58 +#define HFP_HS_VB_PS_WC GENMASK(30, 16) +#define HFP_HS_EN BIT(31) #define DSI_CMDQ_SIZE 0x60 #define CMDQ_SIZE 0x3f #define CMDQ_SIZE_SEL BIT(15) #define DSI_HSTX_CKL_WC 0x64 +#define HSTX_CKL_WC GENMASK(15, 2) #define DSI_RX_DATA0 0x74 #define DSI_RX_DATA1 0x78 @@ -187,6 +190,7 @@ struct mtk_dsi_driver_data { bool has_shadow_ctl; bool has_size_ctl; bool cmdq_long_packet_ctl; + bool support_per_frame_lp; }; struct mtk_dsi { @@ -426,7 +430,75 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi, bool config_vact) writel(ps_val, dsi->regs + DSI_PSCTRL); } -static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) +static void mtk_dsi_config_vdo_timing_per_frame_lp(struct mtk_dsi *dsi) +{ + u32 horizontal_sync_active_byte; + u32 horizontal_backporch_byte; + u32 horizontal_frontporch_byte; + u32 hfp_byte_adjust, v_active_adjust; + u32 cklp_wc_min_adjust, cklp_wc_max_adjust; + u32 dsi_tmp_buf_bpp; + unsigned int da_hs_trail; + unsigned int ps_wc, hs_vb_ps_wc; + u32 v_active_roundup, hstx_cklp_wc; + u32 hstx_cklp_wc_max, hstx_cklp_wc_min; + struct videomode *vm = &dsi->vm; + + if (dsi->format == MIPI_DSI_FMT_RGB565) + dsi_tmp_buf_bpp = 2; + else + dsi_tmp_buf_bpp = 3; + + da_hs_trail = dsi->phy_timing.da_hs_trail; + ps_wc = vm->hactive * dsi_tmp_buf_bpp; + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { + horizontal_sync_active_byte = + vm->hsync_len * dsi_tmp_buf_bpp - 10; + horizontal_backporch_byte = + vm->hback_porch * dsi_tmp_buf_bpp - 10; + hfp_byte_adjust = 12; + v_active_adjust = 32 + horizontal_sync_active_byte; + cklp_wc_min_adjust = 12 + 2 + 4 + horizontal_sync_active_byte; + cklp_wc_max_adjust = 20 + 6 + 4 + horizontal_sync_active_byte; + } else { + horizontal_sync_active_byte = vm->hsync_len * dsi_tmp_buf_bpp - 4; + horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) * + dsi_tmp_buf_bpp - 10; + cklp_wc_min_adjust = 4; + cklp_wc_max_adjust = 12 + 4 + 4; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + hfp_byte_adjust = 18; + v_active_adjust = 28; + } else { + hfp_byte_adjust = 12; + v_active_adjust = 22; + } + } + horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp - hfp_byte_adjust; + v_active_roundup = (v_active_adjust + horizontal_backporch_byte + ps_wc + + horizontal_frontporch_byte) % dsi->lanes; + if (v_active_roundup) + horizontal_backporch_byte += dsi->lanes - v_active_roundup; + hstx_cklp_wc_min = (DIV_ROUND_UP(cklp_wc_min_adjust, dsi->lanes) + da_hs_trail + 1) + * dsi->lanes / 6 - 1; + hstx_cklp_wc_max = (DIV_ROUND_UP((cklp_wc_max_adjust + horizontal_backporch_byte + + ps_wc), dsi->lanes) + da_hs_trail + 1) * dsi->lanes / 6 - 1; + + hstx_cklp_wc = FIELD_PREP(HSTX_CKL_WC, (hstx_cklp_wc_min + hstx_cklp_wc_max) / 2); + writel(hstx_cklp_wc, dsi->regs + DSI_HSTX_CKL_WC); + + hs_vb_ps_wc = ps_wc - (dsi->phy_timing.lpx + dsi->phy_timing.da_hs_exit + + dsi->phy_timing.da_hs_prepare + dsi->phy_timing.da_hs_zero + 2) * dsi->lanes; + horizontal_frontporch_byte |= FIELD_PREP(HFP_HS_EN, 1) | + 
FIELD_PREP(HFP_HS_VB_PS_WC, hs_vb_ps_wc); + + writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC); + writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC); + writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC); +} + +static void mtk_dsi_config_vdo_timing_per_line_lp(struct mtk_dsi *dsi) { u32 horizontal_sync_active_byte; u32 horizontal_backporch_byte; @@ -436,7 +508,6 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) u32 dsi_tmp_buf_bpp, data_phy_cycles; u32 delta; struct mtk_phy_timing *timing = &dsi->phy_timing; - struct videomode *vm = &dsi->vm; if (dsi->format == MIPI_DSI_FMT_RGB565) @@ -444,16 +515,6 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) else dsi_tmp_buf_bpp = 3; - writel(vm->vsync_len, dsi->regs + DSI_VSA_NL); - writel(vm->vback_porch, dsi->regs + DSI_VBP_NL); - writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL); - writel(vm->vactive, dsi->regs + DSI_VACT_NL); - - if (dsi->driver_data->has_size_ctl) - writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) | - FIELD_PREP(DSI_WIDTH, vm->hactive), - dsi->regs + DSI_SIZE_CON); - horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10); if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) @@ -499,6 +560,26 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC); writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC); writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC); +} + +static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) +{ + struct videomode *vm = &dsi->vm; + + writel(vm->vsync_len, dsi->regs + DSI_VSA_NL); + writel(vm->vback_porch, dsi->regs + DSI_VBP_NL); + writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL); + writel(vm->vactive, dsi->regs + DSI_VACT_NL); + + if (dsi->driver_data->has_size_ctl) + writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) | + FIELD_PREP(DSI_WIDTH, vm->hactive), + dsi->regs + DSI_SIZE_CON); + + if (dsi->driver_data->support_per_frame_lp) + mtk_dsi_config_vdo_timing_per_frame_lp(dsi); + else + mtk_dsi_config_vdo_timing_per_line_lp(dsi); mtk_dsi_ps_control(dsi, false); } @@ -1197,6 +1278,7 @@ static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = { .has_shadow_ctl = true, .has_size_ctl = true, .cmdq_long_packet_ctl = true, + .support_per_frame_lp = true, }; static const struct of_device_id mtk_dsi_of_match[] = { diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c index 9dfd13d32dfaae..d1d9cf8b10e16d 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.c +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c @@ -3,6 +3,7 @@ * Copyright (c) 2021 MediaTek Inc. 
*/ +#include #include #include #include @@ -35,6 +36,7 @@ #define MIX_SRC_L0_EN BIT(0) #define MIX_L_SRC_CON(n) (0x28 + 0x18 * (n)) #define NON_PREMULTI_SOURCE (2 << 12) +#define PREMULTI_SOURCE (3 << 12) #define MIX_L_SRC_SIZE(n) (0x30 + 0x18 * (n)) #define MIX_L_SRC_OFFSET(n) (0x34 + 0x18 * (n)) #define MIX_FUNC_DCM0 0x120 @@ -175,7 +177,13 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, alpha_con |= state->base.alpha & MIXER_ALPHA; } - if (state->base.fb && !state->base.fb->format->has_alpha) { + if (state->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) + alpha_con |= PREMULTI_SOURCE; + else + alpha_con |= NON_PREMULTI_SOURCE; + + if ((state->base.fb && !state->base.fb->format->has_alpha) || + state->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) { /* * Mixer doesn't support CONST_BLD mode, * use a trick to make the output equivalent @@ -191,8 +199,7 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, mtk_ddp_write(cmdq_pkt, pending->height << 16 | align_width, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_SIZE(idx)); mtk_ddp_write(cmdq_pkt, offset, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_OFFSET(idx)); - mtk_ddp_write_mask(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx), - 0x1ff); + mtk_ddp_write(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx)); mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &mixer->cmdq_base, mixer->regs, MIX_SRC_CON, BIT(idx)); } diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 1723d4333f3718..7d2cb4e0fafad1 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -321,7 +321,7 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 *formats, - size_t num_formats) + size_t num_formats, unsigned int plane_idx) { int err; @@ -338,6 +338,22 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, return err; } + /* + * The hardware does not support repositioning planes by muxing: their + * Z-position is infact fixed and the only way to change the actual + * order is to swap the contents of the entire register set of one + * overlay with another, which may be more expensive than desired. + * + * With no repositioning, the caller of this function guarantees that + * the plane_idx is correct. This means that, for example, the PRIMARY + * plane fed to this function will always have plane_idx zero. 
+ */ + err = drm_plane_create_zpos_immutable_property(plane, plane_idx); + if (err) { + DRM_ERROR("Failed to create zpos property for plane %u\n", plane_idx); + return err; + } + if (supported_rotations) { err = drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, @@ -346,6 +362,17 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, DRM_INFO("Create rotation property failed\n"); } + err = drm_plane_create_alpha_property(plane); + if (err) + DRM_ERROR("failed to create property: alpha\n"); + + err = drm_plane_create_blend_mode_property(plane, + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE)); + if (err) + DRM_ERROR("failed to create property: blend_mode\n"); + drm_plane_helper_add(plane, &mtk_plane_helper_funcs); return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h index 231bb7aac94736..5b177eac67b7aa 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -49,6 +49,5 @@ to_mtk_plane_state(struct drm_plane_state *state) int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 *formats, - size_t num_formats); - + size_t num_formats, unsigned int plane_idx); #endif diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile index d1b25f9f6586ed..5a02203fad1291 100644 --- a/drivers/gpu/drm/mgag200/Makefile +++ b/drivers/gpu/drm/mgag200/Makefile @@ -12,6 +12,7 @@ mgag200-y := \ mgag200_g200se.o \ mgag200_g200wb.o \ mgag200_mode.o \ + mgag200_vga_bmc.o \ mgag200_vga.o obj-$(CONFIG_DRM_MGAG200) += mgag200.o diff --git a/drivers/gpu/drm/mgag200/mgag200_bmc.c b/drivers/gpu/drm/mgag200/mgag200_bmc.c index 23ef85aa7e3786..a689c71ff1653d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_bmc.c +++ b/drivers/gpu/drm/mgag200/mgag200_bmc.c @@ -9,12 +9,7 @@ #include "mgag200_drv.h" -static struct mgag200_bmc_connector *to_mgag200_bmc_connector(struct drm_connector *connector) -{ - return container_of(connector, struct mgag200_bmc_connector, base); -} - -void mgag200_bmc_disable_vidrst(struct mga_device *mdev) +void mgag200_bmc_stop_scanout(struct mga_device *mdev) { u8 tmp; int iter_max; @@ -73,15 +68,10 @@ void mgag200_bmc_disable_vidrst(struct mga_device *mdev) } } -void mgag200_bmc_enable_vidrst(struct mga_device *mdev) +void mgag200_bmc_start_scanout(struct mga_device *mdev) { u8 tmp; - /* Ensure that the vrsten and hrsten are set */ - WREG8(MGAREG_CRTCEXT_INDEX, 1); - tmp = RREG8(MGAREG_CRTCEXT_DATA); - WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88); - /* Assert rstlvl2 */ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2); tmp = RREG8(DAC_DATA); @@ -107,100 +97,3 @@ void mgag200_bmc_enable_vidrst(struct mga_device *mdev) tmp &= ~0x10; WREG_DAC(MGA1064_GEN_IO_DATA, tmp); } - -static const struct drm_encoder_funcs mgag200_bmc_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -static int mgag200_bmc_connector_helper_detect_ctx(struct drm_connector *connector, - struct drm_modeset_acquire_ctx *ctx, - bool force) -{ - struct mgag200_bmc_connector *bmc_connector = to_mgag200_bmc_connector(connector); - struct drm_connector *physical_connector = bmc_connector->physical_connector; - - /* - * Most user-space compositors cannot handle more than one connected - * connector per CRTC. Hence, we only mark the BMC as connected if the - * physical connector is disconnected. 
If the physical connector's status - * is connected or unknown, the BMC remains disconnected. This has no - * effect on the output of the BMC. - * - * FIXME: Remove this logic once user-space compositors can handle more - * than one connector per CRTC. The BMC should always be connected. - */ - - if (physical_connector && physical_connector->status == connector_status_disconnected) - return connector_status_connected; - - return connector_status_disconnected; -} - -static int mgag200_bmc_connector_helper_get_modes(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct mga_device *mdev = to_mga_device(dev); - const struct mgag200_device_info *minfo = mdev->info; - - return drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay); -} - -static const struct drm_connector_helper_funcs mgag200_bmc_connector_helper_funcs = { - .get_modes = mgag200_bmc_connector_helper_get_modes, - .detect_ctx = mgag200_bmc_connector_helper_detect_ctx, -}; - -static const struct drm_connector_funcs mgag200_bmc_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int mgag200_bmc_connector_init(struct drm_device *dev, - struct mgag200_bmc_connector *bmc_connector, - struct drm_connector *physical_connector) -{ - struct drm_connector *connector = &bmc_connector->base; - int ret; - - ret = drm_connector_init(dev, connector, &mgag200_bmc_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL); - if (ret) - return ret; - drm_connector_helper_add(connector, &mgag200_bmc_connector_helper_funcs); - - bmc_connector->physical_connector = physical_connector; - - return 0; -} - -int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector) -{ - struct drm_device *dev = &mdev->base; - struct drm_crtc *crtc = &mdev->crtc; - struct drm_encoder *encoder; - struct mgag200_bmc_connector *bmc_connector; - struct drm_connector *connector; - int ret; - - encoder = &mdev->output.bmc.encoder; - ret = drm_encoder_init(dev, encoder, &mgag200_bmc_encoder_funcs, - DRM_MODE_ENCODER_VIRTUAL, NULL); - if (ret) - return ret; - encoder->possible_crtcs = drm_crtc_mask(crtc); - - bmc_connector = &mdev->output.bmc.bmc_connector; - ret = mgag200_bmc_connector_init(dev, bmc_connector, physical_connector); - if (ret) - return ret; - connector = &bmc_connector->base; - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) - return ret; - - return 0; -} diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 62080cf0f2da49..6623ee4e3277df 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "mgag200_drv.h" @@ -84,6 +85,34 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size) return offset - 65536; } +static irqreturn_t mgag200_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = arg; + struct mga_device *mdev = to_mga_device(dev); + struct drm_crtc *crtc; + u32 status, ien; + + status = RREG32(MGAREG_STATUS); + + if (status & MGAREG_STATUS_VLINEPEN) { + ien = RREG32(MGAREG_IEN); + if (!(ien & MGAREG_IEN_VLINEIEN)) + goto out; + + crtc = drm_crtc_from_index(dev, 0); + if (WARN_ON_ONCE(!crtc)) + goto out; + 
drm_crtc_handle_vblank(crtc); + + WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); + + return IRQ_HANDLED; + } + +out: + return IRQ_NONE; +} + /* * DRM driver */ @@ -167,6 +196,7 @@ int mgag200_device_init(struct mga_device *mdev, const struct mgag200_device_funcs *funcs) { struct drm_device *dev = &mdev->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); u8 crtcext3, misc; int ret; @@ -192,6 +222,16 @@ int mgag200_device_init(struct mga_device *mdev, mutex_unlock(&mdev->rmmio_lock); + WREG32(MGAREG_IEN, 0); + WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); + + ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED, + dev->driver->name, dev); + if (ret) { + drm_err(dev, "Failed to acquire interrupt, error %d\n", ret); + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 7f7dfbd0f0134d..4760ba92871bb1 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -179,6 +179,8 @@ struct mgag200_crtc_state { const struct drm_format_info *format; struct mgag200_pll_values pixpllc; + + bool set_vidrst; }; static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_state *base) @@ -186,11 +188,6 @@ static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_s return container_of(base, struct mgag200_crtc_state, base); } -struct mgag200_bmc_connector { - struct drm_connector base; - struct drm_connector *physical_connector; -}; - enum mga_type { G200_PCI, G200_AGP, @@ -214,8 +211,8 @@ struct mgag200_device_info { */ unsigned long max_mem_bandwidth; - /* HW has external source (e.g., BMC) to synchronize with */ - bool has_vidrst:1; + /* Synchronize scanout with BMC */ + bool sync_bmc:1; struct { unsigned data_bit:3; @@ -230,13 +227,13 @@ struct mgag200_device_info { }; #define MGAG200_DEVICE_INFO_INIT(_max_hdisplay, _max_vdisplay, _max_mem_bandwidth, \ - _has_vidrst, _i2c_data_bit, _i2c_clock_bit, \ + _sync_bmc, _i2c_data_bit, _i2c_clock_bit, \ _bug_no_startadd) \ { \ .max_hdisplay = (_max_hdisplay), \ .max_vdisplay = (_max_vdisplay), \ .max_mem_bandwidth = (_max_mem_bandwidth), \ - .has_vidrst = (_has_vidrst), \ + .sync_bmc = (_sync_bmc), \ .i2c = { \ .data_bit = (_i2c_data_bit), \ .clock_bit = (_i2c_clock_bit), \ @@ -245,18 +242,6 @@ struct mgag200_device_info { } struct mgag200_device_funcs { - /* - * Disables an external reset source (i.e., BMC) before programming - * a new display mode. - */ - void (*disable_vidrst)(struct mga_device *mdev); - - /* - * Enables an external reset source (i.e., BMC) after programming - * a new display mode. - */ - void (*enable_vidrst)(struct mga_device *mdev); - /* * Validate that the given state can be programmed into PIXPLLC. 
On * success, the calculated parameters should be stored in the CRTC's @@ -293,10 +278,6 @@ struct mga_device { struct drm_encoder encoder; struct drm_connector connector; } vga; - struct { - struct drm_encoder encoder; - struct mgag200_bmc_connector bmc_connector; - } bmc; } output; }; @@ -410,17 +391,24 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state); void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state); void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state); +bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, + int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); #define MGAG200_CRTC_HELPER_FUNCS \ .mode_valid = mgag200_crtc_helper_mode_valid, \ .atomic_check = mgag200_crtc_helper_atomic_check, \ .atomic_flush = mgag200_crtc_helper_atomic_flush, \ .atomic_enable = mgag200_crtc_helper_atomic_enable, \ - .atomic_disable = mgag200_crtc_helper_atomic_disable + .atomic_disable = mgag200_crtc_helper_atomic_disable, \ + .get_scanout_position = mgag200_crtc_helper_get_scanout_position void mgag200_crtc_reset(struct drm_crtc *crtc); struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc); void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state); +int mgag200_crtc_enable_vblank(struct drm_crtc *crtc); +void mgag200_crtc_disable_vblank(struct drm_crtc *crtc); #define MGAG200_CRTC_FUNCS \ .reset = mgag200_crtc_reset, \ @@ -428,20 +416,26 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st .set_config = drm_atomic_helper_set_config, \ .page_flip = drm_atomic_helper_page_flip, \ .atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \ - .atomic_destroy_state = mgag200_crtc_atomic_destroy_state + .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \ + .enable_vblank = mgag200_crtc_enable_vblank, \ + .disable_vblank = mgag200_crtc_disable_vblank, \ + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp -void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode); +void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode, + bool set_vidrst); void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format); void mgag200_enable_display(struct mga_device *mdev); void mgag200_init_registers(struct mga_device *mdev); int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available); +/* mgag200_vga_bmc.c */ +int mgag200_vga_bmc_output_init(struct mga_device *mdev); + /* mgag200_vga.c */ int mgag200_vga_output_init(struct mga_device *mdev); - /* mgag200_bmc.c */ -void mgag200_bmc_disable_vidrst(struct mga_device *mdev); -void mgag200_bmc_enable_vidrst(struct mga_device *mdev); -int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector); +/* mgag200_bmc.c */ +void mgag200_bmc_stop_scanout(struct mga_device *mdev); +void mgag200_bmc_start_scanout(struct mga_device *mdev); #endif /* __MGAG200_DRV_H__ */ diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c index f874e294984099..77ce8d36cef054 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200.c @@ -8,6 +8,7 @@ 
#include #include #include +#include #include "mgag200_drv.h" @@ -403,5 +404,9 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); + ret = drm_vblank_init(dev, 1); + if (ret) + return ERR_PTR(ret); + return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c index 52bf49ead5c506..09ced65c1d2fbb 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "mgag200_drv.h" @@ -214,11 +215,7 @@ static int mgag200_g200eh_pipeline_init(struct mga_device *mdev) drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); - ret = mgag200_vga_output_init(mdev); - if (ret) - return ret; - - ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); + ret = mgag200_vga_bmc_output_init(mdev); if (ret) return ret; @@ -279,5 +276,9 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); + ret = drm_vblank_init(dev, 1); + if (ret) + return ERR_PTR(ret); + return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c index e7f89b2a59fd05..5daa469137bd30 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "mgag200_drv.h" @@ -118,11 +119,7 @@ static int mgag200_g200eh3_pipeline_init(struct mga_device *mdev) drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); - ret = mgag200_vga_output_init(mdev); - if (ret) - return ret; - - ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); + ret = mgag200_vga_bmc_output_init(mdev); if (ret) return ret; @@ -184,5 +181,9 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev, drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); + ret = drm_vblank_init(dev, 1); + if (ret) + return ERR_PTR(ret); + return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c index 4e8a1756138d7f..09cfffafe1306c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200er.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "mgag200_drv.h" @@ -191,11 +192,8 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); const struct drm_format_info *format = mgag200_crtc_state->format; - if (funcs->disable_vidrst) - funcs->disable_vidrst(mdev); - mgag200_set_format_regs(mdev, format); - mgag200_set_mode_regs(mdev, adjusted_mode); + mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst); if (funcs->pixpllc_atomic_update) funcs->pixpllc_atomic_update(crtc, old_state); @@ -209,8 +207,7 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_enable_display(mdev); - if (funcs->enable_vidrst) - funcs->enable_vidrst(mdev); + drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = { @@ -218,7 +215,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = 
mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200er_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable + .atomic_disable = mgag200_crtc_helper_atomic_disable, + .get_scanout_position = mgag200_crtc_helper_get_scanout_position, }; static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = { @@ -257,11 +255,7 @@ static int mgag200_g200er_pipeline_init(struct mga_device *mdev) drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); - ret = mgag200_vga_output_init(mdev); - if (ret) - return ret; - - ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); + ret = mgag200_vga_bmc_output_init(mdev); if (ret) return ret; @@ -318,5 +312,9 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); + ret = drm_vblank_init(dev, 1); + if (ret) + return ERR_PTR(ret); + return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c index d884f3cb0ec79c..3d48baa91d8b73 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "mgag200_drv.h" @@ -192,11 +193,8 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); const struct drm_format_info *format = mgag200_crtc_state->format; - if (funcs->disable_vidrst) - funcs->disable_vidrst(mdev); - mgag200_set_format_regs(mdev, format); - mgag200_set_mode_regs(mdev, adjusted_mode); + mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst); if (funcs->pixpllc_atomic_update) funcs->pixpllc_atomic_update(crtc, old_state); @@ -210,8 +208,7 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_enable_display(mdev); - if (funcs->enable_vidrst) - funcs->enable_vidrst(mdev); + drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = { @@ -219,7 +216,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable + .atomic_disable = mgag200_crtc_helper_atomic_disable, + .get_scanout_position = mgag200_crtc_helper_get_scanout_position, }; static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = { @@ -258,11 +256,7 @@ static int mgag200_g200ev_pipeline_init(struct mga_device *mdev) drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); - ret = mgag200_vga_output_init(mdev); - if (ret) - return ret; - - ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); + ret = mgag200_vga_bmc_output_init(mdev); if (ret) return ret; @@ -323,5 +317,9 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); + ret = drm_vblank_init(dev, 1); + if (ret) + return ERR_PTR(ret); + return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c index 839401e8b46545..dabc778e64e8e5 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c @@ -7,6 +7,7 @@ 
#include #include #include +#include #include "mgag200_drv.h" @@ -127,11 +128,7 @@ static int mgag200_g200ew3_pipeline_init(struct mga_device *mdev) drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); - ret = mgag200_vga_output_init(mdev); - if (ret) - return ret; - - ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); + ret = mgag200_vga_bmc_output_init(mdev); if (ret) return ret; @@ -146,8 +143,6 @@ static const struct mgag200_device_info mgag200_g200ew3_device_info = MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false); static const struct mgag200_device_funcs mgag200_g200ew3_device_funcs = { - .disable_vidrst = mgag200_bmc_disable_vidrst, - .enable_vidrst = mgag200_bmc_enable_vidrst, .pixpllc_atomic_check = mgag200_g200ew3_pixpllc_atomic_check, .pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update, // same as G200WB }; @@ -204,5 +199,9 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev, drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); + ret = drm_vblank_init(dev, 1); + if (ret) + return ERR_PTR(ret); + return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c index a824bb8ad57913..9dcbe830427180 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200se.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "mgag200_drv.h" @@ -323,11 +324,8 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); const struct drm_format_info *format = mgag200_crtc_state->format; - if (funcs->disable_vidrst) - funcs->disable_vidrst(mdev); - mgag200_set_format_regs(mdev, format); - mgag200_set_mode_regs(mdev, adjusted_mode); + mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst); if (funcs->pixpllc_atomic_update) funcs->pixpllc_atomic_update(crtc, old_state); @@ -341,8 +339,7 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_enable_display(mdev); - if (funcs->enable_vidrst) - funcs->enable_vidrst(mdev); + drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = { @@ -350,7 +347,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200se_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable + .atomic_disable = mgag200_crtc_helper_atomic_disable, + .get_scanout_position = mgag200_crtc_helper_get_scanout_position, }; static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = { @@ -389,11 +387,7 @@ static int mgag200_g200se_pipeline_init(struct mga_device *mdev) drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); - ret = mgag200_vga_output_init(mdev); - if (ret) - return ret; - - ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); + ret = mgag200_vga_bmc_output_init(mdev); if (ret) return ret; @@ -523,5 +517,9 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); + ret = drm_vblank_init(dev, 1); + if (ret) + return ERR_PTR(ret); + return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c 
b/drivers/gpu/drm/mgag200/mgag200_g200wb.c index 835df0f4fc13d5..83a24aedbf2f3b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "mgag200_drv.h" @@ -261,11 +262,7 @@ static int mgag200_g200wb_pipeline_init(struct mga_device *mdev) drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); - ret = mgag200_vga_output_init(mdev); - if (ret) - return ret; - - ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector); + ret = mgag200_vga_bmc_output_init(mdev); if (ret) return ret; @@ -280,8 +277,6 @@ static const struct mgag200_device_info mgag200_g200wb_device_info = MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false); static const struct mgag200_device_funcs mgag200_g200wb_device_funcs = { - .disable_vidrst = mgag200_bmc_disable_vidrst, - .enable_vidrst = mgag200_bmc_enable_vidrst, .pixpllc_atomic_check = mgag200_g200wb_pixpllc_atomic_check, .pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update, }; @@ -328,5 +323,9 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); + ret = drm_vblank_init(dev, 1); + if (ret) + return ERR_PTR(ret); + return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index d4550e4b3b01eb..7159909aca1eb6 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "mgag200_ddc.h" #include "mgag200_drv.h" @@ -201,26 +202,39 @@ void mgag200_init_registers(struct mga_device *mdev) WREG8(MGA_MISC_OUT, misc); } -void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode) +void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode, + bool set_vidrst) { - const struct mgag200_device_info *info = mdev->info; - unsigned int hdisplay, hsyncstart, hsyncend, htotal; - unsigned int vdisplay, vsyncstart, vsyncend, vtotal; + unsigned int hdispend, hsyncstr, hsyncend, htotal, hblkstr, hblkend; + unsigned int vdispend, vsyncstr, vsyncend, vtotal, vblkstr, vblkend; + unsigned int linecomp; u8 misc, crtcext1, crtcext2, crtcext5; - hdisplay = mode->hdisplay / 8 - 1; - hsyncstart = mode->hsync_start / 8 - 1; - hsyncend = mode->hsync_end / 8 - 1; - htotal = mode->htotal / 8 - 1; - + hdispend = mode->crtc_hdisplay / 8 - 1; + hsyncstr = mode->crtc_hsync_start / 8 - 1; + hsyncend = mode->crtc_hsync_end / 8 - 1; + htotal = mode->crtc_htotal / 8 - 1; /* Work around hardware quirk */ if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04) htotal++; + hblkstr = mode->crtc_hblank_start / 8 - 1; + hblkend = htotal; + + vdispend = mode->crtc_vdisplay - 1; + vsyncstr = mode->crtc_vsync_start - 1; + vsyncend = mode->crtc_vsync_end - 1; + vtotal = mode->crtc_vtotal - 2; + vblkstr = mode->crtc_vblank_start; + vblkend = vtotal + 1; - vdisplay = mode->vdisplay - 1; - vsyncstart = mode->vsync_start - 1; - vsyncend = mode->vsync_end - 1; - vtotal = mode->vtotal - 2; + /* + * There's no VBLANK interrupt on Matrox chipsets, so we use + * the VLINE interrupt instead. It triggers when the current + * <linecomp> has been reached. For VBLANK, this is the first + * non-visible line at the bottom of the screen. Therefore, + * keep <linecomp> in sync with <vblkstr>. 
+ */ + linecomp = vblkstr; misc = RREG8(MGA_MISC_IN); @@ -235,45 +249,45 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod misc &= ~MGAREG_MISC_VSYNCPOL; crtcext1 = (((htotal - 4) & 0x100) >> 8) | - ((hdisplay & 0x100) >> 7) | - ((hsyncstart & 0x100) >> 6) | - (htotal & 0x40); - if (info->has_vidrst) + ((hblkstr & 0x100) >> 7) | + ((hsyncstr & 0x100) >> 6) | + (hblkend & 0x40); + if (set_vidrst) crtcext1 |= MGAREG_CRTCEXT1_VRSTEN | MGAREG_CRTCEXT1_HRSTEN; crtcext2 = ((vtotal & 0xc00) >> 10) | - ((vdisplay & 0x400) >> 8) | - ((vdisplay & 0xc00) >> 7) | - ((vsyncstart & 0xc00) >> 5) | - ((vdisplay & 0x400) >> 3); + ((vdispend & 0x400) >> 8) | + ((vblkstr & 0xc00) >> 7) | + ((vsyncstr & 0xc00) >> 5) | + ((linecomp & 0x400) >> 3); crtcext5 = 0x00; - WREG_CRT(0, htotal - 4); - WREG_CRT(1, hdisplay); - WREG_CRT(2, hdisplay); - WREG_CRT(3, (htotal & 0x1F) | 0x80); - WREG_CRT(4, hsyncstart); - WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F)); - WREG_CRT(6, vtotal & 0xFF); - WREG_CRT(7, ((vtotal & 0x100) >> 8) | - ((vdisplay & 0x100) >> 7) | - ((vsyncstart & 0x100) >> 6) | - ((vdisplay & 0x100) >> 5) | - ((vdisplay & 0x100) >> 4) | /* linecomp */ - ((vtotal & 0x200) >> 4) | - ((vdisplay & 0x200) >> 3) | - ((vsyncstart & 0x200) >> 2)); - WREG_CRT(9, ((vdisplay & 0x200) >> 4) | - ((vdisplay & 0x200) >> 3)); - WREG_CRT(16, vsyncstart & 0xFF); - WREG_CRT(17, (vsyncend & 0x0F) | 0x20); - WREG_CRT(18, vdisplay & 0xFF); - WREG_CRT(20, 0); - WREG_CRT(21, vdisplay & 0xFF); - WREG_CRT(22, (vtotal + 1) & 0xFF); - WREG_CRT(23, 0xc3); - WREG_CRT(24, vdisplay & 0xFF); + WREG_CRT(0x00, htotal - 4); + WREG_CRT(0x01, hdispend); + WREG_CRT(0x02, hblkstr); + WREG_CRT(0x03, (hblkend & 0x1f) | 0x80); + WREG_CRT(0x04, hsyncstr); + WREG_CRT(0x05, ((hblkend & 0x20) << 2) | (hsyncend & 0x1f)); + WREG_CRT(0x06, vtotal & 0xff); + WREG_CRT(0x07, ((vtotal & 0x100) >> 8) | + ((vdispend & 0x100) >> 7) | + ((vsyncstr & 0x100) >> 6) | + ((vblkstr & 0x100) >> 5) | + ((linecomp & 0x100) >> 4) | + ((vtotal & 0x200) >> 4) | + ((vdispend & 0x200) >> 3) | + ((vsyncstr & 0x200) >> 2)); + WREG_CRT(0x09, ((vblkstr & 0x200) >> 4) | + ((linecomp & 0x200) >> 3)); + WREG_CRT(0x10, vsyncstr & 0xff); + WREG_CRT(0x11, (vsyncend & 0x0f) | 0x20); + WREG_CRT(0x12, vdispend & 0xff); + WREG_CRT(0x14, 0); + WREG_CRT(0x15, vblkstr & 0xff); + WREG_CRT(0x16, vblkend & 0xff); + WREG_CRT(0x17, 0xc3); + WREG_CRT(0x18, linecomp & 0xff); WREG_ECRT(0x01, crtcext1); WREG_ECRT(0x02, crtcext2); @@ -631,6 +645,8 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); struct drm_device *dev = crtc->dev; struct mga_device *mdev = to_mga_device(dev); + struct drm_pending_vblank_event *event; + unsigned long flags; if (crtc_state->enable && crtc_state->color_mgmt_changed) { const struct drm_format_info *format = mgag200_crtc_state->format; @@ -640,6 +656,18 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s else mgag200_crtc_set_gamma_linear(mdev, format); } + + event = crtc->state->event; + if (event) { + crtc->state->event = NULL; + + spin_lock_irqsave(&dev->event_lock, flags); + if (drm_crtc_vblank_get(crtc) != 0) + drm_crtc_send_vblank_event(crtc, event); + else + drm_crtc_arm_vblank_event(crtc, event); + spin_unlock_irqrestore(&dev->event_lock, flags); + } } void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state) @@ -652,11 +680,8 @@ void 
mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); const struct drm_format_info *format = mgag200_crtc_state->format; - if (funcs->disable_vidrst) - funcs->disable_vidrst(mdev); - mgag200_set_format_regs(mdev, format); - mgag200_set_mode_regs(mdev, adjusted_mode); + mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst); if (funcs->pixpllc_atomic_update) funcs->pixpllc_atomic_update(crtc, old_state); @@ -668,22 +693,41 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_ mgag200_enable_display(mdev); - if (funcs->enable_vidrst) - funcs->enable_vidrst(mdev); + drm_crtc_vblank_on(crtc); } void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state) { struct mga_device *mdev = to_mga_device(crtc->dev); - const struct mgag200_device_funcs *funcs = mdev->funcs; - if (funcs->disable_vidrst) - funcs->disable_vidrst(mdev); + drm_crtc_vblank_off(crtc); mgag200_disable_display(mdev); +} + +bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, + int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) +{ + struct mga_device *mdev = to_mga_device(crtc->dev); + u32 vcount; - if (funcs->enable_vidrst) - funcs->enable_vidrst(mdev); + if (stime) + *stime = ktime_get(); + + if (vpos) { + vcount = RREG32(MGAREG_VCOUNT); + *vpos = vcount & GENMASK(11, 0); + } + + if (hpos) + *hpos = mode->htotal >> 1; // near middle of scanline on average + + if (etime) + *etime = ktime_get(); + + return true; } void mgag200_crtc_reset(struct drm_crtc *crtc) @@ -717,6 +761,7 @@ struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc new_mgag200_crtc_state->format = mgag200_crtc_state->format; memcpy(&new_mgag200_crtc_state->pixpllc, &mgag200_crtc_state->pixpllc, sizeof(new_mgag200_crtc_state->pixpllc)); + new_mgag200_crtc_state->set_vidrst = mgag200_crtc_state->set_vidrst; return &new_mgag200_crtc_state->base; } @@ -729,6 +774,30 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st kfree(mgag200_crtc_state); } +int mgag200_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct mga_device *mdev = to_mga_device(crtc->dev); + u32 ien; + + WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); + + ien = RREG32(MGAREG_IEN); + ien |= MGAREG_IEN_VLINEIEN; + WREG32(MGAREG_IEN, ien); + + return 0; +} + +void mgag200_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct mga_device *mdev = to_mga_device(crtc->dev); + u32 ien; + + ien = RREG32(MGAREG_IEN); + ien &= ~(MGAREG_IEN_VLINEIEN); + WREG32(MGAREG_IEN, ien); +} + /* * Mode config */ diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h index aa73463674e4bd..d4fef8f25871f2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_reg.h +++ b/drivers/gpu/drm/mgag200/mgag200_reg.h @@ -102,10 +102,17 @@ #define MGAREG_EXEC 0x0100 #define MGAREG_FIFOSTATUS 0x1e10 + #define MGAREG_STATUS 0x1e14 +#define MGAREG_STATUS_VLINEPEN BIT(5) + #define MGAREG_CACHEFLUSH 0x1fff + #define MGAREG_ICLEAR 0x1e18 +#define MGAREG_ICLEAR_VLINEICLR BIT(5) + #define MGAREG_IEN 0x1e1c +#define MGAREG_IEN_VLINEIEN BIT(5) #define MGAREG_VCOUNT 0x1e20 diff --git a/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c new file mode 100644 index 00000000000000..a5a3ac108bd5bc --- /dev/null +++ b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c @@ 
-0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include + +#include "mgag200_ddc.h" +#include "mgag200_drv.h" + +static void mgag200_vga_bmc_encoder_atomic_disable(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct mga_device *mdev = to_mga_device(encoder->dev); + + if (mdev->info->sync_bmc) + mgag200_bmc_stop_scanout(mdev); +} + +static void mgag200_vga_bmc_encoder_atomic_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct mga_device *mdev = to_mga_device(encoder->dev); + + if (mdev->info->sync_bmc) + mgag200_bmc_start_scanout(mdev); +} + +static int mgag200_vga_bmc_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *new_crtc_state, + struct drm_connector_state *new_connector_state) +{ + struct mga_device *mdev = to_mga_device(encoder->dev); + struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state); + + new_mgag200_crtc_state->set_vidrst = mdev->info->sync_bmc; + + return 0; +} + +static const struct drm_encoder_helper_funcs mgag200_dac_encoder_helper_funcs = { + .atomic_disable = mgag200_vga_bmc_encoder_atomic_disable, + .atomic_enable = mgag200_vga_bmc_encoder_atomic_enable, + .atomic_check = mgag200_vga_bmc_encoder_atomic_check, +}; + +static const struct drm_encoder_funcs mgag200_dac_encoder_funcs = { + .destroy = drm_encoder_cleanup +}; + +static int mgag200_vga_bmc_connector_helper_get_modes(struct drm_connector *connector) +{ + struct mga_device *mdev = to_mga_device(connector->dev); + const struct mgag200_device_info *minfo = mdev->info; + int count; + + count = drm_connector_helper_get_modes(connector); + + if (!count) { + /* + * There's no EDID data without a connected monitor. Set BMC- + * compatible modes in this case. The XGA default resolution + * should work well for all BMCs. + */ + count = drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay); + if (count) + drm_set_preferred_mode(connector, 1024, 768); + } + + return count; +} + +/* + * There's no monitor connected if the DDC did not return an EDID. Still + * return 'connected' as there's always a BMC. Incrementing the connector's + * epoch counter triggers an update of the related properties. 
+ */ +static int mgag200_vga_bmc_connector_helper_detect_ctx(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + enum drm_connector_status old_status, status; + + if (connector->edid_blob_ptr) + old_status = connector_status_connected; + else + old_status = connector_status_disconnected; + + status = drm_connector_helper_detect_from_ddc(connector, ctx, force); + + if (status != old_status) + ++connector->epoch_counter; + return connector_status_connected; +} + +static const struct drm_connector_helper_funcs mgag200_vga_connector_helper_funcs = { + .get_modes = mgag200_vga_bmc_connector_helper_get_modes, + .detect_ctx = mgag200_vga_bmc_connector_helper_detect_ctx, +}; + +static const struct drm_connector_funcs mgag200_vga_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state +}; + +int mgag200_vga_bmc_output_init(struct mga_device *mdev) +{ + struct drm_device *dev = &mdev->base; + struct drm_crtc *crtc = &mdev->crtc; + struct drm_encoder *encoder; + struct drm_connector *connector; + struct i2c_adapter *ddc; + int ret; + + encoder = &mdev->output.vga.encoder; + ret = drm_encoder_init(dev, encoder, &mgag200_dac_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + if (ret) { + drm_err(dev, "drm_encoder_init() failed: %d\n", ret); + return ret; + } + drm_encoder_helper_add(encoder, &mgag200_dac_encoder_helper_funcs); + + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ddc = mgag200_ddc_create(mdev); + if (IS_ERR(ddc)) { + ret = PTR_ERR(ddc); + drm_err(dev, "failed to add DDC bus: %d\n", ret); + return ret; + } + + connector = &mdev->output.vga.connector; + ret = drm_connector_init_with_ddc(dev, connector, + &mgag200_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA, ddc); + if (ret) { + drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); + return ret; + } + drm_connector_helper_add(connector, &mgag200_vga_connector_helper_funcs); + + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) { + drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index f5e2838c6a7650..13110fcc46a8d2 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -37,6 +37,7 @@ msm-display-$(CONFIG_DRM_MSM_HDMI) += \ hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8996.o \ + hdmi/hdmi_phy_8998.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ hdmi/hdmi_pll_8960.o \ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c index 0de8465b6cf021..2eb6c3e9374858 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c @@ -41,6 +41,17 @@ static const struct adreno_info a3xx_gpus[] = { .gmem = SZ_128K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a3xx_gpu_init, + }, { + .chip_ids = ADRENO_CHIP_IDS(0x03000620), + .family = ADRENO_3XX, + .revn = 308, + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, + .gmem = SZ_128K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a3xx_gpu_init, }, { .chip_ids = ADRENO_CHIP_IDS( 0x03020000, diff --git 
a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 5273dc8498381c..b46ff49f47cf07 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -145,6 +145,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a); gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a); + } else if (adreno_is_a306a(adreno_gpu)) { + gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010); } else if (adreno_is_a320(adreno_gpu)) { /* Set up 16 deep read/write request queues: */ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); @@ -237,7 +241,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); /* Enable Clock gating: */ - if (adreno_is_a305b(adreno_gpu) || adreno_is_a306(adreno_gpu)) + if (adreno_is_a305b(adreno_gpu) || + adreno_is_a306(adreno_gpu) || + adreno_is_a306a(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); else if (adreno_is_a320(adreno_gpu)) gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); @@ -334,8 +340,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ - if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) || - adreno_is_a320(adreno_gpu)) { + if (adreno_is_a305(adreno_gpu) || + adreno_is_a306(adreno_gpu) || + adreno_is_a306a(adreno_gpu) || + adreno_is_a320(adreno_gpu)) { gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index c0b5373e90d713..e09044930547fe 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = submit->ring; struct drm_gem_object *obj; uint32_t *ptr, dwords; @@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit } } + a5xx_gpu->last_seqno[ring->id] = submit->seqno; a5xx_flush(gpu, ring, true); a5xx_preempt_trigger(gpu); @@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1); - /* Enable local preemption for finegrain preemption */ + /* + * Disable local preemption by default because it requires + * user-space to be aware of it and provide additional handling + * to restore rendering state or do various flushes on switch. + */ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); - OUT_RING(ring, 0x1); + OUT_RING(ring, 0x0); /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */ OUT_PKT7(ring, CP_YIELD_ENABLE, 1); @@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* Write the fence to the scratch register */ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); OUT_RING(ring, submit->seqno); + a5xx_gpu->last_seqno[ring->id] = submit->seqno; /* * Execute a CACHE_FLUSH_TS event. 
This will ensure that the @@ -1793,5 +1801,9 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) else adreno_gpu->ubwc_config.highest_bank_bit = 14; + /* a5xx only supports UBWC 1.0, these are not configurable */ + adreno_gpu->ubwc_config.macrotile_mode = 0; + adreno_gpu->ubwc_config.ubwc_swizzle = 0x7; + return gpu; } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index c7187bcc5e9082..9c0d701fe4b85b 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -34,8 +34,10 @@ struct a5xx_gpu { struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS]; struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS]; uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + uint32_t last_seqno[MSM_GPU_MAX_RINGS]; atomic_t preempt_state; + spinlock_t preempt_start_lock; struct timer_list preempt_timer; struct drm_gem_object *shadow_bo; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index f58dd564d122ba..0469fea5501083 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Return the highest priority ringbuffer with something in it */ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); unsigned long flags; int i; @@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) spin_lock_irqsave(&ring->preempt_lock, flags); empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + if (!empty && ring == a5xx_gpu->cur_ring) + empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i]; spin_unlock_irqrestore(&ring->preempt_lock, flags); if (!empty) @@ -97,12 +101,19 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) if (gpu->nr_rings == 1) return; + /* + * Serialize preemption start to ensure that we always make + * decision on latest state. Otherwise we can get stuck in + * lower priority or empty ring. + */ + spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags); + /* * Try to start preemption by moving from NONE to START. 
If * unsuccessful, a preemption is already in flight */ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START)) - return; + goto out; /* Get the next ring to preempt to */ ring = get_next_ring(gpu); @@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) set_preempt_state(a5xx_gpu, PREEMPT_ABORT); update_wptr(gpu, a5xx_gpu->cur_ring); set_preempt_state(a5xx_gpu, PREEMPT_NONE); - return; + goto out; } + spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags); + /* Make sure the wptr doesn't update while we're in motion */ spin_lock_irqsave(&ring->preempt_lock, flags); a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring); @@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) /* And actually start the preemption */ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1); + return; + +out: + spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags); } void a5xx_preempt_irq(struct msm_gpu *gpu) @@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu) update_wptr(gpu, a5xx_gpu->cur_ring); set_preempt_state(a5xx_gpu, PREEMPT_NONE); + + /* + * Try to trigger preemption again in case there was a submit or + * retire during ring switch + */ + a5xx_preempt_trigger(gpu); } void a5xx_preempt_hw_init(struct msm_gpu *gpu) @@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu) return; for (i = 0; i < gpu->nr_rings; i++) { + a5xx_gpu->preempt[i]->data = 0; + a5xx_gpu->preempt[i]->info = 0; a5xx_gpu->preempt[i]->wptr = 0; a5xx_gpu->preempt[i]->rptr = 0; a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova; @@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu) } } + spin_lock_init(&a5xx_gpu->preempt_start_lock); timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index 68ba9aed5506ea..0312b6ee0356be 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -129,6 +129,59 @@ static const struct adreno_reglist a615_hwcg[] = { {}, }; +static const struct adreno_reglist a620_hwcg[] = { + {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00}, + {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555}, + {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011}, + {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044}, + {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002}, + {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + 
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777}, + {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004}, + {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A6XX_RBBM_ISDB_CNT, 0x00000182}, + {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000}, + {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, + {}, +}; + static const struct adreno_reglist a630_hwcg[] = { {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222}, {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222}, @@ -448,7 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = { {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, - {REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200}, {REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111}, {REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555}, {} @@ -491,7 +543,6 @@ static const u32 a630_protect_regs[] = { }; DECLARE_ADRENO_PROTECT(a630_protect, 32); -/* These are for a620 and a650 */ static const u32 a650_protect_regs[] = { A6XX_PROTECT_RDONLY(0x00000, 0x04ff), A6XX_PROTECT_RDONLY(0x00501, 0x0005), @@ -636,6 +687,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a612_hwcg, .protect = &a630_protect, + .gmu_cgc_mode = 0x00020202, + .prim_fifo_threshold = 0x00080000, }, /* * There are (at least) three SoCs implementing A610: SM6125 @@ -651,6 +704,35 @@ static const struct adreno_info a6xx_gpus[] = { { 157, 3 }, { 127, 4 }, ), + }, { + .chip_ids = ADRENO_CHIP_IDS(0x06010500), + .family = ADRENO_6XX_GEN1, + .revn = 615, + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a630_gmu.bin", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a615_zap.mdt", + .a6xx = &(const struct a6xx_info) { + .hwcg = a615_hwcg, + .protect = &a630_protect, + .gmu_cgc_mode = 0x00000222, + .prim_fifo_threshold = 0x0018000, + }, + .speedbins = ADRENO_SPEEDBINS( + /* + * The default speed bin (0) has the same values as + * speed bin 90 which goes up to 432 MHz. 
+ */ + { 0, 0 }, + { 90, 0 }, + { 105, 1 }, + { 146, 2 }, + { 163, 3 }, + ), }, { .machine = "qcom,sm7150", .chip_ids = ADRENO_CHIP_IDS(0x06010800), @@ -667,6 +749,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a615_hwcg, .protect = &a630_protect, + .gmu_cgc_mode = 0x00000222, + .prim_fifo_threshold = 0x00180000, }, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, @@ -689,6 +773,8 @@ static const struct adreno_info a6xx_gpus[] = { .init = a6xx_gpu_init, .a6xx = &(const struct a6xx_info) { .protect = &a630_protect, + .gmu_cgc_mode = 0x00000222, + .prim_fifo_threshold = 0x00180000, }, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, @@ -711,6 +797,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a615_hwcg, .protect = &a630_protect, + .gmu_cgc_mode = 0x00000222, + .prim_fifo_threshold = 0x00018000, }, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, @@ -733,6 +821,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a615_hwcg, .protect = &a630_protect, + .gmu_cgc_mode = 0x00000222, + .prim_fifo_threshold = 0x00018000, }, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, @@ -755,6 +845,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a615_hwcg, .protect = &a630_protect, + .gmu_cgc_mode = 0x00000222, + .prim_fifo_threshold = 0x00018000, }, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, @@ -763,6 +855,30 @@ static const struct adreno_info a6xx_gpus[] = { { 169, 2 }, { 180, 1 }, ), + }, { + .chip_ids = ADRENO_CHIP_IDS(0x06020100), + .family = ADRENO_6XX_GEN3, + .fw = { + [ADRENO_FW_SQE] = "a650_sqe.fw", + [ADRENO_FW_GMU] = "a621_gmu.bin", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_HAS_HW_APRIV, + .init = a6xx_gpu_init, + .zapfw = "a620_zap.mbn", + .a6xx = &(const struct a6xx_info) { + .hwcg = a620_hwcg, + .protect = &a650_protect, + .gmu_cgc_mode = 0x00020200, + .prim_fifo_threshold = 0x00010000, + }, + .address_space_size = SZ_16G, + .speedbins = ADRENO_SPEEDBINS( + { 0, 0 }, + { 137, 1 }, + ), }, { .chip_ids = ADRENO_CHIP_IDS( 0x06030001, @@ -782,6 +898,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a630_hwcg, .protect = &a630_protect, + .gmu_cgc_mode = 0x00020202, + .prim_fifo_threshold = 0x00180000, }, }, { .chip_ids = ADRENO_CHIP_IDS(0x06040001), @@ -799,6 +917,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a640_hwcg, .protect = &a630_protect, + .gmu_cgc_mode = 0x00020202, + .prim_fifo_threshold = 0x00180000, }, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, @@ -821,6 +941,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a650_hwcg, .protect = &a650_protect, + .gmu_cgc_mode = 0x00020202, + .prim_fifo_threshold = 0x00300200, }, .address_space_size = SZ_16G, .speedbins = ADRENO_SPEEDBINS( @@ -846,6 +968,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a660_hwcg, .protect = &a660_protect, + .gmu_cgc_mode = 0x00020000, + .prim_fifo_threshold = 0x00300200, }, .address_space_size = SZ_16G, }, { @@ -864,11 +988,14 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a660_hwcg, .protect = &a660_protect, + .gmu_cgc_mode = 0x00020202, + .prim_fifo_threshold = 0x00200200, }, .address_space_size = SZ_16G, .speedbins = ADRENO_SPEEDBINS( { 0, 
0 }, { 117, 0 }, + { 129, 4 }, { 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */ { 190, 1 }, ), @@ -888,6 +1015,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a640_hwcg, .protect = &a630_protect, + .gmu_cgc_mode = 0x00020202, + .prim_fifo_threshold = 0x00200200, }, }, { .chip_ids = ADRENO_CHIP_IDS(0x06090000), @@ -905,6 +1034,8 @@ static const struct adreno_info a6xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a690_hwcg, .protect = &a690_protect, + .gmu_cgc_mode = 0x00020200, + .prim_fifo_threshold = 0x00800200, }, .address_space_size = SZ_16G, } @@ -1165,6 +1296,8 @@ static const struct adreno_info a7xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a702_hwcg, .protect = &a650_protect, + .gmu_cgc_mode = 0x00020202, + .prim_fifo_threshold = 0x0000c000, }, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, @@ -1188,6 +1321,7 @@ static const struct adreno_info a7xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .hwcg = a730_hwcg, .protect = &a730_protect, + .gmu_cgc_mode = 0x00020000, }, .address_space_size = SZ_16G, }, { @@ -1207,6 +1341,7 @@ static const struct adreno_info a7xx_gpus[] = { .hwcg = a740_hwcg, .protect = &a730_protect, .gmu_chipid = 0x7020100, + .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_16G, }, { @@ -1225,6 +1360,7 @@ static const struct adreno_info a7xx_gpus[] = { .hwcg = a740_hwcg, .protect = &a730_protect, .gmu_chipid = 0x7050001, + .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_256G, }, { @@ -1243,6 +1379,7 @@ static const struct adreno_info a7xx_gpus[] = { .a6xx = &(const struct a6xx_info) { .protect = &a730_protect, .gmu_chipid = 0x7090100, + .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_16G, } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index cb538a262d1c13..37927bdd6fbed8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -423,6 +423,20 @@ static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); } +static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + + /* + * GEMNoC can power collapse whilst the GPU is being powered down, resulting + * in the power down sequence not being fully executed. That in turn can + * prevent CX_GDSC from collapsing. Assert Qactive to avoid this. 
+ */ + if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu)) + gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0)); +} + /* Let the GMU know that we are about to go into slumber */ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) { @@ -456,6 +470,8 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) } out: + a6xx_gemnoc_workaround(gmu); + /* Put fence into allow mode */ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); return ret; @@ -525,8 +541,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) if (IS_ERR(pdcptr)) goto err; - if (adreno_is_a650(adreno_gpu) || - adreno_is_a660_family(adreno_gpu) || + if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) pdc_in_aop = true; else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu)) @@ -946,6 +961,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) /* Force off SPTP in case the GMU is managing it */ a6xx_sptprac_disable(gmu); + a6xx_gemnoc_workaround(gmu); + /* Make sure there are no outstanding RPMh votes */ a6xx_gmu_rpmh_off(gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index bcaec86ac67a5c..06cab2c6fd663b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -402,7 +402,8 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) struct a6xx_gmu *gmu = &a6xx_gpu->gmu; const struct adreno_reglist *reg; unsigned int i; - u32 val, clock_cntl_on, cgc_mode; + u32 cgc_delay, cgc_hyst; + u32 val, clock_cntl_on; if (!(adreno_gpu->info->a6xx->hwcg || adreno_is_a7xx(adreno_gpu))) return; @@ -416,16 +417,15 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) else clock_cntl_on = 0x8aa8aa82; - if (adreno_is_a7xx(adreno_gpu)) { - cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000; - - gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, - state ? cgc_mode : 0); - gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, - state ? 0x10111 : 0); - gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, - state ? 0x5555 : 0); - } + cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111; + cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555; + + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, + state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0); + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, + state ? cgc_delay : 0); + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, + state ? cgc_hyst : 0); if (!adreno_gpu->info->a6xx->hwcg) { gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1); @@ -493,24 +493,17 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu) static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) { - /* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */ gpu->ubwc_config.rgb565_predicator = 0; - /* Unknown, introduced with A650 family */ gpu->ubwc_config.uavflagprd_inv = 0; - /* Whether the minimum access length is 64 bits */ gpu->ubwc_config.min_acc_len = 0; - /* Entirely magic, per-GPU-gen value */ - gpu->ubwc_config.ubwc_mode = 0; - /* - * The Highest Bank Bit value represents the bit of the highest DDR bank. - * This should ideally use DRAM type detection. 
- */ + gpu->ubwc_config.ubwc_swizzle = 0x6; + gpu->ubwc_config.macrotile_mode = 0; gpu->ubwc_config.highest_bank_bit = 15; if (adreno_is_a610(gpu)) { gpu->ubwc_config.highest_bank_bit = 13; gpu->ubwc_config.min_acc_len = 1; - gpu->ubwc_config.ubwc_mode = 1; + gpu->ubwc_config.ubwc_swizzle = 0x7; } if (adreno_is_a618(gpu)) @@ -523,9 +516,18 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) if (adreno_is_a619_holi(gpu)) gpu->ubwc_config.highest_bank_bit = 13; + if (adreno_is_a621(gpu)) { + gpu->ubwc_config.highest_bank_bit = 13; + gpu->ubwc_config.amsbc = 1; + gpu->ubwc_config.uavflagprd_inv = 2; + } + if (adreno_is_a640_family(gpu)) gpu->ubwc_config.amsbc = 1; + if (adreno_is_a680(gpu)) + gpu->ubwc_config.macrotile_mode = 1; + if (adreno_is_a650(gpu) || adreno_is_a660(gpu) || adreno_is_a690(gpu) || @@ -536,6 +538,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) gpu->ubwc_config.amsbc = 1; gpu->ubwc_config.rgb565_predicator = 1; gpu->ubwc_config.uavflagprd_inv = 2; + gpu->ubwc_config.macrotile_mode = 1; } if (adreno_is_7c3(gpu)) { @@ -543,12 +546,12 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) gpu->ubwc_config.amsbc = 1; gpu->ubwc_config.rgb565_predicator = 1; gpu->ubwc_config.uavflagprd_inv = 2; + gpu->ubwc_config.macrotile_mode = 1; } if (adreno_is_a702(gpu)) { gpu->ubwc_config.highest_bank_bit = 14; gpu->ubwc_config.min_acc_len = 1; - gpu->ubwc_config.ubwc_mode = 0; } } @@ -564,21 +567,26 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu) u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; u32 hbb_hi = hbb >> 2; u32 hbb_lo = hbb & 3; + u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1; + u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2); gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, + level2_swizzling_dis << 12 | adreno_gpu->ubwc_config.rgb565_predicator << 11 | hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 | adreno_gpu->ubwc_config.min_acc_len << 3 | - hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); + hbb_lo << 1 | ubwc_mode); - gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 | + gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, + level2_swizzling_dis << 6 | hbb_hi << 4 | adreno_gpu->ubwc_config.min_acc_len << 3 | - hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); + hbb_lo << 1 | ubwc_mode); - gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 | + gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, + level2_swizzling_dis << 12 | hbb_hi << 10 | adreno_gpu->ubwc_config.uavflagprd_inv << 4 | adreno_gpu->ubwc_config.min_acc_len << 3 | - hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); + hbb_lo << 1 | ubwc_mode); if (adreno_is_a7xx(adreno_gpu)) gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL, @@ -586,6 +594,9 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu) gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21); + + gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL, + adreno_gpu->ubwc_config.macrotile_mode); } static int a6xx_cp_init(struct msm_gpu *gpu) @@ -976,25 +987,11 @@ static int hw_init(struct msm_gpu *gpu) } else if (!adreno_is_a7xx(adreno_gpu)) gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128); - /* Setting the primFifo thresholds default values, - * and vccCacheSkipDis=1 bit (0x200) for A640 and newer - */ - if (adreno_is_a702(adreno_gpu)) - gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x0000c000); - else if (adreno_is_a690(adreno_gpu)) - gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200); - else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) - gpu_write(gpu, 
REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); - else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu)) - gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200); - else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) - gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); - else if (adreno_is_a619(adreno_gpu)) - gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000); - else if (adreno_is_a610(adreno_gpu)) - gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000); - else if (!adreno_is_a7xx(adreno_gpu)) - gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000); + + /* Set the default primFifo threshold values */ + if (adreno_gpu->info->a6xx->prim_fifo_threshold) + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, + adreno_gpu->info->a6xx->prim_fifo_threshold); /* Set the AHB default slave response to "ERROR" */ gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index e3e5c53ae8af2c..0fb7febf70e73b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -22,6 +22,8 @@ struct a6xx_info { const struct adreno_reglist *hwcg; const struct adreno_protect *protect; u32 gmu_chipid; + u32 gmu_cgc_mode; + u32 prim_fifo_threshold; }; struct a6xx_gpu { diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 789a11416f7a45..0fcae53c0b140b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -388,18 +388,18 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu, const u32 *debugbus_blocks, *gbif_debugbus_blocks; int i; - if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { debugbus_blocks = gen7_0_0_debugbus_blocks; debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks); gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks; gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks); - } else if (adreno_is_a740_family(adreno_gpu)) { + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { debugbus_blocks = gen7_2_0_debugbus_blocks; debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks); gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks; gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks); } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); debugbus_blocks = gen7_9_0_debugbus_blocks; debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks); gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks; @@ -509,7 +509,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, const struct a6xx_debugbus_block *cx_debugbus_blocks; if (adreno_is_a7xx(adreno_gpu)) { - BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu))); + BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3); cx_debugbus_blocks = a7xx_cx_debugbus_blocks; nr_cx_debugbus_blocks = ARRAY_SIZE(a7xx_cx_debugbus_blocks); } else { @@ -660,13 +660,16 @@ static void a7xx_get_dbgahb_clusters(struct msm_gpu *gpu, const struct gen7_sptp_cluster_registers *dbgahb_clusters; unsigned dbgahb_clusters_size; - if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { dbgahb_clusters = gen7_0_0_sptp_clusters; dbgahb_clusters_size = ARRAY_SIZE(gen7_0_0_sptp_clusters); - } else { - BUG_ON(!adreno_is_a740_family(adreno_gpu)); + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { dbgahb_clusters = gen7_2_0_sptp_clusters; dbgahb_clusters_size = 
ARRAY_SIZE(gen7_2_0_sptp_clusters); + } else { + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); + dbgahb_clusters = gen7_9_0_sptp_clusters; + dbgahb_clusters_size = ARRAY_SIZE(gen7_9_0_sptp_clusters); } a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state, @@ -818,14 +821,14 @@ static void a7xx_get_clusters(struct msm_gpu *gpu, const struct gen7_cluster_registers *clusters; unsigned clusters_size; - if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { clusters = gen7_0_0_clusters; clusters_size = ARRAY_SIZE(gen7_0_0_clusters); - } else if (adreno_is_a740_family(adreno_gpu)) { + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { clusters = gen7_2_0_clusters; clusters_size = ARRAY_SIZE(gen7_2_0_clusters); } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); clusters = gen7_9_0_clusters; clusters_size = ARRAY_SIZE(gen7_9_0_clusters); } @@ -893,7 +896,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu, if (WARN_ON(datasize > A6XX_CD_DATA_SIZE)) return; - if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 3); } @@ -923,7 +926,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu, datasize); out: - if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 0); } } @@ -956,14 +959,14 @@ static void a7xx_get_shaders(struct msm_gpu *gpu, unsigned num_shader_blocks; int i; - if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { shader_blocks = gen7_0_0_shader_blocks; num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks); - } else if (adreno_is_a740_family(adreno_gpu)) { + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { shader_blocks = gen7_2_0_shader_blocks; num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks); } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); shader_blocks = gen7_9_0_shader_blocks; num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks); } @@ -1348,14 +1351,14 @@ static void a7xx_get_registers(struct msm_gpu *gpu, const u32 *pre_crashdumper_regs; const struct gen7_reg_list *reglist; - if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { reglist = gen7_0_0_reg_list; pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers; - } else if (adreno_is_a740_family(adreno_gpu)) { + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { reglist = gen7_2_0_reg_list; pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers; } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); reglist = gen7_9_0_reg_list; pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers; } @@ -1405,8 +1408,7 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu, struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); const u32 *regs; - BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) || - adreno_is_a750(adreno_gpu))); + BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3); regs = gen7_0_0_post_crashdumper_registers; a7xx_get_ahb_gpu_registers(gpu, @@ -1514,11 +1516,11 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu, const struct a6xx_indexed_registers *indexed_regs; int i, indexed_count, mempool_count; - if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) { + if (adreno_gpu->info->family 
<= ADRENO_7XX_GEN2) { indexed_regs = a7xx_indexed_reglist; indexed_count = ARRAY_SIZE(a7xx_indexed_reglist); } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); indexed_regs = gen7_9_0_cp_indexed_reg_list; indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h index 260d66eccfecbf..9a327d543f27de 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h @@ -1303,7 +1303,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = { REG_A6XX_CP_ROQ_DBG_DATA, 0x00800}, { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000}, - { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR, + { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR, REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200}, { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR, REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800}, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index ecc3fc5cec2270..465a4cd14a435b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -379,6 +379,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, case MSM_PARAM_RAYTRACING: *value = adreno_gpu->has_ray_tracing; return 0; + case MSM_PARAM_UBWC_SWIZZLE: + *value = adreno_gpu->ubwc_config.ubwc_swizzle; + return 0; + case MSM_PARAM_MACROTILE_MODE: + *value = adreno_gpu->ubwc_config.macrotile_mode; + return 0; default: DBG("%s: invalid param: %u", gpu->name, param); return -EINVAL; @@ -478,7 +484,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname) ret = request_firmware_direct(&fw, fwname, drm->dev); if (!ret) { DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n", - newname); + fwname); adreno_gpu->fwloc = FW_LOCATION_LEGACY; goto out; } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { @@ -688,11 +694,9 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state) size = j + 1; if (size) { - state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL); - if (state->ring[i].data) { - memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2); + state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL); + if (state->ring[i].data) state->ring[i].data_size = size << 2; - } } } @@ -1083,6 +1087,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu->chip_id = config->chip_id; gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1; + gpu->pdev = pdev; /* Only handle the core clock when GMU is not in use (or is absent). */ if (adreno_has_gmu_wrapper(adreno_gpu) || diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 1ab523a163a00b..58d7e7915c5758 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -191,12 +191,42 @@ struct adreno_gpu { const struct firmware *fw[ADRENO_FW_MAX]; struct { + /** + * @rgb565_predicator: Unknown, introduced with A650 family, + * related to UBWC mode/ver 4 + */ u32 rgb565_predicator; + /** @uavflagprd_inv: Unknown, introduced with A650 family */ u32 uavflagprd_inv; + /** @min_acc_len: Whether the minimum access length is 64 bits */ u32 min_acc_len; - u32 ubwc_mode; + /** + * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling. 
+ * + * UBWC 1.0 always enables all three levels. + * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3. + * UBWC 4.0 adds the optional ability to disable levels 2 & 3. + * + * This is a bitmask where BIT(0) enables level 1, BIT(1) + * controls level 2, and BIT(2) enables level 3. + */ + u32 ubwc_swizzle; + /** + * @highest_bank_bit: Highest Bank Bit + * + * The Highest Bank Bit value represents the bit of the highest + * DDR bank. This should ideally use DRAM type detection. + */ u32 highest_bank_bit; u32 amsbc; + /** + * @macrotile_mode: Macrotile Mode + * + * Whether to use 4-channel macrotiling mode or the newer + * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is + * 4-channel and 1 is 8-channel. + */ + u32 macrotile_mode; } ubwc_config; /* @@ -294,6 +324,12 @@ static inline bool adreno_is_a306(const struct adreno_gpu *gpu) return adreno_is_revn(gpu, 307); } +static inline bool adreno_is_a306a(const struct adreno_gpu *gpu) +{ + /* a306a (marketing name is a308) */ + return adreno_is_revn(gpu, 308); +} + static inline bool adreno_is_a320(const struct adreno_gpu *gpu) { return adreno_is_revn(gpu, 320); @@ -384,6 +420,11 @@ static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu) return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu); } +static inline int adreno_is_a621(const struct adreno_gpu *gpu) +{ + return gpu->info->chip_ids[0] == 0x06020100; +} + static inline int adreno_is_a630(const struct adreno_gpu *gpu) { return adreno_is_revn(gpu, 630); @@ -433,7 +474,13 @@ static inline int adreno_is_a610_family(const struct adreno_gpu *gpu) return adreno_is_a610(gpu) || adreno_is_a702(gpu); } -/* check for a615, a616, a618, a619 or any a630 derivatives */ +/* TODO: 615/616 */ +static inline int adreno_is_a615_family(const struct adreno_gpu *gpu) +{ + return adreno_is_a618(gpu) || + adreno_is_a619(gpu); +} + static inline int adreno_is_a630_family(const struct adreno_gpu *gpu) { if (WARN_ON_ONCE(!gpu->info)) diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index 145f3d5953a30d..6ccfde82fecdb4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sm8150_mdp = { [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, }, }; @@ -290,6 +291,21 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = { }, }; +static const struct dpu_wb_cfg sm8150_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SDM845_MASK, + .format_list = wb2_formats_rgb, + .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .clk_ctrl = DPU_CLK_CTRL_WB2, + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + static const struct dpu_intf_cfg sm8150_intf[] = { { .name = "intf_0", .id = INTF_0, @@ -384,6 +400,8 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = { .pingpong = sm8150_pp, .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d), .merge_3d = sm8150_merge_3d, + .wb_count = ARRAY_SIZE(sm8150_wb), + .wb = sm8150_wb, .intf_count = ARRAY_SIZE(sm8150_intf), .intf = sm8150_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h 
b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h index 9e3bec8bc12188..bab19ddd1d4f97 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h @@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sc8180x_mdp = { [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, }, }; @@ -297,6 +298,21 @@ static const struct dpu_dsc_cfg sc8180x_dsc[] = { }, }; +static const struct dpu_wb_cfg sc8180x_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SDM845_MASK, + .format_list = wb2_formats_rgb, + .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .clk_ctrl = DPU_CLK_CTRL_WB2, + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + static const struct dpu_intf_cfg sc8180x_intf[] = { { .name = "intf_0", .id = INTF_0, @@ -410,6 +426,8 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = { .pingpong = sc8180x_pp, .merge_3d_count = ARRAY_SIZE(sc8180x_merge_3d), .merge_3d = sc8180x_merge_3d, + .wb_count = ARRAY_SIZE(sc8180x_wb), + .wb = sc8180x_wb, .intf_count = ARRAY_SIZE(sc8180x_intf), .intf = sc8180x_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h index 76b2ec0d2489b3..d039b96beb97cf 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h @@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6125_mdp = { [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, }, }; @@ -139,6 +140,21 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = { }, }; +static const struct dpu_wb_cfg sm6125_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SDM845_MASK, + .format_list = wb2_formats_rgb, + .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .clk_ctrl = DPU_CLK_CTRL_WB2, + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 2160, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + static const struct dpu_intf_cfg sm6125_intf[] = { { .name = "intf_0", .id = INTF_0, @@ -210,6 +226,8 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = { .dspp = sm6125_dspp, .pingpong_count = ARRAY_SIZE(sm6125_pp), .pingpong = sm6125_pp, + .wb_count = ARRAY_SIZE(sm6125_wb), + .wb = sm6125_wb, .intf_count = ARRAY_SIZE(sm6125_intf), .intf = sm6125_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h index e17a30be752535..0502cee2f116e8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h @@ -26,6 +26,7 @@ static const struct dpu_mdp_cfg sm6350_mdp = { [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 }, + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, }, }; @@ -145,6 +146,21 @@ static const 
struct dpu_dsc_cfg sm6350_dsc[] = { }, }; +static const struct dpu_wb_cfg sm6350_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb, + .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .clk_ctrl = DPU_CLK_CTRL_WB2, + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 1920, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + static const struct dpu_intf_cfg sm6350_intf[] = { { .name = "intf_0", .id = INTF_0, @@ -218,6 +234,8 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = { .dsc = sm6350_dsc, .pingpong_count = ARRAY_SIZE(sm6350_pp), .pingpong = sm6350_pp, + .wb_count = ARRAY_SIZE(sm6350_wb), + .wb = sm6350_wb, .intf_count = ARRAY_SIZE(sm6350_intf), .intf = sm6350_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index b26d5fe40c721f..febc3e764a6323 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -231,7 +231,7 @@ struct dpu_crtc_state { container_of(x, struct dpu_crtc_state, base) /** - * dpu_crtc_frame_pending - retun the number of pending frames + * dpu_crtc_frame_pending - return the number of pending frames * @crtc: Pointer to drm crtc object */ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 648c8d0a4c362d..dcb4fd85e73b9c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -96,14 +96,16 @@ #define INTF_SC7280_MASK (INTF_SC7180_MASK) -#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \ +#define WB_SDM845_MASK (BIT(DPU_WB_LINE_MODE) | \ BIT(DPU_WB_UBWC) | \ BIT(DPU_WB_YUV_CONFIG) | \ BIT(DPU_WB_PIPE_ALPHA) | \ BIT(DPU_WB_XY_ROI_OFFSET) | \ BIT(DPU_WB_QOS) | \ BIT(DPU_WB_QOS_8LVL) | \ - BIT(DPU_WB_CDP) | \ + BIT(DPU_WB_CDP)) + +#define WB_SM8250_MASK (WB_SDM845_MASK | \ BIT(DPU_WB_INPUT_CTRL)) #define DEFAULT_PIXEL_RAM_SIZE (50 * 1024) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c index 6e2ac50b94a419..0f40eea7f5e247 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c @@ -2,6 +2,8 @@ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
*/ +#include + #include #include "dpu_hwio.h" @@ -231,8 +233,38 @@ static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp) DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1); } +static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp, + enum dpu_dp_phy_sel phys[2]) +{ + struct dpu_hw_blk_reg_map *c = &mdp->hw; + unsigned int intf; + u32 sel = 0; + + sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF0, phys[0]); + sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF1, phys[1]); + + for (intf = 0; intf < 2; intf++) { + switch (phys[intf]) { + case DPU_DP_PHY_0: + sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY0, intf + 1); + break; + case DPU_DP_PHY_1: + sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY1, intf + 1); + break; + case DPU_DP_PHY_2: + sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY2, intf + 1); + break; + default: + /* ignore */ + break; + } + } + + DPU_REG_WRITE(c, MDP_DP_PHY_INTF_SEL, sel); +} + static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, - unsigned long cap) + unsigned long cap, const struct dpu_mdss_version *mdss_rev) { ops->setup_split_pipe = dpu_hw_setup_split_pipe; ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl; @@ -245,6 +277,9 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, ops->get_safe_status = dpu_hw_get_safe_status; + if (mdss_rev->core_major_ver >= 5) + ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel; + if (cap & BIT(DPU_MDP_AUDIO_SELECT)) ops->intf_audio_select = dpu_hw_intf_audio_select; } @@ -252,7 +287,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, const struct dpu_mdp_cfg *cfg, void __iomem *addr, - const struct dpu_mdss_cfg *m) + const struct dpu_mdss_version *mdss_rev) { struct dpu_hw_mdp *mdp; @@ -270,7 +305,7 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, * Assign ops */ mdp->caps = cfg; - _setup_mdp_ops(&mdp->ops, mdp->caps->features); + _setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev); return mdp; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h index 5c9a7ede991edc..f1ab9fd106e513 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h @@ -67,6 +67,13 @@ struct dpu_vsync_source_cfg { enum dpu_vsync_source vsync_source; }; +enum dpu_dp_phy_sel { + DPU_DP_PHY_NONE, + DPU_DP_PHY_0, + DPU_DP_PHY_1, + DPU_DP_PHY_2, +}; + /** * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions * Assumption is these functions will be called after clocks are enabled. @@ -125,6 +132,13 @@ struct dpu_hw_mdp_ops { void (*get_safe_status)(struct dpu_hw_mdp *mdp, struct dpu_danger_safe_status *status); + /** + * dp_phy_intf_sel - configure intf to phy mapping + * @mdp: mdp top context driver + * @phys: list of phys the DP interfaces should be connected to. 0 disables the INTF. 
+ */ + void (*dp_phy_intf_sel)(struct dpu_hw_mdp *mdp, enum dpu_dp_phy_sel phys[2]); + /** * intf_audio_select - select the external interface for audio * @mdp: mdp top context driver @@ -148,12 +162,12 @@ struct dpu_hw_mdp { * @dev: Corresponding device for devres management * @cfg: MDP TOP configuration from catalog * @addr: Mapped register io address of MDP - * @m: Pointer to mdss catalog data + * @mdss_rev: dpu core's major and minor versions */ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, const struct dpu_mdp_cfg *cfg, void __iomem *addr, - const struct dpu_mdss_cfg *m); + const struct dpu_mdss_version *mdss_rev); void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h index 5acd5683d25a40..054fe097ebf802 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h @@ -60,6 +60,13 @@ #define MDP_WD_TIMER_4_LOAD_VALUE 0x448 #define DCE_SEL 0x450 +#define MDP_DP_PHY_INTF_SEL 0x460 +#define MDP_DP_PHY_INTF_SEL_INTF0 GENMASK(2, 0) +#define MDP_DP_PHY_INTF_SEL_INTF1 GENMASK(5, 3) +#define MDP_DP_PHY_INTF_SEL_PHY0 GENMASK(8, 6) +#define MDP_DP_PHY_INTF_SEL_PHY1 GENMASK(11, 9) +#define MDP_DP_PHY_INTF_SEL_PHY2 GENMASK(14, 12) + #define MDP_PERIPH_TOP0 MDP_WD_TIMER_0_CTL #define MDP_PERIPH_TOP0_END CLK_CTRL3 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index d1e2143110f2bb..9bcae53c4f458c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -1146,7 +1146,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev, dpu_kms->catalog->mdp, dpu_kms->mmio, - dpu_kms->catalog); + dpu_kms->catalog->mdss_ver); if (IS_ERR(dpu_kms->hw_mdp)) { rc = PTR_ERR(dpu_kms->hw_mdp); DPU_ERROR("failed to get hw_mdp: %d\n", rc); @@ -1181,6 +1181,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms) goto err_pm_put; } + /* + * We need to program DP <-> PHY relationship only for SC8180X since it + * has fewer DP controllers than DP PHYs. + * If any other platform requires the same kind of programming, or if + * the INTF <->DP relationship isn't static anymore, this needs to be + * configured through the DT. + */ + if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,sc8180x-dpu")) + dpu_kms->hw_mdp->ops.dp_phy_intf_sel(dpu_kms->hw_mdp, (unsigned int[]){ 1, 2, }); + dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog); if (IS_ERR(dpu_kms->hw_intr)) { rc = PTR_ERR(dpu_kms->hw_intr); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index 3a7f7edda96b27..500b7dc895d055 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -351,7 +351,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p, drm_printf(p, "%s:%d\t%d\t%s\n", pipe2name(pipe), j, inuse, - plane ? plane->name : NULL); + plane ? 
plane->name : "(null)"); total += inuse; } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 9622e58dce3e7a..e1228fb093ee01 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -119,7 +119,7 @@ struct msm_dp_desc { }; static const struct msm_dp_desc sc7180_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 }, + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} }; @@ -130,9 +130,9 @@ static const struct msm_dp_desc sc7280_dp_descs[] = { }; static const struct msm_dp_desc sc8180x_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 }, - { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1 }, - { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2 }, + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, {} }; @@ -149,7 +149,7 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = { }; static const struct msm_dp_desc sm8650_dp_descs[] = { - { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0 }, + { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 3b59137ca67437..031446c87daec0 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config config->pll_clock_inverters = 0x00; else config->pll_clock_inverters = 0x40; - } else { + } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { if (pll_freq <= 1000000000ULL) config->pll_clock_inverters = 0xa0; else if (pll_freq <= 2500000000ULL) @@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config config->pll_clock_inverters = 0x00; else config->pll_clock_inverters = 0x40; + } else { + /* 4.2, 4.3 */ + if (pll_freq <= 1000000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq <= 2500000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq <= 3500000000ULL) + config->pll_clock_inverters = 0x00; + else + config->pll_clock_inverters = 0x40; } config->decimal_div_start = dec; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 24abcb7254cc4c..0bfee41c2e71a0 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -549,6 +549,7 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev) } static const struct of_device_id msm_hdmi_dt_match[] = { + { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config }, { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config }, { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8974_config }, { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8974_config }, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 4586baf364151c..a62d2aedfbb723 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -137,6 +137,7 @@ enum hdmi_phy_type { MSM_HDMI_PHY_8960, MSM_HDMI_PHY_8x74, MSM_HDMI_PHY_8996, + MSM_HDMI_PHY_8998, MSM_HDMI_PHY_MAX, }; @@ -154,6 +155,7 @@ extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg; extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg; extern const 
struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg; extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg; +extern const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg; struct hdmi_phy { struct platform_device *pdev; @@ -184,6 +186,7 @@ void __exit msm_hdmi_phy_driver_unregister(void); #ifdef CONFIG_COMMON_CLK int msm_hdmi_pll_8960_init(struct platform_device *pdev); int msm_hdmi_pll_8996_init(struct platform_device *pdev); +int msm_hdmi_pll_8998_init(struct platform_device *pdev); #else static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev) { @@ -194,6 +197,11 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev) { return -ENODEV; } + +static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev) +{ + return -ENODEV; +} #endif /* diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c index 88a3423b7f24d5..95b3f7535d840e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -118,6 +118,9 @@ static int msm_hdmi_phy_pll_init(struct platform_device *pdev, case MSM_HDMI_PHY_8996: ret = msm_hdmi_pll_8996_init(pdev); break; + case MSM_HDMI_PHY_8998: + ret = msm_hdmi_pll_8998_init(pdev); + break; /* * we don't have PLL support for these, don't report an error for now */ @@ -193,6 +196,8 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = { .data = &msm_hdmi_phy_8x74_cfg }, { .compatible = "qcom,hdmi-phy-8996", .data = &msm_hdmi_phy_8996_cfg }, + { .compatible = "qcom,hdmi-phy-8998", + .data = &msm_hdmi_phy_8998_cfg }, {} }; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c new file mode 100644 index 00000000000000..0e3a2b16a2ce82 --- /dev/null +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2024 Freebox SAS + */ + +#include +#include + +#include "hdmi.h" + +#define HDMI_VCO_MAX_FREQ 12000000000UL +#define HDMI_VCO_MIN_FREQ 8000000000UL + +#define HDMI_PCLK_MAX_FREQ 600000000 +#define HDMI_PCLK_MIN_FREQ 25000000 + +#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL +#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL +#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL +#define HDMI_CORECLK_DIV 5 +#define HDMI_DEFAULT_REF_CLOCK 19200000 +#define HDMI_PLL_CMP_CNT 1024 + +#define HDMI_PLL_POLL_MAX_READS 100 +#define HDMI_PLL_POLL_TIMEOUT_US 150 + +#define HDMI_NUM_TX_CHANNEL 4 + +struct hdmi_pll_8998 { + struct platform_device *pdev; + struct clk_hw clk_hw; + unsigned long rate; + + /* pll mmio base */ + void __iomem *mmio_qserdes_com; + /* tx channel base */ + void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL]; +}; + +#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8998, clk_hw) + +struct hdmi_8998_phy_pll_reg_cfg { + u32 com_svs_mode_clk_sel; + u32 com_hsclk_sel; + u32 com_pll_cctrl_mode0; + u32 com_pll_rctrl_mode0; + u32 com_cp_ctrl_mode0; + u32 com_dec_start_mode0; + u32 com_div_frac_start1_mode0; + u32 com_div_frac_start2_mode0; + u32 com_div_frac_start3_mode0; + u32 com_integloop_gain0_mode0; + u32 com_integloop_gain1_mode0; + u32 com_lock_cmp_en; + u32 com_lock_cmp1_mode0; + u32 com_lock_cmp2_mode0; + u32 com_lock_cmp3_mode0; + u32 com_core_clk_en; + u32 com_coreclk_div_mode0; + + u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_pre_driver_1[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_pre_driver_2[HDMI_NUM_TX_CHANNEL]; + u32 tx_lx_res_code_offset[HDMI_NUM_TX_CHANNEL]; + + u32 phy_mode; +}; + +struct hdmi_8998_post_divider { + u64 vco_freq; + int hsclk_divsel; + int vco_ratio; + int tx_band_sel; + int half_rate_mode; +}; + +static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8998 *pll) +{ + return platform_get_drvdata(pll->pdev); +} + +static inline void hdmi_pll_write(struct hdmi_pll_8998 *pll, int offset, + u32 data) +{ + writel(data, pll->mmio_qserdes_com + offset); +} + +static inline u32 hdmi_pll_read(struct hdmi_pll_8998 *pll, int offset) +{ + return readl(pll->mmio_qserdes_com + offset); +} + +static inline void hdmi_tx_chan_write(struct hdmi_pll_8998 *pll, int channel, + int offset, int data) +{ + writel(data, pll->mmio_qserdes_tx[channel] + offset); +} + +static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk, + bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return 0x8; + + return 0x30; +} + +static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return 0x16; + + return 0x18; +} + +static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc) +{ + if ((frac_start != 0) || gen_ssc) + return 0x34; + + return 0x2; +} + +static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk, + bool gen_ssc) +{ + int digclk_divsel = bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2; + u64 base; + + if ((frac_start != 0) || gen_ssc) + base = 0x3F; + else + base = 0xC4; + + base <<= (digclk_divsel == 2 ? 1 : 0); + + return (base <= 2046 ? 
base : 2046); +} + +static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk) +{ + u64 dividend = HDMI_PLL_CMP_CNT * fdata; + u32 divisor = ref_clk * 10; + u32 rem; + + rem = do_div(dividend, divisor); + if (rem > (divisor >> 1)) + dividend++; + + return dividend - 1; +} + +static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk) +{ + u64 fdata = ((u64)pll_cmp) * ref_clk * 10; + + do_div(fdata, HDMI_PLL_CMP_CNT); + + return fdata; +} + +#define HDMI_REF_CLOCK_HZ ((u64)19200000) +#define HDMI_MHZ_TO_HZ ((u64)1000000) +static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk) +{ + u32 const ratio_list[] = {1, 2, 3, 4, 5, 6, + 9, 10, 12, 15, 25}; + u32 const band_list[] = {0, 1, 2, 3}; + u32 const sz_ratio = ARRAY_SIZE(ratio_list); + u32 const sz_band = ARRAY_SIZE(band_list); + u32 const cmp_cnt = 1024; + u32 const th_min = 500, th_max = 1000; + u32 half_rate_mode = 0; + u32 list_elements; + int optimal_index; + u32 i, j, k; + u32 found_hsclk_divsel = 0, found_vco_ratio; + u32 found_tx_band_sel; + u64 const min_freq = HDMI_VCO_MIN_FREQ, max_freq = HDMI_VCO_MAX_FREQ; + u64 freq_list[ARRAY_SIZE(ratio_list) * ARRAY_SIZE(band_list)]; + u64 found_vco_freq; + u64 freq_optimal; + +find_optimal_index: + freq_optimal = max_freq; + optimal_index = -1; + list_elements = 0; + + for (i = 0; i < sz_ratio; i++) { + for (j = 0; j < sz_band; j++) { + u64 freq = div_u64(bclk, (1 << half_rate_mode)); + + freq *= (ratio_list[i] * (1 << band_list[j])); + freq_list[list_elements++] = freq; + } + } + + for (k = 0; k < ARRAY_SIZE(freq_list); k++) { + u32 const clks_pll_div = 2, core_clk_div = 5; + u32 const rng1 = 16, rng2 = 8; + u32 th1, th2; + u64 core_clk, rvar1, rem; + + core_clk = div_u64(freq_list[k], + ratio_list[k / sz_band] * clks_pll_div * + core_clk_div); + + rvar1 = HDMI_REF_CLOCK_HZ * rng1 * HDMI_MHZ_TO_HZ; + rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem); + if (rem > ((cmp_cnt * core_clk) >> 1)) + rvar1++; + th1 = rvar1; + + rvar1 = HDMI_REF_CLOCK_HZ * rng2 * HDMI_MHZ_TO_HZ; + rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem); + if (rem > ((cmp_cnt * core_clk) >> 1)) + rvar1++; + th2 = rvar1; + + if (freq_list[k] >= min_freq && + freq_list[k] <= max_freq) { + if ((th1 >= th_min && th1 <= th_max) || + (th2 >= th_min && th2 <= th_max)) { + if (freq_list[k] <= freq_optimal) { + freq_optimal = freq_list[k]; + optimal_index = k; + } + } + } + } + + if (optimal_index == -1) { + if (!half_rate_mode) { + half_rate_mode = 1; + goto find_optimal_index; + } else { + return -EINVAL; + } + } else { + found_vco_ratio = ratio_list[optimal_index / sz_band]; + found_tx_band_sel = band_list[optimal_index % sz_band]; + found_vco_freq = freq_optimal; + } + + switch (found_vco_ratio) { + case 1: + found_hsclk_divsel = 15; + break; + case 2: + found_hsclk_divsel = 0; + break; + case 3: + found_hsclk_divsel = 4; + break; + case 4: + found_hsclk_divsel = 8; + break; + case 5: + found_hsclk_divsel = 12; + break; + case 6: + found_hsclk_divsel = 1; + break; + case 9: + found_hsclk_divsel = 5; + break; + case 10: + found_hsclk_divsel = 2; + break; + case 12: + found_hsclk_divsel = 9; + break; + case 15: + found_hsclk_divsel = 13; + break; + case 25: + found_hsclk_divsel = 14; + break; + }; + + pd->vco_freq = found_vco_freq; + pd->tx_band_sel = found_tx_band_sel; + pd->vco_ratio = found_vco_ratio; + pd->hsclk_divsel = found_hsclk_divsel; + + return 0; +} + +static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk, + struct hdmi_8998_phy_pll_reg_cfg *cfg) +{ + 
struct hdmi_8998_post_divider pd; + u64 bclk; + u64 dec_start; + u64 frac_start; + u64 fdata; + u32 pll_divisor; + u32 rem; + u32 cpctrl; + u32 rctrl; + u32 cctrl; + u32 integloop_gain; + u32 pll_cmp; + int i, ret; + + /* bit clk = 10 * pix_clk */ + bclk = ((u64)pix_clk) * 10; + + ret = pll_get_post_div(&pd, bclk); + if (ret) + return ret; + + dec_start = pd.vco_freq; + pll_divisor = 4 * ref_clk; + do_div(dec_start, pll_divisor); + + frac_start = pd.vco_freq * (1 << 20); + + rem = do_div(frac_start, pll_divisor); + frac_start -= dec_start * (1 << 20); + if (rem > (pll_divisor >> 1)) + frac_start++; + + cpctrl = pll_get_cpctrl(frac_start, ref_clk, false); + rctrl = pll_get_rctrl(frac_start, false); + cctrl = pll_get_cctrl(frac_start, false); + integloop_gain = pll_get_integloop_gain(frac_start, bclk, + ref_clk, false); + + fdata = pd.vco_freq; + do_div(fdata, pd.vco_ratio); + + pll_cmp = pll_get_pll_cmp(fdata, ref_clk); + + /* Convert these values to register specific values */ + if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) + cfg->com_svs_mode_clk_sel = 1; + else + cfg->com_svs_mode_clk_sel = 2; + + cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel); + cfg->com_pll_cctrl_mode0 = cctrl; + cfg->com_pll_rctrl_mode0 = rctrl; + cfg->com_cp_ctrl_mode0 = cpctrl; + cfg->com_dec_start_mode0 = dec_start; + cfg->com_div_frac_start1_mode0 = (frac_start & 0xff); + cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8); + cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16); + cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff); + cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8); + cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff); + cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8); + cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16); + cfg->com_lock_cmp_en = 0x0; + cfg->com_core_clk_en = 0x2c; + cfg->com_coreclk_div_mode0 = HDMI_CORECLK_DIV; + cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 
0x5 : 0x4; + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) + cfg->tx_lx_tx_band[i] = pd.tx_band_sel; + + if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) { + cfg->tx_lx_tx_drv_lvl[0] = 0x0f; + cfg->tx_lx_tx_drv_lvl[1] = 0x0f; + cfg->tx_lx_tx_drv_lvl[2] = 0x0f; + cfg->tx_lx_tx_drv_lvl[3] = 0x0f; + cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03; + cfg->tx_lx_tx_emp_post1_lvl[1] = 0x02; + cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03; + cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00; + cfg->tx_lx_pre_driver_1[0] = 0x00; + cfg->tx_lx_pre_driver_1[1] = 0x00; + cfg->tx_lx_pre_driver_1[2] = 0x00; + cfg->tx_lx_pre_driver_1[3] = 0x00; + cfg->tx_lx_pre_driver_2[0] = 0x1C; + cfg->tx_lx_pre_driver_2[1] = 0x1C; + cfg->tx_lx_pre_driver_2[2] = 0x1C; + cfg->tx_lx_pre_driver_2[3] = 0x00; + cfg->tx_lx_res_code_offset[0] = 0x03; + cfg->tx_lx_res_code_offset[1] = 0x00; + cfg->tx_lx_res_code_offset[2] = 0x00; + cfg->tx_lx_res_code_offset[3] = 0x03; + } else if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) { + cfg->tx_lx_tx_drv_lvl[0] = 0x0f; + cfg->tx_lx_tx_drv_lvl[1] = 0x0f; + cfg->tx_lx_tx_drv_lvl[2] = 0x0f; + cfg->tx_lx_tx_drv_lvl[3] = 0x0f; + cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03; + cfg->tx_lx_tx_emp_post1_lvl[1] = 0x03; + cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03; + cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00; + cfg->tx_lx_pre_driver_1[0] = 0x00; + cfg->tx_lx_pre_driver_1[1] = 0x00; + cfg->tx_lx_pre_driver_1[2] = 0x00; + cfg->tx_lx_pre_driver_1[3] = 0x00; + cfg->tx_lx_pre_driver_2[0] = 0x16; + cfg->tx_lx_pre_driver_2[1] = 0x16; + cfg->tx_lx_pre_driver_2[2] = 0x16; + cfg->tx_lx_pre_driver_2[3] = 0x18; + cfg->tx_lx_res_code_offset[0] = 0x03; + cfg->tx_lx_res_code_offset[1] = 0x00; + cfg->tx_lx_res_code_offset[2] = 0x00; + cfg->tx_lx_res_code_offset[3] = 0x00; + } else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) { + cfg->tx_lx_tx_drv_lvl[0] = 0x0f; + cfg->tx_lx_tx_drv_lvl[1] = 0x0f; + cfg->tx_lx_tx_drv_lvl[2] = 0x0f; + cfg->tx_lx_tx_drv_lvl[3] = 0x0f; + cfg->tx_lx_tx_emp_post1_lvl[0] = 0x05; + cfg->tx_lx_tx_emp_post1_lvl[1] = 0x05; + cfg->tx_lx_tx_emp_post1_lvl[2] = 0x05; + cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00; + cfg->tx_lx_pre_driver_1[0] = 0x00; + cfg->tx_lx_pre_driver_1[1] = 0x00; + cfg->tx_lx_pre_driver_1[2] = 0x00; + cfg->tx_lx_pre_driver_1[3] = 0x00; + cfg->tx_lx_pre_driver_2[0] = 0x0E; + cfg->tx_lx_pre_driver_2[1] = 0x0E; + cfg->tx_lx_pre_driver_2[2] = 0x0E; + cfg->tx_lx_pre_driver_2[3] = 0x0E; + cfg->tx_lx_res_code_offset[0] = 0x00; + cfg->tx_lx_res_code_offset[1] = 0x00; + cfg->tx_lx_res_code_offset[2] = 0x00; + cfg->tx_lx_res_code_offset[3] = 0x00; + } else { + cfg->tx_lx_tx_drv_lvl[0] = 0x01; + cfg->tx_lx_tx_drv_lvl[1] = 0x01; + cfg->tx_lx_tx_drv_lvl[2] = 0x01; + cfg->tx_lx_tx_drv_lvl[3] = 0x00; + cfg->tx_lx_tx_emp_post1_lvl[0] = 0x00; + cfg->tx_lx_tx_emp_post1_lvl[1] = 0x00; + cfg->tx_lx_tx_emp_post1_lvl[2] = 0x00; + cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00; + cfg->tx_lx_pre_driver_1[0] = 0x00; + cfg->tx_lx_pre_driver_1[1] = 0x00; + cfg->tx_lx_pre_driver_1[2] = 0x00; + cfg->tx_lx_pre_driver_1[3] = 0x00; + cfg->tx_lx_pre_driver_2[0] = 0x16; + cfg->tx_lx_pre_driver_2[1] = 0x16; + cfg->tx_lx_pre_driver_2[2] = 0x16; + cfg->tx_lx_pre_driver_2[3] = 0x18; + cfg->tx_lx_res_code_offset[0] = 0x00; + cfg->tx_lx_res_code_offset[1] = 0x00; + cfg->tx_lx_res_code_offset[2] = 0x00; + cfg->tx_lx_res_code_offset[3] = 0x00; + } + + return 0; +} + +static int hdmi_8998_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + struct 
hdmi_8998_phy_pll_reg_cfg cfg = {}; + int i, ret; + + ret = pll_calculate(rate, parent_rate, &cfg); + if (ret) { + DRM_ERROR("PLL calculation failed\n"); + return ret; + } + + /* Initially shut down PHY */ + hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x0); + udelay(500); + + /* Power up sequence */ + hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x1); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20); + hdmi_phy_write(phy, REG_HDMI_8998_PHY_CMN_CTRL, 0x6); + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_INTERFACE_SELECT_TX_BAND, + cfg.tx_lx_tx_band[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_CLKBUF_TERM_ENABLE, + 0x1); + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_LANE_MODE, + 0x20); + } + + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x02); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x0B); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E); + + /* Bypass VCO calibration */ + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SVS_MODE_CLK_SEL, + cfg.com_svs_mode_clk_sel); + + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_IVCO, 0x07); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_CTRL, 0x00); + + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_SEL, 0x30); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_HSCLK_SEL, + cfg.com_hsclk_sel); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP_EN, + cfg.com_lock_cmp_en); + + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_CCTRL_MODE0, + cfg.com_pll_cctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_RCTRL_MODE0, + cfg.com_pll_rctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CP_CTRL_MODE0, + cfg.com_cp_ctrl_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DEC_START_MODE0, + cfg.com_dec_start_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0, + cfg.com_div_frac_start1_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0, + cfg.com_div_frac_start2_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0, + cfg.com_div_frac_start3_mode0); + + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0, + cfg.com_integloop_gain0_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0, + cfg.com_integloop_gain1_mode0); + + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP1_MODE0, + cfg.com_lock_cmp1_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP2_MODE0, + cfg.com_lock_cmp2_mode0); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP3_MODE0, + cfg.com_lock_cmp3_mode0); + + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORE_CLK_EN, + cfg.com_core_clk_en); + hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORECLK_DIV_MODE0, + cfg.com_coreclk_div_mode0); + + /* TX lanes setup (TX 0/1/2/3) */ + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_DRV_LVL, + cfg.tx_lx_tx_drv_lvl[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_EMP_POST1_LVL, + cfg.tx_lx_tx_emp_post1_lvl[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_PRE_DRIVER_1, + 
cfg.tx_lx_pre_driver_1[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_PRE_DRIVER_2, + cfg.tx_lx_pre_driver_2[i]); + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_DRV_LVL_RES_CODE_OFFSET, + cfg.tx_lx_res_code_offset[i]); + } + + hdmi_phy_write(phy, REG_HDMI_8998_PHY_MODE, cfg.phy_mode); + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_LANE_CONFIG, + 0x10); + } + + /* + * Ensure that vco configuration gets flushed to hardware before + * enabling the PLL + */ + wmb(); + + pll->rate = rate; + + return 0; +} + +static int hdmi_8998_phy_ready_status(struct hdmi_phy *phy) +{ + u32 nb_tries = HDMI_PLL_POLL_MAX_READS; + unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US; + u32 status; + int phy_ready = 0; + + while (nb_tries--) { + status = hdmi_phy_read(phy, REG_HDMI_8998_PHY_STATUS); + phy_ready = status & BIT(0); + + if (phy_ready) + break; + + udelay(timeout); + } + + return phy_ready; +} + +static int hdmi_8998_pll_lock_status(struct hdmi_pll_8998 *pll) +{ + u32 status; + int nb_tries = HDMI_PLL_POLL_MAX_READS; + unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US; + int pll_locked = 0; + + while (nb_tries--) { + status = hdmi_pll_read(pll, + REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS); + pll_locked = status & BIT(0); + + if (pll_locked) + break; + + udelay(timeout); + } + + return pll_locked; +} + +static int hdmi_8998_pll_prepare(struct clk_hw *hw) +{ + struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + int i, ret = 0; + + hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x1); + udelay(100); + + hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59); + udelay(100); + + ret = hdmi_8998_pll_lock_status(pll); + if (!ret) + return ret; + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + hdmi_tx_chan_write(pll, i, + REG_HDMI_8998_PHY_TXn_LANE_CONFIG, 0x1F); + } + + /* Ensure all registers are flushed to hardware */ + wmb(); + + ret = hdmi_8998_phy_ready_status(phy); + if (!ret) + return ret; + + /* Restart the retiming buffer */ + hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x58); + udelay(1); + hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59); + + /* Ensure all registers are flushed to hardware */ + wmb(); + + return 0; +} + +static long hdmi_8998_pll_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + if (rate < HDMI_PCLK_MIN_FREQ) + return HDMI_PCLK_MIN_FREQ; + else if (rate > HDMI_PCLK_MAX_FREQ) + return HDMI_PCLK_MAX_FREQ; + else + return rate; +} + +static unsigned long hdmi_8998_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw); + return pll->rate; +} + +static void hdmi_8998_pll_unprepare(struct clk_hw *hw) +{ + struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw); + struct hdmi_phy *phy = pll_get_phy(pll); + + hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0); + usleep_range(100, 150); +} + +static int hdmi_8998_pll_is_enabled(struct clk_hw *hw) +{ + struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw); + u32 status; + int pll_locked; + + status = hdmi_pll_read(pll, REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS); + pll_locked = status & BIT(0); + + return pll_locked; +} + +static const struct clk_ops hdmi_8998_pll_ops = { + .set_rate = hdmi_8998_pll_set_clk_rate, + .round_rate = hdmi_8998_pll_round_rate, + .recalc_rate = hdmi_8998_pll_recalc_rate, + .prepare = hdmi_8998_pll_prepare, + .unprepare = hdmi_8998_pll_unprepare, + .is_enabled = hdmi_8998_pll_is_enabled, +}; + +static const struct clk_init_data pll_init = { 
+ .name = "hdmipll", + .ops = &hdmi_8998_pll_ops, + .parent_data = (const struct clk_parent_data[]){ + { .fw_name = "xo", .name = "xo_board" }, + }, + .num_parents = 1, + .flags = CLK_IGNORE_UNUSED, +}; + +int msm_hdmi_pll_8998_init(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hdmi_pll_8998 *pll; + int ret, i; + + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return -ENOMEM; + + pll->pdev = pdev; + + pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll"); + if (IS_ERR(pll->mmio_qserdes_com)) { + DRM_DEV_ERROR(dev, "failed to map pll base\n"); + return -ENOMEM; + } + + for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) { + char name[32]; + + snprintf(name, sizeof(name), "hdmi_tx_l%d", i); + + pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name); + if (IS_ERR(pll->mmio_qserdes_tx[i])) { + DRM_DEV_ERROR(dev, "failed to map pll base\n"); + return -ENOMEM; + } + } + pll->clk_hw.init = &pll_init; + + ret = devm_clk_hw_register(dev, &pll->clk_hw); + if (ret) { + DRM_DEV_ERROR(dev, "failed to register pll clock\n"); + return ret; + } + + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw); + if (ret) { + DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret); + return ret; + } + + return 0; +} + +static const char * const hdmi_phy_8998_reg_names[] = { + "vddio", + "vcca", +}; + +static const char * const hdmi_phy_8998_clk_names[] = { + "iface", "ref", "xo", +}; + +const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg = { + .type = MSM_HDMI_PHY_8998, + .reg_names = hdmi_phy_8998_reg_names, + .num_regs = ARRAY_SIZE(hdmi_phy_8998_reg_names), + .clk_names = hdmi_phy_8998_clk_names, + .num_clks = ARRAY_SIZE(hdmi_phy_8998_clk_names), +}; diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 4494f6d1c7cbbc..7ab607252d183f 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -357,12 +357,10 @@ void msm_debugfs_init(struct drm_minor *minor) if (priv->kms && priv->kms->funcs->debugfs_init) priv->kms->funcs->debugfs_init(priv->kms, minor); -#ifdef CONFIG_FAULT_INJECTION fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root, &fail_gem_alloc); fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root, &fail_gem_iova); -#endif } #endif diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9c33f4e3f8229e..8c13b08708d228 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -7,6 +7,7 @@ #include #include +#include #include #include @@ -58,10 +59,8 @@ static bool modeset = true; MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)"); module_param(modeset, bool, 0600); -#ifdef CONFIG_FAULT_INJECTION DECLARE_FAULT_ATTR(fail_gem_alloc); DECLARE_FAULT_ATTR(fail_gem_iova); -#endif static int msm_drm_uninit(struct device *dev) { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index be016d7b4ef11a..2e28a13446366c 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -33,12 +33,8 @@ #include #include -#ifdef CONFIG_FAULT_INJECTION extern struct fault_attr fail_gem_alloc; extern struct fault_attr fail_gem_iova; -#else -# define should_fail(attr, size) 0 -#endif struct msm_kms; struct msm_gpu; @@ -215,8 +211,6 @@ struct msm_drm_private { struct notifier_block vmap_notifier; struct shrinker *shrinker; - struct drm_atomic_state *pm_state; - /** * hangcheck_period: For hang detection, in ms * @@ -254,8 +248,6 @@ void 
msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer); void msm_atomic_commit_tail(struct drm_atomic_state *state); int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev); -void msm_atomic_state_clear(struct drm_atomic_state *state); -void msm_atomic_state_free(struct drm_atomic_state *state); int msm_crtc_enable_vblank(struct drm_crtc *crtc); void msm_crtc_disable_vblank(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 3666b42b4ecd7f..a274b846642374 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -931,7 +931,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (IS_ERR(gpu->gpu_cx)) gpu->gpu_cx = NULL; - gpu->pdev = pdev; platform_set_drvdata(pdev, &gpu->adreno_smmu); msm_devfreq_init(gpu); diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c index 3d3da79fec2aae..d3c7889aaf267a 100644 --- a/drivers/gpu/drm/msm/msm_perf.c +++ b/drivers/gpu/drm/msm/msm_perf.c @@ -192,7 +192,6 @@ static const struct file_operations perf_debugfs_fops = { .owner = THIS_MODULE, .open = perf_open, .read = perf_read, - .llseek = no_llseek, .release = perf_release, }; diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index ca44fd291c5ba4..39138e190cb965 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -227,7 +227,6 @@ static const struct file_operations rd_debugfs_fops = { .owner = THIS_MODULE, .open = rd_open, .read = rd_read, - .llseek = no_llseek, .release = rd_release, }; diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml index 2dfe6913ab4f52..97608603ea62d4 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml @@ -1198,6 +1198,1027 @@ to upconvert to 32b float internally? 
+ [a6xx.xml register-description hunks: XML element markup lost in extraction; only the documentation text below is recoverable]
+       Disable LRZ feedback writes
+       Allows draws that don't have GRAS_LRZ_CNTL.LRZ_WRITE but have
+       GRAS_LRZ_CNTL.ENABLE to contribute to LRZ during RENDERING pass.
+       In sysmem mode GRAS_LRZ_CNTL.LRZ_WRITE is not considered.
+       Disable LRZ based on previous direction and the current one.
+       If DIR_WRITE is not enabled - there is no write to direction buffer.
- + + @@ -4582,15 +5644,15 @@ to upconvert to 32b float internally? - + - + - + @@ -4623,6 +5685,19 @@ to upconvert to 32b float internally? + + + + + + + + + + + + + This register clears pending loads queued up by @@ -4791,7 +5866,7 @@ to upconvert to 32b float internally? - + Texture constant dwords @@ -4831,6 +5906,7 @@ to upconvert to 32b float internally? + (STOPPED)<----stop / + * | / / + * | / / + * o--------<-----flr / + * \ / + * o------<--------------------flr + * + * Where: + * + * * READY - represents a state in which VF is fully operable + * * PAUSED - represents a state in which VF activity is temporarily suspended + * * STOPPED - represents a state in which VF activity is definitely halted + * * pause - represents a request to temporarily suspend VF activity + * * resume - represents a request to resume VF activity + * * stop - represents a request to definitely halt VF activity + * * flr - represents a request to perform VF FLR to restore VF activity + * + * However, each state transition requires additional steps that involves + * communication with GuC that might fail or be interrupted by other requests:: + * + * .................................WIP.... + * : : + * pause--------------------->PAUSE_WIP----------------------------o + * / : / \ : | + * / : o----<---stop flr--o : | + * / : | \ / | : V + * (READY,RESUMED)<--------+------------RESUME_WIP<----+--<-----resume--(PAUSED) + * ^ \ \ : | | : / / + * | \ \ : | | : / / + * | \ \ : | | : / / + * | \ \ : o----<----------------------+--<-------stop / + * | \ \ : | | : / + * | \ \ : V | : / + * | \ stop----->STOP_WIP---------flr--->-----o : / + * | \ : | | : / + * | \ : | V : / + * | flr--------+----->----------------->FLR_WIP<-----flr + * | : | / ^ : + * | : | / | : + * o--------<-------:----+-----<----------------o | : + * : | | : + * :....|...........................|.....: + * | | + * V | + * (STOPPED)--------------------flr + * + * For details about each internal WIP state machine see: + * + * * `The VF PAUSE state machine`_ + * * `The VF RESUME state machine`_ + * * `The VF STOP state machine`_ + * * `The VF FLR state machine`_ + */ + +#ifdef CONFIG_DRM_XE_DEBUG_SRIOV +static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) +{ + switch (bit) { +#define CASE2STR(_X) \ + case XE_GT_SRIOV_STATE_##_X: return #_X + CASE2STR(WIP); + CASE2STR(FLR_WIP); + CASE2STR(FLR_SEND_START); + CASE2STR(FLR_WAIT_GUC); + CASE2STR(FLR_GUC_DONE); + CASE2STR(FLR_RESET_CONFIG); + CASE2STR(FLR_RESET_DATA); + CASE2STR(FLR_RESET_MMIO); + CASE2STR(FLR_SEND_FINISH); + CASE2STR(FLR_FAILED); + CASE2STR(PAUSE_WIP); + CASE2STR(PAUSE_SEND_PAUSE); + CASE2STR(PAUSE_WAIT_GUC); + CASE2STR(PAUSE_GUC_DONE); + CASE2STR(PAUSE_FAILED); + CASE2STR(PAUSED); + CASE2STR(RESUME_WIP); + CASE2STR(RESUME_SEND_RESUME); + CASE2STR(RESUME_FAILED); + CASE2STR(RESUMED); + CASE2STR(STOP_WIP); + CASE2STR(STOP_SEND_STOP); + CASE2STR(STOP_FAILED); + CASE2STR(STOPPED); + CASE2STR(MISMATCH); +#undef CASE2STR + default: return "?"; + } +} +#endif + +static unsigned long pf_get_default_timeout(enum xe_gt_sriov_control_bits bit) +{ + switch (bit) { + case XE_GT_SRIOV_STATE_FLR_WAIT_GUC: + case XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: + return HZ / 2; + case XE_GT_SRIOV_STATE_FLR_WIP: + case XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: + return 5 * HZ; + default: + return HZ; + } +} + +static struct xe_gt_sriov_control_state *pf_pick_vf_control(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid <= 
xe_gt_sriov_pf_get_totalvfs(gt)); + + return >->sriov.pf.vfs[vfid].control; +} + +static unsigned long *pf_peek_vf_state(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid); + + return &cs->state; +} + +static bool pf_check_vf_state(struct xe_gt *gt, unsigned int vfid, + enum xe_gt_sriov_control_bits bit) +{ + return test_bit(bit, pf_peek_vf_state(gt, vfid)); +} + +static void pf_dump_vf_state(struct xe_gt *gt, unsigned int vfid) +{ + unsigned long state = *pf_peek_vf_state(gt, vfid); + enum xe_gt_sriov_control_bits bit; + + if (state) { + xe_gt_sriov_dbg_verbose(gt, "VF%u state %#lx%s%*pbl\n", + vfid, state, state ? " bits " : "", + (int)BITS_PER_LONG, &state); + for_each_set_bit(bit, &state, BITS_PER_LONG) + xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d)\n", + vfid, control_bit_to_string(bit), bit); + } else { + xe_gt_sriov_dbg_verbose(gt, "VF%u state READY\n", vfid); + } +} + +static bool pf_expect_vf_state(struct xe_gt *gt, unsigned int vfid, + enum xe_gt_sriov_control_bits bit) +{ + bool result = pf_check_vf_state(gt, vfid, bit); + + if (unlikely(!result)) + pf_dump_vf_state(gt, vfid); + + return result; +} + +static bool pf_expect_vf_not_state(struct xe_gt *gt, unsigned int vfid, + enum xe_gt_sriov_control_bits bit) +{ + bool result = !pf_check_vf_state(gt, vfid, bit); + + if (unlikely(!result)) + pf_dump_vf_state(gt, vfid); + + return result; +} + +static bool pf_enter_vf_state(struct xe_gt *gt, unsigned int vfid, + enum xe_gt_sriov_control_bits bit) +{ + if (!test_and_set_bit(bit, pf_peek_vf_state(gt, vfid))) { + xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) enter\n", + vfid, control_bit_to_string(bit), bit); + return true; + } + return false; +} + +static bool pf_exit_vf_state(struct xe_gt *gt, unsigned int vfid, + enum xe_gt_sriov_control_bits bit) +{ + if (test_and_clear_bit(bit, pf_peek_vf_state(gt, vfid))) { + xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) exit\n", + vfid, control_bit_to_string(bit), bit); + return true; + } + return false; +} + +static void pf_escape_vf_state(struct xe_gt *gt, unsigned int vfid, + enum xe_gt_sriov_control_bits bit) +{ + if (pf_exit_vf_state(gt, vfid, bit)) + xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) escaped by %ps\n", + vfid, control_bit_to_string(bit), bit, + __builtin_return_address(0)); +} + +static void pf_enter_vf_mismatch(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_MISMATCH)) { + xe_gt_sriov_dbg(gt, "VF%u state mismatch detected by %ps\n", + vfid, __builtin_return_address(0)); + pf_dump_vf_state(gt, vfid); + } +} + +static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_MISMATCH)) + xe_gt_sriov_dbg(gt, "VF%u state mismatch cleared by %ps\n", + vfid, __builtin_return_address(0)); + + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED); +} + +#define pf_enter_vf_state_machine_bug(gt, vfid) ({ \ + pf_enter_vf_mismatch((gt), (vfid)); \ +}) + +static void pf_queue_control_worker(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + + xe_gt_assert(gt, IS_SRIOV_PF(xe)); + + queue_work(xe->sriov.wq, >->sriov.pf.control.worker); +} + +static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_pf_control *pfc = >->sriov.pf.control; + + 
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + + spin_lock(&pfc->lock); + list_move_tail(>->sriov.pf.vfs[vfid].control.link, &pfc->list); + spin_unlock(&pfc->lock); + + pf_queue_control_worker(gt); +} + +static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid); +static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid); +static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid); +static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid); + +static bool pf_enter_vf_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) { + struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid); + + reinit_completion(&cs->done); + return true; + } + return false; +} + +static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) { + struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid); + + pf_exit_vf_flr_wip(gt, vfid); + pf_exit_vf_stop_wip(gt, vfid); + pf_exit_vf_pause_wip(gt, vfid); + pf_exit_vf_resume_wip(gt, vfid); + + complete_all(&cs->done); + } +} + +static int pf_wait_vf_wip_done(struct xe_gt *gt, unsigned int vfid, unsigned long timeout) +{ + struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid); + + return wait_for_completion_timeout(&cs->done, timeout) ? 0 : -ETIMEDOUT; +} + +static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid) +{ + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); + pf_exit_vf_mismatch(gt, vfid); + pf_exit_vf_wip(gt, vfid); +} + +/** + * DOC: The VF PAUSE state machine + * + * The VF PAUSE state machine looks like:: + * + * (READY,RESUMED)<-------------<---------------------o---------o + * | \ \ + * pause \ \ + * | \ \ + * ....V...........................PAUSE_WIP........ \ \ + * : \ : o \ + * : \ o------<-----busy : | \ + * : \ / / : | | + * : PAUSE_SEND_PAUSE ---failed--->----------o--->(PAUSE_FAILED) | + * : | \ : | | + * : acked rejected---->----------o--->(MISMATCH) / + * : | : / + * : v : / + * : PAUSE_WAIT_GUC : / + * : | : / + * : done : / + * : | : / + * : v : / + * : PAUSE_GUC_DONE o-----restart + * : / : + * : / : + * :....o..............o...............o...........: + * | | | + * completed flr stop + * | | | + * V .....V..... ......V..... + * (PAUSED) : FLR_WIP : : STOP_WIP : + * :.........: :..........: + * + * For the full state machine view, see `The VF state machine`_. 
+ */ + +static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) { + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE); + } +} + +static void pf_enter_vf_paused(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); + pf_exit_vf_mismatch(gt, vfid); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_pause_completed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_paused(gt, vfid); +} + +static void pf_enter_vf_pause_failed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_mismatch(gt, vfid); + pf_enter_vf_pause_failed(gt, vfid); +} + +static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE)) + return false; + + pf_enter_vf_pause_completed(gt, vfid); + return true; +} + +static void pf_enter_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE)) + pf_queue_vf(gt, vfid); +} + +static void pf_enter_pause_wait_guc(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC)) + pf_enter_vf_state_machine_bug(gt, vfid); +} + +static bool pf_exit_pause_wait_guc(struct xe_gt *gt, unsigned int vfid) +{ + return pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC); +} + +static void pf_enter_vf_pause_send_pause(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_queue_vf(gt, vfid); +} + +static bool pf_exit_vf_pause_send_pause(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE)) + return false; + + /* GuC may actually send a PAUSE_DONE before we get a RESPONSE */ + pf_enter_pause_wait_guc(gt, vfid); + + err = pf_send_vf_pause(gt, vfid); + if (err) { + /* send failed, so we shouldn't expect PAUSE_DONE from GuC */ + pf_exit_pause_wait_guc(gt, vfid); + + if (err == -EBUSY) + pf_enter_vf_pause_send_pause(gt, vfid); + else if (err == -EIO) + pf_enter_vf_pause_rejected(gt, vfid); + else + pf_enter_vf_pause_failed(gt, vfid); + } else { + /* + * we have already moved to WAIT_GUC, maybe even to GUC_DONE + * but since GuC didn't complain, we may clear MISMATCH + */ + pf_exit_vf_mismatch(gt, vfid); + } + + return true; +} + +static bool pf_enter_vf_pause_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) { + pf_enter_vf_wip(gt, vfid); + pf_enter_vf_pause_send_pause(gt, vfid); + return true; + } + + return false; +} + /** * xe_gt_sriov_pf_control_pause_vf - Pause a VF. 
* @gt: the &xe_gt @@ -98,7 +552,140 @@ static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid) */ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid) { - return pf_send_vf_pause(gt, vfid); + unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_PAUSE_WIP); + int err; + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) { + xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid); + return -EPERM; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) { + xe_gt_sriov_dbg(gt, "VF%u was already paused!\n", vfid); + return -ESTALE; + } + + if (!pf_enter_vf_pause_wip(gt, vfid)) { + xe_gt_sriov_dbg(gt, "VF%u pause already in progress!\n", vfid); + return -EALREADY; + } + + err = pf_wait_vf_wip_done(gt, vfid, timeout); + if (err) { + xe_gt_sriov_dbg(gt, "VF%u pause didn't finish in %u ms (%pe)\n", + vfid, jiffies_to_msecs(timeout), ERR_PTR(err)); + return err; + } + + if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) { + xe_gt_sriov_info(gt, "VF%u paused!\n", vfid); + return 0; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED)) { + xe_gt_sriov_dbg(gt, "VF%u pause failed!\n", vfid); + return -EIO; + } + + xe_gt_sriov_dbg(gt, "VF%u pause was canceled!\n", vfid); + return -ECANCELED; +} + +/** + * DOC: The VF RESUME state machine + * + * The VF RESUME state machine looks like:: + * + * (PAUSED)<-----------------<------------------------o + * | \ + * resume \ + * | \ + * ....V............................RESUME_WIP...... \ + * : \ : o + * : \ o-------<-----busy : | + * : \ / / : | + * : RESUME_SEND_RESUME ---failed--->--------o--->(RESUME_FAILED) + * : / \ : | + * : acked rejected---->---------o--->(MISMATCH) + * : / : + * :....o..............o...............o.....o.....: + * | | | \ + * completed flr stop restart-->(READY) + * | | | + * V .....V..... ......V..... + * (RESUMED) : FLR_WIP : : STOP_WIP : + * :.........: :..........: + * + * For the full state machine view, see `The VF state machine`_. 
+ */ + +static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_WIP)) + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME); +} + +static void pf_enter_vf_resumed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_mismatch(gt, vfid); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_resume_completed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_resumed(gt, vfid); +} + +static void pf_enter_vf_resume_failed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_resume_rejected(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_mismatch(gt, vfid); + pf_enter_vf_resume_failed(gt, vfid); +} + +static void pf_enter_vf_resume_send_resume(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_queue_vf(gt, vfid); +} + +static bool pf_exit_vf_resume_send_resume(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME)) + return false; + + err = pf_send_vf_resume(gt, vfid); + if (err == -EBUSY) + pf_enter_vf_resume_send_resume(gt, vfid); + else if (err == -EIO) + pf_enter_vf_resume_rejected(gt, vfid); + else if (err) + pf_enter_vf_resume_failed(gt, vfid); + else + pf_enter_vf_resume_completed(gt, vfid); + return true; +} + +static bool pf_enter_vf_resume_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_WIP)) { + pf_enter_vf_wip(gt, vfid); + pf_enter_vf_resume_send_resume(gt, vfid); + return true; + } + + return false; } /** @@ -112,7 +699,134 @@ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid) */ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) { - return pf_send_vf_resume(gt, vfid); + unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_RESUME_WIP); + int err; + + if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) { + xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid); + return -EPERM; + } + + if (!pf_enter_vf_resume_wip(gt, vfid)) { + xe_gt_sriov_dbg(gt, "VF%u resume already in progress!\n", vfid); + return -EALREADY; + } + + err = pf_wait_vf_wip_done(gt, vfid, timeout); + if (err) + return err; + + if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED)) { + xe_gt_sriov_info(gt, "VF%u resumed!\n", vfid); + return 0; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED)) { + xe_gt_sriov_dbg(gt, "VF%u resume failed!\n", vfid); + return -EIO; + } + + xe_gt_sriov_dbg(gt, "VF%u resume was canceled!\n", vfid); + return -ECANCELED; +} + +/** + * DOC: The VF STOP state machine + * + * The VF STOP state machine looks like:: + * + * (READY,PAUSED,RESUMED)<-------<--------------------o + * | \ + * stop \ + * | \ + * ....V..............................STOP_WIP...... \ + * : \ : o + * : \ o----<----busy : | + * : \ / / : | + * : STOP_SEND_STOP--------failed--->--------o--->(STOP_FAILED) + * : / \ : | + * : acked rejected-------->--------o--->(MISMATCH) + * : / : + * :....o..............o...............o...........: + * | | | + * completed flr restart + * | | | + * V .....V..... 
V + * (STOPPED) : FLR_WIP : (READY) + * :.........: + * + * For the full state machine view, see `The VF state machine`_. + */ + +static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_WIP)) + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP); +} + +static void pf_enter_vf_stopped(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED); + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED); + pf_exit_vf_mismatch(gt, vfid); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_stop_completed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_stopped(gt, vfid); +} + +static void pf_enter_vf_stop_failed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_stop_rejected(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_mismatch(gt, vfid); + pf_enter_vf_stop_failed(gt, vfid); +} + +static void pf_enter_vf_stop_send_stop(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_queue_vf(gt, vfid); +} + +static bool pf_exit_vf_stop_send_stop(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP)) + return false; + + err = pf_send_vf_stop(gt, vfid); + if (err == -EBUSY) + pf_enter_vf_stop_send_stop(gt, vfid); + else if (err == -EIO) + pf_enter_vf_stop_rejected(gt, vfid); + else if (err) + pf_enter_vf_stop_failed(gt, vfid); + else + pf_enter_vf_stop_completed(gt, vfid); + return true; +} + +static bool pf_enter_vf_stop_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_WIP)) { + pf_enter_vf_wip(gt, vfid); + pf_enter_vf_stop_send_stop(gt, vfid); + return true; + } + return false; } /** @@ -126,7 +840,280 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid) */ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid) { - return pf_send_vf_stop(gt, vfid); + unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_STOP_WIP); + int err; + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) { + xe_gt_sriov_dbg(gt, "VF%u was already stopped!\n", vfid); + return -ESTALE; + } + + if (!pf_enter_vf_stop_wip(gt, vfid)) { + xe_gt_sriov_dbg(gt, "VF%u stop already in progress!\n", vfid); + return -EALREADY; + } + + err = pf_wait_vf_wip_done(gt, vfid, timeout); + if (err) + return err; + + if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) { + xe_gt_sriov_info(gt, "VF%u stopped!\n", vfid); + return 0; + } + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED)) { + xe_gt_sriov_dbg(gt, "VF%u stop failed!\n", vfid); + return -EIO; + } + + xe_gt_sriov_dbg(gt, "VF%u stop was canceled!\n", vfid); + return -ECANCELED; +} + +/** + * DOC: The VF FLR state machine + * + * The VF FLR state machine looks like:: + * + * (READY,PAUSED,STOPPED)<------------<--------------o + * | \ + * flr \ + * | \ + * ....V..........................FLR_WIP........... 
\ + * : \ : \ + * : \ o----<----busy : | + * : \ / / : | + * : FLR_SEND_START---failed----->-----------o--->(FLR_FAILED)<---o + * : | \ : | | + * : acked rejected----->-----------o--->(MISMATCH) | + * : | : ^ | + * : v : | | + * : FLR_WAIT_GUC : | | + * : | : | | + * : done : | | + * : | : | | + * : v : | | + * : FLR_GUC_DONE : | | + * : | : | | + * : FLR_RESET_CONFIG---failed--->-----------o--------+-----------o + * : | : | | + * : FLR_RESET_DATA : | | + * : | : | | + * : FLR_RESET_MMIO : | | + * : | : | | + * : | o----<----busy : | | + * : |/ / : | | + * : FLR_SEND_FINISH----failed--->-----------o--------+-----------o + * : / \ : | + * : acked rejected----->-----------o--------o + * : / : + * :....o..............................o...........: + * | | + * completed restart + * | / + * V / + * (READY)<----------<------------o + * + * For the full state machine view, see `The VF state machine`_. + */ + +static void pf_enter_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_queue_vf(gt, vfid); +} + +static void pf_enter_vf_flr_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) { + xe_gt_sriov_dbg(gt, "VF%u FLR is already in progress\n", vfid); + return; + } + + pf_enter_vf_wip(gt, vfid); + pf_enter_vf_flr_send_start(gt, vfid); +} + +static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) { + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START); + } +} + +static void pf_enter_vf_flr_completed(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_ready(gt, vfid); +} + +static void pf_enter_vf_flr_failed(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED)) + xe_gt_sriov_notice(gt, "VF%u FLR failed!\n", vfid); + pf_exit_vf_wip(gt, vfid); +} + +static void pf_enter_vf_flr_rejected(struct xe_gt *gt, unsigned int vfid) +{ + pf_enter_vf_mismatch(gt, vfid); + pf_enter_vf_flr_failed(gt, vfid); +} + +static void pf_enter_vf_flr_send_finish(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_queue_vf(gt, vfid); +} + +static bool pf_exit_vf_flr_send_finish(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH)) + return false; + + err = pf_send_vf_flr_finish(gt, vfid); + if (err == -EBUSY) + pf_enter_vf_flr_send_finish(gt, vfid); + else if (err == -EIO) + pf_enter_vf_flr_rejected(gt, vfid); + else if (err) + pf_enter_vf_flr_failed(gt, vfid); + else + pf_enter_vf_flr_completed(gt, vfid); + return true; +} + +static void pf_enter_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_queue_vf(gt, vfid); +} + +static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid) +{ + if 
(!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO)) + return false; + + /* XXX: placeholder */ + + pf_enter_vf_flr_send_finish(gt, vfid); + return true; +} + +static void pf_enter_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_queue_vf(gt, vfid); +} + +static bool pf_exit_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA)) + return false; + + xe_gt_sriov_pf_service_reset(gt, vfid); + xe_gt_sriov_pf_monitor_flr(gt, vfid); + + pf_enter_vf_flr_reset_mmio(gt, vfid); + return true; +} + +static void pf_enter_vf_flr_reset_config(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG)) + pf_enter_vf_state_machine_bug(gt, vfid); + + pf_queue_vf(gt, vfid); +} + +static bool pf_exit_vf_flr_reset_config(struct xe_gt *gt, unsigned int vfid) +{ + unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_RESET_CONFIG); + int err; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG)) + return false; + + err = xe_gt_sriov_pf_config_sanitize(gt, vfid, timeout); + if (err) + pf_enter_vf_flr_failed(gt, vfid); + else + pf_enter_vf_flr_reset_data(gt, vfid); + return true; +} + +static void pf_enter_vf_flr_wait_guc(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC)) + pf_enter_vf_state_machine_bug(gt, vfid); +} + +static bool pf_exit_vf_flr_wait_guc(struct xe_gt *gt, unsigned int vfid) +{ + return pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC); +} + +static bool pf_exit_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START)) + return false; + + /* GuC may actually send a FLR_DONE before we get a RESPONSE */ + pf_enter_vf_flr_wait_guc(gt, vfid); + + err = pf_send_vf_flr_start(gt, vfid); + if (err) { + /* send failed, so we shouldn't expect FLR_DONE from GuC */ + pf_exit_vf_flr_wait_guc(gt, vfid); + + if (err == -EBUSY) + pf_enter_vf_flr_send_start(gt, vfid); + else if (err == -EIO) + pf_enter_vf_flr_rejected(gt, vfid); + else + pf_enter_vf_flr_failed(gt, vfid); + } else { + /* + * we have already moved to WAIT_GUC, maybe even to GUC_DONE + * but since GuC didn't complain, we may clear MISMATCH + */ + pf_exit_vf_mismatch(gt, vfid); + } + + return true; +} + +static bool pf_exit_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE)) + return false; + + pf_enter_vf_flr_reset_config(gt, vfid); + return true; +} + +static void pf_enter_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE)) + pf_queue_vf(gt, vfid); } /** @@ -140,46 +1127,56 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid) */ int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid) { + unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_WIP); int err; - /* XXX pf_send_vf_flr_start() expects ct->lock */ - mutex_lock(>->uc.guc.ct.lock); - err = pf_send_vf_flr_start(gt, vfid); - mutex_unlock(>->uc.guc.ct.lock); + pf_enter_vf_flr_wip(gt, vfid); - return err; + err = pf_wait_vf_wip_done(gt, vfid, timeout); + if (err) { + xe_gt_sriov_notice(gt, "VF%u FLR didn't finish in %u ms (%pe)\n", + vfid, 
jiffies_to_msecs(timeout), ERR_PTR(err)); + return err; + } + + if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED)) + return -EIO; + + return 0; } /** * DOC: The VF FLR Flow with GuC * - * PF GUC PCI - * ======================================================== - * | | | - * (1) | [ ] <----- FLR --| - * | [ ] : - * (2) [ ] <-------- NOTIFY FLR --[ ] - * [ ] | - * (3) [ ] | - * [ ] | - * [ ]-- START FLR ---------> [ ] - * | [ ] - * (4) | [ ] - * | [ ] - * [ ] <--------- FLR DONE -- [ ] - * [ ] | - * (5) [ ] | - * [ ] | - * [ ]-- FINISH FLR --------> [ ] - * | | - * - * Step 1: PCI HW generates interrupt to the GuC about VF FLR - * Step 2: GuC FW sends G2H notification to the PF about VF FLR - * Step 2a: on some platforms G2H is only received from root GuC - * Step 3: PF sends H2G request to the GuC to start VF FLR sequence - * Step 3a: on some platforms PF must send H2G to all other GuCs - * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done - * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished + * The VF FLR flow includes several steps:: + * + * PF GUC PCI + * ======================================================== + * | | | + * (1) | [ ] <----- FLR --| + * | [ ] : + * (2) [ ] <-------- NOTIFY FLR --[ ] + * [ ] | + * (3) [ ] | + * [ ] | + * [ ]-- START FLR ---------> [ ] + * | [ ] + * (4) | [ ] + * | [ ] + * [ ] <--------- FLR DONE -- [ ] + * [ ] | + * (5) [ ] | + * [ ] | + * [ ]-- FINISH FLR --------> [ ] + * | | + * + * * Step 1: PCI HW generates interrupt to the GuC about VF FLR + * * Step 2: GuC FW sends G2H notification to the PF about VF FLR + * * Step 2a: on some platforms G2H is only received from root GuC + * * Step 3: PF sends H2G request to the GuC to start VF FLR sequence + * * Step 3a: on some platforms PF must send H2G to all other GuCs + * * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done + * * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished */ static bool needs_dispatch_flr(struct xe_device *xe) @@ -197,19 +1194,41 @@ static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid) if (needs_dispatch_flr(xe)) { for_each_gt(gtit, xe, gtid) - pf_send_vf_flr_start(gtit, vfid); + pf_enter_vf_flr_wip(gtit, vfid); } else { - pf_send_vf_flr_start(gt, vfid); + pf_enter_vf_flr_wip(gt, vfid); } } static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid) { - pf_send_vf_flr_finish(gt, vfid); + if (!pf_exit_vf_flr_wait_guc(gt, vfid)) { + xe_gt_sriov_dbg(gt, "Received out of order 'VF%u FLR done'\n", vfid); + pf_enter_vf_mismatch(gt, vfid); + return; + } + + pf_enter_vf_flr_guc_done(gt, vfid); +} + +static void pf_handle_vf_pause_done(struct xe_gt *gt, u32 vfid) +{ + if (!pf_exit_pause_wait_guc(gt, vfid)) { + xe_gt_sriov_dbg(gt, "Received out of order 'VF%u PAUSE done'\n", vfid); + pf_enter_vf_mismatch(gt, vfid); + return; + } + + pf_enter_vf_pause_guc_done(gt, vfid); } static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid) { + xe_gt_sriov_dbg_verbose(gt, "received VF%u event %#x\n", vfid, eventid); + + if (vfid > xe_gt_sriov_pf_get_totalvfs(gt)) + return -EPROTO; + switch (eventid) { case GUC_PF_NOTIFY_VF_FLR: pf_handle_vf_flr(gt, vfid); @@ -218,6 +1237,7 @@ static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid) pf_handle_vf_flr_done(gt, vfid); break; case GUC_PF_NOTIFY_VF_PAUSE_DONE: + pf_handle_vf_pause_done(gt, vfid); break; case GUC_PF_NOTIFY_VF_FIXUP_DONE: break; @@ -276,3 +1296,159 @@ int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, 
const u32 *msg, u32 return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid); } + +static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_exit_vf_flr_send_start(gt, vfid)) + return true; + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC)) { + xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid, + control_bit_to_string(XE_GT_SRIOV_STATE_FLR_WAIT_GUC)); + return false; + } + + if (pf_exit_vf_flr_guc_done(gt, vfid)) + return true; + + if (pf_exit_vf_flr_reset_config(gt, vfid)) + return true; + + if (pf_exit_vf_flr_reset_data(gt, vfid)) + return true; + + if (pf_exit_vf_flr_reset_mmio(gt, vfid)) + return true; + + if (pf_exit_vf_flr_send_finish(gt, vfid)) + return true; + + if (pf_exit_vf_stop_send_stop(gt, vfid)) + return true; + + if (pf_exit_vf_pause_send_pause(gt, vfid)) + return true; + + if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC)) { + xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid, + control_bit_to_string(XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC)); + return true; + } + + if (pf_exit_vf_pause_guc_done(gt, vfid)) + return true; + + if (pf_exit_vf_resume_send_resume(gt, vfid)) + return true; + + return false; +} + +static unsigned int pf_control_state_index(struct xe_gt *gt, + struct xe_gt_sriov_control_state *cs) +{ + return container_of(cs, struct xe_gt_sriov_metadata, control) - gt->sriov.pf.vfs; +} + +static void pf_worker_find_work(struct xe_gt *gt) +{ + struct xe_gt_sriov_pf_control *pfc = >->sriov.pf.control; + struct xe_gt_sriov_control_state *cs; + unsigned int vfid; + bool empty; + bool more; + + spin_lock(&pfc->lock); + cs = list_first_entry_or_null(&pfc->list, struct xe_gt_sriov_control_state, link); + if (cs) + list_del_init(&cs->link); + empty = list_empty(&pfc->list); + spin_unlock(&pfc->lock); + + if (!cs) + return; + + /* VF metadata structures are indexed by the VFID */ + vfid = pf_control_state_index(gt, cs); + xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt)); + + more = pf_process_vf_state_machine(gt, vfid); + if (more) + pf_queue_vf(gt, vfid); + else if (!empty) + pf_queue_control_worker(gt); +} + +static void control_worker_func(struct work_struct *w) +{ + struct xe_gt *gt = container_of(w, struct xe_gt, sriov.pf.control.worker); + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + pf_worker_find_work(gt); +} + +static void pf_stop_worker(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + cancel_work_sync(>->sriov.pf.control.worker); +} + +static void control_fini_action(struct drm_device *dev, void *data) +{ + struct xe_gt *gt = data; + + pf_stop_worker(gt); +} + +/** + * xe_gt_sriov_pf_control_init() - Initialize PF's control data. + * @gt: the &xe_gt + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_init(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + unsigned int n, totalvfs; + + xe_gt_assert(gt, IS_SRIOV_PF(xe)); + + totalvfs = xe_sriov_pf_get_totalvfs(xe); + for (n = 0; n <= totalvfs; n++) { + struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, n); + + init_completion(&cs->done); + INIT_LIST_HEAD(&cs->link); + } + + spin_lock_init(>->sriov.pf.control.lock); + INIT_LIST_HEAD(>->sriov.pf.control.list); + INIT_WORK(>->sriov.pf.control.worker, control_worker_func); + + return drmm_add_action_or_reset(&xe->drm, control_fini_action, gt); +} + +/** + * xe_gt_sriov_pf_control_restart() - Restart SR-IOV control data after a GT reset. 
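The consumer side of the control flow is fully visible above: pf_worker_find_work() pops one VF entry per invocation and re-queues the worker while the list is non-empty. Its producer, pf_queue_vf(), is defined earlier in the file and not part of this hunk; the sketch below only illustrates, under stated assumptions, what such a producer has to do given the lock and list invariants used by the consumer. The helper name and the choice of system_unbound_wq are placeholders, not the driver's actual code.

/* Illustrative producer for the per-VF control list (not part of the patch). */
static void example_queue_vf(struct xe_gt_sriov_pf_control *pfc,
			     struct xe_gt_sriov_control_state *cs)
{
	spin_lock(&pfc->lock);
	if (list_empty(&cs->link))		/* don't queue the same VF twice */
		list_add_tail(&cs->link, &pfc->list);
	spin_unlock(&pfc->lock);

	queue_work(system_unbound_wq, &pfc->worker);	/* assumed workqueue */
}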
+ * @gt: the &xe_gt + * + * Any per-VF status maintained by the PF or any ongoing VF control activity + * performed by the PF must be reset or cancelled when the GT is reset. + * + * This function is for PF only. + */ +void xe_gt_sriov_pf_control_restart(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + unsigned int n, totalvfs; + + xe_gt_assert(gt, IS_SRIOV_PF(xe)); + + pf_stop_worker(gt); + + totalvfs = xe_sriov_pf_get_totalvfs(xe); + for (n = 1; n <= totalvfs; n++) + pf_enter_vf_ready(gt, n); +} diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h index 405d1586f991f5..c85e64f099cc84 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h @@ -11,6 +11,9 @@ struct xe_gt; +int xe_gt_sriov_pf_control_init(struct xe_gt *gt); +void xe_gt_sriov_pf_control_restart(struct xe_gt *gt); + int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h new file mode 100644 index 00000000000000..11830aafea4505 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_GT_SRIOV_PF_CONTROL_TYPES_H_ +#define _XE_GT_SRIOV_PF_CONTROL_TYPES_H_ + +#include +#include +#include + +/** + * enum xe_gt_sriov_control_bits - Various bits used by the PF to represent a VF state + * + * @XE_GT_SRIOV_STATE_WIP: indicates that some operations are in progress. + * @XE_GT_SRIOV_STATE_FLR_WIP: indicates that a VF FLR is in progress. + * @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command. + * @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF awaits for a response from the GuC. + * @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC. + * @XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: indicates that the PF needs to clear VF's resources. + * @XE_GT_SRIOV_STATE_FLR_RESET_DATA: indicates that the PF needs to clear VF's data. + * @XE_GT_SRIOV_STATE_FLR_RESET_MMIO: indicates that the PF needs to reset VF's registers. + * @XE_GT_SRIOV_STATE_FLR_SEND_FINISH: indicates that the PF wants to send a FLR FINISH message. + * @XE_GT_SRIOV_STATE_FLR_FAILED: indicates that VF FLR sequence failed. + * @XE_GT_SRIOV_STATE_PAUSE_WIP: indicates that a VF pause operation is in progress. + * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command. + * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC. + * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC. + * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed. + * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused. + * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates the a VF resume operation is in progress. + * @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send RESUME command. + * @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed. + * @XE_GT_SRIOV_STATE_RESUMED: indicates that the VF was resumed. + * @XE_GT_SRIOV_STATE_STOP_WIP: indicates that a VF stop operation is in progress. 
+ * @XE_GT_SRIOV_STATE_STOP_SEND_STOP: indicates that the PF wants to send a STOP command. + * @XE_GT_SRIOV_STATE_STOP_FAILED: indicates that the VF stop operation has failed + * @XE_GT_SRIOV_STATE_STOPPED: indicates that the VF was stopped. + * @XE_GT_SRIOV_STATE_MISMATCH: indicates that the PF has detected a VF state mismatch. + */ +enum xe_gt_sriov_control_bits { + XE_GT_SRIOV_STATE_WIP = 1, + + XE_GT_SRIOV_STATE_FLR_WIP, + XE_GT_SRIOV_STATE_FLR_SEND_START, + XE_GT_SRIOV_STATE_FLR_WAIT_GUC, + XE_GT_SRIOV_STATE_FLR_GUC_DONE, + XE_GT_SRIOV_STATE_FLR_RESET_CONFIG, + XE_GT_SRIOV_STATE_FLR_RESET_DATA, + XE_GT_SRIOV_STATE_FLR_RESET_MMIO, + XE_GT_SRIOV_STATE_FLR_SEND_FINISH, + XE_GT_SRIOV_STATE_FLR_FAILED, + + XE_GT_SRIOV_STATE_PAUSE_WIP, + XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE, + XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC, + XE_GT_SRIOV_STATE_PAUSE_GUC_DONE, + XE_GT_SRIOV_STATE_PAUSE_FAILED, + XE_GT_SRIOV_STATE_PAUSED, + + XE_GT_SRIOV_STATE_RESUME_WIP, + XE_GT_SRIOV_STATE_RESUME_SEND_RESUME, + XE_GT_SRIOV_STATE_RESUME_FAILED, + XE_GT_SRIOV_STATE_RESUMED, + + XE_GT_SRIOV_STATE_STOP_WIP, + XE_GT_SRIOV_STATE_STOP_SEND_STOP, + XE_GT_SRIOV_STATE_STOP_FAILED, + XE_GT_SRIOV_STATE_STOPPED, + + XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1, +}; + +/** + * struct xe_gt_sriov_control_state - GT-level per-VF control state. + * + * Used by the PF driver to maintain per-VF control data. + */ +struct xe_gt_sriov_control_state { + /** @state: VF state bits */ + unsigned long state; + + /** @done: completion of async operations */ + struct completion done; + + /** @link: link into worker list */ + struct list_head link; +}; + +/** + * struct xe_gt_sriov_pf_control - GT-level control data. + * + * Used by the PF driver to maintain its data. + */ +struct xe_gt_sriov_pf_control { + /** @worker: worker that executes a VF operations */ + struct work_struct worker; + + /** @list: list of VF entries that have a pending work */ + struct list_head list; + + /** @lock: protects VF pending list */ + spinlock_t lock; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h index 40cbaea3ef44ec..28e1b130bf87c9 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h @@ -9,6 +9,7 @@ #include #include "xe_gt_sriov_pf_config_types.h" +#include "xe_gt_sriov_pf_control_types.h" #include "xe_gt_sriov_pf_monitor_types.h" #include "xe_gt_sriov_pf_policy_types.h" #include "xe_gt_sriov_pf_service_types.h" @@ -23,6 +24,9 @@ struct xe_gt_sriov_metadata { /** @monitor: per-VF monitoring data. */ struct xe_gt_sriov_monitor monitor; + /** @control: per-VF control data. */ + struct xe_gt_sriov_control_state control; + /** @version: negotiated VF/PF ABI version */ struct xe_gt_sriov_pf_service_version version; }; @@ -30,12 +34,14 @@ struct xe_gt_sriov_metadata { /** * struct xe_gt_sriov_pf - GT level PF virtualization data. * @service: service data. + * @control: control data. * @policy: policy data. * @spare: PF-only provisioning configuration. * @vfs: metadata for all VFs. 
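The enter/exit helpers that drive these bits (pf_enter_vf_state(), pf_exit_vf_state() and friends used throughout the FLR and PAUSE paths above) are defined earlier in xe_gt_sriov_pf_control.c and are not shown in this hunk. Below is a minimal sketch, assuming they are thin wrappers over atomic bitops on the state word declared in this header; the names are placeholders and the real helpers also take gt/vfid and emit debug logging.

#include <linux/bitops.h>

/* Illustrative only: assumed shape of the state-bit enter/exit helpers. */
static bool example_enter_state(struct xe_gt_sriov_control_state *cs,
				enum xe_gt_sriov_control_bits bit)
{
	/* true only for the caller that performs the 0 -> 1 transition */
	return !test_and_set_bit(bit, &cs->state);
}

static bool example_exit_state(struct xe_gt_sriov_control_state *cs,
			       enum xe_gt_sriov_control_bits bit)
{
	/* true only if the bit was set and this caller cleared it */
	return test_and_clear_bit(bit, &cs->state);
}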
*/ struct xe_gt_sriov_pf { struct xe_gt_sriov_pf_service service; + struct xe_gt_sriov_pf_control control; struct xe_gt_sriov_pf_policy policy; struct xe_gt_sriov_spare_config spare; struct xe_gt_sriov_metadata *vfs; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index 8892d6c2291ebb..4ebc82e607af65 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -495,6 +495,25 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt) return gt->sriov.vf.self_config.lmem_size; } +static struct xe_ggtt_node * +vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end) +{ + struct xe_ggtt_node *node; + int err; + + node = xe_ggtt_node_init(ggtt); + if (IS_ERR(node)) + return node; + + err = xe_ggtt_node_insert_balloon(node, start, end); + if (err) { + xe_ggtt_node_fini(node); + return ERR_PTR(err); + } + + return node; +} + static int vf_balloon_ggtt(struct xe_gt *gt) { struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; @@ -502,7 +521,6 @@ static int vf_balloon_ggtt(struct xe_gt *gt) struct xe_ggtt *ggtt = tile->mem.ggtt; struct xe_device *xe = gt_to_xe(gt); u64 start, end; - int err; xe_gt_assert(gt, IS_SRIOV_VF(xe)); xe_gt_assert(gt, !xe_gt_is_media_type(gt)); @@ -528,35 +546,31 @@ static int vf_balloon_ggtt(struct xe_gt *gt) start = xe_wopcm_size(xe); end = config->ggtt_base; if (end != start) { - err = xe_ggtt_balloon(ggtt, start, end, &tile->sriov.vf.ggtt_balloon[0]); - if (err) - goto failed; + tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end); + if (IS_ERR(tile->sriov.vf.ggtt_balloon[0])) + return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]); } start = config->ggtt_base + config->ggtt_size; end = GUC_GGTT_TOP; if (end != start) { - err = xe_ggtt_balloon(ggtt, start, end, &tile->sriov.vf.ggtt_balloon[1]); - if (err) - goto deballoon; + tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end); + if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) { + xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]); + return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]); + } } return 0; - -deballoon: - xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[0]); -failed: - return err; } static void deballoon_ggtt(struct drm_device *drm, void *arg) { struct xe_tile *tile = arg; - struct xe_ggtt *ggtt = tile->mem.ggtt; xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile))); - xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[1]); - xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[0]); + xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]); + xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]); } /** @@ -892,6 +906,32 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg) return rr->value; } +/** + * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register. + * @gt: the &xe_gt + * @reg: the register to write + * @val: value to write + * + * This function is for VF use only. + * Currently it will trigger a WARN if running on debug build. + */ +void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) +{ + u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + xe_gt_assert(gt, !reg.vf); + + /* + * In the future, we may want to handle selected writes to inaccessible + * registers in some custom way, but for now let's just log a warning + * about such attempt, as likely we might be doing something wrong. 
+ */ + xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG), + "VF is trying to write %#x to an inaccessible register %#x+%#x\n", + val, reg.addr, addr - reg.addr); +} + /** * xe_gt_sriov_vf_print_config - Print VF self config. * @gt: the &xe_gt diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h index 0de7f8cbcfa6df..e541ce57bec246 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h @@ -22,6 +22,7 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt); u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt); u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt); u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg); +void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val); void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p); void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p); diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c new file mode 100644 index 00000000000000..c7364a5aef8fa3 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gt_stats.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include + +#include + +#include "xe_gt.h" +#include "xe_gt_stats.h" + +/** + * xe_gt_stats_incr - Increments the specified stats counter + * @gt: graphics tile + * @id: xe_gt_stats_id type id that needs to be incremented + * @incr: value to be incremented with + * + * Increments the specified stats counter. + */ +void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr) +{ + if (id >= __XE_GT_STATS_NUM_IDS) + return; + + atomic_add(incr, >->stats.counters[id]); +} + +static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = { + "tlb_inval_count", +}; + +/** + * xe_gt_stats_print_info - Print the GT stats + * @gt: graphics tile + * @p: drm_printer where it will be printed out. + * + * This prints out all the available GT stats. 
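A short usage sketch for the new counter API: the increment mirrors the call added to xe_gt_tlb_invalidation.c later in this patch, and the sample output line follows the drm_printf() format used by xe_gt_stats_print_info(); the counter value shown is made up.

/* Counting an event (same call the TLB-invalidation path gains below). */
static void example_count_tlb_inval(struct xe_gt *gt)
{
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
}

/*
 * xe_gt_stats_print_info() then emits one "name: value" line per counter,
 * e.g.:  tlb_inval_count: 42
 */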
+ */ +int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p) +{ + enum xe_gt_stats_id id; + + for (id = 0; id < __XE_GT_STATS_NUM_IDS; ++id) + drm_printf(p, "%s: %d\n", stat_description[id], + atomic_read(>->stats.counters[id])); + + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h new file mode 100644 index 00000000000000..91d944f6c4e436 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gt_stats.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_GT_STATS_H_ +#define _XE_GT_STATS_H_ + +struct xe_gt; +struct drm_printer; + +enum xe_gt_stats_id { + XE_GT_STATS_ID_TLB_INVAL, + /* must be the last entry */ + __XE_GT_STATS_NUM_IDS, +}; + +#ifdef CONFIG_DEBUG_FS +int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p); +void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr); +#else +static inline void +xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, + int incr) +{ +} + +#endif +#endif diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.c b/drivers/gpu/drm/xe/xe_gt_sysfs.c index a05c3699e8b914..ec2b8246204b88 100644 --- a/drivers/gpu/drm/xe/xe_gt_sysfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sysfs.c @@ -51,5 +51,5 @@ int xe_gt_sysfs_init(struct xe_gt *gt) gt->sysfs = &kg->base; - return devm_add_action(xe->drm.dev, gt_sysfs_fini, gt); + return devm_add_action_or_reset(xe->drm.dev, gt_sysfs_fini, gt); } diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 87cb76a8718c99..cca9cf536f7693 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -12,6 +12,7 @@ #include "xe_gt_printk.h" #include "xe_guc.h" #include "xe_guc_ct.h" +#include "xe_gt_stats.h" #include "xe_mmio.h" #include "xe_pm.h" #include "xe_sriov.h" @@ -213,6 +214,7 @@ static int send_tlb_invalidation(struct xe_guc *guc, gt->tlb_invalidation.seqno = 1; } mutex_unlock(&guc->ct.lock); + xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1); return ret; } diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index 25ff03ab844814..0662f71c6ede78 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -6,6 +6,7 @@ #include "xe_gt_topology.h" #include +#include #include "regs/xe_gt_regs.h" #include "xe_assert.h" @@ -31,7 +32,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...) 
} static void -load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask) +load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type) { struct xe_device *xe = gt_to_xe(gt); u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE); @@ -47,11 +48,13 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask) if (GRAPHICS_VERx100(xe) < 1250) reg_val = ~reg_val & XELP_EU_MASK; - /* On PVC, one bit = one EU */ - if (GRAPHICS_VERx100(xe) == 1260) { + if (GRAPHICS_VERx100(xe) == 1260 || GRAPHICS_VER(xe) >= 20) { + /* SIMD16 EUs, one bit == one EU */ + *eu_type = XE_GT_EU_TYPE_SIMD16; val = reg_val; } else { - /* All other platforms, one bit = 2 EU */ + /* SIMD8 EUs, one bit == 2 EU */ + *eu_type = XE_GT_EU_TYPE_SIMD8; for (i = 0; i < fls(reg_val); i++) if (reg_val & BIT(i)) val |= 0x3 << 2 * i; @@ -213,7 +216,7 @@ xe_gt_topology_init(struct xe_gt *gt) XEHP_GT_COMPUTE_DSS_ENABLE, XEHPC_GT_COMPUTE_DSS_ENABLE_EXT, XE2_GT_COMPUTE_DSS_2); - load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss); + load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, >->fuse_topo.eu_type); load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask); p = drm_dbg_printer(>_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology"); @@ -221,6 +224,18 @@ xe_gt_topology_init(struct xe_gt *gt) xe_gt_topology_dump(gt, &p); } +static const char *eu_type_to_str(enum xe_gt_eu_type eu_type) +{ + switch (eu_type) { + case XE_GT_EU_TYPE_SIMD16: + return "simd16"; + case XE_GT_EU_TYPE_SIMD8: + return "simd8"; + } + + return NULL; +} + void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p) { @@ -231,6 +246,8 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p) drm_printf(p, "EU mask per DSS: %*pb\n", XE_MAX_EU_FUSE_BITS, gt->fuse_topo.eu_mask_per_dss); + drm_printf(p, "EU type: %s\n", + eu_type_to_str(gt->fuse_topo.eu_type)); drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS, gt->fuse_topo.l3_bank_mask); diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index c582541970dff4..3d1c51de026874 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -10,6 +10,7 @@ #include "xe_gt_idle_types.h" #include "xe_gt_sriov_pf_types.h" #include "xe_gt_sriov_vf_types.h" +#include "xe_gt_stats.h" #include "xe_hw_engine_types.h" #include "xe_hw_fence_types.h" #include "xe_oa.h" @@ -27,6 +28,11 @@ enum xe_gt_type { XE_GT_TYPE_MEDIA, }; +enum xe_gt_eu_type { + XE_GT_EU_TYPE_SIMD8, + XE_GT_EU_TYPE_SIMD16, +}; + #define XE_MAX_DSS_FUSE_REGS 3 #define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS) #define XE_MAX_EU_FUSE_REGS 1 @@ -128,6 +134,14 @@ struct xe_gt { u8 has_indirect_ring_state:1; } info; +#if IS_ENABLED(CONFIG_DEBUG_FS) + /** @stats: GT stats */ + struct { + /** @stats.counters: counters for various GT stats */ + atomic_t counters[__XE_GT_STATS_NUM_IDS]; + } stats; +#endif + /** * @mmio: mmio info for GT. All GTs within a tile share the same * register space, but have their own copy of GSI registers at a @@ -233,9 +247,14 @@ struct xe_gt { struct pf_queue { /** @usm.pf_queue.gt: back pointer to GT */ struct xe_gt *gt; -#define PF_QUEUE_NUM_DW 128 /** @usm.pf_queue.data: data in the page fault queue */ - u32 data[PF_QUEUE_NUM_DW]; + u32 *data; + /** + * @usm.pf_queue.num_dw: number of DWORDS in the page + * fault queue. Dynamically calculated based on the number + * of compute resources available. + */ + u32 num_dw; /** * @usm.pf_queue.tail: tail pointer in DWs for page fault queue, * moved by worker which processes faults (consumer). 
@@ -337,6 +356,12 @@ struct xe_gt { /** @fuse_topo.l3_bank_mask: L3 bank mask */ xe_l3_bank_mask_t l3_bank_mask; + + /** + * @fuse_topo.eu_type: type/width of EU stored in + * fuse_topo.eu_mask_per_dss + */ + enum xe_gt_eu_type eu_type; } fuse_topo; /** @steering: register steering for individual HW units */ @@ -350,6 +375,12 @@ struct xe_gt { u16 instance_target; } steering[NUM_STEERING_TYPES]; + /** + * @steering_dss_per_grp: number of DSS per steering group (gslice, + * cslice, etc.). + */ + unsigned int steering_dss_per_grp; + /** * @mcr_lock: protects the MCR_SELECTOR register for the duration * of a steered operation @@ -370,8 +401,14 @@ struct xe_gt { unsigned long *engine; /** @wa_active.lrc: bitmap with active LRC workarounds */ unsigned long *lrc; - /** @wa_active.oob: bitmap with active OOB workaroudns */ + /** @wa_active.oob: bitmap with active OOB workarounds */ unsigned long *oob; + /** + * @wa_active.oob_initialized: mark oob as initialized to help + * detecting misuse of XE_WA() - it can only be called on + * initialization after OOB WAs have being processed + */ + bool oob_initialized; } wa_active; /** @user_engines: engines present in GT and available to userspace */ diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index de0fe9e65746e8..52df28032a6ffe 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -350,6 +350,8 @@ int xe_guc_init(struct xe_guc *guc) if (ret) goto out; + xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE); + ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc); if (ret) goto out; @@ -358,8 +360,6 @@ int xe_guc_init(struct xe_guc *guc) xe_guc_comm_init_early(guc); - xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE); - return 0; out: diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index e0bbf98f849ddd..42116b167c985c 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -11,6 +11,18 @@ #include "xe_hw_engine_types.h" #include "xe_macros.h" +/* + * GuC version number components are defined to be only 8-bit size, + * so converting to a 32bit 8.8.8 integer allows simple (and safe) + * numerical comparisons. 
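The macros that follow pack a major.minor.patch triple into a single integer so callers can compare GuC versions with ordinary relational operators. A usage sketch is below; the threshold 70.29.2 is a made-up value for illustration, not a real firmware requirement.

/* Hypothetical feature gate built on the packed 8.8.8 version (illustrative). */
static bool example_guc_fw_at_least(struct xe_guc *guc)
{
	return GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 29, 2);
}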
+ */ +#define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat)) +#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch) +#define GUC_SUBMIT_VER(guc) \ + MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY]) +#define GUC_FIRMWARE_VER(guc) \ + MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_RELEASE]) + struct drm_printer; void xe_guc_comm_init_early(struct xe_guc *guc); diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index 1c60b685dbc6d1..d1902a8581ca11 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -24,6 +24,7 @@ #include "xe_map.h" #include "xe_mmio.h" #include "xe_platform_types.h" +#include "xe_uc_fw.h" #include "xe_wa.h" /* Slack of a few additional entries per engine */ @@ -367,6 +368,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads) 0xC40, &offset, &remain); + if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406)) + guc_waklv_enable_simple(ads, + GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET, + &offset, &remain); + size = guc_ads_waklv_size(ads) - remain; if (!size) return; diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 64afc90ad2c519..f24dd52239268c 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -105,12 +105,20 @@ ct_to_xe(struct xe_guc_ct *ct) * enough space to avoid backpressure on the driver. We increase the size * of the receive buffer (relative to the send) to ensure a G2H response * CTB has a landing spot. + * + * In addition to submissions, the G2H buffer needs to be able to hold + * enough space for recoverable page fault notifications. The number of + * page faults is interrupt driven and can be as much as the number of + * compute resources available. However, most of the actual work for these + * is in a separate page fault worker thread. Therefore we only need to + * make sure the queue has enough space to handle all of the submissions + * and responses and an extra buffer for incoming page faults. 
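The accounting this comment describes can be modeled in isolation: every H2G that expects a G2H response must first reserve room in the receive buffer, and that room is returned when the response is consumed (part of the release side, __g2h_release_space(), is visible in the hunk below). The standalone sketch uses placeholder names and is not the driver's actual bookkeeping.

/* Illustrative model of the G2H credit scheme; not the driver's actual code. */
struct example_g2h_room {
	u32 space;		/* credits left for G2H responses */
	u32 outstanding;	/* H2G requests still owed a response */
};

static int example_g2h_reserve(struct example_g2h_room *r, u32 g2h_len)
{
	if (r->space < g2h_len)
		return -EBUSY;	/* no landing spot: caller backs off and retries */
	r->space -= g2h_len;
	r->outstanding++;
	return 0;
}

static void example_g2h_release(struct example_g2h_room *r, u32 g2h_len)
{
	r->space += g2h_len;
	r->outstanding--;
}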
*/ #define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K) #define CTB_H2G_BUFFER_SIZE (SZ_4K) -#define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE) -#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4) +#define CTB_G2H_BUFFER_SIZE (SZ_128K) +#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2) /** * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full @@ -516,6 +524,7 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) lockdep_assert_held(&ct->fast_lock); xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <= ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space); + xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding); ct->ctbs.g2h.info.space += g2h_len; if (!--ct->g2h_outstanding) diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c index d9b570a154a261..af2c817d552cad 100644 --- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c +++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c @@ -6,6 +6,7 @@ #include "xe_guc_hwconfig.h" #include +#include #include "abi/guc_actions_abi.h" #include "xe_bo.h" @@ -103,3 +104,99 @@ void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst) xe_map_memcpy_from(xe, dst, &guc->hwconfig.bo->vmap, 0, guc->hwconfig.size); } + +void xe_guc_hwconfig_dump(struct xe_guc *guc, struct drm_printer *p) +{ + size_t size = xe_guc_hwconfig_size(guc); + u32 *hwconfig; + u64 num_dw; + u32 extra_bytes; + int i = 0; + + if (size == 0) { + drm_printf(p, "No hwconfig available\n"); + return; + } + + num_dw = div_u64_rem(size, sizeof(u32), &extra_bytes); + + hwconfig = kzalloc(size, GFP_KERNEL); + if (!hwconfig) { + drm_printf(p, "Error: could not allocate hwconfig memory\n"); + return; + } + + xe_guc_hwconfig_copy(guc, hwconfig); + + /* An entry requires at least three dwords for key, length, value */ + while (i + 3 <= num_dw) { + u32 attribute = hwconfig[i++]; + u32 len_dw = hwconfig[i++]; + + if (i + len_dw > num_dw) { + drm_printf(p, "Error: Attribute %u is %u dwords, but only %llu remain\n", + attribute, len_dw, num_dw - i); + len_dw = num_dw - i; + } + + /* + * If it's a single dword (as most hwconfig attributes are), + * then it's probably a number that makes sense to display + * in decimal form. In the rare cases where it's more than + * one dword, just print it in hex form and let the user + * figure out how to interpret it. + */ + if (len_dw == 1) + drm_printf(p, "[%2u] = %u\n", attribute, hwconfig[i]); + else + drm_printf(p, "[%2u] = { %*ph }\n", attribute, + (int)(len_dw * sizeof(u32)), &hwconfig[i]); + i += len_dw; + } + + if (i < num_dw || extra_bytes) + drm_printf(p, "Error: %llu extra bytes at end of hwconfig\n", + (num_dw - i) * sizeof(u32) + extra_bytes); + + kfree(hwconfig); +} + +/* + * Lookup a specific 32-bit attribute value in the GuC's hwconfig table. + */ +int xe_guc_hwconfig_lookup_u32(struct xe_guc *guc, u32 attribute, u32 *val) +{ + size_t size = xe_guc_hwconfig_size(guc); + u64 num_dw = div_u64(size, sizeof(u32)); + u32 *hwconfig; + bool found = false; + int i = 0; + + if (num_dw == 0) + return -EINVAL; + + hwconfig = kzalloc(size, GFP_KERNEL); + if (!hwconfig) + return -ENOMEM; + + xe_guc_hwconfig_copy(guc, hwconfig); + + /* An entry requires at least three dwords for key, length, value */ + while (i + 3 <= num_dw) { + u32 key = hwconfig[i++]; + u32 len_dw = hwconfig[i++]; + + if (key != attribute) { + i += len_dw; + continue; + } + + *val = hwconfig[i]; + found = true; + break; + } + + kfree(hwconfig); + + return found ? 
0 : -ENOENT; +} diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.h b/drivers/gpu/drm/xe/xe_guc_hwconfig.h index b5794d6419006c..ab4e5038236ee8 100644 --- a/drivers/gpu/drm/xe/xe_guc_hwconfig.h +++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.h @@ -8,10 +8,13 @@ #include +struct drm_printer; struct xe_guc; int xe_guc_hwconfig_init(struct xe_guc *guc); u32 xe_guc_hwconfig_size(struct xe_guc *guc); void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst); +void xe_guc_hwconfig_dump(struct xe_guc *guc, struct drm_printer *p); +int xe_guc_hwconfig_lookup_u32(struct xe_guc *guc, u32 attribute, u32 *val); #endif diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.c b/drivers/gpu/drm/xe/xe_guc_id_mgr.c index cd0549d0ef89a2..e845425d670be7 100644 --- a/drivers/gpu/drm/xe/xe_guc_id_mgr.c +++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.c @@ -97,8 +97,8 @@ int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int limit) if (ret) return ret; - xe_gt_info(idm_to_gt(idm), "using %u GUC ID%s\n", - idm->total, str_plural(idm->total)); + xe_gt_dbg(idm_to_gt(idm), "using %u GuC ID%s\n", + idm->total, str_plural(idm->total)); return 0; } diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index ccd574e948aa30..034b29984d5ed4 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -1042,7 +1042,7 @@ static void xe_guc_pc_fini_hw(void *arg) return; XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL)); - XE_WARN_ON(xe_guc_pc_gucrc_disable(pc)); + xe_guc_pc_gucrc_disable(pc); XE_WARN_ON(xe_guc_pc_stop(pc)); /* Bind requested freq to mert_freq_cap before unload */ diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 59b36c7998c243..80062e1d3f663d 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -276,10 +276,26 @@ static struct workqueue_struct *get_submit_wq(struct xe_guc *guc) } #endif +static void xe_guc_submit_fini(struct xe_guc *guc) +{ + struct xe_device *xe = guc_to_xe(guc); + struct xe_gt *gt = guc_to_gt(guc); + int ret; + + ret = wait_event_timeout(guc->submission_state.fini_wq, + xa_empty(&guc->submission_state.exec_queue_lookup), + HZ * 5); + + drain_workqueue(xe->destroy_wq); + + xe_gt_assert(gt, ret); +} + static void guc_submit_fini(struct drm_device *drm, void *arg) { struct xe_guc *guc = arg; + xe_guc_submit_fini(guc); xa_destroy(&guc->submission_state.exec_queue_lookup); free_submit_wq(guc); } @@ -290,9 +306,15 @@ static void guc_submit_wedged_fini(void *arg) struct xe_exec_queue *q; unsigned long index; - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) - if (exec_queue_wedged(q)) + mutex_lock(&guc->submission_state.lock); + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + if (exec_queue_wedged(q)) { + mutex_unlock(&guc->submission_state.lock); xe_exec_queue_put(q); + mutex_lock(&guc->submission_state.lock); + } + } + mutex_unlock(&guc->submission_state.lock); } static const struct xe_exec_queue_ops guc_exec_queue_ops; @@ -345,6 +367,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids) xa_init(&guc->submission_state.exec_queue_lookup); + init_waitqueue_head(&guc->submission_state.fini_wq); + primelockdep(guc); return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc); @@ -361,6 +385,9 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa xe_guc_id_mgr_release_locked(&guc->submission_state.idm, q->guc->id, q->width); + + if (xa_empty(&guc->submission_state.exec_queue_lookup)) + 
wake_up(&guc->submission_state.fini_wq); } static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) @@ -1071,7 +1098,9 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) struct xe_exec_queue *q = job->q; struct xe_gpu_scheduler *sched = &q->guc->sched; struct xe_guc *guc = exec_queue_to_guc(q); + const char *process_name = "no process"; int err = -ETIME; + pid_t pid = -1; int i = 0; bool wedged, skip_timeout_check; @@ -1168,9 +1197,14 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) goto sched_enable; } - xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx", + if (q->vm && q->vm->xef) { + process_name = q->vm->xef->process_name; + pid = q->vm->xef->pid; + } + xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]", xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job), - q->guc->id, q->flags); + q->guc->id, q->flags, process_name, pid); + trace_xe_sched_job_timedout(job); if (!exec_queue_killed(q)) @@ -1261,13 +1295,16 @@ static void __guc_exec_queue_fini_async(struct work_struct *w) static void guc_exec_queue_fini_async(struct xe_exec_queue *q) { + struct xe_guc *guc = exec_queue_to_guc(q); + struct xe_device *xe = guc_to_xe(guc); + INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async); /* We must block on kernel engines so slabs are empty on driver unload */ if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q)) __guc_exec_queue_fini_async(&q->guc->fini_async); else - queue_work(system_wq, &q->guc->fini_async); + queue_work(xe->destroy_wq, &q->guc->fini_async); } static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) @@ -1312,6 +1349,15 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms kfree(msg); } +static void __suspend_fence_signal(struct xe_exec_queue *q) +{ + if (!q->guc->suspend_pending) + return; + + WRITE_ONCE(q->guc->suspend_pending, false); + wake_up(&q->guc->suspend_wait); +} + static void suspend_fence_signal(struct xe_exec_queue *q) { struct xe_guc *guc = exec_queue_to_guc(q); @@ -1321,9 +1367,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q) guc_read_stopped(guc)); xe_assert(xe, q->guc->suspend_pending); - q->guc->suspend_pending = false; - smp_wmb(); - wake_up(&q->guc->suspend_wait); + __suspend_fence_signal(q); } static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg) @@ -1360,9 +1404,11 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg) struct xe_exec_queue *q = msg->private_data; if (guc_exec_queue_allowed_to_change_state(q)) { - q->guc->resume_time = RESUME_PENDING; clear_exec_queue_suspended(q); - enable_scheduling(q); + if (!exec_queue_enabled(q)) { + q->guc->resume_time = RESUME_PENDING; + enable_scheduling(q); + } } else { clear_exec_queue_suspended(q); } @@ -1372,6 +1418,8 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg) #define SET_SCHED_PROPS 2 #define SUSPEND 3 #define RESUME 4 +#define OPCODE_MASK 0xf +#define MSG_LOCKED BIT(8) static void guc_exec_queue_process_msg(struct xe_sched_msg *msg) { @@ -1416,7 +1464,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) struct xe_device *xe = guc_to_xe(guc); struct xe_guc_exec_queue *ge; long timeout; - int err; + int err, i; xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc))); @@ -1428,6 +1476,9 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) ge->q = q; init_waitqueue_head(&ge->suspend_wait); + for (i = 0; i < 
MAX_STATIC_MSG_TYPE; ++i) + INIT_LIST_HEAD(&ge->static_msgs[i].link); + timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(q->sched_props.job_timeout_ms); err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, @@ -1480,6 +1531,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q) { trace_xe_exec_queue_kill(q); set_exec_queue_killed(q); + __suspend_fence_signal(q); xe_guc_exec_queue_trigger_cleanup(q); } @@ -1489,11 +1541,26 @@ static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q))); INIT_LIST_HEAD(&msg->link); - msg->opcode = opcode; + msg->opcode = opcode & OPCODE_MASK; msg->private_data = q; trace_xe_sched_msg_add(msg); - xe_sched_add_msg(&q->guc->sched, msg); + if (opcode & MSG_LOCKED) + xe_sched_add_msg_locked(&q->guc->sched, msg); + else + xe_sched_add_msg(&q->guc->sched, msg); +} + +static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q, + struct xe_sched_msg *msg, + u32 opcode) +{ + if (!list_empty(&msg->link)) + return false; + + guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED); + + return true; } #define STATIC_MSG_CLEANUP 0 @@ -1567,34 +1634,59 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, static int guc_exec_queue_suspend(struct xe_exec_queue *q) { + struct xe_gpu_scheduler *sched = &q->guc->sched; struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; - if (exec_queue_killed_or_banned_or_wedged(q) || q->guc->suspend_pending) + if (exec_queue_killed_or_banned_or_wedged(q)) return -EINVAL; - q->guc->suspend_pending = true; - guc_exec_queue_add_msg(q, msg, SUSPEND); + xe_sched_msg_lock(sched); + if (guc_exec_queue_try_add_msg(q, msg, SUSPEND)) + q->guc->suspend_pending = true; + xe_sched_msg_unlock(sched); return 0; } -static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q) +static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) { struct xe_guc *guc = exec_queue_to_guc(q); + int ret; + + /* + * Likely don't need to check exec_queue_killed() as we clear + * suspend_pending upon kill but to be paranoid but races in which + * suspend_pending is set after kill also check kill here. + */ + ret = wait_event_interruptible_timeout(q->guc->suspend_wait, + !READ_ONCE(q->guc->suspend_pending) || + exec_queue_killed(q) || + guc_read_stopped(guc), + HZ * 5); + + if (!ret) { + xe_gt_warn(guc_to_gt(guc), + "Suspend fence, guc_id=%d, failed to respond", + q->guc->id); + /* XXX: Trigger GT reset? */ + return -ETIME; + } - wait_event(q->guc->suspend_wait, !q->guc->suspend_pending || - guc_read_stopped(guc)); + return ret < 0 ? 
ret : 0; } static void guc_exec_queue_resume(struct xe_exec_queue *q) { + struct xe_gpu_scheduler *sched = &q->guc->sched; struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; struct xe_guc *guc = exec_queue_to_guc(q); struct xe_device *xe = guc_to_xe(guc); xe_assert(xe, !q->guc->suspend_pending); - guc_exec_queue_add_msg(q, msg, RESUME); + xe_sched_msg_lock(sched); + guc_exec_queue_try_add_msg(q, msg, RESUME); + xe_sched_msg_unlock(sched); } static bool guc_exec_queue_reset_status(struct xe_exec_queue *q) @@ -1734,6 +1826,7 @@ static void guc_exec_queue_start(struct xe_exec_queue *q) } xe_sched_submission_start(sched); + xe_sched_submission_resume_tdr(sched); } int xe_guc_submit_start(struct xe_guc *guc) diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index 546ac6350a31ff..69046f69827174 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -81,6 +81,8 @@ struct xe_guc { #endif /** @submission_state.enabled: submission is enabled */ bool enabled; + /** @submission_state.fini_wq: submit fini wait queue */ + wait_queue_head_t fini_wq; } submission_state; /** @hwconfig: Hardware config state */ struct { diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c index 1c9d38b6f5f181..65b2e147c4b927 100644 --- a/drivers/gpu/drm/xe/xe_heci_gsc.c +++ b/drivers/gpu/drm/xe/xe_heci_gsc.c @@ -92,7 +92,7 @@ void xe_heci_gsc_fini(struct xe_device *xe) { struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; - if (!HAS_HECI_GSCFI(xe)) + if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe)) return; if (heci_gsc->adev) { @@ -177,12 +177,14 @@ void xe_heci_gsc_init(struct xe_device *xe) const struct heci_gsc_def *def; int ret; - if (!HAS_HECI_GSCFI(xe)) + if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe)) return; heci_gsc->irq = -1; - if (xe->info.platform == XE_PVC) { + if (xe->info.platform == XE_BATTLEMAGE) { + def = &heci_gsc_def_dg2; + } else if (xe->info.platform == XE_PVC) { def = &heci_gsc_def_pvc; } else if (xe->info.platform == XE_DG2) { def = &heci_gsc_def_dg2; @@ -232,3 +234,23 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir) if (ret) drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret); } + +void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir) +{ + int ret; + + if ((iir & CSC_IRQ_INTF(1)) == 0) + return; + + if (!HAS_HECI_CSCFI(xe)) { + drm_warn_once(&xe->drm, "CSC irq: not supported"); + return; + } + + if (xe->heci_gsc.irq < 0) + return; + + ret = generic_handle_irq(xe->heci_gsc.irq); + if (ret) + drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret); +} diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h index 9db454478faeb3..48b3b18380453f 100644 --- a/drivers/gpu/drm/xe/xe_heci_gsc.h +++ b/drivers/gpu/drm/xe/xe_heci_gsc.h @@ -11,10 +11,15 @@ struct xe_device; struct mei_aux_device; /* - * The HECI1 bit corresponds to bit15 and HECI2 to bit14. + * GSC HECI1 bit corresponds to bit15 and HECI2 to bit14. * The reason for this is to allow growth for more interfaces in the future. */ -#define GSC_IRQ_INTF(_x) BIT(15 - (_x)) +#define GSC_IRQ_INTF(_x) BIT(15 - (_x)) + +/* + * CSC HECI1 bit corresponds to bit9 and HECI2 to bit10. 
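A worked expansion of the two interface macros, following the comments above and the GSC_IRQ_INTF() definition (CSC_IRQ_INTF() is defined just below); the last entry is the bit tested by xe_heci_csc_irq_handler() earlier in this hunk.

/* Worked values (illustrative):
 *   GSC_IRQ_INTF(0) == BIT(15)   GSC HECI1
 *   GSC_IRQ_INTF(1) == BIT(14)   GSC HECI2
 *   CSC_IRQ_INTF(0) == BIT(9)    CSC HECI1
 *   CSC_IRQ_INTF(1) == BIT(10)   CSC HECI2, tested via "iir & CSC_IRQ_INTF(1)"
 */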
+ */ +#define CSC_IRQ_INTF(_x) BIT(9 + (_x)) /** * struct xe_heci_gsc - graphics security controller for xe, HECI interface @@ -31,5 +36,6 @@ struct xe_heci_gsc { void xe_heci_gsc_init(struct xe_device *xe); void xe_heci_gsc_fini(struct xe_device *xe); void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir); +void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir); #endif /* __XE_HECI_GSC_DEV_H__ */ diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c index bec4366e55138d..f5459f97af23f7 100644 --- a/drivers/gpu/drm/xe/xe_huc.c +++ b/drivers/gpu/drm/xe/xe_huc.c @@ -43,14 +43,6 @@ huc_to_guc(struct xe_huc *huc) return &container_of(huc, struct xe_uc, huc)->guc; } -static void free_gsc_pkt(struct drm_device *drm, void *arg) -{ - struct xe_huc *huc = arg; - - xe_bo_unpin_map_no_vm(huc->gsc_pkt); - huc->gsc_pkt = NULL; -} - #define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K static int huc_alloc_gsc_pkt(struct xe_huc *huc) { @@ -59,17 +51,16 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc) struct xe_bo *bo; /* we use a single object for both input and output */ - bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL, - PXP43_HUC_AUTH_INOUT_SIZE * 2, - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT); + bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt), + PXP43_HUC_AUTH_INOUT_SIZE * 2, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT); if (IS_ERR(bo)) return PTR_ERR(bo); huc->gsc_pkt = bo; - return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc); + return 0; } int xe_huc_init(struct xe_huc *huc) diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 07ed9fd28f1956..c9c3beb3ce8d06 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -5,7 +5,10 @@ #include "xe_hw_engine.h" +#include + #include +#include #include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" @@ -20,6 +23,7 @@ #include "xe_gt_printk.h" #include "xe_gt_mcr.h" #include "xe_gt_topology.h" +#include "xe_hw_engine_group.h" #include "xe_hw_fence.h" #include "xe_irq.h" #include "xe_lrc.h" @@ -263,19 +267,28 @@ static const struct engine_info engine_infos[] = { }, }; -static void hw_engine_fini(struct drm_device *drm, void *arg) +static void hw_engine_fini(void *arg) { struct xe_hw_engine *hwe = arg; if (hwe->exl_port) xe_execlist_port_destroy(hwe->exl_port); - xe_lrc_put(hwe->kernel_lrc); hwe->gt = NULL; } -static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, - u32 val) +/** + * xe_hw_engine_mmio_write32() - Write engine register + * @hwe: engine + * @reg: register to write into + * @val: desired 32-bit value to write + * + * This function will write val into an engine specific register. + * Forcewake must be held by the caller. + * + */ +void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe, + struct xe_reg reg, u32 val) { xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base)); xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain); @@ -285,7 +298,17 @@ static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, xe_mmio_write32(hwe->gt, reg, val); } -static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg) +/** + * xe_hw_engine_mmio_read32() - Read engine register + * @hwe: engine + * @reg: register to read from + * + * This function will read from an engine specific register. + * Forcewake must be held by the caller. + * + * Return: value of the 32-bit register. 
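Since xe_hw_engine_mmio_read32()/xe_hw_engine_mmio_write32() are now exported, callers outside xe_hw_engine.c must hold forcewake themselves. A hypothetical caller, modeled on the snapshot code further down; the helper name is made up and error handling is elided.

/* Hypothetical out-of-file caller of the newly exported register accessor. */
static u32 example_read_ring_tail(struct xe_hw_engine *hwe)
{
	u32 tail;

	/* forcewake must be held, per the kernel-doc above */
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(hwe->gt), hwe->domain));
	tail = xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(hwe->gt), hwe->domain));

	return tail;
}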
+ */ +u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg) { xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base)); xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain); @@ -304,14 +327,14 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe) xe_mmio_write32(hwe->gt, RCU_MODE, _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE)); - hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0); - hw_engine_mmio_write32(hwe, RING_HWS_PGA(0), - xe_bo_ggtt_addr(hwe->hwsp)); - hw_engine_mmio_write32(hwe, RING_MODE(0), - _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE)); - hw_engine_mmio_write32(hwe, RING_MI_MODE(0), - _MASKED_BIT_DISABLE(STOP_RING)); - hw_engine_mmio_read32(hwe, RING_MI_MODE(0)); + xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0); + xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0), + xe_bo_ggtt_addr(hwe->hwsp)); + xe_hw_engine_mmio_write32(hwe, RING_MODE(0), + _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE)); + xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0), + _MASKED_BIT_DISABLE(STOP_RING)); + xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0)); } static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt, @@ -425,6 +448,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe) 0xA, XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, + /* Enable Priority Mem Read */ + { XE_RTP_NAME("Priority_Mem_Read"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), + XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ, + XE_RTP_ACTION_FLAG(ENGINE_BASE))) + }, {} }; @@ -528,21 +557,13 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, goto err_name; } - hwe->kernel_lrc = xe_lrc_create(hwe, NULL, SZ_16K); - if (IS_ERR(hwe->kernel_lrc)) { - err = PTR_ERR(hwe->kernel_lrc); - goto err_hwsp; - } - if (!xe_device_uc_enabled(xe)) { hwe->exl_port = xe_execlist_port_create(xe, hwe); if (IS_ERR(hwe->exl_port)) { err = PTR_ERR(hwe->exl_port); - goto err_kernel_lrc; + goto err_hwsp; } - } - - if (xe_device_uc_enabled(xe)) { + } else { /* GSCCS has a special interrupt for reset */ if (hwe->class == XE_ENGINE_CLASS_OTHER) hwe->irq_handler = xe_gsc_hwe_irq_handler; @@ -555,10 +576,8 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY) gt->usm.reserved_bcs_instance = hwe->instance; - return drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe); + return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe); -err_kernel_lrc: - xe_lrc_put(hwe->kernel_lrc); err_hwsp: xe_bo_unpin_map_no_vm(hwe->hwsp); err_name: @@ -761,6 +780,9 @@ int xe_hw_engines_init(struct xe_gt *gt) } hw_engine_setup_logical_mapping(gt); + err = xe_hw_engine_setup_groups(gt); + if (err) + return err; return 0; } @@ -791,7 +813,7 @@ xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe, unsigned int dss; u16 group, instance; - snapshot->reg.instdone.ring = hw_engine_mmio_read32(hwe, RING_INSTDONE(0)); + snapshot->reg.instdone.ring = xe_hw_engine_mmio_read32(hwe, RING_INSTDONE(0)); if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER) return; @@ -887,53 +909,53 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) return snapshot; snapshot->reg.ring_execlist_status = - hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0)); - val = hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0)); + xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0)); + val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0)); snapshot->reg.ring_execlist_status |= val << 32; 
snapshot->reg.ring_execlist_sq_contents = - hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0)); - val = hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0)); + xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0)); + val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0)); snapshot->reg.ring_execlist_sq_contents |= val << 32; - snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0)); - val = hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0)); + snapshot->reg.ring_acthd = xe_hw_engine_mmio_read32(hwe, RING_ACTHD(0)); + val = xe_hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0)); snapshot->reg.ring_acthd |= val << 32; - snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0)); - val = hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0)); + snapshot->reg.ring_bbaddr = xe_hw_engine_mmio_read32(hwe, RING_BBADDR(0)); + val = xe_hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0)); snapshot->reg.ring_bbaddr |= val << 32; snapshot->reg.ring_dma_fadd = - hw_engine_mmio_read32(hwe, RING_DMA_FADD(0)); - val = hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0)); + xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD(0)); + val = xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0)); snapshot->reg.ring_dma_fadd |= val << 32; - snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0)); - snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe, RING_HWS_PGA(0)); - snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0)); + snapshot->reg.ring_hwstam = xe_hw_engine_mmio_read32(hwe, RING_HWSTAM(0)); + snapshot->reg.ring_hws_pga = xe_hw_engine_mmio_read32(hwe, RING_HWS_PGA(0)); + snapshot->reg.ring_start = xe_hw_engine_mmio_read32(hwe, RING_START(0)); if (GRAPHICS_VERx100(hwe->gt->tile->xe) >= 2000) { - val = hw_engine_mmio_read32(hwe, RING_START_UDW(0)); + val = xe_hw_engine_mmio_read32(hwe, RING_START_UDW(0)); snapshot->reg.ring_start |= val << 32; } if (xe_gt_has_indirect_ring_state(hwe->gt)) { snapshot->reg.indirect_ring_state = - hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0)); + xe_hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0)); } snapshot->reg.ring_head = - hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR; + xe_hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR; snapshot->reg.ring_tail = - hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR; - snapshot->reg.ring_ctl = hw_engine_mmio_read32(hwe, RING_CTL(0)); + xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR; + snapshot->reg.ring_ctl = xe_hw_engine_mmio_read32(hwe, RING_CTL(0)); snapshot->reg.ring_mi_mode = - hw_engine_mmio_read32(hwe, RING_MI_MODE(0)); - snapshot->reg.ring_mode = hw_engine_mmio_read32(hwe, RING_MODE(0)); - snapshot->reg.ring_imr = hw_engine_mmio_read32(hwe, RING_IMR(0)); - snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0)); - snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0)); - snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0)); - snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0)); + xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0)); + snapshot->reg.ring_mode = xe_hw_engine_mmio_read32(hwe, RING_MODE(0)); + snapshot->reg.ring_imr = xe_hw_engine_mmio_read32(hwe, RING_IMR(0)); + snapshot->reg.ring_esr = xe_hw_engine_mmio_read32(hwe, RING_ESR(0)); + snapshot->reg.ring_emr = xe_hw_engine_mmio_read32(hwe, RING_EMR(0)); + snapshot->reg.ring_eir = xe_hw_engine_mmio_read32(hwe, RING_EIR(0)); + snapshot->reg.ipehr = xe_hw_engine_mmio_read32(hwe, RING_IPEHR(0)); xe_hw_engine_snapshot_instdone_capture(hwe, snapshot); if 
(snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE) @@ -1135,3 +1157,41 @@ enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe) { return engine_infos[hwe->engine_id].domain; } + +static const enum xe_engine_class user_to_xe_engine_class[] = { + [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER, + [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY, + [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE, + [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE, + [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE, +}; + +/** + * xe_hw_engine_lookup() - Lookup hardware engine for class:instance + * @xe: xe device + * @eci: engine class and instance + * + * This function will find a hardware engine for given engine + * class and instance. + * + * Return: If found xe_hw_engine pointer, NULL otherwise. + */ +struct xe_hw_engine * +xe_hw_engine_lookup(struct xe_device *xe, + struct drm_xe_engine_class_instance eci) +{ + unsigned int idx; + + if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class)) + return NULL; + + if (eci.gt_id >= xe->info.gt_count) + return NULL; + + idx = array_index_nospec(eci.engine_class, + ARRAY_SIZE(user_to_xe_engine_class)); + + return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id), + user_to_xe_engine_class[idx], + eci.engine_instance, true); +} diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h index 900c8c99143031..022819a4a8ebb1 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.h +++ b/drivers/gpu/drm/xe/xe_hw_engine.h @@ -9,6 +9,8 @@ #include "xe_hw_engine_types.h" struct drm_printer; +struct drm_xe_engine_class_instance; +struct xe_device; #ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN #define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN @@ -62,6 +64,11 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p); void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe); bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe); + +struct xe_hw_engine * +xe_hw_engine_lookup(struct xe_device *xe, + struct drm_xe_engine_class_instance eci); + static inline bool xe_hw_engine_is_valid(struct xe_hw_engine *hwe) { return hwe->name; @@ -71,4 +78,7 @@ const char *xe_hw_engine_class_to_str(enum xe_engine_class class); u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe); enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe); +void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, u32 val); +u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg); + #endif diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c new file mode 100644 index 00000000000000..82750520a90a59 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include + +#include "xe_assert.h" +#include "xe_device.h" +#include "xe_exec_queue.h" +#include "xe_gt.h" +#include "xe_hw_engine_group.h" +#include "xe_vm.h" + +static void +hw_engine_group_free(struct drm_device *drm, void *arg) +{ + struct xe_hw_engine_group *group = arg; + + destroy_workqueue(group->resume_wq); + kfree(group); +} + +static void +hw_engine_group_resume_lr_jobs_func(struct work_struct *w) +{ + struct xe_exec_queue *q; + struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work); + int err; + enum xe_hw_engine_group_execution_mode previous_mode; + + err = 
xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode); + if (err) + return; + + if (previous_mode == EXEC_MODE_LR) + goto put; + + list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { + if (!xe_vm_in_fault_mode(q->vm)) + continue; + + q->ops->resume(q); + } + +put: + xe_hw_engine_group_put(group); +} + +static struct xe_hw_engine_group * +hw_engine_group_alloc(struct xe_device *xe) +{ + struct xe_hw_engine_group *group; + int err; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0); + if (!group->resume_wq) + return ERR_PTR(-ENOMEM); + + init_rwsem(&group->mode_sem); + INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func); + INIT_LIST_HEAD(&group->exec_queue_list); + + err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group); + if (err) + return ERR_PTR(err); + + return group; +} + +/** + * xe_hw_engine_setup_groups() - Setup the hw engine groups for the gt + * @gt: The gt for which groups are setup + * + * Return: 0 on success, negative error code on error. + */ +int xe_hw_engine_setup_groups(struct xe_gt *gt) +{ + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs; + struct xe_device *xe = gt_to_xe(gt); + int err; + + group_rcs_ccs = hw_engine_group_alloc(xe); + if (IS_ERR(group_rcs_ccs)) { + err = PTR_ERR(group_rcs_ccs); + goto err_group_rcs_ccs; + } + + group_bcs = hw_engine_group_alloc(xe); + if (IS_ERR(group_bcs)) { + err = PTR_ERR(group_bcs); + goto err_group_bcs; + } + + group_vcs_vecs = hw_engine_group_alloc(xe); + if (IS_ERR(group_vcs_vecs)) { + err = PTR_ERR(group_vcs_vecs); + goto err_group_vcs_vecs; + } + + for_each_hw_engine(hwe, gt, id) { + switch (hwe->class) { + case XE_ENGINE_CLASS_COPY: + hwe->hw_engine_group = group_bcs; + break; + case XE_ENGINE_CLASS_RENDER: + case XE_ENGINE_CLASS_COMPUTE: + hwe->hw_engine_group = group_rcs_ccs; + break; + case XE_ENGINE_CLASS_VIDEO_DECODE: + case XE_ENGINE_CLASS_VIDEO_ENHANCE: + hwe->hw_engine_group = group_vcs_vecs; + break; + case XE_ENGINE_CLASS_OTHER: + break; + default: + drm_warn(&xe->drm, "NOT POSSIBLE"); + } + } + + return 0; + +err_group_vcs_vecs: + kfree(group_vcs_vecs); +err_group_bcs: + kfree(group_bcs); +err_group_rcs_ccs: + kfree(group_rcs_ccs); + + return err; +} + +/** + * xe_hw_engine_group_add_exec_queue() - Add an exec queue to a hw engine group + * @group: The hw engine group + * @q: The exec_queue + * + * Return: 0 on success, + * -EINTR if the lock could not be acquired + */ +int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q) +{ + int err; + struct xe_device *xe = gt_to_xe(q->gt); + + xe_assert(xe, group); + xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM)); + xe_assert(xe, q->vm); + + if (xe_vm_in_preempt_fence_mode(q->vm)) + return 0; + + err = down_write_killable(&group->mode_sem); + if (err) + return err; + + if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) { + q->ops->suspend(q); + err = q->ops->suspend_wait(q); + if (err) + goto err_suspend; + + xe_hw_engine_group_resume_faulting_lr_jobs(group); + } + + list_add(&q->hw_engine_group_link, &group->exec_queue_list); + up_write(&group->mode_sem); + + return 0; + +err_suspend: + up_write(&group->mode_sem); + return err; +} + +/** + * xe_hw_engine_group_del_exec_queue() - Delete an exec queue from a hw engine group + * @group: The hw engine group + * @q: The 
exec_queue + */ +void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q) +{ + struct xe_device *xe = gt_to_xe(q->gt); + + xe_assert(xe, group); + xe_assert(xe, q->vm); + + down_write(&group->mode_sem); + + if (!list_empty(&q->hw_engine_group_link)) + list_del(&q->hw_engine_group_link); + + up_write(&group->mode_sem); +} + +/** + * xe_hw_engine_group_resume_faulting_lr_jobs() - Asynchronously resume the hw engine group's + * faulting LR jobs + * @group: The hw engine group + */ +void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group) +{ + queue_work(group->resume_wq, &group->resume_work); +} + +/** + * xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group + * @group: The hw engine group + * + * Return: 0 on success, negative error code on error. + */ +static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group) +{ + int err; + struct xe_exec_queue *q; + bool need_resume = false; + + lockdep_assert_held_write(&group->mode_sem); + + list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { + if (!xe_vm_in_fault_mode(q->vm)) + continue; + + need_resume = true; + q->ops->suspend(q); + } + + list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { + if (!xe_vm_in_fault_mode(q->vm)) + continue; + + err = q->ops->suspend_wait(q); + if (err) + goto err_suspend; + } + + if (need_resume) + xe_hw_engine_group_resume_faulting_lr_jobs(group); + + return 0; + +err_suspend: + up_write(&group->mode_sem); + return err; +} + +/** + * xe_hw_engine_group_wait_for_dma_fence_jobs() - Wait for dma fence jobs to complete + * @group: The hw engine group + * + * This function is not meant to be called directly from a user IOCTL as dma_fence_wait() + * is not interruptible. + * + * Return: 0 on success, + * -ETIME if waiting for one job failed + */ +static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group *group) +{ + long timeout; + struct xe_exec_queue *q; + struct dma_fence *fence; + + lockdep_assert_held_write(&group->mode_sem); + + list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { + if (xe_vm_in_lr_mode(q->vm)) + continue; + + fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm); + timeout = dma_fence_wait(fence, false); + dma_fence_put(fence); + + if (timeout < 0) + return -ETIME; + } + + return 0; +} + +static int switch_mode(struct xe_hw_engine_group *group) +{ + int err = 0; + enum xe_hw_engine_group_execution_mode new_mode; + + lockdep_assert_held_write(&group->mode_sem); + + switch (group->cur_mode) { + case EXEC_MODE_LR: + new_mode = EXEC_MODE_DMA_FENCE; + err = xe_hw_engine_group_suspend_faulting_lr_jobs(group); + break; + case EXEC_MODE_DMA_FENCE: + new_mode = EXEC_MODE_LR; + err = xe_hw_engine_group_wait_for_dma_fence_jobs(group); + break; + } + + if (err) + return err; + + group->cur_mode = new_mode; + + return 0; +} + +/** + * xe_hw_engine_group_get_mode() - Get the group to execute in the new mode + * @group: The hw engine group + * @new_mode: The new execution mode + * @previous_mode: Pointer to the previous mode provided for use by caller + * + * Return: 0 if successful, -EINTR if locking failed. 
+ */ +int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group, + enum xe_hw_engine_group_execution_mode new_mode, + enum xe_hw_engine_group_execution_mode *previous_mode) +__acquires(&group->mode_sem) +{ + int err = down_read_interruptible(&group->mode_sem); + + if (err) + return err; + + *previous_mode = group->cur_mode; + + if (new_mode != group->cur_mode) { + up_read(&group->mode_sem); + err = down_write_killable(&group->mode_sem); + if (err) + return err; + + if (new_mode != group->cur_mode) { + err = switch_mode(group); + if (err) { + up_write(&group->mode_sem); + return err; + } + } + downgrade_write(&group->mode_sem); + } + + return err; +} + +/** + * xe_hw_engine_group_put() - Put the group + * @group: The hw engine group + */ +void xe_hw_engine_group_put(struct xe_hw_engine_group *group) +__releases(&group->mode_sem) +{ + up_read(&group->mode_sem); +} + +/** + * xe_hw_engine_group_find_exec_mode() - Find the execution mode for this exec queue + * @q: The exec_queue + */ +enum xe_hw_engine_group_execution_mode +xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q) +{ + if (xe_vm_in_fault_mode(q->vm)) + return EXEC_MODE_LR; + else + return EXEC_MODE_DMA_FENCE; +} diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.h b/drivers/gpu/drm/xe/xe_hw_engine_group.h new file mode 100644 index 00000000000000..797ee81acbf259 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_hw_engine_group.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_HW_ENGINE_GROUP_H_ +#define _XE_HW_ENGINE_GROUP_H_ + +#include "xe_hw_engine_group_types.h" + +struct drm_device; +struct xe_exec_queue; +struct xe_gt; + +int xe_hw_engine_setup_groups(struct xe_gt *gt); + +int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q); +void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q); + +int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group, + enum xe_hw_engine_group_execution_mode new_mode, + enum xe_hw_engine_group_execution_mode *previous_mode); +void xe_hw_engine_group_put(struct xe_hw_engine_group *group); + +enum xe_hw_engine_group_execution_mode +xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q); +void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group); + +#endif diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group_types.h b/drivers/gpu/drm/xe/xe_hw_engine_group_types.h new file mode 100644 index 00000000000000..92b6e0712c036e --- /dev/null +++ b/drivers/gpu/drm/xe/xe_hw_engine_group_types.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_HW_ENGINE_GROUP_TYPES_H_ +#define _XE_HW_ENGINE_GROUP_TYPES_H_ + +#include "xe_force_wake_types.h" +#include "xe_lrc_types.h" +#include "xe_reg_sr_types.h" + +/** + * enum xe_hw_engine_group_execution_mode - possible execution modes of a hw + * engine group + * + * @EXEC_MODE_LR: execution in long-running mode + * @EXEC_MODE_DMA_FENCE: execution in dma fence mode + */ +enum xe_hw_engine_group_execution_mode { + EXEC_MODE_LR, + EXEC_MODE_DMA_FENCE, +}; + +/** + * struct xe_hw_engine_group - Hardware engine group + * + * hw engines belong to the same group if they share hardware resources in a way + * that prevents them from making progress when one is stuck on a page fault. 
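A note on the locking pattern just added: xe_hw_engine_group_get_mode() pins the group's execution mode under the read side of mode_sem, upgrades to the write side only when a mode switch is needed, and downgrades again so the caller always returns holding a read lock that xe_hw_engine_group_put() releases. A minimal, hypothetical caller sketch (submit_on_group() and its body are assumptions, not part of this patch; the group API calls are the ones defined above):

static int submit_on_group(struct xe_hw_engine_group *group,
			   struct xe_exec_queue *q)
{
	enum xe_hw_engine_group_execution_mode mode, previous_mode;
	int err;

	mode = xe_hw_engine_group_find_exec_mode(q);
	err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
	if (err)
		return err;	/* -EINTR: interrupted while taking mode_sem */

	/* ... submit the job here; the group cannot switch mode until the put ... */

	xe_hw_engine_group_put(group);
	return 0;
}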
+ */ +struct xe_hw_engine_group { + /** + * @exec_queue_list: list of exec queues attached to this + * xe_hw_engine_group + */ + struct list_head exec_queue_list; + /** @resume_work: worker to resume faulting LR exec queues */ + struct work_struct resume_work; + /** @resume_wq: workqueue to resume faulting LR exec queues */ + struct workqueue_struct *resume_wq; + /** + * @mode_sem: used to protect this group's hardware resources and ensure + * mutual exclusion between execution only in faulting LR mode and + * execution only in DMA_FENCE mode + */ + struct rw_semaphore mode_sem; + /** @cur_mode: current execution mode of this hw engine group */ + enum xe_hw_engine_group_execution_mode cur_mode; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h index 70e6434f150d64..8be6d420ece40b 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_types.h +++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h @@ -136,8 +136,6 @@ struct xe_hw_engine { enum xe_force_wake_domains domain; /** @hwsp: hardware status page buffer object */ struct xe_bo *hwsp; - /** @kernel_lrc: Kernel LRC (should be replaced /w an xe_engine) */ - struct xe_lrc *kernel_lrc; /** @exl_port: execlists port */ struct xe_execlist_port *exl_port; /** @fence_irq: fence IRQ to run when a hw engine IRQ is received */ @@ -150,6 +148,8 @@ struct xe_hw_engine { struct xe_hw_engine_class_intf *eclass; /** @oa_unit: oa unit for this hw engine */ struct xe_oa_unit *oa_unit; + /** @hw_engine_group: the group of hw engines this one belongs to */ + struct xe_hw_engine_group *hw_engine_group; }; /** diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index 98e3ec08279ebc..aa11728e7e796c 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -12,7 +12,6 @@ #include "regs/xe_mchbar_regs.h" #include "regs/xe_pcode_regs.h" #include "xe_device.h" -#include "xe_gt.h" #include "xe_hwmon.h" #include "xe_mmio.h" #include "xe_pcode.h" @@ -65,8 +64,8 @@ struct xe_hwmon_energy_info { struct xe_hwmon { /** @hwmon_dev: hwmon device for xe */ struct device *hwmon_dev; - /** @gt: primary gt */ - struct xe_gt *gt; + /** @xe: Xe device */ + struct xe_device *xe; /** @hwmon_lock: lock for rw attributes*/ struct mutex hwmon_lock; /** @scl_shift_power: pkg power unit */ @@ -82,7 +81,7 @@ struct xe_hwmon { static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg, int channel) { - struct xe_device *xe = gt_to_xe(hwmon->gt); + struct xe_device *xe = hwmon->xe; switch (hwmon_reg) { case REG_PKG_RAPL_LIMIT: @@ -148,8 +147,9 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value) { u64 reg_val, min, max; - struct xe_device *xe = gt_to_xe(hwmon->gt); + struct xe_device *xe = hwmon->xe; struct xe_reg rapl_limit, pkg_power_sku; + struct xe_gt *mmio = xe_root_mmio_gt(xe); rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); @@ -166,7 +166,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v mutex_lock(&hwmon->hwmon_lock); - reg_val = xe_mmio_read32(hwmon->gt, rapl_limit); + reg_val = xe_mmio_read32(mmio, rapl_limit); /* Check if PL1 limit is disabled */ if (!(reg_val & PKG_PWR_LIM_1_EN)) { *value = PL1_DISABLE; @@ -176,7 +176,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v reg_val = 
REG_FIELD_GET(PKG_PWR_LIM_1, reg_val); *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power); - reg_val = xe_mmio_read64_2x32(hwmon->gt, pkg_power_sku); + reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku); min = REG_FIELD_GET(PKG_MIN_PWR, reg_val); min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power); max = REG_FIELD_GET(PKG_MAX_PWR, reg_val); @@ -190,6 +190,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value) { + struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); int ret = 0; u64 reg_val; struct xe_reg rapl_limit; @@ -200,10 +201,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va /* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */ if (value == PL1_DISABLE) { - reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0); - reg_val = xe_mmio_read32(hwmon->gt, rapl_limit); + reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0); + reg_val = xe_mmio_read32(mmio, rapl_limit); if (reg_val & PKG_PWR_LIM_1_EN) { - drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n"); + drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n"); ret = -EOPNOTSUPP; } goto unlock; @@ -212,7 +213,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va /* Computation in 64-bits to avoid overflow. Round to nearest. */ reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER); reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val); - reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val); + reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val); unlock: mutex_unlock(&hwmon->hwmon_lock); @@ -221,6 +222,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value) { + struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); u64 reg_val; @@ -229,7 +231,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l * for this register can be skipped. * See xe_hwmon_power_is_visible.
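For reference, the power reads above are fixed-point conversions: the raw PKG_PWR_LIM_1 / PKG_POWER_SKU fields are multiplied by SF_POWER (10^6, i.e. microwatts) and shifted right by the power-unit exponent taken from PKG_POWER_SKU_UNIT, which is exactly what mul_u64_u32_shr() does. A standalone sketch of that arithmetic (the helper name and the example unit of 1/8 W are assumptions):

#include <stdint.h>
#include <stdio.h>

/* Same math as mul_u64_u32_shr(raw, SF_POWER, scl_shift_power). */
static uint64_t raw_to_microwatts(uint64_t raw, unsigned int scl_shift_power)
{
	const uint64_t SF_POWER = 1000000;	/* scale to microwatts */

	return (raw * SF_POWER) >> scl_shift_power;
}

int main(void)
{
	/* a power unit of 1/8 W corresponds to a shift of 3 */
	printf("%llu uW\n", (unsigned long long)raw_to_microwatts(0x50, 3));	/* 10 W */
	return 0;
}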
*/ - reg_val = xe_mmio_read32(hwmon->gt, reg); + reg_val = xe_mmio_read32(mmio, reg); reg_val = REG_FIELD_GET(PKG_TDP, reg_val); *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power); } @@ -257,11 +259,12 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l static void xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy) { + struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); struct xe_hwmon_energy_info *ei = &hwmon->ei[channel]; u64 reg_val; - reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS, - channel)); + reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS, + channel)); if (reg_val >= ei->reg_val_prev) ei->accum_energy += reg_val - ei->reg_val_prev; @@ -279,19 +282,20 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at char *buf) { struct xe_hwmon *hwmon = dev_get_drvdata(dev); + struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); u32 x, y, x_w = 2; /* 2 bits */ u64 r, tau4, out; int sensor_index = to_sensor_dev_attr(attr)->index; - xe_pm_runtime_get(gt_to_xe(hwmon->gt)); + xe_pm_runtime_get(hwmon->xe); mutex_lock(&hwmon->hwmon_lock); - r = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index)); + r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index)); mutex_unlock(&hwmon->hwmon_lock); - xe_pm_runtime_put(gt_to_xe(hwmon->gt)); + xe_pm_runtime_put(hwmon->xe); x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r); y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r); @@ -319,6 +323,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a const char *buf, size_t count) { struct xe_hwmon *hwmon = dev_get_drvdata(dev); + struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); u32 x, y, rxy, x_w = 2; /* 2 bits */ u64 tau4, r, max_win; unsigned long val; @@ -371,16 +376,16 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y); - xe_pm_runtime_get(gt_to_xe(hwmon->gt)); + xe_pm_runtime_get(hwmon->xe); mutex_lock(&hwmon->hwmon_lock); - r = xe_mmio_rmw32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index), + r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index), PKG_PWR_LIM_1_TIME, rxy); mutex_unlock(&hwmon->hwmon_lock); - xe_pm_runtime_put(gt_to_xe(hwmon->gt)); + xe_pm_runtime_put(hwmon->xe); return count; } @@ -406,11 +411,11 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj, struct xe_hwmon *hwmon = dev_get_drvdata(dev); int ret = 0; - xe_pm_runtime_get(gt_to_xe(hwmon->gt)); + xe_pm_runtime_get(hwmon->xe); ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? 
attr->mode : 0; - xe_pm_runtime_put(gt_to_xe(hwmon->gt)); + xe_pm_runtime_put(hwmon->xe); return ret; } @@ -435,20 +440,24 @@ static const struct hwmon_channel_info * const hwmon_info[] = { }; /* I1 is exposed as power_crit or as curr_crit depending on bit 31 */ -static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval) +static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval) { + struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe); + /* Avoid Illegal Subcommand error */ - if (gt_to_xe(gt)->info.platform == XE_DG2) + if (hwmon->xe->info.platform == XE_DG2) return -ENXIO; - return xe_pcode_read(gt_to_tile(gt), PCODE_MBOX(PCODE_POWER_SETUP, + return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP, POWER_SETUP_SUBCOMMAND_READ_I1, 0), uval, NULL); } -static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval) +static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval) { - return xe_pcode_write(gt_to_tile(gt), PCODE_MBOX(PCODE_POWER_SETUP, + struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe); + + return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP, POWER_SETUP_SUBCOMMAND_WRITE_I1, 0), (uval & POWER_SETUP_I1_DATA_MASK)); } @@ -461,7 +470,7 @@ static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel, mutex_lock(&hwmon->hwmon_lock); - ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval); + ret = xe_hwmon_pcode_read_i1(hwmon, &uval); if (ret) goto unlock; @@ -481,7 +490,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel, mutex_lock(&hwmon->hwmon_lock); uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor); - ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval); + ret = xe_hwmon_pcode_write_i1(hwmon, uval); mutex_unlock(&hwmon->hwmon_lock); return ret; @@ -489,9 +498,10 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel, static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value) { + struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); u64 reg_val; - reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel)); + reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel)); /* HW register value in units of 2.5 millivolt */ *value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE); } @@ -510,7 +520,7 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel) channel)) ? 0444 : 0; case hwmon_power_crit: if (channel == CHANNEL_PKG) - return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) || + return (xe_hwmon_pcode_read_i1(hwmon, &uval) || !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644; break; case hwmon_power_label: @@ -563,10 +573,10 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel) switch (attr) { case hwmon_curr_crit: - return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) || + return (xe_hwmon_pcode_read_i1(hwmon, &uval) || (uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644; case hwmon_curr_label: - return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) || + return (xe_hwmon_pcode_read_i1(hwmon, &uval) || (uval & POWER_SETUP_I1_WATTS)) ? 
0 : 0444; break; default: @@ -654,7 +664,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata; int ret; - xe_pm_runtime_get(gt_to_xe(hwmon->gt)); + xe_pm_runtime_get(hwmon->xe); switch (type) { case hwmon_power: @@ -674,7 +684,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, break; } - xe_pm_runtime_put(gt_to_xe(hwmon->gt)); + xe_pm_runtime_put(hwmon->xe); return ret; } @@ -686,7 +696,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, struct xe_hwmon *hwmon = dev_get_drvdata(dev); int ret; - xe_pm_runtime_get(gt_to_xe(hwmon->gt)); + xe_pm_runtime_get(hwmon->xe); switch (type) { case hwmon_power: @@ -706,7 +716,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, break; } - xe_pm_runtime_put(gt_to_xe(hwmon->gt)); + xe_pm_runtime_put(hwmon->xe); return ret; } @@ -718,7 +728,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, struct xe_hwmon *hwmon = dev_get_drvdata(dev); int ret; - xe_pm_runtime_get(gt_to_xe(hwmon->gt)); + xe_pm_runtime_get(hwmon->xe); switch (type) { case hwmon_power: @@ -732,7 +742,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, break; } - xe_pm_runtime_put(gt_to_xe(hwmon->gt)); + xe_pm_runtime_put(hwmon->xe); return ret; } @@ -771,6 +781,7 @@ static const struct hwmon_chip_info hwmon_chip_info = { static void xe_hwmon_get_preregistration_info(struct xe_device *xe) { + struct xe_gt *mmio = xe_root_mmio_gt(xe); struct xe_hwmon *hwmon = xe->hwmon; long energy; u64 val_sku_unit = 0; @@ -783,7 +794,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe) */ pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0); if (xe_reg_is_valid(pkg_power_sku_unit)) { - val_sku_unit = xe_mmio_read32(hwmon->gt, pkg_power_sku_unit); + val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit); hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit); hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit); hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit); @@ -828,8 +839,8 @@ void xe_hwmon_register(struct xe_device *xe) if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon)) return; - /* primary GT to access device level properties */ - hwmon->gt = xe->tiles[0].primary_gt; + /* There's only one instance of hwmon per device */ + hwmon->xe = xe; xe_hwmon_get_preregistration_info(xe); diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 85733f993d090c..5f2c368c35adb1 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -459,6 +459,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) * the primary tile. 
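All of the hwmon entry points above now share the same bracket: wake the device with xe_pm_runtime_get(), take hwmon_lock around the register access, then release both in reverse order; with hwmon->gt gone, the MMIO target is obtained through xe_root_mmio_gt(hwmon->xe). A condensed, hypothetical helper showing that shape (this function itself is not part of the patch):

static u32 xe_hwmon_read_locked(struct xe_hwmon *hwmon, struct xe_reg reg)
{
	struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
	u32 val;

	xe_pm_runtime_get(hwmon->xe);		/* device must be awake for MMIO */
	mutex_lock(&hwmon->hwmon_lock);

	val = xe_mmio_read32(mmio, reg);

	mutex_unlock(&hwmon->hwmon_lock);
	xe_pm_runtime_put(hwmon->xe);

	return val;
}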
*/ if (id == 0) { + if (HAS_HECI_CSCFI(xe)) + xe_heci_csc_irq_handler(xe, master_ctl); xe_display_irq_handler(xe, master_ctl); gu_misc_iir = gu_misc_irq_ack(xe, master_ctl); } diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 418661a8891839..8999ac511555f2 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -7,7 +7,7 @@ #include -#include "regs/xe_sriov_regs.h" +#include "regs/xe_gt_regs.h" #include "xe_assert.h" #include "xe_bo.h" @@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level lmtt->ops->lmtt_pte_num(level)), ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) | - XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED); + XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED); if (IS_ERR(bo)) { err = PTR_ERR(bo); goto out_free_pt; diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 58121821f0814a..aec7db39c061e6 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -5,6 +5,8 @@ #include "xe_lrc.h" +#include + #include #include "instructions/xe_mi_commands.h" @@ -24,6 +26,7 @@ #include "xe_memirq.h" #include "xe_sriov.h" #include "xe_vm.h" +#include "xe_wa.h" #define LRC_VALID BIT_ULL(0) #define LRC_PRIVILEGE BIT_ULL(8) @@ -1581,19 +1584,31 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b int state_table_size = 0; /* - * At the moment we only need to emit non-register state for the RCS - * engine. + * Wa_14019789679 + * + * If the driver doesn't explicitly emit the SVG instructions while + * setting up the default LRC, the context switch will write 0's + * (noops) into the LRC memory rather than the expected instruction + * headers. Application contexts start out as a copy of the default + * LRC, and if they also do not emit specific settings for some SVG + * state, then on context restore they'll unintentionally inherit + * whatever state setting the previous context had programmed into the + * hardware (i.e., the lack of a 3DSTATE_* instruction in the LRC will + * prevent the hardware from resetting that state back to any specific + * value). + * + * The official workaround only requires emitting 3DSTATE_MESH_CONTROL + * since that's a specific state setting that can easily cause GPU + * hangs if unintentionally inherited. However to be safe we'll + * continue to emit all of the SVG state since it's best not to leak + * any of the state between contexts, even if that leakage is harmless. */ - if (q->hwe->class != XE_ENGINE_CLASS_RENDER) - return; - - switch (GRAPHICS_VERx100(xe)) { - case 1255: - case 1270 ... 
2004: + if (XE_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) { state_table = xe_hpg_svg_state; state_table_size = ARRAY_SIZE(xe_hpg_svg_state); - break; - default: + } + + if (!state_table) { xe_gt_dbg(gt, "No non-register state to emit on graphics ver %d.%02d\n", GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100); return; @@ -1634,7 +1649,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) if (!snapshot) return NULL; - if (lrc->bo && lrc->bo->vm) + if (lrc->bo->vm) xe_vm_get(lrc->bo->vm); snapshot->context_desc = xe_lrc_ggtt_addr(lrc); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index c9f5673353ee3e..cfd31ae49cc1f7 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include @@ -73,6 +73,7 @@ struct xe_migrate { #define NUM_PT_SLOTS 32 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M #define MAX_NUM_PTE 512 +#define IDENTITY_OFFSET 256ULL /* * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest @@ -84,15 +85,14 @@ struct xe_migrate { #define MAX_PTE_PER_SDI 0x1FE /** - * xe_tile_migrate_engine() - Get this tile's migrate engine. + * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue. * @tile: The tile. * - * Returns the default migrate engine of this tile. - * TODO: Perhaps this function is slightly misplaced, and even unneeded? + * Returns the default migrate exec queue of this tile. * - * Return: The default migrate engine + * Return: The default migrate exec queue */ -struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile) +struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile) { return tile->migrate->q; } @@ -121,14 +121,64 @@ static u64 xe_migrate_vm_addr(u64 slot, u32 level) return (slot + 1ULL) << xe_pt_shift(level + 1); } -static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr) +static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte) { /* * Remove the DPA to get a correct offset into identity table for the * migrate offset */ + u64 identity_offset = IDENTITY_OFFSET; + + if (GRAPHICS_VER(xe) >= 20 && is_comp_pte) + identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G); + addr -= xe->mem.vram.dpa_base; - return addr + (256ULL << xe_pt_shift(2)); + return addr + (identity_offset << xe_pt_shift(2)); +} + +static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo, + u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs) +{ + u64 pos, ofs, flags; + u64 entry; + /* XXX: Unclear if this should be usable_size? */ + u64 vram_limit = xe->mem.vram.actual_physical_size + + xe->mem.vram.dpa_base; + u32 level = 2; + + ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8; + flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, + true, 0); + + xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M)); + + /* + * Use 1GB pages when possible, last chunk always use 2M + * pages as mixing reserved memory (stolen, WOCPM) with a single + * mapping is not allowed on certain platforms. 
+ */ + for (pos = xe->mem.vram.dpa_base; pos < vram_limit; + pos += SZ_1G, ofs += 8) { + if (pos + SZ_1G >= vram_limit) { + entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs, + pat_index); + xe_map_wr(xe, &bo->vmap, ofs, u64, entry); + + flags = vm->pt_ops->pte_encode_addr(xe, 0, + pat_index, + level - 1, + true, 0); + + for (ofs = pt_2m_ofs; pos < vram_limit; + pos += SZ_2M, ofs += 8) + xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); + break; /* Ensure pos == vram_limit assert correct */ + } + + xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); + } + + xe_assert(xe, pos == vram_limit); } static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, @@ -137,11 +187,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_device *xe = tile_to_xe(tile); u16 pat_index = xe->pat.idx[XE_CACHE_WB]; u8 id = tile->id; - u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level, - num_setup = num_level + 1; + u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level; +#define VRAM_IDENTITY_MAP_COUNT 2 + u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT; +#undef VRAM_IDENTITY_MAP_COUNT u32 map_ofs, level, i; struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo; - u64 entry, pt30_ofs; + u64 entry, pt29_ofs; /* Can't bump NUM_PT_SLOTS too high */ BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE); @@ -161,9 +213,9 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, if (IS_ERR(bo)) return PTR_ERR(bo); - /* PT31 reserved for 2M identity map */ - pt30_ofs = bo->size - 2 * XE_PAGE_SIZE; - entry = vm->pt_ops->pde_encode_bo(bo, pt30_ofs, pat_index); + /* PT30 & PT31 reserved for 2M identity map */ + pt29_ofs = bo->size - 3 * XE_PAGE_SIZE; + entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index); xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry); map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE; @@ -215,12 +267,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, } else { u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); - m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr); + m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false); if (xe->info.has_usm) { batch = tile->primary_gt->usm.bb_pool->bo; batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); - m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr); + m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false); } } @@ -254,55 +306,36 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, /* Identity map the entire vram at 256GiB offset */ if (IS_DGFX(xe)) { - u64 pos, ofs, flags; - /* XXX: Unclear if this should be usable_size? */ - u64 vram_limit = xe->mem.vram.actual_physical_size + - xe->mem.vram.dpa_base; - - level = 2; - ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8; - flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, - true, 0); + u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE; - xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M)); + xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET, + pat_index, pt30_ofs); + xe_assert(xe, xe->mem.vram.actual_physical_size <= + (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G); /* - * Use 1GB pages when possible, last chunk always use 2M - * pages as mixing reserved memory (stolen, WOCPM) with a single - * mapping is not allowed on certain platforms. + * Identity map the entire vram for compressed pat_index for xe2+ + * if flat ccs is enabled. 
*/ - for (pos = xe->mem.vram.dpa_base; pos < vram_limit; - pos += SZ_1G, ofs += 8) { - if (pos + SZ_1G >= vram_limit) { - u64 pt31_ofs = bo->size - XE_PAGE_SIZE; - - entry = vm->pt_ops->pde_encode_bo(bo, pt31_ofs, - pat_index); - xe_map_wr(xe, &bo->vmap, ofs, u64, entry); - - flags = vm->pt_ops->pte_encode_addr(xe, 0, - pat_index, - level - 1, - true, 0); - - for (ofs = pt31_ofs; pos < vram_limit; - pos += SZ_2M, ofs += 8) - xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); - break; /* Ensure pos == vram_limit assert correct */ - } - - xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); + if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) { + u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION]; + u64 vram_offset = IDENTITY_OFFSET + + DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G); + u64 pt31_ofs = bo->size - XE_PAGE_SIZE; + + xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE - + IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G); + xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset, + comp_pat_index, pt31_ofs); } - - xe_assert(xe, pos == vram_limit); } /* * Example layout created above, with root level = 3: * [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's - * [PT9...PT27]: Userspace PT's for VM_BIND, 4 KiB PTE's - * [PT28 = PDE 0] [PT29 = PDE 1] [PT30 = PDE 2] [PT31 = 2M vram identity map] + * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's + * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map] * * This makes the lowest part of the VM point to the pagetables. * Hence the lowest 2M in the vm should point to itself, with a few writes @@ -348,6 +381,11 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt) return logical_mask; } +static bool xe_migrate_needs_ccs_emit(struct xe_device *xe) +{ + return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)); +} + /** * xe_migrate_init() - Initialize a migrate context * @tile: Back-pointer to the tile we're initializing for. @@ -404,7 +442,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) m->q = xe_exec_queue_create_class(xe, primary_gt, vm, XE_ENGINE_CLASS_COPY, EXEC_QUEUE_FLAG_KERNEL | - EXEC_QUEUE_FLAG_PERMANENT); + EXEC_QUEUE_FLAG_PERMANENT, 0); } if (IS_ERR(m->q)) { xe_vm_close_and_put(vm); @@ -421,7 +459,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) return ERR_PTR(err); if (IS_DGFX(xe)) { - if (xe_device_has_flat_ccs(xe)) + if (xe_migrate_needs_ccs_emit(xe)) /* min chunk size corresponds to 4K of CCS Metadata */ m->min_chunk_size = SZ_4K * SZ_64K / xe_device_ccs_bytes(xe, SZ_64K); @@ -475,20 +513,26 @@ static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur) return cur->size >= size; } +#define PTE_UPDATE_FLAG_IS_VRAM BIT(0) +#define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1) + static u32 pte_update_size(struct xe_migrate *m, - bool is_vram, + u32 flags, struct ttm_resource *res, struct xe_res_cursor *cur, u64 *L0, u64 *L0_ofs, u32 *L0_pt, u32 cmd_size, u32 pt_ofs, u32 avail_pts) { u32 cmds = 0; + bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags; + bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags; *L0_pt = pt_ofs; if (is_vram && xe_migrate_allow_identity(*L0, cur)) { /* Offset into identity map. 
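The identity-map arithmetic above boils down to: take the device physical address, subtract dpa_base, and add the virtual offset of the chosen identity window, where one window slot is a level-2 PDE covering 1 GiB (so IDENTITY_OFFSET = 256 lands at 256 GiB) and the compressed-PAT window used on Xe2+ with flat CCS sits immediately after the plain one. A standalone model of xe_migrate_vram_ofs() (the helper name and the numbers in main() are assumptions):

#include <stdint.h>
#include <stdio.h>

#define IDENTITY_OFFSET 256ULL
#define SZ_1G (1ULL << 30)

static uint64_t vram_identity_va(uint64_t addr, uint64_t dpa_base,
				 uint64_t vram_size, int compressed)
{
	uint64_t slot = IDENTITY_OFFSET;

	/* the compressed window starts right after the plain one */
	if (compressed)
		slot += (vram_size + SZ_1G - 1) / SZ_1G;	/* DIV_ROUND_UP */

	return (addr - dpa_base) + slot * SZ_1G;	/* one level-2 PDE = 1 GiB */
}

int main(void)
{
	/* example: 16 GiB of VRAM with a DPA base of 4 GiB */
	uint64_t va = vram_identity_va(4 * SZ_1G + 0x1000, 4 * SZ_1G, 16 * SZ_1G, 1);

	printf("0x%llx\n", (unsigned long long)va);	/* 272 GiB + 0x1000 */
	return 0;
}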
*/ *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile), - cur->start + vram_region_gpu_offset(res)); + cur->start + vram_region_gpu_offset(res), + is_comp_pte); cmds += cmd_size; } else { /* Clip L0 to available size */ @@ -661,7 +705,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m, struct xe_gt *gt = m->tile->primary_gt; u32 flush_flags = 0; - if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) { + if (!copy_ccs && dst_is_indirect) { /* * If the src is already in vram, then it should already * have been cleared by us, or has been populated by the @@ -737,6 +781,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, bool copy_ccs = xe_device_has_flat_ccs(xe) && xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo); bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram); + bool use_comp_pat = xe_device_has_flat_ccs(xe) && + GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram; /* Copying CCS between two different BOs is not supported yet. */ if (XE_WARN_ON(copy_ccs && src_bo != dst_bo)) @@ -763,10 +809,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */ struct xe_sched_job *job; struct xe_bb *bb; - u32 flush_flags; + u32 flush_flags = 0; u32 update_idx; u64 ccs_ofs, ccs_size; u32 ccs_pt; + u32 pte_flags; bool usm = xe->info.has_usm; u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; @@ -779,17 +826,20 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, src_L0 = min(src_L0, dst_L0); - batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0, + pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; + pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0; + batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0, &src_L0_ofs, &src_L0_pt, 0, 0, avail_pts); - batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0, + pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; + batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0, &dst_L0_ofs, &dst_L0_pt, 0, avail_pts, avail_pts); if (copy_system_ccs) { ccs_size = xe_device_ccs_bytes(xe, src_L0); - batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size, + batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs, &ccs_pt, 0, 2 * avail_pts, avail_pts); @@ -798,7 +848,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, /* Add copy commands size here */ batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) + - ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0)); + ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0)); bb = xe_bb_new(gt, batch_size, usm); if (IS_ERR(bb)) { @@ -827,11 +877,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (!copy_only_ccs) emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE); - flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, - IS_DGFX(xe) ? src_is_vram : src_is_pltt, - dst_L0_ofs, - IS_DGFX(xe) ? dst_is_vram : dst_is_pltt, - src_L0, ccs_ofs, copy_ccs); + if (xe_migrate_needs_ccs_emit(xe)) + flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, + IS_DGFX(xe) ? src_is_vram : src_is_pltt, + dst_L0_ofs, + IS_DGFX(xe) ? dst_is_vram : dst_is_pltt, + src_L0, ccs_ofs, copy_ccs); job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm), @@ -986,9 +1037,11 @@ static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, * @m: The migration context. * @bo: The buffer object @dst is currently bound to. 
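xe_migrate_clear() gains a clear_flags argument (documented just below and defined in xe_migrate.h later in this patch), so callers can scrub only the BO payload, only the out-of-band CCS metadata, or both via XE_MIGRATE_CLEAR_FLAG_FULL. A hypothetical caller sketch, assuming a wrapper named clear_bo_for_reuse() that is not part of the patch; the flag names and the xe_migrate_clear() signature are taken from the patch itself:

static struct dma_fence *clear_bo_for_reuse(struct xe_tile *tile, struct xe_bo *bo)
{
	struct xe_device *xe = tile_to_xe(tile);
	u32 flags = XE_MIGRATE_CLEAR_FLAG_BO_DATA;

	/* also scrub CCS metadata where the platform keeps it out of band */
	if (xe_device_has_flat_ccs(xe) && xe_bo_needs_ccs_pages(bo))
		flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;

	return xe_migrate_clear(tile->migrate, bo, bo->ttm.resource, flags);
}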
* @dst: The dst TTM resource to be cleared. + * @clear_flags: flags to specify which data to clear: CCS, BO, or both. * - * Clear the contents of @dst to zero. On flat CCS devices, - * the CCS metadata is cleared to zero as well on VRAM destinations. + * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set. + * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA. + * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata. * TODO: Eliminate the @bo argument. * * Return: Pointer to a dma_fence representing the last clear batch, or @@ -997,18 +1050,27 @@ static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, */ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, struct xe_bo *bo, - struct ttm_resource *dst) + struct ttm_resource *dst, + u32 clear_flags) { bool clear_vram = mem_type_is_vram(dst->mem_type); + bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags; + bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags; struct xe_gt *gt = m->tile->primary_gt; struct xe_device *xe = gt_to_xe(gt); - bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false; + bool clear_only_system_ccs = false; struct dma_fence *fence = NULL; u64 size = bo->size; struct xe_res_cursor src_it; struct ttm_resource *src = dst; int err; + if (WARN_ON(!clear_bo_data && !clear_ccs)) + return NULL; + + if (!clear_bo_data && clear_ccs && !IS_DGFX(xe)) + clear_only_system_ccs = true; + if (!clear_vram) xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it); else @@ -1022,6 +1084,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, struct xe_sched_job *job; struct xe_bb *bb; u32 batch_size, update_idx; + u32 pte_flags; bool usm = xe->info.has_usm; u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; @@ -1029,13 +1092,14 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, clear_L0 = xe_migrate_res_sizes(m, &src_it); /* Calculate final sizes and batch size.. */ + pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; batch_size = 2 + - pte_update_size(m, clear_vram, src, &src_it, + pte_update_size(m, pte_flags, src, &src_it, &clear_L0, &clear_L0_ofs, &clear_L0_pt, - clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0, + clear_bo_data ? 
emit_clear_cmd_len(gt) : 0, 0, avail_pts); - if (xe_device_has_flat_ccs(xe)) + if (xe_migrate_needs_ccs_emit(xe)) batch_size += EMIT_COPY_CCS_DW; /* Clear commands */ @@ -1054,16 +1118,16 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) xe_res_next(&src_it, clear_L0); else - emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs, + emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs, &src_it, clear_L0, dst); bb->cs[bb->len++] = MI_BATCH_BUFFER_END; update_idx = bb->len; - if (!clear_system_ccs) + if (clear_bo_data) emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram); - if (xe_device_has_flat_ccs(xe)) { + if (xe_migrate_needs_ccs_emit(xe)) { emit_copy_ccs(gt, bb, clear_L0_ofs, true, m->cleared_mem_ofs, false, clear_L0); flush_flags = MI_FLUSH_DW_CCS; @@ -1119,13 +1183,14 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, return ERR_PTR(err); } - if (clear_system_ccs) + if (clear_ccs) bo->ccs_cleared = true; return fence; } static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, + const struct xe_vm_pgtable_update_op *pt_op, const struct xe_vm_pgtable_update *update, struct xe_migrate_pt_update *pt_update) { @@ -1146,7 +1211,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, if (!ppgtt_ofs) ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile), xe_bo_addr(update->pt_bo, 0, - XE_PAGE_SIZE)); + XE_PAGE_SIZE), false); do { u64 addr = ppgtt_ofs + ofs * 8; @@ -1160,8 +1225,12 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); bb->cs[bb->len++] = lower_32_bits(addr); bb->cs[bb->len++] = upper_32_bits(addr); - ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk, - update); + if (pt_op->bind) + ops->populate(pt_update, tile, NULL, bb->cs + bb->len, + ofs, chunk, update); + else + ops->clear(pt_update, tile, NULL, bb->cs + bb->len, + ofs, chunk, update); bb->len += chunk * 2; ofs += chunk; @@ -1186,114 +1255,58 @@ struct migrate_test_params { static struct dma_fence * xe_migrate_update_pgtables_cpu(struct xe_migrate *m, - struct xe_vm *vm, struct xe_bo *bo, - const struct xe_vm_pgtable_update *updates, - u32 num_updates, bool wait_vm, struct xe_migrate_pt_update *pt_update) { XE_TEST_DECLARE(struct migrate_test_params *test = to_migrate_test_params (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));) const struct xe_migrate_pt_update_ops *ops = pt_update->ops; - struct dma_fence *fence; + struct xe_vm *vm = pt_update->vops->vm; + struct xe_vm_pgtable_update_ops *pt_update_ops = + &pt_update->vops->pt_update_ops[pt_update->tile_id]; int err; - u32 i; + u32 i, j; if (XE_TEST_ONLY(test && test->force_gpu)) return ERR_PTR(-ETIME); - if (bo && !dma_resv_test_signaled(bo->ttm.base.resv, - DMA_RESV_USAGE_KERNEL)) - return ERR_PTR(-ETIME); - - if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP)) - return ERR_PTR(-ETIME); - if (ops->pre_commit) { pt_update->job = NULL; err = ops->pre_commit(pt_update); if (err) return ERR_PTR(err); } - for (i = 0; i < num_updates; i++) { - const struct xe_vm_pgtable_update *update = &updates[i]; - - ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL, - update->ofs, update->qwords, update); - } - if (vm) { - trace_xe_vm_cpu_bind(vm); - xe_device_wmb(vm->xe); - } - - fence = dma_fence_get_stub(); - - return fence; -} - -static bool no_in_syncs(struct xe_vm *vm, struct 
xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs) -{ - struct dma_fence *fence; - int i; - - for (i = 0; i < num_syncs; i++) { - fence = syncs[i].fence; - - if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &fence->flags)) - return false; - } - if (q) { - fence = xe_exec_queue_last_fence_get(q, vm); - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - dma_fence_put(fence); - return false; + for (i = 0; i < pt_update_ops->num_ops; ++i) { + const struct xe_vm_pgtable_update_op *pt_op = + &pt_update_ops->ops[i]; + + for (j = 0; j < pt_op->num_entries; j++) { + const struct xe_vm_pgtable_update *update = + &pt_op->entries[j]; + + if (pt_op->bind) + ops->populate(pt_update, m->tile, + &update->pt_bo->vmap, NULL, + update->ofs, update->qwords, + update); + else + ops->clear(pt_update, m->tile, + &update->pt_bo->vmap, NULL, + update->ofs, update->qwords, update); } - dma_fence_put(fence); } - return true; + trace_xe_vm_cpu_bind(vm); + xe_device_wmb(vm->xe); + + return dma_fence_get_stub(); } -/** - * xe_migrate_update_pgtables() - Pipelined page-table update - * @m: The migrate context. - * @vm: The vm we'll be updating. - * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr. - * @q: The exec queue to be used for the update or NULL if the default - * migration engine is to be used. - * @updates: An array of update descriptors. - * @num_updates: Number of descriptors in @updates. - * @syncs: Array of xe_sync_entry to await before updating. Note that waits - * will block the engine timeline. - * @num_syncs: Number of entries in @syncs. - * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains - * pointers to callback functions and, if subclassed, private arguments to - * those. - * - * Perform a pipelined page-table update. The update descriptors are typically - * built under the same lock critical section as a call to this function. If - * using the default engine for the updates, they will be performed in the - * order they grab the job_mutex. If different engines are used, external - * synchronization is needed for overlapping updates to maintain page-table - * consistency. Note that the meaing of "overlapping" is that the updates - * touch the same page-table, which might be a higher-level page-directory. - * If no pipelining is needed, then updates may be performed by the cpu. - * - * Return: A dma_fence that, when signaled, indicates the update completion. 
- */ -struct dma_fence * -xe_migrate_update_pgtables(struct xe_migrate *m, - struct xe_vm *vm, - struct xe_bo *bo, - struct xe_exec_queue *q, - const struct xe_vm_pgtable_update *updates, - u32 num_updates, - struct xe_sync_entry *syncs, u32 num_syncs, - struct xe_migrate_pt_update *pt_update) +static struct dma_fence * +__xe_migrate_update_pgtables(struct xe_migrate *m, + struct xe_migrate_pt_update *pt_update, + struct xe_vm_pgtable_update_ops *pt_update_ops) { const struct xe_migrate_pt_update_ops *ops = pt_update->ops; struct xe_tile *tile = m->tile; @@ -1302,59 +1315,53 @@ xe_migrate_update_pgtables(struct xe_migrate *m, struct xe_sched_job *job; struct dma_fence *fence; struct drm_suballoc *sa_bo = NULL; - struct xe_vma *vma = pt_update->vma; struct xe_bb *bb; - u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0; + u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0; + u32 num_updates = 0, current_update = 0; u64 addr; int err = 0; - bool usm = !q && xe->info.has_usm; - bool first_munmap_rebind = vma && - vma->gpuva.flags & XE_VMA_FIRST_REBIND; - struct xe_exec_queue *q_override = !q ? m->q : q; - u16 pat_index = xe->pat.idx[XE_CACHE_WB]; + bool is_migrate = pt_update_ops->q == m->q; + bool usm = is_migrate && xe->info.has_usm; + + for (i = 0; i < pt_update_ops->num_ops; ++i) { + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i]; + struct xe_vm_pgtable_update *updates = pt_op->entries; - /* Use the CPU if no in syncs and engine is idle */ - if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) { - fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates, - num_updates, - first_munmap_rebind, - pt_update); - if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN)) - return fence; + num_updates += pt_op->num_entries; + for (j = 0; j < pt_op->num_entries; ++j) { + u32 num_cmds = DIV_ROUND_UP(updates[j].qwords, + MAX_PTE_PER_SDI); + + /* align noop + MI_STORE_DATA_IMM cmd prefix */ + batch_size += 4 * num_cmds + updates[j].qwords * 2; + } } /* fixed + PTE entries */ if (IS_DGFX(xe)) - batch_size = 2; + batch_size += 2; else - batch_size = 6 + num_updates * 2; + batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) + + num_updates * 2; - for (i = 0; i < num_updates; i++) { - u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI); - - /* align noop + MI_STORE_DATA_IMM cmd prefix */ - batch_size += 4 * num_cmds + updates[i].qwords * 2; - } - - /* - * XXX: Create temp bo to copy from, if batch_size becomes too big? - * - * Worst case: Sum(2 * (each lower level page size) + (top level page size)) - * Should be reasonably bound.. - */ - xe_tile_assert(tile, batch_size < SZ_128K); - - bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm); + bb = xe_bb_new(gt, batch_size, usm); if (IS_ERR(bb)) return ERR_CAST(bb); /* For sysmem PTE's, need to map them in our hole.. 
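The batch sizing above accounts for MI_STORE_DATA_IMM taking at most MAX_PTE_PER_SDI (0x1FE) qwords per command: each chunk costs a 4-dword prefix (command header, two address dwords and an alignment noop) plus two dwords per qword of payload. A standalone model of that sizing (the function name and the example count are assumptions):

#include <stdint.h>
#include <stdio.h>

#define MAX_PTE_PER_SDI 0x1FEu

static uint32_t sdi_batch_dwords(uint32_t qwords)
{
	uint32_t num_cmds = (qwords + MAX_PTE_PER_SDI - 1) / MAX_PTE_PER_SDI;

	/* 4 * num_cmds: noop + MI_STORE_DATA_IMM prefix per chunk */
	return 4 * num_cmds + qwords * 2;
}

int main(void)
{
	printf("%u dwords\n", sdi_batch_dwords(1024));	/* 3 chunks -> 2060 dwords */
	return 0;
}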
*/ if (!IS_DGFX(xe)) { + u32 ptes, ofs; + ppgtt_ofs = NUM_KERNEL_PDE - 1; - if (q) { - xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT); + if (!is_migrate) { + u32 num_units = DIV_ROUND_UP(num_updates, + NUM_VMUSA_WRITES_PER_UNIT); - sa_bo = drm_suballoc_new(&m->vm_update_sa, 1, + if (num_units > m->vm_update_sa.size) { + err = -ENOBUFS; + goto err_bb; + } + sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units, GFP_KERNEL, true, 0); if (IS_ERR(sa_bo)) { err = PTR_ERR(sa_bo); @@ -1370,18 +1377,49 @@ xe_migrate_update_pgtables(struct xe_migrate *m, } /* Map our PT's to gtt */ - bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates); - bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs; - bb->cs[bb->len++] = 0; /* upper_32_bits */ - - for (i = 0; i < num_updates; i++) { - struct xe_bo *pt_bo = updates[i].pt_bo; + i = 0; + j = 0; + ptes = num_updates; + ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs; + while (ptes) { + u32 chunk = min(MAX_PTE_PER_SDI, ptes); + u32 idx = 0; + + bb->cs[bb->len++] = MI_STORE_DATA_IMM | + MI_SDI_NUM_QW(chunk); + bb->cs[bb->len++] = ofs; + bb->cs[bb->len++] = 0; /* upper_32_bits */ + + for (; i < pt_update_ops->num_ops; ++i) { + struct xe_vm_pgtable_update_op *pt_op = + &pt_update_ops->ops[i]; + struct xe_vm_pgtable_update *updates = pt_op->entries; + + for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) { + struct xe_vm *vm = pt_update->vops->vm; + struct xe_bo *pt_bo = updates[j].pt_bo; + + if (idx == chunk) + goto next_cmd; + + xe_tile_assert(tile, pt_bo->size == SZ_4K); + + /* Map a PT at most once */ + if (pt_bo->update_index < 0) + pt_bo->update_index = current_update; + + addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, + XE_CACHE_WB, 0); + bb->cs[bb->len++] = lower_32_bits(addr); + bb->cs[bb->len++] = upper_32_bits(addr); + } - xe_tile_assert(tile, pt_bo->size == SZ_4K); + j = 0; + } - addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0); - bb->cs[bb->len++] = lower_32_bits(addr); - bb->cs[bb->len++] = upper_32_bits(addr); +next_cmd: + ptes -= chunk; + ofs += chunk * sizeof(u64); } bb->cs[bb->len++] = MI_BATCH_BUFFER_END; @@ -1389,19 +1427,36 @@ xe_migrate_update_pgtables(struct xe_migrate *m, addr = xe_migrate_vm_addr(ppgtt_ofs, 0) + (page_ofs / sizeof(u64)) * XE_PAGE_SIZE; - for (i = 0; i < num_updates; i++) - write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE, - &updates[i], pt_update); + for (i = 0; i < pt_update_ops->num_ops; ++i) { + struct xe_vm_pgtable_update_op *pt_op = + &pt_update_ops->ops[i]; + struct xe_vm_pgtable_update *updates = pt_op->entries; + + for (j = 0; j < pt_op->num_entries; ++j) { + struct xe_bo *pt_bo = updates[j].pt_bo; + + write_pgtable(tile, bb, addr + + pt_bo->update_index * XE_PAGE_SIZE, + pt_op, &updates[j], pt_update); + } + } } else { /* phys pages, no preamble required */ bb->cs[bb->len++] = MI_BATCH_BUFFER_END; update_idx = bb->len; - for (i = 0; i < num_updates; i++) - write_pgtable(tile, bb, 0, &updates[i], pt_update); + for (i = 0; i < pt_update_ops->num_ops; ++i) { + struct xe_vm_pgtable_update_op *pt_op = + &pt_update_ops->ops[i]; + struct xe_vm_pgtable_update *updates = pt_op->entries; + + for (j = 0; j < pt_op->num_entries; ++j) + write_pgtable(tile, bb, 0, pt_op, &updates[j], + pt_update); + } } - job = xe_bb_create_migration_job(q ?: m->q, bb, + job = xe_bb_create_migration_job(pt_update_ops->q, bb, xe_migrate_batch_base(m, usm), update_idx); if (IS_ERR(job)) { @@ -1409,46 +1464,20 @@ xe_migrate_update_pgtables(struct xe_migrate *m, goto err_sa; } - /* Wait on BO move */ - if (bo) { 
- err = xe_sched_job_add_deps(job, bo->ttm.base.resv, - DMA_RESV_USAGE_KERNEL); - if (err) - goto err_job; - } - - /* - * Munmap style VM unbind, need to wait for all jobs to be complete / - * trigger preempts before moving forward - */ - if (first_munmap_rebind) { - err = xe_sched_job_add_deps(job, xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP); - if (err) - goto err_job; - } - - err = xe_sched_job_last_fence_add_dep(job, vm); - for (i = 0; !err && i < num_syncs; i++) - err = xe_sync_entry_add_deps(&syncs[i], job); - - if (err) - goto err_job; - if (ops->pre_commit) { pt_update->job = job; err = ops->pre_commit(pt_update); if (err) goto err_job; } - if (!q) + if (is_migrate) mutex_lock(&m->job_mutex); xe_sched_job_arm(job); fence = dma_fence_get(&job->drm.s_fence->finished); xe_sched_job_push(job); - if (!q) + if (is_migrate) mutex_unlock(&m->job_mutex); xe_bb_free(bb, fence); @@ -1465,6 +1494,40 @@ xe_migrate_update_pgtables(struct xe_migrate *m, return ERR_PTR(err); } +/** + * xe_migrate_update_pgtables() - Pipelined page-table update + * @m: The migrate context. + * @pt_update: PT update arguments + * + * Perform a pipelined page-table update. The update descriptors are typically + * built under the same lock critical section as a call to this function. If + * using the default engine for the updates, they will be performed in the + * order they grab the job_mutex. If different engines are used, external + * synchronization is needed for overlapping updates to maintain page-table + * consistency. Note that the meaning of "overlapping" is that the updates + * touch the same page-table, which might be a higher-level page-directory. + * If no pipelining is needed, then updates may be performed by the cpu. + * + * Return: A dma_fence that, when signaled, indicates the update completion. + */ +struct dma_fence * +xe_migrate_update_pgtables(struct xe_migrate *m, + struct xe_migrate_pt_update *pt_update) + +{ + struct xe_vm_pgtable_update_ops *pt_update_ops = + &pt_update->vops->pt_update_ops[pt_update->tile_id]; + struct dma_fence *fence; + + fence = xe_migrate_update_pgtables_cpu(m, pt_update); + + /* -ETIME indicates a job is needed, anything else is legit error */ + if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME) + return fence; + + return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops); +} + /** * xe_migrate_wait() - Complete all operations using the xe_migrate context * @m: Migrate context to wait for. diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h index 951f19318ea48f..0109866e398a87 100644 --- a/drivers/gpu/drm/xe/xe_migrate.h +++ b/drivers/gpu/drm/xe/xe_migrate.h @@ -6,7 +6,7 @@ #ifndef _XE_MIGRATE_ #define _XE_MIGRATE_ -#include +#include struct dma_fence; struct iosys_map; @@ -47,6 +47,24 @@ struct xe_migrate_pt_update_ops { struct xe_tile *tile, struct iosys_map *map, void *pos, u32 ofs, u32 num_qwords, const struct xe_vm_pgtable_update *update); + /** + * @clear: Clear a command buffer or page-table with ptes. + * @pt_update: Embeddable callback argument. + * @tile: The tile for the current operation. + * @map: struct iosys_map into the memory to be populated. + * @pos: If @map is NULL, map into the memory to be populated. + * @ofs: qword offset into @map, unused if @map is NULL. + * @num_qwords: Number of qwords to write. + * @update: Information about the PTEs to be inserted. + * + * This interface is intended to be used as a callback into the + * page-table system to populate command buffers or shared + * page-tables with PTEs.
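The ->clear callback being added to xe_migrate_pt_update_ops here follows the same map-or-pointer convention as ->populate: write through the iosys_map when @map is non-NULL (CPU path), otherwise into the raw command-stream pointer (GPU path). A hedged sketch of what an implementation could look like; the function, its name and the choice of an all-zero PTE are assumptions, since the real xe page-table code derives the empty value from the VM's scratch-page setup:

static void example_clear(struct xe_migrate_pt_update *pt_update,
			  struct xe_tile *tile, struct iosys_map *map,
			  void *pos, u32 ofs, u32 num_qwords,
			  const struct xe_vm_pgtable_update *update)
{
	u64 empty = 0;	/* assumption: no scratch page, clear to invalid PTEs */
	u32 i;

	if (map) {
		for (i = 0; i < num_qwords; i++)
			xe_map_wr(tile_to_xe(tile), map,
				  (ofs + i) * sizeof(u64), u64, empty);
	} else {
		u64 *ptr = pos;

		for (i = 0; i < num_qwords; i++)
			ptr[i] = empty;
	}
}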
+ */ + void (*clear)(struct xe_migrate_pt_update *pt_update, + struct xe_tile *tile, struct iosys_map *map, + void *pos, u32 ofs, u32 num_qwords, + const struct xe_vm_pgtable_update *update); /** * @pre_commit: Callback to be called just before arming the @@ -67,14 +85,10 @@ struct xe_migrate_pt_update_ops { struct xe_migrate_pt_update { /** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */ const struct xe_migrate_pt_update_ops *ops; - /** @vma: The vma we're updating the pagetable for. */ - struct xe_vma *vma; + /** @vops: VMA operations */ + struct xe_vma_ops *vops; /** @job: The job if a GPU page-table update. NULL otherwise */ struct xe_sched_job *job; - /** @start: Start of update for the range fence */ - u64 start; - /** @last: Last of update for the range fence */ - u64 last; /** @tile_id: Tile ID of the update */ u8 tile_id; }; @@ -88,23 +102,22 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, struct ttm_resource *dst, bool copy_only_ccs); +#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0) +#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1) +#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \ + XE_MIGRATE_CLEAR_FLAG_CCS_DATA) struct dma_fence *xe_migrate_clear(struct xe_migrate *m, struct xe_bo *bo, - struct ttm_resource *dst); + struct ttm_resource *dst, + u32 clear_flags); struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m); struct dma_fence * xe_migrate_update_pgtables(struct xe_migrate *m, - struct xe_vm *vm, - struct xe_bo *bo, - struct xe_exec_queue *q, - const struct xe_vm_pgtable_update *updates, - u32 num_updates, - struct xe_sync_entry *syncs, u32 num_syncs, struct xe_migrate_pt_update *pt_update); void xe_migrate_wait(struct xe_migrate *m); -struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile); +struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile); #endif diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index aa68cac9fdf808..3fd462fda6255c 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -29,34 +29,60 @@ static void tiles_fini(void *arg) struct xe_tile *tile; int id; - for_each_tile(tile, xe, id) - if (tile != xe_device_get_root_tile(xe)) - tile->mmio.regs = NULL; + for_each_remote_tile(tile, xe, id) + tile->mmio.regs = NULL; } -int xe_mmio_probe_tiles(struct xe_device *xe) +/* + * On multi-tile devices, partition the BAR space for MMIO on each tile, + * possibly accounting for register override on the number of tiles available. + * Resulting memory layout is like below: + * + * .----------------------. <- tile_count * tile_mmio_size + * | .... 
| + * |----------------------| <- 2 * tile_mmio_size + * | tile1->mmio.regs | + * |----------------------| <- 1 * tile_mmio_size + * | tile0->mmio.regs | + * '----------------------' <- 0MB + */ +static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) { - size_t tile_mmio_size = SZ_16M, tile_mmio_ext_size = xe->info.tile_mmio_ext_size; - u8 id, tile_count = xe->info.tile_count; - struct xe_gt *gt = xe_root_mmio_gt(xe); struct xe_tile *tile; void __iomem *regs; - u32 mtcfg; + u8 id; - if (tile_count == 1) - goto add_mmio_ext; + /* + * Nothing to be done as tile 0 has already been setup earlier with the + * entire BAR mapped - see xe_mmio_init() + */ + if (xe->info.tile_count == 1) + return; + /* Possibly override number of tile based on configuration register */ if (!xe->info.skip_mtcfg) { + struct xe_gt *gt = xe_root_mmio_gt(xe); + u8 tile_count; + u32 mtcfg; + + /* + * Although the per-tile mmio regs are not yet initialized, this + * is fine as it's going to the root gt, that's guaranteed to be + * initialized earlier in xe_mmio_init() + */ mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR); tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1; + if (tile_count < xe->info.tile_count) { drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n", xe->info.tile_count, tile_count); xe->info.tile_count = tile_count; /* - * FIXME: Needs some work for standalone media, but should be impossible - * with multi-tile for now. + * FIXME: Needs some work for standalone media, but + * should be impossible with multi-tile for now: + * multi-tile platform with standalone media doesn't + * exist */ xe->info.gt_count = xe->info.tile_count; } @@ -68,23 +94,51 @@ int xe_mmio_probe_tiles(struct xe_device *xe) tile->mmio.regs = regs; regs += tile_mmio_size; } +} -add_mmio_ext: - /* - * By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile). - * When supported, there could be an additional contiguous multi-tile MMIO extension - * space ON TOP of it, and hence the necessity for distinguished MMIO spaces. - */ - if (xe->info.has_mmio_ext) { - regs = xe->mmio.regs + tile_mmio_size * tile_count; +/* + * On top of all the multi-tile MMIO space there can be a platform-dependent + * extension for each tile, resulting in a layout like below: + * + * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size + * | .... | + * |----------------------| <- ext_base + 2 * tile_mmio_ext_size + * | tile1->mmio_ext.regs | + * |----------------------| <- ext_base + 1 * tile_mmio_ext_size + * | tile0->mmio_ext.regs | + * |======================| <- ext_base = tile_count * tile_mmio_size + * | | + * | mmio.regs | + * | | + * '----------------------' <- 0MB + * + * Set up the tile[]->mmio_ext pointers/sizes. 
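Both layout diagrams above describe plain offset arithmetic into the single BAR mapping set up earlier: tile N's regular register block starts at N * tile_mmio_size, and the optional extension blocks are stacked after all tile_count regular blocks. A small sketch of that address math (the 16M regular block size matches the code; the extension size here is an arbitrary illustrative value):

#include <stddef.h>
#include <stdio.h>

#define TILE_MMIO_SIZE      (16u << 20)  /* SZ_16M per tile, as in the diff */
#define TILE_MMIO_EXT_SIZE  (8u << 20)   /* hypothetical extension size */

/* Offset of tile @id's regular register block within the BAR. */
static size_t tile_regs_offset(unsigned int id)
{
        return (size_t)id * TILE_MMIO_SIZE;
}

/* Offset of tile @id's extension block: it sits above all regular blocks. */
static size_t tile_ext_regs_offset(unsigned int id, unsigned int tile_count)
{
        return (size_t)tile_count * TILE_MMIO_SIZE +
               (size_t)id * TILE_MMIO_EXT_SIZE;
}

int main(void)
{
        unsigned int tile_count = 2;

        for (unsigned int id = 0; id < tile_count; id++)
                printf("tile%u: regs @ 0x%zx, ext @ 0x%zx\n", id,
                       tile_regs_offset(id),
                       tile_ext_regs_offset(id, tile_count));
        return 0;
}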
+ */ +static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size, + size_t tile_mmio_ext_size) +{ + struct xe_tile *tile; + void __iomem *regs; + u8 id; - for_each_tile(tile, xe, id) { - tile->mmio_ext.size = tile_mmio_ext_size; - tile->mmio_ext.regs = regs; + if (!xe->info.has_mmio_ext) + return; - regs += tile_mmio_ext_size; - } + regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count; + for_each_tile(tile, xe, id) { + tile->mmio_ext.size = tile_mmio_ext_size; + tile->mmio_ext.regs = regs; + regs += tile_mmio_ext_size; } +} + +int xe_mmio_probe_tiles(struct xe_device *xe) +{ + size_t tile_mmio_size = SZ_16M; + size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size; + + mmio_multi_tile_setup(xe, tile_mmio_size); + mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size); return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe); } @@ -174,7 +228,11 @@ void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); trace_xe_reg_rw(gt, true, addr, val, sizeof(val)); - writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); + + if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt))) + xe_gt_sriov_vf_write32(gt, reg, val); + else + writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); } u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg) @@ -277,37 +335,24 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg) return (u64)udw << 32 | ldw; } -/** - * xe_mmio_wait32() - Wait for a register to match the desired masked value - * @gt: MMIO target GT - * @reg: register to read value from - * @mask: mask to be applied to the value read from the register - * @val: desired value after applying the mask - * @timeout_us: time out after this period of time. Wait logic tries to be - * smart, applying an exponential backoff until @timeout_us is reached. - * @out_val: if not NULL, points where to store the last unmasked value - * @atomic: needs to be true if calling from an atomic context - * - * This function polls for the desired masked value and returns zero on success - * or -ETIMEDOUT if timed out. - * - * Note that @timeout_us represents the minimum amount of time to wait before - * giving up. The actual time taken by this function can be a little more than - * @timeout_us for different reasons, specially in non-atomic contexts. Thus, - * it is possible that this function succeeds even after @timeout_us has passed. 
- */ -int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, - u32 *out_val, bool atomic) +static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, + u32 *out_val, bool atomic, bool expect_match) { ktime_t cur = ktime_get_raw(); const ktime_t end = ktime_add_us(cur, timeout_us); int ret = -ETIMEDOUT; s64 wait = 10; u32 read; + bool check; for (;;) { read = xe_mmio_read32(gt, reg); - if ((read & mask) == val) { + + check = (read & mask) == val; + if (!expect_match) + check = !check; + + if (check) { ret = 0; break; } @@ -328,7 +373,12 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t if (ret != 0) { read = xe_mmio_read32(gt, reg); - if ((read & mask) == val) + + check = (read & mask) == val; + if (!expect_match) + check = !check; + + if (check) ret = 0; } @@ -339,62 +389,45 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t } /** - * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value + * xe_mmio_wait32() - Wait for a register to match the desired masked value * @gt: MMIO target GT * @reg: register to read value from * @mask: mask to be applied to the value read from the register - * @val: value to match after applying the mask + * @val: desired value after applying the mask * @timeout_us: time out after this period of time. Wait logic tries to be * smart, applying an exponential backoff until @timeout_us is reached. * @out_val: if not NULL, points where to store the last unmasked value * @atomic: needs to be true if calling from an atomic context * - * This function polls for a masked value to change from a given value and - * returns zero on success or -ETIMEDOUT if timed out. + * This function polls for the desired masked value and returns zero on success + * or -ETIMEDOUT if timed out. * * Note that @timeout_us represents the minimum amount of time to wait before * giving up. The actual time taken by this function can be a little more than * @timeout_us for different reasons, specially in non-atomic contexts. Thus, * it is possible that this function succeeds even after @timeout_us has passed. */ +int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, + u32 *out_val, bool atomic) +{ + return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true); +} + +/** + * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value + * @gt: MMIO target GT + * @reg: register to read value from + * @mask: mask to be applied to the value read from the register + * @val: value not to be matched after applying the mask + * @timeout_us: time out after this period of time + * @out_val: if not NULL, points where to store the last unmasked value + * @atomic: needs to be true if calling from an atomic context + * + * This function works exactly like xe_mmio_wait32() with the exception that + * @val is expected not to be matched. 
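The removed open-coded loop and the new __xe_mmio_wait32() share one shape: read, compare the masked value against the expectation (inverted for the _not variant), sleep with an exponential backoff clamped to the deadline, and do one final read after timeout so a success that lands exactly at the deadline is not reported as -ETIMEDOUT. A self-contained userspace model of that loop, using a toy reg_read() in place of MMIO:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Toy register: counts reads so the "wait for non-zero" example terminates. */
static uint32_t reg_read(void)
{
        static uint32_t v;
        return v++;
}

static uint64_t now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000ull + ts.tv_nsec / 1000;
}

/* Poll until (reg & mask) matches (or stops matching) val, doubling the sleep. */
static int wait_reg(uint32_t mask, uint32_t val, uint64_t timeout_us,
                    bool expect_match)
{
        const uint64_t end = now_us() + timeout_us;
        uint64_t wait = 10;     /* microseconds, doubled each round */
        bool ok;

        for (;;) {
                ok = (reg_read() & mask) == val;
                if (!expect_match)
                        ok = !ok;
                if (ok)
                        return 0;

                uint64_t cur = now_us();
                if (cur >= end)
                        break;
                if (cur + wait > end)
                        wait = end - cur;
                usleep(wait);
                wait <<= 1;
        }

        /* Final read: the condition may have become true right at the deadline. */
        ok = (reg_read() & mask) == val;
        if (!expect_match)
                ok = !ok;
        return ok ? 0 : -1;     /* the driver returns -ETIMEDOUT here */
}

int main(void)
{
        printf("wait-for-nonzero: %d\n", wait_reg(~0u, 0, 1000, false));
        return 0;
}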
+ */ int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic) { - ktime_t cur = ktime_get_raw(); - const ktime_t end = ktime_add_us(cur, timeout_us); - int ret = -ETIMEDOUT; - s64 wait = 10; - u32 read; - - for (;;) { - read = xe_mmio_read32(gt, reg); - if ((read & mask) != val) { - ret = 0; - break; - } - - cur = ktime_get_raw(); - if (!ktime_before(cur, end)) - break; - - if (ktime_after(ktime_add_us(cur, wait), end)) - wait = ktime_us_delta(end, cur); - - if (atomic) - udelay(wait); - else - usleep_range(wait, wait << 1); - wait <<= 1; - } - - if (ret != 0) { - read = xe_mmio_read32(gt, reg); - if ((read & mask) != val) - ret = 0; - } - - if (out_val) - *out_val = read; - - return ret; + return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false); } diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h index 6ae0cc32c651b6..26551410ecc872 100644 --- a/drivers/gpu/drm/xe/xe_mmio.h +++ b/drivers/gpu/drm/xe/xe_mmio.h @@ -22,7 +22,6 @@ u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set); int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval); bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg); -int xe_mmio_probe_vram(struct xe_device *xe); u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg); int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic); diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index 499540add465a4..bfc3deebdaa205 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -8,14 +8,17 @@ #include #include +#include + #include "xe_drv.h" #include "xe_hw_fence.h" #include "xe_pci.h" +#include "xe_pm.h" #include "xe_observation.h" #include "xe_sched_job.h" struct xe_modparam xe_modparam = { - .enable_display = true, + .probe_display = true, .guc_log_level = 5, .force_probe = CONFIG_DRM_XE_FORCE_PROBE, .wedged_mode = 1, @@ -25,8 +28,8 @@ struct xe_modparam xe_modparam = { module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444); MODULE_PARM_DESC(force_execlist, "Force Execlist submission"); -module_param_named(enable_display, xe_modparam.enable_display, bool, 0444); -MODULE_PARM_DESC(enable_display, "Enable display"); +module_param_named(probe_display, xe_modparam.probe_display, bool, 0444); +MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)"); module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600); MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)"); @@ -61,12 +64,27 @@ module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600); MODULE_PARM_DESC(wedged_mode, "Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang"); +static int xe_check_nomodeset(void) +{ + if (drm_firmware_drivers_only()) + return -ENODEV; + + return 0; +} + struct init_funcs { int (*init)(void); void (*exit)(void); }; +static void xe_dummy_exit(void) +{ +} + static const struct init_funcs init_funcs[] = { + { + .init = xe_check_nomodeset, + }, { .init = xe_hw_fence_module_init, .exit = xe_hw_fence_module_exit, @@ -83,17 +101,41 @@ static const struct init_funcs init_funcs[] = { .init = xe_observation_sysctl_register, .exit = xe_observation_sysctl_unregister, }, + { + .init = xe_pm_module_init, + .exit = 
xe_dummy_exit, + }, }; +static int __init xe_call_init_func(unsigned int i) +{ + if (WARN_ON(i >= ARRAY_SIZE(init_funcs))) + return 0; + if (!init_funcs[i].init) + return 0; + + return init_funcs[i].init(); +} + +static void xe_call_exit_func(unsigned int i) +{ + if (WARN_ON(i >= ARRAY_SIZE(init_funcs))) + return; + if (!init_funcs[i].exit) + return; + + init_funcs[i].exit(); +} + static int __init xe_init(void) { int err, i; for (i = 0; i < ARRAY_SIZE(init_funcs); i++) { - err = init_funcs[i].init(); + err = xe_call_init_func(i); if (err) { while (i--) - init_funcs[i].exit(); + xe_call_exit_func(i); return err; } } @@ -106,7 +148,7 @@ static void __exit xe_exit(void) int i; for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--) - init_funcs[i].exit(); + xe_call_exit_func(i); } module_init(xe_init); diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h index 61a0d28a28c8f8..161a5e6f717fbc 100644 --- a/drivers/gpu/drm/xe/xe_module.h +++ b/drivers/gpu/drm/xe/xe_module.h @@ -11,7 +11,7 @@ /* Module modprobe variables */ struct xe_modparam { bool force_execlist; - bool enable_display; + bool probe_display; u32 force_vram_bar_size; int guc_log_level; char *guc_firmware_path; diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 22f14eba2c6346..2804f14f8f2979 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include "abi/guc_actions_slpc_abi.h" #include "instructions/xe_mi_commands.h" @@ -645,7 +645,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, u32 offset = xe_bo_ggtt_addr(lrc->bo); do { - bb->cs[bb->len++] = MI_STORE_DATA_IMM | BIT(22) /* GGTT */ | 2; + bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); bb->cs[bb->len++] = offset + flex->offset * sizeof(u32); bb->cs[bb->len++] = 0; bb->cs[bb->len++] = flex->value; @@ -709,8 +709,7 @@ static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable) { RING_CONTEXT_CONTROL(stream->hwe->mmio_base), regs_offset + CTX_CONTEXT_CONTROL, - _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE, - enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) + _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE), }, }; struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol }; @@ -742,10 +741,8 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable) { RING_CONTEXT_CONTROL(stream->hwe->mmio_base), regs_offset + CTX_CONTEXT_CONTROL, - _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE, - enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) | - _MASKED_FIELD(CTX_CTRL_RUN_ALONE, - enable ? CTX_CTRL_RUN_ALONE : 0), + _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) | + _MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? 
CTX_CTRL_RUN_ALONE : 0), }, }; struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol }; @@ -1248,8 +1245,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma) vm_flags_mod(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY, VM_MAYWRITE | VM_MAYEXEC); - xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages == - (vma->vm_end - vma->vm_start) >> PAGE_SHIFT); + xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages == vma_pages(vma)); for (i = 0; i < bo->ttm.ttm->num_pages; i++) { ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]), PAGE_SIZE, vma->vm_page_prot); @@ -1264,7 +1260,6 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma) static const struct file_operations xe_oa_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .release = xe_oa_release, .poll = xe_oa_poll, .read = xe_oa_read, diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h index 540c3ec53a6d79..8862eca73fbe32 100644 --- a/drivers/gpu/drm/xe/xe_oa_types.h +++ b/drivers/gpu/drm/xe/xe_oa_types.h @@ -11,7 +11,7 @@ #include #include -#include +#include #include "regs/xe_reg_defs.h" #include "xe_hw_engine_types.h" diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c index a78c92a44ec2d2..8ec1b84cbb9e46 100644 --- a/drivers/gpu/drm/xe/xe_observation.c +++ b/drivers/gpu/drm/xe/xe_observation.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include "xe_oa.h" #include "xe_observation.h" diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c index 722278cc23fc5d..f291a1730024a0 100644 --- a/drivers/gpu/drm/xe/xe_pat.c +++ b/drivers/gpu/drm/xe/xe_pat.c @@ -5,7 +5,7 @@ #include "xe_pat.h" -#include +#include #include diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 732ee0d02124f7..5e962e72c97ea6 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -59,6 +59,7 @@ struct xe_device_desc { u8 has_display:1; u8 has_heci_gscfi:1; + u8 has_heci_cscfi:1; u8 has_llc:1; u8 has_mmio_ext:1; u8 has_sriov:1; @@ -337,14 +338,13 @@ static const struct xe_device_desc mtl_desc = { static const struct xe_device_desc lnl_desc = { PLATFORM(LUNARLAKE), .has_display = true, - .require_force_probe = true, }; static const struct xe_device_desc bmg_desc = { DGFX_FEATURES, PLATFORM(BATTLEMAGE), .has_display = true, - .require_force_probe = true, + .has_heci_cscfi = 1, }; #undef PLATFORM @@ -606,6 +606,7 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.is_dgfx = desc->is_dgfx; xe->info.has_heci_gscfi = desc->has_heci_gscfi; + xe->info.has_heci_cscfi = desc->has_heci_cscfi; xe->info.has_llc = desc->has_llc; xe->info.has_mmio_ext = desc->has_mmio_ext; xe->info.has_sriov = desc->has_sriov; @@ -613,9 +614,9 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.skip_mtcfg = desc->skip_mtcfg; xe->info.skip_pcode = desc->skip_pcode; - xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) && - xe_modparam.enable_display && - desc->has_display; + xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) && + xe_modparam.probe_display && + desc->has_display; err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0); if (err) @@ -744,7 +745,7 @@ static void xe_pci_remove(struct pci_dev *pdev) { struct xe_device *xe; - xe = pci_get_drvdata(pdev); + xe = pdev_to_xe_device(pdev); if (!xe) /* driver load aborted, nothing to cleanup */ return; @@ -792,7 +793,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if 
(IS_ERR(xe)) return PTR_ERR(xe); - pci_set_drvdata(pdev, xe); + pci_set_drvdata(pdev, &xe->drm); xe_pm_assert_unbounded_bridge(xe); subplatform_desc = find_subplatform(xe, desc); @@ -815,7 +816,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d", + drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d", desc->platform_name, subplatform_desc ? subplatform_desc->name : "", xe->info.devid, xe->info.revid, @@ -826,14 +827,13 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) xe->info.media_name, xe->info.media_verx100 / 100, xe->info.media_verx100 % 100, - str_yes_no(xe->info.enable_display), + str_yes_no(xe->info.probe_display), xe->info.dma_mask_size, xe->info.tile_count, - xe->info.has_heci_gscfi); + xe->info.has_heci_gscfi, xe->info.has_heci_cscfi); - drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n", + drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n", xe_step_name(xe->info.step.graphics), xe_step_name(xe->info.step.media), - xe_step_name(xe->info.step.display), xe_step_name(xe->info.step.basedie)); drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n", @@ -924,6 +924,8 @@ static int xe_pci_resume(struct device *dev) if (err) return err; + pci_restore_state(pdev); + err = pci_enable_device(pdev); if (err) return err; diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 9a3f618d22dcbf..7cf2160fe0408d 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -20,6 +20,7 @@ #include "xe_guc.h" #include "xe_irq.h" #include "xe_pcode.h" +#include "xe_trace.h" #include "xe_wa.h" /** @@ -69,11 +70,41 @@ */ #ifdef CONFIG_LOCKDEP -static struct lockdep_map xe_pm_runtime_lockdep_map = { - .name = "xe_pm_runtime_lockdep_map" +static struct lockdep_map xe_pm_runtime_d3cold_map = { + .name = "xe_rpm_d3cold_map" +}; + +static struct lockdep_map xe_pm_runtime_nod3cold_map = { + .name = "xe_rpm_nod3cold_map" }; #endif +/** + * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context + * @xe: The xe device. + * + * Return: true if it is safe to runtime resume from reclaim context. + * false otherwise. + */ +bool xe_rpm_reclaim_safe(const struct xe_device *xe) +{ + return !xe->d3cold.capable && !xe->info.has_sriov; +} + +static void xe_rpm_lockmap_acquire(const struct xe_device *xe) +{ + lock_map_acquire(xe_rpm_reclaim_safe(xe) ? + &xe_pm_runtime_nod3cold_map : + &xe_pm_runtime_d3cold_map); +} + +static void xe_rpm_lockmap_release(const struct xe_device *xe) +{ + lock_map_release(xe_rpm_reclaim_safe(xe) ? + &xe_pm_runtime_nod3cold_map : + &xe_pm_runtime_d3cold_map); +} + /** * xe_pm_suspend - Helper for System suspend, i.e. 
S0->S3 / S0->S2idle * @xe: xe device instance @@ -87,6 +118,7 @@ int xe_pm_suspend(struct xe_device *xe) int err; drm_dbg(&xe->drm, "Suspending device\n"); + trace_xe_pm_suspend(xe, __builtin_return_address(0)); for_each_gt(gt, xe, id) xe_gt_suspend_prepare(gt); @@ -131,6 +163,7 @@ int xe_pm_resume(struct xe_device *xe) int err; drm_dbg(&xe->drm, "Resuming device\n"); + trace_xe_pm_resume(xe, __builtin_return_address(0)); for_each_tile(tile, xe, id) xe_wa_apply_tile_workarounds(tile); @@ -326,6 +359,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe) u8 id; int err = 0; + trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0)); /* Disable access_ongoing asserts and prevent recursive pm calls */ xe_pm_write_callback_task(xe, current); @@ -350,7 +384,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe) * annotation here and in xe_pm_runtime_get() lockdep will see * the potential lock inversion and give us a nice splat. */ - lock_map_acquire(&xe_pm_runtime_lockdep_map); + xe_rpm_lockmap_acquire(xe); /* * Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify @@ -362,9 +396,9 @@ int xe_pm_runtime_suspend(struct xe_device *xe) xe_bo_runtime_pm_release_mmap_offset(bo); mutex_unlock(&xe->mem_access.vram_userfault.lock); - if (xe->d3cold.allowed) { - xe_display_pm_suspend(xe, true); + xe_display_pm_runtime_suspend(xe); + if (xe->d3cold.allowed) { err = xe_bo_evict_all(xe); if (err) goto out; @@ -382,8 +416,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe) xe_display_pm_suspend_late(xe); out: if (err) - xe_display_pm_resume(xe, true); - lock_map_release(&xe_pm_runtime_lockdep_map); + xe_display_pm_runtime_resume(xe); + xe_rpm_lockmap_release(xe); xe_pm_write_callback_task(xe, NULL); return err; } @@ -400,10 +434,11 @@ int xe_pm_runtime_resume(struct xe_device *xe) u8 id; int err = 0; + trace_xe_pm_runtime_resume(xe, __builtin_return_address(0)); /* Disable access_ongoing asserts and prevent recursive pm calls */ xe_pm_write_callback_task(xe, current); - lock_map_acquire(&xe_pm_runtime_lockdep_map); + xe_rpm_lockmap_acquire(xe); if (xe->d3cold.allowed) { err = xe_pcode_ready(xe, true); @@ -426,14 +461,16 @@ int xe_pm_runtime_resume(struct xe_device *xe) for_each_gt(gt, xe, id) xe_gt_resume(gt); + xe_display_pm_runtime_resume(xe); + if (xe->d3cold.allowed) { - xe_display_pm_resume(xe, true); err = xe_bo_restore_user(xe); if (err) goto out; } + out: - lock_map_release(&xe_pm_runtime_lockdep_map); + xe_rpm_lockmap_release(xe); xe_pm_write_callback_task(xe, NULL); return err; } @@ -447,15 +484,37 @@ int xe_pm_runtime_resume(struct xe_device *xe) * stuff that can happen inside the runtime_resume callback by acquiring * a dummy lock (it doesn't protect anything and gets compiled out on * non-debug builds). Lockdep then only needs to see the - * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can - * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map. + * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can + * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map. * For example if the (callers_locks) are ever grabbed in the * runtime_resume callback, lockdep should give us a nice splat. 
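Userspace has no lockdep, but the priming trick the comment describes has a simple shape: take the locks once, up front, in the only order the code promises to use, so the ordering is on record before any real caller exists and the reverse order is an identifiable inversion. A purely illustrative pthread sketch of that init-time step (the detection itself is lockdep's job in the kernel; this sketch only establishes the order, with hypothetical lock names):

#include <pthread.h>

/* Stand-ins: "rpm_map" plays the role of the dummy runtime-PM lockdep map,
 * "caller_lock" a lock the resume path is allowed to take underneath it. */
static pthread_mutex_t rpm_map = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t caller_lock = PTHREAD_MUTEX_INITIALIZER;

/* Run once at init: record "rpm_map may be held while taking caller_lock". */
static void prime_lock_order(void)
{
        pthread_mutex_lock(&rpm_map);
        pthread_mutex_lock(&caller_lock);
        pthread_mutex_unlock(&caller_lock);
        pthread_mutex_unlock(&rpm_map);
}

int main(void)
{
        prime_lock_order();
        return 0;
}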
*/ -static void pm_runtime_lockdep_prime(void) +static void xe_rpm_might_enter_cb(const struct xe_device *xe) { - lock_map_acquire(&xe_pm_runtime_lockdep_map); - lock_map_release(&xe_pm_runtime_lockdep_map); + xe_rpm_lockmap_acquire(xe); + xe_rpm_lockmap_release(xe); +} + +/* + * Prime the lockdep maps for known locking orders that need to + * be supported but that may not always occur on all systems. + */ +static void xe_pm_runtime_lockdep_prime(void) +{ + struct dma_resv lockdep_resv; + + dma_resv_init(&lockdep_resv); + lock_map_acquire(&xe_pm_runtime_d3cold_map); + /* D3Cold takes the dma_resv locks to evict bos */ + dma_resv_lock(&lockdep_resv, NULL); + dma_resv_unlock(&lockdep_resv); + lock_map_release(&xe_pm_runtime_d3cold_map); + + /* Shrinkers might like to wake up the device under reclaim. */ + fs_reclaim_acquire(GFP_KERNEL); + lock_map_acquire(&xe_pm_runtime_nod3cold_map); + lock_map_release(&xe_pm_runtime_nod3cold_map); + fs_reclaim_release(GFP_KERNEL); } /** @@ -464,12 +523,13 @@ static void pm_runtime_lockdep_prime(void) */ void xe_pm_runtime_get(struct xe_device *xe) { + trace_xe_pm_runtime_get(xe, __builtin_return_address(0)); pm_runtime_get_noresume(xe->drm.dev); if (xe_pm_read_callback_task(xe) == current) return; - pm_runtime_lockdep_prime(); + xe_rpm_might_enter_cb(xe); pm_runtime_resume(xe->drm.dev); } @@ -479,6 +539,7 @@ void xe_pm_runtime_get(struct xe_device *xe) */ void xe_pm_runtime_put(struct xe_device *xe) { + trace_xe_pm_runtime_put(xe, __builtin_return_address(0)); if (xe_pm_read_callback_task(xe) == current) { pm_runtime_put_noidle(xe->drm.dev); } else { @@ -496,10 +557,11 @@ void xe_pm_runtime_put(struct xe_device *xe) */ int xe_pm_runtime_get_ioctl(struct xe_device *xe) { + trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0)); if (WARN_ON(xe_pm_read_callback_task(xe) == current)) return -ELOOP; - pm_runtime_lockdep_prime(); + xe_rpm_might_enter_cb(xe); return pm_runtime_get_sync(xe->drm.dev); } @@ -533,6 +595,22 @@ bool xe_pm_runtime_get_if_in_use(struct xe_device *xe) return pm_runtime_get_if_in_use(xe->drm.dev) > 0; } +/* + * Very unreliable! Should only be used to suppress the false positive case + * in the missing outer rpm protection warning. + */ +static bool xe_pm_suspending_or_resuming(struct xe_device *xe) +{ +#ifdef CONFIG_PM + struct device *dev = xe->drm.dev; + + return dev->power.runtime_status == RPM_SUSPENDING || + dev->power.runtime_status == RPM_RESUMING; +#else + return false; +#endif +} + /** * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming * @xe: xe device instance @@ -549,8 +627,11 @@ void xe_pm_runtime_get_noresume(struct xe_device *xe) ref = xe_pm_runtime_get_if_in_use(xe); - if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n")) + if (!ref) { pm_runtime_get_noresume(xe->drm.dev); + drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe), + "Missing outer runtime PM protection\n"); + } } /** @@ -567,7 +648,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe) return true; } - pm_runtime_lockdep_prime(); + xe_rpm_might_enter_cb(xe); return pm_runtime_resume_and_get(xe->drm.dev) >= 0; } @@ -659,3 +740,14 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe) drm_dbg(&xe->drm, "d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed)); } + +/** + * xe_pm_module_init() - Perform xe_pm specific module initialization. + * + * Return: 0 on success. Currently doesn't fail. 
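xe_pm_runtime_get()/xe_pm_runtime_put() above are thin wrappers around the runtime-PM usage counter: every hardware-touching path brackets its work so the device can only autosuspend once the count drops back to zero, and a nested get never triggers a second resume. A toy single-threaded model of that usage pattern (a hypothetical struct toy_dev, not the PM core's real, asynchronous state machine):

#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
        int usage;              /* how many callers need the HW awake */
        bool suspended;
};

static void toy_get(struct toy_dev *d)
{
        if (d->usage++ == 0 && d->suspended) {
                d->suspended = false;   /* runtime resume */
                printf("resumed\n");
        }
}

static void toy_put(struct toy_dev *d)
{
        if (--d->usage == 0) {
                d->suspended = true;    /* eligible for runtime suspend */
                printf("suspended\n");
        }
}

int main(void)
{
        struct toy_dev d = { .usage = 0, .suspended = true };

        toy_get(&d);            /* outer caller wakes the device */
        toy_get(&d);            /* nested get: no extra resume */
        toy_put(&d);
        toy_put(&d);            /* last put: device may suspend again */
        return 0;
}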
+ */ +int __init xe_pm_module_init(void) +{ + xe_pm_runtime_lockdep_prime(); + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h index 104a21ae6dfd07..998d1ed645560a 100644 --- a/drivers/gpu/drm/xe/xe_pm.h +++ b/drivers/gpu/drm/xe/xe_pm.h @@ -31,6 +31,8 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe); void xe_pm_assert_unbounded_bridge(struct xe_device *xe); int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold); void xe_pm_d3cold_allowed_toggle(struct xe_device *xe); +bool xe_rpm_reclaim_safe(const struct xe_device *xe); struct task_struct *xe_pm_read_callback_task(struct xe_device *xe); +int xe_pm_module_init(void); #endif diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c index c453f45328b1c5..83fbeea5aa201a 100644 --- a/drivers/gpu/drm/xe/xe_preempt_fence.c +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c @@ -17,10 +17,16 @@ static void preempt_fence_work_func(struct work_struct *w) container_of(w, typeof(*pfence), preempt_work); struct xe_exec_queue *q = pfence->q; - if (pfence->error) + if (pfence->error) { dma_fence_set_error(&pfence->base, pfence->error); - else - q->ops->suspend_wait(q); + } else if (!q->ops->reset_status(q)) { + int err = q->ops->suspend_wait(q); + + if (err) + dma_fence_set_error(&pfence->base, err); + } else { + dma_fence_set_error(&pfence->base, -ENOENT); + } dma_fence_signal(&pfence->base); /* diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index 31a751a5de3f1f..f27f579f4d85aa 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -3,18 +3,23 @@ * Copyright © 2022 Intel Corporation */ +#include + #include "xe_pt.h" #include "regs/xe_gtt_defs.h" #include "xe_bo.h" #include "xe_device.h" #include "xe_drm_client.h" +#include "xe_exec_queue.h" #include "xe_gt.h" #include "xe_gt_tlb_invalidation.h" #include "xe_migrate.h" #include "xe_pt_types.h" #include "xe_pt_walk.h" #include "xe_res_cursor.h" +#include "xe_sched_job.h" +#include "xe_sync.h" #include "xe_trace.h" #include "xe_ttm_stolen_mgr.h" #include "xe_vm.h" @@ -325,6 +330,7 @@ xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent, entry->pt = parent; entry->flags = 0; entry->qwords = 0; + entry->pt_bo->update_index = -1; if (alloc_entries) { entry->pt_entries = kmalloc_array(XE_PDES, @@ -842,19 +848,27 @@ xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *t } } -static void xe_pt_abort_bind(struct xe_vma *vma, - struct xe_vm_pgtable_update *entries, - u32 num_entries) +static void xe_pt_cancel_bind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries) { u32 i, j; for (i = 0; i < num_entries; i++) { - if (!entries[i].pt_entries) + struct xe_pt *pt = entries[i].pt; + + if (!pt) continue; - for (j = 0; j < entries[i].qwords; j++) - xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL); + if (pt->level) { + for (j = 0; j < entries[i].qwords; j++) + xe_pt_destroy(entries[i].pt_entries[j].pt, + xe_vma_vm(vma)->flags, NULL); + } + kfree(entries[i].pt_entries); + entries[i].pt_entries = NULL; + entries[i].qwords = 0; } } @@ -864,18 +878,15 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma) lockdep_assert_held(&vm->lock); - if (xe_vma_is_userptr(vma)) - lockdep_assert_held_read(&vm->userptr.notifier_lock); - else if (!xe_vma_is_null(vma)) + if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma)) dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv); xe_vm_assert_held(vm); } -static void 
xe_pt_commit_bind(struct xe_vma *vma, - struct xe_vm_pgtable_update *entries, - u32 num_entries, bool rebind, - struct llist_head *deferred) +static void xe_pt_commit(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries, struct llist_head *deferred) { u32 i, j; @@ -883,31 +894,90 @@ static void xe_pt_commit_bind(struct xe_vma *vma, for (i = 0; i < num_entries; i++) { struct xe_pt *pt = entries[i].pt; + + if (!pt->level) + continue; + + for (j = 0; j < entries[i].qwords; j++) { + struct xe_pt *oldpte = entries[i].pt_entries[j].pt; + + xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred); + } + } +} + +static void xe_pt_abort_bind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries, bool rebind) +{ + int i, j; + + xe_pt_commit_locks_assert(vma); + + for (i = num_entries - 1; i >= 0; --i) { + struct xe_pt *pt = entries[i].pt; struct xe_pt_dir *pt_dir; if (!rebind) - pt->num_live += entries[i].qwords; + pt->num_live -= entries[i].qwords; - if (!pt->level) { - kfree(entries[i].pt_entries); + if (!pt->level) continue; + + pt_dir = as_xe_pt_dir(pt); + for (j = 0; j < entries[i].qwords; j++) { + u32 j_ = j + entries[i].ofs; + struct xe_pt *newpte = xe_pt_entry(pt_dir, j_); + struct xe_pt *oldpte = entries[i].pt_entries[j].pt; + + pt_dir->children[j_] = oldpte ? &oldpte->base : 0; + xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL); } + } +} + +static void xe_pt_commit_prepare_bind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries, bool rebind) +{ + u32 i, j; + + xe_pt_commit_locks_assert(vma); + + for (i = 0; i < num_entries; i++) { + struct xe_pt *pt = entries[i].pt; + struct xe_pt_dir *pt_dir; + + if (!rebind) + pt->num_live += entries[i].qwords; + + if (!pt->level) + continue; pt_dir = as_xe_pt_dir(pt); for (j = 0; j < entries[i].qwords; j++) { u32 j_ = j + entries[i].ofs; struct xe_pt *newpte = entries[i].pt_entries[j].pt; + struct xe_pt *oldpte = NULL; if (xe_pt_entry(pt_dir, j_)) - xe_pt_destroy(xe_pt_entry(pt_dir, j_), - xe_vma_vm(vma)->flags, deferred); + oldpte = xe_pt_entry(pt_dir, j_); pt_dir->children[j_] = &newpte->base; + entries[i].pt_entries[j].pt = oldpte; } - kfree(entries[i].pt_entries); } } +static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries, + u32 num_entries) +{ + u32 i; + + for (i = 0; i < num_entries; i++) + kfree(entries[i].pt_entries); +} + static int xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, struct xe_vm_pgtable_update *entries, u32 *num_entries) @@ -918,20 +988,19 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, err = xe_pt_stage_bind(tile, vma, entries, num_entries); if (!err) xe_tile_assert(tile, *num_entries); - else /* abort! */ - xe_pt_abort_bind(vma, entries, *num_entries); return err; } static void xe_vm_dbg_print_entries(struct xe_device *xe, const struct xe_vm_pgtable_update *entries, - unsigned int num_entries) + unsigned int num_entries, bool bind) #if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)) { unsigned int i; - vm_dbg(&xe->drm, "%u entries to update\n", num_entries); + vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? 
"bind" : "unbind", + num_entries); for (i = 0; i < num_entries; i++) { const struct xe_vm_pgtable_update *entry = &entries[i]; struct xe_pt *xe_pt = entry->pt; @@ -952,66 +1021,108 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe, {} #endif -#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT - -static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) +static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs) { - u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2; - static u32 count; + int i; - if (count++ % divisor == divisor - 1) { - struct xe_vm *vm = xe_vma_vm(&uvma->vma); + for (i = 0; i < num_syncs; i++) { + struct dma_fence *fence = syncs[i].fence; - uvma->userptr.divisor = divisor << 1; - spin_lock(&vm->userptr.invalidated_lock); - list_move_tail(&uvma->userptr.invalidate_link, - &vm->userptr.invalidated); - spin_unlock(&vm->userptr.invalidated_lock); - return true; + if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags)) + return false; } - return false; + return true; } -#else +static int job_test_add_deps(struct xe_sched_job *job, + struct dma_resv *resv, + enum dma_resv_usage usage) +{ + if (!job) { + if (!dma_resv_test_signaled(resv, usage)) + return -ETIME; -static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) + return 0; + } + + return xe_sched_job_add_deps(job, resv, usage); +} + +static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job) { - return false; + struct xe_bo *bo = xe_vma_bo(vma); + + xe_bo_assert_held(bo); + + if (bo && !bo->vm) + return job_test_add_deps(job, bo->ttm.base.resv, + DMA_RESV_USAGE_KERNEL); + + return 0; } -#endif +static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op, + struct xe_sched_job *job) +{ + int err = 0; -/** - * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks - * @base: Base we derive from. - * @bind: Whether this is a bind or an unbind operation. A bind operation - * makes the pre-commit callback error with -EAGAIN if it detects a - * pending invalidation. - * @locked: Whether the pre-commit callback locked the userptr notifier lock - * and it needs unlocking. - */ -struct xe_pt_migrate_pt_update { - struct xe_migrate_pt_update base; - bool bind; - bool locked; -}; + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + if (!op->map.immediate && xe_vm_in_fault_mode(vm)) + break; + + err = vma_add_deps(op->map.vma, job); + break; + case DRM_GPUVA_OP_REMAP: + if (op->remap.prev) + err = vma_add_deps(op->remap.prev, job); + if (!err && op->remap.next) + err = vma_add_deps(op->remap.next, job); + break; + case DRM_GPUVA_OP_UNMAP: + break; + case DRM_GPUVA_OP_PREFETCH: + err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job); + break; + default: + drm_warn(&vm->xe->drm, "NOT POSSIBLE"); + } + + return err; +} -/* - * This function adds the needed dependencies to a page-table update job - * to make sure racing jobs for separate bind engines don't race writing - * to the same page-table range, wreaking havoc. Initially use a single - * fence for the entire VM. An optimization would use smaller granularity. 
- */ static int xe_pt_vm_dependencies(struct xe_sched_job *job, - struct xe_range_fence_tree *rftree, - u64 start, u64 last) + struct xe_vm *vm, + struct xe_vma_ops *vops, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_range_fence_tree *rftree) { struct xe_range_fence *rtfence; struct dma_fence *fence; - int err; + struct xe_vma_op *op; + int err = 0, i; + + xe_vm_assert_held(vm); + + if (!job && !no_in_syncs(vops->syncs, vops->num_syncs)) + return -ETIME; - rtfence = xe_range_fence_tree_first(rftree, start, last); + if (!job && !xe_exec_queue_is_idle(pt_update_ops->q)) + return -ETIME; + + if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) { + err = job_test_add_deps(job, xe_vm_resv(vm), + pt_update_ops->wait_vm_bookkeep ? + DMA_RESV_USAGE_BOOKKEEP : + DMA_RESV_USAGE_KERNEL); + if (err) + return err; + } + + rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start, + pt_update_ops->last); while (rtfence) { fence = rtfence->fence; @@ -1029,80 +1140,175 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job, return err; } - rtfence = xe_range_fence_tree_next(rtfence, start, last); + rtfence = xe_range_fence_tree_next(rtfence, + pt_update_ops->start, + pt_update_ops->last); } - return 0; + list_for_each_entry(op, &vops->list, link) { + err = op_add_deps(vm, op, job); + if (err) + return err; + } + + if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) { + if (job) + err = xe_sched_job_last_fence_add_dep(job, vm); + else + err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm); + } + + for (i = 0; job && !err && i < vops->num_syncs; i++) + err = xe_sync_entry_add_deps(&vops->syncs[i], job); + + return err; } static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update) { - struct xe_range_fence_tree *rftree = - &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id]; + struct xe_vma_ops *vops = pt_update->vops; + struct xe_vm *vm = vops->vm; + struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id]; + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[pt_update->tile_id]; + + return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops, + pt_update_ops, rftree); +} + +#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT - return xe_pt_vm_dependencies(pt_update->job, rftree, - pt_update->start, pt_update->last); +static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) +{ + u32 divisor = uvma->userptr.divisor ? 
uvma->userptr.divisor : 2; + static u32 count; + + if (count++ % divisor == divisor - 1) { + uvma->userptr.divisor = divisor << 1; + return true; + } + + return false; } -static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) +#else + +static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) { - struct xe_pt_migrate_pt_update *userptr_update = - container_of(pt_update, typeof(*userptr_update), base); - struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma); - unsigned long notifier_seq = uvma->userptr.notifier_seq; - struct xe_vm *vm = xe_vma_vm(&uvma->vma); - int err = xe_pt_vm_dependencies(pt_update->job, - &vm->rftree[pt_update->tile_id], - pt_update->start, - pt_update->last); + return false; +} - if (err) - return err; +#endif - userptr_update->locked = false; +static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, + struct xe_vm_pgtable_update_ops *pt_update) +{ + struct xe_userptr_vma *uvma; + unsigned long notifier_seq; - /* - * Wait until nobody is running the invalidation notifier, and - * since we're exiting the loop holding the notifier lock, - * nobody can proceed invalidating either. - * - * Note that we don't update the vma->userptr.notifier_seq since - * we don't update the userptr pages. - */ - do { - down_read(&vm->userptr.notifier_lock); - if (!mmu_interval_read_retry(&uvma->userptr.notifier, - notifier_seq)) - break; + lockdep_assert_held_read(&vm->userptr.notifier_lock); - up_read(&vm->userptr.notifier_lock); + if (!xe_vma_is_userptr(vma)) + return 0; - if (userptr_update->bind) - return -EAGAIN; + uvma = to_userptr_vma(vma); + notifier_seq = uvma->userptr.notifier_seq; - notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier); - } while (true); + if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm)) + return 0; - /* Inject errors to test_whether they are handled correctly */ - if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) { - up_read(&vm->userptr.notifier_lock); + if (!mmu_interval_read_retry(&uvma->userptr.notifier, + notifier_seq) && + !xe_pt_userptr_inject_eagain(uvma)) + return 0; + + if (xe_vm_in_fault_mode(vm)) { return -EAGAIN; - } + } else { + spin_lock(&vm->userptr.invalidated_lock); + list_move_tail(&uvma->userptr.invalidate_link, + &vm->userptr.invalidated); + spin_unlock(&vm->userptr.invalidated_lock); - userptr_update->locked = true; + if (xe_vm_in_preempt_fence_mode(vm)) { + struct dma_resv_iter cursor; + struct dma_fence *fence; + long err; + + dma_resv_iter_begin(&cursor, xe_vm_resv(vm), + DMA_RESV_USAGE_BOOKKEEP); + dma_resv_for_each_fence_unlocked(&cursor, fence) + dma_fence_enable_sw_signaling(fence); + dma_resv_iter_end(&cursor); + + err = dma_resv_wait_timeout(xe_vm_resv(vm), + DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); + XE_WARN_ON(err <= 0); + } + } return 0; } -static const struct xe_migrate_pt_update_ops bind_ops = { - .populate = xe_vm_populate_pgtable, - .pre_commit = xe_pt_pre_commit, -}; +static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op, + struct xe_vm_pgtable_update_ops *pt_update) +{ + int err = 0; -static const struct xe_migrate_pt_update_ops userptr_bind_ops = { - .populate = xe_vm_populate_pgtable, - .pre_commit = xe_pt_userptr_pre_commit, -}; + lockdep_assert_held_read(&vm->userptr.notifier_lock); + + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + if (!op->map.immediate && xe_vm_in_fault_mode(vm)) + break; + + err = vma_check_userptr(vm, op->map.vma, pt_update); + break; + case DRM_GPUVA_OP_REMAP: + if (op->remap.prev) 
+ err = vma_check_userptr(vm, op->remap.prev, pt_update); + if (!err && op->remap.next) + err = vma_check_userptr(vm, op->remap.next, pt_update); + break; + case DRM_GPUVA_OP_UNMAP: + break; + case DRM_GPUVA_OP_PREFETCH: + err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), + pt_update); + break; + default: + drm_warn(&vm->xe->drm, "NOT POSSIBLE"); + } + + return err; +} + +static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) +{ + struct xe_vm *vm = pt_update->vops->vm; + struct xe_vma_ops *vops = pt_update->vops; + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[pt_update->tile_id]; + struct xe_vma_op *op; + int err; + + err = xe_pt_pre_commit(pt_update); + if (err) + return err; + + down_read(&vm->userptr.notifier_lock); + + list_for_each_entry(op, &vops->list, link) { + err = op_check_userptr(vm, op, pt_update_ops); + if (err) { + up_read(&vm->userptr.notifier_lock); + break; + } + } + + return err; +} struct invalidation_fence { struct xe_gt_tlb_invalidation_fence base; @@ -1144,10 +1350,10 @@ static void invalidation_fence_work_func(struct work_struct *w) ifence->end, ifence->asid); } -static int invalidation_fence_init(struct xe_gt *gt, - struct invalidation_fence *ifence, - struct dma_fence *fence, - u64 start, u64 end, u32 asid) +static void invalidation_fence_init(struct xe_gt *gt, + struct invalidation_fence *ifence, + struct dma_fence *fence, + u64 start, u64 end, u32 asid) { int ret; @@ -1172,192 +1378,6 @@ static int invalidation_fence_init(struct xe_gt *gt, } xe_gt_assert(gt, !ret || ret == -ENOENT); - - return ret && ret != -ENOENT ? ret : 0; -} - -static void xe_pt_calc_rfence_interval(struct xe_vma *vma, - struct xe_pt_migrate_pt_update *update, - struct xe_vm_pgtable_update *entries, - u32 num_entries) -{ - int i, level = 0; - - for (i = 0; i < num_entries; i++) { - const struct xe_vm_pgtable_update *entry = &entries[i]; - - if (entry->pt->level > level) - level = entry->pt->level; - } - - /* Greedy (non-optimal) calculation but simple */ - update->base.start = ALIGN_DOWN(xe_vma_start(vma), - 0x1ull << xe_pt_shift(level)); - update->base.last = ALIGN(xe_vma_end(vma), - 0x1ull << xe_pt_shift(level)) - 1; -} - -/** - * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma - * address range. - * @tile: The tile to bind for. - * @vma: The vma to bind. - * @q: The exec_queue with which to do pipelined page-table updates. - * @syncs: Entries to sync on before binding the built tree to the live vm tree. - * @num_syncs: Number of @sync entries. - * @rebind: Whether we're rebinding this vma to the same address range without - * an unbind in-between. - * - * This function builds a page-table tree (see xe_pt_stage_bind() for more - * information on page-table building), and the xe_vm_pgtable_update entries - * abstracting the operations needed to attach it to the main vm tree. It - * then takes the relevant locks and updates the metadata side of the main - * vm tree and submits the operations for pipelined attachment of the - * gpu page-table to the vm main tree, (which can be done either by the - * cpu and the GPU). - * - * Return: A valid dma-fence representing the pipelined attachment operation - * on success, an error pointer on error. 
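The removed xe_pt_calc_rfence_interval() (and its replacement xe_pt_update_ops_rfence_interval() further down) computes the range-fence interval greedily: whatever the highest page-table level touched by the update is, the VMA range is rounded out to that level's granularity so the fence covers every entry the update can modify. A standalone sketch of the alignment arithmetic, assuming the usual 4 KiB pages with 9 bits per level:

#include <inttypes.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((a) - 1))

/* Page-table shift per level for a 4K / 9-bit walk: 12, 21, 30, 39. */
static unsigned int pt_shift(unsigned int level)
{
        return 12 + 9 * level;
}

int main(void)
{
        uint64_t start = 0x1234567000ull, end = 0x1234571000ull;
        unsigned int level = 1;         /* highest level touched by the update */
        uint64_t a = 1ull << pt_shift(level);

        /* Greedy covering interval: round out to the largest touched level. */
        printf("fence range [%#" PRIx64 ", %#" PRIx64 "]\n",
               ALIGN_DOWN(start, a), ALIGN(end, a) - 1);
        return 0;
}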
- */ -struct dma_fence * -__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - bool rebind) -{ - struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1]; - struct xe_pt_migrate_pt_update bind_pt_update = { - .base = { - .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops, - .vma = vma, - .tile_id = tile->id, - }, - .bind = true, - }; - struct xe_vm *vm = xe_vma_vm(vma); - u32 num_entries; - struct dma_fence *fence; - struct invalidation_fence *ifence = NULL; - struct xe_range_fence *rfence; - int err; - - bind_pt_update.locked = false; - xe_bo_assert_held(xe_vma_bo(vma)); - xe_vm_assert_held(vm); - - vm_dbg(&xe_vma_vm(vma)->xe->drm, - "Preparing bind, with range [%llx...%llx) engine %p.\n", - xe_vma_start(vma), xe_vma_end(vma), q); - - err = xe_pt_prepare_bind(tile, vma, entries, &num_entries); - if (err) - goto err; - - err = dma_resv_reserve_fences(xe_vm_resv(vm), 1); - if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) - err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1); - if (err) - goto err; - - xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries)); - - xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries); - xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries, - num_entries); - - /* - * If rebind, we have to invalidate TLB on !LR vms to invalidate - * cached PTEs point to freed memory. on LR vms this is done - * automatically when the context is re-enabled by the rebind worker, - * or in fault mode it was invalidated on PTE zapping. - * - * If !rebind, and scratch enabled VMs, there is a chance the scratch - * PTE is already cached in the TLB so it needs to be invalidated. - * on !LR VMs this is done in the ring ops preceding a batch, but on - * non-faulting LR, in particular on user-space batch buffer chaining, - * it needs to be done here. - */ - if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) { - ifence = kzalloc(sizeof(*ifence), GFP_KERNEL); - if (!ifence) - return ERR_PTR(-ENOMEM); - } else if (rebind && !xe_vm_in_lr_mode(vm)) { - /* We bump also if batch_invalidate_tlb is true */ - vm->tlb_flush_seqno++; - } - - rfence = kzalloc(sizeof(*rfence), GFP_KERNEL); - if (!rfence) { - kfree(ifence); - return ERR_PTR(-ENOMEM); - } - - fence = xe_migrate_update_pgtables(tile->migrate, - vm, xe_vma_bo(vma), q, - entries, num_entries, - syncs, num_syncs, - &bind_pt_update.base); - if (!IS_ERR(fence)) { - bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND; - LLIST_HEAD(deferred); - int err; - - err = xe_range_fence_insert(&vm->rftree[tile->id], rfence, - &xe_range_fence_kfree_ops, - bind_pt_update.base.start, - bind_pt_update.base.last, fence); - if (err) - dma_fence_wait(fence, false); - - /* TLB invalidation must be done before signaling rebind */ - if (ifence) { - int err = invalidation_fence_init(tile->primary_gt, - ifence, fence, - xe_vma_start(vma), - xe_vma_end(vma), - xe_vma_vm(vma)->usm.asid); - if (err) { - dma_fence_put(fence); - kfree(ifence); - return ERR_PTR(err); - } - fence = &ifence->base.base; - } - - /* add shared fence now for pagetable delayed destroy */ - dma_resv_add_fence(xe_vm_resv(vm), fence, rebind || - last_munmap_rebind ? 
- DMA_RESV_USAGE_KERNEL : - DMA_RESV_USAGE_BOOKKEEP); - - if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) - dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, - DMA_RESV_USAGE_BOOKKEEP); - xe_pt_commit_bind(vma, entries, num_entries, rebind, - bind_pt_update.locked ? &deferred : NULL); - - /* This vma is live (again?) now */ - vma->tile_present |= BIT(tile->id); - - if (bind_pt_update.locked) { - to_userptr_vma(vma)->userptr.initial_bind = true; - up_read(&vm->userptr.notifier_lock); - xe_bo_put_commit(&deferred); - } - if (!rebind && last_munmap_rebind && - xe_vm_in_preempt_fence_mode(vm)) - xe_vm_queue_rebind_worker(vm); - } else { - kfree(rfence); - kfree(ifence); - if (bind_pt_update.locked) - up_read(&vm->userptr.notifier_lock); - xe_pt_abort_bind(vma, entries, num_entries); - } - - return fence; - -err: - return ERR_PTR(err); } struct xe_pt_stage_unbind_walk { @@ -1442,6 +1462,7 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset, struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base); pgoff_t end_offset; u64 size = 1ull << walk->shifts[--level]; + int err; if (!IS_ALIGNED(addr, size)) addr = xe_walk->modified_start; @@ -1457,7 +1478,10 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset, &end_offset)) return 0; - (void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false); + err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true); + if (err) + return err; + xe_walk->wupd.updates[level].update->qwords = end_offset - offset; return 0; @@ -1510,8 +1534,8 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update, void *ptr, u32 qword_ofs, u32 num_qwords, const struct xe_vm_pgtable_update *update) { - struct xe_vma *vma = pt_update->vma; - u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level); + struct xe_vm *vm = pt_update->vops->vm; + u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level); int i; if (map && map->is_iomem) @@ -1525,181 +1549,644 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update, memset64(ptr, empty, num_qwords); } +static void xe_pt_abort_unbind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries) +{ + int i, j; + + xe_pt_commit_locks_assert(vma); + + for (i = num_entries - 1; i >= 0; --i) { + struct xe_vm_pgtable_update *entry = &entries[i]; + struct xe_pt *pt = entry->pt; + struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); + + pt->num_live += entry->qwords; + + if (!pt->level) + continue; + + for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) + pt_dir->children[j] = + entries[i].pt_entries[j - entry->ofs].pt ? 
+ &entries[i].pt_entries[j - entry->ofs].pt->base : NULL; + } +} + static void -xe_pt_commit_unbind(struct xe_vma *vma, - struct xe_vm_pgtable_update *entries, u32 num_entries, - struct llist_head *deferred) +xe_pt_commit_prepare_unbind(struct xe_vma *vma, + struct xe_vm_pgtable_update *entries, + u32 num_entries) { - u32 j; + int i, j; xe_pt_commit_locks_assert(vma); - for (j = 0; j < num_entries; ++j) { - struct xe_vm_pgtable_update *entry = &entries[j]; + for (i = 0; i < num_entries; ++i) { + struct xe_vm_pgtable_update *entry = &entries[i]; struct xe_pt *pt = entry->pt; + struct xe_pt_dir *pt_dir; pt->num_live -= entry->qwords; - if (pt->level) { - struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); - u32 i; + if (!pt->level) + continue; - for (i = entry->ofs; i < entry->ofs + entry->qwords; - i++) { - if (xe_pt_entry(pt_dir, i)) - xe_pt_destroy(xe_pt_entry(pt_dir, i), - xe_vma_vm(vma)->flags, deferred); + pt_dir = as_xe_pt_dir(pt); + for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) { + entry->pt_entries[j - entry->ofs].pt = + xe_pt_entry(pt_dir, j); + pt_dir->children[j] = NULL; + } + } +} - pt_dir->children[i] = NULL; - } +static void +xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma *vma) +{ + u32 current_op = pt_update_ops->current_op; + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; + int i, level = 0; + u64 start, last; + + for (i = 0; i < pt_op->num_entries; i++) { + const struct xe_vm_pgtable_update *entry = &pt_op->entries[i]; + + if (entry->pt->level > level) + level = entry->pt->level; + } + + /* Greedy (non-optimal) calculation but simple */ + start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level)); + last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1; + + if (start < pt_update_ops->start) + pt_update_ops->start = start; + if (last > pt_update_ops->last) + pt_update_ops->last = last; +} + +static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma) +{ + int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0; + + if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) + return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, + xe->info.tile_count << shift); + + return 0; +} + +static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma *vma) +{ + u32 current_op = pt_update_ops->current_op; + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; + int err; + + xe_bo_assert_held(xe_vma_bo(vma)); + + vm_dbg(&xe_vma_vm(vma)->xe->drm, + "Preparing bind, with range [%llx...%llx)\n", + xe_vma_start(vma), xe_vma_end(vma) - 1); + + pt_op->vma = NULL; + pt_op->bind = true; + pt_op->rebind = BIT(tile->id) & vma->tile_present; + + err = vma_reserve_fences(tile_to_xe(tile), vma); + if (err) + return err; + + err = xe_pt_prepare_bind(tile, vma, pt_op->entries, + &pt_op->num_entries); + if (!err) { + xe_tile_assert(tile, pt_op->num_entries <= + ARRAY_SIZE(pt_op->entries)); + xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, + pt_op->num_entries, true); + + xe_pt_update_ops_rfence_interval(pt_update_ops, vma); + ++pt_update_ops->current_op; + pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma); + + /* + * If rebind, we have to invalidate TLB on !LR vms to invalidate + * cached PTEs point to freed memory. On LR vms this is done + * automatically when the context is re-enabled by the rebind worker, + * or in fault mode it was invalidated on PTE zapping. 
+ * + * If !rebind, and scratch enabled VMs, there is a chance the scratch + * PTE is already cached in the TLB so it needs to be invalidated. + * On !LR VMs this is done in the ring ops preceding a batch, but on + * non-faulting LR, in particular on user-space batch buffer chaining, + * it needs to be done here. + */ + if ((!pt_op->rebind && xe_vm_has_scratch(vm) && + xe_vm_in_preempt_fence_mode(vm))) + pt_update_ops->needs_invalidation = true; + else if (pt_op->rebind && !xe_vm_in_lr_mode(vm)) + /* We bump also if batch_invalidate_tlb is true */ + vm->tlb_flush_seqno++; + + vma->tile_staged |= BIT(tile->id); + pt_op->vma = vma; + xe_pt_commit_prepare_bind(vma, pt_op->entries, + pt_op->num_entries, pt_op->rebind); + } else { + xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries); + } + + return err; +} + +static int unbind_op_prepare(struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma *vma) +{ + u32 current_op = pt_update_ops->current_op; + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; + int err; + + if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id))) + return 0; + + xe_bo_assert_held(xe_vma_bo(vma)); + + vm_dbg(&xe_vma_vm(vma)->xe->drm, + "Preparing unbind, with range [%llx...%llx)\n", + xe_vma_start(vma), xe_vma_end(vma) - 1); + + /* + * Wait for invalidation to complete. Can corrupt internal page table + * state if an invalidation is running while preparing an unbind. + */ + if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma))) + mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier); + + pt_op->vma = vma; + pt_op->bind = false; + pt_op->rebind = false; + + err = vma_reserve_fences(tile_to_xe(tile), vma); + if (err) + return err; + + pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries); + + xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries, + pt_op->num_entries, false); + xe_pt_update_ops_rfence_interval(pt_update_ops, vma); + ++pt_update_ops->current_op; + pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma); + pt_update_ops->needs_invalidation = true; + + xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries); + + return 0; +} + +static int op_prepare(struct xe_vm *vm, + struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma_op *op) +{ + int err = 0; + + xe_vm_assert_held(vm); + + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + if (!op->map.immediate && xe_vm_in_fault_mode(vm)) + break; + + err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma); + pt_update_ops->wait_vm_kernel = true; + break; + case DRM_GPUVA_OP_REMAP: + err = unbind_op_prepare(tile, pt_update_ops, + gpuva_to_vma(op->base.remap.unmap->va)); + + if (!err && op->remap.prev) { + err = bind_op_prepare(vm, tile, pt_update_ops, + op->remap.prev); + pt_update_ops->wait_vm_bookkeep = true; } + if (!err && op->remap.next) { + err = bind_op_prepare(vm, tile, pt_update_ops, + op->remap.next); + pt_update_ops->wait_vm_bookkeep = true; + } + break; + case DRM_GPUVA_OP_UNMAP: + err = unbind_op_prepare(tile, pt_update_ops, + gpuva_to_vma(op->base.unmap.va)); + break; + case DRM_GPUVA_OP_PREFETCH: + err = bind_op_prepare(vm, tile, pt_update_ops, + gpuva_to_vma(op->base.prefetch.va)); + pt_update_ops->wait_vm_kernel = true; + break; + default: + drm_warn(&vm->xe->drm, "NOT POSSIBLE"); } + + return err; } -static const struct xe_migrate_pt_update_ops unbind_ops = { - .populate = xe_migrate_clear_pgtable_callback, +static void 
+xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops) +{ + init_llist_head(&pt_update_ops->deferred); + pt_update_ops->start = ~0x0ull; + pt_update_ops->last = 0x0ull; +} + +/** + * xe_pt_update_ops_prepare() - Prepare PT update operations + * @tile: Tile of PT update operations + * @vops: VMA operations + * + * Prepare PT update operations, which includes updating internal PT state, + * allocating memory for page tables, populating page tables being pruned in, and + * creating PT update operations for leaf insertion / removal. + * + * Return: 0 on success, negative error code on error. + */ +int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops) +{ + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[tile->id]; + struct xe_vma_op *op; + int shift = tile->media_gt ? 1 : 0; + int err; + + lockdep_assert_held(&vops->vm->lock); + xe_vm_assert_held(vops->vm); + + xe_pt_update_ops_init(pt_update_ops); + + err = dma_resv_reserve_fences(xe_vm_resv(vops->vm), + tile_to_xe(tile)->info.tile_count << shift); + if (err) + return err; + + list_for_each_entry(op, &vops->list, link) { + err = op_prepare(vops->vm, tile, pt_update_ops, op); + + if (err) + return err; + } + + xe_tile_assert(tile, pt_update_ops->current_op <= + pt_update_ops->num_ops); + +#ifdef TEST_VM_OPS_ERROR + if (vops->inject_error && + vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE) + return -ENOSPC; +#endif + + return 0; +} + +static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma *vma, struct dma_fence *fence, + struct dma_fence *fence2) +{ + if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) { + dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, + pt_update_ops->wait_vm_bookkeep ? + DMA_RESV_USAGE_KERNEL : + DMA_RESV_USAGE_BOOKKEEP); + if (fence2) + dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2, + pt_update_ops->wait_vm_bookkeep ? + DMA_RESV_USAGE_KERNEL : + DMA_RESV_USAGE_BOOKKEEP); + } + vma->tile_present |= BIT(tile->id); + vma->tile_staged &= ~BIT(tile->id); + if (xe_vma_is_userptr(vma)) { + lockdep_assert_held_read(&vm->userptr.notifier_lock); + to_userptr_vma(vma)->userptr.initial_bind = true; + } + + /* + * Kick rebind worker if this bind triggers preempt fences and not in + * the rebind worker + */ + if (pt_update_ops->wait_vm_bookkeep && + xe_vm_in_preempt_fence_mode(vm) && + !current->mm) + xe_vm_queue_rebind_worker(vm); +} + +static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma *vma, struct dma_fence *fence, + struct dma_fence *fence2) +{ + if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) { + dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, + pt_update_ops->wait_vm_bookkeep ? + DMA_RESV_USAGE_KERNEL : + DMA_RESV_USAGE_BOOKKEEP); + if (fence2) + dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2, + pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL : + DMA_RESV_USAGE_BOOKKEEP); + } + vma->tile_present &= ~BIT(tile->id); + if (!vma->tile_present) { + list_del_init(&vma->combined_links.rebind); + if (xe_vma_is_userptr(vma)) { + lockdep_assert_held_read(&vm->userptr.notifier_lock); + + spin_lock(&vm->userptr.invalidated_lock); + list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link); + spin_unlock(&vm->userptr.invalidated_lock); + } + } +} + +static void op_commit(struct xe_vm *vm, + struct xe_tile *tile, + struct xe_vm_pgtable_update_ops *pt_update_ops, + struct xe_vma_op *op, struct dma_fence *fence, + struct dma_fence *fence2) +{ + xe_vm_assert_held(vm); + + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + if (!op->map.immediate && xe_vm_in_fault_mode(vm)) + break; + + bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence, + fence2); + break; + case DRM_GPUVA_OP_REMAP: + unbind_op_commit(vm, tile, pt_update_ops, + gpuva_to_vma(op->base.remap.unmap->va), fence, + fence2); + + if (op->remap.prev) + bind_op_commit(vm, tile, pt_update_ops, op->remap.prev, + fence, fence2); + if (op->remap.next) + bind_op_commit(vm, tile, pt_update_ops, op->remap.next, + fence, fence2); + break; + case DRM_GPUVA_OP_UNMAP: + unbind_op_commit(vm, tile, pt_update_ops, + gpuva_to_vma(op->base.unmap.va), fence, fence2); + break; + case DRM_GPUVA_OP_PREFETCH: + bind_op_commit(vm, tile, pt_update_ops, + gpuva_to_vma(op->base.prefetch.va), fence, fence2); + break; + default: + drm_warn(&vm->xe->drm, "NOT POSSIBLE"); + } +} + +static const struct xe_migrate_pt_update_ops migrate_ops = { + .populate = xe_vm_populate_pgtable, + .clear = xe_migrate_clear_pgtable_callback, .pre_commit = xe_pt_pre_commit, }; -static const struct xe_migrate_pt_update_ops userptr_unbind_ops = { - .populate = xe_migrate_clear_pgtable_callback, +static const struct xe_migrate_pt_update_ops userptr_migrate_ops = { + .populate = xe_vm_populate_pgtable, + .clear = xe_migrate_clear_pgtable_callback, .pre_commit = xe_pt_userptr_pre_commit, }; /** - * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma - * address range. - * @tile: The tile to unbind for. - * @vma: The vma to unbind. - * @q: The exec_queue with which to do pipelined page-table updates. - * @syncs: Entries to sync on before disconnecting the tree to be destroyed. - * @num_syncs: Number of @sync entries. + * xe_pt_update_ops_run() - Run PT update operations + * @tile: Tile of PT update operations + * @vops: VMA operations * - * This function builds a the xe_vm_pgtable_update entries abstracting the - * operations needed to detach the page-table tree to be destroyed from the - * man vm tree. - * It then takes the relevant locks and submits the operations for - * pipelined detachment of the gpu page-table from the vm main tree, - * (which can be done either by the cpu and the GPU), Finally it frees the - * detached page-table tree. + * Run PT update operations, which includes committing internal PT state changes, + * creating a job for the PT update operations for leaf insertion / removal, and + * installing the job fence in various places. * - * Return: A valid dma-fence representing the pipelined detachment operation - * on success, an error pointer on error. + * Return: fence on success, negative ERR_PTR on error.
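+ *
+ * A rough sketch of the intended per-tile call sequence (illustrative only,
+ * with simplified error handling; a real caller such as the VM bind path
+ * also deals with locking, syncs and multi-tile fence composition):
+ *
+ * .. code-block:: c
+ *
+ *	err = xe_pt_update_ops_prepare(tile, vops);
+ *	if (err)
+ *		goto abort;
+ *
+ *	fence = xe_pt_update_ops_run(tile, vops);
+ *	if (IS_ERR(fence))
+ *		goto abort;
+ *
+ *	xe_pt_update_ops_fini(tile, vops);
+ *	return fence;
+ *
+ * abort:
+ *	xe_pt_update_ops_abort(tile, vops);
+ *	return err ? ERR_PTR(err) : fence;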
*/ struct dma_fence * -__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs) +xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) { - struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1]; - struct xe_pt_migrate_pt_update unbind_pt_update = { - .base = { - .ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops : - &unbind_ops, - .vma = vma, - .tile_id = tile->id, - }, - }; - struct xe_vm *vm = xe_vma_vm(vma); - u32 num_entries; - struct dma_fence *fence = NULL; - struct invalidation_fence *ifence; + struct xe_vm *vm = vops->vm; + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[tile->id]; + struct dma_fence *fence; + struct invalidation_fence *ifence = NULL, *mfence = NULL; + struct dma_fence **fences = NULL; + struct dma_fence_array *cf = NULL; struct xe_range_fence *rfence; - int err; - - LLIST_HEAD(deferred); + struct xe_vma_op *op; + int err = 0, i; + struct xe_migrate_pt_update update = { + .ops = pt_update_ops->needs_userptr_lock ? + &userptr_migrate_ops : + &migrate_ops, + .vops = vops, + .tile_id = tile->id, + }; - xe_bo_assert_held(xe_vma_bo(vma)); + lockdep_assert_held(&vm->lock); xe_vm_assert_held(vm); - vm_dbg(&xe_vma_vm(vma)->xe->drm, - "Preparing unbind, with range [%llx...%llx) engine %p.\n", - xe_vma_start(vma), xe_vma_end(vma), q); + if (!pt_update_ops->current_op) { + xe_tile_assert(tile, xe_vm_in_fault_mode(vm)); - num_entries = xe_pt_stage_unbind(tile, vma, entries); - xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries)); - - xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries); - xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries, - num_entries); + return dma_fence_get_stub(); + } - err = dma_resv_reserve_fences(xe_vm_resv(vm), 1); - if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) - err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1); - if (err) - return ERR_PTR(err); +#ifdef TEST_VM_OPS_ERROR + if (vops->inject_error && + vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN) + return ERR_PTR(-ENOSPC); +#endif - ifence = kzalloc(sizeof(*ifence), GFP_KERNEL); - if (!ifence) - return ERR_PTR(-ENOMEM); + if (pt_update_ops->needs_invalidation) { + ifence = kzalloc(sizeof(*ifence), GFP_KERNEL); + if (!ifence) { + err = -ENOMEM; + goto kill_vm_tile1; + } + if (tile->media_gt) { + mfence = kzalloc(sizeof(*ifence), GFP_KERNEL); + if (!mfence) { + err = -ENOMEM; + goto free_ifence; + } + fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL); + if (!fences) { + err = -ENOMEM; + goto free_ifence; + } + cf = dma_fence_array_alloc(2); + if (!cf) { + err = -ENOMEM; + goto free_ifence; + } + } + } rfence = kzalloc(sizeof(*rfence), GFP_KERNEL); if (!rfence) { - kfree(ifence); - return ERR_PTR(-ENOMEM); + err = -ENOMEM; + goto free_ifence; } - /* - * Even if we were already evicted and unbind to destroy, we need to - * clear again here. The eviction may have updated pagetables at a - * lower level, because it needs to be more conservative. - */ - fence = xe_migrate_update_pgtables(tile->migrate, - vm, NULL, q ? 
q : - vm->q[tile->id], - entries, num_entries, - syncs, num_syncs, - &unbind_pt_update.base); - if (!IS_ERR(fence)) { - int err; - - err = xe_range_fence_insert(&vm->rftree[tile->id], rfence, - &xe_range_fence_kfree_ops, - unbind_pt_update.base.start, - unbind_pt_update.base.last, fence); - if (err) - dma_fence_wait(fence, false); + fence = xe_migrate_update_pgtables(tile->migrate, &update); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto free_rfence; + } - /* TLB invalidation must be done before signaling unbind */ - err = invalidation_fence_init(tile->primary_gt, ifence, fence, - xe_vma_start(vma), - xe_vma_end(vma), - xe_vma_vm(vma)->usm.asid); - if (err) { - dma_fence_put(fence); - kfree(ifence); - return ERR_PTR(err); + /* Point of no return - VM killed if failure after this */ + for (i = 0; i < pt_update_ops->current_op; ++i) { + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i]; + + xe_pt_commit(pt_op->vma, pt_op->entries, + pt_op->num_entries, &pt_update_ops->deferred); + pt_op->vma = NULL; /* skip in xe_pt_update_ops_abort */ + } + + if (xe_range_fence_insert(&vm->rftree[tile->id], rfence, + &xe_range_fence_kfree_ops, + pt_update_ops->start, + pt_update_ops->last, fence)) + dma_fence_wait(fence, false); + + /* tlb invalidation must be done before signaling rebind */ + if (ifence) { + if (mfence) + dma_fence_get(fence); + invalidation_fence_init(tile->primary_gt, ifence, fence, + pt_update_ops->start, + pt_update_ops->last, vm->usm.asid); + if (mfence) { + invalidation_fence_init(tile->media_gt, mfence, fence, + pt_update_ops->start, + pt_update_ops->last, vm->usm.asid); + fences[0] = &ifence->base.base; + fences[1] = &mfence->base.base; + dma_fence_array_init(cf, 2, fences, + vm->composite_fence_ctx, + vm->composite_fence_seqno++, + false); + fence = &cf->base; + } else { + fence = &ifence->base.base; } - fence = &ifence->base.base; + } - /* add shared fence now for pagetable delayed destroy */ + if (!mfence) { dma_resv_add_fence(xe_vm_resv(vm), fence, + pt_update_ops->wait_vm_bookkeep ? + DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_BOOKKEEP); - /* This fence will be installed by caller when doing eviction */ - if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) - dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, - DMA_RESV_USAGE_BOOKKEEP); - xe_pt_commit_unbind(vma, entries, num_entries, - unbind_pt_update.locked ? &deferred : NULL); - vma->tile_present &= ~BIT(tile->id); + list_for_each_entry(op, &vops->list, link) + op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL); } else { - kfree(rfence); - kfree(ifence); - } + dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base, + pt_update_ops->wait_vm_bookkeep ? + DMA_RESV_USAGE_KERNEL : + DMA_RESV_USAGE_BOOKKEEP); - if (!vma->tile_present) - list_del_init(&vma->combined_links.rebind); + dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base, + pt_update_ops->wait_vm_bookkeep ? 
+ DMA_RESV_USAGE_KERNEL : + DMA_RESV_USAGE_BOOKKEEP); - - if (unbind_pt_update.locked) { - xe_tile_assert(tile, xe_vma_is_userptr(vma)); + list_for_each_entry(op, &vops->list, link) + op_commit(vops->vm, tile, pt_update_ops, op, + &ifence->base.base, &mfence->base.base); + } - if (!vma->tile_present) { - spin_lock(&vm->userptr.invalidated_lock); - list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link); - spin_unlock(&vm->userptr.invalidated_lock); - } + if (pt_update_ops->needs_userptr_lock) up_read(&vm->userptr.notifier_lock); - xe_bo_put_commit(&deferred); - } return fence; + +free_rfence: + kfree(rfence); +free_ifence: + kfree(cf); + kfree(fences); + kfree(mfence); + kfree(ifence); +kill_vm_tile1: + if (err != -EAGAIN && tile->id) + xe_vm_kill(vops->vm, false); + + return ERR_PTR(err); +} + +/** + * xe_pt_update_ops_fini() - Finish PT update operations + * @tile: Tile of PT update operations + * @vops: VMA operations + * + * Finish PT update operations by committing to destroy page table memory + */ +void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops) +{ + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[tile->id]; + int i; + + lockdep_assert_held(&vops->vm->lock); + xe_vm_assert_held(vops->vm); + + for (i = 0; i < pt_update_ops->current_op; ++i) { + struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i]; + + xe_pt_free_bind(pt_op->entries, pt_op->num_entries); + } + xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred); +} + +/** + * xe_pt_update_ops_abort() - Abort PT update operations + * @tile: Tile of PT update operations + * @vops: VMA operations + * + * Abort PT update operations by unwinding internal PT state + */ +void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops) +{ + struct xe_vm_pgtable_update_ops *pt_update_ops = + &vops->pt_update_ops[tile->id]; + int i; + + lockdep_assert_held(&vops->vm->lock); + xe_vm_assert_held(vops->vm); + + for (i = pt_update_ops->num_ops - 1; i >= 0; --i) { + struct xe_vm_pgtable_update_op *pt_op = + &pt_update_ops->ops[i]; + + if (!pt_op->vma || i >= pt_update_ops->current_op) + continue; + + if (pt_op->bind) + xe_pt_abort_bind(pt_op->vma, pt_op->entries, + pt_op->num_entries, + pt_op->rebind); + else + xe_pt_abort_unbind(pt_op->vma, pt_op->entries, + pt_op->num_entries); + } + + xe_pt_update_ops_fini(tile, vops); } diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h index 71a4fbfcff43a4..9ab386431caddb 100644 --- a/drivers/gpu/drm/xe/xe_pt.h +++ b/drivers/gpu/drm/xe/xe_pt.h @@ -17,6 +17,7 @@ struct xe_sync_entry; struct xe_tile; struct xe_vm; struct xe_vma; +struct xe_vma_ops; /* Largest huge pte is currently 1GiB. May become device dependent.
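 * (Illustrative arithmetic, assuming the usual 4 KiB base page and 512
 * entries per page-table level: a level-N huge PTE covers 4 KiB << (9 * N),
 * i.e. 2 MiB at level 1 and 1 GiB at level 2, matching MAX_HUGEPTE_LEVEL
 * below.)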
*/ #define MAX_HUGEPTE_LEVEL 2 @@ -34,14 +35,11 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm, void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred); -struct dma_fence * -__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - bool rebind); - -struct dma_fence * -__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs); +int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops); +struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile, + struct xe_vma_ops *vops); +void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops); +void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops); bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma); diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h index cee70cb0f01432..384cc04de71946 100644 --- a/drivers/gpu/drm/xe/xe_pt_types.h +++ b/drivers/gpu/drm/xe/xe_pt_types.h @@ -74,4 +74,52 @@ struct xe_vm_pgtable_update { u32 flags; }; +/** struct xe_vm_pgtable_update_op - Page table update operation */ +struct xe_vm_pgtable_update_op { + /** @entries: entries to update for this operation */ + struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1]; + /** @vma: VMA for operation, operation not valid if NULL */ + struct xe_vma *vma; + /** @num_entries: number of entries for this update operation */ + u32 num_entries; + /** @bind: is a bind */ + bool bind; + /** @rebind: is a rebind */ + bool rebind; +}; + +/** struct xe_vm_pgtable_update_ops: page table update operations */ +struct xe_vm_pgtable_update_ops { + /** @ops: operations */ + struct xe_vm_pgtable_update_op *ops; + /** @deferred: deferred list to destroy PT entries */ + struct llist_head deferred; + /** @q: exec queue for PT operations */ + struct xe_exec_queue *q; + /** @start: start address of ops */ + u64 start; + /** @last: last address of ops */ + u64 last; + /** @num_ops: number of operations */ + u32 num_ops; + /** @current_op: current operations */ + u32 current_op; + /** @needs_userptr_lock: Needs userptr lock */ + bool needs_userptr_lock; + /** @needs_invalidation: Needs invalidation */ + bool needs_invalidation; + /** + * @wait_vm_bookkeep: PT operations need to wait until VM is idle + * (bookkeep dma-resv slots are idle) and stage all future VM activity + * behind these operations (install PT operations into VM kernel + * dma-resv slot). + */ + bool wait_vm_bookkeep; + /** + * @wait_vm_kernel: PT operations need to wait until VM kernel dma-resv + * slots are idle. + */ + bool wait_vm_kernel; +}; + #endif diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 4e01df6b1b7a1d..28d9bb3b825de9 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" @@ -518,7 +518,9 @@ static int query_gt_topology(struct xe_device *xe, if (err) return err; - topo.type = DRM_XE_TOPO_EU_PER_DSS; + topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ? 
+ DRM_XE_TOPO_SIMD16_EU_PER_DSS : + DRM_XE_TOPO_EU_PER_DSS; err = copy_mask(&query_ptr, &topo, gt->fuse_topo.eu_mask_per_dss, sizeof(gt->fuse_topo.eu_mask_per_dss)); diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h index 655af89b31a98a..dca374b6521c2d 100644 --- a/drivers/gpu/drm/xe/xe_res_cursor.h +++ b/drivers/gpu/drm/xe/xe_res_cursor.h @@ -26,7 +26,6 @@ #include -#include #include #include #include diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 5efe83cc82ab5e..86c705d18c0d35 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -7,7 +7,7 @@ #include -#include +#include #include "xe_gt.h" #include "xe_gt_topology.h" @@ -217,21 +217,19 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx, ctx->active_entries = active_entries; ctx->n_entries = n_entries; } +EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking); static void rtp_mark_active(struct xe_device *xe, struct xe_rtp_process_ctx *ctx, - unsigned int first, unsigned int last) + unsigned int idx) { if (!ctx->active_entries) return; - if (drm_WARN_ON(&xe->drm, last > ctx->n_entries)) + if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries)) return; - if (first == last) - bitmap_set(ctx->active_entries, first, 1); - else - bitmap_set(ctx->active_entries, first, last - first + 1); + bitmap_set(ctx->active_entries, idx, 1); } /** @@ -276,8 +274,7 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx, } if (match) - rtp_mark_active(xe, ctx, entry - entries, - entry - entries); + rtp_mark_active(xe, ctx, entry - entries); } } EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr); @@ -288,44 +285,29 @@ EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr); * @entries: Table with RTP definitions * * Walk the table pointed by @entries (with an empty sentinel), executing the - * rules. A few differences from xe_rtp_process_to_sr(): - * - * 1. There is no action associated with each entry since this uses - * struct xe_rtp_entry. Its main use is for marking active workarounds via - * xe_rtp_process_ctx_enable_active_tracking(). - * 2. There is support for OR operations by having entries with no name. + * rules. One difference from xe_rtp_process_to_sr(): there is no action + * associated with each entry since this uses struct xe_rtp_entry. Its main use + * is for marking active workarounds via + * xe_rtp_process_ctx_enable_active_tracking(). 
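+ *
+ * A minimal usage sketch (wa_oob and active are made-up names for a
+ * workaround table with an empty sentinel and its tracking bitmap):
+ *
+ * .. code-block:: c
+ *
+ *	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+ *	DECLARE_BITMAP(active, ARRAY_SIZE(wa_oob));
+ *
+ *	xe_rtp_process_ctx_enable_active_tracking(&ctx, active,
+ *						   ARRAY_SIZE(wa_oob));
+ *	xe_rtp_process(&ctx, wa_oob);
+ *
+ * Bits set in the bitmap afterwards identify the entries whose rules matched.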
*/ void xe_rtp_process(struct xe_rtp_process_ctx *ctx, const struct xe_rtp_entry *entries) { - const struct xe_rtp_entry *entry, *first_entry; + const struct xe_rtp_entry *entry; struct xe_hw_engine *hwe; struct xe_gt *gt; struct xe_device *xe; rtp_get_context(ctx, &hwe, >, &xe); - first_entry = entries; - if (drm_WARN_ON(&xe->drm, !first_entry->name)) - return; - for (entry = entries; entry && entry->rules; entry++) { - if (entry->name) - first_entry = entry; - if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules)) continue; - /* Fast-forward entry, eliminating the OR'ed entries */ - for (entry++; entry && entry->rules; entry++) - if (entry->name) - break; - entry--; - - rtp_mark_active(xe, ctx, first_entry - entries, - entry - entries); + rtp_mark_active(xe, ctx, entry - entries); } } +EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process); bool xe_rtp_match_even_instance(const struct xe_gt *gt, const struct xe_hw_engine *hwe) diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h index ad446731192c19..827d932b690873 100644 --- a/drivers/gpu/drm/xe/xe_rtp.h +++ b/drivers/gpu/drm/xe/xe_rtp.h @@ -374,7 +374,7 @@ struct xe_reg_sr; * XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry_sr entry * @...: Rules * - * At least one rule is needed and up to 6 are supported. Multiple rules are + * At least one rule is needed and up to 12 are supported. Multiple rules are * AND'ed together, i.e. all the rules must evaluate to true for the entry to * be processed. See XE_RTP_MATCH_* for the possible match rules. Example: * @@ -399,7 +399,7 @@ struct xe_reg_sr; * XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry_sr * @...: Actions to be taken * - * At least one action is needed and up to 6 are supported. See XE_RTP_ACTION_* + * At least one action is needed and up to 12 are supported. See XE_RTP_ACTION_* * for the possible actions. Example: * * .. 
code-block:: c diff --git a/drivers/gpu/drm/xe/xe_rtp_helpers.h b/drivers/gpu/drm/xe/xe_rtp_helpers.h index c59e40fd7fffac..a33b0ae98bbca4 100644 --- a/drivers/gpu/drm/xe/xe_rtp_helpers.h +++ b/drivers/gpu/drm/xe/xe_rtp_helpers.h @@ -60,6 +60,12 @@ #define XE_RTP_PASTE_4(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_3(prefix_, sep_, _XE_TUPLE_TAIL args_) #define XE_RTP_PASTE_5(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_4(prefix_, sep_, _XE_TUPLE_TAIL args_) #define XE_RTP_PASTE_6(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_5(prefix_, sep_, _XE_TUPLE_TAIL args_) +#define XE_RTP_PASTE_7(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_6(prefix_, sep_, _XE_TUPLE_TAIL args_) +#define XE_RTP_PASTE_8(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_7(prefix_, sep_, _XE_TUPLE_TAIL args_) +#define XE_RTP_PASTE_9(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_8(prefix_, sep_, _XE_TUPLE_TAIL args_) +#define XE_RTP_PASTE_10(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_9(prefix_, sep_, _XE_TUPLE_TAIL args_) +#define XE_RTP_PASTE_11(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_10(prefix_, sep_, _XE_TUPLE_TAIL args_) +#define XE_RTP_PASTE_12(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_11(prefix_, sep_, _XE_TUPLE_TAIL args_) /* * XE_RTP_DROP_CAST - Drop cast to convert a compound statement to a initializer diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c index 8941522b7705d7..fe2cb2a96f7885 100644 --- a/drivers/gpu/drm/xe/xe_sa.c +++ b/drivers/gpu/drm/xe/xe_sa.c @@ -25,10 +25,9 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg) drm_suballoc_manager_fini(&sa_manager->base); - if (bo->vmap.is_iomem) + if (sa_manager->is_iomem) kvfree(sa_manager->cpu_ptr); - xe_bo_unpin_map_no_vm(bo); sa_manager->bo = NULL; } @@ -47,16 +46,17 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 sa_manager->bo = NULL; - bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + bo = xe_managed_bo_create_pin_map(xe, tile, size, + XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE); if (IS_ERR(bo)) { drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n", PTR_ERR(bo)); return (struct xe_sa_manager *)bo; } sa_manager->bo = bo; + sa_manager->is_iomem = bo->vmap.is_iomem; drm_suballoc_manager_init(&sa_manager->base, managed_size, align); sa_manager->gpu_addr = xe_bo_ggtt_addr(bo); @@ -64,7 +64,6 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 if (bo->vmap.is_iomem) { sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL); if (!sa_manager->cpu_ptr) { - xe_bo_unpin_map_no_vm(sa_manager->bo); sa_manager->bo = NULL; return ERR_PTR(-ENOMEM); } @@ -84,6 +83,13 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager, unsigned int size) { + /* + * BB to large, return -ENOBUFS indicating user 
should split + * array of binds into smaller chunks. + */ + if (size > sa_manager->base.size) + return ERR_PTR(-ENOBUFS); + return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0); } diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h index 2ef896aeca1d41..2b070ff1292e75 100644 --- a/drivers/gpu/drm/xe/xe_sa_types.h +++ b/drivers/gpu/drm/xe/xe_sa_types.h @@ -14,6 +14,7 @@ struct xe_sa_manager { struct xe_bo *bo; u64 gpu_addr; void *cpu_ptr; + bool is_iomem; }; #endif diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index 9628f9deb3c012..eeccc1c318aef1 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -5,7 +5,7 @@ #include "xe_sched_job.h" -#include +#include #include #include @@ -89,8 +89,7 @@ static void xe_sched_job_free_fences(struct xe_sched_job *job) if (ptrs->lrc_fence) xe_lrc_free_seqno_fence(ptrs->lrc_fence); - if (ptrs->chain_fence) - dma_fence_chain_free(ptrs->chain_fence); + dma_fence_chain_free(ptrs->chain_fence); } } diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c index a274a5fb14018b..5a1d65e4f19f2b 100644 --- a/drivers/gpu/drm/xe/xe_sriov.c +++ b/drivers/gpu/drm/xe/xe_sriov.c @@ -5,7 +5,7 @@ #include -#include "regs/xe_sriov_regs.h" +#include "regs/xe_regs.h" #include "xe_assert.h" #include "xe_device.h" diff --git a/drivers/gpu/drm/xe/xe_step.c b/drivers/gpu/drm/xe/xe_step.c index eaf1b718f26c90..c77b5c317fa00c 100644 --- a/drivers/gpu/drm/xe/xe_step.c +++ b/drivers/gpu/drm/xe/xe_step.c @@ -28,23 +28,17 @@ * use a macro to define these to make it easier to identify the platforms * where the two steppings can deviate. */ -#define COMMON_GT_MEDIA_STEP(x_) \ - .graphics = STEP_##x_, \ - .media = STEP_##x_ - #define COMMON_STEP(x_) \ - COMMON_GT_MEDIA_STEP(x_), \ .graphics = STEP_##x_, \ - .media = STEP_##x_, \ - .display = STEP_##x_ + .media = STEP_##x_ __diag_push(); __diag_ignore_all("-Woverride-init", "Allow field overrides in table"); /* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */ static const struct xe_step_info tgl_revids[] = { - [0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_B0 }, - [1] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_D0 }, + [0] = { COMMON_STEP(A0) }, + [1] = { COMMON_STEP(B0) }, }; static const struct xe_step_info dg1_revids[] = { @@ -53,49 +47,49 @@ static const struct xe_step_info dg1_revids[] = { }; static const struct xe_step_info adls_revids[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 }, - [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A2 }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 }, - [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_B0 }, - [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_C0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x1] = { COMMON_STEP(A0) }, + [0x4] = { COMMON_STEP(B0) }, + [0x8] = { COMMON_STEP(C0) }, + [0xC] = { COMMON_STEP(D0) }, }; static const struct xe_step_info adls_rpls_revids[] = { - [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_D0 }, - [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_C0 }, + [0x4] = { COMMON_STEP(D0) }, + [0xC] = { COMMON_STEP(D0) }, }; static const struct xe_step_info adlp_revids[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 }, - [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_C0 }, - [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_D0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x4] = { 
COMMON_STEP(B0) }, + [0x8] = { COMMON_STEP(C0) }, + [0xC] = { COMMON_STEP(C0) }, }; static const struct xe_step_info adlp_rpl_revids[] = { - [0x4] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_E0 }, + [0x4] = { COMMON_STEP(C0) }, }; static const struct xe_step_info adln_revids[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_D0 }, + [0x0] = { COMMON_STEP(A0) }, }; static const struct xe_step_info dg2_g10_revid_step_tbl[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 }, - [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display = STEP_A0 }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 }, - [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_C0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x1] = { COMMON_STEP(A1) }, + [0x4] = { COMMON_STEP(B0) }, + [0x8] = { COMMON_STEP(C0) }, }; static const struct xe_step_info dg2_g11_revid_step_tbl[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_B0 }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_C0 }, - [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display = STEP_C0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x4] = { COMMON_STEP(B0) }, + [0x5] = { COMMON_STEP(B1) }, }; static const struct xe_step_info dg2_g12_revid_step_tbl[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_C0 }, - [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display = STEP_C0 }, + [0x0] = { COMMON_STEP(A0) }, + [0x1] = { COMMON_STEP(A1) }, }; static const struct xe_step_info pvc_revid_step_tbl[] = { @@ -195,7 +189,6 @@ struct xe_step_info xe_step_pre_gmdid_get(struct xe_device *xe) } else { drm_dbg(&xe->drm, "Using future steppings\n"); step.graphics = STEP_FUTURE; - step.display = STEP_FUTURE; } } diff --git a/drivers/gpu/drm/xe/xe_step_types.h b/drivers/gpu/drm/xe/xe_step_types.h index ccc9b4795e9593..d978cc2512f250 100644 --- a/drivers/gpu/drm/xe/xe_step_types.h +++ b/drivers/gpu/drm/xe/xe_step_types.h @@ -11,12 +11,15 @@ struct xe_step_info { u8 graphics; u8 media; - u8 display; u8 basedie; }; #define STEP_ENUM_VAL(name) STEP_##name, +/* + * Always define four minor steppings 0-3 for each stepping to match GMD ID + * spacing of values. See xe_step_gmdid_get(). + */ #define STEP_NAME_LIST(func) \ func(A0) \ func(A1) \ @@ -34,7 +37,30 @@ struct xe_step_info { func(D1) \ func(D2) \ func(D3) \ - func(E0) + func(E0) \ + func(E1) \ + func(E2) \ + func(E3) \ + func(F0) \ + func(F1) \ + func(F2) \ + func(F3) \ + func(G0) \ + func(G1) \ + func(G2) \ + func(G3) \ + func(H0) \ + func(H1) \ + func(H2) \ + func(H3) \ + func(I0) \ + func(I1) \ + func(I2) \ + func(I3) \ + func(J0) \ + func(J1) \ + func(J2) \ + func(J3) /* * Symbolic steppings that do not match the hardware. 
These are valid both as gt diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index 80499681bd583d..bb3c2a83036237 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include "xe_device_types.h" #include "xe_exec_queue.h" @@ -204,26 +204,11 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, return 0; } -int xe_sync_entry_wait(struct xe_sync_entry *sync) -{ - if (sync->fence) - dma_fence_wait(sync->fence, true); - - return 0; -} - int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job) { - int err; - - if (sync->fence) { - err = drm_sched_job_add_dependency(&job->drm, - dma_fence_get(sync->fence)); - if (err) { - dma_fence_put(sync->fence); - return err; - } - } + if (sync->fence) + return drm_sched_job_add_dependency(&job->drm, + dma_fence_get(sync->fence)); return 0; } @@ -264,10 +249,8 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync) { if (sync->syncobj) drm_syncobj_put(sync->syncobj); - if (sync->fence) - dma_fence_put(sync->fence); - if (sync->chain_fence) - dma_fence_chain_free(sync->chain_fence); + dma_fence_put(sync->fence); + dma_fence_chain_free(sync->chain_fence); if (sync->ufence) user_fence_put(sync->ufence); } diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h index 006dbf78079365..256ffc1e54dc7c 100644 --- a/drivers/gpu/drm/xe/xe_sync.h +++ b/drivers/gpu/drm/xe/xe_sync.h @@ -22,7 +22,6 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef, struct xe_sync_entry *sync, struct drm_xe_sync __user *sync_user, unsigned int flags); -int xe_sync_entry_wait(struct xe_sync_entry *sync); int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job); void xe_sync_entry_signal(struct xe_sync_entry *sync, diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h index 01837f6f609f52..8573d7a87d8400 100644 --- a/drivers/gpu/drm/xe/xe_trace.h +++ b/drivers/gpu/drm/xe/xe_trace.h @@ -369,6 +369,58 @@ TRACE_EVENT(xe_reg_rw, (u32)(__entry->val >> 32)) ); +DECLARE_EVENT_CLASS(xe_pm_runtime, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller), + + TP_STRUCT__entry( + __string(dev, __dev_name_xe(xe)) + __field(void *, caller) + ), + + TP_fast_assign( + __assign_str(dev); + __entry->caller = caller; + ), + + TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_resume, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + +DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl, + TP_PROTO(struct xe_device *xe, void *caller), + TP_ARGS(xe, caller) +); + #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h index f39f09ed349562..9b1a1d4304ae18 100644 --- a/drivers/gpu/drm/xe/xe_trace_bo.h +++ 
b/drivers/gpu/drm/xe/xe_trace_bo.h @@ -117,11 +117,6 @@ DEFINE_EVENT(xe_vma, xe_vma_acc, TP_ARGS(vma) ); -DEFINE_EVENT(xe_vma, xe_vma_fail, - TP_PROTO(struct xe_vma *vma), - TP_ARGS(vma) -); - DEFINE_EVENT(xe_vma, xe_vma_bind, TP_PROTO(struct xe_vma *vma), TP_ARGS(vma) @@ -237,6 +232,11 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit, TP_ARGS(vm) ); +DEFINE_EVENT(xe_vm, xe_vm_ops_fail, + TP_PROTO(struct xe_vm *vm), + TP_ARGS(vm) +); + #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index f46fd2df84debd..f7113cf6109d59 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -5,7 +5,6 @@ */ #include -#include #include #include diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index d4e6fa918942be..0d5e04158917be 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -39,12 +39,51 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { }, { XE_RTP_NAME("Tuning: Compression Overfetch"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), - XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX)), + XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX), + SET(CCCHKNREG1, L3CMPCTRL)) + }, + { XE_RTP_NAME("Tuning: Compression Overfetch - media"), + XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX), + SET(XE2LPM_CCCHKNREG1, L3CMPCTRL)) }, { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN)) }, + { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"), + XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN)) + }, + { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), + XE_RTP_ACTIONS(SET(L3SQCREG2, + COMPMEMRD256BOVRFETCHEN)) + }, + { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"), + XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2, + COMPMEMRD256BOVRFETCHEN)) + }, + { XE_RTP_NAME("Tuning: Stateless compression control"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), + XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT, + REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0))) + }, + { XE_RTP_NAME("Tuning: Stateless compression control - media"), + XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)), + XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT, + REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0))) + }, + { XE_RTP_NAME("Tuning: L3 RW flush all Cache"), + XE_RTP_RULES(GRAPHICS_VERSION(2004)), + XE_RTP_ACTIONS(SET(SCRATCH3_LBCF, RWFLUSHALLEN)) + }, + { XE_RTP_NAME("Tuning: L3 RW flush all cache - media"), + XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN)) + }, + {} }; @@ -93,6 +132,14 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = { REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f))) }, + /* Xe2_HPG */ + + { XE_RTP_NAME("Tuning: vs hit max value"), + XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK, + REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f))) + }, + {} }; diff --git a/drivers/gpu/drm/xe/xe_uc_debugfs.c b/drivers/gpu/drm/xe/xe_uc_debugfs.c 
index 78eb8db737910b..24a4209051eecd 100644 --- a/drivers/gpu/drm/xe/xe_uc_debugfs.c +++ b/drivers/gpu/drm/xe/xe_uc_debugfs.c @@ -8,6 +8,7 @@ #include #include "xe_gt.h" +#include "xe_gsc_debugfs.h" #include "xe_guc_debugfs.h" #include "xe_huc_debugfs.h" #include "xe_macros.h" @@ -23,6 +24,7 @@ void xe_uc_debugfs_register(struct xe_uc *uc, struct dentry *parent) return; } + xe_gsc_debugfs_register(&uc->gsc, root); xe_guc_debugfs_register(&uc->guc, root); xe_huc_debugfs_register(&uc->huc, root); } diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 5f23ecd98376f2..d431d0031185eb 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -15,6 +15,7 @@ #include "xe_gsc.h" #include "xe_gt.h" #include "xe_gt_printk.h" +#include "xe_guc.h" #include "xe_map.h" #include "xe_mmio.h" #include "xe_module.h" @@ -105,17 +106,20 @@ struct fw_blobs_by_type { }; #define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ - fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 19, 2)) \ - fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 19, 2)) \ - fw_def(DG2, major_ver(i915, guc, dg2, 70, 19, 2)) \ - fw_def(DG1, major_ver(i915, guc, dg1, 70, 19, 2)) \ - fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 19, 2)) \ - fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 19, 2)) \ - fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 19, 2)) \ - fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) \ - fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) + fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \ + fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \ + fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \ + fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \ + fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \ + fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \ + fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \ + fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \ + fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \ + fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) #define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \ + fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \ + fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \ fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \ fw_def(DG1, no_ver(i915, huc, dg1)) \ fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \ @@ -125,7 +129,8 @@ struct fw_blobs_by_type { /* for the GSC FW we match the compatibility version and not the release one */ #define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \ - fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0)) + fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \ + fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0)) #define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \ __stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin" @@ -136,6 +141,8 @@ struct fw_blobs_by_type { MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a)) #define fw_filename_no_ver(dir_, uc_, shortname_) \ MAKE_FW_PATH(dir_, uc_, shortname_, "") +#define fw_filename_gsc(dir_, uc_, shortname_, a, b, c) \ + MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(b)) #define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \ { fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \ @@ -146,6 +153,9 @@ struct fw_blobs_by_type { #define uc_fw_entry_no_ver(dir_, uc_, shortname_) \ { fw_filename_no_ver(dir_, uc_, shortname_), \ 0, 0 } +#define uc_fw_entry_gsc(dir_, uc_, shortname_, a, b, c) \ + { fw_filename_gsc(dir_, uc_, shortname_, 
a, b, c), \ + a, b, c } /* All blobs need to be declared via MODULE_FIRMWARE() */ #define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \ @@ -161,7 +171,7 @@ XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_mmp_ver, fw_filename_major_ver) XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_mmp_ver, fw_filename_no_ver) -XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_major_ver) +XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_gsc) static struct xe_gt * __uc_fw_to_gt(struct xe_uc_fw *uc_fw, enum xe_uc_fw_type type) @@ -204,7 +214,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw) uc_fw_entry_no_ver) }; static const struct uc_fw_entry entries_gsc[] = { - XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_major_ver) + XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_gsc) }; static const struct fw_blobs_by_type blobs_all[XE_UC_FW_NUM_TYPES] = { [XE_UC_FW_TYPE_GUC] = { entries_guc, ARRAY_SIZE(entries_guc) }, @@ -306,10 +316,10 @@ static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css) xe_gt_assert(gt, uc_fw->type == XE_UC_FW_TYPE_GUC); - /* We don't support GuC releases older than 70.19 */ - if (release->major < 70 || (release->major == 70 && release->minor < 19)) { - xe_gt_err(gt, "Unsupported GuC v%u.%u! v70.19 or newer is required\n", - release->major, release->minor); + /* We don't support GuC releases older than 70.29.2 */ + if (MAKE_GUC_VER_STRUCT(*release) < MAKE_GUC_VER(70, 29, 2)) { + xe_gt_err(gt, "Unsupported GuC v%u.%u.%u! v70.29.2 or newer is required\n", + release->major, release->minor, release->patch); return -EINVAL; } diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 50e8fc49ba6c15..ce9dca4d4e87b4 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include @@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm) if (q->lr.pfence) { long timeout = dma_fence_wait(q->lr.pfence, false); - if (timeout < 0) + /* Only -ETIME on fence indicates VM needs to be killed */ + if (timeout < 0 || q->lr.pfence->error == -ETIME) return -ETIME; + dma_fence_put(q->lr.pfence); q->lr.pfence = NULL; } @@ -273,6 +275,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM * @vm: The VM. * @q: The exec_queue + * + * Note that this function might be called multiple times on the same queue. */ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) { @@ -280,8 +284,10 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) return; down_write(&vm->lock); - list_del(&q->lr.link); - --vm->preempt.num_exec_queues; + if (!list_empty(&q->lr.link)) { + list_del_init(&q->lr.link); + --vm->preempt.num_exec_queues; + } if (q->lr.pfence) { dma_fence_enable_sw_signaling(q->lr.pfence); dma_fence_put(q->lr.pfence); @@ -311,7 +317,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm) #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000 -static void xe_vm_kill(struct xe_vm *vm, bool unlocked) +/** + * xe_vm_kill() - VM Kill + * @vm: The VM. + * @unlocked: Flag indicates the VM's dma-resv is not held + * + * Kill the VM by setting banned flag indicated VM is no longer available for + * use. If in preempt fence mode, also kill all exec queue attached to the VM. 
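+ *
+ * Callers that do not hold the VM's dma-resv pass @unlocked == true; callers
+ * that already hold it pass false, as the xe_pt_update_ops_run() error path
+ * does via xe_vm_kill(vops->vm, false).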
+ */ +void xe_vm_kill(struct xe_vm *vm, bool unlocked) { struct xe_exec_queue *q; @@ -708,6 +722,42 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm) list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; } +static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) { + if (!vops->pt_update_ops[i].num_ops) + continue; + + vops->pt_update_ops[i].ops = + kmalloc_array(vops->pt_update_ops[i].num_ops, + sizeof(*vops->pt_update_ops[i].ops), + GFP_KERNEL); + if (!vops->pt_update_ops[i].ops) + return array_of_binds ? -ENOBUFS : -ENOMEM; + } + + return 0; +} + +static void xe_vma_ops_fini(struct xe_vma_ops *vops) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + kfree(vops->pt_update_ops[i].ops); +} + +static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + if (BIT(i) & tile_mask) + ++vops->pt_update_ops[i].num_ops; +} + static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma, u8 tile_mask) { @@ -735,6 +785,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, xe_vm_populate_rebind(op, vma, tile_mask); list_add_tail(&op->link, &vops->list); + xe_vma_ops_incr_pt_update_ops(vops, tile_mask); return 0; } @@ -751,7 +802,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) struct xe_vma *vma, *next; struct xe_vma_ops vops; struct xe_vma_op *op, *next_op; - int err; + int err, i; lockdep_assert_held(&vm->lock); if ((xe_vm_in_lr_mode(vm) && !rebind_worker) || @@ -759,6 +810,8 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) return 0; xe_vma_ops_init(&vops, vm, NULL, NULL, 0); + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + vops.pt_update_ops[i].wait_vm_bookkeep = true; xe_vm_assert_held(vm); list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { @@ -775,6 +828,10 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) goto free_ops; } + err = xe_vma_ops_alloc(&vops, false); + if (err) + goto free_ops; + fence = ops_execute(vm, &vops); if (IS_ERR(fence)) { err = PTR_ERR(fence); @@ -789,6 +846,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) list_del(&op->link); kfree(op); } + xe_vma_ops_fini(&vops); return err; } @@ -798,6 +856,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma struct dma_fence *fence = NULL; struct xe_vma_ops vops; struct xe_vma_op *op, *next_op; + struct xe_tile *tile; + u8 id; int err; lockdep_assert_held(&vm->lock); @@ -805,17 +865,30 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); xe_vma_ops_init(&vops, vm, NULL, NULL, 0); + for_each_tile(tile, vm->xe, id) { + vops.pt_update_ops[id].wait_vm_bookkeep = true; + vops.pt_update_ops[tile->id].q = + xe_tile_migrate_exec_queue(tile); + } err = xe_vm_ops_add_rebind(&vops, vma, tile_mask); if (err) return ERR_PTR(err); + err = xe_vma_ops_alloc(&vops, false); + if (err) { + fence = ERR_PTR(err); + goto free_ops; + } + fence = ops_execute(vm, &vops); +free_ops: list_for_each_entry_safe(op, next_op, &vops.list, link) { list_del(&op->link); kfree(op); } + xe_vma_ops_fini(&vops); return fence; } @@ -1122,7 +1195,7 @@ static const struct drm_gpuvm_ops gpuvm_ops = { .vm_free = xe_vm_free, }; -static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index) +static u64 pde_encode_pat_index(u16 pat_index) { u64 pte = 0; @@ -1135,8 +1208,7 @@ static u64 
pde_encode_pat_index(struct xe_device *xe, u16 pat_index) return pte; } -static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index, - u32 pt_level) +static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level) { u64 pte = 0; @@ -1177,12 +1249,11 @@ static u64 pte_encode_ps(u32 pt_level) static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset, const u16 pat_index) { - struct xe_device *xe = xe_bo_device(bo); u64 pde; pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE); pde |= XE_PAGE_PRESENT | XE_PAGE_RW; - pde |= pde_encode_pat_index(xe, pat_index); + pde |= pde_encode_pat_index(pat_index); return pde; } @@ -1190,12 +1261,11 @@ static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset, static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset, u16 pat_index, u32 pt_level) { - struct xe_device *xe = xe_bo_device(bo); u64 pte; pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE); pte |= XE_PAGE_PRESENT | XE_PAGE_RW; - pte |= pte_encode_pat_index(xe, pat_index, pt_level); + pte |= pte_encode_pat_index(pat_index, pt_level); pte |= pte_encode_ps(pt_level); if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)) @@ -1207,14 +1277,12 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset, static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma, u16 pat_index, u32 pt_level) { - struct xe_device *xe = xe_vma_vm(vma)->xe; - pte |= XE_PAGE_PRESENT; if (likely(!xe_vma_read_only(vma))) pte |= XE_PAGE_RW; - pte |= pte_encode_pat_index(xe, pat_index, pt_level); + pte |= pte_encode_pat_index(pat_index, pt_level); pte |= pte_encode_ps(pt_level); if (unlikely(xe_vma_is_null(vma))) @@ -1234,7 +1302,7 @@ static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr, pte = addr; pte |= XE_PAGE_PRESENT | XE_PAGE_RW; - pte |= pte_encode_pat_index(xe, pat_index, pt_level); + pte |= pte_encode_pat_index(pat_index, pt_level); pte |= pte_encode_ps(pt_level); if (devmem) @@ -1333,6 +1401,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) init_rwsem(&vm->userptr.notifier_lock); spin_lock_init(&vm->userptr.invalidated_lock); + ttm_lru_bulk_move_init(&vm->lru_bulk_move); + INIT_WORK(&vm->destroy_work, vm_destroy_work_func); INIT_LIST_HEAD(&vm->preempt.exec_queues); @@ -1412,19 +1482,13 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) /* Kernel migration VM shouldn't have a circular loop.. 
*/ if (!(flags & XE_VM_FLAG_MIGRATION)) { for_each_tile(tile, xe, id) { - struct xe_gt *gt = tile->primary_gt; - struct xe_vm *migrate_vm; struct xe_exec_queue *q; u32 create_flags = EXEC_QUEUE_FLAG_VM; if (!vm->pt_root[id]) continue; - migrate_vm = xe_migrate_get_vm(tile->migrate); - q = xe_exec_queue_create_class(xe, gt, migrate_vm, - XE_ENGINE_CLASS_COPY, - create_flags); - xe_vm_put(migrate_vm); + q = xe_exec_queue_create_bind(xe, tile, create_flags, 0); if (IS_ERR(q)) { err = PTR_ERR(q); goto err_close; @@ -1437,13 +1501,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) if (number_tiles > 1) vm->composite_fence_ctx = dma_fence_context_alloc(1); - mutex_lock(&xe->usm.lock); - if (flags & XE_VM_FLAG_FAULT_MODE) - xe->usm.num_vm_in_fault_mode++; - else if (!(flags & XE_VM_FLAG_MIGRATION)) - xe->usm.num_vm_in_non_fault_mode++; - mutex_unlock(&xe->usm.lock); - trace_xe_vm_create(vm); return vm; @@ -1458,6 +1515,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) mutex_destroy(&vm->snap_mutex); for_each_tile(tile, xe, id) xe_range_fence_tree_fini(&vm->rftree[id]); + ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); kfree(vm); if (flags & XE_VM_FLAG_LR_MODE) xe_pm_runtime_put(xe); @@ -1555,12 +1613,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) up_write(&vm->lock); - mutex_lock(&xe->usm.lock); - if (vm->flags & XE_VM_FLAG_FAULT_MODE) - xe->usm.num_vm_in_fault_mode--; - else if (!(vm->flags & XE_VM_FLAG_MIGRATION)) - xe->usm.num_vm_in_non_fault_mode--; - + down_write(&xe->usm.lock); if (vm->usm.asid) { void *lookup; @@ -1570,7 +1623,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); xe_assert(xe, lookup == vm); } - mutex_unlock(&xe->usm.lock); + up_write(&xe->usm.lock); for_each_tile(tile, xe, id) xe_range_fence_tree_fini(&vm->rftree[id]); @@ -1602,6 +1655,8 @@ static void vm_destroy_work_func(struct work_struct *w) trace_xe_vm_free(vm); + ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); + if (vm->xef) xe_file_put(vm->xef); @@ -1641,147 +1696,6 @@ to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) return q ? q : vm->q[0]; } -static struct dma_fence * -xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - bool first_op, bool last_op) -{ - struct xe_vm *vm = xe_vma_vm(vma); - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - struct xe_tile *tile; - struct dma_fence *fence = NULL; - struct dma_fence **fences = NULL; - struct dma_fence_array *cf = NULL; - int cur_fence = 0; - int number_tiles = hweight8(vma->tile_present); - int err; - u8 id; - - trace_xe_vma_unbind(vma); - - if (number_tiles > 1) { - fences = kmalloc_array(number_tiles, sizeof(*fences), - GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); - } - - for_each_tile(tile, vm->xe, id) { - if (!(vma->tile_present & BIT(id))) - goto next; - - fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id], - first_op ? syncs : NULL, - first_op ? num_syncs : 0); - if (IS_ERR(fence)) { - err = PTR_ERR(fence); - goto err_fences; - } - - if (fences) - fences[cur_fence++] = fence; - -next: - if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) - q = list_next_entry(q, multi_gt_list); - } - - if (fences) { - cf = dma_fence_array_create(number_tiles, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - if (!cf) { - --vm->composite_fence_seqno; - err = -ENOMEM; - goto err_fences; - } - } - - fence = cf ? &cf->base : !fence ? 
- xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence; - - return fence; - -err_fences: - if (fences) { - while (cur_fence) - dma_fence_put(fences[--cur_fence]); - kfree(fences); - } - - return ERR_PTR(err); -} - -static struct dma_fence * -xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - u8 tile_mask, bool first_op, bool last_op) -{ - struct xe_tile *tile; - struct dma_fence *fence; - struct dma_fence **fences = NULL; - struct dma_fence_array *cf = NULL; - struct xe_vm *vm = xe_vma_vm(vma); - int cur_fence = 0; - int number_tiles = hweight8(tile_mask); - int err; - u8 id; - - trace_xe_vma_bind(vma); - - if (number_tiles > 1) { - fences = kmalloc_array(number_tiles, sizeof(*fences), - GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); - } - - for_each_tile(tile, vm->xe, id) { - if (!(tile_mask & BIT(id))) - goto next; - - fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id], - first_op ? syncs : NULL, - first_op ? num_syncs : 0, - vma->tile_present & BIT(id)); - if (IS_ERR(fence)) { - err = PTR_ERR(fence); - goto err_fences; - } - - if (fences) - fences[cur_fence++] = fence; - -next: - if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) - q = list_next_entry(q, multi_gt_list); - } - - if (fences) { - cf = dma_fence_array_create(number_tiles, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - if (!cf) { - --vm->composite_fence_seqno; - err = -ENOMEM; - goto err_fences; - } - } - - return cf ? &cf->base : fence; - -err_fences: - if (fences) { - while (cur_fence) - dma_fence_put(fences[--cur_fence]); - kfree(fences); - } - - return ERR_PTR(err); -} - static struct xe_user_fence * find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs) { @@ -1797,48 +1711,6 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs) return NULL; } -static struct dma_fence * -xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs, - u8 tile_mask, bool immediate, bool first_op, bool last_op) -{ - struct dma_fence *fence; - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - - xe_vm_assert_held(vm); - xe_bo_assert_held(bo); - - if (immediate) { - fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask, - first_op, last_op); - if (IS_ERR(fence)) - return fence; - } else { - xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); - - fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm); - } - - return fence; -} - -static struct dma_fence * -xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, - struct xe_exec_queue *q, struct xe_sync_entry *syncs, - u32 num_syncs, bool first_op, bool last_op) -{ - struct dma_fence *fence; - - xe_vm_assert_held(vm); - xe_bo_assert_held(xe_vma_bo(vma)); - - fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op); - if (IS_ERR(fence)) - return fence; - - return fence; -} - #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \ DRM_XE_VM_CREATE_FLAG_LR_MODE | \ DRM_XE_VM_CREATE_FLAG_FAULT_MODE) @@ -1879,14 +1751,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) return -EINVAL; - if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && - xe_device_in_non_fault_mode(xe))) - return -EINVAL; - - if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) && - xe_device_in_fault_mode(xe))) - return -EINVAL; - if (XE_IOCTL_DBG(xe, args->extensions)) return -EINVAL; @@ 
-1901,25 +1765,18 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, if (IS_ERR(vm)) return PTR_ERR(vm); - mutex_lock(&xef->vm.lock); - err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); - mutex_unlock(&xef->vm.lock); - if (err) - goto err_close_and_put; - if (xe->info.has_asid) { - mutex_lock(&xe->usm.lock); + down_write(&xe->usm.lock); err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, XA_LIMIT(1, XE_MAX_ASID - 1), &xe->usm.next_asid, GFP_KERNEL); - mutex_unlock(&xe->usm.lock); + up_write(&xe->usm.lock); if (err < 0) - goto err_free_id; + goto err_close_and_put; vm->usm.asid = asid; } - args->vm_id = id; vm->xef = xe_file_get(xef); /* Record BO memory for VM pagetable created against client */ @@ -1932,12 +1789,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); #endif + /* user id alloc must always be last in ioctl to prevent UAF */ + err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); + if (err) + goto err_close_and_put; + + args->vm_id = id; + return 0; -err_free_id: - mutex_lock(&xef->vm.lock); - xa_erase(&xef->vm.xa, id); - mutex_unlock(&xef->vm.lock); err_close_and_put: xe_vm_close_and_put(vm); @@ -1979,21 +1839,6 @@ static const u32 region_to_mem_type[] = { XE_PL_VRAM1, }; -static struct dma_fence * -xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, - struct xe_exec_queue *q, struct xe_sync_entry *syncs, - u32 num_syncs, bool first_op, bool last_op) -{ - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - - if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) { - return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs, - vma->tile_mask, true, first_op, last_op); - } else { - return xe_exec_queue_last_fence_get(wait_exec_queue, vm); - } -} - static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, bool post_commit) { @@ -2281,14 +2126,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) return err; } - -static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, - struct drm_gpuva_ops *ops, - struct xe_sync_entry *syncs, u32 num_syncs, - struct xe_vma_ops *vops, bool last) +static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, + struct xe_vma_ops *vops) { struct xe_device *xe = vm->xe; - struct xe_vma_op *last_op = NULL; struct drm_gpuva_op *__op; struct xe_tile *tile; u8 id, tile_mask = 0; @@ -2302,19 +2143,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, drm_gpuva_for_each_op(__op, ops) { struct xe_vma_op *op = gpuva_op_to_vma_op(__op); struct xe_vma *vma; - bool first = list_empty(&vops->list); unsigned int flags = 0; INIT_LIST_HEAD(&op->link); list_add_tail(&op->link, &vops->list); - - if (first) { - op->flags |= XE_VMA_OP_FIRST; - op->num_syncs = num_syncs; - op->syncs = syncs; - } - - op->q = q; op->tile_mask = tile_mask; switch (op->base.op) { @@ -2333,6 +2165,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, return PTR_ERR(vma); op->map.vma = vma; + if (op->map.immediate || !xe_vm_in_fault_mode(vm)) + xe_vma_ops_incr_pt_update_ops(vops, + op->tile_mask); break; } case DRM_GPUVA_OP_REMAP: @@ -2377,6 +2212,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx", (ULL)op->remap.start, (ULL)op->remap.range); + } else { + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); } } @@ 
-2413,203 +2250,30 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx", (ULL)op->remap.start, (ULL)op->remap.range); + } else { + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); } } + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; } case DRM_GPUVA_OP_UNMAP: case DRM_GPUVA_OP_PREFETCH: - /* Nothing to do */ + /* FIXME: Need to skip some prefetch ops */ + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; default: drm_warn(&vm->xe->drm, "NOT POSSIBLE"); } - last_op = op; - err = xe_vma_op_commit(vm, op); if (err) return err; } - /* FIXME: Unhandled corner case */ - XE_WARN_ON(!last_op && last && !list_empty(&vops->list)); - - if (!last_op) - return 0; - - if (last) { - last_op->flags |= XE_VMA_OP_LAST; - last_op->num_syncs = num_syncs; - last_op->syncs = syncs; - } - return 0; } -static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma, - struct xe_vma_op *op) -{ - struct dma_fence *fence = NULL; - - lockdep_assert_held(&vm->lock); - - xe_vm_assert_held(vm); - xe_bo_assert_held(xe_vma_bo(vma)); - - switch (op->base.op) { - case DRM_GPUVA_OP_MAP: - fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma), - op->syncs, op->num_syncs, - op->tile_mask, - op->map.immediate || !xe_vm_in_fault_mode(vm), - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - case DRM_GPUVA_OP_REMAP: - { - bool prev = !!op->remap.prev; - bool next = !!op->remap.next; - - if (!op->remap.unmap_done) { - if (prev || next) - vma->gpuva.flags |= XE_VMA_FIRST_REBIND; - fence = xe_vm_unbind(vm, vma, op->q, op->syncs, - op->num_syncs, - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST && - !prev && !next); - if (IS_ERR(fence)) - break; - op->remap.unmap_done = true; - } - - if (prev) { - op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND; - dma_fence_put(fence); - fence = xe_vm_bind(vm, op->remap.prev, op->q, - xe_vma_bo(op->remap.prev), op->syncs, - op->num_syncs, - op->remap.prev->tile_mask, true, - false, - op->flags & XE_VMA_OP_LAST && !next); - op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND; - if (IS_ERR(fence)) - break; - op->remap.prev = NULL; - } - - if (next) { - op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND; - dma_fence_put(fence); - fence = xe_vm_bind(vm, op->remap.next, op->q, - xe_vma_bo(op->remap.next), - op->syncs, op->num_syncs, - op->remap.next->tile_mask, true, - false, op->flags & XE_VMA_OP_LAST); - op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND; - if (IS_ERR(fence)) - break; - op->remap.next = NULL; - } - - break; - } - case DRM_GPUVA_OP_UNMAP: - fence = xe_vm_unbind(vm, vma, op->q, op->syncs, - op->num_syncs, op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - case DRM_GPUVA_OP_PREFETCH: - fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs, - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - default: - drm_warn(&vm->xe->drm, "NOT POSSIBLE"); - } - - if (IS_ERR(fence)) - trace_xe_vma_fail(vma); - - return fence; -} - -static struct dma_fence * -__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, - struct xe_vma_op *op) -{ - struct dma_fence *fence; - int err; - -retry_userptr: - fence = op_execute(vm, vma, op); - if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) { - lockdep_assert_held_write(&vm->lock); - - if (op->base.op == DRM_GPUVA_OP_REMAP) { - if (!op->remap.unmap_done) - vma = gpuva_to_vma(op->base.remap.unmap->va); - else if (op->remap.prev) - vma = op->remap.prev; - else - 
vma = op->remap.next; - } - - if (xe_vma_is_userptr(vma)) { - err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); - if (!err) - goto retry_userptr; - - fence = ERR_PTR(err); - trace_xe_vma_fail(vma); - } - } - - return fence; -} - -static struct dma_fence * -xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) -{ - struct dma_fence *fence = ERR_PTR(-ENOMEM); - - lockdep_assert_held(&vm->lock); - - switch (op->base.op) { - case DRM_GPUVA_OP_MAP: - fence = __xe_vma_op_execute(vm, op->map.vma, op); - break; - case DRM_GPUVA_OP_REMAP: - { - struct xe_vma *vma; - - if (!op->remap.unmap_done) - vma = gpuva_to_vma(op->base.remap.unmap->va); - else if (op->remap.prev) - vma = op->remap.prev; - else - vma = op->remap.next; - - fence = __xe_vma_op_execute(vm, vma, op); - break; - } - case DRM_GPUVA_OP_UNMAP: - fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va), - op); - break; - case DRM_GPUVA_OP_PREFETCH: - fence = __xe_vma_op_execute(vm, - gpuva_to_vma(op->base.prefetch.va), - op); - break; - default: - drm_warn(&vm->xe->drm, "NOT POSSIBLE"); - } - - return fence; -} - static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, bool post_commit, bool prev_post_commit, bool next_post_commit) @@ -2792,26 +2456,157 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec, return err; } +#ifdef TEST_VM_OPS_ERROR + if (vops->inject_error && + vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK) + return -ENOSPC; +#endif + return 0; } +static void op_trace(struct xe_vma_op *op) +{ + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + trace_xe_vma_bind(op->map.vma); + break; + case DRM_GPUVA_OP_REMAP: + trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va)); + if (op->remap.prev) + trace_xe_vma_bind(op->remap.prev); + if (op->remap.next) + trace_xe_vma_bind(op->remap.next); + break; + case DRM_GPUVA_OP_UNMAP: + trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va)); + break; + case DRM_GPUVA_OP_PREFETCH: + trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va)); + break; + default: + XE_WARN_ON("NOT POSSIBLE"); + } +} + +static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops) +{ + struct xe_vma_op *op; + + list_for_each_entry(op, &vops->list, link) + op_trace(op); +} + +static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) +{ + struct xe_exec_queue *q = vops->q; + struct xe_tile *tile; + int number_tiles = 0; + u8 id; + + for_each_tile(tile, vm->xe, id) { + if (vops->pt_update_ops[id].num_ops) + ++number_tiles; + + if (vops->pt_update_ops[id].q) + continue; + + if (q) { + vops->pt_update_ops[id].q = q; + if (vm->pt_root[id] && !list_empty(&q->multi_gt_list)) + q = list_next_entry(q, multi_gt_list); + } else { + vops->pt_update_ops[id].q = vm->q[id]; + } + } + + return number_tiles; +} + static struct dma_fence *ops_execute(struct xe_vm *vm, struct xe_vma_ops *vops) { - struct xe_vma_op *op, *next; + struct xe_tile *tile; struct dma_fence *fence = NULL; + struct dma_fence **fences = NULL; + struct dma_fence_array *cf = NULL; + int number_tiles = 0, current_fence = 0, err; + u8 id; - list_for_each_entry_safe(op, next, &vops->list, link) { - dma_fence_put(fence); - fence = xe_vma_op_execute(vm, op); - if (IS_ERR(fence)) { - drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld", - op->base.op, PTR_ERR(fence)); - fence = ERR_PTR(-ENOSPC); - break; + number_tiles = vm_ops_setup_tile_args(vm, vops); + if (number_tiles == 0) + return ERR_PTR(-ENODATA); + + if (number_tiles > 1) { + fences = kmalloc_array(number_tiles, sizeof(*fences), + 
GFP_KERNEL); + if (!fences) { + fence = ERR_PTR(-ENOMEM); + goto err_trace; } } + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + err = xe_pt_update_ops_prepare(tile, vops); + if (err) { + fence = ERR_PTR(err); + goto err_out; + } + } + + trace_xe_vm_ops_execute(vops); + + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + fence = xe_pt_update_ops_run(tile, vops); + if (IS_ERR(fence)) + goto err_out; + + if (fences) + fences[current_fence++] = fence; + } + + if (fences) { + cf = dma_fence_array_create(number_tiles, fences, + vm->composite_fence_ctx, + vm->composite_fence_seqno++, + false); + if (!cf) { + --vm->composite_fence_seqno; + fence = ERR_PTR(-ENOMEM); + goto err_out; + } + fence = &cf->base; + } + + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + xe_pt_update_ops_fini(tile, vops); + } + + return fence; + +err_out: + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + xe_pt_update_ops_abort(tile, vops); + } + while (current_fence) + dma_fence_put(fences[--current_fence]); + kfree(fences); + kfree(cf); + +err_trace: + trace_xe_vm_ops_fail(vm); return fence; } @@ -2892,12 +2687,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, fence = ops_execute(vm, vops); if (IS_ERR(fence)) { err = PTR_ERR(fence); - /* FIXME: Killing VM rather than proper error handling */ - xe_vm_kill(vm, false); goto unlock; - } else { - vm_bind_ioctl_ops_fini(vm, vops, fence); } + + vm_bind_ioctl_ops_fini(vm, vops, fence); } unlock: @@ -2905,11 +2698,18 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, return err; } -#define SUPPORTED_FLAGS \ +#define SUPPORTED_FLAGS_STUB \ (DRM_XE_VM_BIND_FLAG_READONLY | \ DRM_XE_VM_BIND_FLAG_IMMEDIATE | \ DRM_XE_VM_BIND_FLAG_NULL | \ DRM_XE_VM_BIND_FLAG_DUMPABLE) + +#ifdef TEST_VM_OPS_ERROR +#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR) +#else +#define SUPPORTED_FLAGS SUPPORTED_FLAGS_STUB +#endif + #define XE_64K_PAGE_MASK 0xffffull #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) @@ -2935,7 +2735,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, sizeof(struct drm_xe_vm_bind_op), GFP_KERNEL | __GFP_ACCOUNT); if (!*bind_ops) - return -ENOMEM; + return args->num_binds > 1 ? -ENOBUFS : -ENOMEM; err = __copy_from_user(*bind_ops, bind_user, sizeof(struct drm_xe_vm_bind_op) * @@ -3074,7 +2874,16 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo, return -EINVAL; } - if (bo->flags & XE_BO_FLAG_INTERNAL_64K) { + /* + * Some platforms require 64k VM_BIND alignment, + * specifically those with XE_VRAM_FLAGS_NEED64K. + * + * Other platforms may have BO's set to 64k physical placement, + * but can be mapped at 4k offsets anyway. This check is only + * there for the former case. 
+ */ + if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) && + (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) { if (XE_IOCTL_DBG(xe, obj_offset & XE_64K_PAGE_MASK) || XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) || @@ -3254,10 +3063,18 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto unwind_ops; } - err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs, - &vops, i == args->num_binds - 1); + err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); if (err) goto unwind_ops; + +#ifdef TEST_VM_OPS_ERROR + if (flags & FORCE_OP_ERROR) { + vops.inject_error = true; + vm->xe->vm_inject_error_position = + (vm->xe->vm_inject_error_position + 1) % + FORCE_OP_ERROR_COUNT; + } +#endif } /* Nothing to do */ @@ -3266,11 +3083,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto unwind_ops; } + err = xe_vma_ops_alloc(&vops, args->num_binds > 1); + if (err) + goto unwind_ops; + err = vm_bind_ioctl_ops_execute(vm, &vops); unwind_ops: if (err && err != -ENODATA) vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); + xe_vma_ops_fini(&vops); for (i = args->num_binds - 1; i >= 0; --i) if (ops[i]) drm_gpuva_ops_free(&vm->gpuvm, ops[i]); diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index b481608b12f1bf..c864dba35e1d5c 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm) return drm_gpuvm_resv(&vm->gpuvm); } +void xe_vm_kill(struct xe_vm *vm, bool unlocked); + /** * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held. * @vm: The vm diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index ce1a63a5e3e703..7f9a303e51d896 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -21,18 +21,27 @@ struct xe_bo; struct xe_sync_entry; struct xe_user_fence; struct xe_vm; +struct xe_vm_pgtable_update_op; + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +#define TEST_VM_OPS_ERROR +#define FORCE_OP_ERROR BIT(31) + +#define FORCE_OP_ERROR_LOCK 0 +#define FORCE_OP_ERROR_PREPARE 1 +#define FORCE_OP_ERROR_RUN 2 +#define FORCE_OP_ERROR_COUNT 3 +#endif #define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS #define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1) #define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2) -#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3) -#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4) -#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5) -#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6) -#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7) -#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8) -#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9) -#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10) +#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 3) +#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 4) +#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 5) +#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6) +#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7) +#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8) /** struct xe_userptr - User pointer */ struct xe_userptr { @@ -99,6 +108,9 @@ struct xe_vma { */ u8 tile_present; + /** @tile_staged: bind is staged for this VMA */ + u8 tile_staged; + /** * @pat_index: The pat index to use when encoding the PTEs for this vma. 
*/ @@ -314,31 +326,18 @@ struct xe_vma_op_prefetch { /** enum xe_vma_op_flags - flags for VMA operation */ enum xe_vma_op_flags { - /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */ - XE_VMA_OP_FIRST = BIT(0), - /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */ - XE_VMA_OP_LAST = BIT(1), /** @XE_VMA_OP_COMMITTED: VMA operation committed */ - XE_VMA_OP_COMMITTED = BIT(2), + XE_VMA_OP_COMMITTED = BIT(0), /** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */ - XE_VMA_OP_PREV_COMMITTED = BIT(3), + XE_VMA_OP_PREV_COMMITTED = BIT(1), /** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */ - XE_VMA_OP_NEXT_COMMITTED = BIT(4), + XE_VMA_OP_NEXT_COMMITTED = BIT(2), }; /** struct xe_vma_op - VMA operation */ struct xe_vma_op { /** @base: GPUVA base operation */ struct drm_gpuva_op base; - /** @q: exec queue for this operation */ - struct xe_exec_queue *q; - /** - * @syncs: syncs for this operation, only used on first and last - * operation - */ - struct xe_sync_entry *syncs; - /** @num_syncs: number of syncs */ - u32 num_syncs; /** @link: async operation link */ struct list_head link; /** @flags: operation flags */ @@ -362,12 +361,18 @@ struct xe_vma_ops { struct list_head list; /** @vm: VM */ struct xe_vm *vm; - /** @q: exec queue these operations */ + /** @q: exec queue for VMA operations */ struct xe_exec_queue *q; /** @syncs: syncs these operation */ struct xe_sync_entry *syncs; /** @num_syncs: number of syncs */ u32 num_syncs; + /** @pt_update_ops: page table update operations */ + struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE]; +#ifdef TEST_VM_OPS_ERROR + /** @inject_error: inject error to test error handling */ + bool inject_error; +#endif }; #endif diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index 5bcd59190353ea..80ba2fc78837ac 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -182,6 +182,7 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size) offset = offset_hi << 32; /* HW view bits 39:32 */ offset |= offset_lo << 6; /* HW view bits 31:6 */ offset *= num_enabled; /* convert to SW view */ + offset = round_up(offset, SZ_128K); /* SW must round up to nearest 128K */ /* We don't expect any holes */ xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size), diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index e648265d081be2..d424992514a4dc 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -733,6 +733,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = { DIS_PARTIAL_AUTOSTRIP | DIS_AUTOSTRIP)) }, + { XE_RTP_NAME("15016589081"), + XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) + }, {} }; @@ -759,6 +763,7 @@ void xe_wa_process_oob(struct xe_gt *gt) xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.oob, ARRAY_SIZE(oob_was)); + gt->wa_active.oob_initialized = true; xe_rtp_process(&ctx, oob_was); } diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h index db9ddeaf69bf34..52337405b5bc8c 100644 --- a/drivers/gpu/drm/xe/xe_wa.h +++ b/drivers/gpu/drm/xe/xe_wa.h @@ -6,6 +6,8 @@ #ifndef _XE_WA_ #define _XE_WA_ +#include "xe_assert.h" + struct drm_printer; struct xe_gt; struct xe_hw_engine; @@ -25,6 +27,9 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p); * @gt__: gt instance * @id__: XE_OOB_, as generated by build system in generated/xe_wa_oob.h */ -#define XE_WA(gt__, id__) 
test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob) +#define XE_WA(gt__, id__) ({ \ + xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \ + test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \ +}) #endif diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 08f7336881e32d..920ca506014661 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -27,6 +27,13 @@ 16022287689 GRAPHICS_VERSION(2001) GRAPHICS_VERSION(2004) 13011645652 GRAPHICS_VERSION(2004) +14022293748 GRAPHICS_VERSION(2001) + GRAPHICS_VERSION(2004) +22019794406 GRAPHICS_VERSION(2001) + GRAPHICS_VERSION(2004) 22019338487 MEDIA_VERSION(2000) GRAPHICS_VERSION(2001) +22019338487_display PLATFORM(LUNARLAKE) 16023588340 GRAPHICS_VERSION(2001) +14019789679 GRAPHICS_VERSION(1255) + GRAPHICS_VERSION_RANGE(1270, 2004) diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c index f69721339201d6..d46fa8374980cd 100644 --- a/drivers/gpu/drm/xe/xe_wait_user_fence.c +++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "xe_device.h" #include "xe_gt.h" diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index f006bc931324fa..b62e4f0e8130fb 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -404,9 +404,10 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) if (err < 0) goto put_group; - host->domain = iommu_domain_alloc(&platform_bus_type); - if (!host->domain) { - err = -ENOMEM; + host->domain = iommu_paging_domain_alloc(host->dev); + if (IS_ERR(host->domain)) { + err = PTR_ERR(host->domain); + host->domain = NULL; goto put_cache; } diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index 925a118db23f57..92031b240a1796 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -81,6 +82,7 @@ struct host1x_intr_ops { void (*disable_syncpt_intr)(struct host1x *host, unsigned int id); void (*disable_all_syncpt_intrs)(struct host1x *host); int (*free_syncpt_irq)(struct host1x *host); + irqreturn_t (*isr)(int irq, void *dev_id); }; struct host1x_sid_entry { diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c index 9880e0c47235e1..415f8d7e42021b 100644 --- a/drivers/gpu/host1x/hw/intr_hw.c +++ b/drivers/gpu/host1x/hw/intr_hw.c @@ -6,18 +6,11 @@ * Copyright (c) 2010-2013, NVIDIA Corporation. */ -#include -#include #include #include "../intr.h" #include "../dev.h" -struct host1x_intr_irq_data { - struct host1x *host; - u32 offset; -}; - static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id) { struct host1x_intr_irq_data *irq_data = dev_id; @@ -54,7 +47,8 @@ static void host1x_intr_disable_all_syncpt_intrs(struct host1x *host) } } -static void intr_hw_init(struct host1x *host, u32 cpm) +static int +host1x_intr_init_host_sync(struct host1x *host, u32 cpm) { #if HOST1X_HW < 6 /* disable the ip_busy_timeout. 
this prevents write drops */ @@ -85,32 +79,6 @@ static void intr_hw_init(struct host1x *host, u32 cpm) host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id)); } #endif -} - -static int -host1x_intr_init_host_sync(struct host1x *host, u32 cpm) -{ - int err, i; - struct host1x_intr_irq_data *irq_data; - - irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL); - if (!irq_data) - return -ENOMEM; - - host1x_hw_intr_disable_all_syncpt_intrs(host); - - for (i = 0; i < host->num_syncpt_irqs; i++) { - irq_data[i].host = host; - irq_data[i].offset = i; - - err = devm_request_irq(host->dev, host->syncpt_irqs[i], - syncpt_thresh_isr, IRQF_SHARED, - "host1x_syncpt", &irq_data[i]); - if (err < 0) - return err; - } - - intr_hw_init(host, cpm); return 0; } @@ -144,4 +112,5 @@ static const struct host1x_intr_ops host1x_intr_ops = { .enable_syncpt_intr = host1x_intr_enable_syncpt_intr, .disable_syncpt_intr = host1x_intr_disable_syncpt_intr, .disable_all_syncpt_intrs = host1x_intr_disable_all_syncpt_intrs, + .isr = syncpt_thresh_isr, }; diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c index 995bfa980837a4..b3285dd101804c 100644 --- a/drivers/gpu/host1x/intr.c +++ b/drivers/gpu/host1x/intr.c @@ -6,7 +6,7 @@ */ #include - +#include #include "dev.h" #include "fence.h" #include "intr.h" @@ -100,7 +100,9 @@ void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id) int host1x_intr_init(struct host1x *host) { + struct host1x_intr_irq_data *irq_data; unsigned int id; + int i, err; mutex_init(&host->intr_mutex); @@ -111,6 +113,23 @@ int host1x_intr_init(struct host1x *host) INIT_LIST_HEAD(&syncpt->fences.list); } + irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + host1x_hw_intr_disable_all_syncpt_intrs(host); + + for (i = 0; i < host->num_syncpt_irqs; i++) { + irq_data[i].host = host; + irq_data[i].offset = i; + + err = devm_request_irq(host->dev, host->syncpt_irqs[i], + host->intr_op->isr, IRQF_SHARED, + "host1x_syncpt", &irq_data[i]); + if (err < 0) + return err; + } + return 0; } diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h index 3b5610b525e586..11cdf13e32fe2a 100644 --- a/drivers/gpu/host1x/intr.h +++ b/drivers/gpu/host1x/intr.h @@ -11,6 +11,11 @@ struct host1x; struct host1x_syncpt_fence; +struct host1x_intr_irq_data { + struct host1x *host; + u32 offset; +}; + /* Initialize host1x sync point interrupt */ int host1x_intr_init(struct host1x *host); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 365e6ddbe90fe7..18f2c92beff8ed 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -926,8 +926,7 @@ static void vga_switcheroo_debugfs_init(struct vgasr_priv *priv) /** * vga_switcheroo_process_delayed_switch() - helper for delayed switching * - * Process a delayed switch if one is pending. DRM drivers should call this - * from their ->lastclose callback. + * Process a delayed switch if one is pending. * * Return: 0 on success. 
-EINVAL if no delayed switch is pending, if the client * has unregistered in the meantime or if there are other clients blocking the diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig index ab81ceceb3371d..c3f056d28b01b9 100644 --- a/drivers/greybus/Kconfig +++ b/drivers/greybus/Kconfig @@ -21,6 +21,8 @@ config GREYBUS_BEAGLEPLAY tristate "Greybus BeaglePlay driver" depends on SERIAL_DEV_BUS select CRC_CCITT + select FW_LOADER + select FW_UPLOAD help Select this option if you have a BeaglePlay where CC1352 co-processor acts as Greybus SVC. diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c index 69e46b1dff1fd5..7630a36ecf8126 100644 --- a/drivers/greybus/es2.c +++ b/drivers/greybus/es2.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "arpc.h" #include "greybus_trace.h" diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c index 33f8fad70260a4..473ac3f2d38219 100644 --- a/drivers/greybus/gb-beagleplay.c +++ b/drivers/greybus/gb-beagleplay.c @@ -6,21 +6,19 @@ * Copyright (c) 2023 BeagleBoard.org Foundation */ -#include +#include +#include +#include +#include #include -#include -#include -#include #include -#include -#include -#include -#include -#include #include #include -#include -#include + +#define CC1352_FIRMWARE_SIZE (704 * 1024) +#define CC1352_BOOTLOADER_TIMEOUT 2000 +#define CC1352_BOOTLOADER_ACK 0xcc +#define CC1352_BOOTLOADER_NACK 0x33 #define RX_HDLC_PAYLOAD 256 #define CRC_LEN 2 @@ -57,6 +55,17 @@ * @rx_buffer_len: length of receive buffer filled. * @rx_buffer: hdlc frame receive buffer * @rx_in_esc: hdlc rx flag to indicate ESC frame + * + * @fwl: underlying firmware upload device + * @bootloader_backdoor_gpio: cc1352p7 boot gpio + * @rst_gpio: cc1352p7 reset gpio + * @flashing_mode: flag to indicate that flashing is currently in progress + * @fwl_ack_com: completion to signal an Ack/Nack + * @fwl_ack: Ack/Nack byte received + * @fwl_cmd_response_com: completion to signal a bootloader command response + * @fwl_cmd_response: bootloader command response data + * @fwl_crc32: crc32 of firmware to flash + * @fwl_reset_addr: flag to indicate if we need to send COMMAND_DOWNLOAD again */ struct gb_beagleplay { struct serdev_device *sd; @@ -72,6 +81,17 @@ struct gb_beagleplay { u16 rx_buffer_len; bool rx_in_esc; u8 rx_buffer[MAX_RX_HDLC]; + + struct fw_upload *fwl; + struct gpio_desc *bootloader_backdoor_gpio; + struct gpio_desc *rst_gpio; + bool flashing_mode; + struct completion fwl_ack_com; + u8 fwl_ack; + struct completion fwl_cmd_response_com; + u32 fwl_cmd_response; + u32 fwl_crc32; + bool fwl_reset_addr; }; /** @@ -100,6 +120,87 @@ struct hdlc_greybus_frame { u8 payload[]; } __packed; +/** + * enum cc1352_bootloader_cmd: CC1352 Bootloader Commands + * + * @COMMAND_DOWNLOAD: Prepares flash programming + * @COMMAND_GET_STATUS: Returns the status of the last command that was issued + * @COMMAND_SEND_DATA: Transfers data and programs flash + * @COMMAND_RESET: Performs a system reset + * @COMMAND_CRC32: Calculates CRC32 over a specified memory area + * @COMMAND_BANK_ERASE: Performs an erase of all of the customer-accessible + * flash sectors not protected by FCFG1 and CCFG + * writeprotect bits. 
+ * + * CC1352 Bootloader serial bus commands + */ +enum cc1352_bootloader_cmd { + COMMAND_DOWNLOAD = 0x21, + COMMAND_GET_STATUS = 0x23, + COMMAND_SEND_DATA = 0x24, + COMMAND_RESET = 0x25, + COMMAND_CRC32 = 0x27, + COMMAND_BANK_ERASE = 0x2c, +}; + +/** + * enum cc1352_bootloader_status: CC1352 Bootloader COMMAND_GET_STATUS response + * + * @COMMAND_RET_SUCCESS: Status for successful command + * @COMMAND_RET_UNKNOWN_CMD: Status for unknown command + * @COMMAND_RET_INVALID_CMD: Status for invalid command (in other words, + * incorrect packet size) + * @COMMAND_RET_INVALID_ADR: Status for invalid input address + * @COMMAND_RET_FLASH_FAIL: Status for failing flash erase or program operation + */ +enum cc1352_bootloader_status { + COMMAND_RET_SUCCESS = 0x40, + COMMAND_RET_UNKNOWN_CMD = 0x41, + COMMAND_RET_INVALID_CMD = 0x42, + COMMAND_RET_INVALID_ADR = 0x43, + COMMAND_RET_FLASH_FAIL = 0x44, +}; + +/** + * struct cc1352_bootloader_packet: CC1352 Bootloader Request Packet + * + * @len: length of packet + optional request data + * @checksum: 8-bit checksum excluding len + * @cmd: bootloader command + */ +struct cc1352_bootloader_packet { + u8 len; + u8 checksum; + u8 cmd; +} __packed; + +#define CC1352_BOOTLOADER_PKT_MAX_SIZE \ + (U8_MAX - sizeof(struct cc1352_bootloader_packet)) + +/** + * struct cc1352_bootloader_download_cmd_data: CC1352 Bootloader COMMAND_DOWNLOAD request data + * + * @addr: address to start programming data into + * @size: size of data that will be sent + */ +struct cc1352_bootloader_download_cmd_data { + __be32 addr; + __be32 size; +} __packed; + +/** + * struct cc1352_bootloader_crc32_cmd_data: CC1352 Bootloader COMMAND_CRC32 request data + * + * @addr: address where crc32 calculation starts + * @size: number of bytes comprised by crc32 calculation + * @read_repeat: number of read repeats for each data location + */ +struct cc1352_bootloader_crc32_cmd_data { + __be32 addr; + __be32 size; + __be32 read_repeat; +} __packed; + static void hdlc_rx_greybus_frame(struct gb_beagleplay *bg, u8 *buf, u16 len) { struct hdlc_greybus_frame *gb_frame = (struct hdlc_greybus_frame *)buf; @@ -331,11 +432,135 @@ static void hdlc_deinit(struct gb_beagleplay *bg) flush_work(&bg->tx_work); } +/** + * csum8: Calculate 8-bit checksum on data + * + * @data: bytes to calculate 8-bit checksum of + * @size: number of bytes + * @base: starting value for checksum + */ +static u8 csum8(const u8 *data, size_t size, u8 base) +{ + size_t i; + u8 sum = base; + + for (i = 0; i < size; ++i) + sum += data[i]; + + return sum; +} + +static void cc1352_bootloader_send_ack(struct gb_beagleplay *bg) +{ + static const u8 ack[] = { 0x00, CC1352_BOOTLOADER_ACK }; + + serdev_device_write_buf(bg->sd, ack, sizeof(ack)); +} + +static void cc1352_bootloader_send_nack(struct gb_beagleplay *bg) +{ + static const u8 nack[] = { 0x00, CC1352_BOOTLOADER_NACK }; + + serdev_device_write_buf(bg->sd, nack, sizeof(nack)); +} + +/** + * cc1352_bootloader_pkt_rx: Process a CC1352 Bootloader Packet + * + * @bg: beagleplay greybus driver + * @data: packet buffer + * @count: packet buffer size + * + * @return: number of bytes processed + * + * Here are the steps to successfully receive a packet from cc1352 bootloader + * according to the docs: + * 1. Wait for nonzero data to be returned from the device. This is important + * as the device may send zero bytes between a sent and a received data + * packet. The first nonzero byte received is the size of the packet that is + * being received. + * 2. 
Read the next byte, which is the checksum for the packet. + * 3. Read the data bytes from the device. During the data phase, packet size + * minus 2 bytes is sent. + * 4. Calculate the checksum of the data bytes and verify it matches the + * checksum received in the packet. + * 5. Send an acknowledge byte or a not-acknowledge byte to the device to + * indicate the successful or unsuccessful reception of the packet. + */ +static int cc1352_bootloader_pkt_rx(struct gb_beagleplay *bg, const u8 *data, + size_t count) +{ + bool is_valid = false; + + switch (data[0]) { + /* Skip 0x00 bytes. */ + case 0x00: + return 1; + case CC1352_BOOTLOADER_ACK: + case CC1352_BOOTLOADER_NACK: + WRITE_ONCE(bg->fwl_ack, data[0]); + complete(&bg->fwl_ack_com); + return 1; + case 3: + if (count < 3) + return 0; + is_valid = data[1] == data[2]; + WRITE_ONCE(bg->fwl_cmd_response, (u32)data[2]); + break; + case 6: + if (count < 6) + return 0; + is_valid = csum8(&data[2], sizeof(__be32), 0) == data[1]; + WRITE_ONCE(bg->fwl_cmd_response, get_unaligned_be32(&data[2])); + break; + default: + return -EINVAL; + } + + if (is_valid) { + cc1352_bootloader_send_ack(bg); + complete(&bg->fwl_cmd_response_com); + } else { + dev_warn(&bg->sd->dev, + "Dropping bootloader packet with invalid checksum"); + cc1352_bootloader_send_nack(bg); + } + + return data[0]; +} + +static size_t cc1352_bootloader_rx(struct gb_beagleplay *bg, const u8 *data, + size_t count) +{ + int ret; + size_t off = 0; + + memcpy(bg->rx_buffer + bg->rx_buffer_len, data, count); + bg->rx_buffer_len += count; + + do { + ret = cc1352_bootloader_pkt_rx(bg, bg->rx_buffer + off, + bg->rx_buffer_len - off); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, ret, + "Invalid Packet"); + off += ret; + } while (ret > 0 && off < count); + + bg->rx_buffer_len -= off; + memmove(bg->rx_buffer, bg->rx_buffer + off, bg->rx_buffer_len); + + return count; +} + static size_t gb_tty_receive(struct serdev_device *sd, const u8 *data, size_t count) { struct gb_beagleplay *bg = serdev_device_get_drvdata(sd); + if (READ_ONCE(bg->flashing_mode)) + return cc1352_bootloader_rx(bg, data, count); + return hdlc_rx(bg, data, count); } @@ -343,7 +568,8 @@ static void gb_tty_wakeup(struct serdev_device *serdev) { struct gb_beagleplay *bg = serdev_device_get_drvdata(serdev); - schedule_work(&bg->tx_work); + if (!READ_ONCE(bg->flashing_mode)) + schedule_work(&bg->tx_work); } static struct serdev_device_ops gb_beagleplay_ops = { @@ -412,6 +638,195 @@ static void gb_beagleplay_stop_svc(struct gb_beagleplay *bg) hdlc_tx_frames(bg, ADDRESS_CONTROL, 0x03, &payload, 1); } +static int cc1352_bootloader_wait_for_ack(struct gb_beagleplay *bg) +{ + int ret; + + ret = wait_for_completion_timeout( + &bg->fwl_ack_com, msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT)); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, ret, + "Failed to acquire ack semaphore"); + + switch (READ_ONCE(bg->fwl_ack)) { + case CC1352_BOOTLOADER_ACK: + return 0; + case CC1352_BOOTLOADER_NACK: + return -EAGAIN; + default: + return -EINVAL; + } +} + +static int cc1352_bootloader_sync(struct gb_beagleplay *bg) +{ + static const u8 sync_bytes[] = { 0x55, 0x55 }; + + serdev_device_write_buf(bg->sd, sync_bytes, sizeof(sync_bytes)); + return cc1352_bootloader_wait_for_ack(bg); +} + +static int cc1352_bootloader_get_status(struct gb_beagleplay *bg) +{ + int ret; + static const struct cc1352_bootloader_packet pkt = { + .len = sizeof(pkt), + .checksum = COMMAND_GET_STATUS, + .cmd = COMMAND_GET_STATUS + }; + + serdev_device_write_buf(bg->sd, (const 
u8 *)&pkt, sizeof(pkt)); + ret = cc1352_bootloader_wait_for_ack(bg); + if (ret < 0) + return ret; + + ret = wait_for_completion_timeout( + &bg->fwl_cmd_response_com, + msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT)); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, ret, + "Failed to acquire last status semaphore"); + + switch (READ_ONCE(bg->fwl_cmd_response)) { + case COMMAND_RET_SUCCESS: + return 0; + default: + return -EINVAL; + } + + return 0; +} + +static int cc1352_bootloader_erase(struct gb_beagleplay *bg) +{ + int ret; + static const struct cc1352_bootloader_packet pkt = { + .len = sizeof(pkt), + .checksum = COMMAND_BANK_ERASE, + .cmd = COMMAND_BANK_ERASE + }; + + serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt)); + + ret = cc1352_bootloader_wait_for_ack(bg); + if (ret < 0) + return ret; + + return cc1352_bootloader_get_status(bg); +} + +static int cc1352_bootloader_reset(struct gb_beagleplay *bg) +{ + static const struct cc1352_bootloader_packet pkt = { + .len = sizeof(pkt), + .checksum = COMMAND_RESET, + .cmd = COMMAND_RESET + }; + + serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt)); + + return cc1352_bootloader_wait_for_ack(bg); +} + +/** + * cc1352_bootloader_empty_pkt: Calculate the number of empty bytes in the current packet + * + * @data: packet bytes array to check + * @size: number of bytes in array + */ +static size_t cc1352_bootloader_empty_pkt(const u8 *data, size_t size) +{ + size_t i; + + for (i = 0; i < size && data[i] == 0xff; ++i) + continue; + + return i; +} + +static int cc1352_bootloader_crc32(struct gb_beagleplay *bg, u32 *crc32) +{ + int ret; + static const struct cc1352_bootloader_crc32_cmd_data cmd_data = { + .addr = 0, .size = cpu_to_be32(704 * 1024), .read_repeat = 0 + }; + const struct cc1352_bootloader_packet pkt = { + .len = sizeof(pkt) + sizeof(cmd_data), + .checksum = csum8((const void *)&cmd_data, sizeof(cmd_data), + COMMAND_CRC32), + .cmd = COMMAND_CRC32 + }; + + serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt)); + serdev_device_write_buf(bg->sd, (const u8 *)&cmd_data, + sizeof(cmd_data)); + + ret = cc1352_bootloader_wait_for_ack(bg); + if (ret < 0) + return ret; + + ret = wait_for_completion_timeout( + &bg->fwl_cmd_response_com, + msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT)); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, ret, + "Failed to acquire last status semaphore"); + + *crc32 = READ_ONCE(bg->fwl_cmd_response); + + return 0; +} + +static int cc1352_bootloader_download(struct gb_beagleplay *bg, u32 size, + u32 addr) +{ + int ret; + const struct cc1352_bootloader_download_cmd_data cmd_data = { + .addr = cpu_to_be32(addr), + .size = cpu_to_be32(size), + }; + const struct cc1352_bootloader_packet pkt = { + .len = sizeof(pkt) + sizeof(cmd_data), + .checksum = csum8((const void *)&cmd_data, sizeof(cmd_data), + COMMAND_DOWNLOAD), + .cmd = COMMAND_DOWNLOAD + }; + + serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt)); + serdev_device_write_buf(bg->sd, (const u8 *)&cmd_data, + sizeof(cmd_data)); + + ret = cc1352_bootloader_wait_for_ack(bg); + if (ret < 0) + return ret; + + return cc1352_bootloader_get_status(bg); +} + +static int cc1352_bootloader_send_data(struct gb_beagleplay *bg, const u8 *data, + size_t size) +{ + int ret, rem = min(size, CC1352_BOOTLOADER_PKT_MAX_SIZE); + const struct cc1352_bootloader_packet pkt = { + .len = sizeof(pkt) + rem, + .checksum = csum8(data, rem, COMMAND_SEND_DATA), + .cmd = COMMAND_SEND_DATA + }; + + serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt)); + 
serdev_device_write_buf(bg->sd, data, rem); + + ret = cc1352_bootloader_wait_for_ack(bg); + if (ret < 0) + return ret; + + ret = cc1352_bootloader_get_status(bg); + if (ret < 0) + return ret; + + return rem; +} + static void gb_greybus_deinit(struct gb_beagleplay *bg) { gb_hd_del(bg->gb_hd); @@ -442,6 +857,157 @@ static int gb_greybus_init(struct gb_beagleplay *bg) return ret; } +static enum fw_upload_err cc1352_prepare(struct fw_upload *fw_upload, + const u8 *data, u32 size) +{ + int ret; + u32 curr_crc32; + struct gb_beagleplay *bg = fw_upload->dd_handle; + + dev_info(&bg->sd->dev, "CC1352 Start Flashing..."); + + if (size != CC1352_FIRMWARE_SIZE) + return FW_UPLOAD_ERR_INVALID_SIZE; + + /* Might involve network calls */ + gb_greybus_deinit(bg); + msleep(5 * MSEC_PER_SEC); + + gb_beagleplay_stop_svc(bg); + msleep(200); + flush_work(&bg->tx_work); + + serdev_device_wait_until_sent(bg->sd, CC1352_BOOTLOADER_TIMEOUT); + + WRITE_ONCE(bg->flashing_mode, true); + + gpiod_direction_output(bg->bootloader_backdoor_gpio, 0); + gpiod_direction_output(bg->rst_gpio, 0); + msleep(200); + + gpiod_set_value(bg->rst_gpio, 1); + msleep(200); + + gpiod_set_value(bg->bootloader_backdoor_gpio, 1); + msleep(200); + + gpiod_direction_input(bg->bootloader_backdoor_gpio); + gpiod_direction_input(bg->rst_gpio); + + ret = cc1352_bootloader_sync(bg); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR, + "Failed to sync"); + + ret = cc1352_bootloader_crc32(bg, &curr_crc32); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR, + "Failed to fetch crc32"); + + bg->fwl_crc32 = crc32(0xffffffff, data, size) ^ 0xffffffff; + + /* Check if attempting to reflash same firmware */ + if (bg->fwl_crc32 == curr_crc32) { + dev_warn(&bg->sd->dev, "Skipping reflashing same image"); + cc1352_bootloader_reset(bg); + WRITE_ONCE(bg->flashing_mode, false); + msleep(200); + gb_greybus_init(bg); + gb_beagleplay_start_svc(bg); + return FW_UPLOAD_ERR_FW_INVALID; + } + + ret = cc1352_bootloader_erase(bg); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR, + "Failed to erase"); + + bg->fwl_reset_addr = true; + + return FW_UPLOAD_ERR_NONE; +} + +static void cc1352_cleanup(struct fw_upload *fw_upload) +{ + struct gb_beagleplay *bg = fw_upload->dd_handle; + + WRITE_ONCE(bg->flashing_mode, false); +} + +static enum fw_upload_err cc1352_write(struct fw_upload *fw_upload, + const u8 *data, u32 offset, u32 size, + u32 *written) +{ + int ret; + size_t empty_bytes; + struct gb_beagleplay *bg = fw_upload->dd_handle; + + /* Skip 0xff packets. 
Significant performance improvement */ + empty_bytes = cc1352_bootloader_empty_pkt(data + offset, size); + if (empty_bytes >= CC1352_BOOTLOADER_PKT_MAX_SIZE) { + bg->fwl_reset_addr = true; + *written = empty_bytes; + return FW_UPLOAD_ERR_NONE; + } + + if (bg->fwl_reset_addr) { + ret = cc1352_bootloader_download(bg, size, offset); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, + FW_UPLOAD_ERR_HW_ERROR, + "Failed to send download cmd"); + + bg->fwl_reset_addr = false; + } + + ret = cc1352_bootloader_send_data(bg, data + offset, size); + if (ret < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR, + "Failed to flash firmware"); + *written = ret; + + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err cc1352_poll_complete(struct fw_upload *fw_upload) +{ + u32 curr_crc32; + struct gb_beagleplay *bg = fw_upload->dd_handle; + + if (cc1352_bootloader_crc32(bg, &curr_crc32) < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR, + "Failed to fetch crc32"); + + if (bg->fwl_crc32 != curr_crc32) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_FW_INVALID, + "Invalid CRC32"); + + if (cc1352_bootloader_reset(bg) < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR, + "Failed to reset"); + + dev_info(&bg->sd->dev, "CC1352 Flashing Successful"); + WRITE_ONCE(bg->flashing_mode, false); + msleep(200); + + if (gb_greybus_init(bg) < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_RW_ERROR, + "Failed to initialize greybus"); + + gb_beagleplay_start_svc(bg); + + return FW_UPLOAD_ERR_NONE; +} + +static void cc1352_cancel(struct fw_upload *fw_upload) +{ + struct gb_beagleplay *bg = fw_upload->dd_handle; + + dev_info(&bg->sd->dev, "CC1352 Bootloader Cancel"); + + cc1352_bootloader_reset(bg); +} + static void gb_serdev_deinit(struct gb_beagleplay *bg) { serdev_device_close(bg->sd); @@ -463,6 +1029,65 @@ static int gb_serdev_init(struct gb_beagleplay *bg) return 0; } +static const struct fw_upload_ops cc1352_bootloader_ops = { + .prepare = cc1352_prepare, + .write = cc1352_write, + .poll_complete = cc1352_poll_complete, + .cancel = cc1352_cancel, + .cleanup = cc1352_cleanup +}; + +static int gb_fw_init(struct gb_beagleplay *bg) +{ + int ret; + struct fw_upload *fwl; + struct gpio_desc *desc; + + bg->fwl = NULL; + bg->bootloader_backdoor_gpio = NULL; + bg->rst_gpio = NULL; + bg->flashing_mode = false; + bg->fwl_cmd_response = 0; + bg->fwl_ack = 0; + init_completion(&bg->fwl_ack_com); + init_completion(&bg->fwl_cmd_response_com); + + desc = devm_gpiod_get(&bg->sd->dev, "bootloader-backdoor", GPIOD_IN); + if (IS_ERR(desc)) + return PTR_ERR(desc); + bg->bootloader_backdoor_gpio = desc; + + desc = devm_gpiod_get(&bg->sd->dev, "reset", GPIOD_IN); + if (IS_ERR(desc)) { + ret = PTR_ERR(desc); + goto free_boot; + } + bg->rst_gpio = desc; + + fwl = firmware_upload_register(THIS_MODULE, &bg->sd->dev, "cc1352p7", + &cc1352_bootloader_ops, bg); + if (IS_ERR(fwl)) { + ret = PTR_ERR(fwl); + goto free_reset; + } + bg->fwl = fwl; + + return 0; + +free_reset: + devm_gpiod_put(&bg->sd->dev, bg->rst_gpio); + bg->rst_gpio = NULL; +free_boot: + devm_gpiod_put(&bg->sd->dev, bg->bootloader_backdoor_gpio); + bg->bootloader_backdoor_gpio = NULL; + return ret; +} + +static void gb_fw_deinit(struct gb_beagleplay *bg) +{ + firmware_upload_unregister(bg->fwl); +} + static int gb_beagleplay_probe(struct serdev_device *serdev) { int ret = 0; @@ -481,14 +1106,20 @@ static int gb_beagleplay_probe(struct serdev_device *serdev) if (ret) goto free_serdev; - ret = gb_greybus_init(bg); + ret = 
gb_fw_init(bg); if (ret) goto free_hdlc; + ret = gb_greybus_init(bg); + if (ret) + goto free_fw; + gb_beagleplay_start_svc(bg); return 0; +free_fw: + gb_fw_deinit(bg); free_hdlc: hdlc_deinit(bg); free_serdev: @@ -500,6 +1131,7 @@ static void gb_beagleplay_remove(struct serdev_device *serdev) { struct gb_beagleplay *bg = serdev_device_get_drvdata(serdev); + gb_fw_deinit(bg); gb_greybus_deinit(bg); gb_beagleplay_stop_svc(bg); hdlc_deinit(bg); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 08446c89eff6e4..f8a56d6312425a 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -404,6 +404,12 @@ config HID_VIVALDI_COMMON option so that drivers can use common code to parse the HID descriptors for vivaldi function row keymap. +config HID_GOODIX_SPI + tristate "Goodix GT7986U SPI HID touchscreen" + depends on SPI_MASTER + help + Support for Goodix GT7986U SPI HID touchscreen device. + config HID_GOOGLE_HAMMER tristate "Google Hammer Keyboard" select HID_VIVALDI_COMMON diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index e40f1ddebbb713..496dab54c73a82 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -54,6 +54,7 @@ obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o obj-$(CONFIG_HID_GFRM) += hid-gfrm.o obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o obj-$(CONFIG_HID_VIVALDI_COMMON) += hid-vivaldi-common.o +obj-$(CONFIG_HID_GOODIX_SPI) += hid-goodix-spi.o obj-$(CONFIG_HID_GOOGLE_HAMMER) += hid-google-hammer.o obj-$(CONFIG_HID_GOOGLE_STADIA_FF) += hid-google-stadiaff.o obj-$(CONFIG_HID_VIVALDI) += hid-vivaldi.o diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h index 97296f587bc727..1c91be8daeddf3 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h @@ -73,8 +73,6 @@ struct amdtp_hid_data { }; /* Interface functions between HID LL driver and AMD SFH client */ -void hid_amdtp_set_feature(struct hid_device *hid, char *buf, u32 len, int report_id); -void hid_amdtp_get_report(struct hid_device *hid, int report_id, int report_type); int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data); void amdtp_hid_remove(struct amdtp_cl_data *cli_data); int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type); diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c index 621793d92464bb..db36d87d563417 100644 --- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c +++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c @@ -299,8 +299,8 @@ static void amd_sfh_set_ops(struct amd_mp2_dev *mp2) sfh_interface_init(mp2); mp2_ops = mp2->mp2_ops; - mp2_ops->clear_intr = amd_sfh_clear_intr_v2, - mp2_ops->init_intr = amd_sfh_irq_init_v2, + mp2_ops->clear_intr = amd_sfh_clear_intr_v2; + mp2_ops->init_intr = amd_sfh_irq_init_v2; mp2_ops->suspend = amd_sfh_suspend; mp2_ops->resume = amd_sfh_resume; mp2_ops->remove = amd_mp2_pci_remove; diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c index a272a086c95081..8420c227e21b3a 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.c +++ b/drivers/hid/bpf/hid_bpf_dispatch.c @@ -148,7 +148,7 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev, } EXPORT_SYMBOL_GPL(dispatch_hid_bpf_output_report); -u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size) +u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size) { int ret; struct hid_bpf_ctx_kern ctx_kern = { @@ -179,9 +179,7 @@ u8 
*call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s *size = ret; } - rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL); - - return rdesc; + return krealloc(ctx_kern.data, *size, GFP_KERNEL); ignore_bpf: kfree(ctx_kern.data); diff --git a/drivers/hid/bpf/hid_bpf_struct_ops.c b/drivers/hid/bpf/hid_bpf_struct_ops.c index cd696c59ba0f4f..702c22fae136aa 100644 --- a/drivers/hid/bpf/hid_bpf_struct_ops.c +++ b/drivers/hid/bpf/hid_bpf_struct_ops.c @@ -276,9 +276,23 @@ static int __hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx) return 0; } +static int __hid_bpf_hw_request(struct hid_bpf_ctx *ctx, unsigned char reportnum, + enum hid_report_type rtype, enum hid_class_request reqtype, + u64 source) +{ + return 0; +} + +static int __hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, u64 source) +{ + return 0; +} + static struct hid_bpf_ops __bpf_hid_bpf_ops = { .hid_device_event = __hid_bpf_device_event, .hid_rdesc_fixup = __hid_bpf_rdesc_fixup, + .hid_hw_request = __hid_bpf_hw_request, + .hid_hw_output_report = __hid_bpf_hw_output_report, }; static struct bpf_struct_ops bpf_hid_bpf_ops = { diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index 669d769ea1dcd1..ba00f6e6324b09 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "hid-ids.h" /* ALPS Device Product ID */ diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index af5cf94f9dea3a..7e1ae2a2bcc247 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -620,7 +620,7 @@ static void apple_battery_timer_tick(struct timer_list *t) * MacBook JIS keyboard has wrong logical maximum * Magic Keyboard JIS has wrong logical maximum */ -static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct apple_sc *asc = hid_get_drvdata(hdev); diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index a282388b7aa5c1..a4b47319ad8ead 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -492,12 +492,19 @@ static void asus_kbd_backlight_work(struct work_struct *work) */ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev) { + struct asus_drvdata *drvdata = hid_get_drvdata(hdev); u32 value; int ret; if (!IS_ENABLED(CONFIG_ASUS_WMI)) return false; + if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && + dmi_check_system(asus_use_hid_led_dmi_ids)) { + hid_info(hdev, "using HID for asus::kbd_backlight\n"); + return false; + } + ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value); hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value); @@ -1119,7 +1126,7 @@ static const __u8 asus_g752_fixed_rdesc[] = { 0x2A, 0xFF, 0x00, /* Usage Maximum (0xFF) */ }; -static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); diff --git a/drivers/hid/hid-aureal.c b/drivers/hid/hid-aureal.c index cf1a562d8523a1..896304148a8708 100644 --- a/drivers/hid/hid-aureal.c +++ b/drivers/hid/hid-aureal.c @@ -18,7 +18,7 @@ #include "hid-ids.h" -static __u8 *aureal_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *aureal_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { diff --git 
a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c index be17af3d9c0c27..9f05465358d9f1 100644 --- a/drivers/hid/hid-bigbenff.c +++ b/drivers/hid/hid-bigbenff.c @@ -99,7 +99,7 @@ * - map previously unused analog trigger data to Z/RZ * - simplify feature and output descriptor */ -static __u8 pid0902_rdesc_fixed[] = { +static const __u8 pid0902_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */ 0x09, 0x05, /* Usage (Game Pad) */ 0xA1, 0x01, /* Collection (Application) */ @@ -464,12 +464,12 @@ static int bigben_probe(struct hid_device *hid, return error; } -static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc, +static const __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc, unsigned int *rsize) { if (*rsize == PID0902_RDESC_ORIG_SIZE) { - rdesc = pid0902_rdesc_fixed; *rsize = sizeof(pid0902_rdesc_fixed); + return pid0902_rdesc_fixed; } else hid_warn(hid, "unexpected rdesc, please submit for review\n"); return rdesc; diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c index 549c73b05b8d67..a504632febfcf1 100644 --- a/drivers/hid/hid-cherry.c +++ b/drivers/hid/hid-cherry.c @@ -22,7 +22,7 @@ * Cherry Cymotion keyboard have an invalid HID report descriptor, * that needs fixing before we can parse it. */ -static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index 99954c6b3242d0..5776ec2e7159a3 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -88,8 +88,8 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, return 1; } -static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *ch_switch12_report_fixup(struct hid_device *hdev, + __u8 *rdesc, unsigned int *rsize) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); diff --git a/drivers/hid/hid-cmedia.c b/drivers/hid/hid-cmedia.c index cab42047bc99be..528d7f36121576 100644 --- a/drivers/hid/hid-cmedia.c +++ b/drivers/hid/hid-cmedia.c @@ -26,7 +26,7 @@ MODULE_LICENSE("GPL"); /* Fixed report descriptor of HS-100B audio chip * Bit 4 is an abolute Microphone mute usage instead of being unassigned. */ -static __u8 hs100b_rdesc_fixed[] = { +static const __u8 hs100b_rdesc_fixed[] = { 0x05, 0x0C, /* Usage Page (Consumer), */ 0x09, 0x01, /* Usage (Consumer Control), */ 0xA1, 0x01, /* Collection (Application), */ @@ -199,13 +199,13 @@ static struct hid_driver cmhid_driver = { .input_mapping = cmhid_input_mapping, }; -static __u8 *cmhid_hs100b_report_fixup(struct hid_device *hid, __u8 *rdesc, +static const __u8 *cmhid_hs100b_report_fixup(struct hid_device *hid, __u8 *rdesc, unsigned int *rsize) { if (*rsize == HS100B_RDESC_ORIG_SIZE) { hid_info(hid, "Fixing CMedia HS-100B report descriptor\n"); - rdesc = hs100b_rdesc_fixed; *rsize = sizeof(hs100b_rdesc_fixed); + return hs100b_rdesc_fixed; } return rdesc; } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 988d0acbdf04dd..612ee6ddfc8db0 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -723,7 +723,7 @@ static void hid_device_release(struct device *dev) * items, though they are not used yet. 
*/ -static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) +static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item) { u8 b; @@ -880,8 +880,8 @@ static int hid_scan_report(struct hid_device *hid) { struct hid_parser *parser; struct hid_item item; - __u8 *start = hid->dev_rdesc; - __u8 *end = start + hid->dev_rsize; + const __u8 *start = hid->dev_rdesc; + const __u8 *end = start + hid->dev_rsize; static int (*dispatch_type[])(struct hid_parser *parser, struct hid_item *item) = { hid_scan_main, @@ -946,7 +946,7 @@ static int hid_scan_report(struct hid_device *hid) * Allocate the device report as read by the bus driver. This function should * only be called from parse() in ll drivers. */ -int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size) +int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size) { hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL); if (!hid->dev_rdesc) @@ -1204,10 +1204,10 @@ int hid_open_report(struct hid_device *device) struct hid_parser *parser; struct hid_item item; unsigned int size; - __u8 *start; + const __u8 *start; __u8 *buf; - __u8 *end; - __u8 *next; + const __u8 *end; + const __u8 *next; int ret; int i; static int (*dispatch_type[])(struct hid_parser *parser, @@ -1912,6 +1912,31 @@ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) } EXPORT_SYMBOL_GPL(hid_set_field); +struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type, + unsigned int application, unsigned int usage) +{ + struct list_head *report_list = &hdev->report_enum[report_type].report_list; + struct hid_report *report; + int i, j; + + list_for_each_entry(report, report_list, list) { + if (report->application != application) + continue; + + for (i = 0; i < report->maxfield; i++) { + struct hid_field *field = report->field[i]; + + for (j = 0; j < field->maxusage; j++) { + if (field->usage[j].hid == usage) + return field; + } + } + } + + return NULL; +} +EXPORT_SYMBOL_GPL(hid_find_field); + static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, const u8 *data) { diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index 702f50e9841de7..62b99f5c3cf847 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -690,8 +690,8 @@ static int corsair_input_mapping(struct hid_device *dev, * - USB ID 1b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse */ -static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, + __u8 *rdesc, unsigned int *rsize) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c index 0fa785f52707ac..5596dd94032230 100644 --- a/drivers/hid/hid-cougar.c +++ b/drivers/hid/hid-cougar.c @@ -103,8 +103,8 @@ static void cougar_fix_g6_mapping(void) /* * Constant-friendly rdesc fixup for mouse interface */ -static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 && (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) { diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 20a0d1315d90fa..dae2b84a149088 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -1094,7 +1094,6 @@ static void 
cp2112_gpio_poll_callback(struct work_struct *work) { struct cp2112_device *dev = container_of(work, struct cp2112_device, gpio_poll_worker.work); - struct irq_data *d; u8 gpio_mask; u32 irq_type; int irq, virq, ret; @@ -1111,12 +1110,10 @@ static void cp2112_gpio_poll_callback(struct work_struct *work) if (!irq) continue; - d = irq_get_irq_data(irq); - if (!d) + irq_type = irq_get_trigger_type(irq); + if (!irq_type) continue; - irq_type = irqd_get_trigger_type(d); - if (gpio_mask & BIT(virq)) { /* Level High */ diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c index b952b235e70ab2..98548201feec07 100644 --- a/drivers/hid/hid-cypress.c +++ b/drivers/hid/hid-cypress.c @@ -67,7 +67,7 @@ static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } -static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c index c88224a96e9e06..84e1e90a266b21 100644 --- a/drivers/hid/hid-dr.c +++ b/drivers/hid/hid-dr.c @@ -199,7 +199,7 @@ static inline int drff_init(struct hid_device *hid) #define PID0011_RDESC_ORIG_SIZE 101 /* Fixed report descriptor for PID 0x011 joystick */ -static __u8 pid0011_rdesc_fixed[] = { +static const __u8 pid0011_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ @@ -228,14 +228,14 @@ static __u8 pid0011_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { switch (hdev->product) { case 0x0011: if (*rsize == PID0011_RDESC_ORIG_SIZE) { - rdesc = pid0011_rdesc_fixed; *rsize = sizeof(pid0011_rdesc_fixed); + return pid0011_rdesc_fixed; } break; } diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c index 5973a3bab29f89..defcf91fdd14b3 100644 --- a/drivers/hid/hid-elecom.c +++ b/drivers/hid/hid-elecom.c @@ -53,7 +53,7 @@ static void mouse_button_fixup(struct hid_device *hdev, rdesc[padding_bit + 1] = MOUSE_BUTTONS_MAX - nbuttons; } -static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { switch (hdev->product) { diff --git a/drivers/hid/hid-gembird.c b/drivers/hid/hid-gembird.c index c42593fe71166c..20a8de766e5609 100644 --- a/drivers/hid/hid-gembird.c +++ b/drivers/hid/hid-gembird.c @@ -57,7 +57,7 @@ static const __u8 gembird_jpd_fixed_rdesc[] = { 0x81, 0x02, /* Input (Data,Var,Abs) */ }; -static __u8 *gembird_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *gembird_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { __u8 *new_rdesc; diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c index f9db991d3c5a2c..d2439399fb357a 100644 --- a/drivers/hid/hid-generic.c +++ b/drivers/hid/hid-generic.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c index 281b3a7187cec2..5bbd8124805322 100644 --- a/drivers/hid/hid-glorious.c +++ b/drivers/hid/hid-glorious.c @@ -26,7 +26,7 @@ MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice"); * keyboard HID report, causing keycodes 
to be misinterpreted. * Fix this by setting Usage Minimum to 0 in that report. */ -static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == 213 && diff --git a/drivers/hid/hid-goodix-spi.c b/drivers/hid/hid-goodix-spi.c new file mode 100644 index 00000000000000..0f87bf9c67cfb6 --- /dev/null +++ b/drivers/hid/hid-goodix-spi.c @@ -0,0 +1,809 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Goodix GT7986U SPI Driver Code for HID. + * + * Copyright (C) 2024 Godix, Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GOODIX_DEV_CONFIRM_ADDR 0x10000 +#define GOODIX_HID_DESC_ADDR 0x1058C +#define GOODIX_HID_REPORT_DESC_ADDR 0x105AA +#define GOODIX_HID_SIGN_ADDR 0x10D32 + +#define GOODIX_HID_GET_REPORT_CMD 0x02 +#define GOODIX_HID_SET_REPORT_CMD 0x03 + +#define GOODIX_HID_MAX_INBUF_SIZE 128 +#define GOODIX_HID_ACK_READY_FLAG 0x01 +#define GOODIX_HID_REPORT_READY_FLAG 0x80 + +#define GOODIX_DEV_CONFIRM_VAL 0xAA + +#define GOODIX_SPI_WRITE_FLAG 0xF0 +#define GOODIX_SPI_READ_FLAG 0xF1 +#define GOODIX_SPI_TRANS_PREFIX_LEN 1 +#define GOODIX_REGISTER_WIDTH 4 +#define GOODIX_SPI_READ_DUMMY_LEN 3 +#define GOODIX_SPI_READ_PREFIX_LEN (GOODIX_SPI_TRANS_PREFIX_LEN + \ + GOODIX_REGISTER_WIDTH + \ + GOODIX_SPI_READ_DUMMY_LEN) +#define GOODIX_SPI_WRITE_PREFIX_LEN (GOODIX_SPI_TRANS_PREFIX_LEN + \ + GOODIX_REGISTER_WIDTH) + +#define GOODIX_CHECKSUM_SIZE sizeof(u16) +#define GOODIX_NORMAL_RESET_DELAY_MS 150 + +struct goodix_hid_report_header { + u8 flag; + __le16 size; +} __packed; +#define GOODIX_HID_ACK_HEADER_SIZE sizeof(struct goodix_hid_report_header) + +struct goodix_hid_report_package { + __le16 size; + u8 data[]; +}; + +#define GOODIX_HID_PKG_LEN_SIZE sizeof(u16) +#define GOODIX_HID_COOR_DATA_LEN 82 +#define GOODIX_HID_COOR_PKG_LEN (GOODIX_HID_PKG_LEN_SIZE + \ + GOODIX_HID_COOR_DATA_LEN) + +/* power state */ +#define GOODIX_SPI_POWER_ON 0x00 +#define GOODIX_SPI_POWER_SLEEP 0x01 + +/* flags used to record the current device operating state */ +#define GOODIX_HID_STARTED 0 + +struct goodix_hid_report_event { + struct goodix_hid_report_header hdr; + u8 data[GOODIX_HID_COOR_PKG_LEN]; +} __packed; + +struct goodix_hid_desc { + __le16 desc_length; + __le16 bcd_version; + __le16 report_desc_length; + __le16 report_desc_register; + __le16 input_register; + __le16 max_input_length; + __le16 output_register; + __le16 max_output_length; + __le16 cmd_register; + __le16 data_register; + __le16 vendor_id; + __le16 product_id; + __le16 version_id; + __le32 reserved; +} __packed; + +struct goodix_ts_data { + struct device *dev; + struct spi_device *spi; + struct hid_device *hid; + struct goodix_hid_desc hid_desc; + + struct gpio_desc *reset_gpio; + u32 hid_report_addr; + + unsigned long flags; + /* lock for hid raw request operation */ + struct mutex hid_request_lock; + /* buffer used to store hid report event */ + u8 *event_buf; + u32 hid_max_event_sz; + /* buffer used to do spi data transfer */ + u8 xfer_buf[SZ_2K] ____cacheline_aligned; +}; + +static void *goodix_get_event_report(struct goodix_ts_data *ts, u32 addr, + u8 *data, size_t len) +{ + struct spi_device *spi = to_spi_device(&ts->spi->dev); + struct spi_transfer xfers; + struct spi_message spi_msg; + int error; + + /* buffer format: 0xF1 + addr(4bytes) + dummy(3bytes) + data */ + data[0] = GOODIX_SPI_READ_FLAG; + put_unaligned_be32(addr, data + 
GOODIX_SPI_TRANS_PREFIX_LEN); + + spi_message_init(&spi_msg); + memset(&xfers, 0, sizeof(xfers)); + xfers.tx_buf = data; + xfers.rx_buf = data; + xfers.len = GOODIX_SPI_READ_PREFIX_LEN + len; + spi_message_add_tail(&xfers, &spi_msg); + + error = spi_sync(spi, &spi_msg); + if (error) { + dev_err(ts->dev, "spi transfer error: %d", error); + return NULL; + } + + return data + GOODIX_SPI_READ_PREFIX_LEN; +} + +static int goodix_spi_read(struct goodix_ts_data *ts, u32 addr, + void *data, size_t len) +{ + struct spi_device *spi = to_spi_device(&ts->spi->dev); + struct spi_transfer xfers; + struct spi_message spi_msg; + int error; + + if (GOODIX_SPI_READ_PREFIX_LEN + len > sizeof(ts->xfer_buf)) { + dev_err(ts->dev, "read data len exceed limit %zu", + sizeof(ts->xfer_buf) - GOODIX_SPI_READ_PREFIX_LEN); + return -EINVAL; + } + + /* buffer format: 0xF1 + addr(4bytes) + dummy(3bytes) + data */ + ts->xfer_buf[0] = GOODIX_SPI_READ_FLAG; + put_unaligned_be32(addr, ts->xfer_buf + GOODIX_SPI_TRANS_PREFIX_LEN); + + spi_message_init(&spi_msg); + memset(&xfers, 0, sizeof(xfers)); + xfers.tx_buf = ts->xfer_buf; + xfers.rx_buf = ts->xfer_buf; + xfers.len = GOODIX_SPI_READ_PREFIX_LEN + len; + spi_message_add_tail(&xfers, &spi_msg); + + error = spi_sync(spi, &spi_msg); + if (error) + dev_err(ts->dev, "spi transfer error: %d", error); + else + memcpy(data, ts->xfer_buf + GOODIX_SPI_READ_PREFIX_LEN, len); + + return error; +} + +static int goodix_spi_write(struct goodix_ts_data *ts, u32 addr, + const void *data, size_t len) +{ + struct spi_device *spi = to_spi_device(&ts->spi->dev); + struct spi_transfer xfers; + struct spi_message spi_msg; + int error; + + if (GOODIX_SPI_WRITE_PREFIX_LEN + len > sizeof(ts->xfer_buf)) { + dev_err(ts->dev, "write data len exceed limit %zu", + sizeof(ts->xfer_buf) - GOODIX_SPI_WRITE_PREFIX_LEN); + return -EINVAL; + } + + /* buffer format: 0xF0 + addr(4bytes) + data */ + ts->xfer_buf[0] = GOODIX_SPI_WRITE_FLAG; + put_unaligned_be32(addr, ts->xfer_buf + GOODIX_SPI_TRANS_PREFIX_LEN); + memcpy(ts->xfer_buf + GOODIX_SPI_WRITE_PREFIX_LEN, data, len); + + spi_message_init(&spi_msg); + memset(&xfers, 0, sizeof(xfers)); + xfers.tx_buf = ts->xfer_buf; + xfers.len = GOODIX_SPI_WRITE_PREFIX_LEN + len; + spi_message_add_tail(&xfers, &spi_msg); + + error = spi_sync(spi, &spi_msg); + if (error) + dev_err(ts->dev, "spi transfer error: %d", error); + + return error; +} + +static int goodix_dev_confirm(struct goodix_ts_data *ts) +{ + u8 tx_buf[8], rx_buf[8]; + int retry = 3; + int error; + + gpiod_set_value_cansleep(ts->reset_gpio, 0); + usleep_range(4000, 4100); + + memset(tx_buf, GOODIX_DEV_CONFIRM_VAL, sizeof(tx_buf)); + while (retry--) { + error = goodix_spi_write(ts, GOODIX_DEV_CONFIRM_ADDR, + tx_buf, sizeof(tx_buf)); + if (error) + return error; + + error = goodix_spi_read(ts, GOODIX_DEV_CONFIRM_ADDR, + rx_buf, sizeof(rx_buf)); + if (error) + return error; + + if (!memcmp(tx_buf, rx_buf, sizeof(tx_buf))) + return 0; + + usleep_range(5000, 5100); + } + + dev_err(ts->dev, "device confirm failed, rx_buf: %*ph", 8, rx_buf); + return -EINVAL; +} + +/** + * goodix_hid_parse() - hid-core .parse() callback + * @hid: hid device instance + * + * This function gets called during call to hid_add_device + * + * Return: 0 on success and non zero on error + */ +static int goodix_hid_parse(struct hid_device *hid) +{ + struct goodix_ts_data *ts = hid->driver_data; + u16 rsize; + int error; + + rsize = le16_to_cpu(ts->hid_desc.report_desc_length); + if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) { + 
dev_err(ts->dev, "invalid report desc size, %d", rsize); + return -EINVAL; + } + + u8 *rdesc __free(kfree) = kzalloc(rsize, GFP_KERNEL); + if (!rdesc) + return -ENOMEM; + + error = goodix_spi_read(ts, GOODIX_HID_REPORT_DESC_ADDR, rdesc, rsize); + if (error) { + dev_err(ts->dev, "failed get report desc, %d", error); + return error; + } + + error = hid_parse_report(hid, rdesc, rsize); + if (error) { + dev_err(ts->dev, "failed parse report, %d", error); + return error; + } + + return 0; +} + +static int goodix_hid_get_report_length(struct hid_report *report) +{ + return ((report->size - 1) >> 3) + 1 + + report->device->report_enum[report->type].numbered + 2; +} + +static void goodix_hid_find_max_report(struct hid_device *hid, unsigned int type, + unsigned int *max) +{ + struct hid_report *report; + unsigned int size; + + list_for_each_entry(report, &hid->report_enum[type].report_list, list) { + size = goodix_hid_get_report_length(report); + if (*max < size) + *max = size; + } +} + +static int goodix_hid_start(struct hid_device *hid) +{ + struct goodix_ts_data *ts = hid->driver_data; + unsigned int bufsize = GOODIX_HID_COOR_PKG_LEN; + u32 report_size; + + goodix_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize); + goodix_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize); + goodix_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize); + + report_size = GOODIX_SPI_READ_PREFIX_LEN + + GOODIX_HID_ACK_HEADER_SIZE + bufsize; + if (report_size <= ts->hid_max_event_sz) + return 0; + + ts->event_buf = devm_krealloc(ts->dev, ts->event_buf, + report_size, GFP_KERNEL); + if (!ts->event_buf) + return -ENOMEM; + + ts->hid_max_event_sz = report_size; + return 0; +} + +static void goodix_hid_stop(struct hid_device *hid) +{ + hid->claimed = 0; +} + +static int goodix_hid_open(struct hid_device *hid) +{ + struct goodix_ts_data *ts = hid->driver_data; + + set_bit(GOODIX_HID_STARTED, &ts->flags); + return 0; +} + +static void goodix_hid_close(struct hid_device *hid) +{ + struct goodix_ts_data *ts = hid->driver_data; + + clear_bit(GOODIX_HID_STARTED, &ts->flags); +} + +/* Return date length of response data */ +static int goodix_hid_check_ack_status(struct goodix_ts_data *ts, u32 *resp_len) +{ + struct goodix_hid_report_header hdr; + int retry = 20; + int error; + int len; + + while (retry--) { + /* + * 3 bytes of hid request response data + * - byte 0: Ack flag, value of 1 for data ready + * - bytes 1-2: Response data length + */ + error = goodix_spi_read(ts, ts->hid_report_addr, + &hdr, sizeof(hdr)); + if (!error && (hdr.flag & GOODIX_HID_ACK_READY_FLAG)) { + len = le16_to_cpu(hdr.size); + if (len < GOODIX_HID_PKG_LEN_SIZE) { + dev_err(ts->dev, "hrd.size too short: %d", len); + return -EINVAL; + } + *resp_len = len; + return 0; + } + + /* Wait 10ms for another try */ + usleep_range(10000, 11000); + } + + return -EINVAL; +} + +/** + * goodix_hid_get_raw_report() - Process hidraw GET REPORT operation + * @hid: hid device instance + * @reportnum: Report ID + * @buf: Buffer for store the report date + * @len: Length fo report data + * @report_type: Report type + * + * The function for hid_ll_driver.get_raw_report to handle the HIDRAW ioctl + * get report request. The transmitted data follows the standard i2c-hid + * protocol with a specified header. 
+ * + * Return: The length of the data in the buf on success, negative error code + */ +static int goodix_hid_get_raw_report(struct hid_device *hid, + unsigned char reportnum, + u8 *buf, size_t len, + unsigned char report_type) +{ + struct goodix_ts_data *ts = hid->driver_data; + u16 data_register = le16_to_cpu(ts->hid_desc.data_register); + u16 cmd_register = le16_to_cpu(ts->hid_desc.cmd_register); + u8 tmp_buf[GOODIX_HID_MAX_INBUF_SIZE]; + int tx_len = 0, args_len = 0; + u32 response_data_len; + u8 args[3]; + int error; + + if (report_type == HID_OUTPUT_REPORT) + return -EINVAL; + + if (reportnum == 3) { + /* Get win8 signature data */ + error = goodix_spi_read(ts, GOODIX_HID_SIGN_ADDR, buf, len); + if (error) { + dev_err(ts->dev, "failed get win8 sign: %d", error); + return -EINVAL; + } + return len; + } + + if (reportnum >= 0x0F) + args[args_len++] = reportnum; + + put_unaligned_le16(data_register, args + args_len); + args_len += sizeof(data_register); + + /* Clean 3 bytes of hid ack header data */ + memset(tmp_buf, 0, GOODIX_HID_ACK_HEADER_SIZE); + tx_len += GOODIX_HID_ACK_HEADER_SIZE; + + put_unaligned_le16(cmd_register, tmp_buf + tx_len); + tx_len += sizeof(cmd_register); + + tmp_buf[tx_len] = (report_type == HID_FEATURE_REPORT ? 0x03 : 0x01) << 4; + tmp_buf[tx_len] |= reportnum >= 0x0F ? 0x0F : reportnum; + tx_len++; + + tmp_buf[tx_len++] = GOODIX_HID_GET_REPORT_CMD; + + memcpy(tmp_buf + tx_len, args, args_len); + tx_len += args_len; + + /* Step1: write report request info */ + error = goodix_spi_write(ts, ts->hid_report_addr, tmp_buf, tx_len); + if (error) { + dev_err(ts->dev, "failed send read feature cmd, %d", error); + return error; + } + + /* No need to read response data */ + if (!len) + return 0; + + /* Step2: check response data status */ + error = goodix_hid_check_ack_status(ts, &response_data_len); + if (error) + return error; + + len = min(len, response_data_len - GOODIX_HID_PKG_LEN_SIZE); + /* Step3: read response data (skip 2 bytes of hid pkg length) */ + error = goodix_spi_read(ts, ts->hid_report_addr + + GOODIX_HID_ACK_HEADER_SIZE + + GOODIX_HID_PKG_LEN_SIZE, buf, len); + if (error) { + dev_err(ts->dev, "failed read hid response data, %d", error); + return error; + } + + if (buf[0] != reportnum) { + dev_err(ts->dev, "incorrect report (%d vs %d expected)", + buf[0], reportnum); + return -EINVAL; + } + return len; +} + +/** + * goodix_hid_set_raw_report() - process hidraw SET REPORT operation + * @hid: HID device + * @reportnum: Report ID + * @buf: Buffer for communication + * @len: Length of data in the buffer + * @report_type: Report type + * + * The function for hid_ll_driver.set_raw_report to handle the HIDRAW ioctl + * set report request. The transmitted data follows the standard i2c-hid + * protocol with a specified header. 
+ * + * Return: The length of the data sent, negative error code on failure + */ +static int goodix_hid_set_raw_report(struct hid_device *hid, + unsigned char reportnum, + __u8 *buf, size_t len, + unsigned char report_type) +{ + struct goodix_ts_data *ts = hid->driver_data; + u16 data_register = le16_to_cpu(ts->hid_desc.data_register); + u16 cmd_register = le16_to_cpu(ts->hid_desc.cmd_register); + int tx_len = 0, args_len = 0; + u8 tmp_buf[GOODIX_HID_MAX_INBUF_SIZE]; + u8 args[5]; + int error; + + if (reportnum >= 0x0F) { + args[args_len++] = reportnum; + reportnum = 0x0F; + } + + put_unaligned_le16(data_register, args + args_len); + args_len += sizeof(data_register); + + put_unaligned_le16(GOODIX_HID_PKG_LEN_SIZE + len, args + args_len); + args_len += GOODIX_HID_PKG_LEN_SIZE; + + /* Clean 3 bytes of hid ack header data */ + memset(tmp_buf, 0, GOODIX_HID_ACK_HEADER_SIZE); + tx_len += GOODIX_HID_ACK_HEADER_SIZE; + + put_unaligned_le16(cmd_register, tmp_buf + tx_len); + tx_len += sizeof(cmd_register); + + tmp_buf[tx_len++] = ((report_type == HID_FEATURE_REPORT ? 0x03 : 0x02) << 4) | reportnum; + tmp_buf[tx_len++] = GOODIX_HID_SET_REPORT_CMD; + + memcpy(tmp_buf + tx_len, args, args_len); + tx_len += args_len; + + memcpy(tmp_buf + tx_len, buf, len); + tx_len += len; + + error = goodix_spi_write(ts, ts->hid_report_addr, tmp_buf, tx_len); + if (error) { + dev_err(ts->dev, "failed send report: %*ph", tx_len, tmp_buf); + return error; + } + return len; +} + +static int goodix_hid_raw_request(struct hid_device *hid, + unsigned char reportnum, + __u8 *buf, size_t len, + unsigned char rtype, int reqtype) +{ + struct goodix_ts_data *ts = hid->driver_data; + int error = -EINVAL; + + guard(mutex)(&ts->hid_request_lock); + switch (reqtype) { + case HID_REQ_GET_REPORT: + error = goodix_hid_get_raw_report(hid, reportnum, buf, + len, rtype); + break; + case HID_REQ_SET_REPORT: + if (buf[0] == reportnum) + error = goodix_hid_set_raw_report(hid, reportnum, + buf, len, rtype); + break; + default: + break; + } + + return error; +} + +static struct hid_ll_driver goodix_hid_ll_driver = { + .parse = goodix_hid_parse, + .start = goodix_hid_start, + .stop = goodix_hid_stop, + .open = goodix_hid_open, + .close = goodix_hid_close, + .raw_request = goodix_hid_raw_request +}; + +static irqreturn_t goodix_hid_irq(int irq, void *data) +{ + struct goodix_ts_data *ts = data; + struct goodix_hid_report_event *event; + struct goodix_hid_report_package *pkg; + u16 report_size; + + if (!test_bit(GOODIX_HID_STARTED, &ts->flags)) + return IRQ_HANDLED; + /* + * First, read buffer with space for header and coordinate package: + * - event header = 3 bytes + * - coordinate event = GOODIX_HID_COOR_PKG_LEN bytes + * + * If the data size info in the event header exceeds + * GOODIX_HID_COOR_PKG_LEN, it means that there are other packages + * besides the coordinate package. 
+ */ + event = goodix_get_event_report(ts, ts->hid_report_addr, ts->event_buf, + GOODIX_HID_ACK_HEADER_SIZE + + GOODIX_HID_COOR_PKG_LEN); + if (!event) { + dev_err(ts->dev, "failed get coordinate data"); + return IRQ_HANDLED; + } + + /* Check coordinate data valid flag */ + if (event->hdr.flag != GOODIX_HID_REPORT_READY_FLAG) + return IRQ_HANDLED; + + pkg = (struct goodix_hid_report_package *)event->data; + if (le16_to_cpu(pkg->size) < GOODIX_HID_PKG_LEN_SIZE) { + dev_err(ts->dev, "invalid coordinate event package size, %d", + le16_to_cpu(pkg->size)); + return IRQ_HANDLED; + } + hid_input_report(ts->hid, HID_INPUT_REPORT, pkg->data, + le16_to_cpu(pkg->size) - GOODIX_HID_PKG_LEN_SIZE, 1); + + report_size = le16_to_cpu(event->hdr.size); + /* Check if there are other packages */ + if (report_size <= GOODIX_HID_COOR_PKG_LEN) + return IRQ_HANDLED; + + if (report_size >= ts->hid_max_event_sz) { + dev_err(ts->dev, "package size exceed limit %d vs %d", + report_size, ts->hid_max_event_sz); + return IRQ_HANDLED; + } + + /* Read the package behind the coordinate data */ + pkg = goodix_get_event_report(ts, ts->hid_report_addr + sizeof(*event), + ts->event_buf, + report_size - GOODIX_HID_COOR_PKG_LEN); + if (!pkg) { + dev_err(ts->dev, "failed read attachment data content"); + return IRQ_HANDLED; + } + + hid_input_report(ts->hid, HID_INPUT_REPORT, pkg->data, + le16_to_cpu(pkg->size) - GOODIX_HID_PKG_LEN_SIZE, 1); + + return IRQ_HANDLED; +} + +static int goodix_hid_init(struct goodix_ts_data *ts) +{ + struct hid_device *hid; + int error; + + /* Get hid descriptor */ + error = goodix_spi_read(ts, GOODIX_HID_DESC_ADDR, &ts->hid_desc, + sizeof(ts->hid_desc)); + if (error) { + dev_err(ts->dev, "failed get hid desc, %d", error); + return error; + } + + hid = hid_allocate_device(); + if (IS_ERR(hid)) + return PTR_ERR(hid); + + hid->driver_data = ts; + hid->ll_driver = &goodix_hid_ll_driver; + hid->bus = BUS_SPI; + hid->dev.parent = &ts->spi->dev; + + hid->version = le16_to_cpu(ts->hid_desc.bcd_version); + hid->vendor = le16_to_cpu(ts->hid_desc.vendor_id); + hid->product = le16_to_cpu(ts->hid_desc.product_id); + snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-gdix", + hid->vendor, hid->product); + + error = hid_add_device(hid); + if (error) { + dev_err(ts->dev, "failed add hid device, %d", error); + hid_destroy_device(hid); + return error; + } + + ts->hid = hid; + return 0; +} + +static int goodix_spi_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct goodix_ts_data *ts; + int error; + + /* init spi_device */ + spi->mode = SPI_MODE_0; + spi->bits_per_word = 8; + error = spi_setup(spi); + if (error) + return error; + + ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + mutex_init(&ts->hid_request_lock); + spi_set_drvdata(spi, ts); + ts->spi = spi; + ts->dev = dev; + ts->hid_max_event_sz = GOODIX_SPI_READ_PREFIX_LEN + + GOODIX_HID_ACK_HEADER_SIZE + GOODIX_HID_COOR_PKG_LEN; + ts->event_buf = devm_kmalloc(dev, ts->hid_max_event_sz, GFP_KERNEL); + if (!ts->event_buf) + return -ENOMEM; + + ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ts->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ts->reset_gpio), + "failed to request reset gpio\n"); + + error = device_property_read_u32(dev, "goodix,hid-report-addr", + &ts->hid_report_addr); + if (error) + return dev_err_probe(dev, error, + "failed get hid report addr\n"); + + error = goodix_dev_confirm(ts); + if (error) + return error; + + /* Waits 150ms for firmware to 
fully boot */ + msleep(GOODIX_NORMAL_RESET_DELAY_MS); + + error = goodix_hid_init(ts); + if (error) { + dev_err(dev, "failed init hid device"); + return error; + } + + error = devm_request_threaded_irq(&ts->spi->dev, ts->spi->irq, + NULL, goodix_hid_irq, IRQF_ONESHOT, + "goodix_spi_hid", ts); + if (error) { + dev_err(ts->dev, "could not register interrupt, irq = %d, %d", + ts->spi->irq, error); + goto err_destroy_hid; + } + + return 0; + +err_destroy_hid: + hid_destroy_device(ts->hid); + return error; +} + +static void goodix_spi_remove(struct spi_device *spi) +{ + struct goodix_ts_data *ts = spi_get_drvdata(spi); + + disable_irq(spi->irq); + hid_destroy_device(ts->hid); +} + +static int goodix_spi_set_power(struct goodix_ts_data *ts, int power_state) +{ + u8 power_control_cmd[] = {0x00, 0x00, 0x00, 0x87, 0x02, 0x00, 0x08}; + int error; + + /* value 0 for power on, 1 for power sleep */ + power_control_cmd[5] = power_state; + + guard(mutex)(&ts->hid_request_lock); + error = goodix_spi_write(ts, ts->hid_report_addr, power_control_cmd, + sizeof(power_control_cmd)); + if (error) { + dev_err(ts->dev, "failed set power mode: %s", + power_state == GOODIX_SPI_POWER_ON ? "on" : "sleep"); + return error; + } + return 0; +} + +static int goodix_spi_suspend(struct device *dev) +{ + struct goodix_ts_data *ts = dev_get_drvdata(dev); + + disable_irq(ts->spi->irq); + return goodix_spi_set_power(ts, GOODIX_SPI_POWER_SLEEP); +} + +static int goodix_spi_resume(struct device *dev) +{ + struct goodix_ts_data *ts = dev_get_drvdata(dev); + + enable_irq(ts->spi->irq); + return goodix_spi_set_power(ts, GOODIX_SPI_POWER_ON); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(goodix_spi_pm_ops, + goodix_spi_suspend, goodix_spi_resume); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id goodix_spi_acpi_match[] = { + { "GXTS7986" }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goodix_spi_acpi_match); +#endif + +static const struct spi_device_id goodix_spi_ids[] = { + { "gt7986u" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, goodix_spi_ids); + +static struct spi_driver goodix_spi_driver = { + .driver = { + .name = "goodix-spi-hid", + .acpi_match_table = ACPI_PTR(goodix_spi_acpi_match), + .pm = pm_sleep_ptr(&goodix_spi_pm_ops), + }, + .probe = goodix_spi_probe, + .remove = goodix_spi_remove, + .id_table = goodix_spi_ids, +}; +module_spi_driver(goodix_spi_driver); + +MODULE_DESCRIPTION("Goodix SPI driver for HID touchscreen"); +MODULE_AUTHOR("Goodix, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 6e4ebc349e4526..22683ec819aaca 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include "hid-ids.h" #include "hid-vivaldi-common.h" @@ -418,38 +418,15 @@ static int hammer_event(struct hid_device *hid, struct hid_field *field, return 0; } -static bool hammer_has_usage(struct hid_device *hdev, unsigned int report_type, - unsigned application, unsigned usage) -{ - struct hid_report_enum *re = &hdev->report_enum[report_type]; - struct hid_report *report; - int i, j; - - list_for_each_entry(report, &re->report_list, list) { - if (report->application != application) - continue; - - for (i = 0; i < report->maxfield; i++) { - struct hid_field *field = report->field[i]; - - for (j = 0; j < field->maxusage; j++) - if (field->usage[j].hid == usage) - return true; - } - } - - return false; -} - static bool hammer_has_folded_event(struct hid_device *hdev) { - return hammer_has_usage(hdev, 
HID_INPUT_REPORT, + return !!hid_find_field(hdev, HID_INPUT_REPORT, HID_GD_KEYBOARD, HID_USAGE_KBD_FOLDED); } static bool hammer_has_backlight_control(struct hid_device *hdev) { - return hammer_has_usage(hdev, HID_OUTPUT_REPORT, + return !!hid_find_field(hdev, HID_OUTPUT_REPORT, HID_GD_KEYBOARD, HID_AD_BRIGHTNESS); } diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 1f014ac54e14d3..d13543517a6c96 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -27,7 +27,7 @@ * to the boot interface. */ -static __u8 holtek_kbd_rdesc_fixed[] = { +static const __u8 holtek_kbd_rdesc_fixed[] = { /* Original report descriptor, with reduced number of consumer usages */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x80, /* Usage (Sys Control), */ @@ -102,14 +102,14 @@ static __u8 holtek_kbd_rdesc_fixed[] = { 0xC0, /* End Collection */ }; -static __u8 *holtek_kbd_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *holtek_kbd_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { - rdesc = holtek_kbd_rdesc_fixed; *rsize = sizeof(holtek_kbd_rdesc_fixed); + return holtek_kbd_rdesc_fixed; } return rdesc; } diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c index 343730c28e2d2e..b618a1646c13e9 100644 --- a/drivers/hid/hid-holtek-mouse.c +++ b/drivers/hid/hid-holtek-mouse.c @@ -29,8 +29,8 @@ * - USB ID 04d9:a0c2, sold as ETEKCITY Scroll T-140 Gaming Mouse */ -static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, + __u8 *rdesc, unsigned int *rsize) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 781c5aa298598a..86820a3d9766d3 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -417,24 +417,8 @@ #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 #define USB_DEVICE_ID_HP_X2 0x074d #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 -#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05 -#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF -#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9 -#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817 -#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF -#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8 -#define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82 -#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN 0x2F2C -#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN 0x4116 #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 -#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A -#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C -#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F -#define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100 0x29F5 -#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED -#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE -#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG 0x2D02 #define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM 0x2F81 #define USB_VENDOR_ID_ELECOM 0x056e @@ -521,6 +505,7 @@ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 #define I2C_VENDOR_ID_GOODIX 0x27c6 +#define I2C_DEVICE_ID_GOODIX_01E0 0x01e0 #define I2C_DEVICE_ID_GOODIX_01E8 0x01e8 #define I2C_DEVICE_ID_GOODIX_01E9 0x01e9 #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 @@ -810,6 +795,7 @@ #define 
USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 #define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe +#define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index c9094a4f281e90..fda9dce3da9980 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -373,14 +373,6 @@ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD), HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN), - HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN), @@ -391,32 +383,13 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_AVOID_QUERY }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW), HID_BATTERY_QUIRK_AVOID_QUERY }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2), - HID_BATTERY_QUIRK_IGNORE }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG), - HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM), HID_BATTERY_QUIRK_AVOID_QUERY }, + /* + * Elan I2C-HID touchscreens seem to all report a non present battery, + * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C-HID devices. 
+ */ + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE }, {} }; diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index 6a7281bc27c95a..8e42780a2663c2 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -13,7 +13,7 @@ #define QUIRK_TOUCHPAD_ON_OFF_REPORT BIT(0) -static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) +static const __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c index a972943baaea04..b9abd53a58649a 100644 --- a/drivers/hid/hid-keytouch.c +++ b/drivers/hid/hid-keytouch.c @@ -16,7 +16,7 @@ /* Replace the broken report descriptor of this device with rather * a default one */ -static __u8 keytouch_fixed_rdesc[] = { +static const __u8 keytouch_fixed_rdesc[] = { 0x05, 0x01, 0x09, 0x06, 0xa1, 0x01, 0x05, 0x07, 0x19, 0xe0, 0x29, 0xe7, 0x15, 0x00, 0x25, 0x01, 0x75, 0x01, 0x95, 0x08, 0x81, 0x02, 0x95, 0x01, 0x75, 0x08, 0x81, 0x01, 0x95, 0x03, 0x75, 0x01, 0x05, 0x08, 0x19, 0x01, 0x29, 0x03, 0x91, @@ -24,15 +24,13 @@ static __u8 keytouch_fixed_rdesc[] = { 0x26, 0xff, 0x00, 0x05, 0x07, 0x19, 0x00, 0x2a, 0xff, 0x00, 0x81, 0x00, 0xc0 }; -static __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { hid_info(hdev, "fixing up Keytouch IEC report descriptor\n"); - rdesc = keytouch_fixed_rdesc; *rsize = sizeof(keytouch_fixed_rdesc); - - return rdesc; + return keytouch_fixed_rdesc; } static const struct hid_device_id keytouch_devices[] = { diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index ca2ba3da2458b6..bd96bfa7af7097 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -8,7 +8,7 @@ * Copyright (c) 2023 David Yang */ -#include +#include #include #include #include @@ -466,7 +466,7 @@ static __u8 *kye_tablet_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int return rdesc; } -static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { switch (hdev->product) { diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index e5e72aa5260a91..3b0c779ce8f712 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -133,7 +133,7 @@ static const __u8 lenovo_tpIIbtkbd_need_fixup_collection[] = { 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */ }; -static __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { switch (hdev->product) { diff --git a/drivers/hid/hid-letsketch.c b/drivers/hid/hid-letsketch.c index 229820fda9602e..8602c63ed9c6ce 100644 --- a/drivers/hid/hid-letsketch.c +++ b/drivers/hid/hid-letsketch.c @@ -41,7 +41,7 @@ #include #include -#include +#include #include "hid-ids.h" diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index cfe2f4f6e93f09..9a2cfa018bd335 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -58,7 +58,7 @@ * These descriptors remove the combined Y axis and instead report * separate throttle (Y) and brake (RZ). 
*/ -static __u8 df_rdesc_fixed[] = { +static const __u8 df_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ @@ -124,7 +124,7 @@ static __u8 df_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 dfp_rdesc_fixed[] = { +static const __u8 dfp_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ @@ -172,7 +172,7 @@ static __u8 dfp_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 fv_rdesc_fixed[] = { +static const __u8 fv_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ @@ -239,7 +239,7 @@ static __u8 fv_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 momo_rdesc_fixed[] = { +static const __u8 momo_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ @@ -285,7 +285,7 @@ static __u8 momo_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 momo2_rdesc_fixed[] = { +static const __u8 momo2_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ @@ -333,7 +333,7 @@ static __u8 momo2_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 ffg_rdesc_fixed[] = { +static const __u8 ffg_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ 0xA1, 0x01, /* Collection (Application), */ @@ -379,7 +379,7 @@ static __u8 ffg_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 fg_rdesc_fixed[] = { +static const __u8 fg_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ 0xA1, 0x01, /* Collection (Application), */ @@ -427,7 +427,7 @@ static __u8 fg_rdesc_fixed[] = { * above the logical maximum described in descriptor. 
This extends * the original value of 0x28c of logical maximum to 0x104d */ -static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); @@ -453,8 +453,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (*rsize == FG_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Wingman Formula GP report descriptor\n"); - rdesc = fg_rdesc_fixed; *rsize = sizeof(fg_rdesc_fixed); + return fg_rdesc_fixed; } else { hid_info(hdev, "rdesc size test failed for formula gp\n"); @@ -466,8 +466,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (*rsize == FFG_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Wingman Formula Force GP report descriptor\n"); - rdesc = ffg_rdesc_fixed; *rsize = sizeof(ffg_rdesc_fixed); + return ffg_rdesc_fixed; } break; @@ -476,8 +476,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (*rsize == DF_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force report descriptor\n"); - rdesc = df_rdesc_fixed; *rsize = sizeof(df_rdesc_fixed); + return df_rdesc_fixed; } break; @@ -485,8 +485,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (*rsize == MOMO_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Force (Red) report descriptor\n"); - rdesc = momo_rdesc_fixed; *rsize = sizeof(momo_rdesc_fixed); + return momo_rdesc_fixed; } break; @@ -494,8 +494,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (*rsize == MOMO2_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Racing Force (Black) report descriptor\n"); - rdesc = momo2_rdesc_fixed; *rsize = sizeof(momo2_rdesc_fixed); + return momo2_rdesc_fixed; } break; @@ -503,8 +503,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (*rsize == FV_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Formula Vibration report descriptor\n"); - rdesc = fv_rdesc_fixed; *rsize = sizeof(fv_rdesc_fixed); + return fv_rdesc_fixed; } break; @@ -512,8 +512,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (*rsize == DFP_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force Pro report descriptor\n"); - rdesc = dfp_rdesc_fixed; *rsize = sizeof(dfp_rdesc_fixed); + return dfp_rdesc_fixed; } break; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index d9580bc3e19a14..34fa71ceec2b20 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -13,7 +13,7 @@ #include #include #include /* For to_usb_interface for kvm extra intf check */ -#include +#include #include "hid-ids.h" #define DJ_MAX_PAIRED_DEVICES 7 diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 400d70e6dbe2dd..cf7a6986cf2013 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include "usbhid/usbhid.h" #include "hid-ids.h" @@ -3767,8 +3767,8 @@ static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp) /* Generic HID++ devices */ /* -------------------------------------------------------------------------- */ -static u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc, - unsigned int *rsize) +static const u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc, + unsigned int *rsize) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); 
diff --git a/drivers/hid/hid-macally.c b/drivers/hid/hid-macally.c index aea46e5220082a..fe7576458afab8 100644 --- a/drivers/hid/hid-macally.c +++ b/drivers/hid/hid-macally.c @@ -18,8 +18,8 @@ MODULE_LICENSE("GPL"); * The Macally ikey keyboard says that its logical and usage maximums are both * 101, but the power key is 102 and the equals key is 103 */ -static __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { if (*rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) { hid_info(hdev, diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 2eb285b97fc011..8a73b59e0827b9 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -907,8 +907,8 @@ static void magicmouse_remove(struct hid_device *hdev) hid_hw_stop(hdev); } -static __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { /* * Change the usage from: diff --git a/drivers/hid/hid-maltron.c b/drivers/hid/hid-maltron.c index caba0def938c34..f0aad1ba2e6d57 100644 --- a/drivers/hid/hid-maltron.c +++ b/drivers/hid/hid-maltron.c @@ -22,7 +22,7 @@ #include "hid-ids.h" /* The original buggy USB descriptor */ -static u8 maltron_rdesc_o[] = { +static const u8 maltron_rdesc_o[] = { 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */ 0x09, 0x80, /* Usage (Sys Control) */ 0xA1, 0x01, /* Collection (Application) */ @@ -79,7 +79,7 @@ static u8 maltron_rdesc_o[] = { }; /* The patched descriptor, allowing media key events to be accepted as valid */ -static u8 maltron_rdesc[] = { +static const u8 maltron_rdesc[] = { 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */ 0x09, 0x80, /* Usage (Sys Control) */ 0xA1, 0x01, /* Collection (Application) */ @@ -137,8 +137,8 @@ static u8 maltron_rdesc[] = { 0xC0 /* End Collection */ }; -static __u8 *maltron_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *maltron_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { if (*rsize == sizeof(maltron_rdesc_o) && !memcmp(maltron_rdesc_o, rdesc, sizeof(maltron_rdesc_o))) { diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index 4cf0fcddb379ec..18ac21c0bcb257 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -56,7 +56,7 @@ struct xb1s_ff_report { __u8 loop_count; } __packed; -static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct ms_data *ms = hid_get_drvdata(hdev); diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c index 989681f73d7754..3089be990afefa 100644 --- a/drivers/hid/hid-monterey.c +++ b/drivers/hid/hid-monterey.c @@ -18,7 +18,7 @@ #include "hid-ids.h" -static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 99812c0f830b5e..638e36c6d0f103 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -212,6 +212,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app); #define MT_CLS_GOOGLE 0x0111 #define 
MT_CLS_RAZER_BLADE_STEALTH 0x0112 #define MT_CLS_SMART_TECH 0x0113 +#define MT_CLS_SIS 0x0457 #define MT_DEFAULT_MAXCONTACT 10 #define MT_MAX_MAXCONTACT 250 @@ -396,6 +397,11 @@ static const struct mt_class mt_classes[] = { MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_SEPARATE_APP_REPORT, }, + { .name = MT_CLS_SIS, + .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | + MT_QUIRK_ALWAYS_VALID | + MT_QUIRK_CONTACT_CNT_ACCURATE, + }, { } }; @@ -1441,12 +1447,13 @@ static int mt_event(struct hid_device *hid, struct hid_field *field, return 0; } -static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *size) { if (hdev->vendor == I2C_VENDOR_ID_GOODIX && (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 || - hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) { + hdev->product == I2C_DEVICE_ID_GOODIX_01E9 || + hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) { if (rdesc[607] == 0x15) { rdesc[607] = 0x25; dev_info( @@ -1811,6 +1818,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID) mt_fix_const_fields(hdev, HID_DG_CONTACTID); + if (hdev->vendor == USB_VENDOR_ID_SIS_TOUCH) + hdev->quirks |= HID_QUIRK_NOGET; + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) return ret; @@ -2065,7 +2075,10 @@ static const struct hid_device_id mt_devices[] = { I2C_DEVICE_ID_GOODIX_01E8) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX, - I2C_DEVICE_ID_GOODIX_01E8) }, + I2C_DEVICE_ID_GOODIX_01E9) }, + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, + HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX, + I2C_DEVICE_ID_GOODIX_01E0) }, /* GoodTouch panels */ { .driver_data = MT_CLS_NSMU, @@ -2113,6 +2126,12 @@ static const struct hid_device_id mt_devices[] = { USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) }, + /* Lenovo X12 TAB Gen 2 */ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_LENOVO, + USB_DEVICE_ID_LENOVO_X12_TAB2) }, + /* Logitech devices */ { .driver_data = MT_CLS_NSMU, HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8, @@ -2275,6 +2294,11 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) }, + /* sis */ + { .driver_data = MT_CLS_SIS, + HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_SIS_TOUCH, + HID_ANY_ID) }, + /* Generic MT device */ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) }, diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index 58cd0506e431bc..55153a2f79886b 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -29,7 +29,7 @@ */ #include "hid-ids.h" -#include +#include #include #include #include diff --git a/drivers/hid/hid-nti.c b/drivers/hid/hid-nti.c index 1952e9ca5f45b5..03f7dd491228cf 100644 --- a/drivers/hid/hid-nti.c +++ b/drivers/hid/hid-nti.c @@ -29,7 +29,7 @@ MODULE_DESCRIPTION("HID driver for Network Technologies USB-SUN keyboard adapter /* * NTI Sun keyboard adapter has wrong logical maximum in report descriptor */ -static __u8 *nti_usbsun_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *nti_usbsun_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) { diff --git a/drivers/hid/hid-ortek.c 
b/drivers/hid/hid-ortek.c index 99e3b06a83314b..f27297269a7f40 100644 --- a/drivers/hid/hid-ortek.c +++ b/drivers/hid/hid-ortek.c @@ -22,7 +22,7 @@ #include "hid-ids.h" -static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c index 5e47634bb07d6a..1a986f077ce108 100644 --- a/drivers/hid/hid-petalynx.c +++ b/drivers/hid/hid-petalynx.c @@ -19,7 +19,7 @@ #include "hid-ids.h" /* Petalynx Maxter Remote has maximum for consumer page set too low */ -static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && diff --git a/drivers/hid/hid-picolcd_backlight.c b/drivers/hid/hid-picolcd_backlight.c index 08d16917eb60be..4b43b64537a335 100644 --- a/drivers/hid/hid-picolcd_backlight.c +++ b/drivers/hid/hid-picolcd_backlight.c @@ -31,7 +31,8 @@ static int picolcd_set_brightness(struct backlight_device *bdev) data->lcd_brightness = bdev->props.brightness & 0x0ff; data->lcd_power = bdev->props.power; spin_lock_irqsave(&data->lock, flags); - hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0); + hid_set_field(report->field[0], 0, + data->lcd_power == BACKLIGHT_POWER_ON ? data->lcd_brightness : 0); if (!(data->status & PICOLCD_FAILED)) hid_hw_request(data->hdev, report, HID_REQ_SET_REPORT); spin_unlock_irqrestore(&data->lock, flags); @@ -94,7 +95,7 @@ void picolcd_suspend_backlight(struct picolcd_data *data) if (!data->backlight) return; - data->backlight->props.power = FB_BLANK_POWERDOWN; + data->backlight->props.power = BACKLIGHT_POWER_OFF; picolcd_set_brightness(data->backlight); data->lcd_power = data->backlight->props.power = bl_power; } diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c index e7c309cfe3a01f..1468fb11e39dff 100644 --- a/drivers/hid/hid-playstation.c +++ b/drivers/hid/hid-playstation.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include "hid-ids.h" @@ -2143,6 +2143,26 @@ static void dualshock4_output_worker(struct work_struct *work) spin_lock_irqsave(&ds4->base.lock, flags); + /* + * Some 3rd party gamepads expect updates to rumble and lightbar + * together, and setting one may cancel the other. + * + * Let's maximise compatibility by always sending rumble and lightbar + * updates together, even when only one has been scheduled, resulting + * in: + * + * ds4->valid_flag0 >= 0x03 + * + * Hopefully this will maximise compatibility with third-party pads. + * + * Any further update bits, such as 0x04 for lightbar blinking, will + * be or'd on top of this like before. + */ + if (ds4->update_rumble || ds4->update_lightbar) { + ds4->update_rumble = true; /* 0x01 */ + ds4->update_lightbar = true; /* 0x02 */ + } + if (ds4->update_rumble) { /* Select classic rumble style haptics and enable it. */ common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_MOTOR; diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index 757361593e524f..3d08c190a935b8 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -728,7 +728,7 @@ static int pcmidi_snd_terminate(struct pcmidi_snd *pm) /* * PC-MIDI report descriptor for report id is wrong. 
*/ -static __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == 178 && diff --git a/drivers/hid/hid-pxrc.c b/drivers/hid/hid-pxrc.c index b0e517f9cde7b4..71fe0c06ddcdf3 100644 --- a/drivers/hid/hid-pxrc.c +++ b/drivers/hid/hid-pxrc.c @@ -17,7 +17,7 @@ struct pxrc_priv { bool alternate; }; -static __u8 pxrc_rdesc_fixed[] = { +static const __u8 pxrc_rdesc_fixed[] = { 0x05, 0x01, // Usage Page (Generic Desktop Ctrls) 0x09, 0x04, // Usage (Joystick) 0xA1, 0x01, // Collection (Application) @@ -42,8 +42,8 @@ static __u8 pxrc_rdesc_fixed[] = { 0xC0, // End Collection }; -static __u8 *pxrc_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *pxrc_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { hid_info(hdev, "fixing up PXRC report descriptor\n"); *rsize = sizeof(pxrc_rdesc_fixed); diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c index 07d803513f27ce..20d28ed75c1e5d 100644 --- a/drivers/hid/hid-redragon.c +++ b/drivers/hid/hid-redragon.c @@ -33,7 +33,7 @@ * key codes are generated. */ -static __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 102 && rdesc[100] == 0x81 && rdesc[101] == 0x00) { diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index 85ac8def368fb9..6fe7c087c594ea 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -66,7 +66,7 @@ static int saitek_probe(struct hid_device *hdev, return 0; } -static __u8 *saitek_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *saitek_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct saitek_sc *ssc = hid_get_drvdata(hdev); diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c index d4e27142245c7a..f3908a9e04e672 100644 --- a/drivers/hid/hid-samsung.c +++ b/drivers/hid/hid-samsung.c @@ -469,7 +469,7 @@ static int samsung_universal_kbd_input_mapping(struct hid_device *hdev, return 1; } -static __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (hdev->product == USB_DEVICE_ID_SAMSUNG_IR_REMOTE && hid_is_usb(hdev)) diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c index 710766f6839d9c..4fbec5fd87ce27 100644 --- a/drivers/hid/hid-semitek.c +++ b/drivers/hid/hid-semitek.c @@ -11,8 +11,8 @@ #include "hid-ids.h" -static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { /* In the report descriptor for interface 2, fix the incorrect description of report ID 0x04 (the report contains a diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index bd400f6b472b86..66f0675df24bdb 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -732,7 +732,7 @@ static int hid_sensor_custom_dev_if_add(struct hid_sensor_custom *sensor_inst) sensor_inst->custom_dev.minor = MISC_DYNAMIC_MINOR; sensor_inst->custom_dev.name = dev_name(&sensor_inst->pdev->dev); - sensor_inst->custom_dev.fops = &hid_sensor_custom_fops, + sensor_inst->custom_dev.fops = &hid_sensor_custom_fops; ret = misc_register(&sensor_inst->custom_dev); if 
(ret) { kfifo_free(&sensor_inst->data_fifo); diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 26e93a331a5107..7bd86eef6ec761 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -580,7 +580,7 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev) } EXPORT_SYMBOL_GPL(sensor_hub_device_close); -static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { /* diff --git a/drivers/hid/hid-sigmamicro.c b/drivers/hid/hid-sigmamicro.c index 2e7058ac0e9d58..c87276d7ba0d69 100644 --- a/drivers/hid/hid-sigmamicro.c +++ b/drivers/hid/hid-sigmamicro.c @@ -99,8 +99,8 @@ static const __u8 sm_0059_rdesc[] = { 0xc0, /* End Collection 166 */ }; -static __u8 *sm_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *sm_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { if (*rsize == sizeof(sm_0059_rdesc) && !memcmp(sm_0059_rdesc, rdesc, *rsize)) { diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index eac75f98f08ab8..d2486f3734f002 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include "hid-ids.h" @@ -99,7 +99,7 @@ static const char ghl_ps4_magic_data[] = { }; /* PS/3 Motion controller */ -static u8 motion_rdesc[] = { +static const u8 motion_rdesc[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ @@ -195,7 +195,7 @@ static u8 motion_rdesc[] = { 0xC0 /* End Collection */ }; -static u8 ps3remote_rdesc[] = { +static const u8 ps3remote_rdesc[] = { 0x05, 0x01, /* GUsagePage Generic Desktop */ 0x09, 0x05, /* LUsage 0x05 [Game Pad] */ 0xA1, 0x01, /* MCollection Application (mouse, keyboard) */ @@ -599,15 +599,15 @@ static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi, return 0; } -static u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc, - unsigned int *rsize) +static const u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc, + unsigned int *rsize) { *rsize = sizeof(motion_rdesc); return motion_rdesc; } -static u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc, - unsigned int *rsize) +static const u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc, + unsigned int *rsize) { *rsize = sizeof(ps3remote_rdesc); return ps3remote_rdesc; @@ -743,7 +743,7 @@ static int sixaxis_mapping(struct hid_device *hdev, struct hid_input *hi, return -1; } -static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc, +static const u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *rsize) { struct sony_sc *sc = hid_get_drvdata(hdev); diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c index 2154e14f55f11f..7e83fee1ffa043 100644 --- a/drivers/hid/hid-steelseries.c +++ b/drivers/hid/hid-steelseries.c @@ -51,7 +51,7 @@ struct steelseries_srws1_data { * appear in the 'Generic Desktop' usage. 
*/ -static __u8 steelseries_srws1_rdesc_fixed[] = { +static const __u8 steelseries_srws1_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop) */ 0x09, 0x08, /* Usage (MultiAxis), Changed */ 0xA1, 0x01, /* Collection (Application), */ @@ -570,8 +570,8 @@ static void steelseries_remove(struct hid_device *hdev) hid_hw_stop(hdev); } -static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, + __u8 *rdesc, unsigned int *rsize) { if (hdev->vendor != USB_VENDOR_ID_STEELSERIES || hdev->product != USB_DEVICE_ID_STEELSERIES_SRWS1) @@ -580,8 +580,8 @@ static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc if (*rsize >= 115 && rdesc[11] == 0x02 && rdesc[13] == 0xc8 && rdesc[29] == 0xbb && rdesc[40] == 0xc5) { hid_info(hdev, "Fixing up Steelseries SRW-S1 report descriptor\n"); - rdesc = steelseries_srws1_rdesc_fixed; *rsize = sizeof(steelseries_srws1_rdesc_fixed); + return steelseries_srws1_rdesc_fixed; } return rdesc; } diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c index f32e60d4420f67..64e4cff8ca1d16 100644 --- a/drivers/hid/hid-sunplus.c +++ b/drivers/hid/hid-sunplus.c @@ -18,7 +18,7 @@ #include "hid-ids.h" -static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c index d1d5ca310eadc0..848361f6225df1 100644 --- a/drivers/hid/hid-topre.c +++ b/drivers/hid/hid-topre.c @@ -21,8 +21,8 @@ MODULE_LICENSE("GPL"); * events it's actually sending. It claims to send array events but is instead * sending variable events. */ -static __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { if (*rsize >= 119 && rdesc[69] == 0x29 && rdesc[70] == 0xe7 && rdesc[71] == 0x81 && rdesc[72] == 0x00) { diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index b176f9c0dd5278..d8008933c052f5 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -50,14 +50,14 @@ static void uclogic_inrange_timeout(struct timer_list *t) input_sync(input); } -static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev); if (drvdata->desc_ptr != NULL) { - rdesc = drvdata->desc_ptr; *rsize = drvdata->desc_size; + return drvdata->desc_ptr; } return rdesc; } diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 5bab006ec165bb..ef26c7defcf61c 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -19,7 +19,7 @@ #include "hid-ids.h" #include #include -#include +#include /** * uclogic_params_pen_inrange_to_str() - Convert a pen in-range reporting type @@ -681,7 +681,7 @@ void uclogic_params_cleanup(struct uclogic_params *params) * -ENOMEM, if failed to allocate memory. 
*/ int uclogic_params_get_desc(const struct uclogic_params *params, - __u8 **pdesc, + const __u8 **pdesc, unsigned int *psize) { int rc = -ENOMEM; @@ -769,7 +769,7 @@ static void uclogic_params_init_invalid(struct uclogic_params *params) static int uclogic_params_init_with_opt_desc(struct uclogic_params *params, struct hid_device *hdev, unsigned int orig_desc_size, - __u8 *desc_ptr, + const __u8 *desc_ptr, unsigned int desc_size) { __u8 *desc_copy_ptr = NULL; diff --git a/drivers/hid/hid-uclogic-params.h b/drivers/hid/hid-uclogic-params.h index d6ffadb2f60156..35ff062d09b50a 100644 --- a/drivers/hid/hid-uclogic-params.h +++ b/drivers/hid/hid-uclogic-params.h @@ -79,7 +79,7 @@ struct uclogic_params_pen { * Pointer to report descriptor part describing the pen inputs. * Allocated with kmalloc. NULL if the part is not specified. */ - __u8 *desc_ptr; + const __u8 *desc_ptr; /* * Size of the report descriptor. * Only valid, if "desc_ptr" is not NULL. @@ -118,7 +118,7 @@ struct uclogic_params_frame { * Pointer to report descriptor part describing the frame inputs. * Allocated with kmalloc. NULL if the part is not specified. */ - __u8 *desc_ptr; + const __u8 *desc_ptr; /* * Size of the report descriptor. * Only valid, if "desc_ptr" is not NULL. @@ -212,7 +212,7 @@ struct uclogic_params { * allocated with kmalloc. NULL if no common part is needed. * Only valid, if "invalid" is false. */ - __u8 *desc_ptr; + const __u8 *desc_ptr; /* * Size of the common part of the replacement report descriptor. * Only valid, if "desc_ptr" is valid and not NULL. @@ -239,7 +239,7 @@ struct uclogic_drvdata { /* Interface parameters */ struct uclogic_params params; /* Pointer to the replacement report descriptor. NULL if none. */ - __u8 *desc_ptr; + const __u8 *desc_ptr; /* * Size of the replacement report descriptor. * Only valid if desc_ptr is not NULL @@ -261,7 +261,7 @@ extern int uclogic_params_init(struct uclogic_params *params, /* Get a replacement report descriptor for a tablet's interface. 
*/ extern int uclogic_params_get_desc(const struct uclogic_params *params, - __u8 **pdesc, + const __u8 **pdesc, unsigned int *psize); /* Free resources used by tablet interface's parameters */ diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c index acfa591ab52f17..9b9cbc2aae368e 100644 --- a/drivers/hid/hid-uclogic-rdesc.c +++ b/drivers/hid/hid-uclogic-rdesc.c @@ -16,11 +16,11 @@ #include "hid-uclogic-rdesc.h" #include -#include +#include #include /* Fixed WP4030U report descriptor */ -__u8 uclogic_rdesc_wp4030u_fixed_arr[] = { +const __u8 uclogic_rdesc_wp4030u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ @@ -65,7 +65,7 @@ const size_t uclogic_rdesc_wp4030u_fixed_size = sizeof(uclogic_rdesc_wp4030u_fixed_arr); /* Fixed WP5540U report descriptor */ -__u8 uclogic_rdesc_wp5540u_fixed_arr[] = { +const __u8 uclogic_rdesc_wp5540u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ @@ -142,7 +142,7 @@ const size_t uclogic_rdesc_wp5540u_fixed_size = sizeof(uclogic_rdesc_wp5540u_fixed_arr); /* Fixed WP8060U report descriptor */ -__u8 uclogic_rdesc_wp8060u_fixed_arr[] = { +const __u8 uclogic_rdesc_wp8060u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ @@ -219,7 +219,7 @@ const size_t uclogic_rdesc_wp8060u_fixed_size = sizeof(uclogic_rdesc_wp8060u_fixed_arr); /* Fixed WP1062 report descriptor */ -__u8 uclogic_rdesc_wp1062_fixed_arr[] = { +const __u8 uclogic_rdesc_wp1062_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ @@ -267,7 +267,7 @@ const size_t uclogic_rdesc_wp1062_fixed_size = sizeof(uclogic_rdesc_wp1062_fixed_arr); /* Fixed PF1209 report descriptor */ -__u8 uclogic_rdesc_pf1209_fixed_arr[] = { +const __u8 uclogic_rdesc_pf1209_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ @@ -344,7 +344,7 @@ const size_t uclogic_rdesc_pf1209_fixed_size = sizeof(uclogic_rdesc_pf1209_fixed_arr); /* Fixed PID 0522 tablet report descriptor, interface 0 (stylus) */ -__u8 uclogic_rdesc_twhl850_fixed0_arr[] = { +const __u8 uclogic_rdesc_twhl850_fixed0_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ @@ -390,7 +390,7 @@ const size_t uclogic_rdesc_twhl850_fixed0_size = sizeof(uclogic_rdesc_twhl850_fixed0_arr); /* Fixed PID 0522 tablet report descriptor, interface 1 (mouse) */ -__u8 uclogic_rdesc_twhl850_fixed1_arr[] = { +const __u8 uclogic_rdesc_twhl850_fixed1_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ @@ -430,7 +430,7 @@ const size_t uclogic_rdesc_twhl850_fixed1_size = sizeof(uclogic_rdesc_twhl850_fixed1_arr); /* Fixed PID 0522 tablet report descriptor, interface 2 (frame buttons) */ -__u8 uclogic_rdesc_twhl850_fixed2_arr[] = { +const __u8 uclogic_rdesc_twhl850_fixed2_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x06, /* Usage (Keyboard), */ 0xA1, 0x01, /* Collection (Application), */ @@ -456,7 +456,7 @@ const size_t uclogic_rdesc_twhl850_fixed2_size = sizeof(uclogic_rdesc_twhl850_fixed2_arr); /* Fixed TWHA60 report descriptor, interface 0 (stylus) */ -__u8 
uclogic_rdesc_twha60_fixed0_arr[] = { +const __u8 uclogic_rdesc_twha60_fixed0_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ @@ -505,7 +505,7 @@ const size_t uclogic_rdesc_twha60_fixed0_size = sizeof(uclogic_rdesc_twha60_fixed0_arr); /* Fixed TWHA60 report descriptor, interface 1 (frame buttons) */ -__u8 uclogic_rdesc_twha60_fixed1_arr[] = { +const __u8 uclogic_rdesc_twha60_fixed1_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x06, /* Usage (Keyboard), */ 0xA1, 0x01, /* Collection (Application), */ diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h index 906d068f50a95e..3878a0e8c46414 100644 --- a/drivers/hid/hid-uclogic-rdesc.h +++ b/drivers/hid/hid-uclogic-rdesc.h @@ -23,15 +23,15 @@ #define UCLOGIC_RDESC_WPXXXXU_ORIG_SIZE 212 /* Fixed WP4030U report descriptor */ -extern __u8 uclogic_rdesc_wp4030u_fixed_arr[]; +extern const __u8 uclogic_rdesc_wp4030u_fixed_arr[]; extern const size_t uclogic_rdesc_wp4030u_fixed_size; /* Fixed WP5540U report descriptor */ -extern __u8 uclogic_rdesc_wp5540u_fixed_arr[]; +extern const __u8 uclogic_rdesc_wp5540u_fixed_arr[]; extern const size_t uclogic_rdesc_wp5540u_fixed_size; /* Fixed WP8060U report descriptor */ -extern __u8 uclogic_rdesc_wp8060u_fixed_arr[]; +extern const __u8 uclogic_rdesc_wp8060u_fixed_arr[]; extern const size_t uclogic_rdesc_wp8060u_fixed_size; /* Size of the original descriptor of the new WP5540U tablet */ @@ -41,14 +41,14 @@ extern const size_t uclogic_rdesc_wp8060u_fixed_size; #define UCLOGIC_RDESC_WP1062_ORIG_SIZE 254 /* Fixed WP1062 report descriptor */ -extern __u8 uclogic_rdesc_wp1062_fixed_arr[]; +extern const __u8 uclogic_rdesc_wp1062_fixed_arr[]; extern const size_t uclogic_rdesc_wp1062_fixed_size; /* Size of the original descriptor of PF1209 tablet */ #define UCLOGIC_RDESC_PF1209_ORIG_SIZE 234 /* Fixed PF1209 report descriptor */ -extern __u8 uclogic_rdesc_pf1209_fixed_arr[]; +extern const __u8 uclogic_rdesc_pf1209_fixed_arr[]; extern const size_t uclogic_rdesc_pf1209_fixed_size; /* Size of the original descriptors of TWHL850 tablet */ @@ -57,15 +57,15 @@ extern const size_t uclogic_rdesc_pf1209_fixed_size; #define UCLOGIC_RDESC_TWHL850_ORIG2_SIZE 92 /* Fixed PID 0522 tablet report descriptor, interface 0 (stylus) */ -extern __u8 uclogic_rdesc_twhl850_fixed0_arr[]; +extern const __u8 uclogic_rdesc_twhl850_fixed0_arr[]; extern const size_t uclogic_rdesc_twhl850_fixed0_size; /* Fixed PID 0522 tablet report descriptor, interface 1 (mouse) */ -extern __u8 uclogic_rdesc_twhl850_fixed1_arr[]; +extern const __u8 uclogic_rdesc_twhl850_fixed1_arr[]; extern const size_t uclogic_rdesc_twhl850_fixed1_size; /* Fixed PID 0522 tablet report descriptor, interface 2 (frame buttons) */ -extern __u8 uclogic_rdesc_twhl850_fixed2_arr[]; +extern const __u8 uclogic_rdesc_twhl850_fixed2_arr[]; extern const size_t uclogic_rdesc_twhl850_fixed2_size; /* Size of the original descriptors of TWHA60 tablet */ @@ -73,11 +73,11 @@ extern const size_t uclogic_rdesc_twhl850_fixed2_size; #define UCLOGIC_RDESC_TWHA60_ORIG1_SIZE 139 /* Fixed TWHA60 report descriptor, interface 0 (stylus) */ -extern __u8 uclogic_rdesc_twha60_fixed0_arr[]; +extern const __u8 uclogic_rdesc_twha60_fixed0_arr[]; extern const size_t uclogic_rdesc_twha60_fixed0_size; /* Fixed TWHA60 report descriptor, interface 1 (frame buttons) */ -extern __u8 uclogic_rdesc_twha60_fixed1_arr[]; +extern const __u8 uclogic_rdesc_twha60_fixed1_arr[]; extern const size_t 
uclogic_rdesc_twha60_fixed1_size; /* Report descriptor template placeholder head */ diff --git a/drivers/hid/hid-viewsonic.c b/drivers/hid/hid-viewsonic.c index 668c2adb77b672..532bed88bdf8a2 100644 --- a/drivers/hid/hid-viewsonic.c +++ b/drivers/hid/hid-viewsonic.c @@ -22,7 +22,7 @@ #define PD1011_RDESC_ORIG_SIZE 408 /* Fixed report descriptor of PD1011 signature pad */ -static __u8 pd1011_rdesc_fixed[] = { +static const __u8 pd1011_rdesc_fixed[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ @@ -70,15 +70,15 @@ static __u8 pd1011_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 *viewsonic_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *viewsonic_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { switch (hdev->product) { case USB_DEVICE_ID_VIEWSONIC_PD1011: case USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011: if (*rsize == PD1011_RDESC_ORIG_SIZE) { - rdesc = pd1011_rdesc_fixed; *rsize = sizeof(pd1011_rdesc_fixed); + return pd1011_rdesc_fixed; } break; } diff --git a/drivers/hid/hid-vrc2.c b/drivers/hid/hid-vrc2.c index 80a2b7ef5e66a0..7dc41e92f48879 100644 --- a/drivers/hid/hid-vrc2.c +++ b/drivers/hid/hid-vrc2.c @@ -16,7 +16,7 @@ #define USB_VENDOR_ID_VRC2 (0x07c0) #define USB_DEVICE_ID_VRC2 (0x1125) -static __u8 vrc2_rdesc_fixed[] = { +static const __u8 vrc2_rdesc_fixed[] = { 0x05, 0x01, // Usage Page (Generic Desktop Ctrls) 0x09, 0x04, // Usage (Joystick) 0xA1, 0x01, // Collection (Application) @@ -38,8 +38,8 @@ static __u8 vrc2_rdesc_fixed[] = { 0xC0, // End Collection }; -static __u8 *vrc2_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *vrc2_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { hid_info(hdev, "fixing up VRC-2 report descriptor\n"); *rsize = sizeof(vrc2_rdesc_fixed); diff --git a/drivers/hid/hid-waltop.c b/drivers/hid/hid-waltop.c index 1e590c61eef532..be34be27d4d512 100644 --- a/drivers/hid/hid-waltop.c +++ b/drivers/hid/hid-waltop.c @@ -43,7 +43,7 @@ #define SLIM_TABLET_5_8_INCH_RDESC_ORIG_SIZE 222 /* Fixed Slim Tablet 5.8 inch descriptor */ -static __u8 slim_tablet_5_8_inch_rdesc_fixed[] = { +static const __u8 slim_tablet_5_8_inch_rdesc_fixed[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ @@ -94,7 +94,7 @@ static __u8 slim_tablet_5_8_inch_rdesc_fixed[] = { #define SLIM_TABLET_12_1_INCH_RDESC_ORIG_SIZE 269 /* Fixed Slim Tablet 12.1 inch descriptor */ -static __u8 slim_tablet_12_1_inch_rdesc_fixed[] = { +static const __u8 slim_tablet_12_1_inch_rdesc_fixed[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ @@ -145,7 +145,7 @@ static __u8 slim_tablet_12_1_inch_rdesc_fixed[] = { #define Q_PAD_RDESC_ORIG_SIZE 241 /* Fixed Q Pad descriptor */ -static __u8 q_pad_rdesc_fixed[] = { +static const __u8 q_pad_rdesc_fixed[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ @@ -198,7 +198,7 @@ static __u8 q_pad_rdesc_fixed[] = { /* * Fixed report descriptor for tablet with PID 0038. 
*/ -static __u8 pid_0038_rdesc_fixed[] = { +static const __u8 pid_0038_rdesc_fixed[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ @@ -249,7 +249,7 @@ static __u8 pid_0038_rdesc_fixed[] = { #define MEDIA_TABLET_10_6_INCH_RDESC_ORIG_SIZE 300 /* Fixed Media Tablet 10.6 inch descriptor */ -static __u8 media_tablet_10_6_inch_rdesc_fixed[] = { +static const __u8 media_tablet_10_6_inch_rdesc_fixed[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ @@ -362,7 +362,7 @@ static __u8 media_tablet_10_6_inch_rdesc_fixed[] = { #define MEDIA_TABLET_14_1_INCH_RDESC_ORIG_SIZE 309 /* Fixed Media Tablet 14.1 inch descriptor */ -static __u8 media_tablet_14_1_inch_rdesc_fixed[] = { +static const __u8 media_tablet_14_1_inch_rdesc_fixed[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ @@ -473,7 +473,7 @@ static __u8 media_tablet_14_1_inch_rdesc_fixed[] = { #define SIRIUS_BATTERY_FREE_TABLET_RDESC_ORIG_SIZE 335 /* Fixed Sirius Battery Free Tablet descriptor */ -static __u8 sirius_battery_free_tablet_rdesc_fixed[] = { +static const __u8 sirius_battery_free_tablet_rdesc_fixed[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ @@ -599,50 +599,50 @@ static __u8 sirius_battery_free_tablet_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 *waltop_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *waltop_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { switch (hdev->product) { case USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH: if (*rsize == SLIM_TABLET_5_8_INCH_RDESC_ORIG_SIZE) { - rdesc = slim_tablet_5_8_inch_rdesc_fixed; *rsize = sizeof(slim_tablet_5_8_inch_rdesc_fixed); + return slim_tablet_5_8_inch_rdesc_fixed; } break; case USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH: if (*rsize == SLIM_TABLET_12_1_INCH_RDESC_ORIG_SIZE) { - rdesc = slim_tablet_12_1_inch_rdesc_fixed; *rsize = sizeof(slim_tablet_12_1_inch_rdesc_fixed); + return slim_tablet_12_1_inch_rdesc_fixed; } break; case USB_DEVICE_ID_WALTOP_Q_PAD: if (*rsize == Q_PAD_RDESC_ORIG_SIZE) { - rdesc = q_pad_rdesc_fixed; *rsize = sizeof(q_pad_rdesc_fixed); + return q_pad_rdesc_fixed; } break; case USB_DEVICE_ID_WALTOP_PID_0038: if (*rsize == PID_0038_RDESC_ORIG_SIZE) { - rdesc = pid_0038_rdesc_fixed; *rsize = sizeof(pid_0038_rdesc_fixed); + return pid_0038_rdesc_fixed; } break; case USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH: if (*rsize == MEDIA_TABLET_10_6_INCH_RDESC_ORIG_SIZE) { - rdesc = media_tablet_10_6_inch_rdesc_fixed; *rsize = sizeof(media_tablet_10_6_inch_rdesc_fixed); + return media_tablet_10_6_inch_rdesc_fixed; } break; case USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH: if (*rsize == MEDIA_TABLET_14_1_INCH_RDESC_ORIG_SIZE) { - rdesc = media_tablet_14_1_inch_rdesc_fixed; *rsize = sizeof(media_tablet_14_1_inch_rdesc_fixed); + return media_tablet_14_1_inch_rdesc_fixed; } break; case USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET: if (*rsize == SIRIUS_BATTERY_FREE_TABLET_RDESC_ORIG_SIZE) { - rdesc = sirius_battery_free_tablet_rdesc_fixed; *rsize = sizeof(sirius_battery_free_tablet_rdesc_fixed); + return sirius_battery_free_tablet_rdesc_fixed; } break; } diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c index 10a5d87ccb96c9..831b760c66ea72 100644 --- a/drivers/hid/hid-winwing.c +++ 
b/drivers/hid/hid-winwing.c @@ -27,7 +27,7 @@ struct winwing_led_info { const char *led_name; }; -static struct winwing_led_info led_info[3] = { +static const struct winwing_led_info led_info[3] = { { 0, 255, "backlight" }, { 1, 1, "a-a" }, { 2, 1, "a-g" }, @@ -94,7 +94,7 @@ static int winwing_init_led(struct hid_device *hdev, return -ENOMEM; for (i = 0; i < 3; i += 1) { - struct winwing_led_info *info = &led_info[i]; + const struct winwing_led_info *info = &led_info[i]; led = &data->leds[i]; led->hdev = hdev; @@ -150,7 +150,7 @@ static int winwing_input_configured(struct hid_device *hdev, return ret; } -static __u8 original_rdesc_buttons[] = { +static const __u8 original_rdesc_buttons[] = { 0x05, 0x09, 0x19, 0x01, 0x29, 0x6F, 0x15, 0x00, 0x25, 0x01, 0x35, 0x00, 0x45, 0x01, 0x75, 0x01, 0x95, 0x6F, @@ -165,7 +165,7 @@ static __u8 original_rdesc_buttons[] = { * This module skips numbers 32-63, unused on some throttle grips. */ -static __u8 *winwing_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *winwing_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { int sig_length = sizeof(original_rdesc_buttons); diff --git a/drivers/hid/hid-xiaomi.c b/drivers/hid/hid-xiaomi.c index a97a90afad3381..ef6598550a40f2 100644 --- a/drivers/hid/hid-xiaomi.c +++ b/drivers/hid/hid-xiaomi.c @@ -14,7 +14,7 @@ /* Fixed Mi Silent Mouse report descriptor */ /* Button's Usage Maximum changed from 3 to 5 to make side buttons work */ #define MI_SILENT_MOUSE_ORIG_RDESC_LENGTH 87 -static __u8 mi_silent_mouse_rdesc_fixed[] = { +static const __u8 mi_silent_mouse_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ @@ -61,15 +61,15 @@ static __u8 mi_silent_mouse_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static __u8 *xiaomi_report_fixup(struct hid_device *hdev, __u8 *rdesc, - unsigned int *rsize) +static const __u8 *xiaomi_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) { switch (hdev->product) { case USB_DEVICE_ID_MI_SILENT_MOUSE: if (*rsize == MI_SILENT_MOUSE_ORIG_RDESC_LENGTH) { hid_info(hdev, "fixing up Mi Silent Mouse report descriptor\n"); - rdesc = mi_silent_mouse_rdesc_fixed; *rsize = sizeof(mi_silent_mouse_rdesc_fixed); + return mi_silent_mouse_rdesc_fixed; } break; } diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c index 998a3db19c1f7b..3bdb26f4559257 100644 --- a/drivers/hid/hid-zydacron.c +++ b/drivers/hid/hid-zydacron.c @@ -24,7 +24,7 @@ struct zc_device { * Zydacron remote control has an invalid HID report descriptor, * that needs fixing before we can parse it. 
*/ -static __u8 *zc_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static const __u8 *zc_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 253 && diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 716294e40e8ac4..c887f48756f4be 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -38,12 +38,20 @@ static const struct class hidraw_class = { static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; static DECLARE_RWSEM(minors_rwsem); +static inline bool hidraw_is_revoked(struct hidraw_list *list) +{ + return list->revoked; +} + static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hidraw_list *list = file->private_data; int ret = 0, len; DECLARE_WAITQUEUE(wait, current); + if (hidraw_is_revoked(list)) + return -ENODEV; + mutex_lock(&list->read_mutex); while (ret == 0) { @@ -161,9 +169,13 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { + struct hidraw_list *list = file->private_data; ssize_t ret; down_read(&minors_rwsem); - ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT); + if (hidraw_is_revoked(list)) + ret = -ENODEV; + else + ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT); up_read(&minors_rwsem); return ret; } @@ -256,7 +268,7 @@ static __poll_t hidraw_poll(struct file *file, poll_table *wait) poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) mask |= EPOLLIN | EPOLLRDNORM; - if (!list->hidraw->exist) + if (!list->hidraw->exist || hidraw_is_revoked(list)) mask |= EPOLLERR | EPOLLHUP; return mask; } @@ -320,6 +332,9 @@ static int hidraw_fasync(int fd, struct file *file, int on) { struct hidraw_list *list = file->private_data; + if (hidraw_is_revoked(list)) + return -ENODEV; + return fasync_helper(fd, file, on, &list->fasync); } @@ -372,6 +387,13 @@ static int hidraw_release(struct inode * inode, struct file * file) return 0; } +static int hidraw_revoke(struct hidraw_list *list) +{ + list->revoked = true; + + return 0; +} + static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -379,11 +401,12 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned int minor = iminor(inode); long ret = 0; struct hidraw *dev; + struct hidraw_list *list = file->private_data; void __user *user_arg = (void __user*) arg; down_read(&minors_rwsem); dev = hidraw_table[minor]; - if (!dev || !dev->exist) { + if (!dev || !dev->exist || hidraw_is_revoked(list)) { ret = -ENODEV; goto out; } @@ -421,6 +444,14 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, ret = -EFAULT; break; } + case HIDIOCREVOKE: + { + if (user_arg) + ret = -EINVAL; + else + ret = hidraw_revoke(list); + break; + } default: { struct hid_device *hid = dev->hid; @@ -527,7 +558,7 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len) list_for_each_entry(list, &dev->list, node) { int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1); - if (new_head == list->tail) + if (hidraw_is_revoked(list) || new_head == list->tail) continue; if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) { diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 632eaf9e11a6b6..be5d342d5d13c0 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -36,7 +36,7 @@ #include #include #include -#include 
+#include #include @@ -105,6 +105,7 @@ struct i2c_hid { wait_queue_head_t wait; /* For waiting the interrupt */ + struct mutex cmd_lock; /* protects cmdbuf and rawbuf */ struct mutex reset_lock; struct i2chid_ops *ops; @@ -220,6 +221,8 @@ static int i2c_hid_xfer(struct i2c_hid *ihid, static int i2c_hid_read_register(struct i2c_hid *ihid, __le16 reg, void *buf, size_t len) { + guard(mutex)(&ihid->cmd_lock); + *(__le16 *)ihid->cmdbuf = reg; return i2c_hid_xfer(ihid, ihid->cmdbuf, sizeof(__le16), buf, len); @@ -252,6 +255,8 @@ static int i2c_hid_get_report(struct i2c_hid *ihid, i2c_hid_dbg(ihid, "%s\n", __func__); + guard(mutex)(&ihid->cmd_lock); + /* Command register goes first */ *(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister; length += sizeof(__le16); @@ -342,6 +347,8 @@ static int i2c_hid_set_or_send_report(struct i2c_hid *ihid, if (!do_set && le16_to_cpu(ihid->hdesc.wMaxOutputLength) == 0) return -ENOSYS; + guard(mutex)(&ihid->cmd_lock); + if (do_set) { /* Command register goes first */ *(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister; @@ -384,6 +391,8 @@ static int i2c_hid_set_power_command(struct i2c_hid *ihid, int power_state) { size_t length; + guard(mutex)(&ihid->cmd_lock); + /* SET_POWER uses command register */ *(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister; length = sizeof(__le16); @@ -440,25 +449,27 @@ static int i2c_hid_start_hwreset(struct i2c_hid *ihid) if (ret) return ret; - /* Prepare reset command. Command register goes first. */ - *(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister; - length += sizeof(__le16); - /* Next is RESET command itself */ - length += i2c_hid_encode_command(ihid->cmdbuf + length, - I2C_HID_OPCODE_RESET, 0, 0); + scoped_guard(mutex, &ihid->cmd_lock) { + /* Prepare reset command. Command register goes first. 
*/ + *(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister; + length += sizeof(__le16); + /* Next is RESET command itself */ + length += i2c_hid_encode_command(ihid->cmdbuf + length, + I2C_HID_OPCODE_RESET, 0, 0); - set_bit(I2C_HID_RESET_PENDING, &ihid->flags); + set_bit(I2C_HID_RESET_PENDING, &ihid->flags); - ret = i2c_hid_xfer(ihid, ihid->cmdbuf, length, NULL, 0); - if (ret) { - dev_err(&ihid->client->dev, - "failed to reset device: %d\n", ret); - goto err_clear_reset; - } + ret = i2c_hid_xfer(ihid, ihid->cmdbuf, length, NULL, 0); + if (ret) { + dev_err(&ihid->client->dev, + "failed to reset device: %d\n", ret); + break; + } - return 0; + return 0; + } -err_clear_reset: + /* Clean up if sending reset command failed */ clear_bit(I2C_HID_RESET_PENDING, &ihid->flags); i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP); return ret; @@ -1200,6 +1211,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, ihid->is_panel_follower = drm_is_panel_follower(&client->dev); init_waitqueue_head(&ihid->wait); + mutex_init(&ihid->cmd_lock); mutex_init(&ihid->reset_lock); INIT_WORK(&ihid->panel_follower_prepare_work, ihid_core_panel_prepare_work); diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c index 091e37933225ad..3fcff6daa0d3a6 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c +++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c @@ -152,6 +152,13 @@ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = { .main_supply_name = "vcc33", }; +static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = { + .post_power_delay_ms = 10, + .post_gpio_reset_on_delay_ms = 300, + .hid_descriptor_address = 0x0001, + .main_supply_name = "vcc33", +}; + static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = { .post_power_delay_ms = 1, .post_gpio_reset_on_delay_ms = 200, @@ -174,6 +181,7 @@ static const struct elan_i2c_hid_chip_data ilitek_ili2901_chip_data = { static const struct of_device_id elan_i2c_hid_of_match[] = { { .compatible = "elan,ekth6915", .data = &elan_ekth6915_chip_data }, + { .compatible = "elan,ekth6a12nay", .data = &elan_ekth6a12nay_chip_data }, { .compatible = "ilitek,ili9882t", .data = &ilitek_ili9882t_chip_data }, { .compatible = "ilitek,ili2901", .data = &ilitek_ili2901_chip_data }, { } diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index f82428d7f6c3d7..aae0d965b47b5e 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -28,11 +28,14 @@ enum ishtp_driver_data_index { ISHTP_DRIVER_DATA_LNL_M, }; -#define ISH_FW_FILENAME_LNL_M "intel/ish/ish_lnlm.bin" +#define ISH_FW_GEN_LNL_M "lnlm" + +#define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin" +#define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin" static struct ishtp_driver_data ishtp_driver_data[] = { [ISHTP_DRIVER_DATA_LNL_M] = { - .fw_filename = ISH_FW_FILENAME_LNL_M, + .fw_generation = ISH_FW_GEN_LNL_M, }, }; @@ -397,4 +400,5 @@ MODULE_AUTHOR("Srinivas Pandruvada "); MODULE_DESCRIPTION("Intel(R) Integrated Sensor Hub PCI Device Driver"); MODULE_LICENSE("GPL"); -MODULE_FIRMWARE(ISH_FW_FILENAME_LNL_M); +MODULE_FIRMWARE(ISH_FIRMWARE_PATH(ISH_FW_GEN_LNL_M)); +MODULE_FIRMWARE(ISH_FIRMWARE_PATH_ALL); diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h index 5bb85c932e4cab..53645ac89ee8e2 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.h +++ b/drivers/hid/intel-ish-hid/ishtp/bus.h @@ -46,7 +46,6 @@ struct ishtp_cl_device { }; 
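A minimal sketch, for reference, of the scope-based locking idiom that the new cmd_lock code above relies on (guard() and scoped_guard() from <linux/cleanup.h>, with the mutex guard class from <linux/mutex.h>); the structure and helper below are invented for illustration and are not part of the patch:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_dev {
	struct mutex cmd_lock;		/* protects cmdbuf, as in struct i2c_hid */
	u8 cmdbuf[64];
};

/* The mutex is taken here and released automatically on every return path. */
static int example_fill_cmdbuf(struct example_dev *edev, u8 opcode)
{
	guard(mutex)(&edev->cmd_lock);

	if (opcode == 0xff)
		return -EINVAL;		/* unlocked here, no goto/unlock needed */

	edev->cmdbuf[0] = opcode;
	return 0;			/* unlocked here as well */
}

scoped_guard(mutex, &lock) { ... } works the same way but limits the hold time to the braced block, which is why i2c_hid_start_hwreset() can simply break out of the scope on error instead of jumping to an unlock label.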
int ishtp_bus_new_client(struct ishtp_device *dev); -void ishtp_remove_all_clients(struct ishtp_device *dev); int ishtp_cl_device_bind(struct ishtp_cl *cl); void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device); diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h index fc62dd1495dae4..d9d398fadcf7e0 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client.h +++ b/drivers/hid/intel-ish-hid/ishtp/client.h @@ -109,7 +109,6 @@ struct ishtp_cl { }; /* Client connection managenment internal functions */ -int ishtp_can_client_connect(struct ishtp_device *ishtp_dev, guid_t *uuid); int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id); void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl); void recv_ishtp_cl_msg(struct ishtp_device *dev, diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h index 181838c3d7ac9c..cdacce0a4c9d7d 100644 --- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h +++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h @@ -129,13 +129,15 @@ struct ishtp_hw_ops { * ISHTP device instance. It allows for the storage of data that is unique to * a particular driver or hardware variant. * - * @fw_filename: The firmware filename associated with a specific hardware + * @fw_generation: The generation name associated with a specific hardware * variant of the Intel Integrated Sensor Hub (ISH). This allows * the driver to load the correct firmware based on the device's - * hardware variant. + * hardware variant. For example, "lnlm" for the Lunar Lake-M + * platform. The generation name must not exceed 8 characters + * in length. */ struct ishtp_driver_data { - char *fw_filename; + char *fw_generation; }; /** diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c index fcca070bdecbe9..f76c4437a1f5f1 100644 --- a/drivers/hid/intel-ish-hid/ishtp/loader.c +++ b/drivers/hid/intel-ish-hid/ishtp/loader.c @@ -35,14 +35,17 @@ #include #include +#include #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -192,6 +195,119 @@ static int prepare_dma_bufs(struct ishtp_device *dev, return 0; } +#define ISH_FW_FILE_VENDOR_NAME_SKU_FMT "intel/ish/ish_%s_%08x_%08x_%08x.bin" +#define ISH_FW_FILE_VENDOR_SKU_FMT "intel/ish/ish_%s_%08x_%08x.bin" +#define ISH_FW_FILE_VENDOR_NAME_FMT "intel/ish/ish_%s_%08x_%08x.bin" +#define ISH_FW_FILE_VENDOR_FMT "intel/ish/ish_%s_%08x.bin" +#define ISH_FW_FILE_DEFAULT_FMT "intel/ish/ish_%s.bin" + +#define ISH_FW_FILENAME_LEN_MAX 56 + +#define ISH_CRC_INIT (~0u) +#define ISH_CRC_XOROUT (~0u) + +static int _request_ish_firmware(const struct firmware **firmware_p, + const char *name, struct device *dev) +{ + int ret; + + dev_dbg(dev, "Try to load firmware: %s\n", name); + ret = firmware_request_nowarn(firmware_p, name, dev); + if (!ret) + dev_info(dev, "load firmware: %s\n", name); + + return ret; +} + +/** + * request_ish_firmware() - Request and load the ISH firmware. + * @firmware_p: Pointer to the firmware image. + * @dev: Device for which firmware is being requested. 
+ * + * This function attempts to load the Integrated Sensor Hub (ISH) firmware + * for the given device in the following order, prioritizing custom firmware + * with more precise matching patterns: + * + * ish_${fw_generation}_${SYS_VENDOR_CRC32}_$(PRODUCT_NAME_CRC32)_${PRODUCT_SKU_CRC32}.bin + * ish_${fw_generation}_${SYS_VENDOR_CRC32}_${PRODUCT_SKU_CRC32}.bin + * ish_${fw_generation}_${SYS_VENDOR_CRC32}_$(PRODUCT_NAME_CRC32).bin + * ish_${fw_generation}_${SYS_VENDOR_CRC32}.bin + * ish_${fw_generation}.bin + * + * The driver will load the first matching firmware and skip the rest. If no + * matching firmware is found, it will proceed to the next pattern in the + * specified order. If all searches fail, the default Intel firmware, listed + * last in the order above, will be loaded. + * + * The firmware file name is constructed using CRC32 checksums of strings. + * This is done to create a valid file name that does not contain spaces + * or special characters which may be present in the original strings. + * + * The CRC-32 algorithm uses the following parameters: + * Poly: 0x04C11DB7 + * Init: 0xFFFFFFFF + * RefIn: true + * RefOut: true + * XorOut: 0xFFFFFFFF + * + * Return: 0 on success, negative error code on failure. + */ +static int request_ish_firmware(const struct firmware **firmware_p, + struct device *dev) +{ + const char *gen, *sys_vendor, *product_name, *product_sku; + struct ishtp_device *ishtp = dev_get_drvdata(dev); + u32 vendor_crc, name_crc, sku_crc; + char filename[ISH_FW_FILENAME_LEN_MAX]; + int ret; + + gen = ishtp->driver_data->fw_generation; + sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR); + product_name = dmi_get_system_info(DMI_PRODUCT_NAME); + product_sku = dmi_get_system_info(DMI_PRODUCT_SKU); + + if (sys_vendor) + vendor_crc = crc32(ISH_CRC_INIT, sys_vendor, strlen(sys_vendor)) ^ ISH_CRC_XOROUT; + if (product_name) + name_crc = crc32(ISH_CRC_INIT, product_name, strlen(product_name)) ^ ISH_CRC_XOROUT; + if (product_sku) + sku_crc = crc32(ISH_CRC_INIT, product_sku, strlen(product_sku)) ^ ISH_CRC_XOROUT; + + if (sys_vendor && product_name && product_sku) { + snprintf(filename, sizeof(filename), ISH_FW_FILE_VENDOR_NAME_SKU_FMT, gen, + vendor_crc, name_crc, sku_crc); + ret = _request_ish_firmware(firmware_p, filename, dev); + if (!ret) + return 0; + } + + if (sys_vendor && product_sku) { + snprintf(filename, sizeof(filename), ISH_FW_FILE_VENDOR_SKU_FMT, gen, vendor_crc, + sku_crc); + ret = _request_ish_firmware(firmware_p, filename, dev); + if (!ret) + return 0; + } + + if (sys_vendor && product_name) { + snprintf(filename, sizeof(filename), ISH_FW_FILE_VENDOR_NAME_FMT, gen, vendor_crc, + name_crc); + ret = _request_ish_firmware(firmware_p, filename, dev); + if (!ret) + return 0; + } + + if (sys_vendor) { + snprintf(filename, sizeof(filename), ISH_FW_FILE_VENDOR_FMT, gen, vendor_crc); + ret = _request_ish_firmware(firmware_p, filename, dev); + if (!ret) + return 0; + } + + snprintf(filename, sizeof(filename), ISH_FW_FILE_DEFAULT_FMT, gen); + return _request_ish_firmware(firmware_p, filename, dev); +} + /** * ishtp_loader_work() - Load the ISHTP firmware * @work: The work structure @@ -220,7 +336,6 @@ void ishtp_loader_work(struct work_struct *work) struct loader_xfer_query query = { .header = cpu_to_le32(query_hdr.val32), }; struct loader_start start = { .header = cpu_to_le32(start_hdr.val32), }; union loader_recv_message recv_msg; - char *filename = dev->driver_data->fw_filename; const struct firmware *ish_fw; void *dma_bufs[FRAGMENT_MAX_NUM] = {}; u32 
fragment_size; @@ -228,9 +343,9 @@ void ishtp_loader_work(struct work_struct *work) int retry = ISHTP_LOADER_RETRY_TIMES; int rv; - rv = request_firmware(&ish_fw, filename, dev->devc); + rv = request_ish_firmware(&ish_fw, dev->devc); if (rv < 0) { - dev_err(dev->devc, "request firmware %s failed:%d\n", filename, rv); + dev_err(dev->devc, "request ISH firmware failed:%d\n", rv); return; } diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c index 61e5814b0ad7d4..eae47e0d95ed69 100644 --- a/drivers/hid/surface-hid/surface_hid.c +++ b/drivers/hid/surface-hid/surface_hid.c @@ -8,7 +8,7 @@ * Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c index a3e9cceddfacf3..6690c24f28f0cb 100644 --- a/drivers/hid/surface-hid/surface_hid_core.c +++ b/drivers/hid/surface-hid/surface_hid_core.c @@ -7,7 +7,7 @@ * Copyright (C) 2019-2021 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/hid/surface-hid/surface_kbd.c b/drivers/hid/surface-hid/surface_kbd.c index 8c0cbb2deb11a9..383200d9121ae8 100644 --- a/drivers/hid/surface-hid/surface_kbd.c +++ b/drivers/hid/surface-hid/surface_kbd.c @@ -7,7 +7,7 @@ * Copyright (C) 2019-2021 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index a54c7995b9be09..21a70420151e47 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -803,7 +803,6 @@ static const struct file_operations uhid_fops = { .read = uhid_char_read, .write = uhid_char_write, .poll = uhid_char_poll, - .llseek = no_llseek, }; static struct miscdevice uhid_misc = { diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index cb687ea7325c19..a9e85bdd4cc656 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h index 77c5fb26cd14c2..6f1443999d1d9b 100644 --- a/drivers/hid/wacom.h +++ b/drivers/hid/wacom.h @@ -89,7 +89,7 @@ #include #include #include -#include +#include /* * Version Information diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 2541fa2e0fa3b1..59a13ad9371cd7 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1906,11 +1906,12 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage, if ((code == ABS_X || code == ABS_Y) && !resolution) { resolution = WACOM_INTUOS_RES; hid_warn(input, - "Wacom usage (%d) missing resolution \n", - code); + "Using default resolution for axis type 0x%x code 0x%x\n", + type, code); } input_abs_set_res(input, code, resolution); break; + case EV_REL: case EV_KEY: case EV_MSC: case EV_SW: @@ -2047,7 +2048,23 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev, features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_TOUCHRING: - wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); + if (field->flags & HID_MAIN_ITEM_RELATIVE) { + wacom_wac->relring_count++; + if (wacom_wac->relring_count == 1) { + wacom_map_usage(input, usage, field, EV_REL, REL_WHEEL_HI_RES, 0); + set_bit(REL_WHEEL, input->relbit); + } + else if (wacom_wac->relring_count == 2) { + wacom_map_usage(input, usage, field, EV_REL, REL_HWHEEL_HI_RES, 0); + set_bit(REL_HWHEEL, input->relbit); + } + } else { + wacom_wac->absring_count++; + if (wacom_wac->absring_count == 
1) + wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); + else if (wacom_wac->absring_count == 2) + wacom_map_usage(input, usage, field, EV_ABS, ABS_THROTTLE, 0); + } features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_TOUCHRINGSTATUS: @@ -2112,7 +2129,10 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field return; if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) { - if (usage->hid != WACOM_HID_WD_TOUCHRING) + bool is_abs_touchring = usage->hid == WACOM_HID_WD_TOUCHRING && + !(field->flags & HID_MAIN_ITEM_RELATIVE); + + if (!is_abs_touchring) wacom_wac->hid_data.inrange_state |= value; } @@ -2165,6 +2185,52 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field hdev->product == 0x3AA) value = wacom_offset_rotation(input, usage, value, 1, 2); } + else if (field->flags & HID_MAIN_ITEM_RELATIVE) { + int hires_value = value * 120 / usage->resolution_multiplier; + int *ring_value; + int lowres_code; + + if (usage->code == REL_WHEEL_HI_RES) { + /* We must invert the sign for vertical + * relative scrolling. Clockwise + * rotation produces positive values + * from HW, but userspace treats + * positive REL_WHEEL as a scroll *up*! + */ + hires_value = -hires_value; + ring_value = &wacom_wac->hid_data.ring_value; + lowres_code = REL_WHEEL; + } + else if (usage->code == REL_HWHEEL_HI_RES) { + /* No need to invert the sign for + * horizontal relative scrolling. + * Clockwise rotation produces positive + * values from HW and userspace treats + * positive REL_HWHEEL as a scroll + * right. + */ + ring_value = &wacom_wac->hid_data.ring2_value; + lowres_code = REL_HWHEEL; + } + else { + hid_err(wacom->hdev, "unrecognized relative wheel with code %d\n", + usage->code); + break; + } + + value = hires_value; + *ring_value += hires_value; + + /* Emulate a legacy wheel click for every 120 + * units of hi-res travel. 
+ */ + if (*ring_value >= 120 || *ring_value <= -120) { + int clicks = *ring_value / 120; + + input_event(input, usage->type, lowres_code, clicks); + *ring_value -= clicks * 120; + } + } else { value = wacom_offset_rotation(input, usage, value, 1, 4); } @@ -2322,6 +2388,9 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS3, 0); features->quirks &= ~WACOM_QUIRK_PEN_BUTTON3; break; + case WACOM_HID_WD_SEQUENCENUMBER: + wacom_wac->hid_data.sequence_number = -1; + break; } } @@ -2446,9 +2515,15 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field wacom_wac->hid_data.barrelswitch3 = value; return; case WACOM_HID_WD_SEQUENCENUMBER: - if (wacom_wac->hid_data.sequence_number != value) - hid_warn(hdev, "Dropped %hu packets", (unsigned short)(value - wacom_wac->hid_data.sequence_number)); + if (wacom_wac->hid_data.sequence_number != value && + wacom_wac->hid_data.sequence_number >= 0) { + int sequence_size = field->logical_maximum - field->logical_minimum + 1; + int drop_count = (value - wacom_wac->hid_data.sequence_number) % sequence_size; + hid_warn(hdev, "Dropped %d packets", drop_count); + } wacom_wac->hid_data.sequence_number = value + 1; + if (wacom_wac->hid_data.sequence_number > field->logical_maximum) + wacom_wac->hid_data.sequence_number = field->logical_minimum; return; } diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 6ec499841f7095..c8803d5c6a71e7 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -312,6 +312,8 @@ struct hid_data { int width; int height; int id; + int ring_value; + int ring2_value; int cc_report; int cc_index; int cc_value_index; @@ -324,7 +326,7 @@ struct hid_data { int bat_connected; int ps_connected; bool pad_input_event_flag; - unsigned short sequence_number; + int sequence_number; ktime_t time_delayed; }; @@ -355,6 +357,8 @@ struct wacom_wac { int num_contacts_left; u8 bt_features; u8 bt_high_speed; + u8 absring_count; + u8 relring_count; int mode_report; int mode_value; struct hid_data hid_data; diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index 3140990a61644e..15cb759151e669 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -116,22 +116,13 @@ static int ssi_debug_add_ctrl(struct hsi_controller *ssi) /* SSI controller */ omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL); - if (!omap_ssi->dir) - return -ENOMEM; + debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, &ssi_regs_fops); - debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, - &ssi_regs_fops); /* SSI GDD (DMA) */ dir = debugfs_create_dir("gdd", omap_ssi->dir); - if (!dir) - goto rback; debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops); return 0; -rback: - debugfs_remove_recursive(omap_ssi->dir); - - return -ENOMEM; } static void ssi_debug_remove_ctrl(struct hsi_controller *ssi) diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index 9c452bfbd5719d..7a35c82976e0ff 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -207,13 +207,13 @@ static int hv_die_panic_notify_crash(struct notifier_block *self, * buffer and call into Hyper-V to transfer the data. */ static void hv_kmsg_dump(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { struct kmsg_dump_iter iter; size_t bytes_written; /* We are only interested in panics. 
*/ - if (reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg) + if (detail->reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg) return; /* diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 965d2a4efb7e8a..9b15f7daf50597 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1306,6 +1306,13 @@ static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static void vmbus_percpu_work(struct work_struct *work) +{ + unsigned int cpu = smp_processor_id(); + + hv_synic_init(cpu); +} + /* * vmbus_bus_init -Main vmbus driver initialization routine. * @@ -1316,7 +1323,8 @@ static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id) */ static int vmbus_bus_init(void) { - int ret; + int ret, cpu; + struct work_struct __percpu *works; ret = hv_init(); if (ret != 0) { @@ -1355,12 +1363,32 @@ static int vmbus_bus_init(void) if (ret) goto err_alloc; + works = alloc_percpu(struct work_struct); + if (!works) { + ret = -ENOMEM; + goto err_alloc; + } + /* * Initialize the per-cpu interrupt state and stimer state. * Then connect to the host. */ - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online", - hv_synic_init, hv_synic_cleanup); + cpus_read_lock(); + for_each_online_cpu(cpu) { + struct work_struct *work = per_cpu_ptr(works, cpu); + + INIT_WORK(work, vmbus_percpu_work); + schedule_work_on(cpu, work); + } + + for_each_online_cpu(cpu) + flush_work(per_cpu_ptr(works, cpu)); + + /* Register the callbacks for possible CPU online/offline'ing */ + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online", + hv_synic_init, hv_synic_cleanup); + cpus_read_unlock(); + free_percpu(works); if (ret < 0) goto err_alloc; hyperv_cpuhp_online = ret; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index b60fe2e58ad6cb..65ea92529406d6 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1511,9 +1511,10 @@ config SENSORS_LM90 config SENSORS_LM92 tristate "National Semiconductor LM92 and compatibles" depends on I2C + select REGMAP_I2C help If you say yes here you get support for National Semiconductor LM92 - and Maxim MAX6635 sensor chips. + and LM76 as well as Maxim MAX6633/6634/6635 sensor chips. This driver can also be built as a module. If so, the module will be called lm92. @@ -1532,6 +1533,7 @@ config SENSORS_LM93 config SENSORS_LM95234 tristate "National Semiconductor LM95234 and compatibles" depends on I2C + select REGMAP_I2C help If you say yes here you get support for the LM95233 and LM95234 temperature sensor chips. @@ -2066,6 +2068,17 @@ config SENSORS_SFCTEMP This driver can also be built as a module. If so, the module will be called sfctemp. +config SENSORS_SG2042_MCU + tristate "Sophgo onboard MCU support" + depends on I2C + depends on ARCH_SOPHGO || COMPILE_TEST + help + Support for onboard MCU of Sophgo SG2042 SoCs. This mcu provides + power control and some basic information. + + This driver can be built as a module. If so, the module + will be called sg2042-mcu. + config SENSORS_SURFACE_FAN tristate "Surface Fan Driver" depends on SURFACE_AGGREGATOR @@ -2080,6 +2093,17 @@ config SENSORS_SURFACE_FAN Select M or Y here, if you want to be able to read the fan's speed. +config SENSORS_SURFACE_TEMP + tristate "Microsoft Surface Thermal Sensor Driver" + depends on SURFACE_AGGREGATOR + depends on SURFACE_AGGREGATOR_BUS + help + Driver for monitoring thermal sensors connected via the Surface + Aggregator Module (embedded controller) on Microsoft Surface devices. 
+ + This driver can also be built as a module. If so, the module + will be called surface_temp. + config SENSORS_ADC128D818 tristate "Texas Instruments ADC128D818" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index b1c7056c37db59..9554d2fdcf7bb5 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -194,6 +194,7 @@ obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o obj-$(CONFIG_SENSORS_SFCTEMP) += sfctemp.o +obj-$(CONFIG_SENSORS_SG2042_MCU) += sg2042-mcu.o obj-$(CONFIG_SENSORS_SL28CPLD) += sl28cpld-hwmon.o obj-$(CONFIG_SENSORS_SHT15) += sht15.o obj-$(CONFIG_SENSORS_SHT21) += sht21.o @@ -209,6 +210,7 @@ obj-$(CONFIG_SENSORS_SPARX5) += sparx5-temp.o obj-$(CONFIG_SENSORS_SPD5118) += spd5118.o obj-$(CONFIG_SENSORS_STTS751) += stts751.o obj-$(CONFIG_SENSORS_SURFACE_FAN)+= surface_fan.o +obj-$(CONFIG_SENSORS_SURFACE_TEMP)+= surface_temp.o obj-$(CONFIG_SENSORS_SY7636A) += sy7636a-hwmon.o obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o obj-$(CONFIG_SENSORS_TC74) += tc74.o diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c index 25281739aa3b16..6a834a37bc65d7 100644 --- a/drivers/hwmon/adt7310.c +++ b/drivers/hwmon/adt7310.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include "adt7x10.h" diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 517248d2994e1c..dbee6926fa0555 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -728,30 +728,22 @@ static const int adt7470_freq_map[] = { static int pwm1_freq_get(struct device *dev) { struct adt7470_data *data = dev_get_drvdata(dev); - unsigned int cfg_reg_1, cfg_reg_2; + unsigned int regs[2] = {ADT7470_REG_CFG, ADT7470_REG_CFG_2}; + u8 cfg_reg[2]; int index; int err; - mutex_lock(&data->lock); - err = regmap_read(data->regmap, ADT7470_REG_CFG, &cfg_reg_1); - if (err < 0) - goto out; - err = regmap_read(data->regmap, ADT7470_REG_CFG_2, &cfg_reg_2); - if (err < 0) - goto out; - mutex_unlock(&data->lock); + err = regmap_multi_reg_read(data->regmap, regs, cfg_reg, 2); + if (err) + return err; - index = (cfg_reg_2 & ADT7470_FREQ_MASK) >> ADT7470_FREQ_SHIFT; - if (!(cfg_reg_1 & ADT7470_CFG_LF)) + index = (cfg_reg[1] & ADT7470_FREQ_MASK) >> ADT7470_FREQ_SHIFT; + if (!(cfg_reg[0] & ADT7470_CFG_LF)) index += 8; if (index >= ARRAY_SIZE(adt7470_freq_map)) index = ARRAY_SIZE(adt7470_freq_map) - 1; return adt7470_freq_map[index]; - -out: - mutex_unlock(&data->lock); - return err; } static int adt7470_pwm_read(struct device *dev, u32 attr, int channel, long *val) diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 382a2bb9168a50..ca466d12475a1e 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -21,6 +21,8 @@ #include #include +#include + /* Indexes for the sysfs hooks */ enum adt_sysfs_id { INPUT = 0, @@ -1662,6 +1664,130 @@ static int adt7475_set_pwm_polarity(struct i2c_client *client) return 0; } +struct adt7475_pwm_config { + int index; + int freq; + int flags; + int duty; +}; + +static int _adt7475_pwm_properties_parse_args(u32 args[4], struct adt7475_pwm_config *cfg) +{ + int freq_hz; + int duty; + + if (args[1] == 0) + return -EINVAL; + + freq_hz = 1000000000UL / args[1]; + if (args[3] >= args[1]) + duty = 255; + else + duty = div_u64(255ULL * args[3], args[1]); + + cfg->index = args[0]; + cfg->freq = find_closest(freq_hz, pwmfreq_table, ARRAY_SIZE(pwmfreq_table)); + cfg->flags = args[2]; + cfg->duty = duty; + + return 0; +} + 
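For reference, a back-of-the-envelope restatement of the conversion performed by _adt7475_pwm_properties_parse_args() above, before find_closest() maps the frequency onto pwmfreq_table: the four pwms cells are treated as { index, period_ns, flags, duty_ns }, the period is turned into a frequency in Hz and the duty cycle into an 8-bit value. This is a standalone hosted-C sketch with invented names and example numbers, not driver code:

#include <stdint.h>
#include <stdio.h>

/* The driver rejects period_ns == 0 before reaching this arithmetic. */
static void pwm_cells_to_freq_duty(uint32_t period_ns, uint32_t duty_ns,
				   uint32_t *freq_hz, uint8_t *duty)
{
	*freq_hz = 1000000000UL / period_ns;	/* e.g. 40000 ns -> 25000 Hz */
	if (duty_ns >= period_ns)
		*duty = 255;			/* clamp to fully on */
	else
		*duty = (uint8_t)((255ULL * duty_ns) / period_ns); /* 20000/40000 -> 127 */
}

int main(void)
{
	uint32_t freq;
	uint8_t duty;

	pwm_cells_to_freq_duty(40000, 20000, &freq, &duty);
	printf("freq=%u Hz duty=%u/255\n", (unsigned)freq, (unsigned)duty);
	/* prints: freq=25000 Hz duty=127/255 */
	return 0;
}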
+static int adt7475_pwm_properties_parse_reference_args(struct fwnode_handle *fwnode, + struct adt7475_pwm_config *cfg) +{ + int ret, i; + struct fwnode_reference_args rargs = {}; + u32 args[4] = {}; + + ret = fwnode_property_get_reference_args(fwnode, "pwms", "#pwm-cells", 0, 0, &rargs); + if (ret) + return ret; + + if (rargs.nargs != 4) { + fwnode_handle_put(rargs.fwnode); + return -EINVAL; + } + + for (i = 0; i < 4; i++) + args[i] = rargs.args[i]; + + ret = _adt7475_pwm_properties_parse_args(args, cfg); + + fwnode_handle_put(rargs.fwnode); + + return ret; +} + +static int adt7475_pwm_properties_parse_args(struct fwnode_handle *fwnode, + struct adt7475_pwm_config *cfg) +{ + int ret; + u32 args[4] = {}; + + ret = fwnode_property_read_u32_array(fwnode, "pwms", args, ARRAY_SIZE(args)); + if (ret) + return ret; + + return _adt7475_pwm_properties_parse_args(args, cfg); +} + +static int adt7475_fan_pwm_config(struct i2c_client *client) +{ + struct adt7475_data *data = i2c_get_clientdata(client); + struct fwnode_handle *child; + struct adt7475_pwm_config cfg = {}; + int ret; + + device_for_each_child_node(&client->dev, child) { + if (!fwnode_property_present(child, "pwms")) + continue; + + if (is_of_node(child)) + ret = adt7475_pwm_properties_parse_reference_args(child, &cfg); + else + ret = adt7475_pwm_properties_parse_args(child, &cfg); + + if (cfg.index >= ADT7475_PWM_COUNT) + return -EINVAL; + + ret = adt7475_read(PWM_CONFIG_REG(cfg.index)); + if (ret < 0) + return ret; + data->pwm[CONTROL][cfg.index] = ret; + if (cfg.flags & PWM_POLARITY_INVERTED) + data->pwm[CONTROL][cfg.index] |= BIT(4); + else + data->pwm[CONTROL][cfg.index] &= ~BIT(4); + + /* Force to manual mode so PWM values take effect */ + data->pwm[CONTROL][cfg.index] &= ~0xE0; + data->pwm[CONTROL][cfg.index] |= 0x07 << 5; + + ret = i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(cfg.index), + data->pwm[CONTROL][cfg.index]); + if (ret) + return ret; + + data->pwm[INPUT][cfg.index] = cfg.duty; + ret = i2c_smbus_write_byte_data(client, PWM_REG(cfg.index), + data->pwm[INPUT][cfg.index]); + if (ret) + return ret; + + data->range[cfg.index] = adt7475_read(TEMP_TRANGE_REG(cfg.index)); + data->range[cfg.index] &= ~0xf; + data->range[cfg.index] |= cfg.freq; + + ret = i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(cfg.index), + data->range[cfg.index]); + if (ret) + return ret; + } + + return 0; +} + static int adt7475_probe(struct i2c_client *client) { enum chips chip; @@ -1774,6 +1900,10 @@ static int adt7475_probe(struct i2c_client *client) if (ret && ret != -EINVAL) dev_warn(&client->dev, "Error configuring pwm polarity\n"); + ret = adt7475_fan_pwm_config(client); + if (ret) + dev_warn(&client->dev, "Error %d configuring fan/pwm\n", ret); + /* Start monitoring */ switch (chip) { case adt7475: diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c index 6701920de17f44..2d329391ed3fd6 100644 --- a/drivers/hwmon/adt7x10.c +++ b/drivers/hwmon/adt7x10.c @@ -170,21 +170,15 @@ static int adt7x10_temp_write(struct adt7x10_data *data, int index, long temp) static int adt7x10_hyst_read(struct adt7x10_data *data, int index, long *val) { - int hyst, temp, ret; + unsigned int regs[2] = {ADT7X10_T_HYST, ADT7X10_REG_TEMP[index]}; + int hyst, ret; + u16 regdata[2]; - mutex_lock(&data->update_lock); - ret = regmap_read(data->regmap, ADT7X10_T_HYST, &hyst); - if (ret) { - mutex_unlock(&data->update_lock); - return ret; - } - - ret = regmap_read(data->regmap, ADT7X10_REG_TEMP[index], &temp); - mutex_unlock(&data->update_lock); + ret = 
regmap_multi_reg_read(data->regmap, regs, regdata, 2); if (ret) return ret; - hyst = (hyst & ADT7X10_T_HYST_MASK) * 1000; + hyst = (regdata[0] & ADT7X10_T_HYST_MASK) * 1000; /* * hysteresis is stored as a 4 bit offset in the device, convert it @@ -194,7 +188,7 @@ static int adt7x10_hyst_read(struct adt7x10_data *data, int index, long *val) if (index == adt7x10_t_alarm_low) hyst = -hyst; - *val = ADT7X10_REG_TO_TEMP(data, temp) - hyst; + *val = ADT7X10_REG_TO_TEMP(data, regdata[1]) - hyst; return 0; } diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index ec94392fcb658f..ac64b407ed0eaf 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c @@ -136,29 +136,25 @@ struct amc6821_data { */ static int amc6821_get_auto_point_temps(struct regmap *regmap, int channel, u8 *temps) { - u32 pwm, regval; + u32 regs[] = { + AMC6821_REG_DCY_LOW_TEMP, + AMC6821_REG_PSV_TEMP, + channel ? AMC6821_REG_RTEMP_FAN_CTRL : AMC6821_REG_LTEMP_FAN_CTRL + }; + u8 regvals[3]; + int slope; int err; - err = regmap_read(regmap, AMC6821_REG_DCY_LOW_TEMP, &pwm); - if (err) - return err; - - err = regmap_read(regmap, AMC6821_REG_PSV_TEMP, ®val); - if (err) - return err; - temps[0] = regval; - - err = regmap_read(regmap, - channel ? AMC6821_REG_RTEMP_FAN_CTRL : AMC6821_REG_LTEMP_FAN_CTRL, - ®val); + err = regmap_multi_reg_read(regmap, regs, regvals, 3); if (err) return err; - temps[1] = FIELD_GET(AMC6821_TEMP_LIMIT_MASK, regval) * 4; + temps[0] = regvals[1]; + temps[1] = FIELD_GET(AMC6821_TEMP_LIMIT_MASK, regvals[2]) * 4; /* slope is 32 >> in °C */ - regval = 32 >> FIELD_GET(AMC6821_TEMP_SLOPE_MASK, regval); - if (regval) - temps[2] = temps[1] + DIV_ROUND_CLOSEST(255 - pwm, regval); + slope = 32 >> FIELD_GET(AMC6821_TEMP_SLOPE_MASK, regvals[2]); + if (slope) + temps[2] = temps[1] + DIV_ROUND_CLOSEST(255 - regvals[0], slope); else temps[2] = 255; diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c index 8e55cd2f46f53e..34cac27e4ddec3 100644 --- a/drivers/hwmon/aquacomputer_d5next.c +++ b/drivers/hwmon/aquacomputer_d5next.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #define USB_VENDOR_ID_AQUACOMPUTER 0x0c70 #define USB_PRODUCT_ID_AQUAERO 0xf001 diff --git a/drivers/hwmon/aspeed-g6-pwm-tach.c b/drivers/hwmon/aspeed-g6-pwm-tach.c index 08a2ded95e45c3..75eadda738ab67 100644 --- a/drivers/hwmon/aspeed-g6-pwm-tach.c +++ b/drivers/hwmon/aspeed-g6-pwm-tach.c @@ -456,7 +456,6 @@ static int aspeed_pwm_tach_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev, *hwmon; int ret; - struct device_node *child; struct aspeed_pwm_tach_data *priv; struct pwm_chip *chip; @@ -498,10 +497,9 @@ static int aspeed_pwm_tach_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "Failed to add PWM chip\n"); - for_each_child_of_node(dev->of_node, child) { + for_each_child_of_node_scoped(dev->of_node, child) { ret = aspeed_create_fan_monitor(dev, child, priv); if (ret) { - of_node_put(child); dev_warn(dev, "Failed to create fan %d", ret); return 0; } diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 4acc1858d8acf7..aa159bf158a37f 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -907,7 +907,7 @@ static void aspeed_pwm_tacho_remove(void *data) static int aspeed_pwm_tacho_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np, *child; + struct device_node *np; struct aspeed_pwm_tacho_data *priv; void __iomem *regs; struct 
device *hwmon; @@ -951,12 +951,10 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev) aspeed_create_type(priv); - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { ret = aspeed_create_fan(dev, child, priv); - if (ret) { - of_node_put(child); + if (ret) return ret; - } } priv->groups[0] = &pwm_dev_group; diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c index ee396f21fac5e9..9555366aeaf0d3 100644 --- a/drivers/hwmon/asus-ec-sensors.c +++ b/drivers/hwmon/asus-ec-sensors.c @@ -34,7 +34,7 @@ #include #include -#include +#include static char *mutex_path_override; diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 3751c1e3eddd96..1dc7e24fe4c5ef 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -783,7 +783,6 @@ static const struct file_operations atk_debugfs_ggrp_fops = { .read = atk_debugfs_ggrp_read, .open = atk_debugfs_ggrp_open, .release = atk_debugfs_ggrp_release, - .llseek = no_llseek, }; static void atk_debugfs_init(struct atk_data *data) diff --git a/drivers/hwmon/asus_rog_ryujin.c b/drivers/hwmon/asus_rog_ryujin.c index f8b20346a9956f..e5e93a20723c41 100644 --- a/drivers/hwmon/asus_rog_ryujin.c +++ b/drivers/hwmon/asus_rog_ryujin.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #define DRIVER_NAME "asus_rog_ryujin" diff --git a/drivers/hwmon/chipcap2.c b/drivers/hwmon/chipcap2.c index 6ccceae21f701a..edf454474f11c9 100644 --- a/drivers/hwmon/chipcap2.c +++ b/drivers/hwmon/chipcap2.c @@ -740,37 +740,26 @@ static int cc2_probe(struct i2c_client *client) data->client = client; data->regulator = devm_regulator_get_exclusive(dev, "vdd"); - if (IS_ERR(data->regulator)) { - dev_err_probe(dev, PTR_ERR(data->regulator), - "Failed to get regulator\n"); - return PTR_ERR(data->regulator); - } + if (IS_ERR(data->regulator)) + return dev_err_probe(dev, PTR_ERR(data->regulator), + "Failed to get regulator\n"); ret = cc2_request_ready_irq(data, dev); - if (ret) { - dev_err_probe(dev, ret, "Failed to request ready irq\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to request ready irq\n"); ret = cc2_request_alarm_irqs(data, dev); - if (ret) { - dev_err_probe(dev, ret, "Failed to request alarm irqs\n"); - goto disable; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to request alarm irqs\n"); data->hwmon = devm_hwmon_device_register_with_info(dev, client->name, data, &cc2_chip_info, NULL); - if (IS_ERR(data->hwmon)) { - dev_err_probe(dev, PTR_ERR(data->hwmon), - "Failed to register hwmon device\n"); - ret = PTR_ERR(data->hwmon); - } - -disable: - cc2_disable(data); + if (IS_ERR(data->hwmon)) + return dev_err_probe(dev, PTR_ERR(data->hwmon), + "Failed to register hwmon device\n"); - return ret; + return 0; } static void cc2_remove(struct i2c_client *client) diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 0362a13f652554..f5bdf842040e6c 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -38,7 +38,7 @@ #include #include -#include +#include #define I8K_SMM_FN_STATUS 0x0025 #define I8K_SMM_POWER_STATUS 0x0069 @@ -1488,6 +1488,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = { }, .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3], }, + { + .ident = "Dell Latitude 7320", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Latitude 7320"), + }, + .driver_data = (void 
*)&i8k_fan_control_data[I8K_FAN_30A3_31A3], + }, { .ident = "Dell Latitude E6440", .matches = { diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index 1811f84d835ef3..a303959879efdd 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -948,7 +948,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, static const struct file_operations watchdog_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = watchdog_open, .release = watchdog_release, .write = watchdog_write, diff --git a/drivers/hwmon/gigabyte_waterforce.c b/drivers/hwmon/gigabyte_waterforce.c index 8129d7b3ceaf9a..27487e215bddff 100644 --- a/drivers/hwmon/gigabyte_waterforce.c +++ b/drivers/hwmon/gigabyte_waterforce.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #define DRIVER_NAME "gigabyte_waterforce" diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c index cb2f01dc432666..4514f3ed90ccd6 100644 --- a/drivers/hwmon/gsc-hwmon.c +++ b/drivers/hwmon/gsc-hwmon.c @@ -400,6 +400,7 @@ static const struct of_device_id gsc_hwmon_of_match[] = { { .compatible = "gw,gsc-adc", }, {} }; +MODULE_DEVICE_TABLE(of, gsc_hwmon_of_match); static struct platform_driver gsc_hwmon_driver = { .driver = { diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c index dfa1d6926deacc..d6bdad26feb116 100644 --- a/drivers/hwmon/hp-wmi-sensors.c +++ b/drivers/hwmon/hp-wmi-sensors.c @@ -1597,15 +1597,13 @@ static void hp_wmi_devm_notify_remove(void *ignored) } /* hp_wmi_notify - WMI event notification handler */ -static void hp_wmi_notify(u32 value, void *context) +static void hp_wmi_notify(union acpi_object *wobj, void *context) { struct hp_wmi_info *temp_info[HP_WMI_MAX_INSTANCES] = {}; - struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; struct hp_wmi_sensors *state = context; struct device *dev = &state->wdev->dev; struct hp_wmi_event event = {}; struct hp_wmi_info *fan_info; - union acpi_object *wobj; acpi_status err; int event_type; u8 count; @@ -1630,20 +1628,15 @@ static void hp_wmi_notify(u32 value, void *context) * HPBIOS_BIOSEvent instance. 
*/ - mutex_lock(&state->lock); - - err = wmi_get_event_data(value, &out); - if (ACPI_FAILURE(err)) - goto out_unlock; - - wobj = out.pointer; if (!wobj) - goto out_unlock; + return; + + mutex_lock(&state->lock); err = populate_event_from_wobj(dev, &event, wobj); if (err) { dev_warn(dev, "Bad event data (ACPI type %d)\n", wobj->type); - goto out_free_wobj; + goto out_free; } event_type = classify_event(event.name, event.category); @@ -1668,13 +1661,10 @@ static void hp_wmi_notify(u32 value, void *context) break; } -out_free_wobj: - kfree(wobj); - +out_free: devm_kfree(dev, event.name); devm_kfree(dev, event.description); -out_unlock: mutex_unlock(&state->lock); } diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index a362080d41fa82..9c35c4d0369d7a 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -1188,24 +1188,6 @@ devm_hwmon_device_register_with_info(struct device *dev, const char *name, } EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_info); -static int devm_hwmon_match(struct device *dev, void *res, void *data) -{ - struct device **hwdev = res; - - return *hwdev == data; -} - -/** - * devm_hwmon_device_unregister - removes a previously registered hwmon device - * - * @dev: the parent device of the device to unregister - */ -void devm_hwmon_device_unregister(struct device *dev) -{ - WARN_ON(devres_release(dev, devm_hwmon_release, devm_hwmon_match, dev)); -} -EXPORT_SYMBOL_GPL(devm_hwmon_device_unregister); - static char *__hwmon_sanitize_name(struct device *dev, const char *old_name) { char *name, *p; diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index 9ab4205622e254..f0fa6d073627f7 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -22,21 +22,21 @@ * Thanks to Jan Volkering */ +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include +#include +#include #include -#include -#include -#include -#include -#include -#include +#include #include -#include - -#include /* common register definitions */ #define INA2XX_CONFIG 0x00 @@ -51,10 +51,6 @@ #define INA226_ALERT_LIMIT 0x07 #define INA226_DIE_ID 0xFF -/* register count */ -#define INA219_REGISTERS 6 -#define INA226_REGISTERS 8 - #define INA2XX_MAX_REGISTERS 8 /* settings - depend on use case */ @@ -68,39 +64,65 @@ #define INA2XX_RSHUNT_DEFAULT 10000 /* bit mask for reading the averaging setting in the configuration register */ -#define INA226_AVG_RD_MASK 0x0E00 +#define INA226_AVG_RD_MASK GENMASK(11, 9) -#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9) -#define INA226_SHIFT_AVG(val) ((val) << 9) +#define INA226_READ_AVG(reg) FIELD_GET(INA226_AVG_RD_MASK, reg) -#define INA226_ALERT_POLARITY_MASK 0x0002 -#define INA226_SHIFT_ALERT_POLARITY(val) ((val) << 1) -#define INA226_ALERT_POL_LOW 0 -#define INA226_ALERT_POL_HIGH 1 +#define INA226_ALERT_LATCH_ENABLE BIT(0) +#define INA226_ALERT_POLARITY BIT(1) /* bit number of alert functions in Mask/Enable Register */ -#define INA226_SHUNT_OVER_VOLTAGE_BIT 15 -#define INA226_SHUNT_UNDER_VOLTAGE_BIT 14 -#define INA226_BUS_OVER_VOLTAGE_BIT 13 -#define INA226_BUS_UNDER_VOLTAGE_BIT 12 -#define INA226_POWER_OVER_LIMIT_BIT 11 +#define INA226_SHUNT_OVER_VOLTAGE_MASK BIT(15) +#define INA226_SHUNT_UNDER_VOLTAGE_MASK BIT(14) +#define INA226_BUS_OVER_VOLTAGE_MASK BIT(13) +#define INA226_BUS_UNDER_VOLTAGE_MASK BIT(12) +#define INA226_POWER_OVER_LIMIT_MASK BIT(11) /* bit mask for alert config bits of Mask/Enable Register */ -#define INA226_ALERT_CONFIG_MASK 0xFC00 +#define 
INA226_ALERT_CONFIG_MASK GENMASK(15, 10) #define INA226_ALERT_FUNCTION_FLAG BIT(4) -/* common attrs, ina226 attrs and NULL */ -#define INA2XX_MAX_ATTRIBUTE_GROUPS 3 - /* * Both bus voltage and shunt voltage conversion times for ina226 are set * to 0b0100 on POR, which translates to 2200 microseconds in total. */ #define INA226_TOTAL_CONV_TIME_DEFAULT 2200 -static struct regmap_config ina2xx_regmap_config = { +static bool ina2xx_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case INA2XX_CONFIG: + case INA2XX_CALIBRATION: + case INA226_MASK_ENABLE: + case INA226_ALERT_LIMIT: + return true; + default: + return false; + } +} + +static bool ina2xx_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case INA2XX_SHUNT_VOLTAGE: + case INA2XX_BUS_VOLTAGE: + case INA2XX_POWER: + case INA2XX_CURRENT: + return true; + default: + return false; + } +} + +static const struct regmap_config ina2xx_regmap_config = { .reg_bits = 8, .val_bits = 16, + .use_single_write = true, + .use_single_read = true, + .max_register = INA2XX_MAX_REGISTERS, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = ina2xx_volatile_reg, + .writeable_reg = ina2xx_writeable_reg, }; enum ina2xx_ids { ina219, ina226 }; @@ -108,7 +130,6 @@ enum ina2xx_ids { ina219, ina226 }; struct ina2xx_config { u16 config_default; int calibration_value; - int registers; int shunt_div; int bus_voltage_shift; int bus_voltage_lsb; /* uV */ @@ -117,21 +138,19 @@ struct ina2xx_config { struct ina2xx_data { const struct ina2xx_config *config; + enum ina2xx_ids chip; long rshunt; long current_lsb_uA; long power_lsb_uW; struct mutex config_lock; struct regmap *regmap; - - const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS]; }; static const struct ina2xx_config ina2xx_config[] = { [ina219] = { .config_default = INA219_CONFIG_DEFAULT, .calibration_value = 4096, - .registers = INA219_REGISTERS, .shunt_div = 100, .bus_voltage_shift = 3, .bus_voltage_lsb = 4000, @@ -140,7 +159,6 @@ static const struct ina2xx_config ina2xx_config[] = { [ina226] = { .config_default = INA226_CONFIG_DEFAULT, .calibration_value = 2048, - .registers = INA226_REGISTERS, .shunt_div = 400, .bus_voltage_shift = 0, .bus_voltage_lsb = 1250, @@ -171,66 +189,76 @@ static int ina226_reg_to_interval(u16 config) * Return the new, shifted AVG field value of CONFIG register, * to use with regmap_update_bits */ -static u16 ina226_interval_to_reg(int interval) +static u16 ina226_interval_to_reg(long interval) { int avg, avg_bits; + /* + * The maximum supported interval is 1,024 * (2 * 8.244ms) ~= 16.8s. + * Clamp to 32 seconds before calculations to avoid overflows. + */ + interval = clamp_val(interval, 0, 32000); + avg = DIV_ROUND_CLOSEST(interval * 1000, INA226_TOTAL_CONV_TIME_DEFAULT); avg_bits = find_closest(avg, ina226_avg_tab, ARRAY_SIZE(ina226_avg_tab)); - return INA226_SHIFT_AVG(avg_bits); + return FIELD_PREP(INA226_AVG_RD_MASK, avg_bits); } -static int ina2xx_set_alert_polarity(struct ina2xx_data *data, - unsigned long val) +static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, + unsigned int regval) { - return regmap_update_bits(data->regmap, INA226_MASK_ENABLE, - INA226_ALERT_POLARITY_MASK, - INA226_SHIFT_ALERT_POLARITY(val)); -} + int val; -/* - * Calibration register is set to the best value, which eliminates - * truncation errors on calculating current register in hardware. - * According to datasheet (eq. 3) the best values are 2048 for - * ina226 and 4096 for ina219. They are hardcoded as calibration_value. 
- */ -static int ina2xx_calibrate(struct ina2xx_data *data) -{ - return regmap_write(data->regmap, INA2XX_CALIBRATION, - data->config->calibration_value); + switch (reg) { + case INA2XX_SHUNT_VOLTAGE: + /* signed register */ + val = DIV_ROUND_CLOSEST((s16)regval, data->config->shunt_div); + break; + case INA2XX_BUS_VOLTAGE: + val = (regval >> data->config->bus_voltage_shift) * + data->config->bus_voltage_lsb; + val = DIV_ROUND_CLOSEST(val, 1000); + break; + case INA2XX_POWER: + val = regval * data->power_lsb_uW; + break; + case INA2XX_CURRENT: + /* signed register, result in mA */ + val = (s16)regval * data->current_lsb_uA; + val = DIV_ROUND_CLOSEST(val, 1000); + break; + case INA2XX_CALIBRATION: + val = regval; + break; + default: + /* programmer goofed */ + WARN_ON_ONCE(1); + val = 0; + break; + } + + return val; } /* - * Initialize the configuration and calibration registers. + * Read and convert register value from chip. If the register value is 0, + * check if the chip has been power cycled or reset. If so, re-initialize it. */ -static int ina2xx_init(struct ina2xx_data *data) -{ - int ret = regmap_write(data->regmap, INA2XX_CONFIG, - data->config->config_default); - if (ret < 0) - return ret; - - return ina2xx_calibrate(data); -} - -static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval) +static int ina2xx_read_init(struct device *dev, int reg, long *val) { struct ina2xx_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + unsigned int regval; int ret, retry; - dev_dbg(dev, "Starting register %d read\n", reg); - for (retry = 5; retry; retry--) { - - ret = regmap_read(data->regmap, reg, regval); + ret = regmap_read(regmap, reg, ®val); if (ret < 0) return ret; - dev_dbg(dev, "read %d, val = 0x%04x\n", reg, *regval); - /* * If the current value in the calibration register is 0, the * power and current registers will also remain at 0. In case @@ -239,20 +267,19 @@ static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval) * We do that extra read of the calibration register if there * is some hint of a chip reset. 
*/ - if (*regval == 0) { + if (regval == 0) { unsigned int cal; - ret = regmap_read(data->regmap, INA2XX_CALIBRATION, - &cal); + ret = regmap_read_bypassed(regmap, INA2XX_CALIBRATION, &cal); if (ret < 0) return ret; if (cal == 0) { dev_warn(dev, "chip not calibrated, reinitializing\n"); - ret = ina2xx_init(data); - if (ret < 0) - return ret; + regcache_mark_dirty(regmap); + regcache_sync(regmap); + /* * Let's make sure the power and current * registers have been updated before trying @@ -262,6 +289,7 @@ static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval) continue; } } + *val = ina2xx_get_value(data, reg, regval); return 0; } @@ -274,101 +302,31 @@ static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval) return -ENODEV; } -static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, - unsigned int regval) -{ - int val; - - switch (reg) { - case INA2XX_SHUNT_VOLTAGE: - /* signed register */ - val = DIV_ROUND_CLOSEST((s16)regval, data->config->shunt_div); - break; - case INA2XX_BUS_VOLTAGE: - val = (regval >> data->config->bus_voltage_shift) - * data->config->bus_voltage_lsb; - val = DIV_ROUND_CLOSEST(val, 1000); - break; - case INA2XX_POWER: - val = regval * data->power_lsb_uW; - break; - case INA2XX_CURRENT: - /* signed register, result in mA */ - val = (s16)regval * data->current_lsb_uA; - val = DIV_ROUND_CLOSEST(val, 1000); - break; - case INA2XX_CALIBRATION: - val = regval; - break; - default: - /* programmer goofed */ - WARN_ON_ONCE(1); - val = 0; - break; - } - - return val; -} - -static ssize_t ina2xx_value_show(struct device *dev, - struct device_attribute *da, char *buf) -{ - struct sensor_device_attribute *attr = to_sensor_dev_attr(da); - struct ina2xx_data *data = dev_get_drvdata(dev); - unsigned int regval; - - int err = ina2xx_read_reg(dev, attr->index, ®val); - - if (err < 0) - return err; - - return sysfs_emit(buf, "%d\n", ina2xx_get_value(data, attr->index, regval)); -} - -static int ina226_reg_to_alert(struct ina2xx_data *data, u8 bit, u16 regval) -{ - int reg; - - switch (bit) { - case INA226_SHUNT_OVER_VOLTAGE_BIT: - case INA226_SHUNT_UNDER_VOLTAGE_BIT: - reg = INA2XX_SHUNT_VOLTAGE; - break; - case INA226_BUS_OVER_VOLTAGE_BIT: - case INA226_BUS_UNDER_VOLTAGE_BIT: - reg = INA2XX_BUS_VOLTAGE; - break; - case INA226_POWER_OVER_LIMIT_BIT: - reg = INA2XX_POWER; - break; - default: - /* programmer goofed */ - WARN_ON_ONCE(1); - return 0; - } - - return ina2xx_get_value(data, reg, regval); -} - /* * Turns alert limit values into register values. * Opposite of the formula in ina2xx_get_value(). 
*/ -static s16 ina226_alert_to_reg(struct ina2xx_data *data, u8 bit, int val) +static u16 ina226_alert_to_reg(struct ina2xx_data *data, int reg, long val) { - switch (bit) { - case INA226_SHUNT_OVER_VOLTAGE_BIT: - case INA226_SHUNT_UNDER_VOLTAGE_BIT: + switch (reg) { + case INA2XX_SHUNT_VOLTAGE: + val = clamp_val(val, 0, SHRT_MAX * data->config->shunt_div); val *= data->config->shunt_div; - return clamp_val(val, SHRT_MIN, SHRT_MAX); - case INA226_BUS_OVER_VOLTAGE_BIT: - case INA226_BUS_UNDER_VOLTAGE_BIT: + return clamp_val(val, 0, SHRT_MAX); + case INA2XX_BUS_VOLTAGE: + val = clamp_val(val, 0, 200000); val = (val * 1000) << data->config->bus_voltage_shift; val = DIV_ROUND_CLOSEST(val, data->config->bus_voltage_lsb); - return clamp_val(val, 0, SHRT_MAX); - case INA226_POWER_OVER_LIMIT_BIT: + return clamp_val(val, 0, USHRT_MAX); + case INA2XX_POWER: + val = clamp_val(val, 0, UINT_MAX - data->power_lsb_uW); val = DIV_ROUND_CLOSEST(val, data->power_lsb_uW); return clamp_val(val, 0, USHRT_MAX); + case INA2XX_CURRENT: + val = clamp_val(val, INT_MIN / 1000, INT_MAX / 1000); + /* signed register, result in mA */ + val = DIV_ROUND_CLOSEST(val * 1000, data->current_lsb_uA); + return clamp_val(val, SHRT_MIN, SHRT_MAX); default: /* programmer goofed */ WARN_ON_ONCE(1); @@ -376,45 +334,37 @@ static s16 ina226_alert_to_reg(struct ina2xx_data *data, u8 bit, int val) } } -static ssize_t ina226_alert_show(struct device *dev, - struct device_attribute *da, char *buf) +static int ina226_alert_limit_read(struct ina2xx_data *data, u32 mask, int reg, long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(da); - struct ina2xx_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; int regval; - int val = 0; int ret; mutex_lock(&data->config_lock); - ret = regmap_read(data->regmap, INA226_MASK_ENABLE, ®val); + ret = regmap_read(regmap, INA226_MASK_ENABLE, ®val); if (ret) goto abort; - if (regval & BIT(attr->index)) { - ret = regmap_read(data->regmap, INA226_ALERT_LIMIT, ®val); + if (regval & mask) { + ret = regmap_read(regmap, INA226_ALERT_LIMIT, ®val); if (ret) goto abort; - val = ina226_reg_to_alert(data, attr->index, regval); + *val = ina2xx_get_value(data, reg, regval); + } else { + *val = 0; } - - ret = sysfs_emit(buf, "%d\n", val); abort: mutex_unlock(&data->config_lock); return ret; } -static ssize_t ina226_alert_store(struct device *dev, - struct device_attribute *da, - const char *buf, size_t count) +static int ina226_alert_limit_write(struct ina2xx_data *data, u32 mask, int reg, long val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(da); - struct ina2xx_data *data = dev_get_drvdata(dev); - unsigned long val; + struct regmap *regmap = data->regmap; int ret; - ret = kstrtoul(buf, 10, &val); - if (ret < 0) - return ret; + if (val < 0) + return -EINVAL; /* * Clear all alerts first to avoid accidentally triggering ALERT pin @@ -422,217 +372,462 @@ static ssize_t ina226_alert_store(struct device *dev, * if the value is non-zero. 
*/ mutex_lock(&data->config_lock); - ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE, + ret = regmap_update_bits(regmap, INA226_MASK_ENABLE, INA226_ALERT_CONFIG_MASK, 0); if (ret < 0) goto abort; - ret = regmap_write(data->regmap, INA226_ALERT_LIMIT, - ina226_alert_to_reg(data, attr->index, val)); + ret = regmap_write(regmap, INA226_ALERT_LIMIT, + ina226_alert_to_reg(data, reg, val)); if (ret < 0) goto abort; - if (val != 0) { - ret = regmap_update_bits(data->regmap, INA226_MASK_ENABLE, - INA226_ALERT_CONFIG_MASK, - BIT(attr->index)); - if (ret < 0) - goto abort; - } - - ret = count; + if (val) + ret = regmap_update_bits(regmap, INA226_MASK_ENABLE, + INA226_ALERT_CONFIG_MASK, mask); abort: mutex_unlock(&data->config_lock); return ret; } -static ssize_t ina226_alarm_show(struct device *dev, - struct device_attribute *da, char *buf) +static int ina2xx_chip_read(struct device *dev, u32 attr, long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ina2xx_data *data = dev_get_drvdata(dev); - int regval; - int alarm = 0; + u32 regval; + int ret; + + switch (attr) { + case hwmon_chip_update_interval: + ret = regmap_read(data->regmap, INA2XX_CONFIG, ®val); + if (ret) + return ret; + + *val = ina226_reg_to_interval(regval); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int ina226_alert_read(struct regmap *regmap, u32 mask, long *val) +{ + unsigned int regval; int ret; - ret = regmap_read(data->regmap, INA226_MASK_ENABLE, ®val); + ret = regmap_read_bypassed(regmap, INA226_MASK_ENABLE, ®val); if (ret) return ret; - alarm = (regval & BIT(attr->index)) && - (regval & INA226_ALERT_FUNCTION_FLAG); - return sysfs_emit(buf, "%d\n", alarm); + *val = (regval & mask) && (regval & INA226_ALERT_FUNCTION_FLAG); + + return 0; +} + +static int ina2xx_in_read(struct device *dev, u32 attr, int channel, long *val) +{ + int voltage_reg = channel ? INA2XX_BUS_VOLTAGE : INA2XX_SHUNT_VOLTAGE; + u32 under_voltage_mask = channel ? INA226_BUS_UNDER_VOLTAGE_MASK + : INA226_SHUNT_UNDER_VOLTAGE_MASK; + u32 over_voltage_mask = channel ? 
INA226_BUS_OVER_VOLTAGE_MASK + : INA226_SHUNT_OVER_VOLTAGE_MASK; + struct ina2xx_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + unsigned int regval; + int ret; + + switch (attr) { + case hwmon_in_input: + ret = regmap_read(regmap, voltage_reg, ®val); + if (ret) + return ret; + *val = ina2xx_get_value(data, voltage_reg, regval); + break; + case hwmon_in_lcrit: + return ina226_alert_limit_read(data, under_voltage_mask, + voltage_reg, val); + case hwmon_in_crit: + return ina226_alert_limit_read(data, over_voltage_mask, + voltage_reg, val); + case hwmon_in_lcrit_alarm: + return ina226_alert_read(regmap, under_voltage_mask, val); + case hwmon_in_crit_alarm: + return ina226_alert_read(regmap, over_voltage_mask, val); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int ina2xx_power_read(struct device *dev, u32 attr, long *val) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_power_input: + return ina2xx_read_init(dev, INA2XX_POWER, val); + case hwmon_power_crit: + return ina226_alert_limit_read(data, INA226_POWER_OVER_LIMIT_MASK, + INA2XX_POWER, val); + case hwmon_power_crit_alarm: + return ina226_alert_read(data->regmap, INA226_POWER_OVER_LIMIT_MASK, val); + default: + return -EOPNOTSUPP; + } +} + +static int ina2xx_curr_read(struct device *dev, u32 attr, long *val) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + unsigned int regval; + int ret; + + /* + * While the chips supported by this driver do not directly support + * current limits, they do support setting shunt voltage limits. + * The shunt voltage divided by the shunt resistor value is the current. + * On top of that, calibration values are set such that in the shunt + * voltage register and the current register report the same values. + * That means we can report and configure current limits based on shunt + * voltage limits. + */ + switch (attr) { + case hwmon_curr_input: + /* + * Since the shunt voltage and the current register report the + * same values when the chip is calibrated, we can calculate + * the current directly from the shunt voltage without relying + * on chip calibration. 
+ */ + ret = regmap_read(regmap, INA2XX_SHUNT_VOLTAGE, ®val); + if (ret) + return ret; + *val = ina2xx_get_value(data, INA2XX_CURRENT, regval); + return 0; + case hwmon_curr_lcrit: + return ina226_alert_limit_read(data, INA226_SHUNT_UNDER_VOLTAGE_MASK, + INA2XX_CURRENT, val); + case hwmon_curr_crit: + return ina226_alert_limit_read(data, INA226_SHUNT_OVER_VOLTAGE_MASK, + INA2XX_CURRENT, val); + case hwmon_curr_lcrit_alarm: + return ina226_alert_read(regmap, INA226_SHUNT_UNDER_VOLTAGE_MASK, val); + case hwmon_curr_crit_alarm: + return ina226_alert_read(regmap, INA226_SHUNT_OVER_VOLTAGE_MASK, val); + default: + return -EOPNOTSUPP; + } +} + +static int ina2xx_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_chip: + return ina2xx_chip_read(dev, attr, val); + case hwmon_in: + return ina2xx_in_read(dev, attr, channel, val); + case hwmon_power: + return ina2xx_power_read(dev, attr, val); + case hwmon_curr: + return ina2xx_curr_read(dev, attr, val); + default: + return -EOPNOTSUPP; + } +} + +static int ina2xx_chip_write(struct device *dev, u32 attr, long val) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_chip_update_interval: + return regmap_update_bits(data->regmap, INA2XX_CONFIG, + INA226_AVG_RD_MASK, + ina226_interval_to_reg(val)); + default: + return -EOPNOTSUPP; + } +} + +static int ina2xx_in_write(struct device *dev, u32 attr, int channel, long val) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_in_lcrit: + return ina226_alert_limit_write(data, + channel ? INA226_BUS_UNDER_VOLTAGE_MASK : INA226_SHUNT_UNDER_VOLTAGE_MASK, + channel ? INA2XX_BUS_VOLTAGE : INA2XX_SHUNT_VOLTAGE, + val); + case hwmon_in_crit: + return ina226_alert_limit_write(data, + channel ? INA226_BUS_OVER_VOLTAGE_MASK : INA226_SHUNT_OVER_VOLTAGE_MASK, + channel ? 
INA2XX_BUS_VOLTAGE : INA2XX_SHUNT_VOLTAGE, + val); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int ina2xx_power_write(struct device *dev, u32 attr, long val) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_power_crit: + return ina226_alert_limit_write(data, INA226_POWER_OVER_LIMIT_MASK, + INA2XX_POWER, val); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int ina2xx_curr_write(struct device *dev, u32 attr, long val) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_curr_lcrit: + return ina226_alert_limit_write(data, INA226_SHUNT_UNDER_VOLTAGE_MASK, + INA2XX_CURRENT, val); + case hwmon_curr_crit: + return ina226_alert_limit_write(data, INA226_SHUNT_OVER_VOLTAGE_MASK, + INA2XX_CURRENT, val); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int ina2xx_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + switch (type) { + case hwmon_chip: + return ina2xx_chip_write(dev, attr, val); + case hwmon_in: + return ina2xx_in_write(dev, attr, channel, val); + case hwmon_power: + return ina2xx_power_write(dev, attr, val); + case hwmon_curr: + return ina2xx_curr_write(dev, attr, val); + default: + return -EOPNOTSUPP; + } +} + +static umode_t ina2xx_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct ina2xx_data *data = _data; + enum ina2xx_ids chip = data->chip; + + switch (type) { + case hwmon_in: + switch (attr) { + case hwmon_in_input: + return 0444; + case hwmon_in_lcrit: + case hwmon_in_crit: + if (chip == ina226) + return 0644; + break; + case hwmon_in_lcrit_alarm: + case hwmon_in_crit_alarm: + if (chip == ina226) + return 0444; + break; + default: + break; + } + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + return 0444; + case hwmon_curr_lcrit: + case hwmon_curr_crit: + if (chip == ina226) + return 0644; + break; + case hwmon_curr_lcrit_alarm: + case hwmon_curr_crit_alarm: + if (chip == ina226) + return 0444; + break; + default: + break; + } + break; + case hwmon_power: + switch (attr) { + case hwmon_power_input: + return 0444; + case hwmon_power_crit: + if (chip == ina226) + return 0644; + break; + case hwmon_power_crit_alarm: + if (chip == ina226) + return 0444; + break; + default: + break; + } + break; + case hwmon_chip: + switch (attr) { + case hwmon_chip_update_interval: + if (chip == ina226) + return 0644; + break; + default: + break; + } + break; + default: + break; + } + return 0; } +static const struct hwmon_channel_info * const ina2xx_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_UPDATE_INTERVAL), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT | HWMON_I_CRIT | HWMON_I_CRIT_ALARM | + HWMON_I_LCRIT | HWMON_I_LCRIT_ALARM, + HWMON_I_INPUT | HWMON_I_CRIT | HWMON_I_CRIT_ALARM | + HWMON_I_LCRIT | HWMON_I_LCRIT_ALARM + ), + HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_CRIT | HWMON_C_CRIT_ALARM | + HWMON_C_LCRIT | HWMON_C_LCRIT_ALARM), + HWMON_CHANNEL_INFO(power, + HWMON_P_INPUT | HWMON_P_CRIT | HWMON_P_CRIT_ALARM), + NULL +}; + +static const struct hwmon_ops ina2xx_hwmon_ops = { + .is_visible = ina2xx_is_visible, + .read = ina2xx_read, + .write = ina2xx_write, +}; + +static const struct hwmon_chip_info ina2xx_chip_info = { + .ops = &ina2xx_hwmon_ops, + .info = ina2xx_info, +}; + +/* shunt resistance */ + /* * In order to keep calibration register value fixed, the product * of current_lsb and shunt_resistor should also be fixed and equal * to shunt_voltage_lsb 
= 1 / shunt_div multiplied by 10^9 in order * to keep the scale. */ -static int ina2xx_set_shunt(struct ina2xx_data *data, long val) +static int ina2xx_set_shunt(struct ina2xx_data *data, unsigned long val) { unsigned int dividend = DIV_ROUND_CLOSEST(1000000000, data->config->shunt_div); - if (val <= 0 || val > dividend) + if (!val || val > dividend) return -EINVAL; - mutex_lock(&data->config_lock); data->rshunt = val; data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val); data->power_lsb_uW = data->config->power_lsb_factor * data->current_lsb_uA; - mutex_unlock(&data->config_lock); return 0; } -static ssize_t ina2xx_shunt_show(struct device *dev, - struct device_attribute *da, char *buf) +static ssize_t shunt_resistor_show(struct device *dev, + struct device_attribute *da, char *buf) { struct ina2xx_data *data = dev_get_drvdata(dev); return sysfs_emit(buf, "%li\n", data->rshunt); } -static ssize_t ina2xx_shunt_store(struct device *dev, - struct device_attribute *da, - const char *buf, size_t count) +static ssize_t shunt_resistor_store(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) { + struct ina2xx_data *data = dev_get_drvdata(dev); unsigned long val; int status; - struct ina2xx_data *data = dev_get_drvdata(dev); status = kstrtoul(buf, 10, &val); if (status < 0) return status; + mutex_lock(&data->config_lock); status = ina2xx_set_shunt(data, val); + mutex_unlock(&data->config_lock); if (status < 0) return status; return count; } -static ssize_t ina226_interval_store(struct device *dev, - struct device_attribute *da, - const char *buf, size_t count) -{ - struct ina2xx_data *data = dev_get_drvdata(dev); - unsigned long val; - int status; +static DEVICE_ATTR_RW(shunt_resistor); - status = kstrtoul(buf, 10, &val); - if (status < 0) - return status; - - if (val > INT_MAX || val == 0) - return -EINVAL; - - status = regmap_update_bits(data->regmap, INA2XX_CONFIG, - INA226_AVG_RD_MASK, - ina226_interval_to_reg(val)); - if (status < 0) - return status; - - return count; -} +/* pointers to created device attributes */ +static struct attribute *ina2xx_attrs[] = { + &dev_attr_shunt_resistor.attr, + NULL, +}; +ATTRIBUTE_GROUPS(ina2xx); -static ssize_t ina226_interval_show(struct device *dev, - struct device_attribute *da, char *buf) +/* + * Initialize chip + */ +static int ina2xx_init(struct device *dev, struct ina2xx_data *data) { - struct ina2xx_data *data = dev_get_drvdata(dev); - int status; - unsigned int regval; - - status = regmap_read(data->regmap, INA2XX_CONFIG, ®val); - if (status) - return status; - - return sysfs_emit(buf, "%d\n", ina226_reg_to_interval(regval)); -} - -/* shunt voltage */ -static SENSOR_DEVICE_ATTR_RO(in0_input, ina2xx_value, INA2XX_SHUNT_VOLTAGE); -/* shunt voltage over/under voltage alert setting and alarm */ -static SENSOR_DEVICE_ATTR_RW(in0_crit, ina226_alert, - INA226_SHUNT_OVER_VOLTAGE_BIT); -static SENSOR_DEVICE_ATTR_RW(in0_lcrit, ina226_alert, - INA226_SHUNT_UNDER_VOLTAGE_BIT); -static SENSOR_DEVICE_ATTR_RO(in0_crit_alarm, ina226_alarm, - INA226_SHUNT_OVER_VOLTAGE_BIT); -static SENSOR_DEVICE_ATTR_RO(in0_lcrit_alarm, ina226_alarm, - INA226_SHUNT_UNDER_VOLTAGE_BIT); - -/* bus voltage */ -static SENSOR_DEVICE_ATTR_RO(in1_input, ina2xx_value, INA2XX_BUS_VOLTAGE); -/* bus voltage over/under voltage alert setting and alarm */ -static SENSOR_DEVICE_ATTR_RW(in1_crit, ina226_alert, - INA226_BUS_OVER_VOLTAGE_BIT); -static SENSOR_DEVICE_ATTR_RW(in1_lcrit, ina226_alert, - INA226_BUS_UNDER_VOLTAGE_BIT); -static 
SENSOR_DEVICE_ATTR_RO(in1_crit_alarm, ina226_alarm, - INA226_BUS_OVER_VOLTAGE_BIT); -static SENSOR_DEVICE_ATTR_RO(in1_lcrit_alarm, ina226_alarm, - INA226_BUS_UNDER_VOLTAGE_BIT); - -/* calculated current */ -static SENSOR_DEVICE_ATTR_RO(curr1_input, ina2xx_value, INA2XX_CURRENT); - -/* calculated power */ -static SENSOR_DEVICE_ATTR_RO(power1_input, ina2xx_value, INA2XX_POWER); -/* over-limit power alert setting and alarm */ -static SENSOR_DEVICE_ATTR_RW(power1_crit, ina226_alert, - INA226_POWER_OVER_LIMIT_BIT); -static SENSOR_DEVICE_ATTR_RO(power1_crit_alarm, ina226_alarm, - INA226_POWER_OVER_LIMIT_BIT); + struct regmap *regmap = data->regmap; + u32 shunt; + int ret; -/* shunt resistance */ -static SENSOR_DEVICE_ATTR_RW(shunt_resistor, ina2xx_shunt, INA2XX_CALIBRATION); + if (device_property_read_u32(dev, "shunt-resistor", &shunt) < 0) + shunt = INA2XX_RSHUNT_DEFAULT; -/* update interval (ina226 only) */ -static SENSOR_DEVICE_ATTR_RW(update_interval, ina226_interval, 0); + ret = ina2xx_set_shunt(data, shunt); + if (ret < 0) + return ret; -/* pointers to created device attributes */ -static struct attribute *ina2xx_attrs[] = { - &sensor_dev_attr_in0_input.dev_attr.attr, - &sensor_dev_attr_in1_input.dev_attr.attr, - &sensor_dev_attr_curr1_input.dev_attr.attr, - &sensor_dev_attr_power1_input.dev_attr.attr, - &sensor_dev_attr_shunt_resistor.dev_attr.attr, - NULL, -}; + ret = regmap_write(regmap, INA2XX_CONFIG, data->config->config_default); + if (ret < 0) + return ret; -static const struct attribute_group ina2xx_group = { - .attrs = ina2xx_attrs, -}; + if (data->chip == ina226) { + bool active_high = device_property_read_bool(dev, "ti,alert-polarity-active-high"); -static struct attribute *ina226_attrs[] = { - &sensor_dev_attr_in0_crit.dev_attr.attr, - &sensor_dev_attr_in0_lcrit.dev_attr.attr, - &sensor_dev_attr_in0_crit_alarm.dev_attr.attr, - &sensor_dev_attr_in0_lcrit_alarm.dev_attr.attr, - &sensor_dev_attr_in1_crit.dev_attr.attr, - &sensor_dev_attr_in1_lcrit.dev_attr.attr, - &sensor_dev_attr_in1_crit_alarm.dev_attr.attr, - &sensor_dev_attr_in1_lcrit_alarm.dev_attr.attr, - &sensor_dev_attr_power1_crit.dev_attr.attr, - &sensor_dev_attr_power1_crit_alarm.dev_attr.attr, - &sensor_dev_attr_update_interval.dev_attr.attr, - NULL, -}; + regmap_update_bits(regmap, INA226_MASK_ENABLE, + INA226_ALERT_LATCH_ENABLE | INA226_ALERT_POLARITY, + INA226_ALERT_LATCH_ENABLE | + FIELD_PREP(INA226_ALERT_POLARITY, active_high)); + } -static const struct attribute_group ina226_group = { - .attrs = ina226_attrs, -}; + /* + * Calibration register is set to the best value, which eliminates + * truncation errors on calculating current register in hardware. + * According to datasheet (eq. 3) the best values are 2048 for + * ina226 and 4096 for ina219. They are hardcoded as calibration_value. 
+ */ + return regmap_write(regmap, INA2XX_CALIBRATION, + data->config->calibration_value); +} static int ina2xx_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ina2xx_data *data; struct device *hwmon_dev; - u32 val; - int ret, group = 0; enum ina2xx_ids chip; + int ret; chip = (uintptr_t)i2c_get_match_data(client); @@ -642,21 +837,9 @@ static int ina2xx_probe(struct i2c_client *client) /* set the device type */ data->config = &ina2xx_config[chip]; + data->chip = chip; mutex_init(&data->config_lock); - if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) { - struct ina2xx_platform_data *pdata = dev_get_platdata(dev); - - if (pdata) - val = pdata->shunt_uohms; - else - val = INA2XX_RSHUNT_DEFAULT; - } - - ina2xx_set_shunt(data, val); - - ina2xx_regmap_config.max_register = data->config->registers; - data->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config); if (IS_ERR(data->regmap)) { dev_err(dev, "failed to allocate register map\n"); @@ -667,37 +850,13 @@ static int ina2xx_probe(struct i2c_client *client) if (ret) return dev_err_probe(dev, ret, "failed to enable vs regulator\n"); - if (chip == ina226) { - if (of_property_read_bool(dev->of_node, "ti,alert-polarity-active-high")) { - ret = ina2xx_set_alert_polarity(data, - INA226_ALERT_POL_HIGH); - if (ret < 0) { - return dev_err_probe(dev, ret, - "failed to set alert polarity active high\n"); - } - } else { - /* Set default value i.e active low */ - ret = ina2xx_set_alert_polarity(data, - INA226_ALERT_POL_LOW); - if (ret < 0) { - return dev_err_probe(dev, ret, - "failed to set alert polarity active low\n"); - } - } - } - - ret = ina2xx_init(data); - if (ret < 0) { - dev_err(dev, "error configuring the device: %d\n", ret); - return -ENODEV; - } - - data->groups[group++] = &ina2xx_group; - if (chip == ina226) - data->groups[group++] = &ina226_group; + ret = ina2xx_init(dev, data); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to configure device\n"); - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, data->groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, &ina2xx_chip_info, + ina2xx_groups); if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev); diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index f0053f87e3e623..1bf479a0f7939a 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -813,7 +813,6 @@ static int ina3221_probe_child_from_dt(struct device *dev, static int ina3221_probe_from_dt(struct device *dev, struct ina3221_data *ina) { const struct device_node *np = dev->of_node; - struct device_node *child; int ret; /* Compatible with non-DT platforms */ @@ -822,12 +821,10 @@ static int ina3221_probe_from_dt(struct device *dev, struct ina3221_data *ina) ina->single_shot = of_property_read_bool(np, "ti,single-shot"); - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { ret = ina3221_probe_child_from_dt(dev, child, ina); - if (ret) { - of_node_put(child); + if (ret) return ret; - } } return 0; diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 543526bac04254..7dc19c5d62ac35 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -438,16 +438,21 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) data->disp_negative = true; } - if (boot_cpu_data.x86 == 0x15 && + data->is_zen = cpu_feature_enabled(X86_FEATURE_ZEN); + if (data->is_zen) { + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + 
data->read_tempreg = read_tempreg_nb_zen; + } else if (boot_cpu_data.x86 == 0x15 && ((boot_cpu_data.x86_model & 0xf0) == 0x60 || (boot_cpu_data.x86_model & 0xf0) == 0x70)) { data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; - } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { - data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; - data->read_tempreg = read_tempreg_nb_zen; - data->is_zen = true; + } else { + data->read_htcreg = read_htcreg_pci; + data->read_tempreg = read_tempreg_pci; + } + if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { switch (boot_cpu_data.x86_model) { case 0x1: /* Zen */ case 0x8: /* Zen+ */ @@ -469,10 +474,6 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) break; } } else if (boot_cpu_data.x86 == 0x19) { - data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; - data->read_tempreg = read_tempreg_nb_zen; - data->is_zen = true; - switch (boot_cpu_data.x86_model) { case 0x0 ... 0x1: /* Zen3 SP3/TR */ case 0x8: /* Zen3 TR Chagall */ @@ -496,13 +497,6 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) k10temp_get_ccd_support(data, 12); break; } - } else if (boot_cpu_data.x86 == 0x1a) { - data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; - data->read_tempreg = read_tempreg_nb_zen; - data->is_zen = true; - } else { - data->read_htcreg = read_htcreg_pci; - data->read_tempreg = read_tempreg_pci; } for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { @@ -548,6 +542,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} }; diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index ca5c52b38c0f37..511d95a0efb36a 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -2674,19 +2674,16 @@ static int lm90_parse_dt_channel_info(struct i2c_client *client, struct lm90_data *data) { int err; - struct device_node *child; struct device *dev = &client->dev; const struct device_node *np = dev->of_node; - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { if (strcmp(child->name, "channel")) continue; err = lm90_probe_channel_from_dt(client, child, data); - if (err) { - of_node_put(child); + if (err) return err; - } } return 0; diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c index 46579a3e1715c9..0be439b38ee1bb 100644 --- a/drivers/hwmon/lm92.c +++ b/drivers/hwmon/lm92.c @@ -27,15 +27,14 @@ * with the LM92. 
*/ -#include -#include -#include -#include -#include -#include #include +#include +#include +#include +#include #include -#include +#include +#include /* * The LM92 and MAX6635 have 2 two-state pins for address selection, @@ -43,8 +42,6 @@ */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; -enum chips { lm92, max6635 }; - /* The LM92 registers */ #define LM92_REG_CONFIG 0x01 /* 8-bit, RW */ #define LM92_REG_TEMP 0x00 /* 16-bit, RO */ @@ -66,10 +63,10 @@ static inline int TEMP_FROM_REG(s16 reg) return reg / 8 * 625 / 10; } -static inline s16 TEMP_TO_REG(long val) +static inline s16 TEMP_TO_REG(long val, int resolution) { val = clamp_val(val, -60000, 160000); - return val * 10 / 625 * 8; + return DIV_ROUND_CLOSEST(val << (resolution - 9), 1000) << (16 - resolution); } /* Alarm flags are stored in the 3 LSB of the temperature register */ @@ -78,239 +75,336 @@ static inline u8 ALARMS_FROM_REG(s16 reg) return reg & 0x0007; } -enum temp_index { - t_input, - t_crit, - t_min, - t_max, - t_hyst, - t_num_regs -}; - -static const u8 regs[t_num_regs] = { - [t_input] = LM92_REG_TEMP, - [t_crit] = LM92_REG_TEMP_CRIT, - [t_min] = LM92_REG_TEMP_LOW, - [t_max] = LM92_REG_TEMP_HIGH, - [t_hyst] = LM92_REG_TEMP_HYST, -}; - /* Client data (each client gets its own) */ struct lm92_data { - struct i2c_client *client; + struct regmap *regmap; struct mutex update_lock; - bool valid; /* false until following fields are valid */ - unsigned long last_updated; /* in jiffies */ - - /* registers values */ - s16 temp[t_num_regs]; /* index with enum temp_index */ + int resolution; }; -/* - * Sysfs attributes and callback functions - */ - -static struct lm92_data *lm92_update_device(struct device *dev) +static int lm92_temp_read(struct lm92_data *data, u32 attr, int channel, long *val) { - struct lm92_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - int i; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + HZ) || - !data->valid) { - dev_dbg(&client->dev, "Updating lm92 data\n"); - for (i = 0; i < t_num_regs; i++) { - data->temp[i] = - i2c_smbus_read_word_swapped(client, regs[i]); - } - data->last_updated = jiffies; - data->valid = true; + int reg = -1, hyst_reg = -1, alarm_bit = 0; + struct regmap *regmap = data->regmap; + u32 temp; + int ret; + + switch (attr) { + case hwmon_temp_input: + reg = LM92_REG_TEMP; + break; + case hwmon_temp_min: + reg = LM92_REG_TEMP_LOW; + break; + case hwmon_temp_max: + reg = LM92_REG_TEMP_HIGH; + break; + case hwmon_temp_crit: + reg = LM92_REG_TEMP_CRIT; + break; + case hwmon_temp_min_hyst: + hyst_reg = LM92_REG_TEMP_LOW; + break; + case hwmon_temp_max_hyst: + hyst_reg = LM92_REG_TEMP_HIGH; + break; + case hwmon_temp_crit_hyst: + hyst_reg = LM92_REG_TEMP_CRIT; + break; + case hwmon_temp_min_alarm: + alarm_bit = 0; + break; + case hwmon_temp_max_alarm: + alarm_bit = 1; + break; + case hwmon_temp_crit_alarm: + alarm_bit = 2; + break; + default: + return -EOPNOTSUPP; } - - mutex_unlock(&data->update_lock); - - return data; + if (reg >= 0) { + ret = regmap_read(regmap, reg, &temp); + if (ret < 0) + return ret; + *val = TEMP_FROM_REG(temp); + } else if (hyst_reg >= 0) { + u32 regs[2] = { hyst_reg, LM92_REG_TEMP_HYST }; + u16 regvals[2]; + + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); + if (ret) + return ret; + if (attr == hwmon_temp_min_hyst) + *val = TEMP_FROM_REG(regvals[0]) + TEMP_FROM_REG(regvals[1]); + else + *val = TEMP_FROM_REG(regvals[0]) - TEMP_FROM_REG(regvals[1]); + } 
else { + ret = regmap_read(regmap, LM92_REG_TEMP, &temp); + if (ret) + return ret; + *val = !!(temp & BIT(alarm_bit)); + } + return 0; } -static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, - char *buf) +static int lm92_chip_read(struct lm92_data *data, u32 attr, long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct lm92_data *data = lm92_update_device(dev); - - return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index])); + u32 temp; + int ret; + + switch (attr) { + case hwmon_chip_alarms: + ret = regmap_read(data->regmap, LM92_REG_TEMP, &temp); + if (ret) + return ret; + *val = ALARMS_FROM_REG(temp); + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp_store(struct device *dev, - struct device_attribute *devattr, const char *buf, - size_t count) +static int lm92_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm92_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - int nr = attr->index; - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err) - return err; - - mutex_lock(&data->update_lock); - data->temp[nr] = TEMP_TO_REG(val); - i2c_smbus_write_word_swapped(client, regs[nr], data->temp[nr]); - mutex_unlock(&data->update_lock); - return count; -} -static ssize_t temp_hyst_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct lm92_data *data = lm92_update_device(dev); - - return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]) - - TEMP_FROM_REG(data->temp[t_hyst])); + switch (type) { + case hwmon_chip: + return lm92_chip_read(data, attr, val); + case hwmon_temp: + return lm92_temp_read(data, attr, channel, val); + default: + return -EOPNOTSUPP; + } } -static ssize_t temp1_min_hyst_show(struct device *dev, - struct device_attribute *attr, char *buf) +static int lm92_temp_write(struct lm92_data *data, u32 attr, long val) { - struct lm92_data *data = lm92_update_device(dev); - - return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[t_min]) - + TEMP_FROM_REG(data->temp[t_hyst])); + struct regmap *regmap = data->regmap; + int reg, err; + u32 temp; + + switch (attr) { + case hwmon_temp_min: + reg = LM92_REG_TEMP_LOW; + break; + case hwmon_temp_max: + reg = LM92_REG_TEMP_HIGH; + break; + case hwmon_temp_crit: + reg = LM92_REG_TEMP_CRIT; + break; + case hwmon_temp_crit_hyst: + val = clamp_val(val, -120000, 220000); + mutex_lock(&data->update_lock); + err = regmap_read(regmap, LM92_REG_TEMP_CRIT, &temp); + if (err) + goto unlock; + val = TEMP_TO_REG(TEMP_FROM_REG(temp) - val, data->resolution); + err = regmap_write(regmap, LM92_REG_TEMP_HYST, val); +unlock: + mutex_unlock(&data->update_lock); + return err; + default: + return -EOPNOTSUPP; + } + return regmap_write(regmap, reg, TEMP_TO_REG(val, data->resolution)); } -static ssize_t temp_hyst_store(struct device *dev, - struct device_attribute *devattr, - const char *buf, size_t count) +static int lm92_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm92_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long val; - int err; - err = kstrtol(buf, 10, &val); - if (err) - return err; - - val = clamp_val(val, -120000, 220000); - 
mutex_lock(&data->update_lock); - data->temp[t_hyst] = - TEMP_TO_REG(TEMP_FROM_REG(data->temp[attr->index]) - val); - i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST, - data->temp[t_hyst]); - mutex_unlock(&data->update_lock); - return count; + switch (type) { + case hwmon_temp: + return lm92_temp_write(data, attr, val); + default: + return -EOPNOTSUPP; + } } -static ssize_t alarms_show(struct device *dev, struct device_attribute *attr, - char *buf) +static umode_t lm92_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) { - struct lm92_data *data = lm92_update_device(dev); - - return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp[t_input])); + switch (type) { + case hwmon_chip: + switch (attr) { + case hwmon_chip_alarms: + return 0444; + default: + break; + } + break; + case hwmon_temp: + switch (attr) { + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + return 0644; + case hwmon_temp_input: + case hwmon_temp_min_hyst: + case hwmon_temp_max_hyst: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_crit_alarm: + return 0444; + default: + break; + } + break; + default: + break; + } + return 0; } -static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - int bitnr = to_sensor_dev_attr(attr)->index; - struct lm92_data *data = lm92_update_device(dev); - return sprintf(buf, "%d\n", (data->temp[t_input] >> bitnr) & 1); -} +static const struct hwmon_channel_info * const lm92_info[] = { + HWMON_CHANNEL_INFO(chip, HWMON_C_ALARMS), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | + HWMON_T_MIN | HWMON_T_MIN_HYST | + HWMON_T_MAX | HWMON_T_MAX_HYST | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_CRIT_ALARM), + NULL +}; -static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, t_input); -static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp, t_crit); -static SENSOR_DEVICE_ATTR_RW(temp1_crit_hyst, temp_hyst, t_crit); -static SENSOR_DEVICE_ATTR_RW(temp1_min, temp, t_min); -static DEVICE_ATTR_RO(temp1_min_hyst); -static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, t_max); -static SENSOR_DEVICE_ATTR_RO(temp1_max_hyst, temp_hyst, t_max); -static DEVICE_ATTR_RO(alarms); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 2); -static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 0); -static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 1); +static const struct hwmon_ops lm92_hwmon_ops = { + .is_visible = lm92_is_visible, + .read = lm92_read, + .write = lm92_write, +}; + +static const struct hwmon_chip_info lm92_chip_info = { + .ops = &lm92_hwmon_ops, + .info = lm92_info, +}; /* * Detection and registration */ -static void lm92_init_client(struct i2c_client *client) +static int lm92_init_client(struct regmap *regmap) { - u8 config; - - /* Start the conversions if needed */ - config = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG); - if (config & 0x01) - i2c_smbus_write_byte_data(client, LM92_REG_CONFIG, - config & 0xFE); + return regmap_clear_bits(regmap, LM92_REG_CONFIG, 0x01); } -static struct attribute *lm92_attrs[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp1_min.dev_attr.attr, - &dev_attr_temp1_min_hyst.attr, - &sensor_dev_attr_temp1_max.dev_attr.attr, - &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, - &dev_attr_alarms.attr, - &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, - 
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr, - &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, - NULL -}; -ATTRIBUTE_GROUPS(lm92); - /* Return 0 if detection is successful, -ENODEV otherwise */ static int lm92_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; - u8 config; - u16 man_id; + u8 config_addr = LM92_REG_CONFIG; + u8 man_id_addr = LM92_REG_MAN_ID; + int i, regval; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; - config = i2c_smbus_read_byte_data(new_client, LM92_REG_CONFIG); - man_id = i2c_smbus_read_word_data(new_client, LM92_REG_MAN_ID); - - if ((config & 0xe0) == 0x00 && man_id == 0x0180) - pr_info("lm92: Found National Semiconductor LM92 chip\n"); - else - return -ENODEV; + /* + * Register values repeat with multiples of 8. + * Read twice to improve detection accuracy. + */ + for (i = 0; i < 2; i++) { + regval = i2c_smbus_read_word_data(new_client, man_id_addr); + if (regval != 0x0180) + return -ENODEV; + regval = i2c_smbus_read_byte_data(new_client, config_addr); + if (regval < 0 || (regval & 0xe0)) + return -ENODEV; + config_addr += 8; + man_id_addr += 8; + } strscpy(info->type, "lm92", I2C_NAME_SIZE); return 0; } -static int lm92_probe(struct i2c_client *new_client) +/* regmap */ + +static int lm92_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + int ret; + + if (reg == LM92_REG_CONFIG) + ret = i2c_smbus_read_byte_data(context, reg); + else + ret = i2c_smbus_read_word_swapped(context, reg); + if (ret < 0) + return ret; + + *val = ret; + return 0; +} + +static int lm92_reg_write(void *context, unsigned int reg, unsigned int val) +{ + if (reg == LM92_REG_CONFIG) + return i2c_smbus_write_byte_data(context, LM92_REG_CONFIG, val); + + return i2c_smbus_write_word_swapped(context, reg, val); +} + +static bool lm92_regmap_is_volatile(struct device *dev, unsigned int reg) +{ + return reg == LM92_REG_TEMP; +} + +static bool lm92_regmap_is_writeable(struct device *dev, unsigned int reg) { + return reg >= LM92_REG_CONFIG; +} + +static const struct regmap_config lm92_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .max_register = LM92_REG_TEMP_HIGH, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = lm92_regmap_is_volatile, + .writeable_reg = lm92_regmap_is_writeable, +}; + +static const struct regmap_bus lm92_regmap_bus = { + .reg_write = lm92_reg_write, + .reg_read = lm92_reg_read, +}; + +static int lm92_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; struct device *hwmon_dev; struct lm92_data *data; + struct regmap *regmap; + int err; + + regmap = devm_regmap_init(dev, &lm92_regmap_bus, client, + &lm92_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); - data = devm_kzalloc(&new_client->dev, sizeof(struct lm92_data), - GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(struct lm92_data), GFP_KERNEL); if (!data) return -ENOMEM; - data->client = new_client; + data->regmap = regmap; + data->resolution = (unsigned long)i2c_get_match_data(client); mutex_init(&data->update_lock); /* Initialize the chipset */ - lm92_init_client(new_client); + err = lm92_init_client(regmap); + if (err) + return err; - hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev, - new_client->name, - data, lm92_groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, + &lm92_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); } @@ -318,9 +412,10 @@ static int lm92_probe(struct 
i2c_client *new_client) * Module and driver stuff */ +/* .driver_data is limit register resolution */ static const struct i2c_device_id lm92_id[] = { - { "lm92", lm92 }, - { "max6635", max6635 }, + { "lm92", 13 }, + { "max6635", 9 }, { } }; MODULE_DEVICE_TABLE(i2c, lm92_id); diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c index 9a7afdb49895d4..7da6c8f073322c 100644 --- a/drivers/hwmon/lm95234.c +++ b/drivers/hwmon/lm95234.c @@ -8,16 +8,15 @@ * Copyright (C) 2008, 2010 Davide Rizzo */ -#include +#include +#include +#include #include +#include #include -#include -#include -#include -#include -#include #include -#include +#include +#include #define DRVNAME "lm95234" @@ -32,6 +31,8 @@ static const unsigned short normal_i2c[] = { #define LM95234_REG_STATUS 0x02 #define LM95234_REG_CONFIG 0x03 #define LM95234_REG_CONVRATE 0x04 +#define LM95234_REG_ENABLE 0x05 +#define LM95234_REG_FILTER 0x06 #define LM95234_REG_STS_FAULT 0x07 #define LM95234_REG_STS_TCRIT1 0x08 #define LM95234_REG_STS_TCRIT2 0x09 @@ -52,541 +53,372 @@ static const unsigned short normal_i2c[] = { /* Client data (each client gets its own) */ struct lm95234_data { - struct i2c_client *client; - const struct attribute_group *groups[3]; + struct regmap *regmap; struct mutex update_lock; - unsigned long last_updated, interval; /* in jiffies */ - bool valid; /* false until following fields are valid */ - /* registers values */ - int temp[5]; /* temperature (signed) */ - u32 status; /* fault/alarm status */ - u8 tcrit1[5]; /* critical temperature limit */ - u8 tcrit2[2]; /* high temperature limit */ - s8 toffset[4]; /* remote temperature offset */ - u8 thyst; /* common hysteresis */ - - u8 sensor_type; /* temperature sensor type */ + enum chips type; }; -static int lm95234_read_temp(struct i2c_client *client, int index, int *t) +static int lm95234_read_temp(struct regmap *regmap, int index, long *t) { - int val; - u16 temp = 0; + unsigned int regs[2]; + int temp = 0, ret; + u8 regvals[2]; if (index) { - val = i2c_smbus_read_byte_data(client, - LM95234_REG_UTEMPH(index - 1)); - if (val < 0) - return val; - temp = val << 8; - val = i2c_smbus_read_byte_data(client, - LM95234_REG_UTEMPL(index - 1)); - if (val < 0) - return val; - temp |= val; - *t = temp; + regs[0] = LM95234_REG_UTEMPH(index - 1); + regs[1] = LM95234_REG_UTEMPL(index - 1); + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); + if (ret) + return ret; + temp = (regvals[0] << 8) | regvals[1]; } /* * Read signed temperature if unsigned temperature is 0, * or if this is the local sensor. */ if (!temp) { - val = i2c_smbus_read_byte_data(client, - LM95234_REG_TEMPH(index)); - if (val < 0) - return val; - temp = val << 8; - val = i2c_smbus_read_byte_data(client, - LM95234_REG_TEMPL(index)); - if (val < 0) - return val; - temp |= val; - *t = (s16)temp; + regs[0] = LM95234_REG_TEMPH(index); + regs[1] = LM95234_REG_TEMPL(index); + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); + if (ret) + return ret; + temp = (regvals[0] << 8) | regvals[1]; + temp = sign_extend32(temp, 15); } + *t = DIV_ROUND_CLOSEST(temp * 125, 32); return 0; } -static u16 update_intervals[] = { 143, 364, 1000, 2500 }; - -/* Fill value cache. Must be called with update lock held. 
*/ - -static int lm95234_fill_cache(struct lm95234_data *data, - struct i2c_client *client) +static int lm95234_hyst_get(struct regmap *regmap, int reg, long *val) { - int i, ret; - - ret = i2c_smbus_read_byte_data(client, LM95234_REG_CONVRATE); - if (ret < 0) - return ret; - - data->interval = msecs_to_jiffies(update_intervals[ret & 0x03]); - - for (i = 0; i < ARRAY_SIZE(data->tcrit1); i++) { - ret = i2c_smbus_read_byte_data(client, LM95234_REG_TCRIT1(i)); - if (ret < 0) - return ret; - data->tcrit1[i] = ret; - } - for (i = 0; i < ARRAY_SIZE(data->tcrit2); i++) { - ret = i2c_smbus_read_byte_data(client, LM95234_REG_TCRIT2(i)); - if (ret < 0) - return ret; - data->tcrit2[i] = ret; - } - for (i = 0; i < ARRAY_SIZE(data->toffset); i++) { - ret = i2c_smbus_read_byte_data(client, LM95234_REG_OFFSET(i)); - if (ret < 0) - return ret; - data->toffset[i] = ret; - } - - ret = i2c_smbus_read_byte_data(client, LM95234_REG_TCRIT_HYST); - if (ret < 0) - return ret; - data->thyst = ret; + unsigned int regs[2] = {reg, LM95234_REG_TCRIT_HYST}; + u8 regvals[2]; + int ret; - ret = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL); - if (ret < 0) + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); + if (ret) return ret; - data->sensor_type = ret; - + *val = (regvals[0] - regvals[1]) * 1000; return 0; } -static int lm95234_update_device(struct lm95234_data *data) +static ssize_t lm95234_hyst_set(struct lm95234_data *data, long val) { - struct i2c_client *client = data->client; + u32 tcrit; int ret; mutex_lock(&data->update_lock); - if (time_after(jiffies, data->last_updated + data->interval) || - !data->valid) { - int i; - - if (!data->valid) { - ret = lm95234_fill_cache(data, client); - if (ret < 0) - goto abort; - } - - data->valid = false; - for (i = 0; i < ARRAY_SIZE(data->temp); i++) { - ret = lm95234_read_temp(client, i, &data->temp[i]); - if (ret < 0) - goto abort; - } - - ret = i2c_smbus_read_byte_data(client, LM95234_REG_STS_FAULT); - if (ret < 0) - goto abort; - data->status = ret; - - ret = i2c_smbus_read_byte_data(client, LM95234_REG_STS_TCRIT1); - if (ret < 0) - goto abort; - data->status |= ret << 8; + ret = regmap_read(data->regmap, LM95234_REG_TCRIT1(0), &tcrit); + if (ret) + goto unlock; - ret = i2c_smbus_read_byte_data(client, LM95234_REG_STS_TCRIT2); - if (ret < 0) - goto abort; - data->status |= ret << 16; + val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000); + val = clamp_val((int)tcrit - val, 0, 31); - data->last_updated = jiffies; - data->valid = true; - } - ret = 0; -abort: + ret = regmap_write(data->regmap, LM95234_REG_TCRIT_HYST, val); +unlock: mutex_unlock(&data->update_lock); - return ret; } -static ssize_t temp_show(struct device *dev, struct device_attribute *attr, - char *buf) +static int lm95234_crit_reg(int channel) { - struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - return sprintf(buf, "%d\n", - DIV_ROUND_CLOSEST(data->temp[index] * 125, 32)); -} - -static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct lm95234_data *data = dev_get_drvdata(dev); - u32 mask = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - return sprintf(buf, "%u", !!(data->status & mask)); -} - -static ssize_t type_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct lm95234_data *data = dev_get_drvdata(dev); - u8 mask = 
to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - return sprintf(buf, data->sensor_type & mask ? "1\n" : "2\n"); + if (channel == 1 || channel == 2) + return LM95234_REG_TCRIT2(channel - 1); + return LM95234_REG_TCRIT1(channel); } -static ssize_t type_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int lm95234_temp_write(struct device *dev, u32 attr, int channel, long val) { struct lm95234_data *data = dev_get_drvdata(dev); - unsigned long val; - u8 mask = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - ret = kstrtoul(buf, 10, &val); - if (ret < 0) - return ret; - - if (val != 1 && val != 2) - return -EINVAL; - - mutex_lock(&data->update_lock); - if (val == 1) - data->sensor_type |= mask; - else - data->sensor_type &= ~mask; - data->valid = false; - i2c_smbus_write_byte_data(data->client, LM95234_REG_REM_MODEL, - data->sensor_type); - mutex_unlock(&data->update_lock); - - return count; + struct regmap *regmap = data->regmap; + + switch (attr) { + case hwmon_temp_enable: + if (val && val != 1) + return -EINVAL; + return regmap_update_bits(regmap, LM95234_REG_ENABLE, + BIT(channel), val ? BIT(channel) : 0); + case hwmon_temp_type: + if (val != 1 && val != 2) + return -EINVAL; + return regmap_update_bits(regmap, LM95234_REG_REM_MODEL, + BIT(channel), + val == 1 ? BIT(channel) : 0); + case hwmon_temp_offset: + val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500); + return regmap_write(regmap, LM95234_REG_OFFSET(channel - 1), val); + case hwmon_temp_max: + val = clamp_val(val, 0, channel == 1 ? 127000 : 255000); + val = DIV_ROUND_CLOSEST(val, 1000); + return regmap_write(regmap, lm95234_crit_reg(channel), val); + case hwmon_temp_max_hyst: + return lm95234_hyst_set(data, val); + case hwmon_temp_crit: + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000); + return regmap_write(regmap, LM95234_REG_TCRIT1(channel), val); + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t tcrit2_show(struct device *dev, struct device_attribute *attr, - char *buf) +static int lm95234_alarm_reg(int channel) { - struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - return sprintf(buf, "%u", data->tcrit2[index] * 1000); + if (channel == 1 || channel == 2) + return LM95234_REG_STS_TCRIT2; + return LM95234_REG_STS_TCRIT1; } -static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int lm95234_temp_read(struct device *dev, u32 attr, int channel, long *val) { struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - long val; - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - ret = kstrtol(buf, 10, &val); - if (ret < 0) - return ret; - - val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 
255 : 127) * 1000), - 1000); - - mutex_lock(&data->update_lock); - data->tcrit2[index] = val; - i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT2(index), val); - mutex_unlock(&data->update_lock); + struct regmap *regmap = data->regmap; + u32 regval, mask; + int ret; - return count; + switch (attr) { + case hwmon_temp_enable: + ret = regmap_read(regmap, LM95234_REG_ENABLE, ®val); + if (ret) + return ret; + *val = !!(regval & BIT(channel)); + break; + case hwmon_temp_input: + return lm95234_read_temp(regmap, channel, val); + case hwmon_temp_max_alarm: + ret = regmap_read(regmap, lm95234_alarm_reg(channel), ®val); + if (ret) + return ret; + *val = !!(regval & BIT(channel)); + break; + case hwmon_temp_crit_alarm: + ret = regmap_read(regmap, LM95234_REG_STS_TCRIT1, ®val); + if (ret) + return ret; + *val = !!(regval & BIT(channel)); + break; + case hwmon_temp_crit_hyst: + return lm95234_hyst_get(regmap, LM95234_REG_TCRIT1(channel), val); + case hwmon_temp_type: + ret = regmap_read(regmap, LM95234_REG_REM_MODEL, ®val); + if (ret) + return ret; + *val = (regval & BIT(channel)) ? 1 : 2; + break; + case hwmon_temp_offset: + ret = regmap_read(regmap, LM95234_REG_OFFSET(channel - 1), ®val); + if (ret) + return ret; + *val = sign_extend32(regval, 7) * 500; + break; + case hwmon_temp_fault: + ret = regmap_read(regmap, LM95234_REG_STS_FAULT, ®val); + if (ret) + return ret; + mask = (BIT(0) | BIT(1)) << ((channel - 1) << 1); + *val = !!(regval & mask); + break; + case hwmon_temp_max: + ret = regmap_read(regmap, lm95234_crit_reg(channel), ®val); + if (ret) + return ret; + *val = regval * 1000; + break; + case hwmon_temp_max_hyst: + return lm95234_hyst_get(regmap, lm95234_crit_reg(channel), val); + case hwmon_temp_crit: + ret = regmap_read(regmap, LM95234_REG_TCRIT1(channel), ®val); + if (ret) + return ret; + *val = regval * 1000; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t tcrit2_hyst_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - /* Result can be negative, so be careful with unsigned operands */ - return sprintf(buf, "%d", - ((int)data->tcrit2[index] - (int)data->thyst) * 1000); -} +static u16 update_intervals[] = { 143, 364, 1000, 2500 }; -static ssize_t tcrit1_show(struct device *dev, struct device_attribute *attr, - char *buf) +static int lm95234_chip_write(struct device *dev, u32 attr, long val) { struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - return sprintf(buf, "%u", data->tcrit1[index] * 1000); + switch (attr) { + case hwmon_chip_update_interval: + val = find_closest(val, update_intervals, ARRAY_SIZE(update_intervals)); + return regmap_write(data->regmap, LM95234_REG_CONVRATE, val); + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int lm95234_chip_read(struct device *dev, u32 attr, long *val) { struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - long val; - - if (ret) - return ret; - - ret = kstrtol(buf, 10, &val); - if (ret < 0) - return ret; - - val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000); + u32 convrate; + int ret; - mutex_lock(&data->update_lock); - data->tcrit1[index] = val; - 
i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT1(index), val); - mutex_unlock(&data->update_lock); + switch (attr) { + case hwmon_chip_update_interval: + ret = regmap_read(data->regmap, LM95234_REG_CONVRATE, &convrate); + if (ret) + return ret; - return count; + *val = update_intervals[convrate & 0x03]; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t tcrit1_hyst_show(struct device *dev, - struct device_attribute *attr, char *buf) +static int lm95234_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) { - struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - /* Result can be negative, so be careful with unsigned operands */ - return sprintf(buf, "%d", - ((int)data->tcrit1[index] - (int)data->thyst) * 1000); + switch (type) { + case hwmon_chip: + return lm95234_chip_write(dev, attr, val); + case hwmon_temp: + return lm95234_temp_write(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } } -static ssize_t tcrit1_hyst_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static int lm95234_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) { - struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - long val; - - if (ret) - return ret; - - ret = kstrtol(buf, 10, &val); - if (ret < 0) - return ret; - - val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000); - val = clamp_val((int)data->tcrit1[index] - val, 0, 31); - - mutex_lock(&data->update_lock); - data->thyst = val; - i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT_HYST, val); - mutex_unlock(&data->update_lock); - - return count; + switch (type) { + case hwmon_chip: + return lm95234_chip_read(dev, attr, val); + case hwmon_temp: + return lm95234_temp_read(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } } -static ssize_t offset_show(struct device *dev, struct device_attribute *attr, - char *buf) +static umode_t lm95234_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) { - struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); + const struct lm95234_data *data = _data; - if (ret) - return ret; + if (data->type == lm95233 && channel > 2) + return 0; - return sprintf(buf, "%d", data->toffset[index] * 500); + switch (type) { + case hwmon_chip: + switch (attr) { + case hwmon_chip_update_interval: + return 0644; + default: + break; + } + break; + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max_alarm: + return 0444; + case hwmon_temp_crit_alarm: + case hwmon_temp_crit_hyst: + return (channel && channel < 3) ? 0444 : 0; + case hwmon_temp_type: + case hwmon_temp_offset: + return channel ? 0644 : 0; + case hwmon_temp_fault: + return channel ? 0444 : 0; + case hwmon_temp_max: + case hwmon_temp_enable: + return 0644; + case hwmon_temp_max_hyst: + return channel ? 0444 : 0644; + case hwmon_temp_crit: + return (channel && channel < 3) ? 
0644 : 0; + default: + break; + } + break; + default: + break; + } + return 0; } -static ssize_t offset_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct lm95234_data *data = dev_get_drvdata(dev); - int index = to_sensor_dev_attr(attr)->index; - int ret = lm95234_update_device(data); - long val; - - if (ret) - return ret; - - ret = kstrtol(buf, 10, &val); - if (ret < 0) - return ret; - - /* Accuracy is 1/2 degrees C */ - val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500); +static const struct hwmon_channel_info * const lm95234_info[] = { + HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | + HWMON_T_MAX_ALARM | HWMON_T_ENABLE, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | + HWMON_T_MAX_ALARM | HWMON_T_FAULT | HWMON_T_TYPE | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_CRIT_ALARM | HWMON_T_OFFSET | HWMON_T_ENABLE, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | + HWMON_T_MAX_ALARM | HWMON_T_FAULT | HWMON_T_TYPE | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_CRIT_ALARM | HWMON_T_OFFSET | HWMON_T_ENABLE, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | + HWMON_T_MAX_ALARM | HWMON_T_FAULT | HWMON_T_TYPE | + HWMON_T_OFFSET | HWMON_T_ENABLE, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | + HWMON_T_MAX_ALARM | HWMON_T_FAULT | HWMON_T_TYPE | + HWMON_T_OFFSET | HWMON_T_ENABLE), + NULL +}; - mutex_lock(&data->update_lock); - data->toffset[index] = val; - i2c_smbus_write_byte_data(data->client, LM95234_REG_OFFSET(index), val); - mutex_unlock(&data->update_lock); +static const struct hwmon_ops lm95234_hwmon_ops = { + .is_visible = lm95234_is_visible, + .read = lm95234_read, + .write = lm95234_write, +}; - return count; -} +static const struct hwmon_chip_info lm95234_chip_info = { + .ops = &lm95234_hwmon_ops, + .info = lm95234_info, +}; -static ssize_t update_interval_show(struct device *dev, - struct device_attribute *attr, char *buf) +static bool lm95234_volatile_reg(struct device *dev, unsigned int reg) { - struct lm95234_data *data = dev_get_drvdata(dev); - int ret = lm95234_update_device(data); - - if (ret) - return ret; - - return sprintf(buf, "%lu\n", - DIV_ROUND_CLOSEST(data->interval * 1000, HZ)); + switch (reg) { + case LM95234_REG_TEMPH(0) ... LM95234_REG_TEMPH(4): + case LM95234_REG_TEMPL(0) ... LM95234_REG_TEMPL(4): + case LM95234_REG_UTEMPH(0) ... LM95234_REG_UTEMPH(3): + case LM95234_REG_UTEMPL(0) ... LM95234_REG_UTEMPL(3): + case LM95234_REG_STS_FAULT: + case LM95234_REG_STS_TCRIT1: + case LM95234_REG_STS_TCRIT2: + case LM95234_REG_REM_MODEL_STS: + return true; + default: + return false; + } } -static ssize_t update_interval_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static bool lm95234_writeable_reg(struct device *dev, unsigned int reg) { - struct lm95234_data *data = dev_get_drvdata(dev); - int ret = lm95234_update_device(data); - unsigned long val; - u8 regval; - - if (ret) - return ret; - - ret = kstrtoul(buf, 10, &val); - if (ret < 0) - return ret; - - for (regval = 0; regval < 3; regval++) { - if (val <= update_intervals[regval]) - break; + switch (reg) { + case LM95234_REG_CONFIG ... LM95234_REG_FILTER: + case LM95234_REG_REM_MODEL ... LM95234_REG_OFFSET(3): + case LM95234_REG_TCRIT1(0) ... LM95234_REG_TCRIT1(4): + case LM95234_REG_TCRIT2(0) ... 
LM95234_REG_TCRIT2(1): + case LM95234_REG_TCRIT_HYST: + return true; + default: + return false; } - - mutex_lock(&data->update_lock); - data->interval = msecs_to_jiffies(update_intervals[regval]); - i2c_smbus_write_byte_data(data->client, LM95234_REG_CONVRATE, regval); - mutex_unlock(&data->update_lock); - - return count; } -static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0); -static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1); -static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2); -static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3); -static SENSOR_DEVICE_ATTR_RO(temp5_input, temp, 4); - -static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, BIT(0) | BIT(1)); -static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, BIT(2) | BIT(3)); -static SENSOR_DEVICE_ATTR_RO(temp4_fault, alarm, BIT(4) | BIT(5)); -static SENSOR_DEVICE_ATTR_RO(temp5_fault, alarm, BIT(6) | BIT(7)); - -static SENSOR_DEVICE_ATTR_RW(temp2_type, type, BIT(1)); -static SENSOR_DEVICE_ATTR_RW(temp3_type, type, BIT(2)); -static SENSOR_DEVICE_ATTR_RW(temp4_type, type, BIT(3)); -static SENSOR_DEVICE_ATTR_RW(temp5_type, type, BIT(4)); - -static SENSOR_DEVICE_ATTR_RW(temp1_max, tcrit1, 0); -static SENSOR_DEVICE_ATTR_RW(temp2_max, tcrit2, 0); -static SENSOR_DEVICE_ATTR_RW(temp3_max, tcrit2, 1); -static SENSOR_DEVICE_ATTR_RW(temp4_max, tcrit1, 3); -static SENSOR_DEVICE_ATTR_RW(temp5_max, tcrit1, 4); - -static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, tcrit1_hyst, 0); -static SENSOR_DEVICE_ATTR_RO(temp2_max_hyst, tcrit2_hyst, 0); -static SENSOR_DEVICE_ATTR_RO(temp3_max_hyst, tcrit2_hyst, 1); -static SENSOR_DEVICE_ATTR_RO(temp4_max_hyst, tcrit1_hyst, 3); -static SENSOR_DEVICE_ATTR_RO(temp5_max_hyst, tcrit1_hyst, 4); - -static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, BIT(0 + 8)); -static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, BIT(1 + 16)); -static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, BIT(2 + 16)); -static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, alarm, BIT(3 + 8)); -static SENSOR_DEVICE_ATTR_RO(temp5_max_alarm, alarm, BIT(4 + 8)); - -static SENSOR_DEVICE_ATTR_RW(temp2_crit, tcrit1, 1); -static SENSOR_DEVICE_ATTR_RW(temp3_crit, tcrit1, 2); - -static SENSOR_DEVICE_ATTR_RO(temp2_crit_hyst, tcrit1_hyst, 1); -static SENSOR_DEVICE_ATTR_RO(temp3_crit_hyst, tcrit1_hyst, 2); - -static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, BIT(1 + 8)); -static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, BIT(2 + 8)); - -static SENSOR_DEVICE_ATTR_RW(temp2_offset, offset, 0); -static SENSOR_DEVICE_ATTR_RW(temp3_offset, offset, 1); -static SENSOR_DEVICE_ATTR_RW(temp4_offset, offset, 2); -static SENSOR_DEVICE_ATTR_RW(temp5_offset, offset, 3); - -static DEVICE_ATTR_RW(update_interval); - -static struct attribute *lm95234_common_attrs[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp3_input.dev_attr.attr, - &sensor_dev_attr_temp2_fault.dev_attr.attr, - &sensor_dev_attr_temp3_fault.dev_attr.attr, - &sensor_dev_attr_temp2_type.dev_attr.attr, - &sensor_dev_attr_temp3_type.dev_attr.attr, - &sensor_dev_attr_temp1_max.dev_attr.attr, - &sensor_dev_attr_temp2_max.dev_attr.attr, - &sensor_dev_attr_temp3_max.dev_attr.attr, - &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, - &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, - &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, - &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_crit.dev_attr.attr, - 
&sensor_dev_attr_temp3_crit.dev_attr.attr, - &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_offset.dev_attr.attr, - &sensor_dev_attr_temp3_offset.dev_attr.attr, - &dev_attr_update_interval.attr, - NULL -}; - -static const struct attribute_group lm95234_common_group = { - .attrs = lm95234_common_attrs, -}; - -static struct attribute *lm95234_attrs[] = { - &sensor_dev_attr_temp4_input.dev_attr.attr, - &sensor_dev_attr_temp5_input.dev_attr.attr, - &sensor_dev_attr_temp4_fault.dev_attr.attr, - &sensor_dev_attr_temp5_fault.dev_attr.attr, - &sensor_dev_attr_temp4_type.dev_attr.attr, - &sensor_dev_attr_temp5_type.dev_attr.attr, - &sensor_dev_attr_temp4_max.dev_attr.attr, - &sensor_dev_attr_temp5_max.dev_attr.attr, - &sensor_dev_attr_temp4_max_hyst.dev_attr.attr, - &sensor_dev_attr_temp5_max_hyst.dev_attr.attr, - &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp4_offset.dev_attr.attr, - &sensor_dev_attr_temp5_offset.dev_attr.attr, - NULL -}; - -static const struct attribute_group lm95234_group = { - .attrs = lm95234_attrs, +static const struct regmap_config lm95234_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .writeable_reg = lm95234_writeable_reg, + .volatile_reg = lm95234_volatile_reg, + .cache_type = REGCACHE_MAPLE, }; static int lm95234_detect(struct i2c_client *client, @@ -649,61 +481,60 @@ static int lm95234_detect(struct i2c_client *client, return 0; } -static int lm95234_init_client(struct i2c_client *client) +static int lm95234_init_client(struct device *dev, struct regmap *regmap) { - int val, model; + u32 val, model; + int ret; /* start conversion if necessary */ - val = i2c_smbus_read_byte_data(client, LM95234_REG_CONFIG); - if (val < 0) - return val; - if (val & 0x40) - i2c_smbus_write_byte_data(client, LM95234_REG_CONFIG, - val & ~0x40); + ret = regmap_clear_bits(regmap, LM95234_REG_CONFIG, 0x40); + if (ret) + return ret; /* If diode type status reports an error, try to fix it */ - val = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL_STS); - if (val < 0) - return val; - model = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL); - if (model < 0) - return model; + ret = regmap_read(regmap, LM95234_REG_REM_MODEL_STS, &val); + if (ret < 0) + return ret; + ret = regmap_read(regmap, LM95234_REG_REM_MODEL, &model); + if (ret < 0) + return ret; if (model & val) { - dev_notice(&client->dev, + dev_notice(dev, "Fixing remote diode type misconfiguration (0x%x)\n", val); - i2c_smbus_write_byte_data(client, LM95234_REG_REM_MODEL, - model & ~val); + ret = regmap_write(regmap, LM95234_REG_REM_MODEL, model & ~val); } - return 0; + return ret; } static int lm95234_probe(struct i2c_client *client) { - enum chips type = (uintptr_t)i2c_get_match_data(client); struct device *dev = &client->dev; struct lm95234_data *data; struct device *hwmon_dev; + struct regmap *regmap; int err; data = devm_kzalloc(dev, sizeof(struct lm95234_data), GFP_KERNEL); if (!data) return -ENOMEM; - data->client = client; + data->type = (uintptr_t)i2c_get_match_data(client); + + regmap = devm_regmap_init_i2c(client, &lm95234_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + data->regmap = regmap; mutex_init(&data->update_lock); /* Initialize the LM95234 chip */ - err = lm95234_init_client(client); + err = lm95234_init_client(dev, regmap); if 
(err < 0) return err; - data->groups[0] = &lm95234_common_group; - if (type == lm95234) - data->groups[1] = &lm95234_group; - - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, data->groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, &lm95234_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); } diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c index d293b4f15dc126..3bdc305308479b 100644 --- a/drivers/hwmon/lm95245.c +++ b/drivers/hwmon/lm95245.c @@ -161,18 +161,18 @@ static int lm95245_read_temp(struct device *dev, u32 attr, int channel, { struct lm95245_data *data = dev_get_drvdata(dev); struct regmap *regmap = data->regmap; - int ret, regl, regh, regvall, regvalh; + unsigned int regs[2]; + unsigned int regval; + u8 regvals[2]; + int ret; switch (attr) { case hwmon_temp_input: - regl = channel ? LM95245_REG_R_REMOTE_TEMPL_S : - LM95245_REG_R_LOCAL_TEMPL_S; - regh = channel ? LM95245_REG_R_REMOTE_TEMPH_S : - LM95245_REG_R_LOCAL_TEMPH_S; - ret = regmap_read(regmap, regl, ®vall); - if (ret < 0) - return ret; - ret = regmap_read(regmap, regh, ®valh); + regs[0] = channel ? LM95245_REG_R_REMOTE_TEMPL_S : + LM95245_REG_R_LOCAL_TEMPL_S; + regs[1] = channel ? LM95245_REG_R_REMOTE_TEMPH_S : + LM95245_REG_R_LOCAL_TEMPH_S; + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); if (ret < 0) return ret; /* @@ -181,92 +181,77 @@ static int lm95245_read_temp(struct device *dev, u32 attr, int channel, * Use signed calculation for remote if signed bit is set * or if reported temperature is below signed limit. */ - if (!channel || (regvalh & 0x80) || regvalh < 0x7f) { - *val = temp_from_reg_signed(regvalh, regvall); + if (!channel || (regvals[1] & 0x80) || regvals[1] < 0x7f) { + *val = temp_from_reg_signed(regvals[1], regvals[0]); return 0; } - ret = regmap_read(regmap, LM95245_REG_R_REMOTE_TEMPL_U, - ®vall); - if (ret < 0) - return ret; - ret = regmap_read(regmap, LM95245_REG_R_REMOTE_TEMPH_U, - ®valh); - if (ret < 0) + ret = regmap_bulk_read(regmap, LM95245_REG_R_REMOTE_TEMPH_U, regvals, 2); + if (ret) return ret; - *val = temp_from_reg_unsigned(regvalh, regvall); + *val = temp_from_reg_unsigned(regvals[0], regvals[1]); return 0; case hwmon_temp_max: ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OS_LIMIT, - ®valh); + ®val); if (ret < 0) return ret; - *val = regvalh * 1000; + *val = regval * 1000; return 0; case hwmon_temp_crit: - regh = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT : - LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT; - ret = regmap_read(regmap, regh, ®valh); + regs[0] = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT : + LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT; + ret = regmap_read(regmap, regs[0], ®val); if (ret < 0) return ret; - *val = regvalh * 1000; + *val = regval * 1000; return 0; case hwmon_temp_max_hyst: - ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OS_LIMIT, - ®valh); + regs[0] = LM95245_REG_RW_REMOTE_OS_LIMIT; + regs[1] = LM95245_REG_RW_COMMON_HYSTERESIS; + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); if (ret < 0) return ret; - ret = regmap_read(regmap, LM95245_REG_RW_COMMON_HYSTERESIS, - ®vall); - if (ret < 0) - return ret; - *val = (regvalh - regvall) * 1000; + *val = (regvals[0] - regvals[1]) * 1000; return 0; case hwmon_temp_crit_hyst: - regh = channel ? LM95245_REG_RW_REMOTE_TCRIT_LIMIT : - LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT; - ret = regmap_read(regmap, regh, ®valh); - if (ret < 0) - return ret; - ret = regmap_read(regmap, LM95245_REG_RW_COMMON_HYSTERESIS, - ®vall); + regs[0] = channel ? 
LM95245_REG_RW_REMOTE_TCRIT_LIMIT : + LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT; + regs[1] = LM95245_REG_RW_COMMON_HYSTERESIS; + + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); if (ret < 0) return ret; - *val = (regvalh - regvall) * 1000; + *val = (regvals[0] - regvals[1]) * 1000; return 0; case hwmon_temp_type: - ret = regmap_read(regmap, LM95245_REG_RW_CONFIG2, ®valh); + ret = regmap_read(regmap, LM95245_REG_RW_CONFIG2, ®val); if (ret < 0) return ret; - *val = (regvalh & CFG2_REMOTE_TT) ? 1 : 2; + *val = (regval & CFG2_REMOTE_TT) ? 1 : 2; return 0; case hwmon_temp_offset: - ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OFFL, - ®vall); - if (ret < 0) - return ret; - ret = regmap_read(regmap, LM95245_REG_RW_REMOTE_OFFH, - ®valh); + ret = regmap_bulk_read(regmap, LM95245_REG_RW_REMOTE_OFFH, regvals, 2); if (ret < 0) return ret; - *val = temp_from_reg_signed(regvalh, regvall); + *val = temp_from_reg_signed(regvals[0], regvals[1]); return 0; case hwmon_temp_max_alarm: - ret = regmap_read(regmap, LM95245_REG_R_STATUS1, ®valh); + ret = regmap_read(regmap, LM95245_REG_R_STATUS1, ®val); if (ret < 0) return ret; - *val = !!(regvalh & STATUS1_ROS); + *val = !!(regval & STATUS1_ROS); return 0; case hwmon_temp_crit_alarm: - ret = regmap_read(regmap, LM95245_REG_R_STATUS1, ®valh); + ret = regmap_read(regmap, LM95245_REG_R_STATUS1, ®val); if (ret < 0) return ret; - *val = !!(regvalh & (channel ? STATUS1_RTCRIT : STATUS1_LOC)); + *val = !!(regval & (channel ? STATUS1_RTCRIT : STATUS1_LOC)); return 0; case hwmon_temp_fault: - ret = regmap_read(regmap, LM95245_REG_R_STATUS1, ®valh); + ret = regmap_read(regmap, LM95245_REG_R_STATUS1, ®val); if (ret < 0) return ret; - *val = !!(regvalh & STATUS1_DIODE_FAULT); + *val = !!(regval & STATUS1_DIODE_FAULT); return 0; default: return -EOPNOTSUPP; @@ -279,6 +264,7 @@ static int lm95245_write_temp(struct device *dev, u32 attr, int channel, struct lm95245_data *data = dev_get_drvdata(dev); struct regmap *regmap = data->regmap; unsigned int regval; + u8 regvals[2]; int ret, reg; switch (attr) { @@ -311,16 +297,10 @@ static int lm95245_write_temp(struct device *dev, u32 attr, int channel, case hwmon_temp_offset: val = clamp_val(val, -128000, 127875); val = val * 256 / 1000; - mutex_lock(&data->update_lock); - ret = regmap_write(regmap, LM95245_REG_RW_REMOTE_OFFL, - val & 0xe0); - if (ret < 0) { - mutex_unlock(&data->update_lock); - return ret; - } - ret = regmap_write(regmap, LM95245_REG_RW_REMOTE_OFFH, - (val >> 8) & 0xff); - mutex_unlock(&data->update_lock); + regvals[0] = val >> 8; + regvals[1] = val & 0xe0; + + ret = regmap_bulk_write(regmap, LM95245_REG_RW_REMOTE_OFFH, regvals, 2); return ret; case hwmon_temp_type: if (val != 1 && val != 2) diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c index d2ff6e700770d2..244839167e51c8 100644 --- a/drivers/hwmon/ltc2947-core.c +++ b/drivers/hwmon/ltc2947-core.c @@ -11,7 +11,8 @@ #include #include #include -#include +#include +#include #include #include "ltc2947.h" @@ -1034,9 +1035,8 @@ static int ltc2947_setup(struct ltc2947_data *st) /* 19.89E-6 * 10E9 */ st->lsb_energy = 19890; } - ret = of_property_read_u32_array(st->dev->of_node, - "adi,accumulator-ctl-pol", accum, - ARRAY_SIZE(accum)); + ret = device_property_read_u32_array(st->dev, "adi,accumulator-ctl-pol", + accum, ARRAY_SIZE(accum)); if (!ret) { u32 accum_reg = LTC2947_ACCUM_POL_1(accum[0]) | LTC2947_ACCUM_POL_2(accum[1]); @@ -1045,9 +1045,9 @@ static int ltc2947_setup(struct ltc2947_data *st) if (ret) return ret; } - ret = 
of_property_read_u32(st->dev->of_node, - "adi,accumulation-deadband-microamp", - &deadband); + ret = device_property_read_u32(st->dev, + "adi,accumulation-deadband-microamp", + &deadband); if (!ret) { /* the LSB is the same as the current, so 3mA */ ret = regmap_write(st->map, LTC2947_REG_ACCUM_DEADBAND, @@ -1056,7 +1056,7 @@ static int ltc2947_setup(struct ltc2947_data *st) return ret; } /* check gpio cfg */ - ret = of_property_read_u32(st->dev->of_node, "adi,gpio-out-pol", &pol); + ret = device_property_read_u32(st->dev, "adi,gpio-out-pol", &pol); if (!ret) { /* setup GPIO as output */ u32 gpio_ctl = LTC2947_GPIO_EN(1) | LTC2947_GPIO_FAN_EN(1) | @@ -1067,8 +1067,8 @@ static int ltc2947_setup(struct ltc2947_data *st) if (ret) return ret; } - ret = of_property_read_u32_array(st->dev->of_node, "adi,gpio-in-accum", - accum, ARRAY_SIZE(accum)); + ret = device_property_read_u32_array(st->dev, "adi,gpio-in-accum", + accum, ARRAY_SIZE(accum)); if (!ret) { /* * Setup the accum options. The gpioctl is already defined as diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c index d4a93223cd3b34..541fa09dc6e7d1 100644 --- a/drivers/hwmon/ltc2992.c +++ b/drivers/hwmon/ltc2992.c @@ -854,33 +854,24 @@ static const struct regmap_config ltc2992_regmap_config = { static int ltc2992_parse_dt(struct ltc2992_state *st) { - struct fwnode_handle *fwnode; - struct fwnode_handle *child; u32 addr; u32 val; int ret; - fwnode = dev_fwnode(&st->client->dev); - - fwnode_for_each_available_child_node(fwnode, child) { + device_for_each_child_node_scoped(&st->client->dev, child) { ret = fwnode_property_read_u32(child, "reg", &addr); - if (ret < 0) { - fwnode_handle_put(child); + if (ret < 0) return ret; - } - if (addr > 1) { - fwnode_handle_put(child); + if (addr > 1) return -EINVAL; - } ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val); if (!ret) { - if (!val) { - fwnode_handle_put(child); + if (!val) return dev_err_probe(&st->client->dev, -EINVAL, "shunt resistor value cannot be zero\n"); - } + st->r_sense_uohm[addr] = val; } } diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c index 7ce9a89f93a0d9..0ccb5eb596fc40 100644 --- a/drivers/hwmon/max16065.c +++ b/drivers/hwmon/max16065.c @@ -79,7 +79,7 @@ static const bool max16065_have_current[] = { }; struct max16065_data { - enum chips type; + enum chips chip; struct i2c_client *client; const struct attribute_group *groups[4]; struct mutex update_lock; @@ -114,9 +114,10 @@ static inline int LIMIT_TO_MV(int limit, int range) return limit * range / 256; } -static inline int MV_TO_LIMIT(int mv, int range) +static inline int MV_TO_LIMIT(unsigned long mv, int range) { - return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255); + mv = clamp_val(mv, 0, ULONG_MAX / 256); + return DIV_ROUND_CLOSEST(clamp_val(mv * 256, 0, range * 255), range); } static inline int ADC_TO_CURR(int adc, int gain) @@ -161,10 +162,17 @@ static struct max16065_data *max16065_update_device(struct device *dev) MAX16065_CURR_SENSE); } - for (i = 0; i < DIV_ROUND_UP(data->num_adc, 8); i++) + for (i = 0; i < 2; i++) data->fault[i] = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i)); + /* + * MAX16067 and MAX16068 have separate undervoltage and + * overvoltage alarm bits. Squash them together. 
+ */ + if (data->chip == max16067 || data->chip == max16068) + data->fault[0] |= data->fault[1]; + data->last_updated = jiffies; data->valid = true; } @@ -513,6 +521,7 @@ static int max16065_probe(struct i2c_client *client) if (unlikely(!data)) return -ENOMEM; + data->chip = chip; data->client = client; mutex_init(&data->update_lock); diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c index a89a519cf5d9bf..9b6d03cff4df5f 100644 --- a/drivers/hwmon/max1619.c +++ b/drivers/hwmon/max1619.c @@ -12,275 +12,356 @@ * http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf */ -#include -#include -#include -#include -#include -#include -#include #include -#include -#include +#include +#include +#include +#include +#include +#include static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* - * The MAX1619 registers - */ +#define MAX1619_REG_LOCAL_TEMP 0x00 +#define MAX1619_REG_REMOTE_TEMP 0x01 +#define MAX1619_REG_STATUS 0x02 +#define MAX1619_REG_CONFIG 0x03 +#define MAX1619_REG_CONVRATE 0x04 +#define MAX1619_REG_REMOTE_HIGH 0x07 +#define MAX1619_REG_REMOTE_LOW 0x08 +#define MAX1619_REG_REMOTE_CRIT 0x10 +#define MAX1619_REG_REMOTE_CRIT_HYST 0x11 +#define MAX1619_REG_MAN_ID 0xFE +#define MAX1619_REG_CHIP_ID 0xFF + +static int get_alarms(struct regmap *regmap) +{ + static u32 regs[2] = { MAX1619_REG_STATUS, MAX1619_REG_CONFIG }; + u8 regdata[2]; + int ret; -#define MAX1619_REG_R_MAN_ID 0xFE -#define MAX1619_REG_R_CHIP_ID 0xFF -#define MAX1619_REG_R_CONFIG 0x03 -#define MAX1619_REG_W_CONFIG 0x09 -#define MAX1619_REG_R_CONVRATE 0x04 -#define MAX1619_REG_W_CONVRATE 0x0A -#define MAX1619_REG_R_STATUS 0x02 -#define MAX1619_REG_R_LOCAL_TEMP 0x00 -#define MAX1619_REG_R_REMOTE_TEMP 0x01 -#define MAX1619_REG_R_REMOTE_HIGH 0x07 -#define MAX1619_REG_W_REMOTE_HIGH 0x0D -#define MAX1619_REG_R_REMOTE_LOW 0x08 -#define MAX1619_REG_W_REMOTE_LOW 0x0E -#define MAX1619_REG_R_REMOTE_CRIT 0x10 -#define MAX1619_REG_W_REMOTE_CRIT 0x12 -#define MAX1619_REG_R_TCRIT_HYST 0x11 -#define MAX1619_REG_W_TCRIT_HYST 0x13 + ret = regmap_multi_reg_read(regmap, regs, regdata, 2); + if (ret) + return ret; -/* - * Conversions - */ + /* OVERT status bit may be reversed */ + if (!(regdata[1] & 0x20)) + regdata[0] ^= 0x02; -static int temp_from_reg(int val) -{ - return (val & 0x80 ? val-0x100 : val) * 1000; + return regdata[0] & 0x1e; } -static int temp_to_reg(int val) +static int max1619_temp_read(struct regmap *regmap, u32 attr, int channel, long *val) { - return (val < 0 ? val+0x100*1000 : val) / 1000; + int reg = -1, alarm_bit = 0; + u32 temp; + int ret; + + switch (attr) { + case hwmon_temp_input: + reg = channel ? 
MAX1619_REG_REMOTE_TEMP : MAX1619_REG_LOCAL_TEMP; + break; + case hwmon_temp_min: + reg = MAX1619_REG_REMOTE_LOW; + break; + case hwmon_temp_max: + reg = MAX1619_REG_REMOTE_HIGH; + break; + case hwmon_temp_crit: + reg = MAX1619_REG_REMOTE_CRIT; + break; + case hwmon_temp_crit_hyst: + reg = MAX1619_REG_REMOTE_CRIT_HYST; + break; + case hwmon_temp_min_alarm: + alarm_bit = 3; + break; + case hwmon_temp_max_alarm: + alarm_bit = 4; + break; + case hwmon_temp_crit_alarm: + alarm_bit = 1; + break; + case hwmon_temp_fault: + alarm_bit = 2; + break; + default: + return -EOPNOTSUPP; + } + if (reg >= 0) { + ret = regmap_read(regmap, reg, &temp); + if (ret < 0) + return ret; + *val = sign_extend32(temp, 7) * 1000; + } else { + ret = get_alarms(regmap); + if (ret < 0) + return ret; + *val = !!(ret & BIT(alarm_bit)); + } + return 0; } -enum temp_index { - t_input1 = 0, - t_input2, - t_low2, - t_high2, - t_crit2, - t_hyst2, - t_num_regs -}; - -/* - * Client data (each client gets its own) - */ - -struct max1619_data { - struct i2c_client *client; - struct mutex update_lock; - bool valid; /* false until following fields are valid */ - unsigned long last_updated; /* in jiffies */ - - /* registers values */ - u8 temp[t_num_regs]; /* index with enum temp_index */ - u8 alarms; -}; +static u16 update_intervals[] = { 16000, 8000, 4000, 2000, 1000, 500, 250, 125 }; -static const u8 regs_read[t_num_regs] = { - [t_input1] = MAX1619_REG_R_LOCAL_TEMP, - [t_input2] = MAX1619_REG_R_REMOTE_TEMP, - [t_low2] = MAX1619_REG_R_REMOTE_LOW, - [t_high2] = MAX1619_REG_R_REMOTE_HIGH, - [t_crit2] = MAX1619_REG_R_REMOTE_CRIT, - [t_hyst2] = MAX1619_REG_R_TCRIT_HYST, -}; - -static const u8 regs_write[t_num_regs] = { - [t_low2] = MAX1619_REG_W_REMOTE_LOW, - [t_high2] = MAX1619_REG_W_REMOTE_HIGH, - [t_crit2] = MAX1619_REG_W_REMOTE_CRIT, - [t_hyst2] = MAX1619_REG_W_TCRIT_HYST, -}; - -static struct max1619_data *max1619_update_device(struct device *dev) +static int max1619_chip_read(struct regmap *regmap, u32 attr, long *val) { - struct max1619_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - int config, i; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { - dev_dbg(&client->dev, "Updating max1619 data.\n"); - for (i = 0; i < t_num_regs; i++) - data->temp[i] = i2c_smbus_read_byte_data(client, - regs_read[i]); - data->alarms = i2c_smbus_read_byte_data(client, - MAX1619_REG_R_STATUS); - /* If OVERT polarity is low, reverse alarm bit */ - config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG); - if (!(config & 0x20)) - data->alarms ^= 0x02; - - data->last_updated = jiffies; - data->valid = true; + int alarms, ret; + u32 regval; + + switch (attr) { + case hwmon_chip_update_interval: + ret = regmap_read(regmap, MAX1619_REG_CONVRATE, ®val); + if (ret < 0) + return ret; + *val = update_intervals[regval & 7]; + break; + case hwmon_chip_alarms: + alarms = get_alarms(regmap); + if (alarms < 0) + return alarms; + *val = alarms; + break; + default: + return -EOPNOTSUPP; } - - mutex_unlock(&data->update_lock); - - return data; + return 0; } -/* - * Sysfs stuff - */ - -static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, - char *buf) +static int max1619_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct max1619_data *data = max1619_update_device(dev); + struct regmap *regmap = dev_get_drvdata(dev); + + 
switch (type) { + case hwmon_chip: + return max1619_chip_read(regmap, attr, val); + case hwmon_temp: + return max1619_temp_read(regmap, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} - return sprintf(buf, "%d\n", temp_from_reg(data->temp[attr->index])); +static int max1619_chip_write(struct regmap *regmap, u32 attr, long val) +{ + switch (attr) { + case hwmon_chip_update_interval: + val = find_closest_descending(val, update_intervals, ARRAY_SIZE(update_intervals)); + return regmap_write(regmap, MAX1619_REG_CONVRATE, val); + default: + return -EOPNOTSUPP; + } } -static ssize_t temp_store(struct device *dev, - struct device_attribute *devattr, const char *buf, - size_t count) +static int max1619_temp_write(struct regmap *regmap, + u32 attr, int channel, long val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct max1619_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long val; - int err = kstrtol(buf, 10, &val); - if (err) - return err; - - mutex_lock(&data->update_lock); - data->temp[attr->index] = temp_to_reg(val); - i2c_smbus_write_byte_data(client, regs_write[attr->index], - data->temp[attr->index]); - mutex_unlock(&data->update_lock); - return count; + int reg; + + switch (attr) { + case hwmon_temp_min: + reg = MAX1619_REG_REMOTE_LOW; + break; + case hwmon_temp_max: + reg = MAX1619_REG_REMOTE_HIGH; + break; + case hwmon_temp_crit: + reg = MAX1619_REG_REMOTE_CRIT; + break; + case hwmon_temp_crit_hyst: + reg = MAX1619_REG_REMOTE_CRIT_HYST; + break; + default: + return -EOPNOTSUPP; + } + val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); + return regmap_write(regmap, reg, val); } -static ssize_t alarms_show(struct device *dev, struct device_attribute *attr, - char *buf) +static int max1619_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) { - struct max1619_data *data = max1619_update_device(dev); - return sprintf(buf, "%d\n", data->alarms); + struct regmap *regmap = dev_get_drvdata(dev); + + switch (type) { + case hwmon_chip: + return max1619_chip_write(regmap, attr, val); + case hwmon_temp: + return max1619_temp_write(regmap, attr, channel, val); + default: + return -EOPNOTSUPP; + } } -static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, - char *buf) +static umode_t max1619_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) { - int bitnr = to_sensor_dev_attr(attr)->index; - struct max1619_data *data = max1619_update_device(dev); - return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1); + switch (type) { + case hwmon_chip: + switch (attr) { + case hwmon_chip_update_interval: + return 0644; + case hwmon_chip_alarms: + return 0444; + default: + break; + } + break; + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + return 0644; + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_fault: + return 0444; + default: + break; + } + break; + default: + break; + } + return 0; } -static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, t_input1); -static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, t_input2); -static SENSOR_DEVICE_ATTR_RW(temp2_min, temp, t_low2); -static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, t_high2); -static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp, t_crit2); -static SENSOR_DEVICE_ATTR_RW(temp2_crit_hyst, temp, t_hyst2); - 
-static DEVICE_ATTR_RO(alarms); -static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 1); -static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 2); -static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, alarm, 3); -static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 4); - -static struct attribute *max1619_attrs[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp2_min.dev_attr.attr, - &sensor_dev_attr_temp2_max.dev_attr.attr, - &sensor_dev_attr_temp2_crit.dev_attr.attr, - &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, - - &dev_attr_alarms.attr, - &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_fault.dev_attr.attr, - &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, +static const struct hwmon_channel_info * const max1619_info[] = { + HWMON_CHANNEL_INFO(chip, HWMON_C_ALARMS | HWMON_C_UPDATE_INTERVAL), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_CRIT_ALARM | HWMON_T_FAULT), NULL }; -ATTRIBUTE_GROUPS(max1619); + +static const struct hwmon_ops max1619_hwmon_ops = { + .is_visible = max1619_is_visible, + .read = max1619_read, + .write = max1619_write, +}; + +static const struct hwmon_chip_info max1619_chip_info = { + .ops = &max1619_hwmon_ops, + .info = max1619_info, +}; /* Return 0 if detection is successful, -ENODEV otherwise */ static int max1619_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; - u8 reg_config, reg_convrate, reg_status, man_id, chip_id; + int regval; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; - /* detection */ - reg_config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG); - reg_convrate = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONVRATE); - reg_status = i2c_smbus_read_byte_data(client, MAX1619_REG_R_STATUS); - if ((reg_config & 0x03) != 0x00 - || reg_convrate > 0x07 || (reg_status & 0x61) != 0x00) { - dev_dbg(&adapter->dev, "MAX1619 detection failed at 0x%02x\n", - client->addr); + regval = i2c_smbus_read_byte_data(client, MAX1619_REG_CONFIG); + if (regval < 0 || (regval & 0x03)) + return -ENODEV; + regval = i2c_smbus_read_byte_data(client, MAX1619_REG_CONVRATE); + if (regval < 0 || regval > 0x07) + return -ENODEV; + regval = i2c_smbus_read_byte_data(client, MAX1619_REG_STATUS); + if (regval < 0 || (regval & 0x61)) return -ENODEV; - } - /* identification */ - man_id = i2c_smbus_read_byte_data(client, MAX1619_REG_R_MAN_ID); - chip_id = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CHIP_ID); - if (man_id != 0x4D || chip_id != 0x04) { - dev_info(&adapter->dev, - "Unsupported chip (man_id=0x%02X, chip_id=0x%02X).\n", - man_id, chip_id); + regval = i2c_smbus_read_byte_data(client, MAX1619_REG_MAN_ID); + if (regval != 0x4d) + return -ENODEV; + regval = i2c_smbus_read_byte_data(client, MAX1619_REG_CHIP_ID); + if (regval != 0x04) return -ENODEV; - } strscpy(info->type, "max1619", I2C_NAME_SIZE); return 0; } -static void max1619_init_client(struct i2c_client *client) +static int max1619_init_chip(struct regmap *regmap) { - u8 config; - - /* - * Start the conversions. 
- */ - i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONVRATE, - 5); /* 2 Hz */ - config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG); - if (config & 0x40) - i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONFIG, - config & 0xBF); /* run */ + int ret; + + ret = regmap_write(regmap, MAX1619_REG_CONVRATE, 5); /* 2 Hz */ + if (ret) + return ret; + + /* Start conversions */ + return regmap_clear_bits(regmap, MAX1619_REG_CONFIG, 0x40); } -static int max1619_probe(struct i2c_client *new_client) +/* regmap */ + +static int max1619_reg_read(void *context, unsigned int reg, unsigned int *val) { - struct max1619_data *data; - struct device *hwmon_dev; + int ret; - data = devm_kzalloc(&new_client->dev, sizeof(struct max1619_data), - GFP_KERNEL); - if (!data) - return -ENOMEM; + ret = i2c_smbus_read_byte_data(context, reg); + if (ret < 0) + return ret; + + *val = ret; + return 0; +} + +static int max1619_reg_write(void *context, unsigned int reg, unsigned int val) +{ + int offset = reg < MAX1619_REG_REMOTE_CRIT ? 6 : 2; + + return i2c_smbus_write_byte_data(context, reg + offset, val); +} + +static bool max1619_regmap_is_volatile(struct device *dev, unsigned int reg) +{ + return reg <= MAX1619_REG_STATUS; +} + +static bool max1619_regmap_is_writeable(struct device *dev, unsigned int reg) +{ + return reg > MAX1619_REG_STATUS && reg <= MAX1619_REG_REMOTE_CRIT_HYST; +} + +static const struct regmap_config max1619_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MAX1619_REG_REMOTE_CRIT_HYST, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = max1619_regmap_is_volatile, + .writeable_reg = max1619_regmap_is_writeable, +}; + +static const struct regmap_bus max1619_regmap_bus = { + .reg_write = max1619_reg_write, + .reg_read = max1619_reg_read, +}; + +static int max1619_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct regmap *regmap; + int ret; - data->client = new_client; - mutex_init(&data->update_lock); + regmap = devm_regmap_init(dev, &max1619_regmap_bus, client, + &max1619_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); - /* Initialize the MAX1619 chip */ - max1619_init_client(new_client); + ret = max1619_init_chip(regmap); + if (ret) + return ret; - hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev, - new_client->name, - data, - max1619_groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + regmap, &max1619_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); } diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c index 9fc583ebb11b27..a8197a86f55908 100644 --- a/drivers/hwmon/max1668.c +++ b/drivers/hwmon/max1668.c @@ -6,15 +6,15 @@ * some credit to Christoph Scheurer, but largely a rewrite */ -#include +#include +#include +#include +#include +#include #include +#include +#include #include -#include -#include -#include -#include -#include -#include /* Addresses to scan */ static const unsigned short max1668_addr_list[] = { @@ -30,14 +30,10 @@ static const unsigned short max1668_addr_list[] = { /* limits */ -/* write high limits */ -#define MAX1668_REG_LIMH_WR(nr) (0x13 + 2 * (nr)) -/* write low limits */ -#define MAX1668_REG_LIML_WR(nr) (0x14 + 2 * (nr)) -/* read high limits */ -#define MAX1668_REG_LIMH_RD(nr) (0x08 + 2 * (nr)) +/* high limits */ +#define MAX1668_REG_LIMH(nr) (0x08 + 2 * (nr)) /* read low limits */ -#define MAX1668_REG_LIML_RD(nr) (0x09 + 2 * (nr)) +#define MAX1668_REG_LIML(nr) (0x09 + 2 * (nr)) /* manufacturer and device ID 
Constants */ #define MAN_ID_MAXIM 0x4d @@ -50,309 +46,146 @@ static bool read_only; module_param(read_only, bool, 0); MODULE_PARM_DESC(read_only, "Don't set any values, read only mode"); -enum chips { max1668, max1805, max1989 }; - struct max1668_data { - struct i2c_client *client; - const struct attribute_group *groups[3]; - enum chips type; - - struct mutex update_lock; - bool valid; /* true if following fields are valid */ - unsigned long last_updated; /* In jiffies */ - - /* 1x local and 4x remote */ - s8 temp_max[5]; - s8 temp_min[5]; - s8 temp[5]; - u16 alarms; + struct regmap *regmap; + int channels; }; -static struct max1668_data *max1668_update_device(struct device *dev) +static int max1668_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) { struct max1668_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - struct max1668_data *ret = data; - s32 val; - int i; - - mutex_lock(&data->update_lock); - - if (data->valid && !time_after(jiffies, - data->last_updated + HZ + HZ / 2)) - goto abort; - - for (i = 0; i < 5; i++) { - val = i2c_smbus_read_byte_data(client, MAX1668_REG_TEMP(i)); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - data->temp[i] = (s8) val; - - val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIMH_RD(i)); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - data->temp_max[i] = (s8) val; - - val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIML_RD(i)); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - data->temp_min[i] = (s8) val; - } - - val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT1); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - data->alarms = val << 8; + struct regmap *regmap = data->regmap; + u32 regs[2] = { MAX1668_REG_STAT1, MAX1668_REG_TEMP(channel) }; + u8 regvals[2]; + u32 regval; + int ret; - val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT2); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; + switch (attr) { + case hwmon_temp_input: + ret = regmap_read(regmap, MAX1668_REG_TEMP(channel), &regval); + if (ret) + return ret; + *val = sign_extend32(regval, 7) * 1000; + break; + case hwmon_temp_min: + ret = regmap_read(regmap, MAX1668_REG_LIML(channel), &regval); + if (ret) + return ret; + *val = sign_extend32(regval, 7) * 1000; + break; + case hwmon_temp_max: + ret = regmap_read(regmap, MAX1668_REG_LIMH(channel), &regval); + if (ret) + return ret; + *val = sign_extend32(regval, 7) * 1000; + break; + case hwmon_temp_min_alarm: + ret = regmap_read(regmap, + channel ? MAX1668_REG_STAT2 : MAX1668_REG_STAT1, + &regval); + if (ret) + return ret; + if (channel) + *val = !!(regval & BIT(9 - channel * 2)); + else + *val = !!(regval & BIT(5)); + break; + case hwmon_temp_max_alarm: + ret = regmap_read(regmap, + channel ?
MAX1668_REG_STAT2 : MAX1668_REG_STAT1, + &regval); + if (ret) + return ret; + if (channel) + *val = !!(regval & BIT(8 - channel * 2)); + else + *val = !!(regval & BIT(6)); + break; + case hwmon_temp_fault: + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); + if (ret) + return ret; + *val = !!((regvals[0] & BIT(4)) && regvals[1] == 127); + break; + default: + return -EOPNOTSUPP; } - data->alarms |= val; - - data->last_updated = jiffies; - data->valid = true; -abort: - mutex_unlock(&data->update_lock); - - return ret; -} - -static ssize_t show_temp(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct max1668_data *data = max1668_update_device(dev); - - if (IS_ERR(data)) - return PTR_ERR(data); - - return sprintf(buf, "%d\n", data->temp[index] * 1000); -} - -static ssize_t show_temp_max(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct max1668_data *data = max1668_update_device(dev); - - if (IS_ERR(data)) - return PTR_ERR(data); - - return sprintf(buf, "%d\n", data->temp_max[index] * 1000); -} - -static ssize_t show_temp_min(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct max1668_data *data = max1668_update_device(dev); - - if (IS_ERR(data)) - return PTR_ERR(data); - - return sprintf(buf, "%d\n", data->temp_min[index] * 1000); -} - -static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, - char *buf) -{ - int index = to_sensor_dev_attr(attr)->index; - struct max1668_data *data = max1668_update_device(dev); - - if (IS_ERR(data)) - return PTR_ERR(data); - - return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1); -} - -static ssize_t show_fault(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct max1668_data *data = max1668_update_device(dev); - - if (IS_ERR(data)) - return PTR_ERR(data); - - return sprintf(buf, "%u\n", - (data->alarms & (1 << 12)) && data->temp[index] == 127); + return 0; } -static ssize_t set_temp_max(struct device *dev, - struct device_attribute *devattr, - const char *buf, size_t count) +static int max1668_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) { - int index = to_sensor_dev_attr(devattr)->index; struct max1668_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - long temp; - int ret; + struct regmap *regmap = data->regmap; - ret = kstrtol(buf, 10, &temp); - if (ret < 0) - return ret; + val = clamp_val(val / 1000, -128, 127); - mutex_lock(&data->update_lock); - data->temp_max[index] = clamp_val(temp/1000, -128, 127); - ret = i2c_smbus_write_byte_data(client, - MAX1668_REG_LIMH_WR(index), - data->temp_max[index]); - if (ret < 0) - count = ret; - mutex_unlock(&data->update_lock); - - return count; + switch (attr) { + case hwmon_temp_min: + return regmap_write(regmap, MAX1668_REG_LIML(channel), val); + case hwmon_temp_max: + return regmap_write(regmap, MAX1668_REG_LIMH(channel), val); + default: + return -EOPNOTSUPP; + } } -static ssize_t set_temp_min(struct device *dev, - struct device_attribute *devattr, - const char *buf, size_t count) +static umode_t max1668_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) { - int index = to_sensor_dev_attr(devattr)->index; - struct max1668_data *data = dev_get_drvdata(dev); - struct
i2c_client *client = data->client; - long temp; - int ret; - - ret = kstrtol(buf, 10, &temp); - if (ret < 0) - return ret; - - mutex_lock(&data->update_lock); - data->temp_min[index] = clamp_val(temp/1000, -128, 127); - ret = i2c_smbus_write_byte_data(client, - MAX1668_REG_LIML_WR(index), - data->temp_min[index]); - if (ret < 0) - count = ret; - mutex_unlock(&data->update_lock); - - return count; + const struct max1668_data *data = _data; + + if (channel >= data->channels) + return 0; + + switch (attr) { + case hwmon_temp_min: + case hwmon_temp_max: + return read_only ? 0444 : 0644; + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + return 0444; + case hwmon_temp_fault: + if (channel) + return 0444; + break; + default: + break; + } + return 0; } -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); -static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, - set_temp_max, 0); -static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, - set_temp_min, 0); -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1); -static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, - set_temp_max, 1); -static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min, - set_temp_min, 1); -static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2); -static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, - set_temp_max, 2); -static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min, - set_temp_min, 2); -static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3); -static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, - set_temp_max, 3); -static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min, - set_temp_min, 3); -static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4); -static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, - set_temp_max, 4); -static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min, - set_temp_min, 4); - -static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 14); -static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 13); -static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 7); -static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 6); -static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 5); -static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4); -static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_alarm, NULL, 3); -static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 2); -static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_alarm, NULL, 1); -static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 0); - -static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1); -static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2); -static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3); -static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_fault, NULL, 4); - -/* Attributes common to MAX1668, MAX1989 and MAX1805 */ -static struct attribute *max1668_attribute_common[] = { - &sensor_dev_attr_temp1_max.dev_attr.attr, - &sensor_dev_attr_temp1_min.dev_attr.attr, - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp2_max.dev_attr.attr, - &sensor_dev_attr_temp2_min.dev_attr.attr, - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp3_max.dev_attr.attr, - &sensor_dev_attr_temp3_min.dev_attr.attr, - &sensor_dev_attr_temp3_input.dev_attr.attr, - - 
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, - &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, - - &sensor_dev_attr_temp2_fault.dev_attr.attr, - &sensor_dev_attr_temp3_fault.dev_attr.attr, +static const struct hwmon_channel_info * const max1668_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT), NULL }; -/* Attributes not present on MAX1805 */ -static struct attribute *max1668_attribute_unique[] = { - &sensor_dev_attr_temp4_max.dev_attr.attr, - &sensor_dev_attr_temp4_min.dev_attr.attr, - &sensor_dev_attr_temp4_input.dev_attr.attr, - &sensor_dev_attr_temp5_max.dev_attr.attr, - &sensor_dev_attr_temp5_min.dev_attr.attr, - &sensor_dev_attr_temp5_input.dev_attr.attr, - - &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, - &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp5_min_alarm.dev_attr.attr, - - &sensor_dev_attr_temp4_fault.dev_attr.attr, - &sensor_dev_attr_temp5_fault.dev_attr.attr, - NULL +static const struct hwmon_ops max1668_hwmon_ops = { + .is_visible = max1668_is_visible, + .read = max1668_read, + .write = max1668_write, }; -static umode_t max1668_attribute_mode(struct kobject *kobj, - struct attribute *attr, int index) -{ - umode_t ret = S_IRUGO; - if (read_only) - return ret; - if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr || - attr == &sensor_dev_attr_temp2_max.dev_attr.attr || - attr == &sensor_dev_attr_temp3_max.dev_attr.attr || - attr == &sensor_dev_attr_temp4_max.dev_attr.attr || - attr == &sensor_dev_attr_temp5_max.dev_attr.attr || - attr == &sensor_dev_attr_temp1_min.dev_attr.attr || - attr == &sensor_dev_attr_temp2_min.dev_attr.attr || - attr == &sensor_dev_attr_temp3_min.dev_attr.attr || - attr == &sensor_dev_attr_temp4_min.dev_attr.attr || - attr == &sensor_dev_attr_temp5_min.dev_attr.attr) - ret |= S_IWUSR; - return ret; -} - -static const struct attribute_group max1668_group_common = { - .attrs = max1668_attribute_common, - .is_visible = max1668_attribute_mode -}; - -static const struct attribute_group max1668_group_unique = { - .attrs = max1668_attribute_unique, - .is_visible = max1668_attribute_mode +static const struct hwmon_chip_info max1668_chip_info = { + .ops = &max1668_hwmon_ops, + .info = max1668_info, }; /* Return 0 if detection is successful, -ENODEV otherwise */ @@ -391,6 +224,48 @@ static int max1668_detect(struct i2c_client *client, return 0; } +/* regmap */ + +static int max1668_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + int ret; + + ret = i2c_smbus_read_byte_data(context, reg); + if (ret < 0) + return ret; + + *val = ret; + return 0; +} + +static int max1668_reg_write(void *context, unsigned int reg, unsigned int val) +{ + return i2c_smbus_write_byte_data(context, reg + 11, val); +} + +static bool max1668_regmap_is_volatile(struct device *dev, unsigned int reg) +{ + return 
reg <= MAX1668_REG_STAT2; +} + +static bool max1668_regmap_is_writeable(struct device *dev, unsigned int reg) +{ + return reg > MAX1668_REG_STAT2 && reg <= MAX1668_REG_LIML(4); +} + +static const struct regmap_config max1668_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = max1668_regmap_is_volatile, + .writeable_reg = max1668_regmap_is_writeable, +}; + +static const struct regmap_bus max1668_regmap_bus = { + .reg_write = max1668_reg_write, + .reg_read = max1668_reg_read, +}; + static int max1668_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; @@ -405,24 +280,22 @@ static int max1668_probe(struct i2c_client *client) if (!data) return -ENOMEM; - data->client = client; - data->type = (uintptr_t)i2c_get_match_data(client); - mutex_init(&data->update_lock); + data->regmap = devm_regmap_init(dev, &max1668_regmap_bus, client, + &max1668_regmap_config); + if (IS_ERR(data->regmap)) + return PTR_ERR(data->regmap); - /* sysfs hooks */ - data->groups[0] = &max1668_group_common; - if (data->type == max1668 || data->type == max1989) - data->groups[1] = &max1668_group_unique; + data->channels = (uintptr_t)i2c_get_match_data(client); - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, data->groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, + &max1668_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); } static const struct i2c_device_id max1668_id[] = { - { "max1668", max1668 }, - { "max1805", max1805 }, - { "max1989", max1989 }, + { "max1668", 5 }, + { "max1805", 3 }, + { "max1989", 5 }, { } }; MODULE_DEVICE_TABLE(i2c, max1668_id); diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c index f54720d3d2ce34..c955b0f3a8d31e 100644 --- a/drivers/hwmon/max6639.c +++ b/drivers/hwmon/max6639.c @@ -88,25 +88,16 @@ struct max6639_data { static int max6639_temp_read_input(struct device *dev, int channel, long *temp) { + u32 regs[2] = { MAX6639_REG_TEMP_EXT(channel), MAX6639_REG_TEMP(channel) }; struct max6639_data *data = dev_get_drvdata(dev); - unsigned int val; + u8 regvals[2]; int res; - /* - * Lock isn't needed as MAX6639_REG_TEMP wpnt change for at least 250ms after reading - * MAX6639_REG_TEMP_EXT - */ - res = regmap_read(data->regmap, MAX6639_REG_TEMP_EXT(channel), &val); - if (res < 0) - return res; - - *temp = val >> 5; - res = regmap_read(data->regmap, MAX6639_REG_TEMP(channel), &val); + res = regmap_multi_reg_read(data->regmap, regs, regvals, 2); if (res < 0) return res; - *temp |= val << 3; - *temp *= 125; + *temp = ((regvals[0] >> 5) | (regvals[1] << 3)) * 125; return 0; } @@ -290,8 +281,10 @@ static umode_t max6639_fan_is_visible(const void *_data, u32 attr, int channel) static int max6639_read_pwm(struct device *dev, u32 attr, int channel, long *pwm_val) { + u32 regs[2] = { MAX6639_REG_FAN_CONFIG3(channel), MAX6639_REG_GCONFIG }; struct max6639_data *data = dev_get_drvdata(dev); unsigned int val; + u8 regvals[2]; int res; u8 i; @@ -303,26 +296,13 @@ static int max6639_read_pwm(struct device *dev, u32 attr, int channel, *pwm_val = val * 255 / 120; return 0; case hwmon_pwm_freq: - mutex_lock(&data->update_lock); - res = regmap_read(data->regmap, MAX6639_REG_FAN_CONFIG3(channel), &val); - if (res < 0) { - mutex_unlock(&data->update_lock); - return res; - } - i = val & MAX6639_FAN_CONFIG3_FREQ_MASK; - - res = regmap_read(data->regmap, MAX6639_REG_GCONFIG, &val); - if (res < 0) { - mutex_unlock(&data->update_lock); + res = 
regmap_multi_reg_read(data->regmap, regs, regvals, 2); + if (res < 0) return res; - } - - if (val & MAX6639_GCONFIG_PWM_FREQ_HI) + i = regvals[0] & MAX6639_FAN_CONFIG3_FREQ_MASK; + if (regvals[1] & MAX6639_GCONFIG_PWM_FREQ_HI) i |= 0x4; - i &= 0x7; *pwm_val = freq_table[i]; - - mutex_unlock(&data->update_lock); return 0; default: return -EOPNOTSUPP; diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index 20981f9443dd8b..0735a1d2c20fdc 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c @@ -6,18 +6,17 @@ * Copyright (c) 2011 David George */ -#include -#include -#include -#include -#include -#include -#include +#include +#include #include +#include +#include +#include +#include #include #include - -#include +#include +#include enum chips { max6581, max6602, max6622, max6636, max6689, max6693, max6694, max6697, max6698, max6699 }; @@ -33,21 +32,36 @@ static const u8 MAX6697_REG_MAX[] = { static const u8 MAX6697_REG_CRIT[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27 }; +#define MAX6697_REG_MIN 0x30 /* - * Map device tree / platform data register bit map to chip bit map. + * Map device tree / internal register bit map to chip bit map. * Applies to alert register and over-temperature register. */ + +#define MAX6697_EXTERNAL_MASK_DT GENMASK(7, 1) +#define MAX6697_LOCAL_MASK_DT BIT(0) +#define MAX6697_EXTERNAL_MASK_CHIP GENMASK(6, 0) +#define MAX6697_LOCAL_MASK_CHIP BIT(7) + +/* alert - local channel is in bit 6 */ #define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ (((reg) & 0x01) << 6) | ((reg) & 0x80)) -#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7)) -#define MAX6697_REG_STAT(n) (0x44 + (n)) +/* over-temperature - local channel is in bit 7 */ +#define MAX6697_OVERT_MAP_BITS(reg) \ + (FIELD_PREP(MAX6697_EXTERNAL_MASK_CHIP, FIELD_GET(MAX6697_EXTERNAL_MASK_DT, reg)) | \ + FIELD_PREP(MAX6697_LOCAL_MASK_CHIP, FIELD_GET(MAX6697_LOCAL_MASK_DT, reg))) + +#define MAX6697_REG_STAT_ALARM 0x44 +#define MAX6697_REG_STAT_CRIT 0x45 +#define MAX6697_REG_STAT_FAULT 0x46 +#define MAX6697_REG_STAT_MIN_ALARM 0x47 #define MAX6697_REG_CONFIG 0x41 -#define MAX6581_CONF_EXTENDED (1 << 1) -#define MAX6693_CONF_BETA (1 << 2) -#define MAX6697_CONF_RESISTANCE (1 << 3) -#define MAX6697_CONF_TIMEOUT (1 << 5) +#define MAX6581_CONF_EXTENDED BIT(1) +#define MAX6693_CONF_BETA BIT(2) +#define MAX6697_CONF_RESISTANCE BIT(3) +#define MAX6697_CONF_TIMEOUT BIT(5) #define MAX6697_REG_ALERT_MASK 0x42 #define MAX6697_REG_OVERT_MASK 0x43 @@ -67,24 +81,18 @@ struct max6697_chip_data { u32 have_crit; u32 have_fault; u8 valid_conf; - const u8 *alarm_map; }; struct max6697_data { - struct i2c_client *client; + struct regmap *regmap; enum chips type; const struct max6697_chip_data *chip; - int update_interval; /* in milli-seconds */ int temp_offset; /* in degrees C */ struct mutex update_lock; - unsigned long last_updated; /* In jiffies */ - bool valid; /* true if following fields are valid */ - /* 1x local and up to 7x remote */ - u8 temp[8][4]; /* [nr][0]=temp [1]=ext [2]=max [3]=crit */ #define MAX6697_TEMP_INPUT 0 #define MAX6697_TEMP_EXT 1 #define MAX6697_TEMP_MAX 2 @@ -92,11 +100,6 @@ struct max6697_data { u32 alarms; }; -/* Diode fault status bits on MAX6581 are right shifted by one bit */ -static const u8 max6581_alarm_map[] = { - 0, 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23 }; - static const struct max6697_chip_data max6697_chip_data[] = { [max6581] = { .channels = 8, @@ -104,7 +107,6 @@ static const struct 
max6697_chip_data max6697_chip_data[] = { .have_ext = 0x7f, .have_fault = 0xfe, .valid_conf = MAX6581_CONF_EXTENDED | MAX6697_CONF_TIMEOUT, - .alarm_map = max6581_alarm_map, }, [max6602] = { .channels = 5, @@ -173,545 +175,398 @@ static const struct max6697_chip_data max6697_chip_data[] = { }, }; -static inline int max6581_offset_to_millic(int val) +static int max6697_alarm_channel_map(int channel) { - return sign_extend32(val, 7) * 250; + switch (channel) { + case 0: + return 6; + case 7: + return 7; + default: + return channel - 1; + } } -static struct max6697_data *max6697_update_device(struct device *dev) +static int max6697_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) { + unsigned int offset_regs[2] = { MAX6581_REG_OFFSET_SELECT, MAX6581_REG_OFFSET }; + unsigned int temp_regs[2] = { MAX6697_REG_TEMP[channel], + MAX6697_REG_TEMP_EXT[channel] }; struct max6697_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - struct max6697_data *ret = data; - int val; - int i; - u32 alarms; + struct regmap *regmap = data->regmap; + u8 regdata[2] = { }; + u32 regval; + int ret; - mutex_lock(&data->update_lock); - - if (data->valid && - !time_after(jiffies, data->last_updated - + msecs_to_jiffies(data->update_interval))) - goto abort; - - for (i = 0; i < data->chip->channels; i++) { - if (data->chip->have_ext & (1 << i)) { - val = i2c_smbus_read_byte_data(client, - MAX6697_REG_TEMP_EXT[i]); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - data->temp[i][MAX6697_TEMP_EXT] = val; - } + switch (attr) { + case hwmon_temp_input: + ret = regmap_multi_reg_read(regmap, temp_regs, regdata, + data->chip->have_ext & BIT(channel) ? 2 : 1); + if (ret) + return ret; + *val = (((regdata[0] - data->temp_offset) << 3) | (regdata[1] >> 5)) * 125; + break; + case hwmon_temp_max: + ret = regmap_read(regmap, MAX6697_REG_MAX[channel], &regval); + if (ret) + return ret; + *val = ((int)regval - data->temp_offset) * 1000; + break; + case hwmon_temp_crit: + ret = regmap_read(regmap, MAX6697_REG_CRIT[channel], &regval); + if (ret) + return ret; + *val = ((int)regval - data->temp_offset) * 1000; + break; + case hwmon_temp_min: + ret = regmap_read(regmap, MAX6697_REG_MIN, &regval); + if (ret) + return ret; + *val = ((int)regval - data->temp_offset) * 1000; + break; + case hwmon_temp_offset: + ret = regmap_multi_reg_read(regmap, offset_regs, regdata, 2); + if (ret) + return ret; - val = i2c_smbus_read_byte_data(client, MAX6697_REG_TEMP[i]); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - data->temp[i][MAX6697_TEMP_INPUT] = val; + if (!(regdata[0] & BIT(channel - 1))) + regdata[1] = 0; - val = i2c_smbus_read_byte_data(client, MAX6697_REG_MAX[i]); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - data->temp[i][MAX6697_TEMP_MAX] = val; - - if (data->chip->have_crit & (1 << i)) { - val = i2c_smbus_read_byte_data(client, - MAX6697_REG_CRIT[i]); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - data->temp[i][MAX6697_TEMP_CRIT] = val; - } - } - - alarms = 0; - for (i = 0; i < 3; i++) { - val = i2c_smbus_read_byte_data(client, MAX6697_REG_STAT(i)); - if (unlikely(val < 0)) { - ret = ERR_PTR(val); - goto abort; - } - alarms = (alarms << 8) | val; + *val = sign_extend32(regdata[1], 7) * 250; + break; + case hwmon_temp_fault: + ret = regmap_read(regmap, MAX6697_REG_STAT_FAULT, &regval); + if (ret) + return ret; + if (data->type == max6581) + *val = !!(regval & BIT(channel - 1)); + else + *val = !!(regval
& BIT(channel)); + break; + case hwmon_temp_crit_alarm: + ret = regmap_read(regmap, MAX6697_REG_STAT_CRIT, &regval); + if (ret) + return ret; + /* + * In the MAX6581 datasheet revision 0 to 3, the local channel + * overtemperature status is reported in bit 6 of register 0x45, + * and the overtemperature status for remote channel 7 is + * reported in bit 7. In Revision 4 and later, the local channel + * overtemperature status is reported in bit 7, and the remote + * channel 7 overtemperature status is reported in bit 6. A real + * chip was found to match the functionality documented in + * Revision 4 and later. + */ + *val = !!(regval & BIT(channel ? channel - 1 : 7)); + break; + case hwmon_temp_max_alarm: + ret = regmap_read(regmap, MAX6697_REG_STAT_ALARM, &regval); + if (ret) + return ret; + *val = !!(regval & BIT(max6697_alarm_channel_map(channel))); + break; + case hwmon_temp_min_alarm: + ret = regmap_read(regmap, MAX6697_REG_STAT_MIN_ALARM, &regval); + if (ret) + return ret; + *val = !!(regval & BIT(max6697_alarm_channel_map(channel))); + break; + default: + return -EOPNOTSUPP; } - data->alarms = alarms; - data->last_updated = jiffies; - data->valid = true; -abort: - mutex_unlock(&data->update_lock); - - return ret; -} - -static ssize_t temp_input_show(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - int index = to_sensor_dev_attr(devattr)->index; - struct max6697_data *data = max6697_update_device(dev); - int temp; - - if (IS_ERR(data)) - return PTR_ERR(data); - - temp = (data->temp[index][MAX6697_TEMP_INPUT] - data->temp_offset) << 3; - temp |= data->temp[index][MAX6697_TEMP_EXT] >> 5; - - return sprintf(buf, "%d\n", temp * 125); -} - -static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, - char *buf) -{ - int nr = to_sensor_dev_attr_2(devattr)->nr; - int index = to_sensor_dev_attr_2(devattr)->index; - struct max6697_data *data = max6697_update_device(dev); - int temp; - - if (IS_ERR(data)) - return PTR_ERR(data); - - temp = data->temp[nr][index]; - temp -= data->temp_offset; - - return sprintf(buf, "%d\n", temp * 1000); -} - -static ssize_t alarm_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - int index = to_sensor_dev_attr(attr)->index; - struct max6697_data *data = max6697_update_device(dev); - - if (IS_ERR(data)) - return PTR_ERR(data); - - if (data->chip->alarm_map) - index = data->chip->alarm_map[index]; - - return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1); + return 0; } -static ssize_t temp_store(struct device *dev, - struct device_attribute *devattr, const char *buf, - size_t count) +static int max6697_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) { - int nr = to_sensor_dev_attr_2(devattr)->nr; - int index = to_sensor_dev_attr_2(devattr)->index; struct max6697_data *data = dev_get_drvdata(dev); - long temp; + struct regmap *regmap = data->regmap; int ret; - ret = kstrtol(buf, 10, &temp); - if (ret < 0) - return ret; - - mutex_lock(&data->update_lock); - temp = clamp_val(temp, -1000000, 1000000); /* prevent underflow */ - temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset; - temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127); - data->temp[nr][index] = temp; - ret = i2c_smbus_write_byte_data(data->client, - index == 2 ? MAX6697_REG_MAX[nr] - : MAX6697_REG_CRIT[nr], - temp); - mutex_unlock(&data->update_lock); - - return ret < 0 ?
ret : count; -} - -static ssize_t offset_store(struct device *dev, struct device_attribute *devattr, const char *buf, - size_t count) -{ - int val, ret, index, select; - struct max6697_data *data; - bool channel_enabled; - long temp; - - index = to_sensor_dev_attr(devattr)->index; - data = dev_get_drvdata(dev); - ret = kstrtol(buf, 10, &temp); - if (ret < 0) + switch (attr) { + case hwmon_temp_max: + val = clamp_val(val, -1000000, 1000000); /* prevent underflow */ + val = DIV_ROUND_CLOSEST(val, 1000) + data->temp_offset; + val = clamp_val(val, 0, data->type == max6581 ? 255 : 127); + return regmap_write(regmap, MAX6697_REG_MAX[channel], val); + case hwmon_temp_crit: + val = clamp_val(val, -1000000, 1000000); /* prevent underflow */ + val = DIV_ROUND_CLOSEST(val, 1000) + data->temp_offset; + val = clamp_val(val, 0, data->type == max6581 ? 255 : 127); + return regmap_write(regmap, MAX6697_REG_CRIT[channel], val); + case hwmon_temp_min: + val = clamp_val(val, -1000000, 1000000); /* prevent underflow */ + val = DIV_ROUND_CLOSEST(val, 1000) + data->temp_offset; + val = clamp_val(val, 0, 255); + return regmap_write(regmap, MAX6697_REG_MIN, val); + case hwmon_temp_offset: + mutex_lock(&data->update_lock); + val = clamp_val(val, MAX6581_OFFSET_MIN, MAX6581_OFFSET_MAX); + val = DIV_ROUND_CLOSEST(val, 250); + if (!val) { /* disable this (and only this) channel */ + ret = regmap_clear_bits(regmap, MAX6581_REG_OFFSET_SELECT, + BIT(channel - 1)); + } else { + /* enable channel and update offset */ + ret = regmap_set_bits(regmap, MAX6581_REG_OFFSET_SELECT, + BIT(channel - 1)); + if (ret) + goto unlock; + ret = regmap_write(regmap, MAX6581_REG_OFFSET, val); + } +unlock: + mutex_unlock(&data->update_lock); return ret; - - mutex_lock(&data->update_lock); - select = i2c_smbus_read_byte_data(data->client, MAX6581_REG_OFFSET_SELECT); - if (select < 0) { - ret = select; - goto abort; - } - channel_enabled = (select & (1 << (index - 1))); - temp = clamp_val(temp, MAX6581_OFFSET_MIN, MAX6581_OFFSET_MAX); - val = DIV_ROUND_CLOSEST(temp, 250); - /* disable the offset for channel if the new offset is 0 */ - if (val == 0) { - if (channel_enabled) - ret = i2c_smbus_write_byte_data(data->client, MAX6581_REG_OFFSET_SELECT, - select & ~(1 << (index - 1))); - ret = ret < 0 ? ret : count; - goto abort; + default: + return -EOPNOTSUPP; } - if (!channel_enabled) { - ret = i2c_smbus_write_byte_data(data->client, MAX6581_REG_OFFSET_SELECT, - select | (1 << (index - 1))); - if (ret < 0) - goto abort; - } - ret = i2c_smbus_write_byte_data(data->client, MAX6581_REG_OFFSET, val); - ret = ret < 0 ? ret : count; - -abort: - mutex_unlock(&data->update_lock); - return ret; } -static ssize_t offset_show(struct device *dev, struct device_attribute *devattr, char *buf) +static umode_t max6697_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) { - struct max6697_data *data; - int select, ret, index; - - index = to_sensor_dev_attr(devattr)->index; - data = dev_get_drvdata(dev); - mutex_lock(&data->update_lock); - select = i2c_smbus_read_byte_data(data->client, MAX6581_REG_OFFSET_SELECT); - if (select < 0) - ret = select; - else if (select & (1 << (index - 1))) - ret = i2c_smbus_read_byte_data(data->client, MAX6581_REG_OFFSET); - else - ret = 0; - mutex_unlock(&data->update_lock); - return ret < 0 ? 
ret : sprintf(buf, "%d\n", max6581_offset_to_millic(ret)); -} - -static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0); -static SENSOR_DEVICE_ATTR_2_RW(temp1_max, temp, 0, MAX6697_TEMP_MAX); -static SENSOR_DEVICE_ATTR_2_RW(temp1_crit, temp, 0, MAX6697_TEMP_CRIT); - -static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1); -static SENSOR_DEVICE_ATTR_2_RW(temp2_max, temp, 1, MAX6697_TEMP_MAX); -static SENSOR_DEVICE_ATTR_2_RW(temp2_crit, temp, 1, MAX6697_TEMP_CRIT); - -static SENSOR_DEVICE_ATTR_RO(temp3_input, temp_input, 2); -static SENSOR_DEVICE_ATTR_2_RW(temp3_max, temp, 2, MAX6697_TEMP_MAX); -static SENSOR_DEVICE_ATTR_2_RW(temp3_crit, temp, 2, MAX6697_TEMP_CRIT); - -static SENSOR_DEVICE_ATTR_RO(temp4_input, temp_input, 3); -static SENSOR_DEVICE_ATTR_2_RW(temp4_max, temp, 3, MAX6697_TEMP_MAX); -static SENSOR_DEVICE_ATTR_2_RW(temp4_crit, temp, 3, MAX6697_TEMP_CRIT); - -static SENSOR_DEVICE_ATTR_RO(temp5_input, temp_input, 4); -static SENSOR_DEVICE_ATTR_2_RW(temp5_max, temp, 4, MAX6697_TEMP_MAX); -static SENSOR_DEVICE_ATTR_2_RW(temp5_crit, temp, 4, MAX6697_TEMP_CRIT); - -static SENSOR_DEVICE_ATTR_RO(temp6_input, temp_input, 5); -static SENSOR_DEVICE_ATTR_2_RW(temp6_max, temp, 5, MAX6697_TEMP_MAX); -static SENSOR_DEVICE_ATTR_2_RW(temp6_crit, temp, 5, MAX6697_TEMP_CRIT); - -static SENSOR_DEVICE_ATTR_RO(temp7_input, temp_input, 6); -static SENSOR_DEVICE_ATTR_2_RW(temp7_max, temp, 6, MAX6697_TEMP_MAX); -static SENSOR_DEVICE_ATTR_2_RW(temp7_crit, temp, 6, MAX6697_TEMP_CRIT); - -static SENSOR_DEVICE_ATTR_RO(temp8_input, temp_input, 7); -static SENSOR_DEVICE_ATTR_2_RW(temp8_max, temp, 7, MAX6697_TEMP_MAX); -static SENSOR_DEVICE_ATTR_2_RW(temp8_crit, temp, 7, MAX6697_TEMP_CRIT); - -static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 22); -static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 16); -static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 17); -static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, alarm, 18); -static SENSOR_DEVICE_ATTR_RO(temp5_max_alarm, alarm, 19); -static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, alarm, 20); -static SENSOR_DEVICE_ATTR_RO(temp7_max_alarm, alarm, 21); -static SENSOR_DEVICE_ATTR_RO(temp8_max_alarm, alarm, 23); - -static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 15); -static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8); -static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 9); -static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 10); -static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, alarm, 11); -static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, alarm, 12); -static SENSOR_DEVICE_ATTR_RO(temp7_crit_alarm, alarm, 13); -static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 14); - -static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 1); -static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2); -static SENSOR_DEVICE_ATTR_RO(temp4_fault, alarm, 3); -static SENSOR_DEVICE_ATTR_RO(temp5_fault, alarm, 4); -static SENSOR_DEVICE_ATTR_RO(temp6_fault, alarm, 5); -static SENSOR_DEVICE_ATTR_RO(temp7_fault, alarm, 6); -static SENSOR_DEVICE_ATTR_RO(temp8_fault, alarm, 7); - -/* There is no offset for local temperature so starting from temp2 */ -static SENSOR_DEVICE_ATTR_RW(temp2_offset, offset, 1); -static SENSOR_DEVICE_ATTR_RW(temp3_offset, offset, 2); -static SENSOR_DEVICE_ATTR_RW(temp4_offset, offset, 3); -static SENSOR_DEVICE_ATTR_RW(temp5_offset, offset, 4); -static SENSOR_DEVICE_ATTR_RW(temp6_offset, offset, 5); -static SENSOR_DEVICE_ATTR_RW(temp7_offset, offset, 6); -static SENSOR_DEVICE_ATTR_RW(temp8_offset, offset, 7); - -static DEVICE_ATTR(dummy, 0, NULL, NULL); - 
-static umode_t max6697_is_visible(struct kobject *kobj, struct attribute *attr, - int index) -{ - struct device *dev = kobj_to_dev(kobj); - struct max6697_data *data = dev_get_drvdata(dev); + const struct max6697_data *data = _data; const struct max6697_chip_data *chip = data->chip; - int channel = index / 7; /* channel number */ - int nr = index % 7; /* attribute index within channel */ if (channel >= chip->channels) return 0; - if ((nr == 3 || nr == 4) && !(chip->have_crit & (1 << channel))) - return 0; - if (nr == 5 && !(chip->have_fault & (1 << channel))) - return 0; - /* offset reg is only supported on max6581 remote channels */ - if (nr == 6) - if (data->type != max6581 || channel == 0) - return 0; - - return attr->mode; + switch (attr) { + case hwmon_temp_max: + return 0644; + case hwmon_temp_input: + case hwmon_temp_max_alarm: + return 0444; + case hwmon_temp_min: + if (data->type == max6581) + return channel ? 0444 : 0644; + break; + case hwmon_temp_min_alarm: + if (data->type == max6581) + return 0444; + break; + case hwmon_temp_crit: + if (chip->have_crit & BIT(channel)) + return 0644; + break; + case hwmon_temp_crit_alarm: + if (chip->have_crit & BIT(channel)) + return 0444; + break; + case hwmon_temp_fault: + if (chip->have_fault & BIT(channel)) + return 0444; + break; + case hwmon_temp_offset: + if (data->type == max6581 && channel) + return 0644; + break; + default: + break; + } + return 0; } -/* - * max6697_is_visible uses the index into the following array to determine - * if attributes should be created or not. Any change in order or content - * must be matched in max6697_is_visible. - */ -static struct attribute *max6697_attributes[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp1_max.dev_attr.attr, - &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, - &dev_attr_dummy.attr, - &dev_attr_dummy.attr, - - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp2_max.dev_attr.attr, - &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_crit.dev_attr.attr, - &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp2_fault.dev_attr.attr, - &sensor_dev_attr_temp2_offset.dev_attr.attr, - - &sensor_dev_attr_temp3_input.dev_attr.attr, - &sensor_dev_attr_temp3_max.dev_attr.attr, - &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp3_crit.dev_attr.attr, - &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp3_fault.dev_attr.attr, - &sensor_dev_attr_temp3_offset.dev_attr.attr, - - &sensor_dev_attr_temp4_input.dev_attr.attr, - &sensor_dev_attr_temp4_max.dev_attr.attr, - &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp4_crit.dev_attr.attr, - &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp4_fault.dev_attr.attr, - &sensor_dev_attr_temp4_offset.dev_attr.attr, - - &sensor_dev_attr_temp5_input.dev_attr.attr, - &sensor_dev_attr_temp5_max.dev_attr.attr, - &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp5_crit.dev_attr.attr, - &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp5_fault.dev_attr.attr, - &sensor_dev_attr_temp5_offset.dev_attr.attr, - - &sensor_dev_attr_temp6_input.dev_attr.attr, - &sensor_dev_attr_temp6_max.dev_attr.attr, - &sensor_dev_attr_temp6_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp6_crit.dev_attr.attr, - &sensor_dev_attr_temp6_crit_alarm.dev_attr.attr, - 
&sensor_dev_attr_temp6_fault.dev_attr.attr, - &sensor_dev_attr_temp6_offset.dev_attr.attr, - - &sensor_dev_attr_temp7_input.dev_attr.attr, - &sensor_dev_attr_temp7_max.dev_attr.attr, - &sensor_dev_attr_temp7_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp7_crit.dev_attr.attr, - &sensor_dev_attr_temp7_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp7_fault.dev_attr.attr, - &sensor_dev_attr_temp7_offset.dev_attr.attr, - - &sensor_dev_attr_temp8_input.dev_attr.attr, - &sensor_dev_attr_temp8_max.dev_attr.attr, - &sensor_dev_attr_temp8_max_alarm.dev_attr.attr, - &sensor_dev_attr_temp8_crit.dev_attr.attr, - &sensor_dev_attr_temp8_crit_alarm.dev_attr.attr, - &sensor_dev_attr_temp8_fault.dev_attr.attr, - &sensor_dev_attr_temp8_offset.dev_attr.attr, +/* Return 0 if detection is successful, -ENODEV otherwise */ +static const struct hwmon_channel_info * const max6697_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_FAULT | HWMON_T_OFFSET, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_FAULT | HWMON_T_OFFSET, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_FAULT | HWMON_T_OFFSET, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_FAULT | HWMON_T_OFFSET, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_FAULT | HWMON_T_OFFSET, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_FAULT | HWMON_T_OFFSET, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | + HWMON_T_MIN | HWMON_T_MIN_ALARM | + HWMON_T_FAULT | HWMON_T_OFFSET), NULL }; -static const struct attribute_group max6697_group = { - .attrs = max6697_attributes, .is_visible = max6697_is_visible, +static const struct hwmon_ops max6697_hwmon_ops = { + .is_visible = max6697_is_visible, + .read = max6697_read, + .write = max6697_write, }; -__ATTRIBUTE_GROUPS(max6697); -static void max6697_get_config_of(struct device_node *node, - struct max6697_platform_data *pdata) -{ - int len; - const __be32 *prop; - - pdata->smbus_timeout_disable = - of_property_read_bool(node, "smbus-timeout-disable"); - pdata->extended_range_enable = - of_property_read_bool(node, "extended-range-enable"); - pdata->beta_compensation = - of_property_read_bool(node, "beta-compensation-enable"); - - prop = of_get_property(node, "alert-mask", &len); - if (prop && len == sizeof(u32)) - pdata->alert_mask = be32_to_cpu(prop[0]); - prop = of_get_property(node, "over-temperature-mask", &len); - if (prop && len == sizeof(u32)) - pdata->over_temperature_mask = be32_to_cpu(prop[0]); - prop = of_get_property(node, "resistance-cancellation", &len); - if (prop) { - if (len == sizeof(u32)) - pdata->resistance_cancellation = be32_to_cpu(prop[0]); - else - pdata->resistance_cancellation = 0xfe; - } - prop = of_get_property(node, "transistor-ideality", &len); - if (prop && len == 2 * sizeof(u32)) { - pdata->ideality_mask = be32_to_cpu(prop[0]); - pdata->ideality_value = be32_to_cpu(prop[1]); - 
} -} +static const struct hwmon_chip_info max6697_chip_info = { + .ops = &max6697_hwmon_ops, + .info = max6697_info, +}; -static int max6697_init_chip(struct max6697_data *data, - struct i2c_client *client) +static int max6697_config_of(struct device_node *node, struct max6697_data *data) { - struct max6697_platform_data *pdata = dev_get_platdata(&client->dev); - struct max6697_platform_data p; const struct max6697_chip_data *chip = data->chip; - int factor = chip->channels; - int ret, reg; + struct regmap *regmap = data->regmap; + int ret, confreg; + u32 vals[2]; - /* - * Don't touch configuration if neither platform data nor OF - * configuration was specified. If that is the case, use the - * current chip configuration. - */ - if (!pdata && !client->dev.of_node) { - reg = i2c_smbus_read_byte_data(client, MAX6697_REG_CONFIG); - if (reg < 0) - return reg; - if (data->type == max6581) { - if (reg & MAX6581_CONF_EXTENDED) - data->temp_offset = 64; - reg = i2c_smbus_read_byte_data(client, - MAX6581_REG_RESISTANCE); - if (reg < 0) - return reg; - factor += hweight8(reg); - } else { - if (reg & MAX6697_CONF_RESISTANCE) - factor++; - } - goto done; - } - - if (client->dev.of_node) { - memset(&p, 0, sizeof(p)); - max6697_get_config_of(client->dev.of_node, &p); - pdata = &p; - } - - reg = 0; - if (pdata->smbus_timeout_disable && + confreg = 0; + if (of_property_read_bool(node, "smbus-timeout-disable") && (chip->valid_conf & MAX6697_CONF_TIMEOUT)) { - reg |= MAX6697_CONF_TIMEOUT; + confreg |= MAX6697_CONF_TIMEOUT; } - if (pdata->extended_range_enable && + if (of_property_read_bool(node, "extended-range-enable") && (chip->valid_conf & MAX6581_CONF_EXTENDED)) { - reg |= MAX6581_CONF_EXTENDED; + confreg |= MAX6581_CONF_EXTENDED; data->temp_offset = 64; } - if (pdata->resistance_cancellation && - (chip->valid_conf & MAX6697_CONF_RESISTANCE)) { - reg |= MAX6697_CONF_RESISTANCE; - factor++; - } - if (pdata->beta_compensation && + if (of_property_read_bool(node, "beta-compensation-enable") && (chip->valid_conf & MAX6693_CONF_BETA)) { - reg |= MAX6693_CONF_BETA; + confreg |= MAX6693_CONF_BETA; } - ret = i2c_smbus_write_byte_data(client, MAX6697_REG_CONFIG, reg); - if (ret < 0) + if (of_property_read_u32(node, "alert-mask", vals)) + vals[0] = 0; + ret = regmap_write(regmap, MAX6697_REG_ALERT_MASK, + MAX6697_ALERT_MAP_BITS(vals[0])); + if (ret) return ret; - ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK, - MAX6697_ALERT_MAP_BITS(pdata->alert_mask)); - if (ret < 0) + if (of_property_read_u32(node, "over-temperature-mask", vals)) + vals[0] = 0; + ret = regmap_write(regmap, MAX6697_REG_OVERT_MASK, + MAX6697_OVERT_MAP_BITS(vals[0])); + if (ret) return ret; - ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK, - MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask)); - if (ret < 0) - return ret; + if (data->type != max6581) { + if (of_property_read_bool(node, "resistance-cancellation") && + chip->valid_conf & MAX6697_CONF_RESISTANCE) { + confreg |= MAX6697_CONF_RESISTANCE; + } + } else { + if (of_property_read_u32(node, "resistance-cancellation", &vals[0])) { + if (of_property_read_bool(node, "resistance-cancellation")) + vals[0] = 0xfe; + else + vals[0] = 0; + } - if (data->type == max6581) { - factor += hweight8(pdata->resistance_cancellation >> 1); - ret = i2c_smbus_write_byte_data(client, MAX6581_REG_RESISTANCE, - pdata->resistance_cancellation >> 1); + vals[0] &= 0xfe; + ret = regmap_write(regmap, MAX6581_REG_RESISTANCE, vals[0] >> 1); if (ret < 0) return ret; - ret = 
i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY, - pdata->ideality_value); + + if (of_property_read_u32_array(node, "transistor-ideality", vals, 2)) { + vals[0] = 0; + vals[1] = 0; + } + + ret = regmap_write(regmap, MAX6581_REG_IDEALITY, vals[1]); if (ret < 0) return ret; - ret = i2c_smbus_write_byte_data(client, - MAX6581_REG_IDEALITY_SELECT, - pdata->ideality_mask >> 1); + ret = regmap_write(regmap, MAX6581_REG_IDEALITY_SELECT, + (vals[0] & 0xfe) >> 1); if (ret < 0) return ret; } -done: - data->update_interval = factor * MAX6697_CONV_TIME; - return 0; + return regmap_write(regmap, MAX6697_REG_CONFIG, confreg); } +static int max6697_init_chip(struct device_node *np, struct max6697_data *data) +{ + unsigned int reg; + int ret; + + /* + * Don't touch configuration if there is no devicetree configuration. + * If that is the case, use the current chip configuration. + */ + if (!np) { + struct regmap *regmap = data->regmap; + + ret = regmap_read(regmap, MAX6697_REG_CONFIG, &reg); + if (ret < 0) + return ret; + if (data->type == max6581) { + if (reg & MAX6581_CONF_EXTENDED) + data->temp_offset = 64; + ret = regmap_read(regmap, MAX6581_REG_RESISTANCE, &reg); + } + } else { + ret = max6697_config_of(np, data); + } + + return ret; +} + +static bool max6697_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case 0x00 ... 0x09: /* temperature high bytes */ + case 0x44 ... 0x47: /* status */ + case 0x51 ... 0x58: /* temperature low bytes */ + return true; + default: + return false; + } +} + +static bool max6697_writeable_reg(struct device *dev, unsigned int reg) +{ + return reg != 0x0a && reg != 0x0f && !max6697_volatile_reg(dev, reg); +} + +static const struct regmap_config max6697_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x58, + .writeable_reg = max6697_writeable_reg, + .volatile_reg = max6697_volatile_reg, + .cache_type = REGCACHE_MAPLE, +}; + static int max6697_probe(struct i2c_client *client) { - struct i2c_adapter *adapter = client->adapter; struct device *dev = &client->dev; struct max6697_data *data; struct device *hwmon_dev; + struct regmap *regmap; int err; - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; + regmap = regmap_init_i2c(client, &max6697_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); data = devm_kzalloc(dev, sizeof(struct max6697_data), GFP_KERNEL); if (!data) return -ENOMEM; + data->regmap = regmap; data->type = (uintptr_t)i2c_get_match_data(client); data->chip = &max6697_chip_data[data->type]; - data->client = client; mutex_init(&data->update_lock); - err = max6697_init_chip(data, client); + err = max6697_init_chip(client->dev.of_node, data); if (err) return err; - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, - max6697_groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, + &max6697_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); } diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c index 9aa4dcf4a6f336..096f1daa8f2bcf 100644 --- a/drivers/hwmon/nct6775-platform.c +++ b/drivers/hwmon/nct6775-platform.c @@ -1269,6 +1269,7 @@ static const char * const asus_msi_boards[] = { "EX-B760M-V5 D4", "EX-H510M-V3", "EX-H610M-V3 D4", + "G15CF", "PRIME A620M-A", "PRIME B560-PLUS", "PRIME B560-PLUS AC-HES", diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index 97e8c642440313..8c9351da12c6e7 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -229,41 +229,34 @@
static int nct7802_read_temp(struct nct7802_data *data, static int nct7802_read_fan(struct nct7802_data *data, u8 reg_fan) { - unsigned int f1, f2; + unsigned int regs[2] = {reg_fan, REG_FANCOUNT_LOW}; + u8 f[2]; int ret; - mutex_lock(&data->access_lock); - ret = regmap_read(data->regmap, reg_fan, &f1); - if (ret < 0) - goto abort; - ret = regmap_read(data->regmap, REG_FANCOUNT_LOW, &f2); - if (ret < 0) - goto abort; - ret = (f1 << 5) | (f2 >> 3); + ret = regmap_multi_reg_read(data->regmap, regs, f, 2); + if (ret) + return ret; + ret = (f[0] << 5) | (f[1] >> 3); /* convert fan count to rpm */ if (ret == 0x1fff) /* maximum value, assume fan is stopped */ ret = 0; else if (ret) ret = DIV_ROUND_CLOSEST(1350000U, ret); -abort: - mutex_unlock(&data->access_lock); return ret; } static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low, u8 reg_fan_high) { - unsigned int f1, f2; + unsigned int regs[2] = {reg_fan_low, reg_fan_high}; + u8 f[2]; int ret; - mutex_lock(&data->access_lock); - ret = regmap_read(data->regmap, reg_fan_low, &f1); - if (ret < 0) - goto abort; - ret = regmap_read(data->regmap, reg_fan_high, &f2); + ret = regmap_multi_reg_read(data->regmap, regs, f, 2); if (ret < 0) - goto abort; - ret = f1 | ((f2 & 0xf8) << 5); + return ret; + + ret = f[0] | ((f[1] & 0xf8) << 5); /* convert fan count to rpm */ if (ret == 0x1fff) /* maximum value, assume no limit */ ret = 0; @@ -271,8 +264,6 @@ static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low, ret = DIV_ROUND_CLOSEST(1350000U, ret); else ret = 1350000U; -abort: - mutex_unlock(&data->access_lock); return ret; } @@ -302,33 +293,26 @@ static u8 nct7802_vmul[] = { 4, 2, 2, 2, 2 }; static int nct7802_read_voltage(struct nct7802_data *data, int nr, int index) { - unsigned int v1, v2; + u8 v[2]; int ret; - mutex_lock(&data->access_lock); if (index == 0) { /* voltage */ - ret = regmap_read(data->regmap, REG_VOLTAGE[nr], &v1); - if (ret < 0) - goto abort; - ret = regmap_read(data->regmap, REG_VOLTAGE_LOW, &v2); + unsigned int regs[2] = {REG_VOLTAGE[nr], REG_VOLTAGE_LOW}; + + ret = regmap_multi_reg_read(data->regmap, regs, v, 2); if (ret < 0) - goto abort; - ret = ((v1 << 2) | (v2 >> 6)) * nct7802_vmul[nr]; + return ret; + ret = ((v[0] << 2) | (v[1] >> 6)) * nct7802_vmul[nr]; } else { /* limit */ int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; + unsigned int regs[2] = {REG_VOLTAGE_LIMIT_LSB[index - 1][nr], + REG_VOLTAGE_LIMIT_MSB[nr]}; - ret = regmap_read(data->regmap, - REG_VOLTAGE_LIMIT_LSB[index - 1][nr], &v1); - if (ret < 0) - goto abort; - ret = regmap_read(data->regmap, REG_VOLTAGE_LIMIT_MSB[nr], - &v2); + ret = regmap_multi_reg_read(data->regmap, regs, v, 2); if (ret < 0) - goto abort; - ret = (v1 | ((v2 << shift) & 0x300)) * nct7802_vmul[nr]; + return ret; + ret = (v[0] | ((v[1] << shift) & 0x300)) * nct7802_vmul[nr]; } -abort: - mutex_unlock(&data->access_lock); return ret; } @@ -1145,17 +1129,14 @@ static int nct7802_configure_channels(struct device *dev, { /* Enable local temperature sensor by default */ u8 mode_mask = MODE_LTD_EN, mode_val = MODE_LTD_EN; - struct device_node *node; int err; if (dev->of_node) { - for_each_child_of_node(dev->of_node, node) { + for_each_child_of_node_scoped(dev->of_node, node) { err = nct7802_get_channel_config(dev, node, &mode_mask, &mode_val); - if (err) { - of_node_put(node); + if (err) return err; - } } } diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c index bc8db1dc595db1..db3b551828ebd2 100644 --- 
a/drivers/hwmon/npcm750-pwm-fan.c +++ b/drivers/hwmon/npcm750-pwm-fan.c @@ -927,7 +927,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev, static int npcm7xx_pwm_fan_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np, *child; + struct device_node *np; struct npcm7xx_pwm_fan_data *data; struct resource *res; struct device *hwmon; @@ -1004,11 +1004,10 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev) } } - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { ret = npcm7xx_en_pwm_fan(dev, child, data); if (ret) { dev_err(dev, "enable pwm and fan failed\n"); - of_node_put(child); return ret; } } diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index ef75b63f5894e5..b5352900463fb9 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -62,6 +62,7 @@ static const struct platform_device_id ntc_thermistor_id[] = { [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 }, [NTC_LAST] = { }, }; +MODULE_DEVICE_TABLE(platform, ntc_thermistor_id); /* * A compensation table should be sorted by the values of .ohm diff --git a/drivers/hwmon/nzxt-kraken2.c b/drivers/hwmon/nzxt-kraken2.c index 7caf387eb1449f..ed38645a1dc272 100644 --- a/drivers/hwmon/nzxt-kraken2.c +++ b/drivers/hwmon/nzxt-kraken2.c @@ -9,7 +9,7 @@ * Copyright 2019-2021 Jonas Malaco */ -#include +#include #include #include #include diff --git a/drivers/hwmon/nzxt-kraken3.c b/drivers/hwmon/nzxt-kraken3.c index 00f3ac90a29011..d00409bcab93ad 100644 --- a/drivers/hwmon/nzxt-kraken3.c +++ b/drivers/hwmon/nzxt-kraken3.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #define USB_VENDOR_ID_NZXT 0x1e71 #define USB_PRODUCT_ID_X53 0x2007 diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c index df6fa72a6b5907..c2d1173f42fefb 100644 --- a/drivers/hwmon/nzxt-smart2.c +++ b/drivers/hwmon/nzxt-smart2.c @@ -14,7 +14,7 @@ #include #include -#include +#include /* * The device has only 3 fan channels/connectors. But all HID reports have diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index dd690f700d4990..9486db249c64fb 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "common.h" diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c index 31159606cec7b4..5817a099c622a8 100644 --- a/drivers/hwmon/occ/p8_i2c.c +++ b/drivers/hwmon/occ/p8_i2c.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "common.h" diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c index 8d3b0f86cc57a9..83730d9318240d 100644 --- a/drivers/hwmon/oxp-sensors.c +++ b/drivers/hwmon/oxp-sensors.c @@ -1,18 +1,21 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Platform driver for OneXPlayer, AOK ZOE, and Aya Neo Handhelds that expose - * fan reading and control via hwmon sysfs. + * Platform driver for OneXPlayer, AOKZOE, AYANEO, and OrangePi Handhelds + * that expose fan reading and control via hwmon sysfs. * * Old OXP boards have the same DMI strings and they are told apart by - * the boot cpu vendor (Intel/AMD). Currently only AMD boards are - * supported but the code is made to be simple to add other handheld - * boards in the future. + * the boot cpu vendor (Intel/AMD). Of these older models only AMD is + * supported. + * * Fan control is provided via pwm interface in the range [0-255]. 
* Old AMD boards use [0-100] as range in the EC, the written value is * scaled to accommodate for that. Newer boards like the mini PRO and - * AOK ZOE are not scaled but have the same EC layout. + * AOKZOE are not scaled but have the same EC layout. Newer models + * like the 2 and X1 are [0-184] and are scaled to 0-255. OrangePi + * are [1-244] and scaled to 0-255. * * Copyright (C) 2022 Joaquín I. Aramendía + * Copyright (C) 2024 Derek J. Clark */ #include @@ -43,32 +46,48 @@ enum oxp_board { aok_zoe_a1 = 1, aya_neo_2, aya_neo_air, + aya_neo_air_1s, aya_neo_air_plus_mendo, aya_neo_air_pro, + aya_neo_flip, aya_neo_geek, + aya_neo_kun, + orange_pi_neo, + oxp_2, + oxp_fly, oxp_mini_amd, oxp_mini_amd_a07, oxp_mini_amd_pro, + oxp_x1, }; static enum oxp_board board; /* Fan reading and PWM */ -#define OXP_SENSOR_FAN_REG 0x76 /* Fan reading is 2 registers long */ -#define OXP_SENSOR_PWM_ENABLE_REG 0x4A /* PWM enable is 1 register long */ -#define OXP_SENSOR_PWM_REG 0x4B /* PWM reading is 1 register long */ +#define OXP_SENSOR_FAN_REG 0x76 /* Fan reading is 2 registers long */ +#define OXP_2_SENSOR_FAN_REG 0x58 /* Fan reading is 2 registers long */ +#define OXP_SENSOR_PWM_ENABLE_REG 0x4A /* PWM enable is 1 register long */ +#define OXP_SENSOR_PWM_REG 0x4B /* PWM reading is 1 register long */ +#define PWM_MODE_AUTO 0x00 +#define PWM_MODE_MANUAL 0x01 + +/* OrangePi fan reading and PWM */ +#define ORANGEPI_SENSOR_FAN_REG 0x78 /* Fan reading is 2 registers long */ +#define ORANGEPI_SENSOR_PWM_ENABLE_REG 0x40 /* PWM enable is 1 register long */ +#define ORANGEPI_SENSOR_PWM_REG 0x38 /* PWM reading is 1 register long */ /* Turbo button takeover function - * Older boards have different values and EC registers + * Different boards have different values and EC registers * for the same function */ -#define OXP_OLD_TURBO_SWITCH_REG 0x1E -#define OXP_OLD_TURBO_TAKE_VAL 0x01 -#define OXP_OLD_TURBO_RETURN_VAL 0x00 +#define OXP_TURBO_SWITCH_REG 0xF1 /* Mini Pro, OneXFly, AOKZOE */ +#define OXP_2_TURBO_SWITCH_REG 0xEB /* OXP2 and X1 */ +#define OXP_MINI_TURBO_SWITCH_REG 0x1E /* Mini AO7 */ + +#define OXP_MINI_TURBO_TAKE_VAL 0x01 /* Mini AO7 */ +#define OXP_TURBO_TAKE_VAL 0x40 /* All other models */ -#define OXP_TURBO_SWITCH_REG 0xF1 -#define OXP_TURBO_TAKE_VAL 0x40 -#define OXP_TURBO_RETURN_VAL 0x00 +#define OXP_TURBO_RETURN_VAL 0x00 /* Common return val */ static const struct dmi_system_id dmi_table[] = { { @@ -88,7 +107,7 @@ static const struct dmi_system_id dmi_table[] = { { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_EXACT_MATCH(DMI_BOARD_NAME, "AYANEO 2"), + DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"), }, .driver_data = (void *)aya_neo_2, }, @@ -99,6 +118,13 @@ static const struct dmi_system_id dmi_table[] = { }, .driver_data = (void *)aya_neo_air, }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"), + }, + .driver_data = (void *)aya_neo_air_1s, + }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), @@ -116,10 +142,31 @@ static const struct dmi_system_id dmi_table[] = { { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_EXACT_MATCH(DMI_BOARD_NAME, "GEEK"), + DMI_MATCH(DMI_BOARD_NAME, "FLIP"), + }, + .driver_data = (void *)aya_neo_flip, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "GEEK"), }, .driver_data = (void *)aya_neo_geek, }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"), + }, + .driver_data = (void *)aya_neo_kun, + }, + { + 
.matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"), + }, + .driver_data = (void *)orange_pi_neo, + }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), @@ -127,6 +174,20 @@ static const struct dmi_system_id dmi_table[] = { }, .driver_data = (void *)oxp_mini_amd, }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), + DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER 2"), + }, + .driver_data = (void *)oxp_2, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1"), + }, + .driver_data = (void *)oxp_fly, + }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), @@ -141,6 +202,13 @@ static const struct dmi_system_id dmi_table[] = { }, .driver_data = (void *)oxp_mini_amd_pro, }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), + DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1"), + }, + .driver_data = (void *)oxp_x1, + }, {}, }; @@ -192,14 +260,20 @@ static int tt_toggle_enable(void) switch (board) { case oxp_mini_amd_a07: - reg = OXP_OLD_TURBO_SWITCH_REG; - val = OXP_OLD_TURBO_TAKE_VAL; + reg = OXP_MINI_TURBO_SWITCH_REG; + val = OXP_MINI_TURBO_TAKE_VAL; break; - case oxp_mini_amd_pro: case aok_zoe_a1: + case oxp_fly: + case oxp_mini_amd_pro: reg = OXP_TURBO_SWITCH_REG; val = OXP_TURBO_TAKE_VAL; break; + case oxp_2: + case oxp_x1: + reg = OXP_2_TURBO_SWITCH_REG; + val = OXP_TURBO_TAKE_VAL; + break; default: return -EINVAL; } @@ -213,14 +287,20 @@ static int tt_toggle_disable(void) switch (board) { case oxp_mini_amd_a07: - reg = OXP_OLD_TURBO_SWITCH_REG; - val = OXP_OLD_TURBO_RETURN_VAL; + reg = OXP_MINI_TURBO_SWITCH_REG; + val = OXP_TURBO_RETURN_VAL; break; - case oxp_mini_amd_pro: case aok_zoe_a1: + case oxp_fly: + case oxp_mini_amd_pro: reg = OXP_TURBO_SWITCH_REG; val = OXP_TURBO_RETURN_VAL; break; + case oxp_2: + case oxp_x1: + reg = OXP_2_TURBO_SWITCH_REG; + val = OXP_TURBO_RETURN_VAL; + break; default: return -EINVAL; } @@ -233,8 +313,11 @@ static umode_t tt_toggle_is_visible(struct kobject *kobj, { switch (board) { case aok_zoe_a1: + case oxp_2: + case oxp_fly: case oxp_mini_amd_a07: case oxp_mini_amd_pro: + case oxp_x1: return attr->mode; default: break; @@ -273,12 +356,17 @@ static ssize_t tt_toggle_show(struct device *dev, switch (board) { case oxp_mini_amd_a07: - reg = OXP_OLD_TURBO_SWITCH_REG; + reg = OXP_MINI_TURBO_SWITCH_REG; break; - case oxp_mini_amd_pro: case aok_zoe_a1: + case oxp_fly: + case oxp_mini_amd_pro: reg = OXP_TURBO_SWITCH_REG; break; + case oxp_2: + case oxp_x1: + reg = OXP_2_TURBO_SWITCH_REG; + break; default: return -EINVAL; } @@ -295,12 +383,53 @@ static DEVICE_ATTR_RW(tt_toggle); /* PWM enable/disable functions */ static int oxp_pwm_enable(void) { - return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, 0x01); + switch (board) { + case orange_pi_neo: + return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL); + case aok_zoe_a1: + case aya_neo_2: + case aya_neo_air: + case aya_neo_air_plus_mendo: + case aya_neo_air_pro: + case aya_neo_flip: + case aya_neo_geek: + case aya_neo_kun: + case oxp_2: + case oxp_fly: + case oxp_mini_amd: + case oxp_mini_amd_a07: + case oxp_mini_amd_pro: + case oxp_x1: + return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL); + default: + return -EINVAL; + } } static int oxp_pwm_disable(void) { - return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, 0x00); + switch (board) { + case orange_pi_neo: + return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO); + case aok_zoe_a1: + case aya_neo_2: 
+ case aya_neo_air: + case aya_neo_air_1s: + case aya_neo_air_plus_mendo: + case aya_neo_air_pro: + case aya_neo_flip: + case aya_neo_geek: + case aya_neo_kun: + case oxp_2: + case oxp_fly: + case oxp_mini_amd: + case oxp_mini_amd_a07: + case oxp_mini_amd_pro: + case oxp_x1: + return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO); + default: + return -EINVAL; + } } /* Callbacks for hwmon interface */ @@ -326,7 +455,30 @@ static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type, case hwmon_fan: switch (attr) { case hwmon_fan_input: - return read_from_ec(OXP_SENSOR_FAN_REG, 2, val); + switch (board) { + case orange_pi_neo: + return read_from_ec(ORANGEPI_SENSOR_FAN_REG, 2, val); + case oxp_2: + case oxp_x1: + return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val); + case aok_zoe_a1: + case aya_neo_2: + case aya_neo_air: + case aya_neo_air_1s: + case aya_neo_air_plus_mendo: + case aya_neo_air_pro: + case aya_neo_flip: + case aya_neo_geek: + case aya_neo_kun: + case oxp_fly: + case oxp_mini_amd: + case oxp_mini_amd_a07: + case oxp_mini_amd_pro: + return read_from_ec(OXP_SENSOR_FAN_REG, 2, val); + default: + break; + } + break; default: break; } @@ -334,27 +486,72 @@ static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type, case hwmon_pwm: switch (attr) { case hwmon_pwm_input: - ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val); - if (ret) - return ret; switch (board) { + case orange_pi_neo: + ret = read_from_ec(ORANGEPI_SENSOR_PWM_REG, 1, val); + if (ret) + return ret; + /* scale from range [1-244] */ + *val = ((*val - 1) * 254 / 243) + 1; + break; + case oxp_2: + case oxp_x1: + ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val); + if (ret) + return ret; + /* scale from range [0-184] */ + *val = (*val * 255) / 184; + break; case aya_neo_2: case aya_neo_air: + case aya_neo_air_1s: case aya_neo_air_plus_mendo: case aya_neo_air_pro: + case aya_neo_flip: case aya_neo_geek: + case aya_neo_kun: case oxp_mini_amd: case oxp_mini_amd_a07: + ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val); + if (ret) + return ret; + /* scale from range [0-100] */ *val = (*val * 255) / 100; break; - case oxp_mini_amd_pro: case aok_zoe_a1: + case oxp_fly: + case oxp_mini_amd_pro: default: + ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val); + if (ret) + return ret; break; } return 0; case hwmon_pwm_enable: - return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val); + switch (board) { + case orange_pi_neo: + return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val); + case aok_zoe_a1: + case aya_neo_2: + case aya_neo_air: + case aya_neo_air_1s: + case aya_neo_air_plus_mendo: + case aya_neo_air_pro: + case aya_neo_flip: + case aya_neo_geek: + case aya_neo_kun: + case oxp_2: + case oxp_fly: + case oxp_mini_amd: + case oxp_mini_amd_a07: + case oxp_mini_amd_pro: + case oxp_x1: + return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val); + default: + break; + } + break; default: break; } @@ -381,21 +578,36 @@ static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type, if (val < 0 || val > 255) return -EINVAL; switch (board) { + case orange_pi_neo: + /* scale to range [1-244] */ + val = ((val - 1) * 243 / 254) + 1; + return write_to_ec(ORANGEPI_SENSOR_PWM_REG, val); + case oxp_2: + case oxp_x1: + /* scale to range [0-184] */ + val = (val * 184) / 255; + return write_to_ec(OXP_SENSOR_PWM_REG, val); case aya_neo_2: case aya_neo_air: + case aya_neo_air_1s: case aya_neo_air_plus_mendo: case aya_neo_air_pro: + case aya_neo_flip: case aya_neo_geek: + case aya_neo_kun: case oxp_mini_amd: 
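/*
 * Quick sanity check of the PWM scaling in this function and the matching
 * read path in oxp_platform_read() (editor's illustration, not part of the
 * original patch): hwmon exposes pwm1 as 0-255 while the older ECs expect
 * 0-100, so writing 128 stores (128 * 100) / 255 = 50 in the EC, and
 * reading it back yields (50 * 255) / 100 = 127, one step low because of
 * integer truncation. The newer ranges round-trip exactly at full scale:
 * 255 -> (255 * 184) / 255 = 184 -> 255 on the 2/X1, and
 * 255 -> ((255 - 1) * 243) / 254 + 1 = 244 -> 255 on the OrangePi NEO.
 */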
case oxp_mini_amd_a07: + /* scale to range [0-100] */ val = (val * 100) / 255; - break; + return write_to_ec(OXP_SENSOR_PWM_REG, val); case aok_zoe_a1: + case oxp_fly: case oxp_mini_amd_pro: + return write_to_ec(OXP_SENSOR_PWM_REG, val); default: break; } - return write_to_ec(OXP_SENSOR_PWM_REG, val); + break; default: break; } @@ -467,19 +679,20 @@ static int __init oxp_platform_init(void) { const struct dmi_system_id *dmi_entry; - /* - * Have to check for AMD processor here because DMI strings are the - * same between Intel and AMD boards, the only way to tell them apart - * is the CPU. - * Intel boards seem to have different EC registers and values to - * read/write. - */ dmi_entry = dmi_first_match(dmi_table); - if (!dmi_entry || boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + if (!dmi_entry) return -ENODEV; board = (enum oxp_board)(unsigned long)dmi_entry->driver_data; + /* + * Have to check for AMD processor here because DMI strings are the same + * between Intel and AMD boards on older OneXPlayer devices, the only way + * to tell them apart is the CPU. Old Intel boards have an unsupported EC. + */ + if (board == oxp_mini_amd && boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return -ENODEV; + oxp_platform_device = platform_create_bundle(&oxp_platform_driver, oxp_platform_probe, NULL, 0, NULL, 0); diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c index 9e9681b2e8c55e..788b5d58f77eaf 100644 --- a/drivers/hwmon/pc87360.c +++ b/drivers/hwmon/pc87360.c @@ -1315,7 +1315,7 @@ static void pc87360_init_device(struct platform_device *pdev, (reg & 0xC0) | 0x11); } - nr = data->innr < 11 ? data->innr : 11; + nr = min(data->innr, 11); for (i = 0; i < nr; i++) { reg = pc87360_read_value(data, LD_IN, i, PC87365_REG_IN_STATUS); diff --git a/drivers/hwmon/pmbus/max15301.c b/drivers/hwmon/pmbus/max15301.c index 986404fe6a31c2..f5367a7bc0f519 100644 --- a/drivers/hwmon/pmbus/max15301.c +++ b/drivers/hwmon/pmbus/max15301.c @@ -31,8 +31,6 @@ MODULE_DEVICE_TABLE(i2c, max15301_id); struct max15301_data { int id; - ktime_t access; /* Chip access time */ - int delay; /* Delay between chip accesses in us */ struct pmbus_driver_info info; }; @@ -55,89 +53,6 @@ static struct max15301_data max15301_data = { } }; -/* This chip needs a delay between accesses */ -static inline void max15301_wait(const struct max15301_data *data) -{ - if (data->delay) { - s64 delta = ktime_us_delta(ktime_get(), data->access); - - if (delta < data->delay) - udelay(data->delay - delta); - } -} - -static int max15301_read_word_data(struct i2c_client *client, int page, - int phase, int reg) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct max15301_data *data = to_max15301_data(info); - int ret; - - if (page > 0) - return -ENXIO; - - if (reg >= PMBUS_VIRT_BASE) - return -ENXIO; - - max15301_wait(data); - ret = pmbus_read_word_data(client, page, phase, reg); - data->access = ktime_get(); - - return ret; -} - -static int max15301_read_byte_data(struct i2c_client *client, int page, int reg) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct max15301_data *data = to_max15301_data(info); - int ret; - - if (page > 0) - return -ENXIO; - - max15301_wait(data); - ret = pmbus_read_byte_data(client, page, reg); - data->access = ktime_get(); - - return ret; -} - -static int max15301_write_word_data(struct i2c_client *client, int page, int reg, - u16 word) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct max15301_data *data = 
to_max15301_data(info); - int ret; - - if (page > 0) - return -ENXIO; - - if (reg >= PMBUS_VIRT_BASE) - return -ENXIO; - - max15301_wait(data); - ret = pmbus_write_word_data(client, page, reg, word); - data->access = ktime_get(); - - return ret; -} - -static int max15301_write_byte(struct i2c_client *client, int page, u8 value) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct max15301_data *data = to_max15301_data(info); - int ret; - - if (page > 0) - return -ENXIO; - - max15301_wait(data); - ret = pmbus_write_byte(client, page, value); - data->access = ktime_get(); - - return ret; -} - static int max15301_probe(struct i2c_client *client) { int status; @@ -164,12 +79,7 @@ static int max15301_probe(struct i2c_client *client) return -ENODEV; } - max15301_data.delay = delay; - - info->read_byte_data = max15301_read_byte_data; - info->read_word_data = max15301_read_word_data; - info->write_byte = max15301_write_byte; - info->write_word_data = max15301_write_word_data; + info->access_delay = delay; return pmbus_do_probe(client, info); } diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c index 67487867c70fc6..2dcb6da853bd34 100644 --- a/drivers/hwmon/pmbus/mpq7932.c +++ b/drivers/hwmon/pmbus/mpq7932.c @@ -35,7 +35,7 @@ struct mpq7932_data { }; #if IS_ENABLED(CONFIG_SENSORS_MPQ7932_REGULATOR) -static struct regulator_desc mpq7932_regulators_desc[] = { +static const struct regulator_desc mpq7932_regulators_desc[] = { PMBUS_REGULATOR_STEP("buck", 0, MPQ7932_N_VOLTAGES, MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN), PMBUS_REGULATOR_STEP("buck", 1, MPQ7932_N_VOLTAGES, diff --git a/drivers/hwmon/pmbus/pli1209bc.c b/drivers/hwmon/pmbus/pli1209bc.c index 2c6c9ec2a65250..178e0cdb7887ef 100644 --- a/drivers/hwmon/pmbus/pli1209bc.c +++ b/drivers/hwmon/pmbus/pli1209bc.c @@ -54,30 +54,6 @@ static int pli1209bc_read_word_data(struct i2c_client *client, int page, } } -static int pli1209bc_write_byte(struct i2c_client *client, int page, u8 reg) -{ - int ret; - - switch (reg) { - case PMBUS_CLEAR_FAULTS: - ret = pmbus_write_byte(client, page, reg); - /* - * PLI1209 takes 230 usec to execute the CLEAR_FAULTS command. - * During that time it's busy and NACKs all requests on the - * SMBUS interface. It also NACKs reads on PMBUS_STATUS_BYTE - * making it impossible to poll the BUSY flag. - * - * Just wait for not BUSY unconditionally. - */ - usleep_range(250, 300); - break; - default: - ret = -ENODATA; - break; - } - return ret; -} - #if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR) static const struct regulator_desc pli1209bc_reg_desc = { .name = "vout2", @@ -127,7 +103,7 @@ static struct pmbus_driver_info pli1209bc_info = { | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT, .read_word_data = pli1209bc_read_word_data, - .write_byte = pli1209bc_write_byte, + .write_delay = 250, #if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR) .num_regulators = 1, .reg_desc = &pli1209bc_reg_desc, diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 0bea603994e7b2..d605412a3173b9 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -472,6 +472,16 @@ struct pmbus_driver_info { /* custom attributes */ const struct attribute_group **groups; + + /* + * Some chips need a little delay between SMBus communication. When + * set, the generic PMBus helper functions will wait if necessary + * to meet this requirement. The access delay is honored after + * every SMBus operation. 
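 * For example (illustrative; this mirrors the max15301 and ucd9000
 * conversions elsewhere in this diff), a driver no longer wraps each
 * read/write callback with its own ktime bookkeeping; it simply sets the
 * field in probe before registering:
 *
 *     info->access_delay = delay;    (or: info->write_delay = 500;)
 *     return pmbus_do_probe(client, info);
 *
 * and the PMBus core fsleep()s before the next transfer when needed.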
The write delay is only honored after + * SMBus write operations. + */ + int access_delay; /* in microseconds */ + int write_delay; /* in microseconds */ }; /* Regulator ops */ diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index e592446b26653e..ce7fd4ca9d89b0 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -110,6 +111,8 @@ struct pmbus_data { int vout_low[PMBUS_PAGES]; /* voltage low margin */ int vout_high[PMBUS_PAGES]; /* voltage high margin */ + ktime_t write_time; /* Last SMBUS write timestamp */ + ktime_t access_time; /* Last SMBUS access timestamp */ }; struct pmbus_debugfs_entry { @@ -160,6 +163,39 @@ void pmbus_set_update(struct i2c_client *client, u8 reg, bool update) } EXPORT_SYMBOL_NS_GPL(pmbus_set_update, PMBUS); +/* Some chips need a delay between accesses. */ +static void pmbus_wait(struct i2c_client *client) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + const struct pmbus_driver_info *info = data->info; + s64 delta; + + if (info->access_delay) { + delta = ktime_us_delta(ktime_get(), data->access_time); + + if (delta < info->access_delay) + fsleep(info->access_delay - delta); + } else if (info->write_delay) { + delta = ktime_us_delta(ktime_get(), data->write_time); + + if (delta < info->write_delay) + fsleep(info->write_delay - delta); + } +} + +/* Sets the last accessed timestamp for pmbus_wait */ +static void pmbus_update_ts(struct i2c_client *client, bool write_op) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + const struct pmbus_driver_info *info = data->info; + + if (info->access_delay) { + data->access_time = ktime_get(); + } else if (info->write_delay && write_op) { + data->write_time = ktime_get(); + } +} + int pmbus_set_page(struct i2c_client *client, int page, int phase) { struct pmbus_data *data = i2c_get_clientdata(client); @@ -170,11 +206,15 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase) if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL) && data->info->pages > 1 && page != data->currpage) { + pmbus_wait(client); rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); + pmbus_update_ts(client, true); if (rv < 0) return rv; + pmbus_wait(client); rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE); + pmbus_update_ts(client, false); if (rv < 0) return rv; @@ -185,8 +225,10 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase) if (data->info->phases[page] && data->currphase != phase && !(data->info->func[page] & PMBUS_PHASE_VIRTUAL)) { + pmbus_wait(client); rv = i2c_smbus_write_byte_data(client, PMBUS_PHASE, phase); + pmbus_update_ts(client, true); if (rv) return rv; } @@ -204,7 +246,11 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) if (rv < 0) return rv; - return i2c_smbus_write_byte(client, value); + pmbus_wait(client); + rv = i2c_smbus_write_byte(client, value); + pmbus_update_ts(client, true); + + return rv; } EXPORT_SYMBOL_NS_GPL(pmbus_write_byte, PMBUS); @@ -235,7 +281,11 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, if (rv < 0) return rv; - return i2c_smbus_write_word_data(client, reg, word); + pmbus_wait(client); + rv = i2c_smbus_write_word_data(client, reg, word); + pmbus_update_ts(client, true); + + return rv; } EXPORT_SYMBOL_NS_GPL(pmbus_write_word_data, PMBUS); @@ -353,7 +403,11 @@ int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg) if (rv < 0) return rv; - return 
i2c_smbus_read_word_data(client, reg); + pmbus_wait(client); + rv = i2c_smbus_read_word_data(client, reg); + pmbus_update_ts(client, false); + + return rv; } EXPORT_SYMBOL_NS_GPL(pmbus_read_word_data, PMBUS); @@ -412,7 +466,11 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg) if (rv < 0) return rv; - return i2c_smbus_read_byte_data(client, reg); + pmbus_wait(client); + rv = i2c_smbus_read_byte_data(client, reg); + pmbus_update_ts(client, false); + + return rv; } EXPORT_SYMBOL_NS_GPL(pmbus_read_byte_data, PMBUS); @@ -424,7 +482,11 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value) if (rv < 0) return rv; - return i2c_smbus_write_byte_data(client, reg, value); + pmbus_wait(client); + rv = i2c_smbus_write_byte_data(client, reg, value); + pmbus_update_ts(client, true); + + return rv; } EXPORT_SYMBOL_NS_GPL(pmbus_write_byte_data, PMBUS); @@ -456,7 +518,11 @@ static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg, if (rv < 0) return rv; - return i2c_smbus_read_block_data(client, reg, data_buf); + pmbus_wait(client); + rv = i2c_smbus_read_block_data(client, reg, data_buf); + pmbus_update_ts(client, false); + + return rv; } static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page, @@ -2457,9 +2523,11 @@ static int pmbus_read_coefficients(struct i2c_client *client, data.block[1] = attr->reg; data.block[2] = 0x01; + pmbus_wait(client); rv = i2c_smbus_xfer(client->adapter, client->addr, client->flags, I2C_SMBUS_WRITE, PMBUS_COEFFICIENTS, I2C_SMBUS_BLOCK_PROC_CALL, &data); + pmbus_update_ts(client, true); if (rv < 0) return rv; @@ -2611,7 +2679,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, /* Enable PEC if the controller and bus supports it */ if (!(data->flags & PMBUS_NO_CAPABILITY)) { + pmbus_wait(client); ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY); + pmbus_update_ts(client, false); + if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) { if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) client->flags |= I2C_CLIENT_PEC; @@ -2624,10 +2695,16 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, * Bail out if both registers are not supported. */ data->read_status = pmbus_read_status_word; + pmbus_wait(client); ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD); + pmbus_update_ts(client, false); + if (ret < 0 || ret == 0xffff) { data->read_status = pmbus_read_status_byte; + pmbus_wait(client); ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE); + pmbus_update_ts(client, false); + if (ret < 0 || ret == 0xff) { dev_err(dev, "PMBus status register not found\n"); return -ENODEV; @@ -2642,7 +2719,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, * limit registers need to be disabled. 
*/ if (!(data->flags & PMBUS_NO_WRITE_PROTECT)) { + pmbus_wait(client); ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT); + pmbus_update_ts(client, false); + if (ret > 0 && (ret & PB_WP_ANY)) data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK; } diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index d817c719b90bd5..5d3d1773bf520d 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -67,7 +67,6 @@ struct ucd9000_data { struct gpio_chip gpio; #endif struct dentry *debugfs; - ktime_t write_time; }; #define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info) @@ -86,63 +85,6 @@ struct ucd9000_debugfs_entry { */ #define UCD90320_WAIT_DELAY_US 500 -static inline void ucd90320_wait(const struct ucd9000_data *data) -{ - s64 delta = ktime_us_delta(ktime_get(), data->write_time); - - if (delta < UCD90320_WAIT_DELAY_US) - udelay(UCD90320_WAIT_DELAY_US - delta); -} - -static int ucd90320_read_word_data(struct i2c_client *client, int page, - int phase, int reg) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct ucd9000_data *data = to_ucd9000_data(info); - - if (reg >= PMBUS_VIRT_BASE) - return -ENXIO; - - ucd90320_wait(data); - return pmbus_read_word_data(client, page, phase, reg); -} - -static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct ucd9000_data *data = to_ucd9000_data(info); - - ucd90320_wait(data); - return pmbus_read_byte_data(client, page, reg); -} - -static int ucd90320_write_word_data(struct i2c_client *client, int page, - int reg, u16 word) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct ucd9000_data *data = to_ucd9000_data(info); - int ret; - - ucd90320_wait(data); - ret = pmbus_write_word_data(client, page, reg, word); - data->write_time = ktime_get(); - - return ret; -} - -static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct ucd9000_data *data = to_ucd9000_data(info); - int ret; - - ucd90320_wait(data); - ret = pmbus_write_byte(client, page, value); - data->write_time = ktime_get(); - - return ret; -} - static int ucd9000_get_fan_config(struct i2c_client *client, int fan) { int fan_config = 0; @@ -667,10 +609,8 @@ static int ucd9000_probe(struct i2c_client *client) info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34; } else if (mid->driver_data == ucd90320) { - info->read_byte_data = ucd90320_read_byte_data; - info->read_word_data = ucd90320_read_word_data; - info->write_byte = ucd90320_write_byte; - info->write_word_data = ucd90320_write_word_data; + /* Delay SMBus operations after a write */ + info->write_delay = UCD90320_WAIT_DELAY_US; } ucd9000_probe_gpio(client, mid, data); diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c index 83458df0d0cfaf..7920a16203e137 100644 --- a/drivers/hwmon/pmbus/zl6100.c +++ b/drivers/hwmon/pmbus/zl6100.c @@ -22,8 +22,6 @@ enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105, struct zl6100_data { int id; - ktime_t access; /* chip access time */ - int delay; /* Delay between chip accesses in uS */ struct pmbus_driver_info info; }; @@ -122,16 +120,6 @@ static u16 zl6100_d2l(long val) return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800); } -/* Some chips need a delay between 
accesses */ -static inline void zl6100_wait(const struct zl6100_data *data) -{ - if (data->delay) { - s64 delta = ktime_us_delta(ktime_get(), data->access); - if (delta < data->delay) - udelay(data->delay - delta); - } -} - static int zl6100_read_word_data(struct i2c_client *client, int page, int phase, int reg) { @@ -174,9 +162,7 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, break; } - zl6100_wait(data); ret = pmbus_read_word_data(client, page, phase, vreg); - data->access = ktime_get(); if (ret < 0) return ret; @@ -195,14 +181,11 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct zl6100_data *data = to_zl6100_data(info); int ret, status; if (page >= info->pages) return -ENXIO; - zl6100_wait(data); - switch (reg) { case PMBUS_VIRT_STATUS_VMON: ret = pmbus_read_byte_data(client, 0, @@ -225,7 +208,6 @@ static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg) ret = pmbus_read_byte_data(client, page, reg); break; } - data->access = ktime_get(); return ret; } @@ -234,8 +216,7 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg, u16 word) { const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct zl6100_data *data = to_zl6100_data(info); - int ret, vreg; + int vreg; if (page >= info->pages) return -ENXIO; @@ -265,27 +246,7 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg, vreg = reg; } - zl6100_wait(data); - ret = pmbus_write_word_data(client, page, vreg, word); - data->access = ktime_get(); - - return ret; -} - -static int zl6100_write_byte(struct i2c_client *client, int page, u8 value) -{ - const struct pmbus_driver_info *info = pmbus_get_driver_info(client); - struct zl6100_data *data = to_zl6100_data(info); - int ret; - - if (page >= info->pages) - return -ENXIO; - - zl6100_wait(data); - ret = pmbus_write_byte(client, page, value); - data->access = ktime_get(); - - return ret; + return pmbus_write_word_data(client, page, vreg, word); } static const struct i2c_device_id zl6100_id[] = { @@ -363,14 +324,7 @@ static int zl6100_probe(struct i2c_client *client) * supported chips are known to require a wait time between I2C * accesses. */ - data->delay = delay; - - /* - * Since there was a direct I2C device access above, wait before - * accessing the chip again. 
- */ - data->access = ktime_get(); - zl6100_wait(data); + udelay(delay); info = &data->info; @@ -404,8 +358,7 @@ static int zl6100_probe(struct i2c_client *client) if (ret < 0) return ret; - data->access = ktime_get(); - zl6100_wait(data); + udelay(delay); if (ret & ZL8802_MFR_PHASES_MASK) info->func[1] |= PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT; @@ -418,8 +371,7 @@ static int zl6100_probe(struct i2c_client *client) if (ret < 0) return ret; - data->access = ktime_get(); - zl6100_wait(data); + udelay(delay); ret = i2c_smbus_read_word_data(client, ZL8802_MFR_USER_CONFIG); if (ret < 0) @@ -428,8 +380,7 @@ static int zl6100_probe(struct i2c_client *client) if (ret & ZL8802_MFR_XTEMP_ENABLE_2) info->func[i] |= PMBUS_HAVE_TEMP2; - data->access = ktime_get(); - zl6100_wait(data); + udelay(delay); } ret = i2c_smbus_read_word_data(client, ZL8802_MFR_USER_GLOBAL_CONFIG); if (ret < 0) @@ -446,13 +397,12 @@ static int zl6100_probe(struct i2c_client *client) info->func[0] |= PMBUS_HAVE_TEMP2; } - data->access = ktime_get(); - zl6100_wait(data); + udelay(delay); + info->access_delay = delay; info->read_word_data = zl6100_read_word_data; info->read_byte_data = zl6100_read_byte_data; info->write_word_data = zl6100_write_word_data; - info->write_byte = zl6100_write_byte; return pmbus_do_probe(client, info); } diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index a1712649b07e3f..c434db4656e7df 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -167,7 +167,7 @@ static int pwm_fan_power_on(struct pwm_fan_ctx *ctx) return ret; } -static int pwm_fan_power_off(struct pwm_fan_ctx *ctx) +static int pwm_fan_power_off(struct pwm_fan_ctx *ctx, bool force_disable) { struct pwm_state *state = &ctx->pwm_state; bool enable_regulator = false; @@ -180,7 +180,8 @@ static int pwm_fan_power_off(struct pwm_fan_ctx *ctx) state, &enable_regulator); - state->enabled = false; + if (force_disable) + state->enabled = false; state->duty_cycle = 0; ret = pwm_apply_might_sleep(ctx->pwm, state); if (ret) { @@ -213,7 +214,7 @@ static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm) return ret; ret = pwm_fan_power_on(ctx); } else { - ret = pwm_fan_power_off(ctx); + ret = pwm_fan_power_off(ctx, false); } if (!ret) ctx->pwm_value = pwm; @@ -468,7 +469,7 @@ static void pwm_fan_cleanup(void *__ctx) del_timer_sync(&ctx->rpm_timer); /* Switch off everything */ ctx->enable_mode = pwm_disable_reg_disable; - pwm_fan_power_off(ctx); + pwm_fan_power_off(ctx, true); } static int pwm_fan_probe(struct platform_device *pdev) @@ -661,7 +662,7 @@ static int pwm_fan_suspend(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); - return pwm_fan_power_off(ctx); + return pwm_fan_power_off(ctx, true); } static int pwm_fan_resume(struct device *dev) diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c index 6e6d5415847463..a4b05ebb054609 100644 --- a/drivers/hwmon/sch5636.c +++ b/drivers/hwmon/sch5636.c @@ -416,8 +416,7 @@ static int sch5636_probe(struct platform_device *pdev) id[i] = '\0'; if (strcmp(id, "THS")) { - pr_err("Unknown Fujitsu id: %02x%02x%02x\n", - id[0], id[1], id[2]); + pr_err("Unknown Fujitsu id: %3pE (%3ph)\n", id, id); err = -ENODEV; goto error; } diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h index 7479a549a026ab..601987c6b4cd17 100644 --- a/drivers/hwmon/sch56xx-common.h +++ b/drivers/hwmon/sch56xx-common.h @@ -22,4 +22,3 @@ int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg, void sch56xx_watchdog_register(struct device 
*parent, u16 addr, u32 revision, struct mutex *io_lock, int check_enabled); -void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data); diff --git a/drivers/hwmon/sg2042-mcu.c b/drivers/hwmon/sg2042-mcu.c new file mode 100644 index 00000000000000..1410457693545b --- /dev/null +++ b/drivers/hwmon/sg2042-mcu.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 Inochi Amaoto + * + * Sophgo power control mcu for SG2042 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* fixed MCU registers */ +#define REG_BOARD_TYPE 0x00 +#define REG_MCU_FIRMWARE_VERSION 0x01 +#define REG_PCB_VERSION 0x02 +#define REG_PWR_CTRL 0x03 +#define REG_SOC_TEMP 0x04 +#define REG_BOARD_TEMP 0x05 +#define REG_RST_COUNT 0x0a +#define REG_UPTIME 0x0b +#define REG_RESET_REASON 0x0d +#define REG_MCU_TYPE 0x18 +#define REG_REPOWER_POLICY 0x65 +#define REG_CRITICAL_TEMP 0x66 +#define REG_REPOWER_TEMP 0x67 + +#define REPOWER_POLICY_REBOOT 1 +#define REPOWER_POLICY_KEEP_OFF 2 + +#define MCU_POWER_MAX 0xff + +#define DEFINE_MCU_DEBUG_ATTR(_name, _reg, _format) \ + static int _name##_show(struct seq_file *seqf, \ + void *unused) \ + { \ + struct sg2042_mcu_data *mcu = seqf->private; \ + int ret; \ + ret = i2c_smbus_read_byte_data(mcu->client, (_reg)); \ + if (ret < 0) \ + return ret; \ + seq_printf(seqf, _format "\n", ret); \ + return 0; \ + } \ + DEFINE_SHOW_ATTRIBUTE(_name) \ + +struct sg2042_mcu_data { + struct i2c_client *client; + struct dentry *debugfs; + struct mutex mutex; +}; + +static struct dentry *sgmcu_debugfs; + +static ssize_t reset_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sg2042_mcu_data *mcu = dev_get_drvdata(dev); + int ret; + + ret = i2c_smbus_read_byte_data(mcu->client, REG_RST_COUNT); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", ret); +} + +static ssize_t uptime_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sg2042_mcu_data *mcu = dev_get_drvdata(dev); + u8 time_val[2]; + int ret; + + ret = i2c_smbus_read_i2c_block_data(mcu->client, REG_UPTIME, + sizeof(time_val), time_val); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", + (time_val[0]) | (time_val[1] << 8)); +} + +static ssize_t reset_reason_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sg2042_mcu_data *mcu = dev_get_drvdata(dev); + int ret; + + ret = i2c_smbus_read_byte_data(mcu->client, REG_RESET_REASON); + if (ret < 0) + return ret; + + return sprintf(buf, "0x%02x\n", ret); +} + +static ssize_t repower_policy_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sg2042_mcu_data *mcu = dev_get_drvdata(dev); + int ret; + const char *action; + + ret = i2c_smbus_read_byte_data(mcu->client, REG_REPOWER_POLICY); + if (ret < 0) + return ret; + + if (ret == REPOWER_POLICY_REBOOT) + action = "repower"; + else if (ret == REPOWER_POLICY_KEEP_OFF) + action = "keep"; + else + action = "unknown"; + + return sprintf(buf, "%s\n", action); +} + +static ssize_t repower_policy_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct sg2042_mcu_data *mcu = dev_get_drvdata(dev); + u8 value; + int ret; + + if (sysfs_streq("repower", buf)) + value = REPOWER_POLICY_REBOOT; + else if (sysfs_streq("keep", buf)) + value = REPOWER_POLICY_KEEP_OFF; + else + return -EINVAL; + + ret = i2c_smbus_write_byte_data(mcu->client, + REG_REPOWER_POLICY, value); + if (ret < 0) + return 
ret; + + return count; +} + +static DEVICE_ATTR_RO(reset_count); +static DEVICE_ATTR_RO(uptime); +static DEVICE_ATTR_RO(reset_reason); +static DEVICE_ATTR_RW(repower_policy); + +DEFINE_MCU_DEBUG_ATTR(firmware_version, REG_MCU_FIRMWARE_VERSION, "0x%02x"); +DEFINE_MCU_DEBUG_ATTR(pcb_version, REG_PCB_VERSION, "0x%02x"); +DEFINE_MCU_DEBUG_ATTR(board_type, REG_BOARD_TYPE, "0x%02x"); +DEFINE_MCU_DEBUG_ATTR(mcu_type, REG_MCU_TYPE, "%d"); + +static struct attribute *sg2042_mcu_attrs[] = { + &dev_attr_reset_count.attr, + &dev_attr_uptime.attr, + &dev_attr_reset_reason.attr, + &dev_attr_repower_policy.attr, + NULL +}; + +static const struct attribute_group sg2042_mcu_attr_group = { + .attrs = sg2042_mcu_attrs, +}; + +static const struct attribute_group *sg2042_mcu_groups[] = { + &sg2042_mcu_attr_group, + NULL +}; + +static const struct hwmon_channel_info * const sg2042_mcu_info[] = { + HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_CRIT | + HWMON_T_CRIT_HYST, + HWMON_T_INPUT), + NULL +}; + +static int sg2042_mcu_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct sg2042_mcu_data *mcu = dev_get_drvdata(dev); + int tmp; + u8 reg; + + switch (attr) { + case hwmon_temp_input: + reg = channel ? REG_BOARD_TEMP : REG_SOC_TEMP; + break; + case hwmon_temp_crit: + reg = REG_CRITICAL_TEMP; + break; + case hwmon_temp_crit_hyst: + reg = REG_REPOWER_TEMP; + break; + default: + return -EOPNOTSUPP; + } + + tmp = i2c_smbus_read_byte_data(mcu->client, reg); + if (tmp < 0) + return tmp; + *val = tmp * 1000; + + return 0; +} + +static int sg2042_mcu_write(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct sg2042_mcu_data *mcu = dev_get_drvdata(dev); + int temp = val / 1000; + int hyst_temp, crit_temp; + u8 reg; + + temp = clamp_val(temp, 0, MCU_POWER_MAX); + + guard(mutex)(&mcu->mutex); + + switch (attr) { + case hwmon_temp_crit: + hyst_temp = i2c_smbus_read_byte_data(mcu->client, + REG_REPOWER_TEMP); + if (hyst_temp < 0) + return hyst_temp; + + crit_temp = temp; + reg = REG_CRITICAL_TEMP; + break; + case hwmon_temp_crit_hyst: + crit_temp = i2c_smbus_read_byte_data(mcu->client, + REG_CRITICAL_TEMP); + if (crit_temp < 0) + return crit_temp; + + hyst_temp = temp; + reg = REG_REPOWER_TEMP; + break; + default: + return -EOPNOTSUPP; + } + + /* + * ensure hyst_temp is smaller to avoid MCU from + * keeping triggering repower event. 
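 * For instance (illustrative values), if REG_CRITICAL_TEMP currently holds
 * 90 degrees C, an attempt to raise the repower (hysteresis) temperature
 * above 90 fails with -EINVAL below, and likewise the critical limit
 * cannot be lowered beneath the current repower temperature.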
+ */ + if (crit_temp < hyst_temp) + return -EINVAL; + + return i2c_smbus_write_byte_data(mcu->client, reg, temp); +} + +static umode_t sg2042_mcu_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + if (channel == 0) + return 0644; + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_ops sg2042_mcu_ops = { + .is_visible = sg2042_mcu_is_visible, + .read = sg2042_mcu_read, + .write = sg2042_mcu_write, +}; + +static const struct hwmon_chip_info sg2042_mcu_chip_info = { + .ops = &sg2042_mcu_ops, + .info = sg2042_mcu_info, +}; + +static void sg2042_mcu_debugfs_init(struct sg2042_mcu_data *mcu, + struct device *dev) +{ + mcu->debugfs = debugfs_create_dir(dev_name(dev), sgmcu_debugfs); + + debugfs_create_file("firmware_version", 0444, mcu->debugfs, + mcu, &firmware_version_fops); + debugfs_create_file("pcb_version", 0444, mcu->debugfs, mcu, + &pcb_version_fops); + debugfs_create_file("mcu_type", 0444, mcu->debugfs, mcu, + &mcu_type_fops); + debugfs_create_file("board_type", 0444, mcu->debugfs, mcu, + &board_type_fops); +} + +static int sg2042_mcu_i2c_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct sg2042_mcu_data *mcu; + struct device *hwmon_dev; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA)) + return -ENODEV; + + mcu = devm_kmalloc(dev, sizeof(*mcu), GFP_KERNEL); + if (!mcu) + return -ENOMEM; + + mutex_init(&mcu->mutex); + mcu->client = client; + + i2c_set_clientdata(client, mcu); + + hwmon_dev = devm_hwmon_device_register_with_info(dev, "sg2042_mcu", + mcu, + &sg2042_mcu_chip_info, + NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + sg2042_mcu_debugfs_init(mcu, dev); + + return 0; +} + +static void sg2042_mcu_i2c_remove(struct i2c_client *client) +{ + struct sg2042_mcu_data *mcu = i2c_get_clientdata(client); + + debugfs_remove_recursive(mcu->debugfs); +} + +static const struct i2c_device_id sg2042_mcu_id[] = { + { "sg2042-hwmon-mcu", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(i2c, sg2042_mcu_id); + +static const struct of_device_id sg2042_mcu_of_id[] = { + { .compatible = "sophgo,sg2042-hwmon-mcu" }, + {}, +}; +MODULE_DEVICE_TABLE(of, sg2042_mcu_of_id); + +static struct i2c_driver sg2042_mcu_driver = { + .driver = { + .name = "sg2042-mcu", + .of_match_table = sg2042_mcu_of_id, + .dev_groups = sg2042_mcu_groups, + }, + .probe = sg2042_mcu_i2c_probe, + .remove = sg2042_mcu_i2c_remove, + .id_table = sg2042_mcu_id, +}; + +static int __init sg2042_mcu_init(void) +{ + sgmcu_debugfs = debugfs_create_dir("sg2042-mcu", NULL); + return i2c_add_driver(&sg2042_mcu_driver); +} + +static void __exit sg2042_mcu_exit(void) +{ + debugfs_remove_recursive(sgmcu_debugfs); + i2c_del_driver(&sg2042_mcu_driver); +} + +module_init(sg2042_mcu_init); +module_exit(sg2042_mcu_exit); + +MODULE_AUTHOR("Inochi Amaoto "); +MODULE_DESCRIPTION("MCU I2C driver for SG2042 soc platform"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c index ad1b827ea78219..97327313529b46 100644 --- a/drivers/hwmon/sht21.c +++ b/drivers/hwmon/sht21.c @@ -199,10 +199,7 @@ static ssize_t eic_read(struct sht21 *sht21) eic[6] = rx[0]; eic[7] = rx[1]; - ret = snprintf(sht21->eic, sizeof(sht21->eic), - "%02x%02x%02x%02x%02x%02x%02x%02x\n", - eic[0], eic[1], eic[2], eic[3], - 
eic[4], eic[5], eic[6], eic[7]); + ret = snprintf(sht21->eic, sizeof(sht21->eic), "%8phN\n", eic); out: if (ret < 0) sht21->eic[0] = 0; diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c index e7632081a1d145..f9e8b2869164a3 100644 --- a/drivers/hwmon/stts751.c +++ b/drivers/hwmon/stts751.c @@ -77,7 +77,7 @@ static const struct i2c_device_id stts751_id[] = { }; static const struct of_device_id __maybe_unused stts751_of_match[] = { - { .compatible = "stts751" }, + { .compatible = "st,stts751" }, { }, }; MODULE_DEVICE_TABLE(of, stts751_of_match); diff --git a/drivers/hwmon/surface_temp.c b/drivers/hwmon/surface_temp.c new file mode 100644 index 00000000000000..cd21f331f1576f --- /dev/null +++ b/drivers/hwmon/surface_temp.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Thermal sensor subsystem driver for Surface System Aggregator Module (SSAM). + * + * Copyright (C) 2022-2023 Maximilian Luz + */ + +#include +#include +#include +#include +#include + +#include +#include + +/* -- SAM interface. -------------------------------------------------------- */ + +/* + * Available sensors are indicated by a 16-bit bitfield, where a 1 marks the + * presence of a sensor. So we have at most 16 possible sensors/channels. + */ +#define SSAM_TMP_SENSOR_MAX_COUNT 16 + +/* + * All names observed so far are 6 characters long, but there's only + * zeros after the name, so perhaps they can be longer. This number reflects + * the maximum zero-padded space observed in the returned buffer. + */ +#define SSAM_TMP_SENSOR_NAME_LENGTH 18 + +struct ssam_tmp_get_name_rsp { + __le16 unknown1; + char unknown2; + char name[SSAM_TMP_SENSOR_NAME_LENGTH]; +} __packed; + +static_assert(sizeof(struct ssam_tmp_get_name_rsp) == 21); + +SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_get_available_sensors, __le16, { + .target_category = SSAM_SSH_TC_TMP, + .command_id = 0x04, +}); + +SSAM_DEFINE_SYNC_REQUEST_MD_R(__ssam_tmp_get_temperature, __le16, { + .target_category = SSAM_SSH_TC_TMP, + .command_id = 0x01, +}); + +SSAM_DEFINE_SYNC_REQUEST_MD_R(__ssam_tmp_get_name, struct ssam_tmp_get_name_rsp, { + .target_category = SSAM_SSH_TC_TMP, + .command_id = 0x0e, +}); + +static int ssam_tmp_get_available_sensors(struct ssam_device *sdev, s16 *sensors) +{ + __le16 sensors_le; + int status; + + status = __ssam_tmp_get_available_sensors(sdev, &sensors_le); + if (status) + return status; + + *sensors = le16_to_cpu(sensors_le); + return 0; +} + +static int ssam_tmp_get_temperature(struct ssam_device *sdev, u8 iid, long *temperature) +{ + __le16 temp_le; + int status; + + status = __ssam_tmp_get_temperature(sdev->ctrl, sdev->uid.target, iid, &temp_le); + if (status) + return status; + + /* Convert 1/10 °K to 1/1000 °C */ + *temperature = (le16_to_cpu(temp_le) - 2731) * 100L; + return 0; +} + +static int ssam_tmp_get_name(struct ssam_device *sdev, u8 iid, char *buf, size_t buf_len) +{ + struct ssam_tmp_get_name_rsp name_rsp; + int status; + + status = __ssam_tmp_get_name(sdev->ctrl, sdev->uid.target, iid, &name_rsp); + if (status) + return status; + + /* + * This should not fail unless the name in the returned struct is not + * null-terminated or someone changed something in the struct + * definitions above, since our buffer and struct have the same + * capacity by design. So if this fails, log an error message. Since + * the more likely cause is that the returned string isn't + * null-terminated, we might have received garbage (as opposed to just + * an incomplete string), so also fail the function. 
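 * (For reference: strscpy() returns the number of characters copied on
 * success and -E2BIG when the source does not fit, so with the equal-sized
 * buffers used here a negative return can only mean the name was not
 * NUL-terminated within the response.)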
+ */ + status = strscpy(buf, name_rsp.name, buf_len); + if (status < 0) { + dev_err(&sdev->dev, "received non-null-terminated sensor name string\n"); + return status; + } + + return 0; +} + +/* -- Driver.---------------------------------------------------------------- */ + +struct ssam_temp { + struct ssam_device *sdev; + s16 sensors; + char names[SSAM_TMP_SENSOR_MAX_COUNT][SSAM_TMP_SENSOR_NAME_LENGTH]; +}; + +static umode_t ssam_temp_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct ssam_temp *ssam_temp = data; + + if (!(ssam_temp->sensors & BIT(channel))) + return 0; + + return 0444; +} + +static int ssam_temp_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *value) +{ + const struct ssam_temp *ssam_temp = dev_get_drvdata(dev); + + return ssam_tmp_get_temperature(ssam_temp->sdev, channel + 1, value); +} + +static int ssam_temp_hwmon_read_string(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + const struct ssam_temp *ssam_temp = dev_get_drvdata(dev); + + *str = ssam_temp->names[channel]; + return 0; +} + +static const struct hwmon_channel_info * const ssam_temp_hwmon_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL), + NULL +}; + +static const struct hwmon_ops ssam_temp_hwmon_ops = { + .is_visible = ssam_temp_hwmon_is_visible, + .read = ssam_temp_hwmon_read, + .read_string = ssam_temp_hwmon_read_string, +}; + +static const struct hwmon_chip_info ssam_temp_hwmon_chip_info = { + .ops = &ssam_temp_hwmon_ops, + .info = ssam_temp_hwmon_info, +}; + +static int ssam_temp_probe(struct ssam_device *sdev) +{ + struct ssam_temp *ssam_temp; + struct device *hwmon_dev; + s16 sensors; + int channel; + int status; + + status = ssam_tmp_get_available_sensors(sdev, &sensors); + if (status) + return status; + + ssam_temp = devm_kzalloc(&sdev->dev, sizeof(*ssam_temp), GFP_KERNEL); + if (!ssam_temp) + return -ENOMEM; + + ssam_temp->sdev = sdev; + ssam_temp->sensors = sensors; + + /* Retrieve the name for each available sensor. 
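 * Channel n is present when BIT(n) is set in the 16-bit availability mask
 * and is addressed as SAM instance id n + 1, the same mapping used by
 * ssam_temp_hwmon_read() via ssam_tmp_get_temperature().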
*/ + for (channel = 0; channel < SSAM_TMP_SENSOR_MAX_COUNT; channel++) { + if (!(sensors & BIT(channel))) + continue; + + status = ssam_tmp_get_name(sdev, channel + 1, ssam_temp->names[channel], + SSAM_TMP_SENSOR_NAME_LENGTH); + if (status) + return status; + } + + hwmon_dev = devm_hwmon_device_register_with_info(&sdev->dev, "surface_thermal", ssam_temp, + &ssam_temp_hwmon_chip_info, NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct ssam_device_id ssam_temp_match[] = { + { SSAM_SDEV(TMP, SAM, 0x00, 0x02) }, + { }, +}; +MODULE_DEVICE_TABLE(ssam, ssam_temp_match); + +static struct ssam_device_driver ssam_temp = { + .probe = ssam_temp_probe, + .match_table = ssam_temp_match, + .driver = { + .name = "surface_temp", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_ssam_device_driver(ssam_temp); + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Thermal sensor subsystem driver for Surface System Aggregator Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index 853dbe708ff5dc..02c5a3bb1071cb 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -308,7 +308,9 @@ static int tmp401_temp_read(struct device *dev, u32 attr, int channel, long *val { struct tmp401_data *data = dev_get_drvdata(dev); struct regmap *regmap = data->regmap; + unsigned int regs[2] = { TMP401_TEMP_MSB[3][channel], TMP401_TEMP_CRIT_HYST }; unsigned int regval; + u16 regvals[2]; int reg, ret; switch (attr) { @@ -325,20 +327,11 @@ static int tmp401_temp_read(struct device *dev, u32 attr, int channel, long *val *val = tmp401_register_to_temp(regval, data->extended_range); break; case hwmon_temp_crit_hyst: - mutex_lock(&data->update_lock); - reg = TMP401_TEMP_MSB[3][channel]; - ret = regmap_read(regmap, reg, ®val); - if (ret < 0) - goto unlock; - *val = tmp401_register_to_temp(regval, data->extended_range); - ret = regmap_read(regmap, TMP401_TEMP_CRIT_HYST, ®val); - if (ret < 0) - goto unlock; - *val -= regval * 1000; -unlock: - mutex_unlock(&data->update_lock); + ret = regmap_multi_reg_read(regmap, regs, regvals, 2); if (ret < 0) return ret; + *val = tmp401_register_to_temp(regvals[0], data->extended_range) - + (regvals[1] * 1000); break; case hwmon_temp_fault: case hwmon_temp_min_alarm: diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 7a6f9532e59427..9537727aad9a89 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -410,18 +410,15 @@ static int tmp421_probe_from_dt(struct i2c_client *client, struct tmp421_data *d { struct device *dev = &client->dev; const struct device_node *np = dev->of_node; - struct device_node *child; int err; - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { if (strcmp(child->name, "channel")) continue; err = tmp421_probe_child_from_dt(client, child, data); - if (err) { - of_node_put(child); + if (err) return err; - } } return 0; diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c index 3ee1137533d632..0f629c6d7695ea 100644 --- a/drivers/hwmon/tmp464.c +++ b/drivers/hwmon/tmp464.c @@ -147,11 +147,11 @@ static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val { struct tmp464_data *data = dev_get_drvdata(dev); struct regmap *regmap = data->regmap; - unsigned int regval, regval2; + unsigned int regs[2]; + unsigned int regval; + u16 regvals[2]; int err = 0; - mutex_lock(&data->update_lock); - switch (attr) { case hwmon_temp_max_alarm: err = regmap_read(regmap, TMP464_THERM_STATUS_REG, ®val); @@ -172,26 +172,27 @@ 
static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val * complete. That means we have to cache the value internally * for one measurement cycle and report the cached value. */ + mutex_lock(&data->update_lock); if (!data->valid || time_after(jiffies, data->last_updated + msecs_to_jiffies(data->update_interval))) { err = regmap_read(regmap, TMP464_REMOTE_OPEN_REG, ®val); if (err < 0) - break; + goto unlock; data->open_reg = regval; data->last_updated = jiffies; data->valid = true; } *val = !!(data->open_reg & BIT(channel + 7)); +unlock: + mutex_unlock(&data->update_lock); break; case hwmon_temp_max_hyst: - err = regmap_read(regmap, TMP464_THERM_LIMIT[channel], ®val); + regs[0] = TMP464_THERM_LIMIT[channel]; + regs[1] = TMP464_TEMP_HYST_REG; + err = regmap_multi_reg_read(regmap, regs, regvals, 2); if (err < 0) break; - err = regmap_read(regmap, TMP464_TEMP_HYST_REG, ®val2); - if (err < 0) - break; - regval -= regval2; - *val = temp_from_reg(regval); + *val = temp_from_reg(regvals[0] - regvals[1]); break; case hwmon_temp_max: err = regmap_read(regmap, TMP464_THERM_LIMIT[channel], ®val); @@ -200,14 +201,12 @@ static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val *val = temp_from_reg(regval); break; case hwmon_temp_crit_hyst: - err = regmap_read(regmap, TMP464_THERM2_LIMIT[channel], ®val); + regs[0] = TMP464_THERM2_LIMIT[channel]; + regs[1] = TMP464_TEMP_HYST_REG; + err = regmap_multi_reg_read(regmap, regs, regvals, 2); if (err < 0) break; - err = regmap_read(regmap, TMP464_TEMP_HYST_REG, ®val2); - if (err < 0) - break; - regval -= regval2; - *val = temp_from_reg(regval); + *val = temp_from_reg(regvals[0] - regvals[1]); break; case hwmon_temp_crit: err = regmap_read(regmap, TMP464_THERM2_LIMIT[channel], ®val); @@ -239,8 +238,6 @@ static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val break; } - mutex_unlock(&data->update_lock); - return err; } @@ -565,18 +562,15 @@ static int tmp464_probe_child_from_dt(struct device *dev, static int tmp464_probe_from_dt(struct device *dev, struct tmp464_data *data) { const struct device_node *np = dev->of_node; - struct device_node *child; int err; - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { if (strcmp(child->name, "channel")) continue; err = tmp464_probe_child_from_dt(dev, child, data); - if (err) { - of_node_put(child); + if (err) return err; - } } return 0; diff --git a/drivers/hwmon/vexpress-hwmon.c b/drivers/hwmon/vexpress-hwmon.c index d82a3b454d0ed3..a2e350f52a9e9b 100644 --- a/drivers/hwmon/vexpress-hwmon.c +++ b/drivers/hwmon/vexpress-hwmon.c @@ -72,7 +72,7 @@ static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj, struct device_attribute, attr); if (dev_attr->show == vexpress_hwmon_label_show && - !of_get_property(dev->of_node, "label", NULL)) + !of_property_present(dev->of_node, "label")) return 0; return attr->mode; diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index 0acf6bd0227fac..67728f60333fe3 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -1451,7 +1451,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, static const struct file_operations watchdog_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .open = watchdog_open, .release = watchdog_close, .write = watchdog_write, diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 9fc6f6b863e04b..ea38ecf26fcbfb 100644 --- 
a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -487,23 +487,25 @@ struct coresight_device *coresight_get_sink(struct list_head *path) return csdev; } +u32 coresight_get_sink_id(struct coresight_device *csdev) +{ + if (!csdev->ea) + return 0; + + /* + * See function etm_perf_add_symlink_sink() to know where + * this comes from. + */ + return (u32) (unsigned long) csdev->ea->var; +} + static int coresight_sink_by_id(struct device *dev, const void *data) { struct coresight_device *csdev = to_coresight_device(dev); - unsigned long hash; if (csdev->type == CORESIGHT_DEV_TYPE_SINK || - csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { - - if (!csdev->ea) - return 0; - /* - * See function etm_perf_add_symlink_sink() to know where - * this comes from. - */ - hash = (unsigned long)csdev->ea->var; - - if ((u32)hash == *(u32 *)data) + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { + if (coresight_get_sink_id(csdev) == *(u32 *)data) return 1; } @@ -902,6 +904,7 @@ static void coresight_device_release(struct device *dev) struct coresight_device *csdev = to_coresight_device(dev); fwnode_handle_put(csdev->dev.fwnode); + free_percpu(csdev->perf_sink_id_map.cpu_map); kfree(csdev); } @@ -1159,6 +1162,16 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev)); dev_set_name(&csdev->dev, "%s", desc->name); + if (csdev->type == CORESIGHT_DEV_TYPE_SINK || + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { + spin_lock_init(&csdev->perf_sink_id_map.lock); + csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t); + if (!csdev->perf_sink_id_map.cpu_map) { + kfree(csdev); + ret = -ENOMEM; + goto err_out; + } + } /* * Make sure the device registration and the connection fixup * are synchronised, so that we don't see uninitialised devices diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c index ccef04f27f12fa..d0ae10bf612811 100644 --- a/drivers/hwtracing/coresight/coresight-cti-platform.c +++ b/drivers/hwtracing/coresight/coresight-cti-platform.c @@ -416,20 +416,16 @@ static int cti_plat_create_impdef_connections(struct device *dev, struct cti_drvdata *drvdata) { int rc = 0; - struct fwnode_handle *fwnode = dev_fwnode(dev); - struct fwnode_handle *child = NULL; - if (IS_ERR_OR_NULL(fwnode)) + if (IS_ERR_OR_NULL(dev_fwnode(dev))) return -EINVAL; - fwnode_for_each_child_node(fwnode, child) { + device_for_each_child_node_scoped(dev, child) { if (cti_plat_node_name_eq(child, CTI_DT_CONNS)) - rc = cti_plat_create_connection(dev, drvdata, - child); + rc = cti_plat_create_connection(dev, drvdata, child); if (rc != 0) break; } - fwnode_handle_put(child); return rc; } diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c index ac70c0b491bebd..bb85fa663ffcad 100644 --- a/drivers/hwtracing/coresight/coresight-dummy.c +++ b/drivers/hwtracing/coresight/coresight-dummy.c @@ -21,8 +21,12 @@ DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source"); DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink"); static int dummy_source_enable(struct coresight_device *csdev, - struct perf_event *event, enum cs_mode mode) + struct perf_event *event, enum cs_mode mode, + __maybe_unused struct coresight_trace_id_map *id_map) { + if (!coresight_take_mode(csdev, mode)) + return -EBUSY; + dev_dbg(csdev->dev.parent, "Dummy source enabled\n"); return 0; @@ -31,6 +35,7 @@ static int dummy_source_enable(struct 
coresight_device *csdev, static void dummy_source_disable(struct coresight_device *csdev, struct perf_event *event) { + coresight_set_mode(csdev, CS_MODE_DISABLED); dev_dbg(csdev->dev.parent, "Dummy source disabled\n"); } diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 7edd3f1d0d465d..aea9ac9c4bd069 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -652,7 +652,6 @@ static const struct file_operations etb_fops = { .open = etb_open, .read = etb_read, .release = etb_release, - .llseek = no_llseek, }; static struct attribute *coresight_etb_mgmt_attrs[] = { diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index c0c60e6a1703e3..ad6a8f4b70b6fc 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -229,15 +229,24 @@ static void free_event_data(struct work_struct *work) struct list_head **ppath; ppath = etm_event_cpu_path_ptr(event_data, cpu); - if (!(IS_ERR_OR_NULL(*ppath))) + if (!(IS_ERR_OR_NULL(*ppath))) { + struct coresight_device *sink = coresight_get_sink(*ppath); + + /* + * Mark perf event as done for trace id allocator, but don't call + * coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions + * never free trace IDs to ensure that the ID associated with a CPU + * cannot change during their and other's concurrent sessions. Instead, + * a refcount is used so that the last event to call + * coresight_trace_id_perf_stop() frees all IDs. + */ + coresight_trace_id_perf_stop(&sink->perf_sink_id_map); + coresight_release_path(*ppath); + } *ppath = NULL; - coresight_trace_id_put_cpu_id(cpu); } - /* mark perf event as done for trace id allocator */ - coresight_trace_id_perf_stop(); - free_percpu(event_data->path); kfree(event_data); } @@ -325,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, sink = user_sink = coresight_get_sink_by_id(id); } - /* tell the trace ID allocator that a perf event is starting up */ - coresight_trace_id_perf_start(); - /* check if user wants a coresight configuration selected */ cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32); if (cfg_hash) { @@ -401,13 +407,14 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, } /* ensure we can allocate a trace ID for this CPU */ - trace_id = coresight_trace_id_get_cpu_id(cpu); + trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map); if (!IS_VALID_CS_TRACE_ID(trace_id)) { cpumask_clear_cpu(cpu, mask); coresight_release_path(path); continue; } + coresight_trace_id_perf_start(&sink->perf_sink_id_map); *etm_event_cpu_path_ptr(event_data, cpu) = path; } @@ -453,6 +460,7 @@ static void etm_event_start(struct perf_event *event, int flags) struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); struct list_head *path; u64 hw_id; + u8 trace_id; if (!csdev) goto fail; @@ -495,7 +503,8 @@ static void etm_event_start(struct perf_event *event, int flags) goto fail_end_stop; /* Finally enable the tracer */ - if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) + if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF, + &sink->perf_sink_id_map)) goto fail_disable_path; /* @@ -504,10 +513,16 @@ static void etm_event_start(struct perf_event *event, int flags) */ if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) { cpumask_set_cpu(cpu, &event_data->aux_hwid_done); - hw_id = 
FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK, - CS_AUX_HW_ID_CURR_VERSION); - hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, - coresight_trace_id_read_cpu_id(cpu)); + + trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map); + + hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK, + CS_AUX_HW_ID_MAJOR_VERSION); + hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK, + CS_AUX_HW_ID_MINOR_VERSION); + hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id); + hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink)); + perf_report_aux_output_id(event, hw_id); } diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h index bebbadee2ceb32..744531158d6ba4 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.h +++ b/drivers/hwtracing/coresight/coresight-etm-perf.h @@ -62,7 +62,6 @@ struct etm_event_data { struct list_head * __percpu *path; }; -#if IS_ENABLED(CONFIG_CORESIGHT) int etm_perf_symlink(struct coresight_device *csdev, bool link); int etm_perf_add_symlink_sink(struct coresight_device *csdev); void etm_perf_del_symlink_sink(struct coresight_device *csdev); @@ -77,23 +76,6 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle) int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc); void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc); -#else -static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) -{ return -EINVAL; } -int etm_perf_add_symlink_sink(struct coresight_device *csdev) -{ return -EINVAL; } -void etm_perf_del_symlink_sink(struct coresight_device *csdev) {} -static inline void *etm_perf_sink_config(struct perf_output_handle *handle) -{ - return NULL; -} -int etm_perf_add_symlink_cscfg(struct device *dev, - struct cscfg_config_desc *config_desc) -{ return -EINVAL; } -void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) {} - -#endif /* CONFIG_CORESIGHT */ - int __init etm_perf_init(void); void etm_perf_exit(void); diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c index 8b362605d24298..c103f4c70f5d07 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c @@ -481,7 +481,8 @@ void etm_release_trace_id(struct etm_drvdata *drvdata) } static int etm_enable_perf(struct coresight_device *csdev, - struct perf_event *event) + struct perf_event *event, + struct coresight_trace_id_map *id_map) { struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); int trace_id; @@ -500,7 +501,7 @@ static int etm_enable_perf(struct coresight_device *csdev, * with perf locks - we know the ID cannot change until perf shuts down * the session */ - trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu); + trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map); if (!IS_VALID_CS_TRACE_ID(trace_id)) { dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n", dev_name(&drvdata->csdev->dev), drvdata->cpu); @@ -553,7 +554,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev) } static int etm_enable(struct coresight_device *csdev, struct perf_event *event, - enum cs_mode mode) + enum cs_mode mode, struct coresight_trace_id_map *id_map) { int ret; struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); @@ -568,7 +569,7 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event, ret = 
etm_enable_sysfs(csdev); break; case CS_MODE_PERF: - ret = etm_enable_perf(csdev, event); + ret = etm_enable_perf(csdev, event, id_map); break; default: ret = -EINVAL; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index bf01f01964cf90..66d44a404ad0cd 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -752,7 +752,8 @@ static int etm4_parse_event_config(struct coresight_device *csdev, } static int etm4_enable_perf(struct coresight_device *csdev, - struct perf_event *event) + struct perf_event *event, + struct coresight_trace_id_map *id_map) { int ret = 0, trace_id; struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); @@ -775,7 +776,7 @@ static int etm4_enable_perf(struct coresight_device *csdev, * with perf locks - we know the ID cannot change until perf shuts down * the session */ - trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu); + trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map); if (!IS_VALID_CS_TRACE_ID(trace_id)) { dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n", dev_name(&drvdata->csdev->dev), drvdata->cpu); @@ -837,7 +838,7 @@ static int etm4_enable_sysfs(struct coresight_device *csdev) } static int etm4_enable(struct coresight_device *csdev, struct perf_event *event, - enum cs_mode mode) + enum cs_mode mode, struct coresight_trace_id_map *id_map) { int ret; @@ -851,7 +852,7 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event, ret = etm4_enable_sysfs(csdev); break; case CS_MODE_PERF: - ret = etm4_enable_perf(csdev, event); + ret = etm4_enable_perf(csdev, event, id_map); break; default: ret = -EINVAL; diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 61a46d3bdcc85a..05f891ca6b5c9d 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -148,6 +148,7 @@ int coresight_make_links(struct coresight_device *orig, struct coresight_device *target); void coresight_remove_links(struct coresight_device *orig, struct coresight_connection *conn); +u32 coresight_get_sink_id(struct coresight_device *csdev); #if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X) extern int etm_readl_cp14(u32 off, unsigned int *val); diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 117dbb484543c3..cb3e04755c9929 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -194,7 +194,8 @@ static void stm_enable_hw(struct stm_drvdata *drvdata) } static int stm_enable(struct coresight_device *csdev, struct perf_event *event, - enum cs_mode mode) + enum cs_mode mode, + __maybe_unused struct coresight_trace_id_map *trace_id) { struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c index 1e67cc7758d793..a01c9e54e2edbb 100644 --- a/drivers/hwtracing/coresight/coresight-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-sysfs.c @@ -9,6 +9,7 @@ #include #include "coresight-priv.h" +#include "coresight-trace-id.h" /* * Use IDR to map the hash of the source's device name @@ -63,7 +64,7 @@ static int coresight_enable_source_sysfs(struct coresight_device *csdev, */ lockdep_assert_held(&coresight_mutex); if (coresight_get_mode(csdev) != CS_MODE_SYSFS) { - ret = 
source_ops(csdev)->enable(csdev, data, mode); + ret = source_ops(csdev)->enable(csdev, data, mode, NULL); if (ret) return ret; } diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index b54562f392f3de..3a482fd2cb225b 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -220,7 +220,6 @@ static const struct file_operations tmc_fops = { .open = tmc_open, .read = tmc_read, .release = tmc_release, - .llseek = no_llseek, }; static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid) diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index e75428fa1592a7..a48bb85d0e7f44 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -36,7 +36,8 @@ struct etr_buf_hw { * etr_perf_buffer - Perf buffer used for ETR * @drvdata - The ETR drvdaga this buffer has been allocated for. * @etr_buf - Actual buffer used by the ETR - * @pid - The PID this etr_perf_buffer belongs to. + * @pid - The PID of the session owner that etr_perf_buffer + * belongs to. * @snaphost - Perf session mode * @nr_pages - Number of pages in the ring buffer. * @pages - Array of Pages in the ring buffer. @@ -261,6 +262,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table) { tmc_free_table_pages(sg_table); tmc_free_data_pages(sg_table); + kfree(sg_table); } EXPORT_SYMBOL_GPL(tmc_free_sg_table); @@ -342,7 +344,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev, rc = tmc_alloc_table_pages(sg_table); if (rc) { tmc_free_sg_table(sg_table); - kfree(sg_table); return ERR_PTR(rc); } @@ -1662,7 +1663,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data) goto unlock_out; } - /* Get a handle on the pid of the process to monitor */ + /* Get a handle on the pid of the session owner */ pid = etr_perf->pid; /* Do not proceed if this device is associated with another session */ diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h index c77763b49de0c6..2671926be62a37 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -171,8 +171,9 @@ struct etr_buf { * @csdev: component vitals needed by the framework. * @miscdev: specifics to handle "/dev/xyz.tmc" entry. * @spinlock: only one at a time pls. - * @pid: Process ID of the process being monitored by the session - * that is using this component. + * @pid: Process ID of the process that owns the session that is using + * this component. For example this would be the pid of the Perf + * process. * @buf: Snapshot of the trace data for ETF/ETB. * @etr_buf: details of buffer used in TMC-ETR * @len: size of the available trace for ETF/ETB. 
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c index 0726f8842552c6..b7d99e91ab8497 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.c +++ b/drivers/hwtracing/coresight/coresight-tpdm.c @@ -439,7 +439,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata) } static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event, - enum cs_mode mode) + enum cs_mode mode, + __maybe_unused struct coresight_trace_id_map *id_map) { struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); @@ -449,6 +450,11 @@ static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event, return -EBUSY; } + if (!coresight_take_mode(csdev, mode)) { + spin_unlock(&drvdata->spinlock); + return -EBUSY; + } + __tpdm_enable(drvdata); drvdata->enable = true; spin_unlock(&drvdata->spinlock); @@ -506,6 +512,7 @@ static void tpdm_disable(struct coresight_device *csdev, } __tpdm_disable(drvdata); + coresight_set_mode(csdev, CS_MODE_DISABLED); drvdata->enable = false; spin_unlock(&drvdata->spinlock); diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c index af5b4ef59ceab8..d98e12cb30eca0 100644 --- a/drivers/hwtracing/coresight/coresight-trace-id.c +++ b/drivers/hwtracing/coresight/coresight-trace-id.c @@ -3,6 +3,7 @@ * Copyright (c) 2022, Linaro Limited, All rights reserved. * Author: Mike Leach */ +#include #include #include #include @@ -11,18 +12,12 @@ #include "coresight-trace-id.h" -/* Default trace ID map. Used on systems that don't require per sink mappings */ -static struct coresight_trace_id_map id_map_default; - -/* maintain a record of the mapping of IDs and pending releases per cpu */ -static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0); -static cpumask_t cpu_id_release_pending; - -/* perf session active counter */ -static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0); - -/* lock to protect id_map and cpu data */ -static DEFINE_SPINLOCK(id_map_lock); +/* Default trace ID map. 
Used in sysfs mode and for system sources */ +static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0); +static struct coresight_trace_id_map id_map_default = { + .cpu_map = &id_map_default_cpu_ids, + .lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock) +}; /* #define TRACE_ID_DEBUG 1 */ #if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST) @@ -32,7 +27,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map, { pr_debug("%s id_map::\n", func_name); pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids); - pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids); } #define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__) #define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id) @@ -46,9 +40,9 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map, #endif /* unlocked read of current trace ID value for given CPU */ -static int _coresight_trace_id_read_cpu_id(int cpu) +static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map) { - return atomic_read(&per_cpu(cpu_id, cpu)); + return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu)); } /* look for next available odd ID, return 0 if none found */ @@ -119,49 +113,33 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma clear_bit(id, id_map->used_ids); } -static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map) -{ - if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id)) - return; - set_bit(id, id_map->pend_rel_ids); -} - /* - * release all pending IDs for all current maps & clear CPU associations - * - * This currently operates on the default id map, but may be extended to - * operate on all registered id maps if per sink id maps are used. + * Release all IDs and clear CPU associations. */ -static void coresight_trace_id_release_all_pending(void) +static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map) { - struct coresight_trace_id_map *id_map = &id_map_default; unsigned long flags; - int cpu, bit; + int cpu; - spin_lock_irqsave(&id_map_lock, flags); - for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) { - clear_bit(bit, id_map->used_ids); - clear_bit(bit, id_map->pend_rel_ids); - } - for_each_cpu(cpu, &cpu_id_release_pending) { - atomic_set(&per_cpu(cpu_id, cpu), 0); - cpumask_clear_cpu(cpu, &cpu_id_release_pending); - } - spin_unlock_irqrestore(&id_map_lock, flags); + spin_lock_irqsave(&id_map->lock, flags); + bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX); + for_each_possible_cpu(cpu) + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0); + spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID_MAP(id_map); } -static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map) +static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map) { unsigned long flags; int id; - spin_lock_irqsave(&id_map_lock, flags); + spin_lock_irqsave(&id_map->lock, flags); /* check for existing allocation for this CPU */ - id = _coresight_trace_id_read_cpu_id(cpu); + id = _coresight_trace_id_read_cpu_id(cpu, id_map); if (id) - goto get_cpu_id_clr_pend; + goto get_cpu_id_out_unlock; /* * Find a new ID. 
@@ -180,44 +158,32 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_ goto get_cpu_id_out_unlock; /* allocate the new id to the cpu */ - atomic_set(&per_cpu(cpu_id, cpu), id); - -get_cpu_id_clr_pend: - /* we are (re)using this ID - so ensure it is not marked for release */ - cpumask_clear_cpu(cpu, &cpu_id_release_pending); - clear_bit(id, id_map->pend_rel_ids); + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id); get_cpu_id_out_unlock: - spin_unlock_irqrestore(&id_map_lock, flags); + spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID_CPU(cpu, id); DUMP_ID_MAP(id_map); return id; } -static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map) +static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map) { unsigned long flags; int id; /* check for existing allocation for this CPU */ - id = _coresight_trace_id_read_cpu_id(cpu); + id = _coresight_trace_id_read_cpu_id(cpu, id_map); if (!id) return; - spin_lock_irqsave(&id_map_lock, flags); + spin_lock_irqsave(&id_map->lock, flags); - if (atomic_read(&perf_cs_etm_session_active)) { - /* set release at pending if perf still active */ - coresight_trace_id_set_pend_rel(id, id_map); - cpumask_set_cpu(cpu, &cpu_id_release_pending); - } else { - /* otherwise clear id */ - coresight_trace_id_free(id, id_map); - atomic_set(&per_cpu(cpu_id, cpu), 0); - } + coresight_trace_id_free(id, id_map); + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0); - spin_unlock_irqrestore(&id_map_lock, flags); + spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID_CPU(cpu, id); DUMP_ID_MAP(id_map); } @@ -227,10 +193,10 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i unsigned long flags; int id; - spin_lock_irqsave(&id_map_lock, flags); + spin_lock_irqsave(&id_map->lock, flags); /* prefer odd IDs for system components to avoid legacy CPU IDS */ id = coresight_trace_id_alloc_new_id(id_map, 0, true); - spin_unlock_irqrestore(&id_map_lock, flags); + spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID(id); DUMP_ID_MAP(id_map); @@ -241,9 +207,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map * { unsigned long flags; - spin_lock_irqsave(&id_map_lock, flags); + spin_lock_irqsave(&id_map->lock, flags); coresight_trace_id_free(id, id_map); - spin_unlock_irqrestore(&id_map_lock, flags); + spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID(id); DUMP_ID_MAP(id_map); @@ -253,22 +219,40 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map * int coresight_trace_id_get_cpu_id(int cpu) { - return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default); + return _coresight_trace_id_get_cpu_id(cpu, &id_map_default); } EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id); +int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) +{ + return _coresight_trace_id_get_cpu_id(cpu, id_map); +} +EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map); + void coresight_trace_id_put_cpu_id(int cpu) { - coresight_trace_id_map_put_cpu_id(cpu, &id_map_default); + _coresight_trace_id_put_cpu_id(cpu, &id_map_default); } EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id); +void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) +{ + _coresight_trace_id_put_cpu_id(cpu, id_map); +} +EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map); + int coresight_trace_id_read_cpu_id(int cpu) { - return _coresight_trace_id_read_cpu_id(cpu); + return 
_coresight_trace_id_read_cpu_id(cpu, &id_map_default); } EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id); +int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) +{ + return _coresight_trace_id_read_cpu_id(cpu, id_map); +} +EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map); + int coresight_trace_id_get_system_id(void) { return coresight_trace_id_map_get_system_id(&id_map_default); @@ -281,17 +265,17 @@ void coresight_trace_id_put_system_id(int id) } EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id); -void coresight_trace_id_perf_start(void) +void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map) { - atomic_inc(&perf_cs_etm_session_active); - PERF_SESSION(atomic_read(&perf_cs_etm_session_active)); + atomic_inc(&id_map->perf_cs_etm_session_active); + PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active)); } EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start); -void coresight_trace_id_perf_stop(void) +void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map) { - if (!atomic_dec_return(&perf_cs_etm_session_active)) - coresight_trace_id_release_all_pending(); - PERF_SESSION(atomic_read(&perf_cs_etm_session_active)); + if (!atomic_dec_return(&id_map->perf_cs_etm_session_active)) + coresight_trace_id_release_all(id_map); + PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active)); } EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop); diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h index 3797777d367e6f..9aae50a553caa3 100644 --- a/drivers/hwtracing/coresight/coresight-trace-id.h +++ b/drivers/hwtracing/coresight/coresight-trace-id.h @@ -17,9 +17,10 @@ * released when done. * * In order to ensure that a consistent cpu / ID matching is maintained - * throughout a perf cs_etm event session - a session in progress flag will - * be maintained, and released IDs not cleared until the perf session is - * complete. This allows the same CPU to be re-allocated its prior ID. + * throughout a perf cs_etm event session - a session in progress flag will be + * maintained for each sink, and IDs are cleared when all the perf sessions + * complete. This allows the same CPU to be re-allocated its prior ID when + * events are scheduled in and out. * * * Trace ID maps will be created and initialised to prevent architecturally @@ -32,10 +33,6 @@ #include #include - -/* architecturally we have 128 IDs some of which are reserved */ -#define CORESIGHT_TRACE_IDS_MAX 128 - /* ID 0 is reserved */ #define CORESIGHT_TRACE_ID_RES_0 0 @@ -46,23 +43,6 @@ #define IS_VALID_CS_TRACE_ID(id) \ ((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP)) -/** - * Trace ID map. - * - * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs. - * Initialised so that the reserved IDs are permanently marked as - * in use. - * @pend_rel_ids: CPU IDs that have been released by the trace source but not - * yet marked as available, to allow re-allocation to the same - * CPU during a perf session. - */ -struct coresight_trace_id_map { - DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX); - DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX); -}; - -/* Allocate and release IDs for a single default trace ID map */ - /** * Read and optionally allocate a CoreSight trace ID and associate with a CPU. 
* @@ -78,19 +58,27 @@ struct coresight_trace_id_map { */ int coresight_trace_id_get_cpu_id(int cpu); +/** + * Version of coresight_trace_id_get_cpu_id() that allows the ID map to operate + * on to be provided. + */ +int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); + /** * Release an allocated trace ID associated with the CPU. * - * This will release the CoreSight trace ID associated with the CPU, - * unless a perf session is in operation. - * - * If a perf session is in operation then the ID will be marked as pending - * release. + * This will release the CoreSight trace ID associated with the CPU. * * @cpu: The CPU index to release the associated trace ID. */ void coresight_trace_id_put_cpu_id(int cpu); +/** + * Version of coresight_trace_id_put_cpu_id() that allows the ID map to operate + * on to be provided. + */ +void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); + /** * Read the current allocated CoreSight Trace ID value for the CPU. * @@ -111,6 +99,12 @@ void coresight_trace_id_put_cpu_id(int cpu); */ int coresight_trace_id_read_cpu_id(int cpu); +/** + * Version of coresight_trace_id_read_cpu_id() that allows the ID map to operate + * on to be provided. + */ +int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); + /** * Allocate a CoreSight trace ID for a system component. * @@ -136,21 +130,21 @@ void coresight_trace_id_put_system_id(int id); /** * Notify the Trace ID allocator that a perf session is starting. * - * Increase the perf session reference count - called by perf when setting up - * a trace event. + * Increase the perf session reference count - called by perf when setting up a + * trace event. * - * This reference count is used by the ID allocator to ensure that trace IDs - * associated with a CPU cannot change or be released during a perf session. + * Perf sessions never free trace IDs to ensure that the ID associated with a + * CPU cannot change during their and other's concurrent sessions. Instead, + * this refcount is used so that the last event to finish always frees all IDs. */ -void coresight_trace_id_perf_start(void); +void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map); /** * Notify the ID allocator that a perf session is stopping. * - * Decrease the perf session reference count. - * if this causes the count to go to zero, then all Trace IDs marked as pending - * release, will be released. + * Decrease the perf session reference count. If this causes the count to go to + * zero, then all Trace IDs will be released. 
*/ -void coresight_trace_id_perf_stop(void); +void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map); #endif /* _CORESIGHT_TRACE_ID_H */ diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c index f9ebf20c91e63d..ef7f560f0ffabb 100644 --- a/drivers/hwtracing/coresight/ultrasoc-smb.c +++ b/drivers/hwtracing/coresight/ultrasoc-smb.c @@ -163,7 +163,6 @@ static const struct file_operations smb_fops = { .open = smb_open, .read = smb_read, .release = smb_release, - .llseek = no_llseek, }; static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr, diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index be63d5b8f193e0..66123d684ac9e7 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -1677,7 +1677,6 @@ static const struct file_operations intel_th_msc_fops = { .release = intel_th_msc_release, .read = intel_th_msc_read, .mmap = intel_th_msc_mmap, - .llseek = no_llseek, .owner = THIS_MODULE, }; diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index ccf39a80dc4f60..cdba4e875b281f 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -839,7 +839,6 @@ static const struct file_operations stm_fops = { .mmap = stm_char_mmap, .unlocked_ioctl = stm_char_ioctl, .compat_ioctl = compat_ptr_ioctl, - .llseek = no_llseek, }; static void stm_device_release(struct device *dev) diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 44710267d6699b..c232054fddd6fd 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -40,14 +40,6 @@ config I2C_BOARDINFO bool default y -config I2C_COMPAT - bool "Enable compatibility bits for old user-space" - default y - help - Say Y here if you intend to run lm-sensors 3.1.1 or older, or any - other user-space package which expects i2c adapters to be class - devices. If you don't know, say Y. - config I2C_CHARDEV tristate "I2C device interface" help diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a22f9125322a72..6b3ba7e5723aa1 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -559,28 +559,33 @@ config I2C_DAVINCI For details please see http://www.ti.com/davinci config I2C_DESIGNWARE_CORE - tristate + tristate "Synopsys DesignWare I2C adapter" select REGMAP + help + This option enables support for the Synopsys DesignWare I2C adapter. + This driver includes support for the I2C host on the Synopsys + Designware I2C adapter. + + To compile the driver as a module, choose M here: the module will be + called i2c-designware-core. + +if I2C_DESIGNWARE_CORE config I2C_DESIGNWARE_SLAVE bool "Synopsys DesignWare Slave" - depends on I2C_DESIGNWARE_CORE select I2C_SLAVE help If you say yes to this option, support will be included for the Synopsys DesignWare I2C slave adapter. - This is not a standalone module, this module compiles together with - i2c-designware-core. - config I2C_DESIGNWARE_PLATFORM - tristate "Synopsys DesignWare Platform" + tristate "Synopsys DesignWare Platform driver" depends on (ACPI && COMMON_CLK) || !ACPI - select I2C_DESIGNWARE_CORE select MFD_SYSCON if MIPS_BAIKAL_T1 + default I2C_DESIGNWARE_CORE help If you say yes to this option, support will be included for the - Synopsys DesignWare I2C adapter. + Synopsys DesignWare I2C adapters on the platform bus. This driver can also be built as a module. If so, the module will be called i2c-designware-platform. 
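Stepping back to the coresight-trace-id changes a few hunks up: the updated comments describe per-sink session accounting in which trace IDs are never freed while any perf session is active, and the last session to stop releases them all. The fragment below is a simplified userspace sketch of that "last one out frees everything" refcount pattern, using C11 atomics and a bool array as illustrative stand-ins for the kernel's perf_cs_etm_session_active counter and used_ids bitmap; the names are not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define IDS_MAX 128

/* Simplified analogue of a per-sink trace ID map. */
struct id_map {
	bool used[IDS_MAX];		/* stand-in for the used_ids bitmap */
	atomic_int sessions_active;	/* stand-in for the per-map session refcount */
};

static void session_start(struct id_map *m)
{
	atomic_fetch_add(&m->sessions_active, 1);
}

/* IDs are only released once the last concurrent session stops. */
static void session_stop(struct id_map *m)
{
	if (atomic_fetch_sub(&m->sessions_active, 1) == 1)
		memset(m->used, 0, sizeof(m->used));
}

int main(void)
{
	struct id_map m = { 0 };

	m.used[0x10] = true;	/* pretend one CPU holds trace ID 0x10 */

	session_start(&m);	/* first perf event */
	session_start(&m);	/* overlapping second event */
	session_stop(&m);	/* ID survives: another session is still active */
	printf("after first stop: id 0x10 used = %d\n", m.used[0x10]);
	session_stop(&m);	/* last session out releases all IDs */
	printf("after last stop:  id 0x10 used = %d\n", m.used[0x10]);
	return 0;
}

Keeping the IDs allocated until the count reaches zero is what lets a CPU be handed the same trace ID each time its events are scheduled in during overlapping sessions, which is the property the header comment calls out.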
@@ -613,17 +618,19 @@ config I2C_DESIGNWARE_BAYTRAIL a BayTrail system using the AXP288. config I2C_DESIGNWARE_PCI - tristate "Synopsys DesignWare PCI" + tristate "Synopsys DesignWare PCI driver" depends on PCI - select I2C_DESIGNWARE_CORE select I2C_CCGX_UCSI help If you say yes to this option, support will be included for the - Synopsys DesignWare I2C adapter. Only master mode is supported. + Synopsys DesignWare I2C adapters on the PCI bus. Only master mode is + supported. This driver can also be built as a module. If so, the module will be called i2c-designware-pci. +endif + config I2C_DIGICOLOR tristate "Conexant Digicolor I2C driver" depends on ARCH_DIGICOLOR || COMPILE_TEST @@ -772,6 +779,18 @@ config I2C_JZ4780 If you don't know what to do here, say N. +config I2C_KEBA + tristate "KEBA I2C controller support" + depends on HAS_IOMEM + depends on KEBA_CP500 || COMPILE_TEST + select AUXILIARY_BUS + help + This driver supports the I2C controller found in KEBA system FPGA + devices. + + This driver can also be built as a module. If so, the module + will be called i2c-keba. + config I2C_KEMPLD tristate "Kontron COM I2C Controller" depends on MFD_KEMPLD diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 78d0561339e5be..ecc07c50f2a0fe 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_I2C_IMX) += i2c-imx.o obj-$(CONFIG_I2C_IMX_LPI2C) += i2c-imx-lpi2c.o obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o +obj-$(CONFIG_I2C_KEBA) += i2c-keba.o obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o obj-$(CONFIG_I2C_LPC2K) += i2c-lpc2k.o obj-$(CONFIG_I2C_LS2X) += i2c-ls2x.o diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index 9d7b4efe26ad01..544c94e86b8967 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -479,9 +479,8 @@ static struct i2c_adapter ali1535_adapter = { static const struct pci_device_id ali1535_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, - { }, + { } }; - MODULE_DEVICE_TABLE(pci, ali1535_ids); static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c index d3ac1c77a5098d..6f0ef587e76d14 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-plat.c +++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c @@ -340,7 +340,7 @@ static void i2c_amd_remove(struct platform_device *pdev) static const struct acpi_device_id i2c_amd_acpi_match[] = { { "AMDI0011" }, - { }, + { } }; MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match); diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index ce8c4846b7fae4..cc5a26637fd540 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -170,6 +170,13 @@ struct aspeed_i2c_bus { static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus); +/* precondition: bus.lock has been acquired. 
*/ +static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) +{ + bus->master_state = ASPEED_I2C_MASTER_STOP; + writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); +} + static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) { unsigned long time_left, flags; @@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) command); reinit_completion(&bus->cmd_complete); - writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); + aspeed_i2c_do_stop(bus); spin_unlock_irqrestore(&bus->lock, flags); time_left = wait_for_completion_timeout( @@ -390,13 +397,6 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus) writel(command, bus->base + ASPEED_I2C_CMD_REG); } -/* precondition: bus.lock has been acquired. */ -static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) -{ - bus->master_state = ASPEED_I2C_MASTER_STOP; - writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); -} - /* precondition: bus.lock has been acquired. */ static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus) { @@ -991,7 +991,7 @@ static const struct of_device_id aspeed_i2c_bus_of_table[] = { .compatible = "aspeed,ast2600-i2c-bus", .data = aspeed_i2c_25xx_get_clk_reg_val, }, - { }, + { } }; MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table); diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index e8a688d04aee0f..f31d352d98b574 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -20,12 +20,17 @@ #include #include #include +#include +#include #include +#include #include #include #include #include +#define DEFAULT_SYMBOL_NAMESPACE I2C_DW_COMMON + #include "i2c-designware-core.h" static char *abort_sources[] = { @@ -188,7 +193,7 @@ static const u32 supported_speeds[] = { I2C_MAX_STANDARD_MODE_FREQ, }; -int i2c_dw_validate_speed(struct dw_i2c_dev *dev) +static int i2c_dw_validate_speed(struct dw_i2c_dev *dev) { struct i2c_timings *t = &dev->timings; unsigned int i; @@ -208,7 +213,44 @@ int i2c_dw_validate_speed(struct dw_i2c_dev *dev) return -EINVAL; } -EXPORT_SYMBOL_GPL(i2c_dw_validate_speed); + +#ifdef CONFIG_OF + +#include + +#define MSCC_ICPU_CFG_TWI_DELAY 0x0 +#define MSCC_ICPU_CFG_TWI_DELAY_ENABLE BIT(0) +#define MSCC_ICPU_CFG_TWI_SPIKE_FILTER 0x4 + +static int mscc_twi_set_sda_hold_time(struct dw_i2c_dev *dev) +{ + writel((dev->sda_hold_time << 1) | MSCC_ICPU_CFG_TWI_DELAY_ENABLE, + dev->ext + MSCC_ICPU_CFG_TWI_DELAY); + + return 0; +} + +static void i2c_dw_of_configure(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct dw_i2c_dev *dev = dev_get_drvdata(device); + + switch (dev->flags & MODEL_MASK) { + case MODEL_MSCC_OCELOT: + dev->ext = devm_platform_ioremap_resource(pdev, 1); + if (!IS_ERR(dev->ext)) + dev->set_sda_hold_time = mscc_twi_set_sda_hold_time; + break; + default: + break; + } +} + +#else /* CONFIG_OF */ + +static inline void i2c_dw_of_configure(struct device *device) { } + +#endif /* CONFIG_OF */ #ifdef CONFIG_ACPI @@ -255,7 +297,7 @@ static void i2c_dw_acpi_params(struct device *device, char method[], kfree(buf.pointer); } -int i2c_dw_acpi_configure(struct device *device) +static void i2c_dw_acpi_configure(struct device *device) { struct dw_i2c_dev *dev = dev_get_drvdata(device); struct i2c_timings *t = &dev->timings; @@ -285,10 +327,7 @@ int i2c_dw_acpi_configure(struct device *device) dev->sda_hold_time = fs_ht; break; } - - return 0; } -EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure); static 
u32 i2c_dw_acpi_round_bus_speed(struct device *device) { @@ -310,11 +349,13 @@ static u32 i2c_dw_acpi_round_bus_speed(struct device *device) #else /* CONFIG_ACPI */ +static inline void i2c_dw_acpi_configure(struct device *device) { } + static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; } #endif /* CONFIG_ACPI */ -void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev) +static void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev) { u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev); struct i2c_timings *t = &dev->timings; @@ -330,10 +371,47 @@ void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev) else t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; } -EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed); -u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) +int i2c_dw_fw_parse_and_configure(struct dw_i2c_dev *dev) +{ + struct i2c_timings *t = &dev->timings; + struct device *device = dev->dev; + struct fwnode_handle *fwnode = dev_fwnode(device); + + i2c_parse_fw_timings(device, t, false); + + i2c_dw_adjust_bus_speed(dev); + + if (is_of_node(fwnode)) + i2c_dw_of_configure(device); + else if (is_acpi_node(fwnode)) + i2c_dw_acpi_configure(device); + + return i2c_dw_validate_speed(dev); +} +EXPORT_SYMBOL_GPL(i2c_dw_fw_parse_and_configure); + +static u32 i2c_dw_read_scl_reg(struct dw_i2c_dev *dev, u32 reg) { + u32 val; + int ret; + + ret = i2c_dw_acquire_lock(dev); + if (ret) + return 0; + + ret = regmap_read(dev->map, reg, &val); + i2c_dw_release_lock(dev); + + return ret ? 0 : val; +} + +u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, + u32 tSYMBOL, u32 tf, int cond, int offset) +{ + if (!ic_clk) + return i2c_dw_read_scl_reg(dev, reg); + /* * DesignWare I2C core doesn't seem to have solid strategy to meet * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec @@ -372,8 +450,12 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) 3 + offset; } -u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) +u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, + u32 tLOW, u32 tf, int offset) { + if (!ic_clk) + return i2c_dw_read_scl_reg(dev, reg); + /* * Conditional expression: * @@ -441,6 +523,7 @@ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) void __i2c_dw_disable(struct dw_i2c_dev *dev) { + struct i2c_timings *t = &dev->timings; unsigned int raw_intr_stats; unsigned int enable; int timeout = 100; @@ -453,6 +536,19 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev) abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD; if (abort_needed) { + if (!(enable & DW_IC_ENABLE_ENABLE)) { + regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE); + /* + * Wait 10 times the signaling period of the highest I2C + * transfer supported by the driver (for 400KHz this is + * 25us) to ensure the I2C ENABLE bit is already set + * as described in the DesignWare I2C databook. 
+ */ + fsleep(DIV_ROUND_CLOSEST_ULL(10 * MICRO, t->bus_freq_hz)); + /* Set ENABLE bit before setting ABORT */ + enable |= DW_IC_ENABLE_ENABLE; + } + regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT); ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable, !(enable & DW_IC_ENABLE_ABORT), 10, @@ -653,6 +749,84 @@ void i2c_dw_disable(struct dw_i2c_dev *dev) i2c_dw_release_lock(dev); } +EXPORT_SYMBOL_GPL(i2c_dw_disable); + +int i2c_dw_probe(struct dw_i2c_dev *dev) +{ + device_set_node(&dev->adapter.dev, dev_fwnode(dev->dev)); + + switch (dev->mode) { + case DW_IC_SLAVE: + return i2c_dw_probe_slave(dev); + case DW_IC_MASTER: + return i2c_dw_probe_master(dev); + default: + dev_err(dev->dev, "Wrong operation mode: %d\n", dev->mode); + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(i2c_dw_probe); + +static int i2c_dw_prepare(struct device *device) +{ + /* + * If the ACPI companion device object is present for this device, + * it may be accessed during suspend and resume of other devices via + * I2C operation regions, so tell the PM core and middle layers to + * avoid skipping system suspend/resume callbacks for it in that case. + */ + return !has_acpi_companion(device); +} + +static int i2c_dw_runtime_suspend(struct device *device) +{ + struct dw_i2c_dev *dev = dev_get_drvdata(device); + + if (dev->shared_with_punit) + return 0; + + i2c_dw_disable(dev); + i2c_dw_prepare_clk(dev, false); + + return 0; +} + +static int i2c_dw_suspend(struct device *device) +{ + struct dw_i2c_dev *dev = dev_get_drvdata(device); + + i2c_mark_adapter_suspended(&dev->adapter); + + return i2c_dw_runtime_suspend(device); +} + +static int i2c_dw_runtime_resume(struct device *device) +{ + struct dw_i2c_dev *dev = dev_get_drvdata(device); + + if (!dev->shared_with_punit) + i2c_dw_prepare_clk(dev, true); + + dev->init(dev); + + return 0; +} + +static int i2c_dw_resume(struct device *device) +{ + struct dw_i2c_dev *dev = dev_get_drvdata(device); + + i2c_dw_runtime_resume(device); + i2c_mark_adapter_resumed(&dev->adapter); + + return 0; +} + +EXPORT_GPL_DEV_PM_OPS(i2c_dw_dev_pm_ops) = { + .prepare = pm_sleep_ptr(i2c_dw_prepare), + LATE_SYSTEM_SLEEP_PM_OPS(i2c_dw_suspend, i2c_dw_resume) + RUNTIME_PM_OPS(i2c_dw_runtime_suspend, i2c_dw_runtime_resume, NULL) +}; MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index e9606c00b8d103..8e8854ec988258 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -10,11 +10,10 @@ */ #include -#include #include -#include #include #include +#include #include #include @@ -109,6 +108,7 @@ DW_IC_INTR_RX_UNDER | \ DW_IC_INTR_RD_REQ) +#define DW_IC_ENABLE_ENABLE BIT(0) #define DW_IC_ENABLE_ABORT BIT(1) #define DW_IC_STATUS_ACTIVITY BIT(0) @@ -237,7 +237,6 @@ struct reset_control; * @semaphore_idx: Index of table with semaphore type attached to the bus. It's * -1 if there is no semaphore. 
* @shared_with_punit: true if this bus is shared with the SoCs PUNIT - * @disable: function to disable the controller * @init: function to initialize the I2C hardware * @set_sda_hold_time: callback to retrieve IP specific SDA hold timing * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE @@ -295,7 +294,6 @@ struct dw_i2c_dev { void (*release_lock)(void); int semaphore_idx; bool shared_with_punit; - void (*disable)(struct dw_i2c_dev *dev); int (*init)(struct dw_i2c_dev *dev); int (*set_sda_hold_time)(struct dw_i2c_dev *dev); int mode; @@ -329,8 +327,10 @@ struct i2c_dw_semaphore_callbacks { }; int i2c_dw_init_regmap(struct dw_i2c_dev *dev); -u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset); -u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset); +u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, + u32 tSYMBOL, u32 tf, int cond, int offset); +u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, + u32 tLOW, u32 tf, int offset); int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev); u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev); int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare); @@ -340,7 +340,8 @@ int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev); int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev); int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev); u32 i2c_dw_func(struct i2c_adapter *adap); -void i2c_dw_disable(struct dw_i2c_dev *dev); + +extern const struct dev_pm_ops i2c_dw_dev_pm_ops; static inline void __i2c_dw_enable(struct dw_i2c_dev *dev) { @@ -373,6 +374,7 @@ static inline void __i2c_dw_read_intr_mask(struct dw_i2c_dev *dev, } void __i2c_dw_disable(struct dw_i2c_dev *dev); +void i2c_dw_disable(struct dw_i2c_dev *dev); extern void i2c_dw_configure_master(struct dw_i2c_dev *dev); extern int i2c_dw_probe_master(struct dw_i2c_dev *dev); @@ -385,19 +387,6 @@ static inline void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { } static inline int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { return -EINVAL; } #endif -static inline int i2c_dw_probe(struct dw_i2c_dev *dev) -{ - switch (dev->mode) { - case DW_IC_SLAVE: - return i2c_dw_probe_slave(dev); - case DW_IC_MASTER: - return i2c_dw_probe_master(dev); - default: - dev_err(dev->dev, "Wrong operation mode: %d\n", dev->mode); - return -EINVAL; - } -} - static inline void i2c_dw_configure(struct dw_i2c_dev *dev) { if (i2c_detect_slave_mode(dev->dev)) @@ -406,6 +395,8 @@ static inline void i2c_dw_configure(struct dw_i2c_dev *dev) i2c_dw_configure_master(dev); } +int i2c_dw_probe(struct dw_i2c_dev *dev); + #if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL) int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev); #endif @@ -414,11 +405,4 @@ int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev); int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev); #endif -int i2c_dw_validate_speed(struct dw_i2c_dev *dev); -void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev); - -#if IS_ENABLED(CONFIG_ACPI) -int i2c_dw_acpi_configure(struct device *device); -#else -static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; } -#endif +int i2c_dw_fw_parse_and_configure(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index c7e56002809ace..e8ac9a7bf0b3d2 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -22,6 +22,8 @@ #include #include +#define DEFAULT_SYMBOL_NAMESPACE I2C_DW + #include 
"i2c-designware-core.h" #define AMD_TIMEOUT_MIN_US 25 @@ -64,13 +66,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) if (!dev->ss_hcnt || !dev->ss_lcnt) { ic_clk = i2c_dw_clk_rate(dev); dev->ss_hcnt = - i2c_dw_scl_hcnt(ic_clk, + i2c_dw_scl_hcnt(dev, + DW_IC_SS_SCL_HCNT, + ic_clk, 4000, /* tHD;STA = tHIGH = 4.0 us */ sda_falling_time, 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ dev->ss_lcnt = - i2c_dw_scl_lcnt(ic_clk, + i2c_dw_scl_lcnt(dev, + DW_IC_SS_SCL_LCNT, + ic_clk, 4700, /* tLOW = 4.7 us */ scl_falling_time, 0); /* No offset */ @@ -94,13 +100,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) } else { ic_clk = i2c_dw_clk_rate(dev); dev->fs_hcnt = - i2c_dw_scl_hcnt(ic_clk, + i2c_dw_scl_hcnt(dev, + DW_IC_FS_SCL_HCNT, + ic_clk, 260, /* tHIGH = 260 ns */ sda_falling_time, 0, /* DW default */ 0); /* No offset */ dev->fs_lcnt = - i2c_dw_scl_lcnt(ic_clk, + i2c_dw_scl_lcnt(dev, + DW_IC_FS_SCL_LCNT, + ic_clk, 500, /* tLOW = 500 ns */ scl_falling_time, 0); /* No offset */ @@ -114,13 +124,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) if (!dev->fs_hcnt || !dev->fs_lcnt) { ic_clk = i2c_dw_clk_rate(dev); dev->fs_hcnt = - i2c_dw_scl_hcnt(ic_clk, + i2c_dw_scl_hcnt(dev, + DW_IC_FS_SCL_HCNT, + ic_clk, 600, /* tHD;STA = tHIGH = 0.6 us */ sda_falling_time, 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ dev->fs_lcnt = - i2c_dw_scl_lcnt(ic_clk, + i2c_dw_scl_lcnt(dev, + DW_IC_FS_SCL_LCNT, + ic_clk, 1300, /* tLOW = 1.3 us */ scl_falling_time, 0); /* No offset */ @@ -142,13 +156,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) } else if (!dev->hs_hcnt || !dev->hs_lcnt) { ic_clk = i2c_dw_clk_rate(dev); dev->hs_hcnt = - i2c_dw_scl_hcnt(ic_clk, + i2c_dw_scl_hcnt(dev, + DW_IC_HS_SCL_HCNT, + ic_clk, 160, /* tHIGH = 160 ns */ sda_falling_time, 0, /* DW default */ 0); /* No offset */ dev->hs_lcnt = - i2c_dw_scl_lcnt(ic_clk, + i2c_dw_scl_lcnt(dev, + DW_IC_HS_SCL_LCNT, + ic_clk, 320, /* tLOW = 320 ns */ scl_falling_time, 0); /* No offset */ @@ -253,6 +271,34 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) __i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK); } +/* + * This function waits for the controller to be idle before disabling I2C + * When the controller is not in the IDLE state, the MST_ACTIVITY bit + * (IC_STATUS[5]) is set. + * + * Values: + * 0x1 (ACTIVE): Controller not idle + * 0x0 (IDLE): Controller is idle + * + * The function is called after completing the current transfer. + * + * Returns: + * False when the controller is in the IDLE state. + * True when the controller is in the ACTIVE state. + */ +static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev) +{ + u32 status; + + regmap_read(dev->map, DW_IC_STATUS, &status); + if (!(status & DW_IC_STATUS_MASTER_ACTIVITY)) + return false; + + return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status, + !(status & DW_IC_STATUS_MASTER_ACTIVITY), + 1100, 20000) != 0; +} + static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev) { u32 val; @@ -788,6 +834,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) goto done; } + /* + * This happens rarely (~1:500) and is hard to reproduce. Debug trace + * showed that IC_STATUS had value of 0x23 when STOP_DET occurred, + * if disable IC_ENABLE.ENABLE immediately that can result in + * IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check if + * controller is still ACTIVE before disabling I2C. 
+ */ + if (i2c_dw_is_controller_active(dev)) + dev_err(dev->dev, "controller active\n"); + /* * We must disable the adapter before returning and signaling the end * of the current transfer. Otherwise the hardware might continue @@ -931,7 +987,6 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) init_completion(&dev->cmd_complete); dev->init = i2c_dw_init_master; - dev->disable = i2c_dw_disable; ret = i2c_dw_init_regmap(dev); if (ret) @@ -1021,3 +1076,4 @@ EXPORT_SYMBOL_GPL(i2c_dw_probe_master); MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(I2C_DW_COMMON); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index a1b379a1e90401..7b2c5d71a7fcec 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -9,7 +9,6 @@ * Copyright (C) 2009 Provigent Ltd. * Copyright (C) 2011, 2015, 2016 Intel Corporation. */ -#include #include #include #include @@ -19,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -102,7 +102,7 @@ static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev) static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) { - struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev); + struct dw_i2c_dev *dev = pci_get_drvdata(pdev); switch (pdev->device) { case 0x0817: @@ -152,7 +152,7 @@ static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev) static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c) { - struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev); + struct dw_i2c_dev *dev = pci_get_drvdata(pdev); dev->flags |= MODEL_AMD_NAVI_GPU | ACCESS_POLLING; dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; @@ -194,47 +194,6 @@ static struct dw_pci_controller dw_pci_controllers[] = { }, }; -static int __maybe_unused i2c_dw_pci_runtime_suspend(struct device *dev) -{ - struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - - i_dev->disable(i_dev); - return 0; -} - -static int __maybe_unused i2c_dw_pci_suspend(struct device *dev) -{ - struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - - i2c_mark_adapter_suspended(&i_dev->adapter); - - return i2c_dw_pci_runtime_suspend(dev); -} - -static int __maybe_unused i2c_dw_pci_runtime_resume(struct device *dev) -{ - struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - - return i_dev->init(i_dev); -} - -static int __maybe_unused i2c_dw_pci_resume(struct device *dev) -{ - struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - int ret; - - ret = i2c_dw_pci_runtime_resume(dev); - - i2c_mark_adapter_resumed(&i_dev->adapter); - - return ret; -} - -static const struct dev_pm_ops i2c_dw_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(i2c_dw_pci_suspend, i2c_dw_pci_resume) - SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend, i2c_dw_pci_runtime_resume, NULL) -}; - static const struct property_entry dgpu_properties[] = { /* USB-C doesn't power the system */ PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE), @@ -253,7 +212,6 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, int r; struct dw_pci_controller *controller; struct dw_scl_sda_cfg *cfg; - struct i2c_timings *t; if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) return dev_err_probe(&pdev->dev, -EINVAL, @@ -288,29 +246,17 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->irq = pci_irq_vector(pdev, 0); dev->flags |= controller->flags; - t = &dev->timings; - i2c_parse_fw_timings(&pdev->dev, t, false); - pci_set_drvdata(pdev, dev); if (controller->setup) { r = controller->setup(pdev, 
controller); - if (r) { - pci_free_irq_vectors(pdev); + if (r) return r; - } } - i2c_dw_adjust_bus_speed(dev); - - if (has_acpi_companion(&pdev->dev)) - i2c_dw_acpi_configure(&pdev->dev); - - r = i2c_dw_validate_speed(dev); - if (r) { - pci_free_irq_vectors(pdev); + r = i2c_dw_fw_parse_and_configure(dev); + if (r) return r; - } i2c_dw_configure(dev); @@ -326,14 +272,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, adap = &dev->adapter; adap->owner = THIS_MODULE; adap->class = 0; - ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); adap->nr = controller->bus_num; r = i2c_dw_probe(dev); - if (r) { - pci_free_irq_vectors(pdev); + if (r) return r; - } if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) { dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node); @@ -354,16 +297,15 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev) { struct dw_i2c_dev *dev = pci_get_drvdata(pdev); - dev->disable(dev); + i2c_dw_disable(dev); + pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); i2c_del_adapter(&dev->adapter); - devm_free_irq(&pdev->dev, dev->irq, dev); - pci_free_irq_vectors(pdev); } -static const struct pci_device_id i2_designware_pci_ids[] = { +static const struct pci_device_id i2c_designware_pci_ids[] = { /* Medfield */ { PCI_VDEVICE(INTEL, 0x0817), medfield }, { PCI_VDEVICE(INTEL, 0x0818), medfield }, @@ -409,21 +351,23 @@ static const struct pci_device_id i2_designware_pci_ids[] = { { PCI_VDEVICE(ATI, 0x73c4), navi_amd }, { PCI_VDEVICE(ATI, 0x7444), navi_amd }, { PCI_VDEVICE(ATI, 0x7464), navi_amd }, - { 0,} + {} }; -MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids); +MODULE_DEVICE_TABLE(pci, i2c_designware_pci_ids); static struct pci_driver dw_i2c_driver = { .name = DRIVER_NAME, - .id_table = i2_designware_pci_ids, .probe = i2c_dw_pci_probe, .remove = i2c_dw_pci_remove, .driver = { - .pm = &i2c_dw_pm_ops, + .pm = pm_ptr(&i2c_dw_dev_pm_ops), }, + .id_table = i2c_designware_pci_ids, }; module_pci_driver(dw_i2c_driver); MODULE_AUTHOR("Baruch Siach "); MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(I2C_DW); +MODULE_IMPORT_NS(I2C_DW_COMMON); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index df3dc1e8093e36..2d0c7348e4917c 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -8,7 +8,6 @@ * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. 
*/ -#include #include #include #include @@ -21,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -30,7 +28,6 @@ #include #include #include -#include #include #include "i2c-designware-core.h" @@ -40,29 +37,6 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) return clk_get_rate(dev->clk) / KILO; } -#ifdef CONFIG_ACPI -static const struct acpi_device_id dw_i2c_acpi_match[] = { - { "INT33C2", 0 }, - { "INT33C3", 0 }, - { "INT3432", 0 }, - { "INT3433", 0 }, - { "INTC10EF", 0 }, - { "80860F41", ACCESS_NO_IRQ_SUSPEND }, - { "808622C1", ACCESS_NO_IRQ_SUSPEND }, - { "AMD0010", ACCESS_INTR_MASK }, - { "AMDI0010", ACCESS_INTR_MASK }, - { "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE }, - { "AMDI0510", 0 }, - { "APMC0D0F", 0 }, - { "HISI02A1", 0 }, - { "HISI02A2", 0 }, - { "HISI02A3", 0 }, - { "HYGO0010", ACCESS_INTR_MASK }, - { } -}; -MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); -#endif - #ifdef CONFIG_OF #define BT1_I2C_CTL 0x100 #define BT1_I2C_CTL_ADDR_MASK GENMASK(7, 0) @@ -120,53 +94,11 @@ static int bt1_i2c_request_regs(struct dw_i2c_dev *dev) dev->map = devm_regmap_init(dev->dev, NULL, dev, &bt1_i2c_cfg); return PTR_ERR_OR_ZERO(dev->map); } - -#define MSCC_ICPU_CFG_TWI_DELAY 0x0 -#define MSCC_ICPU_CFG_TWI_DELAY_ENABLE BIT(0) -#define MSCC_ICPU_CFG_TWI_SPIKE_FILTER 0x4 - -static int mscc_twi_set_sda_hold_time(struct dw_i2c_dev *dev) -{ - writel((dev->sda_hold_time << 1) | MSCC_ICPU_CFG_TWI_DELAY_ENABLE, - dev->ext + MSCC_ICPU_CFG_TWI_DELAY); - - return 0; -} - -static int dw_i2c_of_configure(struct platform_device *pdev) -{ - struct dw_i2c_dev *dev = platform_get_drvdata(pdev); - - switch (dev->flags & MODEL_MASK) { - case MODEL_MSCC_OCELOT: - dev->ext = devm_platform_ioremap_resource(pdev, 1); - if (!IS_ERR(dev->ext)) - dev->set_sda_hold_time = mscc_twi_set_sda_hold_time; - break; - default: - break; - } - - return 0; -} - -static const struct of_device_id dw_i2c_of_match[] = { - { .compatible = "snps,designware-i2c", }, - { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, - { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, - {}, -}; -MODULE_DEVICE_TABLE(of, dw_i2c_of_match); #else static int bt1_i2c_request_regs(struct dw_i2c_dev *dev) { return -ENODEV; } - -static inline int dw_i2c_of_configure(struct platform_device *pdev) -{ - return -ENODEV; -} #endif static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev) @@ -238,11 +170,9 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) int i = 0; int ret; - ptr = i2c_dw_semaphore_cb_table; - dev->semaphore_idx = -1; - while (ptr->probe) { + for (ptr = i2c_dw_semaphore_cb_table; ptr->probe; ptr++) { ret = ptr->probe(dev); if (ret) { /* @@ -254,7 +184,6 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) return ret; i++; - ptr++; continue; } @@ -278,7 +207,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) { struct i2c_adapter *adap; struct dw_i2c_dev *dev; - struct i2c_timings *t; int irq, ret; irq = platform_get_irq(pdev, 0); @@ -307,18 +235,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) reset_control_deassert(dev->rst); - t = &dev->timings; - i2c_parse_fw_timings(&pdev->dev, t, false); - - i2c_dw_adjust_bus_speed(dev); - - if (pdev->dev.of_node) - dw_i2c_of_configure(pdev); - - if (has_acpi_companion(&pdev->dev)) - i2c_dw_acpi_configure(&pdev->dev); - - ret = i2c_dw_validate_speed(dev); + ret = i2c_dw_fw_parse_and_configure(dev); if (ret) goto exit_reset; @@ -346,6 +263,7 @@ static int 
dw_i2c_plat_probe(struct platform_device *pdev) goto exit_reset; if (dev->clk) { + struct i2c_timings *t = &dev->timings; u64 clk_khz; dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; @@ -360,8 +278,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) adap->owner = THIS_MODULE; adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ? I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; - ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); - adap->dev.of_node = pdev->dev.of_node; adap->nr = -1; if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { @@ -408,7 +324,7 @@ static void dw_i2c_plat_remove(struct platform_device *pdev) i2c_del_adapter(&dev->adapter); - dev->disable(dev); + i2c_dw_disable(dev); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); @@ -419,66 +335,34 @@ static void dw_i2c_plat_remove(struct platform_device *pdev) reset_control_assert(dev->rst); } -static int dw_i2c_plat_prepare(struct device *dev) -{ - /* - * If the ACPI companion device object is present for this device, it - * may be accessed during suspend and resume of other devices via I2C - * operation regions, so tell the PM core and middle layers to avoid - * skipping system suspend/resume callbacks for it in that case. - */ - return !has_acpi_companion(dev); -} - -static int dw_i2c_plat_runtime_suspend(struct device *dev) -{ - struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - - if (i_dev->shared_with_punit) - return 0; - - i_dev->disable(i_dev); - i2c_dw_prepare_clk(i_dev, false); - - return 0; -} - -static int dw_i2c_plat_suspend(struct device *dev) -{ - struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - - i2c_mark_adapter_suspended(&i_dev->adapter); - - return dw_i2c_plat_runtime_suspend(dev); -} - -static int dw_i2c_plat_runtime_resume(struct device *dev) -{ - struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - - if (!i_dev->shared_with_punit) - i2c_dw_prepare_clk(i_dev, true); - - i_dev->init(i_dev); - - return 0; -} - -static int dw_i2c_plat_resume(struct device *dev) -{ - struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - - dw_i2c_plat_runtime_resume(dev); - i2c_mark_adapter_resumed(&i_dev->adapter); - - return 0; -} +static const struct of_device_id dw_i2c_of_match[] = { + { .compatible = "snps,designware-i2c", }, + { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, + { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, + {} +}; +MODULE_DEVICE_TABLE(of, dw_i2c_of_match); -static const struct dev_pm_ops dw_i2c_dev_pm_ops = { - .prepare = pm_sleep_ptr(dw_i2c_plat_prepare), - LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) - RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL) +static const struct acpi_device_id dw_i2c_acpi_match[] = { + { "80860F41", ACCESS_NO_IRQ_SUSPEND }, + { "808622C1", ACCESS_NO_IRQ_SUSPEND }, + { "AMD0010", ACCESS_INTR_MASK }, + { "AMDI0010", ACCESS_INTR_MASK }, + { "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE }, + { "AMDI0510", 0 }, + { "APMC0D0F", 0 }, + { "HISI02A1", 0 }, + { "HISI02A2", 0 }, + { "HISI02A3", 0 }, + { "HYGO0010", ACCESS_INTR_MASK }, + { "INT33C2", 0 }, + { "INT33C3", 0 }, + { "INT3432", 0 }, + { "INT3433", 0 }, + { "INTC10EF", 0 }, + {} }; +MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); static const struct platform_device_id dw_i2c_platform_ids[] = { { "i2c_designware" }, @@ -491,9 +375,9 @@ static struct platform_driver dw_i2c_driver = { .remove_new = dw_i2c_plat_remove, .driver = { .name = "i2c_designware", - .of_match_table = 
of_match_ptr(dw_i2c_of_match), - .acpi_match_table = ACPI_PTR(dw_i2c_acpi_match), - .pm = pm_ptr(&dw_i2c_dev_pm_ops), + .of_match_table = dw_i2c_of_match, + .acpi_match_table = dw_i2c_acpi_match, + .pm = pm_ptr(&i2c_dw_dev_pm_ops), }, .id_table = dw_i2c_platform_ids, }; @@ -513,3 +397,5 @@ module_exit(dw_i2c_exit_driver); MODULE_AUTHOR("Baruch Siach "); MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(I2C_DW); +MODULE_IMPORT_NS(I2C_DW_COMMON); diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 78e2c47e3d7da7..7035296aa24ce0 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -16,6 +16,8 @@ #include #include +#define DEFAULT_SYMBOL_NAMESPACE I2C_DW + #include "i2c-designware-core.h" static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev) @@ -88,7 +90,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter); regmap_write(dev->map, DW_IC_INTR_MASK, 0); - dev->disable(dev); + i2c_dw_disable(dev); synchronize_irq(dev->irq); dev->slave = NULL; pm_runtime_put(dev->dev); @@ -235,7 +237,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev) int ret; dev->init = i2c_dw_init_slave; - dev->disable = i2c_dw_disable; ret = i2c_dw_init_regmap(dev); if (ret) @@ -279,3 +280,4 @@ EXPORT_SYMBOL_GPL(i2c_dw_probe_slave); MODULE_AUTHOR("Luis Oliveira "); MODULE_DESCRIPTION("Synopsys DesignWare I2C bus slave adapter"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(I2C_DW_COMMON); diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c index 3e6b80e59b9063..3dc5a46698fc95 100644 --- a/drivers/i2c/busses/i2c-digicolor.c +++ b/drivers/i2c/busses/i2c-digicolor.c @@ -357,7 +357,7 @@ static void dc_i2c_remove(struct platform_device *pdev) static const struct of_device_id dc_i2c_match[] = { { .compatible = "cnxt,cx92755-i2c" }, - { }, + { } }; MODULE_DEVICE_TABLE(of, dc_i2c_match); diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index 5574094104457a..d08be3f3cede3d 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -67,7 +67,6 @@ struct em_i2c_device { void __iomem *base; struct i2c_adapter adap; struct completion msg_done; - struct clk *sclk; struct i2c_client *slave; int irq; }; @@ -361,6 +360,7 @@ static const struct i2c_algorithm em_i2c_algo = { static int em_i2c_probe(struct platform_device *pdev) { struct em_i2c_device *priv; + struct clk *sclk; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -373,13 +373,9 @@ static int em_i2c_probe(struct platform_device *pdev) strscpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name)); - priv->sclk = devm_clk_get(&pdev->dev, "sclk"); - if (IS_ERR(priv->sclk)) - return PTR_ERR(priv->sclk); - - ret = clk_prepare_enable(priv->sclk); - if (ret) - return ret; + sclk = devm_clk_get_enabled(&pdev->dev, "sclk"); + if (IS_ERR(sclk)) + return PTR_ERR(sclk); priv->adap.timeout = msecs_to_jiffies(100); priv->adap.retries = 5; @@ -397,26 +393,22 @@ static int em_i2c_probe(struct platform_device *pdev) ret = platform_get_irq(pdev, 0); if (ret < 0) - goto err_clk; + return ret; priv->irq = ret; + ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0, "em_i2c", priv); if (ret) - goto err_clk; + return ret; ret = i2c_add_adapter(&priv->adap); - if (ret) - goto err_clk; + return ret; dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", 
priv->adap.nr, priv->irq); return 0; - -err_clk: - clk_disable_unprepare(priv->sclk); - return ret; } static void em_i2c_remove(struct platform_device *dev) @@ -424,7 +416,6 @@ static void em_i2c_remove(struct platform_device *dev) struct em_i2c_device *priv = platform_get_drvdata(dev); i2c_del_adapter(&priv->adap); - clk_disable_unprepare(priv->sclk); } static const struct of_device_id em_i2c_ids[] = { diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 328c0dab6b147e..299fe9d3afab0a 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1763,8 +1763,15 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) i801_add_tco(priv); + /* + * adapter.name is used by platform code to find the main I801 adapter + * to instantiante i2c_clients, do not change. + */ snprintf(priv->adapter.name, sizeof(priv->adapter.name), - "SMBus I801 adapter at %04lx", priv->smba); + "SMBus %s adapter at %04lx", + (priv->features & FEATURE_IDF) ? "I801 IDF" : "I801", + priv->smba); + err = i2c_add_adapter(&priv->adapter); if (err) { platform_device_unregister(priv->tco_pdev); diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 0197786892a242..976d43f73f3830 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -559,7 +559,7 @@ static const struct i2c_algorithm lpi2c_imx_algo = { static const struct of_device_id lpi2c_imx_of_match[] = { { .compatible = "fsl,imx7ulp-lpi2c" }, - { }, + { } }; MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 3842e527116b79..98539313cbc970 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -687,7 +687,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic) i2c_imx_bus_busy(i2c_imx, 0, atomic); /* Disable I2C controller */ - temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN, + temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } @@ -1549,7 +1549,7 @@ static void i2c_imx_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int __maybe_unused i2c_imx_runtime_suspend(struct device *dev) +static int i2c_imx_runtime_suspend(struct device *dev) { struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev); @@ -1558,7 +1558,7 @@ static int __maybe_unused i2c_imx_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused i2c_imx_runtime_resume(struct device *dev) +static int i2c_imx_runtime_resume(struct device *dev) { struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev); int ret; @@ -1571,8 +1571,7 @@ static int __maybe_unused i2c_imx_runtime_resume(struct device *dev) } static const struct dev_pm_ops i2c_imx_pm_ops = { - SET_RUNTIME_PM_OPS(i2c_imx_runtime_suspend, - i2c_imx_runtime_resume, NULL) + RUNTIME_PM_OPS(i2c_imx_runtime_suspend, i2c_imx_runtime_resume, NULL) }; static struct platform_driver i2c_imx_driver = { @@ -1580,7 +1579,7 @@ static struct platform_driver i2c_imx_driver = { .remove_new = i2c_imx_remove, .driver = { .name = DRIVER_NAME, - .pm = &i2c_imx_pm_ops, + .pm = pm_ptr(&i2c_imx_pm_ops), .of_match_table = i2c_imx_dt_ids, .acpi_match_table = i2c_imx_acpi_ids, }, diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index 33dbc19d3848dd..f59158489ad9fa 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c @@ -99,8 +99,7 @@ static int sch_transaction(void) if (retries > 
MAX_RETRIES) { dev_err(&sch_adapter.dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; - } - if (temp & 0x04) { + } else if (temp & 0x04) { result = -EIO; dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be " "locked until next hard reset. (sorry!)\n"); diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 655b5d851c48b0..c93c02aa6ac8cc 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -381,6 +381,15 @@ static int ismt_process_desc(const struct ismt_desc *desc, return -EIO; } +/** + * ismt_kill_transaction() - kill current transaction + * @priv: iSMT private data + */ +static void ismt_kill_transaction(struct ismt_priv *priv) +{ + writel(ISMT_GCTRL_KILL, priv->smba + ISMT_GR_GCTRL); +} + /** * ismt_access() - process an SMBus command * @adap: the i2c host adapter @@ -623,6 +632,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, dma_unmap_single(dev, dma_addr, dma_size, dma_direction); if (unlikely(!time_left)) { + ismt_kill_transaction(priv); ret = -ETIMEDOUT; goto out; } diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 4aafdfab630541..92cc5b09113760 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -792,26 +792,22 @@ static int jz4780_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, i2c); - i2c->clk = devm_clk_get(&pdev->dev, NULL); + i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) return PTR_ERR(i2c->clk); - ret = clk_prepare_enable(i2c->clk); - if (ret) - return ret; - ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clk_freq); if (ret) { dev_err(&pdev->dev, "clock-frequency not specified in DT\n"); - goto err; + return ret; } i2c->speed = clk_freq / 1000; if (i2c->speed == 0) { ret = -EINVAL; dev_err(&pdev->dev, "clock-frequency minimum is 1000\n"); - goto err; + return ret; } jz4780_i2c_set_speed(i2c); @@ -827,29 +823,25 @@ static int jz4780_i2c_probe(struct platform_device *pdev) ret = platform_get_irq(pdev, 0); if (ret < 0) - goto err; + return ret; i2c->irq = ret; + ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, dev_name(&pdev->dev), i2c); if (ret) - goto err; + return ret; ret = i2c_add_adapter(&i2c->adap); if (ret < 0) - goto err; + return ret; return 0; - -err: - clk_disable_unprepare(i2c->clk); - return ret; } static void jz4780_i2c_remove(struct platform_device *pdev) { struct jz4780_i2c *i2c = platform_get_drvdata(pdev); - clk_disable_unprepare(i2c->clk); i2c_del_adapter(&i2c->adap); } diff --git a/drivers/i2c/busses/i2c-keba.c b/drivers/i2c/busses/i2c-keba.c new file mode 100644 index 00000000000000..759732a07ef087 --- /dev/null +++ b/drivers/i2c/busses/i2c-keba.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) KEBA Industrial Automation Gmbh 2024 + * + * Driver for KEBA I2C controller FPGA IP core + */ + +#include +#include +#include +#include +#include + +#define KI2C "i2c-keba" + +#define KI2C_CAPABILITY_REG 0x02 +#define KI2C_CAPABILITY_CRYPTO 0x01 +#define KI2C_CAPABILITY_DC 0x02 + +#define KI2C_CONTROL_REG 0x04 +#define KI2C_CONTROL_MEN 0x01 +#define KI2C_CONTROL_MSTA 0x02 +#define KI2C_CONTROL_RSTA 0x04 +#define KI2C_CONTROL_MTX 0x08 +#define KI2C_CONTROL_TXAK 0x10 +#define KI2C_CONTROL_DISABLE 0x00 + +#define KI2C_CONTROL_DC_REG 0x05 +#define KI2C_CONTROL_DC_SDA 0x01 +#define KI2C_CONTROL_DC_SCL 0x02 + +#define KI2C_STATUS_REG 0x08 +#define KI2C_STATUS_IN_USE 0x01 +#define KI2C_STATUS_ACK_CYC 0x02 +#define KI2C_STATUS_RXAK 
0x04 +#define KI2C_STATUS_MCF 0x08 + +#define KI2C_STATUS_DC_REG 0x09 +#define KI2C_STATUS_DC_SDA 0x01 +#define KI2C_STATUS_DC_SCL 0x02 + +#define KI2C_DATA_REG 0x0c + +#define KI2C_INUSE_SLEEP_US (2 * USEC_PER_MSEC) +#define KI2C_INUSE_TIMEOUT_US (10 * USEC_PER_SEC) + +#define KI2C_POLL_DELAY_US 5 + +struct ki2c { + struct keba_i2c_auxdev *auxdev; + void __iomem *base; + struct i2c_adapter adapter; + + struct i2c_client **client; + int client_size; +}; + +static int ki2c_inuse_lock(struct ki2c *ki2c) +{ + u8 sts; + int ret; + + /* + * The I2C controller has an IN_USE bit for locking access to the + * controller. This enables the use of the I2C controller by other + * non-Linux processors. + * + * If the I2C controller is free, then the first read returns + * IN_USE == 0. After that the I2C controller is locked and further + * reads of IN_USE return 1. + * + * The I2C controller is unlocked by writing 1 into IN_USE. + * + * The IN_USE bit acts as a hardware semaphore for the I2C controller. + * Poll for semaphore, but sleep while polling to free the CPU. + */ + ret = readb_poll_timeout(ki2c->base + KI2C_STATUS_REG, + sts, (sts & KI2C_STATUS_IN_USE) == 0, + KI2C_INUSE_SLEEP_US, KI2C_INUSE_TIMEOUT_US); + if (ret) + dev_err(&ki2c->auxdev->auxdev.dev, "%s err!\n", __func__); + + return ret; +} + +static void ki2c_inuse_unlock(struct ki2c *ki2c) +{ + /* unlock the controller by writing 1 into IN_USE */ + iowrite8(KI2C_STATUS_IN_USE, ki2c->base + KI2C_STATUS_REG); +} + +static int ki2c_wait_for_bit(void __iomem *addr, u8 mask, unsigned long timeout) +{ + u8 val; + + return readb_poll_timeout(addr, val, (val & mask), KI2C_POLL_DELAY_US, + jiffies_to_usecs(timeout)); +} + +static int ki2c_wait_for_mcf(struct ki2c *ki2c) +{ + return ki2c_wait_for_bit(ki2c->base + KI2C_STATUS_REG, KI2C_STATUS_MCF, + ki2c->adapter.timeout); +} + +static int ki2c_wait_for_data(struct ki2c *ki2c) +{ + int ret; + + ret = ki2c_wait_for_mcf(ki2c); + if (ret < 0) + return ret; + + return ki2c_wait_for_bit(ki2c->base + KI2C_STATUS_REG, + KI2C_STATUS_ACK_CYC, + ki2c->adapter.timeout); +} + +static int ki2c_wait_for_data_ack(struct ki2c *ki2c) +{ + unsigned int reg; + int ret; + + ret = ki2c_wait_for_data(ki2c); + if (ret < 0) + return ret; + + /* RXAK == 0 means ACK received */ + reg = ioread8(ki2c->base + KI2C_STATUS_REG); + if (reg & KI2C_STATUS_RXAK) + return -EIO; + + return 0; +} + +static int ki2c_has_capability(struct ki2c *ki2c, unsigned int cap) +{ + unsigned int reg = ioread8(ki2c->base + KI2C_CAPABILITY_REG); + + return (reg & cap) != 0; +} + +static int ki2c_get_scl(struct ki2c *ki2c) +{ + unsigned int reg = ioread8(ki2c->base + KI2C_STATUS_DC_REG); + + /* capability KI2C_CAPABILITY_DC required */ + return (reg & KI2C_STATUS_DC_SCL) != 0; +} + +static int ki2c_get_sda(struct ki2c *ki2c) +{ + unsigned int reg = ioread8(ki2c->base + KI2C_STATUS_DC_REG); + + /* capability KI2C_CAPABILITY_DC required */ + return (reg & KI2C_STATUS_DC_SDA) != 0; +} + +static void ki2c_set_scl(struct ki2c *ki2c, int val) +{ + u8 control_dc; + + /* capability KI2C_CAPABILITY_DC and KI2C_CONTROL_MEN = 0 required */ + control_dc = ioread8(ki2c->base + KI2C_CONTROL_DC_REG); + if (val) + control_dc |= KI2C_CONTROL_DC_SCL; + else + control_dc &= ~KI2C_CONTROL_DC_SCL; + iowrite8(control_dc, ki2c->base + KI2C_CONTROL_DC_REG); +} + +/* + * Resetting bus bitwise is done by checking SDA and applying clock cycles as + * long as SDA is low. 9 clock cycles are applied at most. 
+ * + * Clock cycles are generated and udelay() determines the duration of clock + * cycles. Generated clock rate is 100 KHz and so duration of both clock levels + * is: delay in ns = (10^6 / 100) / 2 + */ +#define KI2C_RECOVERY_CLK_CNT (9 * 2) +#define KI2C_RECOVERY_UDELAY 5 +static int ki2c_reset_bus_bitwise(struct ki2c *ki2c) +{ + int val = 1; + int ret = 0; + int i; + + /* disable I2C controller (MEN = 0) to get direct access to SCL/SDA */ + iowrite8(0, ki2c->base + KI2C_CONTROL_REG); + + /* generate clock cycles */ + ki2c_set_scl(ki2c, val); + udelay(KI2C_RECOVERY_UDELAY); + for (i = 0; i < KI2C_RECOVERY_CLK_CNT; i++) { + if (val) { + /* SCL shouldn't be low here */ + if (!ki2c_get_scl(ki2c)) { + dev_err(&ki2c->auxdev->auxdev.dev, + "SCL is stuck low!\n"); + ret = -EBUSY; + break; + } + + /* break if SDA is high */ + if (ki2c_get_sda(ki2c)) + break; + } + + val = !val; + ki2c_set_scl(ki2c, val); + udelay(KI2C_RECOVERY_UDELAY); + } + + if (!ki2c_get_sda(ki2c)) { + dev_err(&ki2c->auxdev->auxdev.dev, "SDA is still low!\n"); + ret = -EBUSY; + } + + /* reenable controller */ + iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG); + + return ret; +} + +/* + * Resetting bus bytewise is done by writing start bit, 9 data bits and stop + * bit. + * + * This is not 100% safe. If target is an EEPROM and a write access was + * interrupted during the ACK cycle, this approach might not be able to recover + * the bus. The reason is, that after the 9 clock cycles the EEPROM will be in + * ACK cycle again and will hold SDA low like it did before the start of the + * routine. Furthermore the EEPROM might get written one additional byte with + * 0xff into it. Thus, use bitwise approach whenever possible, especially when + * EEPROMs are on the bus. + */ +static int ki2c_reset_bus_bytewise(struct ki2c *ki2c) +{ + int ret; + + /* hold data line high for 9 clock cycles */ + iowrite8(0xFF, ki2c->base + KI2C_DATA_REG); + + /* create start condition */ + iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_MSTA | KI2C_CONTROL_TXAK, + ki2c->base + KI2C_CONTROL_REG); + ret = ki2c_wait_for_mcf(ki2c); + if (ret < 0) { + dev_err(&ki2c->auxdev->auxdev.dev, "Start condition failed\n"); + + return ret; + } + + /* create stop condition */ + iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_TXAK, + ki2c->base + KI2C_CONTROL_REG); + ret = ki2c_wait_for_mcf(ki2c); + if (ret < 0) + dev_err(&ki2c->auxdev->auxdev.dev, "Stop condition failed\n"); + + return ret; +} + +static int ki2c_reset_bus(struct ki2c *ki2c) +{ + int ret; + + ret = ki2c_inuse_lock(ki2c); + if (ret < 0) + return ret; + + /* + * If the I2C controller is capable of direct control of SCL/SDA, then a + * bitwise reset is used. Otherwise fall back to bytewise reset. + */ + if (ki2c_has_capability(ki2c, KI2C_CAPABILITY_DC)) + ret = ki2c_reset_bus_bitwise(ki2c); + else + ret = ki2c_reset_bus_bytewise(ki2c); + + ki2c_inuse_unlock(ki2c); + + return ret; +} + +static void ki2c_write_target_addr(struct ki2c *ki2c, struct i2c_msg *m) +{ + u8 addr; + + addr = m->addr << 1; + /* Bit 0 signals RD/WR */ + if (m->flags & I2C_M_RD) + addr |= 0x01; + + iowrite8(addr, ki2c->base + KI2C_DATA_REG); +} + +static int ki2c_start_addr(struct ki2c *ki2c, struct i2c_msg *m) +{ + int ret; + + /* + * Store target address byte in the controller. This has to be done + * before sending START condition. 
+ */ + ki2c_write_target_addr(ki2c, m); + + /* enable controller for TX */ + iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX, + ki2c->base + KI2C_CONTROL_REG); + + /* send START condition and target address byte */ + iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_MSTA, + ki2c->base + KI2C_CONTROL_REG); + + ret = ki2c_wait_for_data_ack(ki2c); + if (ret < 0) + /* + * For EEPROMs this is normal behavior during internal write + * operation. + */ + dev_dbg(&ki2c->auxdev->auxdev.dev, + "%s wait for ACK err at 0x%02x!\n", __func__, m->addr); + + return ret; +} + +static int ki2c_repstart_addr(struct ki2c *ki2c, struct i2c_msg *m) +{ + int ret; + + /* repeated start and write is not supported */ + if ((m->flags & I2C_M_RD) == 0) { + dev_err(&ki2c->auxdev->auxdev.dev, + "Repeated start not supported for writes\n"); + return -EINVAL; + } + + /* send repeated start */ + iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_RSTA, + ki2c->base + KI2C_CONTROL_REG); + + ret = ki2c_wait_for_mcf(ki2c); + if (ret < 0) { + dev_err(&ki2c->auxdev->auxdev.dev, + "%s wait for MCF err at 0x%02x!\n", __func__, m->addr); + return ret; + } + + /* write target-address byte */ + ki2c_write_target_addr(ki2c, m); + + ret = ki2c_wait_for_data_ack(ki2c); + if (ret < 0) + dev_err(&ki2c->auxdev->auxdev.dev, + "%s wait for ACK err at 0x%02x!\n", __func__, m->addr); + + return ret; +} + +static void ki2c_stop(struct ki2c *ki2c) +{ + iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG); + ki2c_wait_for_mcf(ki2c); +} + +static int ki2c_write(struct ki2c *ki2c, const u8 *data, int len) +{ + int ret; + int i; + + for (i = 0; i < len; i++) { + /* write data byte */ + iowrite8(data[i], ki2c->base + KI2C_DATA_REG); + + ret = ki2c_wait_for_data_ack(ki2c); + if (ret < 0) + return ret; + } + + return 0; +} + +static int ki2c_read(struct ki2c *ki2c, u8 *data, int len) +{ + u8 control; + int ret; + int i; + + if (len == 0) + return 0; /* nothing to do */ + + control = KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA; + + /* if just one byte => send tx-nack after transfer */ + if (len == 1) + control |= KI2C_CONTROL_TXAK; + + iowrite8(control, ki2c->base + KI2C_CONTROL_REG); + + /* dummy read to start transfer on bus */ + ioread8(ki2c->base + KI2C_DATA_REG); + + for (i = 0; i < len; i++) { + ret = ki2c_wait_for_data(ki2c); + if (ret < 0) + return ret; + + if (i == len - 2) + /* send tx-nack after transfer of last byte */ + iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_TXAK, + ki2c->base + KI2C_CONTROL_REG); + else if (i == len - 1) + /* + * switch to TX on last byte, so that reading DATA + * register does not trigger another read transfer + */ + iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_MTX, + ki2c->base + KI2C_CONTROL_REG); + + /* read byte and start next transfer (if not last byte) */ + data[i] = ioread8(ki2c->base + KI2C_DATA_REG); + } + + return len; +} + +static int ki2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + struct ki2c *ki2c = i2c_get_adapdata(adap); + int ret; + int i; + + ret = ki2c_inuse_lock(ki2c); + if (ret < 0) + return ret; + + for (i = 0; i < num; i++) { + struct i2c_msg *m = &msgs[i]; + + if (i == 0) + ret = ki2c_start_addr(ki2c, m); + else + ret = ki2c_repstart_addr(ki2c, m); + if (ret < 0) + break; + + if (m->flags & I2C_M_RD) + ret = ki2c_read(ki2c, m->buf, m->len); + else + ret = ki2c_write(ki2c, m->buf, m->len); + if (ret < 0) + break; + } + + ki2c_stop(ki2c); + + ki2c_inuse_unlock(ki2c); + + return ret < 0 ? 
ret : num; +} + +static void ki2c_unregister_devices(struct ki2c *ki2c) +{ + int i; + + for (i = 0; i < ki2c->client_size; i++) { + struct i2c_client *client = ki2c->client[i]; + + if (client) + i2c_unregister_device(client); + } +} + +static int ki2c_register_devices(struct ki2c *ki2c) +{ + struct i2c_board_info *info = ki2c->auxdev->info; + int i; + + /* register all known I2C devices */ + for (i = 0; i < ki2c->client_size; i++) { + struct i2c_client *client; + unsigned short const addr_list[2] = { info[i].addr, + I2C_CLIENT_END }; + + client = i2c_new_scanned_device(&ki2c->adapter, &info[i], + addr_list, NULL); + if (!IS_ERR(client)) { + ki2c->client[i] = client; + } else if (PTR_ERR(client) != -ENODEV) { + ki2c->client_size = i; + ki2c_unregister_devices(ki2c); + + return PTR_ERR(client); + } + } + + return 0; +} + +static u32 ki2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm ki2c_algo = { + .master_xfer = ki2c_xfer, + .functionality = ki2c_func, +}; + +static int ki2c_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct device *dev = &auxdev->dev; + struct i2c_adapter *adap; + struct ki2c *ki2c; + int ret; + + ki2c = devm_kzalloc(dev, sizeof(*ki2c), GFP_KERNEL); + if (!ki2c) + return -ENOMEM; + ki2c->auxdev = container_of(auxdev, struct keba_i2c_auxdev, auxdev); + ki2c->client = devm_kcalloc(dev, ki2c->auxdev->info_size, + sizeof(*ki2c->client), GFP_KERNEL); + if (!ki2c->client) + return -ENOMEM; + ki2c->client_size = ki2c->auxdev->info_size; + auxiliary_set_drvdata(auxdev, ki2c); + + ki2c->base = devm_ioremap_resource(dev, &ki2c->auxdev->io); + if (IS_ERR(ki2c->base)) + return PTR_ERR(ki2c->base); + + adap = &ki2c->adapter; + strscpy(adap->name, "KEBA I2C adapter", sizeof(adap->name)); + adap->owner = THIS_MODULE; + adap->class = I2C_CLASS_HWMON; + adap->algo = &ki2c_algo; + adap->dev.parent = dev; + + i2c_set_adapdata(adap, ki2c); + + /* enable controller */ + iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG); + + /* reset bus before probing I2C devices */ + ret = ki2c_reset_bus(ki2c); + if (ret) + goto out; + + ret = devm_i2c_add_adapter(dev, adap); + if (ret) { + dev_err(dev, "Failed to add adapter (%d)!\n", ret); + goto out; + } + + ret = ki2c_register_devices(ki2c); + if (ret) { + dev_err(dev, "Failed to register devices (%d)!\n", ret); + goto out; + } + + return 0; + +out: + iowrite8(KI2C_CONTROL_DISABLE, ki2c->base + KI2C_CONTROL_REG); + return ret; +} + +static void ki2c_remove(struct auxiliary_device *auxdev) +{ + struct ki2c *ki2c = auxiliary_get_drvdata(auxdev); + + ki2c_unregister_devices(ki2c); + + /* disable controller */ + iowrite8(KI2C_CONTROL_DISABLE, ki2c->base + KI2C_CONTROL_REG); + + auxiliary_set_drvdata(auxdev, NULL); +} + +static const struct auxiliary_device_id ki2c_devtype_aux[] = { + { .name = "keba.i2c" }, + { } +}; +MODULE_DEVICE_TABLE(auxiliary, ki2c_devtype_aux); + +static struct auxiliary_driver ki2c_driver_aux = { + .name = KI2C, + .id_table = ki2c_devtype_aux, + .probe = ki2c_probe, + .remove = ki2c_remove, +}; +module_auxiliary_driver(ki2c_driver_aux); + +MODULE_AUTHOR("Gerhard Engleder "); +MODULE_DESCRIPTION("KEBA I2C bus controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-ljca.c b/drivers/i2c/busses/i2c-ljca.c index 0b70621cf9d338..1dc516ef0fddc5 100644 --- a/drivers/i2c/busses/i2c-ljca.c +++ b/drivers/i2c/busses/i2c-ljca.c @@ -107,7 +107,7 @@ static int ljca_i2c_start(struct ljca_i2c_dev *ljca_i2c, 
u8 target_addr, return 0; } -static void ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c, u8 target_addr) +static void ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c) { struct ljca_i2c_rw_packet *w_packet = (struct ljca_i2c_rw_packet *)ljca_i2c->obuf; @@ -178,7 +178,7 @@ static int ljca_i2c_read(struct ljca_i2c_dev *ljca_i2c, u8 target_addr, u8 *data if (!ret) ret = ljca_i2c_pure_read(ljca_i2c, data, len); - ljca_i2c_stop(ljca_i2c, target_addr); + ljca_i2c_stop(ljca_i2c); return ret; } @@ -222,7 +222,7 @@ static int ljca_i2c_write(struct ljca_i2c_dev *ljca_i2c, u8 target_addr, if (!ret) ret = ljca_i2c_pure_write(ljca_i2c, data, len); - ljca_i2c_stop(ljca_i2c, target_addr); + ljca_i2c_stop(ljca_i2c); return ret; } diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 41d6c8ed163a26..236d6b8ba86757 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -88,7 +88,6 @@ struct mpc_i2c { int irq; u32 real_clk; u8 fdr, dfsrr; - struct clk *clk_per; u32 cntl_bits; enum mpc_i2c_action action; struct i2c_msg *msgs; @@ -779,7 +778,6 @@ static int fsl_i2c_probe(struct platform_device *op) struct clk *clk; int result; u32 clock; - int err; i2c = devm_kzalloc(&op->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) @@ -809,18 +807,12 @@ static int fsl_i2c_probe(struct platform_device *op) * enable clock for the I2C peripheral (non fatal), * keep a reference upon successful allocation */ - clk = devm_clk_get_optional(&op->dev, NULL); - if (IS_ERR(clk)) - return PTR_ERR(clk); - - err = clk_prepare_enable(clk); - if (err) { + clk = devm_clk_get_optional_enabled(&op->dev, NULL); + if (IS_ERR(clk)) { dev_err(&op->dev, "failed to enable clock\n"); - return err; + return PTR_ERR(clk); } - i2c->clk_per = clk; - if (of_property_read_bool(op->dev.of_node, "fsl,preserve-clocking")) { clock = MPC_I2C_CLOCK_PRESERVE; } else { @@ -876,14 +868,9 @@ static int fsl_i2c_probe(struct platform_device *op) result = i2c_add_numbered_adapter(&i2c->adap); if (result) - goto fail_add; + return result; return 0; - - fail_add: - clk_disable_unprepare(i2c->clk_per); - - return result; }; static void fsl_i2c_remove(struct platform_device *op) @@ -891,8 +878,6 @@ static void fsl_i2c_remove(struct platform_device *op) struct mpc_i2c *i2c = platform_get_drvdata(op); i2c_del_adapter(&i2c->adap); - - clk_disable_unprepare(i2c->clk_per); }; static int __maybe_unused mpc_i2c_suspend(struct device *dev) diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index a8b5719c33729e..e0ba653dec2d67 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1306,12 +1306,9 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id) { struct mtk_i2c *i2c = dev_id; - u16 restart_flag = 0; + u16 restart_flag = i2c->auto_restart ? I2C_RS_TRANSFER : 0; u16 intr_stat; - if (i2c->auto_restart) - restart_flag = I2C_RS_TRANSFER; - intr_stat = mtk_i2c_readw(i2c, OFFSET_INTR_STAT); mtk_i2c_writew(i2c, intr_stat, OFFSET_INTR_STAT); diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 2fe68615942efe..bbcb4d6668ce63 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -136,11 +136,13 @@ enum i2c_addr { * Since the addr regs are sprinkled all over the address space, * use this array to get the address or each register. 
*/ -#define I2C_NUM_OWN_ADDR 2 +#define I2C_NUM_OWN_ADDR 10 #define I2C_NUM_OWN_ADDR_SUPPORTED 2 static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = { - NPCM_I2CADDR1, NPCM_I2CADDR2, + NPCM_I2CADDR1, NPCM_I2CADDR2, NPCM_I2CADDR3, NPCM_I2CADDR4, + NPCM_I2CADDR5, NPCM_I2CADDR6, NPCM_I2CADDR7, NPCM_I2CADDR8, + NPCM_I2CADDR9, NPCM_I2CADDR10, }; #endif diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index 9bcaa29a71913d..541d808d62d0d5 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include "i2c-ccgx-ucsi.h" diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 35a3f0a6498645..1d9ad25c89ae55 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1261,7 +1261,7 @@ static const struct of_device_id omap_i2c_of_match[] = { .compatible = "ti,omap2420-i2c", .data = &omap2420_pdata, }, - { }, + { } }; MODULE_DEVICE_TABLE(of, omap_i2c_of_match); #endif diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 4e32d57ae0bf37..febbd9950d8f89 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -146,7 +146,7 @@ static const struct dmi_system_id piix4_dmi_ibm[] = { .ident = "IBM", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, - { }, + { } }; /* diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index f448505d546827..1dafadda73af3a 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -721,7 +721,7 @@ static void i2c_pnx_remove(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id i2c_pnx_of_match[] = { { .compatible = "nxp,pnx-i2c" }, - { }, + { } }; MODULE_DEVICE_TABLE(of, i2c_pnx_of_match); #endif diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c index 6b3c6a73336823..af2094720a4dc3 100644 --- a/drivers/i2c/busses/i2c-pxa-pci.c +++ b/drivers/i2c/busses/i2c-pxa-pci.c @@ -135,7 +135,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev, static const struct pci_device_id ce4100_i2c_devices[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)}, - { }, + { } }; static struct pci_driver ce4100_i2c_driver = { diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 031175113dd412..4d76e71cdd4be1 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -218,7 +218,7 @@ static const struct platform_device_id i2c_pxa_id_table[] = { { "ce4100-i2c", REGS_CE4100 }, { "pxa910-i2c", REGS_PXA910 }, { "armada-3700-i2c", REGS_A3700 }, - { }, + { } }; MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 06e836e3e87733..212336f724a693 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -721,7 +721,7 @@ static const struct i2c_algorithm geni_i2c_algo = { static const struct acpi_device_id geni_i2c_acpi_match[] = { { "QCOM0220"}, { "QCOM0411" }, - { }, + { } }; MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match); #endif @@ -818,15 +818,13 @@ static int geni_i2c_probe(struct platform_device *pdev) init_completion(&gi2c->done); spin_lock_init(&gi2c->lock); platform_set_drvdata(pdev, gi2c); - ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0, + ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN, dev_name(dev), gi2c); if (ret) { dev_err(dev, "Request_irq failed:%d: err:%d\n", gi2c->irq, ret); 
return ret; } - /* Disable the interrupt so that the system can enter low-power mode */ - disable_irq(gi2c->irq); i2c_set_adapdata(&gi2c->adap, gi2c); gi2c->adap.dev.parent = dev; gi2c->adap.dev.of_node = dev->of_node; @@ -986,21 +984,24 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev) return ret; ret = clk_prepare_enable(gi2c->core_clk); - if (ret) { - geni_icc_disable(&gi2c->se); - return ret; - } + if (ret) + goto out_icc_disable; ret = geni_se_resources_on(&gi2c->se); - if (ret) { - clk_disable_unprepare(gi2c->core_clk); - geni_icc_disable(&gi2c->se); - return ret; - } + if (ret) + goto out_clk_disable; enable_irq(gi2c->irq); gi2c->suspended = 0; + return 0; + +out_clk_disable: + clk_disable_unprepare(gi2c->core_clk); +out_icc_disable: + geni_icc_disable(&gi2c->se); + + return ret; } static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev) diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 4a2c745751a234..d480162a4d3941 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1648,7 +1648,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup) static const struct acpi_device_id qup_i2c_acpi_match[] = { { "QCOM8010"}, - { }, + { } }; MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index da4b07c0ed4c07..9267df38c2d0a1 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -1164,11 +1164,6 @@ static int rcar_i2c_probe(struct platform_device *pdev) rcar_i2c_init(priv); rcar_i2c_reset_slave(priv); - if (priv->devtype < I2C_RCAR_GEN3) { - irqflags |= IRQF_NO_THREAD; - irqhandler = rcar_i2c_gen2_irq; - } - /* Stay always active when multi-master to keep arbitration working */ if (of_property_read_bool(dev->of_node, "multi-master")) priv->flags |= ID_P_PM_BLOCKED; @@ -1178,8 +1173,11 @@ static int rcar_i2c_probe(struct platform_device *pdev) if (of_property_read_bool(dev->of_node, "smbus")) priv->flags |= ID_P_HOST_NOTIFY; - /* R-Car Gen3+ needs a reset before every transfer */ - if (priv->devtype >= I2C_RCAR_GEN3) { + if (priv->devtype < I2C_RCAR_GEN3) { + irqflags |= IRQF_NO_THREAD; + irqhandler = rcar_i2c_gen2_irq; + } else { + /* R-Car Gen3+ needs a reset before every transfer */ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(priv->rstc)) { ret = PTR_ERR(priv->rstc); diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index d6f585cdb7e5d6..c7f3a4c0247023 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -63,6 +63,8 @@ #define ICMR3_ACKWP 0x10 #define ICMR3_ACKBT 0x08 +#define ICFER_FMPE 0x80 + #define ICIER_TIE 0x80 #define ICIER_TEIE 0x40 #define ICIER_RIE 0x20 @@ -80,6 +82,7 @@ enum riic_reg_list { RIIC_ICCR2, RIIC_ICMR1, RIIC_ICMR3, + RIIC_ICFER, RIIC_ICSER, RIIC_ICIER, RIIC_ICSR2, @@ -91,7 +94,8 @@ enum riic_reg_list { }; struct riic_of_data { - u8 regs[RIIC_REG_END]; + const u8 *regs; + bool fast_mode_plus; }; struct riic_dev { @@ -105,6 +109,8 @@ struct riic_dev { struct completion msg_done; struct i2c_adapter adapter; struct clk *clk; + struct reset_control *rstc; + struct i2c_timings i2c_t; }; struct riic_irq_desc { @@ -131,11 +137,14 @@ static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct riic_dev *riic = i2c_get_adapdata(adap); + struct device *dev = adap->dev.parent; unsigned long 
time_left; - int i; + int i, ret; u8 start_bit; - pm_runtime_get_sync(adap->dev.parent); + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; if (riic_readb(riic, RIIC_ICCR2) & ICCR2_BBSY) { riic->err = -EBUSY; @@ -168,7 +177,8 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) } out: - pm_runtime_put(adap->dev.parent); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); return riic->err ?: num; } @@ -298,21 +308,21 @@ static const struct i2c_algorithm riic_algo = { .functionality = riic_func, }; -static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t) +static int riic_init_hw(struct riic_dev *riic) { - int ret = 0; + int ret; unsigned long rate; int total_ticks, cks, brl, brh; + struct i2c_timings *t = &riic->i2c_t; + struct device *dev = riic->adapter.dev.parent; + bool fast_mode_plus = riic->info->fast_mode_plus; + u32 max_freq = fast_mode_plus ? I2C_MAX_FAST_MODE_PLUS_FREQ + : I2C_MAX_FAST_MODE_FREQ; - pm_runtime_get_sync(riic->adapter.dev.parent); - - if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) { - dev_err(&riic->adapter.dev, - "unsupported bus speed (%dHz). %d max\n", - t->bus_freq_hz, I2C_MAX_FAST_MODE_FREQ); - ret = -EINVAL; - goto out; - } + if (t->bus_freq_hz > max_freq) + return dev_err_probe(&riic->adapter.dev, -EINVAL, + "unsupported bus speed %uHz (%u max)\n", + t->bus_freq_hz, max_freq); rate = clk_get_rate(riic->clk); @@ -349,8 +359,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t) if (brl > (0x1F + 3)) { dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n", (unsigned long)t->bus_freq_hz); - ret = -EINVAL; - goto out; + return -EINVAL; } brh = total_ticks - brl; @@ -382,6 +391,10 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t) t->scl_fall_ns / (1000000000 / rate), t->scl_rise_ns / (1000000000 / rate), cks, brl, brh); + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + /* Changing the order of accessing IICRST and ICE may break things! 
*/ riic_writeb(riic, ICCR1_IICRST | ICCR1_SOWP, RIIC_ICCR1); riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1); @@ -393,11 +406,14 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t) riic_writeb(riic, 0, RIIC_ICSER); riic_writeb(riic, ICMR3_ACKWP | ICMR3_RDRFS, RIIC_ICMR3); + if (fast_mode_plus && t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) + riic_clear_set_bit(riic, 0, ICFER_FMPE, RIIC_ICFER); + riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1); -out: - pm_runtime_put(riic->adapter.dev.parent); - return ret; + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + return 0; } static struct riic_irq_desc riic_irqs[] = { @@ -415,13 +431,12 @@ static void riic_reset_control_assert(void *data) static int riic_i2c_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct riic_dev *riic; struct i2c_adapter *adap; - struct i2c_timings i2c_t; - struct reset_control *rstc; int i, ret; - riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL); + riic = devm_kzalloc(dev, sizeof(*riic), GFP_KERNEL); if (!riic) return -ENOMEM; @@ -429,22 +444,22 @@ static int riic_i2c_probe(struct platform_device *pdev) if (IS_ERR(riic->base)) return PTR_ERR(riic->base); - riic->clk = devm_clk_get(&pdev->dev, NULL); + riic->clk = devm_clk_get(dev, NULL); if (IS_ERR(riic->clk)) { - dev_err(&pdev->dev, "missing controller clock"); + dev_err(dev, "missing controller clock"); return PTR_ERR(riic->clk); } - rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); - if (IS_ERR(rstc)) - return dev_err_probe(&pdev->dev, PTR_ERR(rstc), + riic->rstc = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(riic->rstc)) + return dev_err_probe(dev, PTR_ERR(riic->rstc), "Error: missing reset ctrl\n"); - ret = reset_control_deassert(rstc); + ret = reset_control_deassert(riic->rstc); if (ret) return ret; - ret = devm_add_action_or_reset(&pdev->dev, riic_reset_control_assert, rstc); + ret = devm_add_action_or_reset(dev, riic_reset_control_assert, riic->rstc); if (ret) return ret; @@ -453,31 +468,34 @@ static int riic_i2c_probe(struct platform_device *pdev) if (ret < 0) return ret; - ret = devm_request_irq(&pdev->dev, ret, riic_irqs[i].isr, + ret = devm_request_irq(dev, ret, riic_irqs[i].isr, 0, riic_irqs[i].name, riic); if (ret) { - dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name); + dev_err(dev, "failed to request irq %s\n", riic_irqs[i].name); return ret; } } - riic->info = of_device_get_match_data(&pdev->dev); + riic->info = of_device_get_match_data(dev); adap = &riic->adapter; i2c_set_adapdata(adap, riic); strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name)); adap->owner = THIS_MODULE; adap->algo = &riic_algo; - adap->dev.parent = &pdev->dev; - adap->dev.of_node = pdev->dev.of_node; + adap->dev.parent = dev; + adap->dev.of_node = dev->of_node; init_completion(&riic->msg_done); - i2c_parse_fw_timings(&pdev->dev, &i2c_t, true); + i2c_parse_fw_timings(dev, &riic->i2c_t, true); - pm_runtime_enable(&pdev->dev); + /* Default 0 to save power. Can be overridden via sysfs for lower latency. 
*/ + pm_runtime_set_autosuspend_delay(dev, 0); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); - ret = riic_init_hw(riic, &i2c_t); + ret = riic_init_hw(riic); if (ret) goto out; @@ -487,60 +505,127 @@ static int riic_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, riic); - dev_info(&pdev->dev, "registered with %dHz bus speed\n", - i2c_t.bus_freq_hz); + dev_info(dev, "registered with %dHz bus speed\n", riic->i2c_t.bus_freq_hz); return 0; out: - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); return ret; } static void riic_i2c_remove(struct platform_device *pdev) { struct riic_dev *riic = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int ret; - pm_runtime_get_sync(&pdev->dev); - riic_writeb(riic, 0, RIIC_ICIER); - pm_runtime_put(&pdev->dev); + ret = pm_runtime_resume_and_get(dev); + if (!ret) { + riic_writeb(riic, 0, RIIC_ICIER); + pm_runtime_put(dev); + } i2c_del_adapter(&riic->adapter); - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); } +static const u8 riic_rz_a_regs[RIIC_REG_END] = { + [RIIC_ICCR1] = 0x00, + [RIIC_ICCR2] = 0x04, + [RIIC_ICMR1] = 0x08, + [RIIC_ICMR3] = 0x10, + [RIIC_ICFER] = 0x14, + [RIIC_ICSER] = 0x18, + [RIIC_ICIER] = 0x1c, + [RIIC_ICSR2] = 0x24, + [RIIC_ICBRL] = 0x34, + [RIIC_ICBRH] = 0x38, + [RIIC_ICDRT] = 0x3c, + [RIIC_ICDRR] = 0x40, +}; + static const struct riic_of_data riic_rz_a_info = { - .regs = { - [RIIC_ICCR1] = 0x00, - [RIIC_ICCR2] = 0x04, - [RIIC_ICMR1] = 0x08, - [RIIC_ICMR3] = 0x10, - [RIIC_ICSER] = 0x18, - [RIIC_ICIER] = 0x1c, - [RIIC_ICSR2] = 0x24, - [RIIC_ICBRL] = 0x34, - [RIIC_ICBRH] = 0x38, - [RIIC_ICDRT] = 0x3c, - [RIIC_ICDRR] = 0x40, - }, + .regs = riic_rz_a_regs, + .fast_mode_plus = true, +}; + +static const struct riic_of_data riic_rz_a1h_info = { + .regs = riic_rz_a_regs, +}; + +static const u8 riic_rz_v2h_regs[RIIC_REG_END] = { + [RIIC_ICCR1] = 0x00, + [RIIC_ICCR2] = 0x01, + [RIIC_ICMR1] = 0x02, + [RIIC_ICMR3] = 0x04, + [RIIC_ICFER] = 0x05, + [RIIC_ICSER] = 0x06, + [RIIC_ICIER] = 0x07, + [RIIC_ICSR2] = 0x09, + [RIIC_ICBRL] = 0x10, + [RIIC_ICBRH] = 0x11, + [RIIC_ICDRT] = 0x12, + [RIIC_ICDRR] = 0x13, }; static const struct riic_of_data riic_rz_v2h_info = { - .regs = { - [RIIC_ICCR1] = 0x00, - [RIIC_ICCR2] = 0x01, - [RIIC_ICMR1] = 0x02, - [RIIC_ICMR3] = 0x04, - [RIIC_ICSER] = 0x06, - [RIIC_ICIER] = 0x07, - [RIIC_ICSR2] = 0x09, - [RIIC_ICBRL] = 0x10, - [RIIC_ICBRH] = 0x11, - [RIIC_ICDRT] = 0x12, - [RIIC_ICDRR] = 0x13, - }, + .regs = riic_rz_v2h_regs, + .fast_mode_plus = true, +}; + +static int riic_i2c_suspend(struct device *dev) +{ + struct riic_dev *riic = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + i2c_mark_adapter_suspended(&riic->adapter); + + /* Disable output on SDA, SCL pins. */ + riic_clear_set_bit(riic, ICCR1_ICE, 0, RIIC_ICCR1); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_sync(dev); + + return reset_control_assert(riic->rstc); +} + +static int riic_i2c_resume(struct device *dev) +{ + struct riic_dev *riic = dev_get_drvdata(dev); + int ret; + + ret = reset_control_deassert(riic->rstc); + if (ret) + return ret; + + ret = riic_init_hw(riic); + if (ret) { + /* + * In case this happens there is no way to recover from this + * state. The driver will remain loaded. We want to avoid + * keeping the reset line de-asserted for no reason. 
+ */ + reset_control_assert(riic->rstc); + return ret; + } + + i2c_mark_adapter_resumed(&riic->adapter); + + return 0; +} + +static const struct dev_pm_ops riic_i2c_pm_ops = { + SYSTEM_SLEEP_PM_OPS(riic_i2c_suspend, riic_i2c_resume) }; static const struct of_device_id riic_i2c_dt_ids[] = { { .compatible = "renesas,riic-rz", .data = &riic_rz_a_info }, + { .compatible = "renesas,riic-r7s72100", .data = &riic_rz_a1h_info, }, { .compatible = "renesas,riic-r9a09g057", .data = &riic_rz_v2h_info }, { /* Sentinel */ }, }; @@ -551,6 +636,7 @@ static struct platform_driver riic_i2c_driver = { .driver = { .name = "i2c-riic", .of_match_table = riic_i2c_dt_ids, + .pm = pm_ptr(&riic_i2c_pm_ops), }, }; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 01419c738cfc4f..7698d9d5974483 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -130,7 +130,7 @@ static const struct platform_device_id s3c24xx_driver_ids[] = { }, { .name = "s3c2440-hdmiphy-i2c", .driver_data = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO, - }, { }, + }, { } }; MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index cfee2d9c09de36..0174ead99de6c1 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -2395,7 +2395,7 @@ static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev) struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev); if (!stm32f7_i2c_is_slave_registered(i2c_dev)) - clk_disable_unprepare(i2c_dev->clk); + clk_disable(i2c_dev->clk); return 0; } @@ -2406,9 +2406,9 @@ static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev) int ret; if (!stm32f7_i2c_is_slave_registered(i2c_dev)) { - ret = clk_prepare_enable(i2c_dev->clk); + ret = clk_enable(i2c_dev->clk); if (ret) { - dev_err(dev, "failed to prepare_enable clock\n"); + dev_err(dev, "failed to enable clock\n"); return ret; } } diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index 4eccbcd0fbfc00..bbb9062669e4b2 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -550,12 +550,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev) device_property_read_u32(&pdev->dev, "socionext,pclk-rate", &i2c->pclkrate); - pclk = devm_clk_get_enabled(&pdev->dev, "pclk"); + pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk"); if (IS_ERR(pclk)) return dev_err_probe(&pdev->dev, PTR_ERR(pclk), "failed to get and enable clock\n"); - i2c->pclkrate = clk_get_rate(pclk); + if (pclk) + i2c->pclkrate = clk_get_rate(pclk); if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE || i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE) diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c index 52ba1e0845ca78..2a351f961b8993 100644 --- a/drivers/i2c/busses/i2c-virtio.c +++ b/drivers/i2c/busses/i2c-virtio.c @@ -182,7 +182,7 @@ static u32 virtio_i2c_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm virtio_algorithm = { +static const struct i2c_algorithm virtio_algorithm = { .xfer = virtio_i2c_xfer, .functionality = virtio_i2c_func, }; @@ -237,7 +237,7 @@ static void virtio_i2c_remove(struct virtio_device *vdev) virtio_i2c_del_vqs(vdev); } -static struct virtio_device_id id_table[] = { +static const struct virtio_device_id id_table[] = { { VIRTIO_ID_I2C_ADAPTER, VIRTIO_DEV_ANY_ID }, {} }; diff --git a/drivers/i2c/busses/i2c-xiic.c 
b/drivers/i2c/busses/i2c-xiic.c index 19468565120e1c..1d68177241a6b3 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -772,14 +772,17 @@ static irqreturn_t xiic_process(int irq, void *dev_id) goto out; } - xiic_fill_tx_fifo(i2c); - - /* current message sent and there is space in the fifo */ - if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) { + if (xiic_tx_space(i2c)) { + xiic_fill_tx_fifo(i2c); + } else { + /* current message fully written */ dev_dbg(i2c->adap.dev.parent, "%s end of message sent, nmsgs: %d\n", __func__, i2c->nmsgs); - if (i2c->nmsgs > 1) { + /* Don't move onto the next message until the TX FIFO empties, + * to ensure that a NAK is not missed. + */ + if (i2c->nmsgs > 1 && (pend & XIIC_INTR_TX_EMPTY_MASK)) { i2c->nmsgs--; i2c->tx_msg++; xfer_more = 1; @@ -790,11 +793,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id) "%s Got TX IRQ but no more to do...\n", __func__); } - } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1)) - /* current frame is sent and is last, - * make sure to disable tx half - */ - xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK); + } } if (pend & XIIC_INTR_BNB_MASK) { @@ -844,23 +843,11 @@ static int xiic_bus_busy(struct xiic_i2c *i2c) return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0; } -static int xiic_busy(struct xiic_i2c *i2c) +static int xiic_wait_not_busy(struct xiic_i2c *i2c) { int tries = 3; int err; - if (i2c->tx_msg || i2c->rx_msg) - return -EBUSY; - - /* In single master mode bus can only be busy, when in use by this - * driver. If the register indicates bus being busy for some reason we - * should ignore it, since bus will never be released and i2c will be - * stuck forever. - */ - if (i2c->singlemaster) { - return 0; - } - /* for instance if previous transfer was terminated due to TX error * it might be that the bus is on it's way to become available * give it at most 3 ms to wake @@ -1104,13 +1091,36 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num) mutex_lock(&i2c->lock); - ret = xiic_busy(i2c); - if (ret) { + if (i2c->tx_msg || i2c->rx_msg) { dev_err(i2c->adap.dev.parent, "cannot start a transfer while busy\n"); + ret = -EBUSY; goto out; } + /* In single master mode bus can only be busy, when in use by this + * driver. If the register indicates bus being busy for some reason we + * should ignore it, since bus will never be released and i2c will be + * stuck forever. + */ + if (!i2c->singlemaster) { + ret = xiic_wait_not_busy(i2c); + if (ret) { + /* If the bus is stuck in a busy state, such as due to spurious low + * pulses on the bus causing a false start condition to be detected, + * then try to recover by re-initializing the controller and check + * again if the bus is still busy. 
+ */ + dev_warn(i2c->adap.dev.parent, "I2C bus busy timeout, reinitializing\n"); + ret = xiic_reinit(i2c); + if (ret) + goto out; + ret = xiic_wait_not_busy(i2c); + if (ret) + goto out; + } + } + i2c->tx_msg = msgs; i2c->rx_msg = NULL; i2c->nmsgs = num; @@ -1327,8 +1337,8 @@ static int xiic_i2c_probe(struct platform_device *pdev) return 0; err_pm_disable: - pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); return ret; } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index b63f75e442964b..7c810893bfa332 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -915,6 +915,27 @@ int i2c_dev_irq_from_resources(const struct resource *resources, return 0; } +/* + * Serialize device instantiation in case it can be instantiated explicitly + * and by auto-detection + */ +static int i2c_lock_addr(struct i2c_adapter *adap, unsigned short addr, + unsigned short flags) +{ + if (!(flags & I2C_CLIENT_TEN) && + test_and_set_bit(addr, adap->addrs_in_instantiation)) + return -EBUSY; + + return 0; +} + +static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr, + unsigned short flags) +{ + if (!(flags & I2C_CLIENT_TEN)) + clear_bit(addr, adap->addrs_in_instantiation); +} + /** * i2c_new_client_device - instantiate an i2c device * @adap: the adapter managing the device @@ -962,6 +983,10 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf goto out_err_silent; } + status = i2c_lock_addr(adap, client->addr, client->flags); + if (status) + goto out_err_silent; + /* Check for address business */ status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client)); if (status) @@ -993,6 +1018,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n", client->name, dev_name(&client->dev)); + i2c_unlock_addr(adap, client->addr, client->flags); + return client; out_remove_swnode: @@ -1004,6 +1031,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x (%d)\n", client->name, client->addr, status); + i2c_unlock_addr(adap, client->addr, client->flags); out_err_silent: if (need_put) put_device(&client->dev); @@ -1068,7 +1096,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode); static const struct i2c_device_id dummy_id[] = { { "dummy", }, { "smbus_host_notify", }, - { }, + { } }; static int dummy_probe(struct i2c_client *client) @@ -1367,10 +1395,6 @@ struct i2c_adapter *i2c_verify_adapter(struct device *dev) } EXPORT_SYMBOL(i2c_verify_adapter); -#ifdef CONFIG_I2C_COMPAT -static struct class_compat *i2c_adapter_compat_class; -#endif - static void i2c_scan_static_board_info(struct i2c_adapter *adapter) { struct i2c_devinfo *devinfo; @@ -1524,7 +1548,18 @@ static int i2c_register_adapter(struct i2c_adapter *adap) dev_set_name(&adap->dev, "i2c-%d", adap->nr); adap->dev.bus = &i2c_bus_type; adap->dev.type = &i2c_adapter_type; - res = device_register(&adap->dev); + device_initialize(&adap->dev); + + /* + * This adapter can be used as a parent immediately after device_add(), + * setup runtime-pm (especially ignore-children) before hand. 
+ */ + device_enable_async_suspend(&adap->dev); + pm_runtime_no_callbacks(&adap->dev); + pm_suspend_ignore_children(&adap->dev, true); + pm_runtime_enable(&adap->dev); + + res = device_add(&adap->dev); if (res) { pr_err("adapter '%s': can't register device (%d)\n", adap->name, res); goto out_list; @@ -1536,25 +1571,12 @@ static int i2c_register_adapter(struct i2c_adapter *adap) if (res) goto out_reg; - device_enable_async_suspend(&adap->dev); - pm_runtime_no_callbacks(&adap->dev); - pm_suspend_ignore_children(&adap->dev, true); - pm_runtime_enable(&adap->dev); - res = i2c_init_recovery(adap); if (res == -EPROBE_DEFER) goto out_reg; dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); -#ifdef CONFIG_I2C_COMPAT - res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev, - adap->dev.parent); - if (res) - dev_warn(&adap->dev, - "Failed to create compatibility class link\n"); -#endif - /* create pre-declared device nodes */ of_i2c_register_devices(adap); i2c_acpi_install_space_handler(adap); @@ -1761,11 +1783,6 @@ void i2c_del_adapter(struct i2c_adapter *adap) device_for_each_child(&adap->dev, NULL, __unregister_client); device_for_each_child(&adap->dev, NULL, __unregister_dummy); -#ifdef CONFIG_I2C_COMPAT - class_compat_remove_link(i2c_adapter_compat_class, &adap->dev, - adap->dev.parent); -#endif - /* device name is gone after device_unregister */ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); @@ -2074,13 +2091,6 @@ static int __init i2c_init(void) i2c_debugfs_root = debugfs_create_dir("i2c", NULL); -#ifdef CONFIG_I2C_COMPAT - i2c_adapter_compat_class = class_compat_register("i2c-adapter"); - if (!i2c_adapter_compat_class) { - retval = -ENOMEM; - goto bus_err; - } -#endif retval = i2c_add_driver(&dummy_driver); if (retval) goto class_err; @@ -2093,10 +2103,6 @@ static int __init i2c_init(void) return 0; class_err: -#ifdef CONFIG_I2C_COMPAT - class_compat_unregister(i2c_adapter_compat_class); -bus_err: -#endif is_registered = false; bus_unregister(&i2c_bus_type); return retval; @@ -2109,9 +2115,6 @@ static void __exit i2c_exit(void) if (IS_ENABLED(CONFIG_OF_DYNAMIC)) WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier)); i2c_del_driver(&dummy_driver); -#ifdef CONFIG_I2C_COMPAT - class_compat_unregister(i2c_adapter_compat_class); -#endif debugfs_remove_recursive(i2c_debugfs_root); bus_unregister(&i2c_bus_type); tracepoint_synchronize_unregister(); diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index e3765e12f93bf6..faefe1dfa8e55b 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -109,15 +109,12 @@ EXPORT_SYMBOL_GPL(i2c_slave_event); bool i2c_detect_slave_mode(struct device *dev) { if (IS_BUILTIN(CONFIG_OF) && dev->of_node) { - struct device_node *child; u32 reg; - for_each_child_of_node(dev->of_node, child) { + for_each_child_of_node_scoped(dev->of_node, child) { of_property_read_u32(child, "reg", ®); - if (reg & I2C_OWN_SLAVE_ADDRESS) { - of_node_put(child); + if (reg & I2C_OWN_SLAVE_ADDRESS) return true; - } } } else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) { dev_dbg(dev, "ACPI slave is not supported yet\n"); diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index f4fb212b7f3920..61f7c4003d2ff7 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -637,7 +637,6 @@ static int i2cdev_release(struct inode *inode, struct file *file) static const struct file_operations i2cdev_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = i2cdev_read, .write = i2cdev_write, 
.unlocked_ioctl = i2cdev_ioctl, diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c index 4c550306f3ec59..9fe3150378e863 100644 --- a/drivers/i2c/i2c-slave-testunit.c +++ b/drivers/i2c/i2c-slave-testunit.c @@ -6,7 +6,10 @@ * Copyright (C) 2020 by Renesas Electronics Corporation */ +#include #include +#include +#include #include #include #include @@ -14,12 +17,14 @@ #include #include /* FIXME: is system_long_wq the best choice? */ -#define TU_CUR_VERSION 0x01 +#define TU_VERSION_MAX_LENGTH 128 enum testunit_cmds { TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */ TU_CMD_SMBUS_HOST_NOTIFY, TU_CMD_SMBUS_BLOCK_PROC_CALL, + TU_CMD_GET_VERSION_WITH_REP_START, + TU_CMD_SMBUS_ALERT_REQUEST, TU_NUM_CMDS }; @@ -39,50 +44,38 @@ struct testunit_data { unsigned long flags; u8 regs[TU_NUM_REGS]; u8 reg_idx; + u8 read_idx; struct i2c_client *client; struct delayed_work worker; + struct gpio_desc *gpio; + struct completion alert_done; }; -static void i2c_slave_testunit_work(struct work_struct *work) -{ - struct testunit_data *tu = container_of(work, struct testunit_data, worker.work); - struct i2c_msg msg; - u8 msgbuf[256]; - int ret = 0; - - msg.addr = I2C_CLIENT_END; - msg.buf = msgbuf; +static char tu_version_info[] = "v" UTS_RELEASE "\n\0"; - switch (tu->regs[TU_REG_CMD]) { - case TU_CMD_READ_BYTES: - msg.addr = tu->regs[TU_REG_DATAL]; - msg.flags = I2C_M_RD; - msg.len = tu->regs[TU_REG_DATAH]; - break; +static int i2c_slave_testunit_smbalert_cb(struct i2c_client *client, + enum i2c_slave_event event, u8 *val) +{ + struct testunit_data *tu = i2c_get_clientdata(client); - case TU_CMD_SMBUS_HOST_NOTIFY: - msg.addr = 0x08; - msg.flags = 0; - msg.len = 3; - msgbuf[0] = tu->client->addr; - msgbuf[1] = tu->regs[TU_REG_DATAL]; - msgbuf[2] = tu->regs[TU_REG_DATAH]; + switch (event) { + case I2C_SLAVE_READ_PROCESSED: + gpiod_set_value(tu->gpio, 0); + fallthrough; + case I2C_SLAVE_READ_REQUESTED: + *val = tu->regs[TU_REG_DATAL]; break; - default: + case I2C_SLAVE_STOP: + complete(&tu->alert_done); break; - } - if (msg.addr != I2C_CLIENT_END) { - ret = i2c_transfer(tu->client->adapter, &msg, 1); - /* convert '0 msgs transferred' to errno */ - ret = (ret == 0) ? 
-EIO : ret; + case I2C_SLAVE_WRITE_REQUESTED: + case I2C_SLAVE_WRITE_RECEIVED: + return -EOPNOTSUPP; } - if (ret < 0) - dev_err(&tu->client->dev, "CMD%02X failed (%d)\n", tu->regs[TU_REG_CMD], ret); - - clear_bit(TU_FLAG_IN_PROCESS, &tu->flags); + return 0; } static int i2c_slave_testunit_slave_cb(struct i2c_client *client, @@ -91,9 +84,20 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client, struct testunit_data *tu = i2c_get_clientdata(client); bool is_proc_call = tu->reg_idx == 3 && tu->regs[TU_REG_DATAL] == 1 && tu->regs[TU_REG_CMD] == TU_CMD_SMBUS_BLOCK_PROC_CALL; + bool is_get_version = tu->reg_idx == 3 && + tu->regs[TU_REG_CMD] == TU_CMD_GET_VERSION_WITH_REP_START; int ret = 0; switch (event) { + case I2C_SLAVE_WRITE_REQUESTED: + if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags)) + return -EBUSY; + + memset(tu->regs, 0, TU_NUM_REGS); + tu->reg_idx = 0; + tu->read_idx = 0; + break; + case I2C_SLAVE_WRITE_RECEIVED: if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags)) return -EBUSY; @@ -127,27 +131,93 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client, tu->reg_idx = 0; break; - case I2C_SLAVE_WRITE_REQUESTED: - if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags)) - return -EBUSY; - - memset(tu->regs, 0, TU_NUM_REGS); - tu->reg_idx = 0; - break; - case I2C_SLAVE_READ_PROCESSED: - if (is_proc_call && tu->regs[TU_REG_DATAH]) + /* Advance until we reach the NUL character */ + if (is_get_version && tu_version_info[tu->read_idx] != 0) + tu->read_idx++; + else if (is_proc_call && tu->regs[TU_REG_DATAH]) tu->regs[TU_REG_DATAH]--; + fallthrough; case I2C_SLAVE_READ_REQUESTED: - *val = is_proc_call ? tu->regs[TU_REG_DATAH] : TU_CUR_VERSION; + if (is_get_version) + *val = tu_version_info[tu->read_idx]; + else if (is_proc_call) + *val = tu->regs[TU_REG_DATAH]; + else + *val = test_bit(TU_FLAG_IN_PROCESS, &tu->flags) ? + tu->regs[TU_REG_CMD] : 0; break; } return ret; } +static void i2c_slave_testunit_work(struct work_struct *work) +{ + struct testunit_data *tu = container_of(work, struct testunit_data, worker.work); + unsigned long time_left; + struct i2c_msg msg; + u8 msgbuf[256]; + u16 orig_addr; + int ret = 0; + + msg.addr = I2C_CLIENT_END; + msg.buf = msgbuf; + + switch (tu->regs[TU_REG_CMD]) { + case TU_CMD_READ_BYTES: + msg.addr = tu->regs[TU_REG_DATAL]; + msg.flags = I2C_M_RD; + msg.len = tu->regs[TU_REG_DATAH]; + break; + + case TU_CMD_SMBUS_HOST_NOTIFY: + msg.addr = 0x08; + msg.flags = 0; + msg.len = 3; + msgbuf[0] = tu->client->addr; + msgbuf[1] = tu->regs[TU_REG_DATAL]; + msgbuf[2] = tu->regs[TU_REG_DATAH]; + break; + + case TU_CMD_SMBUS_ALERT_REQUEST: + i2c_slave_unregister(tu->client); + orig_addr = tu->client->addr; + tu->client->addr = 0x0c; + ret = i2c_slave_register(tu->client, i2c_slave_testunit_smbalert_cb); + if (ret) + goto out_smbalert; + + reinit_completion(&tu->alert_done); + gpiod_set_value(tu->gpio, 1); + time_left = wait_for_completion_timeout(&tu->alert_done, HZ); + if (!time_left) + ret = -ETIMEDOUT; + + i2c_slave_unregister(tu->client); +out_smbalert: + tu->client->addr = orig_addr; + i2c_slave_register(tu->client, i2c_slave_testunit_slave_cb); + break; + + default: + break; + } + + if (msg.addr != I2C_CLIENT_END) { + ret = i2c_transfer(tu->client->adapter, &msg, 1); + /* convert '0 msgs transferred' to errno */ + ret = (ret == 0) ? 
-EIO : ret; + } + + if (ret < 0) + dev_err(&tu->client->dev, "CMD%02X failed (%d)\n", tu->regs[TU_REG_CMD], ret); + + clear_bit(TU_FLAG_IN_PROCESS, &tu->flags); +} + static int i2c_slave_testunit_probe(struct i2c_client *client) { struct testunit_data *tu; @@ -158,8 +228,18 @@ static int i2c_slave_testunit_probe(struct i2c_client *client) tu->client = client; i2c_set_clientdata(client, tu); + init_completion(&tu->alert_done); INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work); + tu->gpio = devm_gpiod_get_index_optional(&client->dev, NULL, 0, GPIOD_OUT_LOW); + if (gpiod_cansleep(tu->gpio)) { + dev_err(&client->dev, "GPIO access which may sleep is not allowed\n"); + return -EDEADLK; + } + + if (sizeof(tu_version_info) > TU_VERSION_MAX_LENGTH) + tu_version_info[TU_VERSION_MAX_LENGTH - 1] = 0; + return i2c_slave_register(client, i2c_slave_testunit_slave_cb); }; diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index db1b9057612a54..6d2f66810cdc57 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -119,4 +119,20 @@ config I2C_MUX_MLXCPLD This driver can also be built as a module. If so, the module will be called i2c-mux-mlxcpld. +config I2C_MUX_MULE + tristate "Theobroma Systems Mule I2C device multiplexer" + depends on OF && SENSORS_AMC6821 + help + Mule is an MCU that emulates a set of I2C devices, among which + devices that are reachable through an I2C-mux. The devices on the mux + can be selected by writing the appropriate device number to an I2C + configuration register. + + If you say yes to this option, support will be included for a + Theobroma Systems Mule I2C multiplexer. This driver provides access to + I2C devices connected on this mux. + + This driver can also be built as a module. If so, the module + will be called i2c-mux-mule. 
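The help text above describes the whole selection scheme: the Mule MCU exposes one I2C configuration register, and writing a device number to it routes subsequent transfers to that device. Roughly, such a register-write mux select on top of the i2c-mux core looks like the sketch below (the register address, the default channel and the use of SMBus byte writes are assumptions made for illustration; the real i2c-mux-mule.c added further down in this patch goes through the parent device's regmap instead):

#include <linux/i2c.h>
#include <linux/i2c-mux.h>

#define EXAMPLE_MUX_CONFIG_REG	0xff	/* assumed: register holding the selected device */
#define EXAMPLE_MUX_DEFAULT_DEV	0x0	/* assumed: device that always exists */

struct example_mux {
	struct i2c_client *client;	/* parent MCU client, stored here in probe */
};

/* The i2c-mux core calls this before every transfer aimed at channel @chan. */
static int example_mux_select(struct i2c_mux_core *muxc, u32 chan)
{
	struct example_mux *mux = i2c_mux_priv(muxc);

	return i2c_smbus_write_byte_data(mux->client, EXAMPLE_MUX_CONFIG_REG, chan);
}

/* Called after the transfer: park the mux on the default device again. */
static int example_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
{
	return example_mux_select(muxc, EXAMPLE_MUX_DEFAULT_DEV);
}

A probe routine would allocate the core with i2c_mux_alloc(parent_adapter, dev, nchildren, sizeof(struct example_mux), I2C_MUX_LOCKED, example_mux_select, example_mux_deselect) and register one adapter per child node with i2c_mux_add_adapter(), which is exactly the shape of the mule_i2c_mux_probe() hunk below.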
+ endmenu diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile index 6d9d865e8518a2..4b24f49515a798 100644 --- a/drivers/i2c/muxes/Makefile +++ b/drivers/i2c/muxes/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o obj-$(CONFIG_I2C_MUX_GPMUX) += i2c-mux-gpmux.o obj-$(CONFIG_I2C_MUX_LTC4306) += i2c-mux-ltc4306.o obj-$(CONFIG_I2C_MUX_MLXCPLD) += i2c-mux-mlxcpld.o +obj-$(CONFIG_I2C_MUX_MULE) += i2c-mux-mule.o obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c new file mode 100644 index 00000000000000..8e942470b35f21 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-mule.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Theobroma Systems Mule I2C device multiplexer + * + * Copyright (C) 2024 Theobroma Systems Design und Consulting GmbH + */ + +#include +#include +#include +#include +#include +#include +#include + +#define MULE_I2C_MUX_CONFIG_REG 0xff +#define MULE_I2C_MUX_DEFAULT_DEV 0x0 + +struct mule_i2c_reg_mux { + struct regmap *regmap; +}; + +static int mule_i2c_mux_select(struct i2c_mux_core *muxc, u32 dev) +{ + struct mule_i2c_reg_mux *mux = muxc->priv; + + return regmap_write(mux->regmap, MULE_I2C_MUX_CONFIG_REG, dev); +} + +static int mule_i2c_mux_deselect(struct i2c_mux_core *muxc, u32 dev) +{ + return mule_i2c_mux_select(muxc, MULE_I2C_MUX_DEFAULT_DEV); +} + +static void mule_i2c_mux_remove(void *data) +{ + struct i2c_mux_core *muxc = data; + + i2c_mux_del_adapters(muxc); + + mule_i2c_mux_deselect(muxc, MULE_I2C_MUX_DEFAULT_DEV); +} + +static int mule_i2c_mux_probe(struct platform_device *pdev) +{ + struct device *mux_dev = &pdev->dev; + struct mule_i2c_reg_mux *priv; + struct i2c_client *client; + struct i2c_mux_core *muxc; + struct device_node *dev; + unsigned int readback; + int ndev, ret; + bool old_fw; + + /* Count devices on the mux */ + ndev = of_get_child_count(mux_dev->of_node); + dev_dbg(mux_dev, "%d devices on the mux\n", ndev); + + client = to_i2c_client(mux_dev->parent); + + muxc = i2c_mux_alloc(client->adapter, mux_dev, ndev, sizeof(*priv), + I2C_MUX_LOCKED, mule_i2c_mux_select, mule_i2c_mux_deselect); + if (!muxc) + return -ENOMEM; + + priv = i2c_mux_priv(muxc); + + priv->regmap = dev_get_regmap(mux_dev->parent, NULL); + if (IS_ERR(priv->regmap)) + return dev_err_probe(mux_dev, PTR_ERR(priv->regmap), + "No parent i2c register map\n"); + + platform_set_drvdata(pdev, muxc); + + /* + * MULE_I2C_MUX_DEFAULT_DEV is guaranteed to exist on all old and new + * mule fw. Mule fw without mux support will accept write ops to the + * config register, but readback returns 0xff (register not updated). 
+ */ + ret = mule_i2c_mux_select(muxc, MULE_I2C_MUX_DEFAULT_DEV); + if (ret) + return dev_err_probe(mux_dev, ret, + "Failed to write config register\n"); + + ret = regmap_read(priv->regmap, MULE_I2C_MUX_CONFIG_REG, &readback); + if (ret) + return dev_err_probe(mux_dev, ret, + "Failed to read config register\n"); + + old_fw = (readback != MULE_I2C_MUX_DEFAULT_DEV); + + ret = devm_add_action_or_reset(mux_dev, mule_i2c_mux_remove, muxc); + if (ret) + return dev_err_probe(mux_dev, ret, + "Failed to register mux remove\n"); + + /* Create device adapters */ + for_each_child_of_node(mux_dev->of_node, dev) { + u32 reg; + + ret = of_property_read_u32(dev, "reg", ®); + if (ret) + return dev_err_probe(mux_dev, ret, + "No reg property found for %s\n", + of_node_full_name(dev)); + + if (old_fw && reg != 0) { + dev_warn(mux_dev, + "Mux is not supported, please update Mule FW\n"); + continue; + } + + ret = mule_i2c_mux_select(muxc, reg); + if (ret) { + dev_warn(mux_dev, + "Device %d not supported, please update Mule FW\n", reg); + continue; + } + + ret = i2c_mux_add_adapter(muxc, 0, reg); + if (ret) + return ret; + } + + mule_i2c_mux_deselect(muxc, MULE_I2C_MUX_DEFAULT_DEV); + + return 0; +} + +static const struct of_device_id mule_i2c_mux_of_match[] = { + { .compatible = "tsd,mule-i2c-mux", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mule_i2c_mux_of_match); + +static struct platform_driver mule_i2c_mux_driver = { + .driver = { + .name = "mule-i2c-mux", + .of_match_table = mule_i2c_mux_of_match, + }, + .probe = mule_i2c_mux_probe, +}; + +module_platform_driver(mule_i2c_mux_driver); + +MODULE_AUTHOR("Farouk Bouabid "); +MODULE_DESCRIPTION("I2C mux driver for Theobroma Systems Mule"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 7028f03c2c42e2..6f3eb710a75d60 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1868,6 +1868,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master) goto err_bus_cleanup; } + if (master->ops->set_speed) { + ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_SLOW_SPEED); + if (ret) + goto err_bus_cleanup; + } + /* * Reset all dynamic address that may have been assigned before * (assigned by the bootloader for example). @@ -1876,6 +1882,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master) if (ret && ret != I3C_ERROR_M2) goto err_bus_cleanup; + if (master->ops->set_speed) { + master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED); + if (ret) + goto err_bus_cleanup; + } + /* Disable all slave events before starting DAA. 
*/ ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR, I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR | diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index c1627f3552ce3e..fe4d59833ad5f4 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -1562,6 +1562,7 @@ static const struct of_device_id cdns_i3c_master_of_ids[] = { { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids); static int cdns_i3c_master_probe(struct platform_device *pdev) { @@ -1666,6 +1667,7 @@ static void cdns_i3c_master_remove(struct platform_device *pdev) { struct cdns_i3c_master *master = platform_get_drvdata(pdev); + cancel_work_sync(&master->hj_work); i3c_master_unregister(&master->base); clk_disable_unprepare(master->sysclk); diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile index a658e7b8262c5d..1f8cd5c48fdef3 100644 --- a/drivers/i3c/master/mipi-i3c-hci/Makefile +++ b/drivers/i3c/master/mipi-i3c-hci/Makefile @@ -3,4 +3,5 @@ obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci.o mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \ cmd_v1.o cmd_v2.o \ - dat_v1.o dct_v1.o + dat_v1.o dct_v1.o \ + hci_quirks.o diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c index 638b054d6c9275..dd636094b07f59 100644 --- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c +++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c @@ -123,17 +123,15 @@ static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci) { struct i3c_bus *bus = i3c_master_get_bus(&hci->master); - if (bus->scl_rate.i3c >= 12500000) - return MODE_I3C_SDR0; if (bus->scl_rate.i3c > 8000000) - return MODE_I3C_SDR1; + return MODE_I3C_SDR0; if (bus->scl_rate.i3c > 6000000) - return MODE_I3C_SDR2; + return MODE_I3C_SDR1; if (bus->scl_rate.i3c > 4000000) - return MODE_I3C_SDR3; + return MODE_I3C_SDR2; if (bus->scl_rate.i3c > 2000000) - return MODE_I3C_SDR4; - return MODE_I3C_Fm_FmP; + return MODE_I3C_SDR3; + return MODE_I3C_SDR4; } static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci) diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index 4e7d6a43ee9b3b..a82c47c9986d35 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -27,11 +26,6 @@ * Host Controller Capabilities and Operation Registers */ -#define reg_read(r) readl(hci->base_regs + (r)) -#define reg_write(r, v) writel(v, hci->base_regs + (r)) -#define reg_set(r, v) reg_write(r, reg_read(r) | (v)) -#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v)) - #define HCI_VERSION 0x00 /* HCI Version (in BCD) */ #define HC_CONTROL 0x04 @@ -152,6 +146,10 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m) if (ret) return ret; + /* Set RESP_BUF_THLD to 0(n) to get 1(n+1) response */ + if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD) + amd_set_resp_buf_thld(hci); + reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE); DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL)); @@ -630,8 +628,8 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id) static int i3c_hci_init(struct i3c_hci *hci) { + bool size_in_dwords, mode_selector; u32 regval, offset; - bool size_in_dwords; int ret; /* Validate HCI hardware version */ @@ -753,10 +751,17 @@ static int i3c_hci_init(struct i3c_hci *hci) return -EINVAL; } + mode_selector = hci->version_major > 1 
|| + (hci->version_major == 1 && hci->version_minor > 0); + + /* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */ + if (hci->quirks & HCI_QUIRK_PIO_MODE) + hci->RHS_regs = NULL; + /* Try activating DMA operations first */ if (hci->RHS_regs) { reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE); - if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) { + if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) { dev_err(&hci->master.dev, "PIO mode is stuck\n"); ret = -EIO; } else { @@ -768,7 +773,7 @@ static int i3c_hci_init(struct i3c_hci *hci) /* If no DMA, try PIO */ if (!hci->io && hci->PIO_regs) { reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE); - if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) { + if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) { dev_err(&hci->master.dev, "DMA mode is stuck\n"); ret = -EIO; } else { @@ -784,6 +789,10 @@ static int i3c_hci_init(struct i3c_hci *hci) return ret; } + /* Configure OD and PP timings for AMD platforms */ + if (hci->quirks & HCI_QUIRK_OD_PP_TIMING) + amd_set_od_pp_timing(hci); + return 0; } @@ -803,6 +812,8 @@ static int i3c_hci_probe(struct platform_device *pdev) /* temporary for dev_printk's, to be replaced in i3c_master_register */ hci->master.dev.init_name = dev_name(&pdev->dev); + hci->quirks = (unsigned long)device_get_match_data(&pdev->dev); + ret = i3c_hci_init(hci); if (ret) return ret; @@ -834,12 +845,19 @@ static const __maybe_unused struct of_device_id i3c_hci_of_match[] = { }; MODULE_DEVICE_TABLE(of, i3c_hci_of_match); +static const struct acpi_device_id i3c_hci_acpi_match[] = { + { "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD }, + {} +}; +MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match); + static struct platform_driver i3c_hci_driver = { .probe = i3c_hci_probe, .remove_new = i3c_hci_remove, .driver = { .name = "mipi-i3c-hci", .of_match_table = of_match_ptr(i3c_hci_of_match), + .acpi_match_table = i3c_hci_acpi_match, }, }; module_platform_driver(i3c_hci_driver); diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h index f94d95e024becc..aaa47ac473814a 100644 --- a/drivers/i3c/master/mipi-i3c-hci/hci.h +++ b/drivers/i3c/master/mipi-i3c-hci/hci.h @@ -10,6 +10,7 @@ #ifndef HCI_H #define HCI_H +#include /* Handy logging macro to save on line length */ #define DBG(x, ...) 
pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__) @@ -26,6 +27,10 @@ #define W2_BIT_(x) BIT((x) - 64) #define W3_BIT_(x) BIT((x) - 96) +#define reg_read(r) readl(hci->base_regs + (r)) +#define reg_write(r, v) writel(v, hci->base_regs + (r)) +#define reg_set(r, v) reg_write(r, reg_read(r) | (v)) +#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v)) struct hci_cmd_ops; @@ -135,11 +140,16 @@ struct i3c_hci_dev_data { /* list of quirks */ #define HCI_QUIRK_RAW_CCC BIT(1) /* CCC framing must be explicit */ +#define HCI_QUIRK_PIO_MODE BIT(2) /* Set PIO mode for AMD platforms */ +#define HCI_QUIRK_OD_PP_TIMING BIT(3) /* Set OD and PP timings for AMD platforms */ +#define HCI_QUIRK_RESP_BUF_THLD BIT(4) /* Set resp buf thld to 0 for AMD platforms */ /* global functions */ void mipi_i3c_hci_resume(struct i3c_hci *hci); void mipi_i3c_hci_pio_reset(struct i3c_hci *hci); void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci); +void amd_set_od_pp_timing(struct i3c_hci *hci); +void amd_set_resp_buf_thld(struct i3c_hci *hci); #endif diff --git a/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c b/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c new file mode 100644 index 00000000000000..3b9c6e76c5366c --- /dev/null +++ b/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * I3C HCI Quirks + * + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Authors: Shyam Sundar S K + * Guruvendra Punugupati + */ + +#include +#include "hci.h" + +/* Timing registers */ +#define HCI_SCL_I3C_OD_TIMING 0x214 +#define HCI_SCL_I3C_PP_TIMING 0x218 +#define HCI_SDA_HOLD_SWITCH_DLY_TIMING 0x230 + +/* Timing values to configure 9MHz frequency */ +#define AMD_SCL_I3C_OD_TIMING 0x00cf00cf +#define AMD_SCL_I3C_PP_TIMING 0x00160016 + +#define QUEUE_THLD_CTRL 0xD0 + +void amd_set_od_pp_timing(struct i3c_hci *hci) +{ + u32 data; + + reg_write(HCI_SCL_I3C_OD_TIMING, AMD_SCL_I3C_OD_TIMING); + reg_write(HCI_SCL_I3C_PP_TIMING, AMD_SCL_I3C_PP_TIMING); + data = reg_read(HCI_SDA_HOLD_SWITCH_DLY_TIMING); + /* Configure maximum TX hold time */ + data |= W0_MASK(18, 16); + reg_write(HCI_SDA_HOLD_SWITCH_DLY_TIMING, data); +} + +void amd_set_resp_buf_thld(struct i3c_hci *hci) +{ + u32 data; + + data = reg_read(QUEUE_THLD_CTRL); + data = data & ~W0_MASK(15, 8); + reg_write(QUEUE_THLD_CTRL, data); +} diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 0a68fd1b81d4e9..a7bfc678153e6c 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -127,6 +127,8 @@ /* This parameter depends on the implementation and may be tuned */ #define SVC_I3C_FIFO_SIZE 16 +#define SVC_I3C_PPBAUD_MAX 15 +#define SVC_I3C_QUICK_I2C_CLK 4170000 #define SVC_I3C_EVENT_IBI BIT(0) #define SVC_I3C_EVENT_HOTJOIN BIT(1) @@ -182,6 +184,7 @@ struct svc_i3c_regs_save { * @ibi.lock: IBI lock * @lock: Transfer lock, protect between IBI work thread and callbacks from master * @enabled_events: Bit masks for enable events (IBI, HotJoin). + * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back. 
*/ struct svc_i3c_master { struct i3c_master_controller base; @@ -212,6 +215,7 @@ struct svc_i3c_master { } ibi; struct mutex lock; int enabled_events; + u32 mctrl_config; }; /** @@ -529,12 +533,61 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static int svc_i3c_master_set_speed(struct i3c_master_controller *m, + enum i3c_open_drain_speed speed) +{ + struct svc_i3c_master *master = to_svc_i3c_master(m); + struct i3c_bus *bus = i3c_master_get_bus(&master->base); + u32 ppbaud, odbaud, odhpp, mconfig; + unsigned long fclk_rate; + int ret; + + ret = pm_runtime_resume_and_get(master->dev); + if (ret < 0) { + dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); + return ret; + } + + switch (speed) { + case I3C_OPEN_DRAIN_SLOW_SPEED: + fclk_rate = clk_get_rate(master->fclk); + if (!fclk_rate) { + ret = -EINVAL; + goto rpm_out; + } + /* + * Set 50% duty-cycle I2C speed to I3C OPEN-DRAIN mode, so the first + * broadcast address is visible to all I2C/I3C devices on the I3C bus. + * I3C device working as a I2C device will turn off its 50ns Spike + * Filter to change to I3C mode. + */ + mconfig = master->mctrl_config; + ppbaud = FIELD_GET(GENMASK(11, 8), mconfig); + odhpp = 0; + odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1; + mconfig &= ~GENMASK(24, 16); + mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp); + writel(mconfig, master->regs + SVC_I3C_MCONFIG); + break; + case I3C_OPEN_DRAIN_NORMAL_SPEED: + writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG); + break; + } + +rpm_out: + pm_runtime_mark_last_busy(master->dev); + pm_runtime_put_autosuspend(master->dev); + + return ret; +} + static int svc_i3c_master_bus_init(struct i3c_master_controller *m) { struct svc_i3c_master *master = to_svc_i3c_master(m); struct i3c_bus *bus = i3c_master_get_bus(m); struct i3c_device_info info = {}; unsigned long fclk_rate, fclk_period_ns; + unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate; unsigned int high_period_ns, od_low_period_ns; u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg; int ret; @@ -555,12 +608,15 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) } fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate); + i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c); + i2c_scl_rate = bus->scl_rate.i2c; + i3c_scl_rate = bus->scl_rate.i3c; /* * Using I3C Push-Pull mode, target is 12.5MHz/80ns period. * Simplest configuration is using a 50% duty-cycle of 40ns. */ - ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1; + ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1; pplow = 0; /* @@ -570,7 +626,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) */ odhpp = 1; high_period_ns = (ppbaud + 1) * fclk_period_ns; - odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1; + odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2; od_low_period_ns = (odbaud + 1) * high_period_ns; switch (bus->mode) { @@ -579,20 +635,27 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) odstop = 0; break; case I3C_BUS_MODE_MIXED_FAST: - case I3C_BUS_MODE_MIXED_LIMITED: /* * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference * between the high and low period does not really matter. 
*/ - i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2; + i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2; odstop = 1; break; + case I3C_BUS_MODE_MIXED_LIMITED: case I3C_BUS_MODE_MIXED_SLOW: - /* - * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same - * constraints as the FM+ mode. - */ - i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2; + /* I3C PP + I3C OP + I2C OP both use i2c clk rate */ + if (ppbaud > SVC_I3C_PPBAUD_MAX) { + ppbaud = SVC_I3C_PPBAUD_MAX; + pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud); + } + + high_period_ns = (ppbaud + 1) * fclk_period_ns; + odhpp = 0; + odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1; + + od_low_period_ns = (odbaud + 1) * high_period_ns; + i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2; odstop = 1; break; default: @@ -611,6 +674,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) SVC_I3C_MCONFIG_I2CBAUD(i2cbaud); writel(reg, master->regs + SVC_I3C_MCONFIG); + master->mctrl_config = reg; /* Master core's registration */ ret = i3c_master_get_free_addr(m, 0); if (ret < 0) @@ -1645,6 +1709,7 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = { .disable_ibi = svc_i3c_master_disable_ibi, .enable_hotjoin = svc_i3c_master_enable_hotjoin, .disable_hotjoin = svc_i3c_master_disable_hotjoin, + .set_speed = svc_i3c_master_set_speed, }; static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master) @@ -1775,6 +1840,7 @@ static void svc_i3c_master_remove(struct platform_device *pdev) { struct svc_i3c_master *master = platform_get_drvdata(pdev); + cancel_work_sync(&master->hj_work); i3c_master_unregister(&master->base); pm_runtime_dont_use_autosuspend(&pdev->dev); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 9aab7abc2ae90e..67aebfe0fed665 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -120,6 +120,12 @@ static unsigned int mwait_substates __initdata; */ #define CPUIDLE_FLAG_INIT_XSTATE BIT(17) +/* + * Ignore the sub-state when matching mwait hints between the ACPI _CST and + * custom tables. 
+ */ +#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH BIT(18) + /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) @@ -1022,6 +1028,47 @@ static struct cpuidle_state spr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state gnr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 4, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, + .exit_latency = 170, + .target_residency = 650, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6P", + .desc = "MWAIT 0x21", + .flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, + .exit_latency = 210, + .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state atom_cstates[] __initdata = { { .name = "C1E", @@ -1315,7 +1362,8 @@ static struct cpuidle_state srf_cstates[] __initdata = { { .name = "C6S", .desc = "MWAIT 0x22", - .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 270, .target_residency = 700, .enter = &intel_idle, @@ -1323,7 +1371,8 @@ static struct cpuidle_state srf_cstates[] __initdata = { { .name = "C6SP", .desc = "MWAIT 0x23", - .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 310, .target_residency = 900, .enter = &intel_idle, @@ -1453,6 +1502,12 @@ static const struct idle_cpu idle_cpu_spr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_gnr __initconst = { + .state_table = gnr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_avn __initconst = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, @@ -1475,6 +1530,10 @@ static const struct idle_cpu idle_cpu_dnv __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_tmt __initconst = { + .disable_promotion_to_c1e = true, +}; + static const struct idle_cpu idle_cpu_snr __initconst = { .state_table = snr_cstates, .disable_promotion_to_c1e = true, @@ -1533,11 +1592,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &idle_cpu_gmt), X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr), + X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &idle_cpu_gnr), X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl), X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt), X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &idle_cpu_dnv), + X86_MATCH_VFM(INTEL_ATOM_TREMONT, &idle_cpu_tmt), + X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &idle_cpu_tmt), X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, 
&idle_cpu_srf), @@ -1692,7 +1754,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) } } -static bool __init intel_idle_off_by_default(u32 mwait_hint) +static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) { int cstate, limit; @@ -1709,7 +1771,15 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint) * the interesting states are ACPI_CSTATE_FFH. */ for (cstate = 1; cstate < limit; cstate++) { - if (acpi_state_table.states[cstate].address == mwait_hint) + u32 acpi_hint = acpi_state_table.states[cstate].address; + u32 table_hint = mwait_hint; + + if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) { + acpi_hint &= ~MWAIT_SUBSTATE_MASK; + table_hint &= ~MWAIT_SUBSTATE_MASK; + } + + if (acpi_hint == table_hint) return false; } return true; @@ -1719,7 +1789,10 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint) static inline bool intel_idle_acpi_cst_extract(void) { return false; } static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } -static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) +{ + return false; +} #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ /** @@ -2046,7 +2119,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && - intel_idle_off_by_default(mwait_hint) && + intel_idle_off_by_default(state->flags, mwait_hint) && !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))) state->flags |= CPUIDLE_FLAG_OFF; @@ -2075,7 +2148,7 @@ static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv) drv->state_count = 1; - if (icpu) + if (icpu && icpu->state_table) intel_idle_init_cstates_icpu(drv); else intel_idle_init_cstates_acpi(drv); @@ -2209,7 +2282,11 @@ static int __init intel_idle_init(void) icpu = (const struct idle_cpu *)id->driver_data; if (icpu) { - cpuidle_state_table = icpu->state_table; + if (icpu->state_table) + cpuidle_state_table = icpu->state_table; + else if (!intel_idle_acpi_cst_extract()) + return -ENODEV; + auto_demotion_disable_flags = icpu->auto_demotion_disable_flags; if (icpu->disable_promotion_to_c1e) c1e_promotion = C1E_PROMOTION_DISABLE; diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index 80b57d3ee3a726..516c1a8e4d566f 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -177,6 +177,33 @@ config ADXL372_I2C To compile this driver as a module, choose M here: the module will be called adxl372_i2c. +config ADXL380 + tristate + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + +config ADXL380_SPI + tristate "Analog Devices ADXL380 3-Axis Accelerometer SPI Driver" + depends on SPI + select ADXL380 + select REGMAP_SPI + help + Say yes here to add support for the Analog Devices ADXL380 triaxial + acceleration sensor. + To compile this driver as a module, choose M here: the + module will be called adxl380_spi. + +config ADXL380_I2C + tristate "Analog Devices ADXL380 3-Axis Accelerometer I2C Driver" + depends on I2C + select ADXL380 + select REGMAP_I2C + help + Say yes here to add support for the Analog Devices ADXL380 triaxial + acceleration sensor. + To compile this driver as a module, choose M here: the + module will be called adxl380_i2c. 
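The three ADXL380 entries above follow the common core-plus-transport split: ADXL380 is a promptless library symbol that carries the bus-independent logic and pulls in the IIO buffer infrastructure, while ADXL380_SPI and ADXL380_I2C are thin front ends that only add the matching regmap flavour. A hypothetical I2C front end of that shape is sketched below for illustration; the example_* names, register widths and the core entry point are placeholders, not the real adxl380 symbols:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>

/* Assumed core entry point, normally exported by the shared core module. */
int example_accel_core_probe(struct device *dev, struct regmap *regmap);

static int example_accel_i2c_probe(struct i2c_client *client)
{
	static const struct regmap_config cfg = {
		.reg_bits = 8,		/* assumed register layout */
		.val_bits = 8,
	};
	struct regmap *regmap;

	/* The transport glue only builds a regmap and hands it to the core. */
	regmap = devm_regmap_init_i2c(client, &cfg);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return example_accel_core_probe(&client->dev, regmap);
}

static const struct i2c_device_id example_accel_id[] = {
	{ "example-accel" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_accel_id);

static struct i2c_driver example_accel_i2c_driver = {
	.driver = { .name = "example-accel-i2c" },
	.probe = example_accel_i2c_probe,
	.id_table = example_accel_id,
};
module_i2c_driver(example_accel_i2c_driver);

MODULE_DESCRIPTION("Illustrative I2C transport front end");
MODULE_LICENSE("GPL");

An SPI front end would be identical apart from using devm_regmap_init_spi(), which is why the two Kconfig entries differ only in the REGMAP_* select.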
+ config BMA180 tristate "Bosch BMA023/BMA1x0/BMA250 3-Axis Accelerometer Driver" depends on I2C && INPUT_BMA150=n diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile index db90532ba24aab..ca8569e25aba31 100644 --- a/drivers/iio/accel/Makefile +++ b/drivers/iio/accel/Makefile @@ -21,6 +21,9 @@ obj-$(CONFIG_ADXL367_SPI) += adxl367_spi.o obj-$(CONFIG_ADXL372) += adxl372.o obj-$(CONFIG_ADXL372_I2C) += adxl372_i2c.o obj-$(CONFIG_ADXL372_SPI) += adxl372_spi.o +obj-$(CONFIG_ADXL380) += adxl380.o +obj-$(CONFIG_ADXL380_I2C) += adxl380_i2c.o +obj-$(CONFIG_ADXL380_SPI) += adxl380_spi.o obj-$(CONFIG_BMA180) += bma180.o obj-$(CONFIG_BMA220) += bma220_spi.o obj-$(CONFIG_BMA400) += bma400_core.o diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c index 0c9225d18fb29b..eabaefa92f19d1 100644 --- a/drivers/iio/accel/adxl355_core.c +++ b/drivers/iio/accel/adxl355_core.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include "adxl355.h" diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index 5cf4828a5eb5eb..e790a66d86c79f 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include "adxl367.h" @@ -1220,7 +1220,7 @@ static int adxl367_update_scan_mode(struct iio_dev *indio_dev, return ret; st->fifo_set_size = bitmap_weight(active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); return 0; } diff --git a/drivers/iio/accel/adxl367_spi.c b/drivers/iio/accel/adxl367_spi.c index 118c894015a578..b7011726579151 100644 --- a/drivers/iio/accel/adxl367_spi.c +++ b/drivers/iio/accel/adxl367_spi.c @@ -72,7 +72,7 @@ static int adxl367_write(void *context, const void *val_buf, size_t val_size) return spi_sync(st->spi, &st->reg_write_msg); } -static struct regmap_bus adxl367_spi_regmap_bus = { +static const struct regmap_bus adxl367_spi_regmap_bus = { .read = adxl367_read, .write = adxl367_write, }; diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c index c4193286eb0538..ef8dd557877bd4 100644 --- a/drivers/iio/accel/adxl372.c +++ b/drivers/iio/accel/adxl372.c @@ -1050,7 +1050,7 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) st->fifo_format = adxl372_axis_lookup_table[i].fifo_format; st->fifo_axis_mask = adxl372_axis_lookup_table[i].bits; st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); /* Configure the FIFO to store sets of impact event peak. */ if (st->peak_fifo_mode_en) { diff --git a/drivers/iio/accel/adxl380.c b/drivers/iio/accel/adxl380.c new file mode 100644 index 00000000000000..f80527d899be4d --- /dev/null +++ b/drivers/iio/accel/adxl380.c @@ -0,0 +1,1905 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ADXL380 3-Axis Digital Accelerometer core driver + * + * Copyright 2024 Analog Devices Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +#include "adxl380.h" + +#define ADXL380_ID_VAL 380 +#define ADXL382_ID_VAL 382 + +#define ADXL380_DEVID_AD_REG 0x00 +#define ADLX380_PART_ID_REG 0x02 + +#define ADXL380_X_DATA_H_REG 0x15 +#define ADXL380_Y_DATA_H_REG 0x17 +#define ADXL380_Z_DATA_H_REG 0x19 +#define ADXL380_T_DATA_H_REG 0x1B + +#define ADXL380_MISC_0_REG 0x20 +#define ADXL380_XL382_MSK BIT(7) + +#define ADXL380_MISC_1_REG 0x21 + +#define ADXL380_X_DSM_OFFSET_REG 0x4D + +#define ADXL380_ACT_INACT_CTL_REG 0x37 +#define ADXL380_INACT_EN_MSK BIT(2) +#define ADXL380_ACT_EN_MSK BIT(0) + +#define ADXL380_SNSR_AXIS_EN_REG 0x38 +#define ADXL380_ACT_INACT_AXIS_EN_MSK GENMASK(2, 0) + +#define ADXL380_THRESH_ACT_H_REG 0x39 +#define ADXL380_TIME_ACT_H_REG 0x3B +#define ADXL380_THRESH_INACT_H_REG 0x3E +#define ADXL380_TIME_INACT_H_REG 0x40 +#define ADXL380_THRESH_MAX GENMASK(12, 0) +#define ADXL380_TIME_MAX GENMASK(24, 0) + +#define ADXL380_FIFO_CONFIG_0_REG 0x30 +#define ADXL380_FIFO_SAMPLES_8_MSK BIT(0) +#define ADXL380_FIFO_MODE_MSK GENMASK(5, 4) + +#define ADXL380_FIFO_DISABLED 0 +#define ADXL380_FIFO_NORMAL 1 +#define ADXL380_FIFO_STREAMED 2 +#define ADXL380_FIFO_TRIGGERED 3 + +#define ADXL380_FIFO_CONFIG_1_REG 0x31 +#define ADXL380_FIFO_STATUS_0_REG 0x1E + +#define ADXL380_TAP_THRESH_REG 0x43 +#define ADXL380_TAP_DUR_REG 0x44 +#define ADXL380_TAP_LATENT_REG 0x45 +#define ADXL380_TAP_WINDOW_REG 0x46 +#define ADXL380_TAP_TIME_MAX GENMASK(7, 0) + +#define ADXL380_TAP_CFG_REG 0x47 +#define ADXL380_TAP_AXIS_MSK GENMASK(1, 0) + +#define ADXL380_TRIG_CFG_REG 0x49 +#define ADXL380_TRIG_CFG_DEC_2X_MSK BIT(7) +#define ADXL380_TRIG_CFG_SINC_RATE_MSK BIT(6) + +#define ADXL380_FILTER_REG 0x50 +#define ADXL380_FILTER_EQ_FILT_MSK BIT(6) +#define ADXL380_FILTER_LPF_MODE_MSK GENMASK(5, 4) +#define ADXL380_FILTER_HPF_PATH_MSK BIT(3) +#define ADXL380_FILTER_HPF_CORNER_MSK GENMASK(2, 0) + +#define ADXL380_OP_MODE_REG 0x26 +#define ADXL380_OP_MODE_RANGE_MSK GENMASK(7, 6) +#define ADXL380_OP_MODE_MSK GENMASK(3, 0) +#define ADXL380_OP_MODE_STANDBY 0 +#define ADXL380_OP_MODE_HEART_SOUND 1 +#define ADXL380_OP_MODE_ULP 2 +#define ADXL380_OP_MODE_VLP 3 +#define ADXL380_OP_MODE_LP 4 +#define ADXL380_OP_MODE_LP_ULP 6 +#define ADXL380_OP_MODE_LP_VLP 7 +#define ADXL380_OP_MODE_RBW 8 +#define ADXL380_OP_MODE_RBW_ULP 10 +#define ADXL380_OP_MODE_RBW_VLP 11 +#define ADXL380_OP_MODE_HP 12 +#define ADXL380_OP_MODE_HP_ULP 14 +#define ADXL380_OP_MODE_HP_VLP 15 + +#define ADXL380_OP_MODE_4G_RANGE 0 +#define ADXL382_OP_MODE_15G_RANGE 0 +#define ADXL380_OP_MODE_8G_RANGE 1 +#define ADXL382_OP_MODE_30G_RANGE 1 +#define ADXL380_OP_MODE_16G_RANGE 2 +#define ADXL382_OP_MODE_60G_RANGE 2 + +#define ADXL380_DIG_EN_REG 0x27 +#define ADXL380_CHAN_EN_MSK(chan) BIT(4 + (chan)) +#define ADXL380_FIFO_EN_MSK BIT(3) + +#define ADXL380_INT0_MAP0_REG 0x2B +#define ADXL380_INT1_MAP0_REG 0x2D +#define ADXL380_INT_MAP0_INACT_INT0_MSK BIT(6) +#define ADXL380_INT_MAP0_ACT_INT0_MSK BIT(5) +#define ADXL380_INT_MAP0_FIFO_WM_INT0_MSK BIT(3) + +#define ADXL380_INT0_MAP1_REG 0x2C +#define ADXL380_INT1_MAP1_REG 0x2E +#define ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK BIT(1) +#define ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK BIT(0) + +#define ADXL380_INT0_REG 0x5D +#define ADXL380_INT0_POL_MSK BIT(7) + +#define ADXL380_RESET_REG 0x2A +#define ADXL380_FIFO_DATA 0x1D + +#define ADXL380_DEVID_AD_VAL 0xAD +#define ADXL380_RESET_CODE 0x52 + +#define 
ADXL380_STATUS_0_REG 0x11 +#define ADXL380_STATUS_0_FIFO_FULL_MSK BIT(1) +#define ADXL380_STATUS_0_FIFO_WM_MSK BIT(3) + +#define ADXL380_STATUS_1_INACT_MSK BIT(6) +#define ADXL380_STATUS_1_ACT_MSK BIT(5) +#define ADXL380_STATUS_1_DOUBLE_TAP_MSK BIT(1) +#define ADXL380_STATUS_1_SINGLE_TAP_MSK BIT(0) + +#define ADXL380_FIFO_SAMPLES 315UL + +enum adxl380_channels { + ADXL380_ACCEL_X, + ADXL380_ACCEL_Y, + ADXL380_ACCEL_Z, + ADXL380_TEMP, + ADXL380_CH_NUM +}; + +enum adxl380_axis { + ADXL380_X_AXIS, + ADXL380_Y_AXIS, + ADXL380_Z_AXIS, +}; + +enum adxl380_activity_type { + ADXL380_ACTIVITY, + ADXL380_INACTIVITY, +}; + +enum adxl380_tap_type { + ADXL380_SINGLE_TAP, + ADXL380_DOUBLE_TAP, +}; + +enum adxl380_tap_time_type { + ADXL380_TAP_TIME_LATENT, + ADXL380_TAP_TIME_WINDOW, +}; + +static const int adxl380_range_scale_factor_tbl[] = { 1, 2, 4 }; + +const struct adxl380_chip_info adxl380_chip_info = { + .name = "adxl380", + .chip_id = ADXL380_ID_VAL, + .scale_tbl = { + [ADXL380_OP_MODE_4G_RANGE] = { 0, 1307226 }, + [ADXL380_OP_MODE_8G_RANGE] = { 0, 2615434 }, + [ADXL380_OP_MODE_16G_RANGE] = { 0, 5229886 }, + }, + .samp_freq_tbl = { 8000, 16000, 32000 }, + /* + * The datasheet defines an intercept of 470 LSB at 25 degC + * and a sensitivity of 10.2 LSB/C. + */ + .temp_offset = 25 * 102 / 10 - 470, + +}; +EXPORT_SYMBOL_NS_GPL(adxl380_chip_info, IIO_ADXL380); + +const struct adxl380_chip_info adxl382_chip_info = { + .name = "adxl382", + .chip_id = ADXL382_ID_VAL, + .scale_tbl = { + [ADXL382_OP_MODE_15G_RANGE] = { 0, 4903325 }, + [ADXL382_OP_MODE_30G_RANGE] = { 0, 9806650 }, + [ADXL382_OP_MODE_60G_RANGE] = { 0, 19613300 }, + }, + .samp_freq_tbl = { 16000, 32000, 64000 }, + /* + * The datasheet defines an intercept of 570 LSB at 25 degC + * and a sensitivity of 10.2 LSB/C. + */ + .temp_offset = 25 * 102 / 10 - 570, +}; +EXPORT_SYMBOL_NS_GPL(adxl382_chip_info, IIO_ADXL380); + +static const unsigned int adxl380_th_reg_high_addr[2] = { + [ADXL380_ACTIVITY] = ADXL380_THRESH_ACT_H_REG, + [ADXL380_INACTIVITY] = ADXL380_THRESH_INACT_H_REG, +}; + +static const unsigned int adxl380_time_reg_high_addr[2] = { + [ADXL380_ACTIVITY] = ADXL380_TIME_ACT_H_REG, + [ADXL380_INACTIVITY] = ADXL380_TIME_INACT_H_REG, +}; + +static const unsigned int adxl380_tap_time_reg[2] = { + [ADXL380_TAP_TIME_LATENT] = ADXL380_TAP_LATENT_REG, + [ADXL380_TAP_TIME_WINDOW] = ADXL380_TAP_WINDOW_REG, +}; + +struct adxl380_state { + struct regmap *regmap; + struct device *dev; + const struct adxl380_chip_info *chip_info; + /* + * Synchronize access to members of driver state, and ensure atomicity + * of consecutive regmap operations. 
+ */ + struct mutex lock; + enum adxl380_axis tap_axis_en; + u8 range; + u8 odr; + u8 fifo_set_size; + u8 transf_buf[3]; + u16 watermark; + u32 act_time_ms; + u32 act_threshold; + u32 inact_time_ms; + u32 inact_threshold; + u32 tap_latent_us; + u32 tap_window_us; + u32 tap_duration_us; + u32 tap_threshold; + int irq; + int int_map[2]; + int lpf_tbl[4]; + int hpf_tbl[7][2]; + + __be16 fifo_buf[ADXL380_FIFO_SAMPLES] __aligned(IIO_DMA_MINALIGN); +}; + +bool adxl380_readable_noinc_reg(struct device *dev, unsigned int reg) +{ + return reg == ADXL380_FIFO_DATA; +} +EXPORT_SYMBOL_NS_GPL(adxl380_readable_noinc_reg, IIO_ADXL380); + +static int adxl380_set_measure_en(struct adxl380_state *st, bool en) +{ + int ret; + unsigned int act_inact_ctl; + u8 op_mode = ADXL380_OP_MODE_STANDBY; + + if (en) { + ret = regmap_read(st->regmap, ADXL380_ACT_INACT_CTL_REG, &act_inact_ctl); + if (ret) + return ret; + + /* Activity/ Inactivity detection available only in VLP/ULP mode */ + if (FIELD_GET(ADXL380_ACT_EN_MSK, act_inact_ctl) || + FIELD_GET(ADXL380_INACT_EN_MSK, act_inact_ctl)) + op_mode = ADXL380_OP_MODE_VLP; + else + op_mode = ADXL380_OP_MODE_HP; + } + + return regmap_update_bits(st->regmap, ADXL380_OP_MODE_REG, + ADXL380_OP_MODE_MSK, + FIELD_PREP(ADXL380_OP_MODE_MSK, op_mode)); +} + +static void adxl380_scale_act_inact_thresholds(struct adxl380_state *st, + u8 old_range, + u8 new_range) +{ + st->act_threshold = mult_frac(st->act_threshold, + adxl380_range_scale_factor_tbl[old_range], + adxl380_range_scale_factor_tbl[new_range]); + st->inact_threshold = mult_frac(st->inact_threshold, + adxl380_range_scale_factor_tbl[old_range], + adxl380_range_scale_factor_tbl[new_range]); +} + +static int adxl380_write_act_inact_threshold(struct adxl380_state *st, + enum adxl380_activity_type act, + unsigned int th) +{ + int ret; + u8 reg = adxl380_th_reg_high_addr[act]; + + if (th > ADXL380_THRESH_MAX) + return -EINVAL; + + ret = regmap_write(st->regmap, reg + 1, th & GENMASK(7, 0)); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, reg, GENMASK(2, 0), th >> 8); + if (ret) + return ret; + + if (act == ADXL380_ACTIVITY) + st->act_threshold = th; + else + st->inact_threshold = th; + + return 0; +} + +static int adxl380_set_act_inact_threshold(struct iio_dev *indio_dev, + enum adxl380_activity_type act, + u16 th) +{ + struct adxl380_state *st = iio_priv(indio_dev); + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = adxl380_write_act_inact_threshold(st, act, th); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_set_tap_threshold_value(struct iio_dev *indio_dev, u8 th) +{ + int ret; + struct adxl380_state *st = iio_priv(indio_dev); + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = regmap_write(st->regmap, ADXL380_TAP_THRESH_REG, th); + if (ret) + return ret; + + st->tap_threshold = th; + + return adxl380_set_measure_en(st, true); +} + +static int _adxl380_write_tap_time_us(struct adxl380_state *st, + enum adxl380_tap_time_type tap_time_type, + u32 us) +{ + u8 reg = adxl380_tap_time_reg[tap_time_type]; + unsigned int reg_val; + int ret; + + /* scale factor for tap window is 1250us / LSB */ + reg_val = DIV_ROUND_CLOSEST(us, 1250); + if (reg_val > ADXL380_TAP_TIME_MAX) + reg_val = ADXL380_TAP_TIME_MAX; + + ret = regmap_write(st->regmap, reg, reg_val); + if (ret) + return ret; + + if (tap_time_type == ADXL380_TAP_TIME_WINDOW) + 
st->tap_window_us = us; + else + st->tap_latent_us = us; + + return 0; +} + +static int adxl380_write_tap_time_us(struct adxl380_state *st, + enum adxl380_tap_time_type tap_time_type, u32 us) +{ + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = _adxl380_write_tap_time_us(st, tap_time_type, us); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_write_tap_dur_us(struct iio_dev *indio_dev, u32 us) +{ + int ret; + unsigned int reg_val; + struct adxl380_state *st = iio_priv(indio_dev); + + /* 625us per code is the scale factor of TAP_DUR register */ + reg_val = DIV_ROUND_CLOSEST(us, 625); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = regmap_write(st->regmap, ADXL380_TAP_DUR_REG, reg_val); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_read_chn(struct adxl380_state *st, u8 addr) +{ + int ret; + + guard(mutex)(&st->lock); + + ret = regmap_bulk_read(st->regmap, addr, &st->transf_buf, 2); + if (ret) + return ret; + + return get_unaligned_be16(st->transf_buf); +} + +static int adxl380_get_odr(struct adxl380_state *st, int *odr) +{ + int ret; + unsigned int trig_cfg, odr_idx; + + ret = regmap_read(st->regmap, ADXL380_TRIG_CFG_REG, &trig_cfg); + if (ret) + return ret; + + odr_idx = (FIELD_GET(ADXL380_TRIG_CFG_SINC_RATE_MSK, trig_cfg) << 1) | + (FIELD_GET(ADXL380_TRIG_CFG_DEC_2X_MSK, trig_cfg) & 1); + + *odr = st->chip_info->samp_freq_tbl[odr_idx]; + + return 0; +} + +static const int adxl380_lpf_div[] = { + 1, 4, 8, 16, +}; + +static int adxl380_fill_lpf_tbl(struct adxl380_state *st) +{ + int ret, i; + int odr; + + ret = adxl380_get_odr(st, &odr); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(st->lpf_tbl); i++) + st->lpf_tbl[i] = DIV_ROUND_CLOSEST(odr, adxl380_lpf_div[i]); + + return 0; +} + +static const int adxl380_hpf_mul[] = { + 0, 247000, 62084, 15545, 3862, 954, 238, +}; + +static int adxl380_fill_hpf_tbl(struct adxl380_state *st) +{ + int i, ret, odr_hz; + u32 multiplier; + u64 div, rem, odr; + + ret = adxl380_get_odr(st, &odr_hz); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(adxl380_hpf_mul); i++) { + odr = mul_u64_u32_shr(odr_hz, MEGA, 0); + multiplier = adxl380_hpf_mul[i]; + div = div64_u64_rem(mul_u64_u32_shr(odr, multiplier, 0), + TERA * 100, &rem); + + st->hpf_tbl[i][0] = div; + st->hpf_tbl[i][1] = div_u64(rem, MEGA * 100); + } + + return 0; +} + +static int adxl380_set_odr(struct adxl380_state *st, u8 odr) +{ + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, ADXL380_TRIG_CFG_REG, + ADXL380_TRIG_CFG_DEC_2X_MSK, + FIELD_PREP(ADXL380_TRIG_CFG_DEC_2X_MSK, odr & 1)); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, ADXL380_TRIG_CFG_REG, + ADXL380_TRIG_CFG_SINC_RATE_MSK, + FIELD_PREP(ADXL380_TRIG_CFG_SINC_RATE_MSK, odr >> 1)); + if (ret) + return ret; + + ret = adxl380_set_measure_en(st, true); + if (ret) + return ret; + + ret = adxl380_fill_lpf_tbl(st); + if (ret) + return ret; + + return adxl380_fill_hpf_tbl(st); +} + +static int adxl380_find_match_1d_tbl(const int *array, unsigned int size, + int val) +{ + int i; + + for (i = 0; i < size; i++) { + if (val == array[i]) + return i; + } + + return size - 1; +} + +static int adxl380_find_match_2d_tbl(const int (*freq_tbl)[2], int n, int val, int val2) +{ + int i; + + for (i = 0; i < n; i++) { + if (freq_tbl[i][0] 
== val && freq_tbl[i][1] == val2) + return i; + } + + return -EINVAL; +} + +static int adxl380_get_lpf(struct adxl380_state *st, int *lpf) +{ + int ret; + unsigned int trig_cfg, lpf_idx; + + guard(mutex)(&st->lock); + + ret = regmap_read(st->regmap, ADXL380_FILTER_REG, &trig_cfg); + if (ret) + return ret; + + lpf_idx = FIELD_GET(ADXL380_FILTER_LPF_MODE_MSK, trig_cfg); + + *lpf = st->lpf_tbl[lpf_idx]; + + return 0; +} + +static int adxl380_set_lpf(struct adxl380_state *st, u8 lpf) +{ + int ret; + u8 eq_bypass = 0; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + if (lpf) + eq_bypass = 1; + + ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG, + ADXL380_FILTER_EQ_FILT_MSK, + FIELD_PREP(ADXL380_FILTER_EQ_FILT_MSK, eq_bypass)); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG, + ADXL380_FILTER_LPF_MODE_MSK, + FIELD_PREP(ADXL380_FILTER_LPF_MODE_MSK, lpf)); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_get_hpf(struct adxl380_state *st, int *hpf_int, int *hpf_frac) +{ + int ret; + unsigned int trig_cfg, hpf_idx; + + guard(mutex)(&st->lock); + + ret = regmap_read(st->regmap, ADXL380_FILTER_REG, &trig_cfg); + if (ret) + return ret; + + hpf_idx = FIELD_GET(ADXL380_FILTER_HPF_CORNER_MSK, trig_cfg); + + *hpf_int = st->hpf_tbl[hpf_idx][0]; + *hpf_frac = st->hpf_tbl[hpf_idx][1]; + + return 0; +} + +static int adxl380_set_hpf(struct adxl380_state *st, u8 hpf) +{ + int ret; + u8 hpf_path = 0; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + if (hpf) + hpf_path = 1; + + ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG, + ADXL380_FILTER_HPF_PATH_MSK, + FIELD_PREP(ADXL380_FILTER_HPF_PATH_MSK, hpf_path)); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG, + ADXL380_FILTER_HPF_CORNER_MSK, + FIELD_PREP(ADXL380_FILTER_HPF_CORNER_MSK, hpf)); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int _adxl380_set_act_inact_time_ms(struct adxl380_state *st, + enum adxl380_activity_type act, + u32 ms) +{ + u8 reg = adxl380_time_reg_high_addr[act]; + unsigned int reg_val; + int ret; + + /* 500us per code is the scale factor of TIME_ACT / TIME_INACT registers */ + reg_val = min(DIV_ROUND_CLOSEST(ms * 1000, 500), ADXL380_TIME_MAX); + + put_unaligned_be24(reg_val, &st->transf_buf[0]); + + ret = regmap_bulk_write(st->regmap, reg, st->transf_buf, sizeof(st->transf_buf)); + if (ret) + return ret; + + if (act == ADXL380_ACTIVITY) + st->act_time_ms = ms; + else + st->inact_time_ms = ms; + + return 0; +} + +static int adxl380_set_act_inact_time_ms(struct adxl380_state *st, + enum adxl380_activity_type act, + u32 ms) +{ + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = _adxl380_set_act_inact_time_ms(st, act, ms); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_set_range(struct adxl380_state *st, u8 range) +{ + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, ADXL380_OP_MODE_REG, + ADXL380_OP_MODE_RANGE_MSK, + FIELD_PREP(ADXL380_OP_MODE_RANGE_MSK, range)); + + if (ret) + return ret; + + adxl380_scale_act_inact_thresholds(st, st->range, range); + + /* Activity thresholds depend on range */ + ret = adxl380_write_act_inact_threshold(st, ADXL380_ACTIVITY, 
+ st->act_threshold); + if (ret) + return ret; + + ret = adxl380_write_act_inact_threshold(st, ADXL380_INACTIVITY, + st->inact_threshold); + if (ret) + return ret; + + st->range = range; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_write_act_inact_en(struct adxl380_state *st, + enum adxl380_activity_type type, + bool en) +{ + if (type == ADXL380_ACTIVITY) + return regmap_update_bits(st->regmap, ADXL380_ACT_INACT_CTL_REG, + ADXL380_ACT_EN_MSK, + FIELD_PREP(ADXL380_ACT_EN_MSK, en)); + + return regmap_update_bits(st->regmap, ADXL380_ACT_INACT_CTL_REG, + ADXL380_INACT_EN_MSK, + FIELD_PREP(ADXL380_INACT_EN_MSK, en)); +} + +static int adxl380_read_act_inact_int(struct adxl380_state *st, + enum adxl380_activity_type type, + bool *en) +{ + int ret; + unsigned int reg_val; + + guard(mutex)(&st->lock); + + ret = regmap_read(st->regmap, st->int_map[0], ®_val); + if (ret) + return ret; + + if (type == ADXL380_ACTIVITY) + *en = FIELD_GET(ADXL380_INT_MAP0_ACT_INT0_MSK, reg_val); + else + *en = FIELD_GET(ADXL380_INT_MAP0_INACT_INT0_MSK, reg_val); + + return 0; +} + +static int adxl380_write_act_inact_int(struct adxl380_state *st, + enum adxl380_activity_type act, + bool en) +{ + if (act == ADXL380_ACTIVITY) + return regmap_update_bits(st->regmap, st->int_map[0], + ADXL380_INT_MAP0_ACT_INT0_MSK, + FIELD_PREP(ADXL380_INT_MAP0_ACT_INT0_MSK, en)); + + return regmap_update_bits(st->regmap, st->int_map[0], + ADXL380_INT_MAP0_INACT_INT0_MSK, + FIELD_PREP(ADXL380_INT_MAP0_INACT_INT0_MSK, en)); +} + +static int adxl380_act_inact_config(struct adxl380_state *st, + enum adxl380_activity_type type, + bool en) +{ + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = adxl380_write_act_inact_en(st, type, en); + if (ret) + return ret; + + ret = adxl380_write_act_inact_int(st, type, en); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_write_tap_axis(struct adxl380_state *st, + enum adxl380_axis axis) +{ + int ret; + + ret = regmap_update_bits(st->regmap, ADXL380_TAP_CFG_REG, + ADXL380_TAP_AXIS_MSK, + FIELD_PREP(ADXL380_TAP_AXIS_MSK, axis)); + + if (ret) + return ret; + + st->tap_axis_en = axis; + + return 0; +} + +static int adxl380_read_tap_int(struct adxl380_state *st, enum adxl380_tap_type type, bool *en) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(st->regmap, st->int_map[1], ®_val); + if (ret) + return ret; + + if (type == ADXL380_SINGLE_TAP) + *en = FIELD_GET(ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK, reg_val); + else + *en = FIELD_GET(ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK, reg_val); + + return 0; +} + +static int adxl380_write_tap_int(struct adxl380_state *st, enum adxl380_tap_type type, bool en) +{ + if (type == ADXL380_SINGLE_TAP) + return regmap_update_bits(st->regmap, st->int_map[1], + ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK, + FIELD_PREP(ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK, en)); + + return regmap_update_bits(st->regmap, st->int_map[1], + ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK, + FIELD_PREP(ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK, en)); +} + +static int adxl380_tap_config(struct adxl380_state *st, + enum adxl380_axis axis, + enum adxl380_tap_type type, + bool en) +{ + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = adxl380_write_tap_axis(st, axis); + if (ret) + return ret; + + ret = adxl380_write_tap_int(st, type, en); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static 
int adxl380_set_fifo_samples(struct adxl380_state *st) +{ + int ret; + u16 fifo_samples = st->watermark * st->fifo_set_size; + + ret = regmap_update_bits(st->regmap, ADXL380_FIFO_CONFIG_0_REG, + ADXL380_FIFO_SAMPLES_8_MSK, + FIELD_PREP(ADXL380_FIFO_SAMPLES_8_MSK, + (fifo_samples & BIT(8)))); + if (ret) + return ret; + + return regmap_write(st->regmap, ADXL380_FIFO_CONFIG_1_REG, + fifo_samples & 0xFF); +} + +static int adxl380_get_status(struct adxl380_state *st, u8 *status0, u8 *status1) +{ + int ret; + + /* STATUS0, STATUS1 are adjacent regs */ + ret = regmap_bulk_read(st->regmap, ADXL380_STATUS_0_REG, + &st->transf_buf, 2); + if (ret) + return ret; + + *status0 = st->transf_buf[0]; + *status1 = st->transf_buf[1]; + + return 0; +} + +static int adxl380_get_fifo_entries(struct adxl380_state *st, u16 *fifo_entries) +{ + int ret; + + ret = regmap_bulk_read(st->regmap, ADXL380_FIFO_STATUS_0_REG, + &st->transf_buf, 2); + if (ret) + return ret; + + *fifo_entries = st->transf_buf[0] | ((BIT(0) & st->transf_buf[1]) << 8); + + return 0; +} + +static void adxl380_push_event(struct iio_dev *indio_dev, s64 timestamp, + u8 status1) +{ + if (FIELD_GET(ADXL380_STATUS_1_ACT_MSK, status1)) + iio_push_event(indio_dev, + IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + timestamp); + + if (FIELD_GET(ADXL380_STATUS_1_INACT_MSK, status1)) + iio_push_event(indio_dev, + IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + timestamp); + if (FIELD_GET(ADXL380_STATUS_1_SINGLE_TAP_MSK, status1)) + iio_push_event(indio_dev, + IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z, + IIO_EV_TYPE_GESTURE, IIO_EV_DIR_SINGLETAP), + timestamp); + + if (FIELD_GET(ADXL380_STATUS_1_DOUBLE_TAP_MSK, status1)) + iio_push_event(indio_dev, + IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z, + IIO_EV_TYPE_GESTURE, IIO_EV_DIR_DOUBLETAP), + timestamp); +} + +static irqreturn_t adxl380_irq_handler(int irq, void *p) +{ + struct iio_dev *indio_dev = p; + struct adxl380_state *st = iio_priv(indio_dev); + u8 status0, status1; + u16 fifo_entries; + int i; + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_get_status(st, &status0, &status1); + if (ret) + return IRQ_HANDLED; + + adxl380_push_event(indio_dev, iio_get_time_ns(indio_dev), status1); + + if (!FIELD_GET(ADXL380_STATUS_0_FIFO_WM_MSK, status0)) + return IRQ_HANDLED; + + ret = adxl380_get_fifo_entries(st, &fifo_entries); + if (ret) + return IRQ_HANDLED; + + for (i = 0; i < fifo_entries; i += st->fifo_set_size) { + ret = regmap_noinc_read(st->regmap, ADXL380_FIFO_DATA, + &st->fifo_buf[i], + 2 * st->fifo_set_size); + if (ret) + return IRQ_HANDLED; + iio_push_to_buffers(indio_dev, &st->fifo_buf[i]); + } + + return IRQ_HANDLED; +} + +static int adxl380_write_calibbias_value(struct adxl380_state *st, + unsigned long chan_addr, + s8 calibbias) +{ + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = regmap_write(st->regmap, ADXL380_X_DSM_OFFSET_REG + chan_addr, calibbias); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_read_calibbias_value(struct adxl380_state *st, + unsigned long chan_addr, + int *calibbias) +{ + int ret; + unsigned int reg_val; + + guard(mutex)(&st->lock); + + ret = regmap_read(st->regmap, ADXL380_X_DSM_OFFSET_REG + chan_addr, ®_val); + if (ret) + return ret; + + *calibbias = sign_extend32(reg_val, 7); + + return 0; +} + +static ssize_t 
hwfifo_watermark_min_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "1\n"); +} + +static ssize_t hwfifo_watermark_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%lu\n", ADXL380_FIFO_SAMPLES); +} + +static ssize_t adxl380_get_fifo_watermark(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct adxl380_state *st = iio_priv(indio_dev); + + return sysfs_emit(buf, "%d\n", st->watermark); +} + +static ssize_t adxl380_get_fifo_enabled(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct adxl380_state *st = iio_priv(indio_dev); + int ret; + unsigned int reg_val; + + ret = regmap_read(st->regmap, ADXL380_DIG_EN_REG, ®_val); + if (ret) + return ret; + + return sysfs_emit(buf, "%lu\n", + FIELD_GET(ADXL380_FIFO_EN_MSK, reg_val)); +} + +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0); +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); +static IIO_DEVICE_ATTR(hwfifo_watermark, 0444, + adxl380_get_fifo_watermark, NULL, 0); +static IIO_DEVICE_ATTR(hwfifo_enabled, 0444, + adxl380_get_fifo_enabled, NULL, 0); + +static const struct iio_dev_attr *adxl380_fifo_attributes[] = { + &iio_dev_attr_hwfifo_watermark_min, + &iio_dev_attr_hwfifo_watermark_max, + &iio_dev_attr_hwfifo_watermark, + &iio_dev_attr_hwfifo_enabled, + NULL +}; + +static int adxl380_buffer_postenable(struct iio_dev *indio_dev) +{ + struct adxl380_state *st = iio_priv(indio_dev); + int i; + int ret; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, + st->int_map[0], + ADXL380_INT_MAP0_FIFO_WM_INT0_MSK, + FIELD_PREP(ADXL380_INT_MAP0_FIFO_WM_INT0_MSK, 1)); + if (ret) + return ret; + + for_each_clear_bit(i, indio_dev->active_scan_mask, ADXL380_CH_NUM) { + ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, + ADXL380_CHAN_EN_MSK(i), + 0 << (4 + i)); + if (ret) + return ret; + } + + st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask, + iio_get_masklength(indio_dev)); + + if ((st->watermark * st->fifo_set_size) > ADXL380_FIFO_SAMPLES) + st->watermark = (ADXL380_FIFO_SAMPLES / st->fifo_set_size); + + ret = adxl380_set_fifo_samples(st); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, ADXL380_FIFO_EN_MSK, + FIELD_PREP(ADXL380_FIFO_EN_MSK, 1)); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static int adxl380_buffer_predisable(struct iio_dev *indio_dev) +{ + struct adxl380_state *st = iio_priv(indio_dev); + int ret, i; + + guard(mutex)(&st->lock); + + ret = adxl380_set_measure_en(st, false); + if (ret) + return ret; + + ret = regmap_update_bits(st->regmap, + st->int_map[0], + ADXL380_INT_MAP0_FIFO_WM_INT0_MSK, + FIELD_PREP(ADXL380_INT_MAP0_FIFO_WM_INT0_MSK, 0)); + if (ret) + return ret; + + for (i = 0; i < indio_dev->num_channels; i++) { + ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, + ADXL380_CHAN_EN_MSK(i), + 1 << (4 + i)); + if (ret) + return ret; + } + + ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, ADXL380_FIFO_EN_MSK, + FIELD_PREP(ADXL380_FIFO_EN_MSK, 0)); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +static const struct iio_buffer_setup_ops adxl380_buffer_ops = { + .postenable = adxl380_buffer_postenable, + .predisable = adxl380_buffer_predisable, +}; + +static int 
adxl380_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct adxl380_state *st = iio_priv(indio_dev); + int ret; + + switch (info) { + case IIO_CHAN_INFO_RAW: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + ret = adxl380_read_chn(st, chan->address); + iio_device_release_direct_mode(indio_dev); + if (ret) + return ret; + + *val = sign_extend32(ret >> chan->scan_type.shift, + chan->scan_type.realbits - 1); + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_ACCEL: + scoped_guard(mutex, &st->lock) { + *val = st->chip_info->scale_tbl[st->range][0]; + *val2 = st->chip_info->scale_tbl[st->range][1]; + } + return IIO_VAL_INT_PLUS_NANO; + case IIO_TEMP: + /* 10.2 LSB / Degree Celsius */ + *val = 10000; + *val2 = 102; + return IIO_VAL_FRACTIONAL; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_OFFSET: + switch (chan->type) { + case IIO_TEMP: + *val = st->chip_info->temp_offset; + return IIO_VAL_INT; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_CALIBBIAS: + switch (chan->type) { + case IIO_ACCEL: + ret = adxl380_read_calibbias_value(st, chan->scan_index, val); + if (ret) + return ret; + return IIO_VAL_INT; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_SAMP_FREQ: + ret = adxl380_get_odr(st, val); + if (ret) + return ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + ret = adxl380_get_lpf(st, val); + if (ret) + return ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY: + ret = adxl380_get_hpf(st, val, val2); + if (ret) + return ret; + return IIO_VAL_INT_PLUS_MICRO; + } + + return -EINVAL; +} + +static int adxl380_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + struct adxl380_state *st = iio_priv(indio_dev); + + if (chan->type != IIO_ACCEL) + return -EINVAL; + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + *vals = (const int *)st->chip_info->scale_tbl; + *type = IIO_VAL_INT_PLUS_NANO; + *length = ARRAY_SIZE(st->chip_info->scale_tbl) * 2; + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_SAMP_FREQ: + *vals = (const int *)st->chip_info->samp_freq_tbl; + *type = IIO_VAL_INT; + *length = ARRAY_SIZE(st->chip_info->samp_freq_tbl); + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + *vals = (const int *)st->lpf_tbl; + *type = IIO_VAL_INT; + *length = ARRAY_SIZE(st->lpf_tbl); + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY: + *vals = (const int *)st->hpf_tbl; + *type = IIO_VAL_INT_PLUS_MICRO; + /* Values are stored in a 2D matrix */ + *length = ARRAY_SIZE(st->hpf_tbl) * 2; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +static int adxl380_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long info) +{ + struct adxl380_state *st = iio_priv(indio_dev); + int odr_index, lpf_index, hpf_index, range_index; + + switch (info) { + case IIO_CHAN_INFO_SAMP_FREQ: + odr_index = adxl380_find_match_1d_tbl(st->chip_info->samp_freq_tbl, + ARRAY_SIZE(st->chip_info->samp_freq_tbl), + val); + return adxl380_set_odr(st, odr_index); + case IIO_CHAN_INFO_CALIBBIAS: + return adxl380_write_calibbias_value(st, chan->scan_index, val); + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + lpf_index = adxl380_find_match_1d_tbl(st->lpf_tbl, + ARRAY_SIZE(st->lpf_tbl), + val); + return adxl380_set_lpf(st, lpf_index); + case 
IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY: + hpf_index = adxl380_find_match_2d_tbl(st->hpf_tbl, + ARRAY_SIZE(st->hpf_tbl), + val, val2); + if (hpf_index < 0) + return hpf_index; + return adxl380_set_hpf(st, hpf_index); + case IIO_CHAN_INFO_SCALE: + range_index = adxl380_find_match_2d_tbl(st->chip_info->scale_tbl, + ARRAY_SIZE(st->chip_info->scale_tbl), + val, val2); + if (range_index < 0) + return range_index; + return adxl380_set_range(st, range_index); + default: + return -EINVAL; + } +} + +static int adxl380_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long info) +{ + switch (info) { + case IIO_CHAN_INFO_SCALE: + if (chan->type != IIO_ACCEL) + return -EINVAL; + + return IIO_VAL_INT_PLUS_NANO; + default: + return IIO_VAL_INT_PLUS_MICRO; + } +} + +static int adxl380_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct adxl380_state *st = iio_priv(indio_dev); + int ret; + bool int_en; + bool tap_axis_en = false; + + switch (chan->channel2) { + case IIO_MOD_X: + tap_axis_en = st->tap_axis_en == ADXL380_X_AXIS; + break; + case IIO_MOD_Y: + tap_axis_en = st->tap_axis_en == ADXL380_Y_AXIS; + break; + case IIO_MOD_Z: + tap_axis_en = st->tap_axis_en == ADXL380_Z_AXIS; + break; + default: + return -EINVAL; + } + + switch (dir) { + case IIO_EV_DIR_RISING: + ret = adxl380_read_act_inact_int(st, ADXL380_ACTIVITY, &int_en); + if (ret) + return ret; + return int_en; + case IIO_EV_DIR_FALLING: + ret = adxl380_read_act_inact_int(st, ADXL380_INACTIVITY, &int_en); + if (ret) + return ret; + return int_en; + case IIO_EV_DIR_SINGLETAP: + ret = adxl380_read_tap_int(st, ADXL380_SINGLE_TAP, &int_en); + if (ret) + return ret; + return int_en && tap_axis_en; + case IIO_EV_DIR_DOUBLETAP: + ret = adxl380_read_tap_int(st, ADXL380_DOUBLE_TAP, &int_en); + if (ret) + return ret; + return int_en && tap_axis_en; + default: + return -EINVAL; + } +} + +static int adxl380_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + int state) +{ + struct adxl380_state *st = iio_priv(indio_dev); + enum adxl380_axis axis; + + switch (chan->channel2) { + case IIO_MOD_X: + axis = ADXL380_X_AXIS; + break; + case IIO_MOD_Y: + axis = ADXL380_Y_AXIS; + break; + case IIO_MOD_Z: + axis = ADXL380_Z_AXIS; + break; + default: + return -EINVAL; + } + + switch (dir) { + case IIO_EV_DIR_RISING: + return adxl380_act_inact_config(st, ADXL380_ACTIVITY, state); + case IIO_EV_DIR_FALLING: + return adxl380_act_inact_config(st, ADXL380_INACTIVITY, state); + case IIO_EV_DIR_SINGLETAP: + return adxl380_tap_config(st, axis, ADXL380_SINGLE_TAP, state); + case IIO_EV_DIR_DOUBLETAP: + return adxl380_tap_config(st, axis, ADXL380_DOUBLE_TAP, state); + default: + return -EINVAL; + } +} + +static int adxl380_read_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int *val, int *val2) +{ + struct adxl380_state *st = iio_priv(indio_dev); + + guard(mutex)(&st->lock); + + switch (type) { + case IIO_EV_TYPE_THRESH: + switch (info) { + case IIO_EV_INFO_VALUE: { + switch (dir) { + case IIO_EV_DIR_RISING: + *val = st->act_threshold; + return IIO_VAL_INT; + case IIO_EV_DIR_FALLING: + *val = st->inact_threshold; + return IIO_VAL_INT; + default: + return -EINVAL; + } + } + case IIO_EV_INFO_PERIOD: + switch (dir) { + case 
IIO_EV_DIR_RISING: + *val = st->act_time_ms; + *val2 = 1000; + return IIO_VAL_FRACTIONAL; + case IIO_EV_DIR_FALLING: + *val = st->inact_time_ms; + *val2 = 1000; + return IIO_VAL_FRACTIONAL; + default: + return -EINVAL; + } + default: + return -EINVAL; + } + case IIO_EV_TYPE_GESTURE: + switch (info) { + case IIO_EV_INFO_VALUE: + *val = st->tap_threshold; + return IIO_VAL_INT; + case IIO_EV_INFO_RESET_TIMEOUT: + *val = st->tap_window_us; + *val2 = 1000000; + return IIO_VAL_FRACTIONAL; + case IIO_EV_INFO_TAP2_MIN_DELAY: + *val = st->tap_latent_us; + *val2 = 1000000; + return IIO_VAL_FRACTIONAL; + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int adxl380_write_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, enum iio_event_direction dir, + enum iio_event_info info, int val, int val2) +{ + struct adxl380_state *st = iio_priv(indio_dev); + u32 val_ms, val_us; + + if (chan->type != IIO_ACCEL) + return -EINVAL; + + switch (type) { + case IIO_EV_TYPE_THRESH: + switch (info) { + case IIO_EV_INFO_VALUE: + switch (dir) { + case IIO_EV_DIR_RISING: + return adxl380_set_act_inact_threshold(indio_dev, + ADXL380_ACTIVITY, val); + case IIO_EV_DIR_FALLING: + return adxl380_set_act_inact_threshold(indio_dev, + ADXL380_INACTIVITY, val); + default: + return -EINVAL; + } + case IIO_EV_INFO_PERIOD: + val_ms = val * 1000 + DIV_ROUND_UP(val2, 1000); + switch (dir) { + case IIO_EV_DIR_RISING: + return adxl380_set_act_inact_time_ms(st, + ADXL380_ACTIVITY, val_ms); + case IIO_EV_DIR_FALLING: + return adxl380_set_act_inact_time_ms(st, + ADXL380_INACTIVITY, val_ms); + default: + return -EINVAL; + } + + default: + return -EINVAL; + } + case IIO_EV_TYPE_GESTURE: + switch (info) { + case IIO_EV_INFO_VALUE: + return adxl380_set_tap_threshold_value(indio_dev, val); + case IIO_EV_INFO_RESET_TIMEOUT: + val_us = val * 1000000 + val2; + return adxl380_write_tap_time_us(st, + ADXL380_TAP_TIME_WINDOW, + val_us); + case IIO_EV_INFO_TAP2_MIN_DELAY: + val_us = val * 1000000 + val2; + return adxl380_write_tap_time_us(st, + ADXL380_TAP_TIME_LATENT, + val_us); + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static ssize_t in_accel_gesture_tap_maxtomin_time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int vals[2]; + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct adxl380_state *st = iio_priv(indio_dev); + + guard(mutex)(&st->lock); + + vals[0] = st->tap_duration_us; + vals[1] = MICRO; + + return iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, vals); +} + +static ssize_t in_accel_gesture_tap_maxtomin_time_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct adxl380_state *st = iio_priv(indio_dev); + int ret, val_int, val_fract_us; + + guard(mutex)(&st->lock); + + ret = iio_str_to_fixpoint(buf, 100000, &val_int, &val_fract_us); + if (ret) + return ret; + + /* maximum value is 255 * 625 us = 0.159375 seconds */ + if (val_int || val_fract_us > 159375 || val_fract_us < 0) + return -EINVAL; + + ret = adxl380_write_tap_dur_us(indio_dev, val_fract_us); + if (ret) + return ret; + + return len; +} + +static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_maxtomin_time, 0); + +static struct attribute *adxl380_event_attributes[] = { + &iio_dev_attr_in_accel_gesture_tap_maxtomin_time.dev_attr.attr, + NULL +}; + +static const struct attribute_group adxl380_event_attribute_group = { + .attrs = 
adxl380_event_attributes, +}; + +static int adxl380_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int writeval, + unsigned int *readval) +{ + struct adxl380_state *st = iio_priv(indio_dev); + + if (readval) + return regmap_read(st->regmap, reg, readval); + + return regmap_write(st->regmap, reg, writeval); +} + +static int adxl380_set_watermark(struct iio_dev *indio_dev, unsigned int val) +{ + struct adxl380_state *st = iio_priv(indio_dev); + + st->watermark = min(val, ADXL380_FIFO_SAMPLES); + + return 0; +} + +static const struct iio_info adxl380_info = { + .read_raw = adxl380_read_raw, + .read_avail = &adxl380_read_avail, + .write_raw = adxl380_write_raw, + .write_raw_get_fmt = adxl380_write_raw_get_fmt, + .read_event_config = adxl380_read_event_config, + .write_event_config = adxl380_write_event_config, + .read_event_value = adxl380_read_event_value, + .write_event_value = adxl380_write_event_value, + .event_attrs = &adxl380_event_attribute_group, + .debugfs_reg_access = &adxl380_reg_access, + .hwfifo_set_watermark = adxl380_set_watermark, +}; + +static const struct iio_event_spec adxl380_events[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) | + BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_PERIOD), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) | + BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_PERIOD), + }, + { + .type = IIO_EV_TYPE_GESTURE, + .dir = IIO_EV_DIR_SINGLETAP, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_RESET_TIMEOUT), + }, + { + .type = IIO_EV_TYPE_GESTURE, + .dir = IIO_EV_DIR_DOUBLETAP, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_RESET_TIMEOUT) | + BIT(IIO_EV_INFO_TAP2_MIN_DELAY), + }, +}; + +#define ADXL380_ACCEL_CHANNEL(index, reg, axis) { \ + .type = IIO_ACCEL, \ + .address = reg, \ + .modified = 1, \ + .channel2 = IIO_MOD_##axis, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_CALIBBIAS), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .info_mask_shared_by_all_available = \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .info_mask_shared_by_type = \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \ + BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \ + .info_mask_shared_by_type_available = \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \ + BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \ + .scan_index = index, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ + .event_spec = adxl380_events, \ + .num_event_specs = ARRAY_SIZE(adxl380_events) \ +} + +static const struct iio_chan_spec adxl380_channels[] = { + ADXL380_ACCEL_CHANNEL(0, ADXL380_X_DATA_H_REG, X), + ADXL380_ACCEL_CHANNEL(1, ADXL380_Y_DATA_H_REG, Y), + ADXL380_ACCEL_CHANNEL(2, ADXL380_Z_DATA_H_REG, Z), + { + .type = IIO_TEMP, + .address = ADXL380_T_DATA_H_REG, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + .scan_index = 3, + .scan_type = { + .sign = 's', + .realbits = 12, + .storagebits = 16, + .shift = 4, + .endianness = IIO_BE, + }, + }, +}; + +static int adxl380_config_irq(struct iio_dev *indio_dev) +{ + struct adxl380_state *st = iio_priv(indio_dev); + unsigned long irq_flag; + struct 
irq_data *desc; + u32 irq_type; + u8 polarity; + int ret; + + st->irq = fwnode_irq_get_byname(dev_fwnode(st->dev), "INT0"); + if (st->irq > 0) { + st->int_map[0] = ADXL380_INT0_MAP0_REG; + st->int_map[1] = ADXL380_INT0_MAP1_REG; + } else { + st->irq = fwnode_irq_get_byname(dev_fwnode(st->dev), "INT1"); + if (st->irq > 0) + return dev_err_probe(st->dev, -ENODEV, + "no interrupt name specified"); + st->int_map[0] = ADXL380_INT1_MAP0_REG; + st->int_map[1] = ADXL380_INT1_MAP1_REG; + } + + desc = irq_get_irq_data(st->irq); + if (!desc) + return dev_err_probe(st->dev, -EINVAL, "Could not find IRQ %d\n", st->irq); + + irq_type = irqd_get_trigger_type(desc); + if (irq_type == IRQ_TYPE_LEVEL_HIGH) { + polarity = 0; + irq_flag = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; + } else if (irq_type == IRQ_TYPE_LEVEL_LOW) { + polarity = 1; + irq_flag = IRQF_TRIGGER_LOW | IRQF_ONESHOT; + } else { + return dev_err_probe(st->dev, -EINVAL, + "Invalid interrupt 0x%x. Only level interrupts supported\n", + irq_type); + } + + ret = regmap_update_bits(st->regmap, ADXL380_INT0_REG, + ADXL380_INT0_POL_MSK, + FIELD_PREP(ADXL380_INT0_POL_MSK, polarity)); + if (ret) + return ret; + + return devm_request_threaded_irq(st->dev, st->irq, NULL, + adxl380_irq_handler, irq_flag, + indio_dev->name, indio_dev); +} + +static int adxl380_setup(struct iio_dev *indio_dev) +{ + unsigned int reg_val; + u16 part_id, chip_id; + int ret, i; + struct adxl380_state *st = iio_priv(indio_dev); + + ret = regmap_read(st->regmap, ADXL380_DEVID_AD_REG, ®_val); + if (ret) + return ret; + + if (reg_val != ADXL380_DEVID_AD_VAL) + dev_warn(st->dev, "Unknown chip id %x\n", reg_val); + + ret = regmap_bulk_read(st->regmap, ADLX380_PART_ID_REG, + &st->transf_buf, 2); + if (ret) + return ret; + + part_id = get_unaligned_be16(st->transf_buf); + part_id >>= 4; + + if (part_id != ADXL380_ID_VAL) + dev_warn(st->dev, "Unknown part id %x\n", part_id); + + ret = regmap_read(st->regmap, ADXL380_MISC_0_REG, ®_val); + if (ret) + return ret; + + /* Bit to differentiate between ADXL380/382. */ + if (reg_val & ADXL380_XL382_MSK) + chip_id = ADXL382_ID_VAL; + else + chip_id = ADXL380_ID_VAL; + + if (chip_id != st->chip_info->chip_id) + dev_warn(st->dev, "Unknown chip id %x\n", chip_id); + + ret = regmap_write(st->regmap, ADXL380_RESET_REG, ADXL380_RESET_CODE); + if (ret) + return ret; + + /* + * A latency of approximately 0.5 ms is required after soft reset. + * Stated in the register REG_RESET description. + */ + fsleep(500); + + for (i = 0; i < indio_dev->num_channels; i++) { + ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, + ADXL380_CHAN_EN_MSK(i), + 1 << (4 + i)); + if (ret) + return ret; + } + + ret = regmap_update_bits(st->regmap, ADXL380_FIFO_CONFIG_0_REG, + ADXL380_FIFO_MODE_MSK, + FIELD_PREP(ADXL380_FIFO_MODE_MSK, ADXL380_FIFO_STREAMED)); + if (ret) + return ret; + + /* Select all 3 axis for act/inact detection. 
*/ + ret = regmap_update_bits(st->regmap, ADXL380_SNSR_AXIS_EN_REG, + ADXL380_ACT_INACT_AXIS_EN_MSK, + FIELD_PREP(ADXL380_ACT_INACT_AXIS_EN_MSK, + ADXL380_ACT_INACT_AXIS_EN_MSK)); + if (ret) + return ret; + + ret = adxl380_config_irq(indio_dev); + if (ret) + return ret; + + ret = adxl380_fill_lpf_tbl(st); + if (ret) + return ret; + + ret = adxl380_fill_hpf_tbl(st); + if (ret) + return ret; + + return adxl380_set_measure_en(st, true); +} + +int adxl380_probe(struct device *dev, struct regmap *regmap, + const struct adxl380_chip_info *chip_info) +{ + struct iio_dev *indio_dev; + struct adxl380_state *st; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + + st->dev = dev; + st->regmap = regmap; + st->chip_info = chip_info; + + mutex_init(&st->lock); + + indio_dev->channels = adxl380_channels; + indio_dev->num_channels = ARRAY_SIZE(adxl380_channels); + indio_dev->name = chip_info->name; + indio_dev->info = &adxl380_info; + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = devm_regulator_get_enable(dev, "vddio"); + if (ret) + return dev_err_probe(st->dev, ret, + "Failed to get vddio regulator\n"); + + ret = devm_regulator_get_enable(st->dev, "vsupply"); + if (ret) + return dev_err_probe(st->dev, ret, + "Failed to get vsupply regulator\n"); + + ret = adxl380_setup(indio_dev); + if (ret) + return ret; + + ret = devm_iio_kfifo_buffer_setup_ext(st->dev, indio_dev, + &adxl380_buffer_ops, + adxl380_fifo_attributes); + if (ret) + return ret; + + return devm_iio_device_register(dev, indio_dev); +} +EXPORT_SYMBOL_NS_GPL(adxl380_probe, IIO_ADXL380); + +MODULE_AUTHOR("Ramona Gradinariu "); +MODULE_AUTHOR("Antoniu Miclaus "); +MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/accel/adxl380.h b/drivers/iio/accel/adxl380.h new file mode 100644 index 00000000000000..a683625d897a4a --- /dev/null +++ b/drivers/iio/accel/adxl380.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ADXL380 3-Axis Digital Accelerometer + * + * Copyright 2024 Analog Devices Inc. + */ + +#ifndef _ADXL380_H_ +#define _ADXL380_H_ + +struct adxl380_chip_info { + const char *name; + const int scale_tbl[3][2]; + const int samp_freq_tbl[3]; + const int temp_offset; + const u16 chip_id; +}; + +extern const struct adxl380_chip_info adxl380_chip_info; +extern const struct adxl380_chip_info adxl382_chip_info; + +int adxl380_probe(struct device *dev, struct regmap *regmap, + const struct adxl380_chip_info *chip_info); +bool adxl380_readable_noinc_reg(struct device *dev, unsigned int reg); + +#endif /* _ADXL380_H_ */ diff --git a/drivers/iio/accel/adxl380_i2c.c b/drivers/iio/accel/adxl380_i2c.c new file mode 100644 index 00000000000000..1dc1e77be815de --- /dev/null +++ b/drivers/iio/accel/adxl380_i2c.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ADXL380 3-Axis Digital Accelerometer I2C driver + * + * Copyright 2024 Analog Devices Inc. 
+ */ + +#include +#include +#include +#include + +#include "adxl380.h" + +static const struct regmap_config adxl380_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .readable_noinc_reg = adxl380_readable_noinc_reg, +}; + +static int adxl380_i2c_probe(struct i2c_client *client) +{ + struct regmap *regmap; + const struct adxl380_chip_info *chip_data; + + chip_data = i2c_get_match_data(client); + + regmap = devm_regmap_init_i2c(client, &adxl380_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + return adxl380_probe(&client->dev, regmap, chip_data); +} + +static const struct i2c_device_id adxl380_i2c_id[] = { + { "adxl380", (kernel_ulong_t)&adxl380_chip_info }, + { "adxl382", (kernel_ulong_t)&adxl382_chip_info }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adxl380_i2c_id); + +static const struct of_device_id adxl380_of_match[] = { + { .compatible = "adi,adxl380", .data = &adxl380_chip_info }, + { .compatible = "adi,adxl382", .data = &adxl382_chip_info }, + { } +}; +MODULE_DEVICE_TABLE(of, adxl380_of_match); + +static struct i2c_driver adxl380_i2c_driver = { + .driver = { + .name = "adxl380_i2c", + .of_match_table = adxl380_of_match, + }, + .probe = adxl380_i2c_probe, + .id_table = adxl380_i2c_id, +}; + +module_i2c_driver(adxl380_i2c_driver); + +MODULE_AUTHOR("Ramona Gradinariu "); +MODULE_AUTHOR("Antoniu Miclaus "); +MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer I2C driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_ADXL380); diff --git a/drivers/iio/accel/adxl380_spi.c b/drivers/iio/accel/adxl380_spi.c new file mode 100644 index 00000000000000..e7b5778cb6cfe9 --- /dev/null +++ b/drivers/iio/accel/adxl380_spi.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ADXL380 3-Axis Digital Accelerometer SPI driver + * + * Copyright 2024 Analog Devices Inc. 
+ */ + +#include +#include +#include +#include + +#include "adxl380.h" + +static const struct regmap_config adxl380_spi_regmap_config = { + .reg_bits = 7, + .pad_bits = 1, + .val_bits = 8, + .read_flag_mask = BIT(0), + .readable_noinc_reg = adxl380_readable_noinc_reg, +}; + +static int adxl380_spi_probe(struct spi_device *spi) +{ + const struct adxl380_chip_info *chip_data; + struct regmap *regmap; + + chip_data = spi_get_device_match_data(spi); + + regmap = devm_regmap_init_spi(spi, &adxl380_spi_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + return adxl380_probe(&spi->dev, regmap, chip_data); +} + +static const struct spi_device_id adxl380_spi_id[] = { + { "adxl380", (kernel_ulong_t)&adxl380_chip_info }, + { "adxl382", (kernel_ulong_t)&adxl382_chip_info }, + { } +}; +MODULE_DEVICE_TABLE(spi, adxl380_spi_id); + +static const struct of_device_id adxl380_of_match[] = { + { .compatible = "adi,adxl380", .data = &adxl380_chip_info }, + { .compatible = "adi,adxl382", .data = &adxl382_chip_info }, + { } +}; +MODULE_DEVICE_TABLE(of, adxl380_of_match); + +static struct spi_driver adxl380_spi_driver = { + .driver = { + .name = "adxl380_spi", + .of_match_table = adxl380_of_match, + }, + .probe = adxl380_spi_probe, + .id_table = adxl380_spi_id, +}; + +module_spi_driver(adxl380_spi_driver); + +MODULE_AUTHOR("Ramona Gradinariu "); +MODULE_AUTHOR("Antoniu Miclaus "); +MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer SPI driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_ADXL380); diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 6581772cb0c463..2445a0f7bc2bae 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -876,8 +876,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p) mutex_lock(&data->mutex); - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = bma180_get_data_reg(data, bit); if (ret < 0) { mutex_unlock(&data->mutex); diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c index e90e2f01550ad3..f4e43c3bbf1a5f 100644 --- a/drivers/iio/accel/bma400_core.c +++ b/drivers/iio/accel/bma400_core.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -21,7 +22,7 @@ #include #include -#include +#include #include #include @@ -795,21 +796,19 @@ static int bma400_enable_steps(struct bma400_data *data, int val) static int bma400_get_steps_reg(struct bma400_data *data, int *val) { - u8 *steps_raw; int ret; - steps_raw = kmalloc(BMA400_STEP_RAW_LEN, GFP_KERNEL); + u8 *steps_raw __free(kfree) = kmalloc(BMA400_STEP_RAW_LEN, GFP_KERNEL); if (!steps_raw) return -ENOMEM; ret = regmap_bulk_read(data->regmap, BMA400_STEP_CNT0_REG, steps_raw, BMA400_STEP_RAW_LEN); - if (ret) { - kfree(steps_raw); + if (ret) return ret; - } + *val = get_unaligned_le24(steps_raw); - kfree(steps_raw); + return IIO_VAL_INT; } diff --git a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c index ec13c044b3047a..765d8c4a4c4ddf 100644 --- a/drivers/iio/accel/bma400_spi.c +++ b/drivers/iio/accel/bma400_spi.c @@ -53,7 +53,7 @@ static int bma400_regmap_spi_write(void *context, const void *data, return spi_write(spi, data, count); } -static struct regmap_bus bma400_regmap_bus = { +static const struct regmap_bus bma400_regmap_bus = { .read = bma400_regmap_spi_read, .write = bma400_regmap_spi_write, .read_flag_mask = BIT(7), diff --git a/drivers/iio/accel/bmc150-accel-core.c 
b/drivers/iio/accel/bmc150-accel-core.c index ae0cd48a3e2936..0f32c1e92b4dc1 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -10,9 +10,9 @@ #include #include #include -#include #include #include +#include #include #include #include @@ -387,7 +387,7 @@ static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev, struct iio_mount_matrix *orientation) { struct iio_dev *indio_dev = dev_get_drvdata(dev); - struct acpi_device *adev = ACPI_COMPANION(dev); + acpi_handle handle = ACPI_HANDLE(dev); char *name, *alt_name, *label; if (strcmp(dev_name(dev), "i2c-BOSC0200:base") == 0) { @@ -398,9 +398,9 @@ static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev, label = "accel-display"; } - if (acpi_has_method(adev->handle, "ROTM")) { + if (acpi_has_method(handle, "ROTM")) { name = "ROTM"; - } else if (acpi_has_method(adev->handle, alt_name)) { + } else if (acpi_has_method(handle, alt_name)) { name = alt_name; indio_dev->label = label; } else { @@ -514,7 +514,7 @@ static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev, */ irq_info = bmc150_accel_interrupts_int1; if (data->type == BOSCH_BMC156 || - irq == of_irq_get_byname(dev->of_node, "INT2")) + irq == fwnode_irq_get_byname(dev_fwnode(dev), "INT2")) irq_info = bmc150_accel_interrupts_int2; for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++) @@ -1007,8 +1007,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, int j, bit; j = 0; - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) + iio_for_each_active_channel(indio_dev, bit) memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], sizeof(data->scan.channels[0])); diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c index 469a1255d93cf0..fc1c1613d673b5 100644 --- a/drivers/iio/accel/bmi088-accel-core.c +++ b/drivers/iio/accel/bmi088-accel-core.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include "bmi088-accel.h" diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c index 7b419a7b2478af..df1adc059aa94b 100644 --- a/drivers/iio/accel/bmi088-accel-spi.c +++ b/drivers/iio/accel/bmi088-accel-spi.c @@ -36,7 +36,7 @@ static int bmi088_regmap_spi_read(void *context, const void *reg, return spi_write_then_read(spi, addr, sizeof(addr), val, val_size); } -static struct regmap_bus bmi088_regmap_bus = { +static const struct regmap_bus bmi088_regmap_bus = { .write = bmi088_regmap_spi_write, .read = bmi088_regmap_spi_read, }; diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c index 0f403342b1fc0d..f7e4dc02b34db4 100644 --- a/drivers/iio/accel/cros_ec_accel_legacy.c +++ b/drivers/iio/accel/cros_ec_accel_legacy.c @@ -62,7 +62,7 @@ static int cros_ec_accel_legacy_read_cmd(struct iio_dev *indio_dev, return ret; } - for_each_set_bit(i, &scan_mask, indio_dev->masklength) { + for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) { *data = st->resp->dump.sensor[sensor_num].data[i] * st->sign[i]; data++; diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c index 6644c1fec3e691..4ec70ca6910d5e 100644 --- a/drivers/iio/accel/dmard09.c +++ b/drivers/iio/accel/dmard09.c @@ -5,7 +5,7 @@ * Copyright (c) 2016, Jelle van der Waa */ -#include +#include #include #include #include diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index d25e316134131a..acadabec4df7ad 100644 --- a/drivers/iio/accel/fxls8962af-core.c 
+++ b/drivers/iio/accel/fxls8962af-core.c @@ -966,8 +966,7 @@ static int fxls8962af_fifo_flush(struct iio_dev *indio_dev) int j, bit; j = 0; - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], sizeof(data->scan.channels[0])); } diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 8280d2bef0a374..b76df881632327 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -173,6 +173,7 @@ enum kx_chipset { KXCJ91008, KXTJ21009, KXTF9, + KX0221020, KX0231025, KX_MAX_CHIPS /* this must be last */ }; @@ -580,8 +581,8 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data) return ret; } - /* On KX023, route all used interrupts to INT1 for now */ - if (data->chipset == KX0231025 && data->client->irq > 0) { + /* On KX023 and KX022, route all used interrupts to INT1 for now */ + if ((data->chipset == KX0231025 || data->chipset == KX0221020) && data->client->irq > 0) { ret = i2c_smbus_write_byte_data(data->client, KX023_REG_INC4, KX023_REG_INC4_DRDY1 | KX023_REG_INC4_WUFI1); @@ -1507,6 +1508,7 @@ static int kxcjk1013_probe(struct i2c_client *client) case KXTF9: data->regs = &kxtf9_regs; break; + case KX0221020: case KX0231025: data->regs = &kx0231025_regs; break; @@ -1712,6 +1714,7 @@ static const struct i2c_device_id kxcjk1013_id[] = { {"kxcj91008", KXCJ91008}, {"kxtj21009", KXTJ21009}, {"kxtf9", KXTF9}, + {"kx022-1020", KX0221020}, {"kx023-1025", KX0231025}, {"SMO8500", KXCJ91008}, {} @@ -1724,6 +1727,7 @@ static const struct of_device_id kxcjk1013_of_match[] = { { .compatible = "kionix,kxcj91008", }, { .compatible = "kionix,kxtj21009", }, { .compatible = "kionix,kxtf9", }, + { .compatible = "kionix,kx022-1020", }, { .compatible = "kionix,kx023-1025", }, { } }; diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c index 4cdbf5424a5310..57025354c7cd58 100644 --- a/drivers/iio/accel/msa311.c +++ b/drivers/iio/accel/msa311.c @@ -900,8 +900,7 @@ static irqreturn_t msa311_buffer_thread(int irq, void *p) mutex_lock(&msa311->lock); - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { chan = &msa311_channels[bit]; err = msa311_get_axis(msa311, chan, &axis); diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c index 306482b70fad7e..ca0ce83e42b2ce 100644 --- a/drivers/iio/accel/sca3300.c +++ b/drivers/iio/accel/sca3300.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include @@ -494,8 +494,7 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p) int bit, ret, val, i = 0; s16 *channels = (s16 *)data->buffer; - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val); if (ret) { dev_err_ratelimited(&data->spi->dev, diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c index b3534d5751b951..abead190254b5a 100644 --- a/drivers/iio/accel/stk8312.c +++ b/drivers/iio/accel/stk8312.c @@ -448,8 +448,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p) goto err; } } else { - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = stk8312_read_accel(data, bit); if (ret < 0) { mutex_unlock(&data->lock); diff --git a/drivers/iio/accel/stk8ba50.c 
b/drivers/iio/accel/stk8ba50.c index 6d3c7f444d21d3..a32a77324e92d0 100644 --- a/drivers/iio/accel/stk8ba50.c +++ b/drivers/iio/accel/stk8ba50.c @@ -330,8 +330,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p) goto err; } } else { - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = stk8ba50_read_accel(data, stk8ba50_channel_table[bit]); if (ret < 0) diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index f60fe85a30d529..97ece1a4b7e399 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -21,6 +21,18 @@ config AD_SIGMA_DELTA select IIO_BUFFER select IIO_TRIGGERED_BUFFER +config AD4000 + tristate "Analog Devices AD4000 ADC Driver" + depends on SPI + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + Say yes here to build support for Analog Devices AD4000 high speed + SPI analog to digital converters (ADC). + + To compile this driver as a module, choose M here: the module will be + called ad4000. + config AD4130 tristate "Analog Device AD4130 ADC Driver" depends on SPI @@ -36,6 +48,17 @@ config AD4130 To compile this driver as a module, choose M here: the module will be called ad4130. +config AD4695 + tristate "Analog Device AD4695 ADC Driver" + depends on SPI + select REGMAP_SPI + help + Say yes here to build support for Analog Devices AD4695 and similar + analog to digital converters (ADC). + + To compile this driver as a module, choose M here: the module will be + called ad4695. + config AD7091R tristate @@ -991,6 +1014,19 @@ config NPCM_ADC This driver can also be built as a module. If so, the module will be called npcm_adc. +config PAC1921 + tristate "Microchip Technology PAC1921 driver" + depends on I2C + select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + Say yes here to build support for Microchip Technology's PAC1921 + High-Side Power/Current Monitor with Analog Output. + + This driver can also be built as a module. If so, the module + will be called pac1921. + config PAC1934 tristate "Microchip Technology PAC1934 driver" depends on I2C @@ -1156,6 +1192,16 @@ config SC27XX_ADC This driver can also be built as a module. If so, the module will be called sc27xx_adc. +config SOPHGO_CV1800B_ADC + tristate "Sophgo CV1800B SARADC" + depends on ARCH_SOPHGO || COMPILE_TEST + help + Say yes here to build support for the SARADC integrated inside + the Sophgo CV1800B SoC. + + This driver can also be built as a module. If so, the module + will be called sophgo_cv1800b_adc. + config SPEAR_ADC tristate "ST SPEAr ADC" depends on PLAT_SPEAR || COMPILE_TEST @@ -1171,6 +1217,7 @@ config SD_ADC_MODULATOR tristate "Generic sigma delta modulator" select IIO_BUFFER select IIO_TRIGGERED_BUFFER + select IIO_BACKEND help Select this option to enables sigma delta modulator. This driver can support generic sigma delta modulators. @@ -1225,6 +1272,7 @@ config STM32_DFSDM_ADC select IIO_BUFFER select IIO_BUFFER_HW_CONSUMER select IIO_TRIGGERED_BUFFER + select IIO_BACKEND help Select this option to support ADCSigma delta modulator for STMicroelectronics STM32 digital filter for sigma delta converter. 
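The accelerometer hunks above (bma180, bmc150, fxls8962af, msa311, sca3300, stk8312, stk8ba50) all make the same mechanical substitution: an open-coded for_each_set_bit() walk over indio_dev->active_scan_mask is replaced by the iio_for_each_active_channel() helper. As a rough illustration of what that iteration amounts to, the standalone C sketch below models it in userspace. It is an editor's sketch, not part of the patch; the mock_* types and the macro body are simplified assumptions standing in for the kernel IIO API, which is assumed to bound the walk by iio_get_masklength().

/*
 * Illustrative userspace model of the iio_for_each_active_channel()
 * pattern.  The mock_* names are hypothetical stand-ins, not kernel code.
 */
#include <stdio.h>

struct mock_iio_dev {
	unsigned long active_scan_mask;	/* one bit per enabled scan element */
	unsigned int masklength;	/* number of scan elements exposed */
};

static unsigned int mock_get_masklength(const struct mock_iio_dev *iio)
{
	return iio->masklength;
}

/* Simplified equivalent of the for_each_set_bit() based helper. */
#define mock_for_each_active_channel(iio, bit)				\
	for ((bit) = 0; (bit) < mock_get_masklength(iio); (bit)++)	\
		if ((iio)->active_scan_mask & (1UL << (bit)))

int main(void)
{
	unsigned int bit;
	struct mock_iio_dev iio = {
		.active_scan_mask = 0x0b,	/* channels 0, 1 and 3 enabled */
		.masklength = 8,
	};

	/* A trigger handler would read one sample per enabled channel. */
	mock_for_each_active_channel(&iio, bit)
		printf("reading channel %u\n", bit);

	return 0;
}

Either form visits only the channels enabled in the active scan mask, which is why the conversions above do not change driver behaviour.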
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index d370e066544e93..7b91cd98c0e0bd 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -6,7 +6,9 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o +obj-$(CONFIG_AD4000) += ad4000.o obj-$(CONFIG_AD4130) += ad4130.o +obj-$(CONFIG_AD4695) += ad4695.o obj-$(CONFIG_AD7091R) += ad7091r-base.o obj-$(CONFIG_AD7091R5) += ad7091r5.o obj-$(CONFIG_AD7091R8) += ad7091r8.o @@ -90,6 +92,7 @@ obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o obj-$(CONFIG_NAU7802) += nau7802.o obj-$(CONFIG_NPCM_ADC) += npcm_adc.o +obj-$(CONFIG_PAC1921) += pac1921.o obj-$(CONFIG_PAC1934) += pac1934.o obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o @@ -105,6 +108,7 @@ obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o +obj-$(CONFIG_SOPHGO_CV1800B_ADC) += sophgo-cv1800b-adc.o obj-$(CONFIG_SPEAR_ADC) += spear_adc.o obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o obj-$(CONFIG_STM32_ADC) += stm32-adc.o diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c new file mode 100644 index 00000000000000..6ea49124508499 --- /dev/null +++ b/drivers/iio/adc/ad4000.c @@ -0,0 +1,722 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * AD4000 SPI ADC driver + * + * Copyright 2024 Analog Devices Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define AD4000_READ_COMMAND 0x54 +#define AD4000_WRITE_COMMAND 0x14 + +#define AD4000_CONFIG_REG_DEFAULT 0xE1 + +/* AD4000 Configuration Register programmable bits */ +#define AD4000_CFG_SPAN_COMP BIT(3) /* Input span compression */ +#define AD4000_CFG_HIGHZ BIT(2) /* High impedance mode */ + +#define AD4000_SCALE_OPTIONS 2 + +#define AD4000_TQUIET1_NS 190 +#define AD4000_TQUIET2_NS 60 +#define AD4000_TCONV_NS 320 + +#define __AD4000_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access) \ +{ \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .differential = 1, \ + .channel = 0, \ + .channel2 = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\ + .scan_type = { \ + .sign = _sign, \ + .realbits = _real_bits, \ + .storagebits = _storage_bits, \ + .shift = _storage_bits - _real_bits, \ + .endianness = IIO_BE, \ + }, \ +} + +#define AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access) \ + __AD4000_DIFF_CHANNEL((_sign), (_real_bits), \ + ((_real_bits) > 16 ? 32 : 16), (_reg_access)) + +#define __AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access)\ +{ \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .channel = 0, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_separate_available = _reg_access ? 
BIT(IIO_CHAN_INFO_SCALE) : 0,\ + .scan_type = { \ + .sign = _sign, \ + .realbits = _real_bits, \ + .storagebits = _storage_bits, \ + .shift = _storage_bits - _real_bits, \ + .endianness = IIO_BE, \ + }, \ +} + +#define AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access) \ + __AD4000_PSEUDO_DIFF_CHANNEL((_sign), (_real_bits), \ + ((_real_bits) > 16 ? 32 : 16), (_reg_access)) + +static const char * const ad4000_power_supplies[] = { + "vdd", "vio" +}; + +enum ad4000_sdi { + AD4000_SDI_MOSI, + AD4000_SDI_VIO, + AD4000_SDI_CS, + AD4000_SDI_GND, +}; + +/* maps adi,sdi-pin property value to enum */ +static const char * const ad4000_sdi_pin[] = { + [AD4000_SDI_MOSI] = "sdi", + [AD4000_SDI_VIO] = "high", + [AD4000_SDI_CS] = "cs", + [AD4000_SDI_GND] = "low", +}; + +/* Gains stored as fractions of 1000 so they can be expressed by integers. */ +static const int ad4000_gains[] = { + 454, 909, 1000, 1900, +}; + +struct ad4000_chip_info { + const char *dev_name; + struct iio_chan_spec chan_spec; + struct iio_chan_spec reg_access_chan_spec; + bool has_hardware_gain; +}; + +static const struct ad4000_chip_info ad4000_chip_info = { + .dev_name = "ad4000", + .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0), + .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1), +}; + +static const struct ad4000_chip_info ad4001_chip_info = { + .dev_name = "ad4001", + .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1), +}; + +static const struct ad4000_chip_info ad4002_chip_info = { + .dev_name = "ad4002", + .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0), + .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1), +}; + +static const struct ad4000_chip_info ad4003_chip_info = { + .dev_name = "ad4003", + .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1), +}; + +static const struct ad4000_chip_info ad4004_chip_info = { + .dev_name = "ad4004", + .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0), + .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1), +}; + +static const struct ad4000_chip_info ad4005_chip_info = { + .dev_name = "ad4005", + .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1), +}; + +static const struct ad4000_chip_info ad4006_chip_info = { + .dev_name = "ad4006", + .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0), + .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1), +}; + +static const struct ad4000_chip_info ad4007_chip_info = { + .dev_name = "ad4007", + .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1), +}; + +static const struct ad4000_chip_info ad4008_chip_info = { + .dev_name = "ad4008", + .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0), + .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1), +}; + +static const struct ad4000_chip_info ad4010_chip_info = { + .dev_name = "ad4010", + .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0), + .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1), +}; + +static const struct ad4000_chip_info ad4011_chip_info = { + .dev_name = "ad4011", + .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1), +}; + +static const struct ad4000_chip_info ad4020_chip_info = { + .dev_name = "ad4020", + .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1), +}; + +static const struct ad4000_chip_info 
ad4021_chip_info = { + .dev_name = "ad4021", + .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1), +}; + +static const struct ad4000_chip_info ad4022_chip_info = { + .dev_name = "ad4022", + .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1), +}; + +static const struct ad4000_chip_info adaq4001_chip_info = { + .dev_name = "adaq4001", + .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1), + .has_hardware_gain = true, +}; + +static const struct ad4000_chip_info adaq4003_chip_info = { + .dev_name = "adaq4003", + .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0), + .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1), + .has_hardware_gain = true, +}; + +struct ad4000_state { + struct spi_device *spi; + struct gpio_desc *cnv_gpio; + struct spi_transfer xfers[2]; + struct spi_message msg; + struct mutex lock; /* Protect read modify write cycle */ + int vref_mv; + enum ad4000_sdi sdi_pin; + bool span_comp; + u16 gain_milli; + int scale_tbl[AD4000_SCALE_OPTIONS][2]; + + /* + * DMA (thus cache coherency maintenance) requires the transfer buffers + * to live in their own cache lines. + */ + struct { + union { + __be16 sample_buf16; + __be32 sample_buf32; + } data; + s64 timestamp __aligned(8); + } scan __aligned(IIO_DMA_MINALIGN); + u8 tx_buf[2]; + u8 rx_buf[2]; +}; + +static void ad4000_fill_scale_tbl(struct ad4000_state *st, + struct iio_chan_spec const *chan) +{ + int val, tmp0, tmp1; + int scale_bits; + u64 tmp2; + + /* + * ADCs that output two's complement code have one less bit to express + * voltage magnitude. + */ + if (chan->scan_type.sign == 's') + scale_bits = chan->scan_type.realbits - 1; + else + scale_bits = chan->scan_type.realbits; + + /* + * The gain is stored as a fraction of 1000 and, as we need to + * divide vref_mv by the gain, we invert the gain/1000 fraction. + * Also multiply by an extra MILLI to preserve precision. + * Thus, we have MILLI * MILLI equals MICRO as fraction numerator. + */ + val = mult_frac(st->vref_mv, MICRO, st->gain_milli); + + /* Would multiply by NANO here but we multiplied by extra MILLI */ + tmp2 = shift_right((u64)val * MICRO, scale_bits); + tmp0 = div_s64_rem(tmp2, NANO, &tmp1); + + /* Store scale for when span compression is disabled */ + st->scale_tbl[0][0] = tmp0; /* Integer part */ + st->scale_tbl[0][1] = abs(tmp1); /* Fractional part */ + + /* Store scale for when span compression is enabled */ + st->scale_tbl[1][0] = tmp0; + + /* The integer part is always zero so don't bother to divide it. */ + if (chan->differential) + st->scale_tbl[1][1] = DIV_ROUND_CLOSEST(abs(tmp1) * 4, 5); + else + st->scale_tbl[1][1] = DIV_ROUND_CLOSEST(abs(tmp1) * 9, 10); +} + +static int ad4000_write_reg(struct ad4000_state *st, uint8_t val) +{ + st->tx_buf[0] = AD4000_WRITE_COMMAND; + st->tx_buf[1] = val; + return spi_write(st->spi, st->tx_buf, ARRAY_SIZE(st->tx_buf)); +} + +static int ad4000_read_reg(struct ad4000_state *st, unsigned int *val) +{ + struct spi_transfer t = { + .tx_buf = st->tx_buf, + .rx_buf = st->rx_buf, + .len = 2, + }; + int ret; + + st->tx_buf[0] = AD4000_READ_COMMAND; + ret = spi_sync_transfer(st->spi, &t, 1); + if (ret < 0) + return ret; + + *val = st->rx_buf[1]; + return ret; +} + +static int ad4000_convert_and_acquire(struct ad4000_state *st) +{ + int ret; + + /* + * In 4-wire mode, the CNV line is held high for the entire conversion + * and acquisition process. 
In other modes, the CNV GPIO is optional + * and, if provided, replaces controller CS. If CNV GPIO is not defined + * gpiod_set_value_cansleep() has no effect. + */ + gpiod_set_value_cansleep(st->cnv_gpio, 1); + ret = spi_sync(st->spi, &st->msg); + gpiod_set_value_cansleep(st->cnv_gpio, 0); + + return ret; +} + +static int ad4000_single_conversion(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, int *val) +{ + struct ad4000_state *st = iio_priv(indio_dev); + u32 sample; + int ret; + + ret = ad4000_convert_and_acquire(st); + if (ret < 0) + return ret; + + if (chan->scan_type.storagebits > 16) + sample = be32_to_cpu(st->scan.data.sample_buf32); + else + sample = be16_to_cpu(st->scan.data.sample_buf16); + + sample >>= chan->scan_type.shift; + + if (chan->scan_type.sign == 's') + *val = sign_extend32(sample, chan->scan_type.realbits - 1); + + return IIO_VAL_INT; +} + +static int ad4000_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long info) +{ + struct ad4000_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_RAW: + iio_device_claim_direct_scoped(return -EBUSY, indio_dev) + return ad4000_single_conversion(indio_dev, chan, val); + unreachable(); + case IIO_CHAN_INFO_SCALE: + *val = st->scale_tbl[st->span_comp][0]; + *val2 = st->scale_tbl[st->span_comp][1]; + return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_OFFSET: + *val = 0; + if (st->span_comp) + *val = mult_frac(st->vref_mv, 1, 10); + + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int ad4000_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long info) +{ + struct ad4000_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_SCALE: + *vals = (int *)st->scale_tbl; + *length = AD4000_SCALE_OPTIONS * 2; + *type = IIO_VAL_INT_PLUS_NANO; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +static int ad4000_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SCALE: + return IIO_VAL_INT_PLUS_NANO; + default: + return IIO_VAL_INT_PLUS_MICRO; + } +} + +static int ad4000_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, int val2, + long mask) +{ + struct ad4000_state *st = iio_priv(indio_dev); + unsigned int reg_val; + bool span_comp_en; + int ret; + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + iio_device_claim_direct_scoped(return -EBUSY, indio_dev) { + guard(mutex)(&st->lock); + + ret = ad4000_read_reg(st, ®_val); + if (ret < 0) + return ret; + + span_comp_en = val2 == st->scale_tbl[1][1]; + reg_val &= ~AD4000_CFG_SPAN_COMP; + reg_val |= FIELD_PREP(AD4000_CFG_SPAN_COMP, span_comp_en); + + ret = ad4000_write_reg(st, reg_val); + if (ret < 0) + return ret; + + st->span_comp = span_comp_en; + return 0; + } + unreachable(); + default: + return -EINVAL; + } +} + +static irqreturn_t ad4000_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ad4000_state *st = iio_priv(indio_dev); + int ret; + + ret = ad4000_convert_and_acquire(st); + if (ret < 0) + goto err_out; + + iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, pf->timestamp); + +err_out: + iio_trigger_notify_done(indio_dev->trig); + return IRQ_HANDLED; +} + +static const struct iio_info ad4000_reg_access_info = { + .read_raw = &ad4000_read_raw, + .read_avail = &ad4000_read_avail, + .write_raw = 
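/*
 * Illustrative sketch, not part of the patch: how ad4000_single_conversion()
 * above unpacks a result, assuming a 20-bit two's-complement device whose
 * sample sits in the top bits of a 32-bit big-endian word
 * (scan_type.shift = 32 - 20 = 12). The helper name is hypothetical.
 */
static inline int ad4000_example_unpack_20bit(__be32 raw)
{
	u32 sample = be32_to_cpu(raw) >> 12;	/* drop the padding bits */

	return sign_extend32(sample, 20 - 1);	/* realbits - 1 */
}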
&ad4000_write_raw, + .write_raw_get_fmt = &ad4000_write_raw_get_fmt, +}; + +static const struct iio_info ad4000_info = { + .read_raw = &ad4000_read_raw, +}; + +/* + * This executes a data sample transfer for when the device connections are + * in "3-wire" mode, selected when the adi,sdi-pin device tree property is + * absent or set to "high". In this connection mode, the ADC SDI pin is + * connected to MOSI or to VIO and ADC CNV pin is connected either to a SPI + * controller CS or to a GPIO. + * AD4000 series of devices initiate conversions on the rising edge of CNV pin. + * + * If the CNV pin is connected to an SPI controller CS line (which is by default + * active low), the ADC readings would have a latency (delay) of one read. + * Moreover, since we also do ADC sampling for filling the buffer on triggered + * buffer mode, the timestamps of buffer readings would be disarranged. + * To prevent the read latency and reduce the time discrepancy between the + * sample read request and the time of actual sampling by the ADC, do a + * preparatory transfer to pulse the CS/CNV line. + */ +static int ad4000_prepare_3wire_mode_message(struct ad4000_state *st, + const struct iio_chan_spec *chan) +{ + unsigned int cnv_pulse_time = AD4000_TCONV_NS; + struct spi_transfer *xfers = st->xfers; + + xfers[0].cs_change = 1; + xfers[0].cs_change_delay.value = cnv_pulse_time; + xfers[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + + xfers[1].rx_buf = &st->scan.data; + xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits); + xfers[1].delay.value = AD4000_TQUIET2_NS; + xfers[1].delay.unit = SPI_DELAY_UNIT_NSECS; + + spi_message_init_with_transfers(&st->msg, st->xfers, 2); + + return devm_spi_optimize_message(&st->spi->dev, st->spi, &st->msg); +} + +/* + * This executes a data sample transfer for when the device connections are + * in "4-wire" mode, selected when the adi,sdi-pin device tree property is + * set to "cs". In this connection mode, the controller CS pin is connected to + * ADC SDI pin and a GPIO is connected to ADC CNV pin. + * The GPIO connected to ADC CNV pin is set outside of the SPI transfer. + */ +static int ad4000_prepare_4wire_mode_message(struct ad4000_state *st, + const struct iio_chan_spec *chan) +{ + unsigned int cnv_to_sdi_time = AD4000_TCONV_NS; + struct spi_transfer *xfers = st->xfers; + + /* + * Dummy transfer to cause enough delay between CNV going high and SDI + * going low. 
+ */ + xfers[0].cs_off = 1; + xfers[0].delay.value = cnv_to_sdi_time; + xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS; + + xfers[1].rx_buf = &st->scan.data; + xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits); + + spi_message_init_with_transfers(&st->msg, st->xfers, 2); + + return devm_spi_optimize_message(&st->spi->dev, st->spi, &st->msg); +} + +static int ad4000_config(struct ad4000_state *st) +{ + unsigned int reg_val = AD4000_CONFIG_REG_DEFAULT; + + if (device_property_present(&st->spi->dev, "adi,high-z-input")) + reg_val |= FIELD_PREP(AD4000_CFG_HIGHZ, 1); + + return ad4000_write_reg(st, reg_val); +} + +static int ad4000_probe(struct spi_device *spi) +{ + const struct ad4000_chip_info *chip; + struct device *dev = &spi->dev; + struct iio_dev *indio_dev; + struct ad4000_state *st; + int gain_idx, ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + chip = spi_get_device_match_data(spi); + if (!chip) + return -EINVAL; + + st = iio_priv(indio_dev); + st->spi = spi; + + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(ad4000_power_supplies), + ad4000_power_supplies); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable power supplies\n"); + + ret = devm_regulator_get_enable_read_voltage(dev, "ref"); + if (ret < 0) + return dev_err_probe(dev, ret, + "Failed to get ref regulator reference\n"); + st->vref_mv = ret / 1000; + + st->cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_HIGH); + if (IS_ERR(st->cnv_gpio)) + return dev_err_probe(dev, PTR_ERR(st->cnv_gpio), + "Failed to get CNV GPIO"); + + ret = device_property_match_property_string(dev, "adi,sdi-pin", + ad4000_sdi_pin, + ARRAY_SIZE(ad4000_sdi_pin)); + if (ret < 0 && ret != -EINVAL) + return dev_err_probe(dev, ret, + "getting adi,sdi-pin property failed\n"); + + /* Default to usual SPI connections if pin properties are not present */ + st->sdi_pin = ret == -EINVAL ? AD4000_SDI_MOSI : ret; + switch (st->sdi_pin) { + case AD4000_SDI_MOSI: + indio_dev->info = &ad4000_reg_access_info; + indio_dev->channels = &chip->reg_access_chan_spec; + + /* + * In "3-wire mode", the ADC SDI line must be kept high when + * data is not being clocked out of the controller. + * Request the SPI controller to make MOSI idle high. 
+ */ + spi->mode |= SPI_MOSI_IDLE_HIGH; + ret = spi_setup(spi); + if (ret < 0) + return ret; + + ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels); + if (ret) + return ret; + + ret = ad4000_config(st); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to config device\n"); + + break; + case AD4000_SDI_VIO: + indio_dev->info = &ad4000_info; + indio_dev->channels = &chip->chan_spec; + ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels); + if (ret) + return ret; + + break; + case AD4000_SDI_CS: + indio_dev->info = &ad4000_info; + indio_dev->channels = &chip->chan_spec; + ret = ad4000_prepare_4wire_mode_message(st, indio_dev->channels); + if (ret) + return ret; + + break; + case AD4000_SDI_GND: + return dev_err_probe(dev, -EPROTONOSUPPORT, + "Unsupported connection mode\n"); + + default: + return dev_err_probe(dev, -EINVAL, "Unrecognized connection mode\n"); + } + + indio_dev->name = chip->dev_name; + indio_dev->num_channels = 1; + + devm_mutex_init(dev, &st->lock); + + st->gain_milli = 1000; + if (chip->has_hardware_gain) { + ret = device_property_read_u16(dev, "adi,gain-milli", + &st->gain_milli); + if (!ret) { + /* Match gain value from dt to one of supported gains */ + gain_idx = find_closest(st->gain_milli, ad4000_gains, + ARRAY_SIZE(ad4000_gains)); + st->gain_milli = ad4000_gains[gain_idx]; + } else { + return dev_err_probe(dev, ret, + "Failed to read gain property\n"); + } + } + + ad4000_fill_scale_tbl(st, indio_dev->channels); + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, + &iio_pollfunc_store_time, + &ad4000_trigger_handler, NULL); + if (ret) + return ret; + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct spi_device_id ad4000_id[] = { + { "ad4000", (kernel_ulong_t)&ad4000_chip_info }, + { "ad4001", (kernel_ulong_t)&ad4001_chip_info }, + { "ad4002", (kernel_ulong_t)&ad4002_chip_info }, + { "ad4003", (kernel_ulong_t)&ad4003_chip_info }, + { "ad4004", (kernel_ulong_t)&ad4004_chip_info }, + { "ad4005", (kernel_ulong_t)&ad4005_chip_info }, + { "ad4006", (kernel_ulong_t)&ad4006_chip_info }, + { "ad4007", (kernel_ulong_t)&ad4007_chip_info }, + { "ad4008", (kernel_ulong_t)&ad4008_chip_info }, + { "ad4010", (kernel_ulong_t)&ad4010_chip_info }, + { "ad4011", (kernel_ulong_t)&ad4011_chip_info }, + { "ad4020", (kernel_ulong_t)&ad4020_chip_info }, + { "ad4021", (kernel_ulong_t)&ad4021_chip_info }, + { "ad4022", (kernel_ulong_t)&ad4022_chip_info }, + { "adaq4001", (kernel_ulong_t)&adaq4001_chip_info }, + { "adaq4003", (kernel_ulong_t)&adaq4003_chip_info }, + { } +}; +MODULE_DEVICE_TABLE(spi, ad4000_id); + +static const struct of_device_id ad4000_of_match[] = { + { .compatible = "adi,ad4000", .data = &ad4000_chip_info }, + { .compatible = "adi,ad4001", .data = &ad4001_chip_info }, + { .compatible = "adi,ad4002", .data = &ad4002_chip_info }, + { .compatible = "adi,ad4003", .data = &ad4003_chip_info }, + { .compatible = "adi,ad4004", .data = &ad4004_chip_info }, + { .compatible = "adi,ad4005", .data = &ad4005_chip_info }, + { .compatible = "adi,ad4006", .data = &ad4006_chip_info }, + { .compatible = "adi,ad4007", .data = &ad4007_chip_info }, + { .compatible = "adi,ad4008", .data = &ad4008_chip_info }, + { .compatible = "adi,ad4010", .data = &ad4010_chip_info }, + { .compatible = "adi,ad4011", .data = &ad4011_chip_info }, + { .compatible = "adi,ad4020", .data = &ad4020_chip_info }, + { .compatible = "adi,ad4021", .data = &ad4021_chip_info }, + { .compatible = "adi,ad4022", .data = &ad4022_chip_info }, + { .compatible = 
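/*
 * Illustrative note, not part of the patch: the adi,gain-milli lookup above
 * snaps the requested gain to the nearest supported value. find_closest()
 * expects an ascending array, which ad4000_gains is. For an assumed
 * devicetree value of 950:
 *
 *   find_closest(950, ad4000_gains, ARRAY_SIZE(ad4000_gains)) -> index 1
 *
 * so st->gain_milli becomes 909 (0.909 V/V), the closest supported gain.
 */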
"adi,adaq4001", .data = &adaq4001_chip_info }, + { .compatible = "adi,adaq4003", .data = &adaq4003_chip_info }, + { } +}; +MODULE_DEVICE_TABLE(of, ad4000_of_match); + +static struct spi_driver ad4000_driver = { + .driver = { + .name = "ad4000", + .of_match_table = ad4000_of_match, + }, + .probe = ad4000_probe, + .id_table = ad4000_id, +}; +module_spi_driver(ad4000_driver); + +MODULE_AUTHOR("Marcelo Schmitt "); +MODULE_DESCRIPTION("Analog Devices AD4000 ADC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c index e134d6497827a0..de32cc9d18c5ef 100644 --- a/drivers/iio/adc/ad4130.c +++ b/drivers/iio/adc/ad4130.c @@ -23,7 +23,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c new file mode 100644 index 00000000000000..595ec4158e73c3 --- /dev/null +++ b/drivers/iio/adc/ad4695.c @@ -0,0 +1,1185 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SPI ADC driver for Analog Devices Inc. AD4695 and similar chips + * + * https://www.analog.com/en/products/ad4695.html + * https://www.analog.com/en/products/ad4696.html + * https://www.analog.com/en/products/ad4697.html + * https://www.analog.com/en/products/ad4698.html + * + * Copyright 2024 Analog Devices Inc. + * Copyright 2024 BayLibre, SAS + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* AD4695 registers */ +#define AD4695_REG_SPI_CONFIG_A 0x0000 +#define AD4695_REG_SPI_CONFIG_A_SW_RST (BIT(7) | BIT(0)) +#define AD4695_REG_SPI_CONFIG_A_ADDR_DIR BIT(5) +#define AD4695_REG_SPI_CONFIG_B 0x0001 +#define AD4695_REG_SPI_CONFIG_B_INST_MODE BIT(7) +#define AD4695_REG_DEVICE_TYPE 0x0003 +#define AD4695_REG_SCRATCH_PAD 0x000A +#define AD4695_REG_VENDOR_L 0x000C +#define AD4695_REG_VENDOR_H 0x000D +#define AD4695_REG_LOOP_MODE 0x000E +#define AD4695_REG_SPI_CONFIG_C 0x0010 +#define AD4695_REG_SPI_CONFIG_C_MB_STRICT BIT(7) +#define AD4695_REG_SPI_STATUS 0x0011 +#define AD4695_REG_STATUS 0x0014 +#define AD4695_REG_ALERT_STATUS1 0x0015 +#define AD4695_REG_ALERT_STATUS2 0x0016 +#define AD4695_REG_CLAMP_STATUS 0x001A +#define AD4695_REG_SETUP 0x0020 +#define AD4695_REG_SETUP_LDO_EN BIT(4) +#define AD4695_REG_SETUP_SPI_MODE BIT(2) +#define AD4695_REG_SETUP_SPI_CYC_CTRL BIT(1) +#define AD4695_REG_REF_CTRL 0x0021 +#define AD4695_REG_REF_CTRL_OV_MODE BIT(7) +#define AD4695_REG_REF_CTRL_VREF_SET GENMASK(4, 2) +#define AD4695_REG_REF_CTRL_REFHIZ_EN BIT(1) +#define AD4695_REG_REF_CTRL_REFBUF_EN BIT(0) +#define AD4695_REG_SEQ_CTRL 0x0022 +#define AD4695_REG_SEQ_CTRL_STD_SEQ_EN BIT(7) +#define AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS GENMASK(6, 0) +#define AD4695_REG_AC_CTRL 0x0023 +#define AD4695_REG_STD_SEQ_CONFIG 0x0024 +#define AD4695_REG_GPIO_CTRL 0x0026 +#define AD4695_REG_GP_MODE 0x0027 +#define AD4695_REG_TEMP_CTRL 0x0029 +#define AD4695_REG_TEMP_CTRL_TEMP_EN BIT(0) +#define AD4695_REG_CONFIG_IN(n) (0x0030 | (n)) +#define AD4695_REG_CONFIG_IN_MODE BIT(6) +#define AD4695_REG_CONFIG_IN_PAIR GENMASK(5, 4) +#define AD4695_REG_CONFIG_IN_AINHIGHZ_EN BIT(3) +#define AD4695_REG_UPPER_IN(n) (0x0040 | (2 * (n))) +#define AD4695_REG_LOWER_IN(n) (0x0060 | (2 * (n))) +#define AD4695_REG_HYST_IN(n) (0x0080 | (2 * (n))) +#define AD4695_REG_OFFSET_IN(n) (0x00A0 | (2 * (n))) +#define AD4695_REG_GAIN_IN(n) (0x00C0 | (2 * (n))) +#define AD4695_REG_AS_SLOT(n) (0x0100 | (n)) +#define AD4695_REG_AS_SLOT_INX GENMASK(3, 
0) + +/* Conversion mode commands */ +#define AD4695_CMD_EXIT_CNV_MODE 0x0A +#define AD4695_CMD_TEMP_CHAN 0x0F +#define AD4695_CMD_VOLTAGE_CHAN(n) (0x10 | (n)) + +/* timing specs */ +#define AD4695_T_CONVERT_NS 415 +#define AD4695_T_WAKEUP_HW_MS 3 +#define AD4695_T_WAKEUP_SW_MS 3 +#define AD4695_T_REFBUF_MS 100 +#define AD4695_T_REGCONFIG_NS 20 +#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA) + +/* Max number of voltage input channels. */ +#define AD4695_MAX_CHANNELS 16 +/* Max size of 1 raw sample in bytes. */ +#define AD4695_MAX_CHANNEL_SIZE 2 + +enum ad4695_in_pair { + AD4695_IN_PAIR_REFGND, + AD4695_IN_PAIR_COM, + AD4695_IN_PAIR_EVEN_ODD, +}; + +struct ad4695_chip_info { + const char *name; + int max_sample_rate; + u32 t_acq_ns; + u8 num_voltage_inputs; +}; + +struct ad4695_channel_config { + unsigned int channel; + bool highz_en; + bool bipolar; + enum ad4695_in_pair pin_pairing; + unsigned int common_mode_mv; +}; + +struct ad4695_state { + struct spi_device *spi; + struct regmap *regmap; + struct regmap *regmap16; + struct gpio_desc *reset_gpio; + /* voltages channels plus temperature and timestamp */ + struct iio_chan_spec iio_chan[AD4695_MAX_CHANNELS + 2]; + struct ad4695_channel_config channels_cfg[AD4695_MAX_CHANNELS]; + const struct ad4695_chip_info *chip_info; + /* Reference voltage. */ + unsigned int vref_mv; + /* Common mode input pin voltage. */ + unsigned int com_mv; + /* 1 per voltage and temperature chan plus 1 xfer to trigger 1st CNV */ + struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS + 2]; + struct spi_message buf_read_msg; + /* Raw conversion data received. */ + u8 buf[ALIGN((AD4695_MAX_CHANNELS + 2) * AD4695_MAX_CHANNEL_SIZE, + sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN); + u16 raw_data; + /* Commands to send for single conversion. 
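/*
 * Illustrative note, not part of the patch: how the register macros above
 * expand for concrete channels and fields:
 *
 *   AD4695_REG_CONFIG_IN(5)    = 0x0030 | 5      = 0x0035
 *   AD4695_REG_GAIN_IN(15)     = 0x00C0 | 2 * 15 = 0x00DE
 *   AD4695_CMD_VOLTAGE_CHAN(3) = 0x10 | 3        = 0x13
 *
 * and how a field value is packed into a register:
 *
 *   FIELD_PREP(AD4695_REG_REF_CTRL_VREF_SET, 4) = 4 << 2 = 0x10
 *
 * since AD4695_REG_REF_CTRL_VREF_SET is GENMASK(4, 2) = 0x1c.
 */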
*/ + u16 cnv_cmd; + u8 cnv_cmd2; +}; + +static const struct regmap_range ad4695_regmap_rd_ranges[] = { + regmap_reg_range(AD4695_REG_SPI_CONFIG_A, AD4695_REG_SPI_CONFIG_B), + regmap_reg_range(AD4695_REG_DEVICE_TYPE, AD4695_REG_DEVICE_TYPE), + regmap_reg_range(AD4695_REG_SCRATCH_PAD, AD4695_REG_SCRATCH_PAD), + regmap_reg_range(AD4695_REG_VENDOR_L, AD4695_REG_LOOP_MODE), + regmap_reg_range(AD4695_REG_SPI_CONFIG_C, AD4695_REG_SPI_STATUS), + regmap_reg_range(AD4695_REG_STATUS, AD4695_REG_ALERT_STATUS2), + regmap_reg_range(AD4695_REG_CLAMP_STATUS, AD4695_REG_CLAMP_STATUS), + regmap_reg_range(AD4695_REG_SETUP, AD4695_REG_AC_CTRL), + regmap_reg_range(AD4695_REG_GPIO_CTRL, AD4695_REG_TEMP_CTRL), + regmap_reg_range(AD4695_REG_CONFIG_IN(0), AD4695_REG_CONFIG_IN(15)), + regmap_reg_range(AD4695_REG_AS_SLOT(0), AD4695_REG_AS_SLOT(127)), +}; + +static const struct regmap_access_table ad4695_regmap_rd_table = { + .yes_ranges = ad4695_regmap_rd_ranges, + .n_yes_ranges = ARRAY_SIZE(ad4695_regmap_rd_ranges), +}; + +static const struct regmap_range ad4695_regmap_wr_ranges[] = { + regmap_reg_range(AD4695_REG_SPI_CONFIG_A, AD4695_REG_SPI_CONFIG_B), + regmap_reg_range(AD4695_REG_SCRATCH_PAD, AD4695_REG_SCRATCH_PAD), + regmap_reg_range(AD4695_REG_LOOP_MODE, AD4695_REG_LOOP_MODE), + regmap_reg_range(AD4695_REG_SPI_CONFIG_C, AD4695_REG_SPI_STATUS), + regmap_reg_range(AD4695_REG_SETUP, AD4695_REG_AC_CTRL), + regmap_reg_range(AD4695_REG_GPIO_CTRL, AD4695_REG_TEMP_CTRL), + regmap_reg_range(AD4695_REG_CONFIG_IN(0), AD4695_REG_CONFIG_IN(15)), + regmap_reg_range(AD4695_REG_AS_SLOT(0), AD4695_REG_AS_SLOT(127)), +}; + +static const struct regmap_access_table ad4695_regmap_wr_table = { + .yes_ranges = ad4695_regmap_wr_ranges, + .n_yes_ranges = ARRAY_SIZE(ad4695_regmap_wr_ranges), +}; + +static const struct regmap_config ad4695_regmap_config = { + .name = "ad4695-8", + .reg_bits = 16, + .val_bits = 8, + .max_register = AD4695_REG_AS_SLOT(127), + .rd_table = &ad4695_regmap_rd_table, + .wr_table = &ad4695_regmap_wr_table, + .can_multi_write = true, +}; + +static const struct regmap_range ad4695_regmap16_rd_ranges[] = { + regmap_reg_range(AD4695_REG_STD_SEQ_CONFIG, AD4695_REG_STD_SEQ_CONFIG), + regmap_reg_range(AD4695_REG_UPPER_IN(0), AD4695_REG_GAIN_IN(15)), +}; + +static const struct regmap_access_table ad4695_regmap16_rd_table = { + .yes_ranges = ad4695_regmap16_rd_ranges, + .n_yes_ranges = ARRAY_SIZE(ad4695_regmap16_rd_ranges), +}; + +static const struct regmap_range ad4695_regmap16_wr_ranges[] = { + regmap_reg_range(AD4695_REG_STD_SEQ_CONFIG, AD4695_REG_STD_SEQ_CONFIG), + regmap_reg_range(AD4695_REG_UPPER_IN(0), AD4695_REG_GAIN_IN(15)), +}; + +static const struct regmap_access_table ad4695_regmap16_wr_table = { + .yes_ranges = ad4695_regmap16_wr_ranges, + .n_yes_ranges = ARRAY_SIZE(ad4695_regmap16_wr_ranges), +}; + +static const struct regmap_config ad4695_regmap16_config = { + .name = "ad4695-16", + .reg_bits = 16, + .reg_stride = 2, + .val_bits = 16, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .max_register = AD4695_REG_GAIN_IN(15), + .rd_table = &ad4695_regmap16_rd_table, + .wr_table = &ad4695_regmap16_wr_table, + .can_multi_write = true, +}; + +static const struct iio_chan_spec ad4695_channel_template = { + .type = IIO_VOLTAGE, + .indexed = 1, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_CALIBSCALE) | + BIT(IIO_CHAN_INFO_CALIBBIAS), + .info_mask_separate_available = BIT(IIO_CHAN_INFO_CALIBSCALE) | + BIT(IIO_CHAN_INFO_CALIBBIAS), + 
.scan_type = { + .sign = 'u', + .realbits = 16, + .storagebits = 16, + }, +}; + +static const struct iio_chan_spec ad4695_temp_channel_template = { + .address = AD4695_CMD_TEMP_CHAN, + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + .scan_type = { + .sign = 's', + .realbits = 16, + .storagebits = 16, + }, +}; + +static const struct iio_chan_spec ad4695_soft_timestamp_channel_template = + IIO_CHAN_SOFT_TIMESTAMP(0); + +static const char * const ad4695_power_supplies[] = { + "avdd", "vio" +}; + +static const struct ad4695_chip_info ad4695_chip_info = { + .name = "ad4695", + .max_sample_rate = 500 * KILO, + .t_acq_ns = 1715, + .num_voltage_inputs = 16, +}; + +static const struct ad4695_chip_info ad4696_chip_info = { + .name = "ad4696", + .max_sample_rate = 1 * MEGA, + .t_acq_ns = 715, + .num_voltage_inputs = 16, +}; + +static const struct ad4695_chip_info ad4697_chip_info = { + .name = "ad4697", + .max_sample_rate = 500 * KILO, + .t_acq_ns = 1715, + .num_voltage_inputs = 8, +}; + +static const struct ad4695_chip_info ad4698_chip_info = { + .name = "ad4698", + .max_sample_rate = 1 * MEGA, + .t_acq_ns = 715, + .num_voltage_inputs = 8, +}; + +/** + * ad4695_set_single_cycle_mode - Set the device in single cycle mode + * @st: The AD4695 state + * @channel: The first channel to read + * + * As per the datasheet, to enable single cycle mode, we need to set + * STD_SEQ_EN=0, NUM_SLOTS_AS=0 and CYC_CTRL=1 (Table 15). Setting SPI_MODE=1 + * triggers the first conversion using the channel in AS_SLOT0. + * + * Context: can sleep, must be called with iio_device_claim_direct held + * Return: 0 on success, a negative error code on failure + */ +static int ad4695_set_single_cycle_mode(struct ad4695_state *st, + unsigned int channel) +{ + int ret; + + ret = regmap_clear_bits(st->regmap, AD4695_REG_SEQ_CTRL, + AD4695_REG_SEQ_CTRL_STD_SEQ_EN | + AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS); + if (ret) + return ret; + + ret = regmap_write(st->regmap, AD4695_REG_AS_SLOT(0), + FIELD_PREP(AD4695_REG_AS_SLOT_INX, channel)); + if (ret) + return ret; + + return regmap_set_bits(st->regmap, AD4695_REG_SETUP, + AD4695_REG_SETUP_SPI_MODE | + AD4695_REG_SETUP_SPI_CYC_CTRL); +} + +/** + * ad4695_enter_advanced_sequencer_mode - Put the ADC in advanced sequencer mode + * @st: The driver state + * @n: The number of slots to use - must be >= 2, <= 128 + * + * As per the datasheet, to enable advanced sequencer, we need to set + * STD_SEQ_EN=0, NUM_SLOTS_AS=n-1 and CYC_CTRL=0 (Table 15). Setting SPI_MODE=1 + * triggers the first conversion using the channel in AS_SLOT0. + * + * Return: 0 on success, a negative error code on failure + */ +static int ad4695_enter_advanced_sequencer_mode(struct ad4695_state *st, u32 n) +{ + int ret; + + ret = regmap_update_bits(st->regmap, AD4695_REG_SEQ_CTRL, + AD4695_REG_SEQ_CTRL_STD_SEQ_EN | + AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS, + FIELD_PREP(AD4695_REG_SEQ_CTRL_STD_SEQ_EN, 0) | + FIELD_PREP(AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS, n - 1)); + if (ret) + return ret; + + return regmap_update_bits(st->regmap, AD4695_REG_SETUP, + AD4695_REG_SETUP_SPI_MODE | AD4695_REG_SETUP_SPI_CYC_CTRL, + FIELD_PREP(AD4695_REG_SETUP_SPI_MODE, 1) | + FIELD_PREP(AD4695_REG_SETUP_SPI_CYC_CTRL, 0)); +} + +/** + * ad4695_exit_conversion_mode - Exit conversion mode + * @st: The AD4695 state + * + * Sends SPI command to exit conversion mode. 
+ * + * Return: 0 on success, a negative error code on failure + */ +static int ad4695_exit_conversion_mode(struct ad4695_state *st) +{ + struct spi_transfer xfer = { + .tx_buf = &st->cnv_cmd2, + .len = 1, + .delay.value = AD4695_T_REGCONFIG_NS, + .delay.unit = SPI_DELAY_UNIT_NSECS, + }; + + /* + * Technically, could do a 5-bit transfer, but shifting to start of + * 8 bits instead for better SPI controller support. + */ + st->cnv_cmd2 = AD4695_CMD_EXIT_CNV_MODE << 3; + + return spi_sync_transfer(st->spi, &xfer, 1); +} + +static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv) +{ + u8 val; + + if (vref_mv >= 2400 && vref_mv <= 2750) + val = 0; + else if (vref_mv > 2750 && vref_mv <= 3250) + val = 1; + else if (vref_mv > 3250 && vref_mv <= 3750) + val = 2; + else if (vref_mv > 3750 && vref_mv <= 4500) + val = 3; + else if (vref_mv > 4500 && vref_mv <= 5100) + val = 4; + else + return -EINVAL; + + return regmap_update_bits(st->regmap, AD4695_REG_REF_CTRL, + AD4695_REG_REF_CTRL_VREF_SET, + FIELD_PREP(AD4695_REG_REF_CTRL_VREF_SET, val)); +} + +static int ad4695_write_chn_cfg(struct ad4695_state *st, + struct ad4695_channel_config *cfg) +{ + u32 mask, val; + + mask = AD4695_REG_CONFIG_IN_MODE; + val = FIELD_PREP(AD4695_REG_CONFIG_IN_MODE, cfg->bipolar ? 1 : 0); + + mask |= AD4695_REG_CONFIG_IN_PAIR; + val |= FIELD_PREP(AD4695_REG_CONFIG_IN_PAIR, cfg->pin_pairing); + + mask |= AD4695_REG_CONFIG_IN_AINHIGHZ_EN; + val |= FIELD_PREP(AD4695_REG_CONFIG_IN_AINHIGHZ_EN, + cfg->highz_en ? 1 : 0); + + return regmap_update_bits(st->regmap, + AD4695_REG_CONFIG_IN(cfg->channel), + mask, val); +} + +static int ad4695_buffer_preenable(struct iio_dev *indio_dev) +{ + struct ad4695_state *st = iio_priv(indio_dev); + struct spi_transfer *xfer; + u8 temp_chan_bit = st->chip_info->num_voltage_inputs; + u32 bit, num_xfer, num_slots; + u32 temp_en = 0; + int ret; + + /* + * We are using the advanced sequencer since it is the only way to read + * multiple channels that allows individual configuration of each + * voltage input channel. Slot 0 in the advanced sequencer is used to + * account for the gap between trigger polls - we don't read data from + * this slot. Each enabled voltage channel is assigned a slot starting + * with slot 1. + */ + num_slots = 1; + + memset(st->buf_read_xfer, 0, sizeof(st->buf_read_xfer)); + + /* First xfer is only to trigger conversion of slot 1, so no rx. */ + xfer = &st->buf_read_xfer[0]; + xfer->cs_change = 1; + xfer->delay.value = st->chip_info->t_acq_ns; + xfer->delay.unit = SPI_DELAY_UNIT_NSECS; + xfer->cs_change_delay.value = AD4695_T_CONVERT_NS; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + num_xfer = 1; + + iio_for_each_active_channel(indio_dev, bit) { + xfer = &st->buf_read_xfer[num_xfer]; + xfer->bits_per_word = 16; + xfer->rx_buf = &st->buf[(num_xfer - 1) * 2]; + xfer->len = 2; + xfer->cs_change = 1; + xfer->cs_change_delay.value = AD4695_T_CONVERT_NS; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + + if (bit == temp_chan_bit) { + temp_en = 1; + } else { + ret = regmap_write(st->regmap, + AD4695_REG_AS_SLOT(num_slots), + FIELD_PREP(AD4695_REG_AS_SLOT_INX, bit)); + if (ret) + return ret; + + num_slots++; + } + + num_xfer++; + } + + /* + * The advanced sequencer requires that at least 2 slots are enabled. + * Since slot 0 is always used for other purposes, we need only 1 + * enabled voltage channel to meet this requirement. 
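/*
 * Illustrative note, not part of the patch: ad4695_set_ref_voltage() above
 * maps the measured reference voltage onto the REF_CTRL.VREF_SET code by
 * range. For an assumed 4.096 V external reference (vref_mv = 4096), the
 * 3750 < vref_mv <= 4500 branch is taken and VREF_SET is written as 3.
 */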
If the temperature + * channel is the only enabled channel, we need to add one more slot + * in the sequence but not read from it. + */ + if (num_slots < 2) { + /* move last xfer so we can insert one more xfer before it */ + st->buf_read_xfer[num_xfer] = *xfer; + num_xfer++; + + /* modify 2nd to last xfer for extra slot */ + memset(xfer, 0, sizeof(*xfer)); + xfer->cs_change = 1; + xfer->delay.value = st->chip_info->t_acq_ns; + xfer->delay.unit = SPI_DELAY_UNIT_NSECS; + xfer->cs_change_delay.value = AD4695_T_CONVERT_NS; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + xfer++; + + /* and add the extra slot in the sequencer */ + ret = regmap_write(st->regmap, + AD4695_REG_AS_SLOT(num_slots), + FIELD_PREP(AD4695_REG_AS_SLOT_INX, 0)); + if (ret) + return ret; + + num_slots++; + } + + /* + * Don't keep CS asserted after last xfer. Also triggers conversion of + * slot 0. + */ + xfer->cs_change = 0; + + /* + * Temperature channel isn't included in the sequence, but rather + * controlled by setting a bit in the TEMP_CTRL register. + */ + + ret = regmap_update_bits(st->regmap, AD4695_REG_TEMP_CTRL, + AD4695_REG_TEMP_CTRL_TEMP_EN, + FIELD_PREP(AD4695_REG_TEMP_CTRL_TEMP_EN, temp_en)); + if (ret) + return ret; + + spi_message_init_with_transfers(&st->buf_read_msg, st->buf_read_xfer, + num_xfer); + + ret = spi_optimize_message(st->spi, &st->buf_read_msg); + if (ret) + return ret; + + /* This triggers conversion of slot 0. */ + ret = ad4695_enter_advanced_sequencer_mode(st, num_slots); + if (ret) + spi_unoptimize_message(&st->buf_read_msg); + + return ret; +} + +static int ad4695_buffer_postdisable(struct iio_dev *indio_dev) +{ + struct ad4695_state *st = iio_priv(indio_dev); + int ret; + + ret = ad4695_exit_conversion_mode(st); + if (ret) + return ret; + + spi_unoptimize_message(&st->buf_read_msg); + + return 0; +} + +static const struct iio_buffer_setup_ops ad4695_buffer_setup_ops = { + .preenable = ad4695_buffer_preenable, + .postdisable = ad4695_buffer_postdisable, +}; + +static irqreturn_t ad4695_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ad4695_state *st = iio_priv(indio_dev); + int ret; + + ret = spi_sync(st->spi, &st->buf_read_msg); + if (ret) + goto out; + + iio_push_to_buffers_with_timestamp(indio_dev, st->buf, pf->timestamp); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +/** + * ad4695_read_one_sample - Read a single sample using single-cycle mode + * @st: The AD4695 state + * @address: The address of the channel to read + * + * Upon successful return, the sample will be stored in `st->raw_data`. + * + * Context: can sleep, must be called with iio_device_claim_direct held + * Return: 0 on success, a negative error code on failure + */ +static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address) +{ + struct spi_transfer xfer[2] = { }; + int ret, i = 0; + + ret = ad4695_set_single_cycle_mode(st, address); + if (ret) + return ret; + + /* + * Setting the first channel to the temperature channel isn't supported + * in single-cycle mode, so we have to do an extra xfer to read the + * temperature. + */ + if (address == AD4695_CMD_TEMP_CHAN) { + /* We aren't reading, so we can make this a short xfer. 
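/*
 * Illustrative note, not part of the patch: as we read the preenable code
 * above, the buffered-read message for, say, enabled scan channels 2 and 5
 * plus temperature comes out as (each CS deassert starts the next
 * conversion):
 *
 *   xfer 0: no data, starts the conversion of AS_SLOT(1) = channel 2
 *   xfer 1: reads channel 2, starts the conversion of AS_SLOT(2) = channel 5
 *   xfer 2: reads channel 5, starts the temperature conversion that
 *           TEMP_EN appends to the sequence
 *   xfer 3: reads temperature; the final CS deassert starts AS_SLOT(0)
 *
 * Slot 0 only absorbs the gap until the next trigger; its result is never
 * read.
 */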
*/ + st->cnv_cmd2 = AD4695_CMD_TEMP_CHAN << 3; + xfer[0].tx_buf = &st->cnv_cmd2; + xfer[0].len = 1; + xfer[0].cs_change = 1; + xfer[0].cs_change_delay.value = AD4695_T_CONVERT_NS; + xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + + i = 1; + } + + /* Then read the result and exit conversion mode. */ + st->cnv_cmd = AD4695_CMD_EXIT_CNV_MODE << 11; + xfer[i].bits_per_word = 16; + xfer[i].tx_buf = &st->cnv_cmd; + xfer[i].rx_buf = &st->raw_data; + xfer[i].len = 2; + + return spi_sync_transfer(st->spi, xfer, i + 1); +} + +static int ad4695_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct ad4695_state *st = iio_priv(indio_dev); + struct ad4695_channel_config *cfg = &st->channels_cfg[chan->scan_index]; + u8 realbits = chan->scan_type.realbits; + unsigned int reg_val; + int ret, tmp; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + iio_device_claim_direct_scoped(return -EBUSY, indio_dev) { + ret = ad4695_read_one_sample(st, chan->address); + if (ret) + return ret; + + if (chan->scan_type.sign == 's') + *val = sign_extend32(st->raw_data, realbits - 1); + else + *val = st->raw_data; + + return IIO_VAL_INT; + } + unreachable(); + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_VOLTAGE: + *val = st->vref_mv; + *val2 = chan->scan_type.realbits; + return IIO_VAL_FRACTIONAL_LOG2; + case IIO_TEMP: + /* T_scale (°C) = raw * V_REF (mV) / (-1.8 mV/°C * 2^16) */ + *val = st->vref_mv * -556; + *val2 = 16; + return IIO_VAL_FRACTIONAL_LOG2; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_OFFSET: + switch (chan->type) { + case IIO_VOLTAGE: + if (cfg->pin_pairing == AD4695_IN_PAIR_COM) + *val = st->com_mv * (1 << realbits) / st->vref_mv; + else if (cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD) + *val = cfg->common_mode_mv * (1 << realbits) / st->vref_mv; + else + *val = 0; + + return IIO_VAL_INT; + case IIO_TEMP: + /* T_offset (°C) = -725 mV / (-1.8 mV/°C) */ + /* T_offset (raw) = T_offset (°C) * (-1.8 mV/°C) * 2^16 / V_REF (mV) */ + *val = -47513600; + *val2 = st->vref_mv; + return IIO_VAL_FRACTIONAL; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_CALIBSCALE: + switch (chan->type) { + case IIO_VOLTAGE: + iio_device_claim_direct_scoped(return -EBUSY, indio_dev) { + ret = regmap_read(st->regmap16, + AD4695_REG_GAIN_IN(chan->scan_index), + ®_val); + if (ret) + return ret; + + *val = reg_val; + *val2 = 15; + + return IIO_VAL_FRACTIONAL_LOG2; + } + unreachable(); + default: + return -EINVAL; + } + case IIO_CHAN_INFO_CALIBBIAS: + switch (chan->type) { + case IIO_VOLTAGE: + iio_device_claim_direct_scoped(return -EBUSY, indio_dev) { + ret = regmap_read(st->regmap16, + AD4695_REG_OFFSET_IN(chan->scan_index), + ®_val); + if (ret) + return ret; + + tmp = sign_extend32(reg_val, 15); + + *val = tmp / 4; + *val2 = abs(tmp) % 4 * MICRO / 4; + + if (tmp < 0 && *val2) { + *val *= -1; + *val2 *= -1; + } + + return IIO_VAL_INT_PLUS_MICRO; + } + unreachable(); + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int ad4695_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct ad4695_state *st = iio_priv(indio_dev); + unsigned int reg_val; + + iio_device_claim_direct_scoped(return -EBUSY, indio_dev) { + switch (mask) { + case IIO_CHAN_INFO_CALIBSCALE: + switch (chan->type) { + case IIO_VOLTAGE: + if (val < 0 || val2 < 0) + reg_val = 0; + else if (val > 1) + reg_val = U16_MAX; + else + reg_val = (val * (1 << 16) + + mul_u64_u32_div(val2, 1 << 16, + 
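/*
 * Illustrative note, not part of the patch: sanity-checking the temperature
 * constants used in ad4695_read_raw() above. With the -1.8 mV/degC slope and
 * 725 mV at 0 degC implied by the in-code comments:
 *
 *   scale  = vref_mv * -556 / 2^16   since -1000 / 1.8 ~= -556 milli-degC/mV
 *   offset = -725 * 2^16 / vref_mv   = -47513600 / vref_mv
 *
 * so temperature = (raw + offset) * scale. For example, with vref_mv = 5000
 * an assumed 680 mV sensor reading (raw ~= 8913) works out to
 * (725 - 680) / 1.8 = 25 degC.
 */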
MICRO)) / 2; + + return regmap_write(st->regmap16, + AD4695_REG_GAIN_IN(chan->scan_index), + reg_val); + default: + return -EINVAL; + } + case IIO_CHAN_INFO_CALIBBIAS: + switch (chan->type) { + case IIO_VOLTAGE: + if (val2 >= 0 && val > S16_MAX / 4) + reg_val = S16_MAX; + else if ((val2 < 0 ? -val : val) < S16_MIN / 4) + reg_val = S16_MIN; + else if (val2 < 0) + reg_val = clamp_t(int, + -(val * 4 + -val2 * 4 / MICRO), + S16_MIN, S16_MAX); + else if (val < 0) + reg_val = clamp_t(int, + val * 4 - val2 * 4 / MICRO, + S16_MIN, S16_MAX); + else + reg_val = clamp_t(int, + val * 4 + val2 * 4 / MICRO, + S16_MIN, S16_MAX); + + return regmap_write(st->regmap16, + AD4695_REG_OFFSET_IN(chan->scan_index), + reg_val); + default: + return -EINVAL; + } + default: + return -EINVAL; + } + } + unreachable(); +} + +static int ad4695_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + static const int ad4695_calibscale_available[6] = { + /* Range of 0 (inclusive) to 2 (exclusive) */ + 0, 15, 1, 15, U16_MAX, 15 + }; + static const int ad4695_calibbias_available[6] = { + /* + * Datasheet says FSR/8 which translates to signed/4. The step + * depends on oversampling ratio which is always 1 for now. + */ + S16_MIN / 4, 0, 0, MICRO / 4, S16_MAX / 4, S16_MAX % 4 * MICRO / 4 + }; + + switch (mask) { + case IIO_CHAN_INFO_CALIBSCALE: + switch (chan->type) { + case IIO_VOLTAGE: + *vals = ad4695_calibscale_available; + *type = IIO_VAL_FRACTIONAL_LOG2; + return IIO_AVAIL_RANGE; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_CALIBBIAS: + switch (chan->type) { + case IIO_VOLTAGE: + *vals = ad4695_calibbias_available; + *type = IIO_VAL_INT_PLUS_MICRO; + return IIO_AVAIL_RANGE; + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int ad4695_debugfs_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int writeval, + unsigned int *readval) +{ + struct ad4695_state *st = iio_priv(indio_dev); + + iio_device_claim_direct_scoped(return -EBUSY, indio_dev) { + if (readval) { + if (regmap_check_range_table(st->regmap, reg, + &ad4695_regmap_rd_table)) + return regmap_read(st->regmap, reg, readval); + if (regmap_check_range_table(st->regmap16, reg, + &ad4695_regmap16_rd_table)) + return regmap_read(st->regmap16, reg, readval); + } else { + if (regmap_check_range_table(st->regmap, reg, + &ad4695_regmap_wr_table)) + return regmap_write(st->regmap, reg, writeval); + if (regmap_check_range_table(st->regmap16, reg, + &ad4695_regmap16_wr_table)) + return regmap_write(st->regmap16, reg, writeval); + } + } + + return -EINVAL; +} + +static const struct iio_info ad4695_info = { + .read_raw = &ad4695_read_raw, + .write_raw = &ad4695_write_raw, + .read_avail = &ad4695_read_avail, + .debugfs_reg_access = &ad4695_debugfs_reg_access, +}; + +static int ad4695_parse_channel_cfg(struct ad4695_state *st) +{ + struct device *dev = &st->spi->dev; + struct ad4695_channel_config *chan_cfg; + struct iio_chan_spec *iio_chan; + int ret, i; + + /* populate defaults */ + for (i = 0; i < st->chip_info->num_voltage_inputs; i++) { + chan_cfg = &st->channels_cfg[i]; + iio_chan = &st->iio_chan[i]; + + chan_cfg->highz_en = true; + chan_cfg->channel = i; + + *iio_chan = ad4695_channel_template; + iio_chan->channel = i; + iio_chan->scan_index = i; + iio_chan->address = AD4695_CMD_VOLTAGE_CHAN(i); + } + + /* modify based on firmware description */ + device_for_each_child_node_scoped(dev, child) { + u32 reg, val; + + ret = 
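/*
 * Illustrative note, not part of the patch: the calibration registers above
 * use fixed-point encodings.
 *
 * CALIBSCALE (gain): the 16-bit register holds gain * 2^15, so unity gain is
 * 0x8000. Reads report reg / 2^15 (IIO_VAL_FRACTIONAL_LOG2 with *val2 = 15),
 * and writing 1.25 stores (1 * 2^16 + 250000 * 2^16 / MICRO) / 2 = 40960.
 *
 * CALIBBIAS (offset): the signed 16-bit register counts quarter-LSB steps,
 * so writing an offset of 2.5 stores 2 * 4 + 500000 * 4 / MICRO = 10, and
 * reading 10 back reports 2.5 (IIO_VAL_INT_PLUS_MICRO).
 */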
fwnode_property_read_u32(child, "reg", ®); + if (ret) + return dev_err_probe(dev, ret, + "failed to read reg property (%s)\n", + fwnode_get_name(child)); + + if (reg >= st->chip_info->num_voltage_inputs) + return dev_err_probe(dev, -EINVAL, + "reg out of range (%s)\n", + fwnode_get_name(child)); + + iio_chan = &st->iio_chan[reg]; + chan_cfg = &st->channels_cfg[reg]; + + chan_cfg->highz_en = + !fwnode_property_read_bool(child, "adi,no-high-z"); + chan_cfg->bipolar = fwnode_property_read_bool(child, "bipolar"); + + ret = fwnode_property_read_u32(child, "common-mode-channel", + &val); + if (ret && ret != -EINVAL) + return dev_err_probe(dev, ret, + "failed to read common-mode-channel (%s)\n", + fwnode_get_name(child)); + + if (ret == -EINVAL || val == AD4695_COMMON_MODE_REFGND) + chan_cfg->pin_pairing = AD4695_IN_PAIR_REFGND; + else if (val == AD4695_COMMON_MODE_COM) + chan_cfg->pin_pairing = AD4695_IN_PAIR_COM; + else + chan_cfg->pin_pairing = AD4695_IN_PAIR_EVEN_ODD; + + if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD && + val % 2 == 0) + return dev_err_probe(dev, -EINVAL, + "common-mode-channel must be odd number (%s)\n", + fwnode_get_name(child)); + + if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD && + val != reg + 1) + return dev_err_probe(dev, -EINVAL, + "common-mode-channel must be next consecutive channel (%s)\n", + fwnode_get_name(child)); + + if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD) { + char name[5]; + + snprintf(name, sizeof(name), "in%d", reg + 1); + + ret = devm_regulator_get_enable_read_voltage(dev, name); + if (ret < 0) + return dev_err_probe(dev, ret, + "failed to get %s voltage (%s)\n", + name, fwnode_get_name(child)); + + chan_cfg->common_mode_mv = ret / 1000; + } + + if (chan_cfg->bipolar && + chan_cfg->pin_pairing == AD4695_IN_PAIR_REFGND) + return dev_err_probe(dev, -EINVAL, + "bipolar mode is not available for inputs paired with REFGND (%s).\n", + fwnode_get_name(child)); + + if (chan_cfg->bipolar) + iio_chan->scan_type.sign = 's'; + + ret = ad4695_write_chn_cfg(st, chan_cfg); + if (ret) + return ret; + } + + /* Temperature channel must be next scan index after voltage channels. 
*/ + st->iio_chan[i] = ad4695_temp_channel_template; + st->iio_chan[i].scan_index = i; + i++; + + st->iio_chan[i] = ad4695_soft_timestamp_channel_template; + st->iio_chan[i].scan_index = i; + + return 0; +} + +static int ad4695_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct ad4695_state *st; + struct iio_dev *indio_dev; + struct gpio_desc *cnv_gpio; + bool use_internal_ldo_supply; + bool use_internal_ref_buffer; + int ret; + + cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_LOW); + if (IS_ERR(cnv_gpio)) + return dev_err_probe(dev, PTR_ERR(cnv_gpio), + "Failed to get CNV GPIO\n"); + + /* Driver currently requires CNV pin to be connected to SPI CS */ + if (cnv_gpio) + return dev_err_probe(dev, -ENODEV, + "CNV GPIO is not supported\n"); + + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + st->spi = spi; + + st->chip_info = spi_get_device_match_data(spi); + if (!st->chip_info) + return -EINVAL; + + /* Registers cannot be read at the max allowable speed */ + spi->max_speed_hz = AD4695_REG_ACCESS_SCLK_HZ; + + st->regmap = devm_regmap_init_spi(spi, &ad4695_regmap_config); + if (IS_ERR(st->regmap)) + return dev_err_probe(dev, PTR_ERR(st->regmap), + "Failed to initialize regmap\n"); + + st->regmap16 = devm_regmap_init_spi(spi, &ad4695_regmap16_config); + if (IS_ERR(st->regmap16)) + return dev_err_probe(dev, PTR_ERR(st->regmap16), + "Failed to initialize regmap16\n"); + + ret = devm_regulator_bulk_get_enable(dev, + ARRAY_SIZE(ad4695_power_supplies), + ad4695_power_supplies); + if (ret) + return dev_err_probe(dev, ret, + "Failed to enable power supplies\n"); + + /* If LDO_IN supply is present, then we are using internal LDO. */ + ret = devm_regulator_get_enable_optional(dev, "ldo-in"); + if (ret < 0 && ret != -ENODEV) + return dev_err_probe(dev, ret, + "Failed to enable LDO_IN supply\n"); + + use_internal_ldo_supply = ret == 0; + + if (!use_internal_ldo_supply) { + /* Otherwise we need an external VDD supply. */ + ret = devm_regulator_get_enable(dev, "vdd"); + if (ret < 0) + return dev_err_probe(dev, ret, + "Failed to enable VDD supply\n"); + } + + /* If REFIN supply is given, then we are using internal buffer */ + ret = devm_regulator_get_enable_read_voltage(dev, "refin"); + if (ret < 0 && ret != -ENODEV) + return dev_err_probe(dev, ret, "Failed to get REFIN voltage\n"); + + if (ret != -ENODEV) { + st->vref_mv = ret / 1000; + use_internal_ref_buffer = true; + } else { + /* Otherwise, we need an external reference. */ + ret = devm_regulator_get_enable_read_voltage(dev, "ref"); + if (ret < 0) + return dev_err_probe(dev, ret, + "Failed to get REF voltage\n"); + + st->vref_mv = ret / 1000; + use_internal_ref_buffer = false; + } + + ret = devm_regulator_get_enable_read_voltage(dev, "com"); + if (ret < 0 && ret != -ENODEV) + return dev_err_probe(dev, ret, "Failed to get COM voltage\n"); + + st->com_mv = ret == -ENODEV ? 0 : ret / 1000; + + /* + * Reset the device using hardware reset if available or fall back to + * software reset. + */ + + st->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(st->reset_gpio)) + return PTR_ERR(st->reset_gpio); + + if (st->reset_gpio) { + gpiod_set_value(st->reset_gpio, 0); + msleep(AD4695_T_WAKEUP_HW_MS); + } else { + ret = regmap_write(st->regmap, AD4695_REG_SPI_CONFIG_A, + AD4695_REG_SPI_CONFIG_A_SW_RST); + if (ret) + return ret; + + msleep(AD4695_T_WAKEUP_SW_MS); + } + + /* Needed for regmap16 to be able to work correctly. 
*/ + ret = regmap_set_bits(st->regmap, AD4695_REG_SPI_CONFIG_A, + AD4695_REG_SPI_CONFIG_A_ADDR_DIR); + if (ret) + return ret; + + /* Disable internal LDO if it isn't needed. */ + ret = regmap_update_bits(st->regmap, AD4695_REG_SETUP, + AD4695_REG_SETUP_LDO_EN, + FIELD_PREP(AD4695_REG_SETUP_LDO_EN, + use_internal_ldo_supply ? 1 : 0)); + if (ret) + return ret; + + /* configure reference supply */ + + if (device_property_present(dev, "adi,no-ref-current-limit")) { + ret = regmap_set_bits(st->regmap, AD4695_REG_REF_CTRL, + AD4695_REG_REF_CTRL_OV_MODE); + if (ret) + return ret; + } + + if (device_property_present(dev, "adi,no-ref-high-z")) { + if (use_internal_ref_buffer) + return dev_err_probe(dev, -EINVAL, + "Cannot disable high-Z mode for internal reference buffer\n"); + + ret = regmap_clear_bits(st->regmap, AD4695_REG_REF_CTRL, + AD4695_REG_REF_CTRL_REFHIZ_EN); + if (ret) + return ret; + } + + ret = ad4695_set_ref_voltage(st, st->vref_mv); + if (ret) + return ret; + + if (use_internal_ref_buffer) { + ret = regmap_set_bits(st->regmap, AD4695_REG_REF_CTRL, + AD4695_REG_REF_CTRL_REFBUF_EN); + if (ret) + return ret; + + /* Give the capacitor some time to charge up. */ + msleep(AD4695_T_REFBUF_MS); + } + + ret = ad4695_parse_channel_cfg(st); + if (ret) + return ret; + + indio_dev->name = st->chip_info->name; + indio_dev->info = &ad4695_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = st->iio_chan; + indio_dev->num_channels = st->chip_info->num_voltage_inputs + 2; + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, + iio_pollfunc_store_time, + ad4695_trigger_handler, + &ad4695_buffer_setup_ops); + if (ret) + return ret; + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct spi_device_id ad4695_spi_id_table[] = { + { .name = "ad4695", .driver_data = (kernel_ulong_t)&ad4695_chip_info }, + { .name = "ad4696", .driver_data = (kernel_ulong_t)&ad4696_chip_info }, + { .name = "ad4697", .driver_data = (kernel_ulong_t)&ad4697_chip_info }, + { .name = "ad4698", .driver_data = (kernel_ulong_t)&ad4698_chip_info }, + { } +}; +MODULE_DEVICE_TABLE(spi, ad4695_spi_id_table); + +static const struct of_device_id ad4695_of_match_table[] = { + { .compatible = "adi,ad4695", .data = &ad4695_chip_info, }, + { .compatible = "adi,ad4696", .data = &ad4696_chip_info, }, + { .compatible = "adi,ad4697", .data = &ad4697_chip_info, }, + { .compatible = "adi,ad4698", .data = &ad4698_chip_info, }, + { } +}; +MODULE_DEVICE_TABLE(of, ad4695_of_match_table); + +static struct spi_driver ad4695_driver = { + .driver = { + .name = "ad4695", + .of_match_table = ad4695_of_match_table, + }, + .probe = ad4695_probe, + .id_table = ad4695_spi_id_table, +}; +module_spi_driver(ad4695_driver); + +MODULE_AUTHOR("Ramona Gradinariu "); +MODULE_AUTHOR("David Lechner "); +MODULE_DESCRIPTION("Analog Devices AD4695 ADC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c index a75837334157da..1b59708abf3076 100644 --- a/drivers/iio/adc/ad7091r5.c +++ b/drivers/iio/adc/ad7091r5.c @@ -112,13 +112,13 @@ static int ad7091r5_i2c_probe(struct i2c_client *i2c) static const struct of_device_id ad7091r5_dt_ids[] = { { .compatible = "adi,ad7091r5", .data = &ad7091r5_init_info }, - {}, + { } }; MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids); static const struct i2c_device_id ad7091r5_i2c_ids[] = { - {"ad7091r5", (kernel_ulong_t)&ad7091r5_init_info }, - {} + { "ad7091r5", (kernel_ulong_t)&ad7091r5_init_info }, + { } }; MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids); diff 
--git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c index 70056430505752..c9e014d6a77cae 100644 --- a/drivers/iio/adc/ad7091r8.c +++ b/drivers/iio/adc/ad7091r8.c @@ -159,7 +159,7 @@ static int ad7091r_regmap_bus_reg_write(void *context, unsigned int reg, return spi_write(spi, &st->tx_buf, 2); } -static struct regmap_bus ad7091r8_regmap_bus = { +static const struct regmap_bus ad7091r8_regmap_bus = { .reg_read = ad7091r_regmap_bus_reg_read, .reg_write = ad7091r_regmap_bus_reg_write, .reg_format_endian_default = REGMAP_ENDIAN_BIG, diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 108e9ccab1ef06..a5d91933f505b3 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -382,8 +382,7 @@ static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channe cfg->vref_mv = 2500; st->adc_control &= ~AD7124_ADC_CTRL_REF_EN_MSK; st->adc_control |= AD7124_ADC_CTRL_REF_EN(1); - return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, - 2, st->adc_control); + return 0; default: dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel); return -EINVAL; @@ -401,24 +400,17 @@ static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_co tmp = (cfg->buf_positive << 1) + cfg->buf_negative; val = AD7124_CONFIG_BIPOLAR(cfg->bipolar) | AD7124_CONFIG_REF_SEL(cfg->refsel) | - AD7124_CONFIG_IN_BUFF(tmp); - ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg->cfg_slot), 2, val); - if (ret < 0) - return ret; + AD7124_CONFIG_IN_BUFF(tmp) | AD7124_CONFIG_PGA(cfg->pga_bits); - tmp = AD7124_FILTER_TYPE_SEL(cfg->filter_type); - ret = ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot), AD7124_FILTER_TYPE_MSK, - tmp, 3); - if (ret < 0) - return ret; - - ret = ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot), AD7124_FILTER_FS_MSK, - AD7124_FILTER_FS(cfg->odr_sel_bits), 3); + ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg->cfg_slot), 2, val); if (ret < 0) return ret; - return ad7124_spi_write_mask(st, AD7124_CONFIG(cfg->cfg_slot), AD7124_CONFIG_PGA_MSK, - AD7124_CONFIG_PGA(cfg->pga_bits), 2); + tmp = AD7124_FILTER_TYPE_SEL(cfg->filter_type) | + AD7124_FILTER_FS(cfg->odr_sel_bits); + return ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot), + AD7124_FILTER_TYPE_MSK | AD7124_FILTER_FS_MSK, + tmp, 3); } static struct ad7124_channel_config *ad7124_pop_config(struct ad7124_state *st) @@ -907,9 +899,9 @@ static int ad7124_setup(struct ad7124_state *st) /* Set the power mode */ st->adc_control &= ~AD7124_ADC_CTRL_PWR_MSK; st->adc_control |= AD7124_ADC_CTRL_PWR(power_mode); - ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control); - if (ret < 0) - return ret; + + st->adc_control &= ~AD7124_ADC_CTRL_MODE_MSK; + st->adc_control |= AD7124_ADC_CTRL_MODE(AD_SD_MODE_IDLE); mutex_init(&st->cfgs_lock); INIT_KFIFO(st->live_cfgs_fifo); @@ -927,6 +919,10 @@ static int ad7124_setup(struct ad7124_state *st) ad7124_set_channel_odr(st, i, 10); } + ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control); + if (ret < 0) + return ret; + return ret; } @@ -1016,14 +1012,14 @@ static const struct of_device_id ad7124_of_match[] = { .data = &ad7124_chip_info_tbl[ID_AD7124_4], }, { .compatible = "adi,ad7124-8", .data = &ad7124_chip_info_tbl[ID_AD7124_8], }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ad7124_of_match); static const struct spi_device_id ad71124_ids[] = { { "ad7124-4", (kernel_ulong_t)&ad7124_chip_info_tbl[ID_AD7124_4] }, { "ad7124-8", (kernel_ulong_t)&ad7124_chip_info_tbl[ID_AD7124_8] }, - {} + { } }; MODULE_DEVICE_TABLE(spi, 
ad71124_ids); diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 334ab90991d4c0..7042ddfdfc03ee 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -201,6 +202,7 @@ struct ad7192_chip_info { struct ad7192_state { const struct ad7192_chip_info *chip_info; struct clk *mclk; + struct clk_hw int_clk_hw; u16 int_vref_mv; u32 aincom_mv; u32 fclk; @@ -284,7 +286,7 @@ static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = { &ad7192_syscalib_mode_enum), IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE, &ad7192_syscalib_mode_enum), - {} + { } }; static struct ad7192_state *ad_sigma_delta_to_ad7192(struct ad_sigma_delta *sd) @@ -396,25 +398,162 @@ static inline bool ad7192_valid_external_frequency(u32 freq) freq <= AD7192_EXT_FREQ_MHZ_MAX); } -static int ad7192_clock_select(struct ad7192_state *st) +/* + * Position 0 of ad7192_clock_names, xtal, corresponds to clock source + * configuration AD7192_CLK_EXT_MCLK1_2 and position 1, mclk, corresponds to + * AD7192_CLK_EXT_MCLK2 + */ +static const char *const ad7192_clock_names[] = { + "xtal", + "mclk" +}; + +static struct ad7192_state *clk_hw_to_ad7192(struct clk_hw *hw) +{ + return container_of(hw, struct ad7192_state, int_clk_hw); +} + +static unsigned long ad7192_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return AD7192_INT_FREQ_MHZ; +} + +static int ad7192_clk_output_is_enabled(struct clk_hw *hw) +{ + struct ad7192_state *st = clk_hw_to_ad7192(hw); + + return st->clock_sel == AD7192_CLK_INT_CO; +} + +static int ad7192_clk_prepare(struct clk_hw *hw) +{ + struct ad7192_state *st = clk_hw_to_ad7192(hw); + int ret; + + st->mode &= ~AD7192_MODE_CLKSRC_MASK; + st->mode |= AD7192_CLK_INT_CO; + + ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode); + if (ret) + return ret; + + st->clock_sel = AD7192_CLK_INT_CO; + + return 0; +} + +static void ad7192_clk_unprepare(struct clk_hw *hw) +{ + struct ad7192_state *st = clk_hw_to_ad7192(hw); + int ret; + + st->mode &= ~AD7192_MODE_CLKSRC_MASK; + st->mode |= AD7192_CLK_INT; + + ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode); + if (ret) + return; + + st->clock_sel = AD7192_CLK_INT; +} + +static const struct clk_ops ad7192_int_clk_ops = { + .recalc_rate = ad7192_clk_recalc_rate, + .is_enabled = ad7192_clk_output_is_enabled, + .prepare = ad7192_clk_prepare, + .unprepare = ad7192_clk_unprepare, +}; + +static int ad7192_register_clk_provider(struct ad7192_state *st) { struct device *dev = &st->sd.spi->dev; - unsigned int clock_sel; + struct clk_init_data init = {}; + int ret; - clock_sel = AD7192_CLK_INT; + if (!IS_ENABLED(CONFIG_COMMON_CLK)) + return 0; - /* use internal clock */ - if (!st->mclk) { - if (device_property_read_bool(dev, "adi,int-clock-output-enable")) - clock_sel = AD7192_CLK_INT_CO; - } else { - if (device_property_read_bool(dev, "adi,clock-xtal")) - clock_sel = AD7192_CLK_EXT_MCLK1_2; - else - clock_sel = AD7192_CLK_EXT_MCLK2; + if (!device_property_present(dev, "#clock-cells")) + return 0; + + init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-clk", + fwnode_get_name(dev_fwnode(dev))); + if (!init.name) + return -ENOMEM; + + init.ops = &ad7192_int_clk_ops; + + st->int_clk_hw.init = &init; + ret = devm_clk_hw_register(dev, &st->int_clk_hw); + if (ret) + return ret; + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, + &st->int_clk_hw); +} + +static int ad7192_clock_setup(struct ad7192_state *st) +{ 
+ struct device *dev = &st->sd.spi->dev; + int ret; + + /* + * The following two if branches are kept for backward compatibility but + * the use of the two devicetree properties is highly discouraged. Clock + * configuration should be done according to the bindings. + */ + + if (device_property_read_bool(dev, "adi,int-clock-output-enable")) { + st->clock_sel = AD7192_CLK_INT_CO; + st->fclk = AD7192_INT_FREQ_MHZ; + dev_warn(dev, "Property adi,int-clock-output-enable is deprecated! Check bindings!\n"); + return 0; + } + + if (device_property_read_bool(dev, "adi,clock-xtal")) { + st->clock_sel = AD7192_CLK_EXT_MCLK1_2; + st->mclk = devm_clk_get_enabled(dev, "mclk"); + if (IS_ERR(st->mclk)) + return dev_err_probe(dev, PTR_ERR(st->mclk), + "Failed to get mclk\n"); + + st->fclk = clk_get_rate(st->mclk); + if (!ad7192_valid_external_frequency(st->fclk)) + return dev_err_probe(dev, -EINVAL, + "External clock frequency out of bounds\n"); + + dev_warn(dev, "Property adi,clock-xtal is deprecated! Check bindings!\n"); + return 0; + } + + ret = device_property_match_property_string(dev, "clock-names", + ad7192_clock_names, + ARRAY_SIZE(ad7192_clock_names)); + if (ret < 0) { + st->clock_sel = AD7192_CLK_INT; + st->fclk = AD7192_INT_FREQ_MHZ; + + ret = ad7192_register_clk_provider(st); + if (ret) + return dev_err_probe(dev, ret, + "Failed to register clock provider\n"); + return 0; } - return clock_sel; + st->clock_sel = AD7192_CLK_EXT_MCLK1_2 + ret; + + st->mclk = devm_clk_get_enabled(dev, ad7192_clock_names[ret]); + if (IS_ERR(st->mclk)) + return dev_err_probe(dev, PTR_ERR(st->mclk), + "Failed to get clock source\n"); + + st->fclk = clk_get_rate(st->mclk); + if (!ad7192_valid_external_frequency(st->fclk)) + return dev_err_probe(dev, -EINVAL, + "External clock frequency out of bounds\n"); + + return 0; } static int ad7192_setup(struct iio_dev *indio_dev, struct device *dev) @@ -1275,21 +1414,9 @@ static int ad7192_probe(struct spi_device *spi) if (ret) return ret; - st->fclk = AD7192_INT_FREQ_MHZ; - - st->mclk = devm_clk_get_optional_enabled(dev, "mclk"); - if (IS_ERR(st->mclk)) - return PTR_ERR(st->mclk); - - st->clock_sel = ad7192_clock_select(st); - - if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 || - st->clock_sel == AD7192_CLK_EXT_MCLK2) { - st->fclk = clk_get_rate(st->mclk); - if (!ad7192_valid_external_frequency(st->fclk)) - return dev_err_probe(dev, -EINVAL, - "External clock frequency out of bounds\n"); - } + ret = ad7192_clock_setup(st); + if (ret) + return ret; ret = ad7192_setup(indio_dev, dev); if (ret) @@ -1304,7 +1431,7 @@ static const struct of_device_id ad7192_of_match[] = { { .compatible = "adi,ad7193", .data = &ad7192_chip_info_tbl[ID_AD7193] }, { .compatible = "adi,ad7194", .data = &ad7192_chip_info_tbl[ID_AD7194] }, { .compatible = "adi,ad7195", .data = &ad7192_chip_info_tbl[ID_AD7195] }, - {} + { } }; MODULE_DEVICE_TABLE(of, ad7192_of_match); @@ -1314,7 +1441,7 @@ static const struct spi_device_id ad7192_ids[] = { { "ad7193", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7193] }, { "ad7194", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7194] }, { "ad7195", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7195] }, - {} + { } }; MODULE_DEVICE_TABLE(spi, ad7192_ids); diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index bdac020045b48d..7949b076fb87ea 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -123,7 +123,8 @@ static int ad7266_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct ad7266_state *st = iio_priv(indio_dev); - 
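/*
 * Illustrative note, not part of the patch: the clock-names lookup above
 * returns the index into ad7192_clock_names, which is then offset onto the
 * clock source codes. For an assumed node with clock-names = "mclk", the
 * match returns 1, so clock_sel = AD7192_CLK_EXT_MCLK1_2 + 1 =
 * AD7192_CLK_EXT_MCLK2 and the "mclk" clock is requested; "xtal" yields
 * AD7192_CLK_EXT_MCLK1_2. If neither name is present, the internal clock is
 * used and, when #clock-cells is given, exposed as a clock provider.
 */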
unsigned int nr = find_first_bit(scan_mask, indio_dev->masklength); + unsigned int nr = find_first_bit(scan_mask, + iio_get_masklength(indio_dev)); ad7266_select_input(st, nr); @@ -456,8 +457,8 @@ static int ad7266_probe(struct spi_device *spi) } static const struct spi_device_id ad7266_id[] = { - {"ad7265", 0}, - {"ad7266", 0}, + { "ad7265", 0 }, + { "ad7266", 0 }, { } }; MODULE_DEVICE_TABLE(spi, ad7266_id); diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c index d4a4e15c82447c..35aa39fe4bde62 100644 --- a/drivers/iio/adc/ad7280a.c +++ b/drivers/iio/adc/ad7280a.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -803,16 +804,16 @@ static irqreturn_t ad7280_event_handler(int irq, void *private) { struct iio_dev *indio_dev = private; struct ad7280_state *st = iio_priv(indio_dev); - unsigned int *channels; int i, ret; - channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL); + unsigned int *channels __free(kfree) = kcalloc(st->scan_cnt, sizeof(*channels), + GFP_KERNEL); if (!channels) return IRQ_HANDLED; ret = ad7280_read_all_channels(st, st->scan_cnt, channels); if (ret < 0) - goto out; + return IRQ_HANDLED; for (i = 0; i < st->scan_cnt; i++) { unsigned int val; @@ -852,9 +853,6 @@ static irqreturn_t ad7280_event_handler(int irq, void *private) } } -out: - kfree(channels); - return IRQ_HANDLED; } @@ -1092,8 +1090,8 @@ static int ad7280_probe(struct spi_device *spi) } static const struct spi_device_id ad7280_id[] = { - {"ad7280a", 0}, - {} + { "ad7280a", 0 }, + { } }; MODULE_DEVICE_TABLE(spi, ad7280_id); diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c index b59b2a51623c39..4c7f887adbbf2f 100644 --- a/drivers/iio/adc/ad7291.c +++ b/drivers/iio/adc/ad7291.c @@ -537,14 +537,14 @@ static int ad7291_probe(struct i2c_client *client) static const struct i2c_device_id ad7291_id[] = { { "ad7291" }, - {} + { } }; MODULE_DEVICE_TABLE(i2c, ad7291_id); static const struct of_device_id ad7291_of_match[] = { { .compatible = "adi,ad7291" }, - {} + { } }; MODULE_DEVICE_TABLE(of, ad7291_of_match); diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c index ede80f380911b6..a398973f313d8a 100644 --- a/drivers/iio/adc/ad7292.c +++ b/drivers/iio/adc/ad7292.c @@ -301,13 +301,13 @@ static int ad7292_probe(struct spi_device *spi) static const struct spi_device_id ad7292_id_table[] = { { "ad7292", 0 }, - {} + { } }; MODULE_DEVICE_TABLE(spi, ad7292_id_table); static const struct of_device_id ad7292_of_match[] = { { .compatible = "adi,ad7292" }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ad7292_of_match); diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c index c0430f71f592a4..b35bd4d9ef81b0 100644 --- a/drivers/iio/adc/ad7298.c +++ b/drivers/iio/adc/ad7298.c @@ -109,7 +109,8 @@ static int ad7298_update_scan_mode(struct iio_dev *indio_dev, int scan_count; /* Now compute overall size */ - scan_count = bitmap_weight(active_scan_mask, indio_dev->masklength); + scan_count = bitmap_weight(active_scan_mask, + iio_get_masklength(indio_dev)); command = AD7298_WRITE | st->ext_ref; @@ -354,8 +355,8 @@ static const struct acpi_device_id ad7298_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, ad7298_acpi_ids); static const struct spi_device_id ad7298_id[] = { - {"ad7298", 0}, - {} + { "ad7298", 0 }, + { } }; MODULE_DEVICE_TABLE(spi, ad7298_id); diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c index 7568cd0a2b32fc..e8bddfb0d07dbc 100644 --- a/drivers/iio/adc/ad7380.c +++ b/drivers/iio/adc/ad7380.c @@ -8,9 +8,11 @@ * Datasheets of 
supported parts: * ad7380/1 : https://www.analog.com/media/en/technical-documentation/data-sheets/AD7380-7381.pdf * ad7383/4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-7384.pdf + * ad7386/7/8 : https://www.analog.com/media/en/technical-documentation/data-sheets/AD7386-7387-7388.pdf * ad7380-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7380-4.pdf * ad7381-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7381-4.pdf * ad7383/4-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-4-ad7384-4.pdf + * ad7386/7/8-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7386-4-7387-4-7388-4.pdf */ #include @@ -31,7 +33,7 @@ #include #include -#define MAX_NUM_CHANNELS 4 +#define MAX_NUM_CHANNELS 8 /* 2.5V internal reference voltage */ #define AD7380_INTERNAL_REF_MV 2500 @@ -49,6 +51,8 @@ #define AD7380_REG_ADDR_ALERT_LOW_TH 0x4 #define AD7380_REG_ADDR_ALERT_HIGH_TH 0x5 +#define AD7380_CONFIG1_CH BIT(11) +#define AD7380_CONFIG1_SEQ BIT(10) #define AD7380_CONFIG1_OS_MODE BIT(9) #define AD7380_CONFIG1_OSR GENMASK(8, 6) #define AD7380_CONFIG1_CRC_W BIT(5) @@ -80,6 +84,8 @@ struct ad7380_chip_info { const char *name; const struct iio_chan_spec *channels; unsigned int num_channels; + unsigned int num_simult_channels; + bool has_mux; const char * const *vcm_supplies; unsigned int num_vcm_supplies; const unsigned long *available_scan_masks; @@ -91,82 +97,151 @@ enum { AD7380_SCAN_TYPE_RESOLUTION_BOOST, }; -/* Extended scan types for 14-bit chips. */ -static const struct iio_scan_type ad7380_scan_type_14[] = { +/* Extended scan types for 12-bit unsigned chips. */ +static const struct iio_scan_type ad7380_scan_type_12_u[] = { + [AD7380_SCAN_TYPE_NORMAL] = { + .sign = 'u', + .realbits = 12, + .storagebits = 16, + .endianness = IIO_CPU, + }, + [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = { + .sign = 'u', + .realbits = 14, + .storagebits = 16, + .endianness = IIO_CPU, + }, +}; + +/* Extended scan types for 14-bit signed chips. */ +static const struct iio_scan_type ad7380_scan_type_14_s[] = { [AD7380_SCAN_TYPE_NORMAL] = { .sign = 's', .realbits = 14, .storagebits = 16, - .endianness = IIO_CPU + .endianness = IIO_CPU, }, [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = { .sign = 's', .realbits = 16, .storagebits = 16, - .endianness = IIO_CPU + .endianness = IIO_CPU, }, }; -/* Extended scan types for 16-bit chips. */ -static const struct iio_scan_type ad7380_scan_type_16[] = { +/* Extended scan types for 14-bit unsigned chips. */ +static const struct iio_scan_type ad7380_scan_type_14_u[] = { + [AD7380_SCAN_TYPE_NORMAL] = { + .sign = 'u', + .realbits = 14, + .storagebits = 16, + .endianness = IIO_CPU, + }, + [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = { + .sign = 'u', + .realbits = 16, + .storagebits = 16, + .endianness = IIO_CPU, + }, +}; + +/* Extended scan types for 16-bit signed_chips. */ +static const struct iio_scan_type ad7380_scan_type_16_s[] = { [AD7380_SCAN_TYPE_NORMAL] = { .sign = 's', .realbits = 16, .storagebits = 16, - .endianness = IIO_CPU + .endianness = IIO_CPU, }, [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = { .sign = 's', .realbits = 18, .storagebits = 32, - .endianness = IIO_CPU + .endianness = IIO_CPU, + }, +}; + +/* Extended scan types for 16-bit unsigned chips. 
*/ +static const struct iio_scan_type ad7380_scan_type_16_u[] = { + [AD7380_SCAN_TYPE_NORMAL] = { + .sign = 'u', + .realbits = 16, + .storagebits = 16, + .endianness = IIO_CPU, + }, + [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = { + .sign = 'u', + .realbits = 18, + .storagebits = 32, + .endianness = IIO_CPU, }, }; -#define AD7380_CHANNEL(index, bits, diff) { \ - .type = IIO_VOLTAGE, \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ - ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \ - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ - BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ - .info_mask_shared_by_type_available = \ - BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ - .indexed = 1, \ - .differential = (diff), \ - .channel = (diff) ? (2 * (index)) : (index), \ - .channel2 = (diff) ? (2 * (index) + 1) : 0, \ - .scan_index = (index), \ - .has_ext_scan_type = 1, \ - .ext_scan_type = ad7380_scan_type_##bits, \ - .num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits),\ +#define AD7380_CHANNEL(index, bits, diff, sign) { \ + .type = IIO_VOLTAGE, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ + .info_mask_shared_by_type_available = \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ + .indexed = 1, \ + .differential = (diff), \ + .channel = (diff) ? (2 * (index)) : (index), \ + .channel2 = (diff) ? (2 * (index) + 1) : 0, \ + .scan_index = (index), \ + .has_ext_scan_type = 1, \ + .ext_scan_type = ad7380_scan_type_##bits##_##sign, \ + .num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits##_##sign), \ } -#define DEFINE_AD7380_2_CHANNEL(name, bits, diff) \ +#define DEFINE_AD7380_2_CHANNEL(name, bits, diff, sign) \ static const struct iio_chan_spec name[] = { \ - AD7380_CHANNEL(0, bits, diff), \ - AD7380_CHANNEL(1, bits, diff), \ + AD7380_CHANNEL(0, bits, diff, sign), \ + AD7380_CHANNEL(1, bits, diff, sign), \ IIO_CHAN_SOFT_TIMESTAMP(2), \ } -#define DEFINE_AD7380_4_CHANNEL(name, bits, diff) \ +#define DEFINE_AD7380_4_CHANNEL(name, bits, diff, sign) \ static const struct iio_chan_spec name[] = { \ - AD7380_CHANNEL(0, bits, diff), \ - AD7380_CHANNEL(1, bits, diff), \ - AD7380_CHANNEL(2, bits, diff), \ - AD7380_CHANNEL(3, bits, diff), \ + AD7380_CHANNEL(0, bits, diff, sign), \ + AD7380_CHANNEL(1, bits, diff, sign), \ + AD7380_CHANNEL(2, bits, diff, sign), \ + AD7380_CHANNEL(3, bits, diff, sign), \ IIO_CHAN_SOFT_TIMESTAMP(4), \ } +#define DEFINE_AD7380_8_CHANNEL(name, bits, diff, sign) \ +static const struct iio_chan_spec name[] = { \ + AD7380_CHANNEL(0, bits, diff, sign), \ + AD7380_CHANNEL(1, bits, diff, sign), \ + AD7380_CHANNEL(2, bits, diff, sign), \ + AD7380_CHANNEL(3, bits, diff, sign), \ + AD7380_CHANNEL(4, bits, diff, sign), \ + AD7380_CHANNEL(5, bits, diff, sign), \ + AD7380_CHANNEL(6, bits, diff, sign), \ + AD7380_CHANNEL(7, bits, diff, sign), \ + IIO_CHAN_SOFT_TIMESTAMP(8), \ +} + /* fully differential */ -DEFINE_AD7380_2_CHANNEL(ad7380_channels, 16, 1); -DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1); -DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1); -DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1); +DEFINE_AD7380_2_CHANNEL(ad7380_channels, 16, 1, s); +DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1, s); +DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1, s); +DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1, s); /* pseudo differential */ -DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0); -DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0); 
-DEFINE_AD7380_4_CHANNEL(ad7383_4_channels, 16, 0);
-DEFINE_AD7380_4_CHANNEL(ad7384_4_channels, 14, 0);
+DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0, s);
+DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0, s);
+DEFINE_AD7380_4_CHANNEL(ad7383_4_channels, 16, 0, s);
+DEFINE_AD7380_4_CHANNEL(ad7384_4_channels, 14, 0, s);
+
+/* Single-ended */
+DEFINE_AD7380_4_CHANNEL(ad7386_channels, 16, 0, u);
+DEFINE_AD7380_4_CHANNEL(ad7387_channels, 14, 0, u);
+DEFINE_AD7380_4_CHANNEL(ad7388_channels, 12, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7386_4_channels, 16, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7387_4_channels, 14, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7388_4_channels, 12, 0, u);
 static const char * const ad7380_2_channel_vcm_supplies[] = {
 	"aina", "ainb",
@@ -187,6 +262,60 @@ static const unsigned long ad7380_4_channel_scan_masks[] = {
 	0
 };
+/*
+ * Single-ended parts have a 2:1 multiplexer in front of each ADC.
+ *
+ * From an IIO point of view, all inputs are exported, i.e. ad7386/7/8
+ * export 4 channels and ad7386-4/7-4/8-4 export 8 channels.
+ *
+ * Inputs AinX0 of multiplexers correspond to the first half of IIO channels
+ * (i.e. 0-1 or 0-3) and inputs AinX1 correspond to the second half (i.e. 2-3 or
+ * 4-7). Example for AD7386/7/8 (2-channel parts):
+ *
+ * IIO | AD7386/7/8
+ * | +----------------------------
+ * | | _____ ______
+ * | | | | | |
+ * voltage0 | AinA0 --|--->| | | |
+ * | | | mux |----->| ADCA |---
+ * voltage2 | AinA1 --|--->| | | |
+ * | | |_____| |_____ |
+ * | | _____ ______
+ * | | | | | |
+ * voltage1 | AinB0 --|--->| | | |
+ * | | | mux |----->| ADCB |---
+ * voltage3 | AinB1 --|--->| | | |
+ * | | |_____| |______|
+ * | |
+ * | +----------------------------
+ *
+ * Since this is simultaneous sampling for AinX0 OR AinX1, we have two separate
+ * scan masks.
+ * When sequencer mode is enabled, the chip automatically cycles through
+ * AinX0 and AinX1 channels. From an IIO point of view, we can enable all
+ * channels, at the cost of an extra read, thus dividing the maximum rate by
+ * two.
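The first-half/second-half convention above reduces to a one-line computation. A minimal sketch, with a made-up helper name, taking num_simult_channels from the chip_info entries added further down (2 for ad7386/7/8, 4 for the -4 variants):

static unsigned int ad7380_example_mux_input(unsigned int scan_index,
					     unsigned int num_simult_channels)
{
	/* IIO channels 0..N-1 come from AinX0, channels N..2N-1 from AinX1 */
	return scan_index >= num_simult_channels ? 1 : 0;
}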
+ */ +enum { + AD7380_SCAN_MASK_CH_0, + AD7380_SCAN_MASK_CH_1, + AD7380_SCAN_MASK_SEQ, +}; + +static const unsigned long ad7380_2x2_channel_scan_masks[] = { + [AD7380_SCAN_MASK_CH_0] = GENMASK(1, 0), + [AD7380_SCAN_MASK_CH_1] = GENMASK(3, 2), + [AD7380_SCAN_MASK_SEQ] = GENMASK(3, 0), + 0 +}; + +static const unsigned long ad7380_2x4_channel_scan_masks[] = { + [AD7380_SCAN_MASK_CH_0] = GENMASK(3, 0), + [AD7380_SCAN_MASK_CH_1] = GENMASK(7, 4), + [AD7380_SCAN_MASK_SEQ] = GENMASK(7, 0), + 0 +}; + static const struct ad7380_timing_specs ad7380_timing = { .t_csh_ns = 10, }; @@ -208,6 +337,7 @@ static const struct ad7380_chip_info ad7380_chip_info = { .name = "ad7380", .channels = ad7380_channels, .num_channels = ARRAY_SIZE(ad7380_channels), + .num_simult_channels = 2, .available_scan_masks = ad7380_2_channel_scan_masks, .timing_specs = &ad7380_timing, }; @@ -216,6 +346,7 @@ static const struct ad7380_chip_info ad7381_chip_info = { .name = "ad7381", .channels = ad7381_channels, .num_channels = ARRAY_SIZE(ad7381_channels), + .num_simult_channels = 2, .available_scan_masks = ad7380_2_channel_scan_masks, .timing_specs = &ad7380_timing, }; @@ -224,6 +355,7 @@ static const struct ad7380_chip_info ad7383_chip_info = { .name = "ad7383", .channels = ad7383_channels, .num_channels = ARRAY_SIZE(ad7383_channels), + .num_simult_channels = 2, .vcm_supplies = ad7380_2_channel_vcm_supplies, .num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies), .available_scan_masks = ad7380_2_channel_scan_masks, @@ -234,16 +366,48 @@ static const struct ad7380_chip_info ad7384_chip_info = { .name = "ad7384", .channels = ad7384_channels, .num_channels = ARRAY_SIZE(ad7384_channels), + .num_simult_channels = 2, .vcm_supplies = ad7380_2_channel_vcm_supplies, .num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies), .available_scan_masks = ad7380_2_channel_scan_masks, .timing_specs = &ad7380_timing, }; +static const struct ad7380_chip_info ad7386_chip_info = { + .name = "ad7386", + .channels = ad7386_channels, + .num_channels = ARRAY_SIZE(ad7386_channels), + .num_simult_channels = 2, + .has_mux = true, + .available_scan_masks = ad7380_2x2_channel_scan_masks, + .timing_specs = &ad7380_timing, +}; + +static const struct ad7380_chip_info ad7387_chip_info = { + .name = "ad7387", + .channels = ad7387_channels, + .num_channels = ARRAY_SIZE(ad7387_channels), + .num_simult_channels = 2, + .has_mux = true, + .available_scan_masks = ad7380_2x2_channel_scan_masks, + .timing_specs = &ad7380_timing, +}; + +static const struct ad7380_chip_info ad7388_chip_info = { + .name = "ad7388", + .channels = ad7388_channels, + .num_channels = ARRAY_SIZE(ad7388_channels), + .num_simult_channels = 2, + .has_mux = true, + .available_scan_masks = ad7380_2x2_channel_scan_masks, + .timing_specs = &ad7380_timing, +}; + static const struct ad7380_chip_info ad7380_4_chip_info = { .name = "ad7380-4", .channels = ad7380_4_channels, .num_channels = ARRAY_SIZE(ad7380_4_channels), + .num_simult_channels = 4, .available_scan_masks = ad7380_4_channel_scan_masks, .timing_specs = &ad7380_4_timing, }; @@ -252,6 +416,7 @@ static const struct ad7380_chip_info ad7381_4_chip_info = { .name = "ad7381-4", .channels = ad7381_4_channels, .num_channels = ARRAY_SIZE(ad7381_4_channels), + .num_simult_channels = 4, .available_scan_masks = ad7380_4_channel_scan_masks, .timing_specs = &ad7380_4_timing, }; @@ -260,6 +425,7 @@ static const struct ad7380_chip_info ad7383_4_chip_info = { .name = "ad7383-4", .channels = ad7383_4_channels, .num_channels = 
ARRAY_SIZE(ad7383_4_channels),
+	.num_simult_channels = 4,
 	.vcm_supplies = ad7380_4_channel_vcm_supplies,
 	.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
 	.available_scan_masks = ad7380_4_channel_scan_masks,
@@ -270,23 +436,58 @@ static const struct ad7380_chip_info ad7384_4_chip_info = {
 	.name = "ad7384-4",
 	.channels = ad7384_4_channels,
 	.num_channels = ARRAY_SIZE(ad7384_4_channels),
+	.num_simult_channels = 4,
 	.vcm_supplies = ad7380_4_channel_vcm_supplies,
 	.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
 	.available_scan_masks = ad7380_4_channel_scan_masks,
 	.timing_specs = &ad7380_4_timing,
 };
+static const struct ad7380_chip_info ad7386_4_chip_info = {
+	.name = "ad7386-4",
+	.channels = ad7386_4_channels,
+	.num_channels = ARRAY_SIZE(ad7386_4_channels),
+	.num_simult_channels = 4,
+	.has_mux = true,
+	.available_scan_masks = ad7380_2x4_channel_scan_masks,
+	.timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7387_4_chip_info = {
+	.name = "ad7387-4",
+	.channels = ad7387_4_channels,
+	.num_channels = ARRAY_SIZE(ad7387_4_channels),
+	.num_simult_channels = 4,
+	.has_mux = true,
+	.available_scan_masks = ad7380_2x4_channel_scan_masks,
+	.timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7388_4_chip_info = {
+	.name = "ad7388-4",
+	.channels = ad7388_4_channels,
+	.num_channels = ARRAY_SIZE(ad7388_4_channels),
+	.num_simult_channels = 4,
+	.has_mux = true,
+	.available_scan_masks = ad7380_2x4_channel_scan_masks,
+	.timing_specs = &ad7380_4_timing,
+};
+
 struct ad7380_state {
 	const struct ad7380_chip_info *chip_info;
 	struct spi_device *spi;
 	struct regmap *regmap;
 	unsigned int oversampling_ratio;
 	bool resolution_boost_enabled;
+	unsigned int ch;
+	bool seq;
 	unsigned int vref_mv;
 	unsigned int vcm_mv[MAX_NUM_CHANNELS];
 	/* xfers, message an buffer for reading sample data */
-	struct spi_transfer xfer[2];
-	struct spi_message msg;
+	struct spi_transfer normal_xfer[2];
+	struct spi_message normal_msg;
+	struct spi_transfer seq_xfer[4];
+	struct spi_message seq_msg;
 	/*
 	 * DMA (thus cache coherency maintenance) requires the transfer buffers
 	 * to live in their own cache lines.
@@ -379,6 +580,43 @@ static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg,
 	unreachable();
 }
+/*
+ * When switching channels, the ADC requires an additional settling time.
+ * According to the datasheet, data is valid on the third CS low. We already
+ * have an extra toggle before each read (either direct reads or buffered reads)
+ * to sample correct data, so we just add a single CS toggle at the end of the
+ * register write.
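As a worked example of the settling/conversion-time rule described above (the same rule reappears in ad7380_update_xfers() below), the delay in nanoseconds could be derived as follows; T_CONVERT_NS, T_CONVERT_0_NS and T_CONVERT_X_NS are the driver's existing constants, and the helper name is illustrative only:

static unsigned int ad7380_example_conversion_delay_ns(unsigned int oversampling_ratio)
{
	/* oversampled burst: first conversion plus one extra slot per additional sample */
	if (oversampling_ratio > 1)
		return T_CONVERT_0_NS + T_CONVERT_X_NS * (oversampling_ratio - 1);

	/* single conversion */
	return T_CONVERT_NS;
}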
+ */ +static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch) +{ + struct spi_transfer xfer = { + .delay = { + .value = T_CONVERT_NS, + .unit = SPI_DELAY_UNIT_NSECS, + } + }; + int ret; + + if (st->ch == ch) + return 0; + + ret = regmap_update_bits(st->regmap, + AD7380_REG_ADDR_CONFIG1, + AD7380_CONFIG1_CH, + FIELD_PREP(AD7380_CONFIG1_CH, ch)); + + if (ret) + return ret; + + st->ch = ch; + + if (st->oversampling_ratio > 1) + xfer.delay.value = T_CONVERT_0_NS + + T_CONVERT_X_NS * (st->oversampling_ratio - 1); + + return spi_sync_transfer(st->spi, &xfer, 1); +} + /** * ad7380_update_xfers - update the SPI transfers base on the current scan type * @st: device instance specific state @@ -387,33 +625,47 @@ static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg, static void ad7380_update_xfers(struct ad7380_state *st, const struct iio_scan_type *scan_type) { - /* - * First xfer only triggers conversion and has to be long enough for - * all conversions to complete, which can be multiple conversion in the - * case of oversampling. Technically T_CONVERT_X_NS is lower for some - * chips, but we use the maximum value for simplicity for now. - */ - if (st->oversampling_ratio > 1) - st->xfer[0].delay.value = T_CONVERT_0_NS + T_CONVERT_X_NS * - (st->oversampling_ratio - 1); - else - st->xfer[0].delay.value = T_CONVERT_NS; - - st->xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS; + struct spi_transfer *xfer = st->seq ? st->seq_xfer : st->normal_xfer; + unsigned int t_convert = T_CONVERT_NS; /* - * Second xfer reads all channels. Data size depends on if resolution - * boost is enabled or not. + * In the case of oversampling, conversion time is higher than in normal + * mode. Technically T_CONVERT_X_NS is lower for some chips, but we use + * the maximum value for simplicity for now. */ - st->xfer[1].bits_per_word = scan_type->realbits; - st->xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) * - (st->chip_info->num_channels - 1); + if (st->oversampling_ratio > 1) + t_convert = T_CONVERT_0_NS + T_CONVERT_X_NS * + (st->oversampling_ratio - 1); + + if (st->seq) { + xfer[0].delay.value = xfer[1].delay.value = t_convert; + xfer[0].delay.unit = xfer[1].delay.unit = SPI_DELAY_UNIT_NSECS; + xfer[2].bits_per_word = xfer[3].bits_per_word = + scan_type->realbits; + xfer[2].len = xfer[3].len = + BITS_TO_BYTES(scan_type->storagebits) * + st->chip_info->num_simult_channels; + xfer[3].rx_buf = xfer[2].rx_buf + xfer[2].len; + /* Additional delay required here when oversampling is enabled */ + if (st->oversampling_ratio > 1) + xfer[2].delay.value = t_convert; + else + xfer[2].delay.value = 0; + xfer[2].delay.unit = SPI_DELAY_UNIT_NSECS; + } else { + xfer[0].delay.value = t_convert; + xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS; + xfer[1].bits_per_word = scan_type->realbits; + xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) * + st->chip_info->num_simult_channels; + } } static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev) { struct ad7380_state *st = iio_priv(indio_dev); const struct iio_scan_type *scan_type; + struct spi_message *msg = &st->normal_msg; /* * Currently, we always read all channels at the same time. 
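Both the normal and the sequencer transfers above size their data reads with the same rule; a short sketch, with the helper name invented for illustration and struct iio_scan_type plus the kernel's BITS_TO_BYTES() helper assumed available as in the driver:

static unsigned int ad7380_example_data_len(const struct iio_scan_type *scan_type,
					    unsigned int num_simult_channels)
{
	/* one sample per simultaneously converted channel, sized by the scan type */
	return BITS_TO_BYTES(scan_type->storagebits) * num_simult_channels;
}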
The scan_type @@ -423,16 +675,63 @@ static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev) if (IS_ERR(scan_type)) return PTR_ERR(scan_type); + if (st->chip_info->has_mux) { + unsigned int index; + int ret; + + /* + * Depending on the requested scan_mask and current state, + * we need to either change CH bit, or enable sequencer mode + * to sample correct data. + * Sequencer mode is enabled if active mask corresponds to all + * IIO channels enabled. Otherwise, CH bit is set. + */ + ret = iio_active_scan_mask_index(indio_dev); + if (ret < 0) + return ret; + + index = ret; + if (index == AD7380_SCAN_MASK_SEQ) { + ret = regmap_update_bits(st->regmap, + AD7380_REG_ADDR_CONFIG1, + AD7380_CONFIG1_SEQ, + FIELD_PREP(AD7380_CONFIG1_SEQ, 1)); + if (ret) + return ret; + msg = &st->seq_msg; + st->seq = true; + } else { + ret = ad7380_set_ch(st, index); + if (ret) + return ret; + } + + } + ad7380_update_xfers(st, scan_type); - return spi_optimize_message(st->spi, &st->msg); + return spi_optimize_message(st->spi, msg); } static int ad7380_triggered_buffer_postdisable(struct iio_dev *indio_dev) { struct ad7380_state *st = iio_priv(indio_dev); + struct spi_message *msg = &st->normal_msg; + int ret; + + if (st->seq) { + ret = regmap_update_bits(st->regmap, + AD7380_REG_ADDR_CONFIG1, + AD7380_CONFIG1_SEQ, + FIELD_PREP(AD7380_CONFIG1_SEQ, 0)); + if (ret) + return ret; + + msg = &st->seq_msg; + st->seq = false; + } - spi_unoptimize_message(&st->msg); + spi_unoptimize_message(msg); return 0; } @@ -447,9 +746,10 @@ static irqreturn_t ad7380_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ad7380_state *st = iio_priv(indio_dev); + struct spi_message *msg = st->seq ? &st->seq_msg : &st->normal_msg; int ret; - ret = spi_sync(st->spi, &st->msg); + ret = spi_sync(st->spi, msg); if (ret) goto out; @@ -465,20 +765,43 @@ static irqreturn_t ad7380_trigger_handler(int irq, void *p) static int ad7380_read_direct(struct ad7380_state *st, unsigned int scan_index, const struct iio_scan_type *scan_type, int *val) { + unsigned int index = scan_index; int ret; + if (st->chip_info->has_mux) { + unsigned int ch = 0; + + if (index >= st->chip_info->num_simult_channels) { + index -= st->chip_info->num_simult_channels; + ch = 1; + } + + ret = ad7380_set_ch(st, ch); + if (ret) + return ret; + } + ad7380_update_xfers(st, scan_type); - ret = spi_sync(st->spi, &st->msg); + ret = spi_sync(st->spi, &st->normal_msg); if (ret < 0) return ret; - if (scan_type->storagebits > 16) - *val = sign_extend32(*(u32 *)(st->scan_data + 4 * scan_index), - scan_type->realbits - 1); - else - *val = sign_extend32(*(u16 *)(st->scan_data + 2 * scan_index), - scan_type->realbits - 1); + if (scan_type->storagebits > 16) { + if (scan_type->sign == 's') + *val = sign_extend32(*(u32 *)(st->scan_data + 4 * index), + scan_type->realbits - 1); + else + *val = *(u32 *)(st->scan_data + 4 * index) & + GENMASK(scan_type->realbits - 1, 0); + } else { + if (scan_type->sign == 's') + *val = sign_extend32(*(u16 *)(st->scan_data + 2 * index), + scan_type->realbits - 1); + else + *val = *(u16 *)(st->scan_data + 2 * index) & + GENMASK(scan_type->realbits - 1, 0); + } return IIO_VAL_INT; } @@ -655,6 +978,8 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref) /* This is the default value after reset. 
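The raw-sample handling that ad7380_read_direct() gains above comes down to one decision per scan type: sign-extend signed samples, mask unsigned ones. A compact sketch of just that step, using the same sign_extend32() and GENMASK() helpers as the hunk (the function name is made up):

static int ad7380_example_raw_to_val(u32 raw, const struct iio_scan_type *scan_type)
{
	if (scan_type->sign == 's')
		return sign_extend32(raw, scan_type->realbits - 1);

	/* unsigned scan types: keep only realbits */
	return raw & GENMASK(scan_type->realbits - 1, 0);
}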
*/ st->oversampling_ratio = 1; + st->ch = 0; + st->seq = false; /* SPI 1-wire mode */ return regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG2, @@ -756,21 +1081,45 @@ static int ad7380_probe(struct spi_device *spi) "failed to allocate register map\n"); /* - * Setting up a low latency read for getting sample data. Used for both - * direct read an triggered buffer. Additional fields will be set up in - * ad7380_update_xfers() based on the current state of the driver at the - * time of the read. + * Setting up xfer structures for both normal and sequence mode. These + * struct are used for both direct read and triggered buffer. Additional + * fields will be set up in ad7380_update_xfers() based on the current + * state of the driver at the time of the read. + */ + + /* + * In normal mode a read is composed of two steps: + * - first, toggle CS (no data xfer) to trigger a conversion + * - then, read data */ + st->normal_xfer[0].cs_change = 1; + st->normal_xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns; + st->normal_xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + st->normal_xfer[1].rx_buf = st->scan_data; - /* toggle CS (no data xfer) to trigger a conversion */ - st->xfer[0].cs_change = 1; - st->xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns; - st->xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + spi_message_init_with_transfers(&st->normal_msg, st->normal_xfer, + ARRAY_SIZE(st->normal_xfer)); + /* + * In sequencer mode a read is composed of four steps: + * - CS toggle (no data xfer) to get the right point in the sequence + * - CS toggle (no data xfer) to trigger a conversion of AinX0 and + * acquisition of AinX1 + * - 2 data reads, to read AinX0 and AinX1 + */ + st->seq_xfer[0].cs_change = 1; + st->seq_xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns; + st->seq_xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + st->seq_xfer[1].cs_change = 1; + st->seq_xfer[1].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns; + st->seq_xfer[1].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; - /* then do a second xfer to read the data */ - st->xfer[1].rx_buf = st->scan_data; + st->seq_xfer[2].rx_buf = st->scan_data; + st->seq_xfer[2].cs_change = 1; + st->seq_xfer[2].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns; + st->seq_xfer[2].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; - spi_message_init_with_transfers(&st->msg, st->xfer, ARRAY_SIZE(st->xfer)); + spi_message_init_with_transfers(&st->seq_msg, st->seq_xfer, + ARRAY_SIZE(st->seq_xfer)); indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = st->chip_info->num_channels; @@ -798,10 +1147,16 @@ static const struct of_device_id ad7380_of_match_table[] = { { .compatible = "adi,ad7381", .data = &ad7381_chip_info }, { .compatible = "adi,ad7383", .data = &ad7383_chip_info }, { .compatible = "adi,ad7384", .data = &ad7384_chip_info }, + { .compatible = "adi,ad7386", .data = &ad7386_chip_info }, + { .compatible = "adi,ad7387", .data = &ad7387_chip_info }, + { .compatible = "adi,ad7388", .data = &ad7388_chip_info }, { .compatible = "adi,ad7380-4", .data = &ad7380_4_chip_info }, { .compatible = "adi,ad7381-4", .data = &ad7381_4_chip_info }, { .compatible = "adi,ad7383-4", .data = &ad7383_4_chip_info }, { .compatible = "adi,ad7384-4", .data = &ad7384_4_chip_info }, + { .compatible = "adi,ad7386-4", .data = &ad7386_4_chip_info }, + { .compatible = "adi,ad7387-4", .data = &ad7387_4_chip_info }, + { .compatible = "adi,ad7388-4", .data = 
&ad7388_4_chip_info }, { } }; @@ -810,10 +1165,16 @@ static const struct spi_device_id ad7380_id_table[] = { { "ad7381", (kernel_ulong_t)&ad7381_chip_info }, { "ad7383", (kernel_ulong_t)&ad7383_chip_info }, { "ad7384", (kernel_ulong_t)&ad7384_chip_info }, + { "ad7386", (kernel_ulong_t)&ad7386_chip_info }, + { "ad7387", (kernel_ulong_t)&ad7387_chip_info }, + { "ad7388", (kernel_ulong_t)&ad7388_chip_info }, { "ad7380-4", (kernel_ulong_t)&ad7380_4_chip_info }, { "ad7381-4", (kernel_ulong_t)&ad7381_4_chip_info }, { "ad7383-4", (kernel_ulong_t)&ad7383_4_chip_info }, { "ad7384-4", (kernel_ulong_t)&ad7384_4_chip_info }, + { "ad7386-4", (kernel_ulong_t)&ad7386_4_chip_info }, + { "ad7387-4", (kernel_ulong_t)&ad7387_4_chip_info }, + { "ad7388-4", (kernel_ulong_t)&ad7388_4_chip_info }, { } }; MODULE_DEVICE_TABLE(spi, ad7380_id_table); diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c index 80aebed47d1fa6..aeb8e383fe71b2 100644 --- a/drivers/iio/adc/ad7476.c +++ b/drivers/iio/adc/ad7476.c @@ -409,35 +409,35 @@ static int ad7476_probe(struct spi_device *spi) } static const struct spi_device_id ad7476_id[] = { - {"ad7091", ID_AD7091}, - {"ad7091r", ID_AD7091R}, - {"ad7273", ID_AD7273}, - {"ad7274", ID_AD7274}, - {"ad7276", ID_AD7276}, - {"ad7277", ID_AD7277}, - {"ad7278", ID_AD7278}, - {"ad7466", ID_AD7466}, - {"ad7467", ID_AD7467}, - {"ad7468", ID_AD7468}, - {"ad7475", ID_AD7475}, - {"ad7476", ID_AD7466}, - {"ad7476a", ID_AD7466}, - {"ad7477", ID_AD7467}, - {"ad7477a", ID_AD7467}, - {"ad7478", ID_AD7468}, - {"ad7478a", ID_AD7468}, - {"ad7495", ID_AD7495}, - {"ad7910", ID_AD7467}, - {"ad7920", ID_AD7466}, - {"ad7940", ID_AD7940}, - {"adc081s", ID_ADC081S}, - {"adc101s", ID_ADC101S}, - {"adc121s", ID_ADC121S}, - {"ads7866", ID_ADS7866}, - {"ads7867", ID_ADS7867}, - {"ads7868", ID_ADS7868}, - {"ltc2314-14", ID_LTC2314_14}, - {} + { "ad7091", ID_AD7091 }, + { "ad7091r", ID_AD7091R }, + { "ad7273", ID_AD7273 }, + { "ad7274", ID_AD7274 }, + { "ad7276", ID_AD7276}, + { "ad7277", ID_AD7277 }, + { "ad7278", ID_AD7278 }, + { "ad7466", ID_AD7466 }, + { "ad7467", ID_AD7467 }, + { "ad7468", ID_AD7468 }, + { "ad7475", ID_AD7475 }, + { "ad7476", ID_AD7466 }, + { "ad7476a", ID_AD7466 }, + { "ad7477", ID_AD7467 }, + { "ad7477a", ID_AD7467 }, + { "ad7478", ID_AD7468 }, + { "ad7478a", ID_AD7468 }, + { "ad7495", ID_AD7495 }, + { "ad7910", ID_AD7467 }, + { "ad7920", ID_AD7466 }, + { "ad7940", ID_AD7940 }, + { "adc081s", ID_ADC081S }, + { "adc101s", ID_ADC101S }, + { "adc121s", ID_ADC121S }, + { "ads7866", ID_ADS7866 }, + { "ads7867", ID_ADS7867 }, + { "ads7868", ID_ADS7868 }, + { "ltc2314-14", ID_LTC2314_14 }, + { } }; MODULE_DEVICE_TABLE(spi, ad7476_id); diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index c321c6ef48df41..9b457472d49c17 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -70,19 +70,17 @@ static int ad7606_reg_access(struct iio_dev *indio_dev, struct ad7606_state *st = iio_priv(indio_dev); int ret; - mutex_lock(&st->lock); + guard(mutex)(&st->lock); + if (readval) { ret = st->bops->reg_read(st, reg); if (ret < 0) - goto err_unlock; + return ret; *readval = ret; - ret = 0; + return 0; } else { - ret = st->bops->reg_write(st, reg, writeval); + return st->bops->reg_write(st, reg, writeval); } -err_unlock: - mutex_unlock(&st->lock); - return ret; } static int ad7606_read_samples(struct ad7606_state *st) @@ -100,19 +98,19 @@ static irqreturn_t ad7606_trigger_handler(int irq, void *p) struct ad7606_state *st = iio_priv(indio_dev); int ret; - 
mutex_lock(&st->lock); + guard(mutex)(&st->lock); ret = ad7606_read_samples(st); - if (ret == 0) - iio_push_to_buffers_with_timestamp(indio_dev, st->data, - iio_get_time_ns(indio_dev)); + if (ret) + goto error_ret; + iio_push_to_buffers_with_timestamp(indio_dev, st->data, + iio_get_time_ns(indio_dev)); +error_ret: iio_trigger_notify_done(indio_dev->trig); /* The rising edge of the CONVST signal starts a new conversion. */ gpiod_set_value(st->gpio_convst, 1); - mutex_unlock(&st->lock); - return IRQ_HANDLED; } @@ -212,9 +210,9 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val) struct ad7606_state *st = iio_priv(indio_dev); DECLARE_BITMAP(values, 3); - values[0] = val; + values[0] = val & GENMASK(2, 0); - gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc, + gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc, st->gpio_os->info, values); /* AD7616 requires a reset to update value */ @@ -233,19 +231,17 @@ static int ad7606_write_raw(struct iio_dev *indio_dev, struct ad7606_state *st = iio_priv(indio_dev); int i, ret, ch = 0; + guard(mutex)(&st->lock); + switch (mask) { case IIO_CHAN_INFO_SCALE: - mutex_lock(&st->lock); i = find_closest(val2, st->scale_avail, st->num_scales); if (st->sw_mode_en) ch = chan->address; ret = st->write_scale(indio_dev, ch, i); - if (ret < 0) { - mutex_unlock(&st->lock); + if (ret < 0) return ret; - } st->range[ch] = i; - mutex_unlock(&st->lock); return 0; case IIO_CHAN_INFO_OVERSAMPLING_RATIO: @@ -253,14 +249,9 @@ static int ad7606_write_raw(struct iio_dev *indio_dev, return -EINVAL; i = find_closest(val, st->oversampling_avail, st->num_os_ratios); - mutex_lock(&st->lock); ret = st->write_os(indio_dev, i); - if (ret < 0) { - mutex_unlock(&st->lock); + if (ret < 0) return ret; - } - st->oversampling = st->oversampling_avail[i]; - mutex_unlock(&st->lock); return 0; default: @@ -419,7 +410,7 @@ static int ad7606_request_gpios(struct ad7606_state *st) return PTR_ERR(st->gpio_range); st->gpio_standby = devm_gpiod_get_optional(dev, "standby", - GPIOD_OUT_HIGH); + GPIOD_OUT_LOW); if (IS_ERR(st->gpio_standby)) return PTR_ERR(st->gpio_standby); @@ -662,7 +653,7 @@ static int ad7606_suspend(struct device *dev) if (st->gpio_standby) { gpiod_set_value(st->gpio_range, 1); - gpiod_set_value(st->gpio_standby, 0); + gpiod_set_value(st->gpio_standby, 1); } return 0; diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c index 6bc587b20f05da..02d8c309304e24 100644 --- a/drivers/iio/adc/ad7606_par.c +++ b/drivers/iio/adc/ad7606_par.c @@ -125,7 +125,7 @@ static const struct of_device_id ad7606_of_match[] = { { .compatible = "adi,ad7606-4" }, { .compatible = "adi,ad7606-6" }, { .compatible = "adi,ad7606-8" }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ad7606_of_match); diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c index 263a778bcf2539..62ec1219530799 100644 --- a/drivers/iio/adc/ad7606_spi.c +++ b/drivers/iio/adc/ad7606_spi.c @@ -249,8 +249,9 @@ static int ad7616_sw_mode_config(struct iio_dev *indio_dev) static int ad7606B_sw_mode_config(struct iio_dev *indio_dev) { struct ad7606_state *st = iio_priv(indio_dev); - unsigned long os[3] = {1}; + DECLARE_BITMAP(os, 3); + bitmap_fill(os, 3); /* * Software mode is enabled when all three oversampling * pins are set to high. 
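The ad7606 hunks above replace mutex_lock()/mutex_unlock() pairs with scoped guards, so every early return drops the lock automatically. A minimal, self-contained sketch of the pattern with placeholder names (the real code guards st->lock in ad7606_reg_access(), the trigger handler and ad7606_write_raw()):

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct example_state {
	struct mutex lock;
	int cached;
};

static int example_update(struct example_state *st, int val)
{
	guard(mutex)(&st->lock);	/* unlocked on every return below */

	if (val < 0)
		return -EINVAL;

	st->cached = val;
	return 0;
}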
If oversampling gpios are defined @@ -258,7 +259,7 @@ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev) * otherwise, they must be hardwired to VDD */ if (st->gpio_os) { - gpiod_set_array_value(ARRAY_SIZE(os), + gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc, st->gpio_os->info, os); } /* OS of 128 and 256 are available only in software mode */ @@ -333,7 +334,7 @@ static const struct spi_device_id ad7606_id_table[] = { { "ad7606-8", ID_AD7606_8 }, { "ad7606b", ID_AD7606B }, { "ad7616", ID_AD7616 }, - {} + { } }; MODULE_DEVICE_TABLE(spi, ad7606_id_table); @@ -344,7 +345,7 @@ static const struct of_device_id ad7606_of_match[] = { { .compatible = "adi,ad7606-8" }, { .compatible = "adi,ad7606b" }, { .compatible = "adi,ad7616" }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ad7606_of_match); diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c index 3079a0872947e0..4d570383ef0251 100644 --- a/drivers/iio/adc/ad7766.c +++ b/drivers/iio/adc/ad7766.c @@ -291,13 +291,13 @@ static int ad7766_probe(struct spi_device *spi) } static const struct spi_device_id ad7766_id[] = { - {"ad7766", ID_AD7766}, - {"ad7766-1", ID_AD7766_1}, - {"ad7766-2", ID_AD7766_2}, - {"ad7767", ID_AD7766}, - {"ad7767-1", ID_AD7766_1}, - {"ad7767-2", ID_AD7766_2}, - {} + { "ad7766", ID_AD7766 }, + { "ad7766-1", ID_AD7766_1 }, + { "ad7766-2", ID_AD7766_2 }, + { "ad7767", ID_AD7766 }, + { "ad7767-1", ID_AD7766_1 }, + { "ad7767-2", ID_AD7766_2 }, + { } }; MODULE_DEVICE_TABLE(spi, ad7766_id); diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index 70a25949142c0d..113703fb724544 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -544,13 +544,10 @@ static int ad7768_set_channel_label(struct iio_dev *indio_dev, { struct ad7768_state *st = iio_priv(indio_dev); struct device *device = indio_dev->dev.parent; - struct fwnode_handle *fwnode; - struct fwnode_handle *child; const char *label; int crt_ch = 0; - fwnode = dev_fwnode(device); - fwnode_for_each_child_node(fwnode, child) { + device_for_each_child_node_scoped(device, child) { if (fwnode_property_read_u32(child, "reg", &crt_ch)) continue; @@ -658,7 +655,7 @@ MODULE_DEVICE_TABLE(spi, ad7768_id_table); static const struct of_device_id ad7768_of_match[] = { { .compatible = "adi,ad7768-1" }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ad7768_of_match); diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c index a813fe04787c60..e9b0c577c9cca4 100644 --- a/drivers/iio/adc/ad7780.c +++ b/drivers/iio/adc/ad7780.c @@ -355,11 +355,11 @@ static int ad7780_probe(struct spi_device *spi) } static const struct spi_device_id ad7780_id[] = { - {"ad7170", ID_AD7170}, - {"ad7171", ID_AD7171}, - {"ad7780", ID_AD7780}, - {"ad7781", ID_AD7781}, - {} + { "ad7170", ID_AD7170 }, + { "ad7171", ID_AD7171 }, + { "ad7780", ID_AD7780 }, + { "ad7781", ID_AD7781 }, + { } }; MODULE_DEVICE_TABLE(spi, ad7780_id); diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index d4ad7e0b515a23..abebd519cafa30 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -824,16 +824,16 @@ static int ad7793_probe(struct spi_device *spi) } static const struct spi_device_id ad7793_id[] = { - {"ad7785", ID_AD7785}, - {"ad7792", ID_AD7792}, - {"ad7793", ID_AD7793}, - {"ad7794", ID_AD7794}, - {"ad7795", ID_AD7795}, - {"ad7796", ID_AD7796}, - {"ad7797", ID_AD7797}, - {"ad7798", ID_AD7798}, - {"ad7799", ID_AD7799}, - {} + { "ad7785", ID_AD7785 }, + { "ad7792", ID_AD7792 }, + { "ad7793", ID_AD7793 }, + { "ad7794", ID_AD7794 }, + { 
"ad7795", ID_AD7795 }, + { "ad7796", ID_AD7796 }, + { "ad7797", ID_AD7797 }, + { "ad7798", ID_AD7798 }, + { "ad7799", ID_AD7799 }, + { } }; MODULE_DEVICE_TABLE(spi, ad7793_id); diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c index 965bdc8aa6961f..6265ce7df70306 100644 --- a/drivers/iio/adc/ad7887.c +++ b/drivers/iio/adc/ad7887.c @@ -329,8 +329,8 @@ static int ad7887_probe(struct spi_device *spi) } static const struct spi_device_id ad7887_id[] = { - {"ad7887", ID_AD7887}, - {} + { "ad7887", ID_AD7887 }, + { } }; MODULE_DEVICE_TABLE(spi, ad7887_id); diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c index 9d6bf6d0927a4b..09680015a7ab54 100644 --- a/drivers/iio/adc/ad7923.c +++ b/drivers/iio/adc/ad7923.c @@ -361,14 +361,14 @@ static int ad7923_probe(struct spi_device *spi) } static const struct spi_device_id ad7923_id[] = { - {"ad7904", AD7904}, - {"ad7914", AD7914}, - {"ad7923", AD7924}, - {"ad7924", AD7924}, - {"ad7908", AD7908}, - {"ad7918", AD7918}, - {"ad7928", AD7928}, - {} + { "ad7904", AD7904 }, + { "ad7914", AD7914 }, + { "ad7923", AD7924 }, + { "ad7924", AD7924 }, + { "ad7908", AD7908 }, + { "ad7918", AD7918 }, + { "ad7928", AD7928 }, + { } }; MODULE_DEVICE_TABLE(spi, ad7923_id); @@ -380,7 +380,7 @@ static const struct of_device_id ad7923_of_match[] = { { .compatible = "adi,ad7908", }, { .compatible = "adi,ad7918", }, { .compatible = "adi,ad7928", }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ad7923_of_match); diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index 0f0dcd9ca6b60d..0f107e3fc2c855 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -237,7 +237,8 @@ static int ad799x_update_scan_mode(struct iio_dev *indio_dev, if (!st->rx_buf) return -ENOMEM; - st->transfer_size = bitmap_weight(scan_mask, indio_dev->masklength) * 2; + st->transfer_size = bitmap_weight(scan_mask, + iio_get_masklength(indio_dev)) * 2; switch (st->id) { case ad7992: diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c index 41c1b519c57360..05fb7a75531fb7 100644 --- a/drivers/iio/adc/ad9467.c +++ b/drivers/iio/adc/ad9467.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -104,7 +105,30 @@ #define AD9467_DEF_OUTPUT_MODE 0x08 #define AD9467_REG_VREF_MASK 0x0F +/* + * Analog Devices AD9643 14-Bit, 170/210/250 MSPS ADC + */ + +#define CHIPID_AD9643 0x82 +#define AD9643_REG_VREF_MASK 0x1F + +/* + * Analog Devices AD9652 16-bit 310 MSPS ADC + */ + +#define CHIPID_AD9652 0xC1 +#define AD9652_REG_VREF_MASK 0xC0 + +/* + * Analog Devices AD9649 14-bit 20/40/65/80 MSPS ADC + */ + +#define CHIPID_AD9649 0x6F +#define AD9649_TEST_POINTS 8 + #define AD9647_MAX_TEST_POINTS 32 +#define AD9467_CAN_INVERT(st) \ + (!(st)->info->has_dco || (st)->info->has_dco_invert) struct ad9467_chip_info { const char *name; @@ -113,12 +137,23 @@ struct ad9467_chip_info { unsigned int num_channels; const unsigned int (*scale_table)[2]; int num_scales; + unsigned long test_mask; + unsigned int test_mask_len; unsigned long max_rate; unsigned int default_output_mode; unsigned int vref_mask; unsigned int num_lanes; + unsigned int dco_en; + unsigned int test_points; /* data clock output */ bool has_dco; + bool has_dco_invert; +}; + +struct ad9467_chan_test_mode { + struct ad9467_state *st; + unsigned int idx; + u8 mode; }; struct ad9467_state { @@ -126,6 +161,8 @@ struct ad9467_state { struct iio_backend *back; struct spi_device *spi; struct clk *clk; + /* used for debugfs */ + struct ad9467_chan_test_mode *chan_test; unsigned int 
output_mode; unsigned int (*scales)[2]; /* @@ -138,6 +175,8 @@ struct ad9467_state { * at the io delay control section. */ DECLARE_BITMAP(calib_map, AD9647_MAX_TEST_POINTS * 2); + /* number of bits of the map */ + unsigned int calib_map_size; struct gpio_desc *pwrdown_gpio; /* ensure consistent state obtained on multiple related accesses */ struct mutex lock; @@ -211,6 +250,24 @@ static const unsigned int ad9467_scale_table[][2] = { {2300, 8}, {2400, 9}, {2500, 10}, }; +static const unsigned int ad9643_scale_table[][2] = { + {2087, 0x0F}, {2065, 0x0E}, {2042, 0x0D}, {2020, 0x0C}, {1997, 0x0B}, + {1975, 0x0A}, {1952, 0x09}, {1930, 0x08}, {1907, 0x07}, {1885, 0x06}, + {1862, 0x05}, {1840, 0x04}, {1817, 0x03}, {1795, 0x02}, {1772, 0x01}, + {1750, 0x00}, {1727, 0x1F}, {1704, 0x1E}, {1681, 0x1D}, {1658, 0x1C}, + {1635, 0x1B}, {1612, 0x1A}, {1589, 0x19}, {1567, 0x18}, {1544, 0x17}, + {1521, 0x16}, {1498, 0x15}, {1475, 0x14}, {1452, 0x13}, {1429, 0x12}, + {1406, 0x11}, {1383, 0x10}, +}; + +static const unsigned int ad9649_scale_table[][2] = { + {2000, 0}, +}; + +static const unsigned int ad9652_scale_table[][2] = { + {1250, 0}, {1125, 1}, {1200, 2}, {1250, 3}, {1000, 5}, +}; + static void __ad9467_get_scale(struct ad9467_state *st, int index, unsigned int *val, unsigned int *val2) { @@ -224,14 +281,14 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index, *val2 = tmp % 1000000; } -#define AD9467_CHAN(_chan, _si, _bits, _sign) \ +#define AD9467_CHAN(_chan, avai_mask, _si, _bits, _sign) \ { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = _chan, \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ - .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_type_available = avai_mask, \ .scan_index = _si, \ .scan_type = { \ .sign = _sign, \ @@ -241,11 +298,42 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index, } static const struct iio_chan_spec ad9434_channels[] = { - AD9467_CHAN(0, 0, 12, 's'), + AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 12, 's'), }; static const struct iio_chan_spec ad9467_channels[] = { - AD9467_CHAN(0, 0, 16, 's'), + AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 16, 's'), +}; + +static const struct iio_chan_spec ad9643_channels[] = { + AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 14, 's'), + AD9467_CHAN(1, BIT(IIO_CHAN_INFO_SCALE), 1, 14, 's'), +}; + +static const struct iio_chan_spec ad9649_channels[] = { + AD9467_CHAN(0, 0, 0, 14, 's'), +}; + +static const struct iio_chan_spec ad9652_channels[] = { + AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 16, 's'), + AD9467_CHAN(1, BIT(IIO_CHAN_INFO_SCALE), 1, 16, 's'), +}; + +static const char * const ad9467_test_modes[] = { + [AN877_ADC_TESTMODE_OFF] = "off", + [AN877_ADC_TESTMODE_MIDSCALE_SHORT] = "midscale_short", + [AN877_ADC_TESTMODE_POS_FULLSCALE] = "pos_fullscale", + [AN877_ADC_TESTMODE_NEG_FULLSCALE] = "neg_fullscale", + [AN877_ADC_TESTMODE_ALT_CHECKERBOARD] = "checkerboard", + [AN877_ADC_TESTMODE_PN23_SEQ] = "prbs23", + [AN877_ADC_TESTMODE_PN9_SEQ] = "prbs9", + [AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE] = "one_zero_toggle", + [AN877_ADC_TESTMODE_USER] = "user", + [AN877_ADC_TESTMODE_BIT_TOGGLE] = "bit_toggle", + [AN877_ADC_TESTMODE_SYNC] = "sync", + [AN877_ADC_TESTMODE_ONE_BIT_HIGH] = "one_bit_high", + [AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY] = "mixed_bit_frequency", + [AN877_ADC_TESTMODE_RAMP] = "ramp", }; static const struct ad9467_chip_info ad9467_chip_tbl = { @@ -256,6 +344,10 @@ static const struct ad9467_chip_info 
ad9467_chip_tbl = { .num_scales = ARRAY_SIZE(ad9467_scale_table), .channels = ad9467_channels, .num_channels = ARRAY_SIZE(ad9467_channels), + .test_points = AD9647_MAX_TEST_POINTS, + .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE, + AN877_ADC_TESTMODE_OFF), + .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1, .default_output_mode = AD9467_DEF_OUTPUT_MODE, .vref_mask = AD9467_REG_VREF_MASK, .num_lanes = 8, @@ -269,6 +361,9 @@ static const struct ad9467_chip_info ad9434_chip_tbl = { .num_scales = ARRAY_SIZE(ad9434_scale_table), .channels = ad9434_channels, .num_channels = ARRAY_SIZE(ad9434_channels), + .test_points = AD9647_MAX_TEST_POINTS, + .test_mask = GENMASK(AN877_ADC_TESTMODE_USER, AN877_ADC_TESTMODE_OFF), + .test_mask_len = AN877_ADC_TESTMODE_USER + 1, .default_output_mode = AD9434_DEF_OUTPUT_MODE, .vref_mask = AD9434_REG_VREF_MASK, .num_lanes = 6, @@ -282,17 +377,78 @@ static const struct ad9467_chip_info ad9265_chip_tbl = { .num_scales = ARRAY_SIZE(ad9265_scale_table), .channels = ad9467_channels, .num_channels = ARRAY_SIZE(ad9467_channels), + .test_points = AD9647_MAX_TEST_POINTS, + .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE, + AN877_ADC_TESTMODE_OFF), + .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1, .default_output_mode = AD9265_DEF_OUTPUT_MODE, .vref_mask = AD9265_REG_VREF_MASK, .has_dco = true, + .has_dco_invert = true, +}; + +static const struct ad9467_chip_info ad9643_chip_tbl = { + .name = "ad9643", + .id = CHIPID_AD9643, + .max_rate = 250000000UL, + .scale_table = ad9643_scale_table, + .num_scales = ARRAY_SIZE(ad9643_scale_table), + .channels = ad9643_channels, + .num_channels = ARRAY_SIZE(ad9643_channels), + .test_points = AD9647_MAX_TEST_POINTS, + .test_mask = BIT(AN877_ADC_TESTMODE_RAMP) | + GENMASK(AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY, AN877_ADC_TESTMODE_OFF), + .test_mask_len = AN877_ADC_TESTMODE_RAMP + 1, + .vref_mask = AD9643_REG_VREF_MASK, + .has_dco = true, + .has_dco_invert = true, + .dco_en = AN877_ADC_DCO_DELAY_ENABLE, +}; + +static const struct ad9467_chip_info ad9649_chip_tbl = { + .name = "ad9649", + .id = CHIPID_AD9649, + .max_rate = 80000000UL, + .scale_table = ad9649_scale_table, + .num_scales = ARRAY_SIZE(ad9649_scale_table), + .channels = ad9649_channels, + .num_channels = ARRAY_SIZE(ad9649_channels), + .test_points = AD9649_TEST_POINTS, + .test_mask = GENMASK(AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY, + AN877_ADC_TESTMODE_OFF), + .test_mask_len = AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY + 1, + .has_dco = true, + .has_dco_invert = true, + .dco_en = AN877_ADC_DCO_DELAY_ENABLE, +}; + +static const struct ad9467_chip_info ad9652_chip_tbl = { + .name = "ad9652", + .id = CHIPID_AD9652, + .max_rate = 310000000UL, + .scale_table = ad9652_scale_table, + .num_scales = ARRAY_SIZE(ad9652_scale_table), + .channels = ad9652_channels, + .num_channels = ARRAY_SIZE(ad9652_channels), + .test_points = AD9647_MAX_TEST_POINTS, + .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE, + AN877_ADC_TESTMODE_OFF), + .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1, + .vref_mask = AD9652_REG_VREF_MASK, + .has_dco = true, }; static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2) { const struct ad9467_chip_info *info = st->info; - unsigned int i, vref_val; + unsigned int vref_val; + unsigned int i = 0; int ret; + /* nothing to read if we only have one possible scale */ + if (info->num_scales == 1) + goto out_get_scale; + ret = ad9467_spi_read(st, AN877_ADC_REG_VREF); if (ret < 0) return ret; @@ -307,6 +463,7 @@ static 
int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2) if (i == info->num_scales) return -ERANGE; +out_get_scale: __ad9467_get_scale(st, i, val, val2); return IIO_VAL_INT_PLUS_MICRO; @@ -321,6 +478,8 @@ static int ad9467_set_scale(struct ad9467_state *st, int val, int val2) if (val != 0) return -EINVAL; + if (info->num_scales == 1) + return -EOPNOTSUPP; for (i = 0; i < info->num_scales; i++) { __ad9467_get_scale(st, i, &scale_val[0], &scale_val[1]); @@ -352,40 +511,96 @@ static int ad9467_outputmode_set(struct ad9467_state *st, unsigned int mode) AN877_ADC_TRANSFER_SYNC); } -static int ad9647_calibrate_prepare(struct ad9467_state *st) +static int ad9467_testmode_set(struct ad9467_state *st, unsigned int chan, + unsigned int test_mode) +{ + int ret; + + if (st->info->num_channels > 1) { + /* so that the test mode is only applied to one channel */ + ret = ad9467_spi_write(st, AN877_ADC_REG_CHAN_INDEX, BIT(chan)); + if (ret) + return ret; + } + + ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO, test_mode); + if (ret) + return ret; + + if (st->info->num_channels > 1) { + /* go to default state where all channels get write commands */ + ret = ad9467_spi_write(st, AN877_ADC_REG_CHAN_INDEX, + GENMASK(st->info->num_channels - 1, 0)); + if (ret) + return ret; + } + + return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, + AN877_ADC_TRANSFER_SYNC); +} + +static int ad9467_backend_testmode_on(struct ad9467_state *st, + unsigned int chan, + enum iio_backend_test_pattern pattern) { struct iio_backend_data_fmt data = { .enable = false, }; - unsigned int c; int ret; - ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO, - AN877_ADC_TESTMODE_PN9_SEQ); + ret = iio_backend_data_format_set(st->back, chan, &data); + if (ret) + return ret; + + ret = iio_backend_test_pattern_set(st->back, chan, pattern); + if (ret) + return ret; + + return iio_backend_chan_enable(st->back, chan); +} + +static int ad9467_backend_testmode_off(struct ad9467_state *st, + unsigned int chan) +{ + struct iio_backend_data_fmt data = { + .enable = true, + .sign_extend = true, + }; + int ret; + + ret = iio_backend_chan_disable(st->back, chan); if (ret) return ret; - ret = ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, - AN877_ADC_TRANSFER_SYNC); + ret = iio_backend_test_pattern_set(st->back, chan, + IIO_BACKEND_NO_TEST_PATTERN); if (ret) return ret; + return iio_backend_data_format_set(st->back, chan, &data); +} + +static int ad9647_calibrate_prepare(struct ad9467_state *st) +{ + unsigned int c; + int ret; + ret = ad9467_outputmode_set(st, st->info->default_output_mode); if (ret) return ret; for (c = 0; c < st->info->num_channels; c++) { - ret = iio_backend_data_format_set(st->back, c, &data); + ret = ad9467_testmode_set(st, c, AN877_ADC_TESTMODE_PN9_SEQ); if (ret) return ret; - } - ret = iio_backend_test_pattern_set(st->back, 0, - IIO_BACKEND_ADI_PRBS_9A); - if (ret) - return ret; + ret = ad9467_backend_testmode_on(st, c, + IIO_BACKEND_ADI_PRBS_9A); + if (ret) + return ret; + } - return iio_backend_chan_enable(st->back, 0); + return 0; } static int ad9647_calibrate_polarity_set(struct ad9467_state *st, @@ -442,7 +657,7 @@ static int ad9467_calibrate_apply(struct ad9467_state *st, unsigned int val) if (st->info->has_dco) { ret = ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_DELAY, - val); + val | st->info->dco_en); if (ret) return ret; @@ -461,57 +676,38 @@ static int ad9467_calibrate_apply(struct ad9467_state *st, unsigned int val) static int ad9647_calibrate_stop(struct ad9467_state *st) { - struct iio_backend_data_fmt data = { - 
.sign_extend = true, - .enable = true, - }; unsigned int c, mode; int ret; - ret = iio_backend_chan_disable(st->back, 0); - if (ret) - return ret; - - ret = iio_backend_test_pattern_set(st->back, 0, - IIO_BACKEND_NO_TEST_PATTERN); - if (ret) - return ret; - for (c = 0; c < st->info->num_channels; c++) { - ret = iio_backend_data_format_set(st->back, c, &data); + ret = ad9467_backend_testmode_off(st, c); + if (ret) + return ret; + + ret = ad9467_testmode_set(st, c, AN877_ADC_TESTMODE_OFF); if (ret) return ret; } mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT; - ret = ad9467_outputmode_set(st, mode); - if (ret) - return ret; - - ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO, - AN877_ADC_TESTMODE_OFF); - if (ret) - return ret; - - return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, - AN877_ADC_TRANSFER_SYNC); + return ad9467_outputmode_set(st, mode); } static int ad9467_calibrate(struct ad9467_state *st) { - unsigned int point, val, inv_val, cnt, inv_cnt = 0; + unsigned int point, val, inv_val, cnt, inv_cnt = 0, c; /* * Half of the bitmap is for the inverted signal. The number of test * points is the same though... */ - unsigned int test_points = AD9647_MAX_TEST_POINTS; + unsigned int test_points = st->info->test_points; unsigned long sample_rate = clk_get_rate(st->clk); struct device *dev = &st->spi->dev; bool invert = false, stat; int ret; /* all points invalid */ - bitmap_fill(st->calib_map, BITS_PER_TYPE(st->calib_map)); + bitmap_fill(st->calib_map, st->calib_map_size); ret = ad9647_calibrate_prepare(st); if (ret) @@ -521,16 +717,31 @@ static int ad9467_calibrate(struct ad9467_state *st) if (ret) return ret; - for (point = 0; point < test_points; point++) { + for (point = 0; point < st->info->test_points; point++) { ret = ad9467_calibrate_apply(st, point); if (ret) return ret; - ret = iio_backend_chan_status(st->back, 0, &stat); - if (ret) - return ret; + for (c = 0; c < st->info->num_channels; c++) { + ret = iio_backend_chan_status(st->back, c, &stat); + if (ret) + return ret; - __assign_bit(point + invert * test_points, st->calib_map, stat); + /* + * A point is considered valid if all channels report no + * error. If one reports an error, then we consider the + * point as invalid and we can break the loop right away. + */ + if (stat) { + dev_dbg(dev, "Invalid point(%u, inv:%u) for CH:%u\n", + point, invert, c); + break; + } + + if (c == st->info->num_channels - 1) + __clear_bit(point + invert * test_points, + st->calib_map); + } } if (!invert) { @@ -541,8 +752,13 @@ static int ad9467_calibrate(struct ad9467_state *st) * a row. 
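ad9467_find_optimal_point() itself is not part of this diff, so the following is only a plausible reconstruction of the idea referred to above, not the driver's code: every cleared bit in the calibration map is a delay setting that passed on all channels, and the middle of the longest run of cleared bits is the most robust pick (test_bit() comes from the kernel's bitops helpers).

static unsigned int example_pick_calib_point(const unsigned long *map,
					     unsigned int nbits,
					     unsigned int *run_len)
{
	unsigned int best_start = 0, best_len = 0, start = 0, len = 0, bit;

	for (bit = 0; bit < nbits; bit++) {
		if (test_bit(bit, map)) {	/* invalid point, run ends */
			len = 0;
			continue;
		}
		if (!len)			/* new run of valid points */
			start = bit;
		if (++len > best_len) {
			best_len = len;
			best_start = start;
		}
	}

	*run_len = best_len;
	return best_start + best_len / 2;	/* middle of the longest run */
}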
*/ if (cnt < 3) { - invert = true; - goto retune; + if (AD9467_CAN_INVERT(st)) { + invert = true; + goto retune; + } + + if (!cnt) + return -EIO; } } else { inv_cnt = ad9467_find_optimal_point(st->calib_map, test_points, @@ -679,7 +895,7 @@ static int ad9467_update_scan_mode(struct iio_dev *indio_dev, return 0; } -static const struct iio_info ad9467_info = { +static struct iio_info ad9467_info = { .read_raw = ad9467_read_raw, .write_raw = ad9467_write_raw, .update_scan_mode = ad9467_update_scan_mode, @@ -762,12 +978,134 @@ static int ad9467_iio_backend_get(struct ad9467_state *st) return -ENODEV; } +static int ad9467_test_mode_available_show(struct seq_file *s, void *ignored) +{ + struct ad9467_state *st = s->private; + unsigned int bit; + + for_each_set_bit(bit, &st->info->test_mask, st->info->test_mask_len) + seq_printf(s, "%s\n", ad9467_test_modes[bit]); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ad9467_test_mode_available); + +static ssize_t ad9467_chan_test_mode_read(struct file *file, + char __user *userbuf, size_t count, + loff_t *ppos) +{ + struct ad9467_chan_test_mode *chan = file->private_data; + struct ad9467_state *st = chan->st; + char buf[128] = {0}; + size_t len; + int ret; + + if (chan->mode == AN877_ADC_TESTMODE_PN9_SEQ || + chan->mode == AN877_ADC_TESTMODE_PN23_SEQ) { + len = scnprintf(buf, sizeof(buf), "Running \"%s\" Test:\n\t", + ad9467_test_modes[chan->mode]); + + ret = iio_backend_debugfs_print_chan_status(st->back, chan->idx, + buf + len, + sizeof(buf) - len); + if (ret < 0) + return ret; + len += ret; + } else if (chan->mode == AN877_ADC_TESTMODE_OFF) { + len = scnprintf(buf, sizeof(buf), "No test Running...\n"); + } else { + len = scnprintf(buf, sizeof(buf), "Running \"%s\" Test on CH:%u\n", + ad9467_test_modes[chan->mode], chan->idx); + } + + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} + +static ssize_t ad9467_chan_test_mode_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ad9467_chan_test_mode *chan = file->private_data; + struct ad9467_state *st = chan->st; + char test_mode[32] = {0}; + unsigned int mode; + int ret; + + ret = simple_write_to_buffer(test_mode, sizeof(test_mode) - 1, ppos, + userbuf, count); + if (ret < 0) + return ret; + + for_each_set_bit(mode, &st->info->test_mask, st->info->test_mask_len) { + if (sysfs_streq(test_mode, ad9467_test_modes[mode])) + break; + } + + if (mode == st->info->test_mask_len) + return -EINVAL; + + guard(mutex)(&st->lock); + + if (mode == AN877_ADC_TESTMODE_OFF) { + unsigned int out_mode; + + if (chan->mode == AN877_ADC_TESTMODE_PN9_SEQ || + chan->mode == AN877_ADC_TESTMODE_PN23_SEQ) { + ret = ad9467_backend_testmode_off(st, chan->idx); + if (ret) + return ret; + } + + ret = ad9467_testmode_set(st, chan->idx, mode); + if (ret) + return ret; + + out_mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT; + ret = ad9467_outputmode_set(st, out_mode); + if (ret) + return ret; + } else { + ret = ad9467_outputmode_set(st, st->info->default_output_mode); + if (ret) + return ret; + + ret = ad9467_testmode_set(st, chan->idx, mode); + if (ret) + return ret; + + /* some patterns have a backend matching monitoring block */ + if (mode == AN877_ADC_TESTMODE_PN9_SEQ) { + ret = ad9467_backend_testmode_on(st, chan->idx, + IIO_BACKEND_ADI_PRBS_9A); + if (ret) + return ret; + } else if (mode == AN877_ADC_TESTMODE_PN23_SEQ) { + ret = ad9467_backend_testmode_on(st, chan->idx, + IIO_BACKEND_ADI_PRBS_23A); + if (ret) + return ret; + } + } + + chan->mode = 
mode; + + return count; +} + +static const struct file_operations ad9467_chan_test_mode_fops = { + .open = simple_open, + .read = ad9467_chan_test_mode_read, + .write = ad9467_chan_test_mode_write, + .llseek = default_llseek, + .owner = THIS_MODULE, +}; + static ssize_t ad9467_dump_calib_table(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ad9467_state *st = file->private_data; - unsigned int bit, size = BITS_PER_TYPE(st->calib_map); + unsigned int bit; /* +2 for the newline and +1 for the string termination */ unsigned char map[AD9647_MAX_TEST_POINTS * 2 + 3]; ssize_t len = 0; @@ -776,8 +1114,8 @@ static ssize_t ad9467_dump_calib_table(struct file *file, if (*ppos) goto out_read; - for (bit = 0; bit < size; bit++) { - if (bit == size / 2) + for (bit = 0; bit < st->calib_map_size; bit++) { + if (AD9467_CAN_INVERT(st) && bit == st->calib_map_size / 2) len += scnprintf(map + len, sizeof(map) - len, "\n"); len += scnprintf(map + len, sizeof(map) - len, "%c", @@ -800,12 +1138,33 @@ static void ad9467_debugfs_init(struct iio_dev *indio_dev) { struct dentry *d = iio_get_debugfs_dentry(indio_dev); struct ad9467_state *st = iio_priv(indio_dev); + char attr_name[32]; + unsigned int chan; if (!IS_ENABLED(CONFIG_DEBUG_FS)) return; + st->chan_test = devm_kcalloc(&st->spi->dev, st->info->num_channels, + sizeof(*st->chan_test), GFP_KERNEL); + if (!st->chan_test) + return; + debugfs_create_file("calibration_table_dump", 0400, d, st, &ad9467_calib_table_fops); + + for (chan = 0; chan < st->info->num_channels; chan++) { + snprintf(attr_name, sizeof(attr_name), "in_voltage%u_test_mode", + chan); + st->chan_test[chan].idx = chan; + st->chan_test[chan].st = st; + debugfs_create_file(attr_name, 0600, d, &st->chan_test[chan], + &ad9467_chan_test_mode_fops); + } + + debugfs_create_file("in_voltage_test_mode_available", 0400, d, st, + &ad9467_test_mode_available_fops); + + iio_backend_debugfs_add(st->back, indio_dev); } static int ad9467_probe(struct spi_device *spi) @@ -826,6 +1185,10 @@ static int ad9467_probe(struct spi_device *spi) if (!st->info) return -ENODEV; + st->calib_map_size = st->info->test_points; + if (AD9467_CAN_INVERT(st)) + st->calib_map_size *= 2; + st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk"); if (IS_ERR(st->clk)) return PTR_ERR(st->clk); @@ -850,6 +1213,8 @@ static int ad9467_probe(struct spi_device *spi) return -ENODEV; } + if (st->info->num_scales > 1) + ad9467_info.read_avail = ad9467_read_avail; indio_dev->name = st->info->name; indio_dev->channels = st->info->channels; indio_dev->num_channels = st->info->num_channels; @@ -884,7 +1249,10 @@ static const struct of_device_id ad9467_of_match[] = { { .compatible = "adi,ad9265", .data = &ad9265_chip_tbl, }, { .compatible = "adi,ad9434", .data = &ad9434_chip_tbl, }, { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl, }, - {} + { .compatible = "adi,ad9643", .data = &ad9643_chip_tbl, }, + { .compatible = "adi,ad9649", .data = &ad9649_chip_tbl, }, + { .compatible = "adi,ad9652", .data = &ad9652_chip_tbl, }, + { } }; MODULE_DEVICE_TABLE(of, ad9467_of_match); @@ -892,7 +1260,10 @@ static const struct spi_device_id ad9467_ids[] = { { "ad9265", (kernel_ulong_t)&ad9265_chip_tbl }, { "ad9434", (kernel_ulong_t)&ad9434_chip_tbl }, { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl }, - {} + { "ad9643", (kernel_ulong_t)&ad9643_chip_tbl }, + { "ad9649", (kernel_ulong_t)&ad9649_chip_tbl, }, + { "ad9652", (kernel_ulong_t)&ad9652_chip_tbl, }, + { } }; MODULE_DEVICE_TABLE(spi, ad9467_ids); diff --git 
a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index dcd557e935863f..ea4aabd3960a08 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -23,7 +23,7 @@ #include #include -#include +#include #define AD_SD_COMM_CHAN_MASK 0x3 @@ -351,7 +351,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) if (sigma_delta->num_slots == 1) { channel = find_first_bit(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); ret = ad_sigma_delta_set_channel(sigma_delta, indio_dev->channels[channel].address); if (ret) @@ -364,7 +364,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) * implementation is mandatory. */ slot = 0; - for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { sigma_delta->slots[slot] = indio_dev->channels[i].address; slot++; } @@ -526,7 +526,7 @@ static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned l { struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev); - return bitmap_weight(mask, indio_dev->masklength) <= sigma_delta->num_slots; + return bitmap_weight(mask, iio_get_masklength(indio_dev)) <= sigma_delta->num_slots; } static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = { diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index 21ce7564e83db7..5c8c87eb36d19f 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -61,6 +61,10 @@ #define ADI_AXI_ADC_REG_CHAN_STATUS(c) (0x0404 + (c) * 0x40) #define ADI_AXI_ADC_CHAN_STAT_PN_MASK GENMASK(2, 1) +/* out of sync */ +#define ADI_AXI_ADC_CHAN_STAT_PN_OOS BIT(1) +/* spurious out of sync */ +#define ADI_AXI_ADC_CHAN_STAT_PN_ERR BIT(2) #define ADI_AXI_ADC_REG_CHAN_CTRL_3(c) (0x0418 + (c) * 0x40) #define ADI_AXI_ADC_CHAN_PN_SEL_MASK GENMASK(19, 16) @@ -199,17 +203,19 @@ static int axi_adc_test_pattern_set(struct iio_backend *back, return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan), ADI_AXI_ADC_CHAN_PN_SEL_MASK, FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 0)); + case IIO_BACKEND_ADI_PRBS_23A: + return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan), + ADI_AXI_ADC_CHAN_PN_SEL_MASK, + FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 1)); default: return -EINVAL; } } -static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan, - bool *error) +static int axi_adc_read_chan_status(struct adi_axi_adc_state *st, unsigned int chan, + unsigned int *status) { - struct adi_axi_adc_state *st = iio_backend_get_priv(back); int ret; - u32 val; guard(mutex)(&st->lock); /* reset test bits by setting them */ @@ -221,7 +227,18 @@ static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan, /* let's give enough time to validate or erroring the incoming pattern */ fsleep(1000); - ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan), &val); + return regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan), + status); +} + +static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan, + bool *error) +{ + struct adi_axi_adc_state *st = iio_backend_get_priv(back); + u32 val; + int ret; + + ret = axi_adc_read_chan_status(st, chan, &val); if (ret) return ret; @@ -233,6 +250,30 @@ static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan, return 0; } +static int axi_adc_debugfs_print_chan_status(struct iio_backend *back, + unsigned int chan, char *buf, + size_t len) +{ + struct 
adi_axi_adc_state *st = iio_backend_get_priv(back); + u32 val; + int ret; + + ret = axi_adc_read_chan_status(st, chan, &val); + if (ret) + return ret; + + /* + * PN_ERR is cleared in case out of sync is set. Hence, no point in + * checking both bits. + */ + if (val & ADI_AXI_ADC_CHAN_STAT_PN_OOS) + return scnprintf(buf, len, "CH%u: Out of Sync.\n", chan); + if (val & ADI_AXI_ADC_CHAN_STAT_PN_ERR) + return scnprintf(buf, len, "CH%u: Spurious Out of Sync.\n", chan); + + return scnprintf(buf, len, "CH%u: OK.\n", chan); +} + static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan) { struct adi_axi_adc_state *st = iio_backend_get_priv(back); @@ -267,13 +308,24 @@ static void axi_adc_free_buffer(struct iio_backend *back, iio_dmaengine_buffer_free(buffer); } +static int axi_adc_reg_access(struct iio_backend *back, unsigned int reg, + unsigned int writeval, unsigned int *readval) +{ + struct adi_axi_adc_state *st = iio_backend_get_priv(back); + + if (readval) + return regmap_read(st->regmap, reg, readval); + + return regmap_write(st->regmap, reg, writeval); +} + static const struct regmap_config axi_adc_regmap_config = { .val_bits = 32, .reg_bits = 32, .reg_stride = 4, }; -static const struct iio_backend_ops adi_axi_adc_generic = { +static const struct iio_backend_ops adi_axi_adc_ops = { .enable = axi_adc_enable, .disable = axi_adc_disable, .data_format_set = axi_adc_data_format_set, @@ -285,6 +337,13 @@ static const struct iio_backend_ops adi_axi_adc_generic = { .iodelay_set = axi_adc_iodelays_set, .test_pattern_set = axi_adc_test_pattern_set, .chan_status = axi_adc_chan_status, + .debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access), + .debugfs_print_chan_status = iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status), +}; + +static const struct iio_backend_info adi_axi_adc_generic = { + .name = "axi-adc", + .ops = &adi_axi_adc_ops, }; static int adi_axi_adc_probe(struct platform_device *pdev) diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index 090416c0d6220b..1d5fd5f534b8a1 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c @@ -555,8 +555,7 @@ static int aspeed_adc_probe(struct platform_device *pdev) if (ret) return ret; - if (of_find_property(data->dev->of_node, "aspeed,battery-sensing", - NULL)) { + if (of_property_present(data->dev->of_node, "aspeed,battery-sensing")) { if (data->model_data->bat_sense_sup) { data->battery_sensing = 1; if (readl(data->base + ASPEED_REG_ENGINE_CONTROL) & @@ -695,7 +694,7 @@ static const struct of_device_id aspeed_adc_matches[] = { { .compatible = "aspeed,ast2500-adc", .data = &ast2500_model_data }, { .compatible = "aspeed,ast2600-adc0", .data = &ast2600_adc0_model_data }, { .compatible = "aspeed,ast2600-adc1", .data = &ast2600_adc1_model_data }, - {}, + { } }; MODULE_DEVICE_TABLE(of, aspeed_adc_matches); diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index eb501e3c86a59a..9c39acff17e658 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -268,9 +269,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p) struct iio_chan_spec const *chan; int i, j = 0; - for (i = 0; i < idev->masklength; i++) { - if (!test_bit(i, idev->active_scan_mask)) - continue; + iio_for_each_active_channel(idev, i) { chan = idev->channels + i; st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel)); j++; @@ -543,22 +542,18 @@ static int 
at91_adc_get_trigger_value_by_name(struct iio_dev *idev, int i; for (i = 0; i < st->caps->trigger_number; i++) { - char *name = kasprintf(GFP_KERNEL, - "%s-dev%d-%s", - idev->name, - iio_device_id(idev), - triggers[i].name); + char *name __free(kfree) = kasprintf(GFP_KERNEL, "%s-dev%d-%s", + idev->name, + iio_device_id(idev), + triggers[i].name); if (!name) return -ENOMEM; if (strcmp(trigger_name, name) == 0) { - kfree(name); if (triggers[i].value == 0) return -EINVAL; return triggers[i].value; } - - kfree(name); } return -EINVAL; @@ -1340,7 +1335,7 @@ static const struct of_device_id at91_adc_dt_ids[] = { { .compatible = "atmel,at91sam9g45-adc", .data = &at91sam9g45_caps }, { .compatible = "atmel,at91sam9x5-adc", .data = &at91sam9x5_caps }, { .compatible = "atmel,sama5d3-adc", .data = &sama5d3_caps }, - {}, + { } }; MODULE_DEVICE_TABLE(of, at91_adc_dt_ids); diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c index b487e577befb3d..6c1a5d1b0a83d4 100644 --- a/drivers/iio/adc/axp20x_adc.c +++ b/drivers/iio/adc/axp20x_adc.c @@ -5,6 +5,7 @@ * Quentin Schulz */ +#include #include #include #include @@ -30,6 +31,8 @@ #define AXP22X_ADC_EN1_MASK (GENMASK(7, 5) | BIT(0)) +#define AXP717_ADC_EN1_MASK GENMASK(7, 0) + #define AXP192_GPIO30_IN_RANGE_GPIO0 BIT(0) #define AXP192_GPIO30_IN_RANGE_GPIO1 BIT(1) #define AXP192_GPIO30_IN_RANGE_GPIO2 BIT(2) @@ -43,6 +46,13 @@ #define AXP22X_ADC_RATE_HZ(x) ((ilog2((x) / 100) << 6) & AXP20X_ADC_RATE_MASK) +#define AXP717_ADC_DATA_TS 0x00 +#define AXP717_ADC_DATA_TEMP 0x01 +#define AXP717_ADC_DATA_VMID 0x02 +#define AXP717_ADC_DATA_BKUP_BATT 0x03 + +#define AXP717_ADC_DATA_MASK GENMASK(13, 0) + #define AXP813_V_I_ADC_RATE_MASK GENMASK(5, 4) #define AXP813_ADC_RATE_MASK (AXP20X_ADC_RATE_MASK | AXP813_V_I_ADC_RATE_MASK) #define AXP813_TS_GPIO0_ADC_RATE_HZ(x) AXP20X_ADC_RATE_HZ(x) @@ -125,6 +135,20 @@ enum axp22x_adc_channel_i { AXP22X_BATT_DISCHRG_I, }; +enum axp717_adc_channel_v { + AXP717_BATT_V = 0, + AXP717_TS_IN, + AXP717_VBUS_V, + AXP717_VSYS_V, + AXP717_DIE_TEMP_V, + AXP717_VMID_V = 6, + AXP717_BKUP_BATT_V, +}; + +enum axp717_adc_channel_i { + AXP717_BATT_CHRG_I = 5, +}; + enum axp813_adc_channel_v { AXP813_TS_IN = 0, AXP813_GPIO0_V, @@ -179,6 +203,22 @@ static struct iio_map axp22x_maps[] = { }, { /* sentinel */ } }; +static struct iio_map axp717_maps[] = { + { + .consumer_dev_name = "axp20x-usb-power-supply", + .consumer_channel = "vbus_v", + .adc_channel_label = "vbus_v", + }, { + .consumer_dev_name = "axp20x-battery-power-supply", + .consumer_channel = "batt_v", + .adc_channel_label = "batt_v", + }, { + .consumer_dev_name = "axp20x-battery-power-supply", + .consumer_channel = "batt_chrg_i", + .adc_channel_label = "batt_chrg_i", + }, +}; + /* * Channels are mapped by physical system. Their channels share the same index. * i.e. acin_i is in_current0_raw and acin_v is in_voltage0_raw. @@ -274,6 +314,29 @@ static const struct iio_chan_spec axp22x_adc_channels[] = { AXP22X_TS_ADC_H), }; +/* + * Scale and offset is unknown for temp, ts, batt_chrg_i, vmid_v, and + * bkup_batt_v channels. Leaving scale and offset undefined for now. 
+ */ +static const struct iio_chan_spec axp717_adc_channels[] = { + AXP20X_ADC_CHANNEL(AXP717_BATT_V, "batt_v", IIO_VOLTAGE, + AXP717_BATT_V_H), + AXP20X_ADC_CHANNEL(AXP717_TS_IN, "ts_v", IIO_VOLTAGE, + AXP717_ADC_DATA_H), + AXP20X_ADC_CHANNEL(AXP717_VBUS_V, "vbus_v", IIO_VOLTAGE, + AXP717_VBUS_V_H), + AXP20X_ADC_CHANNEL(AXP717_VSYS_V, "vsys_v", IIO_VOLTAGE, + AXP717_VSYS_V_H), + AXP20X_ADC_CHANNEL(AXP717_DIE_TEMP_V, "pmic_temp", IIO_TEMP, + AXP717_ADC_DATA_H), + AXP20X_ADC_CHANNEL(AXP717_BATT_CHRG_I, "batt_chrg_i", IIO_CURRENT, + AXP717_BATT_CHRG_I_H), + AXP20X_ADC_CHANNEL(AXP717_VMID_V, "vmid_v", IIO_VOLTAGE, + AXP717_ADC_DATA_H), + AXP20X_ADC_CHANNEL(AXP717_BKUP_BATT_V, "bkup_batt_v", IIO_VOLTAGE, + AXP717_ADC_DATA_H), +}; + static const struct iio_chan_spec axp813_adc_channels[] = { { .type = IIO_TEMP, @@ -354,6 +417,51 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; } +static int axp717_adc_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val) +{ + struct axp20x_adc_iio *info = iio_priv(indio_dev); + u8 bulk_reg[2]; + int ret; + + /* + * A generic "ADC data" channel is used for TS, tdie, vmid, + * and vbackup. This channel must both first be enabled and + * also selected before it can be read. + */ + switch (chan->channel) { + case AXP717_TS_IN: + regmap_write(info->regmap, AXP717_ADC_DATA_SEL, + AXP717_ADC_DATA_TS); + break; + case AXP717_DIE_TEMP_V: + regmap_write(info->regmap, AXP717_ADC_DATA_SEL, + AXP717_ADC_DATA_TEMP); + break; + case AXP717_VMID_V: + regmap_write(info->regmap, AXP717_ADC_DATA_SEL, + AXP717_ADC_DATA_VMID); + break; + case AXP717_BKUP_BATT_V: + regmap_write(info->regmap, AXP717_ADC_DATA_SEL, + AXP717_ADC_DATA_BKUP_BATT); + break; + default: + break; + } + + /* + * All channels are 14 bits, with the first 2 bits on the high + * register reserved and the remaining bits as the ADC value. 
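+ * + * As an illustration (sample bytes assumed, not taken from the datasheet): a + * bulk read returning { 0x2a, 0x5c } gives get_unaligned_be16() == 0x2a5c, and + * FIELD_GET(AXP717_ADC_DATA_MASK, 0x2a5c) == 10844; full scale is 0x3fff (16383).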
+ */ + ret = regmap_bulk_read(info->regmap, chan->address, bulk_reg, 2); + if (ret < 0) + return ret; + + *val = FIELD_GET(AXP717_ADC_DATA_MASK, get_unaligned_be16(bulk_reg)); + return IIO_VAL_INT; +} + static int axp813_adc_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val) { @@ -571,6 +679,27 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val, } } +static int axp717_adc_scale(struct iio_chan_spec const *chan, int *val, + int *val2) +{ + switch (chan->type) { + case IIO_VOLTAGE: + *val = 1; + return IIO_VAL_INT; + + case IIO_CURRENT: + *val = 1; + return IIO_VAL_INT; + + case IIO_TEMP: + *val = 100; + return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + static int axp813_adc_scale(struct iio_chan_spec const *chan, int *val, int *val2) { @@ -746,6 +875,22 @@ static int axp22x_read_raw(struct iio_dev *indio_dev, } } +static int axp717_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SCALE: + return axp717_adc_scale(chan, val, val2); + + case IIO_CHAN_INFO_RAW: + return axp717_adc_raw(indio_dev, chan, val); + + default: + return -EINVAL; + } +} + static int axp813_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -860,6 +1005,10 @@ static const struct iio_info axp22x_adc_iio_info = { .read_raw = axp22x_read_raw, }; +static const struct iio_info axp717_adc_iio_info = { + .read_raw = axp717_read_raw, +}; + static const struct iio_info axp813_adc_iio_info = { .read_raw = axp813_read_raw, }; @@ -889,7 +1038,9 @@ struct axp_data { const struct iio_info *iio_info; int num_channels; struct iio_chan_spec const *channels; + unsigned long adc_en1; unsigned long adc_en1_mask; + unsigned long adc_en2; unsigned long adc_en2_mask; int (*adc_rate)(struct axp20x_adc_iio *info, int rate); @@ -910,7 +1061,9 @@ static const struct axp_data axp20x_data = { .iio_info = &axp20x_adc_iio_info, .num_channels = ARRAY_SIZE(axp20x_adc_channels), .channels = axp20x_adc_channels, + .adc_en1 = AXP20X_ADC_EN1, .adc_en1_mask = AXP20X_ADC_EN1_MASK, + .adc_en2 = AXP20X_ADC_EN2, .adc_en2_mask = AXP20X_ADC_EN2_MASK, .adc_rate = axp20x_adc_rate, .maps = axp20x_maps, @@ -920,15 +1073,26 @@ static const struct axp_data axp22x_data = { .iio_info = &axp22x_adc_iio_info, .num_channels = ARRAY_SIZE(axp22x_adc_channels), .channels = axp22x_adc_channels, + .adc_en1 = AXP20X_ADC_EN1, .adc_en1_mask = AXP22X_ADC_EN1_MASK, .adc_rate = axp22x_adc_rate, .maps = axp22x_maps, }; +static const struct axp_data axp717_data = { + .iio_info = &axp717_adc_iio_info, + .num_channels = ARRAY_SIZE(axp717_adc_channels), + .channels = axp717_adc_channels, + .adc_en1 = AXP717_ADC_CH_EN_CONTROL, + .adc_en1_mask = AXP717_ADC_EN1_MASK, + .maps = axp717_maps, +}; + static const struct axp_data axp813_data = { .iio_info = &axp813_adc_iio_info, .num_channels = ARRAY_SIZE(axp813_adc_channels), .channels = axp813_adc_channels, + .adc_en1 = AXP20X_ADC_EN1, .adc_en1_mask = AXP22X_ADC_EN1_MASK, .adc_rate = axp813_adc_rate, .maps = axp22x_maps, @@ -938,6 +1102,7 @@ static const struct of_device_id axp20x_adc_of_match[] = { { .compatible = "x-powers,axp192-adc", .data = (void *)&axp192_data, }, { .compatible = "x-powers,axp209-adc", .data = (void *)&axp20x_data, }, { .compatible = "x-powers,axp221-adc", .data = (void *)&axp22x_data, }, + { .compatible = "x-powers,axp717-adc", .data = (void *)&axp717_data, }, { .compatible = "x-powers,axp813-adc", .data = (void 
*)&axp813_data, }, { /* sentinel */ } }; @@ -947,6 +1112,7 @@ static const struct platform_device_id axp20x_adc_id_match[] = { { .name = "axp192-adc", .driver_data = (kernel_ulong_t)&axp192_data, }, { .name = "axp20x-adc", .driver_data = (kernel_ulong_t)&axp20x_data, }, { .name = "axp22x-adc", .driver_data = (kernel_ulong_t)&axp22x_data, }, + { .name = "axp717-adc", .driver_data = (kernel_ulong_t)&axp717_data, }, { .name = "axp813-adc", .driver_data = (kernel_ulong_t)&axp813_data, }, { /* sentinel */ }, }; @@ -988,14 +1154,16 @@ static int axp20x_probe(struct platform_device *pdev) indio_dev->channels = info->data->channels; /* Enable the ADCs on IP */ - regmap_write(info->regmap, AXP20X_ADC_EN1, info->data->adc_en1_mask); + regmap_write(info->regmap, info->data->adc_en1, + info->data->adc_en1_mask); if (info->data->adc_en2_mask) - regmap_set_bits(info->regmap, AXP20X_ADC_EN2, + regmap_set_bits(info->regmap, info->data->adc_en2, info->data->adc_en2_mask); /* Configure ADCs rate */ - info->data->adc_rate(info, 100); + if (info->data->adc_rate) + info->data->adc_rate(info, 100); ret = iio_map_array_register(indio_dev, info->data->maps); if (ret < 0) { @@ -1015,10 +1183,10 @@ static int axp20x_probe(struct platform_device *pdev) iio_map_array_unregister(indio_dev); fail_map: - regmap_write(info->regmap, AXP20X_ADC_EN1, 0); + regmap_write(info->regmap, info->data->adc_en1, 0); if (info->data->adc_en2_mask) - regmap_write(info->regmap, AXP20X_ADC_EN2, 0); + regmap_write(info->regmap, info->data->adc_en2, 0); return ret; } @@ -1031,10 +1199,10 @@ static void axp20x_remove(struct platform_device *pdev) iio_device_unregister(indio_dev); iio_map_array_unregister(indio_dev); - regmap_write(info->regmap, AXP20X_ADC_EN1, 0); + regmap_write(info->regmap, info->data->adc_en1, 0); if (info->data->adc_en2_mask) - regmap_write(info->regmap, AXP20X_ADC_EN2, 0); + regmap_write(info->regmap, info->data->adc_en2, 0); } static struct platform_driver axp20x_adc_driver = { diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index f135cf2362dfc7..8c3acc0cd7e99a 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -299,7 +299,7 @@ static int axp288_adc_probe(struct platform_device *pdev) static const struct platform_device_id axp288_adc_id_table[] = { { .name = "axp288_adc" }, - {}, + { } }; static struct platform_driver axp288_adc_driver = { diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c index 6bc149c5141413..cdfe304eaa2011 100644 --- a/drivers/iio/adc/bcm_iproc_adc.c +++ b/drivers/iio/adc/bcm_iproc_adc.c @@ -606,7 +606,7 @@ static void iproc_adc_remove(struct platform_device *pdev) static const struct of_device_id iproc_adc_of_match[] = { {.compatible = "brcm,iproc-static-adc", }, - { }, + { } }; MODULE_DEVICE_TABLE(of, iproc_adc_of_match); diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c index 4cdddc6e36e9c9..fa04e0a5f64553 100644 --- a/drivers/iio/adc/berlin2-adc.c +++ b/drivers/iio/adc/berlin2-adc.c @@ -351,7 +351,7 @@ static int berlin2_adc_probe(struct platform_device *pdev) static const struct of_device_id berlin2_adc_match[] = { { .compatible = "marvell,berlin2-adc", }, - { }, + { } }; MODULE_DEVICE_TABLE(of, berlin2_adc_match); diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c index a432342348abe6..2c51b90b71018a 100644 --- a/drivers/iio/adc/cc10001_adc.c +++ b/drivers/iio/adc/cc10001_adc.c @@ -157,9 +157,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p) i = 0; 
sample_invalid = false; - for_each_set_bit(scan_idx, indio_dev->active_scan_mask, - indio_dev->masklength) { - + iio_for_each_active_channel(indio_dev, scan_idx) { channel = indio_dev->channels[scan_idx].channel; cc10001_adc_start(adc_dev, channel); diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index 06cfbbabaf8dcb..de7252a10047d5 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -108,7 +108,7 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2) dln2->demux_count = 0; /* Optimize all 8-channels case */ - if (indio_dev->masklength && + if (iio_get_masklength(indio_dev) && (*indio_dev->active_scan_mask & 0xff) == 0xff) { dln2_adc_add_demux(dln2, 0, 0, 16); dln2->ts_pad_offset = 0; @@ -117,9 +117,7 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2) } /* Build demux table from fixed 8-channels to active_scan_mask */ - for_each_set_bit(out_ind, - indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, out_ind) { /* Handle timestamp separately */ if (out_ind == DLN2_ADC_MAX_CHANNELS) break; @@ -541,7 +539,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) /* Assign trigger channel based on first enabled channel */ trigger_chan = find_first_bit(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); if (trigger_chan < DLN2_ADC_MAX_CHANNELS) { dln2->trigger_chan = trigger_chan; ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan, diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c index 971942ce4c6667..cc38d5e0608eed 100644 --- a/drivers/iio/adc/ep93xx_adc.c +++ b/drivers/iio/adc/ep93xx_adc.c @@ -228,7 +228,7 @@ static void ep93xx_adc_remove(struct platform_device *pdev) static const struct of_device_id ep93xx_adc_of_ids[] = { { .compatible = "cirrus,ep9301-adc" }, - {} + { } }; MODULE_DEVICE_TABLE(of, ep93xx_adc_of_ids); diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c index 78fada4b7b1c86..4d00ee8dd14d08 100644 --- a/drivers/iio/adc/exynos_adc.c +++ b/drivers/iio/adc/exynos_adc.c @@ -519,7 +519,7 @@ static const struct of_device_id exynos_adc_match[] = { .compatible = "samsung,exynos7-adc", .data = &exynos7_adc_data, }, - {}, + { } }; MODULE_DEVICE_TABLE(of, exynos_adc_match); diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c index 771fa12bdc026d..fb635a75644030 100644 --- a/drivers/iio/adc/hi8435.c +++ b/drivers/iio/adc/hi8435.c @@ -524,7 +524,7 @@ static int hi8435_probe(struct spi_device *spi) static const struct of_device_id hi8435_dt_ids[] = { { .compatible = "holt,hi8435" }, - {}, + { } }; MODULE_DEVICE_TABLE(of, hi8435_dt_ids); diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c index b3372ccff7d5e8..8da0419ecfa357 100644 --- a/drivers/iio/adc/hx711.c +++ b/drivers/iio/adc/hx711.c @@ -363,10 +363,7 @@ static irqreturn_t hx711_trigger(int irq, void *p) memset(hx711_data->buffer, 0, sizeof(hx711_data->buffer)); - for (i = 0; i < indio_dev->masklength; i++) { - if (!test_bit(i, indio_dev->active_scan_mask)) - continue; - + iio_for_each_active_channel(indio_dev, i) { hx711_data->buffer[j] = hx711_reset_read(hx711_data, indio_dev->channels[i].channel); j++; @@ -555,7 +552,7 @@ static int hx711_probe(struct platform_device *pdev) static const struct of_device_id of_hx711_match[] = { { .compatible = "avia,hx711", }, - {}, + { } }; MODULE_DEVICE_TABLE(of, of_hx711_match); diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 
727e390bd97951..48c95e12e791f2 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -755,8 +755,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) * Single register reads: bulk_read will not work with ina226/219 * as there is no auto-increment of the register pointer. */ - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { unsigned int val; ret = regmap_read(chip->regmap, @@ -1053,12 +1052,12 @@ static void ina2xx_remove(struct i2c_client *client) } static const struct i2c_device_id ina2xx_id[] = { - {"ina219", ina219}, - {"ina220", ina219}, - {"ina226", ina226}, - {"ina230", ina226}, - {"ina231", ina226}, - {} + { "ina219", ina219 }, + { "ina220", ina219 }, + { "ina226", ina226 }, + { "ina230", ina226 }, + { "ina231", ina226 }, + { } }; MODULE_DEVICE_TABLE(i2c, ina2xx_id); @@ -1083,7 +1082,7 @@ static const struct of_device_id ina2xx_of_match[] = { .compatible = "ti,ina231", .data = (void *)ina226 }, - {}, + { } }; MODULE_DEVICE_TABLE(of, ina2xx_of_match); diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c index af70ca760797d0..1e802c8779a4c0 100644 --- a/drivers/iio/adc/ingenic-adc.c +++ b/drivers/iio/adc/ingenic-adc.c @@ -908,7 +908,7 @@ static const struct of_device_id ingenic_adc_of_match[] = { { .compatible = "ingenic,jz4760-adc", .data = &jz4760_adc_soc_data, }, { .compatible = "ingenic,jz4760b-adc", .data = &jz4760_adc_soc_data, }, { .compatible = "ingenic,jz4770-adc", .data = &jz4770_adc_soc_data, }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ingenic_adc_of_match); diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c index 0590a126f3218a..30733252aa56f8 100644 --- a/drivers/iio/adc/intel_mrfld_adc.c +++ b/drivers/iio/adc/intel_mrfld_adc.c @@ -25,7 +25,7 @@ #include #include -#include +#include #define BCOVE_GPADCREQ 0xDC #define BCOVE_GPADCREQ_BUSY BIT(0) diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c index e34ed7dacd8934..43a7bc8158b579 100644 --- a/drivers/iio/adc/lpc32xx_adc.c +++ b/drivers/iio/adc/lpc32xx_adc.c @@ -217,7 +217,7 @@ static int lpc32xx_adc_probe(struct platform_device *pdev) static const struct of_device_id lpc32xx_adc_match[] = { { .compatible = "nxp,lpc3220-adc" }, - {}, + { } }; MODULE_DEVICE_TABLE(of, lpc32xx_adc_match); diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c index 2593fa4322ebd5..f06dd0b9a85819 100644 --- a/drivers/iio/adc/ltc2496.c +++ b/drivers/iio/adc/ltc2496.c @@ -94,7 +94,7 @@ static const struct ltc2497_chip_info ltc2496_info = { static const struct of_device_id ltc2496_of_match[] = { { .compatible = "lltc,ltc2496", .data = <c2496_info, }, - {}, + { } }; MODULE_DEVICE_TABLE(of, ltc2496_of_match); diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c index 6401a7727c31a5..eb9d521e86e54d 100644 --- a/drivers/iio/adc/ltc2497.c +++ b/drivers/iio/adc/ltc2497.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include "ltc2497.h" @@ -151,7 +151,7 @@ MODULE_DEVICE_TABLE(i2c, ltc2497_id); static const struct of_device_id ltc2497_of_match[] = { { .compatible = "lltc,ltc2497", .data = <c2497_info[TYPE_LTC2497] }, { .compatible = "lltc,ltc2499", .data = <c2497_info[TYPE_LTC2499] }, - {}, + { } }; MODULE_DEVICE_TABLE(of, ltc2497_of_match); diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c index 136fcf753837c8..f5ba4a1b5a7de2 100644 --- a/drivers/iio/adc/max1027.c +++ b/drivers/iio/adc/max1027.c @@ -73,13 +73,13 
@@ enum max1027_id { }; static const struct spi_device_id max1027_id[] = { - {"max1027", max1027}, - {"max1029", max1029}, - {"max1031", max1031}, - {"max1227", max1227}, - {"max1229", max1229}, - {"max1231", max1231}, - {} + { "max1027", max1027 }, + { "max1029", max1029 }, + { "max1031", max1031 }, + { "max1227", max1227 }, + { "max1229", max1229 }, + { "max1231", max1231 }, + { } }; MODULE_DEVICE_TABLE(spi, max1027_id); @@ -90,7 +90,7 @@ static const struct of_device_id max1027_adc_dt_ids[] = { { .compatible = "maxim,max1227" }, { .compatible = "maxim,max1229" }, { .compatible = "maxim,max1231" }, - {}, + { } }; MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids); diff --git a/drivers/iio/adc/max11100.c b/drivers/iio/adc/max11100.c index 49e38dca8fe2c7..520e37f75aac4c 100644 --- a/drivers/iio/adc/max11100.c +++ b/drivers/iio/adc/max11100.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include @@ -143,8 +143,8 @@ static int max11100_probe(struct spi_device *spi) } static const struct of_device_id max11100_ids[] = { - {.compatible = "maxim,max11100"}, - { }, + { .compatible = "maxim,max11100" }, + { } }; MODULE_DEVICE_TABLE(of, max11100_ids); diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c index 75ab57d9aef74e..3d0a7d0eb7ee1a 100644 --- a/drivers/iio/adc/max1118.c +++ b/drivers/iio/adc/max1118.c @@ -174,8 +174,7 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) mutex_lock(&adc->lock); - for_each_set_bit(scan_index, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, scan_index) { const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index]; int ret = max1118_read(indio_dev, scan_chan->channel); @@ -261,7 +260,7 @@ static const struct spi_device_id max1118_id[] = { { "max1117", max1117 }, { "max1118", max1118 }, { "max1119", max1119 }, - {} + { } }; MODULE_DEVICE_TABLE(spi, max1118_id); @@ -269,7 +268,7 @@ static const struct of_device_id max1118_dt_ids[] = { { .compatible = "maxim,max1117" }, { .compatible = "maxim,max1118" }, { .compatible = "maxim,max1119" }, - {}, + { } }; MODULE_DEVICE_TABLE(of, max1118_dt_ids); diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c index 45368850b2201f..f0dc4b46090316 100644 --- a/drivers/iio/adc/max11410.c +++ b/drivers/iio/adc/max11410.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c index 500bb09ab19bc9..d62c1a01165933 100644 --- a/drivers/iio/adc/max1241.c +++ b/drivers/iio/adc/max1241.c @@ -177,12 +177,12 @@ static int max1241_probe(struct spi_device *spi) static const struct spi_device_id max1241_id[] = { { "max1241", max1241 }, - {} + { } }; static const struct of_device_id max1241_dt_ids[] = { { .compatible = "maxim,max1241" }, - {} + { } }; MODULE_DEVICE_TABLE(of, max1241_dt_ids); diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index bf4b6dc53fd280..d0c6e94f7204ee 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -818,7 +819,6 @@ static int max1363_read_event_config(struct iio_dev *indio_dev, static int max1363_monitor_mode_update(struct max1363_state *st, int enabled) { - u8 *tx_buf; int ret, i = 3, j; unsigned long numelements; int len; @@ -850,11 +850,10 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled) } numelements = bitmap_weight(modemask, MAX1363_MAX_CHANNELS); len = 3 * 
numelements + 3; - tx_buf = kmalloc(len, GFP_KERNEL); - if (!tx_buf) { - ret = -ENOMEM; - goto error_ret; - } + u8 *tx_buf __free(kfree) = kmalloc(len, GFP_KERNEL); + if (!tx_buf) + return -ENOMEM; + tx_buf[0] = st->configbyte; tx_buf[1] = st->setupbyte; tx_buf[2] = (st->monitor_speed << 1); @@ -893,11 +892,9 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled) ret = st->send(st->client, tx_buf, len); if (ret < 0) - goto error_ret; - if (ret != len) { - ret = -EIO; - goto error_ret; - } + return ret; + if (ret != len) + return -EIO; /* * Now that we hopefully have sensible thresholds in place it is @@ -910,18 +907,13 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled) tx_buf[1] = MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0; ret = st->send(st->client, tx_buf, 2); if (ret < 0) - goto error_ret; - if (ret != 2) { - ret = -EIO; - goto error_ret; - } - ret = 0; - st->monitor_on = true; -error_ret: + return ret; + if (ret != 2) + return -EIO; - kfree(tx_buf); + st->monitor_on = true; - return ret; + return 0; } /* diff --git a/drivers/iio/adc/max34408.c b/drivers/iio/adc/max34408.c index 6c2ea2bc52c6f1..ffec22be2d5933 100644 --- a/drivers/iio/adc/max34408.c +++ b/drivers/iio/adc/max34408.c @@ -250,14 +250,14 @@ static const struct of_device_id max34408_of_match[] = { .compatible = "maxim,max34409", .data = &max34409_model_data, }, - {} + { } }; MODULE_DEVICE_TABLE(of, max34408_of_match); static const struct i2c_device_id max34408_id[] = { { "max34408", (kernel_ulong_t)&max34408_model_data }, { "max34409", (kernel_ulong_t)&max34409_model_data }, - {} + { } }; MODULE_DEVICE_TABLE(i2c, max34408_id); diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index 76e517b7b1e4f3..14fe42fc4b7dac 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -504,9 +504,9 @@ static int max9611_init(struct max9611_dev *max9611) } static const struct of_device_id max9611_of_table[] = { - {.compatible = "maxim,max9611", .data = "max9611"}, - {.compatible = "maxim,max9612", .data = "max9612"}, - { }, + { .compatible = "maxim,max9611", .data = "max9611" }, + { .compatible = "maxim,max9612", .data = "max9612" }, + { } }; MODULE_DEVICE_TABLE(of, max9611_of_table); diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index da1421bd7b6294..57cff3772ebedc 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -459,16 +459,6 @@ static int mcp320x_probe(struct spi_device *spi) } static const struct of_device_id mcp320x_dt_ids[] = { - /* NOTE: The use of compatibles with no vendor prefix is deprecated. 
*/ - { .compatible = "mcp3001" }, - { .compatible = "mcp3002" }, - { .compatible = "mcp3004" }, - { .compatible = "mcp3008" }, - { .compatible = "mcp3201" }, - { .compatible = "mcp3202" }, - { .compatible = "mcp3204" }, - { .compatible = "mcp3208" }, - { .compatible = "mcp3301" }, { .compatible = "microchip,mcp3001" }, { .compatible = "microchip,mcp3002" }, { .compatible = "microchip,mcp3004" }, diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 0778a8fb68665c..50834fdcf7388f 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/iio/adc/mcp3564.c b/drivers/iio/adc/mcp3564.c index d83bed0e63d220..a68f1cd6883e88 100644 --- a/drivers/iio/adc/mcp3564.c +++ b/drivers/iio/adc/mcp3564.c @@ -349,8 +349,6 @@ struct mcp3564_chip_info { * struct mcp3564_state - working data for a ADC device * @chip_info: chip specific data * @spi: SPI device structure - * @vref: the regulator device used as a voltage reference in case - * external voltage reference is used * @vref_mv: voltage reference value in miliVolts * @lock: synchronize access to driver's state members * @dev_addr: hardware device address @@ -369,7 +367,6 @@ struct mcp3564_chip_info { struct mcp3564_state { const struct mcp3564_chip_info *chip_info; struct spi_device *spi; - struct regulator *vref; unsigned short vref_mv; struct mutex lock; /* Synchronize access to driver's state members */ u8 dev_addr; @@ -1085,11 +1082,6 @@ static int mcp3564_parse_fw_children(struct iio_dev *indio_dev) return 0; } -static void mcp3564_disable_reg(void *reg) -{ - regulator_disable(reg); -} - static void mcp3564_fill_scale_tbls(struct mcp3564_state *adc) { unsigned int pow = adc->chip_info->resolution - 1; @@ -1110,7 +1102,7 @@ static void mcp3564_fill_scale_tbls(struct mcp3564_state *adc) } } -static int mcp3564_config(struct iio_dev *indio_dev) +static int mcp3564_config(struct iio_dev *indio_dev, bool *use_internal_vref_attr) { struct mcp3564_state *adc = iio_priv(indio_dev); struct device *dev = &adc->spi->dev; @@ -1119,6 +1111,7 @@ static int mcp3564_config(struct iio_dev *indio_dev) enum mcp3564_ids ids; int ret = 0; unsigned int tmp = 0x01; + bool internal_vref; bool err = false; /* @@ -1218,36 +1211,22 @@ static int mcp3564_config(struct iio_dev *indio_dev) dev_dbg(dev, "Found %s chip\n", adc->chip_info->name); - adc->vref = devm_regulator_get_optional(dev, "vref"); - if (IS_ERR(adc->vref)) { - if (PTR_ERR(adc->vref) != -ENODEV) - return dev_err_probe(dev, PTR_ERR(adc->vref), - "failed to get regulator\n"); + ret = devm_regulator_get_enable_read_voltage(dev, "vref"); + if (ret < 0 && ret != -ENODEV) + return dev_err_probe(dev, ret, "Failed to get vref voltage\n"); + + internal_vref = ret == -ENODEV; + adc->vref_mv = internal_vref ? 
MCP3564R_INT_VREF_MV : ret / MILLI; + *use_internal_vref_attr = internal_vref; + if (internal_vref) { /* Check if chip has internal vref */ if (!adc->have_vref) - return dev_err_probe(dev, PTR_ERR(adc->vref), - "Unknown Vref\n"); - adc->vref = NULL; + return dev_err_probe(dev, -ENODEV, "Unknown Vref\n"); + dev_dbg(dev, "%s: Using internal Vref\n", __func__); } else { - ret = regulator_enable(adc->vref); - if (ret) - return ret; - - ret = devm_add_action_or_reset(dev, mcp3564_disable_reg, - adc->vref); - if (ret) - return ret; - dev_dbg(dev, "%s: Using External Vref\n", __func__); - - ret = regulator_get_voltage(adc->vref); - if (ret < 0) - return dev_err_probe(dev, ret, - "Failed to read vref regulator\n"); - - adc->vref_mv = ret / MILLI; } ret = mcp3564_parse_fw_children(indio_dev); @@ -1350,10 +1329,8 @@ static int mcp3564_config(struct iio_dev *indio_dev) tmp_reg |= FIELD_PREP(MCP3564_CONFIG0_CLK_SEL_MASK, MCP3564_CONFIG0_USE_INT_CLK); tmp_reg |= MCP3456_CONFIG0_BIT6_DEFAULT; - if (!adc->vref) { + if (internal_vref) tmp_reg |= FIELD_PREP(MCP3456_CONFIG0_VREF_MASK, 1); - adc->vref_mv = MCP3564R_INT_VREF_MV; - } ret = mcp3564_write_8bits(adc, MCP3564_CONFIG0_REG, tmp_reg); @@ -1412,6 +1389,7 @@ static int mcp3564_probe(struct spi_device *spi) int ret; struct iio_dev *indio_dev; struct mcp3564_state *adc; + bool use_internal_vref_attr; indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc)); if (!indio_dev) @@ -1428,7 +1406,7 @@ static int mcp3564_probe(struct spi_device *spi) * enable/disable certain channels * change the sampling rate to the requested value */ - ret = mcp3564_config(indio_dev); + ret = mcp3564_config(indio_dev, &use_internal_vref_attr); if (ret) return dev_err_probe(&spi->dev, ret, "Can't configure MCP356X device\n"); @@ -1440,7 +1418,7 @@ static int mcp3564_probe(struct spi_device *spi) indio_dev->name = adc->chip_info->name; indio_dev->modes = INDIO_DIRECT_MODE; - if (!adc->vref) + if (use_internal_vref_attr) indio_dev->info = &mcp3564r_info; else indio_dev->info = &mcp3564_info; diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c index 7a32e7a1be9d09..b097f04172c80b 100644 --- a/drivers/iio/adc/mcp3911.c +++ b/drivers/iio/adc/mcp3911.c @@ -23,7 +23,7 @@ #include #include -#include +#include #define MCP3911_REG_CHANNEL0 0x00 #define MCP3911_REG_CHANNEL1 0x03 @@ -103,7 +103,7 @@ struct mcp3911_chip_info { const struct iio_chan_spec *channels; unsigned int num_channels; - int (*config)(struct mcp3911 *adc); + int (*config)(struct mcp3911 *adc, bool external_vref); int (*get_osr)(struct mcp3911 *adc, u32 *val); int (*set_osr)(struct mcp3911 *adc, u32 val); int (*enable_offset)(struct mcp3911 *adc, bool enable); @@ -115,7 +115,6 @@ struct mcp3911_chip_info { struct mcp3911 { struct spi_device *spi; struct mutex lock; - struct regulator *vref; struct clk *clki; u32 dev_addr; struct iio_trigger *trig; @@ -385,23 +384,11 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev, } } -static int mcp3911_calc_scale_table(struct mcp3911 *adc) +static int mcp3911_calc_scale_table(u32 vref_mv) { - struct device *dev = &adc->spi->dev; - u32 ref = MCP3911_INT_VREF_MV; u32 div; - int ret; u64 tmp; - if (adc->vref) { - ret = regulator_get_voltage(adc->vref); - if (ret < 0) { - return dev_err_probe(dev, ret, "failed to get vref voltage\n"); - } - - ref = ret / 1000; - } - /* * For 24-bit Conversion * Raw = ((Voltage)/(Vref) * 2^23 * Gain * 1.5 @@ -412,7 +399,7 @@ static int mcp3911_calc_scale_table(struct mcp3911 *adc) */ for (int i = 0; i < MCP3911_NUM_SCALES; i++) { 
div = 12582912 * BIT(i); - tmp = div_s64((s64)ref * 1000000000LL, div); + tmp = div_s64((s64)vref_mv * 1000000000LL, div); mcp3911_scale_table[i][0] = 0; mcp3911_scale_table[i][1] = tmp; @@ -523,7 +510,7 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p) goto out; } - for_each_set_bit(scan_index, indio_dev->active_scan_mask, indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, scan_index) { const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index]; adc->scan.channels[i] = get_unaligned_be24(&adc->rx_buf[scan_chan->channel * 3]); @@ -544,7 +531,7 @@ static const struct iio_info mcp3911_info = { .write_raw_get_fmt = mcp3911_write_raw_get_fmt, }; -static int mcp3911_config(struct mcp3911 *adc) +static int mcp3911_config(struct mcp3911 *adc, bool external_vref) { struct device *dev = &adc->spi->dev; u32 regval; @@ -555,7 +542,7 @@ static int mcp3911_config(struct mcp3911 *adc) return ret; regval &= ~MCP3911_CONFIG_VREFEXT; - if (adc->vref) { + if (external_vref) { dev_dbg(dev, "use external voltage reference\n"); regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 1); } else { @@ -610,7 +597,7 @@ static int mcp3911_config(struct mcp3911 *adc) return mcp3911_write(adc, MCP3911_REG_GAIN, regval, 1); } -static int mcp3910_config(struct mcp3911 *adc) +static int mcp3910_config(struct mcp3911 *adc, bool external_vref) { struct device *dev = &adc->spi->dev; u32 regval; @@ -621,7 +608,7 @@ static int mcp3910_config(struct mcp3911 *adc) return ret; regval &= ~MCP3910_CONFIG1_VREFEXT; - if (adc->vref) { + if (external_vref) { dev_dbg(dev, "use external voltage reference\n"); regval |= FIELD_PREP(MCP3910_CONFIG1_VREFEXT, 1); } else { @@ -677,11 +664,6 @@ static int mcp3910_config(struct mcp3911 *adc) return adc->chip->enable_offset(adc, 0); } -static void mcp3911_cleanup_regulator(void *vref) -{ - regulator_disable(vref); -} - static int mcp3911_set_trigger_state(struct iio_trigger *trig, bool enable) { struct mcp3911 *adc = iio_trigger_get_drvdata(trig); @@ -704,6 +686,8 @@ static int mcp3911_probe(struct spi_device *spi) struct device *dev = &spi->dev; struct iio_dev *indio_dev; struct mcp3911 *adc; + bool external_vref; + u32 vref_mv; int ret; indio_dev = devm_iio_device_alloc(dev, sizeof(*adc)); @@ -714,23 +698,12 @@ static int mcp3911_probe(struct spi_device *spi) adc->spi = spi; adc->chip = spi_get_device_match_data(spi); - adc->vref = devm_regulator_get_optional(dev, "vref"); - if (IS_ERR(adc->vref)) { - if (PTR_ERR(adc->vref) == -ENODEV) { - adc->vref = NULL; - } else { - return dev_err_probe(dev, PTR_ERR(adc->vref), "failed to get regulator\n"); - } + ret = devm_regulator_get_enable_read_voltage(dev, "vref"); + if (ret < 0 && ret != -ENODEV) + return dev_err_probe(dev, ret, "failed to get vref voltage\n"); - } else { - ret = regulator_enable(adc->vref); - if (ret) - return ret; - - ret = devm_add_action_or_reset(dev, mcp3911_cleanup_regulator, adc->vref); - if (ret) - return ret; - } + external_vref = ret != -ENODEV; + vref_mv = external_vref ? 
ret / 1000 : MCP3911_INT_VREF_MV; adc->clki = devm_clk_get_enabled(dev, NULL); if (IS_ERR(adc->clki)) { @@ -755,11 +728,11 @@ static int mcp3911_probe(struct spi_device *spi) } dev_dbg(dev, "use device address %i\n", adc->dev_addr); - ret = adc->chip->config(adc); + ret = adc->chip->config(adc, external_vref); if (ret) return ret; - ret = mcp3911_calc_scale_table(adc); + ret = mcp3911_calc_scale_table(vref_mv); if (ret) return ret; diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c index 5f672765d4a20e..5fbf9b6abd9c7c 100644 --- a/drivers/iio/adc/mp2629_adc.c +++ b/drivers/iio/adc/mp2629_adc.c @@ -184,8 +184,8 @@ static void mp2629_adc_remove(struct platform_device *pdev) } static const struct of_device_id mp2629_adc_of_match[] = { - { .compatible = "mps,mp2629_adc"}, - {} + { .compatible = "mps,mp2629_adc" }, + { } }; MODULE_DEVICE_TABLE(of, mp2629_adc_of_match); diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c index 3710473e526fc8..83161e6d29b901 100644 --- a/drivers/iio/adc/mt6360-adc.c +++ b/drivers/iio/adc/mt6360-adc.c @@ -16,7 +16,7 @@ #include #include -#include +#include #define MT6360_REG_PMUCHGCTRL3 0x313 #define MT6360_REG_PMUADCCFG 0x356 @@ -268,7 +268,7 @@ static irqreturn_t mt6360_adc_trigger_handler(int irq, void *p) int i = 0, bit, val, ret; memset(&data, 0, sizeof(data)); - for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = mt6360_adc_read_channel(mad, bit, &val); if (ret < 0) { dev_warn(&indio_dev->dev, "Failed to get channel %d conversion val\n", bit); @@ -355,7 +355,7 @@ static int mt6360_adc_probe(struct platform_device *pdev) static const struct of_device_id mt6360_adc_of_id[] = { { .compatible = "mediatek,mt6360-adc", }, - {} + { } }; MODULE_DEVICE_TABLE(of, mt6360_adc_of_id); diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c index 600151a62f1fe3..458544cb8ee48a 100644 --- a/drivers/iio/adc/nau7802.c +++ b/drivers/iio/adc/nau7802.c @@ -539,7 +539,7 @@ MODULE_DEVICE_TABLE(i2c, nau7802_i2c_id); static const struct of_device_id nau7802_dt_ids[] = { { .compatible = "nuvoton,nau7802" }, - {}, + { } }; MODULE_DEVICE_TABLE(of, nau7802_dt_ids); diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c new file mode 100644 index 00000000000000..36e813d9c73f1c --- /dev/null +++ b/drivers/iio/adc/pac1921.c @@ -0,0 +1,1261 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * IIO driver for PAC1921 High-Side Power/Current Monitor + * + * Copyright (C) 2024 Matteo Martelli + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* pac1921 registers */ +#define PAC1921_REG_GAIN_CFG 0x00 +#define PAC1921_REG_INT_CFG 0x01 +#define PAC1921_REG_CONTROL 0x02 +#define PAC1921_REG_VBUS 0x10 +#define PAC1921_REG_VSENSE 0x12 +#define PAC1921_REG_OVERFLOW_STS 0x1C +#define PAC1921_REG_VPOWER 0x1D + +/* pac1921 gain configuration bits */ +#define PAC1921_GAIN_DI_GAIN_MASK GENMASK(5, 3) +#define PAC1921_GAIN_DV_GAIN_MASK GENMASK(2, 0) + +/* pac1921 integration configuration bits */ +#define PAC1921_INT_CFG_SMPL_MASK GENMASK(7, 4) +#define PAC1921_INT_CFG_VSFEN BIT(3) +#define PAC1921_INT_CFG_VBFEN BIT(2) +#define PAC1921_INT_CFG_RIOV BIT(1) +#define PAC1921_INT_CFG_INTEN BIT(0) + +/* pac1921 control bits */ +#define PAC1921_CONTROL_MXSL_MASK GENMASK(7, 6) +enum pac1921_mxsl { + PAC1921_MXSL_VPOWER_PIN = 0, + PAC1921_MXSL_VSENSE_FREE_RUN = 1, + PAC1921_MXSL_VBUS_FREE_RUN = 2, + PAC1921_MXSL_VPOWER_FREE_RUN 
= 3, +}; +#define PAC1921_CONTROL_SLEEP BIT(2) + +/* pac1921 result registers mask and resolution */ +#define PAC1921_RES_MASK GENMASK(15, 6) +#define PAC1921_RES_RESOLUTION 1023 + +/* pac1921 overflow status bits */ +#define PAC1921_OVERFLOW_VSOV BIT(2) +#define PAC1921_OVERFLOW_VBOV BIT(1) +#define PAC1921_OVERFLOW_VPOV BIT(0) + +/* pac1921 constants */ +#define PAC1921_MAX_VSENSE_MV 100 +#define PAC1921_MAX_VBUS_V 32 +/* Time to first communication after power up (tINT_T) */ +#define PAC1921_POWERUP_TIME_MS 20 +/* Time from Sleep State to Start of Integration Period (tSLEEP_TO_INT) */ +#define PAC1921_SLEEP_TO_INT_TIME_US 86 + +/* pac1921 defaults */ +#define PAC1921_DEFAULT_DV_GAIN 0 /* 2^(value): 1x gain (HW default) */ +#define PAC1921_DEFAULT_DI_GAIN 0 /* 2^(value): 1x gain (HW default) */ +#define PAC1921_DEFAULT_NUM_SAMPLES 0 /* 2^(value): 1 sample (HW default) */ + +/* + * Pre-computed scale factors for BUS voltage + * format: IIO_VAL_INT_PLUS_NANO + * unit: mV + * + * Vbus scale (mV) = max_vbus (mV) / dv_gain / resolution + */ +static const int pac1921_vbus_scales[][2] = { + { 31, 280547409 }, /* dv_gain x1 */ + { 15, 640273704 }, /* dv_gain x2 */ + { 7, 820136852 }, /* dv_gain x4 */ + { 3, 910068426 }, /* dv_gain x8 */ + { 1, 955034213 }, /* dv_gain x16 */ + { 0, 977517106 }, /* dv_gain x32 */ +}; + +/* + * Pre-computed scales for SENSE voltage + * format: IIO_VAL_INT_PLUS_NANO + * unit: mV + * + * Vsense scale (mV) = max_vsense (mV) / di_gain / resolution + */ +static const int pac1921_vsense_scales[][2] = { + { 0, 97751710 }, /* di_gain x1 */ + { 0, 48875855 }, /* di_gain x2 */ + { 0, 24437927 }, /* di_gain x4 */ + { 0, 12218963 }, /* di_gain x8 */ + { 0, 6109481 }, /* di_gain x16 */ + { 0, 3054740 }, /* di_gain x32 */ + { 0, 1527370 }, /* di_gain x64 */ + { 0, 763685 }, /* di_gain x128 */ +}; + +/* + * Numbers of samples used to integrate measurements at the end of an + * integration period. + * + * Changing the number of samples affects the integration period: higher the + * number of samples, longer the integration period. + * + * These correspond to the oversampling ratios available exposed to userspace. + */ +static const int pac1921_int_num_samples[] = { + 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048 +}; + +/* + * The integration period depends on the configuration of number of integration + * samples, measurement resolution and post filters. The following array + * contains integration periods, in microsecs unit, based on table 4-5 from + * datasheet considering power integration mode, 14-Bit resolution and post + * filters on. Each index corresponds to a specific number of samples from 1 + * to 2048. 
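+ * + * For example, at the default single-sample setting the table below gives a + * 2720 us period, so the result registers refresh at roughly MICRO / 2720 + * ~= 368 Hz, which is the value later reported as the sampling frequency.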
+ */ +static const unsigned int pac1921_int_periods_usecs[] = { + 2720, /* 1 sample */ + 4050, /* 2 samples */ + 6790, /* 4 samples */ + 12200, /* 8 samples */ + 23000, /* 16 samples */ + 46000, /* 32 samples */ + 92000, /* 64 samples */ + 184000, /* 128 samples */ + 368000, /* 256 samples */ + 736000, /* 512 samples */ + 1471000, /* 1024 samples */ + 2941000 /* 2048 samples */ +}; + +/* pac1921 regmap configuration */ +static const struct regmap_range pac1921_regmap_wr_ranges[] = { + regmap_reg_range(PAC1921_REG_GAIN_CFG, PAC1921_REG_CONTROL), +}; + +static const struct regmap_access_table pac1921_regmap_wr_table = { + .yes_ranges = pac1921_regmap_wr_ranges, + .n_yes_ranges = ARRAY_SIZE(pac1921_regmap_wr_ranges), +}; + +static const struct regmap_range pac1921_regmap_rd_ranges[] = { + regmap_reg_range(PAC1921_REG_GAIN_CFG, PAC1921_REG_CONTROL), + regmap_reg_range(PAC1921_REG_VBUS, PAC1921_REG_VPOWER + 1), +}; + +static const struct regmap_access_table pac1921_regmap_rd_table = { + .yes_ranges = pac1921_regmap_rd_ranges, + .n_yes_ranges = ARRAY_SIZE(pac1921_regmap_rd_ranges), +}; + +static const struct regmap_config pac1921_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .rd_table = &pac1921_regmap_rd_table, + .wr_table = &pac1921_regmap_wr_table, +}; + +enum pac1921_channels { + PAC1921_CHAN_VBUS = 0, + PAC1921_CHAN_VSENSE = 1, + PAC1921_CHAN_CURRENT = 2, + PAC1921_CHAN_POWER = 3, +}; +#define PAC1921_NUM_MEAS_CHANS 4 + +struct pac1921_priv { + struct i2c_client *client; + struct regmap *regmap; + struct regulator *vdd; + struct iio_info iio_info; + + /* + * Synchronize access to private members, and ensure atomicity of + * consecutive regmap operations. + */ + struct mutex lock; + + u32 rshunt_uohm; /* uOhm */ + u8 dv_gain; + u8 di_gain; + u8 n_samples; + u8 prev_ovf_flags; + u8 ovf_enabled_events; + + bool first_integr_started; + bool first_integr_done; + unsigned long integr_started_time_jiffies; + unsigned int integr_period_usecs; + + int current_scales[ARRAY_SIZE(pac1921_vsense_scales)][2]; + + struct { + u16 chan[PAC1921_NUM_MEAS_CHANS]; + s64 timestamp __aligned(8); + } scan; +}; + +/* + * Check if first integration after configuration update has completed. + * + * Must be called with lock held. + */ +static bool pac1921_data_ready(struct pac1921_priv *priv) +{ + if (!priv->first_integr_started) + return false; + + if (!priv->first_integr_done) { + unsigned long t_ready; + + /* + * Data valid after the device entered into integration state, + * considering worst case where the device was in sleep state, + * and completed the first integration period. + */ + t_ready = priv->integr_started_time_jiffies + + usecs_to_jiffies(PAC1921_SLEEP_TO_INT_TIME_US) + + usecs_to_jiffies(priv->integr_period_usecs); + + if (time_before(jiffies, t_ready)) + return false; + + priv->first_integr_done = true; + } + + return true; +} + +static inline void pac1921_calc_scale(int dividend, int divisor, int *val, + int *val2) +{ + s64 tmp; + + tmp = div_s64(dividend * (s64)NANO, divisor); + *val = (int)div_s64_rem(tmp, NANO, val2); +} + +/* + * Fill the table of scale factors for current + * format: IIO_VAL_INT_PLUS_NANO + * unit: mA + * + * Vsense LSB (nV) = max_vsense (nV) * di_gain / resolution + * Current scale (mA) = Vsense LSB (nV) / shunt (uOhm) + * + * Must be called with held lock when updating after first initialization. 
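+ * + * Worked example (shunt value assumed, not mandated by the driver): with a + * 100 mOhm shunt (rshunt_uohm == 100000) and DI_GAIN x1, the Vsense LSB is + * DIV_ROUND_CLOSEST(100000000, 1023) == 97752 nV, so the table entry becomes + * 97752 / 100000 ~= 0.977520000 mA per LSB, i.e. { 0, 977520000 }.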
+ */ +static void pac1921_calc_current_scales(struct pac1921_priv *priv) +{ + for (unsigned int i = 0; i < ARRAY_SIZE(priv->current_scales); i++) { + int max = (PAC1921_MAX_VSENSE_MV * MICRO) >> i; + int vsense_lsb = DIV_ROUND_CLOSEST(max, PAC1921_RES_RESOLUTION); + + pac1921_calc_scale(vsense_lsb, (int)priv->rshunt_uohm, + &priv->current_scales[i][0], + &priv->current_scales[i][1]); + } +} + +/* + * Check if overflow occurred and if so, push the corresponding events. + * + * Must be called with lock held. + */ +static int pac1921_check_push_overflow(struct iio_dev *indio_dev, s64 timestamp) +{ + struct pac1921_priv *priv = iio_priv(indio_dev); + unsigned int flags; + int ret; + + ret = regmap_read(priv->regmap, PAC1921_REG_OVERFLOW_STS, &flags); + if (ret) + return ret; + + if (flags & PAC1921_OVERFLOW_VBOV && + !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VBOV) && + priv->ovf_enabled_events & PAC1921_OVERFLOW_VBOV) { + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE( + IIO_VOLTAGE, PAC1921_CHAN_VBUS, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + timestamp); + } + if (flags & PAC1921_OVERFLOW_VSOV && + !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VSOV) && + priv->ovf_enabled_events & PAC1921_OVERFLOW_VSOV) { + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE( + IIO_VOLTAGE, PAC1921_CHAN_VSENSE, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + timestamp); + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE( + IIO_CURRENT, PAC1921_CHAN_CURRENT, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + timestamp); + } + if (flags & PAC1921_OVERFLOW_VPOV && + !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VPOV) && + priv->ovf_enabled_events & PAC1921_OVERFLOW_VPOV) { + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE( + IIO_POWER, PAC1921_CHAN_POWER, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + timestamp); + } + + priv->prev_ovf_flags = (u8)flags; + + return 0; +} + +/* + * Read the value from a result register + * + * Result registers contain the most recent averaged values of Vbus, Vsense and + * Vpower. Each value is 10 bits wide and spread across two consecutive 8 bit + * registers, with 6 bit LSB zero padding. 
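+ * + * For instance (raw bytes assumed): a read returning { 0xff, 0xc0 } is 0xffc0 + * in big-endian, and FIELD_GET(PAC1921_RES_MASK, 0xffc0) == 1023, i.e. the + * full-scale PAC1921_RES_RESOLUTION value.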
+ */ +static int pac1921_read_res(struct pac1921_priv *priv, unsigned long reg, + u16 *val) +{ + int ret = regmap_bulk_read(priv->regmap, (unsigned int)reg, val, + sizeof(*val)); + if (ret) + return ret; + + *val = FIELD_GET(PAC1921_RES_MASK, get_unaligned_be16(val)); + + return 0; +} + +static int pac1921_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + struct pac1921_priv *priv = iio_priv(indio_dev); + + guard(mutex)(&priv->lock); + + switch (mask) { + case IIO_CHAN_INFO_RAW: { + s64 ts; + u16 res_val; + int ret; + + if (!pac1921_data_ready(priv)) + return -EBUSY; + + ts = iio_get_time_ns(indio_dev); + + ret = pac1921_check_push_overflow(indio_dev, ts); + if (ret) + return ret; + + ret = pac1921_read_res(priv, chan->address, &res_val); + if (ret) + return ret; + + *val = (int)res_val; + + return IIO_VAL_INT; + } + case IIO_CHAN_INFO_SCALE: + switch (chan->channel) { + case PAC1921_CHAN_VBUS: + *val = pac1921_vbus_scales[priv->dv_gain][0]; + *val2 = pac1921_vbus_scales[priv->dv_gain][1]; + return IIO_VAL_INT_PLUS_NANO; + + case PAC1921_CHAN_VSENSE: + *val = pac1921_vsense_scales[priv->di_gain][0]; + *val2 = pac1921_vsense_scales[priv->di_gain][1]; + return IIO_VAL_INT_PLUS_NANO; + + case PAC1921_CHAN_CURRENT: + *val = priv->current_scales[priv->di_gain][0]; + *val2 = priv->current_scales[priv->di_gain][1]; + return IIO_VAL_INT_PLUS_NANO; + + case PAC1921_CHAN_POWER: { + /* + * Power scale factor in mW: + * Current scale (mA) * max_vbus (V) / dv_gain + */ + + /* Get current scale based on di_gain */ + int *curr_scale = priv->current_scales[priv->di_gain]; + + /* Convert current_scale from INT_PLUS_NANO to INT */ + s64 tmp = curr_scale[0] * (s64)NANO + curr_scale[1]; + + /* Multiply by max_vbus (V) / dv_gain */ + tmp *= PAC1921_MAX_VBUS_V >> (int)priv->dv_gain; + + /* Convert back to INT_PLUS_NANO */ + *val = (int)div_s64_rem(tmp, NANO, val2); + + return IIO_VAL_INT_PLUS_NANO; + } + default: + return -EINVAL; + } + + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + *val = pac1921_int_num_samples[priv->n_samples]; + return IIO_VAL_INT; + + case IIO_CHAN_INFO_SAMP_FREQ: + /* + * The sampling frequency (Hz) is read-only and corresponds to + * how often the device provides integrated measurements into + * the result registers, thus it's 1/integration_period. + * The integration period depends on the number of integration + * samples, measurement resolution and post filters. + * + * 1/(integr_period_usecs/MICRO) = MICRO/integr_period_usecs + */ + *val = MICRO; + *val2 = (int)priv->integr_period_usecs; + return IIO_VAL_FRACTIONAL; + + default: + return -EINVAL; + } +} + +static int pac1921_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + *type = IIO_VAL_INT; + *vals = pac1921_int_num_samples; + *length = ARRAY_SIZE(pac1921_int_num_samples); + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +/* + * Perform configuration update sequence: set the device into read state, then + * write the config register and set the device back into integration state. + * Also reset integration start time and mark first integration to be yet + * completed. + * + * Must be called with lock held. 
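+ *
+ * As a consequence, raw reads return -EBUSY until the first integration
+ * period with the new configuration has elapsed (see pac1921_data_ready()).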
+ */ +static int pac1921_update_cfg_reg(struct pac1921_priv *priv, unsigned int reg, + unsigned int mask, unsigned int val) +{ + /* Enter READ state before configuration */ + int ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG, + PAC1921_INT_CFG_INTEN, 0); + if (ret) + return ret; + + /* Update configuration value */ + ret = regmap_update_bits(priv->regmap, reg, mask, val); + if (ret) + return ret; + + /* Re-enable integration */ + ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG, + PAC1921_INT_CFG_INTEN, PAC1921_INT_CFG_INTEN); + if (ret) + return ret; + + /* + * Reset integration started time and mark this integration period as + * the first one so that new measurements will be considered as valid + * only at the end of this integration period. + */ + priv->integr_started_time_jiffies = jiffies; + priv->first_integr_done = false; + + return 0; +} + +/* + * Retrieve the index of the given scale (represented by scale_val and + * scale_val2) from scales_tbl. The returned index (if found) is the log2 of + * the gain corresponding to the given scale. + * + * Must be called with lock held if the scales_tbl can change runtime (e.g. for + * the current scales table) + */ +static int pac1921_lookup_scale(const int (*const scales_tbl)[2], size_t size, + int scale_val, int scale_val2) +{ + for (unsigned int i = 0; i < size; i++) + if (scales_tbl[i][0] == scale_val && + scales_tbl[i][1] == scale_val2) + return (int)i; + + return -EINVAL; +} + +/* + * Configure device with the given gain (only if changed) + * + * Must be called with lock held. + */ +static int pac1921_update_gain(struct pac1921_priv *priv, u8 *priv_val, u8 gain, + unsigned int mask) +{ + unsigned int reg_val; + int ret; + + if (*priv_val == gain) + return 0; + + reg_val = (gain << __ffs(mask)) & mask; + ret = pac1921_update_cfg_reg(priv, PAC1921_REG_GAIN_CFG, mask, reg_val); + if (ret) + return ret; + + *priv_val = gain; + + return 0; +} + +/* + * Given a scale factor represented by scale_val and scale_val2 with format + * IIO_VAL_INT_PLUS_NANO, find the corresponding gain value and write it to the + * device. + * + * Must be called with lock held. + */ +static int pac1921_update_gain_from_scale(struct pac1921_priv *priv, + struct iio_chan_spec const *chan, + int scale_val, int scale_val2) +{ + int ret; + + switch (chan->channel) { + case PAC1921_CHAN_VBUS: + ret = pac1921_lookup_scale(pac1921_vbus_scales, + ARRAY_SIZE(pac1921_vbus_scales), + scale_val, scale_val2); + if (ret < 0) + return ret; + + return pac1921_update_gain(priv, &priv->dv_gain, (u8)ret, + PAC1921_GAIN_DV_GAIN_MASK); + case PAC1921_CHAN_VSENSE: + ret = pac1921_lookup_scale(pac1921_vsense_scales, + ARRAY_SIZE(pac1921_vsense_scales), + scale_val, scale_val2); + if (ret < 0) + return ret; + + return pac1921_update_gain(priv, &priv->di_gain, (u8)ret, + PAC1921_GAIN_DI_GAIN_MASK); + case PAC1921_CHAN_CURRENT: + ret = pac1921_lookup_scale(priv->current_scales, + ARRAY_SIZE(priv->current_scales), + scale_val, scale_val2); + if (ret < 0) + return ret; + + return pac1921_update_gain(priv, &priv->di_gain, (u8)ret, + PAC1921_GAIN_DI_GAIN_MASK); + default: + return -EINVAL; + } +} + +/* + * Retrieve the index of the given number of samples from the constant table. + * The returned index (if found) is the log2 of the given num_samples. 
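+ *
+ * For example, num_samples = 8 maps to index 3 and num_samples = 512 maps to
+ * index 9.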
+ */ +static int pac1921_lookup_int_num_samples(int num_samples) +{ + for (unsigned int i = 0; i < ARRAY_SIZE(pac1921_int_num_samples); i++) + if (pac1921_int_num_samples[i] == num_samples) + return (int)i; + + return -EINVAL; +} + +/* + * Update the device with the given number of integration samples. + * + * Must be called with lock held. + */ +static int pac1921_update_int_num_samples(struct pac1921_priv *priv, + int num_samples) +{ + unsigned int reg_val; + u8 n_samples; + int ret; + + ret = pac1921_lookup_int_num_samples(num_samples); + if (ret < 0) + return ret; + + n_samples = (u8)ret; + + if (priv->n_samples == n_samples) + return 0; + + reg_val = FIELD_PREP(PAC1921_INT_CFG_SMPL_MASK, n_samples); + + ret = pac1921_update_cfg_reg(priv, PAC1921_REG_INT_CFG, + PAC1921_INT_CFG_SMPL_MASK, reg_val); + if (ret) + return ret; + + priv->n_samples = n_samples; + + priv->integr_period_usecs = pac1921_int_periods_usecs[priv->n_samples]; + + return 0; +} + +static int pac1921_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long info) +{ + switch (info) { + case IIO_CHAN_INFO_SCALE: + return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int pac1921_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, + int val2, long mask) +{ + struct pac1921_priv *priv = iio_priv(indio_dev); + + guard(mutex)(&priv->lock); + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + return pac1921_update_gain_from_scale(priv, chan, val, val2); + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + return pac1921_update_int_num_samples(priv, val); + default: + return -EINVAL; + } +} + +static int pac1921_read_label(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, char *label) +{ + switch (chan->channel) { + case PAC1921_CHAN_VBUS: + return sprintf(label, "vbus\n"); + case PAC1921_CHAN_VSENSE: + return sprintf(label, "vsense\n"); + case PAC1921_CHAN_CURRENT: + return sprintf(label, "current\n"); + case PAC1921_CHAN_POWER: + return sprintf(label, "power\n"); + default: + return -EINVAL; + } +} + +static int pac1921_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct pac1921_priv *priv = iio_priv(indio_dev); + + guard(mutex)(&priv->lock); + + switch (chan->channel) { + case PAC1921_CHAN_VBUS: + return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VBOV); + case PAC1921_CHAN_VSENSE: + case PAC1921_CHAN_CURRENT: + return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VSOV); + case PAC1921_CHAN_POWER: + return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VPOV); + default: + return -EINVAL; + } +} + +static int pac1921_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, int state) +{ + struct pac1921_priv *priv = iio_priv(indio_dev); + u8 ovf_bit; + + guard(mutex)(&priv->lock); + + switch (chan->channel) { + case PAC1921_CHAN_VBUS: + ovf_bit = PAC1921_OVERFLOW_VBOV; + break; + case PAC1921_CHAN_VSENSE: + case PAC1921_CHAN_CURRENT: + ovf_bit = PAC1921_OVERFLOW_VSOV; + break; + case PAC1921_CHAN_POWER: + ovf_bit = PAC1921_OVERFLOW_VPOV; + break; + default: + return -EINVAL; + } + + if (state) + priv->ovf_enabled_events |= ovf_bit; + else + priv->ovf_enabled_events &= ~ovf_bit; + + return 0; +} + +static int pac1921_read_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, 
+ enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int *val, + int *val2) +{ + switch (info) { + case IIO_EV_INFO_VALUE: + *val = PAC1921_RES_RESOLUTION; + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static const struct iio_info pac1921_iio = { + .read_raw = pac1921_read_raw, + .read_avail = pac1921_read_avail, + .write_raw = pac1921_write_raw, + .write_raw_get_fmt = pac1921_write_raw_get_fmt, + .read_label = pac1921_read_label, + .read_event_config = pac1921_read_event_config, + .write_event_config = pac1921_write_event_config, + .read_event_value = pac1921_read_event_value, +}; + +static ssize_t pac1921_read_shunt_resistor(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + char *buf) +{ + struct pac1921_priv *priv = iio_priv(indio_dev); + int vals[2]; + + if (chan->channel != PAC1921_CHAN_CURRENT) + return -EINVAL; + + guard(mutex)(&priv->lock); + + vals[0] = (int)priv->rshunt_uohm; + vals[1] = MICRO; + + return iio_format_value(buf, IIO_VAL_FRACTIONAL, 1, vals); +} + +static ssize_t pac1921_write_shunt_resistor(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len) +{ + struct pac1921_priv *priv = iio_priv(indio_dev); + u64 rshunt_uohm; + int val, val_fract; + int ret; + + if (chan->channel != PAC1921_CHAN_CURRENT) + return -EINVAL; + + ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract); + if (ret) + return ret; + + rshunt_uohm = (u32)val * MICRO + (u32)val_fract; + if (rshunt_uohm == 0 || rshunt_uohm > INT_MAX) + return -EINVAL; + + guard(mutex)(&priv->lock); + + priv->rshunt_uohm = (u32)rshunt_uohm; + + pac1921_calc_current_scales(priv); + + return len; +} + +/* + * Emit on sysfs the list of available scales contained in scales_tbl + * + * TODO:: this function can be replaced with iio_format_avail_list() if the + * latter will ever be exported. + * + * Must be called with lock held if the scales_tbl can change runtime (e.g. for + * the current scales table) + */ +static ssize_t pac1921_format_scale_avail(const int (*const scales_tbl)[2], + size_t size, char *buf) +{ + ssize_t len = 0; + + for (unsigned int i = 0; i < size; i++) { + if (i != 0) { + len += sysfs_emit_at(buf, len, " "); + if (len >= PAGE_SIZE) + return -EFBIG; + } + len += sysfs_emit_at(buf, len, "%d.%09d", scales_tbl[i][0], + scales_tbl[i][1]); + if (len >= PAGE_SIZE) + return -EFBIG; + } + + len += sysfs_emit_at(buf, len, "\n"); + return len; +} + +/* + * Read available scales for a specific channel + * + * NOTE: using extended info insted of iio.read_avail() because access to + * current scales must be locked as they depend on shunt resistor which may + * change runtime. Caller of iio.read_avail() would access the table unlocked + * instead. 
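+ * (The shunt value can be changed at runtime through the "shunt_resistor"
+ * extended info attribute, see pac1921_write_shunt_resistor().)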
+ */ +static ssize_t pac1921_read_scale_avail(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + char *buf) +{ + struct pac1921_priv *priv = iio_priv(indio_dev); + const int (*scales_tbl)[2]; + size_t size; + + switch (chan->channel) { + case PAC1921_CHAN_VBUS: + scales_tbl = pac1921_vbus_scales; + size = ARRAY_SIZE(pac1921_vbus_scales); + return pac1921_format_scale_avail(scales_tbl, size, buf); + + case PAC1921_CHAN_VSENSE: + scales_tbl = pac1921_vsense_scales; + size = ARRAY_SIZE(pac1921_vsense_scales); + return pac1921_format_scale_avail(scales_tbl, size, buf); + + case PAC1921_CHAN_CURRENT: { + guard(mutex)(&priv->lock); + scales_tbl = priv->current_scales; + size = ARRAY_SIZE(priv->current_scales); + return pac1921_format_scale_avail(scales_tbl, size, buf); + } + default: + return -EINVAL; + } +} + +#define PAC1921_EXT_INFO_SCALE_AVAIL { \ + .name = "scale_available", \ + .read = pac1921_read_scale_avail, \ + .shared = IIO_SEPARATE, \ +} + +static const struct iio_chan_spec_ext_info pac1921_ext_info_voltage[] = { + PAC1921_EXT_INFO_SCALE_AVAIL, + {} +}; + +static const struct iio_chan_spec_ext_info pac1921_ext_info_current[] = { + PAC1921_EXT_INFO_SCALE_AVAIL, + { + .name = "shunt_resistor", + .read = pac1921_read_shunt_resistor, + .write = pac1921_write_shunt_resistor, + .shared = IIO_SEPARATE, + }, + {} +}; + +static const struct iio_event_spec pac1921_overflow_event[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_shared_by_all = BIT(IIO_EV_INFO_VALUE), + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + }, +}; + +static const struct iio_chan_spec pac1921_channels[] = { + { + .type = IIO_VOLTAGE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .channel = PAC1921_CHAN_VBUS, + .address = PAC1921_REG_VBUS, + .scan_index = PAC1921_CHAN_VBUS, + .scan_type = { + .sign = 'u', + .realbits = 10, + .storagebits = 16, + .endianness = IIO_CPU + }, + .indexed = 1, + .event_spec = pac1921_overflow_event, + .num_event_specs = ARRAY_SIZE(pac1921_overflow_event), + .ext_info = pac1921_ext_info_voltage, + }, + { + .type = IIO_VOLTAGE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .channel = PAC1921_CHAN_VSENSE, + .address = PAC1921_REG_VSENSE, + .scan_index = PAC1921_CHAN_VSENSE, + .scan_type = { + .sign = 'u', + .realbits = 10, + .storagebits = 16, + .endianness = IIO_CPU + }, + .indexed = 1, + .event_spec = pac1921_overflow_event, + .num_event_specs = ARRAY_SIZE(pac1921_overflow_event), + .ext_info = pac1921_ext_info_voltage, + }, + { + .type = IIO_CURRENT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .channel = PAC1921_CHAN_CURRENT, + .address = PAC1921_REG_VSENSE, + .scan_index = PAC1921_CHAN_CURRENT, + .scan_type = { + .sign = 'u', + .realbits = 10, + .storagebits = 16, + .endianness = IIO_CPU + }, + .event_spec = pac1921_overflow_event, + .num_event_specs = ARRAY_SIZE(pac1921_overflow_event), + 
.ext_info = pac1921_ext_info_current, + }, + { + .type = IIO_POWER, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .channel = PAC1921_CHAN_POWER, + .address = PAC1921_REG_VPOWER, + .scan_index = PAC1921_CHAN_POWER, + .scan_type = { + .sign = 'u', + .realbits = 10, + .storagebits = 16, + .endianness = IIO_CPU + }, + .event_spec = pac1921_overflow_event, + .num_event_specs = ARRAY_SIZE(pac1921_overflow_event), + }, + IIO_CHAN_SOFT_TIMESTAMP(PAC1921_NUM_MEAS_CHANS), +}; + +static irqreturn_t pac1921_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *idev = pf->indio_dev; + struct pac1921_priv *priv = iio_priv(idev); + int ret; + int bit; + int ch = 0; + + guard(mutex)(&priv->lock); + + if (!pac1921_data_ready(priv)) + goto done; + + ret = pac1921_check_push_overflow(idev, pf->timestamp); + if (ret) + goto done; + + iio_for_each_active_channel(idev, bit) { + u16 val; + + ret = pac1921_read_res(priv, idev->channels[ch].address, &val); + if (ret) + goto done; + + priv->scan.chan[ch++] = val; + } + + iio_push_to_buffers_with_timestamp(idev, &priv->scan, pf->timestamp); + +done: + iio_trigger_notify_done(idev->trig); + + return IRQ_HANDLED; +} + +/* + * Initialize device by writing initial configuration and putting it into + * integration state. + * + * Must be called with lock held when called after first initialization + * (e.g. from pm resume) + */ +static int pac1921_init(struct pac1921_priv *priv) +{ + unsigned int val; + int ret; + + /* Enter READ state before configuration */ + ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG, + PAC1921_INT_CFG_INTEN, 0); + if (ret) + return ret; + + /* Configure gains, use 14-bits measurement resolution (HW default) */ + val = FIELD_PREP(PAC1921_GAIN_DI_GAIN_MASK, priv->di_gain) | + FIELD_PREP(PAC1921_GAIN_DV_GAIN_MASK, priv->dv_gain); + ret = regmap_write(priv->regmap, PAC1921_REG_GAIN_CFG, val); + if (ret) + return ret; + + /* + * Configure integration: + * - num of integration samples + * - filters enabled (HW default) + * - set READ/INT pin override (RIOV) to control operation mode via + * register instead of pin + */ + val = FIELD_PREP(PAC1921_INT_CFG_SMPL_MASK, priv->n_samples) | + PAC1921_INT_CFG_VSFEN | PAC1921_INT_CFG_VBFEN | + PAC1921_INT_CFG_RIOV; + ret = regmap_write(priv->regmap, PAC1921_REG_INT_CFG, val); + if (ret) + return ret; + + /* + * Init control register: + * - VPower free run integration mode + * - OUT pin full scale range: 3V (HW detault) + * - no timeout, no sleep, no sleep override, no recalc (HW defaults) + */ + val = FIELD_PREP(PAC1921_CONTROL_MXSL_MASK, + PAC1921_MXSL_VPOWER_FREE_RUN); + ret = regmap_write(priv->regmap, PAC1921_REG_CONTROL, val); + if (ret) + return ret; + + /* Enable integration */ + ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG, + PAC1921_INT_CFG_INTEN, PAC1921_INT_CFG_INTEN); + if (ret) + return ret; + + priv->first_integr_started = true; + priv->integr_started_time_jiffies = jiffies; + priv->integr_period_usecs = pac1921_int_periods_usecs[priv->n_samples]; + + return 0; +} + +static int pac1921_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct pac1921_priv *priv = iio_priv(indio_dev); + int ret; + + guard(mutex)(&priv->lock); + + priv->first_integr_started = false; + priv->first_integr_done = 
false; + + ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG, + PAC1921_INT_CFG_INTEN, 0); + if (ret) + return ret; + + ret = regmap_update_bits(priv->regmap, PAC1921_REG_CONTROL, + PAC1921_CONTROL_SLEEP, PAC1921_CONTROL_SLEEP); + if (ret) + return ret; + + return regulator_disable(priv->vdd); + +} + +static int pac1921_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct pac1921_priv *priv = iio_priv(indio_dev); + int ret; + + guard(mutex)(&priv->lock); + + ret = regulator_enable(priv->vdd); + if (ret) + return ret; + + msleep(PAC1921_POWERUP_TIME_MS); + + return pac1921_init(priv); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(pac1921_pm_ops, pac1921_suspend, + pac1921_resume); + +static void pac1921_regulator_disable(void *data) +{ + struct regulator *regulator = data; + + regulator_disable(regulator); +} + +static int pac1921_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct pac1921_priv *priv; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*priv)); + if (!indio_dev) + return -ENOMEM; + + priv = iio_priv(indio_dev); + priv->client = client; + i2c_set_clientdata(client, indio_dev); + + priv->regmap = devm_regmap_init_i2c(client, &pac1921_regmap_config); + if (IS_ERR(priv->regmap)) + return dev_err_probe(dev, (int)PTR_ERR(priv->regmap), + "Cannot initialize register map\n"); + + devm_mutex_init(dev, &priv->lock); + + priv->dv_gain = PAC1921_DEFAULT_DV_GAIN; + priv->di_gain = PAC1921_DEFAULT_DI_GAIN; + priv->n_samples = PAC1921_DEFAULT_NUM_SAMPLES; + + ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms", + &priv->rshunt_uohm); + if (ret) + return dev_err_probe(dev, ret, + "Cannot read shunt resistor property\n"); + if (priv->rshunt_uohm == 0 || priv->rshunt_uohm > INT_MAX) + return dev_err_probe(dev, -EINVAL, + "Invalid shunt resistor: %u\n", + priv->rshunt_uohm); + + pac1921_calc_current_scales(priv); + + priv->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(priv->vdd)) + return dev_err_probe(dev, (int)PTR_ERR(priv->vdd), + "Cannot get vdd regulator\n"); + + ret = regulator_enable(priv->vdd); + if (ret) + return dev_err_probe(dev, ret, "Cannot enable vdd regulator\n"); + + ret = devm_add_action_or_reset(dev, pac1921_regulator_disable, + priv->vdd); + if (ret) + return dev_err_probe(dev, ret, + "Cannot add action for vdd regulator disposal\n"); + + msleep(PAC1921_POWERUP_TIME_MS); + + ret = pac1921_init(priv); + if (ret) + return dev_err_probe(dev, ret, "Cannot initialize device\n"); + + priv->iio_info = pac1921_iio; + + indio_dev->name = "pac1921"; + indio_dev->info = &priv->iio_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = pac1921_channels; + indio_dev->num_channels = ARRAY_SIZE(pac1921_channels); + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, + &iio_pollfunc_store_time, + &pac1921_trigger_handler, NULL); + if (ret) + return dev_err_probe(dev, ret, + "Cannot setup IIO triggered buffer\n"); + + ret = devm_iio_device_register(dev, indio_dev); + if (ret) + return dev_err_probe(dev, ret, "Cannot register IIO device\n"); + + return 0; +} + +static const struct i2c_device_id pac1921_id[] = { + { .name = "pac1921", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, pac1921_id); + +static const struct of_device_id pac1921_of_match[] = { + { .compatible = "microchip,pac1921" }, + { } +}; +MODULE_DEVICE_TABLE(of, pac1921_of_match); + +static struct i2c_driver pac1921_driver = { + .driver = { + .name = "pac1921", + .pm = 
pm_sleep_ptr(&pac1921_pm_ops), + .of_match_table = pac1921_of_match, + }, + .probe = pac1921_probe, + .id_table = pac1921_id, +}; + +module_i2c_driver(pac1921_driver); + +MODULE_AUTHOR("Matteo Martelli "); +MODULE_DESCRIPTION("IIO driver for PAC1921 High-Side Power/Current Monitor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c index ae24a27805ab6b..7ef249d8328661 100644 --- a/drivers/iio/adc/pac1934.c +++ b/drivers/iio/adc/pac1934.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include /* * maximum accumulation time should be (17 * 60 * 1000) around 17 minutes@1024 sps @@ -1571,7 +1571,7 @@ static const struct i2c_device_id pac1934_id[] = { { .name = "pac1932", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1932] }, { .name = "pac1933", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1933] }, { .name = "pac1934", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] }, - {} + { } }; MODULE_DEVICE_TABLE(i2c, pac1934_id); @@ -1592,7 +1592,7 @@ static const struct of_device_id pac1934_of_match[] = { .compatible = "microchip,pac1934", .data = &pac1934_chip_config[PAC1934] }, - {} + { } }; MODULE_DEVICE_TABLE(of, pac1934_of_match); @@ -1602,7 +1602,7 @@ MODULE_DEVICE_TABLE(of, pac1934_of_match); */ static const struct acpi_device_id pac1934_acpi_match[] = { { "MCHP1930", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] }, - {} + { } }; MODULE_DEVICE_TABLE(acpi, pac1934_acpi_match); diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c index c9d2c66434e4f4..9e1112f5acc6a4 100644 --- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c +++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c @@ -1006,7 +1006,7 @@ static const struct of_device_id pm8xxx_xoadc_id_table[] = { .compatible = "qcom,pm8921-adc", .data = &pm8921_variant, }, - { }, + { } }; MODULE_DEVICE_TABLE(of, pm8xxx_xoadc_id_table); diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c index 1402df68dd5289..63ebaf13ef19e5 100644 --- a/drivers/iio/adc/qcom-spmi-rradc.c +++ b/drivers/iio/adc/qcom-spmi-rradc.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include @@ -1002,7 +1002,7 @@ static int rradc_probe(struct platform_device *pdev) static const struct of_device_id rradc_match_table[] = { { .compatible = "qcom,pm660-rradc" }, { .compatible = "qcom,pmi8998-rradc" }, - {} + { } }; MODULE_DEVICE_TABLE(of, rradc_match_table); diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index bbe954a738c7db..240cfa391674e7 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -331,7 +331,7 @@ static const struct of_device_id rockchip_saradc_match[] = { .compatible = "rockchip,rk3588-saradc", .data = &rk3588_saradc_data, }, - {}, + { } }; MODULE_DEVICE_TABLE(of, rockchip_saradc_match); @@ -370,7 +370,7 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p) mutex_lock(&info->lock); - for_each_set_bit(i, i_dev->active_scan_mask, i_dev->masklength) { + iio_for_each_active_channel(i_dev, i) { const struct iio_chan_spec *chan = &i_dev->channels[i]; ret = rockchip_saradc_conversion(info, chan); diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c index bcb12984090898..56ed948a8ae10d 100644 --- a/drivers/iio/adc/rtq6056.c +++ b/drivers/iio/adc/rtq6056.c @@ -643,7 +643,7 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p) pm_runtime_get_sync(dev); - for_each_set_bit(bit, indio_dev->active_scan_mask, 
indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { unsigned int addr = rtq6056_channels[bit].address; ret = regmap_read(priv->regmap, addr, &raw); @@ -865,7 +865,7 @@ static const struct richtek_dev_data rtq6059_devdata = { static const struct of_device_id rtq6056_device_match[] = { { .compatible = "richtek,rtq6056", .data = &rtq6056_devdata }, { .compatible = "richtek,rtq6059", .data = &rtq6059_devdata }, - {} + { } }; MODULE_DEVICE_TABLE(of, rtq6056_device_match); diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c index 327cc2097f6c72..654b6a38b650a1 100644 --- a/drivers/iio/adc/sd_adc_modulator.c +++ b/drivers/iio/adc/sd_adc_modulator.c @@ -6,11 +6,14 @@ * Author: Arnaud Pouliquen . */ +#include #include #include #include #include #include +#include +#include static const struct iio_info iio_sd_mod_iio_info; @@ -24,7 +27,59 @@ static const struct iio_chan_spec iio_sd_mod_ch = { }, }; -static int iio_sd_mod_probe(struct platform_device *pdev) +struct iio_sd_backend_priv { + struct regulator *vref; + int vref_mv; +}; + +static int iio_sd_mod_enable(struct iio_backend *backend) +{ + struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend); + + if (priv->vref) + return regulator_enable(priv->vref); + + return 0; +}; + +static void iio_sd_mod_disable(struct iio_backend *backend) +{ + struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend); + + if (priv->vref) + regulator_disable(priv->vref); +}; + +static int iio_sd_mod_read(struct iio_backend *backend, struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend); + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + *val = priv->vref_mv; + return IIO_VAL_INT; + + case IIO_CHAN_INFO_OFFSET: + *val = 0; + return IIO_VAL_INT; + } + + return -EOPNOTSUPP; +}; + +static const struct iio_backend_ops sd_backend_ops = { + .enable = iio_sd_mod_enable, + .disable = iio_sd_mod_disable, + .read_raw = iio_sd_mod_read, +}; + +static const struct iio_backend_info sd_backend_info = { + .name = "sd-modulator", + .ops = &sd_backend_ops, +}; + +static int iio_sd_mod_register(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct iio_dev *iio; @@ -45,6 +100,45 @@ static int iio_sd_mod_probe(struct platform_device *pdev) return devm_iio_device_register(&pdev->dev, iio); } +static int iio_sd_mod_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regulator *vref; + struct iio_sd_backend_priv *priv; + int ret; + + /* If sd modulator is not defined as an IIO backend device, fallback to legacy */ + if (!device_property_present(dev, "#io-backend-cells")) + return iio_sd_mod_register(pdev); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* + * Get regulator reference if any, but don't enable regulator right now. + * Rely on enable and disable callbacks to manage regulator power. + */ + vref = devm_regulator_get_optional(dev, "vref"); + if (IS_ERR(vref)) { + if (PTR_ERR(vref) != -ENODEV) + return dev_err_probe(dev, PTR_ERR(vref), "Failed to get vref\n"); + } else { + /* + * Retrieve voltage right now, as regulator_get_voltage() provides it whatever + * the state of the regulator. 
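+ * The value is cached in millivolts and later reported through the
+ * backend read_raw() callback for IIO_CHAN_INFO_SCALE.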
+ */ + ret = regulator_get_voltage(vref); + if (ret < 0) + return ret; + + priv->vref = vref; + priv->vref_mv = ret / 1000; + } + + return devm_iio_backend_register(&pdev->dev, &sd_backend_info, priv); +}; + static const struct of_device_id sd_adc_of_match[] = { { .compatible = "sd-modulator" }, { .compatible = "ads1201" }, @@ -65,3 +159,4 @@ module_platform_driver(iio_sd_mod_adc); MODULE_DESCRIPTION("Basic sigma delta modulator"); MODULE_AUTHOR("Arnaud Pouliquen "); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_BACKEND); diff --git a/drivers/iio/adc/sophgo-cv1800b-adc.c b/drivers/iio/adc/sophgo-cv1800b-adc.c new file mode 100644 index 00000000000000..0951deb7b11123 --- /dev/null +++ b/drivers/iio/adc/sophgo-cv1800b-adc.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Sophgo CV1800B SARADC Driver + * + * Copyright (C) Bootlin 2024 + * Author: Thomas Bonnefille + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define CV1800B_ADC_CTRL_REG 0x04 +#define CV1800B_ADC_EN BIT(0) +#define CV1800B_ADC_SEL(x) BIT((x) + 5) +#define CV1800B_ADC_STATUS_REG 0x08 +#define CV1800B_ADC_BUSY BIT(0) +#define CV1800B_ADC_CYC_SET_REG 0x0C +#define CV1800B_MASK_STARTUP_CYCLE GENMASK(4, 0) +#define CV1800B_MASK_SAMPLE_WINDOW GENMASK(11, 8) +#define CV1800B_MASK_CLKDIV GENMASK(15, 12) +#define CV1800B_MASK_COMPARE_CYCLE GENMASK(19, 16) +#define CV1800B_ADC_CH_RESULT_REG(x) (0x14 + 4 * (x)) +#define CV1800B_ADC_CH_RESULT GENMASK(11, 0) +#define CV1800B_ADC_CH_VALID BIT(15) +#define CV1800B_ADC_INTR_EN_REG 0x20 +#define CV1800B_ADC_INTR_CLR_REG 0x24 +#define CV1800B_ADC_INTR_CLR_BIT BIT(0) +#define CV1800B_ADC_INTR_STA_REG 0x28 +#define CV1800B_ADC_INTR_STA_BIT BIT(0) +#define CV1800B_READ_TIMEOUT_MS 1000 +#define CV1800B_READ_TIMEOUT_US (CV1800B_READ_TIMEOUT_MS * 1000) + +#define CV1800B_ADC_CHANNEL(index) \ + { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .channel = index, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\ + .scan_index = index, \ + } + +struct cv1800b_adc { + struct completion completion; + void __iomem *regs; + struct mutex lock; /* ADC Control and Result register */ + struct clk *clk; + int irq; +}; + +static const struct iio_chan_spec sophgo_channels[] = { + CV1800B_ADC_CHANNEL(0), + CV1800B_ADC_CHANNEL(1), + CV1800B_ADC_CHANNEL(2), +}; + +static void cv1800b_adc_start_measurement(struct cv1800b_adc *saradc, + int channel) +{ + writel(0, saradc->regs + CV1800B_ADC_CTRL_REG); + writel(CV1800B_ADC_SEL(channel) | CV1800B_ADC_EN, + saradc->regs + CV1800B_ADC_CTRL_REG); +} + +static int cv1800b_adc_wait(struct cv1800b_adc *saradc) +{ + if (saradc->irq < 0) { + u32 reg; + + return readl_poll_timeout(saradc->regs + CV1800B_ADC_STATUS_REG, + reg, !(reg & CV1800B_ADC_BUSY), + 500, CV1800B_READ_TIMEOUT_US); + } + + return wait_for_completion_timeout(&saradc->completion, + msecs_to_jiffies(CV1800B_READ_TIMEOUT_MS)) > 0 ? 
+ 0 : -ETIMEDOUT; +} + +static int cv1800b_adc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct cv1800b_adc *saradc = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: { + u32 sample; + + scoped_guard(mutex, &saradc->lock) { + int ret; + + cv1800b_adc_start_measurement(saradc, chan->scan_index); + ret = cv1800b_adc_wait(saradc); + if (ret < 0) + return ret; + + sample = readl(saradc->regs + CV1800B_ADC_CH_RESULT_REG(chan->scan_index)); + } + if (!(sample & CV1800B_ADC_CH_VALID)) + return -ENODATA; + + *val = sample & CV1800B_ADC_CH_RESULT; + return IIO_VAL_INT; + } + case IIO_CHAN_INFO_SCALE: + *val = 3300; + *val2 = 12; + return IIO_VAL_FRACTIONAL_LOG2; + case IIO_CHAN_INFO_SAMP_FREQ: { + u32 status_reg = readl(saradc->regs + CV1800B_ADC_CYC_SET_REG); + unsigned int clk_div = (1 + FIELD_GET(CV1800B_MASK_CLKDIV, status_reg)); + unsigned int freq = clk_get_rate(saradc->clk) / clk_div; + unsigned int nb_startup_cycle = 1 + FIELD_GET(CV1800B_MASK_STARTUP_CYCLE, status_reg); + unsigned int nb_sample_cycle = 1 + FIELD_GET(CV1800B_MASK_SAMPLE_WINDOW, status_reg); + unsigned int nb_compare_cycle = 1 + FIELD_GET(CV1800B_MASK_COMPARE_CYCLE, status_reg); + + *val = freq / (nb_startup_cycle + nb_sample_cycle + nb_compare_cycle); + return IIO_VAL_INT; + } + default: + return -EINVAL; + } +} + +static irqreturn_t cv1800b_adc_interrupt_handler(int irq, void *private) +{ + struct cv1800b_adc *saradc = private; + u32 reg = readl(saradc->regs + CV1800B_ADC_INTR_STA_REG); + + if (!(FIELD_GET(CV1800B_ADC_INTR_STA_BIT, reg))) + return IRQ_NONE; + + writel(CV1800B_ADC_INTR_CLR_BIT, saradc->regs + CV1800B_ADC_INTR_CLR_REG); + complete(&saradc->completion); + + return IRQ_HANDLED; +} + +static const struct iio_info cv1800b_adc_info = { + .read_raw = &cv1800b_adc_read_raw, +}; + +static int cv1800b_adc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cv1800b_adc *saradc; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*saradc)); + if (!indio_dev) + return -ENOMEM; + + saradc = iio_priv(indio_dev); + indio_dev->name = "sophgo-cv1800b-adc"; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &cv1800b_adc_info; + indio_dev->num_channels = ARRAY_SIZE(sophgo_channels); + indio_dev->channels = sophgo_channels; + + saradc->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(saradc->clk)) + return PTR_ERR(saradc->clk); + + saradc->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(saradc->regs)) + return PTR_ERR(saradc->regs); + + saradc->irq = platform_get_irq_optional(pdev, 0); + if (saradc->irq > 0) { + init_completion(&saradc->completion); + ret = devm_request_irq(dev, saradc->irq, + cv1800b_adc_interrupt_handler, 0, + dev_name(dev), saradc); + if (ret) + return ret; + + writel(1, saradc->regs + CV1800B_ADC_INTR_EN_REG); + } + + ret = devm_mutex_init(dev, &saradc->lock); + if (ret) + return ret; + + writel(FIELD_PREP(CV1800B_MASK_STARTUP_CYCLE, 15) | + FIELD_PREP(CV1800B_MASK_SAMPLE_WINDOW, 15) | + FIELD_PREP(CV1800B_MASK_CLKDIV, 1) | + FIELD_PREP(CV1800B_MASK_COMPARE_CYCLE, 15), + saradc->regs + CV1800B_ADC_CYC_SET_REG); + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct of_device_id cv1800b_adc_match[] = { + { .compatible = "sophgo,cv1800b-saradc", }, + { } +}; +MODULE_DEVICE_TABLE(of, cv1800b_adc_match); + +static struct platform_driver cv1800b_adc_driver = { + .driver = { + .name = "sophgo-cv1800b-saradc", + 
.of_match_table = cv1800b_adc_match, + }, + .probe = cv1800b_adc_probe, +}; +module_platform_driver(cv1800b_adc_driver); + +MODULE_AUTHOR("Thomas Bonnefille "); +MODULE_DESCRIPTION("Sophgo CV1800B SARADC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 375aa7720f8042..32ca26ed59f7af 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1261,7 +1261,7 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev, stm32_adc_writel(adc, adc->cfg->regs->smpr[0], adc->smpr_val[0]); stm32_adc_writel(adc, adc->cfg->regs->smpr[1], adc->smpr_val[1]); - for_each_set_bit(bit, scan_mask, indio_dev->masklength) { + for_each_set_bit(bit, scan_mask, iio_get_masklength(indio_dev)) { chan = indio_dev->channels + bit; /* * Assign one channel per SQ entry in regular @@ -1619,7 +1619,7 @@ static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev, if (ret < 0) return ret; - adc->num_conv = bitmap_weight(scan_mask, indio_dev->masklength); + adc->num_conv = bitmap_weight(scan_mask, iio_get_masklength(indio_dev)); ret = stm32_adc_conf_scan_seq(indio_dev, scan_mask); pm_runtime_mark_last_busy(dev); @@ -2638,7 +2638,7 @@ static const struct of_device_id stm32_adc_of_match[] = { { .compatible = "st,stm32h7-adc", .data = (void *)&stm32h7_adc_cfg }, { .compatible = "st,stm32mp1-adc", .data = (void *)&stm32mp1_adc_cfg }, { .compatible = "st,stm32mp13-adc", .data = (void *)&stm32mp13_adc_cfg }, - {}, + { } }; MODULE_DEVICE_TABLE(of, stm32_adc_of_match); diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index fabd654245f566..2037f73426d4b3 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -78,6 +79,7 @@ struct stm32_dfsdm_adc { /* ADC specific */ unsigned int oversamp; struct iio_hw_consumer *hwc; + struct iio_backend **backend; struct completion completion; u32 *buffer; @@ -666,6 +668,74 @@ static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm, return 0; } +static int stm32_dfsdm_generic_channel_parse_of(struct stm32_dfsdm *dfsdm, + struct iio_dev *indio_dev, + struct iio_chan_spec *ch, + struct fwnode_handle *node) +{ + struct stm32_dfsdm_channel *df_ch; + struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); + struct iio_backend *backend; + const char *of_str; + int ret, val; + + ret = fwnode_property_read_u32(node, "reg", &ch->channel); + if (ret < 0) { + dev_err(&indio_dev->dev, "Missing channel index %d\n", ret); + return ret; + } + + if (ch->channel >= dfsdm->num_chs) { + dev_err(&indio_dev->dev, " Error bad channel number %d (max = %d)\n", + ch->channel, dfsdm->num_chs); + return -EINVAL; + } + + ret = fwnode_property_read_string(node, "label", &ch->datasheet_name); + if (ret < 0) { + dev_err(&indio_dev->dev, + " Error parsing 'label' for idx %d\n", ch->channel); + return ret; + } + + df_ch = &dfsdm->ch_list[ch->channel]; + df_ch->id = ch->channel; + + ret = fwnode_property_read_string(node, "st,adc-channel-type", &of_str); + if (!ret) { + val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type); + if (val < 0) + return val; + } else { + val = 0; + } + df_ch->type = val; + + ret = fwnode_property_read_string(node, "st,adc-channel-clk-src", &of_str); + if (!ret) { + val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src); + if (val < 0) + return val; + } else { + val = 0; + } + df_ch->src = val; + + ret = fwnode_property_read_u32(node, "st,adc-alt-channel", 
&df_ch->alt_si); + if (ret != -EINVAL) + df_ch->alt_si = 0; + + if (adc->dev_data->type == DFSDM_IIO) { + backend = devm_iio_backend_fwnode_get(&indio_dev->dev, NULL, node); + if (IS_ERR(backend)) + return dev_err_probe(&indio_dev->dev, PTR_ERR(backend), + "Failed to get backend\n"); + adc->backend[ch->scan_index] = backend; + } + + return 0; +} + static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, @@ -987,7 +1057,7 @@ static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev, { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); - adc->nconv = bitmap_weight(scan_mask, indio_dev->masklength); + adc->nconv = bitmap_weight(scan_mask, iio_get_masklength(indio_dev)); adc->smask = *scan_mask; dev_dbg(&indio_dev->dev, "nconv=%d mask=%lx\n", adc->nconv, *scan_mask); @@ -998,6 +1068,7 @@ static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev, static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); + int i = 0; int ret; /* Reset adc buffer index */ @@ -1009,6 +1080,15 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) return ret; } + if (adc->backend) { + while (adc->backend[i]) { + ret = iio_backend_enable(adc->backend[i]); + if (ret < 0) + return ret; + i++; + } + } + ret = stm32_dfsdm_start_dfsdm(adc->dfsdm); if (ret < 0) goto err_stop_hwc; @@ -1041,6 +1121,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); + int i = 0; stm32_dfsdm_stop_conv(indio_dev); @@ -1048,6 +1129,13 @@ static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) stm32_dfsdm_stop_dfsdm(adc->dfsdm); + if (adc->backend) { + while (adc->backend[i]) { + iio_backend_disable(adc->backend[i]); + i++; + } + } + if (adc->hwc) iio_hw_consumer_disable(adc->hwc); @@ -1220,14 +1308,25 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev, int *val2, long mask) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); + + struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; + struct stm32_dfsdm_filter_osr *flo = &fl->flo[fl->fast]; + u32 max = flo->max << (flo->lshift - chan->scan_type.shift); + int idx = chan->scan_index; int ret; + if (flo->lshift < chan->scan_type.shift) + max = flo->max >> (chan->scan_type.shift - flo->lshift); + switch (mask) { case IIO_CHAN_INFO_RAW: ret = iio_device_claim_direct_mode(indio_dev); if (ret) return ret; - ret = iio_hw_consumer_enable(adc->hwc); + if (adc->hwc) + ret = iio_hw_consumer_enable(adc->hwc); + if (adc->backend) + ret = iio_backend_enable(adc->backend[idx]); if (ret < 0) { dev_err(&indio_dev->dev, "%s: IIO enable failed (channel %d)\n", @@ -1236,7 +1335,10 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev, return ret; } ret = stm32_dfsdm_single_conv(indio_dev, chan, val); - iio_hw_consumer_disable(adc->hwc); + if (adc->hwc) + iio_hw_consumer_disable(adc->hwc); + if (adc->backend) + iio_backend_disable(adc->backend[idx]); if (ret < 0) { dev_err(&indio_dev->dev, "%s: Conversion failed (channel %d)\n", @@ -1256,6 +1358,50 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev, *val = adc->sample_freq; return IIO_VAL_INT; + + case IIO_CHAN_INFO_SCALE: + /* + * Scale is expressed in mV. + * When fast mode is disabled, actual resolution may be lower + * than 2^n, where n = realbits - 1. + * This leads to underestimating the input voltage. 
+ * To compensate this deviation, the voltage reference can be + * corrected with a factor = realbits resolution / actual max + */ + if (adc->backend) { + ret = iio_backend_read_scale(adc->backend[idx], chan, val, NULL); + if (ret < 0) + return ret; + + *val = div_u64((u64)*val * (u64)BIT(DFSDM_DATA_RES - 1), max); + *val2 = chan->scan_type.realbits; + if (chan->differential) + *val *= 2; + } + return IIO_VAL_FRACTIONAL_LOG2; + + case IIO_CHAN_INFO_OFFSET: + /* + * DFSDM output data are in the range [-2^n, 2^n], + * with n = realbits - 1. + * - Differential modulator: + * Offset correspond to SD modulator offset. + * - Single ended modulator: + * Input is in [0V, Vref] range, + * where 0V corresponds to -2^n, and Vref to 2^n. + * Add 2^n to offset. (i.e. middle of input range) + * offset = offset(sd) * vref / res(sd) * max / vref. + */ + if (adc->backend) { + ret = iio_backend_read_offset(adc->backend[idx], chan, val, NULL); + if (ret < 0) + return ret; + + *val = div_u64((u64)max * *val, BIT(*val2 - 1)); + if (!chan->differential) + *val += max; + } + return IIO_VAL_INT; } return -EINVAL; @@ -1320,7 +1466,7 @@ static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = { .read = dfsdm_adc_audio_get_spiclk, .write = dfsdm_adc_audio_set_spiclk, }, - {}, + { } }; static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev) @@ -1362,15 +1508,18 @@ static int stm32_dfsdm_dma_request(struct device *dev, return 0; } -static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, - struct iio_chan_spec *ch) +static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, struct iio_chan_spec *ch, + struct fwnode_handle *child) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); int ret; - ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch); + if (child) + ret = stm32_dfsdm_generic_channel_parse_of(adc->dfsdm, indio_dev, ch, child); + else /* Legacy binding */ + ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch); if (ret < 0) - return ret; + return dev_err_probe(&indio_dev->dev, ret, "Failed to parse channel\n"); ch->type = IIO_VOLTAGE; ch->indexed = 1; @@ -1379,12 +1528,21 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, * IIO_CHAN_INFO_RAW: used to compute regular conversion * IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling */ - ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW); + if (child) { + ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET); + } else { + /* Legacy. 
Scaling not supported */ + ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW); + } + ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | BIT(IIO_CHAN_INFO_SAMP_FREQ); if (adc->dev_data->type == DFSDM_AUDIO) { ch->ext_info = dfsdm_adc_audio_ext_info; + ch->scan_index = 0; } else { ch->scan_type.shift = 8; } @@ -1396,20 +1554,67 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, &adc->dfsdm->ch_list[ch->channel]); } +static int stm32_dfsdm_chan_init(struct iio_dev *indio_dev, struct iio_chan_spec *channels) +{ + int num_ch = indio_dev->num_channels; + int chan_idx = 0; + int ret; + + for (chan_idx = 0; chan_idx < num_ch; chan_idx++) { + channels[chan_idx].scan_index = chan_idx; + ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &channels[chan_idx], NULL); + if (ret < 0) + return dev_err_probe(&indio_dev->dev, ret, "Channels init failed\n"); + } + + return 0; +} + +static int stm32_dfsdm_generic_chan_init(struct iio_dev *indio_dev, struct iio_chan_spec *channels) +{ + int chan_idx = 0, ret; + + device_for_each_child_node_scoped(&indio_dev->dev, child) { + /* Skip DAI node in DFSDM audio nodes */ + if (fwnode_property_present(child, "compatible")) + continue; + + channels[chan_idx].scan_index = chan_idx; + ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &channels[chan_idx], child); + if (ret < 0) + return dev_err_probe(&indio_dev->dev, ret, "Channels init failed\n"); + + chan_idx++; + } + + return chan_idx; +} + static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev) { struct iio_chan_spec *ch; struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); struct stm32_dfsdm_channel *d_ch; - int ret; + bool legacy = false; + int num_ch, ret; + + /* If st,adc-channels is defined legacy binding is used. Else assume generic binding. */ + num_ch = of_property_count_u32_elems(indio_dev->dev.of_node, "st,adc-channels"); + if (num_ch == 1) + legacy = true; ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL); if (!ch) return -ENOMEM; - ch->scan_index = 0; + indio_dev->num_channels = 1; + indio_dev->channels = ch; + + if (legacy) + ret = stm32_dfsdm_chan_init(indio_dev, ch); + else + ret = stm32_dfsdm_generic_chan_init(indio_dev, ch); - ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch); if (ret < 0) { dev_err(&indio_dev->dev, "Channels init failed\n"); return ret; @@ -1420,9 +1625,6 @@ static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev) if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL) adc->spi_freq = adc->dfsdm->spi_master_freq; - indio_dev->num_channels = 1; - indio_dev->channels = ch; - return stm32_dfsdm_dma_request(dev, indio_dev); } @@ -1430,43 +1632,61 @@ static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev) { struct iio_chan_spec *ch; struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); - int num_ch; - int ret, chan_idx; + int num_ch, ret; + bool legacy = false; adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING; ret = stm32_dfsdm_compute_all_osrs(indio_dev, adc->oversamp); if (ret < 0) return ret; - num_ch = of_property_count_u32_elems(indio_dev->dev.of_node, - "st,adc-channels"); - if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) { - dev_err(&indio_dev->dev, "Bad st,adc-channels\n"); - return num_ch < 0 ? num_ch : -EINVAL; + num_ch = device_get_child_node_count(&indio_dev->dev); + if (!num_ch) { + /* No channels nodes found. 
Assume legacy binding */ + num_ch = of_property_count_u32_elems(indio_dev->dev.of_node, "st,adc-channels"); + if (num_ch < 0) { + dev_err(&indio_dev->dev, "Bad st,adc-channels\n"); + return num_ch; + } + + legacy = true; } - /* Bind to SD modulator IIO device */ - adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev); - if (IS_ERR(adc->hwc)) - return -EPROBE_DEFER; + if (num_ch > adc->dfsdm->num_chs) { + dev_err(&indio_dev->dev, "Number of channel [%d] exceeds [%d]\n", + num_ch, adc->dfsdm->num_chs); + return -EINVAL; + } + indio_dev->num_channels = num_ch; - ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch), - GFP_KERNEL); - if (!ch) - return -ENOMEM; + if (legacy) { + /* Bind to SD modulator IIO device. */ + adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev); + if (IS_ERR(adc->hwc)) + return dev_err_probe(&indio_dev->dev, -EPROBE_DEFER, + "waiting for SD modulator\n"); + } else { + /* Generic binding. SD modulator IIO device not used. Use SD modulator backend. */ + adc->hwc = NULL; - for (chan_idx = 0; chan_idx < num_ch; chan_idx++) { - ch[chan_idx].scan_index = chan_idx; - ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]); - if (ret < 0) { - dev_err(&indio_dev->dev, "Channels init failed\n"); - return ret; - } + adc->backend = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*adc->backend), + GFP_KERNEL); + if (!adc->backend) + return -ENOMEM; } - indio_dev->num_channels = num_ch; + ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch), GFP_KERNEL); + if (!ch) + return -ENOMEM; indio_dev->channels = ch; + if (legacy) + ret = stm32_dfsdm_chan_init(indio_dev, ch); + else + ret = stm32_dfsdm_generic_chan_init(indio_dev, ch); + if (ret < 0) + return ret; + init_completion(&adc->completion); /* Optionally request DMA */ @@ -1677,3 +1897,4 @@ module_platform_driver(stm32_dfsdm_adc_driver); MODULE_DESCRIPTION("STM32 sigma delta ADC"); MODULE_AUTHOR("Arnaud Pouliquen "); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(IIO_BACKEND); diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c index a05d978b8cb86f..bef59fcc0d807c 100644 --- a/drivers/iio/adc/stm32-dfsdm-core.c +++ b/drivers/iio/adc/stm32-dfsdm-core.c @@ -299,7 +299,7 @@ static const struct of_device_id stm32_dfsdm_of_match[] = { .compatible = "st,stm32mp1-dfsdm", .data = &stm32mp1_dfsdm_data, }, - {} + { } }; MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match); diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c index 8e56def1c9e5c9..b0add5a2eab532 100644 --- a/drivers/iio/adc/stmpe-adc.c +++ b/drivers/iio/adc/stmpe-adc.c @@ -347,7 +347,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume); static const struct of_device_id stmpe_adc_ids[] = { { .compatible = "st,stmpe-adc", }, - { }, + { } }; MODULE_DEVICE_TABLE(of, stmpe_adc_ids); diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c index b11ce555ba3b9c..e2dbd070c7c4f2 100644 --- a/drivers/iio/adc/ti-adc0832.c +++ b/drivers/iio/adc/ti-adc0832.c @@ -211,8 +211,7 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p) mutex_lock(&adc->lock); - for_each_set_bit(scan_index, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, scan_index) { const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index]; int ret = adc0832_adc_conversion(adc, scan_chan->channel, @@ -310,7 +309,7 @@ static const struct of_device_id adc0832_dt_ids[] = { { .compatible = "ti,adc0832", }, { .compatible = "ti,adc0834", }, { .compatible = "ti,adc0838", 
}, - {} + { } }; MODULE_DEVICE_TABLE(of, adc0832_dt_ids); @@ -319,7 +318,7 @@ static const struct spi_device_id adc0832_id[] = { { "adc0832", adc0832 }, { "adc0834", adc0834 }, { "adc0838", adc0838 }, - {} + { } }; MODULE_DEVICE_TABLE(spi, adc0832_id); diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c index 1f6e53832e062a..bf98f9bf942a3f 100644 --- a/drivers/iio/adc/ti-adc084s021.c +++ b/drivers/iio/adc/ti-adc084s021.c @@ -166,8 +166,7 @@ static int adc084s021_buffer_preenable(struct iio_dev *indio_dev) int scan_index; int i = 0; - for_each_set_bit(scan_index, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, scan_index) { const struct iio_chan_spec *channel = &indio_dev->channels[scan_index]; adc->tx_buf[i++] = channel->channel << 3; @@ -243,13 +242,13 @@ static int adc084s021_probe(struct spi_device *spi) static const struct of_device_id adc084s021_of_match[] = { { .compatible = "ti,adc084s021", }, - {}, + { } }; MODULE_DEVICE_TABLE(of, adc084s021_of_match); static const struct spi_device_id adc084s021_id[] = { { ADC084S021_DRIVER_NAME, 0 }, - {} + { } }; MODULE_DEVICE_TABLE(spi, adc084s021_id); diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c index c0a72d72f3a999..7f065f457b3617 100644 --- a/drivers/iio/adc/ti-adc12138.c +++ b/drivers/iio/adc/ti-adc12138.c @@ -344,8 +344,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) mutex_lock(&adc->lock); - for_each_set_bit(scan_index, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, scan_index) { const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index]; @@ -520,7 +519,7 @@ static const struct of_device_id adc12138_dt_ids[] = { { .compatible = "ti,adc12130", }, { .compatible = "ti,adc12132", }, { .compatible = "ti,adc12138", }, - {} + { } }; MODULE_DEVICE_TABLE(of, adc12138_dt_ids); @@ -528,7 +527,7 @@ static const struct spi_device_id adc12138_id[] = { { "adc12130", adc12130 }, { "adc12132", adc12132 }, { "adc12138", adc12138 }, - {} + { } }; MODULE_DEVICE_TABLE(spi, adc12138_id); diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c index f7c78d0dd4499e..474e733fb8e046 100644 --- a/drivers/iio/adc/ti-adc161s626.c +++ b/drivers/iio/adc/ti-adc161s626.c @@ -226,14 +226,14 @@ static int ti_adc_probe(struct spi_device *spi) static const struct of_device_id ti_adc_dt_ids[] = { { .compatible = "ti,adc141s626", }, { .compatible = "ti,adc161s626", }, - {} + { } }; MODULE_DEVICE_TABLE(of, ti_adc_dt_ids); static const struct spi_device_id ti_adc_id[] = { - {"adc141s626", TI_ADC141S626}, - {"adc161s626", TI_ADC161S626}, - {}, + { "adc141s626", TI_ADC141S626 }, + { "adc161s626", TI_ADC161S626 }, + { } }; MODULE_DEVICE_TABLE(spi, ti_adc_id); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index d3363d02f2921a..6d1bc9659946df 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -456,7 +456,7 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p) mutex_lock(&data->lock); chan = find_first_bit(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); ret = ads1015_get_adc_result(data, chan, &res); if (ret < 0) { mutex_unlock(&data->lock); @@ -1173,7 +1173,7 @@ static const struct i2c_device_id ads1015_id[] = { { "ads1015", (kernel_ulong_t)&ads1015_data }, { "ads1115", (kernel_ulong_t)&ads1115_data }, { "tla2024", (kernel_ulong_t)&tla2024_data }, - {} + { } }; 
MODULE_DEVICE_TABLE(i2c, ads1015_id); @@ -1181,7 +1181,7 @@ static const struct of_device_id ads1015_of_match[] = { { .compatible = "ti,ads1015", .data = &ads1015_data }, { .compatible = "ti,ads1115", .data = &ads1115_data }, { .compatible = "ti,tla2024", .data = &tla2024_data }, - {} + { } }; MODULE_DEVICE_TABLE(of, ads1015_of_match); diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c index d649980479e450..1c760637514928 100644 --- a/drivers/iio/adc/ti-ads1119.c +++ b/drivers/iio/adc/ti-ads1119.c @@ -435,7 +435,7 @@ static int ads1119_triggered_buffer_preenable(struct iio_dev *indio_dev) int ret; index = find_first_bit(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); ret = ads1119_set_conv_mode(st, true); if (ret) @@ -508,7 +508,7 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private) if (!iio_trigger_using_own(indio_dev)) { index = find_first_bit(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); ret = ads1119_poll_data_ready(st, &indio_dev->channels[index]); if (ret) { diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c index 4ca62121f0d176..425b48d8986f52 100644 --- a/drivers/iio/adc/ti-ads124s08.c +++ b/drivers/iio/adc/ti-ads124s08.c @@ -21,7 +21,7 @@ #include #include -#include +#include /* Commands */ #define ADS124S08_CMD_NOP 0x00 @@ -279,8 +279,7 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p) int scan_index, j = 0; int ret; - for_each_set_bit(scan_index, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, scan_index) { ret = ads124s_write_reg(indio_dev, ADS124S08_INPUT_MUX, scan_index); if (ret) @@ -358,7 +357,7 @@ MODULE_DEVICE_TABLE(spi, ads124s_id); static const struct of_device_id ads124s_of_table[] = { { .compatible = "ti,ads124s06" }, { .compatible = "ti,ads124s08" }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ads124s_of_table); diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c index 1d1eaba3d6d129..0f9f75baaebbf7 100644 --- a/drivers/iio/adc/ti-ads1298.c +++ b/drivers/iio/adc/ti-ads1298.c @@ -23,7 +23,7 @@ #include #include -#include +#include /* Commands */ #define ADS1298_CMD_WAKEUP 0x02 @@ -502,8 +502,7 @@ static void ads1298_rdata_complete(void *context) } /* Demux the channel data into our bounce buffer */ - for_each_set_bit(scan_index, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, scan_index) { const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index]; const u8 *data = priv->rx_buffer + scan_chan->address; diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c index 96dd9366f8ffc8..31f1f229d97a78 100644 --- a/drivers/iio/adc/ti-ads131e08.c +++ b/drivers/iio/adc/ti-ads131e08.c @@ -23,7 +23,7 @@ #include #include -#include +#include /* Commands */ #define ADS131E08_CMD_RESET 0x06 @@ -637,7 +637,7 @@ static irqreturn_t ads131e08_trigger_handler(int irq, void *private) if (ret) goto out; - for_each_set_bit(chn, indio_dev->active_scan_mask, indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, chn) { src = st->rx_buf + ADS131E08_NUM_STATUS_BYTES + chn * num_bytes; dest = st->tmp_buf.data + i * ADS131E08_NUM_STORAGE_BYTES; @@ -918,7 +918,7 @@ static const struct of_device_id ads131e08_of_match[] = { .data = &ads131e08_info_tbl[ads131e06], }, { .compatible = "ti,ads131e08", .data = &ads131e08_info_tbl[ads131e08], }, - {} + { } }; MODULE_DEVICE_TABLE(of, 
ads131e08_of_match); @@ -926,7 +926,7 @@ static const struct spi_device_id ads131e08_ids[] = { { "ads131e04", (kernel_ulong_t)&ads131e08_info_tbl[ads131e04] }, { "ads131e06", (kernel_ulong_t)&ads131e08_info_tbl[ads131e06] }, { "ads131e08", (kernel_ulong_t)&ads131e08_info_tbl[ads131e08] }, - {} + { } }; MODULE_DEVICE_TABLE(spi, ads131e08_ids); diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c index 4da78302359b45..66b54c0d75aab7 100644 --- a/drivers/iio/adc/ti-ads7924.c +++ b/drivers/iio/adc/ti-ads7924.c @@ -448,13 +448,13 @@ static int ads7924_probe(struct i2c_client *client) static const struct i2c_device_id ads7924_id[] = { { "ads7924" }, - {} + { } }; MODULE_DEVICE_TABLE(i2c, ads7924_id); static const struct of_device_id ads7924_of_match[] = { { .compatible = "ti,ads7924", }, - {} + { } }; MODULE_DEVICE_TABLE(of, ads7924_of_match); diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c index 263fc3a1b87e1d..af28672aa8039d 100644 --- a/drivers/iio/adc/ti-ads7950.c +++ b/drivers/iio/adc/ti-ads7950.c @@ -705,7 +705,7 @@ static const struct of_device_id ads7950_of_table[] = { { .compatible = "ti,ads7959", .data = &ti_ads7950_chip_info[TI_ADS7959] }, { .compatible = "ti,ads7960", .data = &ti_ads7950_chip_info[TI_ADS7960] }, { .compatible = "ti,ads7961", .data = &ti_ads7950_chip_info[TI_ADS7961] }, - { }, + { } }; MODULE_DEVICE_TABLE(of, ads7950_of_table); diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c index bbd85cb47f816b..3bec8a2e61abbf 100644 --- a/drivers/iio/adc/ti-ads8344.c +++ b/drivers/iio/adc/ti-ads8344.c @@ -175,7 +175,7 @@ static int ads8344_probe(struct spi_device *spi) static const struct of_device_id ads8344_of_match[] = { { .compatible = "ti,ads8344", }, - {} + { } }; MODULE_DEVICE_TABLE(of, ads8344_of_match); diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 7a79f0cebfbf6c..9b1814f1965a37 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -384,9 +384,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8); int i, j = 0; - for (i = 0; i < indio_dev->masklength; i++) { - if (!test_bit(i, indio_dev->active_scan_mask)) - continue; + iio_for_each_active_channel(indio_dev, i) { buffer[j] = ads8688_read(indio_dev, i); j++; } @@ -454,9 +452,9 @@ static int ads8688_probe(struct spi_device *spi) } static const struct spi_device_id ads8688_id[] = { - {"ads8684", ID_ADS8684}, - {"ads8688", ID_ADS8688}, - {} + { "ads8684", ID_ADS8684 }, + { "ads8688", ID_ADS8688 }, + { } }; MODULE_DEVICE_TABLE(spi, ads8688_id); diff --git a/drivers/iio/adc/ti-lmp92064.c b/drivers/iio/adc/ti-lmp92064.c index 84ba5c4a0eea7a..169e3591320b56 100644 --- a/drivers/iio/adc/ti-lmp92064.c +++ b/drivers/iio/adc/ti-lmp92064.c @@ -360,7 +360,7 @@ static int lmp92064_adc_probe(struct spi_device *spi) static const struct spi_device_id lmp92064_id_table[] = { { "lmp92064" }, - {} + { } }; MODULE_DEVICE_TABLE(spi, lmp92064_id_table); diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c index 30f629a553a14e..08de997584fd01 100644 --- a/drivers/iio/adc/ti-tlc4541.c +++ b/drivers/iio/adc/ti-tlc4541.c @@ -237,14 +237,14 @@ static void tlc4541_remove(struct spi_device *spi) static const struct of_device_id tlc4541_dt_ids[] = { { .compatible = "ti,tlc3541", }, { .compatible = "ti,tlc4541", }, - {} + { } }; MODULE_DEVICE_TABLE(of, tlc4541_dt_ids); static const struct spi_device_id tlc4541_id[] = { - 
{"tlc3541", TLC3541}, - {"tlc4541", TLC4541}, - {} + { "tlc3541", TLC3541 }, + { "tlc4541", TLC4541 }, + { } }; MODULE_DEVICE_TABLE(spi, tlc4541_id); diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c index edcef8f11522b4..b56f2503f14ceb 100644 --- a/drivers/iio/adc/ti-tsc2046.c +++ b/drivers/iio/adc/ti-tsc2046.c @@ -6,13 +6,14 @@ */ #include +#include #include #include #include #include #include -#include +#include #include #include @@ -141,7 +142,7 @@ enum tsc2046_state { struct tsc2046_adc_priv { struct spi_device *spi; const struct tsc2046_adc_dcfg *dcfg; - struct regulator *vref_reg; + bool internal_vref; struct iio_trigger *trig; struct hrtimer trig_timer; @@ -257,7 +258,7 @@ static u8 tsc2046_adc_get_cmd(struct tsc2046_adc_priv *priv, int ch_idx, case TI_TSC2046_ADDR_VBAT: case TI_TSC2046_ADDR_TEMP0: pd |= TI_TSC2046_SER; - if (!priv->vref_reg) + if (priv->internal_vref) pd |= TI_TSC2046_PD1_VREF_ON; } @@ -273,7 +274,6 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx, u32 *effective_speed_hz) { struct tsc2046_adc_ch_cfg *ch = &priv->ch_cfg[ch_idx]; - struct tsc2046_adc_atom *rx_buf, *tx_buf; unsigned int val, val_normalized = 0; int ret, i, count_skip = 0, max_count; struct spi_transfer xfer; @@ -287,18 +287,20 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx, max_count = 1; } - if (sizeof(*tx_buf) * max_count > PAGE_SIZE) + if (sizeof(struct tsc2046_adc_atom) * max_count > PAGE_SIZE) return -ENOSPC; - tx_buf = kcalloc(max_count, sizeof(*tx_buf), GFP_KERNEL); + struct tsc2046_adc_atom *tx_buf __free(kfree) = kcalloc(max_count, + sizeof(*tx_buf), + GFP_KERNEL); if (!tx_buf) return -ENOMEM; - rx_buf = kcalloc(max_count, sizeof(*rx_buf), GFP_KERNEL); - if (!rx_buf) { - ret = -ENOMEM; - goto free_tx; - } + struct tsc2046_adc_atom *rx_buf __free(kfree) = kcalloc(max_count, + sizeof(*rx_buf), + GFP_KERNEL); + if (!rx_buf) + return -ENOMEM; /* * Do not enable automatic power down on working samples. 
Otherwise the @@ -326,7 +328,7 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx, if (ret) { dev_err_ratelimited(&priv->spi->dev, "SPI transfer failed %pe\n", ERR_PTR(ret)); - goto free_bufs; + return ret; } if (effective_speed_hz) @@ -337,14 +339,7 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx, val_normalized += val; } - ret = DIV_ROUND_UP(val_normalized, max_count - count_skip); - -free_bufs: - kfree(rx_buf); -free_tx: - kfree(tx_buf); - - return ret; + return DIV_ROUND_UP(val_normalized, max_count - count_skip); } static size_t tsc2046_adc_group_set_layout(struct tsc2046_adc_priv *priv, @@ -746,49 +741,6 @@ static void tsc2046_adc_parse_fwnode(struct tsc2046_adc_priv *priv) } } -static void tsc2046_adc_regulator_disable(void *data) -{ - struct tsc2046_adc_priv *priv = data; - - regulator_disable(priv->vref_reg); -} - -static int tsc2046_adc_configure_regulator(struct tsc2046_adc_priv *priv) -{ - struct device *dev = &priv->spi->dev; - int ret; - - priv->vref_reg = devm_regulator_get_optional(dev, "vref"); - if (IS_ERR(priv->vref_reg)) { - /* If regulator exists but can't be get, return an error */ - if (PTR_ERR(priv->vref_reg) != -ENODEV) - return PTR_ERR(priv->vref_reg); - priv->vref_reg = NULL; - } - if (!priv->vref_reg) { - /* Use internal reference */ - priv->vref_mv = TI_TSC2046_INT_VREF; - return 0; - } - - ret = regulator_enable(priv->vref_reg); - if (ret) - return ret; - - ret = devm_add_action_or_reset(dev, tsc2046_adc_regulator_disable, - priv); - if (ret) - return ret; - - ret = regulator_get_voltage(priv->vref_reg); - if (ret < 0) - return ret; - - priv->vref_mv = ret / MILLI; - - return 0; -} - static int tsc2046_adc_probe(struct spi_device *spi) { const struct tsc2046_adc_dcfg *dcfg; @@ -830,10 +782,13 @@ static int tsc2046_adc_probe(struct spi_device *spi) indio_dev->num_channels = dcfg->num_channels; indio_dev->info = &tsc2046_adc_info; - ret = tsc2046_adc_configure_regulator(priv); - if (ret) + ret = devm_regulator_get_enable_read_voltage(dev, "vref"); + if (ret < 0 && ret != -ENODEV) return ret; + priv->internal_vref = ret == -ENODEV; + priv->vref_mv = priv->internal_vref ? 
TI_TSC2046_INT_VREF : ret / MILLI; + tsc2046_adc_parse_fwnode(priv); ret = tsc2046_adc_setup_spi_msg(priv); diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index e4548df3f8fb4e..5afd2feb8c3ddc 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -752,7 +752,7 @@ static int vf610_adc_buffer_postenable(struct iio_dev *indio_dev) writel(val, info->regs + VF610_REG_ADC_GC); channel = find_first_bit(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); val = VF610_ADC_ADCHC(channel); val |= VF610_ADC_AIEN; diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c index f051358d6b50b2..ebc583b07e0c00 100644 --- a/drivers/iio/adc/xilinx-ams.c +++ b/drivers/iio/adc/xilinx-ams.c @@ -1275,7 +1275,6 @@ static int ams_parse_firmware(struct iio_dev *indio_dev) struct ams *ams = iio_priv(indio_dev); struct iio_chan_spec *ams_channels, *dev_channels; struct device *dev = indio_dev->dev.parent; - struct fwnode_handle *child = NULL; struct fwnode_handle *fwnode = dev_fwnode(dev); size_t ams_size; int ret, ch_cnt = 0, i, rising_off, falling_off; @@ -1297,16 +1296,12 @@ static int ams_parse_firmware(struct iio_dev *indio_dev) num_channels += ret; } - fwnode_for_each_child_node(fwnode, child) { - if (fwnode_device_is_available(child)) { - ret = ams_init_module(indio_dev, child, ams_channels + num_channels); - if (ret < 0) { - fwnode_handle_put(child); - return ret; - } + device_for_each_child_node_scoped(dev, child) { + ret = ams_init_module(indio_dev, child, ams_channels + num_channels); + if (ret < 0) + return ret; - num_channels += ret; - } + num_channels += ret; } for (i = 0; i < num_channels; i++) { diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 564c0cad0fc797..cfbfcaefec0fcd 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -628,7 +628,7 @@ static int xadc_update_scan_mode(struct iio_dev *indio_dev, size_t n; void *data; - n = bitmap_weight(mask, indio_dev->masklength); + n = bitmap_weight(mask, iio_get_masklength(indio_dev)); data = devm_krealloc_array(indio_dev->dev.parent, xadc->data, n, sizeof(*xadc->data), GFP_KERNEL); @@ -681,8 +681,7 @@ static irqreturn_t xadc_trigger_handler(int irq, void *p) goto out; j = 0; - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { chan = xadc_scan_index_to_channel(i); xadc_read_adc_reg(xadc, chan, &xadc->data[j]); j++; diff --git a/drivers/iio/addac/ad74115.c b/drivers/iio/addac/ad74115.c index 12dc43d487b4ba..3ee0dd5537c168 100644 --- a/drivers/iio/addac/ad74115.c +++ b/drivers/iio/addac/ad74115.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c index 2410d72da49baa..69c586525d2158 100644 --- a/drivers/iio/addac/ad74413r.c +++ b/drivers/iio/addac/ad74413r.c @@ -4,7 +4,7 @@ * Author: Cosmin Tanislav */ -#include +#include #include #include #include diff --git a/drivers/iio/amplifiers/ada4250.c b/drivers/iio/amplifiers/ada4250.c index 4b32d350dc5dc0..566f0e1c98a572 100644 --- a/drivers/iio/amplifiers/ada4250.c +++ b/drivers/iio/amplifiers/ada4250.c @@ -14,7 +14,7 @@ #include #include -#include +#include /* ADA4250 Register Map */ #define ADA4250_REG_GAIN_MUX 0x00 diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c index 4c12b7a94af59a..4befc9f55201e4 100644 --- 
a/drivers/iio/buffer/industrialio-buffer-cb.c +++ b/drivers/iio/buffer/industrialio-buffer-cb.c @@ -77,7 +77,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, } cb_buff->indio_dev = cb_buff->channels[0].indio_dev; - cb_buff->buffer.scan_mask = bitmap_zalloc(cb_buff->indio_dev->masklength, + cb_buff->buffer.scan_mask = bitmap_zalloc(iio_get_masklength(cb_buff->indio_dev), GFP_KERNEL); if (cb_buff->buffer.scan_mask == NULL) { ret = -ENOMEM; diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index 647f417a045efe..dbde1443d6ede5 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -248,7 +248,7 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) iio_dma_buffer_queue_wake(queue); dma_fence_end_signalling(cookie); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_done, IIO_DMA_BUFFER); /** * iio_dma_buffer_block_list_abort() - Indicate that a list block has been @@ -287,7 +287,7 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, iio_dma_buffer_queue_wake(queue); dma_fence_end_signalling(cookie); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_list_abort, IIO_DMA_BUFFER); static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) { @@ -420,7 +420,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer) return ret; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_request_update, IIO_DMA_BUFFER); static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue) { @@ -506,7 +506,7 @@ int iio_dma_buffer_enable(struct iio_buffer *buffer, return 0; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_enable); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enable, IIO_DMA_BUFFER); /** * iio_dma_buffer_disable() - Disable DMA buffer @@ -530,7 +530,7 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer, return 0; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_disable); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_disable, IIO_DMA_BUFFER); static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block) @@ -636,7 +636,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, { return iio_dma_buffer_io(buffer, n, user_buffer, false); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_read); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_read, IIO_DMA_BUFFER); /** * iio_dma_buffer_write() - DMA buffer write callback @@ -653,7 +653,7 @@ int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n, return iio_dma_buffer_io(buffer, n, (__force __user char *)user_buffer, true); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_write); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_write, IIO_DMA_BUFFER); /** * iio_dma_buffer_usage() - DMA buffer data_available and @@ -696,7 +696,7 @@ size_t iio_dma_buffer_usage(struct iio_buffer *buf) return data_available; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_usage); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_usage, IIO_DMA_BUFFER); struct iio_dma_buffer_block * iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer, @@ -723,7 +723,7 @@ iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer, return block; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_attach_dmabuf); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_attach_dmabuf, IIO_DMA_BUFFER); void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer, struct iio_dma_buffer_block *block) @@ -731,7 +731,7 @@ void iio_dma_buffer_detach_dmabuf(struct iio_buffer 
*buffer, block->state = IIO_BLOCK_STATE_DEAD; iio_buffer_block_put_atomic(block); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_detach_dmabuf); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_detach_dmabuf, IIO_DMA_BUFFER); static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block) { @@ -784,7 +784,7 @@ int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer, return ret; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enqueue_dmabuf, IIO_DMA_BUFFER); void iio_dma_buffer_lock_queue(struct iio_buffer *buffer) { @@ -792,7 +792,7 @@ void iio_dma_buffer_lock_queue(struct iio_buffer *buffer) mutex_lock(&queue->lock); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_lock_queue); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_lock_queue, IIO_DMA_BUFFER); void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer) { @@ -800,7 +800,7 @@ void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer) mutex_unlock(&queue->lock); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_unlock_queue); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_unlock_queue, IIO_DMA_BUFFER); /** * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback @@ -816,7 +816,7 @@ int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd) return 0; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_bytes_per_datum, IIO_DMA_BUFFER); /** * iio_dma_buffer_set_length - DMA buffer set_length callback @@ -836,7 +836,7 @@ int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length) return 0; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_length, IIO_DMA_BUFFER); /** * iio_dma_buffer_init() - Initialize DMA buffer queue @@ -864,7 +864,7 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, return 0; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_init); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_init, IIO_DMA_BUFFER); /** * iio_dma_buffer_exit() - Cleanup DMA buffer queue @@ -882,7 +882,7 @@ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) mutex_unlock(&queue->lock); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_exit); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_exit, IIO_DMA_BUFFER); /** * iio_dma_buffer_release() - Release final buffer resources @@ -896,7 +896,7 @@ void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) { mutex_destroy(&queue->lock); } -EXPORT_SYMBOL_GPL(iio_dma_buffer_release); +EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_release, IIO_DMA_BUFFER); MODULE_AUTHOR("Lars-Peter Clausen "); MODULE_DESCRIPTION("DMA buffer for the IIO framework"); diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index 426cc614587a64..19af1caf14cd34 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -350,3 +350,4 @@ EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER); MODULE_AUTHOR("Lars-Peter Clausen "); MODULE_DESCRIPTION("DMA buffer for the IIO framework"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_DMA_BUFFER); diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c index fb58f599a80b19..526b2a8d725d1d 100644 --- a/drivers/iio/buffer/industrialio-hw-consumer.c +++ b/drivers/iio/buffer/industrialio-hw-consumer.c @@ -52,6 +52,7 @@ static const struct iio_buffer_access_funcs iio_hw_buf_access = { static struct hw_consumer_buffer *iio_hw_consumer_get_buffer( struct iio_hw_consumer *hwc, struct iio_dev 
*indio_dev) { + unsigned int mask_longs = BITS_TO_LONGS(iio_get_masklength(indio_dev)); struct hw_consumer_buffer *buf; list_for_each_entry(buf, &hwc->buffers, head) { @@ -59,8 +60,7 @@ static struct hw_consumer_buffer *iio_hw_consumer_get_buffer( return buf; } - buf = kzalloc(struct_size(buf, scan_mask, BITS_TO_LONGS(indio_dev->masklength)), - GFP_KERNEL); + buf = kzalloc(struct_size(buf, scan_mask, mask_longs), GFP_KERNEL); if (!buf) return NULL; diff --git a/drivers/iio/cdc/ad7746.c b/drivers/iio/cdc/ad7746.c index d11bc3496dda29..ba18dbbe0940ab 100644 --- a/drivers/iio/cdc/ad7746.c +++ b/drivers/iio/cdc/ad7746.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h index f959252a4fe665..b2c547ac8d3499 100644 --- a/drivers/iio/chemical/bme680.h +++ b/drivers/iio/chemical/bme680.h @@ -12,7 +12,7 @@ #define BME680_REG_TEMP_MSB 0x22 #define BME680_REG_PRESS_MSB 0x1F -#define BM6880_REG_HUMIDITY_MSB 0x25 +#define BME680_REG_HUMIDITY_MSB 0x25 #define BME680_REG_GAS_MSB 0x2A #define BME680_REG_GAS_R_LSB 0x2B #define BME680_GAS_STAB_BIT BIT(4) @@ -39,14 +39,12 @@ #define BME680_HUM_REG_SHIFT_VAL 4 #define BME680_BIT_H1_DATA_MASK GENMASK(3, 0) -#define BME680_REG_RES_HEAT_RANGE 0x02 #define BME680_RHRANGE_MASK GENMASK(5, 4) #define BME680_REG_RES_HEAT_VAL 0x00 -#define BME680_REG_RANGE_SW_ERR 0x04 #define BME680_RSERROR_MASK GENMASK(7, 4) #define BME680_REG_RES_HEAT_0 0x5A #define BME680_REG_GAS_WAIT_0 0x64 -#define BME680_ADC_GAS_RES_SHIFT 6 +#define BME680_ADC_GAS_RES GENMASK(15, 6) #define BME680_AMB_TEMP 25 #define BME680_REG_CTRL_GAS_1 0x71 @@ -58,33 +56,24 @@ #define BME680_GAS_MEAS_BIT BIT(6) #define BME680_MEAS_BIT BIT(5) +#define BME680_TEMP_NUM_BYTES 3 +#define BME680_PRESS_NUM_BYTES 3 +#define BME680_HUMID_NUM_BYTES 2 +#define BME680_GAS_NUM_BYTES 2 + +#define BME680_MEAS_TRIM_MASK GENMASK(24, 4) + +#define BME680_STARTUP_TIME_US 5000 + /* Calibration Parameters */ #define BME680_T2_LSB_REG 0x8A -#define BME680_T3_REG 0x8C -#define BME680_P1_LSB_REG 0x8E -#define BME680_P2_LSB_REG 0x90 -#define BME680_P3_REG 0x92 -#define BME680_P4_LSB_REG 0x94 -#define BME680_P5_LSB_REG 0x96 -#define BME680_P7_REG 0x98 -#define BME680_P6_REG 0x99 -#define BME680_P8_LSB_REG 0x9C -#define BME680_P9_LSB_REG 0x9E -#define BME680_P10_REG 0xA0 -#define BME680_H2_LSB_REG 0xE2 #define BME680_H2_MSB_REG 0xE1 -#define BME680_H1_MSB_REG 0xE3 -#define BME680_H1_LSB_REG 0xE2 -#define BME680_H3_REG 0xE4 -#define BME680_H4_REG 0xE5 -#define BME680_H5_REG 0xE6 -#define BME680_H6_REG 0xE7 -#define BME680_H7_REG 0xE8 -#define BME680_T1_LSB_REG 0xE9 -#define BME680_GH2_LSB_REG 0xEB -#define BME680_GH1_REG 0xED #define BME680_GH3_REG 0xEE +#define BME680_CALIB_RANGE_1_LEN 23 +#define BME680_CALIB_RANGE_2_LEN 14 +#define BME680_CALIB_RANGE_3_LEN 5 + extern const struct regmap_config bme680_regmap_config; int bme680_core_probe(struct device *dev, struct regmap *regmap, diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c index 500f56834b01f6..0b96534c6867a6 100644 --- a/drivers/iio/chemical/bme680_core.c +++ b/drivers/iio/chemical/bme680_core.c @@ -8,18 +8,64 @@ * Datasheet: * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME680-DS001-00.pdf */ -#include #include +#include #include #include -#include #include +#include #include + #include #include +#include + #include "bme680.h" +/* 1st set of calibration data */ +enum { + /* Temperature calib indexes */ + T2_LSB = 0, + T3 = 
2, + /* Pressure calib indexes */ + P1_LSB = 4, + P2_LSB = 6, + P3 = 8, + P4_LSB = 10, + P5_LSB = 12, + P7 = 14, + P6 = 15, + P8_LSB = 18, + P9_LSB = 20, + P10 = 22, +}; + +/* 2nd set of calibration data */ +enum { + /* Humidity calib indexes */ + H2_MSB = 0, + H1_LSB = 1, + H3 = 3, + H4 = 4, + H5 = 5, + H6 = 6, + H7 = 7, + /* Stray T1 calib index */ + T1_LSB = 8, + /* Gas heater calib indexes */ + GH2_LSB = 10, + GH1 = 12, + GH3 = 13, +}; + +/* 3rd set of calibration data */ +enum { + RES_HEAT_VAL = 0, + RES_HEAT_RANGE = 2, + RANGE_SW_ERR = 4, +}; + struct bme680_calib { u16 par_t1; s16 par_t2; @@ -52,16 +98,21 @@ struct bme680_calib { struct bme680_data { struct regmap *regmap; struct bme680_calib bme680; + struct mutex lock; /* Protect multiple serial R/W ops to device. */ u8 oversampling_temp; u8 oversampling_press; u8 oversampling_humid; u16 heater_dur; u16 heater_temp; - /* - * Carryover value from temperature conversion, used in pressure - * and humidity compensation calculations. - */ - s32 t_fine; + + union { + u8 buf[3]; + unsigned int check; + __be16 be16; + u8 bme680_cal_buf_1[BME680_CALIB_RANGE_1_LEN]; + u8 bme680_cal_buf_2[BME680_CALIB_RANGE_2_LEN]; + u8 bme680_cal_buf_3[BME680_CALIB_RANGE_3_LEN]; + }; }; static const struct regmap_range bme680_volatile_ranges[] = { @@ -110,217 +161,98 @@ static int bme680_read_calib(struct bme680_data *data, struct bme680_calib *calib) { struct device *dev = regmap_get_device(data->regmap); - unsigned int tmp, tmp_msb, tmp_lsb; + unsigned int tmp_msb, tmp_lsb; int ret; - __le16 buf; - - /* Temperature related coefficients */ - ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG, - &buf, sizeof(buf)); - if (ret < 0) { - dev_err(dev, "failed to read BME680_T1_LSB_REG\n"); - return ret; - } - calib->par_t1 = le16_to_cpu(buf); ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG, - &buf, sizeof(buf)); + data->bme680_cal_buf_1, + sizeof(data->bme680_cal_buf_1)); if (ret < 0) { - dev_err(dev, "failed to read BME680_T2_LSB_REG\n"); + dev_err(dev, "failed to read 1st set of calib data;\n"); return ret; } - calib->par_t2 = le16_to_cpu(buf); - ret = regmap_read(data->regmap, BME680_T3_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_T3_REG\n"); - return ret; - } - calib->par_t3 = tmp; - - /* Pressure related coefficients */ - ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG, - &buf, sizeof(buf)); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P1_LSB_REG\n"); - return ret; - } - calib->par_p1 = le16_to_cpu(buf); - - ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG, - &buf, sizeof(buf)); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P2_LSB_REG\n"); - return ret; - } - calib->par_p2 = le16_to_cpu(buf); - - ret = regmap_read(data->regmap, BME680_P3_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P3_REG\n"); - return ret; - } - calib->par_p3 = tmp; - - ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG, - &buf, sizeof(buf)); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P4_LSB_REG\n"); - return ret; - } - calib->par_p4 = le16_to_cpu(buf); + calib->par_t2 = get_unaligned_le16(&data->bme680_cal_buf_1[T2_LSB]); + calib->par_t3 = data->bme680_cal_buf_1[T3]; + calib->par_p1 = get_unaligned_le16(&data->bme680_cal_buf_1[P1_LSB]); + calib->par_p2 = get_unaligned_le16(&data->bme680_cal_buf_1[P2_LSB]); + calib->par_p3 = data->bme680_cal_buf_1[P3]; + calib->par_p4 = get_unaligned_le16(&data->bme680_cal_buf_1[P4_LSB]); + calib->par_p5 = 
get_unaligned_le16(&data->bme680_cal_buf_1[P5_LSB]); + calib->par_p7 = data->bme680_cal_buf_1[P7]; + calib->par_p6 = data->bme680_cal_buf_1[P6]; + calib->par_p8 = get_unaligned_le16(&data->bme680_cal_buf_1[P8_LSB]); + calib->par_p9 = get_unaligned_le16(&data->bme680_cal_buf_1[P9_LSB]); + calib->par_p10 = data->bme680_cal_buf_1[P10]; - ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG, - &buf, sizeof(buf)); + ret = regmap_bulk_read(data->regmap, BME680_H2_MSB_REG, + data->bme680_cal_buf_2, + sizeof(data->bme680_cal_buf_2)); if (ret < 0) { - dev_err(dev, "failed to read BME680_P5_LSB_REG\n"); + dev_err(dev, "failed to read 2nd set of calib data;\n"); return ret; } - calib->par_p5 = le16_to_cpu(buf); - ret = regmap_read(data->regmap, BME680_P6_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P6_REG\n"); - return ret; - } - calib->par_p6 = tmp; - - ret = regmap_read(data->regmap, BME680_P7_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P7_REG\n"); - return ret; - } - calib->par_p7 = tmp; - - ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG, - &buf, sizeof(buf)); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P8_LSB_REG\n"); - return ret; - } - calib->par_p8 = le16_to_cpu(buf); - - ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG, - &buf, sizeof(buf)); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P9_LSB_REG\n"); - return ret; - } - calib->par_p9 = le16_to_cpu(buf); - - ret = regmap_read(data->regmap, BME680_P10_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_P10_REG\n"); - return ret; - } - calib->par_p10 = tmp; - - /* Humidity related coefficients */ - ret = regmap_read(data->regmap, BME680_H1_MSB_REG, &tmp_msb); - if (ret < 0) { - dev_err(dev, "failed to read BME680_H1_MSB_REG\n"); - return ret; - } - ret = regmap_read(data->regmap, BME680_H1_LSB_REG, &tmp_lsb); - if (ret < 0) { - dev_err(dev, "failed to read BME680_H1_LSB_REG\n"); - return ret; - } + tmp_lsb = data->bme680_cal_buf_2[H1_LSB]; + tmp_msb = data->bme680_cal_buf_2[H1_LSB + 1]; calib->par_h1 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) | (tmp_lsb & BME680_BIT_H1_DATA_MASK); - ret = regmap_read(data->regmap, BME680_H2_MSB_REG, &tmp_msb); - if (ret < 0) { - dev_err(dev, "failed to read BME680_H2_MSB_REG\n"); - return ret; - } - ret = regmap_read(data->regmap, BME680_H2_LSB_REG, &tmp_lsb); - if (ret < 0) { - dev_err(dev, "failed to read BME680_H2_LSB_REG\n"); - return ret; - } + tmp_msb = data->bme680_cal_buf_2[H2_MSB]; + tmp_lsb = data->bme680_cal_buf_2[H2_MSB + 1]; calib->par_h2 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) | (tmp_lsb >> BME680_HUM_REG_SHIFT_VAL); - ret = regmap_read(data->regmap, BME680_H3_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_H3_REG\n"); - return ret; - } - calib->par_h3 = tmp; - - ret = regmap_read(data->regmap, BME680_H4_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_H4_REG\n"); - return ret; - } - calib->par_h4 = tmp; + calib->par_h3 = data->bme680_cal_buf_2[H3]; + calib->par_h4 = data->bme680_cal_buf_2[H4]; + calib->par_h5 = data->bme680_cal_buf_2[H5]; + calib->par_h6 = data->bme680_cal_buf_2[H6]; + calib->par_h7 = data->bme680_cal_buf_2[H7]; + calib->par_t1 = get_unaligned_le16(&data->bme680_cal_buf_2[T1_LSB]); + calib->par_gh2 = get_unaligned_le16(&data->bme680_cal_buf_2[GH2_LSB]); + calib->par_gh1 = data->bme680_cal_buf_2[GH1]; + calib->par_gh3 = data->bme680_cal_buf_2[GH3]; - ret = regmap_read(data->regmap, BME680_H5_REG, &tmp); + ret = regmap_bulk_read(data->regmap, 
BME680_REG_RES_HEAT_VAL, + data->bme680_cal_buf_3, + sizeof(data->bme680_cal_buf_3)); if (ret < 0) { - dev_err(dev, "failed to read BME680_H5_REG\n"); + dev_err(dev, "failed to read 3rd set of calib data;\n"); return ret; } - calib->par_h5 = tmp; - ret = regmap_read(data->regmap, BME680_H6_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_H6_REG\n"); - return ret; - } - calib->par_h6 = tmp; + calib->res_heat_val = data->bme680_cal_buf_3[RES_HEAT_VAL]; - ret = regmap_read(data->regmap, BME680_H7_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_H7_REG\n"); - return ret; - } - calib->par_h7 = tmp; + calib->res_heat_range = FIELD_GET(BME680_RHRANGE_MASK, + data->bme680_cal_buf_3[RES_HEAT_RANGE]); - /* Gas heater related coefficients */ - ret = regmap_read(data->regmap, BME680_GH1_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_GH1_REG\n"); - return ret; - } - calib->par_gh1 = tmp; + calib->range_sw_err = FIELD_GET(BME680_RSERROR_MASK, + data->bme680_cal_buf_3[RANGE_SW_ERR]); - ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG, - &buf, sizeof(buf)); - if (ret < 0) { - dev_err(dev, "failed to read BME680_GH2_LSB_REG\n"); - return ret; - } - calib->par_gh2 = le16_to_cpu(buf); - - ret = regmap_read(data->regmap, BME680_GH3_REG, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read BME680_GH3_REG\n"); - return ret; - } - calib->par_gh3 = tmp; + return 0; +} - /* Other coefficients */ - ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_RANGE, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read resistance heat range\n"); - return ret; - } - calib->res_heat_range = FIELD_GET(BME680_RHRANGE_MASK, tmp); +static int bme680_read_temp_adc(struct bme680_data *data, u32 *adc_temp) +{ + struct device *dev = regmap_get_device(data->regmap); + u32 value_temp; + int ret; - ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_VAL, &tmp); + ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB, + data->buf, BME680_TEMP_NUM_BYTES); if (ret < 0) { - dev_err(dev, "failed to read resistance heat value\n"); + dev_err(dev, "failed to read temperature\n"); return ret; } - calib->res_heat_val = tmp; - ret = regmap_read(data->regmap, BME680_REG_RANGE_SW_ERR, &tmp); - if (ret < 0) { - dev_err(dev, "failed to read range software error\n"); - return ret; + value_temp = FIELD_GET(BME680_MEAS_TRIM_MASK, + get_unaligned_be24(data->buf)); + if (value_temp == BME680_MEAS_SKIPPED) { + /* reading was skipped */ + dev_err(dev, "reading temperature skipped\n"); + return -EINVAL; } - calib->range_sw_err = FIELD_GET(BME680_RSERROR_MASK, tmp); + *adc_temp = value_temp; return 0; } @@ -332,25 +264,65 @@ static int bme680_read_calib(struct bme680_data *data, * Returns temperature measurement in DegC, resolutions is 0.01 DegC. Therefore, * output value of "3233" represents 32.33 DegC. 
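The calibration rework just above replaces many single-register regmap_read() calls with one regmap_bulk_read() into a byte buffer that is then decoded with get_unaligned_le16() and FIELD_GET(). A minimal sketch of that decode pattern, using a made-up register layout rather than the real BME680 map (the unaligned helpers header is <linux/unaligned.h> as of this series; older trees use <asm/unaligned.h>):

#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/unaligned.h>

#define FOO_CALIB_START		0x8A		/* hypothetical base register */
#define FOO_CALIB_LEN		4		/* hypothetical block length */
#define FOO_HEAT_RANGE_MASK	GENMASK(5, 4)

static int foo_read_calib(struct regmap *map, u16 *par1, u8 *heat_range)
{
	u8 buf[FOO_CALIB_LEN];
	int ret;

	/* One burst read instead of a register-by-register sequence */
	ret = regmap_bulk_read(map, FOO_CALIB_START, buf, sizeof(buf));
	if (ret)
		return ret;

	/* 16-bit LSB-first parameter, possibly unaligned within the block */
	*par1 = get_unaligned_le16(&buf[0]);
	/* sub-field of a packed byte */
	*heat_range = FIELD_GET(FOO_HEAT_RANGE_MASK, buf[2]);

	return 0;
}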
*/ -static s16 bme680_compensate_temp(struct bme680_data *data, - s32 adc_temp) +static s32 bme680_calc_t_fine(struct bme680_data *data, u32 adc_temp) { struct bme680_calib *calib = &data->bme680; s64 var1, var2, var3; - s16 calc_temp; /* If the calibration is invalid, attempt to reload it */ if (!calib->par_t2) bme680_read_calib(data, calib); - var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1); + var1 = ((s32)adc_temp >> 3) - ((s32)calib->par_t1 << 1); var2 = (var1 * calib->par_t2) >> 11; var3 = ((var1 >> 1) * (var1 >> 1)) >> 12; var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14; - data->t_fine = var2 + var3; - calc_temp = (data->t_fine * 5 + 128) >> 8; + return var2 + var3; /* t_fine = var2 + var3 */ +} - return calc_temp; +static int bme680_get_t_fine(struct bme680_data *data, s32 *t_fine) +{ + u32 adc_temp; + int ret; + + ret = bme680_read_temp_adc(data, &adc_temp); + if (ret) + return ret; + + *t_fine = bme680_calc_t_fine(data, adc_temp); + + return 0; +} + +static s16 bme680_compensate_temp(struct bme680_data *data, + u32 adc_temp) +{ + return (bme680_calc_t_fine(data, adc_temp) * 5 + 128) / 256; +} + +static int bme680_read_press_adc(struct bme680_data *data, u32 *adc_press) +{ + struct device *dev = regmap_get_device(data->regmap); + u32 value_press; + int ret; + + ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB, + data->buf, BME680_PRESS_NUM_BYTES); + if (ret < 0) { + dev_err(dev, "failed to read pressure\n"); + return ret; + } + + value_press = FIELD_GET(BME680_MEAS_TRIM_MASK, + get_unaligned_be24(data->buf)); + if (value_press == BME680_MEAS_SKIPPED) { + /* reading was skipped */ + dev_err(dev, "reading pressure skipped\n"); + return -EINVAL; + } + *adc_press = value_press; + + return 0; } /* @@ -361,12 +333,12 @@ static s16 bme680_compensate_temp(struct bme680_data *data, * 97356 Pa = 973.56 hPa. */ static u32 bme680_compensate_press(struct bme680_data *data, - u32 adc_press) + u32 adc_press, s32 t_fine) { struct bme680_calib *calib = &data->bme680; s32 var1, var2, var3, press_comp; - var1 = (data->t_fine >> 1) - 64000; + var1 = (t_fine >> 1) - 64000; var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2; var2 = var2 + (var1 * calib->par_p5 << 1); var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16); @@ -394,6 +366,30 @@ static u32 bme680_compensate_press(struct bme680_data *data, return press_comp; } +static int bme680_read_humid_adc(struct bme680_data *data, u32 *adc_humidity) +{ + struct device *dev = regmap_get_device(data->regmap); + u32 value_humidity; + int ret; + + ret = regmap_bulk_read(data->regmap, BME680_REG_HUMIDITY_MSB, + &data->be16, BME680_HUMID_NUM_BYTES); + if (ret < 0) { + dev_err(dev, "failed to read humidity\n"); + return ret; + } + + value_humidity = be16_to_cpu(data->be16); + if (value_humidity == BME680_MEAS_SKIPPED) { + /* reading was skipped */ + dev_err(dev, "reading humidity skipped\n"); + return -EINVAL; + } + *adc_humidity = value_humidity; + + return 0; +} + /* * Taken from Bosch BME680 API: * https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L937 @@ -402,15 +398,15 @@ static u32 bme680_compensate_press(struct bme680_data *data, * value of "43215" represents 43.215 %rH. 
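The bme680 read/write paths a little further down serialize device access with guard(mutex)() from <linux/cleanup.h> instead of explicit lock/unlock pairs. A small illustration of that scoped-lock idiom, with a hypothetical structure:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct foo_data {
	struct mutex lock;	/* protects cached_val */
	int cached_val;
};

static int foo_get_cached(struct foo_data *data)
{
	/* Lock is dropped automatically when the scope is left */
	guard(mutex)(&data->lock);

	if (data->cached_val < 0)
		return -ENODATA;	/* early return still unlocks */

	return data->cached_val;
}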
*/ static u32 bme680_compensate_humid(struct bme680_data *data, - u16 adc_humid) + u16 adc_humid, s32 t_fine) { struct bme680_calib *calib = &data->bme680; s32 var1, var2, var3, var4, var5, var6, temp_scaled, calc_hum; - temp_scaled = (data->t_fine * 5 + 128) >> 8; - var1 = (adc_humid - ((s32) ((s32) calib->par_h1 * 16))) - - (((temp_scaled * (s32) calib->par_h3) / 100) >> 1); - var2 = ((s32) calib->par_h2 * + temp_scaled = (t_fine * 5 + 128) >> 8; + var1 = (adc_humid - (((s32)calib->par_h1 * 16))) - + (((temp_scaled * calib->par_h3) / 100) >> 1); + var2 = (calib->par_h2 * (((temp_scaled * calib->par_h4) / 100) + (((temp_scaled * ((temp_scaled * calib->par_h5) / 100)) >> 6) / 100) + (1 << 14))) >> 10; @@ -442,7 +438,7 @@ static u32 bme680_compensate_gas(struct bme680_data *data, u16 gas_res_adc, u32 calc_gas_res; /* Look up table for the possible gas range values */ - const u32 lookupTable[16] = {2147483647u, 2147483647u, + static const u32 lookupTable[16] = {2147483647u, 2147483647u, 2147483647u, 2147483647u, 2147483647u, 2126008810u, 2147483647u, 2130303777u, 2147483647u, 2147483647u, 2143188679u, @@ -540,7 +536,6 @@ static u8 bme680_oversampling_to_reg(u8 val) static int bme680_wait_for_eoc(struct bme680_data *data) { struct device *dev = regmap_get_device(data->regmap); - unsigned int check; int ret; /* * (Sum of oversampling ratios * time per oversampling) + @@ -553,16 +548,16 @@ static int bme680_wait_for_eoc(struct bme680_data *data) usleep_range(wait_eoc_us, wait_eoc_us + 100); - ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check); + ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &data->check); if (ret) { dev_err(dev, "failed to read measurement status register.\n"); return ret; } - if (check & BME680_MEAS_BIT) { + if (data->check & BME680_MEAS_BIT) { dev_err(dev, "Device measurement cycle incomplete.\n"); return -EBUSY; } - if (!(check & BME680_NEW_DATA_BIT)) { + if (!(data->check & BME680_NEW_DATA_BIT)) { dev_err(dev, "No new data available from the device.\n"); return -ENODATA; } @@ -606,10 +601,12 @@ static int bme680_chip_config(struct bme680_data *data) ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS, BME680_OSRS_TEMP_MASK | BME680_OSRS_PRESS_MASK, osrs); - if (ret < 0) + if (ret < 0) { dev_err(dev, "failed to write ctrl_meas register\n"); + return ret; + } - return ret; + return 0; } static int bme680_gas_config(struct bme680_data *data) @@ -618,6 +615,11 @@ static int bme680_gas_config(struct bme680_data *data) int ret; u8 heatr_res, heatr_dur; + /* Go to sleep */ + ret = bme680_set_mode(data, false); + if (ret < 0) + return ret; + heatr_res = bme680_calc_heater_res(data, data->heater_temp); /* set target heater temperature */ @@ -649,77 +651,35 @@ static int bme680_gas_config(struct bme680_data *data) static int bme680_read_temp(struct bme680_data *data, int *val) { - struct device *dev = regmap_get_device(data->regmap); int ret; - __be32 tmp = 0; - s32 adc_temp; + u32 adc_temp; s16 comp_temp; - /* set forced mode to trigger measurement */ - ret = bme680_set_mode(data, true); - if (ret < 0) - return ret; - - ret = bme680_wait_for_eoc(data); + ret = bme680_read_temp_adc(data, &adc_temp); if (ret) return ret; - ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB, - &tmp, 3); - if (ret < 0) { - dev_err(dev, "failed to read temperature\n"); - return ret; - } - - adc_temp = be32_to_cpu(tmp) >> 12; - if (adc_temp == BME680_MEAS_SKIPPED) { - /* reading was skipped */ - dev_err(dev, "reading temperature skipped\n"); - return -EINVAL; - } 
comp_temp = bme680_compensate_temp(data, adc_temp); - /* - * val might be NULL if we're called by the read_press/read_humid - * routine which is called to get t_fine value used in - * compensate_press/compensate_humid to get compensated - * pressure/humidity readings. - */ - if (val) { - *val = comp_temp * 10; /* Centidegrees to millidegrees */ - return IIO_VAL_INT; - } - - return ret; + *val = comp_temp * 10; /* Centidegrees to millidegrees */ + return IIO_VAL_INT; } static int bme680_read_press(struct bme680_data *data, int *val, int *val2) { - struct device *dev = regmap_get_device(data->regmap); int ret; - __be32 tmp = 0; - s32 adc_press; + u32 adc_press; + s32 t_fine; - /* Read and compensate temperature to get a reading of t_fine */ - ret = bme680_read_temp(data, NULL); - if (ret < 0) + ret = bme680_get_t_fine(data, &t_fine); + if (ret) return ret; - ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB, - &tmp, 3); - if (ret < 0) { - dev_err(dev, "failed to read pressure\n"); + ret = bme680_read_press_adc(data, &adc_press); + if (ret) return ret; - } - - adc_press = be32_to_cpu(tmp) >> 12; - if (adc_press == BME680_MEAS_SKIPPED) { - /* reading was skipped */ - dev_err(dev, "reading pressure skipped\n"); - return -EINVAL; - } - *val = bme680_compensate_press(data, adc_press); + *val = bme680_compensate_press(data, adc_press, t_fine); *val2 = 1000; return IIO_VAL_FRACTIONAL; } @@ -727,31 +687,19 @@ static int bme680_read_press(struct bme680_data *data, static int bme680_read_humid(struct bme680_data *data, int *val, int *val2) { - struct device *dev = regmap_get_device(data->regmap); int ret; - __be16 tmp = 0; - s32 adc_humidity; - u32 comp_humidity; + u32 adc_humidity, comp_humidity; + s32 t_fine; - /* Read and compensate temperature to get a reading of t_fine */ - ret = bme680_read_temp(data, NULL); - if (ret < 0) + ret = bme680_get_t_fine(data, &t_fine); + if (ret) return ret; - ret = regmap_bulk_read(data->regmap, BM6880_REG_HUMIDITY_MSB, - &tmp, sizeof(tmp)); - if (ret < 0) { - dev_err(dev, "failed to read humidity\n"); + ret = bme680_read_humid_adc(data, &adc_humidity); + if (ret) return ret; - } - adc_humidity = be16_to_cpu(tmp); - if (adc_humidity == BME680_MEAS_SKIPPED) { - /* reading was skipped */ - dev_err(dev, "reading humidity skipped\n"); - return -EINVAL; - } - comp_humidity = bme680_compensate_humid(data, adc_humidity); + comp_humidity = bme680_compensate_humid(data, adc_humidity, t_fine); *val = comp_humidity; *val2 = 1000; @@ -763,59 +711,37 @@ static int bme680_read_gas(struct bme680_data *data, { struct device *dev = regmap_get_device(data->regmap); int ret; - __be16 tmp = 0; - unsigned int check; - u16 adc_gas_res; + u16 adc_gas_res, gas_regs_val; u8 gas_range; - /* Set heater settings */ - ret = bme680_gas_config(data); - if (ret < 0) { - dev_err(dev, "failed to set gas config\n"); - return ret; - } - - /* set forced mode to trigger measurement */ - ret = bme680_set_mode(data, true); - if (ret < 0) - return ret; - - ret = bme680_wait_for_eoc(data); - if (ret) - return ret; - - ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check); - if (check & BME680_GAS_MEAS_BIT) { + ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &data->check); + if (data->check & BME680_GAS_MEAS_BIT) { dev_err(dev, "gas measurement incomplete\n"); return -EBUSY; } - ret = regmap_read(data->regmap, BME680_REG_GAS_R_LSB, &check); + ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB, + &data->be16, BME680_GAS_NUM_BYTES); if (ret < 0) { - dev_err(dev, "failed to read 
gas_r_lsb register\n"); + dev_err(dev, "failed to read gas resistance\n"); return ret; } + gas_regs_val = be16_to_cpu(data->be16); + adc_gas_res = FIELD_GET(BME680_ADC_GAS_RES, gas_regs_val); + /* * occurs if either the gas heating duration was insuffient * to reach the target heater temperature or the target * heater temperature was too high for the heater sink to * reach. */ - if ((check & BME680_GAS_STAB_BIT) == 0) { + if ((gas_regs_val & BME680_GAS_STAB_BIT) == 0) { dev_err(dev, "heater failed to reach the target temperature\n"); return -EINVAL; } - ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB, - &tmp, sizeof(tmp)); - if (ret < 0) { - dev_err(dev, "failed to read gas resistance\n"); - return ret; - } - - gas_range = check & BME680_GAS_RANGE_MASK; - adc_gas_res = be16_to_cpu(tmp) >> BME680_ADC_GAS_RES_SHIFT; + gas_range = FIELD_GET(BME680_GAS_RANGE_MASK, gas_regs_val); *val = bme680_compensate_gas(data, adc_gas_res, gas_range); return IIO_VAL_INT; @@ -826,6 +752,18 @@ static int bme680_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct bme680_data *data = iio_priv(indio_dev); + int ret; + + guard(mutex)(&data->lock); + + /* set forced mode to trigger measurement */ + ret = bme680_set_mode(data, true); + if (ret < 0) + return ret; + + ret = bme680_wait_for_eoc(data); + if (ret) + return ret; switch (mask) { case IIO_CHAN_INFO_PROCESSED: @@ -871,6 +809,8 @@ static int bme680_write_raw(struct iio_dev *indio_dev, { struct bme680_data *data = iio_priv(indio_dev); + guard(mutex)(&data->lock); + if (val2 != 0) return -EINVAL; @@ -921,52 +861,19 @@ static const struct iio_info bme680_info = { .attrs = &bme680_attribute_group, }; -static const char *bme680_match_acpi_device(struct device *dev) -{ - const struct acpi_device_id *id; - - id = acpi_match_device(dev->driver->acpi_match_table, dev); - if (!id) - return NULL; - - return dev_name(dev); -} - int bme680_core_probe(struct device *dev, struct regmap *regmap, const char *name) { struct iio_dev *indio_dev; struct bme680_data *data; - unsigned int val; int ret; - ret = regmap_write(regmap, BME680_REG_SOFT_RESET, - BME680_CMD_SOFTRESET); - if (ret < 0) { - dev_err(dev, "Failed to reset chip\n"); - return ret; - } - - ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val); - if (ret < 0) { - dev_err(dev, "Error reading chip ID\n"); - return ret; - } - - if (val != BME680_CHIP_ID_VAL) { - dev_err(dev, "Wrong chip ID, got %x expected %x\n", - val, BME680_CHIP_ID_VAL); - return -ENODEV; - } - indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; - if (!name && ACPI_HANDLE(dev)) - name = bme680_match_acpi_device(dev); - data = iio_priv(indio_dev); + mutex_init(&data->lock); dev_set_drvdata(dev, indio_dev); data->regmap = regmap; indio_dev->name = name; @@ -982,25 +889,39 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, data->heater_temp = 320; /* degree Celsius */ data->heater_dur = 150; /* milliseconds */ - ret = bme680_chip_config(data); - if (ret < 0) { - dev_err(dev, "failed to set chip_config data\n"); - return ret; - } + ret = regmap_write(regmap, BME680_REG_SOFT_RESET, + BME680_CMD_SOFTRESET); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to reset chip\n"); - ret = bme680_gas_config(data); - if (ret < 0) { - dev_err(dev, "failed to set gas config data\n"); - return ret; + usleep_range(BME680_STARTUP_TIME_US, BME680_STARTUP_TIME_US + 1000); + + ret = regmap_read(regmap, BME680_REG_CHIP_ID, &data->check); + if (ret < 0) + return dev_err_probe(dev, 
ret, "Error reading chip ID\n"); + + if (data->check != BME680_CHIP_ID_VAL) { + dev_err(dev, "Wrong chip ID, got %x expected %x\n", + data->check, BME680_CHIP_ID_VAL); + return -ENODEV; } ret = bme680_read_calib(data, &data->bme680); if (ret < 0) { - dev_err(dev, + return dev_err_probe(dev, ret, "failed to read calibration coefficients at probe\n"); - return ret; } + ret = bme680_chip_config(data); + if (ret < 0) + return dev_err_probe(dev, ret, + "failed to set chip_config data\n"); + + ret = bme680_gas_config(data); + if (ret < 0) + return dev_err_probe(dev, ret, + "failed to set gas config data\n"); + return devm_iio_device_register(dev, indio_dev); } EXPORT_SYMBOL_NS_GPL(bme680_core_probe, IIO_BME680); diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c index 4404d42ae5ec4d..7c54bd17d4b06d 100644 --- a/drivers/iio/chemical/bme680_spi.c +++ b/drivers/iio/chemical/bme680_spi.c @@ -100,7 +100,7 @@ static int bme680_regmap_spi_read(void *context, const void *reg, return spi_write_then_read(spi, &addr, 1, val, val_size); } -static struct regmap_bus bme680_regmap_bus = { +static const struct regmap_bus bme680_regmap_bus = { .write = bme680_regmap_spi_write, .read = bme680_regmap_spi_read, .reg_format_endian_default = REGMAP_ENDIAN_BIG, diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c index 43025866d5b791..d0bd94912e0a34 100644 --- a/drivers/iio/chemical/pms7003.c +++ b/drivers/iio/chemical/pms7003.c @@ -5,7 +5,7 @@ * Copyright (c) Tomasz Duszynski */ -#include +#include #include #include #include diff --git a/drivers/iio/chemical/scd30_i2c.c b/drivers/iio/chemical/scd30_i2c.c index bd3b01ded246e0..b31dfaf52df9c5 100644 --- a/drivers/iio/chemical/scd30_i2c.c +++ b/drivers/iio/chemical/scd30_i2c.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "scd30.h" diff --git a/drivers/iio/chemical/scd30_serial.c b/drivers/iio/chemical/scd30_serial.c index 2adb76dbb0209e..55044f07d5a371 100644 --- a/drivers/iio/chemical/scd30_serial.c +++ b/drivers/iio/chemical/scd30_serial.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include "scd30.h" diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c index ca6b20270711a6..52cad54e85724f 100644 --- a/drivers/iio/chemical/scd4x.c +++ b/drivers/iio/chemical/scd4x.c @@ -11,7 +11,7 @@ * https://www.sensirion.com/file/datasheet_scd4x */ -#include +#include #include #include #include diff --git a/drivers/iio/chemical/sgp40.c b/drivers/iio/chemical/sgp40.c index 7f0de14a1956fe..07d8ab830211a6 100644 --- a/drivers/iio/chemical/sgp40.c +++ b/drivers/iio/chemical/sgp40.c @@ -14,11 +14,16 @@ * 1) read raw logarithmic resistance value from sensor * --> useful to pass it to the algorithm of the sensor vendor for * measuring deteriorations and improvements of air quality. + * It can be read from the attribute in_resistance_raw. * - * 2) calculate an estimated absolute voc index (0 - 500 index points) for - * measuring the air quality. + * 2) calculate an estimated absolute voc index (in_concentration_input) + * with 0 - 500 index points) for measuring the air quality. * For this purpose the value of the resistance for which the voc index - * will be 250 can be set up using calibbias. + * will be 250 can be set up using in_resistance_calibbias (default 30000). 
+ * + * The voc index is calculated as: + * x = (in_resistance_raw - in_resistance_calibbias) * 0.65 + * in_concentration_input = 500 / (1 + e^x) * * Compensation values of relative humidity and temperature can be set up * by writing to the out values of temp and humidityrelative. diff --git a/drivers/iio/chemical/sps30_i2c.c b/drivers/iio/chemical/sps30_i2c.c index 5c31299813ecf9..1b21b6bcd0e7a4 100644 --- a/drivers/iio/chemical/sps30_i2c.c +++ b/drivers/iio/chemical/sps30_i2c.c @@ -6,7 +6,7 @@ * * I2C slave address: 0x69 */ -#include +#include #include #include #include diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index 6bfe5d6847e754..9fc71a73caa125 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -198,9 +198,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev, return 0; out = (s16 *)st->samples; - for_each_set_bit(i, - indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { *out = data[i]; out++; } @@ -587,7 +585,7 @@ static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev, int ret; /* Read all sensors enabled in scan_mask. Each value is 2 bytes. */ - for_each_set_bit(i, &scan_mask, indio_dev->masklength) { + for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) { ret = cros_ec_sensors_cmd_read_u16(ec, cros_ec_sensors_idx_to_reg(st, i), data); @@ -683,7 +681,7 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, return ret; } - for_each_set_bit(i, &scan_mask, indio_dev->masklength) { + for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) { *data = st->resp->data.data[i]; data++; } diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c index 7190eaede7fbe8..ed15dcbf4cf6b1 100644 --- a/drivers/iio/common/scmi_sensors/scmi_iio.c +++ b/drivers/iio/common/scmi_sensors/scmi_iio.c @@ -158,7 +158,7 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2) * To calculate the multiplier,we convert the sf into char string and * count the number of characters */ - sf = (u64)uHz * 0xFFFF; + sf = uHz * 0xFFFF; do_div(sf, MICROHZ_PER_HZ); mult = scnprintf(buf, sizeof(buf), "%llu", sf) - 1; diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index c69399ac66572f..1b4287991d00ae 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include "st_sensors_core.h" diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index a2596c2d3de316..1cfd7e2a622f67 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -371,6 +371,17 @@ config LTC2632 To compile this driver as a module, choose M here: the module will be called ltc2632. +config LTC2664 + tristate "Analog Devices LTC2664 and LTC2672 DAC SPI driver" + depends on SPI + select REGMAP + help + Say yes here to build support for Analog Devices + LTC2664 and LTC2672 converters (DAC). + + To compile this driver as a module, choose M here: the + module will be called ltc2664. 
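Several probe paths touched in this series (stm32-dfsdm, bme680, tsc2046) move their error reporting to dev_err_probe(), and tsc2046 additionally switches to devm_regulator_get_enable_read_voltage(). A sketch of the combined pattern under a hypothetical "vref" supply and a made-up internal-reference fallback; dev_err_probe() logs the failure (or records the deferral reason for -EPROBE_DEFER) and returns the error code unchanged.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

static int foo_get_vref_mv(struct device *dev, u32 *vref_mv)
{
	int ret;

	/* "vref" is only an illustrative supply name */
	ret = devm_regulator_get_enable_read_voltage(dev, "vref");
	if (ret == -ENODEV) {
		/* no external reference wired up: fall back to a made-up internal one */
		*vref_mv = 2500;
		return 0;
	}
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to get vref\n");

	/* the helper reports the regulator voltage in microvolts */
	*vref_mv = ret / 1000;

	return 0;
}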
+ config M62332 tristate "Mitsubishi M62332 DAC driver" depends on I2C diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 8432a81a19dc2f..2cf148f16306db 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_DS4424) += ds4424.o obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o obj-$(CONFIG_LTC1660) += ltc1660.o obj-$(CONFIG_LTC2632) += ltc2632.o +obj-$(CONFIG_LTC2664) += ltc2664.o obj-$(CONFIG_LTC2688) += ltc2688.o obj-$(CONFIG_M62332) += m62332.o obj-$(CONFIG_MAX517) += max517.o diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c index bd37d304ca70ac..7d61b2fe662436 100644 --- a/drivers/iio/dac/ad3552r.c +++ b/drivers/iio/dac/ad3552r.c @@ -5,7 +5,7 @@ * * Copyright 2021 Analog Devices Inc. */ -#include +#include #include #include #include diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index 7712dc6be608f3..905988724f2731 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index 8103d2cd13f66f..708629efc15734 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -22,7 +22,7 @@ #include #include -#include +#include #define MODE_PWRDWN_1k 0x1 #define MODE_PWRDWN_100k 0x2 diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c index 4572d6f49275f0..1c996016756a8b 100644 --- a/drivers/iio/dac/ad5449.c +++ b/drivers/iio/dac/ad5449.c @@ -15,13 +15,11 @@ #include #include #include -#include +#include #include #include -#include - #define AD5449_MAX_CHANNELS 2 #define AD5449_MAX_VREFS 2 @@ -268,7 +266,6 @@ static const char *ad5449_vref_name(struct ad5449 *st, int n) static int ad5449_spi_probe(struct spi_device *spi) { - struct ad5449_platform_data *pdata = spi->dev.platform_data; const struct spi_device_id *id = spi_get_device_id(spi); struct iio_dev *indio_dev; struct ad5449 *st; @@ -306,16 +303,8 @@ static int ad5449_spi_probe(struct spi_device *spi) mutex_init(&st->lock); if (st->chip_info->has_ctrl) { - unsigned int ctrl = 0x00; - if (pdata) { - if (pdata->hardware_clear_to_midscale) - ctrl |= AD5449_CTRL_HCLR_TO_MIDSCALE; - ctrl |= pdata->sdo_mode << AD5449_CTRL_SDO_OFFSET; - st->has_sdo = pdata->sdo_mode != AD5449_SDO_DISABLED; - } else { - st->has_sdo = true; - } - ad5449_write(indio_dev, AD5449_CMD_CTRL, ctrl); + st->has_sdo = true; + ad5449_write(indio_dev, AD5449_CMD_CTRL, 0x0); } ret = iio_device_register(indio_dev); diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c index fae5e5a0e72fca..62e1fbb9e9101e 100644 --- a/drivers/iio/dac/ad5593r.c +++ b/drivers/iio/dac/ad5593r.c @@ -13,7 +13,7 @@ #include #include -#include +#include #define AD5593R_MODE_CONF (0 << 4) #define AD5593R_MODE_DAC_WRITE (1 << 4) diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 7e6f824de29938..9304d0499bae82 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include "ad5624r.h" diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c index 899894523752f0..f658ac8086aaa4 100644 --- a/drivers/iio/dac/ad5766.c +++ b/drivers/iio/dac/ad5766.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #define AD5766_UPPER_WORD_SPI_MASK GENMASK(31, 16) #define AD5766_LOWER_WORD_SPI_MASK GENMASK(15, 0) diff --git a/drivers/iio/dac/ad7293.c b/drivers/iio/dac/ad7293.c index 
06f05750d9216c..1d403267048240 100644 --- a/drivers/iio/dac/ad7293.c +++ b/drivers/iio/dac/ad7293.c @@ -16,7 +16,7 @@ #include #include -#include +#include #define AD7293_R1B BIT(16) #define AD7293_R2B BIT(17) diff --git a/drivers/iio/dac/ad9739a.c b/drivers/iio/dac/ad9739a.c index f56eabe5372356..615d1a196db335 100644 --- a/drivers/iio/dac/ad9739a.c +++ b/drivers/iio/dac/ad9739a.c @@ -145,7 +145,7 @@ static int ad9739a_buffer_postdisable(struct iio_dev *indio_dev) struct ad9739a_state *st = iio_priv(indio_dev); return iio_backend_data_source_set(st->back, 0, - IIO_BACKEND_INTERNAL_CONTINUOS_WAVE); + IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE); } static bool ad9739a_reg_accessible(struct device *dev, unsigned int reg) @@ -413,8 +413,7 @@ static int ad9739a_probe(struct spi_device *spi) if (ret) return ret; - ret = iio_backend_extend_chan_spec(indio_dev, st->back, - &ad9739a_channels[0]); + ret = iio_backend_extend_chan_spec(st->back, &ad9739a_channels[0]); if (ret) return ret; @@ -432,7 +431,13 @@ static int ad9739a_probe(struct spi_device *spi) indio_dev->num_channels = ARRAY_SIZE(ad9739a_channels); indio_dev->setup_ops = &ad9739a_buffer_setup_ops; - return devm_iio_device_register(&spi->dev, indio_dev); + ret = devm_iio_device_register(&spi->dev, indio_dev); + if (ret) + return ret; + + iio_backend_debugfs_add(st->back, indio_dev); + + return 0; } static const struct of_device_id ad9739a_of_match[] = { diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c index 6d56428e623d82..0cb00f3bec0453 100644 --- a/drivers/iio/dac/adi-axi-dac.c +++ b/drivers/iio/dac/adi-axi-dac.c @@ -452,7 +452,7 @@ static int axi_dac_data_source_set(struct iio_backend *back, unsigned int chan, struct axi_dac_state *st = iio_backend_get_priv(back); switch (data) { - case IIO_BACKEND_INTERNAL_CONTINUOS_WAVE: + case IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE: return regmap_update_bits(st->regmap, AXI_DAC_REG_CHAN_CNTRL_7(chan), AXI_DAC_DATA_SEL, @@ -507,7 +507,18 @@ static int axi_dac_set_sample_rate(struct iio_backend *back, unsigned int chan, return 0; } -static const struct iio_backend_ops axi_dac_generic = { +static int axi_dac_reg_access(struct iio_backend *back, unsigned int reg, + unsigned int writeval, unsigned int *readval) +{ + struct axi_dac_state *st = iio_backend_get_priv(back); + + if (readval) + return regmap_read(st->regmap, reg, readval); + + return regmap_write(st->regmap, reg, writeval); +} + +static const struct iio_backend_ops axi_dac_generic_ops = { .enable = axi_dac_enable, .disable = axi_dac_disable, .request_buffer = axi_dac_request_buffer, @@ -517,6 +528,12 @@ static const struct iio_backend_ops axi_dac_generic = { .ext_info_get = axi_dac_ext_info_get, .data_source_set = axi_dac_data_source_set, .set_sample_rate = axi_dac_set_sample_rate, + .debugfs_reg_access = iio_backend_debugfs_ptr(axi_dac_reg_access), +}; + +static const struct iio_backend_info axi_dac_generic = { + .name = "axi-dac", + .ops = &axi_dac_generic_ops, }; static const struct regmap_config axi_dac_regmap_config = { diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c index 3a3c4f4874e401..a4fb2509c950a1 100644 --- a/drivers/iio/dac/ltc2632.c +++ b/drivers/iio/dac/ltc2632.c @@ -13,7 +13,7 @@ #include #include -#include +#include #define LTC2632_CMD_WRITE_INPUT_N 0x0 #define LTC2632_CMD_UPDATE_DAC_N 0x1 diff --git a/drivers/iio/dac/ltc2664.c b/drivers/iio/dac/ltc2664.c new file mode 100644 index 00000000000000..5be5345ac5c854 --- /dev/null +++ b/drivers/iio/dac/ltc2664.c @@ -0,0 +1,735 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * LTC2664 4 channel, 12-/16-Bit Voltage Output SoftSpan DAC driver + * LTC2672 5 channel, 12-/16-Bit Current Output Softspan DAC driver + * + * Copyright 2024 Analog Devices Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LTC2664_CMD_WRITE_N(n) (0x00 + (n)) +#define LTC2664_CMD_UPDATE_N(n) (0x10 + (n)) +#define LTC2664_CMD_WRITE_N_UPDATE_ALL 0x20 +#define LTC2664_CMD_WRITE_N_UPDATE_N(n) (0x30 + (n)) +#define LTC2664_CMD_POWER_DOWN_N(n) (0x40 + (n)) +#define LTC2664_CMD_POWER_DOWN_ALL 0x50 +#define LTC2664_CMD_SPAN_N(n) (0x60 + (n)) +#define LTC2664_CMD_CONFIG 0x70 +#define LTC2664_CMD_MUX 0xB0 +#define LTC2664_CMD_TOGGLE_SEL 0xC0 +#define LTC2664_CMD_GLOBAL_TOGGLE 0xD0 +#define LTC2664_CMD_NO_OPERATION 0xF0 +#define LTC2664_REF_DISABLE 0x0001 +#define LTC2664_MSPAN_SOFTSPAN 7 + +#define LTC2672_MAX_CHANNEL 5 +#define LTC2672_MAX_SPAN 7 +#define LTC2672_SCALE_MULTIPLIER(n) (50 * BIT(n)) + +enum { + LTC2664_SPAN_RANGE_0V_5V, + LTC2664_SPAN_RANGE_0V_10V, + LTC2664_SPAN_RANGE_M5V_5V, + LTC2664_SPAN_RANGE_M10V_10V, + LTC2664_SPAN_RANGE_M2V5_2V5, +}; + +enum { + LTC2664_INPUT_A, + LTC2664_INPUT_B, + LTC2664_INPUT_B_AVAIL, + LTC2664_POWERDOWN, + LTC2664_POWERDOWN_MODE, + LTC2664_TOGGLE_EN, + LTC2664_GLOBAL_TOGGLE, +}; + +static const u16 ltc2664_mspan_lut[8][2] = { + { LTC2664_SPAN_RANGE_M10V_10V, 32768 }, /* MPS2=0, MPS1=0, MSP0=0 (0)*/ + { LTC2664_SPAN_RANGE_M5V_5V, 32768 }, /* MPS2=0, MPS1=0, MSP0=1 (1)*/ + { LTC2664_SPAN_RANGE_M2V5_2V5, 32768 }, /* MPS2=0, MPS1=1, MSP0=0 (2)*/ + { LTC2664_SPAN_RANGE_0V_10V, 0 }, /* MPS2=0, MPS1=1, MSP0=1 (3)*/ + { LTC2664_SPAN_RANGE_0V_10V, 32768 }, /* MPS2=1, MPS1=0, MSP0=0 (4)*/ + { LTC2664_SPAN_RANGE_0V_5V, 0 }, /* MPS2=1, MPS1=0, MSP0=1 (5)*/ + { LTC2664_SPAN_RANGE_0V_5V, 32768 }, /* MPS2=1, MPS1=1, MSP0=0 (6)*/ + { LTC2664_SPAN_RANGE_0V_5V, 0 } /* MPS2=1, MPS1=1, MSP0=1 (7)*/ +}; + +struct ltc2664_state; + +struct ltc2664_chip_info { + const char *name; + int (*scale_get)(const struct ltc2664_state *st, int c); + int (*offset_get)(const struct ltc2664_state *st, int c); + int measurement_type; + unsigned int num_channels; + const int (*span_helper)[2]; + unsigned int num_span; + unsigned int internal_vref_mv; + bool manual_span_support; + bool rfsadj_support; +}; + +struct ltc2664_chan { + /* indicates if the channel should be toggled */ + bool toggle_chan; + /* indicates if the channel is in powered down state */ + bool powerdown; + /* span code of the channel */ + u8 span; + /* raw data of the current state of the chip registers (A/B) */ + u16 raw[2]; +}; + +struct ltc2664_state { + struct spi_device *spi; + struct regmap *regmap; + struct ltc2664_chan channels[LTC2672_MAX_CHANNEL]; + /* lock to protect against multiple access to the device and shared data */ + struct mutex lock; + const struct ltc2664_chip_info *chip_info; + struct iio_chan_spec *iio_channels; + int vref_mv; + u32 rfsadj_ohms; + u32 toggle_sel; + bool global_toggle; +}; + +static const int ltc2664_span_helper[][2] = { + { 0, 5000 }, + { 0, 10000 }, + { -5000, 5000 }, + { -10000, 10000 }, + { -2500, 2500 }, +}; + +static const int ltc2672_span_helper[][2] = { + { 0, 0 }, + { 0, 3125 }, + { 0, 6250 }, + { 0, 12500 }, + { 0, 25000 }, + { 0, 50000 }, + { 0, 100000 }, + { 0, 200000 }, + { 0, 300000 }, +}; + +static int ltc2664_scale_get(const struct ltc2664_state *st, int c) +{ + const struct ltc2664_chan *chan = &st->channels[c]; + const 
int (*span_helper)[2] = st->chip_info->span_helper; + int span, fs; + + span = chan->span; + if (span < 0) + return span; + + fs = span_helper[span][1] - span_helper[span][0]; + + return fs * st->vref_mv / 2500; +} + +static int ltc2672_scale_get(const struct ltc2664_state *st, int c) +{ + const struct ltc2664_chan *chan = &st->channels[c]; + int span, fs; + + span = chan->span - 1; + if (span < 0) + return span; + + fs = 1000 * st->vref_mv; + + if (span == LTC2672_MAX_SPAN) + return mul_u64_u32_div(4800, fs, st->rfsadj_ohms); + + return mul_u64_u32_div(LTC2672_SCALE_MULTIPLIER(span), fs, st->rfsadj_ohms); +} + +static int ltc2664_offset_get(const struct ltc2664_state *st, int c) +{ + const struct ltc2664_chan *chan = &st->channels[c]; + int span; + + span = chan->span; + if (span < 0) + return span; + + if (st->chip_info->span_helper[span][0] < 0) + return -32768; + + return 0; +} + +static int ltc2664_dac_code_write(struct ltc2664_state *st, u32 chan, u32 input, + u16 code) +{ + struct ltc2664_chan *c = &st->channels[chan]; + int ret, reg; + + guard(mutex)(&st->lock); + /* select the correct input register to write to */ + if (c->toggle_chan) { + ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL, + input << chan); + if (ret) + return ret; + } + /* + * If in toggle mode the dac should be updated by an + * external signal (or sw toggle) and not here. + */ + if (st->toggle_sel & BIT(chan)) + reg = LTC2664_CMD_WRITE_N(chan); + else + reg = LTC2664_CMD_WRITE_N_UPDATE_N(chan); + + ret = regmap_write(st->regmap, reg, code); + if (ret) + return ret; + + c->raw[input] = code; + + if (c->toggle_chan) { + ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL, + st->toggle_sel); + if (ret) + return ret; + } + + return 0; +} + +static void ltc2664_dac_code_read(struct ltc2664_state *st, u32 chan, u32 input, + u32 *code) +{ + guard(mutex)(&st->lock); + *code = st->channels[chan].raw[input]; +} + +static const int ltc2664_raw_range[] = { 0, 1, U16_MAX }; + +static int ltc2664_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long info) +{ + switch (info) { + case IIO_CHAN_INFO_RAW: + *vals = ltc2664_raw_range; + *type = IIO_VAL_INT; + + return IIO_AVAIL_RANGE; + default: + return -EINVAL; + } +} + +static int ltc2664_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long info) +{ + struct ltc2664_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_RAW: + ltc2664_dac_code_read(st, chan->channel, LTC2664_INPUT_A, val); + + return IIO_VAL_INT; + case IIO_CHAN_INFO_OFFSET: + *val = st->chip_info->offset_get(st, chan->channel); + + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = st->chip_info->scale_get(st, chan->channel); + + *val2 = 16; + return IIO_VAL_FRACTIONAL_LOG2; + default: + return -EINVAL; + } +} + +static int ltc2664_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, + int val2, long info) +{ + struct ltc2664_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_CHAN_INFO_RAW: + if (val > U16_MAX || val < 0) + return -EINVAL; + + return ltc2664_dac_code_write(st, chan->channel, + LTC2664_INPUT_A, val); + default: + return -EINVAL; + } +} + +static ssize_t ltc2664_reg_bool_get(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + char *buf) +{ + struct ltc2664_state *st = iio_priv(indio_dev); + u32 val; + + guard(mutex)(&st->lock); + switch (private) { + case LTC2664_POWERDOWN: 
+ val = st->channels[chan->channel].powerdown; + + return sysfs_emit(buf, "%u\n", val); + case LTC2664_POWERDOWN_MODE: + return sysfs_emit(buf, "42kohm_to_gnd\n"); + case LTC2664_TOGGLE_EN: + val = !!(st->toggle_sel & BIT(chan->channel)); + + return sysfs_emit(buf, "%u\n", val); + case LTC2664_GLOBAL_TOGGLE: + val = st->global_toggle; + + return sysfs_emit(buf, "%u\n", val); + default: + return -EINVAL; + } +} + +static ssize_t ltc2664_reg_bool_set(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len) +{ + struct ltc2664_state *st = iio_priv(indio_dev); + int ret; + bool en; + + ret = kstrtobool(buf, &en); + if (ret) + return ret; + + guard(mutex)(&st->lock); + switch (private) { + case LTC2664_POWERDOWN: + ret = regmap_write(st->regmap, + en ? LTC2664_CMD_POWER_DOWN_N(chan->channel) : + LTC2664_CMD_UPDATE_N(chan->channel), en); + if (ret) + return ret; + + st->channels[chan->channel].powerdown = en; + + return len; + case LTC2664_TOGGLE_EN: + if (en) + st->toggle_sel |= BIT(chan->channel); + else + st->toggle_sel &= ~BIT(chan->channel); + + ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL, + st->toggle_sel); + if (ret) + return ret; + + return len; + case LTC2664_GLOBAL_TOGGLE: + ret = regmap_write(st->regmap, LTC2664_CMD_GLOBAL_TOGGLE, en); + if (ret) + return ret; + + st->global_toggle = en; + + return len; + default: + return -EINVAL; + } +} + +static ssize_t ltc2664_dac_input_read(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + char *buf) +{ + struct ltc2664_state *st = iio_priv(indio_dev); + u32 val; + + if (private == LTC2664_INPUT_B_AVAIL) + return sysfs_emit(buf, "[%u %u %u]\n", ltc2664_raw_range[0], + ltc2664_raw_range[1], + ltc2664_raw_range[2] / 4); + + ltc2664_dac_code_read(st, chan->channel, private, &val); + + return sysfs_emit(buf, "%u\n", val); +} + +static ssize_t ltc2664_dac_input_write(struct iio_dev *indio_dev, + uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len) +{ + struct ltc2664_state *st = iio_priv(indio_dev); + int ret; + u16 val; + + if (private == LTC2664_INPUT_B_AVAIL) + return -EINVAL; + + ret = kstrtou16(buf, 10, &val); + if (ret) + return ret; + + ret = ltc2664_dac_code_write(st, chan->channel, private, val); + if (ret) + return ret; + + return len; +} + +static int ltc2664_reg_access(struct iio_dev *indio_dev, + unsigned int reg, + unsigned int writeval, + unsigned int *readval) +{ + struct ltc2664_state *st = iio_priv(indio_dev); + + if (readval) + return -EOPNOTSUPP; + + return regmap_write(st->regmap, reg, writeval); +} + +#define LTC2664_CHAN_EXT_INFO(_name, _what, _shared, _read, _write) { \ + .name = _name, \ + .read = (_read), \ + .write = (_write), \ + .private = (_what), \ + .shared = (_shared), \ +} + +/* + * For toggle mode we only expose the symbol attr (sw_toggle) in case a TGPx is + * not provided in dts. 
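+ * When only the software toggle is available, raw0/raw1 hold the codes for + * input registers A and B, toggle_en selects which channels take part in + * toggling, and writing the symbol attribute triggers the global software + * toggle.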
+ */ +static const struct iio_chan_spec_ext_info ltc2664_toggle_sym_ext_info[] = { + LTC2664_CHAN_EXT_INFO("raw0", LTC2664_INPUT_A, IIO_SEPARATE, + ltc2664_dac_input_read, ltc2664_dac_input_write), + LTC2664_CHAN_EXT_INFO("raw1", LTC2664_INPUT_B, IIO_SEPARATE, + ltc2664_dac_input_read, ltc2664_dac_input_write), + LTC2664_CHAN_EXT_INFO("powerdown", LTC2664_POWERDOWN, IIO_SEPARATE, + ltc2664_reg_bool_get, ltc2664_reg_bool_set), + LTC2664_CHAN_EXT_INFO("powerdown_mode", LTC2664_POWERDOWN_MODE, + IIO_SEPARATE, ltc2664_reg_bool_get, NULL), + LTC2664_CHAN_EXT_INFO("symbol", LTC2664_GLOBAL_TOGGLE, IIO_SEPARATE, + ltc2664_reg_bool_get, ltc2664_reg_bool_set), + LTC2664_CHAN_EXT_INFO("toggle_en", LTC2664_TOGGLE_EN, + IIO_SEPARATE, ltc2664_reg_bool_get, + ltc2664_reg_bool_set), + { } +}; + +static const struct iio_chan_spec_ext_info ltc2664_ext_info[] = { + LTC2664_CHAN_EXT_INFO("powerdown", LTC2664_POWERDOWN, IIO_SEPARATE, + ltc2664_reg_bool_get, ltc2664_reg_bool_set), + LTC2664_CHAN_EXT_INFO("powerdown_mode", LTC2664_POWERDOWN_MODE, + IIO_SEPARATE, ltc2664_reg_bool_get, NULL), + { } +}; + +static const struct iio_chan_spec ltc2664_channel_template = { + .indexed = 1, + .output = 1, + .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_RAW), + .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW), + .ext_info = ltc2664_ext_info, +}; + +static const struct ltc2664_chip_info ltc2664_chip = { + .name = "ltc2664", + .scale_get = ltc2664_scale_get, + .offset_get = ltc2664_offset_get, + .measurement_type = IIO_VOLTAGE, + .num_channels = 4, + .span_helper = ltc2664_span_helper, + .num_span = ARRAY_SIZE(ltc2664_span_helper), + .internal_vref_mv = 2500, + .manual_span_support = true, + .rfsadj_support = false, +}; + +static const struct ltc2664_chip_info ltc2672_chip = { + .name = "ltc2672", + .scale_get = ltc2672_scale_get, + .offset_get = ltc2664_offset_get, + .measurement_type = IIO_CURRENT, + .num_channels = 5, + .span_helper = ltc2672_span_helper, + .num_span = ARRAY_SIZE(ltc2672_span_helper), + .internal_vref_mv = 1250, + .manual_span_support = false, + .rfsadj_support = true, +}; + +static int ltc2664_set_span(const struct ltc2664_state *st, int min, int max, + int chan) +{ + const struct ltc2664_chip_info *chip_info = st->chip_info; + const int (*span_helper)[2] = chip_info->span_helper; + int span, ret; + + for (span = 0; span < chip_info->num_span; span++) { + if (min == span_helper[span][0] && max == span_helper[span][1]) + break; + } + + if (span == chip_info->num_span) + return -EINVAL; + + ret = regmap_write(st->regmap, LTC2664_CMD_SPAN_N(chan), span); + if (ret) + return ret; + + return span; +} + +static int ltc2664_channel_config(struct ltc2664_state *st) +{ + const struct ltc2664_chip_info *chip_info = st->chip_info; + struct device *dev = &st->spi->dev; + u32 reg, tmp[2], mspan; + int ret, span = 0; + + mspan = LTC2664_MSPAN_SOFTSPAN; + ret = device_property_read_u32(dev, "adi,manual-span-operation-config", + &mspan); + if (!ret) { + if (!chip_info->manual_span_support) + return dev_err_probe(dev, -EINVAL, + "adi,manual-span-operation-config not supported\n"); + + if (mspan >= ARRAY_SIZE(ltc2664_mspan_lut)) + return dev_err_probe(dev, -EINVAL, + "adi,manual-span-operation-config not in range\n"); + } + + st->rfsadj_ohms = 20000; + ret = device_property_read_u32(dev, "adi,rfsadj-ohms", &st->rfsadj_ohms); + if (!ret) { + if (!chip_info->rfsadj_support) + return dev_err_probe(dev, -EINVAL, + "adi,rfsadj-ohms not supported\n"); + + if 
(st->rfsadj_ohms < 19000 || st->rfsadj_ohms > 41000) + return dev_err_probe(dev, -EINVAL, + "adi,rfsadj-ohms not in range\n"); + } + + device_for_each_child_node_scoped(dev, child) { + struct ltc2664_chan *chan; + + ret = fwnode_property_read_u32(child, "reg", &reg); + if (ret) + return dev_err_probe(dev, ret, + "Failed to get reg property\n"); + + if (reg >= chip_info->num_channels) + return dev_err_probe(dev, -EINVAL, + "reg bigger than: %d\n", + chip_info->num_channels); + + chan = &st->channels[reg]; + + if (fwnode_property_read_bool(child, "adi,toggle-mode")) { + chan->toggle_chan = true; + /* assume sw toggle ABI */ + st->iio_channels[reg].ext_info = ltc2664_toggle_sym_ext_info; + + /* + * Clear IIO_CHAN_INFO_RAW bit as toggle channels expose + * out_voltage/current_raw{0|1} files. + */ + __clear_bit(IIO_CHAN_INFO_RAW, + &st->iio_channels[reg].info_mask_separate); + } + + chan->raw[0] = ltc2664_mspan_lut[mspan][1]; + chan->raw[1] = ltc2664_mspan_lut[mspan][1]; + + chan->span = ltc2664_mspan_lut[mspan][0]; + + ret = fwnode_property_read_u32_array(child, "output-range-microvolt", + tmp, ARRAY_SIZE(tmp)); + if (!ret && mspan == LTC2664_MSPAN_SOFTSPAN) { + span = ltc2664_set_span(st, tmp[0] / 1000, + tmp[1] / 1000, reg); + if (span < 0) + return dev_err_probe(dev, span, + "Failed to set span\n"); + chan->span = span; + } + + ret = fwnode_property_read_u32_array(child, "output-range-microamp", + tmp, ARRAY_SIZE(tmp)); + if (!ret) { + span = ltc2664_set_span(st, 0, tmp[1] / 1000, reg); + if (span < 0) + return dev_err_probe(dev, span, + "Failed to set span\n"); + chan->span = span; + } + } + + return 0; +} + +static int ltc2664_setup(struct ltc2664_state *st) +{ + const struct ltc2664_chip_info *chip_info = st->chip_info; + struct gpio_desc *gpio; + int ret, i; + + /* If we have a clr/reset pin, use that to reset the chip.
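+ * The GPIO is requested in the asserted state and released again roughly one + * millisecond later (see the fsleep() below).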
*/ + gpio = devm_gpiod_get_optional(&st->spi->dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return dev_err_probe(&st->spi->dev, PTR_ERR(gpio), + "Failed to get reset gpio"); + if (gpio) { + fsleep(1000); + gpiod_set_value_cansleep(gpio, 0); + } + + /* + * Duplicate the default channel configuration as it can change during + * @ltc2664_channel_config() + */ + st->iio_channels = devm_kcalloc(&st->spi->dev, + chip_info->num_channels, + sizeof(struct iio_chan_spec), + GFP_KERNEL); + if (!st->iio_channels) + return -ENOMEM; + + for (i = 0; i < chip_info->num_channels; i++) { + st->iio_channels[i] = ltc2664_channel_template; + st->iio_channels[i].type = chip_info->measurement_type; + st->iio_channels[i].channel = i; + } + + ret = ltc2664_channel_config(st); + if (ret) + return ret; + + return regmap_set_bits(st->regmap, LTC2664_CMD_CONFIG, LTC2664_REF_DISABLE); +} + +static const struct regmap_config ltc2664_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .max_register = LTC2664_CMD_NO_OPERATION, +}; + +static const struct iio_info ltc2664_info = { + .write_raw = ltc2664_write_raw, + .read_raw = ltc2664_read_raw, + .read_avail = ltc2664_read_avail, + .debugfs_reg_access = ltc2664_reg_access, +}; + +static int ltc2664_probe(struct spi_device *spi) +{ + static const char * const regulators[] = { "vcc", "iovcc", "v-neg" }; + const struct ltc2664_chip_info *chip_info; + struct device *dev = &spi->dev; + struct iio_dev *indio_dev; + struct ltc2664_state *st; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + st->spi = spi; + + chip_info = spi_get_device_match_data(spi); + if (!chip_info) + return -ENODEV; + + st->chip_info = chip_info; + + mutex_init(&st->lock); + + st->regmap = devm_regmap_init_spi(spi, <c2664_regmap_config); + if (IS_ERR(st->regmap)) + return dev_err_probe(dev, PTR_ERR(st->regmap), + "Failed to init regmap"); + + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators), + regulators); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable regulators\n"); + + ret = devm_regulator_get_enable_read_voltage(dev, "ref"); + if (ret < 0 && ret != -ENODEV) + return ret; + + st->vref_mv = ret > 0 ? 
ret / 1000 : chip_info->internal_vref_mv; + + ret = ltc2664_setup(st); + if (ret) + return ret; + + indio_dev->name = chip_info->name; + indio_dev->info = <c2664_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = st->iio_channels; + indio_dev->num_channels = chip_info->num_channels; + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct spi_device_id ltc2664_id[] = { + { "ltc2664", (kernel_ulong_t)<c2664_chip }, + { "ltc2672", (kernel_ulong_t)<c2672_chip }, + { } +}; +MODULE_DEVICE_TABLE(spi, ltc2664_id); + +static const struct of_device_id ltc2664_of_id[] = { + { .compatible = "adi,ltc2664", .data = <c2664_chip }, + { .compatible = "adi,ltc2672", .data = <c2672_chip }, + { } +}; +MODULE_DEVICE_TABLE(of, ltc2664_of_id); + +static struct spi_driver ltc2664_driver = { + .driver = { + .name = "ltc2664", + .of_match_table = ltc2664_of_id, + }, + .probe = ltc2664_probe, + .id_table = ltc2664_id, +}; +module_spi_driver(ltc2664_driver); + +MODULE_AUTHOR("Michael Hennerich "); +MODULE_AUTHOR("Kim Seer Paller "); +MODULE_DESCRIPTION("Analog Devices LTC2664 and LTC2672 DAC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c index af50d2a958985c..376dca163c91be 100644 --- a/drivers/iio/dac/ltc2688.c +++ b/drivers/iio/dac/ltc2688.c @@ -918,7 +918,7 @@ static bool ltc2688_reg_writable(struct device *dev, unsigned int reg) return false; } -static struct regmap_bus ltc2688_regmap_bus = { +static const struct regmap_bus ltc2688_regmap_bus = { .read = ltc2688_spi_read, .write = ltc2688_spi_write, .read_flag_mask = LTC2688_READ_OPERATION, diff --git a/drivers/iio/dac/mcp4728.c b/drivers/iio/dac/mcp4728.c index c449ca949465d3..192175dc6419ea 100644 --- a/drivers/iio/dac/mcp4728.c +++ b/drivers/iio/dac/mcp4728.c @@ -84,7 +84,6 @@ enum mcp4728_scale { struct mcp4728_data { struct i2c_client *client; - struct regulator *vdd_reg; bool powerdown; int scales_avail[MCP4728_N_SCALES * 2]; struct mcp4728_channel_data chdata[MCP4728_N_CHANNELS]; @@ -415,15 +414,9 @@ static void mcp4728_init_scale_avail(enum mcp4728_scale scale, int vref_mv, data->scales_avail[scale * 2 + 1] = value_micro; } -static int mcp4728_init_scales_avail(struct mcp4728_data *data) +static int mcp4728_init_scales_avail(struct mcp4728_data *data, int vdd_mv) { - int ret; - - ret = regulator_get_voltage(data->vdd_reg); - if (ret < 0) - return ret; - - mcp4728_init_scale_avail(MCP4728_SCALE_VDD, ret / 1000, data); + mcp4728_init_scale_avail(MCP4728_SCALE_VDD, vdd_mv, data); mcp4728_init_scale_avail(MCP4728_SCALE_VINT_NO_GAIN, 2048, data); mcp4728_init_scale_avail(MCP4728_SCALE_VINT_GAIN_X2, 4096, data); @@ -530,17 +523,12 @@ static int mcp4728_init_channels_data(struct mcp4728_data *data) return 0; } -static void mcp4728_reg_disable(void *reg) -{ - regulator_disable(reg); -} - static int mcp4728_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); struct mcp4728_data *data; struct iio_dev *indio_dev; - int err; + int ret, vdd_mv; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) @@ -550,18 +538,11 @@ static int mcp4728_probe(struct i2c_client *client) i2c_set_clientdata(client, indio_dev); data->client = client; - data->vdd_reg = devm_regulator_get(&client->dev, "vdd"); - if (IS_ERR(data->vdd_reg)) - return PTR_ERR(data->vdd_reg); - - err = regulator_enable(data->vdd_reg); - if (err) - return err; + ret = devm_regulator_get_enable_read_voltage(&client->dev, "vdd"); + if (ret < 0) + 
return ret; - err = devm_add_action_or_reset(&client->dev, mcp4728_reg_disable, - data->vdd_reg); - if (err) - return err; + vdd_mv = ret / 1000; /* * MCP4728 has internal EEPROM that save each channel boot @@ -569,15 +550,15 @@ static int mcp4728_probe(struct i2c_client *client) * driver at kernel boot. mcp4728_init_channels_data() reads back DAC * settings and stores them in data structure. */ - err = mcp4728_init_channels_data(data); - if (err) { - return dev_err_probe(&client->dev, err, + ret = mcp4728_init_channels_data(data); + if (ret) { + return dev_err_probe(&client->dev, ret, "failed to read mcp4728 current configuration\n"); } - err = mcp4728_init_scales_avail(data); - if (err) { - return dev_err_probe(&client->dev, err, + ret = mcp4728_init_scales_avail(data, vdd_mv); + if (ret) { + return dev_err_probe(&client->dev, ret, "failed to init scales\n"); } diff --git a/drivers/iio/dac/mcp4821.c b/drivers/iio/dac/mcp4821.c index 782e8f6b77829b..c1a59bbbba3cc1 100644 --- a/drivers/iio/dac/mcp4821.c +++ b/drivers/iio/dac/mcp4821.c @@ -23,7 +23,7 @@ #include #include -#include +#include #define MCP4821_ACTIVE_MODE BIT(12) #define MCP4802_SECOND_CHAN BIT(15) diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c index da4327624d45d7..26aa9905981340 100644 --- a/drivers/iio/dac/mcp4922.c +++ b/drivers/iio/dac/mcp4922.c @@ -30,7 +30,6 @@ struct mcp4922_state { struct spi_device *spi; unsigned int value[MCP4922_NUM_CHANNELS]; unsigned int vref_mv; - struct regulator *vref_reg; u8 mosi[2] __aligned(IIO_DMA_MINALIGN); }; @@ -132,27 +131,13 @@ static int mcp4922_probe(struct spi_device *spi) state = iio_priv(indio_dev); state->spi = spi; - state->vref_reg = devm_regulator_get(&spi->dev, "vref"); - if (IS_ERR(state->vref_reg)) - return dev_err_probe(&spi->dev, PTR_ERR(state->vref_reg), - "Vref regulator not specified\n"); - - ret = regulator_enable(state->vref_reg); - if (ret) { - dev_err(&spi->dev, "Failed to enable vref regulator: %d\n", - ret); - return ret; - } - ret = regulator_get_voltage(state->vref_reg); - if (ret < 0) { - dev_err(&spi->dev, "Failed to read vref regulator: %d\n", - ret); - goto error_disable_reg; - } + ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref"); + if (ret < 0) + return dev_err_probe(&spi->dev, ret, "Failed to get vref voltage\n"); + state->vref_mv = ret / 1000; - spi_set_drvdata(spi, indio_dev); id = spi_get_device_id(spi); indio_dev->info = &mcp4922_info; indio_dev->modes = INDIO_DIRECT_MODE; @@ -163,30 +148,13 @@ static int mcp4922_probe(struct spi_device *spi) indio_dev->num_channels = MCP4922_NUM_CHANNELS; indio_dev->name = id->name; - ret = iio_device_register(indio_dev); - if (ret) { - dev_err(&spi->dev, "Failed to register iio device: %d\n", - ret); - goto error_disable_reg; - } + ret = devm_iio_device_register(&spi->dev, indio_dev); + if (ret) + return dev_err_probe(&spi->dev, ret, "Failed to register iio device\n"); return 0; - -error_disable_reg: - regulator_disable(state->vref_reg); - - return ret; } -static void mcp4922_remove(struct spi_device *spi) -{ - struct iio_dev *indio_dev = spi_get_drvdata(spi); - struct mcp4922_state *state; - - iio_device_unregister(indio_dev); - state = iio_priv(indio_dev); - regulator_disable(state->vref_reg); -} static const struct spi_device_id mcp4922_id[] = { {"mcp4902", ID_MCP4902}, @@ -202,7 +170,6 @@ static struct spi_driver mcp4922_driver = { .name = "mcp4922", }, .probe = mcp4922_probe, - .remove = mcp4922_remove, .id_table = mcp4922_id, }; module_spi_driver(mcp4922_driver); diff --git 
a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c index 7f89d2a52f49c4..6f4aa4794a0cab 100644 --- a/drivers/iio/dac/ti-dac7311.c +++ b/drivers/iio/dac/ti-dac7311.c @@ -249,7 +249,9 @@ static int ti_dac_probe(struct spi_device *spi) spi->mode = SPI_MODE_1; spi->bits_per_word = 16; - spi_setup(spi); + ret = spi_setup(spi); + if (ret < 0) + return dev_err_probe(dev, ret, "spi_setup failed\n"); indio_dev->info = &ti_dac_info; indio_dev->name = spi_get_device_id(spi)->name; diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c index 9b2f99449a8292..4ca3f1aaff9996 100644 --- a/drivers/iio/dummy/iio_simple_dummy_buffer.c +++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c @@ -68,7 +68,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p) * Here let's pretend we have random access. And the values are in the * constant table fakedata. */ - for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength) + iio_for_each_active_channel(indio_dev, j) data[i++] = fakedata[j]; iio_push_to_buffers_with_timestamp(indio_dev, data, diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c index 9284c13f1abb34..45ceeb828d6b68 100644 --- a/drivers/iio/frequency/adf4377.c +++ b/drivers/iio/frequency/adf4377.c @@ -20,7 +20,7 @@ #include #include -#include +#include /* ADF4377 REG0000 Map */ #define ADF4377_0000_SOFT_RESET_R_MSK BIT(7) @@ -400,7 +400,13 @@ enum muxout_select_mode { ADF4377_MUXOUT_HIGH = 0x8, }; +struct adf4377_chip_info { + const char *name; + bool has_gpio_enclk2; +}; + struct adf4377_state { + const struct adf4377_chip_info *chip_info; struct spi_device *spi; struct regmap *regmap; struct clk *clkin; @@ -889,11 +895,13 @@ static int adf4377_properties_parse(struct adf4377_state *st) return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk1), "failed to get the CE GPIO\n"); - st->gpio_enclk2 = devm_gpiod_get_optional(&st->spi->dev, "clk2-enable", - GPIOD_OUT_LOW); - if (IS_ERR(st->gpio_enclk2)) - return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2), - "failed to get the CE GPIO\n"); + if (st->chip_info->has_gpio_enclk2) { + st->gpio_enclk2 = devm_gpiod_get_optional(&st->spi->dev, "clk2-enable", + GPIOD_OUT_LOW); + if (IS_ERR(st->gpio_enclk2)) + return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2), + "failed to get the CE GPIO\n"); + } ret = device_property_match_property_string(&spi->dev, "adi,muxout-select", adf4377_muxout_modes, @@ -921,6 +929,16 @@ static int adf4377_freq_change(struct notifier_block *nb, unsigned long action, return NOTIFY_OK; } +static const struct adf4377_chip_info adf4377_chip_info = { + .name = "adf4377", + .has_gpio_enclk2 = true, +}; + +static const struct adf4377_chip_info adf4378_chip_info = { + .name = "adf4378", + .has_gpio_enclk2 = false, +}; + static int adf4377_probe(struct spi_device *spi) { struct iio_dev *indio_dev; @@ -945,6 +963,7 @@ static int adf4377_probe(struct spi_device *spi) st->regmap = regmap; st->spi = spi; + st->chip_info = spi_get_device_match_data(spi); mutex_init(&st->lock); ret = adf4377_properties_parse(st); @@ -964,13 +983,15 @@ static int adf4377_probe(struct spi_device *spi) } static const struct spi_device_id adf4377_id[] = { - { "adf4377", 0 }, + { "adf4377", (kernel_ulong_t)&adf4377_chip_info }, + { "adf4378", (kernel_ulong_t)&adf4378_chip_info }, {} }; MODULE_DEVICE_TABLE(spi, adf4377_id); static const struct of_device_id adf4377_of_match[] = { - { .compatible = "adi,adf4377" }, + { .compatible = "adi,adf4377", .data = 
&adf4377_chip_info }, + { .compatible = "adi,adf4378", .data = &adf4378_chip_info }, {} }; MODULE_DEVICE_TABLE(of, adf4377_of_match); diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c index c0cd5d9844fe9d..8ef583680ad0b1 100644 --- a/drivers/iio/frequency/admv1013.c +++ b/drivers/iio/frequency/admv1013.c @@ -18,7 +18,7 @@ #include #include -#include +#include /* ADMV1013 Register Map */ #define ADMV1013_REG_SPI_CONTROL 0x00 diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c index b46b73b89eb715..986b87a72577e5 100644 --- a/drivers/iio/frequency/admv1014.c +++ b/drivers/iio/frequency/admv1014.c @@ -19,7 +19,7 @@ #include #include -#include +#include /* ADMV1014 Register Map */ #define ADMV1014_REG_SPI_CONTROL 0x00 diff --git a/drivers/iio/frequency/admv4420.c b/drivers/iio/frequency/admv4420.c index 863ba8e98c95e5..3ae462b4f5c9f9 100644 --- a/drivers/iio/frequency/admv4420.c +++ b/drivers/iio/frequency/admv4420.c @@ -13,7 +13,7 @@ #include #include -#include +#include /* ADMV4420 Register Map */ #define ADMV4420_SPI_CONFIG_1 0x00 diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c index 3f46032c927527..57ee908fc747e1 100644 --- a/drivers/iio/frequency/adrf6780.c +++ b/drivers/iio/frequency/adrf6780.c @@ -16,7 +16,7 @@ #include #include -#include +#include /* ADRF6780 Register Map */ #define ADRF6780_REG_CONTROL 0x00 diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c index 33cde9e6fca59c..2535e3c9403731 100644 --- a/drivers/iio/gyro/adis16130.c +++ b/drivers/iio/gyro/adis16130.c @@ -12,7 +12,7 @@ #include -#include +#include #define ADIS16130_CON 0x0 #define ADIS16130_CON_RD (1 << 6) diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 52326dc521ac14..13e1dd4dd62cad 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -23,7 +23,7 @@ #include #include -#include +#include #include "afe440x.h" @@ -321,8 +321,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = spi_write_then_read(afe->spi, &afe4403_channel_values[bit], 1, rx, sizeof(rx)); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 7f69baa1ed53b3..d49e1572a439f8 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -333,8 +333,7 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private) struct afe4404_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = regmap_read(afe->regmap, afe4404_channel_values[bit], &afe->buffer[i++]); if (ret) diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 07a343e35a8186..1d074eb6a8c5e9 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -293,7 +293,7 @@ static irqreturn_t max30102_interrupt_handler(int irq, void *private) struct iio_dev *indio_dev = private; struct max30102_data *data = iio_priv(indio_dev); unsigned int measurements = bitmap_weight(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); int ret, cnt = 0; mutex_lock(&data->lock); diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig index b15b7a3b66d5a4..54f11f000b6ffc 100644 --- 
a/drivers/iio/humidity/Kconfig +++ b/drivers/iio/humidity/Kconfig @@ -25,6 +25,17 @@ config DHT11 Other sensors should work as well as long as they speak the same protocol. +config ENS210 + tristate "ENS210 temperature and humidity sensor" + depends on I2C + select CRC7 + help + Say yes here to get support for the ScioSense ENS210 family of + humidity and temperature sensors. + + This driver can also be built as a module. If so, the module will be + called ens210. + config HDC100X tristate "TI HDC100x relative humidity and temperature sensor" depends on I2C diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile index 5fbeef299f61bf..34b3dc749466e3 100644 --- a/drivers/iio/humidity/Makefile +++ b/drivers/iio/humidity/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_AM2315) += am2315.o obj-$(CONFIG_DHT11) += dht11.o +obj-$(CONFIG_ENS210) += ens210.o obj-$(CONFIG_HDC100X) += hdc100x.o obj-$(CONFIG_HDC2010) += hdc2010.o obj-$(CONFIG_HDC3020) += hdc3020.o diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c index a56474be5dd233..6b0aa3a3f0252b 100644 --- a/drivers/iio/humidity/am2315.c +++ b/drivers/iio/humidity/am2315.c @@ -174,8 +174,7 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p) data->scan.chans[1] = sensor_data.temp_data; } else { i = 0; - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { data->scan.chans[i] = (bit ? sensor_data.temp_data : sensor_data.hum_data); i++; diff --git a/drivers/iio/humidity/ens210.c b/drivers/iio/humidity/ens210.c new file mode 100644 index 00000000000000..77418d97f30de0 --- /dev/null +++ b/drivers/iio/humidity/ens210.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ens210.c - Support for ScioSense ens210 temperature & humidity sensor family + * + * (7-bit I2C slave address 0x43 ENS210) + * (7-bit I2C slave address 0x43 ENS210A) + * (7-bit I2C slave address 0x44 ENS211) + * (7-bit I2C slave address 0x45 ENS212) + * (7-bit I2C slave address 0x46 ENS213A) + * (7-bit I2C slave address 0x47 ENS215) + * + * Datasheet: + * https://www.sciosense.com/wp-content/uploads/2024/04/ENS21x-Datasheet.pdf + * https://www.sciosense.com/wp-content/uploads/2023/12/ENS210-Datasheet.pdf + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* register definitions */ +#define ENS210_REG_PART_ID 0x00 +#define ENS210_REG_DIE_REV 0x02 +#define ENS210_REG_UID 0x04 +#define ENS210_REG_SYS_CTRL 0x10 +#define ENS210_REG_SYS_STAT 0x11 +#define ENS210_REG_SENS_RUN 0x21 +#define ENS210_REG_SENS_START 0x22 +#define ENS210_REG_SENS_STOP 0x23 +#define ENS210_REG_SENS_STAT 0x24 +#define ENS210_REG_T_VAL 0x30 +#define ENS210_REG_H_VAL 0x33 + +/* value definitions */ +#define ENS210_SENS_START_T_START BIT(0) +#define ENS210_SENS_START_H_START BIT(1) + +#define ENS210_SENS_STAT_T_ACTIVE BIT(0) +#define ENS210_SENS_STAT_H_ACTIVE BIT(1) + +#define ENS210_SYS_CTRL_LOW_POWER_ENABLE BIT(0) +#define ENS210_SYS_CTRL_SYS_RESET BIT(7) + +#define ENS210_SYS_STAT_SYS_ACTIVE BIT(0) + +enum ens210_partnumber { + ENS210 = 0x0210, + ENS210A = 0xa210, + ENS211 = 0x0211, + ENS212 = 0x0212, + ENS213A = 0xa213, + ENS215 = 0x0215, +}; + +/** + * struct ens210_chip_info - Humidity/Temperature chip specific information + * @name: name of device + * @part_id: chip identifier + * @conv_time_msec: time for conversion calculation in m/s + */ +struct ens210_chip_info { + const char *name; + enum ens210_partnumber part_id; + unsigned int 
conv_time_msec; +}; + +/** + * struct ens210_data - Humidity/Temperature sensor device structure + * @client: i2c client + * @chip_info: chip specific information + * @lock: lock protecting against simultaneous callers of get_measurement + * since multiple uninterrupted transactions are required + */ +struct ens210_data { + struct i2c_client *client; + const struct ens210_chip_info *chip_info; + struct mutex lock; +}; + +/* calculate 17-bit crc7 */ +static u8 ens210_crc7(u32 val) +{ + unsigned int val_be = (val & 0x1ffff) >> 0x8; + + return crc7_be(0xde, (u8 *)&val_be, 3) >> 1; +} + +static int ens210_get_measurement(struct iio_dev *indio_dev, bool temp, int *val) +{ + struct ens210_data *data = iio_priv(indio_dev); + struct device *dev = &data->client->dev; + u32 regval; + u8 regval_le[3]; + int ret; + + /* assert read */ + ret = i2c_smbus_write_byte_data(data->client, ENS210_REG_SENS_START, + temp ? ENS210_SENS_START_T_START : + ENS210_SENS_START_H_START); + if (ret) + return ret; + + /* wait for conversion to be ready */ + msleep(data->chip_info->conv_time_msec); + + ret = i2c_smbus_read_byte_data(data->client, ENS210_REG_SENS_STAT); + if (ret < 0) + return ret; + + /* perform read */ + ret = i2c_smbus_read_i2c_block_data( + data->client, temp ? ENS210_REG_T_VAL : ENS210_REG_H_VAL, 3, + regval_le); + if (ret < 0) { + dev_err(dev, "failed to read register"); + return -EIO; + } + if (ret != 3) { + dev_err(dev, "expected 3 bytes, received %d\n", ret); + return -EIO; + } + + regval = get_unaligned_le24(regval_le); + if (ens210_crc7(regval) != ((regval >> 17) & 0x7f)) { + dev_err(dev, "invalid crc\n"); + return -EIO; + } + + if (!((regval >> 16) & 0x1)) { + dev_err(dev, "data is not valid"); + return -EIO; + } + + *val = regval & GENMASK(15, 0); + return IIO_VAL_INT; +} + +static int ens210_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *channel, int *val, + int *val2, long mask) +{ + struct ens210_data *data = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + scoped_guard(mutex, &data->lock) { + ret = ens210_get_measurement( + indio_dev, channel->type == IIO_TEMP, val); + if (ret) + return ret; + return IIO_VAL_INT; + } + return -EINVAL; /* compiler warning workaround */ + case IIO_CHAN_INFO_SCALE: + if (channel->type == IIO_TEMP) { + *val = 15; + *val2 = 625000; + } else { + *val = 1; + *val2 = 953125; + } + return IIO_VAL_INT_PLUS_MICRO; + case IIO_CHAN_INFO_OFFSET: + *val = -17481; + *val2 = 600000; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } +} + +static const struct iio_chan_spec ens210_channels[] = { + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + }, + { + .type = IIO_HUMIDITYRELATIVE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + } +}; + +static const struct iio_info ens210_info = { + .read_raw = ens210_read_raw, +}; + +static int ens210_probe(struct i2c_client *client) +{ + struct ens210_data *data; + struct iio_dev *indio_dev; + uint16_t part_id; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WRITE_BYTE_DATA | + I2C_FUNC_SMBUS_WRITE_BYTE | + I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { + return dev_err_probe(&client->dev, -EOPNOTSUPP, + "adapter does not support some i2c transactions\n"); + } + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + data->client = client; + mutex_init(&data->lock); 
+ data->chip_info = i2c_get_match_data(client); + + ret = devm_regulator_get_enable(&client->dev, "vdd"); + if (ret) + return ret; + + /* reset device */ + ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL, + ENS210_SYS_CTRL_SYS_RESET); + if (ret) + return ret; + + /* wait for device to become active */ + usleep_range(4000, 5000); + + /* disable low power mode */ + ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL, 0x00); + if (ret) + return ret; + + /* wait for device to finish */ + usleep_range(4000, 5000); + + /* get part_id */ + ret = i2c_smbus_read_word_data(client, ENS210_REG_PART_ID); + if (ret < 0) + return ret; + part_id = ret; + + if (part_id != data->chip_info->part_id) { + dev_info(&client->dev, + "Part ID does not match (0x%04x != 0x%04x)\n", part_id, + data->chip_info->part_id); + } + + /* reenable low power */ + ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL, + ENS210_SYS_CTRL_LOW_POWER_ENABLE); + if (ret) + return ret; + + indio_dev->name = data->chip_info->name; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = ens210_channels; + indio_dev->num_channels = ARRAY_SIZE(ens210_channels); + indio_dev->info = &ens210_info; + + return devm_iio_device_register(&client->dev, indio_dev); +} + +static const struct ens210_chip_info ens210_chip_info_data = { + .name = "ens210", + .part_id = ENS210, + .conv_time_msec = 130, +}; + +static const struct ens210_chip_info ens210a_chip_info_data = { + .name = "ens210a", + .part_id = ENS210A, + .conv_time_msec = 130, +}; + +static const struct ens210_chip_info ens211_chip_info_data = { + .name = "ens211", + .part_id = ENS211, + .conv_time_msec = 32, +}; + +static const struct ens210_chip_info ens212_chip_info_data = { + .name = "ens212", + .part_id = ENS212, + .conv_time_msec = 32, +}; + +static const struct ens210_chip_info ens213a_chip_info_data = { + .name = "ens213a", + .part_id = ENS213A, + .conv_time_msec = 130, +}; + +static const struct ens210_chip_info ens215_chip_info_data = { + .name = "ens215", + .part_id = ENS215, + .conv_time_msec = 130, +}; + +static const struct of_device_id ens210_of_match[] = { + { .compatible = "sciosense,ens210", .data = &ens210_chip_info_data }, + { .compatible = "sciosense,ens210a", .data = &ens210a_chip_info_data }, + { .compatible = "sciosense,ens211", .data = &ens211_chip_info_data }, + { .compatible = "sciosense,ens212", .data = &ens212_chip_info_data }, + { .compatible = "sciosense,ens213a", .data = &ens213a_chip_info_data }, + { .compatible = "sciosense,ens215", .data = &ens215_chip_info_data }, + { } +}; +MODULE_DEVICE_TABLE(of, ens210_of_match); + +static const struct i2c_device_id ens210_id_table[] = { + { "ens210", (kernel_ulong_t)&ens210_chip_info_data }, + { "ens210a", (kernel_ulong_t)&ens210a_chip_info_data }, + { "ens211", (kernel_ulong_t)&ens211_chip_info_data }, + { "ens212", (kernel_ulong_t)&ens212_chip_info_data }, + { "ens213a", (kernel_ulong_t)&ens213a_chip_info_data }, + { "ens215", (kernel_ulong_t)&ens215_chip_info_data }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ens210_id_table); + +static struct i2c_driver ens210_driver = { + .probe = ens210_probe, + .id_table = ens210_id_table, + .driver = { + .name = "ens210", + .of_match_table = ens210_of_match, + }, +}; +module_i2c_driver(ens210_driver); + +MODULE_DESCRIPTION("ScioSense ENS210 temperature and humidity sensor driver"); +MODULE_AUTHOR("Joshua Felmeden "); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c index 
a82dcc3da4216f..ffb25596d3a8ba 100644 --- a/drivers/iio/humidity/hdc3020.c +++ b/drivers/iio/humidity/hdc3020.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index 876848b42f69bd..99410733c1ca74 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c index 0bfd6205f5f642..6484ab8aff5519 100644 --- a/drivers/iio/imu/adis16400.c +++ b/drivers/iio/imu/adis16400.c @@ -202,8 +202,6 @@ enum { ADIS16400_SCAN_TIMESTAMP, }; -#ifdef CONFIG_DEBUG_FS - static ssize_t adis16400_show_serial_number(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { @@ -273,11 +271,14 @@ static int adis16400_show_flash_count(void *arg, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(adis16400_flash_count_fops, adis16400_show_flash_count, NULL, "%lld\n"); -static int adis16400_debugfs_init(struct iio_dev *indio_dev) +static void adis16400_debugfs_init(struct iio_dev *indio_dev) { struct adis16400_state *st = iio_priv(indio_dev); struct dentry *d = iio_get_debugfs_dentry(indio_dev); + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return; + if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER) debugfs_create_file_unsafe("serial_number", 0400, d, st, &adis16400_serial_number_fops); @@ -286,19 +287,8 @@ static int adis16400_debugfs_init(struct iio_dev *indio_dev) d, st, &adis16400_product_id_fops); debugfs_create_file_unsafe("flash_count", 0400, d, st, &adis16400_flash_count_fops); - - return 0; } -#else - -static int adis16400_debugfs_init(struct iio_dev *indio_dev) -{ - return 0; -} - -#endif - enum adis16400_chip_variant { ADIS16300, ADIS16334, diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c index 69facd72bd7d8a..eaa38dd6201fdb 100644 --- a/drivers/iio/imu/adis16460.c +++ b/drivers/iio/imu/adis16460.c @@ -69,8 +69,6 @@ struct adis16460 { struct adis adis; }; -#ifdef CONFIG_DEBUG_FS - static int adis16460_show_serial_number(void *arg, u64 *val) { struct adis16460 *adis16460 = arg; @@ -125,30 +123,22 @@ static int adis16460_show_flash_count(void *arg, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(adis16460_flash_count_fops, adis16460_show_flash_count, NULL, "%lld\n"); -static int adis16460_debugfs_init(struct iio_dev *indio_dev) +static void adis16460_debugfs_init(struct iio_dev *indio_dev) { struct adis16460 *adis16460 = iio_priv(indio_dev); struct dentry *d = iio_get_debugfs_dentry(indio_dev); + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return; + debugfs_create_file_unsafe("serial_number", 0400, d, adis16460, &adis16460_serial_number_fops); debugfs_create_file_unsafe("product_id", 0400, d, adis16460, &adis16460_product_id_fops); debugfs_create_file_unsafe("flash_count", 0400, d, adis16460, &adis16460_flash_count_fops); - - return 0; -} - -#else - -static int adis16460_debugfs_init(struct iio_dev *indio_dev) -{ - return 0; } -#endif - static int adis16460_set_freq(struct iio_dev *indio_dev, int val, int val2) { struct adis16460 *st = iio_priv(indio_dev); diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c index 482258ed5a3c9e..88efe728b61bfe 100644 --- a/drivers/iio/imu/adis16475.c +++ b/drivers/iio/imu/adis16475.c @@ -164,7 +164,6 @@ module_param(low_rate_allow, bool, 0444); MODULE_PARM_DESC(low_rate_allow, "Allow IMU rates below the minimum advisable when external clk is used in SCALED mode (default: N)"); -#ifdef CONFIG_DEBUG_FS static ssize_t 
adis16475_show_firmware_revision(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) @@ -279,6 +278,9 @@ static void adis16475_debugfs_init(struct iio_dev *indio_dev) struct adis16475 *st = iio_priv(indio_dev); struct dentry *d = iio_get_debugfs_dentry(indio_dev); + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return; + debugfs_create_file_unsafe("serial_number", 0400, d, st, &adis16475_serial_number_fops); debugfs_create_file_unsafe("product_id", 0400, @@ -290,11 +292,6 @@ static void adis16475_debugfs_init(struct iio_dev *indio_dev) debugfs_create_file("firmware_date", 0400, d, st, &adis16475_firmware_date_fops); } -#else -static void adis16475_debugfs_init(struct iio_dev *indio_dev) -{ -} -#endif static int adis16475_get_freq(struct adis16475 *st, u32 *freq) { @@ -1593,8 +1590,7 @@ static int adis16475_push_single_sample(struct iio_poll_func *pf) return -EINVAL; } - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { /* * When burst mode is used, system flags is the first data * channel in the sequence, but the scan index is 7. diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index c59ef6f7cfd4e6..294181f2fcb314 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -193,8 +193,6 @@ module_param(low_rate_allow, bool, 0444); MODULE_PARM_DESC(low_rate_allow, "Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)"); -#ifdef CONFIG_DEBUG_FS - static ssize_t adis16480_show_firmware_revision(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { @@ -304,11 +302,14 @@ static int adis16480_show_flash_count(void *arg, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(adis16480_flash_count_fops, adis16480_show_flash_count, NULL, "%lld\n"); -static int adis16480_debugfs_init(struct iio_dev *indio_dev) +static void adis16480_debugfs_init(struct iio_dev *indio_dev) { struct adis16480 *adis16480 = iio_priv(indio_dev); struct dentry *d = iio_get_debugfs_dentry(indio_dev); + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return; + debugfs_create_file_unsafe("firmware_revision", 0400, d, adis16480, &adis16480_firmware_revision_fops); debugfs_create_file_unsafe("firmware_date", 0400, @@ -319,19 +320,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev) d, adis16480, &adis16480_product_id_fops); debugfs_create_file_unsafe("flash_count", 0400, d, adis16480, &adis16480_flash_count_fops); - - return 0; -} - -#else - -static int adis16480_debugfs_init(struct iio_dev *indio_dev) -{ - return 0; } -#endif - static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2) { struct adis16480 *st = iio_priv(indio_dev); @@ -1395,7 +1385,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p) goto irq_done; } - for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { /* * When burst mode is used, temperature is the first data * channel in the sequence, but the temperature scan index diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c index 90aa04d94da542..495e8a74ac676e 100644 --- a/drivers/iio/imu/bmi160/bmi160_core.c +++ b/drivers/iio/imu/bmi160/bmi160_core.c @@ -435,8 +435,7 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p) int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L; __le16 sample; - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, 
i) { ret = regmap_bulk_read(data->regmap, base + i * sizeof(sample), &sample, sizeof(sample)); if (ret) diff --git a/drivers/iio/imu/bmi323/bmi323.h b/drivers/iio/imu/bmi323/bmi323.h index dff126d4165839..209bccb1f335cf 100644 --- a/drivers/iio/imu/bmi323/bmi323.h +++ b/drivers/iio/imu/bmi323/bmi323.h @@ -205,5 +205,6 @@ struct device; int bmi323_core_probe(struct device *dev); extern const struct regmap_config bmi323_regmap_config; +extern const struct dev_pm_ops bmi323_core_pm_ops; #endif diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c index d708d1fe3e4254..beda8d2de53f6d 100644 --- a/drivers/iio/imu/bmi323/bmi323_core.c +++ b/drivers/iio/imu/bmi323/bmi323_core.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include @@ -118,6 +118,38 @@ static const struct bmi323_hw bmi323_hw[2] = { }, }; +static const unsigned int bmi323_reg_savestate[] = { + BMI323_INT_MAP1_REG, + BMI323_INT_MAP2_REG, + BMI323_IO_INT_CTR_REG, + BMI323_IO_INT_CONF_REG, + BMI323_ACC_CONF_REG, + BMI323_GYRO_CONF_REG, + BMI323_FEAT_IO0_REG, + BMI323_FIFO_WTRMRK_REG, + BMI323_FIFO_CONF_REG +}; + +static const unsigned int bmi323_ext_reg_savestate[] = { + BMI323_GEN_SET1_REG, + BMI323_TAP1_REG, + BMI323_TAP2_REG, + BMI323_TAP3_REG, + BMI323_FEAT_IO0_S_TAP_MSK, + BMI323_STEP_SC1_REG, + BMI323_ANYMO1_REG, + BMI323_NOMO1_REG, + BMI323_ANYMO1_REG + BMI323_MO2_OFFSET, + BMI323_NOMO1_REG + BMI323_MO2_OFFSET, + BMI323_ANYMO1_REG + BMI323_MO3_OFFSET, + BMI323_NOMO1_REG + BMI323_MO3_OFFSET +}; + +struct bmi323_regs_runtime_pm { + unsigned int reg_settings[ARRAY_SIZE(bmi323_reg_savestate)]; + unsigned int ext_reg_settings[ARRAY_SIZE(bmi323_ext_reg_savestate)]; +}; + struct bmi323_data { struct device *dev; struct regmap *regmap; @@ -130,6 +162,7 @@ struct bmi323_data { u32 odrns[BMI323_SENSORS_CNT]; u32 odrhz[BMI323_SENSORS_CNT]; unsigned int feature_events; + struct bmi323_regs_runtime_pm runtime_pm_status; /* * Lock to protect the members of device's private data from concurrent @@ -1972,6 +2005,11 @@ static void bmi323_disable(void *data_ptr) bmi323_set_mode(data, BMI323_ACCEL, ACC_GYRO_MODE_DISABLE); bmi323_set_mode(data, BMI323_GYRO, ACC_GYRO_MODE_DISABLE); + + /* + * Place the peripheral in its lowest power consuming state. + */ + regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL); } static int bmi323_set_bw(struct bmi323_data *data, @@ -2030,6 +2068,13 @@ static int bmi323_init(struct bmi323_data *data) return dev_err_probe(data->dev, -EINVAL, "Sensor power error = 0x%x\n", val); + return 0; +} + +static int bmi323_init_reset(struct bmi323_data *data) +{ + int ret; + /* * Set the Bandwidth coefficient which defines the 3 dB cutoff * frequency in relation to the ODR. 
@@ -2078,12 +2123,18 @@ int bmi323_core_probe(struct device *dev) data = iio_priv(indio_dev); data->dev = dev; data->regmap = regmap; + data->irq_pin = BMI323_IRQ_DISABLED; + data->state = BMI323_IDLE; mutex_init(&data->mutex); ret = bmi323_init(data); if (ret) return -EINVAL; + ret = bmi323_init_reset(data); + if (ret) + return -EINVAL; + if (!iio_read_acpi_mount_matrix(dev, &data->orientation, "ROTM")) { ret = iio_read_mount_matrix(dev, &data->orientation); if (ret) @@ -2117,10 +2168,139 @@ int bmi323_core_probe(struct device *dev) return dev_err_probe(data->dev, ret, "Unable to register iio device\n"); - return 0; + return bmi323_fifo_disable(data); } EXPORT_SYMBOL_NS_GPL(bmi323_core_probe, IIO_BMI323); +#if defined(CONFIG_PM) +static int bmi323_core_runtime_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct bmi323_data *data = iio_priv(indio_dev); + struct bmi323_regs_runtime_pm *savestate = &data->runtime_pm_status; + int ret; + + guard(mutex)(&data->mutex); + + ret = iio_device_suspend_triggering(indio_dev); + if (ret) + return ret; + + /* Save registers meant to be restored by resume pm callback. */ + for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_reg_savestate); i++) { + ret = regmap_read(data->regmap, bmi323_reg_savestate[i], + &savestate->reg_settings[i]); + if (ret) { + dev_err(data->dev, + "Error reading bmi323 reg 0x%x: %d\n", + bmi323_reg_savestate[i], ret); + return ret; + } + } + + for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_ext_reg_savestate); i++) { + ret = bmi323_read_ext_reg(data, bmi323_ext_reg_savestate[i], + &savestate->ext_reg_settings[i]); + if (ret) { + dev_err(data->dev, + "Error reading bmi323 external reg 0x%x: %d\n", + bmi323_ext_reg_savestate[i], ret); + return ret; + } + } + + /* Perform soft reset to place the device in its lowest power state. */ + ret = regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL); + if (ret) + return ret; + + return 0; +} + +static int bmi323_core_runtime_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct bmi323_data *data = iio_priv(indio_dev); + struct bmi323_regs_runtime_pm *savestate = &data->runtime_pm_status; + unsigned int val; + int ret; + + guard(mutex)(&data->mutex); + + /* + * Perform the device power-on and initial setup once again + * after being reset in the lower power state by runtime-pm. + */ + ret = bmi323_init(data); + if (ret) + return ret; + + /* Register must be cleared before changing an active config */ + ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, 0); + if (ret) { + dev_err(data->dev, "Error stopping feature engine\n"); + return ret; + } + + for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_ext_reg_savestate); i++) { + ret = bmi323_write_ext_reg(data, bmi323_ext_reg_savestate[i], + savestate->ext_reg_settings[i]); + if (ret) { + dev_err(data->dev, + "Error writing bmi323 external reg 0x%x: %d\n", + bmi323_ext_reg_savestate[i], ret); + return ret; + } + } + + for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_reg_savestate); i++) { + ret = regmap_write(data->regmap, bmi323_reg_savestate[i], + savestate->reg_settings[i]); + if (ret) { + dev_err(data->dev, + "Error writing bmi323 reg 0x%x: %d\n", + bmi323_reg_savestate[i], ret); + return ret; + } + } + + /* + * Clear old FIFO samples that might be generated before suspend + * or generated from a peripheral state not equal to the saved one.
+ */ + if (data->state == BMI323_BUFFER_FIFO) { + ret = regmap_write(data->regmap, BMI323_FIFO_CTRL_REG, + BMI323_FIFO_FLUSH_MSK); + if (ret) { + dev_err(data->dev, "Error flushing FIFO buffer: %d\n", ret); + return ret; + } + } + + ret = regmap_read(data->regmap, BMI323_ERR_REG, &val); + if (ret) { + dev_err(data->dev, + "Error reading bmi323 error register: %d\n", ret); + return ret; + } + + if (val) { + dev_err(data->dev, + "Sensor power error in PM = 0x%x\n", val); + return -EINVAL; + } + + return iio_device_resume_triggering(indio_dev); +} + +#endif + +const struct dev_pm_ops bmi323_core_pm_ops = { + SET_RUNTIME_PM_OPS(bmi323_core_runtime_suspend, + bmi323_core_runtime_resume, NULL) +}; +EXPORT_SYMBOL_NS_GPL(bmi323_core_pm_ops, IIO_BMI323); + MODULE_DESCRIPTION("Bosch BMI323 IMU driver"); MODULE_AUTHOR("Jagath Jog J "); MODULE_LICENSE("GPL"); diff --git a/drivers/iio/imu/bmi323/bmi323_i2c.c b/drivers/iio/imu/bmi323/bmi323_i2c.c index 52140bf057658e..0ba5d69d8329ae 100644 --- a/drivers/iio/imu/bmi323/bmi323_i2c.c +++ b/drivers/iio/imu/bmi323/bmi323_i2c.c @@ -61,7 +61,7 @@ static int bmi323_regmap_i2c_write(void *context, const void *data, data + sizeof(u8)); } -static struct regmap_bus bmi323_regmap_bus = { +static const struct regmap_bus bmi323_regmap_bus = { .read = bmi323_regmap_i2c_read, .write = bmi323_regmap_i2c_write, }; @@ -128,6 +128,7 @@ MODULE_DEVICE_TABLE(of, bmi323_of_i2c_match); static struct i2c_driver bmi323_i2c_driver = { .driver = { .name = "bmi323", + .pm = pm_ptr(&bmi323_core_pm_ops), .of_match_table = bmi323_of_i2c_match, .acpi_match_table = bmi323_acpi_match, }, diff --git a/drivers/iio/imu/bmi323/bmi323_spi.c b/drivers/iio/imu/bmi323/bmi323_spi.c index 7b1e8127d0dd9b..9de3ade78d714d 100644 --- a/drivers/iio/imu/bmi323/bmi323_spi.c +++ b/drivers/iio/imu/bmi323/bmi323_spi.c @@ -36,7 +36,7 @@ static int bmi323_regmap_spi_write(void *context, const void *data, return spi_write(spi, data_buff + 1, count - 1); } -static struct regmap_bus bmi323_regmap_bus = { +static const struct regmap_bus bmi323_regmap_bus = { .read = bmi323_regmap_spi_read, .write = bmi323_regmap_spi_write, }; @@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(of, bmi323_of_spi_match); static struct spi_driver bmi323_spi_driver = { .driver = { .name = "bmi323", + .pm = pm_ptr(&bmi323_core_pm_ops), .of_match_table = bmi323_of_spi_match, }, .probe = bmi323_spi_probe, diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c index 52744dd98e65b4..ea6519b22b2f34 100644 --- a/drivers/iio/imu/bno055/bno055.c +++ b/drivers/iio/imu/bno055/bno055.c @@ -1458,7 +1458,7 @@ static irqreturn_t bno055_trigger_handler(int irq, void *p) * then we split the transfer, skipping the gap. 
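The PM callbacks above are packaged once in the core module as an exported const dev_pm_ops, and the I2C and SPI glue drivers only point .driver.pm at it through pm_ptr(), so the reference is compiled out entirely when CONFIG_PM is disabled. A minimal sketch of the same wiring, with invented demo_* names and without the rest of the driver boilerplate:

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/pm.h>

/* core module (hypothetical names) */
#if defined(CONFIG_PM)
static int demo_runtime_suspend(struct device *dev)
{
	/* save state and quiesce the sensor */
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	/* re-init the sensor and restore saved state */
	return 0;
}
#endif

const struct dev_pm_ops demo_core_pm_ops = {
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};
EXPORT_SYMBOL_GPL(demo_core_pm_ops);

/* bus glue: same ops shared by every transport driver */
static struct i2c_driver demo_i2c_driver = {
	.driver = {
		.name = "demo",
		.pm = pm_ptr(&demo_core_pm_ops),
	},
};

The SPI glue does exactly the same, so both transports share one set of callbacks defined next to the hardware logic.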
*/ for_each_set_bitrange(start, end, iio_dev->active_scan_mask, - iio_dev->masklength) { + iio_get_masklength(iio_dev)) { /* * First transfer will start from the beginning of the first * ones-field in the bitmap diff --git a/drivers/iio/imu/bno055/bno055_ser_core.c b/drivers/iio/imu/bno055/bno055_ser_core.c index 694ff14a3aa279..da7873bfd34875 100644 --- a/drivers/iio/imu/bno055/bno055_ser_core.c +++ b/drivers/iio/imu/bno055/bno055_ser_core.c @@ -492,7 +492,7 @@ static const struct serdev_device_ops bno055_ser_serdev_ops = { .write_wakeup = serdev_device_write_wakeup, }; -static struct regmap_bus bno055_ser_regmap_bus = { +static const struct regmap_bus bno055_ser_regmap_bus = { .write = bno055_ser_write_reg, .read = bno055_ser_read_reg, }; diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index d37eca5ef761e3..c61c012e25bbaa 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1200,8 +1200,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p) base = KMX61_MAG_XOUT_L; mutex_lock(&data->lock); - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = kmx61_read_measurement(data, base, bit); if (ret < 0) { mutex_unlock(&data->lock); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 937ff9c5a74c76..ed02679297252a 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -2127,25 +2127,15 @@ static const struct iio_info st_lsm6dsx_gyro_info = { .write_raw_get_fmt = st_lsm6dsx_write_raw_get_fmt, }; -static int st_lsm6dsx_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin) -{ - struct device *dev = hw->dev; - - if (!dev_fwnode(dev)) - return -EINVAL; - - return device_property_read_u32(dev, "st,drdy-int-pin", drdy_pin); -} - static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, const struct st_lsm6dsx_reg **drdy_reg) { + struct device *dev = hw->dev; int err = 0, drdy_pin; - if (st_lsm6dsx_get_drdy_pin(hw, &drdy_pin) < 0) { + if (device_property_read_u32(dev, "st,drdy-int-pin", &drdy_pin) < 0) { struct st_sensors_platform_data *pdata; - struct device *dev = hw->dev; pdata = (struct st_sensors_platform_data *)dev->platform_data; drdy_pin = pdata ? 
pdata->drdy_int_pin : 1; @@ -2180,7 +2170,7 @@ static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw) hub_settings = &hw->settings->shub_settings; pdata = (struct st_sensors_platform_data *)dev->platform_data; - if ((dev_fwnode(dev) && device_property_read_bool(dev, "st,pullups")) || + if (device_property_read_bool(dev, "st,pullups") || (pdata && pdata->pullups)) { if (hub_settings->pullup_en.sec_page) { err = st_lsm6dsx_set_page(hw, true); @@ -2565,7 +2555,7 @@ static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw) return err; pdata = (struct st_sensors_platform_data *)dev->platform_data; - if ((dev_fwnode(dev) && device_property_read_bool(dev, "drive-open-drain")) || + if (device_property_read_bool(dev, "drive-open-drain") || (pdata && pdata->open_drain)) { reg = &hw->settings->irq_config.od; err = regmap_update_bits(hw->regmap, reg->addr, reg->mask, @@ -2646,73 +2636,6 @@ static int st_lsm6dsx_init_regulators(struct device *dev) return 0; } -#ifdef CONFIG_ACPI - -static int lsm6dsx_get_acpi_mount_matrix(struct device *dev, - struct iio_mount_matrix *orientation) -{ - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_device *adev = ACPI_COMPANION(dev); - union acpi_object *obj, *elements; - acpi_status status; - int i, j, val[3]; - char *str; - - if (!has_acpi_companion(dev)) - return -EINVAL; - - if (!acpi_has_method(adev->handle, "ROTM")) - return -EINVAL; - - status = acpi_evaluate_object(adev->handle, "ROTM", NULL, &buffer); - if (ACPI_FAILURE(status)) { - dev_warn(dev, "Failed to get ACPI mount matrix: %d\n", status); - return -EINVAL; - } - - obj = buffer.pointer; - if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) - goto unknown_format; - - elements = obj->package.elements; - for (i = 0; i < 3; i++) { - if (elements[i].type != ACPI_TYPE_STRING) - goto unknown_format; - - str = elements[i].string.pointer; - if (sscanf(str, "%d %d %d", &val[0], &val[1], &val[2]) != 3) - goto unknown_format; - - for (j = 0; j < 3; j++) { - switch (val[j]) { - case -1: str = "-1"; break; - case 0: str = "0"; break; - case 1: str = "1"; break; - default: goto unknown_format; - } - orientation->rotation[i * 3 + j] = str; - } - } - - kfree(buffer.pointer); - return 0; - -unknown_format: - dev_warn(dev, "Unknown ACPI mount matrix format, ignoring\n"); - kfree(buffer.pointer); - return -EINVAL; -} - -#else - -static int lsm6dsx_get_acpi_mount_matrix(struct device *dev, - struct iio_mount_matrix *orientation) -{ - return -EOPNOTSUPP; -} - -#endif - int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, struct regmap *regmap) { @@ -2760,8 +2683,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, hub_settings = &hw->settings->shub_settings; if (hub_settings->master_en.addr && - (!dev_fwnode(dev) || - !device_property_read_bool(dev, "st,disable-sensor-hub"))) { + !device_property_read_bool(dev, "st,disable-sensor-hub")) { err = st_lsm6dsx_shub_probe(hw, name); if (err < 0) return err; @@ -2787,8 +2709,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, return err; } - err = lsm6dsx_get_acpi_mount_matrix(hw->dev, &hw->orientation); - if (err) { + if (!iio_read_acpi_mount_matrix(hw->dev, &hw->orientation, "ROTM")) { err = iio_read_mount_matrix(hw->dev, &hw->orientation); if (err) return err; @@ -2803,7 +2724,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, return err; } - if ((dev_fwnode(dev) && device_property_read_bool(dev, "wakeup-source")) || + if (device_property_read_bool(dev, "wakeup-source") || (pdata && 
pdata->wakeup_source)) device_init_wakeup(dev, true); diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c index efe05be284b6ed..20b3b5212da76a 100644 --- a/drivers/iio/industrialio-backend.c +++ b/drivers/iio/industrialio-backend.c @@ -32,6 +32,7 @@ #define dev_fmt(fmt) "iio-backend: " fmt #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include @@ -52,6 +54,14 @@ struct iio_backend { struct device *dev; struct module *owner; void *priv; + const char *name; + unsigned int cached_reg_addr; + /* + * This index is relative to the frontend. Meaning that for + * frontends with multiple backends, this will be the index of this + * backend. Used for the debugfs directory name. + */ + u8 idx; }; /* @@ -111,7 +121,142 @@ static DEFINE_MUTEX(iio_back_lock); __ret = iio_backend_check_op(__back, op); \ if (!__ret) \ __back->ops->op(__back, ##args); \ + else \ + dev_dbg(__back->dev, "Op(%s) not implemented\n",\ + __stringify(op)); \ +} + +static ssize_t iio_backend_debugfs_read_reg(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct iio_backend *back = file->private_data; + char read_buf[20]; + unsigned int val; + int ret, len; + + ret = iio_backend_op_call(back, debugfs_reg_access, + back->cached_reg_addr, 0, &val); + if (ret) + return ret; + + len = scnprintf(read_buf, sizeof(read_buf), "0x%X\n", val); + + return simple_read_from_buffer(userbuf, count, ppos, read_buf, len); +} + +static ssize_t iio_backend_debugfs_write_reg(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct iio_backend *back = file->private_data; + unsigned int val; + char buf[80]; + ssize_t rc; + int ret; + + rc = simple_write_to_buffer(buf, sizeof(buf), ppos, userbuf, count); + if (rc < 0) + return rc; + + ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val); + + switch (ret) { + case 1: + return count; + case 2: + ret = iio_backend_op_call(back, debugfs_reg_access, + back->cached_reg_addr, val, NULL); + if (ret) + return ret; + return count; + default: + return -EINVAL; + } +} + +static const struct file_operations iio_backend_debugfs_reg_fops = { + .open = simple_open, + .read = iio_backend_debugfs_read_reg, + .write = iio_backend_debugfs_write_reg, +}; + +static ssize_t iio_backend_debugfs_read_name(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct iio_backend *back = file->private_data; + char name[128]; + int len; + + len = scnprintf(name, sizeof(name), "%s\n", back->name); + + return simple_read_from_buffer(userbuf, count, ppos, name, len); +} + +static const struct file_operations iio_backend_debugfs_name_fops = { + .open = simple_open, + .read = iio_backend_debugfs_read_name, +}; + +/** + * iio_backend_debugfs_add - Add debugfs interfaces for Backends + * @back: Backend device + * @indio_dev: IIO device + */ +void iio_backend_debugfs_add(struct iio_backend *back, + struct iio_dev *indio_dev) +{ + struct dentry *d = iio_get_debugfs_dentry(indio_dev); + struct dentry *back_d; + char name[128]; + + if (!IS_ENABLED(CONFIG_DEBUG_FS) || !d) + return; + if (!back->ops->debugfs_reg_access && !back->name) + return; + + snprintf(name, sizeof(name), "backend%d", back->idx); + + back_d = debugfs_create_dir(name, d); + if (IS_ERR(back_d)) + return; + + if (back->ops->debugfs_reg_access) + debugfs_create_file("direct_reg_access", 0600, back_d, back, + &iio_backend_debugfs_reg_fops); + + if (back->name) + debugfs_create_file("name", 
0400, back_d, back, + &iio_backend_debugfs_name_fops); } +EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_add, IIO_BACKEND); + +/** + * iio_backend_debugfs_print_chan_status - Print channel status + * @back: Backend device + * @chan: Channel number + * @buf: Buffer where to print the status + * @len: Available space + * + * One usecase where this is useful is for testing test tones in a digital + * interface and "ask" the backend to dump more details on why a test tone might + * have errors. + * + * RETURNS: + * Number of copied bytes on success, negative error code on failure. + */ +ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back, + unsigned int chan, char *buf, + size_t len) +{ + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return -ENODEV; + + return iio_backend_op_call(back, debugfs_print_chan_status, chan, buf, + len); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_print_chan_status, IIO_BACKEND); /** * iio_backend_chan_enable - Enable a backend channel @@ -146,6 +291,29 @@ static void __iio_backend_disable(void *back) iio_backend_void_op_call(back, disable); } +/** + * iio_backend_disable - Backend disable + * @back: Backend device + */ +void iio_backend_disable(struct iio_backend *back) +{ + __iio_backend_disable(back); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_disable, IIO_BACKEND); + +/** + * iio_backend_enable - Backend enable + * @back: Backend device + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +int iio_backend_enable(struct iio_backend *back) +{ + return iio_backend_op_call(back, enable); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_enable, IIO_BACKEND); + /** * devm_iio_backend_enable - Device managed backend enable * @dev: Consumer device for the backend @@ -158,7 +326,7 @@ int devm_iio_backend_enable(struct device *dev, struct iio_backend *back) { int ret; - ret = iio_backend_op_call(back, enable); + ret = iio_backend_enable(back); if (ret) return ret; @@ -357,6 +525,25 @@ int devm_iio_backend_request_buffer(struct device *dev, } EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND); +/** + * iio_backend_read_raw - Read a channel attribute from a backend device. + * @back: Backend device + * @chan: IIO channel reference + * @val: First returned value + * @val2: Second returned value + * @mask: Specify the attribute to return + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +int iio_backend_read_raw(struct iio_backend *back, + struct iio_chan_spec const *chan, int *val, int *val2, + long mask) +{ + return iio_backend_op_call(back, read_raw, chan, val, val2, mask); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_read_raw, IIO_BACKEND); + static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev) { struct iio_backend *back = ERR_PTR(-ENODEV), *iter; @@ -451,7 +638,6 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND); /** * iio_backend_extend_chan_spec - Extend an IIO channel - * @indio_dev: IIO device * @back: Backend device * @chan: IIO channel * @@ -461,8 +647,7 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND); * RETURNS: * 0 on success, negative error number on failure. 
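Together with the registration change later in this file (devm_iio_backend_register() now takes a struct iio_backend_info carrying a name plus the ops), a backend only needs to fill in .name and a .debugfs_reg_access callback to get the name and direct_reg_access files above, and the frontend exposes them by calling iio_backend_debugfs_add() once its IIO device is registered. The sketch below infers the callback signature from the op calls shown above and uses invented demo_* identifiers:

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>
#include <linux/platform_device.h>

/* backend driver side (hypothetical) */
static int demo_back_reg_access(struct iio_backend *back, unsigned int reg,
				unsigned int writeval, unsigned int *readval)
{
	/* read the register when readval is non-NULL, write it otherwise */
	return 0;
}

static const struct iio_backend_ops demo_back_ops = {
	.debugfs_reg_access = demo_back_reg_access,
};

static const struct iio_backend_info demo_back_info = {
	.name = "demo-backend",
	.ops = &demo_back_ops,
};

static int demo_back_probe(struct platform_device *pdev)
{
	return devm_iio_backend_register(&pdev->dev, &demo_back_info, NULL);
}

/* frontend side: expose <debugfs>/iio/iio:deviceX/backendN/{name,direct_reg_access} */
static int demo_front_register(struct device *dev, struct iio_dev *indio_dev,
			       struct iio_backend *back)
{
	int ret = devm_iio_device_register(dev, indio_dev);

	if (ret)
		return ret;

	iio_backend_debugfs_add(back, indio_dev);
	return 0;
}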
*/ -int iio_backend_extend_chan_spec(struct iio_dev *indio_dev, - struct iio_backend *back, +int iio_backend_extend_chan_spec(struct iio_backend *back, struct iio_chan_spec *chan) { const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info; @@ -533,19 +718,10 @@ static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back) return 0; } -/** - * devm_iio_backend_get - Device managed backend device get - * @dev: Consumer device for the backend - * @name: Backend name - * - * Get's the backend associated with @dev. - * - * RETURNS: - * A backend pointer, negative error pointer otherwise. - */ -struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name) +static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, const char *name, + struct fwnode_handle *fwnode) { - struct fwnode_handle *fwnode; + struct fwnode_handle *fwnode_back; struct iio_backend *back; unsigned int index; int ret; @@ -560,29 +736,66 @@ struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name) index = 0; } - fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index); + fwnode_back = fwnode_find_reference(fwnode, "io-backends", index); if (IS_ERR(fwnode)) return dev_err_cast_probe(dev, fwnode, "Cannot get Firmware reference\n"); guard(mutex)(&iio_back_lock); list_for_each_entry(back, &iio_back_list, entry) { - if (!device_match_fwnode(back->dev, fwnode)) + if (!device_match_fwnode(back->dev, fwnode_back)) continue; - fwnode_handle_put(fwnode); + fwnode_handle_put(fwnode_back); ret = __devm_iio_backend_get(dev, back); if (ret) return ERR_PTR(ret); + if (name) + back->idx = index; + return back; } - fwnode_handle_put(fwnode); + fwnode_handle_put(fwnode_back); return ERR_PTR(-EPROBE_DEFER); } + +/** + * devm_iio_backend_get - Device managed backend device get + * @dev: Consumer device for the backend + * @name: Backend name + * + * Get's the backend associated with @dev. + * + * RETURNS: + * A backend pointer, negative error pointer otherwise. + */ +struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name) +{ + return __devm_iio_backend_fwnode_get(dev, name, dev_fwnode(dev)); +} EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND); +/** + * devm_iio_backend_fwnode_get - Device managed backend firmware node get + * @dev: Consumer device for the backend + * @name: Backend name + * @fwnode: Firmware node of the backend consumer + * + * Get's the backend associated with a firmware node. + * + * RETURNS: + * A backend pointer, negative error pointer otherwise. + */ +struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev, + const char *name, + struct fwnode_handle *fwnode) +{ + return __devm_iio_backend_fwnode_get(dev, name, fwnode); +} +EXPORT_SYMBOL_NS_GPL(devm_iio_backend_fwnode_get, IIO_BACKEND); + /** * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get * @dev: Consumer device for the backend @@ -639,20 +852,20 @@ static void iio_backend_unregister(void *arg) /** * devm_iio_backend_register - Device managed backend device register * @dev: Backend device being registered - * @ops: Backend ops + * @info: Backend info * @priv: Device private data * - * @ops is mandatory. Not providing it results in -EINVAL. + * @info is mandatory. Not providing it results in -EINVAL. * * RETURNS: * 0 on success, negative error number on failure. 
*/ int devm_iio_backend_register(struct device *dev, - const struct iio_backend_ops *ops, void *priv) + const struct iio_backend_info *info, void *priv) { struct iio_backend *back; - if (!ops) + if (!info || !info->ops) return dev_err_probe(dev, -EINVAL, "No backend ops given\n"); /* @@ -665,7 +878,8 @@ int devm_iio_backend_register(struct device *dev, if (!back) return -ENOMEM; - back->ops = ops; + back->ops = info->ops; + back->name = info->name; back->owner = dev->driver->owner; back->dev = dev; back->priv = priv; diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index d6fe105d2f40d9..8104696cd47509 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -508,18 +508,19 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev, static int iio_scan_mask_set(struct iio_dev *indio_dev, struct iio_buffer *buffer, int bit) { + unsigned int masklength = iio_get_masklength(indio_dev); const unsigned long *mask; unsigned long *trialmask; - if (!indio_dev->masklength) { + if (!masklength) { WARN(1, "Trying to set scanmask prior to registering buffer\n"); return -EINVAL; } - trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL); + trialmask = bitmap_alloc(masklength, GFP_KERNEL); if (!trialmask) return -ENOMEM; - bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); + bitmap_copy(trialmask, buffer->scan_mask, masklength); set_bit(bit, trialmask); if (!iio_validate_scan_mask(indio_dev, trialmask)) @@ -527,12 +528,11 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev, if (indio_dev->available_scan_masks) { mask = iio_scan_mask_match(indio_dev->available_scan_masks, - indio_dev->masklength, - trialmask, false); + masklength, trialmask, false); if (!mask) goto err_invalid_mask; } - bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength); + bitmap_copy(buffer->scan_mask, trialmask, masklength); bitmap_free(trialmask); @@ -552,7 +552,7 @@ static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit) static int iio_scan_mask_query(struct iio_dev *indio_dev, struct iio_buffer *buffer, int bit) { - if (bit > indio_dev->masklength) + if (bit > iio_get_masklength(indio_dev)) return -EINVAL; if (!buffer->scan_mask) @@ -768,8 +768,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, int length, i, largest = 0; /* How much space will the demuxed element take? */ - for_each_set_bit(i, mask, - indio_dev->masklength) { + for_each_set_bit(i, mask, iio_get_masklength(indio_dev)) { length = iio_storage_bytes_for_si(indio_dev, i); if (length < 0) return length; @@ -890,6 +889,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, struct iio_device_config *config) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + unsigned int masklength = iio_get_masklength(indio_dev); unsigned long *compound_mask; const unsigned long *scan_mask; bool strict_scanmask = false; @@ -898,7 +898,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, unsigned int modes; if (insert_buffer && - bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) { + bitmap_empty(insert_buffer->scan_mask, masklength)) { dev_dbg(&indio_dev->dev, "At least one scan element must be enabled first\n"); return -EINVAL; @@ -952,7 +952,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, } /* What scan mask do we actually have? 
*/ - compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL); + compound_mask = bitmap_zalloc(masklength, GFP_KERNEL); if (!compound_mask) return -ENOMEM; @@ -962,20 +962,19 @@ static int iio_verify_update(struct iio_dev *indio_dev, if (buffer == remove_buffer) continue; bitmap_or(compound_mask, compound_mask, buffer->scan_mask, - indio_dev->masklength); + masklength); scan_timestamp |= buffer->scan_timestamp; } if (insert_buffer) { bitmap_or(compound_mask, compound_mask, - insert_buffer->scan_mask, indio_dev->masklength); + insert_buffer->scan_mask, masklength); scan_timestamp |= insert_buffer->scan_timestamp; } if (indio_dev->available_scan_masks) { scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks, - indio_dev->masklength, - compound_mask, + masklength, compound_mask, strict_scanmask); bitmap_free(compound_mask); if (!scan_mask) @@ -1040,6 +1039,7 @@ static int iio_buffer_add_demux(struct iio_buffer *buffer, static int iio_buffer_update_demux(struct iio_dev *indio_dev, struct iio_buffer *buffer) { + unsigned int masklength = iio_get_masklength(indio_dev); int ret, in_ind = -1, out_ind, length; unsigned int in_loc = 0, out_loc = 0; struct iio_demux_table *p = NULL; @@ -1051,17 +1051,13 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, /* First work out which scan mode we will actually have */ if (bitmap_equal(indio_dev->active_scan_mask, - buffer->scan_mask, - indio_dev->masklength)) + buffer->scan_mask, masklength)) return 0; /* Now we have the two masks, work from least sig and build up sizes */ - for_each_set_bit(out_ind, - buffer->scan_mask, - indio_dev->masklength) { + for_each_set_bit(out_ind, buffer->scan_mask, masklength) { in_ind = find_next_bit(indio_dev->active_scan_mask, - indio_dev->masklength, - in_ind + 1); + masklength, in_ind + 1); while (in_ind != out_ind) { ret = iio_storage_bytes_for_si(indio_dev, in_ind); if (ret < 0) @@ -1071,8 +1067,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, /* Make sure we are aligned */ in_loc = roundup(in_loc, length) + length; in_ind = find_next_bit(indio_dev->active_scan_mask, - indio_dev->masklength, - in_ind + 1); + masklength, in_ind + 1); } ret = iio_storage_bytes_for_si(indio_dev, in_ind); if (ret < 0) @@ -2104,6 +2099,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, int index) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + unsigned int masklength = iio_get_masklength(indio_dev); struct iio_dev_attr *p; const struct iio_dev_attr *id_attr; struct attribute **attr; @@ -2166,8 +2162,8 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, iio_dev_opaque->scan_index_timestamp = channels[i].scan_index; } - if (indio_dev->masklength && !buffer->scan_mask) { - buffer->scan_mask = bitmap_zalloc(indio_dev->masklength, + if (masklength && !buffer->scan_mask) { + buffer->scan_mask = bitmap_zalloc(masklength, GFP_KERNEL); if (!buffer->scan_mask) { ret = -ENOMEM; @@ -2273,7 +2269,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev) for (i = 0; i < indio_dev->num_channels; i++) ml = max(ml, channels[i].scan_index + 1); - indio_dev->masklength = ml; + ACCESS_PRIVATE(indio_dev, masklength) = ml; } if (!iio_dev_opaque->attached_buffers_cnt) @@ -2337,7 +2333,7 @@ void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev) bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, const unsigned long *mask) { - return bitmap_weight(mask, indio_dev->masklength) == 1; + return bitmap_weight(mask, 
iio_get_masklength(indio_dev)) == 1; } EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 0f6cda7ffe45dd..6a6568d4a2cb3a 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -667,7 +667,6 @@ static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type, vals[1]); case IIO_VAL_FRACTIONAL: tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]); - tmp1 = vals[1]; tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1); if ((tmp2 < 0) && (tmp0 == 0)) return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); @@ -1912,7 +1911,7 @@ static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev) int i; av_masks = indio_dev->available_scan_masks; - masklength = indio_dev->masklength; + masklength = iio_get_masklength(indio_dev); longs_per_mask = BITS_TO_LONGS(masklength); /* @@ -1965,6 +1964,49 @@ static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev) } } +/** + * iio_active_scan_mask_index - Get index of the active scan mask inside the + * available scan masks array + * @indio_dev: the IIO device containing the active and available scan masks + * + * Returns: the index or -EINVAL if active_scan_mask is not set + */ +int iio_active_scan_mask_index(struct iio_dev *indio_dev) + +{ + const unsigned long *av_masks; + unsigned int masklength = iio_get_masklength(indio_dev); + int i = 0; + + if (!indio_dev->active_scan_mask) + return -EINVAL; + + /* + * As in iio_scan_mask_match and iio_sanity_check_avail_scan_masks, + * the condition here do not handle multi-long masks correctly. + * It only checks the first long to be zero, and will use such mask + * as a terminator even if there was bits set after the first long. + * + * This should be fine since the available_scan_mask has already been + * sanity tested using iio_sanity_check_avail_scan_masks. 
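The walk described in this comment matches the usual way drivers lay out available_scan_masks: one BITS_TO_LONGS(masklength)-sized stride per allowed mask, terminated by an all-zero entry, and the new iio_active_scan_mask_index() reports which stride the core selected. A small illustration for a hypothetical four-channel device, assuming indio_dev->available_scan_masks was pointed at the table during probe; the demo_* names and demo_apply_hw_config() are invented:

#include <linux/bits.h>
#include <linux/iio/iio.h>

/* Allowed layouts for a device with four scan elements (fits in one long) */
static const unsigned long demo_scan_masks[] = {
	BIT(0) | BIT(1),	/* X and Y only */
	GENMASK(2, 0),		/* X, Y and Z */
	GENMASK(3, 0),		/* all four channels */
	0,			/* terminator expected by the core */
};

static int demo_apply_hw_config(int idx);	/* hypothetical helper */

static int demo_buffer_postenable(struct iio_dev *indio_dev)
{
	int idx = iio_active_scan_mask_index(indio_dev);

	if (idx < 0)
		return idx;

	/* idx tells us which entry of demo_scan_masks[] the core picked */
	return demo_apply_hw_config(idx);
}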
+ * + * See iio_scan_mask_match and iio_sanity_check_avail_scan_masks for + * more details + */ + av_masks = indio_dev->available_scan_masks; + while (*av_masks) { + if (indio_dev->active_scan_mask == av_masks) + return i; + av_masks += BITS_TO_LONGS(masklength); + i++; + } + + dev_warn(indio_dev->dev.parent, + "active scan mask is not part of the available scan masks\n"); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(iio_active_scan_mask_index); + int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index 2e84776f4fbd4d..54416a384232b3 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -347,6 +347,7 @@ int iio_trigger_detach_poll_func(struct iio_trigger *trig, iio_trigger_put_irq(trig, pf->irq); free_irq(pf->irq, pf); module_put(iio_dev_opaque->driver_module); + pf->irq = 0; return ret; } @@ -770,3 +771,29 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev) if (indio_dev->trig) iio_trigger_put(indio_dev->trig); } + +int iio_device_suspend_triggering(struct iio_dev *indio_dev) +{ + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + guard(mutex)(&iio_dev_opaque->mlock); + + if ((indio_dev->pollfunc) && (indio_dev->pollfunc->irq > 0)) + disable_irq(indio_dev->pollfunc->irq); + + return 0; +} +EXPORT_SYMBOL(iio_device_suspend_triggering); + +int iio_device_resume_triggering(struct iio_dev *indio_dev) +{ + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + guard(mutex)(&iio_dev_opaque->mlock); + + if ((indio_dev->pollfunc) && (indio_dev->pollfunc->irq > 0)) + enable_irq(indio_dev->pollfunc->irq); + + return 0; +} +EXPORT_SYMBOL(iio_device_resume_triggering); diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index b68dcc1fbaca4c..515ff46b5b8212 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -114,6 +114,19 @@ config AS73211 This driver can also be built as a module. If so, the module will be called as73211. +config BH1745 + tristate "ROHM BH1745 colour sensor" + depends on I2C + select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + select IIO_GTS_HELPER + help + Say Y here to build support for the ROHM bh1745 colour sensor. + + To compile this driver as a module, choose M here: the module will + be called bh1745. 
+ config BH1750 tristate "ROHM BH1750 ambient light sensor" depends on I2C diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile index 1a071a8e9f8e49..321010fc0b938a 100644 --- a/drivers/iio/light/Makefile +++ b/drivers/iio/light/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_APDS9300) += apds9300.o obj-$(CONFIG_APDS9306) += apds9306.o obj-$(CONFIG_APDS9960) += apds9960.o obj-$(CONFIG_AS73211) += as73211.o +obj-$(CONFIG_BH1745) += bh1745.o obj-$(CONFIG_BH1750) += bh1750.o obj-$(CONFIG_BH1780) += bh1780.o obj-$(CONFIG_CM32181) += cm32181.o diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c index 5169f12c3ebafd..c1b43053fbc73d 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c @@ -125,8 +125,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p) if (ret < 0) goto done; - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { ret = i2c_smbus_read_word_data(data->client, ADJD_S311_DATA_REG(i)); if (ret < 0) diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c index 66a063ea3db445..079e02be100521 100644 --- a/drivers/iio/light/apds9306.c +++ b/drivers/iio/light/apds9306.c @@ -28,7 +28,7 @@ #include #include -#include +#include #define APDS9306_MAIN_CTRL_REG 0x00 #define APDS9306_ALS_MEAS_RATE_REG 0x04 diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index e9e65130b6f911..3c14e4c30805e1 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -146,6 +146,25 @@ struct apds9960_data { /* gesture buffer */ u8 buffer[4]; /* 4 8-bit channels */ + + /* calibration value buffer */ + int calibbias[5]; +}; + +enum { + APDS9960_CHAN_PROXIMITY, + APDS9960_CHAN_GESTURE_UP, + APDS9960_CHAN_GESTURE_DOWN, + APDS9960_CHAN_GESTURE_LEFT, + APDS9960_CHAN_GESTURE_RIGHT, +}; + +static const unsigned int apds9960_offset_regs[][2] = { + [APDS9960_CHAN_PROXIMITY] = {APDS9960_REG_POFFSET_UR, APDS9960_REG_POFFSET_DL}, + [APDS9960_CHAN_GESTURE_UP] = {APDS9960_REG_GOFFSET_U, 0}, + [APDS9960_CHAN_GESTURE_DOWN] = {APDS9960_REG_GOFFSET_D, 0}, + [APDS9960_CHAN_GESTURE_LEFT] = {APDS9960_REG_GOFFSET_L, 0}, + [APDS9960_CHAN_GESTURE_RIGHT] = {APDS9960_REG_GOFFSET_R, 0}, }; static const struct reg_default apds9960_reg_defaults[] = { @@ -255,6 +274,7 @@ static const struct iio_event_spec apds9960_als_event_spec[] = { #define APDS9960_GESTURE_CHANNEL(_dir, _si) { \ .type = IIO_PROXIMITY, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_CALIBBIAS), \ .channel = _si + 1, \ .scan_index = _si, \ .indexed = 1, \ @@ -282,7 +302,8 @@ static const struct iio_chan_spec apds9960_channels[] = { { .type = IIO_PROXIMITY, .address = APDS9960_REG_PDATA, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_CALIBBIAS), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .channel = 0, .indexed = 0, @@ -316,6 +337,28 @@ static const struct iio_chan_spec apds9960_channels[] = { APDS9960_INTENSITY_CHANNEL(BLUE), }; +static int apds9960_set_calibbias(struct apds9960_data *data, + struct iio_chan_spec const *chan, int calibbias) +{ + int ret, i; + + if (calibbias < S8_MIN || calibbias > S8_MAX) + return -EINVAL; + + guard(mutex)(&data->lock); + for (i = 0; i < 2; i++) { + if (apds9960_offset_regs[chan->channel][i] == 0) + break; + + ret = regmap_write(data->regmap, apds9960_offset_regs[chan->channel][i], calibbias); + if (ret < 0) + return ret; + } + data->calibbias[chan->channel] = calibbias; 
+ + return 0; +} + /* integration time in us */ static const int apds9960_int_time[][2] = { { 28000, 246}, @@ -531,6 +574,12 @@ static int apds9960_read_raw(struct iio_dev *indio_dev, } mutex_unlock(&data->lock); break; + case IIO_CHAN_INFO_CALIBBIAS: + mutex_lock(&data->lock); + *val = data->calibbias[chan->channel]; + ret = IIO_VAL_INT; + mutex_unlock(&data->lock); + break; } return ret; @@ -564,6 +613,10 @@ static int apds9960_write_raw(struct iio_dev *indio_dev, default: return -EINVAL; } + case IIO_CHAN_INFO_CALIBBIAS: + if (val2 != 0) + return -EINVAL; + return apds9960_set_calibbias(data, chan, val); default: return -EINVAL; } diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c new file mode 100644 index 00000000000000..2e458e9d5d8530 --- /dev/null +++ b/drivers/iio/light/bh1745.c @@ -0,0 +1,906 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ROHM BH1745 digital colour sensor driver + * + * Copyright (C) Mudit Sharma + * + * 7-bit I2C slave addresses: + * 0x38 (ADDR pin low) + * 0x39 (ADDR pin high) + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* BH1745 configuration registers */ + +/* System control */ +#define BH1745_SYS_CTRL 0x40 +#define BH1745_SYS_CTRL_SW_RESET BIT(7) +#define BH1745_SYS_CTRL_INTR_RESET BIT(6) +#define BH1745_SYS_CTRL_PART_ID_MASK GENMASK(5, 0) +#define BH1745_PART_ID 0x0B + +/* Mode control 1 */ +#define BH1745_MODE_CTRL1 0x41 +#define BH1745_CTRL1_MEASUREMENT_TIME_MASK GENMASK(2, 0) + +/* Mode control 2 */ +#define BH1745_MODE_CTRL2 0x42 +#define BH1745_CTRL2_RGBC_EN BIT(4) +#define BH1745_CTRL2_ADC_GAIN_MASK GENMASK(1, 0) + +/* Interrupt */ +#define BH1745_INTR 0x60 +#define BH1745_INTR_STATUS BIT(7) +#define BH1745_INTR_SOURCE_MASK GENMASK(3, 2) +#define BH1745_INTR_ENABLE BIT(0) + +#define BH1745_PERSISTENCE 0x61 + +/* Threshold high */ +#define BH1745_TH_LSB 0x62 +#define BH1745_TH_MSB 0x63 + +/* Threshold low */ +#define BH1745_TL_LSB 0x64 +#define BH1745_TL_MSB 0x65 + +/* BH1745 data output regs */ +#define BH1745_RED_LSB 0x50 +#define BH1745_RED_MSB 0x51 +#define BH1745_GREEN_LSB 0x52 +#define BH1745_GREEN_MSB 0x53 +#define BH1745_BLUE_LSB 0x54 +#define BH1745_BLUE_MSB 0x55 +#define BH1745_CLEAR_LSB 0x56 +#define BH1745_CLEAR_MSB 0x57 + +#define BH1745_MANU_ID_REG 0x92 + +/* From 16x max HW gain and 32x max integration time */ +#define BH1745_MAX_GAIN 512 + +enum bh1745_int_source { + BH1745_INTR_SOURCE_RED, + BH1745_INTR_SOURCE_GREEN, + BH1745_INTR_SOURCE_BLUE, + BH1745_INTR_SOURCE_CLEAR, +}; + +enum bh1745_gain { + BH1745_ADC_GAIN_1X, + BH1745_ADC_GAIN_2X, + BH1745_ADC_GAIN_16X, +}; + +enum bh1745_measurement_time { + BH1745_MEASUREMENT_TIME_160MS, + BH1745_MEASUREMENT_TIME_320MS, + BH1745_MEASUREMENT_TIME_640MS, + BH1745_MEASUREMENT_TIME_1280MS, + BH1745_MEASUREMENT_TIME_2560MS, + BH1745_MEASUREMENT_TIME_5120MS, +}; + +enum bh1745_presistence_value { + BH1745_PRESISTENCE_UPDATE_TOGGLE, + BH1745_PRESISTENCE_UPDATE_EACH_MEASUREMENT, + BH1745_PRESISTENCE_UPDATE_FOUR_MEASUREMENT, + BH1745_PRESISTENCE_UPDATE_EIGHT_MEASUREMENT, +}; + +static const struct iio_gain_sel_pair bh1745_gain[] = { + GAIN_SCALE_GAIN(1, BH1745_ADC_GAIN_1X), + GAIN_SCALE_GAIN(2, BH1745_ADC_GAIN_2X), + GAIN_SCALE_GAIN(16, BH1745_ADC_GAIN_16X), +}; + +static const struct iio_itime_sel_mul bh1745_itimes[] = { + GAIN_SCALE_ITIME_US(5120000, BH1745_MEASUREMENT_TIME_5120MS, 32), + GAIN_SCALE_ITIME_US(2560000, BH1745_MEASUREMENT_TIME_2560MS, 16), + GAIN_SCALE_ITIME_US(1280000, 
BH1745_MEASUREMENT_TIME_1280MS, 8), + GAIN_SCALE_ITIME_US(640000, BH1745_MEASUREMENT_TIME_640MS, 4), + GAIN_SCALE_ITIME_US(320000, BH1745_MEASUREMENT_TIME_320MS, 2), + GAIN_SCALE_ITIME_US(160000, BH1745_MEASUREMENT_TIME_160MS, 1), +}; + +struct bh1745_data { + /* + * Lock to prevent device setting update or read before + * related calculations are completed + */ + struct mutex lock; + struct regmap *regmap; + struct device *dev; + struct iio_trigger *trig; + struct iio_gts gts; +}; + +static const struct regmap_range bh1745_volatile_ranges[] = { + regmap_reg_range(BH1745_MODE_CTRL2, BH1745_MODE_CTRL2), /* VALID */ + regmap_reg_range(BH1745_RED_LSB, BH1745_CLEAR_MSB), /* Data */ + regmap_reg_range(BH1745_INTR, BH1745_INTR), /* Interrupt */ +}; + +static const struct regmap_access_table bh1745_volatile_regs = { + .yes_ranges = bh1745_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(bh1745_volatile_ranges), +}; + +static const struct regmap_range bh1745_readable_ranges[] = { + regmap_reg_range(BH1745_SYS_CTRL, BH1745_MODE_CTRL2), + regmap_reg_range(BH1745_RED_LSB, BH1745_CLEAR_MSB), + regmap_reg_range(BH1745_INTR, BH1745_INTR), + regmap_reg_range(BH1745_PERSISTENCE, BH1745_TL_MSB), + regmap_reg_range(BH1745_MANU_ID_REG, BH1745_MANU_ID_REG), +}; + +static const struct regmap_access_table bh1745_readable_regs = { + .yes_ranges = bh1745_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(bh1745_readable_ranges), +}; + +static const struct regmap_range bh1745_writable_ranges[] = { + regmap_reg_range(BH1745_SYS_CTRL, BH1745_MODE_CTRL2), + regmap_reg_range(BH1745_INTR, BH1745_INTR), + regmap_reg_range(BH1745_PERSISTENCE, BH1745_TL_MSB), +}; + +static const struct regmap_access_table bh1745_writable_regs = { + .yes_ranges = bh1745_writable_ranges, + .n_yes_ranges = ARRAY_SIZE(bh1745_writable_ranges), +}; + +static const struct regmap_config bh1745_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = BH1745_MANU_ID_REG, + .cache_type = REGCACHE_RBTREE, + .volatile_table = &bh1745_volatile_regs, + .wr_table = &bh1745_writable_regs, + .rd_table = &bh1745_readable_regs, +}; + +static const struct iio_event_spec bh1745_event_spec[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_shared_by_type = BIT(IIO_EV_INFO_PERIOD), + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + }, +}; + +#define BH1745_CHANNEL(_colour, _si, _addr) \ + { \ + .type = IIO_INTENSITY, .modified = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_INT_TIME), \ + .info_mask_shared_by_all_available = \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_INT_TIME), \ + .event_spec = bh1745_event_spec, \ + .num_event_specs = ARRAY_SIZE(bh1745_event_spec), \ + .channel2 = IIO_MOD_LIGHT_##_colour, .address = _addr, \ + .scan_index = _si, \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_CPU, \ + }, \ + } + +static const struct iio_chan_spec bh1745_channels[] = { + BH1745_CHANNEL(RED, 0, BH1745_RED_LSB), + BH1745_CHANNEL(GREEN, 1, BH1745_GREEN_LSB), + BH1745_CHANNEL(BLUE, 2, BH1745_BLUE_LSB), + BH1745_CHANNEL(CLEAR, 3, BH1745_CLEAR_LSB), + IIO_CHAN_SOFT_TIMESTAMP(4), +}; + +static int bh1745_reset(struct bh1745_data *data) +{ + return 
regmap_set_bits(data->regmap, BH1745_SYS_CTRL, + BH1745_SYS_CTRL_SW_RESET | + BH1745_SYS_CTRL_INTR_RESET); +} + +static int bh1745_power_on(struct bh1745_data *data) +{ + return regmap_set_bits(data->regmap, BH1745_MODE_CTRL2, + BH1745_CTRL2_RGBC_EN); +} + +static void bh1745_power_off(void *data_ptr) +{ + struct bh1745_data *data = data_ptr; + struct device *dev = data->dev; + int ret; + + ret = regmap_clear_bits(data->regmap, BH1745_MODE_CTRL2, + BH1745_CTRL2_RGBC_EN); + if (ret) + dev_err(dev, "Failed to turn off device\n"); +} + +static int bh1745_get_scale(struct bh1745_data *data, int *val, int *val2) +{ + int ret; + int value; + int gain_sel, int_time_sel; + int gain; + const struct iio_itime_sel_mul *int_time; + + ret = regmap_read(data->regmap, BH1745_MODE_CTRL2, &value); + if (ret) + return ret; + + gain_sel = FIELD_GET(BH1745_CTRL2_ADC_GAIN_MASK, value); + gain = iio_gts_find_gain_by_sel(&data->gts, gain_sel); + + ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value); + if (ret) + return ret; + + int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK, value); + int_time = iio_gts_find_itime_by_sel(&data->gts, int_time_sel); + + return iio_gts_get_scale(&data->gts, gain, int_time->time_us, val, + val2); +} + +static int bh1745_set_scale(struct bh1745_data *data, int val) +{ + struct device *dev = data->dev; + int ret; + int value; + int hw_gain_sel, current_int_time_sel, new_int_time_sel; + + ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value); + if (ret) + return ret; + + current_int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK, + value); + ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, + current_int_time_sel, + val, 0, &hw_gain_sel); + if (ret) { + for (int i = 0; i < data->gts.num_itime; i++) { + new_int_time_sel = data->gts.itime_table[i].sel; + + if (new_int_time_sel == current_int_time_sel) + continue; + + ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts, + new_int_time_sel, + val, 0, + &hw_gain_sel); + if (!ret) + break; + } + + if (ret) { + dev_dbg(dev, "Unsupported scale value requested: %d\n", + val); + return -EINVAL; + } + + ret = regmap_write_bits(data->regmap, BH1745_MODE_CTRL1, + BH1745_CTRL1_MEASUREMENT_TIME_MASK, + new_int_time_sel); + if (ret) + return ret; + } + + return regmap_write_bits(data->regmap, BH1745_MODE_CTRL2, + BH1745_CTRL2_ADC_GAIN_MASK, hw_gain_sel); +} + +static int bh1745_get_int_time(struct bh1745_data *data, int *val) +{ + int ret; + int value; + int int_time, int_time_sel; + + ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value); + if (ret) + return ret; + + int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK, value); + int_time = iio_gts_find_int_time_by_sel(&data->gts, int_time_sel); + if (int_time < 0) + return int_time; + + *val = int_time; + + return 0; +} + +static int bh1745_set_int_time(struct bh1745_data *data, int val, int val2) +{ + struct device *dev = data->dev; + int ret; + int value; + int current_int_time, current_hwgain_sel, current_hwgain; + int new_hwgain, new_hwgain_sel, new_int_time_sel; + int req_int_time = (1000000 * val) + val2; + + if (!iio_gts_valid_time(&data->gts, req_int_time)) { + dev_dbg(dev, "Unsupported integration time requested: %d\n", + req_int_time); + return -EINVAL; + } + + ret = bh1745_get_int_time(data, ¤t_int_time); + if (ret) + return ret; + + if (current_int_time == req_int_time) + return 0; + + ret = regmap_read(data->regmap, BH1745_MODE_CTRL2, &value); + if (ret) + return ret; + + current_hwgain_sel = 
FIELD_GET(BH1745_CTRL2_ADC_GAIN_MASK, value); + current_hwgain = iio_gts_find_gain_by_sel(&data->gts, + current_hwgain_sel); + ret = iio_gts_find_new_gain_by_old_gain_time(&data->gts, current_hwgain, + current_int_time, + req_int_time, + &new_hwgain); + if (new_hwgain < 0) { + dev_dbg(dev, "No corresponding gain for requested integration time\n"); + return ret; + } + + if (ret) { + bool in_range; + + new_hwgain = iio_find_closest_gain_low(&data->gts, new_hwgain, + &in_range); + if (new_hwgain < 0) { + new_hwgain = iio_gts_get_min_gain(&data->gts); + if (new_hwgain < 0) + return ret; + } + + if (!in_range) + dev_dbg(dev, "Optimal gain out of range\n"); + + dev_dbg(dev, "Scale changed, new hw_gain %d\n", new_hwgain); + } + + new_hwgain_sel = iio_gts_find_sel_by_gain(&data->gts, new_hwgain); + if (new_hwgain_sel < 0) + return new_hwgain_sel; + + ret = regmap_write_bits(data->regmap, BH1745_MODE_CTRL2, + BH1745_CTRL2_ADC_GAIN_MASK, + new_hwgain_sel); + if (ret) + return ret; + + new_int_time_sel = iio_gts_find_sel_by_int_time(&data->gts, + req_int_time); + if (new_int_time_sel < 0) + return new_int_time_sel; + + return regmap_write_bits(data->regmap, BH1745_MODE_CTRL1, + BH1745_CTRL1_MEASUREMENT_TIME_MASK, + new_int_time_sel); +} + +static int bh1745_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct bh1745_data *data = iio_priv(indio_dev); + int ret; + int value; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + iio_device_claim_direct_scoped(return -EBUSY, indio_dev) { + ret = regmap_bulk_read(data->regmap, chan->address, + &value, 2); + if (ret) + return ret; + *val = value; + + return IIO_VAL_INT; + } + unreachable(); + + case IIO_CHAN_INFO_SCALE: { + guard(mutex)(&data->lock); + ret = bh1745_get_scale(data, val, val2); + if (ret) + return ret; + + return IIO_VAL_INT; + } + + case IIO_CHAN_INFO_INT_TIME: { + guard(mutex)(&data->lock); + *val = 0; + ret = bh1745_get_int_time(data, val2); + if (ret) + return 0; + + return IIO_VAL_INT_PLUS_MICRO; + } + + default: + return -EINVAL; + } +} + +static int bh1745_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct bh1745_data *data = iio_priv(indio_dev); + + guard(mutex)(&data->lock); + switch (mask) { + case IIO_CHAN_INFO_SCALE: + return bh1745_set_scale(data, val); + + case IIO_CHAN_INFO_INT_TIME: + return bh1745_set_int_time(data, val, val2); + + default: + return -EINVAL; + } +} + +static int bh1745_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SCALE: + return IIO_VAL_INT; + + case IIO_CHAN_INFO_INT_TIME: + return IIO_VAL_INT_PLUS_MICRO; + + default: + return -EINVAL; + } +} + +static int bh1745_read_thresh(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int *val, int *val2) +{ + struct bh1745_data *data = iio_priv(indio_dev); + int ret; + + switch (info) { + case IIO_EV_INFO_VALUE: + switch (dir) { + case IIO_EV_DIR_RISING: + ret = regmap_bulk_read(data->regmap, BH1745_TH_LSB, + val, 2); + if (ret) + return ret; + + return IIO_VAL_INT; + + case IIO_EV_DIR_FALLING: + ret = regmap_bulk_read(data->regmap, BH1745_TL_LSB, + val, 2); + if (ret) + return ret; + + return IIO_VAL_INT; + + default: + return -EINVAL; + } + + case IIO_EV_INFO_PERIOD: + ret = regmap_read(data->regmap, BH1745_PERSISTENCE, val); + if (ret) + return ret; + + 
return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + +static int bh1745_write_thresh(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int val, int val2) +{ + struct bh1745_data *data = iio_priv(indio_dev); + int ret; + + switch (info) { + case IIO_EV_INFO_VALUE: + if (val < 0x0 || val > 0xFFFF) + return -EINVAL; + + switch (dir) { + case IIO_EV_DIR_RISING: + ret = regmap_bulk_write(data->regmap, BH1745_TH_LSB, + &val, 2); + if (ret) + return ret; + + return IIO_VAL_INT; + + case IIO_EV_DIR_FALLING: + ret = regmap_bulk_write(data->regmap, BH1745_TL_LSB, + &val, 2); + if (ret) + return ret; + + return IIO_VAL_INT; + + default: + return -EINVAL; + } + + case IIO_EV_INFO_PERIOD: + if (val < BH1745_PRESISTENCE_UPDATE_TOGGLE || + val > BH1745_PRESISTENCE_UPDATE_EIGHT_MEASUREMENT) + return -EINVAL; + ret = regmap_write(data->regmap, BH1745_PERSISTENCE, val); + if (ret) + return ret; + + return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + +static int bh1745_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct bh1745_data *data = iio_priv(indio_dev); + int ret; + int value; + int int_src; + + ret = regmap_read(data->regmap, BH1745_INTR, &value); + if (ret) + return ret; + + if (!FIELD_GET(BH1745_INTR_ENABLE, value)) + return 0; + + int_src = FIELD_GET(BH1745_INTR_SOURCE_MASK, value); + + switch (chan->channel2) { + case IIO_MOD_LIGHT_RED: + if (int_src == BH1745_INTR_SOURCE_RED) + return 1; + return 0; + + case IIO_MOD_LIGHT_GREEN: + if (int_src == BH1745_INTR_SOURCE_GREEN) + return 1; + return 0; + + case IIO_MOD_LIGHT_BLUE: + if (int_src == BH1745_INTR_SOURCE_BLUE) + return 1; + return 0; + + case IIO_MOD_LIGHT_CLEAR: + if (int_src == BH1745_INTR_SOURCE_CLEAR) + return 1; + return 0; + + default: + return -EINVAL; + } +} + +static int bh1745_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, int state) +{ + struct bh1745_data *data = iio_priv(indio_dev); + int value; + + if (state == 0) + return regmap_clear_bits(data->regmap, + BH1745_INTR, BH1745_INTR_ENABLE); + + if (state == 1) { + /* Latch is always enabled when enabling interrupt */ + value = BH1745_INTR_ENABLE; + + switch (chan->channel2) { + case IIO_MOD_LIGHT_RED: + return regmap_write(data->regmap, BH1745_INTR, + value | FIELD_PREP(BH1745_INTR_SOURCE_MASK, + BH1745_INTR_SOURCE_RED)); + + case IIO_MOD_LIGHT_GREEN: + return regmap_write(data->regmap, BH1745_INTR, + value | FIELD_PREP(BH1745_INTR_SOURCE_MASK, + BH1745_INTR_SOURCE_GREEN)); + + case IIO_MOD_LIGHT_BLUE: + return regmap_write(data->regmap, BH1745_INTR, + value | FIELD_PREP(BH1745_INTR_SOURCE_MASK, + BH1745_INTR_SOURCE_BLUE)); + + case IIO_MOD_LIGHT_CLEAR: + return regmap_write(data->regmap, BH1745_INTR, + value | FIELD_PREP(BH1745_INTR_SOURCE_MASK, + BH1745_INTR_SOURCE_CLEAR)); + + default: + return -EINVAL; + } + } + + return -EINVAL; +} + +static int bh1745_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, const int **vals, + int *type, int *length, long mask) +{ + struct bh1745_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_INT_TIME: + return iio_gts_avail_times(&data->gts, vals, type, length); + + case IIO_CHAN_INFO_SCALE: + return iio_gts_all_avail_scales(&data->gts, vals, type, length); + + default: + return -EINVAL; 
+ } +} + +static const struct iio_info bh1745_info = { + .read_raw = bh1745_read_raw, + .write_raw = bh1745_write_raw, + .write_raw_get_fmt = bh1745_write_raw_get_fmt, + .read_event_value = bh1745_read_thresh, + .write_event_value = bh1745_write_thresh, + .read_event_config = bh1745_read_event_config, + .write_event_config = bh1745_write_event_config, + .read_avail = bh1745_read_avail, +}; + +static irqreturn_t bh1745_interrupt_handler(int interrupt, void *p) +{ + struct iio_dev *indio_dev = p; + struct bh1745_data *data = iio_priv(indio_dev); + int ret; + int value; + int int_src; + + ret = regmap_read(data->regmap, BH1745_INTR, &value); + if (ret) + return IRQ_NONE; + + int_src = FIELD_GET(BH1745_INTR_SOURCE_MASK, value); + + if (value & BH1745_INTR_STATUS) { + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, int_src, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_EITHER), + iio_get_time_ns(indio_dev)); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static irqreturn_t bh1745_trigger_handler(int interrupt, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bh1745_data *data = iio_priv(indio_dev); + struct { + u16 chans[4]; + s64 timestamp __aligned(8); + } scan; + u16 value; + int ret; + int i; + int j = 0; + + iio_for_each_active_channel(indio_dev, i) { + ret = regmap_bulk_read(data->regmap, BH1745_RED_LSB + 2 * i, + &value, 2); + if (ret) + goto err; + + scan.chans[j++] = value; + } + + iio_push_to_buffers_with_timestamp(indio_dev, &scan, + iio_get_time_ns(indio_dev)); + +err: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static int bh1745_setup_triggered_buffer(struct iio_dev *indio_dev, + struct device *parent, + int irq) +{ + struct bh1745_data *data = iio_priv(indio_dev); + struct device *dev = data->dev; + int ret; + + ret = devm_iio_triggered_buffer_setup(parent, indio_dev, NULL, + bh1745_trigger_handler, NULL); + if (ret) + return dev_err_probe(dev, ret, + "Triggered buffer setup failed\n"); + + if (irq) { + ret = devm_request_threaded_irq(dev, irq, NULL, + bh1745_interrupt_handler, + IRQF_ONESHOT, + "bh1745_interrupt", indio_dev); + if (ret) + return dev_err_probe(dev, ret, + "Request for IRQ failed\n"); + } + + return 0; +} + +static int bh1745_init(struct bh1745_data *data) +{ + int ret; + struct device *dev = data->dev; + + mutex_init(&data->lock); + + ret = devm_iio_init_iio_gts(dev, BH1745_MAX_GAIN, 0, bh1745_gain, + ARRAY_SIZE(bh1745_gain), bh1745_itimes, + ARRAY_SIZE(bh1745_itimes), &data->gts); + if (ret) + return ret; + + ret = bh1745_reset(data); + if (ret) + return dev_err_probe(dev, ret, "Failed to reset sensor\n"); + + ret = bh1745_power_on(data); + if (ret) + return dev_err_probe(dev, ret, "Failed to turn on sensor\n"); + + ret = devm_add_action_or_reset(dev, bh1745_power_off, data); + if (ret) + return dev_err_probe(dev, ret, + "Failed to add action or reset\n"); + + return 0; +} + +static int bh1745_probe(struct i2c_client *client) +{ + int ret; + int value; + int part_id; + struct bh1745_data *data; + struct iio_dev *indio_dev; + struct device *dev = &client->dev; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + indio_dev->info = &bh1745_info; + indio_dev->name = "bh1745"; + indio_dev->channels = bh1745_channels; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->num_channels = ARRAY_SIZE(bh1745_channels); + data = iio_priv(indio_dev); + data->dev = &client->dev; + data->regmap = devm_regmap_init_i2c(client, &bh1745_regmap); + 
if (IS_ERR(data->regmap)) + return dev_err_probe(dev, PTR_ERR(data->regmap), + "Failed to initialize Regmap\n"); + + ret = regmap_read(data->regmap, BH1745_SYS_CTRL, &value); + if (ret) + return ret; + + part_id = FIELD_GET(BH1745_SYS_CTRL_PART_ID_MASK, value); + if (part_id != BH1745_PART_ID) + dev_warn(dev, "Unknown part ID 0x%x\n", part_id); + + ret = devm_regulator_get_enable(dev, "vdd"); + if (ret) + return dev_err_probe(dev, ret, + "Failed to get and enable regulator\n"); + + ret = bh1745_init(data); + if (ret) + return ret; + + ret = bh1745_setup_triggered_buffer(indio_dev, indio_dev->dev.parent, + client->irq); + if (ret) + return ret; + + ret = devm_iio_device_register(dev, indio_dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to register device\n"); + + return 0; +} + +static const struct i2c_device_id bh1745_idtable[] = { + { "bh1745" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, bh1745_idtable); + +static const struct of_device_id bh1745_of_match[] = { + { .compatible = "rohm,bh1745" }, + { } +}; +MODULE_DEVICE_TABLE(of, bh1745_of_match); + +static struct i2c_driver bh1745_driver = { + .driver = { + .name = "bh1745", + .of_match_table = bh1745_of_match, + }, + .probe = bh1745_probe, + .id_table = bh1745_idtable, +}; +module_i2c_driver(bh1745_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mudit Sharma "); +MODULE_DESCRIPTION("BH1745 colour sensor driver"); +MODULE_IMPORT_NS(IIO_GTS_HELPER); diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c index 7125e011a38ae8..f8b1d7dd6f5fc6 100644 --- a/drivers/iio/light/gp2ap002.c +++ b/drivers/iio/light/gp2ap002.c @@ -420,7 +420,7 @@ static int gp2ap002_regmap_i2c_write(void *context, unsigned int reg, return i2c_smbus_write_byte_data(i2c, reg, val); } -static struct regmap_bus gp2ap002_regmap_bus = { +static const struct regmap_bus gp2ap002_regmap_bus = { .reg_read = gp2ap002_regmap_i2c_read, .reg_write = gp2ap002_regmap_i2c_write, }; diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c index 757383456da699..81e718cdeae32d 100644 --- a/drivers/iio/light/gp2ap020a00f.c +++ b/drivers/iio/light/gp2ap020a00f.c @@ -43,7 +43,7 @@ #include #include #include -#include +#include #include #include #include @@ -965,8 +965,7 @@ static irqreturn_t gp2ap020a00f_trigger_handler(int irq, void *data) size_t d_size = 0; int i, out_val, ret; - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { ret = regmap_bulk_read(priv->regmap, GP2AP020A00F_DATA_REG(i), &priv->buffer[d_size], 2); @@ -1397,8 +1396,7 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev) * two separate IIO channels they are treated in the driver logic * as if they were controlled independently. 
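Several drivers touched by this diff (kmx61, adjd_s311, isl29125, gp2ap020a00f and the new bh1745) replace open-coded for_each_set_bit() loops over active_scan_mask and the now-private masklength with the iio_for_each_active_channel() helper. A minimal trigger handler using it could look like the sketch below; demo_read_channel() and the four-channel scan layout are invented:

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

static u16 demo_read_channel(int chan);	/* hypothetical register read */

static irqreturn_t demo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		u16 chans[4];
		s64 timestamp __aligned(8);
	} scan = { };
	int i, j = 0;

	/* visits only the scan indexes enabled in the active mask */
	iio_for_each_active_channel(indio_dev, i)
		scan.chans[j++] = demo_read_channel(i);

	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
					   iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}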
*/ - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { switch (i) { case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR: err = gp2ap020a00f_exec_cmd(data, @@ -1435,8 +1433,7 @@ static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev) mutex_lock(&data->lock); - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { switch (i) { case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR: err = gp2ap020a00f_exec_cmd(data, diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c index 59329546df58de..b176bf4c884ba0 100644 --- a/drivers/iio/light/isl29125.c +++ b/drivers/iio/light/isl29125.c @@ -181,8 +181,7 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p) struct isl29125_data *data = iio_priv(indio_dev); int i, j = 0; - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { int ret = i2c_smbus_read_word_data(data->client, isl29125_regs[i].data); if (ret < 0) diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c index fff1e899097d98..4f6975e63a8fe1 100644 --- a/drivers/iio/light/ltr390.c +++ b/drivers/iio/light/ltr390.c @@ -23,20 +23,30 @@ #include #include #include +#include #include -#include +#include -#define LTR390_MAIN_CTRL 0x00 -#define LTR390_PART_ID 0x06 -#define LTR390_UVS_DATA 0x10 +#define LTR390_MAIN_CTRL 0x00 +#define LTR390_ALS_UVS_MEAS_RATE 0x04 +#define LTR390_ALS_UVS_GAIN 0x05 +#define LTR390_PART_ID 0x06 +#define LTR390_ALS_DATA 0x0D +#define LTR390_UVS_DATA 0x10 +#define LTR390_INT_CFG 0x19 + +#define LTR390_PART_NUMBER_ID 0xb +#define LTR390_ALS_UVS_GAIN_MASK 0x07 +#define LTR390_ALS_UVS_INT_TIME_MASK 0x70 +#define LTR390_ALS_UVS_INT_TIME(x) FIELD_PREP(LTR390_ALS_UVS_INT_TIME_MASK, (x)) #define LTR390_SW_RESET BIT(4) #define LTR390_UVS_MODE BIT(3) #define LTR390_SENSOR_ENABLE BIT(1) -#define LTR390_PART_NUMBER_ID 0xb +#define LTR390_FRACTIONAL_PRECISION 100 /* * At 20-bit resolution (integration time: 400ms) and 18x gain, 2300 counts of @@ -55,11 +65,19 @@ */ #define LTR390_WINDOW_FACTOR 1 +enum ltr390_mode { + LTR390_SET_ALS_MODE, + LTR390_SET_UVS_MODE, +}; + struct ltr390_data { struct regmap *regmap; struct i2c_client *client; /* Protects device from simulataneous reads */ struct mutex lock; + enum ltr390_mode mode; + int gain; + int int_time_us; }; static const struct regmap_config ltr390_regmap_config = { @@ -75,8 +93,6 @@ static int ltr390_register_read(struct ltr390_data *data, u8 register_address) int ret; u8 recieve_buffer[3]; - guard(mutex)(&data->lock); - ret = regmap_bulk_read(data->regmap, register_address, recieve_buffer, sizeof(recieve_buffer)); if (ret) { @@ -87,6 +103,38 @@ static int ltr390_register_read(struct ltr390_data *data, u8 register_address) return get_unaligned_le24(recieve_buffer); } +static int ltr390_set_mode(struct ltr390_data *data, enum ltr390_mode mode) +{ + int ret; + + if (data->mode == mode) + return 0; + + switch (mode) { + case LTR390_SET_ALS_MODE: + ret = regmap_clear_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_UVS_MODE); + break; + + case LTR390_SET_UVS_MODE: + ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_UVS_MODE); + break; + } + + if (ret) + return ret; + + data->mode = mode; + return 0; +} + +static int ltr390_counts_per_uvi(struct ltr390_data *data) +{ + const int orig_gain = 18; + const int orig_int_time = 400; + + return DIV_ROUND_CLOSEST(23 * data->gain * data->int_time_us, 10 * 
orig_gain * orig_int_time); +} + static int ltr390_read_raw(struct iio_dev *iio_device, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -94,29 +142,174 @@ static int ltr390_read_raw(struct iio_dev *iio_device, int ret; struct ltr390_data *data = iio_priv(iio_device); + guard(mutex)(&data->lock); switch (mask) { case IIO_CHAN_INFO_RAW: - ret = ltr390_register_read(data, LTR390_UVS_DATA); - if (ret < 0) - return ret; + switch (chan->type) { + case IIO_UVINDEX: + ret = ltr390_set_mode(data, LTR390_SET_UVS_MODE); + if (ret < 0) + return ret; + + ret = ltr390_register_read(data, LTR390_UVS_DATA); + if (ret < 0) + return ret; + break; + + case IIO_LIGHT: + ret = ltr390_set_mode(data, LTR390_SET_ALS_MODE); + if (ret < 0) + return ret; + + ret = ltr390_register_read(data, LTR390_ALS_DATA); + if (ret < 0) + return ret; + break; + + default: + return -EINVAL; + } *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: - *val = LTR390_WINDOW_FACTOR; - *val2 = LTR390_COUNTS_PER_UVI; - return IIO_VAL_FRACTIONAL; + switch (chan->type) { + case IIO_UVINDEX: + *val = LTR390_WINDOW_FACTOR * LTR390_FRACTIONAL_PRECISION; + *val2 = ltr390_counts_per_uvi(data); + return IIO_VAL_FRACTIONAL; + + case IIO_LIGHT: + *val = LTR390_WINDOW_FACTOR * 6 * 100; + *val2 = data->gain * data->int_time_us; + return IIO_VAL_FRACTIONAL; + + default: + return -EINVAL; + } + + case IIO_CHAN_INFO_INT_TIME: + *val = data->int_time_us; + return IIO_VAL_INT; + default: return -EINVAL; } } -static const struct iio_info ltr390_info = { - .read_raw = ltr390_read_raw, +/* integration time in us */ +static const int ltr390_int_time_map_us[] = { 400000, 200000, 100000, 50000, 25000, 12500 }; +static const int ltr390_gain_map[] = { 1, 3, 6, 9, 18 }; + +static const struct iio_chan_spec ltr390_channels[] = { + /* UV sensor */ + { + .type = IIO_UVINDEX, + .scan_index = 0, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) | BIT(IIO_CHAN_INFO_SCALE) + }, + /* ALS sensor */ + { + .type = IIO_LIGHT, + .scan_index = 1, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) | BIT(IIO_CHAN_INFO_SCALE) + }, }; -static const struct iio_chan_spec ltr390_channel = { - .type = IIO_UVINDEX, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) +static int ltr390_set_gain(struct ltr390_data *data, int val) +{ + int ret, idx; + + for (idx = 0; idx < ARRAY_SIZE(ltr390_gain_map); idx++) { + if (ltr390_gain_map[idx] != val) + continue; + + guard(mutex)(&data->lock); + ret = regmap_update_bits(data->regmap, + LTR390_ALS_UVS_GAIN, + LTR390_ALS_UVS_GAIN_MASK, idx); + if (ret) + return ret; + + data->gain = ltr390_gain_map[idx]; + return 0; + } + + return -EINVAL; +} + +static int ltr390_set_int_time(struct ltr390_data *data, int val) +{ + int ret, idx; + + for (idx = 0; idx < ARRAY_SIZE(ltr390_int_time_map_us); idx++) { + if (ltr390_int_time_map_us[idx] != val) + continue; + + guard(mutex)(&data->lock); + ret = regmap_update_bits(data->regmap, + LTR390_ALS_UVS_MEAS_RATE, + LTR390_ALS_UVS_INT_TIME_MASK, + LTR390_ALS_UVS_INT_TIME(idx)); + if (ret) + return ret; + + data->int_time_us = ltr390_int_time_map_us[idx]; + return 0; + } + + return -EINVAL; +} + +static int ltr390_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec 
const *chan, + const int **vals, int *type, int *length, long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SCALE: + *length = ARRAY_SIZE(ltr390_gain_map); + *type = IIO_VAL_INT; + *vals = ltr390_gain_map; + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_INT_TIME: + *length = ARRAY_SIZE(ltr390_int_time_map_us); + *type = IIO_VAL_INT; + *vals = ltr390_int_time_map_us; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +static int ltr390_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct ltr390_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + if (val2 != 0) + return -EINVAL; + + return ltr390_set_gain(data, val); + + case IIO_CHAN_INFO_INT_TIME: + if (val2 != 0) + return -EINVAL; + + return ltr390_set_int_time(data, val); + + default: + return -EINVAL; + } +} + +static const struct iio_info ltr390_info = { + .read_raw = ltr390_read_raw, + .write_raw = ltr390_write_raw, + .read_avail = ltr390_read_avail, }; static int ltr390_probe(struct i2c_client *client) @@ -139,11 +332,18 @@ static int ltr390_probe(struct i2c_client *client) "regmap initialization failed\n"); data->client = client; + /* default value of integration time from pg: 15 of the datasheet */ + data->int_time_us = 100000; + /* default value of gain from pg: 16 of the datasheet */ + data->gain = 3; + /* default mode for ltr390 is ALS mode */ + data->mode = LTR390_SET_ALS_MODE; + mutex_init(&data->lock); indio_dev->info = <r390_info; - indio_dev->channels = <r390_channel; - indio_dev->num_channels = 1; + indio_dev->channels = ltr390_channels; + indio_dev->num_channels = ARRAY_SIZE(ltr390_channels); indio_dev->name = "ltr390"; ret = regmap_read(data->regmap, LTR390_PART_ID, &part_number); @@ -161,8 +361,7 @@ static int ltr390_probe(struct i2c_client *client) /* Wait for the registers to reset before proceeding */ usleep_range(1000, 2000); - ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, - LTR390_SENSOR_ENABLE | LTR390_UVS_MODE); + ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE); if (ret) return dev_err_probe(dev, ret, "failed to enable the sensor\n"); diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c index 68dc48420a886a..37eecff571b96b 100644 --- a/drivers/iio/light/ltrf216a.c +++ b/drivers/iio/light/ltrf216a.c @@ -26,7 +26,7 @@ #include -#include +#include #define LTRF216A_ALS_RESET_MASK BIT(4) #define LTRF216A_ALS_DATA_STATUS BIT(3) @@ -68,6 +68,13 @@ static const int ltrf216a_int_time_reg[][2] = { { 25, 0x40 }, }; +struct ltr_chip_info { + /* Chip contains CLEAR_DATA_0/1/2 registers at offset 0xa..0xc */ + bool has_clear_data; + /* Lux calculation multiplier for ALS data */ + int lux_multiplier; +}; + /* * Window Factor is needed when the device is under Window glass * with coated tinted ink. 
This is to compensate for the light loss @@ -79,6 +86,7 @@ static const int ltrf216a_int_time_reg[][2] = { struct ltrf216a_data { struct regmap *regmap; struct i2c_client *client; + const struct ltr_chip_info *info; u32 int_time; u16 int_time_fac; u8 als_gain_fac; @@ -246,7 +254,7 @@ static int ltrf216a_get_lux(struct ltrf216a_data *data) ltrf216a_set_power_state(data, false); - lux = greendata * 45 * LTRF216A_WIN_FAC; + lux = greendata * data->info->lux_multiplier * LTRF216A_WIN_FAC; return lux; } @@ -334,15 +342,15 @@ static const struct iio_info ltrf216a_info = { static bool ltrf216a_readable_reg(struct device *dev, unsigned int reg) { + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); + struct ltrf216a_data *data = iio_priv(indio_dev); + switch (reg) { case LTRF216A_MAIN_CTRL: case LTRF216A_ALS_MEAS_RES: case LTRF216A_ALS_GAIN: case LTRF216A_PART_ID: case LTRF216A_MAIN_STATUS: - case LTRF216A_ALS_CLEAR_DATA_0: - case LTRF216A_ALS_CLEAR_DATA_1: - case LTRF216A_ALS_CLEAR_DATA_2: case LTRF216A_ALS_DATA_0: case LTRF216A_ALS_DATA_1: case LTRF216A_ALS_DATA_2: @@ -355,6 +363,10 @@ static bool ltrf216a_readable_reg(struct device *dev, unsigned int reg) case LTRF216A_ALS_THRES_LOW_1: case LTRF216A_ALS_THRES_LOW_2: return true; + case LTRF216A_ALS_CLEAR_DATA_0: + case LTRF216A_ALS_CLEAR_DATA_1: + case LTRF216A_ALS_CLEAR_DATA_2: + return data->info->has_clear_data; default: return false; } @@ -382,15 +394,23 @@ static bool ltrf216a_writable_reg(struct device *dev, unsigned int reg) static bool ltrf216a_volatile_reg(struct device *dev, unsigned int reg) { + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); + struct ltrf216a_data *data = iio_priv(indio_dev); + switch (reg) { case LTRF216A_MAIN_STATUS: - case LTRF216A_ALS_CLEAR_DATA_0: - case LTRF216A_ALS_CLEAR_DATA_1: - case LTRF216A_ALS_CLEAR_DATA_2: case LTRF216A_ALS_DATA_0: case LTRF216A_ALS_DATA_1: case LTRF216A_ALS_DATA_2: return true; + /* + * If these registers are not present on a chip (like LTR-308), + * the missing registers are not considered volatile. 
+ */ + case LTRF216A_ALS_CLEAR_DATA_0: + case LTRF216A_ALS_CLEAR_DATA_1: + case LTRF216A_ALS_CLEAR_DATA_2: + return data->info->has_clear_data; default: return false; } @@ -433,6 +453,7 @@ static int ltrf216a_probe(struct i2c_client *client) i2c_set_clientdata(client, indio_dev); data->client = client; + data->info = i2c_get_match_data(client); mutex_init(&data->lock); @@ -520,15 +541,27 @@ static int ltrf216a_runtime_resume(struct device *dev) static DEFINE_RUNTIME_DEV_PM_OPS(ltrf216a_pm_ops, ltrf216a_runtime_suspend, ltrf216a_runtime_resume, NULL); +static const struct ltr_chip_info ltr308_chip_info = { + .has_clear_data = false, + .lux_multiplier = 60, +}; + +static const struct ltr_chip_info ltrf216a_chip_info = { + .has_clear_data = true, + .lux_multiplier = 45, +}; + static const struct i2c_device_id ltrf216a_id[] = { - { "ltrf216a" }, + { "ltr308", .driver_data = (kernel_ulong_t)<r308_chip_info }, + { "ltrf216a", .driver_data = (kernel_ulong_t)<rf216a_chip_info }, {} }; MODULE_DEVICE_TABLE(i2c, ltrf216a_id); static const struct of_device_id ltrf216a_of_match[] = { - { .compatible = "liteon,ltrf216a" }, - { .compatible = "ltr,ltrf216a" }, + { .compatible = "liteon,ltr308", .data = <r308_chip_info }, + { .compatible = "liteon,ltrf216a", .data = <rf216a_chip_info }, + { .compatible = "ltr,ltrf216a", .data = <rf216a_chip_info }, {} }; MODULE_DEVICE_TABLE(of, ltrf216a_of_match); diff --git a/drivers/iio/light/noa1305.c b/drivers/iio/light/noa1305.c index 596cc48c4c3415..25f63da70297e1 100644 --- a/drivers/iio/light/noa1305.c +++ b/drivers/iio/light/noa1305.c @@ -29,6 +29,7 @@ #define NOA1305_INTEGR_TIME_25MS 0x05 #define NOA1305_INTEGR_TIME_12_5MS 0x06 #define NOA1305_INTEGR_TIME_6_25MS 0x07 +#define NOA1305_INTEGR_TIME_MASK 0x07 #define NOA1305_REG_INT_SELECT 0x3 #define NOA1305_INT_SEL_ACTIVE_HIGH 0x01 #define NOA1305_INT_SEL_ACTIVE_LOW 0x02 @@ -43,12 +44,34 @@ #define NOA1305_DEVICE_ID 0x0519 #define NOA1305_DRIVER_NAME "noa1305" +static int noa1305_scale_available[] = { + 100, 8 * 77, /* 800 ms */ + 100, 4 * 77, /* 400 ms */ + 100, 2 * 77, /* 200 ms */ + 100, 1 * 77, /* 100 ms */ + 1000, 5 * 77, /* 50 ms */ + 10000, 25 * 77, /* 25 ms */ + 100000, 125 * 77, /* 12.5 ms */ + 1000000, 625 * 77, /* 6.25 ms */ +}; + +static int noa1305_int_time_available[] = { + 0, 800000, /* 800 ms */ + 0, 400000, /* 400 ms */ + 0, 200000, /* 200 ms */ + 0, 100000, /* 100 ms */ + 0, 50000, /* 50 ms */ + 0, 25000, /* 25 ms */ + 0, 12500, /* 12.5 ms */ + 0, 6250, /* 6.25 ms */ +}; + struct noa1305_priv { struct i2c_client *client; struct regmap *regmap; }; -static int noa1305_measure(struct noa1305_priv *priv) +static int noa1305_measure(struct noa1305_priv *priv, int *val) { __le16 data; int ret; @@ -58,7 +81,9 @@ static int noa1305_measure(struct noa1305_priv *priv) if (ret < 0) return ret; - return le16_to_cpu(data); + *val = le16_to_cpu(data); + + return IIO_VAL_INT; } static int noa1305_scale(struct noa1305_priv *priv, int *val, int *val2) @@ -76,91 +101,113 @@ static int noa1305_scale(struct noa1305_priv *priv, int *val, int *val2) * Integration Constant = 7.7 * Integration Time in Seconds */ - switch (data) { - case NOA1305_INTEGR_TIME_800MS: - *val = 100; - *val2 = 77 * 8; - break; - case NOA1305_INTEGR_TIME_400MS: - *val = 100; - *val2 = 77 * 4; - break; - case NOA1305_INTEGR_TIME_200MS: - *val = 100; - *val2 = 77 * 2; - break; - case NOA1305_INTEGR_TIME_100MS: - *val = 100; - *val2 = 77; - break; - case NOA1305_INTEGR_TIME_50MS: - *val = 1000; - *val2 = 77 * 5; - break; - case 
NOA1305_INTEGR_TIME_25MS: - *val = 10000; - *val2 = 77 * 25; - break; - case NOA1305_INTEGR_TIME_12_5MS: - *val = 100000; - *val2 = 77 * 125; - break; - case NOA1305_INTEGR_TIME_6_25MS: - *val = 1000000; - *val2 = 77 * 625; - break; - default: - return -EINVAL; - } + data &= NOA1305_INTEGR_TIME_MASK; + *val = noa1305_scale_available[2 * data + 0]; + *val2 = noa1305_scale_available[2 * data + 1]; return IIO_VAL_FRACTIONAL; } +static int noa1305_int_time(struct noa1305_priv *priv, int *val, int *val2) +{ + int data; + int ret; + + ret = regmap_read(priv->regmap, NOA1305_REG_INTEGRATION_TIME, &data); + if (ret < 0) + return ret; + + data &= NOA1305_INTEGR_TIME_MASK; + *val = noa1305_int_time_available[2 * data + 0]; + *val2 = noa1305_int_time_available[2 * data + 1]; + + return IIO_VAL_INT_PLUS_MICRO; +} + static const struct iio_chan_spec noa1305_channels[] = { { .type = IIO_LIGHT, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), } }; +static int noa1305_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, + int *length, long mask) +{ + if (chan->type != IIO_LIGHT) + return -EINVAL; + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + *vals = noa1305_scale_available; + *length = ARRAY_SIZE(noa1305_scale_available); + *type = IIO_VAL_FRACTIONAL; + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_INT_TIME: + *vals = noa1305_int_time_available; + *length = ARRAY_SIZE(noa1305_int_time_available); + *type = IIO_VAL_INT_PLUS_MICRO; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + static int noa1305_read_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int *val, int *val2, long mask) + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) { - int ret = -EINVAL; struct noa1305_priv *priv = iio_priv(indio_dev); + if (chan->type != IIO_LIGHT) + return -EINVAL; + switch (mask) { case IIO_CHAN_INFO_RAW: - switch (chan->type) { - case IIO_LIGHT: - ret = noa1305_measure(priv); - if (ret < 0) - return ret; - *val = ret; - return IIO_VAL_INT; - default: - break; - } - break; + return noa1305_measure(priv, val); case IIO_CHAN_INFO_SCALE: - switch (chan->type) { - case IIO_LIGHT: - return noa1305_scale(priv, val, val2); - default: - break; - } - break; + return noa1305_scale(priv, val, val2); + case IIO_CHAN_INFO_INT_TIME: + return noa1305_int_time(priv, val, val2); default: - break; + return -EINVAL; } +} - return ret; +static int noa1305_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct noa1305_priv *priv = iio_priv(indio_dev); + int i; + + if (chan->type != IIO_LIGHT) + return -EINVAL; + + if (mask != IIO_CHAN_INFO_INT_TIME) + return -EINVAL; + + if (val) /* >= 1s integration time not supported */ + return -EINVAL; + + /* Look up integration time register settings and write it if found. 
*/ + for (i = 0; i < ARRAY_SIZE(noa1305_int_time_available) / 2; i++) + if (noa1305_int_time_available[2 * i + 1] == val2) + return regmap_write(priv->regmap, NOA1305_REG_INTEGRATION_TIME, i); + + return -EINVAL; } static const struct iio_info noa1305_info = { + .read_avail = noa1305_read_avail, .read_raw = noa1305_read_raw, + .write_raw = noa1305_write_raw, }; static bool noa1305_writable_reg(struct device *dev, unsigned int reg) diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c index 4937bf6fa04673..76711c3cdf7c02 100644 --- a/drivers/iio/light/rohm-bu27034.c +++ b/drivers/iio/light/rohm-bu27034.c @@ -1,9 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * BU27034 ROHM Ambient Light Sensor + * BU27034ANUC ROHM Ambient Light Sensor * * Copyright (c) 2023, ROHM Semiconductor. - * https://fscdn.rohm.com/en/products/databook/datasheet/ic/sensor/light/bu27034nuc-e.pdf */ #include @@ -30,17 +29,15 @@ #define BU27034_REG_MODE_CONTROL2 0x42 #define BU27034_MASK_D01_GAIN GENMASK(7, 3) -#define BU27034_MASK_D2_GAIN_HI GENMASK(7, 6) -#define BU27034_MASK_D2_GAIN_LO GENMASK(2, 0) #define BU27034_REG_MODE_CONTROL3 0x43 #define BU27034_REG_MODE_CONTROL4 0x44 #define BU27034_MASK_MEAS_EN BIT(0) #define BU27034_MASK_VALID BIT(7) +#define BU27034_NUM_HW_DATA_CHANS 2 #define BU27034_REG_DATA0_LO 0x50 #define BU27034_REG_DATA1_LO 0x52 -#define BU27034_REG_DATA2_LO 0x54 -#define BU27034_REG_DATA2_HI 0x55 +#define BU27034_REG_DATA1_HI 0x53 #define BU27034_REG_MANUFACTURER_ID 0x92 #define BU27034_REG_MAX BU27034_REG_MANUFACTURER_ID @@ -88,58 +85,48 @@ enum { BU27034_CHAN_ALS, BU27034_CHAN_DATA0, BU27034_CHAN_DATA1, - BU27034_CHAN_DATA2, BU27034_NUM_CHANS }; static const unsigned long bu27034_scan_masks[] = { - GENMASK(BU27034_CHAN_DATA2, BU27034_CHAN_ALS), 0 + GENMASK(BU27034_CHAN_DATA1, BU27034_CHAN_DATA0), + GENMASK(BU27034_CHAN_DATA1, BU27034_CHAN_ALS), 0 }; /* - * Available scales with gain 1x - 4096x, timings 55, 100, 200, 400 mS + * Available scales with gain 1x - 1024x, timings 55, 100, 200, 400 mS * Time impacts to gain: 1x, 2x, 4x, 8x. * - * => Max total gain is HWGAIN * gain by integration time (8 * 4096) = 32768 + * => Max total gain is HWGAIN * gain by integration time (8 * 1024) = 8192 + * if 1x gain is scale 1, scale for 2x gain is 0.5, 4x => 0.25, + * ... 8192x => 0.0001220703125 => 122070.3125 nanos * - * Using NANO precision for scale we must use scale 64x corresponding gain 1x - * to avoid precision loss. (32x would result scale 976 562.5(nanos). + * Using NANO precision for scale, we must use scale 16x corresponding gain 1x + * to avoid precision loss. (8x would result scale 976 562.5(nanos). 
*/ -#define BU27034_SCALE_1X 64 +#define BU27034_SCALE_1X 16 /* See the data sheet for the "Gain Setting" table */ #define BU27034_GSEL_1X 0x00 /* 00000 */ #define BU27034_GSEL_4X 0x08 /* 01000 */ -#define BU27034_GSEL_16X 0x0a /* 01010 */ #define BU27034_GSEL_32X 0x0b /* 01011 */ -#define BU27034_GSEL_64X 0x0c /* 01100 */ #define BU27034_GSEL_256X 0x18 /* 11000 */ #define BU27034_GSEL_512X 0x19 /* 11001 */ #define BU27034_GSEL_1024X 0x1a /* 11010 */ -#define BU27034_GSEL_2048X 0x1b /* 11011 */ -#define BU27034_GSEL_4096X 0x1c /* 11100 */ /* Available gain settings */ static const struct iio_gain_sel_pair bu27034_gains[] = { GAIN_SCALE_GAIN(1, BU27034_GSEL_1X), GAIN_SCALE_GAIN(4, BU27034_GSEL_4X), - GAIN_SCALE_GAIN(16, BU27034_GSEL_16X), GAIN_SCALE_GAIN(32, BU27034_GSEL_32X), - GAIN_SCALE_GAIN(64, BU27034_GSEL_64X), GAIN_SCALE_GAIN(256, BU27034_GSEL_256X), GAIN_SCALE_GAIN(512, BU27034_GSEL_512X), GAIN_SCALE_GAIN(1024, BU27034_GSEL_1024X), - GAIN_SCALE_GAIN(2048, BU27034_GSEL_2048X), - GAIN_SCALE_GAIN(4096, BU27034_GSEL_4096X), }; /* - * The IC has 5 modes for sampling time. 5 mS mode is exceptional as it limits - * the data collection to data0-channel only and cuts the supported range to - * 10 bit. It is not supported by the driver. - * - * "normal" modes are 55, 100, 200 and 400 mS modes - which do have direct - * multiplying impact to the register values (similar to gain). + * Measurement modes are 55, 100, 200 and 400 mS modes - which do have direct + * multiplying impact to the data register values (similar to gain). * * This means that if meas-mode is changed for example from 400 => 200, * the scale is doubled. Eg, time impact to total gain is x1, x2, x4, x8. @@ -156,13 +143,13 @@ static const struct iio_itime_sel_mul bu27034_itimes[] = { GAIN_SCALE_ITIME_US(55000, BU27034_MEAS_MODE_55MS, 1), }; -#define BU27034_CHAN_DATA(_name, _ch2) \ +#define BU27034_CHAN_DATA(_name) \ { \ .type = IIO_INTENSITY, \ .channel = BU27034_CHAN_##_name, \ - .channel2 = (_ch2), \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ - BIT(IIO_CHAN_INFO_SCALE), \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_HARDWAREGAIN), \ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE), \ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \ .info_mask_shared_by_all_available = \ @@ -195,13 +182,12 @@ static const struct iio_chan_spec bu27034_channels[] = { /* * The BU27034 DATA0 and DATA1 channels are both on the visible light * area (mostly). The data0 sensitivity peaks at 500nm, DATA1 at 600nm. - * These wave lengths are pretty much on the border of colours making - * these a poor candidates for R/G/B standardization. Hence they're both - * marked as clear channels + * These wave lengths are cyan(ish) and orange(ish), making these + * sub-optimal candidates for R/G/B standardization. Hence the + * colour modifier is omitted.
*/ - BU27034_CHAN_DATA(DATA0, IIO_MOD_LIGHT_CLEAR), - BU27034_CHAN_DATA(DATA1, IIO_MOD_LIGHT_CLEAR), - BU27034_CHAN_DATA(DATA2, IIO_MOD_LIGHT_IR), + BU27034_CHAN_DATA(DATA0), + BU27034_CHAN_DATA(DATA1), IIO_CHAN_SOFT_TIMESTAMP(4), }; @@ -215,10 +201,10 @@ struct bu27034_data { struct mutex mutex; struct iio_gts gts; struct task_struct *task; - __le16 raw[3]; + __le16 raw[BU27034_NUM_HW_DATA_CHANS]; struct { u32 mlux; - __le16 channels[3]; + __le16 channels[BU27034_NUM_HW_DATA_CHANS]; s64 ts __aligned(8); } scan; }; @@ -232,7 +218,7 @@ static const struct regmap_range bu27034_volatile_ranges[] = { .range_max = BU27034_REG_MODE_CONTROL4, }, { .range_min = BU27034_REG_DATA0_LO, - .range_max = BU27034_REG_DATA2_HI, + .range_max = BU27034_REG_DATA1_HI, }, }; @@ -244,7 +230,7 @@ static const struct regmap_access_table bu27034_volatile_regs = { static const struct regmap_range bu27034_read_only_ranges[] = { { .range_min = BU27034_REG_DATA0_LO, - .range_max = BU27034_REG_DATA2_HI, + .range_max = BU27034_REG_DATA1_HI, }, { .range_min = BU27034_REG_MANUFACTURER_ID, .range_max = BU27034_REG_MANUFACTURER_ID, @@ -273,41 +259,17 @@ struct bu27034_gain_check { static int bu27034_get_gain_sel(struct bu27034_data *data, int chan) { + int reg[] = { + [BU27034_CHAN_DATA0] = BU27034_REG_MODE_CONTROL2, + [BU27034_CHAN_DATA1] = BU27034_REG_MODE_CONTROL3, + }; int ret, val; - switch (chan) { - case BU27034_CHAN_DATA0: - case BU27034_CHAN_DATA1: - { - int reg[] = { - [BU27034_CHAN_DATA0] = BU27034_REG_MODE_CONTROL2, - [BU27034_CHAN_DATA1] = BU27034_REG_MODE_CONTROL3, - }; - ret = regmap_read(data->regmap, reg[chan], &val); - if (ret) - return ret; - - return FIELD_GET(BU27034_MASK_D01_GAIN, val); - } - case BU27034_CHAN_DATA2: - { - int d2_lo_bits = fls(BU27034_MASK_D2_GAIN_LO); - - ret = regmap_read(data->regmap, BU27034_REG_MODE_CONTROL2, &val); - if (ret) - return ret; + ret = regmap_read(data->regmap, reg[chan], &val); + if (ret) + return ret; - /* - * The data2 channel gain is composed by 5 non continuous bits - * [7:6], [2:0]. Thus when we combine the 5-bit 'selector' - * from register value we must right shift the high bits by 3. - */ - return FIELD_GET(BU27034_MASK_D2_GAIN_HI, val) << d2_lo_bits | - FIELD_GET(BU27034_MASK_D2_GAIN_LO, val); - } - default: - return -EINVAL; - } + return FIELD_GET(BU27034_MASK_D01_GAIN, val); } static int bu27034_get_gain(struct bu27034_data *data, int chan, int *gain) @@ -390,44 +352,9 @@ static int bu27034_write_gain_sel(struct bu27034_data *data, int chan, int sel) }; int mask, val; - if (chan != BU27034_CHAN_DATA0 && chan != BU27034_CHAN_DATA1) - return -EINVAL; - val = FIELD_PREP(BU27034_MASK_D01_GAIN, sel); - mask = BU27034_MASK_D01_GAIN; - if (chan == BU27034_CHAN_DATA0) { - /* - * We keep the same gain for channel 2 as we set for channel 0 - * We can't allow them to be individually controlled because - * setting one will impact also the other. Also, if we don't - * always update both gains we may result unsupported bit - * combinations. - * - * This is not nice but this is yet another place where the - * user space must be prepared to surprizes. Namely, see chan 2 - * gain changed when chan 0 gain is changed. - * - * This is not fatal for most users though. I don't expect the - * channel 2 to be used in any generic cases - the intensity - * values provided by the sensor for IR area are not openly - * documented. Also, channel 2 is not used for visible light. 
- * - * So, if there is application which is written to utilize the - * channel 2 - then it is probably specifically targeted to this - * sensor and knows how to utilize those values. It is safe to - * hope such user can also cope with the gain changes. - */ - mask |= BU27034_MASK_D2_GAIN_LO; - - /* - * The D2 gain bits are directly the lowest bits of selector. - * Just do add those bits to the value - */ - val |= sel & BU27034_MASK_D2_GAIN_LO; - } - return regmap_update_bits(data->regmap, reg[chan], mask, val); } @@ -435,13 +362,6 @@ static int bu27034_set_gain(struct bu27034_data *data, int chan, int gain) { int ret; - /* - * We don't allow setting channel 2 gain as it messes up the - * gain for channel 0 - which shares the high bits - */ - if (chan != BU27034_CHAN_DATA0 && chan != BU27034_CHAN_DATA1) - return -EINVAL; - ret = iio_gts_find_sel_by_gain(&data->gts, gain); if (ret < 0) return ret; @@ -565,9 +485,6 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, int ret, time_sel, gain_sel, i; bool found = false; - if (chan == BU27034_CHAN_DATA2) - return -EINVAL; - if (chan == BU27034_CHAN_ALS) { if (val == 0 && val2 == 1000000) return 0; @@ -592,9 +509,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, /* * Populate information for the other channel which should also - * maintain the scale. (Due to the HW limitations the chan2 - * gets the same gain as chan0, so we only need to explicitly - * set the chan 0 and 1). + * maintain the scale. */ if (chan == BU27034_CHAN_DATA0) gain.chan = BU27034_CHAN_DATA1; @@ -608,7 +523,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, /* * Iterate through all the times to see if we find one which * can support requested scale for requested channel, while - * maintaining the scale for other channels + * maintaining the scale for the other channel */ for (i = 0; i < data->gts.num_itime; i++) { new_time_sel = data->gts.itime_table[i].sel; @@ -623,7 +538,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, if (ret) continue; - /* Can the other channel(s) maintain scale? */ + /* Can the other channel maintain scale? 
*/ ret = iio_gts_find_new_gain_sel_by_old_gain_time( &data->gts, gain.old_gain, time_sel, new_time_sel, &gain.new_gain); @@ -635,7 +550,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, } if (!found) { dev_dbg(data->dev, - "Can't set scale maintaining other channels\n"); + "Can't set scale maintaining other channel\n"); ret = -EINVAL; goto unlock_out; @@ -659,102 +574,21 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, } /* - * for (D1/D0 < 0.87): - * lx = 0.004521097 * D1 - 0.002663996 * D0 + - * 0.00012213 * D1 * D1 / D0 - * - * => 115.7400832 * ch1 / gain1 / mt - - * 68.1982976 * ch0 / gain0 / mt + - * 0.00012213 * 25600 * (ch1 / gain1 / mt) * 25600 * - * (ch1 /gain1 / mt) / (25600 * ch0 / gain0 / mt) - * - * A = 0.00012213 * 25600 * (ch1 /gain1 / mt) * 25600 * - * (ch1 /gain1 / mt) / (25600 * ch0 / gain0 / mt) - * => 0.00012213 * 25600 * (ch1 /gain1 / mt) * - * (ch1 /gain1 / mt) / (ch0 / gain0 / mt) - * => 0.00012213 * 25600 * (ch1 / gain1) * (ch1 /gain1 / mt) / - * (ch0 / gain0) - * => 0.00012213 * 25600 * (ch1 / gain1) * (ch1 /gain1 / mt) * - * gain0 / ch0 - * => 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / mt /ch0 - * - * lx = (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0) / - * mt + A - * => (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0) / - * mt + 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / mt / - * ch0 - * - * => (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0 + - * 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0) / - * mt - * - * For (0.87 <= D1/D0 < 1.00) - * lx = (0.001331* D0 + 0.0000354 * D1) * ((D1/D0 – 0.87) * (0.385) + 1) - * => (0.001331 * 256 * 100 * ch0 / gain0 / mt + 0.0000354 * 256 * - * 100 * ch1 / gain1 / mt) * ((D1/D0 - 0.87) * (0.385) + 1) - * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) * - * ((D1/D0 - 0.87) * (0.385) + 1) - * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) * - * (0.385 * D1/D0 - 0.66505) - * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) * - * (0.385 * 256 * 100 * ch1 / gain1 / mt / (256 * 100 * ch0 / gain0 / mt) - 0.66505) - * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) * - * (9856 * ch1 / gain1 / mt / (25600 * ch0 / gain0 / mt) + 0.66505) - * => 13.118336 * ch1 / (gain1 * mt) - * + 22.66064768 * ch0 / (gain0 * mt) - * + 8931.90144 * ch1 * ch1 * gain0 / - * (25600 * ch0 * gain1 * gain1 * mt) - * + 0.602694912 * ch1 / (gain1 * mt) - * - * => [0.3489024 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) - * + 22.66064768 * ch0 / gain0 - * + 13.721030912 * ch1 / gain1 - * ] / mt - * - * For (D1/D0 >= 1.00) + * for (D1/D0 < 1.5): + * lx = (0.001193 * D0 + (-0.0000747) * D1) * ((D1/D0 – 1.5) * (0.25) + 1) * - * lx = (0.001331* D0 + 0.0000354 * D1) * ((D1/D0 – 2.0) * (-0.05) + 1) - * => (0.001331* D0 + 0.0000354 * D1) * (-0.05D1/D0 + 1.1) - * => (0.001331 * 256 * 100 * ch0 / gain0 / mt + 0.0000354 * 256 * - * 100 * ch1 / gain1 / mt) * (-0.05D1/D0 + 1.1) - * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) * - * (-0.05 * 256 * 100 * ch1 / gain1 / mt / (256 * 100 * ch0 / gain0 / mt) + 1.1) - * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) * - * (-1280 * ch1 / (gain1 * mt * 25600 * ch0 / gain0 / mt) + 1.1) - * => (34.0736 * ch0 * -1280 * ch1 * gain0 * mt /( gain0 * mt * gain1 * mt * 25600 * ch0) - * + 34.0736 * 1.1 * ch0 / (gain0 * mt) - * + 0.90624 * ch1 * -1280 * ch1 *gain0 * mt / (gain1 * mt *gain1 * mt * 25600 * ch0) - * + 1.1 * 0.90624 * ch1 / (gain1 * mt) - * => -43614.208 * ch1 / (gain1 * mt * 
25600) - * + 37.48096 ch0 / (gain0 * mt) - * - 1159.9872 * ch1 * ch1 * gain0 / (gain1 * gain1 * mt * 25600 * ch0) - * + 0.996864 ch1 / (gain1 * mt) - * => [ - * - 0.045312 * ch1 * ch1 * gain0 / (gain1 * gain1 * ch0) - * - 0.706816 * ch1 / gain1 - * + 37.48096 ch0 /gain0 - * ] * mt + * => -0.000745625 * D0 + 0.0002515625 * D1 + -0.000018675 * D1 * D1 / D0 * + * => (6.44 * ch1 / gain1 + 19.088 * ch0 / gain0 - + * 0.47808 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0) / + * mt * - * So, the first case (D1/D0 < 0.87) can be computed to a form: + * Else + * lx = 0.001193 * D0 - 0.0000747 * D1 * - * lx = (3.126528 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) + - * 115.7400832 * ch1 / gain1 + - * -68.1982976 * ch0 / gain0 - * / mt - * - * Second case (0.87 <= D1/D0 < 1.00) goes to form: - * - * => [0.3489024 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) + - * 13.721030912 * ch1 / gain1 + - * 22.66064768 * ch0 / gain0 - * ] / mt - * - * Third case (D1/D0 >= 1.00) goes to form: - * => [-0.045312 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) + - * -0.706816 * ch1 / gain1 + - * 37.48096 ch0 /(gain0 - * ] / mt + * => (1.91232 * ch1 / gain1 + 30.5408 * ch0 / gain0 + + * [0 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0] ) / + * mt * * This can be unified to format: * lx = [ @@ -764,19 +598,14 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan, * ] / mt * * For case 1: - * A = 3.126528, - * B = 115.7400832 - * C = -68.1982976 + * A = -0.47808, + * B = 6.44, + * C = 19.088 * * For case 2: - * A = 0.3489024 - * B = 13.721030912 - * C = 22.66064768 - * - * For case 3: - * A = -0.045312 - * B = -0.706816 - * C = 37.48096 + * A = 0 + * B = 1.91232 + * C = 30.5408 */ struct bu27034_lx_coeff { @@ -881,21 +710,16 @@ static int bu27034_fixp_calc_lx(unsigned int ch0, unsigned int ch1, { static const struct bu27034_lx_coeff coeff[] = { { - .A = 31265280, /* 3.126528 */ - .B = 1157400832, /*115.7400832 */ - .C = 681982976, /* -68.1982976 */ - .is_neg = {false, false, true}, + .A = 4780800, /* -0.47808 */ + .B = 64400000, /* 6.44 */ + .C = 190880000, /* 19.088 */ + .is_neg = { true, false, false }, }, { - .A = 3489024, /* 0.3489024 */ - .B = 137210309, /* 13.721030912 */ - .C = 226606476, /* 22.66064768 */ + .A = 0, /* 0 */ + .B = 19123200, /* 1.91232 */ + .C = 305408000, /* 30.5408 */ /* All terms positive */ - }, { - .A = 453120, /* -0.045312 */ - .B = 7068160, /* -0.706816 */ - .C = 374809600, /* 37.48096 */ - .is_neg = {true, true, false}, - } + }, }; const struct bu27034_lx_coeff *c = &coeff[coeff_idx]; u64 res = 0, terms[3]; @@ -967,7 +791,6 @@ static int bu27034_read_result(struct bu27034_data *data, int chan, int *res) int reg[] = { [BU27034_CHAN_DATA0] = BU27034_REG_DATA0_LO, [BU27034_CHAN_DATA1] = BU27034_REG_DATA1_LO, - [BU27034_CHAN_DATA2] = BU27034_REG_DATA2_LO, }; int valid, ret; __le16 val; @@ -1034,7 +857,7 @@ static int bu27034_get_single_result(struct bu27034_data *data, int chan, { int ret; - if (chan < BU27034_CHAN_DATA0 || chan > BU27034_CHAN_DATA2) + if (chan < BU27034_CHAN_DATA0 || chan > BU27034_CHAN_DATA1) return -EINVAL; ret = bu27034_meas_set(data, true); @@ -1059,12 +882,10 @@ static int bu27034_get_single_result(struct bu27034_data *data, int chan, * D1 = data1/ch1_gain/meas_time_ms * 25600 * * Then: - * if (D1/D0 < 0.87) - * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 0.87) * 3.45 + 1) - * else if (D1/D0 < 1) - * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 0.87) * 0.385 + 1) - * else - * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 2) * -0.05 + 1) + * If (D1/D0 
< 1.5) + * lx = (0.001193 * D0 + (-0.0000747) * D1) * ((D1 / D0 – 1.5) * 0.25 + 1) + * Else + * lx = (0.001193 * D0 + (-0.0000747) * D1) * * We use it here. Users who have for example some colored lens * need to modify the calculation but I hope this gives a starting point for @@ -1115,12 +936,10 @@ static int bu27034_calc_mlux(struct bu27034_data *data, __le16 *res, int *val) d1_d0_ratio_scaled /= ch0 * gain1; } - if (d1_d0_ratio_scaled < 87) + if (d1_d0_ratio_scaled < 150) ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 0); - else if (d1_d0_ratio_scaled < 100) - ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 1); else - ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 2); + ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 1); if (ret < 0) return ret; @@ -1133,7 +952,7 @@ static int bu27034_calc_mlux(struct bu27034_data *data, __le16 *res, int *val) static int bu27034_get_mlux(struct bu27034_data *data, int chan, int *val) { - __le16 res[3]; + __le16 res[BU27034_NUM_HW_DATA_CHANS]; int ret; ret = bu27034_meas_set(data, true); @@ -1171,6 +990,13 @@ static int bu27034_read_raw(struct iio_dev *idev, return IIO_VAL_INT_PLUS_MICRO; + case IIO_CHAN_INFO_HARDWAREGAIN: + ret = bu27034_get_gain(data, chan->channel, val); + if (ret) + return ret; + + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: return bu27034_get_scale(data, chan->channel, val, val2); @@ -1215,12 +1041,17 @@ static int bu27034_write_raw_get_fmt(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, long mask) { + struct bu27034_data *data = iio_priv(indio_dev); switch (mask) { case IIO_CHAN_INFO_SCALE: return IIO_VAL_INT_PLUS_NANO; case IIO_CHAN_INFO_INT_TIME: return IIO_VAL_INT_PLUS_MICRO; + case IIO_CHAN_INFO_HARDWAREGAIN: + dev_dbg(data->dev, + "HARDWAREGAIN is read-only, use scale to set\n"); + return -EINVAL; default: return -EINVAL; } @@ -1501,7 +1332,7 @@ static int bu27034_probe(struct i2c_client *i2c) } static const struct of_device_id bu27034_of_match[] = { - { .compatible = "rohm,bu27034" }, + { .compatible = "rohm,bu27034anuc" }, { } }; MODULE_DEVICE_TABLE(of, bu27034_of_match); diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c index eeff6cc792f222..44fa152dbd24c2 100644 --- a/drivers/iio/light/si1133.c +++ b/drivers/iio/light/si1133.c @@ -17,7 +17,7 @@ #include -#include +#include #define SI1133_REG_PART_ID 0x00 #define SI1133_REG_REV_ID 0x01 diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c index 77666b780a5c5a..66abda021696a6 100644 --- a/drivers/iio/light/si1145.c +++ b/drivers/iio/light/si1145.c @@ -465,11 +465,10 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) goto done; } - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { int run = 1; - while (i + run < indio_dev->masklength) { + while (i + run < iio_get_masklength(indio_dev)) { if (!test_bit(i + run, indio_dev->active_scan_mask)) break; if (indio_dev->channels[i + run].address != @@ -514,7 +513,7 @@ static int si1145_set_chlist(struct iio_dev *indio_dev, unsigned long scan_mask) if (data->scan_mask == scan_mask) return 0; - for_each_set_bit(i, &scan_mask, indio_dev->masklength) { + for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) { switch (indio_dev->channels[i].address) { case SI1145_REG_ALSVIS_DATA: reg |= SI1145_CHLIST_EN_ALSVIS; diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index e3470d6743effd..ed20b67145463d 100644 --- 
a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -35,6 +35,7 @@ #define STK3310_STATE_EN_ALS BIT(1) #define STK3310_STATE_STANDBY 0x00 +#define STK3013_CHIP_ID_VAL 0x31 #define STK3310_CHIP_ID_VAL 0x13 #define STK3311_CHIP_ID_VAL 0x1D #define STK3311A_CHIP_ID_VAL 0x15 @@ -84,6 +85,7 @@ static const struct reg_field stk3310_reg_field_flag_nf = REG_FIELD(STK3310_REG_FLAG, 0, 0); static const u8 stk3310_chip_ids[] = { + STK3013_CHIP_ID_VAL, STK3310_CHIP_ID_VAL, STK3311A_CHIP_ID_VAL, STK3311S34_CHIP_ID_VAL, @@ -496,7 +498,7 @@ static int stk3310_init(struct iio_dev *indio_dev) ret = stk3310_check_chip_id(chipid); if (ret < 0) - dev_warn(&client->dev, "unknown chip id: 0x%x\n", chipid); + dev_info(&client->dev, "new unknown chip id: 0x%x\n", chipid); state = STK3310_STATE_EN_ALS | STK3310_STATE_EN_PS; ret = stk3310_set_state(data, state); @@ -700,6 +702,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stk3310_pm_ops, stk3310_suspend, stk3310_resume); static const struct i2c_device_id stk3310_i2c_id[] = { + { "STK3013" }, { "STK3310" }, { "STK3311" }, { "STK3335" }, @@ -708,6 +711,7 @@ static const struct i2c_device_id stk3310_i2c_id[] = { MODULE_DEVICE_TABLE(i2c, stk3310_i2c_id); static const struct acpi_device_id stk3310_acpi_id[] = { + {"STK3013", 0}, {"STK3310", 0}, {"STK3311", 0}, {} @@ -716,6 +720,7 @@ static const struct acpi_device_id stk3310_acpi_id[] = { MODULE_DEVICE_TABLE(acpi, stk3310_acpi_id); static const struct of_device_id stk3310_of_match[] = { + { .compatible = "sensortek,stk3013", }, { .compatible = "sensortek,stk3310", }, { .compatible = "sensortek,stk3311", }, { .compatible = "sensortek,stk3335", }, diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c index c9566615b96441..4fecdf10aeb197 100644 --- a/drivers/iio/light/tcs3414.c +++ b/drivers/iio/light/tcs3414.c @@ -206,8 +206,7 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p) struct tcs3414_data *data = iio_priv(indio_dev); int i, j = 0; - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { int ret = i2c_smbus_read_word_data(data->client, TCS3414_DATA_GREEN + 2*i); if (ret < 0) diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c index 89384dba83dd11..04452b4664f306 100644 --- a/drivers/iio/light/tcs3472.c +++ b/drivers/iio/light/tcs3472.c @@ -383,8 +383,7 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p) if (ret < 0) goto done; - for_each_set_bit(i, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, i) { ret = i2c_smbus_read_word_data(data->client, TCS3472_CDATA + 2*i); if (ret < 0) diff --git a/drivers/iio/light/tsl2591.c b/drivers/iio/light/tsl2591.c index 7bdbfe72f0f041..850c2465992fa0 100644 --- a/drivers/iio/light/tsl2591.c +++ b/drivers/iio/light/tsl2591.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c index 327f94e447af49..604be60e92acae 100644 --- a/drivers/iio/light/zopt2201.c +++ b/drivers/iio/light/zopt2201.c @@ -19,7 +19,7 @@ #include #include -#include +#include #define ZOPT2201_DRV_NAME "zopt2201" diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index cd2917d719047b..8eb718f5e50f35 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig @@ -39,7 +39,7 @@ config AK8975 select IIO_TRIGGERED_BUFFER help Say yes here to build support for Asahi Kasei AK8975, AK8963, - 
AK09911, AK09912 or AK09916 3-Axis Magnetometer. + AK09911, AK09912, AK09916 or AK09918 3-Axis Magnetometer. To compile this driver as a module, choose M here: the module will be called ak8975. diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index dd466c5fa6214f..18077fb463a912 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -78,6 +78,7 @@ */ #define AK09912_REG_WIA1 0x00 #define AK09912_REG_WIA2 0x01 +#define AK09918_DEVICE_ID 0x0C #define AK09916_DEVICE_ID 0x09 #define AK09912_DEVICE_ID 0x04 #define AK09911_DEVICE_ID 0x05 @@ -209,6 +210,7 @@ enum asahi_compass_chipset { AK09911, AK09912, AK09916, + AK09918, }; enum ak_ctrl_reg_addr { @@ -371,6 +373,34 @@ static const struct ak_def ak_def_array[] = { AK09912_REG_HXL, AK09912_REG_HYL, AK09912_REG_HZL}, + }, + [AK09918] = { + /* ak09918 is register compatible with ak09912; this entry avoids + * unknown id messages. + */ + .type = AK09918, + .raw_to_gauss = ak09912_raw_to_gauss, + .range = 32752, + .ctrl_regs = { + AK09912_REG_ST1, + AK09912_REG_ST2, + AK09912_REG_CNTL2, + AK09912_REG_ASAX, + AK09912_MAX_REGS}, + .ctrl_masks = { + AK09912_REG_ST1_DRDY_MASK, + AK09912_REG_ST2_HOFL_MASK, + 0, + AK09912_REG_CNTL2_MODE_MASK}, + .ctrl_modes = { + AK09912_REG_CNTL_MODE_POWER_DOWN, + AK09912_REG_CNTL_MODE_ONCE, + AK09912_REG_CNTL_MODE_SELF_TEST, + AK09912_REG_CNTL_MODE_FUSE_ROM}, + .data_regs = { + AK09912_REG_HXL, + AK09912_REG_HYL, + AK09912_REG_HZL}, } }; @@ -452,6 +482,7 @@ static int ak8975_who_i_am(struct i2c_client *client, /* * Signature for each device: * Device | WIA1 | WIA2 + * AK09918 | DEVICE_ID_| AK09918_DEVICE_ID * AK09916 | DEVICE_ID_| AK09916_DEVICE_ID * AK09912 | DEVICE_ID | AK09912_DEVICE_ID * AK09911 | DEVICE_ID | AK09911_DEVICE_ID @@ -484,10 +515,18 @@ static int ak8975_who_i_am(struct i2c_client *client, if (wia_val[1] == AK09916_DEVICE_ID) return 0; break; - default: - dev_err(&client->dev, "Type %d unknown\n", type); + case AK09918: + if (wia_val[1] == AK09918_DEVICE_ID) + return 0; + break; } - return -ENODEV; + + dev_info(&client->dev, "Device ID %x is unknown.\n", wia_val[1]); + /* + * Let the driver probe on unknown ids to support more register + * compatible variants. + */ + return 0; } /* @@ -692,22 +731,8 @@ static int ak8975_start_read_axis(struct ak8975_data *data, if (ret < 0) return ret; - /* This will be executed only for non-interrupt based waiting case */ - if (ret & data->def->ctrl_masks[ST1_DRDY]) { - ret = i2c_smbus_read_byte_data(client, - data->def->ctrl_regs[ST2]); - if (ret < 0) { - dev_err(&client->dev, "Error in reading ST2\n"); - return ret; - } - if (ret & (data->def->ctrl_masks[ST2_DERR] | - data->def->ctrl_masks[ST2_HOFL])) { - dev_err(&client->dev, "ST2 status error 0x%x\n", ret); - return -EINVAL; - } - } - - return 0; + /* Return zero if the data is ready. */ + return !data->def->ctrl_regs[ST1_DRDY]; } /* Retrieve raw flux value for one of the x, y, or z axis. */ @@ -734,6 +759,20 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val) if (ret < 0) goto exit; + /* Read out ST2 to release the lock on measurement data.
*/ + ret = i2c_smbus_read_byte_data(client, data->def->ctrl_regs[ST2]); + if (ret < 0) { + dev_err(&client->dev, "Error in reading ST2\n"); + goto exit; + } + + if (ret & (data->def->ctrl_masks[ST2_DERR] | + data->def->ctrl_masks[ST2_HOFL])) { + dev_err(&client->dev, "ST2 status error 0x%x\n", ret); + ret = -EINVAL; + goto exit; + } + mutex_unlock(&data->lock); pm_runtime_mark_last_busy(&data->client->dev); @@ -1067,6 +1106,7 @@ static const struct i2c_device_id ak8975_id[] = { {"ak09911", (kernel_ulong_t)&ak_def_array[AK09911] }, {"ak09912", (kernel_ulong_t)&ak_def_array[AK09912] }, {"ak09916", (kernel_ulong_t)&ak_def_array[AK09916] }, + {"ak09918", (kernel_ulong_t)&ak_def_array[AK09918] }, {} }; MODULE_DEVICE_TABLE(i2c, ak8975_id); @@ -1081,7 +1121,7 @@ static const struct of_device_id ak8975_of_match[] = { { .compatible = "asahi-kasei,ak09912", .data = &ak_def_array[AK09912] }, { .compatible = "ak09912", .data = &ak_def_array[AK09912] }, { .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] }, - { .compatible = "ak09916", .data = &ak_def_array[AK09916] }, + { .compatible = "asahi-kasei,ak09918", .data = &ak_def_array[AK09918] }, {} }; MODULE_DEVICE_TABLE(of, ak8975_of_match); diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c index 42b70cd42b3935..baab918b38254b 100644 --- a/drivers/iio/magnetometer/rm3100-core.c +++ b/drivers/iio/magnetometer/rm3100-core.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include "rm3100.h" @@ -464,7 +464,7 @@ static irqreturn_t rm3100_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; unsigned long scan_mask = *indio_dev->active_scan_mask; - unsigned int mask_len = indio_dev->masklength; + unsigned int mask_len = iio_get_masklength(indio_dev); struct rm3100_data *data = iio_priv(indio_dev); struct regmap *regmap = data->regmap; int ret, i, bit; diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c index 7b041bb386931c..65011a8598d332 100644 --- a/drivers/iio/magnetometer/yamaha-yas530.c +++ b/drivers/iio/magnetometer/yamaha-yas530.c @@ -43,7 +43,7 @@ #include #include -#include +#include /* Commonly used registers */ #define YAS5XX_DEVICE_ID 0x80 diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig index 3ad38506028ef0..ce369dbb17fc15 100644 --- a/drivers/iio/pressure/Kconfig +++ b/drivers/iio/pressure/Kconfig @@ -31,6 +31,8 @@ config BMP280 select REGMAP select BMP280_I2C if (I2C) select BMP280_SPI if (SPI_MASTER) + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Bosch Sensortec BMP180, BMP280, BMP380 and BMP580 pressure and temperature sensors. Also supports the BME280 with @@ -248,6 +250,15 @@ config MS5637 This driver can also be built as a module. If so, the module will be called ms5637. +config SDP500 + tristate "Sensirion SDP500 differential pressure sensor I2C driver" + depends on I2C + help + Say Y here to build support for Sensirion SDP500 differential pressure + sensor I2C driver. + To compile this driver as a module, choose M here: the core module + will be called sdp500. 
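A sketch for orientation, not code taken from this patch: the IIO_BUFFER and IIO_TRIGGERED_BUFFER selects added to the BMP280 entry above are needed because the driver grows a triggered buffer later in this series, the same facility bh1745 sets up near the top of this section. The snippet below only illustrates the generic shape that support takes in an IIO driver; every "example_" symbol is an illustrative placeholder, and the sketch assumes just two data channels.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		u32 chans[2];			/* two data channels in this sketch */
		s64 timestamp __aligned(8);	/* IIO expects an 8-byte aligned timestamp */
	} scan = { };
	int i, j = 0;

	/* Gather only the channels enabled in the active scan mask */
	iio_for_each_active_channel(indio_dev, i)
		scan.chans[j++] = 0;		/* a real driver reads the hardware here */

	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
					   iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

static int example_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	/* No top half; the threaded handler above fills and pushes the scan */
	return devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
					       example_trigger_handler, NULL);
}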
+ config IIO_ST_PRESS tristate "STMicroelectronics pressure sensor Driver" depends on (I2C || SPI_MASTER) && SYSFS diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile index a93709e357607d..6482288e07ee4f 100644 --- a/drivers/iio/pressure/Makefile +++ b/drivers/iio/pressure/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_MS5611) += ms5611_core.o obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o obj-$(CONFIG_MS5611_SPI) += ms5611_spi.o obj-$(CONFIG_MS5637) += ms5637.o +obj-$(CONFIG_SDP500) += sdp500.o obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o st_pressure-y := st_pressure_core.o st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 49081b72961802..a8b97b9b046182 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -41,9 +41,12 @@ #include #include +#include #include +#include +#include -#include +#include #include "bmp280.h" @@ -134,46 +137,169 @@ enum { BMP380_P11 = 20, }; +enum bmp280_scan { + BMP280_PRESS, + BMP280_TEMP, + BME280_HUMID, +}; + static const struct iio_chan_spec bmp280_channels[] = { { .type = IIO_PRESSURE, + /* PROCESSED maintained for ABI backwards compatibility */ + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .scan_index = 0, + .scan_type = { + .sign = 'u', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_CPU, + }, + }, + { + .type = IIO_TEMP, + /* PROCESSED maintained for ABI backwards compatibility */ + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .scan_index = 1, + .scan_type = { + .sign = 's', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_CPU, + }, + }, + IIO_CHAN_SOFT_TIMESTAMP(2), +}; + +static const struct iio_chan_spec bme280_channels[] = { + { + .type = IIO_PRESSURE, + /* PROCESSED maintained for ABI backwards compatibility */ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .scan_index = 0, + .scan_type = { + .sign = 'u', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_CPU, + }, }, { .type = IIO_TEMP, + /* PROCESSED maintained for ABI backwards compatibility */ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .scan_index = 1, + .scan_type = { + .sign = 's', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_CPU, + }, }, { .type = IIO_HUMIDITYRELATIVE, + /* PROCESSED maintained for ABI backwards compatibility */ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .scan_index = 2, + .scan_type = { + .sign = 'u', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_CPU, + }, }, + IIO_CHAN_SOFT_TIMESTAMP(3), }; static const struct iio_chan_spec bmp380_channels[] = { { .type = IIO_PRESSURE, + /* PROCESSED maintained for ABI backwards compatibility */ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), + .scan_index = 0, + .scan_type = { + .sign = 'u', + .realbits = 32, + 
.storagebits = 32, + .endianness = IIO_CPU, + }, }, { .type = IIO_TEMP, + /* PROCESSED maintained for ABI backwards compatibility */ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), + .scan_index = 1, + .scan_type = { + .sign = 's', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_CPU, + }, }, + IIO_CHAN_SOFT_TIMESTAMP(2), +}; + +static const struct iio_chan_spec bmp580_channels[] = { { - .type = IIO_HUMIDITYRELATIVE, + .type = IIO_PRESSURE, + /* PROCESSED maintained for ABI backwards compatibility */ + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | + BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), + .scan_index = 0, + .scan_type = { + .sign = 'u', + .realbits = 24, + .storagebits = 32, + .endianness = IIO_LE, + }, + }, + { + .type = IIO_TEMP, + /* PROCESSED maintained for ABI backwards compatibility */ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), + .scan_index = 1, + .scan_type = { + .sign = 's', + .realbits = 24, + .storagebits = 32, + .endianness = IIO_LE, + }, }, + IIO_CHAN_SOFT_TIMESTAMP(2), }; static int bmp280_read_calib(struct bmp280_data *data) @@ -289,7 +415,7 @@ static int bme280_read_humid_adc(struct bmp280_data *data, u16 *adc_humidity) int ret; ret = regmap_bulk_read(data->regmap, BME280_REG_HUMIDITY_MSB, - &data->be16, sizeof(data->be16)); + &data->be16, BME280_NUM_HUMIDITY_BYTES); if (ret) { dev_err(data->dev, "failed to read humidity\n"); return ret; @@ -335,7 +461,7 @@ static int bmp280_read_temp_adc(struct bmp280_data *data, u32 *adc_temp) int ret; ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB, - data->buf, sizeof(data->buf)); + data->buf, BMP280_NUM_TEMP_BYTES); if (ret) { dev_err(data->dev, "failed to read temperature\n"); return ret; @@ -396,7 +522,7 @@ static int bmp280_read_press_adc(struct bmp280_data *data, u32 *adc_press) int ret; ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, - data->buf, sizeof(data->buf)); + data->buf, BMP280_NUM_PRESS_BYTES); if (ret) { dev_err(data->dev, "failed to read pressure\n"); return ret; @@ -445,10 +571,8 @@ static u32 bmp280_compensate_press(struct bmp280_data *data, return (u32)p; } -static int bmp280_read_temp(struct bmp280_data *data, - int *val, int *val2) +static int bmp280_read_temp(struct bmp280_data *data, s32 *comp_temp) { - s32 comp_temp; u32 adc_temp; int ret; @@ -456,16 +580,15 @@ static int bmp280_read_temp(struct bmp280_data *data, if (ret) return ret; - comp_temp = bmp280_compensate_temp(data, adc_temp); + *comp_temp = bmp280_compensate_temp(data, adc_temp); - *val = comp_temp * 10; - return IIO_VAL_INT; + return 0; } -static int bmp280_read_press(struct bmp280_data *data, - int *val, int *val2) +static int bmp280_read_press(struct bmp280_data *data, u32 *comp_press) { - u32 comp_press, adc_press, t_fine; + u32 adc_press; + s32 t_fine; int ret; ret = bmp280_get_t_fine(data, &t_fine); @@ -476,17 +599,13 @@ static int bmp280_read_press(struct bmp280_data *data, if (ret) return ret; - comp_press = 
bmp280_compensate_press(data, adc_press, t_fine); + *comp_press = bmp280_compensate_press(data, adc_press, t_fine); - *val = comp_press; - *val2 = 256000; - - return IIO_VAL_FRACTIONAL; + return 0; } -static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2) +static int bme280_read_humid(struct bmp280_data *data, u32 *comp_humidity) { - u32 comp_humidity; u16 adc_humidity; s32 t_fine; int ret; @@ -499,11 +618,9 @@ static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2) if (ret) return ret; - comp_humidity = bme280_compensate_humidity(data, adc_humidity, t_fine); + *comp_humidity = bme280_compensate_humidity(data, adc_humidity, t_fine); - *val = comp_humidity * 1000 / 1024; - - return IIO_VAL_INT; + return 0; } static int bmp280_read_raw_impl(struct iio_dev *indio_dev, @@ -511,6 +628,8 @@ static int bmp280_read_raw_impl(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct bmp280_data *data = iio_priv(indio_dev); + int chan_value; + int ret; guard(mutex)(&data->lock); @@ -518,11 +637,72 @@ static int bmp280_read_raw_impl(struct iio_dev *indio_dev, case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { case IIO_HUMIDITYRELATIVE: - return data->chip_info->read_humid(data, val, val2); + ret = data->chip_info->read_humid(data, &chan_value); + if (ret) + return ret; + + *val = data->chip_info->humid_coeffs[0] * chan_value; + *val2 = data->chip_info->humid_coeffs[1]; + return data->chip_info->humid_coeffs_type; + case IIO_PRESSURE: + ret = data->chip_info->read_press(data, &chan_value); + if (ret) + return ret; + + *val = data->chip_info->press_coeffs[0] * chan_value; + *val2 = data->chip_info->press_coeffs[1]; + return data->chip_info->press_coeffs_type; + case IIO_TEMP: + ret = data->chip_info->read_temp(data, &chan_value); + if (ret) + return ret; + + *val = data->chip_info->temp_coeffs[0] * chan_value; + *val2 = data->chip_info->temp_coeffs[1]; + return data->chip_info->temp_coeffs_type; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_RAW: + switch (chan->type) { + case IIO_HUMIDITYRELATIVE: + ret = data->chip_info->read_humid(data, &chan_value); + if (ret) + return ret; + + *val = chan_value; + return IIO_VAL_INT; + case IIO_PRESSURE: + ret = data->chip_info->read_press(data, &chan_value); + if (ret) + return ret; + + *val = chan_value; + return IIO_VAL_INT; + case IIO_TEMP: + ret = data->chip_info->read_temp(data, &chan_value); + if (ret) + return ret; + + *val = chan_value; + return IIO_VAL_INT; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_HUMIDITYRELATIVE: + *val = data->chip_info->humid_coeffs[0]; + *val2 = data->chip_info->humid_coeffs[1]; + return data->chip_info->humid_coeffs_type; case IIO_PRESSURE: - return data->chip_info->read_press(data, val, val2); + *val = data->chip_info->press_coeffs[0]; + *val2 = data->chip_info->press_coeffs[1]; + return data->chip_info->press_coeffs_type; case IIO_TEMP: - return data->chip_info->read_temp(data, val, val2); + *val = data->chip_info->temp_coeffs[0]; + *val2 = data->chip_info->temp_coeffs[1]; + return data->chip_info->temp_coeffs_type; default: return -EINVAL; } @@ -793,6 +973,16 @@ static const struct iio_info bmp280_info = { .write_raw = &bmp280_write_raw, }; +static const unsigned long bmp280_avail_scan_masks[] = { + BIT(BMP280_TEMP) | BIT(BMP280_PRESS), + 0 +}; + +static const unsigned long bme280_avail_scan_masks[] = { + BIT(BME280_HUMID) | BIT(BMP280_TEMP) | BIT(BMP280_PRESS), + 0 +}; + static int bmp280_chip_config(struct 
bmp280_data *data) { u8 osrs = FIELD_PREP(BMP280_OSRS_TEMP_MASK, data->oversampling_temp + 1) | @@ -820,8 +1010,57 @@ static int bmp280_chip_config(struct bmp280_data *data) return ret; } +static irqreturn_t bmp280_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bmp280_data *data = iio_priv(indio_dev); + s32 adc_temp, adc_press, t_fine; + int ret; + + guard(mutex)(&data->lock); + + /* Burst read data registers */ + ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, + data->buf, BMP280_BURST_READ_BYTES); + if (ret) { + dev_err(data->dev, "failed to burst read sensor data\n"); + goto out; + } + + /* Temperature calculations */ + adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[3])); + if (adc_temp == BMP280_TEMP_SKIPPED) { + dev_err(data->dev, "reading temperature skipped\n"); + goto out; + } + + data->sensor_data[1] = bmp280_compensate_temp(data, adc_temp); + + /* Pressure calculations */ + adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[0])); + if (adc_press == BMP280_PRESS_SKIPPED) { + dev_err(data->dev, "reading pressure skipped\n"); + goto out; + } + + t_fine = bmp280_calc_t_fine(data, adc_temp); + + data->sensor_data[0] = bmp280_compensate_press(data, adc_press, t_fine); + + iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data, + iio_get_time_ns(indio_dev)); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 }; static const u8 bmp280_chip_ids[] = { BMP280_CHIP_ID }; +static const int bmp280_temp_coeffs[] = { 10, 1 }; +static const int bmp280_press_coeffs[] = { 1, 256000 }; const struct bmp280_chip_info bmp280_chip_info = { .id_reg = BMP280_REG_ID, @@ -830,7 +1069,8 @@ const struct bmp280_chip_info bmp280_chip_info = { .regmap_config = &bmp280_regmap_config, .start_up_time = 2000, .channels = bmp280_channels, - .num_channels = 2, + .num_channels = ARRAY_SIZE(bmp280_channels), + .avail_scan_masks = bmp280_avail_scan_masks, .oversampling_temp_avail = bmp280_oversampling_avail, .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail), @@ -850,10 +1090,17 @@ const struct bmp280_chip_info bmp280_chip_info = { .num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail), .oversampling_press_default = BMP280_OSRS_PRESS_16X - 1, + .temp_coeffs = bmp280_temp_coeffs, + .temp_coeffs_type = IIO_VAL_FRACTIONAL, + .press_coeffs = bmp280_press_coeffs, + .press_coeffs_type = IIO_VAL_FRACTIONAL, + .chip_config = bmp280_chip_config, .read_temp = bmp280_read_temp, .read_press = bmp280_read_press, .read_calib = bmp280_read_calib, + + .trigger_handler = bmp280_trigger_handler, }; EXPORT_SYMBOL_NS(bmp280_chip_info, IIO_BMP280); @@ -876,16 +1123,74 @@ static int bme280_chip_config(struct bmp280_data *data) return bmp280_chip_config(data); } +static irqreturn_t bme280_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bmp280_data *data = iio_priv(indio_dev); + s32 adc_temp, adc_press, adc_humidity, t_fine; + int ret; + + guard(mutex)(&data->lock); + + /* Burst read data registers */ + ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, + data->buf, BME280_BURST_READ_BYTES); + if (ret) { + dev_err(data->dev, "failed to burst read sensor data\n"); + goto out; + } + + /* Temperature calculations */ + adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[3])); 
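/*
 * Layout of the burst read just performed (per BME280_BURST_READ_BYTES
 * = 3 + 3 + 2): buf[0..2] holds the raw pressure sample, buf[3..5] the
 * raw temperature sample and buf[6..7] the raw humidity sample, all
 * big-endian. The pressure/temperature XLSB bytes carry padding bits
 * that FIELD_GET(BMP280_MEAS_TRIM_MASK, ...) strips before the values
 * are compensated and written into sensor_data[].
 */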
+ if (adc_temp == BMP280_TEMP_SKIPPED) { + dev_err(data->dev, "reading temperature skipped\n"); + goto out; + } + + data->sensor_data[1] = bmp280_compensate_temp(data, adc_temp); + + /* Pressure calculations */ + adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[0])); + if (adc_press == BMP280_PRESS_SKIPPED) { + dev_err(data->dev, "reading pressure skipped\n"); + goto out; + } + + t_fine = bmp280_calc_t_fine(data, adc_temp); + + data->sensor_data[0] = bmp280_compensate_press(data, adc_press, t_fine); + + /* Humidity calculations */ + adc_humidity = get_unaligned_be16(&data->buf[6]); + + if (adc_humidity == BMP280_HUMIDITY_SKIPPED) { + dev_err(data->dev, "reading humidity skipped\n"); + goto out; + } + data->sensor_data[2] = bme280_compensate_humidity(data, adc_humidity, t_fine); + + iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data, + iio_get_time_ns(indio_dev)); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + static const u8 bme280_chip_ids[] = { BME280_CHIP_ID }; +static const int bme280_humid_coeffs[] = { 1000, 1024 }; const struct bmp280_chip_info bme280_chip_info = { .id_reg = BMP280_REG_ID, .chip_id = bme280_chip_ids, .num_chip_id = ARRAY_SIZE(bme280_chip_ids), - .regmap_config = &bmp280_regmap_config, + .regmap_config = &bme280_regmap_config, .start_up_time = 2000, - .channels = bmp280_channels, - .num_channels = 3, + .channels = bme280_channels, + .num_channels = ARRAY_SIZE(bme280_channels), + .avail_scan_masks = bme280_avail_scan_masks, .oversampling_temp_avail = bmp280_oversampling_avail, .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail), @@ -899,11 +1204,20 @@ const struct bmp280_chip_info bme280_chip_info = { .num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail), .oversampling_humid_default = BME280_OSRS_HUMIDITY_16X - 1, + .temp_coeffs = bmp280_temp_coeffs, + .temp_coeffs_type = IIO_VAL_FRACTIONAL, + .press_coeffs = bmp280_press_coeffs, + .press_coeffs_type = IIO_VAL_FRACTIONAL, + .humid_coeffs = bme280_humid_coeffs, + .humid_coeffs_type = IIO_VAL_FRACTIONAL, + .chip_config = bme280_chip_config, .read_temp = bmp280_read_temp, .read_press = bmp280_read_press, .read_humid = bme280_read_humid, .read_calib = bme280_read_calib, + + .trigger_handler = bme280_trigger_handler, }; EXPORT_SYMBOL_NS(bme280_chip_info, IIO_BMP280); @@ -958,7 +1272,7 @@ static int bmp380_read_temp_adc(struct bmp280_data *data, u32 *adc_temp) int ret; ret = regmap_bulk_read(data->regmap, BMP380_REG_TEMP_XLSB, - data->buf, sizeof(data->buf)); + data->buf, BMP280_NUM_TEMP_BYTES); if (ret) { dev_err(data->dev, "failed to read temperature\n"); return ret; @@ -1027,7 +1341,7 @@ static int bmp380_read_press_adc(struct bmp280_data *data, u32 *adc_press) int ret; ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB, - data->buf, sizeof(data->buf)); + data->buf, BMP280_NUM_PRESS_BYTES); if (ret) { dev_err(data->dev, "failed to read pressure\n"); return ret; @@ -1091,9 +1405,8 @@ static u32 bmp380_compensate_press(struct bmp280_data *data, return comp_press; } -static int bmp380_read_temp(struct bmp280_data *data, int *val, int *val2) +static int bmp380_read_temp(struct bmp280_data *data, s32 *comp_temp) { - s32 comp_temp; u32 adc_temp; int ret; @@ -1101,15 +1414,14 @@ static int bmp380_read_temp(struct bmp280_data *data, int *val, int *val2) if (ret) return ret; - comp_temp = bmp380_compensate_temp(data, adc_temp); + *comp_temp = bmp380_compensate_temp(data, adc_temp); - *val = comp_temp * 10; - return 
IIO_VAL_INT; + return 0; } -static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2) +static int bmp380_read_press(struct bmp280_data *data, u32 *comp_press) { - u32 adc_press, comp_press, t_fine; + u32 adc_press, t_fine; int ret; ret = bmp380_get_t_fine(data, &t_fine); @@ -1120,12 +1432,9 @@ static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2) if (ret) return ret; - comp_press = bmp380_compensate_press(data, adc_press, t_fine); - - *val = comp_press; - *val2 = 100000; + *comp_press = bmp380_compensate_press(data, adc_press, t_fine); - return IIO_VAL_FRACTIONAL; + return 0; } static int bmp380_read_calib(struct bmp280_data *data) @@ -1272,10 +1581,11 @@ static int bmp380_chip_config(struct bmp280_data *data) } /* * Waits for measurement before checking configuration error - * flag. Selected longest measure time indicated in - * section 3.9.1 in the datasheet. + * flag. Selected longest measurement time, calculated from + * formula in datasheet section 3.9.2 with an offset of ~+15% + * as it seen as well in table 3.9.1. */ - msleep(80); + msleep(150); /* Check config error flag */ ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp); @@ -1293,9 +1603,58 @@ static int bmp380_chip_config(struct bmp280_data *data) return 0; } +static irqreturn_t bmp380_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bmp280_data *data = iio_priv(indio_dev); + s32 adc_temp, adc_press, t_fine; + int ret; + + guard(mutex)(&data->lock); + + /* Burst read data registers */ + ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB, + data->buf, BMP280_BURST_READ_BYTES); + if (ret) { + dev_err(data->dev, "failed to burst read sensor data\n"); + goto out; + } + + /* Temperature calculations */ + adc_temp = get_unaligned_le24(&data->buf[3]); + if (adc_temp == BMP380_TEMP_SKIPPED) { + dev_err(data->dev, "reading temperature skipped\n"); + goto out; + } + + data->sensor_data[1] = bmp380_compensate_temp(data, adc_temp); + + /* Pressure calculations */ + adc_press = get_unaligned_le24(&data->buf[0]); + if (adc_press == BMP380_PRESS_SKIPPED) { + dev_err(data->dev, "reading pressure skipped\n"); + goto out; + } + + t_fine = bmp380_calc_t_fine(data, adc_temp); + + data->sensor_data[0] = bmp380_compensate_press(data, adc_press, t_fine); + + iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data, + iio_get_time_ns(indio_dev)); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 }; static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128}; static const u8 bmp380_chip_ids[] = { BMP380_CHIP_ID, BMP390_CHIP_ID }; +static const int bmp380_temp_coeffs[] = { 10, 1 }; +static const int bmp380_press_coeffs[] = { 1, 100000 }; const struct bmp280_chip_info bmp380_chip_info = { .id_reg = BMP380_REG_ID, @@ -1305,7 +1664,8 @@ const struct bmp280_chip_info bmp380_chip_info = { .spi_read_extra_byte = true, .start_up_time = 2000, .channels = bmp380_channels, - .num_channels = 2, + .num_channels = ARRAY_SIZE(bmp380_channels), + .avail_scan_masks = bmp280_avail_scan_masks, .oversampling_temp_avail = bmp380_oversampling_avail, .num_oversampling_temp_avail = ARRAY_SIZE(bmp380_oversampling_avail), @@ -1323,11 +1683,18 @@ const struct bmp280_chip_info bmp380_chip_info = { .num_iir_filter_coeffs_avail = ARRAY_SIZE(bmp380_iir_filter_coeffs_avail), .iir_filter_coeff_default = 2, + .temp_coeffs = 
bmp380_temp_coeffs, + .temp_coeffs_type = IIO_VAL_FRACTIONAL, + .press_coeffs = bmp380_press_coeffs, + .press_coeffs_type = IIO_VAL_FRACTIONAL, + .chip_config = bmp380_chip_config, .read_temp = bmp380_read_temp, .read_press = bmp380_read_press, .read_calib = bmp380_read_calib, .preinit = bmp380_preinit, + + .trigger_handler = bmp380_trigger_handler, }; EXPORT_SYMBOL_NS(bmp380_chip_info, IIO_BMP280); @@ -1443,58 +1810,48 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write) * for what is expected on IIO ABI. */ -static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2) +static int bmp580_read_temp(struct bmp280_data *data, s32 *raw_temp) { - s32 raw_temp; + s32 value_temp; int ret; - ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB, data->buf, - sizeof(data->buf)); + ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB, + data->buf, BMP280_NUM_TEMP_BYTES); if (ret) { dev_err(data->dev, "failed to read temperature\n"); return ret; } - raw_temp = get_unaligned_le24(data->buf); - if (raw_temp == BMP580_TEMP_SKIPPED) { + value_temp = get_unaligned_le24(data->buf); + if (value_temp == BMP580_TEMP_SKIPPED) { dev_err(data->dev, "reading temperature skipped\n"); return -EIO; } + *raw_temp = sign_extend32(value_temp, 23); - /* - * Temperature is returned in Celsius degrees in fractional - * form down 2^16. We rescale by x1000 to return millidegrees - * Celsius to respect IIO ABI. - */ - raw_temp = sign_extend32(raw_temp, 23); - *val = ((s64)raw_temp * 1000) / (1 << 16); - return IIO_VAL_INT; + return 0; } -static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2) +static int bmp580_read_press(struct bmp280_data *data, u32 *raw_press) { - u32 raw_press; + u32 value_press; int ret; - ret = regmap_bulk_read(data->regmap, BMP580_REG_PRESS_XLSB, data->buf, - sizeof(data->buf)); + ret = regmap_bulk_read(data->regmap, BMP580_REG_PRESS_XLSB, + data->buf, BMP280_NUM_PRESS_BYTES); if (ret) { dev_err(data->dev, "failed to read pressure\n"); return ret; } - raw_press = get_unaligned_le24(data->buf); - if (raw_press == BMP580_PRESS_SKIPPED) { + value_press = get_unaligned_le24(data->buf); + if (value_press == BMP580_PRESS_SKIPPED) { dev_err(data->dev, "reading pressure skipped\n"); return -EIO; } - /* - * Pressure is returned in Pascals in fractional form down 2^16. - * We rescale /1000 to convert to kilopascal to respect IIO ABI. 
- */ - *val = raw_press; - *val2 = 64000; /* 2^6 * 1000 */ - return IIO_VAL_FRACTIONAL; + *raw_press = value_press; + + return 0; } static const int bmp580_odr_table[][2] = { @@ -1828,8 +2185,43 @@ static int bmp580_chip_config(struct bmp280_data *data) return 0; } +static irqreturn_t bmp580_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bmp280_data *data = iio_priv(indio_dev); + int ret; + + guard(mutex)(&data->lock); + + /* Burst read data registers */ + ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB, + data->buf, BMP280_BURST_READ_BYTES); + if (ret) { + dev_err(data->dev, "failed to burst read sensor data\n"); + goto out; + } + + /* Temperature calculations */ + memcpy(&data->sensor_data[1], &data->buf[0], 3); + + /* Pressure calculations */ + memcpy(&data->sensor_data[0], &data->buf[3], 3); + + iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data, + iio_get_time_ns(indio_dev)); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + static const int bmp580_oversampling_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128 }; static const u8 bmp580_chip_ids[] = { BMP580_CHIP_ID, BMP580_CHIP_ID_ALT }; +/* Instead of { 1000, 16 } we do this, to avoid overflow issues */ +static const int bmp580_temp_coeffs[] = { 125, 13 }; +static const int bmp580_press_coeffs[] = { 1, 64000}; const struct bmp280_chip_info bmp580_chip_info = { .id_reg = BMP580_REG_CHIP_ID, @@ -1837,8 +2229,9 @@ const struct bmp280_chip_info bmp580_chip_info = { .num_chip_id = ARRAY_SIZE(bmp580_chip_ids), .regmap_config = &bmp580_regmap_config, .start_up_time = 2000, - .channels = bmp380_channels, - .num_channels = 2, + .channels = bmp580_channels, + .num_channels = ARRAY_SIZE(bmp580_channels), + .avail_scan_masks = bmp280_avail_scan_masks, .oversampling_temp_avail = bmp580_oversampling_avail, .num_oversampling_temp_avail = ARRAY_SIZE(bmp580_oversampling_avail), @@ -1856,16 +2249,23 @@ const struct bmp280_chip_info bmp580_chip_info = { .num_iir_filter_coeffs_avail = ARRAY_SIZE(bmp380_iir_filter_coeffs_avail), .iir_filter_coeff_default = 2, + .temp_coeffs = bmp580_temp_coeffs, + .temp_coeffs_type = IIO_VAL_FRACTIONAL_LOG2, + .press_coeffs = bmp580_press_coeffs, + .press_coeffs_type = IIO_VAL_FRACTIONAL, + .chip_config = bmp580_chip_config, .read_temp = bmp580_read_temp, .read_press = bmp580_read_press, .preinit = bmp580_preinit, + + .trigger_handler = bmp580_trigger_handler, }; EXPORT_SYMBOL_NS(bmp580_chip_info, IIO_BMP280); static int bmp180_wait_for_eoc(struct bmp280_data *data, u8 ctrl_meas) { - const int conversion_time_max[] = { 4500, 7500, 13500, 25500 }; + static const int conversion_time_max[] = { 4500, 7500, 13500, 25500 }; unsigned int delay_us; unsigned int ctrl; int ret; @@ -2011,9 +2411,8 @@ static s32 bmp180_compensate_temp(struct bmp280_data *data, u32 adc_temp) return (bmp180_calc_t_fine(data, adc_temp) + 8) / 16; } -static int bmp180_read_temp(struct bmp280_data *data, int *val, int *val2) +static int bmp180_read_temp(struct bmp280_data *data, s32 *comp_temp) { - s32 comp_temp; u32 adc_temp; int ret; @@ -2021,10 +2420,9 @@ static int bmp180_read_temp(struct bmp280_data *data, int *val, int *val2) if (ret) return ret; - comp_temp = bmp180_compensate_temp(data, adc_temp); + *comp_temp = bmp180_compensate_temp(data, adc_temp); - *val = comp_temp * 100; - return IIO_VAL_INT; + return 0; } static int bmp180_read_press_adc(struct bmp280_data *data, u32 *adc_press) @@ -2040,7 +2438,7 @@ static int 
bmp180_read_press_adc(struct bmp280_data *data, u32 *adc_press) return ret; ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, - data->buf, sizeof(data->buf)); + data->buf, BMP280_NUM_PRESS_BYTES); if (ret) { dev_err(data->dev, "failed to read pressure\n"); return ret; @@ -2087,9 +2485,9 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, u32 adc_press, return p + ((x1 + x2 + 3791) >> 4); } -static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2) +static int bmp180_read_press(struct bmp280_data *data, u32 *comp_press) { - u32 comp_press, adc_press; + u32 adc_press; s32 t_fine; int ret; @@ -2101,12 +2499,9 @@ static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2) if (ret) return ret; - comp_press = bmp180_compensate_press(data, adc_press, t_fine); - - *val = comp_press; - *val2 = 1000; + *comp_press = bmp180_compensate_press(data, adc_press, t_fine); - return IIO_VAL_FRACTIONAL; + return 0; } static int bmp180_chip_config(struct bmp280_data *data) @@ -2114,9 +2509,41 @@ static int bmp180_chip_config(struct bmp280_data *data) return 0; } +static irqreturn_t bmp180_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bmp280_data *data = iio_priv(indio_dev); + int ret, chan_value; + + guard(mutex)(&data->lock); + + ret = bmp180_read_temp(data, &chan_value); + if (ret) + goto out; + + data->sensor_data[1] = chan_value; + + ret = bmp180_read_press(data, &chan_value); + if (ret) + goto out; + + data->sensor_data[0] = chan_value; + + iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data, + iio_get_time_ns(indio_dev)); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + static const int bmp180_oversampling_temp_avail[] = { 1 }; static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 }; static const u8 bmp180_chip_ids[] = { BMP180_CHIP_ID }; +static const int bmp180_temp_coeffs[] = { 100, 1 }; +static const int bmp180_press_coeffs[] = { 1, 1000 }; const struct bmp280_chip_info bmp180_chip_info = { .id_reg = BMP280_REG_ID, @@ -2125,7 +2552,8 @@ const struct bmp280_chip_info bmp180_chip_info = { .regmap_config = &bmp180_regmap_config, .start_up_time = 2000, .channels = bmp280_channels, - .num_channels = 2, + .num_channels = ARRAY_SIZE(bmp280_channels), + .avail_scan_masks = bmp280_avail_scan_masks, .oversampling_temp_avail = bmp180_oversampling_temp_avail, .num_oversampling_temp_avail = @@ -2137,10 +2565,17 @@ const struct bmp280_chip_info bmp180_chip_info = { ARRAY_SIZE(bmp180_oversampling_press_avail), .oversampling_press_default = BMP180_MEAS_PRESS_8X, + .temp_coeffs = bmp180_temp_coeffs, + .temp_coeffs_type = IIO_VAL_FRACTIONAL, + .press_coeffs = bmp180_press_coeffs, + .press_coeffs_type = IIO_VAL_FRACTIONAL, + .chip_config = bmp180_chip_config, .read_temp = bmp180_read_temp, .read_press = bmp180_read_press, .read_calib = bmp180_read_calib, + + .trigger_handler = bmp180_trigger_handler, }; EXPORT_SYMBOL_NS(bmp180_chip_info, IIO_BMP280); @@ -2186,6 +2621,30 @@ static int bmp085_fetch_eoc_irq(struct device *dev, return 0; } +static int bmp280_buffer_preenable(struct iio_dev *indio_dev) +{ + struct bmp280_data *data = iio_priv(indio_dev); + + pm_runtime_get_sync(data->dev); + + return 0; +} + +static int bmp280_buffer_postdisable(struct iio_dev *indio_dev) +{ + struct bmp280_data *data = iio_priv(indio_dev); + + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); + + return 0; +} + +static const 
struct iio_buffer_setup_ops bmp280_buffer_setup_ops = { + .preenable = bmp280_buffer_preenable, + .postdisable = bmp280_buffer_postdisable, +}; + static void bmp280_pm_disable(void *data) { struct device *dev = data; @@ -2232,6 +2691,7 @@ int bmp280_common_probe(struct device *dev, /* Apply initial values from chip info structure */ indio_dev->channels = chip_info->channels; indio_dev->num_channels = chip_info->num_channels; + indio_dev->available_scan_masks = chip_info->avail_scan_masks; data->oversampling_press = chip_info->oversampling_press_default; data->oversampling_humid = chip_info->oversampling_humid_default; data->oversampling_temp = chip_info->oversampling_temp_default; @@ -2317,6 +2777,14 @@ int bmp280_common_probe(struct device *dev, "failed to read calibration coefficients\n"); } + ret = devm_iio_triggered_buffer_setup(data->dev, indio_dev, + iio_pollfunc_store_time, + data->chip_info->trigger_handler, + &bmp280_buffer_setup_ops); + if (ret) + return dev_err_probe(data->dev, ret, + "iio triggered buffer setup failed\n"); + /* * Attempt to grab an optional EOC IRQ - only the BMP085 has this * however as it happens, the BMP085 shares the chip ID of BMP180 diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c index 34e3bc758493ea..5c3a63b4327c80 100644 --- a/drivers/iio/pressure/bmp280-i2c.c +++ b/drivers/iio/pressure/bmp280-i2c.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only -#include #include +#include #include #include "bmp280.h" diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c index fa52839474b18b..d27d68edd90656 100644 --- a/drivers/iio/pressure/bmp280-regmap.c +++ b/drivers/iio/pressure/bmp280-regmap.c @@ -41,7 +41,7 @@ const struct regmap_config bmp180_regmap_config = { }; EXPORT_SYMBOL_NS(bmp180_regmap_config, IIO_BMP280); -static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg) +static bool bme280_is_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { case BMP280_REG_CONFIG: @@ -54,7 +54,35 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg) } } +static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case BMP280_REG_CONFIG: + case BMP280_REG_CTRL_MEAS: + case BMP280_REG_RESET: + return true; + default: + return false; + } +} + static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case BMP280_REG_TEMP_XLSB: + case BMP280_REG_TEMP_LSB: + case BMP280_REG_TEMP_MSB: + case BMP280_REG_PRESS_XLSB: + case BMP280_REG_PRESS_LSB: + case BMP280_REG_PRESS_MSB: + case BMP280_REG_STATUS: + return true; + default: + return false; + } +} + +static bool bme280_is_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case BME280_REG_HUMIDITY_LSB: @@ -71,7 +99,6 @@ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg) return false; } } - static bool bmp380_is_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { @@ -167,7 +194,7 @@ const struct regmap_config bmp280_regmap_config = { .reg_bits = 8, .val_bits = 8, - .max_register = BME280_REG_HUMIDITY_LSB, + .max_register = BMP280_REG_TEMP_XLSB, .cache_type = REGCACHE_RBTREE, .writeable_reg = bmp280_is_writeable_reg, @@ -175,6 +202,18 @@ const struct regmap_config bmp280_regmap_config = { }; EXPORT_SYMBOL_NS(bmp280_regmap_config, IIO_BMP280); +const struct regmap_config bme280_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = BME280_REG_HUMIDITY_LSB, + 
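/*
 * Unlike the trimmed-down bmp280_regmap_config above (now capped at
 * BMP280_REG_TEMP_XLSB), this map reaches up to the humidity data
 * registers and uses the BME280-specific writeable/volatile callbacks.
 */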
.cache_type = REGCACHE_RBTREE, + + .writeable_reg = bme280_is_writeable_reg, + .volatile_reg = bme280_is_volatile_reg, +}; +EXPORT_SYMBOL_NS(bme280_regmap_config, IIO_BMP280); + const struct regmap_config bmp380_regmap_config = { .reg_bits = 8, .val_bits = 8, diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c index 62b4e58104cf98..d18549d9bb647d 100644 --- a/drivers/iio/pressure/bmp280-spi.c +++ b/drivers/iio/pressure/bmp280-spi.c @@ -5,10 +5,10 @@ * Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c */ #include -#include -#include #include +#include #include +#include #include "bmp280.h" @@ -40,14 +40,10 @@ static int bmp380_regmap_spi_read(void *context, const void *reg, size_t reg_size, void *val, size_t val_size) { struct spi_device *spi = to_spi_device(context); - u8 rx_buf[4]; + u8 rx_buf[BME280_BURST_READ_BYTES + 1]; ssize_t status; - /* - * Maximum number of consecutive bytes read for a temperature or - * pressure measurement is 3. - */ - if (val_size > 3) + if (val_size > BME280_BURST_READ_BYTES) return -EINVAL; /* @@ -64,14 +60,14 @@ static int bmp380_regmap_spi_read(void *context, const void *reg, return 0; } -static struct regmap_bus bmp280_regmap_bus = { +static const struct regmap_bus bmp280_regmap_bus = { .write = bmp280_regmap_spi_write, .read = bmp280_regmap_spi_read, .reg_format_endian_default = REGMAP_ENDIAN_BIG, .val_format_endian_default = REGMAP_ENDIAN_BIG, }; -static struct regmap_bus bmp380_regmap_bus = { +static const struct regmap_bus bmp380_regmap_bus = { .write = bmp280_regmap_spi_write, .read = bmp380_regmap_spi_read, .read_flag_mask = BIT(7), @@ -83,7 +79,7 @@ static int bmp280_spi_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); const struct bmp280_chip_info *chip_info; - struct regmap_bus *bmp_regmap_bus; + struct regmap_bus const *bmp_regmap_bus; struct regmap *regmap; int ret; diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h index 7c30e4d523beb6..ccacc67c147311 100644 --- a/drivers/iio/pressure/bmp280.h +++ b/drivers/iio/pressure/bmp280.h @@ -1,10 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include #include -#include #include #include +#include /* BMP580 specific registers */ #define BMP580_REG_CMD 0x7E @@ -304,6 +304,16 @@ #define BMP280_PRESS_SKIPPED 0x80000 #define BMP280_HUMIDITY_SKIPPED 0x8000 +/* Number of bytes for each value */ +#define BMP280_NUM_PRESS_BYTES 3 +#define BMP280_NUM_TEMP_BYTES 3 +#define BME280_NUM_HUMIDITY_BYTES 2 +#define BMP280_BURST_READ_BYTES (BMP280_NUM_PRESS_BYTES + \ + BMP280_NUM_TEMP_BYTES) +#define BME280_BURST_READ_BYTES (BMP280_NUM_PRESS_BYTES + \ + BMP280_NUM_TEMP_BYTES + \ + BME280_NUM_HUMIDITY_BYTES) + /* Core exported structs */ static const char *const bmp280_supply_names[] = { @@ -397,13 +407,19 @@ struct bmp280_data { */ int sampling_freq; + /* + * Data to push to userspace triggered buffer. Up to 3 channels and + * s64 timestamp, aligned. + */ + s32 sensor_data[6] __aligned(8); + /* * DMA (thus cache coherency maintenance) may require the * transfer buffers to live in their own cache lines. 
*/ union { /* Sensor data buffer */ - u8 buf[3]; + u8 buf[BME280_BURST_READ_BYTES]; /* Calibration data buffers */ __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2]; __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2]; @@ -425,6 +441,7 @@ struct bmp280_chip_info { const struct iio_chan_spec *channels; int num_channels; unsigned int start_up_time; + const unsigned long *avail_scan_masks; const int *oversampling_temp_avail; int num_oversampling_temp_avail; @@ -446,12 +463,21 @@ struct bmp280_chip_info { int num_sampling_freq_avail; int sampling_freq_default; + const int *temp_coeffs; + const int temp_coeffs_type; + const int *press_coeffs; + const int press_coeffs_type; + const int *humid_coeffs; + const int humid_coeffs_type; + int (*chip_config)(struct bmp280_data *data); - int (*read_temp)(struct bmp280_data *data, int *val, int *val2); - int (*read_press)(struct bmp280_data *data, int *val, int *val2); - int (*read_humid)(struct bmp280_data *data, int *val, int *val2); + int (*read_temp)(struct bmp280_data *data, s32 *adc_temp); + int (*read_press)(struct bmp280_data *data, u32 *adc_press); + int (*read_humid)(struct bmp280_data *data, u32 *adc_humidity); int (*read_calib)(struct bmp280_data *data); int (*preinit)(struct bmp280_data *data); + + irqreturn_t (*trigger_handler)(int irq, void *p); }; /* Chip infos for each variant */ @@ -464,6 +490,7 @@ extern const struct bmp280_chip_info bmp580_chip_info; /* Regmap configurations */ extern const struct regmap_config bmp180_regmap_config; extern const struct regmap_config bmp280_regmap_config; +extern const struct regmap_config bme280_regmap_config; extern const struct regmap_config bmp380_regmap_config; extern const struct regmap_config bmp580_regmap_config; diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c index 0bba4c5a8d4059..e99e97ea630051 100644 --- a/drivers/iio/pressure/dlhl60d.c +++ b/drivers/iio/pressure/dlhl60d.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include /* Commands */ #define DLH_START_SINGLE 0xAA @@ -256,8 +256,7 @@ static irqreturn_t dlh_trigger_handler(int irq, void *private) if (ret) goto out; - for_each_set_bit(chn, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, chn) { memcpy(&tmp_buf[i++], &st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES, DLH_NUM_DATA_BYTES); diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c index 261af1562827c8..44274094193379 100644 --- a/drivers/iio/pressure/hp206c.c +++ b/drivers/iio/pressure/hp206c.c @@ -18,7 +18,7 @@ #include #include -#include +#include /* I2C commands: */ #define HP206C_CMD_SOFT_RST 0x06 diff --git a/drivers/iio/pressure/hsc030pa.c b/drivers/iio/pressure/hsc030pa.c index 1682b90d4557c2..4e6f10eeabc301 100644 --- a/drivers/iio/pressure/hsc030pa.c +++ b/drivers/iio/pressure/hsc030pa.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include "hsc030pa.h" diff --git a/drivers/iio/pressure/mprls0025pa.c b/drivers/iio/pressure/mprls0025pa.c index 33a15d4c642c0a..3b6145348c2e3b 100644 --- a/drivers/iio/pressure/mprls0025pa.c +++ b/drivers/iio/pressure/mprls0025pa.c @@ -26,7 +26,7 @@ #include -#include +#include #include "mprls0025pa.h" diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c index 9a0f52321fcb9d..7e2cb8b6afa223 100644 --- a/drivers/iio/pressure/ms5611_i2c.c +++ b/drivers/iio/pressure/ms5611_i2c.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include "ms5611.h" diff --git a/drivers/iio/pressure/ms5611_spi.c 
b/drivers/iio/pressure/ms5611_spi.c index cc9d1f68c53cc1..87181963a3e3db 100644 --- a/drivers/iio/pressure/ms5611_spi.c +++ b/drivers/iio/pressure/ms5611_spi.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include "ms5611.h" diff --git a/drivers/iio/pressure/sdp500.c b/drivers/iio/pressure/sdp500.c new file mode 100644 index 00000000000000..9828c73c48559c --- /dev/null +++ b/drivers/iio/pressure/sdp500.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for Sensirion sdp500 and sdp510 pressure sensors + * + * Datasheet: https://sensirion.com/resource/datasheet/sdp600 + */ + +#include +#include +#include +#include +#include +#include + +#define SDP500_CRC8_POLYNOMIAL 0x31 /* x8+x5+x4+1 (normalized to 0x31) */ +#define SDP500_READ_SIZE 3 + +#define SDP500_I2C_START_MEAS 0xF1 + +struct sdp500_data { + struct device *dev; +}; + +DECLARE_CRC8_TABLE(sdp500_crc8_table); + +static int sdp500_start_measurement(struct sdp500_data *data) +{ + struct i2c_client *client = to_i2c_client(data->dev); + + return i2c_smbus_write_byte(client, SDP500_I2C_START_MEAS); +} + +static const struct iio_chan_spec sdp500_channels[] = { + { + .type = IIO_PRESSURE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + }, +}; + +static int sdp500_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + int ret; + u8 rxbuf[SDP500_READ_SIZE]; + u8 received_crc, calculated_crc; + struct sdp500_data *data = iio_priv(indio_dev); + struct i2c_client *client = to_i2c_client(data->dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = i2c_master_recv(client, rxbuf, SDP500_READ_SIZE); + if (ret < 0) { + dev_err(data->dev, "Failed to receive data"); + return ret; + } + if (ret != SDP500_READ_SIZE) { + dev_err(data->dev, "Data is received wrongly"); + return -EIO; + } + + received_crc = rxbuf[2]; + calculated_crc = crc8(sdp500_crc8_table, rxbuf, + sizeof(rxbuf) - 1, 0x00); + if (received_crc != calculated_crc) { + dev_err(data->dev, + "calculated crc = 0x%.2X, received 0x%.2X", + calculated_crc, received_crc); + return -EIO; + } + + *val = get_unaligned_be16(rxbuf); + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = 1; + *val2 = 60; + + return IIO_VAL_FRACTIONAL; + default: + return -EINVAL; + } +} + +static const struct iio_info sdp500_info = { + .read_raw = &sdp500_read_raw, +}; + +static int sdp500_probe(struct i2c_client *client) +{ + struct iio_dev *indio_dev; + struct sdp500_data *data; + struct device *dev = &client->dev; + int ret; + u8 rxbuf[SDP500_READ_SIZE]; + + ret = devm_regulator_get_enable(dev, "vdd"); + if (ret) + return dev_err_probe(dev, ret, + "Failed to get and enable regulator\n"); + + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + /* has to be done before the first i2c communication */ + crc8_populate_msb(sdp500_crc8_table, SDP500_CRC8_POLYNOMIAL); + + data = iio_priv(indio_dev); + data->dev = dev; + + indio_dev->name = "sdp500"; + indio_dev->channels = sdp500_channels; + indio_dev->info = &sdp500_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->num_channels = ARRAY_SIZE(sdp500_channels); + + ret = sdp500_start_measurement(data); + if (ret) + return dev_err_probe(dev, ret, "Failed to start measurement"); + + /* First measurement is not correct, read it out to get rid of it */ + i2c_master_recv(client, rxbuf, SDP500_READ_SIZE); + + ret = devm_iio_device_register(dev, indio_dev); + if (ret) + return dev_err_probe(dev, ret, 
"Failed to register indio_dev"); + + return 0; +} + +static const struct i2c_device_id sdp500_id[] = { + { "sdp500" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, sdp500_id); + +static const struct of_device_id sdp500_of_match[] = { + { .compatible = "sensirion,sdp500" }, + { } +}; +MODULE_DEVICE_TABLE(of, sdp500_of_match); + +static struct i2c_driver sdp500_driver = { + .driver = { + .name = "sensirion,sdp500", + .of_match_table = sdp500_of_match, + }, + .probe = sdp500_probe, + .id_table = sdp500_id, +}; +module_i2c_driver(sdp500_driver); + +MODULE_AUTHOR("Thomas Sioutas "); +MODULE_DESCRIPTION("Driver for Sensirion SDP500 differential pressure sensor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 80176e3083afbb..597bf268ea5172 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include "st_pressure.h" diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index dcc87a9015e8be..950f8dee2b26b7 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -64,7 +64,7 @@ #include #include #include -#include +#include #include "zpa2326.h" /* 200 ms should be enough for the longest conversion time in one-shot mode. */ diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig index 2ca3b0bc5eba10..31c679074b250e 100644 --- a/drivers/iio/proximity/Kconfig +++ b/drivers/iio/proximity/Kconfig @@ -32,6 +32,20 @@ config CROS_EC_MKBP_PROXIMITY To compile this driver as a module, choose M here: the module will be called cros_ec_mkbp_proximity. +config HX9023S + tristate "TYHX HX9023S SAR sensor" + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + select REGMAP_I2C + depends on I2C + help + Say Y here to build a driver for TYHX HX9023S capacitive SAR sensor. + This driver supports the TYHX HX9023S capacitive + SAR sensors. This sensors is used for proximity detection applications. + + To compile this driver as a module, choose M here: the + module will be called hx9023s. + config IRSD200 tristate "Murata IRS-D200 PIR sensor" select IIO_BUFFER @@ -219,4 +233,15 @@ config VL53L0X_I2C To compile this driver as a module, choose M here: the module will be called vl53l0x-i2c. +config AW96103 + tristate "AW96103/AW96105 Awinic proximity sensor" + select REGMAP_I2C + depends on I2C + help + Say Y here to build a driver for Awinic's AW96103/AW96105 capacitive + proximity sensor. + + To compile this driver as a module, choose M here: the + module will be called aw96103. 
+ endmenu diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile index f3659838044674..c5e76995764ab4 100644 --- a/drivers/iio/proximity/Makefile +++ b/drivers/iio/proximity/Makefile @@ -6,6 +6,7 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_AS3935) += as3935.o obj-$(CONFIG_CROS_EC_MKBP_PROXIMITY) += cros_ec_mkbp_proximity.o +obj-$(CONFIG_HX9023S) += hx9023s.o obj-$(CONFIG_IRSD200) += irsd200.o obj-$(CONFIG_ISL29501) += isl29501.o obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o @@ -21,4 +22,5 @@ obj-$(CONFIG_SX_COMMON) += sx_common.o obj-$(CONFIG_SX9500) += sx9500.o obj-$(CONFIG_VCNL3020) += vcnl3020.o obj-$(CONFIG_VL53L0X_I2C) += vl53l0x-i2c.o +obj-$(CONFIG_AW96103) += aw96103.o diff --git a/drivers/iio/proximity/aw96103.c b/drivers/iio/proximity/aw96103.c new file mode 100644 index 00000000000000..707ba0a510aa5f --- /dev/null +++ b/drivers/iio/proximity/aw96103.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AWINIC aw96103 proximity sensor driver + * + * Author: Wang Shuaijie + * + * Copyright (c) 2024 awinic Technology CO., LTD + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AW_DATA_PROCESS_FACTOR 1024 +#define AW96103_CHIP_ID 0xa961 +#define AW96103_BIN_VALID_DATA_OFFSET 64 +#define AW96103_BIN_DATA_LEN_OFFSET 16 +#define AW96103_BIN_DATA_REG_NUM_SIZE 4 +#define AW96103_BIN_CHIP_TYPE_SIZE 8 +#define AW96103_BIN_CHIP_TYPE_OFFSET 24 + +#define AW96103_REG_SCANCTRL0 0x0000 +#define AW96103_REG_STAT0 0x0090 +#define AW96103_REG_BLFILT_CH0 0x00A8 +#define AW96103_REG_BLRSTRNG_CH0 0x00B4 +#define AW96103_REG_DIFF_CH0 0x0240 +#define AW96103_REG_FWVER2 0x0410 +#define AW96103_REG_CMD 0xF008 +#define AW96103_REG_IRQSRC 0xF080 +#define AW96103_REG_IRQEN 0xF084 +#define AW96103_REG_RESET 0xFF0C +#define AW96103_REG_CHIPID 0xFF10 +#define AW96103_REG_EEDA0 0x0408 +#define AW96103_REG_EEDA1 0x040C +#define AW96103_REG_PROXCTRL_CH0 0x00B0 +#define AW96103_REG_PROXTH0_CH0 0x00B8 +#define AW96103_PROXTH_CH_STEP 0x3C +#define AW96103_THHYST_MASK GENMASK(13, 12) +#define AW96103_INDEB_MASK GENMASK(11, 10) +#define AW96103_OUTDEB_MASK GENMASK(9, 8) +#define AW96103_INITOVERIRQ_MASK BIT(0) +#define AW96103_BLFILT_CH_STEP 0x3C +#define AW96103_BLRSTRNG_MASK GENMASK(5, 0) +#define AW96103_CHIPID_MASK GENMASK(31, 16) +#define AW96103_BLERRTRIG_MASK BIT(25) +#define AW96103_CHAN_EN_MASK GENMASK(5, 0) +#define AW96103_REG_PROXCTRL_CH(x) \ + (AW96103_REG_PROXCTRL_CH0 + (x) * AW96103_PROXTH_CH_STEP) + +#define AW96103_REG_PROXTH0_CH(x) \ + (AW96103_REG_PROXTH0_CH0 + (x) * AW96103_PROXTH_CH_STEP) + +/** + * struct aw_bin - Store the data obtained from parsing the configuration file. 
+ * @chip_type: Frame header information-chip type + * @valid_data_len: Length of valid data obtained after parsing + * @valid_data_addr: The offset address of the valid data obtained + * after parsing relative to info + * @len: The size of the bin file obtained from the firmware + * @data: Store the bin file obtained from the firmware + */ +struct aw_bin { + unsigned char chip_type[8]; + unsigned int valid_data_len; + unsigned int valid_data_addr; + unsigned int len; + unsigned char data[] __counted_by(len); +}; + +enum aw96103_sar_vers { + AW96103 = 2, + AW96103A = 6, + AW96103B = 0xa, +}; + +enum aw96103_operation_mode { + AW96103_ACTIVE_MODE = 1, + AW96103_SLEEP_MODE = 2, + AW96103_DEEPSLEEP_MODE = 3, + AW96103B_DEEPSLEEP_MODE = 4, +}; + +enum aw96103_sensor_type { + AW96103_VAL, + AW96105_VAL, +}; + +struct aw_channels_info { + bool used; + unsigned int old_irq_status; +}; + +struct aw_chip_info { + const char *name; + struct iio_chan_spec const *channels; + int num_channels; +}; + +struct aw96103 { + unsigned int hostirqen; + struct regmap *regmap; + struct device *dev; + /* + * There is one more logical channel than the actual channels, + * and the extra logical channel is used for temperature detection + * but not for status detection. The specific channel used for + * temperature detection is determined by the register configuration. + */ + struct aw_channels_info channels_arr[6]; + unsigned int max_channels; + unsigned int chan_en; +}; + +static const unsigned int aw96103_reg_default[] = { + 0x0000, 0x00003f3f, 0x0004, 0x00000064, 0x0008, 0x0017c11e, + 0x000c, 0x05000000, 0x0010, 0x00093ffd, 0x0014, 0x19240009, + 0x0018, 0xd81c0207, 0x001c, 0xff000000, 0x0020, 0x00241900, + 0x0024, 0x00093ff7, 0x0028, 0x58020009, 0x002c, 0xd81c0207, + 0x0030, 0xff000000, 0x0034, 0x00025800, 0x0038, 0x00093fdf, + 0x003c, 0x7d3b0009, 0x0040, 0xd81c0207, 0x0044, 0xff000000, + 0x0048, 0x003b7d00, 0x004c, 0x00093f7f, 0x0050, 0xe9310009, + 0x0054, 0xd81c0207, 0x0058, 0xff000000, 0x005c, 0x0031e900, + 0x0060, 0x00093dff, 0x0064, 0x1a0c0009, 0x0068, 0xd81c0207, + 0x006c, 0xff000000, 0x0070, 0x000c1a00, 0x0074, 0x80093fff, + 0x0078, 0x043d0009, 0x007c, 0xd81c0207, 0x0080, 0xff000000, + 0x0084, 0x003d0400, 0x00a0, 0xe6400000, 0x00a4, 0x00000000, + 0x00a8, 0x010408d2, 0x00ac, 0x00000000, 0x00b0, 0x00000000, + 0x00b8, 0x00005fff, 0x00bc, 0x00000000, 0x00c0, 0x00000000, + 0x00c4, 0x00000000, 0x00c8, 0x00000000, 0x00cc, 0x00000000, + 0x00d0, 0x00000000, 0x00d4, 0x00000000, 0x00d8, 0x00000000, + 0x00dc, 0xe6447800, 0x00e0, 0x78000000, 0x00e4, 0x010408d2, + 0x00e8, 0x00000000, 0x00ec, 0x00000000, 0x00f4, 0x00005fff, + 0x00f8, 0x00000000, 0x00fc, 0x00000000, 0x0100, 0x00000000, + 0x0104, 0x00000000, 0x0108, 0x00000000, 0x010c, 0x02000000, + 0x0110, 0x00000000, 0x0114, 0x00000000, 0x0118, 0xe6447800, + 0x011c, 0x78000000, 0x0120, 0x010408d2, 0x0124, 0x00000000, + 0x0128, 0x00000000, 0x0130, 0x00005fff, 0x0134, 0x00000000, + 0x0138, 0x00000000, 0x013c, 0x00000000, 0x0140, 0x00000000, + 0x0144, 0x00000000, 0x0148, 0x02000000, 0x014c, 0x00000000, + 0x0150, 0x00000000, 0x0154, 0xe6447800, 0x0158, 0x78000000, + 0x015c, 0x010408d2, 0x0160, 0x00000000, 0x0164, 0x00000000, + 0x016c, 0x00005fff, 0x0170, 0x00000000, 0x0174, 0x00000000, + 0x0178, 0x00000000, 0x017c, 0x00000000, 0x0180, 0x00000000, + 0x0184, 0x02000000, 0x0188, 0x00000000, 0x018c, 0x00000000, + 0x0190, 0xe6447800, 0x0194, 0x78000000, 0x0198, 0x010408d2, + 0x019c, 0x00000000, 0x01a0, 0x00000000, 0x01a8, 0x00005fff, + 0x01ac, 0x00000000, 0x01b0, 0x00000000, 
0x01b4, 0x00000000, + 0x01b8, 0x00000000, 0x01bc, 0x00000000, 0x01c0, 0x02000000, + 0x01c4, 0x00000000, 0x01c8, 0x00000000, 0x01cc, 0xe6407800, + 0x01d0, 0x78000000, 0x01d4, 0x010408d2, 0x01d8, 0x00000000, + 0x01dc, 0x00000000, 0x01e4, 0x00005fff, 0x01e8, 0x00000000, + 0x01ec, 0x00000000, 0x01f0, 0x00000000, 0x01f4, 0x00000000, + 0x01f8, 0x00000000, 0x01fc, 0x02000000, 0x0200, 0x00000000, + 0x0204, 0x00000000, 0x0208, 0x00000008, 0x020c, 0x0000000d, + 0x41fc, 0x00000000, 0x4400, 0x00000000, 0x4410, 0x00000000, + 0x4420, 0x00000000, 0x4430, 0x00000000, 0x4440, 0x00000000, + 0x4450, 0x00000000, 0x4460, 0x00000000, 0x4470, 0x00000000, + 0xf080, 0x00003018, 0xf084, 0x00000fff, 0xf800, 0x00000000, + 0xf804, 0x00002e00, 0xf8d0, 0x00000001, 0xf8d4, 0x00000000, + 0xff00, 0x00000301, 0xff0c, 0x01000000, 0xffe0, 0x00000000, + 0xfff4, 0x00004011, 0x0090, 0x00000000, 0x0094, 0x00000000, + 0x0098, 0x00000000, 0x009c, 0x3f3f3f3f, +}; + +static const struct iio_event_spec aw_common_events[3] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_separate = BIT(IIO_EV_INFO_PERIOD), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_separate = BIT(IIO_EV_INFO_PERIOD), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_ENABLE) | + BIT(IIO_EV_INFO_HYSTERESIS) | + BIT(IIO_EV_INFO_VALUE), + } +}; + +#define AW_IIO_CHANNEL(idx) \ +{ \ + .type = IIO_PROXIMITY, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .indexed = 1, \ + .channel = idx, \ + .event_spec = aw_common_events, \ + .num_event_specs = ARRAY_SIZE(aw_common_events), \ +} \ + +static const struct iio_chan_spec aw96103_channels[] = { + AW_IIO_CHANNEL(0), + AW_IIO_CHANNEL(1), + AW_IIO_CHANNEL(2), + AW_IIO_CHANNEL(3), +}; + +static const struct iio_chan_spec aw96105_channels[] = { + AW_IIO_CHANNEL(0), + AW_IIO_CHANNEL(1), + AW_IIO_CHANNEL(2), + AW_IIO_CHANNEL(3), + AW_IIO_CHANNEL(4), + AW_IIO_CHANNEL(5), +}; + +static const struct aw_chip_info aw_chip_info_tbl[] = { + [AW96103_VAL] = { + .name = "aw96103_sensor", + .channels = aw96103_channels, + .num_channels = ARRAY_SIZE(aw96103_channels), + }, + [AW96105_VAL] = { + .name = "aw96105_sensor", + .channels = aw96105_channels, + .num_channels = ARRAY_SIZE(aw96105_channels), + }, +}; + +static void aw96103_parsing_bin_file(struct aw_bin *bin) +{ + bin->valid_data_addr = AW96103_BIN_VALID_DATA_OFFSET; + bin->valid_data_len = + *(unsigned int *)(bin->data + AW96103_BIN_DATA_LEN_OFFSET) - + AW96103_BIN_DATA_REG_NUM_SIZE; + memcpy(bin->chip_type, bin->data + AW96103_BIN_CHIP_TYPE_OFFSET, + AW96103_BIN_CHIP_TYPE_SIZE); +} + +static const struct regmap_config aw96103_regmap_confg = { + .reg_bits = 16, + .val_bits = 32, +}; + +static int aw96103_get_diff_raw(struct aw96103 *aw96103, unsigned int chan, + int *buf) +{ + u32 data; + int ret; + + ret = regmap_read(aw96103->regmap, + AW96103_REG_DIFF_CH0 + chan * 4, &data); + if (ret) + return ret; + *buf = (int)(data / AW_DATA_PROCESS_FACTOR); + + return 0; +} + +static int aw96103_read_raw(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + int *val, int *val2, long mask) +{ + struct aw96103 *aw96103 = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = aw96103_get_diff_raw(aw96103, chan->channel, val); + if (ret) + return ret; + + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int aw96103_read_thresh(struct aw96103 *aw96103, + const struct iio_chan_spec *chan, int *val) +{ + int ret; + + ret = 
regmap_read(aw96103->regmap, + AW96103_REG_PROXTH0_CH(chan->channel), val); + if (ret) + return ret; + + return IIO_VAL_INT; +} + +static int aw96103_read_out_debounce(struct aw96103 *aw96103, + const struct iio_chan_spec *chan, + int *val) +{ + unsigned int reg_val; + int ret; + + ret = regmap_read(aw96103->regmap, + AW96103_REG_PROXCTRL_CH(chan->channel), ®_val); + if (ret) + return ret; + *val = FIELD_GET(AW96103_OUTDEB_MASK, reg_val); + + return IIO_VAL_INT; +} + +static int aw96103_read_in_debounce(struct aw96103 *aw96103, + const struct iio_chan_spec *chan, int *val) +{ + unsigned int reg_val; + int ret; + + ret = regmap_read(aw96103->regmap, + AW96103_REG_PROXCTRL_CH(chan->channel), ®_val); + if (ret) + return ret; + *val = FIELD_GET(AW96103_INDEB_MASK, reg_val); + + return IIO_VAL_INT; +} + +static int aw96103_read_hysteresis(struct aw96103 *aw96103, + const struct iio_chan_spec *chan, int *val) +{ + unsigned int reg_val; + int ret; + + ret = regmap_read(aw96103->regmap, + AW96103_REG_PROXCTRL_CH(chan->channel), ®_val); + if (ret) + return ret; + *val = FIELD_GET(AW96103_THHYST_MASK, reg_val); + + return IIO_VAL_INT; +} + +static int aw96103_read_event_val(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int *val, int *val2) +{ + struct aw96103 *aw96103 = iio_priv(indio_dev); + + if (chan->type != IIO_PROXIMITY) + return -EINVAL; + + switch (info) { + case IIO_EV_INFO_VALUE: + return aw96103_read_thresh(aw96103, chan, val); + case IIO_EV_INFO_PERIOD: + switch (dir) { + case IIO_EV_DIR_RISING: + return aw96103_read_out_debounce(aw96103, chan, val); + case IIO_EV_DIR_FALLING: + return aw96103_read_in_debounce(aw96103, chan, val); + default: + return -EINVAL; + } + case IIO_EV_INFO_HYSTERESIS: + return aw96103_read_hysteresis(aw96103, chan, val); + default: + return -EINVAL; + } +} + +static int aw96103_write_event_val(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int val, int val2) +{ + struct aw96103 *aw96103 = iio_priv(indio_dev); + + if (chan->type != IIO_PROXIMITY) + return -EINVAL; + + switch (info) { + case IIO_EV_INFO_VALUE: + return regmap_write(aw96103->regmap, + AW96103_REG_PROXTH0_CH(chan->channel), val); + case IIO_EV_INFO_PERIOD: + switch (dir) { + case IIO_EV_DIR_RISING: + return regmap_update_bits(aw96103->regmap, + AW96103_REG_PROXCTRL_CH(chan->channel), + AW96103_OUTDEB_MASK, + FIELD_PREP(AW96103_OUTDEB_MASK, val)); + + case IIO_EV_DIR_FALLING: + return regmap_update_bits(aw96103->regmap, + AW96103_REG_PROXCTRL_CH(chan->channel), + AW96103_INDEB_MASK, + FIELD_PREP(AW96103_INDEB_MASK, val)); + default: + return -EINVAL; + } + case IIO_EV_INFO_HYSTERESIS: + return regmap_update_bits(aw96103->regmap, + AW96103_REG_PROXCTRL_CH(chan->channel), + AW96103_THHYST_MASK, + FIELD_PREP(AW96103_THHYST_MASK, val)); + default: + return -EINVAL; + } +} + +static int aw96103_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct aw96103 *aw96103 = iio_priv(indio_dev); + + return aw96103->channels_arr[chan->channel].used; +} + +static int aw96103_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, int state) +{ + struct aw96103 *aw96103 = iio_priv(indio_dev); + + 
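/*
 * Enabling or disabling the threshold event also flips the matching
 * channel-enable bit in SCANCTRL0 below, so a channel with no event
 * consumer is taken out of the measurement scan entirely.
 */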
aw96103->channels_arr[chan->channel].used = !!state; + + return regmap_update_bits(aw96103->regmap, AW96103_REG_SCANCTRL0, + BIT(chan->channel), + state ? BIT(chan->channel) : 0); +} + +static struct iio_info iio_info = { + .read_raw = aw96103_read_raw, + .read_event_value = aw96103_read_event_val, + .write_event_value = aw96103_write_event_val, + .read_event_config = aw96103_read_event_config, + .write_event_config = aw96103_write_event_config, +}; + +static int aw96103_channel_scan_start(struct aw96103 *aw96103) +{ + int ret; + + ret = regmap_write(aw96103->regmap, AW96103_REG_CMD, + AW96103_ACTIVE_MODE); + if (ret) + return ret; + + return regmap_write(aw96103->regmap, AW96103_REG_IRQEN, + aw96103->hostirqen); +} + +static int aw96103_reg_version_comp(struct aw96103 *aw96103, + struct aw_bin *aw_bin) +{ + u32 blfilt1_data, fw_ver; + unsigned char i; + int ret; + + ret = regmap_read(aw96103->regmap, AW96103_REG_FWVER2, &fw_ver); + if (ret) + return ret; + /* + * If the chip version is AW96103A and the loaded register + * configuration file is for AW96103, special handling of the + * AW96103_REG_BLRSTRNG_CH0 register is required. + */ + if ((fw_ver != AW96103A) || (aw_bin->chip_type[7] != '\0')) + return 0; + + for (i = 0; i < aw96103->max_channels; i++) { + ret = regmap_read(aw96103->regmap, + AW96103_REG_BLFILT_CH0 + (AW96103_BLFILT_CH_STEP * i), + &blfilt1_data); + if (ret) + return ret; + if (FIELD_GET(AW96103_BLERRTRIG_MASK, blfilt1_data) != 1) + return 0; + + ret = regmap_update_bits(aw96103->regmap, + AW96103_REG_BLRSTRNG_CH0 + (AW96103_BLFILT_CH_STEP * i), + AW96103_BLRSTRNG_MASK, 1 << i); + if (ret) + return ret; + } + + return 0; +} + +static int aw96103_bin_valid_loaded(struct aw96103 *aw96103, + struct aw_bin *aw_bin_data_s) +{ + unsigned int start_addr = aw_bin_data_s->valid_data_addr; + u32 i, reg_data; + u16 reg_addr; + int ret; + + for (i = 0; i < aw_bin_data_s->valid_data_len; + i += 6, start_addr += 6) { + reg_addr = get_unaligned_le16(aw_bin_data_s->data + start_addr); + reg_data = get_unaligned_le32(aw_bin_data_s->data + + start_addr + 2); + if ((reg_addr == AW96103_REG_EEDA0) || + (reg_addr == AW96103_REG_EEDA1)) + continue; + if (reg_addr == AW96103_REG_IRQEN) { + aw96103->hostirqen = reg_data; + continue; + } + if (reg_addr == AW96103_REG_SCANCTRL0) + aw96103->chan_en = FIELD_GET(AW96103_CHAN_EN_MASK, + reg_data); + + ret = regmap_write(aw96103->regmap, reg_addr, reg_data); + if (ret < 0) + return ret; + } + + ret = aw96103_reg_version_comp(aw96103, aw_bin_data_s); + if (ret) + return ret; + + return aw96103_channel_scan_start(aw96103); +} + +static int aw96103_para_loaded(struct aw96103 *aw96103) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(aw96103_reg_default); i += 2) { + ret = regmap_write(aw96103->regmap, + (u16)aw96103_reg_default[i], + (u32)aw96103_reg_default[i + 1]); + if (ret) + return ret; + if (aw96103_reg_default[i] == AW96103_REG_IRQEN) + aw96103->hostirqen = aw96103_reg_default[i + 1]; + else if (aw96103_reg_default[i] == AW96103_REG_SCANCTRL0) + aw96103->chan_en = FIELD_GET(AW96103_CHAN_EN_MASK, + aw96103_reg_default[i + 1]); + } + + return aw96103_channel_scan_start(aw96103); +} + +static int aw96103_cfg_all_loaded(const struct firmware *cont, + struct aw96103 *aw96103) +{ + if (!cont) + return -EINVAL; + + struct aw_bin *aw_bin __free(kfree) = + kzalloc(cont->size + sizeof(*aw_bin), GFP_KERNEL); + if (!aw_bin) + return -ENOMEM; + + aw_bin->len = cont->size; + memcpy(aw_bin->data, cont->data, cont->size); + release_firmware(cont); + 
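/*
 * cont->data has already been copied into aw_bin->data, so the firmware
 * can be released before parsing. Parsing only records offsets into
 * that copy: per the AW96103_BIN_* defines, the chip-type string sits
 * at offset 24, the payload length at offset 16 (less the 4-byte
 * register-count field) and the register/value pairs start at offset
 * 64, each pair a little-endian u16 address followed by a u32 value.
 */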
aw96103_parsing_bin_file(aw_bin); + + return aw96103_bin_valid_loaded(aw96103, aw_bin); +} + +static void aw96103_cfg_update(const struct firmware *fw, void *data) +{ + struct aw96103 *aw96103 = data; + int ret, i; + + if (!fw || !fw->data) { + dev_err(aw96103->dev, "No firmware.\n"); + return; + } + + ret = aw96103_cfg_all_loaded(fw, aw96103); + /* + * If loading the register configuration file fails, + * load the default register configuration in the driver to + * ensure the basic functionality of the device. + */ + if (ret) { + ret = aw96103_para_loaded(aw96103); + if (ret) { + dev_err(aw96103->dev, "load param error.\n"); + return; + } + } + + for (i = 0; i < aw96103->max_channels; i++) { + if ((aw96103->chan_en >> i) & 0x01) + aw96103->channels_arr[i].used = true; + else + aw96103->channels_arr[i].used = false; + } +} + +static int aw96103_sw_reset(struct aw96103 *aw96103) +{ + int ret; + + ret = regmap_write(aw96103->regmap, AW96103_REG_RESET, 0); + /* + * After reset, the initialization process starts to perform and + * it will last for about 20ms. + */ + msleep(20); + + return ret; +} + +enum aw96103_irq_trigger_position { + FAR = 0, + TRIGGER_TH0 = 0x01, + TRIGGER_TH1 = 0x03, + TRIGGER_TH2 = 0x07, + TRIGGER_TH3 = 0x0f, +}; + +static irqreturn_t aw96103_irq(int irq, void *data) +{ + unsigned int irq_status, curr_status_val, curr_status; + struct iio_dev *indio_dev = data; + struct aw96103 *aw96103 = iio_priv(indio_dev); + int ret, i; + + ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC, &irq_status); + if (ret) + return IRQ_HANDLED; + + ret = regmap_read(aw96103->regmap, AW96103_REG_STAT0, &curr_status_val); + if (ret) + return IRQ_HANDLED; + + /* + * Iteratively analyze the interrupt status of different channels, + * with each channel having 4 interrupt states. 
+ */ + for (i = 0; i < aw96103->max_channels; i++) { + if (!aw96103->channels_arr[i].used) + continue; + + curr_status = (((curr_status_val >> (24 + i)) & 0x1)) | + (((curr_status_val >> (16 + i)) & 0x1) << 1) | + (((curr_status_val >> (8 + i)) & 0x1) << 2) | + (((curr_status_val >> i) & 0x1) << 3); + if (aw96103->channels_arr[i].old_irq_status == curr_status) + continue; + + switch (curr_status) { + case FAR: + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, i, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_RISING), + iio_get_time_ns(indio_dev)); + break; + case TRIGGER_TH0: + case TRIGGER_TH1: + case TRIGGER_TH2: + case TRIGGER_TH3: + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, i, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_FALLING), + iio_get_time_ns(indio_dev)); + break; + default: + return IRQ_HANDLED; + } + aw96103->channels_arr[i].old_irq_status = curr_status; + } + + return IRQ_HANDLED; +} + +static int aw96103_interrupt_init(struct iio_dev *indio_dev, + struct i2c_client *i2c) +{ + struct aw96103 *aw96103 = iio_priv(indio_dev); + unsigned int irq_status; + int ret; + + ret = regmap_write(aw96103->regmap, AW96103_REG_IRQEN, 0); + if (ret) + return ret; + ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC, &irq_status); + if (ret) + return ret; + ret = devm_request_threaded_irq(aw96103->dev, i2c->irq, NULL, + aw96103_irq, IRQF_ONESHOT, + "aw96103_irq", indio_dev); + if (ret) + return ret; + + return regmap_write(aw96103->regmap, AW96103_REG_IRQEN, + aw96103->hostirqen); +} + +static int aw96103_wait_chip_init(struct aw96103 *aw96103) +{ + unsigned int cnt = 20; + u32 reg_data; + int ret; + + while (cnt--) { + /* + * The device should generate an initialization completion + * interrupt within 20ms. + */ + ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC, + &reg_data); + if (ret) + return ret; + + if (FIELD_GET(AW96103_INITOVERIRQ_MASK, reg_data)) + return 0; + fsleep(1000); + } + + return -ETIMEDOUT; +} + +static int aw96103_read_chipid(struct aw96103 *aw96103) +{ + unsigned char cnt = 0; + u32 reg_val = 0; + int ret; + + while (cnt < 3) { + /* + * This retry mechanism and the subsequent delay are just + * attempts to read the chip ID as much as possible, + * preventing occasional communication failures from causing + * the chip ID read to fail. 
+ */ + ret = regmap_read(aw96103->regmap, AW96103_REG_CHIPID, + &reg_val); + if (ret < 0) { + cnt++; + fsleep(2000); + continue; + } + break; + } + if (cnt == 3) + return -ETIMEDOUT; + + if (FIELD_GET(AW96103_CHIPID_MASK, reg_val) != AW96103_CHIP_ID) + dev_info(aw96103->dev, + "unexpected chipid, id=0x%08X\n", reg_val); + + return 0; +} + +static int aw96103_i2c_probe(struct i2c_client *i2c) +{ + const struct aw_chip_info *chip_info; + struct iio_dev *indio_dev; + struct aw96103 *aw96103; + int ret; + + indio_dev = devm_iio_device_alloc(&i2c->dev, sizeof(*aw96103)); + if (!indio_dev) + return -ENOMEM; + + aw96103 = iio_priv(indio_dev); + aw96103->dev = &i2c->dev; + chip_info = i2c_get_match_data(i2c); + aw96103->max_channels = chip_info->num_channels; + + aw96103->regmap = devm_regmap_init_i2c(i2c, &aw96103_regmap_confg); + if (IS_ERR(aw96103->regmap)) + return PTR_ERR(aw96103->regmap); + + ret = devm_regulator_get_enable(aw96103->dev, "vcc"); + if (ret < 0) + return ret; + + ret = aw96103_read_chipid(aw96103); + if (ret) + return ret; + + ret = aw96103_sw_reset(aw96103); + if (ret) + return ret; + + ret = aw96103_wait_chip_init(aw96103); + if (ret) + return ret; + + ret = request_firmware_nowait(THIS_MODULE, true, "aw96103_0.bin", + aw96103->dev, GFP_KERNEL, aw96103, + aw96103_cfg_update); + if (ret) + return ret; + + ret = aw96103_interrupt_init(indio_dev, i2c); + if (ret) + return ret; + + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->num_channels = chip_info->num_channels; + indio_dev->channels = chip_info->channels; + indio_dev->info = &iio_info; + indio_dev->name = chip_info->name; + + return devm_iio_device_register(aw96103->dev, indio_dev); +} + +static const struct of_device_id aw96103_dt_match[] = { + { + .compatible = "awinic,aw96103", + .data = &aw_chip_info_tbl[AW96103_VAL] + }, + { + .compatible = "awinic,aw96105", + .data = &aw_chip_info_tbl[AW96105_VAL] + }, + { } +}; +MODULE_DEVICE_TABLE(of, aw96103_dt_match); + +static const struct i2c_device_id aw96103_i2c_id[] = { + { "aw96103", (kernel_ulong_t)&aw_chip_info_tbl[AW96103_VAL] }, + { "aw96105", (kernel_ulong_t)&aw_chip_info_tbl[AW96105_VAL] }, + { } +}; +MODULE_DEVICE_TABLE(i2c, aw96103_i2c_id); + +static struct i2c_driver aw96103_i2c_driver = { + .driver = { + .name = "aw96103_sensor", + .of_match_table = aw96103_dt_match, + }, + .probe = aw96103_i2c_probe, + .id_table = aw96103_i2c_id, +}; +module_i2c_driver(aw96103_i2c_driver); + +MODULE_AUTHOR("Wang Shuaijie "); +MODULE_DESCRIPTION("Driver for Awinic AW96103 proximity sensor"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/proximity/cros_ec_mkbp_proximity.c b/drivers/iio/proximity/cros_ec_mkbp_proximity.c index 4df506bb8b3810..c25472b14d4be0 100644 --- a/drivers/iio/proximity/cros_ec_mkbp_proximity.c +++ b/drivers/iio/proximity/cros_ec_mkbp_proximity.c @@ -6,10 +6,10 @@ */ #include +#include #include #include #include -#include #include #include #include @@ -21,7 +21,7 @@ #include #include -#include +#include struct cros_ec_mkbp_proximity_data { struct cros_ec_device *ec; diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c new file mode 100644 index 00000000000000..d8fb34060d3db8 --- /dev/null +++ b/drivers/iio/proximity/hx9023s.c @@ -0,0 +1,1144 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 NanjingTianyihexin Electronics Ltd. + * http://www.tianyihexin.com + * + * Driver for NanjingTianyihexin HX9023S Cap Sensor. 
+ * Datasheet available at: + * http://www.tianyihexin.com/ueditor/php/upload/file/20240614/1718336303992081.pdf + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define HX9023S_CHIP_ID 0x1D +#define HX9023S_CH_NUM 5 +#define HX9023S_POS 0x03 +#define HX9023S_NEG 0x02 +#define HX9023S_NOT_CONNECTED 16 + +#define HX9023S_GLOBAL_CTRL0 0x00 +#define HX9023S_PRF_CFG 0x02 +#define HX9023S_CH0_CFG_7_0 0x03 +#define HX9023S_CH4_CFG_9_8 0x0C +#define HX9023S_RANGE_7_0 0x0D +#define HX9023S_RANGE_9_8 0x0E +#define HX9023S_RANGE_18_16 0x0F +#define HX9023S_AVG0_NOSR0_CFG 0x10 +#define HX9023S_NOSR12_CFG 0x11 +#define HX9023S_NOSR34_CFG 0x12 +#define HX9023S_AVG12_CFG 0x13 +#define HX9023S_AVG34_CFG 0x14 +#define HX9023S_OFFSET_DAC0_7_0 0x15 +#define HX9023S_OFFSET_DAC4_9_8 0x1E +#define HX9023S_SAMPLE_NUM_7_0 0x1F +#define HX9023S_INTEGRATION_NUM_7_0 0x21 +#define HX9023S_CH_NUM_CFG 0x24 +#define HX9023S_LP_ALP_4_CFG 0x29 +#define HX9023S_LP_ALP_1_0_CFG 0x2A +#define HX9023S_LP_ALP_3_2_CFG 0x2B +#define HX9023S_UP_ALP_1_0_CFG 0x2C +#define HX9023S_UP_ALP_3_2_CFG 0x2D +#define HX9023S_DN_UP_ALP_0_4_CFG 0x2E +#define HX9023S_DN_ALP_2_1_CFG 0x2F +#define HX9023S_DN_ALP_4_3_CFG 0x30 +#define HX9023S_RAW_BL_RD_CFG 0x38 +#define HX9023S_INTERRUPT_CFG 0x39 +#define HX9023S_INTERRUPT_CFG1 0x3A +#define HX9023S_CALI_DIFF_CFG 0x3B +#define HX9023S_DITHER_CFG 0x3C +#define HX9023S_DEVICE_ID 0x60 +#define HX9023S_PROX_STATUS 0x6B +#define HX9023S_PROX_INT_HIGH_CFG 0x6C +#define HX9023S_PROX_INT_LOW_CFG 0x6D +#define HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 0x80 +#define HX9023S_PROX_LOW_DIFF_CFG_CH0_0 0x88 +#define HX9023S_PROX_LOW_DIFF_CFG_CH3_1 0x8F +#define HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 0x9E +#define HX9023S_PROX_HIGH_DIFF_CFG_CH4_1 0x9F +#define HX9023S_PROX_LOW_DIFF_CFG_CH4_0 0xA2 +#define HX9023S_PROX_LOW_DIFF_CFG_CH4_1 0xA3 +#define HX9023S_CAP_INI_CH4_0 0xB3 +#define HX9023S_LP_DIFF_CH4_2 0xBA +#define HX9023S_RAW_BL_CH4_0 0xB5 +#define HX9023S_LP_DIFF_CH4_0 0xB8 +#define HX9023S_DSP_CONFIG_CTRL1 0xC8 +#define HX9023S_CAP_INI_CH0_0 0xE0 +#define HX9023S_RAW_BL_CH0_0 0xE8 +#define HX9023S_LP_DIFF_CH0_0 0xF4 +#define HX9023S_LP_DIFF_CH3_2 0xFF + +#define HX9023S_DATA_LOCK_MASK BIT(4) +#define HX9023S_INTERRUPT_MASK GENMASK(9, 0) +#define HX9023S_PROX_DEBOUNCE_MASK GENMASK(3, 0) + +struct hx9023s_ch_data { + s16 raw; /* Raw Data*/ + s16 lp; /* Low Pass Filter Data*/ + s16 bl; /* Base Line Data */ + s16 diff; /* Difference of Low Pass Data and Base Line Data */ + + struct { + unsigned int near; + unsigned int far; + } thres; + + u16 dac; + u8 channel_positive; + u8 channel_negative; + bool sel_bl; + bool sel_raw; + bool sel_diff; + bool sel_lp; + bool enable; +}; + +struct hx9023s_data { + struct iio_trigger *trig; + struct regmap *regmap; + unsigned long chan_prox_stat; + unsigned long chan_read; + unsigned long chan_event; + unsigned long ch_en_stat; + unsigned long chan_in_use; + unsigned int prox_state_reg; + bool trigger_enabled; + + struct { + __le16 channels[HX9023S_CH_NUM]; + s64 ts __aligned(8); + } buffer; + + /* + * Serialize access to registers below: + * HX9023S_PROX_INT_LOW_CFG, + * HX9023S_PROX_INT_HIGH_CFG, + * HX9023S_INTERRUPT_CFG, + * HX9023S_CH_NUM_CFG + * Serialize access to channel configuration in + * hx9023s_push_events and hx9023s_trigger_handler. 
+ */ + struct mutex mutex; + struct hx9023s_ch_data ch_data[HX9023S_CH_NUM]; +}; + +static const struct reg_sequence hx9023s_reg_init_list[] = { + /* scan period */ + REG_SEQ0(HX9023S_PRF_CFG, 0x17), + + /* full scale of conversion phase of each channel */ + REG_SEQ0(HX9023S_RANGE_7_0, 0x11), + REG_SEQ0(HX9023S_RANGE_9_8, 0x02), + REG_SEQ0(HX9023S_RANGE_18_16, 0x00), + + /* ADC average number and OSR number of each channel */ + REG_SEQ0(HX9023S_AVG0_NOSR0_CFG, 0x71), + REG_SEQ0(HX9023S_NOSR12_CFG, 0x44), + REG_SEQ0(HX9023S_NOSR34_CFG, 0x00), + REG_SEQ0(HX9023S_AVG12_CFG, 0x33), + REG_SEQ0(HX9023S_AVG34_CFG, 0x00), + + /* sample & integration frequency of the ADC */ + REG_SEQ0(HX9023S_SAMPLE_NUM_7_0, 0x65), + REG_SEQ0(HX9023S_INTEGRATION_NUM_7_0, 0x65), + + /* coefficient of the first order low pass filter during each channel */ + REG_SEQ0(HX9023S_LP_ALP_1_0_CFG, 0x22), + REG_SEQ0(HX9023S_LP_ALP_3_2_CFG, 0x22), + REG_SEQ0(HX9023S_LP_ALP_4_CFG, 0x02), + + /* up coefficient of the first order low pass filter during each channel */ + REG_SEQ0(HX9023S_UP_ALP_1_0_CFG, 0x88), + REG_SEQ0(HX9023S_UP_ALP_3_2_CFG, 0x88), + REG_SEQ0(HX9023S_DN_UP_ALP_0_4_CFG, 0x18), + + /* down coefficient of the first order low pass filter during each channel */ + REG_SEQ0(HX9023S_DN_ALP_2_1_CFG, 0x11), + REG_SEQ0(HX9023S_DN_ALP_4_3_CFG, 0x11), + + /* selection of data for the Data Mux Register to output data */ + REG_SEQ0(HX9023S_RAW_BL_RD_CFG, 0xF0), + + /* enable the interrupt function */ + REG_SEQ0(HX9023S_INTERRUPT_CFG, 0xFF), + REG_SEQ0(HX9023S_INTERRUPT_CFG1, 0x3B), + REG_SEQ0(HX9023S_DITHER_CFG, 0x21), + + /* threshold of the offset compensation */ + REG_SEQ0(HX9023S_CALI_DIFF_CFG, 0x07), + + /* proximity persistency number(near & far) */ + REG_SEQ0(HX9023S_PROX_INT_HIGH_CFG, 0x01), + REG_SEQ0(HX9023S_PROX_INT_LOW_CFG, 0x01), + + /* disable the data lock */ + REG_SEQ0(HX9023S_DSP_CONFIG_CTRL1, 0x00), +}; + +static const struct iio_event_spec hx9023s_events[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD), + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD), + .mask_separate = BIT(IIO_EV_INFO_VALUE), + + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + }, +}; + +#define HX9023S_CHANNEL(idx) \ +{ \ + .type = IIO_PROXIMITY, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\ + .indexed = 1, \ + .channel = idx, \ + .address = 0, \ + .event_spec = hx9023s_events, \ + .num_event_specs = ARRAY_SIZE(hx9023s_events), \ + .scan_index = idx, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ +} + +static const struct iio_chan_spec hx9023s_channels[] = { + HX9023S_CHANNEL(0), + HX9023S_CHANNEL(1), + HX9023S_CHANNEL(2), + HX9023S_CHANNEL(3), + HX9023S_CHANNEL(4), + IIO_CHAN_SOFT_TIMESTAMP(5), +}; + +static const unsigned int hx9023s_samp_freq_table[] = { + 2, 2, 4, 6, 8, 10, 14, 18, 22, 26, + 30, 34, 38, 42, 46, 50, 56, 62, 68, 74, + 80, 90, 100, 200, 300, 400, 600, 800, 1000, 2000, + 3000, 4000, +}; + +static const struct regmap_range hx9023s_rd_reg_ranges[] = { + regmap_reg_range(HX9023S_GLOBAL_CTRL0, HX9023S_LP_DIFF_CH3_2), +}; + +static const struct regmap_range hx9023s_wr_reg_ranges[] = { + regmap_reg_range(HX9023S_GLOBAL_CTRL0, HX9023S_LP_DIFF_CH3_2), +}; + 
+static const struct regmap_range hx9023s_volatile_reg_ranges[] = { + regmap_reg_range(HX9023S_CAP_INI_CH4_0, HX9023S_LP_DIFF_CH4_2), + regmap_reg_range(HX9023S_CAP_INI_CH0_0, HX9023S_LP_DIFF_CH3_2), + regmap_reg_range(HX9023S_PROX_STATUS, HX9023S_PROX_STATUS), +}; + +static const struct regmap_access_table hx9023s_rd_regs = { + .yes_ranges = hx9023s_rd_reg_ranges, + .n_yes_ranges = ARRAY_SIZE(hx9023s_rd_reg_ranges), +}; + +static const struct regmap_access_table hx9023s_wr_regs = { + .yes_ranges = hx9023s_wr_reg_ranges, + .n_yes_ranges = ARRAY_SIZE(hx9023s_wr_reg_ranges), +}; + +static const struct regmap_access_table hx9023s_volatile_regs = { + .yes_ranges = hx9023s_volatile_reg_ranges, + .n_yes_ranges = ARRAY_SIZE(hx9023s_volatile_reg_ranges), +}; + +static const struct regmap_config hx9023s_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_MAPLE, + .rd_table = &hx9023s_rd_regs, + .wr_table = &hx9023s_wr_regs, + .volatile_table = &hx9023s_volatile_regs, +}; + +static int hx9023s_interrupt_enable(struct hx9023s_data *data) +{ + return regmap_update_bits(data->regmap, HX9023S_INTERRUPT_CFG, + HX9023S_INTERRUPT_MASK, HX9023S_INTERRUPT_MASK); +} + +static int hx9023s_interrupt_disable(struct hx9023s_data *data) +{ + return regmap_update_bits(data->regmap, HX9023S_INTERRUPT_CFG, + HX9023S_INTERRUPT_MASK, 0x00); +} + +static int hx9023s_data_lock(struct hx9023s_data *data, bool locked) +{ + if (locked) + return regmap_update_bits(data->regmap, + HX9023S_DSP_CONFIG_CTRL1, + HX9023S_DATA_LOCK_MASK, + HX9023S_DATA_LOCK_MASK); + else + return regmap_update_bits(data->regmap, + HX9023S_DSP_CONFIG_CTRL1, + HX9023S_DATA_LOCK_MASK, 0); +} + +static int hx9023s_ch_cfg(struct hx9023s_data *data) +{ + __le16 reg_list[HX9023S_CH_NUM]; + u8 ch_pos[HX9023S_CH_NUM]; + u8 ch_neg[HX9023S_CH_NUM]; + /* Bit positions corresponding to input pin connections */ + u8 conn_cs[HX9023S_CH_NUM] = { 0, 2, 4, 6, 8 }; + unsigned int i; + u16 reg; + + for (i = 0; i < HX9023S_CH_NUM; i++) { + ch_pos[i] = data->ch_data[i].channel_positive == HX9023S_NOT_CONNECTED ? + HX9023S_NOT_CONNECTED : conn_cs[data->ch_data[i].channel_positive]; + ch_neg[i] = data->ch_data[i].channel_negative == HX9023S_NOT_CONNECTED ? 
+ HX9023S_NOT_CONNECTED : conn_cs[data->ch_data[i].channel_negative]; + + reg = (HX9023S_POS << ch_pos[i]) | (HX9023S_NEG << ch_neg[i]); + reg_list[i] = cpu_to_le16(reg); + } + + return regmap_bulk_write(data->regmap, HX9023S_CH0_CFG_7_0, reg_list, + sizeof(reg_list)); +} + +static int hx9023s_write_far_debounce(struct hx9023s_data *data, int val) +{ + guard(mutex)(&data->mutex); + return regmap_update_bits(data->regmap, HX9023S_PROX_INT_LOW_CFG, + HX9023S_PROX_DEBOUNCE_MASK, + FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, val)); +} + +static int hx9023s_write_near_debounce(struct hx9023s_data *data, int val) +{ + guard(mutex)(&data->mutex); + return regmap_update_bits(data->regmap, HX9023S_PROX_INT_HIGH_CFG, + HX9023S_PROX_DEBOUNCE_MASK, + FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, val)); +} + +static int hx9023s_read_far_debounce(struct hx9023s_data *data, int *val) +{ + int ret; + + ret = regmap_read(data->regmap, HX9023S_PROX_INT_LOW_CFG, val); + if (ret) + return ret; + + *val = FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, *val); + + return IIO_VAL_INT; +} + +static int hx9023s_read_near_debounce(struct hx9023s_data *data, int *val) +{ + int ret; + + ret = regmap_read(data->regmap, HX9023S_PROX_INT_HIGH_CFG, val); + if (ret) + return ret; + + *val = FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, *val); + + return IIO_VAL_INT; +} + +static int hx9023s_get_thres_near(struct hx9023s_data *data, u8 ch, int *val) +{ + int ret; + __le16 buf; + unsigned int reg, tmp; + + reg = (ch == 4) ? HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 : + HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 + (ch * 2); + + ret = regmap_bulk_read(data->regmap, reg, &buf, sizeof(buf)); + if (ret) + return ret; + + tmp = (le16_to_cpu(buf) & GENMASK(9, 0)) * 32; + data->ch_data[ch].thres.near = tmp; + *val = tmp; + + return IIO_VAL_INT; +} + +static int hx9023s_get_thres_far(struct hx9023s_data *data, u8 ch, int *val) +{ + int ret; + __le16 buf; + unsigned int reg, tmp; + + reg = (ch == 4) ? HX9023S_PROX_LOW_DIFF_CFG_CH4_0 : + HX9023S_PROX_LOW_DIFF_CFG_CH0_0 + (ch * 2); + + ret = regmap_bulk_read(data->regmap, reg, &buf, sizeof(buf)); + if (ret) + return ret; + + tmp = (le16_to_cpu(buf) & GENMASK(9, 0)) * 32; + data->ch_data[ch].thres.far = tmp; + *val = tmp; + + return IIO_VAL_INT; +} + +static int hx9023s_set_thres_near(struct hx9023s_data *data, u8 ch, int val) +{ + __le16 val_le16 = cpu_to_le16((val / 32) & GENMASK(9, 0)); + unsigned int reg; + + data->ch_data[ch].thres.near = ((val / 32) & GENMASK(9, 0)) * 32; + reg = (ch == 4) ? HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 : + HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 + (ch * 2); + + return regmap_bulk_write(data->regmap, reg, &val_le16, sizeof(val_le16)); +} + +static int hx9023s_set_thres_far(struct hx9023s_data *data, u8 ch, int val) +{ + __le16 val_le16 = cpu_to_le16((val / 32) & GENMASK(9, 0)); + unsigned int reg; + + data->ch_data[ch].thres.far = ((val / 32) & GENMASK(9, 0)) * 32; + reg = (ch == 4) ? 
HX9023S_PROX_LOW_DIFF_CFG_CH4_0 : + HX9023S_PROX_LOW_DIFF_CFG_CH0_0 + (ch * 2); + + return regmap_bulk_write(data->regmap, reg, &val_le16, sizeof(val_le16)); +} + +static int hx9023s_get_prox_state(struct hx9023s_data *data) +{ + return regmap_read(data->regmap, HX9023S_PROX_STATUS, &data->prox_state_reg); +} + +static int hx9023s_data_select(struct hx9023s_data *data) +{ + int ret; + unsigned int i, buf; + unsigned long tmp; + + ret = regmap_read(data->regmap, HX9023S_RAW_BL_RD_CFG, &buf); + if (ret) + return ret; + + tmp = buf; + for (i = 0; i < 4; i++) { + data->ch_data[i].sel_diff = test_bit(i, &tmp); + data->ch_data[i].sel_lp = !data->ch_data[i].sel_diff; + data->ch_data[i].sel_bl = test_bit(i + 4, &tmp); + data->ch_data[i].sel_raw = !data->ch_data[i].sel_bl; + } + + ret = regmap_read(data->regmap, HX9023S_INTERRUPT_CFG1, &buf); + if (ret) + return ret; + + tmp = buf; + data->ch_data[4].sel_diff = test_bit(2, &tmp); + data->ch_data[4].sel_lp = !data->ch_data[4].sel_diff; + data->ch_data[4].sel_bl = test_bit(3, &tmp); + data->ch_data[4].sel_raw = !data->ch_data[4].sel_bl; + + return 0; +} + +static int hx9023s_sample(struct hx9023s_data *data) +{ + int ret; + unsigned int i; + u8 buf[HX9023S_CH_NUM * 3]; + u16 value; + + ret = hx9023s_data_lock(data, true); + if (ret) + return ret; + + ret = hx9023s_data_select(data); + if (ret) + goto err; + + /* 3 bytes for each of channels 0 to 3 which have contiguous registers */ + ret = regmap_bulk_read(data->regmap, HX9023S_RAW_BL_CH0_0, buf, 12); + if (ret) + goto err; + + /* 3 bytes for channel 4 */ + ret = regmap_bulk_read(data->regmap, HX9023S_RAW_BL_CH4_0, buf + 12, 3); + if (ret) + goto err; + + for (i = 0; i < HX9023S_CH_NUM; i++) { + value = get_unaligned_le16(&buf[i * 3 + 1]); + data->ch_data[i].raw = 0; + data->ch_data[i].bl = 0; + if (data->ch_data[i].sel_raw) + data->ch_data[i].raw = value; + if (data->ch_data[i].sel_bl) + data->ch_data[i].bl = value; + } + + /* 3 bytes for each of channels 0 to 3 which have contiguous registers */ + ret = regmap_bulk_read(data->regmap, HX9023S_LP_DIFF_CH0_0, buf, 12); + if (ret) + goto err; + + /* 3 bytes for channel 4 */ + ret = regmap_bulk_read(data->regmap, HX9023S_LP_DIFF_CH4_0, buf + 12, 3); + if (ret) + goto err; + + for (i = 0; i < HX9023S_CH_NUM; i++) { + value = get_unaligned_le16(&buf[i * 3 + 1]); + data->ch_data[i].lp = 0; + data->ch_data[i].diff = 0; + if (data->ch_data[i].sel_lp) + data->ch_data[i].lp = value; + if (data->ch_data[i].sel_diff) + data->ch_data[i].diff = value; + } + + for (i = 0; i < HX9023S_CH_NUM; i++) { + if (data->ch_data[i].sel_lp && data->ch_data[i].sel_bl) + data->ch_data[i].diff = data->ch_data[i].lp - data->ch_data[i].bl; + } + + /* 2 bytes for each of channels 0 to 4 which have contiguous registers */ + ret = regmap_bulk_read(data->regmap, HX9023S_OFFSET_DAC0_7_0, buf, 10); + if (ret) + goto err; + + for (i = 0; i < HX9023S_CH_NUM; i++) { + value = get_unaligned_le16(&buf[i * 2]); + value = FIELD_GET(GENMASK(11, 0), value); + data->ch_data[i].dac = value; + } + +err: + return hx9023s_data_lock(data, false); +} + +static int hx9023s_ch_en(struct hx9023s_data *data, u8 ch_id, bool en) +{ + int ret; + unsigned int buf; + + ret = regmap_read(data->regmap, HX9023S_CH_NUM_CFG, &buf); + if (ret) + return ret; + + data->ch_en_stat = buf; + if (en && data->ch_en_stat == 0) + data->prox_state_reg = 0; + + data->ch_data[ch_id].enable = en; + __assign_bit(ch_id, &data->ch_en_stat, en); + + return regmap_write(data->regmap, HX9023S_CH_NUM_CFG, data->ch_en_stat); +} + +static 
int hx9023s_property_get(struct hx9023s_data *data) +{ + struct device *dev = regmap_get_device(data->regmap); + u32 array[2]; + u32 i, reg, temp; + int ret; + + data->chan_in_use = 0; + for (i = 0; i < HX9023S_CH_NUM; i++) { + data->ch_data[i].channel_positive = HX9023S_NOT_CONNECTED; + data->ch_data[i].channel_negative = HX9023S_NOT_CONNECTED; + } + + device_for_each_child_node_scoped(dev, child) { + ret = fwnode_property_read_u32(child, "reg", &reg); + if (ret || reg >= HX9023S_CH_NUM) + return dev_err_probe(dev, ret < 0 ? ret : -EINVAL, + "Failed to read reg\n"); + __set_bit(reg, &data->chan_in_use); + + ret = fwnode_property_read_u32(child, "single-channel", &temp); + if (ret == 0) { + data->ch_data[reg].channel_positive = temp; + data->ch_data[reg].channel_negative = HX9023S_NOT_CONNECTED; + } else { + ret = fwnode_property_read_u32_array(child, "diff-channels", + array, ARRAY_SIZE(array)); + if (ret == 0) { + data->ch_data[reg].channel_positive = array[0]; + data->ch_data[reg].channel_negative = array[1]; + } else { + return dev_err_probe(dev, ret, + "Property read failed: %d\n", + reg); + } + } + } + + return 0; +} + +static int hx9023s_update_chan_en(struct hx9023s_data *data, + unsigned long chan_read, + unsigned long chan_event) +{ + unsigned int i; + unsigned long channels = chan_read | chan_event; + + if ((data->chan_read | data->chan_event) != channels) { + for_each_set_bit(i, &channels, HX9023S_CH_NUM) + hx9023s_ch_en(data, i, test_bit(i, &data->chan_in_use)); + for_each_clear_bit(i, &channels, HX9023S_CH_NUM) + hx9023s_ch_en(data, i, false); + } + + data->chan_read = chan_read; + data->chan_event = chan_event; + + return 0; +} + +static int hx9023s_get_proximity(struct hx9023s_data *data, + const struct iio_chan_spec *chan, + int *val) +{ + int ret; + + ret = hx9023s_sample(data); + if (ret) + return ret; + + ret = hx9023s_get_prox_state(data); + if (ret) + return ret; + + *val = data->ch_data[chan->channel].diff; + return IIO_VAL_INT; +} + +static int hx9023s_get_samp_freq(struct hx9023s_data *data, int *val, int *val2) +{ + int ret; + unsigned int odr, index; + + ret = regmap_read(data->regmap, HX9023S_PRF_CFG, &index); + if (ret) + return ret; + + odr = hx9023s_samp_freq_table[index]; + *val = KILO / odr; + *val2 = div_u64((KILO % odr) * MICRO, odr); + + return IIO_VAL_INT_PLUS_MICRO; +} + +static int hx9023s_read_raw(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + int *val, int *val2, long mask) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + int ret; + + if (chan->type != IIO_PROXIMITY) + return -EINVAL; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + ret = hx9023s_get_proximity(data, chan, val); + iio_device_release_direct_mode(indio_dev); + return ret; + case IIO_CHAN_INFO_SAMP_FREQ: + return hx9023s_get_samp_freq(data, val, val2); + default: + return -EINVAL; + } +} + +static int hx9023s_set_samp_freq(struct hx9023s_data *data, int val, int val2) +{ + struct device *dev = regmap_get_device(data->regmap); + unsigned int i, period_ms; + + period_ms = div_u64(NANO, (val * MEGA + val2)); + + for (i = 0; i < ARRAY_SIZE(hx9023s_samp_freq_table); i++) { + if (period_ms == hx9023s_samp_freq_table[i]) + break; + } + if (i == ARRAY_SIZE(hx9023s_samp_freq_table)) { + dev_err(dev, "Period:%dms NOT found!\n", period_ms); + return -EINVAL; + } + + return regmap_write(data->regmap, HX9023S_PRF_CFG, i); +} + +static int hx9023s_write_raw(struct iio_dev *indio_dev, + const struct 
iio_chan_spec *chan, + int val, int val2, long mask) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + + if (chan->type != IIO_PROXIMITY) + return -EINVAL; + + if (mask != IIO_CHAN_INFO_SAMP_FREQ) + return -EINVAL; + + return hx9023s_set_samp_freq(data, val, val2); +} + +static irqreturn_t hx9023s_irq_handler(int irq, void *private) +{ + struct iio_dev *indio_dev = private; + struct hx9023s_data *data = iio_priv(indio_dev); + + if (data->trigger_enabled) + iio_trigger_poll(data->trig); + + return IRQ_WAKE_THREAD; +} + +static void hx9023s_push_events(struct iio_dev *indio_dev) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + s64 timestamp = iio_get_time_ns(indio_dev); + unsigned long prox_changed; + unsigned int chan; + int ret; + + ret = hx9023s_sample(data); + if (ret) + return; + + ret = hx9023s_get_prox_state(data); + if (ret) + return; + + prox_changed = (data->chan_prox_stat ^ data->prox_state_reg) & data->chan_event; + for_each_set_bit(chan, &prox_changed, HX9023S_CH_NUM) { + unsigned int dir; + + dir = (data->prox_state_reg & BIT(chan)) ? + IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING; + + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan, + IIO_EV_TYPE_THRESH, dir), + timestamp); + } + data->chan_prox_stat = data->prox_state_reg; +} + +static irqreturn_t hx9023s_irq_thread_handler(int irq, void *private) +{ + struct iio_dev *indio_dev = private; + struct hx9023s_data *data = iio_priv(indio_dev); + + guard(mutex)(&data->mutex); + hx9023s_push_events(indio_dev); + + return IRQ_HANDLED; +} + +static int hx9023s_read_event_val(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int *val, int *val2) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + + if (chan->type != IIO_PROXIMITY) + return -EINVAL; + + switch (info) { + case IIO_EV_INFO_VALUE: + switch (dir) { + case IIO_EV_DIR_RISING: + return hx9023s_get_thres_far(data, chan->channel, val); + case IIO_EV_DIR_FALLING: + return hx9023s_get_thres_near(data, chan->channel, val); + default: + return -EINVAL; + } + case IIO_EV_INFO_PERIOD: + switch (dir) { + case IIO_EV_DIR_RISING: + return hx9023s_read_far_debounce(data, val); + case IIO_EV_DIR_FALLING: + return hx9023s_read_near_debounce(data, val); + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int hx9023s_write_event_val(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int val, int val2) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + + if (chan->type != IIO_PROXIMITY) + return -EINVAL; + + switch (info) { + case IIO_EV_INFO_VALUE: + switch (dir) { + case IIO_EV_DIR_RISING: + return hx9023s_set_thres_far(data, chan->channel, val); + case IIO_EV_DIR_FALLING: + return hx9023s_set_thres_near(data, chan->channel, val); + default: + return -EINVAL; + } + case IIO_EV_INFO_PERIOD: + switch (dir) { + case IIO_EV_DIR_RISING: + return hx9023s_write_far_debounce(data, val); + case IIO_EV_DIR_FALLING: + return hx9023s_write_near_debounce(data, val); + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static int hx9023s_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + + return test_bit(chan->channel, &data->chan_event); +} + +static int 
hx9023s_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + int state) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + + if (test_bit(chan->channel, &data->chan_in_use)) { + hx9023s_ch_en(data, chan->channel, !!state); + __assign_bit(chan->channel, &data->chan_event, + data->ch_data[chan->channel].enable); + } + + return 0; +} + +static const struct iio_info hx9023s_info = { + .read_raw = hx9023s_read_raw, + .write_raw = hx9023s_write_raw, + .read_event_value = hx9023s_read_event_val, + .write_event_value = hx9023s_write_event_val, + .read_event_config = hx9023s_read_event_config, + .write_event_config = hx9023s_write_event_config, +}; + +static int hx9023s_set_trigger_state(struct iio_trigger *trig, bool state) +{ + struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); + struct hx9023s_data *data = iio_priv(indio_dev); + + guard(mutex)(&data->mutex); + if (state) + hx9023s_interrupt_enable(data); + else if (!data->chan_read) + hx9023s_interrupt_disable(data); + data->trigger_enabled = state; + + return 0; +} + +static const struct iio_trigger_ops hx9023s_trigger_ops = { + .set_trigger_state = hx9023s_set_trigger_state, +}; + +static irqreturn_t hx9023s_trigger_handler(int irq, void *private) +{ + struct iio_poll_func *pf = private; + struct iio_dev *indio_dev = pf->indio_dev; + struct hx9023s_data *data = iio_priv(indio_dev); + struct device *dev = regmap_get_device(data->regmap); + unsigned int bit, index, i = 0; + int ret; + + guard(mutex)(&data->mutex); + ret = hx9023s_sample(data); + if (ret) { + dev_warn(dev, "sampling failed\n"); + goto out; + } + + ret = hx9023s_get_prox_state(data); + if (ret) { + dev_warn(dev, "get prox failed\n"); + goto out; + } + + iio_for_each_active_channel(indio_dev, bit) { + index = indio_dev->channels[bit].channel; + data->buffer.channels[i++] = cpu_to_le16(data->ch_data[index].diff); + } + + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, + pf->timestamp); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static int hx9023s_buffer_preenable(struct iio_dev *indio_dev) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + unsigned long channels = 0; + unsigned int bit; + + guard(mutex)(&data->mutex); + iio_for_each_active_channel(indio_dev, bit) + __set_bit(indio_dev->channels[bit].channel, &channels); + + hx9023s_update_chan_en(data, channels, data->chan_event); + + return 0; +} + +static int hx9023s_buffer_postdisable(struct iio_dev *indio_dev) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + + guard(mutex)(&data->mutex); + hx9023s_update_chan_en(data, 0, data->chan_event); + + return 0; +} + +static const struct iio_buffer_setup_ops hx9023s_buffer_setup_ops = { + .preenable = hx9023s_buffer_preenable, + .postdisable = hx9023s_buffer_postdisable, +}; + +static int hx9023s_id_check(struct iio_dev *indio_dev) +{ + struct hx9023s_data *data = iio_priv(indio_dev); + struct device *dev = regmap_get_device(data->regmap); + unsigned int id; + int ret; + + ret = regmap_read(data->regmap, HX9023S_DEVICE_ID, &id); + if (ret) + return ret; + + if (id != HX9023S_CHIP_ID) + dev_warn(dev, "Unexpected chip ID, assuming compatible\n"); + + return 0; +} + +static int hx9023s_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct iio_dev *indio_dev; + struct hx9023s_data *data; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + 
data = iio_priv(indio_dev); + mutex_init(&data->mutex); + + data->regmap = devm_regmap_init_i2c(client, &hx9023s_regmap_config); + if (IS_ERR(data->regmap)) + return dev_err_probe(dev, PTR_ERR(data->regmap), + "regmap init failed\n"); + + ret = hx9023s_property_get(data); + if (ret) + return dev_err_probe(dev, ret, "dts phase failed\n"); + + ret = devm_regulator_get_enable(dev, "vdd"); + if (ret) + return dev_err_probe(dev, ret, "regulator get failed\n"); + + ret = hx9023s_id_check(indio_dev); + if (ret) + return dev_err_probe(dev, ret, "id check failed\n"); + + indio_dev->name = "hx9023s"; + indio_dev->channels = hx9023s_channels; + indio_dev->num_channels = ARRAY_SIZE(hx9023s_channels); + indio_dev->info = &hx9023s_info; + indio_dev->modes = INDIO_DIRECT_MODE; + i2c_set_clientdata(client, indio_dev); + + ret = regmap_multi_reg_write(data->regmap, hx9023s_reg_init_list, + ARRAY_SIZE(hx9023s_reg_init_list)); + if (ret) + return dev_err_probe(dev, ret, "device init failed\n"); + + ret = hx9023s_ch_cfg(data); + if (ret) + return dev_err_probe(dev, ret, "channel config failed\n"); + + ret = regcache_sync(data->regmap); + if (ret) + return dev_err_probe(dev, ret, "regcache sync failed\n"); + + if (client->irq) { + ret = devm_request_threaded_irq(dev, client->irq, + hx9023s_irq_handler, + hx9023s_irq_thread_handler, + IRQF_ONESHOT, + "hx9023s_event", indio_dev); + if (ret) + return dev_err_probe(dev, ret, "irq request failed\n"); + + data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", + indio_dev->name, + iio_device_id(indio_dev)); + if (!data->trig) + return dev_err_probe(dev, -ENOMEM, + "iio trigger alloc failed\n"); + + data->trig->ops = &hx9023s_trigger_ops; + iio_trigger_set_drvdata(data->trig, indio_dev); + + ret = devm_iio_trigger_register(dev, data->trig); + if (ret) + return dev_err_probe(dev, ret, + "iio trigger register failed\n"); + } + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, + iio_pollfunc_store_time, + hx9023s_trigger_handler, + &hx9023s_buffer_setup_ops); + if (ret) + return dev_err_probe(dev, ret, + "iio triggered buffer setup failed\n"); + + return devm_iio_device_register(dev, indio_dev); +} + +static int hx9023s_suspend(struct device *dev) +{ + struct hx9023s_data *data = iio_priv(dev_get_drvdata(dev)); + + guard(mutex)(&data->mutex); + hx9023s_interrupt_disable(data); + + return 0; +} + +static int hx9023s_resume(struct device *dev) +{ + struct hx9023s_data *data = iio_priv(dev_get_drvdata(dev)); + + guard(mutex)(&data->mutex); + if (data->trigger_enabled) + hx9023s_interrupt_enable(data); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(hx9023s_pm_ops, hx9023s_suspend, + hx9023s_resume); + +static const struct of_device_id hx9023s_of_match[] = { + { .compatible = "tyhx,hx9023s" }, + {} +}; +MODULE_DEVICE_TABLE(of, hx9023s_of_match); + +static const struct i2c_device_id hx9023s_id[] = { + { "hx9023s" }, + {} +}; +MODULE_DEVICE_TABLE(i2c, hx9023s_id); + +static struct i2c_driver hx9023s_driver = { + .driver = { + .name = "hx9023s", + .of_match_table = hx9023s_of_match, + .pm = &hx9023s_pm_ops, + + /* + * The I2C operations in hx9023s_reg_init() and hx9023s_ch_cfg() + * are time-consuming. Prefer async so we don't delay boot + * if we're builtin to the kernel. 
+ */ + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .probe = hx9023s_probe, + .id_table = hx9023s_id, +}; +module_i2c_driver(hx9023s_driver); + +MODULE_AUTHOR("Yasin Lee "); +MODULE_DESCRIPTION("Driver for TYHX HX9023S SAR sensor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c index 323ac6dac90e1b..6e96b764fed8b5 100644 --- a/drivers/iio/proximity/irsd200.c +++ b/drivers/iio/proximity/irsd200.c @@ -5,7 +5,7 @@ * Copyright (C) 2023 Axis Communications AB */ -#include +#include #include #include #include diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index 92630812ece236..3f4eace05cfc6a 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -654,8 +654,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private) mutex_lock(&data->mutex); - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = sx9500_read_prox_data(data, &indio_dev->channels[bit], &val); if (ret < 0) diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c index a95e9814aaf2ad..71aa6dced7d342 100644 --- a/drivers/iio/proximity/sx_common.c +++ b/drivers/iio/proximity/sx_common.c @@ -369,8 +369,7 @@ static irqreturn_t sx_common_trigger_handler(int irq, void *private) mutex_lock(&data->mutex); - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) { + iio_for_each_active_channel(indio_dev, bit) { ret = data->chip_info->ops.read_prox_data(data, &indio_dev->channels[bit], &val); @@ -398,8 +397,7 @@ static int sx_common_buffer_preenable(struct iio_dev *indio_dev) int bit, ret; mutex_lock(&data->mutex); - for_each_set_bit(bit, indio_dev->active_scan_mask, - indio_dev->masklength) + iio_for_each_active_channel(indio_dev, bit) __set_bit(indio_dev->channels[bit].channel, &channels); ret = sx_common_update_chan_en(data, channels, data->chan_event); diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c index 21f2cfc55bf8d8..f8ea2219ab48b3 100644 --- a/drivers/iio/temperature/ltc2983.c +++ b/drivers/iio/temperature/ltc2983.c @@ -22,7 +22,7 @@ #include #include -#include +#include /* register map */ #define LTC2983_STATUS_REG 0x0000 diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c index 8307aae2cb457a..7ddec5cbe558e7 100644 --- a/drivers/iio/temperature/max31856.c +++ b/drivers/iio/temperature/max31856.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include /* * The MSB of the register value determines whether the following byte will diff --git a/drivers/iio/temperature/max31865.c b/drivers/iio/temperature/max31865.c index 29e23652ba5a11..5a6fbe3c80e5f6 100644 --- a/drivers/iio/temperature/max31865.c +++ b/drivers/iio/temperature/max31865.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include /* * The MSB of the register value determines whether the following byte will diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 6791df64a5fe05..b7c078b7f7cfd4 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1640,8 +1640,10 @@ int ib_cache_setup_one(struct ib_device *device) rdma_for_each_port (device, p) { err = ib_cache_update(device, p, true, true, true); - if (err) + if (err) { + gid_table_cleanup_one(device); return err; + } } return 0; diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 
dd7715ba9fd15e..05102769a918ad 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -325,9 +325,6 @@ void ib_qp_usecnt_inc(struct ib_qp *qp); void ib_qp_usecnt_dec(struct ib_qp *qp); struct rdma_dev_addr; -int rdma_resolve_ip_route(struct sockaddr *src_addr, - const struct sockaddr *dst_addr, - struct rdma_dev_addr *addr); int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, const union ib_gid *dgid, diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 0290aca18d261d..e029401b56805f 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1351,6 +1351,29 @@ static void prevent_dealloc_device(struct ib_device *ib_dev) { } +static void ib_device_notify_register(struct ib_device *device) +{ + struct net_device *netdev; + u32 port; + int ret; + + ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT); + if (ret) + return; + + rdma_for_each_port(device, port) { + netdev = ib_device_get_netdev(device, port); + if (!netdev) + continue; + + ret = rdma_nl_notify_event(device, port, + RDMA_NETDEV_ATTACH_EVENT); + dev_put(netdev); + if (ret) + return; + } +} + /** * ib_register_device - Register an IB device with IB core * @device: Device to register @@ -1449,6 +1472,8 @@ int ib_register_device(struct ib_device *device, const char *name, dev_set_uevent_suppress(&device->dev, false); /* Mark for userspace that device is ready */ kobject_uevent(&device->dev.kobj, KOBJ_ADD); + + ib_device_notify_register(device); ib_device_put(device); return 0; @@ -1491,6 +1516,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev) goto out; disable_device(ib_dev); + rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT); /* Expedite removing unregistered pointers from the hash table */ free_netdevs(ib_dev); @@ -2159,6 +2185,7 @@ static void add_ndev_hash(struct ib_port_data *pdata) int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, u32 port) { + enum rdma_nl_notify_event_type etype; struct net_device *old_ndev; struct ib_port_data *pdata; unsigned long flags; @@ -2190,6 +2217,14 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, spin_unlock_irqrestore(&pdata->netdev_lock, flags); add_ndev_hash(pdata); + + /* Make sure that the device is registered before we send events */ + if (xa_load(&devices, ib_dev->index) != ib_dev) + return 0; + + etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT; + rdma_nl_notify_event(ib_dev, port, etype); + return 0; } EXPORT_SYMBOL(ib_device_set_netdev); @@ -2236,6 +2271,9 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, if (!rdma_is_port_valid(ib_dev, port)) return NULL; + if (!ib_dev->port_data) + return NULL; + pdata = &ib_dev->port_data[port]; /* @@ -2252,17 +2290,9 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, spin_unlock(&pdata->netdev_lock); } - /* - * If we are starting to unregister expedite things by preventing - * propagation of an unregistering netdev. 
- */ - if (res && res->reg_state != NETREG_REGISTERED) { - dev_put(res); - return NULL; - } - return res; } +EXPORT_SYMBOL(ib_device_get_netdev); /** * ib_device_get_by_netdev - Find an IB device associated with a netdev diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 1a6339f3a63fc9..7e3a55349e1070 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -1182,7 +1182,7 @@ static int __init iw_cm_init(void) if (ret) return ret; - iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0); + iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM); if (!iwcm_wq) goto err_alloc; diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 7439e47ff951d4..1fd54d5c4dd8b7 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2616,14 +2616,16 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) static void timeout_sends(struct work_struct *work) { + struct ib_mad_send_wr_private *mad_send_wr, *n; struct ib_mad_agent_private *mad_agent_priv; - struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; + struct list_head local_list; unsigned long flags, delay; mad_agent_priv = container_of(work, struct ib_mad_agent_private, timed_work.work); mad_send_wc.vendor_err = 0; + INIT_LIST_HEAD(&local_list); spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->wait_list)) { @@ -2641,13 +2643,16 @@ static void timeout_sends(struct work_struct *work) break; } - list_del(&mad_send_wr->agent_list); + list_del_init(&mad_send_wr->agent_list); if (mad_send_wr->status == IB_WC_SUCCESS && !retry_send(mad_send_wr)) continue; - spin_unlock_irqrestore(&mad_agent_priv->lock, flags); + list_add_tail(&mad_send_wr->agent_list, &local_list); + } + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); + list_for_each_entry_safe(mad_send_wr, n, &local_list, agent_list) { if (mad_send_wr->status == IB_WC_SUCCESS) mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; else @@ -2655,11 +2660,8 @@ static void timeout_sends(struct work_struct *work) mad_send_wc.send_buf = &mad_send_wr->send_buf; mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); - deref_mad_agent(mad_agent_priv); - spin_lock_irqsave(&mad_agent_priv->lock, flags); } - spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } /* @@ -2937,7 +2939,6 @@ static int ib_mad_port_open(struct ib_device *device, int ret, cq_size; struct ib_mad_port_private *port_priv; unsigned long flags; - char name[sizeof "ib_mad123"]; int has_smi; if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) @@ -2990,8 +2991,8 @@ static int ib_mad_port_open(struct ib_device *device, goto error7; } - snprintf(name, sizeof(name), "ib_mad%u", port_num); - port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + port_priv->wq = alloc_ordered_workqueue("ib_mad%u", WQ_MEM_RECLAIM, + port_num); if (!port_priv->wq) { ret = -ENOMEM; goto error8; diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index ae2db0c70788bf..def14c54b6487f 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -311,6 +311,7 @@ int rdma_nl_net_init(struct rdma_dev_net *rnet) struct net *net = read_pnet(&rnet->net); struct netlink_kernel_cfg cfg = { .input = rdma_nl_rcv, + .flags = NL_CFG_F_NONROOT_RECV, }; struct sock *nls; diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index a6b80cdc96f739..39f89a4b864982 100644 --- 
a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -170,6 +170,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_DEV_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_PARENT_NAME] = { .type = NLA_NUL_STRING }, [RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 }, + [RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 }, }; static int put_driver_name_print_type(struct sk_buff *msg, const char *name, @@ -1074,8 +1075,8 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int err; - err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -1123,8 +1124,8 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int err; - err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -1215,8 +1216,8 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u32 port; int err; - err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) @@ -1275,8 +1276,8 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, int err; unsigned int p; - err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, NULL); + err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, NULL); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -1331,8 +1332,8 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int ret; - ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -1481,8 +1482,8 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct sk_buff *msg; int ret; - ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) return -EINVAL; @@ -1569,8 +1570,8 @@ static int res_get_common_dumpit(struct sk_buff *skb, u32 index, port = 0; bool filled = false; - err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, NULL); + err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, NULL); /* * Right now, we are expecting the device index to get res information, * but it is possible to extend this code to return all devices in @@ -1762,8 +1763,8 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, char type[IFNAMSIZ]; int err; - err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || 
!tb[RDMA_NLDEV_ATTR_NDEV_NAME]) return -EINVAL; @@ -1806,8 +1807,8 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int err; - err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -1836,8 +1837,8 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int err; - err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, - extack); + err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, + NL_VALIDATE_LIBERAL, extack); if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE]) return -EINVAL; @@ -1920,8 +1921,8 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct sk_buff *msg; int err; - err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, extack); if (err) return err; @@ -1951,6 +1952,12 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_free(msg); return err; } + + err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, 1); + if (err) { + nlmsg_free(msg); + return err; + } /* * Copy-on-fork is supported. * See commits: @@ -2420,8 +2427,8 @@ static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int ret; - ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, extack); if (ret) return -EINVAL; @@ -2450,8 +2457,8 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb, struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int ret; - ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, NULL); + ret = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, NULL); if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES]) return -EINVAL; @@ -2482,8 +2489,8 @@ static int nldev_stat_get_counter_status_doit(struct sk_buff *skb, u32 devid, port; int ret, i; - ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NL_VALIDATE_LIBERAL, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; @@ -2722,6 +2729,130 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { }, }; +static int fill_mon_netdev_association(struct sk_buff *msg, + struct ib_device *device, u32 port, + const struct net *net) +{ + struct net_device *netdev = ib_device_get_netdev(device, port); + int ret = 0; + + if (netdev && !net_eq(dev_net(netdev), net)) + goto out; + + ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index); + if (ret) + goto out; + + ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, + dev_name(&device->dev)); + if (ret) + goto out; + + ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port); + if (ret) + goto out; + + if (netdev) { + ret = nla_put_u32(msg, + RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); + if (ret) + goto out; + + ret = nla_put_string(msg, + RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); + } + +out: + dev_put(netdev); + return ret; +} + +static void rdma_nl_notify_err_msg(struct ib_device *device, u32 port_num, + enum rdma_nl_notify_event_type type) +{ + struct net_device *netdev; + + 
switch (type) { + case RDMA_REGISTER_EVENT: + dev_warn_ratelimited(&device->dev, + "Failed to send RDMA monitor register device event\n"); + break; + case RDMA_UNREGISTER_EVENT: + dev_warn_ratelimited(&device->dev, + "Failed to send RDMA monitor unregister device event\n"); + break; + case RDMA_NETDEV_ATTACH_EVENT: + netdev = ib_device_get_netdev(device, port_num); + dev_warn_ratelimited(&device->dev, + "Failed to send RDMA monitor netdev attach event: port %d netdev %d\n", + port_num, netdev->ifindex); + dev_put(netdev); + break; + case RDMA_NETDEV_DETACH_EVENT: + dev_warn_ratelimited(&device->dev, + "Failed to send RDMA monitor netdev detach event: port %d\n", + port_num); + break; + default: + break; + } +} + +int rdma_nl_notify_event(struct ib_device *device, u32 port_num, + enum rdma_nl_notify_event_type type) +{ + struct sk_buff *skb; + struct net *net; + int ret = 0; + void *nlh; + + net = read_pnet(&device->coredev.rdma_net); + if (!net) + return -EINVAL; + + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!skb) + return -ENOMEM; + nlh = nlmsg_put(skb, 0, 0, + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_MONITOR), + 0, 0); + + switch (type) { + case RDMA_REGISTER_EVENT: + case RDMA_UNREGISTER_EVENT: + ret = fill_nldev_handle(skb, device); + if (ret) + goto err_free; + break; + case RDMA_NETDEV_ATTACH_EVENT: + case RDMA_NETDEV_DETACH_EVENT: + ret = fill_mon_netdev_association(skb, device, + port_num, net); + if (ret) + goto err_free; + break; + default: + break; + } + + ret = nla_put_u8(skb, RDMA_NLDEV_ATTR_EVENT_TYPE, type); + if (ret) + goto err_free; + + nlmsg_end(skb, nlh); + ret = rdma_nl_multicast(net, skb, RDMA_NL_GROUP_NOTIFY, GFP_KERNEL); + if (ret && ret != -ESRCH) { + skb = NULL; /* skb is freed in the netlink send-op handling */ + goto err_free; + } + return 0; + +err_free: + rdma_nl_notify_err_msg(device, port_num, type); + nlmsg_free(skb); + return ret; +} + void __init nldev_init(void) { rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 8175dde60b0a84..53571e6b3162ca 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1420,7 +1420,7 @@ enum opa_pr_supported { /* * opa_pr_query_possible - Check if current PR query can be an OPA query. * - * Retuns PR_NOT_SUPPORTED if a path record query is not + * Returns PR_NOT_SUPPORTED if a path record query is not * possible, PR_OPA_SUPPORTED if an OPA path record query * is possible and PR_IB_SUPPORTED if an IB path record * query is possible. diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5f5ad8faf86e55..5dbb248e96259c 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1624,13 +1624,13 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, /* Get current fd to protect against it being closed */ f = fdget(cmd.fd); - if (!f.file) + if (!fd_file(f)) return -ENOENT; - if (f.file->f_op != &ucma_fops) { + if (fd_file(f)->f_op != &ucma_fops) { ret = -EINVAL; goto file_put; } - cur_file = f.file->private_data; + cur_file = fd_file(f)->private_data; /* Validate current fd and prevent destruction of id. 
*/ ctx = ucma_get_ctx(cur_file, cmd.id); @@ -1817,7 +1817,6 @@ static const struct file_operations ucma_fops = { .release = ucma_close, .write = ucma_write, .poll = ucma_poll, - .llseek = no_llseek, }; static struct miscdevice ucma_misc = { diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c index 39357dc2d229f0..9fcd37761264a4 100644 --- a/drivers/infiniband/core/umem_dmabuf.c +++ b/drivers/infiniband/core/umem_dmabuf.c @@ -23,6 +23,9 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf) dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); + if (umem_dmabuf->revoked) + return -EINVAL; + if (umem_dmabuf->sgt) goto wait_fence; @@ -110,10 +113,12 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) } EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages); -struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, - unsigned long offset, size_t size, - int fd, int access, - const struct dma_buf_attach_ops *ops) +static struct ib_umem_dmabuf * +ib_umem_dmabuf_get_with_dma_device(struct ib_device *device, + struct device *dma_device, + unsigned long offset, size_t size, + int fd, int access, + const struct dma_buf_attach_ops *ops) { struct dma_buf *dmabuf; struct ib_umem_dmabuf *umem_dmabuf; @@ -152,7 +157,7 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, umem_dmabuf->attach = dma_buf_dynamic_attach( dmabuf, - device->dma_device, + dma_device, ops, umem_dmabuf); if (IS_ERR(umem_dmabuf->attach)) { @@ -168,6 +173,15 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, dma_buf_put(dmabuf); return ret; } + +struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, + unsigned long offset, size_t size, + int fd, int access, + const struct dma_buf_attach_ops *ops) +{ + return ib_umem_dmabuf_get_with_dma_device(device, device->dma_device, + offset, size, fd, access, ops); +} EXPORT_SYMBOL(ib_umem_dmabuf_get); static void @@ -184,16 +198,18 @@ static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = { .move_notify = ib_umem_dmabuf_unsupported_move_notify, }; -struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device, - unsigned long offset, - size_t size, int fd, - int access) +struct ib_umem_dmabuf * +ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device, + struct device *dma_device, + unsigned long offset, size_t size, + int fd, int access) { struct ib_umem_dmabuf *umem_dmabuf; int err; - umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access, - &ib_umem_dmabuf_attach_pinned_ops); + umem_dmabuf = ib_umem_dmabuf_get_with_dma_device(device, dma_device, offset, + size, fd, access, + &ib_umem_dmabuf_attach_pinned_ops); if (IS_ERR(umem_dmabuf)) return umem_dmabuf; @@ -217,17 +233,41 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device, ib_umem_release(&umem_dmabuf->umem); return ERR_PTR(err); } +EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_with_dma_device); + +struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device, + unsigned long offset, + size_t size, int fd, + int access) +{ + return ib_umem_dmabuf_get_pinned_with_dma_device(device, device->dma_device, + offset, size, fd, access); +} EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned); -void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) +void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) { struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf; dma_resv_lock(dmabuf->resv, NULL); + if (umem_dmabuf->revoked) + goto end; 
ib_umem_dmabuf_unmap_pages(umem_dmabuf); - if (umem_dmabuf->pinned) + if (umem_dmabuf->pinned) { dma_buf_unpin(umem_dmabuf->attach); + umem_dmabuf->pinned = 0; + } + umem_dmabuf->revoked = 1; +end: dma_resv_unlock(dmabuf->resv); +} +EXPORT_SYMBOL(ib_umem_dmabuf_revoke); + +void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) +{ + struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf; + + ib_umem_dmabuf_revoke(umem_dmabuf); dma_buf_detach(dmabuf, umem_dmabuf->attach); dma_buf_put(dmabuf); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index f760dfffa1886b..fd67fc9fe85a46 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1082,7 +1082,6 @@ static const struct file_operations umad_fops = { #endif .open = ib_umad_open, .release = ib_umad_close, - .llseek = no_llseek, }; static int ib_umad_sm_open(struct inode *inode, struct file *filp) @@ -1150,7 +1149,6 @@ static const struct file_operations umad_sm_fops = { .owner = THIS_MODULE, .open = ib_umad_sm_open, .release = ib_umad_sm_close, - .llseek = no_llseek, }; static struct ib_umad_port *get_port(struct ib_device *ibdev, diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 1b3ea71f2c3334..a4cce360df2178 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -572,7 +572,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) struct inode *inode = NULL; int new_xrcd = 0; struct ib_device *ib_dev; - struct fd f = {}; + struct fd f = EMPTY_FD; int ret; ret = uverbs_request(attrs, &cmd, sizeof(cmd)); @@ -584,12 +584,12 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) if (cmd.fd != -1) { /* search for file descriptor */ f = fdget(cmd.fd); - if (!f.file) { + if (!fd_file(f)) { ret = -EBADF; goto err_tree_mutex_unlock; } - inode = file_inode(f.file); + inode = file_inode(fd_file(f)); xrcd = find_xrcd(ibudev, inode); if (!xrcd && !(cmd.oflags & O_CREAT)) { /* no file descriptor. 
Need CREATE flag */ @@ -632,7 +632,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) atomic_inc(&xrcd->usecnt); } - if (f.file) + if (fd_file(f)) fdput(f); mutex_unlock(&ibudev->xrcd_tree_mutex); @@ -648,7 +648,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) uobj_alloc_abort(&obj->uobject, attrs); err_tree_mutex_unlock: - if (f.file) + if (fd_file(f)) fdput(f); mutex_unlock(&ibudev->xrcd_tree_mutex); diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index bc099287de9a60..94454186ed81d5 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -353,7 +353,6 @@ const struct file_operations uverbs_event_fops = { .poll = ib_uverbs_comp_event_poll, .release = uverbs_uobject_fd_release, .fasync = ib_uverbs_comp_event_fasync, - .llseek = no_llseek, }; const struct file_operations uverbs_async_event_fops = { @@ -362,7 +361,6 @@ const struct file_operations uverbs_async_event_fops = { .poll = ib_uverbs_async_event_poll, .release = uverbs_async_event_release, .fasync = ib_uverbs_async_event_fasync, - .llseek = no_llseek, }; void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) @@ -991,7 +989,6 @@ static const struct file_operations uverbs_fops = { .write = ib_uverbs_write, .open = ib_uverbs_open, .release = ib_uverbs_close, - .llseek = no_llseek, .unlocked_ioctl = ib_uverbs_ioctl, .compat_ioctl = compat_ptr_ioctl, }; @@ -1002,7 +999,6 @@ static const struct file_operations uverbs_mmap_fops = { .mmap = ib_uverbs_mmap, .open = ib_uverbs_open, .release = ib_uverbs_close, - .llseek = no_llseek, .unlocked_ioctl = ib_uverbs_ioctl, .compat_ioctl = compat_ptr_ioctl, }; diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c index 03e1db5d1e8c39..7ebc7bd3caaea4 100644 --- a/drivers/infiniband/core/uverbs_std_types_mr.c +++ b/drivers/infiniband/core/uverbs_std_types_mr.c @@ -239,7 +239,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)( mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd, access_flags, - &attrs->driver_udata); + attrs); if (IS_ERR(mr)) return PTR_ERR(mr); diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 0912d2fa9634f2..e94518b12f86ee 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -91,6 +91,15 @@ struct bnxt_re_ring_attr { u8 mode; }; +/* + * Data structure and defines to handle + * recovery + */ +#define BNXT_RE_PRE_RECOVERY_REMOVE 0x1 +#define BNXT_RE_COMPLETE_REMOVE 0x2 +#define BNXT_RE_POST_RECOVERY_INIT 0x4 +#define BNXT_RE_COMPLETE_INIT 0x8 + struct bnxt_re_sqp_entries { struct bnxt_qplib_sge sge; u64 wrid; @@ -107,6 +116,11 @@ struct bnxt_re_gsi_context { struct bnxt_re_sqp_entries *sqp_tbl; }; +struct bnxt_re_en_dev_info { + struct bnxt_en_dev *en_dev; + struct bnxt_re_dev *rdev; +}; + #define BNXT_RE_AEQ_IDX 0 #define BNXT_RE_NQ_IDX 1 #define BNXT_RE_GEN_P5_MAX_VF 64 @@ -141,6 +155,7 @@ struct bnxt_re_pacing { #define BNXT_RE_GRC_FIFO_REG_BASE 0x2000 #define MAX_CQ_HASH_BITS (16) +#define MAX_SRQ_HASH_BITS (16) struct bnxt_re_dev { struct ib_device ibdev; struct list_head list; @@ -154,6 +169,7 @@ struct bnxt_re_dev { #define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 struct net_device *netdev; + struct auxiliary_device *adev; struct notifier_block nb; unsigned int version, major, minor; struct bnxt_qplib_chip_ctx *chip_ctx; @@ -196,6 
+212,7 @@ struct bnxt_re_dev { struct work_struct dbq_fifo_check_work; struct delayed_work dbq_pacing_work; DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS); + DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS); }; #define to_bnxt_re_dev(ptr, member) \ @@ -216,4 +233,10 @@ static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev) } extern const struct uapi_definition bnxt_re_uapi_defs[]; + +static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev) +{ + rdev->qplib_res.pacing_data->dev_err_state = + test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); +} #endif diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 7c757351a0166f..460f33914825c6 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -115,6 +115,14 @@ static enum ib_access_flags __to_ib_access_flags(int qflags) return iflags; }; +static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev, + struct bnxt_qplib_mrw *qplib_mr) +{ + if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) && + pcie_relaxed_ordering_enabled(rdev->en_dev->pdev)) + qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO; +} + static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, struct bnxt_qplib_sge *sg_list, int num) { @@ -517,15 +525,19 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) mr->rdev = rdev; mr->qplib_mr.pd = &pd->qplib_pd; mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; - mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); - rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (rc) { - ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n"); - goto fail; - } + mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); + if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) { + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n"); + goto fail; + } - /* Register MR */ - mr->ib_mr.lkey = mr->qplib_mr.lkey; + /* Register MR */ + mr->ib_mr.lkey = mr->qplib_mr.lkey; + } else { + mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR; + } mr->qplib_mr.va = (u64)(unsigned long)fence->va; mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, @@ -994,43 +1006,37 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp, align = sizeof(struct sq_send_hdr); ilsize = ALIGN(init_attr->cap.max_inline_data, align); - sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge); - if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges)) - return -EINVAL; - /* For gen p4 and gen p5 backward compatibility mode - * wqe size is fixed to 128 bytes + /* For gen p4 and gen p5 fixed wqe compatibility mode + * wqe size is fixed to 128 bytes - ie 6 SGEs */ - if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) && - qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) - sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges); + if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) { + sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE); + sq->max_sge = BNXT_STATIC_MAX_SGE; + } else { + sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge); + if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges)) + return -EINVAL; + } if (init_attr->cap.max_inline_data) { qplqp->max_inline_data = sq->wqe_size - sizeof(struct sq_send_hdr); init_attr->cap.max_inline_data = qplqp->max_inline_data; - if (qplqp->wqe_mode == 
BNXT_QPLIB_WQE_MODE_STATIC) - sq->max_sge = qplqp->max_inline_data / - sizeof(struct sq_sge); } return 0; } static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, - struct bnxt_re_qp *qp, struct ib_udata *udata) + struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx, + struct bnxt_re_qp_req *ureq) { struct bnxt_qplib_qp *qplib_qp; - struct bnxt_re_ucontext *cntx; - struct bnxt_re_qp_req ureq; int bytes = 0, psn_sz; struct ib_umem *umem; int psn_nume; qplib_qp = &qp->qplib_qp; - cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, - ib_uctx); - if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) - return -EFAULT; bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size); /* Consider mapping PSN search memory only for RC QPs. */ @@ -1038,15 +1044,20 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ? sizeof(struct sq_psn_search_ext) : sizeof(struct sq_psn_search); - psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? - qplib_qp->sq.max_wqe : - ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) / - sizeof(struct bnxt_qplib_sge)); + if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) { + psn_nume = ureq->sq_slots; + } else { + psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? + qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) / + sizeof(struct bnxt_qplib_sge)); + } + if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2)) + psn_nume = roundup_pow_of_two(psn_nume); bytes += (psn_nume * psn_sz); } bytes = PAGE_ALIGN(bytes); - umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes, + umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(umem)) return PTR_ERR(umem); @@ -1055,12 +1066,12 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, qplib_qp->sq.sg_info.umem = umem; qplib_qp->sq.sg_info.pgsize = PAGE_SIZE; qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT; - qplib_qp->qp_handle = ureq.qp_handle; + qplib_qp->qp_handle = ureq->qp_handle; if (!qp->qplib_qp.srq) { bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size); bytes = PAGE_ALIGN(bytes); - umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes, + umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(umem)) goto rqfail; @@ -1156,6 +1167,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp /* Shadow QP SQ depth should be same as QP1 RQ depth */ qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6); qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; + qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.sq.max_sge = 2; /* Q full delta can be 1 since it is internal QP */ qp->qplib_qp.sq.q_full_delta = 1; @@ -1167,6 +1179,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6); qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; + qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; /* Q full delta can be 1 since it is internal QP */ qp->qplib_qp.rq.q_full_delta = 1; @@ -1228,6 +1241,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, */ entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx); rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + rq->max_sw_wqe = rq->max_wqe; rq->q_full_delta = 0; rq->sg_info.pgsize = PAGE_SIZE; rq->sg_info.pgshft = PAGE_SHIFT; @@ -1256,14 +1270,15 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) static int 
bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, struct ib_qp_init_attr *init_attr, - struct bnxt_re_ucontext *uctx) + struct bnxt_re_ucontext *uctx, + struct bnxt_re_qp_req *ureq) { struct bnxt_qplib_dev_attr *dev_attr; struct bnxt_qplib_qp *qplqp; struct bnxt_re_dev *rdev; struct bnxt_qplib_q *sq; + int diff = 0; int entries; - int diff; int rc; rdev = qp->rdev; @@ -1272,21 +1287,28 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, dev_attr = &rdev->dev_attr; sq->max_sge = init_attr->cap.max_send_sge; - if (sq->max_sge > dev_attr->max_qp_sges) { - sq->max_sge = dev_attr->max_qp_sges; - init_attr->cap.max_send_sge = sq->max_sge; - } + entries = init_attr->cap.max_send_wr; + if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) { + sq->max_wqe = ureq->sq_slots; + sq->max_sw_wqe = ureq->sq_slots; + sq->wqe_size = sizeof(struct sq_sge); + } else { + if (sq->max_sge > dev_attr->max_qp_sges) { + sq->max_sge = dev_attr->max_qp_sges; + init_attr->cap.max_send_sge = sq->max_sge; + } - rc = bnxt_re_setup_swqe_size(qp, init_attr); - if (rc) - return rc; + rc = bnxt_re_setup_swqe_size(qp, init_attr); + if (rc) + return rc; - entries = init_attr->cap.max_send_wr; - /* Allocate 128 + 1 more than what's provided */ - diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ? - 0 : BNXT_QPLIB_RESERVED_QP_WRS; - entries = bnxt_re_init_depth(entries + diff + 1, uctx); - sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1); + /* Allocate 128 + 1 more than what's provided */ + diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ? + 0 : BNXT_QPLIB_RESERVED_QP_WRS; + entries = bnxt_re_init_depth(entries + diff + 1, uctx); + sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1); + sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true); + } sq->q_full_delta = diff + 1; /* * Reserving one slot for Phantom WQE. 
Application can @@ -1349,10 +1371,10 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, struct ib_qp_init_attr *init_attr, - struct ib_udata *udata) + struct bnxt_re_ucontext *uctx, + struct bnxt_re_qp_req *ureq) { struct bnxt_qplib_dev_attr *dev_attr; - struct bnxt_re_ucontext *uctx; struct bnxt_qplib_qp *qplqp; struct bnxt_re_dev *rdev; struct bnxt_re_cq *cq; @@ -1362,7 +1384,6 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, qplqp = &qp->qplib_qp; dev_attr = &rdev->dev_attr; - uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); /* Setup misc params */ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr); qplqp->pd = &pd->qplib_pd; @@ -1375,8 +1396,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, goto out; } qplqp->type = (u8)qptype; - qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode; - + qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx); if (init_attr->qp_type == IB_QPT_RC) { qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom; qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; @@ -1411,14 +1431,14 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, bnxt_re_adjust_gsi_rq_attr(qp); /* Setup SQ */ - rc = bnxt_re_init_sq_attr(qp, init_attr, uctx); + rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq); if (rc) goto out; if (init_attr->qp_type == IB_QPT_GSI) bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx); - if (udata) /* This will update DPI and qp_handle */ - rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); + if (uctx) /* This will update DPI and qp_handle */ + rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq); out: return rc; } @@ -1519,14 +1539,27 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, struct ib_udata *udata) { - struct ib_pd *ib_pd = ib_qp->pd; - struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); - struct bnxt_re_dev *rdev = pd->rdev; - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; - struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_re_ucontext *uctx; + struct bnxt_re_qp_req ureq; + struct bnxt_re_dev *rdev; + struct bnxt_re_pd *pd; + struct bnxt_re_qp *qp; + struct ib_pd *ib_pd; u32 active_qps; int rc; + ib_pd = ib_qp->pd; + pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); + rdev = pd->rdev; + dev_attr = &rdev->dev_attr; + qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); + + uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); + if (udata) + if (ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)))) + return -EFAULT; + rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); if (!rc) { rc = -EINVAL; @@ -1534,7 +1567,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, } qp->rdev = rdev; - rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); + rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq); if (rc) goto fail; @@ -1685,6 +1718,10 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) if (qplib_srq->cq) nq = qplib_srq->cq->nq; + if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) { + free_page((unsigned long)srq->uctx_srq_page); + hash_del(&srq->hash_entry); + } bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); 
ib_umem_release(srq->umem); atomic_dec(&rdev->stats.res.srq_count); @@ -1789,9 +1826,18 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, } if (udata) { - struct bnxt_re_srq_resp resp; + struct bnxt_re_srq_resp resp = {}; resp.srqid = srq->qplib_srq.id; + if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) { + hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id); + srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL); + if (!srq->uctx_srq_page) { + rc = -ENOMEM; + goto fail; + } + resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT; + } rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!"); @@ -2155,6 +2201,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx); qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe; qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - qp_attr->cap.max_recv_wr; qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; @@ -3845,9 +3892,12 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags) mr->rdev = rdev; mr->qplib_mr.pd = &pd->qplib_pd; - mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING) + bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr); + /* Allocate and register 0 as the address */ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) @@ -3945,7 +3995,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, mr->rdev = rdev; mr->qplib_mr.pd = &pd->qplib_pd; - mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR; + mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR; mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); @@ -4062,21 +4112,28 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 mr->rdev = rdev; mr->qplib_mr.pd = &pd->qplib_pd; - mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; - rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (rc) { - ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc); - rc = -EIO; - goto free_mr; + if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) { + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc); + rc = -EIO; + goto free_mr; + } + /* The fixed portion of the rkey is the same as the lkey */ + mr->ib_mr.rkey = mr->qplib_mr.rkey; + } else { + mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR; } - /* The fixed portion of the rkey is the same as the lkey */ - mr->ib_mr.rkey = mr->qplib_mr.rkey; mr->ib_umem = umem; mr->qplib_mr.va = virt_addr; mr->qplib_mr.total_size = length; + if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING) + bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr); + umem_pgs = ib_umem_num_dma_blocks(umem, page_size); rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem, umem_pgs, page_size); @@ -4122,7 +4179,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start, u64 length, u64 
virt_addr, int fd, - int mr_access_flags, struct ib_udata *udata) + int mr_access_flags, + struct uverbs_attr_bundle *attrs) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; @@ -4187,9 +4245,6 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) resp.cqe_sz = sizeof(struct cq_base); resp.max_cqd = dev_attr->max_cq_wqes; - resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE; - resp.mode = rdev->chip_ctx->modes.wqe_mode; - if (rdev->chip_ctx->modes.db_push) resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED; @@ -4211,7 +4266,13 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) goto cfail; if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) { resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED; - uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED; + uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED; + } + if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) { + resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE; + resp.mode = rdev->chip_ctx->modes.wqe_mode; + if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE) + uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED; } } @@ -4265,6 +4326,19 @@ static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq return cq; } +static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id) +{ + struct bnxt_re_srq *srq = NULL, *tmp_srq; + + hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) { + if (tmp_srq->qplib_srq.id == srq_id) { + srq = tmp_srq; + break; + } + } + return srq; +} + /* Helper function to mmap the virtual memory from user app */ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) { @@ -4493,12 +4567,13 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund struct bnxt_re_ucontext *uctx; struct ib_ucontext *ib_uctx; struct bnxt_re_dev *rdev; + struct bnxt_re_srq *srq; + u32 length = PAGE_SIZE; struct bnxt_re_cq *cq; u64 mem_offset; + u32 offset = 0; u64 addr = 0; - u32 length; - u32 offset; - u32 cq_id; + u32 res_id; int err; ib_uctx = ib_uverbs_get_ucontext(attrs); @@ -4511,23 +4586,24 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); rdev = uctx->rdev; + err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID); + if (err) + return err; switch (res_type) { case BNXT_RE_CQ_TOGGLE_MEM: - err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID); - if (err) - return err; - - cq = bnxt_re_search_for_cq(rdev, cq_id); + cq = bnxt_re_search_for_cq(rdev, res_id); if (!cq) return -EINVAL; - length = PAGE_SIZE; addr = (u64)cq->uctx_cq_page; - mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE; - offset = 0; break; case BNXT_RE_SRQ_TOGGLE_MEM: + srq = bnxt_re_search_for_srq(rdev, res_id); + if (!srq) + return -EINVAL; + + addr = (u64)srq->uctx_srq_page; break; default: diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index e98cb17173385b..b789e47ec97a85 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -77,6 +77,8 @@ struct bnxt_re_srq { struct bnxt_qplib_srq qplib_srq; struct ib_umem *umem; spinlock_t lock; /* protect srq */ + void *uctx_srq_page; + struct hlist_node hash_entry; }; struct bnxt_re_qp { @@ -171,12 +173,26 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge) return sizeof(struct rq_wqe_hdr) + (nsge * 
sizeof(struct sq_sge)); } +enum { + BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL, + BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL, +}; + static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx) { - return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ? + return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED) ? ent : roundup_pow_of_two(ent) : ent; } +static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev, + struct bnxt_re_ucontext *uctx) +{ + if (uctx) + return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED; + else + return rdev->chip_ctx->modes.wqe_mode; +} + int bnxt_re_query_device(struct ib_device *ibdev, struct ib_device_attr *ib_attr, struct ib_udata *udata); @@ -242,7 +258,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start, u64 length, u64 virt_addr, int fd, int mr_access_flags, - struct ib_udata *udata); + struct uverbs_attr_bundle *attrs); int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata); void bnxt_re_dealloc_ucontext(struct ib_ucontext *context); int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 9714b9ab75240b..777068de4bbc17 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -83,11 +83,12 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev); static int bnxt_re_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr); static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev); -static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev); +static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type); static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev); static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len, u32 *offset); +static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable); static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev) { struct bnxt_qplib_chip_ctx *cctx; @@ -129,18 +130,20 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev) } } -static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode) +static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev) { struct bnxt_qplib_chip_ctx *cctx; cctx = rdev->chip_ctx; - cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ? - mode : BNXT_QPLIB_WQE_MODE_STATIC; + cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? 
+ BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC; if (bnxt_re_hwrm_qcaps(rdev)) dev_err(rdev_to_dev(rdev), "Failed to query hwrm qcaps\n"); - if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) + if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) { cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT; + cctx->modes.toggle_bits |= BNXT_QPLIB_SRQ_TOGGLE_BIT; + } } static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) @@ -158,7 +161,7 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) kfree(chip_ctx); } -static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode) +static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) { struct bnxt_qplib_chip_ctx *chip_ctx; struct bnxt_en_dev *en_dev; @@ -166,6 +169,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode) en_dev = rdev->en_dev; + rdev->qplib_res.pdev = en_dev->pdev; chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL); if (!chip_ctx) return -ENOMEM; @@ -180,7 +184,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode) rdev->qplib_res.dattr = &rdev->dev_attr; rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev); - bnxt_re_set_drv_mode(rdev, wqe_mode); + bnxt_re_set_drv_mode(rdev); bnxt_re_set_db_offset(rdev); rc = bnxt_qplib_map_db_bar(&rdev->qplib_res); @@ -290,21 +294,31 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev) static void bnxt_re_shutdown(struct auxiliary_device *adev) { - struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev); + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); + struct bnxt_re_dev *rdev; - if (!rdev) + if (!en_info) return; + + rdev = en_info->rdev; ib_unregister_device(&rdev->ibdev); - bnxt_re_dev_uninit(rdev); + bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); } static void bnxt_re_stop_irq(void *handle) { - struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; - struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw; + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); + struct bnxt_qplib_rcfw *rcfw; + struct bnxt_re_dev *rdev; struct bnxt_qplib_nq *nq; int indx; + if (!en_info) + return; + + rdev = en_info->rdev; + rcfw = &rdev->rcfw; + for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) { nq = &rdev->nq[indx - 1]; bnxt_qplib_nq_stop_irq(nq, false); @@ -315,12 +329,19 @@ static void bnxt_re_stop_irq(void *handle) static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) { - struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; - struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries; - struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw; + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); + struct bnxt_msix_entry *msix_ent; + struct bnxt_qplib_rcfw *rcfw; + struct bnxt_re_dev *rdev; struct bnxt_qplib_nq *nq; int indx, rc; + if (!en_info) + return; + + rdev = en_info->rdev; + msix_ent = rdev->en_dev->msix_entries; + rcfw = &rdev->rcfw; if (!ent) { /* Not setting the f/w timeout bit in rcfw. 
* During the driver unload the first command @@ -365,14 +386,9 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = { static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev) { struct bnxt_en_dev *en_dev; - int rc; en_dev = rdev->en_dev; - - rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev); - if (!rc) - rdev->qplib_res.pdev = rdev->en_dev->pdev; - return rc; + return bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev->adev); } static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd) @@ -1573,7 +1589,7 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) return rc; } -static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev) +static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) { u8 type; int rc; @@ -1606,8 +1622,10 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev) bnxt_re_deinitialize_dbr_pacing(rdev); bnxt_re_destroy_chip_ctx(rdev); - if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) - bnxt_unregister_dev(rdev->en_dev); + if (op_type == BNXT_RE_COMPLETE_REMOVE) { + if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) + bnxt_unregister_dev(rdev->en_dev); + } } /* worker thread for polling periodic events. Now used for QoS programming*/ @@ -1620,7 +1638,7 @@ static void bnxt_re_worker(struct work_struct *work) schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); } -static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode) +static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) { struct bnxt_re_ring_attr rattr = {}; struct bnxt_qplib_creq_ctx *creq; @@ -1629,16 +1647,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode) u8 type; int rc; - /* Registered a new RoCE device instance to netdev */ - rc = bnxt_re_register_netdev(rdev); - if (rc) { - ibdev_err(&rdev->ibdev, - "Failed to register with netedev: %#x\n", rc); - return -EINVAL; + if (op_type == BNXT_RE_COMPLETE_INIT) { + /* Registered a new RoCE device instance to netdev */ + rc = bnxt_re_register_netdev(rdev); + if (rc) { + ibdev_err(&rdev->ibdev, + "Failed to register with netedev: %#x\n", rc); + return -EINVAL; + } } set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); - rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode); + rc = bnxt_re_setup_chip_ctx(rdev); if (rc) { bnxt_unregister_dev(rdev->en_dev); clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); @@ -1771,6 +1791,8 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode) bnxt_re_vf_res_config(rdev); } hash_init(rdev->cq_hash); + if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) + hash_init(rdev->srq_hash); return 0; free_sctx: @@ -1785,21 +1807,38 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode) free_rcfw: bnxt_qplib_free_rcfw_channel(&rdev->rcfw); fail: - bnxt_re_dev_uninit(rdev); + bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); return rc; } -static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode) +static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev, + struct bnxt_re_en_dev_info *en_info, + struct auxiliary_device *adev) +{ + /* Before updating the rdev pointer in bnxt_re_en_dev_info structure, + * take the rtnl lock to avoid accessing invalid rdev pointer from + * L2 ULP callbacks. This is applicable in all the places where rdev + * pointer is updated in bnxt_re_en_dev_info. 
+ */ + rtnl_lock(); + en_info->rdev = rdev; + rdev->adev = adev; + rtnl_unlock(); +} + +static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type) { struct bnxt_aux_priv *aux_priv = container_of(adev, struct bnxt_aux_priv, aux_dev); + struct bnxt_re_en_dev_info *en_info; struct bnxt_en_dev *en_dev; struct bnxt_re_dev *rdev; int rc; - /* en_dev should never be NULL as long as adev and aux_dev are valid. */ - en_dev = aux_priv->edev; + en_info = auxiliary_get_drvdata(adev); + en_dev = en_info->en_dev; + rdev = bnxt_re_dev_add(aux_priv, en_dev); if (!rdev || !rdev_to_dev(rdev)) { @@ -1807,7 +1846,9 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode) goto exit; } - rc = bnxt_re_dev_init(rdev, wqe_mode); + bnxt_re_update_en_info_rdev(rdev, en_info, adev); + + rc = bnxt_re_dev_init(rdev, op_type); if (rc) goto re_dev_dealloc; @@ -1817,12 +1858,22 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode) aux_priv->aux_dev.name); goto re_dev_uninit; } - auxiliary_set_drvdata(adev, rdev); + + rdev->nb.notifier_call = bnxt_re_netdev_event; + rc = register_netdevice_notifier(&rdev->nb); + if (rc) { + rdev->nb.notifier_call = NULL; + pr_err("%s: Cannot register to netdevice_notifier", + ROCE_DRV_MODULE_NAME); + return rc; + } + bnxt_re_setup_cc(rdev, true); return 0; re_dev_uninit: - bnxt_re_dev_uninit(rdev); + bnxt_re_update_en_info_rdev(NULL, en_info, adev); + bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); re_dev_dealloc: ib_dealloc_device(&rdev->ibdev); exit: @@ -1905,14 +1956,9 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, #define BNXT_ADEV_NAME "bnxt_en" -static void bnxt_re_remove(struct auxiliary_device *adev) +static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type, + struct auxiliary_device *aux_dev) { - struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev); - - if (!rdev) - return; - - mutex_lock(&bnxt_re_mutex); if (rdev->nb.notifier_call) { unregister_netdevice_notifier(&rdev->nb); rdev->nb.notifier_call = NULL; @@ -1920,41 +1966,56 @@ static void bnxt_re_remove(struct auxiliary_device *adev) /* If notifier is null, we should have already done a * clean up before coming here. 
*/ - goto skip_remove; + return; } bnxt_re_setup_cc(rdev, false); ib_unregister_device(&rdev->ibdev); - bnxt_re_dev_uninit(rdev); + bnxt_re_dev_uninit(rdev, op_type); ib_dealloc_device(&rdev->ibdev); -skip_remove: +} + +static void bnxt_re_remove(struct auxiliary_device *adev) +{ + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); + struct bnxt_re_dev *rdev; + + mutex_lock(&bnxt_re_mutex); + if (!en_info) { + mutex_unlock(&bnxt_re_mutex); + return; + } + rdev = en_info->rdev; + + if (rdev) + bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev); + kfree(en_info); mutex_unlock(&bnxt_re_mutex); } static int bnxt_re_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { - struct bnxt_re_dev *rdev; + struct bnxt_aux_priv *aux_priv = + container_of(adev, struct bnxt_aux_priv, aux_dev); + struct bnxt_re_en_dev_info *en_info; + struct bnxt_en_dev *en_dev; int rc; + en_dev = aux_priv->edev; + mutex_lock(&bnxt_re_mutex); - rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC); - if (rc) { + en_info = kzalloc(sizeof(*en_info), GFP_KERNEL); + if (!en_info) { mutex_unlock(&bnxt_re_mutex); - return rc; + return -ENOMEM; } + en_info->en_dev = en_dev; - rdev = auxiliary_get_drvdata(adev); + auxiliary_set_drvdata(adev, en_info); - rdev->nb.notifier_call = bnxt_re_netdev_event; - rc = register_netdevice_notifier(&rdev->nb); - if (rc) { - rdev->nb.notifier_call = NULL; - pr_err("%s: Cannot register to netdevice_notifier", - ROCE_DRV_MODULE_NAME); + rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT); + if (rc) goto err; - } - - bnxt_re_setup_cc(rdev, true); mutex_unlock(&bnxt_re_mutex); return 0; @@ -1967,11 +2028,15 @@ static int bnxt_re_probe(struct auxiliary_device *adev, static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) { - struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev); + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); + struct bnxt_en_dev *en_dev; + struct bnxt_re_dev *rdev; - if (!rdev) + if (!en_info) return 0; + rdev = en_info->rdev; + en_dev = en_info->en_dev; mutex_lock(&bnxt_re_mutex); /* L2 driver may invoke this callback during device error/crash or device * reset. Current RoCE driver doesn't recover the device in case of @@ -1990,13 +2055,20 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); bnxt_re_dev_stop(rdev); - bnxt_re_stop_irq(rdev); + bnxt_re_stop_irq(adev); /* Move the device states to detached and avoid sending any more * commands to HW */ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); wake_up_all(&rdev->rcfw.cmdq.waitq); + + if (rdev->pacing.dbr_pacing) + bnxt_re_set_pacing_dev_state(rdev); + + ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx", + __func__, en_dev->en_state); + bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev); mutex_unlock(&bnxt_re_mutex); return 0; @@ -2004,9 +2076,10 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) static int bnxt_re_resume(struct auxiliary_device *adev) { - struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev); + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); + struct bnxt_re_dev *rdev; - if (!rdev) + if (!en_info) return 0; mutex_lock(&bnxt_re_mutex); @@ -2017,7 +2090,9 @@ static int bnxt_re_resume(struct auxiliary_device *adev) * L2 driver want to modify the MSIx table. 
*/ - ibdev_info(&rdev->ibdev, "Handle device resume call"); + bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT); + rdev = en_info->rdev; + ibdev_info(&rdev->ibdev, "Device resume completed"); mutex_unlock(&bnxt_re_mutex); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 49e4a4a50bfaeb..42e98e5f94cb1a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -54,6 +54,10 @@ #include "qplib_rcfw.h" #include "qplib_sp.h" #include "qplib_fp.h" +#include +#include "bnxt_ulp.h" +#include "bnxt_re.h" +#include "ib_verbs.h" static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp); @@ -347,6 +351,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t) case NQ_BASE_TYPE_SRQ_EVENT: { struct bnxt_qplib_srq *srq; + struct bnxt_re_srq *srq_p; struct nq_srq_event *nqsrqe = (struct nq_srq_event *)nqe; @@ -354,6 +359,12 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t) q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high) << 32; srq = (struct bnxt_qplib_srq *)q_handle; + srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK) + >> NQ_CN_TOGGLE_SFT; + srq->dbinfo.toggle = srq->toggle; + srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq); + if (srq_p->uctx_srq_page) + *((u32 *)srq_p->uctx_srq_page) = srq->toggle; bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); if (nq->srqn_handler(nq, @@ -809,13 +820,13 @@ static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que) { int indx; - que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL); + que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL); if (!que->swq) return -ENOMEM; que->swq_start = 0; - que->swq_last = que->max_wqe - 1; - for (indx = 0; indx < que->max_wqe; indx++) + que->swq_last = que->max_sw_wqe - 1; + for (indx = 0; indx < que->max_sw_wqe; indx++) que->swq[indx].next_idx = indx + 1; que->swq[que->swq_last].next_idx = 0; /* Make it circular */ que->swq_last = 0; @@ -851,7 +862,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) hwq_attr.res = res; hwq_attr.sginfo = &sq->sg_info; hwq_attr.stride = sizeof(struct sq_sge); - hwq_attr.depth = bnxt_qplib_get_depth(sq); + hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false); hwq_attr.type = HWQ_TYPE_QUEUE; rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr); if (rc) @@ -879,7 +890,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) hwq_attr.res = res; hwq_attr.sginfo = &rq->sg_info; hwq_attr.stride = sizeof(struct sq_sge); - hwq_attr.depth = bnxt_qplib_get_depth(rq); + hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false); hwq_attr.type = HWQ_TYPE_QUEUE; rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr); if (rc) @@ -1011,7 +1022,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) hwq_attr.res = res; hwq_attr.sginfo = &sq->sg_info; hwq_attr.stride = sizeof(struct sq_sge); - hwq_attr.depth = bnxt_qplib_get_depth(sq); + hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true); hwq_attr.aux_stride = psn_sz; hwq_attr.aux_depth = psn_sz ? 
bnxt_qplib_set_sq_size(sq, qp->wqe_mode) : 0; @@ -1052,7 +1063,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) hwq_attr.res = res; hwq_attr.sginfo = &rq->sg_info; hwq_attr.stride = sizeof(struct sq_sge); - hwq_attr.depth = bnxt_qplib_get_depth(rq); + hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false); hwq_attr.aux_stride = 0; hwq_attr.aux_depth = 0; hwq_attr.type = HWQ_TYPE_QUEUE; @@ -2471,6 +2482,32 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, return rc; } +static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot) +{ + struct bnxt_qplib_hwq *sq_hwq; + struct bnxt_qplib_swq *swq; + int cqe_sq_cons = -1; + u32 start, last; + + sq_hwq = &sq->hwq; + + start = sq->swq_start; + last = sq->swq_last; + + while (last != start) { + swq = &sq->swq[last]; + if (swq->slot_idx == cqe_slot) { + cqe_sq_cons = swq->next_idx; + dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n", + __func__, cqe_sq_cons, cqe_slot); + break; + } + + last = swq->next_idx; + } + return cqe_sq_cons; +} + static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, struct cq_req *hwcqe, struct bnxt_qplib_cqe **pcqe, int *budget, @@ -2478,9 +2515,10 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, { struct bnxt_qplib_swq *swq; struct bnxt_qplib_cqe *cqe; + u32 cqe_sq_cons, slot_num; struct bnxt_qplib_qp *qp; struct bnxt_qplib_q *sq; - u32 cqe_sq_cons; + int cqe_cons; int rc = 0; qp = (struct bnxt_qplib_qp *)((unsigned long) @@ -2492,12 +2530,26 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, } sq = &qp->sq; - cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe; + cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe; if (qp->sq.flushed) { dev_dbg(&cq->hwq.pdev->dev, "%s: QP in Flush QP = %p\n", __func__, qp); goto done; } + + if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) { + slot_num = le16_to_cpu(hwcqe->sq_cons_idx); + cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num); + if (cqe_cons < 0) { + dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n", + __func__, slot_num); + goto done; + } + cqe_sq_cons = cqe_cons; + dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n", + __func__, cqe_sq_cons, sq->swq_last, sq->swq_start); + } + /* Require to walk the sq's swq to fabricate CQEs for all previously * signaled SWQEs due to CQE aggregation from the current sq cons * to the cqe_sq_cons @@ -2882,7 +2934,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq, cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx); if (cqe_cons == 0xFFFF) goto do_rq; - cqe_cons %= sq->max_wqe; + cqe_cons %= sq->max_sw_wqe; if (qp->sq.flushed) { dev_dbg(&cq->hwq.pdev->dev, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 56538b90d6c56f..b62df8701950ff 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -105,6 +105,7 @@ struct bnxt_qplib_srq { struct bnxt_qplib_sg_info sg_info; u16 eventq_hw_ring_id; spinlock_t lock; /* protect SRQE link list */ + u8 toggle; }; struct bnxt_qplib_sge { @@ -251,6 +252,7 @@ struct bnxt_qplib_q { struct bnxt_qplib_db_info dbinfo; struct bnxt_qplib_sg_info sg_info; u32 max_wqe; + u32 max_sw_wqe; u16 wqe_size; u16 q_full_delta; u16 max_sge; @@ -586,15 +588,22 @@ static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx) que->swq_start = que->swq[idx].next_idx; } -static inline u32 
bnxt_qplib_get_depth(struct bnxt_qplib_q *que) +static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq) { - return (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge); + u32 slots; + + /* Queue depth is the number of slots. */ + slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge); + /* For variable WQE mode, need to align the slots to 256 */ + if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq) + slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN); + return slots; } static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode) { return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? - que->max_wqe : bnxt_qplib_get_depth(que); + que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true); } static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode) @@ -641,4 +650,14 @@ static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn) (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) & SQ_MSN_SEARCH_START_PSN_MASK)); } + +static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp) +{ + return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE); +} + +static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status) +{ + return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp); +} #endif /* __BNXT_QPLIB_FP_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index a0f78cde314f74..c2f710364e0ffe 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -82,6 +82,7 @@ struct bnxt_qplib_db_pacing_data { u32 fifo_room_mask; u32 fifo_room_shift; u32 grc_reg_offset; + u32 dev_err_state; }; #define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000 @@ -565,4 +566,14 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx) return cctx->modes.dbr_pacing; } +static inline bool _is_alloc_mr_unified(u16 dev_cap_flags) +{ + return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC; +} + +static inline bool _is_relaxed_ordering_supported(u16 dev_cap_ext_flags2) +{ + return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED; +} + #endif /* __BNXT_QPLIB_RES_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 9328db92fa6db3..4f75e7e5bcf72f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -95,11 +95,13 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_cmdqmsg msg = {}; struct creq_query_func_resp_sb *sb; struct bnxt_qplib_rcfw_sbuf sbuf; + struct bnxt_qplib_chip_ctx *cctx; struct cmdq_query_func req = {}; u8 *tqm_alloc; int i, rc; u32 temp; + cctx = rcfw->res->cctx; bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_FUNC, sizeof(req)); @@ -133,8 +135,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, * reporting the max number */ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1; - attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ? - 6 : sb->max_sge; + + attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ? 
+ min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6; attr->max_cq = le32_to_cpu(sb->max_cq); attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); attr->max_cq_sges = attr->max_qp_sges; @@ -541,7 +544,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) req.pd_id = cpu_to_le32(mrw->pd->id); req.mrw_flags = mrw->type; if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR && - mrw->flags & BNXT_QPLIB_FR_PMR) || + mrw->access_flags & BNXT_QPLIB_FR_PMR) || mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A || mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B) req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY; @@ -653,9 +656,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) << CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) & CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK)); - req.access = (mr->flags & 0xFFFF); + req.access = (mr->access_flags & 0xFFFF); req.va = cpu_to_le64(mr->va); req.key = cpu_to_le32(mr->lkey); + if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) + req.key = cpu_to_le32(mr->pd->id); + req.flags = cpu_to_le16(mr->flags); req.mr_size = cpu_to_le64(mr->total_size); bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), @@ -664,6 +670,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, if (rc) goto fail; + if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) { + mr->lkey = le32_to_cpu(resp.xid); + mr->rkey = mr->lkey; + } + return 0; fail: diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 16a67d70a6fc4b..acd9c14a31c4ba 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -40,6 +40,7 @@ #ifndef __BNXT_QPLIB_SP_H__ #define __BNXT_QPLIB_SP_H__ +#include #define BNXT_QPLIB_RESERVED_QP_WRS 128 struct bnxt_qplib_dev_attr { @@ -108,7 +109,7 @@ struct bnxt_qplib_ah { struct bnxt_qplib_mrw { struct bnxt_qplib_pd *pd; int type; - u32 flags; + u32 access_flags; #define BNXT_QPLIB_FR_PMR 0x80000000 u32 lkey; u32 rkey; @@ -116,6 +117,7 @@ struct bnxt_qplib_mrw { u64 va; u64 total_size; u32 npages; + u16 flags; u64 mr_handle; struct bnxt_qplib_hwq hwq; }; @@ -351,4 +353,11 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid, int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res, struct bnxt_qplib_cc_param *cc_param); +#define BNXT_VAR_MAX_WQE 4352 +#define BNXT_VAR_MAX_SLOT_ALIGN 256 +#define BNXT_VAR_MAX_SGE 13 +#define BNXT_RE_MAX_RQ_WQES 65536 + +#define BNXT_STATIC_MAX_SGE 6 + #endif /* __BNXT_QPLIB_SP_H__*/ diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 0425309695057e..3ec895284e4931 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -409,7 +409,7 @@ struct creq_deinitialize_fw_resp { u8 reserved48[6]; }; -/* cmdq_create_qp (size:768b/96B) */ +/* cmdq_create_qp (size:832b/104B) */ struct cmdq_create_qp { u8 opcode; #define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL @@ -430,8 +430,11 @@ struct cmdq_create_qp { #define CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL #define CMDQ_CREATE_QP_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL + #define CMDQ_CREATE_QP_QP_FLAGS_EXPRESS_MODE_ENABLED 0x100UL + #define CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID 0x200UL + #define CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED 0x400UL #define CMDQ_CREATE_QP_QP_FLAGS_LAST \ - 
CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED + CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED u8 type; #define CMDQ_CREATE_QP_TYPE_RC 0x2UL #define CMDQ_CREATE_QP_TYPE_UD 0x4UL @@ -492,6 +495,9 @@ struct cmdq_create_qp { __le64 rq_pbl; __le64 irrq_addr; __le64 orrq_addr; + __le32 request_xid; + __le16 steering_tag; + __le16 reserved16; }; /* creq_create_qp_resp (size:128b/16B) */ @@ -972,13 +978,14 @@ struct creq_query_qp_extend_resp_sb_tlv { __le16 reserved_16; }; -/* cmdq_create_srq (size:384b/48B) */ +/* cmdq_create_srq (size:448b/56B) */ struct cmdq_create_srq { u8 opcode; #define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL #define CMDQ_CREATE_SRQ_OPCODE_LAST CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ u8 cmd_size; __le16 flags; + #define CMDQ_CREATE_SRQ_FLAGS_STEERING_TAG_VALID 0x1UL __le16 cookie; u8 resp_size; u8 reserved8; @@ -1012,6 +1019,8 @@ struct cmdq_create_srq { __le32 dpi; __le32 pd_id; __le64 pbl; + __le16 steering_tag; + u8 reserved48[6]; }; /* creq_create_srq_resp (size:128b/16B) */ @@ -1118,7 +1127,7 @@ struct creq_query_srq_resp_sb { __le32 data[4]; }; -/* cmdq_create_cq (size:384b/48B) */ +/* cmdq_create_cq (size:448b/56B) */ struct cmdq_create_cq { u8 opcode; #define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL @@ -1126,6 +1135,8 @@ struct cmdq_create_cq { u8 cmd_size; __le16 flags; #define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL + #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID 0x2UL + #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE 0x4UL __le16 cookie; u8 resp_size; u8 reserved8; @@ -1157,6 +1168,8 @@ struct cmdq_create_cq { __le32 dpi; __le32 cq_size; __le64 pbl; + __le16 steering_tag; + u8 reserved48[6]; }; /* creq_create_cq_resp (size:128b/16B) */ @@ -1288,11 +1301,12 @@ struct cmdq_allocate_mrw { #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B - #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xf0UL - #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 4 + #define CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID 0x10UL + #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xe0UL + #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 5 u8 access; #define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL - __le16 unused16; + __le16 steering_tag; __le32 pd_id; }; @@ -1359,14 +1373,16 @@ struct creq_deallocate_key_resp { __le32 bound_window_info; }; -/* cmdq_register_mr (size:384b/48B) */ +/* cmdq_register_mr (size:448b/56B) */ struct cmdq_register_mr { u8 opcode; #define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL #define CMDQ_REGISTER_MR_OPCODE_LAST CMDQ_REGISTER_MR_OPCODE_REGISTER_MR u8 cmd_size; __le16 flags; - #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL + #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL + #define CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID 0x2UL + #define CMDQ_REGISTER_MR_FLAGS_ENABLE_RO 0x4UL __le16 cookie; u8 resp_size; u8 reserved8; @@ -1415,6 +1431,8 @@ struct cmdq_register_mr { __le64 pbl; __le64 va; __le64 mr_size; + __le16 steering_tag; + u8 reserved48[6]; }; /* creq_register_mr_resp (size:128b/16B) */ diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 040ba2224f9ff6..b3757c6a0457a1 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1222,6 +1222,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) int ret; ep = lookup_atid(t, atid); + if (!ep) + return -EINVAL; pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid, be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); @@ -2279,6 
+2281,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) int ret = 0; ep = lookup_atid(t, atid); + if (!ep) + return -EINVAL; + la = (struct sockaddr_in *)&ep->com.local_addr; ra = (struct sockaddr_in *)&ep->com.remote_addr; la6 = (struct sockaddr_in6 *)&ep->com.local_addr; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 5111421f947327..14ced7b667fa2e 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -1126,13 +1126,19 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, goto err_free_mm2; mm->key = uresp.key; - mm->addr = virt_to_phys(chp->cq.queue); + mm->addr = 0; + mm->vaddr = chp->cq.queue; + mm->dma_addr = chp->cq.dma_addr; mm->len = chp->cq.memsize; + insert_flag_to_mmap(&rhp->rdev, mm, mm->addr); insert_mmap(ucontext, mm); mm2->key = uresp.gts_key; mm2->addr = chp->cq.bar2_pa; mm2->len = PAGE_SIZE; + mm2->vaddr = NULL; + mm2->dma_addr = 0; + insert_flag_to_mmap(&rhp->rdev, mm2, mm2->addr); insert_mmap(ucontext, mm2); } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index f838bb6718afcb..5b3007acaa1f72 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -532,11 +532,21 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) return container_of(c, struct c4iw_ucontext, ibucontext); } +enum { + CXGB4_MMAP_BAR, + CXGB4_MMAP_BAR_WC, + CXGB4_MMAP_CONTIG, + CXGB4_MMAP_NON_CONTIG, +}; + struct c4iw_mm_entry { struct list_head entry; u64 addr; u32 key; + void *vaddr; + dma_addr_t dma_addr; unsigned len; + u8 mmap_flag; }; static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext, @@ -561,6 +571,32 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext, return NULL; } +static inline void insert_flag_to_mmap(struct c4iw_rdev *rdev, + struct c4iw_mm_entry *mm, u64 addr) +{ + if (addr >= pci_resource_start(rdev->lldi.pdev, 0) && + (addr < (pci_resource_start(rdev->lldi.pdev, 0) + + pci_resource_len(rdev->lldi.pdev, 0)))) + mm->mmap_flag = CXGB4_MMAP_BAR; + else if (addr >= pci_resource_start(rdev->lldi.pdev, 2) && + (addr < (pci_resource_start(rdev->lldi.pdev, 2) + + pci_resource_len(rdev->lldi.pdev, 2)))) { + if (addr >= rdev->oc_mw_pa) { + mm->mmap_flag = CXGB4_MMAP_BAR_WC; + } else { + if (is_t4(rdev->lldi.adapter_type)) + mm->mmap_flag = CXGB4_MMAP_BAR; + else + mm->mmap_flag = CXGB4_MMAP_BAR_WC; + } + } else { + if (addr) + mm->mmap_flag = CXGB4_MMAP_CONTIG; + else + mm->mmap_flag = CXGB4_MMAP_NON_CONTIG; + } +} + static inline void insert_mmap(struct c4iw_ucontext *ucontext, struct c4iw_mm_entry *mm) { @@ -936,7 +972,6 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table); void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry); int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid, u32 nr_srqt); -int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev); int c4iw_pblpool_create(struct c4iw_rdev *rdev); int c4iw_rqtpool_create(struct c4iw_rdev *rdev); int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev); @@ -944,7 +979,6 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev); void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev); void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev); void c4iw_destroy_resource(struct c4iw_resource *rscp); -int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev); void c4iw_register_device(struct work_struct *work); void c4iw_unregister_device(struct c4iw_dev *dev); int 
__init c4iw_cm_init(void); @@ -1006,8 +1040,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp); int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count); int c4iw_flush_sq(struct c4iw_qp *qhp); int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid); -u16 c4iw_rqes_posted(struct c4iw_qp *qhp); -int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe); u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx); diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 246b739ddb2b21..10a4c738b59f9d 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -113,6 +113,9 @@ static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext, mm->key = uresp.status_page_key; mm->addr = virt_to_phys(rhp->rdev.status_page); mm->len = PAGE_SIZE; + mm->vaddr = NULL; + mm->dma_addr = 0; + insert_flag_to_mmap(&rhp->rdev, mm, mm->addr); insert_mmap(context, mm); } return 0; @@ -131,6 +134,11 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) struct c4iw_mm_entry *mm; struct c4iw_ucontext *ucontext; u64 addr; + u8 mmap_flag; + size_t size; + void *vaddr; + unsigned long vm_pgoff; + dma_addr_t dma_addr; pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff, key, len); @@ -145,47 +153,38 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) if (!mm) return -EINVAL; addr = mm->addr; + vaddr = mm->vaddr; + dma_addr = mm->dma_addr; + size = mm->len; + mmap_flag = mm->mmap_flag; kfree(mm); - if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) && - (addr < (pci_resource_start(rdev->lldi.pdev, 0) + - pci_resource_len(rdev->lldi.pdev, 0)))) { - - /* - * MA_SYNC register... - */ - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + switch (mmap_flag) { + case CXGB4_MMAP_BAR: + ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, + len, + pgprot_noncached(vma->vm_page_prot)); + break; + case CXGB4_MMAP_BAR_WC: ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, - len, vma->vm_page_prot); - } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) && - (addr < (pci_resource_start(rdev->lldi.pdev, 2) + - pci_resource_len(rdev->lldi.pdev, 2)))) { - - /* - * Map user DB or OCQP memory... - */ - if (addr >= rdev->oc_mw_pa) - vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot); - else { - if (!is_t4(rdev->lldi.adapter_type)) - vma->vm_page_prot = - t4_pgprot_wc(vma->vm_page_prot); - else - vma->vm_page_prot = - pgprot_noncached(vma->vm_page_prot); - } + len, t4_pgprot_wc(vma->vm_page_prot)); + break; + case CXGB4_MMAP_CONTIG: ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, len, vma->vm_page_prot); - } else { - - /* - * Map WQ or CQ contig dma memory... 
- */ - ret = remap_pfn_range(vma, vma->vm_start, - addr >> PAGE_SHIFT, - len, vma->vm_page_prot); + break; + case CXGB4_MMAP_NON_CONTIG: + vm_pgoff = vma->vm_pgoff; + vma->vm_pgoff = 0; + ret = dma_mmap_coherent(&rdev->lldi.pdev->dev, vma, + vaddr, dma_addr, size); + vma->vm_pgoff = vm_pgoff; + break; + default: + ret = -EINVAL; + break; } return ret; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index d16d8eaa141502..7b5c4522b426a6 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2281,24 +2281,39 @@ int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, if (ret) goto err_free_ma_sync_key; sq_key_mm->key = uresp.sq_key; - sq_key_mm->addr = qhp->wq.sq.phys_addr; + sq_key_mm->addr = 0; + sq_key_mm->vaddr = qhp->wq.sq.queue; + sq_key_mm->dma_addr = qhp->wq.sq.dma_addr; sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); + insert_flag_to_mmap(&rhp->rdev, sq_key_mm, sq_key_mm->addr); insert_mmap(ucontext, sq_key_mm); if (!attrs->srq) { rq_key_mm->key = uresp.rq_key; - rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue); + rq_key_mm->addr = 0; + rq_key_mm->vaddr = qhp->wq.rq.queue; + rq_key_mm->dma_addr = qhp->wq.rq.dma_addr; rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); + insert_flag_to_mmap(&rhp->rdev, rq_key_mm, + rq_key_mm->addr); insert_mmap(ucontext, rq_key_mm); } sq_db_key_mm->key = uresp.sq_db_gts_key; sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa; + sq_db_key_mm->vaddr = NULL; + sq_db_key_mm->dma_addr = 0; sq_db_key_mm->len = PAGE_SIZE; + insert_flag_to_mmap(&rhp->rdev, sq_db_key_mm, + sq_db_key_mm->addr); insert_mmap(ucontext, sq_db_key_mm); if (!attrs->srq) { rq_db_key_mm->key = uresp.rq_db_gts_key; rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa; rq_db_key_mm->len = PAGE_SIZE; + rq_db_key_mm->vaddr = NULL; + rq_db_key_mm->dma_addr = 0; + insert_flag_to_mmap(&rhp->rdev, rq_db_key_mm, + rq_db_key_mm->addr); insert_mmap(ucontext, rq_db_key_mm); } if (ma_sync_key_mm) { @@ -2307,6 +2322,10 @@ int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, (pci_resource_start(rhp->rdev.lldi.pdev, 0) + PCIE_MA_SYNC_A) & PAGE_MASK; ma_sync_key_mm->len = PAGE_SIZE; + ma_sync_key_mm->vaddr = NULL; + ma_sync_key_mm->dma_addr = 0; + insert_flag_to_mmap(&rhp->rdev, ma_sync_key_mm, + ma_sync_key_mm->addr); insert_mmap(ucontext, ma_sync_key_mm); } @@ -2761,12 +2780,19 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs, if (ret) goto err_free_srq_db_key_mm; srq_key_mm->key = uresp.srq_key; - srq_key_mm->addr = virt_to_phys(srq->wq.queue); + srq_key_mm->addr = 0; srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize); + srq_key_mm->vaddr = srq->wq.queue; + srq_key_mm->dma_addr = srq->wq.dma_addr; + insert_flag_to_mmap(&rhp->rdev, srq_key_mm, srq_key_mm->addr); insert_mmap(ucontext, srq_key_mm); srq_db_key_mm->key = uresp.srq_db_gts_key; srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa; srq_db_key_mm->len = PAGE_SIZE; + srq_db_key_mm->vaddr = NULL; + srq_db_key_mm->dma_addr = 0; + insert_flag_to_mmap(&rhp->rdev, srq_db_key_mm, + srq_db_key_mm->addr); insert_mmap(ucontext, srq_db_key_mm); } diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h index e580e087e9da0c..d7fc9d5eeefd32 100644 --- a/drivers/infiniband/hw/efa/efa.h +++ b/drivers/infiniband/hw/efa/efa.h @@ -168,7 +168,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length, u64 
virt_addr, int fd, int access_flags, - struct ib_udata *udata); + struct uverbs_attr_bundle *attrs); int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable); diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h index 4296662e59c397..cd03a5429beb52 100644 --- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h @@ -674,6 +674,9 @@ struct efa_admin_feature_device_attr_desc { /* Max RDMA transfer size in bytes */ u32 max_rdma_size; + + /* Unique global ID for an EFA device */ + u64 guid; }; struct efa_admin_feature_queue_attr_desc { diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index 5b9c2b16df0e5f..5a774925cdea95 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -465,6 +465,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, result->db_bar = resp.u.device_attr.db_bar; result->max_rdma_size = resp.u.device_attr.max_rdma_size; result->device_caps = resp.u.device_attr.device_caps; + result->guid = resp.u.device_attr.guid; if (result->admin_api_version < 1) { ibdev_err_ratelimited( diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h index 9714105fcf7ec1..668d033f747762 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.h +++ b/drivers/infiniband/hw/efa/efa_com_cmd.h @@ -112,6 +112,7 @@ struct efa_com_get_device_attr_result { u8 addr[EFA_GID_SIZE]; u64 page_size_cap; u64 max_mr_pages; + u64 guid; u32 mtu; u32 fw_version; u32 admin_api_version; diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index 1a777791bea35f..ad225823e6f2fe 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -441,6 +441,7 @@ static int efa_ib_device_add(struct efa_dev *dev) efa_set_host_info(dev); dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED; + dev->ibdev.node_guid = dev->dev_attr.guid; dev->ibdev.phys_port_cnt = 1; dev->ibdev.num_comp_vectors = dev->neqs ?: 1; dev->ibdev.dev.parent = &pdev->dev; diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index b1e0a1b7c59d3b..cc13415ff7e70c 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1684,14 +1684,14 @@ static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start, struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length, u64 virt_addr, int fd, int access_flags, - struct ib_udata *udata) + struct uverbs_attr_bundle *attrs) { struct efa_dev *dev = to_edev(ibpd->device); struct ib_umem_dmabuf *umem_dmabuf; struct efa_mr *mr; int err; - mr = efa_alloc_mr(ibpd, access_flags, udata); + mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata); if (IS_ERR(mr)) { err = PTR_ERR(mr); goto err_out; diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h index c8bd698e21b045..3c166359448d06 100644 --- a/drivers/infiniband/hw/erdma/erdma.h +++ b/drivers/infiniband/hw/erdma/erdma.h @@ -274,7 +274,8 @@ void notify_eq(struct erdma_eq *eq); void *get_next_valid_eqe(struct erdma_eq *eq); int erdma_aeq_init(struct erdma_dev *dev); -void erdma_aeq_destroy(struct erdma_dev *dev); +int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth); +void erdma_eq_destroy(struct 
erdma_dev *dev, struct erdma_eq *eq); void erdma_aeq_event_handler(struct erdma_dev *dev); void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb); diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c index 43ff40b5a09d90..a3d8922d1ad104 100644 --- a/drivers/infiniband/hw/erdma/erdma_cmdq.c +++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c @@ -158,20 +158,13 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev) { struct erdma_cmdq *cmdq = &dev->cmdq; struct erdma_eq *eq = &cmdq->eq; + int ret; - eq->depth = cmdq->max_outstandings; - eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, - &eq->qbuf_dma_addr, GFP_KERNEL); - if (!eq->qbuf) - return -ENOMEM; - - spin_lock_init(&eq->lock); - atomic64_set(&eq->event_num, 0); + ret = erdma_eq_common_init(dev, eq, cmdq->max_outstandings); + if (ret) + return ret; eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG; - eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma); - if (!eq->dbrec) - goto err_out; erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG, upper_32_bits(eq->qbuf_dma_addr)); @@ -181,12 +174,6 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev) erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma); return 0; - -err_out: - dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf, - eq->qbuf_dma_addr); - - return -ENOMEM; } int erdma_cmdq_init(struct erdma_dev *dev) @@ -247,10 +234,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev) clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state); - dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT, - cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr); - - dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma); + erdma_eq_destroy(dev, &cmdq->eq); dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT, cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c index 84ccdd8144c9e8..9a72fec6d5ccff 100644 --- a/drivers/infiniband/hw/erdma/erdma_eq.c +++ b/drivers/infiniband/hw/erdma/erdma_eq.c @@ -80,50 +80,60 @@ void erdma_aeq_event_handler(struct erdma_dev *dev) notify_eq(&dev->aeq); } -int erdma_aeq_init(struct erdma_dev *dev) +int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth) { - struct erdma_eq *eq = &dev->aeq; + u32 buf_size = depth << EQE_SHIFT; - eq->depth = ERDMA_DEFAULT_EQ_DEPTH; - - eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, + eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size, &eq->qbuf_dma_addr, GFP_KERNEL); if (!eq->qbuf) return -ENOMEM; - spin_lock_init(&eq->lock); - atomic64_set(&eq->event_num, 0); - atomic64_set(&eq->notify_num, 0); - - eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG; eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma); if (!eq->dbrec) - goto err_out; + goto err_free_qbuf; - erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG, - upper_32_bits(eq->qbuf_dma_addr)); - erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG, - lower_32_bits(eq->qbuf_dma_addr)); - erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth); - erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma); + spin_lock_init(&eq->lock); + atomic64_set(&eq->event_num, 0); + atomic64_set(&eq->notify_num, 0); + eq->ci = 0; + eq->depth = depth; return 0; -err_out: - dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf, +err_free_qbuf: + dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf, 
eq->qbuf_dma_addr); return -ENOMEM; } -void erdma_aeq_destroy(struct erdma_dev *dev) +void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq) { - struct erdma_eq *eq = &dev->aeq; - + dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma); dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf, eq->qbuf_dma_addr); +} - dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma); +int erdma_aeq_init(struct erdma_dev *dev) +{ + struct erdma_eq *eq = &dev->aeq; + int ret; + + ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH); + if (ret) + return ret; + + eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG; + + erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG, + upper_32_bits(eq->qbuf_dma_addr)); + erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG, + lower_32_bits(eq->qbuf_dma_addr)); + erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth); + erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma); + + return 0; } void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb) @@ -234,32 +244,21 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn) struct erdma_eq *eq = &dev->ceqs[ceqn].eq; int ret; - eq->depth = ERDMA_DEFAULT_EQ_DEPTH; - eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, - &eq->qbuf_dma_addr, GFP_KERNEL); - if (!eq->qbuf) - return -ENOMEM; - - spin_lock_init(&eq->lock); - atomic64_set(&eq->event_num, 0); - atomic64_set(&eq->notify_num, 0); + ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH); + if (ret) + return ret; eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG + (ceqn + 1) * ERDMA_DB_SIZE; - - eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma); - if (!eq->dbrec) { - dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, - eq->qbuf, eq->qbuf_dma_addr); - return -ENOMEM; - } - - eq->ci = 0; dev->ceqs[ceqn].dev = dev; + dev->ceqs[ceqn].ready = true; /* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */ ret = create_eq_cmd(dev, ceqn + 1, eq); - dev->ceqs[ceqn].ready = ret ? 
false : true; + if (ret) { + erdma_eq_destroy(dev, eq); + dev->ceqs[ceqn].ready = false; + } return ret; } @@ -283,9 +282,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn) if (err) return; - dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf, - eq->qbuf_dma_addr); - dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma); + erdma_eq_destroy(dev, eq); } int erdma_ceqs_init(struct erdma_dev *dev) diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c index 7080f8a71ec43f..62f497a710041d 100644 --- a/drivers/infiniband/hw/erdma/erdma_main.c +++ b/drivers/infiniband/hw/erdma/erdma_main.c @@ -333,7 +333,7 @@ static int erdma_probe_dev(struct pci_dev *pdev) erdma_cmdq_destroy(dev); err_uninit_aeq: - erdma_aeq_destroy(dev); + erdma_eq_destroy(dev, &dev->aeq); err_uninit_comm_irq: erdma_comm_irq_uninit(dev); @@ -366,7 +366,7 @@ static void erdma_remove_dev(struct pci_dev *pdev) erdma_ceqs_uninit(dev); erdma_hw_reset(dev); erdma_cmdq_destroy(dev); - erdma_aeq_destroy(dev); + erdma_eq_destroy(dev, &dev->aeq); erdma_comm_irq_uninit(dev); pci_free_irq_vectors(dev->pdev); erdma_device_uninit(dev); @@ -490,6 +490,7 @@ static const struct ib_device_ops erdma_device_ops = { .dereg_mr = erdma_dereg_mr, .destroy_cq = erdma_destroy_cq, .destroy_qp = erdma_destroy_qp, + .disassociate_ucontext = erdma_disassociate_ucontext, .get_dma_mr = erdma_get_dma_mr, .get_hw_stats = erdma_get_hw_stats, .get_port_immutable = erdma_get_port_immutable, diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index d7e1cbf9f5c26b..51d619edb6c5d2 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -1544,11 +1544,31 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, return ret; } +static enum ib_qp_state query_qp_state(struct erdma_qp *qp) +{ + switch (qp->attrs.state) { + case ERDMA_QP_STATE_IDLE: + return IB_QPS_INIT; + case ERDMA_QP_STATE_RTR: + return IB_QPS_RTR; + case ERDMA_QP_STATE_RTS: + return IB_QPS_RTS; + case ERDMA_QP_STATE_CLOSING: + return IB_QPS_ERR; + case ERDMA_QP_STATE_TERMINATE: + return IB_QPS_ERR; + case ERDMA_QP_STATE_ERROR: + return IB_QPS_ERR; + default: + return IB_QPS_ERR; + } +} + int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { - struct erdma_qp *qp; struct erdma_dev *dev; + struct erdma_qp *qp; if (ibqp && qp_attr && qp_init_attr) { qp = to_eqp(ibqp); @@ -1575,6 +1595,9 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_init_attr->cap = qp_attr->cap; + qp_attr->qp_state = query_qp_state(qp); + qp_attr->cur_qp_state = query_qp_state(qp); + return 0; } @@ -1701,6 +1724,10 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, return ret; } +void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext) +{ +} + void erdma_set_mtu(struct erdma_dev *dev, u32 mtu) { struct erdma_cmdq_config_mtu_req req; diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h index 6afdc02f586910..c998acd39a7889 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.h +++ b/drivers/infiniband/hw/erdma/erdma_verbs.h @@ -344,6 +344,7 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, struct ib_udata *data); int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata 
*udata); +void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext); int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, u64 virt, int access, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index 35d2382ee6189c..ec9ee59fcf0cc6 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -203,7 +203,6 @@ static const struct file_operations __fault_opcodes_fops = { .open = fault_opcodes_open, .read = fault_opcodes_read, .write = fault_opcodes_write, - .llseek = no_llseek }; void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd) diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 3e02c474f59fec..4fc5b9d5fea87e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -64,8 +64,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, u8 tc_mode = 0; int ret; - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) - return -EOPNOTSUPP; + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) { + ret = -EOPNOTSUPP; + goto err_out; + } ah->av.port = rdma_ah_get_port_num(ah_attr); ah->av.gid_index = grh->sgid_index; @@ -83,7 +85,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, ret = 0; if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) - return ret; + goto err_out; if (tc_mode == HNAE3_TC_MAP_MODE_DSCP && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) @@ -91,8 +93,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, else ah->av.sl = rdma_ah_get_sl(ah_attr); - if (!check_sl_valid(hr_dev, ah->av.sl)) - return -EINVAL; + if (!check_sl_valid(hr_dev, ah->av.sl)) { + ret = -EINVAL; + goto err_out; + } memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 02baa853a76c9b..c7c167e2a04513 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -1041,9 +1041,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level) * @bt_level: base address table level * @unit: ba entries per bt page */ -static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) +static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) { - u32 step; + u64 step; int max; int i; @@ -1079,7 +1079,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, { struct hns_roce_buf_region *r; int total = 0; - int step; + u64 step; int i; for (i = 0; i < region_cnt; i++) { @@ -1110,7 +1110,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, int ret = 0; int max_ofs; int level; - u32 step; + u64 step; int end; if (hopnum <= 1) @@ -1134,10 +1134,12 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, /* config L1 bt to last bt and link them to corresponding parent */ for (level = 1; level < hopnum; level++) { - cur = hem_list_search_item(&mid_bt[level], offset); - if (cur) { - hem_ptrs[level] = cur; - continue; + if (!hem_list_is_bottom_bt(hopnum, level)) { + cur = hem_list_search_item(&mid_bt[level], offset); + if (cur) { + hem_ptrs[level] = cur; + continue; + } } step = hem_list_calc_ba_range(hopnum, level, unit); @@ -1147,7 +1149,7 @@ static int 
hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, } start_aligned = (distance / step) * step + r->offset; - end = min_t(int, start_aligned + step - 1, max_ofs); + end = min_t(u64, start_aligned + step - 1, max_ofs); cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit, true); if (!cur) { @@ -1235,7 +1237,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base, struct hns_roce_hem_item *hem, *temp_hem; int total = 0; int offset; - int step; + u64 step; step = hem_list_calc_ba_range(r->hopnum, 1, unit); if (step < 1) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 621b057fb9daa6..24e906b9d3ae13 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1681,8 +1681,8 @@ static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev, for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) { bd_idx = i / CNT_PER_DESC; - if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) && - bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC) + if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC && + !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT))) break; cnt_data = (__le64 *)&desc[bd_idx].data[0]; @@ -2972,6 +2972,9 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + free_mr_exit(hr_dev); + hns_roce_function_clear(hr_dev); if (!hr_dev->is_vf) @@ -4423,12 +4426,14 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, upper_32_bits(to_hr_hw_page_addr(mtts[0]))); hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H); - context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1])); - qpc_mask->rq_nxt_blk_addr = 0; - - hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, - upper_32_bits(to_hr_hw_page_addr(mtts[1]))); - hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { + context->rq_nxt_blk_addr = + cpu_to_le32(to_hr_hw_page_addr(mtts[1])); + qpc_mask->rq_nxt_blk_addr = 0; + hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, + upper_32_bits(to_hr_hw_page_addr(mtts[1]))); + hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); + } return 0; } @@ -6193,6 +6198,7 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, struct pci_dev *pdev = hr_dev->pci_dev; struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); const struct hnae3_ae_ops *ops = ae_dev->ops; + enum hnae3_reset_type reset_type; irqreturn_t int_work = IRQ_NONE; u32 int_en; @@ -6204,10 +6210,12 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S); + reset_type = hr_dev->is_vf ? 
+ HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET; + /* Set reset level for reset_event() */ if (ops->set_default_reset_request) - ops->set_default_reset_request(ae_dev, - HNAE3_FUNC_RESET); + ops->set_default_reset_request(ae_dev, reset_type); if (ops->reset_event) ops->reset_event(pdev, NULL); @@ -6277,7 +6285,7 @@ static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data) res_type == ECC_RESOURCE_SCCC) return le64_to_cpu(*data); - return le64_to_cpu(*data) << PAGE_SHIFT; + return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT; } static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type, @@ -6949,9 +6957,6 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; hns_roce_handle_device_err(hr_dev); - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) - free_mr_exit(hr_dev); - hns_roce_exit(hr_dev); kfree(hr_dev->priv); ib_dealloc_device(&hr_dev->ib_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 1de384ce4d0e15..6b03ba671ff8f3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1460,19 +1460,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) __acquire(&send_cq->lock); __acquire(&recv_cq->lock); } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); __acquire(&recv_cq->lock); } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { - spin_lock_irq(&recv_cq->lock); + spin_lock(&recv_cq->lock); __acquire(&send_cq->lock); } else if (send_cq == recv_cq) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { - spin_lock_irq(&recv_cq->lock); + spin_lock(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } @@ -1492,13 +1492,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, spin_unlock(&recv_cq->lock); } else if (send_cq == recv_cq) { __release(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + spin_unlock(&send_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + spin_unlock(&send_cq->lock); } else { spin_unlock(&send_cq->lock); - spin_unlock_irq(&recv_cq->lock); + spin_unlock(&recv_cq->lock); } } diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index fc0ce35da14e64..eeb932e5873036 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -1347,7 +1347,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) { ibdev_err(&iwdev->ibdev, "rd_atomic = %d, above max_hw_ird=%d\n", - attr->max_rd_atomic, + attr->max_dest_rd_atomic, dev->hw_attrs.max_hw_ird); return -EINVAL; } @@ -3085,7 +3085,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start, u64 len, u64 virt, int fd, int access, - struct ib_udata *udata) + struct uverbs_attr_bundle *attrs) { struct irdma_device *iwdev = to_iwdev(pd->device); struct ib_umem_dmabuf *umem_dmabuf; diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index d13abc954d2aa3..67c2d43135a8af 100644 --- 
a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -383,7 +383,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem create_req->length = umem->length; create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz); - create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT; + create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT; create_req->page_count = num_pages_total; ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n", @@ -511,13 +511,13 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) PAGE_SHIFT; prot = pgprot_writecombine(vma->vm_page_prot); - ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot, + ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot, NULL); if (ret) ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret); else - ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n", - pfn, gc->db_page_size, ret); + ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n", + pfn, PAGE_SIZE, ret); return ret; } diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 9a439569ffcf3b..d7327735b8d0d4 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -829,7 +829,6 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) { - char alias_wq_name[22]; int ret = 0; int i, j; union ib_gid gid; @@ -875,9 +874,8 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid; dev->sriov.alias_guid.ports_guid[i].port = i; - snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i); dev->sriov.alias_guid.ports_guid[i].wq = - alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM); + alloc_ordered_workqueue("alias_guid%d", WQ_MEM_RECLAIM, i); if (!dev->sriov.alias_guid.ports_guid[i].wq) { ret = -ENOMEM; goto err_thread; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index dc9cf45d2d3209..e6e132f1062533 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -2158,7 +2158,6 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, struct mlx4_ib_demux_ctx *ctx, int port) { - char name[21]; int ret = 0; int i; @@ -2194,24 +2193,21 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, goto err_mcg; } - snprintf(name, sizeof(name), "mlx4_ibt%d", port); - ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + ctx->wq = alloc_ordered_workqueue("mlx4_ibt%d", WQ_MEM_RECLAIM, port); if (!ctx->wq) { pr_err("Failed to create tunnelling WQ for port %d\n", port); ret = -ENOMEM; goto err_wq; } - snprintf(name, sizeof(name), "mlx4_ibwi%d", port); - ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + ctx->wi_wq = alloc_ordered_workqueue("mlx4_ibwi%d", WQ_MEM_RECLAIM, port); if (!ctx->wi_wq) { pr_err("Failed to create wire WQ for port %d\n", port); ret = -ENOMEM; goto err_wiwq; } - snprintf(name, sizeof(name), "mlx4_ibud%d", port); - ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + ctx->ud_wq = alloc_ordered_workqueue("mlx4_ibud%d", WQ_MEM_RECLAIM, port); if (!ctx->ud_wq) { pr_err("Failed to create up/down WQ for port %d\n", port); ret = -ENOMEM; diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile index 72a526236c2e09..b38961f5058efd 100644 --- 
a/drivers/infiniband/hw/mlx5/Makefile +++ b/drivers/infiniband/hw/mlx5/Makefile @@ -6,6 +6,7 @@ mlx5_ib-y := ah.o \ cong.o \ counters.o \ cq.o \ + data_direct.o \ dm.o \ doorbell.o \ gsi.o \ diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index 895b62cc528df6..7c08e30089277a 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -245,3 +245,24 @@ int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid) MLX5_SET(dealloc_uar_in, in, uid, uid); return mlx5_cmd_exec_in(dev, dealloc_uar, in); } + +int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct, + char *out_vuid) +{ + u8 out[MLX5_ST_SZ_BYTES(query_vuid_out) + + MLX5_ST_SZ_BYTES(array1024_auto)] = {}; + u8 in[MLX5_ST_SZ_BYTES(query_vuid_in)] = {}; + char *vuid; + int err; + + MLX5_SET(query_vuid_in, in, opcode, MLX5_CMD_OPCODE_QUERY_VUID); + MLX5_SET(query_vuid_in, in, vhca_id, MLX5_CAP_GEN(dev, vhca_id)); + MLX5_SET(query_vuid_in, in, data_direct, data_direct); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + vuid = MLX5_ADDR_OF(query_vuid_out, out, vuid); + memcpy(out_vuid, vuid, MLX5_ST_SZ_BYTES(array1024_auto)); + return 0; +} diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index e5cd31270443d8..e6c88b6ebd0da2 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -58,4 +58,6 @@ int mlx5_cmd_mad_ifc(struct mlx5_ib_dev *dev, const void *inb, void *outb, u16 opmod, u8 port); int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid); int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid); +int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct, + char *out_vuid); #endif /* MLX5_IB_CMD_H */ diff --git a/drivers/infiniband/hw/mlx5/data_direct.c b/drivers/infiniband/hw/mlx5/data_direct.c new file mode 100644 index 00000000000000..b9ba84afaae229 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/data_direct.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#include "mlx5_ib.h" +#include "data_direct.h" + +static LIST_HEAD(mlx5_data_direct_dev_list); +static LIST_HEAD(mlx5_data_direct_reg_list); + +/* + * This mutex should be held when accessing either of the above lists + */ +static DEFINE_MUTEX(mlx5_data_direct_mutex); + +struct mlx5_data_direct_registration { + struct mlx5_ib_dev *ibdev; + char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1]; + struct list_head list; +}; + +static const struct pci_device_id mlx5_data_direct_pci_table[] = { + { PCI_VDEVICE(MELLANOX, 0x2100) }, /* ConnectX-8 Data Direct */ + { 0, } +}; + +static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev) +{ + struct pci_dev *pdev = dev->pdev; + unsigned int vpd_size, kw_len; + u8 *vpd_data; + int start; + int ret; + + vpd_data = pci_vpd_alloc(pdev, &vpd_size); + if (IS_ERR(vpd_data)) { + pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data)); + return PTR_ERR(vpd_data); + } + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "VU", &kw_len); + if (start < 0) { + ret = start; + pci_err(pdev, "VU keyword not found, err=%d\n", ret); + goto end; + } + + dev->vuid = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL); + ret = dev->vuid ? 
0 : -ENOMEM; + +end: + kfree(vpd_data); + return ret; +} + +static void mlx5_data_direct_shutdown(struct pci_dev *pdev) +{ + pci_disable_device(pdev); +} + +static int mlx5_data_direct_set_dma_caps(struct pci_dev *pdev) +{ + int err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, + "Warning: couldn't set 64-bit PCI DMA mask, err=%d\n", err); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, err=%d\n", err); + return err; + } + } + + dma_set_max_seg_size(&pdev->dev, SZ_2G); + return 0; +} + +int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid) +{ + struct mlx5_data_direct_registration *reg; + struct mlx5_data_direct_dev *dev; + + reg = kzalloc(sizeof(*reg), GFP_KERNEL); + if (!reg) + return -ENOMEM; + + reg->ibdev = ibdev; + strcpy(reg->vuid, vuid); + + mutex_lock(&mlx5_data_direct_mutex); + list_for_each_entry(dev, &mlx5_data_direct_dev_list, list) { + if (strcmp(dev->vuid, vuid) == 0) { + mlx5_ib_data_direct_bind(ibdev, dev); + break; + } + } + + /* Add the registration to its global list, to be used upon bind/unbind + * of its affiliated data direct device + */ + list_add_tail(®->list, &mlx5_data_direct_reg_list); + mutex_unlock(&mlx5_data_direct_mutex); + return 0; +} + +void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev) +{ + struct mlx5_data_direct_registration *reg; + + mutex_lock(&mlx5_data_direct_mutex); + list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) { + if (reg->ibdev == ibdev) { + list_del(®->list); + kfree(reg); + goto end; + } + } + + WARN_ON(true); +end: + mutex_unlock(&mlx5_data_direct_mutex); +} + +static void mlx5_data_direct_dev_reg(struct mlx5_data_direct_dev *dev) +{ + struct mlx5_data_direct_registration *reg; + + mutex_lock(&mlx5_data_direct_mutex); + list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) { + if (strcmp(dev->vuid, reg->vuid) == 0) + mlx5_ib_data_direct_bind(reg->ibdev, dev); + } + + /* Add the data direct device to the global list, further IB devices may + * use it later as well + */ + list_add_tail(&dev->list, &mlx5_data_direct_dev_list); + mutex_unlock(&mlx5_data_direct_mutex); +} + +static void mlx5_data_direct_dev_unreg(struct mlx5_data_direct_dev *dev) +{ + struct mlx5_data_direct_registration *reg; + + mutex_lock(&mlx5_data_direct_mutex); + /* Prevent any further affiliations */ + list_del(&dev->list); + list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) { + if (strcmp(dev->vuid, reg->vuid) == 0) + mlx5_ib_data_direct_unbind(reg->ibdev); + } + mutex_unlock(&mlx5_data_direct_mutex); +} + +static int mlx5_data_direct_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mlx5_data_direct_dev *dev; + int err; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->device = &pdev->dev; + dev->pdev = pdev; + + pci_set_drvdata(dev->pdev, dev); + err = pci_enable_device(pdev); + if (err) { + dev_err(dev->device, "Cannot enable PCI device, err=%d\n", err); + goto err; + } + + pci_set_master(pdev); + err = mlx5_data_direct_set_dma_caps(pdev); + if (err) + goto err_disable; + + if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) && + pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) && + pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128)) + dev_dbg(dev->device, "Enabling pci atomics failed\n"); + + err = mlx5_data_direct_vpd_get_vuid(dev); + if (err) + goto err_disable; + + 
mlx5_data_direct_dev_reg(dev); + return 0; + +err_disable: + pci_disable_device(pdev); +err: + kfree(dev); + return err; +} + +static void mlx5_data_direct_remove(struct pci_dev *pdev) +{ + struct mlx5_data_direct_dev *dev = pci_get_drvdata(pdev); + + mlx5_data_direct_dev_unreg(dev); + pci_disable_device(pdev); + kfree(dev->vuid); + kfree(dev); +} + +static struct pci_driver mlx5_data_direct_driver = { + .name = KBUILD_MODNAME, + .id_table = mlx5_data_direct_pci_table, + .probe = mlx5_data_direct_probe, + .remove = mlx5_data_direct_remove, + .shutdown = mlx5_data_direct_shutdown, +}; + +int mlx5_data_direct_driver_register(void) +{ + return pci_register_driver(&mlx5_data_direct_driver); +} + +void mlx5_data_direct_driver_unregister(void) +{ + pci_unregister_driver(&mlx5_data_direct_driver); +} diff --git a/drivers/infiniband/hw/mlx5/data_direct.h b/drivers/infiniband/hw/mlx5/data_direct.h new file mode 100644 index 00000000000000..2fd2bdbe8f6924 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/data_direct.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#ifndef _MLX5_IB_DATA_DIRECT_H +#define _MLX5_IB_DATA_DIRECT_H + +struct mlx5_ib_dev; + +struct mlx5_data_direct_dev { + struct device *device; + struct pci_dev *pdev; + char *vuid; + struct list_head list; +}; + +int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid); +void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev); +int mlx5_data_direct_driver_register(void); +void mlx5_data_direct_driver_unregister(void); + +#endif diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 253fea374a72de..69999d8d24f379 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -2673,7 +2673,6 @@ static const struct file_operations devx_async_cmd_event_fops = { .read = devx_async_cmd_event_read, .poll = devx_async_cmd_event_poll, .release = uverbs_uobject_fd_release, - .llseek = no_llseek, }; static ssize_t devx_async_event_read(struct file *filp, char __user *buf, @@ -2788,7 +2787,6 @@ static const struct file_operations devx_async_event_fops = { .read = devx_async_event_read, .poll = devx_async_event_poll, .release = uverbs_uobject_fd_release, - .llseek = no_llseek, }; static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj, diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c index c7a4ee89612145..49af1cfbe6d174 100644 --- a/drivers/infiniband/hw/mlx5/ib_rep.c +++ b/drivers/infiniband/hw/mlx5/ib_rep.c @@ -13,6 +13,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, int vport_index) { struct mlx5_ib_dev *ibdev; + struct net_device *ndev; ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB); if (!ibdev) @@ -20,12 +21,9 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, ibdev->port[vport_index].rep = rep; rep->rep_data[REP_IB].priv = ibdev; - write_lock(&ibdev->port[vport_index].roce.netdev_lock); - ibdev->port[vport_index].roce.netdev = - mlx5_ib_get_rep_netdev(rep->esw, rep->vport); - write_unlock(&ibdev->port[vport_index].roce.netdev_lock); + ndev = mlx5_ib_get_rep_netdev(rep->esw, rep->vport); - return 0; + return ib_device_set_netdev(&ibdev->ib_dev, ndev, vport_index + 1); } static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev); @@ -104,10 +102,15 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) ibdev->is_rep = true; vport_index = 
rep->vport_index; ibdev->port[vport_index].rep = rep; - ibdev->port[vport_index].roce.netdev = - mlx5_ib_get_rep_netdev(lag_master->priv.eswitch, rep->vport); ibdev->mdev = lag_master; ibdev->num_ports = num_ports; + ibdev->ib_dev.phys_port_cnt = num_ports; + ret = ib_device_set_netdev(&ibdev->ib_dev, + mlx5_ib_get_rep_netdev(lag_master->priv.eswitch, + rep->vport), + vport_index + 1); + if (ret) + goto fail_add; ret = __mlx5_ib_add(ibdev, profile); if (ret) @@ -160,9 +163,8 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep) } port = &dev->port[vport_index]; - write_lock(&port->roce.netdev_lock); - port->roce.netdev = NULL; - write_unlock(&port->roce.netdev_lock); + + ib_device_set_netdev(&dev->ib_dev, NULL, vport_index + 1); rep->rep_data[REP_IB].priv = NULL; port->rep = NULL; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 6048b9ad13bb40..4999239c8f4137 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -48,6 +48,7 @@ #include #include #include "macsec.h" +#include "data_direct.h" #define UVERBS_MODULE_NAME mlx5_ib #include @@ -146,16 +147,52 @@ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev, if (upper && port->rep->vport == MLX5_VPORT_UPLINK) continue; - - read_lock(&port->roce.netdev_lock); - rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw, - port->rep->vport); - if (rep_ndev == ndev) { - read_unlock(&port->roce.netdev_lock); + rep_ndev = ib_device_get_netdev(&dev->ib_dev, i + 1); + if (rep_ndev && rep_ndev == ndev) { + dev_put(rep_ndev); *port_num = i + 1; return &port->roce; } - read_unlock(&port->roce.netdev_lock); + + dev_put(rep_ndev); + } + + return NULL; +} + +static bool mlx5_netdev_send_event(struct mlx5_ib_dev *dev, + struct net_device *ndev, + struct net_device *upper, + struct net_device *ib_ndev) +{ + if (!dev->ib_active) + return false; + + /* Event is about our upper device */ + if (upper == ndev) + return true; + + /* RDMA device is not in lag and not in switchdev */ + if (!dev->is_rep && !upper && ndev == ib_ndev) + return true; + + /* RDMA devie is in switchdev */ + if (dev->is_rep && ndev == ib_ndev) + return true; + + return false; +} + +static struct net_device *mlx5_ib_get_rep_uplink_netdev(struct mlx5_ib_dev *ibdev) +{ + struct mlx5_ib_port *port; + int i; + + for (i = 0; i < ibdev->num_ports; i++) { + port = &ibdev->port[i]; + if (port->rep && port->rep->vport == MLX5_VPORT_UPLINK) { + return ib_device_get_netdev(&ibdev->ib_dev, i + 1); + } } return NULL; @@ -167,6 +204,7 @@ static int mlx5_netdev_event(struct notifier_block *this, struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb); struct net_device *ndev = netdev_notifier_info_to_dev(ptr); u32 port_num = roce->native_port_num; + struct net_device *ib_ndev = NULL; struct mlx5_core_dev *mdev; struct mlx5_ib_dev *ibdev; @@ -180,47 +218,63 @@ static int mlx5_netdev_event(struct notifier_block *this, /* Should already be registered during the load */ if (ibdev->is_rep) break; - write_lock(&roce->netdev_lock); + + ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num); + /* Exit if already registered */ + if (ib_ndev) + goto put_ndev; + if (ndev->dev.parent == mdev->device) - roce->netdev = ndev; - write_unlock(&roce->netdev_lock); + ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num); break; case NETDEV_UNREGISTER: /* In case of reps, ib device goes away before the netdevs */ - write_lock(&roce->netdev_lock); - if (roce->netdev == ndev) - roce->netdev = NULL; - 
write_unlock(&roce->netdev_lock); - break; + if (ibdev->is_rep) + break; + ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num); + if (ib_ndev == ndev) + ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num); + goto put_ndev; case NETDEV_CHANGE: case NETDEV_UP: case NETDEV_DOWN: { - struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev); struct net_device *upper = NULL; - if (lag_ndev) { - upper = netdev_master_upper_dev_get(lag_ndev); - dev_put(lag_ndev); + if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) { + struct net_device *lag_ndev; + + if(mlx5_lag_is_roce(mdev)) + lag_ndev = ib_device_get_netdev(&ibdev->ib_dev, 1); + else /* sriov lag */ + lag_ndev = mlx5_ib_get_rep_uplink_netdev(ibdev); + + if (lag_ndev) { + upper = netdev_master_upper_dev_get(lag_ndev); + dev_put(lag_ndev); + } else { + goto done; + } } if (ibdev->is_rep) roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num); if (!roce) return NOTIFY_DONE; - if ((upper == ndev || - ((!upper || ibdev->is_rep) && ndev == roce->netdev)) && - ibdev->ib_active) { + + ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num); + + if (mlx5_netdev_send_event(ibdev, ndev, upper, ib_ndev)) { struct ib_event ibev = { }; enum ib_port_state port_state; if (get_port_state(&ibdev->ib_dev, port_num, &port_state)) - goto done; + goto put_ndev; if (roce->last_port_state == port_state) - goto done; + goto put_ndev; roce->last_port_state = port_state; ibev.device = &ibdev->ib_dev; @@ -229,7 +283,7 @@ static int mlx5_netdev_event(struct notifier_block *this, else if (port_state == IB_PORT_ACTIVE) ibev.event = IB_EVENT_PORT_ACTIVE; else - goto done; + goto put_ndev; ibev.element.port_num = port_num; ib_dispatch_event(&ibev); @@ -240,38 +294,13 @@ static int mlx5_netdev_event(struct notifier_block *this, default: break; } +put_ndev: + dev_put(ib_ndev); done: mlx5_ib_put_native_port_mdev(ibdev, port_num); return NOTIFY_DONE; } -static struct net_device *mlx5_ib_get_netdev(struct ib_device *device, - u32 port_num) -{ - struct mlx5_ib_dev *ibdev = to_mdev(device); - struct net_device *ndev; - struct mlx5_core_dev *mdev; - - mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL); - if (!mdev) - return NULL; - - ndev = mlx5_lag_get_roce_netdev(mdev); - if (ndev) - goto out; - - /* Ensure ndev does not disappear before we invoke dev_hold() - */ - read_lock(&ibdev->port[port_num - 1].roce.netdev_lock); - ndev = ibdev->port[port_num - 1].roce.netdev; - dev_hold(ndev); - read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock); - -out: - mlx5_ib_put_native_port_mdev(ibdev, port_num); - return ndev; -} - struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 ib_port_num, u32 *native_port_num) @@ -546,11 +575,11 @@ static int mlx5_query_port_roce(struct ib_device *device, u32 port_num, if (!put_mdev) goto out; - ndev = mlx5_ib_get_netdev(device, port_num); + ndev = ib_device_get_netdev(device, port_num); if (!ndev) goto out; - if (dev->lag_active) { + if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) { rcu_read_lock(); upper = netdev_master_upper_dev_get_rcu(ndev); if (upper) { @@ -3024,6 +3053,59 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev) mutex_destroy(&devr->srq_lock); } +static int +mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev) +{ + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + struct mlx5_core_dev *mdev = dev->mdev; + void *mkc; + u32 mkey; + u32 pdn; + u32 *in; + int err; + + err = mlx5_core_alloc_pd(mdev, &pdn); + if (err) + return err; + + in = kvzalloc(inlen, 
GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err; + } + + MLX5_SET(create_mkey_in, in, data_direct, 1); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, rw, 1); + MLX5_SET(mkc, mkc, rr, 1); + MLX5_SET(mkc, mkc, a, 1); + MLX5_SET(mkc, mkc, pd, pdn); + MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + err = mlx5_core_create_mkey(mdev, &mkey, in, inlen); + kvfree(in); + if (err) + goto err; + + dev->ddr.mkey = mkey; + dev->ddr.pdn = pdn; + return 0; + +err: + mlx5_core_dealloc_pd(mdev, pdn); + return err; +} + +static void +mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev) +{ + mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey); + mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn); +} + static u32 get_core_cap_flags(struct ib_device *ibdev, struct mlx5_hca_vport_context *rep) { @@ -3124,6 +3206,60 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str) fw_rev_sub(dev->mdev)); } +static int lag_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct mlx5_ib_dev *dev = container_of(nb, struct mlx5_ib_dev, + lag_events); + struct mlx5_core_dev *mdev = dev->mdev; + struct mlx5_ib_port *port; + struct net_device *ndev; + int i, err; + int portnum; + + portnum = 0; + switch (event) { + case MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE: + ndev = data; + if (ndev) { + if (!mlx5_lag_is_roce(mdev)) { + // sriov lag + for (i = 0; i < dev->num_ports; i++) { + port = &dev->port[i]; + if (port->rep && port->rep->vport == + MLX5_VPORT_UPLINK) { + portnum = i; + break; + } + } + } + err = ib_device_set_netdev(&dev->ib_dev, ndev, + portnum + 1); + dev_put(ndev); + if (err) + return err; + /* Rescan gids after new netdev assignment */ + rdma_roce_rescan_device(&dev->ib_dev); + } + break; + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static void mlx5e_lag_event_register(struct mlx5_ib_dev *dev) +{ + dev->lag_events.notifier_call = lag_event; + blocking_notifier_chain_register(&dev->mdev->priv.lag_nh, + &dev->lag_events); +} + +static void mlx5e_lag_event_unregister(struct mlx5_ib_dev *dev) +{ + blocking_notifier_chain_unregister(&dev->mdev->priv.lag_nh, + &dev->lag_events); +} + static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; @@ -3145,6 +3281,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) goto err_destroy_vport_lag; } + mlx5e_lag_event_register(dev); dev->flow_db->lag_demux_ft = ft; dev->lag_ports = mlx5_lag_get_num_ports(mdev); dev->lag_active = true; @@ -3162,6 +3299,7 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev) if (dev->lag_active) { dev->lag_active = false; + mlx5e_lag_event_unregister(dev); mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); dev->flow_db->lag_demux_ft = NULL; @@ -3420,6 +3558,41 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, return false; } +static int mlx5_ib_data_direct_init(struct mlx5_ib_dev *dev) +{ + char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1] = {}; + int ret; + + if (!MLX5_CAP_GEN(dev->mdev, data_direct) || + !MLX5_CAP_GEN_2(dev->mdev, query_vuid)) + return 0; + + ret = mlx5_cmd_query_vuid(dev->mdev, true, vuid); + if (ret) + return ret; + + ret = mlx5_ib_create_data_direct_resources(dev); + if (ret) + return ret; + + INIT_LIST_HEAD(&dev->data_direct_mr_list); + ret = mlx5_data_direct_ib_reg(dev, vuid); + if (ret) + 
mlx5_ib_free_data_direct_resources(dev); + + return ret; +} + +static void mlx5_ib_data_direct_cleanup(struct mlx5_ib_dev *dev) +{ + if (!MLX5_CAP_GEN(dev->mdev, data_direct) || + !MLX5_CAP_GEN_2(dev->mdev, query_vuid)) + return; + + mlx5_data_direct_ib_unreg(dev); + mlx5_ib_free_data_direct_resources(dev); +} + static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev) { u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1; @@ -3796,6 +3969,14 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE( dump_fill_mkey), UA_MANDATORY)); +ADD_UVERBS_ATTRIBUTES_SIMPLE( + mlx5_ib_reg_dmabuf_mr, + UVERBS_OBJECT_MR, + UVERBS_METHOD_REG_DMABUF_MR, + UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS, + enum mlx5_ib_uapi_reg_dmabuf_flags, + UA_OPTIONAL)); + static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN(mlx5_ib_devx_defs), UAPI_DEF_CHAIN(mlx5_ib_flow_defs), @@ -3805,6 +3986,7 @@ static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context), + UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR, &mlx5_ib_reg_dmabuf_mr), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR, UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR), @@ -3813,6 +3995,7 @@ static const struct uapi_definition mlx5_ib_defs[] = { static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) { + mlx5_ib_data_direct_cleanup(dev); mlx5_ib_cleanup_multiport_master(dev); WARN_ON(!xa_empty(&dev->odp_mkeys)); mutex_destroy(&dev->cap_mask_mutex); @@ -3828,13 +4011,11 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; - dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.dev.parent = mdev->device; dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES; for (i = 0; i < dev->num_ports; i++) { spin_lock_init(&dev->port[i].mp.mpi_lock); - rwlock_init(&dev->port[i].roce.netdev_lock); dev->port[i].roce.dev = dev; dev->port[i].roce.native_port_num = i + 1; dev->port[i].roce.last_port_state = IB_PORT_DOWN; @@ -3866,6 +4047,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev); mutex_init(&dev->cap_mask_mutex); + mutex_init(&dev->data_direct_lock); INIT_LIST_HEAD(&dev->qp_list); spin_lock_init(&dev->reset_flow_resource_lock); xa_init(&dev->odp_mkeys); @@ -3874,6 +4056,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) spin_lock_init(&dev->dm.lock); dev->dm.dev = mdev; + err = mlx5_ib_data_direct_init(dev); + if (err) + goto err_mp; + return 0; err_mp: mlx5_ib_cleanup_multiport_master(dev); @@ -4094,7 +4280,6 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = { .create_wq = mlx5_ib_create_wq, .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table, .destroy_wq = mlx5_ib_destroy_wq, - .get_netdev = mlx5_ib_get_netdev, .modify_wq = mlx5_ib_modify_wq, INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table, @@ -4293,6 +4478,22 @@ static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev) mlx5_notifier_unregister(dev->mdev, &dev->mdev_events); } +void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev, + struct mlx5_data_direct_dev *dev) +{ + mutex_lock(&ibdev->data_direct_lock); + ibdev->data_direct_dev = dev; + mutex_unlock(&ibdev->data_direct_lock); +} + +void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev) +{ + mutex_lock(&ibdev->data_direct_lock); + 
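+	/*
+	 * Revoke every data-direct MR while still holding data_direct_lock and
+	 * only then clear the device pointer: registration of new data-direct
+	 * MRs takes the same lock and fails with -EINVAL once it observes
+	 * data_direct_dev == NULL.
+	 */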
mlx5_ib_revoke_data_direct_mrs(ibdev); + ibdev->data_direct_dev = NULL; + mutex_unlock(&ibdev->data_direct_lock); +} + void __mlx5_ib_remove(struct mlx5_ib_dev *dev, const struct mlx5_ib_profile *profile, int stage) @@ -4522,6 +4723,7 @@ static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent, mplane->mdev = mparent->mdev; mplane->num_ports = mparent->num_plane; mplane->sub_dev_name = name; + mplane->ib_dev.phys_port_cnt = mplane->num_ports; ret = __mlx5_ib_add(mplane, &plane_profile); if (ret) @@ -4638,6 +4840,7 @@ static int mlx5r_probe(struct auxiliary_device *adev, dev->mdev = mdev; dev->num_ports = num_ports; + dev->ib_dev.phys_port_cnt = num_ports; if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev)) profile = &raw_eth_profile; @@ -4715,17 +4918,23 @@ static int __init mlx5_ib_init(void) ret = mlx5r_rep_init(); if (ret) goto rep_err; + ret = mlx5_data_direct_driver_register(); + if (ret) + goto dd_err; ret = auxiliary_driver_register(&mlx5r_mp_driver); if (ret) goto mp_err; ret = auxiliary_driver_register(&mlx5r_driver); if (ret) goto drv_err; + return 0; drv_err: auxiliary_driver_unregister(&mlx5r_mp_driver); mp_err: + mlx5_data_direct_driver_unregister(); +dd_err: mlx5r_rep_cleanup(); rep_err: mlx5_ib_qp_event_cleanup(); @@ -4737,6 +4946,7 @@ static int __init mlx5_ib_init(void) static void __exit mlx5_ib_cleanup(void) { + mlx5_data_direct_driver_unregister(); auxiliary_driver_unregister(&mlx5r_driver); auxiliary_driver_unregister(&mlx5r_mp_driver); mlx5r_rep_cleanup(); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d5eb1b726675d2..23fd72f7f63df9 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -63,17 +63,6 @@ __mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits, return GENMASK(largest_pg_shift, pgsz_shift); } -/* - * For mkc users, instead of a page_offset the command has a start_iova which - * specifies both the page_offset and the on-the-wire IOVA - */ -#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \ - ib_umem_find_best_pgsz(umem, \ - __mlx5_log_page_size_to_bitmap( \ - __mlx5_bit_sz(typ, log_pgsz_fld), \ - pgsz_shift), \ - iova) - static __always_inline unsigned long __mlx5_page_offset_to_bitmask(unsigned int page_offset_bits, unsigned int offset_shift) @@ -640,6 +629,8 @@ enum mlx5_mkey_type { MLX5_MKEY_MR = 1, MLX5_MKEY_MW, MLX5_MKEY_INDIRECT_DEVX, + MLX5_MKEY_NULL, + MLX5_MKEY_IMPLICIT_CHILD, }; struct mlx5r_cache_rb_key { @@ -682,6 +673,8 @@ struct mlx5_ib_mr { struct mlx5_ib_mkey mmkey; struct ib_umem *umem; + /* The mr is data direct related */ + u8 data_direct :1; union { /* Used only by kernel MRs (umem == NULL) */ @@ -719,6 +712,11 @@ struct mlx5_ib_mr { } odp_destroy; struct ib_odp_counters odp_stats; bool is_odp_implicit; + /* The affilated data direct crossed mr */ + struct mlx5_ib_mr *dd_crossed_mr; + struct list_head dd_node; + u8 revoked :1; + struct mlx5_ib_mkey null_mmkey; }; }; }; @@ -796,6 +794,7 @@ struct mlx5_cache_ent { u8 is_tmp:1; u8 disabled:1; u8 fill_to_high_water:1; + u8 tmp_cleanup_scheduled:1; /* * - limit is the low water mark for stored mkeys, 2* limit is the @@ -827,7 +826,6 @@ struct mlx5_mkey_cache { struct mutex rb_lock; struct dentry *fs_root; unsigned long last_add; - struct delayed_work remove_ent_dwork; }; struct mlx5_ib_port_resources { @@ -835,6 +833,11 @@ struct mlx5_ib_port_resources { struct work_struct pkey_change_work; }; +struct mlx5_data_direct_resources { + u32 pdn; + u32 
mkey; +}; + struct mlx5_ib_resources { struct ib_cq *c0; struct mutex cq_lock; @@ -885,8 +888,6 @@ struct mlx5_roce { /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL * netdev pointer */ - rwlock_t netdev_lock; - struct net_device *netdev; struct notifier_block nb; struct netdev_net_notifier nn; struct notifier_block mdev_nb; @@ -1131,7 +1132,11 @@ struct mlx5_macsec { struct mlx5_ib_dev { struct ib_device ib_dev; struct mlx5_core_dev *mdev; + struct mlx5_data_direct_dev *data_direct_dev; + /* protect accessing data_direct_dev */ + struct mutex data_direct_lock; struct notifier_block mdev_events; + struct notifier_block lag_events; int num_ports; /* serialize update of capability mask */ @@ -1161,6 +1166,7 @@ struct mlx5_ib_dev { /* protect resources needed as part of reset flow */ spinlock_t reset_flow_resource_lock; struct list_head qp_list; + struct list_head data_direct_mr_list; /* Array with num_ports elements */ struct mlx5_ib_port *port; struct mlx5_sq_bfreg bfreg; @@ -1185,6 +1191,7 @@ struct mlx5_ib_dev { u16 pkey_table_len; u8 lag_ports; struct mlx5_special_mkeys mkeys; + struct mlx5_data_direct_resources ddr; #ifdef CONFIG_MLX5_MACSEC struct mlx5_macsec macsec; @@ -1345,7 +1352,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int fd, int access_flags, - struct ib_udata *udata); + struct uverbs_attr_bundle *attrs); int mlx5_ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 flags, @@ -1356,7 +1363,6 @@ int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata); int mlx5_ib_dealloc_mw(struct ib_mw *mw); struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, int access_flags); -void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr); void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr); struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 length, u64 virt_addr, int access_flags, @@ -1425,6 +1431,10 @@ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, struct ib_dm_mr_attr *attr, struct uverbs_attr_bundle *attrs); +void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev, + struct mlx5_data_direct_dev *dev); +void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev); +void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev); @@ -1633,8 +1643,6 @@ static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey) wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0); } -int mlx5_ib_test_wc(struct mlx5_ib_dev *dev); - static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev) { /* @@ -1707,4 +1715,20 @@ static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port) return (port - 1) / dev->num_ports + 1; } +/* + * For mkc users, instead of a page_offset the command has a start_iova which + * specifies both the page_offset and the on-the-wire IOVA + */ +static __always_inline unsigned long +mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem, + u64 iova) +{ + int page_size_bits = + MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 
6 : 5; + unsigned long bitmap = + __mlx5_log_page_size_to_bitmap(page_size_bits, 0); + + return ib_umem_find_best_pgsz(umem, bitmap, iova); +} + #endif /* MLX5_IB_H */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 98bd8eaa393ef7..45d9dc9c6c8fda 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -43,18 +43,22 @@ #include "dm.h" #include "mlx5_ib.h" #include "umr.h" +#include "data_direct.h" enum { MAX_PENDING_REG_MR = 8, }; +#define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4 #define MLX5_UMR_ALIGN 2048 static void create_mkey_callback(int status, struct mlx5_async_work *context); static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags, - unsigned int page_size, bool populate); + unsigned int page_size, bool populate, + int access_mode); +static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr); static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr, struct ib_pd *pd) @@ -211,9 +215,9 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) spin_lock_irqsave(&ent->mkeys_queue.lock, flags); push_mkey_locked(ent, mkey_out->mkey); + ent->pending--; /* If we are doing fill_to_high_water then keep going. */ queue_adjust_cache_locked(ent); - ent->pending--; spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags); kfree(mkey_out); } @@ -527,6 +531,21 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) } } +static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) +{ + u32 mkey; + + spin_lock_irq(&ent->mkeys_queue.lock); + while (ent->mkeys_queue.ci) { + mkey = pop_mkey_locked(ent); + spin_unlock_irq(&ent->mkeys_queue.lock); + mlx5_core_destroy_mkey(dev->mdev, mkey); + spin_lock_irq(&ent->mkeys_queue.lock); + } + ent->tmp_cleanup_scheduled = false; + spin_unlock_irq(&ent->mkeys_queue.lock); +} + static void __cache_work_func(struct mlx5_cache_ent *ent) { struct mlx5_ib_dev *dev = ent->dev; @@ -598,7 +617,11 @@ static void delayed_cache_work_func(struct work_struct *work) struct mlx5_cache_ent *ent; ent = container_of(work, struct mlx5_cache_ent, dwork.work); - __cache_work_func(ent); + /* temp entries are never filled, only cleaned */ + if (ent->is_tmp) + clean_keys(ent->dev, ent); + else + __cache_work_func(ent); } static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1, @@ -659,6 +682,7 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, { struct rb_node *node = dev->cache.rb_root.rb_node; struct mlx5_cache_ent *cur, *smallest = NULL; + u64 ndescs_limit; int cmp; /* @@ -677,10 +701,18 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, return cur; } + /* + * Limit the usage of mkeys larger than twice the required size while + * also allowing the usage of smallest cache entry for small MRs. + */ + ndescs_limit = max_t(u64, rb_key.ndescs * 2, + MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS); + return (smallest && smallest->rb_key.access_mode == rb_key.access_mode && smallest->rb_key.access_flags == rb_key.access_flags && - smallest->rb_key.ats == rb_key.ats) ? + smallest->rb_key.ats == rb_key.ats && + smallest->rb_key.ndescs <= ndescs_limit) ? 
smallest : NULL; } @@ -765,21 +797,6 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, return _mlx5_mr_cache_alloc(dev, ent, access_flags); } -static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) -{ - u32 mkey; - - cancel_delayed_work(&ent->dwork); - spin_lock_irq(&ent->mkeys_queue.lock); - while (ent->mkeys_queue.ci) { - mkey = pop_mkey_locked(ent); - spin_unlock_irq(&ent->mkeys_queue.lock); - mlx5_core_destroy_mkey(dev->mdev, mkey); - spin_lock_irq(&ent->mkeys_queue.lock); - } - spin_unlock_irq(&ent->mkeys_queue.lock); -} - static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) { if (!mlx5_debugfs_root || dev->is_rep) @@ -892,10 +909,6 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, ent->limit = 0; mlx5_mkey_cache_debugfs_add_ent(dev, ent); - } else { - mod_delayed_work(ent->dev->cache.wq, - &ent->dev->cache.remove_ent_dwork, - msecs_to_jiffies(30 * 1000)); } return ent; @@ -906,35 +919,6 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, return ERR_PTR(ret); } -static void remove_ent_work_func(struct work_struct *work) -{ - struct mlx5_mkey_cache *cache; - struct mlx5_cache_ent *ent; - struct rb_node *cur; - - cache = container_of(work, struct mlx5_mkey_cache, - remove_ent_dwork.work); - mutex_lock(&cache->rb_lock); - cur = rb_last(&cache->rb_root); - while (cur) { - ent = rb_entry(cur, struct mlx5_cache_ent, node); - cur = rb_prev(cur); - mutex_unlock(&cache->rb_lock); - - spin_lock_irq(&ent->mkeys_queue.lock); - if (!ent->is_tmp) { - spin_unlock_irq(&ent->mkeys_queue.lock); - mutex_lock(&cache->rb_lock); - continue; - } - spin_unlock_irq(&ent->mkeys_queue.lock); - - clean_keys(ent->dev, ent); - mutex_lock(&cache->rb_lock); - } - mutex_unlock(&cache->rb_lock); -} - int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) { struct mlx5_mkey_cache *cache = &dev->cache; @@ -950,7 +934,6 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) mutex_init(&dev->slow_path_mutex); mutex_init(&dev->cache.rb_lock); dev->cache.rb_root = RB_ROOT; - INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func); cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); if (!cache->wq) { mlx5_ib_warn(dev, "failed to create work queue\n"); @@ -962,7 +945,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) mlx5_mkey_cache_debugfs_init(dev); mutex_lock(&cache->rb_lock); for (i = 0; i <= mkey_cache_max_order(dev); i++) { - rb_key.ndescs = 1 << (i + 2); + rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i; ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); if (IS_ERR(ent)) { ret = PTR_ERR(ent); @@ -1001,7 +984,6 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) return; mutex_lock(&dev->cache.rb_lock); - cancel_delayed_work(&dev->cache.remove_ent_dwork); for (node = rb_first(root); node; node = rb_next(node)) { ent = rb_entry(node, struct mlx5_cache_ent, node); spin_lock_irq(&ent->mkeys_queue.lock); @@ -1062,6 +1044,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) MLX5_SET(mkc, mkc, length64, 1); set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0, pd); + MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats)); err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); if (err) @@ -1126,12 +1109,10 @@ static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem, static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, struct ib_umem *umem, u64 iova, - int access_flags) + int access_flags, int access_mode) { - struct mlx5r_cache_rb_key rb_key = 
{ - .access_mode = MLX5_MKC_ACCESS_MODE_MTT, - }; struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5r_cache_rb_key rb_key = {}; struct mlx5_cache_ent *ent; struct mlx5_ib_mr *mr; unsigned int page_size; @@ -1139,11 +1120,11 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, if (umem->is_dmabuf) page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova); else - page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, - 0, iova); + page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova); if (WARN_ON(!page_size)) return ERR_PTR(-EINVAL); + rb_key.access_mode = access_mode; rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size); rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags); rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags); @@ -1154,7 +1135,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, */ if (!ent) { mutex_lock(&dev->slow_path_mutex); - mr = reg_create(pd, umem, iova, access_flags, page_size, false); + mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode); mutex_unlock(&dev->slow_path_mutex); if (IS_ERR(mr)) return mr; @@ -1175,13 +1156,71 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, return mr; } +static struct ib_mr * +reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_flags, + u32 crossed_lkey) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + int access_mode = MLX5_MKC_ACCESS_MODE_CROSSING; + struct mlx5_ib_mr *mr; + void *mkc; + int inlen; + u32 *in; + int err; + + if (!MLX5_CAP_GEN(dev->mdev, crossing_vhca_mkey)) + return ERR_PTR(-EOPNOTSUPP); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_1; + } + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, crossing_target_vhca_id, + MLX5_CAP_GEN(dev->mdev, vhca_id)); + MLX5_SET(mkc, mkc, translations_octword_size, crossed_lkey); + MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3); + MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7); + + /* for this crossing mkey IOVA should be 0 and len should be IOVA + len */ + set_mkc_access_pd_addr_fields(mkc, access_flags, 0, pd); + MLX5_SET64(mkc, mkc, len, iova + length); + + MLX5_SET(mkc, mkc, free, 0); + MLX5_SET(mkc, mkc, umr_en, 0); + err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); + if (err) + goto err_2; + + mr->mmkey.type = MLX5_MKEY_MR; + set_mr_fields(dev, mr, length, access_flags, iova); + mr->ibmr.pd = pd; + kvfree(in); + mlx5_ib_dbg(dev, "crossing mkey = 0x%x\n", mr->mmkey.key); + + return &mr->ibmr; +err_2: + kvfree(in); +err_1: + kfree(mr); + return ERR_PTR(err); +} + /* * If ibmr is NULL it will be allocated by reg_create. * Else, the given ibmr will be used. 
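 * With the data-direct changes, reg_create() also takes an access_mode, so
 * the same path can build regular MTT mkeys as well as the KSM mkeys used
 * for data-direct crossed MRs (see the ksm_mode handling below).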
*/ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags, - unsigned int page_size, bool populate) + unsigned int page_size, bool populate, + int access_mode) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr; @@ -1190,7 +1229,9 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, int inlen; u32 *in; int err; - bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); + bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)) && + (access_mode == MLX5_MKC_ACCESS_MODE_MTT); + bool ksm_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM); if (!page_size) return ERR_PTR(-EINVAL); @@ -1213,7 +1254,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, } pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); if (populate) { - if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) { + if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND || ksm_mode)) { err = -EINVAL; goto err_2; } @@ -1229,14 +1270,22 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); set_mkc_access_pd_addr_fields(mkc, access_flags, iova, populate ? pd : dev->umrc.pd); + /* In case a data direct flow, overwrite the pdn field by its internal kernel PD */ + if (umem->is_dmabuf && ksm_mode) + MLX5_SET(mkc, mkc, pd, dev->ddr.pdn); + MLX5_SET(mkc, mkc, free, !populate); - MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); + MLX5_SET(mkc, mkc, access_mode_1_0, access_mode); MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET64(mkc, mkc, len, umem->length); MLX5_SET(mkc, mkc, bsf_octword_size, 0); - MLX5_SET(mkc, mkc, translations_octword_size, - get_octo_len(iova, umem->length, mr->page_shift)); + if (ksm_mode) + MLX5_SET(mkc, mkc, translations_octword_size, + get_octo_len(iova, umem->length, mr->page_shift) * 2); + else + MLX5_SET(mkc, mkc, translations_octword_size, + get_octo_len(iova, umem->length, mr->page_shift)); MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); if (mlx5_umem_needs_ats(dev, umem, access_flags)) MLX5_SET(mkc, mkc, ma_translation_mode, 1); @@ -1373,13 +1422,15 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem, xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length); if (xlt_with_umr) { - mr = alloc_cacheable_mr(pd, umem, iova, access_flags); + mr = alloc_cacheable_mr(pd, umem, iova, access_flags, + MLX5_MKC_ACCESS_MODE_MTT); } else { - unsigned int page_size = mlx5_umem_find_best_pgsz( - umem, mkc, log_page_size, 0, iova); + unsigned int page_size = + mlx5_umem_mkc_find_best_pgsz(dev, umem, iova); mutex_lock(&dev->slow_path_mutex); - mr = reg_create(pd, umem, iova, access_flags, page_size, true); + mr = reg_create(pd, umem, iova, access_flags, page_size, + true, MLX5_MKC_ACCESS_MODE_MTT); mutex_unlock(&dev->slow_path_mutex); } if (IS_ERR(mr)) { @@ -1442,7 +1493,8 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length, if (IS_ERR(odp)) return ERR_CAST(odp); - mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); + mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags, + MLX5_MKC_ACCESS_MODE_MTT); if (IS_ERR(mr)) { ib_umem_release(&odp->umem); return ERR_CAST(mr); @@ -1510,35 +1562,31 @@ static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = { .move_notify = mlx5_ib_dmabuf_invalidate_cb, }; -struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset, - u64 length, u64 virt_addr, - int fd, int access_flags, - struct ib_udata *udata) +static struct 
ib_mr * +reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device, + u64 offset, u64 length, u64 virt_addr, + int fd, int access_flags, int access_mode) { + bool pinned_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM); struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr = NULL; struct ib_umem_dmabuf *umem_dmabuf; int err; - if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || - !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) - return ERR_PTR(-EOPNOTSUPP); - - mlx5_ib_dbg(dev, - "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n", - offset, virt_addr, length, fd, access_flags); - err = mlx5r_umr_resource_init(dev); if (err) return ERR_PTR(err); - /* dmabuf requires xlt update via umr to work. */ - if (!mlx5r_umr_can_load_pas(dev, length)) - return ERR_PTR(-EINVAL); + if (!pinned_mode) + umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, + offset, length, fd, + access_flags, + &mlx5_ib_dmabuf_attach_ops); + else + umem_dmabuf = ib_umem_dmabuf_get_pinned_with_dma_device(&dev->ib_dev, + dma_device, offset, length, + fd, access_flags); - umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd, - access_flags, - &mlx5_ib_dmabuf_attach_ops); if (IS_ERR(umem_dmabuf)) { mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n", PTR_ERR(umem_dmabuf)); @@ -1546,7 +1594,7 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset, } mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, - access_flags); + access_flags, access_mode); if (IS_ERR(mr)) { ib_umem_release(&umem_dmabuf->umem); return ERR_CAST(mr); @@ -1556,9 +1604,13 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset, atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); umem_dmabuf->private = mr; - err = mlx5r_store_odp_mkey(dev, &mr->mmkey); - if (err) - goto err_dereg_mr; + if (!pinned_mode) { + err = mlx5r_store_odp_mkey(dev, &mr->mmkey); + if (err) + goto err_dereg_mr; + } else { + mr->data_direct = true; + } err = mlx5_ib_init_dmabuf_mr(mr); if (err) @@ -1566,10 +1618,101 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset, return &mr->ibmr; err_dereg_mr: - mlx5_ib_dereg_mr(&mr->ibmr, NULL); + __mlx5_ib_dereg_mr(&mr->ibmr); return ERR_PTR(err); } +static struct ib_mr * +reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset, + u64 length, u64 virt_addr, + int fd, int access_flags) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_data_direct_dev *data_direct_dev; + struct ib_mr *crossing_mr; + struct ib_mr *crossed_mr; + int ret = 0; + + /* As of HW behaviour the IOVA must be page aligned in KSM mode */ + if (!PAGE_ALIGNED(virt_addr) || (access_flags & IB_ACCESS_ON_DEMAND)) + return ERR_PTR(-EOPNOTSUPP); + + mutex_lock(&dev->data_direct_lock); + data_direct_dev = dev->data_direct_dev; + if (!data_direct_dev) { + ret = -EINVAL; + goto end; + } + + /* The device's 'data direct mkey' was created without RO flags to + * simplify things and allow for a single mkey per device. + * Since RO is not a must, mask it out accordingly. 
+ */ + access_flags &= ~IB_ACCESS_RELAXED_ORDERING; + crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev, + offset, length, virt_addr, fd, + access_flags, MLX5_MKC_ACCESS_MODE_KSM); + if (IS_ERR(crossed_mr)) { + ret = PTR_ERR(crossed_mr); + goto end; + } + + mutex_lock(&dev->slow_path_mutex); + crossing_mr = reg_create_crossing_vhca_mr(pd, virt_addr, length, access_flags, + crossed_mr->lkey); + mutex_unlock(&dev->slow_path_mutex); + if (IS_ERR(crossing_mr)) { + __mlx5_ib_dereg_mr(crossed_mr); + ret = PTR_ERR(crossing_mr); + goto end; + } + + list_add_tail(&to_mmr(crossed_mr)->dd_node, &dev->data_direct_mr_list); + to_mmr(crossing_mr)->dd_crossed_mr = to_mmr(crossed_mr); + to_mmr(crossing_mr)->data_direct = true; +end: + mutex_unlock(&dev->data_direct_lock); + return ret ? ERR_PTR(ret) : crossing_mr; +} + +struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset, + u64 length, u64 virt_addr, + int fd, int access_flags, + struct uverbs_attr_bundle *attrs) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + int mlx5_access_flags = 0; + int err; + + if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || + !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) + return ERR_PTR(-EOPNOTSUPP); + + if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS)) { + err = uverbs_get_flags32(&mlx5_access_flags, attrs, + MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS, + MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT); + if (err) + return ERR_PTR(err); + } + + mlx5_ib_dbg(dev, + "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x, mlx5_access_flags 0x%x\n", + offset, virt_addr, length, fd, access_flags, mlx5_access_flags); + + /* dmabuf requires xlt update via umr to work. */ + if (!mlx5r_umr_can_load_pas(dev, length)) + return ERR_PTR(-EINVAL); + + if (mlx5_access_flags & MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT) + return reg_user_mr_dmabuf_by_data_direct(pd, offset, length, virt_addr, + fd, access_flags); + + return reg_user_mr_dmabuf(pd, pd->device->dma_device, + offset, length, virt_addr, + fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT); +} + /* * True if the change in access flags can be done via UMR, only some access * flags can be updated. 
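The translations_octword_size doubling for KSM mode in reg_create() above follows from entry sizes: a translation octword is 16 bytes, which holds two 8-byte MTT entries but only a single 16-byte struct mlx5_ksm (key + va). A minimal stand-alone sketch of that sizing arithmetic; octo_len() here is written purely for illustration and is not the driver helper:

#include <stdint.h>
#include <stdio.h>

/* Octwords needed for an MTT page list: 16-byte octword, two 8-byte MTTs each. */
static unsigned int octo_len(uint64_t iova, uint64_t len, unsigned int page_shift)
{
	uint64_t page_mask = (1ULL << page_shift) - 1;
	uint64_t npages = ((iova & page_mask) + len + page_mask) >> page_shift;

	return (npages + 1) / 2;
}

int main(void)
{
	uint64_t iova = 0x1000, len = 1ULL << 20;	/* 1 MiB of 4 KiB pages */
	unsigned int mtt_octo = octo_len(iova, len, 12);

	/* A KSM entry is 16 bytes, so the KSM list needs twice as many octwords. */
	printf("MTT octwords: %u, KSM octwords: %u\n", mtt_octo, mtt_octo * 2);
	return 0;
}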
@@ -1601,8 +1744,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, if (!mlx5r_umr_can_load_pas(dev, new_umem->length)) return false; - *page_size = - mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova); + *page_size = mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova); if (WARN_ON(!*page_size)) return false; return (mr->mmkey.cache_ent->rb_key.ndescs) >= @@ -1665,7 +1807,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, struct mlx5_ib_mr *mr = to_mmr(ib_mr); int err; - if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) + if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct) return ERR_PTR(-EOPNOTSUPP); mlx5_ib_dbg( @@ -1793,7 +1935,7 @@ mlx5_alloc_priv_descs(struct ib_device *device, static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr) { - if (!mr->umem && mr->descs) { + if (!mr->umem && !mr->data_direct && mr->descs) { struct ib_device *device = mr->ibmr.device; int size = mr->max_descs * mr->desc_size; struct mlx5_ib_dev *dev = to_mdev(device); @@ -1847,13 +1989,51 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev, return ret; } +static int mlx5_ib_revoke_data_direct_mr(struct mlx5_ib_mr *mr) +{ + struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); + struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem); + int err; + + lockdep_assert_held(&dev->data_direct_lock); + mr->revoked = true; + err = mlx5r_umr_revoke_mr(mr); + if (WARN_ON(err)) + return err; + + ib_umem_dmabuf_revoke(umem_dmabuf); + return 0; +} + +void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev) +{ + struct mlx5_ib_mr *mr, *next; + + lockdep_assert_held(&dev->data_direct_lock); + + list_for_each_entry_safe(mr, next, &dev->data_direct_mr_list, dd_node) { + list_del(&mr->dd_node); + mlx5_ib_revoke_data_direct_mr(mr); + } +} + static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) { struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; - if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) + if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) { + ent = mr->mmkey.cache_ent; + /* upon storing to a clean temp entry - schedule its cleanup */ + spin_lock_irq(&ent->mkeys_queue.lock); + if (ent->is_tmp && !ent->tmp_cleanup_scheduled) { + mod_delayed_work(ent->dev->cache.wq, &ent->dwork, + msecs_to_jiffies(30 * 1000)); + ent->tmp_cleanup_scheduled = true; + } + spin_unlock_irq(&ent->mkeys_queue.lock); return 0; + } if (ent) { spin_lock_irq(&ent->mkeys_queue.lock); @@ -1864,7 +2044,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) return destroy_mkey(dev, mr); } -int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) +static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr) { struct mlx5_ib_mr *mr = to_mmr(ibmr); struct mlx5_ib_dev *dev = to_mdev(ibmr->device); @@ -1931,9 +2111,40 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) return 0; } +static int dereg_crossing_data_direct_mr(struct mlx5_ib_dev *dev, + struct mlx5_ib_mr *mr) +{ + struct mlx5_ib_mr *dd_crossed_mr = mr->dd_crossed_mr; + int ret; + + ret = __mlx5_ib_dereg_mr(&mr->ibmr); + if (ret) + return ret; + + mutex_lock(&dev->data_direct_lock); + if (!dd_crossed_mr->revoked) + list_del(&dd_crossed_mr->dd_node); + + ret = __mlx5_ib_dereg_mr(&dd_crossed_mr->ibmr); + mutex_unlock(&dev->data_direct_lock); + return ret; +} + +int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) +{ + struct mlx5_ib_mr *mr = to_mmr(ibmr); + struct 
mlx5_ib_dev *dev = to_mdev(ibmr->device); + + if (mr->data_direct) + return dereg_crossing_data_direct_mr(dev, mr); + + return __mlx5_ib_dereg_mr(ibmr); +} + static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs, int access_mode, int page_shift) { + struct mlx5_ib_dev *dev = to_mdev(pd->device); void *mkc; mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); @@ -1946,6 +2157,9 @@ static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs, MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7); MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, log_page_size, page_shift); + if (access_mode == MLX5_MKC_ACCESS_MODE_PA || + access_mode == MLX5_MKC_ACCESS_MODE_MTT) + MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats)); } static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index a524181f34df95..4b37446758fd4e 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -45,7 +45,7 @@ /* Contains the details of a pagefault. */ struct mlx5_pagefault { u32 bytes_committed; - u32 token; + u64 token; u8 event_subtype; u8 type; union { @@ -74,6 +74,14 @@ struct mlx5_pagefault { u32 rdma_op_len; u64 rdma_va; } rdma; + struct { + u64 va; + u32 mkey; + u32 fault_byte_count; + u32 prefetch_before_byte_count; + u32 prefetch_after_byte_count; + u8 flags; + } memory; }; struct mlx5_ib_pf_eq *eq; @@ -99,13 +107,20 @@ static u64 mlx5_imr_ksm_entries; static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries, struct mlx5_ib_mr *imr, int flags) { + struct mlx5_core_dev *dev = mr_to_mdev(imr)->mdev; struct mlx5_klm *end = pklm + nentries; + int step = MLX5_CAP_ODP(dev, mem_page_fault) ? MLX5_IMR_MTT_SIZE : 0; + __be32 key = MLX5_CAP_ODP(dev, mem_page_fault) ? + cpu_to_be32(imr->null_mmkey.key) : + mr_to_mdev(imr)->mkeys.null_mkey; + u64 va = + MLX5_CAP_ODP(dev, mem_page_fault) ? 
idx * MLX5_IMR_MTT_SIZE : 0; if (flags & MLX5_IB_UPD_XLT_ZAP) { - for (; pklm != end; pklm++, idx++) { + for (; pklm != end; pklm++, idx++, va += step) { pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE); - pklm->key = mr_to_mdev(imr)->mkeys.null_mkey; - pklm->va = 0; + pklm->key = key; + pklm->va = cpu_to_be64(va); } return; } @@ -129,7 +144,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries, */ lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex); - for (; pklm != end; pklm++, idx++) { + for (; pklm != end; pklm++, idx++, va += step) { struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx); pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE); @@ -137,8 +152,8 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries, pklm->key = cpu_to_be32(mtt->ibmr.lkey); pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE); } else { - pklm->key = mr_to_mdev(imr)->mkeys.null_mkey; - pklm->va = 0; + pklm->key = key; + pklm->va = cpu_to_be64(va); } } } @@ -217,6 +232,9 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr) return; xa_erase(&imr->implicit_children, idx); + if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault)) + xa_erase(&mr_to_mdev(mr)->odp_mkeys, + mlx5_base_mkey(mr->mmkey.key)); /* Freeing a MR is a sleeping operation, so bounce to a work queue */ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work); @@ -332,46 +350,46 @@ static void internal_fill_odp_caps(struct mlx5_ib_dev *dev) else dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT); - if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.send)) caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND; - if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.srq_receive)) caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV; - if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.send)) caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND; - if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.receive)) caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV; - if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.write)) caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE; - if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.read)) caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ; - if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.atomic)) caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC; - if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.srq_receive)) caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV; - if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.send)) caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND; - if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.receive)) caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV; - if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.write)) caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE; - if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.read)) 
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ; - if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.atomic)) caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC; - if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive)) + if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.srq_receive)) caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV; if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) && @@ -388,13 +406,29 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev, int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ? pfault->wqe.wq_num : pfault->token; u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {}; + void *info; int err; MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME); - MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type); - MLX5_SET(page_fault_resume_in, in, token, pfault->token); - MLX5_SET(page_fault_resume_in, in, wq_number, wq_num); - MLX5_SET(page_fault_resume_in, in, error, !!error); + + if (pfault->event_subtype == MLX5_PFAULT_SUBTYPE_MEMORY) { + info = MLX5_ADDR_OF(page_fault_resume_in, in, + page_fault_info.mem_page_fault_info); + MLX5_SET(mem_page_fault_info, info, fault_token_31_0, + pfault->token & 0xffffffff); + MLX5_SET(mem_page_fault_info, info, fault_token_47_32, + (pfault->token >> 32) & 0xffff); + MLX5_SET(mem_page_fault_info, info, error, !!error); + } else { + info = MLX5_ADDR_OF(page_fault_resume_in, in, + page_fault_info.trans_page_fault_info); + MLX5_SET(trans_page_fault_info, info, page_fault_type, + pfault->type); + MLX5_SET(trans_page_fault_info, info, fault_token, + pfault->token); + MLX5_SET(trans_page_fault_info, info, wq_number, wq_num); + MLX5_SET(trans_page_fault_info, info, error, !!error); + } err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in); if (err) @@ -468,6 +502,16 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr, } xa_unlock(&imr->implicit_children); + if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) { + ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), + &mr->mmkey, GFP_KERNEL); + if (xa_is_err(ret)) { + ret = ERR_PTR(xa_err(ret)); + xa_erase(&imr->implicit_children, idx); + goto out_mr; + } + mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD; + } mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr); return mr; @@ -478,6 +522,57 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr, return ret; } +/* + * When using memory scheme ODP, implicit MRs can't use the reserved null mkey + * and each implicit MR needs to assign a private null mkey to get the page + * faults on. + * The null mkey is created with the properties to enable getting the page + * fault for every time it is accessed and having all relevant access flags. 
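+ * Concretely, the mkey below spans the whole address range (length64) with a
+ * huge log_page_size and no pre-populated pages, and faults taken on it are
+ * routed back to the owning implicit MR via the MLX5_MKEY_NULL case in
+ * mlx5_ib_mr_memory_pfault_handler().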
+ */ +static int alloc_implicit_mr_null_mkey(struct mlx5_ib_dev *dev, + struct mlx5_ib_mr *imr, + struct mlx5_ib_pd *pd) +{ + size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + 64; + void *mkc; + u32 *in; + int err; + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 4); + MLX5_SET(create_mkey_in, in, pg_access, 1); + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, a, 1); + MLX5_SET(mkc, mkc, rw, 1); + MLX5_SET(mkc, mkc, rr, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, free, 0); + MLX5_SET(mkc, mkc, umr_en, 0); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); + + MLX5_SET(mkc, mkc, translations_octword_size, 4); + MLX5_SET(mkc, mkc, log_page_size, 61); + MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, pd, pd->pdn); + MLX5_SET64(mkc, mkc, start_addr, 0); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + + err = mlx5_core_create_mkey(dev->mdev, &imr->null_mmkey.key, in, inlen); + if (err) + goto free_in; + + imr->null_mmkey.type = MLX5_MKEY_NULL; + +free_in: + kfree(in); + return err; +} + struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, int access_flags) { @@ -510,6 +605,16 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, imr->is_odp_implicit = true; xa_init(&imr->implicit_children); + if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) { + err = alloc_implicit_mr_null_mkey(dev, imr, pd); + if (err) + goto out_mr; + + err = mlx5r_store_odp_mkey(dev, &imr->null_mmkey); + if (err) + goto out_mr; + } + err = mlx5r_umr_update_xlt(imr, 0, mlx5_imr_ksm_entries, MLX5_KSM_PAGE_SHIFT, @@ -544,6 +649,14 @@ void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr) xa_erase(&mr->implicit_children, idx); mlx5_ib_dereg_mr(&mtt->ibmr, NULL); } + + if (mr->null_mmkey.key) { + xa_erase(&mr_to_mdev(mr)->odp_mkeys, + mlx5_base_mkey(mr->null_mmkey.key)); + + mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, + mr->null_mmkey.key); + } } #define MLX5_PF_FLAGS_DOWNGRADE BIT(1) @@ -693,7 +806,7 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem); u32 xlt_flags = 0; int err; - unsigned int page_size; + unsigned long page_size; if (flags & MLX5_PF_FLAGS_ENABLE) xlt_flags |= MLX5_IB_UPD_XLT_ENABLE; @@ -710,7 +823,10 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, ib_umem_dmabuf_unmap_pages(umem_dmabuf); err = -EINVAL; } else { - err = mlx5r_umr_update_mr_pas(mr, xlt_flags); + if (mr->data_direct) + err = mlx5r_umr_update_data_direct_ksm_pas(mr, xlt_flags); + else + err = mlx5r_umr_update_mr_pas(mr, xlt_flags); } dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); @@ -733,24 +849,31 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, * >0: Number of pages mapped */ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, - u32 *bytes_mapped, u32 flags) + u32 *bytes_mapped, u32 flags, bool permissive_fault) { struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); - if (unlikely(io_virt < mr->ibmr.iova)) + if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault) return -EFAULT; if (mr->umem->is_dmabuf) return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags); if (!odp->is_implicit_odp) { + u64 offset = io_virt < mr->ibmr.iova ? 
0 : io_virt - mr->ibmr.iova; u64 user_va; - if (check_add_overflow(io_virt - mr->ibmr.iova, - (u64)odp->umem.address, &user_va)) + if (check_add_overflow(offset, (u64)odp->umem.address, + &user_va)) return -EFAULT; - if (unlikely(user_va >= ib_umem_end(odp) || - ib_umem_end(odp) - user_va < bcnt)) + + if (permissive_fault) { + if (user_va < ib_umem_start(odp)) + user_va = ib_umem_start(odp); + if ((user_va + bcnt) > ib_umem_end(odp)) + bcnt = ib_umem_end(odp) - user_va; + } else if (unlikely(user_va >= ib_umem_end(odp) || + ib_umem_end(odp) - user_va < bcnt)) return -EFAULT; return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped, flags); @@ -797,6 +920,27 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key) return mmkey->key == key; } +static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key) +{ + struct mlx5_ib_mkey *mmkey; + + xa_lock(&dev->odp_mkeys); + mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key)); + if (!mmkey) { + mmkey = ERR_PTR(-ENOENT); + goto out; + } + if (!mkey_is_eq(mmkey, key)) { + mmkey = ERR_PTR(-EFAULT); + goto out; + } + refcount_inc(&mmkey->usecount); +out: + xa_unlock(&dev->odp_mkeys); + + return mmkey; +} + /* * Handle a single data segment in a page-fault WQE or RDMA region. * @@ -824,32 +968,24 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, io_virt += *bytes_committed; bcnt -= *bytes_committed; - next_mr: - xa_lock(&dev->odp_mkeys); - mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key)); - if (!mmkey) { - xa_unlock(&dev->odp_mkeys); - mlx5_ib_dbg( - dev, - "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n", - key); - if (bytes_mapped) - *bytes_mapped += bcnt; - /* - * The user could specify a SGL with multiple lkeys and only - * some of them are ODP. Treat the non-ODP ones as fully - * faulted. - */ - ret = 0; - goto end; - } - refcount_inc(&mmkey->usecount); - xa_unlock(&dev->odp_mkeys); - - if (!mkey_is_eq(mmkey, key)) { - mlx5_ib_dbg(dev, "failed to find mkey %x\n", key); - ret = -EFAULT; + mmkey = find_odp_mkey(dev, key); + if (IS_ERR(mmkey)) { + ret = PTR_ERR(mmkey); + if (ret == -ENOENT) { + mlx5_ib_dbg( + dev, + "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n", + key); + if (bytes_mapped) + *bytes_mapped += bcnt; + /* + * The user could specify a SGL with multiple lkeys and + * only some of them are ODP. Treat the non-ODP ones as + * fully faulted. + */ + ret = 0; + } goto end; } @@ -857,7 +993,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, case MLX5_MKEY_MR: mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); - ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0); + ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false); if (ret < 0) goto end; @@ -944,7 +1080,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, } end: - if (mmkey) + if (!IS_ERR(mmkey)) mlx5r_deref_odp_mkey(mmkey); while (head) { frame = head; @@ -1266,7 +1402,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev, if (ret) mlx5_ib_err( dev, - "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n", + "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %llx\n", ret, wqe_index, pfault->token); resolve_page_fault: @@ -1325,13 +1461,13 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev, } else if (ret < 0 || pages_in_range(address, length) > ret) { mlx5_ib_page_fault_resume(dev, pfault, 1); if (ret != -ENOENT) - mlx5_ib_dbg(dev, "PAGE FAULT error %d. 
QP 0x%x, type: 0x%x\n", + mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n", ret, pfault->token, pfault->type); return; } mlx5_ib_page_fault_resume(dev, pfault, 0); - mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n", + mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%llx, type: 0x%x, prefetch_activated: %d\n", pfault->token, pfault->type, prefetch_activated); @@ -1347,12 +1483,80 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev, prefetch_len, &bytes_committed, NULL); if (ret < 0 && ret != -EAGAIN) { - mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n", + mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%llx, address: 0x%.16llx, length = 0x%.16x\n", ret, pfault->token, address, prefetch_len); } } } +#define MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST BIT(7) +static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev, + struct mlx5_pagefault *pfault) +{ + u64 prefetch_va = + pfault->memory.va - pfault->memory.prefetch_before_byte_count; + size_t prefetch_size = pfault->memory.prefetch_before_byte_count + + pfault->memory.fault_byte_count + + pfault->memory.prefetch_after_byte_count; + struct mlx5_ib_mkey *mmkey; + struct mlx5_ib_mr *mr, *child_mr; + int ret = 0; + + mmkey = find_odp_mkey(dev, pfault->memory.mkey); + if (IS_ERR(mmkey)) + goto err; + + switch (mmkey->type) { + case MLX5_MKEY_IMPLICIT_CHILD: + child_mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); + mr = child_mr->parent; + break; + case MLX5_MKEY_NULL: + mr = container_of(mmkey, struct mlx5_ib_mr, null_mmkey); + break; + default: + mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); + break; + } + + /* If prefetch fails, handle only demanded page fault */ + ret = pagefault_mr(mr, prefetch_va, prefetch_size, NULL, 0, true); + if (ret < 0) { + ret = pagefault_mr(mr, pfault->memory.va, + pfault->memory.fault_byte_count, NULL, 0, + true); + if (ret < 0) + goto err; + } + + mlx5_update_odp_stats(mr, faults, ret); + mlx5r_deref_odp_mkey(mmkey); + + if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST) + mlx5_ib_page_fault_resume(dev, pfault, 0); + + mlx5_ib_dbg( + dev, + "PAGE FAULT completed %s. token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x\n", + pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST ? + "" : + "without resume cmd", + pfault->token, pfault->memory.mkey, pfault->memory.va, + pfault->memory.fault_byte_count); + + return; + +err: + if (!IS_ERR(mmkey)) + mlx5r_deref_odp_mkey(mmkey); + mlx5_ib_page_fault_resume(dev, pfault, 1); + mlx5_ib_dbg( + dev, + "PAGE FAULT error. 
token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x, err: %d\n", + pfault->token, pfault->memory.mkey, pfault->memory.va, + pfault->memory.fault_byte_count, ret); +} + static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault) { u8 event_subtype = pfault->event_subtype; @@ -1364,6 +1568,9 @@ static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfaul case MLX5_PFAULT_SUBTYPE_RDMA: mlx5_ib_mr_rdma_pfault_handler(dev, pfault); break; + case MLX5_PFAULT_SUBTYPE_MEMORY: + mlx5_ib_mr_memory_pfault_handler(dev, pfault); + break; default: mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n", event_subtype); @@ -1382,6 +1589,7 @@ static void mlx5_ib_eqe_pf_action(struct work_struct *work) mempool_free(pfault, eq->pool); } +#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) { struct mlx5_eqe_page_fault *pf_eqe; @@ -1398,15 +1606,12 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) pf_eqe = &eqe->data.page_fault; pfault->event_subtype = eqe->sub_type; - pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed); - - mlx5_ib_dbg(eq->dev, - "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n", - eqe->sub_type, pfault->bytes_committed); switch (eqe->sub_type) { case MLX5_PFAULT_SUBTYPE_RDMA: /* RDMA based event */ + pfault->bytes_committed = + be32_to_cpu(pf_eqe->rdma.bytes_committed); pfault->type = be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; pfault->token = @@ -1420,10 +1625,12 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) be32_to_cpu(pf_eqe->rdma.rdma_op_len); pfault->rdma.rdma_va = be64_to_cpu(pf_eqe->rdma.rdma_va); - mlx5_ib_dbg(eq->dev, - "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n", - pfault->type, pfault->token, - pfault->rdma.r_key); + mlx5_ib_dbg( + eq->dev, + "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, r_key: 0x%08x\n", + eqe->sub_type, pfault->bytes_committed, + pfault->type, pfault->token, + pfault->rdma.r_key); mlx5_ib_dbg(eq->dev, "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n", pfault->rdma.rdma_op_len, @@ -1432,6 +1639,8 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) case MLX5_PFAULT_SUBTYPE_WQE: /* WQE based event */ + pfault->bytes_committed = + be32_to_cpu(pf_eqe->wqe.bytes_committed); pfault->type = (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7; pfault->token = @@ -1443,11 +1652,47 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) be16_to_cpu(pf_eqe->wqe.wqe_index); pfault->wqe.packet_size = be16_to_cpu(pf_eqe->wqe.packet_length); - mlx5_ib_dbg(eq->dev, - "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n", - pfault->type, pfault->token, - pfault->wqe.wq_num, - pfault->wqe.wqe_index); + mlx5_ib_dbg( + eq->dev, + "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, wq_num: 0x%06x, wqe_index: 0x%04x\n", + eqe->sub_type, pfault->bytes_committed, + pfault->type, pfault->token, pfault->wqe.wq_num, + pfault->wqe.wqe_index); + break; + + case MLX5_PFAULT_SUBTYPE_MEMORY: + /* Memory based event */ + pfault->bytes_committed = 0; + pfault->token = + be32_to_cpu(pf_eqe->memory.token31_0) | + ((u64)be16_to_cpu(pf_eqe->memory.token47_32) + << 32); + pfault->memory.va = be64_to_cpu(pf_eqe->memory.va); + pfault->memory.mkey = be32_to_cpu(pf_eqe->memory.mkey); + pfault->memory.fault_byte_count = (be32_to_cpu( + pf_eqe->memory.demand_fault_pages) >> 12) * + MEMORY_SCHEME_PAGE_FAULT_GRANULARITY; + 
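+			/*
+			 * The EQE reports sizes in 4 KiB granules: the page
+			 * count in bits 31:12 of demand_fault_pages and the
+			 * pre/post prefetch page counts below are converted
+			 * to byte counts here.
+			 */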
pfault->memory.prefetch_before_byte_count = + be16_to_cpu( + pf_eqe->memory.pre_demand_fault_pages) * + MEMORY_SCHEME_PAGE_FAULT_GRANULARITY; + pfault->memory.prefetch_after_byte_count = + be16_to_cpu( + pf_eqe->memory.post_demand_fault_pages) * + MEMORY_SCHEME_PAGE_FAULT_GRANULARITY; + pfault->memory.flags = pf_eqe->memory.flags; + mlx5_ib_dbg( + eq->dev, + "PAGE_FAULT: subtype: 0x%02x, token: 0x%06llx, mkey: 0x%06x, fault_byte_count: 0x%06x, va: 0x%016llx, flags: 0x%02x\n", + eqe->sub_type, pfault->token, + pfault->memory.mkey, + pfault->memory.fault_byte_count, + pfault->memory.va, pfault->memory.flags); + mlx5_ib_dbg( + eq->dev, + "PAGE_FAULT: prefetch size: before: 0x%06x, after 0x%06x\n", + pfault->memory.prefetch_before_byte_count, + pfault->memory.prefetch_after_byte_count); break; default: @@ -1710,7 +1955,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w) for (i = 0; i < work->num_sge; ++i) { ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt, work->frags[i].length, &bytes_mapped, - work->pf_flags); + work->pf_flags, false); if (ret <= 0) continue; mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret); @@ -1761,7 +2006,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, if (IS_ERR(mr)) return PTR_ERR(mr); ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length, - &bytes_mapped, pf_flags); + &bytes_mapped, pf_flags, false); if (ret < 0) { mlx5r_deref_odp_mkey(&mr->mmkey); return ret; diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c index bbfcce3bdc84ec..bdb568411091c8 100644 --- a/drivers/infiniband/hw/mlx5/std_types.c +++ b/drivers/infiniband/hw/mlx5/std_types.c @@ -10,6 +10,7 @@ #include #include #include "mlx5_ib.h" +#include "data_direct.h" #define UVERBS_MODULE_NAME mlx5_ib #include @@ -111,6 +112,23 @@ static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport, return err; } +static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num, + struct mlx5_ib_uapi_query_port *info) +{ + struct mlx5_core_dev *mdev; + + mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL); + if (!mdev) + return -EINVAL; + + info->vport_vhca_id = MLX5_CAP_GEN(mdev, vhca_id); + info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID; + + mlx5_ib_put_native_port_mdev(dev, port_num); + + return 0; +} + static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num, struct mlx5_ib_uapi_query_port *info) { @@ -177,12 +195,60 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)( ret = fill_switchdev_info(dev, port_num, &info); if (ret) return ret; + } else if (mlx5_core_mp_enabled(dev->mdev)) { + ret = fill_multiport_info(dev, port_num, &info); + if (ret) + return ret; } return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info, sizeof(info)); } +static int UVERBS_HANDLER(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH)( + struct uverbs_attr_bundle *attrs) +{ + struct mlx5_data_direct_dev *data_direct_dev; + struct mlx5_ib_ucontext *c; + struct mlx5_ib_dev *dev; + int out_len = uverbs_attr_get_len(attrs, + MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH); + u32 dev_path_len; + char *dev_path; + int ret; + + c = to_mucontext(ib_uverbs_get_ucontext(attrs)); + if (IS_ERR(c)) + return PTR_ERR(c); + dev = to_mdev(c->ibucontext.device); + mutex_lock(&dev->data_direct_lock); + data_direct_dev = dev->data_direct_dev; + if (!data_direct_dev) { + ret = -ENODEV; + goto end; + } + + dev_path = kobject_get_path(&data_direct_dev->device->kobj, GFP_KERNEL); + if (!dev_path) { + ret = -ENOMEM; + goto end; + } + + 
dev_path_len = strlen(dev_path) + 1; + if (dev_path_len > out_len) { + ret = -ENOSPC; + goto end; + } + + ret = uverbs_copy_to(attrs, MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH, dev_path, + dev_path_len); + kfree(dev_path); + +end: + mutex_unlock(&dev->data_direct_lock); + return ret; +} + DECLARE_UVERBS_NAMED_METHOD( MLX5_IB_METHOD_QUERY_PORT, UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM, @@ -193,9 +259,17 @@ DECLARE_UVERBS_NAMED_METHOD( reg_c0), UA_MANDATORY)); +DECLARE_UVERBS_NAMED_METHOD( + MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH, + UVERBS_ATTR_PTR_OUT( + MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH, + UVERBS_ATTR_MIN_SIZE(0), + UA_MANDATORY)); + ADD_UVERBS_METHODS(mlx5_ib_device, UVERBS_OBJECT_DEVICE, - &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT)); + &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT), + &UVERBS_METHOD(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH)); DECLARE_UVERBS_NAMED_METHOD( MLX5_IB_METHOD_PD_QUERY, diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c index ffc31b01f69051..887fd6fa3ba930 100644 --- a/drivers/infiniband/hw/mlx5/umr.c +++ b/drivers/infiniband/hw/mlx5/umr.c @@ -224,6 +224,9 @@ int mlx5r_umr_init(struct mlx5_ib_dev *dev) void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev) { + if (!dev->umrc.pd) + return; + mutex_destroy(&dev->umrc.init_lock); ib_dealloc_pd(dev->umrc.pd); } @@ -632,44 +635,47 @@ static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev, wqe->data_seg.byte_count = cpu_to_be32(sg->length); } -/* - * Send the DMA list to the HW for a normal MR using UMR. - * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP - * flag may be used. - */ -int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags) +static int +_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd) { + size_t ent_size = dd ? 
sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt); struct mlx5_ib_dev *dev = mr_to_mdev(mr); struct device *ddev = &dev->mdev->pdev->dev; struct mlx5r_umr_wqe wqe = {}; struct ib_block_iter biter; + struct mlx5_ksm *cur_ksm; struct mlx5_mtt *cur_mtt; size_t orig_sg_length; - struct mlx5_mtt *mtt; size_t final_size; + void *curr_entry; struct ib_sge sg; + void *entry; u64 offset = 0; int err = 0; - if (WARN_ON(mr->umem->is_odp)) - return -EINVAL; - - mtt = mlx5r_umr_create_xlt( - dev, &sg, ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift), - sizeof(*mtt), flags); - if (!mtt) + entry = mlx5r_umr_create_xlt(dev, &sg, + ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift), + ent_size, flags); + if (!entry) return -ENOMEM; orig_sg_length = sg.length; - mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg); mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr, mr->page_shift); + if (dd) { + /* Use the data direct internal kernel PD */ + MLX5_SET(mkc, &wqe.mkey_seg, pd, dev->ddr.pdn); + cur_ksm = entry; + } else { + cur_mtt = entry; + } + mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg); - cur_mtt = mtt; + curr_entry = entry; rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) { - if (cur_mtt == (void *)mtt + sg.length) { + if (curr_entry == entry + sg.length) { dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE); @@ -681,23 +687,31 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags) DMA_TO_DEVICE); offset += sg.length; mlx5r_umr_update_offset(&wqe.ctrl_seg, offset); - - cur_mtt = mtt; + if (dd) + cur_ksm = entry; + else + cur_mtt = entry; } - cur_mtt->ptag = - cpu_to_be64(rdma_block_iter_dma_address(&biter) | - MLX5_IB_MTT_PRESENT); - - if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP)) - cur_mtt->ptag = 0; - - cur_mtt++; + if (dd) { + cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter)); + cur_ksm->key = cpu_to_be32(dev->ddr.mkey); + cur_ksm++; + curr_entry = cur_ksm; + } else { + cur_mtt->ptag = + cpu_to_be64(rdma_block_iter_dma_address(&biter) | + MLX5_IB_MTT_PRESENT); + if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP)) + cur_mtt->ptag = 0; + cur_mtt++; + curr_entry = cur_mtt; + } } - final_size = (void *)cur_mtt - (void *)mtt; + final_size = curr_entry - entry; sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT); - memset(cur_mtt, 0, sg.length - final_size); + memset(curr_entry, 0, sg.length - final_size); mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags); dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE); @@ -705,10 +719,32 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags) err: sg.length = orig_sg_length; - mlx5r_umr_unmap_free_xlt(dev, mtt, &sg); + mlx5r_umr_unmap_free_xlt(dev, entry, &sg); return err; } +int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags) +{ + /* No invalidation flow is expected */ + if (WARN_ON(!mr->umem->is_dmabuf) || (flags & MLX5_IB_UPD_XLT_ZAP)) + return -EINVAL; + + return _mlx5r_umr_update_mr_pas(mr, flags, true); +} + +/* + * Send the DMA list to the HW for a normal MR using UMR. + * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP + * flag may be used. 
+ */ +int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags) +{ + if (WARN_ON(mr->umem->is_odp)) + return -EINVAL; + + return _mlx5r_umr_update_mr_pas(mr, flags, false); +} + static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) { return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h index 5f734dc72befe6..4a02c9b5aad869 100644 --- a/drivers/infiniband/hw/mlx5/umr.h +++ b/drivers/infiniband/hw/mlx5/umr.h @@ -95,6 +95,7 @@ int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr); int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd, int access_flags); int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags); +int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags); int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, int page_shift, int flags); diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index db3b25c8433a17..4100656fe9a3e2 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -581,12 +581,9 @@ static int qib_create_workqueues(struct qib_devdata *dd) for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (!ppd->qib_wq) { - char wq_name[23]; - - snprintf(wq_name, sizeof(wq_name), "qib%d_%d", - dd->unit, pidx); - ppd->qib_wq = alloc_ordered_workqueue(wq_name, - WQ_MEM_RECLAIM); + ppd->qib_wq = alloc_ordered_workqueue("qib%d_%d", + WQ_MEM_RECLAIM, + dd->unit, pidx); if (!ppd->qib_wq) goto wq_error; } diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 07548fac1d8ea7..408fe1ba74b97b 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -303,8 +303,6 @@ int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid); -void qib_rc_rnr_retry(unsigned long arg); - void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr); int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr); @@ -312,8 +310,6 @@ int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr); void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, int has_grh, void *data, u32 tlen, struct rvt_qp *qp); -void mr_rcu_callback(struct rcu_head *list); - void qib_migrate_qp(struct rvt_qp *qp); int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr, diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 7a9afd5231d5c9..5ed5cfc2b28034 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -348,13 +348,13 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, umem = ib_umem_get(pd->device, start, length, mr_access_flags); if (IS_ERR(umem)) - return (void *)umem; + return ERR_CAST(umem); n = ib_umem_num_pages(umem); mr = __rvt_alloc_mr(n, pd); if (IS_ERR(mr)) { - ret = (struct ib_mr *)mr; + ret = ERR_CAST(mr); goto bail_umem; } @@ -542,7 +542,7 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, mr = __rvt_alloc_mr(max_num_sg, pd); if (IS_ERR(mr)) - return (struct ib_mr *)mr; + return ERR_CAST(mr); return &mr->ibmr; } diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h index 46f82b27fcd2f5..1f0322491d8c1d 100644 --- a/drivers/infiniband/sw/rxe/rxe_hdr.h +++ b/drivers/infiniband/sw/rxe/rxe_hdr.h @@ -234,7 +234,7 @@ static 
inline void __bth_set_resv6a(void *arg) { struct rxe_bth *bth = arg; - bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK); + bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK); } static inline int __bth_ack(void *arg) diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 6596a85723c9a5..c11ab280551a86 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -341,7 +341,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, /* * See IBA C9-92 * For UD QPs we only check if the packet will fit in the - * receive buffer later. For rmda operations additional + * receive buffer later. For RDMA operations additional * length checks are performed in check_rkey. */ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) { @@ -351,7 +351,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, for (i = 0; i < qp->resp.wqe->dma.num_sge; i++) recv_buffer_len += qp->resp.wqe->dma.sge[i].length; - if (payload + 40 > recv_buffer_len) { + if (payload + sizeof(union rdma_network_hdr) > recv_buffer_len) { rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n"); return RESPST_ERR_LENGTH; } diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 75253f2b3e3dca..86d4d6a2170e17 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -94,8 +94,6 @@ struct siw_device { atomic_t num_mr; atomic_t num_srq; atomic_t num_ctx; - - struct work_struct netdev_down; }; struct siw_ucontext { diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index b2b54242aa6916..17abef48abcd22 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -364,39 +364,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev) return NULL; } -/* - * Network link becomes unavailable. Mark all - * affected QP's accordingly. 
- */ -static void siw_netdev_down(struct work_struct *work) -{ - struct siw_device *sdev = - container_of(work, struct siw_device, netdev_down); - - struct siw_qp_attrs qp_attrs; - struct list_head *pos, *tmp; - - memset(&qp_attrs, 0, sizeof(qp_attrs)); - qp_attrs.state = SIW_QP_STATE_ERROR; - - list_for_each_safe(pos, tmp, &sdev->qp_list) { - struct siw_qp *qp = list_entry(pos, struct siw_qp, devq); - - down_write(&qp->state_lock); - WARN_ON(siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE)); - up_write(&qp->state_lock); - } - ib_device_put(&sdev->base_dev); -} - -static void siw_device_goes_down(struct siw_device *sdev) -{ - if (ib_device_try_get(&sdev->base_dev)) { - INIT_WORK(&sdev->netdev_down, siw_netdev_down); - schedule_work(&sdev->netdev_down); - } -} - static int siw_netdev_event(struct notifier_block *nb, unsigned long event, void *arg) { @@ -418,10 +385,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event, siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE); break; - case NETDEV_GOING_DOWN: - siw_device_goes_down(sdev); - break; - case NETDEV_DOWN: sdev->state = IB_PORT_DOWN; siw_port_event(sdev, 1, IB_EVENT_PORT_ERR); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 963e936da5e3aa..abe0522b7df46a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -509,12 +509,10 @@ struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port, const char *format); int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format, struct net_device *dev); -void ipoib_ib_tx_timer_func(struct timer_list *t); void ipoib_ib_dev_flush_light(struct work_struct *work); void ipoib_ib_dev_flush_normal(struct work_struct *work); void ipoib_ib_dev_flush_heavy(struct work_struct *work); void ipoib_ib_tx_timeout_work(struct work_struct *work); -void ipoib_pkey_event(struct work_struct *work); void ipoib_ib_dev_cleanup(struct net_device *dev); int ipoib_ib_dev_open_default(struct net_device *dev); @@ -533,7 +531,6 @@ void ipoib_mcast_restart_task(struct work_struct *work); void ipoib_mcast_start_thread(struct net_device *dev); void ipoib_mcast_stop_thread(struct net_device *dev); -void ipoib_mcast_dev_down(struct net_device *dev); void ipoib_mcast_dev_flush(struct net_device *dev); int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req); @@ -610,7 +607,6 @@ int ipoib_set_mode(struct net_device *dev, const char *buf); void ipoib_setup_common(struct net_device *dev); -void ipoib_pkey_open(struct ipoib_dev_priv *priv); void ipoib_drain_cq(struct net_device *dev); void ipoib_set_ethtool_ops(struct net_device *dev); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 68429a5f796d94..1d7ac24c4c006e 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -507,10 +507,6 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *task); void iser_free_rx_descriptors(struct iser_conn *iser_conn); -void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, - struct iser_data_buf *mem, - enum iser_data_dir cmd_dir); - int iser_reg_mem_fastreg(struct iscsi_iser_task *task, enum iser_data_dir dir, bool all_imm); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 88106cf5ce550c..71387811b28158 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -331,7 +331,7 @@ static void 
rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n", + rtrs_err_rl(con->c.path, "Failed IB_WR_REG_MR: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } @@ -351,11 +351,11 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n", + rtrs_err_rl(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } - req->need_inv = false; + req->mr->need_inval = false; if (req->need_inv_comp) complete(&req->inv_comp); else @@ -391,12 +391,13 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, clt_path = to_clt_path(con->c.path); if (req->sg_cnt) { - if (req->dir == DMA_FROM_DEVICE && req->need_inv) { + if (req->mr->need_inval) { /* - * We are here to invalidate read requests + * We are here to invalidate read/write requests * ourselves. In normal scenario server should - * send INV for all read requests, but - * we are here, thus two things could happen: + * send INV for all read requests, we do local + * invalidate for write requests ourselves, but + * we are here, thus three things could happen: * * 1. this is failover, when errno != 0 * and can_wait == 1, @@ -404,6 +405,9 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, * 2. something totally bad happened and * server forgot to send INV, so we * should do that ourselves. + * + * 3. write request finishes, we need to do local + * invalidate */ if (can_wait) { @@ -418,18 +422,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, refcount_inc(&req->ref); err = rtrs_inv_rkey(req); if (err) { - rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n", + rtrs_err_rl(con->c.path, "Send INV WR key=%#x: %d\n", req->mr->rkey, err); } else if (can_wait) { wait_for_completion(&req->inv_comp); - } else { - /* - * Something went wrong, so request will be - * completed from INV callback. - */ - WARN_ON_ONCE(1); - - return; } if (!refcount_dec_and_test(&req->ref)) return; @@ -446,8 +442,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, req->con = NULL; if (errno) { - rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n", - errno, kobject_name(&clt_path->kobj), clt_path->hca_name, + rtrs_err_rl(con->c.path, + "IO %s request failed: error=%d path=%s [%s:%u] notify=%d\n", + req->dir == DMA_TO_DEVICE ? 
"write" : "read", errno, + kobject_name(&clt_path->kobj), clt_path->hca_name, clt_path->hca_port, notify); } @@ -501,7 +499,7 @@ static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id, req = &clt_path->reqs[msg_id]; /* Drop need_inv if server responded with send with invalidation */ - req->need_inv &= !w_inval; + req->mr->need_inval &= !w_inval; complete_rdma_req(req, errno, true, false); } @@ -626,6 +624,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) */ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done)) return; + clt_path->s.hb_missed_cnt = 0; rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); if (imm_type == RTRS_IO_RSP_IMM || @@ -643,7 +642,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) return rtrs_clt_recv_done(con, wc); } else if (imm_type == RTRS_HB_ACK_IMM) { WARN_ON(con->c.cid); - clt_path->s.hb_missed_cnt = 0; clt_path->s.hb_cur_latency = ktime_sub(ktime_get(), clt_path->s.hb_last_sent); if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) @@ -670,6 +668,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) /* * Key invalidations from server side */ + clt_path->s.hb_missed_cnt = 0; WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || wc->wc_flags & IB_WC_WITH_IMM)); WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); @@ -967,7 +966,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, req->dir = dir; req->con = rtrs_permit_to_clt_con(clt_path, permit); req->conf = conf; - req->need_inv = false; + req->mr->need_inval = false; req->need_inv_comp = false; req->inv_errno = 0; refcount_set(&req->ref, 1); @@ -1089,7 +1088,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) int ret, count = 0; u32 imm, buf_id; struct ib_reg_wr rwr; - struct ib_send_wr inv_wr; struct ib_send_wr *wr = NULL; bool fr_en = false; @@ -1130,13 +1128,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) req->sg_cnt, req->dir); return ret; } - inv_wr = (struct ib_send_wr) { - .opcode = IB_WR_LOCAL_INV, - .wr_cqe = &req->inv_cqe, - .send_flags = IB_SEND_SIGNALED, - .ex.invalidate_rkey = req->mr->rkey, - }; - req->inv_cqe.done = rtrs_clt_inv_rkey_done; rwr = (struct ib_reg_wr) { .wr.opcode = IB_WR_REG_MR, .wr.wr_cqe = &fast_reg_cqe, @@ -1146,7 +1137,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) }; wr = &rwr.wr; fr_en = true; - refcount_inc(&req->ref); + req->mr->need_inval = true; } /* * Update stats now, after request is successfully sent it is not @@ -1156,7 +1147,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count, req->usr_len + sizeof(*msg), - imm, wr, &inv_wr); + imm, wr, NULL); if (ret) { rtrs_err_rl(s, "Write request failed: error=%d path=%s [%s:%u]\n", @@ -1164,6 +1155,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) clt_path->hca_port); if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) atomic_dec(&clt_path->stats->inflight); + if (req->mr->need_inval) { + req->mr->need_inval = false; + refcount_dec(&req->ref); + } if (req->sg_cnt) ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); @@ -1213,7 +1208,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) ret = rtrs_map_sg_fr(req, count); if (ret < 0) { rtrs_err_rl(s, - "Read request failed, failed to map fast reg. data, err: %d\n", + "Read request failed, failed to map fast reg. 
data, err: %d\n", ret); ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt, req->dir); @@ -1237,7 +1232,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) msg->desc[0].len = cpu_to_le32(req->mr->length); /* Further invalidation is required */ - req->need_inv = !!RTRS_MSG_NEED_INVAL_F; + req->mr->need_inval = !!RTRS_MSG_NEED_INVAL_F; } else { msg->sg_cnt = 0; @@ -1270,7 +1265,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) clt_path->hca_port); if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) atomic_dec(&clt_path->stats->inflight); - req->need_inv = false; + req->mr->need_inval = false; if (req->sg_cnt) ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt, req->dir); @@ -1494,7 +1489,9 @@ static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path, static void rtrs_clt_hb_err_handler(struct rtrs_con *c) { struct rtrs_clt_con *con = container_of(c, typeof(*con), c); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&clt_path->kobj)); rtrs_rdma_error_recovery(con); } @@ -2346,6 +2343,12 @@ static int init_conns(struct rtrs_clt_path *clt_path) if (err) goto destroy; } + + /* + * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds. + */ + cid = clt_path->s.con_num - 1; + err = alloc_path_reqs(clt_path); if (err) goto destroy; @@ -3140,8 +3143,20 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, return err; } +void rtrs_clt_ib_event_handler(struct ib_event_handler *handler, + struct ib_event *ibevent) +{ + pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event), + ibevent->event); +} + + static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev) { + INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev, + rtrs_clt_ib_event_handler); + ib_register_event_handler(&dev->event_handler); + if (!(dev->ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) { pr_err("Memory registrations not supported.\n"); @@ -3151,8 +3166,15 @@ static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev) return 0; } +static void rtrs_clt_ib_dev_deinit(struct rtrs_ib_dev *dev) +{ + ib_unregister_event_handler(&dev->event_handler); +} + + static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = { - .init = rtrs_clt_ib_dev_init + .init = rtrs_clt_ib_dev_init, + .deinit = rtrs_clt_ib_dev_deinit }; static int __init rtrs_client_init(void) diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h index f848c0392d982a..0f57759b3080f4 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h @@ -115,7 +115,6 @@ struct rtrs_clt_io_req { struct completion inv_comp; int inv_errno; bool need_inv_comp; - bool need_inv; refcount_t ref; }; @@ -213,6 +212,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path, void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value); int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt); void free_path(struct rtrs_clt_path *clt_path); +void rtrs_clt_ib_event_handler(struct ib_event_handler *handler, + struct ib_event *ibevent); /* rtrs-clt-stats.c */ diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h index ab25619261d28b..ef29bd483b5ad2 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h @@ -69,6 +69,7 @@ struct rtrs_ib_dev; struct rtrs_rdma_dev_pd_ops { int (*init)(struct rtrs_ib_dev *dev); + void (*deinit)(struct 
rtrs_ib_dev *dev); }; struct rtrs_rdma_dev_pd { @@ -84,6 +85,7 @@ struct rtrs_ib_dev { struct kref ref; struct list_head entry; struct rtrs_rdma_dev_pd *pool; + struct ib_event_handler event_handler; }; struct rtrs_con { diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index 1d33efb8fb03be..e83d956478521d 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -26,7 +26,10 @@ MODULE_LICENSE("GPL"); #define DEFAULT_SESS_QUEUE_DEPTH 512 #define MAX_HDR_SIZE PAGE_SIZE -static struct rtrs_rdma_dev_pd dev_pd; +static const struct rtrs_rdma_dev_pd_ops dev_pd_ops; +static struct rtrs_rdma_dev_pd dev_pd = { + .ops = &dev_pd_ops +}; const struct class rtrs_dev_class = { .name = "rtrs-server", }; @@ -672,6 +675,10 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) static void rtrs_srv_hb_err_handler(struct rtrs_con *c) { + struct rtrs_srv_con *con = container_of(c, typeof(*con), c); + struct rtrs_srv_path *srv_path = to_srv_path(con->c.path); + + rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&srv_path->kobj)); close_path(to_srv_path(c->path)); } @@ -931,12 +938,11 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) if (err) goto close; -out: rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); return; close: + rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); close_path(srv_path); - goto out; } static int post_recv_info_req(struct rtrs_srv_con *con) @@ -987,6 +993,16 @@ static int post_recv_path(struct rtrs_srv_path *srv_path) q_size = SERVICE_CON_QUEUE_DEPTH; else q_size = srv->queue_depth; + if (srv_path->state != RTRS_SRV_CONNECTING) { + rtrs_err(s, "Path state invalid. state %s\n", + rtrs_srv_state_str(srv_path->state)); + return -EIO; + } + + if (!srv_path->s.con[cid]) { + rtrs_err(s, "Conn not set for %d\n", cid); + return -EIO; + } err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size); if (err) { @@ -1229,6 +1245,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) */ if (WARN_ON(wc->wr_cqe != &io_comp_cqe)) return; + srv_path->s.hb_missed_cnt = 0; err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); @@ -2255,6 +2272,34 @@ static int check_module_params(void) return 0; } +void rtrs_srv_ib_event_handler(struct ib_event_handler *handler, + struct ib_event *ibevent) +{ + pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event), + ibevent->event); +} + +static int rtrs_srv_ib_dev_init(struct rtrs_ib_dev *dev) +{ + INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev, + rtrs_srv_ib_event_handler); + ib_register_event_handler(&dev->event_handler); + + return 0; +} + +static void rtrs_srv_ib_dev_deinit(struct rtrs_ib_dev *dev) +{ + ib_unregister_event_handler(&dev->event_handler); +} + + +static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = { + .init = rtrs_srv_ib_dev_init, + .deinit = rtrs_srv_ib_dev_deinit +}; + + static int __init rtrs_server_init(void) { int err; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h index 5e325b82ff3364..014f85681f37d4 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h @@ -132,6 +132,8 @@ struct rtrs_srv_ib_ctx { extern const struct class rtrs_dev_class; void close_path(struct rtrs_srv_path *srv_path); +void rtrs_srv_ib_event_handler(struct ib_event_handler *handler, + struct ib_event *ibevent); static inline void rtrs_srv_update_rdma_stats(struct 
rtrs_srv_stats *s, size_t size, int d) diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index a8ce3d14072233..b5cbb57ee5f600 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -498,6 +498,13 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer, struct input_event event; int retval = 0; + /* + * Limit amount of data we inject into the input subsystem so that + * we do not hold evdev->mutex for too long. 4096 bytes corresponds + * to 170 input events. + */ + count = min(count, 4096); + if (count != 0 && count < input_event_size()) return -EINVAL; @@ -1292,7 +1299,6 @@ static const struct file_operations evdev_fops = { .compat_ioctl = evdev_ioctl_compat, #endif .fasync = evdev_fasync, - .llseek = no_llseek, }; /* diff --git a/drivers/input/input.c b/drivers/input/input.c index 54c57b267b25f7..47fac29cf7c3b3 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1079,33 +1079,31 @@ static inline void input_wakeup_procfs_readers(void) wake_up(&input_devices_poll_wait); } +struct input_seq_state { + unsigned short pos; + bool mutex_acquired; + int input_devices_state; +}; + static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait) { + struct seq_file *seq = file->private_data; + struct input_seq_state *state = seq->private; + poll_wait(file, &input_devices_poll_wait, wait); - if (file->f_version != input_devices_state) { - file->f_version = input_devices_state; + if (state->input_devices_state != input_devices_state) { + state->input_devices_state = input_devices_state; return EPOLLIN | EPOLLRDNORM; } return 0; } -union input_seq_state { - struct { - unsigned short pos; - bool mutex_acquired; - }; - void *p; -}; - static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) { - union input_seq_state *state = (union input_seq_state *)&seq->private; + struct input_seq_state *state = seq->private; int error; - /* We need to fit into seq->private pointer */ - BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); - error = mutex_lock_interruptible(&input_mutex); if (error) { state->mutex_acquired = false; @@ -1124,7 +1122,7 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void input_seq_stop(struct seq_file *seq, void *v) { - union input_seq_state *state = (union input_seq_state *)&seq->private; + struct input_seq_state *state = seq->private; if (state->mutex_acquired) mutex_unlock(&input_mutex); @@ -1210,7 +1208,8 @@ static const struct seq_operations input_devices_seq_ops = { static int input_proc_devices_open(struct inode *inode, struct file *file) { - return seq_open(file, &input_devices_seq_ops); + return seq_open_private(file, &input_devices_seq_ops, + sizeof(struct input_seq_state)); } static const struct proc_ops input_devices_proc_ops = { @@ -1218,17 +1217,14 @@ static const struct proc_ops input_devices_proc_ops = { .proc_poll = input_proc_devices_poll, .proc_read = seq_read, .proc_lseek = seq_lseek, - .proc_release = seq_release, + .proc_release = seq_release_private, }; static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) { - union input_seq_state *state = (union input_seq_state *)&seq->private; + struct input_seq_state *state = seq->private; int error; - /* We need to fit into seq->private pointer */ - BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); - error = mutex_lock_interruptible(&input_mutex); if (error) { state->mutex_acquired = false; @@ -1243,7 +1239,7 @@ static void 
*input_handlers_seq_start(struct seq_file *seq, loff_t *pos) static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - union input_seq_state *state = (union input_seq_state *)&seq->private; + struct input_seq_state *state = seq->private; state->pos = *pos + 1; return seq_list_next(v, &input_handler_list, pos); @@ -1252,7 +1248,7 @@ static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) static int input_handlers_seq_show(struct seq_file *seq, void *v) { struct input_handler *handler = container_of(v, struct input_handler, node); - union input_seq_state *state = (union input_seq_state *)&seq->private; + struct input_seq_state *state = seq->private; seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); if (handler->filter) @@ -1273,14 +1269,15 @@ static const struct seq_operations input_handlers_seq_ops = { static int input_proc_handlers_open(struct inode *inode, struct file *file) { - return seq_open(file, &input_handlers_seq_ops); + return seq_open_private(file, &input_handlers_seq_ops, + sizeof(struct input_seq_state)); } static const struct proc_ops input_handlers_proc_ops = { .proc_open = input_proc_handlers_open, .proc_read = seq_read, .proc_lseek = seq_lseek, - .proc_release = seq_release, + .proc_release = seq_release_private, }; static int __init input_proc_init(void) @@ -2224,7 +2221,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev) mt_slots = dev->mt->num_slots; } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - - dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, + dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1; mt_slots = clamp(mt_slots, 2, 32); } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { mt_slots = 2; diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 5824bca02e5a09..ba2b17288bcdb2 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -718,7 +718,6 @@ static const struct file_operations joydev_fops = { .compat_ioctl = joydev_compat_ioctl, #endif .fasync = joydev_fasync, - .llseek = no_llseek, }; /* diff --git a/drivers/input/joystick/adafruit-seesaw.c b/drivers/input/joystick/adafruit-seesaw.c index 5c775ca886a581..c248c15b849d08 100644 --- a/drivers/input/joystick/adafruit-seesaw.c +++ b/drivers/input/joystick/adafruit-seesaw.c @@ -15,7 +15,7 @@ * - Add interrupt support */ -#include +#include #include #include #include diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c index de1fa4cf291b20..ff44f9978b719e 100644 --- a/drivers/input/joystick/adc-joystick.c +++ b/drivers/input/joystick/adc-joystick.c @@ -11,7 +11,7 @@ #include #include -#include +#include struct adc_joystick_axis { u32 code; @@ -132,7 +132,6 @@ static void adc_joystick_cleanup(void *data) static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy) { struct adc_joystick_axis *axes = joy->axes; - struct fwnode_handle *child; s32 range[2], fuzz, flat; unsigned int num_axes; int error, i; @@ -149,31 +148,30 @@ static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy) return -EINVAL; } - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { error = fwnode_property_read_u32(child, "reg", &i); if (error) { dev_err(dev, "reg invalid or missing\n"); - goto err_fwnode_put; + return error; } if (i >= num_axes) { - error = -EINVAL; dev_err(dev, "No matching axis for reg %d\n", i); - goto err_fwnode_put; + return -EINVAL; 
} error = fwnode_property_read_u32(child, "linux,code", &axes[i].code); if (error) { dev_err(dev, "linux,code invalid or missing\n"); - goto err_fwnode_put; + return error; } error = fwnode_property_read_u32_array(child, "abs-range", range, 2); if (error) { dev_err(dev, "abs-range invalid or missing\n"); - goto err_fwnode_put; + return error; } if (range[0] > range[1]) { @@ -193,10 +191,6 @@ static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy) } return 0; - -err_fwnode_put: - fwnode_handle_put(child); - return error; } diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c index 84b87526b7ba31..55e6321adab9d6 100644 --- a/drivers/input/joystick/iforce/iforce-main.c +++ b/drivers/input/joystick/iforce/iforce-main.c @@ -6,7 +6,7 @@ * USB/RS232 I-Force joysticks and wheels. */ -#include +#include #include "iforce.h" MODULE_AUTHOR("Vojtech Pavlik , Johann Deneux "); diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c index 763642c8cee9f5..08c889a72f6c67 100644 --- a/drivers/input/joystick/iforce/iforce-packets.c +++ b/drivers/input/joystick/iforce/iforce-packets.c @@ -6,7 +6,7 @@ * USB/RS232 I-Force joysticks and wheels. */ -#include +#include #include "iforce.h" static struct { diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c index 49101f1c858b4d..4f2221001a95ab 100644 --- a/drivers/input/joystick/spaceball.c +++ b/drivers/input/joystick/spaceball.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #define DRIVER_DESC "SpaceTec SpaceBall 2003/3003/4000 FLX driver" diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 1d0c5f4c0f99e0..721ab69e84ac65 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -421,18 +421,6 @@ config KEYBOARD_MAX7359 To compile this driver as a module, choose M here: the module will be called max7359_keypad. -config KEYBOARD_MCS - tristate "MELFAS MCS Touchkey" - depends on I2C - help - Say Y here if you have the MELFAS MCS5000/5080 touchkey controller - chip in your system. - - If unsure, say N. - - To compile this driver as a module, choose M here: the - module will be called mcs_touchkey. - config KEYBOARD_MPR121 tristate "Freescale MPR121 Touchkey" depends on I2C @@ -466,6 +454,17 @@ config KEYBOARD_IMX To compile this driver as a module, choose M here: the module will be called imx_keypad. +config KEYBOARD_IMX_BBM_SCMI + tristate "IMX BBM SCMI Key Driver" + depends on IMX_SCMI_BBM_EXT || COMPILE_TEST + default y if ARCH_MXC + help + This is the BBM key driver for NXP i.MX SoCs managed through + SCMI protocol. + + To compile this driver as a module, choose M here: the + module will be called scmi-imx-bbm-key. + config KEYBOARD_IMX_SC_KEY tristate "IMX SCU Key Driver" depends on IMX_SCU @@ -485,17 +484,6 @@ config KEYBOARD_NEWTON To compile this driver as a module, choose M here: the module will be called newtonkbd. -config KEYBOARD_NOMADIK - tristate "ST-Ericsson Nomadik SKE keyboard" - depends on (ARCH_NOMADIK || ARCH_U8500 || COMPILE_TEST) - select INPUT_MATRIXKMAP - help - Say Y here if you want to use a keypad provided on the SKE controller - used on the Ux500 and Nomadik platforms - - To compile this driver as a module, choose M here: the - module will be called nmk-ske-keypad. 
- config KEYBOARD_NSPIRE tristate "TI-NSPIRE built-in keyboard" depends on ARCH_NSPIRE && OF diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index aecef00c5d09d3..1e0721c3070968 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_KEYBOARD_IPAQ_MICRO) += ipaq-micro-keys.o obj-$(CONFIG_KEYBOARD_IQS62X) += iqs62x-keys.o obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o obj-$(CONFIG_KEYBOARD_IMX_SC_KEY) += imx_sc_key.o +obj-$(CONFIG_KEYBOARD_IMX_BBM_SCMI) += imx-sm-bbm-key.o obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o @@ -41,12 +42,10 @@ obj-$(CONFIG_KEYBOARD_LPC32XX) += lpc32xx-keys.o obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o -obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o obj-$(CONFIG_KEYBOARD_MT6779) += mt6779-keypad.o obj-$(CONFIG_KEYBOARD_MTK_PMIC) += mtk-pmic-keys.o obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o -obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o obj-$(CONFIG_KEYBOARD_NSPIRE) += nspire-keypad.o obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c index bf72ab8df81775..f1753207429db0 100644 --- a/drivers/input/keyboard/adc-keys.c +++ b/drivers/input/keyboard/adc-keys.c @@ -66,7 +66,6 @@ static void adc_keys_poll(struct input_dev *input) static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st) { struct adc_keys_button *map; - struct fwnode_handle *child; int i; st->num_keys = device_get_child_node_count(dev); @@ -80,11 +79,10 @@ static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st) return -ENOMEM; i = 0; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { if (fwnode_property_read_u32(child, "press-threshold-microvolt", &map[i].voltage)) { dev_err(dev, "Key with invalid or missing voltage\n"); - fwnode_handle_put(child); return -EINVAL; } map[i].voltage /= 1000; @@ -92,7 +90,6 @@ static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st) if (fwnode_property_read_u32(child, "linux,code", &map[i].keycode)) { dev_err(dev, "Key with invalid or missing linux,code\n"); - fwnode_handle_put(child); return -EINVAL; } diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 1b0279393df4bb..d25d63a807f230 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -188,6 +188,7 @@ struct adp5588_kpad { u32 cols; u32 unlock_keys[2]; int nkeys_unlock; + bool gpio_only; unsigned short keycode[ADP5588_KEYMAPSIZE]; unsigned char gpiomap[ADP5588_MAXGPIO]; struct gpio_chip gc; @@ -221,15 +222,13 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned int off) unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); int val; - mutex_lock(&kpad->gpio_lock); + guard(mutex)(&kpad->gpio_lock); if (kpad->dir[bank] & bit) val = kpad->dat_out[bank]; else val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank); - mutex_unlock(&kpad->gpio_lock); - return !!(val & bit); } @@ -240,7 +239,7 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip, unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); - 
mutex_lock(&kpad->gpio_lock); + guard(mutex)(&kpad->gpio_lock); if (val) kpad->dat_out[bank] |= bit; @@ -248,8 +247,6 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip, kpad->dat_out[bank] &= ~bit; adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, kpad->dat_out[bank]); - - mutex_unlock(&kpad->gpio_lock); } static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off, @@ -259,7 +256,6 @@ static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off, unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); bool pull_disable; - int ret; switch (pinconf_to_config_param(config)) { case PIN_CONFIG_BIAS_PULL_UP: @@ -272,19 +268,15 @@ static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off, return -ENOTSUPP; } - mutex_lock(&kpad->gpio_lock); + guard(mutex)(&kpad->gpio_lock); if (pull_disable) kpad->pull_dis[bank] |= bit; else kpad->pull_dis[bank] &= bit; - ret = adp5588_write(kpad->client, GPIO_PULL1 + bank, - kpad->pull_dis[bank]); - - mutex_unlock(&kpad->gpio_lock); - - return ret; + return adp5588_write(kpad->client, GPIO_PULL1 + bank, + kpad->pull_dis[bank]); } static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned int off) @@ -292,16 +284,11 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned int off struct adp5588_kpad *kpad = gpiochip_get_data(chip); unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); - int ret; - mutex_lock(&kpad->gpio_lock); + guard(mutex)(&kpad->gpio_lock); kpad->dir[bank] &= ~bit; - ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]); - - mutex_unlock(&kpad->gpio_lock); - - return ret; + return adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]); } static int adp5588_gpio_direction_output(struct gpio_chip *chip, @@ -310,9 +297,9 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip, struct adp5588_kpad *kpad = gpiochip_get_data(chip); unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); - int ret; + int error; - mutex_lock(&kpad->gpio_lock); + guard(mutex)(&kpad->gpio_lock); kpad->dir[bank] |= bit; @@ -321,17 +308,16 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip, else kpad->dat_out[bank] &= ~bit; - ret = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, - kpad->dat_out[bank]); - if (ret) - goto out_unlock; - - ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]); + error = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, + kpad->dat_out[bank]); + if (error) + return error; -out_unlock: - mutex_unlock(&kpad->gpio_lock); + error = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]); + if (error) + return error; - return ret; + return 0; } static int adp5588_build_gpiomap(struct adp5588_kpad *kpad) @@ -446,10 +432,17 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad) kpad->gc.label = kpad->client->name; kpad->gc.owner = THIS_MODULE; - girq = &kpad->gc.irq; - gpio_irq_chip_set_chip(girq, &adp5588_irq_chip); - girq->handler = handle_bad_irq; - girq->threaded = true; + if (device_property_present(dev, "interrupt-controller")) { + if (!kpad->client->irq) { + dev_err(dev, "Unable to serve as interrupt controller without interrupt"); + return -EINVAL; + } + + girq = &kpad->gc.irq; + gpio_irq_chip_set_chip(girq, &adp5588_irq_chip); + girq->handler = handle_bad_irq; + girq->threaded = true; + } mutex_init(&kpad->gpio_lock); @@ 
-627,7 +620,7 @@ static int adp5588_setup(struct adp5588_kpad *kpad) for (i = 0; i < KEYP_MAX_EVENT; i++) { ret = adp5588_read(client, KEY_EVENTA); - if (ret) + if (ret < 0) return ret; } @@ -647,6 +640,18 @@ static int adp5588_fw_parse(struct adp5588_kpad *kpad) struct i2c_client *client = kpad->client; int ret, i; + /* + * Check if the device is to be operated purely in GPIO mode. To do + * so, check that no keypad rows or columns have been specified, + * since all GPINS should be configured as GPIO. + */ + if (!device_property_present(&client->dev, "keypad,num-rows") && + !device_property_present(&client->dev, "keypad,num-columns")) { + /* If purely GPIO, skip keypad setup */ + kpad->gpio_only = true; + return 0; + } + ret = matrix_keypad_parse_properties(&client->dev, &kpad->rows, &kpad->cols); if (ret) @@ -790,17 +795,19 @@ static int adp5588_probe(struct i2c_client *client) if (error) return error; - error = devm_request_threaded_irq(&client->dev, client->irq, - adp5588_hard_irq, adp5588_thread_irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - client->dev.driver->name, kpad); - if (error) { - dev_err(&client->dev, "failed to request irq %d: %d\n", - client->irq, error); - return error; + if (client->irq) { + error = devm_request_threaded_irq(&client->dev, client->irq, + adp5588_hard_irq, adp5588_thread_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + client->dev.driver->name, kpad); + if (error) { + dev_err(&client->dev, "failed to request irq %d: %d\n", + client->irq, error); + return error; + } } - dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq); + dev_info(&client->dev, "Rev.%d controller\n", revid); return 0; } diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c index 8996e00cd63a82..922d3ab998f3a5 100644 --- a/drivers/input/keyboard/adp5589-keys.c +++ b/drivers/input/keyboard/adp5589-keys.c @@ -391,10 +391,17 @@ static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off) struct adp5589_kpad *kpad = gpiochip_get_data(chip); unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); + int val; - return !!(adp5589_read(kpad->client, - kpad->var->reg(ADP5589_GPI_STATUS_A) + bank) & - bit); + mutex_lock(&kpad->gpio_lock); + if (kpad->dir[bank] & bit) + val = kpad->dat_out[bank]; + else + val = adp5589_read(kpad->client, + kpad->var->reg(ADP5589_GPI_STATUS_A) + bank); + mutex_unlock(&kpad->gpio_lock); + + return !!(val & bit); } static void adp5589_gpio_set_value(struct gpio_chip *chip, @@ -936,10 +943,9 @@ static int adp5589_keypad_add(struct adp5589_kpad *kpad, unsigned int revid) static void adp5589_clear_config(void *data) { - struct i2c_client *client = data; - struct adp5589_kpad *kpad = i2c_get_clientdata(client); + struct adp5589_kpad *kpad = data; - adp5589_write(client, kpad->var->reg(ADP5589_GENERAL_CFG), 0); + adp5589_write(kpad->client, kpad->var->reg(ADP5589_GENERAL_CFG), 0); } static int adp5589_probe(struct i2c_client *client) @@ -983,7 +989,7 @@ static int adp5589_probe(struct i2c_client *client) } error = devm_add_action_or_reset(&client->dev, adp5589_clear_config, - client); + kpad); if (error) return error; @@ -1010,8 +1016,6 @@ static int adp5589_probe(struct i2c_client *client) if (error) return error; - i2c_set_clientdata(client, kpad); - dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq); return 0; } diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index cf25177b483016..2c993fa8306a08 100644 
--- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -57,7 +57,7 @@ #include #include -#include +#include #define CREATE_TRACE_POINTS #include "applespi.h" @@ -1007,7 +1007,6 @@ static const struct file_operations applespi_tp_dim_fops = { .owner = THIS_MODULE, .open = applespi_tp_dim_open, .read = applespi_tp_dim_read, - .llseek = no_llseek, }; static void report_finger_data(struct input_dev *input, int slot, diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index f4f2078cf5015b..5855d4fc6e6a4d 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -639,7 +639,7 @@ static void atkbd_event_work(struct work_struct *work) { struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work); - mutex_lock(&atkbd->mutex); + guard(mutex)(&atkbd->mutex); if (!atkbd->enabled) { /* @@ -657,8 +657,6 @@ static void atkbd_event_work(struct work_struct *work) if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask)) atkbd_set_repeat_rate(atkbd); } - - mutex_unlock(&atkbd->mutex); } /* @@ -1361,7 +1359,7 @@ static int atkbd_reconnect(struct serio *serio) { struct atkbd *atkbd = atkbd_from_serio(serio); struct serio_driver *drv = serio->drv; - int retval = -1; + int error; if (!atkbd || !drv) { dev_dbg(&serio->dev, @@ -1369,16 +1367,17 @@ static int atkbd_reconnect(struct serio *serio) return -1; } - mutex_lock(&atkbd->mutex); + guard(mutex)(&atkbd->mutex); atkbd_disable(atkbd); if (atkbd->write) { - if (atkbd_probe(atkbd)) - goto out; + error = atkbd_probe(atkbd); + if (error) + return error; if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra)) - goto out; + return -EIO; /* * Restore LED state and repeat rate. While input core @@ -1404,11 +1403,7 @@ static int atkbd_reconnect(struct serio *serio) if (atkbd->write) atkbd_activate(atkbd); - retval = 0; - - out: - mutex_unlock(&atkbd->mutex); - return retval; + return 0; } static const struct serio_device_id atkbd_serio_ids[] = { @@ -1465,17 +1460,15 @@ static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t struct atkbd *atkbd = atkbd_from_serio(serio); int retval; - retval = mutex_lock_interruptible(&atkbd->mutex); - if (retval) - return retval; + scoped_guard(mutex_intr, &atkbd->mutex) { + atkbd_disable(atkbd); + retval = handler(atkbd, buf, count); + atkbd_enable(atkbd); - atkbd_disable(atkbd); - retval = handler(atkbd, buf, count); - atkbd_enable(atkbd); - - mutex_unlock(&atkbd->mutex); + return retval; + } - return retval; + return -EINTR; } static ssize_t atkbd_show_extra(struct atkbd *atkbd, char *buf) diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 12eb9df180ee4a..4c81b20ff6af94 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -27,7 +27,7 @@ #include #include -#include +#include /** * struct cros_ec_keyb - Structure representing EC keyboard device diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index 6b811d6bf6258c..dcbc50304a5a46 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c @@ -6,20 +6,13 @@ * * Based on the pxa27x matrix keypad controller by Rodolfo Giometti. * - * NOTE: - * - * The 3-key reset is triggered by pressing the 3 keys in - * Row 0, Columns 2, 4, and 7 at the same time. This action can - * be disabled by setting the EP93XX_KEYPAD_DISABLE_3_KEY flag. 
- * - * Normal operation for the matrix does not autorepeat the key press. - * This action can be enabled by setting the EP93XX_KEYPAD_AUTOREPEAT - * flag. */ #include +#include #include #include +#include #include #include #include @@ -27,7 +20,6 @@ #include #include #include -#include #include /* @@ -61,12 +53,16 @@ #define KEY_REG_KEY1_MASK GENMASK(5, 0) #define KEY_REG_KEY1_SHIFT 0 +#define EP93XX_MATRIX_ROWS (8) +#define EP93XX_MATRIX_COLS (8) + #define EP93XX_MATRIX_SIZE (EP93XX_MATRIX_ROWS * EP93XX_MATRIX_COLS) struct ep93xx_keypad { - struct ep93xx_keypad_platform_data *pdata; struct input_dev *input_dev; struct clk *clk; + unsigned int debounce; + u16 prescale; void __iomem *mmio_base; @@ -133,23 +129,11 @@ static irqreturn_t ep93xx_keypad_irq_handler(int irq, void *dev_id) static void ep93xx_keypad_config(struct ep93xx_keypad *keypad) { - struct ep93xx_keypad_platform_data *pdata = keypad->pdata; unsigned int val = 0; - clk_set_rate(keypad->clk, pdata->clk_rate); - - if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY) - val |= KEY_INIT_DIS3KY; - if (pdata->flags & EP93XX_KEYPAD_DIAG_MODE) - val |= KEY_INIT_DIAG; - if (pdata->flags & EP93XX_KEYPAD_BACK_DRIVE) - val |= KEY_INIT_BACK; - if (pdata->flags & EP93XX_KEYPAD_TEST_MODE) - val |= KEY_INIT_T2; - - val |= ((pdata->debounce << KEY_INIT_DBNC_SHIFT) & KEY_INIT_DBNC_MASK); + val |= (keypad->debounce << KEY_INIT_DBNC_SHIFT) & KEY_INIT_DBNC_MASK; - val |= ((pdata->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK); + val |= (keypad->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK; __raw_writel(val, keypad->mmio_base + KEY_INIT); } @@ -220,17 +204,10 @@ static int ep93xx_keypad_resume(struct device *dev) static DEFINE_SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops, ep93xx_keypad_suspend, ep93xx_keypad_resume); -static void ep93xx_keypad_release_gpio_action(void *_pdev) -{ - struct platform_device *pdev = _pdev; - - ep93xx_keypad_release_gpio(pdev); -} - static int ep93xx_keypad_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct ep93xx_keypad *keypad; - const struct matrix_keymap_data *keymap_data; struct input_dev *input_dev; int err; @@ -238,14 +215,6 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) if (!keypad) return -ENOMEM; - keypad->pdata = dev_get_platdata(&pdev->dev); - if (!keypad->pdata) - return -EINVAL; - - keymap_data = keypad->pdata->keymap_data; - if (!keymap_data) - return -EINVAL; - keypad->irq = platform_get_irq(pdev, 0); if (keypad->irq < 0) return keypad->irq; @@ -254,19 +223,13 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) if (IS_ERR(keypad->mmio_base)) return PTR_ERR(keypad->mmio_base); - err = ep93xx_keypad_acquire_gpio(pdev); - if (err) - return err; - - err = devm_add_action_or_reset(&pdev->dev, - ep93xx_keypad_release_gpio_action, pdev); - if (err) - return err; - keypad->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(keypad->clk)) return PTR_ERR(keypad->clk); + device_property_read_u32(dev, "debounce-delay-ms", &keypad->debounce); + device_property_read_u16(dev, "cirrus,prescale", &keypad->prescale); + input_dev = devm_input_allocate_device(&pdev->dev); if (!input_dev) return -ENOMEM; @@ -278,13 +241,13 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) input_dev->open = ep93xx_keypad_open; input_dev->close = ep93xx_keypad_close; - err = matrix_keypad_build_keymap(keymap_data, NULL, + err = matrix_keypad_build_keymap(NULL, NULL, EP93XX_MATRIX_ROWS, EP93XX_MATRIX_COLS, keypad->keycodes, input_dev); if (err) return err; - if 
(keypad->pdata->flags & EP93XX_KEYPAD_AUTOREPEAT) + if (device_property_read_bool(&pdev->dev, "autorepeat")) __set_bit(EV_REP, input_dev->evbit); input_set_drvdata(input_dev, keypad); @@ -313,10 +276,17 @@ static void ep93xx_keypad_remove(struct platform_device *pdev) dev_pm_clear_wake_irq(&pdev->dev); } +static const struct of_device_id ep93xx_keypad_of_ids[] = { + { .compatible = "cirrus,ep9307-keypad" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ep93xx_keypad_of_ids); + static struct platform_driver ep93xx_keypad_driver = { .driver = { .name = "ep93xx-keypad", .pm = pm_sleep_ptr(&ep93xx_keypad_pm_ops), + .of_match_table = ep93xx_keypad_of_ids, }, .probe = ep93xx_keypad_probe, .remove_new = ep93xx_keypad_remove, diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 9f3bcd41cf67da..380fe8dab3b063 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -245,23 +245,20 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, { int n_events = get_n_events_by_type(type); const unsigned long *bitmap = get_bm_events_by_type(ddata->input, type); - unsigned long *bits; ssize_t error; int i; - bits = bitmap_alloc(n_events, GFP_KERNEL); + unsigned long *bits __free(bitmap) = bitmap_alloc(n_events, GFP_KERNEL); if (!bits) return -ENOMEM; error = bitmap_parselist(buf, bits, n_events); if (error) - goto out; + return error; /* First validate */ - if (!bitmap_subset(bits, bitmap, n_events)) { - error = -EINVAL; - goto out; - } + if (!bitmap_subset(bits, bitmap, n_events)) + return -EINVAL; for (i = 0; i < ddata->pdata->nbuttons; i++) { struct gpio_button_data *bdata = &ddata->data[i]; @@ -271,12 +268,11 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, if (test_bit(*bdata->code, bits) && !bdata->button->can_disable) { - error = -EINVAL; - goto out; + return -EINVAL; } } - mutex_lock(&ddata->disable_lock); + guard(mutex)(&ddata->disable_lock); for (i = 0; i < ddata->pdata->nbuttons; i++) { struct gpio_button_data *bdata = &ddata->data[i]; @@ -290,11 +286,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, gpio_keys_enable_button(bdata); } - mutex_unlock(&ddata->disable_lock); - -out: - bitmap_free(bits); - return error; + return 0; } #define ATTR_SHOW_FN(name, type, only_disabled) \ @@ -470,11 +462,10 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id) { struct gpio_button_data *bdata = dev_id; struct input_dev *input = bdata->input; - unsigned long flags; BUG_ON(irq != bdata->irq); - spin_lock_irqsave(&bdata->lock, flags); + guard(spinlock_irqsave)(&bdata->lock); if (!bdata->key_pressed) { if (bdata->button->wakeup) @@ -497,7 +488,6 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id) ms_to_ktime(bdata->release_delay), HRTIMER_MODE_REL_HARD); out: - spin_unlock_irqrestore(&bdata->lock, flags); return IRQ_HANDLED; } @@ -768,7 +758,6 @@ gpio_keys_get_devtree_pdata(struct device *dev) { struct gpio_keys_platform_data *pdata; struct gpio_keys_button *button; - struct fwnode_handle *child; int nbuttons, irq; nbuttons = device_get_child_node_count(dev); @@ -790,7 +779,7 @@ gpio_keys_get_devtree_pdata(struct device *dev) device_property_read_string(dev, "label", &pdata->name); - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { if (is_of_node(child)) { irq = of_irq_get_byname(to_of_node(child), "irq"); if (irq > 0) @@ -808,7 +797,6 @@ gpio_keys_get_devtree_pdata(struct device *dev) if 
(fwnode_property_read_u32(child, "linux,code", &button->code)) { dev_err(dev, "Button without keycode\n"); - fwnode_handle_put(child); return ERR_PTR(-EINVAL); } @@ -1064,10 +1052,10 @@ static int gpio_keys_suspend(struct device *dev) if (error) return error; } else { - mutex_lock(&input->mutex); + guard(mutex)(&input->mutex); + if (input_device_enabled(input)) gpio_keys_close(input); - mutex_unlock(&input->mutex); } return 0; @@ -1077,20 +1065,20 @@ static int gpio_keys_resume(struct device *dev) { struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev); struct input_dev *input = ddata->input; - int error = 0; + int error; if (device_may_wakeup(dev)) { gpio_keys_disable_wakeup(ddata); } else { - mutex_lock(&input->mutex); - if (input_device_enabled(input)) + guard(mutex)(&input->mutex); + + if (input_device_enabled(input)) { error = gpio_keys_open(input); - mutex_unlock(&input->mutex); + if (error) + return error; + } } - if (error) - return error; - gpio_keys_report_state(ddata); return 0; } diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c index b41fd1240f4312..41ca0d3c909856 100644 --- a/drivers/input/keyboard/gpio_keys_polled.c +++ b/drivers/input/keyboard/gpio_keys_polled.c @@ -144,7 +144,6 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev) { struct gpio_keys_platform_data *pdata; struct gpio_keys_button *button; - struct fwnode_handle *child; int nbuttons; nbuttons = device_get_child_node_count(dev); @@ -166,11 +165,10 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev) device_property_read_string(dev, "label", &pdata->name); - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { if (fwnode_property_read_u32(child, "linux,code", &button->code)) { dev_err(dev, "button without keycode\n"); - fwnode_handle_put(child); return ERR_PTR(-EINVAL); } diff --git a/drivers/input/keyboard/imx-sm-bbm-key.c b/drivers/input/keyboard/imx-sm-bbm-key.c new file mode 100644 index 00000000000000..96486bd23d609d --- /dev/null +++ b/drivers/input/keyboard/imx-sm-bbm-key.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2024 NXP. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBOUNCE_TIME 30 +#define REPEAT_INTERVAL 60 + +struct scmi_imx_bbm { + struct scmi_protocol_handle *ph; + const struct scmi_imx_bbm_proto_ops *ops; + struct notifier_block nb; + int keycode; + int keystate; /* 1:pressed */ + bool suspended; + struct delayed_work check_work; + struct input_dev *input; +}; + +static void scmi_imx_bbm_pwrkey_check_for_events(struct work_struct *work) +{ + struct scmi_imx_bbm *bbnsm = container_of(to_delayed_work(work), + struct scmi_imx_bbm, check_work); + struct scmi_protocol_handle *ph = bbnsm->ph; + struct input_dev *input = bbnsm->input; + u32 state = 0; + int ret; + + ret = bbnsm->ops->button_get(ph, &state); + if (ret) { + pr_err("%s: %d\n", __func__, ret); + return; + } + + pr_debug("%s: state: %d, keystate %d\n", __func__, state, bbnsm->keystate); + + /* only report new event if status changed */ + if (state ^ bbnsm->keystate) { + bbnsm->keystate = state; + input_event(input, EV_KEY, bbnsm->keycode, state); + input_sync(input); + pm_relax(bbnsm->input->dev.parent); + pr_debug("EV_KEY: %x\n", bbnsm->keycode); + } + + /* repeat check if pressed long */ + if (state) + schedule_delayed_work(&bbnsm->check_work, msecs_to_jiffies(REPEAT_INTERVAL)); +} + +static int scmi_imx_bbm_pwrkey_event(struct scmi_imx_bbm *bbnsm) +{ + struct input_dev *input = bbnsm->input; + + pm_wakeup_event(input->dev.parent, 0); + + /* + * Directly report key event after resume to make no key press + * event is missed. + */ + if (READ_ONCE(bbnsm->suspended)) { + bbnsm->keystate = 1; + input_event(input, EV_KEY, bbnsm->keycode, 1); + input_sync(input); + WRITE_ONCE(bbnsm->suspended, false); + } + + schedule_delayed_work(&bbnsm->check_work, msecs_to_jiffies(DEBOUNCE_TIME)); + + return 0; +} + +static void scmi_imx_bbm_pwrkey_act(void *pdata) +{ + struct scmi_imx_bbm *bbnsm = pdata; + + cancel_delayed_work_sync(&bbnsm->check_work); +} + +static int scmi_imx_bbm_key_notifier(struct notifier_block *nb, unsigned long event, void *data) +{ + struct scmi_imx_bbm *bbnsm = container_of(nb, struct scmi_imx_bbm, nb); + struct scmi_imx_bbm_notif_report *r = data; + + if (r->is_button) { + pr_debug("BBM Button Power key pressed\n"); + scmi_imx_bbm_pwrkey_event(bbnsm); + } else { + /* Should never reach here */ + pr_err("Unexpected BBM event: %s\n", __func__); + } + + return 0; +} + +static int scmi_imx_bbm_pwrkey_init(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + struct device *dev = &sdev->dev; + struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev); + struct input_dev *input; + int ret; + + if (device_property_read_u32(dev, "linux,code", &bbnsm->keycode)) { + bbnsm->keycode = KEY_POWER; + dev_warn(dev, "key code is not specified, using default KEY_POWER\n"); + } + + INIT_DELAYED_WORK(&bbnsm->check_work, scmi_imx_bbm_pwrkey_check_for_events); + + input = devm_input_allocate_device(dev); + if (!input) { + dev_err(dev, "failed to allocate the input device for SCMI IMX BBM\n"); + return -ENOMEM; + } + + input->name = dev_name(dev); + input->phys = "bbnsm-pwrkey/input0"; + input->id.bustype = BUS_HOST; + + input_set_capability(input, EV_KEY, bbnsm->keycode); + + ret = devm_add_action_or_reset(dev, scmi_imx_bbm_pwrkey_act, bbnsm); + if (ret) { + dev_err(dev, "failed to register remove action\n"); + return ret; + } + + bbnsm->input = input; + + bbnsm->nb.notifier_call = &scmi_imx_bbm_key_notifier; + ret = handle->notify_ops->devm_event_notifier_register(sdev, 
SCMI_PROTOCOL_IMX_BBM, + SCMI_EVENT_IMX_BBM_BUTTON, + NULL, &bbnsm->nb); + + if (ret) + dev_err(dev, "Failed to register BBM Button Events %d:", ret); + + ret = input_register_device(input); + if (ret) { + dev_err(dev, "failed to register input device\n"); + return ret; + } + + return 0; +} + +static int scmi_imx_bbm_key_probe(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + struct device *dev = &sdev->dev; + struct scmi_protocol_handle *ph; + struct scmi_imx_bbm *bbnsm; + int ret; + + if (!handle) + return -ENODEV; + + bbnsm = devm_kzalloc(dev, sizeof(*bbnsm), GFP_KERNEL); + if (!bbnsm) + return -ENOMEM; + + bbnsm->ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_BBM, &ph); + if (IS_ERR(bbnsm->ops)) + return PTR_ERR(bbnsm->ops); + + bbnsm->ph = ph; + + device_init_wakeup(dev, true); + + dev_set_drvdata(dev, bbnsm); + + ret = scmi_imx_bbm_pwrkey_init(sdev); + if (ret) + device_init_wakeup(dev, false); + + return ret; +} + +static int __maybe_unused scmi_imx_bbm_key_suspend(struct device *dev) +{ + struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev); + + WRITE_ONCE(bbnsm->suspended, true); + + return 0; +} + +static int __maybe_unused scmi_imx_bbm_key_resume(struct device *dev) +{ + return 0; +} + +static SIMPLE_DEV_PM_OPS(scmi_imx_bbm_pm_key_ops, scmi_imx_bbm_key_suspend, + scmi_imx_bbm_key_resume); + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_IMX_BBM, "imx-bbm-key" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_imx_bbm_key_driver = { + .driver = { + .pm = &scmi_imx_bbm_pm_key_ops, + }, + .name = "scmi-imx-bbm-key", + .probe = scmi_imx_bbm_key_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_imx_bbm_key_driver); + +MODULE_AUTHOR("Peng Fan "); +MODULE_DESCRIPTION("IMX SM BBM Key driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c index 688d61244b5fdf..1315b0f0862fc0 100644 --- a/drivers/input/keyboard/iqs62x-keys.c +++ b/drivers/input/keyboard/iqs62x-keys.c @@ -45,7 +45,6 @@ struct iqs62x_keys_private { static int iqs62x_keys_parse_prop(struct platform_device *pdev, struct iqs62x_keys_private *iqs62x_keys) { - struct fwnode_handle *child; unsigned int val; int ret, i; @@ -68,7 +67,8 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev, } for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) { - child = device_get_named_child_node(&pdev->dev, + struct fwnode_handle *child __free(fwnode_handle) = + device_get_named_child_node(&pdev->dev, iqs62x_switch_names[i]); if (!child) continue; @@ -77,7 +77,6 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev, if (ret) { dev_err(&pdev->dev, "Failed to read switch code: %d\n", ret); - fwnode_handle_put(child); return ret; } iqs62x_keys->switches[i].code = val; @@ -91,8 +90,6 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev, iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ? 
IQS62X_EVENT_HALL_N_T : IQS62X_EVENT_HALL_S_T); - - fwnode_handle_put(child); } return 0; diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 7a56f3d3aacd27..3c38bae576edd6 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -17,19 +17,27 @@ #include #include #include +#include #include #include #include -#include -#include struct matrix_keypad { - const struct matrix_keypad_platform_data *pdata; struct input_dev *input_dev; unsigned int row_shift; + unsigned int col_scan_delay_us; + /* key debounce interval in milli-second */ + unsigned int debounce_ms; + bool drive_inactive_cols; + + struct gpio_desc *row_gpios[MATRIX_MAX_ROWS]; + unsigned int num_row_gpios; + + struct gpio_desc *col_gpios[MATRIX_MAX_ROWS]; + unsigned int num_col_gpios; + unsigned int row_irqs[MATRIX_MAX_ROWS]; - unsigned int num_row_irqs; DECLARE_BITMAP(wakeup_enabled_irqs, MATRIX_MAX_ROWS); uint32_t last_key_state[MATRIX_MAX_COLS]; @@ -45,50 +53,43 @@ struct matrix_keypad { * columns. In that case it is configured here to be input, otherwise it is * driven with the inactive value. */ -static void __activate_col(const struct matrix_keypad_platform_data *pdata, - int col, bool on) +static void __activate_col(struct matrix_keypad *keypad, int col, bool on) { - bool level_on = !pdata->active_low; - if (on) { - gpio_direction_output(pdata->col_gpios[col], level_on); + gpiod_direction_output(keypad->col_gpios[col], 1); } else { - gpio_set_value_cansleep(pdata->col_gpios[col], !level_on); - if (!pdata->drive_inactive_cols) - gpio_direction_input(pdata->col_gpios[col]); + gpiod_set_value_cansleep(keypad->col_gpios[col], 0); + if (!keypad->drive_inactive_cols) + gpiod_direction_input(keypad->col_gpios[col]); } } -static void activate_col(const struct matrix_keypad_platform_data *pdata, - int col, bool on) +static void activate_col(struct matrix_keypad *keypad, int col, bool on) { - __activate_col(pdata, col, on); + __activate_col(keypad, col, on); - if (on && pdata->col_scan_delay_us) - udelay(pdata->col_scan_delay_us); + if (on && keypad->col_scan_delay_us) + udelay(keypad->col_scan_delay_us); } -static void activate_all_cols(const struct matrix_keypad_platform_data *pdata, - bool on) +static void activate_all_cols(struct matrix_keypad *keypad, bool on) { int col; - for (col = 0; col < pdata->num_col_gpios; col++) - __activate_col(pdata, col, on); + for (col = 0; col < keypad->num_col_gpios; col++) + __activate_col(keypad, col, on); } -static bool row_asserted(const struct matrix_keypad_platform_data *pdata, - int row) +static bool row_asserted(struct matrix_keypad *keypad, int row) { - return gpio_get_value_cansleep(pdata->row_gpios[row]) ? 
- !pdata->active_low : pdata->active_low; + return gpiod_get_value_cansleep(keypad->row_gpios[row]); } static void enable_row_irqs(struct matrix_keypad *keypad) { int i; - for (i = 0; i < keypad->num_row_irqs; i++) + for (i = 0; i < keypad->num_row_gpios; i++) enable_irq(keypad->row_irqs[i]); } @@ -96,7 +97,7 @@ static void disable_row_irqs(struct matrix_keypad *keypad) { int i; - for (i = 0; i < keypad->num_row_irqs; i++) + for (i = 0; i < keypad->num_row_gpios; i++) disable_irq_nosync(keypad->row_irqs[i]); } @@ -109,39 +110,38 @@ static void matrix_keypad_scan(struct work_struct *work) container_of(work, struct matrix_keypad, work.work); struct input_dev *input_dev = keypad->input_dev; const unsigned short *keycodes = input_dev->keycode; - const struct matrix_keypad_platform_data *pdata = keypad->pdata; uint32_t new_state[MATRIX_MAX_COLS]; int row, col, code; /* de-activate all columns for scanning */ - activate_all_cols(pdata, false); + activate_all_cols(keypad, false); memset(new_state, 0, sizeof(new_state)); - for (row = 0; row < pdata->num_row_gpios; row++) - gpio_direction_input(pdata->row_gpios[row]); + for (row = 0; row < keypad->num_row_gpios; row++) + gpiod_direction_input(keypad->row_gpios[row]); /* assert each column and read the row status out */ - for (col = 0; col < pdata->num_col_gpios; col++) { + for (col = 0; col < keypad->num_col_gpios; col++) { - activate_col(pdata, col, true); + activate_col(keypad, col, true); - for (row = 0; row < pdata->num_row_gpios; row++) + for (row = 0; row < keypad->num_row_gpios; row++) new_state[col] |= - row_asserted(pdata, row) ? (1 << row) : 0; + row_asserted(keypad, row) ? BIT(row) : 0; - activate_col(pdata, col, false); + activate_col(keypad, col, false); } - for (col = 0; col < pdata->num_col_gpios; col++) { + for (col = 0; col < keypad->num_col_gpios; col++) { uint32_t bits_changed; bits_changed = keypad->last_key_state[col] ^ new_state[col]; if (bits_changed == 0) continue; - for (row = 0; row < pdata->num_row_gpios; row++) { - if ((bits_changed & (1 << row)) == 0) + for (row = 0; row < keypad->num_row_gpios; row++) { + if (!(bits_changed & BIT(row))) continue; code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); @@ -155,7 +155,7 @@ static void matrix_keypad_scan(struct work_struct *work) memcpy(keypad->last_key_state, new_state, sizeof(new_state)); - activate_all_cols(pdata, true); + activate_all_cols(keypad, true); /* Enable IRQs again */ spin_lock_irq(&keypad->lock); @@ -182,7 +182,7 @@ static irqreturn_t matrix_keypad_interrupt(int irq, void *id) disable_row_irqs(keypad); keypad->scan_pending = true; schedule_delayed_work(&keypad->work, - msecs_to_jiffies(keypad->pdata->debounce_ms)); + msecs_to_jiffies(keypad->debounce_ms)); out: spin_unlock_irqrestore(&keypad->lock, flags); @@ -225,7 +225,8 @@ static void matrix_keypad_enable_wakeup(struct matrix_keypad *keypad) { int i; - for_each_clear_bit(i, keypad->wakeup_enabled_irqs, keypad->num_row_irqs) + for_each_clear_bit(i, keypad->wakeup_enabled_irqs, + keypad->num_row_gpios) if (enable_irq_wake(keypad->row_irqs[i]) == 0) __set_bit(i, keypad->wakeup_enabled_irqs); } @@ -234,7 +235,8 @@ static void matrix_keypad_disable_wakeup(struct matrix_keypad *keypad) { int i; - for_each_set_bit(i, keypad->wakeup_enabled_irqs, keypad->num_row_irqs) { + for_each_set_bit(i, keypad->wakeup_enabled_irqs, + keypad->num_row_gpios) { disable_irq_wake(keypad->row_irqs[i]); __clear_bit(i, keypad->wakeup_enabled_irqs); } @@ -272,182 +274,108 @@ static DEFINE_SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops, static 
int matrix_keypad_init_gpio(struct platform_device *pdev, struct matrix_keypad *keypad) { - const struct matrix_keypad_platform_data *pdata = keypad->pdata; - int i, irq, err; - - /* initialized strobe lines as outputs, activated */ - for (i = 0; i < pdata->num_col_gpios; i++) { - err = devm_gpio_request(&pdev->dev, - pdata->col_gpios[i], "matrix_kbd_col"); - if (err) { - dev_err(&pdev->dev, - "failed to request GPIO%d for COL%d\n", - pdata->col_gpios[i], i); - return err; - } + bool active_low; + int nrow, ncol; + int err; + int i; - gpio_direction_output(pdata->col_gpios[i], !pdata->active_low); + nrow = gpiod_count(&pdev->dev, "row"); + ncol = gpiod_count(&pdev->dev, "col"); + if (nrow < 0 || ncol < 0) { + dev_err(&pdev->dev, "missing row or column GPIOs\n"); + return -EINVAL; } - for (i = 0; i < pdata->num_row_gpios; i++) { - err = devm_gpio_request(&pdev->dev, - pdata->row_gpios[i], "matrix_kbd_row"); + keypad->num_row_gpios = nrow; + keypad->num_col_gpios = ncol; + + active_low = device_property_read_bool(&pdev->dev, "gpio-activelow"); + + /* initialize strobe lines as outputs, activated */ + for (i = 0; i < keypad->num_col_gpios; i++) { + keypad->col_gpios[i] = devm_gpiod_get_index(&pdev->dev, "col", + i, GPIOD_ASIS); + err = PTR_ERR_OR_ZERO(keypad->col_gpios[i]); if (err) { dev_err(&pdev->dev, - "failed to request GPIO%d for ROW%d\n", - pdata->row_gpios[i], i); + "failed to request GPIO for COL%d: %d\n", + i, err); return err; } - gpio_direction_input(pdata->row_gpios[i]); + gpiod_set_consumer_name(keypad->col_gpios[i], "matrix_kbd_col"); + + if (active_low ^ gpiod_is_active_low(keypad->col_gpios[i])) + gpiod_toggle_active_low(keypad->col_gpios[i]); + + gpiod_direction_output(keypad->col_gpios[i], 1); } - if (pdata->clustered_irq > 0) { - err = devm_request_any_context_irq(&pdev->dev, - pdata->clustered_irq, - matrix_keypad_interrupt, - pdata->clustered_irq_flags, - "matrix-keypad", keypad); - if (err < 0) { + for (i = 0; i < keypad->num_row_gpios; i++) { + keypad->row_gpios[i] = devm_gpiod_get_index(&pdev->dev, "row", + i, GPIOD_IN); + err = PTR_ERR_OR_ZERO(keypad->row_gpios[i]); + if (err) { dev_err(&pdev->dev, - "Unable to acquire clustered interrupt\n"); + "failed to request GPIO for ROW%d: %d\n", + i, err); return err; } - keypad->row_irqs[0] = pdata->clustered_irq; - keypad->num_row_irqs = 1; - } else { - for (i = 0; i < pdata->num_row_gpios; i++) { - irq = gpio_to_irq(pdata->row_gpios[i]); - if (irq < 0) { - err = irq; - dev_err(&pdev->dev, - "Unable to convert GPIO line %i to irq: %d\n", - pdata->row_gpios[i], err); - return err; - } - - err = devm_request_any_context_irq(&pdev->dev, - irq, - matrix_keypad_interrupt, - IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, - "matrix-keypad", keypad); - if (err < 0) { - dev_err(&pdev->dev, - "Unable to acquire interrupt for GPIO line %i\n", - pdata->row_gpios[i]); - return err; - } - - keypad->row_irqs[i] = irq; - } + gpiod_set_consumer_name(keypad->row_gpios[i], "matrix_kbd_row"); - keypad->num_row_irqs = pdata->num_row_gpios; + if (active_low ^ gpiod_is_active_low(keypad->row_gpios[i])) + gpiod_toggle_active_low(keypad->row_gpios[i]); } - /* initialized as disabled - enabled by input->open */ - disable_row_irqs(keypad); - return 0; } -#ifdef CONFIG_OF -static struct matrix_keypad_platform_data * -matrix_keypad_parse_dt(struct device *dev) +static int matrix_keypad_setup_interrupts(struct platform_device *pdev, + struct matrix_keypad *keypad) { - struct matrix_keypad_platform_data *pdata; - struct device_node *np = dev->of_node; - 
unsigned int *gpios; - int ret, i, nrow, ncol; - - if (!np) { - dev_err(dev, "device lacks DT data\n"); - return ERR_PTR(-ENODEV); - } - - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) { - dev_err(dev, "could not allocate memory for platform data\n"); - return ERR_PTR(-ENOMEM); - } - - pdata->num_row_gpios = nrow = gpiod_count(dev, "row"); - pdata->num_col_gpios = ncol = gpiod_count(dev, "col"); - if (nrow < 0 || ncol < 0) { - dev_err(dev, "number of keypad rows/columns not specified\n"); - return ERR_PTR(-EINVAL); - } - - pdata->no_autorepeat = of_property_read_bool(np, "linux,no-autorepeat"); - - pdata->wakeup = of_property_read_bool(np, "wakeup-source") || - of_property_read_bool(np, "linux,wakeup"); /* legacy */ - - pdata->active_low = of_property_read_bool(np, "gpio-activelow"); - - pdata->drive_inactive_cols = - of_property_read_bool(np, "drive-inactive-cols"); - - of_property_read_u32(np, "debounce-delay-ms", &pdata->debounce_ms); - of_property_read_u32(np, "col-scan-delay-us", - &pdata->col_scan_delay_us); + int err; + int irq; + int i; - gpios = devm_kcalloc(dev, - pdata->num_row_gpios + pdata->num_col_gpios, - sizeof(unsigned int), - GFP_KERNEL); - if (!gpios) { - dev_err(dev, "could not allocate memory for gpios\n"); - return ERR_PTR(-ENOMEM); - } + for (i = 0; i < keypad->num_row_gpios; i++) { + irq = gpiod_to_irq(keypad->row_gpios[i]); + if (irq < 0) { + err = irq; + dev_err(&pdev->dev, + "Unable to convert GPIO line %i to irq: %d\n", + i, err); + return err; + } - for (i = 0; i < nrow; i++) { - ret = of_get_named_gpio(np, "row-gpios", i); - if (ret < 0) - return ERR_PTR(ret); - gpios[i] = ret; - } + err = devm_request_any_context_irq(&pdev->dev, irq, + matrix_keypad_interrupt, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + "matrix-keypad", keypad); + if (err < 0) { + dev_err(&pdev->dev, + "Unable to acquire interrupt for row %i: %d\n", + i, err); + return err; + } - for (i = 0; i < ncol; i++) { - ret = of_get_named_gpio(np, "col-gpios", i); - if (ret < 0) - return ERR_PTR(ret); - gpios[nrow + i] = ret; + keypad->row_irqs[i] = irq; } - pdata->row_gpios = gpios; - pdata->col_gpios = &gpios[pdata->num_row_gpios]; - - return pdata; -} -#else -static inline struct matrix_keypad_platform_data * -matrix_keypad_parse_dt(struct device *dev) -{ - dev_err(dev, "no platform data defined\n"); + /* initialized as disabled - enabled by input->open */ + disable_row_irqs(keypad); - return ERR_PTR(-EINVAL); + return 0; } -#endif static int matrix_keypad_probe(struct platform_device *pdev) { - const struct matrix_keypad_platform_data *pdata; struct matrix_keypad *keypad; struct input_dev *input_dev; + bool wakeup; int err; - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - pdata = matrix_keypad_parse_dt(&pdev->dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); - } else if (!pdata->keymap_data) { - dev_err(&pdev->dev, "no keymap data defined\n"); - return -EINVAL; - } - keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL); if (!keypad) return -ENOMEM; @@ -457,40 +385,56 @@ static int matrix_keypad_probe(struct platform_device *pdev) return -ENOMEM; keypad->input_dev = input_dev; - keypad->pdata = pdata; - keypad->row_shift = get_count_order(pdata->num_col_gpios); keypad->stopped = true; INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan); spin_lock_init(&keypad->lock); + keypad->drive_inactive_cols = + device_property_read_bool(&pdev->dev, "drive-inactive-cols"); + device_property_read_u32(&pdev->dev, "debounce-delay-ms", + &keypad->debounce_ms); + 
device_property_read_u32(&pdev->dev, "col-scan-delay-us", + &keypad->col_scan_delay_us); + + err = matrix_keypad_init_gpio(pdev, keypad); + if (err) + return err; + + keypad->row_shift = get_count_order(keypad->num_col_gpios); + + err = matrix_keypad_setup_interrupts(pdev, keypad); + if (err) + return err; + input_dev->name = pdev->name; input_dev->id.bustype = BUS_HOST; input_dev->open = matrix_keypad_start; input_dev->close = matrix_keypad_stop; - err = matrix_keypad_build_keymap(pdata->keymap_data, NULL, - pdata->num_row_gpios, - pdata->num_col_gpios, + err = matrix_keypad_build_keymap(NULL, NULL, + keypad->num_row_gpios, + keypad->num_col_gpios, NULL, input_dev); if (err) { dev_err(&pdev->dev, "failed to build keymap\n"); return -ENOMEM; } - if (!pdata->no_autorepeat) + if (!device_property_read_bool(&pdev->dev, "linux,no-autorepeat")) __set_bit(EV_REP, input_dev->evbit); + input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_set_drvdata(input_dev, keypad); - err = matrix_keypad_init_gpio(pdev, keypad); - if (err) - return err; - err = input_register_device(keypad->input_dev); if (err) return err; - device_init_wakeup(&pdev->dev, pdata->wakeup); + wakeup = device_property_read_bool(&pdev->dev, "wakeup-source") || + /* legacy */ + device_property_read_bool(&pdev->dev, "linux,wakeup"); + device_init_wakeup(&pdev->dev, wakeup); + platform_set_drvdata(pdev, keypad); return 0; diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c deleted file mode 100644 index 2410f676c7f95e..00000000000000 --- a/drivers/input/keyboard/mcs_touchkey.c +++ /dev/null @@ -1,268 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Touchkey driver for MELFAS MCS5000/5080 controller - * - * Copyright (C) 2010 Samsung Electronics Co.Ltd - * Author: HeungJun Kim - * Author: Joonyoung Shim - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* MCS5000 Touchkey */ -#define MCS5000_TOUCHKEY_STATUS 0x04 -#define MCS5000_TOUCHKEY_STATUS_PRESS 7 -#define MCS5000_TOUCHKEY_FW 0x0a -#define MCS5000_TOUCHKEY_BASE_VAL 0x61 - -/* MCS5080 Touchkey */ -#define MCS5080_TOUCHKEY_STATUS 0x00 -#define MCS5080_TOUCHKEY_STATUS_PRESS 3 -#define MCS5080_TOUCHKEY_FW 0x01 -#define MCS5080_TOUCHKEY_BASE_VAL 0x1 - -enum mcs_touchkey_type { - MCS5000_TOUCHKEY, - MCS5080_TOUCHKEY, -}; - -struct mcs_touchkey_chip { - unsigned int status_reg; - unsigned int pressbit; - unsigned int press_invert; - unsigned int baseval; -}; - -struct mcs_touchkey_data { - void (*poweron)(bool); - - struct i2c_client *client; - struct input_dev *input_dev; - struct mcs_touchkey_chip chip; - unsigned int key_code; - unsigned int key_val; - unsigned short keycodes[]; -}; - -static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id) -{ - struct mcs_touchkey_data *data = dev_id; - struct mcs_touchkey_chip *chip = &data->chip; - struct i2c_client *client = data->client; - struct input_dev *input = data->input_dev; - unsigned int key_val; - unsigned int pressed; - int val; - - val = i2c_smbus_read_byte_data(client, chip->status_reg); - if (val < 0) { - dev_err(&client->dev, "i2c read error [%d]\n", val); - goto out; - } - - pressed = (val & (1 << chip->pressbit)) >> chip->pressbit; - if (chip->press_invert) - pressed ^= chip->press_invert; - - /* key_val is 0 when released, so we should use key_val of press. 
*/ - if (pressed) { - key_val = val & (0xff >> (8 - chip->pressbit)); - if (!key_val) - goto out; - key_val -= chip->baseval; - data->key_code = data->keycodes[key_val]; - data->key_val = key_val; - } - - input_event(input, EV_MSC, MSC_SCAN, data->key_val); - input_report_key(input, data->key_code, pressed); - input_sync(input); - - dev_dbg(&client->dev, "key %d %d %s\n", data->key_val, data->key_code, - pressed ? "pressed" : "released"); - - out: - return IRQ_HANDLED; -} - -static void mcs_touchkey_poweroff(void *data) -{ - struct mcs_touchkey_data *touchkey = data; - - touchkey->poweron(false); -} - -static int mcs_touchkey_probe(struct i2c_client *client) -{ - const struct i2c_device_id *id = i2c_client_get_device_id(client); - const struct mcs_platform_data *pdata; - struct mcs_touchkey_data *data; - struct input_dev *input_dev; - unsigned int fw_reg; - int fw_ver; - int error; - int i; - - pdata = dev_get_platdata(&client->dev); - if (!pdata) { - dev_err(&client->dev, "no platform data defined\n"); - return -EINVAL; - } - - data = devm_kzalloc(&client->dev, - struct_size(data, keycodes, pdata->key_maxval + 1), - GFP_KERNEL); - if (!data) - return -ENOMEM; - - input_dev = devm_input_allocate_device(&client->dev); - if (!input_dev) { - dev_err(&client->dev, "Failed to allocate input device\n"); - return -ENOMEM; - } - - data->client = client; - data->input_dev = input_dev; - - if (id->driver_data == MCS5000_TOUCHKEY) { - data->chip.status_reg = MCS5000_TOUCHKEY_STATUS; - data->chip.pressbit = MCS5000_TOUCHKEY_STATUS_PRESS; - data->chip.baseval = MCS5000_TOUCHKEY_BASE_VAL; - fw_reg = MCS5000_TOUCHKEY_FW; - } else { - data->chip.status_reg = MCS5080_TOUCHKEY_STATUS; - data->chip.pressbit = MCS5080_TOUCHKEY_STATUS_PRESS; - data->chip.press_invert = 1; - data->chip.baseval = MCS5080_TOUCHKEY_BASE_VAL; - fw_reg = MCS5080_TOUCHKEY_FW; - } - - fw_ver = i2c_smbus_read_byte_data(client, fw_reg); - if (fw_ver < 0) { - dev_err(&client->dev, "i2c read error[%d]\n", fw_ver); - return fw_ver; - } - dev_info(&client->dev, "Firmware version: %d\n", fw_ver); - - input_dev->name = "MELFAS MCS Touchkey"; - input_dev->id.bustype = BUS_I2C; - input_dev->evbit[0] = BIT_MASK(EV_KEY); - if (!pdata->no_autorepeat) - input_dev->evbit[0] |= BIT_MASK(EV_REP); - input_dev->keycode = data->keycodes; - input_dev->keycodesize = sizeof(data->keycodes[0]); - input_dev->keycodemax = pdata->key_maxval + 1; - - for (i = 0; i < pdata->keymap_size; i++) { - unsigned int val = MCS_KEY_VAL(pdata->keymap[i]); - unsigned int code = MCS_KEY_CODE(pdata->keymap[i]); - - data->keycodes[val] = code; - __set_bit(code, input_dev->keybit); - } - - input_set_capability(input_dev, EV_MSC, MSC_SCAN); - input_set_drvdata(input_dev, data); - - if (pdata->cfg_pin) - pdata->cfg_pin(); - - if (pdata->poweron) { - data->poweron = pdata->poweron; - data->poweron(true); - - error = devm_add_action_or_reset(&client->dev, - mcs_touchkey_poweroff, data); - if (error) - return error; - } - - error = devm_request_threaded_irq(&client->dev, client->irq, - NULL, mcs_touchkey_interrupt, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - client->dev.driver->name, data); - if (error) { - dev_err(&client->dev, "Failed to register interrupt\n"); - return error; - } - - error = input_register_device(input_dev); - if (error) - return error; - - i2c_set_clientdata(client, data); - return 0; -} - -static void mcs_touchkey_shutdown(struct i2c_client *client) -{ - struct mcs_touchkey_data *data = i2c_get_clientdata(client); - - if (data->poweron) - data->poweron(false); -} - 
-static int mcs_touchkey_suspend(struct device *dev) -{ - struct mcs_touchkey_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - - /* Disable the work */ - disable_irq(client->irq); - - /* Finally turn off the power */ - if (data->poweron) - data->poweron(false); - - return 0; -} - -static int mcs_touchkey_resume(struct device *dev) -{ - struct mcs_touchkey_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - - /* Enable the device first */ - if (data->poweron) - data->poweron(true); - - /* Enable irq again */ - enable_irq(client->irq); - - return 0; -} - -static DEFINE_SIMPLE_DEV_PM_OPS(mcs_touchkey_pm_ops, - mcs_touchkey_suspend, mcs_touchkey_resume); - -static const struct i2c_device_id mcs_touchkey_id[] = { - { "mcs5000_touchkey", MCS5000_TOUCHKEY }, - { "mcs5080_touchkey", MCS5080_TOUCHKEY }, - { } -}; -MODULE_DEVICE_TABLE(i2c, mcs_touchkey_id); - -static struct i2c_driver mcs_touchkey_driver = { - .driver = { - .name = "mcs_touchkey", - .pm = pm_sleep_ptr(&mcs_touchkey_pm_ops), - }, - .probe = mcs_touchkey_probe, - .shutdown = mcs_touchkey_shutdown, - .id_table = mcs_touchkey_id, -}; - -module_i2c_driver(mcs_touchkey_driver); - -/* Module information */ -MODULE_AUTHOR("Joonyoung Shim "); -MODULE_AUTHOR("HeungJun Kim "); -MODULE_DESCRIPTION("Touchkey driver for MELFAS MCS5000/5080 controller"); -MODULE_LICENSE("GPL"); diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c index 19f69d167fbd83..e5eb025c5e9937 100644 --- a/drivers/input/keyboard/mt6779-keypad.c +++ b/drivers/input/keyboard/mt6779-keypad.c @@ -92,11 +92,6 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static void mt6779_keypad_clk_disable(void *data) -{ - clk_disable_unprepare(data); -} - static void mt6779_keypad_calc_row_col_single(unsigned int key, unsigned int *row, unsigned int *col) @@ -213,21 +208,10 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev) regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL, MTK_KPD_SEL_COLMASK(keypad->n_cols)); - keypad->clk = devm_clk_get(&pdev->dev, "kpd"); + keypad->clk = devm_clk_get_enabled(&pdev->dev, "kpd"); if (IS_ERR(keypad->clk)) return PTR_ERR(keypad->clk); - error = clk_prepare_enable(keypad->clk); - if (error) { - dev_err(&pdev->dev, "cannot prepare/enable keypad clock\n"); - return error; - } - - error = devm_add_action_or_reset(&pdev->dev, mt6779_keypad_clk_disable, - keypad->clk); - if (error) - return error; - irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; @@ -260,6 +244,7 @@ static const struct of_device_id mt6779_keypad_of_match[] = { { .compatible = "mediatek,mt6873-keypad" }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, mt6779_keypad_of_match); static struct platform_driver mt6779_keypad_pdrv = { .probe = mt6779_keypad_pdrv_probe, diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c deleted file mode 100644 index b3ccc97f61e1fb..00000000000000 --- a/drivers/input/keyboard/nomadik-ske-keypad.c +++ /dev/null @@ -1,378 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) ST-Ericsson SA 2010 - * - * Author: Naveen Kumar G for ST-Ericsson - * Author: Sundar Iyer for ST-Ericsson - * - * Keypad controller driver for the SKE (Scroll Key Encoder) module used in - * the Nomadik 8815 and Ux500 platforms. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* SKE_CR bits */ -#define SKE_KPMLT (0x1 << 6) -#define SKE_KPCN (0x7 << 3) -#define SKE_KPASEN (0x1 << 2) -#define SKE_KPASON (0x1 << 7) - -/* SKE_IMSC bits */ -#define SKE_KPIMA (0x1 << 2) - -/* SKE_ICR bits */ -#define SKE_KPICS (0x1 << 3) -#define SKE_KPICA (0x1 << 2) - -/* SKE_RIS bits */ -#define SKE_KPRISA (0x1 << 2) - -#define SKE_KEYPAD_ROW_SHIFT 3 -#define SKE_KPD_NUM_ROWS 8 -#define SKE_KPD_NUM_COLS 8 - -/* keypad auto scan registers */ -#define SKE_ASR0 0x20 -#define SKE_ASR1 0x24 -#define SKE_ASR2 0x28 -#define SKE_ASR3 0x2C - -#define SKE_NUM_ASRX_REGISTERS (4) -#define KEY_PRESSED_DELAY 10 - -/** - * struct ske_keypad - data structure used by keypad driver - * @irq: irq no - * @reg_base: ske registers base address - * @input: pointer to input device object - * @board: keypad platform device - * @keymap: matrix scan code table for keycodes - * @clk: clock structure pointer - * @pclk: clock structure pointer - * @ske_keypad_lock: spinlock protecting the keypad read/writes - */ -struct ske_keypad { - int irq; - void __iomem *reg_base; - struct input_dev *input; - const struct ske_keypad_platform_data *board; - unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS]; - struct clk *clk; - struct clk *pclk; - spinlock_t ske_keypad_lock; -}; - -static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr, - u8 mask, u8 data) -{ - u32 ret; - - spin_lock(&keypad->ske_keypad_lock); - - ret = readl(keypad->reg_base + addr); - ret &= ~mask; - ret |= data; - writel(ret, keypad->reg_base + addr); - - spin_unlock(&keypad->ske_keypad_lock); -} - -/* - * ske_keypad_chip_init: init keypad controller configuration - * - * Enable Multi key press detection, auto scan mode - */ -static int __init ske_keypad_chip_init(struct ske_keypad *keypad) -{ - u32 value; - int timeout = keypad->board->debounce_ms; - - /* check SKE_RIS to be 0 */ - while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--) - cpu_relax(); - - if (timeout == -1) - return -EINVAL; - - /* - * set debounce value - * keypad dbounce is configured in DBCR[15:8] - * dbounce value in steps of 32/32.768 ms - */ - spin_lock(&keypad->ske_keypad_lock); - value = readl(keypad->reg_base + SKE_DBCR); - value = value & 0xff; - value |= ((keypad->board->debounce_ms * 32000)/32768) << 8; - writel(value, keypad->reg_base + SKE_DBCR); - spin_unlock(&keypad->ske_keypad_lock); - - /* enable multi key detection */ - ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT); - - /* - * set up the number of columns - * KPCN[5:3] defines no. 
of keypad columns to be auto scanned - */ - value = (keypad->board->kcol - 1) << 3; - ske_keypad_set_bits(keypad, SKE_CR, SKE_KPCN, value); - - /* clear keypad interrupt for auto(and pending SW) scans */ - ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA | SKE_KPICS); - - /* un-mask keypad interrupts */ - ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); - - /* enable automatic scan */ - ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPASEN); - - return 0; -} - -static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col) -{ - int row = 0, code, pos; - struct input_dev *input = keypad->input; - u32 ske_ris; - int key_pressed; - int num_of_rows; - - /* find out the row */ - num_of_rows = hweight8(status); - do { - pos = __ffs(status); - row = pos; - status &= ~(1 << pos); - - code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT); - ske_ris = readl(keypad->reg_base + SKE_RIS); - key_pressed = ske_ris & SKE_KPRISA; - - input_event(input, EV_MSC, MSC_SCAN, code); - input_report_key(input, keypad->keymap[code], key_pressed); - input_sync(input); - num_of_rows--; - } while (num_of_rows); -} - -static void ske_keypad_read_data(struct ske_keypad *keypad) -{ - u8 status; - int col = 0; - int ske_asr, i; - - /* - * Read the auto scan registers - * - * Each SKE_ASRx (x=0 to x=3) contains two row values. - * lower byte contains row value for column 2*x, - * upper byte contains row value for column 2*x + 1 - */ - for (i = 0; i < SKE_NUM_ASRX_REGISTERS; i++) { - ske_asr = readl(keypad->reg_base + SKE_ASR0 + (4 * i)); - if (!ske_asr) - continue; - - /* now that ASRx is zero, find out the coloumn x and row y */ - status = ske_asr & 0xff; - if (status) { - col = i * 2; - ske_keypad_report(keypad, status, col); - } - status = (ske_asr & 0xff00) >> 8; - if (status) { - col = (i * 2) + 1; - ske_keypad_report(keypad, status, col); - } - } -} - -static irqreturn_t ske_keypad_irq(int irq, void *dev_id) -{ - struct ske_keypad *keypad = dev_id; - int timeout = keypad->board->debounce_ms; - - /* disable auto scan interrupt; mask the interrupt generated */ - ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0); - ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA); - - while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --timeout) - cpu_relax(); - - /* SKEx registers are stable and can be read */ - ske_keypad_read_data(keypad); - - /* wait until raw interrupt is clear */ - while ((readl(keypad->reg_base + SKE_RIS)) && --timeout) - msleep(KEY_PRESSED_DELAY); - - /* enable auto scan interrupts */ - ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); - - return IRQ_HANDLED; -} - -static void ske_keypad_board_exit(void *data) -{ - struct ske_keypad *keypad = data; - - keypad->board->exit(); -} - -static int __init ske_keypad_probe(struct platform_device *pdev) -{ - const struct ske_keypad_platform_data *plat = - dev_get_platdata(&pdev->dev); - struct device *dev = &pdev->dev; - struct ske_keypad *keypad; - struct input_dev *input; - int irq; - int error; - - if (!plat) { - dev_err(&pdev->dev, "invalid keypad platform data\n"); - return -EINVAL; - } - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - keypad = devm_kzalloc(dev, sizeof(struct ske_keypad), - GFP_KERNEL); - input = devm_input_allocate_device(dev); - if (!keypad || !input) { - dev_err(&pdev->dev, "failed to allocate keypad memory\n"); - return -ENOMEM; - } - - keypad->irq = irq; - keypad->board = plat; - keypad->input = input; - spin_lock_init(&keypad->ske_keypad_lock); - - keypad->reg_base = 
devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(keypad->reg_base)) - return PTR_ERR(keypad->reg_base); - - keypad->pclk = devm_clk_get_enabled(dev, "apb_pclk"); - if (IS_ERR(keypad->pclk)) { - dev_err(&pdev->dev, "failed to get pclk\n"); - return PTR_ERR(keypad->pclk); - } - - keypad->clk = devm_clk_get_enabled(dev, NULL); - if (IS_ERR(keypad->clk)) { - dev_err(&pdev->dev, "failed to get clk\n"); - return PTR_ERR(keypad->clk); - } - - input->id.bustype = BUS_HOST; - input->name = "ux500-ske-keypad"; - input->dev.parent = &pdev->dev; - - error = matrix_keypad_build_keymap(plat->keymap_data, NULL, - SKE_KPD_NUM_ROWS, SKE_KPD_NUM_COLS, - keypad->keymap, input); - if (error) { - dev_err(&pdev->dev, "Failed to build keymap\n"); - return error; - } - - input_set_capability(input, EV_MSC, MSC_SCAN); - if (!plat->no_autorepeat) - __set_bit(EV_REP, input->evbit); - - /* go through board initialization helpers */ - if (keypad->board->init) - keypad->board->init(); - - if (keypad->board->exit) { - error = devm_add_action_or_reset(dev, ske_keypad_board_exit, - keypad); - if (error) - return error; - } - - error = ske_keypad_chip_init(keypad); - if (error) { - dev_err(&pdev->dev, "unable to init keypad hardware\n"); - return error; - } - - error = devm_request_threaded_irq(dev, keypad->irq, - NULL, ske_keypad_irq, - IRQF_ONESHOT, "ske-keypad", keypad); - if (error) { - dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq); - return error; - } - - error = input_register_device(input); - if (error) { - dev_err(&pdev->dev, - "unable to register input device: %d\n", error); - return error; - } - - if (plat->wakeup_enable) - device_init_wakeup(&pdev->dev, true); - - platform_set_drvdata(pdev, keypad); - - return 0; -} - -static int ske_keypad_suspend(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ske_keypad *keypad = platform_get_drvdata(pdev); - int irq = platform_get_irq(pdev, 0); - - if (device_may_wakeup(dev)) - enable_irq_wake(irq); - else - ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0); - - return 0; -} - -static int ske_keypad_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ske_keypad *keypad = platform_get_drvdata(pdev); - int irq = platform_get_irq(pdev, 0); - - if (device_may_wakeup(dev)) - disable_irq_wake(irq); - else - ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); - - return 0; -} - -static DEFINE_SIMPLE_DEV_PM_OPS(ske_keypad_dev_pm_ops, - ske_keypad_suspend, ske_keypad_resume); - -static struct platform_driver ske_keypad_driver = { - .driver = { - .name = "nmk-ske-keypad", - .pm = pm_sleep_ptr(&ske_keypad_dev_pm_ops), - }, -}; - -module_platform_driver_probe(ske_keypad_driver, ske_keypad_probe); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Naveen Kumar / Sundar Iyer "); -MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver"); -MODULE_ALIAS("platform:nomadik-ske-keypad"); diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c index 5a2592e6293db2..bce8157d187166 100644 --- a/drivers/input/keyboard/qt1050.c +++ b/drivers/input/keyboard/qt1050.c @@ -346,35 +346,34 @@ static int qt1050_apply_fw_data(struct qt1050_priv *ts) static int qt1050_parse_fw(struct qt1050_priv *ts) { struct device *dev = &ts->client->dev; - struct fwnode_handle *child; int nbuttons; nbuttons = device_get_child_node_count(dev); if (nbuttons == 0 || nbuttons > QT1050_MAX_KEYS) return -ENODEV; - device_for_each_child_node(dev, child) { + 
device_for_each_child_node_scoped(dev, child) { struct qt1050_key button; /* Required properties */ if (fwnode_property_read_u32(child, "linux,code", &button.keycode)) { dev_err(dev, "Button without keycode\n"); - goto err; + return -EINVAL; } if (button.keycode >= KEY_MAX) { dev_err(dev, "Invalid keycode 0x%x\n", button.keycode); - goto err; + return -EINVAL; } if (fwnode_property_read_u32(child, "reg", &button.num)) { dev_err(dev, "Button without pad number\n"); - goto err; + return -EINVAL; } if (button.num < 0 || button.num > QT1050_MAX_KEYS - 1) - goto err; + return -EINVAL; ts->reg_keys |= BIT(button.num); @@ -424,10 +423,6 @@ static int qt1050_parse_fw(struct qt1050_priv *ts) } return 0; - -err: - fwnode_handle_put(child); - return -EINVAL; } static int qt1050_probe(struct i2c_client *client) diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c index ad8660be0127c2..f7b5f1e25c803d 100644 --- a/drivers/input/keyboard/snvs_pwrkey.c +++ b/drivers/input/keyboard/snvs_pwrkey.c @@ -100,11 +100,6 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void imx_snvs_pwrkey_disable_clk(void *data) -{ - clk_disable_unprepare(data); -} - static void imx_snvs_pwrkey_act(void *pdata) { struct pwrkey_drv_data *pd = pdata; @@ -141,28 +136,12 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n"); } - clk = devm_clk_get_optional(&pdev->dev, NULL); + clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk); return PTR_ERR(clk); } - error = clk_prepare_enable(clk); - if (error) { - dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n", - ERR_PTR(error)); - return error; - } - - error = devm_add_action_or_reset(&pdev->dev, - imx_snvs_pwrkey_disable_clk, clk); - if (error) { - dev_err(&pdev->dev, - "Failed to register clock cleanup handler (%pe)\n", - ERR_PTR(error)); - return error; - } - pdata->wakeup = of_property_read_bool(np, "wakeup-source"); pdata->irq = platform_get_irq(pdev, 0); @@ -204,7 +183,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) error = devm_request_irq(&pdev->dev, pdata->irq, imx_snvs_pwrkey_interrupt, 0, pdev->name, pdev); - if (error) { dev_err(&pdev->dev, "interrupt not available.\n"); return error; diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c index 557d00a667cea5..1df4feb8ba0178 100644 --- a/drivers/input/keyboard/spear-keyboard.c +++ b/drivers/input/keyboard/spear-keyboard.c @@ -222,7 +222,7 @@ static int spear_kbd_probe(struct platform_device *pdev) if (IS_ERR(kbd->io_base)) return PTR_ERR(kbd->io_base); - kbd->clk = devm_clk_get(&pdev->dev, NULL); + kbd->clk = devm_clk_get_prepared(&pdev->dev, NULL); if (IS_ERR(kbd->clk)) return PTR_ERR(kbd->clk); @@ -255,14 +255,9 @@ static int spear_kbd_probe(struct platform_device *pdev) return error; } - error = clk_prepare(kbd->clk); - if (error) - return error; - error = input_register_device(input_dev); if (error) { dev_err(&pdev->dev, "Unable to register keyboard device\n"); - clk_unprepare(kbd->clk); return error; } @@ -272,14 +267,6 @@ static int spear_kbd_probe(struct platform_device *pdev) return 0; } -static void spear_kbd_remove(struct platform_device *pdev) -{ - struct spear_kbd *kbd = platform_get_drvdata(pdev); - - input_unregister_device(kbd->input); - clk_unprepare(kbd->clk); -} - static int spear_kbd_suspend(struct device 
*dev) { struct platform_device *pdev = to_platform_device(dev); @@ -373,7 +360,6 @@ MODULE_DEVICE_TABLE(of, spear_kbd_id_table); static struct platform_driver spear_kbd_driver = { .probe = spear_kbd_probe, - .remove_new = spear_kbd_remove, .driver = { .name = "keyboard", .pm = pm_sleep_ptr(&spear_kbd_pm_ops), diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c index b0d86621c60a33..11988cffdfae8c 100644 --- a/drivers/input/keyboard/tc3589x-keypad.c +++ b/drivers/input/keyboard/tc3589x-keypad.c @@ -325,7 +325,6 @@ tc3589x_keypad_of_probe(struct device *dev) struct tc3589x_keypad_platform_data *plat; u32 cols, rows; u32 debounce_ms; - int proplen; if (!np) return ERR_PTR(-ENODEV); @@ -346,7 +345,7 @@ tc3589x_keypad_of_probe(struct device *dev) return ERR_PTR(-EINVAL); } - if (!of_get_property(np, "linux,keymap", &proplen)) { + if (!of_property_present(np, "linux,keymap")) { dev_err(dev, "property linux,keymap not found\n"); return ERR_PTR(-ENOENT); } diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index a1765ed8c825c1..6776dd94ce76ab 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -241,11 +241,10 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable) static void tegra_kbc_keypress_timer(struct timer_list *t) { struct tegra_kbc *kbc = from_timer(kbc, t, timer); - unsigned long flags; u32 val; unsigned int i; - spin_lock_irqsave(&kbc->lock, flags); + guard(spinlock_irqsave)(&kbc->lock); val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf; if (val) { @@ -270,17 +269,14 @@ static void tegra_kbc_keypress_timer(struct timer_list *t) /* All keys are released so enable the keypress interrupt */ tegra_kbc_set_fifo_interrupt(kbc, true); } - - spin_unlock_irqrestore(&kbc->lock, flags); } static irqreturn_t tegra_kbc_isr(int irq, void *args) { struct tegra_kbc *kbc = args; - unsigned long flags; u32 val; - spin_lock_irqsave(&kbc->lock, flags); + guard(spinlock_irqsave)(&kbc->lock); /* * Quickly bail out & reenable interrupts if the fifo threshold @@ -301,8 +297,6 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args) kbc->keypress_caused_wake = true; } - spin_unlock_irqrestore(&kbc->lock, flags); - return IRQ_HANDLED; } @@ -413,14 +407,13 @@ static int tegra_kbc_start(struct tegra_kbc *kbc) static void tegra_kbc_stop(struct tegra_kbc *kbc) { - unsigned long flags; u32 val; - spin_lock_irqsave(&kbc->lock, flags); - val = readl(kbc->mmio + KBC_CONTROL_0); - val &= ~1; - writel(val, kbc->mmio + KBC_CONTROL_0); - spin_unlock_irqrestore(&kbc->lock, flags); + scoped_guard(spinlock_irqsave, &kbc->lock) { + val = readl(kbc->mmio + KBC_CONTROL_0); + val &= ~1; + writel(val, kbc->mmio + KBC_CONTROL_0); + } disable_irq(kbc->irq); del_timer_sync(&kbc->timer); @@ -491,12 +484,10 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc) struct device_node *np = kbc->dev->of_node; u32 prop; int i; - u32 num_rows = 0; - u32 num_cols = 0; + int num_rows; + int num_cols; u32 cols_cfg[KBC_MAX_GPIO]; u32 rows_cfg[KBC_MAX_GPIO]; - int proplen; - int ret; if (!of_property_read_u32(np, "nvidia,debounce-delay-ms", &prop)) kbc->debounce_cnt = prop; @@ -510,56 +501,23 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc) of_property_read_bool(np, "nvidia,wakeup-source")) /* legacy */ kbc->wakeup = true; - if (!of_get_property(np, "nvidia,kbc-row-pins", &proplen)) { - dev_err(kbc->dev, "property nvidia,kbc-row-pins not found\n"); - return -ENOENT; - } - num_rows = proplen / 
sizeof(u32); - - if (!of_get_property(np, "nvidia,kbc-col-pins", &proplen)) { - dev_err(kbc->dev, "property nvidia,kbc-col-pins not found\n"); - return -ENOENT; - } - num_cols = proplen / sizeof(u32); - - if (num_rows > kbc->hw_support->max_rows) { - dev_err(kbc->dev, - "Number of rows is more than supported by hardware\n"); - return -EINVAL; - } - - if (num_cols > kbc->hw_support->max_columns) { - dev_err(kbc->dev, - "Number of cols is more than supported by hardware\n"); - return -EINVAL; - } - - if (!of_get_property(np, "linux,keymap", &proplen)) { + if (!of_property_present(np, "linux,keymap")) { dev_err(kbc->dev, "property linux,keymap not found\n"); return -ENOENT; } - if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) { - dev_err(kbc->dev, - "keypad rows/columns not properly specified\n"); - return -EINVAL; - } - /* Set all pins as non-configured */ for (i = 0; i < kbc->num_rows_and_columns; i++) kbc->pin_cfg[i].type = PIN_CFG_IGNORE; - ret = of_property_read_u32_array(np, "nvidia,kbc-row-pins", - rows_cfg, num_rows); - if (ret < 0) { + num_rows = of_property_read_variable_u32_array(np, "nvidia,kbc-row-pins", + rows_cfg, 1, KBC_MAX_GPIO); + if (num_rows < 0) { dev_err(kbc->dev, "Rows configurations are not proper\n"); - return -EINVAL; - } - - ret = of_property_read_u32_array(np, "nvidia,kbc-col-pins", - cols_cfg, num_cols); - if (ret < 0) { - dev_err(kbc->dev, "Cols configurations are not proper\n"); + return num_rows; + } else if (num_rows > kbc->hw_support->max_rows) { + dev_err(kbc->dev, + "Number of rows is more than supported by hardware\n"); return -EINVAL; } @@ -568,11 +526,28 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc) kbc->pin_cfg[rows_cfg[i]].num = i; } + num_cols = of_property_read_variable_u32_array(np, "nvidia,kbc-col-pins", + cols_cfg, 1, KBC_MAX_GPIO); + if (num_cols < 0) { + dev_err(kbc->dev, "Cols configurations are not proper\n"); + return num_cols; + } else if (num_cols > kbc->hw_support->max_columns) { + dev_err(kbc->dev, + "Number of cols is more than supported by hardware\n"); + return -EINVAL; + } + for (i = 0; i < num_cols; i++) { kbc->pin_cfg[cols_cfg[i]].type = PIN_CFG_COL; kbc->pin_cfg[cols_cfg[i]].num = i; } + if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) { + dev_err(kbc->dev, + "keypad rows/columns not properly specified\n"); + return -EINVAL; + } + return 0; } @@ -724,7 +699,8 @@ static int tegra_kbc_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct tegra_kbc *kbc = platform_get_drvdata(pdev); - mutex_lock(&kbc->idev->mutex); + guard(mutex)(&kbc->idev->mutex); + if (device_may_wakeup(&pdev->dev)) { disable_irq(kbc->irq); del_timer_sync(&kbc->timer); @@ -747,11 +723,9 @@ static int tegra_kbc_suspend(struct device *dev) tegra_kbc_set_keypress_interrupt(kbc, true); enable_irq(kbc->irq); enable_irq_wake(kbc->irq); - } else { - if (input_device_enabled(kbc->idev)) - tegra_kbc_stop(kbc); + } else if (input_device_enabled(kbc->idev)) { + tegra_kbc_stop(kbc); } - mutex_unlock(&kbc->idev->mutex); return 0; } @@ -760,9 +734,10 @@ static int tegra_kbc_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct tegra_kbc *kbc = platform_get_drvdata(pdev); - int err = 0; + int err; + + guard(mutex)(&kbc->idev->mutex); - mutex_lock(&kbc->idev->mutex); if (device_may_wakeup(&pdev->dev)) { disable_irq_wake(kbc->irq); tegra_kbc_setup_wakekeys(kbc, false); @@ -787,13 +762,13 @@ static int tegra_kbc_resume(struct device *dev) 
input_report_key(kbc->idev, kbc->wakeup_key, 0); input_sync(kbc->idev); } - } else { - if (input_device_enabled(kbc->idev)) - err = tegra_kbc_start(kbc); + } else if (input_device_enabled(kbc->idev)) { + err = tegra_kbc_start(kbc); + if (err) + return err; } - mutex_unlock(&kbc->idev->mutex); - return err; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c index 5d93043bad8ecd..3bea3575a0a941 100644 --- a/drivers/input/matrix-keymap.c +++ b/drivers/input/matrix-keymap.c @@ -73,10 +73,9 @@ static int matrix_keypad_parse_keymap(const char *propname, struct device *dev = input_dev->dev.parent; unsigned int row_shift = get_count_order(cols); unsigned int max_keys = rows << row_shift; - u32 *keys; int i; int size; - int retval; + int error; if (!propname) propname = "linux,keymap"; @@ -94,30 +93,24 @@ static int matrix_keypad_parse_keymap(const char *propname, return -EINVAL; } - keys = kmalloc_array(size, sizeof(u32), GFP_KERNEL); + u32 *keys __free(kfree) = kmalloc_array(size, sizeof(*keys), GFP_KERNEL); if (!keys) return -ENOMEM; - retval = device_property_read_u32_array(dev, propname, keys, size); - if (retval) { + error = device_property_read_u32_array(dev, propname, keys, size); + if (error) { dev_err(dev, "failed to read %s property: %d\n", - propname, retval); - goto out; + propname, error); + return error; } for (i = 0; i < size; i++) { if (!matrix_keypad_map_key(input_dev, rows, cols, - row_shift, keys[i])) { - retval = -EINVAL; - goto out; - } + row_shift, keys[i])) + return -EINVAL; } - retval = 0; - -out: - kfree(keys); - return retval; + return 0; } /** diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index c086dadb45e3a7..4215f9b9c2b07a 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #define IMS_PCU_KEYMAP_LEN 32 @@ -1067,7 +1067,7 @@ static ssize_t ims_pcu_attribute_store(struct device *dev, if (data_len > attr->field_length) return -EINVAL; - scoped_cond_guard(mutex, return -EINTR, &pcu->cmd_mutex) { + scoped_cond_guard(mutex_intr, return -EINTR, &pcu->cmd_mutex) { memset(field, 0, attr->field_length); memcpy(field, buf, data_len); diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c index cd14ff9f57cf23..843f8a3f3410d4 100644 --- a/drivers/input/misc/iqs269a.c +++ b/drivers/input/misc/iqs269a.c @@ -811,7 +811,6 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269) { struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg; struct i2c_client *client = iqs269->client; - struct fwnode_handle *ch_node; u16 general, misc_a, misc_b; unsigned int val; int error; @@ -1049,12 +1048,10 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269) sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS); - device_for_each_child_node(&client->dev, ch_node) { + device_for_each_child_node_scoped(&client->dev, ch_node) { error = iqs269_parse_chan(iqs269, ch_node); - if (error) { - fwnode_handle_put(ch_node); + if (error) return error; - } } /* diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c index 9ca5a743f19feb..be80a31de9f8f4 100644 --- a/drivers/input/misc/iqs7222.c +++ b/drivers/input/misc/iqs7222.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #define IQS7222_PROD_NUM 0x00 #define IQS7222_PROD_NUM_A 840 diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c index 
1d99206dd3a8b4..eb4173f9c82044 100644 --- a/drivers/input/misc/nxp-bbnsm-pwrkey.c +++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c @@ -38,6 +38,7 @@ struct bbnsm_pwrkey { int irq; int keycode; int keystate; /* 1:pressed */ + bool suspended; struct timer_list check_timer; struct input_dev *input; }; @@ -70,6 +71,7 @@ static irqreturn_t bbnsm_pwrkey_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev); + struct input_dev *input = bbnsm->input; u32 event; regmap_read(bbnsm->regmap, BBNSM_EVENTS, &event); @@ -78,6 +80,18 @@ static irqreturn_t bbnsm_pwrkey_interrupt(int irq, void *dev_id) pm_wakeup_event(bbnsm->input->dev.parent, 0); + /* + * Directly report key event after resume to make sure key press + * event is never missed. + */ + if (bbnsm->suspended) { + bbnsm->keystate = 1; + input_event(input, EV_KEY, bbnsm->keycode, 1); + input_sync(input); + /* Fire at most once per suspend/resume cycle */ + bbnsm->suspended = false; + } + mod_timer(&bbnsm->check_timer, jiffies + msecs_to_jiffies(DEBOUNCE_TIME)); @@ -173,6 +187,29 @@ static int bbnsm_pwrkey_probe(struct platform_device *pdev) return 0; } +static int __maybe_unused bbnsm_pwrkey_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev); + + bbnsm->suspended = true; + + return 0; +} + +static int __maybe_unused bbnsm_pwrkey_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev); + + bbnsm->suspended = false; + + return 0; +} + +static SIMPLE_DEV_PM_OPS(bbnsm_pwrkey_pm_ops, bbnsm_pwrkey_suspend, + bbnsm_pwrkey_resume); + static const struct of_device_id bbnsm_pwrkey_ids[] = { { .compatible = "nxp,imx93-bbnsm-pwrkey" }, { /* sentinel */ } @@ -182,6 +219,7 @@ MODULE_DEVICE_TABLE(of, bbnsm_pwrkey_ids); static struct platform_driver bbnsm_pwrkey_driver = { .driver = { .name = "bbnsm_pwrkey", + .pm = &bbnsm_pwrkey_pm_ops, .of_match_table = bbnsm_pwrkey_ids, }, .probe = bbnsm_pwrkey_probe, diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 445856c9127aaa..2c51ea9d01d777 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -1132,7 +1132,6 @@ static const struct file_operations uinput_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = uinput_compat_ioctl, #endif - .llseek = no_llseek, }; static struct miscdevice uinput_misc = { diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c index 5c4956678cd0ae..5a64557920fa59 100644 --- a/drivers/input/misc/wistron_btns.c +++ b/drivers/input/misc/wistron_btns.c @@ -990,8 +990,8 @@ static int __init copy_keymap(void) for (key = keymap; key->type != KE_END; key++) length++; - new_keymap = kmemdup(keymap, length * sizeof(struct key_entry), - GFP_KERNEL); + new_keymap = kmemdup_array(keymap, length, sizeof(struct key_entry), + GFP_KERNEL); if (!new_keymap) return -ENOMEM; @@ -1075,7 +1075,7 @@ static void wistron_led_init(struct device *parent) } if (leds_present & FE_MAIL_LED) { - /* bios_get_default_setting(MAIL) always retuns 0, so just turn the led off */ + /* bios_get_default_setting(MAIL) always returns 0, so just turn the led off */ wistron_mail_led.brightness = LED_OFF; if (led_classdev_register(parent, &wistron_mail_led)) leds_present &= ~FE_MAIL_LED; diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index d5ef5a112d6f03..4e37fc3f1a9e97 100644 --- 
a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1396,24 +1396,16 @@ static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse) static DEFINE_MUTEX(alps_mutex); -static void alps_register_bare_ps2_mouse(struct work_struct *work) +static int alps_do_register_bare_ps2_mouse(struct alps_data *priv) { - struct alps_data *priv = - container_of(work, struct alps_data, dev3_register_work.work); struct psmouse *psmouse = priv->psmouse; struct input_dev *dev3; - int error = 0; - - mutex_lock(&alps_mutex); - - if (priv->dev3) - goto out; + int error; dev3 = input_allocate_device(); if (!dev3) { psmouse_err(psmouse, "failed to allocate secondary device\n"); - error = -ENOMEM; - goto out; + return -ENOMEM; } snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s", @@ -1446,21 +1438,35 @@ static void alps_register_bare_ps2_mouse(struct work_struct *work) psmouse_err(psmouse, "failed to register secondary device: %d\n", error); - input_free_device(dev3); - goto out; + goto err_free_input; } priv->dev3 = dev3; + return 0; -out: - /* - * Save the error code so that we can detect that we - * already tried to create the device. - */ - if (error) - priv->dev3 = ERR_PTR(error); +err_free_input: + input_free_device(dev3); + return error; +} - mutex_unlock(&alps_mutex); +static void alps_register_bare_ps2_mouse(struct work_struct *work) +{ + struct alps_data *priv = container_of(work, struct alps_data, + dev3_register_work.work); + int error; + + guard(mutex)(&alps_mutex); + + if (!priv->dev3) { + error = alps_do_register_bare_ps2_mouse(priv); + if (error) { + /* + * Save the error code so that we can detect that we + * already tried to create the device. + */ + priv->dev3 = ERR_PTR(error); + } + } } static void alps_report_bare_ps2_packet(struct psmouse *psmouse, diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 10a03a56690588..dfdfb59cc8b597 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -834,13 +834,11 @@ static int bcm5974_open(struct input_dev *input) if (error) return error; - mutex_lock(&dev->pm_mutex); - - error = bcm5974_start_traffic(dev); - if (!error) - dev->opened = 1; - - mutex_unlock(&dev->pm_mutex); + scoped_guard(mutex, &dev->pm_mutex) { + error = bcm5974_start_traffic(dev); + if (!error) + dev->opened = 1; + } if (error) usb_autopm_put_interface(dev->intf); @@ -852,12 +850,10 @@ static void bcm5974_close(struct input_dev *input) { struct bcm5974 *dev = input_get_drvdata(input); - mutex_lock(&dev->pm_mutex); - - bcm5974_pause_traffic(dev); - dev->opened = 0; - - mutex_unlock(&dev->pm_mutex); + scoped_guard(mutex, &dev->pm_mutex) { + bcm5974_pause_traffic(dev); + dev->opened = 0; + } usb_autopm_put_interface(dev->intf); } @@ -866,29 +862,24 @@ static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message) { struct bcm5974 *dev = usb_get_intfdata(iface); - mutex_lock(&dev->pm_mutex); + guard(mutex)(&dev->pm_mutex); if (dev->opened) bcm5974_pause_traffic(dev); - mutex_unlock(&dev->pm_mutex); - return 0; } static int bcm5974_resume(struct usb_interface *iface) { struct bcm5974 *dev = usb_get_intfdata(iface); - int error = 0; - mutex_lock(&dev->pm_mutex); + guard(mutex)(&dev->pm_mutex); if (dev->opened) - error = bcm5974_start_traffic(dev); + return bcm5974_start_traffic(dev); - mutex_unlock(&dev->pm_mutex); - - return error; + return 0; } static int bcm5974_probe(struct usb_interface *iface, diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c index 
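[Editorial aside, not part of the patch: the alps.c and bcm5974.c hunks above replace open-coded mutex_lock()/mutex_unlock() pairs with the guard()/scoped_guard() helpers from <linux/cleanup.h>, so the lock is released on every return path automatically. A hypothetical sketch of both forms, under assumed `example_*` names:]

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct example_dev {				/* hypothetical driver state */
	struct mutex pm_mutex;
	bool opened;
};

static int example_start_traffic(struct example_dev *ex);	/* hypothetical helper */

static int example_resume(struct example_dev *ex)
{
	guard(mutex)(&ex->pm_mutex);		/* unlocked on every return path */

	if (ex->opened)
		return example_start_traffic(ex);

	return 0;
}

static void example_close(struct example_dev *ex)
{
	scoped_guard(mutex, &ex->pm_mutex) {	/* lock held only for this block */
		ex->opened = false;
	}
}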
60c83bc71d84e6..fc3fb954523b7c 100644 --- a/drivers/input/mouse/cyapa_gen3.c +++ b/drivers/input/mouse/cyapa_gen3.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include "cyapa.h" diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c index 2e6bcb07257ed7..3b4439f1063563 100644 --- a/drivers/input/mouse/cyapa_gen5.c +++ b/drivers/input/mouse/cyapa_gen5.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include "cyapa.h" diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c index 4ffe08fee10c8a..570c06dcef7898 100644 --- a/drivers/input/mouse/cyapa_gen6.c +++ b/drivers/input/mouse/cyapa_gen6.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include "cyapa.h" diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index ce96513b34f64f..7521981274bd8c 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include "elan_i2c.h" diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 13dc097eb6c652..15cf4463b64ea4 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include "elan_i2c.h" diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index b4723ea395eb9f..79ad98cc1e799c 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include "psmouse.h" #include "elantech.h" #include "elan_i2c.h" diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c index cc1d4b424640ea..47be64284b25ed 100644 --- a/drivers/input/rmi4/rmi_f01.c +++ b/drivers/input/rmi4/rmi_f01.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include "rmi_driver.h" #define RMI_PRODUCT_ID_LENGTH 10 diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c index 7e97944f761653..8246fe77114bbd 100644 --- a/drivers/input/rmi4/rmi_f12.c +++ b/drivers/input/rmi4/rmi_f12.c @@ -24,6 +24,7 @@ enum rmi_f12_object_type { }; #define F12_DATA1_BYTES_PER_OBJ 8 +#define RMI_F12_QUERY_RESOLUTION 29 struct f12_data { struct rmi_2d_sensor sensor; @@ -73,6 +74,8 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12) int pitch_y = 0; int rx_receivers = 0; int tx_receivers = 0; + u16 query_dpm_addr = 0; + int dpm_resolution = 0; item = rmi_get_register_desc_item(&f12->control_reg_desc, 8); if (!item) { @@ -122,18 +125,38 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12) offset += 4; } - if (rmi_register_desc_has_subpacket(item, 3)) { - rx_receivers = buf[offset]; - tx_receivers = buf[offset + 1]; - offset += 2; - } + /* + * Use the Query DPM feature when the resolution query register + * exists. 
+ */ + if (rmi_get_register_desc_item(&f12->query_reg_desc, + RMI_F12_QUERY_RESOLUTION)) { + offset = rmi_register_desc_calc_reg_offset(&f12->query_reg_desc, + RMI_F12_QUERY_RESOLUTION); + query_dpm_addr = fn->fd.query_base_addr + offset; + ret = rmi_read(fn->rmi_dev, query_dpm_addr, buf); + if (ret < 0) { + dev_err(&fn->dev, "Failed to read DPM value: %d\n", ret); + return -ENODEV; + } + dpm_resolution = buf[0]; - /* Skip over sensor flags */ - if (rmi_register_desc_has_subpacket(item, 4)) - offset += 1; + sensor->x_mm = sensor->max_x / dpm_resolution; + sensor->y_mm = sensor->max_y / dpm_resolution; + } else { + if (rmi_register_desc_has_subpacket(item, 3)) { + rx_receivers = buf[offset]; + tx_receivers = buf[offset + 1]; + offset += 2; + } - sensor->x_mm = (pitch_x * rx_receivers) >> 12; - sensor->y_mm = (pitch_y * tx_receivers) >> 12; + /* Skip over sensor flags */ + if (rmi_register_desc_has_subpacket(item, 4)) + offset += 1; + + sensor->x_mm = (pitch_x * rx_receivers) >> 12; + sensor->y_mm = (pitch_y * tx_receivers) >> 12; + } rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x_mm: %d y_mm: %d\n", __func__, sensor->x_mm, sensor->y_mm); diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index 3b3ac71e53dc58..e2468bc04a5cb3 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include "rmi_driver.h" diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c index 886557b01ebabf..fd49acc0207105 100644 --- a/drivers/input/rmi4/rmi_f34v7.c +++ b/drivers/input/rmi4/rmi_f34v7.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "rmi_driver.h" #include "rmi_f34.h" diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index bad238f69a7afd..34d1f07ea4c304 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -1120,6 +1120,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, + /* + * Some TongFang barebones have touchpad and/or keyboard issues after + * suspend fixable with nomux + reset + noloop + nopnp. Luckily, none of + * them have an external PS/2 port so this can safely be set for all of + * them. + * TongFang barebones come with board_vendor and/or system_vendor set to + * a different value for each individual reseller. The only somewhat + * universal way to identify them is by board_name. + */ + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, /* * A lot of modern Clevo barebones have touchpad and/or keyboard issues * after suspend fixable with nomux + reset + noloop + nopnp. 
Luckily, diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c index 0c8b390b8b4f8d..3a431395c4646f 100644 --- a/drivers/input/serio/ps2-gpio.c +++ b/drivers/input/serio/ps2-gpio.c @@ -429,16 +429,14 @@ static int ps2_gpio_probe(struct platform_device *pdev) } error = devm_request_irq(dev, drvdata->irq, ps2_gpio_irq, - IRQF_NO_THREAD, DRIVER_NAME, drvdata); + IRQF_NO_THREAD | IRQF_NO_AUTOEN, DRIVER_NAME, + drvdata); if (error) { dev_err(dev, "failed to request irq %d: %d\n", drvdata->irq, error); goto err_free_serio; } - /* Keep irq disabled until serio->open is called. */ - disable_irq(drvdata->irq); - serio->id.type = SERIO_8042; serio->open = ps2_gpio_open; serio->close = ps2_gpio_close; diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c index a88e2eee55c35c..1ab12b247f9878 100644 --- a/drivers/input/serio/userio.c +++ b/drivers/input/serio/userio.c @@ -267,7 +267,6 @@ static const struct file_operations userio_fops = { .read = userio_char_read, .write = userio_char_write, .poll = userio_char_poll, - .llseek = no_llseek, }; static struct miscdevice userio_misc = { diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 2d176fbab25119..2b3fbb0455d5cb 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -63,7 +63,7 @@ #include #include #include -#include +#include /* * Aiptek status packet: diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 38d36d25f6f465..794caa102909cb 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include /* * Pressure-threshold modules param code from Alex Perry diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index c821fe3ee794e3..1ac26fc2e3eb94 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -254,36 +254,6 @@ config TOUCHSCREEN_CYTTSP_SPI To compile this driver as a module, choose M here: the module will be called cyttsp_spi. -config TOUCHSCREEN_CYTTSP4_CORE - tristate "Cypress TrueTouch Gen4 Touchscreen Driver" - help - Core driver for Cypress TrueTouch(tm) Standard Product - Generation4 touchscreen controllers. - - Say Y here if you have a Cypress Gen4 touchscreen. - - If unsure, say N. - - To compile this driver as a module, choose M here. - -config TOUCHSCREEN_CYTTSP4_I2C - tristate "support I2C bus connection" - depends on TOUCHSCREEN_CYTTSP4_CORE && I2C - help - Say Y here if the touchscreen is connected via I2C bus. - - To compile this driver as a module, choose M here: the - module will be called cyttsp4_i2c. - -config TOUCHSCREEN_CYTTSP4_SPI - tristate "support SPI bus connection" - depends on TOUCHSCREEN_CYTTSP4_CORE && SPI_MASTER - help - Say Y here if the touchscreen is connected via SPI bus. - - To compile this driver as a module, choose M here: the - module will be called cyttsp4_spi. - config TOUCHSCREEN_CYTTSP5 tristate "Cypress TrueTouch Gen5 Touchscreen Driver" depends on I2C @@ -626,18 +596,6 @@ config TOUCHSCREEN_MAX11801 To compile this driver as a module, choose M here: the module will be called max11801_ts. -config TOUCHSCREEN_MCS5000 - tristate "MELFAS MCS-5000 touchscreen" - depends on I2C - help - Say Y here if you have the MELFAS MCS-5000 touchscreen controller - chip in your system. - - If unsure, say N. - - To compile this driver as a module, choose M here: the - module will be called mcs5000_ts. 
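[Editorial aside, not part of the patch: the ps2-gpio hunk above folds the old "request the IRQ, then immediately disable_irq()" sequence into the IRQF_NO_AUTOEN request flag, so the line stays disabled until the driver's open path enables it. A minimal hypothetical sketch; only the flag and devm_request_irq() are the real interfaces used in the hunk:]

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq(int irq, void *dev_id)	/* hypothetical handler */
{
	return IRQ_HANDLED;
}

static int example_request_irq(struct device *dev, int irq, void *cookie)
{
	/*
	 * IRQF_NO_AUTOEN keeps the line disabled after the request succeeds;
	 * the driver enables it later with enable_irq() (e.g. from its
	 * serio->open callback) instead of requesting and then immediately
	 * calling disable_irq() as the removed code did.
	 */
	return devm_request_irq(dev, irq, example_irq,
				IRQF_NO_THREAD | IRQF_NO_AUTOEN,
				"example", cookie);
}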
- config TOUCHSCREEN_MMS114 tristate "MELFAS MMS114 touchscreen" depends on I2C diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index a81cb5aa21a5b9..82bc837ca01e2e 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -25,11 +25,8 @@ obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o obj-$(CONFIG_TOUCHSCREEN_CY8CTMA140) += cy8ctma140.o obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o -obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o cyttsp_i2c_common.o +obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o -obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_CORE) += cyttsp4_core.o -obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_I2C) += cyttsp4_i2c.o cyttsp_i2c_common.o -obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_SPI) += cyttsp4_spi.o obj-$(CONFIG_TOUCHSCREEN_CYTTSP5) += cyttsp5.o obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o obj-$(CONFIG_TOUCHSCREEN_DA9052) += da9052_tsi.o @@ -63,7 +60,6 @@ obj-$(CONFIG_TOUCHSCREEN_MAX11801) += max11801_ts.o obj-$(CONFIG_TOUCHSCREEN_MXS_LRADC) += mxs-lradc-ts.o obj-$(CONFIG_TOUCHSCREEN_MX25) += fsl-imx25-tcq.o obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o -obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o obj-$(CONFIG_TOUCHSCREEN_MELFAS_MIP4) += melfas_mip4.o obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o obj-$(CONFIG_TOUCHSCREEN_MMS114) += mms114.o diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index f89c0dd15d8b91..607f18af70104d 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include /* * This code has been heavily tested on a Nokia 770, and lightly diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index cfc92157701fe1..3ddabc5a2c999b 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/input/touchscreen/chipone_icn8505.c b/drivers/input/touchscreen/chipone_icn8505.c index c1b4fc28fa8d42..cde0e478950362 100644 --- a/drivers/input/touchscreen/chipone_icn8505.c +++ b/drivers/input/touchscreen/chipone_icn8505.c @@ -8,7 +8,7 @@ * Hans de Goede */ -#include +#include #include #include #include diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c index aa829725ded7f5..98d5b2ba63fb99 100644 --- a/drivers/input/touchscreen/colibri-vf50-ts.c +++ b/drivers/input/touchscreen/colibri-vf50-ts.c @@ -239,14 +239,10 @@ static void vf50_ts_close(struct input_dev *dev_input) static int vf50_ts_get_gpiod(struct device *dev, struct gpio_desc **gpio_d, const char *con_id, enum gpiod_flags flags) { - int error; - *gpio_d = devm_gpiod_get(dev, con_id, flags); - if (IS_ERR(*gpio_d)) { - error = PTR_ERR(*gpio_d); - dev_err(dev, "Could not get gpio_%s %d\n", con_id, error); - return error; - } + if (IS_ERR(*gpio_d)) + return dev_err_probe(dev, PTR_ERR(*gpio_d), + "Could not get gpio_%s\n", con_id); return 0; } diff --git a/drivers/input/touchscreen/cy8ctma140.c b/drivers/input/touchscreen/cy8ctma140.c index 567c9dcaac91ee..2d4b6e34320303 100644 --- a/drivers/input/touchscreen/cy8ctma140.c +++ b/drivers/input/touchscreen/cy8ctma140.c @@ -16,7 +16,7 @@ * same. 
*/ -#include +#include #include #include #include diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c deleted file mode 100644 index 9dc25eb2be445f..00000000000000 --- a/drivers/input/touchscreen/cyttsp4_core.c +++ /dev/null @@ -1,2174 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * cyttsp4_core.c - * Cypress TrueTouch(TM) Standard Product V4 Core driver module. - * For use with Cypress Txx4xx parts. - * Supported parts include: - * TMA4XX - * TMA1036 - * - * Copyright (C) 2012 Cypress Semiconductor - * - * Contact Cypress Semiconductor at www.cypress.com - */ - -#include "cyttsp4_core.h" -#include -#include -#include -#include -#include -#include -#include - -/* Timeout in ms. */ -#define CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT 500 -#define CY_CORE_SLEEP_REQUEST_EXCLUSIVE_TIMEOUT 5000 -#define CY_CORE_MODE_CHANGE_TIMEOUT 1000 -#define CY_CORE_RESET_AND_WAIT_TIMEOUT 500 -#define CY_CORE_WAKEUP_TIMEOUT 500 - -#define CY_CORE_STARTUP_RETRY_COUNT 3 - -static const char * const cyttsp4_tch_abs_string[] = { - [CY_TCH_X] = "X", - [CY_TCH_Y] = "Y", - [CY_TCH_P] = "P", - [CY_TCH_T] = "T", - [CY_TCH_E] = "E", - [CY_TCH_O] = "O", - [CY_TCH_W] = "W", - [CY_TCH_MAJ] = "MAJ", - [CY_TCH_MIN] = "MIN", - [CY_TCH_OR] = "OR", - [CY_TCH_NUM_ABS] = "INVALID" -}; - -static const u8 ldr_exit[] = { - 0xFF, 0x01, 0x3B, 0x00, 0x00, 0x4F, 0x6D, 0x17 -}; - -static const u8 ldr_err_app[] = { - 0x01, 0x02, 0x00, 0x00, 0x55, 0xDD, 0x17 -}; - -static inline size_t merge_bytes(u8 high, u8 low) -{ - return (high << 8) + low; -} - -#ifdef VERBOSE_DEBUG -static void cyttsp4_pr_buf(struct device *dev, u8 *pr_buf, u8 *dptr, int size, - const char *data_name) -{ - int i, k; - const char fmt[] = "%02X "; - int max; - - if (!size) - return; - - max = (CY_MAX_PRBUF_SIZE - 1) - sizeof(CY_PR_TRUNCATED); - - pr_buf[0] = 0; - for (i = k = 0; i < size && k < max; i++, k += 3) - scnprintf(pr_buf + k, CY_MAX_PRBUF_SIZE, fmt, dptr[i]); - - dev_vdbg(dev, "%s: %s[0..%d]=%s%s\n", __func__, data_name, size - 1, - pr_buf, size <= max ? 
"" : CY_PR_TRUNCATED); -} -#else -#define cyttsp4_pr_buf(dev, pr_buf, dptr, size, data_name) do { } while (0) -#endif - -static int cyttsp4_load_status_regs(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - struct device *dev = cd->dev; - int rc; - - rc = cyttsp4_adap_read(cd, CY_REG_BASE, si->si_ofs.mode_size, - si->xy_mode); - if (rc < 0) - dev_err(dev, "%s: fail read mode regs r=%d\n", - __func__, rc); - else - cyttsp4_pr_buf(dev, cd->pr_buf, si->xy_mode, - si->si_ofs.mode_size, "xy_mode"); - - return rc; -} - -static int cyttsp4_handshake(struct cyttsp4 *cd, u8 mode) -{ - u8 cmd = mode ^ CY_HST_TOGGLE; - int rc; - - /* - * Mode change issued, handshaking now will cause endless mode change - * requests, for sync mode modechange will do same with handshake - * */ - if (mode & CY_HST_MODE_CHANGE) - return 0; - - rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(cmd), &cmd); - if (rc < 0) - dev_err(cd->dev, "%s: bus write fail on handshake (ret=%d)\n", - __func__, rc); - - return rc; -} - -static int cyttsp4_hw_soft_reset(struct cyttsp4 *cd) -{ - u8 cmd = CY_HST_RESET; - int rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(cmd), &cmd); - if (rc < 0) { - dev_err(cd->dev, "%s: FAILED to execute SOFT reset\n", - __func__); - return rc; - } - return 0; -} - -static int cyttsp4_hw_hard_reset(struct cyttsp4 *cd) -{ - if (cd->cpdata->xres) { - cd->cpdata->xres(cd->cpdata, cd->dev); - dev_dbg(cd->dev, "%s: execute HARD reset\n", __func__); - return 0; - } - dev_err(cd->dev, "%s: FAILED to execute HARD reset\n", __func__); - return -ENOSYS; -} - -static int cyttsp4_hw_reset(struct cyttsp4 *cd) -{ - int rc = cyttsp4_hw_hard_reset(cd); - if (rc == -ENOSYS) - rc = cyttsp4_hw_soft_reset(cd); - return rc; -} - -/* - * Gets number of bits for a touch filed as parameter, - * sets maximum value for field which is used as bit mask - * and returns number of bytes required for that field - */ -static int cyttsp4_bits_2_bytes(unsigned int nbits, size_t *max) -{ - *max = 1UL << nbits; - return (nbits + 7) / 8; -} - -static int cyttsp4_si_data_offsets(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - int rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(si->si_data), - &si->si_data); - if (rc < 0) { - dev_err(cd->dev, "%s: fail read sysinfo data offsets r=%d\n", - __func__, rc); - return rc; - } - - /* Print sysinfo data offsets */ - cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)&si->si_data, - sizeof(si->si_data), "sysinfo_data_offsets"); - - /* convert sysinfo data offset bytes into integers */ - - si->si_ofs.map_sz = merge_bytes(si->si_data.map_szh, - si->si_data.map_szl); - si->si_ofs.map_sz = merge_bytes(si->si_data.map_szh, - si->si_data.map_szl); - si->si_ofs.cydata_ofs = merge_bytes(si->si_data.cydata_ofsh, - si->si_data.cydata_ofsl); - si->si_ofs.test_ofs = merge_bytes(si->si_data.test_ofsh, - si->si_data.test_ofsl); - si->si_ofs.pcfg_ofs = merge_bytes(si->si_data.pcfg_ofsh, - si->si_data.pcfg_ofsl); - si->si_ofs.opcfg_ofs = merge_bytes(si->si_data.opcfg_ofsh, - si->si_data.opcfg_ofsl); - si->si_ofs.ddata_ofs = merge_bytes(si->si_data.ddata_ofsh, - si->si_data.ddata_ofsl); - si->si_ofs.mdata_ofs = merge_bytes(si->si_data.mdata_ofsh, - si->si_data.mdata_ofsl); - return rc; -} - -static int cyttsp4_si_get_cydata(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - int read_offset; - int mfgid_sz, calc_mfgid_sz; - void *p; - int rc; - - if (si->si_ofs.test_ofs <= si->si_ofs.cydata_ofs) { - dev_err(cd->dev, - "%s: invalid offset test_ofs: %zu, cydata_ofs: 
%zu\n", - __func__, si->si_ofs.test_ofs, si->si_ofs.cydata_ofs); - return -EINVAL; - } - - si->si_ofs.cydata_size = si->si_ofs.test_ofs - si->si_ofs.cydata_ofs; - dev_dbg(cd->dev, "%s: cydata size: %zd\n", __func__, - si->si_ofs.cydata_size); - - p = krealloc(si->si_ptrs.cydata, si->si_ofs.cydata_size, GFP_KERNEL); - if (p == NULL) { - dev_err(cd->dev, "%s: failed to allocate cydata memory\n", - __func__); - return -ENOMEM; - } - si->si_ptrs.cydata = p; - - read_offset = si->si_ofs.cydata_ofs; - - /* Read the CYDA registers up to MFGID field */ - rc = cyttsp4_adap_read(cd, read_offset, - offsetof(struct cyttsp4_cydata, mfgid_sz) - + sizeof(si->si_ptrs.cydata->mfgid_sz), - si->si_ptrs.cydata); - if (rc < 0) { - dev_err(cd->dev, "%s: fail read cydata r=%d\n", - __func__, rc); - return rc; - } - - /* Check MFGID size */ - mfgid_sz = si->si_ptrs.cydata->mfgid_sz; - calc_mfgid_sz = si->si_ofs.cydata_size - sizeof(struct cyttsp4_cydata); - if (mfgid_sz != calc_mfgid_sz) { - dev_err(cd->dev, "%s: mismatch in MFGID size, reported:%d calculated:%d\n", - __func__, mfgid_sz, calc_mfgid_sz); - return -EINVAL; - } - - read_offset += offsetof(struct cyttsp4_cydata, mfgid_sz) - + sizeof(si->si_ptrs.cydata->mfgid_sz); - - /* Read the CYDA registers for MFGID field */ - rc = cyttsp4_adap_read(cd, read_offset, si->si_ptrs.cydata->mfgid_sz, - si->si_ptrs.cydata->mfg_id); - if (rc < 0) { - dev_err(cd->dev, "%s: fail read cydata r=%d\n", - __func__, rc); - return rc; - } - - read_offset += si->si_ptrs.cydata->mfgid_sz; - - /* Read the rest of the CYDA registers */ - rc = cyttsp4_adap_read(cd, read_offset, - sizeof(struct cyttsp4_cydata) - - offsetof(struct cyttsp4_cydata, cyito_idh), - &si->si_ptrs.cydata->cyito_idh); - if (rc < 0) { - dev_err(cd->dev, "%s: fail read cydata r=%d\n", - __func__, rc); - return rc; - } - - cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)si->si_ptrs.cydata, - si->si_ofs.cydata_size, "sysinfo_cydata"); - return rc; -} - -static int cyttsp4_si_get_test_data(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - void *p; - int rc; - - if (si->si_ofs.pcfg_ofs <= si->si_ofs.test_ofs) { - dev_err(cd->dev, - "%s: invalid offset pcfg_ofs: %zu, test_ofs: %zu\n", - __func__, si->si_ofs.pcfg_ofs, si->si_ofs.test_ofs); - return -EINVAL; - } - - si->si_ofs.test_size = si->si_ofs.pcfg_ofs - si->si_ofs.test_ofs; - - p = krealloc(si->si_ptrs.test, si->si_ofs.test_size, GFP_KERNEL); - if (p == NULL) { - dev_err(cd->dev, "%s: failed to allocate test memory\n", - __func__); - return -ENOMEM; - } - si->si_ptrs.test = p; - - rc = cyttsp4_adap_read(cd, si->si_ofs.test_ofs, si->si_ofs.test_size, - si->si_ptrs.test); - if (rc < 0) { - dev_err(cd->dev, "%s: fail read test data r=%d\n", - __func__, rc); - return rc; - } - - cyttsp4_pr_buf(cd->dev, cd->pr_buf, - (u8 *)si->si_ptrs.test, si->si_ofs.test_size, - "sysinfo_test_data"); - if (si->si_ptrs.test->post_codel & - CY_POST_CODEL_WDG_RST) - dev_info(cd->dev, "%s: %s codel=%02X\n", - __func__, "Reset was a WATCHDOG RESET", - si->si_ptrs.test->post_codel); - - if (!(si->si_ptrs.test->post_codel & - CY_POST_CODEL_CFG_DATA_CRC_FAIL)) - dev_info(cd->dev, "%s: %s codel=%02X\n", __func__, - "Config Data CRC FAIL", - si->si_ptrs.test->post_codel); - - if (!(si->si_ptrs.test->post_codel & - CY_POST_CODEL_PANEL_TEST_FAIL)) - dev_info(cd->dev, "%s: %s codel=%02X\n", - __func__, "PANEL TEST FAIL", - si->si_ptrs.test->post_codel); - - dev_info(cd->dev, "%s: SCANNING is %s codel=%02X\n", - __func__, si->si_ptrs.test->post_codel & 0x08 ? 
- "ENABLED" : "DISABLED", - si->si_ptrs.test->post_codel); - return rc; -} - -static int cyttsp4_si_get_pcfg_data(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - void *p; - int rc; - - if (si->si_ofs.opcfg_ofs <= si->si_ofs.pcfg_ofs) { - dev_err(cd->dev, - "%s: invalid offset opcfg_ofs: %zu, pcfg_ofs: %zu\n", - __func__, si->si_ofs.opcfg_ofs, si->si_ofs.pcfg_ofs); - return -EINVAL; - } - - si->si_ofs.pcfg_size = si->si_ofs.opcfg_ofs - si->si_ofs.pcfg_ofs; - - p = krealloc(si->si_ptrs.pcfg, si->si_ofs.pcfg_size, GFP_KERNEL); - if (p == NULL) { - dev_err(cd->dev, "%s: failed to allocate pcfg memory\n", - __func__); - return -ENOMEM; - } - si->si_ptrs.pcfg = p; - - rc = cyttsp4_adap_read(cd, si->si_ofs.pcfg_ofs, si->si_ofs.pcfg_size, - si->si_ptrs.pcfg); - if (rc < 0) { - dev_err(cd->dev, "%s: fail read pcfg data r=%d\n", - __func__, rc); - return rc; - } - - si->si_ofs.max_x = merge_bytes((si->si_ptrs.pcfg->res_xh - & CY_PCFG_RESOLUTION_X_MASK), si->si_ptrs.pcfg->res_xl); - si->si_ofs.x_origin = !!(si->si_ptrs.pcfg->res_xh - & CY_PCFG_ORIGIN_X_MASK); - si->si_ofs.max_y = merge_bytes((si->si_ptrs.pcfg->res_yh - & CY_PCFG_RESOLUTION_Y_MASK), si->si_ptrs.pcfg->res_yl); - si->si_ofs.y_origin = !!(si->si_ptrs.pcfg->res_yh - & CY_PCFG_ORIGIN_Y_MASK); - si->si_ofs.max_p = merge_bytes(si->si_ptrs.pcfg->max_zh, - si->si_ptrs.pcfg->max_zl); - - cyttsp4_pr_buf(cd->dev, cd->pr_buf, - (u8 *)si->si_ptrs.pcfg, - si->si_ofs.pcfg_size, "sysinfo_pcfg_data"); - return rc; -} - -static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - struct cyttsp4_tch_abs_params *tch; - struct cyttsp4_tch_rec_params *tch_old, *tch_new; - enum cyttsp4_tch_abs abs; - int i; - void *p; - int rc; - - if (si->si_ofs.ddata_ofs <= si->si_ofs.opcfg_ofs) { - dev_err(cd->dev, - "%s: invalid offset ddata_ofs: %zu, opcfg_ofs: %zu\n", - __func__, si->si_ofs.ddata_ofs, si->si_ofs.opcfg_ofs); - return -EINVAL; - } - - si->si_ofs.opcfg_size = si->si_ofs.ddata_ofs - si->si_ofs.opcfg_ofs; - - p = krealloc(si->si_ptrs.opcfg, si->si_ofs.opcfg_size, GFP_KERNEL); - if (p == NULL) { - dev_err(cd->dev, "%s: failed to allocate opcfg memory\n", - __func__); - return -ENOMEM; - } - si->si_ptrs.opcfg = p; - - rc = cyttsp4_adap_read(cd, si->si_ofs.opcfg_ofs, si->si_ofs.opcfg_size, - si->si_ptrs.opcfg); - if (rc < 0) { - dev_err(cd->dev, "%s: fail read opcfg data r=%d\n", - __func__, rc); - return rc; - } - si->si_ofs.cmd_ofs = si->si_ptrs.opcfg->cmd_ofs; - si->si_ofs.rep_ofs = si->si_ptrs.opcfg->rep_ofs; - si->si_ofs.rep_sz = (si->si_ptrs.opcfg->rep_szh * 256) + - si->si_ptrs.opcfg->rep_szl; - si->si_ofs.num_btns = si->si_ptrs.opcfg->num_btns; - si->si_ofs.num_btn_regs = (si->si_ofs.num_btns + - CY_NUM_BTN_PER_REG - 1) / CY_NUM_BTN_PER_REG; - si->si_ofs.tt_stat_ofs = si->si_ptrs.opcfg->tt_stat_ofs; - si->si_ofs.obj_cfg0 = si->si_ptrs.opcfg->obj_cfg0; - si->si_ofs.max_tchs = si->si_ptrs.opcfg->max_tchs & - CY_BYTE_OFS_MASK; - si->si_ofs.tch_rec_size = si->si_ptrs.opcfg->tch_rec_size & - CY_BYTE_OFS_MASK; - - /* Get the old touch fields */ - for (abs = CY_TCH_X; abs < CY_NUM_TCH_FIELDS; abs++) { - tch = &si->si_ofs.tch_abs[abs]; - tch_old = &si->si_ptrs.opcfg->tch_rec_old[abs]; - - tch->ofs = tch_old->loc & CY_BYTE_OFS_MASK; - tch->size = cyttsp4_bits_2_bytes(tch_old->size, - &tch->max); - tch->bofs = (tch_old->loc & CY_BOFS_MASK) >> CY_BOFS_SHIFT; - } - - /* button fields */ - si->si_ofs.btn_rec_size = si->si_ptrs.opcfg->btn_rec_size; - si->si_ofs.btn_diff_ofs = 
si->si_ptrs.opcfg->btn_diff_ofs; - si->si_ofs.btn_diff_size = si->si_ptrs.opcfg->btn_diff_size; - - if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE) { - /* Get the extended touch fields */ - for (i = 0; i < CY_NUM_EXT_TCH_FIELDS; abs++, i++) { - tch = &si->si_ofs.tch_abs[abs]; - tch_new = &si->si_ptrs.opcfg->tch_rec_new[i]; - - tch->ofs = tch_new->loc & CY_BYTE_OFS_MASK; - tch->size = cyttsp4_bits_2_bytes(tch_new->size, - &tch->max); - tch->bofs = (tch_new->loc & CY_BOFS_MASK) >> CY_BOFS_SHIFT; - } - } - - for (abs = 0; abs < CY_TCH_NUM_ABS; abs++) { - dev_dbg(cd->dev, "%s: tch_rec_%s\n", __func__, - cyttsp4_tch_abs_string[abs]); - dev_dbg(cd->dev, "%s: ofs =%2zd\n", __func__, - si->si_ofs.tch_abs[abs].ofs); - dev_dbg(cd->dev, "%s: siz =%2zd\n", __func__, - si->si_ofs.tch_abs[abs].size); - dev_dbg(cd->dev, "%s: max =%2zd\n", __func__, - si->si_ofs.tch_abs[abs].max); - dev_dbg(cd->dev, "%s: bofs=%2zd\n", __func__, - si->si_ofs.tch_abs[abs].bofs); - } - - si->si_ofs.mode_size = si->si_ofs.tt_stat_ofs + 1; - si->si_ofs.data_size = si->si_ofs.max_tchs * - si->si_ptrs.opcfg->tch_rec_size; - - cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)si->si_ptrs.opcfg, - si->si_ofs.opcfg_size, "sysinfo_opcfg_data"); - - return 0; -} - -static int cyttsp4_si_get_ddata(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - void *p; - int rc; - - si->si_ofs.ddata_size = si->si_ofs.mdata_ofs - si->si_ofs.ddata_ofs; - - p = krealloc(si->si_ptrs.ddata, si->si_ofs.ddata_size, GFP_KERNEL); - if (p == NULL) { - dev_err(cd->dev, "%s: fail alloc ddata memory\n", __func__); - return -ENOMEM; - } - si->si_ptrs.ddata = p; - - rc = cyttsp4_adap_read(cd, si->si_ofs.ddata_ofs, si->si_ofs.ddata_size, - si->si_ptrs.ddata); - if (rc < 0) - dev_err(cd->dev, "%s: fail read ddata data r=%d\n", - __func__, rc); - else - cyttsp4_pr_buf(cd->dev, cd->pr_buf, - (u8 *)si->si_ptrs.ddata, - si->si_ofs.ddata_size, "sysinfo_ddata"); - return rc; -} - -static int cyttsp4_si_get_mdata(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - void *p; - int rc; - - si->si_ofs.mdata_size = si->si_ofs.map_sz - si->si_ofs.mdata_ofs; - - p = krealloc(si->si_ptrs.mdata, si->si_ofs.mdata_size, GFP_KERNEL); - if (p == NULL) { - dev_err(cd->dev, "%s: fail alloc mdata memory\n", __func__); - return -ENOMEM; - } - si->si_ptrs.mdata = p; - - rc = cyttsp4_adap_read(cd, si->si_ofs.mdata_ofs, si->si_ofs.mdata_size, - si->si_ptrs.mdata); - if (rc < 0) - dev_err(cd->dev, "%s: fail read mdata data r=%d\n", - __func__, rc); - else - cyttsp4_pr_buf(cd->dev, cd->pr_buf, - (u8 *)si->si_ptrs.mdata, - si->si_ofs.mdata_size, "sysinfo_mdata"); - return rc; -} - -static int cyttsp4_si_get_btn_data(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - int btn; - int num_defined_keys; - u16 *key_table; - void *p; - int rc = 0; - - if (si->si_ofs.num_btns) { - si->si_ofs.btn_keys_size = si->si_ofs.num_btns * - sizeof(struct cyttsp4_btn); - - p = krealloc(si->btn, si->si_ofs.btn_keys_size, - GFP_KERNEL|__GFP_ZERO); - if (p == NULL) { - dev_err(cd->dev, "%s: %s\n", __func__, - "fail alloc btn_keys memory"); - return -ENOMEM; - } - si->btn = p; - - if (cd->cpdata->sett[CY_IC_GRPNUM_BTN_KEYS] == NULL) - num_defined_keys = 0; - else if (cd->cpdata->sett[CY_IC_GRPNUM_BTN_KEYS]->data == NULL) - num_defined_keys = 0; - else - num_defined_keys = cd->cpdata->sett - [CY_IC_GRPNUM_BTN_KEYS]->size; - - for (btn = 0; btn < si->si_ofs.num_btns && - btn < num_defined_keys; btn++) { - key_table = (u16 *)cd->cpdata->sett - 
[CY_IC_GRPNUM_BTN_KEYS]->data; - si->btn[btn].key_code = key_table[btn]; - si->btn[btn].state = CY_BTN_RELEASED; - si->btn[btn].enabled = true; - } - for (; btn < si->si_ofs.num_btns; btn++) { - si->btn[btn].key_code = KEY_RESERVED; - si->btn[btn].state = CY_BTN_RELEASED; - si->btn[btn].enabled = true; - } - - return rc; - } - - si->si_ofs.btn_keys_size = 0; - kfree(si->btn); - si->btn = NULL; - return rc; -} - -static int cyttsp4_si_get_op_data_ptrs(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - void *p; - - p = krealloc(si->xy_mode, si->si_ofs.mode_size, GFP_KERNEL|__GFP_ZERO); - if (p == NULL) - return -ENOMEM; - si->xy_mode = p; - - p = krealloc(si->xy_data, si->si_ofs.data_size, GFP_KERNEL|__GFP_ZERO); - if (p == NULL) - return -ENOMEM; - si->xy_data = p; - - p = krealloc(si->btn_rec_data, - si->si_ofs.btn_rec_size * si->si_ofs.num_btns, - GFP_KERNEL|__GFP_ZERO); - if (p == NULL) - return -ENOMEM; - si->btn_rec_data = p; - - return 0; -} - -static void cyttsp4_si_put_log_data(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - dev_dbg(cd->dev, "%s: cydata_ofs =%4zd siz=%4zd\n", __func__, - si->si_ofs.cydata_ofs, si->si_ofs.cydata_size); - dev_dbg(cd->dev, "%s: test_ofs =%4zd siz=%4zd\n", __func__, - si->si_ofs.test_ofs, si->si_ofs.test_size); - dev_dbg(cd->dev, "%s: pcfg_ofs =%4zd siz=%4zd\n", __func__, - si->si_ofs.pcfg_ofs, si->si_ofs.pcfg_size); - dev_dbg(cd->dev, "%s: opcfg_ofs =%4zd siz=%4zd\n", __func__, - si->si_ofs.opcfg_ofs, si->si_ofs.opcfg_size); - dev_dbg(cd->dev, "%s: ddata_ofs =%4zd siz=%4zd\n", __func__, - si->si_ofs.ddata_ofs, si->si_ofs.ddata_size); - dev_dbg(cd->dev, "%s: mdata_ofs =%4zd siz=%4zd\n", __func__, - si->si_ofs.mdata_ofs, si->si_ofs.mdata_size); - - dev_dbg(cd->dev, "%s: cmd_ofs =%4zd\n", __func__, - si->si_ofs.cmd_ofs); - dev_dbg(cd->dev, "%s: rep_ofs =%4zd\n", __func__, - si->si_ofs.rep_ofs); - dev_dbg(cd->dev, "%s: rep_sz =%4zd\n", __func__, - si->si_ofs.rep_sz); - dev_dbg(cd->dev, "%s: num_btns =%4zd\n", __func__, - si->si_ofs.num_btns); - dev_dbg(cd->dev, "%s: num_btn_regs =%4zd\n", __func__, - si->si_ofs.num_btn_regs); - dev_dbg(cd->dev, "%s: tt_stat_ofs =%4zd\n", __func__, - si->si_ofs.tt_stat_ofs); - dev_dbg(cd->dev, "%s: tch_rec_size =%4zd\n", __func__, - si->si_ofs.tch_rec_size); - dev_dbg(cd->dev, "%s: max_tchs =%4zd\n", __func__, - si->si_ofs.max_tchs); - dev_dbg(cd->dev, "%s: mode_size =%4zd\n", __func__, - si->si_ofs.mode_size); - dev_dbg(cd->dev, "%s: data_size =%4zd\n", __func__, - si->si_ofs.data_size); - dev_dbg(cd->dev, "%s: map_sz =%4zd\n", __func__, - si->si_ofs.map_sz); - - dev_dbg(cd->dev, "%s: btn_rec_size =%2zd\n", __func__, - si->si_ofs.btn_rec_size); - dev_dbg(cd->dev, "%s: btn_diff_ofs =%2zd\n", __func__, - si->si_ofs.btn_diff_ofs); - dev_dbg(cd->dev, "%s: btn_diff_size =%2zd\n", __func__, - si->si_ofs.btn_diff_size); - - dev_dbg(cd->dev, "%s: max_x = 0x%04zX (%zd)\n", __func__, - si->si_ofs.max_x, si->si_ofs.max_x); - dev_dbg(cd->dev, "%s: x_origin = %zd (%s)\n", __func__, - si->si_ofs.x_origin, - si->si_ofs.x_origin == CY_NORMAL_ORIGIN ? - "left corner" : "right corner"); - dev_dbg(cd->dev, "%s: max_y = 0x%04zX (%zd)\n", __func__, - si->si_ofs.max_y, si->si_ofs.max_y); - dev_dbg(cd->dev, "%s: y_origin = %zd (%s)\n", __func__, - si->si_ofs.y_origin, - si->si_ofs.y_origin == CY_NORMAL_ORIGIN ? 
- "upper corner" : "lower corner"); - dev_dbg(cd->dev, "%s: max_p = 0x%04zX (%zd)\n", __func__, - si->si_ofs.max_p, si->si_ofs.max_p); - - dev_dbg(cd->dev, "%s: xy_mode=%p xy_data=%p\n", __func__, - si->xy_mode, si->xy_data); -} - -static int cyttsp4_get_sysinfo_regs(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - int rc; - - rc = cyttsp4_si_data_offsets(cd); - if (rc < 0) - return rc; - - rc = cyttsp4_si_get_cydata(cd); - if (rc < 0) - return rc; - - rc = cyttsp4_si_get_test_data(cd); - if (rc < 0) - return rc; - - rc = cyttsp4_si_get_pcfg_data(cd); - if (rc < 0) - return rc; - - rc = cyttsp4_si_get_opcfg_data(cd); - if (rc < 0) - return rc; - - rc = cyttsp4_si_get_ddata(cd); - if (rc < 0) - return rc; - - rc = cyttsp4_si_get_mdata(cd); - if (rc < 0) - return rc; - - rc = cyttsp4_si_get_btn_data(cd); - if (rc < 0) - return rc; - - rc = cyttsp4_si_get_op_data_ptrs(cd); - if (rc < 0) { - dev_err(cd->dev, "%s: failed to get_op_data\n", - __func__); - return rc; - } - - cyttsp4_si_put_log_data(cd); - - /* provide flow control handshake */ - rc = cyttsp4_handshake(cd, si->si_data.hst_mode); - if (rc < 0) - dev_err(cd->dev, "%s: handshake fail on sysinfo reg\n", - __func__); - - si->ready = true; - return rc; -} - -static void cyttsp4_queue_startup_(struct cyttsp4 *cd) -{ - if (cd->startup_state == STARTUP_NONE) { - cd->startup_state = STARTUP_QUEUED; - schedule_work(&cd->startup_work); - dev_dbg(cd->dev, "%s: cyttsp4_startup queued\n", __func__); - } else { - dev_dbg(cd->dev, "%s: startup_state = %d\n", __func__, - cd->startup_state); - } -} - -static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md, - int max_slots) -{ - int t; - - if (md->num_prv_tch == 0) - return; - - for (t = 0; t < max_slots; t++) { - input_mt_slot(md->input, t); - input_mt_report_slot_inactive(md->input); - } -} - -static void cyttsp4_lift_all(struct cyttsp4_mt_data *md) -{ - if (!md->si) - return; - - if (md->num_prv_tch != 0) { - cyttsp4_report_slot_liftoff(md, - md->si->si_ofs.tch_abs[CY_TCH_T].max); - input_sync(md->input); - md->num_prv_tch = 0; - } -} - -static void cyttsp4_get_touch_axis(struct cyttsp4_mt_data *md, - int *axis, int size, int max, u8 *xy_data, int bofs) -{ - int nbyte; - int next; - - for (nbyte = 0, *axis = 0, next = 0; nbyte < size; nbyte++) { - dev_vdbg(&md->input->dev, - "%s: *axis=%02X(%d) size=%d max=%08X xy_data=%p" - " xy_data[%d]=%02X(%d) bofs=%d\n", - __func__, *axis, *axis, size, max, xy_data, next, - xy_data[next], xy_data[next], bofs); - *axis = (*axis * 256) + (xy_data[next] >> bofs); - next++; - } - - *axis &= max - 1; - - dev_vdbg(&md->input->dev, - "%s: *axis=%02X(%d) size=%d max=%08X xy_data=%p" - " xy_data[%d]=%02X(%d)\n", - __func__, *axis, *axis, size, max, xy_data, next, - xy_data[next], xy_data[next]); -} - -static void cyttsp4_get_touch(struct cyttsp4_mt_data *md, - struct cyttsp4_touch *touch, u8 *xy_data) -{ - struct device *dev = &md->input->dev; - struct cyttsp4_sysinfo *si = md->si; - enum cyttsp4_tch_abs abs; - bool flipped; - - for (abs = CY_TCH_X; abs < CY_TCH_NUM_ABS; abs++) { - cyttsp4_get_touch_axis(md, &touch->abs[abs], - si->si_ofs.tch_abs[abs].size, - si->si_ofs.tch_abs[abs].max, - xy_data + si->si_ofs.tch_abs[abs].ofs, - si->si_ofs.tch_abs[abs].bofs); - dev_vdbg(dev, "%s: get %s=%04X(%d)\n", __func__, - cyttsp4_tch_abs_string[abs], - touch->abs[abs], touch->abs[abs]); - } - - if (md->pdata->flags & CY_FLAG_FLIP) { - swap(touch->abs[CY_TCH_X], touch->abs[CY_TCH_Y]); - flipped = true; - } else - flipped = false; - - if 
(md->pdata->flags & CY_FLAG_INV_X) { - if (flipped) - touch->abs[CY_TCH_X] = md->si->si_ofs.max_y - - touch->abs[CY_TCH_X]; - else - touch->abs[CY_TCH_X] = md->si->si_ofs.max_x - - touch->abs[CY_TCH_X]; - } - if (md->pdata->flags & CY_FLAG_INV_Y) { - if (flipped) - touch->abs[CY_TCH_Y] = md->si->si_ofs.max_x - - touch->abs[CY_TCH_Y]; - else - touch->abs[CY_TCH_Y] = md->si->si_ofs.max_y - - touch->abs[CY_TCH_Y]; - } - - dev_vdbg(dev, "%s: flip=%s inv-x=%s inv-y=%s x=%04X(%d) y=%04X(%d)\n", - __func__, flipped ? "true" : "false", - md->pdata->flags & CY_FLAG_INV_X ? "true" : "false", - md->pdata->flags & CY_FLAG_INV_Y ? "true" : "false", - touch->abs[CY_TCH_X], touch->abs[CY_TCH_X], - touch->abs[CY_TCH_Y], touch->abs[CY_TCH_Y]); -} - -static void cyttsp4_final_sync(struct input_dev *input, int max_slots, int *ids) -{ - int t; - - for (t = 0; t < max_slots; t++) { - if (ids[t]) - continue; - input_mt_slot(input, t); - input_mt_report_slot_inactive(input); - } - - input_sync(input); -} - -static void cyttsp4_get_mt_touches(struct cyttsp4_mt_data *md, int num_cur_tch) -{ - struct device *dev = &md->input->dev; - struct cyttsp4_sysinfo *si = md->si; - struct cyttsp4_touch tch; - int sig; - int i, j, t = 0; - int ids[MAX(CY_TMA1036_MAX_TCH, CY_TMA4XX_MAX_TCH)]; - - memset(ids, 0, si->si_ofs.tch_abs[CY_TCH_T].max * sizeof(int)); - for (i = 0; i < num_cur_tch; i++) { - cyttsp4_get_touch(md, &tch, si->xy_data + - (i * si->si_ofs.tch_rec_size)); - if ((tch.abs[CY_TCH_T] < md->pdata->frmwrk->abs - [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MIN_OST]) || - (tch.abs[CY_TCH_T] > md->pdata->frmwrk->abs - [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MAX_OST])) { - dev_err(dev, "%s: tch=%d -> bad trk_id=%d max_id=%d\n", - __func__, i, tch.abs[CY_TCH_T], - md->pdata->frmwrk->abs[(CY_ABS_ID_OST * - CY_NUM_ABS_SET) + CY_MAX_OST]); - continue; - } - - /* use 0 based track id's */ - sig = md->pdata->frmwrk->abs - [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + 0]; - if (sig != CY_IGNORE_VALUE) { - t = tch.abs[CY_TCH_T] - md->pdata->frmwrk->abs - [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MIN_OST]; - if (tch.abs[CY_TCH_E] == CY_EV_LIFTOFF) { - dev_dbg(dev, "%s: t=%d e=%d lift-off\n", - __func__, t, tch.abs[CY_TCH_E]); - goto cyttsp4_get_mt_touches_pr_tch; - } - input_mt_slot(md->input, t); - input_mt_report_slot_state(md->input, MT_TOOL_FINGER, - true); - ids[t] = true; - } - - /* all devices: position and pressure fields */ - for (j = 0; j <= CY_ABS_W_OST; j++) { - sig = md->pdata->frmwrk->abs[((CY_ABS_X_OST + j) * - CY_NUM_ABS_SET) + 0]; - if (sig != CY_IGNORE_VALUE) - input_report_abs(md->input, sig, - tch.abs[CY_TCH_X + j]); - } - if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE) { - /* - * TMA400 size and orientation fields: - * if pressure is non-zero and major touch - * signal is zero, then set major and minor touch - * signals to minimum non-zero value - */ - if (tch.abs[CY_TCH_P] > 0 && tch.abs[CY_TCH_MAJ] == 0) - tch.abs[CY_TCH_MAJ] = tch.abs[CY_TCH_MIN] = 1; - - /* Get the extended touch fields */ - for (j = 0; j < CY_NUM_EXT_TCH_FIELDS; j++) { - sig = md->pdata->frmwrk->abs - [((CY_ABS_MAJ_OST + j) * - CY_NUM_ABS_SET) + 0]; - if (sig != CY_IGNORE_VALUE) - input_report_abs(md->input, sig, - tch.abs[CY_TCH_MAJ + j]); - } - } - -cyttsp4_get_mt_touches_pr_tch: - if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE) - dev_dbg(dev, - "%s: t=%d x=%d y=%d z=%d M=%d m=%d o=%d e=%d\n", - __func__, t, - tch.abs[CY_TCH_X], - tch.abs[CY_TCH_Y], - tch.abs[CY_TCH_P], - tch.abs[CY_TCH_MAJ], - tch.abs[CY_TCH_MIN], - tch.abs[CY_TCH_OR], - 
tch.abs[CY_TCH_E]); - else - dev_dbg(dev, - "%s: t=%d x=%d y=%d z=%d e=%d\n", __func__, - t, - tch.abs[CY_TCH_X], - tch.abs[CY_TCH_Y], - tch.abs[CY_TCH_P], - tch.abs[CY_TCH_E]); - } - - cyttsp4_final_sync(md->input, si->si_ofs.tch_abs[CY_TCH_T].max, ids); - - md->num_prv_tch = num_cur_tch; - - return; -} - -/* read xy_data for all current touches */ -static int cyttsp4_xy_worker(struct cyttsp4 *cd) -{ - struct cyttsp4_mt_data *md = &cd->md; - struct device *dev = &md->input->dev; - struct cyttsp4_sysinfo *si = md->si; - u8 num_cur_tch; - u8 hst_mode; - u8 rep_len; - u8 rep_stat; - u8 tt_stat; - int rc = 0; - - /* - * Get event data from cyttsp4 device. - * The event data includes all data - * for all active touches. - * Event data also includes button data - */ - /* - * Use 2 reads: - * 1st read to get mode + button bytes + touch count (core) - * 2nd read (optional) to get touch 1 - touch n data - */ - hst_mode = si->xy_mode[CY_REG_BASE]; - rep_len = si->xy_mode[si->si_ofs.rep_ofs]; - rep_stat = si->xy_mode[si->si_ofs.rep_ofs + 1]; - tt_stat = si->xy_mode[si->si_ofs.tt_stat_ofs]; - dev_vdbg(dev, "%s: %s%02X %s%d %s%02X %s%02X\n", __func__, - "hst_mode=", hst_mode, "rep_len=", rep_len, - "rep_stat=", rep_stat, "tt_stat=", tt_stat); - - num_cur_tch = GET_NUM_TOUCHES(tt_stat); - dev_vdbg(dev, "%s: num_cur_tch=%d\n", __func__, num_cur_tch); - - if (rep_len == 0 && num_cur_tch > 0) { - dev_err(dev, "%s: report length error rep_len=%d num_tch=%d\n", - __func__, rep_len, num_cur_tch); - goto cyttsp4_xy_worker_exit; - } - - /* read touches */ - if (num_cur_tch > 0) { - rc = cyttsp4_adap_read(cd, si->si_ofs.tt_stat_ofs + 1, - num_cur_tch * si->si_ofs.tch_rec_size, - si->xy_data); - if (rc < 0) { - dev_err(dev, "%s: read fail on touch regs r=%d\n", - __func__, rc); - goto cyttsp4_xy_worker_exit; - } - } - - /* print xy data */ - cyttsp4_pr_buf(dev, cd->pr_buf, si->xy_data, num_cur_tch * - si->si_ofs.tch_rec_size, "xy_data"); - - /* check any error conditions */ - if (IS_BAD_PKT(rep_stat)) { - dev_dbg(dev, "%s: Invalid buffer detected\n", __func__); - rc = 0; - goto cyttsp4_xy_worker_exit; - } - - if (IS_LARGE_AREA(tt_stat)) - dev_dbg(dev, "%s: Large area detected\n", __func__); - - if (num_cur_tch > si->si_ofs.max_tchs) { - dev_err(dev, "%s: too many tch; set to max tch (n=%d c=%zd)\n", - __func__, num_cur_tch, si->si_ofs.max_tchs); - num_cur_tch = si->si_ofs.max_tchs; - } - - /* extract xy_data for all currently reported touches */ - dev_vdbg(dev, "%s: extract data num_cur_tch=%d\n", __func__, - num_cur_tch); - if (num_cur_tch) - cyttsp4_get_mt_touches(md, num_cur_tch); - else - cyttsp4_lift_all(md); - - rc = 0; - -cyttsp4_xy_worker_exit: - return rc; -} - -static int cyttsp4_mt_attention(struct cyttsp4 *cd) -{ - struct device *dev = cd->dev; - struct cyttsp4_mt_data *md = &cd->md; - int rc = 0; - - if (!md->si) - return 0; - - mutex_lock(&md->report_lock); - if (!md->is_suspended) { - /* core handles handshake */ - rc = cyttsp4_xy_worker(cd); - } else { - dev_vdbg(dev, "%s: Ignoring report while suspended\n", - __func__); - } - mutex_unlock(&md->report_lock); - if (rc < 0) - dev_err(dev, "%s: xy_worker error r=%d\n", __func__, rc); - - return rc; -} - -static irqreturn_t cyttsp4_irq(int irq, void *handle) -{ - struct cyttsp4 *cd = handle; - struct device *dev = cd->dev; - enum cyttsp4_mode cur_mode; - u8 cmd_ofs = cd->sysinfo.si_ofs.cmd_ofs; - u8 mode[3]; - int rc; - - /* - * Check whether this IRQ should be ignored (external) - * This should be the very first thing to check since - * ignore_irq 
may be set for a very short period of time - */ - if (atomic_read(&cd->ignore_irq)) { - dev_vdbg(dev, "%s: Ignoring IRQ\n", __func__); - return IRQ_HANDLED; - } - - dev_dbg(dev, "%s int:0x%x\n", __func__, cd->int_status); - - mutex_lock(&cd->system_lock); - - /* Just to debug */ - if (cd->sleep_state == SS_SLEEP_ON || cd->sleep_state == SS_SLEEPING) - dev_vdbg(dev, "%s: Received IRQ while in sleep\n", __func__); - - rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), mode); - if (rc) { - dev_err(cd->dev, "%s: Fail read adapter r=%d\n", __func__, rc); - goto cyttsp4_irq_exit; - } - dev_vdbg(dev, "%s mode[0-2]:0x%X 0x%X 0x%X\n", __func__, - mode[0], mode[1], mode[2]); - - if (IS_BOOTLOADER(mode[0], mode[1])) { - cur_mode = CY_MODE_BOOTLOADER; - dev_vdbg(dev, "%s: bl running\n", __func__); - if (cd->mode == CY_MODE_BOOTLOADER) { - /* Signal bootloader heartbeat heard */ - wake_up(&cd->wait_q); - goto cyttsp4_irq_exit; - } - - /* switch to bootloader */ - dev_dbg(dev, "%s: restart switch to bl m=%d -> m=%d\n", - __func__, cd->mode, cur_mode); - - /* catch operation->bl glitch */ - if (cd->mode != CY_MODE_UNKNOWN) { - /* Incase startup_state do not let startup_() */ - cd->mode = CY_MODE_UNKNOWN; - cyttsp4_queue_startup_(cd); - goto cyttsp4_irq_exit; - } - - /* - * do not wake thread on this switch since - * it is possible to get an early heartbeat - * prior to performing the reset - */ - cd->mode = cur_mode; - - goto cyttsp4_irq_exit; - } - - switch (mode[0] & CY_HST_MODE) { - case CY_HST_OPERATE: - cur_mode = CY_MODE_OPERATIONAL; - dev_vdbg(dev, "%s: operational\n", __func__); - break; - case CY_HST_CAT: - cur_mode = CY_MODE_CAT; - dev_vdbg(dev, "%s: CaT\n", __func__); - break; - case CY_HST_SYSINFO: - cur_mode = CY_MODE_SYSINFO; - dev_vdbg(dev, "%s: sysinfo\n", __func__); - break; - default: - cur_mode = CY_MODE_UNKNOWN; - dev_err(dev, "%s: unknown HST mode 0x%02X\n", __func__, - mode[0]); - break; - } - - /* Check whether this IRQ should be ignored (internal) */ - if (cd->int_status & CY_INT_IGNORE) { - dev_vdbg(dev, "%s: Ignoring IRQ\n", __func__); - goto cyttsp4_irq_exit; - } - - /* Check for wake up interrupt */ - if (cd->int_status & CY_INT_AWAKE) { - cd->int_status &= ~CY_INT_AWAKE; - wake_up(&cd->wait_q); - dev_vdbg(dev, "%s: Received wake up interrupt\n", __func__); - goto cyttsp4_irq_handshake; - } - - /* Expecting mode change interrupt */ - if ((cd->int_status & CY_INT_MODE_CHANGE) - && (mode[0] & CY_HST_MODE_CHANGE) == 0) { - cd->int_status &= ~CY_INT_MODE_CHANGE; - dev_dbg(dev, "%s: finish mode switch m=%d -> m=%d\n", - __func__, cd->mode, cur_mode); - cd->mode = cur_mode; - wake_up(&cd->wait_q); - goto cyttsp4_irq_handshake; - } - - /* compare current core mode to current device mode */ - dev_vdbg(dev, "%s: cd->mode=%d cur_mode=%d\n", - __func__, cd->mode, cur_mode); - if ((mode[0] & CY_HST_MODE_CHANGE) == 0 && cd->mode != cur_mode) { - /* Unexpected mode change occurred */ - dev_err(dev, "%s %d->%d 0x%x\n", __func__, cd->mode, - cur_mode, cd->int_status); - dev_dbg(dev, "%s: Unexpected mode change, startup\n", - __func__); - cyttsp4_queue_startup_(cd); - goto cyttsp4_irq_exit; - } - - /* Expecting command complete interrupt */ - dev_vdbg(dev, "%s: command byte:0x%x\n", __func__, mode[cmd_ofs]); - if ((cd->int_status & CY_INT_EXEC_CMD) - && mode[cmd_ofs] & CY_CMD_COMPLETE) { - cd->int_status &= ~CY_INT_EXEC_CMD; - dev_vdbg(dev, "%s: Received command complete interrupt\n", - __func__); - wake_up(&cd->wait_q); - /* - * It is possible to receive a single interrupt for - * command 
complete and touch/button status report. - * Continue processing for a possible status report. - */ - } - - /* This should be status report, read status regs */ - if (cd->mode == CY_MODE_OPERATIONAL) { - dev_vdbg(dev, "%s: Read status registers\n", __func__); - rc = cyttsp4_load_status_regs(cd); - if (rc < 0) - dev_err(dev, "%s: fail read mode regs r=%d\n", - __func__, rc); - } - - cyttsp4_mt_attention(cd); - -cyttsp4_irq_handshake: - /* handshake the event */ - dev_vdbg(dev, "%s: Handshake mode=0x%02X r=%d\n", - __func__, mode[0], rc); - rc = cyttsp4_handshake(cd, mode[0]); - if (rc < 0) - dev_err(dev, "%s: Fail handshake mode=0x%02X r=%d\n", - __func__, mode[0], rc); - - /* - * a non-zero udelay period is required for using - * IRQF_TRIGGER_LOW in order to delay until the - * device completes isr deassert - */ - udelay(cd->cpdata->level_irq_udelay); - -cyttsp4_irq_exit: - mutex_unlock(&cd->system_lock); - return IRQ_HANDLED; -} - -static void cyttsp4_start_wd_timer(struct cyttsp4 *cd) -{ - if (!CY_WATCHDOG_TIMEOUT) - return; - - mod_timer(&cd->watchdog_timer, jiffies + - msecs_to_jiffies(CY_WATCHDOG_TIMEOUT)); -} - -static void cyttsp4_stop_wd_timer(struct cyttsp4 *cd) -{ - if (!CY_WATCHDOG_TIMEOUT) - return; - - /* - * Ensure we wait until the watchdog timer - * running on a different CPU finishes - */ - timer_shutdown_sync(&cd->watchdog_timer); - cancel_work_sync(&cd->watchdog_work); -} - -static void cyttsp4_watchdog_timer(struct timer_list *t) -{ - struct cyttsp4 *cd = from_timer(cd, t, watchdog_timer); - - dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__); - - schedule_work(&cd->watchdog_work); - - return; -} - -static int cyttsp4_request_exclusive(struct cyttsp4 *cd, void *ownptr, - int timeout_ms) -{ - int t = msecs_to_jiffies(timeout_ms); - bool with_timeout = (timeout_ms != 0); - - mutex_lock(&cd->system_lock); - if (!cd->exclusive_dev && cd->exclusive_waits == 0) { - cd->exclusive_dev = ownptr; - goto exit; - } - - cd->exclusive_waits++; -wait: - mutex_unlock(&cd->system_lock); - if (with_timeout) { - t = wait_event_timeout(cd->wait_q, !cd->exclusive_dev, t); - if (IS_TMO(t)) { - dev_err(cd->dev, "%s: tmo waiting exclusive access\n", - __func__); - mutex_lock(&cd->system_lock); - cd->exclusive_waits--; - mutex_unlock(&cd->system_lock); - return -ETIME; - } - } else { - wait_event(cd->wait_q, !cd->exclusive_dev); - } - mutex_lock(&cd->system_lock); - if (cd->exclusive_dev) - goto wait; - cd->exclusive_dev = ownptr; - cd->exclusive_waits--; -exit: - mutex_unlock(&cd->system_lock); - - return 0; -} - -/* - * returns error if was not owned - */ -static int cyttsp4_release_exclusive(struct cyttsp4 *cd, void *ownptr) -{ - mutex_lock(&cd->system_lock); - if (cd->exclusive_dev != ownptr) { - mutex_unlock(&cd->system_lock); - return -EINVAL; - } - - dev_vdbg(cd->dev, "%s: exclusive_dev %p freed\n", - __func__, cd->exclusive_dev); - cd->exclusive_dev = NULL; - wake_up(&cd->wait_q); - mutex_unlock(&cd->system_lock); - return 0; -} - -static int cyttsp4_wait_bl_heartbeat(struct cyttsp4 *cd) -{ - long t; - int rc = 0; - - /* wait heartbeat */ - dev_vdbg(cd->dev, "%s: wait heartbeat...\n", __func__); - t = wait_event_timeout(cd->wait_q, cd->mode == CY_MODE_BOOTLOADER, - msecs_to_jiffies(CY_CORE_RESET_AND_WAIT_TIMEOUT)); - if (IS_TMO(t)) { - dev_err(cd->dev, "%s: tmo waiting bl heartbeat cd->mode=%d\n", - __func__, cd->mode); - rc = -ETIME; - } - - return rc; -} - -static int cyttsp4_wait_sysinfo_mode(struct cyttsp4 *cd) -{ - long t; - - dev_vdbg(cd->dev, "%s: wait sysinfo...\n", 
__func__); - - t = wait_event_timeout(cd->wait_q, cd->mode == CY_MODE_SYSINFO, - msecs_to_jiffies(CY_CORE_MODE_CHANGE_TIMEOUT)); - if (IS_TMO(t)) { - dev_err(cd->dev, "%s: tmo waiting exit bl cd->mode=%d\n", - __func__, cd->mode); - mutex_lock(&cd->system_lock); - cd->int_status &= ~CY_INT_MODE_CHANGE; - mutex_unlock(&cd->system_lock); - return -ETIME; - } - - return 0; -} - -static int cyttsp4_reset_and_wait(struct cyttsp4 *cd) -{ - int rc; - - /* reset hardware */ - mutex_lock(&cd->system_lock); - dev_dbg(cd->dev, "%s: reset hw...\n", __func__); - rc = cyttsp4_hw_reset(cd); - cd->mode = CY_MODE_UNKNOWN; - mutex_unlock(&cd->system_lock); - if (rc < 0) { - dev_err(cd->dev, "%s:Fail hw reset r=%d\n", __func__, rc); - return rc; - } - - return cyttsp4_wait_bl_heartbeat(cd); -} - -/* - * returns err if refused or timeout; block until mode change complete - * bit is set (mode change interrupt) - */ -static int cyttsp4_set_mode(struct cyttsp4 *cd, int new_mode) -{ - u8 new_dev_mode; - u8 mode; - long t; - int rc; - - switch (new_mode) { - case CY_MODE_OPERATIONAL: - new_dev_mode = CY_HST_OPERATE; - break; - case CY_MODE_SYSINFO: - new_dev_mode = CY_HST_SYSINFO; - break; - case CY_MODE_CAT: - new_dev_mode = CY_HST_CAT; - break; - default: - dev_err(cd->dev, "%s: invalid mode: %02X(%d)\n", - __func__, new_mode, new_mode); - return -EINVAL; - } - - /* change mode */ - dev_dbg(cd->dev, "%s: %s=%p new_dev_mode=%02X new_mode=%d\n", - __func__, "have exclusive", cd->exclusive_dev, - new_dev_mode, new_mode); - - mutex_lock(&cd->system_lock); - rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode); - if (rc < 0) { - mutex_unlock(&cd->system_lock); - dev_err(cd->dev, "%s: Fail read mode r=%d\n", - __func__, rc); - goto exit; - } - - /* Clear device mode bits and set to new mode */ - mode &= ~CY_HST_MODE; - mode |= new_dev_mode | CY_HST_MODE_CHANGE; - - cd->int_status |= CY_INT_MODE_CHANGE; - rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(mode), &mode); - mutex_unlock(&cd->system_lock); - if (rc < 0) { - dev_err(cd->dev, "%s: Fail write mode change r=%d\n", - __func__, rc); - goto exit; - } - - /* wait for mode change done interrupt */ - t = wait_event_timeout(cd->wait_q, - (cd->int_status & CY_INT_MODE_CHANGE) == 0, - msecs_to_jiffies(CY_CORE_MODE_CHANGE_TIMEOUT)); - dev_dbg(cd->dev, "%s: back from wait t=%ld cd->mode=%d\n", - __func__, t, cd->mode); - - if (IS_TMO(t)) { - dev_err(cd->dev, "%s: %s\n", __func__, - "tmo waiting mode change"); - mutex_lock(&cd->system_lock); - cd->int_status &= ~CY_INT_MODE_CHANGE; - mutex_unlock(&cd->system_lock); - rc = -EINVAL; - } - -exit: - return rc; -} - -static void cyttsp4_watchdog_work(struct work_struct *work) -{ - struct cyttsp4 *cd = - container_of(work, struct cyttsp4, watchdog_work); - u8 *mode; - int retval; - - mutex_lock(&cd->system_lock); - retval = cyttsp4_load_status_regs(cd); - if (retval < 0) { - dev_err(cd->dev, - "%s: failed to access device in watchdog timer r=%d\n", - __func__, retval); - cyttsp4_queue_startup_(cd); - goto cyttsp4_timer_watchdog_exit_error; - } - mode = &cd->sysinfo.xy_mode[CY_REG_BASE]; - if (IS_BOOTLOADER(mode[0], mode[1])) { - dev_err(cd->dev, - "%s: device found in bootloader mode when operational mode\n", - __func__); - cyttsp4_queue_startup_(cd); - goto cyttsp4_timer_watchdog_exit_error; - } - - cyttsp4_start_wd_timer(cd); -cyttsp4_timer_watchdog_exit_error: - mutex_unlock(&cd->system_lock); - return; -} - -static int cyttsp4_core_sleep_(struct cyttsp4 *cd) -{ - enum cyttsp4_sleep_state ss = SS_SLEEP_ON; - enum 
cyttsp4_int_state int_status = CY_INT_IGNORE; - int rc = 0; - u8 mode[2]; - - /* Already in sleep mode? */ - mutex_lock(&cd->system_lock); - if (cd->sleep_state == SS_SLEEP_ON) { - mutex_unlock(&cd->system_lock); - return 0; - } - cd->sleep_state = SS_SLEEPING; - mutex_unlock(&cd->system_lock); - - cyttsp4_stop_wd_timer(cd); - - /* Wait until currently running IRQ handler exits and disable IRQ */ - disable_irq(cd->irq); - - dev_vdbg(cd->dev, "%s: write DEEP SLEEP...\n", __func__); - mutex_lock(&cd->system_lock); - rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode); - if (rc) { - mutex_unlock(&cd->system_lock); - dev_err(cd->dev, "%s: Fail read adapter r=%d\n", __func__, rc); - goto error; - } - - if (IS_BOOTLOADER(mode[0], mode[1])) { - mutex_unlock(&cd->system_lock); - dev_err(cd->dev, "%s: Device in BOOTLOADER mode.\n", __func__); - rc = -EINVAL; - goto error; - } - - mode[0] |= CY_HST_SLEEP; - rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(mode[0]), &mode[0]); - mutex_unlock(&cd->system_lock); - if (rc) { - dev_err(cd->dev, "%s: Fail write adapter r=%d\n", __func__, rc); - goto error; - } - dev_vdbg(cd->dev, "%s: write DEEP SLEEP succeeded\n", __func__); - - if (cd->cpdata->power) { - dev_dbg(cd->dev, "%s: Power down HW\n", __func__); - rc = cd->cpdata->power(cd->cpdata, 0, cd->dev, &cd->ignore_irq); - } else { - dev_dbg(cd->dev, "%s: No power function\n", __func__); - rc = 0; - } - if (rc < 0) { - dev_err(cd->dev, "%s: HW Power down fails r=%d\n", - __func__, rc); - goto error; - } - - /* Give time to FW to sleep */ - msleep(50); - - goto exit; - -error: - ss = SS_SLEEP_OFF; - int_status = CY_INT_NONE; - cyttsp4_start_wd_timer(cd); - -exit: - mutex_lock(&cd->system_lock); - cd->sleep_state = ss; - cd->int_status |= int_status; - mutex_unlock(&cd->system_lock); - enable_irq(cd->irq); - return rc; -} - -static int cyttsp4_startup_(struct cyttsp4 *cd) -{ - int retry = CY_CORE_STARTUP_RETRY_COUNT; - int rc; - - cyttsp4_stop_wd_timer(cd); - -reset: - if (retry != CY_CORE_STARTUP_RETRY_COUNT) - dev_dbg(cd->dev, "%s: Retry %d\n", __func__, - CY_CORE_STARTUP_RETRY_COUNT - retry); - - /* reset hardware and wait for heartbeat */ - rc = cyttsp4_reset_and_wait(cd); - if (rc < 0) { - dev_err(cd->dev, "%s: Error on h/w reset r=%d\n", __func__, rc); - if (retry--) - goto reset; - goto exit; - } - - /* exit bl into sysinfo mode */ - dev_vdbg(cd->dev, "%s: write exit ldr...\n", __func__); - mutex_lock(&cd->system_lock); - cd->int_status &= ~CY_INT_IGNORE; - cd->int_status |= CY_INT_MODE_CHANGE; - - rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(ldr_exit), - (u8 *)ldr_exit); - mutex_unlock(&cd->system_lock); - if (rc < 0) { - dev_err(cd->dev, "%s: Fail write r=%d\n", __func__, rc); - if (retry--) - goto reset; - goto exit; - } - - rc = cyttsp4_wait_sysinfo_mode(cd); - if (rc < 0) { - u8 buf[sizeof(ldr_err_app)]; - int rc1; - - /* Check for invalid/corrupted touch application */ - rc1 = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(ldr_err_app), - buf); - if (rc1) { - dev_err(cd->dev, "%s: Fail read r=%d\n", __func__, rc1); - } else if (!memcmp(buf, ldr_err_app, sizeof(ldr_err_app))) { - dev_err(cd->dev, "%s: Error launching touch application\n", - __func__); - mutex_lock(&cd->system_lock); - cd->invalid_touch_app = true; - mutex_unlock(&cd->system_lock); - goto exit_no_wd; - } - - if (retry--) - goto reset; - goto exit; - } - - mutex_lock(&cd->system_lock); - cd->invalid_touch_app = false; - mutex_unlock(&cd->system_lock); - - /* read sysinfo data */ - dev_vdbg(cd->dev, "%s: get sysinfo 
regs..\n", __func__); - rc = cyttsp4_get_sysinfo_regs(cd); - if (rc < 0) { - dev_err(cd->dev, "%s: failed to get sysinfo regs rc=%d\n", - __func__, rc); - if (retry--) - goto reset; - goto exit; - } - - rc = cyttsp4_set_mode(cd, CY_MODE_OPERATIONAL); - if (rc < 0) { - dev_err(cd->dev, "%s: failed to set mode to operational rc=%d\n", - __func__, rc); - if (retry--) - goto reset; - goto exit; - } - - cyttsp4_lift_all(&cd->md); - - /* restore to sleep if was suspended */ - mutex_lock(&cd->system_lock); - if (cd->sleep_state == SS_SLEEP_ON) { - cd->sleep_state = SS_SLEEP_OFF; - mutex_unlock(&cd->system_lock); - cyttsp4_core_sleep_(cd); - goto exit_no_wd; - } - mutex_unlock(&cd->system_lock); - -exit: - cyttsp4_start_wd_timer(cd); -exit_no_wd: - return rc; -} - -static int cyttsp4_startup(struct cyttsp4 *cd) -{ - int rc; - - mutex_lock(&cd->system_lock); - cd->startup_state = STARTUP_RUNNING; - mutex_unlock(&cd->system_lock); - - rc = cyttsp4_request_exclusive(cd, cd->dev, - CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT); - if (rc < 0) { - dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n", - __func__, cd->exclusive_dev, cd->dev); - goto exit; - } - - rc = cyttsp4_startup_(cd); - - if (cyttsp4_release_exclusive(cd, cd->dev) < 0) - /* Don't return fail code, mode is already changed. */ - dev_err(cd->dev, "%s: fail to release exclusive\n", __func__); - else - dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__); - -exit: - mutex_lock(&cd->system_lock); - cd->startup_state = STARTUP_NONE; - mutex_unlock(&cd->system_lock); - - /* Wake the waiters for end of startup */ - wake_up(&cd->wait_q); - - return rc; -} - -static void cyttsp4_startup_work_function(struct work_struct *work) -{ - struct cyttsp4 *cd = container_of(work, struct cyttsp4, startup_work); - int rc; - - rc = cyttsp4_startup(cd); - if (rc < 0) - dev_err(cd->dev, "%s: Fail queued startup r=%d\n", - __func__, rc); -} - -static void cyttsp4_free_si_ptrs(struct cyttsp4 *cd) -{ - struct cyttsp4_sysinfo *si = &cd->sysinfo; - - if (!si) - return; - - kfree(si->si_ptrs.cydata); - kfree(si->si_ptrs.test); - kfree(si->si_ptrs.pcfg); - kfree(si->si_ptrs.opcfg); - kfree(si->si_ptrs.ddata); - kfree(si->si_ptrs.mdata); - kfree(si->btn); - kfree(si->xy_mode); - kfree(si->xy_data); - kfree(si->btn_rec_data); -} - -static int cyttsp4_core_sleep(struct cyttsp4 *cd) -{ - int rc; - - rc = cyttsp4_request_exclusive(cd, cd->dev, - CY_CORE_SLEEP_REQUEST_EXCLUSIVE_TIMEOUT); - if (rc < 0) { - dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n", - __func__, cd->exclusive_dev, cd->dev); - return 0; - } - - rc = cyttsp4_core_sleep_(cd); - - if (cyttsp4_release_exclusive(cd, cd->dev) < 0) - dev_err(cd->dev, "%s: fail to release exclusive\n", __func__); - else - dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__); - - return rc; -} - -static int cyttsp4_core_wake_(struct cyttsp4 *cd) -{ - struct device *dev = cd->dev; - int rc; - u8 mode; - int t; - - /* Already woken? 
*/ - mutex_lock(&cd->system_lock); - if (cd->sleep_state == SS_SLEEP_OFF) { - mutex_unlock(&cd->system_lock); - return 0; - } - cd->int_status &= ~CY_INT_IGNORE; - cd->int_status |= CY_INT_AWAKE; - cd->sleep_state = SS_WAKING; - - if (cd->cpdata->power) { - dev_dbg(dev, "%s: Power up HW\n", __func__); - rc = cd->cpdata->power(cd->cpdata, 1, dev, &cd->ignore_irq); - } else { - dev_dbg(dev, "%s: No power function\n", __func__); - rc = -ENOSYS; - } - if (rc < 0) { - dev_err(dev, "%s: HW Power up fails r=%d\n", - __func__, rc); - - /* Initiate a read transaction to wake up */ - cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode); - } else - dev_vdbg(cd->dev, "%s: HW power up succeeds\n", - __func__); - mutex_unlock(&cd->system_lock); - - t = wait_event_timeout(cd->wait_q, - (cd->int_status & CY_INT_AWAKE) == 0, - msecs_to_jiffies(CY_CORE_WAKEUP_TIMEOUT)); - if (IS_TMO(t)) { - dev_err(dev, "%s: TMO waiting for wakeup\n", __func__); - mutex_lock(&cd->system_lock); - cd->int_status &= ~CY_INT_AWAKE; - /* Try starting up */ - cyttsp4_queue_startup_(cd); - mutex_unlock(&cd->system_lock); - } - - mutex_lock(&cd->system_lock); - cd->sleep_state = SS_SLEEP_OFF; - mutex_unlock(&cd->system_lock); - - cyttsp4_start_wd_timer(cd); - - return 0; -} - -static int cyttsp4_core_wake(struct cyttsp4 *cd) -{ - int rc; - - rc = cyttsp4_request_exclusive(cd, cd->dev, - CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT); - if (rc < 0) { - dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n", - __func__, cd->exclusive_dev, cd->dev); - return 0; - } - - rc = cyttsp4_core_wake_(cd); - - if (cyttsp4_release_exclusive(cd, cd->dev) < 0) - dev_err(cd->dev, "%s: fail to release exclusive\n", __func__); - else - dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__); - - return rc; -} - -static int cyttsp4_core_suspend(struct device *dev) -{ - struct cyttsp4 *cd = dev_get_drvdata(dev); - struct cyttsp4_mt_data *md = &cd->md; - int rc; - - md->is_suspended = true; - - rc = cyttsp4_core_sleep(cd); - if (rc < 0) { - dev_err(dev, "%s: Error on sleep\n", __func__); - return -EAGAIN; - } - return 0; -} - -static int cyttsp4_core_resume(struct device *dev) -{ - struct cyttsp4 *cd = dev_get_drvdata(dev); - struct cyttsp4_mt_data *md = &cd->md; - int rc; - - md->is_suspended = false; - - rc = cyttsp4_core_wake(cd); - if (rc < 0) { - dev_err(dev, "%s: Error on wake\n", __func__); - return -EAGAIN; - } - - return 0; -} - -EXPORT_GPL_RUNTIME_DEV_PM_OPS(cyttsp4_pm_ops, - cyttsp4_core_suspend, cyttsp4_core_resume, NULL); - -static int cyttsp4_mt_open(struct input_dev *input) -{ - pm_runtime_get(input->dev.parent); - return 0; -} - -static void cyttsp4_mt_close(struct input_dev *input) -{ - struct cyttsp4_mt_data *md = input_get_drvdata(input); - mutex_lock(&md->report_lock); - if (!md->is_suspended) - pm_runtime_put(input->dev.parent); - mutex_unlock(&md->report_lock); -} - - -static int cyttsp4_setup_input_device(struct cyttsp4 *cd) -{ - struct device *dev = cd->dev; - struct cyttsp4_mt_data *md = &cd->md; - int signal = CY_IGNORE_VALUE; - int max_x, max_y, max_p, min, max; - int max_x_tmp, max_y_tmp; - int i; - int rc; - - dev_vdbg(dev, "%s: Initialize event signals\n", __func__); - __set_bit(EV_ABS, md->input->evbit); - __set_bit(EV_REL, md->input->evbit); - __set_bit(EV_KEY, md->input->evbit); - - max_x_tmp = md->si->si_ofs.max_x; - max_y_tmp = md->si->si_ofs.max_y; - - /* get maximum values from the sysinfo data */ - if (md->pdata->flags & CY_FLAG_FLIP) { - max_x = max_y_tmp - 1; - max_y = max_x_tmp - 1; - } else { - max_x = max_x_tmp - 
1; - max_y = max_y_tmp - 1; - } - max_p = md->si->si_ofs.max_p; - - /* set event signal capabilities */ - for (i = 0; i < (md->pdata->frmwrk->size / CY_NUM_ABS_SET); i++) { - signal = md->pdata->frmwrk->abs - [(i * CY_NUM_ABS_SET) + CY_SIGNAL_OST]; - if (signal != CY_IGNORE_VALUE) { - __set_bit(signal, md->input->absbit); - min = md->pdata->frmwrk->abs - [(i * CY_NUM_ABS_SET) + CY_MIN_OST]; - max = md->pdata->frmwrk->abs - [(i * CY_NUM_ABS_SET) + CY_MAX_OST]; - if (i == CY_ABS_ID_OST) { - /* shift track ids down to start at 0 */ - max = max - min; - min = min - min; - } else if (i == CY_ABS_X_OST) - max = max_x; - else if (i == CY_ABS_Y_OST) - max = max_y; - else if (i == CY_ABS_P_OST) - max = max_p; - input_set_abs_params(md->input, signal, min, max, - md->pdata->frmwrk->abs - [(i * CY_NUM_ABS_SET) + CY_FUZZ_OST], - md->pdata->frmwrk->abs - [(i * CY_NUM_ABS_SET) + CY_FLAT_OST]); - dev_dbg(dev, "%s: register signal=%02X min=%d max=%d\n", - __func__, signal, min, max); - if ((i == CY_ABS_ID_OST) && - (md->si->si_ofs.tch_rec_size < - CY_TMA4XX_TCH_REC_SIZE)) - break; - } - } - - input_mt_init_slots(md->input, md->si->si_ofs.tch_abs[CY_TCH_T].max, - INPUT_MT_DIRECT); - rc = input_register_device(md->input); - if (rc < 0) - dev_err(dev, "%s: Error, failed register input device r=%d\n", - __func__, rc); - return rc; -} - -static int cyttsp4_mt_probe(struct cyttsp4 *cd) -{ - struct device *dev = cd->dev; - struct cyttsp4_mt_data *md = &cd->md; - struct cyttsp4_mt_platform_data *pdata = cd->pdata->mt_pdata; - int rc = 0; - - mutex_init(&md->report_lock); - md->pdata = pdata; - /* Create the input device and register it. */ - dev_vdbg(dev, "%s: Create the input device and register it\n", - __func__); - md->input = input_allocate_device(); - if (md->input == NULL) { - dev_err(dev, "%s: Error, failed to allocate input device\n", - __func__); - rc = -ENOSYS; - goto error_alloc_failed; - } - - md->input->name = pdata->inp_dev_name; - scnprintf(md->phys, sizeof(md->phys)-1, "%s", dev_name(dev)); - md->input->phys = md->phys; - md->input->id.bustype = cd->bus_ops->bustype; - md->input->dev.parent = dev; - md->input->open = cyttsp4_mt_open; - md->input->close = cyttsp4_mt_close; - input_set_drvdata(md->input, md); - - /* get sysinfo */ - md->si = &cd->sysinfo; - - rc = cyttsp4_setup_input_device(cd); - if (rc) - goto error_init_input; - - return 0; - -error_init_input: - input_free_device(md->input); -error_alloc_failed: - dev_err(dev, "%s failed.\n", __func__); - return rc; -} - -struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops, - struct device *dev, u16 irq, size_t xfer_buf_size) -{ - struct cyttsp4 *cd; - struct cyttsp4_platform_data *pdata = dev_get_platdata(dev); - unsigned long irq_flags; - int rc = 0; - - if (!pdata || !pdata->core_pdata || !pdata->mt_pdata) { - dev_err(dev, "%s: Missing platform data\n", __func__); - rc = -ENODEV; - goto error_no_pdata; - } - - cd = kzalloc(sizeof(*cd), GFP_KERNEL); - if (!cd) { - dev_err(dev, "%s: Error, kzalloc\n", __func__); - rc = -ENOMEM; - goto error_alloc_data; - } - - cd->xfer_buf = kzalloc(xfer_buf_size, GFP_KERNEL); - if (!cd->xfer_buf) { - dev_err(dev, "%s: Error, kzalloc\n", __func__); - rc = -ENOMEM; - goto error_free_cd; - } - - /* Initialize device info */ - cd->dev = dev; - cd->pdata = pdata; - cd->cpdata = pdata->core_pdata; - cd->bus_ops = ops; - - /* Initialize mutexes and spinlocks */ - mutex_init(&cd->system_lock); - mutex_init(&cd->adap_lock); - - /* Initialize wait queue */ - init_waitqueue_head(&cd->wait_q); - - /* 
Initialize works */ - INIT_WORK(&cd->startup_work, cyttsp4_startup_work_function); - INIT_WORK(&cd->watchdog_work, cyttsp4_watchdog_work); - - /* Initialize IRQ */ - cd->irq = gpio_to_irq(cd->cpdata->irq_gpio); - if (cd->irq < 0) { - rc = -EINVAL; - goto error_free_xfer; - } - - dev_set_drvdata(dev, cd); - - /* Call platform init function */ - if (cd->cpdata->init) { - dev_dbg(cd->dev, "%s: Init HW\n", __func__); - rc = cd->cpdata->init(cd->cpdata, 1, cd->dev); - } else { - dev_dbg(cd->dev, "%s: No HW INIT function\n", __func__); - rc = 0; - } - if (rc < 0) - dev_err(cd->dev, "%s: HW Init fail r=%d\n", __func__, rc); - - dev_dbg(dev, "%s: initialize threaded irq=%d\n", __func__, cd->irq); - if (cd->cpdata->level_irq_udelay > 0) - /* use level triggered interrupts */ - irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT; - else - /* use edge triggered interrupts */ - irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; - - rc = request_threaded_irq(cd->irq, NULL, cyttsp4_irq, irq_flags, - dev_name(dev), cd); - if (rc < 0) { - dev_err(dev, "%s: Error, could not request irq\n", __func__); - goto error_request_irq; - } - - /* Setup watchdog timer */ - timer_setup(&cd->watchdog_timer, cyttsp4_watchdog_timer, 0); - - /* - * call startup directly to ensure that the device - * is tested before leaving the probe - */ - rc = cyttsp4_startup(cd); - - /* Do not fail probe if startup fails but the device is detected */ - if (rc < 0 && cd->mode == CY_MODE_UNKNOWN) { - dev_err(cd->dev, "%s: Fail initial startup r=%d\n", - __func__, rc); - goto error_startup; - } - - rc = cyttsp4_mt_probe(cd); - if (rc < 0) { - dev_err(dev, "%s: Error, fail mt probe\n", __func__); - goto error_startup; - } - - pm_runtime_enable(dev); - - return cd; - -error_startup: - cancel_work_sync(&cd->startup_work); - cyttsp4_stop_wd_timer(cd); - pm_runtime_disable(dev); - cyttsp4_free_si_ptrs(cd); - free_irq(cd->irq, cd); -error_request_irq: - if (cd->cpdata->init) - cd->cpdata->init(cd->cpdata, 0, dev); -error_free_xfer: - kfree(cd->xfer_buf); -error_free_cd: - kfree(cd); -error_alloc_data: -error_no_pdata: - dev_err(dev, "%s failed.\n", __func__); - return ERR_PTR(rc); -} -EXPORT_SYMBOL_GPL(cyttsp4_probe); - -static void cyttsp4_mt_release(struct cyttsp4_mt_data *md) -{ - input_unregister_device(md->input); - input_set_drvdata(md->input, NULL); -} - -int cyttsp4_remove(struct cyttsp4 *cd) -{ - struct device *dev = cd->dev; - - cyttsp4_mt_release(&cd->md); - - /* - * Suspend the device before freeing the startup_work and stopping - * the watchdog since sleep function restarts watchdog on failure - */ - pm_runtime_suspend(dev); - pm_runtime_disable(dev); - - cancel_work_sync(&cd->startup_work); - - cyttsp4_stop_wd_timer(cd); - - free_irq(cd->irq, cd); - if (cd->cpdata->init) - cd->cpdata->init(cd->cpdata, 0, dev); - cyttsp4_free_si_ptrs(cd); - kfree(cd); - return 0; -} -EXPORT_SYMBOL_GPL(cyttsp4_remove); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen core driver"); -MODULE_AUTHOR("Cypress"); diff --git a/drivers/input/touchscreen/cyttsp4_core.h b/drivers/input/touchscreen/cyttsp4_core.h deleted file mode 100644 index 6262f6e4507519..00000000000000 --- a/drivers/input/touchscreen/cyttsp4_core.h +++ /dev/null @@ -1,448 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * cyttsp4_core.h - * Cypress TrueTouch(TM) Standard Product V4 Core driver module. - * For use with Cypress Txx4xx parts. 
- * Supported parts include: - * TMA4XX - * TMA1036 - * - * Copyright (C) 2012 Cypress Semiconductor - * - * Contact Cypress Semiconductor at www.cypress.com - */ - -#ifndef _LINUX_CYTTSP4_CORE_H -#define _LINUX_CYTTSP4_CORE_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CY_REG_BASE 0x00 - -#define CY_POST_CODEL_WDG_RST 0x01 -#define CY_POST_CODEL_CFG_DATA_CRC_FAIL 0x02 -#define CY_POST_CODEL_PANEL_TEST_FAIL 0x04 - -#define CY_NUM_BTN_PER_REG 4 - -/* touch record system information offset masks and shifts */ -#define CY_BYTE_OFS_MASK 0x1F -#define CY_BOFS_MASK 0xE0 -#define CY_BOFS_SHIFT 5 - -#define CY_TMA1036_TCH_REC_SIZE 6 -#define CY_TMA4XX_TCH_REC_SIZE 9 -#define CY_TMA1036_MAX_TCH 0x0E -#define CY_TMA4XX_MAX_TCH 0x1E - -#define CY_NORMAL_ORIGIN 0 /* upper, left corner */ -#define CY_INVERT_ORIGIN 1 /* lower, right corner */ - -/* helpers */ -#define GET_NUM_TOUCHES(x) ((x) & 0x1F) -#define IS_LARGE_AREA(x) ((x) & 0x20) -#define IS_BAD_PKT(x) ((x) & 0x20) -#define IS_BOOTLOADER(hst_mode, reset_detect) \ - ((hst_mode) & 0x01 || (reset_detect) != 0) -#define IS_TMO(t) ((t) == 0) - - -enum cyttsp_cmd_bits { - CY_CMD_COMPLETE = (1 << 6), -}; - -/* Timeout in ms. */ -#define CY_WATCHDOG_TIMEOUT 1000 - -#define CY_MAX_PRINT_SIZE 512 -#ifdef VERBOSE_DEBUG -#define CY_MAX_PRBUF_SIZE PIPE_BUF -#define CY_PR_TRUNCATED " truncated..." -#endif - -enum cyttsp4_ic_grpnum { - CY_IC_GRPNUM_RESERVED, - CY_IC_GRPNUM_CMD_REGS, - CY_IC_GRPNUM_TCH_REP, - CY_IC_GRPNUM_DATA_REC, - CY_IC_GRPNUM_TEST_REC, - CY_IC_GRPNUM_PCFG_REC, - CY_IC_GRPNUM_TCH_PARM_VAL, - CY_IC_GRPNUM_TCH_PARM_SIZE, - CY_IC_GRPNUM_RESERVED1, - CY_IC_GRPNUM_RESERVED2, - CY_IC_GRPNUM_OPCFG_REC, - CY_IC_GRPNUM_DDATA_REC, - CY_IC_GRPNUM_MDATA_REC, - CY_IC_GRPNUM_TEST_REGS, - CY_IC_GRPNUM_BTN_KEYS, - CY_IC_GRPNUM_TTHE_REGS, - CY_IC_GRPNUM_NUM -}; - -enum cyttsp4_int_state { - CY_INT_NONE, - CY_INT_IGNORE = (1 << 0), - CY_INT_MODE_CHANGE = (1 << 1), - CY_INT_EXEC_CMD = (1 << 2), - CY_INT_AWAKE = (1 << 3), -}; - -enum cyttsp4_mode { - CY_MODE_UNKNOWN, - CY_MODE_BOOTLOADER = (1 << 1), - CY_MODE_OPERATIONAL = (1 << 2), - CY_MODE_SYSINFO = (1 << 3), - CY_MODE_CAT = (1 << 4), - CY_MODE_STARTUP = (1 << 5), - CY_MODE_LOADER = (1 << 6), - CY_MODE_CHANGE_MODE = (1 << 7), - CY_MODE_CHANGED = (1 << 8), - CY_MODE_CMD_COMPLETE = (1 << 9), -}; - -enum cyttsp4_sleep_state { - SS_SLEEP_OFF, - SS_SLEEP_ON, - SS_SLEEPING, - SS_WAKING, -}; - -enum cyttsp4_startup_state { - STARTUP_NONE, - STARTUP_QUEUED, - STARTUP_RUNNING, -}; - -#define CY_NUM_REVCTRL 8 -struct cyttsp4_cydata { - u8 ttpidh; - u8 ttpidl; - u8 fw_ver_major; - u8 fw_ver_minor; - u8 revctrl[CY_NUM_REVCTRL]; - u8 blver_major; - u8 blver_minor; - u8 jtag_si_id3; - u8 jtag_si_id2; - u8 jtag_si_id1; - u8 jtag_si_id0; - u8 mfgid_sz; - u8 cyito_idh; - u8 cyito_idl; - u8 cyito_verh; - u8 cyito_verl; - u8 ttsp_ver_major; - u8 ttsp_ver_minor; - u8 device_info; - u8 mfg_id[]; -} __packed; - -struct cyttsp4_test { - u8 post_codeh; - u8 post_codel; -} __packed; - -struct cyttsp4_pcfg { - u8 electrodes_x; - u8 electrodes_y; - u8 len_xh; - u8 len_xl; - u8 len_yh; - u8 len_yl; - u8 res_xh; - u8 res_xl; - u8 res_yh; - u8 res_yl; - u8 max_zh; - u8 max_zl; - u8 panel_info0; -} __packed; - -struct cyttsp4_tch_rec_params { - u8 loc; - u8 size; -} __packed; - -#define CY_NUM_TCH_FIELDS 7 -#define CY_NUM_EXT_TCH_FIELDS 3 -struct cyttsp4_opcfg { - u8 cmd_ofs; - u8 rep_ofs; - u8 rep_szh; - u8 rep_szl; - u8 num_btns; - u8 tt_stat_ofs; - u8 obj_cfg0; - u8 max_tchs; - 
u8 tch_rec_size; - struct cyttsp4_tch_rec_params tch_rec_old[CY_NUM_TCH_FIELDS]; - u8 btn_rec_size; /* btn record size (in bytes) */ - u8 btn_diff_ofs; /* btn data loc, diff counts */ - u8 btn_diff_size; /* btn size of diff counts (in bits) */ - struct cyttsp4_tch_rec_params tch_rec_new[CY_NUM_EXT_TCH_FIELDS]; -} __packed; - -struct cyttsp4_sysinfo_ptr { - struct cyttsp4_cydata *cydata; - struct cyttsp4_test *test; - struct cyttsp4_pcfg *pcfg; - struct cyttsp4_opcfg *opcfg; - struct cyttsp4_ddata *ddata; - struct cyttsp4_mdata *mdata; -} __packed; - -struct cyttsp4_sysinfo_data { - u8 hst_mode; - u8 reserved; - u8 map_szh; - u8 map_szl; - u8 cydata_ofsh; - u8 cydata_ofsl; - u8 test_ofsh; - u8 test_ofsl; - u8 pcfg_ofsh; - u8 pcfg_ofsl; - u8 opcfg_ofsh; - u8 opcfg_ofsl; - u8 ddata_ofsh; - u8 ddata_ofsl; - u8 mdata_ofsh; - u8 mdata_ofsl; -} __packed; - -enum cyttsp4_tch_abs { /* for ordering within the extracted touch data array */ - CY_TCH_X, /* X */ - CY_TCH_Y, /* Y */ - CY_TCH_P, /* P (Z) */ - CY_TCH_T, /* TOUCH ID */ - CY_TCH_E, /* EVENT ID */ - CY_TCH_O, /* OBJECT ID */ - CY_TCH_W, /* SIZE */ - CY_TCH_MAJ, /* TOUCH_MAJOR */ - CY_TCH_MIN, /* TOUCH_MINOR */ - CY_TCH_OR, /* ORIENTATION */ - CY_TCH_NUM_ABS -}; - -struct cyttsp4_touch { - int abs[CY_TCH_NUM_ABS]; -}; - -struct cyttsp4_tch_abs_params { - size_t ofs; /* abs byte offset */ - size_t size; /* size in bits */ - size_t max; /* max value */ - size_t bofs; /* bit offset */ -}; - -struct cyttsp4_sysinfo_ofs { - size_t chip_type; - size_t cmd_ofs; - size_t rep_ofs; - size_t rep_sz; - size_t num_btns; - size_t num_btn_regs; /* ceil(num_btns/4) */ - size_t tt_stat_ofs; - size_t tch_rec_size; - size_t obj_cfg0; - size_t max_tchs; - size_t mode_size; - size_t data_size; - size_t map_sz; - size_t max_x; - size_t x_origin; /* left or right corner */ - size_t max_y; - size_t y_origin; /* upper or lower corner */ - size_t max_p; - size_t cydata_ofs; - size_t test_ofs; - size_t pcfg_ofs; - size_t opcfg_ofs; - size_t ddata_ofs; - size_t mdata_ofs; - size_t cydata_size; - size_t test_size; - size_t pcfg_size; - size_t opcfg_size; - size_t ddata_size; - size_t mdata_size; - size_t btn_keys_size; - struct cyttsp4_tch_abs_params tch_abs[CY_TCH_NUM_ABS]; - size_t btn_rec_size; /* btn record size (in bytes) */ - size_t btn_diff_ofs;/* btn data loc ,diff counts, (Op-Mode byte ofs) */ - size_t btn_diff_size;/* btn size of diff counts (in bits) */ -}; - -enum cyttsp4_btn_state { - CY_BTN_RELEASED, - CY_BTN_PRESSED, - CY_BTN_NUM_STATE -}; - -struct cyttsp4_btn { - bool enabled; - int state; /* CY_BTN_PRESSED, CY_BTN_RELEASED */ - int key_code; -}; - -struct cyttsp4_sysinfo { - bool ready; - struct cyttsp4_sysinfo_data si_data; - struct cyttsp4_sysinfo_ptr si_ptrs; - struct cyttsp4_sysinfo_ofs si_ofs; - struct cyttsp4_btn *btn; /* button states */ - u8 *btn_rec_data; /* button diff count data */ - u8 *xy_mode; /* operational mode and status regs */ - u8 *xy_data; /* operational touch regs */ -}; - -struct cyttsp4_mt_data { - struct cyttsp4_mt_platform_data *pdata; - struct cyttsp4_sysinfo *si; - struct input_dev *input; - struct mutex report_lock; - bool is_suspended; - char phys[NAME_MAX]; - int num_prv_tch; -}; - -struct cyttsp4 { - struct device *dev; - struct mutex system_lock; - struct mutex adap_lock; - enum cyttsp4_mode mode; - enum cyttsp4_sleep_state sleep_state; - enum cyttsp4_startup_state startup_state; - int int_status; - wait_queue_head_t wait_q; - int irq; - struct work_struct startup_work; - struct work_struct watchdog_work; - struct 
timer_list watchdog_timer; - struct cyttsp4_sysinfo sysinfo; - void *exclusive_dev; - int exclusive_waits; - atomic_t ignore_irq; - bool invalid_touch_app; - struct cyttsp4_mt_data md; - struct cyttsp4_platform_data *pdata; - struct cyttsp4_core_platform_data *cpdata; - const struct cyttsp4_bus_ops *bus_ops; - u8 *xfer_buf; -#ifdef VERBOSE_DEBUG - u8 pr_buf[CY_MAX_PRBUF_SIZE]; -#endif -}; - -struct cyttsp4_bus_ops { - u16 bustype; - int (*write)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, - const void *values); - int (*read)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length, - void *values); -}; - -enum cyttsp4_hst_mode_bits { - CY_HST_TOGGLE = (1 << 7), - CY_HST_MODE_CHANGE = (1 << 3), - CY_HST_MODE = (7 << 4), - CY_HST_OPERATE = (0 << 4), - CY_HST_SYSINFO = (1 << 4), - CY_HST_CAT = (2 << 4), - CY_HST_LOWPOW = (1 << 2), - CY_HST_SLEEP = (1 << 1), - CY_HST_RESET = (1 << 0), -}; - -/* abs settings */ -#define CY_IGNORE_VALUE 0xFFFF - -/* abs signal capabilities offsets in the frameworks array */ -enum cyttsp4_sig_caps { - CY_SIGNAL_OST, - CY_MIN_OST, - CY_MAX_OST, - CY_FUZZ_OST, - CY_FLAT_OST, - CY_NUM_ABS_SET /* number of signal capability fields */ -}; - -/* abs axis signal offsets in the framworks array */ -enum cyttsp4_sig_ost { - CY_ABS_X_OST, - CY_ABS_Y_OST, - CY_ABS_P_OST, - CY_ABS_W_OST, - CY_ABS_ID_OST, - CY_ABS_MAJ_OST, - CY_ABS_MIN_OST, - CY_ABS_OR_OST, - CY_NUM_ABS_OST /* number of abs signals */ -}; - -enum cyttsp4_flags { - CY_FLAG_NONE = 0x00, - CY_FLAG_HOVER = 0x04, - CY_FLAG_FLIP = 0x08, - CY_FLAG_INV_X = 0x10, - CY_FLAG_INV_Y = 0x20, - CY_FLAG_VKEYS = 0x40, -}; - -enum cyttsp4_object_id { - CY_OBJ_STANDARD_FINGER, - CY_OBJ_LARGE_OBJECT, - CY_OBJ_STYLUS, - CY_OBJ_HOVER, -}; - -enum cyttsp4_event_id { - CY_EV_NO_EVENT, - CY_EV_TOUCHDOWN, - CY_EV_MOVE, /* significant displacement (> act dist) */ - CY_EV_LIFTOFF, /* record reports last position */ -}; - -/* x-axis resolution of panel in pixels */ -#define CY_PCFG_RESOLUTION_X_MASK 0x7F - -/* y-axis resolution of panel in pixels */ -#define CY_PCFG_RESOLUTION_Y_MASK 0x7F - -/* x-axis, 0:origin is on left side of panel, 1: right */ -#define CY_PCFG_ORIGIN_X_MASK 0x80 - -/* y-axis, 0:origin is on top side of panel, 1: bottom */ -#define CY_PCFG_ORIGIN_Y_MASK 0x80 - -static inline int cyttsp4_adap_read(struct cyttsp4 *ts, u16 addr, int size, - void *buf) -{ - return ts->bus_ops->read(ts->dev, ts->xfer_buf, addr, size, buf); -} - -static inline int cyttsp4_adap_write(struct cyttsp4 *ts, u16 addr, int size, - const void *buf) -{ - return ts->bus_ops->write(ts->dev, ts->xfer_buf, addr, size, buf); -} - -extern struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops, - struct device *dev, u16 irq, size_t xfer_buf_size); -extern int cyttsp4_remove(struct cyttsp4 *ts); -int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr, - u8 length, const void *values); -int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr, - u8 length, void *values); -extern const struct dev_pm_ops cyttsp4_pm_ops; - -#endif /* _LINUX_CYTTSP4_CORE_H */ diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c deleted file mode 100644 index da32c151def500..00000000000000 --- a/drivers/input/touchscreen/cyttsp4_i2c.c +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * cyttsp_i2c.c - * Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver. - * For use with Cypress Txx4xx parts. 
- * Supported parts include: - * TMA4XX - * TMA1036 - * - * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. - * Copyright (C) 2012 Javier Martinez Canillas - * Copyright (C) 2013 Cypress Semiconductor - * - * Contact Cypress Semiconductor at www.cypress.com - */ - -#include "cyttsp4_core.h" - -#include -#include - -#define CYTTSP4_I2C_DATA_SIZE (3 * 256) - -static const struct cyttsp4_bus_ops cyttsp4_i2c_bus_ops = { - .bustype = BUS_I2C, - .write = cyttsp_i2c_write_block_data, - .read = cyttsp_i2c_read_block_data, -}; - -static int cyttsp4_i2c_probe(struct i2c_client *client) -{ - struct cyttsp4 *ts; - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { - dev_err(&client->dev, "I2C functionality not Supported\n"); - return -EIO; - } - - ts = cyttsp4_probe(&cyttsp4_i2c_bus_ops, &client->dev, client->irq, - CYTTSP4_I2C_DATA_SIZE); - - return PTR_ERR_OR_ZERO(ts); -} - -static void cyttsp4_i2c_remove(struct i2c_client *client) -{ - struct cyttsp4 *ts = i2c_get_clientdata(client); - - cyttsp4_remove(ts); -} - -static const struct i2c_device_id cyttsp4_i2c_id[] = { - { CYTTSP4_I2C_NAME }, - { } -}; -MODULE_DEVICE_TABLE(i2c, cyttsp4_i2c_id); - -static struct i2c_driver cyttsp4_i2c_driver = { - .driver = { - .name = CYTTSP4_I2C_NAME, - .pm = pm_ptr(&cyttsp4_pm_ops), - }, - .probe = cyttsp4_i2c_probe, - .remove = cyttsp4_i2c_remove, - .id_table = cyttsp4_i2c_id, -}; - -module_i2c_driver(cyttsp4_i2c_driver); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver"); -MODULE_AUTHOR("Cypress"); diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c deleted file mode 100644 index 944fbbe9113e13..00000000000000 --- a/drivers/input/touchscreen/cyttsp4_spi.c +++ /dev/null @@ -1,187 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Source for: - * Cypress TrueTouch(TM) Standard Product (TTSP) SPI touchscreen driver. - * For use with Cypress Txx4xx parts. - * Supported parts include: - * TMA4XX - * TMA1036 - * - * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. - * Copyright (C) 2012 Javier Martinez Canillas - * Copyright (C) 2013 Cypress Semiconductor - * - * Contact Cypress Semiconductor at www.cypress.com - */ - -#include "cyttsp4_core.h" - -#include -#include -#include - -#define CY_SPI_WR_OP 0x00 /* r/~w */ -#define CY_SPI_RD_OP 0x01 -#define CY_SPI_BITS_PER_WORD 8 -#define CY_SPI_A8_BIT 0x02 -#define CY_SPI_WR_HEADER_BYTES 2 -#define CY_SPI_RD_HEADER_BYTES 1 -#define CY_SPI_CMD_BYTES 2 -#define CY_SPI_SYNC_BYTE 0 -#define CY_SPI_SYNC_ACK 0x62 /* from TRM *A protocol */ -#define CY_SPI_DATA_SIZE (2 * 256) - -#define CY_SPI_DATA_BUF_SIZE (CY_SPI_CMD_BYTES + CY_SPI_DATA_SIZE) - -static int cyttsp_spi_xfer(struct device *dev, u8 *xfer_buf, - u8 op, u16 reg, u8 *buf, int length) -{ - struct spi_device *spi = to_spi_device(dev); - struct spi_message msg; - struct spi_transfer xfer[2]; - u8 *wr_buf = &xfer_buf[0]; - u8 rd_buf[CY_SPI_CMD_BYTES]; - int retval; - int i; - - if (length > CY_SPI_DATA_SIZE) { - dev_err(dev, "%s: length %d is too big.\n", - __func__, length); - return -EINVAL; - } - - memset(wr_buf, 0, CY_SPI_DATA_BUF_SIZE); - memset(rd_buf, 0, CY_SPI_CMD_BYTES); - - wr_buf[0] = op + (((reg >> 8) & 0x1) ? 
CY_SPI_A8_BIT : 0); - if (op == CY_SPI_WR_OP) { - wr_buf[1] = reg & 0xFF; - if (length > 0) - memcpy(wr_buf + CY_SPI_CMD_BYTES, buf, length); - } - - memset(xfer, 0, sizeof(xfer)); - spi_message_init(&msg); - - /* - We set both TX and RX buffers because Cypress TTSP - requires full duplex operation. - */ - xfer[0].tx_buf = wr_buf; - xfer[0].rx_buf = rd_buf; - switch (op) { - case CY_SPI_WR_OP: - xfer[0].len = length + CY_SPI_CMD_BYTES; - spi_message_add_tail(&xfer[0], &msg); - break; - - case CY_SPI_RD_OP: - xfer[0].len = CY_SPI_RD_HEADER_BYTES; - spi_message_add_tail(&xfer[0], &msg); - - xfer[1].rx_buf = buf; - xfer[1].len = length; - spi_message_add_tail(&xfer[1], &msg); - break; - - default: - dev_err(dev, "%s: bad operation code=%d\n", __func__, op); - return -EINVAL; - } - - retval = spi_sync(spi, &msg); - if (retval < 0) { - dev_dbg(dev, "%s: spi_sync() error %d, len=%d, op=%d\n", - __func__, retval, xfer[1].len, op); - - /* - * do not return here since was a bad ACK sequence - * let the following ACK check handle any errors and - * allow silent retries - */ - } - - if (rd_buf[CY_SPI_SYNC_BYTE] != CY_SPI_SYNC_ACK) { - dev_dbg(dev, "%s: operation %d failed\n", __func__, op); - - for (i = 0; i < CY_SPI_CMD_BYTES; i++) - dev_dbg(dev, "%s: test rd_buf[%d]:0x%02x\n", - __func__, i, rd_buf[i]); - for (i = 0; i < length; i++) - dev_dbg(dev, "%s: test buf[%d]:0x%02x\n", - __func__, i, buf[i]); - - return -EIO; - } - - return 0; -} - -static int cyttsp_spi_read_block_data(struct device *dev, u8 *xfer_buf, - u16 addr, u8 length, void *data) -{ - int rc; - - rc = cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_WR_OP, addr, NULL, 0); - if (rc) - return rc; - else - return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_RD_OP, addr, data, - length); -} - -static int cyttsp_spi_write_block_data(struct device *dev, u8 *xfer_buf, - u16 addr, u8 length, const void *data) -{ - return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_WR_OP, addr, (void *)data, - length); -} - -static const struct cyttsp4_bus_ops cyttsp_spi_bus_ops = { - .bustype = BUS_SPI, - .write = cyttsp_spi_write_block_data, - .read = cyttsp_spi_read_block_data, -}; - -static int cyttsp4_spi_probe(struct spi_device *spi) -{ - struct cyttsp4 *ts; - int error; - - /* Set up SPI*/ - spi->bits_per_word = CY_SPI_BITS_PER_WORD; - spi->mode = SPI_MODE_0; - error = spi_setup(spi); - if (error < 0) { - dev_err(&spi->dev, "%s: SPI setup error %d\n", - __func__, error); - return error; - } - - ts = cyttsp4_probe(&cyttsp_spi_bus_ops, &spi->dev, spi->irq, - CY_SPI_DATA_BUF_SIZE); - - return PTR_ERR_OR_ZERO(ts); -} - -static void cyttsp4_spi_remove(struct spi_device *spi) -{ - struct cyttsp4 *ts = spi_get_drvdata(spi); - cyttsp4_remove(ts); -} - -static struct spi_driver cyttsp4_spi_driver = { - .driver = { - .name = CYTTSP4_SPI_NAME, - .pm = pm_ptr(&cyttsp4_pm_ops), - }, - .probe = cyttsp4_spi_probe, - .remove = cyttsp4_spi_remove, -}; - -module_spi_driver(cyttsp4_spi_driver); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) SPI driver"); -MODULE_AUTHOR("Cypress"); -MODULE_ALIAS("spi:cyttsp4"); diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c index 3ca246ab192e40..eafe5a9b896484 100644 --- a/drivers/input/touchscreen/cyttsp5.c +++ b/drivers/input/touchscreen/cyttsp5.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #define CYTTSP5_NAME "cyttsp5" #define CY_I2C_DATA_SIZE (2 * 256) diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c index 
132ed5786e8437..b8ce6012364cfe 100644 --- a/drivers/input/touchscreen/cyttsp_core.c +++ b/drivers/input/touchscreen/cyttsp_core.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -615,17 +614,14 @@ static int cyttsp_parse_properties(struct cyttsp *ts) return 0; } -static void cyttsp_disable_regulators(void *_ts) -{ - struct cyttsp *ts = _ts; - - regulator_bulk_disable(ARRAY_SIZE(ts->regulators), - ts->regulators); -} - struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops, struct device *dev, int irq, size_t xfer_buf_size) { + /* + * VCPIN is the analog voltage supply + * VDD is the digital voltage supply + */ + static const char * const supplies[] = { "vcpin", "vdd" }; struct cyttsp *ts; struct input_dev *input_dev; int error; @@ -643,29 +639,10 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops, ts->bus_ops = bus_ops; ts->irq = irq; - /* - * VCPIN is the analog voltage supply - * VDD is the digital voltage supply - */ - ts->regulators[0].supply = "vcpin"; - ts->regulators[1].supply = "vdd"; - error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators), - ts->regulators); - if (error) { - dev_err(dev, "Failed to get regulators: %d\n", error); - return ERR_PTR(error); - } - - error = regulator_bulk_enable(ARRAY_SIZE(ts->regulators), - ts->regulators); - if (error) { - dev_err(dev, "Cannot enable regulators: %d\n", error); - return ERR_PTR(error); - } - - error = devm_add_action_or_reset(dev, cyttsp_disable_regulators, ts); + error = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies), + supplies); if (error) { - dev_err(dev, "failed to install chip disable handler\n"); + dev_err(dev, "Failed to enable regulators: %d\n", error); return ERR_PTR(error); } diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h index 075509e695a276..40a605d2028500 100644 --- a/drivers/input/touchscreen/cyttsp_core.h +++ b/drivers/input/touchscreen/cyttsp_core.h @@ -122,7 +122,6 @@ struct cyttsp { enum cyttsp_state state; bool suspended; - struct regulator_bulk_data regulators[2]; struct gpio_desc *reset_gpio; bool use_hndshk; u8 act_dist; @@ -137,10 +136,6 @@ struct cyttsp { struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops, struct device *dev, int irq, size_t xfer_buf_size); -int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr, - u8 length, const void *values); -int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr, - u8 length, void *values); extern const struct dev_pm_ops cyttsp_pm_ops; #endif /* __CYTTSP_CORE_H__ */ diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c index bf13b3448a6be7..cb15600549cdd6 100644 --- a/drivers/input/touchscreen/cyttsp_i2c.c +++ b/drivers/input/touchscreen/cyttsp_i2c.c @@ -22,6 +22,61 @@ #define CY_I2C_DATA_SIZE 128 +static int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, + u16 addr, u8 length, void *values) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 client_addr = client->addr | ((addr >> 8) & 0x1); + u8 addr_lo = addr & 0xFF; + struct i2c_msg msgs[] = { + { + .addr = client_addr, + .flags = 0, + .len = 1, + .buf = &addr_lo, + }, + { + .addr = client_addr, + .flags = I2C_M_RD, + .len = length, + .buf = values, + }, + }; + int retval; + + retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (retval < 0) + return retval; + + return retval != ARRAY_SIZE(msgs) ? 
-EIO : 0; +} + +static int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, + u16 addr, u8 length, const void *values) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 client_addr = client->addr | ((addr >> 8) & 0x1); + u8 addr_lo = addr & 0xFF; + struct i2c_msg msgs[] = { + { + .addr = client_addr, + .flags = 0, + .len = length + 1, + .buf = xfer_buf, + }, + }; + int retval; + + xfer_buf[0] = addr_lo; + memcpy(&xfer_buf[1], values, length); + + retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (retval < 0) + return retval; + + return retval != ARRAY_SIZE(msgs) ? -EIO : 0; +} + static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = { .bustype = BUS_I2C, .write = cyttsp_i2c_write_block_data, diff --git a/drivers/input/touchscreen/cyttsp_i2c_common.c b/drivers/input/touchscreen/cyttsp_i2c_common.c deleted file mode 100644 index 7e752fb9fad776..00000000000000 --- a/drivers/input/touchscreen/cyttsp_i2c_common.c +++ /dev/null @@ -1,86 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * cyttsp_i2c_common.c - * Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver. - * For use with Cypress Txx3xx and Txx4xx parts. - * Supported parts include: - * CY8CTST341 - * CY8CTMA340 - * TMA4XX - * TMA1036 - * - * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. - * Copyright (C) 2012 Javier Martinez Canillas - * - * Contact Cypress Semiconductor at www.cypress.com - */ - -#include -#include -#include -#include -#include - -#include "cyttsp4_core.h" - -int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, - u16 addr, u8 length, void *values) -{ - struct i2c_client *client = to_i2c_client(dev); - u8 client_addr = client->addr | ((addr >> 8) & 0x1); - u8 addr_lo = addr & 0xFF; - struct i2c_msg msgs[] = { - { - .addr = client_addr, - .flags = 0, - .len = 1, - .buf = &addr_lo, - }, - { - .addr = client_addr, - .flags = I2C_M_RD, - .len = length, - .buf = values, - }, - }; - int retval; - - retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (retval < 0) - return retval; - - return retval != ARRAY_SIZE(msgs) ? -EIO : 0; -} -EXPORT_SYMBOL_GPL(cyttsp_i2c_read_block_data); - -int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, - u16 addr, u8 length, const void *values) -{ - struct i2c_client *client = to_i2c_client(dev); - u8 client_addr = client->addr | ((addr >> 8) & 0x1); - u8 addr_lo = addr & 0xFF; - struct i2c_msg msgs[] = { - { - .addr = client_addr, - .flags = 0, - .len = length + 1, - .buf = xfer_buf, - }, - }; - int retval; - - xfer_buf[0] = addr_lo; - memcpy(&xfer_buf[1], values, length); - - retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (retval < 0) - return retval; - - return retval != ARRAY_SIZE(msgs) ? 
-EIO : 0; -} -EXPORT_SYMBOL_GPL(cyttsp_i2c_write_block_data); - - -MODULE_DESCRIPTION("Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Cypress"); diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index e70415f189a556..fda49b2fe08852 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -32,7 +32,7 @@ #include #include -#include +#include #define WORK_REGISTER_THRESHOLD 0x00 #define WORK_REGISTER_REPORT_RATE 0x08 diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c index 48c69788b84af6..87eb18977b71a0 100644 --- a/drivers/input/touchscreen/eeti_ts.c +++ b/drivers/input/touchscreen/eeti_ts.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include struct eeti_ts { struct i2c_client *client; diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 365765d40e6278..3fd170f75b4a65 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include /* Device, Driver information */ #define DEVICE_NAME "elants_i2c" diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c index 2e77cfb63f32a1..fdda8412b16443 100644 --- a/drivers/input/touchscreen/exc3000.c +++ b/drivers/input/touchscreen/exc3000.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #define EXC3000_NUM_SLOTS 10 #define EXC3000_SLOTS_PER_FRAME 5 diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 435714f18c23a6..a3e8a51c914495 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include "goodix.h" #define GOODIX_GPIO_INT_NAME "irq" diff --git a/drivers/input/touchscreen/goodix_berlin.h b/drivers/input/touchscreen/goodix_berlin.h index 1fd77eb69c9a6c..38b6f9ddbdefce 100644 --- a/drivers/input/touchscreen/goodix_berlin.h +++ b/drivers/input/touchscreen/goodix_berlin.h @@ -20,5 +20,6 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id, struct regmap *regmap); extern const struct dev_pm_ops goodix_berlin_pm_ops; +extern const struct attribute_group *goodix_berlin_groups[]; #endif diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c index e7b41a926ef885..3fc03cf0ca23fd 100644 --- a/drivers/input/touchscreen/goodix_berlin_core.c +++ b/drivers/input/touchscreen/goodix_berlin_core.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include "goodix_berlin.h" @@ -672,6 +672,49 @@ static void goodix_berlin_power_off_act(void *data) goodix_berlin_power_off(cd); } +static ssize_t registers_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct goodix_berlin_core *cd = dev_get_drvdata(dev); + int error; + + error = regmap_raw_read(cd->regmap, off, buf, count); + + return error ? 
error : count; +} + +static ssize_t registers_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct goodix_berlin_core *cd = dev_get_drvdata(dev); + int error; + + error = regmap_raw_write(cd->regmap, off, buf, count); + + return error ? error : count; +} + +static BIN_ATTR_ADMIN_RW(registers, 0); + +static struct bin_attribute *goodix_berlin_bin_attrs[] = { + &bin_attr_registers, + NULL, +}; + +static const struct attribute_group goodix_berlin_attr_group = { + .bin_attrs = goodix_berlin_bin_attrs, +}; + +const struct attribute_group *goodix_berlin_groups[] = { + &goodix_berlin_attr_group, + NULL, +}; +EXPORT_SYMBOL_GPL(goodix_berlin_groups); + int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id, struct regmap *regmap) { diff --git a/drivers/input/touchscreen/goodix_berlin_i2c.c b/drivers/input/touchscreen/goodix_berlin_i2c.c index 2e7098078838f5..ad7a60d943388f 100644 --- a/drivers/input/touchscreen/goodix_berlin_i2c.c +++ b/drivers/input/touchscreen/goodix_berlin_i2c.c @@ -64,6 +64,7 @@ static struct i2c_driver goodix_berlin_i2c_driver = { .name = "goodix-berlin-i2c", .of_match_table = goodix_berlin_i2c_of_match, .pm = pm_sleep_ptr(&goodix_berlin_pm_ops), + .dev_groups = goodix_berlin_groups, }, .probe = goodix_berlin_i2c_probe, .id_table = goodix_berlin_i2c_id, diff --git a/drivers/input/touchscreen/goodix_berlin_spi.c b/drivers/input/touchscreen/goodix_berlin_spi.c index 82774a4129563a..0662e87b86929a 100644 --- a/drivers/input/touchscreen/goodix_berlin_spi.c +++ b/drivers/input/touchscreen/goodix_berlin_spi.c @@ -7,7 +7,7 @@ * * Based on goodix_ts_berlin driver. */ -#include +#include #include #include #include @@ -169,6 +169,7 @@ static struct spi_driver goodix_berlin_spi_driver = { .name = "goodix-berlin-spi", .of_match_table = goodix_berlin_spi_of_match, .pm = pm_sleep_ptr(&goodix_berlin_pm_ops), + .dev_groups = goodix_berlin_groups, }, .probe = goodix_berlin_spi_probe, .id_table = goodix_berlin_spi_ids, diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c index 682abbbe5bd633..a73369e15dda0b 100644 --- a/drivers/input/touchscreen/hideep.c +++ b/drivers/input/touchscreen/hideep.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #define HIDEEP_TS_NAME "HiDeep Touchscreen" #define HIDEEP_I2C_NAME "hideep_ts" diff --git a/drivers/input/touchscreen/hycon-hy46xx.c b/drivers/input/touchscreen/hycon-hy46xx.c index 2e01d87977c168..b2ff7a45b90853 100644 --- a/drivers/input/touchscreen/hycon-hy46xx.c +++ b/drivers/input/touchscreen/hycon-hy46xx.c @@ -15,7 +15,7 @@ #include #include -#include +#include #define HY46XX_CHKSUM_CODE 0x1 #define HY46XX_FINGER_NUM 0x2 diff --git a/drivers/input/touchscreen/hynitron_cstxxx.c b/drivers/input/touchscreen/hynitron_cstxxx.c index 05946fee4fd440..1d8ca90dcda695 100644 --- a/drivers/input/touchscreen/hynitron_cstxxx.c +++ b/drivers/input/touchscreen/hynitron_cstxxx.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include /* Per chip data */ struct hynitron_ts_chip_data { @@ -470,7 +470,7 @@ static const struct hynitron_ts_chip_data cst3xx_data = { }; static const struct i2c_device_id hyn_tpd_id[] = { - { .name = "hynitron_ts", 0 }, + { .name = "hynitron_ts" }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(i2c, hyn_tpd_id); diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index 4573844c3395a3..260c83dc23a2e2 100644 --- 
a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #define ILI2XXX_POLL_PERIOD 15 diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c index 3eb762896345b7..0dd632724a003f 100644 --- a/drivers/input/touchscreen/ilitek_ts_i2c.c +++ b/drivers/input/touchscreen/ilitek_ts_i2c.c @@ -15,12 +15,11 @@ #include #include #include -#include #include #include #include #include -#include +#include #define ILITEK_TS_NAME "ilitek_ts" @@ -37,6 +36,8 @@ #define ILITEK_TP_CMD_GET_MCU_VER 0x61 #define ILITEK_TP_CMD_GET_IC_MODE 0xC0 +#define ILITEK_TP_I2C_REPORT_ID 0x48 + #define REPORT_COUNT_ADDRESS 61 #define ILITEK_SUPPORT_MAX_POINT 40 @@ -160,15 +161,19 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) error = ilitek_i2c_write_and_read(ts, NULL, 0, 0, buf, 64); if (error) { dev_err(dev, "get touch info failed, err:%d\n", error); - goto err_sync_frame; + return error; + } + + if (buf[0] != ILITEK_TP_I2C_REPORT_ID) { + dev_err(dev, "get touch info failed. Wrong id: 0x%02X\n", buf[0]); + return -EINVAL; } report_max_point = buf[REPORT_COUNT_ADDRESS]; if (report_max_point > ts->max_tp) { dev_err(dev, "FW report max point:%d > panel info. max:%d\n", report_max_point, ts->max_tp); - error = -EINVAL; - goto err_sync_frame; + return -EINVAL; } count = DIV_ROUND_UP(report_max_point, packet_max_point); @@ -178,7 +183,7 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) if (error) { dev_err(dev, "get touch info. failed, cnt:%d, err:%d\n", count, error); - goto err_sync_frame; + return error; } } @@ -203,10 +208,10 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) ilitek_touch_down(ts, id, x, y); } -err_sync_frame: input_mt_sync_frame(input); input_sync(input); - return error; + + return 0; } /* APIs of cmds for ILITEK Touch IC */ diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c index 4d226118f3cc29..4ebd7565ae6e8d 100644 --- a/drivers/input/touchscreen/iqs5xx.c +++ b/drivers/input/touchscreen/iqs5xx.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #define IQS5XX_FW_FILE_LEN 64 #define IQS5XX_NUM_RETRIES 10 diff --git a/drivers/input/touchscreen/iqs7211.c b/drivers/input/touchscreen/iqs7211.c index f0a56cde899e48..c5d447ee6f5373 100644 --- a/drivers/input/touchscreen/iqs7211.c +++ b/drivers/input/touchscreen/iqs7211.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #define IQS7211_PROD_NUM 0x00 diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c deleted file mode 100644 index 5aff8dcda0dc34..00000000000000 --- a/drivers/input/touchscreen/mcs5000_ts.c +++ /dev/null @@ -1,288 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * mcs5000_ts.c - Touchscreen driver for MELFAS MCS-5000 controller - * - * Copyright (C) 2009 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim - * - * Based on wm97xx-core.c - */ - -#include -#include -#include -#include -#include -#include -#include - -/* Registers */ -#define MCS5000_TS_STATUS 0x00 -#define STATUS_OFFSET 0 -#define STATUS_NO (0 << STATUS_OFFSET) -#define STATUS_INIT (1 << STATUS_OFFSET) -#define STATUS_SENSING (2 << STATUS_OFFSET) -#define STATUS_COORD (3 << STATUS_OFFSET) -#define STATUS_GESTURE (4 << STATUS_OFFSET) -#define ERROR_OFFSET 4 -#define ERROR_NO (0 << ERROR_OFFSET) -#define ERROR_POWER_ON_RESET (1 << ERROR_OFFSET) -#define ERROR_INT_RESET (2 << 
ERROR_OFFSET) -#define ERROR_EXT_RESET (3 << ERROR_OFFSET) -#define ERROR_INVALID_REG_ADDRESS (8 << ERROR_OFFSET) -#define ERROR_INVALID_REG_VALUE (9 << ERROR_OFFSET) - -#define MCS5000_TS_OP_MODE 0x01 -#define RESET_OFFSET 0 -#define RESET_NO (0 << RESET_OFFSET) -#define RESET_EXT_SOFT (1 << RESET_OFFSET) -#define OP_MODE_OFFSET 1 -#define OP_MODE_SLEEP (0 << OP_MODE_OFFSET) -#define OP_MODE_ACTIVE (1 << OP_MODE_OFFSET) -#define GESTURE_OFFSET 4 -#define GESTURE_DISABLE (0 << GESTURE_OFFSET) -#define GESTURE_ENABLE (1 << GESTURE_OFFSET) -#define PROXIMITY_OFFSET 5 -#define PROXIMITY_DISABLE (0 << PROXIMITY_OFFSET) -#define PROXIMITY_ENABLE (1 << PROXIMITY_OFFSET) -#define SCAN_MODE_OFFSET 6 -#define SCAN_MODE_INTERRUPT (0 << SCAN_MODE_OFFSET) -#define SCAN_MODE_POLLING (1 << SCAN_MODE_OFFSET) -#define REPORT_RATE_OFFSET 7 -#define REPORT_RATE_40 (0 << REPORT_RATE_OFFSET) -#define REPORT_RATE_80 (1 << REPORT_RATE_OFFSET) - -#define MCS5000_TS_SENS_CTL 0x02 -#define MCS5000_TS_FILTER_CTL 0x03 -#define PRI_FILTER_OFFSET 0 -#define SEC_FILTER_OFFSET 4 - -#define MCS5000_TS_X_SIZE_UPPER 0x08 -#define MCS5000_TS_X_SIZE_LOWER 0x09 -#define MCS5000_TS_Y_SIZE_UPPER 0x0A -#define MCS5000_TS_Y_SIZE_LOWER 0x0B - -#define MCS5000_TS_INPUT_INFO 0x10 -#define INPUT_TYPE_OFFSET 0 -#define INPUT_TYPE_NONTOUCH (0 << INPUT_TYPE_OFFSET) -#define INPUT_TYPE_SINGLE (1 << INPUT_TYPE_OFFSET) -#define INPUT_TYPE_DUAL (2 << INPUT_TYPE_OFFSET) -#define INPUT_TYPE_PALM (3 << INPUT_TYPE_OFFSET) -#define INPUT_TYPE_PROXIMITY (7 << INPUT_TYPE_OFFSET) -#define GESTURE_CODE_OFFSET 3 -#define GESTURE_CODE_NO (0 << GESTURE_CODE_OFFSET) - -#define MCS5000_TS_X_POS_UPPER 0x11 -#define MCS5000_TS_X_POS_LOWER 0x12 -#define MCS5000_TS_Y_POS_UPPER 0x13 -#define MCS5000_TS_Y_POS_LOWER 0x14 -#define MCS5000_TS_Z_POS 0x15 -#define MCS5000_TS_WIDTH 0x16 -#define MCS5000_TS_GESTURE_VAL 0x17 -#define MCS5000_TS_MODULE_REV 0x20 -#define MCS5000_TS_FIRMWARE_VER 0x21 - -/* Touchscreen absolute values */ -#define MCS5000_MAX_XC 0x3ff -#define MCS5000_MAX_YC 0x3ff - -enum mcs5000_ts_read_offset { - READ_INPUT_INFO, - READ_X_POS_UPPER, - READ_X_POS_LOWER, - READ_Y_POS_UPPER, - READ_Y_POS_LOWER, - READ_BLOCK_SIZE, -}; - -/* Each client has this additional data */ -struct mcs5000_ts_data { - struct i2c_client *client; - struct input_dev *input_dev; - const struct mcs_platform_data *platform_data; -}; - -static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id) -{ - struct mcs5000_ts_data *data = dev_id; - struct i2c_client *client = data->client; - u8 buffer[READ_BLOCK_SIZE]; - int err; - int x; - int y; - - err = i2c_smbus_read_i2c_block_data(client, MCS5000_TS_INPUT_INFO, - READ_BLOCK_SIZE, buffer); - if (err < 0) { - dev_err(&client->dev, "%s, err[%d]\n", __func__, err); - goto out; - } - - switch (buffer[READ_INPUT_INFO]) { - case INPUT_TYPE_NONTOUCH: - input_report_key(data->input_dev, BTN_TOUCH, 0); - input_sync(data->input_dev); - break; - - case INPUT_TYPE_SINGLE: - x = (buffer[READ_X_POS_UPPER] << 8) | buffer[READ_X_POS_LOWER]; - y = (buffer[READ_Y_POS_UPPER] << 8) | buffer[READ_Y_POS_LOWER]; - - input_report_key(data->input_dev, BTN_TOUCH, 1); - input_report_abs(data->input_dev, ABS_X, x); - input_report_abs(data->input_dev, ABS_Y, y); - input_sync(data->input_dev); - break; - - case INPUT_TYPE_DUAL: - /* TODO */ - break; - - case INPUT_TYPE_PALM: - /* TODO */ - break; - - case INPUT_TYPE_PROXIMITY: - /* TODO */ - break; - - default: - dev_err(&client->dev, "Unknown ts input type %d\n", - buffer[READ_INPUT_INFO]); - break; - } 
- - out: - return IRQ_HANDLED; -} - -static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data, - const struct mcs_platform_data *platform_data) -{ - struct i2c_client *client = data->client; - - /* Touch reset & sleep mode */ - i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, - RESET_EXT_SOFT | OP_MODE_SLEEP); - - /* Touch size */ - i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_UPPER, - platform_data->x_size >> 8); - i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_LOWER, - platform_data->x_size & 0xff); - i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_UPPER, - platform_data->y_size >> 8); - i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_LOWER, - platform_data->y_size & 0xff); - - /* Touch active mode & 80 report rate */ - i2c_smbus_write_byte_data(data->client, MCS5000_TS_OP_MODE, - OP_MODE_ACTIVE | REPORT_RATE_80); -} - -static int mcs5000_ts_probe(struct i2c_client *client) -{ - const struct mcs_platform_data *pdata; - struct mcs5000_ts_data *data; - struct input_dev *input_dev; - int error; - - pdata = dev_get_platdata(&client->dev); - if (!pdata) - return -EINVAL; - - data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); - if (!data) { - dev_err(&client->dev, "Failed to allocate memory\n"); - return -ENOMEM; - } - - data->client = client; - - input_dev = devm_input_allocate_device(&client->dev); - if (!input_dev) { - dev_err(&client->dev, "Failed to allocate input device\n"); - return -ENOMEM; - } - - input_dev->name = "MELFAS MCS-5000 Touchscreen"; - input_dev->id.bustype = BUS_I2C; - input_dev->dev.parent = &client->dev; - - __set_bit(EV_ABS, input_dev->evbit); - __set_bit(EV_KEY, input_dev->evbit); - __set_bit(BTN_TOUCH, input_dev->keybit); - input_set_abs_params(input_dev, ABS_X, 0, MCS5000_MAX_XC, 0, 0); - input_set_abs_params(input_dev, ABS_Y, 0, MCS5000_MAX_YC, 0, 0); - - data->input_dev = input_dev; - - if (pdata->cfg_pin) - pdata->cfg_pin(); - - error = devm_request_threaded_irq(&client->dev, client->irq, - NULL, mcs5000_ts_interrupt, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - "mcs5000_ts", data); - if (error) { - dev_err(&client->dev, "Failed to register interrupt\n"); - return error; - } - - error = input_register_device(data->input_dev); - if (error) { - dev_err(&client->dev, "Failed to register input device\n"); - return error; - } - - mcs5000_ts_phys_init(data, pdata); - i2c_set_clientdata(client, data); - - return 0; -} - -static int mcs5000_ts_suspend(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - - /* Touch sleep mode */ - i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP); - - return 0; -} - -static int mcs5000_ts_resume(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct mcs5000_ts_data *data = i2c_get_clientdata(client); - const struct mcs_platform_data *pdata = dev_get_platdata(dev); - - mcs5000_ts_phys_init(data, pdata); - - return 0; -} - -static DEFINE_SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, - mcs5000_ts_suspend, mcs5000_ts_resume); - -static const struct i2c_device_id mcs5000_ts_id[] = { - { "mcs5000_ts" }, - { } -}; -MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id); - -static struct i2c_driver mcs5000_ts_driver = { - .probe = mcs5000_ts_probe, - .driver = { - .name = "mcs5000_ts", - .pm = pm_sleep_ptr(&mcs5000_ts_pm), - }, - .id_table = mcs5000_ts_id, -}; - -module_i2c_driver(mcs5000_ts_driver); - -/* Module information */ -MODULE_AUTHOR("Joonyoung Shim "); -MODULE_DESCRIPTION("Touchscreen driver for MELFAS MCS-5000 controller"); -MODULE_LICENSE("GPL"); diff --git 
a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c index b99a0e3c408435..a6946e3d8376d7 100644 --- a/drivers/input/touchscreen/melfas_mip4.c +++ b/drivers/input/touchscreen/melfas_mip4.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #define MIP4_DEVICE_NAME "mip4_ts" diff --git a/drivers/input/touchscreen/novatek-nvt-ts.c b/drivers/input/touchscreen/novatek-nvt-ts.c index 1a797e410a3fae..0afee41ac9de0e 100644 --- a/drivers/input/touchscreen/novatek-nvt-ts.c +++ b/drivers/input/touchscreen/novatek-nvt-ts.c @@ -15,7 +15,7 @@ #include #include -#include +#include #define NVT_TS_TOUCH_START 0x00 #define NVT_TS_TOUCH_SIZE 6 diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c index 4ede0687beb096..83bf27085ebc09 100644 --- a/drivers/input/touchscreen/pixcir_i2c_ts.c +++ b/drivers/input/touchscreen/pixcir_i2c_ts.c @@ -5,7 +5,7 @@ * Copyright (C) 2010-2011 Pixcir, Inc. */ -#include +#include #include #include #include diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 92d75057de2d0c..f975b53e88252d 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include /* Slave I2C mode */ #define RM_BOOT_BLDR 0x02 diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c index a529217e748fbb..e1518a75a51b6a 100644 --- a/drivers/input/touchscreen/s6sy761.c +++ b/drivers/input/touchscreen/s6sy761.c @@ -4,7 +4,7 @@ // Copyright (c) 2017 Samsung Electronics Co., Ltd. // Copyright (c) 2017 Andi Shyti -#include +#include #include #include #include diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index 6a42b27c459901..5ccc96764742f3 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -24,7 +24,7 @@ #include #include -#include +#include #define SILEAD_TS_NAME "silead_ts" diff --git a/drivers/input/touchscreen/sis_i2c.c b/drivers/input/touchscreen/sis_i2c.c index 2023c6df416f29..a625f2ad809d3b 100644 --- a/drivers/input/touchscreen/sis_i2c.c +++ b/drivers/input/touchscreen/sis_i2c.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #define SIS_I2C_NAME "sis_i2c_ts" diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c index 7efbcd0fde4fc3..6074b7730e8621 100644 --- a/drivers/input/touchscreen/surface3_spi.c +++ b/drivers/input/touchscreen/surface3_spi.c @@ -18,7 +18,7 @@ #include #include -#include +#include #define SURFACE3_PACKET_SIZE 264 diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c index b673098535ad19..787f2caf4f7342 100644 --- a/drivers/input/touchscreen/tsc2004.c +++ b/drivers/input/touchscreen/tsc2004.c @@ -42,11 +42,6 @@ static int tsc2004_probe(struct i2c_client *i2c) tsc2004_cmd); } -static void tsc2004_remove(struct i2c_client *i2c) -{ - tsc200x_remove(&i2c->dev); -} - static const struct i2c_device_id tsc2004_idtable[] = { { "tsc2004" }, { } @@ -70,7 +65,6 @@ static struct i2c_driver tsc2004_driver = { }, .id_table = tsc2004_idtable, .probe = tsc2004_probe, - .remove = tsc2004_remove, }; module_i2c_driver(tsc2004_driver); diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c index 1b40ce0ca1b991..6fe8b41b3ecc0a 100644 --- a/drivers/input/touchscreen/tsc2005.c +++ b/drivers/input/touchscreen/tsc2005.c @@ -64,11 
+64,6 @@ static int tsc2005_probe(struct spi_device *spi) tsc2005_cmd); } -static void tsc2005_remove(struct spi_device *spi) -{ - tsc200x_remove(&spi->dev); -} - #ifdef CONFIG_OF static const struct of_device_id tsc2005_of_match[] = { { .compatible = "ti,tsc2005" }, @@ -85,7 +80,6 @@ static struct spi_driver tsc2005_driver = { .pm = pm_sleep_ptr(&tsc200x_pm_ops), }, .probe = tsc2005_probe, - .remove = tsc2005_remove, }; module_spi_driver(tsc2005_driver); diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c index a4c0e9db9bb94d..df39dee13e1ca4 100644 --- a/drivers/input/touchscreen/tsc200x-core.c +++ b/drivers/input/touchscreen/tsc200x-core.c @@ -104,11 +104,11 @@ struct tsc200x { bool pen_down; - struct regulator *vio; - struct gpio_desc *reset_gpio; int (*tsc200x_cmd)(struct device *dev, u8 cmd); + int irq; + bool wake_irq_enabled; }; static void tsc200x_update_pen_state(struct tsc200x *ts, @@ -136,7 +136,6 @@ static void tsc200x_update_pen_state(struct tsc200x *ts, static irqreturn_t tsc200x_irq_thread(int irq, void *_ts) { struct tsc200x *ts = _ts; - unsigned long flags; unsigned int pressure; struct tsc200x_data tsdata; int error; @@ -182,13 +181,11 @@ static irqreturn_t tsc200x_irq_thread(int irq, void *_ts) if (unlikely(pressure > MAX_12BIT)) goto out; - spin_lock_irqsave(&ts->lock, flags); - - tsc200x_update_pen_state(ts, tsdata.x, tsdata.y, pressure); - mod_timer(&ts->penup_timer, - jiffies + msecs_to_jiffies(TSC200X_PENUP_TIME_MS)); - - spin_unlock_irqrestore(&ts->lock, flags); + scoped_guard(spinlock_irqsave, &ts->lock) { + tsc200x_update_pen_state(ts, tsdata.x, tsdata.y, pressure); + mod_timer(&ts->penup_timer, + jiffies + msecs_to_jiffies(TSC200X_PENUP_TIME_MS)); + } ts->last_valid_interrupt = jiffies; out: @@ -198,11 +195,9 @@ static irqreturn_t tsc200x_irq_thread(int irq, void *_ts) static void tsc200x_penup_timer(struct timer_list *t) { struct tsc200x *ts = from_timer(ts, t, penup_timer); - unsigned long flags; - spin_lock_irqsave(&ts->lock, flags); + guard(spinlock_irqsave)(&ts->lock); tsc200x_update_pen_state(ts, 0, 0, 0); - spin_unlock_irqrestore(&ts->lock, flags); } static void tsc200x_start_scan(struct tsc200x *ts) @@ -232,12 +227,10 @@ static void __tsc200x_disable(struct tsc200x *ts) { tsc200x_stop_scan(ts); - disable_irq(ts->irq); - del_timer_sync(&ts->penup_timer); + guard(disable_irq)(&ts->irq); + del_timer_sync(&ts->penup_timer); cancel_delayed_work_sync(&ts->esd_work); - - enable_irq(ts->irq); } /* must be called with ts->mutex held */ @@ -253,80 +246,79 @@ static void __tsc200x_enable(struct tsc200x *ts) } } -static ssize_t tsc200x_selftest_show(struct device *dev, - struct device_attribute *attr, - char *buf) +/* + * Test TSC200X communications via temp high register. + */ +static int tsc200x_do_selftest(struct tsc200x *ts) { - struct tsc200x *ts = dev_get_drvdata(dev); - unsigned int temp_high; unsigned int temp_high_orig; unsigned int temp_high_test; - bool success = true; + unsigned int temp_high; int error; - mutex_lock(&ts->mutex); - - /* - * Test TSC200X communications via temp high register. 
- */ - __tsc200x_disable(ts); - error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high_orig); if (error) { - dev_warn(dev, "selftest failed: read error %d\n", error); - success = false; - goto out; + dev_warn(ts->dev, "selftest failed: read error %d\n", error); + return error; } temp_high_test = (temp_high_orig - 1) & MAX_12BIT; error = regmap_write(ts->regmap, TSC200X_REG_TEMP_HIGH, temp_high_test); if (error) { - dev_warn(dev, "selftest failed: write error %d\n", error); - success = false; - goto out; + dev_warn(ts->dev, "selftest failed: write error %d\n", error); + return error; } error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high); if (error) { - dev_warn(dev, "selftest failed: read error %d after write\n", - error); - success = false; - goto out; - } - - if (temp_high != temp_high_test) { - dev_warn(dev, "selftest failed: %d != %d\n", - temp_high, temp_high_test); - success = false; + dev_warn(ts->dev, + "selftest failed: read error %d after write\n", error); + return error; } /* hardware reset */ tsc200x_reset(ts); - if (!success) - goto out; + if (temp_high != temp_high_test) { + dev_warn(ts->dev, "selftest failed: %d != %d\n", + temp_high, temp_high_test); + return -EINVAL; + } /* test that the reset really happened */ error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high); if (error) { - dev_warn(dev, "selftest failed: read error %d after reset\n", - error); - success = false; - goto out; + dev_warn(ts->dev, + "selftest failed: read error %d after reset\n", error); + return error; } if (temp_high != temp_high_orig) { - dev_warn(dev, "selftest failed after reset: %d != %d\n", + dev_warn(ts->dev, "selftest failed after reset: %d != %d\n", temp_high, temp_high_orig); - success = false; + return -EINVAL; } -out: - __tsc200x_enable(ts); - mutex_unlock(&ts->mutex); + return 0; +} - return sprintf(buf, "%d\n", success); +static ssize_t tsc200x_selftest_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tsc200x *ts = dev_get_drvdata(dev); + int error; + + scoped_guard(mutex, &ts->mutex) { + __tsc200x_disable(ts); + + error = tsc200x_do_selftest(ts); + + __tsc200x_enable(ts); + } + + return sprintf(buf, "%d\n", !error); } static DEVICE_ATTR(selftest, S_IRUGO, tsc200x_selftest_show, NULL); @@ -368,46 +360,42 @@ static void tsc200x_esd_work(struct work_struct *work) int error; unsigned int r; - if (!mutex_trylock(&ts->mutex)) { - /* - * If the mutex is taken, it means that disable or enable is in - * progress. In that case just reschedule the work. If the work - * is not needed, it will be canceled by disable. - */ - goto reschedule; - } - - if (time_is_after_jiffies(ts->last_valid_interrupt + - msecs_to_jiffies(ts->esd_timeout))) - goto out; - - /* We should be able to read register without disabling interrupts. */ - error = regmap_read(ts->regmap, TSC200X_REG_CFR0, &r); - if (!error && - !((r ^ TSC200X_CFR0_INITVALUE) & TSC200X_CFR0_RW_MASK)) { - goto out; - } - /* - * If we could not read our known value from configuration register 0 - * then we should reset the controller as if from power-up and start - * scanning again. + * If the mutex is taken, it means that disable or enable is in + * progress. In that case just reschedule the work. If the work + * is not needed, it will be canceled by disable. 
*/ - dev_info(ts->dev, "TSC200X not responding - resetting\n"); + scoped_guard(mutex_try, &ts->mutex) { + if (time_is_after_jiffies(ts->last_valid_interrupt + + msecs_to_jiffies(ts->esd_timeout))) + break; - disable_irq(ts->irq); - del_timer_sync(&ts->penup_timer); + /* + * We should be able to read register without disabling + * interrupts. + */ + error = regmap_read(ts->regmap, TSC200X_REG_CFR0, &r); + if (!error && + !((r ^ TSC200X_CFR0_INITVALUE) & TSC200X_CFR0_RW_MASK)) { + break; + } - tsc200x_update_pen_state(ts, 0, 0, 0); + /* + * If we could not read our known value from configuration + * register 0 then we should reset the controller as if from + * power-up and start scanning again. + */ + dev_info(ts->dev, "TSC200X not responding - resetting\n"); - tsc200x_reset(ts); + scoped_guard(disable_irq, &ts->irq) { + del_timer_sync(&ts->penup_timer); + tsc200x_update_pen_state(ts, 0, 0, 0); + tsc200x_reset(ts); + } - enable_irq(ts->irq); - tsc200x_start_scan(ts); + tsc200x_start_scan(ts); + } -out: - mutex_unlock(&ts->mutex); -reschedule: /* re-arm the watchdog */ schedule_delayed_work(&ts->esd_work, round_jiffies_relative( @@ -418,15 +406,13 @@ static int tsc200x_open(struct input_dev *input) { struct tsc200x *ts = input_get_drvdata(input); - mutex_lock(&ts->mutex); + guard(mutex)(&ts->mutex); if (!ts->suspended) __tsc200x_enable(ts); ts->opened = true; - mutex_unlock(&ts->mutex); - return 0; } @@ -434,14 +420,12 @@ static void tsc200x_close(struct input_dev *input) { struct tsc200x *ts = input_get_drvdata(input); - mutex_lock(&ts->mutex); + guard(mutex)(&ts->mutex); if (!ts->suspended) __tsc200x_disable(ts); ts->opened = false; - - mutex_unlock(&ts->mutex); } int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, @@ -488,20 +472,6 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, &esd_timeout); ts->esd_timeout = error ? 
0 : esd_timeout; - ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(ts->reset_gpio)) { - error = PTR_ERR(ts->reset_gpio); - dev_err(dev, "error acquiring reset gpio: %d\n", error); - return error; - } - - ts->vio = devm_regulator_get(dev, "vio"); - if (IS_ERR(ts->vio)) { - error = PTR_ERR(ts->vio); - dev_err(dev, "error acquiring vio regulator: %d", error); - return error; - } - mutex_init(&ts->mutex); spin_lock_init(&ts->lock); @@ -542,60 +512,60 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, touchscreen_parse_properties(input_dev, false, &ts->prop); + ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + error = PTR_ERR_OR_ZERO(ts->reset_gpio); + if (error) { + dev_err(dev, "error acquiring reset gpio: %d\n", error); + return error; + } + + error = devm_regulator_get_enable(dev, "vio"); + if (error) { + dev_err(dev, "error acquiring vio regulator: %d\n", error); + return error; + } + + tsc200x_reset(ts); + /* Ensure the touchscreen is off */ tsc200x_stop_scan(ts); - error = devm_request_threaded_irq(dev, irq, NULL, - tsc200x_irq_thread, - IRQF_TRIGGER_RISING | IRQF_ONESHOT, - "tsc200x", ts); + error = devm_request_threaded_irq(dev, irq, NULL, tsc200x_irq_thread, + IRQF_ONESHOT, "tsc200x", ts); if (error) { dev_err(dev, "Failed to request irq, err: %d\n", error); return error; } - error = regulator_enable(ts->vio); - if (error) - return error; - dev_set_drvdata(dev, ts); error = input_register_device(ts->idev); if (error) { dev_err(dev, "Failed to register input device, err: %d\n", error); - goto disable_regulator; + return error; } - irq_set_irq_wake(irq, 1); - return 0; + device_init_wakeup(dev, + device_property_read_bool(dev, "wakeup-source")); -disable_regulator: - regulator_disable(ts->vio); - return error; + return 0; } EXPORT_SYMBOL_GPL(tsc200x_probe); -void tsc200x_remove(struct device *dev) -{ - struct tsc200x *ts = dev_get_drvdata(dev); - - regulator_disable(ts->vio); -} -EXPORT_SYMBOL_GPL(tsc200x_remove); - static int tsc200x_suspend(struct device *dev) { struct tsc200x *ts = dev_get_drvdata(dev); - mutex_lock(&ts->mutex); + guard(mutex)(&ts->mutex); if (!ts->suspended && ts->opened) __tsc200x_disable(ts); ts->suspended = true; - mutex_unlock(&ts->mutex); + if (device_may_wakeup(dev)) + ts->wake_irq_enabled = enable_irq_wake(ts->irq) == 0; return 0; } @@ -604,15 +574,18 @@ static int tsc200x_resume(struct device *dev) { struct tsc200x *ts = dev_get_drvdata(dev); - mutex_lock(&ts->mutex); + guard(mutex)(&ts->mutex); + + if (ts->wake_irq_enabled) { + disable_irq_wake(ts->irq); + ts->wake_irq_enabled = false; + } if (ts->suspended && ts->opened) __tsc200x_enable(ts); ts->suspended = false; - mutex_unlock(&ts->mutex); - return 0; } diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h index 37de91efd78ea3..e76ba7a889ddc3 100644 --- a/drivers/input/touchscreen/tsc200x-core.h +++ b/drivers/input/touchscreen/tsc200x-core.h @@ -75,6 +75,5 @@ extern const struct attribute_group *tsc200x_groups[]; int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, struct regmap *regmap, int (*tsc200x_cmd)(struct device *dev, u8 cmd)); -void tsc200x_remove(struct device *dev); #endif diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index dd6b12c6dc5818..7567efabe01404 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -68,8 +68,6 @@ struct 
usbtouch_device_info { */ bool irq_always; - void (*process_pkt) (struct usbtouch_usb *usbtouch, unsigned char *pkt, int len); - /* * used to get the packet len. possible return values: * > 0: packet len @@ -94,7 +92,7 @@ struct usbtouch_usb { struct urb *irq; struct usb_interface *interface; struct input_dev *input; - struct usbtouch_device_info *type; + const struct usbtouch_device_info *type; struct mutex pm_mutex; /* serialize access to open/suspend */ bool is_open; char name[128]; @@ -103,141 +101,8 @@ struct usbtouch_usb { int x, y; int touch, press; -}; - - -/* device types */ -enum { - DEVTYPE_IGNORE = -1, - DEVTYPE_EGALAX, - DEVTYPE_PANJIT, - DEVTYPE_3M, - DEVTYPE_ITM, - DEVTYPE_ETURBO, - DEVTYPE_GUNZE, - DEVTYPE_DMC_TSC10, - DEVTYPE_IRTOUCH, - DEVTYPE_IRTOUCH_HIRES, - DEVTYPE_IDEALTEK, - DEVTYPE_GENERAL_TOUCH, - DEVTYPE_GOTOP, - DEVTYPE_JASTEC, - DEVTYPE_E2I, - DEVTYPE_ZYTRONIC, - DEVTYPE_TC45USB, - DEVTYPE_NEXIO, - DEVTYPE_ELO, - DEVTYPE_ETOUCH, -}; - -#define USB_DEVICE_HID_CLASS(vend, prod) \ - .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \ - | USB_DEVICE_ID_MATCH_DEVICE, \ - .idVendor = (vend), \ - .idProduct = (prod), \ - .bInterfaceClass = USB_INTERFACE_CLASS_HID - -static const struct usb_device_id usbtouch_devices[] = { -#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX - /* ignore the HID capable devices, handled by usbhid */ - {USB_DEVICE_HID_CLASS(0x0eef, 0x0001), .driver_info = DEVTYPE_IGNORE}, - {USB_DEVICE_HID_CLASS(0x0eef, 0x0002), .driver_info = DEVTYPE_IGNORE}, - - /* normal device IDs */ - {USB_DEVICE(0x3823, 0x0001), .driver_info = DEVTYPE_EGALAX}, - {USB_DEVICE(0x3823, 0x0002), .driver_info = DEVTYPE_EGALAX}, - {USB_DEVICE(0x0123, 0x0001), .driver_info = DEVTYPE_EGALAX}, - {USB_DEVICE(0x0eef, 0x0001), .driver_info = DEVTYPE_EGALAX}, - {USB_DEVICE(0x0eef, 0x0002), .driver_info = DEVTYPE_EGALAX}, - {USB_DEVICE(0x1234, 0x0001), .driver_info = DEVTYPE_EGALAX}, - {USB_DEVICE(0x1234, 0x0002), .driver_info = DEVTYPE_EGALAX}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT - {USB_DEVICE(0x134c, 0x0001), .driver_info = DEVTYPE_PANJIT}, - {USB_DEVICE(0x134c, 0x0002), .driver_info = DEVTYPE_PANJIT}, - {USB_DEVICE(0x134c, 0x0003), .driver_info = DEVTYPE_PANJIT}, - {USB_DEVICE(0x134c, 0x0004), .driver_info = DEVTYPE_PANJIT}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_3M - {USB_DEVICE(0x0596, 0x0001), .driver_info = DEVTYPE_3M}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ITM - {USB_DEVICE(0x0403, 0xf9e9), .driver_info = DEVTYPE_ITM}, - {USB_DEVICE(0x16e3, 0xf9e9), .driver_info = DEVTYPE_ITM}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO - {USB_DEVICE(0x1234, 0x5678), .driver_info = DEVTYPE_ETURBO}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE - {USB_DEVICE(0x0637, 0x0001), .driver_info = DEVTYPE_GUNZE}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10 - {USB_DEVICE(0x0afa, 0x03e8), .driver_info = DEVTYPE_DMC_TSC10}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH - {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, - {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, - {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, - {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK - {USB_DEVICE(0x1391, 0x1000), .driver_info = DEVTYPE_IDEALTEK}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH - {USB_DEVICE(0x0dfc, 0x0001), .driver_info = DEVTYPE_GENERAL_TOUCH}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP - {USB_DEVICE(0x08f2, 0x007f), .driver_info = DEVTYPE_GOTOP}, - 
{USB_DEVICE(0x08f2, 0x00ce), .driver_info = DEVTYPE_GOTOP}, - {USB_DEVICE(0x08f2, 0x00f4), .driver_info = DEVTYPE_GOTOP}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC - {USB_DEVICE(0x0f92, 0x0001), .driver_info = DEVTYPE_JASTEC}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_E2I - {USB_DEVICE(0x1ac7, 0x0001), .driver_info = DEVTYPE_E2I}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC - {USB_DEVICE(0x14c8, 0x0003), .driver_info = DEVTYPE_ZYTRONIC}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB - /* TC5UH */ - {USB_DEVICE(0x0664, 0x0309), .driver_info = DEVTYPE_TC45USB}, - /* TC4UM */ - {USB_DEVICE(0x0664, 0x0306), .driver_info = DEVTYPE_TC45USB}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO - /* data interface only */ - {USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00), - .driver_info = DEVTYPE_NEXIO}, - {USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00), - .driver_info = DEVTYPE_NEXIO}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ELO - {USB_DEVICE(0x04e7, 0x0020), .driver_info = DEVTYPE_ELO}, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH - {USB_DEVICE(0x7374, 0x0001), .driver_info = DEVTYPE_ETOUCH}, -#endif - {} + void (*process_pkt)(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len); }; @@ -273,6 +138,16 @@ static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info e2i_dev_info = { + .min_xc = 0x0, + .max_xc = 0x7fff, + .min_yc = 0x0, + .max_yc = 0x7fff, + .rept_size = 6, + .init = e2i_init, + .read_data = e2i_read_data, +}; #endif @@ -292,9 +167,8 @@ static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt) static int egalax_init(struct usbtouch_usb *usbtouch) { - int ret, i; - unsigned char *buf; struct usb_device *udev = interface_to_usbdev(usbtouch->interface); + int ret, i; /* * An eGalax diagnostic packet kicks the device into using the right @@ -302,7 +176,7 @@ static int egalax_init(struct usbtouch_usb *usbtouch) * read later and ignored. */ - buf = kmalloc(3, GFP_KERNEL); + u8 *buf __free(kfree) = kmalloc(3, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -316,17 +190,11 @@ static int egalax_init(struct usbtouch_usb *usbtouch) USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, 3, USB_CTRL_SET_TIMEOUT); - if (ret >= 0) { - ret = 0; - break; - } if (ret != -EPIPE) break; } - kfree(buf); - - return ret; + return ret < 0 ? 
ret : 0; } static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt) @@ -356,6 +224,17 @@ static int egalax_get_pkt_len(unsigned char *buf, int len) return 0; } + +static const struct usbtouch_device_info egalax_dev_info = { + .min_xc = 0x0, + .max_xc = 0x07ff, + .min_yc = 0x0, + .max_yc = 0x07ff, + .rept_size = 16, + .get_pkt_len = egalax_get_pkt_len, + .read_data = egalax_read_data, + .init = egalax_init, +}; #endif /***************************************************************************** @@ -402,6 +281,16 @@ static int etouch_get_pkt_len(unsigned char *buf, int len) return 0; } + +static const struct usbtouch_device_info etouch_dev_info = { + .min_xc = 0x0, + .max_xc = 0x07ff, + .min_yc = 0x0, + .max_yc = 0x07ff, + .rept_size = 16, + .get_pkt_len = etouch_get_pkt_len, + .read_data = etouch_read_data, +}; #endif /***************************************************************************** @@ -416,6 +305,15 @@ static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info panjit_dev_info = { + .min_xc = 0x0, + .max_xc = 0x0fff, + .min_yc = 0x0, + .max_yc = 0x0fff, + .rept_size = 8, + .read_data = panjit_read_data, +}; #endif @@ -449,35 +347,13 @@ struct mtouch_priv { u8 fw_rev_minor; }; -static ssize_t mtouch_firmware_rev_show(struct device *dev, - struct device_attribute *attr, char *output) -{ - struct usb_interface *intf = to_usb_interface(dev); - struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); - struct mtouch_priv *priv = usbtouch->priv; - - return sysfs_emit(output, "%1x.%1x\n", - priv->fw_rev_major, priv->fw_rev_minor); -} -static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL); - -static struct attribute *mtouch_attrs[] = { - &dev_attr_firmware_rev.attr, - NULL -}; - -static const struct attribute_group mtouch_attr_group = { - .attrs = mtouch_attrs, -}; - static int mtouch_get_fw_revision(struct usbtouch_usb *usbtouch) { struct usb_device *udev = interface_to_usbdev(usbtouch->interface); struct mtouch_priv *priv = usbtouch->priv; - u8 *buf; int ret; - buf = kzalloc(MTOUCHUSB_REQ_CTRLLR_ID_LEN, GFP_NOIO); + u8 *buf __free(kfree) = kzalloc(MTOUCHUSB_REQ_CTRLLR_ID_LEN, GFP_NOIO); if (!buf) return -ENOMEM; @@ -489,38 +365,24 @@ static int mtouch_get_fw_revision(struct usbtouch_usb *usbtouch) if (ret != MTOUCHUSB_REQ_CTRLLR_ID_LEN) { dev_warn(&usbtouch->interface->dev, "Failed to read FW rev: %d\n", ret); - ret = ret < 0 ? ret : -EIO; - goto free; + return ret < 0 ? 
ret : -EIO; } priv->fw_rev_major = buf[3]; priv->fw_rev_minor = buf[4]; - ret = 0; - -free: - kfree(buf); - return ret; + return 0; } static int mtouch_alloc(struct usbtouch_usb *usbtouch) { struct mtouch_priv *priv; - int ret; priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; usbtouch->priv = priv; - ret = sysfs_create_group(&usbtouch->interface->dev.kobj, - &mtouch_attr_group); - if (ret) { - kfree(usbtouch->priv); - usbtouch->priv = NULL; - return ret; - } - return 0; } @@ -571,9 +433,53 @@ static void mtouch_exit(struct usbtouch_usb *usbtouch) { struct mtouch_priv *priv = usbtouch->priv; - sysfs_remove_group(&usbtouch->interface->dev.kobj, &mtouch_attr_group); kfree(priv); } + +static struct usbtouch_device_info mtouch_dev_info = { + .min_xc = 0x0, + .max_xc = 0x4000, + .min_yc = 0x0, + .max_yc = 0x4000, + .rept_size = 11, + .read_data = mtouch_read_data, + .alloc = mtouch_alloc, + .init = mtouch_init, + .exit = mtouch_exit, +}; + +static ssize_t mtouch_firmware_rev_show(struct device *dev, + struct device_attribute *attr, char *output) +{ + struct usb_interface *intf = to_usb_interface(dev); + struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); + struct mtouch_priv *priv = usbtouch->priv; + + return sysfs_emit(output, "%1x.%1x\n", + priv->fw_rev_major, priv->fw_rev_minor); +} +static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL); + +static struct attribute *mtouch_attrs[] = { + &dev_attr_firmware_rev.attr, + NULL +}; + +static bool mtouch_group_visible(struct kobject *kobj) +{ + struct device *dev = kobj_to_dev(kobj); + struct usb_interface *intf = to_usb_interface(dev); + struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); + + return usbtouch->type == &mtouch_dev_info; +} + +DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(mtouch); + +static const struct attribute_group mtouch_attr_group = { + .is_visible = SYSFS_GROUP_VISIBLE(mtouch), + .attrs = mtouch_attrs, +}; #endif @@ -608,6 +514,16 @@ static int itm_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info itm_dev_info = { + .min_xc = 0x0, + .max_xc = 0x0fff, + .min_yc = 0x0, + .max_yc = 0x0fff, + .max_press = 0xff, + .rept_size = 8, + .read_data = itm_read_data, +}; #endif @@ -642,6 +558,16 @@ static int eturbo_get_pkt_len(unsigned char *buf, int len) return 3; return 0; } + +static const struct usbtouch_device_info eturbo_dev_info = { + .min_xc = 0x0, + .max_xc = 0x07ff, + .min_yc = 0x0, + .max_yc = 0x07ff, + .rept_size = 8, + .get_pkt_len = eturbo_get_pkt_len, + .read_data = eturbo_read_data, +}; #endif @@ -660,6 +586,15 @@ static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info gunze_dev_info = { + .min_xc = 0x0, + .max_xc = 0x0fff, + .min_yc = 0x0, + .max_yc = 0x0fff, + .rept_size = 4, + .read_data = gunze_read_data, +}; #endif /***************************************************************************** @@ -688,24 +623,23 @@ static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt) static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) { struct usb_device *dev = interface_to_usbdev(usbtouch->interface); - int ret = -ENOMEM; - unsigned char *buf; + int ret; - buf = kmalloc(2, GFP_NOIO); + u8 *buf __free(kfree) = kmalloc(2, GFP_NOIO); if (!buf) - goto err_nobuf; + return -ENOMEM; + /* reset */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), - TSC10_CMD_RESET, - USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0, 0, buf, 
2, USB_CTRL_SET_TIMEOUT); + TSC10_CMD_RESET, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, 0, buf, 2, USB_CTRL_SET_TIMEOUT); if (ret < 0) - goto err_out; - if (buf[0] != 0x06) { - ret = -ENODEV; - goto err_out; - } + return ret; + + if (buf[0] != 0x06) + return -ENODEV; /* TSC-25 data sheet specifies a delay after the RESET command */ msleep(150); @@ -713,28 +647,22 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) /* set coordinate output rate */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), - TSC10_CMD_RATE, - USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT); + TSC10_CMD_RATE, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT); if (ret < 0) - goto err_out; - if ((buf[0] != 0x06) && (buf[0] != 0x15 || buf[1] != 0x01)) { - ret = -ENODEV; - goto err_out; - } + return ret; + + if (buf[0] != 0x06 && (buf[0] != 0x15 || buf[1] != 0x01)) + return -ENODEV; /* start sending data */ - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - TSC10_CMD_DATA1, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); -err_out: - kfree(buf); -err_nobuf: - return ret; + return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + TSC10_CMD_DATA1, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } - static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[2] & 0x03) << 8) | pkt[1]; @@ -743,6 +671,16 @@ static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info dmc_tsc10_dev_info = { + .min_xc = 0x0, + .max_xc = 0x03ff, + .min_yc = 0x0, + .max_yc = 0x03ff, + .rept_size = 5, + .init = dmc_tsc10_init, + .read_data = dmc_tsc10_read_data, +}; #endif @@ -758,6 +696,24 @@ static int irtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info irtouch_dev_info = { + .min_xc = 0x0, + .max_xc = 0x0fff, + .min_yc = 0x0, + .max_yc = 0x0fff, + .rept_size = 8, + .read_data = irtouch_read_data, +}; + +static const struct usbtouch_device_info irtouch_hires_dev_info = { + .min_xc = 0x0, + .max_xc = 0x7fff, + .min_yc = 0x0, + .max_yc = 0x7fff, + .rept_size = 8, + .read_data = irtouch_read_data, +}; #endif /***************************************************************************** @@ -772,6 +728,15 @@ static int tc45usb_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info tc45usb_dev_info = { + .min_xc = 0x0, + .max_xc = 0x0fff, + .min_yc = 0x0, + .max_yc = 0x0fff, + .rept_size = 5, + .read_data = tc45usb_read_data, +}; #endif /***************************************************************************** @@ -811,6 +776,16 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 0; } } + +static const struct usbtouch_device_info idealtek_dev_info = { + .min_xc = 0x0, + .max_xc = 0x0fff, + .min_yc = 0x0, + .max_yc = 0x0fff, + .rept_size = 8, + .get_pkt_len = idealtek_get_pkt_len, + .read_data = idealtek_read_data, +}; #endif /***************************************************************************** @@ -826,6 +801,15 @@ static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info general_touch_dev_info = { + .min_xc = 0x0, + .max_xc = 0x7fff, + .min_yc = 0x0, + .max_yc = 
0x7fff, + .rept_size = 7, + .read_data = general_touch_read_data, +}; #endif /***************************************************************************** @@ -840,6 +824,15 @@ static int gotop_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info gotop_dev_info = { + .min_xc = 0x0, + .max_xc = 0x03ff, + .min_yc = 0x0, + .max_yc = 0x03ff, + .rept_size = 4, + .read_data = gotop_read_data, +}; #endif /***************************************************************************** @@ -854,6 +847,15 @@ static int jastec_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } + +static const struct usbtouch_device_info jastec_dev_info = { + .min_xc = 0x0, + .max_xc = 0x0fff, + .min_yc = 0x0, + .max_yc = 0x0fff, + .rept_size = 4, + .read_data = jastec_read_data, +}; #endif /***************************************************************************** @@ -890,6 +892,16 @@ static int zytronic_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 0; } + +static const struct usbtouch_device_info zytronic_dev_info = { + .min_xc = 0x0, + .max_xc = 0x03ff, + .min_yc = 0x0, + .max_yc = 0x03ff, + .rept_size = 5, + .read_data = zytronic_read_data, + .irq_always = true, +}; #endif /***************************************************************************** @@ -960,7 +972,6 @@ static int nexio_init(struct usbtouch_usb *usbtouch) struct nexio_priv *priv = usbtouch->priv; int ret = -ENOMEM; int actual_len, i; - unsigned char *buf; char *firmware_ver = NULL, *device_name = NULL; int input_ep = 0, output_ep = 0; @@ -976,9 +987,9 @@ static int nexio_init(struct usbtouch_usb *usbtouch) if (!input_ep || !output_ep) return -ENXIO; - buf = kmalloc(NEXIO_BUFSIZE, GFP_NOIO); + u8 *buf __free(kfree) = kmalloc(NEXIO_BUFSIZE, GFP_NOIO); if (!buf) - goto out_buf; + return -ENOMEM; /* two empty reads */ for (i = 0; i < 2; i++) { @@ -986,7 +997,7 @@ static int nexio_init(struct usbtouch_usb *usbtouch) buf, NEXIO_BUFSIZE, &actual_len, NEXIO_TIMEOUT); if (ret < 0) - goto out_buf; + return ret; } /* send init command */ @@ -995,7 +1006,7 @@ static int nexio_init(struct usbtouch_usb *usbtouch) buf, sizeof(nexio_init_pkt), &actual_len, NEXIO_TIMEOUT); if (ret < 0) - goto out_buf; + return ret; /* read replies */ for (i = 0; i < 3; i++) { @@ -1026,11 +1037,8 @@ static int nexio_init(struct usbtouch_usb *usbtouch) usb_fill_bulk_urb(priv->ack, dev, usb_sndbulkpipe(dev, output_ep), priv->ack_buf, sizeof(nexio_ack_pkt), nexio_ack_complete, usbtouch); - ret = 0; -out_buf: - kfree(buf); - return ret; + return 0; } static void nexio_exit(struct usbtouch_usb *usbtouch) @@ -1067,13 +1075,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt) if (ret) dev_warn(dev, "Failed to submit ACK URB: %d\n", ret); - if (!usbtouch->type->max_xc) { - usbtouch->type->max_xc = 2 * x_len; + if (!input_abs_get_max(usbtouch->input, ABS_X)) { input_set_abs_params(usbtouch->input, ABS_X, - 0, usbtouch->type->max_xc, 0, 0); - usbtouch->type->max_yc = 2 * y_len; + 0, 2 * x_len, 0, 0); input_set_abs_params(usbtouch->input, ABS_Y, - 0, usbtouch->type->max_yc, 0, 0); + 0, 2 * y_len, 0, 0); } /* * The device reports state of IR sensors on X and Y axes. 
@@ -1128,6 +1134,15 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt) } return 0; } + +static const struct usbtouch_device_info nexio_dev_info = { + .rept_size = 1024, + .irq_always = true, + .read_data = nexio_read_data, + .alloc = nexio_alloc, + .init = nexio_init, + .exit = nexio_exit, +}; #endif @@ -1146,241 +1161,17 @@ static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt) return 1; } -#endif - - -/***************************************************************************** - * the different device descriptors - */ -#ifdef MULTI_PACKET -static void usbtouch_process_multi(struct usbtouch_usb *usbtouch, - unsigned char *pkt, int len); -#endif - -static struct usbtouch_device_info usbtouch_dev_info[] = { -#ifdef CONFIG_TOUCHSCREEN_USB_ELO - [DEVTYPE_ELO] = { - .min_xc = 0x0, - .max_xc = 0x0fff, - .min_yc = 0x0, - .max_yc = 0x0fff, - .max_press = 0xff, - .rept_size = 8, - .read_data = elo_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX - [DEVTYPE_EGALAX] = { - .min_xc = 0x0, - .max_xc = 0x07ff, - .min_yc = 0x0, - .max_yc = 0x07ff, - .rept_size = 16, - .process_pkt = usbtouch_process_multi, - .get_pkt_len = egalax_get_pkt_len, - .read_data = egalax_read_data, - .init = egalax_init, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT - [DEVTYPE_PANJIT] = { - .min_xc = 0x0, - .max_xc = 0x0fff, - .min_yc = 0x0, - .max_yc = 0x0fff, - .rept_size = 8, - .read_data = panjit_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_3M - [DEVTYPE_3M] = { - .min_xc = 0x0, - .max_xc = 0x4000, - .min_yc = 0x0, - .max_yc = 0x4000, - .rept_size = 11, - .read_data = mtouch_read_data, - .alloc = mtouch_alloc, - .init = mtouch_init, - .exit = mtouch_exit, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ITM - [DEVTYPE_ITM] = { - .min_xc = 0x0, - .max_xc = 0x0fff, - .min_yc = 0x0, - .max_yc = 0x0fff, - .max_press = 0xff, - .rept_size = 8, - .read_data = itm_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO - [DEVTYPE_ETURBO] = { - .min_xc = 0x0, - .max_xc = 0x07ff, - .min_yc = 0x0, - .max_yc = 0x07ff, - .rept_size = 8, - .process_pkt = usbtouch_process_multi, - .get_pkt_len = eturbo_get_pkt_len, - .read_data = eturbo_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE - [DEVTYPE_GUNZE] = { - .min_xc = 0x0, - .max_xc = 0x0fff, - .min_yc = 0x0, - .max_yc = 0x0fff, - .rept_size = 4, - .read_data = gunze_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10 - [DEVTYPE_DMC_TSC10] = { - .min_xc = 0x0, - .max_xc = 0x03ff, - .min_yc = 0x0, - .max_yc = 0x03ff, - .rept_size = 5, - .init = dmc_tsc10_init, - .read_data = dmc_tsc10_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH - [DEVTYPE_IRTOUCH] = { - .min_xc = 0x0, - .max_xc = 0x0fff, - .min_yc = 0x0, - .max_yc = 0x0fff, - .rept_size = 8, - .read_data = irtouch_read_data, - }, - - [DEVTYPE_IRTOUCH_HIRES] = { - .min_xc = 0x0, - .max_xc = 0x7fff, - .min_yc = 0x0, - .max_yc = 0x7fff, - .rept_size = 8, - .read_data = irtouch_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK - [DEVTYPE_IDEALTEK] = { - .min_xc = 0x0, - .max_xc = 0x0fff, - .min_yc = 0x0, - .max_yc = 0x0fff, - .rept_size = 8, - .process_pkt = usbtouch_process_multi, - .get_pkt_len = idealtek_get_pkt_len, - .read_data = idealtek_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH - [DEVTYPE_GENERAL_TOUCH] = { - .min_xc = 0x0, - .max_xc = 0x7fff, - .min_yc = 0x0, - .max_yc = 0x7fff, - .rept_size = 7, - .read_data = general_touch_read_data, - }, -#endif - 
-#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP - [DEVTYPE_GOTOP] = { - .min_xc = 0x0, - .max_xc = 0x03ff, - .min_yc = 0x0, - .max_yc = 0x03ff, - .rept_size = 4, - .read_data = gotop_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC - [DEVTYPE_JASTEC] = { - .min_xc = 0x0, - .max_xc = 0x0fff, - .min_yc = 0x0, - .max_yc = 0x0fff, - .rept_size = 4, - .read_data = jastec_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_E2I - [DEVTYPE_E2I] = { - .min_xc = 0x0, - .max_xc = 0x7fff, - .min_yc = 0x0, - .max_yc = 0x7fff, - .rept_size = 6, - .init = e2i_init, - .read_data = e2i_read_data, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC - [DEVTYPE_ZYTRONIC] = { - .min_xc = 0x0, - .max_xc = 0x03ff, - .min_yc = 0x0, - .max_yc = 0x03ff, - .rept_size = 5, - .read_data = zytronic_read_data, - .irq_always = true, - }, -#endif - -#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB - [DEVTYPE_TC45USB] = { - .min_xc = 0x0, - .max_xc = 0x0fff, - .min_yc = 0x0, - .max_yc = 0x0fff, - .rept_size = 5, - .read_data = tc45usb_read_data, - }, -#endif -#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO - [DEVTYPE_NEXIO] = { - .rept_size = 1024, - .irq_always = true, - .read_data = nexio_read_data, - .alloc = nexio_alloc, - .init = nexio_init, - .exit = nexio_exit, - }, -#endif -#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH - [DEVTYPE_ETOUCH] = { - .min_xc = 0x0, - .max_xc = 0x07ff, - .min_yc = 0x0, - .max_yc = 0x07ff, - .rept_size = 16, - .process_pkt = usbtouch_process_multi, - .get_pkt_len = etouch_get_pkt_len, - .read_data = etouch_read_data, - }, -#endif +static const struct usbtouch_device_info elo_dev_info = { + .min_xc = 0x0, + .max_xc = 0x0fff, + .min_yc = 0x0, + .max_yc = 0x0fff, + .max_press = 0xff, + .rept_size = 8, + .read_data = elo_read_data, }; +#endif /***************************************************************************** @@ -1389,10 +1180,10 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { static void usbtouch_process_pkt(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len) { - struct usbtouch_device_info *type = usbtouch->type; + const struct usbtouch_device_info *type = usbtouch->type; if (!type->read_data(usbtouch, pkt)) - return; + return; input_report_key(usbtouch->input, BTN_TOUCH, usbtouch->touch); @@ -1485,9 +1276,15 @@ static void usbtouch_process_multi(struct usbtouch_usb *usbtouch, usbtouch->buf_len = 0; return; } +#else +static void usbtouch_process_multi(struct usbtouch_usb *usbtouch, + unsigned char *pkt, int len) +{ + dev_WARN_ONCE(&usbtouch->interface->dev, 1, + "Protocol has ->get_pkt_len() without #define MULTI_PACKET"); +} #endif - static void usbtouch_irq(struct urb *urb) { struct usbtouch_usb *usbtouch = urb->context; @@ -1518,7 +1315,7 @@ static void usbtouch_irq(struct urb *urb) goto exit; } - usbtouch->type->process_pkt(usbtouch, usbtouch->data, urb->actual_length); + usbtouch->process_pkt(usbtouch, usbtouch->data, urb->actual_length); exit: usb_mark_last_busy(interface_to_usbdev(usbtouch->interface)); @@ -1528,6 +1325,20 @@ static void usbtouch_irq(struct urb *urb) __func__, retval); } +static int usbtouch_start_io(struct usbtouch_usb *usbtouch) +{ + guard(mutex)(&usbtouch->pm_mutex); + + if (!usbtouch->type->irq_always) + if (usb_submit_urb(usbtouch->irq, GFP_KERNEL)) + return -EIO; + + usbtouch->interface->needs_remote_wakeup = 1; + usbtouch->is_open = true; + + return 0; +} + static int usbtouch_open(struct input_dev *input) { struct usbtouch_usb *usbtouch = input_get_drvdata(input); @@ -1536,23 +1347,12 @@ static int usbtouch_open(struct input_dev *input) 
usbtouch->irq->dev = interface_to_usbdev(usbtouch->interface); r = usb_autopm_get_interface(usbtouch->interface) ? -EIO : 0; - if (r < 0) - goto out; - - mutex_lock(&usbtouch->pm_mutex); - if (!usbtouch->type->irq_always) { - if (usb_submit_urb(usbtouch->irq, GFP_KERNEL)) { - r = -EIO; - goto out_put; - } - } + if (r) + return r; + + r = usbtouch_start_io(usbtouch); - usbtouch->interface->needs_remote_wakeup = 1; - usbtouch->is_open = true; -out_put: - mutex_unlock(&usbtouch->pm_mutex); usb_autopm_put_interface(usbtouch->interface); -out: return r; } @@ -1561,11 +1361,11 @@ static void usbtouch_close(struct input_dev *input) struct usbtouch_usb *usbtouch = input_get_drvdata(input); int r; - mutex_lock(&usbtouch->pm_mutex); - if (!usbtouch->type->irq_always) - usb_kill_urb(usbtouch->irq); - usbtouch->is_open = false; - mutex_unlock(&usbtouch->pm_mutex); + scoped_guard(mutex, &usbtouch->pm_mutex) { + if (!usbtouch->type->irq_always) + usb_kill_urb(usbtouch->irq); + usbtouch->is_open = false; + } r = usb_autopm_get_interface(usbtouch->interface); usbtouch->interface->needs_remote_wakeup = 0; @@ -1573,8 +1373,7 @@ static void usbtouch_close(struct input_dev *input) usb_autopm_put_interface(usbtouch->interface); } -static int usbtouch_suspend -(struct usb_interface *intf, pm_message_t message) +static int usbtouch_suspend(struct usb_interface *intf, pm_message_t message) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); @@ -1586,20 +1385,19 @@ static int usbtouch_suspend static int usbtouch_resume(struct usb_interface *intf) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); - int result = 0; - mutex_lock(&usbtouch->pm_mutex); + guard(mutex)(&usbtouch->pm_mutex); + if (usbtouch->is_open || usbtouch->type->irq_always) - result = usb_submit_urb(usbtouch->irq, GFP_NOIO); - mutex_unlock(&usbtouch->pm_mutex); + return usb_submit_urb(usbtouch->irq, GFP_NOIO); - return result; + return 0; } static int usbtouch_reset_resume(struct usb_interface *intf) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); - int err = 0; + int err; /* reinit the device */ if (usbtouch->type->init) { @@ -1613,12 +1411,12 @@ static int usbtouch_reset_resume(struct usb_interface *intf) } /* restart IO if needed */ - mutex_lock(&usbtouch->pm_mutex); + guard(mutex)(&usbtouch->pm_mutex); + if (usbtouch->is_open) - err = usb_submit_urb(usbtouch->irq, GFP_NOIO); - mutex_unlock(&usbtouch->pm_mutex); + return usb_submit_urb(usbtouch->irq, GFP_NOIO); - return err; + return 0; } static void usbtouch_free_buffers(struct usb_device *udev, @@ -1648,14 +1446,12 @@ static int usbtouch_probe(struct usb_interface *intf, struct input_dev *input_dev; struct usb_endpoint_descriptor *endpoint; struct usb_device *udev = interface_to_usbdev(intf); - struct usbtouch_device_info *type; + const struct usbtouch_device_info *type; int err = -ENOMEM; /* some devices are ignored */ - if (id->driver_info == DEVTYPE_IGNORE) - return -ENODEV; - - if (id->driver_info >= ARRAY_SIZE(usbtouch_dev_info)) + type = (const struct usbtouch_device_info *)id->driver_info; + if (!type) return -ENODEV; endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting); @@ -1668,11 +1464,7 @@ static int usbtouch_probe(struct usb_interface *intf, goto out_free; mutex_init(&usbtouch->pm_mutex); - - type = &usbtouch_dev_info[id->driver_info]; usbtouch->type = type; - if (!type->process_pkt) - type->process_pkt = usbtouch_process_pkt; usbtouch->data_size = type->rept_size; if (type->get_pkt_len) { @@ -1696,6 +1488,9 @@ static int usbtouch_probe(struct 
usb_interface *intf, usbtouch->buffer = kmalloc(type->rept_size, GFP_KERNEL); if (!usbtouch->buffer) goto out_free_buffers; + usbtouch->process_pkt = usbtouch_process_multi; + } else { + usbtouch->process_pkt = usbtouch_process_pkt; } usbtouch->irq = usb_alloc_urb(0, GFP_KERNEL); @@ -1842,6 +1637,150 @@ static void usbtouch_disconnect(struct usb_interface *intf) kfree(usbtouch); } +static const struct attribute_group *usbtouch_groups[] = { +#ifdef CONFIG_TOUCHSCREEN_USB_3M + &mtouch_attr_group, +#endif + NULL +}; + +static const struct usb_device_id usbtouch_devices[] = { +#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX + /* ignore the HID capable devices, handled by usbhid */ + { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0001, USB_INTERFACE_CLASS_HID), + .driver_info = 0 }, + { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0002, USB_INTERFACE_CLASS_HID), + .driver_info = 0 }, + + /* normal device IDs */ + { USB_DEVICE(0x3823, 0x0001), + .driver_info = (kernel_ulong_t)&egalax_dev_info }, + { USB_DEVICE(0x3823, 0x0002), + .driver_info = (kernel_ulong_t)&egalax_dev_info }, + { USB_DEVICE(0x0123, 0x0001), + .driver_info = (kernel_ulong_t)&egalax_dev_info }, + { USB_DEVICE(0x0eef, 0x0001), + .driver_info = (kernel_ulong_t)&egalax_dev_info }, + { USB_DEVICE(0x0eef, 0x0002), + .driver_info = (kernel_ulong_t)&egalax_dev_info }, + { USB_DEVICE(0x1234, 0x0001), + .driver_info = (kernel_ulong_t)&egalax_dev_info }, + { USB_DEVICE(0x1234, 0x0002), + .driver_info = (kernel_ulong_t)&egalax_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT + { USB_DEVICE(0x134c, 0x0001), + .driver_info = (kernel_ulong_t)&panjit_dev_info }, + { USB_DEVICE(0x134c, 0x0002), + .driver_info = (kernel_ulong_t)&panjit_dev_info }, + { USB_DEVICE(0x134c, 0x0003), + .driver_info = (kernel_ulong_t)&panjit_dev_info }, + { USB_DEVICE(0x134c, 0x0004), + .driver_info = (kernel_ulong_t)&panjit_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_3M + { USB_DEVICE(0x0596, 0x0001), + .driver_info = (kernel_ulong_t)&mtouch_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_ITM + { USB_DEVICE(0x0403, 0xf9e9), + .driver_info = (kernel_ulong_t)&itm_dev_info }, + { USB_DEVICE(0x16e3, 0xf9e9), + .driver_info = (kernel_ulong_t)&itm_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO + { USB_DEVICE(0x1234, 0x5678), + .driver_info = (kernel_ulong_t)&eturbo_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE + { USB_DEVICE(0x0637, 0x0001), + .driver_info = (kernel_ulong_t)&gunze_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10 + { USB_DEVICE(0x0afa, 0x03e8), + .driver_info = (kernel_ulong_t)&dmc_tsc10_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH + { USB_DEVICE(0x255e, 0x0001), + .driver_info = (kernel_ulong_t)&irtouch_dev_info }, + { USB_DEVICE(0x595a, 0x0001), + .driver_info = (kernel_ulong_t)&irtouch_dev_info }, + { USB_DEVICE(0x6615, 0x0001), + .driver_info = (kernel_ulong_t)&irtouch_dev_info }, + { USB_DEVICE(0x6615, 0x0012), + .driver_info = (kernel_ulong_t)&irtouch_hires_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK + { USB_DEVICE(0x1391, 0x1000), + .driver_info = (kernel_ulong_t)&idealtek_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH + { USB_DEVICE(0x0dfc, 0x0001), + .driver_info = (kernel_ulong_t)&general_touch_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP + { USB_DEVICE(0x08f2, 0x007f), + .driver_info = (kernel_ulong_t)&gotop_dev_info }, + { USB_DEVICE(0x08f2, 0x00ce), + .driver_info = (kernel_ulong_t)&gotop_dev_info }, + { USB_DEVICE(0x08f2, 0x00f4), + 
.driver_info = (kernel_ulong_t)&gotop_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC + { USB_DEVICE(0x0f92, 0x0001), + .driver_info = (kernel_ulong_t)&jastec_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_E2I + { USB_DEVICE(0x1ac7, 0x0001), + .driver_info = (kernel_ulong_t)&e2i_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC + { USB_DEVICE(0x14c8, 0x0003), + .driver_info = (kernel_ulong_t)&zytronic_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB + /* TC5UH */ + { USB_DEVICE(0x0664, 0x0309), + .driver_info = (kernel_ulong_t)&tc45usb_dev_info }, + /* TC4UM */ + { USB_DEVICE(0x0664, 0x0306), + .driver_info = (kernel_ulong_t)&tc45usb_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO + /* data interface only */ + { USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00), + .driver_info = (kernel_ulong_t)&nexio_dev_info }, + { USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00), + .driver_info = (kernel_ulong_t)&nexio_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_ELO + { USB_DEVICE(0x04e7, 0x0020), + .driver_info = (kernel_ulong_t)&elo_dev_info }, +#endif + +#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH + { USB_DEVICE(0x7374, 0x0001), + .driver_info = (kernel_ulong_t)&etouch_dev_info }, +#endif + + { } +}; MODULE_DEVICE_TABLE(usb, usbtouch_devices); static struct usb_driver usbtouch_driver = { @@ -1852,6 +1791,7 @@ static struct usb_driver usbtouch_driver = { .resume = usbtouch_resume, .reset_resume = usbtouch_reset_resume, .id_table = usbtouch_devices, + .dev_groups = usbtouch_groups, .supports_autosuspend = 1, }; diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c index 486230985bf084..fd97a83f56649e 100644 --- a/drivers/input/touchscreen/wacom_i2c.c +++ b/drivers/input/touchscreen/wacom_i2c.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include /* Bitmasks (for data[3]) */ #define WACOM_TIP_SWITCH BIT(0) diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c index 698fc7e0ee7f86..27941245e962f6 100644 --- a/drivers/input/touchscreen/wdt87xx_i2c.c +++ b/drivers/input/touchscreen/wdt87xx_i2c.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #define WDT87XX_NAME "wdt87xx_i2c" #define WDT87XX_FW_NAME "wdt87xx_fw.bin" diff --git a/drivers/input/touchscreen/zet6223.c b/drivers/input/touchscreen/zet6223.c index 27333fded9a957..943634ba9cd972 100644 --- a/drivers/input/touchscreen/zet6223.c +++ b/drivers/input/touchscreen/zet6223.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #define ZET6223_MAX_FINGERS 16 #define ZET6223_MAX_PKT_SIZE (3 + 4 * ZET6223_MAX_FINGERS) diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index fdf2d1e770c838..df42fdf36ae3b0 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c @@ -9,21 +9,20 @@ * Author: Pieter Truter */ -#include -#include -#include -#include -#include -#include #include -#include #include -#include +#include +#include +#include #include #include -#include -#include +#include +#include #include +#include +#include +#include +#include #define WAIT_TIMEOUT msecs_to_jiffies(1000) @@ -97,9 +96,7 @@ struct zforce_point { * @suspending in the process of going to suspend (don't emit wakeup * events for commands executed to suspend the device) * @suspended device suspended - * @access_mutex serialize i2c-access, to keep multipart reads together * @command_done completion to wait 
for the command result
- * @command_mutex	serialize commands sent to the ic
  * @command_waiting	the id of the command that is currently waiting
  *			for a result
  * @command_result	returned result of the command
@@ -108,11 +105,8 @@ struct zforce_ts {
 	struct i2c_client	*client;
 	struct input_dev	*input;
 	struct touchscreen_properties prop;
-	const struct zforce_ts_platdata *pdata;
 	char			phys[32];
 
-	struct regulator	*reg_vdd;
-
 	struct gpio_desc	*gpio_int;
 	struct gpio_desc	*gpio_rst;
@@ -126,10 +120,7 @@ struct zforce_ts {
 	u16			version_build;
 	u16			version_rev;
 
-	struct mutex		access_mutex;
-
 	struct completion	command_done;
-	struct mutex		command_mutex;
 	int			command_waiting;
 	int			command_result;
 };
@@ -146,9 +137,7 @@ static int zforce_command(struct zforce_ts *ts, u8 cmd)
 	buf[1] = 1; /* data size, command only */
 	buf[2] = cmd;
 
-	mutex_lock(&ts->access_mutex);
 	ret = i2c_master_send(client, &buf[0], ARRAY_SIZE(buf));
-	mutex_unlock(&ts->access_mutex);
 	if (ret < 0) {
 		dev_err(&client->dev, "i2c send data request error: %d\n", ret);
 		return ret;
@@ -157,59 +146,36 @@ static int zforce_command(struct zforce_ts *ts, u8 cmd)
 	return 0;
 }
 
-static void zforce_reset_assert(struct zforce_ts *ts)
-{
-	gpiod_set_value_cansleep(ts->gpio_rst, 1);
-}
-
-static void zforce_reset_deassert(struct zforce_ts *ts)
-{
-	gpiod_set_value_cansleep(ts->gpio_rst, 0);
-}
-
 static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
 {
 	struct i2c_client *client = ts->client;
 	int ret;
 
-	ret = mutex_trylock(&ts->command_mutex);
-	if (!ret) {
-		dev_err(&client->dev, "already waiting for a command\n");
-		return -EBUSY;
-	}
-
 	dev_dbg(&client->dev, "sending %d bytes for command 0x%x\n",
 		buf[1], buf[2]);
 
 	ts->command_waiting = buf[2];
 
-	mutex_lock(&ts->access_mutex);
 	ret = i2c_master_send(client, buf, len);
-	mutex_unlock(&ts->access_mutex);
 	if (ret < 0) {
 		dev_err(&client->dev, "i2c send data request error: %d\n", ret);
-		goto unlock;
+		return ret;
 	}
 
 	dev_dbg(&client->dev, "waiting for result for command 0x%x\n", buf[2]);
 
-	if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) {
-		ret = -ETIME;
-		goto unlock;
-	}
+	if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
+		return -ETIME;
 
-	ret = ts->command_result;
-
-unlock:
-	mutex_unlock(&ts->command_mutex);
-	return ret;
+	return ts->command_result;
 }
 
 static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
 {
 	struct i2c_client *client = ts->client;
 	char buf[3];
-	int ret;
+	int error;
 
 	dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
 
@@ -217,10 +183,11 @@ static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
 	buf[1] = 1; /* data size, command only */
 	buf[2] = cmd;
 
-	ret = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
-	if (ret < 0) {
-		dev_err(&client->dev, "i2c send data request error: %d\n", ret);
-		return ret;
+	error = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+	if (error) {
+		dev_err(&client->dev, "i2c send data request error: %d\n",
+			error);
+		return error;
 	}
 
 	return 0;
@@ -268,40 +235,40 @@ static int zforce_setconfig(struct zforce_ts *ts, char b1)
 static int zforce_start(struct zforce_ts *ts)
 {
 	struct i2c_client *client = ts->client;
-	int ret;
+	int error;
 
 	dev_dbg(&client->dev, "starting device\n");
 
-	ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
-	if (ret) {
-		dev_err(&client->dev, "Unable to initialize, %d\n", ret);
-		return ret;
+	error = zforce_command_wait(ts, COMMAND_INITIALIZE);
+	if (error) {
+		dev_err(&client->dev, "Unable to initialize, %d\n", error);
+		return error;
 	}
 
-	ret = zforce_resolution(ts, ts->prop.max_x,
-				ts->prop.max_y);
-	if (ret) {
-		dev_err(&client->dev, "Unable to set resolution, %d\n", ret);
-		goto error;
+	error = zforce_resolution(ts, ts->prop.max_x, ts->prop.max_y);
+	if (error) {
+		dev_err(&client->dev, "Unable to set resolution, %d\n", error);
+		goto err_deactivate;
 	}
 
-	ret = zforce_scan_frequency(ts, 10, 50, 50);
-	if (ret) {
+	error = zforce_scan_frequency(ts, 10, 50, 50);
+	if (error) {
 		dev_err(&client->dev, "Unable to set scan frequency, %d\n",
-			ret);
-		goto error;
+			error);
+		goto err_deactivate;
 	}
 
-	ret = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
-	if (ret) {
+	error = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
+	if (error) {
 		dev_err(&client->dev, "Unable to set config\n");
-		goto error;
+		goto err_deactivate;
 	}
 
 	/* start sending touch events */
-	ret = zforce_command(ts, COMMAND_DATAREQUEST);
-	if (ret) {
+	error = zforce_command(ts, COMMAND_DATAREQUEST);
+	if (error) {
 		dev_err(&client->dev, "Unable to request data\n");
-		goto error;
+		goto err_deactivate;
 	}
 
 	/*
@@ -312,24 +279,24 @@ static int zforce_start(struct zforce_ts *ts)
 
 	return 0;
 
-error:
+err_deactivate:
 	zforce_command_wait(ts, COMMAND_DEACTIVATE);
-	return ret;
+	return error;
 }
 
 static int zforce_stop(struct zforce_ts *ts)
 {
 	struct i2c_client *client = ts->client;
-	int ret;
+	int error;
 
 	dev_dbg(&client->dev, "stopping device\n");
 
 	/* Deactivates touch sensing and puts the device into sleep. */
-	ret = zforce_command_wait(ts, COMMAND_DEACTIVATE);
-	if (ret != 0) {
+	error = zforce_command_wait(ts, COMMAND_DEACTIVATE);
+	if (error) {
 		dev_err(&client->dev, "could not deactivate device, %d\n",
-			ret);
-		return ret;
+			error);
+		return error;
 	}
 
 	return 0;
@@ -340,6 +307,7 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
 	struct i2c_client *client = ts->client;
 	struct zforce_point point;
 	int count, i, num = 0;
+	u8 *p;
 
 	count = payload[0];
 	if (count > ZFORCE_REPORT_POINTS) {
@@ -350,10 +318,10 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
 	}
 
 	for (i = 0; i < count; i++) {
-		point.coord_x =
-			payload[9 * i + 2] << 8 | payload[9 * i + 1];
-		point.coord_y =
-			payload[9 * i + 4] << 8 | payload[9 * i + 3];
+		p = &payload[i * 9 + 1];
+
+		point.coord_x = get_unaligned_le16(&p[0]);
+		point.coord_y = get_unaligned_le16(&p[2]);
 
 		if (point.coord_x > ts->prop.max_x ||
 		    point.coord_y > ts->prop.max_y) {
@@ -362,18 +330,16 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
 			point.coord_x = point.coord_y = 0;
 		}
 
-		point.state = payload[9 * i + 5] & 0x0f;
-		point.id = (payload[9 * i + 5] & 0xf0) >> 4;
+		point.state = p[4] & 0x0f;
+		point.id = (p[4] & 0xf0) >> 4;
 
 		/* determine touch major, minor and orientation */
-		point.area_major = max(payload[9 * i + 6],
-				       payload[9 * i + 7]);
-		point.area_minor = min(payload[9 * i + 6],
-				       payload[9 * i + 7]);
-		point.orientation = payload[9 * i + 6] > payload[9 * i + 7];
+		point.area_major = max(p[5], p[6]);
+		point.area_minor = min(p[5], p[6]);
+		point.orientation = p[5] > p[6];
 
-		point.pressure = payload[9 * i + 8];
-		point.prblty = payload[9 * i + 9];
+		point.pressure = p[7];
+		point.prblty = p[8];
 
 		dev_dbg(&client->dev,
 			"point %d/%d: state %d, id %d, pressure %d, prblty %d, x %d, y %d, amajor %d, aminor %d, ori %d\n",
@@ -386,10 +352,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
 		/* the zforce id starts with "1", so needs to be decreased */
 		input_mt_slot(ts->input, point.id - 1);
 
-		input_mt_report_slot_state(ts->input, MT_TOOL_FINGER,
-					   point.state != STATE_UP);
-
-		if (point.state != STATE_UP) {
+		if 
(input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, + point.state != STATE_UP)) { touchscreen_report_pos(ts->input, &ts->prop, point.coord_x, point.coord_y, true); @@ -417,41 +381,35 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf) struct i2c_client *client = ts->client; int ret; - mutex_lock(&ts->access_mutex); - /* read 2 byte message header */ ret = i2c_master_recv(client, buf, 2); if (ret < 0) { dev_err(&client->dev, "error reading header: %d\n", ret); - goto unlock; + return ret; } if (buf[PAYLOAD_HEADER] != FRAME_START) { dev_err(&client->dev, "invalid frame start: %d\n", buf[0]); - ret = -EIO; - goto unlock; + return -EIO; } if (buf[PAYLOAD_LENGTH] == 0) { dev_err(&client->dev, "invalid payload length: %d\n", buf[PAYLOAD_LENGTH]); - ret = -EIO; - goto unlock; + return -EIO; } /* read the message */ ret = i2c_master_recv(client, &buf[PAYLOAD_BODY], buf[PAYLOAD_LENGTH]); if (ret < 0) { dev_err(&client->dev, "error reading payload: %d\n", ret); - goto unlock; + return ret; } dev_dbg(&client->dev, "read %d bytes for response command 0x%x\n", buf[PAYLOAD_LENGTH], buf[PAYLOAD_BODY]); -unlock: - mutex_unlock(&ts->access_mutex); - return ret; + return 0; } static void zforce_complete(struct zforce_ts *ts, int cmd, int result) @@ -482,9 +440,10 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id) { struct zforce_ts *ts = dev_id; struct i2c_client *client = ts->client; - int ret; + int error; u8 payload_buffer[FRAME_MAXSIZE]; u8 *payload; + bool suspending; /* * When still suspended, return. @@ -498,7 +457,8 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id) dev_dbg(&client->dev, "handling interrupt\n"); /* Don't emit wakeup events from commands run by zforce_suspend */ - if (!ts->suspending && device_may_wakeup(&client->dev)) + suspending = READ_ONCE(ts->suspending); + if (!suspending && device_may_wakeup(&client->dev)) pm_stay_awake(&client->dev); /* @@ -511,10 +471,10 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id) * no IRQ any more) */ do { - ret = zforce_read_packet(ts, payload_buffer); - if (ret < 0) { + error = zforce_read_packet(ts, payload_buffer); + if (error) { dev_err(&client->dev, - "could not read packet, ret: %d\n", ret); + "could not read packet, ret: %d\n", error); break; } @@ -526,7 +486,7 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id) * Always report touch-events received while * suspending, when being a wakeup source */ - if (ts->suspending && device_may_wakeup(&client->dev)) + if (suspending && device_may_wakeup(&client->dev)) pm_wakeup_event(&client->dev, 500); zforce_touch_event(ts, &payload[RESPONSE_DATA]); break; @@ -550,14 +510,15 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id) * Version Payload Results * [2:major] [2:minor] [2:build] [2:rev] */ - ts->version_major = (payload[RESPONSE_DATA + 1] << 8) | - payload[RESPONSE_DATA]; - ts->version_minor = (payload[RESPONSE_DATA + 3] << 8) | - payload[RESPONSE_DATA + 2]; - ts->version_build = (payload[RESPONSE_DATA + 5] << 8) | - payload[RESPONSE_DATA + 4]; - ts->version_rev = (payload[RESPONSE_DATA + 7] << 8) | - payload[RESPONSE_DATA + 6]; + ts->version_major = + get_unaligned_le16(&payload[RESPONSE_DATA]); + ts->version_minor = + get_unaligned_le16(&payload[RESPONSE_DATA + 2]); + ts->version_build = + get_unaligned_le16(&payload[RESPONSE_DATA + 4]); + ts->version_rev = + get_unaligned_le16(&payload[RESPONSE_DATA + 6]); + dev_dbg(&ts->client->dev, "Firmware Version %04x:%04x %04x:%04x\n", ts->version_major, ts->version_minor, @@ -579,7 
+540,7 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id) } } while (gpiod_get_value_cansleep(ts->gpio_int)); - if (!ts->suspending && device_may_wakeup(&client->dev)) + if (!suspending && device_may_wakeup(&client->dev)) pm_relax(&client->dev); dev_dbg(&client->dev, "finished interrupt\n"); @@ -598,24 +559,20 @@ static void zforce_input_close(struct input_dev *dev) { struct zforce_ts *ts = input_get_drvdata(dev); struct i2c_client *client = ts->client; - int ret; + int error; - ret = zforce_stop(ts); - if (ret) + error = zforce_stop(ts); + if (error) dev_warn(&client->dev, "stopping zforce failed\n"); - - return; } -static int zforce_suspend(struct device *dev) +static int __zforce_suspend(struct zforce_ts *ts) { - struct i2c_client *client = to_i2c_client(dev); - struct zforce_ts *ts = i2c_get_clientdata(client); + struct i2c_client *client = ts->client; struct input_dev *input = ts->input; - int ret = 0; + int error; - mutex_lock(&input->mutex); - ts->suspending = true; + guard(mutex)(&input->mutex); /* * When configured as a wakeup source device should always wake @@ -626,9 +583,9 @@ static int zforce_suspend(struct device *dev) /* Need to start device, if not open, to be a wakeup source. */ if (!input_device_enabled(input)) { - ret = zforce_start(ts); - if (ret) - goto unlock; + error = zforce_start(ts); + if (error) + return error; } enable_irq_wake(client->irq); @@ -636,18 +593,30 @@ static int zforce_suspend(struct device *dev) dev_dbg(&client->dev, "suspend without being a wakeup source\n"); - ret = zforce_stop(ts); - if (ret) - goto unlock; + error = zforce_stop(ts); + if (error) + return error; disable_irq(client->irq); } ts->suspended = true; + return 0; +} + +static int zforce_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct zforce_ts *ts = i2c_get_clientdata(client); + int ret; + + WRITE_ONCE(ts->suspending, true); + smp_mb(); -unlock: - ts->suspending = false; - mutex_unlock(&input->mutex); + ret = __zforce_suspend(ts); + + smp_mb(); + WRITE_ONCE(ts->suspending, false); return ret; } @@ -657,9 +626,9 @@ static int zforce_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct zforce_ts *ts = i2c_get_clientdata(client); struct input_dev *input = ts->input; - int ret = 0; + int error; - mutex_lock(&input->mutex); + guard(mutex)(&input->mutex); ts->suspended = false; @@ -670,24 +639,21 @@ static int zforce_resume(struct device *dev) /* need to stop device if it was not open on suspend */ if (!input_device_enabled(input)) { - ret = zforce_stop(ts); - if (ret) - goto unlock; + error = zforce_stop(ts); + if (error) + return error; } } else if (input_device_enabled(input)) { dev_dbg(&client->dev, "resume without being a wakeup source\n"); enable_irq(client->irq); - ret = zforce_start(ts); - if (ret < 0) - goto unlock; + error = zforce_start(ts); + if (error) + return error; } -unlock: - mutex_unlock(&input->mutex); - - return ret; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume); @@ -696,46 +662,27 @@ static void zforce_reset(void *data) { struct zforce_ts *ts = data; - zforce_reset_assert(ts); - + gpiod_set_value_cansleep(ts->gpio_rst, 1); udelay(10); - - if (!IS_ERR(ts->reg_vdd)) - regulator_disable(ts->reg_vdd); } -static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev) +static void zforce_ts_parse_legacy_properties(struct zforce_ts *ts) { - struct zforce_ts_platdata *pdata; - struct device_node *np = dev->of_node; + u32 x_max = 0; + u32 y_max = 
0; - if (!np) - return ERR_PTR(-ENOENT); + device_property_read_u32(&ts->client->dev, "x-size", &x_max); + input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, x_max, 0, 0); - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) { - dev_err(dev, "failed to allocate platform data\n"); - return ERR_PTR(-ENOMEM); - } - - of_property_read_u32(np, "x-size", &pdata->x_max); - of_property_read_u32(np, "y-size", &pdata->y_max); - - return pdata; + device_property_read_u32(&ts->client->dev, "y-size", &y_max); + input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, y_max, 0, 0); } static int zforce_probe(struct i2c_client *client) { - const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev); struct zforce_ts *ts; struct input_dev *input_dev; - int ret; - - if (!pdata) { - pdata = zforce_parse_dt(&client->dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); - } + int error; ts = devm_kzalloc(&client->dev, sizeof(struct zforce_ts), GFP_KERNEL); if (!ts) @@ -743,22 +690,18 @@ static int zforce_probe(struct i2c_client *client) ts->gpio_rst = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(ts->gpio_rst)) { - ret = PTR_ERR(ts->gpio_rst); - dev_err(&client->dev, - "failed to request reset GPIO: %d\n", ret); - return ret; - } + error = PTR_ERR_OR_ZERO(ts->gpio_rst); + if (error) + return dev_err_probe(&client->dev, error, + "failed to request reset GPIO\n"); if (ts->gpio_rst) { ts->gpio_int = devm_gpiod_get_optional(&client->dev, "irq", GPIOD_IN); - if (IS_ERR(ts->gpio_int)) { - ret = PTR_ERR(ts->gpio_int); - dev_err(&client->dev, - "failed to request interrupt GPIO: %d\n", ret); - return ret; - } + error = PTR_ERR_OR_ZERO(ts->gpio_int); + if (error) + return dev_err_probe(&client->dev, error, + "failed to request interrupt GPIO\n"); } else { /* * Deprecated GPIO handling for compatibility @@ -768,66 +711,45 @@ static int zforce_probe(struct i2c_client *client) /* INT GPIO */ ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0, GPIOD_IN); - if (IS_ERR(ts->gpio_int)) { - ret = PTR_ERR(ts->gpio_int); - dev_err(&client->dev, - "failed to request interrupt GPIO: %d\n", ret); - return ret; - } + + error = PTR_ERR_OR_ZERO(ts->gpio_int); + if (error) + return dev_err_probe(&client->dev, error, + "failed to request interrupt GPIO\n"); /* RST GPIO */ ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_HIGH); - if (IS_ERR(ts->gpio_rst)) { - ret = PTR_ERR(ts->gpio_rst); - dev_err(&client->dev, - "failed to request reset GPIO: %d\n", ret); - return ret; - } - } - - ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd"); - if (IS_ERR(ts->reg_vdd)) { - ret = PTR_ERR(ts->reg_vdd); - if (ret == -EPROBE_DEFER) - return ret; - } else { - ret = regulator_enable(ts->reg_vdd); - if (ret) - return ret; - - /* - * according to datasheet add 100us grace time after regular - * regulator enable delay. 
- */ - udelay(100); + error = PTR_ERR_OR_ZERO(ts->gpio_rst); + if (error) + return dev_err_probe(&client->dev, error, + "failed to request reset GPIO\n"); } - ret = devm_add_action(&client->dev, zforce_reset, ts); - if (ret) { - dev_err(&client->dev, "failed to register reset action, %d\n", - ret); + error = devm_regulator_get_enable(&client->dev, "vdd"); + if (error) + return dev_err_probe(&client->dev, error, + "failed to request vdd supply\n"); - /* hereafter the regulator will be disabled by the action */ - if (!IS_ERR(ts->reg_vdd)) - regulator_disable(ts->reg_vdd); + /* + * According to datasheet add 100us grace time after regular + * regulator enable delay. + */ + usleep_range(100, 200); - return ret; - } + error = devm_add_action_or_reset(&client->dev, zforce_reset, ts); + if (error) + return dev_err_probe(&client->dev, error, + "failed to register reset action\n"); snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&client->dev)); input_dev = devm_input_allocate_device(&client->dev); - if (!input_dev) { - dev_err(&client->dev, "could not allocate input device\n"); - return -ENOMEM; - } - - mutex_init(&ts->access_mutex); - mutex_init(&ts->command_mutex); + if (!input_dev) + return dev_err_probe(&client->dev, -ENOMEM, + "could not allocate input device\n"); - ts->pdata = pdata; ts->client = client; ts->input = input_dev; @@ -838,28 +760,21 @@ static int zforce_probe(struct i2c_client *client) input_dev->open = zforce_input_open; input_dev->close = zforce_input_close; - __set_bit(EV_KEY, input_dev->evbit); - __set_bit(EV_SYN, input_dev->evbit); - __set_bit(EV_ABS, input_dev->evbit); - - /* For multi touch */ - input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, - pdata->x_max, 0, 0); - input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, - pdata->y_max, 0, 0); - + zforce_ts_parse_legacy_properties(ts); touchscreen_parse_properties(input_dev, true, &ts->prop); - if (ts->prop.max_x == 0 || ts->prop.max_y == 0) { - dev_err(&client->dev, "no size specified\n"); - return -EINVAL; - } + if (ts->prop.max_x == 0 || ts->prop.max_y == 0) + return dev_err_probe(&client->dev, -EINVAL, "no size specified"); input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, ZFORCE_MAX_AREA, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, ZFORCE_MAX_AREA, 0, 0); input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); - input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS, INPUT_MT_DIRECT); + + error = input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS, + INPUT_MT_DIRECT); + if (error) + return error; input_set_drvdata(ts->input, ts); @@ -872,57 +787,51 @@ static int zforce_probe(struct i2c_client *client) * Therefore we can trigger the interrupt anytime it is low and do * not need to limit it to the interrupt edge. 
*/ - ret = devm_request_threaded_irq(&client->dev, client->irq, - zforce_irq, zforce_irq_thread, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - input_dev->name, ts); - if (ret) { - dev_err(&client->dev, "irq %d request failed\n", client->irq); - return ret; - } + error = devm_request_threaded_irq(&client->dev, client->irq, + zforce_irq, zforce_irq_thread, + IRQF_ONESHOT, input_dev->name, ts); + if (error) + return dev_err_probe(&client->dev, error, + "irq %d request failed\n", client->irq); i2c_set_clientdata(client, ts); /* let the controller boot */ - zforce_reset_deassert(ts); + gpiod_set_value_cansleep(ts->gpio_rst, 0); ts->command_waiting = NOTIFICATION_BOOTCOMPLETE; if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) dev_warn(&client->dev, "bootcomplete timed out\n"); /* need to start device to get version information */ - ret = zforce_command_wait(ts, COMMAND_INITIALIZE); - if (ret) { - dev_err(&client->dev, "unable to initialize, %d\n", ret); - return ret; - } + error = zforce_command_wait(ts, COMMAND_INITIALIZE); + if (error) + return dev_err_probe(&client->dev, error, "unable to initialize\n"); /* this gets the firmware version among other information */ - ret = zforce_command_wait(ts, COMMAND_STATUS); - if (ret < 0) { - dev_err(&client->dev, "couldn't get status, %d\n", ret); + error = zforce_command_wait(ts, COMMAND_STATUS); + if (error) { + dev_err_probe(&client->dev, error, "couldn't get status\n"); zforce_stop(ts); - return ret; + return error; } /* stop device and put it into sleep until it is opened */ - ret = zforce_stop(ts); - if (ret < 0) - return ret; + error = zforce_stop(ts); + if (error) + return error; device_set_wakeup_capable(&client->dev, true); - ret = input_register_device(input_dev); - if (ret) { - dev_err(&client->dev, "could not register input device, %d\n", - ret); - return ret; - } + error = input_register_device(input_dev); + if (error) + return dev_err_probe(&client->dev, error, + "could not register input device\n"); return 0; } -static struct i2c_device_id zforce_idtable[] = { +static const struct i2c_device_id zforce_idtable[] = { { "zforce-ts" }, { } }; @@ -941,6 +850,7 @@ static struct i2c_driver zforce_driver = { .name = "zforce-ts", .pm = pm_sleep_ptr(&zforce_pm_ops), .of_match_table = of_match_ptr(zforce_dt_idtable), + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = zforce_probe, .id_table = zforce_idtable, diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index 1b4807ba46242b..52b3950460e21b 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -34,7 +35,13 @@ #define ZINITIX_DEBUG_REG 0x0115 /* 0~7 */ #define ZINITIX_TOUCH_MODE 0x0010 + #define ZINITIX_CHIP_REVISION 0x0011 +#define ZINITIX_CHIP_BTX0X_MASK 0xF0F0 +#define ZINITIX_CHIP_BT4X2 0x4020 +#define ZINITIX_CHIP_BT4X3 0x4030 +#define ZINITIX_CHIP_BT4X4 0x4040 + #define ZINITIX_FIRMWARE_VERSION 0x0012 #define ZINITIX_USB_DETECT 0x116 @@ -62,7 +69,11 @@ #define ZINITIX_Y_RESOLUTION 0x00C1 #define ZINITIX_POINT_STATUS_REG 0x0080 -#define ZINITIX_ICON_STATUS_REG 0x00AA + +#define ZINITIX_BT4X2_ICON_STATUS_REG 0x009A +#define ZINITIX_BT4X3_ICON_STATUS_REG 0x00A0 +#define ZINITIX_BT4X4_ICON_STATUS_REG 0x00A0 +#define ZINITIX_BT5XX_ICON_STATUS_REG 0x00AA #define ZINITIX_POINT_COORD_REG (ZINITIX_POINT_STATUS_REG + 2) @@ -119,6 +130,7 @@ #define DEFAULT_TOUCH_POINT_MODE 2 #define MAX_SUPPORTED_FINGER_NUM 5 +#define 
MAX_SUPPORTED_BUTTON_NUM 8 #define CHIP_ON_DELAY 15 // ms #define FIRMWARE_ON_DELAY 40 // ms @@ -146,6 +158,13 @@ struct bt541_ts_data { struct touchscreen_properties prop; struct regulator_bulk_data supplies[2]; u32 zinitix_mode; + u32 keycodes[MAX_SUPPORTED_BUTTON_NUM]; + int num_keycodes; + bool have_versioninfo; + u16 chip_revision; + u16 firmware_version; + u16 regdata_version; + u16 icon_status_reg; }; static int zinitix_read_data(struct i2c_client *client, @@ -190,11 +209,25 @@ static int zinitix_write_cmd(struct i2c_client *client, u16 reg) return 0; } +static u16 zinitix_get_u16_reg(struct bt541_ts_data *bt541, u16 vreg) +{ + struct i2c_client *client = bt541->client; + int error; + __le16 val; + + error = zinitix_read_data(client, vreg, (void *)&val, 2); + if (error) + return U8_MAX; + + return le16_to_cpu(val); +} + static int zinitix_init_touch(struct bt541_ts_data *bt541) { struct i2c_client *client = bt541->client; int i; int error; + u16 int_flags; error = zinitix_write_cmd(client, ZINITIX_SWRESET_CMD); if (error) { @@ -202,6 +235,47 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541) return error; } + /* + * Read and cache the chip revision and firmware version the first time + * we get here. + */ + if (!bt541->have_versioninfo) { + bt541->chip_revision = zinitix_get_u16_reg(bt541, + ZINITIX_CHIP_REVISION); + bt541->firmware_version = zinitix_get_u16_reg(bt541, + ZINITIX_FIRMWARE_VERSION); + bt541->regdata_version = zinitix_get_u16_reg(bt541, + ZINITIX_DATA_VERSION_REG); + bt541->have_versioninfo = true; + + dev_dbg(&client->dev, + "chip revision %04x firmware version %04x regdata version %04x\n", + bt541->chip_revision, bt541->firmware_version, + bt541->regdata_version); + + /* + * Determine the "icon" status register which varies by the + * chip. 
+ */ + switch (bt541->chip_revision & ZINITIX_CHIP_BTX0X_MASK) { + case ZINITIX_CHIP_BT4X2: + bt541->icon_status_reg = ZINITIX_BT4X2_ICON_STATUS_REG; + break; + + case ZINITIX_CHIP_BT4X3: + bt541->icon_status_reg = ZINITIX_BT4X3_ICON_STATUS_REG; + break; + + case ZINITIX_CHIP_BT4X4: + bt541->icon_status_reg = ZINITIX_BT4X4_ICON_STATUS_REG; + break; + + default: + bt541->icon_status_reg = ZINITIX_BT5XX_ICON_STATUS_REG; + break; + } + } + error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, 0x0); if (error) { dev_err(&client->dev, @@ -225,6 +299,11 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541) if (error) return error; + error = zinitix_write_u16(client, ZINITIX_BUTTON_SUPPORTED_NUM, + bt541->num_keycodes); + if (error) + return error; + error = zinitix_write_u16(client, ZINITIX_INITIAL_TOUCH_MODE, bt541->zinitix_mode); if (error) @@ -235,9 +314,11 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541) if (error) return error; - error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, - BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE | - BIT_UP); + int_flags = BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE | BIT_UP; + if (bt541->num_keycodes) + int_flags |= BIT_ICON_EVENT; + + error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, int_flags); if (error) return error; @@ -350,12 +431,22 @@ static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot, } } +static void zinitix_report_keys(struct bt541_ts_data *bt541, u16 icon_events) +{ + int i; + + for (i = 0; i < bt541->num_keycodes; i++) + input_report_key(bt541->input_dev, + bt541->keycodes[i], icon_events & BIT(i)); +} + static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) { struct bt541_ts_data *bt541 = bt541_handler; struct i2c_client *client = bt541->client; struct touch_event touch_event; unsigned long finger_mask; + __le16 icon_events; int error; int i; @@ -368,6 +459,17 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) goto out; } + if (le16_to_cpu(touch_event.status) & BIT_ICON_EVENT) { + error = zinitix_read_data(bt541->client, bt541->icon_status_reg, + &icon_events, sizeof(icon_events)); + if (error) { + dev_err(&client->dev, "Failed to read icon events\n"); + goto out; + } + + zinitix_report_keys(bt541, le16_to_cpu(icon_events)); + } + finger_mask = touch_event.finger_mask; for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) { const struct point_coord *p = &touch_event.point_coord[i]; @@ -453,6 +555,7 @@ static int zinitix_init_input_dev(struct bt541_ts_data *bt541) { struct input_dev *input_dev; int error; + int i; input_dev = devm_input_allocate_device(&bt541->client->dev); if (!input_dev) { @@ -470,6 +573,14 @@ static int zinitix_init_input_dev(struct bt541_ts_data *bt541) input_dev->open = zinitix_input_open; input_dev->close = zinitix_input_close; + if (bt541->num_keycodes) { + input_dev->keycode = bt541->keycodes; + input_dev->keycodemax = bt541->num_keycodes; + input_dev->keycodesize = sizeof(bt541->keycodes[0]); + for (i = 0; i < bt541->num_keycodes; i++) + input_set_capability(input_dev, EV_KEY, bt541->keycodes[i]); + } + input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X); input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y); input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0); @@ -534,6 +645,21 @@ static int zinitix_ts_probe(struct i2c_client *client) return error; } + bt541->num_keycodes = device_property_count_u32(&client->dev, "linux,keycodes"); + if (bt541->num_keycodes > ARRAY_SIZE(bt541->keycodes)) { + 
dev_err(&client->dev, "too many keys defined (%d)\n", bt541->num_keycodes); + return -EINVAL; + } + + error = device_property_read_u32_array(&client->dev, "linux,keycodes", + bt541->keycodes, + bt541->num_keycodes); + if (error) { + dev_err(&client->dev, + "Unable to parse \"linux,keycodes\" property: %d\n", error); + return error; + } + error = zinitix_init_input_dev(bt541); if (error) { dev_err(&client->dev, diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c index f788db15cd76a9..b956e4050f3815 100644 --- a/drivers/interconnect/icc-clk.c +++ b/drivers/interconnect/icc-clk.c @@ -87,6 +87,7 @@ struct icc_provider *icc_clk_register(struct device *dev, onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL); if (!onecell) return ERR_PTR(-ENOMEM); + onecell->num_nodes = 2 * num_clocks; qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL); if (!qp) @@ -133,8 +134,6 @@ struct icc_provider *icc_clk_register(struct device *dev, onecell->nodes[j++] = node; } - onecell->num_nodes = j; - ret = icc_provider_register(provider); if (ret) goto err; diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig index 9b84cd8becefd1..de96d466134066 100644 --- a/drivers/interconnect/qcom/Kconfig +++ b/drivers/interconnect/qcom/Kconfig @@ -26,6 +26,15 @@ config INTERCONNECT_QCOM_MSM8916 This is a driver for the Qualcomm Network-on-Chip on msm8916-based platforms. +config INTERCONNECT_QCOM_MSM8937 + tristate "Qualcomm MSM8937 interconnect driver" + depends on INTERCONNECT_QCOM + depends on QCOM_SMD_RPM + select INTERCONNECT_QCOM_SMD_RPM + help + This is a driver for the Qualcomm Network-on-Chip on msm8937-based + platforms. + config INTERCONNECT_QCOM_MSM8939 tristate "Qualcomm MSM8939 interconnect driver" depends on INTERCONNECT_QCOM @@ -53,6 +62,15 @@ config INTERCONNECT_QCOM_MSM8974 This is a driver for the Qualcomm Network-on-Chip on msm8974-based platforms. +config INTERCONNECT_QCOM_MSM8976 + tristate "Qualcomm MSM8976 interconnect driver" + depends on INTERCONNECT_QCOM + depends on QCOM_SMD_RPM + select INTERCONNECT_QCOM_SMD_RPM + help + This is a driver for the Qualcomm Network-on-Chip on msm8976-based + platforms. 
+ config INTERCONNECT_QCOM_MSM8996 tristate "Qualcomm MSM8996 interconnect driver" depends on INTERCONNECT_QCOM diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile index 7a7b6a71876f47..bfeea8416fcf9e 100644 --- a/drivers/interconnect/qcom/Makefile +++ b/drivers/interconnect/qcom/Makefile @@ -6,9 +6,11 @@ interconnect_qcom-y := icc-common.o icc-bcm-voter-objs := bcm-voter.o qnoc-msm8909-objs := msm8909.o qnoc-msm8916-objs := msm8916.o +qnoc-msm8937-objs := msm8937.o qnoc-msm8939-objs := msm8939.o qnoc-msm8953-objs := msm8953.o qnoc-msm8974-objs := msm8974.o +qnoc-msm8976-objs := msm8976.o qnoc-msm8996-objs := msm8996.o icc-osm-l3-objs := osm-l3.o qnoc-qcm2290-objs := qcm2290.o @@ -41,9 +43,11 @@ icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8909) += qnoc-msm8909.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o +obj-$(CONFIG_INTERCONNECT_QCOM_MSM8937) += qnoc-msm8937.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8953) += qnoc-msm8953.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o +obj-$(CONFIG_INTERCONNECT_QCOM_MSM8976) += qnoc-msm8976.o obj-$(CONFIG_INTERCONNECT_QCOM_MSM8996) += qnoc-msm8996.o obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o obj-$(CONFIG_INTERCONNECT_QCOM_QCM2290) += qnoc-qcm2290.o diff --git a/drivers/interconnect/qcom/msm8937.c b/drivers/interconnect/qcom/msm8937.c new file mode 100644 index 00000000000000..052b14c28ef8bc --- /dev/null +++ b/drivers/interconnect/qcom/msm8937.c @@ -0,0 +1,1350 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on data from msm8937-bus.dtsi in Qualcomm's msm-3.18 release: + * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "icc-rpm.h" + +enum { + QNOC_MASTER_AMPSS_M0 = 1, + QNOC_MASTER_GRAPHICS_3D, + QNOC_SNOC_BIMC_0_MAS, + QNOC_SNOC_BIMC_2_MAS, + QNOC_SNOC_BIMC_1_MAS, + QNOC_MASTER_TCU_0, + QNOC_MASTER_SPDM, + QNOC_MASTER_BLSP_1, + QNOC_MASTER_BLSP_2, + QNOC_MASTER_USB_HS, + QNOC_MASTER_XM_USB_HS1, + QNOC_MASTER_CRYPTO_CORE0, + QNOC_MASTER_SDCC_1, + QNOC_MASTER_SDCC_2, + QNOC_SNOC_PNOC_MAS, + QNOC_MASTER_QDSS_BAM, + QNOC_BIMC_SNOC_MAS, + QNOC_MASTER_JPEG, + QNOC_MASTER_MDP_PORT0, + QNOC_PNOC_SNOC_MAS, + QNOC_MASTER_VIDEO_P0, + QNOC_MASTER_VFE, + QNOC_MASTER_VFE1, + QNOC_MASTER_CPP, + QNOC_MASTER_QDSS_ETR, + QNOC_PNOC_M_0, + QNOC_PNOC_M_1, + QNOC_PNOC_INT_0, + QNOC_PNOC_INT_1, + QNOC_PNOC_INT_2, + QNOC_PNOC_INT_3, + QNOC_PNOC_SLV_0, + QNOC_PNOC_SLV_1, + QNOC_PNOC_SLV_2, + QNOC_PNOC_SLV_3, + QNOC_PNOC_SLV_4, + QNOC_PNOC_SLV_6, + QNOC_PNOC_SLV_7, + QNOC_PNOC_SLV_8, + QNOC_SNOC_QDSS_INT, + QNOC_SNOC_INT_0, + QNOC_SNOC_INT_1, + QNOC_SNOC_INT_2, + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV, + QNOC_SLAVE_SDCC_2, + QNOC_SLAVE_SPDM_WRAPPER, + QNOC_SLAVE_PDM, + QNOC_SLAVE_PRNG, + QNOC_SLAVE_TCSR, + QNOC_SLAVE_SNOC_CFG, + QNOC_SLAVE_MESSAGE_RAM, + QNOC_SLAVE_CAMERA_CFG, + QNOC_SLAVE_DISPLAY_CFG, + QNOC_SLAVE_VENUS_CFG, + QNOC_SLAVE_GRAPHICS_3D_CFG, + QNOC_SLAVE_TLMM, + QNOC_SLAVE_BLSP_1, + QNOC_SLAVE_BLSP_2, + QNOC_SLAVE_PMIC_ARB, + QNOC_SLAVE_SDCC_1, + QNOC_SLAVE_CRYPTO_0_CFG, + QNOC_SLAVE_USB_HS, + QNOC_SLAVE_TCU, + QNOC_PNOC_SNOC_SLV, + QNOC_SLAVE_APPSS, + QNOC_SLAVE_WCSS, + QNOC_SNOC_BIMC_0_SLV, + QNOC_SNOC_BIMC_1_SLV, + QNOC_SNOC_BIMC_2_SLV, + QNOC_SLAVE_OCIMEM, + QNOC_SNOC_PNOC_SLV, + QNOC_SLAVE_QDSS_STM, + QNOC_SLAVE_CATS_128, + QNOC_SLAVE_OCMEM_64, + QNOC_SLAVE_LPASS, +}; + +static const u16 mas_apps_proc_links[] = { + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV +}; + +static struct qcom_icc_node mas_apps_proc = { + .name = "mas_apps_proc", + .id = QNOC_MASTER_AMPSS_M0, + .buswidth = 8, + .mas_rpm_id = 0, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 0, + .num_links = ARRAY_SIZE(mas_apps_proc_links), + .links = mas_apps_proc_links, +}; + +static const u16 mas_oxili_links[] = { + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV +}; + +static struct qcom_icc_node mas_oxili = { + .name = "mas_oxili", + .id = QNOC_MASTER_GRAPHICS_3D, + .buswidth = 8, + .mas_rpm_id = 6, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 2, + .num_links = ARRAY_SIZE(mas_oxili_links), + .links = mas_oxili_links, +}; + +static const u16 mas_snoc_bimc_0_links[] = { + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV +}; + +static struct qcom_icc_node mas_snoc_bimc_0 = { + .name = "mas_snoc_bimc_0", + .id = QNOC_SNOC_BIMC_0_MAS, + .buswidth = 8, + .mas_rpm_id = 3, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 3, + .num_links = ARRAY_SIZE(mas_snoc_bimc_0_links), + .links = mas_snoc_bimc_0_links, +}; + +static const u16 mas_snoc_bimc_2_links[] = { + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV +}; + +static struct qcom_icc_node mas_snoc_bimc_2 = { + .name = "mas_snoc_bimc_2", + .id = QNOC_SNOC_BIMC_2_MAS, + .buswidth = 8, + .mas_rpm_id = 108, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 
4, + .num_links = ARRAY_SIZE(mas_snoc_bimc_2_links), + .links = mas_snoc_bimc_2_links, +}; + +static const u16 mas_snoc_bimc_1_links[] = { + QNOC_SLAVE_EBI_CH0 +}; + +static struct qcom_icc_node mas_snoc_bimc_1 = { + .name = "mas_snoc_bimc_1", + .id = QNOC_SNOC_BIMC_1_MAS, + .buswidth = 8, + .mas_rpm_id = 76, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 5, + .num_links = ARRAY_SIZE(mas_snoc_bimc_1_links), + .links = mas_snoc_bimc_1_links, +}; + +static const u16 mas_tcu_0_links[] = { + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV +}; + +static struct qcom_icc_node mas_tcu_0 = { + .name = "mas_tcu_0", + .id = QNOC_MASTER_TCU_0, + .buswidth = 8, + .mas_rpm_id = 102, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 2, + .qos.qos_port = 6, + .num_links = ARRAY_SIZE(mas_tcu_0_links), + .links = mas_tcu_0_links, +}; + +static const u16 mas_spdm_links[] = { + QNOC_PNOC_M_0 +}; + +static struct qcom_icc_node mas_spdm = { + .name = "mas_spdm", + .id = QNOC_MASTER_SPDM, + .buswidth = 4, + .mas_rpm_id = 50, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(mas_spdm_links), + .links = mas_spdm_links, +}; + +static const u16 mas_blsp_1_links[] = { + QNOC_PNOC_M_1 +}; + +static struct qcom_icc_node mas_blsp_1 = { + .name = "mas_blsp_1", + .id = QNOC_MASTER_BLSP_1, + .buswidth = 4, + .mas_rpm_id = 41, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_blsp_1_links), + .links = mas_blsp_1_links, +}; + +static const u16 mas_blsp_2_links[] = { + QNOC_PNOC_M_1 +}; + +static struct qcom_icc_node mas_blsp_2 = { + .name = "mas_blsp_2", + .id = QNOC_MASTER_BLSP_2, + .buswidth = 4, + .mas_rpm_id = 39, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_blsp_2_links), + .links = mas_blsp_2_links, +}; + +static const u16 mas_usb_hs1_links[] = { + QNOC_PNOC_INT_0 +}; + +static struct qcom_icc_node mas_usb_hs1 = { + .name = "mas_usb_hs1", + .id = QNOC_MASTER_USB_HS, + .buswidth = 4, + .mas_rpm_id = 42, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 12, + .num_links = ARRAY_SIZE(mas_usb_hs1_links), + .links = mas_usb_hs1_links, +}; + +static const u16 mas_xi_usb_hs1_links[] = { + QNOC_PNOC_INT_0 +}; + +static struct qcom_icc_node mas_xi_usb_hs1 = { + .name = "mas_xi_usb_hs1", + .id = QNOC_MASTER_XM_USB_HS1, + .buswidth = 8, + .mas_rpm_id = 138, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 11, + .num_links = ARRAY_SIZE(mas_xi_usb_hs1_links), + .links = mas_xi_usb_hs1_links, +}; + +static const u16 mas_crypto_links[] = { + QNOC_PNOC_INT_0 +}; + +static struct qcom_icc_node mas_crypto = { + .name = "mas_crypto", + .id = QNOC_MASTER_CRYPTO_CORE0, + .buswidth = 8, + .mas_rpm_id = 23, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 0, + .num_links = ARRAY_SIZE(mas_crypto_links), + .links = mas_crypto_links, +}; + +static const u16 mas_sdcc_1_links[] = { + QNOC_PNOC_INT_0 +}; + +static struct qcom_icc_node mas_sdcc_1 = { + .name = "mas_sdcc_1", + .id = QNOC_MASTER_SDCC_1, + .buswidth = 8, + .mas_rpm_id = 33, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 7, + .num_links = 
ARRAY_SIZE(mas_sdcc_1_links), + .links = mas_sdcc_1_links, +}; + +static const u16 mas_sdcc_2_links[] = { + QNOC_PNOC_INT_0 +}; + +static struct qcom_icc_node mas_sdcc_2 = { + .name = "mas_sdcc_2", + .id = QNOC_MASTER_SDCC_2, + .buswidth = 8, + .mas_rpm_id = 35, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 8, + .num_links = ARRAY_SIZE(mas_sdcc_2_links), + .links = mas_sdcc_2_links, +}; + +static const u16 mas_snoc_pcnoc_links[] = { + QNOC_PNOC_SLV_7, + QNOC_PNOC_INT_2, + QNOC_PNOC_INT_3 +}; + +static struct qcom_icc_node mas_snoc_pcnoc = { + .name = "mas_snoc_pcnoc", + .id = QNOC_SNOC_PNOC_MAS, + .buswidth = 8, + .mas_rpm_id = 77, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 9, + .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links), + .links = mas_snoc_pcnoc_links, +}; + +static const u16 mas_qdss_bam_links[] = { + QNOC_SNOC_QDSS_INT +}; + +static struct qcom_icc_node mas_qdss_bam = { + .name = "mas_qdss_bam", + .id = QNOC_MASTER_QDSS_BAM, + .buswidth = 4, + .mas_rpm_id = 19, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 11, + .num_links = ARRAY_SIZE(mas_qdss_bam_links), + .links = mas_qdss_bam_links, +}; + +static const u16 mas_bimc_snoc_links[] = { + QNOC_SNOC_INT_0, + QNOC_SNOC_INT_1, + QNOC_SNOC_INT_2 +}; + +static struct qcom_icc_node mas_bimc_snoc = { + .name = "mas_bimc_snoc", + .id = QNOC_BIMC_SNOC_MAS, + .buswidth = 8, + .mas_rpm_id = 21, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_bimc_snoc_links), + .links = mas_bimc_snoc_links, +}; + +static const u16 mas_jpeg_links[] = { + QNOC_SNOC_BIMC_2_SLV +}; + +static struct qcom_icc_node mas_jpeg = { + .name = "mas_jpeg", + .id = QNOC_MASTER_JPEG, + .buswidth = 16, + .mas_rpm_id = 7, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 6, + .num_links = ARRAY_SIZE(mas_jpeg_links), + .links = mas_jpeg_links, +}; + +static const u16 mas_mdp_links[] = { + QNOC_SNOC_BIMC_0_SLV +}; + +static struct qcom_icc_node mas_mdp = { + .name = "mas_mdp", + .id = QNOC_MASTER_MDP_PORT0, + .buswidth = 16, + .mas_rpm_id = 8, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 7, + .num_links = ARRAY_SIZE(mas_mdp_links), + .links = mas_mdp_links, +}; + +static const u16 mas_pcnoc_snoc_links[] = { + QNOC_SNOC_INT_0, + QNOC_SNOC_INT_1, + QNOC_SNOC_BIMC_1_SLV +}; + +static struct qcom_icc_node mas_pcnoc_snoc = { + .name = "mas_pcnoc_snoc", + .id = QNOC_PNOC_SNOC_MAS, + .buswidth = 8, + .mas_rpm_id = 29, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 5, + .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links), + .links = mas_pcnoc_snoc_links, +}; + +static const u16 mas_venus_links[] = { + QNOC_SNOC_BIMC_2_SLV +}; + +static struct qcom_icc_node mas_venus = { + .name = "mas_venus", + .id = QNOC_MASTER_VIDEO_P0, + .buswidth = 16, + .mas_rpm_id = 9, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 8, + .num_links = ARRAY_SIZE(mas_venus_links), + .links = mas_venus_links, +}; + +static const u16 mas_vfe0_links[] = { + QNOC_SNOC_BIMC_0_SLV +}; + +static struct qcom_icc_node 
mas_vfe0 = { + .name = "mas_vfe0", + .id = QNOC_MASTER_VFE, + .buswidth = 16, + .mas_rpm_id = 11, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 9, + .num_links = ARRAY_SIZE(mas_vfe0_links), + .links = mas_vfe0_links, +}; + +static const u16 mas_vfe1_links[] = { + QNOC_SNOC_BIMC_0_SLV +}; + +static struct qcom_icc_node mas_vfe1 = { + .name = "mas_vfe1", + .id = QNOC_MASTER_VFE1, + .buswidth = 16, + .mas_rpm_id = 133, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 13, + .num_links = ARRAY_SIZE(mas_vfe1_links), + .links = mas_vfe1_links, +}; + +static const u16 mas_cpp_links[] = { + QNOC_SNOC_BIMC_2_SLV +}; + +static struct qcom_icc_node mas_cpp = { + .name = "mas_cpp", + .id = QNOC_MASTER_CPP, + .buswidth = 16, + .mas_rpm_id = 115, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 12, + .num_links = ARRAY_SIZE(mas_cpp_links), + .links = mas_cpp_links, +}; + +static const u16 mas_qdss_etr_links[] = { + QNOC_SNOC_QDSS_INT +}; + +static struct qcom_icc_node mas_qdss_etr = { + .name = "mas_qdss_etr", + .id = QNOC_MASTER_QDSS_ETR, + .buswidth = 8, + .mas_rpm_id = 31, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 10, + .num_links = ARRAY_SIZE(mas_qdss_etr_links), + .links = mas_qdss_etr_links, +}; + +static const u16 pcnoc_m_0_links[] = { + QNOC_PNOC_INT_0 +}; + +static struct qcom_icc_node pcnoc_m_0 = { + .name = "pcnoc_m_0", + .id = QNOC_PNOC_M_0, + .buswidth = 4, + .mas_rpm_id = 87, + .slv_rpm_id = 116, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 5, + .num_links = ARRAY_SIZE(pcnoc_m_0_links), + .links = pcnoc_m_0_links, +}; + +static const u16 pcnoc_m_1_links[] = { + QNOC_PNOC_INT_0 +}; + +static struct qcom_icc_node pcnoc_m_1 = { + .name = "pcnoc_m_1", + .id = QNOC_PNOC_M_1, + .buswidth = 4, + .mas_rpm_id = 88, + .slv_rpm_id = 117, + .num_links = ARRAY_SIZE(pcnoc_m_1_links), + .links = pcnoc_m_1_links, +}; + +static const u16 pcnoc_int_0_links[] = { + QNOC_PNOC_SNOC_SLV, + QNOC_PNOC_SLV_7, + QNOC_PNOC_INT_3, + QNOC_PNOC_INT_2 +}; + +static struct qcom_icc_node pcnoc_int_0 = { + .name = "pcnoc_int_0", + .id = QNOC_PNOC_INT_0, + .buswidth = 8, + .mas_rpm_id = 85, + .slv_rpm_id = 114, + .num_links = ARRAY_SIZE(pcnoc_int_0_links), + .links = pcnoc_int_0_links, +}; + +static const u16 pcnoc_int_1_links[] = { + QNOC_PNOC_SNOC_SLV, + QNOC_PNOC_SLV_7, + QNOC_PNOC_INT_3, + QNOC_PNOC_INT_2 +}; + +static struct qcom_icc_node pcnoc_int_1 = { + .name = "pcnoc_int_1", + .id = QNOC_PNOC_INT_1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(pcnoc_int_1_links), + .links = pcnoc_int_1_links, +}; + +static const u16 pcnoc_int_2_links[] = { + QNOC_PNOC_SLV_2, + QNOC_PNOC_SLV_3, + QNOC_PNOC_SLV_6, + QNOC_PNOC_SLV_8 +}; + +static struct qcom_icc_node pcnoc_int_2 = { + .name = "pcnoc_int_2", + .id = QNOC_PNOC_INT_2, + .buswidth = 8, + .mas_rpm_id = 124, + .slv_rpm_id = 184, + .num_links = ARRAY_SIZE(pcnoc_int_2_links), + .links = pcnoc_int_2_links, +}; + +static const u16 pcnoc_int_3_links[] = { + QNOC_PNOC_SLV_1, + QNOC_PNOC_SLV_0, + QNOC_PNOC_SLV_4, + QNOC_SLAVE_GRAPHICS_3D_CFG, + QNOC_SLAVE_TCU +}; + +static 
struct qcom_icc_node pcnoc_int_3 = { + .name = "pcnoc_int_3", + .id = QNOC_PNOC_INT_3, + .buswidth = 8, + .mas_rpm_id = 125, + .slv_rpm_id = 185, + .num_links = ARRAY_SIZE(pcnoc_int_3_links), + .links = pcnoc_int_3_links, +}; + +static const u16 pcnoc_s_0_links[] = { + QNOC_SLAVE_SPDM_WRAPPER, + QNOC_SLAVE_PDM, + QNOC_SLAVE_PRNG, + QNOC_SLAVE_SDCC_2 +}; + +static struct qcom_icc_node pcnoc_s_0 = { + .name = "pcnoc_s_0", + .id = QNOC_PNOC_SLV_0, + .buswidth = 4, + .mas_rpm_id = 89, + .slv_rpm_id = 118, + .num_links = ARRAY_SIZE(pcnoc_s_0_links), + .links = pcnoc_s_0_links, +}; + +static const u16 pcnoc_s_1_links[] = { + QNOC_SLAVE_TCSR +}; + +static struct qcom_icc_node pcnoc_s_1 = { + .name = "pcnoc_s_1", + .id = QNOC_PNOC_SLV_1, + .buswidth = 4, + .mas_rpm_id = 90, + .slv_rpm_id = 119, + .num_links = ARRAY_SIZE(pcnoc_s_1_links), + .links = pcnoc_s_1_links, +}; + +static const u16 pcnoc_s_2_links[] = { + QNOC_SLAVE_SNOC_CFG +}; + +static struct qcom_icc_node pcnoc_s_2 = { + .name = "pcnoc_s_2", + .id = QNOC_PNOC_SLV_2, + .buswidth = 4, + .mas_rpm_id = 91, + .slv_rpm_id = 120, + .num_links = ARRAY_SIZE(pcnoc_s_2_links), + .links = pcnoc_s_2_links, +}; + +static const u16 pcnoc_s_3_links[] = { + QNOC_SLAVE_MESSAGE_RAM +}; + +static struct qcom_icc_node pcnoc_s_3 = { + .name = "pcnoc_s_3", + .id = QNOC_PNOC_SLV_3, + .buswidth = 4, + .mas_rpm_id = 92, + .slv_rpm_id = 121, + .num_links = ARRAY_SIZE(pcnoc_s_3_links), + .links = pcnoc_s_3_links, +}; + +static const u16 pcnoc_s_4_links[] = { + QNOC_SLAVE_CAMERA_CFG, + QNOC_SLAVE_DISPLAY_CFG, + QNOC_SLAVE_VENUS_CFG +}; + +static struct qcom_icc_node pcnoc_s_4 = { + .name = "pcnoc_s_4", + .id = QNOC_PNOC_SLV_4, + .buswidth = 4, + .mas_rpm_id = 93, + .slv_rpm_id = 122, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(pcnoc_s_4_links), + .links = pcnoc_s_4_links, +}; + +static const u16 pcnoc_s_6_links[] = { + QNOC_SLAVE_TLMM, + QNOC_SLAVE_BLSP_1, + QNOC_SLAVE_BLSP_2 +}; + +static struct qcom_icc_node pcnoc_s_6 = { + .name = "pcnoc_s_6", + .id = QNOC_PNOC_SLV_6, + .buswidth = 4, + .mas_rpm_id = 94, + .slv_rpm_id = 123, + .num_links = ARRAY_SIZE(pcnoc_s_6_links), + .links = pcnoc_s_6_links, +}; + +static const u16 pcnoc_s_7_links[] = { + QNOC_SLAVE_SDCC_1, + QNOC_SLAVE_PMIC_ARB +}; + +static struct qcom_icc_node pcnoc_s_7 = { + .name = "pcnoc_s_7", + .id = QNOC_PNOC_SLV_7, + .buswidth = 4, + .mas_rpm_id = 95, + .slv_rpm_id = 124, + .num_links = ARRAY_SIZE(pcnoc_s_7_links), + .links = pcnoc_s_7_links, +}; + +static const u16 pcnoc_s_8_links[] = { + QNOC_SLAVE_USB_HS, + QNOC_SLAVE_CRYPTO_0_CFG +}; + +static struct qcom_icc_node pcnoc_s_8 = { + .name = "pcnoc_s_8", + .id = QNOC_PNOC_SLV_8, + .buswidth = 4, + .mas_rpm_id = 96, + .slv_rpm_id = 125, + .num_links = ARRAY_SIZE(pcnoc_s_8_links), + .links = pcnoc_s_8_links, +}; + +static const u16 qdss_int_links[] = { + QNOC_SNOC_INT_1, + QNOC_SNOC_BIMC_1_SLV +}; + +static struct qcom_icc_node qdss_int = { + .name = "qdss_int", + .id = QNOC_SNOC_QDSS_INT, + .buswidth = 8, + .mas_rpm_id = 98, + .slv_rpm_id = 128, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(qdss_int_links), + .links = qdss_int_links, +}; + +static const u16 snoc_int_0_links[] = { + QNOC_SLAVE_LPASS, + QNOC_SLAVE_WCSS, + QNOC_SLAVE_APPSS +}; + +static struct qcom_icc_node snoc_int_0 = { + .name = "snoc_int_0", + .id = QNOC_SNOC_INT_0, + .buswidth = 8, + .mas_rpm_id = 99, + .slv_rpm_id = 130, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + 
.num_links = ARRAY_SIZE(snoc_int_0_links), + .links = snoc_int_0_links, +}; + +static const u16 snoc_int_1_links[] = { + QNOC_SLAVE_QDSS_STM, + QNOC_SLAVE_OCIMEM, + QNOC_SNOC_PNOC_SLV +}; + +static struct qcom_icc_node snoc_int_1 = { + .name = "snoc_int_1", + .id = QNOC_SNOC_INT_1, + .buswidth = 8, + .mas_rpm_id = 100, + .slv_rpm_id = 131, + .num_links = ARRAY_SIZE(snoc_int_1_links), + .links = snoc_int_1_links, +}; + +static const u16 snoc_int_2_links[] = { + QNOC_SLAVE_CATS_128, + QNOC_SLAVE_OCMEM_64 +}; + +static struct qcom_icc_node snoc_int_2 = { + .name = "snoc_int_2", + .id = QNOC_SNOC_INT_2, + .buswidth = 8, + .mas_rpm_id = 134, + .slv_rpm_id = 197, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(snoc_int_2_links), + .links = snoc_int_2_links, +}; + +static struct qcom_icc_node slv_ebi = { + .name = "slv_ebi", + .id = QNOC_SLAVE_EBI_CH0, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 0, +}; + +static const u16 slv_bimc_snoc_links[] = { + QNOC_BIMC_SNOC_MAS +}; + +static struct qcom_icc_node slv_bimc_snoc = { + .name = "slv_bimc_snoc", + .id = QNOC_BIMC_SNOC_SLV, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 2, + .num_links = ARRAY_SIZE(slv_bimc_snoc_links), + .links = slv_bimc_snoc_links, +}; + +static struct qcom_icc_node slv_sdcc_2 = { + .name = "slv_sdcc_2", + .id = QNOC_SLAVE_SDCC_2, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 33, +}; + +static struct qcom_icc_node slv_spdm = { + .name = "slv_spdm", + .id = QNOC_SLAVE_SPDM_WRAPPER, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 60, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_pdm = { + .name = "slv_pdm", + .id = QNOC_SLAVE_PDM, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 41, +}; + +static struct qcom_icc_node slv_prng = { + .name = "slv_prng", + .id = QNOC_SLAVE_PRNG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 44, +}; + +static struct qcom_icc_node slv_tcsr = { + .name = "slv_tcsr", + .id = QNOC_SLAVE_TCSR, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 50, +}; + +static struct qcom_icc_node slv_snoc_cfg = { + .name = "slv_snoc_cfg", + .id = QNOC_SLAVE_SNOC_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 70, +}; + +static struct qcom_icc_node slv_message_ram = { + .name = "slv_message_ram", + .id = QNOC_SLAVE_MESSAGE_RAM, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 55, +}; + +static struct qcom_icc_node slv_camera_ss_cfg = { + .name = "slv_camera_ss_cfg", + .id = QNOC_SLAVE_CAMERA_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 3, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_disp_ss_cfg = { + .name = "slv_disp_ss_cfg", + .id = QNOC_SLAVE_DISPLAY_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 4, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_venus_cfg = { + .name = "slv_venus_cfg", + .id = QNOC_SLAVE_VENUS_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 10, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_gpu_cfg = { + .name = "slv_gpu_cfg", + .id = QNOC_SLAVE_GRAPHICS_3D_CFG, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 11, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_tlmm = { + .name = "slv_tlmm", + .id = QNOC_SLAVE_TLMM, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 51, +}; + +static struct 
qcom_icc_node slv_blsp_1 = { + .name = "slv_blsp_1", + .id = QNOC_SLAVE_BLSP_1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 39, +}; + +static struct qcom_icc_node slv_blsp_2 = { + .name = "slv_blsp_2", + .id = QNOC_SLAVE_BLSP_2, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 37, +}; + +static struct qcom_icc_node slv_pmic_arb = { + .name = "slv_pmic_arb", + .id = QNOC_SLAVE_PMIC_ARB, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 59, +}; + +static struct qcom_icc_node slv_sdcc_1 = { + .name = "slv_sdcc_1", + .id = QNOC_SLAVE_SDCC_1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 31, +}; + +static struct qcom_icc_node slv_crypto_0_cfg = { + .name = "slv_crypto_0_cfg", + .id = QNOC_SLAVE_CRYPTO_0_CFG, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 52, +}; + +static struct qcom_icc_node slv_usb_hs = { + .name = "slv_usb_hs", + .id = QNOC_SLAVE_USB_HS, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 40, +}; + +static struct qcom_icc_node slv_tcu = { + .name = "slv_tcu", + .id = QNOC_SLAVE_TCU, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 133, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static const u16 slv_pcnoc_snoc_links[] = { + QNOC_PNOC_SNOC_MAS +}; + +static struct qcom_icc_node slv_pcnoc_snoc = { + .name = "slv_pcnoc_snoc", + .id = QNOC_PNOC_SNOC_SLV, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 45, + .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links), + .links = slv_pcnoc_snoc_links, +}; + +static struct qcom_icc_node slv_kpss_ahb = { + .name = "slv_kpss_ahb", + .id = QNOC_SLAVE_APPSS, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 20, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_wcss = { + .name = "slv_wcss", + .id = QNOC_SLAVE_WCSS, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 23, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static const u16 slv_snoc_bimc_0_links[] = { + QNOC_SNOC_BIMC_0_MAS +}; + +static struct qcom_icc_node slv_snoc_bimc_0 = { + .name = "slv_snoc_bimc_0", + .id = QNOC_SNOC_BIMC_0_SLV, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 24, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(slv_snoc_bimc_0_links), + .links = slv_snoc_bimc_0_links, +}; + +static const u16 slv_snoc_bimc_1_links[] = { + QNOC_SNOC_BIMC_1_MAS +}; + +static struct qcom_icc_node slv_snoc_bimc_1 = { + .name = "slv_snoc_bimc_1", + .id = QNOC_SNOC_BIMC_1_SLV, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 104, + .num_links = ARRAY_SIZE(slv_snoc_bimc_1_links), + .links = slv_snoc_bimc_1_links, +}; + +static const u16 slv_snoc_bimc_2_links[] = { + QNOC_SNOC_BIMC_2_MAS +}; + +static struct qcom_icc_node slv_snoc_bimc_2 = { + .name = "slv_snoc_bimc_2", + .id = QNOC_SNOC_BIMC_2_SLV, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 137, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(slv_snoc_bimc_2_links), + .links = slv_snoc_bimc_2_links, +}; + +static struct qcom_icc_node slv_imem = { + .name = "slv_imem", + .id = QNOC_SLAVE_OCIMEM, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 26, +}; + +static const u16 slv_snoc_pcnoc_links[] = { + QNOC_SNOC_PNOC_MAS +}; + +static struct qcom_icc_node slv_snoc_pcnoc = { + .name = "slv_snoc_pcnoc", + .id = QNOC_SNOC_PNOC_SLV, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 28, + .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links), + .links = slv_snoc_pcnoc_links, +}; + +static struct 
qcom_icc_node slv_qdss_stm = { + .name = "slv_qdss_stm", + .id = QNOC_SLAVE_QDSS_STM, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 30, +}; + +static struct qcom_icc_node slv_cats_0 = { + .name = "slv_cats_0", + .id = QNOC_SLAVE_CATS_128, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 106, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_cats_1 = { + .name = "slv_cats_1", + .id = QNOC_SLAVE_OCMEM_64, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 107, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_lpass = { + .name = "slv_lpass", + .id = QNOC_SLAVE_LPASS, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 21, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node *msm8937_bimc_nodes[] = { + [MAS_APPS_PROC] = &mas_apps_proc, + [MAS_OXILI] = &mas_oxili, + [MAS_SNOC_BIMC_0] = &mas_snoc_bimc_0, + [MAS_SNOC_BIMC_2] = &mas_snoc_bimc_2, + [MAS_SNOC_BIMC_1] = &mas_snoc_bimc_1, + [MAS_TCU_0] = &mas_tcu_0, + [SLV_EBI] = &slv_ebi, + [SLV_BIMC_SNOC] = &slv_bimc_snoc, +}; + +static const struct regmap_config msm8937_bimc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x5A000, + .fast_io = true, +}; + +static const struct qcom_icc_desc msm8937_bimc = { + .type = QCOM_ICC_BIMC, + .nodes = msm8937_bimc_nodes, + .num_nodes = ARRAY_SIZE(msm8937_bimc_nodes), + .bus_clk_desc = &bimc_clk, + .regmap_cfg = &msm8937_bimc_regmap_config, + .qos_offset = 0x8000, + .ab_coeff = 154, +}; + +static struct qcom_icc_node *msm8937_pcnoc_nodes[] = { + [MAS_SPDM] = &mas_spdm, + [MAS_BLSP_1] = &mas_blsp_1, + [MAS_BLSP_2] = &mas_blsp_2, + [MAS_USB_HS1] = &mas_usb_hs1, + [MAS_XI_USB_HS1] = &mas_xi_usb_hs1, + [MAS_CRYPTO] = &mas_crypto, + [MAS_SDCC_1] = &mas_sdcc_1, + [MAS_SDCC_2] = &mas_sdcc_2, + [MAS_SNOC_PCNOC] = &mas_snoc_pcnoc, + [PCNOC_M_0] = &pcnoc_m_0, + [PCNOC_M_1] = &pcnoc_m_1, + [PCNOC_INT_0] = &pcnoc_int_0, + [PCNOC_INT_1] = &pcnoc_int_1, + [PCNOC_INT_2] = &pcnoc_int_2, + [PCNOC_INT_3] = &pcnoc_int_3, + [PCNOC_S_0] = &pcnoc_s_0, + [PCNOC_S_1] = &pcnoc_s_1, + [PCNOC_S_2] = &pcnoc_s_2, + [PCNOC_S_3] = &pcnoc_s_3, + [PCNOC_S_4] = &pcnoc_s_4, + [PCNOC_S_6] = &pcnoc_s_6, + [PCNOC_S_7] = &pcnoc_s_7, + [PCNOC_S_8] = &pcnoc_s_8, + [SLV_SDCC_2] = &slv_sdcc_2, + [SLV_SPDM] = &slv_spdm, + [SLV_PDM] = &slv_pdm, + [SLV_PRNG] = &slv_prng, + [SLV_TCSR] = &slv_tcsr, + [SLV_SNOC_CFG] = &slv_snoc_cfg, + [SLV_MESSAGE_RAM] = &slv_message_ram, + [SLV_CAMERA_SS_CFG] = &slv_camera_ss_cfg, + [SLV_DISP_SS_CFG] = &slv_disp_ss_cfg, + [SLV_VENUS_CFG] = &slv_venus_cfg, + [SLV_GPU_CFG] = &slv_gpu_cfg, + [SLV_TLMM] = &slv_tlmm, + [SLV_BLSP_1] = &slv_blsp_1, + [SLV_BLSP_2] = &slv_blsp_2, + [SLV_PMIC_ARB] = &slv_pmic_arb, + [SLV_SDCC_1] = &slv_sdcc_1, + [SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg, + [SLV_USB_HS] = &slv_usb_hs, + [SLV_TCU] = &slv_tcu, + [SLV_PCNOC_SNOC] = &slv_pcnoc_snoc, +}; + +static const struct regmap_config msm8937_pcnoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x13080, + .fast_io = true, +}; + +static const struct qcom_icc_desc msm8937_pcnoc = { + .type = QCOM_ICC_NOC, + .nodes = msm8937_pcnoc_nodes, + .num_nodes = ARRAY_SIZE(msm8937_pcnoc_nodes), + .bus_clk_desc = &bus_0_clk, + .qos_offset = 0x7000, + .keep_alive = true, + .regmap_cfg = &msm8937_pcnoc_regmap_config, +}; + +static struct qcom_icc_node *msm8937_snoc_nodes[] = { + [MAS_QDSS_BAM] = &mas_qdss_bam, + 
[MAS_BIMC_SNOC] = &mas_bimc_snoc, + [MAS_PCNOC_SNOC] = &mas_pcnoc_snoc, + [MAS_QDSS_ETR] = &mas_qdss_etr, + [QDSS_INT] = &qdss_int, + [SNOC_INT_0] = &snoc_int_0, + [SNOC_INT_1] = &snoc_int_1, + [SNOC_INT_2] = &snoc_int_2, + [SLV_KPSS_AHB] = &slv_kpss_ahb, + [SLV_WCSS] = &slv_wcss, + [SLV_SNOC_BIMC_1] = &slv_snoc_bimc_1, + [SLV_IMEM] = &slv_imem, + [SLV_SNOC_PCNOC] = &slv_snoc_pcnoc, + [SLV_QDSS_STM] = &slv_qdss_stm, + [SLV_CATS_1] = &slv_cats_1, + [SLV_LPASS] = &slv_lpass, +}; + +static const struct regmap_config msm8937_snoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x16080, + .fast_io = true, +}; + +static const struct qcom_icc_desc msm8937_snoc = { + .type = QCOM_ICC_NOC, + .nodes = msm8937_snoc_nodes, + .num_nodes = ARRAY_SIZE(msm8937_snoc_nodes), + .bus_clk_desc = &bus_1_clk, + .regmap_cfg = &msm8937_snoc_regmap_config, + .qos_offset = 0x7000, +}; + +static struct qcom_icc_node *msm8937_snoc_mm_nodes[] = { + [MAS_JPEG] = &mas_jpeg, + [MAS_MDP] = &mas_mdp, + [MAS_VENUS] = &mas_venus, + [MAS_VFE0] = &mas_vfe0, + [MAS_VFE1] = &mas_vfe1, + [MAS_CPP] = &mas_cpp, + [SLV_SNOC_BIMC_0] = &slv_snoc_bimc_0, + [SLV_SNOC_BIMC_2] = &slv_snoc_bimc_2, + [SLV_CATS_0] = &slv_cats_0, +}; + +static const struct qcom_icc_desc msm8937_snoc_mm = { + .type = QCOM_ICC_NOC, + .nodes = msm8937_snoc_mm_nodes, + .num_nodes = ARRAY_SIZE(msm8937_snoc_mm_nodes), + .bus_clk_desc = &bus_2_clk, + .regmap_cfg = &msm8937_snoc_regmap_config, + .qos_offset = 0x7000, + .ab_coeff = 154, +}; + +static const struct of_device_id msm8937_noc_of_match[] = { + { .compatible = "qcom,msm8937-bimc", .data = &msm8937_bimc }, + { .compatible = "qcom,msm8937-pcnoc", .data = &msm8937_pcnoc }, + { .compatible = "qcom,msm8937-snoc", .data = &msm8937_snoc }, + { .compatible = "qcom,msm8937-snoc-mm", .data = &msm8937_snoc_mm }, + { } +}; +MODULE_DEVICE_TABLE(of, msm8937_noc_of_match); + +static struct platform_driver msm8937_noc_driver = { + .probe = qnoc_probe, + .remove_new = qnoc_remove, + .driver = { + .name = "qnoc-msm8937", + .of_match_table = msm8937_noc_of_match, + .sync_state = icc_sync_state, + }, +}; +module_platform_driver(msm8937_noc_driver); + +MODULE_DESCRIPTION("Qualcomm MSM8937 NoC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/interconnect/qcom/msm8953.c b/drivers/interconnect/qcom/msm8953.c index 9e8867c07692bd..62f8c0774b3ecc 100644 --- a/drivers/interconnect/qcom/msm8953.c +++ b/drivers/interconnect/qcom/msm8953.c @@ -1169,6 +1169,7 @@ static const struct qcom_icc_desc msm8953_bimc = { .nodes = msm8953_bimc_nodes, .num_nodes = ARRAY_SIZE(msm8953_bimc_nodes), .qos_offset = 0x8000, + .ab_coeff = 153, .regmap_cfg = &msm8953_bimc_regmap_config }; @@ -1295,6 +1296,7 @@ static const struct qcom_icc_desc msm8953_snoc_mm = { .nodes = msm8953_snoc_mm_nodes, .num_nodes = ARRAY_SIZE(msm8953_snoc_mm_nodes), .qos_offset = 0x7000, + .ab_coeff = 153, .regmap_cfg = &msm8953_snoc_regmap_config, }; diff --git a/drivers/interconnect/qcom/msm8976.c b/drivers/interconnect/qcom/msm8976.c new file mode 100644 index 00000000000000..ab963def77c3ae --- /dev/null +++ b/drivers/interconnect/qcom/msm8976.c @@ -0,0 +1,1440 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on data from msm8976-bus.dtsi in Qualcomm's msm-3.10 release: + * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "icc-rpm.h" + +enum { + QNOC_MASTER_AMPSS_M0 = 1, + QNOC_MNOC_BIMC_MAS, + QNOC_SNOC_BIMC_MAS, + QNOC_MASTER_TCU_0, + QNOC_MASTER_USB_HS2, + QNOC_MASTER_BLSP_1, + QNOC_MASTER_USB_HS, + QNOC_MASTER_BLSP_2, + QNOC_MASTER_CRYPTO_CORE0, + QNOC_MASTER_SDCC_1, + QNOC_MASTER_SDCC_2, + QNOC_MASTER_SDCC_3, + QNOC_SNOC_PNOC_MAS, + QNOC_MASTER_LPASS_AHB, + QNOC_MASTER_SPDM, + QNOC_MASTER_DEHR, + QNOC_MASTER_XM_USB_HS1, + QNOC_MASTER_QDSS_BAM, + QNOC_BIMC_SNOC_MAS, + QNOC_MASTER_JPEG, + QNOC_MASTER_GRAPHICS_3D, + QNOC_MASTER_MDP_PORT0, + QNOC_MASTER_MDP_PORT1, + QNOC_PNOC_SNOC_MAS, + QNOC_MASTER_VIDEO_P0, + QNOC_MASTER_VIDEO_P1, + QNOC_MASTER_VFE0, + QNOC_MASTER_VFE1, + QNOC_MASTER_CPP, + QNOC_MASTER_QDSS_ETR, + QNOC_MASTER_LPASS_PROC, + QNOC_MASTER_IPA, + QNOC_PNOC_M_0, + QNOC_PNOC_M_1, + QNOC_PNOC_INT_0, + QNOC_PNOC_INT_1, + QNOC_PNOC_INT_2, + QNOC_PNOC_SLV_1, + QNOC_PNOC_SLV_2, + QNOC_PNOC_SLV_3, + QNOC_PNOC_SLV_4, + QNOC_PNOC_SLV_8, + QNOC_PNOC_SLV_9, + QNOC_SNOC_MM_INT_0, + QNOC_SNOC_QDSS_INT, + QNOC_SNOC_INT_0, + QNOC_SNOC_INT_1, + QNOC_SNOC_INT_2, + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV, + QNOC_SLAVE_TCSR, + QNOC_SLAVE_TLMM, + QNOC_SLAVE_CRYPTO_0_CFG, + QNOC_SLAVE_MESSAGE_RAM, + QNOC_SLAVE_PDM, + QNOC_SLAVE_PRNG, + QNOC_SLAVE_PMIC_ARB, + QNOC_SLAVE_SNOC_CFG, + QNOC_SLAVE_DCC_CFG, + QNOC_SLAVE_CAMERA_CFG, + QNOC_SLAVE_DISPLAY_CFG, + QNOC_SLAVE_VENUS_CFG, + QNOC_SLAVE_SDCC_1, + QNOC_SLAVE_BLSP_1, + QNOC_SLAVE_USB_HS, + QNOC_SLAVE_SDCC_3, + QNOC_SLAVE_SDCC_2, + QNOC_SLAVE_GRAPHICS_3D_CFG, + QNOC_SLAVE_USB_HS2, + QNOC_SLAVE_BLSP_2, + QNOC_PNOC_SNOC_SLV, + QNOC_SLAVE_APPSS, + QNOC_MNOC_BIMC_SLV, + QNOC_SNOC_BIMC_SLV, + QNOC_SLAVE_SYSTEM_IMEM, + QNOC_SNOC_PNOC_SLV, + QNOC_SLAVE_QDSS_STM, + QNOC_SLAVE_CATS_128, + QNOC_SLAVE_OCMEM_64, + QNOC_SLAVE_LPASS, +}; + +static const u16 mas_apps_proc_links[] = { + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV +}; + +static struct qcom_icc_node mas_apps_proc = { + .name = "mas_apps_proc", + .id = QNOC_MASTER_AMPSS_M0, + .buswidth = 16, + .mas_rpm_id = 0, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 0, + .num_links = ARRAY_SIZE(mas_apps_proc_links), + .links = mas_apps_proc_links, +}; + +static const u16 mas_smmnoc_bimc_links[] = { + QNOC_SLAVE_EBI_CH0 +}; + +static struct qcom_icc_node mas_smmnoc_bimc = { + .name = "mas_smmnoc_bimc", + .id = QNOC_MNOC_BIMC_MAS, + .channels = 2, + .buswidth = 16, + .mas_rpm_id = 135, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 2, + .num_links = ARRAY_SIZE(mas_smmnoc_bimc_links), + .links = mas_smmnoc_bimc_links, +}; + +static const u16 mas_snoc_bimc_links[] = { + QNOC_SLAVE_EBI_CH0 +}; + +static struct qcom_icc_node mas_snoc_bimc = { + .name = "mas_snoc_bimc", + .id = QNOC_SNOC_BIMC_MAS, + .channels = 2, + .buswidth = 16, + .mas_rpm_id = 3, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 3, + .num_links = ARRAY_SIZE(mas_snoc_bimc_links), + .links = mas_snoc_bimc_links, +}; + +static const u16 mas_tcu_0_links[] = { + QNOC_SLAVE_EBI_CH0, + QNOC_BIMC_SNOC_SLV +}; + +static struct qcom_icc_node mas_tcu_0 = { + .name = "mas_tcu_0", + .id = QNOC_MASTER_TCU_0, + .buswidth = 16, + .mas_rpm_id = 102, + .slv_rpm_id = -1, + .qos.ap_owned = true, + 
.qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 2, + .qos.qos_port = 4, + .num_links = ARRAY_SIZE(mas_tcu_0_links), + .links = mas_tcu_0_links, +}; + +static const u16 mas_usb_hs2_links[] = { + QNOC_PNOC_M_0 +}; + +static struct qcom_icc_node mas_usb_hs2 = { + .name = "mas_usb_hs2", + .id = QNOC_MASTER_USB_HS2, + .buswidth = 4, + .mas_rpm_id = 57, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_usb_hs2_links), + .links = mas_usb_hs2_links, +}; + +static const u16 mas_blsp_1_links[] = { + QNOC_PNOC_M_1 +}; + +static struct qcom_icc_node mas_blsp_1 = { + .name = "mas_blsp_1", + .id = QNOC_MASTER_BLSP_1, + .buswidth = 4, + .mas_rpm_id = 41, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_blsp_1_links), + .links = mas_blsp_1_links, +}; + +static const u16 mas_usb_hs1_links[] = { + QNOC_PNOC_M_1 +}; + +static struct qcom_icc_node mas_usb_hs1 = { + .name = "mas_usb_hs1", + .id = QNOC_MASTER_USB_HS, + .buswidth = 4, + .mas_rpm_id = 42, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_usb_hs1_links), + .links = mas_usb_hs1_links, +}; + +static const u16 mas_blsp_2_links[] = { + QNOC_PNOC_M_1 +}; + +static struct qcom_icc_node mas_blsp_2 = { + .name = "mas_blsp_2", + .id = QNOC_MASTER_BLSP_2, + .buswidth = 4, + .mas_rpm_id = 39, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_blsp_2_links), + .links = mas_blsp_2_links, +}; + +static const u16 mas_crypto_links[] = { + QNOC_PNOC_INT_1 +}; + +static struct qcom_icc_node mas_crypto = { + .name = "mas_crypto", + .id = QNOC_MASTER_CRYPTO_CORE0, + .buswidth = 8, + .mas_rpm_id = 23, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 0, + .num_links = ARRAY_SIZE(mas_crypto_links), + .links = mas_crypto_links, +}; + +static const u16 mas_sdcc_1_links[] = { + QNOC_PNOC_INT_1 +}; + +static struct qcom_icc_node mas_sdcc_1 = { + .name = "mas_sdcc_1", + .id = QNOC_MASTER_SDCC_1, + .buswidth = 8, + .mas_rpm_id = 33, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 7, + .num_links = ARRAY_SIZE(mas_sdcc_1_links), + .links = mas_sdcc_1_links, +}; + +static const u16 mas_sdcc_2_links[] = { + QNOC_PNOC_INT_1 +}; + +static struct qcom_icc_node mas_sdcc_2 = { + .name = "mas_sdcc_2", + .id = QNOC_MASTER_SDCC_2, + .buswidth = 8, + .mas_rpm_id = 35, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 8, + .num_links = ARRAY_SIZE(mas_sdcc_2_links), + .links = mas_sdcc_2_links, +}; + +static const u16 mas_sdcc_3_links[] = { + QNOC_PNOC_INT_1 +}; + +static struct qcom_icc_node mas_sdcc_3 = { + .name = "mas_sdcc_3", + .id = QNOC_MASTER_SDCC_3, + .buswidth = 8, + .mas_rpm_id = 34, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 10, + .num_links = ARRAY_SIZE(mas_sdcc_3_links), + .links = mas_sdcc_3_links, +}; + +static const u16 mas_snoc_pcnoc_links[] = { + QNOC_PNOC_INT_2 +}; + +static struct qcom_icc_node mas_snoc_pcnoc = { + .name = "mas_snoc_pcnoc", + .id = QNOC_SNOC_PNOC_MAS, + .buswidth = 8, + .mas_rpm_id = 77, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 9, + .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links), + .links = mas_snoc_pcnoc_links, +}; + +static const u16 mas_lpass_ahb_links[] = { + QNOC_PNOC_SNOC_SLV +}; + +static struct qcom_icc_node mas_lpass_ahb = { 
+ .name = "mas_lpass_ahb", + .id = QNOC_MASTER_LPASS_AHB, + .buswidth = 8, + .mas_rpm_id = 18, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 12, + .num_links = ARRAY_SIZE(mas_lpass_ahb_links), + .links = mas_lpass_ahb_links, +}; + +static const u16 mas_spdm_links[] = { + QNOC_PNOC_M_0 +}; + +static struct qcom_icc_node mas_spdm = { + .name = "mas_spdm", + .id = QNOC_MASTER_SPDM, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_spdm_links), + .links = mas_spdm_links, +}; + +static const u16 mas_dehr_links[] = { + QNOC_PNOC_M_0 +}; + +static struct qcom_icc_node mas_dehr = { + .name = "mas_dehr", + .id = QNOC_MASTER_DEHR, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_dehr_links), + .links = mas_dehr_links, +}; + +static const u16 mas_xm_usb_hs1_links[] = { + QNOC_PNOC_INT_0 +}; + +static struct qcom_icc_node mas_xm_usb_hs1 = { + .name = "mas_xm_usb_hs1", + .id = QNOC_MASTER_XM_USB_HS1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_xm_usb_hs1_links), + .links = mas_xm_usb_hs1_links, +}; + +static const u16 mas_qdss_bam_links[] = { + QNOC_SNOC_QDSS_INT +}; + +static struct qcom_icc_node mas_qdss_bam = { + .name = "mas_qdss_bam", + .id = QNOC_MASTER_QDSS_BAM, + .buswidth = 4, + .mas_rpm_id = 19, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 11, + .num_links = ARRAY_SIZE(mas_qdss_bam_links), + .links = mas_qdss_bam_links, +}; + +static const u16 mas_bimc_snoc_links[] = { + QNOC_SNOC_INT_2 +}; + +static struct qcom_icc_node mas_bimc_snoc = { + .name = "mas_bimc_snoc", + .id = QNOC_BIMC_SNOC_MAS, + .buswidth = 8, + .mas_rpm_id = 21, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_bimc_snoc_links), + .links = mas_bimc_snoc_links, +}; + +static const u16 mas_jpeg_links[] = { + QNOC_SNOC_MM_INT_0, + QNOC_MNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_jpeg = { + .name = "mas_jpeg", + .id = QNOC_MASTER_JPEG, + .buswidth = 16, + .mas_rpm_id = 7, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 6, + .num_links = ARRAY_SIZE(mas_jpeg_links), + .links = mas_jpeg_links, +}; + +static const u16 mas_oxili_links[] = { + QNOC_MNOC_BIMC_SLV, + QNOC_SNOC_MM_INT_0 +}; + +static struct qcom_icc_node mas_oxili = { + .name = "mas_oxili", + .id = QNOC_MASTER_GRAPHICS_3D, + .channels = 2, + .buswidth = 16, + .ib_coeff = 200, + .mas_rpm_id = 6, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 16, /* [16, 17] */ + .num_links = ARRAY_SIZE(mas_oxili_links), + .links = mas_oxili_links, +}; + +static const u16 mas_mdp0_links[] = { + QNOC_SNOC_MM_INT_0, + QNOC_MNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_mdp0 = { + .name = "mas_mdp0", + .id = QNOC_MASTER_MDP_PORT0, + .buswidth = 16, + .ib_coeff = 50, + .mas_rpm_id = 8, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 7, + .num_links = ARRAY_SIZE(mas_mdp0_links), + .links = mas_mdp0_links, +}; + +static const u16 mas_mdp1_links[] = { + QNOC_SNOC_MM_INT_0, + QNOC_MNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_mdp1 = { + .name = "mas_mdp1", + .id = QNOC_MASTER_MDP_PORT1, + .buswidth 
= 16, + .ib_coeff = 50, + .mas_rpm_id = 61, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 13, + .num_links = ARRAY_SIZE(mas_mdp1_links), + .links = mas_mdp1_links, +}; + +static const u16 mas_pcnoc_snoc_links[] = { + QNOC_SNOC_INT_2 +}; + +static struct qcom_icc_node mas_pcnoc_snoc = { + .name = "mas_pcnoc_snoc", + .id = QNOC_PNOC_SNOC_MAS, + .buswidth = 8, + .mas_rpm_id = 29, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 5, + .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links), + .links = mas_pcnoc_snoc_links, +}; + +static const u16 mas_venus_0_links[] = { + QNOC_SNOC_MM_INT_0, + QNOC_MNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_venus_0 = { + .name = "mas_venus_0", + .id = QNOC_MASTER_VIDEO_P0, + .buswidth = 16, + .mas_rpm_id = 9, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 8, + .num_links = ARRAY_SIZE(mas_venus_0_links), + .links = mas_venus_0_links, +}; + +static const u16 mas_venus_1_links[] = { + QNOC_SNOC_MM_INT_0, + QNOC_MNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_venus_1 = { + .name = "mas_venus_1", + .id = QNOC_MASTER_VIDEO_P1, + .buswidth = 16, + .mas_rpm_id = 10, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 14, + .num_links = ARRAY_SIZE(mas_venus_1_links), + .links = mas_venus_1_links, +}; + +static const u16 mas_vfe_0_links[] = { + QNOC_SNOC_MM_INT_0, + QNOC_MNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_vfe_0 = { + .name = "mas_vfe_0", + .id = QNOC_MASTER_VFE0, + .buswidth = 16, + .mas_rpm_id = 11, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 9, + .num_links = ARRAY_SIZE(mas_vfe_0_links), + .links = mas_vfe_0_links, +}; + +static const u16 mas_vfe_1_links[] = { + QNOC_SNOC_MM_INT_0, + QNOC_MNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_vfe_1 = { + .name = "mas_vfe_1", + .id = QNOC_MASTER_VFE1, + .buswidth = 16, + .mas_rpm_id = 133, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 15, + .num_links = ARRAY_SIZE(mas_vfe_1_links), + .links = mas_vfe_1_links, +}; + +static const u16 mas_cpp_links[] = { + QNOC_SNOC_MM_INT_0, + QNOC_MNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_cpp = { + .name = "mas_cpp", + .id = QNOC_MASTER_CPP, + .buswidth = 16, + .mas_rpm_id = 115, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 12, + .num_links = ARRAY_SIZE(mas_cpp_links), + .links = mas_cpp_links, +}; + +static const u16 mas_qdss_etr_links[] = { + QNOC_SNOC_QDSS_INT +}; + +static struct qcom_icc_node mas_qdss_etr = { + .name = "mas_qdss_etr", + .id = QNOC_MASTER_QDSS_ETR, + .buswidth = 8, + .mas_rpm_id = 31, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 10, + .num_links = ARRAY_SIZE(mas_qdss_etr_links), + .links = mas_qdss_etr_links, +}; + +static const u16 mas_lpass_proc_links[] = { + QNOC_SNOC_INT_0, + QNOC_SNOC_INT_1, + QNOC_SNOC_BIMC_SLV +}; + +static struct qcom_icc_node mas_lpass_proc = { + .name = 
"mas_lpass_proc", + .id = QNOC_MASTER_LPASS_PROC, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 19, + .num_links = ARRAY_SIZE(mas_lpass_proc_links), + .links = mas_lpass_proc_links, +}; + +static const u16 mas_ipa_links[] = { + QNOC_SNOC_INT_2 +}; + +static struct qcom_icc_node mas_ipa = { + .name = "mas_ipa", + .id = QNOC_MASTER_IPA, + .buswidth = 8, + .mas_rpm_id = 59, + .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 18, + .num_links = ARRAY_SIZE(mas_ipa_links), + .links = mas_ipa_links, +}; + +static const u16 pcnoc_m_0_links[] = { + QNOC_PNOC_SNOC_SLV +}; + +static struct qcom_icc_node pcnoc_m_0 = { + .name = "pcnoc_m_0", + .id = QNOC_PNOC_M_0, + .buswidth = 4, + .mas_rpm_id = 87, + .slv_rpm_id = 116, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 5, + .num_links = ARRAY_SIZE(pcnoc_m_0_links), + .links = pcnoc_m_0_links, +}; + +static const u16 pcnoc_m_1_links[] = { + QNOC_PNOC_SNOC_SLV +}; + +static struct qcom_icc_node pcnoc_m_1 = { + .name = "pcnoc_m_1", + .id = QNOC_PNOC_M_1, + .buswidth = 4, + .mas_rpm_id = 88, + .slv_rpm_id = 117, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 6, + .num_links = ARRAY_SIZE(pcnoc_m_1_links), + .links = pcnoc_m_1_links, +}; + +static const u16 pcnoc_int_0_links[] = { + QNOC_PNOC_SNOC_SLV, + QNOC_PNOC_INT_2 +}; + +static struct qcom_icc_node pcnoc_int_0 = { + .name = "pcnoc_int_0", + .id = QNOC_PNOC_INT_0, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(pcnoc_int_0_links), + .links = pcnoc_int_0_links, +}; + +static const u16 pcnoc_int_1_links[] = { + QNOC_PNOC_SNOC_SLV, + QNOC_PNOC_INT_2 +}; + +static struct qcom_icc_node pcnoc_int_1 = { + .name = "pcnoc_int_1", + .id = QNOC_PNOC_INT_1, + .buswidth = 8, + .mas_rpm_id = 86, + .slv_rpm_id = 115, + .num_links = ARRAY_SIZE(pcnoc_int_1_links), + .links = pcnoc_int_1_links, +}; + +static const u16 pcnoc_int_2_links[] = { + QNOC_PNOC_SLV_1, + QNOC_PNOC_SLV_2, + QNOC_PNOC_SLV_4, + QNOC_PNOC_SLV_8, + QNOC_PNOC_SLV_9, + QNOC_PNOC_SLV_3 +}; + +static struct qcom_icc_node pcnoc_int_2 = { + .name = "pcnoc_int_2", + .id = QNOC_PNOC_INT_2, + .buswidth = 8, + .mas_rpm_id = 124, + .slv_rpm_id = 184, + .num_links = ARRAY_SIZE(pcnoc_int_2_links), + .links = pcnoc_int_2_links, +}; + +static const u16 pcnoc_s_1_links[] = { + QNOC_SLAVE_CRYPTO_0_CFG, + QNOC_SLAVE_PRNG, + QNOC_SLAVE_PDM, + QNOC_SLAVE_MESSAGE_RAM +}; + +static struct qcom_icc_node pcnoc_s_1 = { + .name = "pcnoc_s_1", + .id = QNOC_PNOC_SLV_1, + .buswidth = 4, + .mas_rpm_id = 90, + .slv_rpm_id = 119, + .num_links = ARRAY_SIZE(pcnoc_s_1_links), + .links = pcnoc_s_1_links, +}; + +static const u16 pcnoc_s_2_links[] = { + QNOC_SLAVE_PMIC_ARB +}; + +static struct qcom_icc_node pcnoc_s_2 = { + .name = "pcnoc_s_2", + .id = QNOC_PNOC_SLV_2, + .buswidth = 4, + .mas_rpm_id = 91, + .slv_rpm_id = 120, + .num_links = ARRAY_SIZE(pcnoc_s_2_links), + .links = pcnoc_s_2_links, +}; + +static const u16 pcnoc_s_3_links[] = { + QNOC_SLAVE_SNOC_CFG, + QNOC_SLAVE_DCC_CFG +}; + +static struct qcom_icc_node pcnoc_s_3 = { + .name = "pcnoc_s_3", + .id = QNOC_PNOC_SLV_3, + .buswidth = 4, + .mas_rpm_id = 92, + .slv_rpm_id = 121, + .num_links = ARRAY_SIZE(pcnoc_s_3_links), + .links = pcnoc_s_3_links, +}; + +static const u16 pcnoc_s_4_links[] 
= { + QNOC_SLAVE_CAMERA_CFG, + QNOC_SLAVE_DISPLAY_CFG, + QNOC_SLAVE_VENUS_CFG +}; + +static struct qcom_icc_node pcnoc_s_4 = { + .name = "pcnoc_s_4", + .id = QNOC_PNOC_SLV_4, + .buswidth = 4, + .mas_rpm_id = 93, + .slv_rpm_id = 122, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(pcnoc_s_4_links), + .links = pcnoc_s_4_links, +}; + +static const u16 pcnoc_s_8_links[] = { + QNOC_SLAVE_USB_HS, + QNOC_SLAVE_SDCC_3, + QNOC_SLAVE_BLSP_1, + QNOC_SLAVE_SDCC_1 +}; + +static struct qcom_icc_node pcnoc_s_8 = { + .name = "pcnoc_s_8", + .id = QNOC_PNOC_SLV_8, + .buswidth = 4, + .mas_rpm_id = 96, + .slv_rpm_id = 125, + .num_links = ARRAY_SIZE(pcnoc_s_8_links), + .links = pcnoc_s_8_links, +}; + +static const u16 pcnoc_s_9_links[] = { + QNOC_SLAVE_GRAPHICS_3D_CFG, + QNOC_SLAVE_USB_HS2, + QNOC_SLAVE_SDCC_2, + QNOC_SLAVE_BLSP_2 +}; + +static struct qcom_icc_node pcnoc_s_9 = { + .name = "pcnoc_s_9", + .id = QNOC_PNOC_SLV_9, + .buswidth = 4, + .mas_rpm_id = 97, + .slv_rpm_id = 126, + .num_links = ARRAY_SIZE(pcnoc_s_9_links), + .links = pcnoc_s_9_links, +}; + +static const u16 mm_int_0_links[] = { + QNOC_SNOC_INT_0 +}; + +static struct qcom_icc_node mm_int_0 = { + .name = "mm_int_0", + .id = QNOC_SNOC_MM_INT_0, + .buswidth = 16, + .ib_coeff = 200, + .mas_rpm_id = 79, + .slv_rpm_id = 108, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(mm_int_0_links), + .links = mm_int_0_links, +}; + +static const u16 qdss_int_links[] = { + QNOC_SNOC_INT_2 +}; + +static struct qcom_icc_node qdss_int = { + .name = "qdss_int", + .id = QNOC_SNOC_QDSS_INT, + .buswidth = 8, + .mas_rpm_id = 98, + .slv_rpm_id = 128, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(qdss_int_links), + .links = qdss_int_links, +}; + +static const u16 snoc_int_0_links[] = { + QNOC_SLAVE_QDSS_STM, + QNOC_SLAVE_SYSTEM_IMEM, + QNOC_SNOC_PNOC_SLV +}; + +static struct qcom_icc_node snoc_int_0 = { + .name = "snoc_int_0", + .id = QNOC_SNOC_INT_0, + .buswidth = 8, + .mas_rpm_id = 99, + .slv_rpm_id = 130, + .num_links = ARRAY_SIZE(snoc_int_0_links), + .links = snoc_int_0_links, +}; + +static const u16 snoc_int_1_links[] = { + QNOC_SLAVE_LPASS, + QNOC_SLAVE_CATS_128, + QNOC_SLAVE_OCMEM_64, + QNOC_SLAVE_APPSS +}; + +static struct qcom_icc_node snoc_int_1 = { + .name = "snoc_int_1", + .id = QNOC_SNOC_INT_1, + .buswidth = 8, + .mas_rpm_id = 100, + .slv_rpm_id = 131, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(snoc_int_1_links), + .links = snoc_int_1_links, +}; + +static const u16 snoc_int_2_links[] = { + QNOC_SNOC_INT_0, + QNOC_SNOC_INT_1, + QNOC_SNOC_BIMC_SLV +}; + +static struct qcom_icc_node snoc_int_2 = { + .name = "snoc_int_2", + .id = QNOC_SNOC_INT_2, + .buswidth = 8, + .mas_rpm_id = 134, + .slv_rpm_id = 197, + .num_links = ARRAY_SIZE(snoc_int_2_links), + .links = snoc_int_2_links, +}; + +static struct qcom_icc_node slv_ebi = { + .name = "slv_ebi", + .id = QNOC_SLAVE_EBI_CH0, + .channels = 2, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 0, +}; + +static const u16 slv_bimc_snoc_links[] = { + QNOC_BIMC_SNOC_MAS +}; + +static struct qcom_icc_node slv_bimc_snoc = { + .name = "slv_bimc_snoc", + .id = QNOC_BIMC_SNOC_SLV, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 2, + .num_links = ARRAY_SIZE(slv_bimc_snoc_links), + .links = slv_bimc_snoc_links, +}; + +static struct qcom_icc_node slv_tcsr = { + .name = "slv_tcsr", + .id = QNOC_SLAVE_TCSR, + .buswidth = 4, + .mas_rpm_id = -1, + 
.slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+	.name = "slv_tlmm",
+	.id = QNOC_SLAVE_TLMM,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 51,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+	.name = "slv_crypto_0_cfg",
+	.id = QNOC_SLAVE_CRYPTO_0_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 52,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+	.name = "slv_message_ram",
+	.id = QNOC_SLAVE_MESSAGE_RAM,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_pdm = {
+	.name = "slv_pdm",
+	.id = QNOC_SLAVE_PDM,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_prng = {
+	.name = "slv_prng",
+	.id = QNOC_SLAVE_PRNG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 44,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+	.name = "slv_pmic_arb",
+	.id = QNOC_SLAVE_PMIC_ARB,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+	.name = "slv_snoc_cfg",
+	.id = QNOC_SLAVE_SNOC_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 70,
+};
+
+static struct qcom_icc_node slv_dcc_cfg = {
+	.name = "slv_dcc_cfg",
+	.id = QNOC_SLAVE_DCC_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 155,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_camera_ss_cfg = {
+	.name = "slv_camera_ss_cfg",
+	.id = QNOC_SLAVE_CAMERA_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 3,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_disp_ss_cfg = {
+	.name = "slv_disp_ss_cfg",
+	.id = QNOC_SLAVE_DISPLAY_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 4,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+	.name = "slv_venus_cfg",
+	.id = QNOC_SLAVE_VENUS_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 10,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+	.name = "slv_sdcc_1",
+	.id = QNOC_SLAVE_SDCC_1,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+	.name = "slv_blsp_1",
+	.id = QNOC_SLAVE_BLSP_1,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 39,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+	.name = "slv_usb_hs",
+	.id = QNOC_SLAVE_USB_HS,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 40,
+};
+
+static struct qcom_icc_node slv_sdcc_3 = {
+	.name = "slv_sdcc_3",
+	.id = QNOC_SLAVE_SDCC_3,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 32,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+	.name = "slv_sdcc_2",
+	.id = QNOC_SLAVE_SDCC_2,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 33,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+	.name = "slv_gpu_cfg",
+	.id = QNOC_SLAVE_GRAPHICS_3D_CFG,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 11,
+	.qos.ap_owned = true,
+	.qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_usb_hs2 = {
+	.name = "slv_usb_hs2",
+	.id = QNOC_SLAVE_USB_HS2,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 79,
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+	.name = "slv_blsp_2",
+	.id = QNOC_SLAVE_BLSP_2,
+	.buswidth = 4,
+	.mas_rpm_id = -1,
+	.slv_rpm_id = 37,
+};
+ +static const u16 slv_pcnoc_snoc_links[] = { + QNOC_PNOC_SNOC_MAS +}; + +static struct qcom_icc_node slv_pcnoc_snoc = { + .name = "slv_pcnoc_snoc", + .id = QNOC_PNOC_SNOC_SLV, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 45, + .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links), + .links = slv_pcnoc_snoc_links, +}; + +static struct qcom_icc_node slv_kpss_ahb = { + .name = "slv_kpss_ahb", + .id = QNOC_SLAVE_APPSS, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 20, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static const u16 slv_smmnoc_bimc_links[] = { + QNOC_MNOC_BIMC_MAS +}; + +static struct qcom_icc_node slv_smmnoc_bimc = { + .name = "slv_smmnoc_bimc", + .id = QNOC_MNOC_BIMC_SLV, + .channels = 2, + .buswidth = 16, + .ib_coeff = 200, + .mas_rpm_id = -1, + .slv_rpm_id = 198, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(slv_smmnoc_bimc_links), + .links = slv_smmnoc_bimc_links, +}; + +static const u16 slv_snoc_bimc_links[] = { + QNOC_SNOC_BIMC_MAS +}; + +static struct qcom_icc_node slv_snoc_bimc = { + .name = "slv_snoc_bimc", + .id = QNOC_SNOC_BIMC_SLV, + .channels = 2, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 24, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, + .num_links = ARRAY_SIZE(slv_snoc_bimc_links), + .links = slv_snoc_bimc_links, +}; + +static struct qcom_icc_node slv_imem = { + .name = "slv_imem", + .id = QNOC_SLAVE_SYSTEM_IMEM, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 26, +}; + +static const u16 slv_snoc_pcnoc_links[] = { + QNOC_SNOC_PNOC_MAS +}; + +static struct qcom_icc_node slv_snoc_pcnoc = { + .name = "slv_snoc_pcnoc", + .id = QNOC_SNOC_PNOC_SLV, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 28, + .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links), + .links = slv_snoc_pcnoc_links, +}; + +static struct qcom_icc_node slv_qdss_stm = { + .name = "slv_qdss_stm", + .id = QNOC_SLAVE_QDSS_STM, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 30, +}; + +static struct qcom_icc_node slv_cats_0 = { + .name = "slv_cats_0", + .id = QNOC_SLAVE_CATS_128, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 106, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_cats_1 = { + .name = "slv_cats_1", + .id = QNOC_SLAVE_OCMEM_64, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 107, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node slv_lpass = { + .name = "slv_lpass", + .id = QNOC_SLAVE_LPASS, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 21, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, +}; + +static struct qcom_icc_node * const msm8976_bimc_nodes[] = { + [MAS_APPS_PROC] = &mas_apps_proc, + [MAS_SMMNOC_BIMC] = &mas_smmnoc_bimc, + [MAS_SNOC_BIMC] = &mas_snoc_bimc, + [MAS_TCU_0] = &mas_tcu_0, + [SLV_EBI] = &slv_ebi, + [SLV_BIMC_SNOC] = &slv_bimc_snoc, +}; + +static const struct regmap_config msm8976_bimc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x62000, + .fast_io = true, +}; + +static const struct qcom_icc_desc msm8976_bimc = { + .type = QCOM_ICC_BIMC, + .nodes = msm8976_bimc_nodes, + .num_nodes = ARRAY_SIZE(msm8976_bimc_nodes), + .bus_clk_desc = &bimc_clk, + .regmap_cfg = &msm8976_bimc_regmap_config, + .qos_offset = 0x8000, + .ab_coeff = 154, +}; + +static struct qcom_icc_node * const msm8976_pcnoc_nodes[] = { + [MAS_USB_HS2] = &mas_usb_hs2, + [MAS_BLSP_1] = &mas_blsp_1, + [MAS_USB_HS1] = 
&mas_usb_hs1, + [MAS_BLSP_2] = &mas_blsp_2, + [MAS_CRYPTO] = &mas_crypto, + [MAS_SDCC_1] = &mas_sdcc_1, + [MAS_SDCC_2] = &mas_sdcc_2, + [MAS_SDCC_3] = &mas_sdcc_3, + [MAS_SNOC_PCNOC] = &mas_snoc_pcnoc, + [MAS_LPASS_AHB] = &mas_lpass_ahb, + [MAS_SPDM] = &mas_spdm, + [MAS_DEHR] = &mas_dehr, + [MAS_XM_USB_HS1] = &mas_xm_usb_hs1, + [PCNOC_M_0] = &pcnoc_m_0, + [PCNOC_M_1] = &pcnoc_m_1, + [PCNOC_INT_0] = &pcnoc_int_0, + [PCNOC_INT_1] = &pcnoc_int_1, + [PCNOC_INT_2] = &pcnoc_int_2, + [PCNOC_S_1] = &pcnoc_s_1, + [PCNOC_S_2] = &pcnoc_s_2, + [PCNOC_S_3] = &pcnoc_s_3, + [PCNOC_S_4] = &pcnoc_s_4, + [PCNOC_S_8] = &pcnoc_s_8, + [PCNOC_S_9] = &pcnoc_s_9, + [SLV_TCSR] = &slv_tcsr, + [SLV_TLMM] = &slv_tlmm, + [SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg, + [SLV_MESSAGE_RAM] = &slv_message_ram, + [SLV_PDM] = &slv_pdm, + [SLV_PRNG] = &slv_prng, + [SLV_PMIC_ARB] = &slv_pmic_arb, + [SLV_SNOC_CFG] = &slv_snoc_cfg, + [SLV_DCC_CFG] = &slv_dcc_cfg, + [SLV_CAMERA_SS_CFG] = &slv_camera_ss_cfg, + [SLV_DISP_SS_CFG] = &slv_disp_ss_cfg, + [SLV_VENUS_CFG] = &slv_venus_cfg, + [SLV_SDCC_1] = &slv_sdcc_1, + [SLV_BLSP_1] = &slv_blsp_1, + [SLV_USB_HS] = &slv_usb_hs, + [SLV_SDCC_3] = &slv_sdcc_3, + [SLV_SDCC_2] = &slv_sdcc_2, + [SLV_GPU_CFG] = &slv_gpu_cfg, + [SLV_USB_HS2] = &slv_usb_hs2, + [SLV_BLSP_2] = &slv_blsp_2, + [SLV_PCNOC_SNOC] = &slv_pcnoc_snoc, +}; + +static const struct regmap_config msm8976_pcnoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x14000, + .fast_io = true, +}; + +static const struct qcom_icc_desc msm8976_pcnoc = { + .type = QCOM_ICC_NOC, + .nodes = msm8976_pcnoc_nodes, + .num_nodes = ARRAY_SIZE(msm8976_pcnoc_nodes), + .bus_clk_desc = &bus_0_clk, + .qos_offset = 0x7000, + .keep_alive = true, + .regmap_cfg = &msm8976_pcnoc_regmap_config, +}; + +static struct qcom_icc_node * const msm8976_snoc_nodes[] = { + [MAS_QDSS_BAM] = &mas_qdss_bam, + [MAS_BIMC_SNOC] = &mas_bimc_snoc, + [MAS_PCNOC_SNOC] = &mas_pcnoc_snoc, + [MAS_QDSS_ETR] = &mas_qdss_etr, + [MAS_LPASS_PROC] = &mas_lpass_proc, + [MAS_IPA] = &mas_ipa, + [QDSS_INT] = &qdss_int, + [SNOC_INT_0] = &snoc_int_0, + [SNOC_INT_1] = &snoc_int_1, + [SNOC_INT_2] = &snoc_int_2, + [SLV_KPSS_AHB] = &slv_kpss_ahb, + [SLV_SNOC_BIMC] = &slv_snoc_bimc, + [SLV_IMEM] = &slv_imem, + [SLV_SNOC_PCNOC] = &slv_snoc_pcnoc, + [SLV_QDSS_STM] = &slv_qdss_stm, + [SLV_CATS_0] = &slv_cats_0, + [SLV_CATS_1] = &slv_cats_1, + [SLV_LPASS] = &slv_lpass, +}; + +static const struct regmap_config msm8976_snoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1A000, + .fast_io = true, +}; + +static const struct qcom_icc_desc msm8976_snoc = { + .type = QCOM_ICC_NOC, + .nodes = msm8976_snoc_nodes, + .num_nodes = ARRAY_SIZE(msm8976_snoc_nodes), + .bus_clk_desc = &bus_1_clk, + .regmap_cfg = &msm8976_snoc_regmap_config, + .qos_offset = 0x7000, +}; + +static struct qcom_icc_node * const msm8976_snoc_mm_nodes[] = { + [MAS_JPEG] = &mas_jpeg, + [MAS_OXILI] = &mas_oxili, + [MAS_MDP0] = &mas_mdp0, + [MAS_MDP1] = &mas_mdp1, + [MAS_VENUS_0] = &mas_venus_0, + [MAS_VENUS_1] = &mas_venus_1, + [MAS_VFE_0] = &mas_vfe_0, + [MAS_VFE_1] = &mas_vfe_1, + [MAS_CPP] = &mas_cpp, + [MM_INT_0] = &mm_int_0, + [SLV_SMMNOC_BIMC] = &slv_smmnoc_bimc, +}; + +static const struct qcom_icc_desc msm8976_snoc_mm = { + .type = QCOM_ICC_NOC, + .nodes = msm8976_snoc_mm_nodes, + .num_nodes = ARRAY_SIZE(msm8976_snoc_mm_nodes), + .bus_clk_desc = &bus_2_clk, + .regmap_cfg = &msm8976_snoc_regmap_config, + .qos_offset = 0x7000, + .ab_coeff = 154, +}; + 
+static const struct of_device_id msm8976_noc_of_match[] = { + { .compatible = "qcom,msm8976-bimc", .data = &msm8976_bimc }, + { .compatible = "qcom,msm8976-pcnoc", .data = &msm8976_pcnoc }, + { .compatible = "qcom,msm8976-snoc", .data = &msm8976_snoc }, + { .compatible = "qcom,msm8976-snoc-mm", .data = &msm8976_snoc_mm }, + { } +}; +MODULE_DEVICE_TABLE(of, msm8976_noc_of_match); + +static struct platform_driver msm8976_noc_driver = { + .probe = qnoc_probe, + .remove_new = qnoc_remove, + .driver = { + .name = "qnoc-msm8976", + .of_match_table = msm8976_noc_of_match, + .sync_state = icc_sync_state, + }, +}; +module_platform_driver(msm8976_noc_driver); + +MODULE_DESCRIPTION("Qualcomm MSM8976 NoC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c index 11b49a89c03d14..63e9ff223ac491 100644 --- a/drivers/interconnect/qcom/qcs404.c +++ b/drivers/interconnect/qcom/qcs404.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "icc-rpm.h" @@ -101,6 +102,11 @@ static struct qcom_icc_node mas_apps_proc = { .buswidth = 8, .mas_rpm_id = 0, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 0, .num_links = ARRAY_SIZE(mas_apps_proc_links), .links = mas_apps_proc_links, }; @@ -116,6 +122,11 @@ static struct qcom_icc_node mas_oxili = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 2, .num_links = ARRAY_SIZE(mas_oxili_links), .links = mas_oxili_links, }; @@ -131,6 +142,11 @@ static struct qcom_icc_node mas_mdp = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 1, + .qos.qos_port = 4, .num_links = ARRAY_SIZE(mas_mdp_links), .links = mas_mdp_links, }; @@ -145,6 +161,10 @@ static struct qcom_icc_node mas_snoc_bimc_1 = { .buswidth = 8, .mas_rpm_id = 76, .slv_rpm_id = -1, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .qos.qos_port = 5, .num_links = ARRAY_SIZE(mas_snoc_bimc_1_links), .links = mas_snoc_bimc_1_links, }; @@ -160,6 +180,11 @@ static struct qcom_icc_node mas_tcu_0 = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 0, + .qos.prio_level = 2, + .qos.qos_port = 6, .num_links = ARRAY_SIZE(mas_tcu_0_links), .links = mas_tcu_0_links, }; @@ -174,6 +199,8 @@ static struct qcom_icc_node mas_spdm = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, .num_links = ARRAY_SIZE(mas_spdm_links), .links = mas_spdm_links, }; @@ -231,6 +258,11 @@ static struct qcom_icc_node mas_crypto = { .buswidth = 8, .mas_rpm_id = 23, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 0, .num_links = ARRAY_SIZE(mas_crypto_links), .links = mas_crypto_links, }; @@ -287,6 +319,11 @@ static struct qcom_icc_node mas_qpic = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 14, .num_links = ARRAY_SIZE(mas_qpic_links), .links = mas_qpic_links, }; @@ -301,6 +338,11 @@ static struct qcom_icc_node mas_qdss_bam = { .buswidth = 4, .mas_rpm_id 
= -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 1, .num_links = ARRAY_SIZE(mas_qdss_bam_links), .links = mas_qdss_bam_links, }; @@ -348,6 +390,11 @@ static struct qcom_icc_node mas_qdss_etr = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 0, .num_links = ARRAY_SIZE(mas_qdss_etr_links), .links = mas_qdss_etr_links, }; @@ -363,6 +410,11 @@ static struct qcom_icc_node mas_emac = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 17, .num_links = ARRAY_SIZE(mas_emac_links), .links = mas_emac_links, }; @@ -378,6 +430,11 @@ static struct qcom_icc_node mas_pcie = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 8, .num_links = ARRAY_SIZE(mas_pcie_links), .links = mas_pcie_links, }; @@ -393,6 +450,11 @@ static struct qcom_icc_node mas_usb3 = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 1, + .qos.prio_level = 1, + .qos.qos_port = 16, .num_links = ARRAY_SIZE(mas_usb3_links), .links = mas_usb3_links, }; @@ -491,6 +553,8 @@ static struct qcom_icc_node pcnoc_s_2 = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, .num_links = ARRAY_SIZE(pcnoc_s_2_links), .links = pcnoc_s_2_links, }; @@ -626,6 +690,8 @@ static struct qcom_icc_node qdss_int = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, .num_links = ARRAY_SIZE(qdss_int_links), .links = qdss_int_links, }; @@ -704,6 +770,8 @@ static struct qcom_icc_node slv_spdm = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_pdm = { @@ -752,6 +820,8 @@ static struct qcom_icc_node slv_disp_ss_cfg = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_gpu_cfg = { @@ -760,6 +830,8 @@ static struct qcom_icc_node slv_gpu_cfg = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_blsp_1 = { @@ -784,6 +856,8 @@ static struct qcom_icc_node slv_pcie = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_ethernet = { @@ -792,6 +866,8 @@ static struct qcom_icc_node slv_ethernet = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_blsp_2 = { @@ -816,6 +892,8 @@ static struct qcom_icc_node slv_tcu = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_pmic_arb = { @@ -894,6 +972,8 @@ static struct qcom_icc_node slv_kpss_ahb = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_wcss = { @@ 
-954,6 +1034,8 @@ static struct qcom_icc_node slv_cats_0 = { .buswidth = 16, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_cats_1 = { @@ -962,6 +1044,8 @@ static struct qcom_icc_node slv_cats_1 = { .buswidth = 8, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node slv_lpass = { @@ -970,6 +1054,8 @@ static struct qcom_icc_node slv_lpass = { .buswidth = 4, .mas_rpm_id = -1, .slv_rpm_id = -1, + .qos.ap_owned = true, + .qos.qos_mode = NOC_QOS_MODE_INVALID, }; static struct qcom_icc_node * const qcs404_bimc_nodes[] = { @@ -982,10 +1068,22 @@ static struct qcom_icc_node * const qcs404_bimc_nodes[] = { [SLAVE_BIMC_SNOC] = &slv_bimc_snoc, }; +static const struct regmap_config qcs404_bimc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x80000, + .fast_io = true, +}; + static const struct qcom_icc_desc qcs404_bimc = { - .bus_clk_desc = &bimc_clk, + .type = QCOM_ICC_BIMC, .nodes = qcs404_bimc_nodes, .num_nodes = ARRAY_SIZE(qcs404_bimc_nodes), + .bus_clk_desc = &bimc_clk, + .regmap_cfg = &qcs404_bimc_regmap_config, + .qos_offset = 0x8000, + .ab_coeff = 153, }; static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = { @@ -1037,10 +1135,22 @@ static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = { [SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc, }; +static const struct regmap_config qcs404_pcnoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x15080, + .fast_io = true, +}; + static const struct qcom_icc_desc qcs404_pcnoc = { - .bus_clk_desc = &bus_0_clk, + .type = QCOM_ICC_NOC, .nodes = qcs404_pcnoc_nodes, .num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes), + .bus_clk_desc = &bus_0_clk, + .qos_offset = 0x7000, + .keep_alive = true, + .regmap_cfg = &qcs404_pcnoc_regmap_config, }; static struct qcom_icc_node * const qcs404_snoc_nodes[] = { @@ -1066,10 +1176,21 @@ static struct qcom_icc_node * const qcs404_snoc_nodes[] = { [SLAVE_LPASS] = &slv_lpass, }; +static const struct regmap_config qcs404_snoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x23080, + .fast_io = true, +}; + static const struct qcom_icc_desc qcs404_snoc = { - .bus_clk_desc = &bus_1_clk, + .type = QCOM_ICC_NOC, .nodes = qcs404_snoc_nodes, .num_nodes = ARRAY_SIZE(qcs404_snoc_nodes), + .bus_clk_desc = &bus_1_clk, + .qos_offset = 0x11000, + .regmap_cfg = &qcs404_snoc_regmap_config, }; diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c index b321c3009acbac..4236a43dc256f9 100644 --- a/drivers/interconnect/qcom/sm8350.c +++ b/drivers/interconnect/qcom/sm8350.c @@ -628,60 +628,6 @@ static struct qcom_icc_node xm_gic = { .links = { SM8350_SLAVE_SNOC_GEM_NOC_GC }, }; -static struct qcom_icc_node qnm_mnoc_hf_disp = { - .name = "qnm_mnoc_hf_disp", - .id = SM8350_MASTER_MNOC_HF_MEM_NOC_DISP, - .channels = 2, - .buswidth = 32, - .num_links = 1, - .links = { SM8350_SLAVE_LLCC_DISP }, -}; - -static struct qcom_icc_node qnm_mnoc_sf_disp = { - .name = "qnm_mnoc_sf_disp", - .id = SM8350_MASTER_MNOC_SF_MEM_NOC_DISP, - .channels = 2, - .buswidth = 32, - .num_links = 1, - .links = { SM8350_SLAVE_LLCC_DISP }, -}; - -static struct qcom_icc_node llcc_mc_disp = { - .name = "llcc_mc_disp", - .id = SM8350_MASTER_LLCC_DISP, - .channels = 4, - .buswidth = 4, - .num_links = 1, - .links = { SM8350_SLAVE_EBI1_DISP }, -}; - -static struct 
qcom_icc_node qxm_mdp0_disp = { - .name = "qxm_mdp0_disp", - .id = SM8350_MASTER_MDP0_DISP, - .channels = 1, - .buswidth = 32, - .num_links = 1, - .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP }, -}; - -static struct qcom_icc_node qxm_mdp1_disp = { - .name = "qxm_mdp1_disp", - .id = SM8350_MASTER_MDP1_DISP, - .channels = 1, - .buswidth = 32, - .num_links = 1, - .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP }, -}; - -static struct qcom_icc_node qxm_rot_disp = { - .name = "qxm_rot_disp", - .id = SM8350_MASTER_ROTATOR_DISP, - .channels = 1, - .buswidth = 32, - .num_links = 1, - .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP }, -}; - static struct qcom_icc_node qns_a1noc_snoc = { .name = "qns_a1noc_snoc", .id = SM8350_SLAVE_A1NOC_SNOC, @@ -1320,40 +1266,6 @@ static struct qcom_icc_node srvc_snoc = { .buswidth = 4, }; -static struct qcom_icc_node qns_llcc_disp = { - .name = "qns_llcc_disp", - .id = SM8350_SLAVE_LLCC_DISP, - .channels = 4, - .buswidth = 16, - .num_links = 1, - .links = { SM8350_MASTER_LLCC_DISP }, -}; - -static struct qcom_icc_node ebi_disp = { - .name = "ebi_disp", - .id = SM8350_SLAVE_EBI1_DISP, - .channels = 4, - .buswidth = 4, -}; - -static struct qcom_icc_node qns_mem_noc_hf_disp = { - .name = "qns_mem_noc_hf_disp", - .id = SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP, - .channels = 2, - .buswidth = 32, - .num_links = 1, - .links = { SM8350_MASTER_MNOC_HF_MEM_NOC_DISP }, -}; - -static struct qcom_icc_node qns_mem_noc_sf_disp = { - .name = "qns_mem_noc_sf_disp", - .id = SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP, - .channels = 2, - .buswidth = 32, - .num_links = 1, - .links = { SM8350_MASTER_MNOC_SF_MEM_NOC_DISP }, -}; - static struct qcom_icc_bcm bcm_acv = { .name = "ACV", .enable_mask = BIT(3), @@ -1583,55 +1495,6 @@ static struct qcom_icc_bcm bcm_sn14 = { .nodes = { &qns_pcie_mem_noc }, }; -static struct qcom_icc_bcm bcm_acv_disp = { - .name = "ACV", - .keepalive = false, - .num_nodes = 1, - .nodes = { &ebi_disp }, -}; - -static struct qcom_icc_bcm bcm_mc0_disp = { - .name = "MC0", - .keepalive = false, - .num_nodes = 1, - .nodes = { &ebi_disp }, -}; - -static struct qcom_icc_bcm bcm_mm0_disp = { - .name = "MM0", - .keepalive = false, - .num_nodes = 1, - .nodes = { &qns_mem_noc_hf_disp }, -}; - -static struct qcom_icc_bcm bcm_mm1_disp = { - .name = "MM1", - .keepalive = false, - .num_nodes = 2, - .nodes = { &qxm_mdp0_disp, &qxm_mdp1_disp }, -}; - -static struct qcom_icc_bcm bcm_mm4_disp = { - .name = "MM4", - .keepalive = false, - .num_nodes = 1, - .nodes = { &qns_mem_noc_sf_disp }, -}; - -static struct qcom_icc_bcm bcm_mm5_disp = { - .name = "MM5", - .keepalive = false, - .num_nodes = 1, - .nodes = { &qxm_rot_disp }, -}; - -static struct qcom_icc_bcm bcm_sh0_disp = { - .name = "SH0", - .keepalive = false, - .num_nodes = 1, - .nodes = { &qns_llcc_disp }, -}; - static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { }; @@ -1785,7 +1648,6 @@ static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh2, &bcm_sh3, &bcm_sh4, - &bcm_sh0_disp, }; static struct qcom_icc_node * const gem_noc_nodes[] = { @@ -1808,9 +1670,6 @@ static struct qcom_icc_node * const gem_noc_nodes[] = { [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc, [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc, [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc, - [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp, - [MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp, - [SLAVE_LLCC_DISP] = &qns_llcc_disp, }; static const struct qcom_icc_desc sm8350_gem_noc = { @@ -1843,15 +1702,11 @@ static const struct qcom_icc_desc sm8350_lpass_ag_noc = { static 
struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, - &bcm_acv_disp, - &bcm_mc0_disp, }; static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI1] = &ebi, - [MASTER_LLCC_DISP] = &llcc_mc_disp, - [SLAVE_EBI1_DISP] = &ebi_disp, }; static const struct qcom_icc_desc sm8350_mc_virt = { @@ -1866,10 +1721,6 @@ static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm1, &bcm_mm4, &bcm_mm5, - &bcm_mm0_disp, - &bcm_mm1_disp, - &bcm_mm4_disp, - &bcm_mm5_disp, }; static struct qcom_icc_node * const mmss_noc_nodes[] = { @@ -1886,11 +1737,6 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = { [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf, [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf, [SLAVE_SERVICE_MNOC] = &srvc_mnoc, - [MASTER_MDP0_DISP] = &qxm_mdp0_disp, - [MASTER_MDP1_DISP] = &qxm_mdp1_disp, - [MASTER_ROTATOR_DISP] = &qxm_rot_disp, - [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp, - [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp, }; static const struct qcom_icc_desc sm8350_mmss_noc = { @@ -1965,6 +1811,7 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8350", .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/interconnect/qcom/sm8350.h b/drivers/interconnect/qcom/sm8350.h index 328d15238a0dbc..074c6131ab3674 100644 --- a/drivers/interconnect/qcom/sm8350.h +++ b/drivers/interconnect/qcom/sm8350.h @@ -154,15 +154,5 @@ #define SM8350_SLAVE_PCIE_1 143 #define SM8350_SLAVE_QDSS_STM 144 #define SM8350_SLAVE_TCU 145 -#define SM8350_MASTER_LLCC_DISP 146 -#define SM8350_MASTER_MNOC_HF_MEM_NOC_DISP 147 -#define SM8350_MASTER_MNOC_SF_MEM_NOC_DISP 148 -#define SM8350_MASTER_MDP0_DISP 149 -#define SM8350_MASTER_MDP1_DISP 150 -#define SM8350_MASTER_ROTATOR_DISP 151 -#define SM8350_SLAVE_EBI1_DISP 152 -#define SM8350_SLAVE_LLCC_DISP 153 -#define SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP 154 -#define SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP 155 #endif diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index a82f10054aec86..b3aa1f5d53218b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -151,7 +151,7 @@ config OF_IOMMU # IOMMU-agnostic DMA-mapping layer config IOMMU_DMA def_bool ARM64 || X86 || S390 - select DMA_OPS + select DMA_OPS_HELPERS select IOMMU_API select IOMMU_IOVA select IRQ_MSI_IOMMU @@ -424,6 +424,17 @@ config ARM_SMMU_V3_KUNIT_TEST Enable this option to unit-test arm-smmu-v3 driver functions. If unsure, say N. + +config TEGRA241_CMDQV + bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3" + depends on ACPI + help + Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The + CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues + support, except with virtualization capabilities. + + Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same + CMDQ-V extension. 
endif config S390_IOMMU diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 2d5945c982bde5..6386fa4556d9b8 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -43,9 +43,10 @@ int amd_iommu_enable_faulting(unsigned int cpu); extern int amd_iommu_guest_ir; extern enum io_pgtable_fmt amd_iommu_pgtable; extern int amd_iommu_gpt_level; +extern unsigned long amd_iommu_pgsize_bitmap; /* Protection domain ops */ -struct protection_domain *protection_domain_alloc(unsigned int type); +struct protection_domain *protection_domain_alloc(unsigned int type, int nid); void protection_domain_free(struct protection_domain *domain); struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev, struct mm_struct *mm); @@ -87,14 +88,10 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag); void amd_iommu_flush_all_caches(struct amd_iommu *iommu); void amd_iommu_update_and_flush_device_table(struct protection_domain *domain); void amd_iommu_domain_update(struct protection_domain *domain); -void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set); -void amd_iommu_domain_flush_complete(struct protection_domain *domain); void amd_iommu_domain_flush_pages(struct protection_domain *domain, u64 address, size_t size); void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data, ioasid_t pasid, u64 address, size_t size); -void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data, - ioasid_t pasid); #ifdef CONFIG_IRQ_REMAP int amd_iommu_create_irq_domain(struct amd_iommu *iommu); @@ -121,11 +118,6 @@ static inline bool check_feature2(u64 mask) return (amd_iommu_efr2 & mask); } -static inline int check_feature_gpt_level(void) -{ - return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK); -} - static inline bool amd_iommu_gt_ppr_supported(void) { return (check_feature(FEATURE_GT) && @@ -143,19 +135,6 @@ static inline void *iommu_phys_to_virt(unsigned long paddr) return phys_to_virt(__sme_clr(paddr)); } -static inline -void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root) -{ - domain->iop.root = (u64 *)(root & PAGE_MASK); - domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */ -} - -static inline -void amd_iommu_domain_clr_pt_root(struct protection_domain *domain) -{ - amd_iommu_domain_set_pt_root(domain, 0); -} - static inline int get_pci_sbdf_id(struct pci_dev *pdev) { int seg = pci_domain_nr(pdev->bus); @@ -185,7 +164,6 @@ static inline struct protection_domain *to_pdomain(struct iommu_domain *dom) } bool translation_pre_enabled(struct amd_iommu *iommu); -bool amd_iommu_is_attach_deferred(struct device *dev); int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line); #ifdef CONFIG_DMI diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 2b76b5dedc1d9b..601fb4ee69009e 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -8,6 +8,7 @@ #ifndef _ASM_X86_AMD_IOMMU_TYPES_H #define _ASM_X86_AMD_IOMMU_TYPES_H +#include #include #include #include @@ -95,26 +96,21 @@ #define FEATURE_GA BIT_ULL(7) #define FEATURE_HE BIT_ULL(8) #define FEATURE_PC BIT_ULL(9) -#define FEATURE_GATS_SHIFT (12) -#define FEATURE_GATS_MASK (3ULL) +#define FEATURE_GATS GENMASK_ULL(13, 12) +#define FEATURE_GLX GENMASK_ULL(15, 14) #define FEATURE_GAM_VAPIC BIT_ULL(21) +#define FEATURE_PASMAX GENMASK_ULL(36, 32) #define FEATURE_GIOSUP BIT_ULL(48) #define FEATURE_HASUP BIT_ULL(49) #define 
FEATURE_EPHSUP BIT_ULL(50) #define FEATURE_HDSUP BIT_ULL(52) #define FEATURE_SNP BIT_ULL(63) -#define FEATURE_PASID_SHIFT 32 -#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT) - -#define FEATURE_GLXVAL_SHIFT 14 -#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT) /* Extended Feature 2 Bits */ -#define FEATURE_SNPAVICSUP_SHIFT 5 -#define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT) +#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5) #define FEATURE_SNPAVICSUP_GAM(x) \ - ((x & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT == 0x1) + (FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1) /* Note: * The current driver only support 16-bit PASID. @@ -294,8 +290,13 @@ * that we support. * * 512GB Pages are not supported due to a hardware bug + * Page sizes >= the 52 bit max physical address of the CPU are not supported. */ -#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38)) +#define AMD_IOMMU_PGSIZES (GENMASK_ULL(51, 12) ^ SZ_512G) + +/* Special mode where page-sizes are limited to 4 KiB */ +#define AMD_IOMMU_PGSIZES_4K (PAGE_SIZE) + /* 4K, 2MB, 1G page sizes are supported */ #define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30)) @@ -419,10 +420,6 @@ #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) #define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) -#define DTE_GCR3_INDEX_A 0 -#define DTE_GCR3_INDEX_B 1 -#define DTE_GCR3_INDEX_C 1 - #define DTE_GCR3_SHIFT_A 58 #define DTE_GCR3_SHIFT_B 16 #define DTE_GCR3_SHIFT_C 43 @@ -527,7 +524,7 @@ struct amd_irte_ops; #define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0) #define io_pgtable_to_data(x) \ - container_of((x), struct amd_io_pgtable, iop) + container_of((x), struct amd_io_pgtable, pgtbl) #define io_pgtable_ops_to_data(x) \ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) @@ -537,7 +534,7 @@ struct amd_irte_ops; struct protection_domain, iop) #define io_pgtable_cfg_to_data(x) \ - container_of((x), struct amd_io_pgtable, pgtbl_cfg) + container_of((x), struct amd_io_pgtable, pgtbl.cfg) struct gcr3_tbl_info { u64 *gcr3_tbl; /* Guest CR3 table */ @@ -547,8 +544,7 @@ struct gcr3_tbl_info { }; struct amd_io_pgtable { - struct io_pgtable_cfg pgtbl_cfg; - struct io_pgtable iop; + struct io_pgtable pgtbl; int mode; u64 *root; u64 *pgd; /* v2 pgtable pgd pointer */ @@ -580,7 +576,6 @@ struct protection_domain { struct amd_io_pgtable iop; spinlock_t lock; /* mostly used to lock the page table*/ u16 id; /* the domain id written to the device table */ - int nid; /* Node ID */ enum protection_domain_mode pd_mode; /* Track page table type */ bool dirty_tracking; /* dirty tracking is enabled in the domain */ unsigned dev_cnt; /* devices assigned to this domain */ diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index c89d85b54a1a58..43131c3a21726f 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -192,6 +192,8 @@ bool amdr_ivrs_remap_support __read_mostly; bool amd_iommu_force_isolation __read_mostly; +unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES; + /* * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap * to know which ones are already in use. 
@@ -2042,14 +2044,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) int glxval; u64 pasmax; - pasmax = amd_iommu_efr & FEATURE_PASID_MASK; - pasmax >>= FEATURE_PASID_SHIFT; + pasmax = FIELD_GET(FEATURE_PASMAX, amd_iommu_efr); iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1; BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK); - glxval = amd_iommu_efr & FEATURE_GLXVAL_MASK; - glxval >>= FEATURE_GLXVAL_SHIFT; + glxval = FIELD_GET(FEATURE_GLX, amd_iommu_efr); if (amd_iommu_max_glx_val == -1) amd_iommu_max_glx_val = glxval; @@ -3088,7 +3088,7 @@ static int __init early_amd_iommu_init(void) /* 5 level guest page table */ if (cpu_feature_enabled(X86_FEATURE_LA57) && - check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL) + FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL) amd_iommu_gpt_level = PAGE_MODE_5_LEVEL; /* Disable any previously enabled IOMMUs */ @@ -3494,6 +3494,12 @@ static int __init parse_amd_iommu_options(char *str) amd_iommu_pgtable = AMD_IOMMU_V2; } else if (strncmp(str, "irtcachedis", 11) == 0) { amd_iommu_irtcachedis = true; + } else if (strncmp(str, "nohugepages", 11) == 0) { + pr_info("Restricting V1 page-sizes to 4KiB"); + amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_4K; + } else if (strncmp(str, "v2_pgsizes_only", 15) == 0) { + pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB"); + amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_V2; } else { pr_notice("Unknown option - '%s'\n", str); } diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 1074ee25064d06..804b788f3f167d 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -24,27 +24,6 @@ #include "amd_iommu.h" #include "../iommu-pages.h" -static void v1_tlb_flush_all(void *cookie) -{ -} - -static void v1_tlb_flush_walk(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ -} - -static void v1_tlb_add_page(struct iommu_iotlb_gather *gather, - unsigned long iova, size_t granule, - void *cookie) -{ -} - -static const struct iommu_flush_ops v1_flush_ops = { - .tlb_flush_all = v1_tlb_flush_all, - .tlb_flush_walk = v1_tlb_flush_walk, - .tlb_add_page = v1_tlb_add_page, -}; - /* * Helper function to get the first pte of a large mapping */ @@ -132,56 +111,40 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist) } } -void amd_iommu_domain_set_pgtable(struct protection_domain *domain, - u64 *root, int mode) -{ - u64 pt_root; - - /* lowest 3 bits encode pgtable mode */ - pt_root = mode & 7; - pt_root |= (u64)root; - - amd_iommu_domain_set_pt_root(domain, pt_root); -} - /* * This function is used to add another level to an IO page table. Adding * another level increases the size of the address space by 9 bits to a size up * to 64 bits. 
*/ -static bool increase_address_space(struct protection_domain *domain, +static bool increase_address_space(struct amd_io_pgtable *pgtable, unsigned long address, gfp_t gfp) { + struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; + struct protection_domain *domain = + container_of(pgtable, struct protection_domain, iop); unsigned long flags; bool ret = true; u64 *pte; - pte = iommu_alloc_page_node(domain->nid, gfp); + pte = iommu_alloc_page_node(cfg->amd.nid, gfp); if (!pte) return false; spin_lock_irqsave(&domain->lock, flags); - if (address <= PM_LEVEL_SIZE(domain->iop.mode)) + if (address <= PM_LEVEL_SIZE(pgtable->mode)) goto out; ret = false; - if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL)) + if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL)) goto out; - *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root)); + *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root)); - domain->iop.root = pte; - domain->iop.mode += 1; + pgtable->root = pte; + pgtable->mode += 1; amd_iommu_update_and_flush_device_table(domain); - amd_iommu_domain_flush_complete(domain); - - /* - * Device Table needs to be updated and flushed before the new root can - * be published. - */ - amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode); pte = NULL; ret = true; @@ -193,30 +156,31 @@ static bool increase_address_space(struct protection_domain *domain, return ret; } -static u64 *alloc_pte(struct protection_domain *domain, +static u64 *alloc_pte(struct amd_io_pgtable *pgtable, unsigned long address, unsigned long page_size, u64 **pte_page, gfp_t gfp, bool *updated) { + struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; int level, end_lvl; u64 *pte, *page; BUG_ON(!is_power_of_2(page_size)); - while (address > PM_LEVEL_SIZE(domain->iop.mode)) { + while (address > PM_LEVEL_SIZE(pgtable->mode)) { /* * Return an error if there is no memory to update the * page-table. 
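
The increase_address_space() comment above notes that each added level widens the IOVA space by 9 bits. A stand-alone sketch of that geometry (simplified restatements of the driver's PM_LEVEL_SHIFT()/PM_LEVEL_INDEX() helpers, not the driver code itself) grows the mode one level at a time until it covers a sample address, then prints the per-level table indices:

/* User-space sketch of the v1 page-table geometry, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define LEVEL_SHIFT(lvl)	(12 + 9 * (lvl))
#define LEVEL_SIZE(mode)	((1ULL << LEVEL_SHIFT(mode)) - 1)
#define LEVEL_INDEX(lvl, iova)	(((iova) >> LEVEL_SHIFT(lvl)) & 0x1ffULL)

int main(void)
{
	uint64_t iova = 0x0000012345678000ULL;
	int mode, lvl;

	/*
	 * Grow the table until it covers the target IOVA, the way
	 * increase_address_space() adds one level at a time.
	 */
	for (mode = 3; iova > LEVEL_SIZE(mode); mode++)
		;
	printf("IOVA 0x%llx needs a mode-%d table (covers up to 2^%d - 1)\n",
	       (unsigned long long)iova, mode, LEVEL_SHIFT(mode));

	/* Walk the per-level indices, top level first. */
	for (lvl = mode - 1; lvl >= 0; lvl--)
		printf("  level %d index: %llu\n", lvl,
		       (unsigned long long)LEVEL_INDEX(lvl, iova));
	return 0;
}
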
*/ - if (!increase_address_space(domain, address, gfp)) + if (!increase_address_space(pgtable, address, gfp)) return NULL; } - level = domain->iop.mode - 1; - pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)]; + level = pgtable->mode - 1; + pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; address = PAGE_SIZE_ALIGN(address, page_size); end_lvl = PAGE_SIZE_LEVEL(page_size); @@ -251,7 +215,7 @@ static u64 *alloc_pte(struct protection_domain *domain, if (!IOMMU_PTE_PRESENT(__pte) || pte_level == PAGE_MODE_NONE) { - page = iommu_alloc_page_node(domain->nid, gfp); + page = iommu_alloc_page_node(cfg->amd.nid, gfp); if (!page) return NULL; @@ -365,7 +329,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) { - struct protection_domain *dom = io_pgtable_ops_to_domain(ops); + struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); LIST_HEAD(freelist); bool updated = false; u64 __pte, *pte; @@ -382,7 +346,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova, while (pgcount > 0) { count = PAGE_SIZE_PTE_COUNT(pgsize); - pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated); + pte = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated); ret = -ENOMEM; if (!pte) @@ -419,6 +383,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova, out: if (updated) { + struct protection_domain *dom = io_pgtable_ops_to_domain(ops); unsigned long flags; spin_lock_irqsave(&dom->lock, flags); @@ -560,27 +525,17 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops, */ static void v1_free_pgtable(struct io_pgtable *iop) { - struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop); - struct protection_domain *dom; + struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl); LIST_HEAD(freelist); if (pgtable->mode == PAGE_MODE_NONE) return; - dom = container_of(pgtable, struct protection_domain, iop); - /* Page-table is not visible to IOMMU anymore, so free it */ BUG_ON(pgtable->mode < PAGE_MODE_NONE || pgtable->mode > PAGE_MODE_6_LEVEL); free_sub_pt(pgtable->root, pgtable->mode, &freelist); - - /* Update data structure */ - amd_iommu_domain_clr_pt_root(dom); - - /* Make changes visible to IOMMUs */ - amd_iommu_domain_update(dom); - iommu_put_pages_list(&freelist); } @@ -588,17 +543,21 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo { struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg); - cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES; + pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL); + if (!pgtable->root) + return NULL; + pgtable->mode = PAGE_MODE_3_LEVEL; + + cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap; cfg->ias = IOMMU_IN_ADDR_BIT_SIZE; cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE; - cfg->tlb = &v1_flush_ops; - pgtable->iop.ops.map_pages = iommu_v1_map_pages; - pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages; - pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys; - pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty; + pgtable->pgtbl.ops.map_pages = iommu_v1_map_pages; + pgtable->pgtbl.ops.unmap_pages = iommu_v1_unmap_pages; + pgtable->pgtbl.ops.iova_to_phys = iommu_v1_iova_to_phys; + pgtable->pgtbl.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty; - return &pgtable->iop; + return &pgtable->pgtbl; } struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = { diff --git 
a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c index 664e91c88748ea..25b9042fa45307 100644 --- a/drivers/iommu/amd/io_pgtable_v2.c +++ b/drivers/iommu/amd/io_pgtable_v2.c @@ -51,7 +51,7 @@ static inline u64 set_pgtable_attr(u64 *page) u64 prot; prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER; - prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY; + prot |= IOMMU_PAGE_ACCESS; return (iommu_virt_to_phys(page) | prot); } @@ -233,8 +233,8 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) { - struct protection_domain *pdom = io_pgtable_ops_to_domain(ops); - struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg; + struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; u64 *pte; unsigned long map_size; unsigned long mapped_size = 0; @@ -251,7 +251,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova, while (mapped_size < size) { map_size = get_alloc_page_size(pgsize); - pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd, + pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd, iova, map_size, gfp, &updated); if (!pte) { ret = -EINVAL; @@ -266,8 +266,11 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova, } out: - if (updated) + if (updated) { + struct protection_domain *pdom = io_pgtable_ops_to_domain(ops); + amd_iommu_domain_flush_pages(pdom, o_iova, size); + } if (mapped) *mapped += mapped_size; @@ -281,7 +284,7 @@ static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops, struct iommu_iotlb_gather *gather) { struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); - struct io_pgtable_cfg *cfg = &pgtable->iop.cfg; + struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; unsigned long unmap_size; unsigned long unmapped = 0; size_t size = pgcount << __ffs(pgsize); @@ -323,30 +326,9 @@ static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned lo /* * ---------------------------------------------------- */ -static void v2_tlb_flush_all(void *cookie) -{ -} - -static void v2_tlb_flush_walk(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ -} - -static void v2_tlb_add_page(struct iommu_iotlb_gather *gather, - unsigned long iova, size_t granule, - void *cookie) -{ -} - -static const struct iommu_flush_ops v2_flush_ops = { - .tlb_flush_all = v2_tlb_flush_all, - .tlb_flush_walk = v2_tlb_flush_walk, - .tlb_add_page = v2_tlb_add_page, -}; - static void v2_free_pgtable(struct io_pgtable *iop) { - struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop); + struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl); if (!pgtable || !pgtable->pgd) return; @@ -359,26 +341,24 @@ static void v2_free_pgtable(struct io_pgtable *iop) static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) { struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg); - struct protection_domain *pdom = (struct protection_domain *)cookie; int ias = IOMMU_IN_ADDR_BIT_SIZE; - pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_ATOMIC); + pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL); if (!pgtable->pgd) return NULL; if (get_pgtable_level() == PAGE_MODE_5_LEVEL) ias = 57; - pgtable->iop.ops.map_pages = iommu_v2_map_pages; - pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages; - pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys; + pgtable->pgtbl.ops.map_pages 
= iommu_v2_map_pages; + pgtable->pgtbl.ops.unmap_pages = iommu_v2_unmap_pages; + pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys; - cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2, - cfg->ias = ias, - cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE, - cfg->tlb = &v2_flush_ops; + cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2; + cfg->ias = ias; + cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE; - return &pgtable->iop; + return &pgtable->pgtbl; } struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = { diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index b19e8c0f48fa25..8364cd6fa47d01 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -52,8 +52,6 @@ #define HT_RANGE_START (0xfd00000000ULL) #define HT_RANGE_END (0xffffffffffULL) -#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL - static DEFINE_SPINLOCK(pd_bitmap_lock); LIST_HEAD(ioapic_map); @@ -825,10 +823,12 @@ static void iommu_poll_events(struct amd_iommu *iommu) while (head != tail) { iommu_print_event(iommu, iommu->evt_buf + head); + + /* Update head pointer of hardware ring-buffer */ head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE; + writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); } - writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); } #ifdef CONFIG_IRQ_REMAP @@ -1247,6 +1247,22 @@ static int iommu_completion_wait(struct amd_iommu *iommu) return ret; } +static void domain_flush_complete(struct protection_domain *domain) +{ + int i; + + for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { + if (domain && !domain->dev_iommu[i]) + continue; + + /* + * Devices of this domain are behind this IOMMU + * We need to wait for completion of all commands. + */ + iommu_completion_wait(amd_iommus[i]); + } +} + static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) { struct iommu_cmd cmd; @@ -1483,7 +1499,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain, __domain_flush_pages(domain, address, size); /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ - amd_iommu_domain_flush_complete(domain); + domain_flush_complete(domain); return; } @@ -1523,7 +1539,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain, } /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ - amd_iommu_domain_flush_complete(domain); + domain_flush_complete(domain); } /* Flush the whole IO/TLB for a given protection domain - including PDE */ @@ -1549,27 +1565,11 @@ void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data, iommu_completion_wait(iommu); } -void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data, - ioasid_t pasid) +static void dev_flush_pasid_all(struct iommu_dev_data *dev_data, + ioasid_t pasid) { - amd_iommu_dev_flush_pasid_pages(dev_data, 0, - CMD_INV_IOMMU_ALL_PAGES_ADDRESS, pasid); -} - -void amd_iommu_domain_flush_complete(struct protection_domain *domain) -{ - int i; - - for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { - if (domain && !domain->dev_iommu[i]) - continue; - - /* - * Devices of this domain are behind this IOMMU - * We need to wait for completion of all commands. 
- */ - iommu_completion_wait(amd_iommus[i]); - } + amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0, + CMD_INV_IOMMU_ALL_PAGES_ADDRESS); } /* Flush the not present cache if it exists */ @@ -1589,15 +1589,7 @@ static void domain_flush_np_cache(struct protection_domain *domain, /* * This function flushes the DTEs for all devices in domain */ -static void domain_flush_devices(struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data; - - list_for_each_entry(dev_data, &domain->dev_list, list) - device_flush_dte(dev_data); -} - -static void update_device_table(struct protection_domain *domain) +void amd_iommu_update_and_flush_device_table(struct protection_domain *domain) { struct iommu_dev_data *dev_data; @@ -1607,12 +1599,11 @@ static void update_device_table(struct protection_domain *domain) set_dte_entry(iommu, dev_data); clone_aliases(iommu, dev_data->dev); } -} -void amd_iommu_update_and_flush_device_table(struct protection_domain *domain) -{ - update_device_table(domain); - domain_flush_devices(domain); + list_for_each_entry(dev_data, &domain->dev_list, list) + device_flush_dte(dev_data); + + domain_flush_complete(domain); } void amd_iommu_domain_update(struct protection_domain *domain) @@ -1816,7 +1807,7 @@ static int update_gcr3(struct iommu_dev_data *dev_data, else *pte = 0; - amd_iommu_dev_flush_pasid_all(dev_data, pasid); + dev_flush_pasid_all(dev_data, pasid); return 0; } @@ -1962,7 +1953,7 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid) } /* Update and flush DTE for the given device */ -void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set) +static void dev_update_dte(struct iommu_dev_data *dev_data, bool set) { struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); @@ -2032,6 +2023,7 @@ static int do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg; int ret = 0; /* Update data structures */ @@ -2039,8 +2031,8 @@ static int do_attach(struct iommu_dev_data *dev_data, list_add(&dev_data->list, &domain->dev_list); /* Update NUMA Node ID */ - if (domain->nid == NUMA_NO_NODE) - domain->nid = dev_to_node(dev_data->dev); + if (cfg->amd.nid == NUMA_NO_NODE) + cfg->amd.nid = dev_to_node(dev_data->dev); /* Do reference counting */ domain->dev_iommu[iommu->index] += 1; @@ -2062,7 +2054,7 @@ static void do_detach(struct iommu_dev_data *dev_data) struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); /* Clear DTE and flush the entry */ - amd_iommu_dev_update_dte(dev_data, false); + dev_update_dte(dev_data, false); /* Flush IOTLB and wait for the flushes to finish */ amd_iommu_domain_flush_all(domain); @@ -2185,11 +2177,12 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev) dev_err(dev, "Failed to initialize - trying to proceed anyway\n"); iommu_dev = ERR_PTR(ret); iommu_ignore_device(iommu, dev); - } else { - amd_iommu_set_pci_msi_domain(dev, iommu); - iommu_dev = &iommu->iommu; + goto out_err; } + amd_iommu_set_pci_msi_domain(dev, iommu); + iommu_dev = &iommu->iommu; + /* * If IOMMU and device supports PASID then it will contain max * supported PASIDs, else it will be zero. 
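
The probe-path comment above says max_pasids holds the supported PASID count only when both the IOMMU and the device support PASID, and the following hunk clamps it with pci_max_pasids(). A small user-space sketch of that clamp, with made-up capability widths (it does not claim to mirror pci_max_pasids() exactly):

/* User-space sketch, not driver code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t device_max_pasids(uint32_t iommu_max_pasids,
				  int pasid_cap_width)
{
	uint32_t dev_max;

	if (!iommu_max_pasids || pasid_cap_width <= 0)
		return 0;	/* no PASID support on one side */

	/* Crude stand-in for the endpoint's PASID capability width. */
	dev_max = (1u << pasid_cap_width) - 1;
	return dev_max < iommu_max_pasids ? dev_max : iommu_max_pasids;
}

int main(void)
{
	uint32_t iommu_max = (1u << 16) - 1;	/* 16-bit PASID space */

	printf("endpoint with 10 PASID bits -> %u\n",
	       device_max_pasids(iommu_max, 10));
	printf("endpoint without PASID      -> %u\n",
	       device_max_pasids(iommu_max, 0));
	return 0;
}
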
@@ -2201,8 +2194,12 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev) pci_max_pasids(to_pci_dev(dev))); } +out_err: iommu_completion_wait(iommu); + if (dev_is_pci(dev)) + pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT); + return iommu_dev; } @@ -2259,53 +2256,18 @@ static void cleanup_domain(struct protection_domain *domain) void protection_domain_free(struct protection_domain *domain) { - if (!domain) - return; - - if (domain->iop.pgtbl_cfg.tlb) - free_io_pgtable_ops(&domain->iop.iop.ops); - - if (domain->iop.root) - iommu_free_page(domain->iop.root); - - if (domain->id) - domain_id_free(domain->id); - + WARN_ON(!list_empty(&domain->dev_list)); + if (domain->domain.type & __IOMMU_DOMAIN_PAGING) + free_io_pgtable_ops(&domain->iop.pgtbl.ops); + domain_id_free(domain->id); kfree(domain); } -static int protection_domain_init_v1(struct protection_domain *domain, int mode) -{ - u64 *pt_root = NULL; - - BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL); - - if (mode != PAGE_MODE_NONE) { - pt_root = iommu_alloc_page(GFP_KERNEL); - if (!pt_root) - return -ENOMEM; - } - - domain->pd_mode = PD_MODE_V1; - amd_iommu_domain_set_pgtable(domain, pt_root, mode); - - return 0; -} - -static int protection_domain_init_v2(struct protection_domain *pdom) -{ - pdom->pd_mode = PD_MODE_V2; - pdom->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2; - - return 0; -} - -struct protection_domain *protection_domain_alloc(unsigned int type) +struct protection_domain *protection_domain_alloc(unsigned int type, int nid) { struct io_pgtable_ops *pgtbl_ops; struct protection_domain *domain; int pgtable; - int ret; domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) @@ -2313,12 +2275,12 @@ struct protection_domain *protection_domain_alloc(unsigned int type) domain->id = domain_id_alloc(); if (!domain->id) - goto out_err; + goto err_free; spin_lock_init(&domain->lock); INIT_LIST_HEAD(&domain->dev_list); INIT_LIST_HEAD(&domain->dev_data_list); - domain->nid = NUMA_NO_NODE; + domain->iop.pgtbl.cfg.amd.nid = nid; switch (type) { /* No need to allocate io pgtable ops in passthrough mode */ @@ -2336,31 +2298,30 @@ struct protection_domain *protection_domain_alloc(unsigned int type) pgtable = AMD_IOMMU_V1; break; default: - goto out_err; + goto err_id; } switch (pgtable) { case AMD_IOMMU_V1: - ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL); + domain->pd_mode = PD_MODE_V1; break; case AMD_IOMMU_V2: - ret = protection_domain_init_v2(domain); + domain->pd_mode = PD_MODE_V2; break; default: - ret = -EINVAL; - break; + goto err_id; } - if (ret) - goto out_err; - - pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain); + pgtbl_ops = + alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain); if (!pgtbl_ops) - goto out_err; + goto err_id; return domain; -out_err: - protection_domain_free(domain); +err_id: + domain_id_free(domain->id); +err_free: + kfree(domain); return NULL; } @@ -2398,17 +2359,18 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, if (dirty_tracking && !amd_iommu_hd_support(iommu)) return ERR_PTR(-EOPNOTSUPP); - domain = protection_domain_alloc(type); + domain = protection_domain_alloc(type, + dev ? 
dev_to_node(dev) : NUMA_NO_NODE); if (!domain) return ERR_PTR(-ENOMEM); domain->domain.geometry.aperture_start = 0; domain->domain.geometry.aperture_end = dma_max_address(); domain->domain.geometry.force_aperture = true; + domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap; if (iommu) { domain->domain.type = type; - domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap; domain->domain.ops = iommu->iommu.ops->default_domain_ops; if (dirty_tracking) @@ -2448,9 +2410,6 @@ void amd_iommu_domain_free(struct iommu_domain *dom) struct protection_domain *domain; unsigned long flags; - if (!dom) - return; - domain = to_pdomain(dom); spin_lock_irqsave(&domain->lock, flags); @@ -2462,6 +2421,29 @@ void amd_iommu_domain_free(struct iommu_domain *dom) protection_domain_free(domain); } +static int blocked_domain_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + + if (dev_data->domain) + detach_device(dev); + + /* Clear DTE and flush the entry */ + spin_lock(&dev_data->lock); + dev_update_dte(dev_data, false); + spin_unlock(&dev_data->lock); + + return 0; +} + +static struct iommu_domain blocked_domain = { + .type = IOMMU_DOMAIN_BLOCKED, + .ops = &(const struct iommu_domain_ops) { + .attach_dev = blocked_domain_attach_device, + } +}; + static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev) { @@ -2517,7 +2499,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, } /* Update device table */ - amd_iommu_dev_update_dte(dev_data, true); + dev_update_dte(dev_data, true); return ret; } @@ -2526,7 +2508,7 @@ static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom, unsigned long iova, size_t size) { struct protection_domain *domain = to_pdomain(dom); - struct io_pgtable_ops *ops = &domain->iop.iop.ops; + struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; if (ops->map_pages) domain_flush_np_cache(domain, iova, size); @@ -2538,7 +2520,7 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova, int iommu_prot, gfp_t gfp, size_t *mapped) { struct protection_domain *domain = to_pdomain(dom); - struct io_pgtable_ops *ops = &domain->iop.iop.ops; + struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; int prot = 0; int ret = -EINVAL; @@ -2585,7 +2567,7 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova struct iommu_iotlb_gather *gather) { struct protection_domain *domain = to_pdomain(dom); - struct io_pgtable_ops *ops = &domain->iop.iop.ops; + struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; size_t r; if ((domain->pd_mode == PD_MODE_V1) && @@ -2604,7 +2586,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) { struct protection_domain *domain = to_pdomain(dom); - struct io_pgtable_ops *ops = &domain->iop.iop.ops; + struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops; return ops->iova_to_phys(ops, iova); } @@ -2682,7 +2664,7 @@ static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain, struct iommu_dirty_bitmap *dirty) { struct protection_domain *pdomain = to_pdomain(domain); - struct io_pgtable_ops *ops = &pdomain->iop.iop.ops; + struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops; unsigned long lflags; if (!ops || !ops->read_and_clear_dirty) @@ -2757,7 +2739,7 @@ static void amd_iommu_get_resv_regions(struct device *dev, list_add_tail(®ion->list, head); } -bool amd_iommu_is_attach_deferred(struct device *dev) +static bool amd_iommu_is_attach_deferred(struct device 
*dev) { struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); @@ -2859,6 +2841,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev, const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, + .blocked_domain = &blocked_domain, .domain_alloc = amd_iommu_domain_alloc, .domain_alloc_user = amd_iommu_domain_alloc_user, .domain_alloc_sva = amd_iommu_domain_alloc_sva, @@ -2867,7 +2850,6 @@ const struct iommu_ops amd_iommu_ops = { .device_group = amd_iommu_device_group, .get_resv_regions = amd_iommu_get_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, - .pgsize_bitmap = AMD_IOMMU_PGSIZES, .def_domain_type = amd_iommu_def_domain_type, .dev_enable_feat = amd_iommu_dev_enable_feature, .dev_disable_feat = amd_iommu_dev_disable_feature, diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c index a68215f2b3e1d6..0657b9373be547 100644 --- a/drivers/iommu/amd/pasid.c +++ b/drivers/iommu/amd/pasid.c @@ -181,7 +181,7 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev, struct protection_domain *pdom; int ret; - pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA); + pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA, dev_to_node(dev)); if (!pdom) return ERR_PTR(-ENOMEM); diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile index 355173d1441d2f..dc98c88b48c827 100644 --- a/drivers/iommu/arm/arm-smmu-v3/Makefile +++ b/drivers/iommu/arm/arm-smmu-v3/Makefile @@ -2,5 +2,6 @@ obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o arm_smmu_v3-y := arm-smmu-v3.o arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o +arm_smmu_v3-$(CONFIG_TEGRA241_CMDQV) += tegra241-cmdqv.o obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c index cceb737a700126..84baa021370a86 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c @@ -30,6 +30,11 @@ static struct mm_struct sva_mm = { .pgd = (void *)0xdaedbeefdeadbeefULL, }; +enum arm_smmu_test_master_feat { + ARM_SMMU_MASTER_TEST_ATS = BIT(0), + ARM_SMMU_MASTER_TEST_STALL = BIT(1), +}; + static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry, const __le64 *used_bits, const __le64 *target, @@ -164,16 +169,22 @@ static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0; static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste, unsigned int s1dss, - const dma_addr_t dma_addr) + const dma_addr_t dma_addr, + enum arm_smmu_test_master_feat feat) { + bool ats_enabled = feat & ARM_SMMU_MASTER_TEST_ATS; + bool stall_enabled = feat & ARM_SMMU_MASTER_TEST_STALL; + struct arm_smmu_master master = { + .ats_enabled = ats_enabled, .cd_table.cdtab_dma = dma_addr, .cd_table.s1cdmax = 0xFF, .cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2, .smmu = &smmu, + .stall_enabled = stall_enabled, }; - arm_smmu_make_cdtable_ste(ste, &master, true, s1dss); + arm_smmu_make_cdtable_ste(ste, &master, ats_enabled, s1dss); } static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test) @@ -204,7 +215,7 @@ static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test) struct arm_smmu_ste ste; arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste, NUM_EXPECTED_SYNCS(2)); } @@ -214,7 +225,7 @@ static void 
arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test) struct arm_smmu_ste ste; arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste, NUM_EXPECTED_SYNCS(2)); } @@ -224,7 +235,7 @@ static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test) struct arm_smmu_ste ste; arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste, NUM_EXPECTED_SYNCS(3)); } @@ -234,7 +245,7 @@ static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test) struct arm_smmu_ste ste; arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste, NUM_EXPECTED_SYNCS(3)); } @@ -245,9 +256,9 @@ static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test) struct arm_smmu_ste s1dss_bypass; arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); /* * Flipping s1dss on a CD table STE only involves changes to the second @@ -265,7 +276,7 @@ arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test) struct arm_smmu_ste s1dss_bypass; arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition( test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2)); } @@ -276,16 +287,20 @@ arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test) struct arm_smmu_ste s1dss_bypass; arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition( test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2)); } static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste, - bool ats_enabled) + enum arm_smmu_test_master_feat feat) { + bool ats_enabled = feat & ARM_SMMU_MASTER_TEST_ATS; + bool stall_enabled = feat & ARM_SMMU_MASTER_TEST_STALL; struct arm_smmu_master master = { + .ats_enabled = ats_enabled, .smmu = &smmu, + .stall_enabled = stall_enabled, }; struct io_pgtable io_pgtable = {}; struct arm_smmu_domain smmu_domain = { @@ -308,7 +323,7 @@ static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test) { struct arm_smmu_ste ste; - arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste, NUM_EXPECTED_SYNCS(2)); } @@ -317,7 +332,7 @@ static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test) { struct arm_smmu_ste ste; - arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste, NUM_EXPECTED_SYNCS(2)); } @@ -326,7 +341,7 @@ static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test) { struct arm_smmu_ste ste; - arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_test_make_s2_ste(&ste, 
ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste, NUM_EXPECTED_SYNCS(2)); } @@ -335,7 +350,7 @@ static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test) { struct arm_smmu_ste ste; - arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste, NUM_EXPECTED_SYNCS(2)); } @@ -346,8 +361,8 @@ static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test) struct arm_smmu_ste s2_ste; arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0, - fake_cdtab_dma_addr); - arm_smmu_test_make_s2_ste(&s2_ste, true); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); + arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste, NUM_EXPECTED_SYNCS(3)); } @@ -358,8 +373,8 @@ static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test) struct arm_smmu_ste s2_ste; arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0, - fake_cdtab_dma_addr); - arm_smmu_test_make_s2_ste(&s2_ste, true); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); + arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste, NUM_EXPECTED_SYNCS(3)); } @@ -375,9 +390,9 @@ static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test) * s1 dss field in the same update. */ arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, - fake_cdtab_dma_addr); + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS, - 0x4B4B4b4B4B); + 0x4B4B4b4B4B, ARM_SMMU_MASTER_TEST_ATS); arm_smmu_v3_test_ste_expect_non_hitless_transition( test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3)); } @@ -503,6 +518,30 @@ static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd, arm_smmu_make_sva_cd(cd, &master, NULL, asid); } +static void arm_smmu_v3_write_ste_test_s1_to_s2_stall(struct kunit *test) +{ + struct arm_smmu_ste s1_ste; + struct arm_smmu_ste s2_ste; + + arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_STALL); + arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_STALL); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_s2_to_s1_stall(struct kunit *test) +{ + struct arm_smmu_ste s1_ste; + struct arm_smmu_ste s2_ste; + + arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_STALL); + arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_STALL); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste, + NUM_EXPECTED_SYNCS(3)); +} + static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test) { struct arm_smmu_cd cd = {}; @@ -547,6 +586,8 @@ static struct kunit_case arm_smmu_v3_test_cases[] = { KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless), KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear), KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2_stall), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1_stall), KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear), KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release), {}, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index ed2b106e02dd10..737c5b88235510 100644 --- 
a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -346,14 +346,30 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) return 0; } -static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu) +static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_ent *ent) { - return &smmu->cmdq; + struct arm_smmu_cmdq *cmdq = NULL; + + if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq) + cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent); + + return cmdq ?: &smmu->cmdq; +} + +static bool arm_smmu_cmdq_needs_busy_polling(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq) +{ + if (cmdq == &smmu->cmdq) + return false; + + return smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV; } static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu, - struct arm_smmu_queue *q, u32 prod) + struct arm_smmu_cmdq *cmdq, u32 prod) { + struct arm_smmu_queue *q = &cmdq->q; struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC, }; @@ -368,10 +384,12 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu, } arm_smmu_cmdq_build_cmd(cmd, &ent); + if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq)) + u64p_replace_bits(cmd, CMDQ_SYNC_0_CS_NONE, CMDQ_SYNC_0_CS); } -static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu, - struct arm_smmu_queue *q) +void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq) { static const char * const cerror_str[] = { [CMDQ_ERR_CERROR_NONE_IDX] = "No error", @@ -379,6 +397,7 @@ static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu, [CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch", [CMDQ_ERR_CERROR_ATC_INV_IDX] = "ATC invalidate timeout", }; + struct arm_smmu_queue *q = &cmdq->q; int i; u64 cmd[CMDQ_ENT_DWORDS]; @@ -421,13 +440,15 @@ static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu, /* Convert the erroneous command into a CMD_SYNC */ arm_smmu_cmdq_build_cmd(cmd, &cmd_sync); + if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq)) + u64p_replace_bits(cmd, CMDQ_SYNC_0_CS_NONE, CMDQ_SYNC_0_CS); queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); } static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) { - __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q); + __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq); } /* @@ -592,11 +613,11 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, /* Wait for the command queue to become non-full */ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq, struct arm_smmu_ll_queue *llq) { unsigned long flags; struct arm_smmu_queue_poll qp; - struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); int ret = 0; /* @@ -627,11 +648,11 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, * Must be called with the cmdq lock held in some capacity. */ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq, struct arm_smmu_ll_queue *llq) { int ret = 0; struct arm_smmu_queue_poll qp; - struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); queue_poll_init(smmu, &qp); @@ -651,10 +672,10 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, * Must be called with the cmdq lock held in some capacity. 
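
In the command-queue hunks above, secondary queues that must be busy-polled get their already-built CMD_SYNC rewritten so the CS field reads CS_NONE, via u64p_replace_bits(). A stand-alone sketch of that read-modify-write on a packed 64-bit command word; the field position and values here are illustrative, not the real CMDQ_SYNC_0_CS layout:

/* User-space sketch, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define CS_FIELD_SHIFT	12
#define CS_FIELD_MASK	(0x3ULL << CS_FIELD_SHIFT)
#define CS_NONE		0x0ULL
#define CS_SEV		0x2ULL

/* Clear the CS field and splice in a new value, leaving the rest intact. */
static void replace_cs(uint64_t *cmd, uint64_t cs)
{
	*cmd = (*cmd & ~CS_FIELD_MASK) | (cs << CS_FIELD_SHIFT);
}

int main(void)
{
	uint64_t cmd = 0x46ULL | (CS_SEV << CS_FIELD_SHIFT); /* opcode + CS=SEV */

	replace_cs(&cmd, CS_NONE);
	printf("patched command word: 0x%016llx\n", (unsigned long long)cmd);
	return 0;
}
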
*/ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq, struct arm_smmu_ll_queue *llq) { struct arm_smmu_queue_poll qp; - struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); u32 prod = llq->prod; int ret = 0; @@ -701,12 +722,14 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, } static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq, struct arm_smmu_ll_queue *llq) { - if (smmu->options & ARM_SMMU_OPT_MSIPOLL) - return __arm_smmu_cmdq_poll_until_msi(smmu, llq); + if (smmu->options & ARM_SMMU_OPT_MSIPOLL && + !arm_smmu_cmdq_needs_busy_polling(smmu, cmdq)) + return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq); - return __arm_smmu_cmdq_poll_until_consumed(smmu, llq); + return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq); } static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, @@ -743,13 +766,13 @@ static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, * CPU will appear before any of the commands from the other CPU. */ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq, u64 *cmds, int n, bool sync) { u64 cmd_sync[CMDQ_ENT_DWORDS]; u32 prod; unsigned long flags; bool owner; - struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); struct arm_smmu_ll_queue llq, head; int ret = 0; @@ -763,7 +786,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, while (!queue_has_space(&llq, n + sync)) { local_irq_restore(flags); - if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq)) + if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq)) dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); local_irq_save(flags); } @@ -789,7 +812,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); if (sync) { prod = queue_inc_prod_n(&llq, n); - arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod); + arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, cmdq, prod); queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); /* @@ -839,7 +862,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, /* 5. 
If we are inserting a CMD_SYNC, we must wait for it to complete */ if (sync) { llq.prod = queue_inc_prod_n(&llq, n); - ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); + ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq); if (ret) { dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n", @@ -874,7 +897,8 @@ static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, return -EINVAL; } - return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync); + return arm_smmu_cmdq_issue_cmdlist( + smmu, arm_smmu_get_cmdq(smmu, ent), cmd, 1, sync); } static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, @@ -889,21 +913,33 @@ static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu, return __arm_smmu_cmdq_issue_cmd(smmu, ent, true); } +static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_batch *cmds, + struct arm_smmu_cmdq_ent *ent) +{ + cmds->num = 0; + cmds->cmdq = arm_smmu_get_cmdq(smmu, ent); +} + static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu, struct arm_smmu_cmdq_batch *cmds, struct arm_smmu_cmdq_ent *cmd) { + bool unsupported_cmd = !arm_smmu_cmdq_supports_cmd(cmds->cmdq, cmd); + bool force_sync = (cmds->num == CMDQ_BATCH_ENTRIES - 1) && + (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC); int index; - if (cmds->num == CMDQ_BATCH_ENTRIES - 1 && - (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) { - arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); - cmds->num = 0; + if (force_sync || unsupported_cmd) { + arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, + cmds->num, true); + arm_smmu_cmdq_batch_init(smmu, cmds, cmd); } if (cmds->num == CMDQ_BATCH_ENTRIES) { - arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false); - cmds->num = 0; + arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, + cmds->num, false); + arm_smmu_cmdq_batch_init(smmu, cmds, cmd); } index = cmds->num * CMDQ_ENT_DWORDS; @@ -919,7 +955,8 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu, static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu, struct arm_smmu_cmdq_batch *cmds) { - return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); + return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, + cmds->num, true); } static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused, @@ -1012,7 +1049,8 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits) used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR | STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI | - STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R); + STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2S | + STRTAB_STE_2_S2R); used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK); } @@ -1170,7 +1208,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master, }, }; - cmds.num = 0; + arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd); for (i = 0; i < master->num_streams; i++) { cmd.cfgi.sid = master->streams[i].id; arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); @@ -1179,48 +1217,36 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master, arm_smmu_cmdq_batch_submit(smmu, &cmds); } -static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu, - struct arm_smmu_l1_ctx_desc *l1_desc) +static void arm_smmu_write_cd_l1_desc(struct arm_smmu_cdtab_l1 *dst, + dma_addr_t l2ptr_dma) { - size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3); + u64 val = (l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | CTXDESC_L1_DESC_V; - l1_desc->l2ptr = 
dmam_alloc_coherent(smmu->dev, size, - &l1_desc->l2ptr_dma, GFP_KERNEL); - if (!l1_desc->l2ptr) { - dev_warn(smmu->dev, - "failed to allocate context descriptor table\n"); - return -ENOMEM; - } - return 0; + /* The HW has 64 bit atomicity with stores to the L2 CD table */ + WRITE_ONCE(dst->l2ptr, cpu_to_le64(val)); } -static void arm_smmu_write_cd_l1_desc(__le64 *dst, - struct arm_smmu_l1_ctx_desc *l1_desc) +static dma_addr_t arm_smmu_cd_l1_get_desc(const struct arm_smmu_cdtab_l1 *src) { - u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | - CTXDESC_L1_DESC_V; - - /* The HW has 64 bit atomicity with stores to the L2 CD table */ - WRITE_ONCE(*dst, cpu_to_le64(val)); + return le64_to_cpu(src->l2ptr) & CTXDESC_L1_DESC_L2PTR_MASK; } struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, u32 ssid) { - struct arm_smmu_l1_ctx_desc *l1_desc; + struct arm_smmu_cdtab_l2 *l2; struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; - if (!cd_table->cdtab) + if (!arm_smmu_cdtab_allocated(cd_table)) return NULL; if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR) - return (struct arm_smmu_cd *)(cd_table->cdtab + - ssid * CTXDESC_CD_DWORDS); + return &cd_table->linear.table[ssid]; - l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES]; - if (!l1_desc->l2ptr) + l2 = cd_table->l2.l2ptrs[arm_smmu_cdtab_l1_idx(ssid)]; + if (!l2) return NULL; - return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES]; + return &l2->cds[arm_smmu_cdtab_l2_idx(ssid)]; } static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master, @@ -1232,24 +1258,25 @@ static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master, might_sleep(); iommu_group_mutex_assert(master->dev); - if (!cd_table->cdtab) { + if (!arm_smmu_cdtab_allocated(cd_table)) { if (arm_smmu_alloc_cd_tables(master)) return NULL; } if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) { - unsigned int idx = ssid / CTXDESC_L2_ENTRIES; - struct arm_smmu_l1_ctx_desc *l1_desc; + unsigned int idx = arm_smmu_cdtab_l1_idx(ssid); + struct arm_smmu_cdtab_l2 **l2ptr = &cd_table->l2.l2ptrs[idx]; - l1_desc = &cd_table->l1_desc[idx]; - if (!l1_desc->l2ptr) { - __le64 *l1ptr; + if (!*l2ptr) { + dma_addr_t l2ptr_dma; - if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc)) + *l2ptr = dma_alloc_coherent(smmu->dev, sizeof(**l2ptr), + &l2ptr_dma, GFP_KERNEL); + if (!*l2ptr) return NULL; - l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS; - arm_smmu_write_cd_l1_desc(l1ptr, l1_desc); + arm_smmu_write_cd_l1_desc(&cd_table->l2.l1tab[idx], + l2ptr_dma); /* An invalid L1CD can be cached */ arm_smmu_sync_cd(master, ssid, false); } @@ -1369,7 +1396,7 @@ void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid) struct arm_smmu_cd target = {}; struct arm_smmu_cd *cdptr; - if (!master->cd_table.cdtab) + if (!arm_smmu_cdtab_allocated(&master->cd_table)) return; cdptr = arm_smmu_get_cd_ptr(master, ssid); if (WARN_ON(!cdptr)) @@ -1391,74 +1418,75 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master) if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) || max_contexts <= CTXDESC_L2_ENTRIES) { cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR; - cd_table->num_l1_ents = max_contexts; + cd_table->linear.num_ents = max_contexts; - l1size = max_contexts * (CTXDESC_CD_DWORDS << 3); + l1size = max_contexts * sizeof(struct arm_smmu_cd), + cd_table->linear.table = dma_alloc_coherent(smmu->dev, l1size, + &cd_table->cdtab_dma, + GFP_KERNEL); + if (!cd_table->linear.table) + return -ENOMEM; } else { cd_table->s1fmt = 
STRTAB_STE_0_S1FMT_64K_L2; - cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts, - CTXDESC_L2_ENTRIES); + cd_table->l2.num_l1_ents = + DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES); - cd_table->l1_desc = devm_kcalloc(smmu->dev, cd_table->num_l1_ents, - sizeof(*cd_table->l1_desc), - GFP_KERNEL); - if (!cd_table->l1_desc) + cd_table->l2.l2ptrs = kcalloc(cd_table->l2.num_l1_ents, + sizeof(*cd_table->l2.l2ptrs), + GFP_KERNEL); + if (!cd_table->l2.l2ptrs) return -ENOMEM; - l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); - } - - cd_table->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cd_table->cdtab_dma, - GFP_KERNEL); - if (!cd_table->cdtab) { - dev_warn(smmu->dev, "failed to allocate context descriptor\n"); - ret = -ENOMEM; - goto err_free_l1; + l1size = cd_table->l2.num_l1_ents * sizeof(struct arm_smmu_cdtab_l1); + cd_table->l2.l1tab = dma_alloc_coherent(smmu->dev, l1size, + &cd_table->cdtab_dma, + GFP_KERNEL); + if (!cd_table->l2.l2ptrs) { + ret = -ENOMEM; + goto err_free_l2ptrs; + } } - return 0; -err_free_l1: - if (cd_table->l1_desc) { - devm_kfree(smmu->dev, cd_table->l1_desc); - cd_table->l1_desc = NULL; - } +err_free_l2ptrs: + kfree(cd_table->l2.l2ptrs); + cd_table->l2.l2ptrs = NULL; return ret; } static void arm_smmu_free_cd_tables(struct arm_smmu_master *master) { int i; - size_t size, l1size; struct arm_smmu_device *smmu = master->smmu; struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; - if (cd_table->l1_desc) { - size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3); - - for (i = 0; i < cd_table->num_l1_ents; i++) { - if (!cd_table->l1_desc[i].l2ptr) + if (cd_table->s1fmt != STRTAB_STE_0_S1FMT_LINEAR) { + for (i = 0; i < cd_table->l2.num_l1_ents; i++) { + if (!cd_table->l2.l2ptrs[i]) continue; - dmam_free_coherent(smmu->dev, size, - cd_table->l1_desc[i].l2ptr, - cd_table->l1_desc[i].l2ptr_dma); + dma_free_coherent(smmu->dev, + sizeof(*cd_table->l2.l2ptrs[i]), + cd_table->l2.l2ptrs[i], + arm_smmu_cd_l1_get_desc(&cd_table->l2.l1tab[i])); } - devm_kfree(smmu->dev, cd_table->l1_desc); - cd_table->l1_desc = NULL; + kfree(cd_table->l2.l2ptrs); - l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); + dma_free_coherent(smmu->dev, + cd_table->l2.num_l1_ents * + sizeof(struct arm_smmu_cdtab_l1), + cd_table->l2.l1tab, cd_table->cdtab_dma); } else { - l1size = cd_table->num_l1_ents * (CTXDESC_CD_DWORDS << 3); + dma_free_coherent(smmu->dev, + cd_table->linear.num_ents * + sizeof(struct arm_smmu_cd), + cd_table->linear.table, cd_table->cdtab_dma); } - - dmam_free_coherent(smmu->dev, l1size, cd_table->cdtab, cd_table->cdtab_dma); - cd_table->cdtab_dma = 0; - cd_table->cdtab = NULL; } /* Stream table manipulation functions */ -static void arm_smmu_write_strtab_l1_desc(__le64 *dst, dma_addr_t l2ptr_dma) +static void arm_smmu_write_strtab_l1_desc(struct arm_smmu_strtab_l1 *dst, + dma_addr_t l2ptr_dma) { u64 val = 0; @@ -1466,7 +1494,7 @@ static void arm_smmu_write_strtab_l1_desc(__le64 *dst, dma_addr_t l2ptr_dma) val |= l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; /* The HW has 64 bit atomicity with stores to the L2 STE table */ - WRITE_ONCE(*dst, cpu_to_le64(val)); + WRITE_ONCE(dst->l2ptr, cpu_to_le64(val)); } struct arm_smmu_ste_writer { @@ -1646,6 +1674,7 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, STRTAB_STE_2_S2ENDI | #endif STRTAB_STE_2_S2PTW | + (master->stall_enabled ? 
STRTAB_STE_2_S2S : 0) | STRTAB_STE_2_S2R); target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr & @@ -1670,52 +1699,61 @@ static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab, static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) { - size_t size; - void *strtab; dma_addr_t l2ptr_dma; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; - struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT]; + struct arm_smmu_strtab_l2 **l2table; - if (desc->l2ptr) + l2table = &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)]; + if (*l2table) return 0; - size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); - strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; - - desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &l2ptr_dma, - GFP_KERNEL); - if (!desc->l2ptr) { + *l2table = dmam_alloc_coherent(smmu->dev, sizeof(**l2table), + &l2ptr_dma, GFP_KERNEL); + if (!*l2table) { dev_err(smmu->dev, "failed to allocate l2 stream table for SID %u\n", sid); return -ENOMEM; } - arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT); - arm_smmu_write_strtab_l1_desc(strtab, l2ptr_dma); + arm_smmu_init_initial_stes((*l2table)->stes, + ARRAY_SIZE((*l2table)->stes)); + arm_smmu_write_strtab_l1_desc(&cfg->l2.l1tab[arm_smmu_strtab_l1_idx(sid)], + l2ptr_dma); return 0; } +static int arm_smmu_streams_cmp_key(const void *lhs, const struct rb_node *rhs) +{ + struct arm_smmu_stream *stream_rhs = + rb_entry(rhs, struct arm_smmu_stream, node); + const u32 *sid_lhs = lhs; + + if (*sid_lhs < stream_rhs->id) + return -1; + if (*sid_lhs > stream_rhs->id) + return 1; + return 0; +} + +static int arm_smmu_streams_cmp_node(struct rb_node *lhs, + const struct rb_node *rhs) +{ + return arm_smmu_streams_cmp_key( + &rb_entry(lhs, struct arm_smmu_stream, node)->id, rhs); +} + static struct arm_smmu_master * arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid) { struct rb_node *node; - struct arm_smmu_stream *stream; lockdep_assert_held(&smmu->streams_mutex); - node = smmu->streams.rb_node; - while (node) { - stream = rb_entry(node, struct arm_smmu_stream, node); - if (stream->id < sid) - node = node->rb_right; - else if (stream->id > sid) - node = node->rb_left; - else - return stream->master; - } - - return NULL; + node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key); + if (!node) + return NULL; + return rb_entry(node, struct arm_smmu_stream, node)->master; } /* IRQ and event handlers */ @@ -1739,10 +1777,6 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) return -EOPNOTSUPP; } - /* Stage-2 is always pinned at the moment */ - if (evt[1] & EVTQ_1_S2) - return -EFAULT; - if (!(evt[1] & EVTQ_1_STALL)) return -EOPNOTSUPP; @@ -2021,7 +2055,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master, arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd); - cmds.num = 0; + arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd); for (i = 0; i < master->num_streams; i++) { cmd.atc.sid = master->streams[i].id; arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd); @@ -2036,7 +2070,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master_domain *master_domain; int i; unsigned long flags; - struct arm_smmu_cmdq_ent cmd; + struct arm_smmu_cmdq_ent cmd = { + .opcode = CMDQ_OP_ATC_INV, + }; struct arm_smmu_cmdq_batch cmds; if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) @@ -2059,7 +2095,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, if 
(!atomic_read(&smmu_domain->nr_ats_masters)) return 0; - cmds.num = 0; + arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd); spin_lock_irqsave(&smmu_domain->devices_lock, flags); list_for_each_entry(master_domain, &smmu_domain->devices, @@ -2141,7 +2177,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, num_pages++; } - cmds.num = 0; + arm_smmu_cmdq_batch_init(smmu, &cmds, cmd); while (iova < end) { if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { @@ -2438,16 +2474,12 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { - unsigned int idx1, idx2; - /* Two-level walk */ - idx1 = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS; - idx2 = sid & ((1 << STRTAB_SPLIT) - 1); - return &cfg->l1_desc[idx1].l2ptr[idx2]; + return &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)] + ->stes[arm_smmu_strtab_l2_idx(sid)]; } else { /* Simple linear lookup */ - return (struct arm_smmu_ste *)&cfg - ->strtab[sid * STRTAB_STE_DWORDS]; + return &cfg->linear.table[sid]; } } @@ -3062,8 +3094,8 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags, return ERR_PTR(-EOPNOTSUPP); smmu_domain = arm_smmu_domain_alloc(); - if (!smmu_domain) - return ERR_PTR(-ENOMEM); + if (IS_ERR(smmu_domain)) + return ERR_CAST(smmu_domain); smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED; smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops; @@ -3147,12 +3179,9 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode) static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) { - unsigned long limit = smmu->strtab_cfg.num_l1_ents; - if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) - limit *= 1UL << STRTAB_SPLIT; - - return sid < limit; + return arm_smmu_strtab_l1_idx(sid) < smmu->strtab_cfg.l2.num_l1_ents; + return sid < smmu->strtab_cfg.linear.num_ents; } static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid) @@ -3173,8 +3202,6 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, { int i; int ret = 0; - struct arm_smmu_stream *new_stream, *cur_stream; - struct rb_node **new_node, *parent_node = NULL; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams), @@ -3185,9 +3212,9 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, mutex_lock(&smmu->streams_mutex); for (i = 0; i < fwspec->num_ids; i++) { + struct arm_smmu_stream *new_stream = &master->streams[i]; u32 sid = fwspec->ids[i]; - new_stream = &master->streams[i]; new_stream->id = sid; new_stream->master = master; @@ -3196,28 +3223,13 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, break; /* Insert into SID tree */ - new_node = &(smmu->streams.rb_node); - while (*new_node) { - cur_stream = rb_entry(*new_node, struct arm_smmu_stream, - node); - parent_node = *new_node; - if (cur_stream->id > new_stream->id) { - new_node = &((*new_node)->rb_left); - } else if (cur_stream->id < new_stream->id) { - new_node = &((*new_node)->rb_right); - } else { - dev_warn(master->dev, - "stream %u already in tree\n", - cur_stream->id); - ret = -EINVAL; - break; - } - } - if (ret) + if (rb_find_add(&new_stream->node, &smmu->streams, + arm_smmu_streams_cmp_node)) { + dev_warn(master->dev, "stream %u already in tree\n", + sid); + ret = -EINVAL; break; - - rb_link_node(&new_stream->node, parent_node, new_node); - rb_insert_color(&new_stream->node, &smmu->streams); + } } if (ret) 
{ @@ -3295,6 +3307,12 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) smmu->features & ARM_SMMU_FEAT_STALL_FORCE) master->stall_enabled = true; + if (dev_is_pci(dev)) { + unsigned int stu = __ffs(smmu->pgsize_bitmap); + + pci_prepare_ats(to_pci_dev(dev), stu); + } + return &smmu->iommu; err_free_master: @@ -3317,7 +3335,7 @@ static void arm_smmu_release_device(struct device *dev) arm_smmu_disable_pasid(master); arm_smmu_remove_master(master); - if (master->cd_table.cdtab) + if (arm_smmu_cdtab_allocated(&master->cd_table)) arm_smmu_free_cd_tables(master); kfree(master); } @@ -3507,12 +3525,10 @@ static struct iommu_dirty_ops arm_smmu_dirty_ops = { }; /* Probing and initialisation functions */ -static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, - struct arm_smmu_queue *q, - void __iomem *page, - unsigned long prod_off, - unsigned long cons_off, - size_t dwords, const char *name) +int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, + struct arm_smmu_queue *q, void __iomem *page, + unsigned long prod_off, unsigned long cons_off, + size_t dwords, const char *name) { size_t qsz; @@ -3550,9 +3566,9 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, return 0; } -static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) +int arm_smmu_cmdq_init(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq) { - struct arm_smmu_cmdq *cmdq = &smmu->cmdq; unsigned int nents = 1 << cmdq->q.llq.max_n_shift; atomic_set(&cmdq->owner_prod, 0); @@ -3577,7 +3593,7 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) if (ret) return ret; - ret = arm_smmu_cmdq_init(smmu); + ret = arm_smmu_cmdq_init(smmu, &smmu->cmdq); if (ret) return ret; @@ -3606,42 +3622,32 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) { - void *strtab; - u64 reg; - u32 size, l1size; + u32 l1size; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; + unsigned int last_sid_idx = + arm_smmu_strtab_l1_idx((1 << smmu->sid_bits) - 1); /* Calculate the L1 size, capped to the SIDSIZE. 
*/ - size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); - size = min(size, smmu->sid_bits - STRTAB_SPLIT); - cfg->num_l1_ents = 1 << size; - - size += STRTAB_SPLIT; - if (size < smmu->sid_bits) + cfg->l2.num_l1_ents = min(last_sid_idx + 1, STRTAB_MAX_L1_ENTRIES); + if (cfg->l2.num_l1_ents <= last_sid_idx) dev_warn(smmu->dev, "2-level strtab only covers %u/%u bits of SID\n", - size, smmu->sid_bits); + ilog2(cfg->l2.num_l1_ents * STRTAB_NUM_L2_STES), + smmu->sid_bits); - l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); - strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, - GFP_KERNEL); - if (!strtab) { + l1size = cfg->l2.num_l1_ents * sizeof(struct arm_smmu_strtab_l1); + cfg->l2.l1tab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->l2.l1_dma, + GFP_KERNEL); + if (!cfg->l2.l1tab) { dev_err(smmu->dev, "failed to allocate l1 stream table (%u bytes)\n", l1size); return -ENOMEM; } - cfg->strtab = strtab; - - /* Configure strtab_base_cfg for 2 levels */ - reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL); - reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size); - reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT); - cfg->strtab_base_cfg = reg; - cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents, - sizeof(*cfg->l1_desc), GFP_KERNEL); - if (!cfg->l1_desc) + cfg->l2.l2ptrs = devm_kcalloc(smmu->dev, cfg->l2.num_l1_ents, + sizeof(*cfg->l2.l2ptrs), GFP_KERNEL); + if (!cfg->l2.l2ptrs) return -ENOMEM; return 0; @@ -3649,50 +3655,36 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) { - void *strtab; - u64 reg; u32 size; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; - size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3); - strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma, - GFP_KERNEL); - if (!strtab) { + size = (1 << smmu->sid_bits) * sizeof(struct arm_smmu_ste); + cfg->linear.table = dmam_alloc_coherent(smmu->dev, size, + &cfg->linear.ste_dma, + GFP_KERNEL); + if (!cfg->linear.table) { dev_err(smmu->dev, "failed to allocate linear stream table (%u bytes)\n", size); return -ENOMEM; } - cfg->strtab = strtab; - cfg->num_l1_ents = 1 << smmu->sid_bits; + cfg->linear.num_ents = 1 << smmu->sid_bits; - /* Configure strtab_base_cfg for a linear table covering all SIDs */ - reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR); - reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits); - cfg->strtab_base_cfg = reg; - - arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents); + arm_smmu_init_initial_stes(cfg->linear.table, cfg->linear.num_ents); return 0; } static int arm_smmu_init_strtab(struct arm_smmu_device *smmu) { - u64 reg; int ret; if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) ret = arm_smmu_init_strtab_2lvl(smmu); else ret = arm_smmu_init_strtab_linear(smmu); - if (ret) return ret; - /* Set the strtab base address */ - reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK; - reg |= STRTAB_BASE_RA; - smmu->strtab_cfg.strtab_base = reg; - ida_init(&smmu->vmid_map); return 0; @@ -3709,7 +3701,14 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu) if (ret) return ret; - return arm_smmu_init_strtab(smmu); + ret = arm_smmu_init_strtab(smmu); + if (ret) + return ret; + + if (smmu->impl_ops && smmu->impl_ops->init_structures) + return smmu->impl_ops->init_structures(smmu); + + return 0; } static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val, @@ -3901,6 +3900,30 @@ static int 
arm_smmu_device_disable(struct arm_smmu_device *smmu) return ret; } +static void arm_smmu_write_strtab(struct arm_smmu_device *smmu) +{ + struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; + dma_addr_t dma; + u32 reg; + + if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { + reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, + STRTAB_BASE_CFG_FMT_2LVL) | + FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, + ilog2(cfg->l2.num_l1_ents) + STRTAB_SPLIT) | + FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT); + dma = cfg->l2.l1_dma; + } else { + reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, + STRTAB_BASE_CFG_FMT_LINEAR) | + FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits); + dma = cfg->linear.ste_dma; + } + writeq_relaxed((dma & STRTAB_BASE_ADDR_MASK) | STRTAB_BASE_RA, + smmu->base + ARM_SMMU_STRTAB_BASE); + writel_relaxed(reg, smmu->base + ARM_SMMU_STRTAB_BASE_CFG); +} + static int arm_smmu_device_reset(struct arm_smmu_device *smmu) { int ret; @@ -3936,10 +3959,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu) writel_relaxed(reg, smmu->base + ARM_SMMU_CR2); /* Stream table */ - writeq_relaxed(smmu->strtab_cfg.strtab_base, - smmu->base + ARM_SMMU_STRTAB_BASE); - writel_relaxed(smmu->strtab_cfg.strtab_base_cfg, - smmu->base + ARM_SMMU_STRTAB_BASE_CFG); + arm_smmu_write_strtab(smmu); /* Command queue */ writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); @@ -4026,6 +4046,14 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu) return ret; } + if (smmu->impl_ops && smmu->impl_ops->device_reset) { + ret = smmu->impl_ops->device_reset(smmu); + if (ret) { + dev_err(smmu->dev, "failed to reset impl\n"); + return ret; + } + } + return 0; } @@ -4315,18 +4343,55 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } #ifdef CONFIG_ACPI -static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu) +#ifdef CONFIG_TEGRA241_CMDQV +static void acpi_smmu_dsdt_probe_tegra241_cmdqv(struct acpi_iort_node *node, + struct arm_smmu_device *smmu) +{ + const char *uid = kasprintf(GFP_KERNEL, "%u", node->identifier); + struct acpi_device *adev; + + /* Look for an NVDA200C node whose _UID matches the SMMU node ID */ + adev = acpi_dev_get_first_match_dev("NVDA200C", uid, -1); + if (adev) { + /* Tegra241 CMDQV driver is responsible for put_device() */ + smmu->impl_dev = &adev->dev; + smmu->options |= ARM_SMMU_OPT_TEGRA241_CMDQV; + dev_info(smmu->dev, "found companion CMDQV device: %s\n", + dev_name(smmu->impl_dev)); + } + kfree(uid); +} +#else +static void acpi_smmu_dsdt_probe_tegra241_cmdqv(struct acpi_iort_node *node, + struct arm_smmu_device *smmu) +{ +} +#endif + +static int acpi_smmu_iort_probe_model(struct acpi_iort_node *node, + struct arm_smmu_device *smmu) { - switch (model) { + struct acpi_iort_smmu_v3 *iort_smmu = + (struct acpi_iort_smmu_v3 *)node->node_data; + + switch (iort_smmu->model) { case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX: smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY; break; case ACPI_IORT_SMMU_V3_HISILICON_HI161X: smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH; break; + case ACPI_IORT_SMMU_V3_GENERIC: + /* + * Tegra241 implementation stores its SMMU options and impl_dev + * in DSDT. Thus, go through the ACPI tables unconditionally. 
+ */ + acpi_smmu_dsdt_probe_tegra241_cmdqv(node, smmu); + break; } dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options); + return 0; } static int arm_smmu_device_acpi_probe(struct platform_device *pdev, @@ -4341,8 +4406,6 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev, /* Retrieve SMMUv3 specific data */ iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data; - acpi_smmu_get_options(iort_smmu->model, smmu); - if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) smmu->features |= ARM_SMMU_FEAT_COHERENCY; @@ -4354,7 +4417,7 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev, smmu->features |= ARM_SMMU_FEAT_HA; } - return 0; + return acpi_smmu_iort_probe_model(node, smmu); } #else static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev, @@ -4435,6 +4498,39 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu) iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); } +static void arm_smmu_impl_remove(void *data) +{ + struct arm_smmu_device *smmu = data; + + if (smmu->impl_ops && smmu->impl_ops->device_remove) + smmu->impl_ops->device_remove(smmu); +} + +/* + * Probe all the compiled in implementations. Each one checks to see if it + * matches this HW and if so returns a devm_krealloc'd arm_smmu_device which + * replaces the callers. Otherwise the original is returned or ERR_PTR. + */ +static struct arm_smmu_device *arm_smmu_impl_probe(struct arm_smmu_device *smmu) +{ + struct arm_smmu_device *new_smmu = ERR_PTR(-ENODEV); + int ret; + + if (smmu->impl_dev && (smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV)) + new_smmu = tegra241_cmdqv_probe(smmu); + + if (new_smmu == ERR_PTR(-ENODEV)) + return smmu; + if (IS_ERR(new_smmu)) + return new_smmu; + + ret = devm_add_action_or_reset(new_smmu->dev, arm_smmu_impl_remove, + new_smmu); + if (ret) + return ERR_PTR(ret); + return new_smmu; +} + static int arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -4456,6 +4552,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (ret) return ret; + smmu = arm_smmu_impl_probe(smmu); + if (IS_ERR(smmu)) + return PTR_ERR(smmu); + /* Base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 14bca41a981b43..1e9952ca989f87 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -14,6 +14,8 @@ #include #include +struct arm_smmu_device; + /* MMIO registers */ #define ARM_SMMU_IDR0 0x0 #define IDR0_ST_LVL GENMASK(28, 27) @@ -202,10 +204,8 @@ * 2lvl: 128k L1 entries, * 256 lazy entries per table (each table covers a PCI bus) */ -#define STRTAB_L1_SZ_SHIFT 20 #define STRTAB_SPLIT 8 -#define STRTAB_L1_DESC_DWORDS 1 #define STRTAB_L1_DESC_SPAN GENMASK_ULL(4, 0) #define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6) @@ -215,6 +215,26 @@ struct arm_smmu_ste { __le64 data[STRTAB_STE_DWORDS]; }; +#define STRTAB_NUM_L2_STES (1 << STRTAB_SPLIT) +struct arm_smmu_strtab_l2 { + struct arm_smmu_ste stes[STRTAB_NUM_L2_STES]; +}; + +struct arm_smmu_strtab_l1 { + __le64 l2ptr; +}; +#define STRTAB_MAX_L1_ENTRIES (1 << 17) + +static inline u32 arm_smmu_strtab_l1_idx(u32 sid) +{ + return sid / STRTAB_NUM_L2_STES; +} + +static inline u32 arm_smmu_strtab_l2_idx(u32 sid) +{ + return sid % STRTAB_NUM_L2_STES; +} + #define STRTAB_STE_0_V (1UL << 0) #define STRTAB_STE_0_CFG GENMASK_ULL(3, 1) #define STRTAB_STE_0_CFG_ABORT 0 @@ -267,6 +287,7 @@ 
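For orientation: the two-level stream-table helpers added above are a fixed split of the StreamID. With STRTAB_SPLIT = 8, each L1 entry spans 256 STEs, so for example SID 0x1234 resolves to L1 index 0x12 and L2 index 0x34. A stand-alone sketch of the same arithmetic follows; it uses local copies of the constants and hypothetical helper names rather than the driver's headers.

#include <stdio.h>
#include <stdint.h>

#define STRTAB_SPLIT       8                     /* SID bits covered by one L2 table */
#define STRTAB_NUM_L2_STES (1u << STRTAB_SPLIT)  /* 256 STEs per L2 table */

/* Same math as arm_smmu_strtab_l1_idx()/arm_smmu_strtab_l2_idx() above. */
static uint32_t strtab_l1_idx(uint32_t sid) { return sid / STRTAB_NUM_L2_STES; }
static uint32_t strtab_l2_idx(uint32_t sid) { return sid % STRTAB_NUM_L2_STES; }

int main(void)
{
	uint32_t sid = 0x1234;

	/* Expect: sid 0x1234 -> l1 0x12, l2 0x34 */
	printf("sid 0x%x -> l1 0x%x, l2 0x%x\n", sid,
	       strtab_l1_idx(sid), strtab_l2_idx(sid));
	return 0;
}

This is the indexing that arm_smmu_get_step_for_sid() and arm_smmu_sid_in_range() now rely on in place of the old STRTAB_L1_SZ_SHIFT/STRTAB_L1_DESC_DWORDS arithmetic.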
struct arm_smmu_ste { #define STRTAB_STE_2_S2AA64 (1UL << 51) #define STRTAB_STE_2_S2ENDI (1UL << 52) #define STRTAB_STE_2_S2PTW (1UL << 54) +#define STRTAB_STE_2_S2S (1UL << 57) #define STRTAB_STE_2_S2R (1UL << 58) #define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4) @@ -280,7 +301,6 @@ struct arm_smmu_ste { */ #define CTXDESC_L2_ENTRIES 1024 -#define CTXDESC_L1_DESC_DWORDS 1 #define CTXDESC_L1_DESC_V (1UL << 0) #define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12) @@ -290,6 +310,24 @@ struct arm_smmu_cd { __le64 data[CTXDESC_CD_DWORDS]; }; +struct arm_smmu_cdtab_l2 { + struct arm_smmu_cd cds[CTXDESC_L2_ENTRIES]; +}; + +struct arm_smmu_cdtab_l1 { + __le64 l2ptr; +}; + +static inline unsigned int arm_smmu_cdtab_l1_idx(unsigned int ssid) +{ + return ssid / CTXDESC_L2_ENTRIES; +} + +static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid) +{ + return ssid % CTXDESC_L2_ENTRIES; +} + #define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0) #define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6) #define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8) @@ -320,7 +358,7 @@ struct arm_smmu_cd { * When the SMMU only supports linear context descriptor tables, pick a * reasonable size limit (64kB). */ -#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3)) +#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / sizeof(struct arm_smmu_cd)) /* Command queue */ #define CMDQ_ENT_SZ_SHIFT 4 @@ -566,10 +604,18 @@ struct arm_smmu_cmdq { atomic_long_t *valid_map; atomic_t owner_prod; atomic_t lock; + bool (*supports_cmd)(struct arm_smmu_cmdq_ent *ent); }; +static inline bool arm_smmu_cmdq_supports_cmd(struct arm_smmu_cmdq *cmdq, + struct arm_smmu_cmdq_ent *ent) +{ + return cmdq->supports_cmd ? cmdq->supports_cmd(ent) : true; +} + struct arm_smmu_cmdq_batch { u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS]; + struct arm_smmu_cmdq *cmdq; int num; }; @@ -584,24 +630,23 @@ struct arm_smmu_priq { }; /* High-level stream table and context descriptor structures */ -struct arm_smmu_strtab_l1_desc { - struct arm_smmu_ste *l2ptr; -}; - struct arm_smmu_ctx_desc { u16 asid; }; -struct arm_smmu_l1_ctx_desc { - struct arm_smmu_cd *l2ptr; - dma_addr_t l2ptr_dma; -}; - struct arm_smmu_ctx_desc_cfg { - __le64 *cdtab; + union { + struct { + struct arm_smmu_cd *table; + unsigned int num_ents; + } linear; + struct { + struct arm_smmu_cdtab_l1 *l1tab; + struct arm_smmu_cdtab_l2 **l2ptrs; + unsigned int num_l1_ents; + } l2; + }; dma_addr_t cdtab_dma; - struct arm_smmu_l1_ctx_desc *l1_desc; - unsigned int num_l1_ents; unsigned int used_ssids; u8 in_ste; u8 s1fmt; @@ -609,6 +654,12 @@ struct arm_smmu_ctx_desc_cfg { u8 s1cdmax; }; +static inline bool +arm_smmu_cdtab_allocated(struct arm_smmu_ctx_desc_cfg *cfg) +{ + return cfg->linear.table || cfg->l2.l1tab; +} + /* True if the cd table has SSIDS > 0 in use. 
*/ static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table) { @@ -620,18 +671,35 @@ struct arm_smmu_s2_cfg { }; struct arm_smmu_strtab_cfg { - __le64 *strtab; - dma_addr_t strtab_dma; - struct arm_smmu_strtab_l1_desc *l1_desc; - unsigned int num_l1_ents; + union { + struct { + struct arm_smmu_ste *table; + dma_addr_t ste_dma; + unsigned int num_ents; + } linear; + struct { + struct arm_smmu_strtab_l1 *l1tab; + struct arm_smmu_strtab_l2 **l2ptrs; + dma_addr_t l1_dma; + unsigned int num_l1_ents; + } l2; + }; +}; - u64 strtab_base; - u32 strtab_base_cfg; +struct arm_smmu_impl_ops { + int (*device_reset)(struct arm_smmu_device *smmu); + void (*device_remove)(struct arm_smmu_device *smmu); + int (*init_structures)(struct arm_smmu_device *smmu); + struct arm_smmu_cmdq *(*get_secondary_cmdq)( + struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent); }; /* An SMMUv3 instance */ struct arm_smmu_device { struct device *dev; + struct device *impl_dev; + const struct arm_smmu_impl_ops *impl_ops; + void __iomem *base; void __iomem *page1; @@ -664,6 +732,7 @@ struct arm_smmu_device { #define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1) #define ARM_SMMU_OPT_MSIPOLL (1 << 2) #define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3) +#define ARM_SMMU_OPT_TEGRA241_CMDQV (1 << 4) u32 options; struct arm_smmu_cmdq cmdq; @@ -815,6 +884,15 @@ void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, unsigned long iova, size_t size); +void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq); +int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, + struct arm_smmu_queue *q, void __iomem *page, + unsigned long prod_off, unsigned long cons_off, + size_t dwords, const char *name); +int arm_smmu_cmdq_init(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq *cmdq); + #ifdef CONFIG_ARM_SMMU_V3_SVA bool arm_smmu_sva_supported(struct arm_smmu_device *smmu); bool arm_smmu_master_sva_supported(struct arm_smmu_master *master); @@ -860,10 +938,15 @@ static inline void arm_smmu_sva_notifier_synchronize(void) {} #define arm_smmu_sva_domain_alloc NULL -static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, - struct device *dev, - ioasid_t id) +#endif /* CONFIG_ARM_SMMU_V3_SVA */ + +#ifdef CONFIG_TEGRA241_CMDQV +struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu); +#else /* CONFIG_TEGRA241_CMDQV */ +static inline struct arm_smmu_device * +tegra241_cmdqv_probe(struct arm_smmu_device *smmu) { + return ERR_PTR(-ENODEV); } -#endif /* CONFIG_ARM_SMMU_V3_SVA */ +#endif /* CONFIG_TEGRA241_CMDQV */ #endif /* _ARM_SMMU_V3_H */ diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c new file mode 100644 index 00000000000000..fcd13d301fff68 --- /dev/null +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. 
*/ + +#define dev_fmt(fmt) "tegra241_cmdqv: " fmt + +#include +#include +#include +#include +#include +#include + +#include + +#include "arm-smmu-v3.h" + +/* CMDQV register page base and size defines */ +#define TEGRA241_CMDQV_CONFIG_BASE (0) +#define TEGRA241_CMDQV_CONFIG_SIZE (SZ_64K) +#define TEGRA241_VCMDQ_PAGE0_BASE (TEGRA241_CMDQV_CONFIG_BASE + SZ_64K) +#define TEGRA241_VCMDQ_PAGE1_BASE (TEGRA241_VCMDQ_PAGE0_BASE + SZ_64K) +#define TEGRA241_VINTF_PAGE_BASE (TEGRA241_VCMDQ_PAGE1_BASE + SZ_64K) + +/* CMDQV global base regs */ +#define TEGRA241_CMDQV_CONFIG 0x0000 +#define CMDQV_EN BIT(0) + +#define TEGRA241_CMDQV_PARAM 0x0004 +#define CMDQV_NUM_VINTF_LOG2 GENMASK(11, 8) +#define CMDQV_NUM_VCMDQ_LOG2 GENMASK(7, 4) + +#define TEGRA241_CMDQV_STATUS 0x0008 +#define CMDQV_ENABLED BIT(0) + +#define TEGRA241_CMDQV_VINTF_ERR_MAP 0x0014 +#define TEGRA241_CMDQV_VINTF_INT_MASK 0x001C +#define TEGRA241_CMDQV_CMDQ_ERR_MAP(m) (0x0024 + 0x4*(m)) + +#define TEGRA241_CMDQV_CMDQ_ALLOC(q) (0x0200 + 0x4*(q)) +#define CMDQV_CMDQ_ALLOC_VINTF GENMASK(20, 15) +#define CMDQV_CMDQ_ALLOC_LVCMDQ GENMASK(7, 1) +#define CMDQV_CMDQ_ALLOCATED BIT(0) + +/* VINTF base regs */ +#define TEGRA241_VINTF(v) (0x1000 + 0x100*(v)) + +#define TEGRA241_VINTF_CONFIG 0x0000 +#define VINTF_HYP_OWN BIT(17) +#define VINTF_VMID GENMASK(16, 1) +#define VINTF_EN BIT(0) + +#define TEGRA241_VINTF_STATUS 0x0004 +#define VINTF_STATUS GENMASK(3, 1) +#define VINTF_ENABLED BIT(0) + +#define TEGRA241_VINTF_LVCMDQ_ERR_MAP_64(m) \ + (0x00C0 + 0x8*(m)) +#define LVCMDQ_ERR_MAP_NUM_64 2 + +/* VCMDQ base regs */ +/* -- PAGE0 -- */ +#define TEGRA241_VCMDQ_PAGE0(q) (TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q)) + +#define TEGRA241_VCMDQ_CONS 0x00000 +#define VCMDQ_CONS_ERR GENMASK(30, 24) + +#define TEGRA241_VCMDQ_PROD 0x00004 + +#define TEGRA241_VCMDQ_CONFIG 0x00008 +#define VCMDQ_EN BIT(0) + +#define TEGRA241_VCMDQ_STATUS 0x0000C +#define VCMDQ_ENABLED BIT(0) + +#define TEGRA241_VCMDQ_GERROR 0x00010 +#define TEGRA241_VCMDQ_GERRORN 0x00014 + +/* -- PAGE1 -- */ +#define TEGRA241_VCMDQ_PAGE1(q) (TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q)) +#define VCMDQ_ADDR GENMASK(47, 5) +#define VCMDQ_LOG2SIZE GENMASK(4, 0) +#define VCMDQ_LOG2SIZE_MAX 19 + +#define TEGRA241_VCMDQ_BASE 0x00000 +#define TEGRA241_VCMDQ_CONS_INDX_BASE 0x00008 + +/* VINTF logical-VCMDQ pages */ +#define TEGRA241_VINTFi_PAGE0(i) (TEGRA241_VINTF_PAGE_BASE + SZ_128K*(i)) +#define TEGRA241_VINTFi_PAGE1(i) (TEGRA241_VINTFi_PAGE0(i) + SZ_64K) +#define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \ + (TEGRA241_VINTFi_PAGE0(i) + 0x80*(q)) +#define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \ + (TEGRA241_VINTFi_PAGE1(i) + 0x80*(q)) + +/* MMIO helpers */ +#define REG_CMDQV(_cmdqv, _regname) \ + ((_cmdqv)->base + TEGRA241_CMDQV_##_regname) +#define REG_VINTF(_vintf, _regname) \ + ((_vintf)->base + TEGRA241_VINTF_##_regname) +#define REG_VCMDQ_PAGE0(_vcmdq, _regname) \ + ((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname) +#define REG_VCMDQ_PAGE1(_vcmdq, _regname) \ + ((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname) + + +static bool disable_cmdqv; +module_param(disable_cmdqv, bool, 0444); +MODULE_PARM_DESC(disable_cmdqv, + "This allows to disable CMDQV HW and use default SMMU internal CMDQ."); + +static bool bypass_vcmdq; +module_param(bypass_vcmdq, bool, 0444); +MODULE_PARM_DESC(bypass_vcmdq, + "This allows to bypass VCMDQ for debugging use or perf comparison."); + +/** + * struct tegra241_vcmdq - Virtual Command Queue + * @idx: Global index in the CMDQV + * @lidx: Local index in the VINTF + * @enabled: Enable status + * @cmdqv: Parent 
CMDQV pointer + * @vintf: Parent VINTF pointer + * @cmdq: Command Queue struct + * @page0: MMIO Page0 base address + * @page1: MMIO Page1 base address + */ +struct tegra241_vcmdq { + u16 idx; + u16 lidx; + + bool enabled; + + struct tegra241_cmdqv *cmdqv; + struct tegra241_vintf *vintf; + struct arm_smmu_cmdq cmdq; + + void __iomem *page0; + void __iomem *page1; +}; + +/** + * struct tegra241_vintf - Virtual Interface + * @idx: Global index in the CMDQV + * @enabled: Enable status + * @hyp_own: Owned by hypervisor (in-kernel) + * @cmdqv: Parent CMDQV pointer + * @lvcmdqs: List of logical VCMDQ pointers + * @base: MMIO base address + */ +struct tegra241_vintf { + u16 idx; + + bool enabled; + bool hyp_own; + + struct tegra241_cmdqv *cmdqv; + struct tegra241_vcmdq **lvcmdqs; + + void __iomem *base; +}; + +/** + * struct tegra241_cmdqv - CMDQ-V for SMMUv3 + * @smmu: SMMUv3 device + * @dev: CMDQV device + * @base: MMIO base address + * @irq: IRQ number + * @num_vintfs: Total number of VINTFs + * @num_vcmdqs: Total number of VCMDQs + * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF + * @vintf_ids: VINTF id allocator + * @vintfs: List of VINTFs + */ +struct tegra241_cmdqv { + struct arm_smmu_device smmu; + struct device *dev; + + void __iomem *base; + int irq; + + /* CMDQV Hardware Params */ + u16 num_vintfs; + u16 num_vcmdqs; + u16 num_lvcmdqs_per_vintf; + + struct ida vintf_ids; + + struct tegra241_vintf **vintfs; +}; + +/* Config and Polling Helpers */ + +static inline int tegra241_cmdqv_write_config(struct tegra241_cmdqv *cmdqv, + void __iomem *addr_config, + void __iomem *addr_status, + u32 regval, const char *header, + bool *out_enabled) +{ + bool en = regval & BIT(0); + int ret; + + writel(regval, addr_config); + ret = readl_poll_timeout(addr_status, regval, + en ? regval & BIT(0) : !(regval & BIT(0)), + 1, ARM_SMMU_POLL_TIMEOUT_US); + if (ret) + dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n", + header, en ? 
"en" : "dis", regval); + if (out_enabled) + WRITE_ONCE(*out_enabled, regval & BIT(0)); + return ret; +} + +static inline int cmdqv_write_config(struct tegra241_cmdqv *cmdqv, u32 regval) +{ + return tegra241_cmdqv_write_config(cmdqv, + REG_CMDQV(cmdqv, CONFIG), + REG_CMDQV(cmdqv, STATUS), + regval, "CMDQV: ", NULL); +} + +static inline int vintf_write_config(struct tegra241_vintf *vintf, u32 regval) +{ + char header[16]; + + snprintf(header, 16, "VINTF%u: ", vintf->idx); + return tegra241_cmdqv_write_config(vintf->cmdqv, + REG_VINTF(vintf, CONFIG), + REG_VINTF(vintf, STATUS), + regval, header, &vintf->enabled); +} + +static inline char *lvcmdq_error_header(struct tegra241_vcmdq *vcmdq, + char *header, int hlen) +{ + WARN_ON(hlen < 64); + if (WARN_ON(!vcmdq->vintf)) + return ""; + snprintf(header, hlen, "VINTF%u: VCMDQ%u/LVCMDQ%u: ", + vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx); + return header; +} + +static inline int vcmdq_write_config(struct tegra241_vcmdq *vcmdq, u32 regval) +{ + char header[64], *h = lvcmdq_error_header(vcmdq, header, 64); + + return tegra241_cmdqv_write_config(vcmdq->cmdqv, + REG_VCMDQ_PAGE0(vcmdq, CONFIG), + REG_VCMDQ_PAGE0(vcmdq, STATUS), + regval, h, &vcmdq->enabled); +} + +/* ISR Functions */ + +static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf) +{ + int i; + + for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) { + u64 map = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i))); + + while (map) { + unsigned long lidx = __ffs64(map); + struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx]; + u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)); + + __arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq); + writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN)); + map &= ~BIT_ULL(lidx); + } + } +} + +static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid) +{ + struct tegra241_cmdqv *cmdqv = (struct tegra241_cmdqv *)devid; + void __iomem *reg_vintf_map = REG_CMDQV(cmdqv, VINTF_ERR_MAP); + char err_str[256]; + u64 vintf_map; + + /* Use readl_relaxed() as register addresses are not 64-bit aligned */ + vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 | + (u64)readl_relaxed(reg_vintf_map); + + snprintf(err_str, sizeof(err_str), + "vintf_map: %016llx, vcmdq_map %08x:%08x:%08x:%08x", vintf_map, + readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(3))), + readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(2))), + readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(1))), + readl_relaxed(REG_CMDQV(cmdqv, CMDQ_ERR_MAP(0)))); + + dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str); + + /* Handle VINTF0 and its LVCMDQs */ + if (vintf_map & BIT_ULL(0)) { + tegra241_vintf0_handle_error(cmdqv->vintfs[0]); + vintf_map &= ~BIT_ULL(0); + } + + return IRQ_HANDLED; +} + +/* Command Queue Function */ + +static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent) +{ + switch (ent->opcode) { + case CMDQ_OP_TLBI_NH_ASID: + case CMDQ_OP_TLBI_NH_VA: + case CMDQ_OP_ATC_INV: + return true; + default: + return false; + } +} + +static struct arm_smmu_cmdq * +tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_ent *ent) +{ + struct tegra241_cmdqv *cmdqv = + container_of(smmu, struct tegra241_cmdqv, smmu); + struct tegra241_vintf *vintf = cmdqv->vintfs[0]; + struct tegra241_vcmdq *vcmdq; + u16 lidx; + + if (READ_ONCE(bypass_vcmdq)) + return NULL; + + /* Use SMMU CMDQ if VINTF0 is uninitialized */ + if (!READ_ONCE(vintf->enabled)) + return NULL; + + /* + * Select a LVCMDQ to use. 
Here we use a temporal solution to + * balance out traffic on cmdq issuing: each cmdq has its own + * lock, if all cpus issue cmdlist using the same cmdq, only + * one CPU at a time can enter the process, while the others + * will be spinning at the same lock. + */ + lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf; + vcmdq = vintf->lvcmdqs[lidx]; + if (!vcmdq || !READ_ONCE(vcmdq->enabled)) + return NULL; + + /* Unsupported CMD goes for smmu->cmdq pathway */ + if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent)) + return NULL; + return &vcmdq->cmdq; +} + +/* HW Reset Functions */ + +static void tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq) +{ + char header[64], *h = lvcmdq_error_header(vcmdq, header, 64); + u32 gerrorn, gerror; + + if (vcmdq_write_config(vcmdq, 0)) { + dev_err(vcmdq->cmdqv->dev, + "%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h, + readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)), + readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)), + readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS))); + } + writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, PROD)); + writel_relaxed(0, REG_VCMDQ_PAGE0(vcmdq, CONS)); + writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, BASE)); + writeq_relaxed(0, REG_VCMDQ_PAGE1(vcmdq, CONS_INDX_BASE)); + + gerrorn = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)); + gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)); + if (gerror != gerrorn) { + dev_warn(vcmdq->cmdqv->dev, + "%suncleared error detected, resetting\n", h); + writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN)); + } + + dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h); +} + +static int tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq) +{ + char header[64], *h = lvcmdq_error_header(vcmdq, header, 64); + int ret; + + /* Reset VCMDQ */ + tegra241_vcmdq_hw_deinit(vcmdq); + + /* Configure and enable VCMDQ */ + writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE)); + + ret = vcmdq_write_config(vcmdq, VCMDQ_EN); + if (ret) { + dev_err(vcmdq->cmdqv->dev, + "%sGERRORN=0x%X, GERROR=0x%X, CONS=0x%X\n", h, + readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERRORN)), + readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR)), + readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, CONS))); + return ret; + } + + dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h); + return 0; +} + +static void tegra241_vintf_hw_deinit(struct tegra241_vintf *vintf) +{ + u16 lidx; + + for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) + if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) + tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]); + vintf_write_config(vintf, 0); +} + +static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own) +{ + u32 regval; + u16 lidx; + int ret; + + /* Reset VINTF */ + tegra241_vintf_hw_deinit(vintf); + + /* Configure and enable VINTF */ + /* + * Note that HYP_OWN bit is wired to zero when running in guest kernel, + * whether enabling it here or not, as !HYP_OWN cmdq HWs only support a + * restricted set of supported commands. 
+ */ + regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own); + writel(regval, REG_VINTF(vintf, CONFIG)); + + ret = vintf_write_config(vintf, regval | VINTF_EN); + if (ret) + return ret; + /* + * As being mentioned above, HYP_OWN bit is wired to zero for a guest + * kernel, so read it back from HW to ensure that reflects in hyp_own + */ + vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG))); + + for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) { + if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) { + ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]); + if (ret) { + tegra241_vintf_hw_deinit(vintf); + return ret; + } + } + } + + return 0; +} + +static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu) +{ + struct tegra241_cmdqv *cmdqv = + container_of(smmu, struct tegra241_cmdqv, smmu); + u16 qidx, lidx, idx; + u32 regval; + int ret; + + /* Reset CMDQV */ + regval = readl_relaxed(REG_CMDQV(cmdqv, CONFIG)); + ret = cmdqv_write_config(cmdqv, regval & ~CMDQV_EN); + if (ret) + return ret; + ret = cmdqv_write_config(cmdqv, regval | CMDQV_EN); + if (ret) + return ret; + + /* Assign preallocated global VCMDQs to each VINTF as LVCMDQs */ + for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) { + for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) { + regval = FIELD_PREP(CMDQV_CMDQ_ALLOC_VINTF, idx); + regval |= FIELD_PREP(CMDQV_CMDQ_ALLOC_LVCMDQ, lidx); + regval |= CMDQV_CMDQ_ALLOCATED; + writel_relaxed(regval, + REG_CMDQV(cmdqv, CMDQ_ALLOC(qidx++))); + } + } + + return tegra241_vintf_hw_init(cmdqv->vintfs[0], true); +} + +/* VCMDQ Resource Helpers */ + +static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq) +{ + struct arm_smmu_queue *q = &vcmdq->cmdq.q; + size_t nents = 1 << q->llq.max_n_shift; + size_t qsz = nents << CMDQ_ENT_SZ_SHIFT; + + if (!q->base) + return; + dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma); +} + +static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq) +{ + struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu; + struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq; + struct arm_smmu_queue *q = &cmdq->q; + char name[16]; + int ret; + + snprintf(name, 16, "vcmdq%u", vcmdq->idx); + + q->llq.max_n_shift = VCMDQ_LOG2SIZE_MAX; + + /* Use the common helper to init the VCMDQ, and then... 
*/ + ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0, + TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS, + CMDQ_ENT_DWORDS, name); + if (ret) + return ret; + + /* ...override q_base to write VCMDQ_BASE registers */ + q->q_base = q->base_dma & VCMDQ_ADDR; + q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift); + + if (!vcmdq->vintf->hyp_own) + cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd; + + return arm_smmu_cmdq_init(smmu, cmdq); +} + +/* VINTF Logical VCMDQ Resource Helpers */ + +static void tegra241_vintf_deinit_lvcmdq(struct tegra241_vintf *vintf, u16 lidx) +{ + vintf->lvcmdqs[lidx] = NULL; +} + +static int tegra241_vintf_init_lvcmdq(struct tegra241_vintf *vintf, u16 lidx, + struct tegra241_vcmdq *vcmdq) +{ + struct tegra241_cmdqv *cmdqv = vintf->cmdqv; + u16 idx = vintf->idx; + + vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx; + vcmdq->lidx = lidx; + vcmdq->cmdqv = cmdqv; + vcmdq->vintf = vintf; + vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx); + vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx); + + vintf->lvcmdqs[lidx] = vcmdq; + return 0; +} + +static void tegra241_vintf_free_lvcmdq(struct tegra241_vintf *vintf, u16 lidx) +{ + struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx]; + char header[64]; + + tegra241_vcmdq_free_smmu_cmdq(vcmdq); + tegra241_vintf_deinit_lvcmdq(vintf, lidx); + + dev_dbg(vintf->cmdqv->dev, + "%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64)); + kfree(vcmdq); +} + +static struct tegra241_vcmdq * +tegra241_vintf_alloc_lvcmdq(struct tegra241_vintf *vintf, u16 lidx) +{ + struct tegra241_cmdqv *cmdqv = vintf->cmdqv; + struct tegra241_vcmdq *vcmdq; + char header[64]; + int ret; + + vcmdq = kzalloc(sizeof(*vcmdq), GFP_KERNEL); + if (!vcmdq) + return ERR_PTR(-ENOMEM); + + ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq); + if (ret) + goto free_vcmdq; + + /* Build an arm_smmu_cmdq for each LVCMDQ */ + ret = tegra241_vcmdq_alloc_smmu_cmdq(vcmdq); + if (ret) + goto deinit_lvcmdq; + + dev_dbg(cmdqv->dev, + "%sallocated\n", lvcmdq_error_header(vcmdq, header, 64)); + return vcmdq; + +deinit_lvcmdq: + tegra241_vintf_deinit_lvcmdq(vintf, lidx); +free_vcmdq: + kfree(vcmdq); + return ERR_PTR(ret); +} + +/* VINTF Resource Helpers */ + +static void tegra241_cmdqv_deinit_vintf(struct tegra241_cmdqv *cmdqv, u16 idx) +{ + kfree(cmdqv->vintfs[idx]->lvcmdqs); + ida_free(&cmdqv->vintf_ids, idx); + cmdqv->vintfs[idx] = NULL; +} + +static int tegra241_cmdqv_init_vintf(struct tegra241_cmdqv *cmdqv, u16 max_idx, + struct tegra241_vintf *vintf) +{ + + u16 idx; + int ret; + + ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL); + if (ret < 0) + return ret; + idx = ret; + + vintf->idx = idx; + vintf->cmdqv = cmdqv; + vintf->base = cmdqv->base + TEGRA241_VINTF(idx); + + vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf, + sizeof(*vintf->lvcmdqs), GFP_KERNEL); + if (!vintf->lvcmdqs) { + ida_free(&cmdqv->vintf_ids, idx); + return -ENOMEM; + } + + cmdqv->vintfs[idx] = vintf; + return ret; +} + +/* Remove Helpers */ + +static void tegra241_vintf_remove_lvcmdq(struct tegra241_vintf *vintf, u16 lidx) +{ + tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]); + tegra241_vintf_free_lvcmdq(vintf, lidx); +} + +static void tegra241_cmdqv_remove_vintf(struct tegra241_cmdqv *cmdqv, u16 idx) +{ + struct tegra241_vintf *vintf = cmdqv->vintfs[idx]; + u16 lidx; + + /* Remove LVCMDQ resources */ + for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) + if (vintf->lvcmdqs[lidx]) + tegra241_vintf_remove_lvcmdq(vintf, 
lidx); + + /* Remove VINTF resources */ + tegra241_vintf_hw_deinit(vintf); + + dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx); + tegra241_cmdqv_deinit_vintf(cmdqv, idx); + kfree(vintf); +} + +static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu) +{ + struct tegra241_cmdqv *cmdqv = + container_of(smmu, struct tegra241_cmdqv, smmu); + u16 idx; + + /* Remove VINTF resources */ + for (idx = 0; idx < cmdqv->num_vintfs; idx++) { + if (cmdqv->vintfs[idx]) { + /* Only vintf0 should remain at this stage */ + WARN_ON(idx > 0); + tegra241_cmdqv_remove_vintf(cmdqv, idx); + } + } + + /* Remove cmdqv resources */ + ida_destroy(&cmdqv->vintf_ids); + + if (cmdqv->irq > 0) + free_irq(cmdqv->irq, cmdqv); + iounmap(cmdqv->base); + kfree(cmdqv->vintfs); + put_device(cmdqv->dev); /* smmu->impl_dev */ +} + +static struct arm_smmu_impl_ops tegra241_cmdqv_impl_ops = { + .get_secondary_cmdq = tegra241_cmdqv_get_cmdq, + .device_reset = tegra241_cmdqv_hw_reset, + .device_remove = tegra241_cmdqv_remove, +}; + +/* Probe Functions */ + +static int tegra241_cmdqv_acpi_is_memory(struct acpi_resource *res, void *data) +{ + struct resource_win win; + + return !acpi_dev_resource_address_space(res, &win); +} + +static int tegra241_cmdqv_acpi_get_irqs(struct acpi_resource *ares, void *data) +{ + struct resource r; + int *irq = data; + + if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r)) + *irq = r.start; + return 1; /* No need to add resource to the list */ +} + +static struct resource * +tegra241_cmdqv_find_acpi_resource(struct device *dev, int *irq) +{ + struct acpi_device *adev = to_acpi_device(dev); + struct list_head resource_list; + struct resource_entry *rentry; + struct resource *res = NULL; + int ret; + + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, + tegra241_cmdqv_acpi_is_memory, NULL); + if (ret < 0) { + dev_err(dev, "failed to get memory resource: %d\n", ret); + return NULL; + } + + rentry = list_first_entry_or_null(&resource_list, + struct resource_entry, node); + if (!rentry) { + dev_err(dev, "failed to get memory resource entry\n"); + goto free_list; + } + + /* Caller must free the res */ + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + goto free_list; + + *res = *rentry->res; + + acpi_dev_free_resource_list(&resource_list); + + INIT_LIST_HEAD(&resource_list); + + if (irq) + ret = acpi_dev_get_resources(adev, &resource_list, + tegra241_cmdqv_acpi_get_irqs, irq); + if (ret < 0 || !irq || *irq <= 0) + dev_warn(dev, "no interrupt. 
errors will not be reported\n"); + +free_list: + acpi_dev_free_resource_list(&resource_list); + return res; +} + +static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu) +{ + struct tegra241_cmdqv *cmdqv = + container_of(smmu, struct tegra241_cmdqv, smmu); + struct tegra241_vintf *vintf; + int lidx; + int ret; + + vintf = kzalloc(sizeof(*vintf), GFP_KERNEL); + if (!vintf) + goto out_fallback; + + /* Init VINTF0 for in-kernel use */ + ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf); + if (ret) { + dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret); + goto free_vintf; + } + + /* Preallocate logical VCMDQs to VINTF0 */ + for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) { + struct tegra241_vcmdq *vcmdq; + + vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx); + if (IS_ERR(vcmdq)) + goto free_lvcmdq; + } + + /* Now, we are ready to run all the impl ops */ + smmu->impl_ops = &tegra241_cmdqv_impl_ops; + return 0; + +free_lvcmdq: + for (lidx--; lidx >= 0; lidx--) + tegra241_vintf_free_lvcmdq(vintf, lidx); + tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx); +free_vintf: + kfree(vintf); +out_fallback: + dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n"); + smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV; + tegra241_cmdqv_remove(smmu); + return 0; +} + +struct dentry *cmdqv_debugfs_dir; + +static struct arm_smmu_device * +__tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res, + int irq) +{ + static const struct arm_smmu_impl_ops init_ops = { + .init_structures = tegra241_cmdqv_init_structures, + .device_remove = tegra241_cmdqv_remove, + }; + struct tegra241_cmdqv *cmdqv = NULL; + struct arm_smmu_device *new_smmu; + void __iomem *base; + u32 regval; + int ret; + + static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0); + + base = ioremap(res->start, resource_size(res)); + if (!base) { + dev_err(smmu->dev, "failed to ioremap\n"); + return NULL; + } + + regval = readl(base + TEGRA241_CMDQV_CONFIG); + if (disable_cmdqv) { + dev_info(smmu->dev, "Detected disable_cmdqv=true\n"); + writel(regval & ~CMDQV_EN, base + TEGRA241_CMDQV_CONFIG); + goto iounmap; + } + + cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL); + if (!cmdqv) + goto iounmap; + new_smmu = &cmdqv->smmu; + + cmdqv->irq = irq; + cmdqv->base = base; + cmdqv->dev = smmu->impl_dev; + + if (cmdqv->irq > 0) { + ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv", + cmdqv); + if (ret) { + dev_err(cmdqv->dev, "failed to request irq (%d): %d\n", + cmdqv->irq, ret); + goto iounmap; + } + } + + regval = readl_relaxed(REG_CMDQV(cmdqv, PARAM)); + cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval); + cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval); + cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs; + + cmdqv->vintfs = + kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL); + if (!cmdqv->vintfs) + goto free_irq; + + ida_init(&cmdqv->vintf_ids); + +#ifdef CONFIG_IOMMU_DEBUGFS + if (!cmdqv_debugfs_dir) { + cmdqv_debugfs_dir = + debugfs_create_dir("tegra241_cmdqv", iommu_debugfs_dir); + debugfs_create_bool("bypass_vcmdq", 0644, cmdqv_debugfs_dir, + &bypass_vcmdq); + } +#endif + + /* Provide init-level ops only, until tegra241_cmdqv_init_structures */ + new_smmu->impl_ops = &init_ops; + + return new_smmu; + +free_irq: + if (cmdqv->irq > 0) + free_irq(cmdqv->irq, cmdqv); +iounmap: + iounmap(base); + return NULL; +} + +struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu) +{ + struct 
arm_smmu_device *new_smmu; + struct resource *res = NULL; + int irq; + + if (!smmu->dev->of_node) + res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq); + if (!res) + goto out_fallback; + + new_smmu = __tegra241_cmdqv_probe(smmu, res, irq); + kfree(res); + + if (new_smmu) + return new_smmu; + +out_fallback: + dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n"); + smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV; + put_device(smmu->impl_dev); + return ERR_PTR(-ENODEV); +} diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index 36c6b36ad4ff74..6372f3e25c4bc2 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -282,6 +282,20 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) u32 smr; int i; + /* + * MSM8998 LPASS SMMU reports 13 context banks, but accessing + * the last context bank crashes the system. + */ + if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") && + smmu->num_context_banks == 13) { + smmu->num_context_banks = 12; + } else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) { + if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */ + smmu->num_context_banks = 7; + else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */ + smmu->num_context_banks = 13; + } + /* * Some platforms support more than the Arm SMMU architected maximum of * 128 stream matching groups. For unknown reasons, the additional @@ -338,6 +352,19 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) return 0; } +static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu) +{ + /* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */ + smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K; + + /* TZ protects several last context banks, hide them from Linux */ + if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") && + smmu->num_context_banks == 5) + smmu->num_context_banks = 2; + + return 0; +} + static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; @@ -436,6 +463,7 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = { static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = { .init_context = qcom_adreno_smmu_init_context, + .cfg_probe = qcom_adreno_smmuv2_cfg_probe, .def_domain_type = qcom_smmu_def_domain_type, .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank, .write_sctlr = qcom_adreno_smmu_write_sctlr, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 723273440c2118..8321962b37148b 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -417,7 +417,7 @@ void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx, void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx, const struct arm_smmu_context_fault_info *cfi) { - dev_dbg(smmu->dev, + dev_err(smmu->dev, "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 7b1dfa0665df60..2a9fa0c8cc00fe 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -1037,9 +1038,23 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t 
size, return NULL; } -static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, - size_t size, enum dma_data_direction dir, gfp_t gfp, - unsigned long attrs) +/* + * This is the actual return value from the iommu_dma_alloc_noncontiguous. + * + * The users of the DMA API should only care about the sg_table, but to make + * the DMA-API internal vmaping and freeing easier we stash away the page + * array as well (except for the fallback case). This can go away any time, + * e.g. when a vmap-variant that takes a scatterlist comes along. + */ +struct dma_sgt_handle { + struct sg_table sgt; + struct page **pages; +}; +#define sgt_handle(sgt) \ + container_of((sgt), struct dma_sgt_handle, sgt) + +struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, unsigned long attrs) { struct dma_sgt_handle *sh; @@ -1055,7 +1070,7 @@ static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, return &sh->sgt; } -static void iommu_dma_free_noncontiguous(struct device *dev, size_t size, +void iommu_dma_free_noncontiguous(struct device *dev, size_t size, struct sg_table *sgt, enum dma_data_direction dir) { struct dma_sgt_handle *sh = sgt_handle(sgt); @@ -1066,8 +1081,26 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size, kfree(sh); } -static void iommu_dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) +void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) +{ + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL); +} + +int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt) +{ + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff) + return -ENXIO; + return vm_map_pages(vma, sgt_handle(sgt)->pages, count); +} + +void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir) { phys_addr_t phys; @@ -1081,8 +1114,8 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, swiotlb_sync_single_for_cpu(dev, phys, size, dir); } -static void iommu_dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) +void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir) { phys_addr_t phys; @@ -1096,9 +1129,8 @@ static void iommu_dma_sync_single_for_device(struct device *dev, arch_sync_dma_for_device(phys, size, dir); } -static void iommu_dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir) +void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir) { struct scatterlist *sg; int i; @@ -1112,9 +1144,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); } -static void iommu_dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir) +void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir) { struct scatterlist *sg; int i; @@ -1129,9 +1160,9 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, 
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); } -static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) +dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) { phys_addr_t phys = page_to_phys(page) + offset; bool coherent = dev_is_dma_coherent(dev); @@ -1189,7 +1220,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, return iova; } -static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, +void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct iommu_domain *domain = iommu_get_dma_domain(dev); @@ -1342,8 +1373,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg, * impedance-matching, to be able to hand off a suitably-aligned list, * but still preserve the original offsets and sizes for the caller. */ -static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs) +int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs) { struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; @@ -1462,8 +1493,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, return ret; } -static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs) +void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs) { dma_addr_t end = 0, start; struct scatterlist *tmp; @@ -1512,7 +1543,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, __iommu_dma_unmap(dev, start, end - start); } -static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, +dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs) { return __iommu_dma_map(dev, phys, size, @@ -1520,7 +1551,7 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, dma_get_mask(dev)); } -static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, +void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { __iommu_dma_unmap(dev, handle, size); @@ -1557,7 +1588,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) dma_free_contiguous(dev, page, alloc_size); } -static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, +void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, unsigned long attrs) { __iommu_dma_unmap(dev, handle, size); @@ -1601,8 +1632,8 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, return NULL; } -static void *iommu_dma_alloc(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, unsigned long attrs) +void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp, unsigned long attrs) { bool coherent = dev_is_dma_coherent(dev); int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); @@ -1635,7 +1666,7 @@ static void 
*iommu_dma_alloc(struct device *dev, size_t size, return cpu_addr; } -static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, +int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { @@ -1666,7 +1697,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot); } -static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, +int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { @@ -1693,19 +1724,19 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, return ret; } -static unsigned long iommu_dma_get_merge_boundary(struct device *dev) +unsigned long iommu_dma_get_merge_boundary(struct device *dev) { struct iommu_domain *domain = iommu_get_dma_domain(dev); return (1UL << __ffs(domain->pgsize_bitmap)) - 1; } -static size_t iommu_dma_opt_mapping_size(void) +size_t iommu_dma_opt_mapping_size(void) { return iova_rcache_range(); } -static size_t iommu_dma_max_mapping_size(struct device *dev) +size_t iommu_dma_max_mapping_size(struct device *dev) { if (dev_is_untrusted(dev)) return swiotlb_max_mapping_size(dev); @@ -1713,32 +1744,6 @@ static size_t iommu_dma_max_mapping_size(struct device *dev) return SIZE_MAX; } -static const struct dma_map_ops iommu_dma_ops = { - .flags = DMA_F_PCI_P2PDMA_SUPPORTED | - DMA_F_CAN_SKIP_SYNC, - .alloc = iommu_dma_alloc, - .free = iommu_dma_free, - .alloc_pages_op = dma_common_alloc_pages, - .free_pages = dma_common_free_pages, - .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, - .free_noncontiguous = iommu_dma_free_noncontiguous, - .mmap = iommu_dma_mmap, - .get_sgtable = iommu_dma_get_sgtable, - .map_page = iommu_dma_map_page, - .unmap_page = iommu_dma_unmap_page, - .map_sg = iommu_dma_map_sg, - .unmap_sg = iommu_dma_unmap_sg, - .sync_single_for_cpu = iommu_dma_sync_single_for_cpu, - .sync_single_for_device = iommu_dma_sync_single_for_device, - .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu, - .sync_sg_for_device = iommu_dma_sync_sg_for_device, - .map_resource = iommu_dma_map_resource, - .unmap_resource = iommu_dma_unmap_resource, - .get_merge_boundary = iommu_dma_get_merge_boundary, - .opt_mapping_size = iommu_dma_opt_mapping_size, - .max_mapping_size = iommu_dma_max_mapping_size, -}; - void iommu_setup_dma_ops(struct device *dev) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); @@ -1746,19 +1751,15 @@ void iommu_setup_dma_ops(struct device *dev) if (dev_is_pci(dev)) dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; - if (iommu_is_dma_domain(domain)) { - if (iommu_dma_init_domain(domain, dev)) - goto out_err; - dev->dma_ops = &iommu_dma_ops; - } else if (dev->dma_ops == &iommu_dma_ops) { - /* Clean up if we've switched *from* a DMA domain */ - dev->dma_ops = NULL; - } + dev->dma_iommu = iommu_is_dma_domain(domain); + if (dev->dma_iommu && iommu_dma_init_domain(domain, dev)) + goto out_err; return; out_err: - pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", - dev_name(dev)); + pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", + dev_name(dev)); + dev->dma_iommu = false; } static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index e9d2bff4659b7c..30be786bff11e6 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ 
b/drivers/iommu/fsl_pamu_domain.c @@ -416,14 +416,12 @@ static struct iommu_group *fsl_pamu_device_group(struct device *dev) static struct iommu_device *fsl_pamu_probe_device(struct device *dev) { - int len; - /* * uboot must fill the fsl,liodn for platform devices to be supported by * the iommu. */ if (!dev_is_pci(dev) && - !of_get_property(dev->of_node, "fsl,liodn", &len)) + !of_property_present(dev->of_node, "fsl,liodn")) return ERR_PTR(-ENODEV); return &pamu_iommu; diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig index f52fb39c968eb1..88fd32a9323c5a 100644 --- a/drivers/iommu/intel/Kconfig +++ b/drivers/iommu/intel/Kconfig @@ -12,7 +12,6 @@ config DMAR_DEBUG config INTEL_IOMMU bool "Support for Intel IOMMU using DMA Remapping Devices" depends on PCI_MSI && ACPI && X86 - select DMA_OPS select IOMMU_API select IOMMU_IOVA select IOMMUFD_DRIVER if IOMMUFD diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c index 44e92638c0cd17..e5b89f728ad3b2 100644 --- a/drivers/iommu/intel/cache.c +++ b/drivers/iommu/intel/cache.c @@ -190,6 +190,13 @@ int cache_tag_assign_domain(struct dmar_domain *domain, u16 did = domain_get_id_for_dev(domain, dev); int ret; + /* domain->qi_bach will be freed in iommu_free_domain() path. */ + if (!domain->qi_batch) { + domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_KERNEL); + if (!domain->qi_batch) + return -ENOMEM; + } + ret = __cache_tag_assign_domain(domain, did, dev, pasid); if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED) return ret; @@ -255,6 +262,154 @@ static unsigned long calculate_psi_aligned_address(unsigned long start, return ALIGN_DOWN(start, VTD_PAGE_SIZE << mask); } +static void qi_batch_flush_descs(struct intel_iommu *iommu, struct qi_batch *batch) +{ + if (!iommu || !batch->index) + return; + + qi_submit_sync(iommu, batch->descs, batch->index, 0); + + /* Reset the index value and clean the whole batch buffer. */ + memset(batch, 0, sizeof(*batch)); +} + +static void qi_batch_increment_index(struct intel_iommu *iommu, struct qi_batch *batch) +{ + if (++batch->index == QI_MAX_BATCHED_DESC_COUNT) + qi_batch_flush_descs(iommu, batch); +} + +static void qi_batch_add_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type, + struct qi_batch *batch) +{ + qi_desc_iotlb(iommu, did, addr, size_order, type, &batch->descs[batch->index]); + qi_batch_increment_index(iommu, batch); +} + +static void qi_batch_add_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, + u16 qdep, u64 addr, unsigned int mask, + struct qi_batch *batch) +{ + /* + * According to VT-d spec, software is recommended to not submit any Device-TLB + * invalidation requests while address remapping hardware is disabled. + */ + if (!(iommu->gcmd & DMA_GCMD_TE)) + return; + + qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &batch->descs[batch->index]); + qi_batch_increment_index(iommu, batch); +} + +static void qi_batch_add_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, + u64 addr, unsigned long npages, bool ih, + struct qi_batch *batch) +{ + /* + * npages == -1 means a PASID-selective invalidation, otherwise, + * a positive value for Page-selective-within-PASID invalidation. + * 0 is not a valid input. 
+ */ + if (!npages) + return; + + qi_desc_piotlb(did, pasid, addr, npages, ih, &batch->descs[batch->index]); + qi_batch_increment_index(iommu, batch); +} + +static void qi_batch_add_pasid_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, + u32 pasid, u16 qdep, u64 addr, + unsigned int size_order, struct qi_batch *batch) +{ + /* + * According to VT-d spec, software is recommended to not submit any + * Device-TLB invalidation requests while address remapping hardware + * is disabled. + */ + if (!(iommu->gcmd & DMA_GCMD_TE)) + return; + + qi_desc_dev_iotlb_pasid(sid, pfsid, pasid, qdep, addr, size_order, + &batch->descs[batch->index]); + qi_batch_increment_index(iommu, batch); +} + +static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag, + unsigned long addr, unsigned long pages, + unsigned long mask, int ih) +{ + struct intel_iommu *iommu = tag->iommu; + u64 type = DMA_TLB_PSI_FLUSH; + + if (domain->use_first_level) { + qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr, + pages, ih, domain->qi_batch); + return; + } + + /* + * Fallback to domain selective flush if no PSI support or the size + * is too big. + */ + if (!cap_pgsel_inv(iommu->cap) || + mask > cap_max_amask_val(iommu->cap) || pages == -1) { + addr = 0; + mask = 0; + ih = 0; + type = DMA_TLB_DSI_FLUSH; + } + + if (ecap_qis(iommu->ecap)) + qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih, mask, type, + domain->qi_batch); + else + __iommu_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type); +} + +static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_tag *tag, + unsigned long addr, unsigned long mask) +{ + struct intel_iommu *iommu = tag->iommu; + struct device_domain_info *info; + u16 sid; + + info = dev_iommu_priv_get(tag->dev); + sid = PCI_DEVID(info->bus, info->devfn); + + if (tag->pasid == IOMMU_NO_PASID) { + qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, + addr, mask, domain->qi_batch); + if (info->dtlb_extra_inval) + qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, + addr, mask, domain->qi_batch); + return; + } + + qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid, + info->ats_qdep, addr, mask, domain->qi_batch); + if (info->dtlb_extra_inval) + qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid, + info->ats_qdep, addr, mask, + domain->qi_batch); +} + +static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag) +{ + struct intel_iommu *iommu = tag->iommu; + struct device_domain_info *info; + u16 sid; + + info = dev_iommu_priv_get(tag->dev); + sid = PCI_DEVID(info->bus, info->devfn); + + qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0, + MAX_AGAW_PFN_WIDTH, domain->qi_batch); + if (info->dtlb_extra_inval) + qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0, + MAX_AGAW_PFN_WIDTH, domain->qi_batch); +} + /* * Invalidates a range of IOVA from @start (inclusive) to @end (inclusive) * when the memory mappings in the target domain have been modified. 
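
The qi_batch_add_*() helpers above all follow one pattern: build the descriptor into the next free slot of a fixed-size, per-domain buffer and submit the whole buffer in a single hardware operation once it fills up (or when the caller flushes at the end of a cache-tag walk). The stand-alone C sketch below shows only that accumulation/flush pattern in isolation; it is illustrative, not kernel code, and batch_add(), batch_flush() and BATCH_MAX are hypothetical stand-ins for qi_batch_increment_index(), qi_batch_flush_descs() and QI_MAX_BATCHED_DESC_COUNT.

/*
 * Minimal sketch of the batched-submission pattern: accumulate fixed-size
 * descriptors and submit them together, amortizing submission cost.
 * All names are illustrative; no kernel APIs are used.
 */
#include <stdio.h>
#include <string.h>

#define BATCH_MAX 16

struct desc { unsigned long long qw0, qw1; };

struct batch {
	struct desc descs[BATCH_MAX];
	unsigned int index;
};

/* Stand-in for the synchronous submit: push all queued descriptors at once. */
static void batch_flush(struct batch *b)
{
	if (!b->index)
		return;
	printf("submitting %u descriptors\n", b->index);
	memset(b, 0, sizeof(*b));	/* reset index and clear the buffer */
}

/* Queue one descriptor; flush automatically when the buffer is full. */
static void batch_add(struct batch *b, unsigned long long qw0,
		      unsigned long long qw1)
{
	b->descs[b->index].qw0 = qw0;
	b->descs[b->index].qw1 = qw1;
	if (++b->index == BATCH_MAX)
		batch_flush(b);
}

int main(void)
{
	struct batch b = { 0 };
	int i;

	for (i = 0; i < 40; i++)	/* 40 requests -> two full batches */
		batch_add(&b, i, i + 1);
	batch_flush(&b);		/* submit the remaining 8 */
	return 0;
}

In the patch itself the same flush call also runs whenever the tag walk moves to a different IOMMU unit, since one batch buffer must never mix descriptors destined for different invalidation queues.
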
@@ -262,6 +417,7 @@ static unsigned long calculate_psi_aligned_address(unsigned long start, void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, unsigned long end, int ih) { + struct intel_iommu *iommu = NULL; unsigned long pages, mask, addr; struct cache_tag *tag; unsigned long flags; @@ -270,30 +426,14 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, spin_lock_irqsave(&domain->cache_lock, flags); list_for_each_entry(tag, &domain->cache_tags, node) { - struct intel_iommu *iommu = tag->iommu; - struct device_domain_info *info; - u16 sid; + if (iommu && iommu != tag->iommu) + qi_batch_flush_descs(iommu, domain->qi_batch); + iommu = tag->iommu; switch (tag->type) { case CACHE_TAG_IOTLB: case CACHE_TAG_NESTING_IOTLB: - if (domain->use_first_level) { - qi_flush_piotlb(iommu, tag->domain_id, - tag->pasid, addr, pages, ih); - } else { - /* - * Fallback to domain selective flush if no - * PSI support or the size is too big. - */ - if (!cap_pgsel_inv(iommu->cap) || - mask > cap_max_amask_val(iommu->cap)) - iommu->flush.flush_iotlb(iommu, tag->domain_id, - 0, 0, DMA_TLB_DSI_FLUSH); - else - iommu->flush.flush_iotlb(iommu, tag->domain_id, - addr | ih, mask, - DMA_TLB_PSI_FLUSH); - } + cache_tag_flush_iotlb(domain, tag, addr, pages, mask, ih); break; case CACHE_TAG_NESTING_DEVTLB: /* @@ -307,23 +447,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, mask = MAX_AGAW_PFN_WIDTH; fallthrough; case CACHE_TAG_DEVTLB: - info = dev_iommu_priv_get(tag->dev); - sid = PCI_DEVID(info->bus, info->devfn); - - if (tag->pasid == IOMMU_NO_PASID) - qi_flush_dev_iotlb(iommu, sid, info->pfsid, - info->ats_qdep, addr, mask); - else - qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid, - tag->pasid, info->ats_qdep, - addr, mask); - - quirk_extra_dev_tlb_flush(info, addr, mask, tag->pasid, info->ats_qdep); + cache_tag_flush_devtlb_psi(domain, tag, addr, mask); break; } trace_cache_tag_flush_range(tag, start, end, addr, pages, mask); } + qi_batch_flush_descs(iommu, domain->qi_batch); spin_unlock_irqrestore(&domain->cache_lock, flags); } @@ -333,39 +463,30 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, */ void cache_tag_flush_all(struct dmar_domain *domain) { + struct intel_iommu *iommu = NULL; struct cache_tag *tag; unsigned long flags; spin_lock_irqsave(&domain->cache_lock, flags); list_for_each_entry(tag, &domain->cache_tags, node) { - struct intel_iommu *iommu = tag->iommu; - struct device_domain_info *info; - u16 sid; + if (iommu && iommu != tag->iommu) + qi_batch_flush_descs(iommu, domain->qi_batch); + iommu = tag->iommu; switch (tag->type) { case CACHE_TAG_IOTLB: case CACHE_TAG_NESTING_IOTLB: - if (domain->use_first_level) - qi_flush_piotlb(iommu, tag->domain_id, - tag->pasid, 0, -1, 0); - else - iommu->flush.flush_iotlb(iommu, tag->domain_id, - 0, 0, DMA_TLB_DSI_FLUSH); + cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0); break; case CACHE_TAG_DEVTLB: case CACHE_TAG_NESTING_DEVTLB: - info = dev_iommu_priv_get(tag->dev); - sid = PCI_DEVID(info->bus, info->devfn); - - qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, - 0, MAX_AGAW_PFN_WIDTH); - quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, - IOMMU_NO_PASID, info->ats_qdep); + cache_tag_flush_devtlb_all(domain, tag); break; } trace_cache_tag_flush_all(tag); } + qi_batch_flush_descs(iommu, domain->qi_batch); spin_unlock_irqrestore(&domain->cache_lock, flags); } @@ -383,6 +504,7 @@ void cache_tag_flush_all(struct dmar_domain *domain) void 
cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start, unsigned long end) { + struct intel_iommu *iommu = NULL; unsigned long pages, mask, addr; struct cache_tag *tag; unsigned long flags; @@ -391,7 +513,9 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start, spin_lock_irqsave(&domain->cache_lock, flags); list_for_each_entry(tag, &domain->cache_tags, node) { - struct intel_iommu *iommu = tag->iommu; + if (iommu && iommu != tag->iommu) + qi_batch_flush_descs(iommu, domain->qi_batch); + iommu = tag->iommu; if (!cap_caching_mode(iommu->cap) || domain->use_first_level) { iommu_flush_write_buffer(iommu); @@ -399,22 +523,11 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start, } if (tag->type == CACHE_TAG_IOTLB || - tag->type == CACHE_TAG_NESTING_IOTLB) { - /* - * Fallback to domain selective flush if no - * PSI support or the size is too big. - */ - if (!cap_pgsel_inv(iommu->cap) || - mask > cap_max_amask_val(iommu->cap)) - iommu->flush.flush_iotlb(iommu, tag->domain_id, - 0, 0, DMA_TLB_DSI_FLUSH); - else - iommu->flush.flush_iotlb(iommu, tag->domain_id, - addr, mask, - DMA_TLB_PSI_FLUSH); - } + tag->type == CACHE_TAG_NESTING_IOTLB) + cache_tag_flush_iotlb(domain, tag, addr, pages, mask, 0); trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask); } + qi_batch_flush_descs(iommu, domain->qi_batch); spin_unlock_irqrestore(&domain->cache_lock, flags); } diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 1c8d3141cb55c0..eaf862e8dea1a9 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1204,9 +1204,7 @@ static void free_iommu(struct intel_iommu *iommu) */ static inline void reclaim_free_desc(struct q_inval *qi) { - while (qi->desc_status[qi->free_tail] == QI_DONE || - qi->desc_status[qi->free_tail] == QI_ABORT) { - qi->desc_status[qi->free_tail] = QI_FREE; + while (qi->desc_status[qi->free_tail] == QI_FREE && qi->free_tail != qi->free_head) { qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; qi->free_cnt++; } @@ -1463,8 +1461,16 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, raw_spin_lock(&qi->q_lock); } - for (i = 0; i < count; i++) - qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE; + /* + * The reclaim code can free descriptors from multiple submissions + * starting from the tail of the queue. When count == 0, the + * status of the standalone wait descriptor at the tail of the queue + * must be set to QI_FREE to allow the reclaim code to proceed. + * It is also possible that descriptors from one of the previous + * submissions has to be reclaimed by a subsequent submission. 
+ */ + for (i = 0; i <= count; i++) + qi->desc_status[(index + i) % QI_LENGTH] = QI_FREE; reclaim_free_desc(qi); raw_spin_unlock_irqrestore(&qi->q_lock, flags); @@ -1520,24 +1526,9 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type) { - u8 dw = 0, dr = 0; - struct qi_desc desc; - int ih = 0; - - if (cap_write_drain(iommu->cap)) - dw = 1; - - if (cap_read_drain(iommu->cap)) - dr = 1; - - desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) - | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; - desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) - | QI_IOTLB_AM(size_order); - desc.qw2 = 0; - desc.qw3 = 0; + qi_desc_iotlb(iommu, did, addr, size_order, type, &desc); qi_submit_sync(iommu, &desc, 1, 0); } @@ -1555,20 +1546,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, if (!(iommu->gcmd & DMA_GCMD_TE)) return; - if (mask) { - addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; - desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; - } else - desc.qw1 = QI_DEV_IOTLB_ADDR(addr); - - if (qdep >= QI_DEV_IOTLB_MAX_INVS) - qdep = 0; - - desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) | - QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid); - desc.qw2 = 0; - desc.qw3 = 0; - + qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &desc); qi_submit_sync(iommu, &desc, 1, 0); } @@ -1588,28 +1566,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, return; } - if (npages == -1) { - desc.qw0 = QI_EIOTLB_PASID(pasid) | - QI_EIOTLB_DID(did) | - QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | - QI_EIOTLB_TYPE; - desc.qw1 = 0; - } else { - int mask = ilog2(__roundup_pow_of_two(npages)); - unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask)); - - if (WARN_ON_ONCE(!IS_ALIGNED(addr, align))) - addr = ALIGN_DOWN(addr, align); - - desc.qw0 = QI_EIOTLB_PASID(pasid) | - QI_EIOTLB_DID(did) | - QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | - QI_EIOTLB_TYPE; - desc.qw1 = QI_EIOTLB_ADDR(addr) | - QI_EIOTLB_IH(ih) | - QI_EIOTLB_AM(mask); - } - + qi_desc_piotlb(did, pasid, addr, npages, ih, &desc); qi_submit_sync(iommu, &desc, 1, 0); } @@ -1617,7 +1574,6 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, u32 pasid, u16 qdep, u64 addr, unsigned int size_order) { - unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1); struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0}; /* @@ -1629,40 +1585,9 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, if (!(iommu->gcmd & DMA_GCMD_TE)) return; - desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) | - QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE | - QI_DEV_IOTLB_PFSID(pfsid); - - /* - * If S bit is 0, we only flush a single page. If S bit is set, - * The least significant zero bit indicates the invalidation address - * range. VT-d spec 6.5.2.6. - * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB. - * size order = 0 is PAGE_SIZE 4KB - * Max Invs Pending (MIP) is set to 0 for now until we have DIT in - * ECAP. 
- */ - if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order)) - pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n", - addr, size_order); - - /* Take page address */ - desc.qw1 = QI_DEV_EIOTLB_ADDR(addr); - - if (size_order) { - /* - * Existing 0s in address below size_order may be the least - * significant bit, we must set them to 1s to avoid having - * smaller size than desired. - */ - desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1, - VTD_PAGE_SHIFT); - /* Clear size_order bit to indicate size */ - desc.qw1 &= ~mask; - /* Set the S bit to indicate flushing more than 1 page */ - desc.qw1 |= QI_DEV_EIOTLB_SIZE; - } - + qi_desc_dev_iotlb_pasid(sid, pfsid, pasid, + qdep, addr, size_order, + &desc); qi_submit_sync(iommu, &desc, 1, 0); } diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 4aa070cf56e703..9f6b0780f2ef5e 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -167,15 +167,6 @@ static void device_rbtree_remove(struct device_domain_info *info) spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); } -/* - * This domain is a statically identity mapping domain. - * 1. This domain creats a static 1:1 mapping to all usable memory. - * 2. It maps to each iommu if successful. - * 3. Each iommu mapps to this domain if successful. - */ -static struct dmar_domain *si_domain; -static int hw_pass_through = 1; - struct dmar_rmrr_unit { struct list_head list; /* list of rmrr units */ struct acpi_dmar_header *hdr; /* ACPI header */ @@ -293,11 +284,6 @@ static int __init intel_iommu_setup(char *str) } __setup("intel_iommu=", intel_iommu_setup); -static int domain_type_is_si(struct dmar_domain *domain) -{ - return domain->domain.type == IOMMU_DOMAIN_IDENTITY; -} - static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; @@ -492,7 +478,6 @@ void domain_update_iommu_cap(struct dmar_domain *domain) domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain); - domain_update_iotlb(domain); } struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, @@ -1199,9 +1184,8 @@ static void __iommu_flush_context(struct intel_iommu *iommu, raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } -/* return value determine if we need a write buffer flush */ -static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, - u64 addr, unsigned int size_order, u64 type) +void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type) { int tlb_offset = ecap_iotlb_offset(iommu->ecap); u64 val = 0, val_iva = 0; @@ -1270,32 +1254,6 @@ domain_lookup_dev_info(struct dmar_domain *domain, return NULL; } -void domain_update_iotlb(struct dmar_domain *domain) -{ - struct dev_pasid_info *dev_pasid; - struct device_domain_info *info; - bool has_iotlb_device = false; - unsigned long flags; - - spin_lock_irqsave(&domain->lock, flags); - list_for_each_entry(info, &domain->devices, link) { - if (info->ats_enabled) { - has_iotlb_device = true; - break; - } - } - - list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) { - info = dev_iommu_priv_get(dev_pasid->dev); - if (info->ats_enabled) { - has_iotlb_device = true; - break; - } - } - domain->has_iotlb_device = has_iotlb_device; - spin_unlock_irqrestore(&domain->lock, flags); -} - /* * The extra devTLB flush quirk impacts those QAT devices with PCI device * IDs 
ranging from 0x4940 to 0x4943. It is exempted from risky_device() @@ -1322,20 +1280,9 @@ static void iommu_enable_pci_caps(struct device_domain_info *info) return; pdev = to_pci_dev(info->dev); - - /* The PCIe spec, in its wisdom, declares that the behaviour of - the device if you enable PASID support after ATS support is - undefined. So always enable PASID support on devices which - have it, even if we can't yet know if we're ever going to - use it. */ - if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1)) - info->pasid_enabled = 1; - if (info->ats_supported && pci_ats_page_aligned(pdev) && - !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { + !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) info->ats_enabled = 1; - domain_update_iotlb(info->domain); - } } static void iommu_disable_pci_caps(struct device_domain_info *info) @@ -1350,12 +1297,6 @@ static void iommu_disable_pci_caps(struct device_domain_info *info) if (info->ats_enabled) { pci_disable_ats(pdev); info->ats_enabled = 0; - domain_update_iotlb(info->domain); - } - - if (info->pasid_enabled) { - pci_disable_pasid(pdev); - info->pasid_enabled = 0; } } @@ -1447,10 +1388,10 @@ static int iommu_init_domains(struct intel_iommu *iommu) * entry for first-level or pass-through translation modes should * be programmed with a domain id different from those used for * second-level or nested translation. We reserve a domain id for - * this purpose. + * this purpose. This domain id is also used for identity domain + * in legacy mode. */ - if (sm_supported(iommu)) - set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); + set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); return 0; } @@ -1524,7 +1465,6 @@ static struct dmar_domain *alloc_domain(unsigned int type) domain->nid = NUMA_NO_NODE; if (first_level_by_default(type)) domain->use_first_level = true; - domain->has_iotlb_device = false; INIT_LIST_HEAD(&domain->devices); INIT_LIST_HEAD(&domain->dev_pasids); INIT_LIST_HEAD(&domain->cache_tags); @@ -1632,9 +1572,65 @@ static void domain_exit(struct dmar_domain *domain) if (WARN_ON(!list_empty(&domain->devices))) return; + kfree(domain->qi_batch); kfree(domain); } +/* + * For kdump cases, old valid entries may be cached due to the + * in-flight DMA and copied pgtable, but there is no unmapping + * behaviour for them, thus we need an explicit cache flush for + * the newly-mapped device. For kdump, at this point, the device + * is supposed to finish reset at its driver probe stage, so no + * in-flight DMA will exist, and we don't need to worry anymore + * hereafter. + */ +static void copied_context_tear_down(struct intel_iommu *iommu, + struct context_entry *context, + u8 bus, u8 devfn) +{ + u16 did_old; + + if (!context_copied(iommu, bus, devfn)) + return; + + assert_spin_locked(&iommu->lock); + + did_old = context_domain_id(context); + context_clear_entry(context); + + if (did_old < cap_ndoms(iommu->cap)) { + iommu->flush.flush_context(iommu, did_old, + (((u16)bus) << 8) | devfn, + DMA_CCMD_MASK_NOBIT, + DMA_CCMD_DEVICE_INVL); + iommu->flush.flush_iotlb(iommu, did_old, 0, 0, + DMA_TLB_DSI_FLUSH); + } + + clear_context_copied(iommu, bus, devfn); +} + +/* + * It's a non-present to present mapping. If hardware doesn't cache + * non-present entry we only need to flush the write-buffer. 
If the + * _does_ cache non-present entries, then it does so in the special + * domain #0, which we have to flush: + */ +static void context_present_cache_flush(struct intel_iommu *iommu, u16 did, + u8 bus, u8 devfn) +{ + if (cap_caching_mode(iommu->cap)) { + iommu->flush.flush_context(iommu, 0, + (((u16)bus) << 8) | devfn, + DMA_CCMD_MASK_NOBIT, + DMA_CCMD_DEVICE_INVL); + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + } else { + iommu_flush_write_buffer(iommu); + } +} + static int domain_context_mapping_one(struct dmar_domain *domain, struct intel_iommu *iommu, u8 bus, u8 devfn) @@ -1647,9 +1643,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, struct context_entry *context; int agaw, ret; - if (hw_pass_through && domain_type_is_si(domain)) - translation = CONTEXT_TT_PASS_THROUGH; - pr_debug("Set context mapping for %02x:%02x.%d\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); @@ -1663,83 +1656,35 @@ static int domain_context_mapping_one(struct dmar_domain *domain, if (context_present(context) && !context_copied(iommu, bus, devfn)) goto out_unlock; - /* - * For kdump cases, old valid entries may be cached due to the - * in-flight DMA and copied pgtable, but there is no unmapping - * behaviour for them, thus we need an explicit cache flush for - * the newly-mapped device. For kdump, at this point, the device - * is supposed to finish reset at its driver probe stage, so no - * in-flight DMA will exist, and we don't need to worry anymore - * hereafter. - */ - if (context_copied(iommu, bus, devfn)) { - u16 did_old = context_domain_id(context); - - if (did_old < cap_ndoms(iommu->cap)) { - iommu->flush.flush_context(iommu, did_old, - (((u16)bus) << 8) | devfn, - DMA_CCMD_MASK_NOBIT, - DMA_CCMD_DEVICE_INVL); - iommu->flush.flush_iotlb(iommu, did_old, 0, 0, - DMA_TLB_DSI_FLUSH); - } - - clear_context_copied(iommu, bus, devfn); - } - + copied_context_tear_down(iommu, context, bus, devfn); context_clear_entry(context); - context_set_domain_id(context, did); - if (translation != CONTEXT_TT_PASS_THROUGH) { - /* - * Skip top levels of page tables for iommu which has - * less agaw than default. Unnecessary for PT mode. - */ - for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { - ret = -ENOMEM; - pgd = phys_to_virt(dma_pte_addr(pgd)); - if (!dma_pte_present(pgd)) - goto out_unlock; - } - - if (info && info->ats_supported) - translation = CONTEXT_TT_DEV_IOTLB; - else - translation = CONTEXT_TT_MULTI_LEVEL; + context_set_domain_id(context, did); - context_set_address_root(context, virt_to_phys(pgd)); - context_set_address_width(context, agaw); - } else { - /* - * In pass through mode, AW must be programmed to - * indicate the largest AGAW value supported by - * hardware. And ASR is ignored by hardware. - */ - context_set_address_width(context, iommu->msagaw); + /* + * Skip top levels of page tables for iommu which has + * less agaw than default. Unnecessary for PT mode. 
+ */ + for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { + ret = -ENOMEM; + pgd = phys_to_virt(dma_pte_addr(pgd)); + if (!dma_pte_present(pgd)) + goto out_unlock; } + if (info && info->ats_supported) + translation = CONTEXT_TT_DEV_IOTLB; + else + translation = CONTEXT_TT_MULTI_LEVEL; + + context_set_address_root(context, virt_to_phys(pgd)); + context_set_address_width(context, agaw); context_set_translation_type(context, translation); context_set_fault_enable(context); context_set_present(context); if (!ecap_coherent(iommu->ecap)) clflush_cache_range(context, sizeof(*context)); - - /* - * It's a non-present to present mapping. If hardware doesn't cache - * non-present entry we only need to flush the write-buffer. If the - * _does_ cache non-present entries, then it does so in the special - * domain #0, which we have to flush: - */ - if (cap_caching_mode(iommu->cap)) { - iommu->flush.flush_context(iommu, 0, - (((u16)bus) << 8) | devfn, - DMA_CCMD_MASK_NOBIT, - DMA_CCMD_DEVICE_INVL); - iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); - } else { - iommu_flush_write_buffer(iommu); - } - + context_present_cache_flush(iommu, did, bus, devfn); ret = 0; out_unlock: @@ -2000,80 +1945,6 @@ static bool dev_is_real_dma_subdevice(struct device *dev) pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev); } -static int iommu_domain_identity_map(struct dmar_domain *domain, - unsigned long first_vpfn, - unsigned long last_vpfn) -{ - /* - * RMRR range might have overlap with physical memory range, - * clear it first - */ - dma_pte_clear_range(domain, first_vpfn, last_vpfn); - - return __domain_mapping(domain, first_vpfn, - first_vpfn, last_vpfn - first_vpfn + 1, - DMA_PTE_READ|DMA_PTE_WRITE, GFP_KERNEL); -} - -static int md_domain_init(struct dmar_domain *domain, int guest_width); - -static int __init si_domain_init(int hw) -{ - struct dmar_rmrr_unit *rmrr; - struct device *dev; - int i, nid, ret; - - si_domain = alloc_domain(IOMMU_DOMAIN_IDENTITY); - if (!si_domain) - return -EFAULT; - - if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { - domain_exit(si_domain); - si_domain = NULL; - return -EFAULT; - } - - if (hw) - return 0; - - for_each_online_node(nid) { - unsigned long start_pfn, end_pfn; - int i; - - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { - ret = iommu_domain_identity_map(si_domain, - mm_to_dma_pfn_start(start_pfn), - mm_to_dma_pfn_end(end_pfn-1)); - if (ret) - return ret; - } - } - - /* - * Identity map the RMRRs so that devices with RMRRs could also use - * the si_domain. 
- */ - for_each_rmrr_units(rmrr) { - for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, - i, dev) { - unsigned long long start = rmrr->base_address; - unsigned long long end = rmrr->end_address; - - if (WARN_ON(end < start || - end >> agaw_to_width(si_domain->agaw))) - continue; - - ret = iommu_domain_identity_map(si_domain, - mm_to_dma_pfn_start(start >> PAGE_SHIFT), - mm_to_dma_pfn_end(end >> PAGE_SHIFT)); - if (ret) - return ret; - } - } - - return 0; -} - static int dmar_domain_attach_device(struct dmar_domain *domain, struct device *dev) { @@ -2096,8 +1967,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain, if (!sm_supported(iommu)) ret = domain_context_mapping(domain, dev); - else if (hw_pass_through && domain_type_is_si(domain)) - ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID); else if (domain->use_first_level) ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID); else @@ -2106,8 +1975,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain, if (ret) goto out_block_translation; - if (sm_supported(info->iommu) || !domain_type_is_si(info->domain)) - iommu_enable_pci_caps(info); + iommu_enable_pci_caps(info); ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID); if (ret) @@ -2151,6 +2019,16 @@ static bool device_rmrr_is_relaxable(struct device *dev) static int device_def_domain_type(struct device *dev) { + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + + /* + * Hardware does not support the passthrough translation mode. + * Always use a dynamaic mapping domain. + */ + if (!ecap_pass_through(iommu->ecap)) + return IOMMU_DOMAIN_DMA; + if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); @@ -2441,8 +2319,6 @@ static int __init init_dmars(void) } } - if (!ecap_pass_through(iommu->ecap)) - hw_pass_through = 0; intel_svm_check(iommu); } @@ -2458,10 +2334,6 @@ static int __init init_dmars(void) check_tylersburg_isoch(); - ret = si_domain_init(hw_pass_through); - if (ret) - goto free_iommu; - /* * for each drhd * enable fault log @@ -2507,10 +2379,6 @@ static int __init init_dmars(void) disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } - if (si_domain) { - domain_exit(si_domain); - si_domain = NULL; - } return ret; } @@ -2885,12 +2753,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru) if (ret) goto out; - if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { - pr_warn("%s: Doesn't support hardware pass through.\n", - iommu->name); - return -ENXIO; - } - sp = domain_update_iommu_superpage(NULL, iommu) - 1; if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { pr_warn("%s: Doesn't support large page.\n", @@ -3141,43 +3003,6 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) return 0; } -static int intel_iommu_memory_notifier(struct notifier_block *nb, - unsigned long val, void *v) -{ - struct memory_notify *mhp = v; - unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn); - unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn + - mhp->nr_pages - 1); - - switch (val) { - case MEM_GOING_ONLINE: - if (iommu_domain_identity_map(si_domain, - start_vpfn, last_vpfn)) { - pr_warn("Failed to build identity map for [%lx-%lx]\n", - start_vpfn, last_vpfn); - return NOTIFY_BAD; - } - break; - - case MEM_OFFLINE: - case MEM_CANCEL_ONLINE: - { - LIST_HEAD(freelist); - - domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist); - iommu_put_pages_list(&freelist); - } - break; - } - - return NOTIFY_OK; -} - 
-static struct notifier_block intel_iommu_memory_nb = { - .notifier_call = intel_iommu_memory_notifier, - .priority = 0 -}; - static void intel_disable_iommus(void) { struct intel_iommu *iommu = NULL; @@ -3474,12 +3299,7 @@ int __init intel_iommu_init(void) iommu_pmu_register(iommu); } - up_read(&dmar_global_lock); - - if (si_domain && !hw_pass_through) - register_memory_notifier(&intel_iommu_memory_nb); - down_read(&dmar_global_lock); if (probe_acpi_namespace_devices()) pr_warn("ACPI name space devices didn't probe correctly\n"); @@ -3624,7 +3444,6 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st xa_init(&domain->iommu_array); domain->nid = dev_to_node(dev); - domain->has_iotlb_device = info->ats_enabled; domain->use_first_level = first_stage; /* calculate the address width */ @@ -3693,8 +3512,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) domain->geometry.force_aperture = true; return domain; - case IOMMU_DOMAIN_IDENTITY: - return &si_domain->domain; default: return NULL; } @@ -3761,8 +3578,7 @@ static void intel_iommu_domain_free(struct iommu_domain *domain) WARN_ON(dmar_domain->nested_parent && !list_empty(&dmar_domain->s1_domains)); - if (domain != &si_domain->domain) - domain_exit(dmar_domain); + domain_exit(dmar_domain); } int prepare_domain_attach_device(struct iommu_domain *domain, @@ -3812,11 +3628,9 @@ int prepare_domain_attach_device(struct iommu_domain *domain, static int intel_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { - struct device_domain_info *info = dev_iommu_priv_get(dev); int ret; - if (info->domain) - device_block_translation(dev); + device_block_translation(dev); ret = prepare_domain_attach_device(domain, dev); if (ret) @@ -4093,6 +3907,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) dev_iommu_priv_set(dev, info); if (pdev && pci_ats_supported(pdev)) { + pci_prepare_ats(pdev, VTD_PAGE_SHIFT); ret = device_rbtree_insert(iommu, info); if (ret) goto free; @@ -4114,6 +3929,16 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) intel_iommu_debugfs_create_dev(info); + /* + * The PCIe spec, in its wisdom, declares that the behaviour of the + * device is undefined if you enable PASID support after ATS support. + * So always enable PASID support on devices which have it, even if + * we can't yet know if we're ever going to use it. 
+ */ + if (info->pasid_supported && + !pci_enable_pasid(pdev, info->pasid_supported & ~1)) + info->pasid_enabled = 1; + return &iommu->iommu; free_table: intel_pasid_free_table(dev); @@ -4130,6 +3955,11 @@ static void intel_iommu_release_device(struct device *dev) struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu = info->iommu; + if (info->pasid_enabled) { + pci_disable_pasid(to_pci_dev(dev)); + info->pasid_enabled = 0; + } + mutex_lock(&iommu->iopf_lock); if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev))) device_rbtree_remove(info); @@ -4424,11 +4254,17 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, struct iommu_domain *domain) { struct device_domain_info *info = dev_iommu_priv_get(dev); - struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct dev_pasid_info *curr, *dev_pasid = NULL; struct intel_iommu *iommu = info->iommu; + struct dmar_domain *dmar_domain; unsigned long flags; + if (domain->type == IOMMU_DOMAIN_IDENTITY) { + intel_pasid_tear_down_entry(iommu, dev, pasid, false); + return; + } + + dmar_domain = to_dmar_domain(domain); spin_lock_irqsave(&dmar_domain->lock, flags); list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { if (curr->dev == dev && curr->pasid == pasid) { @@ -4483,9 +4319,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, if (ret) goto out_detach_iommu; - if (domain_type_is_si(dmar_domain)) - ret = intel_pasid_setup_pass_through(iommu, dev, pasid); - else if (dmar_domain->use_first_level) + if (dmar_domain->use_first_level) ret = domain_setup_first_level(iommu, dmar_domain, dev, pasid); else @@ -4655,9 +4489,111 @@ static const struct iommu_dirty_ops intel_dirty_ops = { .read_and_clear_dirty = intel_iommu_read_and_clear_dirty, }; +static int context_setup_pass_through(struct device *dev, u8 bus, u8 devfn) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + struct context_entry *context; + + spin_lock(&iommu->lock); + context = iommu_context_addr(iommu, bus, devfn, 1); + if (!context) { + spin_unlock(&iommu->lock); + return -ENOMEM; + } + + if (context_present(context) && !context_copied(iommu, bus, devfn)) { + spin_unlock(&iommu->lock); + return 0; + } + + copied_context_tear_down(iommu, context, bus, devfn); + context_clear_entry(context); + context_set_domain_id(context, FLPT_DEFAULT_DID); + + /* + * In pass through mode, AW must be programmed to indicate the largest + * AGAW value supported by hardware. And ASR is ignored by hardware. 
+ */ + context_set_address_width(context, iommu->msagaw); + context_set_translation_type(context, CONTEXT_TT_PASS_THROUGH); + context_set_fault_enable(context); + context_set_present(context); + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(context, sizeof(*context)); + context_present_cache_flush(iommu, FLPT_DEFAULT_DID, bus, devfn); + spin_unlock(&iommu->lock); + + return 0; +} + +static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *data) +{ + struct device *dev = data; + + if (dev != &pdev->dev) + return 0; + + return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff); +} + +static int device_setup_pass_through(struct device *dev) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + + if (!dev_is_pci(dev)) + return context_setup_pass_through(dev, info->bus, info->devfn); + + return pci_for_each_dma_alias(to_pci_dev(dev), + context_setup_pass_through_cb, dev); +} + +static int identity_domain_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + int ret; + + device_block_translation(dev); + + if (dev_is_real_dma_subdevice(dev)) + return 0; + + if (sm_supported(iommu)) { + ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID); + if (!ret) + iommu_enable_pci_caps(info); + } else { + ret = device_setup_pass_through(dev); + } + + return ret; +} + +static int identity_domain_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + + if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) + return -EOPNOTSUPP; + + return intel_pasid_setup_pass_through(iommu, dev, pasid); +} + +static struct iommu_domain identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &(const struct iommu_domain_ops) { + .attach_dev = identity_domain_attach_dev, + .set_dev_pasid = identity_domain_set_dev_pasid, + }, +}; + const struct iommu_ops intel_iommu_ops = { .blocked_domain = &blocking_domain, .release_domain = &blocking_domain, + .identity_domain = &identity_domain, .capable = intel_iommu_capable, .hw_info = intel_iommu_hw_info, .domain_alloc = intel_iommu_domain_alloc, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index a969be2258b1c2..1497f3112b12cd 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -584,11 +584,23 @@ struct iommu_domain_info { * to VT-d spec, section 9.3 */ }; +/* + * We start simply by using a fixed size for the batched descriptors. This + * size is currently sufficient for our needs. Future improvements could + * involve dynamically allocating the batch buffer based on actual demand, + * allowing us to adjust the batch size for optimal performance in different + * scenarios. 
+ */ +#define QI_MAX_BATCHED_DESC_COUNT 16 +struct qi_batch { + struct qi_desc descs[QI_MAX_BATCHED_DESC_COUNT]; + unsigned int index; +}; + struct dmar_domain { int nid; /* node id */ struct xarray iommu_array; /* Attached IOMMU array */ - u8 has_iotlb_device: 1; u8 iommu_coherency: 1; /* indicate coherency of iommu access */ u8 force_snooping : 1; /* Create IOPTEs with snoop control */ u8 set_pte_snp:1; @@ -609,6 +621,7 @@ struct dmar_domain { spinlock_t cache_lock; /* Protect the cache tag list */ struct list_head cache_tags; /* Cache tag list */ + struct qi_batch *qi_batch; /* Batched QI descriptors */ int iommu_superpage;/* Level of superpages supported: 0 == 4KiB (no superpages), 1 == 2MiB, @@ -687,8 +700,6 @@ struct iommu_pmu { DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX); struct perf_event *event_list[IOMMU_PMU_IDX_MAX]; unsigned char irq_name[16]; - struct hlist_node cpuhp_node; - int cpu; }; #define IOMMU_IRQ_ID_OFFSET_PRQ (DMAR_UNITS_SUPPORTED) @@ -1067,6 +1078,115 @@ static inline unsigned long nrpages_to_size(unsigned long npages) return npages << VTD_PAGE_SHIFT; } +static inline void qi_desc_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type, + struct qi_desc *desc) +{ + u8 dw = 0, dr = 0; + int ih = 0; + + if (cap_write_drain(iommu->cap)) + dw = 1; + + if (cap_read_drain(iommu->cap)) + dr = 1; + + desc->qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) + | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; + desc->qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) + | QI_IOTLB_AM(size_order); + desc->qw2 = 0; + desc->qw3 = 0; +} + +static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr, + unsigned int mask, struct qi_desc *desc) +{ + if (mask) { + addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; + desc->qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; + } else { + desc->qw1 = QI_DEV_IOTLB_ADDR(addr); + } + + if (qdep >= QI_DEV_IOTLB_MAX_INVS) + qdep = 0; + + desc->qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) | + QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid); + desc->qw2 = 0; + desc->qw3 = 0; +} + +static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr, + unsigned long npages, bool ih, + struct qi_desc *desc) +{ + if (npages == -1) { + desc->qw0 = QI_EIOTLB_PASID(pasid) | + QI_EIOTLB_DID(did) | + QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | + QI_EIOTLB_TYPE; + desc->qw1 = 0; + } else { + int mask = ilog2(__roundup_pow_of_two(npages)); + unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask)); + + if (WARN_ON_ONCE(!IS_ALIGNED(addr, align))) + addr = ALIGN_DOWN(addr, align); + + desc->qw0 = QI_EIOTLB_PASID(pasid) | + QI_EIOTLB_DID(did) | + QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | + QI_EIOTLB_TYPE; + desc->qw1 = QI_EIOTLB_ADDR(addr) | + QI_EIOTLB_IH(ih) | + QI_EIOTLB_AM(mask); + } +} + +static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid, + u16 qdep, u64 addr, + unsigned int size_order, + struct qi_desc *desc) +{ + unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1); + + desc->qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) | + QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE | + QI_DEV_IOTLB_PFSID(pfsid); + + /* + * If S bit is 0, we only flush a single page. If S bit is set, + * The least significant zero bit indicates the invalidation address + * range. VT-d spec 6.5.2.6. + * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB. + * size order = 0 is PAGE_SIZE 4KB + * Max Invs Pending (MIP) is set to 0 for now until we have DIT in + * ECAP. 
+ */ + if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order)) + pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n", + addr, size_order); + + /* Take page address */ + desc->qw1 = QI_DEV_EIOTLB_ADDR(addr); + + if (size_order) { + /* + * Existing 0s in address below size_order may be the least + * significant bit, we must set them to 1s to avoid having + * smaller size than desired. + */ + desc->qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1, + VTD_PAGE_SHIFT); + /* Clear size_order bit to indicate size */ + desc->qw1 &= ~mask; + /* Set the S bit to indicate flushing more than 1 page */ + desc->qw1 |= QI_DEV_EIOTLB_SIZE; + } +} + /* Convert value to context PASID directory size field coding. */ #define context_pdts(pds) (((pds) & 0x7) << 9) @@ -1098,13 +1218,15 @@ void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, unsigned int count, unsigned long options); + +void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); /* * Options used in qi_submit_sync: * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8. */ #define QI_OPT_WAIT_DRAIN BIT(0) -void domain_update_iotlb(struct dmar_domain *domain); int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); void device_block_translation(struct device *dev); diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index e090ca07364bd4..7a6d188e3bea09 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -1352,12 +1352,11 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data, case X86_IRQ_ALLOC_TYPE_IOAPIC: /* Set source-id of interrupt request */ set_ioapic_sid(irte, info->devid); - apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n", - info->devid, irte->present, irte->fpd, - irte->dst_mode, irte->redir_hint, - irte->trigger_mode, irte->dlvry_mode, - irte->avail, irte->vector, irte->dest_id, - irte->sid, irte->sq, irte->svt); + apic_pr_verbose("IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n", + info->devid, irte->present, irte->fpd, irte->dst_mode, + irte->redir_hint, irte->trigger_mode, irte->dlvry_mode, + irte->avail, irte->vector, irte->dest_id, irte->sid, + irte->sq, irte->svt); sub_handle = info->ioapic.pin; break; case X86_IRQ_ALLOC_TYPE_HPET: diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c index 16a2bcf5cfeb9d..433c58944401f9 100644 --- a/drivers/iommu/intel/nested.c +++ b/drivers/iommu/intel/nested.c @@ -66,8 +66,6 @@ static int intel_nested_attach_dev(struct iommu_domain *domain, list_add(&info->link, &dmar_domain->devices); spin_unlock_irqrestore(&dmar_domain->lock, flags); - domain_update_iotlb(dmar_domain); - return 0; unassign_tag: cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID); @@ -85,6 +83,7 @@ static void intel_nested_domain_free(struct iommu_domain *domain) spin_lock(&s2_domain->s1_lock); list_del(&dmar_domain->s2_link); spin_unlock(&s2_domain->s1_lock); + kfree(dmar_domain->qi_batch); kfree(dmar_domain); } diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 
b51fc268dc845a..2e5fa0a232999f 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -264,9 +264,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, else iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); - /* Device IOTLB doesn't need to be flushed in caching mode. */ - if (!cap_caching_mode(iommu->cap)) - devtlb_invalidation_with_pasid(iommu, dev, pasid); + devtlb_invalidation_with_pasid(iommu, dev, pasid); } /* @@ -493,9 +491,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); - /* Device IOTLB doesn't need to be flushed in caching mode. */ - if (!cap_caching_mode(iommu->cap)) - devtlb_invalidation_with_pasid(iommu, dev, pasid); + devtlb_invalidation_with_pasid(iommu, dev, pasid); return 0; } @@ -572,9 +568,7 @@ void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu, pasid_cache_invalidation_with_pasid(iommu, did, pasid); qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); - /* Device IOTLB doesn't need to be flushed in caching mode. */ - if (!cap_caching_mode(iommu->cap)) - devtlb_invalidation_with_pasid(iommu, dev, pasid); + devtlb_invalidation_with_pasid(iommu, dev, pasid); } /** diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c index 44083d01852dbf..75f493bcb353ed 100644 --- a/drivers/iommu/intel/perfmon.c +++ b/drivers/iommu/intel/perfmon.c @@ -34,28 +34,9 @@ static struct attribute_group iommu_pmu_events_attr_group = { .attrs = attrs_empty, }; -static cpumask_t iommu_pmu_cpu_mask; - -static ssize_t -cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - return cpumap_print_to_pagebuf(true, buf, &iommu_pmu_cpu_mask); -} -static DEVICE_ATTR_RO(cpumask); - -static struct attribute *iommu_pmu_cpumask_attrs[] = { - &dev_attr_cpumask.attr, - NULL -}; - -static struct attribute_group iommu_pmu_cpumask_attr_group = { - .attrs = iommu_pmu_cpumask_attrs, -}; - static const struct attribute_group *iommu_pmu_attr_groups[] = { &iommu_pmu_format_attr_group, &iommu_pmu_events_attr_group, - &iommu_pmu_cpumask_attr_group, NULL }; @@ -565,6 +546,7 @@ static int __iommu_pmu_register(struct intel_iommu *iommu) iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups; iommu_pmu->pmu.attr_update = iommu_pmu_attr_update; iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; + iommu_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE; iommu_pmu->pmu.module = THIS_MODULE; return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1); @@ -773,89 +755,6 @@ static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu) iommu->perf_irq = 0; } -static int iommu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) -{ - struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node); - - if (cpumask_empty(&iommu_pmu_cpu_mask)) - cpumask_set_cpu(cpu, &iommu_pmu_cpu_mask); - - if (cpumask_test_cpu(cpu, &iommu_pmu_cpu_mask)) - iommu_pmu->cpu = cpu; - - return 0; -} - -static int iommu_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) -{ - struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node); - int target = cpumask_first(&iommu_pmu_cpu_mask); - - /* - * The iommu_pmu_cpu_mask has been updated when offline the CPU - * for the first iommu_pmu. Migrate the other iommu_pmu to the - * new target. 
- */ - if (target < nr_cpu_ids && target != iommu_pmu->cpu) { - perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target); - iommu_pmu->cpu = target; - return 0; - } - - if (!cpumask_test_and_clear_cpu(cpu, &iommu_pmu_cpu_mask)) - return 0; - - target = cpumask_any_but(cpu_online_mask, cpu); - - if (target < nr_cpu_ids) - cpumask_set_cpu(target, &iommu_pmu_cpu_mask); - else - return 0; - - perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target); - iommu_pmu->cpu = target; - - return 0; -} - -static int nr_iommu_pmu; -static enum cpuhp_state iommu_cpuhp_slot; - -static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu) -{ - int ret; - - if (!nr_iommu_pmu) { - ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, - "driver/iommu/intel/perfmon:online", - iommu_pmu_cpu_online, - iommu_pmu_cpu_offline); - if (ret < 0) - return ret; - iommu_cpuhp_slot = ret; - } - - ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node); - if (ret) { - if (!nr_iommu_pmu) - cpuhp_remove_multi_state(iommu_cpuhp_slot); - return ret; - } - nr_iommu_pmu++; - - return 0; -} - -static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu) -{ - cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node); - - if (--nr_iommu_pmu) - return; - - cpuhp_remove_multi_state(iommu_cpuhp_slot); -} - void iommu_pmu_register(struct intel_iommu *iommu) { struct iommu_pmu *iommu_pmu = iommu->pmu; @@ -866,17 +765,12 @@ void iommu_pmu_register(struct intel_iommu *iommu) if (__iommu_pmu_register(iommu)) goto err; - if (iommu_pmu_cpuhp_setup(iommu_pmu)) - goto unregister; - /* Set interrupt for overflow */ if (iommu_pmu_set_interrupt(iommu)) - goto cpuhp_free; + goto unregister; return; -cpuhp_free: - iommu_pmu_cpuhp_free(iommu_pmu); unregister: perf_pmu_unregister(&iommu_pmu->pmu); err: @@ -892,6 +786,5 @@ void iommu_pmu_unregister(struct intel_iommu *iommu) return; iommu_pmu_unset_interrupt(iommu); - iommu_pmu_cpuhp_free(iommu_pmu); perf_pmu_unregister(&iommu_pmu->pmu); } diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 0e3a9b38bef210..078d1e32a24eeb 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -184,7 +184,10 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) static void intel_mm_free_notifier(struct mmu_notifier *mn) { - kfree(container_of(mn, struct dmar_domain, notifier)); + struct dmar_domain *domain = container_of(mn, struct dmar_domain, notifier); + + kfree(domain->qi_batch); + kfree(domain); } static const struct mmu_notifier_ops intel_mmuops = { @@ -311,7 +314,7 @@ void intel_drain_pasid_prq(struct device *dev, u32 pasid) domain = info->domain; pdev = to_pci_dev(dev); sid = PCI_DEVID(info->bus, info->devfn); - did = domain_id_iommu(domain, iommu); + did = domain ? 
domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID; qdep = pci_ats_queue_depth(pdev); /* diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index ff4149ae1751d4..0e67f1721a3d98 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -274,13 +274,13 @@ static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries, sizeof(*ptep) * num_entries, DMA_TO_DEVICE); } -static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg) +static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries) { + for (int i = 0; i < num_entries; i++) + ptep[i] = 0; - *ptep = 0; - - if (!cfg->coherent_walk) - __arm_lpae_sync_pte(ptep, 1, cfg); + if (!cfg->coherent_walk && num_entries) + __arm_lpae_sync_pte(ptep, num_entries, cfg); } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, @@ -653,26 +653,29 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start; num_entries = min_t(int, pgcount, max_entries); - while (i < num_entries) { - pte = READ_ONCE(*ptep); + /* Find and handle non-leaf entries */ + for (i = 0; i < num_entries; i++) { + pte = READ_ONCE(ptep[i]); if (WARN_ON(!pte)) break; - __arm_lpae_clear_pte(ptep, &iop->cfg); - if (!iopte_leaf(pte, lvl, iop->fmt)) { + __arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1); + /* Also flush any partial walks */ io_pgtable_tlb_flush_walk(iop, iova + i * size, size, ARM_LPAE_GRANULE(data)); __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); - } else if (!iommu_iotlb_gather_queued(gather)) { - io_pgtable_tlb_add_page(iop, gather, iova + i * size, size); } - - ptep++; - i++; } + /* Clear the remaining entries */ + __arm_lpae_clear_pte(ptep, &iop->cfg, i); + + if (gather && !iommu_iotlb_gather_queued(gather)) + for (int j = 0; j < i; j++) + io_pgtable_tlb_add_page(iop, gather, iova + j * size, size); + return i * size; } else if (iopte_leaf(pte, lvl, iop->fmt)) { /* diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index ed6c5cb60c5aee..83c8e617a2c588 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -3578,6 +3578,7 @@ int iommu_replace_group_handle(struct iommu_group *group, ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL); if (ret) goto err_unlock; + handle->domain = new_domain; } ret = __iommu_group_set_domain(group, new_domain); diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index 3214a4c17c6b3b..5fd3dd42029015 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -1,12 +1,12 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */ +#include #include #include -#include #include -#include "../iommu-priv.h" +#include "../iommu-priv.h" #include "io_pagetable.h" #include "iommufd_private.h" @@ -327,8 +327,9 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup, return 0; } -static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging, - struct iommufd_device *idev) +static int +iommufd_device_attach_reserved_iova(struct iommufd_device *idev, + struct iommufd_hwpt_paging *hwpt_paging) { int rc; @@ -354,6 +355,7 @@ static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging, int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, struct iommufd_device *idev) { + struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt); int rc; 
mutex_lock(&idev->igroup->lock); @@ -363,8 +365,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, goto err_unlock; } - if (hwpt_is_paging(hwpt)) { - rc = iommufd_hwpt_paging_attach(to_hwpt_paging(hwpt), idev); + if (hwpt_paging) { + rc = iommufd_device_attach_reserved_iova(idev, hwpt_paging); if (rc) goto err_unlock; } @@ -387,9 +389,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, mutex_unlock(&idev->igroup->lock); return 0; err_unresv: - if (hwpt_is_paging(hwpt)) - iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt, - idev->dev); + if (hwpt_paging) + iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev); err_unlock: mutex_unlock(&idev->igroup->lock); return rc; @@ -399,6 +400,7 @@ struct iommufd_hw_pagetable * iommufd_hw_pagetable_detach(struct iommufd_device *idev) { struct iommufd_hw_pagetable *hwpt = idev->igroup->hwpt; + struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt); mutex_lock(&idev->igroup->lock); list_del(&idev->group_item); @@ -406,9 +408,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev) iommufd_hwpt_detach_device(hwpt, idev); idev->igroup->hwpt = NULL; } - if (hwpt_is_paging(hwpt)) - iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt, - idev->dev); + if (hwpt_paging) + iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev); mutex_unlock(&idev->igroup->lock); /* Caller must destroy hwpt */ @@ -440,17 +441,17 @@ iommufd_group_remove_reserved_iova(struct iommufd_group *igroup, } static int -iommufd_group_do_replace_paging(struct iommufd_group *igroup, - struct iommufd_hwpt_paging *hwpt_paging) +iommufd_group_do_replace_reserved_iova(struct iommufd_group *igroup, + struct iommufd_hwpt_paging *hwpt_paging) { - struct iommufd_hw_pagetable *old_hwpt = igroup->hwpt; + struct iommufd_hwpt_paging *old_hwpt_paging; struct iommufd_device *cur; int rc; lockdep_assert_held(&igroup->lock); - if (!hwpt_is_paging(old_hwpt) || - hwpt_paging->ioas != to_hwpt_paging(old_hwpt)->ioas) { + old_hwpt_paging = find_hwpt_paging(igroup->hwpt); + if (!old_hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas) { list_for_each_entry(cur, &igroup->device_list, group_item) { rc = iopt_table_enforce_dev_resv_regions( &hwpt_paging->ioas->iopt, cur->dev, NULL); @@ -473,6 +474,8 @@ static struct iommufd_hw_pagetable * iommufd_device_do_replace(struct iommufd_device *idev, struct iommufd_hw_pagetable *hwpt) { + struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt); + struct iommufd_hwpt_paging *old_hwpt_paging; struct iommufd_group *igroup = idev->igroup; struct iommufd_hw_pagetable *old_hwpt; unsigned int num_devices; @@ -491,9 +494,8 @@ iommufd_device_do_replace(struct iommufd_device *idev, } old_hwpt = igroup->hwpt; - if (hwpt_is_paging(hwpt)) { - rc = iommufd_group_do_replace_paging(igroup, - to_hwpt_paging(hwpt)); + if (hwpt_paging) { + rc = iommufd_group_do_replace_reserved_iova(igroup, hwpt_paging); if (rc) goto err_unlock; } @@ -502,11 +504,10 @@ iommufd_device_do_replace(struct iommufd_device *idev, if (rc) goto err_unresv; - if (hwpt_is_paging(old_hwpt) && - (!hwpt_is_paging(hwpt) || - to_hwpt_paging(hwpt)->ioas != to_hwpt_paging(old_hwpt)->ioas)) - iommufd_group_remove_reserved_iova(igroup, - to_hwpt_paging(old_hwpt)); + old_hwpt_paging = find_hwpt_paging(old_hwpt); + if (old_hwpt_paging && + (!hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas)) + iommufd_group_remove_reserved_iova(igroup, old_hwpt_paging); igroup->hwpt = hwpt; @@ -524,9 +525,8 @@ 
iommufd_device_do_replace(struct iommufd_device *idev, /* Caller must destroy old_hwpt */ return old_hwpt; err_unresv: - if (hwpt_is_paging(hwpt)) - iommufd_group_remove_reserved_iova(igroup, - to_hwpt_paging(hwpt)); + if (hwpt_paging) + iommufd_group_remove_reserved_iova(igroup, hwpt_paging); err_unlock: mutex_unlock(&idev->igroup->lock); return ERR_PTR(rc); diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c index a643d5c7c535f5..e590973ce5cfa2 100644 --- a/drivers/iommu/iommufd/fault.c +++ b/drivers/iommu/iommufd/fault.c @@ -3,14 +3,14 @@ */ #define pr_fmt(fmt) "iommufd: " fmt +#include #include #include +#include #include #include -#include #include #include -#include #include #include "../iommu-priv.h" @@ -161,7 +161,6 @@ static int __fault_domain_replace_dev(struct iommufd_device *idev, if (!handle) return -ENOMEM; - handle->handle.domain = hwpt->domain; handle->idev = idev; ret = iommu_replace_group_handle(idev->igroup->group, hwpt->domain, &handle->handle); @@ -361,7 +360,6 @@ static const struct file_operations iommufd_fault_fops = { .write = iommufd_fault_fops_write, .poll = iommufd_fault_fops_poll, .release = iommufd_fault_fops_release, - .llseek = no_llseek, }; int iommufd_fault_alloc(struct iommufd_ucmd *ucmd) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index aefde4443671ed..d06bf6e6c19fd2 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -225,7 +225,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) || !user_data->len || !ops->domain_alloc_user) return ERR_PTR(-EOPNOTSUPP); - if (parent->auto_domain || !parent->nest_parent) + if (parent->auto_domain || !parent->nest_parent || + parent->common.domain->owner != ops) return ERR_PTR(-EINVAL); hwpt_nested = __iommufd_object_alloc( diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index 05fd9d3abf1b80..4bf7ccd39d465c 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -8,17 +8,17 @@ * The datastructure uses the iopt_pages to optimize the storage of the PFNs * between the domains and xarray. 
*/ +#include +#include +#include #include #include -#include #include -#include #include -#include #include -#include "io_pagetable.h" #include "double_span.h" +#include "io_pagetable.h" struct iopt_pages_list { struct iopt_pages *pages; @@ -112,6 +112,7 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova, unsigned long page_offset = uptr % PAGE_SIZE; struct interval_tree_double_span_iter used_span; struct interval_tree_span_iter allowed_span; + unsigned long max_alignment = PAGE_SIZE; unsigned long iova_alignment; lockdep_assert_held(&iopt->iova_rwsem); @@ -131,6 +132,13 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova, roundup_pow_of_two(length), 1UL << __ffs64(uptr)); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + max_alignment = HPAGE_SIZE; +#endif + /* Protect against ALIGN() overflow */ + if (iova_alignment >= max_alignment) + iova_alignment = max_alignment; + if (iova_alignment < iopt->iova_alignment) return -EINVAL; diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h index 0ec3509b7e3392..c61d74471684ee 100644 --- a/drivers/iommu/iommufd/io_pagetable.h +++ b/drivers/iommu/iommufd/io_pagetable.h @@ -6,8 +6,8 @@ #define __IO_PAGETABLE_H #include -#include #include +#include #include #include "iommufd_private.h" diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c index 157a89b993e438..2c4b2bb11e78ce 100644 --- a/drivers/iommu/iommufd/ioas.c +++ b/drivers/iommu/iommufd/ioas.c @@ -3,8 +3,8 @@ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */ #include -#include #include +#include #include #include "io_pagetable.h" diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 92efe30a8f0d00..f1d865e6fab66a 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -4,13 +4,14 @@ #ifndef __IOMMUFD_PRIVATE_H #define __IOMMUFD_PRIVATE_H -#include -#include -#include -#include #include #include +#include +#include +#include +#include #include + #include "../iommu-priv.h" struct iommu_domain; @@ -324,6 +325,25 @@ to_hwpt_paging(struct iommufd_hw_pagetable *hwpt) return container_of(hwpt, struct iommufd_hwpt_paging, common); } +static inline struct iommufd_hwpt_nested * +to_hwpt_nested(struct iommufd_hw_pagetable *hwpt) +{ + return container_of(hwpt, struct iommufd_hwpt_nested, common); +} + +static inline struct iommufd_hwpt_paging * +find_hwpt_paging(struct iommufd_hw_pagetable *hwpt) +{ + switch (hwpt->obj.type) { + case IOMMUFD_OBJ_HWPT_PAGING: + return to_hwpt_paging(hwpt); + case IOMMUFD_OBJ_HWPT_NESTED: + return to_hwpt_nested(hwpt)->parent; + default: + return NULL; + } +} + static inline struct iommufd_hwpt_paging * iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id) { @@ -490,8 +510,10 @@ static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt, struct iommufd_device *idev) { - if (hwpt->fault) + if (hwpt->fault) { iommufd_fault_domain_detach_dev(hwpt, idev); + return; + } iommu_detach_group(hwpt->domain, idev->igroup->group); } diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index acbbba1c66716e..f4bc23a92f9a2e 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -4,8 +4,8 @@ #ifndef _UAPI_IOMMUFD_TEST_H #define _UAPI_IOMMUFD_TEST_H -#include #include +#include enum { IOMMU_TEST_OP_ADD_RESERVED = 1, 
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c index b9e964b1ad5cc1..d90b9e253412ff 100644 --- a/drivers/iommu/iommufd/iova_bitmap.c +++ b/drivers/iommu/iommufd/iova_bitmap.c @@ -3,10 +3,10 @@ * Copyright (c) 2022, Oracle and/or its affiliates. * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved */ +#include #include #include #include -#include #define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE) diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index 83bbd7c5d16084..b5f5d27ee9634e 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -8,15 +8,15 @@ */ #define pr_fmt(fmt) "iommufd: " fmt +#include #include #include -#include -#include +#include #include +#include #include -#include +#include #include -#include #include "io_pagetable.h" #include "iommufd_private.h" diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index 117f644a0c5b75..93d806c9c07318 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -45,16 +45,16 @@ * last_iova + 1 can overflow. An iopt_pages index will always be much less than * ULONG_MAX so last_index + 1 cannot overflow. */ +#include +#include +#include +#include #include #include -#include #include -#include -#include -#include -#include "io_pagetable.h" #include "double_span.h" +#include "io_pagetable.h" #ifndef CONFIG_IOMMUFD_TEST #define TEMP_MEMORY_LIMIT 65536 diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 222cfc11ebfd00..540437be168a0d 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -3,13 +3,14 @@ * * Kernel side components to support tools/testing/selftests/iommu */ -#include -#include -#include -#include #include +#include #include +#include +#include #include +#include +#include #include #include "../iommu-priv.h" @@ -1342,7 +1343,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, unsigned long page_size, void __user *uptr, u32 flags) { - unsigned long bitmap_size, i, max; + unsigned long i, max; struct iommu_test_cmd *cmd = ucmd->cmd; struct iommufd_hw_pagetable *hwpt; struct mock_iommu_domain *mock; @@ -1363,15 +1364,14 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, } max = length / page_size; - bitmap_size = DIV_ROUND_UP(max, BITS_PER_BYTE); - - tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); + tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long), + GFP_KERNEL_ACCOUNT); if (!tmp) { rc = -ENOMEM; goto out_put; } - if (copy_from_user(tmp, uptr, bitmap_size)) { + if (copy_from_user(tmp, uptr,DIV_ROUND_UP(max, BITS_PER_BYTE))) { rc = -EFAULT; goto out_free; } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index b657cc09605f4d..ff55b8c3071269 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -804,8 +804,7 @@ static int ipmmu_init_arm_mapping(struct device *dev) if (!mmu->mapping) { struct dma_iommu_mapping *mapping; - mapping = arm_iommu_create_mapping(&platform_bus_type, - SZ_1G, SZ_2G); + mapping = arm_iommu_create_mapping(dev, SZ_1G, SZ_2G); if (IS_ERR(mapping)) { dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); ret = PTR_ERR(mapping); diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index c6ea5b4baff3be..ee4e55b6b19003 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -433,8 +433,7 @@ static int 
mtk_iommu_v1_create_mapping(struct device *dev, mtk_mapping = data->mapping; if (!mtk_mapping) { /* MTK iommu support 4GB iova address space. */ - mtk_mapping = arm_iommu_create_mapping(&platform_bus_type, - 0, 1ULL << 32); + mtk_mapping = arm_iommu_create_mapping(dev, 0, 1ULL << 32); if (IS_ERR(mtk_mapping)) return PTR_ERR(mtk_mapping); diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 78d61da75257c2..e7a6a1611d193b 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -214,7 +214,7 @@ void of_iommu_get_resv_regions(struct device *dev, struct list_head *list) * that represent reservations in the IOVA space, which are regions that should * not be mapped. */ - if (of_find_property(it.node, "reg", NULL)) { + if (of_property_present(it.node, "reg")) { err = of_address_to_resource(it.node, 0, &phys); if (err < 0) { dev_err(dev, "failed to parse memory region %pOF: %d\n", diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index d078bdc48c38f1..341cd9ca5a05e4 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -685,6 +685,7 @@ config LOONGSON_PCH_MSI depends on PCI default MACH_LOONGSON64 select IRQ_DOMAIN_HIERARCHY + select IRQ_MSI_LIB select PCI_MSI help Support for the Loongson PCH MSI Controller. diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 15635812b2d660..e3679ec2b9f76e 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -110,7 +110,7 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o -obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o +obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o irq-loongarch-avec.o obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 5c534d9fd2b00d..da5250f0155cfa 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -234,7 +234,10 @@ enum fiq_hwirq { AIC_NR_FIQ }; +/* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */ static DEFINE_STATIC_KEY_TRUE(use_fast_ipi); +/* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */ +static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi); struct aic_info { int version; @@ -252,6 +255,7 @@ struct aic_info { /* Features */ bool fast_ipi; + bool local_fast_ipi; }; static const struct aic_info aic1_info __initconst = { @@ -270,17 +274,32 @@ static const struct aic_info aic1_fipi_info __initconst = { .fast_ipi = true, }; +static const struct aic_info aic1_local_fipi_info __initconst = { + .version = 1, + + .event = AIC_EVENT, + .target_cpu = AIC_TARGET_CPU, + + .fast_ipi = true, + .local_fast_ipi = true, +}; + static const struct aic_info aic2_info __initconst = { .version = 2, .irq_cfg = AIC2_IRQ_CFG, .fast_ipi = true, + .local_fast_ipi = true, }; static const struct of_device_id aic_info_match[] = { { .compatible = "apple,t8103-aic", + .data = &aic1_local_fipi_info, + }, + { + .compatible = "apple,t8015-aic", .data = &aic1_fipi_info, }, { @@ -532,14 +551,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs) * we check for everything here, even things we don't support yet. 
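The two static keys introduced above gate independent features: use_fast_ipi covers the A11-era fast-IPI registers, while use_local_fast_ipi additionally covers the per-cluster SYS_IMP_APL_IPI_RR_LOCAL_EL1 register that only M1-class parts have. A rough sketch of how the selection plays out for the compatibles wired up above; example_fast_ipi_write() is a made-up name that simply mirrors the branch in aic_ipi_send_fast() further down:

static void example_fast_ipi_write(u64 cluster, u64 cpu, bool same_cluster)
{
	/* "apple,t8103-aic" (aic1_local_fipi_info): both keys stay enabled,
	 * so same-cluster IPIs use the per-cluster register. */
	if (static_branch_likely(&use_local_fast_ipi) && same_cluster) {
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, cpu),
			       SYS_IMP_APL_IPI_RR_LOCAL_EL1);
		return;
	}

	/* "apple,t8015-aic" (aic1_fipi_info): only use_fast_ipi is enabled,
	 * so every fast IPI encodes the cluster and goes through the
	 * global register. */
	write_sysreg_s(FIELD_PREP(IPI_RR_CPU, cpu) |
		       FIELD_PREP(IPI_RR_CLUSTER, cluster),
		       SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
}

On plain AIC1 parts neither key is set, and the fast-IPI registers are not touched at all (see the guarded writes in aic_init_cpu() below).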
*/ - if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) { - if (static_branch_likely(&use_fast_ipi)) { - aic_handle_ipi(regs); - } else { - pr_err_ratelimited("Fast IPI fired. Acking.\n"); - write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); - } - } + if (static_branch_likely(&use_fast_ipi) && + (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING)) + aic_handle_ipi(regs); if (TIMER_FIRING(read_sysreg(cntp_ctl_el0))) generic_handle_domain_irq(aic_irqc->hw_domain, @@ -574,8 +588,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs) AIC_FIQ_HWIRQ(irq)); } - if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ && - (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) { + if (static_branch_likely(&use_fast_ipi) && + (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) && + (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) { /* Same story with uncore PMCs */ pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n"); sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE, @@ -750,12 +765,12 @@ static void aic_ipi_send_fast(int cpu) u64 cluster = MPIDR_CLUSTER(mpidr); u64 idx = MPIDR_CPU(mpidr); - if (MPIDR_CLUSTER(my_mpidr) == cluster) - write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), - SYS_IMP_APL_IPI_RR_LOCAL_EL1); - else + if (static_branch_likely(&use_local_fast_ipi) && MPIDR_CLUSTER(my_mpidr) == cluster) { + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), SYS_IMP_APL_IPI_RR_LOCAL_EL1); + } else { write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster), SYS_IMP_APL_IPI_RR_GLOBAL_EL1); + } isb(); } @@ -811,7 +826,8 @@ static int aic_init_cpu(unsigned int cpu) /* Mask all hard-wired per-CPU IRQ/FIQ sources */ /* Pending Fast IPI FIQs */ - write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); + if (static_branch_likely(&use_fast_ipi)) + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); /* Timer FIQs */ sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK); @@ -832,8 +848,10 @@ static int aic_init_cpu(unsigned int cpu) FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF)); /* Uncore PMC FIQ */ - sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE, - FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF)); + if (static_branch_likely(&use_fast_ipi)) { + sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE, + FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF)); + } /* Commit all of the above */ isb(); @@ -987,11 +1005,12 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */ off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */ - if (irqc->info.fast_ipi) - static_branch_enable(&use_fast_ipi); - else + if (!irqc->info.fast_ipi) static_branch_disable(&use_fast_ipi); + if (!irqc->info.local_fast_ipi) + static_branch_disable(&use_local_fast_ipi); + irqc->info.die_stride = off - start_off; irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node), diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index dce2b80bf439cc..d7c5ef24847440 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Marvell Armada 370 and Armada XP SoC IRQ handling * @@ -7,13 +8,11 @@ * Gregory CLEMENT * Thomas Petazzoni * Ben Dooks - * - * This file is licensed under the terms of the GNU General Public - * License version 2. 
This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ +#include #include +#include #include #include #include @@ -66,19 +65,17 @@ * device * * The "global interrupt mask/unmask" is modified using the - * ARMADA_370_XP_INT_SET_ENABLE_OFFS and - * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative - * to "main_int_base". + * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE + * registers, which are relative to "mpic->base". * - * The "per-CPU mask/unmask" is modified using the - * ARMADA_370_XP_INT_SET_MASK_OFFS and - * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to - * "per_cpu_int_base". This base address points to a special address, + * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK + * and MPIC_INT_CLEAR_MASK registers, which are relative to + * "mpic->per_cpu". This base address points to a special address, * which automatically accesses the registers of the current CPU. * * The per-CPU mask/unmask can also be adjusted using the global - * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use - * to configure interrupt affinity. + * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to + * configure interrupt affinity. * * Due to this model, all interrupts need to be mask/unmasked at two * different levels: at the global level and at the per-CPU level. @@ -92,9 +89,8 @@ * the current CPU, running the ->map() code. This allows to have * the interrupt unmasked at this level in non-SMP * configurations. In SMP configurations, the ->set_affinity() - * callback is called, which using the - * ARMADA_370_XP_INT_SOURCE_CTL() readjusts the per-CPU mask/unmask - * for the interrupt. + * callback is called, which using the MPIC_INT_SOURCE_CTL() + * readjusts the per-CPU mask/unmask for the interrupt. * * The ->mask() and ->unmask() operations only mask/unmask the * interrupt at the "global" level. @@ -116,58 +112,84 @@ * at the per-CPU level. 
*/ -/* Registers relative to main_int_base */ -#define ARMADA_370_XP_INT_CONTROL (0x00) -#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04) -#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) -#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) -#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) -#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF -#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid) - -/* Registers relative to per_cpu_int_base */ -#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x08) -#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0x0c) -#define ARMADA_375_PPI_CAUSE (0x10) -#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) -#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48) -#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C) -#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54) -#define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu) - -#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) +/* Registers relative to mpic->base */ +#define MPIC_INT_CONTROL 0x00 +#define MPIC_INT_CONTROL_NUMINT_MASK GENMASK(12, 2) +#define MPIC_SW_TRIG_INT 0x04 +#define MPIC_INT_SET_ENABLE 0x30 +#define MPIC_INT_CLEAR_ENABLE 0x34 +#define MPIC_INT_SOURCE_CTL(hwirq) (0x100 + (hwirq) * 4) +#define MPIC_INT_SOURCE_CPU_MASK GENMASK(3, 0) +#define MPIC_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << (cpuid)) + +/* Registers relative to mpic->per_cpu */ +#define MPIC_IN_DRBEL_CAUSE 0x08 +#define MPIC_IN_DRBEL_MASK 0x0c +#define MPIC_PPI_CAUSE 0x10 +#define MPIC_CPU_INTACK 0x44 +#define MPIC_CPU_INTACK_IID_MASK GENMASK(9, 0) +#define MPIC_INT_SET_MASK 0x48 +#define MPIC_INT_CLEAR_MASK 0x4C +#define MPIC_INT_FABRIC_MASK 0x54 +#define MPIC_INT_CAUSE_PERF(cpu) BIT(cpu) + +#define MPIC_PER_CPU_IRQS_NR 29 /* IPI and MSI interrupt definitions for IPI platforms */ -#define IPI_DOORBELL_START (0) -#define IPI_DOORBELL_END (8) -#define IPI_DOORBELL_MASK 0xFF -#define PCI_MSI_DOORBELL_START (16) -#define PCI_MSI_DOORBELL_NR (16) -#define PCI_MSI_DOORBELL_END (32) -#define PCI_MSI_DOORBELL_MASK 0xFFFF0000 +#define IPI_DOORBELL_NR 8 +#define IPI_DOORBELL_MASK GENMASK(7, 0) +#define PCI_MSI_DOORBELL_START 16 +#define PCI_MSI_DOORBELL_NR 16 +#define PCI_MSI_DOORBELL_MASK GENMASK(31, 16) /* MSI interrupt definitions for non-IPI platforms */ #define PCI_MSI_FULL_DOORBELL_START 0 #define PCI_MSI_FULL_DOORBELL_NR 32 -#define PCI_MSI_FULL_DOORBELL_END 32 #define PCI_MSI_FULL_DOORBELL_MASK GENMASK(31, 0) #define PCI_MSI_FULL_DOORBELL_SRC0_MASK GENMASK(15, 0) #define PCI_MSI_FULL_DOORBELL_SRC1_MASK GENMASK(31, 16) -static void __iomem *per_cpu_int_base; -static void __iomem *main_int_base; -static struct irq_domain *armada_370_xp_mpic_domain; -static u32 doorbell_mask_reg; -static int parent_irq; +/** + * struct mpic - MPIC private data structure + * @base: MPIC registers base address + * @per_cpu: per-CPU registers base address + * @parent_irq: parent IRQ if MPIC is not top-level interrupt controller + * @domain: MPIC main interrupt domain + * @ipi_domain: IPI domain + * @msi_domain: MSI domain + * @msi_inner_domain: MSI inner domain + * @msi_used: bitmap of used MSI numbers + * @msi_lock: mutex serializing access to @msi_used + * @msi_doorbell_addr: physical address of MSI doorbell register + * @msi_doorbell_mask: mask of available doorbell bits for MSIs (either PCI_MSI_DOORBELL_MASK or + * PCI_MSI_FULL_DOORBELL_MASK) + * @msi_doorbell_start: first set bit in @msi_doorbell_mask + * @msi_doorbell_size: number of set bits in @msi_doorbell_mask + * @doorbell_mask: doorbell mask of MSIs and IPIs, stored on suspend, restored on resume + */ 
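A recurring pattern in this register rework is replacing open-coded shift-and-mask arithmetic with GENMASK()/FIELD_GET(). For example, the interrupt acknowledge ID that the old handler computed as "irqstat & 0x3FF" is now extracted through MPIC_CPU_INTACK_IID_MASK. A small sketch of the equivalence; mpic_example_ack() is a hypothetical helper mirroring mpic_handle_irq() further down:

static irq_hw_number_t mpic_example_ack(void __iomem *per_cpu)
{
	u32 irqstat = readl_relaxed(per_cpu + MPIC_CPU_INTACK);

	/*
	 * MPIC_CPU_INTACK_IID_MASK is GENMASK(9, 0), so this yields the same
	 * 10-bit interrupt ID the old code computed as "irqstat & 0x3FF".
	 */
	return FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat);
}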
+struct mpic { + void __iomem *base; + void __iomem *per_cpu; + int parent_irq; + struct irq_domain *domain; +#ifdef CONFIG_SMP + struct irq_domain *ipi_domain; +#endif #ifdef CONFIG_PCI_MSI -static struct irq_domain *armada_370_xp_msi_domain; -static struct irq_domain *armada_370_xp_msi_inner_domain; -static DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR); -static DEFINE_MUTEX(msi_used_lock); -static phys_addr_t msi_doorbell_addr; + struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; + DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR); + struct mutex msi_lock; + phys_addr_t msi_doorbell_addr; + u32 msi_doorbell_mask; + unsigned int msi_doorbell_start, msi_doorbell_size; #endif + u32 doorbell_mask; +}; + +static struct mpic *mpic_data __ro_after_init; -static inline bool is_ipi_available(void) +static inline bool mpic_is_ipi_available(struct mpic *mpic) { /* * We distinguish IPI availability in the IC by the IC not having a @@ -175,39 +197,12 @@ static inline bool is_ipi_available(void) * interrupt controller (e.g. GIC) that takes care of inter-processor * interrupts. */ - return parent_irq <= 0; -} - -static inline u32 msi_doorbell_mask(void) -{ - return is_ipi_available() ? PCI_MSI_DOORBELL_MASK : - PCI_MSI_FULL_DOORBELL_MASK; -} - -static inline unsigned int msi_doorbell_start(void) -{ - return is_ipi_available() ? PCI_MSI_DOORBELL_START : - PCI_MSI_FULL_DOORBELL_START; + return mpic->parent_irq <= 0; } -static inline unsigned int msi_doorbell_size(void) +static inline bool mpic_is_percpu_irq(irq_hw_number_t hwirq) { - return is_ipi_available() ? PCI_MSI_DOORBELL_NR : - PCI_MSI_FULL_DOORBELL_NR; -} - -static inline unsigned int msi_doorbell_end(void) -{ - return is_ipi_available() ? PCI_MSI_DOORBELL_END : - PCI_MSI_FULL_DOORBELL_END; -} - -static inline bool is_percpu_irq(irq_hw_number_t irq) -{ - if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS) - return true; - - return false; + return hwirq < MPIC_PER_CPU_IRQS_NR; } /* @@ -215,55 +210,53 @@ static inline bool is_percpu_irq(irq_hw_number_t irq) * For shared global interrupts, mask/unmask global enable bit * For CPU interrupts, mask/unmask the calling CPU's bit */ -static void armada_370_xp_irq_mask(struct irq_data *d) +static void mpic_irq_mask(struct irq_data *d) { + struct mpic *mpic = irq_data_get_irq_chip_data(d); irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (!is_percpu_irq(hwirq)) - writel(hwirq, main_int_base + - ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); + if (!mpic_is_percpu_irq(hwirq)) + writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE); else - writel(hwirq, per_cpu_int_base + - ARMADA_370_XP_INT_SET_MASK_OFFS); + writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK); } -static void armada_370_xp_irq_unmask(struct irq_data *d) +static void mpic_irq_unmask(struct irq_data *d) { + struct mpic *mpic = irq_data_get_irq_chip_data(d); irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (!is_percpu_irq(hwirq)) - writel(hwirq, main_int_base + - ARMADA_370_XP_INT_SET_ENABLE_OFFS); + if (!mpic_is_percpu_irq(hwirq)) + writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); else - writel(hwirq, per_cpu_int_base + - ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); } #ifdef CONFIG_PCI_MSI -static struct irq_chip armada_370_xp_msi_irq_chip = { - .name = "MPIC MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, +static struct irq_chip mpic_msi_irq_chip = { + .name = "MPIC MSI", + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, }; -static struct msi_domain_info 
armada_370_xp_msi_domain_info = { +static struct msi_domain_info mpic_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), - .chip = &armada_370_xp_msi_irq_chip, + .chip = &mpic_msi_irq_chip, }; -static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) { - unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(data)); + unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); + struct mpic *mpic = irq_data_get_irq_chip_data(d); - msg->address_lo = lower_32_bits(msi_doorbell_addr); - msg->address_hi = upper_32_bits(msi_doorbell_addr); - msg->data = BIT(cpu + 8) | (data->hwirq + msi_doorbell_start()); + msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr); + msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr); + msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start); } -static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) +static int mpic_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { unsigned int cpu; @@ -275,33 +268,34 @@ static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data, if (cpu >= nr_cpu_ids) return -EINVAL; - irq_data_update_effective_affinity(irq_data, cpumask_of(cpu)); + irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK; } -static struct irq_chip armada_370_xp_msi_bottom_irq_chip = { +static struct irq_chip mpic_msi_bottom_irq_chip = { .name = "MPIC MSI", - .irq_compose_msi_msg = armada_370_xp_compose_msi_msg, - .irq_set_affinity = armada_370_xp_msi_set_affinity, + .irq_compose_msi_msg = mpic_compose_msi_msg, + .irq_set_affinity = mpic_msi_set_affinity, }; -static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *args) +static int mpic_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, + void *args) { - int hwirq, i; + struct mpic *mpic = domain->host_data; + int hwirq; - mutex_lock(&msi_used_lock); - hwirq = bitmap_find_free_region(msi_used, msi_doorbell_size(), + mutex_lock(&mpic->msi_lock); + hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size, order_base_2(nr_irqs)); - mutex_unlock(&msi_used_lock); + mutex_unlock(&mpic->msi_lock); if (hwirq < 0) return -ENOSPC; - for (i = 0; i < nr_irqs; i++) { + for (unsigned int i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, hwirq + i, - &armada_370_xp_msi_bottom_irq_chip, + &mpic_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); } @@ -309,76 +303,84 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq, return 0; } -static void armada_370_xp_msi_free(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) +static void mpic_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct mpic *mpic = domain->host_data; - mutex_lock(&msi_used_lock); - bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs)); - mutex_unlock(&msi_used_lock); + mutex_lock(&mpic->msi_lock); + bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs)); + mutex_unlock(&mpic->msi_lock); } -static const struct irq_domain_ops armada_370_xp_msi_domain_ops = { - .alloc = armada_370_xp_msi_alloc, - .free = armada_370_xp_msi_free, 
+static const struct irq_domain_ops mpic_msi_domain_ops = { + .alloc = mpic_msi_alloc, + .free = mpic_msi_free, }; -static void armada_370_xp_msi_reenable_percpu(void) +static void mpic_msi_reenable_percpu(struct mpic *mpic) { u32 reg; /* Enable MSI doorbell mask and combined cpu local interrupt */ - reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); - reg |= msi_doorbell_mask(); - writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); + reg |= mpic->msi_doorbell_mask; + writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); /* Unmask local doorbell interrupt */ - writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); } -static int armada_370_xp_msi_init(struct device_node *node, - phys_addr_t main_int_phys_base) +static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node, + phys_addr_t main_int_phys_base) { - msi_doorbell_addr = main_int_phys_base + - ARMADA_370_XP_SW_TRIG_INT_OFFS; + mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT; + + mutex_init(&mpic->msi_lock); + + if (mpic_is_ipi_available(mpic)) { + mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START; + mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR; + mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK; + } else { + mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START; + mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR; + mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK; + } - armada_370_xp_msi_inner_domain = - irq_domain_add_linear(NULL, msi_doorbell_size(), - &armada_370_xp_msi_domain_ops, NULL); - if (!armada_370_xp_msi_inner_domain) + mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size, + &mpic_msi_domain_ops, mpic); + if (!mpic->msi_inner_domain) return -ENOMEM; - armada_370_xp_msi_domain = - pci_msi_create_irq_domain(of_node_to_fwnode(node), - &armada_370_xp_msi_domain_info, - armada_370_xp_msi_inner_domain); - if (!armada_370_xp_msi_domain) { - irq_domain_remove(armada_370_xp_msi_inner_domain); + mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info, + mpic->msi_inner_domain); + if (!mpic->msi_domain) { + irq_domain_remove(mpic->msi_inner_domain); return -ENOMEM; } - armada_370_xp_msi_reenable_percpu(); + mpic_msi_reenable_percpu(mpic); /* Unmask low 16 MSI irqs on non-IPI platforms */ - if (!is_ipi_available()) - writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + if (!mpic_is_ipi_available(mpic)) + writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); return 0; } #else -static __maybe_unused void armada_370_xp_msi_reenable_percpu(void) {} +static __maybe_unused void mpic_msi_reenable_percpu(struct mpic *mpic) {} -static inline int armada_370_xp_msi_init(struct device_node *node, - phys_addr_t main_int_phys_base) +static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node, + phys_addr_t main_int_phys_base) { return 0; } #endif -static void armada_xp_mpic_perf_init(void) +static void mpic_perf_init(struct mpic *mpic) { - unsigned long cpuid; + u32 cpuid; /* * This Performance Counter Overflow interrupt is specific for @@ -390,38 +392,39 @@ static void armada_xp_mpic_perf_init(void) cpuid = cpu_logical_map(smp_processor_id()); /* Enable Performance Counter Overflow interrupts */ - writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid), - per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS); + writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK); } #ifdef CONFIG_SMP 
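A worked example of the doorbell encoding produced by mpic_compose_msi_msg() above (the numbers are purely illustrative): on an IPI-capable platform msi_doorbell_start is PCI_MSI_DOORBELL_START (16), so an MSI mapped to inner-domain hwirq 3 and targeted at CPU 1 carries:

/* mpic_example_msi_data() is a hypothetical helper, shown only for the arithmetic */
static u32 mpic_example_msi_data(void)
{
	/* BIT(1 + 8) | (3 + 16) == 0x200 | 0x13 == 0x213 */
	return BIT(1 + 8) | (3 + 16);
}

The device writes this value to the MPIC_SW_TRIG_INT doorbell address: bit 9 selects CPU 1, doorbell 19 is latched in MPIC_IN_DRBEL_CAUSE, and mpic_handle_msi_irq() subtracts msi_doorbell_start again to dispatch inner-domain hwirq 3.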
-static struct irq_domain *ipi_domain; - -static void armada_370_xp_ipi_mask(struct irq_data *d) +static void mpic_ipi_mask(struct irq_data *d) { + struct mpic *mpic = irq_data_get_irq_chip_data(d); u32 reg; - reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + + reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); reg &= ~BIT(d->hwirq); - writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); } -static void armada_370_xp_ipi_unmask(struct irq_data *d) +static void mpic_ipi_unmask(struct irq_data *d) { + struct mpic *mpic = irq_data_get_irq_chip_data(d); u32 reg; - reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + + reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); reg |= BIT(d->hwirq); - writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); } -static void armada_370_xp_ipi_send_mask(struct irq_data *d, - const struct cpumask *mask) +static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) { - unsigned long map = 0; - int cpu; + struct mpic *mpic = irq_data_get_irq_chip_data(d); + unsigned int cpu; + u32 map = 0; /* Convert our logical CPU mask into a physical one. */ for_each_cpu(cpu, mask) - map |= 1 << cpu_logical_map(cpu); + map |= BIT(cpu_logical_map(cpu)); /* * Ensure that stores to Normal memory are visible to the @@ -430,451 +433,465 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d, dsb(); /* submit softirq */ - writel((map << 8) | d->hwirq, main_int_base + - ARMADA_370_XP_SW_TRIG_INT_OFFS); + writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT); } -static void armada_370_xp_ipi_ack(struct irq_data *d) +static void mpic_ipi_ack(struct irq_data *d) { - writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + struct mpic *mpic = irq_data_get_irq_chip_data(d); + + writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); } -static struct irq_chip ipi_irqchip = { +static struct irq_chip mpic_ipi_irqchip = { .name = "IPI", - .irq_ack = armada_370_xp_ipi_ack, - .irq_mask = armada_370_xp_ipi_mask, - .irq_unmask = armada_370_xp_ipi_unmask, - .ipi_send_mask = armada_370_xp_ipi_send_mask, + .irq_ack = mpic_ipi_ack, + .irq_mask = mpic_ipi_mask, + .irq_unmask = mpic_ipi_unmask, + .ipi_send_mask = mpic_ipi_send_mask, }; -static int armada_370_xp_ipi_alloc(struct irq_domain *d, - unsigned int virq, - unsigned int nr_irqs, void *args) +static int mpic_ipi_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *args) { - int i; - - for (i = 0; i < nr_irqs; i++) { + for (unsigned int i = 0; i < nr_irqs; i++) { irq_set_percpu_devid(virq + i); - irq_domain_set_info(d, virq + i, i, &ipi_irqchip, - d->host_data, - handle_percpu_devid_irq, - NULL, NULL); + irq_domain_set_info(d, virq + i, i, &mpic_ipi_irqchip, d->host_data, + handle_percpu_devid_irq, NULL, NULL); } return 0; } -static void armada_370_xp_ipi_free(struct irq_domain *d, - unsigned int virq, - unsigned int nr_irqs) +static void mpic_ipi_free(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs) { /* Not freeing IPIs */ } -static const struct irq_domain_ops ipi_domain_ops = { - .alloc = armada_370_xp_ipi_alloc, - .free = armada_370_xp_ipi_free, +static const struct irq_domain_ops mpic_ipi_domain_ops = { + .alloc = mpic_ipi_alloc, + .free = mpic_ipi_free, }; -static void ipi_resume(void) +static void mpic_ipi_resume(struct mpic *mpic) { - int i; - - for (i = 0; i < IPI_DOORBELL_END; i++) { - int 
irq; + for (irq_hw_number_t i = 0; i < IPI_DOORBELL_NR; i++) { + unsigned int virq = irq_find_mapping(mpic->ipi_domain, i); + struct irq_data *d; - irq = irq_find_mapping(ipi_domain, i); - if (irq <= 0) + if (!virq || !irq_percpu_is_enabled(virq)) continue; - if (irq_percpu_is_enabled(irq)) { - struct irq_data *d; - d = irq_domain_get_irq_data(ipi_domain, irq); - armada_370_xp_ipi_unmask(d); - } + + d = irq_domain_get_irq_data(mpic->ipi_domain, virq); + mpic_ipi_unmask(d); } } -static __init void armada_xp_ipi_init(struct device_node *node) +static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node) { int base_ipi; - ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), - IPI_DOORBELL_END, - &ipi_domain_ops, NULL); - if (WARN_ON(!ipi_domain)) - return; + mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR, + &mpic_ipi_domain_ops, mpic); + if (WARN_ON(!mpic->ipi_domain)) + return -ENOMEM; - irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI); - base_ipi = irq_domain_alloc_irqs(ipi_domain, IPI_DOORBELL_END, NUMA_NO_NODE, NULL); + irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI); + base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL); if (WARN_ON(!base_ipi)) - return; + return -ENOMEM; + + set_smp_ipi_range(base_ipi, IPI_DOORBELL_NR); - set_smp_ipi_range(base_ipi, IPI_DOORBELL_END); + return 0; } -static int armada_xp_set_affinity(struct irq_data *d, - const struct cpumask *mask_val, bool force) +static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { + struct mpic *mpic = irq_data_get_irq_chip_data(d); irq_hw_number_t hwirq = irqd_to_hwirq(d); - int cpu; + unsigned int cpu; /* Select a single core from the affinity mask which is online */ cpu = cpumask_any_and(mask_val, cpu_online_mask); - atomic_io_modify(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq), - ARMADA_370_XP_INT_SOURCE_CPU_MASK, - BIT(cpu_logical_map(cpu))); + atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq), + MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu))); irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK; } -static void armada_xp_mpic_smp_cpu_init(void) +static void mpic_smp_cpu_init(struct mpic *mpic) { - u32 control; - int nr_irqs, i; - - control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); - nr_irqs = (control >> 2) & 0x3ff; + for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) + writel(i, mpic->per_cpu + MPIC_INT_SET_MASK); - for (i = 0; i < nr_irqs; i++) - writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); - - if (!is_ipi_available()) + if (!mpic_is_ipi_available(mpic)) return; /* Disable all IPIs */ - writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK); /* Clear pending IPIs */ - writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); /* Unmask IPI interrupt */ - writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); } -static void armada_xp_mpic_reenable_percpu(void) +static void mpic_reenable_percpu(struct mpic *mpic) { - unsigned int irq; - /* Re-enable per-CPU interrupts that were enabled before suspend */ - for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) { - struct irq_data *data; - int virq; - - virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq); - if (virq == 0) - continue; - - data = 
irq_get_irq_data(virq); + for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) { + unsigned int virq = irq_linear_revmap(mpic->domain, i); + struct irq_data *d; - if (!irq_percpu_is_enabled(virq)) + if (!virq || !irq_percpu_is_enabled(virq)) continue; - armada_370_xp_irq_unmask(data); + d = irq_get_irq_data(virq); + mpic_irq_unmask(d); } - if (is_ipi_available()) - ipi_resume(); + if (mpic_is_ipi_available(mpic)) + mpic_ipi_resume(mpic); - armada_370_xp_msi_reenable_percpu(); + mpic_msi_reenable_percpu(mpic); } -static int armada_xp_mpic_starting_cpu(unsigned int cpu) +static int mpic_starting_cpu(unsigned int cpu) { - armada_xp_mpic_perf_init(); - armada_xp_mpic_smp_cpu_init(); - armada_xp_mpic_reenable_percpu(); + struct mpic *mpic = irq_get_default_host()->host_data; + + mpic_perf_init(mpic); + mpic_smp_cpu_init(mpic); + mpic_reenable_percpu(mpic); + return 0; } static int mpic_cascaded_starting_cpu(unsigned int cpu) { - armada_xp_mpic_perf_init(); - armada_xp_mpic_reenable_percpu(); - enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); + struct mpic *mpic = mpic_data; + + mpic_perf_init(mpic); + mpic_reenable_percpu(mpic); + enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE); + return 0; } #else -static void armada_xp_mpic_smp_cpu_init(void) {} -static void ipi_resume(void) {} +static void mpic_smp_cpu_init(struct mpic *mpic) {} +static void mpic_ipi_resume(struct mpic *mpic) {} #endif -static struct irq_chip armada_370_xp_irq_chip = { +static struct irq_chip mpic_irq_chip = { .name = "MPIC", - .irq_mask = armada_370_xp_irq_mask, - .irq_mask_ack = armada_370_xp_irq_mask, - .irq_unmask = armada_370_xp_irq_unmask, + .irq_mask = mpic_irq_mask, + .irq_mask_ack = mpic_irq_mask, + .irq_unmask = mpic_irq_unmask, #ifdef CONFIG_SMP - .irq_set_affinity = armada_xp_set_affinity, + .irq_set_affinity = mpic_set_affinity, #endif .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, }; -static int armada_370_xp_mpic_irq_map(struct irq_domain *h, - unsigned int virq, irq_hw_number_t hw) +static int mpic_irq_map(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { + struct mpic *mpic = domain->host_data; + /* IRQs 0 and 1 cannot be mapped, they are handled internally */ - if (hw <= 1) + if (hwirq <= 1) return -EINVAL; - armada_370_xp_irq_mask(irq_get_irq_data(virq)); - if (!is_percpu_irq(hw)) - writel(hw, per_cpu_int_base + - ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + irq_set_chip_data(virq, mpic); + + mpic_irq_mask(irq_get_irq_data(virq)); + if (!mpic_is_percpu_irq(hwirq)) + writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); else - writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); irq_set_status_flags(virq, IRQ_LEVEL); - if (is_percpu_irq(hw)) { + if (mpic_is_percpu_irq(hwirq)) { irq_set_percpu_devid(virq); - irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, - handle_percpu_devid_irq); + irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_percpu_devid_irq); } else { - irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, - handle_level_irq); + irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_level_irq); irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); } irq_set_probe(virq); - return 0; } -static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = { - .map = armada_370_xp_mpic_irq_map, - .xlate = irq_domain_xlate_onecell, +static const struct irq_domain_ops mpic_irq_ops = { + .map = mpic_irq_map, + .xlate = irq_domain_xlate_onecell, }; #ifdef CONFIG_PCI_MSI -static void 
armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained) +static void mpic_handle_msi_irq(struct mpic *mpic) { - u32 msimask, msinr; - - msimask = readl_relaxed(per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); - msimask &= msi_doorbell_mask(); + unsigned long cause; + unsigned int i; - writel(~msimask, per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); + cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); + cause &= mpic->msi_doorbell_mask; + writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); - for (msinr = msi_doorbell_start(); - msinr < msi_doorbell_end(); msinr++) { - unsigned int irq; + for_each_set_bit(i, &cause, BITS_PER_LONG) + generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start); +} +#else +static void mpic_handle_msi_irq(struct mpic *mpic) {} +#endif - if (!(msimask & BIT(msinr))) - continue; +#ifdef CONFIG_SMP +static void mpic_handle_ipi_irq(struct mpic *mpic) +{ + unsigned long cause; + irq_hw_number_t i; - irq = msinr - msi_doorbell_start(); + cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); + cause &= IPI_DOORBELL_MASK; - generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq); - } + for_each_set_bit(i, &cause, IPI_DOORBELL_NR) + generic_handle_domain_irq(mpic->ipi_domain, i); } #else -static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {} +static inline void mpic_handle_ipi_irq(struct mpic *mpic) {} #endif -static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc) +static void mpic_handle_cascade_irq(struct irq_desc *desc) { + struct mpic *mpic = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - unsigned long irqmap, irqn, irqsrc, cpuid; + unsigned long cause; + u32 irqsrc, cpuid; + irq_hw_number_t i; chained_irq_enter(chip, desc); - irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE); + cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE); cpuid = cpu_logical_map(smp_processor_id()); - for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) { - irqsrc = readl_relaxed(main_int_base + - ARMADA_370_XP_INT_SOURCE_CTL(irqn)); + for_each_set_bit(i, &cause, MPIC_PER_CPU_IRQS_NR) { + irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i)); /* Check if the interrupt is not masked on current CPU. * Test IRQ (0-1) and FIQ (8-9) mask bits. 
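A worked example of the mask test that follows, assuming the cascade is being handled on physical CPU 1:

/*
 * MPIC_INT_IRQ_FIQ_MASK(1) == (BIT(0) | BIT(8)) << 1 == 0x202,
 * i.e. bit 1 (IRQ enable for CPU 1) and bit 9 (FIQ enable for CPU 1) of the
 * per-interrupt MPIC_INT_SOURCE_CTL value; the interrupt is only forwarded
 * here if at least one of those bits is set.
 */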
*/ - if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid))) + if (!(irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid))) continue; - if (irqn == 0 || irqn == 1) { - armada_370_xp_handle_msi_irq(NULL, true); + if (i == 0 || i == 1) { + mpic_handle_msi_irq(mpic); continue; } - generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn); + generic_handle_domain_irq(mpic->domain, i); } chained_irq_exit(chip, desc); } -static void __exception_irq_entry -armada_370_xp_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs) { - u32 irqstat, irqnr; + struct mpic *mpic = irq_get_default_host()->host_data; + irq_hw_number_t i; + u32 irqstat; do { - irqstat = readl_relaxed(per_cpu_int_base + - ARMADA_370_XP_CPU_INTACK_OFFS); - irqnr = irqstat & 0x3FF; + irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK); + i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat); - if (irqnr > 1022) + if (i > 1022) break; - if (irqnr > 1) { - generic_handle_domain_irq(armada_370_xp_mpic_domain, - irqnr); - continue; - } + if (i > 1) + generic_handle_domain_irq(mpic->domain, i); /* MSI handling */ - if (irqnr == 1) - armada_370_xp_handle_msi_irq(regs, false); + if (i == 1) + mpic_handle_msi_irq(mpic); -#ifdef CONFIG_SMP /* IPI Handling */ - if (irqnr == 0) { - unsigned long ipimask; - int ipi; - - ipimask = readl_relaxed(per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) - & IPI_DOORBELL_MASK; - - for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END) - generic_handle_domain_irq(ipi_domain, ipi); - } -#endif - + if (i == 0) + mpic_handle_ipi_irq(mpic); } while (1); } -static int armada_370_xp_mpic_suspend(void) +static int mpic_suspend(void) { - doorbell_mask_reg = readl(per_cpu_int_base + - ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + struct mpic *mpic = mpic_data; + + mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); + return 0; } -static void armada_370_xp_mpic_resume(void) +static void mpic_resume(void) { + struct mpic *mpic = mpic_data; bool src0, src1; - int nirqs; - irq_hw_number_t irq; /* Re-enable interrupts */ - nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff; - for (irq = 0; irq < nirqs; irq++) { - struct irq_data *data; - int virq; + for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) { + unsigned int virq = irq_linear_revmap(mpic->domain, i); + struct irq_data *d; - virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq); - if (virq == 0) + if (!virq) continue; - data = irq_get_irq_data(virq); + d = irq_get_irq_data(virq); - if (!is_percpu_irq(irq)) { + if (!mpic_is_percpu_irq(i)) { /* Non per-CPU interrupts */ - writel(irq, per_cpu_int_base + - ARMADA_370_XP_INT_CLEAR_MASK_OFFS); - if (!irqd_irq_disabled(data)) - armada_370_xp_irq_unmask(data); + writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK); + if (!irqd_irq_disabled(d)) + mpic_irq_unmask(d); } else { /* Per-CPU interrupts */ - writel(irq, main_int_base + - ARMADA_370_XP_INT_SET_ENABLE_OFFS); + writel(i, mpic->base + MPIC_INT_SET_ENABLE); /* - * Re-enable on the current CPU, - * armada_xp_mpic_reenable_percpu() will take - * care of secondary CPUs when they come up. + * Re-enable on the current CPU, mpic_reenable_percpu() + * will take care of secondary CPUs when they come up. 
*/ if (irq_percpu_is_enabled(virq)) - armada_370_xp_irq_unmask(data); + mpic_irq_unmask(d); } } /* Reconfigure doorbells for IPIs and MSIs */ - writel(doorbell_mask_reg, - per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS); + writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK); - if (is_ipi_available()) { - src0 = doorbell_mask_reg & IPI_DOORBELL_MASK; - src1 = doorbell_mask_reg & PCI_MSI_DOORBELL_MASK; + if (mpic_is_ipi_available(mpic)) { + src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK; + src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK; } else { - src0 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC0_MASK; - src1 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC1_MASK; + src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK; + src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK; } if (src0) - writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); if (src1) - writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); - if (is_ipi_available()) - ipi_resume(); + if (mpic_is_ipi_available(mpic)) + mpic_ipi_resume(mpic); } -static struct syscore_ops armada_370_xp_mpic_syscore_ops = { - .suspend = armada_370_xp_mpic_suspend, - .resume = armada_370_xp_mpic_resume, +static struct syscore_ops mpic_syscore_ops = { + .suspend = mpic_suspend, + .resume = mpic_resume, }; -static int __init armada_370_xp_mpic_of_init(struct device_node *node, - struct device_node *parent) +static int __init mpic_map_region(struct device_node *np, int index, + void __iomem **base, phys_addr_t *phys_base) { - struct resource main_int_res, per_cpu_int_res; - int nr_irqs, i; - u32 control; + struct resource res; + int err; + + err = of_address_to_resource(np, index, &res); + if (WARN_ON(err)) + goto fail; + + if (WARN_ON(!request_mem_region(res.start, resource_size(&res), np->full_name))) { + err = -EBUSY; + goto fail; + } + + *base = ioremap(res.start, resource_size(&res)); + if (WARN_ON(!*base)) { + err = -ENOMEM; + goto fail; + } - BUG_ON(of_address_to_resource(node, 0, &main_int_res)); - BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res)); + if (phys_base) + *phys_base = res.start; - BUG_ON(!request_mem_region(main_int_res.start, - resource_size(&main_int_res), - node->full_name)); - BUG_ON(!request_mem_region(per_cpu_int_res.start, - resource_size(&per_cpu_int_res), - node->full_name)); + return 0; + +fail: + pr_err("%pOF: Unable to map resource %d: %pE\n", np, index, ERR_PTR(err)); + return err; +} - main_int_base = ioremap(main_int_res.start, - resource_size(&main_int_res)); - BUG_ON(!main_int_base); +static int __init mpic_of_init(struct device_node *node, struct device_node *parent) +{ + phys_addr_t phys_base; + unsigned int nr_irqs; + struct mpic *mpic; + int err; + + mpic = kzalloc(sizeof(*mpic), GFP_KERNEL); + if (WARN_ON(!mpic)) + return -ENOMEM; - per_cpu_int_base = ioremap(per_cpu_int_res.start, - resource_size(&per_cpu_int_res)); - BUG_ON(!per_cpu_int_base); + mpic_data = mpic; - control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); - nr_irqs = (control >> 2) & 0x3ff; + err = mpic_map_region(node, 0, &mpic->base, &phys_base); + if (err) + return err; - for (i = 0; i < nr_irqs; i++) - writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); + err = mpic_map_region(node, 1, &mpic->per_cpu, NULL); + if (err) + return err; - armada_370_xp_mpic_domain = - irq_domain_add_linear(node, nr_irqs, - &armada_370_xp_mpic_irq_ops, NULL); - 
BUG_ON(!armada_370_xp_mpic_domain); - irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED); + nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL)); + + for (irq_hw_number_t i = 0; i < nr_irqs; i++) + writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE); + + /* + * Initialize mpic->parent_irq before calling any other functions, since + * it is used to distinguish between IPI and non-IPI platforms. + */ + mpic->parent_irq = irq_of_parse_and_map(node, 0); /* - * Initialize parent_irq before calling any other functions, since it is - * used to distinguish between IPI and non-IPI platforms. + * On non-IPI platforms the driver currently supports only the per-CPU + * interrupts (the first 29 interrupts). See mpic_handle_cascade_irq(). */ - parent_irq = irq_of_parse_and_map(node, 0); + if (!mpic_is_ipi_available(mpic)) + nr_irqs = MPIC_PER_CPU_IRQS_NR; + + mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic); + if (!mpic->domain) { + pr_err("%pOF: Unable to add IRQ domain\n", node); + return -ENOMEM; + } + + irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED); /* Setup for the boot CPU */ - armada_xp_mpic_perf_init(); - armada_xp_mpic_smp_cpu_init(); + mpic_perf_init(mpic); + mpic_smp_cpu_init(mpic); - armada_370_xp_msi_init(node, main_int_res.start); + err = mpic_msi_init(mpic, node, phys_base); + if (err) { + pr_err("%pOF: Unable to initialize MSI domain\n", node); + return err; + } - if (parent_irq <= 0) { - irq_set_default_host(armada_370_xp_mpic_domain); - set_handle_irq(armada_370_xp_handle_irq); + if (mpic_is_ipi_available(mpic)) { + irq_set_default_host(mpic->domain); + set_handle_irq(mpic_handle_irq); #ifdef CONFIG_SMP - armada_xp_ipi_init(node); + err = mpic_ipi_init(mpic, node); + if (err) { + pr_err("%pOF: Unable to initialize IPI domain\n", node); + return err; + } + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING, "irqchip/armada/ipi:starting", - armada_xp_mpic_starting_cpu, NULL); + mpic_starting_cpu, NULL); #endif } else { #ifdef CONFIG_SMP @@ -882,13 +899,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, "irqchip/armada/cascade:starting", mpic_cascaded_starting_cpu, NULL); #endif - irq_set_chained_handler(parent_irq, - armada_370_xp_mpic_handle_cascade_irq); + irq_set_chained_handler_and_data(mpic->parent_irq, + mpic_handle_cascade_irq, mpic); } - register_syscore_ops(&armada_370_xp_mpic_syscore_ops); + register_syscore_ops(&mpic_syscore_ops); return 0; } -IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init); +IRQCHIP_DECLARE(marvell_mpic, "marvell,mpic", mpic_of_init); diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 4631f68479537b..3839ad79ad31dc 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -57,8 +57,7 @@ static struct irq_domain *aic_domain; -static asmlinkage void __exception_irq_entry -aic_handle(struct pt_regs *regs) +static void __exception_irq_entry aic_handle(struct pt_regs *regs) { struct irq_domain_chip_generic *dgc = aic_domain->gc; struct irq_chip_generic *gc = dgc->gc[0]; diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 145535bd756058..c0f55dc7b050df 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -67,8 +67,7 @@ static struct irq_domain *aic5_domain; -static asmlinkage void __exception_irq_entry -aic5_handle(struct pt_regs *regs) +static void __exception_irq_entry 
aic5_handle(struct pt_regs *regs) { struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0); u32 irqnr; diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index e731e0784f7e3e..806ebb1de201a4 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c @@ -69,7 +69,7 @@ static struct { struct irq_domain_ops ops; } *clps711x_intc; -static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs) +static void __exception_irq_entry clps711x_irqh(struct pt_regs *regs) { u32 irqstat; diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c index 7482c8ed34b230..f4f8e9fadbbfb6 100644 --- a/drivers/irqchip/irq-davinci-cp-intc.c +++ b/drivers/irqchip/irq-davinci-cp-intc.c @@ -116,8 +116,7 @@ static struct irq_chip davinci_cp_intc_irq_chip = { .flags = IRQCHIP_SKIP_SET_WAKE, }; -static asmlinkage void __exception_irq_entry -davinci_cp_intc_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry davinci_cp_intc_handle_irq(struct pt_regs *regs) { int gpir, irqnr, none; diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c index 359efc1d1be73b..b91c358ea6db17 100644 --- a/drivers/irqchip/irq-ftintc010.c +++ b/drivers/irqchip/irq-ftintc010.c @@ -125,7 +125,7 @@ static struct irq_chip ft010_irq_chip = { /* Local static for the IRQ entry call */ static struct ft010_irq_data firq; -static asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs) { struct ft010_irq_data *f = &firq; int irq; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 74f21e03d4a374..ce87205e3e8237 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -930,7 +930,7 @@ static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) __gic_handle_nmi(irqnr, regs); } -static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs))) __gic_handle_irq_from_irqsoff(regs); diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index ca32ac19d28450..58c28895f8c425 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -97,7 +97,7 @@ bool gic_cpuif_has_vsgi(void) fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT); - return fld >= 0x3; + return fld >= ID_AA64PFR0_EL1_GIC_V4P1; } #else bool gic_cpuif_has_vsgi(void) diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c index 5fba907b9052b2..f23b02f62a5c3f 100644 --- a/drivers/irqchip/irq-ixp4xx.c +++ b/drivers/irqchip/irq-ixp4xx.c @@ -105,8 +105,7 @@ static void ixp4xx_irq_unmask(struct irq_data *d) } } -static asmlinkage void __exception_irq_entry -ixp4xx_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs) { struct ixp4xx_irq *ixi = &ixirq; unsigned long status; diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c new file mode 100644 index 00000000000000..0f6e465dd30956 --- /dev/null +++ b/drivers/irqchip/irq-loongarch-avec.c @@ -0,0 +1,425 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2024 Loongson Technologies, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "irq-msi-lib.h" +#include "irq-loongson.h" + +#define VECTORS_PER_REG 64 +#define IRR_VECTOR_MASK 0xffUL +#define IRR_INVALID_MASK 0x80000000UL +#define AVEC_MSG_OFFSET 0x100000 + +#ifdef CONFIG_SMP +struct pending_list { + struct list_head head; +}; + +static struct cpumask intersect_mask; +static DEFINE_PER_CPU(struct pending_list, pending_list); +#endif + +static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map); + +struct avecintc_chip { + raw_spinlock_t lock; + struct fwnode_handle *fwnode; + struct irq_domain *domain; + struct irq_matrix *vector_matrix; + phys_addr_t msi_base_addr; +}; + +static struct avecintc_chip loongarch_avec; + +struct avecintc_data { + struct list_head entry; + unsigned int cpu; + unsigned int vec; + unsigned int prev_cpu; + unsigned int prev_vec; + unsigned int moving; +}; + +static inline void avecintc_ack_irq(struct irq_data *d) +{ +} + +static inline void avecintc_mask_irq(struct irq_data *d) +{ +} + +static inline void avecintc_unmask_irq(struct irq_data *d) +{ +} + +#ifdef CONFIG_SMP +static inline void pending_list_init(int cpu) +{ + struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); + + INIT_LIST_HEAD(&plist->head); +} + +static void avecintc_sync(struct avecintc_data *adata) +{ + struct pending_list *plist; + + if (cpu_online(adata->prev_cpu)) { + plist = per_cpu_ptr(&pending_list, adata->prev_cpu); + list_add_tail(&adata->entry, &plist->head); + adata->moving = 1; + mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR); + } +} + +static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) +{ + int cpu, ret, vector; + struct avecintc_data *adata; + + scoped_guard(raw_spinlock, &loongarch_avec.lock) { + adata = irq_data_get_irq_chip_data(data); + + if (adata->moving) + return -EBUSY; + + if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest)) + return 0; + + cpumask_and(&intersect_mask, dest, cpu_online_mask); + + ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu); + if (ret < 0) + return ret; + + vector = ret; + adata->cpu = cpu; + adata->vec = vector; + per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data); + avecintc_sync(adata); + } + + irq_data_update_effective_affinity(data, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK; +} + +static int avecintc_cpu_online(unsigned int cpu) +{ + if (!loongarch_avec.vector_matrix) + return 0; + + guard(raw_spinlock)(&loongarch_avec.lock); + + irq_matrix_online(loongarch_avec.vector_matrix); + + pending_list_init(cpu); + + return 0; +} + +static int avecintc_cpu_offline(unsigned int cpu) +{ + struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); + + if (!loongarch_avec.vector_matrix) + return 0; + + guard(raw_spinlock)(&loongarch_avec.lock); + + if (!list_empty(&plist->head)) + pr_warn("CPU#%d vector is busy\n", cpu); + + irq_matrix_offline(loongarch_avec.vector_matrix); + + return 0; +} + +void complete_irq_moving(void) +{ + struct pending_list *plist = this_cpu_ptr(&pending_list); + struct avecintc_data *adata, *tdata; + int cpu, vector, bias; + uint64_t isr; + + guard(raw_spinlock)(&loongarch_avec.lock); + + list_for_each_entry_safe(adata, tdata, &plist->head, entry) { + cpu = adata->prev_cpu; + vector = adata->prev_vec; + bias = vector / VECTORS_PER_REG; + switch (bias) { + case 0: + isr = csr_read64(LOONGARCH_CSR_ISR0); + break; + case 1: 
+ isr = csr_read64(LOONGARCH_CSR_ISR1); + break; + case 2: + isr = csr_read64(LOONGARCH_CSR_ISR2); + break; + case 3: + isr = csr_read64(LOONGARCH_CSR_ISR3); + break; + } + + if (isr & (1UL << (vector % VECTORS_PER_REG))) { + mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR); + continue; + } + list_del(&adata->entry); + irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false); + this_cpu_write(irq_map[vector], NULL); + adata->moving = 0; + adata->prev_cpu = adata->cpu; + adata->prev_vec = adata->vec; + } +} +#endif + +static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct avecintc_data *adata = irq_data_get_irq_chip_data(d); + + msg->address_hi = 0x0; + msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4) + | ((cpu_logical_map(adata->cpu & 0xffff)) << 12); + msg->data = 0x0; +} + +static struct irq_chip avec_irq_controller = { + .name = "AVECINTC", + .irq_ack = avecintc_ack_irq, + .irq_mask = avecintc_mask_irq, + .irq_unmask = avecintc_unmask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = avecintc_set_affinity, +#endif + .irq_compose_msi_msg = avecintc_compose_msi_msg, +}; + +static void avecintc_irq_dispatch(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_desc *d; + + chained_irq_enter(chip, desc); + + while (true) { + unsigned long vector = csr_read64(LOONGARCH_CSR_IRR); + if (vector & IRR_INVALID_MASK) + break; + + vector &= IRR_VECTOR_MASK; + + d = this_cpu_read(irq_map[vector]); + if (d) { + generic_handle_irq_desc(d); + } else { + spurious_interrupt(); + pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector); + } + } + + chained_irq_exit(chip, desc); +} + +static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata) +{ + int cpu, ret; + + guard(raw_spinlock_irqsave)(&loongarch_avec.lock); + + ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu); + if (ret < 0) + return ret; + + adata->prev_cpu = adata->cpu = cpu; + adata->prev_vec = adata->vec = ret; + per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd); + + return 0; +} + +static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + for (unsigned int i = 0; i < nr_irqs; i++) { + struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i); + struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL); + int ret; + + if (!adata) + return -ENOMEM; + + ret = avecintc_alloc_vector(irqd, adata); + if (ret < 0) { + kfree(adata); + return ret; + } + + irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller, + adata, handle_edge_irq, NULL, NULL); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + } + + return 0; +} + +static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata) +{ + guard(raw_spinlock_irqsave)(&loongarch_avec.lock); + + per_cpu(irq_map, adata->cpu)[adata->vec] = NULL; + irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false); + +#ifdef CONFIG_SMP + if (!adata->moving) + return; + + per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL; + irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false); + list_del_init(&adata->entry); +#endif +} + +static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + for (unsigned int i = 0; i < nr_irqs; i++) { + struct irq_data *d = 
irq_domain_get_irq_data(domain, virq + i); + + if (d) { + struct avecintc_data *adata = irq_data_get_irq_chip_data(d); + + avecintc_free_vector(d, adata); + irq_domain_reset_irq_data(d); + kfree(adata); + } + } +} + +static const struct irq_domain_ops avecintc_domain_ops = { + .alloc = avecintc_domain_alloc, + .free = avecintc_domain_free, + .select = msi_lib_irq_domain_select, +}; + +static int __init irq_matrix_init(void) +{ + loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS); + if (!loongarch_avec.vector_matrix) + return -ENOMEM; + + for (int i = 0; i < NR_LEGACY_VECTORS; i++) + irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false); + + irq_matrix_online(loongarch_avec.vector_matrix); + + return 0; +} + +static int __init avecintc_init(struct irq_domain *parent) +{ + int ret, parent_irq; + unsigned long value; + + raw_spin_lock_init(&loongarch_avec.lock); + + loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC"); + if (!loongarch_avec.fwnode) { + pr_err("Unable to allocate domain handle\n"); + ret = -ENOMEM; + goto out; + } + + loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode, + &avecintc_domain_ops, NULL); + if (!loongarch_avec.domain) { + pr_err("Unable to create IRQ domain\n"); + ret = -ENOMEM; + goto out_free_handle; + } + + parent_irq = irq_create_mapping(parent, INT_AVEC); + if (!parent_irq) { + pr_err("Failed to mapping hwirq\n"); + ret = -EINVAL; + goto out_remove_domain; + } + + ret = irq_matrix_init(); + if (ret < 0) { + pr_err("Failed to init irq matrix\n"); + goto out_remove_domain; + } + irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL); + +#ifdef CONFIG_SMP + pending_list_init(0); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING, + "irqchip/loongarch/avecintc:starting", + avecintc_cpu_online, avecintc_cpu_offline); +#endif + value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + value |= IOCSR_MISC_FUNC_AVEC_EN; + iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); + + return ret; + +out_remove_domain: + irq_domain_remove(loongarch_avec.domain); +out_free_handle: + irq_domain_free_fwnode(loongarch_avec.fwnode); +out: + return ret; +} + +static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; + + loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET; + + return pch_msi_acpi_init_avec(loongarch_avec.domain); +} + +static inline int __init acpi_cascade_irqdomain_init(void) +{ + return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); +} + +int __init avecintc_acpi_init(struct irq_domain *parent) +{ + int ret = avecintc_init(parent); + if (ret < 0) { + pr_err("Failed to init IRQ domain\n"); + return ret; + } + + ret = acpi_cascade_irqdomain_init(); + if (ret < 0) { + pr_err("Failed to init cascade IRQ domain\n"); + return ret; + } + + return ret; +} diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index b35903a06902f7..e62dab4c97fcf5 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -13,6 +13,8 @@ #include #include +#include "irq-loongson.h" + static struct irq_domain *irq_domain; struct fwnode_handle *cpuintc_handle; @@ -140,7 +142,10 @@ static int __init acpi_cascade_irqdomain_init(void) if (r < 0) return r; - return 0; + if (cpu_has_avecint) + r = avecintc_acpi_init(irq_domain); + + return r; } static int __init cpuintc_acpi_init(union 
acpi_subtable_headers *header, diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index b1f2080be2beb2..e24db71a8783cb 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + #define EIOINTC_REG_NODEMAP 0x14a0 #define EIOINTC_REG_IPMAP 0x14c0 #define EIOINTC_REG_ENABLE 0x1600 @@ -360,6 +362,9 @@ static int __init acpi_cascade_irqdomain_init(void) if (r < 0) return r; + if (cpu_has_avecint) + return 0; + r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); if (r < 0) return r; @@ -396,8 +401,8 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, if (nr_pics == 1) { register_syscore_ops(&eiointc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, - "irqchip/loongarch/intc:starting", + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING, + "irqchip/loongarch/eiointc:starting", eiointc_router_init, NULL); } diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c index 0bff728b25e3da..5da02c7ad0b307 100644 --- a/drivers/irqchip/irq-loongson-htvec.c +++ b/drivers/irqchip/irq-loongson-htvec.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define HTVEC_EN_OFF 0x20 #define HTVEC_MAX_PARENT_IRQ 8 diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 7c4fe7ab4b830e..2b1bd4a96665b4 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -22,6 +22,8 @@ #include #endif +#include "irq-loongson.h" + #define LIOINTC_CHIP_IRQ 32 #define LIOINTC_NUM_PARENT 4 #define LIOINTC_NUM_CORES 4 diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c index 9b35492fb6be9e..2d4c3ec128b8f2 100644 --- a/drivers/irqchip/irq-loongson-pch-lpc.c +++ b/drivers/irqchip/irq-loongson-pch-lpc.c @@ -15,6 +15,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define LPC_INT_CTL 0x00 #define LPC_INT_ENA 0x04 diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c index dd4d699170f4ec..bd337ecddb4097 100644 --- a/drivers/irqchip/irq-loongson-pch-msi.c +++ b/drivers/irqchip/irq-loongson-pch-msi.c @@ -15,6 +15,9 @@ #include #include +#include "irq-msi-lib.h" +#include "irq-loongson.h" + static int nr_pics; struct pch_msi_data { @@ -27,26 +30,6 @@ struct pch_msi_data { static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS]; -static void pch_msi_mask_msi_irq(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void pch_msi_unmask_msi_irq(struct irq_data *d) -{ - irq_chip_unmask_parent(d); - pci_msi_unmask_irq(d); -} - -static struct irq_chip pch_msi_irq_chip = { - .name = "PCH PCI MSI", - .irq_mask = pch_msi_mask_msi_irq, - .irq_unmask = pch_msi_unmask_msi_irq, - .irq_ack = irq_chip_ack_parent, - .irq_set_affinity = irq_chip_set_affinity_parent, -}; - static int pch_msi_allocate_hwirq(struct pch_msi_data *priv, int num_req) { int first; @@ -85,12 +68,6 @@ static void pch_msi_compose_msi_msg(struct irq_data *data, msg->data = data->hwirq; } -static struct msi_domain_info pch_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, - .chip = &pch_msi_irq_chip, -}; - static struct irq_chip middle_irq_chip = { .name = "PCH MSI", .irq_mask = irq_chip_mask_parent, @@ -155,13 
+132,31 @@ static void pch_msi_middle_domain_free(struct irq_domain *domain, static const struct irq_domain_ops pch_msi_middle_domain_ops = { .alloc = pch_msi_middle_domain_alloc, .free = pch_msi_middle_domain_free, + .select = msi_lib_irq_domain_select, +}; + +#define PCH_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define PCH_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_MULTI_PCI_MSI) + +static struct msi_parent_ops pch_msi_parent_ops = { + .required_flags = PCH_MSI_FLAGS_REQUIRED, + .supported_flags = PCH_MSI_FLAGS_SUPPORTED, + .bus_select_mask = MATCH_PCI_MSI, + .bus_select_token = DOMAIN_BUS_NEXUS, + .prefix = "PCH-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int pch_msi_init_domains(struct pch_msi_data *priv, struct irq_domain *parent, struct fwnode_handle *domain_handle) { - struct irq_domain *middle_domain, *msi_domain; + struct irq_domain *middle_domain; middle_domain = irq_domain_create_hierarchy(parent, 0, priv->num_irqs, domain_handle, @@ -174,14 +169,8 @@ static int pch_msi_init_domains(struct pch_msi_data *priv, irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS); - msi_domain = pci_msi_create_irq_domain(domain_handle, - &pch_msi_domain_info, - middle_domain); - if (!msi_domain) { - pr_err("Failed to create PCI MSI domain\n"); - irq_domain_remove(middle_domain); - return -ENOMEM; - } + middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; + middle_domain->msi_parent_ops = &pch_msi_parent_ops; return 0; } @@ -266,17 +255,17 @@ IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init); #ifdef CONFIG_ACPI struct fwnode_handle *get_pch_msi_handle(int pci_segment) { - int i; + if (cpu_has_avecint) + return pch_msi_handle[0]; - for (i = 0; i < MAX_IO_PICS; i++) { + for (int i = 0; i < MAX_IO_PICS; i++) { if (msi_group[i].pci_segment == pci_segment) return pch_msi_handle[i]; } - return NULL; + return pch_msi_handle[0]; } -int __init pch_msi_acpi_init(struct irq_domain *parent, - struct acpi_madt_msi_pic *acpi_pchmsi) +int __init pch_msi_acpi_init(struct irq_domain *parent, struct acpi_madt_msi_pic *acpi_pchmsi) { int ret; struct fwnode_handle *domain_handle; @@ -289,4 +278,18 @@ int __init pch_msi_acpi_init(struct irq_domain *parent, return ret; } + +int __init pch_msi_acpi_init_avec(struct irq_domain *parent) +{ + if (pch_msi_handle[0]) + return 0; + + pch_msi_handle[0] = parent->fwnode; + irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + + parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; + parent->msi_parent_ops = &pch_msi_parent_ops; + + return 0; +} #endif diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index cbaef65e804cec..69efda35a8e785 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define PCH_PIC_MASK 0x20 #define PCH_PIC_HTMSI_EN 0x40 diff --git a/drivers/irqchip/irq-loongson.h b/drivers/irqchip/irq-loongson.h new file mode 100644 index 00000000000000..11fa138d1f4434 --- /dev/null +++ b/drivers/irqchip/irq-loongson.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef _DRIVERS_IRQCHIP_IRQ_LOONGSON_H +#define _DRIVERS_IRQCHIP_IRQ_LOONGSON_H + +int find_pch_pic(u32 gsi); + +int liointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lio_pic *acpi_liointc); +int 
eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc); +int avecintc_acpi_init(struct irq_domain *parent); + +int htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec); +int pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc); +int pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic); +int pch_msi_acpi_init(struct irq_domain *parent, + struct acpi_madt_msi_pic *acpi_pchmsi); +int pch_msi_acpi_init_avec(struct irq_domain *parent); + +#endif /* _DRIVERS_IRQCHIP_IRQ_LOONGSON_H */ diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 53cc08387588f1..6f69f4e5dbacda 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -234,37 +234,27 @@ static int mbigen_of_create_domain(struct platform_device *pdev, struct mbigen_device *mgn_chip) { struct platform_device *child; - struct device_node *np; u32 num_pins; - int ret = 0; - for_each_child_of_node(pdev->dev.of_node, np) { + for_each_child_of_node_scoped(pdev->dev.of_node, np) { if (!of_property_read_bool(np, "interrupt-controller")) continue; child = of_platform_device_create(np, NULL, NULL); - if (!child) { - ret = -ENOMEM; - break; - } + if (!child) + return -ENOMEM; if (of_property_read_u32(child->dev.of_node, "num-pins", &num_pins) < 0) { dev_err(&pdev->dev, "No num-pins property\n"); - ret = -EINVAL; - break; + return -EINVAL; } - if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip)) { - ret = -ENOMEM; - break; - } + if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip)) + return -ENOMEM; } - if (ret) - of_node_put(np); - - return ret; + return 0; } #ifdef CONFIG_ACPI diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c index dc82162ba76314..ad84a2f03368aa 100644 --- a/drivers/irqchip/irq-omap-intc.c +++ b/drivers/irqchip/irq-omap-intc.c @@ -325,8 +325,7 @@ static int __init omap_init_irq(u32 base, struct device_node *node) return ret; } -static asmlinkage void __exception_irq_entry -omap_intc_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry omap_intc_handle_irq(struct pt_regs *regs) { extern unsigned long irq_err_count; u32 irqnr; diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c index 4a3ffe856d6c30..7cd6b646774b9a 100644 --- a/drivers/irqchip/irq-riscv-aplic-direct.c +++ b/drivers/irqchip/irq-riscv-aplic-direct.c @@ -4,6 +4,7 @@ * Copyright (C) 2022 Ventana Micro Systems Inc. */ +#include #include #include #include @@ -189,17 +190,22 @@ static int aplic_direct_starting_cpu(unsigned int cpu) } static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index, - u32 *parent_hwirq, unsigned long *parent_hartid) + u32 *parent_hwirq, unsigned long *parent_hartid, + struct aplic_priv *priv) { struct of_phandle_args parent; + unsigned long hartid; int rc; - /* - * Currently, only OF fwnode is supported so extend this - * function for ACPI support. 
- */ - if (!is_of_node(dev->fwnode)) - return -EINVAL; + if (!is_of_node(dev->fwnode)) { + hartid = acpi_rintc_ext_parent_to_hartid(priv->acpi_aplic_id, index); + if (hartid == INVALID_HARTID) + return -ENODEV; + + *parent_hartid = hartid; + *parent_hwirq = RV_IRQ_EXT; + return 0; + } rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent); if (rc) @@ -237,7 +243,7 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs) /* Setup per-CPU IDC and target CPU mask */ current_cpu = get_cpu(); for (i = 0; i < priv->nr_idcs; i++) { - rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid); + rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid, priv); if (rc) { dev_warn(dev, "parent irq for IDC%d not found\n", i); continue; diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c index 981fad6fb8f71f..900e72541db9e5 100644 --- a/drivers/irqchip/irq-riscv-aplic-main.c +++ b/drivers/irqchip/irq-riscv-aplic-main.c @@ -4,8 +4,10 @@ * Copyright (C) 2022 Ventana Micro Systems Inc. */ +#include #include #include +#include #include #include #include @@ -125,39 +127,50 @@ static void aplic_init_hw_irqs(struct aplic_priv *priv) writel(0, priv->regs + APLIC_DOMAINCFG); } +#ifdef CONFIG_ACPI +static const struct acpi_device_id aplic_acpi_match[] = { + { "RSCV0002", 0 }, + {} +}; +MODULE_DEVICE_TABLE(acpi, aplic_acpi_match); + +#endif + int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs) { struct device_node *np = to_of_node(dev->fwnode); struct of_phandle_args parent; int rc; - /* - * Currently, only OF fwnode is supported so extend this - * function for ACPI support. - */ - if (!np) - return -EINVAL; - /* Save device pointer and register base */ priv->dev = dev; priv->regs = regs; - /* Find out number of interrupt sources */ - rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs); - if (rc) { - dev_err(dev, "failed to get number of interrupt sources\n"); - return rc; - } - - /* - * Find out number of IDCs based on parent interrupts - * - * If "msi-parent" property is present then we ignore the - * APLIC IDCs which forces the APLIC driver to use MSI mode. - */ - if (!of_property_present(np, "msi-parent")) { - while (!of_irq_parse_one(np, priv->nr_idcs, &parent)) - priv->nr_idcs++; + if (np) { + /* Find out number of interrupt sources */ + rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs); + if (rc) { + dev_err(dev, "failed to get number of interrupt sources\n"); + return rc; + } + + /* + * Find out number of IDCs based on parent interrupts + * + * If "msi-parent" property is present then we ignore the + * APLIC IDCs which forces the APLIC driver to use MSI mode. + */ + if (!of_property_present(np, "msi-parent")) { + while (!of_irq_parse_one(np, priv->nr_idcs, &parent)) + priv->nr_idcs++; + } + } else { + rc = riscv_acpi_get_gsi_info(dev->fwnode, &priv->gsi_base, &priv->acpi_aplic_id, + &priv->nr_irqs, &priv->nr_idcs); + if (rc) { + dev_err(dev, "failed to find GSI mapping\n"); + return rc; + } } /* Setup initial state APLIC interrupts */ @@ -184,7 +197,11 @@ static int aplic_probe(struct platform_device *pdev) * If msi-parent property is present then setup APLIC MSI * mode otherwise setup APLIC direct mode. */ - msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); + if (is_of_node(dev->fwnode)) + msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); + else + msi_mode = imsic_acpi_get_fwnode(NULL) ? 
1 : 0; + if (msi_mode) rc = aplic_msi_setup(dev, regs); else @@ -192,6 +209,11 @@ static int aplic_probe(struct platform_device *pdev) if (rc) dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? "MSI" : "direct"); +#ifdef CONFIG_ACPI + if (!acpi_disabled) + acpi_dev_clear_dependencies(ACPI_COMPANION(dev)); +#endif + return rc; } @@ -204,6 +226,7 @@ static struct platform_driver aplic_driver = { .driver = { .name = "riscv-aplic", .of_match_table = aplic_match, + .acpi_match_table = ACPI_PTR(aplic_acpi_match), }, .probe = aplic_probe, }; diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h index 4393927d8c8070..b0ad8cde69b131 100644 --- a/drivers/irqchip/irq-riscv-aplic-main.h +++ b/drivers/irqchip/irq-riscv-aplic-main.h @@ -28,6 +28,7 @@ struct aplic_priv { u32 gsi_base; u32 nr_irqs; u32 nr_idcs; + u32 acpi_aplic_id; void __iomem *regs; struct aplic_msicfg msicfg; }; diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c index d7773f76e5d0a5..945bff28265cdc 100644 --- a/drivers/irqchip/irq-riscv-aplic-msi.c +++ b/drivers/irqchip/irq-riscv-aplic-msi.c @@ -175,6 +175,7 @@ static const struct msi_domain_template aplic_msi_template = { int aplic_msi_setup(struct device *dev, void __iomem *regs) { const struct imsic_global_config *imsic_global; + struct irq_domain *msi_domain; struct aplic_priv *priv; struct aplic_msicfg *mc; phys_addr_t pa; @@ -257,8 +258,14 @@ int aplic_msi_setup(struct device *dev, void __iomem *regs) * IMSIC and the IMSIC MSI domains are created later through * the platform driver probing so we set it explicitly here. */ - if (is_of_node(dev->fwnode)) + if (is_of_node(dev->fwnode)) { of_msi_configure(dev, to_of_node(dev->fwnode)); + } else { + msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev), + DOMAIN_BUS_PLATFORM_MSI); + if (msi_domain) + dev_set_msi_domain(dev, msi_domain); + } } if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template, diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c index 4fbb37074d29b9..c5c2e6929a2f5a 100644 --- a/drivers/irqchip/irq-riscv-imsic-early.c +++ b/drivers/irqchip/irq-riscv-imsic-early.c @@ -5,13 +5,16 @@ */ #define pr_fmt(fmt) "riscv-imsic: " fmt +#include #include #include #include #include #include #include +#include #include +#include #include #include @@ -182,7 +185,7 @@ static int __init imsic_early_dt_init(struct device_node *node, struct device_no int rc; /* Setup IMSIC state */ - rc = imsic_setup_state(fwnode); + rc = imsic_setup_state(fwnode, NULL); if (rc) { pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc); return rc; @@ -199,3 +202,62 @@ static int __init imsic_early_dt_init(struct device_node *node, struct device_no } IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init); + +#ifdef CONFIG_ACPI + +static struct fwnode_handle *imsic_acpi_fwnode; + +struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) +{ + return imsic_acpi_fwnode; +} + +static int __init imsic_early_acpi_init(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header; + int rc; + + imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic"); + if (!imsic_acpi_fwnode) { + pr_err("unable to allocate IMSIC FW node\n"); + return -ENOMEM; + } + + /* Setup IMSIC state */ + rc = imsic_setup_state(imsic_acpi_fwnode, imsic); + if (rc) { + pr_err("%pfwP: failed to setup state (error %d)\n", 
imsic_acpi_fwnode, rc); + return rc; + } + + /* Do early setup of IMSIC state and IPIs */ + rc = imsic_early_probe(imsic_acpi_fwnode); + if (rc) { + irq_domain_free_fwnode(imsic_acpi_fwnode); + imsic_acpi_fwnode = NULL; + return rc; + } + + rc = imsic_platform_acpi_probe(imsic_acpi_fwnode); + +#ifdef CONFIG_PCI + if (!rc) + pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode); +#endif + + if (rc) + pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n", + imsic_acpi_fwnode, rc); + + /* + * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can + * continue to work. So, no need to return failure. This is similar to + * DT where IPI works but MSI probe fails for some reason. + */ + return 0; +} + +IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL, + 1, imsic_early_acpi_init); +#endif diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c index 11723a763c102a..64905e6f52d789 100644 --- a/drivers/irqchip/irq-riscv-imsic-platform.c +++ b/drivers/irqchip/irq-riscv-imsic-platform.c @@ -5,6 +5,7 @@ */ #define pr_fmt(fmt) "riscv-imsic: " fmt +#include #include #include #include @@ -348,18 +349,37 @@ int imsic_irqdomain_init(void) return 0; } -static int imsic_platform_probe(struct platform_device *pdev) +static int imsic_platform_probe_common(struct fwnode_handle *fwnode) { - struct device *dev = &pdev->dev; - - if (imsic && imsic->fwnode != dev->fwnode) { - dev_err(dev, "fwnode mismatch\n"); + if (imsic && imsic->fwnode != fwnode) { + pr_err("%pfwP: fwnode mismatch\n", fwnode); return -ENODEV; } return imsic_irqdomain_init(); } +static int imsic_platform_dt_probe(struct platform_device *pdev) +{ + return imsic_platform_probe_common(pdev->dev.fwnode); +} + +#ifdef CONFIG_ACPI + +/* + * On ACPI based systems, PCI enumeration happens early during boot in + * acpi_scan_init(). PCI enumeration expects MSI domain setup before + * it calls pci_set_msi_domain(). Hence, unlike in DT where + * imsic-platform drive probe happens late during boot, ACPI based + * systems need to setup the MSI domain early. 
+ */ +int imsic_platform_acpi_probe(struct fwnode_handle *fwnode) +{ + return imsic_platform_probe_common(fwnode); +} + +#endif + static const struct of_device_id imsic_platform_match[] = { { .compatible = "riscv,imsics" }, {} @@ -370,6 +390,6 @@ static struct platform_driver imsic_platform_driver = { .name = "riscv-imsic", .of_match_table = imsic_platform_match, }, - .probe = imsic_platform_probe, + .probe = imsic_platform_dt_probe, }; builtin_platform_driver(imsic_platform_driver); diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c index 5479f872e62b42..b97e6cd89ed742 100644 --- a/drivers/irqchip/irq-riscv-imsic-state.c +++ b/drivers/irqchip/irq-riscv-imsic-state.c @@ -5,6 +5,7 @@ */ #define pr_fmt(fmt) "riscv-imsic: " fmt +#include #include #include #include @@ -510,18 +511,90 @@ static int __init imsic_matrix_init(void) return 0; } +static int __init imsic_populate_global_dt(struct fwnode_handle *fwnode, + struct imsic_global_config *global, + u32 *nr_parent_irqs) +{ + int rc; + + /* Find number of guest index bits in MSI address */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", + &global->guest_index_bits); + if (rc) + global->guest_index_bits = 0; + + /* Find number of HART index bits */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", + &global->hart_index_bits); + if (rc) { + /* Assume default value */ + global->hart_index_bits = __fls(*nr_parent_irqs); + if (BIT(global->hart_index_bits) < *nr_parent_irqs) + global->hart_index_bits++; + } + + /* Find number of group index bits */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", + &global->group_index_bits); + if (rc) + global->group_index_bits = 0; + + /* + * Find first bit position of group index. + * If not specified assumed the default APLIC-IMSIC configuration. + */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", + &global->group_index_shift); + if (rc) + global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; + + /* Find number of interrupt identities */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", + &global->nr_ids); + if (rc) { + pr_err("%pfwP: number of interrupt identities not found\n", fwnode); + return rc; + } + + /* Find number of guest interrupt identities */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", + &global->nr_guest_ids); + if (rc) + global->nr_guest_ids = global->nr_ids; + + return 0; +} + +static int __init imsic_populate_global_acpi(struct fwnode_handle *fwnode, + struct imsic_global_config *global, + u32 *nr_parent_irqs, void *opaque) +{ + struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)opaque; + + global->guest_index_bits = imsic->guest_index_bits; + global->hart_index_bits = imsic->hart_index_bits; + global->group_index_bits = imsic->group_index_bits; + global->group_index_shift = imsic->group_index_shift; + global->nr_ids = imsic->num_ids; + global->nr_guest_ids = imsic->num_guest_ids; + return 0; +} + static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode, u32 index, unsigned long *hartid) { struct of_phandle_args parent; int rc; - /* - * Currently, only OF fwnode is supported so extend this - * function for ACPI support. 
- */ - if (!is_of_node(fwnode)) - return -EINVAL; + if (!is_of_node(fwnode)) { + if (hartid) + *hartid = acpi_rintc_index_to_hartid(index); + + if (!hartid || (*hartid == INVALID_HARTID)) + return -EINVAL; + + return 0; + } rc = of_irq_parse_one(to_of_node(fwnode), index, &parent); if (rc) @@ -540,12 +613,8 @@ static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode, static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode, u32 index, struct resource *res) { - /* - * Currently, only OF fwnode is supported so extend this - * function for ACPI support. - */ if (!is_of_node(fwnode)) - return -EINVAL; + return acpi_rintc_get_imsic_mmio_info(index, res); return of_address_to_resource(to_of_node(fwnode), index, res); } @@ -553,20 +622,14 @@ static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode, static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode, struct imsic_global_config *global, u32 *nr_parent_irqs, - u32 *nr_mmios) + u32 *nr_mmios, + void *opaque) { unsigned long hartid; struct resource res; int rc; u32 i; - /* - * Currently, only OF fwnode is supported so extend this - * function for ACPI support. - */ - if (!is_of_node(fwnode)) - return -EINVAL; - *nr_parent_irqs = 0; *nr_mmios = 0; @@ -578,50 +641,13 @@ static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode, return -EINVAL; } - /* Find number of guest index bits in MSI address */ - rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", - &global->guest_index_bits); - if (rc) - global->guest_index_bits = 0; - - /* Find number of HART index bits */ - rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", - &global->hart_index_bits); - if (rc) { - /* Assume default value */ - global->hart_index_bits = __fls(*nr_parent_irqs); - if (BIT(global->hart_index_bits) < *nr_parent_irqs) - global->hart_index_bits++; - } - - /* Find number of group index bits */ - rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", - &global->group_index_bits); - if (rc) - global->group_index_bits = 0; + if (is_of_node(fwnode)) + rc = imsic_populate_global_dt(fwnode, global, nr_parent_irqs); + else + rc = imsic_populate_global_acpi(fwnode, global, nr_parent_irqs, opaque); - /* - * Find first bit position of group index. - * If not specified assumed the default APLIC-IMSIC configuration. 
- */ - rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", - &global->group_index_shift); if (rc) - global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; - - /* Find number of interrupt identities */ - rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", - &global->nr_ids); - if (rc) { - pr_err("%pfwP: number of interrupt identities not found\n", fwnode); return rc; - } - - /* Find number of guest interrupt identities */ - rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", - &global->nr_guest_ids); - if (rc) - global->nr_guest_ids = global->nr_ids; /* Sanity check guest index bits */ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT; @@ -688,7 +714,7 @@ static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode, return 0; } -int __init imsic_setup_state(struct fwnode_handle *fwnode) +int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque) { u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0; struct imsic_global_config *global; @@ -729,7 +755,7 @@ int __init imsic_setup_state(struct fwnode_handle *fwnode) } /* Parse IMSIC fwnode */ - rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios); + rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios, opaque); if (rc) goto out_free_local; diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h index 5ae2f69b035b6e..391e4428082757 100644 --- a/drivers/irqchip/irq-riscv-imsic-state.h +++ b/drivers/irqchip/irq-riscv-imsic-state.h @@ -102,7 +102,7 @@ void imsic_vector_debug_show_summary(struct seq_file *m, int ind); void imsic_state_online(void); void imsic_state_offline(void); -int imsic_setup_state(struct fwnode_handle *fwnode); +int imsic_setup_state(struct fwnode_handle *fwnode, void *opaque); int imsic_irqdomain_init(void); #endif diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index 47f3200476da5e..8c54113862205b 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -250,6 +250,85 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init); #ifdef CONFIG_ACPI +struct rintc_data { + union { + u32 ext_intc_id; + struct { + u32 context_id : 16, + reserved : 8, + aplic_plic_id : 8; + }; + }; + unsigned long hart_id; + u64 imsic_addr; + u32 imsic_size; +}; + +static u32 nr_rintc; +static struct rintc_data *rintc_acpi_data[NR_CPUS]; + +#define for_each_matching_plic(_plic_id) \ + unsigned int _plic; \ + \ + for (_plic = 0; _plic < nr_rintc; _plic++) \ + if (rintc_acpi_data[_plic]->aplic_plic_id != _plic_id) \ + continue; \ + else + +unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) +{ + unsigned int nctx = 0; + + for_each_matching_plic(plic_id) + nctx++; + + return nctx; +} + +static struct rintc_data *get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) +{ + unsigned int ctxt = 0; + + for_each_matching_plic(plic_id) { + if (ctxt == ctxt_idx) + return rintc_acpi_data[_plic]; + + ctxt++; + } + + return NULL; +} + +unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx) +{ + struct rintc_data *data = get_plic_context(plic_id, ctxt_idx); + + return data ? data->hart_id : INVALID_HARTID; +} + +unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) +{ + struct rintc_data *data = get_plic_context(plic_id, ctxt_idx); + + return data ? data->context_id : INVALID_CONTEXT; +} + +unsigned long acpi_rintc_index_to_hartid(u32 index) +{ + return index >= nr_rintc ? 
INVALID_HARTID : rintc_acpi_data[index]->hart_id; +} + +int acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) +{ + if (index >= nr_rintc) + return -1; + + res->start = rintc_acpi_data[index]->imsic_addr; + res->end = res->start + rintc_acpi_data[index]->imsic_size - 1; + res->flags = IORESOURCE_MEM; + return 0; +} + static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { @@ -258,6 +337,15 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, int rc; rintc = (struct acpi_madt_rintc *)header; + rintc_acpi_data[nr_rintc] = kzalloc(sizeof(*rintc_acpi_data[0]), GFP_KERNEL); + if (!rintc_acpi_data[nr_rintc]) + return -ENOMEM; + + rintc_acpi_data[nr_rintc]->ext_intc_id = rintc->ext_intc_id; + rintc_acpi_data[nr_rintc]->hart_id = rintc->hart_id; + rintc_acpi_data[nr_rintc]->imsic_addr = rintc->imsic_addr; + rintc_acpi_data[nr_rintc]->imsic_size = rintc->imsic_size; + nr_rintc++; /* * The ACPI MADT will have one INTC for each CPU (or HART) @@ -277,6 +365,8 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, rc = riscv_intc_init_common(fn, &riscv_intc_chip); if (rc) irq_domain_free_fwnode(fn); + else + acpi_set_irq_model(ACPI_IRQ_MODEL_RINTC, riscv_acpi_get_gsi_domain_id); return rc; } diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c index 31c202a1ae626b..9d0b80271949d3 100644 --- a/drivers/irqchip/irq-sa11x0.c +++ b/drivers/irqchip/irq-sa11x0.c @@ -127,8 +127,7 @@ static int __init sa1100irq_init_devicefs(void) device_initcall(sa1100irq_init_devicefs); -static asmlinkage void __exception_irq_entry -sa1100_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry sa1100_handle_irq(struct pt_regs *regs) { uint32_t icip, icmr, mask; diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 4d9ea718086d30..2f6ef5c495bdaa 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -4,6 +4,7 @@ * Copyright (C) 2018 Christoph Hellwig */ #define pr_fmt(fmt) "riscv-plic: " fmt +#include #include #include #include @@ -71,6 +72,8 @@ struct plic_priv { unsigned long plic_quirks; unsigned int nr_irqs; unsigned long *prio_save; + u32 gsi_base; + int acpi_plic_id; }; struct plic_handler { @@ -325,6 +328,10 @@ static int plic_irq_domain_translate(struct irq_domain *d, { struct plic_priv *priv = d->host_data; + /* For DT, gsi_base is always zero. */ + if (fwspec->param[0] >= priv->gsi_base) + fwspec->param[0] = fwspec->param[0] - priv->gsi_base; + if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) return irq_domain_translate_twocell(d, fwspec, hwirq, type); @@ -426,17 +433,36 @@ static const struct of_device_id plic_match[] = { {} }; +#ifdef CONFIG_ACPI + +static const struct acpi_device_id plic_acpi_match[] = { + { "RSCV0001", 0 }, + {} +}; +MODULE_DEVICE_TABLE(acpi, plic_acpi_match); + +#endif static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode, - u32 *nr_irqs, u32 *nr_contexts) + u32 *nr_irqs, u32 *nr_contexts, + u32 *gsi_base, u32 *id) { int rc; - /* - * Currently, only OF fwnode is supported so extend this - * function for ACPI support. 
- */ - if (!is_of_node(fwnode)) - return -EINVAL; + if (!is_of_node(fwnode)) { + rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL); + if (rc) { + pr_err("%pfwP: failed to find GSI mapping\n", fwnode); + return rc; + } + + *nr_contexts = acpi_rintc_get_plic_nr_contexts(*id); + if (WARN_ON(!*nr_contexts)) { + pr_err("%pfwP: no PLIC context available\n", fwnode); + return -EINVAL; + } + + return 0; + } rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs); if (rc) { @@ -450,22 +476,28 @@ static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode, return -EINVAL; } + *gsi_base = 0; + *id = 0; + return 0; } static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context, - u32 *parent_hwirq, int *parent_cpu) + u32 *parent_hwirq, int *parent_cpu, u32 id) { struct of_phandle_args parent; unsigned long hartid; int rc; - /* - * Currently, only OF fwnode is supported so extend this - * function for ACPI support. - */ - if (!is_of_node(fwnode)) - return -EINVAL; + if (!is_of_node(fwnode)) { + hartid = acpi_rintc_ext_parent_to_hartid(id, context); + if (hartid == INVALID_HARTID) + return -EINVAL; + + *parent_cpu = riscv_hartid_to_cpuid(hartid); + *parent_hwirq = RV_IRQ_EXT; + return 0; + } rc = of_irq_parse_one(to_of_node(fwnode), context, &parent); if (rc) @@ -489,6 +521,8 @@ static int plic_probe(struct fwnode_handle *fwnode) struct plic_priv *priv; irq_hw_number_t hwirq; void __iomem *regs; + int id, context_id; + u32 gsi_base; if (is_of_node(fwnode)) { const struct of_device_id *id; @@ -501,10 +535,12 @@ static int plic_probe(struct fwnode_handle *fwnode) if (!regs) return -ENOMEM; } else { - return -ENODEV; + regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); } - error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts); + error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id); if (error) goto fail_free_regs; @@ -518,6 +554,8 @@ static int plic_probe(struct fwnode_handle *fwnode) priv->plic_quirks = plic_quirks; priv->nr_irqs = nr_irqs; priv->regs = regs; + priv->gsi_base = gsi_base; + priv->acpi_plic_id = id; priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL); if (!priv->prio_save) { @@ -526,12 +564,23 @@ static int plic_probe(struct fwnode_handle *fwnode) } for (i = 0; i < nr_contexts; i++) { - error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu); + error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, + priv->acpi_plic_id); if (error) { pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i); continue; } + if (is_of_node(fwnode)) { + context_id = i; + } else { + context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i); + if (context_id == INVALID_CONTEXT) { + pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i); + continue; + } + } + /* * Skip contexts other than external interrupts for our * privilege level. 
@@ -569,10 +618,10 @@ static int plic_probe(struct fwnode_handle *fwnode) cpumask_set_cpu(cpu, &priv->lmask); handler->present = true; handler->hart_base = priv->regs + CONTEXT_BASE + - i * CONTEXT_SIZE; + context_id * CONTEXT_SIZE; raw_spin_lock_init(&handler->enable_lock); handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE + - i * CONTEXT_ENABLE_SIZE; + context_id * CONTEXT_ENABLE_SIZE; handler->priv = priv; handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), @@ -588,8 +637,8 @@ static int plic_probe(struct fwnode_handle *fwnode) nr_handlers++; } - priv->irqdomain = irq_domain_add_linear(to_of_node(fwnode), nr_irqs + 1, - &plic_irqdomain_ops, priv); + priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1, + &plic_irqdomain_ops, priv); if (WARN_ON(!priv->irqdomain)) goto fail_cleanup_contexts; @@ -626,13 +675,18 @@ static int plic_probe(struct fwnode_handle *fwnode) } } +#ifdef CONFIG_ACPI + if (!acpi_disabled) + acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev)); +#endif + pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n", fwnode, nr_irqs, nr_handlers, nr_contexts); return 0; fail_cleanup_contexts: for (i = 0; i < nr_contexts; i++) { - if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu)) + if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id)) continue; if (parent_hwirq != RV_IRQ_EXT || cpu < 0) continue; @@ -663,6 +717,7 @@ static struct platform_driver plic_driver = { .name = "riscv-plic", .of_match_table = plic_match, .suppress_bind_attrs = true, + .acpi_match_table = ACPI_PTR(plic_acpi_match), }, .probe = plic_platform_probe, }; diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 5018a06060e65c..ca471c6fee998b 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -128,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs) * Keep iterating over all registered FPGA IRQ controllers until there are * no pending interrupts. */ -static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) { int i, handled; diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 3ed2573345621e..70dee9ad4bae27 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -1024,7 +1024,6 @@ static int capi_release(struct inode *inode, struct file *file) static const struct file_operations capi_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = capi_read, .write = capi_write, .poll = capi_poll, diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index 509b362d6465d3..044e4961376c6f 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "ipac.h" diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index 61cb45c5d0d840..53fad9487574aa 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -82,7 +82,7 @@ * - has multiple clocks. * - has no usable clock due to jitter or packet loss (VoIP). * In this case the system's clock is used. The clock resolution depends on - * the jiffie resolution. + * the jiffy resolution. 
* * If a member joins a conference: * diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 83d6b484d3c6c2..7cfa8c61dba0a6 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -266,7 +266,6 @@ static const struct file_operations mISDN_fops = { .unlocked_ioctl = mISDN_ioctl, .open = mISDN_open, .release = mISDN_close, - .llseek = no_llseek, }; static struct miscdevice mISDNtimer = { diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 8d9d8da376e462..b784bb74a8378a 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -825,6 +825,14 @@ config LEDS_BLINKM This option enables support for the BlinkM RGB LED connected through I2C. Say Y to enable support for the BlinkM LED. +config LEDS_BLINKM_MULTICOLOR + bool "Enable multicolor support for BlinkM I2C RGB LED" + depends on LEDS_BLINKM + depends on LEDS_CLASS_MULTICOLOR=y || LEDS_CLASS_MULTICOLOR=LEDS_BLINKM + help + This option enables multicolor sysfs class support for BlinkM LED and + disables the older, separated sysfs interface + config LEDS_POWERNV tristate "LED support for PowerNV Platform" depends on LEDS_CLASS diff --git a/drivers/leds/flash/leds-aat1290.c b/drivers/leds/flash/leds-aat1290.c index e8f9dd29359262..c7b6a1f0128828 100644 --- a/drivers/leds/flash/leds-aat1290.c +++ b/drivers/leds/flash/leds-aat1290.c @@ -7,6 +7,7 @@ * Author: Jacek Anaszewski */ +#include #include #include #include @@ -215,7 +216,6 @@ static int aat1290_led_parse_dt(struct aat1290_led *led, struct device_node **sub_node) { struct device *dev = &led->pdev->dev; - struct device_node *child_node; #if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) struct pinctrl *pinctrl; #endif @@ -246,7 +246,8 @@ static int aat1290_led_parse_dt(struct aat1290_led *led, } #endif - child_node = of_get_next_available_child(dev_of_node(dev), NULL); + struct device_node *child_node __free(device_node) = + of_get_next_available_child(dev_of_node(dev), NULL); if (!child_node) { dev_err(dev, "No DT child node found for connected LED.\n"); return -EINVAL; @@ -267,7 +268,7 @@ static int aat1290_led_parse_dt(struct aat1290_led *led, if (ret < 0) { dev_err(dev, "flash-max-microamp DT property missing\n"); - goto err_parse_dt; + return ret; } ret = of_property_read_u32(child_node, "flash-max-timeout-us", @@ -275,15 +276,12 @@ static int aat1290_led_parse_dt(struct aat1290_led *led, if (ret < 0) { dev_err(dev, "flash-max-timeout-us DT property missing\n"); - goto err_parse_dt; + return ret; } *sub_node = child_node; -err_parse_dt: - of_node_put(child_node); - - return ret; + return 0; } static void aat1290_led_validate_mm_current(struct aat1290_led *led, diff --git a/drivers/leds/flash/leds-as3645a.c b/drivers/leds/flash/leds-as3645a.c index 2c6ef321b7c803..2f2d783c62c318 100644 --- a/drivers/leds/flash/leds-as3645a.c +++ b/drivers/leds/flash/leds-as3645a.c @@ -478,14 +478,12 @@ static int as3645a_detect(struct as3645a *flash) return as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE); } -static int as3645a_parse_node(struct as3645a *flash, - struct fwnode_handle *fwnode) +static int as3645a_parse_node(struct device *dev, struct as3645a *flash) { struct as3645a_config *cfg = &flash->cfg; - struct fwnode_handle *child; int rval; - fwnode_for_each_child_node(fwnode, child) { + device_for_each_child_node_scoped(dev, child) { u32 id = 0; fwnode_property_read_u32(child, "reg", &id); @@ -686,7 +684,7 @@ static int as3645a_probe(struct i2c_client *client) flash->client = client; - rval = as3645a_parse_node(flash, 
dev_fwnode(&client->dev)); + rval = as3645a_parse_node(&client->dev, flash); if (rval < 0) return rval; diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c index 7bb0aa2753e365..16a01a200c0b75 100644 --- a/drivers/leds/flash/leds-ktd2692.c +++ b/drivers/leds/flash/leds-ktd2692.c @@ -6,6 +6,7 @@ * Ingi Kim */ +#include #include #include #include @@ -208,7 +209,6 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, struct ktd2692_led_config_data *cfg) { struct device_node *np = dev_of_node(dev); - struct device_node *child_node; int ret; if (!np) @@ -239,7 +239,8 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, } } - child_node = of_get_next_available_child(np, NULL); + struct device_node *child_node __free(device_node) = + of_get_next_available_child(np, NULL); if (!child_node) { dev_err(dev, "No DT child node found for connected LED.\n"); return -EINVAL; @@ -252,26 +253,24 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, &cfg->movie_max_microamp); if (ret) { dev_err(dev, "failed to parse led-max-microamp\n"); - goto err_parse_dt; + return ret; } ret = of_property_read_u32(child_node, "flash-max-microamp", &cfg->flash_max_microamp); if (ret) { dev_err(dev, "failed to parse flash-max-microamp\n"); - goto err_parse_dt; + return ret; } ret = of_property_read_u32(child_node, "flash-max-timeout-us", &cfg->flash_max_timeout); if (ret) { dev_err(dev, "failed to parse flash-max-timeout-us\n"); - goto err_parse_dt; + return ret; } -err_parse_dt: - of_node_put(child_node); - return ret; + return 0; } static const struct led_flash_ops flash_ops = { diff --git a/drivers/leds/flash/leds-lm3601x.c b/drivers/leds/flash/leds-lm3601x.c index 7e93c447fec5c1..abf6b96ade3d58 100644 --- a/drivers/leds/flash/leds-lm3601x.c +++ b/drivers/leds/flash/leds-lm3601x.c @@ -190,7 +190,7 @@ static int lm3601x_brightness_set(struct led_classdev *cdev, goto out; } - ret = regmap_write(led->regmap, LM3601X_LED_TORCH_REG, brightness); + ret = regmap_write(led->regmap, LM3601X_LED_TORCH_REG, brightness - 1); if (ret < 0) goto out; @@ -341,8 +341,9 @@ static int lm3601x_register_leds(struct lm3601x_led *led, led_cdev = &led->fled_cdev.led_cdev; led_cdev->brightness_set_blocking = lm3601x_brightness_set; - led_cdev->max_brightness = DIV_ROUND_UP(led->torch_current_max, - LM3601X_TORCH_REG_DIV); + led_cdev->max_brightness = + DIV_ROUND_UP(led->torch_current_max - LM3601X_MIN_TORCH_I_UA + 1, + LM3601X_TORCH_REG_DIV); led_cdev->flags |= LED_DEV_CAP_FLASH; init_data.fwnode = fwnode; @@ -386,6 +387,14 @@ static int lm3601x_parse_node(struct lm3601x_led *led, goto out_err; } + if (led->torch_current_max > LM3601X_MAX_TORCH_I_UA) { + dev_warn(&led->client->dev, + "Max torch current set too high (%d vs %d)\n", + led->torch_current_max, + LM3601X_MAX_TORCH_I_UA); + led->torch_current_max = LM3601X_MAX_TORCH_I_UA; + } + ret = fwnode_property_read_u32(child, "flash-max-microamp", &led->flash_current_max); if (ret) { @@ -434,6 +443,10 @@ static int lm3601x_probe(struct i2c_client *client) return ret; } + ret = regmap_write(led->regmap, LM3601X_DEV_ID_REG, LM3601X_SW_RESET); + if (ret) + dev_warn(&client->dev, "Failed to reset the LED controller\n"); + mutex_init(&led->lock); return lm3601x_register_leds(led, fwnode); diff --git a/drivers/leds/flash/leds-max77693.c b/drivers/leds/flash/leds-max77693.c index 9f016b851193c2..90d78b3d22f882 100644 --- a/drivers/leds/flash/leds-max77693.c +++ 
b/drivers/leds/flash/leds-max77693.c @@ -599,7 +599,7 @@ static int max77693_led_parse_dt(struct max77693_led_device *led, { struct device *dev = &led->pdev->dev; struct max77693_sub_led *sub_leds = led->sub_leds; - struct device_node *node = dev_of_node(dev), *child_node; + struct device_node *node = dev_of_node(dev); struct property *prop; u32 led_sources[2]; int i, ret, fled_id; @@ -608,7 +608,7 @@ static int max77693_led_parse_dt(struct max77693_led_device *led, of_property_read_u32(node, "maxim,boost-mvout", &cfg->boost_vout); of_property_read_u32(node, "maxim,mvsys-min", &cfg->low_vsys); - for_each_available_child_of_node(node, child_node) { + for_each_available_child_of_node_scoped(node, child_node) { prop = of_find_property(child_node, "led-sources", NULL); if (prop) { const __be32 *srcs = NULL; @@ -622,7 +622,6 @@ static int max77693_led_parse_dt(struct max77693_led_device *led, } else { dev_err(dev, "led-sources DT property missing\n"); - of_node_put(child_node); return -EINVAL; } @@ -638,18 +637,16 @@ static int max77693_led_parse_dt(struct max77693_led_device *led, } else { dev_err(dev, "Wrong led-sources DT property value.\n"); - of_node_put(child_node); return -EINVAL; } if (sub_nodes[fled_id]) { dev_err(dev, "Conflicting \"led-sources\" DT properties\n"); - of_node_put(child_node); return -EINVAL; } - sub_nodes[fled_id] = child_node; + sub_nodes[fled_id] = of_node_get(child_node); sub_leds[fled_id].fled_id = fled_id; cfg->label[fled_id] = @@ -681,10 +678,8 @@ static int max77693_led_parse_dt(struct max77693_led_device *led, if (++cfg->num_leds == 2 || (max77693_fled_used(led, FLED1) && - max77693_fled_used(led, FLED2))) { - of_node_put(child_node); + max77693_fled_used(led, FLED2))) break; - } } if (cfg->num_leds == 0) { @@ -968,7 +963,7 @@ static int max77693_led_probe(struct platform_device *pdev) ret = max77693_setup(led, &led_cfg); if (ret < 0) - return ret; + goto err_setup; mutex_init(&led->lock); @@ -1000,6 +995,8 @@ static int max77693_led_probe(struct platform_device *pdev) else goto err_register_led1; } + of_node_put(sub_nodes[i]); + sub_nodes[i] = NULL; } return 0; @@ -1013,6 +1010,9 @@ static int max77693_led_probe(struct platform_device *pdev) err_register_led1: mutex_destroy(&led->lock); +err_setup: + for (i = FLED1; i <= FLED2; i++) + of_node_put(sub_nodes[i]); return ret; } diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c index bf70bf6fb0d593..41ce034f700ee5 100644 --- a/drivers/leds/flash/leds-qcom-flash.c +++ b/drivers/leds/flash/leds-qcom-flash.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -14,6 +14,9 @@ #include /* registers definitions */ +#define FLASH_REVISION_REG 0x00 +#define FLASH_4CH_REVISION_V0P1 0x01 + #define FLASH_TYPE_REG 0x04 #define FLASH_TYPE_VAL 0x18 @@ -73,6 +76,16 @@ #define UA_PER_MA 1000 +/* thermal threshold constants */ +#define OTST_3CH_MIN_VAL 3 +#define OTST1_4CH_MIN_VAL 0 +#define OTST1_4CH_V0P1_MIN_VAL 3 +#define OTST2_4CH_MIN_VAL 0 + +#define OTST1_MAX_CURRENT_MA 1000 +#define OTST2_MAX_CURRENT_MA 500 +#define OTST3_MAX_CURRENT_MA 200 + enum hw_type { QCOM_MVFLASH_3CH, QCOM_MVFLASH_4CH, @@ -98,6 +111,9 @@ enum { REG_IRESOLUTION, REG_CHAN_STROBE, REG_CHAN_EN, + REG_THERM_THRSH1, + REG_THERM_THRSH2, + REG_THERM_THRSH3, REG_MAX_COUNT, }; @@ -111,6 +127,9 @@ static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = { REG_FIELD(0x47, 0, 5), /* iresolution */ REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */ REG_FIELD(0x4c, 0, 2), /* chan_en */ + REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */ + REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */ + REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */ }; static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { @@ -123,6 +142,8 @@ static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = { REG_FIELD(0x49, 0, 3), /* iresolution */ REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */ REG_FIELD(0x4e, 0, 3), /* chan_en */ + REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */ + REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */ }; struct qcom_flash_data { @@ -130,9 +151,11 @@ struct qcom_flash_data { struct regmap_field *r_fields[REG_MAX_COUNT]; struct mutex lock; enum hw_type hw_type; + u32 total_ma; u8 leds_count; u8 max_channels; u8 chan_en_bits; + u8 revision; }; struct qcom_flash_led { @@ -143,6 +166,7 @@ struct qcom_flash_led { u32 max_timeout_ms; u32 flash_current_ma; u32 flash_timeout_ms; + u32 current_in_use_ma; u8 *chan_id; u8 chan_count; bool enabled; @@ -172,6 +196,127 @@ static int set_flash_module_en(struct qcom_flash_led *led, bool en) return rc; } +static int update_allowed_flash_current(struct qcom_flash_led *led, u32 *current_ma, bool strobe) +{ + struct qcom_flash_data *flash_data = led->flash_data; + u32 therm_ma, avail_ma, thrsh[3], min_thrsh, sts; + int rc = 0; + + mutex_lock(&flash_data->lock); + /* + * Put previously allocated current into allowed budget in either of these two cases: + * 1) LED is disabled; + * 2) LED is enabled repeatedly + */ + if (!strobe || led->current_in_use_ma != 0) { + if (flash_data->total_ma >= led->current_in_use_ma) + flash_data->total_ma -= led->current_in_use_ma; + else + flash_data->total_ma = 0; + + led->current_in_use_ma = 0; + if (!strobe) + goto unlock; + } + + /* + * Cache the default thermal threshold settings, and set them to the lowest levels before + * reading over-temp real time status. If over-temp has been triggered at the lowest + * threshold, it's very likely that it would be triggered at a higher (default) threshold + * when more flash current is requested. Prevent device from triggering over-temp condition + * by limiting the flash current for the new request. 
+ */ + rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH1], &thrsh[0]); + if (rc < 0) + goto unlock; + + rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH2], &thrsh[1]); + if (rc < 0) + goto unlock; + + if (flash_data->hw_type == QCOM_MVFLASH_3CH) { + rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH3], &thrsh[2]); + if (rc < 0) + goto unlock; + } + + min_thrsh = OTST_3CH_MIN_VAL; + if (flash_data->hw_type == QCOM_MVFLASH_4CH) + min_thrsh = (flash_data->revision == FLASH_4CH_REVISION_V0P1) ? + OTST1_4CH_V0P1_MIN_VAL : OTST1_4CH_MIN_VAL; + + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], min_thrsh); + if (rc < 0) + goto unlock; + + if (flash_data->hw_type == QCOM_MVFLASH_4CH) + min_thrsh = OTST2_4CH_MIN_VAL; + + /* + * The default thermal threshold settings have been updated hence + * restore them if any fault happens starting from here. + */ + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], min_thrsh); + if (rc < 0) + goto restore; + + if (flash_data->hw_type == QCOM_MVFLASH_3CH) { + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], min_thrsh); + if (rc < 0) + goto restore; + } + + /* Read thermal level status to get corresponding derating flash current */ + rc = regmap_field_read(flash_data->r_fields[REG_STATUS2], &sts); + if (rc) + goto restore; + + therm_ma = FLASH_TOTAL_CURRENT_MAX_UA / 1000; + if (flash_data->hw_type == QCOM_MVFLASH_3CH) { + if (sts & FLASH_STS_3CH_OTST3) + therm_ma = OTST3_MAX_CURRENT_MA; + else if (sts & FLASH_STS_3CH_OTST2) + therm_ma = OTST2_MAX_CURRENT_MA; + else if (sts & FLASH_STS_3CH_OTST1) + therm_ma = OTST1_MAX_CURRENT_MA; + } else { + if (sts & FLASH_STS_4CH_OTST2) + therm_ma = OTST2_MAX_CURRENT_MA; + else if (sts & FLASH_STS_4CH_OTST1) + therm_ma = OTST1_MAX_CURRENT_MA; + } + + /* Calculate the allowed flash current for the request */ + if (therm_ma <= flash_data->total_ma) + avail_ma = 0; + else + avail_ma = therm_ma - flash_data->total_ma; + + *current_ma = min_t(u32, *current_ma, avail_ma); + led->current_in_use_ma = *current_ma; + flash_data->total_ma += led->current_in_use_ma; + + dev_dbg(led->flash.led_cdev.dev, "allowed flash current: %dmA, total current: %dmA\n", + led->current_in_use_ma, flash_data->total_ma); + +restore: + /* Restore to default thermal threshold settings */ + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], thrsh[0]); + if (rc < 0) + goto unlock; + + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], thrsh[1]); + if (rc < 0) + goto unlock; + + if (flash_data->hw_type == QCOM_MVFLASH_3CH) + rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], thrsh[2]); + +unlock: + mutex_unlock(&flash_data->lock); + return rc; +} + static int set_flash_current(struct qcom_flash_led *led, u32 current_ma, enum led_mode mode) { struct qcom_flash_data *flash_data = led->flash_data; @@ -313,6 +458,10 @@ static int qcom_flash_strobe_set(struct led_classdev_flash *fled_cdev, bool stat if (rc) return rc; + rc = update_allowed_flash_current(led, &led->flash_current_ma, state); + if (rc < 0) + return rc; + rc = set_flash_current(led, led->flash_current_ma, FLASH_MODE); if (rc) return rc; @@ -429,6 +578,10 @@ static int qcom_flash_led_brightness_set(struct led_classdev *led_cdev, if (rc) return rc; + rc = update_allowed_flash_current(led, &current_ma, enable); + if (rc < 0) + return rc; + rc = set_flash_current(led, current_ma, TORCH_MODE); if (rc) return rc; @@ -707,6 +860,14 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
flash_data->hw_type = QCOM_MVFLASH_4CH; flash_data->max_channels = 4; regs = mvflash_4ch_regs; + + rc = regmap_read(regmap, reg_base + FLASH_REVISION_REG, &val); + if (rc < 0) { + dev_err(dev, "Failed to read flash LED module revision, rc=%d\n", rc); + return rc; + } + + flash_data->revision = val; } else { dev_err(dev, "flash LED subtype %#x is not yet supported\n", val); return -ENODEV; diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index 033ab5fed38a4b..81238376484beb 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -115,7 +115,7 @@ static int pm860x_led_set(struct led_classdev *cdev, static int pm860x_led_dt_init(struct platform_device *pdev, struct pm860x_led *data) { - struct device_node *nproot, *np; + struct device_node *nproot; int iset = 0; if (!dev_of_node(pdev->dev.parent)) @@ -125,12 +125,11 @@ static int pm860x_led_dt_init(struct platform_device *pdev, dev_err(&pdev->dev, "failed to find leds node\n"); return -ENODEV; } - for_each_available_child_of_node(nproot, np) { + for_each_available_child_of_node_scoped(nproot, np) { if (of_node_name_eq(np, data->name)) { of_property_read_u32(np, "marvell,88pm860x-iset", &iset); data->iset = PM8606_LED_CURRENT(iset); - of_node_put(np); break; } } diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c index 6475eadcb0df94..216755d6010fed 100644 --- a/drivers/leds/leds-aw2013.c +++ b/drivers/leds/leds-aw2013.c @@ -263,7 +263,7 @@ static int aw2013_blink_set(struct led_classdev *cdev, static int aw2013_probe_dt(struct aw2013 *chip) { - struct device_node *np = dev_of_node(&chip->client->dev), *child; + struct device_node *np = dev_of_node(&chip->client->dev); int count, ret = 0, i = 0; struct aw2013_led *led; @@ -273,7 +273,7 @@ static int aw2013_probe_dt(struct aw2013 *chip) regmap_write(chip->regmap, AW2013_RSTR, AW2013_RSTR_RESET); - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { struct led_init_data init_data = {}; u32 source; u32 imax; @@ -304,10 +304,8 @@ static int aw2013_probe_dt(struct aw2013 *chip) ret = devm_led_classdev_register_ext(&chip->client->dev, &led->cdev, &init_data); - if (ret < 0) { - of_node_put(child); + if (ret < 0) return ret; - } i++; } diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index 246f1296ab0920..29f5bad6179654 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c @@ -392,7 +392,6 @@ static int bcm6328_leds_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev_of_node(&pdev->dev); - struct device_node *child; void __iomem *mem; spinlock_t *lock; /* memory lock */ unsigned long val, *blink_leds, *blink_delay; @@ -435,7 +434,7 @@ static int bcm6328_leds_probe(struct platform_device *pdev) val |= BCM6328_SERIAL_LED_SHIFT_DIR; bcm6328_led_write(mem + BCM6328_REG_INIT, val); - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { int rc; u32 reg; @@ -454,10 +453,8 @@ static int bcm6328_leds_probe(struct platform_device *pdev) rc = bcm6328_led(dev, child, reg, mem, lock, blink_leds, blink_delay); - if (rc < 0) { - of_node_put(child); + if (rc < 0) return rc; - } } return 0; diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c index 86e51d44a5a741..51fcff2a64fd10 100644 --- a/drivers/leds/leds-bcm6358.c +++ b/drivers/leds/leds-bcm6358.c @@ -147,7 +147,6 @@ static int bcm6358_leds_probe(struct platform_device *pdev) { struct device *dev 
= &pdev->dev; struct device_node *np = dev_of_node(&pdev->dev); - struct device_node *child; void __iomem *mem; spinlock_t *lock; /* memory lock */ unsigned long val; @@ -184,7 +183,7 @@ static int bcm6358_leds_probe(struct platform_device *pdev) } bcm6358_led_write(mem + BCM6358_REG_CTRL, val); - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { int rc; u32 reg; @@ -198,10 +197,8 @@ static int bcm6358_leds_probe(struct platform_device *pdev) } rc = bcm6358_led(dev, child, reg, mem, lock); - if (rc < 0) { - of_node_put(child); + if (rc < 0) return rc; - } } return 0; diff --git a/drivers/leds/leds-bd2606mvv.c b/drivers/leds/leds-bd2606mvv.c index 3fda712d2f8095..c1181a35d0f762 100644 --- a/drivers/leds/leds-bd2606mvv.c +++ b/drivers/leds/leds-bd2606mvv.c @@ -69,16 +69,14 @@ static const struct regmap_config bd2606mvv_regmap = { static int bd2606mvv_probe(struct i2c_client *client) { - struct fwnode_handle *np, *child; struct device *dev = &client->dev; struct bd2606mvv_priv *priv; struct fwnode_handle *led_fwnodes[BD2606_MAX_LEDS] = { 0 }; int active_pairs[BD2606_MAX_LEDS / 2] = { 0 }; int err, reg; - int i; + int i, j; - np = dev_fwnode(dev); - if (!np) + if (!dev_fwnode(dev)) return -ENODEV; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -94,20 +92,18 @@ static int bd2606mvv_probe(struct i2c_client *client) i2c_set_clientdata(client, priv); - fwnode_for_each_available_child_node(np, child) { + device_for_each_child_node_scoped(dev, child) { struct bd2606mvv_led *led; err = fwnode_property_read_u32(child, "reg", &reg); - if (err) { - fwnode_handle_put(child); + if (err) return err; - } - if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) { - fwnode_handle_put(child); + + if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) return -EINVAL; - } + led = &priv->leds[reg]; - led_fwnodes[reg] = child; + led_fwnodes[reg] = fwnode_handle_get(child); active_pairs[reg / 2]++; led->priv = priv; led->led_no = reg; @@ -130,7 +126,8 @@ static int bd2606mvv_probe(struct i2c_client *client) &priv->leds[i].ldev, &init_data); if (err < 0) { - fwnode_handle_put(child); + for (j = i; j < BD2606_MAX_LEDS; j++) + fwnode_handle_put(led_fwnodes[j]); return dev_err_probe(dev, err, "couldn't register LED %s\n", priv->leds[i].ldev.name); diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c index e40b87aead2dfd..577497b9d426b8 100644 --- a/drivers/leds/leds-blinkm.c +++ b/drivers/leds/leds-blinkm.c @@ -2,6 +2,7 @@ /* * leds-blinkm.c * (c) Jan-Simon Möller (dl9pf@gmx.de) + * (c) Joseph Strauss (jstrauss@mailbox.org) */ #include @@ -15,6 +16,10 @@ #include #include #include +#include +#include + +#define NUM_LEDS 3 /* Addresses to scan - BlinkM is on 0x09 by default*/ static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END }; @@ -22,19 +27,25 @@ static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END }; static int blinkm_transfer_hw(struct i2c_client *client, int cmd); static int blinkm_test_run(struct i2c_client *client); +/* Contains structs for both the color-separated sysfs classes, and the new multicolor class */ struct blinkm_led { struct i2c_client *i2c_client; - struct led_classdev led_cdev; + union { + /* used when multicolor support is disabled */ + struct led_classdev led_cdev; + struct led_classdev_mc mcled_cdev; + } cdev; int id; }; -#define cdev_to_blmled(c) container_of(c, struct blinkm_led, led_cdev) +#define led_cdev_to_blmled(c) container_of(c, struct blinkm_led, cdev.led_cdev) +#define
mcled_cdev_to_led(c) container_of(c, struct blinkm_led, cdev.mcled_cdev) struct blinkm_data { struct i2c_client *i2c_client; struct mutex update_lock; /* used for led class interface */ - struct blinkm_led blinkm_leds[3]; + struct blinkm_led blinkm_leds[NUM_LEDS]; /* used for "blinkm" sysfs interface */ u8 red; /* color red */ u8 green; /* color green */ @@ -419,11 +430,29 @@ static int blinkm_transfer_hw(struct i2c_client *client, int cmd) return 0; } +static int blinkm_set_mc_brightness(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct led_classdev_mc *mcled_cdev = lcdev_to_mccdev(led_cdev); + struct blinkm_led *led = mcled_cdev_to_led(mcled_cdev); + struct blinkm_data *data = i2c_get_clientdata(led->i2c_client); + + led_mc_calc_color_components(mcled_cdev, value); + + data->next_red = (u8) mcled_cdev->subled_info[RED].brightness; + data->next_green = (u8) mcled_cdev->subled_info[GREEN].brightness; + data->next_blue = (u8) mcled_cdev->subled_info[BLUE].brightness; + + blinkm_transfer_hw(led->i2c_client, BLM_GO_RGB); + + return 0; +} + static int blinkm_led_common_set(struct led_classdev *led_cdev, enum led_brightness value, int color) { /* led_brightness is 0, 127 or 255 - we just use it here as-is */ - struct blinkm_led *led = cdev_to_blmled(led_cdev); + struct blinkm_led *led = led_cdev_to_blmled(led_cdev); struct blinkm_data *data = i2c_get_clientdata(led->i2c_client); switch (color) { @@ -565,117 +594,175 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info) return 0; } -static int blinkm_probe(struct i2c_client *client) +static int register_separate_colors(struct i2c_client *client, struct blinkm_data *data) { - struct blinkm_data *data; - struct blinkm_led *led[3]; - int err, i; + /* 3 separate classes for red, green, and blue respectively */ + struct blinkm_led *leds[NUM_LEDS]; + int err; char blinkm_led_name[28]; - - data = devm_kzalloc(&client->dev, - sizeof(struct blinkm_data), GFP_KERNEL); - if (!data) { - err = -ENOMEM; - goto exit; - } - - data->i2c_addr = 0x08; - /* i2c addr - use fake addr of 0x08 initially (real is 0x09) */ - data->fw_ver = 0xfe; - /* firmware version - use fake until we read real value - * (currently broken - BlinkM confused!) 
*/ - data->script_id = 0x01; - data->i2c_client = client; - - i2c_set_clientdata(client, data); - mutex_init(&data->update_lock); - - /* Register sysfs hooks */ - err = sysfs_create_group(&client->dev.kobj, &blinkm_group); - if (err < 0) { - dev_err(&client->dev, "couldn't register sysfs group\n"); - goto exit; - } - - for (i = 0; i < 3; i++) { + /* Register red, green, and blue sysfs classes */ + for (int i = 0; i < NUM_LEDS; i++) { /* RED = 0, GREEN = 1, BLUE = 2 */ - led[i] = &data->blinkm_leds[i]; - led[i]->i2c_client = client; - led[i]->id = i; - led[i]->led_cdev.max_brightness = 255; - led[i]->led_cdev.flags = LED_CORE_SUSPENDRESUME; + leds[i] = &data->blinkm_leds[i]; + leds[i]->i2c_client = client; + leds[i]->id = i; + leds[i]->cdev.led_cdev.max_brightness = 255; + leds[i]->cdev.led_cdev.flags = LED_CORE_SUSPENDRESUME; switch (i) { case RED: - snprintf(blinkm_led_name, sizeof(blinkm_led_name), + scnprintf(blinkm_led_name, sizeof(blinkm_led_name), "blinkm-%d-%d-red", client->adapter->nr, client->addr); - led[i]->led_cdev.name = blinkm_led_name; - led[i]->led_cdev.brightness_set_blocking = + leds[i]->cdev.led_cdev.name = blinkm_led_name; + leds[i]->cdev.led_cdev.brightness_set_blocking = blinkm_led_red_set; err = led_classdev_register(&client->dev, - &led[i]->led_cdev); + &leds[i]->cdev.led_cdev); if (err < 0) { dev_err(&client->dev, "couldn't register LED %s\n", - led[i]->led_cdev.name); + leds[i]->cdev.led_cdev.name); goto failred; } break; case GREEN: - snprintf(blinkm_led_name, sizeof(blinkm_led_name), + scnprintf(blinkm_led_name, sizeof(blinkm_led_name), "blinkm-%d-%d-green", client->adapter->nr, client->addr); - led[i]->led_cdev.name = blinkm_led_name; - led[i]->led_cdev.brightness_set_blocking = + leds[i]->cdev.led_cdev.name = blinkm_led_name; + leds[i]->cdev.led_cdev.brightness_set_blocking = blinkm_led_green_set; err = led_classdev_register(&client->dev, - &led[i]->led_cdev); + &leds[i]->cdev.led_cdev); if (err < 0) { dev_err(&client->dev, "couldn't register LED %s\n", - led[i]->led_cdev.name); + leds[i]->cdev.led_cdev.name); goto failgreen; } break; case BLUE: - snprintf(blinkm_led_name, sizeof(blinkm_led_name), + scnprintf(blinkm_led_name, sizeof(blinkm_led_name), "blinkm-%d-%d-blue", client->adapter->nr, client->addr); - led[i]->led_cdev.name = blinkm_led_name; - led[i]->led_cdev.brightness_set_blocking = + leds[i]->cdev.led_cdev.name = blinkm_led_name; + leds[i]->cdev.led_cdev.brightness_set_blocking = blinkm_led_blue_set; err = led_classdev_register(&client->dev, - &led[i]->led_cdev); + &leds[i]->cdev.led_cdev); if (err < 0) { dev_err(&client->dev, "couldn't register LED %s\n", - led[i]->led_cdev.name); + leds[i]->cdev.led_cdev.name); goto failblue; } break; + default: + break; } /* end switch */ } /* end for */ - - /* Initialize the blinkm */ - blinkm_init_hw(client); - return 0; failblue: - led_classdev_unregister(&led[GREEN]->led_cdev); - + led_classdev_unregister(&leds[GREEN]->cdev.led_cdev); failgreen: - led_classdev_unregister(&led[RED]->led_cdev); - + led_classdev_unregister(&leds[RED]->cdev.led_cdev); failred: sysfs_remove_group(&client->dev.kobj, &blinkm_group); -exit: + return err; } +static int register_multicolor(struct i2c_client *client, struct blinkm_data *data) +{ + struct blinkm_led *mc_led; + struct mc_subled *mc_led_info; + char blinkm_led_name[28]; + int err; + + /* Register multicolor sysfs class */ + /* The first element of leds is used for multicolor facilities */ + mc_led = &data->blinkm_leds[RED]; + mc_led->i2c_client = client; + + mc_led_info = 
devm_kcalloc(&client->dev, NUM_LEDS, sizeof(*mc_led_info), + GFP_KERNEL); + if (!mc_led_info) + return -ENOMEM; + + mc_led_info[RED].color_index = LED_COLOR_ID_RED; + mc_led_info[GREEN].color_index = LED_COLOR_ID_GREEN; + mc_led_info[BLUE].color_index = LED_COLOR_ID_BLUE; + + mc_led->cdev.mcled_cdev.subled_info = mc_led_info; + mc_led->cdev.mcled_cdev.num_colors = NUM_LEDS; + mc_led->cdev.mcled_cdev.led_cdev.brightness = 255; + mc_led->cdev.mcled_cdev.led_cdev.max_brightness = 255; + mc_led->cdev.mcled_cdev.led_cdev.flags = LED_CORE_SUSPENDRESUME; + + scnprintf(blinkm_led_name, sizeof(blinkm_led_name), + "blinkm-%d-%d:rgb:indicator", + client->adapter->nr, + client->addr); + mc_led->cdev.mcled_cdev.led_cdev.name = blinkm_led_name; + mc_led->cdev.mcled_cdev.led_cdev.brightness_set_blocking = blinkm_set_mc_brightness; + + err = led_classdev_multicolor_register(&client->dev, &mc_led->cdev.mcled_cdev); + if (err < 0) { + dev_err(&client->dev, "couldn't register LED %s\n", + mc_led->cdev.led_cdev.name); + sysfs_remove_group(&client->dev.kobj, &blinkm_group); + } + return 0; +} + +static int blinkm_probe(struct i2c_client *client) +{ + struct blinkm_data *data; + int err; + + data = devm_kzalloc(&client->dev, + sizeof(struct blinkm_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->i2c_addr = 0x08; + /* i2c addr - use fake addr of 0x08 initially (real is 0x09) */ + data->fw_ver = 0xfe; + /* firmware version - use fake until we read real value + * (currently broken - BlinkM confused!) + */ + data->script_id = 0x01; + data->i2c_client = client; + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + /* Register sysfs hooks */ + err = sysfs_create_group(&client->dev.kobj, &blinkm_group); + if (err < 0) { + dev_err(&client->dev, "couldn't register sysfs group\n"); + return err; + } + + if (!IS_ENABLED(CONFIG_LEDS_BLINKM_MULTICOLOR)) { + err = register_separate_colors(client, data); + if (err < 0) + return err; + } else { + err = register_multicolor(client, data); + if (err < 0) + return err; + } + + blinkm_init_hw(client); + + return 0; +} + static void blinkm_remove(struct i2c_client *client) { struct blinkm_data *data = i2c_get_clientdata(client); @@ -683,8 +770,8 @@ static void blinkm_remove(struct i2c_client *client) int i; /* make sure no workqueue entries are pending */ - for (i = 0; i < 3; i++) - led_classdev_unregister(&data->blinkm_leds[i].led_cdev); + for (i = 0; i < NUM_LEDS; i++) + led_classdev_unregister(&data->blinkm_leds[i].cdev.led_cdev); /* reset rgb */ data->next_red = 0x00; @@ -740,6 +827,7 @@ static struct i2c_driver blinkm_driver = { module_i2c_driver(blinkm_driver); MODULE_AUTHOR("Jan-Simon Moeller "); +MODULE_AUTHOR("Joseph Strauss "); MODULE_DESCRIPTION("BlinkM RGB LED driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 83fcd7b6afff76..4d1612d557c841 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -150,7 +150,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) { struct fwnode_handle *child; struct gpio_leds_priv *priv; - int count, ret; + int count, used, ret; count = device_get_child_node_count(dev); if (!count) @@ -159,9 +159,11 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) priv = devm_kzalloc(dev, struct_size(priv, leds, count), GFP_KERNEL); if (!priv) return ERR_PTR(-ENOMEM); + priv->num_leds = count; + used = 0; device_for_each_child_node(dev, child) { - struct gpio_led_data *led_dat = &priv->leds[priv->num_leds]; + struct 
gpio_led_data *led_dat = &priv->leds[used]; struct gpio_led led = {}; /* @@ -197,8 +199,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) /* Set gpiod label to match the corresponding LED name. */ gpiod_set_consumer_name(led_dat->gpiod, led_dat->cdev.dev->kobj.name); - priv->num_leds++; + used++; } + priv->num_leds = used; return priv; } diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c index 5e1a4d39a107f8..27bfab3da47984 100644 --- a/drivers/leds/leds-is31fl319x.c +++ b/drivers/leds/leds-is31fl319x.c @@ -392,7 +392,7 @@ static int is31fl319x_parse_child_fw(const struct device *dev, static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31) { - struct fwnode_handle *fwnode = dev_fwnode(dev), *child; + struct fwnode_handle *fwnode = dev_fwnode(dev); int count; int ret; @@ -404,7 +404,7 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31) is31->cdef = device_get_match_data(dev); count = 0; - fwnode_for_each_available_child_node(fwnode, child) + device_for_each_child_node_scoped(dev, child) count++; dev_dbg(dev, "probing with %d leds defined in DT\n", count); @@ -414,33 +414,25 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31) "Number of leds defined must be between 1 and %u\n", is31->cdef->num_leds); - fwnode_for_each_available_child_node(fwnode, child) { + device_for_each_child_node_scoped(dev, child) { struct is31fl319x_led *led; u32 reg; ret = fwnode_property_read_u32(child, "reg", &reg); - if (ret) { - ret = dev_err_probe(dev, ret, "Failed to read led 'reg' property\n"); - goto put_child_node; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to read led 'reg' property\n"); - if (reg < 1 || reg > is31->cdef->num_leds) { - ret = dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg); - goto put_child_node; - } + if (reg < 1 || reg > is31->cdef->num_leds) + return dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg); led = &is31->leds[reg - 1]; - if (led->configured) { - ret = dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg); - goto put_child_node; - } + if (led->configured) + return dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg); ret = is31fl319x_parse_child_fw(dev, child, led, is31); - if (ret) { - ret = dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg); - goto put_child_node; - } + if (ret) + return dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg); led->configured = true; } @@ -454,10 +446,6 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31) } return 0; - -put_child_node: - fwnode_handle_put(child); - return ret; } static inline int is31fl3190_microamp_to_cs(struct device *dev, u32 microamp) diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c index b0a0be77bb33b9..8793330dd4142f 100644 --- a/drivers/leds/leds-is31fl32xx.c +++ b/drivers/leds/leds-is31fl32xx.c @@ -363,10 +363,9 @@ static struct is31fl32xx_led_data *is31fl32xx_find_led_data( static int is31fl32xx_parse_dt(struct device *dev, struct is31fl32xx_priv *priv) { - struct device_node *child; int ret = 0; - for_each_available_child_of_node(dev_of_node(dev), child) { + for_each_available_child_of_node_scoped(dev_of_node(dev), child) { struct led_init_data init_data = {}; struct is31fl32xx_led_data *led_data = &priv->leds[priv->num_leds]; @@ -376,7 +375,7 @@ static int is31fl32xx_parse_dt(struct device *dev, ret = is31fl32xx_parse_child_dt(dev, child, led_data); if (ret) -
goto err; + return ret; /* Detect if channel is already in use by another child */ other_led_data = is31fl32xx_find_led_data(priv, @@ -385,8 +384,7 @@ static int is31fl32xx_parse_dt(struct device *dev, dev_err(dev, "Node %pOF 'reg' conflicts with another LED\n", child); - ret = -EINVAL; - goto err; + return -EINVAL; } init_data.fwnode = of_fwnode_handle(child); @@ -396,17 +394,13 @@ static int is31fl32xx_parse_dt(struct device *dev, if (ret) { dev_err(dev, "Failed to register LED for %pOF: %d\n", child, ret); - goto err; + return ret; } priv->num_leds++; } return 0; - -err: - of_node_put(child); - return ret; } static const struct of_device_id of_is31fl32xx_match[] = { diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 29e7142dca7281..5a2e259679cfdf 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -965,24 +965,16 @@ EXPORT_SYMBOL_GPL(lp55xx_update_bits); bool lp55xx_is_extclk_used(struct lp55xx_chip *chip) { struct clk *clk; - int err; - clk = devm_clk_get(&chip->cl->dev, "32k_clk"); + clk = devm_clk_get_enabled(&chip->cl->dev, "32k_clk"); if (IS_ERR(clk)) goto use_internal_clk; - err = clk_prepare_enable(clk); - if (err) + if (clk_get_rate(clk) != LP55XX_CLK_32K) goto use_internal_clk; - if (clk_get_rate(clk) != LP55XX_CLK_32K) { - clk_disable_unprepare(clk); - goto use_internal_clk; - } - dev_info(&chip->cl->dev, "%dHz external clock used\n", LP55XX_CLK_32K); - chip->clk = clk; return true; use_internal_clk: @@ -995,9 +987,6 @@ static void lp55xx_deinit_device(struct lp55xx_chip *chip) { struct lp55xx_platform_data *pdata = chip->pdata; - if (chip->clk) - clk_disable_unprepare(chip->clk); - if (pdata->enable_gpiod) gpiod_set_value(pdata->enable_gpiod, 0); } @@ -1173,16 +1162,13 @@ static int lp55xx_parse_multi_led(struct device_node *np, struct lp55xx_led_config *cfg, int child_number) { - struct device_node *child; int num_colors = 0, ret; - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { ret = lp55xx_parse_multi_led_child(child, cfg, child_number, num_colors); - if (ret) { - of_node_put(child); + if (ret) return ret; - } num_colors++; } diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h index 1bb7c559662c9c..8fd64ec4091903 100644 --- a/drivers/leds/leds-lp55xx-common.h +++ b/drivers/leds/leds-lp55xx-common.h @@ -193,7 +193,6 @@ struct lp55xx_engine { */ struct lp55xx_chip { struct i2c_client *cl; - struct clk *clk; struct lp55xx_platform_data *pdata; struct mutex lock; /* lock for user-space interface */ int num_leds; diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c index bbd1d359bba47c..da99d114bfb212 100644 --- a/drivers/leds/leds-mc13783.c +++ b/drivers/leds/leds-mc13783.c @@ -12,6 +12,7 @@ * Eric Miao */ +#include #include #include #include @@ -113,7 +114,7 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt( { struct mc13xxx_leds *leds = platform_get_drvdata(pdev); struct mc13xxx_leds_platform_data *pdata; - struct device_node *parent, *child; + struct device_node *child; struct device *dev = &pdev->dev; int i = 0, ret = -ENODATA; @@ -121,24 +122,23 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt( if (!pdata) return ERR_PTR(-ENOMEM); - parent = of_get_child_by_name(dev_of_node(dev->parent), "leds"); + struct device_node *parent __free(device_node) = + of_get_child_by_name(dev_of_node(dev->parent), "leds"); if (!parent) - goto out_node_put; + return 
ERR_PTR(-ENODATA); ret = of_property_read_u32_array(parent, "led-control", pdata->led_control, leds->devtype->num_regs); if (ret) - goto out_node_put; + return ERR_PTR(ret); pdata->num_leds = of_get_available_child_count(parent); pdata->led = devm_kcalloc(dev, pdata->num_leds, sizeof(*pdata->led), GFP_KERNEL); - if (!pdata->led) { - ret = -ENOMEM; - goto out_node_put; - } + if (!pdata->led) + return ERR_PTR(-ENOMEM); for_each_available_child_of_node(parent, child) { const char *str; @@ -158,12 +158,10 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt( } pdata->num_leds = i; - ret = i > 0 ? 0 : -ENODATA; - -out_node_put: - of_node_put(parent); + if (i <= 0) + return ERR_PTR(-ENODATA); - return ret ? ERR_PTR(ret) : pdata; + return pdata; } #else static inline struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt( diff --git a/drivers/leds/leds-mt6323.c b/drivers/leds/leds-mt6323.c index 40d50851082329..a19e8e0b6d1b1d 100644 --- a/drivers/leds/leds-mt6323.c +++ b/drivers/leds/leds-mt6323.c @@ -527,7 +527,6 @@ static int mt6323_led_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev_of_node(dev); - struct device_node *child; struct mt6397_chip *hw = dev_get_drvdata(dev->parent); struct mt6323_leds *leds; struct mt6323_led *led; @@ -565,28 +564,25 @@ static int mt6323_led_probe(struct platform_device *pdev) return ret; } - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { struct led_init_data init_data = {}; bool is_wled; ret = of_property_read_u32(child, "reg", &reg); if (ret) { dev_err(dev, "Failed to read led 'reg' property\n"); - goto put_child_node; + return ret; } if (reg >= max_leds || reg >= MAX_SUPPORTED_LEDS || leds->led[reg]) { dev_err(dev, "Invalid led reg %u\n", reg); - ret = -EINVAL; - goto put_child_node; + return -EINVAL; } led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); - if (!led) { - ret = -ENOMEM; - goto put_child_node; - } + if (!led) + return -ENOMEM; is_wled = of_property_read_bool(child, "mediatek,is-wled"); @@ -612,7 +608,7 @@ static int mt6323_led_probe(struct platform_device *pdev) if (ret < 0) { dev_err(leds->dev, "Failed to LED set default from devicetree\n"); - goto put_child_node; + return ret; } init_data.fwnode = of_fwnode_handle(child); @@ -621,15 +617,11 @@ static int mt6323_led_probe(struct platform_device *pdev) &init_data); if (ret) { dev_err(dev, "Failed to register LED: %d\n", ret); - goto put_child_node; + return ret; } } return 0; - -put_child_node: - of_node_put(child); - return ret; } static void mt6323_led_remove(struct platform_device *pdev) diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c index 77213b79f84d95..af5a908b8d9edd 100644 --- a/drivers/leds/leds-netxbig.c +++ b/drivers/leds/leds-netxbig.c @@ -423,7 +423,6 @@ static int netxbig_leds_get_of_pdata(struct device *dev, struct device_node *gpio_ext_np; struct platform_device *gpio_ext_pdev; struct device *gpio_ext_dev; - struct device_node *child; struct netxbig_gpio_ext *gpio_ext; struct netxbig_led_timer *timers; struct netxbig_led *leds, *led; @@ -507,7 +506,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev, } led = leds; - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { const char *string; int *mode_val; int num_modes; @@ -515,17 +514,17 @@ static int netxbig_leds_get_of_pdata(struct device *dev, ret = of_property_read_u32(child, "mode-addr", &led->mode_addr); if (ret) - goto
err_node_put; + goto put_device; ret = of_property_read_u32(child, "bright-addr", &led->bright_addr); if (ret) - goto err_node_put; + goto put_device; ret = of_property_read_u32(child, "max-brightness", &led->bright_max); if (ret) - goto err_node_put; + goto put_device; mode_val = devm_kcalloc(dev, @@ -533,7 +532,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev, GFP_KERNEL); if (!mode_val) { ret = -ENOMEM; - goto err_node_put; + goto put_device; } for (i = 0; i < NETXBIG_LED_MODE_NUM; i++) @@ -542,12 +541,12 @@ static int netxbig_leds_get_of_pdata(struct device *dev, ret = of_property_count_u32_elems(child, "mode-val"); if (ret < 0 || ret % 2) { ret = -EINVAL; - goto err_node_put; + goto put_device; } num_modes = ret / 2; if (num_modes > NETXBIG_LED_MODE_NUM) { ret = -EINVAL; - goto err_node_put; + goto put_device; } for (i = 0; i < num_modes; i++) { @@ -560,7 +559,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev, "mode-val", 2 * i + 1, &val); if (mode >= NETXBIG_LED_MODE_NUM) { ret = -EINVAL; - goto err_node_put; + goto put_device; } mode_val[mode] = val; } @@ -583,8 +582,6 @@ static int netxbig_leds_get_of_pdata(struct device *dev, return 0; -err_node_put: - of_node_put(child); put_device: put_device(gpio_ext_dev); return ret; diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 9f3fac66a11c7f..1b47acf54720b3 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -215,8 +215,7 @@ static int pca9532_update_hw_blink(struct pca9532_led *led, if (other->state == PCA9532_PWM1) { if (other->ldev.blink_delay_on != delay_on || other->ldev.blink_delay_off != delay_off) { - dev_err(&led->client->dev, - "HW can handle only one blink configuration at a time\n"); + /* HW can handle only one blink configuration at a time */ return -EINVAL; } } @@ -224,7 +223,7 @@ static int pca9532_update_hw_blink(struct pca9532_led *led, psc = ((delay_on + delay_off) * PCA9532_PWM_PERIOD_DIV - 1) / 1000; if (psc > U8_MAX) { - dev_err(&led->client->dev, "Blink period too long to be handled by hardware\n"); + /* Blink period too long to be handled by hardware */ return -EINVAL; } @@ -506,7 +505,6 @@ static struct pca9532_platform_data * pca9532_of_populate_pdata(struct device *dev, struct device_node *np) { struct pca9532_platform_data *pdata; - struct device_node *child; int devid, maxleds; int i = 0; const char *state; @@ -525,7 +523,7 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np) of_property_read_u8_array(np, "nxp,psc", &pdata->psc[PCA9532_PWM_ID_0], ARRAY_SIZE(pdata->psc)); - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { if (of_property_read_string(child, "label", &pdata->leds[i].name)) pdata->leds[i].name = child->name; @@ -538,10 +536,8 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np) else if (!strcmp(state, "keep")) pdata->leds[i].state = PCA9532_KEEP; } - if (++i >= maxleds) { - of_node_put(child); + if (++i >= maxleds) break; - } } return pdata; diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c index 78215dff14997c..11c7bb69573e8c 100644 --- a/drivers/leds/leds-pca995x.c +++ b/drivers/leds/leds-pca995x.c @@ -19,10 +19,6 @@ #define PCA995X_MODE1 0x00 #define PCA995X_MODE2 0x01 #define PCA995X_LEDOUT0 0x02 -#define PCA9955B_PWM0 0x08 -#define PCA9952_PWM0 0x0A -#define PCA9952_IREFALL 0x43 -#define PCA9955B_IREFALL 0x45 /* Auto-increment disabled. 
Normal mode */ #define PCA995X_MODE1_CFG 0x00 @@ -34,17 +30,38 @@ #define PCA995X_LDRX_MASK 0x3 #define PCA995X_LDRX_BITS 2 -#define PCA995X_MAX_OUTPUTS 16 +#define PCA995X_MAX_OUTPUTS 24 #define PCA995X_OUTPUTS_PER_REG 4 #define PCA995X_IREFALL_FULL_CFG 0xFF #define PCA995X_IREFALL_HALF_CFG (PCA995X_IREFALL_FULL_CFG / 2) -#define PCA995X_TYPE_NON_B 0 -#define PCA995X_TYPE_B 1 - #define ldev_to_led(c) container_of(c, struct pca995x_led, ldev) +struct pca995x_chipdef { + unsigned int num_leds; + u8 pwm_base; + u8 irefall; +}; + +static const struct pca995x_chipdef pca9952_chipdef = { + .num_leds = 16, + .pwm_base = 0x0a, + .irefall = 0x43, +}; + +static const struct pca995x_chipdef pca9955b_chipdef = { + .num_leds = 16, + .pwm_base = 0x08, + .irefall = 0x45, +}; + +static const struct pca995x_chipdef pca9956b_chipdef = { + .num_leds = 24, + .pwm_base = 0x0a, + .irefall = 0x40, +}; + struct pca995x_led { unsigned int led_no; struct led_classdev ldev; @@ -54,7 +71,7 @@ struct pca995x_led { struct pca995x_chip { struct regmap *regmap; struct pca995x_led leds[PCA995X_MAX_OUTPUTS]; - int btype; + const struct pca995x_chipdef *chipdef; }; static int pca995x_brightness_set(struct led_classdev *led_cdev, @@ -62,10 +79,11 @@ static int pca995x_brightness_set(struct led_classdev *led_cdev, { struct pca995x_led *led = ldev_to_led(led_cdev); struct pca995x_chip *chip = led->chip; + const struct pca995x_chipdef *chipdef = chip->chipdef; u8 ledout_addr, pwmout_addr; int shift, ret; - pwmout_addr = (chip->btype ? PCA9955B_PWM0 : PCA9952_PWM0) + led->led_no; + pwmout_addr = chipdef->pwm_base + led->led_no; ledout_addr = PCA995X_LEDOUT0 + (led->led_no / PCA995X_OUTPUTS_PER_REG); shift = PCA995X_LDRX_BITS * (led->led_no % PCA995X_OUTPUTS_PER_REG); @@ -102,43 +120,38 @@ static const struct regmap_config pca995x_regmap = { static int pca995x_probe(struct i2c_client *client) { struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 }; - struct fwnode_handle *np, *child; struct device *dev = &client->dev; + const struct pca995x_chipdef *chipdef; struct pca995x_chip *chip; struct pca995x_led *led; - int i, btype, reg, ret; + int i, j, reg, ret; - btype = (unsigned long)device_get_match_data(&client->dev); + chipdef = device_get_match_data(&client->dev); - np = dev_fwnode(dev); - if (!np) + if (!dev_fwnode(dev)) return -ENODEV; chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; - chip->btype = btype; + chip->chipdef = chipdef; chip->regmap = devm_regmap_init_i2c(client, &pca995x_regmap); if (IS_ERR(chip->regmap)) return PTR_ERR(chip->regmap); i2c_set_clientdata(client, chip); - fwnode_for_each_available_child_node(np, child) { + device_for_each_child_node_scoped(dev, child) { ret = fwnode_property_read_u32(child, "reg", &reg); - if (ret) { - fwnode_handle_put(child); + if (ret) return ret; - } - if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) { - fwnode_handle_put(child); + if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) return -EINVAL; - } led = &chip->leds[reg]; - led_fwnodes[reg] = child; + led_fwnodes[reg] = fwnode_handle_get(child); led->chip = chip; led->led_no = reg; led->ldev.brightness_set_blocking = pca995x_brightness_set; @@ -157,7 +170,8 @@ static int pca995x_probe(struct i2c_client *client) &chip->leds[i].ldev, &init_data); if (ret < 0) { - fwnode_handle_put(child); + for (j = i; j < PCA995X_MAX_OUTPUTS; j++) + fwnode_handle_put(led_fwnodes[j]); return dev_err_probe(dev, ret, "Could not register LED %s\n", chip->leds[i].ldev.name); @@
-170,21 +184,21 @@ static int pca995x_probe(struct i2c_client *client) return ret; /* IREF Output current value for all LEDn outputs */ - return regmap_write(chip->regmap, - btype ? PCA9955B_IREFALL : PCA9952_IREFALL, - PCA995X_IREFALL_HALF_CFG); + return regmap_write(chip->regmap, chipdef->irefall, PCA995X_IREFALL_HALF_CFG); } static const struct i2c_device_id pca995x_id[] = { - { "pca9952", .driver_data = (kernel_ulong_t)PCA995X_TYPE_NON_B }, - { "pca9955b", .driver_data = (kernel_ulong_t)PCA995X_TYPE_B }, + { "pca9952", .driver_data = (kernel_ulong_t)&pca9952_chipdef }, + { "pca9955b", .driver_data = (kernel_ulong_t)&pca9955b_chipdef }, + { "pca9956b", .driver_data = (kernel_ulong_t)&pca9956b_chipdef }, {} }; MODULE_DEVICE_TABLE(i2c, pca995x_id); static const struct of_device_id pca995x_of_match[] = { - { .compatible = "nxp,pca9952", .data = (void *)PCA995X_TYPE_NON_B }, - { .compatible = "nxp,pca9955b", .data = (void *)PCA995X_TYPE_B }, + { .compatible = "nxp,pca9952", .data = &pca9952_chipdef }, + { .compatible = "nxp,pca9955b", .data = &pca9955b_chipdef }, + { .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef }, {}, }; MODULE_DEVICE_TABLE(of, pca995x_of_match); diff --git a/drivers/leds/leds-sc27xx-bltc.c b/drivers/leds/leds-sc27xx-bltc.c index f04db793e8d61c..cca98c644aa65e 100644 --- a/drivers/leds/leds-sc27xx-bltc.c +++ b/drivers/leds/leds-sc27xx-bltc.c @@ -276,7 +276,7 @@ static int sc27xx_led_register(struct device *dev, struct sc27xx_led_priv *priv) static int sc27xx_led_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np = dev_of_node(dev), *child; + struct device_node *np = dev_of_node(dev); struct sc27xx_led_priv *priv; u32 base, count, reg; int err; @@ -304,17 +304,13 @@ static int sc27xx_led_probe(struct platform_device *pdev) return err; } - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { err = of_property_read_u32(child, "reg", &reg); - if (err) { - of_node_put(child); + if (err) return err; - } - if (reg >= SC27XX_LEDS_MAX || priv->leds[reg].active) { - of_node_put(child); + if (reg >= SC27XX_LEDS_MAX || priv->leds[reg].active) return -EINVAL; - } priv->leds[reg].fwnode = of_fwnode_handle(child); priv->leds[reg].active = true; diff --git a/drivers/leds/leds-sun50i-a100.c b/drivers/leds/leds-sun50i-a100.c index 119eff9471f054..4c468d48748648 100644 --- a/drivers/leds/leds-sun50i-a100.c +++ b/drivers/leds/leds-sun50i-a100.c @@ -368,7 +368,7 @@ static int sun50i_a100_ledc_suspend(struct device *dev) if (!xfer_active) break; - msleep(1); + usleep_range(1000, 1100); } clk_disable_unprepare(priv->mod_clk); diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c index 39f740be058f14..4cff8c4b020ca1 100644 --- a/drivers/leds/leds-turris-omnia.c +++ b/drivers/leds/leds-turris-omnia.c @@ -452,7 +452,7 @@ static int omnia_mcu_get_features(const struct i2c_client *client) static int omnia_leds_probe(struct i2c_client *client) { struct device *dev = &client->dev; - struct device_node *np = dev_of_node(dev), *child; + struct device_node *np = dev_of_node(dev); struct omnia_leds *leds; struct omnia_led *led; int ret, count; @@ -497,12 +497,10 @@ static int omnia_leds_probe(struct i2c_client *client) } led = &leds->leds[0]; - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { ret = omnia_led_register(client, led, child); - if (ret < 0) { - of_node_put(child); + if (ret < 0) return ret; - } led += ret; } @@ -532,6
+530,7 @@ static const struct of_device_id of_omnia_leds_match[] = { { .compatible = "cznic,turris-omnia-leds", }, {}, }; +MODULE_DEVICE_TABLE(of, of_omnia_leds_match); static const struct i2c_device_id omnia_id[] = { { "omnia" }, diff --git a/drivers/leds/rgb/leds-mt6370-rgb.c b/drivers/leds/rgb/leds-mt6370-rgb.c index 359ef00498b43a..10a0b5b45227ba 100644 --- a/drivers/leds/rgb/leds-mt6370-rgb.c +++ b/drivers/leds/rgb/leds-mt6370-rgb.c @@ -21,7 +21,7 @@ #include #include -#include +#include enum { MT6370_LED_ISNK1 = 0, diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c index e74b2ceed1c26d..f3c9ef2bfa572f 100644 --- a/drivers/leds/rgb/leds-qcom-lpg.c +++ b/drivers/leds/rgb/leds-qcom-lpg.c @@ -1368,7 +1368,6 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np) { struct led_init_data init_data = {}; struct led_classdev *cdev; - struct device_node *child; struct mc_subled *info; struct lpg_led *led; const char *state; @@ -1399,12 +1398,10 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np) if (!info) return -ENOMEM; i = 0; - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { ret = lpg_parse_channel(lpg, child, &led->channels[i]); - if (ret < 0) { - of_node_put(child); + if (ret < 0) return ret; - } info[i].color_index = led->channels[i]->color; info[i].intensity = 0; @@ -1600,7 +1597,6 @@ static int lpg_init_sdam(struct lpg *lpg) static int lpg_probe(struct platform_device *pdev) { - struct device_node *np; struct lpg *lpg; int ret; int i; @@ -1640,12 +1636,10 @@ static int lpg_probe(struct platform_device *pdev) if (ret < 0) return ret; - for_each_available_child_of_node(pdev->dev.of_node, np) { + for_each_available_child_of_node_scoped(pdev->dev.of_node, np) { ret = lpg_add_led(lpg, np); - if (ret) { - of_node_put(np); + if (ret) return ret; - } } for (i = 0; i < lpg->num_channels; i++) diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index 22bba8e97642a4..4b0863db901a9e 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -39,6 +39,8 @@ * (has carrier) or not * tx - LED blinks on transmitted data * rx - LED blinks on receive data + * tx_err - LED blinks on transmit error + * rx_err - LED blinks on receive error * * Note: If the user selects a mode that is not supported by hw, default * behavior is to fall back to software control of the LED. 
However not every @@ -144,7 +146,9 @@ static void set_baseline_state(struct led_netdev_data *trigger_data) * checking stats */ if (test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) || - test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode)) + test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) || + test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) || + test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode)) schedule_delayed_work(&trigger_data->work, 0); } } @@ -337,6 +341,8 @@ static ssize_t netdev_led_attr_show(struct device *dev, char *buf, case TRIGGER_NETDEV_FULL_DUPLEX: case TRIGGER_NETDEV_TX: case TRIGGER_NETDEV_RX: + case TRIGGER_NETDEV_TX_ERR: + case TRIGGER_NETDEV_RX_ERR: bit = attr; break; default: @@ -371,6 +377,8 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf, case TRIGGER_NETDEV_FULL_DUPLEX: case TRIGGER_NETDEV_TX: case TRIGGER_NETDEV_RX: + case TRIGGER_NETDEV_TX_ERR: + case TRIGGER_NETDEV_RX_ERR: bit = attr; break; default: @@ -429,6 +437,8 @@ DEFINE_NETDEV_TRIGGER(half_duplex, TRIGGER_NETDEV_HALF_DUPLEX); DEFINE_NETDEV_TRIGGER(full_duplex, TRIGGER_NETDEV_FULL_DUPLEX); DEFINE_NETDEV_TRIGGER(tx, TRIGGER_NETDEV_TX); DEFINE_NETDEV_TRIGGER(rx, TRIGGER_NETDEV_RX); +DEFINE_NETDEV_TRIGGER(tx_err, TRIGGER_NETDEV_TX_ERR); +DEFINE_NETDEV_TRIGGER(rx_err, TRIGGER_NETDEV_RX_ERR); static ssize_t interval_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -538,6 +548,8 @@ static struct attribute *netdev_trig_attrs[] = { &dev_attr_half_duplex.attr, &dev_attr_rx.attr, &dev_attr_tx.attr, + &dev_attr_rx_err.attr, + &dev_attr_tx_err.attr, &dev_attr_interval.attr, &dev_attr_offloaded.attr, NULL @@ -628,7 +640,9 @@ static void netdev_trig_work(struct work_struct *work) /* If we are not looking for RX/TX then return */ if (!test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) && - !test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode)) + !test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) && + !test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) && + !test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode)) return; dev_stats = dev_get_stats(trigger_data->net_dev, &temp); @@ -636,7 +650,11 @@ static void netdev_trig_work(struct work_struct *work) (test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) ? dev_stats->tx_packets : 0) + (test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) ? - dev_stats->rx_packets : 0); + dev_stats->rx_packets : 0) + + (test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) ? + dev_stats->tx_errors : 0) + + (test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode) ? 
+ dev_stats->rx_errors : 0); if (trigger_data->last_activity != new_activity) { led_stop_software_blink(trigger_data->led_cdev); diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c index 3d361c9200302e..374a841f18c375 100644 --- a/drivers/leds/uleds.c +++ b/drivers/leds/uleds.c @@ -200,7 +200,6 @@ static const struct file_operations uleds_fops = { .read = uleds_read, .write = uleds_write, .poll = uleds_poll, - .llseek = no_llseek, }; static struct miscdevice uleds_misc = { diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index 2633bc254935cc..126dd1cfba8efe 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index b0407c5fadb2a9..88adee42ba8209 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -842,7 +842,6 @@ static ssize_t adb_write(struct file *file, const char __user *buf, static const struct file_operations adb_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = adb_read, .write = adb_write, .open = adb_open, diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 13626205530d36..bede200e32e8ff 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -387,7 +387,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, dma_set_max_seg_size(&dev->ofdev.dev, 65536); dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff); -#if defined(CONFIG_PCI) && defined(CONFIG_DMA_OPS) +#if defined(CONFIG_PCI) && defined(CONFIG_ARCH_HAS_DMA_OPS) /* Set the DMA ops to the ones from the PCI device, this could be * fishy if we didn't know that on PowerMac it's always direct ops * or iommu ops that will work fine @@ -396,7 +396,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, */ dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata; dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops; -#endif /* CONFIG_PCI && CONFIG_DMA_OPS */ +#endif /* CONFIG_PCI && CONFIG_ARCH_HAS_DMA_OPS */ #ifdef DEBUG printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n", diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index b2b78a53e53224..a01bc5090cdfd2 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -1314,7 +1314,6 @@ static int smu_release(struct inode *inode, struct file *file) static const struct file_operations smu_device_fops = { - .llseek = no_llseek, .read = smu_read, .write = smu_write, .poll = smu_fpoll, diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index 89450645c2305b..26bd9ed5e66455 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c @@ -178,7 +178,7 @@ void __init pmu_backlight_init(void) } bd->props.brightness = level; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); printk(KERN_INFO "PMU Backlight initialized (%s)\n", name); diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 9d5703b6093759..b0f09c70f1ff87 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -2334,7 +2334,7 @@ static const struct platform_suspend_ops pmu_pm_ops = { .valid = pmu_sleep_valid, }; -static int register_pmu_pm_ops(void) +static int __init register_pmu_pm_ops(void) { if (pmu_kind == PMU_OHARE_BASED) powerbook_sleep_init_3400(); diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig 
index 4eed972959279a..6fb995778636a3 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -25,6 +25,7 @@ config ARM_MHU_V2 config ARM_MHU_V3 tristate "ARM MHUv3 Mailbox" + depends on ARM64 || COMPILE_TEST depends on HAS_IOMEM || COMPILE_TEST depends on OF help @@ -73,7 +74,7 @@ config ARMADA_37XX_RWTM_MBOX config OMAP2PLUS_MBOX tristate "OMAP2+ Mailbox framework support" - depends on ARCH_OMAP2PLUS || ARCH_K3 + depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST help Mailbox implementation for OMAP family chips with hardware for interprocessor communication involving DSP, IVA1.0 and IVA2 in diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c index fbfd0202047c37..ea12fb8d24015c 100644 --- a/drivers/mailbox/bcm2835-mailbox.c +++ b/drivers/mailbox/bcm2835-mailbox.c @@ -145,7 +145,8 @@ static int bcm2835_mbox_probe(struct platform_device *pdev) spin_lock_init(&mbox->lock); ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0), - bcm2835_mbox_irq, 0, dev_name(dev), mbox); + bcm2835_mbox_irq, IRQF_NO_SUSPEND, dev_name(dev), + mbox); if (ret) { dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n", ret); diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index d17efb1dd0cb17..f815dab3be50cd 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -30,7 +30,7 @@ #define IMX_MU_SCU_CHANS 6 /* TX0/RX0 */ #define IMX_MU_S4_CHANS 2 -#define IMX_MU_CHAN_NAME_SIZE 20 +#define IMX_MU_CHAN_NAME_SIZE 32 #define IMX_MU_V2_PAR_OFF 0x4 #define IMX_MU_V2_TR_MASK GENMASK(7, 0) @@ -782,7 +782,7 @@ static int imx_mu_init_generic(struct imx_mu_priv *priv) cp->chan = &priv->mbox_chans[i]; priv->mbox_chans[i].con_priv = cp; snprintf(cp->irq_desc, sizeof(cp->irq_desc), - "imx_mu_chan[%i-%i]", cp->type, cp->idx); + "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx); } priv->mbox.num_chans = IMX_MU_CHANS; @@ -819,7 +819,7 @@ static int imx_mu_init_specific(struct imx_mu_priv *priv) cp->chan = &priv->mbox_chans[i]; priv->mbox_chans[i].con_priv = cp; snprintf(cp->irq_desc, sizeof(cp->irq_desc), - "imx_mu_chan[%i-%i]", cp->type, cp->idx); + "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx); } priv->mbox.num_chans = num_chans; diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index ebff3baf304515..d3d26a2c98956c 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -450,30 +450,20 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, const char *name) { struct device_node *np = cl->dev->of_node; - struct property *prop; - const char *mbox_name; - int index = 0; + int index; if (!np) { dev_err(cl->dev, "%s() currently only supports DT\n", __func__); return ERR_PTR(-EINVAL); } - if (!of_get_property(np, "mbox-names", NULL)) { - dev_err(cl->dev, - "%s() requires an \"mbox-names\" property\n", __func__); + index = of_property_match_string(np, "mbox-names", name); + if (index < 0) { + dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n", + __func__, name); return ERR_PTR(-EINVAL); } - - of_property_for_each_string(np, "mbox-names", prop, mbox_name) { - if (!strncmp(name, mbox_name, strlen(name))) - return mbox_request_channel(cl, index); - index++; - } - - dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n", - __func__, name); - return ERR_PTR(-EINVAL); + return mbox_request_channel(cl, index); } EXPORT_SYMBOL_GPL(mbox_request_channel_byname); diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c index 
7a87424657a158..6797770474a55d 100644 --- a/drivers/mailbox/omap-mailbox.c +++ b/drivers/mailbox/omap-mailbox.c @@ -603,7 +603,7 @@ static struct platform_driver omap_mbox_driver = { .driver = { .name = "omap-mailbox", .pm = &omap_mbox_pm_ops, - .of_match_table = of_match_ptr(omap_mailbox_of_match), + .of_match_table = omap_mailbox_of_match, }, }; module_platform_driver(omap_mbox_driver); diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c index 8ffad059e8984e..4d966cb2ed0367 100644 --- a/drivers/mailbox/rockchip-mailbox.c +++ b/drivers/mailbox/rockchip-mailbox.c @@ -159,7 +159,7 @@ static const struct of_device_id rockchip_mbox_of_match[] = { { .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data}, { }, }; -MODULE_DEVICE_TABLE(of, rockchp_mbox_of_match); +MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match); static int rockchip_mbox_probe(struct platform_device *pdev) { diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c index 9ae57de77d4d76..ee8539dfcef549 100644 --- a/drivers/mailbox/sprd-mailbox.c +++ b/drivers/mailbox/sprd-mailbox.c @@ -62,7 +62,6 @@ struct sprd_mbox_priv { void __iomem *outbox_base; /* Base register address for supplementary outbox */ void __iomem *supp_base; - struct clk *clk; u32 outbox_fifo_depth; struct mutex lock; @@ -291,19 +290,13 @@ static const struct mbox_chan_ops sprd_mbox_ops = { .shutdown = sprd_mbox_shutdown, }; -static void sprd_mbox_disable(void *data) -{ - struct sprd_mbox_priv *priv = data; - - clk_disable_unprepare(priv->clk); -} - static int sprd_mbox_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sprd_mbox_priv *priv; int ret, inbox_irq, outbox_irq, supp_irq; unsigned long id, supp; + struct clk *clk; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -331,20 +324,10 @@ static int sprd_mbox_probe(struct platform_device *pdev) if (IS_ERR(priv->outbox_base)) return PTR_ERR(priv->outbox_base); - priv->clk = devm_clk_get(dev, "enable"); - if (IS_ERR(priv->clk)) { + clk = devm_clk_get_enabled(dev, "enable"); + if (IS_ERR(clk)) { dev_err(dev, "failed to get mailbox clock\n"); - return PTR_ERR(priv->clk); - } - - ret = clk_prepare_enable(priv->clk); - if (ret) - return ret; - - ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv); - if (ret) { - dev_err(dev, "failed to add mailbox disable action\n"); - return ret; + return PTR_ERR(clk); } inbox_irq = platform_get_irq_byname(pdev, "inbox"); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 098bf526136c94..d478aafa02c97f 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -529,9 +529,6 @@ static struct dm_buffer *list_to_buffer(struct list_head *l) { struct lru_entry *le = list_entry(l, struct lru_entry, list); - if (!le) - return NULL; - return le_to_buffer(le); } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 17f0fab1e25496..aaeeabfab09b0a 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -1368,7 +1368,7 @@ static void mg_copy(struct work_struct *ws) */ bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio); - BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */ + BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */ mg->overwrite_bio = NULL; inc_io_migrations(mg->cache); mg_full_copy(ws); @@ -3200,8 +3200,6 @@ static int parse_cblock_range(struct cache *cache, const char *str, * Try and parse form (ii) first. 
*/ r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy); - if (r < 0) - return r; if (r == 2) { result->begin = to_cblock(b); @@ -3213,8 +3211,6 @@ static int parse_cblock_range(struct cache *cache, const char *str, * That didn't work, try form (i). */ r = sscanf(str, "%llu%c", &b, &dummy); - if (r < 0) - return r; if (r == 1) { result->begin = to_cblock(b); diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c index 2db84cd2202b06..14c5c28d938b26 100644 --- a/drivers/md/dm-clone-metadata.c +++ b/drivers/md/dm-clone-metadata.c @@ -530,10 +530,7 @@ static int __load_bitset_in_core(struct dm_clone_metadata *cmd) return r; for (i = 0; ; i++) { - if (dm_bitset_cursor_get_value(&c)) - __set_bit(i, cmd->region_map); - else - __clear_bit(i, cmd->region_map); + __assign_bit(i, cmd->region_map, dm_bitset_cursor_get_value(&c)); if (i >= (cmd->nr_regions - 1)) break; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 348b4b26c27233..1ae2c71bb383b7 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -147,6 +147,7 @@ enum cipher_flags { CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */ CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */ CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */ + CRYPT_KEY_MAC_SIZE_SET, /* The integrity_key_size option was used */ }; /* @@ -2613,35 +2614,31 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string key = request_key(type, key_desc + 1, NULL); if (IS_ERR(key)) { - kfree_sensitive(new_key_string); - return PTR_ERR(key); + ret = PTR_ERR(key); + goto free_new_key_string; } down_read(&key->sem); - ret = set_key(cc, key); - if (ret < 0) { - up_read(&key->sem); - key_put(key); - kfree_sensitive(new_key_string); - return ret; - } - up_read(&key->sem); key_put(key); + if (ret < 0) + goto free_new_key_string; /* clear the flag since following operations may invalidate previously valid key */ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); ret = crypt_setkey(cc); + if (ret) + goto free_new_key_string; - if (!ret) { - set_bit(DM_CRYPT_KEY_VALID, &cc->flags); - kfree_sensitive(cc->key_string); - cc->key_string = new_key_string; - } else - kfree_sensitive(new_key_string); + set_bit(DM_CRYPT_KEY_VALID, &cc->flags); + kfree_sensitive(cc->key_string); + cc->key_string = new_key_string; + return 0; +free_new_key_string: + kfree_sensitive(new_key_string); return ret; } @@ -2937,7 +2934,8 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api) if (IS_ERR(mac)) return PTR_ERR(mac); - cc->key_mac_size = crypto_ahash_digestsize(mac); + if (!test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags)) + cc->key_mac_size = crypto_ahash_digestsize(mac); crypto_free_ahash(mac); cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL); @@ -3219,6 +3217,13 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar cc->cipher_auth = kstrdup(sval, GFP_KERNEL); if (!cc->cipher_auth) return -ENOMEM; + } else if (sscanf(opt_string, "integrity_key_size:%u%c", &val, &dummy) == 1) { + if (!val) { + ti->error = "Invalid integrity_key_size argument"; + return -EINVAL; + } + cc->key_mac_size = val; + set_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags); } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) { if (cc->sector_size < (1 << SECTOR_SHIFT) || cc->sector_size > 4096 || @@ -3607,10 +3612,10 @@ 
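/*
 * Illustrative sketch (not part of the patch): the dm-clone hunk above
 * replaces the open-coded "if (val) __set_bit() else __clear_bit()" pair with
 * __assign_bit(), which sets or clears a bit depending on a boolean. The
 * helper in isolation, with made-up names:
 */
#include <linux/bitops.h>

static void example_copy_flags(unsigned long *dst, const bool *src,
			       unsigned long nr_bits)
{
	unsigned long i;

	for (i = 0; i < nr_bits; i++)
		__assign_bit(i, dst, src[i]);	/* non-atomic set-or-clear */
}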
static void crypt_status(struct dm_target *ti, status_type_t type, num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); + num_feature_args += !!cc->used_tag_size; num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); - if (cc->used_tag_size) - num_feature_args++; + num_feature_args += test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags); if (num_feature_args) { DMEMIT(" %d", num_feature_args); if (ti->num_discard_bios) @@ -3631,6 +3636,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type, DMEMIT(" sector_size:%d", cc->sector_size); if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) DMEMIT(" iv_large_sectors"); + if (test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags)) + DMEMIT(" integrity_key_size:%u", cc->key_mac_size); } break; @@ -3758,7 +3765,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type crypt_target = { .name = "crypt", - .version = {1, 27, 0}, + .version = {1, 28, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index acff2f64f251f9..ee9f7cecd78e0e 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -284,6 +284,7 @@ struct dm_integrity_c { mempool_t recheck_pool; struct bio_set recheck_bios; + struct bio_set recalc_bios; struct notifier_block reboot_notifier; }; @@ -321,7 +322,9 @@ struct dm_integrity_io { struct dm_bio_details bio_details; char *integrity_payload; + unsigned payload_len; bool integrity_payload_from_mempool; + bool integrity_range_locked; }; struct journal_completion { @@ -359,7 +362,7 @@ static struct kmem_cache *journal_io_cache; #endif static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); -static int dm_integrity_map_inline(struct dm_integrity_io *dio); +static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map); static void integrity_bio_wait(struct work_struct *w); static void dm_integrity_dtr(struct dm_target *ti); @@ -491,7 +494,8 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr) __u8 *sb = (__u8 *)ic->sb; __u8 *mac = sb + (1 << SECTOR_SHIFT) - mac_size; - if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT) { + if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT || + mac_size > HASH_MAX_DIGESTSIZE) { dm_integrity_io_error(ic, "digest is too long", -EINVAL); return -EINVAL; } @@ -1500,15 +1504,15 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat if (!ic->meta_dev) flush_data = false; if (flush_data) { - fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, - fr.io_req.mem.type = DM_IO_KMEM, - fr.io_req.mem.ptr.addr = NULL, - fr.io_req.notify.fn = flush_notify, + fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; + fr.io_req.mem.type = DM_IO_KMEM; + fr.io_req.mem.ptr.addr = NULL; + fr.io_req.notify.fn = flush_notify; fr.io_req.notify.context = &fr; - fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio), - fr.io_reg.bdev = ic->dev->bdev, - fr.io_reg.sector = 0, - fr.io_reg.count = 0, + fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio); + fr.io_reg.bdev = ic->dev->bdev; + fr.io_reg.sector = 0; + fr.io_reg.count = 0; fr.ic = ic; init_completion(&fr.comp); r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT); @@ -1946,8 
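/*
 * Illustrative sketch (not part of the patch): the dm_integrity_flush_buffers()
 * hunk above turns trailing commas into semicolons. The old code compiled
 * because assignments chained with ',' form one comma-operator expression,
 * which happens to behave the same here but silently changes meaning if a
 * statement or control flow is later inserted in the middle of the chain.
 * A reduced version of the pitfall, with made-up names:
 */
struct example_cfg {
	int a, b, c;
};

static void example_comma_pitfall(struct example_cfg *cfg)
{
	cfg->a = 1,	/* comma operator: still part of one statement */
	cfg->b = 2,	/* ditto */
	cfg->c = 3;	/* only this semicolon actually ends the statement */
}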
+1950,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio) dio->bi_status = 0; dio->op = bio_op(bio); - if (ic->mode == 'I') - return dm_integrity_map_inline(dio); + if (ic->mode == 'I') { + bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector); + dio->integrity_payload = NULL; + dio->integrity_payload_from_mempool = false; + dio->integrity_range_locked = false; + return dm_integrity_map_inline(dio, true); + } if (unlikely(dio->op == REQ_OP_DISCARD)) { if (ti->max_io_len) { @@ -2397,15 +2406,13 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map do_endio_flush(ic, dio); } -static int dm_integrity_map_inline(struct dm_integrity_io *dio) +static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map) { struct dm_integrity_c *ic = dio->ic; struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); struct bio_integrity_payload *bip; - unsigned payload_len, digest_size, extra_size, ret; - - dio->integrity_payload = NULL; - dio->integrity_payload_from_mempool = false; + unsigned ret; + sector_t recalc_sector; if (unlikely(bio_integrity(bio))) { bio->bi_status = BLK_STS_NOTSUPP; @@ -2418,28 +2425,67 @@ static int dm_integrity_map_inline(struct dm_integrity_io *dio) return DM_MAPIO_REMAPPED; retry: - payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block); - digest_size = crypto_shash_digestsize(ic->internal_hash); - extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; - payload_len += extra_size; - dio->integrity_payload = kmalloc(payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); - if (unlikely(!dio->integrity_payload)) { - const unsigned x_size = PAGE_SIZE << 1; - if (payload_len > x_size) { - unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block; - if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) { - bio->bi_status = BLK_STS_NOTSUPP; - bio_endio(bio); - return DM_MAPIO_SUBMITTED; + if (!dio->integrity_payload) { + unsigned digest_size, extra_size; + dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block); + digest_size = crypto_shash_digestsize(ic->internal_hash); + extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; + dio->payload_len += extra_size; + dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); + if (unlikely(!dio->integrity_payload)) { + const unsigned x_size = PAGE_SIZE << 1; + if (dio->payload_len > x_size) { + unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block; + if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) { + bio->bi_status = BLK_STS_NOTSUPP; + bio_endio(bio); + return DM_MAPIO_SUBMITTED; + } + dm_accept_partial_bio(bio, sectors); + goto retry; } - dm_accept_partial_bio(bio, sectors); - goto retry; } + } + + dio->range.logical_sector = bio->bi_iter.bi_sector; + dio->range.n_sectors = bio_sectors(bio); + + if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) + goto skip_spinlock; +#ifdef CONFIG_64BIT + /* + * On 64-bit CPUs we can optimize the lock away (so that it won't cause + * cache line bouncing) and use acquire/release barriers instead. + * + * Paired with smp_store_release in integrity_recalc_inline. 
+ */ + recalc_sector = le64_to_cpu(smp_load_acquire(&ic->sb->recalc_sector)); + if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)) + goto skip_spinlock; +#endif + spin_lock_irq(&ic->endio_wait.lock); + recalc_sector = le64_to_cpu(ic->sb->recalc_sector); + if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector) + goto skip_unlock; + if (unlikely(!add_new_range(ic, &dio->range, true))) { + if (from_map) { + spin_unlock_irq(&ic->endio_wait.lock); + INIT_WORK(&dio->work, integrity_bio_wait); + queue_work(ic->wait_wq, &dio->work); + return DM_MAPIO_SUBMITTED; + } + wait_and_add_new_range(ic, &dio->range); + } + dio->integrity_range_locked = true; +skip_unlock: + spin_unlock_irq(&ic->endio_wait.lock); +skip_spinlock: + + if (unlikely(!dio->integrity_payload)) { dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO)); dio->integrity_payload_from_mempool = true; } - bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector); dio->bio_details.bi_iter = bio->bi_iter; if (unlikely(!dm_integrity_check_limits(ic, bio->bi_iter.bi_sector, bio))) { @@ -2449,7 +2495,7 @@ static int dm_integrity_map_inline(struct dm_integrity_io *dio) bio->bi_iter.bi_sector += ic->start + SB_SECTORS; bip = bio_integrity_alloc(bio, GFP_NOIO, 1); - if (unlikely(IS_ERR(bip))) { + if (IS_ERR(bip)) { bio->bi_status = errno_to_blk_status(PTR_ERR(bip)); bio_endio(bio); return DM_MAPIO_SUBMITTED; @@ -2470,8 +2516,8 @@ static int dm_integrity_map_inline(struct dm_integrity_io *dio) } ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload), - payload_len, offset_in_page(dio->integrity_payload)); - if (unlikely(ret != payload_len)) { + dio->payload_len, offset_in_page(dio->integrity_payload)); + if (unlikely(ret != dio->payload_len)) { bio->bi_status = BLK_STS_RESOURCE; bio_endio(bio); return DM_MAPIO_SUBMITTED; @@ -2522,7 +2568,7 @@ static void dm_integrity_inline_recheck(struct work_struct *w) } bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1); - if (unlikely(IS_ERR(bip))) { + if (IS_ERR(bip)) { bio_put(outgoing_bio); bio->bi_status = errno_to_blk_status(PTR_ERR(bip)); bio_endio(bio); @@ -2579,6 +2625,9 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) { unsigned pos = 0; + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && + unlikely(dio->integrity_range_locked)) + goto skip_check; while (dio->bio_details.bi_iter.bi_size) { char digest[HASH_MAX_DIGESTSIZE]; struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter); @@ -2598,9 +2647,10 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT); } } - if (likely(dio->op == REQ_OP_READ) || likely(dio->op == REQ_OP_WRITE)) { - dm_integrity_free_payload(dio); - } +skip_check: + dm_integrity_free_payload(dio); + if (unlikely(dio->integrity_range_locked)) + remove_range(ic, &dio->range); } return DM_ENDIO_DONE; } @@ -2608,8 +2658,26 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status static void integrity_bio_wait(struct work_struct *w) { struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); + struct dm_integrity_c *ic = dio->ic; - dm_integrity_map_continue(dio, false); + if (ic->mode == 'I') { + struct bio 
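/*
 * Illustrative sketch (not part of the patch): the inline-mode fast path above
 * pairs smp_load_acquire() on sb->recalc_sector with smp_store_release() in
 * integrity_recalc_inline(), so a reader that observes an advanced watermark
 * is also guaranteed to see the data written before it was published, without
 * taking endio_wait.lock. The lock is only elided on 64-bit builds, where an
 * aligned 64-bit load/store is a single access. The generic pattern, with
 * made-up names:
 */
#include <linux/atomic.h>
#include <linux/types.h>

struct example_progress {
	u64 done_upto;		/* watermark, published with release semantics */
	/* ... data covered by the watermark lives elsewhere ... */
};

static void example_publish(struct example_progress *p, u64 new_upto)
{
	/* Every write covered by new_upto must happen before this store. */
	smp_store_release(&p->done_upto, new_upto);
}

static bool example_already_done(struct example_progress *p, u64 sector)
{
	/* Pairs with smp_store_release() in example_publish(). */
	return sector < smp_load_acquire(&p->done_upto);
}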
*bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); + int r = dm_integrity_map_inline(dio, false); + switch (r) { + case DM_MAPIO_KILL: + bio->bi_status = BLK_STS_IOERR; + fallthrough; + case DM_MAPIO_REMAPPED: + submit_bio_noacct(bio); + fallthrough; + case DM_MAPIO_SUBMITTED: + return; + default: + BUG(); + } + } else { + dm_integrity_map_continue(dio, false); + } } static void pad_uncommitted(struct dm_integrity_c *ic) @@ -3081,6 +3149,133 @@ static void integrity_recalc(struct work_struct *w) kvfree(recalc_tags); } +static void integrity_recalc_inline(struct work_struct *w) +{ + struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); + size_t recalc_tags_size; + u8 *recalc_buffer = NULL; + u8 *recalc_tags = NULL; + struct dm_integrity_range range; + struct bio *bio; + struct bio_integrity_payload *bip; + __u8 *t; + unsigned int i; + int r; + unsigned ret; + unsigned int super_counter = 0; + unsigned recalc_sectors = RECALC_SECTORS; + +retry: + recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN); + if (!recalc_buffer) { +oom: + recalc_sectors >>= 1; + if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block) + goto retry; + DMCRIT("out of memory for recalculate buffer - recalculation disabled"); + goto free_ret; + } + + recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size; + if (crypto_shash_digestsize(ic->internal_hash) > ic->tuple_size) + recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tuple_size; + recalc_tags = kmalloc(recalc_tags_size, GFP_NOIO | __GFP_NOWARN); + if (!recalc_tags) { + kfree(recalc_buffer); + recalc_buffer = NULL; + goto oom; + } + + spin_lock_irq(&ic->endio_wait.lock); + +next_chunk: + if (unlikely(dm_post_suspending(ic->ti))) + goto unlock_ret; + + range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); + if (unlikely(range.logical_sector >= ic->provided_data_sectors)) + goto unlock_ret; + range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector); + + add_new_range_and_wait(ic, &range); + spin_unlock_irq(&ic->endio_wait.lock); + + if (unlikely(++super_counter == RECALC_WRITE_SUPER)) { + recalc_write_super(ic); + super_counter = 0; + } + + if (unlikely(dm_integrity_failed(ic))) + goto err; + + DEBUG_print("recalculating: %llx - %llx\n", range.logical_sector, range.n_sectors); + + bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios); + bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; + __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer)); + r = submit_bio_wait(bio); + bio_put(bio); + if (unlikely(r)) { + dm_integrity_io_error(ic, "reading data", r); + goto err; + } + + t = recalc_tags; + for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) { + memset(t, 0, ic->tuple_size); + integrity_sector_checksum(ic, range.logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t); + t += ic->tuple_size; + } + + bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios); + bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; + __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer)); + + bip = bio_integrity_alloc(bio, GFP_NOIO, 1); + if (unlikely(IS_ERR(bip))) { + bio_put(bio); + DMCRIT("out of memory for bio integrity payload - recalculation disabled"); + goto err; + } + ret = 
bio_integrity_add_page(bio, virt_to_page(recalc_tags), t - recalc_tags, offset_in_page(recalc_tags)); + if (unlikely(ret != t - recalc_tags)) { + bio_put(bio); + dm_integrity_io_error(ic, "attaching integrity tags", -ENOMEM); + goto err; + } + + r = submit_bio_wait(bio); + bio_put(bio); + if (unlikely(r)) { + dm_integrity_io_error(ic, "writing data", r); + goto err; + } + + cond_resched(); + spin_lock_irq(&ic->endio_wait.lock); + remove_range_unlocked(ic, &range); +#ifdef CONFIG_64BIT + /* Paired with smp_load_acquire in dm_integrity_map_inline. */ + smp_store_release(&ic->sb->recalc_sector, cpu_to_le64(range.logical_sector + range.n_sectors)); +#else + ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); +#endif + goto next_chunk; + +err: + remove_range(ic, &range); + goto free_ret; + +unlock_ret: + spin_unlock_irq(&ic->endio_wait.lock); + + recalc_write_super(ic); + +free_ret: + kfree(recalc_buffer); + kfree(recalc_tags); +} + static void bitmap_block_work(struct work_struct *w) { struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work); @@ -4619,6 +4814,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv r = -ENOMEM; goto bad; } + r = bioset_init(&ic->recalc_bios, 1, 0, BIOSET_NEED_BVECS); + if (r) { + ti->error = "Cannot allocate bio set"; + goto bad; + } + r = bioset_integrity_create(&ic->recalc_bios, 1); + if (r) { + ti->error = "Cannot allocate bio integrity set"; + r = -ENOMEM; + goto bad; + } } ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", @@ -4717,13 +4923,18 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv ti->error = "Block size doesn't match the information in superblock"; goto bad; } - if (!le32_to_cpu(ic->sb->journal_sections) != (ic->mode == 'I')) { - r = -EINVAL; - if (ic->mode != 'I') + if (ic->mode != 'I') { + if (!le32_to_cpu(ic->sb->journal_sections)) { + r = -EINVAL; ti->error = "Corrupted superblock, journal_sections is 0"; - else + goto bad; + } + } else { + if (le32_to_cpu(ic->sb->journal_sections)) { + r = -EINVAL; ti->error = "Corrupted superblock, journal_sections is not 0"; - goto bad; + goto bad; + } } /* make sure that ti->max_io_len doesn't overflow */ if (!ic->meta_dev) { @@ -4830,7 +5041,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv r = -ENOMEM; goto bad; } - INIT_WORK(&ic->recalc_work, integrity_recalc); + INIT_WORK(&ic->recalc_work, ic->mode == 'I' ? integrity_recalc_inline : integrity_recalc); } else { if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { ti->error = "Recalculate can only be specified with internal_hash"; @@ -4847,17 +5058,15 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv goto bad; } - if (ic->mode != 'I') { - ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, - 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0); - if (IS_ERR(ic->bufio)) { - r = PTR_ERR(ic->bufio); - ti->error = "Cannot initialize dm-bufio"; - ic->bufio = NULL; - goto bad; - } - dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); + ic->bufio = dm_bufio_client_create(ic->meta_dev ? 
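/*
 * Illustrative sketch (not part of the patch): integrity_recalc_inline() above
 * allocates its read and write bios from the driver-private ic->recalc_bios
 * bio_set (initialised with bioset_init() in the constructor hunk above), so
 * recalculation keeps making forward progress under memory pressure instead
 * of competing for the shared pools. The pattern in isolation, with made-up
 * names:
 */
#include <linux/bio.h>

static struct bio_set example_bios;

static int example_init(void)
{
	/* Reserve one bio and its bvecs up front for guaranteed progress. */
	return bioset_init(&example_bios, 1, 0, BIOSET_NEED_BVECS);
}

static int example_read_page(struct block_device *bdev, struct page *page,
			     sector_t sector, unsigned int len)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO, &example_bios);
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, len, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}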
ic->meta_dev->bdev : ic->dev->bdev, + 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0); + if (IS_ERR(ic->bufio)) { + r = PTR_ERR(ic->bufio); + ti->error = "Cannot initialize dm-bufio"; + ic->bufio = NULL; + goto bad; } + dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); if (ic->mode != 'R' && ic->mode != 'I') { r = create_journal(ic, &ti->error); @@ -4979,6 +5188,7 @@ static void dm_integrity_dtr(struct dm_target *ti) kvfree(ic->bbs); if (ic->bufio) dm_bufio_client_destroy(ic->bufio); + bioset_exit(&ic->recalc_bios); bioset_exit(&ic->recheck_bios); mempool_exit(&ic->recheck_pool); mempool_exit(&ic->journal_io_mempool); @@ -5033,7 +5243,7 @@ static void dm_integrity_dtr(struct dm_target *ti) static struct target_type integrity_target = { .name = "integrity", - .version = {1, 12, 0}, + .version = {1, 13, 0}, .module = THIS_MODULE, .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, .ctr = dm_integrity_ctr, diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 0c3323e0adb2a0..1e0d3b9b75d6fe 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2519,7 +2519,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev) rdev->saved_raid_disk = rdev->raid_disk; } - /* Reshape support -> restore repective data offsets */ + /* Reshape support -> restore respective data offsets */ rdev->data_offset = le64_to_cpu(sb->data_offset); rdev->new_data_offset = le64_to_cpu(sb->new_data_offset); @@ -3949,7 +3949,9 @@ static int __load_dirty_region_bitmap(struct raid_set *rs) /* Try loading the bitmap unless "raid0", which does not have one */ if (!rs_is_raid0(rs) && !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { - r = md_bitmap_load(&rs->md); + struct mddev *mddev = &rs->md; + + r = mddev->bitmap_ops->load(mddev); if (r) DMERR("Failed to load bitmap"); } @@ -4066,7 +4068,8 @@ static int raid_preresume(struct dm_target *ti) mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) { int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize; - r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0); + r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors, + chunksize, false); if (r) DMERR("Failed to resize bitmap"); } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index f7e9a3632eb3d9..499f8cc8a39fbf 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -496,8 +496,10 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, map = dm_get_live_table(md, &srcu_idx); if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); dm_put_live_table(md, srcu_idx); - return BLK_STS_RESOURCE; + return BLK_STS_IOERR; } ti = dm_table_find_target(map, 0); dm_put_live_table(md, srcu_idx); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index a0c1620e90c8ce..89632ce9776056 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2948,7 +2948,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device); if (IS_ERR(pmd)) { *error = "Error creating metadata object"; - return (struct pool *)pmd; + return ERR_CAST(pmd); } pool = kzalloc(sizeof(*pool), GFP_KERNEL); diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index ab3ea833780946..0d502f6a86ad02 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -501,6 +501,7 @@ static void launch_data_vio(struct 
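/*
 * Illustrative sketch (not part of the patch): the dm-thin hunk above replaces
 * an open-coded cast of an error pointer with ERR_CAST(), which propagates the
 * encoded errno while making the change of pointer type explicit. The idiom in
 * isolation, with made-up names:
 */
#include <linux/err.h>
#include <linux/slab.h>

struct example_meta { int id; };
struct example_pool { struct example_meta *meta; };

static struct example_meta *example_meta_open(void)
{
	return ERR_PTR(-EIO);		/* stand-in for a failing open */
}

static struct example_pool *example_pool_create(void)
{
	struct example_meta *meta = example_meta_open();
	struct example_pool *pool;

	if (IS_ERR(meta))
		return ERR_CAST(meta);	/* same errno, different pointer type */

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->meta = meta;
	return pool;
}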
data_vio *data_vio, logical_block_number_t lb memset(&data_vio->record_name, 0, sizeof(data_vio->record_name)); memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate)); + vdo_reset_completion(&data_vio->decrement_completion); vdo_reset_completion(completion); completion->error_handler = handle_data_vio_error; set_data_vio_logical_callback(data_vio, attempt_logical_block_lock); @@ -1273,12 +1274,14 @@ static void clean_hash_lock(struct vdo_completion *completion) static void finish_cleanup(struct data_vio *data_vio) { struct vdo_completion *completion = &data_vio->vio.completion; + u32 discard_size = min_t(u32, data_vio->remaining_discard, + VDO_BLOCK_SIZE - data_vio->offset); VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL, "complete data_vio has no allocation lock"); VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL, "complete data_vio has no hash lock"); - if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) || + if ((data_vio->remaining_discard <= discard_size) || (completion->result != VDO_SUCCESS)) { struct data_vio_pool *pool = completion->vdo->data_vio_pool; @@ -1287,12 +1290,12 @@ static void finish_cleanup(struct data_vio *data_vio) return; } - data_vio->remaining_discard -= min_t(u32, data_vio->remaining_discard, - VDO_BLOCK_SIZE - data_vio->offset); + data_vio->remaining_discard -= discard_size; data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE); data_vio->read = data_vio->is_partial; data_vio->offset = 0; completion->requeue = true; + data_vio->first_reference_operation_complete = false; launch_data_vio(data_vio, data_vio->logical.lbn + 1); } @@ -1965,7 +1968,8 @@ static void allocate_block(struct vdo_completion *completion) .state = VDO_MAPPING_STATE_UNCOMPRESSED, }; - if (data_vio->fua) { + if (data_vio->fua || + data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) { prepare_for_dedupe(data_vio); return; } @@ -2042,7 +2046,6 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion) return; } - /* * We don't need to write any data, so skip allocation and just update the block map and * reference counts (via the journal). @@ -2051,7 +2054,7 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion) if (data_vio->is_zero) data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED; - if (data_vio->remaining_discard > VDO_BLOCK_SIZE) { + if (data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) { /* This is not the final block of a discard so we can't acknowledge it yet. */ update_metadata_for_data_vio_write(data_vio, NULL); return; diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 39ac68614419fe..80628ae93fbacc 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -729,6 +729,7 @@ static void process_update_result(struct data_vio *agent) !change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE)) return; + agent->dedupe_context = NULL; release_context(context); } @@ -1648,6 +1649,7 @@ static void process_query_result(struct data_vio *agent) if (change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE)) { agent->is_duplicate = decode_uds_advice(context); + agent->dedupe_context = NULL; release_context(context); } } @@ -2321,6 +2323,7 @@ static void timeout_index_operations_callback(struct vdo_completion *completion) * send its requestor on its way. 
*/ list_del_init(&context->list_entry); + context->requestor->dedupe_context = NULL; continue_data_vio(context->requestor); timed_out++; } diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index dd05691e4097dc..0e04c20216825f 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -1105,6 +1105,9 @@ static int vdo_message(struct dm_target *ti, unsigned int argc, char **argv, if ((argc == 1) && (strcasecmp(argv[0], "stats") == 0)) { vdo_write_stats(vdo, result_buffer, maxlen); result = 1; + } else if ((argc == 1) && (strcasecmp(argv[0], "config") == 0)) { + vdo_write_config(vdo, &result_buffer, &maxlen); + result = 1; } else { result = vdo_status_to_errno(process_vdo_message(vdo, argc, argv)); } @@ -2293,6 +2296,14 @@ static void handle_load_error(struct vdo_completion *completion) return; } + if ((completion->result == VDO_UNSUPPORTED_VERSION) && + (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) { + vdo_log_error("Aborting load due to unsupported version"); + vdo->admin.phase = LOAD_PHASE_FINISHED; + load_callback(completion); + return; + } + vdo_log_error_strerror(completion->result, "Entering read-only mode due to load error"); vdo->admin.phase = LOAD_PHASE_WAIT_FOR_READ_ONLY; @@ -2737,6 +2748,19 @@ static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo) vdo_log_info("starting device '%s'", device_name); result = perform_admin_operation(vdo, LOAD_PHASE_START, load_callback, handle_load_error, "load"); + if (result == VDO_UNSUPPORTED_VERSION) { + /* + * A component version is not supported. This can happen when the + * recovery journal metadata is in an old version format. Abort the + * load without saving the state. + */ + vdo->suspend_type = VDO_ADMIN_STATE_SUSPENDING; + perform_admin_operation(vdo, SUSPEND_PHASE_START, + suspend_callback, suspend_callback, + "suspend"); + return result; + } + if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) { /* * Something has gone very wrong. 
Make sure everything has drained and @@ -2808,7 +2832,8 @@ static int vdo_preresume(struct dm_target *ti) vdo_register_thread_device_id(&instance_thread, &vdo->instance); result = vdo_preresume_registered(ti, vdo); - if ((result == VDO_PARAMETER_MISMATCH) || (result == VDO_INVALID_ADMIN_STATE)) + if ((result == VDO_PARAMETER_MISMATCH) || (result == VDO_INVALID_ADMIN_STATE) || + (result == VDO_UNSUPPORTED_VERSION)) result = -EINVAL; vdo_unregister_thread_device_id(); return vdo_status_to_errno(result); @@ -2832,7 +2857,7 @@ static void vdo_resume(struct dm_target *ti) static struct target_type vdo_target_bio = { .features = DM_TARGET_SINGLETON, .name = "vdo", - .version = { 9, 0, 0 }, + .version = { 9, 1, 0 }, .module = THIS_MODULE, .ctr = vdo_ctr, .dtr = vdo_dtr, diff --git a/drivers/md/dm-vdo/indexer/chapter-index.c b/drivers/md/dm-vdo/indexer/chapter-index.c index 7e32a25d3f2f38..fb1db41c794b33 100644 --- a/drivers/md/dm-vdo/indexer/chapter-index.c +++ b/drivers/md/dm-vdo/indexer/chapter-index.c @@ -177,7 +177,7 @@ int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index, if (list_number < 0) return UDS_OVERFLOW; - next_list = first_list + list_number--, + next_list = first_list + list_number--; result = uds_start_delta_index_search(delta_index, next_list, 0, &entry); if (result != UDS_SUCCESS) diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index 9a3716bb3c05ec..ab62abe18827bb 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c @@ -346,7 +346,6 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical, VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name); - VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio"); vdo_reset_completion(completion); completion->error_handler = error_handler; diff --git a/drivers/md/dm-vdo/message-stats.c b/drivers/md/dm-vdo/message-stats.c index 2802cf92922bd0..75dfcd7c5f6314 100644 --- a/drivers/md/dm-vdo/message-stats.c +++ b/drivers/md/dm-vdo/message-stats.c @@ -4,6 +4,7 @@ */ #include "dedupe.h" +#include "indexer.h" #include "logger.h" #include "memory-alloc.h" #include "message-stats.h" @@ -430,3 +431,50 @@ int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen) vdo_free(stats); return VDO_SUCCESS; } + +static void write_index_memory(u32 mem, char **buf, unsigned int *maxlen) +{ + char *prefix = "memorySize : "; + + /* Convert index memory to fractional value */ + if (mem == (u32)UDS_MEMORY_CONFIG_256MB) + write_string(prefix, "0.25, ", NULL, buf, maxlen); + else if (mem == (u32)UDS_MEMORY_CONFIG_512MB) + write_string(prefix, "0.50, ", NULL, buf, maxlen); + else if (mem == (u32)UDS_MEMORY_CONFIG_768MB) + write_string(prefix, "0.75, ", NULL, buf, maxlen); + else + write_u32(prefix, mem, ", ", buf, maxlen); +} + +static void write_index_config(struct index_config *config, char **buf, + unsigned int *maxlen) +{ + write_string("index : ", "{ ", NULL, buf, maxlen); + /* index mem size */ + write_index_memory(config->mem, buf, maxlen); + /* whether the index is sparse or not */ + write_bool("isSparse : ", config->sparse, ", ", buf, maxlen); + write_string(NULL, "}", ", ", buf, maxlen); +} + +int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen) +{ + struct vdo_config *config = &vdo->states.vdo.config; + + write_string(NULL, "{ ", NULL, buf, maxlen); + /* version */ + write_u32("version : ", 1, ", ", buf, maxlen); + /* physical size */ + 
write_block_count_t("physicalSize : ", config->physical_blocks * VDO_BLOCK_SIZE, ", ", + buf, maxlen); + /* logical size */ + write_block_count_t("logicalSize : ", config->logical_blocks * VDO_BLOCK_SIZE, ", ", + buf, maxlen); + /* slab size */ + write_block_count_t("slabSize : ", config->slab_size, ", ", buf, maxlen); + /* index config */ + write_index_config(&vdo->geometry.index_config, buf, maxlen); + write_string(NULL, "}", NULL, buf, maxlen); + return VDO_SUCCESS; +} diff --git a/drivers/md/dm-vdo/message-stats.h b/drivers/md/dm-vdo/message-stats.h index f7fceca9acab47..f9c95eff569dcd 100644 --- a/drivers/md/dm-vdo/message-stats.h +++ b/drivers/md/dm-vdo/message-stats.h @@ -8,6 +8,7 @@ #include "types.h" +int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen); int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen); #endif /* VDO_MESSAGE_STATS_H */ diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c index 3a989efae1420a..13008b0892062e 100644 --- a/drivers/md/dm-vdo/murmurhash3.c +++ b/drivers/md/dm-vdo/murmurhash3.c @@ -8,7 +8,7 @@ #include "murmurhash3.h" -#include +#include static inline u64 rotl64(u64 x, s8 r) { diff --git a/drivers/md/dm-vdo/numeric.h b/drivers/md/dm-vdo/numeric.h index dc8c400b21d260..f568dc59e6f18c 100644 --- a/drivers/md/dm-vdo/numeric.h +++ b/drivers/md/dm-vdo/numeric.h @@ -6,7 +6,7 @@ #ifndef UDS_NUMERIC_H #define UDS_NUMERIC_H -#include +#include #include #include diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c index 7e0009d2f67d5d..ffff2c999518eb 100644 --- a/drivers/md/dm-vdo/repair.c +++ b/drivers/md/dm-vdo/repair.c @@ -1202,17 +1202,14 @@ static bool __must_check is_valid_recovery_journal_block(const struct recovery_j * @journal: The journal to use. * @header: The unpacked block header to check. * @sequence: The expected sequence number. - * @type: The expected metadata type. * * Return: True if the block matches. */ static bool __must_check is_exact_recovery_journal_block(const struct recovery_journal *journal, const struct recovery_block_header *header, - sequence_number_t sequence, - enum vdo_metadata_type type) + sequence_number_t sequence) { - return ((header->metadata_type == type) && - (header->sequence_number == sequence) && + return ((header->sequence_number == sequence) && (is_valid_recovery_journal_block(journal, header, true))); } @@ -1371,7 +1368,8 @@ static void extract_entries_from_block(struct repair_completion *repair, get_recovery_journal_block_header(journal, repair->journal_data, sequence); - if (!is_exact_recovery_journal_block(journal, &header, sequence, format)) { + if (!is_exact_recovery_journal_block(journal, &header, sequence) || + (header.metadata_type != format)) { /* This block is invalid, so skip it. 
*/ return; } @@ -1557,10 +1555,13 @@ static int parse_journal_for_recovery(struct repair_completion *repair) sequence_number_t i, head; bool found_entries = false; struct recovery_journal *journal = repair->completion.vdo->recovery_journal; + struct recovery_block_header header; + enum vdo_metadata_type expected_format; head = min(repair->block_map_head, repair->slab_journal_head); + header = get_recovery_journal_block_header(journal, repair->journal_data, head); + expected_format = header.metadata_type; for (i = head; i <= repair->highest_tail; i++) { - struct recovery_block_header header; journal_entry_count_t block_entries; u8 j; @@ -1572,19 +1573,15 @@ static int parse_journal_for_recovery(struct repair_completion *repair) }; header = get_recovery_journal_block_header(journal, repair->journal_data, i); - if (header.metadata_type == VDO_METADATA_RECOVERY_JOURNAL) { - /* This is an old format block, so we need to upgrade */ - vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION, - "Recovery journal is in the old format, a read-only rebuild is required."); - vdo_enter_read_only_mode(repair->completion.vdo, - VDO_UNSUPPORTED_VERSION); - return VDO_UNSUPPORTED_VERSION; - } - - if (!is_exact_recovery_journal_block(journal, &header, i, - VDO_METADATA_RECOVERY_JOURNAL_2)) { + if (!is_exact_recovery_journal_block(journal, &header, i)) { /* A bad block header was found so this must be the end of the journal. */ break; + } else if (header.metadata_type != expected_format) { + /* There is a mix of old and new format blocks, so we need to rebuild. */ + vdo_log_error_strerror(VDO_CORRUPT_JOURNAL, + "Recovery journal is in an invalid format, a read-only rebuild is required."); + vdo_enter_read_only_mode(repair->completion.vdo, VDO_CORRUPT_JOURNAL); + return VDO_CORRUPT_JOURNAL; } block_entries = header.entry_count; @@ -1620,8 +1617,14 @@ static int parse_journal_for_recovery(struct repair_completion *repair) break; } - if (!found_entries) + if (!found_entries) { return validate_heads(repair); + } else if (expected_format == VDO_METADATA_RECOVERY_JOURNAL) { + /* All journal blocks have the old format, so we need to upgrade. */ + vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION, + "Recovery journal is in the old format. Downgrade and complete recovery, then upgrade with a clean volume"); + return VDO_UNSUPPORTED_VERSION; + } /* Set the tail to the last valid tail block, if there is one. 
*/ if (repair->tail_recovery_point.sector_count == 0) diff --git a/drivers/md/dm-vdo/status-codes.c b/drivers/md/dm-vdo/status-codes.c index d3493450b1696b..dd252d660b6da7 100644 --- a/drivers/md/dm-vdo/status-codes.c +++ b/drivers/md/dm-vdo/status-codes.c @@ -28,7 +28,7 @@ const struct error_info vdo_status_list[] = { { "VDO_LOCK_ERROR", "A lock is held incorrectly" }, { "VDO_READ_ONLY", "The device is in read-only mode" }, { "VDO_SHUTTING_DOWN", "The device is shutting down" }, - { "VDO_CORRUPT_JOURNAL", "Recovery journal entries corrupted" }, + { "VDO_CORRUPT_JOURNAL", "Recovery journal corrupted" }, { "VDO_TOO_MANY_SLABS", "Exceeds maximum number of slabs supported" }, { "VDO_INVALID_FRAGMENT", "Compressed block fragment is invalid" }, { "VDO_RETRY_AFTER_REBUILD", "Retry operation after rebuilding finishes" }, diff --git a/drivers/md/dm-vdo/status-codes.h b/drivers/md/dm-vdo/status-codes.h index 72da04159f883f..426dc8e2ca5d20 100644 --- a/drivers/md/dm-vdo/status-codes.h +++ b/drivers/md/dm-vdo/status-codes.h @@ -52,7 +52,7 @@ enum vdo_status_codes { VDO_READ_ONLY, /* the VDO is shutting down */ VDO_SHUTTING_DOWN, - /* the recovery journal has corrupt entries */ + /* the recovery journal has corrupt entries or corrupt metadata */ VDO_CORRUPT_JOURNAL, /* exceeds maximum number of slabs supported */ VDO_TOO_MANY_SLABS, diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index cf659c8feb29fc..7d4d90b4395aee 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -22,6 +22,7 @@ #include #include #include +#include #define DM_MSG_PREFIX "verity" @@ -35,11 +36,13 @@ #define DM_VERITY_OPT_LOGGING "ignore_corruption" #define DM_VERITY_OPT_RESTART "restart_on_corruption" #define DM_VERITY_OPT_PANIC "panic_on_corruption" +#define DM_VERITY_OPT_ERROR_RESTART "restart_on_error" +#define DM_VERITY_OPT_ERROR_PANIC "panic_on_error" #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks" #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once" #define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet" -#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \ +#define DM_VERITY_OPTS_MAX (5 + DM_VERITY_OPTS_FEC + \ DM_VERITY_ROOT_HASH_VERIFICATION_OPTS) static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE; @@ -582,6 +585,11 @@ static inline bool verity_is_system_shutting_down(void) || system_state == SYSTEM_RESTART; } +static void restart_io_error(struct work_struct *w) +{ + kernel_restart("dm-verity device has I/O error"); +} + /* * End one "io" structure with a given error. */ @@ -596,6 +604,23 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status) if (!static_branch_unlikely(&use_bh_wq_enabled) || !io->in_bh) verity_fec_finish_io(io); + if (unlikely(status != BLK_STS_OK) && + unlikely(!(bio->bi_opf & REQ_RAHEAD)) && + !verity_is_system_shutting_down()) { + if (v->error_mode == DM_VERITY_MODE_PANIC) { + panic("dm-verity device has I/O error"); + } + if (v->error_mode == DM_VERITY_MODE_RESTART) { + static DECLARE_WORK(restart_work, restart_io_error); + queue_work(v->verify_wq, &restart_work); + /* + * We deliberately don't call bio_endio here, because + * the machine will be restarted anyway. 
+ */ + return; + } + } + bio_endio(bio); } @@ -804,6 +829,8 @@ static void verity_status(struct dm_target *ti, status_type_t type, DMEMIT("%02x", v->salt[x]); if (v->mode != DM_VERITY_MODE_EIO) args++; + if (v->error_mode != DM_VERITY_MODE_EIO) + args++; if (verity_fec_is_enabled(v)) args += DM_VERITY_OPTS_FEC; if (v->zero_digest) @@ -833,6 +860,19 @@ static void verity_status(struct dm_target *ti, status_type_t type, BUG(); } } + if (v->error_mode != DM_VERITY_MODE_EIO) { + DMEMIT(" "); + switch (v->error_mode) { + case DM_VERITY_MODE_RESTART: + DMEMIT(DM_VERITY_OPT_ERROR_RESTART); + break; + case DM_VERITY_MODE_PANIC: + DMEMIT(DM_VERITY_OPT_ERROR_PANIC); + break; + default: + BUG(); + } + } if (v->zero_digest) DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES); if (v->validated_blocks) @@ -885,6 +925,19 @@ static void verity_status(struct dm_target *ti, status_type_t type, DMEMIT("invalid"); } } + if (v->error_mode != DM_VERITY_MODE_EIO) { + DMEMIT(",verity_error_mode="); + switch (v->error_mode) { + case DM_VERITY_MODE_RESTART: + DMEMIT(DM_VERITY_OPT_ERROR_RESTART); + break; + case DM_VERITY_MODE_PANIC: + DMEMIT(DM_VERITY_OPT_ERROR_PANIC); + break; + default: + DMEMIT("invalid"); + } + } DMEMIT(";"); break; } @@ -930,6 +983,41 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) limits->dma_alignment = limits->logical_block_size - 1; } +#ifdef CONFIG_SECURITY + +static int verity_init_sig(struct dm_verity *v, const void *sig, + size_t sig_size) +{ + v->sig_size = sig_size; + + if (sig) { + v->root_digest_sig = kmemdup(sig, v->sig_size, GFP_KERNEL); + if (!v->root_digest_sig) + return -ENOMEM; + } + + return 0; +} + +static void verity_free_sig(struct dm_verity *v) +{ + kfree(v->root_digest_sig); +} + +#else + +static inline int verity_init_sig(struct dm_verity *v, const void *sig, + size_t sig_size) +{ + return 0; +} + +static inline void verity_free_sig(struct dm_verity *v) +{ +} + +#endif /* CONFIG_SECURITY */ + static void verity_dtr(struct dm_target *ti) { struct dm_verity *v = ti->private; @@ -949,6 +1037,7 @@ static void verity_dtr(struct dm_target *ti) kfree(v->initial_hashstate); kfree(v->root_digest); kfree(v->zero_digest); + verity_free_sig(v); if (v->ahash_tfm) { static_branch_dec(&ahash_enabled); @@ -1051,6 +1140,25 @@ static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name) return 0; } +static inline bool verity_is_verity_error_mode(const char *arg_name) +{ + return (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_RESTART) || + !strcasecmp(arg_name, DM_VERITY_OPT_ERROR_PANIC)); +} + +static int verity_parse_verity_error_mode(struct dm_verity *v, const char *arg_name) +{ + if (v->error_mode) + return -EINVAL; + + if (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_RESTART)) + v->error_mode = DM_VERITY_MODE_RESTART; + else if (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_PANIC)) + v->error_mode = DM_VERITY_MODE_PANIC; + + return 0; +} + static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, struct dm_verity_sig_opts *verify_args, bool only_modifier_opts) @@ -1085,6 +1193,16 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, } continue; + } else if (verity_is_verity_error_mode(arg_name)) { + if (only_modifier_opts) + continue; + r = verity_parse_verity_error_mode(v, arg_name); + if (r) { + ti->error = "Conflicting error handling parameters"; + return r; + } + continue; + } else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) { if (only_modifier_opts) continue; @@ -1418,6 +1536,13 @@ static int 
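/*
 * Illustrative sketch (not part of the patch): the restart_on_error handling
 * above defers kernel_restart() to v->verify_wq via a statically declared work
 * item, because the error is detected in bio completion context where the
 * reboot path must not run directly. The deferral pattern in isolation, with
 * made-up names:
 */
#include <linux/reboot.h>
#include <linux/workqueue.h>

static void example_restart_fn(struct work_struct *w)
{
	kernel_restart("example device has I/O error");
}

static void example_handle_fatal_error(struct workqueue_struct *wq)
{
	static DECLARE_WORK(example_restart_work, example_restart_fn);

	/* Safe from completion/atomic context; the restart runs in the worker. */
	queue_work(wq, &example_restart_work);
}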
verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->error = "Root hash verification failed"; goto bad; } + + r = verity_init_sig(v, verify_args.sig, verify_args.sig_size); + if (r < 0) { + ti->error = "Cannot allocate root digest signature"; + goto bad; + } + v->hash_per_block_bits = __fls((1 << v->hash_dev_block_bits) / v->digest_size); @@ -1559,8 +1684,79 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i return 0; } +#ifdef CONFIG_SECURITY + +#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG + +static int verity_security_set_signature(struct block_device *bdev, + struct dm_verity *v) +{ + /* + * if the dm-verity target is unsigned, v->root_digest_sig will + * be NULL, and the hook call is still required to let LSMs mark + * the device as unsigned. This information is crucial for LSMs to + * block operations such as execution on unsigned files + */ + return security_bdev_setintegrity(bdev, + LSM_INT_DMVERITY_SIG_VALID, + v->root_digest_sig, + v->sig_size); +} + +#else + +static inline int verity_security_set_signature(struct block_device *bdev, + struct dm_verity *v) +{ + return 0; +} + +#endif /* CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG */ + +/* + * Expose verity target's root hash and signature data to LSMs before resume. + * + * Returns 0 on success, or -ENOMEM if the system is out of memory. + */ +static int verity_preresume(struct dm_target *ti) +{ + struct block_device *bdev; + struct dm_verity_digest root_digest; + struct dm_verity *v; + int r; + + v = ti->private; + bdev = dm_disk(dm_table_get_md(ti->table))->part0; + root_digest.digest = v->root_digest; + root_digest.digest_len = v->digest_size; + if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm) + root_digest.alg = crypto_ahash_alg_name(v->ahash_tfm); + else + root_digest.alg = crypto_shash_alg_name(v->shash_tfm); + + r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest, + sizeof(root_digest)); + if (r) + return r; + + r = verity_security_set_signature(bdev, v); + if (r) + goto bad; + + return 0; + +bad: + + security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, NULL, 0); + + return r; +} + +#endif /* CONFIG_SECURITY */ + static struct target_type verity_target = { .name = "verity", +/* Note: the LSMs depend on the singleton and immutable features */ .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, .version = {1, 10, 0}, .module = THIS_MODULE, @@ -1571,6 +1767,9 @@ static struct target_type verity_target = { .prepare_ioctl = verity_prepare_ioctl, .iterate_devices = verity_iterate_devices, .io_hints = verity_io_hints, +#ifdef CONFIG_SECURITY + .preresume = verity_preresume, +#endif /* CONFIG_SECURITY */ }; module_dm(verity); diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c index d351d7d39c60a6..a9e2c6c0a33c6d 100644 --- a/drivers/md/dm-verity-verify-sig.c +++ b/drivers/md/dm-verity-verify-sig.c @@ -127,7 +127,7 @@ int verity_verify_root_hash(const void *root_hash, size_t root_hash_len, #endif VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL); #ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_PLATFORM_KEYRING - if (ret == -ENOKEY) + if (ret == -ENOKEY || ret == -EKEYREJECTED) ret = verify_pkcs7_signature(root_hash, root_hash_len, sig_data, sig_len, VERIFY_USE_PLATFORM_KEYRING, diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index aac3a1b1d94add..6b75159bf835ac 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -45,6 +45,10 @@ struct dm_verity { u8 *salt; /* salt: its size is salt_size */ u8 
*initial_hashstate; /* salted initial state, if shash_tfm is set */ u8 *zero_digest; /* digest for a zero block */ +#ifdef CONFIG_SECURITY + u8 *root_digest_sig; /* signature of the root digest */ + unsigned int sig_size; /* root digest signature size */ +#endif /* CONFIG_SECURITY */ unsigned int salt_size; sector_t data_start; /* data offset in 512-byte sectors */ sector_t hash_start; /* hash start in blocks */ @@ -60,6 +64,7 @@ struct dm_verity { unsigned int digest_size; /* digest size for the current hash algorithm */ unsigned int hash_reqsize; /* the size of temporary space for crypto */ enum verity_mode mode; /* mode for handling verification errors */ + enum verity_mode error_mode;/* mode for handling I/O errors */ unsigned int corrupted_errs;/* Number of errors for corrupted blocks */ struct workqueue_struct *verify_wq; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 87bb9030343582..ff4a6b570b7644 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2030,10 +2030,15 @@ static void dm_submit_bio(struct bio *bio) struct dm_table *map; map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); + bio_io_error(bio); + goto out; + } - /* If suspended, or map not yet available, queue this IO for later */ - if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || - unlikely(!map)) { + /* If suspended, queue this IO for later */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); else if (bio->bi_opf & REQ_RAHEAD) diff --git a/drivers/md/dm.h b/drivers/md/dm.h index cc466ad5cb1df2..8ad782249af888 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -109,7 +109,6 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone); int dm_blk_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); bool dm_is_zone_write(struct mapped_device *md, struct bio *bio); -int dm_zone_map_bio(struct dm_target_io *io); int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t, sector_t sector, unsigned int nr_zones, unsigned long *need_reset); @@ -119,10 +118,6 @@ static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) { return false; } -static inline int dm_zone_map_bio(struct dm_target_io *tio) -{ - return DM_MAPIO_KILL; -} #endif /* diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 08232d8dc815ee..29da10e6f703e2 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -32,11 +32,210 @@ #include "md.h" #include "md-bitmap.h" +#define BITMAP_MAJOR_LO 3 +/* version 4 insists the bitmap is in little-endian order + * with version 3, it is host-endian which is non-portable + * Version 5 is currently set only for clustered devices + */ +#define BITMAP_MAJOR_HI 4 +#define BITMAP_MAJOR_CLUSTERED 5 +#define BITMAP_MAJOR_HOSTENDIAN 3 + +/* + * in-memory bitmap: + * + * Use 16 bit block counters to track pending writes to each "chunk". + * The 2 high order bits are special-purpose, the first is a flag indicating + * whether a resync is needed. The second is a flag indicating whether a + * resync is active. 
+ * This means that the counter is actually 14 bits: + * + * +--------+--------+------------------------------------------------+ + * | resync | resync | counter | + * | needed | active | | + * | (0-1) | (0-1) | (0-16383) | + * +--------+--------+------------------------------------------------+ + * + * The "resync needed" bit is set when: + * a '1' bit is read from storage at startup. + * a write request fails on some drives + * a resync is aborted on a chunk with 'resync active' set + * It is cleared (and resync-active set) when a resync starts across all drives + * of the chunk. + * + * + * The "resync active" bit is set when: + * a resync is started on all drives, and resync_needed is set. + * resync_needed will be cleared (as long as resync_active wasn't already set). + * It is cleared when a resync completes. + * + * The counter counts pending write requests, plus the on-disk bit. + * When the counter is '1' and the resync bits are clear, the on-disk + * bit can be cleared as well, thus setting the counter to 0. + * When we set a bit, or in the counter (to start a write), if the fields is + * 0, we first set the disk bit and set the counter to 1. + * + * If the counter is 0, the on-disk bit is clear and the stripe is clean + * Anything that dirties the stripe pushes the counter to 2 (at least) + * and sets the on-disk bit (lazily). + * If a periodic sweep find the counter at 2, it is decremented to 1. + * If the sweep find the counter at 1, the on-disk bit is cleared and the + * counter goes to zero. + * + * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block + * counters as a fallback when "page" memory cannot be allocated: + * + * Normal case (page memory allocated): + * + * page pointer (32-bit) + * + * [ ] ------+ + * | + * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters) + * c1 c2 c2048 + * + * Hijacked case (page memory allocation failed): + * + * hijacked page pointer (32-bit) + * + * [ ][ ] (no page memory allocated) + * counter #1 (16-bit) counter #2 (16-bit) + * + */ + +#define PAGE_BITS (PAGE_SIZE << 3) +#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3) + +#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK) +#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK) +#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX) + +/* how many counters per page? 
*/ +#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS) +/* same, except a shift value for more efficient bitops */ +#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT) +/* same, except a mask value for more efficient bitops */ +#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1) + +#define BITMAP_BLOCK_SHIFT 9 + +/* + * bitmap structures: + */ + +/* the in-memory bitmap is represented by bitmap_pages */ +struct bitmap_page { + /* + * map points to the actual memory page + */ + char *map; + /* + * in emergencies (when map cannot be alloced), hijack the map + * pointer and use it as two counters itself + */ + unsigned int hijacked:1; + /* + * If any counter in this page is '1' or '2' - and so could be + * cleared then that page is marked as 'pending' + */ + unsigned int pending:1; + /* + * count of dirty bits on the page + */ + unsigned int count:30; +}; + +/* the main bitmap structure - one per mddev */ +struct bitmap { + + struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + /* total number of pages in the bitmap */ + unsigned long pages; + /* number of pages not yet allocated */ + unsigned long missing_pages; + /* chunksize = 2^chunkshift (for bitops) */ + unsigned long chunkshift; + /* total number of data chunks for the array */ + unsigned long chunks; + } counts; + + struct mddev *mddev; /* the md device that the bitmap is for */ + + __u64 events_cleared; + int need_sync; + + struct bitmap_storage { + /* backing disk file */ + struct file *file; + /* cached copy of the bitmap file superblock */ + struct page *sb_page; + unsigned long sb_index; + /* list of cache pages for the file */ + struct page **filemap; + /* attributes associated filemap pages */ + unsigned long *filemap_attr; + /* number of pages in the file */ + unsigned long file_pages; + /* total bytes in the bitmap */ + unsigned long bytes; + } storage; + + unsigned long flags; + + int allclean; + + atomic_t behind_writes; + /* highest actual value at runtime */ + unsigned long behind_writes_used; + + /* + * the bitmap daemon - periodically wakes up and sweeps the bitmap + * file, cleaning up bits and flushing out pages to disk as necessary + */ + unsigned long daemon_lastrun; /* jiffies of last run */ + /* + * when we lasted called end_sync to update bitmap with resync + * progress. + */ + unsigned long last_end_sync; + + /* pending writes to the bitmap file */ + atomic_t pending_writes; + wait_queue_head_t write_wait; + wait_queue_head_t overflow_wait; + wait_queue_head_t behind_wait; + + struct kernfs_node *sysfs_can_clear; + /* slot offset for clustered env */ + int cluster_slot; +}; + +static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks, + int chunksize, bool init); + static inline char *bmname(struct bitmap *bitmap) { return bitmap->mddev ? 
mdname(bitmap->mddev) : "mdX"; } +static bool __bitmap_enabled(struct bitmap *bitmap) +{ + return bitmap->storage.filemap && + !test_bit(BITMAP_STALE, &bitmap->flags); +} + +static bool bitmap_enabled(struct mddev *mddev) +{ + struct bitmap *bitmap = mddev->bitmap; + + if (!bitmap) + return false; + + return __bitmap_enabled(bitmap); +} + /* * check a page and, if necessary, allocate it (or hijack it if the alloc fails) * @@ -360,7 +559,7 @@ static int read_file_page(struct file *file, unsigned long index, pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT); - bh = alloc_page_buffers(page, blocksize, false); + bh = alloc_page_buffers(page, blocksize); if (!bh) { ret = -ENOMEM; goto out; @@ -472,9 +671,10 @@ static void md_bitmap_wait_writes(struct bitmap *bitmap) /* update the event counter and sync the superblock to disk */ -void md_bitmap_update_sb(struct bitmap *bitmap) +static void bitmap_update_sb(void *data) { bitmap_super_t *sb; + struct bitmap *bitmap = data; if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ return; @@ -510,10 +710,8 @@ void md_bitmap_update_sb(struct bitmap *bitmap) write_sb_page(bitmap, bitmap->storage.sb_index, bitmap->storage.sb_page, 1); } -EXPORT_SYMBOL(md_bitmap_update_sb); -/* print out the bitmap file superblock */ -void md_bitmap_print_sb(struct bitmap *bitmap) +static void bitmap_print_sb(struct bitmap *bitmap) { bitmap_super_t *sb; @@ -760,7 +958,7 @@ static int md_bitmap_read_sb(struct bitmap *bitmap) bitmap->mddev->bitmap_info.space > sectors_reserved) bitmap->mddev->bitmap_info.space = sectors_reserved; } else { - md_bitmap_print_sb(bitmap); + bitmap_print_sb(bitmap); if (bitmap->cluster_slot < 0) md_cluster_stop(bitmap->mddev); } @@ -893,7 +1091,7 @@ static void md_bitmap_file_unmap(struct bitmap_storage *store) static void md_bitmap_file_kick(struct bitmap *bitmap) { if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) { - md_bitmap_update_sb(bitmap); + bitmap_update_sb(bitmap); if (bitmap->storage.file) { pr_warn("%s: kicking failed bitmap file %pD4 from array!\n", @@ -1028,13 +1226,13 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block) /* this gets called when the md device is ready to unplug its underlying * (slave) device queues -- before we let any writes go down, we need to * sync the dirty pages of the bitmap file to disk */ -void md_bitmap_unplug(struct bitmap *bitmap) +static void __bitmap_unplug(struct bitmap *bitmap) { unsigned long i; int dirty, need_write; int writing = 0; - if (!md_bitmap_enabled(bitmap)) + if (!__bitmap_enabled(bitmap)) return; /* look at each page to see if there are any set bits that need to be @@ -1060,7 +1258,6 @@ void md_bitmap_unplug(struct bitmap *bitmap) if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) md_bitmap_file_kick(bitmap); } -EXPORT_SYMBOL(md_bitmap_unplug); struct bitmap_unplug_work { struct work_struct work; @@ -1073,11 +1270,11 @@ static void md_bitmap_unplug_fn(struct work_struct *work) struct bitmap_unplug_work *unplug_work = container_of(work, struct bitmap_unplug_work, work); - md_bitmap_unplug(unplug_work->bitmap); + __bitmap_unplug(unplug_work->bitmap); complete(unplug_work->done); } -void md_bitmap_unplug_async(struct bitmap *bitmap) +static void bitmap_unplug_async(struct bitmap *bitmap) { DECLARE_COMPLETION_ONSTACK(done); struct bitmap_unplug_work unplug_work; @@ -1089,7 +1286,19 @@ void md_bitmap_unplug_async(struct bitmap *bitmap) queue_work(md_bitmap_wq, &unplug_work.work); wait_for_completion(&done); 
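	/*
	 * Note on the pattern above: the work item runs __bitmap_unplug() on
	 * md_bitmap_wq and this caller blocks on the completion, so the page
	 * write-out happens in workqueue context while the unplug remains
	 * synchronous from the caller's point of view. Later hunks in this
	 * series (raid1_prepare_flush_writes() in raid1-10.c) request this
	 * async variant only when current->bio_list is non-NULL, to avoid
	 * the bitmap-I/O deadlock described there.
	 */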
} -EXPORT_SYMBOL(md_bitmap_unplug_async); + +static void bitmap_unplug(struct mddev *mddev, bool sync) +{ + struct bitmap *bitmap = mddev->bitmap; + + if (!bitmap) + return; + + if (sync) + __bitmap_unplug(bitmap); + else + bitmap_unplug_async(bitmap); +} static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); @@ -1226,22 +1435,21 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) return ret; } -void md_bitmap_write_all(struct bitmap *bitmap) +/* just flag bitmap pages as needing to be written. */ +static void bitmap_write_all(struct mddev *mddev) { - /* We don't actually write all bitmap blocks here, - * just flag them as needing to be written - */ int i; + struct bitmap *bitmap = mddev->bitmap; if (!bitmap || !bitmap->storage.filemap) return; + + /* Only one copy, so nothing needed */ if (bitmap->storage.file) - /* Only one copy, so nothing needed */ return; for (i = 0; i < bitmap->storage.file_pages; i++) - set_page_attr(bitmap, i, - BITMAP_PAGE_NEEDWRITE); + set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); bitmap->allclean = 0; } @@ -1290,7 +1498,7 @@ static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout, * bitmap daemon -- periodically wakes up to clean bits and flush pages * out to disk */ -void md_bitmap_daemon_work(struct mddev *mddev) +static void bitmap_daemon_work(struct mddev *mddev) { struct bitmap *bitmap; unsigned long j; @@ -1461,8 +1669,11 @@ __acquires(bitmap->lock) &(bitmap->bp[page].map[pageoff]); } -int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) +static int bitmap_startwrite(struct mddev *mddev, sector_t offset, + unsigned long sectors, bool behind) { + struct bitmap *bitmap = mddev->bitmap; + if (!bitmap) return 0; @@ -1523,13 +1734,15 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s } return 0; } -EXPORT_SYMBOL(md_bitmap_startwrite); -void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset, - unsigned long sectors, int success, int behind) +static void bitmap_endwrite(struct mddev *mddev, sector_t offset, + unsigned long sectors, bool success, bool behind) { + struct bitmap *bitmap = mddev->bitmap; + if (!bitmap) return; + if (behind) { if (atomic_dec_and_test(&bitmap->behind_writes)) wake_up(&bitmap->behind_wait); @@ -1576,26 +1789,27 @@ void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset, sectors = 0; } } -EXPORT_SYMBOL(md_bitmap_endwrite); -static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, - int degraded) +static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, + sector_t *blocks, bool degraded) { bitmap_counter_t *bmc; - int rv; + bool rv; + if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */ *blocks = 1024; - return 1; /* always resync if no bitmap */ + return true; /* always resync if no bitmap */ } spin_lock_irq(&bitmap->counts.lock); + + rv = false; bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0); - rv = 0; if (bmc) { /* locked */ - if (RESYNC(*bmc)) - rv = 1; - else if (NEEDED(*bmc)) { - rv = 1; + if (RESYNC(*bmc)) { + rv = true; + } else if (NEEDED(*bmc)) { + rv = true; if (!degraded) { /* don't set/clear bits if degraded */ *bmc |= RESYNC_MASK; *bmc &= ~NEEDED_MASK; @@ -1603,11 +1817,12 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t } } spin_unlock_irq(&bitmap->counts.lock); + return rv; } -int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, 
sector_t *blocks, - int degraded) +static bool bitmap_start_sync(struct mddev *mddev, sector_t offset, + sector_t *blocks, bool degraded) { /* bitmap_start_sync must always report on multiples of whole * pages, otherwise resync (which is very PAGE_SIZE based) will @@ -1616,21 +1831,22 @@ int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *block * At least PAGE_SIZE>>9 blocks are covered. * Return the 'or' of the result. */ - int rv = 0; + bool rv = false; sector_t blocks1; *blocks = 0; while (*blocks < (PAGE_SIZE>>9)) { - rv |= __bitmap_start_sync(bitmap, offset, + rv |= __bitmap_start_sync(mddev->bitmap, offset, &blocks1, degraded); offset += blocks1; *blocks += blocks1; } + return rv; } -EXPORT_SYMBOL(md_bitmap_start_sync); -void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) +static void __bitmap_end_sync(struct bitmap *bitmap, sector_t offset, + sector_t *blocks, bool aborted) { bitmap_counter_t *bmc; unsigned long flags; @@ -1659,9 +1875,14 @@ void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks unlock: spin_unlock_irqrestore(&bitmap->counts.lock, flags); } -EXPORT_SYMBOL(md_bitmap_end_sync); -void md_bitmap_close_sync(struct bitmap *bitmap) +static void bitmap_end_sync(struct mddev *mddev, sector_t offset, + sector_t *blocks) +{ + __bitmap_end_sync(mddev->bitmap, offset, blocks, true); +} + +static void bitmap_close_sync(struct mddev *mddev) { /* Sync has finished, and any bitmap chunks that weren't synced * properly have been aborted. It remains to us to clear the @@ -1669,19 +1890,23 @@ void md_bitmap_close_sync(struct bitmap *bitmap) */ sector_t sector = 0; sector_t blocks; + struct bitmap *bitmap = mddev->bitmap; + if (!bitmap) return; + while (sector < bitmap->mddev->resync_max_sectors) { - md_bitmap_end_sync(bitmap, sector, &blocks, 0); + __bitmap_end_sync(bitmap, sector, &blocks, false); sector += blocks; } } -EXPORT_SYMBOL(md_bitmap_close_sync); -void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) +static void bitmap_cond_end_sync(struct mddev *mddev, sector_t sector, + bool force) { sector_t s = 0; sector_t blocks; + struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return; @@ -1700,34 +1925,32 @@ void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); s = 0; while (s < sector && s < bitmap->mddev->resync_max_sectors) { - md_bitmap_end_sync(bitmap, s, &blocks, 0); + __bitmap_end_sync(bitmap, s, &blocks, false); s += blocks; } bitmap->last_end_sync = jiffies; sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed); } -EXPORT_SYMBOL(md_bitmap_cond_end_sync); -void md_bitmap_sync_with_cluster(struct mddev *mddev, - sector_t old_lo, sector_t old_hi, - sector_t new_lo, sector_t new_hi) +static void bitmap_sync_with_cluster(struct mddev *mddev, + sector_t old_lo, sector_t old_hi, + sector_t new_lo, sector_t new_hi) { struct bitmap *bitmap = mddev->bitmap; sector_t sector, blocks = 0; for (sector = old_lo; sector < new_lo; ) { - md_bitmap_end_sync(bitmap, sector, &blocks, 0); + __bitmap_end_sync(bitmap, sector, &blocks, false); sector += blocks; } WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n"); for (sector = old_hi; sector < new_hi; ) { - md_bitmap_start_sync(bitmap, sector, &blocks, 0); + bitmap_start_sync(mddev, sector, &blocks, false); sector += blocks; } WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n"); } 
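/*
 * For illustration only: callers never need to know the chunk size, because
 * __bitmap_start_sync()/__bitmap_end_sync() report back through 'blocks' how
 * many sectors one counter covers. A hypothetical helper (the name and its
 * use are assumptions, not part of this patch) that walks an arbitrary range
 * through the new ops table would follow the same pattern as the loops above:
 *
 *	static void example_mark_range_for_sync(struct mddev *mddev,
 *						sector_t lo, sector_t hi)
 *	{
 *		sector_t sector = lo;
 *		sector_t blocks = 0;
 *
 *		while (sector < hi) {
 *			// sets 'blocks' to the span covered by one counter
 *			mddev->bitmap_ops->start_sync(mddev, sector, &blocks,
 *						      false);
 *			sector += blocks;
 *		}
 *	}
 */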
-EXPORT_SYMBOL(md_bitmap_sync_with_cluster); static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) { @@ -1756,12 +1979,18 @@ static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, in } /* dirty the memory and file bits for bitmap chunks "s" to "e" */ -void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) +static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s, + unsigned long e) { unsigned long chunk; + struct bitmap *bitmap = mddev->bitmap; + + if (!bitmap) + return; for (chunk = s; chunk <= e; chunk++) { sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift; + md_bitmap_set_memory_bits(bitmap, sec, 1); md_bitmap_file_set_bit(bitmap, sec); if (sec < bitmap->mddev->recovery_cp) @@ -1773,10 +2002,7 @@ void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long } } -/* - * flush out any pending updates - */ -void md_bitmap_flush(struct mddev *mddev) +static void bitmap_flush(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; long sleep; @@ -1789,23 +2015,21 @@ void md_bitmap_flush(struct mddev *mddev) */ sleep = mddev->bitmap_info.daemon_sleep * 2; bitmap->daemon_lastrun -= sleep; - md_bitmap_daemon_work(mddev); + bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; - md_bitmap_daemon_work(mddev); + bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; - md_bitmap_daemon_work(mddev); + bitmap_daemon_work(mddev); if (mddev->bitmap_info.external) md_super_wait(mddev); - md_bitmap_update_sb(bitmap); + bitmap_update_sb(bitmap); } -/* - * free memory that was allocated - */ -void md_bitmap_free(struct bitmap *bitmap) +static void md_bitmap_free(void *data) { unsigned long k, pages; struct bitmap_page *bp; + struct bitmap *bitmap = data; if (!bitmap) /* there was no bitmap */ return; @@ -1836,9 +2060,8 @@ void md_bitmap_free(struct bitmap *bitmap) kfree(bp); kfree(bitmap); } -EXPORT_SYMBOL(md_bitmap_free); -void md_bitmap_wait_behind_writes(struct mddev *mddev) +static void bitmap_wait_behind_writes(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; @@ -1852,14 +2075,14 @@ void md_bitmap_wait_behind_writes(struct mddev *mddev) } } -void md_bitmap_destroy(struct mddev *mddev) +static void bitmap_destroy(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) /* there was no bitmap */ return; - md_bitmap_wait_behind_writes(mddev); + bitmap_wait_behind_writes(mddev); if (!mddev->serialize_policy) mddev_destroy_serial_pool(mddev, NULL); @@ -1878,7 +2101,7 @@ void md_bitmap_destroy(struct mddev *mddev) * if this returns an error, bitmap_destroy must be called to do clean up * once mddev->bitmap is set */ -struct bitmap *md_bitmap_create(struct mddev *mddev, int slot) +static struct bitmap *__bitmap_create(struct mddev *mddev, int slot) { struct bitmap *bitmap; sector_t blocks = mddev->resync_max_sectors; @@ -1948,7 +2171,8 @@ struct bitmap *md_bitmap_create(struct mddev *mddev, int slot) goto error; bitmap->daemon_lastrun = jiffies; - err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1); + err = __bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, + true); if (err) goto error; @@ -1965,7 +2189,18 @@ struct bitmap *md_bitmap_create(struct mddev *mddev, int slot) return ERR_PTR(err); } -int md_bitmap_load(struct mddev *mddev) +static int bitmap_create(struct mddev *mddev, int slot) +{ + struct bitmap *bitmap = __bitmap_create(mddev, slot); + + if (IS_ERR(bitmap)) + return PTR_ERR(bitmap); + + 
mddev->bitmap = bitmap; + return 0; +} + +static int bitmap_load(struct mddev *mddev) { int err = 0; sector_t start = 0; @@ -1989,10 +2224,10 @@ int md_bitmap_load(struct mddev *mddev) */ while (sector < mddev->resync_max_sectors) { sector_t blocks; - md_bitmap_start_sync(bitmap, sector, &blocks, 0); + bitmap_start_sync(mddev, sector, &blocks, false); sector += blocks; } - md_bitmap_close_sync(bitmap); + bitmap_close_sync(mddev); if (mddev->degraded == 0 || bitmap->events_cleared == mddev->events) @@ -2014,22 +2249,21 @@ int md_bitmap_load(struct mddev *mddev) mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true); md_wakeup_thread(mddev->thread); - md_bitmap_update_sb(bitmap); + bitmap_update_sb(bitmap); if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) err = -EIO; out: return err; } -EXPORT_SYMBOL_GPL(md_bitmap_load); /* caller need to free returned bitmap with md_bitmap_free() */ -struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot) +static void *bitmap_get_from_slot(struct mddev *mddev, int slot) { int rv = 0; struct bitmap *bitmap; - bitmap = md_bitmap_create(mddev, slot); + bitmap = __bitmap_create(mddev, slot); if (IS_ERR(bitmap)) { rv = PTR_ERR(bitmap); return ERR_PTR(rv); @@ -2043,20 +2277,19 @@ struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot) return bitmap; } -EXPORT_SYMBOL(get_bitmap_from_slot); /* Loads the bitmap associated with slot and copies the resync information * to our bitmap */ -int md_bitmap_copy_from_slot(struct mddev *mddev, int slot, - sector_t *low, sector_t *high, bool clear_bits) +static int bitmap_copy_from_slot(struct mddev *mddev, int slot, sector_t *low, + sector_t *high, bool clear_bits) { int rv = 0, i, j; sector_t block, lo = 0, hi = 0; struct bitmap_counts *counts; struct bitmap *bitmap; - bitmap = get_bitmap_from_slot(mddev, slot); + bitmap = bitmap_get_from_slot(mddev, slot); if (IS_ERR(bitmap)) { pr_err("%s can't get bitmap from slot %d\n", __func__, slot); return -1; @@ -2076,53 +2309,59 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot, } if (clear_bits) { - md_bitmap_update_sb(bitmap); + bitmap_update_sb(bitmap); /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... 
*/ for (i = 0; i < bitmap->storage.file_pages; i++) if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING)) set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); - md_bitmap_unplug(bitmap); + __bitmap_unplug(bitmap); } - md_bitmap_unplug(mddev->bitmap); + __bitmap_unplug(mddev->bitmap); *low = lo; *high = hi; md_bitmap_free(bitmap); return rv; } -EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot); +static void bitmap_set_pages(void *data, unsigned long pages) +{ + struct bitmap *bitmap = data; + + bitmap->counts.pages = pages; +} -void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap) +static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats) { - unsigned long chunk_kb; + struct bitmap_storage *storage; struct bitmap_counts *counts; + struct bitmap *bitmap = data; + bitmap_super_t *sb; if (!bitmap) - return; + return -ENOENT; + + sb = kmap_local_page(bitmap->storage.sb_page); + stats->sync_size = le64_to_cpu(sb->sync_size); + kunmap_local(sb); counts = &bitmap->counts; + stats->missing_pages = counts->missing_pages; + stats->pages = counts->pages; - chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10; - seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " - "%lu%s chunk", - counts->pages - counts->missing_pages, - counts->pages, - (counts->pages - counts->missing_pages) - << (PAGE_SHIFT - 10), - chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize, - chunk_kb ? "KB" : "B"); - if (bitmap->storage.file) { - seq_printf(seq, ", file: "); - seq_file_path(seq, bitmap->storage.file, " \t\n"); - } + storage = &bitmap->storage; + stats->file_pages = storage->file_pages; + stats->file = storage->file; - seq_printf(seq, "\n"); + stats->behind_writes = atomic_read(&bitmap->behind_writes); + stats->behind_wait = wq_has_sleeper(&bitmap->behind_wait); + stats->events_cleared = bitmap->events_cleared; + return 0; } -int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, - int chunksize, int init) +static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks, + int chunksize, bool init) { /* If chunk_size is 0, choose an appropriate chunk size. * Then possibly allocate new storage space. 
@@ -2320,14 +2559,24 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, spin_unlock_irq(&bitmap->counts.lock); if (!init) { - md_bitmap_unplug(bitmap); + __bitmap_unplug(bitmap); bitmap->mddev->pers->quiesce(bitmap->mddev, 0); } ret = 0; err: return ret; } -EXPORT_SYMBOL_GPL(md_bitmap_resize); + +static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize, + bool init) +{ + struct bitmap *bitmap = mddev->bitmap; + + if (!bitmap) + return 0; + + return __bitmap_resize(bitmap, blocks, chunksize, init); +} static ssize_t location_show(struct mddev *mddev, char *page) @@ -2367,7 +2616,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len) goto out; } - md_bitmap_destroy(mddev); + bitmap_destroy(mddev); mddev->bitmap_info.offset = 0; if (mddev->bitmap_info.file) { struct file *f = mddev->bitmap_info.file; @@ -2377,7 +2626,6 @@ location_store(struct mddev *mddev, const char *buf, size_t len) } else { /* No bitmap, OK to set a location */ long long offset; - struct bitmap *bitmap; if (strncmp(buf, "none", 4) == 0) /* nothing to be done */; @@ -2404,17 +2652,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len) } mddev->bitmap_info.offset = offset; - bitmap = md_bitmap_create(mddev, -1); - if (IS_ERR(bitmap)) { - rv = PTR_ERR(bitmap); + rv = bitmap_create(mddev, -1); + if (rv) goto out; - } - mddev->bitmap = bitmap; - rv = md_bitmap_load(mddev); + rv = bitmap_load(mddev); if (rv) { mddev->bitmap_info.offset = 0; - md_bitmap_destroy(mddev); + bitmap_destroy(mddev); goto out; } } @@ -2450,6 +2695,7 @@ space_show(struct mddev *mddev, char *page) static ssize_t space_store(struct mddev *mddev, const char *buf, size_t len) { + struct bitmap *bitmap; unsigned long sectors; int rv; @@ -2460,8 +2706,8 @@ space_store(struct mddev *mddev, const char *buf, size_t len) if (sectors == 0) return -EINVAL; - if (mddev->bitmap && - sectors < (mddev->bitmap->storage.bytes + 511) >> 9) + bitmap = mddev->bitmap; + if (bitmap && sectors < (bitmap->storage.bytes + 511) >> 9) return -EFBIG; /* Bitmap is too big for this small space */ /* could make sure it isn't too big, but that isn't really @@ -2569,7 +2815,7 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len) mddev_create_serial_pool(mddev, rdev); } if (old_mwb != backlog) - md_bitmap_update_sb(mddev->bitmap); + bitmap_update_sb(mddev->bitmap); mddev_unlock_and_resume(mddev); return len; @@ -2638,10 +2884,13 @@ __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store); static ssize_t can_clear_show(struct mddev *mddev, char *page) { int len; + struct bitmap *bitmap; + spin_lock(&mddev->lock); - if (mddev->bitmap) - len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ? - "false" : "true")); + bitmap = mddev->bitmap; + if (bitmap) + len = sprintf(page, "%s\n", (bitmap->need_sync ? 
"false" : + "true")); else len = sprintf(page, "\n"); spin_unlock(&mddev->lock); @@ -2650,17 +2899,24 @@ static ssize_t can_clear_show(struct mddev *mddev, char *page) static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len) { - if (mddev->bitmap == NULL) + struct bitmap *bitmap = mddev->bitmap; + + if (!bitmap) return -ENOENT; - if (strncmp(buf, "false", 5) == 0) - mddev->bitmap->need_sync = 1; - else if (strncmp(buf, "true", 4) == 0) { + + if (strncmp(buf, "false", 5) == 0) { + bitmap->need_sync = 1; + return len; + } + + if (strncmp(buf, "true", 4) == 0) { if (mddev->degraded) return -EBUSY; - mddev->bitmap->need_sync = 0; - } else - return -EINVAL; - return len; + bitmap->need_sync = 0; + return len; + } + + return -EINVAL; } static struct md_sysfs_entry bitmap_can_clear = @@ -2670,21 +2926,26 @@ static ssize_t behind_writes_used_show(struct mddev *mddev, char *page) { ssize_t ret; + struct bitmap *bitmap; + spin_lock(&mddev->lock); - if (mddev->bitmap == NULL) + bitmap = mddev->bitmap; + if (!bitmap) ret = sprintf(page, "0\n"); else - ret = sprintf(page, "%lu\n", - mddev->bitmap->behind_writes_used); + ret = sprintf(page, "%lu\n", bitmap->behind_writes_used); spin_unlock(&mddev->lock); + return ret; } static ssize_t behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len) { - if (mddev->bitmap) - mddev->bitmap->behind_writes_used = 0; + struct bitmap *bitmap = mddev->bitmap; + + if (bitmap) + bitmap->behind_writes_used = 0; return len; } @@ -2707,3 +2968,38 @@ const struct attribute_group md_bitmap_group = { .name = "bitmap", .attrs = md_bitmap_attrs, }; + +static struct bitmap_operations bitmap_ops = { + .enabled = bitmap_enabled, + .create = bitmap_create, + .resize = bitmap_resize, + .load = bitmap_load, + .destroy = bitmap_destroy, + .flush = bitmap_flush, + .write_all = bitmap_write_all, + .dirty_bits = bitmap_dirty_bits, + .unplug = bitmap_unplug, + .daemon_work = bitmap_daemon_work, + .wait_behind_writes = bitmap_wait_behind_writes, + + .startwrite = bitmap_startwrite, + .endwrite = bitmap_endwrite, + .start_sync = bitmap_start_sync, + .end_sync = bitmap_end_sync, + .cond_end_sync = bitmap_cond_end_sync, + .close_sync = bitmap_close_sync, + + .update_sb = bitmap_update_sb, + .get_stats = bitmap_get_stats, + + .sync_with_cluster = bitmap_sync_with_cluster, + .get_from_slot = bitmap_get_from_slot, + .copy_from_slot = bitmap_copy_from_slot, + .set_pages = bitmap_set_pages, + .free = md_bitmap_free, +}; + +void mddev_set_bitmap_ops(struct mddev *mddev) +{ + mddev->bitmap_ops = &bitmap_ops; +} diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h index bb9eb418780a62..662e6fc141a775 100644 --- a/drivers/md/md-bitmap.h +++ b/drivers/md/md-bitmap.h @@ -7,81 +7,7 @@ #ifndef BITMAP_H #define BITMAP_H 1 -#define BITMAP_MAJOR_LO 3 -/* version 4 insists the bitmap is in little-endian order - * with version 3, it is host-endian which is non-portable - * Version 5 is currently set only for clustered devices - */ -#define BITMAP_MAJOR_HI 4 -#define BITMAP_MAJOR_CLUSTERED 5 -#define BITMAP_MAJOR_HOSTENDIAN 3 - -/* - * in-memory bitmap: - * - * Use 16 bit block counters to track pending writes to each "chunk". - * The 2 high order bits are special-purpose, the first is a flag indicating - * whether a resync is needed. The second is a flag indicating whether a - * resync is active. 
- * This means that the counter is actually 14 bits: - * - * +--------+--------+------------------------------------------------+ - * | resync | resync | counter | - * | needed | active | | - * | (0-1) | (0-1) | (0-16383) | - * +--------+--------+------------------------------------------------+ - * - * The "resync needed" bit is set when: - * a '1' bit is read from storage at startup. - * a write request fails on some drives - * a resync is aborted on a chunk with 'resync active' set - * It is cleared (and resync-active set) when a resync starts across all drives - * of the chunk. - * - * - * The "resync active" bit is set when: - * a resync is started on all drives, and resync_needed is set. - * resync_needed will be cleared (as long as resync_active wasn't already set). - * It is cleared when a resync completes. - * - * The counter counts pending write requests, plus the on-disk bit. - * When the counter is '1' and the resync bits are clear, the on-disk - * bit can be cleared as well, thus setting the counter to 0. - * When we set a bit, or in the counter (to start a write), if the fields is - * 0, we first set the disk bit and set the counter to 1. - * - * If the counter is 0, the on-disk bit is clear and the stripe is clean - * Anything that dirties the stripe pushes the counter to 2 (at least) - * and sets the on-disk bit (lazily). - * If a periodic sweep find the counter at 2, it is decremented to 1. - * If the sweep find the counter at 1, the on-disk bit is cleared and the - * counter goes to zero. - * - * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block - * counters as a fallback when "page" memory cannot be allocated: - * - * Normal case (page memory allocated): - * - * page pointer (32-bit) - * - * [ ] ------+ - * | - * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters) - * c1 c2 c2048 - * - * Hijacked case (page memory allocation failed): - * - * hijacked page pointer (32-bit) - * - * [ ][ ] (no page memory allocated) - * counter #1 (16-bit) counter #2 (16-bit) - * - */ - -#ifdef __KERNEL__ - -#define PAGE_BITS (PAGE_SIZE << 3) -#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3) +#define BITMAP_MAGIC 0x6d746962 typedef __u16 bitmap_counter_t; #define COUNTER_BITS 16 @@ -91,26 +17,6 @@ typedef __u16 bitmap_counter_t; #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) #define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2))) #define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1) -#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK) -#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK) -#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX) - -/* how many counters per page? */ -#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS) -/* same, except a shift value for more efficient bitops */ -#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT) -/* same, except a mask value for more efficient bitops */ -#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1) - -#define BITMAP_BLOCK_SHIFT 9 - -#endif - -/* - * bitmap structures: - */ - -#define BITMAP_MAGIC 0x6d746962 /* use these for bitmap->flags and bitmap->sb->state bit-fields */ enum bitmap_state { @@ -152,136 +58,58 @@ typedef struct bitmap_super_s { * devices. For raid10 it is the size of the array. 
*/ -#ifdef __KERNEL__ +struct md_bitmap_stats { + u64 events_cleared; + int behind_writes; + bool behind_wait; -/* the in-memory bitmap is represented by bitmap_pages */ -struct bitmap_page { - /* - * map points to the actual memory page - */ - char *map; - /* - * in emergencies (when map cannot be alloced), hijack the map - * pointer and use it as two counters itself - */ - unsigned int hijacked:1; - /* - * If any counter in this page is '1' or '2' - and so could be - * cleared then that page is marked as 'pending' - */ - unsigned int pending:1; - /* - * count of dirty bits on the page - */ - unsigned int count:30; + unsigned long missing_pages; + unsigned long file_pages; + unsigned long sync_size; + unsigned long pages; + struct file *file; }; -/* the main bitmap structure - one per mddev */ -struct bitmap { - - struct bitmap_counts { - spinlock_t lock; - struct bitmap_page *bp; - unsigned long pages; /* total number of pages - * in the bitmap */ - unsigned long missing_pages; /* number of pages - * not yet allocated */ - unsigned long chunkshift; /* chunksize = 2^chunkshift - * (for bitops) */ - unsigned long chunks; /* Total number of data - * chunks for the array */ - } counts; - - struct mddev *mddev; /* the md device that the bitmap is for */ - - __u64 events_cleared; - int need_sync; - - struct bitmap_storage { - struct file *file; /* backing disk file */ - struct page *sb_page; /* cached copy of the bitmap - * file superblock */ - unsigned long sb_index; - struct page **filemap; /* list of cache pages for - * the file */ - unsigned long *filemap_attr; /* attributes associated - * w/ filemap pages */ - unsigned long file_pages; /* number of pages in the file*/ - unsigned long bytes; /* total bytes in the bitmap */ - } storage; - - unsigned long flags; - - int allclean; - - atomic_t behind_writes; - unsigned long behind_writes_used; /* highest actual value at runtime */ - - /* - * the bitmap daemon - periodically wakes up and sweeps the bitmap - * file, cleaning up bits and flushing out pages to disk as necessary - */ - unsigned long daemon_lastrun; /* jiffies of last run */ - unsigned long last_end_sync; /* when we lasted called end_sync to - * update bitmap with resync progress */ - - atomic_t pending_writes; /* pending writes to the bitmap file */ - wait_queue_head_t write_wait; - wait_queue_head_t overflow_wait; - wait_queue_head_t behind_wait; - - struct kernfs_node *sysfs_can_clear; - int cluster_slot; /* Slot offset for clustered env */ +struct bitmap_operations { + bool (*enabled)(struct mddev *mddev); + int (*create)(struct mddev *mddev, int slot); + int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize, + bool init); + + int (*load)(struct mddev *mddev); + void (*destroy)(struct mddev *mddev); + void (*flush)(struct mddev *mddev); + void (*write_all)(struct mddev *mddev); + void (*dirty_bits)(struct mddev *mddev, unsigned long s, + unsigned long e); + void (*unplug)(struct mddev *mddev, bool sync); + void (*daemon_work)(struct mddev *mddev); + void (*wait_behind_writes)(struct mddev *mddev); + + int (*startwrite)(struct mddev *mddev, sector_t offset, + unsigned long sectors, bool behind); + void (*endwrite)(struct mddev *mddev, sector_t offset, + unsigned long sectors, bool success, bool behind); + bool (*start_sync)(struct mddev *mddev, sector_t offset, + sector_t *blocks, bool degraded); + void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks); + void (*cond_end_sync)(struct mddev *mddev, sector_t sector, bool force); + void 
(*close_sync)(struct mddev *mddev); + + void (*update_sb)(void *data); + int (*get_stats)(void *data, struct md_bitmap_stats *stats); + + void (*sync_with_cluster)(struct mddev *mddev, + sector_t old_lo, sector_t old_hi, + sector_t new_lo, sector_t new_hi); + void *(*get_from_slot)(struct mddev *mddev, int slot); + int (*copy_from_slot)(struct mddev *mddev, int slot, sector_t *lo, + sector_t *hi, bool clear_bits); + void (*set_pages)(void *data, unsigned long pages); + void (*free)(void *data); }; /* the bitmap API */ - -/* these are used only by md/bitmap */ -struct bitmap *md_bitmap_create(struct mddev *mddev, int slot); -int md_bitmap_load(struct mddev *mddev); -void md_bitmap_flush(struct mddev *mddev); -void md_bitmap_destroy(struct mddev *mddev); - -void md_bitmap_print_sb(struct bitmap *bitmap); -void md_bitmap_update_sb(struct bitmap *bitmap); -void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap); - -int md_bitmap_setallbits(struct bitmap *bitmap); -void md_bitmap_write_all(struct bitmap *bitmap); - -void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e); - -/* these are exported */ -int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, - unsigned long sectors, int behind); -void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset, - unsigned long sectors, int success, int behind); -int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded); -void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted); -void md_bitmap_close_sync(struct bitmap *bitmap); -void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force); -void md_bitmap_sync_with_cluster(struct mddev *mddev, - sector_t old_lo, sector_t old_hi, - sector_t new_lo, sector_t new_hi); - -void md_bitmap_unplug(struct bitmap *bitmap); -void md_bitmap_unplug_async(struct bitmap *bitmap); -void md_bitmap_daemon_work(struct mddev *mddev); - -int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, - int chunksize, int init); -struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot); -int md_bitmap_copy_from_slot(struct mddev *mddev, int slot, - sector_t *lo, sector_t *hi, bool clear_bits); -void md_bitmap_free(struct bitmap *bitmap); -void md_bitmap_wait_behind_writes(struct mddev *mddev); - -static inline bool md_bitmap_enabled(struct bitmap *bitmap) -{ - return bitmap && bitmap->storage.filemap && - !test_bit(BITMAP_STALE, &bitmap->flags); -} - -#endif +void mddev_set_bitmap_ops(struct mddev *mddev); #endif diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 1d0db62f035149..6595f89becdb7d 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -317,7 +317,7 @@ static void recover_bitmaps(struct md_thread *thread) str, ret); goto clear_bit; } - ret = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true); + ret = mddev->bitmap_ops->copy_from_slot(mddev, slot, &lo, &hi, true); if (ret) { pr_err("md-cluster: Could not copy data from bitmap %d\n", slot); goto clear_bit; @@ -497,8 +497,8 @@ static void process_suspend_info(struct mddev *mddev, * we don't want to trigger lots of WARN. 
*/ if (sb && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) - md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, - cinfo->sync_hi, lo, hi); + mddev->bitmap_ops->sync_with_cluster(mddev, cinfo->sync_low, + cinfo->sync_hi, lo, hi); cinfo->sync_low = lo; cinfo->sync_hi = hi; @@ -628,8 +628,9 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg) break; case BITMAP_RESIZE: if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0)) - ret = md_bitmap_resize(mddev->bitmap, - le64_to_cpu(msg->high), 0, 0); + ret = mddev->bitmap_ops->resize(mddev, + le64_to_cpu(msg->high), + 0, false); break; default: ret = -1; @@ -856,7 +857,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots) } /* Read the disk bitmap sb and check if it needs recovery */ - ret = md_bitmap_copy_from_slot(mddev, i, &lo, &hi, false); + ret = mddev->bitmap_ops->copy_from_slot(mddev, i, &lo, &hi, false); if (ret) { pr_warn("md-cluster: Could not gather bitmaps from slot %d", i); lockres_free(bm_lockres); @@ -1143,13 +1144,16 @@ static int update_bitmap_size(struct mddev *mddev, sector_t size) static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize) { - struct bitmap_counts *counts; - char str[64]; - struct dlm_lock_resource *bm_lockres; - struct bitmap *bitmap = mddev->bitmap; - unsigned long my_pages = bitmap->counts.pages; + void *bitmap = mddev->bitmap; + struct md_bitmap_stats stats; + unsigned long my_pages; int i, rv; + rv = mddev->bitmap_ops->get_stats(bitmap, &stats); + if (rv) + return rv; + + my_pages = stats.pages; /* * We need to ensure all the nodes can grow to a larger * bitmap size before make the reshaping. @@ -1159,17 +1163,22 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz return rv; for (i = 0; i < mddev->bitmap_info.nodes; i++) { + struct dlm_lock_resource *bm_lockres; + char str[64]; + if (i == md_cluster_ops->slot_number(mddev)) continue; - bitmap = get_bitmap_from_slot(mddev, i); + bitmap = mddev->bitmap_ops->get_from_slot(mddev, i); if (IS_ERR(bitmap)) { pr_err("can't get bitmap from slot %d\n", i); bitmap = NULL; goto out; } - counts = &bitmap->counts; + rv = mddev->bitmap_ops->get_stats(bitmap, &stats); + if (rv) + goto out; /* * If we can hold the bitmap lock of one node then * the slot is not occupied, update the pages. 
@@ -1183,21 +1192,21 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz bm_lockres->flags |= DLM_LKF_NOQUEUE; rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); if (!rv) - counts->pages = my_pages; + mddev->bitmap_ops->set_pages(bitmap, my_pages); lockres_free(bm_lockres); - if (my_pages != counts->pages) + if (my_pages != stats.pages) /* * Let's revert the bitmap size if one node * can't resize bitmap */ goto out; - md_bitmap_free(bitmap); + mddev->bitmap_ops->free(bitmap); } return 0; out: - md_bitmap_free(bitmap); + mddev->bitmap_ops->free(bitmap); update_bitmap_size(mddev, oldsize); return -1; } @@ -1207,24 +1216,27 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz */ static int cluster_check_sync_size(struct mddev *mddev) { - int i, rv; - bitmap_super_t *sb; - unsigned long my_sync_size, sync_size = 0; - int node_num = mddev->bitmap_info.nodes; int current_slot = md_cluster_ops->slot_number(mddev); - struct bitmap *bitmap = mddev->bitmap; - char str[64]; + int node_num = mddev->bitmap_info.nodes; struct dlm_lock_resource *bm_lockres; + struct md_bitmap_stats stats; + void *bitmap = mddev->bitmap; + unsigned long sync_size = 0; + unsigned long my_sync_size; + char str[64]; + int i, rv; - sb = kmap_atomic(bitmap->storage.sb_page); - my_sync_size = sb->sync_size; - kunmap_atomic(sb); + rv = mddev->bitmap_ops->get_stats(bitmap, &stats); + if (rv) + return rv; + + my_sync_size = stats.sync_size; for (i = 0; i < node_num; i++) { if (i == current_slot) continue; - bitmap = get_bitmap_from_slot(mddev, i); + bitmap = mddev->bitmap_ops->get_from_slot(mddev, i); if (IS_ERR(bitmap)) { pr_err("can't get bitmap from slot %d\n", i); return -1; @@ -1238,25 +1250,28 @@ static int cluster_check_sync_size(struct mddev *mddev) bm_lockres = lockres_init(mddev, str, NULL, 1); if (!bm_lockres) { pr_err("md-cluster: Cannot initialize %s\n", str); - md_bitmap_free(bitmap); + mddev->bitmap_ops->free(bitmap); return -1; } bm_lockres->flags |= DLM_LKF_NOQUEUE; rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); if (!rv) - md_bitmap_update_sb(bitmap); + mddev->bitmap_ops->update_sb(bitmap); lockres_free(bm_lockres); - sb = kmap_atomic(bitmap->storage.sb_page); - if (sync_size == 0) - sync_size = sb->sync_size; - else if (sync_size != sb->sync_size) { - kunmap_atomic(sb); - md_bitmap_free(bitmap); + rv = mddev->bitmap_ops->get_stats(bitmap, &stats); + if (rv) { + mddev->bitmap_ops->free(bitmap); + return rv; + } + + if (sync_size == 0) { + sync_size = stats.sync_size; + } else if (sync_size != stats.sync_size) { + mddev->bitmap_ops->free(bitmap); return -1; } - kunmap_atomic(sb); - md_bitmap_free(bitmap); + mddev->bitmap_ops->free(bitmap); } return (my_sync_size == sync_size) ? 
0 : -1; @@ -1585,7 +1600,7 @@ static int gather_bitmaps(struct md_rdev *rdev) for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) { if (sn == (cinfo->slot_number - 1)) continue; - err = md_bitmap_copy_from_slot(mddev, sn, &lo, &hi, false); + err = mddev->bitmap_ops->copy_from_slot(mddev, sn, &lo, &hi, false); if (err) { pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn); goto out; diff --git a/drivers/md/md.c b/drivers/md/md.c index d3a837506a36d8..179ee4afe93766 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -546,137 +546,30 @@ static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_n return 0; } -/* - * Generic flush handling for md - */ - -static void md_end_flush(struct bio *bio) -{ - struct md_rdev *rdev = bio->bi_private; - struct mddev *mddev = rdev->mddev; - - bio_put(bio); - - rdev_dec_pending(rdev, mddev); - - if (atomic_dec_and_test(&mddev->flush_pending)) - /* The pre-request flush has finished */ - queue_work(md_wq, &mddev->flush_work); -} - -static void md_submit_flush_data(struct work_struct *ws); - -static void submit_flushes(struct work_struct *ws) +bool md_flush_request(struct mddev *mddev, struct bio *bio) { - struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct md_rdev *rdev; - - mddev->start_flush = ktime_get_boottime(); - INIT_WORK(&mddev->flush_work, md_submit_flush_data); - atomic_set(&mddev->flush_pending, 1); - rcu_read_lock(); - rdev_for_each_rcu(rdev, mddev) - if (rdev->raid_disk >= 0 && - !test_bit(Faulty, &rdev->flags)) { - struct bio *bi; - - atomic_inc(&rdev->nr_pending); - rcu_read_unlock(); - bi = bio_alloc_bioset(rdev->bdev, 0, - REQ_OP_WRITE | REQ_PREFLUSH, - GFP_NOIO, &mddev->bio_set); - bi->bi_end_io = md_end_flush; - bi->bi_private = rdev; - atomic_inc(&mddev->flush_pending); - submit_bio(bi); - rcu_read_lock(); - } - rcu_read_unlock(); - if (atomic_dec_and_test(&mddev->flush_pending)) - queue_work(md_wq, &mddev->flush_work); -} - -static void md_submit_flush_data(struct work_struct *ws) -{ - struct mddev *mddev = container_of(ws, struct mddev, flush_work); - struct bio *bio = mddev->flush_bio; + struct bio *new; /* - * must reset flush_bio before calling into md_handle_request to avoid a - * deadlock, because other bios passed md_handle_request suspend check - * could wait for this and below md_handle_request could wait for those - * bios because of suspend check + * md_flush_reqeust() should be called under md_handle_request() and + * 'active_io' is already grabbed. Hence it's safe to get rdev directly + * without rcu protection. */ - spin_lock_irq(&mddev->lock); - mddev->prev_flush_start = mddev->start_flush; - mddev->flush_bio = NULL; - spin_unlock_irq(&mddev->lock); - wake_up(&mddev->sb_wait); + WARN_ON(percpu_ref_is_zero(&mddev->active_io)); - if (bio->bi_iter.bi_size == 0) { - /* an empty barrier - all done */ - bio_endio(bio); - } else { - bio->bi_opf &= ~REQ_PREFLUSH; - - /* - * make_requst() will never return error here, it only - * returns error in raid5_make_request() by dm-raid. - * Since dm always splits data and flush operation into - * two separate io, io size of flush submitted by dm - * always is 0, make_request() will not be called here. 
- */ - if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio))) - bio_io_error(bio); - } - - /* The pair is percpu_ref_get() from md_flush_request() */ - percpu_ref_put(&mddev->active_io); -} + rdev_for_each(rdev, mddev) { + if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags)) + continue; -/* - * Manages consolidation of flushes and submitting any flushes needed for - * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is - * being finished in another context. Returns false if the flushing is - * complete but still needs the I/O portion of the bio to be processed. - */ -bool md_flush_request(struct mddev *mddev, struct bio *bio) -{ - ktime_t req_start = ktime_get_boottime(); - spin_lock_irq(&mddev->lock); - /* flush requests wait until ongoing flush completes, - * hence coalescing all the pending requests. - */ - wait_event_lock_irq(mddev->sb_wait, - !mddev->flush_bio || - ktime_before(req_start, mddev->prev_flush_start), - mddev->lock); - /* new request after previous flush is completed */ - if (ktime_after(req_start, mddev->prev_flush_start)) { - WARN_ON(mddev->flush_bio); - /* - * Grab a reference to make sure mddev_suspend() will wait for - * this flush to be done. - * - * md_flush_reqeust() is called under md_handle_request() and - * 'active_io' is already grabbed, hence percpu_ref_is_zero() - * won't pass, percpu_ref_tryget_live() can't be used because - * percpu_ref_kill() can be called by mddev_suspend() - * concurrently. - */ - WARN_ON(percpu_ref_is_zero(&mddev->active_io)); - percpu_ref_get(&mddev->active_io); - mddev->flush_bio = bio; - spin_unlock_irq(&mddev->lock); - INIT_WORK(&mddev->flush_work, submit_flushes); - queue_work(md_wq, &mddev->flush_work); - return true; + new = bio_alloc_bioset(rdev->bdev, 0, + REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO, + &mddev->bio_set); + bio_chain(new, bio); + submit_bio(new); } - /* flush was performed for some other bio while we waited. */ - spin_unlock_irq(&mddev->lock); - if (bio->bi_iter.bi_size == 0) { - /* pure flush without data - all done */ + if (bio_sectors(bio) == 0) { bio_endio(bio); return true; } @@ -763,7 +656,6 @@ int mddev_init(struct mddev *mddev) atomic_set(&mddev->openers, 0); atomic_set(&mddev->sync_seq, 0); spin_lock_init(&mddev->lock); - atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); init_waitqueue_head(&mddev->recovery_wait); mddev->reshape_position = MaxSector; @@ -772,6 +664,7 @@ int mddev_init(struct mddev *mddev) mddev->resync_min = 0; mddev->resync_max = MaxSector; mddev->level = LEVEL_NONE; + mddev_set_bitmap_ops(mddev); INIT_WORK(&mddev->sync_work, md_start_sync); INIT_WORK(&mddev->del_work, mddev_delayed_delete); @@ -1372,6 +1265,18 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor return ret; } +static u64 md_bitmap_events_cleared(struct mddev *mddev) +{ + struct md_bitmap_stats stats; + int err; + + err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); + if (err) + return 0; + + return stats.events_cleared; +} + /* * validate_super for 0.90.0 * note: we are not using "freshest" for 0.9 superblock @@ -1464,7 +1369,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru /* if adding to array with a bitmap, then we can accept an * older device ... but not too old. 
*/ - if (ev1 < mddev->bitmap->events_cleared) + if (ev1 < md_bitmap_events_cleared(mddev)) return 0; if (ev1 < mddev->events) set_bit(Bitmap_sync, &rdev->flags); @@ -1991,7 +1896,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc /* If adding to array with a bitmap, then we can accept an * older device, but not too old. */ - if (ev1 < mddev->bitmap->events_cleared) + if (ev1 < md_bitmap_events_cleared(mddev)) return 0; if (ev1 < mddev->events) set_bit(Bitmap_sync, &rdev->flags); @@ -2323,7 +2228,6 @@ super_1_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) { /* All necessary checks on new >= old have been done */ - struct bitmap *bitmap; if (new_offset >= rdev->data_offset) return 1; @@ -2340,11 +2244,18 @@ super_1_allow_new_offset(struct md_rdev *rdev, */ if (rdev->sb_start + (32+4)*2 > new_offset) return 0; - bitmap = rdev->mddev->bitmap; - if (bitmap && !rdev->mddev->bitmap_info.file && - rdev->sb_start + rdev->mddev->bitmap_info.offset + - bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) - return 0; + + if (!rdev->mddev->bitmap_info.file) { + struct mddev *mddev = rdev->mddev; + struct md_bitmap_stats stats; + int err; + + err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); + if (!err && rdev->sb_start + mddev->bitmap_info.offset + + stats.file_pages * (PAGE_SIZE >> 9) > new_offset) + return 0; + } + if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) return 0; @@ -2820,7 +2731,7 @@ void md_update_sb(struct mddev *mddev, int force_change) mddev_add_trace_msg(mddev, "md md_update_sb"); rewrite: - md_bitmap_update_sb(mddev->bitmap); + mddev->bitmap_ops->update_sb(mddev->bitmap); rdev_for_each(rdev, mddev) { if (rdev->sb_loaded != 1) continue; /* no noise on spare devices */ @@ -4141,6 +4052,34 @@ level_store(struct mddev *mddev, const char *buf, size_t len) static struct md_sysfs_entry md_level = __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); +static ssize_t +new_level_show(struct mddev *mddev, char *page) +{ + return sprintf(page, "%d\n", mddev->new_level); +} + +static ssize_t +new_level_store(struct mddev *mddev, const char *buf, size_t len) +{ + unsigned int n; + int err; + + err = kstrtouint(buf, 10, &n); + if (err < 0) + return err; + err = mddev_lock(mddev); + if (err) + return err; + + mddev->new_level = n; + md_update_sb(mddev, 1); + + mddev_unlock(mddev); + return len; +} +static struct md_sysfs_entry md_new_level = +__ATTR(new_level, 0664, new_level_show, new_level_store); + static ssize_t layout_show(struct mddev *mddev, char *page) { @@ -4680,17 +4619,23 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len) /* buf should be ... or - ... 
(range) */ while (*buf) { chunk = end_chunk = simple_strtoul(buf, &end, 0); - if (buf == end) break; + if (buf == end) + break; + if (*end == '-') { /* range */ buf = end + 1; end_chunk = simple_strtoul(buf, &end, 0); - if (buf == end) break; + if (buf == end) + break; } - if (*end && !isspace(*end)) break; - md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); + + if (*end && !isspace(*end)) + break; + + mddev->bitmap_ops->dirty_bits(mddev, chunk, end_chunk); buf = skip_spaces(end); } - md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ + mddev->bitmap_ops->unplug(mddev, true); /* flush the bits to disk */ out: mddev_unlock(mddev); return len; @@ -5666,6 +5611,7 @@ __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, static struct attribute *md_default_attrs[] = { &md_level.attr, + &md_new_level.attr, &md_layout.attr, &md_raid_disks.attr, &md_uuid.attr, @@ -6206,16 +6152,10 @@ int md_run(struct mddev *mddev) } if (err == 0 && pers->sync_request && (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { - struct bitmap *bitmap; - - bitmap = md_bitmap_create(mddev, -1); - if (IS_ERR(bitmap)) { - err = PTR_ERR(bitmap); + err = mddev->bitmap_ops->create(mddev, -1); + if (err) pr_warn("%s: failed to create bitmap (%d)\n", mdname(mddev), err); - } else - mddev->bitmap = bitmap; - } if (err) goto bitmap_abort; @@ -6285,7 +6225,7 @@ int md_run(struct mddev *mddev) pers->free(mddev, mddev->private); mddev->private = NULL; module_put(pers->owner); - md_bitmap_destroy(mddev); + mddev->bitmap_ops->destroy(mddev); abort: bioset_exit(&mddev->io_clone_set); exit_sync_set: @@ -6304,9 +6244,10 @@ int do_md_run(struct mddev *mddev) err = md_run(mddev); if (err) goto out; - err = md_bitmap_load(mddev); + + err = mddev->bitmap_ops->load(mddev); if (err) { - md_bitmap_destroy(mddev); + mddev->bitmap_ops->destroy(mddev); goto out; } @@ -6450,7 +6391,8 @@ static void __md_stop_writes(struct mddev *mddev) mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); } - md_bitmap_flush(mddev); + + mddev->bitmap_ops->flush(mddev); if (md_is_rdwr(mddev) && ((!mddev->in_sync && !mddev_is_clustered(mddev)) || @@ -6477,7 +6419,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes); static void mddev_detach(struct mddev *mddev) { - md_bitmap_wait_behind_writes(mddev); + mddev->bitmap_ops->wait_behind_writes(mddev); if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) { mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); @@ -6492,7 +6434,8 @@ static void mddev_detach(struct mddev *mddev) static void __md_stop(struct mddev *mddev) { struct md_personality *pers = mddev->pers; - md_bitmap_destroy(mddev); + + mddev->bitmap_ops->destroy(mddev); mddev_detach(mddev); spin_lock(&mddev->lock); mddev->pers = NULL; @@ -7270,22 +7213,19 @@ static int set_bitmap_file(struct mddev *mddev, int fd) err = 0; if (mddev->pers) { if (fd >= 0) { - struct bitmap *bitmap; + err = mddev->bitmap_ops->create(mddev, -1); + if (!err) + err = mddev->bitmap_ops->load(mddev); - bitmap = md_bitmap_create(mddev, -1); - if (!IS_ERR(bitmap)) { - mddev->bitmap = bitmap; - err = md_bitmap_load(mddev); - } else - err = PTR_ERR(bitmap); if (err) { - md_bitmap_destroy(mddev); + mddev->bitmap_ops->destroy(mddev); fd = -1; } } else if (fd < 0) { - md_bitmap_destroy(mddev); + mddev->bitmap_ops->destroy(mddev); } } + if (fd < 0) { struct file *f = mddev->bitmap_info.file; if (f) { @@ -7554,7 +7494,6 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) goto err; } if (info->state & (1<bitmap) { rv 
= -EEXIST; @@ -7568,24 +7507,24 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->bitmap_info.default_offset; mddev->bitmap_info.space = mddev->bitmap_info.default_space; - bitmap = md_bitmap_create(mddev, -1); - if (!IS_ERR(bitmap)) { - mddev->bitmap = bitmap; - rv = md_bitmap_load(mddev); - } else - rv = PTR_ERR(bitmap); + rv = mddev->bitmap_ops->create(mddev, -1); + if (!rv) + rv = mddev->bitmap_ops->load(mddev); + if (rv) - md_bitmap_destroy(mddev); + mddev->bitmap_ops->destroy(mddev); } else { - /* remove the bitmap */ - if (!mddev->bitmap) { - rv = -ENOENT; + struct md_bitmap_stats stats; + + rv = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); + if (rv) goto err; - } - if (mddev->bitmap->storage.file) { + + if (stats.file) { rv = -EINVAL; goto err; } + if (mddev->bitmap_info.nodes) { /* hold PW on all the bitmap lock */ if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { @@ -7600,7 +7539,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) module_put(md_cluster_mod); mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; } - md_bitmap_destroy(mddev); + mddev->bitmap_ops->destroy(mddev); mddev->bitmap_info.offset = 0; } } @@ -8370,6 +8309,33 @@ static void md_seq_stop(struct seq_file *seq, void *v) spin_unlock(&all_mddevs_lock); } +static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev) +{ + struct md_bitmap_stats stats; + unsigned long used_pages; + unsigned long chunk_kb; + int err; + + err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); + if (err) + return; + + chunk_kb = mddev->bitmap_info.chunksize >> 10; + used_pages = stats.pages - stats.missing_pages; + + seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk", + used_pages, stats.pages, used_pages << (PAGE_SHIFT - 10), + chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, + chunk_kb ? "KB" : "B"); + + if (stats.file) { + seq_puts(seq, ", file: "); + seq_file_path(seq, stats.file, " \t\n"); + } + + seq_putc(seq, '\n'); +} + static int md_seq_show(struct seq_file *seq, void *v) { struct mddev *mddev; @@ -8390,14 +8356,19 @@ static int md_seq_show(struct seq_file *seq, void *v) spin_unlock(&all_mddevs_lock); spin_lock(&mddev->lock); if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { - seq_printf(seq, "%s : %sactive", mdname(mddev), - mddev->pers ? "" : "in"); + seq_printf(seq, "%s : ", mdname(mddev)); if (mddev->pers) { + if (test_bit(MD_BROKEN, &mddev->flags)) + seq_printf(seq, "broken"); + else + seq_printf(seq, "active"); if (mddev->ro == MD_RDONLY) seq_printf(seq, " (read-only)"); if (mddev->ro == MD_AUTO_READ) seq_printf(seq, " (auto-read-only)"); seq_printf(seq, " %s", mddev->pers->name); + } else { + seq_printf(seq, "inactive"); } sectors = 0; @@ -8453,7 +8424,7 @@ static int md_seq_show(struct seq_file *seq, void *v) } else seq_printf(seq, "\n "); - md_bitmap_status(seq, mddev->bitmap); + md_bitmap_status(seq, mddev); seq_printf(seq, "\n"); } @@ -8668,7 +8639,6 @@ void md_write_start(struct mddev *mddev, struct bio *bi) BUG_ON(mddev->ro == MD_RDONLY); if (mddev->ro == MD_AUTO_READ) { /* need to switch to read/write */ - flush_work(&mddev->sync_work); mddev->ro = MD_RDWR; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); @@ -9506,7 +9476,7 @@ static void md_start_sync(struct work_struct *ws) * stored on all devices. So make sure all bitmap pages get written. 
*/ if (spares) - md_bitmap_write_all(mddev->bitmap); + mddev->bitmap_ops->write_all(mddev); name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ? "reshape" : "resync"; @@ -9594,7 +9564,7 @@ static void unregister_sync_thread(struct mddev *mddev) void md_check_recovery(struct mddev *mddev) { if (mddev->bitmap) - md_bitmap_daemon_work(mddev); + mddev->bitmap_ops->daemon_work(mddev); if (signal_pending(current)) { if (mddev->pers->sync_request && !mddev->external) { @@ -9965,7 +9935,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) if (ret) pr_info("md-cluster: resize failed\n"); else - md_bitmap_update_sb(mddev->bitmap); + mddev->bitmap_ops->update_sb(mddev->bitmap); } /* Check for change of roles in the active devices */ diff --git a/drivers/md/md.h b/drivers/md/md.h index a0d6827dced9b7..5d2e6bd58e4da2 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -535,7 +535,8 @@ struct mddev { struct percpu_ref writes_pending; int sync_checkers; /* # of threads checking writes_pending */ - struct bitmap *bitmap; /* the bitmap for the device */ + void *bitmap; /* the bitmap for the device */ + struct bitmap_operations *bitmap_ops; struct { struct file *file; /* the bitmap file */ loff_t offset; /* offset from superblock of @@ -571,16 +572,6 @@ struct mddev { */ struct bio_set io_clone_set; - /* Generic flush handling. - * The last to finish preflush schedules a worker to submit - * the rest of the request (without the REQ_PREFLUSH flag). - */ - struct bio *flush_bio; - atomic_t flush_pending; - ktime_t start_flush, prev_flush_start; /* prev_flush_start is when the previous completed - * flush was started. - */ - struct work_struct flush_work; struct work_struct event_work; /* used by dm to report failure event */ mempool_t *serial_info_pool; void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 2ea1710a3b705e..4378d3250bd757 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -140,7 +140,7 @@ static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio, * If bitmap is not enabled, it's safe to submit the io directly, and * this can get optimal performance. */ - if (!md_bitmap_enabled(mddev->bitmap)) { + if (!mddev->bitmap_ops->enabled(mddev)) { raid1_submit_write(bio); return true; } @@ -166,12 +166,9 @@ static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio, * while current io submission must wait for bitmap io to be done. In order to * avoid such deadlock, submit bitmap io asynchronously. 
*/ -static inline void raid1_prepare_flush_writes(struct bitmap *bitmap) +static inline void raid1_prepare_flush_writes(struct mddev *mddev) { - if (current->bio_list) - md_bitmap_unplug_async(bitmap); - else - md_bitmap_unplug(bitmap); + mddev->bitmap_ops->unplug(mddev, current->bio_list == NULL); } /* diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 761989d6790683..6c9d24203f39f0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -411,18 +411,20 @@ static void raid1_end_read_request(struct bio *bio) static void close_write(struct r1bio *r1_bio) { + struct mddev *mddev = r1_bio->mddev; + /* it really is the end of this request */ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { bio_free_pages(r1_bio->behind_master_bio); bio_put(r1_bio->behind_master_bio); r1_bio->behind_master_bio = NULL; } + /* clear the bitmap if all writes complete successfully */ - md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, - r1_bio->sectors, - !test_bit(R1BIO_Degraded, &r1_bio->state), - test_bit(R1BIO_BehindIO, &r1_bio->state)); - md_write_end(r1_bio->mddev); + mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors, + !test_bit(R1BIO_Degraded, &r1_bio->state), + test_bit(R1BIO_BehindIO, &r1_bio->state)); + md_write_end(mddev); } static void r1_bio_write_done(struct r1bio *r1_bio) @@ -900,7 +902,7 @@ static void wake_up_barrier(struct r1conf *conf) static void flush_bio_list(struct r1conf *conf, struct bio *bio) { /* flush any pending bitmap writes to disk before proceeding w/ I/O */ - raid1_prepare_flush_writes(conf->mddev->bitmap); + raid1_prepare_flush_writes(conf->mddev); wake_up_barrier(conf); while (bio) { /* submit pending writes */ @@ -1317,13 +1319,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, struct r1conf *conf = mddev->private; struct raid1_info *mirror; struct bio *read_bio; - struct bitmap *bitmap = mddev->bitmap; const enum req_op op = bio_op(bio); const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC; int max_sectors; int rdisk; bool r1bio_existed = !!r1_bio; - char b[BDEVNAME_SIZE]; /* * If r1_bio is set, we are blocking the raid1d thread @@ -1332,16 +1332,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, */ gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO; - if (r1bio_existed) { - /* Need to get the block device name carefully */ - struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; - - if (rdev) - snprintf(b, sizeof(b), "%pg", rdev->bdev); - else - strcpy(b, "???"); - } - /* * Still need barrier for READ in case that whole * array is frozen. @@ -1363,15 +1353,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, * used and no empty request is available. 
*/ rdisk = read_balance(conf, r1_bio, &max_sectors); - if (rdisk < 0) { /* couldn't find anywhere to read from */ - if (r1bio_existed) { - pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", + if (r1bio_existed) + pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n", mdname(mddev), - b, - (unsigned long long)r1_bio->sector); - } + conf->mirrors[r1_bio->read_disk].rdev->bdev, + r1_bio->sector); raid_end_bio_io(r1_bio); return; } @@ -1383,15 +1371,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, (unsigned long long)r1_bio->sector, mirror->rdev->bdev); - if (test_bit(WriteMostly, &mirror->rdev->flags) && - bitmap) { + if (test_bit(WriteMostly, &mirror->rdev->flags)) { /* * Reading from a write-mostly device must take care not to * over-take any writes that are 'behind' */ mddev_add_trace_msg(mddev, "raid1 wait behind writes"); - wait_event(bitmap->behind_wait, - atomic_read(&bitmap->behind_writes) == 0); + mddev->bitmap_ops->wait_behind_writes(mddev); } if (max_sectors < bio_sectors(bio)) { @@ -1432,7 +1418,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, struct r1conf *conf = mddev->private; struct r1bio *r1_bio; int i, disks; - struct bitmap *bitmap = mddev->bitmap; unsigned long flags; struct md_rdev *blocked_rdev; int first_clone; @@ -1585,7 +1570,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, * at a time and thus needs a new bio that can fit the whole payload * this bio in page sized chunks. */ - if (write_behind && bitmap) + if (write_behind && mddev->bitmap) max_sectors = min_t(int, max_sectors, BIO_MAX_VECS * (PAGE_SIZE >> 9)); if (max_sectors < bio_sectors(bio)) { @@ -1612,19 +1597,23 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, continue; if (first_clone) { + unsigned long max_write_behind = + mddev->bitmap_info.max_write_behind; + struct md_bitmap_stats stats; + int err; + /* do behind I/O ? * Not if there are too many, or cannot * allocate memory, or a reader on WriteMostly * is waiting for behind writes to flush */ - if (bitmap && write_behind && - (atomic_read(&bitmap->behind_writes) - < mddev->bitmap_info.max_write_behind) && - !waitqueue_active(&bitmap->behind_wait)) { + err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); + if (!err && write_behind && !stats.behind_wait && + stats.behind_writes < max_write_behind) alloc_behind_master_bio(r1_bio, bio); - } - md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, - test_bit(R1BIO_BehindIO, &r1_bio->state)); + mddev->bitmap_ops->startwrite( + mddev, r1_bio->sector, r1_bio->sectors, + test_bit(R1BIO_BehindIO, &r1_bio->state)); first_clone = 0; } @@ -2042,7 +2031,7 @@ static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) /* make sure these bits don't get cleared. 
*/ do { - md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); + mddev->bitmap_ops->end_sync(mddev, s, &sync_blocks); s += sync_blocks; sectors_to_go -= sync_blocks; } while (sectors_to_go > 0); @@ -2771,7 +2760,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, int wonly = -1; int write_targets = 0, read_targets = 0; sector_t sync_blocks; - int still_degraded = 0; + bool still_degraded = false; int good_sectors = RESYNC_SECTORS; int min_bad = 0; /* number of sectors that are bad in all devices */ int idx = sector_to_idx(sector_nr); @@ -2788,12 +2777,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, * We can find the current addess in mddev->curr_resync */ if (mddev->curr_resync < max_sector) /* aborted */ - md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, - &sync_blocks, 1); + mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync, + &sync_blocks); else /* completed sync */ conf->fullsync = 0; - md_bitmap_close_sync(mddev->bitmap); + mddev->bitmap_ops->close_sync(mddev); close_sync(conf); if (mddev_is_clustered(mddev)) { @@ -2813,7 +2802,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, /* before building a request, check if we can skip these blocks.. * This call the bitmap_start_sync doesn't actually record anything */ - if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && + if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, true) && !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* We can skip this block, and probably several more */ *skipped = 1; @@ -2831,9 +2820,9 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, * sector_nr + two times RESYNC_SECTORS */ - md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, - mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); - + mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, + mddev_is_clustered(mddev) && + (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); if (raise_barrier(conf, sector_nr)) return 0; @@ -2864,7 +2853,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { if (i < conf->raid_disks) - still_degraded = 1; + still_degraded = true; } else if (!test_bit(In_sync, &rdev->flags)) { bio->bi_opf = REQ_OP_WRITE; bio->bi_end_io = end_sync_write; @@ -2988,8 +2977,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, if (len == 0) break; if (sync_blocks == 0) { - if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, - &sync_blocks, still_degraded) && + if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, + &sync_blocks, still_degraded) && !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) break; @@ -3313,14 +3302,16 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors) * worth it. 
*/ sector_t newsize = raid1_size(mddev, sectors, 0); + int ret; + if (mddev->external_size && mddev->array_sectors > newsize) return -EINVAL; - if (mddev->bitmap) { - int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); - if (ret) - return ret; - } + + ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false); + if (ret) + return ret; + md_set_array_sectors(mddev, newsize); if (sectors > mddev->dev_sectors && mddev->recovery_cp > mddev->dev_sectors) { diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 2a9c4ee982e023..f3bf1116794a4e 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -426,12 +426,13 @@ static void raid10_end_read_request(struct bio *bio) static void close_write(struct r10bio *r10_bio) { + struct mddev *mddev = r10_bio->mddev; + /* clear the bitmap if all writes complete successfully */ - md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, - r10_bio->sectors, - !test_bit(R10BIO_Degraded, &r10_bio->state), - 0); - md_write_end(r10_bio->mddev); + mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors, + !test_bit(R10BIO_Degraded, &r10_bio->state), + false); + md_write_end(mddev); } static void one_write_done(struct r10bio *r10_bio) @@ -884,7 +885,7 @@ static void flush_pending_writes(struct r10conf *conf) __set_current_state(TASK_RUNNING); blk_start_plug(&plug); - raid1_prepare_flush_writes(conf->mddev->bitmap); + raid1_prepare_flush_writes(conf->mddev); wake_up(&conf->wait_barrier); while (bio) { /* submit pending writes */ @@ -1100,7 +1101,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) /* we aren't scheduling, so we can do the write-out directly. */ bio = bio_list_get(&plug->pending); - raid1_prepare_flush_writes(mddev->bitmap); + raid1_prepare_flush_writes(mddev); wake_up_barrier(conf); while (bio) { /* submit pending writes */ @@ -1492,7 +1493,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, md_account_bio(mddev, &bio); r10_bio->master_bio = bio; atomic_set(&r10_bio->remaining, 1); - md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); + mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors, + false); for (i = 0; i < conf->copies; i++) { if (r10_bio->devs[i].bio) @@ -2465,7 +2467,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) s = PAGE_SIZE >> 9; rdev = conf->mirrors[dr].rdev; - addr = r10_bio->devs[0].addr + sect, + addr = r10_bio->devs[0].addr + sect; ok = sync_page_io(rdev, addr, s << 9, @@ -3192,13 +3194,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (mddev->curr_resync < max_sector) { /* aborted */ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) - md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, - &sync_blocks, 1); + mddev->bitmap_ops->end_sync(mddev, + mddev->curr_resync, + &sync_blocks); else for (i = 0; i < conf->geo.raid_disks; i++) { sector_t sect = raid10_find_virt(conf, mddev->curr_resync, i); - md_bitmap_end_sync(mddev->bitmap, sect, - &sync_blocks, 1); + + mddev->bitmap_ops->end_sync(mddev, sect, + &sync_blocks); } } else { /* completed sync */ @@ -3218,7 +3222,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, } conf->fullsync = 0; } - md_bitmap_close_sync(mddev->bitmap); + mddev->bitmap_ops->close_sync(mddev); close_sync(conf); *skipped = 1; return sectors_skipped; @@ -3287,10 +3291,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, r10_bio = NULL; for (i = 0 ; i < conf->geo.raid_disks; i++) { - 
int still_degraded; + bool still_degraded; struct r10bio *rb2; sector_t sect; - int must_sync; + bool must_sync; int any_working; struct raid10_info *mirror = &conf->mirrors[i]; struct md_rdev *mrdev, *mreplace; @@ -3307,7 +3311,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (!mrdev && !mreplace) continue; - still_degraded = 0; + still_degraded = false; /* want to reconstruct this device */ rb2 = r10_bio; sect = raid10_find_virt(conf, sector_nr, i); @@ -3320,8 +3324,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, * we only need to recover the block if it is set in * the bitmap */ - must_sync = md_bitmap_start_sync(mddev->bitmap, sect, - &sync_blocks, 1); + must_sync = mddev->bitmap_ops->start_sync(mddev, sect, + &sync_blocks, + true); if (sync_blocks < max_sync) max_sync = sync_blocks; if (!must_sync && @@ -3359,13 +3364,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, struct md_rdev *rdev = conf->mirrors[j].rdev; if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { - still_degraded = 1; + still_degraded = false; break; } } - must_sync = md_bitmap_start_sync(mddev->bitmap, sect, - &sync_blocks, still_degraded); + must_sync = mddev->bitmap_ops->start_sync(mddev, sect, + &sync_blocks, still_degraded); any_working = 0; for (j=0; j<conf->copies;j++) { @@ -3538,12 +3543,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, * safety reason, which ensures curr_resync_completed is * updated in bitmap_cond_end_sync. */ - md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, + mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); - if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, - &sync_blocks, mddev->degraded) && + if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, + &sync_blocks, + mddev->degraded) && !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* We can skip this block */ @@ -4190,6 +4196,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors) */ struct r10conf *conf = mddev->private; sector_t oldsize, size; + int ret; if (mddev->reshape_position != MaxSector) return -EBUSY; @@ -4202,11 +4209,11 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors) if (mddev->external_size && mddev->array_sectors > size) return -EINVAL; - if (mddev->bitmap) { - int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0); - if (ret) - return ret; - } + + ret = mddev->bitmap_ops->resize(mddev, size, 0, false); + if (ret) + return ret; + md_set_array_sectors(mddev, size); if (sectors > mddev->dev_sectors && mddev->recovery_cp > oldsize) { @@ -4472,7 +4479,7 @@ static int raid10_start_reshape(struct mddev *mddev) newsize = raid10_size(mddev, 0, conf->geo.raid_disks); if (!mddev_is_clustered(mddev)) { - ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); + ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false); if (ret) goto abort; else @@ -4487,20 +4494,20 @@ static int raid10_start_reshape(struct mddev *mddev) /* * some node is already performing reshape, and no need to - * call md_bitmap_resize again since it should be called when + * call bitmap_ops->resize again since it should be called when * receiving BITMAP_RESIZE msg */ if ((sb && (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize)) goto out; - ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); + ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false); if
(ret) goto abort; ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize); if (ret) { - md_bitmap_resize(mddev->bitmap, oldsize, 0, 0); + mddev->bitmap_ops->resize(mddev, oldsize, 0, false); goto abort; } } diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 874874fe4fa12d..b4f7b79fd187d0 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -313,10 +313,10 @@ void r5c_handle_cached_data_endio(struct r5conf *conf, if (sh->dev[i].written) { set_bit(R5_UPTODATE, &sh->dev[i].flags); r5c_return_dev_pending_writes(conf, &sh->dev[i]); - md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, - RAID5_STRIPE_SECTORS(conf), - !test_bit(STRIPE_DEGRADED, &sh->state), - 0); + conf->mddev->bitmap_ops->endwrite(conf->mddev, + sh->sector, RAID5_STRIPE_SECTORS(conf), + !test_bit(STRIPE_DEGRADED, &sh->state), + false); } } } @@ -2798,7 +2798,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, { struct r5l_log *log = READ_ONCE(conf->log); int i; - int do_wakeup = 0; sector_t tree_index; void __rcu **pslot; uintptr_t refcount; @@ -2815,7 +2814,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, for (i = sh->disks; i--; ) { clear_bit(R5_InJournal, &sh->dev[i].flags); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) - do_wakeup = 1; + wake_up_bit(&sh->dev[i].flags, R5_Overlap); } /* @@ -2828,9 +2827,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, if (atomic_dec_and_test(&conf->pending_full_writes)) md_wakeup_thread(conf->mddev->thread); - if (do_wakeup) - wake_up(&conf->wait_for_overlap); - spin_lock_irq(&log->stripe_in_journal_lock); list_del_init(&sh->r5c); spin_unlock_irq(&log->stripe_in_journal_lock); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c14cf2410365dd..dc2ea636d17342 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2337,7 +2337,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if (test_and_clear_bit(R5_Overlap, &dev->flags)) - wake_up(&sh->raid_conf->wait_for_overlap); + wake_up_bit(&dev->flags, R5_Overlap); } } local_unlock(&conf->percpu->lock); @@ -3473,7 +3473,7 @@ static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi, * With PPL only writes to consecutive data chunks within a * stripe are allowed because for a single stripe_head we can * only have one PPL entry at a time, which describes one data - * range. Not really an overlap, but wait_for_overlap can be + * range. Not really an overlap, but R5_Overlap can be * used to handle this. 
*/ sector_t sector; @@ -3563,8 +3563,8 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi, */ set_bit(STRIPE_BITMAP_PENDING, &sh->state); spin_unlock_irq(&sh->stripe_lock); - md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, - RAID5_STRIPE_SECTORS(conf), 0); + conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector, + RAID5_STRIPE_SECTORS(conf), false); spin_lock_irq(&sh->stripe_lock); clear_bit(STRIPE_BITMAP_PENDING, &sh->state); if (!sh->batch_head) { @@ -3652,7 +3652,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, log_stripe_write_finished(sh); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) - wake_up(&conf->wait_for_overlap); + wake_up_bit(&sh->dev[i].flags, R5_Overlap); while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { @@ -3663,8 +3663,9 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, bi = nextbi; } if (bitmap_end) - md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, - RAID5_STRIPE_SECTORS(conf), 0, 0); + conf->mddev->bitmap_ops->endwrite(conf->mddev, + sh->sector, RAID5_STRIPE_SECTORS(conf), + false, false); bitmap_end = 0; /* and fail all 'written' */ bi = sh->dev[i].written; @@ -3696,7 +3697,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].toread = NULL; spin_unlock_irq(&sh->stripe_lock); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) - wake_up(&conf->wait_for_overlap); + wake_up_bit(&sh->dev[i].flags, R5_Overlap); if (bi) s->to_read--; while (bi && bi->bi_iter.bi_sector < @@ -3709,8 +3710,9 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, } } if (bitmap_end) - md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, - RAID5_STRIPE_SECTORS(conf), 0, 0); + conf->mddev->bitmap_ops->endwrite(conf->mddev, + sh->sector, RAID5_STRIPE_SECTORS(conf), + false, false); /* If we were in the middle of a write the parity block might * still be locked - so just clear all R5_LOCKED flags */ @@ -3734,7 +3736,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, BUG_ON(sh->batch_head); clear_bit(STRIPE_SYNCING, &sh->state); if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) - wake_up(&conf->wait_for_overlap); + wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap); s->syncing = 0; s->replacing = 0; /* There is nothing more to do for sync/check/repair. 
@@ -4059,10 +4061,10 @@ static void handle_stripe_clean_event(struct r5conf *conf, bio_endio(wbi); wbi = wbi2; } - md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, - RAID5_STRIPE_SECTORS(conf), - !test_bit(STRIPE_DEGRADED, &sh->state), - 0); + conf->mddev->bitmap_ops->endwrite(conf->mddev, + sh->sector, RAID5_STRIPE_SECTORS(conf), + !test_bit(STRIPE_DEGRADED, &sh->state), + false); if (head_sh->batch_head) { sh = list_first_entry(&sh->batch_list, struct stripe_head, @@ -4875,7 +4877,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, { struct stripe_head *sh, *next; int i; - int do_wakeup = 0; list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { @@ -4911,7 +4912,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, spin_unlock_irq(&sh->stripe_lock); for (i = 0; i < sh->disks; i++) { if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) - do_wakeup = 1; + wake_up_bit(&sh->dev[i].flags, R5_Overlap); sh->dev[i].flags = head_sh->dev[i].flags & (~((1 << R5_WriteError) | (1 << R5_Overlap))); } @@ -4925,12 +4926,9 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, spin_unlock_irq(&head_sh->stripe_lock); for (i = 0; i < head_sh->disks; i++) if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) - do_wakeup = 1; + wake_up_bit(&head_sh->dev[i].flags, R5_Overlap); if (head_sh->state & handle_flags) set_bit(STRIPE_HANDLE, &head_sh->state); - - if (do_wakeup) - wake_up(&head_sh->raid_conf->wait_for_overlap); } static void handle_stripe(struct stripe_head *sh) @@ -5196,7 +5194,7 @@ static void handle_stripe(struct stripe_head *sh) md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); clear_bit(STRIPE_SYNCING, &sh->state); if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) - wake_up(&conf->wait_for_overlap); + wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap); } /* If the failed drives are just a ReadError, then we might need @@ -5259,7 +5257,7 @@ static void handle_stripe(struct stripe_head *sh) } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { clear_bit(STRIPE_EXPAND_READY, &sh->state); atomic_dec(&conf->reshape_stripes); - wake_up(&conf->wait_for_overlap); + wake_up(&conf->wait_for_reshape); md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); } @@ -5753,12 +5751,11 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) int d; again: sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0); - prepare_to_wait(&conf->wait_for_overlap, &w, - TASK_UNINTERRUPTIBLE); set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); if (test_bit(STRIPE_SYNCING, &sh->state)) { raid5_release_stripe(sh); - schedule(); + wait_on_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap, + TASK_UNINTERRUPTIBLE); goto again; } clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); @@ -5770,12 +5767,12 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) set_bit(R5_Overlap, &sh->dev[d].flags); spin_unlock_irq(&sh->stripe_lock); raid5_release_stripe(sh); - schedule(); + wait_on_bit(&sh->dev[d].flags, R5_Overlap, + TASK_UNINTERRUPTIBLE); goto again; } } set_bit(STRIPE_DISCARD, &sh->state); - finish_wait(&conf->wait_for_overlap, &w); sh->overwrite_disks = 0; for (d = 0; d < conf->raid_disks; d++) { if (d == sh->pd_idx || d == sh->qd_idx) @@ -5788,13 +5785,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) } spin_unlock_irq(&sh->stripe_lock); if (conf->mddev->bitmap) { - for (d = 0; - d < conf->raid_disks - conf->max_degraded; + for (d = 0; d < conf->raid_disks - 
conf->max_degraded; d++) - md_bitmap_startwrite(mddev->bitmap, - sh->sector, - RAID5_STRIPE_SECTORS(conf), - 0); + mddev->bitmap_ops->startwrite(mddev, sh->sector, + RAID5_STRIPE_SECTORS(conf), false); sh->bm_seq = conf->seq_flush + 1; set_bit(STRIPE_BIT_DELAY, &sh->state); } @@ -5855,7 +5849,6 @@ static int add_all_stripe_bios(struct r5conf *conf, struct bio *bi, int forwrite, int previous) { int dd_idx; - int ret = 1; spin_lock_irq(&sh->stripe_lock); @@ -5871,14 +5864,19 @@ static int add_all_stripe_bios(struct r5conf *conf, if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { set_bit(R5_Overlap, &dev->flags); - ret = 0; - continue; + spin_unlock_irq(&sh->stripe_lock); + raid5_release_stripe(sh); + /* release batch_last before wait to avoid risk of deadlock */ + if (ctx->batch_last) { + raid5_release_stripe(ctx->batch_last); + ctx->batch_last = NULL; + } + md_wakeup_thread(conf->mddev->thread); + wait_on_bit(&dev->flags, R5_Overlap, TASK_UNINTERRUPTIBLE); + return 0; } } - if (!ret) - goto out; - for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { struct r5dev *dev = &sh->dev[dd_idx]; @@ -5894,9 +5892,8 @@ static int add_all_stripe_bios(struct r5conf *conf, RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do); } -out: spin_unlock_irq(&sh->stripe_lock); - return ret; + return 1; } enum reshape_loc { @@ -5992,17 +5989,17 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, goto out_release; } - if (test_bit(STRIPE_EXPANDING, &sh->state) || - !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) { - /* - * Stripe is busy expanding or add failed due to - * overlap. Flush everything and wait a while. - */ + if (test_bit(STRIPE_EXPANDING, &sh->state)) { md_wakeup_thread(mddev->thread); ret = STRIPE_SCHEDULE_AND_RETRY; goto out_release; } + if (!add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) { + ret = STRIPE_RETRY; + goto out; + } + if (stripe_can_batch(sh)) { stripe_add_to_batch_list(conf, sh, ctx->batch_last); if (ctx->batch_last) @@ -6073,6 +6070,7 @@ static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf, static bool raid5_make_request(struct mddev *mddev, struct bio * bi) { DEFINE_WAIT_FUNC(wait, woken_wake_function); + bool on_wq; struct r5conf *conf = mddev->private; sector_t logical_sector; struct stripe_request_ctx ctx = {}; @@ -6146,11 +6144,15 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) * sequential IO pattern. We don't bother with the optimization when * reshaping as the performance benefit is not worth the complexity. 
*/ - if (likely(conf->reshape_progress == MaxSector)) + if (likely(conf->reshape_progress == MaxSector)) { logical_sector = raid5_bio_lowest_chunk_sector(conf, bi); + on_wq = false; + } else { + add_wait_queue(&conf->wait_for_reshape, &wait); + on_wq = true; + } s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf); - add_wait_queue(&conf->wait_for_overlap, &wait); while (1) { res = make_stripe_request(mddev, conf, &ctx, logical_sector, bi); @@ -6161,6 +6163,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) continue; if (res == STRIPE_SCHEDULE_AND_RETRY) { + WARN_ON_ONCE(!on_wq); /* * Must release the reference to batch_last before * scheduling and waiting for work to be done, @@ -6185,7 +6188,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) logical_sector = ctx.first_sector + (s << RAID5_STRIPE_SHIFT(conf)); } - remove_wait_queue(&conf->wait_for_overlap, &wait); + if (unlikely(on_wq)) + remove_wait_queue(&conf->wait_for_reshape, &wait); if (ctx.batch_last) raid5_release_stripe(ctx.batch_last); @@ -6338,7 +6342,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk : (safepos < writepos && readpos > writepos)) || time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { /* Cannot proceed until we've updated the superblock... */ - wait_event(conf->wait_for_overlap, + wait_event(conf->wait_for_reshape, atomic_read(&conf->reshape_stripes)==0 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); if (atomic_read(&conf->reshape_stripes) != 0) @@ -6364,7 +6368,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk spin_lock_irq(&conf->device_lock); conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); - wake_up(&conf->wait_for_overlap); + wake_up(&conf->wait_for_reshape); sysfs_notify_dirent_safe(mddev->sysfs_completed); } @@ -6447,7 +6451,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk (sector_nr - mddev->curr_resync_completed) * 2 >= mddev->resync_max - mddev->curr_resync_completed) { /* Cannot proceed until we've updated the superblock... 
*/ - wait_event(conf->wait_for_overlap, + wait_event(conf->wait_for_reshape, atomic_read(&conf->reshape_stripes) == 0 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); if (atomic_read(&conf->reshape_stripes) != 0) @@ -6473,7 +6477,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk spin_lock_irq(&conf->device_lock); conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); - wake_up(&conf->wait_for_overlap); + wake_up(&conf->wait_for_reshape); sysfs_notify_dirent_safe(mddev->sysfs_completed); } ret: @@ -6486,7 +6490,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n struct r5conf *conf = mddev->private; struct stripe_head *sh; sector_t sync_blocks; - int still_degraded = 0; + bool still_degraded = false; int i; if (sector_nr >= max_sector) { @@ -6498,17 +6502,17 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n } if (mddev->curr_resync < max_sector) /* aborted */ - md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, - &sync_blocks, 1); + mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync, + &sync_blocks); else /* completed sync */ conf->fullsync = 0; - md_bitmap_close_sync(mddev->bitmap); + mddev->bitmap_ops->close_sync(mddev); return 0; } /* Allow raid5_quiesce to complete */ - wait_event(conf->wait_for_overlap, conf->quiesce != 2); + wait_event(conf->wait_for_reshape, conf->quiesce != 2); if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) return reshape_request(mddev, sector_nr, skipped); @@ -6531,7 +6535,8 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n } if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && !conf->fullsync && - !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && + !mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, + true) && sync_blocks >= RAID5_STRIPE_SECTORS(conf)) { /* we can skip this block, and probably more */ do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf)); @@ -6540,7 +6545,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n return sync_blocks * RAID5_STRIPE_SECTORS(conf); } - md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); + mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false); sh = raid5_get_active_stripe(conf, NULL, sector_nr, R5_GAS_NOBLOCK); @@ -6559,10 +6564,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n struct md_rdev *rdev = conf->disks[i].rdev; if (rdev == NULL || test_bit(Faulty, &rdev->flags)) - still_degraded = 1; + still_degraded = true; } - md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); + mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, + still_degraded); set_bit(STRIPE_SYNC_REQUESTED, &sh->state); set_bit(STRIPE_HANDLE, &sh->state); @@ -6767,7 +6773,7 @@ static void raid5d(struct md_thread *thread) /* Now is a good time to flush some bitmap updates */ conf->seq_flush++; spin_unlock_irq(&conf->device_lock); - md_bitmap_unplug(mddev->bitmap); + mddev->bitmap_ops->unplug(mddev, true); spin_lock_irq(&conf->device_lock); conf->seq_write = conf->seq_flush; activate_bit_delay(conf, conf->temp_inactive_list); @@ -7492,7 +7498,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) init_waitqueue_head(&conf->wait_for_quiescent); init_waitqueue_head(&conf->wait_for_stripe); - init_waitqueue_head(&conf->wait_for_overlap); + init_waitqueue_head(&conf->wait_for_reshape); INIT_LIST_HEAD(&conf->handle_list); 
INIT_LIST_HEAD(&conf->loprio_list); INIT_LIST_HEAD(&conf->hold_list); @@ -8312,6 +8318,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) */ sector_t newsize; struct r5conf *conf = mddev->private; + int ret; if (raid5_has_log(conf) || raid5_has_ppl(conf)) return -EINVAL; @@ -8320,11 +8327,11 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) if (mddev->external_size && mddev->array_sectors > newsize) return -EINVAL; - if (mddev->bitmap) { - int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0); - if (ret) - return ret; - } + + ret = mddev->bitmap_ops->resize(mddev, sectors, 0, false); + if (ret) + return ret; + md_set_array_sectors(mddev, newsize); if (sectors > mddev->dev_sectors && mddev->recovery_cp > mddev->dev_sectors) { @@ -8550,7 +8557,7 @@ static void end_reshape(struct r5conf *conf) !test_bit(In_sync, &rdev->flags)) rdev->recovery_offset = MaxSector; spin_unlock_irq(&conf->device_lock); - wake_up(&conf->wait_for_overlap); + wake_up(&conf->wait_for_reshape); mddev_update_io_opt(conf->mddev, conf->raid_disks - conf->max_degraded); @@ -8614,13 +8621,13 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce) conf->quiesce = 1; unlock_all_device_hash_locks_irq(conf); /* allow reshape to continue */ - wake_up(&conf->wait_for_overlap); + wake_up(&conf->wait_for_reshape); } else { /* re-enable writes */ lock_all_device_hash_locks_irq(conf); conf->quiesce = 0; wake_up(&conf->wait_for_quiescent); - wake_up(&conf->wait_for_overlap); + wake_up(&conf->wait_for_reshape); unlock_all_device_hash_locks_irq(conf); } log_quiesce(conf, quiesce); @@ -8939,7 +8946,7 @@ static void raid5_prepare_suspend(struct mddev *mddev) { struct r5conf *conf = mddev->private; - wake_up(&conf->wait_for_overlap); + wake_up(&conf->wait_for_reshape); } static struct md_personality raid6_personality = diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 9b5a7dc3f2a04b..896ecfc4afa6fa 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -668,7 +668,7 @@ struct r5conf { struct llist_head released_stripes; wait_queue_head_t wait_for_quiescent; wait_queue_head_t wait_for_stripe; - wait_queue_head_t wait_for_overlap; + wait_queue_head_t wait_for_reshape; unsigned long cache_state; struct shrinker *shrinker; int pool_size; /* number of disks in stripeheads in pool */ diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index da09834990b875..c7d36010c89027 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -673,8 +673,9 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, /* Retry this message */ data->attempts -= attempts_made; if (msg->timeout) - dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n", - msg->len, msg->msg, data->attempts, msg->reply); + dprintk(2, "retransmit: %*ph (attempts: %d, wait for %*ph)\n", + msg->len, msg->msg, data->attempts, + data->match_len, data->match_reply); else dprintk(2, "retransmit: %*ph (attempts: %d)\n", msg->len, msg->msg, data->attempts); @@ -780,6 +781,8 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, { struct cec_data *data; bool is_raw = msg_is_raw(msg); + bool reply_vendor_id = (msg->flags & CEC_MSG_FL_REPLY_VENDOR_ID) && + msg->len > 1 && msg->msg[1] == CEC_MSG_VENDOR_COMMAND_WITH_ID; int err; if (adap->devnode.unregistered) @@ -794,12 +797,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, msg->tx_low_drive_cnt = 0; msg->tx_error_cnt = 0; msg->sequence = 0; + msg->flags &= 
CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW | + (reply_vendor_id ? CEC_MSG_FL_REPLY_VENDOR_ID : 0); - if (msg->reply && msg->timeout == 0) { + if ((reply_vendor_id || msg->reply) && msg->timeout == 0) { /* Make sure the timeout isn't 0. */ msg->timeout = 1000; } - msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW; if (!msg->timeout) msg->flags &= ~CEC_MSG_FL_REPLY_TO_FOLLOWERS; @@ -809,6 +813,11 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, dprintk(1, "%s: invalid length %d\n", __func__, msg->len); return -EINVAL; } + if (reply_vendor_id && msg->len < 6) { + dprintk(1, "%s: message too short\n", + __func__); + return -EINVAL; + } memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len); @@ -900,8 +909,9 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, __func__); return -ENONET; } - if (msg->reply) { - dprintk(1, "%s: invalid msg->reply\n", __func__); + if (reply_vendor_id || msg->reply) { + dprintk(1, "%s: adapter is unconfigured so reply is not supported\n", + __func__); return -EINVAL; } } @@ -923,6 +933,14 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, data->fh = fh; data->adap = adap; data->blocking = block; + if (reply_vendor_id) { + memcpy(data->match_reply, msg->msg + 1, 4); + data->match_reply[4] = msg->reply; + data->match_len = 5; + } else if (msg->timeout) { + data->match_reply[0] = msg->reply; + data->match_len = 1; + } init_completion(&data->c); INIT_DELAYED_WORK(&data->work, cec_wait_timeout); @@ -1211,13 +1229,15 @@ void cec_received_msg_ts(struct cec_adapter *adap, if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC && (cmd == CEC_MSG_REPORT_ARC_INITIATED || cmd == CEC_MSG_REPORT_ARC_TERMINATED) && - (dst->reply == CEC_MSG_REPORT_ARC_INITIATED || - dst->reply == CEC_MSG_REPORT_ARC_TERMINATED)) + (data->match_reply[0] == CEC_MSG_REPORT_ARC_INITIATED || + data->match_reply[0] == CEC_MSG_REPORT_ARC_TERMINATED)) { dst->reply = cmd; + data->match_reply[0] = cmd; + } /* Does the command match? */ if ((abort && cmd != dst->msg[1]) || - (!abort && cmd != dst->reply)) + (!abort && memcmp(data->match_reply, msg->msg + 1, data->match_len))) continue; /* Does the addressing match? 
*/ @@ -2318,18 +2338,21 @@ int cec_adap_status(struct seq_file *file, void *priv) } data = adap->transmitting; if (data) - seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n", - data->msg.len, data->msg.msg, data->msg.reply, + seq_printf(file, "transmitting message: %*ph (reply: %*ph, timeout: %ums)\n", + data->msg.len, data->msg.msg, + data->match_len, data->match_reply, data->msg.timeout); seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz); list_for_each_entry(data, &adap->transmit_queue, list) { - seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n", - data->msg.len, data->msg.msg, data->msg.reply, + seq_printf(file, "queued tx message: %*ph (reply: %*ph, timeout: %ums)\n", + data->msg.len, data->msg.msg, + data->match_len, data->match_reply, data->msg.timeout); } list_for_each_entry(data, &adap->wait_queue, list) { - seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n", - data->msg.len, data->msg.msg, data->msg.reply, + seq_printf(file, "message waiting for reply: %*ph (reply: %*ph, timeout: %ums)\n", + data->msg.len, data->msg.msg, + data->match_len, data->match_reply, data->msg.timeout); } diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c index 3ef91534430444..c50299246fc46d 100644 --- a/drivers/media/cec/core/cec-api.c +++ b/drivers/media/cec/core/cec-api.c @@ -580,7 +580,7 @@ static int cec_open(struct inode *inode, struct file *filp) fh->mode_initiator = CEC_MODE_INITIATOR; fh->adap = adap; - err = cec_get_device(devnode); + err = cec_get_device(adap); if (err) { kfree(fh); return err; @@ -686,7 +686,7 @@ static int cec_release(struct inode *inode, struct file *filp) mutex_unlock(&fh->lock); kfree(fh); - cec_put_device(devnode); + cec_put_device(adap); filp->private_data = NULL; return 0; } @@ -698,5 +698,4 @@ const struct file_operations cec_devnode_fops = { .compat_ioctl = cec_ioctl, .release = cec_release, .poll = cec_poll, - .llseek = no_llseek, }; diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c index 6f940df0230c95..48282d272fe649 100644 --- a/drivers/media/cec/core/cec-core.c +++ b/drivers/media/cec/core/cec-core.c @@ -51,35 +51,6 @@ static struct dentry *top_cec_dir; /* dev to cec_devnode */ #define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev) -int cec_get_device(struct cec_devnode *devnode) -{ - /* - * Check if the cec device is available. This needs to be done with - * the devnode->lock held to prevent an open/unregister race: - * without the lock, the device could be unregistered and freed between - * the devnode->registered check and get_device() calls, leading to - * a crash. - */ - mutex_lock(&devnode->lock); - /* - * return ENODEV if the cec device has been removed - * already or if it is not registered anymore. - */ - if (!devnode->registered) { - mutex_unlock(&devnode->lock); - return -ENODEV; - } - /* and increase the device refcount */ - get_device(&devnode->dev); - mutex_unlock(&devnode->lock); - return 0; -} - -void cec_put_device(struct cec_devnode *devnode) -{ - put_device(&devnode->dev); -} - /* Called when the last user of the cec device exits. 
*/ static void cec_devnode_release(struct device *cd) { @@ -273,7 +244,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, adap->cec_pin_is_high = true; adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0; adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE; - adap->capabilities = caps; + adap->capabilities = caps | CEC_CAP_REPLY_VENDOR_ID; if (debug_phys_addr) adap->capabilities |= CEC_CAP_PHYS_ADDR; adap->needs_hpd = caps & CEC_CAP_NEEDS_HPD; diff --git a/drivers/media/cec/core/cec-priv.h b/drivers/media/cec/core/cec-priv.h index ed1f8c67626bf9..ce42a37c4ac04c 100644 --- a/drivers/media/cec/core/cec-priv.h +++ b/drivers/media/cec/core/cec-priv.h @@ -37,8 +37,6 @@ static inline bool msg_is_raw(const struct cec_msg *msg) /* cec-core.c */ extern int cec_debug; -int cec_get_device(struct cec_devnode *devnode); -void cec_put_device(struct cec_devnode *devnode); /* cec-adap.c */ int cec_monitor_all_cnt_inc(struct cec_adapter *adap); diff --git a/drivers/media/cec/usb/Kconfig b/drivers/media/cec/usb/Kconfig index 3f3a5c75287a7e..6faf4742981dff 100644 --- a/drivers/media/cec/usb/Kconfig +++ b/drivers/media/cec/usb/Kconfig @@ -3,6 +3,7 @@ # USB drivers if USB_SUPPORT && TTY +source "drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig" source "drivers/media/cec/usb/pulse8/Kconfig" source "drivers/media/cec/usb/rainshadow/Kconfig" endif diff --git a/drivers/media/cec/usb/Makefile b/drivers/media/cec/usb/Makefile index e4183d1bfa9acb..c082679f5318e0 100644 --- a/drivers/media/cec/usb/Makefile +++ b/drivers/media/cec/usb/Makefile @@ -2,5 +2,6 @@ # # Makefile for the CEC USB device drivers. # +obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) += extron-da-hd-4k-plus/ obj-$(CONFIG_USB_PULSE8_CEC) += pulse8/ obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow/ diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig b/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig new file mode 100644 index 00000000000000..5354f0eebe5c83 --- /dev/null +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +config USB_EXTRON_DA_HD_4K_PLUS_CEC + tristate "Extron DA HD 4K Plus CEC driver" + depends on VIDEO_DEV + depends on USB + depends on USB_ACM + select CEC_CORE + select SERIO + select SERIO_SERPORT + help + This is a CEC driver for the Extron DA HD 4K Plus HDMI Splitter. + + To compile this driver as a module, choose M here: the + module will be called extron-da-hd-4k-plus-cec. diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile new file mode 100644 index 00000000000000..2e8f7f60263f1c --- /dev/null +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile @@ -0,0 +1,8 @@ +extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o +obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o + +all: + $(MAKE) -C $(KDIR) M=$(shell pwd) modules + +install: + $(MAKE) -C $(KDIR) M=$(shell pwd) modules_install diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c new file mode 100644 index 00000000000000..73fdec4b791d19 --- /dev/null +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ */ + +#include <media/cec.h> + +#include "cec-splitter.h" + +/* + * Helper function to reply to a received message with a Feature Abort + * message. + */ +static int cec_feature_abort_reason(struct cec_adapter *adap, + struct cec_msg *msg, u8 reason) +{ + struct cec_msg tx_msg = { }; + + /* + * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT + * message! + */ + if (msg->msg[1] == CEC_MSG_FEATURE_ABORT) + return 0; + /* Don't Feature Abort messages from 'Unregistered' */ + if (cec_msg_initiator(msg) == CEC_LOG_ADDR_UNREGISTERED) + return 0; + cec_msg_set_reply_to(&tx_msg, msg); + cec_msg_feature_abort(&tx_msg, msg->msg[1], reason); + return cec_transmit_msg(adap, &tx_msg, false); +} + +/* Transmit an Active Source message from this output port to a sink */ +static void cec_port_out_active_source(struct cec_splitter_port *p) +{ + struct cec_adapter *adap = p->adap; + struct cec_msg msg; + + if (!adap->is_configured) + return; + p->is_active_source = true; + cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0); + cec_msg_active_source(&msg, adap->phys_addr); + cec_transmit_msg(adap, &msg, false); +} + +/* Transmit Active Source messages from all output ports to the sinks */ +static void cec_out_active_source(struct cec_splitter *splitter) +{ + unsigned int i; + + for (i = 0; i < splitter->num_out_ports; i++) + cec_port_out_active_source(splitter->ports[i]); +} + +/* Transmit a Standby message from this output port to a sink */ +static void cec_port_out_standby(struct cec_splitter_port *p) +{ + struct cec_adapter *adap = p->adap; + struct cec_msg msg; + + if (!adap->is_configured) + return; + cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0); + cec_msg_standby(&msg); + cec_transmit_msg(adap, &msg, false); +} + +/* Transmit Standby messages from all output ports to the sinks */ +static void cec_out_standby(struct cec_splitter *splitter) +{ + unsigned int i; + + for (i = 0; i < splitter->num_out_ports; i++) + cec_port_out_standby(splitter->ports[i]); +} + +/* Transmit an Image/Text View On message from this output port to a sink */ +static void cec_port_out_wakeup(struct cec_splitter_port *p, u8 opcode) +{ + struct cec_adapter *adap = p->adap; + u8 la = adap->log_addrs.log_addr[0]; + struct cec_msg msg; + + if (la == CEC_LOG_ADDR_INVALID) + la = CEC_LOG_ADDR_UNREGISTERED; + cec_msg_init(&msg, la, 0); + msg.len = 2; + msg.msg[1] = opcode; + cec_transmit_msg(adap, &msg, false); +} + +/* Transmit Image/Text View On messages from all output ports to the sinks */ +static void cec_out_wakeup(struct cec_splitter *splitter, u8 opcode) +{ + unsigned int i; + + for (i = 0; i < splitter->num_out_ports; i++) + cec_port_out_wakeup(splitter->ports[i], opcode); +} + +/* + * Update the power state of the unconfigured CEC device to either + * Off or On depending on the current state of the splitter. + * This keeps the outputs in a consistent state. + */ +void cec_splitter_unconfigured_output(struct cec_splitter_port *p) +{ + p->video_latency = 1; + p->power_status = p->splitter->is_standby ? + CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON; + + /* The adapter was unconfigured, so clear the sequence and ts values */ + p->out_give_device_power_status_seq = 0; + p->out_give_device_power_status_ts = ktime_set(0, 0); + p->out_request_current_latency_seq = 0; + p->out_request_current_latency_ts = ktime_set(0, 0); +} + +/* + * Update the power state of the newly configured CEC device to either + * Off or On depending on the current state of the splitter.
+ * This keeps the outputs in a consistent state. + */ +void cec_splitter_configured_output(struct cec_splitter_port *p) +{ + p->video_latency = 1; + p->power_status = p->splitter->is_standby ? + CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON; + + if (p->splitter->is_standby) { + /* + * Some sinks only obey Standby if it comes from the + * active source. + */ + cec_port_out_active_source(p); + cec_port_out_standby(p); + } else { + cec_port_out_wakeup(p, CEC_MSG_IMAGE_VIEW_ON); + } +} + +/* Pass the in_msg on to all output ports */ +static void cec_out_passthrough(struct cec_splitter *splitter, + const struct cec_msg *in_msg) +{ + unsigned int i; + + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + struct cec_adapter *adap = p->adap; + struct cec_msg msg; + + if (!adap->is_configured) + continue; + cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0); + msg.len = in_msg->len; + memcpy(msg.msg + 1, in_msg->msg + 1, msg.len - 1); + cec_transmit_msg(adap, &msg, false); + } +} + +/* + * See if all output ports received the Report Current Latency message, + * and if so, transmit the result from the input port to the video source. + */ +static void cec_out_report_current_latency(struct cec_splitter *splitter, + struct cec_adapter *input_adap) +{ + struct cec_msg reply = {}; + unsigned int reply_lat = 0; + unsigned int cnt = 0; + unsigned int i; + + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + struct cec_adapter *adap = p->adap; + + /* Skip unconfigured ports */ + if (!adap->is_configured) + continue; + /* Return if a port is still waiting for a reply */ + if (p->out_request_current_latency_seq) + return; + reply_lat += p->video_latency - 1; + cnt++; + } + + /* + * All ports that can reply, replied, so clear the sequence + * and timestamp values. + */ + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + + p->out_request_current_latency_seq = 0; + p->out_request_current_latency_ts = ktime_set(0, 0); + } + + /* + * Return if there were no replies or the input port is no longer + * configured. + */ + if (!cnt || !input_adap->is_configured) + return; + + /* Reply with the average latency */ + reply_lat = 1 + reply_lat / cnt; + cec_msg_init(&reply, input_adap->log_addrs.log_addr[0], + splitter->request_current_latency_dest); + cec_msg_report_current_latency(&reply, input_adap->phys_addr, + reply_lat, 1, 1, 1); + cec_transmit_msg(input_adap, &reply, false); +} + +/* Transmit Request Current Latency to all output ports */ +static int cec_out_request_current_latency(struct cec_splitter *splitter) +{ + ktime_t now = ktime_get(); + bool error = true; + unsigned int i; + + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + struct cec_adapter *adap = p->adap; + + if (!adap->is_configured) { + /* Clear if not configured */ + p->out_request_current_latency_seq = 0; + p->out_request_current_latency_ts = ktime_set(0, 0); + } else if (!p->out_request_current_latency_seq) { + /* + * Keep the old ts if an earlier request is still + * pending. This ensures that the request will + * eventually time out based on the timestamp of + * the first request if the sink is unresponsive. 
+ */ + p->out_request_current_latency_ts = now; + } + } + + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + struct cec_adapter *adap = p->adap; + struct cec_msg msg; + + if (!adap->is_configured) + continue; + cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0); + cec_msg_request_current_latency(&msg, true, adap->phys_addr); + if (cec_transmit_msg(adap, &msg, false)) + continue; + p->out_request_current_latency_seq = msg.sequence | (1U << 31); + error = false; + } + return error ? -ENODEV : 0; +} + +/* + * See if all output ports received the Report Power Status message, + * and if so, transmit the result from the input port to the video source. + */ +static void cec_out_report_power_status(struct cec_splitter *splitter, + struct cec_adapter *input_adap) +{ + struct cec_msg reply = {}; + /* The target power status of the splitter itself */ + u8 splitter_pwr = splitter->is_standby ? + CEC_OP_POWER_STATUS_STANDBY : CEC_OP_POWER_STATUS_ON; + /* + * The transient power status of the splitter, used if not all + * output report the target power status. + */ + u8 splitter_transient_pwr = splitter->is_standby ? + CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON; + u8 reply_pwr = splitter_pwr; + unsigned int i; + + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + + /* Skip if no sink was found (HPD was low for more than 5s) */ + if (!p->found_sink) + continue; + + /* Return if a port is still waiting for a reply */ + if (p->out_give_device_power_status_seq) + return; + if (p->power_status != splitter_pwr) + reply_pwr = splitter_transient_pwr; + } + + /* + * All ports that can reply, replied, so clear the sequence + * and timestamp values. + */ + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + + p->out_give_device_power_status_seq = 0; + p->out_give_device_power_status_ts = ktime_set(0, 0); + } + + /* Return if the input port is no longer configured. */ + if (!input_adap->is_configured) + return; + + /* Reply with the new power status */ + cec_msg_init(&reply, input_adap->log_addrs.log_addr[0], + splitter->give_device_power_status_dest); + cec_msg_report_power_status(&reply, reply_pwr); + cec_transmit_msg(input_adap, &reply, false); +} + +/* Transmit Give Device Power Status to all output ports */ +static int cec_out_give_device_power_status(struct cec_splitter *splitter) +{ + ktime_t now = ktime_get(); + bool error = true; + unsigned int i; + + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + struct cec_adapter *adap = p->adap; + + /* + * Keep the old ts if an earlier request is still + * pending. This ensures that the request will + * eventually time out based on the timestamp of + * the first request if the sink is unresponsive. + */ + if (adap->is_configured && !p->out_give_device_power_status_seq) + p->out_give_device_power_status_ts = now; + } + + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + struct cec_adapter *adap = p->adap; + struct cec_msg msg; + + if (!adap->is_configured) + continue; + + cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0); + cec_msg_give_device_power_status(&msg, true); + if (cec_transmit_msg(adap, &msg, false)) + continue; + p->out_give_device_power_status_seq = msg.sequence | (1U << 31); + error = false; + } + return error ? 
-ENODEV : 0; +} + +/* + * CEC messages received on the HDMI input of the splitter are + * forwarded (if relevant) to the HDMI outputs of the splitter. + */ +int cec_splitter_received_input(struct cec_splitter_port *p, struct cec_msg *msg) +{ + if (!cec_msg_status_is_ok(msg)) + return 0; + + if (msg->len < 2) + return -ENOMSG; + + switch (msg->msg[1]) { + case CEC_MSG_DEVICE_VENDOR_ID: + case CEC_MSG_REPORT_POWER_STATUS: + case CEC_MSG_SET_STREAM_PATH: + case CEC_MSG_ROUTING_CHANGE: + case CEC_MSG_REQUEST_ACTIVE_SOURCE: + case CEC_MSG_SYSTEM_AUDIO_MODE_STATUS: + return 0; + + case CEC_MSG_STANDBY: + p->splitter->is_standby = true; + cec_out_standby(p->splitter); + return 0; + + case CEC_MSG_IMAGE_VIEW_ON: + case CEC_MSG_TEXT_VIEW_ON: + p->splitter->is_standby = false; + cec_out_wakeup(p->splitter, msg->msg[1]); + return 0; + + case CEC_MSG_ACTIVE_SOURCE: + cec_out_active_source(p->splitter); + return 0; + + case CEC_MSG_SET_SYSTEM_AUDIO_MODE: + cec_out_passthrough(p->splitter, msg); + return 0; + + case CEC_MSG_GIVE_DEVICE_POWER_STATUS: + p->splitter->give_device_power_status_dest = + cec_msg_initiator(msg); + if (cec_out_give_device_power_status(p->splitter)) + cec_feature_abort_reason(p->adap, msg, + CEC_OP_ABORT_INCORRECT_MODE); + return 0; + + case CEC_MSG_REQUEST_CURRENT_LATENCY: { + u16 pa; + + p->splitter->request_current_latency_dest = + cec_msg_initiator(msg); + cec_ops_request_current_latency(msg, &pa); + if (pa == p->adap->phys_addr && + cec_out_request_current_latency(p->splitter)) + cec_feature_abort_reason(p->adap, msg, + CEC_OP_ABORT_INCORRECT_MODE); + return 0; + } + + default: + return -ENOMSG; + } + return -ENOMSG; +} + +void cec_splitter_nb_transmit_canceled_output(struct cec_splitter_port *p, + const struct cec_msg *msg, + struct cec_adapter *input_adap) +{ + struct cec_splitter *splitter = p->splitter; + u32 seq = msg->sequence | (1U << 31); + + /* + * If this is the result of a failed non-blocking transmit, or it is + * the result of the failed reply to a non-blocking transmit, then + * check if the original transmit was to get the current power status + * or latency and, if so, assume that the remove device is for one + * reason or another unavailable and assume that it is in the same + * power status as the splitter, or has no video latency. + */ + if ((cec_msg_recv_is_tx_result(msg) && !(msg->tx_status & CEC_TX_STATUS_OK)) || + (cec_msg_recv_is_rx_result(msg) && !(msg->rx_status & CEC_RX_STATUS_OK))) { + u8 tx_op = msg->msg[1]; + + if (msg->len < 2) + return; + if (cec_msg_recv_is_rx_result(msg) && + (msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT)) + tx_op = msg->msg[2]; + switch (tx_op) { + case CEC_MSG_GIVE_DEVICE_POWER_STATUS: + if (p->out_give_device_power_status_seq != seq) + break; + p->out_give_device_power_status_seq = 0; + p->out_give_device_power_status_ts = ktime_set(0, 0); + p->power_status = splitter->is_standby ? 
+ CEC_OP_POWER_STATUS_STANDBY : + CEC_OP_POWER_STATUS_ON; + cec_out_report_power_status(splitter, input_adap); + break; + case CEC_MSG_REQUEST_CURRENT_LATENCY: + if (p->out_request_current_latency_seq != seq) + break; + p->video_latency = 1; + p->out_request_current_latency_seq = 0; + p->out_request_current_latency_ts = ktime_set(0, 0); + cec_out_report_current_latency(splitter, input_adap); + break; + } + return; + } + + if (cec_msg_recv_is_tx_result(msg)) { + if (p->out_request_current_latency_seq != seq) + return; + p->out_request_current_latency_ts = ns_to_ktime(msg->tx_ts); + return; + } +} + +/* + * CEC messages received on an HDMI output of the splitter + * are processed here. + */ +int cec_splitter_received_output(struct cec_splitter_port *p, struct cec_msg *msg, + struct cec_adapter *input_adap) +{ + struct cec_adapter *adap = p->adap; + struct cec_splitter *splitter = p->splitter; + u32 seq = msg->sequence | (1U << 31); + struct cec_msg reply = {}; + u16 pa; + + if (!adap->is_configured || msg->len < 2) + return -ENOMSG; + + switch (msg->msg[1]) { + case CEC_MSG_REPORT_POWER_STATUS: { + u8 pwr; + + cec_ops_report_power_status(msg, &pwr); + if (pwr > CEC_OP_POWER_STATUS_TO_STANDBY) + pwr = splitter->is_standby ? + CEC_OP_POWER_STATUS_TO_STANDBY : + CEC_OP_POWER_STATUS_TO_ON; + p->power_status = pwr; + if (p->out_give_device_power_status_seq == seq) { + p->out_give_device_power_status_seq = 0; + p->out_give_device_power_status_ts = ktime_set(0, 0); + } + cec_out_report_power_status(splitter, input_adap); + return 0; + } + + case CEC_MSG_REPORT_CURRENT_LATENCY: { + u8 video_lat; + u8 low_lat_mode; + u8 audio_out_comp; + u8 audio_out_delay; + + cec_ops_report_current_latency(msg, &pa, + &video_lat, &low_lat_mode, + &audio_out_comp, &audio_out_delay); + if (!video_lat || video_lat >= 252) + video_lat = 1; + p->video_latency = video_lat; + if (p->out_request_current_latency_seq == seq) { + p->out_request_current_latency_seq = 0; + p->out_request_current_latency_ts = ktime_set(0, 0); + } + cec_out_report_current_latency(splitter, input_adap); + return 0; + } + + case CEC_MSG_STANDBY: + case CEC_MSG_ROUTING_CHANGE: + case CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS: + return 0; + + case CEC_MSG_ACTIVE_SOURCE: + cec_ops_active_source(msg, &pa); + if (pa == 0) + p->is_active_source = false; + return 0; + + case CEC_MSG_REQUEST_ACTIVE_SOURCE: + if (!p->is_active_source) + return 0; + cec_msg_set_reply_to(&reply, msg); + cec_msg_active_source(&reply, adap->phys_addr); + cec_transmit_msg(adap, &reply, false); + return 0; + + case CEC_MSG_GIVE_DEVICE_POWER_STATUS: + cec_msg_set_reply_to(&reply, msg); + cec_msg_report_power_status(&reply, splitter->is_standby ? + CEC_OP_POWER_STATUS_STANDBY : + CEC_OP_POWER_STATUS_ON); + cec_transmit_msg(adap, &reply, false); + return 0; + + case CEC_MSG_SET_STREAM_PATH: + cec_ops_set_stream_path(msg, &pa); + if (pa == adap->phys_addr) { + cec_msg_set_reply_to(&reply, msg); + cec_msg_active_source(&reply, pa); + cec_transmit_msg(adap, &reply, false); + } + return 0; + + default: + return -ENOMSG; + } + return -ENOMSG; +} + +/* + * Called every second to check for timed out messages and whether there + * still is a video sink connected or not. + * + * Returns true if sinks were lost. + */ +bool cec_splitter_poll(struct cec_splitter *splitter, + struct cec_adapter *input_adap, bool debug) +{ + ktime_t now = ktime_get(); + u8 pwr = splitter->is_standby ? 
+ CEC_OP_POWER_STATUS_STANDBY : CEC_OP_POWER_STATUS_ON; + unsigned int max_delay_ms = input_adap->xfer_timeout_ms + 2000; + unsigned int i; + bool res = false; + + for (i = 0; i < splitter->num_out_ports; i++) { + struct cec_splitter_port *p = splitter->ports[i]; + s64 pwr_delta, lat_delta; + bool pwr_timeout, lat_timeout; + + if (!p) + continue; + + pwr_delta = ktime_ms_delta(now, p->out_give_device_power_status_ts); + pwr_timeout = p->out_give_device_power_status_seq && + pwr_delta >= max_delay_ms; + lat_delta = ktime_ms_delta(now, p->out_request_current_latency_ts); + lat_timeout = p->out_request_current_latency_seq && + lat_delta >= max_delay_ms; + + /* + * If the HPD is low for more than 5 seconds, then assume no display + * is connected. + */ + if (p->found_sink && ktime_to_ns(p->lost_sink_ts) && + ktime_ms_delta(now, p->lost_sink_ts) > 5000) { + if (debug) + dev_info(splitter->dev, + "port %u: HPD low for more than 5s, assume no sink is connected.\n", + p->port); + p->found_sink = false; + p->lost_sink_ts = ktime_set(0, 0); + res = true; + } + + /* + * If the power status request timed out, then set the port's + * power status to that of the splitter, ensuring a consistent + * power state. + */ + if (pwr_timeout) { + mutex_lock(&p->adap->lock); + if (debug) + dev_info(splitter->dev, + "port %u: give up on power status for seq %u\n", + p->port, + p->out_give_device_power_status_seq & ~(1 << 31)); + p->power_status = pwr; + p->out_give_device_power_status_seq = 0; + p->out_give_device_power_status_ts = ktime_set(0, 0); + mutex_unlock(&p->adap->lock); + cec_out_report_power_status(splitter, input_adap); + } + + /* + * If the current latency request timed out, then set the port's + * latency to 1. + */ + if (lat_timeout) { + mutex_lock(&p->adap->lock); + if (debug) + dev_info(splitter->dev, + "port %u: give up on latency for seq %u\n", + p->port, + p->out_request_current_latency_seq & ~(1 << 31)); + p->video_latency = 1; + p->out_request_current_latency_seq = 0; + p->out_request_current_latency_ts = ktime_set(0, 0); + mutex_unlock(&p->adap->lock); + cec_out_report_current_latency(splitter, input_adap); + } + } + return res; +} diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h new file mode 100644 index 00000000000000..7422f7c5719edd --- /dev/null +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 
+ */ + +#ifndef _CEC_SPLITTER_H_ +#define _CEC_SPLITTER_H_ + +struct cec_splitter; + +#define STATE_CHANGE_MAX_REPEATS 2 + +struct cec_splitter_port { + struct cec_splitter *splitter; + struct cec_adapter *adap; + unsigned int port; + bool is_active_source; + bool found_sink; + ktime_t lost_sink_ts; + u32 out_request_current_latency_seq; + ktime_t out_request_current_latency_ts; + u8 video_latency; + u32 out_give_device_power_status_seq; + ktime_t out_give_device_power_status_ts; + u8 power_status; +}; + +struct cec_splitter { + struct device *dev; + unsigned int num_out_ports; + struct cec_splitter_port **ports; + + /* High-level splitter state */ + u8 request_current_latency_dest; + u8 give_device_power_status_dest; + bool is_standby; +}; + +void cec_splitter_unconfigured_output(struct cec_splitter_port *port); +void cec_splitter_configured_output(struct cec_splitter_port *port); +int cec_splitter_received_input(struct cec_splitter_port *port, struct cec_msg *msg); +int cec_splitter_received_output(struct cec_splitter_port *port, struct cec_msg *msg, + struct cec_adapter *input_adap); +void cec_splitter_nb_transmit_canceled_output(struct cec_splitter_port *port, + const struct cec_msg *msg, + struct cec_adapter *input_adap); +bool cec_splitter_poll(struct cec_splitter *splitter, + struct cec_adapter *input_adap, bool debug); + +#endif diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c new file mode 100644 index 00000000000000..8526f613a40ef4 --- /dev/null +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c @@ -0,0 +1,1836 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +/* + * Currently this driver does not fully support the serial port of the + * Extron, only the USB port is fully supported. + * + * Issues specific to using the serial port instead of the USB since the + * serial port doesn't detect if the device is powered off: + * + * - Some periodic ping mechanism is needed to detect when the Extron is + * powered off and when it is powered on again. + * - What to do when it is powered off and the driver is modprobed? Keep + * trying to contact the Extron indefinitely? 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "extron-da-hd-4k-plus.h" + +MODULE_AUTHOR("Hans Verkuil "); +MODULE_DESCRIPTION("Extron DA HD 4K PLUS HDMI CEC driver"); +MODULE_LICENSE("GPL"); + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "debug level (0-1)"); + +static unsigned int vendor_id; +module_param(vendor_id, uint, 0444); +MODULE_PARM_DESC(vendor_id, "CEC Vendor ID"); + +static char manufacturer_name[4]; +module_param_string(manufacturer_name, manufacturer_name, + sizeof(manufacturer_name), 0644); +MODULE_PARM_DESC(manufacturer_name, + "EDID Vendor String (3 uppercase characters)"); + +static bool hpd_never_low; +module_param(hpd_never_low, bool, 0644); +MODULE_PARM_DESC(hpd_never_low, "Input HPD will never go low (1), or go low if all output HPDs are low (0, default)"); + +#define EXTRON_TIMEOUT_SECS 6 + +static const u8 hdmi_edid[256] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, + 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78, + 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, + 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, + 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, + 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e, + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18, + 0x87, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68, + 0x64, 0x6d, 0x69, 0x2d, 0x31, 0x30, 0x38, 0x30, + 0x70, 0x36, 0x30, 0x0a, 0x00, 0x00, 0x00, 0xfe, + 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x95, + + 0x02, 0x03, 0x1b, 0xf1, 0x42, 0x10, 0x01, 0x23, + 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x68, + 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00, 0x21, 0x01, + 0xe2, 0x00, 0xca, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, +}; + +static const u8 hdmi_edid_4k_300[256] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, + 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78, + 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, + 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, + 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, + 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e, + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18, + 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68, + 0x64, 0x6d, 0x69, 0x2d, 0x34, 0x6b, 0x2d, 0x36, + 0x30, 0x30, 0x0a, 0x20, 0x00, 0x00, 0x00, 0xfe, + 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x87, + + 0x02, 0x03, 0x1f, 0xf1, 0x43, 0x10, 0x5f, 0x01, + 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, + 0x6b, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00, 
0x3c, + 0x21, 0x00, 0x20, 0x01, 0xe2, 0x00, 0xca, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, +}; + +static const u8 hdmi_edid_4k_600[256] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, + 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78, + 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, + 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08, 0xe8, + 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58, + 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e, + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18, + 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68, + 0x64, 0x6d, 0x69, 0x2d, 0x34, 0x6b, 0x2d, 0x36, + 0x30, 0x30, 0x0a, 0x20, 0x00, 0x00, 0x00, 0xfe, + 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x4c, + + 0x02, 0x03, 0x28, 0xf1, 0x44, 0x61, 0x5f, 0x10, + 0x01, 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, + 0x00, 0x6b, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00, + 0x3c, 0x21, 0x00, 0x20, 0x01, 0x67, 0xd8, 0x5d, + 0xc4, 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, +}; + +static int extron_send_byte(struct serio *serio, char byte) +{ + int err, i; + + for (i = 0; i < 100; i++) { + err = serio_write(serio, byte); + if (!err) + break; + usleep_range(80, 120); + } + if (err) + dev_warn(&serio->dev, "unable to write byte after 100 attempts\n"); + return err ? 
-EIO : 0; +} + +static int extron_send_len(struct serio *serio, const char *command, + const unsigned char *bin, unsigned int len) +{ + int err = 0; + + for (; !err && *command; command++) + err = extron_send_byte(serio, *command); + if (!err) + err = extron_send_byte(serio, '\r'); + if (bin) + for (; !err && len; len--) + err = extron_send_byte(serio, *bin++); + return err; +} + +static int extron_send_and_wait_len(struct extron *extron, struct extron_port *port, + const char *cmd, const unsigned char *bin, + unsigned int len, const char *response) +{ + int timeout = EXTRON_TIMEOUT_SECS * HZ; + int err; + + if (debug) { + if (response) + dev_info(extron->dev, "transmit %s (response: %s)\n", + cmd, response); + else + dev_info(extron->dev, "transmit %s\n", cmd); + } + + mutex_lock(&extron->serio_lock); + if (port) { + init_completion(&port->cmd_done); + port->cmd_error = 0; + port->response = response; + } else { + init_completion(&extron->cmd_done); + extron->cmd_error = 0; + extron->response = response; + } + err = extron_send_len(extron->serio, cmd, bin, len); + + if (!err && response && + !wait_for_completion_timeout(port ? &port->cmd_done : &extron->cmd_done, timeout)) { + dev_info(extron->dev, "transmit %s failed with %s (expected: %s)\n", + cmd, extron->reply, response); + err = -ETIMEDOUT; + } + + if (!err && response && (port ? port->cmd_error : extron->cmd_error)) { + dev_info(extron->dev, "transmit %s failed with E%02u (expected: %s)\n", + cmd, port ? port->cmd_error : extron->cmd_error, response); + if (port) + port->cmd_error = 0; + else + extron->cmd_error = 0; + err = -EPROTO; + } + if (port) + port->response = NULL; + else + extron->response = NULL; + mutex_unlock(&extron->serio_lock); + return err; +} + +static int extron_send_and_wait(struct extron *extron, struct extron_port *port, + const char *cmd, const char *response) +{ + return extron_send_and_wait_len(extron, port, cmd, NULL, 0, response); +} + +static void extron_parse_edid(struct extron_port *port) +{ + const u8 *edid = port->edid; + unsigned int i, end; + u8 d; + + port->has_4kp30 = false; + port->has_4kp60 = false; + port->has_qy = false; + port->has_qs = false; + /* Store Established Timings 1 and 2 */ + port->est_i = edid[0x23]; + port->est_ii = edid[0x24]; + + // Check DTDs in base block + for (i = 0; i < 4; i++) { + const u8 *dtd = edid + 0x36 + i * 18; + unsigned int w, h; + unsigned int mhz; + u64 pclk; + + if (!dtd[0] && !dtd[1]) + continue; + w = dtd[2] + ((dtd[4] & 0xf0) << 4); + h = dtd[5] + ((dtd[7] & 0xf0) << 4); + if (w != 3840 || h != 2160) + continue; + + w += dtd[3] + ((dtd[4] & 0x0f) << 8); + h += dtd[6] + ((dtd[7] & 0x0f) << 8); + pclk = dtd[0] + (dtd[1] << 8); + pclk *= 100000; + mhz = div_u64(pclk, w * h); + if (mhz >= 297) + port->has_4kp30 = true; + if (mhz >= 594) + port->has_4kp60 = true; + } + + if (port->edid_blocks == 1) + return; + + edid += 128; + + /* Return if not a CTA-861 extension block */ + if (edid[0] != 0x02 || edid[1] != 0x03) + return; + + /* search Video Data Block (tag 2) */ + d = edid[2] & 0x7f; + /* Check if there are Data Blocks */ + if (d <= 4) + return; + + i = 4; + end = d; + + do { + u8 tag = edid[i] >> 5; + u8 len = edid[i] & 0x1f; + + /* Avoid buffer overrun in case the EDID is malformed */ + if (i + len + 1 > 0x7f) + return; + + switch (tag) { + case 2: /* Video Data Block */ + /* Search for VIC 97 */ + if (memchr(edid + i + 1, 97, len)) + port->has_4kp60 = true; + /* Search for VIC 95 */ + if (memchr(edid + i + 1, 95, len)) + port->has_4kp30 = true; + break; 
+ + case 7: /* Use Extended Tag */ + switch (edid[i + 1]) { + case 0: /* Video Capability Data Block */ + if (edid[i + 2] & 0x80) + port->has_qy = true; + if (edid[i + 2] & 0x40) + port->has_qs = true; + break; + } + break; + } + i += len + 1; + } while (i < end); +} + +static int get_edid_tag_location(const u8 *edid, unsigned int size, + u8 want_tag, u8 ext_tag) +{ + unsigned int offset = 128; + int i, end; + u8 d; + + edid += offset; + + /* Return if not a CTA-861 extension block */ + if (size < 256 || edid[0] != 0x02 || edid[1] != 0x03) + return -1; + + /* search tag */ + d = edid[0x02] & 0x7f; + if (d <= 4) + return -1; + + i = 0x04; + end = 0x00 + d; + + do { + unsigned char tag = edid[i] >> 5; + unsigned char len = edid[i] & 0x1f; + + if (tag != want_tag || i + len > end) { + i += len + 1; + continue; + } + + if (tag < 7 || (len >= 1 && edid[i + 1] == ext_tag)) + return offset + i; + i += len + 1; + } while (i < end); + return -1; +} + +static void extron_edid_crc(u8 *edid) +{ + u8 sum = 0; + int offset; + + /* Update CRC */ + for (offset = 0; offset < 127; offset++) + sum += edid[offset]; + edid[127] = 256 - sum; +} + +/* + * Fill in EDID string. As per VESA EDID-1.3, strings are at most 13 chars + * long. If shorter then add a 0x0a character after the string and pad the + * remainder with spaces. + */ +static void extron_set_edid_string(u8 *start, const char *s) +{ + const unsigned int max_len = 13; + int len = strlen(s); + + memset(start, ' ', max_len); + if (len > max_len) + len = max_len; + memcpy(start, s, len); + if (len < max_len) + start[len] = 0x0a; +} + +static void extron_update_edid(struct extron_port *port, unsigned int blocks) +{ + int offset; + u8 c1, c2; + + c1 = ((manufacturer_name[0] - '@') << 2) | + (((manufacturer_name[1] - '@') >> 3) & 0x03); + c2 = (((manufacturer_name[1] - '@') & 0x07) << 5) | + ((manufacturer_name[2] - '@') & 0x1f); + + port->edid_tmp[8] = c1; + port->edid_tmp[9] = c2; + + /* Set Established Timings, but always enable VGA */ + port->edid_tmp[0x23] = port->est_i | 0x20; + port->edid_tmp[0x24] = port->est_ii; + + /* Set the Monitor Name to the unit name */ + extron_set_edid_string(port->edid_tmp + 0x5f, port->extron->unit_name); + /* Set the ASCII String to the CEC adapter name */ + extron_set_edid_string(port->edid_tmp + 0x71, port->adap->name); + + extron_edid_crc(port->edid_tmp); + + /* Find Video Capability Data Block */ + offset = get_edid_tag_location(port->edid_tmp, blocks * 128, 7, 0); + if (offset > 0) { + port->edid_tmp[offset + 2] &= ~0xc0; + if (port->has_qy) + port->edid_tmp[offset + 2] |= 0x80; + if (port->has_qs) + port->edid_tmp[offset + 2] |= 0x40; + } + + extron_edid_crc(port->edid_tmp + 128); +} + +static int extron_write_edid(struct extron_port *port, + const u8 *edid, unsigned int blocks) +{ + struct extron *extron = port->extron; + u16 phys_addr = CEC_PHYS_ADDR_INVALID; + int ret; + + if (cec_get_edid_spa_location(edid, blocks * 128)) + phys_addr = 0; + + if (mutex_lock_interruptible(&extron->edid_lock)) + return -EINTR; + + memcpy(port->edid_tmp, edid, blocks * 128); + + if (manufacturer_name[0]) + extron_update_edid(port, blocks); + + ret = extron_send_and_wait_len(port->extron, port, "W+UF256,in.bin", + port->edid_tmp, sizeof(port->edid_tmp), + "Upl"); + if (ret) + goto unlock; + ret = extron_send_and_wait(port->extron, port, "WI1,in.binEDID", + "EdidI01"); + if (ret) + goto unlock; + + port->edid_blocks = blocks; + memcpy(port->edid, port->edid_tmp, blocks * 128); + port->read_edid = true; + 
mutex_unlock(&extron->edid_lock); + + cec_s_phys_addr(port->adap, phys_addr, false); + return 0; + +unlock: + mutex_unlock(&extron->edid_lock); + return ret; +} + +static void update_edid_work(struct work_struct *w) +{ + struct extron *extron = container_of(w, struct extron, + work_update_edid.work); + struct extron_port *in = extron->ports[extron->num_out_ports]; + struct extron_port *p; + bool has_edid = false; + bool has_4kp30 = true; + bool has_4kp60 = true; + bool has_qy = true; + bool has_qs = true; + u8 est_i = 0xff; + u8 est_ii = 0xff; + unsigned int out; + + for (out = 0; has_4kp60 && out < extron->num_out_ports; out++) { + p = extron->ports[out]; + if (p->read_edid) { + has_4kp60 = p->has_4kp60; + est_i &= p->est_i; + est_ii &= p->est_ii; + has_edid = true; + } + } + for (out = 0; has_4kp30 && out < extron->num_out_ports; out++) + if (extron->ports[out]->read_edid) + has_4kp30 = extron->ports[out]->has_4kp30; + + for (out = 0; has_qy && out < extron->num_out_ports; out++) + if (extron->ports[out]->read_edid) + has_qy = extron->ports[out]->has_qy; + + for (out = 0; has_qs && out < extron->num_out_ports; out++) + if (extron->ports[out]->read_edid) + has_qs = extron->ports[out]->has_qs; + + /* exit if no output port had an EDID */ + if (!has_edid) + return; + + /* exit if the input EDID properties remained unchanged */ + if (has_4kp60 == in->has_4kp60 && has_4kp30 == in->has_4kp30 && + has_qy == in->has_qy && has_qs == in->has_qs && + est_i == in->est_i && est_ii == in->est_ii) + return; + + in->has_4kp60 = has_4kp60; + in->has_4kp30 = has_4kp30; + in->has_qy = has_qy; + in->has_qs = has_qs; + in->est_i = est_i; + in->est_ii = est_ii; + extron_write_edid(extron->ports[extron->num_out_ports], + has_4kp60 ? hdmi_edid_4k_600 : + (has_4kp30 ? hdmi_edid_4k_300 : hdmi_edid), 2); +} + +static void extron_read_edid(struct extron_port *port) +{ + struct extron *extron = port->extron; + char cmd[10], reply[10]; + unsigned int idx; + + idx = port->port.port + (port->is_input ? 
0 : extron->num_in_ports); + snprintf(cmd, sizeof(cmd), "WR%uEDID", idx); + snprintf(reply, sizeof(reply), "EdidR%u", idx); + if (mutex_lock_interruptible(&extron->edid_lock)) + return; + if (port->read_edid) + goto unlock; + extron->edid_bytes_read = 0; + extron->edid_port = port; + port->edid_blocks = 0; + if (!port->has_edid) + goto no_edid; + + extron->edid_reading = true; + + if (!extron_send_and_wait(extron, port, cmd, reply)) + wait_for_completion_killable_timeout(&extron->edid_completion, + msecs_to_jiffies(1000)); + if (port->edid_blocks) { + extron_parse_edid(port); + port->read_edid = true; + if (!port->is_input) + v4l2_ctrl_s_ctrl(port->ctrl_tx_edid_present, 1); + } +no_edid: + extron->edid_reading = false; +unlock: + mutex_unlock(&extron->edid_lock); + cancel_delayed_work_sync(&extron->work_update_edid); + if (manufacturer_name[0]) + schedule_delayed_work(&extron->work_update_edid, + msecs_to_jiffies(1000)); +} + +static void extron_irq_work_handler(struct work_struct *work) +{ + struct extron_port *port = + container_of(work, struct extron_port, irq_work); + struct extron *extron = port->extron; + unsigned long flags; + bool update_pa; + u16 pa; + bool update_has_signal; + bool has_signal; + bool update_has_edid; + bool has_edid; + u32 status; + + spin_lock_irqsave(&port->msg_lock, flags); + while (port->rx_msg_num) { + spin_unlock_irqrestore(&port->msg_lock, flags); + cec_received_msg(port->adap, + &port->rx_msg[port->rx_msg_cur_idx]); + spin_lock_irqsave(&port->msg_lock, flags); + if (port->rx_msg_num) + port->rx_msg_num--; + port->rx_msg_cur_idx = + (port->rx_msg_cur_idx + 1) % NUM_MSGS; + } + update_pa = port->update_phys_addr; + pa = port->phys_addr; + port->update_phys_addr = false; + update_has_signal = port->update_has_signal; + has_signal = port->has_signal; + port->update_has_signal = false; + update_has_edid = port->update_has_edid; + has_edid = port->has_edid; + port->update_has_edid = false; + status = port->tx_done_status; + port->tx_done_status = 0; + spin_unlock_irqrestore(&port->msg_lock, flags); + + if (status) + cec_transmit_done(port->adap, status, 0, 0, 0, 0); + + if (update_has_signal && port->is_input) + v4l2_ctrl_s_ctrl(port->ctrl_rx_power_present, has_signal); + + if (update_has_edid && !port->is_input) { + v4l2_ctrl_s_ctrl(port->ctrl_tx_hotplug, + port->has_edid); + if (port->has_edid) { + port->port.found_sink = true; + port->port.lost_sink_ts = ktime_set(0, 0); + } else { + port->port.lost_sink_ts = ktime_get(); + } + if (!has_edid) { + port->edid_blocks = 0; + port->read_edid = false; + if (extron->edid_reading && !has_edid && + extron->edid_port == port) + extron->edid_reading = false; + v4l2_ctrl_s_ctrl(port->ctrl_tx_edid_present, 0); + } else if (!extron->edid_reading || extron->edid_port != port) { + extron_read_edid(port); + } + } + if (update_pa) + cec_s_phys_addr(port->adap, pa, false); +} + +static void extron_process_received(struct extron_port *port, const char *data) +{ + struct cec_msg msg = {}; + unsigned int len = strlen(data); + unsigned long irq_flags; + unsigned int idx; + + if (!port || port->disconnected) + return; + + if (len < 5 || (len - 2) % 3 || data[len - 2] != '*') + goto malformed; + + while (*data != '*') { + int v = hex2bin(&msg.msg[msg.len], data + 1, 1); + + if (*data != '%' || v) + goto malformed; + msg.len++; + data += 3; + } + + spin_lock_irqsave(&port->msg_lock, irq_flags); + idx = (port->rx_msg_cur_idx + port->rx_msg_num) % + NUM_MSGS; + if (port->rx_msg_num == NUM_MSGS) { + dev_warn(port->dev, + "message queue 
is full, dropping %*ph\n", + msg.len, msg.msg); + spin_unlock_irqrestore(&port->msg_lock, + irq_flags); + return; + } + port->rx_msg_num++; + port->rx_msg[idx] = msg; + spin_unlock_irqrestore(&port->msg_lock, irq_flags); + if (!port->disconnected) + schedule_work(&port->irq_work); + return; + +malformed: + dev_info(port->extron->dev, "malformed msg received: '%s'\n", data); +} + +static void extron_port_signal_change(struct extron_port *port, bool has_sig) +{ + unsigned long irq_flags; + bool update = false; + + if (!port) + return; + + spin_lock_irqsave(&port->msg_lock, irq_flags); + if (!port->update_has_signal && port->has_signal != has_sig) { + port->update_has_signal = true; + update = true; + } + port->has_signal = has_sig; + spin_unlock_irqrestore(&port->msg_lock, irq_flags); + if (update && !port->disconnected) + schedule_work(&port->irq_work); +} + +static void extron_process_signal_change(struct extron *extron, const char *data) +{ + unsigned int i; + + extron_port_signal_change(extron->ports[extron->num_out_ports], + data[0] == '1'); + for (i = 0; i < extron->num_out_ports; i++) + extron_port_signal_change(extron->ports[i], + data[2 + 2 * i] != '0'); +} + +static void extron_port_edid_change(struct extron_port *port, bool has_edid) +{ + unsigned long irq_flags; + bool update = false; + + if (!port) + return; + + spin_lock_irqsave(&port->msg_lock, irq_flags); + if (!port->update_has_edid && port->has_edid != has_edid) { + port->update_has_edid = true; + update = true; + } + port->has_edid = has_edid; + spin_unlock_irqrestore(&port->msg_lock, irq_flags); + if (update && !port->disconnected) + schedule_work(&port->irq_work); +} + +static void extron_process_edid_change(struct extron *extron, const char *data) +{ + unsigned int i; + + /* + * Do nothing if the Extron isn't ready yet. Trying to do this + * while the Extron firmware is still settling will fail. + */ + if (!extron->is_ready) + return; + + for (i = 0; i < extron->num_out_ports; i++) + extron_port_edid_change(extron->ports[i], + data[2 + 2 * i] != '0'); +} + +static void extron_phys_addr_change(struct extron_port *port, u16 pa) +{ + unsigned long irq_flags; + bool update = false; + + if (!port) + return; + + spin_lock_irqsave(&port->msg_lock, irq_flags); + if (!port->update_phys_addr && port->phys_addr != pa) { + update = true; + port->update_phys_addr = true; + } + port->phys_addr = pa; + spin_unlock_irqrestore(&port->msg_lock, irq_flags); + if (update && !port->disconnected) + schedule_work(&port->irq_work); +} + +static void extron_process_tx_done(struct extron_port *port, char status) +{ + unsigned long irq_flags; + unsigned int tx_status; + + if (!port) + return; + + switch (status) { + case '0': + tx_status = CEC_TX_STATUS_NACK | CEC_TX_STATUS_MAX_RETRIES; + break; + case '1': + tx_status = CEC_TX_STATUS_OK; + break; + default: + tx_status = CEC_TX_STATUS_ERROR; + break; + } + spin_lock_irqsave(&port->msg_lock, irq_flags); + port->tx_done_status = tx_status; + spin_unlock_irqrestore(&port->msg_lock, irq_flags); + if (!port->disconnected) + schedule_work(&port->irq_work); +} + +static void extron_add_edid(struct extron_port *port, const char *hex) +{ + struct extron *extron = port ? 
port->extron : NULL; + + if (!port || port != extron->edid_port) + return; + while (extron->edid_bytes_read < sizeof(port->edid) && *hex) { + int err = hex2bin(&port->edid[extron->edid_bytes_read], hex, 1); + + if (err) { + extron->edid_reading = false; + complete(&extron->edid_completion); + break; + } + extron->edid_bytes_read++; + hex += 2; + } + if (extron->edid_bytes_read == 128 && + port->edid[126] == 0) { + /* There are no extension blocks, we're done */ + port->edid_blocks = 1; + extron->edid_reading = false; + complete(&extron->edid_completion); + } + if (extron->edid_bytes_read < sizeof(port->edid)) + return; + if (!*hex) + port->edid_blocks = 2; + extron->edid_reading = false; + complete(&extron->edid_completion); +} + +static irqreturn_t extron_interrupt(struct serio *serio, unsigned char data, + unsigned int flags) +{ + struct extron *extron = serio_get_drvdata(serio); + struct extron_port *port = NULL; + bool found_response; + unsigned int p; + + if (data == '\r' || data == '\n') { + if (extron->idx == 0) + return IRQ_HANDLED; + memcpy(extron->data, extron->buf, extron->idx); + extron->len = extron->idx; + extron->data[extron->len] = 0; + if (debug) + dev_info(extron->dev, "received %s\n", extron->data); + extron->idx = 0; + if (!memcmp(extron->data, "Sig", 3) && + extron->data[4] == '*') { + extron_process_signal_change(extron, extron->data + 3); + } else if (!memcmp(extron->data, "Hdcp", 4) && + extron->data[5] == '*') { + extron_process_edid_change(extron, extron->data + 4); + } else if (!memcmp(extron->data, "DcecI", 5) && + extron->data[5] >= '1' && + extron->data[5] < '1' + extron->num_in_ports) { + unsigned int p = extron->data[5] - '1'; + + p += extron->num_out_ports; + extron_process_tx_done(extron->ports[p], + extron->data[extron->len - 1]); + } else if (!memcmp(extron->data, "Ceci", 4) && + extron->data[4] >= '1' && + extron->data[4] < '1' + extron->num_in_ports && + extron->data[5] == '*') { + unsigned int p = extron->data[4] - '1'; + + p += extron->num_out_ports; + extron_process_received(extron->ports[p], + extron->data + 6); + } else if (!memcmp(extron->data, "DcecO", 5) && + extron->data[5] >= '1' && + extron->data[5] < '1' + extron->num_out_ports) { + unsigned int p = extron->data[5] - '1'; + + extron_process_tx_done(extron->ports[p], + extron->data[extron->len - 1]); + } else if (!memcmp(extron->data, "Ceco", 4) && + extron->data[4] >= '1' && + extron->data[4] < '1' + extron->num_out_ports && + extron->data[5] == '*') { + unsigned int p = extron->data[4] - '1'; + + extron_process_received(extron->ports[p], + extron->data + 6); + } else if (!memcmp(extron->data, "Pceco", 5) && + extron->data[5] >= '1' && + extron->data[5] < '1' + extron->num_out_ports) { + unsigned int p = extron->data[5] - '1'; + unsigned int tmp_pa[2] = { 0xff, 0xff }; + + if (sscanf(extron->data + 7, "%%%02x%%%02x", + &tmp_pa[0], &tmp_pa[1]) == 2) + extron_phys_addr_change(extron->ports[p], + tmp_pa[0] << 8 | tmp_pa[1]); + } else if (!memcmp(extron->data, "Pceci", 5) && + extron->data[5] >= '1' && + extron->data[5] < '1' + extron->num_in_ports) { + unsigned int p = extron->data[5] - '1'; + unsigned int tmp_pa[2] = { 0xff, 0xff }; + + p += extron->num_out_ports; + if (sscanf(extron->data + 7, "%%%02x%%%02x", + &tmp_pa[0], &tmp_pa[1]) == 2) + extron_phys_addr_change(extron->ports[p], + tmp_pa[0] << 8 | tmp_pa[1]); + } else if (!memcmp(extron->data, "EdidR", 5) && + extron->data[5] >= '1' && + extron->data[5] < '1' + extron->num_ports && + extron->data[6] == '*') { + unsigned int p = 
extron->data[5] - '1'; + + if (p) + p--; + else + p = extron->num_out_ports; + extron_add_edid(extron->ports[p], extron->data + 7); + } else if (extron->edid_reading && extron->len == 32 && + extron->edid_port) { + extron_add_edid(extron->edid_port, extron->data); + } + + found_response = false; + if (extron->response && + !strncmp(extron->response, extron->data, + strlen(extron->response))) + found_response = true; + + for (p = 0; !found_response && p < extron->num_ports; p++) { + port = extron->ports[p]; + if (port && port->response && + !strncmp(port->response, extron->data, + strlen(port->response))) + found_response = true; + } + + if (!found_response && extron->response && + extron->data[0] == 'E' && + isdigit(extron->data[1]) && + isdigit(extron->data[2]) && + !extron->data[3]) { + extron->cmd_error = (extron->data[1] - '0') * 10 + + extron->data[2] - '0'; + extron->response = NULL; + complete(&extron->cmd_done); + } + + if (!found_response) + return IRQ_HANDLED; + + memcpy(extron->reply, extron->data, extron->len); + extron->reply[extron->len] = 0; + if (!port) { + extron->response = NULL; + complete(&extron->cmd_done); + } else { + port->response = NULL; + complete(&port->cmd_done); + } + return IRQ_HANDLED; + } + + if (extron->idx >= DATA_SIZE - 1) { + dev_info(extron->dev, + "throwing away %d bytes of garbage\n", extron->idx); + extron->idx = 0; + } + extron->buf[extron->idx++] = (char)data; + return IRQ_HANDLED; +} + +static int extron_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct extron_port *port = cec_get_drvdata(adap); + + return (port->disconnected && enable) ? -ENODEV : 0; +} + +static int extron_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) +{ + struct extron_port *port = cec_get_drvdata(adap); + char cmd[26]; + char resp[25]; + u8 la = log_addr == CEC_LOG_ADDR_INVALID ? 15 : log_addr; + int err; + + if (port->disconnected) + return -ENODEV; + snprintf(cmd, sizeof(cmd), "W%c%u*%uLCEC", + port->direction, port->port.port, la); + snprintf(resp, sizeof(resp), "Lcec%c%u*%u", + port->direction, port->port.port, la); + err = extron_send_and_wait(port->extron, port, cmd, resp); + return log_addr != CEC_LOG_ADDR_INVALID && err ? err : 0; +} + +static int extron_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct extron_port *port = cec_get_drvdata(adap); + char buf[CEC_MAX_MSG_SIZE * 3 + 1]; + char cmd[CEC_MAX_MSG_SIZE * 3 + 13]; + unsigned int i; + + if (port->disconnected) + return -ENODEV; + buf[0] = 0; + for (i = 0; i < msg->len - 1; i++) + sprintf(buf + i * 3, "%%%02X", msg->msg[i + 1]); + snprintf(cmd, sizeof(cmd), "W%c%u*%u*%u*%sDCEC", + port->direction, port->port.port, + cec_msg_initiator(msg), cec_msg_destination(msg), buf); + return extron_send_and_wait(port->extron, port, cmd, NULL); +} + +static void extron_cec_adap_unconfigured(struct cec_adapter *adap) +{ + struct extron_port *port = cec_get_drvdata(adap); + + if (port->disconnected) + return; + if (debug) + dev_info(port->extron->dev, "unconfigured port %d (%s)\n", + port->port.port, + port->extron->splitter.is_standby ? "Off" : "On"); + if (!port->is_input) + cec_splitter_unconfigured_output(&port->port); +} + +static void extron_cec_configured(struct cec_adapter *adap) +{ + struct extron_port *port = cec_get_drvdata(adap); + + if (port->disconnected) + return; + if (debug) + dev_info(port->extron->dev, "configured port %d (%s)\n", + port->port.port, + port->extron->splitter.is_standby ? 
"Off" : "On"); + if (!port->is_input) + cec_splitter_configured_output(&port->port); +} + +static void extron_cec_adap_nb_transmit_canceled(struct cec_adapter *adap, + const struct cec_msg *msg) +{ + struct extron_port *port = cec_get_drvdata(adap); + struct cec_adapter *input_adap; + + if (!vendor_id) + return; + if (port->disconnected || port->is_input) + return; + input_adap = port->extron->ports[port->extron->num_out_ports]->adap; + cec_splitter_nb_transmit_canceled_output(&port->port, msg, input_adap); +} + +static int extron_received(struct cec_adapter *adap, struct cec_msg *msg) +{ + struct extron_port *port = cec_get_drvdata(adap); + + if (!vendor_id) + return -ENOMSG; + if (port->disconnected) + return -ENOMSG; + if (port->is_input) + return cec_splitter_received_input(&port->port, msg); + return cec_splitter_received_output(&port->port, msg, + port->extron->ports[port->extron->num_out_ports]->adap); +} + +#define log_printf(adap, file, fmt, arg...) \ + do { \ + if (file) \ + seq_printf((file), fmt, ## arg); \ + else \ + pr_info("cec-%s: " fmt, (adap)->name, ## arg); \ + } while (0) + +static const char * const pwr_state[] = { + "on", + "standby", + "to on", + "to standby", +}; + +static void extron_adap_status_port(struct extron_port *port, struct seq_file *file) +{ + struct cec_adapter *adap = port->adap; + + if (port->disconnected) { + log_printf(adap, file, + "\tport %u: disconnected\n", port->port.port); + return; + } + if (port->is_input) + log_printf(adap, file, + "\tport %u: %s signal, %s edid, %s 4kp30, %s 4kp60, %sQS/%sQY, is %s\n", + port->port.port, + port->has_signal ? "has" : "no", + port->has_edid ? "has" : "no", + port->has_4kp30 ? "has" : "no", + port->has_4kp60 ? "has" : "no", + port->has_qs ? "" : "no ", + port->has_qy ? "" : "no ", + !port->port.adap->is_configured ? "not configured" : + pwr_state[port->extron->splitter.is_standby]); + else + log_printf(adap, file, + "\tport %u: %s sink, %s signal, %s edid, %s 4kp30, %s 4kp60, %sQS/%sQY, is %sactive source, is %s\n", + port->port.port, + port->port.found_sink ? "found" : "no", + port->has_signal ? "has" : "no", + port->has_edid ? "has" : "no", + port->has_4kp30 ? "has" : "no", + port->has_4kp60 ? "has" : "no", + port->has_qs ? "" : "no ", + port->has_qy ? "" : "no ", + port->port.is_active_source ? "" : "not ", + !port->port.adap->is_configured ? 
"not configured" : + pwr_state[port->port.power_status & 3]); + if (port->port.out_give_device_power_status_seq) + log_printf(adap, file, + "\tport %u: querying power status (%u, %lldms)\n", + port->port.port, + port->port.out_give_device_power_status_seq & ~(1 << 31), + ktime_ms_delta(ktime_get(), + port->port.out_give_device_power_status_ts)); + if (port->port.out_request_current_latency_seq) + log_printf(adap, file, + "\tport %u: querying latency (%u, %lldms)\n", + port->port.port, + port->port.out_request_current_latency_seq & ~(1 << 31), + ktime_ms_delta(ktime_get(), + port->port.out_request_current_latency_ts)); +} + +static void extron_adap_status(struct cec_adapter *adap, struct seq_file *file) +{ + struct extron_port *port = cec_get_drvdata(adap); + struct extron *extron = port->extron; + unsigned int i; + + log_printf(adap, file, "name: %s type: %s\n", + extron->unit_name, extron->unit_type); + log_printf(adap, file, "model: 60-160%c-01 (1 input, %u outputs)\n", + '6' + extron->num_out_ports / 2, extron->num_out_ports); + log_printf(adap, file, "firmware version: %s CEC engine version: %s\n", + extron->unit_fw_version, extron->unit_cec_engine_version); + if (extron->hpd_never_low) + log_printf(adap, file, "always keep input HPD high\n"); + else + log_printf(adap, file, + "pull input HPD low if all output HPDs are low\n"); + if (vendor_id) + log_printf(adap, file, + "splitter vendor ID: 0x%06x\n", vendor_id); + if (manufacturer_name[0]) + log_printf(adap, file, "splitter manufacturer name: %s\n", + manufacturer_name); + log_printf(adap, file, "splitter power status: %s\n", + pwr_state[extron->splitter.is_standby]); + log_printf(adap, file, "%s port: %d (%s)\n", + port->is_input ? "input" : "output", + port->port.port, port->name); + log_printf(adap, file, "splitter input port:\n"); + extron_adap_status_port(extron->ports[extron->num_out_ports], file); + + log_printf(adap, file, "splitter output ports:\n"); + for (i = 0; i < extron->num_out_ports; i++) + extron_adap_status_port(extron->ports[i], file); + + if (!port->has_edid || !port->read_edid) + return; + + for (i = 0; i < port->edid_blocks * 128; i += 16) { + if (i % 128 == 0) + log_printf(adap, file, "\n"); + log_printf(adap, file, "EDID: %*ph\n", 16, port->edid + i); + } +} + +static const struct cec_adap_ops extron_cec_adap_ops = { + .adap_enable = extron_cec_adap_enable, + .adap_log_addr = extron_cec_adap_log_addr, + .adap_transmit = extron_cec_adap_transmit, + .adap_nb_transmit_canceled = extron_cec_adap_nb_transmit_canceled, + .adap_unconfigured = extron_cec_adap_unconfigured, + .adap_status = extron_adap_status, + .configured = extron_cec_configured, + .received = extron_received, +}; + +static int extron_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct extron_port *port = video_drvdata(file); + + strscpy(cap->driver, "extron-da-hd-4k-plus-cec", sizeof(cap->driver)); + strscpy(cap->card, cap->driver, sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "serio:%s", port->name); + return 0; +} + +static int extron_enum_input(struct file *file, void *priv, struct v4l2_input *inp) +{ + struct extron_port *port = video_drvdata(file); + + if (inp->index) + return -EINVAL; + inp->type = V4L2_INPUT_TYPE_CAMERA; + snprintf(inp->name, sizeof(inp->name), "HDMI IN %u", port->port.port); + inp->status = v4l2_ctrl_g_ctrl(port->ctrl_rx_power_present) ? 
+ 0 : V4L2_IN_ST_NO_SIGNAL; + return 0; +} + +static int extron_g_input(struct file *file, void *priv, unsigned int *i) +{ + *i = 0; + return 0; +} + +static int extron_s_input(struct file *file, void *priv, unsigned int i) +{ + return i ? -EINVAL : 0; +} + +static int extron_enum_output(struct file *file, void *priv, struct v4l2_output *out) +{ + struct extron_port *port = video_drvdata(file); + + if (out->index) + return -EINVAL; + out->type = V4L2_OUTPUT_TYPE_ANALOG; + snprintf(out->name, sizeof(out->name), "HDMI OUT %u", port->port.port); + return 0; +} + +static int extron_g_output(struct file *file, void *priv, unsigned int *o) +{ + *o = 0; + return 0; +} + +static int extron_s_output(struct file *file, void *priv, unsigned int o) +{ + return o ? -EINVAL : 0; +} + +static int extron_g_edid(struct file *file, void *_fh, + struct v4l2_edid *edid) +{ + struct extron_port *port = video_drvdata(file); + + memset(edid->reserved, 0, sizeof(edid->reserved)); + if (port->disconnected) + return -ENODEV; + if (edid->pad) + return -EINVAL; + if (!port->has_edid) + return -ENODATA; + if (!port->read_edid) + extron_read_edid(port); + if (!port->read_edid) + return -ENODATA; + if (edid->start_block == 0 && edid->blocks == 0) { + edid->blocks = port->edid_blocks; + return 0; + } + if (edid->start_block >= port->edid_blocks) + return -EINVAL; + if (edid->blocks > port->edid_blocks - edid->start_block) + edid->blocks = port->edid_blocks - edid->start_block; + memcpy(edid->edid, port->edid + edid->start_block * 128, edid->blocks * 128); + return 0; +} + +static int extron_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid) +{ + struct extron_port *port = video_drvdata(file); + + memset(edid->reserved, 0, sizeof(edid->reserved)); + if (port->disconnected) + return -ENODEV; + if (edid->pad) + return -EINVAL; + + /* Unfortunately it is not possible to clear the EDID */ + if (edid->blocks == 0) + return -EINVAL; + + if (edid->blocks > MAX_EDID_BLOCKS) { + edid->blocks = MAX_EDID_BLOCKS; + return -E2BIG; + } + + if (cec_get_edid_spa_location(edid->edid, edid->blocks * 128)) + v4l2_set_edid_phys_addr(edid->edid, edid->blocks * 128, 0); + extron_parse_edid(port); + return extron_write_edid(port, edid->edid, edid->blocks); +} + +static int extron_log_status(struct file *file, void *priv) +{ + struct extron_port *port = video_drvdata(file); + + extron_adap_status(port->adap, NULL); + return v4l2_ctrl_log_status(file, priv); +} + +static const struct v4l2_ioctl_ops extron_ioctl_ops = { + .vidioc_querycap = extron_querycap, + .vidioc_enum_input = extron_enum_input, + .vidioc_g_input = extron_g_input, + .vidioc_s_input = extron_s_input, + .vidioc_enum_output = extron_enum_output, + .vidioc_g_output = extron_g_output, + .vidioc_s_output = extron_s_output, + .vidioc_g_edid = extron_g_edid, + .vidioc_s_edid = extron_s_edid, + .vidioc_log_status = extron_log_status, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static const struct v4l2_file_operations extron_fops = { + .owner = THIS_MODULE, + .open = v4l2_fh_open, + .release = v4l2_fh_release, + .poll = v4l2_ctrl_poll, + .unlocked_ioctl = video_ioctl2, +}; + +static const struct video_device extron_videodev = { + .name = "extron-da-hd-4k-plus-cec", + .vfl_dir = VFL_DIR_RX, + .fops = &extron_fops, + .ioctl_ops = &extron_ioctl_ops, + .minor = -1, + .release = video_device_release_empty, +}; + +static void extron_disconnect(struct serio *serio) +{ + struct extron *extron = 
serio_get_drvdata(serio); + unsigned int p; + + kthread_stop(extron->kthread_setup); + + for (p = 0; p < extron->num_ports; p++) { + struct extron_port *port = extron->ports[p]; + + if (!port) + continue; + port->disconnected = true; + cancel_work_sync(&port->irq_work); + } + cancel_delayed_work_sync(&extron->work_update_edid); + for (p = 0; p < extron->num_ports; p++) { + struct extron_port *port = extron->ports[p]; + + if (!port) + continue; + + if (port->cec_was_registered) { + if (cec_is_registered(port->adap)) + cec_unregister_adapter(port->adap); + /* + * After registering the adapter, the + * extron_setup_thread() function took an extra + * reference to the device. We call the corresponding + * put here. + */ + cec_put_device(port->adap); + } else { + cec_delete_adapter(port->adap); + } + video_unregister_device(&port->vdev); + } + + complete(&extron->edid_completion); + + for (p = 0; p < extron->num_ports; p++) { + struct extron_port *port = extron->ports[p]; + + if (!port) + continue; + v4l2_ctrl_handler_free(&port->hdl); + mutex_destroy(&port->video_lock); + kfree(port); + } + mutex_destroy(&extron->edid_lock); + mutex_destroy(&extron->serio_lock); + extron->serio = NULL; + serio_set_drvdata(serio, NULL); + serio_close(serio); +} + +static int extron_setup(struct extron *extron) +{ + struct serio *serio = extron->serio; + struct extron_port *port; + u8 *reply = extron->reply; + unsigned int p; + unsigned int major, minor; + int err; + + /* + * Attempt to disable CEC: avoid received CEC messages + * from interfering with the other serial port traffic. + */ + extron_send_and_wait(extron, NULL, "WI1*0CCEC", NULL); + extron_send_and_wait(extron, NULL, "WO0*CCEC", NULL); + + /* Obtain unit part number */ + err = extron_send_and_wait(extron, NULL, "N", "Pno"); + if (err) + return err; + dev_info(extron->dev, "Unit part number: %s\n", reply + 3); + if (strcmp(reply + 3, "60-1607-01") && + strcmp(reply + 3, "60-1608-01") && + strcmp(reply + 3, "60-1609-01")) { + dev_err(extron->dev, "Unsupported model\n"); + return -ENODEV; + } + /* Up to 6 output ports and one input port */ + extron->num_out_ports = 2 * (reply[9] - '6'); + extron->splitter.num_out_ports = extron->num_out_ports; + extron->splitter.ports = extron->splitter_ports; + extron->splitter.dev = extron->dev; + extron->num_in_ports = 1; + extron->num_ports = extron->num_out_ports + extron->num_in_ports; + dev_info(extron->dev, "Unit output ports: %d\n", extron->num_out_ports); + dev_info(extron->dev, "Unit input ports: %d\n", extron->num_in_ports); + + err = extron_send_and_wait(extron, NULL, "W CN", "Ipn "); + if (err) + return err; + dev_info(extron->dev, "Unit name: %s\n", reply + 4); + strscpy(extron->unit_name, reply + 4, sizeof(extron->unit_name)); + + err = extron_send_and_wait(extron, NULL, "*Q", "Bld"); + if (err) + return err; + dev_info(extron->dev, "Unit FW Version: %s\n", reply + 3); + strscpy(extron->unit_fw_version, reply + 3, + sizeof(extron->unit_fw_version)); + if (sscanf(reply + 3, "%u.%u.", &major, &minor) < 2 || + major < 1 || minor < 2) { + dev_err(extron->dev, + "Unsupported FW version (only 1.02 or up is supported)\n"); + return -ENODEV; + } + + err = extron_send_and_wait(extron, NULL, "2i", "Inf02*"); + if (err) + return err; + dev_info(extron->dev, "Unit Type: %s\n", reply + 6); + strscpy(extron->unit_type, reply + 6, sizeof(extron->unit_type)); + + err = extron_send_and_wait(extron, NULL, "39Q", "Ver39*"); + if (err) + return err; + dev_info(extron->dev, "CEC Engine Version: %s\n", reply + 6); + 
strscpy(extron->unit_cec_engine_version, reply + 6, + sizeof(extron->unit_cec_engine_version)); + + /* Disable CEC */ + err = extron_send_and_wait(extron, NULL, "WI1*0CCEC", "CcecI1*"); + if (err) + return err; + err = extron_send_and_wait(extron, NULL, "WO0*CCEC", "CcecO0"); + if (err) + return err; + + extron->hpd_never_low = hpd_never_low; + + /* Pull input port HPD low if all output ports also have a low HPD */ + if (hpd_never_low) { + dev_info(extron->dev, "Always keep input HPD high\n"); + } else { + dev_info(extron->dev, "Pull input HPD low if all output HPDs are low\n"); + extron_send_and_wait(extron, NULL, "W1ihpd", "Ihpd1"); + } + + for (p = 0; p < extron->num_ports; p++) { + u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL; + + if (vendor_id) + caps &= ~CEC_CAP_LOG_ADDRS; + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + INIT_WORK(&port->irq_work, extron_irq_work_handler); + spin_lock_init(&port->msg_lock); + mutex_init(&port->video_lock); + port->extron = extron; + port->is_input = p >= extron->num_out_ports; + port->direction = port->is_input ? 'I' : 'O'; + port->port.port = 1 + (port->is_input ? p - extron->num_out_ports : p); + port->port.splitter = &extron->splitter; + port->phys_addr = CEC_PHYS_ADDR_INVALID; + snprintf(port->name, sizeof(port->name), "%s-%s-%u", + dev_name(&serio->dev), port->is_input ? "in" : "out", + port->port.port); + + port->dev = extron->dev; + port->adap = cec_allocate_adapter(&extron_cec_adap_ops, port, + port->name, caps, 1); + err = PTR_ERR_OR_ZERO(port->adap); + if (err < 0) { + kfree(port); + return err; + } + + port->adap->xfer_timeout_ms = EXTRON_TIMEOUT_SECS * 1000; + port->port.adap = port->adap; + port->vdev = extron_videodev; + port->vdev.lock = &port->video_lock; + port->vdev.v4l2_dev = &extron->v4l2_dev; + port->vdev.ctrl_handler = &port->hdl; + port->vdev.device_caps = V4L2_CAP_EDID; + video_set_drvdata(&port->vdev, port); + + v4l2_ctrl_handler_init(&port->hdl, 2); + + if (port->is_input) { + port->vdev.vfl_dir = VFL_DIR_RX; + port->ctrl_rx_power_present = + v4l2_ctrl_new_std(&port->hdl, NULL, + V4L2_CID_DV_RX_POWER_PRESENT, + 0, 1, 0, 0); + port->has_edid = true; + } else { + port->vdev.vfl_dir = VFL_DIR_TX; + port->ctrl_tx_hotplug = + v4l2_ctrl_new_std(&port->hdl, NULL, + V4L2_CID_DV_TX_HOTPLUG, + 0, 1, 0, 0); + port->ctrl_tx_edid_present = + v4l2_ctrl_new_std(&port->hdl, NULL, + V4L2_CID_DV_TX_EDID_PRESENT, + 0, 1, 0, 0); + } + + err = port->hdl.error; + if (err < 0) { + cec_delete_adapter(port->adap); + kfree(port); + return err; + } + extron->ports[p] = port; + extron->splitter_ports[p] = &port->port; + if (port->is_input && manufacturer_name[0]) + extron_write_edid(port, hdmi_edid, 2); + } + + /* Enable CEC (manual mode, i.e. controlled by the driver) */ + err = extron_send_and_wait(extron, NULL, "WI1*20CCEC", "CcecI1*"); + if (err) + return err; + + err = extron_send_and_wait(extron, NULL, "WO20*CCEC", "CcecO20"); + if (err) + return err; + + /* Set logical addresses to 15 */ + err = extron_send_and_wait(extron, NULL, "WI1*15LCEC", "LcecI1*15"); + if (err) + return err; + + for (p = 0; p < extron->num_out_ports; p++) { + char cmd[20]; + char resp[20]; + + snprintf(cmd, sizeof(cmd), "WO%u*15LCEC", p + 1); + snprintf(resp, sizeof(resp), "LcecO%u*15", p + 1); + err = extron_send_and_wait(extron, extron->ports[p], cmd, resp); + if (err) + return err; + } + + /* + * The Extron is now ready for operation. Specifically it is now + * possible to retrieve EDIDs. 
+ */ + extron->is_ready = true; + + /* Query HDCP and Signal states, used to update the initial state */ + err = extron_send_and_wait(extron, NULL, "WHDCP", "Hdcp"); + if (err) + return err; + + return extron_send_and_wait(extron, NULL, "WLS", "Sig"); +} + +static int extron_setup_thread(void *_extron) +{ + struct extron *extron = _extron; + struct extron_port *port; + unsigned int p; + bool poll_splitter = false; + bool was_connected = true; + int err; + + while (1) { + if (kthread_should_stop()) + return 0; + err = extron_send_and_wait(extron, NULL, "W3CV", "Vrb3"); + // that should make it possible to detect a serio disconnect + // here by stopping the workqueue + if (err >= 0) + break; + was_connected = false; + ssleep(1); + } + + /* + * If the Extron was not connected at probe() time, i.e. it just got + * powered up and while the serial port is working, the firmware is + * still booting up, then wait 10 seconds for the firmware to settle. + * + * Trying to continue too soon means that some commands will not + * work yet. + */ + if (!was_connected) + ssleep(10); + + err = extron_setup(extron); + if (err) + goto disable_ports; + + for (p = 0; p < extron->num_ports; p++) { + struct cec_log_addrs log_addrs = {}; + + port = extron->ports[p]; + if (port->is_input && manufacturer_name[0]) + v4l2_disable_ioctl(&port->vdev, VIDIOC_S_EDID); + err = video_register_device(&port->vdev, VFL_TYPE_VIDEO, -1); + if (err) { + v4l2_err(&extron->v4l2_dev, "Failed to register video device\n"); + goto disable_ports; + } + + err = cec_register_adapter(port->adap, extron->dev); + if (err < 0) + goto disable_ports; + port->dev = &port->adap->devnode.dev; + port->cec_was_registered = true; + /* + * This driver is unusual in that the whole setup takes place + * in a thread since it can take such a long time before the + * Extron Splitter boots up, and you do not want to block the + * probe function on this driver. In addition, as soon as + * CEC adapters come online, they can be used, and you cannot + * just unregister them again if an error occurs, since that + * can delete the underlying CEC adapter, which might already + * be in use. + * + * So we take an additional reference to the adapter. This + * allows us to unregister the device node if needed, without + * deleting the actual adapter. + * + * In the disconnect function we will do the corresponding + * put call to ensure the adapter is deleted. + */ + cec_get_device(port->adap); + + /* + * If vendor_id wasn't set, then userspace configures the + * CEC devices. Otherwise the driver configures the CEC + * devices as TV (input) and Playback (outputs) devices + * and the driver processes all CEC messages. 
+ */ + if (!vendor_id) + continue; + + log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0; + log_addrs.num_log_addrs = 1; + log_addrs.vendor_id = vendor_id; + if (port->is_input) { + strscpy(log_addrs.osd_name, "Splitter In", + sizeof(log_addrs.osd_name)); + log_addrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV; + log_addrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_TV; + log_addrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_TV; + } else { + snprintf(log_addrs.osd_name, sizeof(log_addrs.osd_name), + "Splitter Out%u", port->port.port); + log_addrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK; + log_addrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK; + log_addrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK; + } + err = cec_s_log_addrs(port->adap, &log_addrs, false); + if (err < 0) + goto disable_ports; + } + poll_splitter = true; + + port = extron->ports[extron->num_out_ports]; + while (!kthread_should_stop()) { + ssleep(1); + if (hpd_never_low != extron->hpd_never_low) { + /* + * Keep input port HPD high at all times, or pull it low + * if all output ports also have a low HPD + */ + if (hpd_never_low) { + dev_info(extron->dev, "Always keep input HPD high\n"); + extron_send_and_wait(extron, NULL, "W0ihpd", "Ihpd0"); + } else { + dev_info(extron->dev, "Pull input HPD low if all output HPDs are low\n"); + extron_send_and_wait(extron, NULL, "W1ihpd", "Ihpd1"); + } + extron->hpd_never_low = hpd_never_low; + } + if (poll_splitter && + cec_splitter_poll(&extron->splitter, port->adap, debug) && + manufacturer_name[0]) { + /* + * Sinks were lost, so see if the input edid needs to + * be updated. + */ + cancel_delayed_work_sync(&extron->work_update_edid); + schedule_delayed_work(&extron->work_update_edid, + msecs_to_jiffies(1000)); + } + } + return 0; + +disable_ports: + extron->is_ready = false; + for (p = 0; p < extron->num_ports; p++) { + struct extron_port *port = extron->ports[p]; + + if (!port) + continue; + port->disconnected = true; + cancel_work_sync(&port->irq_work); + video_unregister_device(&port->vdev); + if (port->cec_was_registered) + cec_unregister_adapter(port->adap); + } + cancel_delayed_work_sync(&extron->work_update_edid); + complete(&extron->edid_completion); + dev_err(extron->dev, "Setup failed with error %d\n", err); + while (!kthread_should_stop()) + ssleep(1); + return err; +} + +static int extron_connect(struct serio *serio, struct serio_driver *drv) +{ + struct extron *extron; + int err = -ENOMEM; + + if (manufacturer_name[0] && + (!isupper(manufacturer_name[0]) || + !isupper(manufacturer_name[1]) || + !isupper(manufacturer_name[2]))) { + dev_warn(&serio->dev, "ignoring invalid manufacturer name\n"); + manufacturer_name[0] = 0; + } + + extron = kzalloc(sizeof(*extron), GFP_KERNEL); + + if (!extron) + return -ENOMEM; + + extron->serio = serio; + extron->dev = &serio->dev; + mutex_init(&extron->serio_lock); + mutex_init(&extron->edid_lock); + INIT_DELAYED_WORK(&extron->work_update_edid, update_edid_work); + + err = v4l2_device_register(extron->dev, &extron->v4l2_dev); + if (err) + goto free_device; + + err = serio_open(serio, drv); + if (err) + goto unreg_v4l2_dev; + + serio_set_drvdata(serio, extron); + init_completion(&extron->edid_completion); + + extron->kthread_setup = kthread_run(extron_setup_thread, extron, + "extron-da-hd-4k-plus-cec-%s", dev_name(&serio->dev)); + if (!IS_ERR(extron->kthread_setup)) + return 0; + + dev_err(extron->dev, "kthread_run() failed\n"); + err = PTR_ERR(extron->kthread_setup); + + extron->serio = NULL; + serio_set_drvdata(serio, NULL); + 
serio_close(serio); +unreg_v4l2_dev: + v4l2_device_unregister(&extron->v4l2_dev); +free_device: + mutex_destroy(&extron->edid_lock); + mutex_destroy(&extron->serio_lock); + kfree(extron); + return err; +} + +static const struct serio_device_id extron_serio_ids[] = { + { + .type = SERIO_RS232, + .proto = SERIO_EXTRON_DA_HD_4K_PLUS, + .id = SERIO_ANY, + .extra = SERIO_ANY, + }, + { 0 } +}; + +MODULE_DEVICE_TABLE(serio, extron_serio_ids); + +static struct serio_driver extron_drv = { + .driver = { + .name = "extron-da-hd-4k-plus-cec", + }, + .description = "Extron DA HD 4K PLUS HDMI CEC driver", + .id_table = extron_serio_ids, + .interrupt = extron_interrupt, + .connect = extron_connect, + .disconnect = extron_disconnect, +}; + +module_serio_driver(extron_drv); diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h new file mode 100644 index 00000000000000..b79f1253ab5db7 --- /dev/null +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#ifndef _EXTRON_DA_HD_4K_PLUS_H_ +#define _EXTRON_DA_HD_4K_PLUS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cec-splitter.h" + +#define DATA_SIZE 256 + +#define PING_PERIOD (15 * HZ) + +#define NUM_MSGS CEC_MAX_MSG_RX_QUEUE_SZ + +#define MAX_PORTS (1 + 6) + +#define MAX_EDID_BLOCKS 2 + +struct extron; + +struct extron_port { + struct cec_splitter_port port; + struct device *dev; + struct cec_adapter *adap; + struct video_device vdev; + struct v4l2_ctrl_handler hdl; + struct v4l2_ctrl *ctrl_rx_power_present; + struct v4l2_ctrl *ctrl_tx_hotplug; + struct v4l2_ctrl *ctrl_tx_edid_present; + bool is_input; + char direction; + char name[26]; + unsigned char edid[MAX_EDID_BLOCKS * 128]; + unsigned char edid_tmp[MAX_EDID_BLOCKS * 128]; + unsigned int edid_blocks; + bool read_edid; + struct extron *extron; + struct work_struct irq_work; + struct completion cmd_done; + const char *response; + unsigned int cmd_error; + struct cec_msg rx_msg[NUM_MSGS]; + unsigned int rx_msg_cur_idx, rx_msg_num; + /* protect rx_msg_cur_idx and rx_msg_num */ + spinlock_t msg_lock; + u32 tx_done_status; + bool update_phys_addr; + u16 phys_addr; + bool cec_was_registered; + bool disconnected; + bool update_has_signal; + bool has_signal; + bool update_has_edid; + bool has_edid; + bool has_4kp30; + bool has_4kp60; + bool has_qy; + bool has_qs; + u8 est_i, est_ii; + + /* locks access to the video_device */ + struct mutex video_lock; +}; + +struct extron { + struct cec_splitter splitter; + struct device *dev; + struct serio *serio; + /* locks access to serio */ + struct mutex serio_lock; + unsigned int num_ports; + unsigned int num_in_ports; + unsigned int num_out_ports; + char unit_name[32]; + char unit_type[64]; + char unit_fw_version[32]; + char unit_cec_engine_version[32]; + struct extron_port *ports[MAX_PORTS]; + struct cec_splitter_port *splitter_ports[MAX_PORTS]; + struct v4l2_device v4l2_dev; + bool hpd_never_low; + struct task_struct *kthread_setup; + struct delayed_work work_update_edid; + + /* serializes EDID reading */ + struct mutex edid_lock; + unsigned int edid_bytes_read; + struct extron_port *edid_port; + struct completion edid_completion; + bool edid_reading; + bool is_ready; + + struct completion cmd_done; + const char 
*response; + unsigned int cmd_error; + char data[DATA_SIZE]; + unsigned int len; + char reply[DATA_SIZE]; + char buf[DATA_SIZE]; + unsigned int idx; +}; + +#endif diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c index b6f1eb5dbbdf35..3732367e0c627b 100644 --- a/drivers/media/common/siano/smscoreapi.c +++ b/drivers/media/common/siano/smscoreapi.c @@ -1132,8 +1132,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev, * return: 0 on success, <0 on error. */ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev, - int mode, - loadfirmware_t loadfirmware_handler) + int mode) { int rc = -ENOENT; u8 *fw_buf; @@ -1147,8 +1146,7 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev, } pr_debug("Firmware name: %s\n", fw_filename); - if (!loadfirmware_handler && - !(coredev->device_flags & SMS_DEVICE_FAMILY2)) + if (!(coredev->device_flags & SMS_DEVICE_FAMILY2)) return -EINVAL; rc = request_firmware(&fw, fw_filename, coredev->device); @@ -1166,10 +1164,8 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev, memcpy(fw_buf, fw->data, fw->size); fw_buf_size = fw->size; - rc = (coredev->device_flags & SMS_DEVICE_FAMILY2) ? - smscore_load_firmware_family2(coredev, fw_buf, fw_buf_size) - : loadfirmware_handler(coredev->context, fw_buf, - fw_buf_size); + rc = smscore_load_firmware_family2(coredev, fw_buf, + fw_buf_size); } kfree(fw_buf); @@ -1353,8 +1349,7 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode) } if (!(coredev->modes_supported & (1 << mode))) { - rc = smscore_load_firmware_from_file(coredev, - mode, NULL); + rc = smscore_load_firmware_from_file(coredev, mode); if (rc >= 0) pr_debug("firmware download success\n"); } else { diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h index 82d9f8a64d9950..d945a2d6d62465 100644 --- a/drivers/media/common/siano/smscoreapi.h +++ b/drivers/media/common/siano/smscoreapi.h @@ -97,7 +97,6 @@ typedef int (*hotplug_t)(struct smscore_device_t *coredev, typedef int (*setmode_t)(void *context, int mode); typedef void (*detectmode_t)(void *context, int *mode); typedef int (*sendrequest_t)(void *context, void *buffer, size_t size); -typedef int (*loadfirmware_t)(void *context, void *buffer, size_t size); typedef int (*preload_t)(void *context); typedef int (*postload_t)(void *context); @@ -1102,9 +1101,6 @@ extern int smscore_register_device(struct smsdevice_params_t *params, extern void smscore_unregister_device(struct smscore_device_t *coredev); extern int smscore_start_device(struct smscore_device_t *coredev); -extern int smscore_load_firmware(struct smscore_device_t *coredev, - char *filename, - loadfirmware_t loadfirmware_handler); extern int smscore_set_device_mode(struct smscore_device_t *coredev, int mode); extern int smscore_get_device_mode(struct smscore_device_t *coredev); @@ -1119,12 +1115,6 @@ extern int smsclient_sendrequest(struct smscore_client_t *client, extern void smscore_onresponse(struct smscore_device_t *coredev, struct smscore_buffer_t *cb); -extern int smscore_get_common_buffer_size(struct smscore_device_t *coredev); -extern int smscore_map_common_buffer(struct smscore_device_t *coredev, - struct vm_area_struct *vma); -extern int smscore_send_fw_file(struct smscore_device_t *coredev, - u8 *ufwbuf, int size); - extern struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev); extern void smscore_putbuffer(struct 
smscore_device_t *coredev, diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 0217392fcc0d9c..29a8d876e6c281 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -303,14 +303,22 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p) if (!p->mem_priv) return; - if (p->dbuf_mapped) - call_void_memop(vb, unmap_dmabuf, p->mem_priv); + if (!p->dbuf_duplicated) { + if (p->dbuf_mapped) + call_void_memop(vb, unmap_dmabuf, p->mem_priv); + + call_void_memop(vb, detach_dmabuf, p->mem_priv); + } - call_void_memop(vb, detach_dmabuf, p->mem_priv); dma_buf_put(p->dbuf); p->mem_priv = NULL; p->dbuf = NULL; p->dbuf_mapped = 0; + p->bytesused = 0; + p->length = 0; + p->m.fd = 0; + p->data_offset = 0; + p->dbuf_duplicated = false; } /* @@ -319,9 +327,15 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p) */ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) { - unsigned int plane; + int plane; - for (plane = 0; plane < vb->num_planes; ++plane) + /* + * When multiple planes share the same DMA buffer attachment, the plane + * with the lowest index owns the mem_priv. + * Put planes in the reversed order so that we don't leave invalid + * mem_priv behind. + */ + for (plane = vb->num_planes - 1; plane >= 0; --plane) __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); } @@ -1369,7 +1383,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) struct vb2_plane planes[VB2_MAX_PLANES]; struct vb2_queue *q = vb->vb2_queue; void *mem_priv; - unsigned int plane; + unsigned int plane, i; int ret = 0; bool reacquired = vb->planes[0].mem_priv == NULL; @@ -1383,11 +1397,13 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) for (plane = 0; plane < vb->num_planes; ++plane) { struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); + planes[plane].dbuf = dbuf; + if (IS_ERR_OR_NULL(dbuf)) { dprintk(q, 1, "invalid dmabuf fd for plane %d\n", plane); ret = -EINVAL; - goto err; + goto err_put_planes; } /* use DMABUF size if length is not provided */ @@ -1398,80 +1414,86 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n", planes[plane].length, plane, vb->planes[plane].min_length); - dma_buf_put(dbuf); ret = -EINVAL; - goto err; + goto err_put_planes; } /* Skip the plane if already verified */ if (dbuf == vb->planes[plane].dbuf && - vb->planes[plane].length == planes[plane].length) { - dma_buf_put(dbuf); + vb->planes[plane].length == planes[plane].length) continue; - } dprintk(q, 3, "buffer for plane %d changed\n", plane); - if (!reacquired) { - reacquired = true; + reacquired = true; + } + + if (reacquired) { + if (vb->planes[0].mem_priv) { vb->copied_timestamp = 0; call_void_vb_qop(vb, buf_cleanup, vb); + __vb2_buf_dmabuf_put(vb); } - /* Release previously acquired memory if present */ - __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); - vb->planes[plane].bytesused = 0; - vb->planes[plane].length = 0; - vb->planes[plane].m.fd = 0; - vb->planes[plane].data_offset = 0; + for (plane = 0; plane < vb->num_planes; ++plane) { + /* + * This is an optimization to reduce dma_buf attachment/mapping. + * When the same dma_buf is used for multiple planes, there is no need + * to create duplicated attachments. 
+ */ + for (i = 0; i < plane; ++i) { + if (planes[plane].dbuf == vb->planes[i].dbuf && + q->alloc_devs[plane] == q->alloc_devs[i]) { + vb->planes[plane].dbuf_duplicated = true; + vb->planes[plane].dbuf = vb->planes[i].dbuf; + vb->planes[plane].mem_priv = vb->planes[i].mem_priv; + break; + } + } - /* Acquire each plane's memory */ - mem_priv = call_ptr_memop(attach_dmabuf, - vb, - q->alloc_devs[plane] ? : q->dev, - dbuf, - planes[plane].length); - if (IS_ERR(mem_priv)) { - dprintk(q, 1, "failed to attach dmabuf\n"); - ret = PTR_ERR(mem_priv); - dma_buf_put(dbuf); - goto err; - } + if (vb->planes[plane].dbuf_duplicated) + continue; - vb->planes[plane].dbuf = dbuf; - vb->planes[plane].mem_priv = mem_priv; - } + /* Acquire each plane's memory */ + mem_priv = call_ptr_memop(attach_dmabuf, + vb, + q->alloc_devs[plane] ? : q->dev, + planes[plane].dbuf, + planes[plane].length); + if (IS_ERR(mem_priv)) { + dprintk(q, 1, "failed to attach dmabuf\n"); + ret = PTR_ERR(mem_priv); + goto err_put_vb2_buf; + } - /* - * This pins the buffer(s) with dma_buf_map_attachment()). It's done - * here instead just before the DMA, while queueing the buffer(s) so - * userspace knows sooner rather than later if the dma-buf map fails. - */ - for (plane = 0; plane < vb->num_planes; ++plane) { - if (vb->planes[plane].dbuf_mapped) - continue; + vb->planes[plane].dbuf = planes[plane].dbuf; + vb->planes[plane].mem_priv = mem_priv; - ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); - if (ret) { - dprintk(q, 1, "failed to map dmabuf for plane %d\n", - plane); - goto err; + /* + * This pins the buffer(s) with dma_buf_map_attachment()). It's done + * here instead just before the DMA, while queueing the buffer(s) so + * userspace knows sooner rather than later if the dma-buf map fails. + */ + ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); + if (ret) { + dprintk(q, 1, "failed to map dmabuf for plane %d\n", + plane); + goto err_put_vb2_buf; + } + vb->planes[plane].dbuf_mapped = 1; } - vb->planes[plane].dbuf_mapped = 1; - } - /* - * Now that everything is in order, copy relevant information - * provided by userspace. - */ - for (plane = 0; plane < vb->num_planes; ++plane) { - vb->planes[plane].bytesused = planes[plane].bytesused; - vb->planes[plane].length = planes[plane].length; - vb->planes[plane].m.fd = planes[plane].m.fd; - vb->planes[plane].data_offset = planes[plane].data_offset; - } + /* + * Now that everything is in order, copy relevant information + * provided by userspace. + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + vb->planes[plane].bytesused = planes[plane].bytesused; + vb->planes[plane].length = planes[plane].length; + vb->planes[plane].m.fd = planes[plane].m.fd; + vb->planes[plane].data_offset = planes[plane].data_offset; + } - if (reacquired) { /* * Call driver-specific initialization on the newly acquired buffer, * if provided. 
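The __prepare_dmabuf() rework in the hunks above avoids attaching the same dma-buf twice when several planes of one buffer carry the same fd on the same allocator device; only the lowest-index plane owns the attachment and later planes are marked as duplicates. A minimal standalone sketch of that duplicate-detection step, using simplified stand-in types rather than the real vb2 structures:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins for the vb2 plane bookkeeping. */
struct plane {
	void *dbuf;            /* dma-buf this plane refers to */
	void *alloc_dev;       /* device used for the attachment */
	void *mem_priv;        /* attachment handle, owned by the lowest-index plane */
	bool  dbuf_duplicated; /* true when mem_priv is borrowed from an earlier plane */
};

/*
 * Reuse an earlier plane's attachment when it wraps the same dma-buf on the
 * same allocator device; return true if plane 'p' can share one, false if the
 * caller still has to create a new attachment for it.
 */
static bool reuse_existing_attachment(struct plane *planes, size_t p)
{
	for (size_t i = 0; i < p; i++) {
		if (planes[p].dbuf == planes[i].dbuf &&
		    planes[p].alloc_dev == planes[i].alloc_dev) {
			planes[p].mem_priv = planes[i].mem_priv;
			planes[p].dbuf_duplicated = true;
			return true;
		}
	}
	return false;
}

Because only the owning plane really holds the attachment, teardown walks the planes in reverse index order so the borrowed mem_priv pointers are dropped before the owner releases it, which is what the reversed loop added to __vb2_buf_dmabuf_put() above does.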
@@ -1479,19 +1501,28 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) ret = call_vb_qop(vb, buf_init, vb); if (ret) { dprintk(q, 1, "buffer initialization failed\n"); - goto err; + goto err_put_vb2_buf; } + } else { + for (plane = 0; plane < vb->num_planes; ++plane) + dma_buf_put(planes[plane].dbuf); } ret = call_vb_qop(vb, buf_prepare, vb); if (ret) { dprintk(q, 1, "buffer preparation failed\n"); call_void_vb_qop(vb, buf_cleanup, vb); - goto err; + goto err_put_vb2_buf; } return 0; -err: + +err_put_planes: + for (plane = 0; plane < vb->num_planes; ++plane) { + if (!IS_ERR_OR_NULL(planes[plane].dbuf)) + dma_buf_put(planes[plane].dbuf); + } +err_put_vb2_buf: /* In case of errors, release planes that were already acquired */ __vb2_buf_dmabuf_put(vb); @@ -2601,13 +2632,6 @@ int vb2_core_queue_init(struct vb2_queue *q) if (WARN_ON(q->supports_requests && q->min_queued_buffers)) return -EINVAL; - /* - * The minimum requirement is 2: one buffer is used - * by the hardware while the other is being processed by userspace. - */ - if (q->min_reqbufs_allocation < 2) - q->min_reqbufs_allocation = 2; - /* * If the driver needs 'min_queued_buffers' in the queue before * calling start_streaming() then the minimum requirement is diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index 3d4fd4ef53107c..bb0b7fa67b539a 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -854,8 +854,7 @@ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size) return -ENODEV; } if (dma_get_max_seg_size(dev) < size) - return dma_set_max_seg_size(dev, size); - + dma_set_max_seg_size(dev, size); return 0; } EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size); diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c index f39887c04978f4..bf2773c5b97ae7 100644 --- a/drivers/media/dvb-frontends/a8293.c +++ b/drivers/media/dvb-frontends/a8293.c @@ -256,7 +256,7 @@ static void a8293_remove(struct i2c_client *client) } static const struct i2c_device_id a8293_id_table[] = { - {"a8293", 0}, + { "a8293" }, {} }; MODULE_DEVICE_TABLE(i2c, a8293_id_table); diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c index 5afdbe244596b3..befd6a4eafd9a7 100644 --- a/drivers/media/dvb-frontends/af9013.c +++ b/drivers/media/dvb-frontends/af9013.c @@ -1553,7 +1553,7 @@ static void af9013_remove(struct i2c_client *client) } static const struct i2c_device_id af9013_id_table[] = { - {"af9013", 0}, + { "af9013" }, {} }; MODULE_DEVICE_TABLE(i2c, af9013_id_table); diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c index 49b7b04a789939..eed2ea4da8fac6 100644 --- a/drivers/media/dvb-frontends/af9033.c +++ b/drivers/media/dvb-frontends/af9033.c @@ -1173,7 +1173,7 @@ static void af9033_remove(struct i2c_client *client) } static const struct i2c_device_id af9033_id_table[] = { - {"af9033", 0}, + { "af9033" }, {} }; MODULE_DEVICE_TABLE(i2c, af9033_id_table); diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c index d02a92a81c60f4..58c4c489bf973f 100644 --- a/drivers/media/dvb-frontends/au8522_decoder.c +++ b/drivers/media/dvb-frontends/au8522_decoder.c @@ -767,7 +767,7 @@ static void au8522_remove(struct i2c_client *client) } static const struct i2c_device_id au8522_id[] = { - {"au8522", 0}, + { "au8522" }, {} }; diff --git 
a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c index 3f3b8574366682..5e6e18819a0d23 100644 --- a/drivers/media/dvb-frontends/cxd2099.c +++ b/drivers/media/dvb-frontends/cxd2099.c @@ -672,7 +672,7 @@ static void cxd2099_remove(struct i2c_client *client) } static const struct i2c_device_id cxd2099_id[] = { - {"cxd2099", 0}, + { "cxd2099" }, {} }; MODULE_DEVICE_TABLE(i2c, cxd2099_id); diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c index 7feb08dccfa1c8..c3d8ced6c3baa6 100644 --- a/drivers/media/dvb-frontends/cxd2820r_core.c +++ b/drivers/media/dvb-frontends/cxd2820r_core.c @@ -723,7 +723,7 @@ static void cxd2820r_remove(struct i2c_client *client) } static const struct i2c_device_id cxd2820r_id_table[] = { - {"cxd2820r", 0}, + { "cxd2820r" }, {} }; MODULE_DEVICE_TABLE(i2c, cxd2820r_id_table); diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index b25d11be8611b4..6ab9d4de65ce2f 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -2244,7 +2244,7 @@ static void lgdt3306a_remove(struct i2c_client *client) } static const struct i2c_device_id lgdt3306a_id_table[] = { - {"lgdt3306a", 0}, + { "lgdt3306a" }, {} }; MODULE_DEVICE_TABLE(i2c, lgdt3306a_id_table); diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c index 081d6ad3ce729b..cab442a350a5a8 100644 --- a/drivers/media/dvb-frontends/lgdt330x.c +++ b/drivers/media/dvb-frontends/lgdt330x.c @@ -983,7 +983,7 @@ static void lgdt330x_remove(struct i2c_client *client) } static const struct i2c_device_id lgdt330x_id_table[] = { - {"lgdt330x", 0}, + { "lgdt330x" }, {} }; MODULE_DEVICE_TABLE(i2c, lgdt330x_id_table); diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c index 73d1e52de569ed..729751671c3d1a 100644 --- a/drivers/media/dvb-frontends/mn88472.c +++ b/drivers/media/dvb-frontends/mn88472.c @@ -708,7 +708,7 @@ static void mn88472_remove(struct i2c_client *client) } static const struct i2c_device_id mn88472_id_table[] = { - {"mn88472", 0}, + { "mn88472" }, {} }; MODULE_DEVICE_TABLE(i2c, mn88472_id_table); diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c index eb50591c0e7a0f..fefc640d8afbf0 100644 --- a/drivers/media/dvb-frontends/mn88473.c +++ b/drivers/media/dvb-frontends/mn88473.c @@ -743,7 +743,7 @@ static void mn88473_remove(struct i2c_client *client) } static const struct i2c_device_id mn88473_id_table[] = { - {"mn88473", 0}, + { "mn88473" }, {} }; MODULE_DEVICE_TABLE(i2c, mn88473_id_table); diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c index 91e9c378397c81..930da176e5bfb4 100644 --- a/drivers/media/dvb-frontends/mxl5xx.c +++ b/drivers/media/dvb-frontends/mxl5xx.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include "mxl5xx.h" diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c index 2a31bde2630f88..bbc2bc778225f4 100644 --- a/drivers/media/dvb-frontends/mxl692.c +++ b/drivers/media/dvb-frontends/mxl692.c @@ -1346,7 +1346,7 @@ static void mxl692_remove(struct i2c_client *client) } static const struct i2c_device_id mxl692_id_table[] = { - {"mxl692", 0}, + { "mxl692" }, {} }; MODULE_DEVICE_TABLE(i2c, mxl692_id_table); diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index 
30d10fe4b33e34..aa4ef9aedf172c 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -609,7 +609,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on index, pid, onoff); /* skip invalid PIDs (0x2000) */ - if (pid > 0x1fff || index > 32) + if (pid > 0x1fff || index >= 32) return 0; if (onoff) @@ -876,7 +876,7 @@ static void rtl2830_remove(struct i2c_client *client) } static const struct i2c_device_id rtl2830_id_table[] = { - {"rtl2830", 0}, + { "rtl2830" }, {} }; MODULE_DEVICE_TABLE(i2c, rtl2830_id_table); diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 5142820b1b3d97..3b4e46dac1bf11 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -983,7 +983,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, index, pid, onoff, dev->slave_ts); /* skip invalid PIDs (0x2000) */ - if (pid > 0x1fff || index > 32) + if (pid > 0x1fff || index >= 32) return 0; if (onoff) @@ -1125,7 +1125,7 @@ static void rtl2832_remove(struct i2c_client *client) } static const struct i2c_device_id rtl2832_id_table[] = { - {"rtl2832", 0}, + { "rtl2832" }, {} }; MODULE_DEVICE_TABLE(i2c, rtl2832_id_table); diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c index 013d423d326356..f87c9357cee375 100644 --- a/drivers/media/dvb-frontends/si2165.c +++ b/drivers/media/dvb-frontends/si2165.c @@ -1281,7 +1281,7 @@ static void si2165_remove(struct i2c_client *client) } static const struct i2c_device_id si2165_id_table[] = { - {"si2165", 0}, + { "si2165" }, {} }; MODULE_DEVICE_TABLE(i2c, si2165_id_table); diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c index 26828fd41e6843..d6b6b8bc7d4e05 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -788,7 +788,7 @@ static void si2168_remove(struct i2c_client *client) } static const struct i2c_device_id si2168_id_table[] = { - {"si2168", 0}, + { "si2168" }, {} }; MODULE_DEVICE_TABLE(i2c, si2168_id_table); diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c index 4d7d0b8b51b452..75adf2a4589fd0 100644 --- a/drivers/media/dvb-frontends/sp2.c +++ b/drivers/media/dvb-frontends/sp2.c @@ -407,7 +407,7 @@ static void sp2_remove(struct i2c_client *client) } static const struct i2c_device_id sp2_id[] = { - {"sp2", 0}, + { "sp2" }, {} }; MODULE_DEVICE_TABLE(i2c, sp2_id); diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c index 3b02d504941ffd..f273efa330cfe2 100644 --- a/drivers/media/dvb-frontends/stv090x.c +++ b/drivers/media/dvb-frontends/stv090x.c @@ -5079,7 +5079,7 @@ struct dvb_frontend *stv090x_attach(struct stv090x_config *config, EXPORT_SYMBOL_GPL(stv090x_attach); static const struct i2c_device_id stv090x_id_table[] = { - {"stv090x", 0}, + { "stv090x" }, {} }; MODULE_DEVICE_TABLE(i2c, stv090x_id_table); diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c index c678f47d2449c7..33c8105da1c3bb 100644 --- a/drivers/media/dvb-frontends/stv6110x.c +++ b/drivers/media/dvb-frontends/stv6110x.c @@ -470,7 +470,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe, EXPORT_SYMBOL_GPL(stv6110x_attach); static const struct i2c_device_id stv6110x_id_table[] = { - {"stv6110x", 0}, + { "stv6110x" }, {} }; MODULE_DEVICE_TABLE(i2c, stv6110x_id_table); diff --git 
a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 6640851d8bbcae..e23794b821cdb8 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -1230,7 +1230,7 @@ static void tda10071_remove(struct i2c_client *client) } static const struct i2c_device_id tda10071_id_table[] = { - {"tda10071_cx24118", 0}, + { "tda10071_cx24118" }, {} }; MODULE_DEVICE_TABLE(i2c, tda10071_id_table); diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index a5ebce57f35e6e..a5baca2449c76d 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -710,8 +710,8 @@ static void ts2020_remove(struct i2c_client *client) } static const struct i2c_device_id ts2020_id_table[] = { - {"ts2020", 0}, - {"ts2022", 0}, + { "ts2020" }, + { "ts2022" }, {} }; MODULE_DEVICE_TABLE(i2c, ts2020_id_table); diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c index 1543d24f522c32..f60271082fb5b0 100644 --- a/drivers/media/i2c/ad5820.c +++ b/drivers/media/i2c/ad5820.c @@ -347,8 +347,8 @@ static void ad5820_remove(struct i2c_client *client) } static const struct i2c_device_id ad5820_id_table[] = { - { "ad5820", 0 }, - { "ad5821", 0 }, + { "ad5820" }, + { "ad5821" }, { } }; MODULE_DEVICE_TABLE(i2c, ad5820_id_table); diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c index 5ace7b5804d44c..391bc75bfcd0f6 100644 --- a/drivers/media/i2c/adp1653.c +++ b/drivers/media/i2c/adp1653.c @@ -522,7 +522,7 @@ static void adp1653_remove(struct i2c_client *client) } static const struct i2c_device_id adp1653_id_table[] = { - { ADP1653_NAME, 0 }, + { ADP1653_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, adp1653_id_table); diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c index 4a2b9fd9e2da16..ef8682b980b44e 100644 --- a/drivers/media/i2c/adv7170.c +++ b/drivers/media/i2c/adv7170.c @@ -377,8 +377,8 @@ static void adv7170_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id adv7170_id[] = { - { "adv7170", 0 }, - { "adv7171", 0 }, + { "adv7170" }, + { "adv7171" }, { } }; MODULE_DEVICE_TABLE(i2c, adv7170_id); diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c index e454cba4b02637..384da1ec5bf9d7 100644 --- a/drivers/media/i2c/adv7175.c +++ b/drivers/media/i2c/adv7175.c @@ -432,8 +432,8 @@ static void adv7175_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id adv7175_id[] = { - { "adv7175", 0 }, - { "adv7176", 0 }, + { "adv7175" }, + { "adv7176" }, { } }; MODULE_DEVICE_TABLE(i2c, adv7175_id); diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c index 2a2cace4a1538c..25a31a6dd456de 100644 --- a/drivers/media/i2c/adv7183.c +++ b/drivers/media/i2c/adv7183.c @@ -619,8 +619,8 @@ static void adv7183_remove(struct i2c_client *client) } static const struct i2c_device_id adv7183_id[] = { - {"adv7183", 0}, - {}, + { "adv7183" }, + {} }; MODULE_DEVICE_TABLE(i2c, adv7183_id); diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c index 4fbe4e18570e95..b96443404a2691 100644 --- a/drivers/media/i2c/adv7343.c +++ b/drivers/media/i2c/adv7343.c @@ -502,8 +502,8 @@ static void adv7343_remove(struct i2c_client *client) } static const struct i2c_device_id adv7343_id[] = { - {"adv7343", 0}, - {}, + { "adv7343" }, + {} }; MODULE_DEVICE_TABLE(i2c, 
adv7343_id); diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c index 7638af455cefa1..c7994bd0bbd466 100644 --- a/drivers/media/i2c/adv7393.c +++ b/drivers/media/i2c/adv7393.c @@ -446,8 +446,8 @@ static void adv7393_remove(struct i2c_client *client) } static const struct i2c_device_id adv7393_id[] = { - {"adv7393", 0}, - {}, + { "adv7393" }, + {} }; MODULE_DEVICE_TABLE(i2c, adv7393_id); diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c index 261871be833f12..e9406d552699f8 100644 --- a/drivers/media/i2c/adv7511-v4l2.c +++ b/drivers/media/i2c/adv7511-v4l2.c @@ -1949,7 +1949,7 @@ static void adv7511_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id adv7511_id[] = { - { "adv7511-v4l2", 0 }, + { "adv7511-v4l2" }, { } }; MODULE_DEVICE_TABLE(i2c, adv7511_id); diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index f2d4217310e7ff..014fc913225c4a 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -3617,7 +3617,7 @@ static void adv7842_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id adv7842_id[] = { - { "adv7842", 0 }, + { "adv7842" }, { } }; MODULE_DEVICE_TABLE(i2c, adv7842_id); diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c index ce840adc2aa7fe..ee575d01a67613 100644 --- a/drivers/media/i2c/ak881x.c +++ b/drivers/media/i2c/ak881x.c @@ -304,8 +304,8 @@ static void ak881x_remove(struct i2c_client *client) } static const struct i2c_device_id ak881x_id[] = { - { "ak8813", 0 }, - { "ak8814", 0 }, + { "ak8813" }, + { "ak8814" }, { } }; MODULE_DEVICE_TABLE(i2c, ak881x_id); diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c index 09331cf95c62d0..fc27238dd4d391 100644 --- a/drivers/media/i2c/ar0521.c +++ b/drivers/media/i2c/ar0521.c @@ -835,21 +835,30 @@ static const struct initial_reg { be(0x0707)), /* 3F44: couple k factor 2 */ }; -static int ar0521_power_off(struct device *dev) +static void __ar0521_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ar0521_dev *sensor = to_ar0521_dev(sd); int i; - clk_disable_unprepare(sensor->extclk); - if (sensor->reset_gpio) - gpiod_set_value(sensor->reset_gpio, 1); /* assert RESET signal */ + /* assert RESET signal */ + gpiod_set_value_cansleep(sensor->reset_gpio, 1); for (i = ARRAY_SIZE(ar0521_supply_names) - 1; i >= 0; i--) { if (sensor->supplies[i]) regulator_disable(sensor->supplies[i]); } +} + +static int ar0521_power_off(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ar0521_dev *sensor = to_ar0521_dev(sd); + + clk_disable_unprepare(sensor->extclk); + __ar0521_power_off(dev); + return 0; } @@ -878,7 +887,7 @@ static int ar0521_power_on(struct device *dev) if (sensor->reset_gpio) /* deassert RESET signal */ - gpiod_set_value(sensor->reset_gpio, 0); + gpiod_set_value_cansleep(sensor->reset_gpio, 0); usleep_range(4500, 5000); /* min 45000 clocks */ for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) { @@ -908,7 +917,8 @@ static int ar0521_power_on(struct device *dev) return 0; off: - ar0521_power_off(dev); + clk_disable_unprepare(sensor->extclk); + __ar0521_power_off(dev); return ret; } diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c index b4a25cc996dc16..f97245f91f8814 100644 --- a/drivers/media/i2c/bt819.c +++ b/drivers/media/i2c/bt819.c @@ -457,9 
+457,9 @@ static void bt819_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id bt819_id[] = { - { "bt819a", 0 }, - { "bt817a", 0 }, - { "bt815a", 0 }, + { "bt819a" }, + { "bt817a" }, + { "bt815a" }, { } }; MODULE_DEVICE_TABLE(i2c, bt819_id); diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c index 814acbd6a5a80a..6852aa47cafba0 100644 --- a/drivers/media/i2c/bt856.c +++ b/drivers/media/i2c/bt856.c @@ -230,7 +230,7 @@ static void bt856_remove(struct i2c_client *client) } static const struct i2c_device_id bt856_id[] = { - { "bt856", 0 }, + { "bt856" }, { } }; MODULE_DEVICE_TABLE(i2c, bt856_id); diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c index dada059cbce42a..a2cc34d35ed24d 100644 --- a/drivers/media/i2c/bt866.c +++ b/drivers/media/i2c/bt866.c @@ -197,7 +197,7 @@ static void bt866_remove(struct i2c_client *client) } static const struct i2c_device_id bt866_id[] = { - { "bt866", 0 }, + { "bt866" }, { } }; MODULE_DEVICE_TABLE(i2c, bt866_id); diff --git a/drivers/media/i2c/ccs/ccs-reg-access.c b/drivers/media/i2c/ccs/ccs-reg-access.c index ed79075505e6f9..a696a0ec8ff5c7 100644 --- a/drivers/media/i2c/ccs/ccs-reg-access.c +++ b/drivers/media/i2c/ccs/ccs-reg-access.c @@ -9,7 +9,7 @@ * Contact: Sakari Ailus */ -#include +#include #include #include diff --git a/drivers/media/i2c/ccs/ccs-reg-access.h b/drivers/media/i2c/ccs/ccs-reg-access.h index 78c43f92d99acf..4b56b21a26b538 100644 --- a/drivers/media/i2c/ccs/ccs-reg-access.h +++ b/drivers/media/i2c/ccs/ccs-reg-access.h @@ -21,16 +21,13 @@ struct ccs_sensor; -int ccs_read_addr_no_quirk(struct ccs_sensor *sensor, u32 reg, u32 *val); int ccs_read_addr(struct ccs_sensor *sensor, u32 reg, u32 *val); int ccs_read_addr_8only(struct ccs_sensor *sensor, u32 reg, u32 *val); int ccs_read_addr_noconv(struct ccs_sensor *sensor, u32 reg, u32 *val); -int ccs_write_addr_no_quirk(struct ccs_sensor *sensor, u32 reg, u32 val); int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val); int ccs_write_data_regs(struct ccs_sensor *sensor, struct ccs_reg *regs, size_t num_regs); -unsigned int ccs_reg_width(u32 reg); u32 ccs_reg_conv(struct ccs_sensor *sensor, u32 reg, u32 val); #define ccs_read(sensor, reg_name, val) \ diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c index 61afa3d799d254..078e0066ce4bd8 100644 --- a/drivers/media/i2c/cs3308.c +++ b/drivers/media/i2c/cs3308.c @@ -109,7 +109,7 @@ static void cs3308_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id cs3308_id[] = { - { "cs3308", 0 }, + { "cs3308" }, { } }; MODULE_DEVICE_TABLE(i2c, cs3308_id); diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c index 3019a132e07909..3a9797a50e828b 100644 --- a/drivers/media/i2c/cs5345.c +++ b/drivers/media/i2c/cs5345.c @@ -189,7 +189,7 @@ static void cs5345_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id cs5345_id[] = { - { "cs5345", 0 }, + { "cs5345" }, { } }; MODULE_DEVICE_TABLE(i2c, cs5345_id); diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c index 82881b79e7300a..c4cad3293905c3 100644 --- a/drivers/media/i2c/cs53l32a.c +++ b/drivers/media/i2c/cs53l32a.c @@ -200,7 +200,7 @@ static void cs53l32a_remove(struct i2c_client *client) } static const struct i2c_device_id cs53l32a_id[] = { - { 
"cs53l32a", 0 }, + { "cs53l32a" }, { } }; MODULE_DEVICE_TABLE(i2c, cs53l32a_id); diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c index 04461c893d9068..a90a9e5705a030 100644 --- a/drivers/media/i2c/cx25840/cx25840-core.c +++ b/drivers/media/i2c/cx25840/cx25840-core.c @@ -3964,7 +3964,7 @@ static void cx25840_remove(struct i2c_client *client) } static const struct i2c_device_id cx25840_id[] = { - { "cx25840", 0 }, + { "cx25840" }, { } }; MODULE_DEVICE_TABLE(i2c, cx25840_id); diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c index ca9bb29dab890c..8eed4a200fd89b 100644 --- a/drivers/media/i2c/ds90ub913.c +++ b/drivers/media/i2c/ds90ub913.c @@ -877,7 +877,10 @@ static void ub913_remove(struct i2c_client *client) ub913_gpiochip_remove(priv); } -static const struct i2c_device_id ub913_id[] = { { "ds90ub913a-q1", 0 }, {} }; +static const struct i2c_device_id ub913_id[] = { + { "ds90ub913a-q1" }, + {} +}; MODULE_DEVICE_TABLE(i2c, ub913_id); static const struct of_device_id ub913_dt_ids[] = { diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c index 0e88ce0ef8d780..2ddd7daa79e28a 100644 --- a/drivers/media/i2c/dw9714.c +++ b/drivers/media/i2c/dw9714.c @@ -279,8 +279,8 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev) } static const struct i2c_device_id dw9714_id_table[] = { - { DW9714_NAME, 0 }, - { { 0 } } + { DW9714_NAME }, + { } }; MODULE_DEVICE_TABLE(i2c, dw9714_id_table); diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c index e932d25ca7b3ae..7519863d77b1c4 100644 --- a/drivers/media/i2c/et8ek8/et8ek8_driver.c +++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c @@ -1501,7 +1501,7 @@ static const struct of_device_id et8ek8_of_table[] = { MODULE_DEVICE_TABLE(of, et8ek8_of_table); static const struct i2c_device_id et8ek8_id_table[] = { - { ET8EK8_NAME, 0 }, + { ET8EK8_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, et8ek8_id_table); diff --git a/drivers/media/i2c/gc05a2.c b/drivers/media/i2c/gc05a2.c index dcba29ee725c90..0413c557e594bb 100644 --- a/drivers/media/i2c/gc05a2.c +++ b/drivers/media/i2c/gc05a2.c @@ -65,7 +65,7 @@ static const char *const gc05a2_test_pattern_menu[] = { "No Pattern", "Fade_to_gray_Color Bar", "Color Bar", - "PN9", "Horizental_gradient", "Checkboard Pattern", + "PN9", "Horizontal_gradient", "Checkboard Pattern", "Slant", "Resolution", "Solid Black", "Solid White", }; diff --git a/drivers/media/i2c/gc08a3.c b/drivers/media/i2c/gc08a3.c index 7680d807e7a5f5..84de5cff958d6c 100644 --- a/drivers/media/i2c/gc08a3.c +++ b/drivers/media/i2c/gc08a3.c @@ -948,7 +948,7 @@ static int gc08a3_start_streaming(struct gc08a3 *gc08a3) ret = cci_write(gc08a3->regmap, GC08A3_STREAMING_REG, 1, NULL); if (ret < 0) { - dev_err(gc08a3->dev, "write STRAEMING_REG failed: %d\n", ret); + dev_err(gc08a3->dev, "write STREAMING_REG failed: %d\n", ret); goto err_rpm_put; } diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c index b440f386f0622e..f31f9886c924e4 100644 --- a/drivers/media/i2c/hi556.c +++ b/drivers/media/i2c/hi556.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Intel Corporation. 
-#include +#include #include #include #include diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c index 52d9ca68a86c8c..172772decd3db1 100644 --- a/drivers/media/i2c/hi846.c +++ b/drivers/media/i2c/hi846.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2021 Purism SPC -#include +#include #include #include #include diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c index 72c60747a83991..546833f5b5f598 100644 --- a/drivers/media/i2c/hi847.c +++ b/drivers/media/i2c/hi847.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022 Intel Corporation. -#include +#include #include #include #include diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c index 639e05340dbb5c..2184c90f7864d4 100644 --- a/drivers/media/i2c/imx208.c +++ b/drivers/media/i2c/imx208.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #define IMX208_REG_MODE_SELECT 0x0100 #define IMX208_MODE_STANDBY 0x00 diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c index 1a99eaaff21a00..9e30fce1f22367 100644 --- a/drivers/media/i2c/imx258.c +++ b/drivers/media/i2c/imx258.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #define IMX258_REG_MODE_SELECT CCI_REG8(0x0100) #define IMX258_MODE_STANDBY 0x00 diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c index 3800de974e8a9f..a2b824986027ee 100644 --- a/drivers/media/i2c/imx274.c +++ b/drivers/media/i2c/imx274.c @@ -1949,7 +1949,7 @@ static const struct of_device_id imx274_of_id_table[] = { MODULE_DEVICE_TABLE(of, imx274_of_id_table); static const struct i2c_device_id imx274_id[] = { - { "IMX274", 0 }, + { "IMX274" }, { } }; MODULE_DEVICE_TABLE(i2c, imx274_id); diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c index 8490618c507150..94276f4f2d8369 100644 --- a/drivers/media/i2c/imx283.c +++ b/drivers/media/i2c/imx283.c @@ -465,6 +465,39 @@ static const struct imx283_mode supported_modes_12bit[] = { .horizontal_ob = 48, .vertical_ob = 4, + .crop = { + .top = 40, + .left = 108, + .width = 5472, + .height = 3648, + }, + }, + { + /* + * Readout mode 3 : 3/3 binned mode (1824x1216) + */ + .mode = IMX283_MODE_3, + .bpp = 12, + .width = 1824, + .height = 1216, + .min_hmax = 1894, /* Pixels (284 * 480MHz/72MHz + padding) */ + .min_vmax = 4200, /* Lines */ + + /* 60.00 fps */ + .default_hmax = 1900, /* 285 @ 480MHz/72Mhz */ + .default_vmax = 4200, + + .veff = 1234, + .vst = 0, + .vct = 0, + + .hbin_ratio = 3, + .vbin_ratio = 3, + + .min_shr = 16, + .horizontal_ob = 32, + .vertical_ob = 4, + .crop = { .top = 40, .left = 108, diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c index 4150e6e4b9a635..458905dfb3e110 100644 --- a/drivers/media/i2c/imx290.c +++ b/drivers/media/i2c/imx290.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c index 8fe3933f31467c..dd1b4ff983dcb1 100644 --- a/drivers/media/i2c/imx319.c +++ b/drivers/media/i2c/imx319.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Intel Corporation -#include +#include #include #include #include diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c index 40863d87d3413a..a544fc3df39c28 100644 --- a/drivers/media/i2c/imx334.c +++ b/drivers/media/i2c/imx334.c @@ -4,7 +4,7 @@ * * Copyright (C) 2021 Intel Corporation */ -#include +#include #include #include diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c index 
990d74214cc2e4..fcfd1d851bd4aa 100644 --- a/drivers/media/i2c/imx335.c +++ b/drivers/media/i2c/imx335.c @@ -4,7 +4,7 @@ * * Copyright (C) 2021 Intel Corporation */ -#include +#include #include #include @@ -997,7 +997,7 @@ static int imx335_parse_hw_config(struct imx335 *imx335) /* Request optional reset pin */ imx335->reset_gpio = devm_gpiod_get_optional(imx335->dev, "reset", - GPIOD_OUT_LOW); + GPIOD_OUT_HIGH); if (IS_ERR(imx335->reset_gpio)) { dev_err(imx335->dev, "failed to get reset gpio %ld\n", PTR_ERR(imx335->reset_gpio)); @@ -1110,8 +1110,7 @@ static int imx335_power_on(struct device *dev) usleep_range(500, 550); /* Tlow */ - /* Set XCLR */ - gpiod_set_value_cansleep(imx335->reset_gpio, 1); + gpiod_set_value_cansleep(imx335->reset_gpio, 0); ret = clk_prepare_enable(imx335->inclk); if (ret) { @@ -1124,7 +1123,7 @@ static int imx335_power_on(struct device *dev) return 0; error_reset: - gpiod_set_value_cansleep(imx335->reset_gpio, 0); + gpiod_set_value_cansleep(imx335->reset_gpio, 1); regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies); return ret; @@ -1141,7 +1140,7 @@ static int imx335_power_off(struct device *dev) struct v4l2_subdev *sd = dev_get_drvdata(dev); struct imx335 *imx335 = to_imx335(sd); - gpiod_set_value_cansleep(imx335->reset_gpio, 0); + gpiod_set_value_cansleep(imx335->reset_gpio, 1); clk_disable_unprepare(imx335->inclk); regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies); diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c index 7e9c2f65fa0812..b2dce67c0b6bca 100644 --- a/drivers/media/i2c/imx355.c +++ b/drivers/media/i2c/imx355.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Intel Corporation -#include +#include #include #include #include @@ -1520,6 +1520,7 @@ static const struct v4l2_subdev_internal_ops imx355_internal_ops = { static int imx355_init_controls(struct imx355 *imx355) { struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd); + struct v4l2_fwnode_device_properties props; struct v4l2_ctrl_handler *ctrl_hdlr; s64 exposure_max; s64 vblank_def; @@ -1531,7 +1532,7 @@ static int imx355_init_controls(struct imx355 *imx355) int ret; ctrl_hdlr = &imx355->ctrl_handler; - ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10); + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12); if (ret) return ret; @@ -1603,6 +1604,15 @@ static int imx355_init_controls(struct imx355 *imx355) goto error; } + ret = v4l2_fwnode_device_parse(&client->dev, &props); + if (ret) + goto error; + + ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx355_ctrl_ops, + &props); + if (ret) + goto error; + imx355->sd.ctrl_handler = ctrl_hdlr; return 0; diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c index 7d1f7af0a9dff3..0bfe3046fcc872 100644 --- a/drivers/media/i2c/imx412.c +++ b/drivers/media/i2c/imx412.c @@ -4,7 +4,7 @@ * * Copyright (C) 2021 Intel Corporation */ -#include +#include #include #include diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index b37a2aaf8ac047..c84e1e0e6109af 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -35,7 +35,7 @@ * Copyright (C) 2011 Andy Walls */ -#include +#include #include #include #include diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c index c7089035bbc10b..5ffd53e005ee56 100644 --- a/drivers/media/i2c/isl7998x.c +++ b/drivers/media/i2c/isl7998x.c @@ -1561,8 +1561,8 @@ static const struct of_device_id isl7998x_of_match[] = { MODULE_DEVICE_TABLE(of, isl7998x_of_match); 
static const struct i2c_device_id isl7998x_id[] = { - { "isl79987", 0 }, - { /* sentinel */ }, + { "isl79987" }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, isl7998x_id); diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c index 9d0a763cd5035e..f3fba9179684e3 100644 --- a/drivers/media/i2c/ks0127.c +++ b/drivers/media/i2c/ks0127.c @@ -677,9 +677,9 @@ static void ks0127_remove(struct i2c_client *client) } static const struct i2c_device_id ks0127_id[] = { - { "ks0127", 0 }, - { "ks0127b", 0 }, - { "ks0122s", 0 }, + { "ks0127" }, + { "ks0127b" }, + { "ks0122s" }, { } }; MODULE_DEVICE_TABLE(i2c, ks0127_id); diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c index 05283ac68f2d8e..f4cc844f4e3cc9 100644 --- a/drivers/media/i2c/lm3560.c +++ b/drivers/media/i2c/lm3560.c @@ -455,8 +455,8 @@ static void lm3560_remove(struct i2c_client *client) } static const struct i2c_device_id lm3560_id_table[] = { - {LM3559_NAME, 0}, - {LM3560_NAME, 0}, + { LM3559_NAME }, + { LM3560_NAME }, {} }; diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c index fab3a7e05f922a..2d16e42ec224e2 100644 --- a/drivers/media/i2c/lm3646.c +++ b/drivers/media/i2c/lm3646.c @@ -386,7 +386,7 @@ static void lm3646_remove(struct i2c_client *client) } static const struct i2c_device_id lm3646_id_table[] = { - {LM3646_NAME, 0}, + { LM3646_NAME }, {} }; diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c index f8a69142aae968..9e1ecfd01e2a7f 100644 --- a/drivers/media/i2c/m52790.c +++ b/drivers/media/i2c/m52790.c @@ -163,7 +163,7 @@ static void m52790_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id m52790_id[] = { - { "m52790", 0 }, + { "m52790" }, { } }; MODULE_DEVICE_TABLE(i2c, m52790_id); diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c index cd73d2096ae451..bf02ca23a284c0 100644 --- a/drivers/media/i2c/max2175.c +++ b/drivers/media/i2c/max2175.c @@ -1413,8 +1413,8 @@ static void max2175_remove(struct i2c_client *client) } static const struct i2c_device_id max2175_id[] = { - { DRIVER_NAME, 0}, - {}, + { DRIVER_NAME }, + {} }; MODULE_DEVICE_TABLE(i2c, max2175_id); diff --git a/drivers/media/i2c/max96714.c b/drivers/media/i2c/max96714.c index c97de66631e0c9..159753b13777cb 100644 --- a/drivers/media/i2c/max96714.c +++ b/drivers/media/i2c/max96714.c @@ -25,6 +25,7 @@ #define MAX96714_NPORTS 2 #define MAX96714_PAD_SINK 0 #define MAX96714_PAD_SOURCE 1 +#define MAX96714_CSI_NLANES 4 /* DEV */ #define MAX96714_REG13 CCI_REG8(0x0d) @@ -52,9 +53,9 @@ #define MAX96714_PATGEN_V2D CCI_REG24(0x254) #define MAX96714_PATGEN_DE_HIGH CCI_REG16(0x257) #define MAX96714_PATGEN_DE_LOW CCI_REG16(0x259) -#define MAX96714_PATGEN_DE_CNT CCI_REG16(0x25B) +#define MAX96714_PATGEN_DE_CNT CCI_REG16(0x25b) #define MAX96714_PATGEN_GRAD_INC CCI_REG8(0x25d) -#define MAX96714_PATGEN_CHKB_COLOR_A CCI_REG24(0x25E) +#define MAX96714_PATGEN_CHKB_COLOR_A CCI_REG24(0x25e) #define MAX96714_PATGEN_CHKB_COLOR_B CCI_REG24(0x261) #define MAX96714_PATGEN_CHKB_RPT_CNT_A CCI_REG8(0x264) #define MAX96714_PATGEN_CHKB_RPT_CNT_B CCI_REG8(0x265) @@ -724,8 +725,9 @@ static int max96714_init_tx_port(struct max96714_priv *priv) * Unused lanes need to be mapped as well to not have * the same lanes mapped twice. 
*/ - for (; lane < 4; lane++) { - unsigned int idx = find_first_zero_bit(&lanes_used, 4); + for (; lane < MAX96714_CSI_NLANES; lane++) { + unsigned int idx = find_first_zero_bit(&lanes_used, + MAX96714_CSI_NLANES); val |= idx << (lane * 2); lanes_used |= BIT(idx); @@ -757,9 +759,7 @@ static int max96714_rxport_disable_poc(struct max96714_priv *priv) static int max96714_parse_dt_txport(struct max96714_priv *priv) { struct device *dev = &priv->client->dev; - struct v4l2_fwnode_endpoint vep = { - .bus_type = V4L2_MBUS_CSI2_DPHY - }; + struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep_fwnode; u32 num_data_lanes; int ret; @@ -791,14 +791,14 @@ static int max96714_parse_dt_txport(struct max96714_priv *priv) } num_data_lanes = vep.bus.mipi_csi2.num_data_lanes; - if (num_data_lanes < 1 || num_data_lanes > 4) { + if (num_data_lanes < 1 || num_data_lanes > MAX96714_CSI_NLANES) { dev_err(dev, "tx: invalid number of data lanes must be 1 to 4\n"); ret = -EINVAL; goto err_free_vep; } - memcpy(&priv->mipi_csi2, &vep.bus.mipi_csi2, sizeof(priv->mipi_csi2)); + priv->mipi_csi2 = vep.bus.mipi_csi2; err_free_vep: v4l2_fwnode_endpoint_free(&vep); diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c index 949306485873c9..4e85b8eb1e7767 100644 --- a/drivers/media/i2c/max96717.c +++ b/drivers/media/i2c/max96717.c @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -24,6 +25,7 @@ #define MAX96717_PORTS 2 #define MAX96717_PAD_SINK 0 #define MAX96717_PAD_SOURCE 1 +#define MAX96717_CSI_NLANES 4 #define MAX96717_DEFAULT_CLKOUT_RATE 24000000UL @@ -38,9 +40,35 @@ #define MAX96717_DEV_REV_MASK GENMASK(3, 0) /* VID_TX Z */ +#define MAX96717_VIDEO_TX0 CCI_REG8(0x110) +#define MAX96717_VIDEO_AUTO_BPP BIT(3) #define MAX96717_VIDEO_TX2 CCI_REG8(0x112) #define MAX96717_VIDEO_PCLKDET BIT(7) +/* VTX_Z */ +#define MAX96717_VTX0 CCI_REG8(0x24e) +#define MAX96717_VTX1 CCI_REG8(0x24f) +#define MAX96717_PATTERN_CLK_FREQ GENMASK(3, 1) +#define MAX96717_VTX_VS_DLY CCI_REG24(0x250) +#define MAX96717_VTX_VS_HIGH CCI_REG24(0x253) +#define MAX96717_VTX_VS_LOW CCI_REG24(0x256) +#define MAX96717_VTX_V2H CCI_REG24(0x259) +#define MAX96717_VTX_HS_HIGH CCI_REG16(0x25c) +#define MAX96717_VTX_HS_LOW CCI_REG16(0x25e) +#define MAX96717_VTX_HS_CNT CCI_REG16(0x260) +#define MAX96717_VTX_V2D CCI_REG24(0x262) +#define MAX96717_VTX_DE_HIGH CCI_REG16(0x265) +#define MAX96717_VTX_DE_LOW CCI_REG16(0x267) +#define MAX96717_VTX_DE_CNT CCI_REG16(0x269) +#define MAX96717_VTX29 CCI_REG8(0x26b) +#define MAX96717_VTX_MODE GENMASK(1, 0) +#define MAX96717_VTX_GRAD_INC CCI_REG8(0x26c) +#define MAX96717_VTX_CHKB_COLOR_A CCI_REG24(0x26d) +#define MAX96717_VTX_CHKB_COLOR_B CCI_REG24(0x270) +#define MAX96717_VTX_CHKB_RPT_CNT_A CCI_REG8(0x273) +#define MAX96717_VTX_CHKB_RPT_CNT_B CCI_REG8(0x274) +#define MAX96717_VTX_CHKB_ALT CCI_REG8(0x275) + /* GPIO */ #define MAX96717_NUM_GPIO 11 #define MAX96717_GPIO_REG_A(gpio) CCI_REG8(0x2be + (gpio) * 3) @@ -82,6 +110,12 @@ /* MISC */ #define PIO_SLEW_1 CCI_REG8(0x570) +enum max96717_vpg_mode { + MAX96717_VPG_DISABLED = 0, + MAX96717_VPG_CHECKERBOARD = 1, + MAX96717_VPG_GRADIENT = 2, +}; + struct max96717_priv { struct i2c_client *client; struct regmap *regmap; @@ -89,6 +123,7 @@ struct max96717_priv { struct v4l2_mbus_config_mipi_csi2 mipi_csi2; struct v4l2_subdev sd; struct media_pad pads[MAX96717_PORTS]; + struct v4l2_ctrl_handler ctrl_handler; struct v4l2_async_notifier notifier; struct v4l2_subdev *source_sd; u16 source_sd_pad; @@ -96,6 +131,7 @@ 
struct max96717_priv { u8 pll_predef_index; struct clk_hw clk_hw; struct gpio_chip gpio_chip; + enum max96717_vpg_mode pattern; }; static inline struct max96717_priv *sd_to_max96717(struct v4l2_subdev *sd) @@ -131,6 +167,118 @@ static inline int max96717_start_csi(struct max96717_priv *priv, bool start) start ? MAX96717_START_PORT_B : 0, NULL); } +static int max96717_apply_patgen_timing(struct max96717_priv *priv, + struct v4l2_subdev_state *state) +{ + struct v4l2_mbus_framefmt *fmt = + v4l2_subdev_state_get_format(state, MAX96717_PAD_SOURCE); + const u32 h_active = fmt->width; + const u32 h_fp = 88; + const u32 h_sw = 44; + const u32 h_bp = 148; + u32 h_tot; + const u32 v_active = fmt->height; + const u32 v_fp = 4; + const u32 v_sw = 5; + const u32 v_bp = 36; + u32 v_tot; + int ret = 0; + + h_tot = h_active + h_fp + h_sw + h_bp; + v_tot = v_active + v_fp + v_sw + v_bp; + + /* 75 Mhz pixel clock */ + cci_update_bits(priv->regmap, MAX96717_VTX1, + MAX96717_PATTERN_CLK_FREQ, 0xa, &ret); + + dev_info(&priv->client->dev, "height: %d width: %d\n", fmt->height, + fmt->width); + + cci_write(priv->regmap, MAX96717_VTX_VS_DLY, 0, &ret); + cci_write(priv->regmap, MAX96717_VTX_VS_HIGH, v_sw * h_tot, &ret); + cci_write(priv->regmap, MAX96717_VTX_VS_LOW, + (v_active + v_fp + v_bp) * h_tot, &ret); + cci_write(priv->regmap, MAX96717_VTX_HS_HIGH, h_sw, &ret); + cci_write(priv->regmap, MAX96717_VTX_HS_LOW, h_active + h_fp + h_bp, + &ret); + cci_write(priv->regmap, MAX96717_VTX_V2D, + h_tot * (v_sw + v_bp) + (h_sw + h_bp), &ret); + cci_write(priv->regmap, MAX96717_VTX_HS_CNT, v_tot, &ret); + cci_write(priv->regmap, MAX96717_VTX_DE_HIGH, h_active, &ret); + cci_write(priv->regmap, MAX96717_VTX_DE_LOW, h_fp + h_sw + h_bp, + &ret); + cci_write(priv->regmap, MAX96717_VTX_DE_CNT, v_active, &ret); + /* B G R */ + cci_write(priv->regmap, MAX96717_VTX_CHKB_COLOR_A, 0xfecc00, &ret); + /* B G R */ + cci_write(priv->regmap, MAX96717_VTX_CHKB_COLOR_B, 0x006aa7, &ret); + cci_write(priv->regmap, MAX96717_VTX_CHKB_RPT_CNT_A, 0x3c, &ret); + cci_write(priv->regmap, MAX96717_VTX_CHKB_RPT_CNT_B, 0x3c, &ret); + cci_write(priv->regmap, MAX96717_VTX_CHKB_ALT, 0x3c, &ret); + cci_write(priv->regmap, MAX96717_VTX_GRAD_INC, 0x10, &ret); + + return ret; +} + +static int max96717_apply_patgen(struct max96717_priv *priv, + struct v4l2_subdev_state *state) +{ + unsigned int val; + int ret = 0; + + if (priv->pattern) + ret = max96717_apply_patgen_timing(priv, state); + + cci_write(priv->regmap, MAX96717_VTX0, priv->pattern ? 0xfb : 0, + &ret); + + val = FIELD_PREP(MAX96717_VTX_MODE, priv->pattern); + cci_update_bits(priv->regmap, MAX96717_VTX29, MAX96717_VTX_MODE, + val, &ret); + return ret; +} + +static int max96717_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct max96717_priv *priv = + container_of(ctrl->handler, struct max96717_priv, ctrl_handler); + int ret; + + switch (ctrl->id) { + case V4L2_CID_TEST_PATTERN: + if (priv->enabled_source_streams) + return -EBUSY; + priv->pattern = ctrl->val; + break; + default: + return -EINVAL; + } + + /* Use bpp from bpp register */ + ret = cci_update_bits(priv->regmap, MAX96717_VIDEO_TX0, + MAX96717_VIDEO_AUTO_BPP, + priv->pattern ? 0 : MAX96717_VIDEO_AUTO_BPP, + NULL); + + /* + * Pattern generator doesn't work with tunnel mode. + * Needs RGB color format and deserializer tunnel mode must be disabled. + */ + return cci_update_bits(priv->regmap, MAX96717_MIPI_RX_EXT11, + MAX96717_TUN_MODE, + priv->pattern ? 
0 : MAX96717_TUN_MODE, &ret); +} + +static const char * const max96717_test_pattern[] = { + "Disabled", + "Checkerboard", + "Gradient" +}; + +static const struct v4l2_ctrl_ops max96717_ctrl_ops = { + .s_ctrl = max96717_s_ctrl, +}; + static int max96717_gpiochip_get(struct gpio_chip *gpiochip, unsigned int offset) { @@ -348,24 +496,28 @@ static int max96717_enable_streams(struct v4l2_subdev *sd, u64 streams_mask) { struct max96717_priv *priv = sd_to_max96717(sd); - struct device *dev = &priv->client->dev; u64 sink_streams; int ret; - sink_streams = v4l2_subdev_state_xlate_streams(state, - MAX96717_PAD_SOURCE, - MAX96717_PAD_SINK, - &streams_mask); - if (!priv->enabled_source_streams) max96717_start_csi(priv, true); - ret = v4l2_subdev_enable_streams(priv->source_sd, priv->source_sd_pad, - sink_streams); - if (ret) { - dev_err(dev, "Fail to start streams:%llu on remote subdev\n", - sink_streams); + ret = max96717_apply_patgen(priv, state); + if (ret) goto stop_csi; + + if (!priv->pattern) { + sink_streams = + v4l2_subdev_state_xlate_streams(state, + MAX96717_PAD_SOURCE, + MAX96717_PAD_SINK, + &streams_mask); + + ret = v4l2_subdev_enable_streams(priv->source_sd, + priv->source_sd_pad, + sink_streams); + if (ret) + goto stop_csi; } priv->enabled_source_streams |= streams_mask; @@ -375,6 +527,7 @@ static int max96717_enable_streams(struct v4l2_subdev *sd, stop_csi: if (!priv->enabled_source_streams) max96717_start_csi(priv, false); + return ret; } @@ -394,13 +547,23 @@ static int max96717_disable_streams(struct v4l2_subdev *sd, if (!priv->enabled_source_streams) max96717_start_csi(priv, false); - sink_streams = v4l2_subdev_state_xlate_streams(state, - MAX96717_PAD_SOURCE, - MAX96717_PAD_SINK, - &streams_mask); + if (!priv->pattern) { + int ret; + + sink_streams = + v4l2_subdev_state_xlate_streams(state, + MAX96717_PAD_SOURCE, + MAX96717_PAD_SINK, + &streams_mask); + + ret = v4l2_subdev_disable_streams(priv->source_sd, + priv->source_sd_pad, + sink_streams); + if (ret) + return ret; + } - return v4l2_subdev_disable_streams(priv->source_sd, priv->source_sd_pad, - sink_streams); + return 0; } static const struct v4l2_subdev_pad_ops max96717_pad_ops = { @@ -513,6 +676,19 @@ static int max96717_subdev_init(struct max96717_priv *priv) v4l2_i2c_subdev_init(&priv->sd, priv->client, &max96717_subdev_ops); priv->sd.internal_ops = &max96717_internal_ops; + v4l2_ctrl_handler_init(&priv->ctrl_handler, 1); + priv->sd.ctrl_handler = &priv->ctrl_handler; + + v4l2_ctrl_new_std_menu_items(&priv->ctrl_handler, + &max96717_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(max96717_test_pattern) - 1, + 0, 0, max96717_test_pattern); + if (priv->ctrl_handler.error) { + ret = priv->ctrl_handler.error; + goto err_free_ctrl; + } + priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS; priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; priv->sd.entity.ops = &max96717_entity_ops; @@ -552,6 +728,8 @@ static int max96717_subdev_init(struct max96717_priv *priv) v4l2_subdev_cleanup(&priv->sd); err_entity_cleanup: media_entity_cleanup(&priv->sd.entity); +err_free_ctrl: + v4l2_ctrl_handler_free(&priv->ctrl_handler); return ret; } @@ -563,6 +741,7 @@ static void max96717_subdev_uninit(struct max96717_priv *priv) v4l2_async_nf_cleanup(&priv->notifier); v4l2_subdev_cleanup(&priv->sd); media_entity_cleanup(&priv->sd.entity); + v4l2_ctrl_handler_free(&priv->ctrl_handler); } struct max96717_pll_predef_freq { @@ -588,11 +767,8 @@ max96717_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) static unsigned 
int max96717_clk_find_best_index(struct max96717_priv *priv, unsigned long rate) { - unsigned int i, idx; - unsigned long diff_new, diff_old; - - diff_old = U32_MAX; - idx = 0; + unsigned int i, idx = 0; + unsigned long diff_new, diff_old = U32_MAX; for (i = 0; i < ARRAY_SIZE(max96717_predef_freqs); i++) { diff_new = abs(rate - max96717_predef_freqs[i].freq); @@ -679,8 +855,7 @@ static int max96717_register_clkout(struct max96717_priv *priv) struct clk_init_data init = { .ops = &max96717_clk_ops }; int ret; - init.name = kasprintf(GFP_KERNEL, "max96717.%s.clk_out", - dev_name(dev)); + init.name = kasprintf(GFP_KERNEL, "max96717.%s.clk_out", dev_name(dev)); if (!init.name) return -ENOMEM; @@ -763,8 +938,9 @@ static int max96717_init_csi_lanes(struct max96717_priv *priv) * Unused lanes need to be mapped as well to not have * the same lanes mapped twice. */ - for (; lane < 4; lane++) { - unsigned int idx = find_first_zero_bit(&lanes_used, 4); + for (; lane < MAX96717_CSI_NLANES; lane++) { + unsigned int idx = find_first_zero_bit(&lanes_used, + MAX96717_CSI_NLANES); val |= idx << (lane * 2); lanes_used |= BIT(idx); @@ -818,9 +994,7 @@ static int max96717_hw_init(struct max96717_priv *priv) static int max96717_parse_dt(struct max96717_priv *priv) { struct device *dev = &priv->client->dev; - struct v4l2_fwnode_endpoint vep = { - .bus_type = V4L2_MBUS_CSI2_DPHY - }; + struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep_fwnode; unsigned char num_data_lanes; int ret; @@ -838,11 +1012,11 @@ static int max96717_parse_dt(struct max96717_priv *priv) return dev_err_probe(dev, ret, "Failed to parse sink endpoint"); num_data_lanes = vep.bus.mipi_csi2.num_data_lanes; - if (num_data_lanes < 1 || num_data_lanes > 4) + if (num_data_lanes < 1 || num_data_lanes > MAX96717_CSI_NLANES) return dev_err_probe(dev, -EINVAL, "Invalid data lanes must be 1 to 4\n"); - memcpy(&priv->mipi_csi2, &vep.bus.mipi_csi2, sizeof(priv->mipi_csi2)); + priv->mipi_csi2 = vep.bus.mipi_csi2; return 0; } diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c index 5b72d443422467..57ba3693649abe 100644 --- a/drivers/media/i2c/ml86v7667.c +++ b/drivers/media/i2c/ml86v7667.c @@ -424,8 +424,8 @@ static void ml86v7667_remove(struct i2c_client *client) } static const struct i2c_device_id ml86v7667_id[] = { - {DRV_NAME, 0}, - {}, + { DRV_NAME }, + {} }; MODULE_DEVICE_TABLE(i2c, ml86v7667_id); diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c index 599a5bc7cbb352..4c0b0ad68c084e 100644 --- a/drivers/media/i2c/msp3400-driver.c +++ b/drivers/media/i2c/msp3400-driver.c @@ -874,7 +874,7 @@ static const struct dev_pm_ops msp3400_pm_ops = { }; static const struct i2c_device_id msp_id[] = { - { "msp3400", 0 }, + { "msp3400" }, { } }; MODULE_DEVICE_TABLE(i2c, msp_id); diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c index ad1a3ab774119a..12d3e86bdc0fdc 100644 --- a/drivers/media/i2c/mt9m001.c +++ b/drivers/media/i2c/mt9m001.c @@ -854,7 +854,7 @@ static void mt9m001_remove(struct i2c_client *client) } static const struct i2c_device_id mt9m001_id[] = { - { "mt9m001", 0 }, + { "mt9m001" }, { } }; MODULE_DEVICE_TABLE(i2c, mt9m001_id); diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c index ceeeb94c38d5d2..9aa5dcda380570 100644 --- a/drivers/media/i2c/mt9m111.c +++ b/drivers/media/i2c/mt9m111.c @@ -1383,7 +1383,7 @@ static const struct of_device_id mt9m111_of_match[] = { MODULE_DEVICE_TABLE(of, mt9m111_of_match); 
static const struct i2c_device_id mt9m111_id[] = { - { "mt9m111", 0 }, + { "mt9m111" }, { } }; MODULE_DEVICE_TABLE(i2c, mt9m111_id); diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c index f4b48121235633..d8735c246e5283 100644 --- a/drivers/media/i2c/mt9p031.c +++ b/drivers/media/i2c/mt9p031.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -112,11 +113,6 @@ #define MT9P031_TEST_PATTERN_RED 0xa2 #define MT9P031_TEST_PATTERN_BLUE 0xa3 -enum mt9p031_model { - MT9P031_MODEL_COLOR, - MT9P031_MODEL_MONOCHROME, -}; - struct mt9p031 { struct v4l2_subdev subdev; struct media_pad pad; @@ -129,7 +125,7 @@ struct mt9p031 { struct clk *clk; struct regulator_bulk_data regulators[3]; - enum mt9p031_model model; + u32 code; struct aptina_pll pll; unsigned int clk_div; bool use_pll; @@ -712,12 +708,7 @@ static int mt9p031_init_state(struct v4l2_subdev *subdev, crop->height = MT9P031_WINDOW_HEIGHT_DEF; format = __mt9p031_get_pad_format(mt9p031, sd_state, 0, which); - - if (mt9p031->model == MT9P031_MODEL_MONOCHROME) - format->code = MEDIA_BUS_FMT_Y12_1X12; - else - format->code = MEDIA_BUS_FMT_SGRBG12_1X12; - + format->code = mt9p031->code; format->width = MT9P031_WINDOW_WIDTH_DEF; format->height = MT9P031_WINDOW_HEIGHT_DEF; format->field = V4L2_FIELD_NONE; @@ -1102,7 +1093,6 @@ mt9p031_get_pdata(struct i2c_client *client) static int mt9p031_probe(struct i2c_client *client) { - const struct i2c_device_id *did = i2c_client_get_device_id(client); struct mt9p031_platform_data *pdata = mt9p031_get_pdata(client); struct i2c_adapter *adapter = client->adapter; struct mt9p031 *mt9p031; @@ -1127,7 +1117,7 @@ static int mt9p031_probe(struct i2c_client *client) mt9p031->pdata = pdata; mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF; mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC; - mt9p031->model = did->driver_data; + mt9p031->code = (uintptr_t)i2c_get_match_data(client); mt9p031->regulators[0].supply = "vdd"; mt9p031->regulators[1].supply = "vdd_io"; @@ -1224,26 +1214,24 @@ static void mt9p031_remove(struct i2c_client *client) } static const struct i2c_device_id mt9p031_id[] = { - { "mt9p006", MT9P031_MODEL_COLOR }, - { "mt9p031", MT9P031_MODEL_COLOR }, - { "mt9p031m", MT9P031_MODEL_MONOCHROME }, - { } + { "mt9p006", MEDIA_BUS_FMT_SGRBG12_1X12 }, + { "mt9p031", MEDIA_BUS_FMT_SGRBG12_1X12 }, + { "mt9p031m", MEDIA_BUS_FMT_Y12_1X12 }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, mt9p031_id); -#if IS_ENABLED(CONFIG_OF) static const struct of_device_id mt9p031_of_match[] = { - { .compatible = "aptina,mt9p006", }, - { .compatible = "aptina,mt9p031", }, - { .compatible = "aptina,mt9p031m", }, - { /* sentinel */ }, + { .compatible = "aptina,mt9p006", .data = (void *)MEDIA_BUS_FMT_SGRBG12_1X12 }, + { .compatible = "aptina,mt9p031", .data = (void *)MEDIA_BUS_FMT_SGRBG12_1X12 }, + { .compatible = "aptina,mt9p031m", .data = (void *)MEDIA_BUS_FMT_Y12_1X12 }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mt9p031_of_match); -#endif static struct i2c_driver mt9p031_i2c_driver = { .driver = { - .of_match_table = of_match_ptr(mt9p031_of_match), + .of_match_table = mt9p031_of_match, .name = "mt9p031", }, .probe = mt9p031_probe, diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c index fb1588c57cc806..878dff9b757773 100644 --- a/drivers/media/i2c/mt9t112.c +++ b/drivers/media/i2c/mt9t112.c @@ -1109,7 +1109,7 @@ static void mt9t112_remove(struct i2c_client *client) } static const struct i2c_device_id mt9t112_id[] = { - { "mt9t112", 0 }, + { "mt9t112" 
}, { } }; MODULE_DEVICE_TABLE(i2c, mt9t112_id); diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c index 8834ff8786e598..055b7915260a23 100644 --- a/drivers/media/i2c/mt9v011.c +++ b/drivers/media/i2c/mt9v011.c @@ -582,7 +582,7 @@ static void mt9v011_remove(struct i2c_client *c) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id mt9v011_id[] = { - { "mt9v011", 0 }, + { "mt9v011" }, { } }; MODULE_DEVICE_TABLE(i2c, mt9v011_id); diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index b0b98ed3c1502f..723fe138e7bcc0 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c @@ -1263,8 +1263,9 @@ static void mt9v111_remove(struct i2c_client *client) static const struct of_device_id mt9v111_of_match[] = { { .compatible = "aptina,mt9v111", }, - { /* sentinel */ }, + { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, mt9v111_of_match); static struct i2c_driver mt9v111_driver = { .driver = { diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c index bac9597faf68f7..78d5d406e4b72a 100644 --- a/drivers/media/i2c/og01a1b.c +++ b/drivers/media/i2c/og01a1b.c @@ -1,12 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022 Intel Corporation. -#include +#include #include +#include #include +#include #include #include #include +#include #include #include #include @@ -418,6 +421,12 @@ static const struct og01a1b_mode supported_modes[] = { }; struct og01a1b { + struct clk *xvclk; + struct gpio_desc *reset_gpio; + struct regulator *avdd; + struct regulator *dovdd; + struct regulator *dvdd; + struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; @@ -898,8 +907,10 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b) return 0; } -static int og01a1b_check_hwcfg(struct device *dev) +static int og01a1b_check_hwcfg(struct og01a1b *og01a1b) { + struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd); + struct device *dev = &client->dev; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); struct v4l2_fwnode_endpoint bus_cfg = { @@ -913,10 +924,13 @@ static int og01a1b_check_hwcfg(struct device *dev) return -ENXIO; ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk); - if (ret) { - dev_err(dev, "can't get clock frequency"); - return ret; + if (!og01a1b->xvclk) { + dev_err(dev, "can't get clock frequency"); + return ret; + } + + mclk = clk_get_rate(og01a1b->xvclk); } if (mclk != OG01A1B_MCLK) { @@ -967,6 +981,83 @@ static int og01a1b_check_hwcfg(struct device *dev) return ret; } +/* Power/clock management functions */ +static int og01a1b_power_on(struct device *dev) +{ + unsigned long delay = DIV_ROUND_UP(8192UL * USEC_PER_SEC, OG01A1B_MCLK); + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct og01a1b *og01a1b = to_og01a1b(sd); + int ret; + + if (og01a1b->avdd) { + ret = regulator_enable(og01a1b->avdd); + if (ret) + return ret; + } + + if (og01a1b->dovdd) { + ret = regulator_enable(og01a1b->dovdd); + if (ret) + goto avdd_disable; + } + + if (og01a1b->dvdd) { + ret = regulator_enable(og01a1b->dvdd); + if (ret) + goto dovdd_disable; + } + + ret = clk_prepare_enable(og01a1b->xvclk); + if (ret) + goto dvdd_disable; + + gpiod_set_value_cansleep(og01a1b->reset_gpio, 0); + + if (og01a1b->reset_gpio) + usleep_range(5 * USEC_PER_MSEC, 6 * USEC_PER_MSEC); + else if (og01a1b->xvclk) + usleep_range(delay, 2 * delay); + + return 0; + +dvdd_disable: + if (og01a1b->dvdd) + regulator_disable(og01a1b->dvdd); 
+dovdd_disable: + if (og01a1b->dovdd) + regulator_disable(og01a1b->dovdd); +avdd_disable: + if (og01a1b->avdd) + regulator_disable(og01a1b->avdd); + + return ret; +} + +static int og01a1b_power_off(struct device *dev) +{ + unsigned long delay = DIV_ROUND_UP(512 * USEC_PER_SEC, OG01A1B_MCLK); + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct og01a1b *og01a1b = to_og01a1b(sd); + + if (og01a1b->xvclk) + usleep_range(delay, 2 * delay); + + clk_disable_unprepare(og01a1b->xvclk); + + gpiod_set_value_cansleep(og01a1b->reset_gpio, 1); + + if (og01a1b->dvdd) + regulator_disable(og01a1b->dvdd); + + if (og01a1b->dovdd) + regulator_disable(og01a1b->dovdd); + + if (og01a1b->avdd) + regulator_disable(og01a1b->avdd); + + return 0; +} + static void og01a1b_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); @@ -984,22 +1075,78 @@ static int og01a1b_probe(struct i2c_client *client) struct og01a1b *og01a1b; int ret; - ret = og01a1b_check_hwcfg(&client->dev); + og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL); + if (!og01a1b) + return -ENOMEM; + + v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops); + + og01a1b->xvclk = devm_clk_get_optional(&client->dev, NULL); + if (IS_ERR(og01a1b->xvclk)) { + ret = PTR_ERR(og01a1b->xvclk); + dev_err(&client->dev, "failed to get xvclk clock: %d\n", ret); + return ret; + } + + ret = og01a1b_check_hwcfg(og01a1b); if (ret) { dev_err(&client->dev, "failed to check HW configuration: %d", ret); return ret; } - og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL); - if (!og01a1b) - return -ENOMEM; + og01a1b->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(og01a1b->reset_gpio)) { + dev_err(&client->dev, "cannot get reset GPIO\n"); + return PTR_ERR(og01a1b->reset_gpio); + } + + og01a1b->avdd = devm_regulator_get_optional(&client->dev, "avdd"); + if (IS_ERR(og01a1b->avdd)) { + ret = PTR_ERR(og01a1b->avdd); + if (ret != -ENODEV) { + dev_err_probe(&client->dev, ret, + "Failed to get 'avdd' regulator\n"); + return ret; + } + + og01a1b->avdd = NULL; + } + + og01a1b->dovdd = devm_regulator_get_optional(&client->dev, "dovdd"); + if (IS_ERR(og01a1b->dovdd)) { + ret = PTR_ERR(og01a1b->dovdd); + if (ret != -ENODEV) { + dev_err_probe(&client->dev, ret, + "Failed to get 'dovdd' regulator\n"); + return ret; + } + + og01a1b->dovdd = NULL; + } + + og01a1b->dvdd = devm_regulator_get_optional(&client->dev, "dvdd"); + if (IS_ERR(og01a1b->dvdd)) { + ret = PTR_ERR(og01a1b->dvdd); + if (ret != -ENODEV) { + dev_err_probe(&client->dev, ret, + "Failed to get 'dvdd' regulator\n"); + return ret; + } + + og01a1b->dvdd = NULL; + } + + /* The sensor must be powered on to read the CHIP_ID register */ + ret = og01a1b_power_on(&client->dev); + if (ret) + return ret; - v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops); ret = og01a1b_identify_module(og01a1b); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); - return ret; + goto power_off; } mutex_init(&og01a1b->mutex); @@ -1028,10 +1175,7 @@ static int og01a1b_probe(struct i2c_client *client) goto probe_error_media_entity_cleanup; } - /* - * Device is already turned on by i2c-core with ACPI domain PM. - * Enable runtime PM and turn off the device. 
- */ + /* Enable runtime PM and turn off the device */ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); @@ -1045,9 +1189,16 @@ static int og01a1b_probe(struct i2c_client *client) v4l2_ctrl_handler_free(og01a1b->sd.ctrl_handler); mutex_destroy(&og01a1b->mutex); +power_off: + og01a1b_power_off(&client->dev); + return ret; } +static const struct dev_pm_ops og01a1b_pm_ops = { + SET_RUNTIME_PM_OPS(og01a1b_power_off, og01a1b_power_on, NULL) +}; + #ifdef CONFIG_ACPI static const struct acpi_device_id og01a1b_acpi_ids[] = { {"OVTI01AC"}, @@ -1057,10 +1208,18 @@ static const struct acpi_device_id og01a1b_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, og01a1b_acpi_ids); #endif +static const struct of_device_id og01a1b_of_match[] = { + { .compatible = "ovti,og01a1b" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, og01a1b_of_match); + static struct i2c_driver og01a1b_i2c_driver = { .driver = { .name = "og01a1b", + .pm = &og01a1b_pm_ops, .acpi_match_table = ACPI_PTR(og01a1b_acpi_ids), + .of_match_table = og01a1b_of_match, }, .probe = og01a1b_probe, .remove = og01a1b_remove, diff --git a/drivers/media/i2c/ov01a10.c b/drivers/media/i2c/ov01a10.c index 5606437f37d0ec..0b9fb1ddbe5970 100644 --- a/drivers/media/i2c/ov01a10.c +++ b/drivers/media/i2c/ov01a10.c @@ -3,7 +3,7 @@ * Copyright (c) 2023 Intel Corporation. */ -#include +#include #include #include diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c index 48df077522ad0b..7ead3c720e0e11 100644 --- a/drivers/media/i2c/ov08x40.c +++ b/drivers/media/i2c/ov08x40.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022 Intel Corporation. -#include +#include #include #include #include diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c index 09387e335d805b..7a3fc1d2851419 100644 --- a/drivers/media/i2c/ov13858.c +++ b/drivers/media/i2c/ov13858.c @@ -1740,8 +1740,8 @@ static void ov13858_remove(struct i2c_client *client) } static const struct i2c_device_id ov13858_id_table[] = { - {"ov13858", 0}, - {}, + { "ov13858" }, + {} }; MODULE_DEVICE_TABLE(i2c, ov13858_id_table); diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c index 67c4bd2916e80f..d27fc2df64e6d8 100644 --- a/drivers/media/i2c/ov2640.c +++ b/drivers/media/i2c/ov2640.c @@ -1271,7 +1271,7 @@ static void ov2640_remove(struct i2c_client *client) } static const struct i2c_device_id ov2640_id[] = { - { "ov2640", 0 }, + { "ov2640" }, { } }; MODULE_DEVICE_TABLE(i2c, ov2640_id); diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c index d1653d7431d0e9..06b7896c3eaf14 100644 --- a/drivers/media/i2c/ov2659.c +++ b/drivers/media/i2c/ov2659.c @@ -1551,8 +1551,8 @@ static const struct dev_pm_ops ov2659_pm_ops = { }; static const struct i2c_device_id ov2659_id[] = { - { "ov2659", 0 }, - { /* sentinel */ }, + { "ov2659" }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, ov2659_id); diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c index c48dbcde98770d..bd0b2f0f0d45b9 100644 --- a/drivers/media/i2c/ov2740.c +++ b/drivers/media/i2c/ov2740.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020 Intel Corporation. 
-#include +#include #include #include #include diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 5162d45fe73bd7..c1d3fce4a7d383 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -4003,8 +4003,8 @@ static const struct dev_pm_ops ov5640_pm_ops = { }; static const struct i2c_device_id ov5640_id[] = { - {"ov5640", 0}, - {}, + { "ov5640" }, + {} }; MODULE_DEVICE_TABLE(i2c, ov5640_id); diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c index 3b22b9e127873d..0c32bd2940ecf0 100644 --- a/drivers/media/i2c/ov5645.c +++ b/drivers/media/i2c/ov5645.c @@ -635,7 +635,7 @@ static int ov5645_set_register_array(struct ov5645 *ov5645, return 0; } -static int ov5645_set_power_off(struct device *dev) +static void __ov5645_set_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov5645 *ov5645 = to_ov5645(sd); @@ -643,8 +643,16 @@ static int ov5645_set_power_off(struct device *dev) ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58); gpiod_set_value_cansleep(ov5645->rst_gpio, 1); gpiod_set_value_cansleep(ov5645->enable_gpio, 0); - clk_disable_unprepare(ov5645->xclk); regulator_bulk_disable(OV5645_NUM_SUPPLIES, ov5645->supplies); +} + +static int ov5645_set_power_off(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov5645 *ov5645 = to_ov5645(sd); + + __ov5645_set_power_off(dev); + clk_disable_unprepare(ov5645->xclk); return 0; } @@ -686,7 +694,8 @@ static int ov5645_set_power_on(struct device *dev) return 0; exit: - ov5645_set_power_off(dev); + __ov5645_set_power_off(dev); + clk_disable_unprepare(ov5645->xclk); return ret; } @@ -1272,7 +1281,7 @@ static void ov5645_remove(struct i2c_client *client) } static const struct i2c_device_id ov5645_id[] = { - { "ov5645", 0 }, + { "ov5645" }, {} }; MODULE_DEVICE_TABLE(i2c, ov5645_id); diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c index 0fb4d7bff9d143..a727beb9d57e29 100644 --- a/drivers/media/i2c/ov5647.c +++ b/drivers/media/i2c/ov5647.c @@ -1487,7 +1487,7 @@ static const struct dev_pm_ops ov5647_pm_ops = { }; static const struct i2c_device_id ov5647_id[] = { - { "ov5647", 0 }, + { "ov5647" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, ov5647_id); diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c index 2aee85965cf73a..f051045d340f23 100644 --- a/drivers/media/i2c/ov5670.c +++ b/drivers/media/i2c/ov5670.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2017 Intel Corporation. -#include +#include #include #include #include diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c index 3641911bc73f68..2833b14ee139dc 100644 --- a/drivers/media/i2c/ov5675.c +++ b/drivers/media/i2c/ov5675.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Intel Corporation. 
-#include +#include #include #include @@ -972,12 +972,10 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable) static int ov5675_power_off(struct device *dev) { - /* 512 xvclk cycles after the last SCCB transation or MIPI frame end */ - u32 delay_us = DIV_ROUND_UP(512, OV5675_XVCLK_19_2 / 1000 / 1000); struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov5675 *ov5675 = to_ov5675(sd); - usleep_range(delay_us, delay_us * 2); + usleep_range(90, 100); clk_disable_unprepare(ov5675->xvclk); gpiod_set_value_cansleep(ov5675->reset_gpio, 1); @@ -988,7 +986,6 @@ static int ov5675_power_off(struct device *dev) static int ov5675_power_on(struct device *dev) { - u32 delay_us = DIV_ROUND_UP(8192, OV5675_XVCLK_19_2 / 1000 / 1000); struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov5675 *ov5675 = to_ov5675(sd); int ret; @@ -1014,8 +1011,11 @@ static int ov5675_power_on(struct device *dev) gpiod_set_value_cansleep(ov5675->reset_gpio, 0); - /* 8192 xvclk cycles prior to the first SCCB transation */ - usleep_range(delay_us, delay_us * 2); + /* Worst case quiescence gap is 1.365 milliseconds @ 6MHz XVCLK + * Add an additional threshold grace period to ensure reset + * completion before initiating our first I2C transaction. + */ + usleep_range(1500, 1600); return 0; } diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c index b65befb22a7916..9c76271611422c 100644 --- a/drivers/media/i2c/ov6650.c +++ b/drivers/media/i2c/ov6650.c @@ -1128,7 +1128,7 @@ static void ov6650_remove(struct i2c_client *client) } static const struct i2c_device_id ov6650_id[] = { - { "ov6650", 0 }, + { "ov6650" }, { } }; MODULE_DEVICE_TABLE(i2c, ov6650_id); diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c index 293f5f4043587d..9f68d89936eb76 100644 --- a/drivers/media/i2c/ov7640.c +++ b/drivers/media/i2c/ov7640.c @@ -77,7 +77,7 @@ static void ov7640_remove(struct i2c_client *client) } static const struct i2c_device_id ov7640_id[] = { - { "ov7640", 0 }, + { "ov7640" }, { } }; MODULE_DEVICE_TABLE(i2c, ov7640_id); diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c index 3e36a55274efb2..3b0fdb3c70c0b3 100644 --- a/drivers/media/i2c/ov772x.c +++ b/drivers/media/i2c/ov772x.c @@ -1546,7 +1546,7 @@ static void ov772x_remove(struct i2c_client *client) } static const struct i2c_device_id ov772x_id[] = { - { "ov772x", 0 }, + { "ov772x" }, { } }; MODULE_DEVICE_TABLE(i2c, ov772x_id); diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c index 47b1b14d8796bd..0830676e5d5a40 100644 --- a/drivers/media/i2c/ov7740.c +++ b/drivers/media/i2c/ov7740.c @@ -1152,7 +1152,7 @@ static int __maybe_unused ov7740_runtime_resume(struct device *dev) } static const struct i2c_device_id ov7740_id[] = { - { "ov7740", 0 }, + { "ov7740" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, ov7740_id); diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c index 6ffe10e57b5b14..3b94338f55ed39 100644 --- a/drivers/media/i2c/ov8856.c +++ b/drivers/media/i2c/ov8856.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Intel Corporation. -#include +#include #include #include #include diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c index 174c65f768860e..326f50a5ab51ff 100644 --- a/drivers/media/i2c/ov8858.c +++ b/drivers/media/i2c/ov8858.c @@ -5,7 +5,7 @@ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd. 
*/ -#include +#include #include #include diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c index 251a4b53491438..9f52af6f047f3c 100644 --- a/drivers/media/i2c/ov9282.c +++ b/drivers/media/i2c/ov9282.c @@ -4,7 +4,7 @@ * * Copyright (C) 2021 Intel Corporation */ -#include +#include #include #include diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c index e9a52a8a9dc0af..01dbc0ba89c878 100644 --- a/drivers/media/i2c/ov9640.c +++ b/drivers/media/i2c/ov9640.c @@ -751,7 +751,7 @@ static void ov9640_remove(struct i2c_client *client) } static const struct i2c_device_id ov9640_id[] = { - { "ov9640", 0 }, + { "ov9640" }, { } }; MODULE_DEVICE_TABLE(i2c, ov9640_id); diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c index 66cd0e9ddc9ab8..56df97c9886b51 100644 --- a/drivers/media/i2c/ov9650.c +++ b/drivers/media/i2c/ov9650.c @@ -1566,8 +1566,8 @@ static void ov965x_remove(struct i2c_client *client) } static const struct i2c_device_id ov965x_id[] = { - { "OV9650", 0 }, - { "OV9652", 0 }, + { "OV9650" }, + { "OV9652" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, ov965x_id); diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c index d997285974318f..bf9e2adbff3473 100644 --- a/drivers/media/i2c/ov9734.c +++ b/drivers/media/i2c/ov9734.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020 Intel Corporation. -#include +#include #include #include #include diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c index a59db10153cdcb..b7ca39f63dba84 100644 --- a/drivers/media/i2c/rj54n1cb0c.c +++ b/drivers/media/i2c/rj54n1cb0c.c @@ -1410,7 +1410,7 @@ static void rj54n1_remove(struct i2c_client *client) } static const struct i2c_device_id rj54n1_id[] = { - { "rj54n1cb0c", 0 }, + { "rj54n1cb0c" }, { } }; MODULE_DEVICE_TABLE(i2c, rj54n1_id); diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index cf6be509af33bd..7716dfe2b8c9e0 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1392,6 +1392,16 @@ static int __s5c73m3_power_on(struct s5c73m3 *state) return ret; } +/* + * This function has been created just to avoid a smatch warning, + * please do not merge into __s5c73m3_power_off() until you have + * confirmed that it does not introduce a new warning. 
+ */ +static void s5c73m3_enable_clk(struct s5c73m3 *state) +{ + clk_prepare_enable(state->clock); +} + static int __s5c73m3_power_off(struct s5c73m3 *state) { int i, ret; @@ -1421,7 +1431,8 @@ static int __s5c73m3_power_off(struct s5c73m3 *state) state->supplies[i].supply, r); } - clk_prepare_enable(state->clock); + s5c73m3_enable_clk(state); + return ret; } @@ -1724,7 +1735,7 @@ static void s5c73m3_remove(struct i2c_client *client) } static const struct i2c_device_id s5c73m3_id[] = { - { DRIVER_NAME, 0 }, + { DRIVER_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, s5c73m3_id); diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c index 6b11039c357982..24f399cd2124d8 100644 --- a/drivers/media/i2c/s5k5baf.c +++ b/drivers/media/i2c/s5k5baf.c @@ -2018,8 +2018,8 @@ static void s5k5baf_remove(struct i2c_client *c) } static const struct i2c_device_id s5k5baf_id[] = { - { S5K5BAF_DRIVER_NAME, 0 }, - { }, + { S5K5BAF_DRIVER_NAME }, + { } }; MODULE_DEVICE_TABLE(i2c, s5k5baf_id); diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c index dea9fc09356fa5..fb09e4560d8a0e 100644 --- a/drivers/media/i2c/saa6588.c +++ b/drivers/media/i2c/saa6588.c @@ -496,7 +496,7 @@ static void saa6588_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa6588_id[] = { - { "saa6588", 0 }, + { "saa6588" }, { } }; MODULE_DEVICE_TABLE(i2c, saa6588_id); diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c index 897eaa669b8622..1ed8b5edb3fbaa 100644 --- a/drivers/media/i2c/saa6752hs.c +++ b/drivers/media/i2c/saa6752hs.c @@ -770,7 +770,7 @@ static void saa6752hs_remove(struct i2c_client *client) } static const struct i2c_device_id saa6752hs_id[] = { - { "saa6752hs", 0 }, + { "saa6752hs" }, { } }; MODULE_DEVICE_TABLE(i2c, saa6752hs_id); diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c index 1520790338ce27..942aeeb40c524f 100644 --- a/drivers/media/i2c/saa7110.c +++ b/drivers/media/i2c/saa7110.c @@ -439,7 +439,7 @@ static void saa7110_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa7110_id[] = { - { "saa7110", 0 }, + { "saa7110" }, { } }; MODULE_DEVICE_TABLE(i2c, saa7110_id); diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c index 933ec0171430c2..b0793bb0c02a44 100644 --- a/drivers/media/i2c/saa717x.c +++ b/drivers/media/i2c/saa717x.c @@ -1334,7 +1334,7 @@ static void saa717x_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa717x_id[] = { - { "saa717x", 0 }, + { "saa717x" }, { } }; MODULE_DEVICE_TABLE(i2c, saa717x_id); diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c index 5535d71f4860ca..c04e452a332bc2 100644 --- a/drivers/media/i2c/saa7185.c +++ b/drivers/media/i2c/saa7185.c @@ -334,7 +334,7 @@ static void saa7185_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa7185_id[] = { - { "saa7185", 0 }, + { "saa7185" }, { } }; MODULE_DEVICE_TABLE(i2c, saa7185_id); diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c index 0f53834f3ae4da..16072a9f824770 100644 --- a/drivers/media/i2c/sony-btf-mpx.c +++ b/drivers/media/i2c/sony-btf-mpx.c @@ -366,7 +366,7 @@ static void sony_btf_mpx_remove(struct i2c_client 
*client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id sony_btf_mpx_id[] = { - { "sony-btf-mpx", 0 }, + { "sony-btf-mpx" }, { } }; MODULE_DEVICE_TABLE(i2c, sony_btf_mpx_id); diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index 0307fee3cce914..65d58ddf02870d 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -2197,7 +2197,7 @@ static void tc358743_remove(struct i2c_client *client) } static const struct i2c_device_id tc358743_id[] = { - {"tc358743", 0}, + { "tc358743" }, {} }; diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c index edf79107adc518..389582420ba782 100644 --- a/drivers/media/i2c/tc358746.c +++ b/drivers/media/i2c/tc358746.c @@ -1616,6 +1616,16 @@ static void tc358746_remove(struct i2c_client *client) pm_runtime_dont_use_autosuspend(sd->dev); } +/* + * This function has been created just to avoid a smatch warning, + * please do not merge it into tc358746_suspend until you have + * confirmed that it does not introduce a new warning. + */ +static void tc358746_clk_enable(struct tc358746 *tc358746) +{ + clk_prepare_enable(tc358746->refclk); +} + static int tc358746_suspend(struct device *dev) { struct tc358746 *tc358746 = dev_get_drvdata(dev); @@ -1626,7 +1636,7 @@ static int tc358746_suspend(struct device *dev) err = regulator_bulk_disable(ARRAY_SIZE(tc358746_supplies), tc358746->supplies); if (err) - clk_prepare_enable(tc358746->refclk); + tc358746_clk_enable(tc358746); return err; } diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c index 58ce8fec3041a2..3b7e5ff5b010b5 100644 --- a/drivers/media/i2c/tda1997x.c +++ b/drivers/media/i2c/tda1997x.c @@ -2514,7 +2514,7 @@ static void tda1997x_codec_remove(struct snd_soc_component *component) { } -static struct snd_soc_component_driver tda1997x_codec_driver = { +static const struct snd_soc_component_driver tda1997x_codec_driver = { .probe = tda1997x_codec_probe, .remove = tda1997x_codec_remove, .idle_bias_on = 1, diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c index 6ecdc8e2e0c660..76ef0fdddf7613 100644 --- a/drivers/media/i2c/tda7432.c +++ b/drivers/media/i2c/tda7432.c @@ -400,7 +400,7 @@ static void tda7432_remove(struct i2c_client *client) } static const struct i2c_device_id tda7432_id[] = { - { "tda7432", 0 }, + { "tda7432" }, { } }; MODULE_DEVICE_TABLE(i2c, tda7432_id); diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c index 1911ef2126be36..d61da811c9da5f 100644 --- a/drivers/media/i2c/tda9840.c +++ b/drivers/media/i2c/tda9840.c @@ -182,7 +182,7 @@ static void tda9840_remove(struct i2c_client *client) } static const struct i2c_device_id tda9840_id[] = { - { "tda9840", 0 }, + { "tda9840" }, { } }; MODULE_DEVICE_TABLE(i2c, tda9840_id); diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c index 3ed6e441d51531..4aaf663536103c 100644 --- a/drivers/media/i2c/tea6415c.c +++ b/drivers/media/i2c/tea6415c.c @@ -141,7 +141,7 @@ static void tea6415c_remove(struct i2c_client *client) } static const struct i2c_device_id tea6415c_id[] = { - { "tea6415c", 0 }, + { "tea6415c" }, { } }; MODULE_DEVICE_TABLE(i2c, tea6415c_id); diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c index 63f23784bb41da..5c5ea39732516c 100644 --- a/drivers/media/i2c/tea6420.c +++ b/drivers/media/i2c/tea6420.c @@ -123,7 +123,7 @@ static void tea6420_remove(struct i2c_client *client) } static const struct i2c_device_id 
tea6420_id[] = { - { "tea6420", 0 }, + { "tea6420" }, { } }; MODULE_DEVICE_TABLE(i2c, tea6420_id); diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c index 19bd923a7315bd..c77440ff098cde 100644 --- a/drivers/media/i2c/thp7312.c +++ b/drivers/media/i2c/thp7312.c @@ -4,7 +4,7 @@ * Copyright (C) 2023 Ideas on Board Oy */ -#include +#include #include #include @@ -1503,7 +1503,7 @@ static int __thp7312_flash_reg_read(struct thp7312_device *thp7312, msgs[0].addr = client->addr; msgs[0].flags = 0; - msgs[0].len = sizeof(thp7312_cmd_read_reg), + msgs[0].len = sizeof(thp7312_cmd_read_reg); msgs[0].buf = (u8 *)thp7312_cmd_read_reg; msgs[1].addr = client->addr; diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c index 49ed83a0ac9401..7526fabc7ee413 100644 --- a/drivers/media/i2c/ths7303.c +++ b/drivers/media/i2c/ths7303.c @@ -369,9 +369,9 @@ static void ths7303_remove(struct i2c_client *client) } static const struct i2c_device_id ths7303_id[] = { - {"ths7303", 0}, - {"ths7353", 0}, - {}, + { "ths7303" }, + { "ths7353" }, + {} }; MODULE_DEVICE_TABLE(i2c, ths7303_id); diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c index ce0a7f809f195f..686f10641c7a6a 100644 --- a/drivers/media/i2c/ths8200.c +++ b/drivers/media/i2c/ths8200.c @@ -487,8 +487,8 @@ static void ths8200_remove(struct i2c_client *client) } static const struct i2c_device_id ths8200_id[] = { - { "ths8200", 0 }, - {}, + { "ths8200" }, + {} }; MODULE_DEVICE_TABLE(i2c, ths8200_id); diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c index d800ff8af1ffee..b7b31b6192af36 100644 --- a/drivers/media/i2c/tlv320aic23b.c +++ b/drivers/media/i2c/tlv320aic23b.c @@ -188,7 +188,7 @@ static void tlv320aic23b_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id tlv320aic23b_id[] = { - { "tlv320aic23b", 0 }, + { "tlv320aic23b" }, { } }; MODULE_DEVICE_TABLE(i2c, tlv320aic23b_id); diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c index ba20f35cafd5c1..654725dfafac17 100644 --- a/drivers/media/i2c/tvaudio.c +++ b/drivers/media/i2c/tvaudio.c @@ -2086,7 +2086,7 @@ static void tvaudio_remove(struct i2c_client *client) detect which device is present. So rather than listing all supported devices here, we pretend to support a single, fake device type. */ static const struct i2c_device_id tvaudio_id[] = { - { "tvaudio", 0 }, + { "tvaudio" }, { } }; MODULE_DEVICE_TABLE(i2c, tvaudio_id); diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 64b91aa3c82a89..e3675c744d9edc 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -514,7 +514,7 @@ struct i2c_vbi_ram_value { * and so on. There are 16 possible locations from 0 to 15. 
*/ -static struct i2c_vbi_ram_value vbi_ram_default[] = { +static const struct i2c_vbi_ram_value vbi_ram_default[] = { /* * FIXME: Current api doesn't handle all VBI types, those not @@ -1812,7 +1812,7 @@ static const struct regmap_access_table tvp5150_readable_table = { .n_yes_ranges = ARRAY_SIZE(tvp5150_readable_ranges), }; -static struct regmap_config tvp5150_config = { +static const struct regmap_config tvp5150_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0xff, @@ -2265,7 +2265,7 @@ static const struct dev_pm_ops tvp5150_pm_ops = { }; static const struct i2c_device_id tvp5150_id[] = { - { "tvp5150", 0 }, + { "tvp5150" }, { } }; MODULE_DEVICE_TABLE(i2c, tvp5150_id); diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c index ea01bd86450e06..c09a5bd71fd0bb 100644 --- a/drivers/media/i2c/tvp7002.c +++ b/drivers/media/i2c/tvp7002.c @@ -1070,7 +1070,7 @@ static void tvp7002_remove(struct i2c_client *c) /* I2C Device ID table */ static const struct i2c_device_id tvp7002_id[] = { - { "tvp7002", 0 }, + { "tvp7002" }, { } }; MODULE_DEVICE_TABLE(i2c, tvp7002_id); diff --git a/drivers/media/i2c/tw2804.c b/drivers/media/i2c/tw2804.c index 6a2521e3a25c2e..3d154f4fb5f9df 100644 --- a/drivers/media/i2c/tw2804.c +++ b/drivers/media/i2c/tw2804.c @@ -414,7 +414,7 @@ static void tw2804_remove(struct i2c_client *client) } static const struct i2c_device_id tw2804_id[] = { - { "tw2804", 0 }, + { "tw2804" }, { } }; MODULE_DEVICE_TABLE(i2c, tw2804_id); diff --git a/drivers/media/i2c/tw9900.c b/drivers/media/i2c/tw9900.c index bc7623ec46e581..53efdeaed1dba6 100644 --- a/drivers/media/i2c/tw9900.c +++ b/drivers/media/i2c/tw9900.c @@ -753,7 +753,7 @@ static const struct dev_pm_ops tw9900_pm_ops = { }; static const struct i2c_device_id tw9900_id[] = { - { "tw9900", 0 }, + { "tw9900" }, { } }; MODULE_DEVICE_TABLE(i2c, tw9900_id); diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c index 996be3960af3f8..b996a05e56f28f 100644 --- a/drivers/media/i2c/tw9903.c +++ b/drivers/media/i2c/tw9903.c @@ -245,7 +245,7 @@ static void tw9903_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id tw9903_id[] = { - { "tw9903", 0 }, + { "tw9903" }, { } }; MODULE_DEVICE_TABLE(i2c, tw9903_id); diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c index 25c625f6d6e488..6220f4fddbabcd 100644 --- a/drivers/media/i2c/tw9906.c +++ b/drivers/media/i2c/tw9906.c @@ -213,7 +213,7 @@ static void tw9906_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id tw9906_id[] = { - { "tw9906", 0 }, + { "tw9906" }, { } }; MODULE_DEVICE_TABLE(i2c, tw9906_id); diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c index 6dffaaa9ed5697..f3e400304e042e 100644 --- a/drivers/media/i2c/tw9910.c +++ b/drivers/media/i2c/tw9910.c @@ -996,7 +996,7 @@ static void tw9910_remove(struct i2c_client *client) } static const struct i2c_device_id tw9910_id[] = { - { "tw9910", 0 }, + { "tw9910" }, { } }; MODULE_DEVICE_TABLE(i2c, tw9910_id); diff --git a/drivers/media/i2c/uda1342.c b/drivers/media/i2c/uda1342.c index abd052a44bd7a5..2e4540ee2df2dd 100644 --- a/drivers/media/i2c/uda1342.c +++ b/drivers/media/i2c/uda1342.c @@ -79,7 +79,7 @@ static void uda1342_remove(struct i2c_client *client) } static const struct i2c_device_id uda1342_id[] = { - { "uda1342", 0 }, + { "uda1342" }, { } }; MODULE_DEVICE_TABLE(i2c, 
uda1342_id); diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c index 54c2ba0ba375f4..9d0b72a213be41 100644 --- a/drivers/media/i2c/upd64031a.c +++ b/drivers/media/i2c/upd64031a.c @@ -219,7 +219,7 @@ static void upd64031a_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id upd64031a_id[] = { - { "upd64031a", 0 }, + { "upd64031a" }, { } }; MODULE_DEVICE_TABLE(i2c, upd64031a_id); diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c index 2a820589a4cb54..2e99ed5da42cac 100644 --- a/drivers/media/i2c/upd64083.c +++ b/drivers/media/i2c/upd64083.c @@ -190,7 +190,7 @@ static void upd64083_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id upd64083_id[] = { - { "upd64083", 0 }, + { "upd64083" }, { } }; MODULE_DEVICE_TABLE(i2c, upd64083_id); diff --git a/drivers/media/i2c/vgxy61.c b/drivers/media/i2c/vgxy61.c index 30378e9620166d..409d2d4ffb4bb2 100644 --- a/drivers/media/i2c/vgxy61.c +++ b/drivers/media/i2c/vgxy61.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c index 0ba3c2b6803761..06fd46a63c7243 100644 --- a/drivers/media/i2c/vp27smpx.c +++ b/drivers/media/i2c/vp27smpx.c @@ -172,7 +172,7 @@ static void vp27smpx_remove(struct i2c_client *client) /* ----------------------------------------------------------------------- */ static const struct i2c_device_id vp27smpx_id[] = { - { "vp27smpx", 0 }, + { "vp27smpx" }, { } }; MODULE_DEVICE_TABLE(i2c, vp27smpx_id); diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c index 1eaae886f217ad..5f1a22284168d4 100644 --- a/drivers/media/i2c/vpx3220.c +++ b/drivers/media/i2c/vpx3220.c @@ -535,9 +535,9 @@ static void vpx3220_remove(struct i2c_client *client) } static const struct i2c_device_id vpx3220_id[] = { - { "vpx3220a", 0 }, - { "vpx3216b", 0 }, - { "vpx3214c", 0 }, + { "vpx3220a" }, + { "vpx3216b" }, + { "vpx3214c" }, { } }; MODULE_DEVICE_TABLE(i2c, vpx3220_id); diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c index 19bf7a00dff9de..c091b78a5b4173 100644 --- a/drivers/media/i2c/wm8739.c +++ b/drivers/media/i2c/wm8739.c @@ -243,7 +243,7 @@ static void wm8739_remove(struct i2c_client *client) } static const struct i2c_device_id wm8739_id[] = { - { "wm8739", 0 }, + { "wm8739" }, { } }; MODULE_DEVICE_TABLE(i2c, wm8739_id); diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c index d1b716fd6f1192..619b2988577cbe 100644 --- a/drivers/media/i2c/wm8775.c +++ b/drivers/media/i2c/wm8775.c @@ -289,7 +289,7 @@ static void wm8775_remove(struct i2c_client *client) } static const struct i2c_device_id wm8775_id[] = { - { "wm8775", 0 }, + { "wm8775" }, { } }; MODULE_DEVICE_TABLE(i2c, wm8775_id); diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c index 318e267e798e3a..56444edaf13651 100644 --- a/drivers/media/mc/mc-devnode.c +++ b/drivers/media/mc/mc-devnode.c @@ -204,7 +204,6 @@ static const struct file_operations media_devnode_fops = { #endif /* CONFIG_COMPAT */ .release = media_release, .poll = media_poll, - .llseek = no_llseek, }; int __must_check media_devnode_register(struct media_device *mdev, diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c index addb8f2d8939ef..e064914c476e7c 100644 --- a/drivers/media/mc/mc-request.c +++ 
b/drivers/media/mc/mc-request.c @@ -254,12 +254,12 @@ media_request_get_by_fd(struct media_device *mdev, int request_fd) return ERR_PTR(-EBADR); f = fdget(request_fd); - if (!f.file) + if (!fd_file(f)) goto err_no_req_fd; - if (f.file->f_op != &request_fops) + if (fd_file(f)->f_op != &request_fops) goto err_fput; - req = f.file->private_data; + req = fd_file(f)->private_data; if (req->mdev != mdev) goto err_fput; diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c index 867c1308de2356..365b04e5ae4d27 100644 --- a/drivers/media/pci/bt8xx/bttv-cards.c +++ b/drivers/media/pci/bt8xx/bttv-cards.c @@ -24,7 +24,7 @@ #include #include -#include +#include #include #include "bttvp.h" diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig index 40e20f0aa5ae5d..49e4fb696573f6 100644 --- a/drivers/media/pci/intel/ipu6/Kconfig +++ b/drivers/media/pci/intel/ipu6/Kconfig @@ -4,8 +4,13 @@ config VIDEO_INTEL_IPU6 depends on VIDEO_DEV depends on X86 && X86_64 && HAS_DMA depends on IPU_BRIDGE || !IPU_BRIDGE + # + # This driver incorrectly tries to override the dma_ops. It should + # never have done that, but for now keep it working on architectures + # that use dma ops + # + depends on ARCH_HAS_DMA_OPS select AUXILIARY_BUS - select DMA_OPS select IOMMU_IOVA select VIDEO_V4L2_SUBDEV_API select MEDIA_CONTROLLER diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index bbd646378ab3ed..7fb707d3530967 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -390,20 +390,18 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, IPU6_ISYS_NAME); if (IS_ERR(isys_adev)) { - dev_err_probe(dev, PTR_ERR(isys_adev), - "ipu6_bus_initialize_device isys failed\n"); kfree(pdata); - return ERR_CAST(isys_adev); + return dev_err_cast_probe(dev, isys_adev, + "ipu6_bus_initialize_device isys failed\n"); } isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID, &ipdata->hw_variant); if (IS_ERR(isys_adev->mmu)) { - dev_err_probe(dev, PTR_ERR(isys_adev->mmu), - "ipu6_mmu_init(isys_adev->mmu) failed\n"); put_device(&isys_adev->auxdev.dev); kfree(pdata); - return ERR_CAST(isys_adev->mmu); + return dev_err_cast_probe(dev, isys_adev->mmu, + "ipu6_mmu_init(isys_adev->mmu) failed\n"); } isys_adev->mmu->dev = &isys_adev->auxdev.dev; @@ -436,20 +434,18 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent, psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, IPU6_PSYS_NAME); if (IS_ERR(psys_adev)) { - dev_err_probe(&pdev->dev, PTR_ERR(psys_adev), - "ipu6_bus_initialize_device psys failed\n"); kfree(pdata); - return ERR_CAST(psys_adev); + return dev_err_cast_probe(&pdev->dev, psys_adev, + "ipu6_bus_initialize_device psys failed\n"); } psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID, &ipdata->hw_variant); if (IS_ERR(psys_adev->mmu)) { - dev_err_probe(&pdev->dev, PTR_ERR(psys_adev->mmu), - "ipu6_mmu_init(psys_adev->mmu) failed\n"); put_device(&psys_adev->auxdev.dev); kfree(pdata); - return ERR_CAST(psys_adev->mmu); + return dev_err_cast_probe(&pdev->dev, psys_adev->mmu, + "ipu6_mmu_init(psys_adev->mmu) failed\n"); } psys_adev->mmu->dev = &psys_adev->auxdev.dev; @@ -576,9 +572,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) return dev_err_probe(dev, ret, "Failed to set DMA mask\n"); - ret = dma_set_max_seg_size(dev, UINT_MAX); - if (ret) - return 
dev_err_probe(dev, ret, "Failed to set max_seg_size\n"); + dma_set_max_seg_size(dev, UINT_MAX); ret = ipu6_pci_config_setup(pdev, isp->hw_ver); if (ret) diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c index ab4f07e2e56030..2819bbdab4842f 100644 --- a/drivers/media/pci/mgb4/mgb4_core.c +++ b/drivers/media/pci/mgb4/mgb4_core.c @@ -302,7 +302,7 @@ static int init_i2c(struct mgb4_dev *mgbdev) /* create dummy clock required by the xiic-i2c adapter */ snprintf(clk_name, sizeof(clk_name), "xiic-i2c.%d", id); mgbdev->i2c_clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL, - 0, 125000000); + 0, MGB4_HW_FREQ); if (IS_ERR(mgbdev->i2c_clk)) { dev_err(dev, "failed to register I2C clock\n"); return PTR_ERR(mgbdev->i2c_clk); diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h index 2a946e46aec170..b52cd67270b56d 100644 --- a/drivers/media/pci/mgb4/mgb4_core.h +++ b/drivers/media/pci/mgb4/mgb4_core.h @@ -13,6 +13,8 @@ #include #include "mgb4_regs.h" +#define MGB4_HW_FREQ 125000000 + #define MGB4_VIN_DEVICES 2 #define MGB4_VOUT_DEVICES 2 diff --git a/drivers/media/pci/mgb4/mgb4_io.h b/drivers/media/pci/mgb4/mgb4_io.h index 8698db1be4a9e5..dd8696d7df31e0 100644 --- a/drivers/media/pci/mgb4/mgb4_io.h +++ b/drivers/media/pci/mgb4/mgb4_io.h @@ -7,11 +7,9 @@ #ifndef __MGB4_IO_H__ #define __MGB4_IO_H__ +#include #include - -#define MGB4_DEFAULT_WIDTH 1280 -#define MGB4_DEFAULT_HEIGHT 640 -#define MGB4_DEFAULT_PERIOD (125000000 / 60) +#include "mgb4_core.h" /* Register access error indication */ #define MGB4_ERR_NO_REG 0xFFFFFFFE @@ -20,6 +18,9 @@ #define MGB4_ERR_QUEUE_EMPTY 0xFFFFFFFC #define MGB4_ERR_QUEUE_FULL 0xFFFFFFFB +#define MGB4_PERIOD(numerator, denominator) \ + ((u32)div_u64((MGB4_HW_FREQ * (u64)(numerator)), (denominator))) + struct mgb4_frame_buffer { struct vb2_v4l2_buffer vb; struct list_head list; @@ -30,4 +31,24 @@ static inline struct mgb4_frame_buffer *to_frame_buffer(struct vb2_v4l2_buffer * return container_of(vbuf, struct mgb4_frame_buffer, vb); } +static inline bool has_yuv_and_timeperframe(struct mgb4_regs *video) +{ + u32 status = mgb4_read_reg(video, 0xD0); + + return (status & (1U << 8)); +} + +#define has_yuv(video) has_yuv_and_timeperframe(video) +#define has_timeperframe(video) has_yuv_and_timeperframe(video) + +static inline u32 pixel_size(struct v4l2_dv_timings *timings) +{ + struct v4l2_bt_timings *bt = &timings->bt; + + u32 height = bt->height + bt->vfrontporch + bt->vsync + bt->vbackporch; + u32 width = bt->width + bt->hfrontporch + bt->hsync + bt->hbackporch; + + return width * height; +} + #endif diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_out.c b/drivers/media/pci/mgb4/mgb4_sysfs_out.c index 9f6e81c577267e..573aa61c69d4b9 100644 --- a/drivers/media/pci/mgb4/mgb4_sysfs_out.c +++ b/drivers/media/pci/mgb4/mgb4_sysfs_out.c @@ -229,9 +229,9 @@ static ssize_t frame_rate_show(struct device *dev, struct video_device *vdev = to_video_device(dev); struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev); u32 period = mgb4_read_reg(&voutdev->mgbdev->video, - voutdev->config->regs.frame_period); + voutdev->config->regs.frame_limit); - return sprintf(buf, "%u\n", 125000000 / period); + return sprintf(buf, "%u\n", period ? 
MGB4_HW_FREQ / period : 0); } /* @@ -245,14 +245,15 @@ static ssize_t frame_rate_store(struct device *dev, struct video_device *vdev = to_video_device(dev); struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev); unsigned long val; - int ret; + int limit, ret; ret = kstrtoul(buf, 10, &val); if (ret) return ret; + limit = val ? MGB4_HW_FREQ / val : 0; mgb4_write_reg(&voutdev->mgbdev->video, - voutdev->config->regs.frame_period, 125000000 / val); + voutdev->config->regs.frame_limit, limit); return count; } diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c index 2cd78c53988984..e9332abb31729e 100644 --- a/drivers/media/pci/mgb4/mgb4_vin.c +++ b/drivers/media/pci/mgb4/mgb4_vin.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -34,8 +35,8 @@ ATTRIBUTE_GROUPS(mgb4_fpdl3_in); ATTRIBUTE_GROUPS(mgb4_gmsl_in); static const struct mgb4_vin_config vin_cfg[] = { - {0, 0, 0, 6, {0x10, 0x00, 0x04, 0x08, 0x1C, 0x14, 0x18, 0x20, 0x24, 0x28}}, - {1, 1, 1, 7, {0x40, 0x30, 0x34, 0x38, 0x4C, 0x44, 0x48, 0x50, 0x54, 0x58}} + {0, 0, 0, 6, {0x10, 0x00, 0x04, 0x08, 0x1C, 0x14, 0x18, 0x20, 0x24, 0x28, 0xE8}}, + {1, 1, 1, 7, {0x40, 0x30, 0x34, 0x38, 0x4C, 0x44, 0x48, 0x50, 0x54, 0x58, 0xEC}} }; static const struct i2c_board_info fpdl3_deser_info[] = { @@ -76,6 +77,9 @@ static const struct v4l2_dv_timings_cap video_timings_cap = { }, }; +/* Dummy timings when no signal present */ +static const struct v4l2_dv_timings cea1080p60 = V4L2_DV_BT_CEA_1920X1080P60; + /* * Returns the video output connected with the given video input if the input * is in loopback mode. @@ -186,8 +190,11 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers, struct device *alloc_devs[]) { struct mgb4_vin_dev *vindev = vb2_get_drv_priv(q); + struct mgb4_regs *video = &vindev->mgbdev->video; + u32 config = mgb4_read_reg(video, vindev->config->regs.config); + u32 pixelsize = (config & (1U << 16)) ? 2 : 4; unsigned int size = (vindev->timings.bt.width + vindev->padding) - * vindev->timings.bt.height * 4; + * vindev->timings.bt.height * pixelsize; /* * If I/O reconfiguration is in process, do not allow to start @@ -220,9 +227,12 @@ static int buffer_init(struct vb2_buffer *vb) static int buffer_prepare(struct vb2_buffer *vb) { struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vb->vb2_queue); + struct mgb4_regs *video = &vindev->mgbdev->video; struct device *dev = &vindev->mgbdev->pdev->dev; + u32 config = mgb4_read_reg(video, vindev->config->regs.config); + u32 pixelsize = (config & (1U << 16)) ? 
2 : 4; unsigned int size = (vindev->timings.bt.width + vindev->padding) - * vindev->timings.bt.height * 4; + * vindev->timings.bt.height * pixelsize; if (vb2_plane_size(vb, 0) < size) { dev_err(dev, "buffer too small (%lu < %u)\n", @@ -312,7 +322,8 @@ static int fh_open(struct file *file) if (!v4l2_fh_is_singular_file(file)) goto out; - get_timings(vindev, &vindev->timings); + if (get_timings(vindev, &vindev->timings) < 0) + vindev->timings = cea1080p60; set_loopback_padding(vindev, vindev->padding); out: @@ -359,33 +370,42 @@ static int vidioc_querycap(struct file *file, void *priv, static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { - if (f->index != 0) - return -EINVAL; - - f->pixelformat = V4L2_PIX_FMT_ABGR32; + struct mgb4_vin_dev *vindev = video_drvdata(file); + struct mgb4_regs *video = &vindev->mgbdev->video; - return 0; + if (f->index == 0) { + f->pixelformat = V4L2_PIX_FMT_ABGR32; + return 0; + } else if (f->index == 1 && has_yuv(video)) { + f->pixelformat = V4L2_PIX_FMT_YUYV; + return 0; + } else { + return -EINVAL; + } } static int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *ival) { struct mgb4_vin_dev *vindev = video_drvdata(file); + struct mgb4_regs *video = &vindev->mgbdev->video; if (ival->index != 0) return -EINVAL; - if (ival->pixel_format != V4L2_PIX_FMT_ABGR32) + if (!(ival->pixel_format == V4L2_PIX_FMT_ABGR32 || + ((has_yuv(video) && ival->pixel_format == V4L2_PIX_FMT_YUYV)))) return -EINVAL; if (ival->width != vindev->timings.bt.width || ival->height != vindev->timings.bt.height) return -EINVAL; - ival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; - ival->stepwise.min.denominator = 60; - ival->stepwise.min.numerator = 1; - ival->stepwise.max.denominator = 1; - ival->stepwise.max.numerator = 1; - ival->stepwise.step = ival->stepwise.max; + ival->type = V4L2_FRMIVAL_TYPE_STEPWISE; + ival->stepwise.max.denominator = MGB4_HW_FREQ; + ival->stepwise.max.numerator = 0xFFFFFFFF; + ival->stepwise.min.denominator = vindev->timings.bt.pixelclock; + ival->stepwise.min.numerator = pixel_size(&vindev->timings); + ival->stepwise.step.denominator = MGB4_HW_FREQ; + ival->stepwise.step.numerator = 1; return 0; } @@ -393,13 +413,29 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv, static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct mgb4_vin_dev *vindev = video_drvdata(file); + struct mgb4_regs *video = &vindev->mgbdev->video; + u32 config = mgb4_read_reg(video, vindev->config->regs.config); - f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32; f->fmt.pix.width = vindev->timings.bt.width; f->fmt.pix.height = vindev->timings.bt.height; f->fmt.pix.field = V4L2_FIELD_NONE; - f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; - f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 4; + + if (config & (1U << 16)) { + f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; + if (config & (1U << 20)) { + f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; + } else { + if (config & (1U << 19)) + f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + else + f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + } + f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 2; + } else { + f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32; + f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; + f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 4; + } f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; return 0; @@ -408,14 +444,30 @@ static int vidioc_g_fmt(struct file *file, void 
*priv, struct v4l2_format *f) static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct mgb4_vin_dev *vindev = video_drvdata(file); + struct mgb4_regs *video = &vindev->mgbdev->video; + u32 pixelsize; - f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32; f->fmt.pix.width = vindev->timings.bt.width; f->fmt.pix.height = vindev->timings.bt.height; f->fmt.pix.field = V4L2_FIELD_NONE; - f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; - f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4, - ALIGN_DOWN(f->fmt.pix.bytesperline, 4)); + + if (has_yuv(video) && f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) { + pixelsize = 2; + if (!(f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709 || + f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M)) + f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + } else { + pixelsize = 4; + f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32; + f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; + } + + if (f->fmt.pix.bytesperline > f->fmt.pix.width * pixelsize && + f->fmt.pix.bytesperline < f->fmt.pix.width * pixelsize * 2) + f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline, + pixelsize); + else + f->fmt.pix.bytesperline = f->fmt.pix.width * pixelsize; f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; return 0; @@ -425,13 +477,36 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct mgb4_vin_dev *vindev = video_drvdata(file); struct mgb4_regs *video = &vindev->mgbdev->video; + u32 config, pixelsize; if (vb2_is_busy(&vindev->queue)) return -EBUSY; vidioc_try_fmt(file, priv, f); - vindev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4; + config = mgb4_read_reg(video, vindev->config->regs.config); + if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) { + pixelsize = 2; + config |= 1U << 16; + + if (f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709) { + config |= 1U << 20; + config |= 1U << 19; + } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M) { + config &= ~(1U << 20); + config |= 1U << 19; + } else { + config &= ~(1U << 20); + config &= ~(1U << 19); + } + } else { + pixelsize = 4; + config &= ~(1U << 16); + } + mgb4_write_reg(video, vindev->config->regs.config, config); + + vindev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width + * pixelsize)) / pixelsize; mgb4_write_reg(video, vindev->config->regs.padding, vindev->padding); set_loopback_padding(vindev, vindev->padding); @@ -467,7 +542,8 @@ static int vidioc_enum_framesizes(struct file *file, void *fh, { struct mgb4_vin_dev *vindev = video_drvdata(file); - if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_ABGR32) + if (fsize->index != 0 || !(fsize->pixel_format == V4L2_PIX_FMT_ABGR32 || + fsize->pixel_format == V4L2_PIX_FMT_YUYV)) return -EINVAL; fsize->discrete.width = vindev->timings.bt.width; @@ -488,27 +564,56 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) return 0; } -static int vidioc_parm(struct file *file, void *priv, - struct v4l2_streamparm *parm) +static int vidioc_g_parm(struct file *file, void *priv, + struct v4l2_streamparm *parm) { struct mgb4_vin_dev *vindev = video_drvdata(file); struct mgb4_regs *video = &vindev->mgbdev->video; - const struct mgb4_vin_regs *regs = &vindev->config->regs; - struct v4l2_fract timeperframe = { - .numerator = mgb4_read_reg(video, regs->frame_period), - .denominator = 125000000, - }; - - if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; + struct v4l2_fract *tpf = &parm->parm.output.timeperframe; + u32 timer; 
parm->parm.capture.readbuffers = 2; - parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; - parm->parm.capture.timeperframe = timeperframe; + + if (has_timeperframe(video)) { + timer = mgb4_read_reg(video, vindev->config->regs.timer); + if (timer < 0xFFFF) { + tpf->numerator = pixel_size(&vindev->timings); + tpf->denominator = vindev->timings.bt.pixelclock; + } else { + tpf->numerator = timer; + tpf->denominator = MGB4_HW_FREQ; + } + + parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME; + } return 0; } +static int vidioc_s_parm(struct file *file, void *priv, + struct v4l2_streamparm *parm) +{ + struct mgb4_vin_dev *vindev = video_drvdata(file); + struct mgb4_regs *video = &vindev->mgbdev->video; + struct v4l2_fract *tpf = &parm->parm.output.timeperframe; + u32 period, timer; + + if (has_timeperframe(video)) { + timer = tpf->denominator ? + MGB4_PERIOD(tpf->numerator, tpf->denominator) : 0; + if (timer) { + period = MGB4_PERIOD(pixel_size(&vindev->timings), + vindev->timings.bt.pixelclock); + if (timer < period) + timer = 0; + } + + mgb4_write_reg(video, vindev->config->regs.timer, timer); + } + + return vidioc_g_parm(file, priv, parm); +} + static int vidioc_s_dv_timings(struct file *file, void *fh, struct v4l2_dv_timings *timings) { @@ -592,8 +697,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, - .vidioc_g_parm = vidioc_parm, - .vidioc_s_parm = vidioc_parm, + .vidioc_g_parm = vidioc_g_parm, + .vidioc_s_parm = vidioc_s_parm, .vidioc_dv_timings_cap = vidioc_dv_timings_cap, .vidioc_enum_dv_timings = vidioc_enum_dv_timings, .vidioc_g_dv_timings = vidioc_g_dv_timings, @@ -776,10 +881,16 @@ static void debugfs_init(struct mgb4_vin_dev *vindev) vindev->regs[7].offset = vindev->config->regs.signal2; vindev->regs[8].name = "PADDING_PIXELS"; vindev->regs[8].offset = vindev->config->regs.padding; + if (has_timeperframe(video)) { + vindev->regs[9].name = "TIMER"; + vindev->regs[9].offset = vindev->config->regs.timer; + vindev->regset.nregs = 10; + } else { + vindev->regset.nregs = 9; + } vindev->regset.base = video->membase; vindev->regset.regs = vindev->regs; - vindev->regset.nregs = ARRAY_SIZE(vindev->regs); debugfs_create_regset32("registers", 0444, vindev->debugfs, &vindev->regset); diff --git a/drivers/media/pci/mgb4/mgb4_vin.h b/drivers/media/pci/mgb4/mgb4_vin.h index 0249b400ad4d72..9693bd0ce18060 100644 --- a/drivers/media/pci/mgb4/mgb4_vin.h +++ b/drivers/media/pci/mgb4/mgb4_vin.h @@ -25,6 +25,7 @@ struct mgb4_vin_regs { u32 signal; u32 signal2; u32 padding; + u32 timer; }; struct mgb4_vin_config { @@ -59,7 +60,7 @@ struct mgb4_vin_dev { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; struct debugfs_regset32 regset; - struct debugfs_reg32 regs[9]; + struct debugfs_reg32 regs[sizeof(struct mgb4_vin_regs) / 4]; #endif }; diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c index 241353ee77a5b4..998edcbd972387 100644 --- a/drivers/media/pci/mgb4/mgb4_vout.c +++ b/drivers/media/pci/mgb4/mgb4_vout.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "mgb4_core.h" #include "mgb4_dma.h" #include "mgb4_sysfs.h" @@ -23,12 +24,16 @@ #include "mgb4_cmt.h" #include "mgb4_vout.h" +#define DEFAULT_WIDTH 1280 +#define DEFAULT_HEIGHT 640 +#define DEFAULT_PERIOD (MGB4_HW_FREQ / 60) + ATTRIBUTE_GROUPS(mgb4_fpdl3_out); ATTRIBUTE_GROUPS(mgb4_gmsl_out); static const struct mgb4_vout_config vout_cfg[] = { - {0, 0, 8, {0x78, 0x60, 0x64, 
0x68, 0x74, 0x6C, 0x70, 0x7c}}, - {1, 1, 9, {0x98, 0x80, 0x84, 0x88, 0x94, 0x8c, 0x90, 0x9c}} + {0, 0, 8, {0x78, 0x60, 0x64, 0x68, 0x74, 0x6C, 0x70, 0x7C, 0xE0}}, + {1, 1, 9, {0x98, 0x80, 0x84, 0x88, 0x94, 0x8C, 0x90, 0x9C, 0xE4}} }; static const struct i2c_board_info fpdl3_ser_info[] = { @@ -40,6 +45,49 @@ static const struct mgb4_i2c_kv fpdl3_i2c[] = { {0x05, 0xFF, 0x04}, {0x06, 0xFF, 0x01}, {0xC2, 0xFF, 0x80} }; +static const struct v4l2_dv_timings_cap video_timings_cap = { + .type = V4L2_DV_BT_656_1120, + .bt = { + .min_width = 320, + .max_width = 4096, + .min_height = 240, + .max_height = 2160, + .min_pixelclock = 1843200, /* 320 x 240 x 24Hz */ + .max_pixelclock = 530841600, /* 4096 x 2160 x 60Hz */ + .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | + V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF, + .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | + V4L2_DV_BT_CAP_CUSTOM, + }, +}; + +static void get_timings(struct mgb4_vout_dev *voutdev, + struct v4l2_dv_timings *timings) +{ + struct mgb4_regs *video = &voutdev->mgbdev->video; + const struct mgb4_vout_regs *regs = &voutdev->config->regs; + + u32 hsync = mgb4_read_reg(video, regs->hsync); + u32 vsync = mgb4_read_reg(video, regs->vsync); + u32 resolution = mgb4_read_reg(video, regs->resolution); + + memset(timings, 0, sizeof(*timings)); + timings->type = V4L2_DV_BT_656_1120; + timings->bt.width = resolution >> 16; + timings->bt.height = resolution & 0xFFFF; + if (hsync & (1U << 31)) + timings->bt.polarities |= V4L2_DV_HSYNC_POS_POL; + if (vsync & (1U << 31)) + timings->bt.polarities |= V4L2_DV_VSYNC_POS_POL; + timings->bt.pixelclock = voutdev->freq * 1000; + timings->bt.hsync = (hsync & 0x00FF0000) >> 16; + timings->bt.vsync = (vsync & 0x00FF0000) >> 16; + timings->bt.hbackporch = (hsync & 0x0000FF00) >> 8; + timings->bt.hfrontporch = hsync & 0x000000FF; + timings->bt.vbackporch = (vsync & 0x0000FF00) >> 8; + timings->bt.vfrontporch = vsync & 0x000000FF; +} + static void return_all_buffers(struct mgb4_vout_dev *voutdev, enum vb2_buffer_state state) { @@ -59,7 +107,11 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers, struct device *alloc_devs[]) { struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(q); - unsigned int size; + struct mgb4_regs *video = &voutdev->mgbdev->video; + u32 config = mgb4_read_reg(video, voutdev->config->regs.config); + u32 pixelsize = (config & (1U << 16)) ? 2 : 4; + unsigned int size = (voutdev->width + voutdev->padding) * voutdev->height + * pixelsize; /* * If I/O reconfiguration is in process, do not allow to start @@ -69,8 +121,6 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers, if (test_bit(0, &voutdev->mgbdev->io_reconfig)) return -EBUSY; - size = (voutdev->width + voutdev->padding) * voutdev->height * 4; - if (*nplanes) return sizes[0] < size ? -EINVAL : 0; *nplanes = 1; @@ -93,9 +143,11 @@ static int buffer_prepare(struct vb2_buffer *vb) { struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vb->vb2_queue); struct device *dev = &voutdev->mgbdev->pdev->dev; - unsigned int size; - - size = (voutdev->width + voutdev->padding) * voutdev->height * 4; + struct mgb4_regs *video = &voutdev->mgbdev->video; + u32 config = mgb4_read_reg(video, voutdev->config->regs.config); + u32 pixelsize = (config & (1U << 16)) ? 
2 : 4; + unsigned int size = (voutdev->width + voutdev->padding) * voutdev->height + * pixelsize; if (vb2_plane_size(vb, 0) < size) { dev_err(dev, "buffer too small (%lu < %u)\n", @@ -194,24 +246,47 @@ static int vidioc_querycap(struct file *file, void *priv, static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { - if (f->index != 0) - return -EINVAL; - - f->pixelformat = V4L2_PIX_FMT_ABGR32; + struct mgb4_vin_dev *voutdev = video_drvdata(file); + struct mgb4_regs *video = &voutdev->mgbdev->video; - return 0; + if (f->index == 0) { + f->pixelformat = V4L2_PIX_FMT_ABGR32; + return 0; + } else if (f->index == 1 && has_yuv(video)) { + f->pixelformat = V4L2_PIX_FMT_YUYV; + return 0; + } else { + return -EINVAL; + } } static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct mgb4_vout_dev *voutdev = video_drvdata(file); + struct mgb4_regs *video = &voutdev->mgbdev->video; + u32 config = mgb4_read_reg(video, voutdev->config->regs.config); - f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32; f->fmt.pix.width = voutdev->width; f->fmt.pix.height = voutdev->height; f->fmt.pix.field = V4L2_FIELD_NONE; - f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; - f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 4; + + if (config & (1U << 16)) { + f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; + if (config & (1U << 20)) { + f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; + } else { + if (config & (1U << 19)) + f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + else + f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + } + f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 2; + } else { + f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32; + f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; + f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 4; + } + f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; return 0; @@ -220,14 +295,30 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct mgb4_vout_dev *voutdev = video_drvdata(file); + struct mgb4_regs *video = &voutdev->mgbdev->video; + u32 pixelsize; - f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32; f->fmt.pix.width = voutdev->width; f->fmt.pix.height = voutdev->height; f->fmt.pix.field = V4L2_FIELD_NONE; - f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; - f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4, - ALIGN_DOWN(f->fmt.pix.bytesperline, 4)); + + if (has_yuv(video) && f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) { + pixelsize = 2; + if (!(f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709 || + f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M)) + f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + } else { + pixelsize = 4; + f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32; + f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; + } + + if (f->fmt.pix.bytesperline > f->fmt.pix.width * pixelsize && + f->fmt.pix.bytesperline < f->fmt.pix.width * pixelsize * 2) + f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline, + pixelsize); + else + f->fmt.pix.bytesperline = f->fmt.pix.width * pixelsize; f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; return 0; @@ -237,13 +328,39 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct mgb4_vout_dev *voutdev = video_drvdata(file); struct mgb4_regs *video = &voutdev->mgbdev->video; + u32 config, pixelsize; + int ret; if (vb2_is_busy(&voutdev->queue)) return -EBUSY; - 
vidioc_try_fmt(file, priv, f); + ret = vidioc_try_fmt(file, priv, f); + if (ret < 0) + return ret; + + config = mgb4_read_reg(video, voutdev->config->regs.config); + if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) { + pixelsize = 2; + config |= 1U << 16; + + if (f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709) { + config |= 1U << 20; + config |= 1U << 19; + } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M) { + config &= ~(1U << 20); + config |= 1U << 19; + } else { + config &= ~(1U << 20); + config &= ~(1U << 19); + } + } else { + pixelsize = 4; + config &= ~(1U << 16); + } + mgb4_write_reg(video, voutdev->config->regs.config, config); - voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4; + voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width + * pixelsize)) / pixelsize; mgb4_write_reg(video, voutdev->config->regs.padding, voutdev->padding); return 0; @@ -267,11 +384,128 @@ static int vidioc_enum_output(struct file *file, void *priv, return -EINVAL; out->type = V4L2_OUTPUT_TYPE_ANALOG; + out->capabilities = V4L2_OUT_CAP_DV_TIMINGS; strscpy(out->name, "MGB4", sizeof(out->name)); return 0; } +static int vidioc_enum_frameintervals(struct file *file, void *priv, + struct v4l2_frmivalenum *ival) +{ + struct mgb4_vout_dev *voutdev = video_drvdata(file); + struct mgb4_regs *video = &voutdev->mgbdev->video; + struct v4l2_dv_timings timings; + + if (ival->index != 0) + return -EINVAL; + if (!(ival->pixel_format == V4L2_PIX_FMT_ABGR32 || + ((has_yuv(video) && ival->pixel_format == V4L2_PIX_FMT_YUYV)))) + return -EINVAL; + if (ival->width != voutdev->width || ival->height != voutdev->height) + return -EINVAL; + + get_timings(voutdev, &timings); + + ival->type = V4L2_FRMIVAL_TYPE_STEPWISE; + ival->stepwise.max.denominator = MGB4_HW_FREQ; + ival->stepwise.max.numerator = 0xFFFFFFFF; + ival->stepwise.min.denominator = timings.bt.pixelclock; + ival->stepwise.min.numerator = pixel_size(&timings); + ival->stepwise.step.denominator = MGB4_HW_FREQ; + ival->stepwise.step.numerator = 1; + + return 0; +} + +static int vidioc_g_parm(struct file *file, void *priv, + struct v4l2_streamparm *parm) +{ + struct mgb4_vout_dev *voutdev = video_drvdata(file); + struct mgb4_regs *video = &voutdev->mgbdev->video; + struct v4l2_fract *tpf = &parm->parm.output.timeperframe; + struct v4l2_dv_timings timings; + u32 timer; + + parm->parm.output.writebuffers = 2; + + if (has_timeperframe(video)) { + timer = mgb4_read_reg(video, voutdev->config->regs.timer); + if (timer < 0xFFFF) { + get_timings(voutdev, &timings); + tpf->numerator = pixel_size(&timings); + tpf->denominator = timings.bt.pixelclock; + } else { + tpf->numerator = timer; + tpf->denominator = MGB4_HW_FREQ; + } + + parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME; + } + + return 0; +} + +static int vidioc_s_parm(struct file *file, void *priv, + struct v4l2_streamparm *parm) +{ + struct mgb4_vout_dev *voutdev = video_drvdata(file); + struct mgb4_regs *video = &voutdev->mgbdev->video; + struct v4l2_fract *tpf = &parm->parm.output.timeperframe; + struct v4l2_dv_timings timings; + u32 timer, period; + + if (has_timeperframe(video)) { + timer = tpf->denominator ? 
+ MGB4_PERIOD(tpf->numerator, tpf->denominator) : 0; + if (timer) { + get_timings(voutdev, &timings); + period = MGB4_PERIOD(pixel_size(&timings), + timings.bt.pixelclock); + if (timer < period) + timer = 0; + } + + mgb4_write_reg(video, voutdev->config->regs.timer, timer); + } + + return vidioc_g_parm(file, priv, parm); +} + +static int vidioc_g_dv_timings(struct file *file, void *fh, + struct v4l2_dv_timings *timings) +{ + struct mgb4_vout_dev *voutdev = video_drvdata(file); + + get_timings(voutdev, timings); + + return 0; +} + +static int vidioc_s_dv_timings(struct file *file, void *fh, + struct v4l2_dv_timings *timings) +{ + struct mgb4_vout_dev *voutdev = video_drvdata(file); + + get_timings(voutdev, timings); + + return 0; +} + +static int vidioc_enum_dv_timings(struct file *file, void *fh, + struct v4l2_enum_dv_timings *timings) +{ + return v4l2_enum_dv_timings_cap(timings, &video_timings_cap, NULL, NULL); +} + +static int vidioc_dv_timings_cap(struct file *file, void *fh, + struct v4l2_dv_timings_cap *cap) +{ + *cap = video_timings_cap; + + return 0; +} + static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_out = vidioc_enum_fmt, @@ -279,8 +513,15 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_s_fmt_vid_out = vidioc_s_fmt, .vidioc_g_fmt_vid_out = vidioc_g_fmt, .vidioc_enum_output = vidioc_enum_output, + .vidioc_enum_frameintervals = vidioc_enum_frameintervals, .vidioc_g_output = vidioc_g_output, .vidioc_s_output = vidioc_s_output, + .vidioc_g_parm = vidioc_g_parm, + .vidioc_s_parm = vidioc_s_parm, + .vidioc_dv_timings_cap = vidioc_dv_timings_cap, + .vidioc_enum_dv_timings = vidioc_enum_dv_timings, + .vidioc_g_dv_timings = vidioc_g_dv_timings, + .vidioc_s_dv_timings = vidioc_s_dv_timings, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, @@ -423,13 +664,13 @@ static void fpga_init(struct mgb4_vout_dev *voutdev) mgb4_write_reg(video, regs->config, 0x00000011); mgb4_write_reg(video, regs->resolution, - (MGB4_DEFAULT_WIDTH << 16) | MGB4_DEFAULT_HEIGHT); - mgb4_write_reg(video, regs->hsync, 0x00102020); - mgb4_write_reg(video, regs->vsync, 0x40020202); - mgb4_write_reg(video, regs->frame_period, MGB4_DEFAULT_PERIOD); + (DEFAULT_WIDTH << 16) | DEFAULT_HEIGHT); + mgb4_write_reg(video, regs->hsync, 0x00283232); + mgb4_write_reg(video, regs->vsync, 0x40141F1E); + mgb4_write_reg(video, regs->frame_limit, DEFAULT_PERIOD); mgb4_write_reg(video, regs->padding, 0x00000000); - voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 70000 >> 1) << 1; + voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 61150 >> 1) << 1; mgb4_write_reg(video, regs->config, (voutdev->config->id + MGB4_VIN_DEVICES) << 2 | 1 << 4); @@ -455,14 +696,20 @@ static void debugfs_init(struct mgb4_vout_dev *voutdev) voutdev->regs[3].offset = voutdev->config->regs.hsync; voutdev->regs[4].name = "VIDEO_PARAMS_2"; voutdev->regs[4].offset = voutdev->config->regs.vsync; - voutdev->regs[5].name = "FRAME_PERIOD"; - voutdev->regs[5].offset = voutdev->config->regs.frame_period; - voutdev->regs[6].name = "PADDING"; + voutdev->regs[5].name = "FRAME_LIMIT"; + voutdev->regs[5].offset = voutdev->config->regs.frame_limit; + voutdev->regs[6].name = "PADDING_PIXELS"; voutdev->regs[6].offset = voutdev->config->regs.padding; + if (has_timeperframe(video)) { + voutdev->regs[7].name = "TIMER"; + voutdev->regs[7].offset = voutdev->config->regs.timer; + voutdev->regset.nregs = 8; + } else { + 
voutdev->regset.nregs = 7; + } voutdev->regset.base = video->membase; voutdev->regset.regs = voutdev->regs; - voutdev->regset.nregs = ARRAY_SIZE(voutdev->regs); debugfs_create_regset32("registers", 0444, voutdev->debugfs, &voutdev->regset); diff --git a/drivers/media/pci/mgb4/mgb4_vout.h b/drivers/media/pci/mgb4/mgb4_vout.h index b163dee711fde0..adc8fe1e7ae68b 100644 --- a/drivers/media/pci/mgb4/mgb4_vout.h +++ b/drivers/media/pci/mgb4/mgb4_vout.h @@ -19,10 +19,11 @@ struct mgb4_vout_regs { u32 config; u32 status; u32 resolution; - u32 frame_period; + u32 frame_limit; u32 hsync; u32 vsync; u32 padding; + u32 timer; }; struct mgb4_vout_config { @@ -55,7 +56,7 @@ struct mgb4_vout_dev { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; struct debugfs_regset32 regset; - struct debugfs_reg32 regs[7]; + struct debugfs_reg32 regs[sizeof(struct mgb4_vout_regs) / 4]; #endif }; diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c index ca70a864a3ef15..5f100e5e03d98a 100644 --- a/drivers/media/pci/solo6x10/solo6x10-p2m.c +++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c @@ -57,7 +57,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev, int desc_cnt) { struct solo_p2m_dev *p2m_dev; - unsigned int timeout; + unsigned long time_left; unsigned int config = 0; int ret = 0; unsigned int p2m_id = 0; @@ -99,12 +99,12 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev, desc[1].ctrl); } - timeout = wait_for_completion_timeout(&p2m_dev->completion, - solo_dev->p2m_jiffies); + time_left = wait_for_completion_timeout(&p2m_dev->completion, + solo_dev->p2m_jiffies); if (WARN_ON_ONCE(p2m_dev->error)) ret = -EIO; - else if (timeout == 0) { + else if (time_left == 0) { solo_dev->p2m_timeouts++; ret = -EAGAIN; } diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c index da61f9beb6b4f5..73606cee586ede 100644 --- a/drivers/media/platform/allegro-dvt/allegro-core.c +++ b/drivers/media/platform/allegro-dvt/allegro-core.c @@ -179,7 +179,7 @@ struct allegro_dev { struct list_head channels; }; -static struct regmap_config allegro_regmap_config = { +static const struct regmap_config allegro_regmap_config = { .name = "regmap", .reg_bits = 32, .val_bits = 32, @@ -188,7 +188,7 @@ static struct regmap_config allegro_regmap_config = { .cache_type = REGCACHE_NONE, }; -static struct regmap_config allegro_sram_config = { +static const struct regmap_config allegro_sram_config = { .name = "sram", .reg_bits = 32, .val_bits = 32, @@ -1415,11 +1415,11 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev, static int allegro_mcu_wait_for_init_timeout(struct allegro_dev *dev, unsigned long timeout_ms) { - unsigned long tmo; + unsigned long time_left; - tmo = wait_for_completion_timeout(&dev->init_complete, - msecs_to_jiffies(timeout_ms)); - if (tmo == 0) + time_left = wait_for_completion_timeout(&dev->init_complete, + msecs_to_jiffies(timeout_ms)); + if (time_left == 0) return -ETIMEDOUT; reinit_completion(&dev->init_complete); @@ -2481,14 +2481,14 @@ static void allegro_mcu_interrupt(struct allegro_dev *dev) static void allegro_destroy_channel(struct allegro_channel *channel) { struct allegro_dev *dev = channel->dev; - unsigned long timeout; + unsigned long time_left; if (channel_exists(channel)) { reinit_completion(&channel->completion); allegro_mcu_send_destroy_channel(dev, channel); - timeout = wait_for_completion_timeout(&channel->completion, - msecs_to_jiffies(5000)); - if (timeout == 0) + time_left = 
wait_for_completion_timeout(&channel->completion, + msecs_to_jiffies(5000)); + if (time_left == 0) v4l2_warn(&dev->v4l2_dev, "channel %d: timeout while destroying\n", channel->mcu_channel_id); @@ -2544,7 +2544,7 @@ static void allegro_destroy_channel(struct allegro_channel *channel) static int allegro_create_channel(struct allegro_channel *channel) { struct allegro_dev *dev = channel->dev; - unsigned long timeout; + unsigned long time_left; if (channel_exists(channel)) { v4l2_warn(&dev->v4l2_dev, @@ -2595,9 +2595,9 @@ static int allegro_create_channel(struct allegro_channel *channel) reinit_completion(&channel->completion); allegro_mcu_send_create_channel(dev, channel); - timeout = wait_for_completion_timeout(&channel->completion, - msecs_to_jiffies(5000)); - if (timeout == 0) + time_left = wait_for_completion_timeout(&channel->completion, + msecs_to_jiffies(5000)); + if (time_left == 0) channel->error = -ETIMEDOUT; if (channel->error) goto err; diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c index c1108df72dd516..5c823d3f9cc0c9 100644 --- a/drivers/media/platform/atmel/atmel-isi.c +++ b/drivers/media/platform/atmel/atmel-isi.c @@ -242,7 +242,7 @@ static irqreturn_t isi_interrupt(int irq, void *dev_id) #define WAIT_ISI_DISABLE 0 static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset) { - unsigned long timeout; + unsigned long time_left; /* * The reset or disable will only succeed if we have a * pixel clock from the camera. @@ -257,9 +257,9 @@ static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset) isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); } - timeout = wait_for_completion_timeout(&isi->complete, - msecs_to_jiffies(500)); - if (timeout == 0) + time_left = wait_for_completion_timeout(&isi->complete, + msecs_to_jiffies(500)); + if (time_left == 0) return -ETIMEDOUT; return 0; diff --git a/drivers/media/platform/chips-media/coda/coda-bit.c b/drivers/media/platform/chips-media/coda/coda-bit.c index ed47d5bd8d61e9..84ded154adfe37 100644 --- a/drivers/media/platform/chips-media/coda/coda-bit.c +++ b/drivers/media/platform/chips-media/coda/coda-bit.c @@ -585,7 +585,7 @@ static int coda_alloc_context_buffers(struct coda_ctx *ctx, if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) { /* worst case slice size */ - size = (DIV_ROUND_UP(q_data->rect.width, 16) * + size = (unsigned long)(DIV_ROUND_UP(q_data->rect.width, 16) * DIV_ROUND_UP(q_data->rect.height, 16)) * 3200 / 8 + 512; ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size, "slicebuf"); diff --git a/drivers/media/platform/chips-media/coda/coda-jpeg.c b/drivers/media/platform/chips-media/coda/coda-jpeg.c index ba8f410029172d..5746892658b12a 100644 --- a/drivers/media/platform/chips-media/coda/coda-jpeg.c +++ b/drivers/media/platform/chips-media/coda/coda-jpeg.c @@ -5,7 +5,7 @@ * Copyright (C) 2014 Philipp Zabel, Pengutronix */ -#include +#include #include #include #include diff --git a/drivers/media/platform/imagination/Kconfig b/drivers/media/platform/imagination/Kconfig index 7139ae22219b43..a302c955483dca 100644 --- a/drivers/media/platform/imagination/Kconfig +++ b/drivers/media/platform/imagination/Kconfig @@ -2,6 +2,7 @@ config VIDEO_E5010_JPEG_ENC tristate "Imagination E5010 JPEG Encoder Driver" depends on VIDEO_DEV + depends on ARCH_K3 || COMPILE_TEST select VIDEOBUF2_DMA_CONTIG select VIDEOBUF2_VMALLOC select V4L2_MEM2MEM_DEV diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c 
b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c index 11ca2c2fbaade8..e62c1c18758bb1 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c @@ -595,7 +595,7 @@ static void mtk_init_vdec_params(struct mtk_vcodec_dec_ctx *ctx) } } -static struct vb2_ops mtk_vdec_frame_vb2_ops = { +static const struct vb2_ops mtk_vdec_frame_vb2_ops = { .queue_setup = vb2ops_vdec_queue_setup, .buf_prepare = vb2ops_vdec_buf_prepare, .wait_prepare = vb2_ops_wait_prepare, diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c index b903e39fee8928..3307dc15fc1dfe 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c @@ -854,7 +854,7 @@ static int vb2ops_vdec_out_buf_validate(struct vb2_buffer *vb) return 0; } -static struct vb2_ops mtk_vdec_request_vb2_ops = { +static const struct vb2_ops mtk_vdec_request_vb2_ops = { .queue_setup = vb2ops_vdec_queue_setup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c index 73d5cef33b2abf..1e1b32faac77bc 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c @@ -347,11 +347,16 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs, return vpu_dec_reset(vpu); fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + } + src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); - y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + y_fb_dma = fb->base_y.dma_addr; + c_fb_dma = fb->base_c.dma_addr; mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p", inst->num_nalu, y_fb_dma, c_fb_dma, fb); diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c index 2d4611e7fa0b2f..1ed0ccec56655e 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c @@ -724,11 +724,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs return vpu_dec_reset(vpu); fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + } + src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); - y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; - c_fb_dma = fb ? 
(u64)fb->base_c.dma_addr : 0; + y_fb_dma = fb->base_y.dma_addr; + c_fb_dma = fb->base_c.dma_addr; mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx", inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma); diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c index e27e728f392e6c..232ef3bd246a3d 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c @@ -334,14 +334,18 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs, src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); - dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + } - y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; + dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); + y_fb_dma = fb->base_y.dma_addr; if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1) c_fb_dma = y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h; else - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + c_fb_dma = fb->base_c.dma_addr; inst->vsi->dec.bs_dma = (u64)bs->dma_addr; inst->vsi->dec.bs_sz = bs->size; diff --git a/drivers/media/platform/microchip/microchip-isc-base.c b/drivers/media/platform/microchip/microchip-isc-base.c index f3a5cbacadbe09..28e56f6a695da2 100644 --- a/drivers/media/platform/microchip/microchip-isc-base.c +++ b/drivers/media/platform/microchip/microchip-isc-base.c @@ -902,8 +902,11 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f) return 0; } -static int isc_validate(struct isc_device *isc) +static int isc_link_validate(struct media_link *link) { + struct video_device *vdev = + media_entity_to_video_device(link->sink->entity); + struct isc_device *isc = video_get_drvdata(vdev); int ret; int i; struct isc_format *sd_fmt = NULL; @@ -1906,20 +1909,6 @@ int microchip_isc_pipeline_init(struct isc_device *isc) } EXPORT_SYMBOL_GPL(microchip_isc_pipeline_init); -static int isc_link_validate(struct media_link *link) -{ - struct video_device *vdev = - media_entity_to_video_device(link->sink->entity); - struct isc_device *isc = video_get_drvdata(vdev); - int ret; - - ret = v4l2_subdev_link_validate(link); - if (ret) - return ret; - - return isc_validate(isc); -} - static const struct media_entity_operations isc_entity_operations = { .link_validate = isc_link_validate, }; diff --git a/drivers/media/platform/microchip/microchip-sama5d2-isc.c b/drivers/media/platform/microchip/microchip-sama5d2-isc.c index 5ac149cf3647f1..60b6d922d764eb 100644 --- a/drivers/media/platform/microchip/microchip-sama5d2-isc.c +++ b/drivers/media/platform/microchip/microchip-sama5d2-isc.c @@ -353,33 +353,29 @@ static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = { static int isc_parse_dt(struct device *dev, struct isc_device *isc) { struct device_node *np = dev->of_node; - struct device_node *epn = NULL; + struct device_node *epn; struct isc_subdev_entity *subdev_entity; unsigned int flags; - int ret; INIT_LIST_HEAD(&isc->subdev_entities); - while (1) { + for_each_endpoint_of_node(np, epn) { struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 }; - - epn = of_graph_get_next_endpoint(np, epn); - if (!epn) - return 0; + int ret; ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn), &v4l2_epn); 
if (ret) { - ret = -EINVAL; + of_node_put(epn); dev_err(dev, "Could not parse the endpoint\n"); - break; + return -EINVAL; } subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity), GFP_KERNEL); if (!subdev_entity) { - ret = -ENOMEM; - break; + of_node_put(epn); + return -ENOMEM; } subdev_entity->epn = epn; @@ -400,9 +396,8 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc) list_add_tail(&subdev_entity->list, &isc->subdev_entities); } - of_node_put(epn); - return ret; + return 0; } static int microchip_isc_probe(struct platform_device *pdev) diff --git a/drivers/media/platform/microchip/microchip-sama7g5-isc.c b/drivers/media/platform/microchip/microchip-sama7g5-isc.c index 73445f33d26ba3..e97abe3e35af0e 100644 --- a/drivers/media/platform/microchip/microchip-sama7g5-isc.c +++ b/drivers/media/platform/microchip/microchip-sama7g5-isc.c @@ -336,36 +336,32 @@ static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = { static int xisc_parse_dt(struct device *dev, struct isc_device *isc) { struct device_node *np = dev->of_node; - struct device_node *epn = NULL; + struct device_node *epn; struct isc_subdev_entity *subdev_entity; unsigned int flags; - int ret; bool mipi_mode; INIT_LIST_HEAD(&isc->subdev_entities); mipi_mode = of_property_read_bool(np, "microchip,mipi-mode"); - while (1) { + for_each_endpoint_of_node(np, epn) { struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 }; - - epn = of_graph_get_next_endpoint(np, epn); - if (!epn) - return 0; + int ret; ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn), &v4l2_epn); if (ret) { - ret = -EINVAL; + of_node_put(epn); dev_err(dev, "Could not parse the endpoint\n"); - break; + return -EINVAL; } subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity), GFP_KERNEL); if (!subdev_entity) { - ret = -ENOMEM; - break; + of_node_put(epn); + return -ENOMEM; } subdev_entity->epn = epn; @@ -389,9 +385,8 @@ static int xisc_parse_dt(struct device *dev, struct isc_device *isc) list_add_tail(&subdev_entity->list, &isc->subdev_entities); } - of_node_put(epn); - return ret; + return 0; } static int microchip_xisc_probe(struct platform_device *pdev) diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c index d8812fc06c676b..0e56a4331b0d70 100644 --- a/drivers/media/platform/nvidia/tegra-vde/h264.c +++ b/drivers/media/platform/nvidia/tegra-vde/h264.c @@ -623,14 +623,14 @@ static int tegra_vde_decode_end(struct tegra_vde *vde) unsigned int read_bytes, macroblocks_nb; struct device *dev = vde->dev; dma_addr_t bsev_ptr; - long timeout; + long time_left; int ret; - timeout = wait_for_completion_interruptible_timeout( + time_left = wait_for_completion_interruptible_timeout( &vde->decode_completion, msecs_to_jiffies(1000)); - if (timeout < 0) { - ret = timeout; - } else if (timeout == 0) { + if (time_left < 0) { + ret = time_left; + } else if (time_left == 0) { bsev_ptr = tegra_vde_readl(vde, vde->bsev, 0x10); macroblocks_nb = tegra_vde_readl(vde, vde->sxe, 0xC8) & 0x1FFF; read_bytes = bsev_ptr ? bsev_ptr - vde->bitstream_data_addr : 0; diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c index b9729a8883d6ba..44e1402e8be198 100644 --- a/drivers/media/platform/nxp/imx-mipi-csis.c +++ b/drivers/media/platform/nxp/imx-mipi-csis.c @@ -861,18 +861,21 @@ static void mipi_csis_log_counters(struct mipi_csis_device *csis, bool non_error { unsigned int num_events = non_errors ? 
MIPI_CSIS_NUM_EVENTS : MIPI_CSIS_NUM_EVENTS - 8; + unsigned int counters[MIPI_CSIS_NUM_EVENTS]; unsigned long flags; unsigned int i; spin_lock_irqsave(&csis->slock, flags); + for (i = 0; i < num_events; ++i) + counters[i] = csis->events[i].counter; + spin_unlock_irqrestore(&csis->slock, flags); for (i = 0; i < num_events; ++i) { - if (csis->events[i].counter > 0 || csis->debug.enable) + if (counters[i] > 0 || csis->debug.enable) dev_info(csis->dev, "%s events: %d\n", csis->events[i].name, - csis->events[i].counter); + counters[i]); } - spin_unlock_irqrestore(&csis->slock, flags); } static int mipi_csis_dump_regs(struct mipi_csis_device *csis) @@ -1344,7 +1347,7 @@ static int mipi_csis_async_register(struct mipi_csis_device *csis) * Suspend/resume */ -static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev) +static int mipi_csis_runtime_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct mipi_csis_device *csis = sd_to_mipi_csis_device(sd); @@ -1359,7 +1362,7 @@ static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused mipi_csis_runtime_resume(struct device *dev) +static int mipi_csis_runtime_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct mipi_csis_device *csis = sd_to_mipi_csis_device(sd); @@ -1379,8 +1382,8 @@ static int __maybe_unused mipi_csis_runtime_resume(struct device *dev) } static const struct dev_pm_ops mipi_csis_pm_ops = { - SET_RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume, - NULL) + RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume, + NULL) }; /* ----------------------------------------------------------------------------- @@ -1571,7 +1574,7 @@ static struct platform_driver mipi_csis_driver = { .driver = { .of_match_table = mipi_csis_of_match, .name = CSIS_DRIVER_NAME, - .pm = &mipi_csis_pm_ops, + .pm = pm_ptr(&mipi_csis_pm_ops), }, }; diff --git a/drivers/media/platform/nxp/imx-pxp.h b/drivers/media/platform/nxp/imx-pxp.h index 44f95c749d2ea6..476f2042fa6f32 100644 --- a/drivers/media/platform/nxp/imx-pxp.h +++ b/drivers/media/platform/nxp/imx-pxp.h @@ -594,12 +594,17 @@ (((v) << 18) & BM_PXP_CSC1_COEF0_C0) #define BP_PXP_CSC1_COEF0_UV_OFFSET 9 #define BM_PXP_CSC1_COEF0_UV_OFFSET 0x0003FE00 + +/* + * We use v * (1 << 9) instead of v << 9, to workaround a gcc5 bug. + * The compiler cannot understand that the expression is constant. + */ #define BF_PXP_CSC1_COEF0_UV_OFFSET(v) \ - (((v) << 9) & BM_PXP_CSC1_COEF0_UV_OFFSET) + (((v) * (1 << 9)) & BM_PXP_CSC1_COEF0_UV_OFFSET) #define BP_PXP_CSC1_COEF0_Y_OFFSET 0 #define BM_PXP_CSC1_COEF0_Y_OFFSET 0x000001FF #define BF_PXP_CSC1_COEF0_Y_OFFSET(v) \ - (((v) << 0) & BM_PXP_CSC1_COEF0_Y_OFFSET) + ((v) & BM_PXP_CSC1_COEF0_Y_OFFSET) #define HW_PXP_CSC1_COEF1 (0x000001b0) diff --git a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c index ba2e81f24965d8..d4a6c553296990 100644 --- a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c +++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c @@ -693,7 +693,7 @@ static int imx8mq_mipi_csi_pm_resume(struct device *dev) return ret ? 
-EAGAIN : 0; } -static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev) +static int imx8mq_mipi_csi_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct csi_state *state = mipi_sd_to_csi2_state(sd); @@ -705,7 +705,7 @@ static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev) return 0; } -static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev) +static int imx8mq_mipi_csi_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct csi_state *state = mipi_sd_to_csi2_state(sd); @@ -716,7 +716,7 @@ static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev) return imx8mq_mipi_csi_pm_resume(dev); } -static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev) +static int imx8mq_mipi_csi_runtime_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct csi_state *state = mipi_sd_to_csi2_state(sd); @@ -731,7 +731,7 @@ static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev) return ret; } -static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev) +static int imx8mq_mipi_csi_runtime_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct csi_state *state = mipi_sd_to_csi2_state(sd); @@ -747,10 +747,9 @@ static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8mq_mipi_csi_pm_ops = { - SET_RUNTIME_PM_OPS(imx8mq_mipi_csi_runtime_suspend, - imx8mq_mipi_csi_runtime_resume, - NULL) - SET_SYSTEM_SLEEP_PM_OPS(imx8mq_mipi_csi_suspend, imx8mq_mipi_csi_resume) + RUNTIME_PM_OPS(imx8mq_mipi_csi_runtime_suspend, + imx8mq_mipi_csi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(imx8mq_mipi_csi_suspend, imx8mq_mipi_csi_resume) }; /* ----------------------------------------------------------------------------- @@ -958,7 +957,7 @@ static struct platform_driver imx8mq_mipi_csi_driver = { .driver = { .of_match_table = imx8mq_mipi_csi_of_match, .name = MIPI_CSI2_DRIVER_NAME, - .pm = &imx8mq_mipi_csi_pm_ops, + .pm = pm_ptr(&imx8mq_mipi_csi_pm_ops), }, }; diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index cd72feca618ca4..3b8fc31d957c77 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -297,12 +297,6 @@ static void video_stop_streaming(struct vb2_queue *q) ret = v4l2_subdev_call(subdev, video, s_stream, 0); - if (entity->use_count > 1) { - /* Don't stop if other instances of the pipeline are still running */ - dev_dbg(video->camss->dev, "Video pipeline still used, don't stop streaming.\n"); - return; - } - if (ret) { dev_err(video->camss->dev, "Video pipeline stop failed: %d\n", ret); return; diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index 51b1d3550421a4..d64985ca6e884f 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c @@ -2283,6 +2283,8 @@ static int camss_probe(struct platform_device *pdev) v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev); + pm_runtime_enable(dev); + num_subdevs = camss_of_parse_ports(camss); if (num_subdevs < 0) { ret = num_subdevs; @@ -2323,8 +2325,6 @@ static int camss_probe(struct platform_device *pdev) } } - pm_runtime_enable(dev); - return 0; err_register_subdevs: @@ -2332,6 +2332,7 @@ static int camss_probe(struct platform_device *pdev) err_v4l2_device_unregister: 
v4l2_device_unregister(&camss->v4l2_dev); v4l2_async_nf_cleanup(&camss->notifier); + pm_runtime_disable(dev); err_genpd_cleanup: camss_genpd_cleanup(camss); diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 165c947a670354..84e95a46dfc983 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -430,6 +430,7 @@ static void venus_remove(struct platform_device *pdev) struct device *dev = core->dev; int ret; + cancel_delayed_work_sync(&core->work); ret = pm_runtime_get_sync(dev); WARN_ON(ret < 0); diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index fe7da2b3048299..66a18830e66dac 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -316,10 +316,10 @@ int venus_firmware_init(struct venus_core *core) core->fw.dev = &pdev->dev; - iommu_dom = iommu_domain_alloc(&platform_bus_type); - if (!iommu_dom) { + iommu_dom = iommu_paging_domain_alloc(core->fw.dev); + if (IS_ERR(iommu_dom)) { dev_err(core->fw.dev, "Failed to allocate iommu domain\n"); - ret = -ENOMEM; + ret = PTR_ERR(iommu_dom); goto err_unregister; } diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c index 3418d2dd937116..3ae063094e3e1f 100644 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c @@ -156,7 +156,7 @@ void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt) pkt->hdr.size = sizeof(*pkt); pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY; pkt->num_properties = 1; - pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION; + pkt->data = HFI_PROPERTY_SYS_IMAGE_VERSION; } int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie, @@ -331,7 +331,7 @@ int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie, pkt->alloc_len = out_frame->alloc_len; pkt->filled_len = out_frame->filled_len; pkt->offset = out_frame->offset; - pkt->data[0] = out_frame->extradata_size; + pkt->data = out_frame->extradata_size; return 0; } @@ -402,7 +402,7 @@ static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt, pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY; pkt->shdr.session_id = hash32_ptr(cookie); pkt->num_properties = 1; - pkt->data[0] = ptype; + pkt->data = ptype; return 0; } @@ -1110,7 +1110,7 @@ pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt, switch (ptype) { case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: - pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY; + pkt->data = HFI_PROPERTY_CONFIG_VDEC_ENTROPY; break; default: ret = pkt_session_get_property_1x(pkt, cookie, ptype); diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h index 20acd412ee7b91..a83125bc17aab9 100644 --- a/drivers/media/platform/qcom/venus/hfi_cmds.h +++ b/drivers/media/platform/qcom/venus/hfi_cmds.h @@ -74,7 +74,7 @@ struct hfi_sys_set_property_pkt { struct hfi_sys_get_property_pkt { struct hfi_pkt_hdr hdr; u32 num_properties; - u32 data[1]; + u32 data; }; struct hfi_sys_set_buffers_pkt { @@ -82,7 +82,7 @@ struct hfi_sys_set_buffers_pkt { u32 buffer_type; u32 buffer_size; u32 num_buffers; - u32 buffer_addr[1]; + u32 buffer_addr[]; }; struct hfi_sys_ping_pkt { @@ -151,7 +151,7 @@ struct hfi_session_empty_buffer_compressed_pkt { u32 input_tag; u32 packet_buffer; u32 extradata_buffer; - u32 data[1]; + u32 data; }; struct 
hfi_session_empty_buffer_uncompressed_plane0_pkt {
@@ -168,7 +168,7 @@ struct hfi_session_empty_buffer_uncompressed_plane0_pkt {
 	u32 input_tag;
 	u32 packet_buffer;
 	u32 extradata_buffer;
-	u32 data[1];
+	u32 data;
 };
 
 struct hfi_session_empty_buffer_uncompressed_plane1_pkt {
@@ -177,7 +177,7 @@ struct hfi_session_empty_buffer_uncompressed_plane1_pkt {
 	u32 filled_len;
 	u32 offset;
 	u32 packet_buffer2;
-	u32 data[1];
+	u32 data;
 };
 
 struct hfi_session_empty_buffer_uncompressed_plane2_pkt {
@@ -186,7 +186,7 @@ struct hfi_session_empty_buffer_uncompressed_plane2_pkt {
 	u32 filled_len;
 	u32 offset;
 	u32 packet_buffer3;
-	u32 data[1];
+	u32 data;
 };
 
 struct hfi_session_fill_buffer_pkt {
@@ -198,7 +198,7 @@ struct hfi_session_fill_buffer_pkt {
 	u32 output_tag;
 	u32 packet_buffer;
 	u32 extradata_buffer;
-	u32 data[1];
+	u32 data;
 };
 
 struct hfi_session_flush_pkt {
@@ -217,7 +217,7 @@ struct hfi_session_resume_pkt {
 struct hfi_session_get_property_pkt {
 	struct hfi_session_hdr_pkt shdr;
 	u32 num_properties;
-	u32 data[1];
+	u32 data;
 };
 
 struct hfi_session_release_buffer_pkt {
@@ -227,7 +227,7 @@ struct hfi_session_release_buffer_pkt {
 	u32 extradata_size;
 	u32 response_req;
 	u32 num_buffers;
-	u32 buffer_info[1];
+	u32 buffer_info[] __counted_by(num_buffers);
 };
 
 struct hfi_session_release_resources_pkt {
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index e4c05d62cfc744..f44059f19505a0 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -761,7 +761,7 @@ struct hfi_multi_stream_3x {
 
 struct hfi_multi_view_format {
 	u32 views;
-	u32 view_order[1];
+	u32 view_order[];
 };
 
 #define HFI_MULTI_SLICE_OFF 0x1
@@ -1005,13 +1005,13 @@ struct hfi_uncompressed_plane_constraints {
 struct hfi_uncompressed_plane_info {
 	u32 format;
 	u32 num_planes;
-	struct hfi_uncompressed_plane_constraints plane_constraints[1];
+	struct hfi_uncompressed_plane_constraints plane_constraints;
 };
 
 struct hfi_uncompressed_format_supported {
 	u32 buffer_type;
 	u32 format_entries;
-	struct hfi_uncompressed_plane_info plane_info[1];
+	struct hfi_uncompressed_plane_info plane_info;
 };
 
 struct hfi_uncompressed_plane_actual {
@@ -1038,7 +1038,7 @@ struct hfi_codec_supported {
 
 struct hfi_properties_supported {
 	u32 num_properties;
-	u32 properties[1];
+	u32 properties[];
 };
 
 struct hfi_max_sessions_supported {
@@ -1085,12 +1085,12 @@ struct hfi_resource_ocmem_requirement {
 
 struct hfi_resource_ocmem_requirement_info {
 	u32 num_entries;
-	struct hfi_resource_ocmem_requirement requirements[1];
+	struct hfi_resource_ocmem_requirement requirements[];
 };
 
 struct hfi_property_sys_image_version_info_type {
 	u32 string_size;
-	u8 str_image_version[1];
+	u8 str_image_version[];
 };
 
 struct hfi_codec_mask_supported {
@@ -1141,7 +1141,7 @@ struct hfi_extradata_header {
 	u32 port_index;
 	u32 type;
 	u32 data_size;
-	u8 data[1];
+	u8 data[];
 };
 
 struct hfi_batch_info {
@@ -1236,7 +1236,7 @@ static inline void hfi_bufreq_set_count_min_host(struct hfi_buffer_requirements
 
 struct hfi_data_payload {
 	u32 size;
-	u8 data[1];
+	u8 data[];
 };
 
 struct hfi_enable_picture {
@@ -1264,12 +1264,12 @@ struct hfi_interlace_format_supported {
 struct hfi_buffer_alloc_mode_supported {
 	u32 buffer_type;
 	u32 num_entries;
-	u32 data[1];
+	u32 data[];
 };
 
 struct hfi_mb_error_map {
 	u32 error_map_size;
-	u8 error_map[1];
+	u8 error_map[];
 };
 
 struct hfi_metadata_pass_through {
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 
c43839539d4dda..3df241dc3a118b 100644 --- a/drivers/media/platform/qcom/venus/hfi_parser.c +++ b/drivers/media/platform/qcom/venus/hfi_parser.c @@ -157,7 +157,7 @@ static void parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_uncompressed_format_supported *fmt = data; - struct hfi_uncompressed_plane_info *pinfo = fmt->plane_info; + struct hfi_uncompressed_plane_info *pinfo = &fmt->plane_info; struct hfi_uncompressed_plane_constraints *constr; struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {}; u32 entries = fmt->format_entries; diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c index f5a655973c081b..6289166786ec36 100644 --- a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c +++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c @@ -1063,51 +1063,51 @@ struct enc_bufsize_ops { u32 (*persist)(void); }; -static struct dec_bufsize_ops dec_h264_ops = { +static const struct dec_bufsize_ops dec_h264_ops = { .scratch = h264d_scratch_size, .scratch1 = h264d_scratch1_size, .persist1 = h264d_persist1_size, }; -static struct dec_bufsize_ops dec_h265_ops = { +static const struct dec_bufsize_ops dec_h265_ops = { .scratch = h265d_scratch_size, .scratch1 = h265d_scratch1_size, .persist1 = h265d_persist1_size, }; -static struct dec_bufsize_ops dec_vp8_ops = { +static const struct dec_bufsize_ops dec_vp8_ops = { .scratch = vpxd_scratch_size, .scratch1 = vp8d_scratch1_size, .persist1 = vp8d_persist1_size, }; -static struct dec_bufsize_ops dec_vp9_ops = { +static const struct dec_bufsize_ops dec_vp9_ops = { .scratch = vpxd_scratch_size, .scratch1 = vp9d_scratch1_size, .persist1 = vp9d_persist1_size, }; -static struct dec_bufsize_ops dec_mpeg2_ops = { +static const struct dec_bufsize_ops dec_mpeg2_ops = { .scratch = mpeg2d_scratch_size, .scratch1 = mpeg2d_scratch1_size, .persist1 = mpeg2d_persist1_size, }; -static struct enc_bufsize_ops enc_h264_ops = { +static const struct enc_bufsize_ops enc_h264_ops = { .scratch = h264e_scratch_size, .scratch1 = h264e_scratch1_size, .scratch2 = enc_scratch2_size, .persist = enc_persist_size, }; -static struct enc_bufsize_ops enc_h265_ops = { +static const struct enc_bufsize_ops enc_h265_ops = { .scratch = h265e_scratch_size, .scratch1 = h265e_scratch1_size, .scratch2 = enc_scratch2_size, .persist = enc_persist_size, }; -static struct enc_bufsize_ops enc_vp8_ops = { +static const struct enc_bufsize_ops enc_vp8_ops = { .scratch = vp8e_scratch_size, .scratch1 = vp8e_scratch1_size, .scratch2 = enc_scratch2_size, @@ -1186,7 +1186,7 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype, u32 codec = params->codec; u32 width = params->width, height = params->height, out_min_count; u32 out_width = params->out_width, out_height = params->out_height; - struct dec_bufsize_ops *dec_ops; + const struct dec_bufsize_ops *dec_ops; bool is_secondary_output = params->dec.is_secondary_output; bool is_interlaced = params->dec.is_interlaced; u32 max_mbs_per_frame = params->dec.max_mbs_per_frame; @@ -1260,7 +1260,7 @@ static int bufreq_enc(struct hfi_plat_buffers_params *params, u32 buftype, struct hfi_buffer_requirements *bufreq) { enum hfi_version version = params->version; - struct enc_bufsize_ops *enc_ops; + const struct enc_bufsize_ops *enc_ops; u32 width = params->width; u32 height = params->height; bool is_tenbit = params->enc.is_tenbit; diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c index 
4ce76ce6dd4d84..ea8a2bd9419e6d 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.c +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -876,7 +876,7 @@ static int vcodec_domains_get(struct venus_core *core) if (!res->vcodec_pmdomains_num) goto skip_pmdomains; - ret = dev_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains); + ret = devm_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains); if (ret < 0) return ret; @@ -902,14 +902,11 @@ static int vcodec_domains_get(struct venus_core *core) return 0; opp_attach_err: - dev_pm_domain_detach_list(core->pmdomains); return ret; } static void vcodec_domains_put(struct venus_core *core) { - dev_pm_domain_detach_list(core->pmdomains); - if (!core->has_opp_table) return; diff --git a/drivers/media/platform/raspberrypi/pisp_be/Kconfig b/drivers/media/platform/raspberrypi/pisp_be/Kconfig index 38c0f8305d620d..46765a2e4c4d15 100644 --- a/drivers/media/platform/raspberrypi/pisp_be/Kconfig +++ b/drivers/media/platform/raspberrypi/pisp_be/Kconfig @@ -2,6 +2,7 @@ config VIDEO_RASPBERRYPI_PISP_BE tristate "Raspberry Pi PiSP Backend (BE) ISP driver" depends on V4L_PLATFORM_DRIVERS depends on VIDEO_DEV + depends on ARCH_BCM2835 || COMPILE_TEST select VIDEO_V4L2_SUBDEV_API select MEDIA_CONTROLLER select VIDEOBUF2_DMA_CONTIG diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c index 809c3a38cc4af9..695d884a22d1b9 100644 --- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c +++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c @@ -1274,16 +1274,7 @@ static const struct rvin_info rcar_info_r8a77995 = { .scaler = rvin_scaler_gen3, }; -static const struct rvin_info rcar_info_r8a779a0 = { - .model = RCAR_GEN3, - .use_mc = true, - .use_isp = true, - .nv12 = true, - .max_width = 4096, - .max_height = 4096, -}; - -static const struct rvin_info rcar_info_r8a779g0 = { +static const struct rvin_info rcar_info_gen4 = { .model = RCAR_GEN3, .use_mc = true, .use_isp = true, @@ -1354,12 +1345,18 @@ static const struct of_device_id rvin_of_id_table[] = { .data = &rcar_info_r8a77995, }, { + /* Keep to be compatible with old DTS files. */ .compatible = "renesas,vin-r8a779a0", - .data = &rcar_info_r8a779a0, + .data = &rcar_info_gen4, }, { + /* Keep to be compatible with old DTS files. 
*/ .compatible = "renesas,vin-r8a779g0", - .data = &rcar_info_r8a779g0, + .data = &rcar_info_gen4, + }, + { + .compatible = "renesas,rcar-gen4-vin", + .data = &rcar_info_gen4, }, { /* Sentinel */ }, }; diff --git a/drivers/media/platform/renesas/rcar_jpu.c b/drivers/media/platform/renesas/rcar_jpu.c index fff349e45067ed..e50fe7525a732b 100644 --- a/drivers/media/platform/renesas/rcar_jpu.c +++ b/drivers/media/platform/renesas/rcar_jpu.c @@ -14,7 +14,7 @@ * 3) V4L2_CID_JPEG_ACTIVE_MARKER */ -#include +#include #include #include #include diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c index e68fcdaea207aa..c7fdee347ac8ae 100644 --- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c +++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c @@ -865,6 +865,7 @@ static const struct of_device_id rzg2l_csi2_of_table[] = { { .compatible = "renesas,rzg2l-csi2", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table); static struct platform_driver rzg2l_csi2_pdrv = { .remove_new = rzg2l_csi2_remove, diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c index fdb46ec0c872a1..e728f9f5160e42 100644 --- a/drivers/media/platform/renesas/vsp1/vsp1_video.c +++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c @@ -1081,6 +1081,27 @@ static const struct v4l2_file_operations vsp1_video_fops = { .mmap = vb2_fop_mmap, }; +/* ----------------------------------------------------------------------------- + * Media entity operations + */ + +static int vsp1_video_link_validate(struct media_link *link) +{ + /* + * Ideally, link validation should be implemented here instead of + * calling vsp1_video_verify_format() in vsp1_video_streamon() + * manually. That would however break userspace that start one video + * device before configures formats on other video devices in the + * pipeline. This operation is just a no-op to silence the warnings + * from v4l2_subdev_link_validate(). + */ + return 0; +} + +static const struct media_entity_operations vsp1_video_media_ops = { + .link_validate = vsp1_video_link_validate, +}; + /* ----------------------------------------------------------------------------- * Suspend and Resume */ @@ -1215,6 +1236,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1, /* ... and the video node... 
*/ video->video.v4l2_dev = &video->vsp1->v4l2_dev; + video->video.entity.ops = &vsp1_video_media_ops; video->video.fops = &vsp1_video_fops; snprintf(video->video.name, sizeof(video->video.name), "%s %s", rwpf->entity.subdev.name, direction); diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c index f956b90a407a10..60c97bb7b18b3b 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c @@ -178,3 +178,17 @@ void rkisp1_sd_adjust_crop(struct v4l2_rect *crop, rkisp1_sd_adjust_crop_rect(crop, &crop_bounds); } + +void rkisp1_bls_swap_regs(enum rkisp1_fmt_raw_pat_type pattern, + const u32 input[4], u32 output[4]) +{ + static const unsigned int swap[4][4] = { + [RKISP1_RAW_RGGB] = { 0, 1, 2, 3 }, + [RKISP1_RAW_GRBG] = { 1, 0, 3, 2 }, + [RKISP1_RAW_GBRG] = { 2, 3, 0, 1 }, + [RKISP1_RAW_BGGR] = { 3, 2, 1, 0 }, + }; + + for (unsigned int i = 0; i < 4; ++i) + output[i] = input[swap[pattern][i]]; +} diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h index 26573f6ae57550..ca952fd0829ba7 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h @@ -33,9 +33,10 @@ struct regmap; #define RKISP1_ISP_SD_SRC BIT(0) #define RKISP1_ISP_SD_SINK BIT(1) -/* min and max values for the widths and heights of the entities */ -#define RKISP1_ISP_MAX_WIDTH 4032 -#define RKISP1_ISP_MAX_HEIGHT 3024 +/* + * Minimum values for the width and height of entities. The maximum values are + * model-specific and stored in the rkisp1_info structure. + */ #define RKISP1_ISP_MIN_WIDTH 32 #define RKISP1_ISP_MIN_HEIGHT 32 @@ -115,6 +116,8 @@ enum rkisp1_isp_pad { * @RKISP1_FEATURE_SELF_PATH: The ISP has a self path * @RKISP1_FEATURE_DUAL_CROP: The ISP has the dual crop block at the resizer input * @RKISP1_FEATURE_DMA_34BIT: The ISP uses 34-bit DMA addresses + * @RKISP1_FEATURE_BLS: The ISP has a dedicated BLS block + * @RKISP1_FEATURE_COMPAND: The ISP has a companding block * * The ISP features are stored in a bitmask in &rkisp1_info.features and allow * the driver to implement support for features present in some ISP versions @@ -126,6 +129,8 @@ enum rkisp1_feature { RKISP1_FEATURE_SELF_PATH = BIT(2), RKISP1_FEATURE_DUAL_CROP = BIT(3), RKISP1_FEATURE_DMA_34BIT = BIT(4), + RKISP1_FEATURE_BLS = BIT(5), + RKISP1_FEATURE_COMPAND = BIT(6), }; #define rkisp1_has_feature(rkisp1, feature) \ @@ -140,6 +145,8 @@ enum rkisp1_feature { * @isr_size: number of entries in the @isrs array * @isp_ver: ISP version * @features: bitmask of rkisp1_feature features implemented by the ISP + * @max_width: maximum input frame width + * @max_height: maximum input frame height * * This structure contains information about the ISP specific to a particular * ISP model, version, or integration in a particular SoC. 
@@ -151,6 +158,8 @@ struct rkisp1_info { unsigned int isr_size; enum rkisp1_cif_isp_version isp_ver; unsigned int features; + unsigned int max_width; + unsigned int max_height; }; /* @@ -232,7 +241,7 @@ struct rkisp1_vdev_node { /* * struct rkisp1_buffer - A container for the vb2 buffers used by the video devices: - * params, stats, mainpath, selfpath + * stats, mainpath, selfpath * * @vb: vb2 buffer * @queue: entry of the buffer in the queue @@ -244,6 +253,26 @@ struct rkisp1_buffer { dma_addr_t buff_addr[VIDEO_MAX_PLANES]; }; +/* + * struct rkisp1_params_buffer - A container for the vb2 buffers used by the + * params video device + * + * @vb: vb2 buffer + * @queue: entry of the buffer in the queue + * @cfg: scratch buffer used for caching the ISP configuration parameters + */ +struct rkisp1_params_buffer { + struct vb2_v4l2_buffer vb; + struct list_head queue; + void *cfg; +}; + +static inline struct rkisp1_params_buffer * +to_rkisp1_params_buffer(struct vb2_v4l2_buffer *vbuf) +{ + return container_of(vbuf, struct rkisp1_params_buffer, vb); +} + /* * struct rkisp1_dummy_buffer - A buffer to write the next frame to in case * there are no vb2 buffers available. @@ -372,9 +401,11 @@ struct rkisp1_params_ops { * @ops: pointer to the variant-specific operations * @config_lock: locks the buffer list 'params' * @params: queue of rkisp1_buffer - * @vdev_fmt: v4l2_format of the metadata format + * @metafmt: the currently enabled metadata format * @quantization: the quantization configured on the isp's src pad + * @ycbcr_encoding: the YCbCr encoding * @raw_type: the bayer pattern on the isp video sink pad + * @enabled_blocks: bitmask of enabled ISP blocks */ struct rkisp1_params { struct rkisp1_vdev_node vnode; @@ -383,11 +414,14 @@ struct rkisp1_params { spinlock_t config_lock; /* locks the buffers list 'params' */ struct list_head params; - struct v4l2_format vdev_fmt; + + const struct v4l2_meta_format *metafmt; enum v4l2_quantization quantization; enum v4l2_ycbcr_encoding ycbcr_encoding; enum rkisp1_fmt_raw_pat_type raw_type; + + u32 enabled_blocks; }; /* @@ -573,6 +607,9 @@ void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop, void rkisp1_sd_adjust_crop(struct v4l2_rect *crop, const struct v4l2_mbus_framefmt *bounds); +void rkisp1_bls_swap_regs(enum rkisp1_fmt_raw_pat_type pattern, + const u32 input[4], u32 output[4]); + /* * rkisp1_mbus_info_get_by_code - get the isp info of the media bus code * diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c index 4202642e052392..841e58c20f7fcb 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c @@ -307,6 +307,7 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { + struct rkisp1_csi *csi = to_rkisp1_csi(sd); const struct rkisp1_mbus_info *mbus_info; struct v4l2_mbus_framefmt *sink_fmt, *src_fmt; @@ -326,10 +327,10 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd, sink_fmt->width = clamp_t(u32, fmt->format.width, RKISP1_ISP_MIN_WIDTH, - RKISP1_ISP_MAX_WIDTH); + csi->rkisp1->info->max_width); sink_fmt->height = clamp_t(u32, fmt->format.height, RKISP1_ISP_MIN_HEIGHT, - RKISP1_ISP_MAX_HEIGHT); + csi->rkisp1->info->max_height); fmt->format = *sink_fmt; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c index bb0202386c7017..dd114ab77800f8 100644 ---
a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c @@ -509,7 +509,10 @@ static const struct rkisp1_info px30_isp_info = { .isp_ver = RKISP1_V12, .features = RKISP1_FEATURE_MIPI_CSI2 | RKISP1_FEATURE_SELF_PATH - | RKISP1_FEATURE_DUAL_CROP, + | RKISP1_FEATURE_DUAL_CROP + | RKISP1_FEATURE_BLS, + .max_width = 3264, + .max_height = 2448, }; static const char * const rk3399_isp_clks[] = { @@ -530,7 +533,10 @@ static const struct rkisp1_info rk3399_isp_info = { .isp_ver = RKISP1_V10, .features = RKISP1_FEATURE_MIPI_CSI2 | RKISP1_FEATURE_SELF_PATH - | RKISP1_FEATURE_DUAL_CROP, + | RKISP1_FEATURE_DUAL_CROP + | RKISP1_FEATURE_BLS, + .max_width = 4416, + .max_height = 3312, }; static const char * const imx8mp_isp_clks[] = { @@ -550,7 +556,10 @@ static const struct rkisp1_info imx8mp_isp_info = { .isr_size = ARRAY_SIZE(imx8mp_isp_isrs), .isp_ver = RKISP1_V_IMX8MP, .features = RKISP1_FEATURE_MAIN_STRIDE - | RKISP1_FEATURE_DMA_34BIT, + | RKISP1_FEATURE_DMA_34BIT + | RKISP1_FEATURE_COMPAND, + .max_width = 4096, + .max_height = 3072, }; static const struct of_device_id rkisp1_of_match[] = { diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c index 91301d17d356c4..d9491721182826 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c @@ -517,6 +517,7 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { + struct rkisp1_isp *isp = to_rkisp1_isp(sd); const struct rkisp1_mbus_info *mbus_info; if (fse->pad == RKISP1_ISP_PAD_SINK_PARAMS || @@ -539,9 +540,9 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd, return -EINVAL; fse->min_width = RKISP1_ISP_MIN_WIDTH; - fse->max_width = RKISP1_ISP_MAX_WIDTH; + fse->max_width = isp->rkisp1->info->max_width; fse->min_height = RKISP1_ISP_MIN_HEIGHT; - fse->max_height = RKISP1_ISP_MAX_HEIGHT; + fse->max_height = isp->rkisp1->info->max_height; return 0; } @@ -772,10 +773,10 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp, sink_fmt->width = clamp_t(u32, format->width, RKISP1_ISP_MIN_WIDTH, - RKISP1_ISP_MAX_WIDTH); + isp->rkisp1->info->max_width); sink_fmt->height = clamp_t(u32, format->height, RKISP1_ISP_MIN_HEIGHT, - RKISP1_ISP_MAX_HEIGHT); + isp->rkisp1->info->max_height); /* * Adjust the color space fields. Accept any color primaries and diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c index 173d1ea4187482..320581a9f866e9 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c @@ -5,6 +5,9 @@ * Copyright (C) 2017 Rockchip Electronics Co., Ltd. 
*/ +#include +#include + #include #include #include @@ -33,6 +36,59 @@ #define RKISP1_ISP_CC_COEFF(n) \ (RKISP1_CIF_ISP_CC_COEFF_0 + (n) * 4) +#define RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS BIT(0) +#define RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC BIT(1) + +union rkisp1_ext_params_config { + struct rkisp1_ext_params_block_header header; + struct rkisp1_ext_params_bls_config bls; + struct rkisp1_ext_params_dpcc_config dpcc; + struct rkisp1_ext_params_sdg_config sdg; + struct rkisp1_ext_params_lsc_config lsc; + struct rkisp1_ext_params_awb_gain_config awbg; + struct rkisp1_ext_params_flt_config flt; + struct rkisp1_ext_params_bdm_config bdm; + struct rkisp1_ext_params_ctk_config ctk; + struct rkisp1_ext_params_goc_config goc; + struct rkisp1_ext_params_dpf_config dpf; + struct rkisp1_ext_params_dpf_strength_config dpfs; + struct rkisp1_ext_params_cproc_config cproc; + struct rkisp1_ext_params_ie_config ie; + struct rkisp1_ext_params_awb_meas_config awbm; + struct rkisp1_ext_params_hst_config hst; + struct rkisp1_ext_params_aec_config aec; + struct rkisp1_ext_params_afc_config afc; + struct rkisp1_ext_params_compand_bls_config compand_bls; + struct rkisp1_ext_params_compand_curve_config compand_curve; +}; + +enum rkisp1_params_formats { + RKISP1_PARAMS_FIXED, + RKISP1_PARAMS_EXTENSIBLE, +}; + +static const struct v4l2_meta_format rkisp1_params_formats[] = { + [RKISP1_PARAMS_FIXED] = { + .dataformat = V4L2_META_FMT_RK_ISP1_PARAMS, + .buffersize = sizeof(struct rkisp1_params_cfg), + }, + [RKISP1_PARAMS_EXTENSIBLE] = { + .dataformat = V4L2_META_FMT_RK_ISP1_EXT_PARAMS, + .buffersize = sizeof(struct rkisp1_ext_params_cfg), + }, +}; + +static const struct v4l2_meta_format * +rkisp1_params_get_format_info(u32 dataformat) +{ + for (unsigned int i = 0; i < ARRAY_SIZE(rkisp1_params_formats); i++) { + if (rkisp1_params_formats[i].dataformat == dataformat) + return &rkisp1_params_formats[i]; + } + + return &rkisp1_params_formats[RKISP1_PARAMS_FIXED]; +} + static inline void rkisp1_param_set_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask) { @@ -112,54 +168,20 @@ static void rkisp1_bls_config(struct rkisp1_params *params, new_control &= RKISP1_CIF_ISP_BLS_ENA; /* fixed subtraction values */ if (!arg->enable_auto) { - const struct rkisp1_cif_isp_bls_fixed_val *pval = - &arg->fixed_val; - - switch (params->raw_type) { - case RKISP1_RAW_BGGR: - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED, - pval->r); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED, - pval->gr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED, - pval->gb); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED, - pval->b); - break; - case RKISP1_RAW_GBRG: - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED, - pval->r); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED, - pval->gr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED, - pval->gb); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED, - pval->b); - break; - case RKISP1_RAW_GRBG: - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED, - pval->r); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED, - pval->gr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED, - pval->gb); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED, - pval->b); - break; - case RKISP1_RAW_RGGB: - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED, - pval->r); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED, - pval->gr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED, - pval->gb); - 
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED, - pval->b); - break; - default: - break; - } - + static const u32 regs[] = { + RKISP1_CIF_ISP_BLS_A_FIXED, + RKISP1_CIF_ISP_BLS_B_FIXED, + RKISP1_CIF_ISP_BLS_C_FIXED, + RKISP1_CIF_ISP_BLS_D_FIXED, + }; + u32 swapped[4]; + + rkisp1_bls_swap_regs(params->raw_type, regs, swapped); + + rkisp1_write(params->rkisp1, swapped[0], arg->fixed_val.r); + rkisp1_write(params->rkisp1, swapped[1], arg->fixed_val.gr); + rkisp1_write(params->rkisp1, swapped[2], arg->fixed_val.gb); + rkisp1_write(params->rkisp1, swapped[3], arg->fixed_val.b); } else { if (arg->en_windows & BIT(1)) { rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H2_START, @@ -1239,6 +1261,93 @@ rkisp1_dpf_strength_config(struct rkisp1_params *params, rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_R, arg->r); } +static void rkisp1_compand_write_px_curve(struct rkisp1_params *params, + unsigned int addr, const u8 *curve) +{ + const unsigned int points_per_reg = 6; + const unsigned int num_regs = + DIV_ROUND_UP(RKISP1_CIF_ISP_COMPAND_NUM_POINTS, + points_per_reg); + + /* + * The compand curve is specified as a piecewise linear function with + * 64 points. X coordinates are stored as a log2 of the displacement + * from the previous point, in 5 bits, with 6 values per register. The + * last register stores 4 values. + */ + for (unsigned int reg = 0; reg < num_regs; ++reg) { + unsigned int num_points = + min(RKISP1_CIF_ISP_COMPAND_NUM_POINTS - + reg * points_per_reg, points_per_reg); + u32 val = 0; + + for (unsigned int i = 0; i < num_points; i++) + val |= (*curve++ & 0x1f) << (i * 5); + + rkisp1_write(params->rkisp1, addr, val); + addr += 4; + } +} + +static void +rkisp1_compand_write_curve_mem(struct rkisp1_params *params, + unsigned int reg_addr, unsigned int reg_data, + const u32 curve[RKISP1_CIF_ISP_COMPAND_NUM_POINTS]) +{ + for (unsigned int i = 0; i < RKISP1_CIF_ISP_COMPAND_NUM_POINTS; i++) { + rkisp1_write(params->rkisp1, reg_addr, i); + rkisp1_write(params->rkisp1, reg_data, curve[i]); + } +} + +static void +rkisp1_compand_bls_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_compand_bls_config *arg) +{ + static const u32 regs[] = { + RKISP1_CIF_ISP_COMPAND_BLS_A_FIXED, + RKISP1_CIF_ISP_COMPAND_BLS_B_FIXED, + RKISP1_CIF_ISP_COMPAND_BLS_C_FIXED, + RKISP1_CIF_ISP_COMPAND_BLS_D_FIXED, + }; + u32 swapped[4]; + + rkisp1_bls_swap_regs(params->raw_type, regs, swapped); + + rkisp1_write(params->rkisp1, swapped[0], arg->r); + rkisp1_write(params->rkisp1, swapped[1], arg->gr); + rkisp1_write(params->rkisp1, swapped[2], arg->gb); + rkisp1_write(params->rkisp1, swapped[3], arg->b); +} + +static void +rkisp1_compand_expand_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_compand_curve_config *arg) +{ + rkisp1_compand_write_px_curve(params, RKISP1_CIF_ISP_COMPAND_EXPAND_PX_N(0), + arg->px); + rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_EXPAND_Y_ADDR, + RKISP1_CIF_ISP_COMPAND_EXPAND_Y_WRITE_DATA, + arg->y); + rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_EXPAND_X_ADDR, + RKISP1_CIF_ISP_COMPAND_EXPAND_X_WRITE_DATA, + arg->x); +} + +static void +rkisp1_compand_compress_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_compand_curve_config *arg) +{ + rkisp1_compand_write_px_curve(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_PX_N(0), + arg->px); + rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_ADDR, + RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_WRITE_DATA, + arg->y); + 
rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_X_ADDR, + RKISP1_CIF_ISP_COMPAND_COMPRESS_X_WRITE_DATA, + arg->x); +} + static void rkisp1_isp_isr_other_config(struct rkisp1_params *params, const struct rkisp1_params_cfg *new_params) @@ -1249,6 +1358,12 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params, module_cfg_update = new_params->module_cfg_update; module_ens = new_params->module_ens; + if (!rkisp1_has_feature(params->rkisp1, BLS)) { + module_en_update &= ~RKISP1_CIF_ISP_MODULE_BLS; + module_cfg_update &= ~RKISP1_CIF_ISP_MODULE_BLS; + module_ens &= ~RKISP1_CIF_ISP_MODULE_BLS; + } + /* update dpc config */ if (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPCC) rkisp1_dpcc_config(params, @@ -1501,21 +1616,551 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params, } } -static bool rkisp1_params_get_buffer(struct rkisp1_params *params, - struct rkisp1_buffer **buf, - struct rkisp1_params_cfg **cfg) +/*------------------------------------------------------------------------------ + * Extensible parameters format handling + */ + +static void +rkisp1_ext_params_bls(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_bls_config *bls = &block->bls; + + if (bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL, + RKISP1_CIF_ISP_BLS_ENA); + return; + } + + rkisp1_bls_config(params, &bls->config); + + if ((bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(bls->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_BLS_CTRL, + RKISP1_CIF_ISP_BLS_ENA); +} + +static void +rkisp1_ext_params_dpcc(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) { - if (list_empty(¶ms->params)) - return false; + const struct rkisp1_ext_params_dpcc_config *dpcc = &block->dpcc; + + if (dpcc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE, + RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE); + return; + } - *buf = list_first_entry(¶ms->params, struct rkisp1_buffer, queue); - *cfg = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0); + rkisp1_dpcc_config(params, &dpcc->config); - return true; + if ((dpcc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(dpcc->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPCC_MODE, + RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE); +} + +static void +rkisp1_ext_params_sdg(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_sdg_config *sdg = &block->sdg; + + if (sdg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA); + return; + } + + rkisp1_sdg_config(params, &sdg->config); + + if ((sdg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(sdg->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA); +} + +static void +rkisp1_ext_params_lsc(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_lsc_config *lsc = &block->lsc; + + if (lsc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL, + RKISP1_CIF_ISP_LSC_CTRL_ENA); + return; + } + + rkisp1_lsc_config(params, &lsc->config); + + if ((lsc->header.flags & 
RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(lsc->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_LSC_CTRL, + RKISP1_CIF_ISP_LSC_CTRL_ENA); +} + +static void +rkisp1_ext_params_awbg(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_awb_gain_config *awbg = &block->awbg; + + if (awbg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA); + return; + } + + params->ops->awb_gain_config(params, &awbg->config); + + if ((awbg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(awbg->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA); +} + +static void +rkisp1_ext_params_flt(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_flt_config *flt = &block->flt; + + if (flt->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE, + RKISP1_CIF_ISP_FLT_ENA); + return; + } + + rkisp1_flt_config(params, &flt->config); + + if ((flt->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(flt->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_FILT_MODE, + RKISP1_CIF_ISP_FLT_ENA); +} + +static void +rkisp1_ext_params_bdm(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_bdm_config *bdm = &block->bdm; + + if (bdm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DEMOSAIC, + RKISP1_CIF_ISP_DEMOSAIC_BYPASS); + return; + } + + rkisp1_bdm_config(params, &bdm->config); + + if ((bdm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(bdm->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DEMOSAIC, + RKISP1_CIF_ISP_DEMOSAIC_BYPASS); +} + +static void +rkisp1_ext_params_ctk(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_ctk_config *ctk = &block->ctk; + + if (ctk->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_ctk_enable(params, false); + return; + } + + rkisp1_ctk_config(params, &ctk->config); + + if ((ctk->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(ctk->header.type))) + rkisp1_ctk_enable(params, true); +} + +static void +rkisp1_ext_params_goc(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_goc_config *goc = &block->goc; + + if (goc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA); + return; + } + + params->ops->goc_config(params, &goc->config); + + /* + * Unconditionally re-enable the GOC module which gets disabled by + * goc_config(). 
+ */ + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA); +} + +static void +rkisp1_ext_params_dpf(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_dpf_config *dpf = &block->dpf; + + if (dpf->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPF_MODE, + RKISP1_CIF_ISP_DPF_MODE_EN); + return; + } + + rkisp1_dpf_config(params, &dpf->config); + + if ((dpf->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(dpf->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPF_MODE, + RKISP1_CIF_ISP_DPF_MODE_EN); +} + +static void +rkisp1_ext_params_dpfs(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_dpf_strength_config *dpfs = &block->dpfs; + + rkisp1_dpf_strength_config(params, &dpfs->config); +} + +static void +rkisp1_ext_params_cproc(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_cproc_config *cproc = &block->cproc; + + if (cproc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL, + RKISP1_CIF_C_PROC_CTR_ENABLE); + return; + } + + rkisp1_cproc_config(params, &cproc->config); + + if ((cproc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(cproc->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_C_PROC_CTRL, + RKISP1_CIF_C_PROC_CTR_ENABLE); +} + +static void +rkisp1_ext_params_ie(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_ie_config *ie = &block->ie; + + if (ie->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_ie_enable(params, false); + return; + } + + rkisp1_ie_config(params, &ie->config); + + if ((ie->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(ie->header.type))) + rkisp1_ie_enable(params, true); +} + +static void +rkisp1_ext_params_awbm(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_awb_meas_config *awbm = &block->awbm; + + if (awbm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + params->ops->awb_meas_enable(params, &awbm->config, + false); + return; + } + + params->ops->awb_meas_config(params, &awbm->config); + + if ((awbm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(awbm->header.type))) + params->ops->awb_meas_enable(params, &awbm->config, + true); +} + +static void +rkisp1_ext_params_hstm(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_hst_config *hst = &block->hst; + + if (hst->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + params->ops->hst_enable(params, &hst->config, false); + return; + } + + params->ops->hst_config(params, &hst->config); + + if ((hst->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(hst->header.type))) + params->ops->hst_enable(params, &hst->config, true); +} + +static void +rkisp1_ext_params_aecm(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_aec_config *aec = &block->aec; + + if (aec->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL, + RKISP1_CIF_ISP_EXP_ENA); + return; 
+ } + + params->ops->aec_config(params, &aec->config); + + if ((aec->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(aec->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL, + RKISP1_CIF_ISP_EXP_ENA); +} + +static void +rkisp1_ext_params_afcm(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_afc_config *afc = &block->afc; + + if (afc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL, + RKISP1_CIF_ISP_AFM_ENA); + return; + } + + params->ops->afm_config(params, &afc->config); + + if ((afc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(afc->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL, + RKISP1_CIF_ISP_AFM_ENA); +} + +static void rkisp1_ext_params_compand_bls(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_compand_bls_config *bls = + &block->compand_bls; + + if (bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL, + RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE); + return; + } + + rkisp1_compand_bls_config(params, &bls->config); + + if ((bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(bls->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL, + RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE); +} + +static void rkisp1_ext_params_compand_expand(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_compand_curve_config *curve = + &block->compand_curve; + + if (curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL, + RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE); + return; + } + + rkisp1_compand_expand_config(params, &curve->config); + + if ((curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(curve->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL, + RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE); +} + +static void rkisp1_ext_params_compand_compress(struct rkisp1_params *params, + const union rkisp1_ext_params_config *block) +{ + const struct rkisp1_ext_params_compand_curve_config *curve = + &block->compand_curve; + + if (curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) { + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL, + RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE); + return; + } + + rkisp1_compand_compress_config(params, &curve->config); + + if ((curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) && + !(params->enabled_blocks & BIT(curve->header.type))) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL, + RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE); +} + +typedef void (*rkisp1_block_handler)(struct rkisp1_params *params, + const union rkisp1_ext_params_config *config); + +static const struct rkisp1_ext_params_handler { + size_t size; + rkisp1_block_handler handler; + unsigned int group; + unsigned int features; +} rkisp1_ext_params_handlers[] = { + [RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS] = { + .size = sizeof(struct rkisp1_ext_params_bls_config), + .handler = rkisp1_ext_params_bls, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + .features = RKISP1_FEATURE_BLS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC] = { + .size = sizeof(struct 
rkisp1_ext_params_dpcc_config), + .handler = rkisp1_ext_params_dpcc, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG] = { + .size = sizeof(struct rkisp1_ext_params_sdg_config), + .handler = rkisp1_ext_params_sdg, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN] = { + .size = sizeof(struct rkisp1_ext_params_awb_gain_config), + .handler = rkisp1_ext_params_awbg, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT] = { + .size = sizeof(struct rkisp1_ext_params_flt_config), + .handler = rkisp1_ext_params_flt, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM] = { + .size = sizeof(struct rkisp1_ext_params_bdm_config), + .handler = rkisp1_ext_params_bdm, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK] = { + .size = sizeof(struct rkisp1_ext_params_ctk_config), + .handler = rkisp1_ext_params_ctk, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC] = { + .size = sizeof(struct rkisp1_ext_params_goc_config), + .handler = rkisp1_ext_params_goc, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF] = { + .size = sizeof(struct rkisp1_ext_params_dpf_config), + .handler = rkisp1_ext_params_dpf, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH] = { + .size = sizeof(struct rkisp1_ext_params_dpf_strength_config), + .handler = rkisp1_ext_params_dpfs, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC] = { + .size = sizeof(struct rkisp1_ext_params_cproc_config), + .handler = rkisp1_ext_params_cproc, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_IE] = { + .size = sizeof(struct rkisp1_ext_params_ie_config), + .handler = rkisp1_ext_params_ie, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC] = { + .size = sizeof(struct rkisp1_ext_params_lsc_config), + .handler = rkisp1_ext_params_lsc, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS] = { + .size = sizeof(struct rkisp1_ext_params_awb_meas_config), + .handler = rkisp1_ext_params_awbm, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS] = { + .size = sizeof(struct rkisp1_ext_params_hst_config), + .handler = rkisp1_ext_params_hstm, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS] = { + .size = sizeof(struct rkisp1_ext_params_aec_config), + .handler = rkisp1_ext_params_aecm, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS] = { + .size = sizeof(struct rkisp1_ext_params_afc_config), + .handler = rkisp1_ext_params_afcm, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS] = { + .size = sizeof(struct rkisp1_ext_params_compand_bls_config), + .handler = rkisp1_ext_params_compand_bls, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + .features = RKISP1_FEATURE_COMPAND, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND] = { + .size = sizeof(struct rkisp1_ext_params_compand_curve_config), + .handler = rkisp1_ext_params_compand_expand, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + .features = RKISP1_FEATURE_COMPAND, + }, + [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS] = { + .size = sizeof(struct 
rkisp1_ext_params_compand_curve_config), + .handler = rkisp1_ext_params_compand_compress, + .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS, + .features = RKISP1_FEATURE_COMPAND, + }, +}; + +static void rkisp1_ext_params_config(struct rkisp1_params *params, + struct rkisp1_ext_params_cfg *cfg, + u32 block_group_mask) +{ + size_t block_offset = 0; + + if (WARN_ON(!cfg)) + return; + + /* Walk the list of parameter blocks and process them. */ + while (block_offset < cfg->data_size) { + const struct rkisp1_ext_params_handler *block_handler; + const union rkisp1_ext_params_config *block; + + block = (const union rkisp1_ext_params_config *) + &cfg->data[block_offset]; + block_offset += block->header.size; + + /* + * Make sure the block is supported by the platform and in the + * list of groups to configure. + */ + block_handler = &rkisp1_ext_params_handlers[block->header.type]; + if (!(block_handler->group & block_group_mask)) + continue; + + if ((params->rkisp1->info->features & block_handler->features) != + block_handler->features) + continue; + + block_handler->handler(params, block); + + if (block->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) + params->enabled_blocks &= ~BIT(block->header.type); + else if (block->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) + params->enabled_blocks |= BIT(block->header.type); + } } static void rkisp1_params_complete_buffer(struct rkisp1_params *params, - struct rkisp1_buffer *buf, + struct rkisp1_params_buffer *buf, unsigned int frame_sequence) { list_del(&buf->queue); @@ -1527,17 +2172,24 @@ static void rkisp1_params_complete_buffer(struct rkisp1_params *params, void rkisp1_params_isr(struct rkisp1_device *rkisp1) { struct rkisp1_params *params = &rkisp1->params; - struct rkisp1_params_cfg *new_params; - struct rkisp1_buffer *cur_buf; + struct rkisp1_params_buffer *cur_buf; spin_lock(¶ms->config_lock); - if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + cur_buf = list_first_entry_or_null(¶ms->params, + struct rkisp1_params_buffer, queue); + if (!cur_buf) goto unlock; - rkisp1_isp_isr_other_config(params, new_params); - rkisp1_isp_isr_lsc_config(params, new_params); - rkisp1_isp_isr_meas_config(params, new_params); + if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS) { + rkisp1_isp_isr_other_config(params, cur_buf->cfg); + rkisp1_isp_isr_lsc_config(params, cur_buf->cfg); + rkisp1_isp_isr_meas_config(params, cur_buf->cfg); + } else { + rkisp1_ext_params_config(params, cur_buf->cfg, + RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS | + RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC); + } /* update shadow register immediately */ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, @@ -1603,8 +2255,7 @@ void rkisp1_params_pre_configure(struct rkisp1_params *params, enum v4l2_ycbcr_encoding ycbcr_encoding) { struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config; - struct rkisp1_params_cfg *new_params; - struct rkisp1_buffer *cur_buf; + struct rkisp1_params_buffer *cur_buf; params->quantization = quantization; params->ycbcr_encoding = ycbcr_encoding; @@ -1633,11 +2284,18 @@ void rkisp1_params_pre_configure(struct rkisp1_params *params, /* apply the first buffer if there is one already */ - if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + cur_buf = list_first_entry_or_null(¶ms->params, + struct rkisp1_params_buffer, queue); + if (!cur_buf) goto unlock; - rkisp1_isp_isr_other_config(params, new_params); - rkisp1_isp_isr_meas_config(params, new_params); + if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS) { + 
rkisp1_isp_isr_other_config(params, cur_buf->cfg); + rkisp1_isp_isr_meas_config(params, cur_buf->cfg); + } else { + rkisp1_ext_params_config(params, cur_buf->cfg, + RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS); + } /* update shadow register immediately */ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, @@ -1649,8 +2307,7 @@ void rkisp1_params_pre_configure(struct rkisp1_params *params, void rkisp1_params_post_configure(struct rkisp1_params *params) { - struct rkisp1_params_cfg *new_params; - struct rkisp1_buffer *cur_buf; + struct rkisp1_params_buffer *cur_buf; spin_lock_irq(¶ms->config_lock); @@ -1662,11 +2319,16 @@ void rkisp1_params_post_configure(struct rkisp1_params *params) * ordering doesn't affect other ISP versions negatively, do so * unconditionally. */ - - if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + cur_buf = list_first_entry_or_null(¶ms->params, + struct rkisp1_params_buffer, queue); + if (!cur_buf) goto unlock; - rkisp1_isp_isr_lsc_config(params, new_params); + if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS) + rkisp1_isp_isr_lsc_config(params, cur_buf->cfg); + else + rkisp1_ext_params_config(params, cur_buf->cfg, + RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC); /* update shadow register immediately */ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, @@ -1742,12 +2404,12 @@ static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct video_device *video = video_devdata(file); - struct rkisp1_params *params = video_get_drvdata(video); - if (f->index > 0 || f->type != video->queue->type) + if (f->index >= ARRAY_SIZE(rkisp1_params_formats) || + f->type != video->queue->type) return -EINVAL; - f->pixelformat = params->vdev_fmt.fmt.meta.dataformat; + f->pixelformat = rkisp1_params_formats[f->index].dataformat; return 0; } @@ -1762,9 +2424,40 @@ static int rkisp1_params_g_fmt_meta_out(struct file *file, void *fh, if (f->type != video->queue->type) return -EINVAL; - memset(meta, 0, sizeof(*meta)); - meta->dataformat = params->vdev_fmt.fmt.meta.dataformat; - meta->buffersize = params->vdev_fmt.fmt.meta.buffersize; + *meta = *params->metafmt; + + return 0; +} + +static int rkisp1_params_try_fmt_meta_out(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct video_device *video = video_devdata(file); + struct v4l2_meta_format *meta = &f->fmt.meta; + + if (f->type != video->queue->type) + return -EINVAL; + + *meta = *rkisp1_params_get_format_info(meta->dataformat); + + return 0; +} + +static int rkisp1_params_s_fmt_meta_out(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct video_device *video = video_devdata(file); + struct rkisp1_params *params = video_get_drvdata(video); + struct v4l2_meta_format *meta = &f->fmt.meta; + + if (f->type != video->queue->type) + return -EINVAL; + + if (vb2_is_busy(video->queue)) + return -EBUSY; + + params->metafmt = rkisp1_params_get_format_info(meta->dataformat); + *meta = *params->metafmt; return 0; } @@ -1794,8 +2487,8 @@ static const struct v4l2_ioctl_ops rkisp1_params_ioctl = { .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_enum_fmt_meta_out = rkisp1_params_enum_fmt_meta_out, .vidioc_g_fmt_meta_out = rkisp1_params_g_fmt_meta_out, - .vidioc_s_fmt_meta_out = rkisp1_params_g_fmt_meta_out, - .vidioc_try_fmt_meta_out = rkisp1_params_g_fmt_meta_out, + .vidioc_s_fmt_meta_out = rkisp1_params_s_fmt_meta_out, + .vidioc_try_fmt_meta_out = rkisp1_params_try_fmt_meta_out, .vidioc_querycap = rkisp1_params_querycap, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, 
.vidioc_unsubscribe_event = v4l2_event_unsubscribe, @@ -1807,22 +2500,46 @@ static int rkisp1_params_vb2_queue_setup(struct vb2_queue *vq, unsigned int sizes[], struct device *alloc_devs[]) { + struct rkisp1_params *params = vq->drv_priv; + *num_buffers = clamp_t(u32, *num_buffers, RKISP1_ISP_PARAMS_REQ_BUFS_MIN, RKISP1_ISP_PARAMS_REQ_BUFS_MAX); *num_planes = 1; - sizes[0] = sizeof(struct rkisp1_params_cfg); + sizes[0] = params->metafmt->buffersize; return 0; } +static int rkisp1_params_vb2_buf_init(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf); + struct rkisp1_params *params = vb->vb2_queue->drv_priv; + + params_buf->cfg = kvmalloc(params->metafmt->buffersize, + GFP_KERNEL); + if (!params_buf->cfg) + return -ENOMEM; + + return 0; +} + +static void rkisp1_params_vb2_buf_cleanup(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf); + + kvfree(params_buf->cfg); + params_buf->cfg = NULL; +} + static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct rkisp1_buffer *params_buf = - container_of(vbuf, struct rkisp1_buffer, vb); + struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf); struct vb2_queue *vq = vb->vb2_queue; struct rkisp1_params *params = vq->drv_priv; @@ -1831,12 +2548,133 @@ static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb) spin_unlock_irq(¶ms->config_lock); } +static int rkisp1_params_prepare_ext_params(struct rkisp1_params *params, + struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf); + size_t header_size = offsetof(struct rkisp1_ext_params_cfg, data); + struct rkisp1_ext_params_cfg *cfg = params_buf->cfg; + size_t payload_size = vb2_get_plane_payload(vb, 0); + struct rkisp1_ext_params_cfg *usr_cfg = + vb2_plane_vaddr(&vbuf->vb2_buf, 0); + size_t block_offset = 0; + size_t cfg_size; + + /* + * Validate the buffer payload size before copying the parameters. The + * payload has to be smaller than the destination buffer size and larger + * than the header size. + */ + if (payload_size > params->metafmt->buffersize) { + dev_dbg(params->rkisp1->dev, + "Too large buffer payload size %zu\n", payload_size); + return -EINVAL; + } + + if (payload_size < header_size) { + dev_dbg(params->rkisp1->dev, + "Buffer payload %zu smaller than header size %zu\n", + payload_size, header_size); + return -EINVAL; + } + + /* + * Copy the parameters buffer to the internal scratch buffer to avoid + * userspace modifying the buffer content while the driver processes it. + */ + memcpy(cfg, usr_cfg, payload_size); + + /* Only v1 is supported at the moment. */ + if (cfg->version != RKISP1_EXT_PARAM_BUFFER_V1) { + dev_dbg(params->rkisp1->dev, + "Unsupported extensible format version: %u\n", + cfg->version); + return -EINVAL; + } + + /* Validate the size reported in the parameters buffer header. */ + cfg_size = header_size + cfg->data_size; + if (cfg_size != payload_size) { + dev_dbg(params->rkisp1->dev, + "Data size %zu different than buffer payload size %zu\n", + cfg_size, payload_size); + return -EINVAL; + } + + /* Walk the list of parameter blocks and validate them. 
*/ + cfg_size = cfg->data_size; + while (cfg_size >= sizeof(struct rkisp1_ext_params_block_header)) { + const struct rkisp1_ext_params_block_header *block; + const struct rkisp1_ext_params_handler *handler; + + block = (const struct rkisp1_ext_params_block_header *) + &cfg->data[block_offset]; + + if (block->type >= ARRAY_SIZE(rkisp1_ext_params_handlers)) { + dev_dbg(params->rkisp1->dev, + "Invalid parameters block type\n"); + return -EINVAL; + } + + if (block->size > cfg_size) { + dev_dbg(params->rkisp1->dev, + "Premature end of parameters data\n"); + return -EINVAL; + } + + if ((block->flags & (RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE | + RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE)) == + (RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE | + RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE)) { + dev_dbg(params->rkisp1->dev, + "Invalid parameters block flags\n"); + return -EINVAL; + } + + handler = &rkisp1_ext_params_handlers[block->type]; + if (block->size != handler->size) { + dev_dbg(params->rkisp1->dev, + "Invalid parameters block size\n"); + return -EINVAL; + } + + block_offset += block->size; + cfg_size -= block->size; + } + + if (cfg_size) { + dev_dbg(params->rkisp1->dev, + "Unexpected data after the parameters buffer end\n"); + return -EINVAL; + } + + return 0; +} + static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb) { - if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_params_cfg)) + struct rkisp1_params *params = vb->vb2_queue->drv_priv; + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf); + struct rkisp1_params_cfg *cfg = vb2_plane_vaddr(&vbuf->vb2_buf, 0); + size_t payload = vb2_get_plane_payload(vb, 0); + + if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_EXT_PARAMS) + return rkisp1_params_prepare_ext_params(params, vb); + + /* + * For the fixed parameters format the payload size must be exactly the + * size of the parameters structure. + */ + if (payload != sizeof(*cfg)) return -EINVAL; - vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_params_cfg)); + /* + * Copy the parameters buffer to the internal scratch buffer to avoid + * userspace modifying the buffer content while the driver processes it. 
+ */ + memcpy(params_buf->cfg, cfg, payload); return 0; } @@ -1844,7 +2682,7 @@ static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb) static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq) { struct rkisp1_params *params = vq->drv_priv; - struct rkisp1_buffer *buf; + struct rkisp1_params_buffer *buf; LIST_HEAD(tmp_list); /* @@ -1858,16 +2696,19 @@ static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq) list_for_each_entry(buf, &tmp_list, queue) vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + + params->enabled_blocks = 0; } static const struct vb2_ops rkisp1_params_vb2_ops = { .queue_setup = rkisp1_params_vb2_queue_setup, + .buf_init = rkisp1_params_vb2_buf_init, + .buf_cleanup = rkisp1_params_vb2_buf_cleanup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .buf_queue = rkisp1_params_vb2_buf_queue, .buf_prepare = rkisp1_params_vb2_buf_prepare, .stop_streaming = rkisp1_params_vb2_stop_streaming, - }; static const struct v4l2_file_operations rkisp1_params_fops = { @@ -1890,26 +2731,13 @@ static int rkisp1_params_init_vb2_queue(struct vb2_queue *q, q->drv_priv = params; q->ops = &rkisp1_params_vb2_ops; q->mem_ops = &vb2_vmalloc_memops; - q->buf_struct_size = sizeof(struct rkisp1_buffer); + q->buf_struct_size = sizeof(struct rkisp1_params_buffer); q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->lock = &node->vlock; return vb2_queue_init(q); } -static void rkisp1_init_params(struct rkisp1_params *params) -{ - params->vdev_fmt.fmt.meta.dataformat = - V4L2_META_FMT_RK_ISP1_PARAMS; - params->vdev_fmt.fmt.meta.buffersize = - sizeof(struct rkisp1_params_cfg); - - if (params->rkisp1->info->isp_ver == RKISP1_V12) - params->ops = &rkisp1_v12_params_ops; - else - params->ops = &rkisp1_v10_params_ops; -} - int rkisp1_params_register(struct rkisp1_device *rkisp1) { struct rkisp1_params *params = &rkisp1->params; @@ -1938,7 +2766,14 @@ int rkisp1_params_register(struct rkisp1_device *rkisp1) vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_META_OUTPUT; vdev->vfl_dir = VFL_DIR_TX; rkisp1_params_init_vb2_queue(vdev->queue, params); - rkisp1_init_params(params); + + params->metafmt = &rkisp1_params_formats[RKISP1_PARAMS_FIXED]; + + if (params->rkisp1->info->isp_ver == RKISP1_V12) + params->ops = &rkisp1_v12_params_ops; + else + params->ops = &rkisp1_v10_params_ops; + video_set_drvdata(vdev, params); node->pad.flags = MEDIA_PAD_FL_SOURCE; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h index fccf4c17ee8d0a..bf0260600a1923 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h @@ -704,6 +704,12 @@ #define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX 0x1f #define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX 0x3ff +/* COMPAND */ +#define RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE BIT(0) +#define RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE BIT(1) +#define RKISP1_CIF_ISP_COMPAND_CTRL_SOFT_RESET_FLAG BIT(2) +#define RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE BIT(3) + /* =================================================================== */ /* CIF Registers */ /* =================================================================== */ @@ -1394,6 +1400,23 @@ #define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001c) #define RKISP1_CIF_ISP_VSM_DELTA_V (RKISP1_CIF_ISP_VSM_BASE + 0x00000020) +#define RKISP1_CIF_ISP_COMPAND_BASE 0x00003200 +#define RKISP1_CIF_ISP_COMPAND_CTRL (RKISP1_CIF_ISP_COMPAND_BASE + 
0x00000000) +#define RKISP1_CIF_ISP_COMPAND_BLS_A_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000004) +#define RKISP1_CIF_ISP_COMPAND_BLS_B_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000008) +#define RKISP1_CIF_ISP_COMPAND_BLS_C_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000000c) +#define RKISP1_CIF_ISP_COMPAND_BLS_D_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000010) +#define RKISP1_CIF_ISP_COMPAND_EXPAND_PX_N(n) (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000014 + (n) * 4) +#define RKISP1_CIF_ISP_COMPAND_COMPRESS_PX_N(n) (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000040 + (n) * 4) +#define RKISP1_CIF_ISP_COMPAND_EXPAND_Y_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000006c) +#define RKISP1_CIF_ISP_COMPAND_EXPAND_Y_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000070) +#define RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000074) +#define RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000078) +#define RKISP1_CIF_ISP_COMPAND_EXPAND_X_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000007c) +#define RKISP1_CIF_ISP_COMPAND_EXPAND_X_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000080) +#define RKISP1_CIF_ISP_COMPAND_COMPRESS_X_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000084) +#define RKISP1_CIF_ISP_COMPAND_COMPRESS_X_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000088) + #define RKISP1_CIF_ISP_CSI0_BASE 0x00007000 #define RKISP1_CIF_ISP_CSI0_CTRL0 (RKISP1_CIF_ISP_CSI0_BASE + 0x00000000) diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c index 1fa991227fa904..f073e72a0d3732 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c @@ -494,10 +494,10 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, sink_fmt->width = clamp_t(u32, format->width, RKISP1_ISP_MIN_WIDTH, - RKISP1_ISP_MAX_WIDTH); + rsz->rkisp1->info->max_width); sink_fmt->height = clamp_t(u32, format->height, RKISP1_ISP_MIN_HEIGHT, - RKISP1_ISP_MAX_HEIGHT); + rsz->rkisp1->info->max_height); /* * Adjust the color space fields. 
Accept any color primaries and diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c index 2795eef91bddbc..a502719e916a92 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c @@ -304,48 +304,25 @@ static void rkisp1_stats_get_hst_meas_v12(struct rkisp1_stats *stats, static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats, struct rkisp1_stat_buffer *pbuf) { + static const u32 regs[] = { + RKISP1_CIF_ISP_BLS_A_MEASURED, + RKISP1_CIF_ISP_BLS_B_MEASURED, + RKISP1_CIF_ISP_BLS_C_MEASURED, + RKISP1_CIF_ISP_BLS_D_MEASURED, + }; struct rkisp1_device *rkisp1 = stats->rkisp1; const struct rkisp1_mbus_info *in_fmt = rkisp1->isp.sink_fmt; struct rkisp1_cif_isp_bls_meas_val *bls_val; + u32 swapped[4]; + + rkisp1_bls_swap_regs(in_fmt->bayer_pat, regs, swapped); bls_val = &pbuf->params.ae.bls_val; - if (in_fmt->bayer_pat == RKISP1_RAW_BGGR) { - bls_val->meas_b = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED); - bls_val->meas_gb = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED); - bls_val->meas_gr = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED); - bls_val->meas_r = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED); - } else if (in_fmt->bayer_pat == RKISP1_RAW_GBRG) { - bls_val->meas_gb = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED); - bls_val->meas_b = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED); - bls_val->meas_r = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED); - bls_val->meas_gr = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED); - } else if (in_fmt->bayer_pat == RKISP1_RAW_GRBG) { - bls_val->meas_gr = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED); - bls_val->meas_r = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED); - bls_val->meas_b = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED); - bls_val->meas_gb = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED); - } else if (in_fmt->bayer_pat == RKISP1_RAW_RGGB) { - bls_val->meas_r = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED); - bls_val->meas_gr = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED); - bls_val->meas_gb = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED); - bls_val->meas_b = - rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED); - } + + bls_val->meas_r = rkisp1_read(rkisp1, swapped[0]); + bls_val->meas_gr = rkisp1_read(rkisp1, swapped[1]); + bls_val->meas_gb = rkisp1_read(rkisp1, swapped[2]); + bls_val->meas_b = rkisp1_read(rkisp1, swapped[3]); } static const struct rkisp1_stats_ops rkisp1_v10_stats_ops = { diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c index 618ae55fe3963c..f45f5c8612a6b3 100644 --- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c @@ -1225,7 +1225,7 @@ static void gsc_remove(struct platform_device *pdev) static int gsc_m2m_suspend(struct gsc_dev *gsc) { unsigned long flags; - int timeout; + long time_left; spin_lock_irqsave(&gsc->slock, flags); if (!gsc_m2m_pending(gsc)) { @@ -1236,12 +1236,12 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc) set_bit(ST_M2M_SUSPENDING, &gsc->state); spin_unlock_irqrestore(&gsc->slock, flags); - timeout = wait_event_timeout(gsc->irq_queue, - test_bit(ST_M2M_SUSPENDED, &gsc->state), - GSC_SHUTDOWN_TIMEOUT); + time_left = wait_event_timeout(gsc->irq_queue, + test_bit(ST_M2M_SUSPENDED, &gsc->state), + GSC_SHUTDOWN_TIMEOUT); 
clear_bit(ST_M2M_SUSPENDING, &gsc->state); - return timeout == 0 ? -EAGAIN : 0; + return time_left == 0 ? -EAGAIN : 0; } static void gsc_m2m_resume(struct gsc_dev *gsc) diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c index aae74b501a42d2..adfc2d73d04b7f 100644 --- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c +++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c @@ -822,7 +822,7 @@ static int fimc_clk_get(struct fimc_dev *fimc) static int fimc_m2m_suspend(struct fimc_dev *fimc) { unsigned long flags; - int timeout; + long time_left; spin_lock_irqsave(&fimc->slock, flags); if (!fimc_m2m_pending(fimc)) { @@ -833,12 +833,12 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc) set_bit(ST_M2M_SUSPENDING, &fimc->state); spin_unlock_irqrestore(&fimc->slock, flags); - timeout = wait_event_timeout(fimc->irq_queue, - test_bit(ST_M2M_SUSPENDED, &fimc->state), - FIMC_SHUTDOWN_TIMEOUT); + time_left = wait_event_timeout(fimc->irq_queue, + test_bit(ST_M2M_SUSPENDED, &fimc->state), + FIMC_SHUTDOWN_TIMEOUT); clear_bit(ST_M2M_SUSPENDING, &fimc->state); - return timeout == 0 ? -EAGAIN : 0; + return time_left == 0 ? -EAGAIN : 0; } static int fimc_m2m_resume(struct fimc_dev *fimc) diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c index 1328b4eb6b9ff8..c7ee6e1a445146 100644 --- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c @@ -1160,7 +1160,7 @@ static void bdisp_irq_timeout(struct work_struct *ptr) static int bdisp_m2m_suspend(struct bdisp_dev *bdisp) { unsigned long flags; - int timeout; + long time_left; spin_lock_irqsave(&bdisp->slock, flags); if (!test_bit(ST_M2M_RUNNING, &bdisp->state)) { @@ -1171,13 +1171,13 @@ static int bdisp_m2m_suspend(struct bdisp_dev *bdisp) set_bit(ST_M2M_SUSPENDING, &bdisp->state); spin_unlock_irqrestore(&bdisp->slock, flags); - timeout = wait_event_timeout(bdisp->irq_queue, - test_bit(ST_M2M_SUSPENDED, &bdisp->state), - BDISP_WORK_TIMEOUT); + time_left = wait_event_timeout(bdisp->irq_queue, + test_bit(ST_M2M_SUSPENDED, &bdisp->state), + BDISP_WORK_TIMEOUT); clear_bit(ST_M2M_SUSPENDING, &bdisp->state); - if (!timeout) { + if (!time_left) { dev_err(bdisp->dev, "%s IRQ timeout\n", __func__); return -EAGAIN; } diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c index 097a3a08ef7d1f..d07e980aba61c8 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c @@ -35,7 +35,18 @@ struct sun4i_csi_traits { bool has_isp; }; +static int sun4i_csi_video_link_validate(struct media_link *link) +{ + dev_warn_once(link->graph_obj.mdev->dev, + "Driver bug: link validation not implemented\n"); + return 0; +} + static const struct media_entity_operations sun4i_csi_video_entity_ops = { + .link_validate = sun4i_csi_video_link_validate, +}; + +static const struct media_entity_operations sun4i_csi_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; @@ -214,6 +225,7 @@ static int sun4i_csi_probe(struct platform_device *pdev) subdev->internal_ops = &sun4i_csi_subdev_internal_ops; subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + subdev->entity.ops = &sun4i_csi_subdev_entity_ops; subdev->owner = THIS_MODULE; snprintf(subdev->name, sizeof(subdev->name), "sun4i-csi-0"); 
v4l2_set_subdevdata(subdev, csi); diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c index 77e12457d1495f..009ff68a2b43ca 100644 --- a/drivers/media/platform/ti/am437x/am437x-vpfe.c +++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c @@ -2287,7 +2287,7 @@ static const struct v4l2_async_notifier_operations vpfe_async_ops = { static struct vpfe_config * vpfe_get_pdata(struct vpfe_device *vpfe) { - struct device_node *endpoint = NULL; + struct device_node *endpoint; struct device *dev = vpfe->pdev; struct vpfe_subdev_info *sdinfo; struct vpfe_config *pdata; @@ -2306,14 +2306,11 @@ vpfe_get_pdata(struct vpfe_device *vpfe) if (!pdata) return NULL; - for (i = 0; ; i++) { + i = 0; + for_each_endpoint_of_node(dev->of_node, endpoint) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 }; struct device_node *rem; - endpoint = of_graph_get_next_endpoint(dev->of_node, endpoint); - if (!endpoint) - break; - sdinfo = &pdata->sub_devs[i]; sdinfo->grp_id = 0; @@ -2371,9 +2368,10 @@ vpfe_get_pdata(struct vpfe_device *vpfe) of_node_put(rem); if (IS_ERR(pdata->asd[i])) goto cleanup; + + i++; } - of_node_put(endpoint); return pdata; cleanup: diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c index 4afc2ad0033039..42dfe08b765f6b 100644 --- a/drivers/media/platform/ti/cal/cal-camerarx.c +++ b/drivers/media/platform/ti/cal/cal-camerarx.c @@ -798,7 +798,7 @@ static const struct v4l2_subdev_internal_ops cal_camerarx_internal_ops = { .init_state = cal_camerarx_sd_init_state, }; -static struct media_entity_operations cal_camerarx_media_ops = { +static const struct media_entity_operations cal_camerarx_media_ops = { .link_validate = v4l2_subdev_link_validate, }; diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c index 528909ae4bd63e..5c2c04142aeed2 100644 --- a/drivers/media/platform/ti/cal/cal.c +++ b/drivers/media/platform/ti/cal/cal.c @@ -549,7 +549,7 @@ void cal_ctx_start(struct cal_ctx *ctx) void cal_ctx_stop(struct cal_ctx *ctx) { struct cal_camerarx *phy = ctx->phy; - long timeout; + long time_left; WARN_ON(phy->vc_enable_count[ctx->vc] == 0); @@ -565,9 +565,9 @@ void cal_ctx_stop(struct cal_ctx *ctx) ctx->dma.state = CAL_DMA_STOP_REQUESTED; spin_unlock_irq(&ctx->dma.lock); - timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx), - msecs_to_jiffies(500)); - if (!timeout) { + time_left = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx), + msecs_to_jiffies(500)); + if (!time_left) { ctx_err(ctx, "failed to disable dma cleanly\n"); cal_ctx_wr_dma_disable(ctx); } diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c index c28794b6677b77..16326437767f80 100644 --- a/drivers/media/platform/ti/davinci/vpif_capture.c +++ b/drivers/media/platform/ti/davinci/vpif_capture.c @@ -1487,7 +1487,7 @@ static struct vpif_capture_config * vpif_capture_get_pdata(struct platform_device *pdev, struct v4l2_device *v4l2_dev) { - struct device_node *endpoint = NULL; + struct device_node *endpoint; struct device_node *rem = NULL; struct vpif_capture_config *pdata; struct vpif_subdev_info *sdinfo; @@ -1517,16 +1517,12 @@ vpif_capture_get_pdata(struct platform_device *pdev, if (!pdata->subdev_info) return NULL; - for (i = 0; i < VPIF_CAPTURE_NUM_CHANNELS; i++) { + i = 0; + for_each_endpoint_of_node(pdev->dev.of_node, endpoint) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 }; unsigned int 
flags; int err; - endpoint = of_graph_get_next_endpoint(pdev->dev.of_node, - endpoint); - if (!endpoint) - break; - rem = of_graph_get_remote_port_parent(endpoint); if (!rem) { dev_dbg(&pdev->dev, "Remote device at %pOF not found\n", @@ -1577,6 +1573,10 @@ vpif_capture_get_pdata(struct platform_device *pdev, goto err_cleanup; of_node_put(rem); + + i++; + if (i >= VPIF_CAPTURE_NUM_CHANNELS) + break; } done: diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c index 1cda23244c7bc7..91101ba88ef01f 100644 --- a/drivers/media/platform/ti/omap3isp/isp.c +++ b/drivers/media/platform/ti/omap3isp/isp.c @@ -1965,7 +1965,7 @@ static int isp_attach_iommu(struct isp_device *isp) * Create the ARM mapping, used by the ARM DMA mapping core to allocate * VAs. This will allocate a corresponding IOMMU domain. */ - mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G); + mapping = arm_iommu_create_mapping(isp->dev, SZ_1G, SZ_2G); if (IS_ERR(mapping)) { dev_err(isp->dev, "failed to create ARM IOMMU mapping\n"); return PTR_ERR(mapping); diff --git a/drivers/media/platform/verisilicon/Kconfig b/drivers/media/platform/verisilicon/Kconfig index 149d0b32c324f3..3272a24db71d2a 100644 --- a/drivers/media/platform/verisilicon/Kconfig +++ b/drivers/media/platform/verisilicon/Kconfig @@ -21,6 +21,14 @@ config VIDEO_HANTRO To compile this driver as a module, choose M here: the module will be called hantro-vpu. +config VIDEO_HANTRO_HEVC_RFC + bool "Use reference frame compression for HEVC" + depends on VIDEO_HANTRO + default n + help + Enable the reference frame compression feature for the HEVC codec. + It will use more memory but save bandwidth on memory bus. + config VIDEO_HANTRO_IMX8M bool "Hantro VPU i.MX8M support" depends on VIDEO_HANTRO diff --git a/drivers/media/platform/verisilicon/Makefile b/drivers/media/platform/verisilicon/Makefile index eb38a1833b02fa..f6f019d04ff003 100644 --- a/drivers/media/platform/verisilicon/Makefile +++ b/drivers/media/platform/verisilicon/Makefile @@ -14,13 +14,6 @@ hantro-vpu-y += \ hantro_g2.o \ hantro_g2_hevc_dec.o \ hantro_g2_vp9_dec.o \ - rockchip_vpu2_hw_jpeg_enc.o \ - rockchip_vpu2_hw_h264_dec.o \ - rockchip_vpu2_hw_mpeg2_dec.o \ - rockchip_vpu2_hw_vp8_dec.o \ - rockchip_vpu981_hw_av1_dec.o \ - rockchip_av1_filmgrain.o \ - rockchip_av1_entropymode.o \ hantro_jpeg.o \ hantro_h264.o \ hantro_hevc.o \ @@ -35,6 +28,13 @@ hantro-vpu-$(CONFIG_VIDEO_HANTRO_SAMA5D4) += \ sama5d4_vdec_hw.o hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \ + rockchip_vpu2_hw_jpeg_enc.o \ + rockchip_vpu2_hw_h264_dec.o \ + rockchip_vpu2_hw_mpeg2_dec.o \ + rockchip_vpu2_hw_vp8_dec.o \ + rockchip_vpu981_hw_av1_dec.o \ + rockchip_av1_filmgrain.o \ + rockchip_av1_entropymode.o \ rockchip_vpu_hw.o hantro-vpu-$(CONFIG_VIDEO_HANTRO_SUNXI) += \ diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c index 34b123dafd890b..05bbac853c4fd7 100644 --- a/drivers/media/platform/verisilicon/hantro_drv.c +++ b/drivers/media/platform/verisilicon/hantro_drv.c @@ -722,6 +722,7 @@ static const struct of_device_id of_hantro_match[] = { { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, }, { .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, }, { .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, }, + { .compatible = "rockchip,rk3588-vepu121", .data = &rk3568_vepu_variant, }, { .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, }, #endif #ifdef 
CONFIG_VIDEO_HANTRO_IMX8M @@ -992,6 +993,49 @@ static const struct media_device_ops hantro_m2m_media_ops = { .req_queue = v4l2_m2m_request_queue, }; +/* + * Some SoCs, like RK3588 have multiple identical Hantro cores, but the + * kernel is currently missing support for multi-core handling. Exposing + * separate devices for each core to userspace is bad, since that does + * not allow scheduling tasks properly (and creates ABI). With this workaround + * the driver will only probe for the first core and early exit for the other + * cores. Once the driver gains multi-core support, the same technique + * for detecting the main core can be used to cluster all cores together. + */ +static int hantro_disable_multicore(struct hantro_dev *vpu) +{ + struct device_node *node = NULL; + const char *compatible; + bool is_main_core; + int ret; + + /* Intentionally ignores the fallback strings */ + ret = of_property_read_string(vpu->dev->of_node, "compatible", &compatible); + if (ret) + return ret; + + /* The first compatible and available node found is considered the main core */ + do { + node = of_find_compatible_node(node, NULL, compatible); + if (of_device_is_available(node)) + break; + } while (node); + + if (!node) + return -EINVAL; + + is_main_core = (vpu->dev->of_node == node); + + of_node_put(node); + + if (!is_main_core) { + dev_info(vpu->dev, "missing multi-core support, ignoring this instance\n"); + return -ENODEV; + } + + return 0; +} + static int hantro_probe(struct platform_device *pdev) { const struct of_device_id *match; @@ -1011,6 +1055,10 @@ static int hantro_probe(struct platform_device *pdev) match = of_match_node(of_hantro_match, pdev->dev.of_node); vpu->variant = match->data; + ret = hantro_disable_multicore(vpu); + if (ret) + return ret; + /* * Support for nxp,imx8mq-vpu is kept for backwards compatibility * but it's deprecated. Please update your DTS file to use diff --git a/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c b/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c index 9aea331e1a3c92..e0d6bd0a6e44f5 100644 --- a/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c @@ -5,7 +5,7 @@ * Copyright (C) 2018 Rockchip Electronics Co., Ltd. 
*/ -#include +#include #include #include #include "hantro.h" diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c index b880a6849d5852..5c1d799d861840 100644 --- a/drivers/media/platform/verisilicon/hantro_g2.c +++ b/drivers/media/platform/verisilicon/hantro_g2.c @@ -56,3 +56,32 @@ size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx) return ALIGN((cr_offset * 3) / 2, G2_ALIGN); } + +static size_t hantro_g2_mv_size(struct hantro_ctx *ctx) +{ + const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls; + const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps; + unsigned int pic_width_in_ctbs, pic_height_in_ctbs; + unsigned int max_log2_ctb_size; + + max_log2_ctb_size = sps->log2_min_luma_coding_block_size_minus3 + 3 + + sps->log2_diff_max_min_luma_coding_block_size; + pic_width_in_ctbs = (sps->pic_width_in_luma_samples + + (1 << max_log2_ctb_size) - 1) >> max_log2_ctb_size; + pic_height_in_ctbs = (sps->pic_height_in_luma_samples + (1 << max_log2_ctb_size) - 1) + >> max_log2_ctb_size; + + return pic_width_in_ctbs * pic_height_in_ctbs * (1 << (2 * (max_log2_ctb_size - 4))) * 16; +} + +size_t hantro_g2_luma_compress_offset(struct hantro_ctx *ctx) +{ + return hantro_g2_motion_vectors_offset(ctx) + + hantro_g2_mv_size(ctx); +} + +size_t hantro_g2_chroma_compress_offset(struct hantro_ctx *ctx) +{ + return hantro_g2_luma_compress_offset(ctx) + + hantro_hevc_luma_compressed_size(ctx->dst_fmt.width, ctx->dst_fmt.height); +} diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c index d3f8c33eb16c2b..85a44143b3786b 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c @@ -367,11 +367,14 @@ static int set_ref(struct hantro_ctx *ctx) const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params; const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb; dma_addr_t luma_addr, chroma_addr, mv_addr = 0; + dma_addr_t compress_luma_addr, compress_chroma_addr = 0; struct hantro_dev *vpu = ctx->dev; struct vb2_v4l2_buffer *vb2_dst; struct hantro_decoded_buffer *dst; size_t cr_offset = hantro_g2_chroma_offset(ctx); size_t mv_offset = hantro_g2_motion_vectors_offset(ctx); + size_t compress_luma_offset = hantro_g2_luma_compress_offset(ctx); + size_t compress_chroma_offset = hantro_g2_chroma_compress_offset(ctx); u32 max_ref_frames; u16 dpb_longterm_e; static const struct hantro_reg cur_poc[] = { @@ -445,6 +448,8 @@ static int set_ref(struct hantro_ctx *ctx) chroma_addr = luma_addr + cr_offset; mv_addr = luma_addr + mv_offset; + compress_luma_addr = luma_addr + compress_luma_offset; + compress_chroma_addr = luma_addr + compress_chroma_offset; if (dpb[i].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE) dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i); @@ -452,6 +457,8 @@ static int set_ref(struct hantro_ctx *ctx) hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr); hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr); hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr); + hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), compress_luma_addr); + hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i), compress_chroma_addr); } vb2_dst = hantro_get_dst_buf(ctx); @@ -465,19 +472,27 @@ static int set_ref(struct hantro_ctx *ctx) chroma_addr = luma_addr + cr_offset; mv_addr = luma_addr + mv_offset; + compress_luma_addr = luma_addr + compress_luma_offset; + compress_chroma_addr = 
luma_addr + compress_chroma_offset; hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr); hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr); - hantro_write_addr(vpu, G2_REF_MV_ADDR(i++), mv_addr); + hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr); + hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), compress_luma_addr); + hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i++), compress_chroma_addr); hantro_write_addr(vpu, G2_OUT_LUMA_ADDR, luma_addr); hantro_write_addr(vpu, G2_OUT_CHROMA_ADDR, chroma_addr); hantro_write_addr(vpu, G2_OUT_MV_ADDR, mv_addr); + hantro_write_addr(vpu, G2_OUT_COMP_LUMA_ADDR, compress_luma_addr); + hantro_write_addr(vpu, G2_OUT_COMP_CHROMA_ADDR, compress_chroma_addr); for (; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) { hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), 0); hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), 0); hantro_write_addr(vpu, G2_REF_MV_ADDR(i), 0); + hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), 0); + hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i), 0); } hantro_reg_write(vpu, &g2_refer_lterm_e, dpb_longterm_e); @@ -594,8 +609,7 @@ int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx) /* Don't disable output */ hantro_reg_write(vpu, &g2_out_dis, 0); - /* Don't compress buffers */ - hantro_reg_write(vpu, &g2_ref_compress_bypass, 1); + hantro_reg_write(vpu, &g2_ref_compress_bypass, !ctx->hevc_dec.use_compression); /* Bus width and max burst */ hantro_reg_write(vpu, &g2_buswidth, BUS_WIDTH_128); diff --git a/drivers/media/platform/verisilicon/hantro_g2_regs.h b/drivers/media/platform/verisilicon/hantro_g2_regs.h index 82606783591a2d..b943b1816db7fd 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_regs.h +++ b/drivers/media/platform/verisilicon/hantro_g2_regs.h @@ -318,6 +318,10 @@ #define G2_TILE_BSD_ADDR (G2_SWREG(183)) #define G2_DS_DST (G2_SWREG(186)) #define G2_DS_DST_CHR (G2_SWREG(188)) +#define G2_OUT_COMP_LUMA_ADDR (G2_SWREG(190)) +#define G2_REF_COMP_LUMA_ADDR(i) (G2_SWREG(192) + ((i) * 0x8)) +#define G2_OUT_COMP_CHROMA_ADDR (G2_SWREG(224)) +#define G2_REF_COMP_CHROMA_ADDR(i) (G2_SWREG(226) + ((i) * 0x8)) #define g2_strm_buffer_len G2_DEC_REG(258, 0, 0xffffffff) #define g2_strm_start_offset G2_DEC_REG(259, 0, 0xffffffff) diff --git a/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c b/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c index 12d69503d6baa5..86cc1a07026f0d 100644 --- a/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c +++ b/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c @@ -5,7 +5,7 @@ * Copyright (C) 2018 Rockchip Electronics Co., Ltd. 
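[Illustrative aside, not part of the patch] To make the new reference-frame-compression (RFC) layout easier to follow: each decoded buffer now carries two extra regions after the motion vectors, and the G2_*_COMP_*_ADDR writes above simply point into them. The per-buffer layout implied by the offset helpers, plus a back-of-the-envelope size using the hantro_hevc_*_compressed_size() helpers added to hantro_hw.h further down, looks roughly like this (the 1920x1088, 8-bit figures are worked out here for illustration only):

	luma_addr + 0                                   decoded luma plane
	luma_addr + hantro_g2_chroma_offset()           decoded chroma plane
	luma_addr + hantro_g2_motion_vectors_offset()   motion vectors
	luma_addr + hantro_g2_luma_compress_offset()    RFC luma table
	luma_addr + hantro_g2_chroma_compress_offset()  RFC chroma table

	/* 1920x1088, 8-bit; CBS_LUMA = 8, CBS_CHROMA_W = 16, CBS_CHROMA_H = 4 */
	luma table:   round_up((1920 + 7) / 8, 16) * ((1088 + 7) / 8)      = 240 * 136 -> 32640 bytes
	chroma table: round_up((1920 + 15) / 16, 16) * ((1088 / 2 + 3) / 4) = 128 * 136 -> 17408 bytes

That is roughly 49 KiB of extra memory per reference buffer, which is the memory-for-bandwidth trade-off the new VIDEO_HANTRO_HEVC_RFC Kconfig help text describes.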
*/ -#include +#include #include #include "hantro_jpeg.h" #include "hantro.h" diff --git a/drivers/media/platform/verisilicon/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c index 2c14330bc5621e..83cd12b0ddd655 100644 --- a/drivers/media/platform/verisilicon/hantro_hevc.c +++ b/drivers/media/platform/verisilicon/hantro_hevc.c @@ -25,6 +25,11 @@ #define MAX_TILE_COLS 20 #define MAX_TILE_ROWS 22 +static bool hevc_use_compression = IS_ENABLED(CONFIG_VIDEO_HANTRO_HEVC_RFC); +module_param_named(hevc_use_compression, hevc_use_compression, bool, 0644); +MODULE_PARM_DESC(hevc_use_compression, + "Use reference frame compression for HEVC"); + void hantro_hevc_ref_init(struct hantro_ctx *ctx) { struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec; @@ -275,5 +280,8 @@ int hantro_hevc_dec_init(struct hantro_ctx *ctx) hantro_hevc_ref_init(ctx); + hevc_dec->use_compression = + hevc_use_compression & hantro_needs_postproc(ctx, ctx->vpu_dst_fmt); + return 0; } diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h index 7737320cc8cc62..c9b6556f8b2b78 100644 --- a/drivers/media/platform/verisilicon/hantro_hw.h +++ b/drivers/media/platform/verisilicon/hantro_hw.h @@ -42,6 +42,13 @@ #define MAX_POSTPROC_BUFFERS 64 +#define CBS_SIZE 16 /* compression table size in bytes */ +#define CBS_LUMA 8 /* luminance CBS is composed of 1 8x8 coded block */ +#define CBS_CHROMA_W (8 * 2) /* chrominance CBS is composed of two 8x4 coded + * blocks, with Cb CB first then Cr CB following + */ +#define CBS_CHROMA_H 4 + struct hantro_dev; struct hantro_ctx; struct hantro_buf; @@ -144,6 +151,7 @@ struct hantro_hevc_dec_ctrls { * @ref_bufs_used: Bitfield of used reference buffers * @ctrls: V4L2 controls attached to a run * @num_tile_cols_allocated: number of allocated tiles + * @use_compression: use reference buffer compression */ struct hantro_hevc_dec_hw_ctx { struct hantro_aux_buf tile_sizes; @@ -156,6 +164,7 @@ struct hantro_hevc_dec_hw_ctx { u32 ref_bufs_used; struct hantro_hevc_dec_ctrls ctrls; unsigned int num_tile_cols_allocated; + bool use_compression; }; /** @@ -510,6 +519,33 @@ hantro_hevc_mv_size(unsigned int width, unsigned int height) return width * height / 16; } +static inline size_t +hantro_hevc_luma_compressed_size(unsigned int width, unsigned int height) +{ + u32 pic_width_in_cbsy = + round_up((width + CBS_LUMA - 1) / CBS_LUMA, CBS_SIZE); + u32 pic_height_in_cbsy = (height + CBS_LUMA - 1) / CBS_LUMA; + + return round_up(pic_width_in_cbsy * pic_height_in_cbsy, CBS_SIZE); +} + +static inline size_t +hantro_hevc_chroma_compressed_size(unsigned int width, unsigned int height) +{ + u32 pic_width_in_cbsc = + round_up((width + CBS_CHROMA_W - 1) / CBS_CHROMA_W, CBS_SIZE); + u32 pic_height_in_cbsc = (height / 2 + CBS_CHROMA_H - 1) / CBS_CHROMA_H; + + return round_up(pic_width_in_cbsc * pic_height_in_cbsc, CBS_SIZE); +} + +static inline size_t +hantro_hevc_compressed_size(unsigned int width, unsigned int height) +{ + return hantro_hevc_luma_compressed_size(width, height) + + hantro_hevc_chroma_compressed_size(width, height); +} + static inline unsigned short hantro_av1_num_sbs(unsigned short dimension) { return DIV_ROUND_UP(dimension, 64); @@ -525,6 +561,8 @@ hantro_av1_mv_size(unsigned int width, unsigned int height) size_t hantro_g2_chroma_offset(struct hantro_ctx *ctx); size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx); +size_t hantro_g2_luma_compress_offset(struct hantro_ctx *ctx); +size_t hantro_g2_chroma_compress_offset(struct 
hantro_ctx *ctx); int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx); int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx); diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c index 41e93176300bdd..232c93eea7eea6 100644 --- a/drivers/media/platform/verisilicon/hantro_postproc.c +++ b/drivers/media/platform/verisilicon/hantro_postproc.c @@ -213,9 +213,13 @@ static unsigned int hantro_postproc_buffer_size(struct hantro_ctx *ctx) else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME) buf_size += hantro_vp9_mv_size(pix_mp.width, pix_mp.height); - else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE) + else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE) { buf_size += hantro_hevc_mv_size(pix_mp.width, pix_mp.height); + if (ctx->hevc_dec.use_compression) + buf_size += hantro_hevc_compressed_size(pix_mp.width, + pix_mp.height); + } else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME) buf_size += hantro_av1_mv_size(pix_mp.width, pix_mp.height); diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c index df6f2536263bb6..62d3962c18d992 100644 --- a/drivers/media/platform/verisilicon/hantro_v4l2.c +++ b/drivers/media/platform/verisilicon/hantro_v4l2.c @@ -303,11 +303,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx, coded = capture == ctx->is_encoder; - vpu_debug(4, "trying format %c%c%c%c\n", - (pix_mp->pixelformat & 0x7f), - (pix_mp->pixelformat >> 8) & 0x7f, - (pix_mp->pixelformat >> 16) & 0x7f, - (pix_mp->pixelformat >> 24) & 0x7f); + vpu_debug(4, "trying format %p4cc\n", &pix_mp->pixelformat); fmt = hantro_find_format(ctx, pix_mp->pixelformat); if (!fmt) { diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c index 8395c4d48dd0bc..61621b1be8a2f8 100644 --- a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c +++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c @@ -22,7 +22,7 @@ * zigzag, nor linear. */ -#include +#include #include #include "hantro_jpeg.h" #include "hantro.h" diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c index b66737fab46b4b..50a3a3eeaa00d6 100644 --- a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c +++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c @@ -5,7 +5,7 @@ * Copyright (C) 2018 Rockchip Electronics Co., Ltd. 
*/ -#include +#include #include #include #include "hantro.h" diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c index cc4483857489c5..65e8f2d074005c 100644 --- a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c +++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c @@ -257,7 +257,8 @@ static int rockchip_vpu981_av1_dec_tiles_reallocate(struct hantro_ctx *ctx) struct hantro_dev *vpu = ctx->dev; struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec; struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls; - unsigned int num_tile_cols = 1 << ctrls->tile_group_entry->tile_col; + const struct v4l2_av1_tile_info *tile_info = &ctrls->frame->tile_info; + unsigned int num_tile_cols = tile_info->tile_cols; unsigned int height = ALIGN(ctrls->frame->frame_height_minus_1 + 1, 64); unsigned int height_in_sb = height / 64; unsigned int stripe_num = ((height + 8) + 63) / 64; diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h index 850ff0f844248d..e4008da64f19b8 100644 --- a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h +++ b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h @@ -327,7 +327,7 @@ #define av1_apf_threshold AV1_DEC_REG(55, 0, 0xffff) #define av1_apf_single_pu_mode AV1_DEC_REG(55, 30, 0x1) -#define av1_apf_disable AV1_DEC_REG(55, 30, 0x1) +#define av1_apf_disable AV1_DEC_REG(55, 31, 0x1) #define av1_dec_max_burst AV1_DEC_REG(58, 0, 0xff) #define av1_dec_buswidth AV1_DEC_REG(58, 8, 0x7) @@ -337,10 +337,10 @@ #define av1_dec_mc_polltime AV1_DEC_REG(58, 17, 0x3ff) #define av1_dec_mc_pollmode AV1_DEC_REG(58, 27, 0x3) -#define av1_filt_ref_adj_3 AV1_DEC_REG(59, 0, 0x3f) -#define av1_filt_ref_adj_2 AV1_DEC_REG(59, 7, 0x3f) -#define av1_filt_ref_adj_1 AV1_DEC_REG(59, 14, 0x3f) -#define av1_filt_ref_adj_0 AV1_DEC_REG(59, 21, 0x3f) +#define av1_filt_ref_adj_3 AV1_DEC_REG(59, 0, 0x7f) +#define av1_filt_ref_adj_2 AV1_DEC_REG(59, 7, 0x7f) +#define av1_filt_ref_adj_1 AV1_DEC_REG(59, 14, 0x7f) +#define av1_filt_ref_adj_0 AV1_DEC_REG(59, 21, 0x7f) #define av1_ref0_sign_bias AV1_DEC_REG(59, 28, 0x1) #define av1_ref1_sign_bias AV1_DEC_REG(59, 29, 0x1) #define av1_ref2_sign_bias AV1_DEC_REG(59, 30, 0x1) diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c index f9752767078355..964122e7c35593 100644 --- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c +++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c @@ -82,7 +82,6 @@ static const struct hantro_fmt rockchip_vpu981_postproc_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12, .codec_mode = HANTRO_MODE_NONE, - .match_depth = true, .postprocessed = true, .frmsize = { .min_width = ROCKCHIP_VPU981_MIN_SIZE, diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c index 996684a7303831..bfe48cc0ab5254 100644 --- a/drivers/media/platform/xilinx/xilinx-vipp.c +++ b/drivers/media/platform/xilinx/xilinx-vipp.c @@ -199,18 +199,13 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev) struct media_pad *sink_pad; struct xvip_graph_entity *ent; struct v4l2_fwnode_link link; - struct device_node *ep = NULL; + struct device_node *ep; struct xvip_dma *dma; int ret = 0; dev_dbg(xdev->dev, "creating links for DMA engines\n"); - while (1) { - /* Get the next endpoint and parse its link. 
*/ - ep = of_graph_get_next_endpoint(node, ep); - if (ep == NULL) - break; - + for_each_endpoint_of_node(node, ep) { dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep); ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link); diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c index c3180d53c2824c..64c7452c05b57b 100644 --- a/drivers/media/radio/radio-raremono.c +++ b/drivers/media/radio/radio-raremono.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c index 14e7dd3889ff00..dd85b0b1bcd962 100644 --- a/drivers/media/radio/radio-tea5764.c +++ b/drivers/media/radio/radio-tea5764.c @@ -502,7 +502,7 @@ static void tea5764_i2c_remove(struct i2c_client *client) /* I2C subsystem interface */ static const struct i2c_device_id tea5764_id[] = { - { "radio-tea5764", 0 }, + { "radio-tea5764" }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(i2c, tea5764_id); diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c index 91345198bbf10b..d9eecddffd91d8 100644 --- a/drivers/media/radio/saa7706h.c +++ b/drivers/media/radio/saa7706h.c @@ -395,8 +395,8 @@ static void saa7706h_remove(struct i2c_client *client) } static const struct i2c_device_id saa7706h_id[] = { - {DRIVER_NAME, 0}, - {}, + { DRIVER_NAME }, + {} }; MODULE_DEVICE_TABLE(i2c, saa7706h_id); diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index fd449e42c19130..cdd2ac198f2c75 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -28,7 +28,7 @@ /* I2C Device ID List */ static const struct i2c_device_id si470x_i2c_id[] = { /* Generic Entry */ - { "si470x", 0 }, + { "si470x" }, /* Terminating entry */ { } }; diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h index e57ab54a27fcac..2915c0023fcd6c 100644 --- a/drivers/media/radio/si470x/radio-si470x.h +++ b/drivers/media/radio/si470x/radio-si470x.h @@ -26,7 +26,7 @@ #include #include #include -#include +#include diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c index ddaf7a60b7d019..e71272c6de37a5 100644 --- a/drivers/media/radio/si4713/si4713.c +++ b/drivers/media/radio/si4713/si4713.c @@ -1639,8 +1639,8 @@ static void si4713_remove(struct i2c_client *client) /* si4713_i2c_driver - i2c driver interface */ static const struct i2c_device_id si4713_id[] = { - { "si4713" , 0 }, - { }, + { "si4713" }, + { } }; MODULE_DEVICE_TABLE(i2c, si4713_id); diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c index 215168aa1588ef..b00ccf6519225a 100644 --- a/drivers/media/radio/tef6862.c +++ b/drivers/media/radio/tef6862.c @@ -173,8 +173,8 @@ static void tef6862_remove(struct i2c_client *client) } static const struct i2c_device_id tef6862_id[] = { - {DRIVER_NAME, 0}, - {}, + { DRIVER_NAME }, + {} }; MODULE_DEVICE_TABLE(i2c, tef6862_id); diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c index 11ee21a7db8f00..67722e2e47ff78 100644 --- a/drivers/media/rc/ene_ir.c +++ b/drivers/media/rc/ene_ir.c @@ -451,9 +451,6 @@ static void ene_rx_setup(struct ene_device *dev) dev->rdev->max_timeout = 200000; } - if (dev->hw_learning_and_tx_capable) - dev->rdev->tx_resolution = sample_period; - if (dev->rdev->timeout > dev->rdev->max_timeout) dev->rdev->timeout = dev->rdev->max_timeout; if 
(dev->rdev->timeout < dev->rdev->min_timeout) diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c index 69e630d85262f6..533faa11751744 100644 --- a/drivers/media/rc/ir_toy.c +++ b/drivers/media/rc/ir_toy.c @@ -12,7 +12,7 @@ * Copyright (C) 2011 Peter Kooiman */ -#include +#include #include #include #include diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index fcfadd7ea31cf3..2bacecb022623e 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -1380,7 +1380,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id rdev->timeout = IR_DEFAULT_TIMEOUT; rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; rdev->rx_resolution = ITE_BAUDRATE_DIVISOR * sample_period / 1000; - rdev->tx_resolution = ITE_BAUDRATE_DIVISOR * sample_period / 1000; /* set up transmitter related values */ rdev->tx_ir = ite_tx_ir; diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index 717c441b4a8654..f042f3f14afa9d 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -706,7 +706,6 @@ static const struct file_operations lirc_fops = { .poll = lirc_poll, .open = lirc_open, .release = lirc_close, - .llseek = no_llseek, }; static void lirc_release_device(struct device *ld) @@ -820,20 +819,20 @@ struct rc_dev *rc_dev_get_from_fd(int fd, bool write) struct lirc_fh *fh; struct rc_dev *dev; - if (!f.file) + if (!fd_file(f)) return ERR_PTR(-EBADF); - if (f.file->f_op != &lirc_fops) { + if (fd_file(f)->f_op != &lirc_fops) { fdput(f); return ERR_PTR(-EINVAL); } - if (write && !(f.file->f_mode & FMODE_WRITE)) { + if (write && !(fd_file(f)->f_mode & FMODE_WRITE)) { fdput(f); return ERR_PTR(-EPERM); } - fh = f.file->private_data; + fh = fd_file(f)->private_data; dev = fh->rc; get_device(&dev->dev); diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c index 5303e6da58095e..9cdb45821eccd6 100644 --- a/drivers/media/rc/meson-ir.c +++ b/drivers/media/rc/meson-ir.c @@ -567,6 +567,32 @@ static void meson_ir_shutdown(struct platform_device *pdev) spin_unlock_irqrestore(&ir->lock, flags); } +static __maybe_unused int meson_ir_resume(struct device *dev) +{ + struct meson_ir *ir = dev_get_drvdata(dev); + + if (ir->param->support_hw_decoder) + meson_ir_hw_decoder_init(ir->rc, &ir->rc->enabled_protocols); + else + meson_ir_sw_decoder_init(ir->rc); + + return 0; +} + +static __maybe_unused int meson_ir_suspend(struct device *dev) +{ + struct meson_ir *ir = dev_get_drvdata(dev); + unsigned long flags; + + spin_lock_irqsave(&ir->lock, flags); + regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE, 0); + spin_unlock_irqrestore(&ir->lock, flags); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(meson_ir_pm_ops, meson_ir_suspend, meson_ir_resume); + static const struct meson_ir_param meson6_ir_param = { .support_hw_decoder = false, .max_register = IR_DEC_REG1, @@ -607,6 +633,7 @@ static struct platform_driver meson_ir_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = meson_ir_match, + .pm = pm_ptr(&meson_ir_pm_ops), }, }; diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c index b356041c5c00e7..8288366f891fc9 100644 --- a/drivers/media/rc/rc-loopback.c +++ b/drivers/media/rc/rc-loopback.c @@ -230,7 +230,6 @@ static int __init loop_init(void) rc->min_timeout = 1; rc->max_timeout = IR_MAX_TIMEOUT; rc->rx_resolution = 1; - rc->tx_resolution = 1; rc->s_tx_mask = loop_set_tx_mask; rc->s_tx_carrier = loop_set_tx_carrier; rc->s_tx_duty_cycle = loop_set_tx_duty_cycle; diff --git 
a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c index 9f2947af33aa7c..d89a4cfe3c8959 100644 --- a/drivers/media/rc/redrat3.c +++ b/drivers/media/rc/redrat3.c @@ -31,7 +31,7 @@ * -- */ -#include +#include #include #include #include diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c index 3e011fe62ae11c..846e90c062910e 100644 --- a/drivers/media/test-drivers/vicodec/vicodec-core.c +++ b/drivers/media/test-drivers/vicodec/vicodec-core.c @@ -1215,8 +1215,7 @@ static int vicodec_encoder_cmd(struct file *file, void *fh, if (ret < 0) return ret; - if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) || - !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q)) + if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q)) return 0; ret = v4l2_m2m_ioctl_encoder_cmd(file, fh, ec); @@ -1250,8 +1249,7 @@ static int vicodec_decoder_cmd(struct file *file, void *fh, if (ret < 0) return ret; - if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) || - !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q)) + if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q)) return 0; ret = v4l2_m2m_ioctl_decoder_cmd(file, fh, dc); diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c index 7a0cd9601917ff..505f96fccbf300 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_demod.c +++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c @@ -407,7 +407,7 @@ static const struct dvb_frontend_ops vidtv_demod_ops = { }; static const struct i2c_device_id vidtv_demod_i2c_id_table[] = { - {"dvb_vidtv_demod", 0}, + { "dvb_vidtv_demod" }, {} }; MODULE_DEVICE_TABLE(i2c, vidtv_demod_i2c_id_table); diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.c b/drivers/media/test-drivers/vidtv/vidtv_tuner.c index a748737d47f3b0..4ba302d569d6a4 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_tuner.c +++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.c @@ -385,7 +385,7 @@ static const struct dvb_tuner_ops vidtv_tuner_ops = { }; static const struct i2c_device_id vidtv_tuner_i2c_id_table[] = { - {"dvb_vidtv_tuner", 0}, + { "dvb_vidtv_tuner" }, {} }; MODULE_DEVICE_TABLE(i2c, vidtv_tuner_i2c_id_table); diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c index 941ef426321425..356a988dd6a135 100644 --- a/drivers/media/test-drivers/vivid/vivid-cec.c +++ b/drivers/media/test-drivers/vivid/vivid-cec.c @@ -316,15 +316,16 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) struct vivid_dev *dev = cec_get_drvdata(adap); struct cec_msg reply; u8 dest = cec_msg_destination(msg); - u8 disp_ctl; - char osd[14]; if (cec_msg_is_broadcast(msg)) dest = adap->log_addrs.log_addr[0]; cec_msg_init(&reply, dest, cec_msg_initiator(msg)); switch (cec_msg_opcode(msg)) { - case CEC_MSG_SET_OSD_STRING: + case CEC_MSG_SET_OSD_STRING: { + u8 disp_ctl; + char osd[14]; + if (!cec_is_sink(adap)) return -ENOMSG; cec_ops_set_osd_string(msg, &disp_ctl, osd); @@ -348,6 +349,47 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) break; } break; + } + case CEC_MSG_VENDOR_COMMAND_WITH_ID: { + u32 vendor_id; + u8 size; + const u8 *vendor_cmd; + + /* + * If we receive with our vendor ID + * and with a payload of size 1, and the payload value is odd, + * then we reply with the same message, but with the payload + * byte incremented by 1. + * + * If the size is 1 and the payload value is even, then we + * ignore the message. 
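[Illustrative aside, not part of the patch] A concrete exchange may help when exercising the rule described in this comment (payload bytes are examples; the adapter's vendor ID can be read back with CEC_ADAP_G_LOG_ADDRS):

	payload 0x03 (odd)      -> vivid replies with the same message, payload 0x04
	payload 0xff (odd)      -> vivid replies with payload 0x00, the wrap-around
	                           corner case the comment below calls out
	payload 0x04 (even)     -> silently ignored, no reply
	payload of another size -> FEATURE_ABORT with CEC_OP_ABORT_INVALID_OP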
+ * + * The reason we reply to odd instead of even payload values + * is that it allows for testing of the corner case where the + * reply value is 0 (0xff + 1 % 256). + * + * For other sizes we Feature Abort. + * + * This is added for the specific purpose of testing the + * CEC_MSG_FL_REPLY_VENDOR_ID flag using vivid. + */ + cec_ops_vendor_command_with_id(msg, &vendor_id, &size, &vendor_cmd); + if (vendor_id != adap->log_addrs.vendor_id) + break; + if (size == 1) { + // Ignore even op values + if (!(vendor_cmd[0] & 1)) + break; + reply.len = msg->len; + memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1); + reply.msg[msg->len - 1]++; + } else { + cec_msg_feature_abort(&reply, cec_msg_opcode(msg), + CEC_OP_ABORT_INVALID_OP); + } + cec_transmit_msg(adap, &reply, false); + break; + } default: return -ENOMSG; } diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c index 3893a00c18ce1d..549b2009f97487 100644 --- a/drivers/media/tuners/e4000.c +++ b/drivers/media/tuners/e4000.c @@ -719,7 +719,7 @@ static void e4000_remove(struct i2c_client *client) } static const struct i2c_device_id e4000_id_table[] = { - {"e4000", 0}, + { "e4000" }, {} }; MODULE_DEVICE_TABLE(i2c, e4000_id_table); diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c index f6613dcf40a39f..046389896dc5f7 100644 --- a/drivers/media/tuners/fc2580.c +++ b/drivers/media/tuners/fc2580.c @@ -600,7 +600,7 @@ static void fc2580_remove(struct i2c_client *client) } static const struct i2c_device_id fc2580_id_table[] = { - {"fc2580", 0}, + { "fc2580" }, {} }; MODULE_DEVICE_TABLE(i2c, fc2580_id_table); diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c index 2cd7f0e0c70d5d..cc57980ed417ed 100644 --- a/drivers/media/tuners/m88rs6000t.c +++ b/drivers/media/tuners/m88rs6000t.c @@ -709,7 +709,7 @@ static void m88rs6000t_remove(struct i2c_client *client) } static const struct i2c_device_id m88rs6000t_id[] = { - {"m88rs6000t", 0}, + { "m88rs6000t" }, {} }; MODULE_DEVICE_TABLE(i2c, m88rs6000t_id); diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c index 4205ed4cf46752..4b9dca2f17cc6c 100644 --- a/drivers/media/tuners/mt2060.c +++ b/drivers/media/tuners/mt2060.c @@ -514,7 +514,7 @@ static void mt2060_remove(struct i2c_client *client) } static const struct i2c_device_id mt2060_id_table[] = { - {"mt2060", 0}, + { "mt2060" }, {} }; MODULE_DEVICE_TABLE(i2c, mt2060_id_table); diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c index 9b2b237745ae39..7c03d4132763f7 100644 --- a/drivers/media/tuners/mxl301rf.c +++ b/drivers/media/tuners/mxl301rf.c @@ -317,7 +317,7 @@ static void mxl301rf_remove(struct i2c_client *client) static const struct i2c_device_id mxl301rf_id[] = { - {"mxl301rf", 0}, + { "mxl301rf" }, {} }; MODULE_DEVICE_TABLE(i2c, mxl301rf_id); diff --git a/drivers/media/tuners/qm1d1b0004.c b/drivers/media/tuners/qm1d1b0004.c index af2d3618b9d5b0..c53aeb558413ca 100644 --- a/drivers/media/tuners/qm1d1b0004.c +++ b/drivers/media/tuners/qm1d1b0004.c @@ -243,7 +243,7 @@ static void qm1d1b0004_remove(struct i2c_client *client) static const struct i2c_device_id qm1d1b0004_id[] = { - {"qm1d1b0004", 0}, + { "qm1d1b0004" }, {} }; diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c index ce7223315b0cf4..c58f5b6526f15a 100644 --- a/drivers/media/tuners/qm1d1c0042.c +++ b/drivers/media/tuners/qm1d1c0042.c @@ -434,7 +434,7 @@ static void qm1d1c0042_remove(struct i2c_client *client) static const struct 
i2c_device_id qm1d1c0042_id[] = { - {"qm1d1c0042", 0}, + { "qm1d1c0042" }, {} }; MODULE_DEVICE_TABLE(i2c, qm1d1c0042_id); diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c index 8d742bd61df094..39f2dc9c2845c6 100644 --- a/drivers/media/tuners/tda18212.c +++ b/drivers/media/tuners/tda18212.c @@ -254,7 +254,7 @@ static void tda18212_remove(struct i2c_client *client) } static const struct i2c_device_id tda18212_id[] = { - {"tda18212", 0}, + { "tda18212" }, {} }; MODULE_DEVICE_TABLE(i2c, tda18212_id); diff --git a/drivers/media/tuners/tda18250.c b/drivers/media/tuners/tda18250.c index 32ea473f3f496a..68d0275f29e1b7 100644 --- a/drivers/media/tuners/tda18250.c +++ b/drivers/media/tuners/tda18250.c @@ -868,7 +868,7 @@ static void tda18250_remove(struct i2c_client *client) } static const struct i2c_device_id tda18250_id_table[] = { - {"tda18250", 0}, + { "tda18250" }, {} }; MODULE_DEVICE_TABLE(i2c, tda18250_id_table); diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c index 03a3a022b0a887..562a7a5c26f56a 100644 --- a/drivers/media/tuners/tua9001.c +++ b/drivers/media/tuners/tua9001.c @@ -245,7 +245,7 @@ static void tua9001_remove(struct i2c_client *client) } static const struct i2c_device_id tua9001_id_table[] = { - {"tua9001", 0}, + { "tua9001" }, {} }; MODULE_DEVICE_TABLE(i2c, tua9001_id_table); diff --git a/drivers/media/tuners/tuner-i2c.h b/drivers/media/tuners/tuner-i2c.h index 07aeead0644a31..724952e001cd13 100644 --- a/drivers/media/tuners/tuner-i2c.h +++ b/drivers/media/tuners/tuner-i2c.h @@ -133,10 +133,8 @@ static inline int tuner_i2c_xfer_send_recv(struct tuner_i2c_props *props, } \ if (0 == __ret) { \ state = kzalloc(sizeof(type), GFP_KERNEL); \ - if (!state) { \ - __ret = -ENOMEM; \ + if (NULL == state) \ goto __fail; \ - } \ state->i2c_props.addr = i2caddr; \ state->i2c_props.adap = i2cadap; \ state->i2c_props.name = devname; \ diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c index 352b8a3679b721..8e6638e5f6889a 100644 --- a/drivers/media/tuners/xc2028.c +++ b/drivers/media/tuners/xc2028.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "tuner-i2c.h" #include "xc2028.h" #include "xc2028-types.h" diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index 29bc63021c5aae..3cf54d776d36c8 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c index c88a202daf5fc1..a2054b1b100fe4 100644 --- a/drivers/media/usb/dvb-usb/m920x.c +++ b/drivers/media/usb/dvb-usb/m920x.c @@ -17,7 +17,7 @@ #include #include "tuner-simple.h" -#include +#include /* debug */ static int dvb_usb_m920x_debug; diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c index db1fab96d52934..a155b987282f89 100644 --- a/drivers/media/usb/go7007/s2250-board.c +++ b/drivers/media/usb/go7007/s2250-board.c @@ -611,7 +611,7 @@ static void s2250_remove(struct i2c_client *client) } static const struct i2c_device_id s2250_id[] = { - { "s2250", 0 }, + { "s2250" }, { } }; MODULE_DEVICE_TABLE(i2c, s2250_id); diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c index 1a1258d4ffcadf..14fa41cb8148f1 100644 --- a/drivers/media/usb/uvc/uvc_debugfs.c +++ b/drivers/media/usb/uvc/uvc_debugfs.c @@ -59,7 +59,6 @@ static int uvc_debugfs_stats_release(struct inode *inode, 
struct file *file) static const struct file_operations uvc_debugfs_stats_fops = { .owner = THIS_MODULE, .open = uvc_debugfs_stats_open, - .llseek = no_llseek, .read = uvc_debugfs_stats_read, .release = uvc_debugfs_stats_release, }; diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index f0febdc08c2d65..0fac689c6350b2 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index cd9c29532fb031..e00f38dd07d935 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c index 1ff94affbaf3a3..e9ecf47859465d 100644 --- a/drivers/media/v4l2-core/v4l2-cci.c +++ b/drivers/media/v4l2-core/v4l2-cci.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index be2ba7ca5de205..3d7711cc42bc58 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -483,7 +483,6 @@ static const struct file_operations v4l2_fops = { #endif .release = v4l2_release, .poll = v4l2_poll, - .llseek = no_llseek, }; /** @@ -557,6 +556,7 @@ static void determine_valid_ioctls(struct video_device *vdev) bool is_tx = vdev->vfl_dir != VFL_DIR_RX; bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC; bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING; + bool is_edid = vdev->device_caps & V4L2_CAP_EDID; bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE); @@ -784,6 +784,20 @@ static void determine_valid_ioctls(struct video_device *vdev) SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner); SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek); } + if (is_edid) { + SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); + if (is_tx) { + SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); + SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); + SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); + } + if (is_rx) { + SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); + SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); + SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); + SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); + } + } bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls, BASE_VIDIOC_PRIVATE); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 5eb4d797d259f0..e14db67be97c50 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -484,7 +484,7 @@ static void v4l_print_create_buffers(const void *arg, bool write_only) { const struct v4l2_create_buffers *p = arg; - pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u", + pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u, ", p->index, p->count, prt_names(p->memory, v4l2_memory_names), p->capabilities, p->max_num_buffers); v4l_print_format(&p->format, write_only); @@ -1458,6 +1458,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break; case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break; case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A 
Statistics"; break; + case V4L2_META_FMT_RK_ISP1_EXT_PARAMS: descr = "Rockchip ISP1 Ext 3A Params"; break; case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break; case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break; case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break; diff --git a/drivers/media/v4l2-core/v4l2-jpeg.c b/drivers/media/v4l2-core/v4l2-jpeg.c index b8bece739d07a9..6e264732352262 100644 --- a/drivers/media/v4l2-core/v4l2-jpeg.c +++ b/drivers/media/v4l2-core/v4l2-jpeg.c @@ -9,7 +9,7 @@ * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf */ -#include +#include #include #include #include diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 7c5812d5531512..3a4ba08810d249 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -1443,16 +1443,53 @@ int v4l2_subdev_link_validate(struct media_link *link) bool states_locked; int ret; - if (!is_media_entity_v4l2_subdev(link->sink->entity) || - !is_media_entity_v4l2_subdev(link->source->entity)) { - pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n", - !is_media_entity_v4l2_subdev(link->sink->entity) ? - "sink" : "source", - link->source->entity->name, link->source->index, - link->sink->entity->name, link->sink->index); - return 0; + /* + * Links are validated in the context of the sink entity. Usage of this + * helper on a sink that is not a subdev is a clear driver bug. + */ + if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity))) + return -EINVAL; + + /* + * If the source is a video device, delegate link validation to it. This + * allows usage of this helper for subdev connected to a video output + * device, provided that the driver implement the video output device's + * .link_validate() operation. + */ + if (is_media_entity_v4l2_video_device(link->source->entity)) { + struct media_entity *source = link->source->entity; + + if (!source->ops || !source->ops->link_validate) { + /* + * Many existing drivers do not implement the required + * .link_validate() operation for their video devices. + * Print a warning to get the drivers fixed, and return + * 0 to avoid breaking userspace. This should + * eventually be turned into a WARN_ON() when all + * drivers will have been fixed. + */ + pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n", + source->name); + return 0; + } + + /* + * Avoid infinite loops in case a video device incorrectly uses + * this helper function as its .link_validate() handler. + */ + if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate)) + return -EINVAL; + + return source->ops->link_validate(link); } + /* + * If the source is still not a subdev, usage of this helper is a clear + * driver bug. 
+ */ + if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity))) + return -EINVAL; + sink_sd = media_entity_to_v4l2_subdev(link->sink->entity); source_sd = media_entity_to_v4l2_subdev(link->source->entity); diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index e8bb5f37f5cbbe..8db970da9af960 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -6,6 +6,7 @@ * Copyright (C) 2013 Jean-Jacques Hiblot */ +#include #include #include #include @@ -517,7 +518,7 @@ static int atmel_ebi_dev_disable(struct atmel_ebi *ebi, struct device_node *np) static int atmel_ebi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *child, *np = dev->of_node, *smc_np; + struct device_node *np = dev->of_node; struct atmel_ebi *ebi; int ret, reg_cells; struct clk *clk; @@ -541,30 +542,24 @@ static int atmel_ebi_probe(struct platform_device *pdev) ebi->clk = clk; - smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0); + struct device_node *smc_np __free(device_node) = + of_parse_phandle(dev->of_node, "atmel,smc", 0); ebi->smc.regmap = syscon_node_to_regmap(smc_np); - if (IS_ERR(ebi->smc.regmap)) { - ret = PTR_ERR(ebi->smc.regmap); - goto put_node; - } + if (IS_ERR(ebi->smc.regmap)) + return PTR_ERR(ebi->smc.regmap); ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np); - if (IS_ERR(ebi->smc.layout)) { - ret = PTR_ERR(ebi->smc.layout); - goto put_node; - } + if (IS_ERR(ebi->smc.layout)) + return PTR_ERR(ebi->smc.layout); ebi->smc.clk = of_clk_get(smc_np, 0); if (IS_ERR(ebi->smc.clk)) { - if (PTR_ERR(ebi->smc.clk) != -ENOENT) { - ret = PTR_ERR(ebi->smc.clk); - goto put_node; - } + if (PTR_ERR(ebi->smc.clk) != -ENOENT) + return PTR_ERR(ebi->smc.clk); ebi->smc.clk = NULL; } - of_node_put(smc_np); ret = clk_prepare_enable(ebi->smc.clk); if (ret) return ret; @@ -597,7 +592,7 @@ static int atmel_ebi_probe(struct platform_device *pdev) reg_cells += val; - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { if (!of_property_present(child, "reg")) continue; @@ -607,18 +602,12 @@ static int atmel_ebi_probe(struct platform_device *pdev) child); ret = atmel_ebi_dev_disable(ebi, child); - if (ret) { - of_node_put(child); + if (ret) return ret; - } } } return of_platform_populate(np, NULL, NULL, dev); - -put_node: - of_node_put(smc_np); - return ret; } static __maybe_unused int atmel_ebi_resume(struct device *dev) diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index 8c5ad5c025fad1..99eb7d1baa5ff6 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -7,6 +7,7 @@ * Aneesh V * Santosh Shilimkar */ +#include #include #include #include @@ -57,7 +58,6 @@ struct emif_data { u8 temperature_level; u8 lpmode; struct list_head node; - unsigned long irq_state; void __iomem *base; struct device *dev; struct emif_regs *regs_cache[EMIF_MAX_NUM_FREQUENCIES]; @@ -69,7 +69,6 @@ struct emif_data { static struct emif_data *emif1; static DEFINE_SPINLOCK(emif_lock); -static unsigned long irq_state; static LIST_HEAD(device_list); static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif, @@ -523,18 +522,18 @@ static void setup_temperature_sensitive_regs(struct emif_data *emif, static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif) { u32 old_temp_level; - irqreturn_t ret = IRQ_HANDLED; + irqreturn_t ret; struct emif_custom_configs *custom_configs; - spin_lock_irqsave(&emif_lock, irq_state); + guard(spinlock_irqsave)(&emif_lock); old_temp_level = 
emif->temperature_level; get_temperature_level(emif); if (unlikely(emif->temperature_level == old_temp_level)) { - goto out; + return IRQ_HANDLED; } else if (!emif->curr_regs) { dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n"); - goto out; + return IRQ_HANDLED; } custom_configs = emif->plat_data->custom_configs; @@ -554,8 +553,7 @@ static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif) * from thread context */ emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN; - ret = IRQ_WAKE_THREAD; - goto out; + return IRQ_WAKE_THREAD; } } @@ -571,10 +569,9 @@ static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif) /* Temperature is going up - handle immediately */ setup_temperature_sensitive_regs(emif, emif->curr_regs); do_freq_update(); + ret = IRQ_HANDLED; } -out: - spin_unlock_irqrestore(&emif_lock, irq_state); return ret; } @@ -617,6 +614,7 @@ static irqreturn_t emif_interrupt_handler(int irq, void *dev_id) static irqreturn_t emif_threaded_isr(int irq, void *dev_id) { struct emif_data *emif = dev_id; + unsigned long irq_state; if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) { dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n"); @@ -864,7 +862,7 @@ static void of_get_custom_configs(struct device_node *np_emif, be32_to_cpup(poll_intvl); } - if (of_find_property(np_emif, "extended-temp-part", &len)) + if (of_property_read_bool(np_emif, "extended-temp-part")) cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART; if (!is_custom_config_valid(cust_cfgs, emif->dev)) { @@ -880,13 +878,9 @@ static void of_get_ddr_info(struct device_node *np_emif, struct ddr_device_info *dev_info) { u32 density = 0, io_width = 0; - int len; - if (of_find_property(np_emif, "cs1-used", &len)) - dev_info->cs1_used = true; - - if (of_find_property(np_emif, "cal-resistor-per-cs", &len)) - dev_info->cal_resistors_per_cs = true; + dev_info->cs1_used = of_property_read_bool(np_emif, "cs1-used"); + dev_info->cal_resistors_per_cs = of_property_read_bool(np_emif, "cal-resistor-per-cs"); if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4")) dev_info->type = DDR_TYPE_LPDDR2_S4; @@ -916,7 +910,6 @@ static struct emif_data *of_get_memory_device_details( struct ddr_device_info *dev_info = NULL; struct emif_platform_data *pd = NULL; struct device_node *np_ddr; - int len; np_ddr = of_parse_phandle(np_emif, "device-handle", 0); if (!np_ddr) @@ -944,7 +937,7 @@ static struct emif_data *of_get_memory_device_details( of_property_read_u32(np_emif, "phy-type", &pd->phy_type); - if (of_find_property(np_emif, "hw-caps-ll-interface", &len)) + if (of_property_read_bool(np_emif, "hw-caps-ll-interface")) pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE; of_get_ddr_info(np_emif, np_ddr, dev_info); diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index fbe52ecc0eca82..2bc034dff691bf 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -771,13 +771,9 @@ static int mtk_smi_common_probe(struct platform_device *pdev) if (IS_ERR(common->smi_ao_base)) return PTR_ERR(common->smi_ao_base); - common->clk_async = devm_clk_get(dev, "async"); + common->clk_async = devm_clk_get_enabled(dev, "async"); if (IS_ERR(common->clk_async)) return PTR_ERR(common->clk_async); - - ret = clk_prepare_enable(common->clk_async); - if (ret) - return ret; } else { common->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(common->base)) diff --git a/drivers/memory/omap-gpmc.c 
b/drivers/memory/omap-gpmc.c index 80d038884207ba..c8a0d82f9c27df 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -9,6 +9,7 @@ * Copyright (C) 2009 Texas Instruments * Added OMAP4 support - Santosh Shilimkar */ +#include #include #include #include @@ -989,18 +990,18 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base) if (size > (1 << GPMC_SECTION_SHIFT)) return -ENOMEM; - spin_lock(&gpmc_mem_lock); - if (gpmc_cs_reserved(cs)) { - r = -EBUSY; - goto out; - } + guard(spinlock)(&gpmc_mem_lock); + + if (gpmc_cs_reserved(cs)) + return -EBUSY; + if (gpmc_cs_mem_enabled(cs)) r = adjust_resource(res, res->start & ~(size - 1), size); if (r < 0) r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0, size, NULL, NULL); if (r < 0) - goto out; + return r; /* Disable CS while changing base address and size mask */ gpmc_cs_disable_mem(cs); @@ -1008,16 +1009,15 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base) r = gpmc_cs_set_memconf(cs, res->start, resource_size(res)); if (r < 0) { release_resource(res); - goto out; + return r; } /* Enable CS */ gpmc_cs_enable_mem(cs); *base = res->start; gpmc_cs_set_reserved(cs, 1); -out: - spin_unlock(&gpmc_mem_lock); - return r; + + return 0; } EXPORT_SYMBOL(gpmc_cs_request); @@ -1026,10 +1026,9 @@ void gpmc_cs_free(int cs) struct gpmc_cs_data *gpmc; struct resource *res; - spin_lock(&gpmc_mem_lock); + guard(spinlock)(&gpmc_mem_lock); if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) { WARN(1, "Trying to free non-reserved GPMC CS%d\n", cs); - spin_unlock(&gpmc_mem_lock); return; } gpmc = &gpmc_cs[cs]; @@ -1039,7 +1038,6 @@ void gpmc_cs_free(int cs) if (res->flags) release_resource(res); gpmc_cs_set_reserved(cs, 0); - spin_unlock(&gpmc_mem_lock); } EXPORT_SYMBOL(gpmc_cs_free); diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c index 9eb8cc7de494a8..be7ba599cccf31 100644 --- a/drivers/memory/pl172.c +++ b/drivers/memory/pl172.c @@ -187,6 +187,13 @@ static int pl172_parse_cs_config(struct amba_device *adev, return -EINVAL; } +static void pl172_amba_release_regions(void *data) +{ + struct amba_device *adev = data; + + amba_release_regions(adev); +} + static const char * const pl172_revisions[] = {"r1", "r2", "r2p3", "r2p4"}; static const char * const pl175_revisions[] = {"r1"}; static const char * const pl176_revisions[] = {"r0"}; @@ -216,38 +223,30 @@ static int pl172_probe(struct amba_device *adev, const struct amba_id *id) if (!pl172) return -ENOMEM; - pl172->clk = devm_clk_get(dev, "mpmcclk"); - if (IS_ERR(pl172->clk)) { - dev_err(dev, "no mpmcclk provided clock\n"); - return PTR_ERR(pl172->clk); - } - - ret = clk_prepare_enable(pl172->clk); - if (ret) { - dev_err(dev, "unable to mpmcclk enable clock\n"); - return ret; - } + pl172->clk = devm_clk_get_enabled(dev, "mpmcclk"); + if (IS_ERR(pl172->clk)) + return dev_err_probe(dev, PTR_ERR(pl172->clk), + "no mpmcclk provided clock\n"); pl172->rate = clk_get_rate(pl172->clk) / MSEC_PER_SEC; - if (!pl172->rate) { - dev_err(dev, "unable to get mpmcclk clock rate\n"); - ret = -EINVAL; - goto err_clk_enable; - } + if (!pl172->rate) + return dev_err_probe(dev, -EINVAL, + "unable to get mpmcclk clock rate\n"); ret = amba_request_regions(adev, NULL); if (ret) { dev_err(dev, "unable to request AMBA regions\n"); - goto err_clk_enable; + return ret; } + ret = devm_add_action_or_reset(dev, pl172_amba_release_regions, adev); + if (ret) + return ret; + pl172->base = devm_ioremap(dev, adev->res.start, resource_size(&adev->res)); - if (!pl172->base) 
{ - dev_err(dev, "ioremap failed\n"); - ret = -ENOMEM; - goto err_no_ioremap; - } + if (!pl172->base) + return dev_err_probe(dev, -ENOMEM, "ioremap failed\n"); amba_set_drvdata(adev, pl172); @@ -265,20 +264,6 @@ static int pl172_probe(struct amba_device *adev, const struct amba_id *id) } return 0; - -err_no_ioremap: - amba_release_regions(adev); -err_clk_enable: - clk_disable_unprepare(pl172->clk); - return ret; -} - -static void pl172_remove(struct amba_device *adev) -{ - struct pl172_data *pl172 = amba_get_drvdata(adev); - - clk_disable_unprepare(pl172->clk); - amba_release_regions(adev); } static const struct amba_id pl172_ids[] = { @@ -306,7 +291,6 @@ static struct amba_driver pl172_driver = { .name = "memory-pl172", }, .probe = pl172_probe, - .remove = pl172_remove, .id_table = pl172_ids, }; module_amba_driver(pl172_driver); diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c index 56e51737c81fe9..28a8cc56003cd9 100644 --- a/drivers/memory/pl353-smc.c +++ b/drivers/memory/pl353-smc.c @@ -74,73 +74,39 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) struct device_node *of_node = adev->dev.of_node; const struct of_device_id *match = NULL; struct pl353_smc_data *pl353_smc; - struct device_node *child; - int err; pl353_smc = devm_kzalloc(&adev->dev, sizeof(*pl353_smc), GFP_KERNEL); if (!pl353_smc) return -ENOMEM; - pl353_smc->aclk = devm_clk_get(&adev->dev, "apb_pclk"); - if (IS_ERR(pl353_smc->aclk)) { - dev_err(&adev->dev, "aclk clock not found.\n"); - return PTR_ERR(pl353_smc->aclk); - } - - pl353_smc->memclk = devm_clk_get(&adev->dev, "memclk"); - if (IS_ERR(pl353_smc->memclk)) { - dev_err(&adev->dev, "memclk clock not found.\n"); - return PTR_ERR(pl353_smc->memclk); - } + pl353_smc->aclk = devm_clk_get_enabled(&adev->dev, "apb_pclk"); + if (IS_ERR(pl353_smc->aclk)) + return dev_err_probe(&adev->dev, PTR_ERR(pl353_smc->aclk), + "aclk clock not found.\n"); - err = clk_prepare_enable(pl353_smc->aclk); - if (err) { - dev_err(&adev->dev, "Unable to enable AXI clock.\n"); - return err; - } - - err = clk_prepare_enable(pl353_smc->memclk); - if (err) { - dev_err(&adev->dev, "Unable to enable memory clock.\n"); - goto disable_axi_clk; - } + pl353_smc->memclk = devm_clk_get_enabled(&adev->dev, "memclk"); + if (IS_ERR(pl353_smc->memclk)) + return dev_err_probe(&adev->dev, PTR_ERR(pl353_smc->memclk), + "memclk clock not found.\n"); amba_set_drvdata(adev, pl353_smc); /* Find compatible children. 
Only a single child is supported */ - for_each_available_child_of_node(of_node, child) { + for_each_available_child_of_node_scoped(of_node, child) { match = of_match_node(pl353_smc_supported_children, child); if (!match) { dev_warn(&adev->dev, "unsupported child node\n"); continue; } + of_platform_device_create(child, NULL, &adev->dev); break; } if (!match) { - err = -ENODEV; dev_err(&adev->dev, "no matching children\n"); - goto disable_mem_clk; + return -ENODEV; } - of_platform_device_create(child, NULL, &adev->dev); - of_node_put(child); - return 0; - -disable_mem_clk: - clk_disable_unprepare(pl353_smc->memclk); -disable_axi_clk: - clk_disable_unprepare(pl353_smc->aclk); - - return err; -} - -static void pl353_smc_remove(struct amba_device *adev) -{ - struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev); - - clk_disable_unprepare(pl353_smc->memclk); - clk_disable_unprepare(pl353_smc->aclk); } static const struct amba_id pl353_ids[] = { @@ -159,7 +125,6 @@ static struct amba_driver pl353_smc_driver = { }, .id_table = pl353_ids, .probe = pl353_smc_probe, - .remove = pl353_smc_remove, }; module_amba_driver(pl353_smc_driver); diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index 3167826b236a34..7fbd36fa1a1b46 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -367,7 +367,7 @@ int rpcif_hw_init(struct device *dev, bool hyperflash) regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) | RPCIF_CMNCR_BSZ(3), - RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(2) | + RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(3) | RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); else regmap_update_bits(rpc->regmap, RPCIF_CMNCR, diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c index da7ecd921c72ab..7d80322754fada 100644 --- a/drivers/memory/samsung/exynos5422-dmc.c +++ b/drivers/memory/samsung/exynos5422-dmc.c @@ -4,6 +4,7 @@ * Author: Lukasz Luba */ +#include #include #include #include @@ -339,19 +340,20 @@ static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set) static int exynos5_init_freq_table(struct exynos5_dmc *dmc, struct devfreq_dev_profile *profile) { + struct device *dev = dmc->dev; int i, ret; int idx; unsigned long freq; - ret = devm_pm_opp_of_add_table(dmc->dev); + ret = devm_pm_opp_of_add_table(dev); if (ret < 0) { - dev_err(dmc->dev, "Failed to get OPP table\n"); + dev_err(dev, "Failed to get OPP table\n"); return ret; } - dmc->opp_count = dev_pm_opp_get_opp_count(dmc->dev); + dmc->opp_count = dev_pm_opp_get_opp_count(dev); - dmc->opp = devm_kmalloc_array(dmc->dev, dmc->opp_count, + dmc->opp = devm_kmalloc_array(dev, dmc->opp_count, sizeof(struct dmc_opp_table), GFP_KERNEL); if (!dmc->opp) return -ENOMEM; @@ -360,7 +362,7 @@ static int exynos5_init_freq_table(struct exynos5_dmc *dmc, for (i = 0, freq = ULONG_MAX; i < dmc->opp_count; i++, freq--) { struct dev_pm_opp *opp; - opp = dev_pm_opp_find_freq_floor(dmc->dev, &freq); + opp = dev_pm_opp_find_freq_floor(dev, &freq); if (IS_ERR(opp)) return PTR_ERR(opp); @@ -1175,51 +1177,44 @@ static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row, static int of_get_dram_timings(struct exynos5_dmc *dmc) { int ret = 0; + struct device *dev = dmc->dev; int idx; - struct device_node *np_ddr; u32 freq_mhz, clk_period_ps; - np_ddr = of_parse_phandle(dmc->dev->of_node, "device-handle", 0); + struct device_node *np_ddr __free(device_node) = + of_parse_phandle(dev->of_node, "device-handle", 0); if 
(!np_ddr) { - dev_warn(dmc->dev, "could not find 'device-handle' in DT\n"); + dev_warn(dev, "could not find 'device-handle' in DT\n"); return -EINVAL; } - dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT, + dmc->timing_row = devm_kmalloc_array(dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_row) { - ret = -ENOMEM; - goto put_node; - } + if (!dmc->timing_row) + return -ENOMEM; - dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT, + dmc->timing_data = devm_kmalloc_array(dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_data) { - ret = -ENOMEM; - goto put_node; - } + if (!dmc->timing_data) + return -ENOMEM; - dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT, + dmc->timing_power = devm_kmalloc_array(dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_power) { - ret = -ENOMEM; - goto put_node; - } + if (!dmc->timing_power) + return -ENOMEM; - dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev, + dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dev, DDR_TYPE_LPDDR3, &dmc->timings_arr_size); if (!dmc->timings) { - dev_warn(dmc->dev, "could not get timings from DT\n"); - ret = -EINVAL; - goto put_node; + dev_warn(dev, "could not get timings from DT\n"); + return -EINVAL; } - dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev); + dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dev); if (!dmc->min_tck) { - dev_warn(dmc->dev, "could not get tck from DT\n"); - ret = -EINVAL; - goto put_node; + dev_warn(dev, "could not get tck from DT\n"); + return -EINVAL; } /* Sorted array of OPPs with frequency ascending */ @@ -1239,8 +1234,6 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) dmc->bypass_timing_data = dmc->timing_data[idx - 1]; dmc->bypass_timing_power = dmc->timing_power[idx - 1]; -put_node: - of_node_put(np_ddr); return ret; } @@ -1254,34 +1247,34 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc) { int ret; + struct device *dev = dmc->dev; unsigned long target_volt = 0; unsigned long target_rate = 0; unsigned int tmp; - dmc->fout_spll = devm_clk_get(dmc->dev, "fout_spll"); + dmc->fout_spll = devm_clk_get(dev, "fout_spll"); if (IS_ERR(dmc->fout_spll)) return PTR_ERR(dmc->fout_spll); - dmc->fout_bpll = devm_clk_get(dmc->dev, "fout_bpll"); + dmc->fout_bpll = devm_clk_get(dev, "fout_bpll"); if (IS_ERR(dmc->fout_bpll)) return PTR_ERR(dmc->fout_bpll); - dmc->mout_mclk_cdrex = devm_clk_get(dmc->dev, "mout_mclk_cdrex"); + dmc->mout_mclk_cdrex = devm_clk_get(dev, "mout_mclk_cdrex"); if (IS_ERR(dmc->mout_mclk_cdrex)) return PTR_ERR(dmc->mout_mclk_cdrex); - dmc->mout_bpll = devm_clk_get(dmc->dev, "mout_bpll"); + dmc->mout_bpll = devm_clk_get(dev, "mout_bpll"); if (IS_ERR(dmc->mout_bpll)) return PTR_ERR(dmc->mout_bpll); - dmc->mout_mx_mspll_ccore = devm_clk_get(dmc->dev, - "mout_mx_mspll_ccore"); + dmc->mout_mx_mspll_ccore = devm_clk_get(dev, "mout_mx_mspll_ccore"); if (IS_ERR(dmc->mout_mx_mspll_ccore)) return PTR_ERR(dmc->mout_mx_mspll_ccore); - dmc->mout_spll = devm_clk_get(dmc->dev, "ff_dout_spll2"); + dmc->mout_spll = devm_clk_get(dev, "ff_dout_spll2"); if (IS_ERR(dmc->mout_spll)) { - dmc->mout_spll = devm_clk_get(dmc->dev, "mout_sclk_spll"); + dmc->mout_spll = devm_clk_get(dev, "mout_sclk_spll"); if (IS_ERR(dmc->mout_spll)) return PTR_ERR(dmc->mout_spll); } @@ -1329,38 +1322,37 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc) */ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc) { + struct device *dev = dmc->dev; int 
ret, i; - dmc->num_counters = devfreq_event_get_edev_count(dmc->dev, - "devfreq-events"); + dmc->num_counters = devfreq_event_get_edev_count(dev, "devfreq-events"); if (dmc->num_counters < 0) { - dev_err(dmc->dev, "could not get devfreq-event counters\n"); + dev_err(dev, "could not get devfreq-event counters\n"); return dmc->num_counters; } - dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters, + dmc->counter = devm_kcalloc(dev, dmc->num_counters, sizeof(*dmc->counter), GFP_KERNEL); if (!dmc->counter) return -ENOMEM; for (i = 0; i < dmc->num_counters; i++) { dmc->counter[i] = - devfreq_event_get_edev_by_phandle(dmc->dev, - "devfreq-events", i); + devfreq_event_get_edev_by_phandle(dev, "devfreq-events", i); if (IS_ERR_OR_NULL(dmc->counter[i])) return -EPROBE_DEFER; } ret = exynos5_counters_enable_edev(dmc); if (ret < 0) { - dev_err(dmc->dev, "could not enable event counter\n"); + dev_err(dev, "could not enable event counter\n"); return ret; } ret = exynos5_counters_set_event(dmc); if (ret < 0) { exynos5_counters_disable_edev(dmc); - dev_err(dmc->dev, "could not set event counter\n"); + dev_err(dev, "could not set event counter\n"); return ret; } diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c index 1c63eeacd071cc..566c225f71c06e 100644 --- a/drivers/memory/stm32-fmc2-ebi.c +++ b/drivers/memory/stm32-fmc2-ebi.c @@ -1573,29 +1573,22 @@ static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi, static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi) { struct device *dev = ebi->dev; - struct device_node *child; bool child_found = false; u32 bank; int ret; - for_each_available_child_of_node(dev->of_node, child) { + for_each_available_child_of_node_scoped(dev->of_node, child) { ret = of_property_read_u32(child, "reg", &bank); - if (ret) { - dev_err(dev, "could not retrieve reg property: %d\n", - ret); - of_node_put(child); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "could not retrieve reg property\n"); if (bank >= FMC2_MAX_BANKS) { dev_err(dev, "invalid reg value: %d\n", bank); - of_node_put(child); return -EINVAL; } if (ebi->bank_assigned & BIT(bank)) { dev_err(dev, "bank already assigned: %d\n", bank); - of_node_put(child); return -EINVAL; } @@ -1603,19 +1596,15 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi) ret = ebi->data->check_rif(ebi, bank + 1); if (ret) { dev_err(dev, "bank access failed: %d\n", bank); - of_node_put(child); return ret; } } if (bank < FMC2_MAX_EBI_CE) { ret = stm32_fmc2_ebi_setup_cs(ebi, child, bank); - if (ret) { - dev_err(dev, "setup chip select %d failed: %d\n", - bank, ret); - of_node_put(child); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, + "setup chip select %d failed\n", bank); } ebi->bank_assigned |= BIT(bank); diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index 224b488794e5bc..bd5b58f1fd4294 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -450,7 +450,6 @@ static int load_one_timing(struct tegra_mc *mc, static int load_timings(struct tegra_mc *mc, struct device_node *node) { - struct device_node *child; struct tegra_mc_timing *timing; int child_count = of_get_child_count(node); int i = 0, err; @@ -462,14 +461,12 @@ static int load_timings(struct tegra_mc *mc, struct device_node *node) mc->num_timings = child_count; - for_each_child_of_node(node, child) { + for_each_child_of_node_scoped(node, child) { timing = &mc->timings[i++]; err = load_one_timing(mc, timing, child); - if (err) { - of_node_put(child); + if (err) 
return err; - } } return 0; @@ -477,7 +474,6 @@ static int load_timings(struct tegra_mc *mc, struct device_node *node) static int tegra_mc_setup_timings(struct tegra_mc *mc) { - struct device_node *node; u32 ram_code, node_ram_code; int err; @@ -485,14 +481,13 @@ static int tegra_mc_setup_timings(struct tegra_mc *mc) mc->num_timings = 0; - for_each_child_of_node(mc->dev->of_node, node) { + for_each_child_of_node_scoped(mc->dev->of_node, node) { err = of_property_read_u32(node, "nvidia,ram-code", &node_ram_code); if (err || (node_ram_code != ram_code)) continue; err = load_timings(mc, node); - of_node_put(node); if (err) return err; break; diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c index 47c0c19e13fd5a..03f1daa2d132a8 100644 --- a/drivers/memory/tegra/tegra124-emc.c +++ b/drivers/memory/tegra/tegra124-emc.c @@ -992,7 +992,6 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, struct device_node *node) { int child_count = of_get_child_count(node); - struct device_node *child; struct emc_timing *timing; unsigned int i = 0; int err; @@ -1004,14 +1003,12 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, emc->num_timings = child_count; - for_each_child_of_node(node, child) { + for_each_child_of_node_scoped(node, child) { timing = &emc->timings[i++]; err = load_one_timing_from_dt(emc, timing, child); - if (err) { - of_node_put(child); + if (err) return err; - } } sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings, diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c index 57d9ae12fcfe1a..33d67d25171940 100644 --- a/drivers/memory/tegra/tegra186-emc.c +++ b/drivers/memory/tegra/tegra186-emc.c @@ -35,11 +35,6 @@ struct tegra186_emc { struct icc_provider provider; }; -static inline struct tegra186_emc *to_tegra186_emc(struct icc_provider *provider) -{ - return container_of(provider, struct tegra186_emc, provider); -} - /* * debugfs interface * diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c index 97cf59523b0b17..7193f848d17e66 100644 --- a/drivers/memory/tegra/tegra20-emc.c +++ b/drivers/memory/tegra/tegra20-emc.c @@ -410,7 +410,6 @@ static int cmp_timings(const void *_a, const void *_b) static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, struct device_node *node) { - struct device_node *child; struct emc_timing *timing; int child_count; int err; @@ -428,15 +427,13 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc, timing = emc->timings; - for_each_child_of_node(node, child) { + for_each_child_of_node_scoped(node, child) { if (of_node_name_eq(child, "lpddr2")) continue; err = load_one_timing_from_dt(emc, timing++, child); - if (err) { - of_node_put(child); + if (err) return err; - } emc->num_timings++; } diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c index 4cb608c71ead52..a30a646ec4680f 100644 --- a/drivers/memory/tegra/tegra210-emc-cc-r21021.c +++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c @@ -75,29 +75,29 @@ enum { * The division portion of the average operation. */ #define __AVERAGE_PTFV(dev) \ - ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] = \ - next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \ + ({ next->ptfv_list[(dev)] = \ + next->ptfv_list[(dev)] / \ next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; }) /* * Convert val to fixed point and add it to the temporary average. 
*/ #define __INCREMENT_PTFV(dev, val) \ - ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] += \ + ({ next->ptfv_list[(dev)] += \ ((val) * MOVAVG_PRECISION_FACTOR); }) /* * Convert a moving average back to integral form and return the value. */ #define __MOVAVG_AC(timing, dev) \ - ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \ + ((timing)->ptfv_list[(dev)] / \ MOVAVG_PRECISION_FACTOR) /* Weighted update. */ #define __WEIGHTED_UPDATE_PTFV(dev, nval) \ do { \ int w = PTFV_MOVAVG_WEIGHT_INDEX; \ - int dqs = PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX; \ + int dqs = (dev); \ \ next->ptfv_list[dqs] = \ ((nval * MOVAVG_PRECISION_FACTOR) + \ @@ -105,315 +105,91 @@ enum { next->ptfv_list[w])) / \ (next->ptfv_list[w] + 1); \ \ - emc_dbg(emc, EMA_UPDATES, "%s: (s=%lu) EMA: %u\n", \ + emc_dbg(emc, EMA_UPDATES, "%s: (s=%u) EMA: %u\n", \ __stringify(dev), nval, next->ptfv_list[dqs]); \ } while (0) /* Access a particular average. */ #define __MOVAVG(timing, dev) \ - ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX]) + ((timing)->ptfv_list[(dev)]) -static u32 update_clock_tree_delay(struct tegra210_emc *emc, int type) +static bool tegra210_emc_compare_update_delay(struct tegra210_emc_timing *timing, + u32 measured, u32 idx) { - bool periodic_training_update = type == PERIODIC_TRAINING_UPDATE; - struct tegra210_emc_timing *last = emc->last; - struct tegra210_emc_timing *next = emc->next; - u32 last_timing_rate_mhz = last->rate / 1000; - u32 next_timing_rate_mhz = next->rate / 1000; - bool dvfs_update = type == DVFS_UPDATE; - s32 tdel = 0, tmdel = 0, adel = 0; - bool dvfs_pt1 = type == DVFS_PT1; - unsigned long cval = 0; - u32 temp[2][2], value; - unsigned int i; - - /* - * Dev0 MSB. - */ - if (dvfs_pt1 || periodic_training_update) { - value = tegra210_emc_mrr_read(emc, 2, 19); - - for (i = 0; i < emc->num_channels; i++) { - temp[i][0] = (value & 0x00ff) << 8; - temp[i][1] = (value & 0xff00) << 0; - value >>= 16; - } - - /* - * Dev0 LSB. - */ - value = tegra210_emc_mrr_read(emc, 2, 18); - - for (i = 0; i < emc->num_channels; i++) { - temp[i][0] |= (value & 0x00ff) >> 0; - temp[i][1] |= (value & 0xff00) >> 8; - value >>= 16; - } - } - - if (dvfs_pt1 || periodic_training_update) { - cval = tegra210_emc_actual_osc_clocks(last->run_clocks); - cval *= 1000000; - cval /= last_timing_rate_mhz * 2 * temp[0][0]; - } + u32 *curr = &timing->current_dram_clktree[idx]; + u32 rate_mhz = timing->rate / 1000; + u32 tmdel; - if (dvfs_pt1) - __INCREMENT_PTFV(C0D0U0, cval); - else if (dvfs_update) - __AVERAGE_PTFV(C0D0U0); - else if (periodic_training_update) - __WEIGHTED_UPDATE_PTFV(C0D0U0, cval); - - if (dvfs_update || periodic_training_update) { - tdel = next->current_dram_clktree[C0D0U0] - - __MOVAVG_AC(next, C0D0U0); - tmdel = (tdel < 0) ? 
-1 * tdel : tdel; - adel = tmdel; - - if (tmdel * 128 * next_timing_rate_mhz / 1000000 > - next->tree_margin) - next->current_dram_clktree[C0D0U0] = - __MOVAVG_AC(next, C0D0U0); - } + tmdel = abs(*curr - measured); - if (dvfs_pt1 || periodic_training_update) { - cval = tegra210_emc_actual_osc_clocks(last->run_clocks); - cval *= 1000000; - cval /= last_timing_rate_mhz * 2 * temp[0][1]; + if (tmdel * 128 * rate_mhz / 1000000 > timing->tree_margin) { + *curr = measured; + return true; } - if (dvfs_pt1) - __INCREMENT_PTFV(C0D0U1, cval); - else if (dvfs_update) - __AVERAGE_PTFV(C0D0U1); - else if (periodic_training_update) - __WEIGHTED_UPDATE_PTFV(C0D0U1, cval); - - if (dvfs_update || periodic_training_update) { - tdel = next->current_dram_clktree[C0D0U1] - - __MOVAVG_AC(next, C0D0U1); - tmdel = (tdel < 0) ? -1 * tdel : tdel; - - if (tmdel > adel) - adel = tmdel; - - if (tmdel * 128 * next_timing_rate_mhz / 1000000 > - next->tree_margin) - next->current_dram_clktree[C0D0U1] = - __MOVAVG_AC(next, C0D0U1); - } - - if (emc->num_channels > 1) { - if (dvfs_pt1 || periodic_training_update) { - cval = tegra210_emc_actual_osc_clocks(last->run_clocks); - cval *= 1000000; - cval /= last_timing_rate_mhz * 2 * temp[1][0]; - } - - if (dvfs_pt1) - __INCREMENT_PTFV(C1D0U0, cval); - else if (dvfs_update) - __AVERAGE_PTFV(C1D0U0); - else if (periodic_training_update) - __WEIGHTED_UPDATE_PTFV(C1D0U0, cval); - - if (dvfs_update || periodic_training_update) { - tdel = next->current_dram_clktree[C1D0U0] - - __MOVAVG_AC(next, C1D0U0); - tmdel = (tdel < 0) ? -1 * tdel : tdel; - - if (tmdel > adel) - adel = tmdel; - - if (tmdel * 128 * next_timing_rate_mhz / 1000000 > - next->tree_margin) - next->current_dram_clktree[C1D0U0] = - __MOVAVG_AC(next, C1D0U0); - } - - if (dvfs_pt1 || periodic_training_update) { - cval = tegra210_emc_actual_osc_clocks(last->run_clocks); - cval *= 1000000; - cval /= last_timing_rate_mhz * 2 * temp[1][1]; - } - - if (dvfs_pt1) - __INCREMENT_PTFV(C1D0U1, cval); - else if (dvfs_update) - __AVERAGE_PTFV(C1D0U1); - else if (periodic_training_update) - __WEIGHTED_UPDATE_PTFV(C1D0U1, cval); - - if (dvfs_update || periodic_training_update) { - tdel = next->current_dram_clktree[C1D0U1] - - __MOVAVG_AC(next, C1D0U1); - tmdel = (tdel < 0) ? -1 * tdel : tdel; - - if (tmdel > adel) - adel = tmdel; - - if (tmdel * 128 * next_timing_rate_mhz / 1000000 > - next->tree_margin) - next->current_dram_clktree[C1D0U1] = - __MOVAVG_AC(next, C1D0U1); - } - } - - if (emc->num_devices < 2) - goto done; - - /* - * Dev1 MSB. - */ - if (dvfs_pt1 || periodic_training_update) { - value = tegra210_emc_mrr_read(emc, 1, 19); + return false; +} - for (i = 0; i < emc->num_channels; i++) { - temp[i][0] = (value & 0x00ff) << 8; - temp[i][1] = (value & 0xff00) << 0; - value >>= 16; - } +static void tegra210_emc_get_clktree_delay(struct tegra210_emc *emc, + u32 delay[DRAM_CLKTREE_NUM]) +{ + struct tegra210_emc_timing *curr = emc->last; + u32 rate_mhz = curr->rate / 1000; + u32 msb, lsb, dqsosc, delay_us; + unsigned int c, d, idx; + unsigned long clocks; - /* - * Dev1 LSB. 
- */ - value = tegra210_emc_mrr_read(emc, 1, 18); + clocks = tegra210_emc_actual_osc_clocks(curr->run_clocks); + delay_us = 2 + (clocks / rate_mhz); - for (i = 0; i < emc->num_channels; i++) { - temp[i][0] |= (value & 0x00ff) >> 0; - temp[i][1] |= (value & 0xff00) >> 8; - value >>= 16; - } - } + tegra210_emc_start_periodic_compensation(emc); + udelay(delay_us); - if (dvfs_pt1 || periodic_training_update) { - cval = tegra210_emc_actual_osc_clocks(last->run_clocks); - cval *= 1000000; - cval /= last_timing_rate_mhz * 2 * temp[0][0]; - } + for (d = 0; d < emc->num_devices; d++) { + /* Read DQSOSC from MRR18/19 */ + msb = tegra210_emc_mrr_read(emc, 2 - d, 19); + lsb = tegra210_emc_mrr_read(emc, 2 - d, 18); - if (dvfs_pt1) - __INCREMENT_PTFV(C0D1U0, cval); - else if (dvfs_update) - __AVERAGE_PTFV(C0D1U0); - else if (periodic_training_update) - __WEIGHTED_UPDATE_PTFV(C0D1U0, cval); - - if (dvfs_update || periodic_training_update) { - tdel = next->current_dram_clktree[C0D1U0] - - __MOVAVG_AC(next, C0D1U0); - tmdel = (tdel < 0) ? -1 * tdel : tdel; - - if (tmdel > adel) - adel = tmdel; - - if (tmdel * 128 * next_timing_rate_mhz / 1000000 > - next->tree_margin) - next->current_dram_clktree[C0D1U0] = - __MOVAVG_AC(next, C0D1U0); - } + for (c = 0; c < emc->num_channels; c++) { + /* C[c]D[d]U[0] */ + idx = c * 4 + d * 2; - if (dvfs_pt1 || periodic_training_update) { - cval = tegra210_emc_actual_osc_clocks(last->run_clocks); - cval *= 1000000; - cval /= last_timing_rate_mhz * 2 * temp[0][1]; - } + dqsosc = (msb & 0x00ff) << 8; + dqsosc |= (lsb & 0x00ff) >> 0; - if (dvfs_pt1) - __INCREMENT_PTFV(C0D1U1, cval); - else if (dvfs_update) - __AVERAGE_PTFV(C0D1U1); - else if (periodic_training_update) - __WEIGHTED_UPDATE_PTFV(C0D1U1, cval); - - if (dvfs_update || periodic_training_update) { - tdel = next->current_dram_clktree[C0D1U1] - - __MOVAVG_AC(next, C0D1U1); - tmdel = (tdel < 0) ? -1 * tdel : tdel; - - if (tmdel > adel) - adel = tmdel; - - if (tmdel * 128 * next_timing_rate_mhz / 1000000 > - next->tree_margin) - next->current_dram_clktree[C0D1U1] = - __MOVAVG_AC(next, C0D1U1); - } + /* Check for unpopulated channels */ + if (dqsosc) + delay[idx] = (clocks * 1000000) / + (rate_mhz * 2 * dqsosc); - if (emc->num_channels > 1) { - if (dvfs_pt1 || periodic_training_update) { - cval = tegra210_emc_actual_osc_clocks(last->run_clocks); - cval *= 1000000; - cval /= last_timing_rate_mhz * 2 * temp[1][0]; - } + /* C[c]D[d]U[1] */ + idx++; - if (dvfs_pt1) - __INCREMENT_PTFV(C1D1U0, cval); - else if (dvfs_update) - __AVERAGE_PTFV(C1D1U0); - else if (periodic_training_update) - __WEIGHTED_UPDATE_PTFV(C1D1U0, cval); - - if (dvfs_update || periodic_training_update) { - tdel = next->current_dram_clktree[C1D1U0] - - __MOVAVG_AC(next, C1D1U0); - tmdel = (tdel < 0) ? 
-1 * tdel : tdel; - - if (tmdel > adel) - adel = tmdel; - - if (tmdel * 128 * next_timing_rate_mhz / 1000000 > - next->tree_margin) - next->current_dram_clktree[C1D1U0] = - __MOVAVG_AC(next, C1D1U0); - } + dqsosc = (msb & 0xff00) << 0; + dqsosc |= (lsb & 0xff00) >> 8; - if (dvfs_pt1 || periodic_training_update) { - cval = tegra210_emc_actual_osc_clocks(last->run_clocks); - cval *= 1000000; - cval /= last_timing_rate_mhz * 2 * temp[1][1]; - } + /* Check for unpopulated channels */ + if (dqsosc) + delay[idx] = (clocks * 1000000) / + (rate_mhz * 2 * dqsosc); - if (dvfs_pt1) - __INCREMENT_PTFV(C1D1U1, cval); - else if (dvfs_update) - __AVERAGE_PTFV(C1D1U1); - else if (periodic_training_update) - __WEIGHTED_UPDATE_PTFV(C1D1U1, cval); - - if (dvfs_update || periodic_training_update) { - tdel = next->current_dram_clktree[C1D1U1] - - __MOVAVG_AC(next, C1D1U1); - tmdel = (tdel < 0) ? -1 * tdel : tdel; - - if (tmdel > adel) - adel = tmdel; - - if (tmdel * 128 * next_timing_rate_mhz / 1000000 > - next->tree_margin) - next->current_dram_clktree[C1D1U1] = - __MOVAVG_AC(next, C1D1U1); + msb >>= 16; + lsb >>= 16; } } - -done: - return adel; } -static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type, - struct tegra210_emc_timing *last, - struct tegra210_emc_timing *next) +static bool periodic_compensation_handler(struct tegra210_emc *emc, u32 type, + struct tegra210_emc_timing *last, + struct tegra210_emc_timing *next) { #define __COPY_EMA(nt, lt, dev) \ ({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) * \ (nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; }) - u32 i, adel = 0, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; - u32 delay; - - delay = tegra210_emc_actual_osc_clocks(last->run_clocks); - delay *= 1000; - delay = 2 + (delay / last->rate); + u32 i, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; + u32 delay[DRAM_CLKTREE_NUM], idx; + bool over = false; if (!next->periodic_training) return 0; @@ -427,57 +203,46 @@ static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type, * calibration then we can reuse the previous * frequencies EMA data. */ - __COPY_EMA(next, last, C0D0U0); - __COPY_EMA(next, last, C0D0U1); - __COPY_EMA(next, last, C1D0U0); - __COPY_EMA(next, last, C1D0U1); - __COPY_EMA(next, last, C0D1U0); - __COPY_EMA(next, last, C0D1U1); - __COPY_EMA(next, last, C1D1U0); - __COPY_EMA(next, last, C1D1U1); + for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) + __COPY_EMA(next, last, idx); } else { /* Reset the EMA.*/ - __MOVAVG(next, C0D0U0) = 0; - __MOVAVG(next, C0D0U1) = 0; - __MOVAVG(next, C1D0U0) = 0; - __MOVAVG(next, C1D0U1) = 0; - __MOVAVG(next, C0D1U0) = 0; - __MOVAVG(next, C0D1U1) = 0; - __MOVAVG(next, C1D1U0) = 0; - __MOVAVG(next, C1D1U1) = 0; + for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) + __MOVAVG(next, idx) = 0; for (i = 0; i < samples; i++) { - tegra210_emc_start_periodic_compensation(emc); - udelay(delay); + /* Generate next sample of data. */ + tegra210_emc_get_clktree_delay(emc, delay); - /* - * Generate next sample of data. - */ - adel = update_clock_tree_delay(emc, DVFS_PT1); + for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) + __INCREMENT_PTFV(idx, delay[idx]); } } - /* - * Seems like it should be part of the - * 'if (last_timing->periodic_training)' conditional - * since is already done for the else clause. 
- */ - adel = update_clock_tree_delay(emc, DVFS_UPDATE); + for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) { + /* Do the division part of the moving average */ + __AVERAGE_PTFV(idx); + over |= tegra210_emc_compare_update_delay(next, + __MOVAVG_AC(next, idx), idx); + } } if (type == PERIODIC_TRAINING_SEQUENCE) { - tegra210_emc_start_periodic_compensation(emc); - udelay(delay); + tegra210_emc_get_clktree_delay(emc, delay); - adel = update_clock_tree_delay(emc, PERIODIC_TRAINING_UPDATE); + for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) { + __WEIGHTED_UPDATE_PTFV(idx, delay[idx]); + over |= tegra210_emc_compare_update_delay(next, + __MOVAVG_AC(next, idx), idx); + } } - return adel; + return over; } static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc) { - u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value; + u32 emc_cfg, emc_cfg_o, emc_cfg_update, value; static const u32 list[] = { EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1, @@ -492,7 +257,6 @@ static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc) }; struct tegra210_emc_timing *last = emc->last; unsigned int items = ARRAY_SIZE(list), i; - unsigned long delay; if (last->periodic_training) { emc_dbg(emc, PER_TRAIN, "Periodic training starting\n"); @@ -530,30 +294,18 @@ static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc) /* * 2. osc kick off - this assumes training and dvfs have set * correct MR23. - */ - tegra210_emc_start_periodic_compensation(emc); - - /* + * * 3. Let dram capture its clock tree delays. - */ - delay = tegra210_emc_actual_osc_clocks(last->run_clocks); - delay *= 1000; - delay /= last->rate + 1; - udelay(delay); - - /* + * * 4. Check delta wrt previous values (save value if margin * exceeds what is set in table). */ - del = periodic_compensation_handler(emc, - PERIODIC_TRAINING_SEQUENCE, - last, last); - + if (periodic_compensation_handler(emc, PERIODIC_TRAINING_SEQUENCE, + last, last)) { /* * 5. Apply compensation w.r.t. trained values (if clock tree * has drifted more than the set margin). 
*/ - if (last->tree_margin < ((del * 128 * (last->rate / 1000)) / 1000000)) { for (i = 0; i < items; i++) { value = tegra210_emc_compensate(last, list[i]); emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n", @@ -734,16 +486,7 @@ static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc) EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK, 0); - tegra210_emc_start_periodic_compensation(emc); - - delay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks); - udelay((delay / last->rate) + 2); - - value = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake, - next); - value = (value * 128 * next->rate / 1000) / 1000000; - - if (next->periodic_training && value > next->tree_margin) + if (periodic_compensation_handler(emc, DVFS_SEQUENCE, fake, next)) compensate_trimmer_applicable = true; } diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c index d7b0a23c2d7dbd..921dce1b8bc638 100644 --- a/drivers/memory/tegra/tegra30-emc.c +++ b/drivers/memory/tegra/tegra30-emc.c @@ -979,7 +979,6 @@ static int emc_check_mc_timings(struct tegra_emc *emc) static int emc_load_timings_from_dt(struct tegra_emc *emc, struct device_node *node) { - struct device_node *child; struct emc_timing *timing; int child_count; int err; @@ -998,12 +997,10 @@ static int emc_load_timings_from_dt(struct tegra_emc *emc, emc->num_timings = child_count; timing = emc->timings; - for_each_child_of_node(node, child) { + for_each_child_of_node_scoped(node, child) { err = load_one_timing_from_dt(emc, timing++, child); - if (err) { - of_node_put(child); + if (err) return err; - } } sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings, diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c index e192db9e0e4b2e..d54dc3cfff73cd 100644 --- a/drivers/memory/ti-aemif.c +++ b/drivers/memory/ti-aemif.c @@ -17,7 +17,6 @@ #include #include #include -#include #define TA_SHIFT 2 #define RHOLD_SHIFT 4 @@ -330,42 +329,27 @@ static int aemif_probe(struct platform_device *pdev) int ret = -ENODEV; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct device_node *child_np; struct aemif_device *aemif; - struct aemif_platform_data *pdata; - struct of_dev_auxdata *dev_lookup; aemif = devm_kzalloc(dev, sizeof(*aemif), GFP_KERNEL); if (!aemif) return -ENOMEM; - pdata = dev_get_platdata(&pdev->dev); - dev_lookup = pdata ? pdata->dev_lookup : NULL; - platform_set_drvdata(pdev, aemif); - aemif->clk = devm_clk_get(dev, NULL); - if (IS_ERR(aemif->clk)) { - dev_err(dev, "cannot get clock 'aemif'\n"); - return PTR_ERR(aemif->clk); - } - - ret = clk_prepare_enable(aemif->clk); - if (ret) - return ret; + aemif->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(aemif->clk)) + return dev_err_probe(dev, PTR_ERR(aemif->clk), + "cannot get clock 'aemif'\n"); aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC; if (np && of_device_is_compatible(np, "ti,da850-aemif")) aemif->cs_offset = 2; - else if (pdata) - aemif->cs_offset = pdata->cs_offset; aemif->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(aemif->base)) { - ret = PTR_ERR(aemif->base); - goto error; - } + if (IS_ERR(aemif->base)) + return PTR_ERR(aemif->base); if (np) { /* @@ -374,17 +358,10 @@ static int aemif_probe(struct platform_device *pdev) * functions iterate over these nodes and update the cs data * array. 
*/ - for_each_available_child_of_node(np, child_np) { + for_each_available_child_of_node_scoped(np, child_np) { ret = of_aemif_parse_abus_config(pdev, child_np); - if (ret < 0) { - of_node_put(child_np); - goto error; - } - } - } else if (pdata && pdata->num_abus_data > 0) { - for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) { - aemif->cs_data[i].cs = pdata->abus_data[i].cs; - aemif_get_hw_params(pdev, i); + if (ret < 0) + return ret; } } @@ -393,7 +370,7 @@ static int aemif_probe(struct platform_device *pdev) if (ret < 0) { dev_err(dev, "Error configuring chip select %d\n", aemif->cs_data[i].cs); - goto error; + return ret; } } @@ -402,41 +379,18 @@ static int aemif_probe(struct platform_device *pdev) * child will be probed after the AEMIF timing parameters are set. */ if (np) { - for_each_available_child_of_node(np, child_np) { - ret = of_platform_populate(child_np, NULL, - dev_lookup, dev); - if (ret < 0) { - of_node_put(child_np); - goto error; - } - } - } else if (pdata) { - for (i = 0; i < pdata->num_sub_devices; i++) { - pdata->sub_devices[i].dev.parent = dev; - ret = platform_device_register(&pdata->sub_devices[i]); - if (ret) { - dev_warn(dev, "Error register sub device %s\n", - pdata->sub_devices[i].name); - } + for_each_available_child_of_node_scoped(np, child_np) { + ret = of_platform_populate(child_np, NULL, NULL, dev); + if (ret < 0) + return ret; } } return 0; -error: - clk_disable_unprepare(aemif->clk); - return ret; -} - -static void aemif_remove(struct platform_device *pdev) -{ - struct aemif_device *aemif = platform_get_drvdata(pdev); - - clk_disable_unprepare(aemif->clk); } static struct platform_driver aemif_driver = { .probe = aemif_probe, - .remove_new = aemif_remove, .driver = { .name = "ti-aemif", .of_match_table = of_match_ptr(aemif_of_match), diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index 246876ac713c25..ffdd8de9ec5d79 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include struct rtsx_usb_ms { struct platform_device *pdev; diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h index 3770cb1cff7d92..1167a16d8fb4be 100644 --- a/drivers/message/fusion/lsi/mpi_cnfg.h +++ b/drivers/message/fusion/lsi/mpi_cnfg.h @@ -1018,14 +1018,6 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL #define MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE (0x08) -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check Header.PageLength at runtime. - */ -#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX -#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX (1) -#endif - typedef struct _CONFIG_PAGE_IOC_2 { CONFIG_PAGE_HEADER Header; /* 00h */ @@ -1034,7 +1026,7 @@ typedef struct _CONFIG_PAGE_IOC_2 U8 MaxVolumes; /* 09h */ U8 NumActivePhysDisks; /* 0Ah */ U8 MaxPhysDisks; /* 0Bh */ - CONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */ + CONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[] __counted_by(NumActiveVolumes); /* 0Ch */ } CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2, IOCPage2_t, MPI_POINTER pIOCPage2_t; @@ -1064,21 +1056,13 @@ typedef struct _IOC_3_PHYS_DISK } IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK, Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t; -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check Header.PageLength at runtime. 
- */ -#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX -#define MPI_IOC_PAGE_3_PHYSDISK_MAX (1) -#endif - typedef struct _CONFIG_PAGE_IOC_3 { CONFIG_PAGE_HEADER Header; /* 00h */ U8 NumPhysDisks; /* 04h */ U8 Reserved1; /* 05h */ U16 Reserved2; /* 06h */ - IOC_3_PHYS_DISK PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */ + IOC_3_PHYS_DISK PhysDisk[] __counted_by(NumPhysDisks); /* 08h */ } CONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3, IOCPage3_t, MPI_POINTER pIOCPage3_t; @@ -1093,21 +1077,13 @@ typedef struct _IOC_4_SEP } IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP, Ioc4Sep_t, MPI_POINTER pIoc4Sep_t; -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check Header.PageLength at runtime. - */ -#ifndef MPI_IOC_PAGE_4_SEP_MAX -#define MPI_IOC_PAGE_4_SEP_MAX (1) -#endif - typedef struct _CONFIG_PAGE_IOC_4 { CONFIG_PAGE_HEADER Header; /* 00h */ U8 ActiveSEP; /* 04h */ U8 MaxSEP; /* 05h */ U16 Reserved1; /* 06h */ - IOC_4_SEP SEP[MPI_IOC_PAGE_4_SEP_MAX]; /* 08h */ + IOC_4_SEP SEP[] __counted_by(ActiveSEP); /* 08h */ } CONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4, IOCPage4_t, MPI_POINTER pIOCPage4_t; @@ -2295,14 +2271,6 @@ typedef struct _RAID_VOL0_SETTINGS #define MPI_RAID_HOT_SPARE_POOL_6 (0x40) #define MPI_RAID_HOT_SPARE_POOL_7 (0x80) -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check Header.PageLength at runtime. - */ -#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX -#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) -#endif - typedef struct _CONFIG_PAGE_RAID_VOL_0 { CONFIG_PAGE_HEADER Header; /* 00h */ @@ -2321,7 +2289,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0 U8 DataScrubRate; /* 25h */ U8 ResyncRate; /* 26h */ U8 InactiveStatus; /* 27h */ - RAID_VOL0_PHYS_DISK PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */ + RAID_VOL0_PHYS_DISK PhysDisk[] __counted_by(NumPhysDisks); /* 28h */ } CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0, RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t; @@ -2455,14 +2423,6 @@ typedef struct _RAID_PHYS_DISK1_PATH #define MPI_RAID_PHYSDISK1_FLAG_INVALID (0x0001) -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check Header.PageLength or NumPhysDiskPaths at runtime. - */ -#ifndef MPI_RAID_PHYS_DISK1_PATH_MAX -#define MPI_RAID_PHYS_DISK1_PATH_MAX (1) -#endif - typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1 { CONFIG_PAGE_HEADER Header; /* 00h */ @@ -2470,7 +2430,7 @@ typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1 U8 PhysDiskNum; /* 05h */ U16 Reserved2; /* 06h */ U32 Reserved1; /* 08h */ - RAID_PHYS_DISK1_PATH Path[MPI_RAID_PHYS_DISK1_PATH_MAX];/* 0Ch */ + RAID_PHYS_DISK1_PATH Path[] __counted_by(NumPhysDiskPaths);/* 0Ch */ } CONFIG_PAGE_RAID_PHYS_DISK_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_1, RaidPhysDiskPage1_t, MPI_POINTER pRaidPhysDiskPage1_t; @@ -2555,14 +2515,6 @@ typedef struct _MPI_SAS_IO_UNIT0_PHY_DATA } MPI_SAS_IO_UNIT0_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT0_PHY_DATA, SasIOUnit0PhyData, MPI_POINTER pSasIOUnit0PhyData; -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check Header.PageLength at runtime. 
- */ -#ifndef MPI_SAS_IOUNIT0_PHY_MAX -#define MPI_SAS_IOUNIT0_PHY_MAX (1) -#endif - typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0 { CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */ @@ -2571,7 +2523,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0 U8 NumPhys; /* 0Ch */ U8 Reserved2; /* 0Dh */ U16 Reserved3; /* 0Eh */ - MPI_SAS_IO_UNIT0_PHY_DATA PhyData[MPI_SAS_IOUNIT0_PHY_MAX]; /* 10h */ + MPI_SAS_IO_UNIT0_PHY_DATA PhyData[] __counted_by(NumPhys); /* 10h */ } CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0, SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t; diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 4bf669c5564932..738bc4e60a18b0 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1856,10 +1856,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) /* Initialize workqueue */ INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); - snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN, - "mpt_poll_%d", ioc->id); - ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name, - WQ_MEM_RECLAIM, 0); + ioc->reset_work_q = + alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM, 0, ioc->id); if (!ioc->reset_work_q) { printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n", ioc->name); @@ -1986,9 +1984,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ioc->fw_event_list); spin_lock_init(&ioc->fw_event_lock); - snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id); - ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name, - WQ_MEM_RECLAIM, 0); + ioc->fw_event_q = alloc_workqueue("mpt/%d", WQ_MEM_RECLAIM, 0, ioc->id); if (!ioc->fw_event_q) { printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n", ioc->name); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 8031173c3655d5..b406fd676da097 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -729,7 +729,6 @@ typedef struct _MPT_ADAPTER struct list_head fw_event_list; spinlock_t fw_event_lock; u8 fw_events_off; /* if '1', then ignore events */ - char fw_event_q_name[MPT_KOBJ_NAME_LEN]; struct mutex sas_discovery_mutex; u8 sas_discovery_runtime; @@ -764,7 +763,6 @@ typedef struct _MPT_ADAPTER u8 fc_link_speed[2]; spinlock_t fc_rescan_work_lock; struct work_struct fc_rescan_work; - char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN]; struct workqueue_struct *fc_rescan_work_q; /* driver forced bus resets count */ @@ -778,7 +776,6 @@ typedef struct _MPT_ADAPTER spinlock_t scsi_lookup_lock; u64 dma_mask; u32 broadcast_aen_busy; - char reset_work_q_name[MPT_KOBJ_NAME_LEN]; struct workqueue_struct *reset_work_q; struct delayed_work fault_reset_work; diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 9f3999750c23a5..77fa55df70d040 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -1609,7 +1609,7 @@ mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg) maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); - max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents; + max = min(maxEvents, MPTCTL_EVENT_LOG_SIZE); /* If fewer than 1 event is requested, there must have * been some type of error. 
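/*
 * [Editorial aside - illustrative sketch only, not part of the patch.]
 * The fusion hunks above replace open-coded constructs with generic
 * kernel helpers.  The example below (all identifiers invented for the
 * sketch) shows the three patterns in isolation:
 *  - __counted_by() ties a flexible array member to the field holding
 *    its element count, so FORTIFY_SOURCE/UBSAN can bounds-check it;
 *  - min() replaces the hand-rolled "a < b ? a : b" ternary and adds
 *    type checking of its operands;
 *  - alloc_workqueue() takes a printf-style format, so drivers no
 *    longer need to keep a private buffer just to build "name_%d".
 */
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_log {
	u8	num_entries;		/* count of used entry[] slots */
	u8	reserved[3];
	u32	entry[] __counted_by(num_entries);
};

static struct workqueue_struct *example_wq;

static unsigned int example_grant(unsigned int requested, unsigned int limit)
{
	/* Clamp the caller's request without an open-coded ternary. */
	return min(requested, limit);
}

static int example_init(int id)
{
	/* The per-instance name is formatted by alloc_workqueue() itself. */
	example_wq = alloc_workqueue("example_wq_%d", WQ_MEM_RECLAIM, 0, id);

	return example_wq ? 0 : -ENOMEM;
}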
@@ -2691,7 +2691,6 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg) static const struct file_operations mptctl_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .fasync = mptctl_fasync, .unlocked_ioctl = mptctl_ioctl, #ifdef CONFIG_COMPAT diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index a3c17c4fe69c54..91242f26defb00 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -1349,11 +1349,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* initialize workqueue */ - snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name), - "mptfc_wq_%d", sh->host_no); - ioc->fc_rescan_work_q = - alloc_ordered_workqueue(ioc->fc_rescan_work_q_name, - WQ_MEM_RECLAIM); + ioc->fc_rescan_work_q = alloc_ordered_workqueue( + "mptfc_wq_%d", WQ_MEM_RECLAIM, sh->host_no); if (!ioc->fc_rescan_work_q) { error = -ENOMEM; goto out_mptfc_host; diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c index 384ecf5301d25d..e9941da58b18b7 100644 --- a/drivers/mfd/88pm800.c +++ b/drivers/mfd/88pm800.c @@ -391,7 +391,7 @@ static void device_irq_exit_800(struct pm80x_chip *chip) regmap_del_irq_chip(chip->irq, chip->irq_data); } -static struct regmap_irq_chip pm800_irq_chip = { +static const struct regmap_irq_chip pm800_irq_chip = { .name = "88pm800", .irqs = pm800_irqs, .num_irqs = ARRAY_SIZE(pm800_irqs), diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c index 205f0762a92876..f5d6663172eeac 100644 --- a/drivers/mfd/88pm805.c +++ b/drivers/mfd/88pm805.c @@ -73,7 +73,7 @@ static const struct mfd_cell codec_devs[] = { }, }; -static struct regmap_irq pm805_irqs[] = { +static const struct regmap_irq pm805_irqs[] = { /* INT0 */ [PM805_IRQ_LDO_OFF] = { .mask = PM805_INT1_HP1_SHRT, @@ -163,7 +163,7 @@ static void device_irq_exit_805(struct pm80x_chip *chip) regmap_del_irq_chip(chip->irq, chip->irq_data); } -static struct regmap_irq_chip pm805_irq_chip = { +static const struct regmap_irq_chip pm805_irq_chip = { .name = "88pm805", .irqs = pm805_irqs, .num_irqs = ARRAY_SIZE(pm805_irqs), diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 7f003f71e1af11..8e68b64bd7f8ae 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c @@ -916,7 +916,7 @@ static void device_power_init(struct pm860x_chip *chip, power_devs[0].platform_data = pdata->power; power_devs[0].pdata_size = sizeof(struct pm860x_power_pdata); power_devs[0].num_resources = ARRAY_SIZE(battery_resources); - power_devs[0].resources = &battery_resources[0], + power_devs[0].resources = &battery_resources[0]; ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1, &battery_resources[0], chip->irq_base, NULL); if (ret < 0) @@ -925,7 +925,7 @@ static void device_power_init(struct pm860x_chip *chip, power_devs[1].platform_data = pdata->power; power_devs[1].pdata_size = sizeof(struct pm860x_power_pdata); power_devs[1].num_resources = ARRAY_SIZE(charger_resources); - power_devs[1].resources = &charger_resources[0], + power_devs[1].resources = &charger_resources[0]; ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1, &charger_resources[0], chip->irq_base, NULL); if (ret < 0) @@ -942,7 +942,7 @@ static void device_power_init(struct pm860x_chip *chip, pdata->chg_desc->charger_regulators = &chg_desc_regulator_data[0]; pdata->chg_desc->num_charger_regulators = - ARRAY_SIZE(chg_desc_regulator_data), + ARRAY_SIZE(chg_desc_regulator_data); power_devs[3].platform_data = pdata->chg_desc; power_devs[3].pdata_size = 
sizeof(*pdata->chg_desc); ret = mfd_add_devices(chip->dev, 0, &power_devs[3], 1, @@ -958,7 +958,7 @@ static void device_onkey_init(struct pm860x_chip *chip, int ret; onkey_devs[0].num_resources = ARRAY_SIZE(onkey_resources); - onkey_devs[0].resources = &onkey_resources[0], + onkey_devs[0].resources = &onkey_resources[0]; ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0], ARRAY_SIZE(onkey_devs), &onkey_resources[0], chip->irq_base, NULL); @@ -972,7 +972,7 @@ static void device_codec_init(struct pm860x_chip *chip, int ret; codec_devs[0].num_resources = ARRAY_SIZE(codec_resources); - codec_devs[0].resources = &codec_resources[0], + codec_devs[0].resources = &codec_resources[0]; ret = mfd_add_devices(chip->dev, 0, &codec_devs[0], ARRAY_SIZE(codec_devs), &codec_resources[0], 0, NULL); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index bc8be2e593b6bc..f9325bcce1b94e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -20,6 +20,18 @@ config MFD_CS5535 This is the core driver for CS5535/CS5536 MFD functions. This is necessary for using the board's GPIO and MFGPT functionality. +config MFD_ADP5585 + tristate "Analog Devices ADP5585 keypad decoder and I/O expander driver" + select MFD_CORE + select REGMAP_I2C + depends on I2C + depends on OF || COMPILE_TEST + help + Say yes here to add support for the Analog Devices ADP5585 GPIO + expander, PWM and keypad controller. This includes the I2C driver and + the core APIs _only_, you have to select individual components like + the GPIO and PWM functions under the corresponding menus. + config MFD_ALTERA_A10SR bool "Altera Arria10 DevKit System Resource chip" depends on ARCH_INTEL_SOCFPGA && SPI_MASTER=y && OF diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 02b651cd753521..2a9f91e81af836 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -193,6 +193,7 @@ obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o obj-$(CONFIG_PMIC_ADP5520) += adp5520.o +obj-$(CONFIG_MFD_ADP5585) += adp5585.o obj-$(CONFIG_MFD_KEMPLD) += kempld-core.o obj-$(CONFIG_MFD_INTEL_QUARK_I2C_GPIO) += intel_quark_i2c_gpio.o obj-$(CONFIG_LPC_SCH) += lpc_sch.o diff --git a/drivers/mfd/adp5585.c b/drivers/mfd/adp5585.c new file mode 100644 index 00000000000000..160e0b38106a6d --- /dev/null +++ b/drivers/mfd/adp5585.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Analog Devices ADP5585 I/O expander, PWM controller and keypad controller + * + * Copyright 2022 NXP + * Copyright 2024 Ideas on Board Oy + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct mfd_cell adp5585_devs[] = { + { .name = "adp5585-gpio", }, + { .name = "adp5585-pwm", }, +}; + +static const struct regmap_range adp5585_volatile_ranges[] = { + regmap_reg_range(ADP5585_ID, ADP5585_GPI_STATUS_B), +}; + +static const struct regmap_access_table adp5585_volatile_regs = { + .yes_ranges = adp5585_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(adp5585_volatile_ranges), +}; + +/* + * Chip variants differ in the default configuration of pull-up and pull-down + * resistors, and therefore have different default register values: + * + * - The -00, -01 and -03 variants (collectively referred to as + * ADP5585_REGMAP_00) have pull-up on all GPIO pins by default. + * - The -02 variant has no default pull-up or pull-down resistors. + * - The -04 variant has default pull-down resistors on all GPIO pins. 
+ */ + +static const u8 adp5585_regmap_defaults_00[ADP5585_MAX_REG + 1] = { + /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x18 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 adp5585_regmap_defaults_02[ADP5585_MAX_REG + 1] = { + /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, + /* 0x18 */ 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 adp5585_regmap_defaults_04[ADP5585_MAX_REG + 1] = { + /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, + /* 0x18 */ 0x05, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +enum adp5585_regmap_type { + ADP5585_REGMAP_00, + ADP5585_REGMAP_02, + ADP5585_REGMAP_04, +}; + +static const struct regmap_config adp5585_regmap_configs[] = { + [ADP5585_REGMAP_00] = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ADP5585_MAX_REG, + .volatile_table = &adp5585_volatile_regs, + .cache_type = REGCACHE_MAPLE, + .reg_defaults_raw = adp5585_regmap_defaults_00, + .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_00), + }, + [ADP5585_REGMAP_02] = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ADP5585_MAX_REG, + .volatile_table = &adp5585_volatile_regs, + .cache_type = REGCACHE_MAPLE, + .reg_defaults_raw = adp5585_regmap_defaults_02, + .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_02), + }, + [ADP5585_REGMAP_04] = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ADP5585_MAX_REG, + .volatile_table = &adp5585_volatile_regs, + .cache_type = REGCACHE_MAPLE, + .reg_defaults_raw = adp5585_regmap_defaults_04, + .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_04), + }, +}; + +static int adp5585_i2c_probe(struct i2c_client *i2c) +{ + const struct regmap_config *regmap_config; + struct adp5585_dev *adp5585; + unsigned int id; + int ret; + + adp5585 = devm_kzalloc(&i2c->dev, sizeof(*adp5585), GFP_KERNEL); + if (!adp5585) + return -ENOMEM; + + i2c_set_clientdata(i2c, adp5585); + + regmap_config = i2c_get_match_data(i2c); + adp5585->regmap = devm_regmap_init_i2c(i2c, regmap_config); + if (IS_ERR(adp5585->regmap)) + return dev_err_probe(&i2c->dev, PTR_ERR(adp5585->regmap), + "Failed to initialize register map\n"); + + ret = regmap_read(adp5585->regmap, ADP5585_ID, &id); + if (ret) + return dev_err_probe(&i2c->dev, ret, + "Failed to read device ID\n"); + + if ((id & ADP5585_MAN_ID_MASK) != ADP5585_MAN_ID_VALUE) + return dev_err_probe(&i2c->dev, -ENODEV, + "Invalid device ID 0x%02x\n", id); + + ret = devm_mfd_add_devices(&i2c->dev, 
PLATFORM_DEVID_AUTO, + adp5585_devs, ARRAY_SIZE(adp5585_devs), + NULL, 0, NULL); + if (ret) + return dev_err_probe(&i2c->dev, ret, + "Failed to add child devices\n"); + + return 0; +} + +static int adp5585_suspend(struct device *dev) +{ + struct adp5585_dev *adp5585 = dev_get_drvdata(dev); + + regcache_cache_only(adp5585->regmap, true); + + return 0; +} + +static int adp5585_resume(struct device *dev) +{ + struct adp5585_dev *adp5585 = dev_get_drvdata(dev); + + regcache_cache_only(adp5585->regmap, false); + regcache_mark_dirty(adp5585->regmap); + + return regcache_sync(adp5585->regmap); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(adp5585_pm, adp5585_suspend, adp5585_resume); + +static const struct of_device_id adp5585_of_match[] = { + { + .compatible = "adi,adp5585-00", + .data = &adp5585_regmap_configs[ADP5585_REGMAP_00], + }, { + .compatible = "adi,adp5585-01", + .data = &adp5585_regmap_configs[ADP5585_REGMAP_00], + }, { + .compatible = "adi,adp5585-02", + .data = &adp5585_regmap_configs[ADP5585_REGMAP_02], + }, { + .compatible = "adi,adp5585-03", + .data = &adp5585_regmap_configs[ADP5585_REGMAP_00], + }, { + .compatible = "adi,adp5585-04", + .data = &adp5585_regmap_configs[ADP5585_REGMAP_04], + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, adp5585_of_match); + +static struct i2c_driver adp5585_i2c_driver = { + .driver = { + .name = "adp5585", + .of_match_table = adp5585_of_match, + .pm = pm_sleep_ptr(&adp5585_pm), + }, + .probe = adp5585_i2c_probe, +}; +module_i2c_driver(adp5585_i2c_driver); + +MODULE_DESCRIPTION("ADP5585 core driver"); +MODULE_AUTHOR("Haibo Chen "); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/atc260x-core.c b/drivers/mfd/atc260x-core.c index 67473b58b03d6c..6b6d5f1b9d76b4 100644 --- a/drivers/mfd/atc260x-core.c +++ b/drivers/mfd/atc260x-core.c @@ -235,8 +235,8 @@ int atc260x_match_device(struct atc260x *atc260x, struct regmap_config *regmap_c mutex_init(atc260x->regmap_mutex); - regmap_cfg->lock = regmap_lock_mutex, - regmap_cfg->unlock = regmap_unlock_mutex, + regmap_cfg->lock = regmap_lock_mutex; + regmap_cfg->unlock = regmap_unlock_mutex; regmap_cfg->lock_arg = atc260x->regmap_mutex; return 0; diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index dacd3c96c9f57c..4051551757f2dc 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -209,13 +209,23 @@ static const struct regmap_access_table axp313a_volatile_table = { }; static const struct regmap_range axp717_writeable_ranges[] = { + regmap_reg_range(AXP717_PMU_FAULT, AXP717_MODULE_EN_CONTROL_1), + regmap_reg_range(AXP717_MIN_SYS_V_CONTROL, AXP717_BOOST_CONTROL), + regmap_reg_range(AXP717_VSYS_V_POWEROFF, AXP717_VSYS_V_POWEROFF), regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN), regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE), + regmap_reg_range(AXP717_ICC_CHG_SET, AXP717_CV_CHG_SET), regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL), + regmap_reg_range(AXP717_ADC_CH_EN_CONTROL, AXP717_ADC_CH_EN_CONTROL), + regmap_reg_range(AXP717_ADC_DATA_SEL, AXP717_ADC_DATA_SEL), }; static const struct regmap_range axp717_volatile_ranges[] = { + regmap_reg_range(AXP717_ON_INDICATE, AXP717_PMU_FAULT), regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE), + regmap_reg_range(AXP717_BATT_PERCENT_DATA, AXP717_BATT_PERCENT_DATA), + regmap_reg_range(AXP717_BATT_V_H, AXP717_BATT_CHRG_I_L), + regmap_reg_range(AXP717_ADC_DATA_H, AXP717_ADC_DATA_L), }; static const struct regmap_access_table axp717_writeable_table = { @@ -308,6 +318,12 @@ static const struct resource 
axp22x_usb_power_supply_resources[] = { DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"), }; +static const struct resource axp717_usb_power_supply_resources[] = { + DEFINE_RES_IRQ_NAMED(AXP717_IRQ_VBUS_OVER_V, "VBUS_OVER_V"), + DEFINE_RES_IRQ_NAMED(AXP717_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"), + DEFINE_RES_IRQ_NAMED(AXP717_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"), +}; + /* AXP803 and AXP813/AXP818 share the same interrupts */ static const struct resource axp803_usb_power_supply_resources[] = { DEFINE_RES_IRQ_NAMED(AXP803_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"), @@ -422,7 +438,7 @@ static const struct regmap_config axp717_regmap_config = { .val_bits = 8, .wr_table = &axp717_writeable_table, .volatile_table = &axp717_volatile_table, - .max_register = AXP717_CPUSLDO_CONTROL, + .max_register = AXP717_ADC_DATA_L, .cache_type = REGCACHE_MAPLE, }; @@ -1024,6 +1040,13 @@ static struct mfd_cell axp313a_cells[] = { static struct mfd_cell axp717_cells[] = { MFD_CELL_NAME("axp20x-regulator"), MFD_CELL_RES("axp20x-pek", axp717_pek_resources), + MFD_CELL_OF("axp717-adc", + NULL, NULL, 0, 0, "x-powers,axp717-adc"), + MFD_CELL_OF("axp20x-usb-power-supply", + axp717_usb_power_supply_resources, NULL, 0, 0, + "x-powers,axp717-usb-power-supply"), + MFD_CELL_OF("axp20x-battery-power-supply", + NULL, NULL, 0, 0, "x-powers,axp717-battery-power-supply"), }; static const struct resource axp288_adc_resources[] = { diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c index e7c2ac74d99888..db8c2963fb485f 100644 --- a/drivers/mfd/bd9571mwv.c +++ b/drivers/mfd/bd9571mwv.c @@ -93,7 +93,7 @@ static const struct regmap_irq bd9571mwv_irqs[] = { BD9571MWV_INT_INTREQ_BKUP_TRG_INT), }; -static struct regmap_irq_chip bd9571mwv_irq_chip = { +static const struct regmap_irq_chip bd9571mwv_irq_chip = { .name = "bd9571mwv", .status_base = BD9571MWV_INT_INTREQ, .mask_base = BD9571MWV_INT_INTMASK, @@ -159,7 +159,7 @@ static const struct regmap_config bd9574mwf_regmap_config = { .max_register = 0xff, }; -static struct regmap_irq_chip bd9574mwf_irq_chip = { +static const struct regmap_irq_chip bd9574mwf_irq_chip = { .name = "bd9574mwf", .status_base = BD9571MWV_INT_INTREQ, .mask_base = BD9571MWV_INT_INTMASK, diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index e2aae89186795c..f3dc812b359f34 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space + * ChromeOS Embedded Controller * * Copyright (C) 2014 Google, Inc. 
*/ @@ -353,22 +353,17 @@ static int __init cros_ec_dev_init(void) { int ret; - ret = class_register(&cros_class); + ret = class_register(&cros_class); if (ret) { pr_err(CROS_EC_DEV_NAME ": failed to register device class\n"); return ret; } - /* Register the driver */ ret = platform_driver_register(&cros_ec_dev_driver); - if (ret < 0) { + if (ret) { pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret); - goto failed_devreg; + class_unregister(&cros_class); } - return 0; - -failed_devreg: - class_unregister(&cros_class); return ret; } @@ -382,6 +377,6 @@ module_init(cros_ec_dev_init); module_exit(cros_ec_dev_exit); MODULE_AUTHOR("Bill Richardson "); -MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller"); +MODULE_DESCRIPTION("ChromeOS Embedded Controller"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c index dbbc4779170a0f..637c5f47a4b0c7 100644 --- a/drivers/mfd/da9062-core.c +++ b/drivers/mfd/da9062-core.c @@ -25,7 +25,7 @@ #define DA9062_IRQ_LOW 0 #define DA9062_IRQ_HIGH 1 -static struct regmap_irq da9061_irqs[] = { +static const struct regmap_irq da9061_irqs[] = { /* EVENT A */ [DA9061_IRQ_ONKEY] = { .reg_offset = DA9062_REG_EVENT_A_OFFSET, @@ -79,7 +79,7 @@ static struct regmap_irq da9061_irqs[] = { }, }; -static struct regmap_irq_chip da9061_irq_chip = { +static const struct regmap_irq_chip da9061_irq_chip = { .name = "da9061-irq", .irqs = da9061_irqs, .num_irqs = DA9061_NUM_IRQ, @@ -89,7 +89,7 @@ static struct regmap_irq_chip da9061_irq_chip = { .ack_base = DA9062AA_EVENT_A, }; -static struct regmap_irq da9062_irqs[] = { +static const struct regmap_irq da9062_irqs[] = { /* EVENT A */ [DA9062_IRQ_ONKEY] = { .reg_offset = DA9062_REG_EVENT_A_OFFSET, @@ -151,7 +151,7 @@ static struct regmap_irq da9062_irqs[] = { }, }; -static struct regmap_irq_chip da9062_irq_chip = { +static const struct regmap_irq_chip da9062_irq_chip = { .name = "da9062-irq", .irqs = da9062_irqs, .num_irqs = DA9062_NUM_IRQ, @@ -470,7 +470,7 @@ static const struct regmap_range_cfg da9061_range_cfg[] = { } }; -static struct regmap_config da9061_regmap_config = { +static const struct regmap_config da9061_regmap_config = { .reg_bits = 8, .val_bits = 8, .ranges = da9061_range_cfg, @@ -576,7 +576,7 @@ static const struct regmap_range_cfg da9062_range_cfg[] = { } }; -static struct regmap_config da9062_regmap_config = { +static const struct regmap_config da9062_regmap_config = { .reg_bits = 8, .val_bits = 8, .ranges = da9062_range_cfg, diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c index 74f38bf3778f89..2e4ab24041547c 100644 --- a/drivers/mfd/fsl-imx25-tsadc.c +++ b/drivers/mfd/fsl-imx25-tsadc.c @@ -16,7 +16,7 @@ #include #include -static struct regmap_config mx25_tsadc_regmap_config = { +static const struct regmap_config mx25_tsadc_regmap_config = { .fast_io = true, .max_register = 8, .reg_bits = 32, diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c index b02bfdc871e9ec..a3301502ce6a5b 100644 --- a/drivers/mfd/gateworks-gsc.c +++ b/drivers/mfd/gateworks-gsc.c @@ -20,7 +20,7 @@ #include #include -#include +#include /* * The GSC suffers from an errata where occasionally during @@ -160,7 +160,7 @@ static const struct of_device_id gsc_of_match[] = { }; MODULE_DEVICE_TABLE(of, gsc_of_match); -static struct regmap_bus gsc_regmap_bus = { +static const struct regmap_bus gsc_regmap_bus = { .reg_read = gsc_read, .reg_write = gsc_write, }; diff --git a/drivers/mfd/hi655x-pmic.c 
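Editorial aside, not part of the patch: many of the hunks in this area simply constify regmap_config, regmap_bus and regmap_irq_chip descriptors. The regmap core accepts these as const pointers, so drivers can declare the tables const and let them live in rodata, e.g. (illustrative foo_* names):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x3f,
};

static int foo_probe(struct i2c_client *i2c)
{
	/* devm_regmap_init_i2c() takes a const config, so no writable copy is needed. */
	struct regmap *map = devm_regmap_init_i2c(i2c, &foo_regmap_config);

	return PTR_ERR_OR_ZERO(map);
}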
b/drivers/mfd/hi655x-pmic.c index 042109304db419..5f61909c85e926 100644 --- a/drivers/mfd/hi655x-pmic.c +++ b/drivers/mfd/hi655x-pmic.c @@ -41,7 +41,7 @@ static const struct regmap_irq_chip hi655x_irq_chip = { .mask_base = HI655X_IRQ_MASK_BASE, }; -static struct regmap_config hi655x_regmap_config = { +static const struct regmap_config hi655x_regmap_config = { .reg_bits = 32, .reg_stride = HI655X_STRIDE, .val_bits = 8, diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 1362b3f64ade61..1d8cdc4d5819b6 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -424,6 +424,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_spi_info }, { PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_spi_info }, { PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info }, + /* ARL-H */ + { PCI_VDEVICE(INTEL, 0x7725), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x7726), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x7727), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0x7730), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0x7746), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0x7750), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x7751), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x7752), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x7778), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x7779), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x777a), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x777b), (kernel_ulong_t)&bxt_i2c_info }, /* RPL-S */ { PCI_VDEVICE(INTEL, 0x7a28), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x7a29), (kernel_ulong_t)&bxt_uart_info }, @@ -594,6 +607,32 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info }, { PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info }, { PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info }, + /* PTL-H */ + { PCI_VDEVICE(INTEL, 0xe325), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xe326), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xe327), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0xe330), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0xe346), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0xe350), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe351), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe352), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xe378), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe379), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe37a), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe37b), (kernel_ulong_t)&ehl_i2c_info }, + /* PTL-P */ + { PCI_VDEVICE(INTEL, 0xe425), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xe426), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xe427), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0xe430), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0xe446), (kernel_ulong_t)&tgl_spi_info }, + { PCI_VDEVICE(INTEL, 0xe450), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe451), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe452), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xe478), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe479), (kernel_ulong_t)&ehl_i2c_info }, + { 
PCI_VDEVICE(INTEL, 0xe47a), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xe47b), (kernel_ulong_t)&ehl_i2c_info }, { } }; MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); diff --git a/drivers/mfd/intel-m10-bmc-pmci.c b/drivers/mfd/intel-m10-bmc-pmci.c index 698c5933938b79..4fa9d380c62b5f 100644 --- a/drivers/mfd/intel-m10-bmc-pmci.c +++ b/drivers/mfd/intel-m10-bmc-pmci.c @@ -336,7 +336,7 @@ static const struct regmap_access_table m10bmc_pmci_access_table = { .n_yes_ranges = ARRAY_SIZE(m10bmc_pmci_regmap_range), }; -static struct regmap_config m10bmc_pmci_regmap_config = { +static const struct regmap_config m10bmc_pmci_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, diff --git a/drivers/mfd/intel-m10-bmc-spi.c b/drivers/mfd/intel-m10-bmc-spi.c index d64d28199df656..36f631ef7a6774 100644 --- a/drivers/mfd/intel-m10-bmc-spi.c +++ b/drivers/mfd/intel-m10-bmc-spi.c @@ -24,7 +24,7 @@ static const struct regmap_access_table m10bmc_access_table = { .n_yes_ranges = ARRAY_SIZE(m10bmc_regmap_range), }; -static struct regmap_config intel_m10bmc_regmap_config = { +static const struct regmap_config intel_m10bmc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/mfd/intel_pmc_bxt.c b/drivers/mfd/intel_pmc_bxt.c index 9f01d38acc7f5b..e405d7513ca170 100644 --- a/drivers/mfd/intel_pmc_bxt.c +++ b/drivers/mfd/intel_pmc_bxt.c @@ -23,8 +23,7 @@ #include #include #include - -#include +#include /* Residency with clock rate at 19.2MHz to usecs */ #define S0IX_RESIDENCY_IN_USECS(d, s) \ diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c index ba32cacfc499f3..ccd76800d8e49b 100644 --- a/drivers/mfd/intel_soc_pmic_bxtwc.c +++ b/drivers/mfd/intel_soc_pmic_bxtwc.c @@ -15,8 +15,7 @@ #include #include #include - -#include +#include /* PMIC device registers */ #define REG_ADDR_MASK GENMASK(15, 8) @@ -138,7 +137,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_crit[] = { REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, GENMASK(1, 0)), }; -static struct regmap_irq_chip bxtwc_regmap_irq_chip = { +static const struct regmap_irq_chip bxtwc_regmap_irq_chip = { .name = "bxtwc_irq_chip", .status_base = BXTWC_IRQLVL1, .mask_base = BXTWC_MIRQLVL1, @@ -147,7 +146,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = { .num_regs = 1, }; -static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = { +static const struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = { .name = "bxtwc_irq_chip_pwrbtn", .status_base = BXTWC_PWRBTNIRQ, .mask_base = BXTWC_MPWRBTNIRQ, @@ -156,7 +155,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = { .num_regs = 1, }; -static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = { +static const struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = { .name = "bxtwc_irq_chip_tmu", .status_base = BXTWC_TMUIRQ, .mask_base = BXTWC_MTMUIRQ, @@ -165,7 +164,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = { .num_regs = 1, }; -static struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = { +static const struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = { .name = "bxtwc_irq_chip_bcu", .status_base = BXTWC_BCUIRQ, .mask_base = BXTWC_MBCUIRQ, @@ -174,7 +173,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = { .num_regs = 1, }; -static struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = { +static const struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = { .name = "bxtwc_irq_chip_adc", .status_base = BXTWC_ADCIRQ, .mask_base = BXTWC_MADCIRQ, @@ -183,7 +182,7 @@ static struct regmap_irq_chip 
bxtwc_regmap_irq_chip_adc = { .num_regs = 1, }; -static struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = { +static const struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = { .name = "bxtwc_irq_chip_chgr", .status_base = BXTWC_CHGR0IRQ, .mask_base = BXTWC_MCHGR0IRQ, @@ -192,7 +191,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = { .num_regs = 2, }; -static struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = { +static const struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = { .name = "bxtwc_irq_chip_crit", .status_base = BXTWC_CRITIRQ, .mask_base = BXTWC_MCRITIRQ, diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c index 7fce3ef7ab453d..2a83f540d4c9d8 100644 --- a/drivers/mfd/intel_soc_pmic_chtwc.c +++ b/drivers/mfd/intel_soc_pmic_chtwc.c @@ -178,7 +178,6 @@ static const struct dmi_system_id cht_wc_model_dmi_ids[] = { .driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YT3_X90, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, }, diff --git a/drivers/mfd/intel_soc_pmic_mrfld.c b/drivers/mfd/intel_soc_pmic_mrfld.c index 71da861e8c271d..77121775c1a35a 100644 --- a/drivers/mfd/intel_soc_pmic_mrfld.c +++ b/drivers/mfd/intel_soc_pmic_mrfld.c @@ -12,11 +12,10 @@ #include #include #include +#include #include #include -#include - /* * Level 2 IRQs * diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c index 1b465590567cf4..ee017617d1d1bb 100644 --- a/drivers/mfd/iqs62x.c +++ b/drivers/mfd/iqs62x.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #define IQS62X_PROD_NUM 0x00 diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index 67bf4de4c0c1f4..6fce79ec2dc646 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -143,6 +143,7 @@ static const struct of_device_id max14577_dt_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, max14577_dt_match); static bool max14577_muic_volatile_reg(struct device *dev, unsigned int reg) { diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c index 74ef3f6d576cca..89b30ef91f4f11 100644 --- a/drivers/mfd/max77620.c +++ b/drivers/mfd/max77620.c @@ -400,7 +400,7 @@ static int max77620_config_fps(struct max77620_chip *chip, static int max77620_initialise_fps(struct max77620_chip *chip) { struct device *dev = chip->dev; - struct device_node *fps_np, *fps_child; + struct device_node *fps_np; u8 config; int fps_id; int ret; @@ -414,10 +414,9 @@ static int max77620_initialise_fps(struct max77620_chip *chip) if (!fps_np) goto skip_fps; - for_each_child_of_node(fps_np, fps_child) { + for_each_child_of_node_scoped(fps_np, fps_child) { ret = max77620_config_fps(chip, fps_child); if (ret < 0) { - of_node_put(fps_child); of_node_put(fps_np); return ret; } diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c index c973e2579bdfb6..9f438d5d4326cb 100644 --- a/drivers/mfd/mc13xxx-spi.c +++ b/drivers/mfd/mc13xxx-spi.c @@ -116,7 +116,7 @@ static int mc13xxx_spi_write(void *context, const void *data, size_t count) * single transfer. 
*/ -static struct regmap_bus regmap_mc13xxx_bus = { +static const struct regmap_bus regmap_mc13xxx_bus = { .write = mc13xxx_spi_write, .read = mc13xxx_spi_read, }; diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c index 2685efa5c9e2d3..b9b1036c8ff463 100644 --- a/drivers/mfd/mt6360-core.c +++ b/drivers/mfd/mt6360-core.c @@ -5,6 +5,7 @@ * Author: Gene Chen */ +#include #include #include #include @@ -404,7 +405,6 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size, u8 reg_addr = *(u8 *)(reg + 1); struct i2c_client *i2c; bool crc_needed = false; - u8 *buf; int buf_len = MT6360_ALLOC_READ_SIZE(val_size); int read_size = val_size; u8 crc; @@ -423,7 +423,7 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size, read_size += MT6360_CRC_CRC8_SIZE; } - buf = kzalloc(buf_len, GFP_KERNEL); + u8 *buf __free(kfree) = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -433,24 +433,19 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size, ret = i2c_smbus_read_i2c_block_data(i2c, reg_addr, read_size, buf + MT6360_CRC_PREDATA_OFFSET); if (ret < 0) - goto out; - else if (ret != read_size) { - ret = -EIO; - goto out; - } + return ret; + else if (ret != read_size) + return -EIO; if (crc_needed) { crc = crc8(ddata->crc8_tbl, buf, val_size + MT6360_CRC_PREDATA_OFFSET, 0); - if (crc != buf[val_size + MT6360_CRC_PREDATA_OFFSET]) { - ret = -EIO; - goto out; - } + if (crc != buf[val_size + MT6360_CRC_PREDATA_OFFSET]) + return -EIO; } memcpy(val, buf + MT6360_CRC_PREDATA_OFFSET, val_size); -out: - kfree(buf); - return (ret < 0) ? ret : 0; + + return 0; } static int mt6360_regmap_write(void *context, const void *val, size_t val_size) diff --git a/drivers/mfd/ntxec.c b/drivers/mfd/ntxec.c index 4416cd37e539bf..08c68de0f01bce 100644 --- a/drivers/mfd/ntxec.c +++ b/drivers/mfd/ntxec.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #define NTXEC_REG_VERSION 0x00 #define NTXEC_REG_POWEROFF 0x50 diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c index eab5bf6cff105a..b4b178caf754d8 100644 --- a/drivers/mfd/qcom-spmi-pmic.c +++ b/drivers/mfd/qcom-spmi-pmic.c @@ -84,7 +84,6 @@ static const struct of_device_id pmic_spmi_id_table[] = { static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx) { struct device_node *spmi_bus; - struct device_node *child; int function_parent_usid, ret; u32 pmic_addr; @@ -108,10 +107,9 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, str */ spmi_bus = of_get_parent(sdev->dev.of_node); sdev = ERR_PTR(-ENODATA); - for_each_child_of_node(spmi_bus, child) { + for_each_child_of_node_scoped(spmi_bus, child) { ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr); if (ret) { - of_node_put(child); sdev = ERR_PTR(ret); break; } @@ -125,7 +123,6 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, str */ sdev = ERR_PTR(-EPROBE_DEFER); } - of_node_put(child); break; } } diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c index ef326d6d566e69..c1b78d127a261e 100644 --- a/drivers/mfd/rave-sp.c +++ b/drivers/mfd/rave-sp.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include /* * UART protocol using following entities: diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c index 9184e553fafdde..1d43458b4938a5 100644 --- a/drivers/mfd/retu-mfd.c +++ b/drivers/mfd/retu-mfd.c @@ -65,13 +65,13 @@ static const struct mfd_cell retu_devs[] 
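Editorial aside, not part of the patch: the max77620, qcom-spmi-pmic, mt6360 and syscon hunks in this region move to scope-based cleanup (for_each_child_of_node_scoped(), __free(kfree), return_ptr()), which removes the manual of_node_put()/kfree() unwinding on early returns. A hypothetical parser using the same helpers (foo_* names and the property layout are invented):

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/slab.h>

struct foo_cfg {
	u32 regs[8];
	unsigned int count;
};

static struct foo_cfg *foo_parse(struct device_node *parent)
{
	struct foo_cfg *cfg __free(kfree) = kzalloc(sizeof(*cfg), GFP_KERNEL);

	if (!cfg)
		return ERR_PTR(-ENOMEM);

	for_each_child_of_node_scoped(parent, child) {
		u32 val;

		/* Early returns drop both 'child' and 'cfg' automatically. */
		if (of_property_read_u32(child, "reg", &val))
			return ERR_PTR(-EINVAL);
		if (cfg->count < ARRAY_SIZE(cfg->regs))
			cfg->regs[cfg->count++] = val;
	}

	/* Transfer ownership to the caller instead of auto-freeing. */
	return_ptr(cfg);
}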
= { } }; -static struct regmap_irq retu_irqs[] = { +static const struct regmap_irq retu_irqs[] = { [RETU_INT_PWR] = { .mask = 1 << RETU_INT_PWR, } }; -static struct regmap_irq_chip retu_irq_chip = { +static const struct regmap_irq_chip retu_irq_chip = { .name = "RETU", .irqs = retu_irqs, .num_irqs = ARRAY_SIZE(retu_irqs), @@ -101,13 +101,13 @@ static const struct mfd_cell tahvo_devs[] = { }, }; -static struct regmap_irq tahvo_irqs[] = { +static const struct regmap_irq tahvo_irqs[] = { [TAHVO_INT_VBUS] = { .mask = 1 << TAHVO_INT_VBUS, } }; -static struct regmap_irq_chip tahvo_irq_chip = { +static const struct regmap_irq_chip tahvo_irq_chip = { .name = "TAHVO", .irqs = tahvo_irqs, .num_irqs = ARRAY_SIZE(tahvo_irqs), @@ -120,7 +120,7 @@ static struct regmap_irq_chip tahvo_irq_chip = { static const struct retu_data { char *chip_name; char *companion_name; - struct regmap_irq_chip *irq_chip; + const struct regmap_irq_chip *irq_chip; const struct mfd_cell *children; int nchildren; } retu_data[] = { @@ -216,7 +216,7 @@ static int retu_regmap_write(void *context, const void *data, size_t count) return i2c_smbus_write_word_data(i2c, reg, val); } -static struct regmap_bus retu_bus = { +static const struct regmap_bus retu_bus = { .read = retu_regmap_read, .write = retu_regmap_write, .val_format_endian_default = REGMAP_ENDIAN_NATIVE, diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c index 5eda3c0dbbdf11..39ab114ea669d1 100644 --- a/drivers/mfd/rk8xx-core.c +++ b/drivers/mfd/rk8xx-core.c @@ -531,7 +531,7 @@ static const struct regmap_irq rk817_irqs[RK817_IRQ_END] = { REGMAP_IRQ_REG_LINE(23, 8) }; -static struct regmap_irq_chip rk805_irq_chip = { +static const struct regmap_irq_chip rk805_irq_chip = { .name = "rk805", .irqs = rk805_irqs, .num_irqs = ARRAY_SIZE(rk805_irqs), @@ -542,7 +542,7 @@ static struct regmap_irq_chip rk805_irq_chip = { .init_ack_masked = true, }; -static struct regmap_irq_chip rk806_irq_chip = { +static const struct regmap_irq_chip rk806_irq_chip = { .name = "rk806", .irqs = rk806_irqs, .num_irqs = ARRAY_SIZE(rk806_irqs), @@ -578,7 +578,7 @@ static const struct regmap_irq_chip rk816_irq_chip = { .init_ack_masked = true, }; -static struct regmap_irq_chip rk817_irq_chip = { +static const struct regmap_irq_chip rk817_irq_chip = { .name = "rk817", .irqs = rk817_irqs, .num_irqs = ARRAY_SIZE(rk817_irqs), diff --git a/drivers/mfd/rk8xx-i2c.c b/drivers/mfd/rk8xx-i2c.c index 69a6b297d72381..37287b06dab0d4 100644 --- a/drivers/mfd/rk8xx-i2c.c +++ b/drivers/mfd/rk8xx-i2c.c @@ -21,6 +21,17 @@ struct rk8xx_i2c_platform_data { int variant; }; +static bool rk806_is_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case RK806_POWER_EN0 ... RK806_POWER_EN5: + case RK806_DVS_START_CTRL ... 
RK806_INT_MSK1: + return true; + } + + return false; +} + static bool rk808_is_volatile_reg(struct device *dev, unsigned int reg) { /* @@ -121,6 +132,14 @@ static const struct regmap_config rk805_regmap_config = { .volatile_reg = rk808_is_volatile_reg, }; +static const struct regmap_config rk806_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = RK806_BUCK_RSERVE_REG5, + .cache_type = REGCACHE_MAPLE, + .volatile_reg = rk806_is_volatile_reg, +}; + static const struct regmap_config rk808_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -150,6 +169,11 @@ static const struct rk8xx_i2c_platform_data rk805_data = { .variant = RK805_ID, }; +static const struct rk8xx_i2c_platform_data rk806_data = { + .regmap_cfg = &rk806_regmap_config, + .variant = RK806_ID, +}; + static const struct rk8xx_i2c_platform_data rk808_data = { .regmap_cfg = &rk808_regmap_config, .variant = RK808_ID, @@ -201,6 +225,7 @@ static SIMPLE_DEV_PM_OPS(rk8xx_i2c_pm_ops, rk8xx_suspend, rk8xx_resume); static const struct of_device_id rk8xx_i2c_of_match[] = { { .compatible = "rockchip,rk805", .data = &rk805_data }, + { .compatible = "rockchip,rk806", .data = &rk806_data }, { .compatible = "rockchip,rk808", .data = &rk808_data }, { .compatible = "rockchip,rk809", .data = &rk809_data }, { .compatible = "rockchip,rk816", .data = &rk816_data }, diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c index 5b4290f116fc78..39f7514aa3d875 100644 --- a/drivers/mfd/rohm-bd71828.c +++ b/drivers/mfd/rohm-bd71828.c @@ -316,7 +316,7 @@ static const struct regmap_irq bd71815_irqs[] = { REGMAP_IRQ_REG(BD71815_INT_RTC2, 11, BD71815_INT_RTC2_MASK), }; -static struct regmap_irq bd71828_irqs[] = { +static const struct regmap_irq bd71828_irqs[] = { REGMAP_IRQ_REG(BD71828_INT_BUCK1_OCP, 0, BD71828_INT_BUCK1_OCP_MASK), REGMAP_IRQ_REG(BD71828_INT_BUCK2_OCP, 0, BD71828_INT_BUCK2_OCP_MASK), REGMAP_IRQ_REG(BD71828_INT_BUCK3_OCP, 0, BD71828_INT_BUCK3_OCP_MASK), @@ -407,7 +407,7 @@ static struct regmap_irq bd71828_irqs[] = { REGMAP_IRQ_REG(BD71828_INT_RTC2, 11, BD71828_INT_RTC2_MASK), }; -static struct regmap_irq_chip bd71828_irq_chip = { +static const struct regmap_irq_chip bd71828_irq_chip = { .name = "bd71828_irq", .main_status = BD71828_REG_INT_MAIN, .irqs = &bd71828_irqs[0], @@ -423,7 +423,7 @@ static struct regmap_irq_chip bd71828_irq_chip = { .irq_reg_stride = 1, }; -static struct regmap_irq_chip bd71815_irq_chip = { +static const struct regmap_irq_chip bd71815_irq_chip = { .name = "bd71815_irq", .main_status = BD71815_REG_INT_STAT, .irqs = &bd71815_irqs[0], @@ -491,7 +491,7 @@ static int bd71828_i2c_probe(struct i2c_client *i2c) int ret; struct regmap *regmap; const struct regmap_config *regmap_config; - struct regmap_irq_chip *irqchip; + const struct regmap_irq_chip *irqchip; unsigned int chip_type; struct mfd_cell *mfd; int cells; diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c index 7755a4c073bfeb..25e494a93d48e5 100644 --- a/drivers/mfd/rohm-bd718x7.c +++ b/drivers/mfd/rohm-bd718x7.c @@ -60,7 +60,7 @@ static const struct regmap_irq bd718xx_irqs[] = { REGMAP_IRQ_REG(BD718XX_INT_STBY_REQ, 0, BD718XX_INT_STBY_REQ_MASK), }; -static struct regmap_irq_chip bd718xx_irq_chip = { +static const struct regmap_irq_chip bd718xx_irq_chip = { .name = "bd718xx-irq", .irqs = bd718xx_irqs, .num_irqs = ARRAY_SIZE(bd718xx_irqs), diff --git a/drivers/mfd/rohm-bd9576.c b/drivers/mfd/rohm-bd9576.c index 3a9f61961721b0..17323ae3980301 100644 --- a/drivers/mfd/rohm-bd9576.c +++ b/drivers/mfd/rohm-bd9576.c @@ -57,7 
+57,7 @@ static const struct regmap_access_table volatile_regs = { .n_yes_ranges = ARRAY_SIZE(volatile_ranges), }; -static struct regmap_config bd957x_regmap = { +static const struct regmap_config bd957x_regmap = { .reg_bits = 8, .val_bits = 8, .volatile_table = &volatile_regs, @@ -65,7 +65,7 @@ static struct regmap_config bd957x_regmap = { .cache_type = REGCACHE_MAPLE, }; -static struct regmap_irq bd9576_irqs[] = { +static const struct regmap_irq bd9576_irqs[] = { REGMAP_IRQ_REG(BD9576_INT_THERM, 0, BD957X_MASK_INT_MAIN_THERM), REGMAP_IRQ_REG(BD9576_INT_OVP, 0, BD957X_MASK_INT_MAIN_OVP), REGMAP_IRQ_REG(BD9576_INT_SCP, 0, BD957X_MASK_INT_MAIN_SCP), @@ -76,7 +76,7 @@ static struct regmap_irq bd9576_irqs[] = { REGMAP_IRQ_REG(BD9576_INT_SYS, 0, BD957X_MASK_INT_MAIN_SYS), }; -static struct regmap_irq_chip bd9576_irq_chip = { +static const struct regmap_irq_chip bd9576_irq_chip = { .name = "bd9576_irq", .irqs = &bd9576_irqs[0], .num_irqs = ARRAY_SIZE(bd9576_irqs), diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c index c9a0ec084aa85a..3bb2decfebd31a 100644 --- a/drivers/mfd/si476x-cmd.c +++ b/drivers/mfd/si476x-cmd.c @@ -20,7 +20,7 @@ #include -#include +#include #define msb(x) ((u8)((u16) x >> 8)) #define lsb(x) ((u8)((u16) x & 0x00FF)) diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index 81e517cdfb27d9..7186e2108108f0 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c @@ -135,7 +135,7 @@ static int sprd_pmic_spi_read(void *context, return 0; } -static struct regmap_bus sprd_pmic_regmap = { +static const struct regmap_bus sprd_pmic_regmap = { .write = sprd_pmic_spi_write, .read = sprd_pmic_spi_read, .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 33f1e07ab24dc2..2ce15f60eb1071 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -8,6 +8,7 @@ * Author: Dong Aisheng */ +#include #include #include #include @@ -45,7 +46,6 @@ static const struct regmap_config syscon_regmap_config = { static struct syscon *of_syscon_register(struct device_node *np, bool check_res) { struct clk *clk; - struct syscon *syscon; struct regmap *regmap; void __iomem *base; u32 reg_io_width; @@ -54,20 +54,16 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res) struct resource res; struct reset_control *reset; - syscon = kzalloc(sizeof(*syscon), GFP_KERNEL); + struct syscon *syscon __free(kfree) = kzalloc(sizeof(*syscon), GFP_KERNEL); if (!syscon) return ERR_PTR(-ENOMEM); - if (of_address_to_resource(np, 0, &res)) { - ret = -ENOMEM; - goto err_map; - } + if (of_address_to_resource(np, 0, &res)) + return ERR_PTR(-ENOMEM); base = of_iomap(np, 0); - if (!base) { - ret = -ENOMEM; - goto err_map; - } + if (!base) + return ERR_PTR(-ENOMEM); /* Parse the device's DT node for an endianness specification */ if (of_property_read_bool(np, "big-endian")) @@ -152,7 +148,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res) list_add_tail(&syscon->list, &syscon_list); spin_unlock(&syscon_list_slock); - return syscon; + return_ptr(syscon); err_reset: reset_control_put(reset); @@ -163,8 +159,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res) regmap_exit(regmap); err_regmap: iounmap(base); -err_map: - kfree(syscon); return ERR_PTR(ret); } diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index db28eb0c8995f2..ef953ee7314536 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c @@ 
-312,8 +312,6 @@ static int tc3589x_device_init(struct tc3589x *tc3589x) } static const struct of_device_id tc3589x_match[] = { - /* Legacy compatible string */ - { .compatible = "tc3589x", .data = (void *) TC3589X_UNKNOWN }, { .compatible = "toshiba,tc35890", .data = (void *) TC3589X_TC35890 }, { .compatible = "toshiba,tc35892", .data = (void *) TC3589X_TC35892 }, { .compatible = "toshiba,tc35893", .data = (void *) TC3589X_TC35893 }, diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c index 0da1cecb5af6a9..e2f6858d101e15 100644 --- a/drivers/mfd/tps6105x.c +++ b/drivers/mfd/tps6105x.c @@ -23,7 +23,7 @@ #include #include -static struct regmap_config tps6105x_regmap_config = { +static const struct regmap_config tps6105x_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = TPS6105X_REG_3, diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c index 5ef0a7e0d61d8c..54832e9321b95d 100644 --- a/drivers/mfd/tps65086.c +++ b/drivers/mfd/tps65086.c @@ -45,7 +45,7 @@ static const struct regmap_irq tps65086_irqs[] = { REGMAP_IRQ_REG(TPS65086_IRQ_FAULT, 0, TPS65086_IRQ_FAULT_MASK), }; -static struct regmap_irq_chip tps65086_irq_chip = { +static const struct regmap_irq_chip tps65086_irq_chip = { .name = "tps65086", .status_base = TPS65086_IRQ, .mask_base = TPS65086_IRQ_MASK, diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c index b82cd484ac857a..24f42175a9b44b 100644 --- a/drivers/mfd/tps65090.c +++ b/drivers/mfd/tps65090.c @@ -120,7 +120,7 @@ static const struct regmap_irq tps65090_irqs[] = { }, }; -static struct regmap_irq_chip tps65090_irq_chip = { +static const struct regmap_irq_chip tps65090_irq_chip = { .name = "tps65090", .irqs = tps65090_irqs, .num_irqs = ARRAY_SIZE(tps65090_irqs), diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index 427a2b97f11711..4f3e632f726ff3 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -186,7 +186,7 @@ static const struct regmap_irq tps65218_irqs[] = { }, }; -static struct regmap_irq_chip tps65218_irq_chip = { +static const struct regmap_irq_chip tps65218_irq_chip = { .name = "tps65218", .irqs = tps65218_irqs, .num_irqs = ARRAY_SIZE(tps65218_irqs), diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c index 0e0c42e4fdfc75..57ff5cb294a664 100644 --- a/drivers/mfd/tps65219.c +++ b/drivers/mfd/tps65219.c @@ -159,7 +159,7 @@ static struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = { #define TPS65219_REGMAP_IRQ_REG(int_name, register_position) \ REGMAP_IRQ_REG(int_name, register_position, int_name##_MASK) -static struct regmap_irq tps65219_irqs[] = { +static const struct regmap_irq tps65219_irqs[] = { TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_SCG, TPS65219_REG_INT_LDO_3_4_POS), TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_OC, TPS65219_REG_INT_LDO_3_4_POS), TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_UV, TPS65219_REG_INT_LDO_3_4_POS), @@ -211,7 +211,7 @@ static struct regmap_irq tps65219_irqs[] = { TPS65219_REGMAP_IRQ_REG(TPS65219_INT_PB_RISING_EDGE_DETECT, TPS65219_REG_INT_PB_POS), }; -static struct regmap_irq_chip tps65219_irq_chip = { +static const struct regmap_irq_chip tps65219_irq_chip = { .name = "tps65219_irq", .main_status = TPS65219_REG_INT_SOURCE, .num_main_regs = 1, diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c index 8fb0384d5a8ed7..6a7b7a697fb780 100644 --- a/drivers/mfd/tps65910.c +++ b/drivers/mfd/tps65910.c @@ -197,7 +197,7 @@ static const struct regmap_irq tps65910_irqs[] = { }, }; -static struct regmap_irq_chip tps65911_irq_chip = { +static const struct 
regmap_irq_chip tps65911_irq_chip = { .name = "tps65910", .irqs = tps65911_irqs, .num_irqs = ARRAY_SIZE(tps65911_irqs), @@ -208,7 +208,7 @@ static struct regmap_irq_chip tps65911_irq_chip = { .ack_base = TPS65910_INT_STS, }; -static struct regmap_irq_chip tps65910_irq_chip = { +static const struct regmap_irq_chip tps65910_irq_chip = { .name = "tps65910", .irqs = tps65910_irqs, .num_irqs = ARRAY_SIZE(tps65910_irqs), @@ -223,7 +223,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq, struct tps65910_platform_data *pdata) { int ret; - static struct regmap_irq_chip *tps6591x_irqs_chip; + static const struct regmap_irq_chip *tps6591x_irqs_chip; if (!irq) { dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n"); diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c index 87ee6aac376363..a9dcd7f0d9e3d8 100644 --- a/drivers/mfd/tps65912-core.c +++ b/drivers/mfd/tps65912-core.c @@ -57,7 +57,7 @@ static const struct regmap_irq tps65912_irqs[] = { REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO10, 3, TPS65912_INT_STS4_PGOOD_LDO10), }; -static struct regmap_irq_chip tps65912_irq_chip = { +static const struct regmap_irq_chip tps65912_irq_chip = { .name = "tps65912", .irqs = tps65912_irqs, .num_irqs = ARRAY_SIZE(tps65912_irqs), diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index c184e8bfab7c8f..218d6195fad201 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -620,7 +620,7 @@ static const struct regmap_irq twl6040_irqs[] = { { .reg_offset = 0, .mask = TWL6040_READYINT, }, }; -static struct regmap_irq_chip twl6040_irq_chip = { +static const struct regmap_irq_chip twl6040_irq_chip = { .name = "twl6040", .irqs = twl6040_irqs, .num_irqs = ARRAY_SIZE(twl6040_irqs), diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c index 7b9873b72c37b6..fcd182d519811a 100644 --- a/drivers/mfd/wcd934x.c +++ b/drivers/mfd/wcd934x.c @@ -109,7 +109,7 @@ static const struct regmap_range_cfg wcd934x_ranges[] = { }, }; -static struct regmap_config wcd934x_regmap_config = { +static const struct regmap_config wcd934x_regmap_config = { .reg_bits = 16, .val_bits = 8, .cache_type = REGCACHE_MAPLE, diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 41c54051347ab7..3fe7e2a9bd294d 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -104,6 +104,16 @@ config PHANTOM If you choose to build module, its name will be phantom. If unsure, say N here. +config RPMB + tristate "RPMB partition interface" + depends on MMC + help + Unified RPMB unit interface for RPMB capable devices such as eMMC and + UFS. Provides interface for in-kernel security controllers to access + RPMB unit. + + If unsure, select N. + config TIFM_CORE tristate "TI Flash Media interface support" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index c2f990862d2bb8..a9f94525e1819d 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_LKDTM) += lkdtm/ obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o +obj-$(CONFIG_RPMB) += rpmb-core.o obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index 587427b73914c5..bbe3967c3a4c79 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -9,7 +9,7 @@ * Copyright (C) 2010,2011 Igor M. 
Liplianin */ -#include +#include #include #include #include diff --git a/drivers/misc/bcm-vk/bcm_vk_sg.c b/drivers/misc/bcm-vk/bcm_vk_sg.c index 2e9daaf3e492bb..d309216ee18192 100644 --- a/drivers/misc/bcm-vk/bcm_vk_sg.c +++ b/drivers/misc/bcm-vk/bcm_vk_sg.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 117b3c24f910c9..be3d4e0e50ccd6 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c index bcc005dff1c0fa..03633cccd04389 100644 --- a/drivers/misc/cxl/of.c +++ b/drivers/misc/cxl/of.c @@ -7,65 +7,12 @@ #include #include #include +#include #include #include #include "cxl.h" - -static const __be32 *read_prop_string(const struct device_node *np, - const char *prop_name) -{ - const __be32 *prop; - - prop = of_get_property(np, prop_name, NULL); - if (cxl_verbose && prop) - pr_info("%s: %s\n", prop_name, (char *) prop); - return prop; -} - -static const __be32 *read_prop_dword(const struct device_node *np, - const char *prop_name, u32 *val) -{ - const __be32 *prop; - - prop = of_get_property(np, prop_name, NULL); - if (prop) - *val = be32_to_cpu(prop[0]); - if (cxl_verbose && prop) - pr_info("%s: %#x (%u)\n", prop_name, *val, *val); - return prop; -} - -static const __be64 *read_prop64_dword(const struct device_node *np, - const char *prop_name, u64 *val) -{ - const __be64 *prop; - - prop = of_get_property(np, prop_name, NULL); - if (prop) - *val = be64_to_cpu(prop[0]); - if (cxl_verbose && prop) - pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val); - return prop; -} - - -static int read_handle(struct device_node *np, u64 *handle) -{ - const __be32 *prop; - u64 size; - - /* Get address and size of the node */ - prop = of_get_address(np, 0, &size, NULL); - if (size) - return -EINVAL; - - /* Helper to read a big number; size is in cells (not bytes) */ - *handle = of_read_number(prop, of_n_addr_cells(np)); - return 0; -} - static int read_phys_addr(struct device_node *np, char *prop_name, struct cxl_afu *afu) { @@ -100,9 +47,6 @@ static int read_phys_addr(struct device_node *np, char *prop_name, type, prop_name); return -EINVAL; } - if (cxl_verbose) - pr_info("%s: %#x %#llx (size %#llx)\n", - prop_name, type, addr, size); } } return 0; @@ -130,36 +74,17 @@ static int read_vpd(struct cxl *adapter, struct cxl_afu *afu) int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np) { - if (read_handle(afu_np, &afu->guest->handle)) - return -EINVAL; - pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle); - - return 0; + return of_property_read_reg(afu_np, 0, &afu->guest->handle, NULL); } int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np) { - int i, len, rc; - char *p; - const __be32 *prop; + int i, rc; u16 device_id, vendor_id; u32 val = 0, class_code; /* Properties are read in the same order as listed in PAPR */ - if (cxl_verbose) { - pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n"); - - prop = of_get_property(np, "compatible", &len); - i = 0; - while (i < len) { - p = (char *) prop + i; - pr_info("compatible: %s\n", p); - i += strlen(p) + 1; - } - read_prop_string(np, "name"); - } - rc = read_phys_addr(np, "reg", afu); if (rc) return rc; @@ -173,25 +98,15 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np) else afu->psa = 
true; - if (cxl_verbose) { - read_prop_string(np, "ibm,loc-code"); - read_prop_string(np, "device_type"); - } + of_property_read_u32(np, "ibm,#processes", &afu->max_procs_virtualised); - read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised); - - if (cxl_verbose) { - read_prop_dword(np, "ibm,scratchpad-size", &val); - read_prop_dword(np, "ibm,programmable", &val); - read_prop_string(np, "ibm,phandle"); + if (cxl_verbose) read_vpd(NULL, afu); - } - read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints); + of_property_read_u32(np, "ibm,max-ints-per-process", &afu->guest->max_ints); afu->irqs_max = afu->guest->max_ints; - prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs); - if (prop) { + if (!of_property_read_u32(np, "ibm,min-ints-per-process", &afu->pp_irqs)) { /* One extra interrupt for the PSL interrupt is already * included. Remove it now to keep only AFU interrupts and * match the native case. @@ -199,21 +114,13 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np) afu->pp_irqs--; } - if (cxl_verbose) { - read_prop_dword(np, "ibm,max-ints", &val); - read_prop_dword(np, "ibm,vpd-size", &val); - } - - read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len); + of_property_read_u64(np, "ibm,error-buffer-size", &afu->eb_len); afu->eb_offset = 0; - if (cxl_verbose) - read_prop_dword(np, "ibm,config-record-type", &val); - - read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len); + of_property_read_u64(np, "ibm,config-record-size", &afu->crs_len); afu->crs_offset = 0; - read_prop_dword(np, "ibm,#config-records", &afu->crs_num); + of_property_read_u32(np, "ibm,#config-records", &afu->crs_num); if (cxl_verbose) { for (i = 0; i < afu->crs_num; i++) { @@ -235,35 +142,18 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np) i, class_code); } } - - read_prop_dword(np, "ibm,function-number", &val); - read_prop_dword(np, "ibm,privileged-function", &val); - read_prop_dword(np, "vendor-id", &val); - read_prop_dword(np, "device-id", &val); - read_prop_dword(np, "revision-id", &val); - read_prop_dword(np, "class-code", &val); - read_prop_dword(np, "subsystem-vendor-id", &val); - read_prop_dword(np, "subsystem-id", &val); } /* * if "ibm,process-mmio" doesn't exist then per-process mmio is * not supported */ val = 0; - prop = read_prop_dword(np, "ibm,process-mmio", &val); - if (prop && val == 1) + if (!of_property_read_u32(np, "ibm,process-mmio", &val) && val == 1) afu->pp_psa = true; else afu->pp_psa = false; - if (cxl_verbose) { - read_prop_dword(np, "ibm,supports-aur", &val); - read_prop_dword(np, "ibm,supports-csrp", &val); - read_prop_dword(np, "ibm,supports-prr", &val); - } - - prop = read_prop_dword(np, "ibm,function-error-interrupt", &val); - if (prop) + if (!of_property_read_u32(np, "ibm,function-error-interrupt", &val)) afu->serr_hwirq = val; pr_devel("AFU handle: %#llx\n", afu->guest->handle); @@ -334,95 +224,44 @@ static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np) int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np) { - if (read_handle(np, &adapter->guest->handle)) - return -EINVAL; - pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle); - - return 0; + return of_property_read_reg(np, 0, &adapter->guest->handle, NULL); } int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np) { - int rc, len, naddr, i; - char *p; - const __be32 *prop; + int rc; + const char *p; u32 val = 0; /* Properties are read in the 
same order as listed in PAPR */ - naddr = of_n_addr_cells(np); - - if (cxl_verbose) { - pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n"); - - read_prop_dword(np, "#address-cells", &val); - read_prop_dword(np, "#size-cells", &val); - - prop = of_get_property(np, "compatible", &len); - i = 0; - while (i < len) { - p = (char *) prop + i; - pr_info("compatible: %s\n", p); - i += strlen(p) + 1; - } - read_prop_string(np, "name"); - read_prop_string(np, "model"); - - prop = of_get_property(np, "reg", NULL); - if (prop) { - pr_info("reg: addr:%#llx size:%#x\n", - of_read_number(prop, naddr), - be32_to_cpu(prop[naddr])); - } - - read_prop_string(np, "ibm,loc-code"); - } - if ((rc = read_adapter_irq_config(adapter, np))) return rc; - if (cxl_verbose) { - read_prop_string(np, "device_type"); - read_prop_string(np, "ibm,phandle"); - } - - prop = read_prop_dword(np, "ibm,caia-version", &val); - if (prop) { + if (!of_property_read_u32(np, "ibm,caia-version", &val)) { adapter->caia_major = (val & 0xFF00) >> 8; adapter->caia_minor = val & 0xFF; } - prop = read_prop_dword(np, "ibm,psl-revision", &val); - if (prop) + if (!of_property_read_u32(np, "ibm,psl-revision", &val)) adapter->psl_rev = val; - prop = read_prop_string(np, "status"); - if (prop) { - adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop); + if (!of_property_read_string(np, "status", &p)) { + adapter->guest->status = kasprintf(GFP_KERNEL, "%s", p); if (adapter->guest->status == NULL) return -ENOMEM; } - prop = read_prop_dword(np, "vendor-id", &val); - if (prop) + if (!of_property_read_u32(np, "vendor-id", &val)) adapter->guest->vendor = val; - prop = read_prop_dword(np, "device-id", &val); - if (prop) + if (!of_property_read_u32(np, "device-id", &val)) adapter->guest->device = val; - if (cxl_verbose) { - read_prop_dword(np, "ibm,privileged-facility", &val); - read_prop_dword(np, "revision-id", &val); - read_prop_dword(np, "class-code", &val); - } - - prop = read_prop_dword(np, "subsystem-vendor-id", &val); - if (prop) + if (!of_property_read_u32(np, "subsystem-vendor-id", &val)) adapter->guest->subsystem_vendor = val; - prop = read_prop_dword(np, "subsystem-id", &val); - if (prop) + if (!of_property_read_u32(np, "subsystem-id", &val)) adapter->guest->subsystem = val; if (cxl_verbose) diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 4cf9e7c42a244e..3d52f9b92d0de8 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -363,17 +363,17 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, { int rc; struct device_node *np; - const __be32 *prop; + u32 id; if (!(np = pnv_pci_get_phb_node(dev))) return -ENODEV; - while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL))) + while (np && of_property_read_u32(np, "ibm,chip-id", &id)) np = of_get_next_parent(np); if (!np) return -ENODEV; - *chipid = be32_to_cpup(prop); + *chipid = id; rc = get_phb_index(np, phb_index); if (rc) { @@ -398,32 +398,26 @@ static DEFINE_MUTEX(indications_mutex); static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind, u64 *nbwind) { - static u64 nbw, asn, capi = 0; + static u32 val[3]; struct device_node *np; - const __be32 *prop; mutex_lock(&indications_mutex); - if (!capi) { + if (!val[0]) { if (!(np = pnv_pci_get_phb_node(dev))) { mutex_unlock(&indications_mutex); return -ENODEV; } - prop = of_get_property(np, "ibm,phb-indications", NULL); - if (!prop) { - nbw = 0x0300UL; /* legacy values */ - asn = 0x0400UL; - capi = 0x0200UL; - } else { - nbw = 
(u64)be32_to_cpu(prop[2]); - asn = (u64)be32_to_cpu(prop[1]); - capi = (u64)be32_to_cpu(prop[0]); + if (of_property_read_u32_array(np, "ibm,phb-indications", val, 3)) { + val[2] = 0x0300UL; /* legacy values */ + val[1] = 0x0400UL; + val[0] = 0x0200UL; } of_node_put(np); } - *capiind = capi; - *asnind = asn; - *nbwind = nbw; + *capiind = val[0]; + *asnind = val[1]; + *nbwind = val[2]; mutex_unlock(&indications_mutex); return 0; } @@ -605,7 +599,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) /* Do not fail when CAPP timebase sync is not supported by OPAL */ of_node_get(np); - if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) { + if (!of_property_present(np, "ibm,capp-timebase-sync")) { of_node_put(np); dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n"); return; diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 315c43f17dd3e5..409bd1c3966315 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -579,7 +579,7 @@ static void release_afu_config_record(struct kobject *kobj) kfree(cr); } -static struct kobj_type afu_config_record_type = { +static const struct kobj_type afu_config_record_type = { .sysfs_ops = &kobj_sysfs_ops, .release = release_afu_config_record, .default_groups = afu_cr_groups, diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index da87abe93daf25..74181b8c386b78 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -27,7 +27,8 @@ #define MDSP_DOMAIN_ID (1) #define SDSP_DOMAIN_ID (2) #define CDSP_DOMAIN_ID (3) -#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/ +#define CDSP1_DOMAIN_ID (4) +#define FASTRPC_DEV_MAX 5 /* adsp, mdsp, slpi, cdsp, cdsp1 */ #define FASTRPC_MAX_SESSIONS 14 #define FASTRPC_MAX_VMIDS 16 #define FASTRPC_ALIGN 128 @@ -106,7 +107,7 @@ #define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev) static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp", - "sdsp", "cdsp"}; + "sdsp", "cdsp", "cdsp1" }; struct fastrpc_phy_page { u64 addr; /* physical address */ u64 size; /* size of contiguous region */ @@ -2269,7 +2270,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) return err; } - for (i = 0; i <= CDSP_DOMAIN_ID; i++) { + for (i = 0; i < FASTRPC_DEV_MAX; i++) { if (!strcmp(domains[i], domain)) { domain_id = i; break; @@ -2327,13 +2328,14 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) case ADSP_DOMAIN_ID: case MDSP_DOMAIN_ID: case SDSP_DOMAIN_ID: - /* Unsigned PD offloading is only supported on CDSP*/ + /* Unsigned PD offloading is only supported on CDSP and CDSP1 */ data->unsigned_support = false; err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]); if (err) goto fdev_error; break; case CDSP_DOMAIN_ID: + case CDSP1_DOMAIN_ID: data->unsigned_support = true; /* Create both device nodes so that we can allow both Signed and Unsigned PD */ err = fastrpc_device_register(rdev, data, true, domains[domain_id]); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 88b91ad8e5413a..0cf31164b4706e 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -95,6 +95,7 @@ #include #include +#include #define v1printk(a...) 
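Editorial aside, not part of the patch: the cxl/of.c hunks above replace open-coded of_get_property() plus be32_to_cpu() with the typed of_property_read_u32()/of_property_read_u32_array() accessors, which handle endianness and missing properties for the caller. A minimal sketch ("vendor-id" stands in for any 32-bit property):

#include <linux/of.h>

static u32 foo_vendor_id(struct device_node *np)
{
	u32 val;

	/* of_property_read_u32() returns non-zero if the property is absent or malformed. */
	if (of_property_read_u32(np, "vendor-id", &val))
		return 0;

	return val;
}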
do { \ if (verbose) \ @@ -126,7 +127,6 @@ static int final_ack; static int force_hwbrks; static int hwbreaks_ok; static int hw_break_val; -static int hw_break_val2; static int cont_instead_of_sstep; static unsigned long cont_thread_id; static unsigned long sstep_thread_id; @@ -284,7 +284,7 @@ static void hw_rem_access_break(char *arg) static void hw_break_val_access(void) { - hw_break_val2 = hw_break_val; + READ_ONCE(hw_break_val); } static void hw_break_val_write(void) diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index bac4df2e523147..93949df3bcff1a 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #define FIRMWARE_NAME "lattice-ecp3.bit" diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index 49868a45c0ad1a..4233dc4cc7d679 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -669,7 +669,6 @@ static int lis3lv02d_misc_fasync(int fd, struct file *file, int on) static const struct file_operations lis3lv02d_misc_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = lis3lv02d_misc_read, .open = lis3lv02d_misc_open, .release = lis3lv02d_misc_release, @@ -1038,7 +1037,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3) pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO; if (of_property_read_bool(np, "st,wakeup-z-hi")) pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI; - if (of_get_property(np, "st,wakeup-threshold", &val)) + if (!of_property_read_u32(np, "st,wakeup-threshold", &val)) pdata->wakeup_thresh = val; if (of_property_read_bool(np, "st,wakeup2-x-lo")) @@ -1053,7 +1052,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3) pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO; if (of_property_read_bool(np, "st,wakeup2-z-hi")) pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI; - if (of_get_property(np, "st,wakeup2-threshold", &val)) + if (!of_property_read_u32(np, "st,wakeup2-threshold", &val)) pdata->wakeup_thresh2 = val; if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) { diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 40c3fe26f76df0..1f5aaf16e300f0 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -1176,7 +1176,6 @@ static const struct file_operations mei_fops = { .poll = mei_poll, .fsync = mei_fsync, .fasync = mei_fasync, - .llseek = no_llseek }; /** diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index d02f6e881139f1..20a11b299bcd00 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include "mei_dev.h" #include "vsc-tp.h" diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c index 084d0205f97d6d..9f129bc641f699 100644 --- a/drivers/misc/mei/vsc-fw-loader.c +++ b/drivers/misc/mei/vsc-fw-loader.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include "vsc-tp.h" diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c index 3c2f743c58b00c..4954553b7baa6f 100644 --- a/drivers/misc/ntsync.c +++ b/drivers/misc/ntsync.c @@ -126,7 +126,6 @@ static const struct file_operations ntsync_obj_fops = { .release = ntsync_obj_release, .unlocked_ioctl = ntsync_obj_ioctl, .compat_ioctl = compat_ptr_ioctl, - .llseek = no_llseek, }; static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev, @@ -233,7 +232,6 @@ static const struct file_operations ntsync_fops = { .release = ntsync_char_release, .unlocked_ioctl = 
ntsync_char_ioctl, .compat_ioctl = compat_ptr_ioctl, - .llseek = no_llseek, }; static struct miscdevice ntsync_misc = { diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h index 10125a22d5a534..d2028d6c6f08d2 100644 --- a/drivers/misc/ocxl/ocxl_internal.h +++ b/drivers/misc/ocxl/ocxl_internal.h @@ -97,8 +97,6 @@ struct ocxl_process_element { __be32 software_state; }; -int ocxl_create_cdev(struct ocxl_afu *afu); -void ocxl_destroy_cdev(struct ocxl_afu *afu); int ocxl_file_register_afu(struct ocxl_afu *afu); void ocxl_file_unregister_afu(struct ocxl_afu *afu); diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 30bd7c39c2618b..701db2c5859be9 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c @@ -279,7 +279,6 @@ static const struct file_operations phantom_file_ops = { .unlocked_ioctl = phantom_ioctl, .compat_ioctl = phantom_compat_ioctl, .poll = phantom_poll, - .llseek = no_llseek, }; static irqreturn_t phantom_isr(int irq, void *data) diff --git a/drivers/misc/rpmb-core.c b/drivers/misc/rpmb-core.c new file mode 100644 index 00000000000000..bc68cde1a8bfd8 --- /dev/null +++ b/drivers/misc/rpmb-core.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(c) 2015 - 2019 Intel Corporation. All rights reserved. + * Copyright(c) 2021 - 2024 Linaro Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_IDA(rpmb_ida); +static DEFINE_MUTEX(rpmb_mutex); + +/** + * rpmb_dev_get() - increase rpmb device ref counter + * @rdev: rpmb device + */ +struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev) +{ + if (rdev) + get_device(&rdev->dev); + return rdev; +} +EXPORT_SYMBOL_GPL(rpmb_dev_get); + +/** + * rpmb_dev_put() - decrease rpmb device ref counter + * @rdev: rpmb device + */ +void rpmb_dev_put(struct rpmb_dev *rdev) +{ + if (rdev) + put_device(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_put); + +/** + * rpmb_route_frames() - route rpmb frames to rpmb device + * @rdev: rpmb device + * @req: rpmb request frames + * @req_len: length of rpmb request frames in bytes + * @rsp: rpmb response frames + * @rsp_len: length of rpmb response frames in bytes + * + * Returns: < 0 on failure + */ +int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req, + unsigned int req_len, u8 *rsp, unsigned int rsp_len) +{ + if (!req || !req_len || !rsp || !rsp_len) + return -EINVAL; + + return rdev->descr.route_frames(rdev->dev.parent, req, req_len, + rsp, rsp_len); +} +EXPORT_SYMBOL_GPL(rpmb_route_frames); + +static void rpmb_dev_release(struct device *dev) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + mutex_lock(&rpmb_mutex); + ida_simple_remove(&rpmb_ida, rdev->id); + mutex_unlock(&rpmb_mutex); + kfree(rdev->descr.dev_id); + kfree(rdev); +} + +static struct class rpmb_class = { + .name = "rpmb", + .dev_release = rpmb_dev_release, +}; + +/** + * rpmb_dev_find_device() - return first matching rpmb device + * @start: rpmb device to begin with + * @data: data for the match function + * @match: the matching function + * + * Iterate over registered RPMB devices, and call @match() for each passing + * it the RPMB device and @data. + * + * The return value of @match() is checked for each call. If it returns + * anything other 0, break and return the found RPMB device. + * + * It's the callers responsibility to call rpmb_dev_put() on the returned + * device, when it's done with it. 
+ * + * Returns: a matching rpmb device or NULL on failure + */ +struct rpmb_dev *rpmb_dev_find_device(const void *data, + const struct rpmb_dev *start, + int (*match)(struct device *dev, + const void *data)) +{ + struct device *dev; + const struct device *start_dev = NULL; + + if (start) + start_dev = &start->dev; + dev = class_find_device(&rpmb_class, start_dev, data, match); + + return dev ? to_rpmb_dev(dev) : NULL; +} +EXPORT_SYMBOL_GPL(rpmb_dev_find_device); + +int rpmb_interface_register(struct class_interface *intf) +{ + intf->class = &rpmb_class; + + return class_interface_register(intf); +} +EXPORT_SYMBOL_GPL(rpmb_interface_register); + +void rpmb_interface_unregister(struct class_interface *intf) +{ + class_interface_unregister(intf); +} +EXPORT_SYMBOL_GPL(rpmb_interface_unregister); + +/** + * rpmb_dev_unregister() - unregister RPMB partition from the RPMB subsystem + * @rdev: the rpmb device to unregister + * + * This function should be called from the release function of the + * underlying device used when the RPMB device was registered. + * + * Returns: < 0 on failure + */ +int rpmb_dev_unregister(struct rpmb_dev *rdev) +{ + if (!rdev) + return -EINVAL; + + device_del(&rdev->dev); + + rpmb_dev_put(rdev); + + return 0; +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister); + +/** + * rpmb_dev_register - register RPMB partition with the RPMB subsystem + * @dev: storage device of the rpmb device + * @descr: RPMB device description + * + * While registering the RPMB partition extract needed device information + * while needed resources are available. + * + * Returns: a pointer to a 'struct rpmb_dev' or an ERR_PTR on failure + */ +struct rpmb_dev *rpmb_dev_register(struct device *dev, + struct rpmb_descr *descr) +{ + struct rpmb_dev *rdev; + int ret; + + if (!dev || !descr || !descr->route_frames || !descr->dev_id || + !descr->dev_id_len) + return ERR_PTR(-EINVAL); + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + rdev->descr = *descr; + rdev->descr.dev_id = kmemdup(descr->dev_id, descr->dev_id_len, + GFP_KERNEL); + if (!rdev->descr.dev_id) { + ret = -ENOMEM; + goto err_free_rdev; + } + + mutex_lock(&rpmb_mutex); + ret = ida_simple_get(&rpmb_ida, 0, 0, GFP_KERNEL); + mutex_unlock(&rpmb_mutex); + if (ret < 0) + goto err_free_dev_id; + rdev->id = ret; + + dev_set_name(&rdev->dev, "rpmb%d", rdev->id); + rdev->dev.class = &rpmb_class; + rdev->dev.parent = dev; + + ret = device_register(&rdev->dev); + if (ret) { + put_device(&rdev->dev); + return ERR_PTR(ret); + } + + dev_dbg(&rdev->dev, "registered device\n"); + + return rdev; + +err_free_dev_id: + kfree(rdev->descr.dev_id); +err_free_rdev: + kfree(rdev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(rpmb_dev_register); + +static int __init rpmb_init(void) +{ + int ret; + + ret = class_register(&rpmb_class); + if (ret) { + pr_err("couldn't create class\n"); + return ret; + } + ida_init(&rpmb_ida); + return 0; +} + +static void __exit rpmb_exit(void) +{ + ida_destroy(&rpmb_ida); + class_unregister(&rpmb_class); +} + +subsys_initcall(rpmb_init); +module_exit(rpmb_exit); + +MODULE_AUTHOR("Jens Wiklander "); +MODULE_DESCRIPTION("RPMB class"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c index 2ad4387c98374c..1a7796ab3fad12 100644 --- a/drivers/misc/tsl2550.c +++ b/drivers/misc/tsl2550.c @@ -185,10 +185,10 @@ static ssize_t tsl2550_store_power_state(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct tsl2550_data *data = i2c_get_clientdata(client); 
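Editorial aside, not part of the patch: the new rpmb-core.c above exposes rpmb_dev_register()/rpmb_dev_unregister() plus a route_frames() hook for storage drivers. A hypothetical provider registration, assuming the declarations land in <linux/rpmb.h>; foo_* names are invented and only the rpmb_descr fields that rpmb_dev_register() validates are filled in:

#include <linux/err.h>
#include <linux/rpmb.h>

static int foo_route_frames(struct device *dev, u8 *req, unsigned int req_len,
			    u8 *rsp, unsigned int rsp_len)
{
	/* Translate the raw RPMB frames into commands for the underlying device. */
	return -EOPNOTSUPP;
}

static int foo_register_rpmb(struct device *storage_dev,
			     u8 *unique_id, size_t unique_id_len)
{
	struct rpmb_descr descr = {
		.route_frames	= foo_route_frames,
		.dev_id		= unique_id,
		.dev_id_len	= unique_id_len,
	};
	struct rpmb_dev *rdev;

	rdev = rpmb_dev_register(storage_dev, &descr);
	return PTR_ERR_OR_ZERO(rdev);
}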
- unsigned long val = simple_strtoul(buf, NULL, 10); + unsigned long val; int ret; - if (val > 1) + if (kstrtoul(buf, 10, &val) || val > 1) return -EINVAL; mutex_lock(&data->update_lock); @@ -217,10 +217,10 @@ static ssize_t tsl2550_store_operating_mode(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct tsl2550_data *data = i2c_get_clientdata(client); - unsigned long val = simple_strtoul(buf, NULL, 10); + unsigned long val; int ret; - if (val > 1) + if (kstrtoul(buf, 10, &val) || val > 1) return -EINVAL; if (data->power_state == 0) diff --git a/drivers/misc/xilinx_tmr_inject.c b/drivers/misc/xilinx_tmr_inject.c index 73c6da7d096310..734fdfac19ef3c 100644 --- a/drivers/misc/xilinx_tmr_inject.c +++ b/drivers/misc/xilinx_tmr_inject.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index bf4e29ef023c0a..14d2ecbb04d33a 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -37,6 +37,7 @@ config PWRSEQ_SIMPLE config MMC_BLOCK tristate "MMC block device driver" depends on BLOCK + depends on RPMB || !RPMB imply IOSCHED_BFQ default y help diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 2c9963248fcbd6..ef06a4d5d65bb2 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include @@ -76,6 +78,48 @@ MODULE_ALIAS("mmc:block"); #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) +/** + * struct rpmb_frame - rpmb frame as defined by eMMC 5.1 (JESD84-B51) + * + * @stuff : stuff bytes + * @key_mac : The authentication key or the message authentication + * code (MAC) depending on the request/response type. + * The MAC will be delivered in the last (or the only) + * block of data. + * @data : Data to be written or read by signed access. + * @nonce : Random number generated by the host for the requests + * and copied to the response by the RPMB engine. + * @write_counter: Counter value for the total amount of the successful + * authenticated data write requests made by the host. + * @addr : Address of the data to be programmed to or read + * from the RPMB. Address is the serial number of + * the accessed block (half sector 256B). + * @block_count : Number of blocks (half sectors, 256B) requested to be + * read/programmed. + * @result : Includes information about the status of the write counter + * (valid, expired) and result of the access made to the RPMB. + * @req_resp : Defines the type of request and response to/from the memory. + * + * The stuff bytes and big-endian properties are modeled to fit to the spec. 
+ */ +struct rpmb_frame { + u8 stuff[196]; + u8 key_mac[32]; + u8 data[256]; + u8 nonce[16]; + __be32 write_counter; + __be16 addr; + __be16 block_count; + __be16 result; + __be16 req_resp; +} __packed; + +#define RPMB_PROGRAM_KEY 0x1 /* Program RPMB Authentication Key */ +#define RPMB_GET_WRITE_COUNTER 0x2 /* Read RPMB write counter */ +#define RPMB_WRITE_DATA 0x3 /* Write data to RPMB partition */ +#define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */ +#define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */ + static DEFINE_MUTEX(block_mutex); /* @@ -155,6 +199,7 @@ static const struct bus_type mmc_rpmb_bus_type = { * @id: unique device ID number * @part_index: partition index (0 on first) * @md: parent MMC block device + * @rdev: registered RPMB device * @node: list item, so we can put this device on a list */ struct mmc_rpmb_data { @@ -163,6 +208,7 @@ struct mmc_rpmb_data { int id; unsigned int part_index; struct mmc_blk_data *md; + struct rpmb_dev *rdev; struct list_head node; }; @@ -307,10 +353,10 @@ static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; - char *end; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); - unsigned long set = simple_strtoul(buf, &end, 0); - if (end == buf) { + unsigned long set; + + if (kstrtoul(buf, 0, &set)) { ret = -EINVAL; goto out; } @@ -2484,7 +2530,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, return ERR_PTR(devidx); } - md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); + md = kzalloc(sizeof(*md), GFP_KERNEL); if (!md) { ret = -ENOMEM; goto out; @@ -2670,7 +2716,6 @@ static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) get_device(&rpmb->dev); filp->private_data = rpmb; - mmc_blk_get(rpmb->md->disk); return nonseekable_open(inode, filp); } @@ -2680,7 +2725,6 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, struct mmc_rpmb_data, chrdev); - mmc_blk_put(rpmb->md); put_device(&rpmb->dev); return 0; @@ -2690,7 +2734,6 @@ static const struct file_operations mmc_rpmb_fileops = { .release = mmc_rpmb_chrdev_release, .open = mmc_rpmb_chrdev_open, .owner = THIS_MODULE, - .llseek = no_llseek, .unlocked_ioctl = mmc_rpmb_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mmc_rpmb_ioctl_compat, @@ -2701,10 +2744,165 @@ static void mmc_blk_rpmb_device_release(struct device *dev) { struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); + rpmb_dev_unregister(rpmb->rdev); + mmc_blk_put(rpmb->md); ida_free(&mmc_rpmb_ida, rpmb->id); kfree(rpmb); } +static void free_idata(struct mmc_blk_ioc_data **idata, unsigned int cmd_count) +{ + unsigned int n; + + for (n = 0; n < cmd_count; n++) + kfree(idata[n]); + kfree(idata); +} + +static struct mmc_blk_ioc_data **alloc_idata(struct mmc_rpmb_data *rpmb, + unsigned int cmd_count) +{ + struct mmc_blk_ioc_data **idata; + unsigned int n; + + idata = kcalloc(cmd_count, sizeof(*idata), GFP_KERNEL); + if (!idata) + return NULL; + + for (n = 0; n < cmd_count; n++) { + idata[n] = kcalloc(1, sizeof(**idata), GFP_KERNEL); + if (!idata[n]) { + free_idata(idata, n); + return NULL; + } + idata[n]->rpmb = rpmb; + } + + return idata; +} + +static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode, + int write_flag, u8 *buf, unsigned int buf_bytes) +{ + /* + * The size of an RPMB frame must match what's expected by the + * hardware. 
+ */ + BUILD_BUG_ON(sizeof(struct rpmb_frame) != 512); + + idata->ic.opcode = opcode; + idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + idata->ic.write_flag = write_flag; + idata->ic.blksz = sizeof(struct rpmb_frame); + idata->ic.blocks = buf_bytes / idata->ic.blksz; + idata->buf = buf; + idata->buf_bytes = buf_bytes; +} + +static int mmc_route_rpmb_frames(struct device *dev, u8 *req, + unsigned int req_len, u8 *resp, + unsigned int resp_len) +{ + struct rpmb_frame *frm = (struct rpmb_frame *)req; + struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); + struct mmc_blk_data *md = rpmb->md; + struct mmc_blk_ioc_data **idata; + struct mmc_queue_req *mq_rq; + unsigned int cmd_count; + struct request *rq; + u16 req_type; + bool write; + int ret; + + if (IS_ERR(md->queue.card)) + return PTR_ERR(md->queue.card); + + if (req_len < sizeof(*frm)) + return -EINVAL; + + req_type = be16_to_cpu(frm->req_resp); + switch (req_type) { + case RPMB_PROGRAM_KEY: + if (req_len != sizeof(struct rpmb_frame) || + resp_len != sizeof(struct rpmb_frame)) + return -EINVAL; + write = true; + break; + case RPMB_GET_WRITE_COUNTER: + if (req_len != sizeof(struct rpmb_frame) || + resp_len != sizeof(struct rpmb_frame)) + return -EINVAL; + write = false; + break; + case RPMB_WRITE_DATA: + if (req_len % sizeof(struct rpmb_frame) || + resp_len != sizeof(struct rpmb_frame)) + return -EINVAL; + write = true; + break; + case RPMB_READ_DATA: + if (req_len != sizeof(struct rpmb_frame) || + resp_len % sizeof(struct rpmb_frame)) + return -EINVAL; + write = false; + break; + default: + return -EINVAL; + } + + if (write) + cmd_count = 3; + else + cmd_count = 2; + + idata = alloc_idata(rpmb, cmd_count); + if (!idata) + return -ENOMEM; + + if (write) { + struct rpmb_frame *frm = (struct rpmb_frame *)resp; + + /* Send write request frame(s) */ + set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, + 1 | MMC_CMD23_ARG_REL_WR, req, req_len); + + /* Send result request frame */ + memset(frm, 0, sizeof(*frm)); + frm->req_resp = cpu_to_be16(RPMB_RESULT_READ); + set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp, + resp_len); + + /* Read response frame */ + set_idata(idata[2], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len); + } else { + /* Send write request frame(s) */ + set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1, req, req_len); + + /* Read response frame */ + set_idata(idata[1], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len); + } + + rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto out; + } + + mq_rq = req_to_mmc_queue_req(rq); + mq_rq->drv_op = MMC_DRV_OP_IOCTL_RPMB; + mq_rq->drv_op_result = -EIO; + mq_rq->drv_op_data = idata; + mq_rq->ioc_count = cmd_count; + blk_execute_rq(rq, false); + ret = req_to_mmc_queue_req(rq)->drv_op_result; + + blk_mq_free_request(rq); + +out: + free_idata(idata, cmd_count); + return ret; +} + static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, struct mmc_blk_data *md, unsigned int part_index, @@ -2739,6 +2937,7 @@ static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, rpmb->dev.release = mmc_blk_rpmb_device_release; device_initialize(&rpmb->dev); dev_set_drvdata(&rpmb->dev, rpmb); + mmc_blk_get(md->disk); rpmb->md = md; cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); @@ -3000,6 +3199,42 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card, #endif /* CONFIG_DEBUG_FS */ +static void mmc_blk_rpmb_add(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_rpmb_data *rpmb; + struct rpmb_dev *rdev; + unsigned 
int n; + u32 cid[4]; + struct rpmb_descr descr = { + .type = RPMB_TYPE_EMMC, + .route_frames = mmc_route_rpmb_frames, + .reliable_wr_count = card->ext_csd.enhanced_rpmb_supported ? + 2 : 32, + .capacity = card->ext_csd.raw_rpmb_size_mult, + .dev_id = (void *)cid, + .dev_id_len = sizeof(cid), + }; + + /* + * Provice CID as an octet array. The CID needs to be interpreted + * when used as input to derive the RPMB key since some fields + * will change due to firmware updates. + */ + for (n = 0; n < 4; n++) + cid[n] = be32_to_cpu((__force __be32)card->raw_cid[n]); + + list_for_each_entry(rpmb, &md->rpmbs, node) { + rdev = rpmb_dev_register(&rpmb->dev, &descr); + if (IS_ERR(rdev)) { + pr_warn("%s: could not register RPMB device\n", + dev_name(&rpmb->dev)); + continue; + } + rpmb->rdev = rdev; + } +} + static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md; @@ -3045,6 +3280,8 @@ static int mmc_blk_probe(struct mmc_card *card) pm_runtime_enable(&card->dev); } + mmc_blk_rpmb_add(card); + return 0; out: diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 5b2f7c2854619d..6a23be214543db 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -51,20 +51,6 @@ static const unsigned int taac_mant[] = { 35, 40, 45, 50, 55, 60, 70, 80, }; -#define UNSTUFF_BITS(resp,start,size) \ - ({ \ - const int __size = size; \ - const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ - const int __off = 3 - ((start) / 32); \ - const int __shft = (start) & 31; \ - u32 __res; \ - \ - __res = resp[__off] >> __shft; \ - if (__size + __shft > 32) \ - __res |= resp[__off-1] << ((32 - __shft) % 32); \ - __res & __mask; \ - }) - /* * Given the decoded CSD structure, decode the raw CID to our CID structure. */ @@ -85,36 +71,36 @@ static int mmc_decode_cid(struct mmc_card *card) switch (card->csd.mmca_vsn) { case 0: /* MMC v1.0 - v1.2 */ case 1: /* MMC v1.4 */ - card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); - card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); - card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); - card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); - card->cid.serial = UNSTUFF_BITS(resp, 16, 24); - card->cid.month = UNSTUFF_BITS(resp, 12, 4); - card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; + card->cid.manfid = unstuff_bits(resp, 104, 24); + card->cid.prod_name[0] = unstuff_bits(resp, 96, 8); + card->cid.prod_name[1] = unstuff_bits(resp, 88, 8); + card->cid.prod_name[2] = unstuff_bits(resp, 80, 8); + card->cid.prod_name[3] = unstuff_bits(resp, 72, 8); + card->cid.prod_name[4] = unstuff_bits(resp, 64, 8); + card->cid.prod_name[5] = unstuff_bits(resp, 56, 8); + card->cid.prod_name[6] = unstuff_bits(resp, 48, 8); + card->cid.hwrev = unstuff_bits(resp, 44, 4); + card->cid.fwrev = unstuff_bits(resp, 40, 4); + card->cid.serial = unstuff_bits(resp, 16, 24); + card->cid.month = unstuff_bits(resp, 12, 4); + card->cid.year = unstuff_bits(resp, 8, 4) + 1997; break; case 2: /* MMC v2.0 - v2.2 */ case 3: /* MMC v3.1 - v3.3 */ case 4: /* MMC v4 */ - card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); - card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - 
card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); - card->cid.prv = UNSTUFF_BITS(resp, 48, 8); - card->cid.serial = UNSTUFF_BITS(resp, 16, 32); - card->cid.month = UNSTUFF_BITS(resp, 12, 4); - card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; + card->cid.manfid = unstuff_bits(resp, 120, 8); + card->cid.oemid = unstuff_bits(resp, 104, 16); + card->cid.prod_name[0] = unstuff_bits(resp, 96, 8); + card->cid.prod_name[1] = unstuff_bits(resp, 88, 8); + card->cid.prod_name[2] = unstuff_bits(resp, 80, 8); + card->cid.prod_name[3] = unstuff_bits(resp, 72, 8); + card->cid.prod_name[4] = unstuff_bits(resp, 64, 8); + card->cid.prod_name[5] = unstuff_bits(resp, 56, 8); + card->cid.prv = unstuff_bits(resp, 48, 8); + card->cid.serial = unstuff_bits(resp, 16, 32); + card->cid.month = unstuff_bits(resp, 12, 4); + card->cid.year = unstuff_bits(resp, 8, 4) + 1997; break; default: @@ -161,43 +147,43 @@ static int mmc_decode_csd(struct mmc_card *card) * v1.2 has extra information in bits 15, 11 and 10. * We also support eMMC v4.4 & v4.41. */ - csd->structure = UNSTUFF_BITS(resp, 126, 2); + csd->structure = unstuff_bits(resp, 126, 2); if (csd->structure == 0) { pr_err("%s: unrecognised CSD structure version %d\n", mmc_hostname(card->host), csd->structure); return -EINVAL; } - csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); - m = UNSTUFF_BITS(resp, 115, 4); - e = UNSTUFF_BITS(resp, 112, 3); + csd->mmca_vsn = unstuff_bits(resp, 122, 4); + m = unstuff_bits(resp, 115, 4); + e = unstuff_bits(resp, 112, 3); csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10; - csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + csd->taac_clks = unstuff_bits(resp, 104, 8) * 100; - m = UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); + m = unstuff_bits(resp, 99, 4); + e = unstuff_bits(resp, 96, 3); csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + csd->cmdclass = unstuff_bits(resp, 84, 12); - e = UNSTUFF_BITS(resp, 47, 3); - m = UNSTUFF_BITS(resp, 62, 12); + e = unstuff_bits(resp, 47, 3); + m = unstuff_bits(resp, 62, 12); csd->capacity = (1 + m) << (e + 2); - csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); - csd->read_partial = UNSTUFF_BITS(resp, 79, 1); - csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); - csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); - csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1); - csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); - csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); - csd->write_partial = UNSTUFF_BITS(resp, 21, 1); + csd->read_blkbits = unstuff_bits(resp, 80, 4); + csd->read_partial = unstuff_bits(resp, 79, 1); + csd->write_misalign = unstuff_bits(resp, 78, 1); + csd->read_misalign = unstuff_bits(resp, 77, 1); + csd->dsr_imp = unstuff_bits(resp, 76, 1); + csd->r2w_factor = unstuff_bits(resp, 26, 3); + csd->write_blkbits = unstuff_bits(resp, 22, 4); + csd->write_partial = unstuff_bits(resp, 21, 1); if (csd->write_blkbits >= 9) { - a = UNSTUFF_BITS(resp, 42, 5); - b = UNSTUFF_BITS(resp, 37, 5); + a = unstuff_bits(resp, 42, 5); + b = unstuff_bits(resp, 37, 5); csd->erase_size = (a + 1) * (b + 1); csd->erase_size <<= csd->write_blkbits - 9; - csd->wp_grp_size = UNSTUFF_BITS(resp, 32, 5); + csd->wp_grp_size = unstuff_bits(resp, 32, 5); } return 0; diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index 92d4194c78934c..06017110e1b00d 100644 --- a/drivers/mmc/core/mmc_ops.h +++ 
b/drivers/mmc/core/mmc_ops.h @@ -56,5 +56,19 @@ int mmc_cmdq_enable(struct mmc_card *card); int mmc_cmdq_disable(struct mmc_card *card); int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms); +static inline u32 unstuff_bits(const u32 *resp, int start, int size) +{ + const int __size = size; + const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; + const int __off = 3 - (start / 32); + const int __shft = start & 31; + u32 __res = resp[__off] >> __shft; + + if (__size + __shft > 32) + __res |= resp[__off - 1] << ((32 - __shft) % 32); + + return __res & __mask; +} + #endif diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c index 005247a49e51bf..01747ab1024ec1 100644 --- a/drivers/mmc/core/regulator.c +++ b/drivers/mmc/core/regulator.c @@ -255,7 +255,9 @@ int mmc_regulator_get_supply(struct mmc_host *mmc) if (IS_ERR(mmc->supply.vmmc)) { if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) - return -EPROBE_DEFER; + return dev_err_probe(dev, -EPROBE_DEFER, + "vmmc regulator not available\n"); + dev_dbg(dev, "No vmmc regulator found\n"); } else { ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); @@ -267,7 +269,9 @@ int mmc_regulator_get_supply(struct mmc_host *mmc) if (IS_ERR(mmc->supply.vqmmc)) { if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) - return -EPROBE_DEFER; + return dev_err_probe(dev, -EPROBE_DEFER, + "vqmmc regulator not available\n"); + dev_dbg(dev, "No vqmmc regulator found\n"); } diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index ee37ad14e79eea..12fe282bea77ef 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -56,20 +56,6 @@ static const unsigned int sd_au_size[] = { SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, }; -#define UNSTUFF_BITS(resp,start,size) \ - ({ \ - const int __size = size; \ - const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ - const int __off = 3 - ((start) / 32); \ - const int __shft = (start) & 31; \ - u32 __res; \ - \ - __res = resp[__off] >> __shft; \ - if (__size + __shft > 32) \ - __res |= resp[__off-1] << ((32 - __shft) % 32); \ - __res & __mask; \ - }) - #define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000 #define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000 @@ -95,18 +81,18 @@ void mmc_decode_cid(struct mmc_card *card) * SD doesn't currently have a version field so we will * have to assume we can parse this. 
*/ - card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); - card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); - card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); - card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); - card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); - card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); - card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); - card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4); - card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4); - card->cid.serial = UNSTUFF_BITS(resp, 24, 32); - card->cid.year = UNSTUFF_BITS(resp, 12, 8); - card->cid.month = UNSTUFF_BITS(resp, 8, 4); + card->cid.manfid = unstuff_bits(resp, 120, 8); + card->cid.oemid = unstuff_bits(resp, 104, 16); + card->cid.prod_name[0] = unstuff_bits(resp, 96, 8); + card->cid.prod_name[1] = unstuff_bits(resp, 88, 8); + card->cid.prod_name[2] = unstuff_bits(resp, 80, 8); + card->cid.prod_name[3] = unstuff_bits(resp, 72, 8); + card->cid.prod_name[4] = unstuff_bits(resp, 64, 8); + card->cid.hwrev = unstuff_bits(resp, 60, 4); + card->cid.fwrev = unstuff_bits(resp, 56, 4); + card->cid.serial = unstuff_bits(resp, 24, 32); + card->cid.year = unstuff_bits(resp, 12, 8); + card->cid.month = unstuff_bits(resp, 8, 4); card->cid.year += 2000; /* SD cards year offset */ } @@ -120,41 +106,41 @@ static int mmc_decode_csd(struct mmc_card *card) unsigned int e, m, csd_struct; u32 *resp = card->raw_csd; - csd_struct = UNSTUFF_BITS(resp, 126, 2); + csd_struct = unstuff_bits(resp, 126, 2); switch (csd_struct) { case 0: - m = UNSTUFF_BITS(resp, 115, 4); - e = UNSTUFF_BITS(resp, 112, 3); + m = unstuff_bits(resp, 115, 4); + e = unstuff_bits(resp, 112, 3); csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10; - csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + csd->taac_clks = unstuff_bits(resp, 104, 8) * 100; - m = UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); + m = unstuff_bits(resp, 99, 4); + e = unstuff_bits(resp, 96, 3); csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + csd->cmdclass = unstuff_bits(resp, 84, 12); - e = UNSTUFF_BITS(resp, 47, 3); - m = UNSTUFF_BITS(resp, 62, 12); + e = unstuff_bits(resp, 47, 3); + m = unstuff_bits(resp, 62, 12); csd->capacity = (1 + m) << (e + 2); - csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); - csd->read_partial = UNSTUFF_BITS(resp, 79, 1); - csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); - csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); - csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1); - csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3); - csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); - csd->write_partial = UNSTUFF_BITS(resp, 21, 1); + csd->read_blkbits = unstuff_bits(resp, 80, 4); + csd->read_partial = unstuff_bits(resp, 79, 1); + csd->write_misalign = unstuff_bits(resp, 78, 1); + csd->read_misalign = unstuff_bits(resp, 77, 1); + csd->dsr_imp = unstuff_bits(resp, 76, 1); + csd->r2w_factor = unstuff_bits(resp, 26, 3); + csd->write_blkbits = unstuff_bits(resp, 22, 4); + csd->write_partial = unstuff_bits(resp, 21, 1); - if (UNSTUFF_BITS(resp, 46, 1)) { + if (unstuff_bits(resp, 46, 1)) { csd->erase_size = 1; } else if (csd->write_blkbits >= 9) { - csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1; + csd->erase_size = unstuff_bits(resp, 39, 7) + 1; csd->erase_size <<= csd->write_blkbits - 9; } - if (UNSTUFF_BITS(resp, 13, 1)) + if (unstuff_bits(resp, 13, 1)) mmc_card_set_readonly(card); break; case 1: @@ -169,17 +155,17 @@ static int mmc_decode_csd(struct mmc_card *card) csd->taac_ns = 0; /* Unused */ csd->taac_clks = 0; /* Unused */ - m = 
UNSTUFF_BITS(resp, 99, 4); - e = UNSTUFF_BITS(resp, 96, 3); + m = unstuff_bits(resp, 99, 4); + e = unstuff_bits(resp, 96, 3); csd->max_dtr = tran_exp[e] * tran_mant[m]; - csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); - csd->c_size = UNSTUFF_BITS(resp, 48, 22); + csd->cmdclass = unstuff_bits(resp, 84, 12); + csd->c_size = unstuff_bits(resp, 48, 22); /* SDXC cards have a minimum C_SIZE of 0x00FFFF */ if (csd->c_size >= 0xFFFF) mmc_card_set_ext_capacity(card); - m = UNSTUFF_BITS(resp, 48, 22); + m = unstuff_bits(resp, 48, 22); csd->capacity = (1 + m) << 10; csd->read_blkbits = 9; @@ -191,7 +177,7 @@ static int mmc_decode_csd(struct mmc_card *card) csd->write_partial = 0; csd->erase_size = 1; - if (UNSTUFF_BITS(resp, 13, 1)) + if (unstuff_bits(resp, 13, 1)) mmc_card_set_readonly(card); break; default: @@ -217,33 +203,33 @@ static int mmc_decode_scr(struct mmc_card *card) resp[3] = card->raw_scr[1]; resp[2] = card->raw_scr[0]; - scr_struct = UNSTUFF_BITS(resp, 60, 4); + scr_struct = unstuff_bits(resp, 60, 4); if (scr_struct != 0) { pr_err("%s: unrecognised SCR structure version %d\n", mmc_hostname(card->host), scr_struct); return -EINVAL; } - scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); - scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); + scr->sda_vsn = unstuff_bits(resp, 56, 4); + scr->bus_widths = unstuff_bits(resp, 48, 4); if (scr->sda_vsn == SCR_SPEC_VER_2) /* Check if Physical Layer Spec v3.0 is supported */ - scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1); + scr->sda_spec3 = unstuff_bits(resp, 47, 1); if (scr->sda_spec3) { - scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1); - scr->sda_specx = UNSTUFF_BITS(resp, 38, 4); + scr->sda_spec4 = unstuff_bits(resp, 42, 1); + scr->sda_specx = unstuff_bits(resp, 38, 4); } - if (UNSTUFF_BITS(resp, 55, 1)) + if (unstuff_bits(resp, 55, 1)) card->erased_byte = 0xFF; else card->erased_byte = 0x0; if (scr->sda_spec4) - scr->cmds = UNSTUFF_BITS(resp, 32, 4); + scr->cmds = unstuff_bits(resp, 32, 4); else if (scr->sda_spec3) - scr->cmds = UNSTUFF_BITS(resp, 32, 2); + scr->cmds = unstuff_bits(resp, 32, 2); /* SD Spec says: any SD Card shall set at least bits 0 and 2 */ if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) || @@ -289,17 +275,17 @@ static int mmc_read_ssr(struct mmc_card *card) kfree(raw_ssr); /* - * UNSTUFF_BITS only works with four u32s so we have to offset the + * unstuff_bits only works with four u32s so we have to offset the * bitfield positions accordingly. */ - au = UNSTUFF_BITS(card->raw_ssr, 428 - 384, 4); + au = unstuff_bits(card->raw_ssr, 428 - 384, 4); if (au) { if (au <= 9 || card->scr.sda_spec3) { card->ssr.au = sd_au_size[au]; - es = UNSTUFF_BITS(card->raw_ssr, 408 - 384, 16); - et = UNSTUFF_BITS(card->raw_ssr, 402 - 384, 6); + es = unstuff_bits(card->raw_ssr, 408 - 384, 16); + et = unstuff_bits(card->raw_ssr, 402 - 384, 6); if (es && et) { - eo = UNSTUFF_BITS(card->raw_ssr, 400 - 384, 2); + eo = unstuff_bits(card->raw_ssr, 400 - 384, 2); card->ssr.erase_timeout = (et * 1000) / es; card->ssr.erase_offset = eo * 1000; } @@ -313,7 +299,7 @@ static int mmc_read_ssr(struct mmc_card *card) * starting SD5.1 discard is supported if DISCARD_SUPPORT (b313) is set */ resp[3] = card->raw_ssr[6]; - discard_support = UNSTUFF_BITS(resp, 313 - 288, 1); + discard_support = unstuff_bits(resp, 313 - 288, 1); card->erase_arg = (card->scr.sda_specx && discard_support) ? SD_DISCARD_ARG : SD_ERASE_ARG; @@ -346,7 +332,7 @@ static int mmc_read_switch(struct mmc_card *card) * The argument does not matter, as the support bits do not * change with the arguments. 
*/ - err = mmc_sd_switch(card, 0, 0, 0, status); + err = mmc_sd_switch(card, SD_SWITCH_CHECK, 0, 0, status); if (err) { /* * If the host or the card can't do the switch, @@ -402,7 +388,8 @@ int mmc_sd_switch_hs(struct mmc_card *card) if (!status) return -ENOMEM; - err = mmc_sd_switch(card, 1, 0, HIGH_SPEED_BUS_SPEED, status); + err = mmc_sd_switch(card, SD_SWITCH_SET, 0, + HIGH_SPEED_BUS_SPEED, status); if (err) goto out; @@ -434,7 +421,8 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status) card_drv_type, &drv_type); if (drive_strength) { - err = mmc_sd_switch(card, 1, 2, drive_strength, status); + err = mmc_sd_switch(card, SD_SWITCH_SET, 2, + drive_strength, status); if (err) return err; if ((status[15] & 0xF) != drive_strength) { @@ -514,7 +502,7 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) return 0; } - err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status); + err = mmc_sd_switch(card, SD_SWITCH_SET, 0, card->sd_bus_speed, status); if (err) return err; @@ -605,7 +593,8 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status) current_limit = SD_SET_CURRENT_LIMIT_200; if (current_limit != SD_SET_CURRENT_NO_CHANGE) { - err = mmc_sd_switch(card, 1, 3, current_limit, status); + err = mmc_sd_switch(card, SD_SWITCH_SET, 3, + current_limit, status); if (err) return err; diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 8b9b34286ef3e7..f93c392040ae7a 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -336,14 +336,13 @@ int mmc_app_send_scr(struct mmc_card *card) return 0; } -int mmc_sd_switch(struct mmc_card *card, int mode, int group, +int mmc_sd_switch(struct mmc_card *card, bool mode, int group, u8 value, u8 *resp) { u32 cmd_args; /* NOTE: caller guarantees resp is heap-allocated */ - mode = !!mode; value &= 0xF; cmd_args = mode << 31 | 0x00FFFFFF; cmd_args &= ~(0xF << (group * 4)); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index eb3ecfe055910d..7199cb0bd0b9e7 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -252,6 +252,18 @@ config MMC_SDHCI_OF_SPARX5 If unsure, say N. +config MMC_SDHCI_OF_MA35D1 + tristate "SDHCI OF support for the MA35D1 SDHCI controller" + depends on ARCH_MA35 || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + help + This selects the MA35D1 Secure Digital Host Controller Interface. + The controller supports SD/MMC/SDIO devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
+ config MMC_SDHCI_CADENCE tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller" depends on MMC_SDHCI_PLTFM diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index f53f86d200ac28..3ccffebbe59b91 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -88,6 +88,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o obj-$(CONFIG_MMC_SDHCI_OF_SPARX5) += sdhci-of-sparx5.o +obj-$(CONFIG_MMC_SDHCI_OF_MA35D1) += sdhci-of-ma35d1.o obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o obj-$(CONFIG_MMC_SDHCI_NPCM) += sdhci-npcm.o diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 6490df54a6f55b..cdbd2edf4b2e7c 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -37,7 +37,7 @@ #include #include -#include +#include #define ATMCI_MAX_NR_SLOTS 2 diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c index a02da26a1efd1e..178277d90c31c5 100644 --- a/drivers/mmc/host/cqhci-core.c +++ b/drivers/mmc/host/cqhci-core.c @@ -33,6 +33,11 @@ struct cqhci_slot { #define CQHCI_HOST_OTHER BIT(4) }; +static bool cqhci_halted(struct cqhci_host *cq_host) +{ + return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT; +} + static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag) { return cq_host->desc_base + (tag * cq_host->slot_sz); @@ -282,7 +287,7 @@ static void __cqhci_enable(struct cqhci_host *cq_host) cqhci_writel(cq_host, cqcfg, CQHCI_CFG); - if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) + if (cqhci_halted(cq_host)) cqhci_writel(cq_host, 0, CQHCI_CTL); mmc->cqe_on = true; @@ -617,7 +622,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) cqhci_writel(cq_host, 0, CQHCI_CTL); mmc->cqe_on = true; pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc)); - if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) { + if (cqhci_halted(cq_host)) { pr_err("%s: cqhci: CQE failed to exit halt state\n", mmc_hostname(mmc)); } @@ -953,11 +958,6 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout) return ret; } -static bool cqhci_halted(struct cqhci_host *cq_host) -{ - return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT; -} - static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout) { struct cqhci_host *cq_host = mmc->cqe_private; diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index b07190ba4b7acb..f96260fd143b4c 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -15,7 +15,17 @@ #include "dw_mmc.h" #include "dw_mmc-pltfm.h" -#define RK3288_CLKGEN_DIV 2 +#define RK3288_CLKGEN_DIV 2 +#define SDMMC_TIMING_CON0 0x130 +#define SDMMC_TIMING_CON1 0x134 +#define ROCKCHIP_MMC_DELAY_SEL BIT(10) +#define ROCKCHIP_MMC_DEGREE_MASK 0x3 +#define ROCKCHIP_MMC_DEGREE_OFFSET 1 +#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2 +#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET) +#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60 +#define HIWORD_UPDATE(val, mask, shift) \ + ((val) << (shift) | (mask) << ((shift) + 16)) static const unsigned int freqs[] = { 100000, 200000, 300000, 400000 }; @@ -24,8 +34,143 @@ struct dw_mci_rockchip_priv_data { struct clk *sample_clk; int default_sample_phase; int num_phases; + bool internal_phase; }; +/* + * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to + * simplify calculations. 
So 45degs could be anywhere between 33deg and 57.8deg. + */ +static int rockchip_mmc_get_internal_phase(struct dw_mci *host, bool sample) +{ + unsigned long rate = clk_get_rate(host->ciu_clk); + u32 raw_value; + u16 degrees; + u32 delay_num = 0; + + /* Constant signal, no measurable phase shift */ + if (!rate) + return 0; + + if (sample) + raw_value = mci_readl(host, TIMING_CON1); + else + raw_value = mci_readl(host, TIMING_CON0); + + raw_value >>= ROCKCHIP_MMC_DEGREE_OFFSET; + degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90; + + if (raw_value & ROCKCHIP_MMC_DELAY_SEL) { + /* degrees/delaynum * 1000000 */ + unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) * + 36 * (rate / 10000); + + delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK); + delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET; + degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000); + } + + return degrees % 360; +} + +static int rockchip_mmc_get_phase(struct dw_mci *host, bool sample) +{ + struct dw_mci_rockchip_priv_data *priv = host->priv; + struct clk *clock = sample ? priv->sample_clk : priv->drv_clk; + + if (priv->internal_phase) + return rockchip_mmc_get_internal_phase(host, sample); + else + return clk_get_phase(clock); +} + +static int rockchip_mmc_set_internal_phase(struct dw_mci *host, bool sample, int degrees) +{ + unsigned long rate = clk_get_rate(host->ciu_clk); + u8 nineties, remainder; + u8 delay_num; + u32 raw_value; + u32 delay; + + /* + * The below calculation is based on the output clock from + * MMC host to the card, which expects the phase clock inherits + * the clock rate from its parent, namely the output clock + * provider of MMC host. However, things may go wrong if + * (1) It is orphan. + * (2) It is assigned to the wrong parent. + * + * This check help debug the case (1), which seems to be the + * most likely problem we often face and which makes it difficult + * for people to debug unstable mmc tuning results. + */ + if (!rate) { + dev_err(host->dev, "%s: invalid clk rate\n", __func__); + return -EINVAL; + } + + nineties = degrees / 90; + remainder = (degrees % 90); + + /* + * Due to the inexact nature of the "fine" delay, we might + * actually go non-monotonic. We don't go _too_ monotonic + * though, so we should be OK. Here are options of how we may + * work: + * + * Ideally we end up with: + * 1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0 + * + * On one extreme (if delay is actually 44ps): + * .73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0 + * The other (if delay is actually 77ps): + * 1.3, 2.6, ..., 88.6. 89.8, ..., 114.0, 90 + * + * It's possible we might make a delay that is up to 25 + * degrees off from what we think we're making. That's OK + * though because we should be REALLY far from any bad range. + */ + + /* + * Convert to delay; do a little extra work to make sure we + * don't overflow 32-bit / 64-bit numbers. + */ + delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */ + delay *= remainder; + delay = DIV_ROUND_CLOSEST(delay, + (rate / 1000) * 36 * + (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10)); + + delay_num = (u8) min_t(u32, delay, 255); + + raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0; + raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET; + raw_value |= nineties; + + if (sample) + mci_writel(host, TIMING_CON1, HIWORD_UPDATE(raw_value, 0x07ff, 1)); + else + mci_writel(host, TIMING_CON0, HIWORD_UPDATE(raw_value, 0x07ff, 1)); + + dev_dbg(host->dev, "set %s_phase(%d) delay_nums=%u actual_degrees=%d\n", + sample ? 
"sample" : "drv", degrees, delay_num, + rockchip_mmc_get_phase(host, sample) + ); + + return 0; +} + +static int rockchip_mmc_set_phase(struct dw_mci *host, bool sample, int degrees) +{ + struct dw_mci_rockchip_priv_data *priv = host->priv; + struct clk *clock = sample ? priv->sample_clk : priv->drv_clk; + + if (priv->internal_phase) + return rockchip_mmc_set_internal_phase(host, sample, degrees); + else + return clk_set_phase(clock, degrees); +} + static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) { struct dw_mci_rockchip_priv_data *priv = host->priv; @@ -64,7 +209,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) /* Make sure we use phases which we can enumerate with */ if (!IS_ERR(priv->sample_clk) && ios->timing <= MMC_TIMING_SD_HS) - clk_set_phase(priv->sample_clk, priv->default_sample_phase); + rockchip_mmc_set_phase(host, true, priv->default_sample_phase); /* * Set the drive phase offset based on speed mode to achieve hold times. @@ -127,7 +272,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) break; } - clk_set_phase(priv->drv_clk, phase); + rockchip_mmc_set_phase(host, false, phase); } } @@ -151,6 +296,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode) int longest_range_len = -1; int longest_range = -1; int middle_phase; + int phase; if (IS_ERR(priv->sample_clk)) { dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n"); @@ -164,8 +310,10 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode) /* Try each phase and extract good ranges */ for (i = 0; i < priv->num_phases; ) { - clk_set_phase(priv->sample_clk, - TUNING_ITERATION_TO_PHASE(i, priv->num_phases)); + rockchip_mmc_set_phase(host, true, + TUNING_ITERATION_TO_PHASE( + i, + priv->num_phases)); v = !mmc_send_tuning(mmc, opcode, NULL); @@ -211,7 +359,8 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode) } if (ranges[0].start == 0 && ranges[0].end == priv->num_phases - 1) { - clk_set_phase(priv->sample_clk, priv->default_sample_phase); + rockchip_mmc_set_phase(host, true, priv->default_sample_phase); + dev_info(host->dev, "All phases work, using default phase %d.", priv->default_sample_phase); goto free; @@ -248,19 +397,17 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode) middle_phase = ranges[longest_range].start + longest_range_len / 2; middle_phase %= priv->num_phases; - dev_info(host->dev, "Successfully tuned phase to %d\n", - TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases)); + phase = TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases); + dev_info(host->dev, "Successfully tuned phase to %d\n", phase); - clk_set_phase(priv->sample_clk, - TUNING_ITERATION_TO_PHASE(middle_phase, - priv->num_phases)); + rockchip_mmc_set_phase(host, true, phase); free: kfree(ranges); return ret; } -static int dw_mci_rk3288_parse_dt(struct dw_mci *host) +static int dw_mci_common_parse_dt(struct dw_mci *host) { struct device_node *np = host->dev->of_node; struct dw_mci_rockchip_priv_data *priv; @@ -270,13 +417,29 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host) return -ENOMEM; if (of_property_read_u32(np, "rockchip,desired-num-phases", - &priv->num_phases)) + &priv->num_phases)) priv->num_phases = 360; if (of_property_read_u32(np, "rockchip,default-sample-phase", - &priv->default_sample_phase)) + &priv->default_sample_phase)) priv->default_sample_phase = 0; + host->priv = priv; + + return 0; +} + +static int 
dw_mci_rk3288_parse_dt(struct dw_mci *host) +{ + struct dw_mci_rockchip_priv_data *priv; + int err; + + err = dw_mci_common_parse_dt(host); + if (err) + return err; + + priv = host->priv; + priv->drv_clk = devm_clk_get(host->dev, "ciu-drive"); if (IS_ERR(priv->drv_clk)) dev_dbg(host->dev, "ciu-drive not available\n"); @@ -285,7 +448,21 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host) if (IS_ERR(priv->sample_clk)) dev_dbg(host->dev, "ciu-sample not available\n"); - host->priv = priv; + priv->internal_phase = false; + + return 0; +} + +static int dw_mci_rk3576_parse_dt(struct dw_mci *host) +{ + struct dw_mci_rockchip_priv_data *priv; + int err = dw_mci_common_parse_dt(host); + if (err) + return err; + + priv = host->priv; + + priv->internal_phase = true; return 0; } @@ -331,11 +508,21 @@ static const struct dw_mci_drv_data rk3288_drv_data = { .init = dw_mci_rockchip_init, }; +static const struct dw_mci_drv_data rk3576_drv_data = { + .common_caps = MMC_CAP_CMD23, + .set_ios = dw_mci_rk3288_set_ios, + .execute_tuning = dw_mci_rk3288_execute_tuning, + .parse_dt = dw_mci_rk3576_parse_dt, + .init = dw_mci_rockchip_init, +}; + static const struct of_device_id dw_mci_rockchip_match[] = { { .compatible = "rockchip,rk2928-dw-mshc", .data = &rk2928_drv_data }, { .compatible = "rockchip,rk3288-dw-mshc", .data = &rk3288_drv_data }, + { .compatible = "rockchip,rk3576-dw-mshc", + .data = &rk3576_drv_data }, {}, }; MODULE_DEVICE_TABLE(of, dw_mci_rockchip_match); diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index c9caa1ece7ef91..8fee7052f2ef4f 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -26,7 +26,7 @@ #include #include -#include +#include /* NOTES: diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index f5da7f9baa52d4..9dc51859c2e51e 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -213,7 +213,8 @@ static int sdmmc_idma_setup(struct mmci_host *host) host->mmc->max_seg_size = host->mmc->max_req_size; } - return dma_set_max_seg_size(dev, host->mmc->max_seg_size); + dma_set_max_seg_size(dev, host->mmc->max_seg_size); + return 0; } static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index e386f78e326790..89018b6c97b9a7 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -795,14 +795,13 @@ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data) static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks) { struct mmc_host *mmc = mmc_from_priv(host); - u64 timeout, clk_ns; - u32 mode = 0; + u64 timeout; + u32 clk_ns, mode = 0; if (mmc->actual_clock == 0) { timeout = 0; } else { - clk_ns = 1000000000ULL; - do_div(clk_ns, mmc->actual_clock); + clk_ns = 1000000000U / mmc->actual_clock; timeout = ns + clk_ns - 1; do_div(timeout, clk_ns); timeout += clks; @@ -831,7 +830,7 @@ static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks) timeout = msdc_timeout_cal(host, ns, clks); sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, - (u32)(timeout > 255 ? 255 : timeout)); + min_t(u32, timeout, 255)); } static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks) @@ -840,7 +839,7 @@ static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks) timeout = msdc_timeout_cal(host, ns, clks); sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC, - (u32)(timeout > 8191 ? 
8191 : timeout)); + min_t(u32, timeout, 8191)); } static void msdc_gate_clock(struct msdc_host *host) diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index af7f21888e273b..d859b1a3ab7157 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include "mvsdio.h" diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index caf1d2e2334321..1dcaa050f26486 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -285,6 +285,7 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = { { .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, }, { .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, }, { .compatible = "renesas,sdhi-r9a09g011", .data = &of_rzg2l_compatible, }, + { .compatible = "renesas,sdhi-r9a09g057", .data = &of_rzg2l_compatible, }, { .compatible = "renesas,rzg2l-sdhi", .data = &of_rzg2l_compatible, }, { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, { .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, }, diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 7dfe7c4e007704..20e79109be16d6 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include struct realtek_pci_sdmmc { diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index ded9b6849e35e9..4e86f0a705b60a 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -21,7 +21,7 @@ #include #include -#include +#include #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ defined(CONFIG_MMC_REALTEK_USB_MODULE)) diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index e79aa4b3b6c360..8999b97263af99 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -108,18 +109,20 @@ #define DLL_LOCK_WO_TMOUT(x) \ ((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \ (((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0)) -#define RK35xx_MAX_CLKS 3 /* PHY register area pointer */ #define DWC_MSHC_PTR_PHY_R 0x300 /* PHY general configuration */ -#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00) -#define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */ -#define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */ -#define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */ -#define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */ -#define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */ +#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00) +#define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */ +#define PHY_CNFG_PHY_PWRGOOD_MASK BIT_MASK(1) /* bit [1] */ +#define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */ +#define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */ +#define PHY_CNFG_PAD_SP_SG2042 0x09 /* PMOS TX drive strength for SG2042 */ +#define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */ +#define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */ +#define PHY_CNFG_PAD_SN_SG2042 0x08 /* NMOS TX drive strength for SG2042 */ /* PHY command/response pad settings */ #define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04) @@ -148,10 +151,12 @@ #define 
PHY_PAD_TXSLEW_CTRL_P 0x3 /* Slew control for P-Type pad TX */ #define PHY_PAD_TXSLEW_CTRL_N_MASK GENMASK(12, 9) /* bits [12:9] */ #define PHY_PAD_TXSLEW_CTRL_N 0x3 /* Slew control for N-Type pad TX */ +#define PHY_PAD_TXSLEW_CTRL_N_SG2042 0x2 /* Slew control for N-Type pad TX for SG2042 */ /* PHY CLK delay line settings */ #define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d) -#define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */ +#define PHY_SDCLKDL_CNFG_EXTDLY_EN BIT(0) +#define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */ /* PHY CLK delay line delay code */ #define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e) @@ -159,10 +164,14 @@ #define PHY_SDCLKDL_DC_DEFAULT 0x32 /* default delay code */ #define PHY_SDCLKDL_DC_HS400 0x18 /* delay code for HS400 mode */ +#define PHY_SMPLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x20) +#define PHY_SMPLDL_CNFG_BYPASS_EN BIT(1) + /* PHY drift_cclk_rx delay line configuration setting */ #define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21) #define PHY_ATDL_CNFG_INPSEL_MASK GENMASK(3, 2) /* bits [3:2] */ #define PHY_ATDL_CNFG_INPSEL 0x3 /* delay line input source */ +#define PHY_ATDL_CNFG_INPSEL_SG2042 0x2 /* delay line input source for SG2042 */ /* PHY DLL control settings */ #define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24) @@ -193,29 +202,69 @@ SDHCI_TRNS_BLK_CNT_EN | \ SDHCI_TRNS_DMA) +/* SMC call for BlueField-3 eMMC RST_N */ +#define BLUEFIELD_SMC_SET_EMMC_RST_N 0x82000007 + enum dwcmshc_rk_type { DWCMSHC_RK3568, DWCMSHC_RK3588, }; struct rk35xx_priv { - /* Rockchip specified optional clocks */ - struct clk_bulk_data rockchip_clks[RK35xx_MAX_CLKS]; struct reset_control *reset; enum dwcmshc_rk_type devtype; u8 txclk_tapnum; }; +#define DWCMSHC_MAX_OTHER_CLKS 3 + struct dwcmshc_priv { struct clk *bus_clk; int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA1 reg */ int vendor_specific_area2; /* P_VENDOR_SPECIFIC_AREA2 reg */ + int num_other_clks; + struct clk_bulk_data other_clks[DWCMSHC_MAX_OTHER_CLKS]; + void *priv; /* pointer to SoC private stuff */ u16 delay_line; u16 flags; }; +struct dwcmshc_pltfm_data { + const struct sdhci_pltfm_data pdata; + int (*init)(struct device *dev, struct sdhci_host *host, struct dwcmshc_priv *dwc_priv); + void (*postinit)(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv); +}; + +static int dwcmshc_get_enable_other_clks(struct device *dev, + struct dwcmshc_priv *priv, + int num_clks, + const char * const clk_ids[]) +{ + int err; + + if (num_clks > DWCMSHC_MAX_OTHER_CLKS) + return -EINVAL; + + for (int i = 0; i < num_clks; i++) + priv->other_clks[i].id = clk_ids[i]; + + err = devm_clk_bulk_get_optional(dev, num_clks, priv->other_clks); + if (err) { + dev_err(dev, "failed to get clocks %d\n", err); + return err; + } + + err = clk_bulk_prepare_enable(num_clks, priv->other_clks); + if (err) + dev_err(dev, "failed to enable clocks %d\n", err); + + priv->num_other_clks = num_clks; + + return err; +} + /* * If DMA addr spans 128MB boundary, we split the DMA transfer into two * so that each DMA transfer doesn't exceed the boundary. 
@@ -681,6 +730,63 @@ static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask) sdhci_reset(host, mask); } +static int dwcmshc_rk35xx_init(struct device *dev, struct sdhci_host *host, + struct dwcmshc_priv *dwc_priv) +{ + static const char * const clk_ids[] = {"axi", "block", "timer"}; + struct rk35xx_priv *priv; + int err; + + priv = devm_kzalloc(dev, sizeof(struct rk35xx_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (of_device_is_compatible(dev->of_node, "rockchip,rk3588-dwcmshc")) + priv->devtype = DWCMSHC_RK3588; + else + priv->devtype = DWCMSHC_RK3568; + + priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc)); + if (IS_ERR(priv->reset)) { + err = PTR_ERR(priv->reset); + dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err); + return err; + } + + err = dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv, + ARRAY_SIZE(clk_ids), clk_ids); + if (err) + return err; + + if (of_property_read_u8(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum", + &priv->txclk_tapnum)) + priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT; + + /* Disable cmd conflict check */ + sdhci_writel(host, 0x0, dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3); + /* Reset previous settings */ + sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK); + sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN); + + dwc_priv->priv = priv; + + return 0; +} + +static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) +{ + /* + * Don't support highspeed bus mode with low clk speed as we + * cannot use DLL for this condition. + */ + if (host->mmc->f_max <= 52000000) { + dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n", + host->mmc->f_max); + host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400); + host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR); + } +} + static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -755,6 +861,35 @@ static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask) } } +static int th1520_init(struct device *dev, + struct sdhci_host *host, + struct dwcmshc_priv *dwc_priv) +{ + dwc_priv->delay_line = PHY_SDCLKDL_DC_DEFAULT; + + if (device_property_read_bool(dev, "mmc-ddr-1_8v") || + device_property_read_bool(dev, "mmc-hs200-1_8v") || + device_property_read_bool(dev, "mmc-hs400-1_8v")) + dwc_priv->flags |= FLAG_IO_FIXED_1V8; + else + dwc_priv->flags &= ~FLAG_IO_FIXED_1V8; + + /* + * start_signal_voltage_switch() will try 3.3V first + * then 1.8V. Use SDHCI_SIGNALING_180 rather than + * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V + * in sdhci_start_signal_voltage_switch(). 
+ */ + if (dwc_priv->flags & FLAG_IO_FIXED_1V8) { + host->flags &= ~SDHCI_SIGNALING_330; + host->flags |= SDHCI_SIGNALING_180; + } + + sdhci_enable_v4_mode(host); + + return 0; +} + static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -891,6 +1026,85 @@ static int cv18xx_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) return ret; } +static inline void sg2042_sdhci_phy_init(struct sdhci_host *host) +{ + u32 val; + + /* Asset phy reset & set tx drive strength */ + val = sdhci_readl(host, PHY_CNFG_R); + val &= ~PHY_CNFG_RSTN_DEASSERT; + val |= FIELD_PREP(PHY_CNFG_PHY_PWRGOOD_MASK, 1); + val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP_SG2042); + val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN_SG2042); + sdhci_writel(host, val, PHY_CNFG_R); + + /* Configure phy pads */ + val = PHY_PAD_RXSEL_3V3; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042); + sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); + sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); + sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); + + val = PHY_PAD_RXSEL_3V3; + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042); + sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); + + val = PHY_PAD_RXSEL_3V3; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042); + sdhci_writew(host, val, PHY_STBPAD_CNFG_R); + + /* Configure delay line */ + /* Enable fixed delay */ + sdhci_writeb(host, PHY_SDCLKDL_CNFG_EXTDLY_EN, PHY_SDCLKDL_CNFG_R); + /* + * Set delay line. + * Its recommended that bit UPDATE_DC[4] is 1 when SDCLKDL_DC is being written. + * Ensure UPDATE_DC[4] is '0' when not updating code. 
+ */ + val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); + val |= PHY_SDCLKDL_CNFG_UPDATE; + sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); + /* Add 10 * 70ps = 0.7ns for output delay */ + sdhci_writeb(host, 10, PHY_SDCLKDL_DC_R); + val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); + val &= ~(PHY_SDCLKDL_CNFG_UPDATE); + sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); + + /* Set SMPLDL_CNFG, Bypass */ + sdhci_writeb(host, PHY_SMPLDL_CNFG_BYPASS_EN, PHY_SMPLDL_CNFG_R); + + /* Set ATDL_CNFG, tuning clk not use for init */ + val = FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL_SG2042); + sdhci_writeb(host, val, PHY_ATDL_CNFG_R); + + /* Deasset phy reset */ + val = sdhci_readl(host, PHY_CNFG_R); + val |= PHY_CNFG_RSTN_DEASSERT; + sdhci_writel(host, val, PHY_CNFG_R); +} + +static void sg2042_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + sdhci_reset(host, mask); + + if (mask & SDHCI_RESET_ALL) + sg2042_sdhci_phy_init(host); +} + +static int sg2042_init(struct device *dev, struct sdhci_host *host, + struct dwcmshc_priv *dwc_priv) +{ + static const char * const clk_ids[] = {"timer"}; + + return dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv, + ARRAY_SIZE(clk_ids), clk_ids); +} + static const struct sdhci_ops sdhci_dwcmshc_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, @@ -901,6 +1115,29 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = { .irq = dwcmshc_cqe_irq_handler, }; +#ifdef CONFIG_ACPI +static void dwcmshc_bf3_hw_reset(struct sdhci_host *host) +{ + struct arm_smccc_res res = { 0 }; + + arm_smccc_smc(BLUEFIELD_SMC_SET_EMMC_RST_N, 0, 0, 0, 0, 0, 0, 0, &res); + + if (res.a0) + pr_err("%s: RST_N failed.\n", mmc_hostname(host->mmc)); +} + +static const struct sdhci_ops sdhci_dwcmshc_bf3_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = dwcmshc_set_uhs_signaling, + .get_max_clock = dwcmshc_get_max_clock, + .reset = sdhci_reset, + .adma_write_desc = dwcmshc_adma_write_desc, + .irq = dwcmshc_cqe_irq_handler, + .hw_reset = dwcmshc_bf3_hw_reset, +}; +#endif + static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = { .set_clock = dwcmshc_rk3568_set_clock, .set_bus_width = sdhci_set_bus_width, @@ -932,39 +1169,71 @@ static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = { .platform_execute_tuning = cv18xx_sdhci_execute_tuning, }; -static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { - .ops = &sdhci_dwcmshc_ops, - .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +static const struct sdhci_ops sdhci_dwcmshc_sg2042_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = dwcmshc_set_uhs_signaling, + .get_max_clock = dwcmshc_get_max_clock, + .reset = sg2042_sdhci_reset, + .adma_write_desc = dwcmshc_adma_write_desc, + .platform_execute_tuning = th1520_execute_tuning, +}; + +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }, }; #ifdef CONFIG_ACPI -static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = { - .ops = &sdhci_dwcmshc_ops, - .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | - SDHCI_QUIRK2_ACMD23_BROKEN, +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_bf3_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_bf3_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN 
| + SDHCI_QUIRK2_ACMD23_BROKEN, + }, }; #endif -static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = { - .ops = &sdhci_dwcmshc_rk35xx_ops, - .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | - SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_rk35xx_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, + }, + .init = dwcmshc_rk35xx_init, + .postinit = dwcmshc_rk35xx_postinit, }; -static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = { - .ops = &sdhci_dwcmshc_th1520_ops, - .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_th1520_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }, + .init = th1520_init, }; -static const struct sdhci_pltfm_data sdhci_dwcmshc_cv18xx_pdata = { - .ops = &sdhci_dwcmshc_cv18xx_ops, - .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_cv18xx_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_cv18xx_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }, +}; + +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_sg2042_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_sg2042_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }, + .init = sg2042_init, }; static const struct cqhci_host_ops dwcmshc_cqhci_ops = { @@ -1034,61 +1303,6 @@ static void dwcmshc_cqhci_init(struct sdhci_host *host, struct platform_device * host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD); } -static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) -{ - int err; - struct rk35xx_priv *priv = dwc_priv->priv; - - priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc)); - if (IS_ERR(priv->reset)) { - err = PTR_ERR(priv->reset); - dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err); - return err; - } - - priv->rockchip_clks[0].id = "axi"; - priv->rockchip_clks[1].id = "block"; - priv->rockchip_clks[2].id = "timer"; - err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK35xx_MAX_CLKS, - priv->rockchip_clks); - if (err) { - dev_err(mmc_dev(host->mmc), "failed to get clocks %d\n", err); - return err; - } - - err = clk_bulk_prepare_enable(RK35xx_MAX_CLKS, priv->rockchip_clks); - if (err) { - dev_err(mmc_dev(host->mmc), "failed to enable clocks %d\n", err); - return err; - } - - if (of_property_read_u8(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum", - &priv->txclk_tapnum)) - priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT; - - /* Disable cmd conflict check */ - sdhci_writel(host, 0x0, dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3); - /* Reset previous settings */ - sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK); - sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN); - - return 0; -} - -static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) -{ - /* - * Don't support highspeed bus mode with low clk speed as we - * cannot use DLL for this condition. 
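The Rockchip init code being moved here, and the generic other_clks handling that replaces it later in the patch, both rely on the all-or-nothing behaviour of bulk clock enabling: either every clock in the array ends up enabled, or everything already enabled is rolled back. The following is a minimal self-contained sketch of that rollback pattern in plain C; the fake_clk structure and bulk_enable() helper are invented for illustration and are not the kernel clk API.

#include <stdio.h>

struct fake_clk {
        const char *id;
        int enabled;            /* stands in for the real clock state */
        int fail_on_enable;     /* force a failure to exercise the rollback */
};

/* Enable clks[0..num); on failure, disable the already-enabled ones in
 * reverse order so the caller sees an all-or-nothing result. */
static int bulk_enable(int num, struct fake_clk *clks)
{
        int i;

        for (i = 0; i < num; i++) {
                if (clks[i].fail_on_enable) {
                        while (--i >= 0)
                                clks[i].enabled = 0;
                        return -1;
                }
                clks[i].enabled = 1;
        }
        return 0;
}

int main(void)
{
        struct fake_clk clks[] = {
                { .id = "axi" },
                { .id = "block" },
                { .id = "timer", .fail_on_enable = 1 },
        };

        if (bulk_enable(3, clks))
                printf("enable failed, previously enabled clocks rolled back\n");
        return 0;
}

The same pairing (enable on resume, disable on suspend and in the error paths) is what lets the driver stop tracking which variant owns which clocks.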
- */ - if (host->mmc->f_max <= 52000000) { - dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n", - host->mmc->f_max); - host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400); - host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR); - } -} - static const struct of_device_id sdhci_dwcmshc_dt_ids[] = { { .compatible = "rockchip,rk3588-dwcmshc", @@ -1114,6 +1328,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = { .compatible = "thead,th1520-dwcmshc", .data = &sdhci_dwcmshc_th1520_pdata, }, + { + .compatible = "sophgo,sg2042-dwcmshc", + .data = &sdhci_dwcmshc_sg2042_pdata, + }, {}, }; MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids); @@ -1135,8 +1353,7 @@ static int dwcmshc_probe(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host; struct sdhci_host *host; struct dwcmshc_priv *priv; - struct rk35xx_priv *rk_priv = NULL; - const struct sdhci_pltfm_data *pltfm_data; + const struct dwcmshc_pltfm_data *pltfm_data; int err; u32 extra, caps; @@ -1146,7 +1363,7 @@ static int dwcmshc_probe(struct platform_device *pdev) return -ENODEV; } - host = sdhci_pltfm_init(pdev, pltfm_data, + host = sdhci_pltfm_init(pdev, &pltfm_data->pdata, sizeof(struct dwcmshc_priv)); if (IS_ERR(host)) return PTR_ERR(host); @@ -1191,49 +1408,12 @@ static int dwcmshc_probe(struct platform_device *pdev) host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe; host->mmc_host_ops.execute_tuning = dwcmshc_execute_tuning; - if (pltfm_data == &sdhci_dwcmshc_rk35xx_pdata) { - rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk35xx_priv), GFP_KERNEL); - if (!rk_priv) { - err = -ENOMEM; - goto err_clk; - } - - if (of_device_is_compatible(pdev->dev.of_node, "rockchip,rk3588-dwcmshc")) - rk_priv->devtype = DWCMSHC_RK3588; - else - rk_priv->devtype = DWCMSHC_RK3568; - - priv->priv = rk_priv; - - err = dwcmshc_rk35xx_init(host, priv); + if (pltfm_data->init) { + err = pltfm_data->init(&pdev->dev, host, priv); if (err) goto err_clk; } - if (pltfm_data == &sdhci_dwcmshc_th1520_pdata) { - priv->delay_line = PHY_SDCLKDL_DC_DEFAULT; - - if (device_property_read_bool(dev, "mmc-ddr-1_8v") || - device_property_read_bool(dev, "mmc-hs200-1_8v") || - device_property_read_bool(dev, "mmc-hs400-1_8v")) - priv->flags |= FLAG_IO_FIXED_1V8; - else - priv->flags &= ~FLAG_IO_FIXED_1V8; - - /* - * start_signal_voltage_switch() will try 3.3V first - * then 1.8V. Use SDHCI_SIGNALING_180 rather than - * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V - * in sdhci_start_signal_voltage_switch(). 
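The probe rework above stops comparing pltfm_data pointers against known variants and instead dispatches through optional per-variant hooks carried by the dwcmshc_pltfm_data descriptor. The sketch below is a simplified, self-contained model of that shape; the host_stub type and the two example hooks are invented for the illustration and do not reproduce the driver's real signatures.

#include <stdio.h>

struct host_stub { int f_max; };

struct variant_data {
        const char *name;
        int (*init)(struct host_stub *host);           /* optional, may be NULL */
        void (*postinit)(struct host_stub *host);      /* optional, may be NULL */
};

static int sg2042_like_init(struct host_stub *host)
{
        (void)host;
        printf("variant init: extra clocks and PHY setup\n");
        return 0;
}

static void rk35xx_like_postinit(struct host_stub *host)
{
        if (host->f_max <= 52000000)
                printf("variant postinit: dropping high-speed caps at %d Hz\n", host->f_max);
}

/* Generic probe path: call only the hooks a variant actually provides. */
static int probe_one(const struct variant_data *v, struct host_stub *host)
{
        if (v->init && v->init(host))
                return -1;
        if (v->postinit)
                v->postinit(host);
        return 0;
}

int main(void)
{
        struct host_stub host = { .f_max = 50000000 };
        const struct variant_data rk = { "rk35xx-like", NULL, rk35xx_like_postinit };
        const struct variant_data sg = { "sg2042-like", sg2042_like_init, NULL };

        probe_one(&rk, &host);
        probe_one(&sg, &host);
        return 0;
}

Adding a new SoC variant then only requires a new descriptor plus, when needed, its init/postinit callbacks, with no changes to the shared probe body.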
- */ - if (priv->flags & FLAG_IO_FIXED_1V8) { - host->flags &= ~SDHCI_SIGNALING_330; - host->flags |= SDHCI_SIGNALING_180; - } - - sdhci_enable_v4_mode(host); - } - #ifdef CONFIG_ACPI if (pltfm_data == &sdhci_dwcmshc_bf3_pdata) sdhci_enable_v4_mode(host); @@ -1261,8 +1441,8 @@ static int dwcmshc_probe(struct platform_device *pdev) dwcmshc_cqhci_init(host, pdev); } - if (rk_priv) - dwcmshc_rk35xx_postinit(host, priv); + if (pltfm_data->postinit) + pltfm_data->postinit(host, priv); err = __sdhci_add_host(host); if (err) @@ -1280,9 +1460,7 @@ static int dwcmshc_probe(struct platform_device *pdev) err_clk: clk_disable_unprepare(pltfm_host->clk); clk_disable_unprepare(priv->bus_clk); - if (rk_priv) - clk_bulk_disable_unprepare(RK35xx_MAX_CLKS, - rk_priv->rockchip_clks); + clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks); free_pltfm: sdhci_pltfm_free(pdev); return err; @@ -1304,7 +1482,6 @@ static void dwcmshc_remove(struct platform_device *pdev) struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); - struct rk35xx_priv *rk_priv = priv->priv; pm_runtime_get_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -1316,9 +1493,7 @@ static void dwcmshc_remove(struct platform_device *pdev) clk_disable_unprepare(pltfm_host->clk); clk_disable_unprepare(priv->bus_clk); - if (rk_priv) - clk_bulk_disable_unprepare(RK35xx_MAX_CLKS, - rk_priv->rockchip_clks); + clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks); sdhci_pltfm_free(pdev); } @@ -1328,7 +1503,6 @@ static int dwcmshc_suspend(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); - struct rk35xx_priv *rk_priv = priv->priv; int ret; pm_runtime_resume(dev); @@ -1347,9 +1521,7 @@ static int dwcmshc_suspend(struct device *dev) if (!IS_ERR(priv->bus_clk)) clk_disable_unprepare(priv->bus_clk); - if (rk_priv) - clk_bulk_disable_unprepare(RK35xx_MAX_CLKS, - rk_priv->rockchip_clks); + clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks); return ret; } @@ -1359,7 +1531,6 @@ static int dwcmshc_resume(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); - struct rk35xx_priv *rk_priv = priv->priv; int ret; ret = clk_prepare_enable(pltfm_host->clk); @@ -1372,29 +1543,24 @@ static int dwcmshc_resume(struct device *dev) goto disable_clk; } - if (rk_priv) { - ret = clk_bulk_prepare_enable(RK35xx_MAX_CLKS, - rk_priv->rockchip_clks); - if (ret) - goto disable_bus_clk; - } + ret = clk_bulk_prepare_enable(priv->num_other_clks, priv->other_clks); + if (ret) + goto disable_bus_clk; ret = sdhci_resume_host(host); if (ret) - goto disable_rockchip_clks; + goto disable_other_clks; if (host->mmc->caps2 & MMC_CAP2_CQE) { ret = cqhci_resume(host->mmc); if (ret) - goto disable_rockchip_clks; + goto disable_other_clks; } return 0; -disable_rockchip_clks: - if (rk_priv) - clk_bulk_disable_unprepare(RK35xx_MAX_CLKS, - rk_priv->rockchip_clks); +disable_other_clks: + clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks); disable_bus_clk: if (!IS_ERR(priv->bus_clk)) clk_disable_unprepare(priv->bus_clk); diff --git a/drivers/mmc/host/sdhci-of-ma35d1.c b/drivers/mmc/host/sdhci-of-ma35d1.c new file mode 100644 index 00000000000000..b84c2927bd4ade --- /dev/null +++ 
b/drivers/mmc/host/sdhci-of-ma35d1.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2024 Nuvoton Technology Corp. + * + * Author: Shan-Chun Hung + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" +#include "sdhci.h" + +#define MA35_SYS_MISCFCR0 0x070 +#define MA35_SDHCI_MSHCCTL 0x508 +#define MA35_SDHCI_MBIUCTL 0x510 + +#define MA35_SDHCI_CMD_CONFLICT_CHK BIT(0) +#define MA35_SDHCI_INCR_MSK GENMASK(3, 0) +#define MA35_SDHCI_INCR16 BIT(3) +#define MA35_SDHCI_INCR8 BIT(2) + +struct ma35_priv { + struct reset_control *rst; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_uhs; + struct pinctrl_state *pins_default; +}; + +struct ma35_restore_data { + u32 reg; + u32 width; +}; + +static const struct ma35_restore_data restore_data[] = { + { SDHCI_CLOCK_CONTROL, sizeof(u32)}, + { SDHCI_BLOCK_SIZE, sizeof(u32)}, + { SDHCI_INT_ENABLE, sizeof(u32)}, + { SDHCI_SIGNAL_ENABLE, sizeof(u32)}, + { SDHCI_AUTO_CMD_STATUS, sizeof(u32)}, + { SDHCI_HOST_CONTROL, sizeof(u32)}, + { SDHCI_TIMEOUT_CONTROL, sizeof(u8) }, + { MA35_SDHCI_MSHCCTL, sizeof(u16)}, + { MA35_SDHCI_MBIUCTL, sizeof(u16)}, +}; + +/* + * If DMA addr spans 128MB boundary, we split the DMA transfer into two + * so that each DMA transfer doesn't exceed the boundary. + */ +static void ma35_adma_write_desc(struct sdhci_host *host, void **desc, dma_addr_t addr, int len, + unsigned int cmd) +{ + int tmplen, offset; + + if (likely(!len || (ALIGN(addr, SZ_128M) == ALIGN(addr + len - 1, SZ_128M)))) { + sdhci_adma_write_desc(host, desc, addr, len, cmd); + return; + } + + offset = addr & (SZ_128M - 1); + tmplen = SZ_128M - offset; + sdhci_adma_write_desc(host, desc, addr, tmplen, cmd); + + addr += tmplen; + len -= tmplen; + sdhci_adma_write_desc(host, desc, addr, len, cmd); +} + +static void ma35_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u32 ctl; + + /* + * If the clock frequency exceeds MMC_HIGH_52_MAX_DTR, + * disable command conflict check. 
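Earlier in this new driver, ma35_adma_write_desc() splits any DMA descriptor that would cross a 128 MiB boundary into two. The boundary arithmetic is easy to check in isolation; here is a small stand-alone model of it in plain C. The write_desc() helper and its printed output are purely illustrative, only the power-of-two region math mirrors the driver.

#include <stdio.h>
#include <stdint.h>

#define SZ_128M (128u * 1024 * 1024)

/* Emit one or two chunks so that no chunk crosses a 128 MiB boundary. */
static void write_desc(uint64_t addr, uint32_t len)
{
        uint32_t offset, first;

        if (!len || (addr / SZ_128M) == ((addr + len - 1) / SZ_128M)) {
                printf("desc: addr=%#llx len=%u\n", (unsigned long long)addr, len);
                return;
        }

        offset = addr & (SZ_128M - 1);
        first = SZ_128M - offset;       /* bytes up to the boundary */
        printf("desc: addr=%#llx len=%u\n", (unsigned long long)addr, first);
        printf("desc: addr=%#llx len=%u\n",
               (unsigned long long)(addr + first), len - first);
}

int main(void)
{
        write_desc(0x07fffff0, 0x40);   /* crosses 128 MiB: two descriptors */
        write_desc(0x10000000, 0x1000); /* stays in one region: one descriptor */
        return 0;
}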
+ */ + ctl = sdhci_readw(host, MA35_SDHCI_MSHCCTL); + if (clock > MMC_HIGH_52_MAX_DTR) + ctl &= ~MA35_SDHCI_CMD_CONFLICT_CHK; + else + ctl |= MA35_SDHCI_CMD_CONFLICT_CHK; + sdhci_writew(host, ctl, MA35_SDHCI_MSHCCTL); + + sdhci_set_clock(host, clock); +} + +static int ma35_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct ma35_priv *priv = sdhci_pltfm_priv(pltfm_host); + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_180: + if (!IS_ERR(priv->pinctrl) && !IS_ERR(priv->pins_uhs)) + pinctrl_select_state(priv->pinctrl, priv->pins_uhs); + break; + case MMC_SIGNAL_VOLTAGE_330: + if (!IS_ERR(priv->pinctrl) && !IS_ERR(priv->pins_default)) + pinctrl_select_state(priv->pinctrl, priv->pins_default); + break; + default: + dev_err(mmc_dev(host->mmc), "Unsupported signal voltage!\n"); + return -EINVAL; + } + + return sdhci_start_signal_voltage_switch(mmc, ios); +} + +static void ma35_voltage_switch(struct sdhci_host *host) +{ + /* Wait for 5ms after set 1.8V signal enable bit */ + fsleep(5000); +} + +static int ma35_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct ma35_priv *priv = sdhci_pltfm_priv(pltfm_host); + int idx; + u32 regs[ARRAY_SIZE(restore_data)] = {}; + + /* + * Limitations require a reset of SD/eMMC before tuning and + * saving the registers before resetting, then restoring + * after the reset. + */ + for (idx = 0; idx < ARRAY_SIZE(restore_data); idx++) { + if (restore_data[idx].width == sizeof(u32)) + regs[idx] = sdhci_readl(host, restore_data[idx].reg); + else if (restore_data[idx].width == sizeof(u16)) + regs[idx] = sdhci_readw(host, restore_data[idx].reg); + else if (restore_data[idx].width == sizeof(u8)) + regs[idx] = sdhci_readb(host, restore_data[idx].reg); + } + + reset_control_assert(priv->rst); + reset_control_deassert(priv->rst); + + for (idx = 0; idx < ARRAY_SIZE(restore_data); idx++) { + if (restore_data[idx].width == sizeof(u32)) + sdhci_writel(host, regs[idx], restore_data[idx].reg); + else if (restore_data[idx].width == sizeof(u16)) + sdhci_writew(host, regs[idx], restore_data[idx].reg); + else if (restore_data[idx].width == sizeof(u8)) + sdhci_writeb(host, regs[idx], restore_data[idx].reg); + } + + return sdhci_execute_tuning(mmc, opcode); +} + +static const struct sdhci_ops sdhci_ma35_ops = { + .set_clock = ma35_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .reset = sdhci_reset, + .adma_write_desc = ma35_adma_write_desc, + .voltage_switch = ma35_voltage_switch, +}; + +static const struct sdhci_pltfm_data sdhci_ma35_pdata = { + .ops = &sdhci_ma35_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_BROKEN_DDR50 | + SDHCI_QUIRK2_ACMD23_BROKEN, +}; + +static int ma35_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_host *host; + struct ma35_priv *priv; + int err; + u32 extra, ctl; + + host = sdhci_pltfm_init(pdev, &sdhci_ma35_pdata, sizeof(struct ma35_priv)); + if (IS_ERR(host)) + return PTR_ERR(host); + + /* Extra adma table cnt for cross 128M boundary handling. 
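The comment just above introduces the extra ADMA descriptor sizing, and the computation follows immediately after this point: one additional descriptor per 128 MiB of addressable DMA space, capped at the controller's segment limit, since every boundary crossing consumes an extra descriptor. The arithmetic below reproduces that sizing in stand-alone form; SEGS_CAP is an invented stand-in for the real maximum segment count.

#include <stdio.h>
#include <stdint.h>

#define SZ_128M         (128ull * 1024 * 1024)
#define SEGS_CAP        128     /* stand-in for the host's maximum segment count */

/* One extra descriptor per 128 MiB of DMA-addressable space, capped. */
static unsigned int extra_adma_descs(uint64_t dma_mask)
{
        uint64_t extra = (dma_mask + SZ_128M - 1) / SZ_128M;    /* DIV_ROUND_UP */

        return extra > SEGS_CAP ? SEGS_CAP : (unsigned int)extra;
}

int main(void)
{
        printf("32-bit mask -> %u extra descriptors\n", extra_adma_descs(0xffffffffull));
        printf("34-bit mask -> %u extra descriptors\n", extra_adma_descs(0x3ffffffffull));
        return 0;
}

With a 32-bit mask this yields 32 extra descriptors; larger masks saturate at the cap.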
*/ + extra = DIV_ROUND_UP_ULL(dma_get_required_mask(dev), SZ_128M); + extra = min(extra, SDHCI_MAX_SEGS); + + host->adma_table_cnt += extra; + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + + pltfm_host->clk = devm_clk_get_optional_enabled(dev, NULL); + if (IS_ERR(pltfm_host->clk)) { + err = dev_err_probe(dev, PTR_ERR(pltfm_host->clk), "failed to get clk\n"); + goto err_sdhci; + } + + err = mmc_of_parse(host->mmc); + if (err) + goto err_sdhci; + + priv->rst = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(priv->rst)) { + err = dev_err_probe(dev, PTR_ERR(priv->rst), "failed to get reset control\n"); + goto err_sdhci; + } + + sdhci_get_of_property(pdev); + + priv->pinctrl = devm_pinctrl_get(dev); + if (!IS_ERR(priv->pinctrl)) { + priv->pins_default = pinctrl_lookup_state(priv->pinctrl, "default"); + priv->pins_uhs = pinctrl_lookup_state(priv->pinctrl, "state_uhs"); + pinctrl_select_state(priv->pinctrl, priv->pins_default); + } + + if (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)) { + struct regmap *regmap; + u32 reg; + + regmap = syscon_regmap_lookup_by_phandle(dev_of_node(dev), "nuvoton,sys"); + if (!IS_ERR(regmap)) { + /* Enable SDHCI voltage stable for 1.8V */ + regmap_read(regmap, MA35_SYS_MISCFCR0, ®); + reg |= BIT(17); + regmap_write(regmap, MA35_SYS_MISCFCR0, reg); + } + + host->mmc_host_ops.start_signal_voltage_switch = + ma35_start_signal_voltage_switch; + } + + host->mmc_host_ops.execute_tuning = ma35_execute_tuning; + + err = sdhci_add_host(host); + if (err) + goto err_sdhci; + + /* + * Split data into chunks of 16 or 8 bytes for transmission. + * Each chunk transfer is guaranteed to be uninterrupted on the bus. + * This likely corresponds to the AHB bus DMA burst size. + */ + ctl = sdhci_readw(host, MA35_SDHCI_MBIUCTL); + ctl &= ~MA35_SDHCI_INCR_MSK; + ctl |= MA35_SDHCI_INCR16 | MA35_SDHCI_INCR8; + sdhci_writew(host, ctl, MA35_SDHCI_MBIUCTL); + + return 0; + +err_sdhci: + sdhci_pltfm_free(pdev); + return err; +} + +static void ma35_disable_card_clk(struct sdhci_host *host) +{ + u16 ctrl; + + ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (ctrl & SDHCI_CLOCK_CARD_EN) { + ctrl &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL); + } +} + +static void ma35_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + + sdhci_remove_host(host, 0); + ma35_disable_card_clk(host); + sdhci_pltfm_free(pdev); +} + +static const struct of_device_id sdhci_ma35_dt_ids[] = { + { .compatible = "nuvoton,ma35d1-sdhci" }, + {} +}; + +static struct platform_driver sdhci_ma35_driver = { + .driver = { + .name = "sdhci-ma35", + .of_match_table = sdhci_ma35_dt_ids, + }, + .probe = ma35_probe, + .remove_new = ma35_remove, +}; +module_platform_driver(sdhci_ma35_driver); + +MODULE_DESCRIPTION("SDHCI platform driver for Nuvoton MA35"); +MODULE_AUTHOR("Shan-Chun Hung "); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index b75cbea88b4020..7b957f6d55884a 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -126,7 +126,7 @@ static void pxav1_request_done(struct sdhci_host *host, struct mmc_request *mrq) struct sdhci_pxav2_host *pxav2_host; /* If this is an SDIO command, perform errata workaround for silicon bug */ - if (mrq->cmd && !mrq->cmd->error && + if (!mrq->cmd->error && (mrq->cmd->opcode == SD_IO_RW_DIRECT || mrq->cmd->opcode == SD_IO_RW_EXTENDED)) { /* Reset data port */ diff --git a/drivers/mmc/host/sdhci_am654.c 
b/drivers/mmc/host/sdhci_am654.c index 64e10f7c9faa33..0aa3c40ea6ed8e 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -86,6 +86,7 @@ #define CLOCK_TOO_SLOW_HZ 50000000 #define SDHCI_AM654_AUTOSUSPEND_DELAY -1 +#define RETRY_TUNING_MAX 10 /* Command Queue Host Controller Interface Base address */ #define SDHCI_AM654_CQE_BASE_ADDR 0x200 @@ -151,6 +152,7 @@ struct sdhci_am654_data { u32 flags; u32 quirks; bool dll_enable; + u32 tuning_loop; #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0) }; @@ -443,7 +445,7 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask) #define ITAPDLY_LENGTH 32 #define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1) -static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window +static int sdhci_am654_calculate_itap(struct sdhci_host *host, struct window *fail_window, u8 num_fails, bool circular_buffer) { u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0; @@ -453,12 +455,16 @@ static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window int prev_fail_end = -1; u8 i; - if (!num_fails) - return ITAPDLY_LAST_INDEX >> 1; + if (!num_fails) { + /* Retry tuning */ + dev_dbg(dev, "No failing region found, retry tuning\n"); + return -1; + } if (fail_window->length == ITAPDLY_LENGTH) { - dev_err(dev, "No passing ITAPDLY, return 0\n"); - return 0; + /* Retry tuning */ + dev_dbg(dev, "No passing itapdly, retry tuning\n"); + return -1; } first_fail_start = fail_window->start; @@ -494,13 +500,14 @@ static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap; } -static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host, - u32 opcode) +static int sdhci_am654_do_tuning(struct sdhci_host *host, + u32 opcode) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); unsigned char timing = host->mmc->ios.timing; struct window fail_window[ITAPDLY_LENGTH]; + struct device *dev = mmc_dev(host->mmc); u8 curr_pass, itap; u8 fail_index = 0; u8 prev_pass = 1; @@ -521,6 +528,7 @@ static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host, if (!curr_pass) { fail_window[fail_index].end = itap; fail_window[fail_index].length++; + dev_dbg(dev, "Failed itapdly=%d\n", itap); } if (curr_pass && !prev_pass) @@ -532,13 +540,34 @@ static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host, if (fail_window[fail_index].length != 0) fail_index++; - itap = sdhci_am654_calculate_itap(host, fail_window, fail_index, - sdhci_am654->dll_enable); + return sdhci_am654_calculate_itap(host, fail_window, fail_index, + sdhci_am654->dll_enable); +} - sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]); +static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host, + u32 opcode) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + unsigned char timing = host->mmc->ios.timing; + struct device *dev = mmc_dev(host->mmc); + int itapdly; + + do { + itapdly = sdhci_am654_do_tuning(host, opcode); + if (itapdly >= 0) + break; + } while (++sdhci_am654->tuning_loop < RETRY_TUNING_MAX); + + if (itapdly < 0) { + dev_err(dev, "Failed to find itapdly, fail tuning\n"); + return -1; + } + dev_dbg(dev, "Passed tuning, final itapdly=%d\n", itapdly); + sdhci_am654_write_itapdly(sdhci_am654, itapdly, sdhci_am654->itap_del_ena[timing]); /* Save ITAPDLY 
*/ - sdhci_am654->itap_del_sel[timing] = itap; + sdhci_am654->itap_del_sel[timing] = itapdly; return 0; } @@ -742,6 +771,9 @@ static int sdhci_am654_init(struct sdhci_host *host) regmap_update_bits(sdhci_am654->base, CTL_CFG_3, TUNINGFORSDR50_MASK, TUNINGFORSDR50_MASK); + /* Use to re-execute tuning */ + sdhci_am654->tuning_loop = 0; + ret = sdhci_setup_host(host); if (ret) return ret; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index b359876cc33d70..45a474ccab1c98 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -895,8 +895,8 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) * It seems, VccQ should be switched on after Vcc, this is also what the * omap_hsmmc.c driver does. */ - if (!IS_ERR(mmc->supply.vqmmc) && !ret) { - ret = regulator_enable(mmc->supply.vqmmc); + if (!ret) { + ret = mmc_regulator_enable_vqmmc(mmc); usleep_range(200, 300); } @@ -909,8 +909,7 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host) { struct mmc_host *mmc = host->mmc; - if (!IS_ERR(mmc->supply.vqmmc)) - regulator_disable(mmc->supply.vqmmc); + mmc_regulator_disable_vqmmc(mmc); if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 66044f4f5bade8..10cd1d9b48859d 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -207,6 +207,9 @@ static int powernv_flash_set_driver_info(struct device *dev, * get them */ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); + if (!mtd->name) + return -ENOMEM; + mtd->type = MTD_NORFLASH; mtd->flags = MTD_WRITEABLE; mtd->size = size; diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 28131a127d065e..8297b366a06699 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -296,10 +296,12 @@ static int __init init_slram(void) T("slram: devname = %s\n", devname); if ((!map) || (!(devstart = strsep(&map, ",")))) { E("slram: No devicestart specified.\n"); + break; } T("slram: devstart = %s\n", devstart); if ((!map) || (!(devlength = strsep(&map, ",")))) { E("slram: No devicelength / -end specified.\n"); + break; } T("slram: devlength = %s\n", devlength); if (parse_cmdline(devname, devstart, devlength) != 0) { diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 193428de6a4bdf..f56f44aa8625ce 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -204,7 +204,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs, } /* make a copy of vecs */ - vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL); + vecs_copy = kmemdup_array(vecs, count, sizeof(struct kvec), GFP_KERNEL); if (!vecs_copy) return -ENOMEM; diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 2f11585b5613ee..7bf3777e1f1374 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -298,14 +298,14 @@ static void find_next_position(struct mtdoops_context *cxt) } static void mtdoops_do_dump(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { struct mtdoops_context *cxt = container_of(dumper, struct mtdoops_context, dump); struct kmsg_dump_iter iter; /* Only dump oopses if dump_oops is set */ - if (reason == KMSG_DUMP_OOPS && !dump_oops) + if (detail->reason == KMSG_DUMP_OOPS && !dump_oops) return; kmsg_dump_rewind(&iter); @@ -317,7 +317,7 @@ static void mtdoops_do_dump(struct 
kmsg_dumper *dumper, record_size - sizeof(struct mtdoops_hdr), NULL); clear_bit(0, &cxt->oops_buf_busy); - if (reason != KMSG_DUMP_OOPS) { + if (detail->reason != KMSG_DUMP_OOPS) { /* Panics must be written immediately */ mtdoops_write(cxt, 1); } else { diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 6142573085169a..d0aaccf72d781d 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -448,6 +448,12 @@ config MTD_NAND_RENESAS Enables support for the NAND controller found on Renesas R-Car Gen3 and RZ/N1 SoC families. +config MTD_NAND_TS72XX + tristate "ts72xx NAND controller" + depends on ARCH_EP93XX && HAS_IOMEM + help + Enables support for NAND controller on ts72xx SBCs. + comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 25120a4afadac2..d0b0e6b83568d1 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o +obj-$(CONFIG_MTD_NAND_TS72XX) += technologic-nand-controller.o obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 2ff1d2b13e3c3a..5436ec4a8fde42 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -1360,7 +1360,7 @@ static void anfc_chips_cleanup(struct arasan_nfc *nfc) static int anfc_chips_init(struct arasan_nfc *nfc) { - struct device_node *np = nfc->dev->of_node, *nand_np; + struct device_node *np = nfc->dev->of_node; int nchips = of_get_child_count(np); int ret; @@ -1370,10 +1370,9 @@ static int anfc_chips_init(struct arasan_nfc *nfc) return -EINVAL; } - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = anfc_chip_init(nfc, nand_np); if (ret) { - of_node_put(nand_np); anfc_chips_cleanup(nfc); break; } diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index dc75d50d52e84e..f9ccfd02e80456 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -2049,7 +2049,10 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, dma_cap_set(DMA_MEMCPY, mask); nc->dmac = dma_request_channel(mask, NULL, NULL); - if (!nc->dmac) + if (nc->dmac) + dev_info(nc->dev, "using %s for DMA transfers\n", + dma_chan_name(nc->dmac)); + else dev_err(nc->dev, "Failed to request DMA channel\n"); } diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index ff92c17def8351..3bc89b3569632d 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2836,7 +2836,6 @@ static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl) static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl) { struct device_node *np = cdns_ctrl->dev->of_node; - struct device_node *nand_np; int max_cs = cdns_ctrl->caps2.max_banks; int nchips, ret; @@ -2849,10 +2848,9 @@ static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl) return -EINVAL; } - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, 
nand_np) { ret = cadence_nand_chip_init(cdns_ctrl, nand_np); if (ret) { - of_node_put(nand_np); cadence_nand_chips_cleanup(cdns_ctrl); return ret; } diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index 051deea768dbb7..392678143a36b2 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -20,8 +20,71 @@ #include #include -#include -#include +#define NRCSR_OFFSET 0x00 +#define NANDFCR_OFFSET 0x60 +#define NANDFSR_OFFSET 0x64 +#define NANDF1ECC_OFFSET 0x70 + +/* 4-bit ECC syndrome registers */ +#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc +#define NAND_4BIT_ECC1_OFFSET 0xc0 +#define NAND_4BIT_ECC2_OFFSET 0xc4 +#define NAND_4BIT_ECC3_OFFSET 0xc8 +#define NAND_4BIT_ECC4_OFFSET 0xcc +#define NAND_ERR_ADD1_OFFSET 0xd0 +#define NAND_ERR_ADD2_OFFSET 0xd4 +#define NAND_ERR_ERRVAL1_OFFSET 0xd8 +#define NAND_ERR_ERRVAL2_OFFSET 0xdc + +/* NOTE: boards don't need to use these address bits + * for ALE/CLE unless they support booting from NAND. + * They're used unless platform data overrides them. + */ +#define MASK_ALE 0x08 +#define MASK_CLE 0x10 + +struct davinci_nand_pdata { + uint32_t mask_ale; + uint32_t mask_cle; + + /* + * 0-indexed chip-select number of the asynchronous + * interface to which the NAND device has been connected. + * + * So, if you have NAND connected to CS3 of DA850, you + * will pass '1' here. Since the asynchronous interface + * on DA850 starts from CS2. + */ + uint32_t core_chipsel; + + /* for packages using two chipselects */ + uint32_t mask_chipsel; + + /* board's default static partition info */ + struct mtd_partition *parts; + unsigned int nr_parts; + + /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!) + * soft == NAND_ECC_ENGINE_TYPE_SOFT + * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits + * + * All DaVinci-family chips support 1-bit hardware ECC. + * Newer ones also support 4-bit ECC, but are awkward + * using it with large page chips. + */ + enum nand_ecc_engine_type engine_type; + enum nand_ecc_placement ecc_placement; + u8 ecc_bits; + + /* e.g. NAND_BUSWIDTH_16 */ + unsigned int options; + /* e.g. 
NAND_BBT_USE_FLASH */ + unsigned int bbt_options; + + /* Main and mirror bbt descriptor overrides */ + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; +}; /* * This is a device driver for the NAND flash controller found on the @@ -54,8 +117,6 @@ struct davinci_nand_info { uint32_t mask_cle; uint32_t core_chipsel; - - struct davinci_aemif_timing *timing; }; static DEFINE_SPINLOCK(davinci_nand_lock); @@ -775,7 +836,6 @@ static int nand_davinci_probe(struct platform_device *pdev) info->chip.options = pdata->options; info->chip.bbt_td = pdata->bbt_td; info->chip.bbt_md = pdata->bbt_md; - info->timing = pdata->timing; info->current_cs = info->vaddr; info->core_chipsel = pdata->core_chipsel; diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c index edac8749bb9391..2f5666511fda8d 100644 --- a/drivers/mtd/nand/raw/denali_dt.c +++ b/drivers/mtd/nand/raw/denali_dt.c @@ -145,15 +145,15 @@ static int denali_dt_probe(struct platform_device *pdev) if (IS_ERR(denali->host)) return PTR_ERR(denali->host); - dt->clk = devm_clk_get(dev, "nand"); + dt->clk = devm_clk_get_enabled(dev, "nand"); if (IS_ERR(dt->clk)) return PTR_ERR(dt->clk); - dt->clk_x = devm_clk_get(dev, "nand_x"); + dt->clk_x = devm_clk_get_enabled(dev, "nand_x"); if (IS_ERR(dt->clk_x)) return PTR_ERR(dt->clk_x); - dt->clk_ecc = devm_clk_get(dev, "ecc"); + dt->clk_ecc = devm_clk_get_enabled(dev, "ecc"); if (IS_ERR(dt->clk_ecc)) return PTR_ERR(dt->clk_ecc); @@ -165,18 +165,6 @@ static int denali_dt_probe(struct platform_device *pdev) if (IS_ERR(dt->rst_reg)) return PTR_ERR(dt->rst_reg); - ret = clk_prepare_enable(dt->clk); - if (ret) - return ret; - - ret = clk_prepare_enable(dt->clk_x); - if (ret) - goto out_disable_clk; - - ret = clk_prepare_enable(dt->clk_ecc); - if (ret) - goto out_disable_clk_x; - denali->clk_rate = clk_get_rate(dt->clk); denali->clk_x_rate = clk_get_rate(dt->clk_x); @@ -187,7 +175,7 @@ static int denali_dt_probe(struct platform_device *pdev) */ ret = reset_control_deassert(dt->rst_reg); if (ret) - goto out_disable_clk_ecc; + return ret; ret = reset_control_deassert(dt->rst); if (ret) @@ -222,12 +210,6 @@ static int denali_dt_probe(struct platform_device *pdev) reset_control_assert(dt->rst); out_assert_rst_reg: reset_control_assert(dt->rst_reg); -out_disable_clk_ecc: - clk_disable_unprepare(dt->clk_ecc); -out_disable_clk_x: - clk_disable_unprepare(dt->clk_x); -out_disable_clk: - clk_disable_unprepare(dt->clk); return ret; } @@ -239,9 +221,6 @@ static void denali_dt_remove(struct platform_device *pdev) denali_remove(&dt->controller); reset_control_assert(dt->rst); reset_control_assert(dt->rst_reg); - clk_disable_unprepare(dt->clk_ecc); - clk_disable_unprepare(dt->clk_x); - clk_disable_unprepare(dt->clk); } static struct platform_driver denali_dt_driver = { diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c index de7e722d382625..e22094e39546e9 100644 --- a/drivers/mtd/nand/raw/denali_pci.c +++ b/drivers/mtd/nand/raw/denali_pci.c @@ -77,18 +77,20 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) denali->reg = devm_ioremap(denali->dev, csr_base, csr_len); if (!denali->reg) { dev_err(&dev->dev, "Spectra: Unable to remap memory region\n"); - return -ENOMEM; + ret = -ENOMEM; + goto regions_release; } denali->host = devm_ioremap(denali->dev, mem_base, mem_len); if (!denali->host) { dev_err(&dev->dev, "Spectra: ioremap failed!"); - return -ENOMEM; + ret = -ENOMEM; + goto regions_release; } ret = denali_init(denali); if 
(ret) - return ret; + goto regions_release; nsels = denali->nbanks; @@ -116,6 +118,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) out_remove_denali: denali_remove(denali); +regions_release: + pci_release_regions(dev); return ret; } @@ -123,6 +127,7 @@ static void denali_pci_remove(struct pci_dev *dev) { struct denali_controller *denali = pci_get_drvdata(dev); + pci_release_regions(dev); denali_remove(denali); } diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c index 78174c463b3686..f0f0522b2fa254 100644 --- a/drivers/mtd/nand/raw/intel-nand-controller.c +++ b/drivers/mtd/nand/raw/intel-nand-controller.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #define EBU_CLC 0x000 #define EBU_CLC_RST 0x00000000u diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 5b0f5a9cef81b5..aa113a5d88c89e 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -84,7 +84,7 @@ #include #include #include -#include +#include #include #include @@ -2771,7 +2771,6 @@ static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) { struct device_node *np = dev->of_node; - struct device_node *nand_np; int max_cs = nfc->caps->max_cs_nb; int nchips; int ret; @@ -2798,20 +2797,15 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) return ret; } - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = marvell_nand_chip_init(dev, nfc, nand_np); if (ret) { - of_node_put(nand_np); - goto cleanup_chips; + marvell_nand_chips_cleanup(nfc); + return ret; } } return 0; - -cleanup_chips: - marvell_nand_chips_cleanup(nfc); - - return ret; } static int marvell_nfc_init_dma(struct marvell_nfc *nfc) diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 9eb5470344d095..fbb06aa305cb5e 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1475,7 +1475,7 @@ meson_nfc_nand_chip_init(struct device *dev, return 0; } -static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc) +static void meson_nfc_nand_chips_cleanup(struct meson_nfc *nfc) { struct meson_nfc_nand_chip *meson_chip; struct mtd_info *mtd; @@ -1495,14 +1495,12 @@ static int meson_nfc_nand_chips_init(struct device *dev, struct meson_nfc *nfc) { struct device_node *np = dev->of_node; - struct device_node *nand_np; int ret; - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = meson_nfc_nand_chip_init(dev, nfc, nand_np); if (ret) { - meson_nfc_nand_chip_cleanup(nfc); - of_node_put(nand_np); + meson_nfc_nand_chips_cleanup(nfc); return ret; } } @@ -1616,7 +1614,7 @@ static void meson_nfc_remove(struct platform_device *pdev) { struct meson_nfc *nfc = platform_get_drvdata(pdev); - meson_nfc_nand_chip_cleanup(nfc); + meson_nfc_nand_chips_cleanup(nfc); meson_nfc_disable_clk(nfc); } diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 17477bb2d48ff0..586868b4139f51 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1429,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, return 0; } +static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc) +{ + struct mtk_nfc_nand_chip *mtk_chip; + struct nand_chip *chip; + int ret; + + while 
(!list_empty(&nfc->chips)) { + mtk_chip = list_first_entry(&nfc->chips, + struct mtk_nfc_nand_chip, node); + chip = &mtk_chip->nand; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&mtk_chip->node); + } +} + static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc) { struct device_node *np = dev->of_node; - struct device_node *nand_np; int ret; - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np); if (ret) { - of_node_put(nand_np); + mtk_nfc_nand_chips_cleanup(nfc); return ret; } } @@ -1570,20 +1586,8 @@ static int mtk_nfc_probe(struct platform_device *pdev) static void mtk_nfc_remove(struct platform_device *pdev) { struct mtk_nfc *nfc = platform_get_drvdata(pdev); - struct mtk_nfc_nand_chip *mtk_chip; - struct nand_chip *chip; - int ret; - - while (!list_empty(&nfc->chips)) { - mtk_chip = list_first_entry(&nfc->chips, - struct mtk_nfc_nand_chip, node); - chip = &mtk_chip->nand; - ret = mtd_device_unregister(nand_to_mtd(chip)); - WARN_ON(ret); - nand_cleanup(chip); - list_del(&mtk_chip->node); - } + mtk_nfc_nand_chips_cleanup(nfc); mtk_ecc_release(nfc->ecc); } diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 179b28459b4bdd..df48b7d01d16c3 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -1381,7 +1381,7 @@ static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns) } /* - * Retuns a pointer to the current byte, within the current page. + * Returns a pointer to the current byte, within the current page. */ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns) { diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c index 1c76ee98efb713..2570fd0beea0d3 100644 --- a/drivers/mtd/nand/raw/pl35x-nand-controller.c +++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c @@ -1111,7 +1111,7 @@ static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc) static int pl35x_nand_chips_init(struct pl35x_nandc *nfc) { - struct device_node *np = nfc->dev->of_node, *nand_np; + struct device_node *np = nfc->dev->of_node; int nchips = of_get_child_count(np); int ret; @@ -1121,10 +1121,9 @@ static int pl35x_nand_chips_init(struct pl35x_nandc *nfc) return -EINVAL; } - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = pl35x_nand_chip_init(nfc, nand_np); if (ret) { - of_node_put(nand_np); pl35x_nand_chips_cleanup(nfc); break; } diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c index c9a01feff8dfe5..0e92d50c5249b0 100644 --- a/drivers/mtd/nand/raw/renesas-nand-controller.c +++ b/drivers/mtd/nand/raw/renesas-nand-controller.c @@ -1297,23 +1297,17 @@ static void rnandc_chips_cleanup(struct rnandc *rnandc) static int rnandc_chips_init(struct rnandc *rnandc) { - struct device_node *np; int ret; - for_each_child_of_node(rnandc->dev->of_node, np) { + for_each_child_of_node_scoped(rnandc->dev->of_node, np) { ret = rnandc_chip_init(rnandc, np); if (ret) { - of_node_put(np); - goto cleanup_chips; + rnandc_chips_cleanup(rnandc); + return ret; } } return 0; - -cleanup_chips: - rnandc_chips_cleanup(rnandc); - - return ret; } static int rnandc_probe(struct platform_device *pdev) diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 55580447633be0..51c9cf9013dc28 100644 --- 
a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -1211,7 +1211,7 @@ static void rk_nfc_chips_cleanup(struct rk_nfc *nfc) static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc) { - struct device_node *np = dev->of_node, *nand_np; + struct device_node *np = dev->of_node; int nchips = of_get_child_count(np); int ret; @@ -1221,10 +1221,9 @@ static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc) return -EINVAL; } - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = rk_nfc_nand_chip_init(dev, nfc, nand_np); if (ret) { - of_node_put(nand_np); rk_nfc_chips_cleanup(nfc); return ret; } diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 264556939a00f0..0f67e96cc24020 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1851,7 +1851,6 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc) { struct device_node *dn = nfc->dev->of_node; - struct device_node *child; int nchips = of_get_child_count(dn); int ret = 0; @@ -1865,12 +1864,10 @@ static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc) return -EINVAL; } - for_each_child_of_node(dn, child) { + for_each_child_of_node_scoped(dn, child) { ret = stm32_fmc2_nfc_parse_child(nfc, child); - if (ret < 0) { - of_node_put(child); + if (ret < 0) return ret; - } } return ret; diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 4ec17c8bce5a1b..c28634e20abf8a 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2025,13 +2025,11 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc) { struct device_node *np = dev->of_node; - struct device_node *nand_np; int ret; - for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = sunxi_nand_chip_init(dev, nfc, nand_np); if (ret) { - of_node_put(nand_np); sunxi_nand_chips_cleanup(nfc); return ret; } diff --git a/drivers/mtd/nand/raw/technologic-nand-controller.c b/drivers/mtd/nand/raw/technologic-nand-controller.c new file mode 100644 index 00000000000000..0e45a6fd91dd4b --- /dev/null +++ b/drivers/mtd/nand/raw/technologic-nand-controller.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Technologic Systems TS72xx NAND controller driver + * + * Copyright (C) 2023 Nikita Shubin + * + * Derived from: plat_nand.c + * Author: Vitaly Wool + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define TS72XX_NAND_CONTROL_ADDR_LINE BIT(22) /* 0xN0400000 */ +#define TS72XX_NAND_BUSY_ADDR_LINE BIT(23) /* 0xN0800000 */ + +#define TS72XX_NAND_ALE BIT(0) +#define TS72XX_NAND_CLE BIT(1) +#define TS72XX_NAND_NCE BIT(2) + +#define TS72XX_NAND_CTRL_CLE (TS72XX_NAND_NCE | TS72XX_NAND_CLE) +#define TS72XX_NAND_CTRL_ALE (TS72XX_NAND_NCE | TS72XX_NAND_ALE) + +struct ts72xx_nand_data { + struct nand_controller controller; + struct nand_chip chip; + void __iomem *base; + void __iomem *ctrl; + void __iomem *busy; +}; + +static inline struct ts72xx_nand_data *chip_to_ts72xx(struct nand_chip *chip) +{ + return container_of(chip, struct ts72xx_nand_data, chip); +} + +static int ts72xx_nand_attach_chip(struct nand_chip *chip) +{ + switch (chip->ecc.engine_type) { + case 
NAND_ECC_ENGINE_TYPE_ON_HOST: + return -EINVAL; + case NAND_ECC_ENGINE_TYPE_SOFT: + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + fallthrough; + default: + return 0; + } +} + +static void ts72xx_nand_ctrl(struct nand_chip *chip, u8 value) +{ + struct ts72xx_nand_data *data = chip_to_ts72xx(chip); + unsigned char bits = ioread8(data->ctrl) & ~GENMASK(2, 0); + + iowrite8(bits | value, data->ctrl); +} + +static int ts72xx_nand_exec_instr(struct nand_chip *chip, + const struct nand_op_instr *instr) +{ + struct ts72xx_nand_data *data = chip_to_ts72xx(chip); + unsigned int timeout_us; + u32 status; + int ret; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ts72xx_nand_ctrl(chip, TS72XX_NAND_CTRL_CLE); + iowrite8(instr->ctx.cmd.opcode, data->base); + ts72xx_nand_ctrl(chip, TS72XX_NAND_NCE); + break; + + case NAND_OP_ADDR_INSTR: + ts72xx_nand_ctrl(chip, TS72XX_NAND_CTRL_ALE); + iowrite8_rep(data->base, instr->ctx.addr.addrs, instr->ctx.addr.naddrs); + ts72xx_nand_ctrl(chip, TS72XX_NAND_NCE); + break; + + case NAND_OP_DATA_IN_INSTR: + ioread8_rep(data->base, instr->ctx.data.buf.in, instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + iowrite8_rep(data->base, instr->ctx.data.buf.in, instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + timeout_us = instr->ctx.waitrdy.timeout_ms * 1000; + ret = readb_poll_timeout(data->busy, status, status & BIT(5), 0, timeout_us); + if (ret) + return ret; + + break; + } + + if (instr->delay_ns) + ndelay(instr->delay_ns); + + return 0; +} + +static int ts72xx_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, bool check_only) +{ + unsigned int i; + int ret; + + if (check_only) + return 0; + + for (i = 0; i < op->ninstrs; i++) { + ret = ts72xx_nand_exec_instr(chip, &op->instrs[i]); + if (ret) + return ret; + } + + return 0; +} + +static const struct nand_controller_ops ts72xx_nand_ops = { + .attach_chip = ts72xx_nand_attach_chip, + .exec_op = ts72xx_nand_exec_op, +}; + +static int ts72xx_nand_probe(struct platform_device *pdev) +{ + struct ts72xx_nand_data *data; + struct fwnode_handle *child; + struct mtd_info *mtd; + int err; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + nand_controller_init(&data->controller); + data->controller.ops = &ts72xx_nand_ops; + data->chip.controller = &data->controller; + + data->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->base)) + return PTR_ERR(data->base); + data->ctrl = data->base + TS72XX_NAND_CONTROL_ADDR_LINE; + data->busy = data->base + TS72XX_NAND_BUSY_ADDR_LINE; + + child = fwnode_get_next_child_node(dev_fwnode(&pdev->dev), NULL); + if (!child) + return dev_err_probe(&pdev->dev, -ENXIO, + "ts72xx controller node should have exactly one child\n"); + + nand_set_flash_node(&data->chip, to_of_node(child)); + mtd = nand_to_mtd(&data->chip); + mtd->dev.parent = &pdev->dev; + platform_set_drvdata(pdev, data); + + /* + * This driver assumes that the default ECC engine should be TYPE_SOFT. + * Set ->engine_type before registering the NAND devices in order to + * provide a driver specific default value.
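The ->exec_op() implementation above is a plain loop that hands each generic NAND instruction to a per-instruction dispatcher and aborts on the first error. The stand-alone sketch below models that control flow with a reduced, invented instruction set; it is not the real nand_op_instr interface, just the shape of the dispatcher.

#include <stdio.h>

enum instr_type { INSTR_CMD, INSTR_WAITRDY };

struct instr {
        enum instr_type type;
        unsigned char opcode;   /* used by INSTR_CMD */
        int ready;              /* used by INSTR_WAITRDY: simulated R/B state */
};

static int exec_instr(const struct instr *in)
{
        switch (in->type) {
        case INSTR_CMD:
                printf("command %#x latched\n", in->opcode);
                return 0;
        case INSTR_WAITRDY:
                /* the real driver polls the busy line with a timeout here */
                return in->ready ? 0 : -1;
        }
        return -1;
}

static int exec_op(const struct instr *instrs, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                int ret = exec_instr(&instrs[i]);

                if (ret)
                        return ret;     /* abort the whole operation on first failure */
        }
        return 0;
}

int main(void)
{
        struct instr reset_op[] = {
                { .type = INSTR_CMD, .opcode = 0xff },
                { .type = INSTR_WAITRDY, .ready = 1 },
        };

        printf("exec_op -> %d\n", exec_op(reset_op, 2));
        return 0;
}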
+ */ + data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + + /* Scan to find existence of the device */ + err = nand_scan(&data->chip, 1); + if (err) + goto err_handle_put; + + err = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); + if (err) + goto err_clean_nand; + + return 0; + +err_clean_nand: + nand_cleanup(&data->chip); +err_handle_put: + fwnode_handle_put(child); + return err; +} + +static void ts72xx_nand_remove(struct platform_device *pdev) +{ + struct ts72xx_nand_data *data = platform_get_drvdata(pdev); + struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); + struct nand_chip *chip = &data->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + fwnode_handle_put(fwnode); +} + +static const struct of_device_id ts72xx_id_table[] = { + { .compatible = "technologic,ts7200-nand" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ts72xx_id_table); + +static struct platform_driver ts72xx_nand_driver = { + .driver = { + .name = "ts72xx-nand", + .of_match_table = ts72xx_id_table, + }, + .probe = ts72xx_nand_probe, + .remove_new = ts72xx_nand_remove, +}; +module_platform_driver(ts72xx_nand_driver); + +MODULE_AUTHOR("Nikita Shubin "); +MODULE_DESCRIPTION("Technologic Systems TS72xx NAND controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index e0b6715e5dfed6..4d76f9f71a0e9e 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -34,7 +34,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) return 0; } -static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val) +int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val) { struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg, spinand->scratchbuf); @@ -200,6 +200,12 @@ static int spinand_ecc_enable(struct spinand_device *spinand, enable ? CFG_ECC_ENABLE : 0); } +static int spinand_cont_read_enable(struct spinand_device *spinand, + bool enable) +{ + return spinand->set_cont_read(spinand, enable); +} + static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) { struct nand_device *nand = spinand_to_nand(spinand); @@ -311,10 +317,22 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, /* Finish a page read: check the status, report errors/bitflips */ ret = spinand_check_ecc_status(spinand, engine_conf->status); - if (ret == -EBADMSG) + if (ret == -EBADMSG) { mtd->ecc_stats.failed++; - else if (ret > 0) - mtd->ecc_stats.corrected += ret; + } else if (ret > 0) { + unsigned int pages; + + /* + * Continuous reads don't allow us to get the detail, + * so we may exagerate the actual number of corrected bitflips. 
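As the comment above notes, a continuous read returns one worst-case bitflip figure for the whole transfer, so the core charges that figure once per page covered and may therefore over-report. The snippet below only illustrates that accounting with invented numbers; it is not the driver code.

#include <stdio.h>

int main(void)
{
        unsigned int page_size = 2048;
        unsigned int datalen = 8 * 2048;        /* continuous read spanning 8 pages */
        unsigned int reported = 3;              /* single figure reported by the chip */
        int continuous = 1;

        unsigned int pages = continuous ? datalen / page_size : 1;

        /* 3 flips are assumed for every page, so 24 is an upper bound, not
         * the exact number of corrected bits. */
        printf("accounted corrected bitflips: %u\n", reported * pages);
        return 0;
}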
+ */ + if (!req->continuous) + pages = 1; + else + pages = req->datalen / nanddev_page_size(nand); + + mtd->ecc_stats.corrected += ret * pages; + } return ret; } @@ -369,7 +387,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, if (req->datalen) { buf = spinand->databuf; - nbytes = nanddev_page_size(nand); + if (!req->continuous) + nbytes = nanddev_page_size(nand); + else + nbytes = round_up(req->dataoffs + req->datalen, + nanddev_page_size(nand)); column = 0; } @@ -386,6 +408,9 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, else rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc; + if (spinand->flags & SPINAND_HAS_READ_PLANE_SELECT_BIT) + column |= req->pos.plane << fls(nanddev_page_size(nand)); + while (nbytes) { ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf); if (ret < 0) @@ -397,6 +422,13 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, nbytes -= ret; column += ret; buf += ret; + + /* + * Dirmap accesses are allowed to toggle the CS. + * Toggling the CS during a continuous read is forbidden. + */ + if (nbytes && req->continuous) + return -EIO; } if (req->datalen) @@ -460,6 +492,9 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, else wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc; + if (spinand->flags & SPINAND_HAS_PROG_PLANE_SELECT_BIT) + column |= req->pos.plane << fls(nanddev_page_size(nand)); + while (nbytes) { ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf); if (ret < 0) @@ -630,25 +665,20 @@ static int spinand_write_page(struct spinand_device *spinand, return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); } -static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, - struct mtd_oob_ops *ops) +static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops, + unsigned int *max_bitflips) { struct spinand_device *spinand = mtd_to_spinand(mtd); struct nand_device *nand = mtd_to_nanddev(mtd); - struct mtd_ecc_stats old_stats; - unsigned int max_bitflips = 0; struct nand_io_iter iter; bool disable_ecc = false; bool ecc_failed = false; - int ret = 0; + int ret; - if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout) + if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout) disable_ecc = true; - mutex_lock(&spinand->lock); - - old_stats = mtd->ecc_stats; - nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) { if (disable_ecc) iter.req.mode = MTD_OPS_RAW; @@ -664,13 +694,155 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, if (ret == -EBADMSG) ecc_failed = true; else - max_bitflips = max_t(unsigned int, max_bitflips, ret); + *max_bitflips = max_t(unsigned int, *max_bitflips, ret); ret = 0; ops->retlen += iter.req.datalen; ops->oobretlen += iter.req.ooblen; } + if (ecc_failed && !ret) + ret = -EBADMSG; + + return ret; +} + +static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops, + unsigned int *max_bitflips) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + struct nand_device *nand = mtd_to_nanddev(mtd); + struct nand_io_iter iter; + u8 status; + int ret; + + ret = spinand_cont_read_enable(spinand, true); + if (ret) + return ret; + + /* + * The cache is divided into two halves. While one half of the cache has + * the requested data, the other half is loaded with the next chunk of data. + * Therefore, the host can read out the data continuously from page to page. 
+ * Each data read must be a multiple of 4-bytes and full pages should be read; + * otherwise, the data output might get out of sequence from one read command + * to another. + */ + nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) { + ret = spinand_select_target(spinand, iter.req.pos.target); + if (ret) + goto end_cont_read; + + ret = nand_ecc_prepare_io_req(nand, &iter.req); + if (ret) + goto end_cont_read; + + ret = spinand_load_page_op(spinand, &iter.req); + if (ret) + goto end_cont_read; + + ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US, + SPINAND_READ_POLL_DELAY_US, NULL); + if (ret < 0) + goto end_cont_read; + + ret = spinand_read_from_cache_op(spinand, &iter.req); + if (ret) + goto end_cont_read; + + ops->retlen += iter.req.datalen; + + ret = spinand_read_status(spinand, &status); + if (ret) + goto end_cont_read; + + spinand_ondie_ecc_save_status(nand, status); + + ret = nand_ecc_finish_io_req(nand, &iter.req); + if (ret < 0) + goto end_cont_read; + + *max_bitflips = max_t(unsigned int, *max_bitflips, ret); + ret = 0; + } + +end_cont_read: + /* + * Once all the data has been read out, the host can either pull CS# + * high and wait for tRST or manually clear the bit in the configuration + * register to terminate the continuous read operation. We have no + * guarantee the SPI controller drivers will effectively deassert the CS + * when we expect them to, so take the register based approach. + */ + spinand_cont_read_enable(spinand, false); + + return ret; +} + +static void spinand_cont_read_init(struct spinand_device *spinand) +{ + struct nand_device *nand = spinand_to_nand(spinand); + enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type; + + /* OOBs cannot be retrieved so external/on-host ECC engine won't work */ + if (spinand->set_cont_read && + (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE || + engine_type == NAND_ECC_ENGINE_TYPE_NONE)) { + spinand->cont_read_possible = true; + } +} + +static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct spinand_device *spinand = nand_to_spinand(nand); + struct nand_pos start_pos, end_pos; + + if (!spinand->cont_read_possible) + return false; + + /* OOBs won't be retrieved */ + if (ops->ooblen || ops->oobbuf) + return false; + + nanddev_offs_to_pos(nand, from, &start_pos); + nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos); + + /* + * Continuous reads never cross LUN boundaries. Some devices don't + * support crossing planes boundaries. Some devices don't even support + * crossing blocks boundaries. The common case being to read through UBI, + * we will very rarely read two consequent blocks or more, so it is safer + * and easier (can be improved) to only enable continuous reads when + * reading within the same erase block. 
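The comment just above explains why continuous reads are restricted to a single erase block, and the actual range checks follow right after this point in the patch. The stand-alone model below reproduces that gating with simplified geometry; the target and plane comparisons are omitted and all sizes are arbitrary example values.

#include <stdio.h>
#include <stdint.h>

struct pos { unsigned int block, page; };

static void offs_to_pos(uint64_t offs, unsigned int page_size,
                        unsigned int pages_per_block, struct pos *p)
{
        uint64_t page = offs / page_size;

        p->block = (unsigned int)(page / pages_per_block);
        p->page = (unsigned int)(page % pages_per_block);
}

static int use_cont_read(uint64_t from, uint64_t len,
                         unsigned int page_size, unsigned int pages_per_block)
{
        struct pos start, end;

        if (!len)
                return 0;

        offs_to_pos(from, page_size, pages_per_block, &start);
        offs_to_pos(from + len - 1, page_size, pages_per_block, &end);

        if (start.block != end.block)   /* never cross an erase block */
                return 0;

        return start.page < end.page;   /* needs at least two pages to pay off */
}

int main(void)
{
        /* 2 KiB pages, 64 pages per block */
        printf("%d\n", use_cont_read(0, 16 * 2048, 2048, 64));          /* 1 */
        printf("%d\n", use_cont_read(63 * 2048, 2 * 2048, 2048, 64));   /* 0: crosses a block */
        printf("%d\n", use_cont_read(0, 2048, 2048, 64));               /* 0: single page */
        return 0;
}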
+ */ + if (start_pos.target != end_pos.target || + start_pos.plane != end_pos.plane || + start_pos.eraseblock != end_pos.eraseblock) + return false; + + return start_pos.page < end_pos.page; +} + +static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + struct mtd_ecc_stats old_stats; + unsigned int max_bitflips = 0; + int ret; + + mutex_lock(&spinand->lock); + + old_stats = mtd->ecc_stats; + + if (spinand_use_cont_read(mtd, from, ops)) + ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips); + else + ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips); + if (ops->stats) { ops->stats->uncorrectable_errors += mtd->ecc_stats.failed - old_stats.failed; @@ -680,9 +852,6 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, mutex_unlock(&spinand->lock); - if (ecc_failed && !ret) - ret = -EBADMSG; - return ret ? ret : max_bitflips; } @@ -862,6 +1031,9 @@ static int spinand_create_dirmap(struct spinand_device *spinand, }; struct spi_mem_dirmap_desc *desc; + if (spinand->cont_read_possible) + info.length = nanddev_eraseblock_size(nand); + /* The plane number is passed in MSB just above the column address */ info.offset = plane << fls(nand->memorg.pagesize); @@ -1095,6 +1267,7 @@ int spinand_match_and_init(struct spinand_device *spinand, spinand->flags = table[i].flags; spinand->id.len = 1 + table[i].devid.len; spinand->select_target = table[i].select_target; + spinand->set_cont_read = table[i].set_cont_read; op = spinand_select_op_variant(spinand, info->op_variants.read_cache); @@ -1236,9 +1409,8 @@ static int spinand_init(struct spinand_device *spinand) * may use this buffer for DMA access. * Memory allocated by devm_ does not guarantee DMA-safe alignment. */ - spinand->databuf = kzalloc(nanddev_page_size(nand) + - nanddev_per_page_oobsize(nand), - GFP_KERNEL); + spinand->databuf = kzalloc(nanddev_eraseblock_size(nand), + GFP_KERNEL); if (!spinand->databuf) { ret = -ENOMEM; goto err_free_bufs; @@ -1267,6 +1439,12 @@ static int spinand_init(struct spinand_device *spinand) if (ret) goto err_cleanup_nanddev; + /* + * Continuous read can only be enabled with an on-die ECC engine, so the + * ECC initialization must have happened previously. 
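The direct-mapping setup above passes the plane number "in MSB just above the column address", the same convention used by the SPINAND_HAS_*_PLANE_SELECT_BIT paths earlier: the plane index is shifted left by fls(page_size). A standalone sketch of that bit placement for an assumed 2048-byte page, with fls() re-implemented locally:

#include <stdio.h>

/* 1-based index of the most significant set bit, 0 for 0 (like the kernel's fls()). */
static int fls_local(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	const unsigned int page_size = 2048;  /* assumed; fls(2048) == 12 */
	unsigned int column = 0x40;           /* some offset within the page */
	unsigned int plane = 1;

	/* The plane bit lands just above the bits used by the in-page (+OOB) offset. */
	column |= plane << fls_local(page_size);

	printf("column = 0x%x\n", column);    /* 0x1040 */
	return 0;
}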
+ */ + spinand_cont_read_init(spinand); + mtd->_read_oob = spinand_mtd_read; mtd->_write_oob = spinand_mtd_write; mtd->_block_isbad = spinand_mtd_block_isbad; @@ -1287,6 +1465,7 @@ static int spinand_init(struct spinand_device *spinand) /* Propagate ECC information to mtd_info */ mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength; mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size; + mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); ret = spinand_create_dirmaps(spinand); if (ret) { diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 3f9e9c57285426..d277c3220fdcbc 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -5,12 +5,25 @@ * Author: Boris Brezillon */ +#include #include #include #include #define SPINAND_MFR_MACRONIX 0xC2 -#define MACRONIX_ECCSR_MASK 0x0F +#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr) +#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr) +#define MACRONIX_CFG_CONT_READ BIT(2) + +#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4) + +/* Bitflip theshold configuration register */ +#define REG_CFG_BFT 0x10 +#define CFG_BFT(x) FIELD_PREP(GENMASK(7, 4), (x)) + +struct macronix_priv { + bool cont_read; +}; static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -49,8 +62,9 @@ static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = { .free = mx35lfxge4ab_ooblayout_free, }; -static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) +static int macronix_get_eccsr(struct spinand_device *spinand, u8 *eccsr) { + struct macronix_priv *priv = spinand->priv; struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_DUMMY(1, 1), @@ -60,12 +74,21 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) if (ret) return ret; - *eccsr &= MACRONIX_ECCSR_MASK; + /* + * ECCSR exposes the number of bitflips for the last read page in bits [3:0]. + * Continuous read compatible chips also expose the maximum number of + * bitflips for the whole (continuous) read operation in bits [7:4]. + */ + if (!priv->cont_read) + *eccsr = MACRONIX_ECCSR_BF_LAST_PAGE(*eccsr); + else + *eccsr = MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(*eccsr); + return 0; } -static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, - u8 status) +static int macronix_ecc_get_status(struct spinand_device *spinand, + u8 status) { struct nand_device *nand = spinand_to_nand(spinand); u8 eccsr; @@ -83,16 +106,14 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, * in order to avoid forcing the wear-leveling layer to move * data around if it's not necessary. */ - if (mx35lf1ge4ab_get_eccsr(spinand, spinand->scratchbuf)) + if (macronix_get_eccsr(spinand, spinand->scratchbuf)) return nanddev_get_ecc_conf(nand)->strength; eccsr = *spinand->scratchbuf; - if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength || - !eccsr)) + if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength || !eccsr)) return nanddev_get_ecc_conf(nand)->strength; return eccsr; - default: break; } @@ -100,6 +121,21 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, return -EINVAL; } +static int macronix_set_cont_read(struct spinand_device *spinand, bool enable) +{ + struct macronix_priv *priv = spinand->priv; + int ret; + + ret = spinand_upd_cfg(spinand, MACRONIX_CFG_CONT_READ, + enable ? 
MACRONIX_CFG_CONT_READ : 0); + if (ret) + return ret; + + priv->cont_read = enable; + + return 0; +} + static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO("MX35LF1GE4AB", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12), @@ -110,7 +146,7 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35LF2GE4AB", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), @@ -118,7 +154,9 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, + SPINAND_HAS_QE_BIT | + SPINAND_HAS_PROG_PLANE_SELECT_BIT | + SPINAND_HAS_READ_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), SPINAND_INFO("MX35LF2GE4AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26, 0x03), @@ -129,7 +167,8 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_CONT_READ(macronix_set_cont_read)), SPINAND_INFO("MX35LF4GE4AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03), NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1), @@ -139,7 +178,8 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_CONT_READ(macronix_set_cont_read)), SPINAND_INFO("MX35LF1G24AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), @@ -156,7 +196,8 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, + SPINAND_HAS_QE_BIT | + SPINAND_HAS_PROG_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), SPINAND_INFO("MX35LF2G24AD-Z4I8", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03), @@ -174,7 +215,8 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, + SPINAND_HAS_QE_BIT | + SPINAND_HAS_PROG_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), SPINAND_INFO("MX35LF4G24AD-Z4I8", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03), @@ -194,7 +236,7 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX31UF1GE4BC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), @@ -204,7 +246,7 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35LF2G14AC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20), @@ -213,9 +255,11 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, + 
SPINAND_HAS_QE_BIT | + SPINAND_HAS_PROG_PLANE_SELECT_BIT | + SPINAND_HAS_READ_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35UF4G24AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5, 0x03), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1), @@ -223,9 +267,10 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, + SPINAND_HAS_QE_BIT | + SPINAND_HAS_PROG_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35UF4G24AD-Z4I8", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), @@ -235,7 +280,7 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35UF4GE4AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), @@ -245,7 +290,8 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_CONT_READ(macronix_set_cont_read)), SPINAND_INFO("MX35UF2G14AC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), @@ -253,9 +299,11 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, + SPINAND_HAS_QE_BIT | + SPINAND_HAS_PROG_PLANE_SELECT_BIT | + SPINAND_HAS_READ_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35UF2G24AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4, 0x03), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), @@ -263,9 +311,10 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, + SPINAND_HAS_QE_BIT | + SPINAND_HAS_PROG_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35UF2G24AD-Z4I8", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), @@ -275,7 +324,7 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35UF2GE4AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), @@ -285,7 +334,8 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_CONT_READ(macronix_set_cont_read)), SPINAND_INFO("MX35UF2GE4AC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), @@ -295,7 +345,8 @@ static const struct spinand_info macronix_spinand_table[] = { 
&update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_CONT_READ(macronix_set_cont_read)), SPINAND_INFO("MX35UF1G14AC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), @@ -305,7 +356,7 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35UF1G24AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94, 0x03), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), @@ -315,7 +366,7 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX35UF1GE4AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), @@ -325,7 +376,8 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_CONT_READ(macronix_set_cont_read)), SPINAND_INFO("MX35UF1GE4AC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), @@ -335,8 +387,8 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), - + macronix_ecc_get_status), + SPINAND_CONT_READ(macronix_set_cont_read)), SPINAND_INFO("MX31LF2GE4BC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2e), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), @@ -346,7 +398,7 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), SPINAND_INFO("MX3UF2GE4BC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), @@ -356,10 +408,30 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - mx35lf1ge4ab_ecc_get_status)), + macronix_ecc_get_status)), }; +static int macronix_spinand_init(struct spinand_device *spinand) +{ + struct macronix_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spinand->priv = priv; + + return 0; +} + +static void macronix_spinand_cleanup(struct spinand_device *spinand) +{ + kfree(spinand->priv); +} + static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = { + .init = macronix_spinand_init, + .cleanup = macronix_spinand_cleanup, }; const struct spinand_manufacturer macronix_spinand_manufacturer = { diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index ba7c813b9542bc..f3bb81d7e46045 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -76,6 +76,18 @@ static int w25m02gv_select_target(struct spinand_device *spinand, return spi_mem_exec_op(spinand->spimem, &op); } +static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section > 3) + return -ERANGE; + + region->offset = 64 + (8 * section); + region->length = 
7; + + return 0; +} + static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { @@ -100,6 +112,11 @@ static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section, return 0; } +static const struct mtd_ooblayout_ops w25n01kv_ooblayout = { + .ecc = w25n01kv_ooblayout_ecc, + .free = w25n02kv_ooblayout_free, +}; + static const struct mtd_ooblayout_ops w25n02kv_ooblayout = { .ecc = w25n02kv_ooblayout_ecc, .free = w25n02kv_ooblayout_free, @@ -163,6 +180,15 @@ static const struct spinand_info winbond_spinand_table[] = { &update_cache_variants), 0, SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), + SPINAND_INFO("W25N01KV", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21), + NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)), SPINAND_INFO("W25N02KV", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c index 13daf9bffd0819..49c8e7f27f218c 100644 --- a/drivers/mtd/parsers/bcm47xxpart.c +++ b/drivers/mtd/parsers/bcm47xxpart.c @@ -95,7 +95,7 @@ static int bcm47xxpart_parse(struct mtd_info *master, uint32_t blocksize = master->erasesize; int trx_parts[2]; /* Array with indexes of TRX partitions */ int trx_num = 0; /* Number of found TRX partitions */ - int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; + static const int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; int err; /* diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c index e7b8e9d0a91033..abfa687989182f 100644 --- a/drivers/mtd/parsers/ofpart_core.c +++ b/drivers/mtd/parsers/ofpart_core.c @@ -157,10 +157,10 @@ static int parse_fixed_partitions(struct mtd_info *master, partname = of_get_property(pp, "name", &len); parts[i].name = partname; - if (of_get_property(pp, "read-only", &len)) + if (of_property_read_bool(pp, "read-only")) parts[i].mask_flags |= MTD_WRITEABLE; - if (of_get_property(pp, "lock", &len)) + if (of_property_read_bool(pp, "lock")) parts[i].mask_flags |= MTD_POWERUP_LOCK; if (of_property_read_bool(pp, "slc-mode")) diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index e0c4efc424f498..9d6e85bf227b92 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -3281,7 +3281,8 @@ static const struct flash_info *spi_nor_match_name(struct spi_nor *nor, for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { for (j = 0; j < manufacturers[i]->nparts; j++) { - if (!strcmp(name, manufacturers[i]->parts[j].name)) { + if (manufacturers[i]->parts[j].name && + !strcmp(name, manufacturers[i]->parts[j].name)) { nor->manufacturer = manufacturers[i]; return &manufacturers[i]->parts[j]; } diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index 3c6499fdb712e4..e6bab2d00c92e8 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -436,6 +436,8 @@ static const struct flash_info st_nor_parts[] = { .id = SNOR_ID(0x20, 0xbb, 0x17), .name = "n25q064a", .size = SZ_8M, + .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP | + SPI_NOR_BP3_SR_BIT6, .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ, }, { .id = SNOR_ID(0x20, 0xbb, 0x18), diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 
6cc237c24e0728..d6c92595f6bc9b 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -966,6 +966,10 @@ static const struct flash_info spansion_nor_parts[] = { .name = "s28hl01gt", .mfr_flags = USE_CLPEF, .fixups = &s28hx_t_fixups, + }, { + .id = SNOR_ID(0x34, 0x5b, 0x19), + .mfr_flags = USE_CLPEF, + .fixups = &s28hx_t_fixups, }, { .id = SNOR_ID(0x34, 0x5b, 0x1a), .name = "s28hs512t", diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c index 180b7390690cb3..b5ad7118c49a2b 100644 --- a/drivers/mtd/spi-nor/sst.c +++ b/drivers/mtd/spi-nor/sst.c @@ -167,6 +167,21 @@ static const struct flash_info sst_nor_parts[] = { } }; +static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) +{ + u8 op = (len == 1) ? SPINOR_OP_BP : SPINOR_OP_AAI_WP; + int ret; + + nor->program_opcode = op; + ret = spi_nor_write_data(nor, to, 1, buf); + if (ret < 0) + return ret; + WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret); + + return spi_nor_wait_till_ready(nor); +} + static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { @@ -188,16 +203,10 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len, /* Start write from odd address. */ if (to % 2) { - nor->program_opcode = SPINOR_OP_BP; - /* write one byte. */ - ret = spi_nor_write_data(nor, to, 1, buf); + ret = sst_nor_write_data(nor, to, 1, buf); if (ret < 0) goto out; - WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret); - ret = spi_nor_wait_till_ready(nor); - if (ret) - goto out; to++; actual++; @@ -205,16 +214,11 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len, /* Write out most of the data here. */ for (; actual < len - 1; actual += 2) { - nor->program_opcode = SPINOR_OP_AAI_WP; - /* write two bytes. */ - ret = spi_nor_write_data(nor, to, 2, buf + actual); + ret = sst_nor_write_data(nor, to, 2, buf + actual); if (ret < 0) goto out; - WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret); - ret = spi_nor_wait_till_ready(nor); - if (ret) - goto out; + to += 2; nor->sst_write_second = true; } @@ -234,14 +238,9 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len, if (ret) goto out; - nor->program_opcode = SPINOR_OP_BP; - ret = spi_nor_write_data(nor, to, 1, buf + actual); + ret = sst_nor_write_data(nor, to, 1, buf + actual); if (ret < 0) goto out; - WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret); - ret = spi_nor_wait_till_ready(nor); - if (ret) - goto out; actual += 1; diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index e065e4fd42a33d..9f7ce5763e7104 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -17,6 +17,31 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_DATA_OUT(1, buf, 0)) +static int +w25q128_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt) +{ + /* + * Zetta ZD25Q128C is a clone of the Winbond device. But the encoded + * size is really wrong. It seems that they confused Mbit with MiB. + * Thus the flash is discovered as a 2MiB device. 
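To make the numbers behind this fixup explicit: the "128" in W25Q128/ZD25Q128C means 128 Mbit, that is 128 / 8 = 16 MiB (SZ_16M). A density of 2 MiB is what you get if that 16 is instead read as 16 Mbit (16 / 8 = 2 MiB), consistent with the Mbit/MiB mix-up mentioned above. That is why a reported size of SZ_2M together with a single SZ_2M erase region is treated as the signature of the clone and corrected to SZ_16M.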
+ */ + if (bfpt_header->major == SFDP_JESD216_MAJOR && + bfpt_header->minor == SFDP_JESD216_MINOR && + nor->params->size == SZ_2M && + nor->params->erase_map.regions[0].size == SZ_2M) { + nor->params->size = SZ_16M; + nor->params->erase_map.regions[0].size = SZ_16M; + } + + return 0; +} + +static const struct spi_nor_fixups w25q128_fixups = { + .post_bfpt = w25q128_post_bfpt_fixups, +}; + static int w25q256_post_bfpt_fixups(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, @@ -108,6 +133,7 @@ static const struct flash_info winbond_nor_parts[] = { .size = SZ_16M, .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB, .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, + .fixups = &w25q128_fixups, }, { .id = SNOR_ID(0xef, 0x40, 0x19), .name = "w25q256", diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 0d8f04cf03c5bb..6bb80d7714bc8b 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -1095,7 +1095,6 @@ const struct file_operations ubi_vol_cdev_operations = { /* UBI character device operations */ const struct file_operations ubi_cdev_operations = { .owner = THIS_MODULE, - .llseek = no_llseek, .unlocked_ioctl = ubi_cdev_ioctl, .compat_ioctl = compat_ptr_ioctl, }; @@ -1105,5 +1104,4 @@ const struct file_operations ubi_ctrl_cdev_operations = { .owner = THIS_MODULE, .unlocked_ioctl = ctrl_cdev_ioctl, .compat_ioctl = compat_ptr_ioctl, - .llseek = no_llseek, }; diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 9ec3b8b6a0aa8f..d2a53961d8e207 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -470,7 +470,6 @@ static const struct file_operations dfs_fops = { .read = dfs_file_read, .write = dfs_file_write, .open = simple_open, - .llseek = no_llseek, .owner = THIS_MODULE, }; diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 6d15ab3bfbbcac..0433a0f36d1b4e 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -3098,9 +3098,9 @@ static void amt_link_setup(struct net_device *dev) dev->hard_header_len = 0; dev->addr_len = 0; dev->priv_flags |= IFF_NO_QUEUE; - dev->features |= NETIF_F_LLTX; + dev->lltx = true; + dev->netns_local = true; dev->features |= NETIF_F_GSO_SOFTWARE; - dev->features |= NETIF_F_NETNS_LOCAL; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 7aca0544fb29c9..e057526448d780 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -68,6 +68,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) __be16 proto; void *oiph; int err; + int nh; bareudp = rcu_dereference_sk_user_data(sk); if (!bareudp) @@ -148,10 +149,25 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) } skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; - oiph = skb_network_header(skb); - skb_reset_network_header(skb); skb_reset_mac_header(skb); + /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + + skb_reset_network_header(skb); + + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(bareudp->dev, rx_length_errors); + DEV_STATS_INC(bareudp->dev, rx_errors); + goto drop; + } + + /* Get the outer header. 
*/ + oiph = skb->head + nh; + if (!ipv6_mod_enabled() || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); else @@ -301,6 +317,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be32 saddr; int err; + if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) + return -EINVAL; + if (!sock) return -ESHUTDOWN; @@ -368,6 +387,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; + if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) + return -EINVAL; + if (!sock) return -ESHUTDOWN; @@ -553,7 +575,6 @@ static void bareudp_setup(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &bareudp_type); dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->features |= NETIF_F_RXCSUM; - dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->hw_features |= NETIF_F_RXCSUM; @@ -566,6 +587,7 @@ static void bareudp_setup(struct net_device *dev) dev->type = ARPHRD_NONE; netif_keep_dst(dev); dev->priv_flags |= IFF_NO_QUEUE; + dev->lltx = true; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bb9c3d6ef43592..b1bffd8e9a958f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -418,6 +418,41 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, /*---------------------------------- XFRM -----------------------------------*/ #ifdef CONFIG_XFRM_OFFLOAD +/** + * bond_ipsec_dev - Get active device for IPsec offload + * @xs: pointer to transformer state struct + * + * Context: caller must hold rcu_read_lock. + * + * Return: the device for ipsec offload, or NULL if not exist. 
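Back on the bareudp receive path above: the offset is stashed instead of keeping the oiph pointer because pskb_inet_may_pull() can end up reallocating the skb head (the comment in the hunk says as much), which would leave a previously saved pointer dangling; an offset relative to skb->head survives the move and is re-resolved afterwards. The same pattern in miniature, with a plain heap buffer standing in for the skb head:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *head = malloc(32);

	if (!head)
		return 1;
	strcpy(head, "outer|inner");

	/* Remember where the outer header lives as an offset, not a pointer... */
	size_t outer_off = 0;

	/* ...because this step may move the buffer (as pskb_may_pull() may). */
	head = realloc(head, 4096);
	if (!head)
		return 1;

	/* Only re-derive the pointer after the possible reallocation. */
	printf("%.5s\n", head + outer_off);

	free(head);
	return 0;
}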
+ **/ +static struct net_device *bond_ipsec_dev(struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond; + struct slave *slave; + + if (!bond_dev) + return NULL; + + bond = netdev_priv(bond_dev); + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) + return NULL; + + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) + return NULL; + + if (!xs->xso.real_dev) + return NULL; + + if (xs->xso.real_dev != slave->dev) + pr_warn_ratelimited("%s: (slave %s): not same with IPsec offload real dev %s\n", + bond_dev->name, slave->dev->name, xs->xso.real_dev->name); + + return slave->dev; +} + /** * bond_ipsec_add_sa - program device with a security association * @xs: pointer to transformer state struct @@ -640,23 +675,12 @@ static void bond_ipsec_free_sa(struct xfrm_state *xs) **/ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { - struct net_device *bond_dev = xs->xso.dev; struct net_device *real_dev; - struct slave *curr_active; - struct bonding *bond; bool ok = false; - bond = netdev_priv(bond_dev); rcu_read_lock(); - curr_active = rcu_dereference(bond->curr_active_slave); - if (!curr_active) - goto out; - real_dev = curr_active->dev; - - if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) - goto out; - - if (!xs->xso.real_dev) + real_dev = bond_ipsec_dev(xs); + if (!real_dev) goto out; if (!real_dev->xfrmdev_ops || @@ -670,11 +694,61 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) return ok; } +/** + * bond_advance_esn_state - ESN support for IPSec HW offload + * @xs: pointer to transformer state struct + **/ +static void bond_advance_esn_state(struct xfrm_state *xs) +{ + struct net_device *real_dev; + + rcu_read_lock(); + real_dev = bond_ipsec_dev(xs); + if (!real_dev) + goto out; + + if (!real_dev->xfrmdev_ops || + !real_dev->xfrmdev_ops->xdo_dev_state_advance_esn) { + pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_advance_esn\n", __func__, real_dev->name); + goto out; + } + + real_dev->xfrmdev_ops->xdo_dev_state_advance_esn(xs); +out: + rcu_read_unlock(); +} + +/** + * bond_xfrm_update_stats - Update xfrm state + * @xs: pointer to transformer state struct + **/ +static void bond_xfrm_update_stats(struct xfrm_state *xs) +{ + struct net_device *real_dev; + + rcu_read_lock(); + real_dev = bond_ipsec_dev(xs); + if (!real_dev) + goto out; + + if (!real_dev->xfrmdev_ops || + !real_dev->xfrmdev_ops->xdo_dev_state_update_stats) { + pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_update_stats\n", __func__, real_dev->name); + goto out; + } + + real_dev->xfrmdev_ops->xdo_dev_state_update_stats(xs); +out: + rcu_read_unlock(); +} + static const struct xfrmdev_ops bond_xfrmdev_ops = { .xdo_dev_state_add = bond_ipsec_add_sa, .xdo_dev_state_delete = bond_ipsec_del_sa, .xdo_dev_state_free = bond_ipsec_free_sa, .xdo_dev_offload_ok = bond_ipsec_offload_ok, + .xdo_dev_state_advance_esn = bond_advance_esn_state, + .xdo_dev_state_update_stats = bond_xfrm_update_stats, }; #endif /* CONFIG_XFRM_OFFLOAD */ @@ -2300,7 +2374,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, goto err_sysfs_del; } - res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); + res = dev_xdp_propagate(slave_dev, &xdp); if (res < 0) { /* ndo_bpf() sets extack error message */ slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res); @@ -2436,7 +2510,7 @@ static int __bond_release_one(struct net_device *bond_dev, .prog = NULL, .extack = NULL, }; - if 
(slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp)) + if (dev_xdp_propagate(slave_dev, &xdp)) slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n"); } @@ -5536,9 +5610,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp) break; default: - /* Should never happen. Mode guarded by bond_xdp_check() */ - netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond)); - WARN_ON_ONCE(1); + if (net_ratelimit()) + netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", + BOND_MODE(bond)); return NULL; } @@ -5626,7 +5700,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, goto err; } - err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); + err = dev_xdp_propagate(slave_dev, &xdp); if (err < 0) { /* ndo_bpf() sets extack error message */ slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err); @@ -5658,7 +5732,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, if (slave == rollback_slave) break; - err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); + err_unwind = dev_xdp_propagate(slave_dev, &xdp); if (err_unwind < 0) slave_err(dev, slave_dev, "Error %d when unwinding XDP program change\n", err_unwind); @@ -5812,9 +5886,6 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev, if (real_dev) { ret = ethtool_get_ts_info_by_layer(real_dev, info); } else { - info->phc_index = -1; - info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; /* Check if all slaves support software tx timestamping */ rcu_read_lock(); bond_for_each_slave_rcu(bond, slave, iter) { @@ -5928,7 +5999,10 @@ void bond_setup(struct net_device *bond_dev) #endif /* CONFIG_XFRM_OFFLOAD */ /* don't acquire bond device's netif_tx_lock when transmitting */ - bond_dev->features |= NETIF_F_LLTX; + bond_dev->lltx = true; + + /* Don't allow bond devices to change network namespaces. */ + bond_dev->netns_local = true; /* By default, we declare the bond to be fully * VLAN hardware accelerated capable. Special @@ -5937,9 +6011,6 @@ void bond_setup(struct net_device *bond_dev) * capable */ - /* Don't allow bond devices to change network namespaces. 
*/ - bond_dev->features |= NETIF_F_NETNS_LOCAL; - bond_dev->hw_features = BOND_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | @@ -6384,7 +6455,8 @@ static int bond_init(struct net_device *bond_dev) netdev_dbg(bond_dev, "Begin bond_init\n"); - bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM); + bond->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + bond_dev->name); if (!bond->wq) return -ENOMEM; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 7f9b60a42d29ce..cf989bea9aa333 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -225,6 +225,7 @@ source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/peak_canfd/Kconfig" source "drivers/net/can/rcar/Kconfig" +source "drivers/net/can/rockchip/Kconfig" source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/softing/Kconfig" source "drivers/net/can/spi/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 4669cd51e7bf5f..a71db2cfe990d5 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_CAN_SLCAN) += slcan/ obj-y += dev/ obj-y += esd/ obj-y += rcar/ +obj-y += rockchip/ obj-y += spi/ obj-y += usb/ obj-y += softing/ diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 11f434d708b35a..191707d7e3dac2 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1191,7 +1191,7 @@ MODULE_DEVICE_TABLE(platform, at91_can_id_table); static struct platform_driver at91_can_driver = { .probe = at91_can_probe, - .remove_new = at91_can_remove, + .remove = at91_can_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = of_match_ptr(at91_can_dt_ids), diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c index 49cf9682b9254c..bfc60eb33dc375 100644 --- a/drivers/net/can/bxcan.c +++ b/drivers/net/can/bxcan.c @@ -1092,7 +1092,7 @@ static struct platform_driver bxcan_driver = { .of_match_table = bxcan_of_match, }, .probe = bxcan_probe, - .remove_new = bxcan_remove, + .remove = bxcan_remove, }; module_platform_driver(bxcan_driver); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index e2ec69aa46e531..6cba9717a6d87d 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -476,7 +476,7 @@ static struct platform_driver c_can_plat_driver = { .of_match_table = c_can_of_table, }, .probe = c_can_plat_probe, - .remove_new = c_can_plat_remove, + .remove = c_can_plat_remove, .suspend = c_can_suspend, .resume = c_can_resume, .id_table = c_can_id_table, diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c index 22009440a983ee..d0676281715395 100644 --- a/drivers/net/can/cc770/cc770_isa.c +++ b/drivers/net/can/cc770/cc770_isa.c @@ -307,7 +307,7 @@ static void cc770_isa_remove(struct platform_device *pdev) static struct platform_driver cc770_isa_driver = { .probe = cc770_isa_probe, - .remove_new = cc770_isa_remove, + .remove = cc770_isa_remove, .driver = { .name = KBUILD_MODNAME, }, diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index 13bcfba05f18ff..b6c4f02ffb97aa 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c @@ -70,17 +70,10 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg, static int cc770_get_of_node_data(struct platform_device *pdev, struct cc770_priv *priv) { + u32 clkext = 
CC770_PLATFORM_CAN_CLOCK, clkout = 0; struct device_node *np = pdev->dev.of_node; - const u32 *prop; - int prop_size; - u32 clkext; - - prop = of_get_property(np, "bosch,external-clock-frequency", - &prop_size); - if (prop && (prop_size == sizeof(u32))) - clkext = *prop; - else - clkext = CC770_PLATFORM_CAN_CLOCK; /* default */ + + of_property_read_u32(np, "bosch,external-clock-frequency", &clkext); priv->can.clock.freq = clkext; /* The system clock may not exceed 10 MHz */ @@ -98,7 +91,7 @@ static int cc770_get_of_node_data(struct platform_device *pdev, if (of_property_read_bool(np, "bosch,iso-low-speed-mux")) priv->cpu_interface |= CPUIF_MUX; - if (!of_get_property(np, "bosch,no-comperator-bypass", NULL)) + if (!of_property_read_bool(np, "bosch,no-comperator-bypass")) priv->bus_config |= BUSCFG_CBY; if (of_property_read_bool(np, "bosch,disconnect-rx0-input")) priv->bus_config |= BUSCFG_DR0; @@ -109,25 +102,22 @@ static int cc770_get_of_node_data(struct platform_device *pdev, if (of_property_read_bool(np, "bosch,polarity-dominant")) priv->bus_config |= BUSCFG_POL; - prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size); - if (prop && (prop_size == sizeof(u32)) && *prop > 0) { - u32 cdv = clkext / *prop; - int slew; + of_property_read_u32(np, "bosch,clock-out-frequency", &clkout); + if (clkout > 0) { + u32 cdv = clkext / clkout; if (cdv > 0 && cdv < 16) { + u32 slew; + priv->cpu_interface |= CPUIF_CEN; priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK; - prop = of_get_property(np, "bosch,slew-rate", - &prop_size); - if (prop && (prop_size == sizeof(u32))) { - slew = *prop; - } else { + if (of_property_read_u32(np, "bosch,slew-rate", &slew)) { /* Determine default slew rate */ slew = (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT) - ((cdv * clkext - 1) / 8000000); - if (slew < 0) + if (slew > (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT)) slew = 0; } priv->clkout |= (slew << CLKOUT_SL_SHIFT) & @@ -257,7 +247,7 @@ static struct platform_driver cc770_platform_driver = { .of_match_table = cc770_platform_table, }, .probe = cc770_platform_probe, - .remove_new = cc770_platform_remove, + .remove = cc770_platform_remove, }; module_platform_driver(cc770_platform_driver); diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c index 55bb10b157b498..70e2577c85416c 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_platform.c +++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c @@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, ctucan_of_match); static struct platform_driver ctucanfd_driver = { .probe = ctucan_platform_probe, - .remove_new = ctucan_platform_remove, + .remove = ctucan_platform_remove, .driver = { .name = DRV_NAME, .pm = &ctucan_platform_pm_ops, diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 87828f9530739c..6792c14fd7eb00 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -380,12 +380,9 @@ int can_ethtool_op_get_ts_info_hwts(struct net_device *dev, { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_ON); info->rx_filters = BIT(HWTSTAMP_FILTER_ALL); diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index dfdc039d92a6c1..01aacdcda26066 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -65,15 +65,6 @@ static int can_validate(struct nlattr *tb[], struct 
nlattr *data[], if (!data) return 0; - if (data[IFLA_CAN_BITTIMING]) { - struct can_bittiming bt; - - memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); - err = can_validate_bittiming(&bt, extack); - if (err) - return err; - } - if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK; @@ -114,6 +105,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[], } } + if (data[IFLA_CAN_BITTIMING]) { + struct can_bittiming bt; + + memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); + err = can_validate_bittiming(&bt, extack); + if (err) + return err; + } + if (is_can_fd) { if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING]) return -EOPNOTSUPP; @@ -195,48 +195,6 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], /* We need synchronization with dev->stop() */ ASSERT_RTNL(); - if (data[IFLA_CAN_BITTIMING]) { - struct can_bittiming bt; - - /* Do not allow changing bittiming while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* Calculate bittiming parameters based on - * bittiming_const if set, otherwise pass bitrate - * directly via do_set_bitrate(). Bail out if neither - * is given. - */ - if (!priv->bittiming_const && !priv->do_set_bittiming && - !priv->bitrate_const) - return -EOPNOTSUPP; - - memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); - err = can_get_bittiming(dev, &bt, - priv->bittiming_const, - priv->bitrate_const, - priv->bitrate_const_cnt, - extack); - if (err) - return err; - - if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) { - NL_SET_ERR_MSG_FMT(extack, - "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps", - bt.bitrate, priv->bitrate_max); - return -EINVAL; - } - - memcpy(&priv->bittiming, &bt, sizeof(bt)); - - if (priv->do_set_bittiming) { - /* Finally, set the bit-timing registers */ - err = priv->do_set_bittiming(dev); - if (err) - return err; - } - } - if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm; u32 ctrlstatic; @@ -284,6 +242,48 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK; } + if (data[IFLA_CAN_BITTIMING]) { + struct can_bittiming bt; + + /* Do not allow changing bittiming while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + + /* Calculate bittiming parameters based on + * bittiming_const if set, otherwise pass bitrate + * directly via do_set_bitrate(). Bail out if neither + * is given. 
+ */ + if (!priv->bittiming_const && !priv->do_set_bittiming && + !priv->bitrate_const) + return -EOPNOTSUPP; + + memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); + err = can_get_bittiming(dev, &bt, + priv->bittiming_const, + priv->bitrate_const, + priv->bitrate_const_cnt, + extack); + if (err) + return err; + + if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) { + NL_SET_ERR_MSG_FMT(extack, + "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps", + bt.bitrate, priv->bitrate_max); + return -EINVAL; + } + + memcpy(&priv->bittiming, &bt, sizeof(bt)); + + if (priv->do_set_bittiming) { + /* Finally, set the bit-timing registers */ + err = priv->do_set_bittiming(dev); + if (err) + return err; + } + } + if (data[IFLA_CAN_RESTART_MS]) { /* Do not allow changing restart delay while running */ if (dev->flags & IFF_UP) diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c index b7cdcffd0e450a..5d6d2828cd0458 100644 --- a/drivers/net/can/esd/esd_402_pci-core.c +++ b/drivers/net/can/esd/esd_402_pci-core.c @@ -369,12 +369,13 @@ static int pci402_init_cores(struct pci_dev *pdev) SET_NETDEV_DEV(netdev, &pdev->dev); priv = netdev_priv(netdev); + priv->can.clock.freq = card->ov.core_frequency; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_CC_LEN8_DLC; - - priv->can.clock.freq = card->ov.core_frequency; + if (card->ov.features & ACC_OV_REG_FEAT_MASK_DAR) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) priv->can.bittiming_const = &pci402_bittiming_const_canfd; else diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c index 121cbbf814581d..c80032bc1a5218 100644 --- a/drivers/net/can/esd/esdacc.c +++ b/drivers/net/can/esd/esdacc.c @@ -17,6 +17,9 @@ /* esdACC DLC register layout */ #define ACC_DLC_DLC_MASK GENMASK(3, 0) #define ACC_DLC_RTR_FLAG BIT(4) +#define ACC_DLC_SSTX_FLAG BIT(24) /* Single Shot TX */ + +/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */ #define ACC_DLC_TXD_FLAG BIT(5) /* ecc value of esdACC equals SJA1000's ECC register */ @@ -43,8 +46,8 @@ static void acc_resetmode_enter(struct acc_core *core) { - acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, - ACC_REG_CONTROL_MASK_MODE_RESETMODE); + acc_set_bits(core, ACC_CORE_OF_CTRL, + ACC_REG_CTRL_MASK_RESETMODE); /* Read back reset mode bit to flush PCI write posting */ acc_resetmode_entered(core); @@ -52,14 +55,14 @@ static void acc_resetmode_enter(struct acc_core *core) static void acc_resetmode_leave(struct acc_core *core) { - acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE, - ACC_REG_CONTROL_MASK_MODE_RESETMODE); + acc_clear_bits(core, ACC_CORE_OF_CTRL, + ACC_REG_CTRL_MASK_RESETMODE); /* Read back reset mode bit to flush PCI write posting */ acc_resetmode_entered(core); } -static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc, +static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc, const void *data) { acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1, @@ -172,7 +175,7 @@ int acc_open(struct net_device *netdev) struct acc_net_priv *priv = netdev_priv(netdev); struct acc_core *core = priv->core; u32 tx_fifo_status; - u32 ctrl_mode; + u32 ctrl; int err; /* Retry to enter RESET mode if out of sync. 
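On the esdACC changes in this area: the new single-shot flag sits at bit 24, which is why acc_dlc in acc_start_xmit() is widened from u8 to u32 a bit further on; the assembled DLC word no longer fits in one byte. A standalone sketch using the bit positions from the defines above:

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define ACC_DLC_DLC_MASK	0x0fu	/* GENMASK(3, 0) */
#define ACC_DLC_RTR_FLAG	BIT(4)
#define ACC_DLC_SSTX_FLAG	BIT(24)	/* Single Shot TX */

int main(void)
{
	unsigned int acc_dlc = 8 & ACC_DLC_DLC_MASK;	/* classic DLC of 8 */

	acc_dlc |= ACC_DLC_RTR_FLAG;	/* remote frame */
	acc_dlc |= ACC_DLC_SSTX_FLAG;	/* one-shot: no automatic retransmission */

	printf("acc_dlc = 0x%08x\n", acc_dlc);	/* 0x01000018 */
	return 0;
}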
*/ @@ -187,19 +190,19 @@ int acc_open(struct net_device *netdev) if (err) return err; - ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX | - ACC_REG_CONTROL_MASK_IE_TXERROR | - ACC_REG_CONTROL_MASK_IE_ERRWARN | - ACC_REG_CONTROL_MASK_IE_OVERRUN | - ACC_REG_CONTROL_MASK_IE_ERRPASS; + ctrl = ACC_REG_CTRL_MASK_IE_RXTX | + ACC_REG_CTRL_MASK_IE_TXERROR | + ACC_REG_CTRL_MASK_IE_ERRWARN | + ACC_REG_CTRL_MASK_IE_OVERRUN | + ACC_REG_CTRL_MASK_IE_ERRPASS; if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) - ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR; + ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) - ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM; + ctrl |= ACC_REG_CTRL_MASK_LOM; - acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode); + acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl); acc_resetmode_leave(core); priv->can.state = CAN_STATE_ERROR_ACTIVE; @@ -218,13 +221,13 @@ int acc_close(struct net_device *netdev) struct acc_net_priv *priv = netdev_priv(netdev); struct acc_core *core = priv->core; - acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE, - ACC_REG_CONTROL_MASK_IE_RXTX | - ACC_REG_CONTROL_MASK_IE_TXERROR | - ACC_REG_CONTROL_MASK_IE_ERRWARN | - ACC_REG_CONTROL_MASK_IE_OVERRUN | - ACC_REG_CONTROL_MASK_IE_ERRPASS | - ACC_REG_CONTROL_MASK_IE_BUSERR); + acc_clear_bits(core, ACC_CORE_OF_CTRL, + ACC_REG_CTRL_MASK_IE_RXTX | + ACC_REG_CTRL_MASK_IE_TXERROR | + ACC_REG_CTRL_MASK_IE_ERRWARN | + ACC_REG_CTRL_MASK_IE_OVERRUN | + ACC_REG_CTRL_MASK_IE_ERRPASS | + ACC_REG_CTRL_MASK_IE_BUSERR); netif_stop_queue(netdev); acc_resetmode_enter(core); @@ -233,9 +236,9 @@ int acc_close(struct net_device *netdev) /* Mark pending TX requests to be aborted after controller restart. */ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff); - /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */ - acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE, - ACC_REG_CONTROL_MASK_MODE_LOM); + /* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */ + acc_clear_bits(core, ACC_CORE_OF_CTRL, + ACC_REG_CTRL_MASK_LOM); close_candev(netdev); return 0; @@ -249,7 +252,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev) u8 tx_fifo_head = core->tx_fifo_head; int fifo_usage; u32 acc_id; - u8 acc_dlc; + u32 acc_dlc; if (can_dropped_invalid_skb(netdev, skb)) return NETDEV_TX_OK; @@ -274,6 +277,8 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev) acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); if (cf->can_id & CAN_RTR_FLAG) acc_dlc |= ACC_DLC_RTR_FLAG; + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + acc_dlc |= ACC_DLC_SSTX_FLAG; if (cf->can_id & CAN_EFF_FLAG) { acc_id = cf->can_id & CAN_EFF_MASK; diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h index a70488b25d3905..6b7ebd8c91b2d6 100644 --- a/drivers/net/can/esd/esdacc.h +++ b/drivers/net/can/esd/esdacc.h @@ -35,6 +35,7 @@ */ #define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16) #define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16) +#define ACC_OV_REG_FEAT_MASK_DAR BIT(30 - 16) #define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0) #define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1) @@ -50,7 +51,7 @@ #define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31) /* esdACC CAN Core Module */ -#define ACC_CORE_OF_CTRL_MODE 0x0000 +#define ACC_CORE_OF_CTRL 0x0000 #define ACC_CORE_OF_STATUS_IRQ 0x0008 #define ACC_CORE_OF_BRP 0x000c #define ACC_CORE_OF_BTR 0x0010 @@ -66,21 +67,22 @@ #define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8 #define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc -#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0) 
-#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1) -#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2) -#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5) -#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6) -#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7) - -#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8) -#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9) -#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10) -#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11) -#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12) -#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13) -#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14) -#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15) +/* CTRL register layout */ +#define ACC_REG_CTRL_MASK_RESETMODE BIT(0) +#define ACC_REG_CTRL_MASK_LOM BIT(1) +#define ACC_REG_CTRL_MASK_STM BIT(2) +#define ACC_REG_CTRL_MASK_TRANSEN BIT(5) +#define ACC_REG_CTRL_MASK_TS BIT(6) +#define ACC_REG_CTRL_MASK_SCHEDULE BIT(7) + +#define ACC_REG_CTRL_MASK_IE_RXTX BIT(8) +#define ACC_REG_CTRL_MASK_IE_TXERROR BIT(9) +#define ACC_REG_CTRL_MASK_IE_ERRWARN BIT(10) +#define ACC_REG_CTRL_MASK_IE_OVERRUN BIT(11) +#define ACC_REG_CTRL_MASK_IE_TSI BIT(12) +#define ACC_REG_CTRL_MASK_IE_ERRPASS BIT(13) +#define ACC_REG_CTRL_MASK_IE_ALI BIT(14) +#define ACC_REG_CTRL_MASK_IE_BUSERR BIT(15) /* BRP and BTR register layout for CAN-Classic version */ #define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0) @@ -300,9 +302,9 @@ static inline void acc_clear_bits(struct acc_core *core, static inline int acc_resetmode_entered(struct acc_core *core) { - u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE); + u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL); - return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0; + return (ctrl & ACC_REG_CTRL_MASK_RESETMODE) != 0; } static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs) diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 8ea7f2795551bb..ac1a860986df69 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -354,6 +354,14 @@ static struct flexcan_devtype_data fsl_imx93_devtype_data = { FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; +static const struct flexcan_devtype_data fsl_imx95_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD | + FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, +}; + static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | @@ -544,6 +552,13 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) { regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); + } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) { + /* For the SCMI mode, driver do nothing, ATF will send request to + * SM(system manager, M33 core) through SCMI protocol after linux + * suspend. Once SM get this request, it will send IPG_STOP signal + * to Flex_CAN, let CAN in STOP mode. 
+ */ + return 0; } return flexcan_low_power_enter_ack(priv); @@ -555,7 +570,11 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) u32 reg_mcr; int ret; - /* remove stop request */ + /* Remove stop request, for FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, + * do nothing here, because ATF already send request to SM before + * linux resume. Once SM get this request, it will deassert the + * IPG_STOP signal to Flex_CAN. + */ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { ret = flexcan_stop_mode_enable_scfw(priv, false); if (ret < 0) @@ -1983,6 +2002,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) ret = flexcan_setup_stop_mode_scfw(pdev); else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) ret = flexcan_setup_stop_mode_gpr(pdev); + else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) + /* ATF will handle all STOP_IPG related work */ + ret = 0; else /* return 0 directly if doesn't support stop mode feature */ return 0; @@ -2009,6 +2031,7 @@ static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, }, { .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, }, { .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, }, + { .compatible = "fsl,imx95-flexcan", .data = &fsl_imx95_devtype_data, }, { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, }, @@ -2309,9 +2332,19 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device) if (device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, true); - err = pm_runtime_force_suspend(device); - if (err) - return err; + /* For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, it need ATF to send + * to SM through SCMI protocol, SM will assert the IPG_STOP + * signal. But all this works need the CAN clocks keep on. + * After the CAN module get the IPG_STOP mode, and switch to + * STOP mode, whether still keep the CAN clocks on or gate them + * off depend on the Hardware design. 
+ */ + if (!(device_may_wakeup(device) && + priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) { + err = pm_runtime_force_suspend(device); + if (err) + return err; + } } return 0; @@ -2325,9 +2358,12 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) if (netif_running(dev)) { int err; - err = pm_runtime_force_resume(device); - if (err) - return err; + if (!(device_may_wakeup(device) && + priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) { + err = pm_runtime_force_resume(device); + if (err) + return err; + } if (device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, false); @@ -2349,7 +2385,7 @@ static struct platform_driver flexcan_driver = { .of_match_table = flexcan_of_match, }, .probe = flexcan_probe, - .remove_new = flexcan_remove, + .remove = flexcan_remove, .id_table = flexcan_id_table, }; diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h index 025c3417031f42..4933d8c7439e62 100644 --- a/drivers/net/can/flexcan/flexcan.h +++ b/drivers/net/can/flexcan/flexcan.h @@ -68,6 +68,8 @@ #define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15) /* Device supports RX via FIFO */ #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16) +/* Setup stop mode with ATF SCMI protocol to support wakeup */ +#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17) struct flexcan_devtype_data { u32 quirks; /* quirks needed for different IP cores */ diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 6d3ba71a6a7336..cdf0ec9fa7f3c2 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1725,7 +1725,7 @@ static struct platform_driver grcan_driver = { .of_match_table = grcan_match, }, .probe = grcan_probe, - .remove_new = grcan_remove, + .remove = grcan_remove, }; module_platform_driver(grcan_driver); diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 72307297d75e4d..d32b10900d2f62 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -1033,7 +1033,7 @@ static struct platform_driver ifi_canfd_plat_driver = { .of_match_table = ifi_canfd_of_table, }, .probe = ifi_canfd_plat_probe, - .remove_new = ifi_canfd_plat_remove, + .remove = ifi_canfd_plat_remove, }; module_platform_driver(ifi_canfd_plat_driver); diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index d048ea565b8925..60c7b83b4539e1 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -2049,7 +2049,7 @@ static struct platform_driver ican3_driver = { .name = DRV_NAME, }, .probe = ican3_probe, - .remove_new = ican3_remove, + .remove = ican3_remove, }; module_platform_driver(ican3_driver); diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 9ffc3ffb4e8f8f..fee012b57f3371 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -1053,13 +1053,13 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, void __iomem *serdes_base; u32 word1, word2; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT; - word2 = addr >> 32; -#else - word1 = addr; - word2 = 0; -#endif + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) { + word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT; + word2 = upper_32_bits(addr); + } else { + word1 = addr; + word2 = 0; + } serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; iowrite32(word1, serdes_base); iowrite32(word2, serdes_base + 0x4); @@ -1072,9 +1072,9 @@ static void 
kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK; u32 msb = 0x0; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - msb = addr >> 32; -#endif + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + msb = upper_32_bits(addr); + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index; iowrite32(lsb, serdes_base); iowrite32(msb, serdes_base + 0x4); @@ -1087,9 +1087,9 @@ static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie, u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK; u32 msb = 0x0; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - msb = addr >> 32; -#endif + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + msb = upper_32_bits(addr); + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; iowrite32(msb, serdes_base); iowrite32(lsb, serdes_base + 0x4); @@ -1104,6 +1104,9 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) /* Disable the DMA */ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); + + dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64)); + for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev, KVASER_PCIEFD_DMA_SIZE, diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 012c3d22b01dd3..a978b960f1f1e1 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1434,7 +1434,8 @@ static int m_can_chip_config(struct net_device *dev) /* Disable unused interrupts */ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF | - IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F); + IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F | + IR_TSW); err = m_can_config_enable(cdev); if (err) @@ -1763,11 +1764,7 @@ static int m_can_close(struct net_device *dev) netif_stop_queue(dev); - if (!cdev->is_peripheral) - napi_disable(&cdev->napi); - m_can_stop(dev); - m_can_clk_stop(cdev); free_irq(dev->irq, dev); m_can_clean(dev); @@ -1776,10 +1773,13 @@ static int m_can_close(struct net_device *dev) destroy_workqueue(cdev->tx_wq); cdev->tx_wq = NULL; can_rx_offload_disable(&cdev->offload); + } else { + napi_disable(&cdev->napi); } close_candev(dev); + m_can_clk_stop(cdev); phy_power_off(cdev->transceiver); return 0; @@ -2030,6 +2030,8 @@ static int m_can_open(struct net_device *dev) if (cdev->is_peripheral) can_rx_offload_enable(&cdev->offload); + else + napi_enable(&cdev->napi); /* register interrupt handler */ if (cdev->is_peripheral) { @@ -2063,9 +2065,6 @@ static int m_can_open(struct net_device *dev) if (err) goto exit_start_fail; - if (!cdev->is_peripheral) - napi_enable(&cdev->napi); - netif_start_queue(dev); return 0; @@ -2079,6 +2078,8 @@ static int m_can_open(struct net_device *dev) out_wq_fail: if (cdev->is_peripheral) can_rx_offload_disable(&cdev->offload); + else + napi_disable(&cdev->napi); close_candev(dev); exit_disable_clks: m_can_clk_stop(cdev); diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index 983ab80260ddbf..b832566efda042 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -231,7 +231,7 @@ static struct platform_driver m_can_plat_driver = { .pm = &m_can_pmops, }, .probe = m_can_plat_probe, - .remove_new = m_can_plat_remove, + .remove = m_can_plat_remove, }; module_platform_driver(m_can_plat_driver); diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 5b3d69c3b6b66f..0080c39ee182cf 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ 
b/drivers/net/can/mscan/mpc5xxx_can.c @@ -435,7 +435,7 @@ static struct platform_driver mpc5xxx_can_driver = { .of_match_table = mpc5xxx_can_table, }, .probe = mpc5xxx_can_probe, - .remove_new = mpc5xxx_can_remove, + .remove = mpc5xxx_can_remove, #ifdef CONFIG_PM .suspend = mpc5xxx_can_suspend, .resume = mpc5xxx_can_resume, diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index b50005397463b6..28f3fd80527306 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -781,11 +781,8 @@ static int peak_get_ts_info(struct net_device *dev, { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF); info->rx_filters = BIT(HWTSTAMP_FILTER_ALL); diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index f5aa5dbacaf21f..2b7dd359f27b7d 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -907,7 +907,7 @@ static struct platform_driver rcar_can_driver = { .pm = &rcar_can_pm_ops, }, .probe = rcar_can_probe, - .remove_new = rcar_can_remove, + .remove = rcar_can_remove, }; module_platform_driver(rcar_can_driver); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index c919668bbe7a55..df1a5d0b37b226 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -2118,7 +2118,7 @@ static struct platform_driver rcar_canfd_driver = { .pm = &rcar_canfd_pm_ops, }, .probe = rcar_canfd_probe, - .remove_new = rcar_canfd_remove, + .remove = rcar_canfd_remove, }; module_platform_driver(rcar_canfd_driver); diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig new file mode 100644 index 00000000000000..e029e2a3ca4b04 --- /dev/null +++ b/drivers/net/can/rockchip/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 + +config CAN_ROCKCHIP_CANFD + tristate "Rockchip CAN-FD controller" + depends on OF || COMPILE_TEST + select CAN_RX_OFFLOAD + help + Say Y here if you want to use CAN-FD controller found on + Rockchip SoCs. diff --git a/drivers/net/can/rockchip/Makefile b/drivers/net/can/rockchip/Makefile new file mode 100644 index 00000000000000..3760d3e1baa3a1 --- /dev/null +++ b/drivers/net/can/rockchip/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_ROCKCHIP_CANFD) += rockchip_canfd.o + +rockchip_canfd-objs := +rockchip_canfd-objs += rockchip_canfd-core.o +rockchip_canfd-objs += rockchip_canfd-ethtool.o +rockchip_canfd-objs += rockchip_canfd-rx.o +rockchip_canfd-objs += rockchip_canfd-timestamp.o +rockchip_canfd-objs += rockchip_canfd-tx.o diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c new file mode 100644 index 00000000000000..df18c85fc07841 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -0,0 +1,967 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde +// +// Based on: +// +// Rockchip CANFD driver +// +// Copyright (c) 2020 Rockchip Electronics Co. Ltd. 
+// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rockchip_canfd.h" + +static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v2 = { + .model = RKCANFD_MODEL_RK3568V2, + .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 | + RKCANFD_QUIRK_RK3568_ERRATUM_3 | RKCANFD_QUIRK_RK3568_ERRATUM_4 | + RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_6 | + RKCANFD_QUIRK_RK3568_ERRATUM_7 | RKCANFD_QUIRK_RK3568_ERRATUM_8 | + RKCANFD_QUIRK_RK3568_ERRATUM_9 | RKCANFD_QUIRK_RK3568_ERRATUM_10 | + RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 | + RKCANFD_QUIRK_CANFD_BROKEN, +}; + +/* The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00 + * states that only the rk3568v2 is affected by erratum 5, but tests + * with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is + * sometimes too high. In contrast to the errata sheet mark rk3568v3 + * as effected by erratum 5, too. + */ +static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v3 = { + .model = RKCANFD_MODEL_RK3568V3, + .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 | + RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_7 | + RKCANFD_QUIRK_RK3568_ERRATUM_8 | RKCANFD_QUIRK_RK3568_ERRATUM_10 | + RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 | + RKCANFD_QUIRK_CANFD_BROKEN, +}; + +static const char *__rkcanfd_get_model_str(enum rkcanfd_model model) +{ + switch (model) { + case RKCANFD_MODEL_RK3568V2: + return "rk3568v2"; + case RKCANFD_MODEL_RK3568V3: + return "rk3568v3"; + } + + return ""; +} + +static inline const char * +rkcanfd_get_model_str(const struct rkcanfd_priv *priv) +{ + return __rkcanfd_get_model_str(priv->devtype_data.model); +} + +/* Note: + * + * The formula to calculate the CAN System Clock is: + * + * Tsclk = 2 x Tclk x (brp + 1) + * + * Double the data sheet's brp_min, brp_max and brp_inc values (both + * for the arbitration and data bit timing) to take the "2 x" into + * account. 
+ */ +static const struct can_bittiming_const rkcanfd_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 256, + .tseg2_min = 1, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 2, /* value from data sheet x2 */ + .brp_max = 512, /* value from data sheet x2 */ + .brp_inc = 2, /* value from data sheet x2 */ +}; + +static const struct can_bittiming_const rkcanfd_data_bittiming_const = { + .name = DEVICE_NAME, + .tseg1_min = 1, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 2, /* value from data sheet x2 */ + .brp_max = 512, /* value from data sheet x2 */ + .brp_inc = 2, /* value from data sheet x2 */ +}; + +static void rkcanfd_chip_set_reset_mode(const struct rkcanfd_priv *priv) +{ + reset_control_assert(priv->reset); + udelay(2); + reset_control_deassert(priv->reset); + + rkcanfd_write(priv, RKCANFD_REG_MODE, 0x0); +} + +static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv) +{ + rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default); +} + +static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv) +{ + const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *bt = &priv->can.bittiming; + u32 reg_nbt, reg_dbt, reg_tdc; + u32 tdco; + + reg_nbt = FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW, + bt->sjw - 1) | + FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP, + (bt->brp / 2) - 1) | + FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2, + bt->phase_seg2 - 1) | + FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1, + bt->prop_seg + bt->phase_seg1 - 1); + + rkcanfd_write(priv, RKCANFD_REG_FD_NOMINAL_BITTIMING, reg_nbt); + + if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD)) + return 0; + + reg_dbt = FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_SJW, + dbt->sjw - 1) | + FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_BRP, + (dbt->brp / 2) - 1) | + FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG2, + dbt->phase_seg2 - 1) | + FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG1, + dbt->prop_seg + dbt->phase_seg1 - 1); + + rkcanfd_write(priv, RKCANFD_REG_FD_DATA_BITTIMING, reg_dbt); + + tdco = (priv->can.clock.freq / dbt->bitrate) * 2 / 3; + tdco = min(tdco, FIELD_MAX(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET)); + + reg_tdc = FIELD_PREP(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET, tdco) | + RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE; + rkcanfd_write(priv, RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION, + reg_tdc); + + return 0; +} + +static void rkcanfd_get_berr_counter_corrected(struct rkcanfd_priv *priv, + struct can_berr_counter *bec) +{ + struct can_berr_counter bec_raw; + u32 reg_state; + + bec->rxerr = rkcanfd_read(priv, RKCANFD_REG_RXERRORCNT); + bec->txerr = rkcanfd_read(priv, RKCANFD_REG_TXERRORCNT); + bec_raw = *bec; + + /* Tests show that sometimes both CAN bus error counters read + * 0x0, even if the controller is in warning mode + * (RKCANFD_REG_STATE_ERROR_WARNING_STATE in RKCANFD_REG_STATE + * set). + * + * In case both error counters read 0x0, use the struct + * priv->bec, otherwise save the read value to priv->bec. + * + * rkcanfd_handle_rx_int_one() handles the decrementing of + * priv->bec.rxerr for successfully RX'ed CAN frames. + * + * Luckily the controller doesn't decrement the RX CAN bus + * error counter in hardware for self received TX'ed CAN + * frames (RKCANFD_REG_MODE_RXSTX_MODE), so RXSTX doesn't + * interfere with proper RX CAN bus error counters. 
+ * + * rkcanfd_handle_tx_done_one() handles the decrementing of + * priv->bec.txerr for successfully TX'ed CAN frames. + */ + if (!bec->rxerr && !bec->txerr) + *bec = priv->bec; + else + priv->bec = *bec; + + reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE); + netdev_vdbg(priv->ndev, + "%s: Raw/Cor: txerr=%3u/%3u rxerr=%3u/%3u Bus Off=%u Warning=%u\n", + __func__, + bec_raw.txerr, bec->txerr, bec_raw.rxerr, bec->rxerr, + !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE), + !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE)); +} + +static int rkcanfd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + int err; + + err = pm_runtime_resume_and_get(ndev->dev.parent); + if (err) + return err; + + rkcanfd_get_berr_counter_corrected(priv, bec); + + pm_runtime_put(ndev->dev.parent); + + return 0; +} + +static void rkcanfd_chip_interrupts_enable(const struct rkcanfd_priv *priv) +{ + rkcanfd_write(priv, RKCANFD_REG_INT_MASK, priv->reg_int_mask_default); + + netdev_dbg(priv->ndev, "%s: reg_int_mask=0x%08x\n", __func__, + rkcanfd_read(priv, RKCANFD_REG_INT_MASK)); +} + +static void rkcanfd_chip_interrupts_disable(const struct rkcanfd_priv *priv) +{ + rkcanfd_write(priv, RKCANFD_REG_INT_MASK, RKCANFD_REG_INT_ALL); +} + +static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv) +{ + u32 reg; + + /* TXE FIFO */ + reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); + reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE; + rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg); + + /* RX FIFO */ + reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); + reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE; + rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg); + + WRITE_ONCE(priv->tx_head, 0); + WRITE_ONCE(priv->tx_tail, 0); + netdev_reset_queue(priv->ndev); +} + +static void rkcanfd_chip_start(struct rkcanfd_priv *priv) +{ + u32 reg; + + rkcanfd_chip_set_reset_mode(priv); + + /* Receiving Filter: accept all */ + rkcanfd_write(priv, RKCANFD_REG_IDCODE, 0x0); + rkcanfd_write(priv, RKCANFD_REG_IDMASK, RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID); + + /* enable: + * - CAN_FD: enable CAN-FD + * - AUTO_RETX_MODE: auto retransmission on TX error + * - COVER_MODE: RX-FIFO overwrite mode, do not send OVERLOAD frames + * - RXSTX_MODE: Receive Self Transmit data mode + * - WORK_MODE: transition from reset to working mode + */ + reg = rkcanfd_read(priv, RKCANFD_REG_MODE); + priv->reg_mode_default = reg | + RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE | + RKCANFD_REG_MODE_AUTO_RETX_MODE | + RKCANFD_REG_MODE_COVER_MODE | + RKCANFD_REG_MODE_RXSTX_MODE | + RKCANFD_REG_MODE_WORK_MODE; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + priv->reg_mode_default |= RKCANFD_REG_MODE_LBACK_MODE | + RKCANFD_REG_MODE_SILENT_MODE | + RKCANFD_REG_MODE_SELF_TEST; + + /* mask, i.e. ignore: + * - TIMESTAMP_COUNTER_OVERFLOW_INT - timestamp counter overflow interrupt + * - TX_ARBIT_FAIL_INT - TX arbitration fail interrupt + * - OVERLOAD_INT - CAN bus overload interrupt + * - TX_FINISH_INT - Transmit finish interrupt + */ + priv->reg_int_mask_default = + RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT | + RKCANFD_REG_INT_TX_ARBIT_FAIL_INT | + RKCANFD_REG_INT_OVERLOAD_INT | + RKCANFD_REG_INT_TX_FINISH_INT; + + /* Do not mask the bus error interrupt if the bus error + * reporting is requested. 
+ */ + if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + priv->reg_int_mask_default |= RKCANFD_REG_INT_ERROR_INT; + + memset(&priv->bec, 0x0, sizeof(priv->bec)); + + rkcanfd_chip_fifo_setup(priv); + rkcanfd_timestamp_init(priv); + rkcanfd_timestamp_start(priv); + + rkcanfd_set_bittiming(priv); + + rkcanfd_chip_interrupts_disable(priv); + rkcanfd_chip_set_work_mode(priv); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + netdev_dbg(priv->ndev, "%s: reg_mode=0x%08x\n", __func__, + rkcanfd_read(priv, RKCANFD_REG_MODE)); +} + +static void __rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state) +{ + priv->can.state = state; + + rkcanfd_chip_set_reset_mode(priv); + rkcanfd_chip_interrupts_disable(priv); +} + +static void rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state) +{ + priv->can.state = state; + + rkcanfd_timestamp_stop(priv); + __rkcanfd_chip_stop(priv, state); +} + +static void rkcanfd_chip_stop_sync(struct rkcanfd_priv *priv, const enum can_state state) +{ + priv->can.state = state; + + rkcanfd_timestamp_stop_sync(priv); + __rkcanfd_chip_stop(priv, state); +} + +static int rkcanfd_set_mode(struct net_device *ndev, + enum can_mode mode) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + + switch (mode) { + case CAN_MODE_START: + rkcanfd_chip_start(priv); + rkcanfd_chip_interrupts_enable(priv); + netif_wake_queue(ndev); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static struct sk_buff * +rkcanfd_alloc_can_err_skb(struct rkcanfd_priv *priv, + struct can_frame **cf, u32 *timestamp) +{ + struct sk_buff *skb; + + *timestamp = rkcanfd_get_timestamp(priv); + + skb = alloc_can_err_skb(priv->ndev, cf); + if (skb) + rkcanfd_skb_set_timestamp(priv, skb, *timestamp); + + return skb; +} + +static const char *rkcanfd_get_error_type_str(unsigned int type) +{ + switch (type) { + case RKCANFD_REG_ERROR_CODE_TYPE_BIT: + return "Bit"; + case RKCANFD_REG_ERROR_CODE_TYPE_STUFF: + return "Stuff"; + case RKCANFD_REG_ERROR_CODE_TYPE_FORM: + return "Form"; + case RKCANFD_REG_ERROR_CODE_TYPE_ACK: + return "ACK"; + case RKCANFD_REG_ERROR_CODE_TYPE_CRC: + return "CRC"; + } + + return ""; +} + +#define RKCAN_ERROR_CODE(reg_ec, code) \ + ((reg_ec) & RKCANFD_REG_ERROR_CODE_##code ? __stringify(code) " " : "") + +static void +rkcanfd_handle_error_int_reg_ec(struct rkcanfd_priv *priv, struct can_frame *cf, + const u32 reg_ec) +{ + struct net_device_stats *stats = &priv->ndev->stats; + unsigned int type; + u32 reg_state, reg_cmd; + + type = FIELD_GET(RKCANFD_REG_ERROR_CODE_TYPE, reg_ec); + reg_cmd = rkcanfd_read(priv, RKCANFD_REG_CMD); + reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE); + + netdev_dbg(priv->ndev, "%s Error in %s %s Phase: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s(0x%08x) CMD=%u RX=%u TX=%u Error-Warning=%u Bus-Off=%u\n", + rkcanfd_get_error_type_str(type), + reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX ? "RX" : "TX", + reg_ec & RKCANFD_REG_ERROR_CODE_PHASE ? 
"Data" : "Arbitration", + RKCAN_ERROR_CODE(reg_ec, TX_OVERLOAD), + RKCAN_ERROR_CODE(reg_ec, TX_ERROR), + RKCAN_ERROR_CODE(reg_ec, TX_ACK), + RKCAN_ERROR_CODE(reg_ec, TX_ACK_EOF), + RKCAN_ERROR_CODE(reg_ec, TX_CRC), + RKCAN_ERROR_CODE(reg_ec, TX_STUFF_COUNT), + RKCAN_ERROR_CODE(reg_ec, TX_DATA), + RKCAN_ERROR_CODE(reg_ec, TX_SOF_DLC), + RKCAN_ERROR_CODE(reg_ec, TX_IDLE), + RKCAN_ERROR_CODE(reg_ec, RX_BUF_INT), + RKCAN_ERROR_CODE(reg_ec, RX_SPACE), + RKCAN_ERROR_CODE(reg_ec, RX_EOF), + RKCAN_ERROR_CODE(reg_ec, RX_ACK_LIM), + RKCAN_ERROR_CODE(reg_ec, RX_ACK), + RKCAN_ERROR_CODE(reg_ec, RX_CRC_LIM), + RKCAN_ERROR_CODE(reg_ec, RX_CRC), + RKCAN_ERROR_CODE(reg_ec, RX_STUFF_COUNT), + RKCAN_ERROR_CODE(reg_ec, RX_DATA), + RKCAN_ERROR_CODE(reg_ec, RX_DLC), + RKCAN_ERROR_CODE(reg_ec, RX_BRS_ESI), + RKCAN_ERROR_CODE(reg_ec, RX_RES), + RKCAN_ERROR_CODE(reg_ec, RX_FDF), + RKCAN_ERROR_CODE(reg_ec, RX_ID2_RTR), + RKCAN_ERROR_CODE(reg_ec, RX_SOF_IDE), + RKCAN_ERROR_CODE(reg_ec, RX_IDLE), + reg_ec, reg_cmd, + !!(reg_state & RKCANFD_REG_STATE_RX_PERIOD), + !!(reg_state & RKCANFD_REG_STATE_TX_PERIOD), + !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE), + !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE)); + + priv->can.can_stats.bus_error++; + + if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX) + stats->rx_errors++; + else + stats->tx_errors++; + + if (!cf) + return; + + if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX) { + if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SOF_IDE) + cf->data[3] = CAN_ERR_PROT_LOC_SOF; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ID2_RTR) + cf->data[3] = CAN_ERR_PROT_LOC_RTR; + /* RKCANFD_REG_ERROR_CODE_RX_FDF */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_RES) + cf->data[3] = CAN_ERR_PROT_LOC_RES0; + /* RKCANFD_REG_ERROR_CODE_RX_BRS_ESI */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DLC) + cf->data[3] = CAN_ERR_PROT_LOC_DLC; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DATA) + cf->data[3] = CAN_ERR_PROT_LOC_DATA; + /* RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC_LIM) + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK_LIM) + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_EOF) + cf->data[3] = CAN_ERR_PROT_LOC_EOF; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SPACE) + cf->data[3] = CAN_ERR_PROT_LOC_EOF; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_BUF_INT) + cf->data[3] = CAN_ERR_PROT_LOC_INTERM; + } else { + cf->data[2] |= CAN_ERR_PROT_TX; + + if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_SOF_DLC) + cf->data[3] = CAN_ERR_PROT_LOC_SOF; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_DATA) + cf->data[3] = CAN_ERR_PROT_LOC_DATA; + /* RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_CRC) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK_EOF) + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + /* RKCANFD_REG_ERROR_CODE_TX_ERROR */ + else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_OVERLOAD) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + } + + switch (reg_ec & RKCANFD_REG_ERROR_CODE_TYPE) { + case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_BIT): + + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case 
FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_STUFF): + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_FORM): + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_ACK): + cf->can_id |= CAN_ERR_ACK; + break; + case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE, + RKCANFD_REG_ERROR_CODE_TYPE_CRC): + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + break; + } +} + +static int rkcanfd_handle_error_int(struct rkcanfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct can_frame *cf = NULL; + u32 reg_ec, timestamp; + struct sk_buff *skb; + int err; + + reg_ec = rkcanfd_read(priv, RKCANFD_REG_ERROR_CODE); + + if (!reg_ec) + return 0; + + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp); + if (cf) { + struct can_berr_counter bec; + + rkcanfd_get_berr_counter_corrected(priv, &bec); + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + } + + rkcanfd_handle_error_int_reg_ec(priv, cf, reg_ec); + + if (!cf) + return 0; + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static int rkcanfd_handle_state_error_int(struct rkcanfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + enum can_state new_state, rx_state, tx_state; + struct net_device *ndev = priv->ndev; + struct can_berr_counter bec; + struct can_frame *cf = NULL; + struct sk_buff *skb; + u32 timestamp; + int err; + + rkcanfd_get_berr_counter_corrected(priv, &bec); + can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state); + + new_state = max(tx_state, rx_state); + if (new_state == priv->can.state) + return 0; + + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. + */ + skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp); + can_change_state(ndev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + rkcanfd_chip_stop(priv, CAN_STATE_BUS_OFF); + can_bus_off(ndev); + } + + if (!skb) + return 0; + + if (new_state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static int +rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct can_berr_counter bec; + struct can_frame *cf = NULL; + struct sk_buff *skb; + u32 timestamp; + int err; + + stats->rx_over_errors++; + stats->rx_errors++; + + netdev_dbg(priv->ndev, "RX-FIFO overflow\n"); + + skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp); + if (!skb) + return 0; + + rkcanfd_get_berr_counter_corrected(priv, &bec); + + cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +#define rkcanfd_handle(priv, irq, ...) 
\ +({ \ + struct rkcanfd_priv *_priv = (priv); \ + int err; \ +\ + err = rkcanfd_handle_##irq(_priv, ## __VA_ARGS__); \ + if (err) \ + netdev_err(_priv->ndev, \ + "IRQ handler rkcanfd_handle_%s() returned error: %pe\n", \ + __stringify(irq), ERR_PTR(err)); \ + err; \ +}) + +static irqreturn_t rkcanfd_irq(int irq, void *dev_id) +{ + struct rkcanfd_priv *priv = dev_id; + u32 reg_int_unmasked, reg_int; + + reg_int_unmasked = rkcanfd_read(priv, RKCANFD_REG_INT); + reg_int = reg_int_unmasked & ~priv->reg_int_mask_default; + + if (!reg_int) + return IRQ_NONE; + + /* First ACK then handle, to avoid lost-IRQ race condition on + * fast re-occurring interrupts. + */ + rkcanfd_write(priv, RKCANFD_REG_INT, reg_int); + + if (reg_int & RKCANFD_REG_INT_RX_FINISH_INT) + rkcanfd_handle(priv, rx_int); + + if (reg_int & RKCANFD_REG_INT_ERROR_INT) + rkcanfd_handle(priv, error_int); + + if (reg_int & (RKCANFD_REG_INT_BUS_OFF_INT | + RKCANFD_REG_INT_PASSIVE_ERROR_INT | + RKCANFD_REG_INT_ERROR_WARNING_INT) || + priv->can.state > CAN_STATE_ERROR_ACTIVE) + rkcanfd_handle(priv, state_error_int); + + if (reg_int & RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT) + rkcanfd_handle(priv, rx_fifo_overflow_int); + + if (reg_int & ~(RKCANFD_REG_INT_ALL_ERROR | + RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT | + RKCANFD_REG_INT_RX_FINISH_INT)) + netdev_err(priv->ndev, "%s: int=0x%08x\n", __func__, reg_int); + + if (reg_int & RKCANFD_REG_INT_WAKEUP_INT) + netdev_info(priv->ndev, "%s: WAKEUP_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_TXE_FIFO_FULL_INT) + netdev_info(priv->ndev, "%s: TXE_FIFO_FULL_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_TXE_FIFO_OV_INT) + netdev_info(priv->ndev, "%s: TXE_FIFO_OV_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT) + netdev_info(priv->ndev, "%s: BUS_OFF_RECOVERY_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_RX_FIFO_FULL_INT) + netdev_info(priv->ndev, "%s: RX_FIFO_FULL_INT\n", __func__); + + if (reg_int & RKCANFD_REG_INT_OVERLOAD_INT) + netdev_info(priv->ndev, "%s: OVERLOAD_INT\n", __func__); + + can_rx_offload_irq_finish(&priv->offload); + + return IRQ_HANDLED; +} + +static int rkcanfd_open(struct net_device *ndev) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + int err; + + err = open_candev(ndev); + if (err) + return err; + + err = pm_runtime_resume_and_get(ndev->dev.parent); + if (err) + goto out_close_candev; + + rkcanfd_chip_start(priv); + can_rx_offload_enable(&priv->offload); + + err = request_irq(ndev->irq, rkcanfd_irq, IRQF_SHARED, ndev->name, priv); + if (err) + goto out_rkcanfd_chip_stop; + + rkcanfd_chip_interrupts_enable(priv); + + netif_start_queue(ndev); + + return 0; + +out_rkcanfd_chip_stop: + rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED); + pm_runtime_put(ndev->dev.parent); +out_close_candev: + close_candev(ndev); + return err; +} + +static int rkcanfd_stop(struct net_device *ndev) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + + rkcanfd_chip_interrupts_disable(priv); + free_irq(ndev->irq, priv); + can_rx_offload_disable(&priv->offload); + rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED); + close_candev(ndev); + + pm_runtime_put(ndev->dev.parent); + + return 0; +} + +static const struct net_device_ops rkcanfd_netdev_ops = { + .ndo_open = rkcanfd_open, + .ndo_stop = rkcanfd_stop, + .ndo_start_xmit = rkcanfd_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int __maybe_unused rkcanfd_runtime_suspend(struct device *dev) +{ + struct rkcanfd_priv *priv = dev_get_drvdata(dev); + + 
clk_bulk_disable_unprepare(priv->clks_num, priv->clks); + + return 0; +} + +static int __maybe_unused rkcanfd_runtime_resume(struct device *dev) +{ + struct rkcanfd_priv *priv = dev_get_drvdata(dev); + + return clk_bulk_prepare_enable(priv->clks_num, priv->clks); +} + +static void rkcanfd_register_done(const struct rkcanfd_priv *priv) +{ + u32 dev_id; + + dev_id = rkcanfd_read(priv, RKCANFD_REG_RTL_VERSION); + + netdev_info(priv->ndev, + "Rockchip-CANFD %s rev%lu.%lu (errata 0x%04x) found\n", + rkcanfd_get_model_str(priv), + FIELD_GET(RKCANFD_REG_RTL_VERSION_MAJOR, dev_id), + FIELD_GET(RKCANFD_REG_RTL_VERSION_MINOR, dev_id), + priv->devtype_data.quirks); + + if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_5 && + priv->can.clock.freq < RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN) + netdev_info(priv->ndev, + "Erratum 5: CAN clock frequency (%luMHz) lower than known good (%luMHz), expect degraded performance\n", + priv->can.clock.freq / MEGA, + RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN / MEGA); +} + +static int rkcanfd_register(struct rkcanfd_priv *priv) +{ + struct net_device *ndev = priv->ndev; + int err; + + pm_runtime_enable(ndev->dev.parent); + + err = pm_runtime_resume_and_get(ndev->dev.parent); + if (err) + goto out_pm_runtime_disable; + + rkcanfd_ethtool_init(priv); + + err = register_candev(ndev); + if (err) + goto out_pm_runtime_put_sync; + + rkcanfd_register_done(priv); + + pm_runtime_put(ndev->dev.parent); + + return 0; + +out_pm_runtime_put_sync: + pm_runtime_put_sync(ndev->dev.parent); +out_pm_runtime_disable: + pm_runtime_disable(ndev->dev.parent); + + return err; +} + +static inline void rkcanfd_unregister(struct rkcanfd_priv *priv) +{ + struct net_device *ndev = priv->ndev; + + unregister_candev(ndev); + pm_runtime_disable(ndev->dev.parent); +} + +static const struct of_device_id rkcanfd_of_match[] = { + { + .compatible = "rockchip,rk3568v2-canfd", + .data = &rkcanfd_devtype_data_rk3568v2, + }, { + .compatible = "rockchip,rk3568v3-canfd", + .data = &rkcanfd_devtype_data_rk3568v3, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, rkcanfd_of_match); + +static int rkcanfd_probe(struct platform_device *pdev) +{ + struct rkcanfd_priv *priv; + struct net_device *ndev; + const void *match; + int err; + + ndev = alloc_candev(sizeof(struct rkcanfd_priv), RKCANFD_TXFIFO_DEPTH); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) { + err = ndev->irq; + goto out_free_candev; + } + + priv->clks_num = devm_clk_bulk_get_all(&pdev->dev, &priv->clks); + if (priv->clks_num < 0) { + err = priv->clks_num; + goto out_free_candev; + } + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) { + err = PTR_ERR(priv->regs); + goto out_free_candev; + } + + priv->reset = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(priv->reset)) { + err = dev_err_probe(&pdev->dev, PTR_ERR(priv->reset), + "Failed to get reset line\n"); + goto out_free_candev; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->netdev_ops = &rkcanfd_netdev_ops; + ndev->flags |= IFF_ECHO; + + platform_set_drvdata(pdev, priv); + priv->can.clock.freq = clk_get_rate(priv->clks[0].clk); + priv->can.bittiming_const = &rkcanfd_bittiming_const; + priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_BERR_REPORTING; + if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN)) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + 
priv->can.do_set_mode = rkcanfd_set_mode; + priv->can.do_get_berr_counter = rkcanfd_get_berr_counter; + priv->ndev = ndev; + + match = device_get_match_data(&pdev->dev); + if (match) + priv->devtype_data = *(struct rkcanfd_devtype_data *)match; + + err = can_rx_offload_add_manual(ndev, &priv->offload, + RKCANFD_NAPI_WEIGHT); + if (err) + goto out_free_candev; + + err = rkcanfd_register(priv); + if (err) + goto out_can_rx_offload_del; + + return 0; + +out_can_rx_offload_del: + can_rx_offload_del(&priv->offload); +out_free_candev: + free_candev(ndev); + + return err; +} + +static void rkcanfd_remove(struct platform_device *pdev) +{ + struct rkcanfd_priv *priv = platform_get_drvdata(pdev); + struct net_device *ndev = priv->ndev; + + can_rx_offload_del(&priv->offload); + rkcanfd_unregister(priv); + free_candev(ndev); +} + +static const struct dev_pm_ops rkcanfd_pm_ops = { + SET_RUNTIME_PM_OPS(rkcanfd_runtime_suspend, + rkcanfd_runtime_resume, NULL) +}; + +static struct platform_driver rkcanfd_driver = { + .driver = { + .name = DEVICE_NAME, + .pm = &rkcanfd_pm_ops, + .of_match_table = rkcanfd_of_match, + }, + .probe = rkcanfd_probe, + .remove = rkcanfd_remove, +}; +module_platform_driver(rkcanfd_driver); + +MODULE_AUTHOR("Marc Kleine-Budde "); +MODULE_DESCRIPTION("Rockchip CAN-FD Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/can/rockchip/rockchip_canfd-ethtool.c b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c new file mode 100644 index 00000000000000..5aeeef64a67a05 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde +// + +#include + +#include "rockchip_canfd.h" + +enum rkcanfd_stats_type { + RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS, + RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS, +}; + +static const char rkcanfd_stats_strings[][ETH_GSTRING_LEN] = { + [RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] = "rx_fifo_empty_errors", + [RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] = "tx_extended_as_standard_errors", +}; + +static void +rkcanfd_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, rkcanfd_stats_strings, + sizeof(rkcanfd_stats_strings)); + } +} + +static int rkcanfd_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rkcanfd_stats_strings); + default: + return -EOPNOTSUPP; + } +} + +static void +rkcanfd_ethtool_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + struct rkcanfd_stats *rkcanfd_stats; + unsigned int start; + + rkcanfd_stats = &priv->stats; + + do { + start = u64_stats_fetch_begin(&rkcanfd_stats->syncp); + + data[RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] = + u64_stats_read(&rkcanfd_stats->rx_fifo_empty_errors); + data[RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] = + u64_stats_read(&rkcanfd_stats->tx_extended_as_standard_errors); + } while (u64_stats_fetch_retry(&rkcanfd_stats->syncp, start)); +} + +static const struct ethtool_ops rkcanfd_ethtool_ops = { + .get_ts_info = can_ethtool_op_get_ts_info_hwts, + .get_strings = rkcanfd_ethtool_get_strings, + .get_sset_count = rkcanfd_ethtool_get_sset_count, + .get_ethtool_stats = rkcanfd_ethtool_get_ethtool_stats, +}; + +void rkcanfd_ethtool_init(struct rkcanfd_priv *priv) +{ + priv->ndev->ethtool_ops = &rkcanfd_ethtool_ops; + + 
u64_stats_init(&priv->stats.syncp); +} diff --git a/drivers/net/can/rockchip/rockchip_canfd-rx.c b/drivers/net/can/rockchip/rockchip_canfd-rx.c new file mode 100644 index 00000000000000..475c0409e215a8 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-rx.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde +// + +#include + +#include "rockchip_canfd.h" + +static bool rkcanfd_can_frame_header_equal(const struct canfd_frame *const cfd1, + const struct canfd_frame *const cfd2, + const bool is_canfd) +{ + const u8 mask_flags = CANFD_BRS | CANFD_ESI | CANFD_FDF; + canid_t mask = CAN_EFF_FLAG; + + if (canfd_sanitize_len(cfd1->len) != canfd_sanitize_len(cfd2->len)) + return false; + + if (!is_canfd) + mask |= CAN_RTR_FLAG; + + if (cfd1->can_id & CAN_EFF_FLAG) + mask |= CAN_EFF_MASK; + else + mask |= CAN_SFF_MASK; + + if ((cfd1->can_id & mask) != (cfd2->can_id & mask)) + return false; + + if (is_canfd && + (cfd1->flags & mask_flags) != (cfd2->flags & mask_flags)) + return false; + + return true; +} + +static bool rkcanfd_can_frame_data_equal(const struct canfd_frame *cfd1, + const struct canfd_frame *cfd2, + const bool is_canfd) +{ + u8 len; + + if (!is_canfd && (cfd1->can_id & CAN_RTR_FLAG)) + return true; + + len = canfd_sanitize_len(cfd1->len); + + return !memcmp(cfd1->data, cfd2->data, len); +} + +static unsigned int +rkcanfd_fifo_header_to_cfd_header(const struct rkcanfd_priv *priv, + const struct rkcanfd_fifo_header *header, + struct canfd_frame *cfd) +{ + unsigned int len = sizeof(*cfd) - sizeof(cfd->data); + u8 dlc; + + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT) + cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_EFF, header->id) | + CAN_EFF_FLAG; + else + cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_SFF, header->id); + + dlc = FIELD_GET(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH, + header->frameinfo); + + /* CAN-FD */ + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF) { + cfd->len = can_fd_dlc2len(dlc); + + /* The cfd is not allocated by alloc_canfd_skb(), so + * set CANFD_FDF here. + */ + cfd->flags |= CANFD_FDF; + + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_BRS) + cfd->flags |= CANFD_BRS; + } else { + cfd->len = can_cc_dlc2len(dlc); + + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_RTR) { + cfd->can_id |= CAN_RTR_FLAG; + + return len; + } + } + + return len + cfd->len; +} + +static int rkcanfd_rxstx_filter(struct rkcanfd_priv *priv, + const struct canfd_frame *cfd_rx, const u32 ts, + bool *tx_done) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct rkcanfd_stats *rkcanfd_stats = &priv->stats; + const struct canfd_frame *cfd_nominal; + const struct sk_buff *skb; + unsigned int tx_tail; + + tx_tail = rkcanfd_get_tx_tail(priv); + skb = priv->can.echo_skb[tx_tail]; + if (!skb) { + netdev_err(priv->ndev, + "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n", + __func__, tx_tail, + priv->tx_head, priv->tx_tail); + + return -ENOMSG; + } + cfd_nominal = (struct canfd_frame *)skb->data; + + /* We RX'ed a frame identical to our pending TX frame. 
*/ + if (rkcanfd_can_frame_header_equal(cfd_rx, cfd_nominal, + cfd_rx->flags & CANFD_FDF) && + rkcanfd_can_frame_data_equal(cfd_rx, cfd_nominal, + cfd_rx->flags & CANFD_FDF)) { + unsigned int frame_len; + + rkcanfd_handle_tx_done_one(priv, ts, &frame_len); + + WRITE_ONCE(priv->tx_tail, priv->tx_tail + 1); + netif_subqueue_completed_wake(priv->ndev, 0, 1, frame_len, + rkcanfd_get_effective_tx_free(priv), + RKCANFD_TX_START_THRESHOLD); + + *tx_done = true; + + return 0; + } + + if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6)) + return 0; + + /* Erratum 6: Extended frames may be send as standard frames. + * + * Not affected if: + * - TX'ed a standard frame -or- + * - RX'ed an extended frame + */ + if (!(cfd_nominal->can_id & CAN_EFF_FLAG) || + (cfd_rx->can_id & CAN_EFF_FLAG)) + return 0; + + /* Not affected if: + * - standard part and RTR flag of the TX'ed frame + * is not equal the CAN-ID and RTR flag of the RX'ed frame. + */ + if ((cfd_nominal->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)) != + (cfd_rx->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK))) + return 0; + + /* Not affected if: + * - length is not the same + */ + if (cfd_nominal->len != cfd_rx->len) + return 0; + + /* Not affected if: + * - the data of non RTR frames is different + */ + if (!(cfd_nominal->can_id & CAN_RTR_FLAG) && + memcmp(cfd_nominal->data, cfd_rx->data, cfd_nominal->len)) + return 0; + + /* Affected by Erratum 6 */ + u64_stats_update_begin(&rkcanfd_stats->syncp); + u64_stats_inc(&rkcanfd_stats->tx_extended_as_standard_errors); + u64_stats_update_end(&rkcanfd_stats->syncp); + + /* Manual handling of CAN Bus Error counters. See + * rkcanfd_get_corrected_berr_counter() for detailed + * explanation. + */ + if (priv->bec.txerr) + priv->bec.txerr--; + + *tx_done = true; + + stats->tx_packets++; + stats->tx_errors++; + + rkcanfd_xmit_retry(priv); + + return 0; +} + +static inline bool +rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header) +{ + /* Erratum 5: If the FIFO is empty, we read the same value for + * all elements. + */ + return header->frameinfo == header->id && + header->frameinfo == header->ts; +} + +static int rkcanfd_handle_rx_int_one(struct rkcanfd_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct canfd_frame cfd[1] = { }, *skb_cfd; + struct rkcanfd_fifo_header header[1] = { }; + struct sk_buff *skb; + unsigned int len; + int err; + + /* read header into separate struct and convert it later */ + rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA, + header, sizeof(*header)); + /* read data directly into cfd */ + rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA, + cfd->data, sizeof(cfd->data)); + + /* Erratum 5: Counters for TXEFIFO and RXFIFO may be wrong */ + if (rkcanfd_fifo_header_empty(header)) { + struct rkcanfd_stats *rkcanfd_stats = &priv->stats; + + u64_stats_update_begin(&rkcanfd_stats->syncp); + u64_stats_inc(&rkcanfd_stats->rx_fifo_empty_errors); + u64_stats_update_end(&rkcanfd_stats->syncp); + + return 0; + } + + len = rkcanfd_fifo_header_to_cfd_header(priv, header, cfd); + + /* Drop any received CAN-FD frames if CAN-FD mode is not + * requested. 
+ */ + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF && + !(priv->can.ctrlmode & CAN_CTRLMODE_FD)) { + stats->rx_dropped++; + + return 0; + } + + if (rkcanfd_get_tx_pending(priv)) { + bool tx_done = false; + + err = rkcanfd_rxstx_filter(priv, cfd, header->ts, &tx_done); + if (err) + return err; + if (tx_done && !(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) + return 0; + } + + /* Manual handling of CAN Bus Error counters. See + * rkcanfd_get_corrected_berr_counter() for detailed + * explanation. + */ + if (priv->bec.rxerr) + priv->bec.rxerr = min(CAN_ERROR_PASSIVE_THRESHOLD, + priv->bec.rxerr) - 1; + + if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF) + skb = alloc_canfd_skb(priv->ndev, &skb_cfd); + else + skb = alloc_can_skb(priv->ndev, (struct can_frame **)&skb_cfd); + + if (!skb) { + stats->rx_dropped++; + + return 0; + } + + memcpy(skb_cfd, cfd, len); + rkcanfd_skb_set_timestamp(priv, skb, header->ts); + + err = can_rx_offload_queue_timestamp(&priv->offload, skb, header->ts); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static inline unsigned int +rkcanfd_rx_fifo_get_len(const struct rkcanfd_priv *priv) +{ + const u32 reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); + + return FIELD_GET(RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT, reg); +} + +int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv) +{ + unsigned int len; + int err; + + while ((len = rkcanfd_rx_fifo_get_len(priv))) { + err = rkcanfd_handle_rx_int_one(priv); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c new file mode 100644 index 00000000000000..43d4b572181215 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde +// + +#include + +#include "rockchip_canfd.h" + +static u64 rkcanfd_timestamp_read(const struct cyclecounter *cc) +{ + const struct rkcanfd_priv *priv = container_of(cc, struct rkcanfd_priv, cc); + + return rkcanfd_get_timestamp(priv); +} + +void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv, + struct sk_buff *skb, const u32 timestamp) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + u64 ns; + + ns = timecounter_cyc2time(&priv->tc, timestamp); + + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static void rkcanfd_timestamp_work(struct work_struct *work) +{ + const struct delayed_work *delayed_work = to_delayed_work(work); + struct rkcanfd_priv *priv; + + priv = container_of(delayed_work, struct rkcanfd_priv, timestamp); + timecounter_read(&priv->tc); + + schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies); +} + +void rkcanfd_timestamp_init(struct rkcanfd_priv *priv) +{ + const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *bt = &priv->can.bittiming; + struct cyclecounter *cc = &priv->cc; + u32 bitrate, div, reg, rate; + u64 work_delay_ns; + u64 max_cycles; + + /* At the standard clock rate of 300Mhz on the rk3658, the 32 + * bit timer overflows every 14s. This means that we have to + * poll it quite often to avoid missing a wrap around. + * + * Divide it down to a reasonable rate, at least twice the bit + * rate. 
+ */ + bitrate = max(bt->bitrate, dbt->bitrate); + div = min(DIV_ROUND_UP(priv->can.clock.freq, bitrate * 2), + FIELD_MAX(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE) + 1); + + reg = FIELD_PREP(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE, + div - 1) | + RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE; + rkcanfd_write(priv, RKCANFD_REG_TIMESTAMP_CTRL, reg); + + cc->read = rkcanfd_timestamp_read; + cc->mask = CYCLECOUNTER_MASK(32); + + rate = priv->can.clock.freq / div; + clocks_calc_mult_shift(&cc->mult, &cc->shift, rate, NSEC_PER_SEC, + RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC); + + max_cycles = div_u64(ULLONG_MAX, cc->mult); + max_cycles = min(max_cycles, cc->mask); + work_delay_ns = clocksource_cyc2ns(max_cycles, cc->mult, cc->shift); + priv->work_delay_jiffies = div_u64(work_delay_ns, 3u * NSEC_PER_SEC / HZ); + INIT_DELAYED_WORK(&priv->timestamp, rkcanfd_timestamp_work); + + netdev_dbg(priv->ndev, "clock=%lu.%02luMHz bitrate=%lu.%02luMBit/s div=%u rate=%lu.%02luMHz mult=%u shift=%u delay=%lus\n", + priv->can.clock.freq / MEGA, + priv->can.clock.freq % MEGA / KILO / 10, + bitrate / MEGA, + bitrate % MEGA / KILO / 100, + div, + rate / MEGA, + rate % MEGA / KILO / 10, + cc->mult, cc->shift, + priv->work_delay_jiffies / HZ); +} + +void rkcanfd_timestamp_start(struct rkcanfd_priv *priv) +{ + timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); + + schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies); +} + +void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv) +{ + cancel_delayed_work(&priv->timestamp); +} + +void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv) +{ + cancel_delayed_work_sync(&priv->timestamp); +} diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c new file mode 100644 index 00000000000000..865a15e033a9e5 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2023, 2024 Pengutronix, +// Marc Kleine-Budde +// + +#include + +#include "rockchip_canfd.h" + +static bool rkcanfd_tx_tail_is_eff(const struct rkcanfd_priv *priv) +{ + const struct canfd_frame *cfd; + const struct sk_buff *skb; + unsigned int tx_tail; + + if (!rkcanfd_get_tx_pending(priv)) + return false; + + tx_tail = rkcanfd_get_tx_tail(priv); + skb = priv->can.echo_skb[tx_tail]; + if (!skb) { + netdev_err(priv->ndev, + "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n", + __func__, tx_tail, + priv->tx_head, priv->tx_tail); + + return false; + } + + cfd = (struct canfd_frame *)skb->data; + + return cfd->can_id & CAN_EFF_FLAG; +} + +unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv) +{ + if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6 && + rkcanfd_tx_tail_is_eff(priv)) + return 0; + + return rkcanfd_get_tx_free(priv); +} + +static void rkcanfd_start_xmit_write_cmd(const struct rkcanfd_priv *priv, + const u32 reg_cmd) +{ + if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12) + rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default | + RKCANFD_REG_MODE_SPACE_RX_MODE); + + rkcanfd_write(priv, RKCANFD_REG_CMD, reg_cmd); + + if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12) + rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default); +} + +void rkcanfd_xmit_retry(struct rkcanfd_priv *priv) +{ + const unsigned int tx_head = rkcanfd_get_tx_head(priv); + const u32 reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head); + + rkcanfd_start_xmit_write_cmd(priv, reg_cmd); +} + 
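[Editor's sketch] The TX path above leans on rkcanfd_get_tx_head(), rkcanfd_get_tx_tail(), rkcanfd_get_tx_pending() and rkcanfd_get_tx_free(), whose definitions are not part of this hunk (they live as inline helpers in rockchip_canfd.h). A minimal sketch of how such helpers can look, assuming free-running tx_head/tx_tail counters masked down to the two-entry hardware FIFO (RKCANFD_TXFIFO_DEPTH):

    static inline unsigned int rkcanfd_get_tx_head(const struct rkcanfd_priv *priv)
    {
            /* free-running counter, masked down to a FIFO slot index */
            return READ_ONCE(priv->tx_head) & (RKCANFD_TXFIFO_DEPTH - 1);
    }

    static inline unsigned int rkcanfd_get_tx_tail(const struct rkcanfd_priv *priv)
    {
            return READ_ONCE(priv->tx_tail) & (RKCANFD_TXFIFO_DEPTH - 1);
    }

    static inline unsigned int rkcanfd_get_tx_pending(const struct rkcanfd_priv *priv)
    {
            /* both counters only ever increment, so the unsigned difference is the fill level */
            return READ_ONCE(priv->tx_head) - READ_ONCE(priv->tx_tail);
    }

    static inline unsigned int rkcanfd_get_tx_free(const struct rkcanfd_priv *priv)
    {
            return RKCANFD_TXFIFO_DEPTH - rkcanfd_get_tx_pending(priv);
    }

With free-running counters the wrap-around is handled implicitly by the unsigned subtraction, which is why rkcanfd_start_xmit() and rkcanfd_rxstx_filter() can simply WRITE_ONCE() the incremented value without any explicit modulo handling.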
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct rkcanfd_priv *priv = netdev_priv(ndev); + u32 reg_frameinfo, reg_id, reg_cmd; + unsigned int tx_head, frame_len; + const struct canfd_frame *cfd; + int err; + u8 i; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (!netif_subqueue_maybe_stop(priv->ndev, 0, + rkcanfd_get_effective_tx_free(priv), + RKCANFD_TX_STOP_THRESHOLD, + RKCANFD_TX_START_THRESHOLD)) { + if (net_ratelimit()) + netdev_info(priv->ndev, + "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, tx_pending=%d)\n", + priv->tx_head, priv->tx_tail, + rkcanfd_get_tx_pending(priv)); + + return NETDEV_TX_BUSY; + } + + cfd = (struct canfd_frame *)skb->data; + + if (cfd->can_id & CAN_EFF_FLAG) { + reg_frameinfo = RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT; + reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_EFF, cfd->can_id); + } else { + reg_frameinfo = 0; + reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_SFF, cfd->can_id); + } + + if (cfd->can_id & CAN_RTR_FLAG) + reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_RTR; + + if (can_is_canfd_skb(skb)) { + reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_FDF; + + if (cfd->flags & CANFD_BRS) + reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_BRS; + + reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH, + can_fd_len2dlc(cfd->len)); + } else { + reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH, + cfd->len); + } + + tx_head = rkcanfd_get_tx_head(priv); + reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head); + + rkcanfd_write(priv, RKCANFD_REG_FD_TXFRAMEINFO, reg_frameinfo); + rkcanfd_write(priv, RKCANFD_REG_FD_TXID, reg_id); + for (i = 0; i < cfd->len; i += 4) + rkcanfd_write(priv, RKCANFD_REG_FD_TXDATA0 + i, + *(u32 *)(cfd->data + i)); + + frame_len = can_skb_get_frame_len(skb); + err = can_put_echo_skb(skb, ndev, tx_head, frame_len); + if (!err) + netdev_sent_queue(priv->ndev, frame_len); + + WRITE_ONCE(priv->tx_head, priv->tx_head + 1); + + rkcanfd_start_xmit_write_cmd(priv, reg_cmd); + + netif_subqueue_maybe_stop(priv->ndev, 0, + rkcanfd_get_effective_tx_free(priv), + RKCANFD_TX_STOP_THRESHOLD, + RKCANFD_TX_START_THRESHOLD); + + return NETDEV_TX_OK; +} + +void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts, + unsigned int *frame_len_p) +{ + struct net_device_stats *stats = &priv->ndev->stats; + unsigned int tx_tail; + struct sk_buff *skb; + + tx_tail = rkcanfd_get_tx_tail(priv); + skb = priv->can.echo_skb[tx_tail]; + + /* Manual handling of CAN Bus Error counters. See + * rkcanfd_get_corrected_berr_counter() for detailed + * explanation. 
+ */ + if (priv->bec.txerr) + priv->bec.txerr--; + + if (skb) + rkcanfd_skb_set_timestamp(priv, skb, ts); + stats->tx_bytes += + can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, + tx_tail, ts, + frame_len_p); + stats->tx_packets++; +} diff --git a/drivers/net/can/rockchip/rockchip_canfd.h b/drivers/net/can/rockchip/rockchip_canfd.h new file mode 100644 index 00000000000000..93131c7d7f54d0 --- /dev/null +++ b/drivers/net/can/rockchip/rockchip_canfd.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2023, 2024 Pengutronix, + * Marc Kleine-Budde + */ + +#ifndef _ROCKCHIP_CANFD_H +#define _ROCKCHIP_CANFD_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RKCANFD_REG_MODE 0x000 +#define RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE BIT(15) +#define RKCANFD_REG_MODE_DPEE BIT(14) +#define RKCANFD_REG_MODE_BRSD BIT(13) +#define RKCANFD_REG_MODE_SPACE_RX_MODE BIT(12) +#define RKCANFD_REG_MODE_AUTO_BUS_ON BIT(11) +#define RKCANFD_REG_MODE_AUTO_RETX_MODE BIT(10) +#define RKCANFD_REG_MODE_OVLD_MODE BIT(9) +#define RKCANFD_REG_MODE_COVER_MODE BIT(8) +#define RKCANFD_REG_MODE_RXSORT_MODE BIT(7) +#define RKCANFD_REG_MODE_TXORDER_MODE BIT(6) +#define RKCANFD_REG_MODE_RXSTX_MODE BIT(5) +#define RKCANFD_REG_MODE_LBACK_MODE BIT(4) +#define RKCANFD_REG_MODE_SILENT_MODE BIT(3) +#define RKCANFD_REG_MODE_SELF_TEST BIT(2) +#define RKCANFD_REG_MODE_SLEEP_MODE BIT(1) +#define RKCANFD_REG_MODE_WORK_MODE BIT(0) + +#define RKCANFD_REG_CMD 0x004 +#define RKCANFD_REG_CMD_TX1_REQ BIT(1) +#define RKCANFD_REG_CMD_TX0_REQ BIT(0) +#define RKCANFD_REG_CMD_TX_REQ(i) (RKCANFD_REG_CMD_TX0_REQ << (i)) + +#define RKCANFD_REG_STATE 0x008 +#define RKCANFD_REG_STATE_SLEEP_STATE BIT(6) +#define RKCANFD_REG_STATE_BUS_OFF_STATE BIT(5) +#define RKCANFD_REG_STATE_ERROR_WARNING_STATE BIT(4) +#define RKCANFD_REG_STATE_TX_PERIOD BIT(3) +#define RKCANFD_REG_STATE_RX_PERIOD BIT(2) +#define RKCANFD_REG_STATE_TX_BUFFER_FULL BIT(1) +#define RKCANFD_REG_STATE_RX_BUFFER_FULL BIT(0) + +#define RKCANFD_REG_INT 0x00c +#define RKCANFD_REG_INT_WAKEUP_INT BIT(14) +#define RKCANFD_REG_INT_TXE_FIFO_FULL_INT BIT(13) +#define RKCANFD_REG_INT_TXE_FIFO_OV_INT BIT(12) +#define RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT BIT(11) +#define RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT BIT(10) +#define RKCANFD_REG_INT_BUS_OFF_INT BIT(9) +#define RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT BIT(8) +#define RKCANFD_REG_INT_RX_FIFO_FULL_INT BIT(7) +#define RKCANFD_REG_INT_ERROR_INT BIT(6) +#define RKCANFD_REG_INT_TX_ARBIT_FAIL_INT BIT(5) +#define RKCANFD_REG_INT_PASSIVE_ERROR_INT BIT(4) +#define RKCANFD_REG_INT_OVERLOAD_INT BIT(3) +#define RKCANFD_REG_INT_ERROR_WARNING_INT BIT(2) +#define RKCANFD_REG_INT_TX_FINISH_INT BIT(1) +#define RKCANFD_REG_INT_RX_FINISH_INT BIT(0) + +#define RKCANFD_REG_INT_ALL \ + (RKCANFD_REG_INT_WAKEUP_INT | \ + RKCANFD_REG_INT_TXE_FIFO_FULL_INT | \ + RKCANFD_REG_INT_TXE_FIFO_OV_INT | \ + RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT | \ + RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT | \ + RKCANFD_REG_INT_BUS_OFF_INT | \ + RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT | \ + RKCANFD_REG_INT_RX_FIFO_FULL_INT | \ + RKCANFD_REG_INT_ERROR_INT | \ + RKCANFD_REG_INT_TX_ARBIT_FAIL_INT | \ + RKCANFD_REG_INT_PASSIVE_ERROR_INT | \ + RKCANFD_REG_INT_OVERLOAD_INT | \ + RKCANFD_REG_INT_ERROR_WARNING_INT | \ + RKCANFD_REG_INT_TX_FINISH_INT | \ + RKCANFD_REG_INT_RX_FINISH_INT) + +#define RKCANFD_REG_INT_ALL_ERROR \ + (RKCANFD_REG_INT_BUS_OFF_INT | \ + RKCANFD_REG_INT_ERROR_INT 
| \ + RKCANFD_REG_INT_PASSIVE_ERROR_INT | \ + RKCANFD_REG_INT_ERROR_WARNING_INT) + +#define RKCANFD_REG_INT_MASK 0x010 + +#define RKCANFD_REG_DMA_CTL 0x014 +#define RKCANFD_REG_DMA_CTL_DMA_RX_MODE BIT(1) +#define RKCANFD_REG_DMA_CTL_DMA_TX_MODE BIT(9) + +#define RKCANFD_REG_BITTIMING 0x018 +#define RKCANFD_REG_BITTIMING_SAMPLE_MODE BIT(16) +#define RKCANFD_REG_BITTIMING_SJW GENMASK(15, 14) +#define RKCANFD_REG_BITTIMING_BRP GENMASK(13, 8) +#define RKCANFD_REG_BITTIMING_TSEG2 GENMASK(6, 4) +#define RKCANFD_REG_BITTIMING_TSEG1 GENMASK(3, 0) + +#define RKCANFD_REG_ARBITFAIL 0x028 +#define RKCANFD_REG_ARBITFAIL_ARBIT_FAIL_CODE GENMASK(6, 0) + +/* Register seems to be clear or read */ +#define RKCANFD_REG_ERROR_CODE 0x02c +#define RKCANFD_REG_ERROR_CODE_PHASE BIT(29) +#define RKCANFD_REG_ERROR_CODE_TYPE GENMASK(28, 26) +#define RKCANFD_REG_ERROR_CODE_TYPE_BIT 0x0 +#define RKCANFD_REG_ERROR_CODE_TYPE_STUFF 0x1 +#define RKCANFD_REG_ERROR_CODE_TYPE_FORM 0x2 +#define RKCANFD_REG_ERROR_CODE_TYPE_ACK 0x3 +#define RKCANFD_REG_ERROR_CODE_TYPE_CRC 0x4 +#define RKCANFD_REG_ERROR_CODE_DIRECTION_RX BIT(25) +#define RKCANFD_REG_ERROR_CODE_TX GENMASK(24, 16) +#define RKCANFD_REG_ERROR_CODE_TX_OVERLOAD BIT(24) +#define RKCANFD_REG_ERROR_CODE_TX_ERROR BIT(23) +#define RKCANFD_REG_ERROR_CODE_TX_ACK BIT(22) +#define RKCANFD_REG_ERROR_CODE_TX_ACK_EOF BIT(21) +#define RKCANFD_REG_ERROR_CODE_TX_CRC BIT(20) +#define RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT BIT(19) +#define RKCANFD_REG_ERROR_CODE_TX_DATA BIT(18) +#define RKCANFD_REG_ERROR_CODE_TX_SOF_DLC BIT(17) +#define RKCANFD_REG_ERROR_CODE_TX_IDLE BIT(16) +#define RKCANFD_REG_ERROR_CODE_RX GENMASK(15, 0) +#define RKCANFD_REG_ERROR_CODE_RX_BUF_INT BIT(15) +#define RKCANFD_REG_ERROR_CODE_RX_SPACE BIT(14) +#define RKCANFD_REG_ERROR_CODE_RX_EOF BIT(13) +#define RKCANFD_REG_ERROR_CODE_RX_ACK_LIM BIT(12) +#define RKCANFD_REG_ERROR_CODE_RX_ACK BIT(11) +#define RKCANFD_REG_ERROR_CODE_RX_CRC_LIM BIT(10) +#define RKCANFD_REG_ERROR_CODE_RX_CRC BIT(9) +#define RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT BIT(8) +#define RKCANFD_REG_ERROR_CODE_RX_DATA BIT(7) +#define RKCANFD_REG_ERROR_CODE_RX_DLC BIT(6) +#define RKCANFD_REG_ERROR_CODE_RX_BRS_ESI BIT(5) +#define RKCANFD_REG_ERROR_CODE_RX_RES BIT(4) +#define RKCANFD_REG_ERROR_CODE_RX_FDF BIT(3) +#define RKCANFD_REG_ERROR_CODE_RX_ID2_RTR BIT(2) +#define RKCANFD_REG_ERROR_CODE_RX_SOF_IDE BIT(1) +#define RKCANFD_REG_ERROR_CODE_RX_IDLE BIT(0) + +#define RKCANFD_REG_ERROR_CODE_NOACK \ + (FIELD_PREP(RKCANFD_REG_ERROR_CODE_TYPE, \ + RKCANFD_REG_ERROR_CODE_TYPE_ACK) | \ + RKCANFD_REG_ERROR_CODE_TX_ACK_EOF | \ + RKCANFD_REG_ERROR_CODE_RX_ACK) + +#define RKCANFD_REG_RXERRORCNT 0x034 +#define RKCANFD_REG_RXERRORCNT_RX_ERR_CNT GENMASK(7, 0) + +#define RKCANFD_REG_TXERRORCNT 0x038 +#define RKCANFD_REG_TXERRORCNT_TX_ERR_CNT GENMASK(8, 0) + +#define RKCANFD_REG_IDCODE 0x03c +#define RKCANFD_REG_IDCODE_STANDARD_FRAME_ID GENMASK(10, 0) +#define RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID GENMASK(28, 0) + +#define RKCANFD_REG_IDMASK 0x040 + +#define RKCANFD_REG_TXFRAMEINFO 0x050 +#define RKCANFD_REG_FRAMEINFO_FRAME_FORMAT BIT(7) +#define RKCANFD_REG_FRAMEINFO_RTR BIT(6) +#define RKCANFD_REG_FRAMEINFO_DATA_LENGTH GENMASK(3, 0) + +#define RKCANFD_REG_TXID 0x054 +#define RKCANFD_REG_TXID_TX_ID GENMASK(28, 0) + +#define RKCANFD_REG_TXDATA0 0x058 +#define RKCANFD_REG_TXDATA1 0x05C +#define RKCANFD_REG_RXFRAMEINFO 0x060 +#define RKCANFD_REG_RXID 0x064 +#define RKCANFD_REG_RXDATA0 0x068 +#define RKCANFD_REG_RXDATA1 0x06c + +#define RKCANFD_REG_RTL_VERSION 0x070 
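[Editor's sketch] The register offsets and bit fields above are accessed through rkcanfd_read(), rkcanfd_write() and rkcanfd_read_rep(), which are used throughout the .c files but not shown in this excerpt. A plausible minimal shape for these accessors, assuming they are thin MMIO wrappers around the ioremap'ed priv->regs window (the FIFO read helper in particular is only an assumption):

    static inline u32 rkcanfd_read(const struct rkcanfd_priv *priv, u32 reg)
    {
            return readl(priv->regs + reg);
    }

    static inline void rkcanfd_write(const struct rkcanfd_priv *priv, u32 reg, u32 val)
    {
            writel(val, priv->regs + reg);
    }

    /* pull 'len' bytes out of a FIFO data register, one 32 bit word per read */
    static inline void rkcanfd_read_rep(const struct rkcanfd_priv *priv, u32 reg,
                                        void *buf, unsigned int len)
    {
            ioread32_rep(priv->regs + reg, buf, len / sizeof(u32));
    }

rkcanfd_handle_rx_int_one() uses the repeating variant to drain first the header words and then the payload from behind RKCANFD_REG_RX_FIFO_RDATA.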
+#define RKCANFD_REG_RTL_VERSION_MAJOR GENMASK(7, 4) +#define RKCANFD_REG_RTL_VERSION_MINOR GENMASK(3, 0) + +#define RKCANFD_REG_FD_NOMINAL_BITTIMING 0x100 +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SAMPLE_MODE BIT(31) +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW GENMASK(30, 24) +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP GENMASK(23, 16) +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2 GENMASK(14, 8) +#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1 GENMASK(7, 0) + +#define RKCANFD_REG_FD_DATA_BITTIMING 0x104 +#define RKCANFD_REG_FD_DATA_BITTIMING_SAMPLE_MODE BIT(21) +#define RKCANFD_REG_FD_DATA_BITTIMING_SJW GENMASK(20, 17) +#define RKCANFD_REG_FD_DATA_BITTIMING_BRP GENMASK(16, 9) +#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG2 GENMASK(8, 5) +#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG1 GENMASK(4, 0) + +#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION 0x108 +#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET GENMASK(6, 1) +#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE BIT(0) + +#define RKCANFD_REG_TIMESTAMP_CTRL 0x10c +/* datasheet says 6:1, which is wrong */ +#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE GENMASK(5, 1) +#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE BIT(0) + +#define RKCANFD_REG_TIMESTAMP 0x110 + +#define RKCANFD_REG_TXEVENT_FIFO_CTRL 0x114 +#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_CNT GENMASK(8, 5) +#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_WATERMARK GENMASK(4, 1) +#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_ENABLE BIT(0) + +#define RKCANFD_REG_RX_FIFO_CTRL 0x118 +#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT GENMASK(6, 4) +#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_FULL_WATERMARK GENMASK(3, 1) +#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE BIT(0) + +#define RKCANFD_REG_AFC_CTRL 0x11c +#define RKCANFD_REG_AFC_CTRL_UAF5 BIT(4) +#define RKCANFD_REG_AFC_CTRL_UAF4 BIT(3) +#define RKCANFD_REG_AFC_CTRL_UAF3 BIT(2) +#define RKCANFD_REG_AFC_CTRL_UAF2 BIT(1) +#define RKCANFD_REG_AFC_CTRL_UAF1 BIT(0) + +#define RKCANFD_REG_IDCODE0 0x120 +#define RKCANFD_REG_IDMASK0 0x124 +#define RKCANFD_REG_IDCODE1 0x128 +#define RKCANFD_REG_IDMASK1 0x12c +#define RKCANFD_REG_IDCODE2 0x130 +#define RKCANFD_REG_IDMASK2 0x134 +#define RKCANFD_REG_IDCODE3 0x138 +#define RKCANFD_REG_IDMASK3 0x13c +#define RKCANFD_REG_IDCODE4 0x140 +#define RKCANFD_REG_IDMASK4 0x144 + +#define RKCANFD_REG_FD_TXFRAMEINFO 0x200 +#define RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT BIT(7) +#define RKCANFD_REG_FD_FRAMEINFO_RTR BIT(6) +#define RKCANFD_REG_FD_FRAMEINFO_FDF BIT(5) +#define RKCANFD_REG_FD_FRAMEINFO_BRS BIT(4) +#define RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH GENMASK(3, 0) + +#define RKCANFD_REG_FD_TXID 0x204 +#define RKCANFD_REG_FD_ID_EFF GENMASK(28, 0) +#define RKCANFD_REG_FD_ID_SFF GENMASK(11, 0) + +#define RKCANFD_REG_FD_TXDATA0 0x208 +#define RKCANFD_REG_FD_TXDATA1 0x20c +#define RKCANFD_REG_FD_TXDATA2 0x210 +#define RKCANFD_REG_FD_TXDATA3 0x214 +#define RKCANFD_REG_FD_TXDATA4 0x218 +#define RKCANFD_REG_FD_TXDATA5 0x21c +#define RKCANFD_REG_FD_TXDATA6 0x220 +#define RKCANFD_REG_FD_TXDATA7 0x224 +#define RKCANFD_REG_FD_TXDATA8 0x228 +#define RKCANFD_REG_FD_TXDATA9 0x22c +#define RKCANFD_REG_FD_TXDATA10 0x230 +#define RKCANFD_REG_FD_TXDATA11 0x234 +#define RKCANFD_REG_FD_TXDATA12 0x238 +#define RKCANFD_REG_FD_TXDATA13 0x23c +#define RKCANFD_REG_FD_TXDATA14 0x240 +#define RKCANFD_REG_FD_TXDATA15 0x244 + +#define RKCANFD_REG_FD_RXFRAMEINFO 0x300 +#define RKCANFD_REG_FD_RXID 0x304 +#define RKCANFD_REG_FD_RXTIMESTAMP 0x308 +#define RKCANFD_REG_FD_RXDATA0 0x30c 
+#define RKCANFD_REG_FD_RXDATA1 0x310
+#define RKCANFD_REG_FD_RXDATA2 0x314
+#define RKCANFD_REG_FD_RXDATA3 0x318
+#define RKCANFD_REG_FD_RXDATA4 0x31c
+#define RKCANFD_REG_FD_RXDATA5 0x320
+#define RKCANFD_REG_FD_RXDATA6 0x324
+#define RKCANFD_REG_FD_RXDATA7 0x328
+#define RKCANFD_REG_FD_RXDATA8 0x32c
+#define RKCANFD_REG_FD_RXDATA9 0x330
+#define RKCANFD_REG_FD_RXDATA10 0x334
+#define RKCANFD_REG_FD_RXDATA11 0x338
+#define RKCANFD_REG_FD_RXDATA12 0x33c
+#define RKCANFD_REG_FD_RXDATA13 0x340
+#define RKCANFD_REG_FD_RXDATA14 0x344
+#define RKCANFD_REG_FD_RXDATA15 0x348
+
+#define RKCANFD_REG_RX_FIFO_RDATA 0x400
+#define RKCANFD_REG_TXE_FIFO_RDATA 0x500
+
+#define DEVICE_NAME "rockchip_canfd"
+#define RKCANFD_NAPI_WEIGHT 32
+#define RKCANFD_TXFIFO_DEPTH 2
+#define RKCANFD_TX_STOP_THRESHOLD 1
+#define RKCANFD_TX_START_THRESHOLD 1
+
+#define RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC 60
+#define RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN (300 * MEGA)
+
+/* rk3568 CAN-FD Errata, as of Tue 07 Nov 2023 11:25:31 +08:00 */
+
+/* Erratum 1: The error frame sent by the CAN controller has an
+ * abnormal format.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_1 BIT(0)
+
+/* Erratum 2: The error frame sent after detecting a CRC error has an
+ * abnormal position.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_2 BIT(1)
+
+/* Erratum 3: Intermittent CRC calculation errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_3 BIT(2)
+
+/* Erratum 4: Intermittent occurrence of stuffing errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_4 BIT(3)
+
+/* Erratum 5: Counters related to the TXFIFO and RXFIFO exhibit
+ * abnormal counting behavior.
+ *
+ * The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by this erratum, but
+ * tests with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. This leads to CAN frames being read from a
+ * FIFO that is already empty.
+ *
+ * Further tests on the rk3568v2 and rk3568v3 show that in this
+ * situation (i.e. empty FIFO) all elements of the FIFO header
+ * (frameinfo, id, ts) contain the same data.
+ *
+ * On the rk3568v2 and rk3568v3, this problem only occurs extremely
+ * rarely with the standard clock of 300 MHz, but almost immediately
+ * at 80 MHz.
+ *
+ * To work around this problem, check for an empty FIFO with
+ * rkcanfd_fifo_header_empty() in rkcanfd_handle_rx_int_one() and exit
+ * early.
+ *
+ * To reproduce:
+ * assigned-clocks = <&cru CLK_CANx>;
+ * assigned-clock-rates = <80000000>;
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_5 BIT(4)
+
+/* Erratum 6: The CAN controller's transmission of extended frames may
+ * intermittently change into standard frames.
+ *
+ * Work around this issue by activating self reception (RXSTX). If we
+ * have pending TX CAN frames, check all RX'ed CAN frames in
+ * rkcanfd_rxstx_filter().
+ *
+ * If it's a frame we've sent and it's OK, call the TX complete
+ * handler: rkcanfd_handle_tx_done_one(). Mask the TX complete IRQ.
+ *
+ * If it's a frame we've sent, but the CAN-ID is mangled, resend the
+ * original extended frame.
+ *
+ * To reproduce:
+ * host:
+ * canfdtest -evx -g can0
+ * candump any,0:80000000 -cexdtA
+ * dut:
+ * canfdtest -evx can0
+ * ethtool -S can0
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_6 BIT(5)
+
+/* Erratum 7: In the passive error state, the CAN controller's
+ * interframe space segment counting is inaccurate.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_7 BIT(6)
+
+/* Erratum 8: The Format-Error error flag is transmitted one bit
+ * later.
+ */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_8 BIT(7) + +/* Erratum 9: In the arbitration segment, the CAN controller will + * identify stuffing errors as arbitration failures. + */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_9 BIT(8) + +/* Erratum 10: Does not support the BUSOFF slow recovery mechanism. */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_10 BIT(9) + +/* Erratum 11: Arbitration error. */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_11 BIT(10) + +/* Erratum 12: A dominant bit at the third bit of the intermission may + * cause a transmission error. + */ +#define RKCANFD_QUIRK_RK3568_ERRATUM_12 BIT(11) + +/* Tests on the rk3568v2 and rk3568v3 show that receiving certain + * CAN-FD frames trigger an Error Interrupt. + * + * - Form Error in RX Arbitration Phase: TX_IDLE RX_STUFF_COUNT (0x0a010100) CMD=0 RX=0 TX=0 + * Error-Warning=1 Bus-Off=0 + * To reproduce: + * host: + * cansend can0 002##01f + * DUT: + * candump any,0:0,#FFFFFFFF -cexdHtA + * + * - Form Error in RX Arbitration Phase: TX_IDLE RX_CRC (0x0a010200) CMD=0 RX=0 TX=0 + * Error-Warning=1 Bus-Off=0 + * To reproduce: + * host: + * cansend can0 002##07217010000000000 + * DUT: + * candump any,0:0,#FFFFFFFF -cexdHtA + */ +#define RKCANFD_QUIRK_CANFD_BROKEN BIT(12) + +/* known issues with rk3568v3: + * + * - Overload situation during high bus load + * To reproduce: + * host: + * # add a 2nd CAN adapter to the CAN bus + * cangen can0 -I 1 -Li -Di -p10 -g 0.3 + * cansequence -rve + * DUT: + * cangen can0 -I2 -L1 -Di -p10 -c10 -g 1 -e + * cansequence -rv -i 1 + * + * - TX starvation after repeated Bus-Off + * To reproduce: + * host: + * sleep 3 && cangen can0 -I2 -Li -Di -p10 -g 0.0 + * DUT: + * cangen can0 -I2 -Li -Di -p10 -g 0.05 + */ + +enum rkcanfd_model { + RKCANFD_MODEL_RK3568V2 = 0x35682, + RKCANFD_MODEL_RK3568V3 = 0x35683, +}; + +struct rkcanfd_devtype_data { + enum rkcanfd_model model; + u32 quirks; +}; + +struct rkcanfd_fifo_header { + u32 frameinfo; + u32 id; + u32 ts; +}; + +struct rkcanfd_stats { + struct u64_stats_sync syncp; + + /* Erratum 5 */ + u64_stats_t rx_fifo_empty_errors; + + /* Erratum 6 */ + u64_stats_t tx_extended_as_standard_errors; +}; + +struct rkcanfd_priv { + struct can_priv can; + struct can_rx_offload offload; + struct net_device *ndev; + + void __iomem *regs; + unsigned int tx_head; + unsigned int tx_tail; + + u32 reg_mode_default; + u32 reg_int_mask_default; + struct rkcanfd_devtype_data devtype_data; + + struct cyclecounter cc; + struct timecounter tc; + struct delayed_work timestamp; + unsigned long work_delay_jiffies; + + struct can_berr_counter bec; + + struct rkcanfd_stats stats; + + struct reset_control *reset; + struct clk_bulk_data *clks; + int clks_num; +}; + +static inline u32 +rkcanfd_read(const struct rkcanfd_priv *priv, u32 reg) +{ + return readl(priv->regs + reg); +} + +static inline void +rkcanfd_read_rep(const struct rkcanfd_priv *priv, u32 reg, + void *buf, unsigned int len) +{ + readsl(priv->regs + reg, buf, len / sizeof(u32)); +} + +static inline void +rkcanfd_write(const struct rkcanfd_priv *priv, u32 reg, u32 val) +{ + writel(val, priv->regs + reg); +} + +static inline u32 +rkcanfd_get_timestamp(const struct rkcanfd_priv *priv) +{ + return rkcanfd_read(priv, RKCANFD_REG_TIMESTAMP); +} + +static inline unsigned int +rkcanfd_get_tx_head(const struct rkcanfd_priv *priv) +{ + return READ_ONCE(priv->tx_head) & (RKCANFD_TXFIFO_DEPTH - 1); +} + +static inline unsigned int +rkcanfd_get_tx_tail(const struct rkcanfd_priv *priv) +{ + return READ_ONCE(priv->tx_tail) & (RKCANFD_TXFIFO_DEPTH - 1); +} + 
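The tx_head/tx_tail fields kept in struct rkcanfd_priv are free-running counters: they are only masked down to the 2-entry TX FIFO (RKCANFD_TXFIFO_DEPTH, a power of two) when a hardware index is needed, while the pending/free helpers that follow operate on the raw counters, whose unsigned difference stays correct across wrap-around. A minimal stand-alone sketch of that counter scheme (hypothetical demo_* names, not part of the driver or this patch):

#include <stdio.h>

#define DEMO_FIFO_DEPTH 2	/* power of two, like RKCANFD_TXFIFO_DEPTH */

/* free-running counters; only masked when used as a hardware index */
static unsigned int demo_head, demo_tail;

static unsigned int demo_hw_index(unsigned int counter)
{
	return counter & (DEMO_FIFO_DEPTH - 1);
}

static unsigned int demo_pending(void)
{
	/* unsigned subtraction is correct even after the counters wrap */
	return demo_head - demo_tail;
}

int main(void)
{
	demo_head = 5;		/* both counters have wrapped the FIFO */
	demo_tail = 4;

	printf("hw head=%u hw tail=%u pending=%u free=%u\n",
	       demo_hw_index(demo_head), demo_hw_index(demo_tail),
	       demo_pending(), DEMO_FIFO_DEPTH - demo_pending());

	return 0;
}

With head=5 and tail=4 the masked hardware indices are 1 and 0, one slot is pending and one is free, which is exactly what the rkcanfd_get_tx_pending()/rkcanfd_get_tx_free() helpers below compute.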
+static inline unsigned int +rkcanfd_get_tx_pending(const struct rkcanfd_priv *priv) +{ + return READ_ONCE(priv->tx_head) - READ_ONCE(priv->tx_tail); +} + +static inline unsigned int +rkcanfd_get_tx_free(const struct rkcanfd_priv *priv) +{ + return RKCANFD_TXFIFO_DEPTH - rkcanfd_get_tx_pending(priv); +} + +void rkcanfd_ethtool_init(struct rkcanfd_priv *priv); + +int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv); + +void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv, + struct sk_buff *skb, const u32 timestamp); +void rkcanfd_timestamp_init(struct rkcanfd_priv *priv); +void rkcanfd_timestamp_start(struct rkcanfd_priv *priv); +void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv); +void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv); + +unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv); +void rkcanfd_xmit_retry(struct rkcanfd_priv *priv); +netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev); +void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts, + unsigned int *frame_len_p); + +#endif diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index fca5a9a1d85792..2d1f715459d75a 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -245,7 +245,7 @@ static void sja1000_isa_remove(struct platform_device *pdev) static struct platform_driver sja1000_isa_driver = { .probe = sja1000_isa_probe, - .remove_new = sja1000_isa_remove, + .remove = sja1000_isa_remove, .driver = { .name = DRV_NAME, }, diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 33f0e46ab1c2f5..c42ebe9da55abc 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -329,7 +329,7 @@ static void sp_remove(struct platform_device *pdev) static struct platform_driver sp_driver = { .probe = sp_probe, - .remove_new = sp_remove, + .remove = sp_remove, .driver = { .name = DRV_NAME, .of_match_table = sp_of_table, diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index bd25137062c5e6..278ee8722770c2 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -854,7 +854,7 @@ static struct platform_driver softing_driver = { .name = KBUILD_MODNAME, }, .probe = softing_pdev_probe, - .remove_new = softing_pdev_remove, + .remove = softing_pdev_remove, }; module_platform_driver(softing_driver); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 3e7526274e34bb..3bc56517fe7a99 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -12,7 +12,7 @@ // Copyright (c) 2019 Martin Sperl // -#include +#include #include #include #include diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c index 65150e76200720..8c5be8d1c51965 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -8,7 +8,7 @@ #include "mcp251xfd.h" -#include +#include static const struct regmap_config mcp251xfd_regmap_crc; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index 83c18035b2a24d..e684991fa3917d 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -12,7 +12,7 @@ // 
Copyright (c) 2019 Martin Sperl // -#include +#include #include "mcp251xfd.h" #include "mcp251xfd-ram.h" diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c index b1de8052a45ccb..747ae3e8a76859 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c @@ -12,7 +12,7 @@ // Copyright (c) 2019 Martin Sperl // -#include +#include #include #include "mcp251xfd.h" diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index ab8d017846869d..360158c295d348 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -914,7 +914,7 @@ static struct platform_driver sun4i_can_driver = { .of_match_table = sun4ican_of_match, }, .probe = sun4ican_probe, - .remove_new = sun4ican_remove, + .remove = sun4ican_remove, }; module_platform_driver(sun4i_can_driver); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 5aab440074c690..644e8b8eb91e74 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -1025,7 +1025,7 @@ static struct platform_driver ti_hecc_driver = { .of_match_table = ti_hecc_dt_ids, }, .probe = ti_hecc_probe, - .remove_new = ti_hecc_remove, + .remove = ti_hecc_remove, .suspend = ti_hecc_suspend, .resume = ti_hecc_resume, }; diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 3e1fba12c0c3b8..9dae0c71a2e1b8 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -17,11 +17,12 @@ config CAN_EMS_USB config CAN_ESD_USB tristate "esd electronics gmbh CAN/USB interfaces" help - This driver adds supports for several CAN/USB interfaces + This driver adds support for several CAN/USB interfaces from esd electronics gmbh (https://www.esd.eu). The drivers supports the following devices: - esd CAN-USB/2 + - esd CAN-USB/3-FD - esd CAN-USB/Micro To compile this driver as a module, choose M here: the module diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c index 41a0e4261d15e9..03ad10b01867d8 100644 --- a/drivers/net/can/usb/esd_usb.c +++ b/drivers/net/can/usb/esd_usb.c @@ -3,7 +3,7 @@ * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro * * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs - * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus + * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus */ #include @@ -1116,9 +1116,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev) if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) flags |= ESD_USB_3_BAUDRATE_FLAG_LOM; - if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - flags |= ESD_USB_3_BAUDRATE_FLAG_TRS; - baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1)); baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1)); baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1) @@ -1219,7 +1216,6 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index) switch (le16_to_cpu(dev->udev->descriptor.idProduct)) { case ESD_USB_CANUSB3_PRODUCT_ID: priv->can.clock.freq = ESD_USB_3_CAN_CLOCK; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const; priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const; diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c index 4151b18fd045da..1888ca1de7b6a8 100644 --- 
a/drivers/net/can/usb/etas_es58x/es581_4.c +++ b/drivers/net/can/usb/etas_es58x/es581_4.c @@ -9,7 +9,7 @@ * Copyright (c) 2020-2022 Vincent Mailhol */ -#include +#include #include #include diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 5e3a72b7c46919..71f24dc0a92711 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -10,7 +10,7 @@ * Copyright (c) 2020-2022 Vincent Mailhol */ -#include +#include #include #include #include diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c index fa87b0b78e3eb5..84ffa1839bac11 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.c +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -11,7 +11,7 @@ * Copyright (c) 2020-2022 Vincent Mailhol */ -#include +#include #include #include diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c index ec8cef7fd2d535..bc0c8903fe7794 100644 --- a/drivers/net/can/usb/f81604.c +++ b/drivers/net/can/usb/f81604.c @@ -13,7 +13,7 @@ #include #include -#include +#include /* vendor and product id */ #define F81604_VENDOR_ID 0x2c42 diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index ff10b3790d844a..078496d9b7ba2a 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -22,6 +22,8 @@ */ #include +#include +#include #include #include #include @@ -39,7 +41,6 @@ #define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0) #define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1) #define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2) -#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3) /* Device capabilities */ #define KVASER_USB_CAP_BERR_CAP 0x01 @@ -68,6 +69,7 @@ struct kvaser_usb_dev_card_data { u32 ctrlmode_supported; u32 capabilities; struct kvaser_usb_dev_card_data_hydra hydra; + u32 usbcan_timestamp_msb; }; /* Context for an outstanding, not yet ACKed, transmission */ @@ -216,4 +218,26 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev); extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const; +static inline ktime_t kvaser_usb_ticks_to_ktime(const struct kvaser_usb_dev_cfg *cfg, + u64 ticks) +{ + return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq)); +} + +static inline ktime_t kvaser_usb_timestamp48_to_ktime(const struct kvaser_usb_dev_cfg *cfg, + const __le16 *timestamp) +{ + u64 ticks = le16_to_cpu(timestamp[0]) | + (u64)(le16_to_cpu(timestamp[1])) << 16 | + (u64)(le16_to_cpu(timestamp[2])) << 32; + + return kvaser_usb_ticks_to_ktime(cfg, ticks); +} + +static inline ktime_t kvaser_usb_timestamp64_to_ktime(const struct kvaser_usb_dev_cfg *cfg, + __le64 timestamp) +{ + return kvaser_usb_ticks_to_ktime(cfg, le64_to_cpu(timestamp)); +} + #endif /* KVASER_USB_H */ diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index daa34b532aa8f0..7d12776ab63e6d 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -94,7 +94,7 @@ #define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = { - .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP, + .quirks = 0, .ops = &kvaser_usb_hydra_dev_ops, }; @@ -754,13 +754,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, } static const struct net_device_ops kvaser_usb_netdev_ops = { - .ndo_open = 
kvaser_usb_open, - .ndo_stop = kvaser_usb_close, - .ndo_start_xmit = kvaser_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, -}; - -static const struct net_device_ops kvaser_usb_netdev_ops_hwts = { .ndo_open = kvaser_usb_open, .ndo_stop = kvaser_usb_close, .ndo_eth_ioctl = can_eth_ioctl_hwts, @@ -769,10 +762,6 @@ static const struct net_device_ops kvaser_usb_netdev_ops_hwts = { }; static const struct ethtool_ops kvaser_usb_ethtool_ops = { - .get_ts_info = ethtool_op_get_ts_info, -}; - -static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = { .get_ts_info = can_ethtool_op_get_ts_info_hwts, }; @@ -859,13 +848,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) netdev->flags |= IFF_ECHO; netdev->netdev_ops = &kvaser_usb_netdev_ops; - if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) { - netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts; - netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts; - } else { - netdev->netdev_ops = &kvaser_usb_netdev_ops; - netdev->ethtool_ops = &kvaser_usb_ethtool_ops; - } + netdev->ethtool_ops = &kvaser_usb_ethtool_ops; SET_NETDEV_DEV(netdev, &dev->intf->dev); netdev->dev_id = channel; @@ -915,10 +898,8 @@ static int kvaser_usb_probe(struct usb_interface *intf, ops = driver_info->ops; err = ops->dev_setup_endpoints(dev); - if (err) { - dev_err(&intf->dev, "Cannot get usb endpoint(s)"); - return err; - } + if (err) + return dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)"); dev->udev = interface_to_usbdev(intf); @@ -929,26 +910,20 @@ static int kvaser_usb_probe(struct usb_interface *intf, dev->card_data.ctrlmode_supported = 0; dev->card_data.capabilities = 0; err = ops->dev_init_card(dev); - if (err) { - dev_err(&intf->dev, - "Failed to initialize card, error %d\n", err); - return err; - } + if (err) + return dev_err_probe(&intf->dev, err, + "Failed to initialize card\n"); err = ops->dev_get_software_info(dev); - if (err) { - dev_err(&intf->dev, - "Cannot get software info, error %d\n", err); - return err; - } + if (err) + return dev_err_probe(&intf->dev, err, + "Cannot get software info\n"); if (ops->dev_get_software_details) { err = ops->dev_get_software_details(dev); - if (err) { - dev_err(&intf->dev, - "Cannot get software details, error %d\n", err); - return err; - } + if (err) + return dev_err_probe(&intf->dev, err, + "Cannot get software details\n"); } if (WARN_ON(!dev->cfg)) @@ -962,18 +937,16 @@ static int kvaser_usb_probe(struct usb_interface *intf, dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); err = ops->dev_get_card_info(dev); - if (err) { - dev_err(&intf->dev, "Cannot get card info, error %d\n", err); - return err; - } + if (err) + return dev_err_probe(&intf->dev, err, + "Cannot get card info\n"); if (ops->dev_get_capabilities) { err = ops->dev_get_capabilities(dev); if (err) { - dev_err(&intf->dev, - "Cannot get capabilities, error %d\n", err); kvaser_usb_remove_interfaces(dev); - return err; + return dev_err_probe(&intf->dev, err, + "Cannot get capabilities\n"); } } diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index c7ba768dfe173d..3764b263add3a8 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -10,7 +10,6 @@ * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only * reported after a call to do_get_berr_counter(), since firmware does not * distinguish between ERROR_WARNING and ERROR_ACTIVE. 
- * - Hardware timestamps are not set for CAN Tx frames. */ #include @@ -261,6 +260,15 @@ struct kvaser_cmd_tx_can { u8 reserved[11]; } __packed; +struct kvaser_cmd_tx_ack { + __le32 id; + u8 data[8]; + u8 dlc; + u8 flags; + __le16 timestamp[3]; + u8 reserved0[8]; +} __packed; + struct kvaser_cmd_header { u8 cmd_no; /* The destination HE address is stored in 0..5 of he_addr. @@ -297,6 +305,7 @@ struct kvaser_cmd { struct kvaser_cmd_rx_can rx_can; struct kvaser_cmd_tx_can tx_can; + struct kvaser_cmd_tx_ack tx_ack; } __packed; } __packed; @@ -522,23 +531,25 @@ kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev, return priv; } -static ktime_t -kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg, - const struct kvaser_cmd *cmd) +static ktime_t kvaser_usb_hydra_ktime_from_cmd(const struct kvaser_usb_dev_cfg *cfg, + const struct kvaser_cmd *cmd) { - u64 ticks; + ktime_t hwtstamp = 0; if (cmd->header.cmd_no == CMD_EXTENDED) { struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd; - ticks = le64_to_cpu(cmd_ext->rx_can.timestamp); - } else { - ticks = le16_to_cpu(cmd->rx_can.timestamp[0]); - ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16; - ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32; + if (cmd_ext->cmd_no_ext == CMD_RX_MESSAGE_FD) + hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->rx_can.timestamp); + else if (cmd_ext->cmd_no_ext == CMD_TX_ACKNOWLEDGE_FD) + hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->tx_ack.timestamp); + } else if (cmd->header.cmd_no == CMD_RX_MESSAGE) { + hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->rx_can.timestamp); + } else if (cmd->header.cmd_no == CMD_TX_ACKNOWLEDGE) { + hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->tx_ack.timestamp); } - return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq)); + return hwtstamp; } static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, @@ -1175,6 +1186,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, bool one_shot_fail = false; bool is_err_frame = false; u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); + struct sk_buff *skb; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) @@ -1201,6 +1213,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags); + skb = priv->can.echo_skb[context->echo_index]; + if (skb) + skb_hwtstamps(skb)->hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd); len = can_get_echo_skb(priv->netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; @@ -1234,7 +1249,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, stats = &priv->netdev->stats; flags = cmd->rx_can.flags; - hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd); + hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd); if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, @@ -1302,7 +1317,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, KVASER_USB_KCAN_DATA_DLC_SHIFT; flags = le32_to_cpu(cmd->rx_can.flags); - hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd); + hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, std_cmd); if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 
b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 23bd7574b1c7e1..6b9122ab1464f9 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -119,6 +119,10 @@ /* Extended CAN identifier flag */ #define KVASER_EXTENDED_FRAME BIT(31) +/* USBCanII timestamp */ +#define KVASER_USB_USBCAN_CLK_OVERFLOW_MASK GENMASK(31, 16) +#define KVASER_USB_USBCAN_TIMESTAMP_FACTOR 10 + struct kvaser_cmd_simple { u8 tid; u8 channel; @@ -235,6 +239,20 @@ struct kvaser_cmd_tx_acknowledge_header { u8 tid; } __packed; +struct leaf_cmd_tx_acknowledge { + u8 channel; + u8 tid; + __le16 time[3]; + u8 padding[2]; +} __packed; + +struct usbcan_cmd_tx_acknowledge { + u8 channel; + u8 tid; + __le16 time; + u8 padding[2]; +} __packed; + struct leaf_cmd_can_error_event { u8 tid; u8 flags; @@ -281,6 +299,12 @@ struct usbcan_cmd_error_event { __le16 padding; } __packed; +struct usbcan_cmd_clk_overflow_event { + u8 tid; + u8 padding; + __le32 time; +} __packed; + struct kvaser_cmd_ctrl_mode { u8 tid; u8 channel; @@ -347,6 +371,7 @@ struct kvaser_cmd { struct leaf_cmd_error_event error_event; struct kvaser_cmd_cap_req cap_req; struct kvaser_cmd_cap_res cap_res; + struct leaf_cmd_tx_acknowledge tx_ack; } __packed leaf; union { @@ -355,6 +380,8 @@ struct kvaser_cmd { struct usbcan_cmd_chip_state_event chip_state_event; struct usbcan_cmd_can_error_event can_error_event; struct usbcan_cmd_error_event error_event; + struct usbcan_cmd_tx_acknowledge tx_ack; + struct usbcan_cmd_clk_overflow_event clk_overflow_event; } __packed usbcan; struct kvaser_cmd_tx_can tx_can; @@ -370,7 +397,7 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), - [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.leaf.tx_ack), [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo), [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can), [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), @@ -388,15 +415,14 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), - [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.usbcan.tx_ack), [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo), [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event), [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), - /* ignored events: */ - [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY, + [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event), }; /* Summary of a kvaser error event, for a unified Leaf/Usbcan error @@ -463,11 +489,27 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = { +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_16mhz = { .clock = { .freq = 16 * MEGA /* Hz */, }, - .timestamp_freq = 1, + .timestamp_freq = 16, + .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, +}; + +static const struct 
kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_24mhz = { + .clock = { + .freq = 16 * MEGA /* Hz */, + }, + .timestamp_freq = 24, + .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_32mhz = { + .clock = { + .freq = 16 * MEGA /* Hz */, + }, + .timestamp_freq = 32, .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, }; @@ -475,7 +517,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = { .clock = { .freq = 16 * MEGA /* Hz */, }, - .timestamp_freq = 1, + .timestamp_freq = 16, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; @@ -483,7 +525,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { .clock = { .freq = 24 * MEGA /* Hz */, }, - .timestamp_freq = 1, + .timestamp_freq = 24, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; @@ -491,10 +533,19 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { .clock = { .freq = 32 * MEGA /* Hz */, }, - .timestamp_freq = 1, + .timestamp_freq = 32, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; +static inline ktime_t kvaser_usb_usbcan_timestamp_to_ktime(const struct kvaser_usb *dev, + __le16 timestamp) +{ + u64 ticks = le16_to_cpu(timestamp) | + dev->card_data.usbcan_timestamp_msb; + + return kvaser_usb_ticks_to_ktime(dev->cfg, ticks * KVASER_USB_USBCAN_TIMESTAMP_FACTOR); +} + static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -678,8 +729,19 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { /* Firmware expects bittiming parameters calculated for 16MHz * clock, regardless of the actual clock + * Though, the reported freq is used for timestamps */ - dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg; + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_32mhz; + break; + } } else { switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: @@ -880,6 +942,8 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_net_priv *priv; unsigned long flags; u8 channel, tid; + struct sk_buff *skb; + ktime_t hwtstamp = 0; channel = cmd->u.tx_acknowledge_header.channel; tid = cmd->u.tx_acknowledge_header.tid; @@ -901,14 +965,14 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, /* Sometimes the state change doesn't come after a bus-off event */ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) { - struct sk_buff *skb; + struct sk_buff *err_skb; struct can_frame *cf; - skb = alloc_can_err_skb(priv->netdev, &cf); - if (skb) { + err_skb = alloc_can_err_skb(priv->netdev, &cf); + if (err_skb) { cf->can_id |= CAN_ERR_RESTARTED; - netif_rx(skb); + netif_rx(err_skb); } else { netdev_err(priv->netdev, "No memory left for err_skb\n"); @@ -919,9 +983,20 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, priv->can.state = CAN_STATE_ERROR_ACTIVE; } + switch (dev->driver_info->family) { + case KVASER_LEAF: + hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.tx_ack.time); + break; + 
case KVASER_USBCAN: + hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.tx_ack.time); + break; + } spin_lock_irqsave(&priv->tx_contexts_lock, flags); + skb = priv->can.echo_skb[context->echo_index]; + if (skb) + skb_hwtstamps(skb)->hwtstamp = hwtstamp; stats->tx_packets++; stats->tx_bytes += can_get_echo_skb(priv->netdev, context->echo_index, NULL); @@ -1299,6 +1374,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, struct net_device_stats *stats; u8 channel = cmd->u.rx_can_header.channel; const u8 *rx_data = NULL; /* GCC */ + ktime_t hwtstamp = 0; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, @@ -1329,9 +1405,11 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, switch (dev->driver_info->family) { case KVASER_LEAF: rx_data = cmd->u.leaf.rx_can.data; + hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.rx_can.time); break; case KVASER_USBCAN: rx_data = cmd->u.usbcan.rx_can.data; + hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.rx_can.time); break; } @@ -1375,6 +1453,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, memcpy(cf->data, &rx_data[6], cf->len); } + skb_hwtstamps(skb)->hwtstamp = hwtstamp; stats->rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; @@ -1508,7 +1587,7 @@ static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev, complete(&priv->get_busparams_comp); } -static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, +static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { if (kvaser_usb_leaf_verify_size(dev, cmd) < 0) @@ -1554,12 +1633,15 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, kvaser_usb_leaf_get_busparams_reply(dev, cmd); break; - /* Ignored commands */ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: if (dev->driver_info->family != KVASER_USBCAN) goto warn; + dev->card_data.usbcan_timestamp_msb = + le32_to_cpu(cmd->u.usbcan.clk_overflow_event.time) & + KVASER_USB_USBCAN_CLK_OVERFLOW_MASK; break; + /* Ignored commands */ case CMD_FLUSH_QUEUE_REPLY: if (dev->driver_info->family != KVASER_LEAF) goto warn; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 47619e9cb0055b..41c0a1c399bf36 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -6,7 +6,7 @@ * This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c */ -#include +#include #include #include #include diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index b211b6e283a2c0..c75df1755b3b95 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -8,7 +8,7 @@ * * Many thanks to Klaus Hitschler */ -#include +#include #include #include diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 3d68fef46ded3a..59f7cd8ceb397f 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -901,11 +901,8 @@ int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF); info->rx_filters = BIT(HWTSTAMP_FILTER_ALL); diff --git a/drivers/net/can/xilinx_can.c 
b/drivers/net/can/xilinx_can.c index d944911d7f0534..436c0e4b0344c7 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -2103,7 +2103,7 @@ static void xcan_remove(struct platform_device *pdev) static struct platform_driver xcan_driver = { .probe = xcan_probe, - .remove_new = xcan_remove, + .remove = xcan_remove, .driver = { .name = DRIVER_NAME, .pm = &xcan_dev_pm_ops, diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index 897e5e8b3d6927..31d070bf161a34 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -343,10 +343,9 @@ static int b53_mdio_probe(struct mdio_device *mdiodev) dev_set_drvdata(&mdiodev->dev, dev); ret = b53_switch_register(dev); - if (ret) { - dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret); - return ret; - } + if (ret) + return dev_err_probe(&mdiodev->dev, ret, + "failed to register switch\n"); return ret; } diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 308f15d3832e8a..467da057579e5b 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -16,7 +16,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include +#include #include #include diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig index c1b906c05a025c..12a86585a77fdb 100644 --- a/drivers/net/dsa/microchip/Kconfig +++ b/drivers/net/dsa/microchip/Kconfig @@ -1,14 +1,17 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig NET_DSA_MICROCHIP_KSZ_COMMON - tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support" + tristate "Microchip KSZ8XXX/KSZ9XXX/LAN937X series switch support" depends on NET_DSA select NET_DSA_TAG_KSZ select NET_DSA_TAG_NONE select NET_IEEE8021Q_HELPERS select DCB help - This driver adds support for Microchip KSZ9477 series switch and - KSZ8795/KSZ88x3 switch chips. + This driver adds support for Microchip KSZ8, KSZ9 and + LAN937X series switch chips, being KSZ8863/8873, + KSZ8895/8864, KSZ8794/8795/8765, + KSZ9477/9897/9896/9567/8567, KSZ9893/9563/8563 and + LAN9370/9371/9372/9373/9374. config NET_DSA_MICROCHIP_KSZ9477_I2C tristate "KSZ series I2C connected switch driver" diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 1cfba1ec9355a3..9347cfb3d0b5e7 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o ksz_switch-objs := ksz_common.o ksz_dcb.o ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o -ksz_switch-objs += ksz8795.o +ksz_switch-objs += ksz8.o ksz_switch-objs += lan937x_main.o ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c new file mode 100644 index 00000000000000..da7110d675583d --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8.c @@ -0,0 +1,1975 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip KSZ8XXX series switch driver + * + * It supports the following switches: + * - KSZ8863, KSZ8873 aka KSZ88X3 + * - KSZ8895, KSZ8864 aka KSZ8895 family + * - KSZ8794, KSZ8795, KSZ8765 aka KSZ87XX + * Note that it does NOT support: + * - KSZ8563, KSZ8567 - see KSZ9477 driver + * + * Copyright (C) 2017 Microchip Technology Inc. 
+ * Tristram Ha + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ksz_common.h" +#include "ksz8_reg.h" +#include "ksz8.h" + +static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) +{ + regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0); +} + +static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, + bool set) +{ + regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset), + bits, set ? bits : 0); +} + +/** + * ksz8_ind_write8 - EEE/ACL/PME indirect register write + * @dev: The device structure. + * @table: Function & table select, register 110. + * @addr: Indirect access control, register 111. + * @data: The data to be written. + * + * This function performs an indirect register write for EEE, ACL or + * PME switch functionalities. Both 8-bit registers 110 and 111 are + * written at once with ksz_write16, using the serial multiple write + * functionality. + * + * Return: 0 on success, or an error code on failure. + */ +static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data) +{ + const u16 *regs; + u16 ctrl_addr; + int ret = 0; + + regs = dev->info->regs; + + mutex_lock(&dev->alu_mutex); + + ctrl_addr = IND_ACC_TABLE(table) | addr; + ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + if (!ret) + ret = ksz_write8(dev, regs[REG_IND_BYTE], data); + + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +/** + * ksz8_ind_read8 - EEE/ACL/PME indirect register read + * @dev: The device structure. + * @table: Function & table select, register 110. + * @addr: Indirect access control, register 111. + * @val: The value read. + * + * This function performs an indirect register read for EEE, ACL or + * PME switch functionalities. Both 8-bit registers 110 and 111 are + * written at once with ksz_write16, using the serial multiple write + * functionality. + * + * Return: 0 on success, or an error code on failure. 
+ */ +static int ksz8_ind_read8(struct ksz_device *dev, u8 table, u16 addr, u8 *val) +{ + const u16 *regs; + u16 ctrl_addr; + int ret = 0; + + regs = dev->info->regs; + + mutex_lock(&dev->alu_mutex); + + ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr; + ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + if (!ret) + ret = ksz_read8(dev, regs[REG_IND_BYTE], val); + + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value) +{ + return ksz8_ind_write8(dev, (u8)(reg >> 8), (u8)(reg), value); +} + +int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data) +{ + u8 table = (u8)(offset >> 8 | (port + 1)); + + return ksz8_ind_read8(dev, table, (u8)(offset), data); +} + +int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data) +{ + u8 table = (u8)(offset >> 8 | (port + 1)); + + return ksz8_ind_write8(dev, table, (u8)(offset), data); +} + +int ksz8_reset_switch(struct ksz_device *dev) +{ + if (ksz_is_ksz88x3(dev)) { + /* reset switch */ + ksz_cfg(dev, KSZ8863_REG_SW_RESET, + KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true); + ksz_cfg(dev, KSZ8863_REG_SW_RESET, + KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false); + } else { + /* reset switch */ + ksz_write8(dev, REG_POWER_MANAGEMENT_1, + SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S); + ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0); + } + + return 0; +} + +static int ksz8863_change_mtu(struct ksz_device *dev, int frame_size) +{ + u8 ctrl2 = 0; + + if (frame_size <= KSZ8_LEGAL_PACKET_SIZE) + ctrl2 |= KSZ8863_LEGAL_PACKET_ENABLE; + else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE) + ctrl2 |= KSZ8863_HUGE_PACKET_ENABLE; + + return ksz_rmw8(dev, REG_SW_CTRL_2, KSZ8863_LEGAL_PACKET_ENABLE | + KSZ8863_HUGE_PACKET_ENABLE, ctrl2); +} + +static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size) +{ + u8 ctrl1 = 0, ctrl2 = 0; + int ret; + + if (frame_size > KSZ8_LEGAL_PACKET_SIZE) + ctrl2 |= SW_LEGAL_PACKET_DISABLE; + if (frame_size > KSZ8863_NORMAL_PACKET_SIZE) + ctrl1 |= SW_HUGE_PACKET; + + ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1); + if (ret) + return ret; + + return ksz_rmw8(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, ctrl2); +} + +int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu) +{ + u16 frame_size; + + if (!dsa_is_cpu_port(dev->ds, port)) + return 0; + + frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + + switch (dev->chip_id) { + case KSZ8795_CHIP_ID: + case KSZ8794_CHIP_ID: + case KSZ8765_CHIP_ID: + return ksz8795_change_mtu(dev, frame_size); + case KSZ88X3_CHIP_ID: + case KSZ8864_CHIP_ID: + case KSZ8895_CHIP_ID: + return ksz8863_change_mtu(dev, frame_size); + } + + return -EOPNOTSUPP; +} + +static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues) +{ + u8 mask_4q, mask_2q; + u8 reg_4q, reg_2q; + u8 data_4q = 0; + u8 data_2q = 0; + int ret; + + if (ksz_is_ksz88x3(dev)) { + mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN; + mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN; + reg_4q = REG_PORT_CTRL_0; + reg_2q = REG_PORT_CTRL_2; + + /* KSZ8795 family switches have Weighted Fair Queueing (WFQ) + * enabled by default. Enable it for KSZ8873 family switches + * too. Default value for KSZ8873 family is strict priority, + * which should be enabled by using TC_SETUP_QDISC_ETS, not + * by default. 
+ */ + ret = ksz_rmw8(dev, REG_SW_CTRL_3, WEIGHTED_FAIR_QUEUE_ENABLE, + WEIGHTED_FAIR_QUEUE_ENABLE); + if (ret) + return ret; + } else { + mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN; + mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN; + reg_4q = REG_PORT_CTRL_13; + reg_2q = REG_PORT_CTRL_0; + + /* TODO: this is legacy from initial KSZ8795 driver, should be + * moved to appropriate place in the future. + */ + ret = ksz_rmw8(dev, REG_SW_CTRL_19, + SW_OUT_RATE_LIMIT_QUEUE_BASED, + SW_OUT_RATE_LIMIT_QUEUE_BASED); + if (ret) + return ret; + } + + if (queues == 4) + data_4q = mask_4q; + else if (queues == 2) + data_2q = mask_2q; + + ret = ksz_prmw8(dev, port, reg_4q, mask_4q, data_4q); + if (ret) + return ret; + + return ksz_prmw8(dev, port, reg_2q, mask_2q, data_2q); +} + +int ksz8_all_queues_split(struct ksz_device *dev, int queues) +{ + struct dsa_switch *ds = dev->ds; + const struct dsa_port *dp; + + dsa_switch_for_each_port(dp, ds) { + int ret = ksz8_port_queue_split(dev, dp->index, queues); + + if (ret) + return ret; + } + + return 0; +} + +void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt) +{ + const u32 *masks; + const u16 *regs; + u16 ctrl_addr; + u32 data; + u8 check; + int loop; + + masks = dev->info->masks; + regs = dev->info->regs; + + ctrl_addr = addr + dev->info->reg_mib_cnt * port; + ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + + /* It is almost guaranteed to always read the valid bit because of + * slow SPI speed. + */ + for (loop = 2; loop > 0; loop--) { + ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check); + + if (check & masks[MIB_COUNTER_VALID]) { + ksz_read32(dev, regs[REG_IND_DATA_LO], &data); + if (check & masks[MIB_COUNTER_OVERFLOW]) + *cnt += MIB_COUNTER_VALUE + 1; + *cnt += data & MIB_COUNTER_VALUE; + break; + } + } + mutex_unlock(&dev->alu_mutex); +} + +static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + const u32 *masks; + const u16 *regs; + u16 ctrl_addr; + u32 data; + u8 check; + int loop; + + masks = dev->info->masks; + regs = dev->info->regs; + + addr -= dev->info->reg_mib_cnt; + ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port; + ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0; + ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + + /* It is almost guaranteed to always read the valid bit because of + * slow SPI speed. + */ + for (loop = 2; loop > 0; loop--) { + ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check); + + if (check & masks[MIB_COUNTER_VALID]) { + ksz_read32(dev, regs[REG_IND_DATA_LO], &data); + if (addr < 2) { + u64 total; + + total = check & MIB_TOTAL_BYTES_H; + total <<= 32; + *cnt += total; + *cnt += data; + if (check & masks[MIB_COUNTER_OVERFLOW]) { + total = MIB_TOTAL_BYTES_H + 1; + total <<= 32; + *cnt += total; + } + } else { + if (check & masks[MIB_COUNTER_OVERFLOW]) + *cnt += MIB_PACKET_DROPPED + 1; + *cnt += data & MIB_PACKET_DROPPED; + } + break; + } + } + mutex_unlock(&dev->alu_mutex); +} + +static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + u32 *last = (u32 *)dropped; + const u16 *regs; + u16 ctrl_addr; + u32 data; + u32 cur; + + regs = dev->info->regs; + + addr -= dev->info->reg_mib_cnt; + ctrl_addr = addr ? 
KSZ8863_MIB_PACKET_DROPPED_TX_0 : + KSZ8863_MIB_PACKET_DROPPED_RX_0; + ctrl_addr += port; + ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + ksz_read32(dev, regs[REG_IND_DATA_LO], &data); + mutex_unlock(&dev->alu_mutex); + + data &= MIB_PACKET_DROPPED; + cur = last[addr]; + if (data != cur) { + last[addr] = data; + if (data < cur) + data += MIB_PACKET_DROPPED + 1; + data -= cur; + *cnt += data; + } +} + +void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + if (is_ksz88xx(dev)) + ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt); + else + ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt); +} + +void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze) +{ + if (is_ksz88xx(dev)) + return; + + /* enable the port for flush/freeze function */ + if (freeze) + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); + ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze); + + /* disable the port after freeze is done */ + if (!freeze) + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); +} + +void ksz8_port_init_cnt(struct ksz_device *dev, int port) +{ + struct ksz_port_mib *mib = &dev->ports[port].mib; + u64 *dropped; + + /* For KSZ8795 family. */ + if (ksz_is_ksz87xx(dev)) { + /* flush all enabled port MIB counters */ + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); + ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true); + ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); + } + + mib->cnt_ptr = 0; + + /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */ + while (mib->cnt_ptr < dev->info->reg_mib_cnt) { + dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, + &mib->counters[mib->cnt_ptr]); + ++mib->cnt_ptr; + } + + /* last one in storage */ + dropped = &mib->counters[dev->info->mib_cnt]; + + /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */ + while (mib->cnt_ptr < dev->info->mib_cnt) { + dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, + dropped, &mib->counters[mib->cnt_ptr]); + ++mib->cnt_ptr; + } +} + +static int ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data) +{ + const u16 *regs; + u16 ctrl_addr; + int ret; + + regs = dev->info->regs; + + ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr; + + mutex_lock(&dev->alu_mutex); + ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + if (ret) + goto unlock_alu; + + ret = ksz_read64(dev, regs[REG_IND_DATA_HI], data); +unlock_alu: + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static int ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data) +{ + const u16 *regs; + u16 ctrl_addr; + int ret; + + regs = dev->info->regs; + + ctrl_addr = IND_ACC_TABLE(table) | addr; + + mutex_lock(&dev->alu_mutex); + ret = ksz_write64(dev, regs[REG_IND_DATA_HI], data); + if (ret) + goto unlock_alu; + + ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); +unlock_alu: + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data) +{ + int timeout = 100; + const u32 *masks; + const u16 *regs; + int ret; + + masks = dev->info->masks; + regs = dev->info->regs; + + do { + ret = ksz_read8(dev, regs[REG_IND_DATA_CHECK], data); + if (ret) + return ret; + + timeout--; + } while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout); + + /* Entry is not ready for accessing. */ + if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) + return -ETIMEDOUT; + + /* Entry is ready for accessing. 
*/ + return ksz_read8(dev, regs[REG_IND_DATA_8], data); +} + +static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr, + u8 *fid, u8 *src_port, u16 *entries) +{ + u32 data_hi, data_lo; + const u8 *shifts; + const u32 *masks; + const u16 *regs; + u16 ctrl_addr; + u64 buf = 0; + u8 data; + int cnt; + int ret; + + shifts = dev->info->shifts; + masks = dev->info->masks; + regs = dev->info->regs; + + ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr; + + mutex_lock(&dev->alu_mutex); + ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + if (ret) + goto unlock_alu; + + ret = ksz8_valid_dyn_entry(dev, &data); + if (ret) + goto unlock_alu; + + if (data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) { + *entries = 0; + goto unlock_alu; + } + + ret = ksz_read64(dev, regs[REG_IND_DATA_HI], &buf); + if (ret) + goto unlock_alu; + + data_hi = (u32)(buf >> 32); + data_lo = (u32)buf; + + /* Check out how many valid entry in the table. */ + cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H]; + cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H]; + cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >> + shifts[DYNAMIC_MAC_ENTRIES]; + *entries = cnt + 1; + + *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >> + shifts[DYNAMIC_MAC_FID]; + *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >> + shifts[DYNAMIC_MAC_SRC_PORT]; + + mac_addr[5] = (u8)data_lo; + mac_addr[4] = (u8)(data_lo >> 8); + mac_addr[3] = (u8)(data_lo >> 16); + mac_addr[2] = (u8)(data_lo >> 24); + + mac_addr[1] = (u8)data_hi; + mac_addr[0] = (u8)(data_hi >> 8); + +unlock_alu: + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu, bool *valid) +{ + u32 data_hi, data_lo; + const u8 *shifts; + const u32 *masks; + u64 data; + int ret; + + shifts = dev->info->shifts; + masks = dev->info->masks; + + ret = ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data); + if (ret) + return ret; + + data_hi = data >> 32; + data_lo = (u32)data; + + if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] | + masks[STATIC_MAC_TABLE_OVERRIDE]))) { + *valid = false; + return 0; + } + + alu->mac[5] = (u8)data_lo; + alu->mac[4] = (u8)(data_lo >> 8); + alu->mac[3] = (u8)(data_lo >> 16); + alu->mac[2] = (u8)(data_lo >> 24); + alu->mac[1] = (u8)data_hi; + alu->mac[0] = (u8)(data_hi >> 8); + alu->port_forward = + (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >> + shifts[STATIC_MAC_FWD_PORTS]; + alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0; + + /* KSZ8795/KSZ8895 family switches have STATIC_MAC_TABLE_USE_FID and + * STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the + * static MAC table compared to doing write. + */ + if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev)) + data_hi >>= 1; + alu->is_static = true; + alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 
1 : 0; + alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >> + shifts[STATIC_MAC_FID]; + + *valid = true; + + return 0; +} + +static int ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) +{ + u32 data_hi, data_lo; + const u8 *shifts; + const u32 *masks; + u64 data; + + shifts = dev->info->shifts; + masks = dev->info->masks; + + data_lo = ((u32)alu->mac[2] << 24) | + ((u32)alu->mac[3] << 16) | + ((u32)alu->mac[4] << 8) | alu->mac[5]; + data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1]; + data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS]; + + if (alu->is_override) + data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE]; + if (alu->is_use_fid) { + data_hi |= masks[STATIC_MAC_TABLE_USE_FID]; + data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID]; + } + if (alu->is_static) + data_hi |= masks[STATIC_MAC_TABLE_VALID]; + else + data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE]; + + data = (u64)data_hi << 32 | data_lo; + + return ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data); +} + +static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid, + u8 *member, u8 *valid) +{ + const u8 *shifts; + const u32 *masks; + + shifts = dev->info->shifts; + masks = dev->info->masks; + + *fid = vlan & masks[VLAN_TABLE_FID]; + *member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >> + shifts[VLAN_TABLE_MEMBERSHIP_S]; + *valid = !!(vlan & masks[VLAN_TABLE_VALID]); +} + +static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid, + u16 *vlan) +{ + const u8 *shifts; + const u32 *masks; + + shifts = dev->info->shifts; + masks = dev->info->masks; + + *vlan = fid; + *vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S]; + if (valid) + *vlan |= masks[VLAN_TABLE_VALID]; +} + +static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr) +{ + const u8 *shifts; + u64 data; + int i; + + shifts = dev->info->shifts; + + ksz8_r_table(dev, TABLE_VLAN, addr, &data); + addr *= 4; + for (i = 0; i < 4; i++) { + dev->vlan_cache[addr + i].table[0] = (u16)data; + data >>= shifts[VLAN_TABLE]; + } +} + +static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) +{ + int index; + u16 *data; + u16 addr; + u64 buf; + + data = (u16 *)&buf; + addr = vid / 4; + index = vid & 3; + ksz8_r_table(dev, TABLE_VLAN, addr, &buf); + *vlan = data[index]; +} + +static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) +{ + int index; + u16 *data; + u16 addr; + u64 buf; + + data = (u16 *)&buf; + addr = vid / 4; + index = vid & 3; + ksz8_r_table(dev, TABLE_VLAN, addr, &buf); + data[index] = vlan; + dev->vlan_cache[vid].table[0] = vlan; + ksz8_w_table(dev, TABLE_VLAN, addr, buf); +} + +/** + * ksz879x_get_loopback - KSZ879x specific function to get loopback + * configuration status for a specific port + * @dev: Pointer to the device structure + * @port: Port number to query + * @val: Pointer to store the result + * + * This function reads the SMI registers to determine whether loopback mode + * is enabled for a specific port. + * + * Return: 0 on success, error code on failure. + */ +static int ksz879x_get_loopback(struct ksz_device *dev, u16 port, + u16 *val) +{ + u8 stat3; + int ret; + + ret = ksz_pread8(dev, port, REG_PORT_STATUS_3, &stat3); + if (ret) + return ret; + + if (stat3 & PORT_PHY_LOOPBACK) + *val |= BMCR_LOOPBACK; + + return 0; +} + +/** + * ksz879x_set_loopback - KSZ879x specific function to set loopback mode for + * a specific port + * @dev: Pointer to the device structure. + * @port: Port number to modify. 
+ * @val: Value indicating whether to enable or disable loopback mode. + * + * This function translates loopback bit of the BMCR register into the + * corresponding hardware register bit value and writes it to the SMI interface. + * + * Return: 0 on success, error code on failure. + */ +static int ksz879x_set_loopback(struct ksz_device *dev, u16 port, u16 val) +{ + u8 stat3 = 0; + + if (val & BMCR_LOOPBACK) + stat3 |= PORT_PHY_LOOPBACK; + + return ksz_prmw8(dev, port, REG_PORT_STATUS_3, PORT_PHY_LOOPBACK, + stat3); +} + +/** + * ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY + * Control register (Reg. 31). + * @dev: The KSZ device instance. + * @port: The port number to be read. + * @val: The value read from the SMI interface. + * + * This function reads the SMI interface and translates the hardware register + * bit values into their corresponding control settings for a MIIM PHY Control + * register. + * + * Return: 0 on success, error code on failure. + */ +static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val) +{ + const u16 *regs = dev->info->regs; + u8 reg_val; + int ret; + + *val = 0; + + ret = ksz_pread8(dev, port, regs[P_LINK_STATUS], &reg_val); + if (ret < 0) + return ret; + + if (reg_val & PORT_MDIX_STATUS) + *val |= KSZ886X_CTRL_MDIX_STAT; + + ret = ksz_pread8(dev, port, REG_PORT_LINK_MD_CTRL, &reg_val); + if (ret < 0) + return ret; + + if (reg_val & PORT_FORCE_LINK) + *val |= KSZ886X_CTRL_FORCE_LINK; + + if (reg_val & PORT_POWER_SAVING) + *val |= KSZ886X_CTRL_PWRSAVE; + + if (reg_val & PORT_PHY_REMOTE_LOOPBACK) + *val |= KSZ886X_CTRL_REMOTE_LOOPBACK; + + return 0; +} + +/** + * ksz8_r_phy_bmcr - Translates and reads from the SMI interface to a MIIM PHY + * Basic mode control register (Reg. 0). + * @dev: The KSZ device instance. + * @port: The port number to be read. + * @val: The value read from the SMI interface. + * + * This function reads the SMI interface and translates the hardware register + * bit values into their corresponding control settings for a MIIM PHY Basic + * mode control register. + * + * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873 + * ------------------------------------------------------------------- + * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit + * ----------------------------+-----------------------------+---------------- + * Bit 15 - Soft Reset | 0xF/4 | Not supported + * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY) + * Bit 13 - Force 100 | 0xC/6 = 0xC/6 + * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7 + * Bit 11 - Power Down | 0xD/3 = 0xD/3 + * Bit 10 - PHY Isolate | 0xF/5 | Not supported + * Bit 9 - Restart AN | 0xD/5 = 0xD/5 + * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5 + * Bit 7 - Collision Test/Res. | Not supported | Not supported + * Bit 6 - Reserved | Not supported | Not supported + * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7 + * Bit 4 - Force MDI | 0xD/1 = 0xD/1 + * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2 + * Bit 2 - Disable Far-End F. | ???? | 0xD/4 + * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6 + * Bit 0 - Disable LED | 0xD/7 = 0xD/7 + * ------------------------------------------------------------------- + * + * Return: 0 on success, error code on failure.
+ */ +static int ksz8_r_phy_bmcr(struct ksz_device *dev, u16 port, u16 *val) +{ + const u16 *regs = dev->info->regs; + u8 restart, speed, ctrl; + int ret; + + *val = 0; + + ret = ksz_pread8(dev, port, regs[P_NEG_RESTART_CTRL], &restart); + if (ret) + return ret; + + ret = ksz_pread8(dev, port, regs[P_SPEED_STATUS], &speed); + if (ret) + return ret; + + ret = ksz_pread8(dev, port, regs[P_FORCE_CTRL], &ctrl); + if (ret) + return ret; + + if (ctrl & PORT_FORCE_100_MBIT) + *val |= BMCR_SPEED100; + + if (ksz_is_ksz88x3(dev)) { + if (restart & KSZ8873_PORT_PHY_LOOPBACK) + *val |= BMCR_LOOPBACK; + + if ((ctrl & PORT_AUTO_NEG_ENABLE)) + *val |= BMCR_ANENABLE; + } else { + ret = ksz879x_get_loopback(dev, port, val); + if (ret) + return ret; + + if (!(ctrl & PORT_AUTO_NEG_DISABLE)) + *val |= BMCR_ANENABLE; + } + + if (restart & PORT_POWER_DOWN) + *val |= BMCR_PDOWN; + + if (restart & PORT_AUTO_NEG_RESTART) + *val |= BMCR_ANRESTART; + + if (ctrl & PORT_FORCE_FULL_DUPLEX) + *val |= BMCR_FULLDPLX; + + if (speed & PORT_HP_MDIX) + *val |= KSZ886X_BMCR_HP_MDIX; + + if (restart & PORT_FORCE_MDIX) + *val |= KSZ886X_BMCR_FORCE_MDI; + + if (restart & PORT_AUTO_MDIX_DISABLE) + *val |= KSZ886X_BMCR_DISABLE_AUTO_MDIX; + + if (restart & PORT_TX_DISABLE) + *val |= KSZ886X_BMCR_DISABLE_TRANSMIT; + + if (restart & PORT_LED_OFF) + *val |= KSZ886X_BMCR_DISABLE_LED; + + return 0; +} + +int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) +{ + u8 ctrl, link, val1, val2; + int processed = true; + const u16 *regs; + u16 data = 0; + u16 p = phy; + int ret; + + regs = dev->info->regs; + + switch (reg) { + case MII_BMCR: + ret = ksz8_r_phy_bmcr(dev, p, &data); + if (ret) + return ret; + break; + case MII_BMSR: + ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link); + if (ret) + return ret; + + data = BMSR_100FULL | + BMSR_100HALF | + BMSR_10FULL | + BMSR_10HALF | + BMSR_ANEGCAPABLE; + if (link & PORT_AUTO_NEG_COMPLETE) + data |= BMSR_ANEGCOMPLETE; + if (link & PORT_STAT_LINK_GOOD) + data |= BMSR_LSTATUS; + break; + case MII_PHYSID1: + data = KSZ8795_ID_HI; + break; + case MII_PHYSID2: + if (ksz_is_ksz88x3(dev)) + data = KSZ8863_ID_LO; + else + data = KSZ8795_ID_LO; + break; + case MII_ADVERTISE: + ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl); + if (ret) + return ret; + + data = ADVERTISE_CSMA; + if (ctrl & PORT_AUTO_NEG_SYM_PAUSE) + data |= ADVERTISE_PAUSE_CAP; + if (ctrl & PORT_AUTO_NEG_100BTX_FD) + data |= ADVERTISE_100FULL; + if (ctrl & PORT_AUTO_NEG_100BTX) + data |= ADVERTISE_100HALF; + if (ctrl & PORT_AUTO_NEG_10BT_FD) + data |= ADVERTISE_10FULL; + if (ctrl & PORT_AUTO_NEG_10BT) + data |= ADVERTISE_10HALF; + break; + case MII_LPA: + ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link); + if (ret) + return ret; + + data = LPA_SLCT; + if (link & PORT_REMOTE_SYM_PAUSE) + data |= LPA_PAUSE_CAP; + if (link & PORT_REMOTE_100BTX_FD) + data |= LPA_100FULL; + if (link & PORT_REMOTE_100BTX) + data |= LPA_100HALF; + if (link & PORT_REMOTE_10BT_FD) + data |= LPA_10FULL; + if (link & PORT_REMOTE_10BT) + data |= LPA_10HALF; + if (data & ~LPA_SLCT) + data |= LPA_LPACK; + break; + case PHY_REG_LINK_MD: + ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1); + if (ret) + return ret; + + ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2); + if (ret) + return ret; + + if (val1 & PORT_START_CABLE_DIAG) + data |= PHY_START_CABLE_DIAG; + + if (val1 & PORT_CABLE_10M_SHORT) + data |= PHY_CABLE_10M_SHORT; + + data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M, + FIELD_GET(PORT_CABLE_DIAG_RESULT_M, val1)); + + data |= 
FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M, + (FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, val1) << 8) | + FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2)); + break; + case PHY_REG_PHY_CTRL: + ret = ksz8_r_phy_ctrl(dev, p, &data); + if (ret) + return ret; + + break; + default: + processed = false; + break; + } + if (processed) + *val = data; + + return 0; +} + +/** + * ksz8_w_phy_ctrl - Translates and writes to the SMI interface from a MIIM PHY + * Control register (Reg. 31). + * @dev: The KSZ device instance. + * @port: The port number to be configured. + * @val: The register value to be written. + * + * This function translates control settings from a MIIM PHY Control register + * into their corresponding hardware register bit values for the SMI + * interface. + * + * Return: 0 on success, error code on failure. + */ +static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val) +{ + u8 reg_val = 0; + int ret; + + if (val & KSZ886X_CTRL_FORCE_LINK) + reg_val |= PORT_FORCE_LINK; + + if (val & KSZ886X_CTRL_PWRSAVE) + reg_val |= PORT_POWER_SAVING; + + if (val & KSZ886X_CTRL_REMOTE_LOOPBACK) + reg_val |= PORT_PHY_REMOTE_LOOPBACK; + + ret = ksz_prmw8(dev, port, REG_PORT_LINK_MD_CTRL, PORT_FORCE_LINK | + PORT_POWER_SAVING | PORT_PHY_REMOTE_LOOPBACK, reg_val); + return ret; +} + +/** + * ksz8_w_phy_bmcr - Translates and writes to the SMI interface from a MIIM PHY + * Basic mode control register (Reg. 0). + * @dev: The KSZ device instance. + * @port: The port number to be configured. + * @val: The register value to be written. + * + * This function translates control settings from a MIIM PHY Basic mode control + * register into their corresponding hardware register bit values for the SMI + * interface. + * + * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873 + * ------------------------------------------------------------------- + * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit + * ----------------------------+-----------------------------+---------------- + * Bit 15 - Soft Reset | 0xF/4 | Not supported + * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY) + * Bit 13 - Force 100 | 0xC/6 = 0xC/6 + * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7 + * Bit 11 - Power Down | 0xD/3 = 0xD/3 + * Bit 10 - PHY Isolate | 0xF/5 | Not supported + * Bit 9 - Restart AN | 0xD/5 = 0xD/5 + * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5 + * Bit 7 - Collision Test/Res. | Not supported | Not supported + * Bit 6 - Reserved | Not supported | Not supported + * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7 + * Bit 4 - Force MDI | 0xD/1 = 0xD/1 + * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2 + * Bit 2 - Disable Far-End F. | ???? | 0xD/4 + * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6 + * Bit 0 - Disable LED | 0xD/7 = 0xD/7 + * ------------------------------------------------------------------- + * + * Return: 0 on success, error code on failure. + */ +static int ksz8_w_phy_bmcr(struct ksz_device *dev, u16 port, u16 val) +{ + u8 restart, speed, ctrl, restart_mask; + const u16 *regs = dev->info->regs; + int ret; + + /* Do not support PHY reset function. */ + if (val & BMCR_RESET) + return 0; + + speed = 0; + if (val & KSZ886X_BMCR_HP_MDIX) + speed |= PORT_HP_MDIX; + + ret = ksz_prmw8(dev, port, regs[P_SPEED_STATUS], PORT_HP_MDIX, speed); + if (ret) + return ret; + + ctrl = 0; + if (ksz_is_ksz88x3(dev)) { + if ((val & BMCR_ANENABLE)) + ctrl |= PORT_AUTO_NEG_ENABLE; + } else { + if (!(val & BMCR_ANENABLE)) + ctrl |= PORT_AUTO_NEG_DISABLE; + + /* Fiber port does not support auto-negotiation. 
*/ + if (dev->ports[port].fiber) + ctrl |= PORT_AUTO_NEG_DISABLE; + } + + if (val & BMCR_SPEED100) + ctrl |= PORT_FORCE_100_MBIT; + + if (val & BMCR_FULLDPLX) + ctrl |= PORT_FORCE_FULL_DUPLEX; + + ret = ksz_prmw8(dev, port, regs[P_FORCE_CTRL], PORT_FORCE_100_MBIT | + /* PORT_AUTO_NEG_ENABLE and PORT_AUTO_NEG_DISABLE are the same + * bits + */ + PORT_FORCE_FULL_DUPLEX | PORT_AUTO_NEG_ENABLE, ctrl); + if (ret) + return ret; + + restart = 0; + restart_mask = PORT_LED_OFF | PORT_TX_DISABLE | PORT_AUTO_NEG_RESTART | + PORT_POWER_DOWN | PORT_AUTO_MDIX_DISABLE | PORT_FORCE_MDIX; + + if (val & KSZ886X_BMCR_DISABLE_LED) + restart |= PORT_LED_OFF; + + if (val & KSZ886X_BMCR_DISABLE_TRANSMIT) + restart |= PORT_TX_DISABLE; + + if (val & BMCR_ANRESTART) + restart |= PORT_AUTO_NEG_RESTART; + + if (val & BMCR_PDOWN) + restart |= PORT_POWER_DOWN; + + if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX) + restart |= PORT_AUTO_MDIX_DISABLE; + + if (val & KSZ886X_BMCR_FORCE_MDI) + restart |= PORT_FORCE_MDIX; + + if (ksz_is_ksz88x3(dev)) { + restart_mask |= KSZ8873_PORT_PHY_LOOPBACK; + + if (val & BMCR_LOOPBACK) + restart |= KSZ8873_PORT_PHY_LOOPBACK; + } else { + ret = ksz879x_set_loopback(dev, port, val); + if (ret) + return ret; + } + + return ksz_prmw8(dev, port, regs[P_NEG_RESTART_CTRL], restart_mask, + restart); +} + +int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) +{ + const u16 *regs; + u8 ctrl, data; + u16 p = phy; + int ret; + + regs = dev->info->regs; + + switch (reg) { + case MII_BMCR: + ret = ksz8_w_phy_bmcr(dev, p, val); + if (ret) + return ret; + break; + case MII_ADVERTISE: + ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl); + if (ret) + return ret; + + data = ctrl; + data &= ~(PORT_AUTO_NEG_SYM_PAUSE | + PORT_AUTO_NEG_100BTX_FD | + PORT_AUTO_NEG_100BTX | + PORT_AUTO_NEG_10BT_FD | + PORT_AUTO_NEG_10BT); + if (val & ADVERTISE_PAUSE_CAP) + data |= PORT_AUTO_NEG_SYM_PAUSE; + if (val & ADVERTISE_100FULL) + data |= PORT_AUTO_NEG_100BTX_FD; + if (val & ADVERTISE_100HALF) + data |= PORT_AUTO_NEG_100BTX; + if (val & ADVERTISE_10FULL) + data |= PORT_AUTO_NEG_10BT_FD; + if (val & ADVERTISE_10HALF) + data |= PORT_AUTO_NEG_10BT; + + if (data != ctrl) { + ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data); + if (ret) + return ret; + } + break; + case PHY_REG_LINK_MD: + if (val & PHY_START_CABLE_DIAG) + ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true); + break; + + case PHY_REG_PHY_CTRL: + ret = ksz8_w_phy_ctrl(dev, p, val); + if (ret) + return ret; + break; + default: + break; + } + + return 0; +} + +void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) +{ + u8 data; + + ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + data &= ~PORT_VLAN_MEMBERSHIP; + data |= (member & dev->port_mask); + ksz_pwrite8(dev, port, P_MIRROR_CTRL, data); +} + +void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) +{ + u8 learn[DSA_MAX_PORTS]; + int first, index, cnt; + const u16 *regs; + + regs = dev->info->regs; + + if ((uint)port < dev->info->port_cnt) { + first = port; + cnt = port + 1; + } else { + /* Flush all ports. 
*/ + first = 0; + cnt = dev->info->port_cnt; + } + for (index = first; index < cnt; index++) { + ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]); + if (!(learn[index] & PORT_LEARN_DISABLE)) + ksz_pwrite8(dev, index, regs[P_STP_CTRL], + learn[index] | PORT_LEARN_DISABLE); + } + ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true); + for (index = first; index < cnt; index++) { + if (!(learn[index] & PORT_LEARN_DISABLE)) + ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]); + } +} + +int ksz8_fdb_dump(struct ksz_device *dev, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + u8 mac[ETH_ALEN]; + u8 src_port, fid; + u16 entries = 0; + int ret, i; + + for (i = 0; i < KSZ8_DYN_MAC_ENTRIES; i++) { + ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port, + &entries); + if (ret) + return ret; + + if (i >= entries) + return 0; + + if (port == src_port) { + ret = cb(mac, fid, false, data); + if (ret) + return ret; + } + } + + return 0; +} + +static int ksz8_add_sta_mac(struct ksz_device *dev, int port, + const unsigned char *addr, u16 vid) +{ + struct alu_struct alu; + int index, ret; + int empty = 0; + + alu.port_forward = 0; + for (index = 0; index < dev->info->num_statics; index++) { + bool valid; + + ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid); + if (ret) + return ret; + if (!valid) { + /* Remember the first empty entry. */ + if (!empty) + empty = index + 1; + continue; + } + + if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid) + break; + } + + /* no available entry */ + if (index == dev->info->num_statics && !empty) + return -ENOSPC; + + /* add entry */ + if (index == dev->info->num_statics) { + index = empty - 1; + memset(&alu, 0, sizeof(alu)); + memcpy(alu.mac, addr, ETH_ALEN); + alu.is_static = true; + } + alu.port_forward |= BIT(port); + if (vid) { + alu.is_use_fid = true; + + /* Need a way to map VID to FID. 
*/ + alu.fid = vid; + } + + return ksz8_w_sta_mac_table(dev, index, &alu); +} + +static int ksz8_del_sta_mac(struct ksz_device *dev, int port, + const unsigned char *addr, u16 vid) +{ + struct alu_struct alu; + int index, ret; + + for (index = 0; index < dev->info->num_statics; index++) { + bool valid; + + ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid); + if (ret) + return ret; + if (!valid) + continue; + + if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid) + break; + } + + /* no available entry */ + if (index == dev->info->num_statics) + return 0; + + /* clear port */ + alu.port_forward &= ~BIT(port); + if (!alu.port_forward) + alu.is_static = false; + + return ksz8_w_sta_mac_table(dev, index, &alu); +} + +int ksz8_mdb_add(struct ksz_device *dev, int port, + const struct switchdev_obj_port_mdb *mdb, struct dsa_db db) +{ + return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid); +} + +int ksz8_mdb_del(struct ksz_device *dev, int port, + const struct switchdev_obj_port_mdb *mdb, struct dsa_db db) +{ + return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid); +} + +int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr, + u16 vid, struct dsa_db db) +{ + return ksz8_add_sta_mac(dev, port, addr, vid); +} + +int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr, + u16 vid, struct dsa_db db) +{ + return ksz8_del_sta_mac(dev, port, addr, vid); +} + +int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag, + struct netlink_ext_ack *extack) +{ + if (ksz_is_ksz88x3(dev)) + return -ENOTSUPP; + + /* Discard packets with VID not enabled on the switch */ + ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag); + + /* Discard packets with VID not enabled on the ingress port */ + for (port = 0; port < dev->phy_port_cnt; ++port) + ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER, + flag); + + return 0; +} + +static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state) +{ + if (ksz_is_ksz88x3(dev)) { + ksz_cfg(dev, REG_SW_INSERT_SRC_PVID, + 0x03 << (4 - 2 * port), state); + } else { + ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00); + } +} + +int ksz8_port_vlan_add(struct ksz_device *dev, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct ksz_port *p = &dev->ports[port]; + u16 data, new_pvid = 0; + u8 fid, member, valid; + + if (ksz_is_ksz88x3(dev)) + return -ENOTSUPP; + + /* If a VLAN is added with untagged flag different from the + * port's Remove Tag flag, we need to change the latter. + * Ignore VID 0, which is always untagged. + * Ignore CPU port, which will always be tagged. + */ + if (untagged != p->remove_tag && vlan->vid != 0 && + port != dev->cpu_port) { + unsigned int vid; + + /* Reject attempts to add a VLAN that requires the + * Remove Tag flag to be changed, unless there are no + * other VLANs currently configured. + */ + for (vid = 1; vid < dev->info->num_vlans; ++vid) { + /* Skip the VID we are going to add or reconfigure */ + if (vid == vlan->vid) + continue; + + ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0], + &fid, &member, &valid); + if (valid && (member & BIT(port))) + return -EINVAL; + } + + ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); + p->remove_tag = untagged; + } + + ksz8_r_vlan_table(dev, vlan->vid, &data); + ksz8_from_vlan(dev, data, &fid, &member, &valid); + + /* First time to setup the VLAN entry. 
*/ + if (!valid) { + /* Need to find a way to map VID to FID. */ + fid = 1; + valid = 1; + } + member |= BIT(port); + + ksz8_to_vlan(dev, fid, member, valid, &data); + ksz8_w_vlan_table(dev, vlan->vid, data); + + /* change PVID */ + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) + new_pvid = vlan->vid; + + if (new_pvid) { + u16 vid; + + ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid); + vid &= ~VLAN_VID_MASK; + vid |= new_pvid; + ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid); + + ksz8_port_enable_pvid(dev, port, true); + } + + return 0; +} + +int ksz8_port_vlan_del(struct ksz_device *dev, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + u16 data, pvid; + u8 fid, member, valid; + + if (ksz_is_ksz88x3(dev)) + return -ENOTSUPP; + + ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid); + pvid = pvid & 0xFFF; + + ksz8_r_vlan_table(dev, vlan->vid, &data); + ksz8_from_vlan(dev, data, &fid, &member, &valid); + + member &= ~BIT(port); + + /* Invalidate the entry if no more member. */ + if (!member) { + fid = 0; + valid = 0; + } + + ksz8_to_vlan(dev, fid, member, valid, &data); + ksz8_w_vlan_table(dev, vlan->vid, data); + + if (pvid == vlan->vid) + ksz8_port_enable_pvid(dev, port, false); + + return 0; +} + +int ksz8_port_mirror_add(struct ksz_device *dev, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress, struct netlink_ext_ack *extack) +{ + if (ingress) { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); + dev->mirror_rx |= BIT(port); + } else { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); + dev->mirror_tx |= BIT(port); + } + + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); + + /* configure mirror port */ + if (dev->mirror_rx || dev->mirror_tx) + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, true); + + return 0; +} + +void ksz8_port_mirror_del(struct ksz_device *dev, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + u8 data; + + if (mirror->ingress) { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); + dev->mirror_rx &= ~BIT(port); + } else { + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); + dev->mirror_tx &= ~BIT(port); + } + + ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + + if (!dev->mirror_rx && !dev->mirror_tx) + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, false); +} + +static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port) +{ + struct ksz_port *p = &dev->ports[port]; + + if (!ksz_is_ksz87xx(dev)) + return; + + if (!p->interface && dev->compat_interface) { + dev_warn(dev->dev, + "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. " + "Please update your device tree.\n", + port); + p->interface = dev->compat_interface; + } +} + +void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) +{ + const u16 *regs = dev->info->regs; + struct dsa_switch *ds = dev->ds; + const u32 *masks; + int queues; + u8 member; + + masks = dev->info->masks; + + /* enable broadcast storm limit */ + ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); + + /* For KSZ88x3 enable only one queue by default, otherwise we won't + * be able to get rid of PCP prios on Port 2. 
+ */ + if (ksz_is_ksz88x3(dev)) + queues = 1; + else + queues = dev->info->num_tx_queues; + + ksz8_port_queue_split(dev, port, queues); + + /* replace priority */ + ksz_port_cfg(dev, port, P_802_1P_CTRL, + masks[PORT_802_1P_REMAPPING], false); + + if (cpu_port) + member = dsa_user_ports(ds); + else + member = BIT(dsa_upstream_port(ds, port)); + + ksz8_cfg_port_member(dev, port, member); + + /* Disable all WoL options by default. Otherwise + * ksz_switch_macaddr_get/put logic will not work properly. + * CPU port 4 has no WoL functionality. + */ + if (ksz_is_ksz87xx(dev) && !cpu_port) + ksz8_pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0); +} + +static void ksz88x3_config_rmii_clk(struct ksz_device *dev) +{ + struct dsa_port *cpu_dp = dsa_to_port(dev->ds, dev->cpu_port); + bool rmii_clk_internal; + + if (!ksz_is_ksz88x3(dev)) + return; + + rmii_clk_internal = of_property_read_bool(cpu_dp->dn, + "microchip,rmii-clk-internal"); + + ksz_cfg(dev, KSZ88X3_REG_FVID_AND_HOST_MODE, + KSZ88X3_PORT3_RMII_CLK_INTERNAL, rmii_clk_internal); +} + +void ksz8_config_cpu_port(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *p; + const u32 *masks; + const u16 *regs; + u8 remote; + int i; + + masks = dev->info->masks; + regs = dev->info->regs; + + ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true); + + ksz8_port_setup(dev, dev->cpu_port, true); + + ksz8795_cpu_interface_select(dev, dev->cpu_port); + ksz88x3_config_rmii_clk(dev); + + for (i = 0; i < dev->phy_port_cnt; i++) { + ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED); + } + for (i = 0; i < dev->phy_port_cnt; i++) { + p = &dev->ports[i]; + + /* For KSZ8795 family. */ + if (ksz_is_ksz87xx(dev)) { + ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote); + if (remote & KSZ8_PORT_FIBER_MODE) + p->fiber = 1; + } + if (p->fiber) + ksz_port_cfg(dev, i, regs[P_STP_CTRL], + PORT_FORCE_FLOW_CTRL, true); + else + ksz_port_cfg(dev, i, regs[P_STP_CTRL], + PORT_FORCE_FLOW_CTRL, false); + } +} + +/** + * ksz8_phy_port_link_up - Configures ports with integrated PHYs + * @dev: The KSZ device instance. + * @port: The port number to configure. + * @duplex: The desired duplex mode. + * @tx_pause: If true, enables transmit pause. + * @rx_pause: If true, enables receive pause. + * + * Description: + * The function configures flow control settings for a given port based on the + * desired settings and current duplex mode. + * + * According to the KSZ8873 datasheet, the PORT_FORCE_FLOW_CTRL bit in the + * Port Control 2 register (0x1A for Port 1, 0x22 for Port 2, 0x32 for Port 3) + * determines how flow control is handled on the port: + * "1 = will always enable full-duplex flow control on the port, regardless + * of AN result. + * 0 = full-duplex flow control is enabled based on AN result." + * + * This means that the flow control behavior depends on the state of this bit: + * - If PORT_FORCE_FLOW_CTRL is set to 1, the switch will ignore AN results and + * force flow control on the port. + * - If PORT_FORCE_FLOW_CTRL is set to 0, the switch will enable or disable + * flow control based on the AN results. + * + * However, there is a potential limitation in this configuration. It is + * currently not possible to force disable flow control on a port if we still + * advertise pause support. While such a configuration is not currently + * supported by Linux, and may not make practical sense, it's important to be + * aware of this limitation when working with the KSZ8873 and similar devices. 
+ */ +static void ksz8_phy_port_link_up(struct ksz_device *dev, int port, int duplex, + bool tx_pause, bool rx_pause) +{ + const u16 *regs = dev->info->regs; + u8 sctrl = 0; + + /* The KSZ8795 switch differs from the KSZ8873 by supporting + * asymmetric pause control. However, since a single bit is used to + * control both RX and TX pause, we can't enforce asymmetric pause + * control - both TX and RX pause will be either enabled or disabled + * together. + * + * If auto-negotiation is enabled, we usually allow the flow control to + * be determined by the auto-negotiation process based on the + * capabilities of both link partners. However, for KSZ8873, the + * PORT_FORCE_FLOW_CTRL bit may be set by the hardware bootstrap, + * ignoring the auto-negotiation result. Thus, even in auto-negotiation + * mode, we need to ensure that the PORT_FORCE_FLOW_CTRL bit is + * properly cleared. + * + * In the absence of pause auto-negotiation, we will enforce symmetric + * pause control for both variants of switches - KSZ8873 and KSZ8795. + * + * Autoneg Pause Autoneg rx,tx PORT_FORCE_FLOW_CTRL + * 1 1 x 0 + * 0 1 x 0 (flow control probably disabled) + * x 0 1 1 (flow control force enabled) + * 1 0 0 0 (flow control still depends on + * aneg result due to hardware) + * 0 0 0 0 (flow control probably disabled) + */ + if (dev->ports[port].manual_flow && tx_pause) + sctrl |= PORT_FORCE_FLOW_CTRL; + + ksz_prmw8(dev, port, regs[P_STP_CTRL], PORT_FORCE_FLOW_CTRL, sctrl); +} + +/** + * ksz8_cpu_port_link_up - Configures the CPU port of the switch. + * @dev: The KSZ device instance. + * @speed: The desired link speed. + * @duplex: The desired duplex mode. + * @tx_pause: If true, enables transmit pause. + * @rx_pause: If true, enables receive pause. + * + * Description: + * The function configures flow control and speed settings for the CPU + * port of the switch based on the desired settings, current duplex mode, and + * speed. + */ +static void ksz8_cpu_port_link_up(struct ksz_device *dev, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + const u16 *regs = dev->info->regs; + u8 ctrl = 0; + + /* SW_FLOW_CTRL, SW_HALF_DUPLEX, and SW_10_MBIT bits are bootstrappable + * at least on KSZ8873. They can have different values depending on your + * board setup. + */ + if (tx_pause || rx_pause) + ctrl |= SW_FLOW_CTRL; + + if (duplex == DUPLEX_HALF) + ctrl |= SW_HALF_DUPLEX; + + /* This hardware only supports SPEED_10 and SPEED_100. For SPEED_10 + * we need to set the SW_10_MBIT bit. Otherwise, we can leave it 0. + */ + if (speed == SPEED_10) + ctrl |= SW_10_MBIT; + + ksz_rmw8(dev, regs[S_BROADCAST_CTRL], SW_HALF_DUPLEX | SW_FLOW_CTRL | + SW_10_MBIT, ctrl); +} + +void ksz8_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ksz_device *dev = dp->ds->priv; + int port = dp->index; + + /* If the port is the CPU port, apply special handling. Only the CPU + * port is configured via global registers. + */ + if (dev->cpu_port == port) + ksz8_cpu_port_link_up(dev, speed, duplex, tx_pause, rx_pause); + else if (dev->info->internal_phy[port]) + ksz8_phy_port_link_up(dev, port, duplex, tx_pause, rx_pause); +} + +static int ksz8_handle_global_errata(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + int ret = 0; + + /* KSZ87xx Errata DS80000687C. + * Module 2: Link drops with some EEE link partners. 
+ * An issue with the EEE next page exchange between the + * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in + * the link dropping. + */ + if (dev->info->ksz87xx_eee_link_erratum) + ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0); + + return ret; +} + +int ksz8_enable_stp_addr(struct ksz_device *dev) +{ + struct alu_struct alu; + + /* Setup STP address for STP operation. */ + memset(&alu, 0, sizeof(alu)); + ether_addr_copy(alu.mac, eth_stp_addr); + alu.is_static = true; + alu.is_override = true; + alu.port_forward = dev->info->cpu_ports; + + return ksz8_w_sta_mac_table(dev, 0, &alu); +} + +int ksz8_setup(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + const u16 *regs = dev->info->regs; + int i, ret = 0; + + ds->mtu_enforcement_ingress = true; + + /* We rely on software untagging on the CPU port, so that we + * can support both tagged and untagged VLANs + */ + ds->untag_bridge_pvid = true; + + /* VLAN filtering is partly controlled by the global VLAN + * Enable flag + */ + ds->vlan_filtering_is_global = true; + + /* Enable automatic fast aging when link changed detected. */ + ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true); + + /* Enable aggressive back off algorithm in half duplex mode. */ + regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1, + SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); + + /* + * Make sure unicast VLAN boundary is set as default and + * enable no excessive collision drop. + */ + regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); + + ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false); + + ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false); + + if (!ksz_is_ksz88x3(dev)) + ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true); + + for (i = 0; i < (dev->info->num_vlans / 4); i++) + ksz8_r_vlan_entries(dev, i); + + /* Make sure PME (WoL) is not enabled. If requested, it will + * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs + * do not like PME events changes before shutdown. PME only + * available on KSZ87xx family. + */ + if (ksz_is_ksz87xx(dev)) { + ret = ksz8_pme_write8(dev, regs[REG_SW_PME_CTRL], 0); + if (!ret) + ret = ksz_rmw8(dev, REG_INT_ENABLE, INT_PME, 0); + } + + if (!ret) + return ksz8_handle_global_errata(ds); + else + return ret; +} + +void ksz8_get_caps(struct ksz_device *dev, int port, + struct phylink_config *config) +{ + config->mac_capabilities = MAC_10 | MAC_100; + + /* Silicon Errata Sheet (DS80000830A): + * "Port 1 does not respond to received flow control PAUSE frames" + * So, disable Pause support on "Port 1" (port == 0) for all ksz88x3 + * switches. 
+ */ + if (!ksz_is_ksz88x3(dev) || port) + config->mac_capabilities |= MAC_SYM_PAUSE; + + /* Asym pause is not supported on KSZ8863 and KSZ8873 */ + if (!ksz_is_ksz88x3(dev)) + config->mac_capabilities |= MAC_ASYM_PAUSE; +} + +u32 ksz8_get_port_addr(int port, int offset) +{ + return PORT_CTRL_ADDR(port, offset); +} + +int ksz8_switch_init(struct ksz_device *dev) +{ + dev->cpu_port = fls(dev->info->cpu_ports) - 1; + dev->phy_port_cnt = dev->info->port_cnt - 1; + dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports; + + return 0; +} + +void ksz8_switch_exit(struct ksz_device *dev) +{ + ksz8_reset_switch(dev); +} + +MODULE_AUTHOR("Tristram Ha "); +MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h index ae43077e76c35c..e1c79ff9712375 100644 --- a/drivers/net/dsa/microchip/ksz8.h +++ b/drivers/net/dsa/microchip/ksz8.h @@ -54,6 +54,9 @@ int ksz8_reset_switch(struct ksz_device *dev); int ksz8_switch_init(struct ksz_device *dev); void ksz8_switch_exit(struct ksz_device *dev); int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu); +int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value); +int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data); +int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data); void ksz8_phylink_mac_link_up(struct phylink_config *config, struct phy_device *phydev, unsigned int mode, phy_interface_t interface, int speed, int duplex, diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c deleted file mode 100644 index d27b9c36d73fca..00000000000000 --- a/drivers/net/dsa/microchip/ksz8795.c +++ /dev/null @@ -1,1874 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Microchip KSZ8795 switch driver - * - * Copyright (C) 2017 Microchip Technology Inc. - * Tristram Ha - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ksz_common.h" -#include "ksz8795_reg.h" -#include "ksz8.h" - -static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) -{ - regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0); -} - -static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, - bool set) -{ - regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset), - bits, set ? 
bits : 0); -} - -static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data) -{ - const u16 *regs; - u16 ctrl_addr; - int ret = 0; - - regs = dev->info->regs; - - mutex_lock(&dev->alu_mutex); - - ctrl_addr = IND_ACC_TABLE(table) | addr; - ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); - if (!ret) - ret = ksz_write8(dev, regs[REG_IND_BYTE], data); - - mutex_unlock(&dev->alu_mutex); - - return ret; -} - -int ksz8_reset_switch(struct ksz_device *dev) -{ - if (ksz_is_ksz88x3(dev)) { - /* reset switch */ - ksz_cfg(dev, KSZ8863_REG_SW_RESET, - KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true); - ksz_cfg(dev, KSZ8863_REG_SW_RESET, - KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false); - } else { - /* reset switch */ - ksz_write8(dev, REG_POWER_MANAGEMENT_1, - SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S); - ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0); - } - - return 0; -} - -static int ksz8863_change_mtu(struct ksz_device *dev, int frame_size) -{ - u8 ctrl2 = 0; - - if (frame_size <= KSZ8_LEGAL_PACKET_SIZE) - ctrl2 |= KSZ8863_LEGAL_PACKET_ENABLE; - else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE) - ctrl2 |= KSZ8863_HUGE_PACKET_ENABLE; - - return ksz_rmw8(dev, REG_SW_CTRL_2, KSZ8863_LEGAL_PACKET_ENABLE | - KSZ8863_HUGE_PACKET_ENABLE, ctrl2); -} - -static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size) -{ - u8 ctrl1 = 0, ctrl2 = 0; - int ret; - - if (frame_size > KSZ8_LEGAL_PACKET_SIZE) - ctrl2 |= SW_LEGAL_PACKET_DISABLE; - if (frame_size > KSZ8863_NORMAL_PACKET_SIZE) - ctrl1 |= SW_HUGE_PACKET; - - ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1); - if (ret) - return ret; - - return ksz_rmw8(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, ctrl2); -} - -int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu) -{ - u16 frame_size; - - if (!dsa_is_cpu_port(dev->ds, port)) - return 0; - - frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - - switch (dev->chip_id) { - case KSZ8795_CHIP_ID: - case KSZ8794_CHIP_ID: - case KSZ8765_CHIP_ID: - return ksz8795_change_mtu(dev, frame_size); - case KSZ8830_CHIP_ID: - return ksz8863_change_mtu(dev, frame_size); - } - - return -EOPNOTSUPP; -} - -static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues) -{ - u8 mask_4q, mask_2q; - u8 reg_4q, reg_2q; - u8 data_4q = 0; - u8 data_2q = 0; - int ret; - - if (ksz_is_ksz88x3(dev)) { - mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN; - mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN; - reg_4q = REG_PORT_CTRL_0; - reg_2q = REG_PORT_CTRL_2; - - /* KSZ8795 family switches have Weighted Fair Queueing (WFQ) - * enabled by default. Enable it for KSZ8873 family switches - * too. Default value for KSZ8873 family is strict priority, - * which should be enabled by using TC_SETUP_QDISC_ETS, not - * by default. - */ - ret = ksz_rmw8(dev, REG_SW_CTRL_3, WEIGHTED_FAIR_QUEUE_ENABLE, - WEIGHTED_FAIR_QUEUE_ENABLE); - if (ret) - return ret; - } else { - mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN; - mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN; - reg_4q = REG_PORT_CTRL_13; - reg_2q = REG_PORT_CTRL_0; - - /* TODO: this is legacy from initial KSZ8795 driver, should be - * moved to appropriate place in the future. 
- */ - ret = ksz_rmw8(dev, REG_SW_CTRL_19, - SW_OUT_RATE_LIMIT_QUEUE_BASED, - SW_OUT_RATE_LIMIT_QUEUE_BASED); - if (ret) - return ret; - } - - if (queues == 4) - data_4q = mask_4q; - else if (queues == 2) - data_2q = mask_2q; - - ret = ksz_prmw8(dev, port, reg_4q, mask_4q, data_4q); - if (ret) - return ret; - - return ksz_prmw8(dev, port, reg_2q, mask_2q, data_2q); -} - -int ksz8_all_queues_split(struct ksz_device *dev, int queues) -{ - struct dsa_switch *ds = dev->ds; - const struct dsa_port *dp; - - dsa_switch_for_each_port(dp, ds) { - int ret = ksz8_port_queue_split(dev, dp->index, queues); - - if (ret) - return ret; - } - - return 0; -} - -void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt) -{ - const u32 *masks; - const u16 *regs; - u16 ctrl_addr; - u32 data; - u8 check; - int loop; - - masks = dev->info->masks; - regs = dev->info->regs; - - ctrl_addr = addr + dev->info->reg_mib_cnt * port; - ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); - - mutex_lock(&dev->alu_mutex); - ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); - - /* It is almost guaranteed to always read the valid bit because of - * slow SPI speed. - */ - for (loop = 2; loop > 0; loop--) { - ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check); - - if (check & masks[MIB_COUNTER_VALID]) { - ksz_read32(dev, regs[REG_IND_DATA_LO], &data); - if (check & masks[MIB_COUNTER_OVERFLOW]) - *cnt += MIB_COUNTER_VALUE + 1; - *cnt += data & MIB_COUNTER_VALUE; - break; - } - } - mutex_unlock(&dev->alu_mutex); -} - -static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, - u64 *dropped, u64 *cnt) -{ - const u32 *masks; - const u16 *regs; - u16 ctrl_addr; - u32 data; - u8 check; - int loop; - - masks = dev->info->masks; - regs = dev->info->regs; - - addr -= dev->info->reg_mib_cnt; - ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port; - ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0; - ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); - - mutex_lock(&dev->alu_mutex); - ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); - - /* It is almost guaranteed to always read the valid bit because of - * slow SPI speed. - */ - for (loop = 2; loop > 0; loop--) { - ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check); - - if (check & masks[MIB_COUNTER_VALID]) { - ksz_read32(dev, regs[REG_IND_DATA_LO], &data); - if (addr < 2) { - u64 total; - - total = check & MIB_TOTAL_BYTES_H; - total <<= 32; - *cnt += total; - *cnt += data; - if (check & masks[MIB_COUNTER_OVERFLOW]) { - total = MIB_TOTAL_BYTES_H + 1; - total <<= 32; - *cnt += total; - } - } else { - if (check & masks[MIB_COUNTER_OVERFLOW]) - *cnt += MIB_PACKET_DROPPED + 1; - *cnt += data & MIB_PACKET_DROPPED; - } - break; - } - } - mutex_unlock(&dev->alu_mutex); -} - -static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, - u64 *dropped, u64 *cnt) -{ - u32 *last = (u32 *)dropped; - const u16 *regs; - u16 ctrl_addr; - u32 data; - u32 cur; - - regs = dev->info->regs; - - addr -= dev->info->reg_mib_cnt; - ctrl_addr = addr ? 
KSZ8863_MIB_PACKET_DROPPED_TX_0 : - KSZ8863_MIB_PACKET_DROPPED_RX_0; - ctrl_addr += port; - ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); - - mutex_lock(&dev->alu_mutex); - ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); - ksz_read32(dev, regs[REG_IND_DATA_LO], &data); - mutex_unlock(&dev->alu_mutex); - - data &= MIB_PACKET_DROPPED; - cur = last[addr]; - if (data != cur) { - last[addr] = data; - if (data < cur) - data += MIB_PACKET_DROPPED + 1; - data -= cur; - *cnt += data; - } -} - -void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, - u64 *dropped, u64 *cnt) -{ - if (ksz_is_ksz88x3(dev)) - ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt); - else - ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt); -} - -void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze) -{ - if (ksz_is_ksz88x3(dev)) - return; - - /* enable the port for flush/freeze function */ - if (freeze) - ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); - ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze); - - /* disable the port after freeze is done */ - if (!freeze) - ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); -} - -void ksz8_port_init_cnt(struct ksz_device *dev, int port) -{ - struct ksz_port_mib *mib = &dev->ports[port].mib; - u64 *dropped; - - if (!ksz_is_ksz88x3(dev)) { - /* flush all enabled port MIB counters */ - ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true); - ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true); - ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false); - } - - mib->cnt_ptr = 0; - - /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */ - while (mib->cnt_ptr < dev->info->reg_mib_cnt) { - dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, - &mib->counters[mib->cnt_ptr]); - ++mib->cnt_ptr; - } - - /* last one in storage */ - dropped = &mib->counters[dev->info->mib_cnt]; - - /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */ - while (mib->cnt_ptr < dev->info->mib_cnt) { - dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, - dropped, &mib->counters[mib->cnt_ptr]); - ++mib->cnt_ptr; - } -} - -static int ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data) -{ - const u16 *regs; - u16 ctrl_addr; - int ret; - - regs = dev->info->regs; - - ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr; - - mutex_lock(&dev->alu_mutex); - ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); - if (ret) - goto unlock_alu; - - ret = ksz_read64(dev, regs[REG_IND_DATA_HI], data); -unlock_alu: - mutex_unlock(&dev->alu_mutex); - - return ret; -} - -static int ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data) -{ - const u16 *regs; - u16 ctrl_addr; - int ret; - - regs = dev->info->regs; - - ctrl_addr = IND_ACC_TABLE(table) | addr; - - mutex_lock(&dev->alu_mutex); - ret = ksz_write64(dev, regs[REG_IND_DATA_HI], data); - if (ret) - goto unlock_alu; - - ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); -unlock_alu: - mutex_unlock(&dev->alu_mutex); - - return ret; -} - -static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data) -{ - int timeout = 100; - const u32 *masks; - const u16 *regs; - int ret; - - masks = dev->info->masks; - regs = dev->info->regs; - - do { - ret = ksz_read8(dev, regs[REG_IND_DATA_CHECK], data); - if (ret) - return ret; - - timeout--; - } while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout); - - /* Entry is not ready for accessing. */ - if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) - return -ETIMEDOUT; - - /* Entry is ready for accessing. 
*/ - return ksz_read8(dev, regs[REG_IND_DATA_8], data); -} - -static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr, - u8 *fid, u8 *src_port, u16 *entries) -{ - u32 data_hi, data_lo; - const u8 *shifts; - const u32 *masks; - const u16 *regs; - u16 ctrl_addr; - u64 buf = 0; - u8 data; - int cnt; - int ret; - - shifts = dev->info->shifts; - masks = dev->info->masks; - regs = dev->info->regs; - - ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr; - - mutex_lock(&dev->alu_mutex); - ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); - if (ret) - goto unlock_alu; - - ret = ksz8_valid_dyn_entry(dev, &data); - if (ret) - goto unlock_alu; - - if (data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) { - *entries = 0; - goto unlock_alu; - } - - ret = ksz_read64(dev, regs[REG_IND_DATA_HI], &buf); - if (ret) - goto unlock_alu; - - data_hi = (u32)(buf >> 32); - data_lo = (u32)buf; - - /* Check out how many valid entry in the table. */ - cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H]; - cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H]; - cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >> - shifts[DYNAMIC_MAC_ENTRIES]; - *entries = cnt + 1; - - *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >> - shifts[DYNAMIC_MAC_FID]; - *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >> - shifts[DYNAMIC_MAC_SRC_PORT]; - - mac_addr[5] = (u8)data_lo; - mac_addr[4] = (u8)(data_lo >> 8); - mac_addr[3] = (u8)(data_lo >> 16); - mac_addr[2] = (u8)(data_lo >> 24); - - mac_addr[1] = (u8)data_hi; - mac_addr[0] = (u8)(data_hi >> 8); - -unlock_alu: - mutex_unlock(&dev->alu_mutex); - - return ret; -} - -static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr, - struct alu_struct *alu, bool *valid) -{ - u32 data_hi, data_lo; - const u8 *shifts; - const u32 *masks; - u64 data; - int ret; - - shifts = dev->info->shifts; - masks = dev->info->masks; - - ret = ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data); - if (ret) - return ret; - - data_hi = data >> 32; - data_lo = (u32)data; - - if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] | - masks[STATIC_MAC_TABLE_OVERRIDE]))) { - *valid = false; - return 0; - } - - alu->mac[5] = (u8)data_lo; - alu->mac[4] = (u8)(data_lo >> 8); - alu->mac[3] = (u8)(data_lo >> 16); - alu->mac[2] = (u8)(data_lo >> 24); - alu->mac[1] = (u8)data_hi; - alu->mac[0] = (u8)(data_hi >> 8); - alu->port_forward = - (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >> - shifts[STATIC_MAC_FWD_PORTS]; - alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0; - - /* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and - * STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the - * static MAC table compared to doing write. - */ - if (ksz_is_ksz87xx(dev)) - data_hi >>= 1; - alu->is_static = true; - alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 
1 : 0; - alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >> - shifts[STATIC_MAC_FID]; - - *valid = true; - - return 0; -} - -static int ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr, - struct alu_struct *alu) -{ - u32 data_hi, data_lo; - const u8 *shifts; - const u32 *masks; - u64 data; - - shifts = dev->info->shifts; - masks = dev->info->masks; - - data_lo = ((u32)alu->mac[2] << 24) | - ((u32)alu->mac[3] << 16) | - ((u32)alu->mac[4] << 8) | alu->mac[5]; - data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1]; - data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS]; - - if (alu->is_override) - data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE]; - if (alu->is_use_fid) { - data_hi |= masks[STATIC_MAC_TABLE_USE_FID]; - data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID]; - } - if (alu->is_static) - data_hi |= masks[STATIC_MAC_TABLE_VALID]; - else - data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE]; - - data = (u64)data_hi << 32 | data_lo; - - return ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data); -} - -static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid, - u8 *member, u8 *valid) -{ - const u8 *shifts; - const u32 *masks; - - shifts = dev->info->shifts; - masks = dev->info->masks; - - *fid = vlan & masks[VLAN_TABLE_FID]; - *member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >> - shifts[VLAN_TABLE_MEMBERSHIP_S]; - *valid = !!(vlan & masks[VLAN_TABLE_VALID]); -} - -static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid, - u16 *vlan) -{ - const u8 *shifts; - const u32 *masks; - - shifts = dev->info->shifts; - masks = dev->info->masks; - - *vlan = fid; - *vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S]; - if (valid) - *vlan |= masks[VLAN_TABLE_VALID]; -} - -static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr) -{ - const u8 *shifts; - u64 data; - int i; - - shifts = dev->info->shifts; - - ksz8_r_table(dev, TABLE_VLAN, addr, &data); - addr *= 4; - for (i = 0; i < 4; i++) { - dev->vlan_cache[addr + i].table[0] = (u16)data; - data >>= shifts[VLAN_TABLE]; - } -} - -static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) -{ - int index; - u16 *data; - u16 addr; - u64 buf; - - data = (u16 *)&buf; - addr = vid / 4; - index = vid & 3; - ksz8_r_table(dev, TABLE_VLAN, addr, &buf); - *vlan = data[index]; -} - -static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) -{ - int index; - u16 *data; - u16 addr; - u64 buf; - - data = (u16 *)&buf; - addr = vid / 4; - index = vid & 3; - ksz8_r_table(dev, TABLE_VLAN, addr, &buf); - data[index] = vlan; - dev->vlan_cache[vid].table[0] = vlan; - ksz8_w_table(dev, TABLE_VLAN, addr, buf); -} - -/** - * ksz879x_get_loopback - KSZ879x specific function to get loopback - * configuration status for a specific port - * @dev: Pointer to the device structure - * @port: Port number to query - * @val: Pointer to store the result - * - * This function reads the SMI registers to determine whether loopback mode - * is enabled for a specific port. - * - * Return: 0 on success, error code on failure. - */ -static int ksz879x_get_loopback(struct ksz_device *dev, u16 port, - u16 *val) -{ - u8 stat3; - int ret; - - ret = ksz_pread8(dev, port, REG_PORT_STATUS_3, &stat3); - if (ret) - return ret; - - if (stat3 & PORT_PHY_LOOPBACK) - *val |= BMCR_LOOPBACK; - - return 0; -} - -/** - * ksz879x_set_loopback - KSZ879x specific function to set loopback mode for - * a specific port - * @dev: Pointer to the device structure. - * @port: Port number to modify. 
- * @val: Value indicating whether to enable or disable loopback mode. - * - * This function translates loopback bit of the BMCR register into the - * corresponding hardware register bit value and writes it to the SMI interface. - * - * Return: 0 on success, error code on failure. - */ -static int ksz879x_set_loopback(struct ksz_device *dev, u16 port, u16 val) -{ - u8 stat3 = 0; - - if (val & BMCR_LOOPBACK) - stat3 |= PORT_PHY_LOOPBACK; - - return ksz_prmw8(dev, port, REG_PORT_STATUS_3, PORT_PHY_LOOPBACK, - stat3); -} - -/** - * ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY - * Control register (Reg. 31). - * @dev: The KSZ device instance. - * @port: The port number to be read. - * @val: The value read from the SMI interface. - * - * This function reads the SMI interface and translates the hardware register - * bit values into their corresponding control settings for a MIIM PHY Control - * register. - * - * Return: 0 on success, error code on failure. - */ -static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val) -{ - const u16 *regs = dev->info->regs; - u8 reg_val; - int ret; - - *val = 0; - - ret = ksz_pread8(dev, port, regs[P_LINK_STATUS], &reg_val); - if (ret < 0) - return ret; - - if (reg_val & PORT_MDIX_STATUS) - *val |= KSZ886X_CTRL_MDIX_STAT; - - ret = ksz_pread8(dev, port, REG_PORT_LINK_MD_CTRL, &reg_val); - if (ret < 0) - return ret; - - if (reg_val & PORT_FORCE_LINK) - *val |= KSZ886X_CTRL_FORCE_LINK; - - if (reg_val & PORT_POWER_SAVING) - *val |= KSZ886X_CTRL_PWRSAVE; - - if (reg_val & PORT_PHY_REMOTE_LOOPBACK) - *val |= KSZ886X_CTRL_REMOTE_LOOPBACK; - - return 0; -} - -/** - * ksz8_r_phy_bmcr - Translates and reads from the SMI interface to a MIIM PHY - * Basic mode control register (Reg. 0). - * @dev: The KSZ device instance. - * @port: The port number to be read. - * @val: The value read from the SMI interface. - * - * This function reads the SMI interface and translates the hardware register - * bit values into their corresponding control settings for a MIIM PHY Basic - * mode control register. - * - * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873 - * ------------------------------------------------------------------- - * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit - * ----------------------------+-----------------------------+---------------- - * Bit 15 - Soft Reset | 0xF/4 | Not supported - * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY) - * Bit 13 - Force 100 | 0xC/6 = 0xC/6 - * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7 - * Bit 11 - Power Down | 0xD/3 = 0xD/3 - * Bit 10 - PHY Isolate | 0xF/5 | Not supported - * Bit 9 - Restart AN | 0xD/5 = 0xD/5 - * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5 - * Bit 7 - Collision Test/Res. | Not supported | Not supported - * Bit 6 - Reserved | Not supported | Not supported - * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7 - * Bit 4 - Force MDI | 0xD/1 = 0xD/1 - * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2 - * Bit 2 - Disable Far-End F. | ???? | 0xD/4 - * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6 - * Bit 0 - Disable LED | 0xD/7 = 0xD/7 - * ------------------------------------------------------------------- - * - * Return: 0 on success, error code on failure.
- */ -static int ksz8_r_phy_bmcr(struct ksz_device *dev, u16 port, u16 *val) -{ - const u16 *regs = dev->info->regs; - u8 restart, speed, ctrl; - int ret; - - *val = 0; - - ret = ksz_pread8(dev, port, regs[P_NEG_RESTART_CTRL], &restart); - if (ret) - return ret; - - ret = ksz_pread8(dev, port, regs[P_SPEED_STATUS], &speed); - if (ret) - return ret; - - ret = ksz_pread8(dev, port, regs[P_FORCE_CTRL], &ctrl); - if (ret) - return ret; - - if (ctrl & PORT_FORCE_100_MBIT) - *val |= BMCR_SPEED100; - - if (ksz_is_ksz88x3(dev)) { - if (restart & KSZ8873_PORT_PHY_LOOPBACK) - *val |= BMCR_LOOPBACK; - - if ((ctrl & PORT_AUTO_NEG_ENABLE)) - *val |= BMCR_ANENABLE; - } else { - ret = ksz879x_get_loopback(dev, port, val); - if (ret) - return ret; - - if (!(ctrl & PORT_AUTO_NEG_DISABLE)) - *val |= BMCR_ANENABLE; - } - - if (restart & PORT_POWER_DOWN) - *val |= BMCR_PDOWN; - - if (restart & PORT_AUTO_NEG_RESTART) - *val |= BMCR_ANRESTART; - - if (ctrl & PORT_FORCE_FULL_DUPLEX) - *val |= BMCR_FULLDPLX; - - if (speed & PORT_HP_MDIX) - *val |= KSZ886X_BMCR_HP_MDIX; - - if (restart & PORT_FORCE_MDIX) - *val |= KSZ886X_BMCR_FORCE_MDI; - - if (restart & PORT_AUTO_MDIX_DISABLE) - *val |= KSZ886X_BMCR_DISABLE_AUTO_MDIX; - - if (restart & PORT_TX_DISABLE) - *val |= KSZ886X_BMCR_DISABLE_TRANSMIT; - - if (restart & PORT_LED_OFF) - *val |= KSZ886X_BMCR_DISABLE_LED; - - return 0; -} - -int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) -{ - u8 ctrl, link, val1, val2; - int processed = true; - const u16 *regs; - u16 data = 0; - u16 p = phy; - int ret; - - regs = dev->info->regs; - - switch (reg) { - case MII_BMCR: - ret = ksz8_r_phy_bmcr(dev, p, &data); - if (ret) - return ret; - break; - case MII_BMSR: - ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link); - if (ret) - return ret; - - data = BMSR_100FULL | - BMSR_100HALF | - BMSR_10FULL | - BMSR_10HALF | - BMSR_ANEGCAPABLE; - if (link & PORT_AUTO_NEG_COMPLETE) - data |= BMSR_ANEGCOMPLETE; - if (link & PORT_STAT_LINK_GOOD) - data |= BMSR_LSTATUS; - break; - case MII_PHYSID1: - data = KSZ8795_ID_HI; - break; - case MII_PHYSID2: - if (ksz_is_ksz88x3(dev)) - data = KSZ8863_ID_LO; - else - data = KSZ8795_ID_LO; - break; - case MII_ADVERTISE: - ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl); - if (ret) - return ret; - - data = ADVERTISE_CSMA; - if (ctrl & PORT_AUTO_NEG_SYM_PAUSE) - data |= ADVERTISE_PAUSE_CAP; - if (ctrl & PORT_AUTO_NEG_100BTX_FD) - data |= ADVERTISE_100FULL; - if (ctrl & PORT_AUTO_NEG_100BTX) - data |= ADVERTISE_100HALF; - if (ctrl & PORT_AUTO_NEG_10BT_FD) - data |= ADVERTISE_10FULL; - if (ctrl & PORT_AUTO_NEG_10BT) - data |= ADVERTISE_10HALF; - break; - case MII_LPA: - ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link); - if (ret) - return ret; - - data = LPA_SLCT; - if (link & PORT_REMOTE_SYM_PAUSE) - data |= LPA_PAUSE_CAP; - if (link & PORT_REMOTE_100BTX_FD) - data |= LPA_100FULL; - if (link & PORT_REMOTE_100BTX) - data |= LPA_100HALF; - if (link & PORT_REMOTE_10BT_FD) - data |= LPA_10FULL; - if (link & PORT_REMOTE_10BT) - data |= LPA_10HALF; - if (data & ~LPA_SLCT) - data |= LPA_LPACK; - break; - case PHY_REG_LINK_MD: - ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1); - if (ret) - return ret; - - ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2); - if (ret) - return ret; - - if (val1 & PORT_START_CABLE_DIAG) - data |= PHY_START_CABLE_DIAG; - - if (val1 & PORT_CABLE_10M_SHORT) - data |= PHY_CABLE_10M_SHORT; - - data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M, - FIELD_GET(PORT_CABLE_DIAG_RESULT_M, val1)); - - data |= 
FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M, - (FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, val1) << 8) | - FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2)); - break; - case PHY_REG_PHY_CTRL: - ret = ksz8_r_phy_ctrl(dev, p, &data); - if (ret) - return ret; - - break; - default: - processed = false; - break; - } - if (processed) - *val = data; - - return 0; -} - -/** - * ksz8_w_phy_ctrl - Translates and writes to the SMI interface from a MIIM PHY - * Control register (Reg. 31). - * @dev: The KSZ device instance. - * @port: The port number to be configured. - * @val: The register value to be written. - * - * This function translates control settings from a MIIM PHY Control register - * into their corresponding hardware register bit values for the SMI - * interface. - * - * Return: 0 on success, error code on failure. - */ -static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val) -{ - u8 reg_val = 0; - int ret; - - if (val & KSZ886X_CTRL_FORCE_LINK) - reg_val |= PORT_FORCE_LINK; - - if (val & KSZ886X_CTRL_PWRSAVE) - reg_val |= PORT_POWER_SAVING; - - if (val & KSZ886X_CTRL_REMOTE_LOOPBACK) - reg_val |= PORT_PHY_REMOTE_LOOPBACK; - - ret = ksz_prmw8(dev, port, REG_PORT_LINK_MD_CTRL, PORT_FORCE_LINK | - PORT_POWER_SAVING | PORT_PHY_REMOTE_LOOPBACK, reg_val); - return ret; -} - -/** - * ksz8_w_phy_bmcr - Translates and writes to the SMI interface from a MIIM PHY - * Basic mode control register (Reg. 0). - * @dev: The KSZ device instance. - * @port: The port number to be configured. - * @val: The register value to be written. - * - * This function translates control settings from a MIIM PHY Basic mode control - * register into their corresponding hardware register bit values for the SMI - * interface. - * - * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873 - * ------------------------------------------------------------------- - * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit - * ----------------------------+-----------------------------+---------------- - * Bit 15 - Soft Reset | 0xF/4 | Not supported - * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY) - * Bit 13 - Force 100 | 0xC/6 = 0xC/6 - * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7 - * Bit 11 - Power Down | 0xD/3 = 0xD/3 - * Bit 10 - PHY Isolate | 0xF/5 | Not supported - * Bit 9 - Restart AN | 0xD/5 = 0xD/5 - * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5 - * Bit 7 - Collision Test/Res. | Not supported | Not supported - * Bit 6 - Reserved | Not supported | Not supported - * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7 - * Bit 4 - Force MDI | 0xD/1 = 0xD/1 - * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2 - * Bit 2 - Disable Far-End F. | ???? | 0xD/4 - * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6 - * Bit 0 - Disable LED | 0xD/7 = 0xD/7 - * ------------------------------------------------------------------- - * - * Return: 0 on success, error code on failure. - */ -static int ksz8_w_phy_bmcr(struct ksz_device *dev, u16 port, u16 val) -{ - u8 restart, speed, ctrl, restart_mask; - const u16 *regs = dev->info->regs; - int ret; - - /* Do not support PHY reset function. */ - if (val & BMCR_RESET) - return 0; - - speed = 0; - if (val & KSZ886X_BMCR_HP_MDIX) - speed |= PORT_HP_MDIX; - - ret = ksz_prmw8(dev, port, regs[P_SPEED_STATUS], PORT_HP_MDIX, speed); - if (ret) - return ret; - - ctrl = 0; - if (ksz_is_ksz88x3(dev)) { - if ((val & BMCR_ANENABLE)) - ctrl |= PORT_AUTO_NEG_ENABLE; - } else { - if (!(val & BMCR_ANENABLE)) - ctrl |= PORT_AUTO_NEG_DISABLE; - - /* Fiber port does not support auto-negotiation. 
*/ - if (dev->ports[port].fiber) - ctrl |= PORT_AUTO_NEG_DISABLE; - } - - if (val & BMCR_SPEED100) - ctrl |= PORT_FORCE_100_MBIT; - - if (val & BMCR_FULLDPLX) - ctrl |= PORT_FORCE_FULL_DUPLEX; - - ret = ksz_prmw8(dev, port, regs[P_FORCE_CTRL], PORT_FORCE_100_MBIT | - /* PORT_AUTO_NEG_ENABLE and PORT_AUTO_NEG_DISABLE are the same - * bits - */ - PORT_FORCE_FULL_DUPLEX | PORT_AUTO_NEG_ENABLE, ctrl); - if (ret) - return ret; - - restart = 0; - restart_mask = PORT_LED_OFF | PORT_TX_DISABLE | PORT_AUTO_NEG_RESTART | - PORT_POWER_DOWN | PORT_AUTO_MDIX_DISABLE | PORT_FORCE_MDIX; - - if (val & KSZ886X_BMCR_DISABLE_LED) - restart |= PORT_LED_OFF; - - if (val & KSZ886X_BMCR_DISABLE_TRANSMIT) - restart |= PORT_TX_DISABLE; - - if (val & BMCR_ANRESTART) - restart |= PORT_AUTO_NEG_RESTART; - - if (val & BMCR_PDOWN) - restart |= PORT_POWER_DOWN; - - if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX) - restart |= PORT_AUTO_MDIX_DISABLE; - - if (val & KSZ886X_BMCR_FORCE_MDI) - restart |= PORT_FORCE_MDIX; - - if (ksz_is_ksz88x3(dev)) { - restart_mask |= KSZ8873_PORT_PHY_LOOPBACK; - - if (val & BMCR_LOOPBACK) - restart |= KSZ8873_PORT_PHY_LOOPBACK; - } else { - ret = ksz879x_set_loopback(dev, port, val); - if (ret) - return ret; - } - - return ksz_prmw8(dev, port, regs[P_NEG_RESTART_CTRL], restart_mask, - restart); -} - -int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) -{ - const u16 *regs; - u8 ctrl, data; - u16 p = phy; - int ret; - - regs = dev->info->regs; - - switch (reg) { - case MII_BMCR: - ret = ksz8_w_phy_bmcr(dev, p, val); - if (ret) - return ret; - break; - case MII_ADVERTISE: - ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl); - if (ret) - return ret; - - data = ctrl; - data &= ~(PORT_AUTO_NEG_SYM_PAUSE | - PORT_AUTO_NEG_100BTX_FD | - PORT_AUTO_NEG_100BTX | - PORT_AUTO_NEG_10BT_FD | - PORT_AUTO_NEG_10BT); - if (val & ADVERTISE_PAUSE_CAP) - data |= PORT_AUTO_NEG_SYM_PAUSE; - if (val & ADVERTISE_100FULL) - data |= PORT_AUTO_NEG_100BTX_FD; - if (val & ADVERTISE_100HALF) - data |= PORT_AUTO_NEG_100BTX; - if (val & ADVERTISE_10FULL) - data |= PORT_AUTO_NEG_10BT_FD; - if (val & ADVERTISE_10HALF) - data |= PORT_AUTO_NEG_10BT; - - if (data != ctrl) { - ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data); - if (ret) - return ret; - } - break; - case PHY_REG_LINK_MD: - if (val & PHY_START_CABLE_DIAG) - ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true); - break; - - case PHY_REG_PHY_CTRL: - ret = ksz8_w_phy_ctrl(dev, p, val); - if (ret) - return ret; - break; - default: - break; - } - - return 0; -} - -void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) -{ - u8 data; - - ksz_pread8(dev, port, P_MIRROR_CTRL, &data); - data &= ~PORT_VLAN_MEMBERSHIP; - data |= (member & dev->port_mask); - ksz_pwrite8(dev, port, P_MIRROR_CTRL, data); -} - -void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) -{ - u8 learn[DSA_MAX_PORTS]; - int first, index, cnt; - const u16 *regs; - - regs = dev->info->regs; - - if ((uint)port < dev->info->port_cnt) { - first = port; - cnt = port + 1; - } else { - /* Flush all ports. 
*/ - first = 0; - cnt = dev->info->port_cnt; - } - for (index = first; index < cnt; index++) { - ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]); - if (!(learn[index] & PORT_LEARN_DISABLE)) - ksz_pwrite8(dev, index, regs[P_STP_CTRL], - learn[index] | PORT_LEARN_DISABLE); - } - ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true); - for (index = first; index < cnt; index++) { - if (!(learn[index] & PORT_LEARN_DISABLE)) - ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]); - } -} - -int ksz8_fdb_dump(struct ksz_device *dev, int port, - dsa_fdb_dump_cb_t *cb, void *data) -{ - u8 mac[ETH_ALEN]; - u8 src_port, fid; - u16 entries = 0; - int ret, i; - - for (i = 0; i < KSZ8_DYN_MAC_ENTRIES; i++) { - ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port, - &entries); - if (ret) - return ret; - - if (i >= entries) - return 0; - - if (port == src_port) { - ret = cb(mac, fid, false, data); - if (ret) - return ret; - } - } - - return 0; -} - -static int ksz8_add_sta_mac(struct ksz_device *dev, int port, - const unsigned char *addr, u16 vid) -{ - struct alu_struct alu; - int index, ret; - int empty = 0; - - alu.port_forward = 0; - for (index = 0; index < dev->info->num_statics; index++) { - bool valid; - - ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid); - if (ret) - return ret; - if (!valid) { - /* Remember the first empty entry. */ - if (!empty) - empty = index + 1; - continue; - } - - if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid) - break; - } - - /* no available entry */ - if (index == dev->info->num_statics && !empty) - return -ENOSPC; - - /* add entry */ - if (index == dev->info->num_statics) { - index = empty - 1; - memset(&alu, 0, sizeof(alu)); - memcpy(alu.mac, addr, ETH_ALEN); - alu.is_static = true; - } - alu.port_forward |= BIT(port); - if (vid) { - alu.is_use_fid = true; - - /* Need a way to map VID to FID. 
*/ - alu.fid = vid; - } - - return ksz8_w_sta_mac_table(dev, index, &alu); -} - -static int ksz8_del_sta_mac(struct ksz_device *dev, int port, - const unsigned char *addr, u16 vid) -{ - struct alu_struct alu; - int index, ret; - - for (index = 0; index < dev->info->num_statics; index++) { - bool valid; - - ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid); - if (ret) - return ret; - if (!valid) - continue; - - if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid) - break; - } - - /* no available entry */ - if (index == dev->info->num_statics) - return 0; - - /* clear port */ - alu.port_forward &= ~BIT(port); - if (!alu.port_forward) - alu.is_static = false; - - return ksz8_w_sta_mac_table(dev, index, &alu); -} - -int ksz8_mdb_add(struct ksz_device *dev, int port, - const struct switchdev_obj_port_mdb *mdb, struct dsa_db db) -{ - return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid); -} - -int ksz8_mdb_del(struct ksz_device *dev, int port, - const struct switchdev_obj_port_mdb *mdb, struct dsa_db db) -{ - return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid); -} - -int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr, - u16 vid, struct dsa_db db) -{ - return ksz8_add_sta_mac(dev, port, addr, vid); -} - -int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr, - u16 vid, struct dsa_db db) -{ - return ksz8_del_sta_mac(dev, port, addr, vid); -} - -int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag, - struct netlink_ext_ack *extack) -{ - if (ksz_is_ksz88x3(dev)) - return -ENOTSUPP; - - /* Discard packets with VID not enabled on the switch */ - ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag); - - /* Discard packets with VID not enabled on the ingress port */ - for (port = 0; port < dev->phy_port_cnt; ++port) - ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER, - flag); - - return 0; -} - -static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state) -{ - if (ksz_is_ksz88x3(dev)) { - ksz_cfg(dev, REG_SW_INSERT_SRC_PVID, - 0x03 << (4 - 2 * port), state); - } else { - ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00); - } -} - -int ksz8_port_vlan_add(struct ksz_device *dev, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) -{ - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - struct ksz_port *p = &dev->ports[port]; - u16 data, new_pvid = 0; - u8 fid, member, valid; - - if (ksz_is_ksz88x3(dev)) - return -ENOTSUPP; - - /* If a VLAN is added with untagged flag different from the - * port's Remove Tag flag, we need to change the latter. - * Ignore VID 0, which is always untagged. - * Ignore CPU port, which will always be tagged. - */ - if (untagged != p->remove_tag && vlan->vid != 0 && - port != dev->cpu_port) { - unsigned int vid; - - /* Reject attempts to add a VLAN that requires the - * Remove Tag flag to be changed, unless there are no - * other VLANs currently configured. - */ - for (vid = 1; vid < dev->info->num_vlans; ++vid) { - /* Skip the VID we are going to add or reconfigure */ - if (vid == vlan->vid) - continue; - - ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0], - &fid, &member, &valid); - if (valid && (member & BIT(port))) - return -EINVAL; - } - - ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged); - p->remove_tag = untagged; - } - - ksz8_r_vlan_table(dev, vlan->vid, &data); - ksz8_from_vlan(dev, data, &fid, &member, &valid); - - /* First time to setup the VLAN entry. 
*/ - if (!valid) { - /* Need to find a way to map VID to FID. */ - fid = 1; - valid = 1; - } - member |= BIT(port); - - ksz8_to_vlan(dev, fid, member, valid, &data); - ksz8_w_vlan_table(dev, vlan->vid, data); - - /* change PVID */ - if (vlan->flags & BRIDGE_VLAN_INFO_PVID) - new_pvid = vlan->vid; - - if (new_pvid) { - u16 vid; - - ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid); - vid &= ~VLAN_VID_MASK; - vid |= new_pvid; - ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid); - - ksz8_port_enable_pvid(dev, port, true); - } - - return 0; -} - -int ksz8_port_vlan_del(struct ksz_device *dev, int port, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 data, pvid; - u8 fid, member, valid; - - if (ksz_is_ksz88x3(dev)) - return -ENOTSUPP; - - ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid); - pvid = pvid & 0xFFF; - - ksz8_r_vlan_table(dev, vlan->vid, &data); - ksz8_from_vlan(dev, data, &fid, &member, &valid); - - member &= ~BIT(port); - - /* Invalidate the entry if no more member. */ - if (!member) { - fid = 0; - valid = 0; - } - - ksz8_to_vlan(dev, fid, member, valid, &data); - ksz8_w_vlan_table(dev, vlan->vid, data); - - if (pvid == vlan->vid) - ksz8_port_enable_pvid(dev, port, false); - - return 0; -} - -int ksz8_port_mirror_add(struct ksz_device *dev, int port, - struct dsa_mall_mirror_tc_entry *mirror, - bool ingress, struct netlink_ext_ack *extack) -{ - if (ingress) { - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); - dev->mirror_rx |= BIT(port); - } else { - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); - dev->mirror_tx |= BIT(port); - } - - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); - - /* configure mirror port */ - if (dev->mirror_rx || dev->mirror_tx) - ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, - PORT_MIRROR_SNIFFER, true); - - return 0; -} - -void ksz8_port_mirror_del(struct ksz_device *dev, int port, - struct dsa_mall_mirror_tc_entry *mirror) -{ - u8 data; - - if (mirror->ingress) { - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); - dev->mirror_rx &= ~BIT(port); - } else { - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); - dev->mirror_tx &= ~BIT(port); - } - - ksz_pread8(dev, port, P_MIRROR_CTRL, &data); - - if (!dev->mirror_rx && !dev->mirror_tx) - ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, - PORT_MIRROR_SNIFFER, false); -} - -static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port) -{ - struct ksz_port *p = &dev->ports[port]; - - if (!ksz_is_ksz87xx(dev)) - return; - - if (!p->interface && dev->compat_interface) { - dev_warn(dev->dev, - "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. " - "Please update your device tree.\n", - port); - p->interface = dev->compat_interface; - } -} - -void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) -{ - struct dsa_switch *ds = dev->ds; - const u32 *masks; - int queues; - u8 member; - - masks = dev->info->masks; - - /* enable broadcast storm limit */ - ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); - - /* For KSZ88x3 enable only one queue by default, otherwise we won't - * be able to get rid of PCP prios on Port 2. 
- */ - if (ksz_is_ksz88x3(dev)) - queues = 1; - else - queues = dev->info->num_tx_queues; - - ksz8_port_queue_split(dev, port, queues); - - /* replace priority */ - ksz_port_cfg(dev, port, P_802_1P_CTRL, - masks[PORT_802_1P_REMAPPING], false); - - if (cpu_port) - member = dsa_user_ports(ds); - else - member = BIT(dsa_upstream_port(ds, port)); - - ksz8_cfg_port_member(dev, port, member); -} - -static void ksz88x3_config_rmii_clk(struct ksz_device *dev) -{ - struct dsa_port *cpu_dp = dsa_to_port(dev->ds, dev->cpu_port); - bool rmii_clk_internal; - - if (!ksz_is_ksz88x3(dev)) - return; - - rmii_clk_internal = of_property_read_bool(cpu_dp->dn, - "microchip,rmii-clk-internal"); - - ksz_cfg(dev, KSZ88X3_REG_FVID_AND_HOST_MODE, - KSZ88X3_PORT3_RMII_CLK_INTERNAL, rmii_clk_internal); -} - -void ksz8_config_cpu_port(struct dsa_switch *ds) -{ - struct ksz_device *dev = ds->priv; - struct ksz_port *p; - const u32 *masks; - const u16 *regs; - u8 remote; - int i; - - masks = dev->info->masks; - regs = dev->info->regs; - - ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true); - - ksz8_port_setup(dev, dev->cpu_port, true); - - ksz8795_cpu_interface_select(dev, dev->cpu_port); - ksz88x3_config_rmii_clk(dev); - - for (i = 0; i < dev->phy_port_cnt; i++) { - ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED); - } - for (i = 0; i < dev->phy_port_cnt; i++) { - p = &dev->ports[i]; - - if (!ksz_is_ksz88x3(dev)) { - ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote); - if (remote & KSZ8_PORT_FIBER_MODE) - p->fiber = 1; - } - if (p->fiber) - ksz_port_cfg(dev, i, regs[P_STP_CTRL], - PORT_FORCE_FLOW_CTRL, true); - else - ksz_port_cfg(dev, i, regs[P_STP_CTRL], - PORT_FORCE_FLOW_CTRL, false); - } -} - -/** - * ksz8_phy_port_link_up - Configures ports with integrated PHYs - * @dev: The KSZ device instance. - * @port: The port number to configure. - * @duplex: The desired duplex mode. - * @tx_pause: If true, enables transmit pause. - * @rx_pause: If true, enables receive pause. - * - * Description: - * The function configures flow control settings for a given port based on the - * desired settings and current duplex mode. - * - * According to the KSZ8873 datasheet, the PORT_FORCE_FLOW_CTRL bit in the - * Port Control 2 register (0x1A for Port 1, 0x22 for Port 2, 0x32 for Port 3) - * determines how flow control is handled on the port: - * "1 = will always enable full-duplex flow control on the port, regardless - * of AN result. - * 0 = full-duplex flow control is enabled based on AN result." - * - * This means that the flow control behavior depends on the state of this bit: - * - If PORT_FORCE_FLOW_CTRL is set to 1, the switch will ignore AN results and - * force flow control on the port. - * - If PORT_FORCE_FLOW_CTRL is set to 0, the switch will enable or disable - * flow control based on the AN results. - * - * However, there is a potential limitation in this configuration. It is - * currently not possible to force disable flow control on a port if we still - * advertise pause support. While such a configuration is not currently - * supported by Linux, and may not make practical sense, it's important to be - * aware of this limitation when working with the KSZ8873 and similar devices. - */ -static void ksz8_phy_port_link_up(struct ksz_device *dev, int port, int duplex, - bool tx_pause, bool rx_pause) -{ - const u16 *regs = dev->info->regs; - u8 sctrl = 0; - - /* The KSZ8795 switch differs from the KSZ8873 by supporting - * asymmetric pause control. 
However, since a single bit is used to - * control both RX and TX pause, we can't enforce asymmetric pause - * control - both TX and RX pause will be either enabled or disabled - * together. - * - * If auto-negotiation is enabled, we usually allow the flow control to - * be determined by the auto-negotiation process based on the - * capabilities of both link partners. However, for KSZ8873, the - * PORT_FORCE_FLOW_CTRL bit may be set by the hardware bootstrap, - * ignoring the auto-negotiation result. Thus, even in auto-negotiation - * mode, we need to ensure that the PORT_FORCE_FLOW_CTRL bit is - * properly cleared. - * - * In the absence of pause auto-negotiation, we will enforce symmetric - * pause control for both variants of switches - KSZ8873 and KSZ8795. - * - * Autoneg Pause Autoneg rx,tx PORT_FORCE_FLOW_CTRL - * 1 1 x 0 - * 0 1 x 0 (flow control probably disabled) - * x 0 1 1 (flow control force enabled) - * 1 0 0 0 (flow control still depends on - * aneg result due to hardware) - * 0 0 0 0 (flow control probably disabled) - */ - if (dev->ports[port].manual_flow && tx_pause) - sctrl |= PORT_FORCE_FLOW_CTRL; - - ksz_prmw8(dev, port, regs[P_STP_CTRL], PORT_FORCE_FLOW_CTRL, sctrl); -} - -/** - * ksz8_cpu_port_link_up - Configures the CPU port of the switch. - * @dev: The KSZ device instance. - * @speed: The desired link speed. - * @duplex: The desired duplex mode. - * @tx_pause: If true, enables transmit pause. - * @rx_pause: If true, enables receive pause. - * - * Description: - * The function configures flow control and speed settings for the CPU - * port of the switch based on the desired settings, current duplex mode, and - * speed. - */ -static void ksz8_cpu_port_link_up(struct ksz_device *dev, int speed, int duplex, - bool tx_pause, bool rx_pause) -{ - const u16 *regs = dev->info->regs; - u8 ctrl = 0; - - /* SW_FLOW_CTRL, SW_HALF_DUPLEX, and SW_10_MBIT bits are bootstrappable - * at least on KSZ8873. They can have different values depending on your - * board setup. - */ - if (tx_pause || rx_pause) - ctrl |= SW_FLOW_CTRL; - - if (duplex == DUPLEX_HALF) - ctrl |= SW_HALF_DUPLEX; - - /* This hardware only supports SPEED_10 and SPEED_100. For SPEED_10 - * we need to set the SW_10_MBIT bit. Otherwise, we can leave it 0. - */ - if (speed == SPEED_10) - ctrl |= SW_10_MBIT; - - ksz_rmw8(dev, regs[S_BROADCAST_CTRL], SW_HALF_DUPLEX | SW_FLOW_CTRL | - SW_10_MBIT, ctrl); -} - -void ksz8_phylink_mac_link_up(struct phylink_config *config, - struct phy_device *phydev, unsigned int mode, - phy_interface_t interface, int speed, int duplex, - bool tx_pause, bool rx_pause) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct ksz_device *dev = dp->ds->priv; - int port = dp->index; - - /* If the port is the CPU port, apply special handling. Only the CPU - * port is configured via global registers. - */ - if (dev->cpu_port == port) - ksz8_cpu_port_link_up(dev, speed, duplex, tx_pause, rx_pause); - else if (dev->info->internal_phy[port]) - ksz8_phy_port_link_up(dev, port, duplex, tx_pause, rx_pause); -} - -static int ksz8_handle_global_errata(struct dsa_switch *ds) -{ - struct ksz_device *dev = ds->priv; - int ret = 0; - - /* KSZ87xx Errata DS80000687C. - * Module 2: Link drops with some EEE link partners. - * An issue with the EEE next page exchange between the - * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in - * the link dropping. 
- */ - if (dev->info->ksz87xx_eee_link_erratum) - ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0); - - return ret; -} - -int ksz8_enable_stp_addr(struct ksz_device *dev) -{ - struct alu_struct alu; - - /* Setup STP address for STP operation. */ - memset(&alu, 0, sizeof(alu)); - ether_addr_copy(alu.mac, eth_stp_addr); - alu.is_static = true; - alu.is_override = true; - alu.port_forward = dev->info->cpu_ports; - - return ksz8_w_sta_mac_table(dev, 0, &alu); -} - -int ksz8_setup(struct dsa_switch *ds) -{ - struct ksz_device *dev = ds->priv; - int i; - - ds->mtu_enforcement_ingress = true; - - /* We rely on software untagging on the CPU port, so that we - * can support both tagged and untagged VLANs - */ - ds->untag_bridge_pvid = true; - - /* VLAN filtering is partly controlled by the global VLAN - * Enable flag - */ - ds->vlan_filtering_is_global = true; - - /* Enable automatic fast aging when link changed detected. */ - ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true); - - /* Enable aggressive back off algorithm in half duplex mode. */ - regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1, - SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); - - /* - * Make sure unicast VLAN boundary is set as default and - * enable no excessive collision drop. - */ - regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2, - UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, - UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); - - ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false); - - ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false); - - if (!ksz_is_ksz88x3(dev)) - ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true); - - for (i = 0; i < (dev->info->num_vlans / 4); i++) - ksz8_r_vlan_entries(dev, i); - - return ksz8_handle_global_errata(ds); -} - -void ksz8_get_caps(struct ksz_device *dev, int port, - struct phylink_config *config) -{ - config->mac_capabilities = MAC_10 | MAC_100; - - /* Silicon Errata Sheet (DS80000830A): - * "Port 1 does not respond to received flow control PAUSE frames" - * So, disable Pause support on "Port 1" (port == 0) for all ksz88x3 - * switches. - */ - if (!ksz_is_ksz88x3(dev) || port) - config->mac_capabilities |= MAC_SYM_PAUSE; - - /* Asym pause is not supported on KSZ8863 and KSZ8873 */ - if (!ksz_is_ksz88x3(dev)) - config->mac_capabilities |= MAC_ASYM_PAUSE; -} - -u32 ksz8_get_port_addr(int port, int offset) -{ - return PORT_CTRL_ADDR(port, offset); -} - -int ksz8_switch_init(struct ksz_device *dev) -{ - dev->cpu_port = fls(dev->info->cpu_ports) - 1; - dev->phy_port_cnt = dev->info->port_cnt - 1; - dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports; - - return 0; -} - -void ksz8_switch_exit(struct ksz_device *dev) -{ - ksz8_reset_switch(dev); -} - -MODULE_AUTHOR("Tristram Ha "); -MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h deleted file mode 100644 index 69566a5d9cda1d..00000000000000 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ /dev/null @@ -1,798 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Microchip KSZ8795 register definitions - * - * Copyright (c) 2017 Microchip Technology Inc. 
- * Tristram Ha - */ - -#ifndef __KSZ8795_REG_H -#define __KSZ8795_REG_H - -#define KS_PORT_M 0x1F - -#define KS_PRIO_M 0x3 -#define KS_PRIO_S 2 - -#define SW_REVISION_M 0x0E -#define SW_REVISION_S 1 - -#define KSZ8863_REG_SW_RESET 0x43 - -#define KSZ8863_GLOBAL_SOFTWARE_RESET BIT(4) -#define KSZ8863_PCS_RESET BIT(0) - -#define KSZ88X3_REG_FVID_AND_HOST_MODE 0xC6 -#define KSZ88X3_PORT3_RMII_CLK_INTERNAL BIT(3) - -#define REG_SW_CTRL_0 0x02 - -#define SW_NEW_BACKOFF BIT(7) -#define SW_GLOBAL_RESET BIT(6) -#define SW_FLUSH_DYN_MAC_TABLE BIT(5) -#define SW_FLUSH_STA_MAC_TABLE BIT(4) -#define SW_LINK_AUTO_AGING BIT(0) - -#define REG_SW_CTRL_1 0x03 - -#define SW_HUGE_PACKET BIT(6) -#define SW_TX_FLOW_CTRL_DISABLE BIT(5) -#define SW_RX_FLOW_CTRL_DISABLE BIT(4) -#define SW_CHECK_LENGTH BIT(3) -#define SW_AGING_ENABLE BIT(2) -#define SW_FAST_AGING BIT(1) -#define SW_AGGR_BACKOFF BIT(0) - -#define REG_SW_CTRL_2 0x04 - -#define UNICAST_VLAN_BOUNDARY BIT(7) -#define SW_BACK_PRESSURE BIT(5) -#define FAIR_FLOW_CTRL BIT(4) -#define NO_EXC_COLLISION_DROP BIT(3) -#define SW_LEGAL_PACKET_DISABLE BIT(1) - -#define KSZ8863_HUGE_PACKET_ENABLE BIT(2) -#define KSZ8863_LEGAL_PACKET_ENABLE BIT(1) - -#define REG_SW_CTRL_3 0x05 - #define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3) - -#define SW_VLAN_ENABLE BIT(7) -#define SW_IGMP_SNOOP BIT(6) -#define SW_MIRROR_RX_TX BIT(0) - -#define REG_SW_CTRL_4 0x06 - -#define SW_HALF_DUPLEX_FLOW_CTRL BIT(7) -#define SW_HALF_DUPLEX BIT(6) -#define SW_FLOW_CTRL BIT(5) -#define SW_10_MBIT BIT(4) -#define SW_REPLACE_VID BIT(3) - -#define REG_SW_CTRL_5 0x07 - -#define REG_SW_CTRL_6 0x08 - -#define SW_MIB_COUNTER_FLUSH BIT(7) -#define SW_MIB_COUNTER_FREEZE BIT(6) -#define SW_MIB_COUNTER_CTRL_ENABLE KS_PORT_M - -#define REG_SW_CTRL_9 0x0B - -#define SPI_CLK_125_MHZ 0x80 -#define SPI_CLK_62_5_MHZ 0x40 -#define SPI_CLK_31_25_MHZ 0x00 - -#define SW_LED_MODE_M 0x3 -#define SW_LED_MODE_S 4 -#define SW_LED_LINK_ACT_SPEED 0 -#define SW_LED_LINK_ACT 1 -#define SW_LED_LINK_ACT_DUPLEX 2 -#define SW_LED_LINK_DUPLEX 3 - -#define REG_SW_CTRL_10 0x0C - -#define SW_PASS_PAUSE BIT(0) - -#define REG_SW_CTRL_11 0x0D - -#define REG_POWER_MANAGEMENT_1 0x0E - -#define SW_PLL_POWER_DOWN BIT(5) -#define SW_POWER_MANAGEMENT_MODE_M 0x3 -#define SW_POWER_MANAGEMENT_MODE_S 3 -#define SW_POWER_NORMAL 0 -#define SW_ENERGY_DETECTION 1 -#define SW_SOFTWARE_POWER_DOWN 2 - -#define REG_POWER_MANAGEMENT_2 0x0F - -#define REG_PORT_1_CTRL_0 0x10 -#define REG_PORT_2_CTRL_0 0x20 -#define REG_PORT_3_CTRL_0 0x30 -#define REG_PORT_4_CTRL_0 0x40 -#define REG_PORT_5_CTRL_0 0x50 - -#define PORT_BROADCAST_STORM BIT(7) -#define PORT_DIFFSERV_ENABLE BIT(6) -#define PORT_802_1P_ENABLE BIT(5) -#define PORT_BASED_PRIO_S 3 -#define PORT_BASED_PRIO_M KS_PRIO_M -#define PORT_BASED_PRIO_0 0 -#define PORT_BASED_PRIO_1 1 -#define PORT_BASED_PRIO_2 2 -#define PORT_BASED_PRIO_3 3 -#define PORT_INSERT_TAG BIT(2) -#define PORT_REMOVE_TAG BIT(1) -#define KSZ8795_PORT_2QUEUE_SPLIT_EN BIT(0) -#define KSZ8873_PORT_4QUEUE_SPLIT_EN BIT(0) - -#define REG_PORT_1_CTRL_1 0x11 -#define REG_PORT_2_CTRL_1 0x21 -#define REG_PORT_3_CTRL_1 0x31 -#define REG_PORT_4_CTRL_1 0x41 -#define REG_PORT_5_CTRL_1 0x51 - -#define PORT_MIRROR_SNIFFER BIT(7) -#define PORT_MIRROR_RX BIT(6) -#define PORT_MIRROR_TX BIT(5) -#define PORT_VLAN_MEMBERSHIP KS_PORT_M - -#define REG_PORT_1_CTRL_2 0x12 -#define REG_PORT_2_CTRL_2 0x22 -#define REG_PORT_3_CTRL_2 0x32 -#define REG_PORT_4_CTRL_2 0x42 -#define REG_PORT_5_CTRL_2 0x52 - -#define KSZ8873_PORT_2QUEUE_SPLIT_EN BIT(7) -#define 
PORT_INGRESS_FILTER BIT(6) -#define PORT_DISCARD_NON_VID BIT(5) -#define PORT_FORCE_FLOW_CTRL BIT(4) -#define PORT_BACK_PRESSURE BIT(3) - -#define REG_PORT_1_CTRL_3 0x13 -#define REG_PORT_2_CTRL_3 0x23 -#define REG_PORT_3_CTRL_3 0x33 -#define REG_PORT_4_CTRL_3 0x43 -#define REG_PORT_5_CTRL_3 0x53 -#define REG_PORT_1_CTRL_4 0x14 -#define REG_PORT_2_CTRL_4 0x24 -#define REG_PORT_3_CTRL_4 0x34 -#define REG_PORT_4_CTRL_4 0x44 -#define REG_PORT_5_CTRL_4 0x54 - -#define PORT_DEFAULT_VID 0x0001 - -#define REG_PORT_1_CTRL_5 0x15 -#define REG_PORT_2_CTRL_5 0x25 -#define REG_PORT_3_CTRL_5 0x35 -#define REG_PORT_4_CTRL_5 0x45 -#define REG_PORT_5_CTRL_5 0x55 - -#define PORT_ACL_ENABLE BIT(2) -#define PORT_AUTHEN_MODE 0x3 -#define PORT_AUTHEN_PASS 0 -#define PORT_AUTHEN_BLOCK 1 -#define PORT_AUTHEN_TRAP 2 - -#define REG_PORT_5_CTRL_6 0x56 - -#define PORT_MII_INTERNAL_CLOCK BIT(7) -#define PORT_GMII_MAC_MODE BIT(2) - -#define REG_PORT_1_CTRL_7 0x17 -#define REG_PORT_2_CTRL_7 0x27 -#define REG_PORT_3_CTRL_7 0x37 -#define REG_PORT_4_CTRL_7 0x47 - -#define PORT_AUTO_NEG_ASYM_PAUSE BIT(5) -#define PORT_AUTO_NEG_SYM_PAUSE BIT(4) -#define PORT_AUTO_NEG_100BTX_FD BIT(3) -#define PORT_AUTO_NEG_100BTX BIT(2) -#define PORT_AUTO_NEG_10BT_FD BIT(1) -#define PORT_AUTO_NEG_10BT BIT(0) - -#define REG_PORT_1_STATUS_0 0x18 -#define REG_PORT_2_STATUS_0 0x28 -#define REG_PORT_3_STATUS_0 0x38 -#define REG_PORT_4_STATUS_0 0x48 - -/* For KSZ8765. */ -#define PORT_REMOTE_ASYM_PAUSE BIT(5) -#define PORT_REMOTE_SYM_PAUSE BIT(4) -#define PORT_REMOTE_100BTX_FD BIT(3) -#define PORT_REMOTE_100BTX BIT(2) -#define PORT_REMOTE_10BT_FD BIT(1) -#define PORT_REMOTE_10BT BIT(0) - -#define REG_PORT_1_STATUS_1 0x19 -#define REG_PORT_2_STATUS_1 0x29 -#define REG_PORT_3_STATUS_1 0x39 -#define REG_PORT_4_STATUS_1 0x49 - -#define PORT_HP_MDIX BIT(7) -#define PORT_REVERSED_POLARITY BIT(5) -#define PORT_TX_FLOW_CTRL BIT(4) -#define PORT_RX_FLOW_CTRL BIT(3) -#define PORT_STAT_SPEED_100MBIT BIT(2) -#define PORT_STAT_FULL_DUPLEX BIT(1) - -#define PORT_REMOTE_FAULT BIT(0) - -#define REG_PORT_1_LINK_MD_CTRL 0x1A -#define REG_PORT_2_LINK_MD_CTRL 0x2A -#define REG_PORT_3_LINK_MD_CTRL 0x3A -#define REG_PORT_4_LINK_MD_CTRL 0x4A - -#define PORT_CABLE_10M_SHORT BIT(7) -#define PORT_CABLE_DIAG_RESULT_M GENMASK(6, 5) -#define PORT_CABLE_DIAG_RESULT_S 5 -#define PORT_CABLE_STAT_NORMAL 0 -#define PORT_CABLE_STAT_OPEN 1 -#define PORT_CABLE_STAT_SHORT 2 -#define PORT_CABLE_STAT_FAILED 3 -#define PORT_START_CABLE_DIAG BIT(4) -#define PORT_FORCE_LINK BIT(3) -#define PORT_POWER_SAVING BIT(2) -#define PORT_PHY_REMOTE_LOOPBACK BIT(1) -#define PORT_CABLE_FAULT_COUNTER_H 0x01 - -#define REG_PORT_1_LINK_MD_RESULT 0x1B -#define REG_PORT_2_LINK_MD_RESULT 0x2B -#define REG_PORT_3_LINK_MD_RESULT 0x3B -#define REG_PORT_4_LINK_MD_RESULT 0x4B - -#define PORT_CABLE_FAULT_COUNTER_L 0xFF -#define PORT_CABLE_FAULT_COUNTER 0x1FF - -#define REG_PORT_1_CTRL_9 0x1C -#define REG_PORT_2_CTRL_9 0x2C -#define REG_PORT_3_CTRL_9 0x3C -#define REG_PORT_4_CTRL_9 0x4C - -#define PORT_AUTO_NEG_ENABLE BIT(7) -#define PORT_AUTO_NEG_DISABLE BIT(7) -#define PORT_FORCE_100_MBIT BIT(6) -#define PORT_FORCE_FULL_DUPLEX BIT(5) - -#define REG_PORT_1_CTRL_10 0x1D -#define REG_PORT_2_CTRL_10 0x2D -#define REG_PORT_3_CTRL_10 0x3D -#define REG_PORT_4_CTRL_10 0x4D - -#define PORT_LED_OFF BIT(7) -#define PORT_TX_DISABLE BIT(6) -#define PORT_AUTO_NEG_RESTART BIT(5) -#define PORT_POWER_DOWN BIT(3) -#define PORT_AUTO_MDIX_DISABLE BIT(2) -#define PORT_FORCE_MDIX BIT(1) -#define PORT_MAC_LOOPBACK BIT(0) -#define 
KSZ8873_PORT_PHY_LOOPBACK BIT(0) - -#define REG_PORT_1_STATUS_2 0x1E -#define REG_PORT_2_STATUS_2 0x2E -#define REG_PORT_3_STATUS_2 0x3E -#define REG_PORT_4_STATUS_2 0x4E - -#define PORT_MDIX_STATUS BIT(7) -#define PORT_AUTO_NEG_COMPLETE BIT(6) -#define PORT_STAT_LINK_GOOD BIT(5) - -#define REG_PORT_1_STATUS_3 0x1F -#define REG_PORT_2_STATUS_3 0x2F -#define REG_PORT_3_STATUS_3 0x3F -#define REG_PORT_4_STATUS_3 0x4F - -#define PORT_PHY_LOOPBACK BIT(7) -#define PORT_PHY_ISOLATE BIT(5) -#define PORT_PHY_SOFT_RESET BIT(4) -#define PORT_PHY_FORCE_LINK BIT(3) -#define PORT_PHY_MODE_M 0x7 -#define PHY_MODE_IN_AUTO_NEG 1 -#define PHY_MODE_10BT_HALF 2 -#define PHY_MODE_100BT_HALF 3 -#define PHY_MODE_10BT_FULL 5 -#define PHY_MODE_100BT_FULL 6 -#define PHY_MODE_ISOLDATE 7 - -#define REG_PORT_CTRL_0 0x00 -#define REG_PORT_CTRL_1 0x01 -#define REG_PORT_CTRL_2 0x02 -#define REG_PORT_CTRL_VID 0x03 - -#define REG_PORT_CTRL_5 0x05 - -#define REG_PORT_STATUS_1 0x09 -#define REG_PORT_LINK_MD_CTRL 0x0A -#define REG_PORT_LINK_MD_RESULT 0x0B -#define REG_PORT_CTRL_9 0x0C -#define REG_PORT_CTRL_10 0x0D -#define REG_PORT_STATUS_3 0x0F - -#define REG_PORT_CTRL_12 0xA0 -#define REG_PORT_CTRL_13 0xA1 -#define REG_PORT_RATE_CTRL_3 0xA2 -#define REG_PORT_RATE_CTRL_2 0xA3 -#define REG_PORT_RATE_CTRL_1 0xA4 -#define REG_PORT_RATE_CTRL_0 0xA5 -#define REG_PORT_RATE_LIMIT 0xA6 -#define REG_PORT_IN_RATE_0 0xA7 -#define REG_PORT_IN_RATE_1 0xA8 -#define REG_PORT_IN_RATE_2 0xA9 -#define REG_PORT_IN_RATE_3 0xAA -#define REG_PORT_OUT_RATE_0 0xAB -#define REG_PORT_OUT_RATE_1 0xAC -#define REG_PORT_OUT_RATE_2 0xAD -#define REG_PORT_OUT_RATE_3 0xAE - -#define PORT_CTRL_ADDR(port, addr) \ - ((addr) + REG_PORT_1_CTRL_0 + (port) * \ - (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0)) - -#define TABLE_EXT_SELECT_S 5 -#define TABLE_EEE_V 1 -#define TABLE_ACL_V 2 -#define TABLE_PME_V 4 -#define TABLE_LINK_MD_V 5 -#define TABLE_EEE (TABLE_EEE_V << TABLE_EXT_SELECT_S) -#define TABLE_ACL (TABLE_ACL_V << TABLE_EXT_SELECT_S) -#define TABLE_PME (TABLE_PME_V << TABLE_EXT_SELECT_S) -#define TABLE_LINK_MD (TABLE_LINK_MD << TABLE_EXT_SELECT_S) -#define TABLE_READ BIT(4) -#define TABLE_SELECT_S 2 -#define TABLE_STATIC_MAC_V 0 -#define TABLE_VLAN_V 1 -#define TABLE_DYNAMIC_MAC_V 2 -#define TABLE_MIB_V 3 -#define TABLE_STATIC_MAC (TABLE_STATIC_MAC_V << TABLE_SELECT_S) -#define TABLE_VLAN (TABLE_VLAN_V << TABLE_SELECT_S) -#define TABLE_DYNAMIC_MAC (TABLE_DYNAMIC_MAC_V << TABLE_SELECT_S) -#define TABLE_MIB (TABLE_MIB_V << TABLE_SELECT_S) - -#define REG_IND_CTRL_1 0x6F - -#define TABLE_ENTRY_MASK 0x03FF -#define TABLE_EXT_ENTRY_MASK 0x0FFF - -#define REG_IND_DATA_5 0x73 -#define REG_IND_DATA_2 0x76 -#define REG_IND_DATA_1 0x77 -#define REG_IND_DATA_0 0x78 - -#define REG_IND_DATA_PME_EEE_ACL 0xA0 - -#define REG_INT_STATUS 0x7C -#define REG_INT_ENABLE 0x7D - -#define INT_PME BIT(4) - -#define REG_ACL_INT_STATUS 0x7E -#define REG_ACL_INT_ENABLE 0x7F - -#define INT_PORT_5 BIT(4) -#define INT_PORT_4 BIT(3) -#define INT_PORT_3 BIT(2) -#define INT_PORT_2 BIT(1) -#define INT_PORT_1 BIT(0) - -#define INT_PORT_ALL \ - (INT_PORT_5 | INT_PORT_4 | INT_PORT_3 | INT_PORT_2 | INT_PORT_1) - -#define REG_SW_CTRL_12 0x80 -#define REG_SW_CTRL_13 0x81 - -#define SWITCH_802_1P_MASK 3 -#define SWITCH_802_1P_BASE 3 -#define SWITCH_802_1P_SHIFT 2 - -#define SW_802_1P_MAP_M KS_PRIO_M -#define SW_802_1P_MAP_S KS_PRIO_S - -#define REG_SWITCH_CTRL_14 0x82 - -#define SW_PRIO_MAPPING_M KS_PRIO_M -#define SW_PRIO_MAPPING_S 6 -#define SW_PRIO_MAP_3_HI 0 -#define SW_PRIO_MAP_2_HI 2 -#define 
SW_PRIO_MAP_0_LO 3 - -#define REG_SW_CTRL_15 0x83 -#define REG_SW_CTRL_16 0x84 -#define REG_SW_CTRL_17 0x85 -#define REG_SW_CTRL_18 0x86 - -#define SW_SELF_ADDR_FILTER_ENABLE BIT(6) - -#define REG_SW_UNK_UCAST_CTRL 0x83 -#define REG_SW_UNK_MCAST_CTRL 0x84 -#define REG_SW_UNK_VID_CTRL 0x85 -#define REG_SW_UNK_IP_MCAST_CTRL 0x86 - -#define SW_UNK_FWD_ENABLE BIT(5) -#define SW_UNK_FWD_MAP KS_PORT_M - -#define REG_SW_CTRL_19 0x87 - -#define SW_IN_RATE_LIMIT_PERIOD_M 0x3 -#define SW_IN_RATE_LIMIT_PERIOD_S 4 -#define SW_IN_RATE_LIMIT_16_MS 0 -#define SW_IN_RATE_LIMIT_64_MS 1 -#define SW_IN_RATE_LIMIT_256_MS 2 -#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3) -#define SW_INS_TAG_ENABLE BIT(2) - -#define REG_TOS_PRIO_CTRL_0 0x90 -#define REG_TOS_PRIO_CTRL_1 0x91 -#define REG_TOS_PRIO_CTRL_2 0x92 -#define REG_TOS_PRIO_CTRL_3 0x93 -#define REG_TOS_PRIO_CTRL_4 0x94 -#define REG_TOS_PRIO_CTRL_5 0x95 -#define REG_TOS_PRIO_CTRL_6 0x96 -#define REG_TOS_PRIO_CTRL_7 0x97 -#define REG_TOS_PRIO_CTRL_8 0x98 -#define REG_TOS_PRIO_CTRL_9 0x99 -#define REG_TOS_PRIO_CTRL_10 0x9A -#define REG_TOS_PRIO_CTRL_11 0x9B -#define REG_TOS_PRIO_CTRL_12 0x9C -#define REG_TOS_PRIO_CTRL_13 0x9D -#define REG_TOS_PRIO_CTRL_14 0x9E -#define REG_TOS_PRIO_CTRL_15 0x9F - -#define TOS_PRIO_M KS_PRIO_M -#define TOS_PRIO_S KS_PRIO_S - -#define REG_SW_CTRL_21 0xA4 - -#define SW_IPV6_MLD_OPTION BIT(3) -#define SW_IPV6_MLD_SNOOP BIT(2) - -#define REG_PORT_1_CTRL_12 0xB0 -#define REG_PORT_2_CTRL_12 0xC0 -#define REG_PORT_3_CTRL_12 0xD0 -#define REG_PORT_4_CTRL_12 0xE0 -#define REG_PORT_5_CTRL_12 0xF0 - -#define PORT_PASS_ALL BIT(6) -#define PORT_INS_TAG_FOR_PORT_5_S 3 -#define PORT_INS_TAG_FOR_PORT_5 BIT(3) -#define PORT_INS_TAG_FOR_PORT_4 BIT(2) -#define PORT_INS_TAG_FOR_PORT_3 BIT(1) -#define PORT_INS_TAG_FOR_PORT_2 BIT(0) - -#define REG_PORT_1_CTRL_13 0xB1 -#define REG_PORT_2_CTRL_13 0xC1 -#define REG_PORT_3_CTRL_13 0xD1 -#define REG_PORT_4_CTRL_13 0xE1 -#define REG_PORT_5_CTRL_13 0xF1 - -#define KSZ8795_PORT_4QUEUE_SPLIT_EN BIT(1) -#define PORT_DROP_TAG BIT(0) - -#define REG_PORT_1_CTRL_14 0xB2 -#define REG_PORT_2_CTRL_14 0xC2 -#define REG_PORT_3_CTRL_14 0xD2 -#define REG_PORT_4_CTRL_14 0xE2 -#define REG_PORT_5_CTRL_14 0xF2 -#define REG_PORT_1_CTRL_15 0xB3 -#define REG_PORT_2_CTRL_15 0xC3 -#define REG_PORT_3_CTRL_15 0xD3 -#define REG_PORT_4_CTRL_15 0xE3 -#define REG_PORT_5_CTRL_15 0xF3 -#define REG_PORT_1_CTRL_16 0xB4 -#define REG_PORT_2_CTRL_16 0xC4 -#define REG_PORT_3_CTRL_16 0xD4 -#define REG_PORT_4_CTRL_16 0xE4 -#define REG_PORT_5_CTRL_16 0xF4 -#define REG_PORT_1_CTRL_17 0xB5 -#define REG_PORT_2_CTRL_17 0xC5 -#define REG_PORT_3_CTRL_17 0xD5 -#define REG_PORT_4_CTRL_17 0xE5 -#define REG_PORT_5_CTRL_17 0xF5 - -#define REG_PORT_1_RATE_CTRL_3 0xB2 -#define REG_PORT_1_RATE_CTRL_2 0xB3 -#define REG_PORT_1_RATE_CTRL_1 0xB4 -#define REG_PORT_1_RATE_CTRL_0 0xB5 -#define REG_PORT_2_RATE_CTRL_3 0xC2 -#define REG_PORT_2_RATE_CTRL_2 0xC3 -#define REG_PORT_2_RATE_CTRL_1 0xC4 -#define REG_PORT_2_RATE_CTRL_0 0xC5 -#define REG_PORT_3_RATE_CTRL_3 0xD2 -#define REG_PORT_3_RATE_CTRL_2 0xD3 -#define REG_PORT_3_RATE_CTRL_1 0xD4 -#define REG_PORT_3_RATE_CTRL_0 0xD5 -#define REG_PORT_4_RATE_CTRL_3 0xE2 -#define REG_PORT_4_RATE_CTRL_2 0xE3 -#define REG_PORT_4_RATE_CTRL_1 0xE4 -#define REG_PORT_4_RATE_CTRL_0 0xE5 -#define REG_PORT_5_RATE_CTRL_3 0xF2 -#define REG_PORT_5_RATE_CTRL_2 0xF3 -#define REG_PORT_5_RATE_CTRL_1 0xF4 -#define REG_PORT_5_RATE_CTRL_0 0xF5 - -#define RATE_CTRL_ENABLE BIT(7) -#define RATE_RATIO_M (BIT(7) - 1) - -#define PORT_OUT_RATE_ENABLE 
BIT(7) - -#define REG_PORT_1_RATE_LIMIT 0xB6 -#define REG_PORT_2_RATE_LIMIT 0xC6 -#define REG_PORT_3_RATE_LIMIT 0xD6 -#define REG_PORT_4_RATE_LIMIT 0xE6 -#define REG_PORT_5_RATE_LIMIT 0xF6 - -#define PORT_IN_PORT_BASED_S 6 -#define PORT_RATE_PACKET_BASED_S 5 -#define PORT_IN_FLOW_CTRL_S 4 -#define PORT_IN_LIMIT_MODE_M 0x3 -#define PORT_IN_LIMIT_MODE_S 2 -#define PORT_COUNT_IFG_S 1 -#define PORT_COUNT_PREAMBLE_S 0 -#define PORT_IN_PORT_BASED BIT(PORT_IN_PORT_BASED_S) -#define PORT_RATE_PACKET_BASED BIT(PORT_RATE_PACKET_BASED_S) -#define PORT_IN_FLOW_CTRL BIT(PORT_IN_FLOW_CTRL_S) -#define PORT_IN_ALL 0 -#define PORT_IN_UNICAST 1 -#define PORT_IN_MULTICAST 2 -#define PORT_IN_BROADCAST 3 -#define PORT_COUNT_IFG BIT(PORT_COUNT_IFG_S) -#define PORT_COUNT_PREAMBLE BIT(PORT_COUNT_PREAMBLE_S) - -#define REG_PORT_1_IN_RATE_0 0xB7 -#define REG_PORT_2_IN_RATE_0 0xC7 -#define REG_PORT_3_IN_RATE_0 0xD7 -#define REG_PORT_4_IN_RATE_0 0xE7 -#define REG_PORT_5_IN_RATE_0 0xF7 -#define REG_PORT_1_IN_RATE_1 0xB8 -#define REG_PORT_2_IN_RATE_1 0xC8 -#define REG_PORT_3_IN_RATE_1 0xD8 -#define REG_PORT_4_IN_RATE_1 0xE8 -#define REG_PORT_5_IN_RATE_1 0xF8 -#define REG_PORT_1_IN_RATE_2 0xB9 -#define REG_PORT_2_IN_RATE_2 0xC9 -#define REG_PORT_3_IN_RATE_2 0xD9 -#define REG_PORT_4_IN_RATE_2 0xE9 -#define REG_PORT_5_IN_RATE_2 0xF9 -#define REG_PORT_1_IN_RATE_3 0xBA -#define REG_PORT_2_IN_RATE_3 0xCA -#define REG_PORT_3_IN_RATE_3 0xDA -#define REG_PORT_4_IN_RATE_3 0xEA -#define REG_PORT_5_IN_RATE_3 0xFA - -#define PORT_IN_RATE_ENABLE BIT(7) -#define PORT_RATE_LIMIT_M (BIT(7) - 1) - -#define REG_PORT_1_OUT_RATE_0 0xBB -#define REG_PORT_2_OUT_RATE_0 0xCB -#define REG_PORT_3_OUT_RATE_0 0xDB -#define REG_PORT_4_OUT_RATE_0 0xEB -#define REG_PORT_5_OUT_RATE_0 0xFB -#define REG_PORT_1_OUT_RATE_1 0xBC -#define REG_PORT_2_OUT_RATE_1 0xCC -#define REG_PORT_3_OUT_RATE_1 0xDC -#define REG_PORT_4_OUT_RATE_1 0xEC -#define REG_PORT_5_OUT_RATE_1 0xFC -#define REG_PORT_1_OUT_RATE_2 0xBD -#define REG_PORT_2_OUT_RATE_2 0xCD -#define REG_PORT_3_OUT_RATE_2 0xDD -#define REG_PORT_4_OUT_RATE_2 0xED -#define REG_PORT_5_OUT_RATE_2 0xFD -#define REG_PORT_1_OUT_RATE_3 0xBE -#define REG_PORT_2_OUT_RATE_3 0xCE -#define REG_PORT_3_OUT_RATE_3 0xDE -#define REG_PORT_4_OUT_RATE_3 0xEE -#define REG_PORT_5_OUT_RATE_3 0xFE - -/* 88x3 specific */ - -#define REG_SW_INSERT_SRC_PVID 0xC2 - -/* PME */ - -#define SW_PME_OUTPUT_ENABLE BIT(1) -#define SW_PME_ACTIVE_HIGH BIT(0) - -#define PORT_MAGIC_PACKET_DETECT BIT(2) -#define PORT_LINK_UP_DETECT BIT(1) -#define PORT_ENERGY_DETECT BIT(0) - -/* ACL */ - -#define ACL_FIRST_RULE_M 0xF - -#define ACL_MODE_M 0x3 -#define ACL_MODE_S 4 -#define ACL_MODE_DISABLE 0 -#define ACL_MODE_LAYER_2 1 -#define ACL_MODE_LAYER_3 2 -#define ACL_MODE_LAYER_4 3 -#define ACL_ENABLE_M 0x3 -#define ACL_ENABLE_S 2 -#define ACL_ENABLE_2_COUNT 0 -#define ACL_ENABLE_2_TYPE 1 -#define ACL_ENABLE_2_MAC 2 -#define ACL_ENABLE_2_BOTH 3 -#define ACL_ENABLE_3_IP 1 -#define ACL_ENABLE_3_SRC_DST_COMP 2 -#define ACL_ENABLE_4_PROTOCOL 0 -#define ACL_ENABLE_4_TCP_PORT_COMP 1 -#define ACL_ENABLE_4_UDP_PORT_COMP 2 -#define ACL_ENABLE_4_TCP_SEQN_COMP 3 -#define ACL_SRC BIT(1) -#define ACL_EQUAL BIT(0) - -#define ACL_MAX_PORT 0xFFFF - -#define ACL_MIN_PORT 0xFFFF -#define ACL_IP_ADDR 0xFFFFFFFF -#define ACL_TCP_SEQNUM 0xFFFFFFFF - -#define ACL_RESERVED 0xF8 -#define ACL_PORT_MODE_M 0x3 -#define ACL_PORT_MODE_S 1 -#define ACL_PORT_MODE_DISABLE 0 -#define ACL_PORT_MODE_EITHER 1 -#define ACL_PORT_MODE_IN_RANGE 2 -#define ACL_PORT_MODE_OUT_OF_RANGE 3 - -#define 
ACL_TCP_FLAG_ENABLE BIT(0) - -#define ACL_TCP_FLAG_M 0xFF - -#define ACL_TCP_FLAG 0xFF -#define ACL_ETH_TYPE 0xFFFF -#define ACL_IP_M 0xFFFFFFFF - -#define ACL_PRIO_MODE_M 0x3 -#define ACL_PRIO_MODE_S 6 -#define ACL_PRIO_MODE_DISABLE 0 -#define ACL_PRIO_MODE_HIGHER 1 -#define ACL_PRIO_MODE_LOWER 2 -#define ACL_PRIO_MODE_REPLACE 3 -#define ACL_PRIO_M 0x7 -#define ACL_PRIO_S 3 -#define ACL_VLAN_PRIO_REPLACE BIT(2) -#define ACL_VLAN_PRIO_M 0x7 -#define ACL_VLAN_PRIO_HI_M 0x3 - -#define ACL_VLAN_PRIO_LO_M 0x8 -#define ACL_VLAN_PRIO_S 7 -#define ACL_MAP_MODE_M 0x3 -#define ACL_MAP_MODE_S 5 -#define ACL_MAP_MODE_DISABLE 0 -#define ACL_MAP_MODE_OR 1 -#define ACL_MAP_MODE_AND 2 -#define ACL_MAP_MODE_REPLACE 3 -#define ACL_MAP_PORT_M 0x1F - -#define ACL_CNT_M (BIT(11) - 1) -#define ACL_CNT_S 5 -#define ACL_MSEC_UNIT BIT(4) -#define ACL_INTR_MODE BIT(3) - -#define REG_PORT_ACL_BYTE_EN_MSB 0x10 - -#define ACL_BYTE_EN_MSB_M 0x3F - -#define REG_PORT_ACL_BYTE_EN_LSB 0x11 - -#define ACL_ACTION_START 0xA -#define ACL_ACTION_LEN 2 -#define ACL_INTR_CNT_START 0xB -#define ACL_RULESET_START 0xC -#define ACL_RULESET_LEN 2 -#define ACL_TABLE_LEN 14 - -#define ACL_ACTION_ENABLE 0x000C -#define ACL_MATCH_ENABLE 0x1FF0 -#define ACL_RULESET_ENABLE 0x2003 -#define ACL_BYTE_ENABLE ((ACL_BYTE_EN_MSB_M << 8) | 0xFF) -#define ACL_MODE_ENABLE (0x10 << 8) - -#define REG_PORT_ACL_CTRL_0 0x12 - -#define PORT_ACL_WRITE_DONE BIT(6) -#define PORT_ACL_READ_DONE BIT(5) -#define PORT_ACL_WRITE BIT(4) -#define PORT_ACL_INDEX_M 0xF - -#define REG_PORT_ACL_CTRL_1 0x13 - -#define PORT_ACL_FORCE_DLR_MISS BIT(0) - -#define KSZ8795_ID_HI 0x0022 -#define KSZ8795_ID_LO 0x1550 -#define KSZ8863_ID_LO 0x1430 - -#define KSZ8795_SW_ID 0x8795 - -#define PHY_REG_LINK_MD 0x1D - -#define PHY_START_CABLE_DIAG BIT(15) -#define PHY_CABLE_DIAG_RESULT_M GENMASK(14, 13) -#define PHY_CABLE_DIAG_RESULT 0x6000 -#define PHY_CABLE_STAT_NORMAL 0x0000 -#define PHY_CABLE_STAT_OPEN 0x2000 -#define PHY_CABLE_STAT_SHORT 0x4000 -#define PHY_CABLE_STAT_FAILED 0x6000 -#define PHY_CABLE_10M_SHORT BIT(12) -#define PHY_CABLE_FAULT_COUNTER_M GENMASK(8, 0) - -#define PHY_REG_PHY_CTRL 0x1F - -#define PHY_MODE_M 0x7 -#define PHY_MODE_S 8 -#define PHY_STAT_REVERSED_POLARITY BIT(5) -#define PHY_STAT_MDIX BIT(4) -#define PHY_FORCE_LINK BIT(3) -#define PHY_POWER_SAVING_ENABLE BIT(2) -#define PHY_REMOTE_LOOPBACK BIT(1) - -/* Chip resource */ - -#define PRIO_QUEUES 4 - -#define KS_PRIO_IN_REG 4 - -#define MIB_COUNTER_NUM 0x20 - -/* Common names used by other drivers */ - -#define P_BCAST_STORM_CTRL REG_PORT_CTRL_0 -#define P_PRIO_CTRL REG_PORT_CTRL_0 -#define P_TAG_CTRL REG_PORT_CTRL_0 -#define P_MIRROR_CTRL REG_PORT_CTRL_1 -#define P_802_1P_CTRL REG_PORT_CTRL_2 -#define P_PASS_ALL_CTRL REG_PORT_CTRL_12 -#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12 -#define P_DROP_TAG_CTRL REG_PORT_CTRL_13 -#define P_RATE_LIMIT_CTRL REG_PORT_RATE_LIMIT - -#define S_UNKNOWN_DA_CTRL REG_SWITCH_CTRL_12 -#define S_FORWARD_INVALID_VID_CTRL REG_FORWARD_INVALID_VID - -#define S_FLUSH_TABLE_CTRL REG_SW_CTRL_0 -#define S_LINK_AGING_CTRL REG_SW_CTRL_0 -#define S_HUGE_PACKET_CTRL REG_SW_CTRL_1 -#define S_MIRROR_CTRL REG_SW_CTRL_3 -#define S_REPLACE_VID_CTRL REG_SW_CTRL_4 -#define S_PASS_PAUSE_CTRL REG_SW_CTRL_10 -#define S_802_1P_PRIO_CTRL REG_SW_CTRL_12 -#define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0 -#define S_IPV6_MLD_CTRL REG_SW_CTRL_21 - -#define IND_ACC_TABLE(table) ((table) << 8) - -/* */ -#define REG_IND_EEE_GLOB2_LO 0x34 -#define REG_IND_EEE_GLOB2_HI 0x35 - -/** - * MIB_COUNTER_VALUE 
00-00000000-3FFFFFFF - * MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF - * MIB_PACKET_DROPPED 00-00000000-0000FFFF - * MIB_COUNTER_VALID 00-00000020-00000000 - * MIB_COUNTER_OVERFLOW 00-00000040-00000000 - */ - -#define MIB_COUNTER_VALUE 0x3FFFFFFF - -#define KSZ8795_MIB_TOTAL_RX_0 0x100 -#define KSZ8795_MIB_TOTAL_TX_0 0x101 -#define KSZ8795_MIB_TOTAL_RX_1 0x104 -#define KSZ8795_MIB_TOTAL_TX_1 0x105 - -#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100 -#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105 - -#define MIB_PACKET_DROPPED 0x0000FFFF - -#define MIB_TOTAL_BYTES_H 0x0000000F - -#define TAIL_TAG_OVERRIDE BIT(6) -#define TAIL_TAG_LOOKUP BIT(7) - -#define FID_ENTRIES 128 -#define KSZ8_DYN_MAC_ENTRIES 1024 - -#endif diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c index 5711a59e2ac949..a8bfcd917bf70f 100644 --- a/drivers/net/dsa/microchip/ksz8863_smi.c +++ b/drivers/net/dsa/microchip/ksz8863_smi.c @@ -199,11 +199,11 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev) static const struct of_device_id ksz8863_dt_ids[] = { { .compatible = "microchip,ksz8863", - .data = &ksz_switch_chips[KSZ8830] + .data = &ksz_switch_chips[KSZ88X3] }, { .compatible = "microchip,ksz8873", - .data = &ksz_switch_chips[KSZ8830] + .data = &ksz_switch_chips[KSZ88X3] }, { }, }; diff --git a/drivers/net/dsa/microchip/ksz8_reg.h b/drivers/net/dsa/microchip/ksz8_reg.h new file mode 100644 index 00000000000000..329688603a582b --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8_reg.h @@ -0,0 +1,799 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Microchip KSZ8XXX series register definitions + * + * The base for these definitions is KSZ8795 but unless indicated + * differently by their prefix, they apply to all KSZ8 series + * devices. Registers and masks that do change are defined in + * dedicated structures in ksz_common.c. + * + * Copyright (c) 2017 Microchip Technology Inc. 
+ * Tristram Ha + */ + +#ifndef __KSZ8_REG_H +#define __KSZ8_REG_H + +#define KS_PORT_M 0x1F + +#define KS_PRIO_M 0x3 +#define KS_PRIO_S 2 + +#define SW_REVISION_M 0x0E +#define SW_REVISION_S 1 + +#define KSZ8863_REG_SW_RESET 0x43 + +#define KSZ8863_GLOBAL_SOFTWARE_RESET BIT(4) +#define KSZ8863_PCS_RESET BIT(0) + +#define KSZ88X3_REG_FVID_AND_HOST_MODE 0xC6 +#define KSZ88X3_PORT3_RMII_CLK_INTERNAL BIT(3) + +#define REG_SW_CTRL_0 0x02 + +#define SW_NEW_BACKOFF BIT(7) +#define SW_GLOBAL_RESET BIT(6) +#define SW_FLUSH_DYN_MAC_TABLE BIT(5) +#define SW_FLUSH_STA_MAC_TABLE BIT(4) +#define SW_LINK_AUTO_AGING BIT(0) + +#define REG_SW_CTRL_1 0x03 + +#define SW_HUGE_PACKET BIT(6) +#define SW_TX_FLOW_CTRL_DISABLE BIT(5) +#define SW_RX_FLOW_CTRL_DISABLE BIT(4) +#define SW_CHECK_LENGTH BIT(3) +#define SW_AGING_ENABLE BIT(2) +#define SW_FAST_AGING BIT(1) +#define SW_AGGR_BACKOFF BIT(0) + +#define REG_SW_CTRL_2 0x04 + +#define UNICAST_VLAN_BOUNDARY BIT(7) +#define SW_BACK_PRESSURE BIT(5) +#define FAIR_FLOW_CTRL BIT(4) +#define NO_EXC_COLLISION_DROP BIT(3) +#define SW_LEGAL_PACKET_DISABLE BIT(1) + +#define KSZ8863_HUGE_PACKET_ENABLE BIT(2) +#define KSZ8863_LEGAL_PACKET_ENABLE BIT(1) + +#define REG_SW_CTRL_3 0x05 + #define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3) + +#define SW_VLAN_ENABLE BIT(7) +#define SW_IGMP_SNOOP BIT(6) +#define SW_MIRROR_RX_TX BIT(0) + +#define REG_SW_CTRL_4 0x06 + +#define SW_HALF_DUPLEX_FLOW_CTRL BIT(7) +#define SW_HALF_DUPLEX BIT(6) +#define SW_FLOW_CTRL BIT(5) +#define SW_10_MBIT BIT(4) +#define SW_REPLACE_VID BIT(3) + +#define REG_SW_CTRL_5 0x07 + +#define REG_SW_CTRL_6 0x08 + +#define SW_MIB_COUNTER_FLUSH BIT(7) +#define SW_MIB_COUNTER_FREEZE BIT(6) +#define SW_MIB_COUNTER_CTRL_ENABLE KS_PORT_M + +#define REG_SW_CTRL_9 0x0B + +#define SPI_CLK_125_MHZ 0x80 +#define SPI_CLK_62_5_MHZ 0x40 +#define SPI_CLK_31_25_MHZ 0x00 + +#define SW_LED_MODE_M 0x3 +#define SW_LED_MODE_S 4 +#define SW_LED_LINK_ACT_SPEED 0 +#define SW_LED_LINK_ACT 1 +#define SW_LED_LINK_ACT_DUPLEX 2 +#define SW_LED_LINK_DUPLEX 3 + +#define REG_SW_CTRL_10 0x0C + +#define SW_PASS_PAUSE BIT(0) + +#define REG_SW_CTRL_11 0x0D + +#define REG_POWER_MANAGEMENT_1 0x0E + +#define SW_PLL_POWER_DOWN BIT(5) +#define SW_POWER_MANAGEMENT_MODE_M 0x3 +#define SW_POWER_MANAGEMENT_MODE_S 3 +#define SW_POWER_NORMAL 0 +#define SW_ENERGY_DETECTION 1 +#define SW_SOFTWARE_POWER_DOWN 2 + +#define REG_POWER_MANAGEMENT_2 0x0F + +#define REG_PORT_1_CTRL_0 0x10 +#define REG_PORT_2_CTRL_0 0x20 +#define REG_PORT_3_CTRL_0 0x30 +#define REG_PORT_4_CTRL_0 0x40 +#define REG_PORT_5_CTRL_0 0x50 + +#define PORT_BROADCAST_STORM BIT(7) +#define PORT_DIFFSERV_ENABLE BIT(6) +#define PORT_802_1P_ENABLE BIT(5) +#define PORT_BASED_PRIO_S 3 +#define PORT_BASED_PRIO_M KS_PRIO_M +#define PORT_BASED_PRIO_0 0 +#define PORT_BASED_PRIO_1 1 +#define PORT_BASED_PRIO_2 2 +#define PORT_BASED_PRIO_3 3 +#define PORT_INSERT_TAG BIT(2) +#define PORT_REMOVE_TAG BIT(1) +#define KSZ8795_PORT_2QUEUE_SPLIT_EN BIT(0) +#define KSZ8873_PORT_4QUEUE_SPLIT_EN BIT(0) + +#define REG_PORT_1_CTRL_1 0x11 +#define REG_PORT_2_CTRL_1 0x21 +#define REG_PORT_3_CTRL_1 0x31 +#define REG_PORT_4_CTRL_1 0x41 +#define REG_PORT_5_CTRL_1 0x51 + +#define PORT_MIRROR_SNIFFER BIT(7) +#define PORT_MIRROR_RX BIT(6) +#define PORT_MIRROR_TX BIT(5) +#define PORT_VLAN_MEMBERSHIP KS_PORT_M + +#define REG_PORT_1_CTRL_2 0x12 +#define REG_PORT_2_CTRL_2 0x22 +#define REG_PORT_3_CTRL_2 0x32 +#define REG_PORT_4_CTRL_2 0x42 +#define REG_PORT_5_CTRL_2 0x52 + +#define KSZ8873_PORT_2QUEUE_SPLIT_EN BIT(7) +#define 
PORT_INGRESS_FILTER BIT(6) +#define PORT_DISCARD_NON_VID BIT(5) +#define PORT_FORCE_FLOW_CTRL BIT(4) +#define PORT_BACK_PRESSURE BIT(3) + +#define REG_PORT_1_CTRL_3 0x13 +#define REG_PORT_2_CTRL_3 0x23 +#define REG_PORT_3_CTRL_3 0x33 +#define REG_PORT_4_CTRL_3 0x43 +#define REG_PORT_5_CTRL_3 0x53 +#define REG_PORT_1_CTRL_4 0x14 +#define REG_PORT_2_CTRL_4 0x24 +#define REG_PORT_3_CTRL_4 0x34 +#define REG_PORT_4_CTRL_4 0x44 +#define REG_PORT_5_CTRL_4 0x54 + +#define PORT_DEFAULT_VID 0x0001 + +#define REG_PORT_1_CTRL_5 0x15 +#define REG_PORT_2_CTRL_5 0x25 +#define REG_PORT_3_CTRL_5 0x35 +#define REG_PORT_4_CTRL_5 0x45 +#define REG_PORT_5_CTRL_5 0x55 + +#define PORT_ACL_ENABLE BIT(2) +#define PORT_AUTHEN_MODE 0x3 +#define PORT_AUTHEN_PASS 0 +#define PORT_AUTHEN_BLOCK 1 +#define PORT_AUTHEN_TRAP 2 + +#define REG_PORT_5_CTRL_6 0x56 + +#define PORT_MII_INTERNAL_CLOCK BIT(7) +#define PORT_GMII_MAC_MODE BIT(2) + +#define REG_PORT_1_CTRL_7 0x17 +#define REG_PORT_2_CTRL_7 0x27 +#define REG_PORT_3_CTRL_7 0x37 +#define REG_PORT_4_CTRL_7 0x47 + +#define PORT_AUTO_NEG_ASYM_PAUSE BIT(5) +#define PORT_AUTO_NEG_SYM_PAUSE BIT(4) +#define PORT_AUTO_NEG_100BTX_FD BIT(3) +#define PORT_AUTO_NEG_100BTX BIT(2) +#define PORT_AUTO_NEG_10BT_FD BIT(1) +#define PORT_AUTO_NEG_10BT BIT(0) + +#define REG_PORT_1_STATUS_0 0x18 +#define REG_PORT_2_STATUS_0 0x28 +#define REG_PORT_3_STATUS_0 0x38 +#define REG_PORT_4_STATUS_0 0x48 + +/* For KSZ8765. */ +#define PORT_REMOTE_ASYM_PAUSE BIT(5) +#define PORT_REMOTE_SYM_PAUSE BIT(4) +#define PORT_REMOTE_100BTX_FD BIT(3) +#define PORT_REMOTE_100BTX BIT(2) +#define PORT_REMOTE_10BT_FD BIT(1) +#define PORT_REMOTE_10BT BIT(0) + +#define REG_PORT_1_STATUS_1 0x19 +#define REG_PORT_2_STATUS_1 0x29 +#define REG_PORT_3_STATUS_1 0x39 +#define REG_PORT_4_STATUS_1 0x49 + +#define PORT_HP_MDIX BIT(7) +#define PORT_REVERSED_POLARITY BIT(5) +#define PORT_TX_FLOW_CTRL BIT(4) +#define PORT_RX_FLOW_CTRL BIT(3) +#define PORT_STAT_SPEED_100MBIT BIT(2) +#define PORT_STAT_FULL_DUPLEX BIT(1) + +#define PORT_REMOTE_FAULT BIT(0) + +#define REG_PORT_1_LINK_MD_CTRL 0x1A +#define REG_PORT_2_LINK_MD_CTRL 0x2A +#define REG_PORT_3_LINK_MD_CTRL 0x3A +#define REG_PORT_4_LINK_MD_CTRL 0x4A + +#define PORT_CABLE_10M_SHORT BIT(7) +#define PORT_CABLE_DIAG_RESULT_M GENMASK(6, 5) +#define PORT_CABLE_DIAG_RESULT_S 5 +#define PORT_CABLE_STAT_NORMAL 0 +#define PORT_CABLE_STAT_OPEN 1 +#define PORT_CABLE_STAT_SHORT 2 +#define PORT_CABLE_STAT_FAILED 3 +#define PORT_START_CABLE_DIAG BIT(4) +#define PORT_FORCE_LINK BIT(3) +#define PORT_POWER_SAVING BIT(2) +#define PORT_PHY_REMOTE_LOOPBACK BIT(1) +#define PORT_CABLE_FAULT_COUNTER_H 0x01 + +#define REG_PORT_1_LINK_MD_RESULT 0x1B +#define REG_PORT_2_LINK_MD_RESULT 0x2B +#define REG_PORT_3_LINK_MD_RESULT 0x3B +#define REG_PORT_4_LINK_MD_RESULT 0x4B + +#define PORT_CABLE_FAULT_COUNTER_L 0xFF +#define PORT_CABLE_FAULT_COUNTER 0x1FF + +#define REG_PORT_1_CTRL_9 0x1C +#define REG_PORT_2_CTRL_9 0x2C +#define REG_PORT_3_CTRL_9 0x3C +#define REG_PORT_4_CTRL_9 0x4C + +#define PORT_AUTO_NEG_ENABLE BIT(7) +#define PORT_AUTO_NEG_DISABLE BIT(7) +#define PORT_FORCE_100_MBIT BIT(6) +#define PORT_FORCE_FULL_DUPLEX BIT(5) + +#define REG_PORT_1_CTRL_10 0x1D +#define REG_PORT_2_CTRL_10 0x2D +#define REG_PORT_3_CTRL_10 0x3D +#define REG_PORT_4_CTRL_10 0x4D + +#define PORT_LED_OFF BIT(7) +#define PORT_TX_DISABLE BIT(6) +#define PORT_AUTO_NEG_RESTART BIT(5) +#define PORT_POWER_DOWN BIT(3) +#define PORT_AUTO_MDIX_DISABLE BIT(2) +#define PORT_FORCE_MDIX BIT(1) +#define PORT_MAC_LOOPBACK BIT(0) +#define 
KSZ8873_PORT_PHY_LOOPBACK BIT(0) + +#define REG_PORT_1_STATUS_2 0x1E +#define REG_PORT_2_STATUS_2 0x2E +#define REG_PORT_3_STATUS_2 0x3E +#define REG_PORT_4_STATUS_2 0x4E + +#define PORT_MDIX_STATUS BIT(7) +#define PORT_AUTO_NEG_COMPLETE BIT(6) +#define PORT_STAT_LINK_GOOD BIT(5) + +#define REG_PORT_1_STATUS_3 0x1F +#define REG_PORT_2_STATUS_3 0x2F +#define REG_PORT_3_STATUS_3 0x3F +#define REG_PORT_4_STATUS_3 0x4F + +#define PORT_PHY_LOOPBACK BIT(7) +#define PORT_PHY_ISOLATE BIT(5) +#define PORT_PHY_SOFT_RESET BIT(4) +#define PORT_PHY_FORCE_LINK BIT(3) +#define PORT_PHY_MODE_M 0x7 +#define PHY_MODE_IN_AUTO_NEG 1 +#define PHY_MODE_10BT_HALF 2 +#define PHY_MODE_100BT_HALF 3 +#define PHY_MODE_10BT_FULL 5 +#define PHY_MODE_100BT_FULL 6 +#define PHY_MODE_ISOLDATE 7 + +#define REG_PORT_CTRL_0 0x00 +#define REG_PORT_CTRL_1 0x01 +#define REG_PORT_CTRL_2 0x02 +#define REG_PORT_CTRL_VID 0x03 + +#define REG_PORT_CTRL_5 0x05 + +#define REG_PORT_STATUS_1 0x09 +#define REG_PORT_LINK_MD_CTRL 0x0A +#define REG_PORT_LINK_MD_RESULT 0x0B +#define REG_PORT_CTRL_9 0x0C +#define REG_PORT_CTRL_10 0x0D +#define REG_PORT_STATUS_3 0x0F + +#define REG_PORT_CTRL_12 0xA0 +#define REG_PORT_CTRL_13 0xA1 +#define REG_PORT_RATE_CTRL_3 0xA2 +#define REG_PORT_RATE_CTRL_2 0xA3 +#define REG_PORT_RATE_CTRL_1 0xA4 +#define REG_PORT_RATE_CTRL_0 0xA5 +#define REG_PORT_RATE_LIMIT 0xA6 +#define REG_PORT_IN_RATE_0 0xA7 +#define REG_PORT_IN_RATE_1 0xA8 +#define REG_PORT_IN_RATE_2 0xA9 +#define REG_PORT_IN_RATE_3 0xAA +#define REG_PORT_OUT_RATE_0 0xAB +#define REG_PORT_OUT_RATE_1 0xAC +#define REG_PORT_OUT_RATE_2 0xAD +#define REG_PORT_OUT_RATE_3 0xAE + +#define PORT_CTRL_ADDR(port, addr) \ + ((addr) + REG_PORT_1_CTRL_0 + (port) * \ + (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0)) + +#define TABLE_EXT_SELECT_S 5 +#define TABLE_EEE_V 1 +#define TABLE_ACL_V 2 +#define TABLE_PME_V 4 +#define TABLE_LINK_MD_V 5 +#define TABLE_EEE (TABLE_EEE_V << TABLE_EXT_SELECT_S) +#define TABLE_ACL (TABLE_ACL_V << TABLE_EXT_SELECT_S) +#define TABLE_PME (TABLE_PME_V << TABLE_EXT_SELECT_S) +#define TABLE_LINK_MD (TABLE_LINK_MD << TABLE_EXT_SELECT_S) +#define TABLE_READ BIT(4) +#define TABLE_SELECT_S 2 +#define TABLE_STATIC_MAC_V 0 +#define TABLE_VLAN_V 1 +#define TABLE_DYNAMIC_MAC_V 2 +#define TABLE_MIB_V 3 +#define TABLE_STATIC_MAC (TABLE_STATIC_MAC_V << TABLE_SELECT_S) +#define TABLE_VLAN (TABLE_VLAN_V << TABLE_SELECT_S) +#define TABLE_DYNAMIC_MAC (TABLE_DYNAMIC_MAC_V << TABLE_SELECT_S) +#define TABLE_MIB (TABLE_MIB_V << TABLE_SELECT_S) + +#define REG_IND_CTRL_1 0x6F + +#define TABLE_ENTRY_MASK 0x03FF +#define TABLE_EXT_ENTRY_MASK 0x0FFF + +#define REG_IND_DATA_5 0x73 +#define REG_IND_DATA_2 0x76 +#define REG_IND_DATA_1 0x77 +#define REG_IND_DATA_0 0x78 + +#define REG_INT_STATUS 0x7C +#define REG_INT_ENABLE 0x7D + +#define INT_PME BIT(4) + +#define REG_ACL_INT_STATUS 0x7E +#define REG_ACL_INT_ENABLE 0x7F + +#define INT_PORT_5 BIT(4) +#define INT_PORT_4 BIT(3) +#define INT_PORT_3 BIT(2) +#define INT_PORT_2 BIT(1) +#define INT_PORT_1 BIT(0) + +#define INT_PORT_ALL \ + (INT_PORT_5 | INT_PORT_4 | INT_PORT_3 | INT_PORT_2 | INT_PORT_1) + +#define REG_SW_CTRL_12 0x80 +#define REG_SW_CTRL_13 0x81 + +#define SWITCH_802_1P_MASK 3 +#define SWITCH_802_1P_BASE 3 +#define SWITCH_802_1P_SHIFT 2 + +#define SW_802_1P_MAP_M KS_PRIO_M +#define SW_802_1P_MAP_S KS_PRIO_S + +#define REG_SWITCH_CTRL_14 0x82 + +#define SW_PRIO_MAPPING_M KS_PRIO_M +#define SW_PRIO_MAPPING_S 6 +#define SW_PRIO_MAP_3_HI 0 +#define SW_PRIO_MAP_2_HI 2 +#define SW_PRIO_MAP_0_LO 3 + +#define REG_SW_CTRL_15 
0x83 +#define REG_SW_CTRL_16 0x84 +#define REG_SW_CTRL_17 0x85 +#define REG_SW_CTRL_18 0x86 + +#define SW_SELF_ADDR_FILTER_ENABLE BIT(6) + +#define REG_SW_UNK_UCAST_CTRL 0x83 +#define REG_SW_UNK_MCAST_CTRL 0x84 +#define REG_SW_UNK_VID_CTRL 0x85 +#define REG_SW_UNK_IP_MCAST_CTRL 0x86 + +#define SW_UNK_FWD_ENABLE BIT(5) +#define SW_UNK_FWD_MAP KS_PORT_M + +#define REG_SW_CTRL_19 0x87 + +#define SW_IN_RATE_LIMIT_PERIOD_M 0x3 +#define SW_IN_RATE_LIMIT_PERIOD_S 4 +#define SW_IN_RATE_LIMIT_16_MS 0 +#define SW_IN_RATE_LIMIT_64_MS 1 +#define SW_IN_RATE_LIMIT_256_MS 2 +#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3) +#define SW_INS_TAG_ENABLE BIT(2) + +#define REG_TOS_PRIO_CTRL_0 0x90 +#define REG_TOS_PRIO_CTRL_1 0x91 +#define REG_TOS_PRIO_CTRL_2 0x92 +#define REG_TOS_PRIO_CTRL_3 0x93 +#define REG_TOS_PRIO_CTRL_4 0x94 +#define REG_TOS_PRIO_CTRL_5 0x95 +#define REG_TOS_PRIO_CTRL_6 0x96 +#define REG_TOS_PRIO_CTRL_7 0x97 +#define REG_TOS_PRIO_CTRL_8 0x98 +#define REG_TOS_PRIO_CTRL_9 0x99 +#define REG_TOS_PRIO_CTRL_10 0x9A +#define REG_TOS_PRIO_CTRL_11 0x9B +#define REG_TOS_PRIO_CTRL_12 0x9C +#define REG_TOS_PRIO_CTRL_13 0x9D +#define REG_TOS_PRIO_CTRL_14 0x9E +#define REG_TOS_PRIO_CTRL_15 0x9F + +#define TOS_PRIO_M KS_PRIO_M +#define TOS_PRIO_S KS_PRIO_S + +#define REG_SW_CTRL_21 0xA4 + +#define SW_IPV6_MLD_OPTION BIT(3) +#define SW_IPV6_MLD_SNOOP BIT(2) + +#define REG_PORT_1_CTRL_12 0xB0 +#define REG_PORT_2_CTRL_12 0xC0 +#define REG_PORT_3_CTRL_12 0xD0 +#define REG_PORT_4_CTRL_12 0xE0 +#define REG_PORT_5_CTRL_12 0xF0 + +#define PORT_PASS_ALL BIT(6) +#define PORT_INS_TAG_FOR_PORT_5_S 3 +#define PORT_INS_TAG_FOR_PORT_5 BIT(3) +#define PORT_INS_TAG_FOR_PORT_4 BIT(2) +#define PORT_INS_TAG_FOR_PORT_3 BIT(1) +#define PORT_INS_TAG_FOR_PORT_2 BIT(0) + +#define REG_PORT_1_CTRL_13 0xB1 +#define REG_PORT_2_CTRL_13 0xC1 +#define REG_PORT_3_CTRL_13 0xD1 +#define REG_PORT_4_CTRL_13 0xE1 +#define REG_PORT_5_CTRL_13 0xF1 + +#define KSZ8795_PORT_4QUEUE_SPLIT_EN BIT(1) +#define PORT_DROP_TAG BIT(0) + +#define REG_PORT_1_CTRL_14 0xB2 +#define REG_PORT_2_CTRL_14 0xC2 +#define REG_PORT_3_CTRL_14 0xD2 +#define REG_PORT_4_CTRL_14 0xE2 +#define REG_PORT_5_CTRL_14 0xF2 +#define REG_PORT_1_CTRL_15 0xB3 +#define REG_PORT_2_CTRL_15 0xC3 +#define REG_PORT_3_CTRL_15 0xD3 +#define REG_PORT_4_CTRL_15 0xE3 +#define REG_PORT_5_CTRL_15 0xF3 +#define REG_PORT_1_CTRL_16 0xB4 +#define REG_PORT_2_CTRL_16 0xC4 +#define REG_PORT_3_CTRL_16 0xD4 +#define REG_PORT_4_CTRL_16 0xE4 +#define REG_PORT_5_CTRL_16 0xF4 +#define REG_PORT_1_CTRL_17 0xB5 +#define REG_PORT_2_CTRL_17 0xC5 +#define REG_PORT_3_CTRL_17 0xD5 +#define REG_PORT_4_CTRL_17 0xE5 +#define REG_PORT_5_CTRL_17 0xF5 + +#define REG_PORT_1_RATE_CTRL_3 0xB2 +#define REG_PORT_1_RATE_CTRL_2 0xB3 +#define REG_PORT_1_RATE_CTRL_1 0xB4 +#define REG_PORT_1_RATE_CTRL_0 0xB5 +#define REG_PORT_2_RATE_CTRL_3 0xC2 +#define REG_PORT_2_RATE_CTRL_2 0xC3 +#define REG_PORT_2_RATE_CTRL_1 0xC4 +#define REG_PORT_2_RATE_CTRL_0 0xC5 +#define REG_PORT_3_RATE_CTRL_3 0xD2 +#define REG_PORT_3_RATE_CTRL_2 0xD3 +#define REG_PORT_3_RATE_CTRL_1 0xD4 +#define REG_PORT_3_RATE_CTRL_0 0xD5 +#define REG_PORT_4_RATE_CTRL_3 0xE2 +#define REG_PORT_4_RATE_CTRL_2 0xE3 +#define REG_PORT_4_RATE_CTRL_1 0xE4 +#define REG_PORT_4_RATE_CTRL_0 0xE5 +#define REG_PORT_5_RATE_CTRL_3 0xF2 +#define REG_PORT_5_RATE_CTRL_2 0xF3 +#define REG_PORT_5_RATE_CTRL_1 0xF4 +#define REG_PORT_5_RATE_CTRL_0 0xF5 + +#define RATE_CTRL_ENABLE BIT(7) +#define RATE_RATIO_M (BIT(7) - 1) + +#define PORT_OUT_RATE_ENABLE BIT(7) + +#define REG_PORT_1_RATE_LIMIT 0xB6 
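Editorial aside (illustration only, not part of the patch): the PORT_CTRL_ADDR() macro above maps a 0-based port index plus a generic per-port offset to an absolute register address, using the 0x10 stride between REG_PORT_1_CTRL_0 (0x10) and REG_PORT_2_CTRL_0 (0x20). Assuming this header and <linux/build_bug.h> are included, the mapping can be checked at compile time:

/* Port index 0 selects the port 1 register block: 0x00 + 0x10 + 0 * 0x10. */
static_assert(PORT_CTRL_ADDR(0, REG_PORT_CTRL_0) == REG_PORT_1_CTRL_0);

/* Port index 2 with the generic 0x03 offset: 0x03 + 0x10 + 2 * 0x10 == 0x33. */
static_assert(PORT_CTRL_ADDR(2, REG_PORT_CTRL_VID) == REG_PORT_3_CTRL_3);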
+#define REG_PORT_2_RATE_LIMIT 0xC6 +#define REG_PORT_3_RATE_LIMIT 0xD6 +#define REG_PORT_4_RATE_LIMIT 0xE6 +#define REG_PORT_5_RATE_LIMIT 0xF6 + +#define PORT_IN_PORT_BASED_S 6 +#define PORT_RATE_PACKET_BASED_S 5 +#define PORT_IN_FLOW_CTRL_S 4 +#define PORT_IN_LIMIT_MODE_M 0x3 +#define PORT_IN_LIMIT_MODE_S 2 +#define PORT_COUNT_IFG_S 1 +#define PORT_COUNT_PREAMBLE_S 0 +#define PORT_IN_PORT_BASED BIT(PORT_IN_PORT_BASED_S) +#define PORT_RATE_PACKET_BASED BIT(PORT_RATE_PACKET_BASED_S) +#define PORT_IN_FLOW_CTRL BIT(PORT_IN_FLOW_CTRL_S) +#define PORT_IN_ALL 0 +#define PORT_IN_UNICAST 1 +#define PORT_IN_MULTICAST 2 +#define PORT_IN_BROADCAST 3 +#define PORT_COUNT_IFG BIT(PORT_COUNT_IFG_S) +#define PORT_COUNT_PREAMBLE BIT(PORT_COUNT_PREAMBLE_S) + +#define REG_PORT_1_IN_RATE_0 0xB7 +#define REG_PORT_2_IN_RATE_0 0xC7 +#define REG_PORT_3_IN_RATE_0 0xD7 +#define REG_PORT_4_IN_RATE_0 0xE7 +#define REG_PORT_5_IN_RATE_0 0xF7 +#define REG_PORT_1_IN_RATE_1 0xB8 +#define REG_PORT_2_IN_RATE_1 0xC8 +#define REG_PORT_3_IN_RATE_1 0xD8 +#define REG_PORT_4_IN_RATE_1 0xE8 +#define REG_PORT_5_IN_RATE_1 0xF8 +#define REG_PORT_1_IN_RATE_2 0xB9 +#define REG_PORT_2_IN_RATE_2 0xC9 +#define REG_PORT_3_IN_RATE_2 0xD9 +#define REG_PORT_4_IN_RATE_2 0xE9 +#define REG_PORT_5_IN_RATE_2 0xF9 +#define REG_PORT_1_IN_RATE_3 0xBA +#define REG_PORT_2_IN_RATE_3 0xCA +#define REG_PORT_3_IN_RATE_3 0xDA +#define REG_PORT_4_IN_RATE_3 0xEA +#define REG_PORT_5_IN_RATE_3 0xFA + +#define PORT_IN_RATE_ENABLE BIT(7) +#define PORT_RATE_LIMIT_M (BIT(7) - 1) + +#define REG_PORT_1_OUT_RATE_0 0xBB +#define REG_PORT_2_OUT_RATE_0 0xCB +#define REG_PORT_3_OUT_RATE_0 0xDB +#define REG_PORT_4_OUT_RATE_0 0xEB +#define REG_PORT_5_OUT_RATE_0 0xFB +#define REG_PORT_1_OUT_RATE_1 0xBC +#define REG_PORT_2_OUT_RATE_1 0xCC +#define REG_PORT_3_OUT_RATE_1 0xDC +#define REG_PORT_4_OUT_RATE_1 0xEC +#define REG_PORT_5_OUT_RATE_1 0xFC +#define REG_PORT_1_OUT_RATE_2 0xBD +#define REG_PORT_2_OUT_RATE_2 0xCD +#define REG_PORT_3_OUT_RATE_2 0xDD +#define REG_PORT_4_OUT_RATE_2 0xED +#define REG_PORT_5_OUT_RATE_2 0xFD +#define REG_PORT_1_OUT_RATE_3 0xBE +#define REG_PORT_2_OUT_RATE_3 0xCE +#define REG_PORT_3_OUT_RATE_3 0xDE +#define REG_PORT_4_OUT_RATE_3 0xEE +#define REG_PORT_5_OUT_RATE_3 0xFE + +/* 88x3 specific */ + +#define REG_SW_INSERT_SRC_PVID 0xC2 + +/* PME */ + +#define SW_PME_OUTPUT_ENABLE BIT(1) +#define SW_PME_ACTIVE_HIGH BIT(0) + +#define PORT_MAGIC_PACKET_DETECT BIT(2) +#define PORT_LINK_UP_DETECT BIT(1) +#define PORT_ENERGY_DETECT BIT(0) + +/* ACL */ + +#define ACL_FIRST_RULE_M 0xF + +#define ACL_MODE_M 0x3 +#define ACL_MODE_S 4 +#define ACL_MODE_DISABLE 0 +#define ACL_MODE_LAYER_2 1 +#define ACL_MODE_LAYER_3 2 +#define ACL_MODE_LAYER_4 3 +#define ACL_ENABLE_M 0x3 +#define ACL_ENABLE_S 2 +#define ACL_ENABLE_2_COUNT 0 +#define ACL_ENABLE_2_TYPE 1 +#define ACL_ENABLE_2_MAC 2 +#define ACL_ENABLE_2_BOTH 3 +#define ACL_ENABLE_3_IP 1 +#define ACL_ENABLE_3_SRC_DST_COMP 2 +#define ACL_ENABLE_4_PROTOCOL 0 +#define ACL_ENABLE_4_TCP_PORT_COMP 1 +#define ACL_ENABLE_4_UDP_PORT_COMP 2 +#define ACL_ENABLE_4_TCP_SEQN_COMP 3 +#define ACL_SRC BIT(1) +#define ACL_EQUAL BIT(0) + +#define ACL_MAX_PORT 0xFFFF + +#define ACL_MIN_PORT 0xFFFF +#define ACL_IP_ADDR 0xFFFFFFFF +#define ACL_TCP_SEQNUM 0xFFFFFFFF + +#define ACL_RESERVED 0xF8 +#define ACL_PORT_MODE_M 0x3 +#define ACL_PORT_MODE_S 1 +#define ACL_PORT_MODE_DISABLE 0 +#define ACL_PORT_MODE_EITHER 1 +#define ACL_PORT_MODE_IN_RANGE 2 +#define ACL_PORT_MODE_OUT_OF_RANGE 3 + +#define ACL_TCP_FLAG_ENABLE BIT(0) + +#define 
ACL_TCP_FLAG_M 0xFF + +#define ACL_TCP_FLAG 0xFF +#define ACL_ETH_TYPE 0xFFFF +#define ACL_IP_M 0xFFFFFFFF + +#define ACL_PRIO_MODE_M 0x3 +#define ACL_PRIO_MODE_S 6 +#define ACL_PRIO_MODE_DISABLE 0 +#define ACL_PRIO_MODE_HIGHER 1 +#define ACL_PRIO_MODE_LOWER 2 +#define ACL_PRIO_MODE_REPLACE 3 +#define ACL_PRIO_M 0x7 +#define ACL_PRIO_S 3 +#define ACL_VLAN_PRIO_REPLACE BIT(2) +#define ACL_VLAN_PRIO_M 0x7 +#define ACL_VLAN_PRIO_HI_M 0x3 + +#define ACL_VLAN_PRIO_LO_M 0x8 +#define ACL_VLAN_PRIO_S 7 +#define ACL_MAP_MODE_M 0x3 +#define ACL_MAP_MODE_S 5 +#define ACL_MAP_MODE_DISABLE 0 +#define ACL_MAP_MODE_OR 1 +#define ACL_MAP_MODE_AND 2 +#define ACL_MAP_MODE_REPLACE 3 +#define ACL_MAP_PORT_M 0x1F + +#define ACL_CNT_M (BIT(11) - 1) +#define ACL_CNT_S 5 +#define ACL_MSEC_UNIT BIT(4) +#define ACL_INTR_MODE BIT(3) + +#define REG_PORT_ACL_BYTE_EN_MSB 0x10 + +#define ACL_BYTE_EN_MSB_M 0x3F + +#define REG_PORT_ACL_BYTE_EN_LSB 0x11 + +#define ACL_ACTION_START 0xA +#define ACL_ACTION_LEN 2 +#define ACL_INTR_CNT_START 0xB +#define ACL_RULESET_START 0xC +#define ACL_RULESET_LEN 2 +#define ACL_TABLE_LEN 14 + +#define ACL_ACTION_ENABLE 0x000C +#define ACL_MATCH_ENABLE 0x1FF0 +#define ACL_RULESET_ENABLE 0x2003 +#define ACL_BYTE_ENABLE ((ACL_BYTE_EN_MSB_M << 8) | 0xFF) +#define ACL_MODE_ENABLE (0x10 << 8) + +#define REG_PORT_ACL_CTRL_0 0x12 + +#define PORT_ACL_WRITE_DONE BIT(6) +#define PORT_ACL_READ_DONE BIT(5) +#define PORT_ACL_WRITE BIT(4) +#define PORT_ACL_INDEX_M 0xF + +#define REG_PORT_ACL_CTRL_1 0x13 + +#define PORT_ACL_FORCE_DLR_MISS BIT(0) + +#define KSZ8795_ID_HI 0x0022 +#define KSZ8795_ID_LO 0x1550 +#define KSZ8863_ID_LO 0x1430 + +#define PHY_REG_LINK_MD 0x1D + +#define PHY_START_CABLE_DIAG BIT(15) +#define PHY_CABLE_DIAG_RESULT_M GENMASK(14, 13) +#define PHY_CABLE_DIAG_RESULT 0x6000 +#define PHY_CABLE_STAT_NORMAL 0x0000 +#define PHY_CABLE_STAT_OPEN 0x2000 +#define PHY_CABLE_STAT_SHORT 0x4000 +#define PHY_CABLE_STAT_FAILED 0x6000 +#define PHY_CABLE_10M_SHORT BIT(12) +#define PHY_CABLE_FAULT_COUNTER_M GENMASK(8, 0) + +#define PHY_REG_PHY_CTRL 0x1F + +#define PHY_MODE_M 0x7 +#define PHY_MODE_S 8 +#define PHY_STAT_REVERSED_POLARITY BIT(5) +#define PHY_STAT_MDIX BIT(4) +#define PHY_FORCE_LINK BIT(3) +#define PHY_POWER_SAVING_ENABLE BIT(2) +#define PHY_REMOTE_LOOPBACK BIT(1) + +/* Chip resource */ + +#define PRIO_QUEUES 4 + +#define KS_PRIO_IN_REG 4 + +#define MIB_COUNTER_NUM 0x20 + +/* Common names used by other drivers */ + +#define P_BCAST_STORM_CTRL REG_PORT_CTRL_0 +#define P_PRIO_CTRL REG_PORT_CTRL_0 +#define P_TAG_CTRL REG_PORT_CTRL_0 +#define P_MIRROR_CTRL REG_PORT_CTRL_1 +#define P_802_1P_CTRL REG_PORT_CTRL_2 +#define P_PASS_ALL_CTRL REG_PORT_CTRL_12 +#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12 +#define P_DROP_TAG_CTRL REG_PORT_CTRL_13 +#define P_RATE_LIMIT_CTRL REG_PORT_RATE_LIMIT + +#define S_UNKNOWN_DA_CTRL REG_SWITCH_CTRL_12 +#define S_FORWARD_INVALID_VID_CTRL REG_FORWARD_INVALID_VID + +#define S_FLUSH_TABLE_CTRL REG_SW_CTRL_0 +#define S_LINK_AGING_CTRL REG_SW_CTRL_0 +#define S_HUGE_PACKET_CTRL REG_SW_CTRL_1 +#define S_MIRROR_CTRL REG_SW_CTRL_3 +#define S_REPLACE_VID_CTRL REG_SW_CTRL_4 +#define S_PASS_PAUSE_CTRL REG_SW_CTRL_10 +#define S_802_1P_PRIO_CTRL REG_SW_CTRL_12 +#define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0 +#define S_IPV6_MLD_CTRL REG_SW_CTRL_21 + +#define IND_ACC_TABLE(table) ((table) << 8) + +/* */ +#define REG_IND_EEE_GLOB2_LO 0x34 +#define REG_IND_EEE_GLOB2_HI 0x35 + +/** + * MIB_COUNTER_VALUE 00-00000000-3FFFFFFF + * MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF + * MIB_PACKET_DROPPED 
00-00000000-0000FFFF + * MIB_COUNTER_VALID 00-00000020-00000000 + * MIB_COUNTER_OVERFLOW 00-00000040-00000000 + */ + +#define MIB_COUNTER_VALUE 0x3FFFFFFF + +#define KSZ8795_MIB_TOTAL_RX_0 0x100 +#define KSZ8795_MIB_TOTAL_TX_0 0x101 +#define KSZ8795_MIB_TOTAL_RX_1 0x104 +#define KSZ8795_MIB_TOTAL_TX_1 0x105 + +#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100 +#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105 + +#define MIB_PACKET_DROPPED 0x0000FFFF + +#define MIB_TOTAL_BYTES_H 0x0000000F + +#define TAIL_TAG_OVERRIDE BIT(6) +#define TAIL_TAG_LOOKUP BIT(7) + +#define FID_ENTRIES 128 +#define KSZ8_DYN_MAC_ENTRIES 1024 + +#endif diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 425e20daf1e92d..0ba658a72d8fea 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -56,187 +56,6 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu) REG_SW_MTU_MASK, frame_size); } -/** - * ksz9477_handle_wake_reason - Handle wake reason on a specified port. - * @dev: The device structure. - * @port: The port number. - * - * This function reads the PME (Power Management Event) status register of a - * specified port to determine the wake reason. If there is no wake event, it - * returns early. Otherwise, it logs the wake reason which could be due to a - * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register - * is then cleared to acknowledge the handling of the wake event. - * - * Return: 0 on success, or an error code on failure. - */ -static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port) -{ - u8 pme_status; - int ret; - - ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status); - if (ret) - return ret; - - if (!pme_status) - return 0; - - dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port, - pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "", - pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "", - pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : ""); - - return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status); -} - -/** - * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port. - * @dev: The device structure. - * @port: The port number. - * @wol: Pointer to ethtool Wake-on-LAN settings structure. - * - * This function checks the PME Pin Control Register to see if PME Pin Output - * Enable is set, indicating PME is enabled. If enabled, it sets the supported - * and active WoL flags. - */ -void ksz9477_get_wol(struct ksz_device *dev, int port, - struct ethtool_wolinfo *wol) -{ - u8 pme_ctrl; - int ret; - - if (!dev->wakeup_source) - return; - - wol->supported = WAKE_PHY; - - /* Check if the current MAC address on this port can be set - * as global for WAKE_MAGIC support. The result may vary - * dynamically based on other ports configurations. - */ - if (ksz_is_port_mac_global_usable(dev->ds, port)) - wol->supported |= WAKE_MAGIC; - - ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl); - if (ret) - return; - - if (pme_ctrl & PME_WOL_MAGICPKT) - wol->wolopts |= WAKE_MAGIC; - if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY)) - wol->wolopts |= WAKE_PHY; -} - -/** - * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port. - * @dev: The device structure. - * @port: The port number. - * @wol: Pointer to ethtool Wake-on-LAN settings structure. - * - * This function configures Wake-on-LAN (WoL) settings for a specified port. 
- * It validates the provided WoL options, checks if PME is enabled via the - * switch's PME Pin Control Register, clears any previous wake reasons, - * and sets the Magic Packet flag in the port's PME control register if - * specified. - * - * Return: 0 on success, or other error codes on failure. - */ -int ksz9477_set_wol(struct ksz_device *dev, int port, - struct ethtool_wolinfo *wol) -{ - u8 pme_ctrl = 0, pme_ctrl_old = 0; - bool magic_switched_off; - bool magic_switched_on; - int ret; - - if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) - return -EINVAL; - - if (!dev->wakeup_source) - return -EOPNOTSUPP; - - ret = ksz9477_handle_wake_reason(dev, port); - if (ret) - return ret; - - if (wol->wolopts & WAKE_MAGIC) - pme_ctrl |= PME_WOL_MAGICPKT; - if (wol->wolopts & WAKE_PHY) - pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY; - - ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old); - if (ret) - return ret; - - if (pme_ctrl_old == pme_ctrl) - return 0; - - magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) && - !(pme_ctrl & PME_WOL_MAGICPKT); - magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) && - (pme_ctrl & PME_WOL_MAGICPKT); - - /* To keep reference count of MAC address, we should do this - * operation only on change of WOL settings. - */ - if (magic_switched_on) { - ret = ksz_switch_macaddr_get(dev->ds, port, NULL); - if (ret) - return ret; - } else if (magic_switched_off) { - ksz_switch_macaddr_put(dev->ds); - } - - ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl); - if (ret) { - if (magic_switched_on) - ksz_switch_macaddr_put(dev->ds); - return ret; - } - - return 0; -} - -/** - * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while - * considering Wake-on-LAN (WoL) settings. - * @dev: The switch device structure. - * @wol_enabled: Pointer to a boolean which will be set to true if WoL is - * enabled on any port. - * - * This function prepares the switch device for a safe shutdown while taking - * into account the Wake-on-LAN (WoL) settings on the user ports. It updates - * the wol_enabled flag accordingly to reflect whether WoL is active on any - * port. - */ -void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled) -{ - struct dsa_port *dp; - int ret; - - *wol_enabled = false; - - if (!dev->wakeup_source) - return; - - dsa_switch_for_each_user_port(dp, dev->ds) { - u8 pme_ctrl = 0; - - ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl); - if (!ret && pme_ctrl) - *wol_enabled = true; - - /* make sure there are no pending wake events which would - * prevent the device from going to sleep/shutdown. - */ - ksz9477_handle_wake_reason(dev, dp->index); - } - - /* Now we are save to enable PME pin. */ - if (*wol_enabled) - ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE); -} - static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev) { unsigned int val; @@ -427,54 +246,70 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze) mutex_unlock(&p->mib.cnt_mutex); } -int ksz9477_errata_monitor(struct ksz_device *dev, int port, - u64 tx_late_col) +static int ksz9477_half_duplex_monitor(struct ksz_device *dev, int port, + u64 tx_late_col) { + u8 lue_ctrl; u32 pmavbc; - u8 status; u16 pqm; int ret; - ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status); + /* Errata DS80000754 recommends monitoring potential faults in + * half-duplex mode. The switch might not be able to communicate anymore + * in these states. 
If you see this message, please read the + * errata-sheet for more information: + * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf + * To workaround this issue, half-duplex mode should be avoided. + * A software reset could be implemented to recover from this state. + */ + dev_warn_once(dev->dev, + "Half-duplex detected on port %d, transmission halt may occur\n", + port); + if (tx_late_col != 0) { + /* Transmission halt with late collisions */ + dev_crit_once(dev->dev, + "TX late collisions detected, transmission may be halted on port %d\n", + port); + } + ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &lue_ctrl); if (ret) return ret; - if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) == PORT_INTF_SPEED_NONE) && - !(status & PORT_INTF_FULL_DUPLEX)) { - /* Errata DS80000754 recommends monitoring potential faults in - * half-duplex mode. The switch might not be able to communicate anymore - * in these states. - * If you see this message, please read the errata-sheet for more information: - * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf - * To workaround this issue, half-duplex mode should be avoided. - * A software reset could be implemented to recover from this state. - */ - dev_warn_once(dev->dev, - "Half-duplex detected on port %d, transmission halt may occur\n", - port); - if (tx_late_col != 0) { - /* Transmission halt with late collisions */ - dev_crit_once(dev->dev, - "TX late collisions detected, transmission may be halted on port %d\n", - port); - } - ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status); + if (lue_ctrl & SW_VLAN_ENABLE) { + ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm); if (ret) return ret; - if (status & SW_VLAN_ENABLE) { - ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm); - if (ret) - return ret; - ret = ksz_read32(dev, REG_PMAVBC, &pmavbc); - if (ret) - return ret; - if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) || - (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) { - /* Transmission halt with Half-Duplex and VLAN */ - dev_crit_once(dev->dev, - "resources out of limits, transmission may be halted\n"); - } + + ret = ksz_read32(dev, REG_PMAVBC, &pmavbc); + if (ret) + return ret; + + if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) || + (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) { + /* Transmission halt with Half-Duplex and VLAN */ + dev_crit_once(dev->dev, + "resources out of limits, transmission may be halted\n"); } } + + return ret; +} + +int ksz9477_errata_monitor(struct ksz_device *dev, int port, + u64 tx_late_col) +{ + u8 status; + int ret; + + ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status); + if (ret) + return ret; + + if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) + == PORT_INTF_SPEED_NONE) && + !(status & PORT_INTF_FULL_DUPLEX)) { + ret = ksz9477_half_duplex_monitor(dev, port, tx_late_col); + } + return ret; } @@ -1188,6 +1023,7 @@ void ksz9477_port_queue_split(struct ksz_device *dev, int port) void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) { + const u16 *regs = dev->info->regs; struct dsa_switch *ds = dev->ds; u16 data16; u8 member; @@ -1232,12 +1068,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) ksz9477_port_acl_init(dev, port); /* clear pending wake flags */ - ksz9477_handle_wake_reason(dev, port); + ksz_handle_wake_reason(dev, port); /* Disable all WoL options by default. 
Otherwise * ksz_switch_macaddr_get/put logic will not work properly. */ - ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0); + ksz_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0); } void ksz9477_config_cpu_port(struct dsa_switch *ds) @@ -1334,6 +1170,7 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev) int ksz9477_setup(struct dsa_switch *ds) { struct ksz_device *dev = ds->priv; + const u16 *regs = dev->info->regs; int ret = 0; ds->mtu_enforcement_ingress = true; @@ -1364,13 +1201,11 @@ int ksz9477_setup(struct dsa_switch *ds) /* enable global MIB counter freeze function */ ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true); - /* Make sure PME (WoL) is not enabled. If requested, it will be - * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not - * like PME events changes before shutdown. + /* Make sure PME (WoL) is not enabled. If requested, it will + * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs + * do not like PME events changes before shutdown. */ - ksz_write8(dev, REG_SW_PME_CTRL, 0); - - return 0; + return ksz_write8(dev, regs[REG_SW_PME_CTRL], 0); } u32 ksz9477_get_port_addr(int port, int offset) diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h index 239a281da10b3f..d2166b0d881ed0 100644 --- a/drivers/net/dsa/microchip/ksz9477.h +++ b/drivers/net/dsa/microchip/ksz9477.h @@ -60,11 +60,6 @@ void ksz9477_switch_exit(struct ksz_device *dev); void ksz9477_port_queue_split(struct ksz_device *dev, int port); void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr); void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr); -void ksz9477_get_wol(struct ksz_device *dev, int port, - struct ethtool_wolinfo *wol); -int ksz9477_set_wol(struct ksz_device *dev, int port, - struct ethtool_wolinfo *wol); -void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled); int ksz9477_port_acl_init(struct ksz_device *dev, int port); void ksz9477_port_acl_free(struct ksz_device *dev, int port); diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index d5354c600ea1e9..04235c22bf40e4 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -38,11 +38,6 @@ #define SWITCH_REVISION_S 4 #define SWITCH_RESET 0x01 -#define REG_SW_PME_CTRL 0x0006 - -#define PME_ENABLE BIT(1) -#define PME_POLARITY BIT(0) - #define REG_GLOBAL_OPTIONS 0x000F #define SW_GIGABIT_ABLE BIT(6) @@ -807,13 +802,6 @@ #define REG_PORT_AVB_SR_1_TYPE 0x0008 #define REG_PORT_AVB_SR_2_TYPE 0x000A -#define REG_PORT_PME_STATUS 0x0013 -#define REG_PORT_PME_CTRL 0x0017 - -#define PME_WOL_MAGICPKT BIT(2) -#define PME_WOL_LINKUP BIT(1) -#define PME_WOL_ENERGY BIT(0) - #define REG_PORT_INT_STATUS 0x001B #define REG_PORT_INT_MASK 0x001F diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 1491099528be8d..4e8710c7cb7bb2 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -2,7 +2,7 @@ /* * Microchip switch driver main logic * - * Copyright (C) 2017-2019 Microchip Technology Inc. + * Copyright (C) 2017-2024 Microchip Technology Inc. */ #include @@ -246,16 +246,16 @@ static const struct ksz_drive_strength ksz9477_drive_strengths[] = { { SW_DRIVE_STRENGTH_28MA, 28000 }, }; -/* ksz8830_drive_strengths - Drive strength mapping for KSZ8830, KSZ8873, .. +/* ksz88x3_drive_strengths - Drive strength mapping for KSZ8863, KSZ8873, .. * variants. 
* This values are documented in KSZ8873 and KSZ8863 datasheets. */ -static const struct ksz_drive_strength ksz8830_drive_strengths[] = { +static const struct ksz_drive_strength ksz88x3_drive_strengths[] = { { 0, 8000 }, { KSZ8873_DRIVE_STRENGTH_16MA, 16000 }, }; -static void ksz8830_phylink_mac_config(struct phylink_config *config, +static void ksz88x3_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); static void ksz_phylink_mac_config(struct phylink_config *config, @@ -265,8 +265,8 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface); -static const struct phylink_mac_ops ksz8830_phylink_mac_ops = { - .mac_config = ksz8830_phylink_mac_config, +static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = { + .mac_config = ksz88x3_phylink_mac_config, .mac_link_down = ksz_phylink_mac_link_down, .mac_link_up = ksz8_phylink_mac_link_up, }; @@ -277,7 +277,7 @@ static const struct phylink_mac_ops ksz8_phylink_mac_ops = { .mac_link_up = ksz8_phylink_mac_link_up, }; -static const struct ksz_dev_ops ksz8_dev_ops = { +static const struct ksz_dev_ops ksz88xx_dev_ops = { .setup = ksz8_setup, .get_port_addr = ksz8_get_port_addr, .cfg_port_member = ksz8_cfg_port_member, @@ -307,6 +307,44 @@ static const struct ksz_dev_ops ksz8_dev_ops = { .init = ksz8_switch_init, .exit = ksz8_switch_exit, .change_mtu = ksz8_change_mtu, + .pme_write8 = ksz8_pme_write8, + .pme_pread8 = ksz8_pme_pread8, + .pme_pwrite8 = ksz8_pme_pwrite8, +}; + +static const struct ksz_dev_ops ksz87xx_dev_ops = { + .setup = ksz8_setup, + .get_port_addr = ksz8_get_port_addr, + .cfg_port_member = ksz8_cfg_port_member, + .flush_dyn_mac_table = ksz8_flush_dyn_mac_table, + .port_setup = ksz8_port_setup, + .r_phy = ksz8_r_phy, + .w_phy = ksz8_w_phy, + .r_mib_cnt = ksz8_r_mib_cnt, + .r_mib_pkt = ksz8_r_mib_pkt, + .r_mib_stat64 = ksz_r_mib_stats64, + .freeze_mib = ksz8_freeze_mib, + .port_init_cnt = ksz8_port_init_cnt, + .fdb_dump = ksz8_fdb_dump, + .fdb_add = ksz8_fdb_add, + .fdb_del = ksz8_fdb_del, + .mdb_add = ksz8_mdb_add, + .mdb_del = ksz8_mdb_del, + .vlan_filtering = ksz8_port_vlan_filtering, + .vlan_add = ksz8_port_vlan_add, + .vlan_del = ksz8_port_vlan_del, + .mirror_add = ksz8_port_mirror_add, + .mirror_del = ksz8_port_mirror_del, + .get_caps = ksz8_get_caps, + .config_cpu_port = ksz8_config_cpu_port, + .enable_stp_addr = ksz8_enable_stp_addr, + .reset = ksz8_reset_switch, + .init = ksz8_switch_init, + .exit = ksz8_switch_exit, + .change_mtu = ksz8_change_mtu, + .pme_write8 = ksz8_pme_write8, + .pme_pread8 = ksz8_pme_pread8, + .pme_pwrite8 = ksz8_pme_pwrite8, }; static void ksz9477_phylink_mac_link_up(struct phylink_config *config, @@ -348,9 +386,9 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .mdb_add = ksz9477_mdb_add, .mdb_del = ksz9477_mdb_del, .change_mtu = ksz9477_change_mtu, - .get_wol = ksz9477_get_wol, - .set_wol = ksz9477_set_wol, - .wol_pre_shutdown = ksz9477_wol_pre_shutdown, + .pme_write8 = ksz_write8, + .pme_pread8 = ksz_pread8, + .pme_pwrite8 = ksz_pwrite8, .config_cpu_port = ksz9477_config_cpu_port, .tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc, .enable_stp_addr = ksz9477_enable_stp_addr, @@ -423,6 +461,9 @@ static const u16 ksz8795_regs[] = { [S_MULTICAST_CTRL] = 0x04, [P_XMII_CTRL_0] = 0x06, [P_XMII_CTRL_1] = 0x06, + [REG_SW_PME_CTRL] = 0x8003, + [REG_PORT_PME_STATUS] = 0x8003, + [REG_PORT_PME_CTRL] = 0x8007, }; static const u32 ksz8795_masks[] = { @@ -531,6 +572,61 @@ static u8 
ksz8863_shifts[] = { [DYNAMIC_MAC_SRC_PORT] = 20, }; +static const u16 ksz8895_regs[] = { + [REG_SW_MAC_ADDR] = 0x68, + [REG_IND_CTRL_0] = 0x6E, + [REG_IND_DATA_8] = 0x70, + [REG_IND_DATA_CHECK] = 0x72, + [REG_IND_DATA_HI] = 0x71, + [REG_IND_DATA_LO] = 0x75, + [REG_IND_MIB_CHECK] = 0x75, + [P_FORCE_CTRL] = 0x0C, + [P_LINK_STATUS] = 0x0E, + [P_LOCAL_CTRL] = 0x0C, + [P_NEG_RESTART_CTRL] = 0x0D, + [P_REMOTE_STATUS] = 0x0E, + [P_SPEED_STATUS] = 0x09, + [S_TAIL_TAG_CTRL] = 0x0C, + [P_STP_CTRL] = 0x02, + [S_START_CTRL] = 0x01, + [S_BROADCAST_CTRL] = 0x06, + [S_MULTICAST_CTRL] = 0x04, +}; + +static const u32 ksz8895_masks[] = { + [PORT_802_1P_REMAPPING] = BIT(7), + [SW_TAIL_TAG_ENABLE] = BIT(1), + [MIB_COUNTER_OVERFLOW] = BIT(7), + [MIB_COUNTER_VALID] = BIT(6), + [VLAN_TABLE_FID] = GENMASK(6, 0), + [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7), + [VLAN_TABLE_VALID] = BIT(12), + [STATIC_MAC_TABLE_VALID] = BIT(21), + [STATIC_MAC_TABLE_USE_FID] = BIT(23), + [STATIC_MAC_TABLE_FID] = GENMASK(30, 24), + [STATIC_MAC_TABLE_OVERRIDE] = BIT(22), + [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16), + [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0), + [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7), + [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7), + [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29), + [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16), + [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24), + [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27), +}; + +static const u8 ksz8895_shifts[] = { + [VLAN_TABLE_MEMBERSHIP_S] = 7, + [VLAN_TABLE] = 13, + [STATIC_MAC_FWD_PORTS] = 16, + [STATIC_MAC_FID] = 24, + [DYNAMIC_MAC_ENTRIES_H] = 3, + [DYNAMIC_MAC_ENTRIES] = 29, + [DYNAMIC_MAC_FID] = 16, + [DYNAMIC_MAC_TIMESTAMP] = 27, + [DYNAMIC_MAC_SRC_PORT] = 24, +}; + static const u16 ksz9477_regs[] = { [REG_SW_MAC_ADDR] = 0x0302, [P_STP_CTRL] = 0x0B04, @@ -539,6 +635,9 @@ static const u16 ksz9477_regs[] = { [S_MULTICAST_CTRL] = 0x0331, [P_XMII_CTRL_0] = 0x0300, [P_XMII_CTRL_1] = 0x0301, + [REG_SW_PME_CTRL] = 0x0006, + [REG_PORT_PME_STATUS] = 0x0013, + [REG_PORT_PME_CTRL] = 0x0017, }; static const u32 ksz9477_masks[] = { @@ -1253,12 +1352,12 @@ const struct ksz_chip_data ksz_switch_chips[] = { .dev_name = "KSZ8795", .num_vlans = 4096, .num_alus = 0, - .num_statics = 8, + .num_statics = 32, .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .num_tx_queues = 4, .num_ipms = 4, - .ops = &ksz8_dev_ops, + .ops = &ksz87xx_dev_ops, .phylink_mac_ops = &ksz8_phylink_mac_ops, .ksz87xx_eee_link_erratum = true, .mib_names = ksz9477_mib_names, @@ -1294,12 +1393,12 @@ const struct ksz_chip_data ksz_switch_chips[] = { .dev_name = "KSZ8794", .num_vlans = 4096, .num_alus = 0, - .num_statics = 8, + .num_statics = 32, .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .num_tx_queues = 4, .num_ipms = 4, - .ops = &ksz8_dev_ops, + .ops = &ksz87xx_dev_ops, .phylink_mac_ops = &ksz8_phylink_mac_ops, .ksz87xx_eee_link_erratum = true, .mib_names = ksz9477_mib_names, @@ -1321,12 +1420,12 @@ const struct ksz_chip_data ksz_switch_chips[] = { .dev_name = "KSZ8765", .num_vlans = 4096, .num_alus = 0, - .num_statics = 8, + .num_statics = 32, .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .num_tx_queues = 4, .num_ipms = 4, - .ops = &ksz8_dev_ops, + .ops = &ksz87xx_dev_ops, .phylink_mac_ops = &ksz8_phylink_mac_ops, .ksz87xx_eee_link_erratum = true, .mib_names = ksz9477_mib_names, @@ -1343,8 +1442,8 @@ const struct ksz_chip_data 
ksz_switch_chips[] = { .internal_phy = {true, true, true, true, false}, }, - [KSZ8830] = { - .chip_id = KSZ8830_CHIP_ID, + [KSZ88X3] = { + .chip_id = KSZ88X3_CHIP_ID, .dev_name = "KSZ8863/KSZ8873", .num_vlans = 16, .num_alus = 0, @@ -1353,8 +1452,8 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 3, .num_tx_queues = 4, .num_ipms = 4, - .ops = &ksz8_dev_ops, - .phylink_mac_ops = &ksz8830_phylink_mac_ops, + .ops = &ksz88xx_dev_ops, + .phylink_mac_ops = &ksz88x3_phylink_mac_ops, .mib_names = ksz88xx_mib_names, .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1368,6 +1467,61 @@ const struct ksz_chip_data ksz_switch_chips[] = { .rd_table = &ksz8873_register_set, }, + [KSZ8864] = { + /* WARNING + * ======= + * KSZ8864 is similar to KSZ8895, except the first port + * does not exist. + * external cpu + * KSZ8864 1,2,3 4 + * KSZ8895 0,1,2,3 4 + * port_cnt is configured as 5, even though it is 4 + */ + .chip_id = KSZ8864_CHIP_ID, + .dev_name = "KSZ8864", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 32, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 5, /* total cpu and user ports */ + .num_tx_queues = 4, + .num_ipms = 4, + .ops = &ksz88xx_dev_ops, + .phylink_mac_ops = &ksz88x3_phylink_mac_ops, + .mib_names = ksz88xx_mib_names, + .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, + .regs = ksz8895_regs, + .masks = ksz8895_masks, + .shifts = ksz8895_shifts, + .supports_mii = {false, false, false, false, true}, + .supports_rmii = {false, false, false, false, true}, + .internal_phy = {false, true, true, true, false}, + }, + + [KSZ8895] = { + .chip_id = KSZ8895_CHIP_ID, + .dev_name = "KSZ8895", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 32, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 5, /* total cpu and user ports */ + .num_tx_queues = 4, + .num_ipms = 4, + .ops = &ksz88xx_dev_ops, + .phylink_mac_ops = &ksz88x3_phylink_mac_ops, + .mib_names = ksz88xx_mib_names, + .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, + .regs = ksz8895_regs, + .masks = ksz8895_masks, + .shifts = ksz8895_shifts, + .supports_mii = {false, false, false, false, true}, + .supports_rmii = {false, false, false, false, true}, + .internal_phy = {true, true, true, true, false}, + }, + [KSZ9477] = { .chip_id = KSZ9477_CHIP_ID, .dev_name = "KSZ9477", @@ -2570,7 +2724,7 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) struct ksz_device *dev = ds->priv; switch (dev->chip_id) { - case KSZ8830_CHIP_ID: + case KSZ88X3_CHIP_ID: /* Silicon Errata Sheet (DS80000830A): * Port 1 does not work with LinkMD Cable-Testing. * Port 1 does not respond to received PAUSE control frames. 
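Editorial orientation before the remaining hunks (not part of the patch): the WoL code removed from ksz9477.c earlier is re-added below in ksz_common.c, and every PME access now goes through the per-chip register map added above (ksz8795_regs gains REG_SW_PME_CTRL/REG_PORT_PME_STATUS/REG_PORT_PME_CTRL at 0x8003/0x8003/0x8007, ksz9477_regs at 0x0006/0x0013/0x0017) together with the new pme_write8/pme_pread8/pme_pwrite8 ops. A minimal sketch of that access pattern, with a hypothetical helper name:

static int example_read_pme_status(struct ksz_device *dev, int port, u8 *val)
{
	/* Per-chip register map: ksz8795_regs or ksz9477_regs. */
	const u16 *regs = dev->info->regs;

	/* Resolves to ksz8_pme_pread8() on KSZ87xx and plain ksz_pread8()
	 * on KSZ9477.
	 */
	return dev->dev_ops->pme_pread8(dev, port,
					regs[REG_PORT_PME_STATUS], val);
}

The real consolidated users of this pattern are ksz_handle_wake_reason(), ksz_get_wol() and ksz_set_wol() below; from user space the feature is driven through the standard ethtool WoL interface (e.g. "ethtool -s <port> wol pg" to arm PHY-event and magic-packet wake).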
@@ -2893,12 +3047,10 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds, struct ksz_device *dev = ds->priv; enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE; - if (dev->chip_id == KSZ8795_CHIP_ID || - dev->chip_id == KSZ8794_CHIP_ID || - dev->chip_id == KSZ8765_CHIP_ID) + if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev)) proto = DSA_TAG_PROTO_KSZ8795; - if (dev->chip_id == KSZ8830_CHIP_ID || + if (dev->chip_id == KSZ88X3_CHIP_ID || dev->chip_id == KSZ8563_CHIP_ID || dev->chip_id == KSZ9893_CHIP_ID || dev->chip_id == KSZ9563_CHIP_ID) @@ -3010,7 +3162,9 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port) case KSZ8794_CHIP_ID: case KSZ8765_CHIP_ID: return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; - case KSZ8830_CHIP_ID: + case KSZ88X3_CHIP_ID: + case KSZ8864_CHIP_ID: + case KSZ8895_CHIP_ID: return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; case KSZ8563_CHIP_ID: case KSZ8567_CHIP_ID: @@ -3180,7 +3334,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit) return interface; } -static void ksz8830_phylink_mac_config(struct phylink_config *config, +static void ksz88x3_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -3364,10 +3518,22 @@ static int ksz_switch_detect(struct ksz_device *dev) break; case KSZ88_FAMILY_ID: if (id2 == KSZ88_CHIP_ID_63) - dev->chip_id = KSZ8830_CHIP_ID; + dev->chip_id = KSZ88X3_CHIP_ID; else return -ENODEV; break; + case KSZ8895_FAMILY_ID: + if (id2 == KSZ8895_CHIP_ID_95 || + id2 == KSZ8895_CHIP_ID_95R) + dev->chip_id = KSZ8895_CHIP_ID; + else + return -ENODEV; + ret = ksz_read8(dev, REG_KSZ8864_CHIP_ID, &id4); + if (ret) + return ret; + if (id4 & SW_KSZ8864) + dev->chip_id = KSZ8864_CHIP_ID; + break; default: ret = ksz_read32(dev, REG_CHIP_ID0, &id32); if (ret) @@ -3742,24 +3908,214 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port, } } +/** + * ksz_handle_wake_reason - Handle wake reason on a specified port. + * @dev: The device structure. + * @port: The port number. + * + * This function reads the PME (Power Management Event) status register of a + * specified port to determine the wake reason. If there is no wake event, it + * returns early. Otherwise, it logs the wake reason which could be due to a + * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register + * is then cleared to acknowledge the handling of the wake event. + * + * Return: 0 on success, or an error code on failure. + */ +int ksz_handle_wake_reason(struct ksz_device *dev, int port) +{ + const struct ksz_dev_ops *ops = dev->dev_ops; + const u16 *regs = dev->info->regs; + u8 pme_status; + int ret; + + ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS], + &pme_status); + if (ret) + return ret; + + if (!pme_status) + return 0; + + dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port, + pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "", + pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "", + pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : ""); + + return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS], + pme_status); +} + +/** + * ksz_get_wol - Get Wake-on-LAN settings for a specified port. + * @ds: The dsa_switch structure. + * @port: The port number. + * @wol: Pointer to ethtool Wake-on-LAN settings structure. + * + * This function checks the device PME wakeup_source flag and chip_id. + * If enabled and supported, it sets the supported and active WoL + * flags. 
+ */ static void ksz_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { struct ksz_device *dev = ds->priv; + const u16 *regs = dev->info->regs; + u8 pme_ctrl; + int ret; - if (dev->dev_ops->get_wol) - dev->dev_ops->get_wol(dev, port, wol); + if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev)) + return; + + if (!dev->wakeup_source) + return; + + wol->supported = WAKE_PHY; + + /* Check if the current MAC address on this port can be set + * as global for WAKE_MAGIC support. The result may vary + * dynamically based on other ports configurations. + */ + if (ksz_is_port_mac_global_usable(dev->ds, port)) + wol->supported |= WAKE_MAGIC; + + ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL], + &pme_ctrl); + if (ret) + return; + + if (pme_ctrl & PME_WOL_MAGICPKT) + wol->wolopts |= WAKE_MAGIC; + if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY)) + wol->wolopts |= WAKE_PHY; } +/** + * ksz_set_wol - Set Wake-on-LAN settings for a specified port. + * @ds: The dsa_switch structure. + * @port: The port number. + * @wol: Pointer to ethtool Wake-on-LAN settings structure. + * + * This function configures Wake-on-LAN (WoL) settings for a specified + * port. It validates the provided WoL options, checks if PME is + * enabled and supported, clears any previous wake reasons, and sets + * the Magic Packet flag in the port's PME control register if + * specified. + * + * Return: 0 on success, or other error codes on failure. + */ static int ksz_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { + u8 pme_ctrl = 0, pme_ctrl_old = 0; struct ksz_device *dev = ds->priv; + const u16 *regs = dev->info->regs; + bool magic_switched_off; + bool magic_switched_on; + int ret; - if (dev->dev_ops->set_wol) - return dev->dev_ops->set_wol(dev, port, wol); + if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; - return -EOPNOTSUPP; + if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev)) + return -EOPNOTSUPP; + + if (!dev->wakeup_source) + return -EOPNOTSUPP; + + ret = ksz_handle_wake_reason(dev, port); + if (ret) + return ret; + + if (wol->wolopts & WAKE_MAGIC) + pme_ctrl |= PME_WOL_MAGICPKT; + if (wol->wolopts & WAKE_PHY) + pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY; + + ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL], + &pme_ctrl_old); + if (ret) + return ret; + + if (pme_ctrl_old == pme_ctrl) + return 0; + + magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) && + !(pme_ctrl & PME_WOL_MAGICPKT); + magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) && + (pme_ctrl & PME_WOL_MAGICPKT); + + /* To keep reference count of MAC address, we should do this + * operation only on change of WOL settings. + */ + if (magic_switched_on) { + ret = ksz_switch_macaddr_get(dev->ds, port, NULL); + if (ret) + return ret; + } else if (magic_switched_off) { + ksz_switch_macaddr_put(dev->ds); + } + + ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], + pme_ctrl); + if (ret) { + if (magic_switched_on) + ksz_switch_macaddr_put(dev->ds); + return ret; + } + + return 0; +} + +/** + * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while + * considering Wake-on-LAN (WoL) settings. + * @dev: The switch device structure. + * @wol_enabled: Pointer to a boolean which will be set to true if WoL is + * enabled on any port. + * + * This function prepares the switch device for a safe shutdown while taking + * into account the Wake-on-LAN (WoL) settings on the user ports. 
It updates + * the wol_enabled flag accordingly to reflect whether WoL is active on any + * port. + */ +static void ksz_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled) +{ + const struct ksz_dev_ops *ops = dev->dev_ops; + const u16 *regs = dev->info->regs; + u8 pme_pin_en = PME_ENABLE; + struct dsa_port *dp; + int ret; + + *wol_enabled = false; + + if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev)) + return; + + if (!dev->wakeup_source) + return; + + dsa_switch_for_each_user_port(dp, dev->ds) { + u8 pme_ctrl = 0; + + ret = ops->pme_pread8(dev, dp->index, + regs[REG_PORT_PME_CTRL], &pme_ctrl); + if (!ret && pme_ctrl) + *wol_enabled = true; + + /* make sure there are no pending wake events which would + * prevent the device from going to sleep/shutdown. + */ + ksz_handle_wake_reason(dev, dp->index); + } + + /* Now we are save to enable PME pin. */ + if (*wol_enabled) { + if (dev->pme_active_high) + pme_pin_en |= PME_POLARITY; + ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en); + if (ksz_is_ksz87xx(dev)) + ksz_write8(dev, KSZ87XX_REG_INT_EN, KSZ87XX_INT_PME_MASK); + } } static int ksz_port_set_mac_address(struct dsa_switch *ds, int port, @@ -4072,8 +4428,7 @@ void ksz_switch_shutdown(struct ksz_device *dev) { bool wol_enabled = false; - if (dev->dev_ops->wol_pre_shutdown) - dev->dev_ops->wol_pre_shutdown(dev, &wol_enabled); + ksz_wol_pre_shutdown(dev, &wol_enabled); if (dev->dev_ops->reset && !wol_enabled) dev->dev_ops->reset(dev); @@ -4237,24 +4592,24 @@ static int ksz9477_drive_strength_write(struct ksz_device *dev, } /** - * ksz8830_drive_strength_write() - Set the drive strength configuration for - * KSZ8830 compatible chip variants. + * ksz88x3_drive_strength_write() - Set the drive strength configuration for + * KSZ8863 compatible chip variants. * @dev: ksz device * @props: Array of drive strength properties to be set * @num_props: Number of properties in the array * - * This function applies the specified drive strength settings to KSZ8830 chip + * This function applies the specified drive strength settings to KSZ88X3 chip * variants (KSZ8873, KSZ8863). * It ensures the configurations align with what the chip variant supports and * warns or errors out on unsupported settings. 
* * Return: 0 on success, error code otherwise */ -static int ksz8830_drive_strength_write(struct ksz_device *dev, +static int ksz88x3_drive_strength_write(struct ksz_device *dev, struct ksz_driver_strength_prop *props, int num_props) { - size_t array_size = ARRAY_SIZE(ksz8830_drive_strengths); + size_t array_size = ARRAY_SIZE(ksz88x3_drive_strengths); int microamp; int i, ret; @@ -4267,10 +4622,10 @@ static int ksz8830_drive_strength_write(struct ksz_device *dev, } microamp = props[KSZ_DRIVER_STRENGTH_IO].value; - ret = ksz_drive_strength_to_reg(ksz8830_drive_strengths, array_size, + ret = ksz_drive_strength_to_reg(ksz88x3_drive_strengths, array_size, microamp); if (ret < 0) { - ksz_drive_strength_error(dev, ksz8830_drive_strengths, + ksz_drive_strength_error(dev, ksz88x3_drive_strengths, array_size, microamp); return ret; } @@ -4330,8 +4685,8 @@ static int ksz_parse_drive_strength(struct ksz_device *dev) return 0; switch (dev->chip_id) { - case KSZ8830_CHIP_ID: - return ksz8830_drive_strength_write(dev, of_props, + case KSZ88X3_CHIP_ID: + return ksz88x3_drive_strength_write(dev, of_props, ARRAY_SIZE(of_props)); case KSZ8795_CHIP_ID: case KSZ8794_CHIP_ID: @@ -4362,7 +4717,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev) int ksz_switch_register(struct ksz_device *dev) { const struct ksz_chip_data *info; - struct device_node *port, *ports; + struct device_node *ports; phy_interface_t interface; unsigned int port_num; int ret; @@ -4448,12 +4803,11 @@ int ksz_switch_register(struct ksz_device *dev) if (!ports) ports = of_get_child_by_name(dev->dev->of_node, "ports"); if (ports) { - for_each_available_child_of_node(ports, port) { + for_each_available_child_of_node_scoped(ports, port) { if (of_property_read_u32(port, "reg", &port_num)) continue; if (!(dev->port_mask & BIT(port_num))) { - of_node_put(port); of_node_put(ports); return -EINVAL; } @@ -4475,6 +4829,8 @@ int ksz_switch_register(struct ksz_device *dev) dev->wakeup_source = of_property_read_bool(dev->dev->of_node, "wakeup-source"); + dev->pme_active_high = of_property_read_bool(dev->dev->of_node, + "microchip,pme-active-high"); } ret = dsa_register_switch(dev->ds); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 5f0a628b9849e8..bec846e20682ff 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Microchip switch driver common header * - * Copyright (C) 2017-2019 Microchip Technology Inc. + * Copyright (C) 2017-2024 Microchip Technology Inc. 
*/ #ifndef __KSZ_COMMON_H @@ -174,6 +174,7 @@ struct ksz_device { bool synclko_125; bool synclko_disable; bool wakeup_source; + bool pme_active_high; struct vlan_table *vlan_cache; @@ -199,7 +200,9 @@ enum ksz_model { KSZ8795, KSZ8794, KSZ8765, - KSZ8830, + KSZ88X3, + KSZ8864, + KSZ8895, KSZ9477, KSZ9896, KSZ9897, @@ -235,6 +238,9 @@ enum ksz_regs { S_MULTICAST_CTRL, P_XMII_CTRL_0, P_XMII_CTRL_1, + REG_SW_PME_CTRL, + REG_PORT_PME_STATUS, + REG_PORT_PME_CTRL, }; enum ksz_masks { @@ -354,6 +360,11 @@ struct ksz_dev_ops { void (*get_caps)(struct ksz_device *dev, int port, struct phylink_config *config); int (*change_mtu)(struct ksz_device *dev, int port, int mtu); + int (*pme_write8)(struct ksz_device *dev, u32 reg, u8 value); + int (*pme_pread8)(struct ksz_device *dev, int port, int offset, + u8 *data); + int (*pme_pwrite8)(struct ksz_device *dev, int port, int offset, + u8 data); void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze); void (*port_init_cnt)(struct ksz_device *dev, int port); void (*phylink_mac_link_up)(struct ksz_device *dev, int port, @@ -363,11 +374,6 @@ struct ksz_dev_ops { int duplex, bool tx_pause, bool rx_pause); void (*setup_rgmii_delay)(struct ksz_device *dev, int port); int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val); - void (*get_wol)(struct ksz_device *dev, int port, - struct ethtool_wolinfo *wol); - int (*set_wol)(struct ksz_device *dev, int port, - struct ethtool_wolinfo *wol); - void (*wol_pre_shutdown)(struct ksz_device *dev, bool *wol_enabled); void (*config_cpu_port)(struct dsa_switch *ds); int (*enable_stp_addr)(struct ksz_device *dev); int (*reset)(struct ksz_device *dev); @@ -391,6 +397,7 @@ int ksz_switch_macaddr_get(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack); void ksz_switch_macaddr_put(struct dsa_switch *ds); void ksz_switch_shutdown(struct ksz_device *dev); +int ksz_handle_wake_reason(struct ksz_device *dev, int port); /* Common register access functions */ static inline struct regmap *ksz_regmap_8(struct ksz_device *dev) @@ -621,12 +628,29 @@ static inline bool ksz_is_ksz87xx(struct ksz_device *dev) static inline bool ksz_is_ksz88x3(struct ksz_device *dev) { - return dev->chip_id == KSZ8830_CHIP_ID; + return dev->chip_id == KSZ88X3_CHIP_ID; +} + +static inline bool ksz_is_8895_family(struct ksz_device *dev) +{ + return dev->chip_id == KSZ8895_CHIP_ID || + dev->chip_id == KSZ8864_CHIP_ID; } static inline bool is_ksz8(struct ksz_device *dev) { - return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev); + return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev) || + ksz_is_8895_family(dev); +} + +static inline bool is_ksz88xx(struct ksz_device *dev) +{ + return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev); +} + +static inline bool is_ksz9477(struct ksz_device *dev) +{ + return dev->chip_id == KSZ9477_CHIP_ID; } static inline int is_lan937x(struct ksz_device *dev) @@ -655,6 +679,7 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port) #define SW_FAMILY_ID_M GENMASK(15, 8) #define KSZ87_FAMILY_ID 0x87 #define KSZ88_FAMILY_ID 0x88 +#define KSZ8895_FAMILY_ID 0x95 #define KSZ8_PORT_STATUS_0 0x08 #define KSZ8_PORT_FIBER_MODE BIT(7) @@ -663,6 +688,12 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port) #define KSZ87_CHIP_ID_94 0x6 #define KSZ87_CHIP_ID_95 0x9 #define KSZ88_CHIP_ID_63 0x3 +#define KSZ8895_CHIP_ID_95 0x4 +#define KSZ8895_CHIP_ID_95R 0x6 + +/* KSZ8895 specific register */ +#define REG_KSZ8864_CHIP_ID 0xFE +#define SW_KSZ8864 BIT(7) #define SW_REV_ID_M GENMASK(7, 4) @@ 
-695,6 +726,17 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port) #define P_MII_MAC_MODE BIT(2) #define P_MII_SEL_M 0x3 +/* KSZ9477, KSZ87xx Wake-on-LAN (WoL) masks */ +#define PME_WOL_MAGICPKT BIT(2) +#define PME_WOL_LINKUP BIT(1) +#define PME_WOL_ENERGY BIT(0) + +#define PME_ENABLE BIT(1) +#define PME_POLARITY BIT(0) + +#define KSZ87XX_REG_INT_EN 0x7D +#define KSZ87XX_INT_PME_MASK BIT(4) + /* Interrupt */ #define REG_SW_PORT_INT_STATUS__1 0x001B #define REG_SW_PORT_INT_MASK__1 0x001F diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c index 086bc9b3cf5362..30b4a6186e38f4 100644 --- a/drivers/net/dsa/microchip/ksz_dcb.c +++ b/drivers/net/dsa/microchip/ksz_dcb.c @@ -113,7 +113,7 @@ static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg, static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg, int *per_reg, u8 *mask) { - if (ksz_is_ksz87xx(dev)) { + if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev)) { *reg = KSZ8765_REG_TOS_DSCP_CTRL; *per_reg = 4; *mask = GENMASK(1, 0); diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c index 8e8d83213b04ce..1c6652f2b9fee0 100644 --- a/drivers/net/dsa/microchip/ksz_spi.c +++ b/drivers/net/dsa/microchip/ksz_spi.c @@ -2,11 +2,11 @@ /* * Microchip ksz series register access through SPI * - * Copyright (C) 2017 Microchip Technology Inc. + * Copyright (C) 2017-2024 Microchip Technology Inc. * Tristram Ha */ -#include +#include #include #include @@ -54,12 +54,15 @@ static int ksz_spi_probe(struct spi_device *spi) if (!chip) return -EINVAL; - if (chip->chip_id == KSZ8830_CHIP_ID) + if (chip->chip_id == KSZ88X3_CHIP_ID) regmap_config = ksz8863_regmap_config; else if (chip->chip_id == KSZ8795_CHIP_ID || chip->chip_id == KSZ8794_CHIP_ID || chip->chip_id == KSZ8765_CHIP_ID) regmap_config = ksz8795_regmap_config; + else if (chip->chip_id == KSZ8895_CHIP_ID || + chip->chip_id == KSZ8864_CHIP_ID) + regmap_config = ksz8863_regmap_config; else regmap_config = ksz9477_regmap_config; @@ -134,11 +137,19 @@ static const struct of_device_id ksz_dt_ids[] = { }, { .compatible = "microchip,ksz8863", - .data = &ksz_switch_chips[KSZ8830] + .data = &ksz_switch_chips[KSZ88X3] + }, + { + .compatible = "microchip,ksz8864", + .data = &ksz_switch_chips[KSZ8864] }, { .compatible = "microchip,ksz8873", - .data = &ksz_switch_chips[KSZ8830] + .data = &ksz_switch_chips[KSZ88X3] + }, + { + .compatible = "microchip,ksz8895", + .data = &ksz_switch_chips[KSZ8895] }, { .compatible = "microchip,ksz9477", @@ -201,7 +212,9 @@ static const struct spi_device_id ksz_spi_ids[] = { { "ksz8794" }, { "ksz8795" }, { "ksz8863" }, + { "ksz8864" }, { "ksz8873" }, + { "ksz8895" }, { "ksz9477" }, { "ksz9896" }, { "ksz9897" }, diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c index b74a230a3f1353..10dc49961f15d7 100644 --- a/drivers/net/dsa/mt7530-mmio.c +++ b/drivers/net/dsa/mt7530-mmio.c @@ -11,6 +11,7 @@ #include "mt7530.h" static const struct of_device_id mt7988_of_match[] = { + { .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], }, { .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], }, { /* sentinel */ }, }; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index ec18e68bf3a8cf..d84ee1b419a614 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1152,7 +1152,8 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port) * the MT7988 SoC. 
Trapped frames will be forwarded to the CPU port that * is affine to the inbound user port. */ - if (priv->id == ID_MT7531 || priv->id == ID_MT7988) + if (priv->id == ID_MT7531 || priv->id == ID_MT7988 || + priv->id == ID_EN7581) mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port))); /* CPU port gets connected to all user ports of @@ -2207,7 +2208,7 @@ mt7530_setup_irq(struct mt7530_priv *priv) return priv->irq ? : -EINVAL; } - if (priv->id == ID_MT7988) + if (priv->id == ID_MT7988 || priv->id == ID_EN7581) priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, &mt7988_irq_domain_ops, priv); @@ -2438,8 +2439,10 @@ mt7530_setup(struct dsa_switch *ds) /* Clear link settings and enable force mode to force link down * on all ports until they're enabled later. */ - mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | - MT7530_FORCE_MODE, MT7530_FORCE_MODE); + mt7530_rmw(priv, MT753X_PMCR_P(i), + PMCR_LINK_SETTINGS_MASK | + MT753X_FORCE_MODE(priv->id), + MT753X_FORCE_MODE(priv->id)); /* Disable forwarding by default on all ports */ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, @@ -2550,8 +2553,10 @@ mt7531_setup_common(struct dsa_switch *ds) /* Clear link settings and enable force mode to force link down * on all ports until they're enabled later. */ - mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | - MT7531_FORCE_MODE_MASK, MT7531_FORCE_MODE_MASK); + mt7530_rmw(priv, MT753X_PMCR_P(i), + PMCR_LINK_SETTINGS_MASK | + MT753X_FORCE_MODE(priv->id), + MT753X_FORCE_MODE(priv->id)); /* Disable forwarding by default on all ports */ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, @@ -2783,6 +2788,28 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port, } } +static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + switch (port) { + /* Ports which are connected to switch PHYs. There is no MII pinout. */ + case 0 ... 4: + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + + config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD; + break; + + /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */ + case 6: + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + + config->mac_capabilities |= MAC_10000FD; + break; + } +} + static void mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface) @@ -3220,6 +3247,16 @@ const struct mt753x_info mt753x_table[] = { .phy_write_c45 = mt7531_ind_c45_phy_write, .mac_port_get_caps = mt7988_mac_port_get_caps, }, + [ID_EN7581] = { + .id = ID_EN7581, + .pcs_ops = &mt7530_pcs_ops, + .sw_setup = mt7988_setup, + .phy_read_c22 = mt7531_ind_c22_phy_read, + .phy_write_c22 = mt7531_ind_c22_phy_write, + .phy_read_c45 = mt7531_ind_c45_phy_read, + .phy_write_c45 = mt7531_ind_c45_phy_write, + .mac_port_get_caps = en7581_mac_port_get_caps, + }, }; EXPORT_SYMBOL_GPL(mt753x_table); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 28592123070bb2..6ad33a9f6b1dff 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -19,6 +19,7 @@ enum mt753x_id { ID_MT7621 = 1, ID_MT7531 = 2, ID_MT7988 = 3, + ID_EN7581 = 4, }; #define NUM_TRGMII_CTRL 5 @@ -64,25 +65,30 @@ enum mt753x_id { #define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x) #define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \ - id == ID_MT7988) ? \ + id == ID_MT7988 || \ + id == ID_EN7581) ? 
\ MT7531_CFC : MT753X_MFC) #define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \ - id == ID_MT7988) ? \ + id == ID_MT7988 || \ + id == ID_EN7581) ? \ MT7531_MIRROR_EN : MT7530_MIRROR_EN) #define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \ - id == ID_MT7988) ? \ + id == ID_MT7988 || \ + id == ID_EN7581) ? \ MT7531_MIRROR_PORT_MASK : \ MT7530_MIRROR_PORT_MASK) #define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \ - id == ID_MT7988) ? \ + id == ID_MT7988 || \ + id == ID_EN7581) ? \ MT7531_MIRROR_PORT_GET(val) : \ MT7530_MIRROR_PORT_GET(val)) #define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \ - id == ID_MT7988) ? \ + id == ID_MT7988 || \ + id == ID_EN7581) ? \ MT7531_MIRROR_PORT_SET(val) : \ MT7530_MIRROR_PORT_SET(val)) @@ -355,6 +361,10 @@ enum mt7530_vlan_port_acc_frm { MT7531_FORCE_MODE_TX_FC | \ MT7531_FORCE_MODE_EEE100 | \ MT7531_FORCE_MODE_EEE1G) +#define MT753X_FORCE_MODE(id) ((id == ID_MT7531 || \ + id == ID_MT7988) ? \ + MT7531_FORCE_MODE_MASK : \ + MT7530_FORCE_MODE) #define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \ PMCR_FORCE_EEE1G | \ PMCR_FORCE_EEE100 | \ diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c index 61ab6cc4fbfcca..53a6d3ed63b320 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c +++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c @@ -146,7 +146,7 @@ static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip, * @chip: chip private data * @pin: gpio index * - * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX). + * Return: 0 for output, 1 for input. */ static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip, unsigned int pin) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 4a705f7333f43b..3aa9c997018a5c 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1370,9 +1370,8 @@ static int felix_parse_ports_node(struct felix *felix, phy_interface_t *port_phy_modes) { struct device *dev = felix->ocelot.dev; - struct device_node *child; - for_each_available_child_of_node(ports_node, child) { + for_each_available_child_of_node_scoped(ports_node, child) { phy_interface_t phy_mode; u32 port; int err; @@ -1381,7 +1380,6 @@ static int felix_parse_ports_node(struct felix *felix, if (of_property_read_u32(child, "reg", &port) < 0) { dev_err(dev, "Port number not defined in device tree " "(property \"reg\")\n"); - of_node_put(child); return -ENODEV; } @@ -1391,7 +1389,6 @@ static int felix_parse_ports_node(struct felix *felix, dev_err(dev, "Failed to read phy-mode or " "phy-interface-type property for port %d\n", port); - of_node_put(child); return -ENODEV; } diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index ecfa73725d2510..0102a82e88cc61 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1736,7 +1736,7 @@ struct felix_stream_gate { u64 cycletime; u64 cycletime_ext; u32 num_entries; - struct action_gate_entry entries[]; + struct action_gate_entry entries[] __counted_by(num_entries); }; struct felix_stream_gate_entry { diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index b9674f68b756a5..ad7044b295ec1a 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -1740,7 +1740,7 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv) } /* Configure chip interrupt signal polarity */ - irq_trig = 
irqd_get_trigger_type(irq_get_irq_data(irq)); + irq_trig = irq_get_trigger_type(irq); switch (irq_trig) { case IRQF_TRIGGER_RISING: case IRQF_TRIGGER_HIGH: diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index 9e821b42e5f37c..c7a8cd06058781 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -599,7 +599,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv) } /* Fetch IRQ edge information from the descriptor */ - irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); + irq_trig = irq_get_trigger_type(irq); switch (irq_trig) { case IRQF_TRIGGER_RISING: case IRQF_TRIGGER_HIGH: @@ -1009,8 +1009,8 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv) static int rtl8366rb_setup_leds(struct realtek_priv *priv) { - struct device_node *leds_np, *led_np; struct dsa_switch *ds = &priv->ds; + struct device_node *leds_np; struct dsa_port *dp; int ret = 0; @@ -1025,13 +1025,11 @@ static int rtl8366rb_setup_leds(struct realtek_priv *priv) continue; } - for_each_child_of_node(leds_np, led_np) { + for_each_child_of_node_scoped(leds_np, led_np) { ret = rtl8366rb_setup_led(priv, dp, of_fwnode_handle(led_np)); - if (ret) { - of_node_put(led_np); + if (ret) break; - } } of_node_put(leds_np); diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c index 35709a1756ae06..3c5018d5e1f93b 100644 --- a/drivers/net/dsa/realtek/rtl83xx.c +++ b/drivers/net/dsa/realtek/rtl83xx.c @@ -185,11 +185,9 @@ rtl83xx_probe(struct device *dev, /* TODO: if power is software controlled, set up any regulators here */ priv->reset_ctl = devm_reset_control_get_optional(dev, NULL); - if (IS_ERR(priv->reset_ctl)) { - ret = PTR_ERR(priv->reset_ctl); - dev_err_probe(dev, ret, "failed to get reset control\n"); - return ERR_CAST(priv->reset_ctl); - } + if (IS_ERR(priv->reset_ctl)) + return dev_err_cast_probe(dev, priv->reset_ctl, + "failed to get reset control\n"); priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(priv->reset)) { diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index c7282ce3d11c54..bc7e50dcb57c77 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1188,9 +1188,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, struct device_node *ports_node) { struct device *dev = &priv->spidev->dev; - struct device_node *child; - for_each_available_child_of_node(ports_node, child) { + for_each_available_child_of_node_scoped(ports_node, child) { struct device_node *phy_node; phy_interface_t phy_mode; u32 index; @@ -1200,7 +1199,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, if (of_property_read_u32(child, "reg", &index) < 0) { dev_err(dev, "Port number not defined in device tree " "(property \"reg\")\n"); - of_node_put(child); return -ENODEV; } @@ -1210,7 +1208,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, dev_err(dev, "Failed to read phy-mode or " "phy-interface-type property for port %d\n", index); - of_node_put(child); return -ENODEV; } @@ -1219,7 +1216,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, if (!of_phy_is_fixed_link(child)) { dev_err(dev, "phy-handle or fixed-link " "properties missing!\n"); - of_node_put(child); return -ENODEV; } /* phy-handle is missing, but fixed-link isn't. 
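The felix, rtl8366rb and sja1105 hunks nearby all make the same change: they move from for_each_(available_)child_of_node() to the _scoped variants, which drop the child node reference automatically when the loop variable goes out of scope, so the explicit of_node_put() calls on early-return paths can be deleted. A minimal sketch of the pattern, with illustrative names (example_parse_ports is not from this patch):

	/* Editor's sketch, not part of the patch. */
	#include <linux/device.h>
	#include <linux/of.h>

	static int example_parse_ports(struct device *dev, struct device_node *ports)
	{
		for_each_available_child_of_node_scoped(ports, child) {
			u32 reg;

			/* On early return the reference to "child" is put automatically */
			if (of_property_read_u32(child, "reg", &reg))
				return -ENODEV;

			dev_dbg(dev, "found port %u\n", reg);
		}

		return 0;
	}
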
@@ -1233,10 +1229,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, priv->phy_mode[index] = phy_mode; err = sja1105_parse_rgmii_delays(priv, index, child); - if (err) { - of_node_put(child); + if (err) return err; - } } return 0; diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index 212421e9d42e4f..e4b98fd5164321 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -45,6 +46,8 @@ #define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */ #define CPU_PORT 6 /* CPU port */ +#define VSC73XX_NUM_FDB_ROWS 2048 +#define VSC73XX_NUM_BUCKETS 4 /* MAC Block registers */ #define VSC73XX_MAC_CFG 0x00 @@ -196,6 +199,40 @@ #define VSC73XX_SRCMASKS_MIRROR BIT(26) #define VSC73XX_SRCMASKS_PORTS_MASK GENMASK(7, 0) +#define VSC73XX_MACHDATA_VID GENMASK(27, 16) +#define VSC73XX_MACHDATA_MAC0 GENMASK(15, 8) +#define VSC73XX_MACHDATA_MAC1 GENMASK(7, 0) +#define VSC73XX_MACLDATA_MAC2 GENMASK(31, 24) +#define VSC73XX_MACLDATA_MAC3 GENMASK(23, 16) +#define VSC73XX_MACLDATA_MAC4 GENMASK(15, 8) +#define VSC73XX_MACLDATA_MAC5 GENMASK(7, 0) + +#define VSC73XX_HASH0_VID_FROM_MASK GENMASK(5, 0) +#define VSC73XX_HASH0_MAC0_FROM_MASK GENMASK(7, 4) +#define VSC73XX_HASH1_MAC0_FROM_MASK GENMASK(3, 0) +#define VSC73XX_HASH1_MAC1_FROM_MASK GENMASK(7, 1) +#define VSC73XX_HASH2_MAC1_FROM_MASK BIT(0) +#define VSC73XX_HASH2_MAC2_FROM_MASK GENMASK(7, 0) +#define VSC73XX_HASH2_MAC3_FROM_MASK GENMASK(7, 6) +#define VSC73XX_HASH3_MAC3_FROM_MASK GENMASK(5, 0) +#define VSC73XX_HASH3_MAC4_FROM_MASK GENMASK(7, 3) +#define VSC73XX_HASH4_MAC4_FROM_MASK GENMASK(2, 0) + +#define VSC73XX_HASH0_VID_TO_MASK GENMASK(9, 4) +#define VSC73XX_HASH0_MAC0_TO_MASK GENMASK(3, 0) +#define VSC73XX_HASH1_MAC0_TO_MASK GENMASK(10, 7) +#define VSC73XX_HASH1_MAC1_TO_MASK GENMASK(6, 0) +#define VSC73XX_HASH2_MAC1_TO_MASK BIT(10) +#define VSC73XX_HASH2_MAC2_TO_MASK GENMASK(9, 2) +#define VSC73XX_HASH2_MAC3_TO_MASK GENMASK(1, 0) +#define VSC73XX_HASH3_MAC3_TO_MASK GENMASK(10, 5) +#define VSC73XX_HASH3_MAC4_TO_MASK GENMASK(4, 0) +#define VSC73XX_HASH4_MAC4_TO_MASK GENMASK(10, 8) + +#define VSC73XX_MACTINDX_SHADOW BIT(13) +#define VSC73XX_MACTINDX_BUCKET_MSK GENMASK(12, 11) +#define VSC73XX_MACTINDX_INDEX_MSK GENMASK(10, 0) + #define VSC73XX_MACACCESS_CPU_COPY BIT(14) #define VSC73XX_MACACCESS_FWD_KILL BIT(13) #define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12) @@ -225,9 +262,27 @@ #define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3 /* MII block 3 registers */ -#define VSC73XX_MII_STAT 0x0 -#define VSC73XX_MII_CMD 0x1 -#define VSC73XX_MII_DATA 0x2 +#define VSC73XX_MII_STAT 0x0 +#define VSC73XX_MII_CMD 0x1 +#define VSC73XX_MII_DATA 0x2 +#define VSC73XX_MII_MPRES 0x3 + +#define VSC73XX_MII_STAT_BUSY BIT(3) +#define VSC73XX_MII_STAT_READ BIT(2) +#define VSC73XX_MII_STAT_WRITE BIT(1) + +#define VSC73XX_MII_CMD_SCAN BIT(27) +#define VSC73XX_MII_CMD_OPERATION BIT(26) +#define VSC73XX_MII_CMD_PHY_ADDR GENMASK(25, 21) +#define VSC73XX_MII_CMD_PHY_REG GENMASK(20, 16) +#define VSC73XX_MII_CMD_WRITE_DATA GENMASK(15, 0) + +#define VSC73XX_MII_DATA_FAILURE BIT(16) +#define VSC73XX_MII_DATA_READ_DATA GENMASK(15, 0) + +#define VSC73XX_MII_MPRES_NOPREAMBLE BIT(6) +#define VSC73XX_MII_MPRES_PRESCALEVAL GENMASK(5, 0) +#define VSC73XX_MII_PRESCALEVAL_MIN 3 /* min allowed mdio clock prescaler */ #define VSC73XX_MII_STAT_BUSY BIT(3) @@ -313,6 +368,13 @@ struct vsc73xx_counter { const 
char *name; }; +struct vsc73xx_fdb { + u16 vid; + u8 port; + u8 mac[ETH_ALEN]; + bool valid; +}; + /* Counters are named according to the MIB standards where applicable. * Some counters are custom, non-standard. The standard counters are * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex @@ -568,8 +630,11 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum) return ret; /* Setting bit 26 means "read" */ - cmd = BIT(26) | (phy << 21) | (regnum << 16); - ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + cmd = VSC73XX_MII_CMD_OPERATION | + FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) | + FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL, + VSC73XX_MII_CMD, cmd); if (ret) return ret; @@ -577,15 +642,16 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum) if (ret) return ret; - ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val); + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL, + VSC73XX_MII_DATA, &val); if (ret) return ret; - if (val & BIT(16)) { + if (val & VSC73XX_MII_DATA_FAILURE) { dev_err(vsc->dev, "reading reg %02x from phy%d failed\n", regnum, phy); return -EIO; } - val &= 0xFFFFU; + val &= VSC73XX_MII_DATA_READ_DATA; dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n", regnum, phy, val); @@ -604,8 +670,11 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum, if (ret) return ret; - cmd = (phy << 21) | (regnum << 16) | val; - ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + cmd = FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) | + FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum) | + FIELD_PREP(VSC73XX_MII_CMD_WRITE_DATA, val); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL, + VSC73XX_MII_CMD, cmd); if (ret) return ret; @@ -714,15 +783,77 @@ vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set) return vsc73xx_write_vlan_table_entry(vsc, vid, portmap); } +static int vsc73xx_configure_rgmii_port_delay(struct dsa_switch *ds) +{ + /* Keep 2.0 ns delay for backward complatibility */ + u32 tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS; + u32 rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS; + struct dsa_port *dp = dsa_to_port(ds, CPU_PORT); + struct device_node *port_dn = dp->dn; + struct vsc73xx *vsc = ds->priv; + u32 delay; + + if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) { + switch (delay) { + case 0: + tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE; + break; + case 1400: + tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS; + break; + case 1700: + tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS; + break; + case 2000: + break; + default: + dev_err(vsc->dev, + "Unsupported RGMII Transmit Clock Delay\n"); + return -EINVAL; + } + } else { + dev_dbg(vsc->dev, + "RGMII Transmit Clock Delay isn't configured, set to 2.0 ns\n"); + } + + if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) { + switch (delay) { + case 0: + rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE; + break; + case 1400: + rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS; + break; + case 1700: + rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS; + break; + case 2000: + break; + default: + dev_err(vsc->dev, + "Unsupported RGMII Receive Clock Delay value\n"); + return -EINVAL; + } + } else { + dev_dbg(vsc->dev, + "RGMII Receive Clock Delay isn't configured, set to 2.0 ns\n"); + } + + /* MII delay, set both GTX and RX delay */ + return 
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY, + tx_delay | rx_delay); +} + static int vsc73xx_setup(struct dsa_switch *ds) { struct vsc73xx *vsc = ds->priv; - int i, ret; + int i, ret, val; dev_info(vsc->dev, "set up the switch\n"); ds->untag_bridge_pvid = true; ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES; + ds->fdb_isolation = true; /* Issue RESET */ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, @@ -776,10 +907,11 @@ static int vsc73xx_setup(struct dsa_switch *ds) VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET); } - /* MII delay, set both GTX and RX delay to 2 ns */ - vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY, - VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS | - VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS); + /* Configure RGMII delay */ + ret = vsc73xx_configure_rgmii_port_delay(ds); + if (ret) + return ret; + /* Ingess VLAN reception mask (table 145) */ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK, 0xff); @@ -789,6 +921,15 @@ static int vsc73xx_setup(struct dsa_switch *ds) mdelay(50); + /* Disable preamble and use maximum allowed clock for the internal + * mdio bus, used for communication with internal PHYs only. + */ + val = VSC73XX_MII_MPRES_NOPREAMBLE | + FIELD_PREP(VSC73XX_MII_MPRES_PRESCALEVAL, + VSC73XX_MII_PRESCALEVAL_MIN); + vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL, + VSC73XX_MII_MPRES, val); + /* Release reset from the internal PHYs */ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, VSC73XX_GLORESET_PHY_RESET); @@ -1763,6 +1904,312 @@ static void vsc73xx_port_stp_state_set(struct dsa_switch *ds, int port, vsc73xx_refresh_fwd_map(ds, port, state); } +static u16 vsc73xx_calc_hash(const unsigned char *addr, u16 vid) +{ + /* VID 5-0, MAC 47-44 */ + u16 hash = FIELD_PREP(VSC73XX_HASH0_VID_TO_MASK, + FIELD_GET(VSC73XX_HASH0_VID_FROM_MASK, vid)) | + FIELD_PREP(VSC73XX_HASH0_MAC0_TO_MASK, + FIELD_GET(VSC73XX_HASH0_MAC0_FROM_MASK, addr[0])); + /* MAC 43-33 */ + hash ^= FIELD_PREP(VSC73XX_HASH1_MAC0_TO_MASK, + FIELD_GET(VSC73XX_HASH1_MAC0_FROM_MASK, addr[0])) | + FIELD_PREP(VSC73XX_HASH1_MAC1_TO_MASK, + FIELD_GET(VSC73XX_HASH1_MAC1_FROM_MASK, addr[1])); + /* MAC 32-22 */ + hash ^= FIELD_PREP(VSC73XX_HASH2_MAC1_TO_MASK, + FIELD_GET(VSC73XX_HASH2_MAC1_FROM_MASK, addr[1])) | + FIELD_PREP(VSC73XX_HASH2_MAC2_TO_MASK, + FIELD_GET(VSC73XX_HASH2_MAC2_FROM_MASK, addr[2])) | + FIELD_PREP(VSC73XX_HASH2_MAC3_TO_MASK, + FIELD_GET(VSC73XX_HASH2_MAC3_FROM_MASK, addr[3])); + /* MAC 21-11 */ + hash ^= FIELD_PREP(VSC73XX_HASH3_MAC3_TO_MASK, + FIELD_GET(VSC73XX_HASH3_MAC3_FROM_MASK, addr[3])) | + FIELD_PREP(VSC73XX_HASH3_MAC4_TO_MASK, + FIELD_GET(VSC73XX_HASH3_MAC4_FROM_MASK, addr[4])); + /* MAC 10-0 */ + hash ^= FIELD_PREP(VSC73XX_HASH4_MAC4_TO_MASK, + FIELD_GET(VSC73XX_HASH4_MAC4_FROM_MASK, addr[4])) | + addr[5]; + + return hash; +} + +static int +vsc73xx_port_wait_for_mac_table_cmd(struct vsc73xx *vsc) +{ + int ret, err; + u32 val; + + ret = read_poll_timeout(vsc73xx_read, err, + err < 0 || + ((val & VSC73XX_MACACCESS_CMD_MASK) == + VSC73XX_MACACCESS_CMD_IDLE), + VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US, + false, vsc, VSC73XX_BLOCK_ANALYZER, + 0, VSC73XX_MACACCESS, &val); + if (ret) + return ret; + return err; +} + +static int vsc73xx_port_read_mac_table_row(struct vsc73xx *vsc, u16 index, + struct vsc73xx_fdb *fdb) +{ + int ret, i; + u32 val; + + if (!fdb) + return -EINVAL; + if (index >= VSC73XX_NUM_FDB_ROWS) + return -EINVAL; + + for (i = 0; i < VSC73XX_NUM_BUCKETS; i++) { + ret = vsc73xx_write(vsc, 
VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACTINDX, + (i ? 0 : VSC73XX_MACTINDX_SHADOW) | + FIELD_PREP(VSC73XX_MACTINDX_BUCKET_MSK, i) | + index); + if (ret) + return ret; + + ret = vsc73xx_port_wait_for_mac_table_cmd(vsc); + if (ret) + return ret; + + ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACACCESS, + VSC73XX_MACACCESS_CMD_MASK, + VSC73XX_MACACCESS_CMD_READ_ENTRY); + if (ret) + return ret; + + ret = vsc73xx_port_wait_for_mac_table_cmd(vsc); + if (ret) + return ret; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACACCESS, &val); + if (ret) + return ret; + + fdb[i].valid = FIELD_GET(VSC73XX_MACACCESS_VALID, val); + if (!fdb[i].valid) + continue; + + fdb[i].port = FIELD_GET(VSC73XX_MACACCESS_DEST_IDX_MASK, val); + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACHDATA, &val); + if (ret) + return ret; + + fdb[i].vid = FIELD_GET(VSC73XX_MACHDATA_VID, val); + fdb[i].mac[0] = FIELD_GET(VSC73XX_MACHDATA_MAC0, val); + fdb[i].mac[1] = FIELD_GET(VSC73XX_MACHDATA_MAC1, val); + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACLDATA, &val); + if (ret) + return ret; + + fdb[i].mac[2] = FIELD_GET(VSC73XX_MACLDATA_MAC2, val); + fdb[i].mac[3] = FIELD_GET(VSC73XX_MACLDATA_MAC3, val); + fdb[i].mac[4] = FIELD_GET(VSC73XX_MACLDATA_MAC4, val); + fdb[i].mac[5] = FIELD_GET(VSC73XX_MACLDATA_MAC5, val); + } + + return ret; +} + +static int +vsc73xx_fdb_operation(struct vsc73xx *vsc, const unsigned char *addr, u16 vid, + u16 hash, u16 cmd_mask, u16 cmd_val) +{ + int ret; + u32 val; + + val = FIELD_PREP(VSC73XX_MACHDATA_VID, vid) | + FIELD_PREP(VSC73XX_MACHDATA_MAC0, addr[0]) | + FIELD_PREP(VSC73XX_MACHDATA_MAC1, addr[1]); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACHDATA, + val); + if (ret) + return ret; + + val = FIELD_PREP(VSC73XX_MACLDATA_MAC2, addr[2]) | + FIELD_PREP(VSC73XX_MACLDATA_MAC3, addr[3]) | + FIELD_PREP(VSC73XX_MACLDATA_MAC4, addr[4]) | + FIELD_PREP(VSC73XX_MACLDATA_MAC5, addr[5]); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACLDATA, + val); + if (ret) + return ret; + + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACTINDX, + hash); + if (ret) + return ret; + + ret = vsc73xx_port_wait_for_mac_table_cmd(vsc); + if (ret) + return ret; + + ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACACCESS, cmd_mask, cmd_val); + if (ret) + return ret; + + return vsc73xx_port_wait_for_mac_table_cmd(vsc); +} + +static int vsc73xx_fdb_del_entry(struct vsc73xx *vsc, int port, + const unsigned char *addr, u16 vid) +{ + struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS]; + u16 hash = vsc73xx_calc_hash(addr, vid); + int bucket, ret; + + mutex_lock(&vsc->fdb_lock); + + ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb); + if (ret) + goto err; + + for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) { + if (fdb[bucket].valid && fdb[bucket].port == port && + ether_addr_equal(addr, fdb[bucket].mac)) + break; + } + + if (bucket == VSC73XX_NUM_BUCKETS) { + /* Can't find MAC in MAC table */ + ret = -ENODATA; + goto err; + } + + ret = vsc73xx_fdb_operation(vsc, addr, vid, hash, + VSC73XX_MACACCESS_CMD_MASK, + VSC73XX_MACACCESS_CMD_FORGET); +err: + mutex_unlock(&vsc->fdb_lock); + return ret; +} + +static int vsc73xx_fdb_add_entry(struct vsc73xx *vsc, int port, + const unsigned char *addr, u16 vid) +{ + struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS]; + u16 hash = vsc73xx_calc_hash(addr, vid); + int bucket, ret; + u32 val; + + mutex_lock(&vsc->fdb_lock); + + ret = 
vsc73xx_port_read_mac_table_row(vsc, hash, fdb); + if (ret) + goto err; + + for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) { + if (!fdb[bucket].valid) + break; + } + + if (bucket == VSC73XX_NUM_BUCKETS) { + /* Bucket is full */ + ret = -EOVERFLOW; + goto err; + } + + val = VSC73XX_MACACCESS_VALID | VSC73XX_MACACCESS_LOCKED | + FIELD_PREP(VSC73XX_MACACCESS_DEST_IDX_MASK, port) | + VSC73XX_MACACCESS_CMD_LEARN; + ret = vsc73xx_fdb_operation(vsc, addr, vid, hash, + VSC73XX_MACACCESS_VALID | + VSC73XX_MACACCESS_LOCKED | + VSC73XX_MACACCESS_DEST_IDX_MASK | + VSC73XX_MACACCESS_CMD_MASK, val); +err: + mutex_unlock(&vsc->fdb_lock); + return ret; +} + +static int vsc73xx_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, struct dsa_db db) +{ + struct vsc73xx *vsc = ds->priv; + + if (!vid) { + switch (db.type) { + case DSA_DB_PORT: + vid = dsa_tag_8021q_standalone_vid(db.dp); + break; + case DSA_DB_BRIDGE: + vid = dsa_tag_8021q_bridge_vid(db.bridge.num); + break; + default: + return -EOPNOTSUPP; + } + } + + return vsc73xx_fdb_add_entry(vsc, port, addr, vid); +} + +static int vsc73xx_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, struct dsa_db db) +{ + struct vsc73xx *vsc = ds->priv; + + if (!vid) { + switch (db.type) { + case DSA_DB_PORT: + vid = dsa_tag_8021q_standalone_vid(db.dp); + break; + case DSA_DB_BRIDGE: + vid = dsa_tag_8021q_bridge_vid(db.bridge.num); + break; + default: + return -EOPNOTSUPP; + } + } + + return vsc73xx_fdb_del_entry(vsc, port, addr, vid); +} + +static int vsc73xx_port_fdb_dump(struct dsa_switch *ds, + int port, dsa_fdb_dump_cb_t *cb, void *data) +{ + struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS]; + struct vsc73xx *vsc = ds->priv; + u16 i, bucket; + int err = 0; + + mutex_lock(&vsc->fdb_lock); + + for (i = 0; i < VSC73XX_NUM_FDB_ROWS; i++) { + err = vsc73xx_port_read_mac_table_row(vsc, i, fdb); + if (err) + goto unlock; + + for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) { + if (!fdb[bucket].valid || fdb[bucket].port != port) + continue; + + /* We need to hide dsa_8021q VLANs from the user */ + if (vid_is_dsa_8021q(fdb[bucket].vid)) + fdb[bucket].vid = 0; + + err = cb(fdb[bucket].mac, fdb[bucket].vid, false, data); + if (err) + goto unlock; + } + } +unlock: + mutex_unlock(&vsc->fdb_lock); + return err; +} + static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = { .mac_config = vsc73xx_mac_config, .mac_link_down = vsc73xx_mac_link_down, @@ -1785,6 +2232,9 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = { .port_bridge_join = dsa_tag_8021q_bridge_join, .port_bridge_leave = dsa_tag_8021q_bridge_leave, .port_change_mtu = vsc73xx_change_mtu, + .port_fdb_add = vsc73xx_fdb_add, + .port_fdb_del = vsc73xx_fdb_del, + .port_fdb_dump = vsc73xx_port_fdb_dump, .port_max_mtu = vsc73xx_get_max_mtu, .port_stp_state_set = vsc73xx_port_stp_state_set, .port_vlan_filtering = vsc73xx_port_vlan_filtering, @@ -1915,6 +2365,8 @@ int vsc73xx_probe(struct vsc73xx *vsc) return -ENODEV; } + mutex_init(&vsc->fdb_lock); + eth_random_addr(vsc->addr); dev_info(vsc->dev, "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n", diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h index 3ca579acc798db..3c30e143c14f1b 100644 --- a/drivers/net/dsa/vitesse-vsc73xx.h +++ b/drivers/net/dsa/vitesse-vsc73xx.h @@ -45,6 +45,7 @@ struct vsc73xx_portinfo { * @vlans: List of configured vlans. Contains port mask and untagged status of * every vlan configured in port vlan operation. 
It doesn't cover tag_8021q * vlans. + * @fdb_lock: Mutex protects fdb access */ struct vsc73xx { struct device *dev; @@ -57,6 +58,7 @@ struct vsc73xx { void *priv; struct vsc73xx_portinfo portinfo[VSC73XX_MAX_NUM_PORTS]; struct list_head vlans; + struct mutex fdb_lock; }; /** diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index d29b5d7af0d72b..e9c5e1e11fa02d 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -109,9 +109,10 @@ static void dummy_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; + dev->lltx = true; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST; dev->features |= NETIF_F_GSO_SOFTWARE; - dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; + dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; dev->features |= NETIF_F_GSO_ENCAP_ALL; dev->hw_features |= dev->features; dev->hw_enc_features |= dev->features; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 0baac25db4f843..9a542e3c9b05d8 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -158,6 +158,17 @@ config ETHOC help Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. +config OA_TC6 + tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" + depends on SPI + select PHYLIB + help + This library implements OPEN Alliance TC6 10BASE-T1x MAC-PHY + Serial Interface protocol for supporting 10BASE-T1x MAC-PHYs. + + To know the implementation details, refer documentation in + . + source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/pensando/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index c03203439c0ed3..99fa180dedb805 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -105,3 +105,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ +obj-$(CONFIG_OA_TC6) += oa_tc6.o diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 0713f1e2c7f38b..a98b3139606ade 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -26,7 +26,7 @@ #include -#include +#include #define ADIN1110_PHY_ID 0x1 @@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv) netdev->netdev_ops = &adin1110_netdev_ops; netdev->ethtool_ops = &adin1110_ethtool_ops; netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->features |= NETIF_F_NETNS_LOCAL; + netdev->netns_local = true; port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false); if (IS_ERR(port_priv->phydev)) { diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 78231c85234d0c..f62851708d4f3a 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1678,17 +1678,15 @@ static int slic_init(struct slic_device *sdev) slic_card_reset(sdev); err = slic_load_firmware(sdev); - if (err) { - dev_err(&sdev->pdev->dev, "failed to load firmware\n"); - return err; - } + if (err) + return dev_err_probe(&sdev->pdev->dev, err, + "failed to load firmware\n"); /* we need the shared memory to read EEPROM so set it up temporarily */ err = slic_init_shmem(sdev); - if (err) { - dev_err(&sdev->pdev->dev, "failed to init shared memory\n"); - return err; - } + if (err) + 
return dev_err_probe(&sdev->pdev->dev, err, + "failed to init shared memory\n"); err = slic_read_eeprom(sdev); if (err) { @@ -1741,10 +1739,9 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err; err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "failed to enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, + "failed to enable PCI device\n"); pci_set_master(pdev); pci_try_set_mwi(pdev); diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 3d8ac63132fb21..9e6f91df2ba080 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -1560,9 +1560,9 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue) } -static void ace_tasklet(struct tasklet_struct *t) +static void ace_bh_work(struct work_struct *work) { - struct ace_private *ap = from_tasklet(ap, t, ace_tasklet); + struct ace_private *ap = from_work(ap, work, ace_bh_work); struct net_device *dev = ap->ndev; int cur_size; @@ -1595,7 +1595,7 @@ static void ace_tasklet(struct tasklet_struct *t) #endif ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size); } - ap->tasklet_pending = 0; + ap->bh_work_pending = 0; } @@ -1617,7 +1617,7 @@ static void ace_dump_trace(struct ace_private *ap) * * Loading rings is safe without holding the spin lock since this is * done only before the device is enabled, thus no interrupts are - * generated and by the interrupt handler/tasklet handler. + * generated and by the interrupt handler/bh handler. */ static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs) { @@ -2160,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id) */ if (netif_running(dev)) { int cur_size; - int run_tasklet = 0; + int run_bh_work = 0; cur_size = atomic_read(&ap->cur_rx_bufs); if (cur_size < RX_LOW_STD_THRES) { @@ -2172,7 +2172,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id) ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size); } else - run_tasklet = 1; + run_bh_work = 1; } if (!ACE_IS_TIGON_I(ap)) { @@ -2188,7 +2188,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id) ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size); } else - run_tasklet = 1; + run_bh_work = 1; } } @@ -2205,12 +2205,12 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id) ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size); } else - run_tasklet = 1; + run_bh_work = 1; } } - if (run_tasklet && !ap->tasklet_pending) { - ap->tasklet_pending = 1; - tasklet_schedule(&ap->ace_tasklet); + if (run_bh_work && !ap->bh_work_pending) { + ap->bh_work_pending = 1; + queue_work(system_bh_wq, &ap->ace_bh_work); } } @@ -2267,7 +2267,7 @@ static int ace_open(struct net_device *dev) /* * Setup the bottom half rx ring refill handler */ - tasklet_setup(&ap->ace_tasklet, ace_tasklet); + INIT_WORK(&ap->ace_bh_work, ace_bh_work); return 0; } @@ -2301,7 +2301,7 @@ static int ace_close(struct net_device *dev) cmd.idx = 0; ace_issue_cmd(regs, &cmd); - tasklet_kill(&ap->ace_tasklet); + cancel_work_sync(&ap->ace_bh_work); /* * Make sure one CPU is not processing packets while diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h index ca5ce0cbbad153..0e45a97b9c9b2d 100644 --- a/drivers/net/ethernet/alteon/acenic.h +++ b/drivers/net/ethernet/alteon/acenic.h @@ -2,7 +2,7 @@ #ifndef _ACENIC_H_ #define _ACENIC_H_ #include - +#include /* * Generate TX index update each time, when TX ring is closed. 
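The acenic changes below (and the xgbe-drv.c hunk further down) follow the tasklet-to-BH-workqueue conversion: the tasklet_struct becomes a work_struct queued on system_bh_wq, from_tasklet() becomes from_work(), tasklet_setup() becomes INIT_WORK(), and tasklet_kill() becomes cancel_work_sync(). A condensed sketch of that pattern, using placeholder foo_* names that are not from this patch:

	/* Editor's sketch, not part of the patch. */
	#include <linux/workqueue.h>

	struct foo_priv {
		int bh_work_pending;
		struct work_struct bh_work;	/* was: struct tasklet_struct */
	};

	static void foo_bh_work(struct work_struct *work)
	{
		/* was: from_tasklet() */
		struct foo_priv *priv = from_work(priv, work, bh_work);

		/* ... refill rings, etc. ... */
		priv->bh_work_pending = 0;
	}

	static void foo_irq_bottom(struct foo_priv *priv)
	{
		if (!priv->bh_work_pending) {
			priv->bh_work_pending = 1;
			/* was: tasklet_schedule(); system_bh_wq keeps softirq-like context */
			queue_work(system_bh_wq, &priv->bh_work);
		}
	}

	static void foo_open(struct foo_priv *priv)
	{
		INIT_WORK(&priv->bh_work, foo_bh_work);	/* was: tasklet_setup() */
	}

	static void foo_close(struct foo_priv *priv)
	{
		cancel_work_sync(&priv->bh_work);	/* was: tasklet_kill() */
	}
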
@@ -667,8 +667,8 @@ struct ace_private struct rx_desc *rx_mini_ring; struct rx_desc *rx_return_ring; - int tasklet_pending, jumbo; - struct tasklet_struct ace_tasklet; + int bh_work_pending, jumbo; + struct work_struct ace_bh_work; struct event *evt_ring; @@ -776,7 +776,7 @@ static int ace_open(struct net_device *dev); static netdev_tx_t ace_start_xmit(struct sk_buff *skb, struct net_device *dev); static int ace_close(struct net_device *dev); -static void ace_tasklet(struct tasklet_struct *t); +static void ace_bh_work(struct work_struct *work); static void ace_dump_trace(struct ace_private *ap); static void ace_set_multicast_list(struct net_device *dev); static int ace_change_mtu(struct net_device *dev, int new_mtu); diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 6de0d590be34fe..9d9fa655935476 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -7,6 +7,21 @@ #define ENA_ADMIN_RSS_KEY_PARTS 10 +#define ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK 0x3F +#define ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK 0x1F + + /* customer metrics - in correlation with + * ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK + */ +enum ena_admin_customer_metrics_id { + ENA_ADMIN_BW_IN_ALLOWANCE_EXCEEDED = 0, + ENA_ADMIN_BW_OUT_ALLOWANCE_EXCEEDED = 1, + ENA_ADMIN_PPS_ALLOWANCE_EXCEEDED = 2, + ENA_ADMIN_CONNTRACK_ALLOWANCE_EXCEEDED = 3, + ENA_ADMIN_LINKLOCAL_ALLOWANCE_EXCEEDED = 4, + ENA_ADMIN_CONNTRACK_ALLOWANCE_AVAILABLE = 5, +}; + enum ena_admin_aq_opcode { ENA_ADMIN_CREATE_SQ = 1, ENA_ADMIN_DESTROY_SQ = 2, @@ -51,6 +66,9 @@ enum ena_admin_aq_feature_id { /* device capabilities */ enum ena_admin_aq_caps_id { ENA_ADMIN_ENI_STATS = 0, + /* ENA SRD customer metrics */ + ENA_ADMIN_ENA_SRD_INFO = 1, + ENA_ADMIN_CUSTOMER_METRICS = 2, }; enum ena_admin_placement_policy_type { @@ -99,6 +117,9 @@ enum ena_admin_get_stats_type { ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, /* extra HW stats for specific network interface */ ENA_ADMIN_GET_STATS_TYPE_ENI = 2, + /* extra HW stats for ENA SRD */ + ENA_ADMIN_GET_STATS_TYPE_ENA_SRD = 3, + ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS = 4, }; enum ena_admin_get_stats_scope { @@ -106,6 +127,16 @@ enum ena_admin_get_stats_scope { ENA_ADMIN_ETH_TRAFFIC = 1, }; +/* ENA SRD configuration for ENI */ +enum ena_admin_ena_srd_flags { + /* Feature enabled */ + ENA_ADMIN_ENA_SRD_ENABLED = BIT(0), + /* UDP support enabled */ + ENA_ADMIN_ENA_SRD_UDP_ENABLED = BIT(1), + /* Bypass Rx UDP ordering */ + ENA_ADMIN_ENA_SRD_UDP_ORDERING_BYPASS_ENABLED = BIT(2), +}; + struct ena_admin_aq_common_desc { /* 11:0 : command_id * 15:12 : reserved12 @@ -363,6 +394,9 @@ struct ena_admin_aq_get_stats_cmd { * stats of other device */ u16 device_id; + + /* a bitmap representing the requested metric values */ + u64 requested_metrics; }; /* Basic Statistics Command. */ @@ -419,6 +453,40 @@ struct ena_admin_eni_stats { u64 linklocal_allowance_exceeded; }; +struct ena_admin_ena_srd_stats { + /* Number of packets transmitted over ENA SRD */ + u64 ena_srd_tx_pkts; + + /* Number of packets transmitted or could have been + * transmitted over ENA SRD + */ + u64 ena_srd_eligible_tx_pkts; + + /* Number of packets received over ENA SRD */ + u64 ena_srd_rx_pkts; + + /* Percentage of the ENA SRD resources that is in use */ + u64 ena_srd_resource_utilization; +}; + +/* ENA SRD Statistics Command */ +struct ena_admin_ena_srd_info { + /* ENA SRD configuration bitmap. 
See ena_admin_ena_srd_flags for + * details + */ + u64 flags; + + struct ena_admin_ena_srd_stats ena_srd_stats; +}; + +/* Customer Metrics Command. */ +struct ena_admin_customer_metrics { + /* A bitmap representing the reported customer metrics according to + * the order they are reported + */ + u64 reported_metrics; +}; + struct ena_admin_acq_get_stats_resp { struct ena_admin_acq_common_desc acq_common_desc; @@ -428,6 +496,10 @@ struct ena_admin_acq_get_stats_resp { struct ena_admin_basic_stats basic_stats; struct ena_admin_eni_stats eni_stats; + + struct ena_admin_ena_srd_info ena_srd_info; + + struct ena_admin_customer_metrics customer_metrics; } u; }; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 713a595370bffa..d958cda9e58ba9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -1881,6 +1881,56 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev, return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0); } +static int ena_get_dev_stats(struct ena_com_dev *ena_dev, + struct ena_com_stats_ctx *ctx, + enum ena_admin_get_stats_type type) +{ + struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; + struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; + struct ena_com_admin_queue *admin_queue; + int ret; + + admin_queue = &ena_dev->admin_queue; + + get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; + get_cmd->aq_common_descriptor.flags = 0; + get_cmd->type = type; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)get_cmd, + sizeof(*get_cmd), + (struct ena_admin_acq_entry *)get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret); + + return ret; +} + +static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev) +{ + struct ena_customer_metrics *customer_metrics; + struct ena_com_stats_ctx ctx; + int ret; + + customer_metrics = &ena_dev->customer_metrics; + if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) { + customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK; + return; + } + + memset(&ctx, 0x0, sizeof(ctx)); + ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK; + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS); + if (likely(ret == 0)) + customer_metrics->supported_metrics = + ctx.get_resp.u.customer_metrics.reported_metrics; + else + netdev_err(ena_dev->net_device, + "Failed to query customer metrics support. 
error: %d\n", ret); +} + int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, struct ena_com_dev_get_features_ctx *get_feat_ctx) { @@ -1960,6 +2010,8 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, else return rc; + ena_com_set_supported_customer_metrics(ena_dev); + return 0; } @@ -2104,50 +2156,44 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, return 0; } -static int ena_get_dev_stats(struct ena_com_dev *ena_dev, - struct ena_com_stats_ctx *ctx, - enum ena_admin_get_stats_type type) +int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, + struct ena_admin_eni_stats *stats) { - struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; - struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; - struct ena_com_admin_queue *admin_queue; + struct ena_com_stats_ctx ctx; int ret; - admin_queue = &ena_dev->admin_queue; - - get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; - get_cmd->aq_common_descriptor.flags = 0; - get_cmd->type = type; - - ret = ena_com_execute_admin_command(admin_queue, - (struct ena_admin_aq_entry *)get_cmd, - sizeof(*get_cmd), - (struct ena_admin_acq_entry *)get_resp, - sizeof(*get_resp)); + if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) { + netdev_err(ena_dev->net_device, "Capability %d isn't supported\n", + ENA_ADMIN_ENI_STATS); + return -EOPNOTSUPP; + } - if (unlikely(ret)) - netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret); + memset(&ctx, 0x0, sizeof(ctx)); + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI); + if (likely(ret == 0)) + memcpy(stats, &ctx.get_resp.u.eni_stats, + sizeof(ctx.get_resp.u.eni_stats)); return ret; } -int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, - struct ena_admin_eni_stats *stats) +int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev, + struct ena_admin_ena_srd_info *info) { struct ena_com_stats_ctx ctx; int ret; - if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) { + if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) { netdev_err(ena_dev->net_device, "Capability %d isn't supported\n", - ENA_ADMIN_ENI_STATS); + ENA_ADMIN_ENA_SRD_INFO); return -EOPNOTSUPP; } memset(&ctx, 0x0, sizeof(ctx)); - ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI); + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD); if (likely(ret == 0)) - memcpy(stats, &ctx.get_resp.u.eni_stats, - sizeof(ctx.get_resp.u.eni_stats)); + memcpy(info, &ctx.get_resp.u.ena_srd_info, + sizeof(ctx.get_resp.u.ena_srd_info)); return ret; } @@ -2167,6 +2213,50 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, return ret; } +int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len) +{ + struct ena_admin_aq_get_stats_cmd *get_cmd; + struct ena_com_stats_ctx ctx; + int ret; + + if (unlikely(len > ena_dev->customer_metrics.buffer_len)) { + netdev_err(ena_dev->net_device, + "Invalid buffer size %u. 
The given buffer is too big.\n", len); + return -EINVAL; + } + + if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) { + netdev_err(ena_dev->net_device, "Capability %d not supported.\n", + ENA_ADMIN_CUSTOMER_METRICS); + return -EOPNOTSUPP; + } + + if (!ena_dev->customer_metrics.supported_metrics) { + netdev_err(ena_dev->net_device, "No supported customer metrics.\n"); + return -EOPNOTSUPP; + } + + get_cmd = &ctx.get_cmd; + memset(&ctx, 0x0, sizeof(ctx)); + ret = ena_com_mem_addr_set(ena_dev, + &get_cmd->u.control_buffer.address, + ena_dev->customer_metrics.buffer_dma_addr); + if (unlikely(ret)) { + netdev_err(ena_dev->net_device, "Memory address set failed.\n"); + return ret; + } + + get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len; + get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics; + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS); + if (likely(ret == 0)) + memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len); + else + netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret); + + return ret; +} + int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu) { struct ena_com_admin_queue *admin_queue; @@ -2706,6 +2796,24 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, return 0; } +int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev) +{ + struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics; + + customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE; + customer_metrics->buffer_virt_addr = NULL; + + customer_metrics->buffer_virt_addr = + dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len, + &customer_metrics->buffer_dma_addr, GFP_KERNEL); + if (!customer_metrics->buffer_virt_addr) { + customer_metrics->buffer_len = 0; + return -ENOMEM; + } + + return 0; +} + void ena_com_delete_host_info(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; @@ -2728,6 +2836,19 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) } } +void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev) +{ + struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics; + + if (customer_metrics->buffer_virt_addr) { + dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len, + customer_metrics->buffer_virt_addr, + customer_metrics->buffer_dma_addr); + customer_metrics->buffer_virt_addr = NULL; + customer_metrics->buffer_len = 0; + } +} + int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 924f03f5a6c7d7..a372c5e768a74c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -42,6 +42,8 @@ #define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry)) #define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry)) +#define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512 + /*****************************************************************************/ /*****************************************************************************/ /* ENA adaptive interrupt moderation settings */ @@ -278,6 +280,16 @@ struct ena_rss { }; +struct ena_customer_metrics { + /* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK + * and ena_admin_customer_metrics_id + */ + u64 supported_metrics; + dma_addr_t 
buffer_dma_addr; + void *buffer_virt_addr; + u32 buffer_len; +}; + struct ena_host_attribute { /* Debug area */ u8 *debug_area_virt_addr; @@ -327,6 +339,8 @@ struct ena_com_dev { struct ena_intr_moder_entry *intr_moder_tbl; struct ena_com_llq_info llq_info; + + struct ena_customer_metrics customer_metrics; }; struct ena_com_dev_get_features_ctx { @@ -595,6 +609,24 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats); +/* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics + * @ena_dev: ENA communication layer struct + * @info: ena srd stats and flags + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev, + struct ena_admin_ena_srd_info *info); + +/* ena_com_get_customer_metrics - Get customer metrics for network interface + * @ena_dev: ENA communication layer struct + * @buffer: buffer for returned customer metrics + * @len: size of the buffer + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len); + /* ena_com_set_dev_mtu - Configure the device mtu. * @ena_dev: ENA communication layer struct * @mtu: mtu value @@ -805,6 +837,13 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev); int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, u32 debug_area_size); +/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources. + * @ena_dev: ENA communication layer struct + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev); + /* ena_com_delete_debug_area - Free the debug area resources. * @ena_dev: ENA communication layer struct * @@ -819,6 +858,13 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); */ void ena_com_delete_host_info(struct ena_com_dev *ena_dev); +/* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources. + * @ena_dev: ENA communication layer struct + * + * Free the allocated customer metrics area. + */ +void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev); + /* ena_com_set_host_attributes - Update the device with the host * attributes (debug area and host info) base address. * @ena_dev: ENA communication layer struct @@ -975,6 +1021,28 @@ static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev, return !!(ena_dev->capabilities & BIT(cap_id)); } +/* ena_com_get_customer_metric_support - query whether device supports a given customer metric. + * @ena_dev: ENA communication layer struct + * @metric_id: enum value representing the customer metric + * + * @return - true if customer metric is supported or false otherwise + */ +static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev, + enum ena_admin_customer_metrics_id metric_id) +{ + return !!(ena_dev->customer_metrics.supported_metrics & BIT(metric_id)); +} + +/* ena_com_get_customer_metric_count - return the number of supported customer metrics. + * @ena_dev: ENA communication layer struct + * + * @return - the number of supported customer metrics + */ +static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev) +{ + return hweight64(ena_dev->customer_metrics.supported_metrics); +} + /* ena_com_update_intr_reg - Prepare interrupt register * @intr_reg: interrupt register to update. 
* @rx_delay_interval: Rx interval in usecs diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index b24cc3f0524850..60fb35ec4b15ac 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -14,6 +14,10 @@ struct ena_stats { int stat_offset; }; +struct ena_hw_metrics { + char name[ETH_GSTRING_LEN]; +}; + #define ENA_STAT_ENA_COM_ENTRY(stat) { \ .name = #stat, \ .stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \ @@ -41,6 +45,18 @@ struct ena_stats { #define ENA_STAT_ENI_ENTRY(stat) \ ENA_STAT_HW_ENTRY(stat, eni_stats) +#define ENA_STAT_ENA_SRD_ENTRY(stat) \ + ENA_STAT_HW_ENTRY(stat, ena_srd_stats) + +#define ENA_STAT_ENA_SRD_MODE_ENTRY(stat) { \ + .name = #stat, \ + .stat_offset = offsetof(struct ena_admin_ena_srd_info, flags) / sizeof(u64) \ +} + +#define ENA_METRIC_ENI_ENTRY(stat) { \ + .name = #stat \ +} + static const struct ena_stats ena_stats_global_strings[] = { ENA_STAT_GLOBAL_ENTRY(tx_timeout), ENA_STAT_GLOBAL_ENTRY(suspend), @@ -52,6 +68,9 @@ static const struct ena_stats ena_stats_global_strings[] = { ENA_STAT_GLOBAL_ENTRY(reset_fail), }; +/* A partial list of hw stats. Used when admin command + * with type ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS is not supported + */ static const struct ena_stats ena_stats_eni_strings[] = { ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded), ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded), @@ -60,6 +79,23 @@ static const struct ena_stats ena_stats_eni_strings[] = { ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded), }; +static const struct ena_hw_metrics ena_hw_stats_strings[] = { + ENA_METRIC_ENI_ENTRY(bw_in_allowance_exceeded), + ENA_METRIC_ENI_ENTRY(bw_out_allowance_exceeded), + ENA_METRIC_ENI_ENTRY(pps_allowance_exceeded), + ENA_METRIC_ENI_ENTRY(conntrack_allowance_exceeded), + ENA_METRIC_ENI_ENTRY(linklocal_allowance_exceeded), + ENA_METRIC_ENI_ENTRY(conntrack_allowance_available), +}; + +static const struct ena_stats ena_srd_info_strings[] = { + ENA_STAT_ENA_SRD_MODE_ENTRY(ena_srd_mode), + ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts), + ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts), + ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts), + ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization) +}; + static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(cnt), ENA_STAT_TX_ENTRY(bytes), @@ -112,7 +148,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = { #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) -#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings) +#define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings) +#define ENA_STATS_ARRAY_ENA_SRD ARRAY_SIZE(ena_srd_info_strings) +#define ENA_METRICS_ARRAY_ENI ARRAY_SIZE(ena_hw_stats_strings) static void ena_safe_update_stat(u64 *src, u64 *dst, struct u64_stats_sync *syncp) @@ -125,6 +163,57 @@ static void ena_safe_update_stat(u64 *src, u64 *dst, } while (u64_stats_fetch_retry(syncp, start)); } +static void ena_metrics_stats(struct ena_adapter *adapter, u64 **data) +{ + struct ena_com_dev *dev = adapter->ena_dev; + const struct ena_stats *ena_stats; + u64 *ptr; + int i; + + if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) { + u32 supported_metrics_count; + int len; + + supported_metrics_count = ena_com_get_customer_metric_count(dev); + len = supported_metrics_count * sizeof(u64); + + /* 
Fill the data buffer, and advance its pointer */ + ena_com_get_customer_metrics(dev, (char *)(*data), len); + (*data) += supported_metrics_count; + + } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) { + ena_com_get_eni_stats(dev, &adapter->eni_stats); + /* Updating regardless of rc - once we told ethtool how many stats we have + * it will print that much stats. We can't leave holes in the stats + */ + for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) { + ena_stats = &ena_stats_eni_strings[i]; + + ptr = (u64 *)&adapter->eni_stats + + ena_stats->stat_offset; + + ena_safe_update_stat(ptr, (*data)++, &adapter->syncp); + } + } + + if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) { + ena_com_get_ena_srd_info(dev, &adapter->ena_srd_info); + /* Get ENA SRD mode */ + ptr = (u64 *)&adapter->ena_srd_info; + ena_safe_update_stat(ptr, (*data)++, &adapter->syncp); + for (i = 1; i < ENA_STATS_ARRAY_ENA_SRD; i++) { + ena_stats = &ena_srd_info_strings[i]; + /* Wrapped within an outer struct - need to accommodate an + * additional offset of the ENA SRD mode that was already processed + */ + ptr = (u64 *)&adapter->ena_srd_info + + ena_stats->stat_offset + 1; + + ena_safe_update_stat(ptr, (*data)++, &adapter->syncp); + } + } +} + static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) { const struct ena_stats *ena_stats; @@ -179,7 +268,7 @@ static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data) static void ena_get_stats(struct ena_adapter *adapter, u64 *data, - bool eni_stats_needed) + bool hw_stats_needed) { const struct ena_stats *ena_stats; u64 *ptr; @@ -193,17 +282,8 @@ static void ena_get_stats(struct ena_adapter *adapter, ena_safe_update_stat(ptr, data++, &adapter->syncp); } - if (eni_stats_needed) { - ena_update_hw_stats(adapter); - for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { - ena_stats = &ena_stats_eni_strings[i]; - - ptr = (u64 *)&adapter->eni_stats + - ena_stats->stat_offset; - - ena_safe_update_stat(ptr, data++, &adapter->syncp); - } - } + if (hw_stats_needed) + ena_metrics_stats(adapter, &data); ena_queue_stats(adapter, &data); ena_dev_admin_queue_stats(adapter, &data); @@ -214,9 +294,8 @@ static void ena_get_ethtool_stats(struct net_device *netdev, u64 *data) { struct ena_adapter *adapter = netdev_priv(netdev); - struct ena_com_dev *dev = adapter->ena_dev; - ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)); + ena_get_stats(adapter, data, true); } static int ena_get_sw_stats_count(struct ena_adapter *adapter) @@ -228,9 +307,17 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter) static int ena_get_hw_stats_count(struct ena_adapter *adapter) { - bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS); + struct ena_com_dev *dev = adapter->ena_dev; + int count; + + count = ENA_STATS_ARRAY_ENA_SRD * ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO); - return ENA_STATS_ARRAY_ENI(adapter) * supported; + if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) + count += ena_com_get_customer_metric_count(dev); + else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) + count += ENA_STATS_ARRAY_ENI; + + return count; } int ena_get_sset_count(struct net_device *netdev, int sset) @@ -246,6 +333,35 @@ int ena_get_sset_count(struct net_device *netdev, int sset) return -EOPNOTSUPP; } +static void ena_metrics_stats_strings(struct ena_adapter *adapter, u8 **data) +{ + struct ena_com_dev *dev = adapter->ena_dev; + const struct ena_hw_metrics *ena_metrics; + const struct ena_stats *ena_stats; + int i; + + if (ena_com_get_cap(dev, 
ENA_ADMIN_CUSTOMER_METRICS)) { + for (i = 0; i < ENA_METRICS_ARRAY_ENI; i++) { + if (ena_com_get_customer_metric_support(dev, i)) { + ena_metrics = &ena_hw_stats_strings[i]; + ethtool_puts(data, ena_metrics->name); + } + } + } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) { + for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) { + ena_stats = &ena_stats_eni_strings[i]; + ethtool_puts(data, ena_stats->name); + } + } + + if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) { + for (i = 0; i < ENA_STATS_ARRAY_ENA_SRD; i++) { + ena_stats = &ena_srd_info_strings[i]; + ethtool_puts(data, ena_stats->name); + } + } +} + static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) { const struct ena_stats *ena_stats; @@ -291,7 +407,7 @@ static void ena_com_dev_strings(u8 **data) static void ena_get_strings(struct ena_adapter *adapter, u8 *data, - bool eni_stats_needed) + bool hw_stats_needed) { const struct ena_stats *ena_stats; int i; @@ -301,12 +417,8 @@ static void ena_get_strings(struct ena_adapter *adapter, ethtool_puts(&data, ena_stats->name); } - if (eni_stats_needed) { - for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { - ena_stats = &ena_stats_eni_strings[i]; - ethtool_puts(&data, ena_stats->name); - } - } + if (hw_stats_needed) + ena_metrics_stats_strings(adapter, &data); ena_queue_strings(adapter, &data); ena_com_dev_strings(&data); @@ -317,11 +429,10 @@ static void ena_get_ethtool_strings(struct net_device *netdev, u8 *data) { struct ena_adapter *adapter = netdev_priv(netdev); - struct ena_com_dev *dev = adapter->ena_dev; switch (sset) { case ETH_SS_STATS: - ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)); + ena_get_strings(adapter, data, true); break; } } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 184b6e6cbed457..c5b50cfa935a38 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2798,19 +2798,6 @@ static void ena_config_debug_area(struct ena_adapter *adapter) ena_com_delete_debug_area(adapter->ena_dev); } -int ena_update_hw_stats(struct ena_adapter *adapter) -{ - int rc; - - rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats); - if (rc) { - netdev_err(adapter->netdev, "Failed to get ENI stats\n"); - return rc; - } - - return 0; -} - static void ena_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -3944,10 +3931,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, adapter); + rc = ena_com_allocate_customer_metrics_buffer(ena_dev); + if (rc) { + netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n"); + goto err_netdev_destroy; + } + rc = ena_map_llq_mem_bar(pdev, ena_dev, bars); if (rc) { dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n"); - goto err_netdev_destroy; + goto err_metrics_destroy; } rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state); @@ -3955,7 +3948,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "ENA device init failed\n"); if (rc == -ETIME) rc = -EPROBE_DEFER; - goto err_netdev_destroy; + goto err_metrics_destroy; } /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. 
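The ena_probe() hunk above slots a new allocation (the customer-metrics buffer) into an existing error-unwind chain: later failure paths are retargeted from err_netdev_destroy to a new err_metrics_destroy label that frees the buffer before falling through. As a rough, self-contained illustration of that goto-unwind ordering — all names below are hypothetical stand-ins, not ENA driver APIs:

#include <stdio.h>

/* Hypothetical stand-ins for two setup steps acquired in order. */
static int alloc_metrics_buffer(void) { return 0; }   /* 0 on success */
static void free_metrics_buffer(void) { puts("metrics buffer freed"); }
static int map_llq_bar(void)          { return -1; }  /* force a failure */

/* Probe-style unwinding: each acquisition gets a label placed so that
 * error paths release resources in reverse order of acquisition.
 */
static int probe(void)
{
	int rc;

	rc = alloc_metrics_buffer();
	if (rc)
		goto err_out;

	rc = map_llq_bar();
	if (rc)
		goto err_metrics_destroy;  /* later failures unwind through here */

	return 0;

err_metrics_destroy:
	free_metrics_buffer();
err_out:
	return rc;
}

int main(void)
{
	printf("probe() returned %d\n", probe());
	return 0;
}

The label order mirrors the reverse of the acquisition order, which is why the patch retargets the later goto statements rather than adding a second cleanup call at each site.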
@@ -4076,6 +4069,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_device_destroy: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); +err_metrics_destroy: + ena_com_delete_customer_metrics_buffer(ena_dev); err_netdev_destroy: free_netdev(netdev); err_free_region: @@ -4139,6 +4134,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) ena_com_delete_host_info(ena_dev); + ena_com_delete_customer_metrics_buffer(ena_dev); + ena_release_bars(ena_dev, pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index d59509747d1aac..6e12ae3b12e5b9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -373,6 +373,7 @@ struct ena_adapter { struct u64_stats_sync syncp; struct ena_stats_dev dev_stats; struct ena_admin_eni_stats eni_stats; + struct ena_admin_ena_srd_info ena_srd_info; /* last queue index that was checked for uncompleted tx packets */ u32 last_monitored_tx_qid; @@ -390,7 +391,6 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter); void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); -int ena_update_hw_stats(struct ena_adapter *adapter); int ena_update_queue_params(struct ena_adapter *adapter, u32 new_tx_size, diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c index 6bdd02b7aa6de2..ac37a4e738ae7d 100644 --- a/drivers/net/ethernet/amd/pds_core/debugfs.c +++ b/drivers/net/ethernet/amd/pds_core/debugfs.c @@ -112,7 +112,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq) struct pdsc_cq *cq = &qcq->cq; qcq_dentry = debugfs_create_dir(q->name, pdsc->dentry); - if (IS_ERR_OR_NULL(qcq_dentry)) + if (IS_ERR(qcq_dentry)) return; qcq->dentry = qcq_dentry; @@ -123,7 +123,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq) debugfs_create_x32("accum_work", 0400, qcq_dentry, &qcq->accum_work); q_dentry = debugfs_create_dir("q", qcq->dentry); - if (IS_ERR_OR_NULL(q_dentry)) + if (IS_ERR(q_dentry)) return; debugfs_create_u32("index", 0400, q_dentry, &q->index); @@ -135,7 +135,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq) debugfs_create_u16("head", 0400, q_dentry, &q->head_idx); cq_dentry = debugfs_create_dir("cq", qcq->dentry); - if (IS_ERR_OR_NULL(cq_dentry)) + if (IS_ERR(cq_dentry)) return; debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); @@ -148,7 +148,7 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq) struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx]; intr_dentry = debugfs_create_dir("intr", qcq->dentry); - if (IS_ERR_OR_NULL(intr_dentry)) + if (IS_ERR(intr_dentry)) return; debugfs_create_u32("index", 0400, intr_dentry, &intr->index); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index c4a4e316683fa8..5475867708f426 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -403,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period, return false; } -static void xgbe_ecc_isr_task(struct tasklet_struct *t) +static void xgbe_ecc_isr_bh_work(struct work_struct *work) { - struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc); + struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work); unsigned int ecc_isr; bool stop = false; @@ -465,17 +465,17 @@ static irqreturn_t xgbe_ecc_isr(int 
irq, void *data) { struct xgbe_prv_data *pdata = data; - if (pdata->isr_as_tasklet) - tasklet_schedule(&pdata->tasklet_ecc); + if (pdata->isr_as_bh_work) + queue_work(system_bh_wq, &pdata->ecc_bh_work); else - xgbe_ecc_isr_task(&pdata->tasklet_ecc); + xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work); return IRQ_HANDLED; } -static void xgbe_isr_task(struct tasklet_struct *t) +static void xgbe_isr_bh_work(struct work_struct *work) { - struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev); + struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work); struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_channel *channel; unsigned int dma_isr, dma_ch_isr; @@ -582,7 +582,7 @@ static void xgbe_isr_task(struct tasklet_struct *t) /* If there is not a separate ECC irq, handle it here */ if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq)) - xgbe_ecc_isr_task(&pdata->tasklet_ecc); + xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work); /* If there is not a separate I2C irq, handle it here */ if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq)) @@ -604,10 +604,10 @@ static irqreturn_t xgbe_isr(int irq, void *data) { struct xgbe_prv_data *pdata = data; - if (pdata->isr_as_tasklet) - tasklet_schedule(&pdata->tasklet_dev); + if (pdata->isr_as_bh_work) + queue_work(system_bh_wq, &pdata->dev_bh_work); else - xgbe_isr_task(&pdata->tasklet_dev); + xgbe_isr_bh_work(&pdata->dev_bh_work); return IRQ_HANDLED; } @@ -1007,8 +1007,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata) unsigned int i; int ret; - tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task); - tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task); + INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work); + INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work); ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, netdev_name(netdev), pdata); @@ -1078,8 +1078,8 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata) devm_free_irq(pdata->dev, pdata->dev_irq, pdata); - tasklet_kill(&pdata->tasklet_dev); - tasklet_kill(&pdata->tasklet_ecc); + cancel_work_sync(&pdata->dev_bh_work); + cancel_work_sync(&pdata->ecc_bh_work); if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) devm_free_irq(pdata->dev, pdata->ecc_irq, pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 21407a26f80614..5fc94c2f638e04 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -582,16 +582,12 @@ static int xgbe_get_ts_info(struct net_device *netdev, struct xgbe_prv_data *pdata = netdev_priv(netdev); ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (pdata->ptp_clock) ts_info->phc_index = ptp_clock_index(pdata->ptp_clock); - else - ts_info->phc_index = -1; ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c index a9ccc4258ee50d..7a833894f52ad4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c @@ -274,9 +274,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata, XI2C_IOREAD(pdata, IC_CLR_STOP_DET); } -static void xgbe_i2c_isr_task(struct tasklet_struct *t) +static void xgbe_i2c_isr_bh_work(struct 
work_struct *work) { - struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c); + struct xgbe_prv_data *pdata = from_work(pdata, work, i2c_bh_work); struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; unsigned int isr; @@ -321,10 +321,10 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; - if (pdata->isr_as_tasklet) - tasklet_schedule(&pdata->tasklet_i2c); + if (pdata->isr_as_bh_work) + queue_work(system_bh_wq, &pdata->i2c_bh_work); else - xgbe_i2c_isr_task(&pdata->tasklet_i2c); + xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work); return IRQ_HANDLED; } @@ -369,7 +369,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr) static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata) { - xgbe_i2c_isr_task(&pdata->tasklet_i2c); + xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work); return IRQ_HANDLED; } @@ -449,7 +449,7 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata) if (pdata->dev_irq != pdata->i2c_irq) { devm_free_irq(pdata->dev, pdata->i2c_irq, pdata); - tasklet_kill(&pdata->tasklet_i2c); + cancel_work_sync(&pdata->i2c_bh_work); } } @@ -464,7 +464,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata) /* If we have a separate I2C irq, enable it */ if (pdata->dev_irq != pdata->i2c_irq) { - tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task); + INIT_WORK(&pdata->i2c_bh_work, xgbe_i2c_isr_bh_work); ret = devm_request_irq(pdata->dev, pdata->i2c_irq, xgbe_i2c_isr, 0, pdata->i2c_name, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 4a2dc705b52801..07f4f3418d0187 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -703,9 +703,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata) } } -static void xgbe_an_isr_task(struct tasklet_struct *t) +static void xgbe_an_isr_bh_work(struct work_struct *work) { - struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an); + struct xgbe_prv_data *pdata = from_work(pdata, work, an_bh_work); netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n"); @@ -727,17 +727,17 @@ static irqreturn_t xgbe_an_isr(int irq, void *data) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; - if (pdata->isr_as_tasklet) - tasklet_schedule(&pdata->tasklet_an); + if (pdata->isr_as_bh_work) + queue_work(system_bh_wq, &pdata->an_bh_work); else - xgbe_an_isr_task(&pdata->tasklet_an); + xgbe_an_isr_bh_work(&pdata->an_bh_work); return IRQ_HANDLED; } static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata) { - xgbe_an_isr_task(&pdata->tasklet_an); + xgbe_an_isr_bh_work(&pdata->an_bh_work); return IRQ_HANDLED; } @@ -1454,7 +1454,7 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) if (pdata->dev_irq != pdata->an_irq) { devm_free_irq(pdata->dev, pdata->an_irq, pdata); - tasklet_kill(&pdata->tasklet_an); + cancel_work_sync(&pdata->an_bh_work); } pdata->phy_if.phy_impl.stop(pdata); @@ -1477,7 +1477,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) /* If we have a separate AN irq, enable it */ if (pdata->dev_irq != pdata->an_irq) { - tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task); + INIT_WORK(&pdata->an_bh_work, xgbe_an_isr_bh_work); ret = devm_request_irq(pdata->dev, pdata->an_irq, xgbe_an_isr, 0, pdata->an_name, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index c5e5fac49779e5..c636999a6a8491 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ 
b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -139,7 +139,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata) return ret; } - pdata->isr_as_tasklet = 1; + pdata->isr_as_bh_work = 1; pdata->irq_count = ret; pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0); @@ -176,7 +176,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata) return ret; } - pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0; + pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0; pdata->irq_count = 1; pdata->channel_irq_count = 1; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index f01a1e566da676..d85386cac8d166 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1298,11 +1298,11 @@ struct xgbe_prv_data { unsigned int lpm_ctrl; /* CTRL1 for resume */ - unsigned int isr_as_tasklet; - struct tasklet_struct tasklet_dev; - struct tasklet_struct tasklet_ecc; - struct tasklet_struct tasklet_i2c; - struct tasklet_struct tasklet_an; + unsigned int isr_as_bh_work; + struct work_struct dev_bh_work; + struct work_struct ecc_bh_work; + struct work_struct i2c_bh_work; + struct work_struct an_bh_work; struct dentry *xgbe_debugfs; diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 292b1f9cd9e78e..785f4b4ff75812 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1317,7 +1317,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0); - ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev); + ret = request_irq(dev->irq, bmac_misc_intr, IRQF_NO_AUTOEN, "BMAC-misc", dev); if (ret) { printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq); goto err_out_iounmap_rx; @@ -1336,7 +1336,6 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) /* Mask chip interrupts and disable chip, will be * re-enabled on open() */ - disable_irq(dev->irq); pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); if (register_netdev(dev) != 0) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index d0aecd1d735738..440ff4616fec1f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -266,7 +266,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev, const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names); const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names); char tc_string[8]; - int tc; + unsigned int tc; memset(tc_string, 0, sizeof(tc_string)); memcpy(p, aq_ethtool_stat_names, @@ -275,22 +275,20 @@ static void aq_ethtool_get_strings(struct net_device *ndev, for (tc = 0; tc < cfg->tcs; tc++) { if (cfg->is_qos) - snprintf(tc_string, 8, "TC%d ", tc); + snprintf(tc_string, 8, "TC%u ", tc); for (i = 0; i < cfg->vecs; i++) { for (si = 0; si < rx_stat_cnt; si++) { - snprintf(p, ETH_GSTRING_LEN, + ethtool_sprintf(&p, aq_ethtool_queue_rx_stat_names[si], tc_string, AQ_NIC_CFG_TCVEC2RING(cfg, tc, i)); - p += ETH_GSTRING_LEN; } for (si = 0; si < tx_stat_cnt; si++) { - snprintf(p, ETH_GSTRING_LEN, + ethtool_sprintf(&p, aq_ethtool_queue_tx_stat_names[si], tc_string, AQ_NIC_CFG_TCVEC2RING(cfg, tc, i)); - p += ETH_GSTRING_LEN; } } } @@ -305,20 +303,18 @@ static void aq_ethtool_get_strings(struct net_device *ndev, for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) { for (si = 0; 
si < rx_stat_cnt; si++) { - snprintf(p, ETH_GSTRING_LEN, + ethtool_sprintf(&p, aq_ethtool_queue_rx_stat_names[si], tc_string, i ? PTP_HWST_RING_IDX : ptp_ring_idx); - p += ETH_GSTRING_LEN; } if (i >= tx_ring_cnt) continue; for (si = 0; si < tx_stat_cnt; si++) { - snprintf(p, ETH_GSTRING_LEN, + ethtool_sprintf(&p, aq_ethtool_queue_tx_stat_names[si], tc_string, i ? PTP_HWST_RING_IDX : ptp_ring_idx); - p += ETH_GSTRING_LEN; } } } @@ -338,9 +334,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev, for (si = 0; si < ARRAY_SIZE(aq_macsec_txsc_stat_names); si++) { - snprintf(p, ETH_GSTRING_LEN, + ethtool_sprintf(&p, aq_macsec_txsc_stat_names[si], i); - p += ETH_GSTRING_LEN; } aq_txsc = &nic->macsec_cfg->aq_txsc[i]; for (sa = 0; sa < MACSEC_NUM_AN; sa++) { @@ -349,10 +344,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev, for (si = 0; si < ARRAY_SIZE(aq_macsec_txsa_stat_names); si++) { - snprintf(p, ETH_GSTRING_LEN, + ethtool_sprintf(&p, aq_macsec_txsa_stat_names[si], i, sa); - p += ETH_GSTRING_LEN; } } } @@ -369,10 +363,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev, for (si = 0; si < ARRAY_SIZE(aq_macsec_rxsa_stat_names); si++) { - snprintf(p, ETH_GSTRING_LEN, + ethtool_sprintf(&p, aq_macsec_rxsa_stat_names[si], i, sa); - p += ETH_GSTRING_LEN; } } } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index f7433abd659159..f21de0c21e5244 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -557,7 +557,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, } frag_cnt++; - next_ = buff_->next, + next_ = buff_->next; buff_ = &self->buff_ring[next_]; is_rsc_completed = aq_ring_dx_in_range(self->sw_head, @@ -583,7 +583,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, err = -EIO; goto err_exit; } - next_ = buff_->next, + next_ = buff_->next; buff_ = &self->buff_ring[next_]; buff_->is_cleaned = true; diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index 482c58c4c5844a..bec5cdf8d1da07 100644 --- a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_ATHEROS bool "Atheros devices" default y - depends on (PCI || ATH79) + depends on PCI || ATH79 || COMPILE_TEST help If you have a network (Ethernet) card belonging to this class, say Y. 
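The aq_ethtool_get_strings() hunks above drop the manual "p += ETH_GSTRING_LEN" cursor arithmetic in favour of ethtool_sprintf(&p, ...), which formats one fixed-width string slot and advances the caller's cursor itself (the ag71xx hunk further down makes the same move to ethtool_puts()). Below is a small userspace mock of that contract; it assumes only the fixed 32-byte slot width and is not the kernel implementation:

#include <stdarg.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32  /* fixed width of one ethtool string slot */

/* Toy model of ethtool_sprintf(): fill one slot, then advance the cursor,
 * so callers no longer track the offset by hand.
 */
static void toy_ethtool_sprintf(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	char table[3 * ETH_GSTRING_LEN] = { 0 };
	char *p = table;
	int i;

	/* Hypothetical stat names, in the style of per-queue ethtool stats. */
	toy_ethtool_sprintf(&p, "queue_%d_rx_packets", 0);
	toy_ethtool_sprintf(&p, "queue_%d_tx_packets", 0);
	toy_ethtool_sprintf(&p, "queue_%d_restarts", 0);

	for (i = 0; i < 3; i++)
		printf("slot %d: %s\n", i, table + i * ETH_GSTRING_LEN);
	return 0;
}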
@@ -19,7 +19,7 @@ if NET_VENDOR_ATHEROS config AG71XX tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support" - depends on ATH79 + depends on ATH79 || COMPILE_TEST select PHYLINK imply NET_SELFTESTS help diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index a38be924cdaa0d..9586b6894f7e7f 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -149,11 +149,11 @@ #define FIFO_CFG4_MC BIT(8) /* Multicast Packet */ #define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */ #define FIFO_CFG4_DR BIT(10) /* Dribble */ -#define FIFO_CFG4_LE BIT(11) /* Long Event */ -#define FIFO_CFG4_CF BIT(12) /* Control Frame */ -#define FIFO_CFG4_PF BIT(13) /* Pause Frame */ -#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */ -#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */ +#define FIFO_CFG4_CF BIT(11) /* Control Frame */ +#define FIFO_CFG4_PF BIT(12) /* Pause Frame */ +#define FIFO_CFG4_UO BIT(13) /* Unsupported Opcode */ +#define FIFO_CFG4_VT BIT(14) /* VLAN tag detected */ +#define FIFO_CFG4_LE BIT(15) /* Long Event */ #define FIFO_CFG4_FT BIT(16) /* Frame Truncated */ #define FIFO_CFG4_UC BIT(17) /* Unicast Packet */ #define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \ @@ -168,28 +168,28 @@ #define FIFO_CFG5_DV BIT(1) /* RX_DV Event */ #define FIFO_CFG5_FC BIT(2) /* False Carrier */ #define FIFO_CFG5_CE BIT(3) /* Code Error */ -#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */ -#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */ -#define FIFO_CFG5_OK BIT(6) /* Packet is OK */ -#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */ -#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */ -#define FIFO_CFG5_DR BIT(9) /* Dribble */ -#define FIFO_CFG5_CF BIT(10) /* Control Frame */ -#define FIFO_CFG5_PF BIT(11) /* Pause Frame */ -#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */ -#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */ -#define FIFO_CFG5_LE BIT(14) /* Long Event */ -#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */ -#define FIFO_CFG5_16 BIT(16) /* unknown */ -#define FIFO_CFG5_17 BIT(17) /* unknown */ +#define FIFO_CFG5_CR BIT(4) /* CRC error */ +#define FIFO_CFG5_LM BIT(5) /* Length Mismatch */ +#define FIFO_CFG5_LO BIT(6) /* Length Out of Range */ +#define FIFO_CFG5_OK BIT(7) /* Packet is OK */ +#define FIFO_CFG5_MC BIT(8) /* Multicast Packet */ +#define FIFO_CFG5_BC BIT(9) /* Broadcast Packet */ +#define FIFO_CFG5_DR BIT(10) /* Dribble */ +#define FIFO_CFG5_CF BIT(11) /* Control Frame */ +#define FIFO_CFG5_PF BIT(12) /* Pause Frame */ +#define FIFO_CFG5_UO BIT(13) /* Unsupported Opcode */ +#define FIFO_CFG5_VT BIT(14) /* VLAN tag detected */ +#define FIFO_CFG5_LE BIT(15) /* Long Event */ +#define FIFO_CFG5_FT BIT(16) /* Frame Truncated */ +#define FIFO_CFG5_UC BIT(17) /* Unicast Packet */ #define FIFO_CFG5_SF BIT(18) /* Short Frame */ #define FIFO_CFG5_BM BIT(19) /* Byte Mode */ #define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \ - FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \ - FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \ - FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \ - FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \ - FIFO_CFG5_17 | FIFO_CFG5_SF) + FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \ + FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \ + FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \ + FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \ + FIFO_CFG5_UC | FIFO_CFG5_SF) #define AG71XX_REG_TX_CTRL 0x0180 #define TX_CTRL_TXE BIT(0) /* Tx Enable */ @@ -379,10 +379,7 
@@ struct ag71xx { u32 fifodata[3]; int mac_idx; - struct reset_control *mdio_reset; - struct mii_bus *mii_bus; struct clk *clk_mdio; - struct clk *clk_eth; }; static int ag71xx_desc_empty(struct ag71xx_desc *desc) @@ -447,6 +444,13 @@ static void ag71xx_int_disable(struct ag71xx *ag, u32 ints) ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints); } +static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + struct ag71xx *ag = netdev_priv(ndev); + + return phylink_mii_ioctl(ag->phylink, ifr, cmd); +} + static void ag71xx_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { @@ -504,8 +508,7 @@ static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset, switch (sset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++) - memcpy(data + i * ETH_GSTRING_LEN, - ag71xx_statistics[i].name, ETH_GSTRING_LEN); + ethtool_puts(&data, ag71xx_statistics[i].name); break; case ETH_SS_TEST: net_selftest_get_strings(data); @@ -685,36 +688,27 @@ static int ag71xx_mdio_probe(struct ag71xx *ag) { struct device *dev = &ag->pdev->dev; struct net_device *ndev = ag->ndev; + struct reset_control *mdio_reset; static struct mii_bus *mii_bus; struct device_node *np, *mnp; int err; np = dev->of_node; - ag->mii_bus = NULL; - ag->clk_mdio = devm_clk_get(dev, "mdio"); + ag->clk_mdio = devm_clk_get_enabled(dev, "mdio"); if (IS_ERR(ag->clk_mdio)) { netif_err(ag, probe, ndev, "Failed to get mdio clk.\n"); return PTR_ERR(ag->clk_mdio); } - err = clk_prepare_enable(ag->clk_mdio); - if (err) { - netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n"); - return err; - } - mii_bus = devm_mdiobus_alloc(dev); - if (!mii_bus) { - err = -ENOMEM; - goto mdio_err_put_clk; - } + if (!mii_bus) + return -ENOMEM; - ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); - if (IS_ERR(ag->mdio_reset)) { + mdio_reset = devm_reset_control_get_exclusive(dev, "mdio"); + if (IS_ERR(mdio_reset)) { netif_err(ag, probe, ndev, "Failed to get reset mdio.\n"); - err = PTR_ERR(ag->mdio_reset); - goto mdio_err_put_clk; + return PTR_ERR(mdio_reset); } mii_bus->name = "ag71xx_mdio"; @@ -725,33 +719,18 @@ static int ag71xx_mdio_probe(struct ag71xx *ag) mii_bus->parent = dev; snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx); - if (!IS_ERR(ag->mdio_reset)) { - reset_control_assert(ag->mdio_reset); - msleep(100); - reset_control_deassert(ag->mdio_reset); - msleep(200); - } + reset_control_assert(mdio_reset); + msleep(100); + reset_control_deassert(mdio_reset); + msleep(200); mnp = of_get_child_by_name(np, "mdio"); - err = of_mdiobus_register(mii_bus, mnp); + err = devm_of_mdiobus_register(dev, mii_bus, mnp); of_node_put(mnp); if (err) - goto mdio_err_put_clk; - - ag->mii_bus = mii_bus; + return err; return 0; - -mdio_err_put_clk: - clk_disable_unprepare(ag->clk_mdio); - return err; -} - -static void ag71xx_mdio_remove(struct ag71xx *ag) -{ - if (ag->mii_bus) - mdiobus_unregister(ag->mii_bus); - clk_disable_unprepare(ag->clk_mdio); } static void ag71xx_hw_stop(struct ag71xx *ag) @@ -1637,7 +1616,6 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit) unsigned int i = ring->curr & ring_mask; struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); int pktlen; - int err = 0; if (ag71xx_desc_empty(desc)) break; @@ -1660,6 +1638,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit) skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag)); if (!skb) { + ndev->stats.rx_errors++; skb_free_frag(ring->buf[i].rx.rx_buf); goto next; } @@ -1667,14 
+1646,9 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit) skb_reserve(skb, offset); skb_put(skb, pktlen); - if (err) { - ndev->stats.rx_dropped++; - kfree_skb(skb); - } else { - skb->dev = ndev; - skb->ip_summed = CHECKSUM_NONE; - list_add_tail(&skb->list, &rx_list); - } + skb->dev = ndev; + skb->ip_summed = CHECKSUM_NONE; + list_add_tail(&skb->list, &rx_list); next: ring->buf[i].rx.rx_buf = NULL; @@ -1799,7 +1773,7 @@ static const struct net_device_ops ag71xx_netdev_ops = { .ndo_open = ag71xx_open, .ndo_stop = ag71xx_stop, .ndo_start_xmit = ag71xx_hard_start_xmit, - .ndo_eth_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = ag71xx_do_ioctl, .ndo_tx_timeout = ag71xx_tx_timeout, .ndo_change_mtu = ag71xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, @@ -1816,6 +1790,7 @@ static int ag71xx_probe(struct platform_device *pdev) const struct ag71xx_dcfg *dcfg; struct net_device *ndev; struct resource *res; + struct clk *clk_eth; int tx_size, err, i; struct ag71xx *ag; @@ -1846,10 +1821,10 @@ static int ag71xx_probe(struct platform_device *pdev) return -EINVAL; } - ag->clk_eth = devm_clk_get(&pdev->dev, "eth"); - if (IS_ERR(ag->clk_eth)) { + clk_eth = devm_clk_get_enabled(&pdev->dev, "eth"); + if (IS_ERR(clk_eth)) { netif_err(ag, probe, ndev, "Failed to get eth clk.\n"); - return PTR_ERR(ag->clk_eth); + return PTR_ERR(clk_eth); } SET_NETDEV_DEV(ndev, &pdev->dev); @@ -1870,6 +1845,12 @@ static int ag71xx_probe(struct platform_device *pdev) if (!ag->mac_base) return -ENOMEM; + /* ensure that HW is in manual polling mode before interrupts are + * activated. Otherwise ag71xx_interrupt might call napi_schedule + * before it is initialized by netif_napi_add. + */ + ag71xx_int_disable(ag, AG71XX_INT_POLL); + ndev->irq = platform_get_irq(pdev, 0); err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt, 0x0, dev_name(&pdev->dev), ndev); @@ -1912,6 +1893,8 @@ static int ag71xx_probe(struct platform_device *pdev) ag->stop_desc->next = (u32)ag->stop_desc_dma; err = of_get_ethdev_address(np, ndev); + if (err == -EPROBE_DEFER) + return err; if (err) { netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); eth_hw_addr_random(ndev); @@ -1926,33 +1909,27 @@ static int ag71xx_probe(struct platform_device *pdev) netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); - err = clk_prepare_enable(ag->clk_eth); - if (err) { - netif_err(ag, probe, ndev, "Failed to enable eth clk.\n"); - return err; - } - ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0); ag71xx_hw_init(ag); err = ag71xx_mdio_probe(ag); if (err) - goto err_put_clk; + return err; platform_set_drvdata(pdev, ndev); err = ag71xx_phylink_setup(ag); if (err) { netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err); - goto err_mdio_remove; + return err; } - err = register_netdev(ndev); + err = devm_register_netdev(&pdev->dev, ndev); if (err) { netif_err(ag, probe, ndev, "unable to register net device\n"); platform_set_drvdata(pdev, NULL); - goto err_mdio_remove; + return err; } netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n", @@ -1960,27 +1937,6 @@ static int ag71xx_probe(struct platform_device *pdev) phy_modes(ag->phy_if_mode)); return 0; - -err_mdio_remove: - ag71xx_mdio_remove(ag); -err_put_clk: - clk_disable_unprepare(ag->clk_eth); - return err; -} - -static void ag71xx_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct ag71xx *ag; - - if (!ndev) - return; - - ag = netdev_priv(ndev); - unregister_netdev(ndev); - 
ag71xx_mdio_remove(ag); - clk_disable_unprepare(ag->clk_eth); - platform_set_drvdata(pdev, NULL); } static const u32 ar71xx_fifo_ar7100[] = { @@ -2064,10 +2020,10 @@ static const struct of_device_id ag71xx_match[] = { { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 }, {} }; +MODULE_DEVICE_TABLE(of, ag71xx_match); static struct platform_driver ag71xx_driver = { .probe = ag71xx_probe, - .remove_new = ag71xx_remove, .driver = { .name = "ag71xx", .of_match_table = ag71xx_match, @@ -2075,4 +2031,5 @@ static struct platform_driver ag71xx_driver = { }; module_platform_driver(ag71xx_driver); +MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index 20c6529ec1350e..297c2682a9cf97 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -1300,9 +1300,9 @@ static void bcmasp_remove_intfs(struct bcmasp_priv *priv) static int bcmasp_probe(struct platform_device *pdev) { - struct device_node *ports_node, *intf_node; const struct bcmasp_plat_data *pdata; struct device *dev = &pdev->dev; + struct device_node *ports_node; struct bcmasp_priv *priv; struct bcmasp_intf *intf; int ret = 0, count = 0; @@ -1374,12 +1374,11 @@ static int bcmasp_probe(struct platform_device *pdev) } i = 0; - for_each_available_child_of_node(ports_node, intf_node) { + for_each_available_child_of_node_scoped(ports_node, intf_node) { intf = bcmasp_interface_create(priv, intf_node, i); if (!intf) { dev_err(dev, "Cannot create eth interface %d\n", i); bcmasp_remove_intfs(priv); - of_node_put(intf_node); ret = -ENOMEM; goto of_put_exit; } diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index 484fc2b5626fe2..ca163c8e372972 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #define pr_fmt(fmt) "bcmasp_ethtool: " fmt -#include +#include #include #include #include diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index c7b56a5e542540..adf7b6b94941c2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3640,16 +3640,12 @@ static int bnx2x_get_ts_info(struct net_device *dev, if (bp->flags & PTP_SUPPORTED) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (bp->ptp_clock) info->phc_index = ptp_clock_index(bp->ptp_clock); - else - info->phc_index = -1; info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 4e9215bce4ad49..a018f251d19867 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -868,6 +868,8 @@ #define DORQ_REG_VF_TYPE_VALUE_0 0x170258 #define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340 +extern const u32 dmae_reg_go_c[]; + /* [RW 4] Initial activity counter value on the load request; when the shortcut is done. 
*/ #define DORQ_REG_SHRT_ACT_CNT 0x170070 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 77d4cb4ad78236..12198fc3ab22be 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2652,10 +2652,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, /* vlan */ if (bulletin->valid_bitmap & (1 << VLAN_VALID)) /* vlan configured by ndo so its in bulletin board */ - memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); + ivi->vlan = bulletin->vlan; else /* function has not been loaded yet. Show vlans as 0s */ - memset(&ivi->vlan, 0, VLAN_HLEN); + ivi->vlan = 0; mutex_unlock(&bp->vfdb->bulletin_mutex); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 2bb133ae61c397..ba6729f2f9c0f2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -23,8 +23,6 @@ #include "bnx2x_cmn.h" #include "bnx2x_sriov.h" -extern const u32 dmae_reg_go_c[]; - /* Statistics */ /* diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 04a623b3eee29e..6e422e24750a28 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -69,6 +69,7 @@ #include "bnxt_tc.h" #include "bnxt_devlink.h" #include "bnxt_debugfs.h" +#include "bnxt_coredump.h" #include "bnxt_hwmon.h" #define BNXT_TX_TIMEOUT (5 * HZ) @@ -301,10 +302,6 @@ static bool bnxt_vf_pciid(enum board_idx idx) #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) -#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) - -#define BNXT_CP_DB_IRQ_DIS(db) \ - writel(DB_CP_IRQ_DIS_FLAGS, db) #define BNXT_DB_CQ(db, idx) \ writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) @@ -2853,34 +2850,6 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) return TX_CMP_VALID(txcmp, raw_cons); } -static irqreturn_t bnxt_inta(int irq, void *dev_instance) -{ - struct bnxt_napi *bnapi = dev_instance; - struct bnxt *bp = bnapi->bp; - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - u32 cons = RING_CMP(cpr->cp_raw_cons); - u32 int_status; - - prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); - - if (!bnxt_has_work(bp, cpr)) { - int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); - /* return if erroneous interrupt */ - if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) - return IRQ_NONE; - } - - /* disable ring IRQ */ - BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); - - /* Return here if interrupt is shared and is disabled. 
*/ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) - return IRQ_HANDLED; - - napi_schedule(&bnapi->napi); - return IRQ_HANDLED; -} - static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, int budget) { @@ -6579,7 +6548,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); req->lb_rule = cpu_to_le16(0xffff); vnic_mru: - req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); + vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; + req->mru = cpu_to_le16(vnic->mru); req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); #ifdef CONFIG_BNXT_SRIOV @@ -6715,6 +6685,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP) + bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH; } hwrm_req_drop(bp, req); return rc; @@ -6872,15 +6844,14 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, req->cq_handle = cpu_to_le64(ring->handle); req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); - } else if (bp->flags & BNXT_FLAG_USING_MSIX) { + } else { req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; } break; case HWRM_RING_ALLOC_NQ: req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; req->length = cpu_to_le32(bp->cp_ring_mask + 1); - if (bp->flags & BNXT_FLAG_USING_MSIX) - req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; break; default: netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", @@ -8943,6 +8914,80 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) return 0; } +static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp) +{ + struct hwrm_dbg_crashdump_medium_cfg_input *req; + u16 page_attr; + int rc; + + if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG); + if (rc) + return rc; + + if (BNXT_PAGE_SIZE == 0x2000) + page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K; + else if (BNXT_PAGE_SIZE == 0x10000) + page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K; + else + page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K; + req->pg_size_lvl = cpu_to_le16(page_attr | + bp->fw_crash_mem->ring_mem.depth); + req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map); + req->size = cpu_to_le32(bp->fw_crash_len); + req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR); + return hwrm_req_send(bp, req); +} + +static void bnxt_free_crash_dump_mem(struct bnxt *bp) +{ + if (bp->fw_crash_mem) { + bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); + kfree(bp->fw_crash_mem); + bp->fw_crash_mem = NULL; + } +} + +static int bnxt_alloc_crash_dump_mem(struct bnxt *bp) +{ + u32 mem_size = 0; + int rc; + + if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) + return 0; + + rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size); + if (rc) + return rc; + + mem_size = round_up(mem_size, 4); + + /* keep and use the existing pages */ + if (bp->fw_crash_mem && + mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE) + goto alloc_done; + + if (bp->fw_crash_mem) + bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); + else + bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem), + GFP_KERNEL); + if (!bp->fw_crash_mem) + return -ENOMEM; + + rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL); + if (rc) { + bnxt_free_crash_dump_mem(bp); + return rc; + } + 
+alloc_done: + bp->fw_crash_len = mem_size; + return 0; +} + int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) { struct hwrm_func_resource_qcaps_output *resp; @@ -9118,6 +9163,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP; if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) @@ -10089,6 +10136,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) return rc; } +int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u8 valid) +{ + struct hwrm_vnic_update_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE); + if (rc) + return rc; + + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + + if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID) + req->mru = cpu_to_le16(vnic->mru); + + req->enables = cpu_to_le32(valid); + + return hwrm_req_send(bp, req); +} + int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) { int rc; @@ -10553,22 +10620,32 @@ static void bnxt_setup_msix(struct bnxt *bp) } } -static void bnxt_setup_inta(struct bnxt *bp) +static int bnxt_init_int_mode(struct bnxt *bp); + +static int bnxt_change_msix(struct bnxt *bp, int total) { - const int len = sizeof(bp->irq_tbl[0].name); + struct msi_map map; + int i; - if (bp->num_tc) { - netdev_reset_tc(bp->dev); - bp->num_tc = 0; + /* add MSIX to the end if needed */ + for (i = bp->total_irqs; i < total; i++) { + map = pci_msix_alloc_irq_at(bp->pdev, i, NULL); + if (map.index < 0) + return bp->total_irqs; + bp->irq_tbl[i].vector = map.virq; + bp->total_irqs++; } - snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", - 0); - bp->irq_tbl[0].handler = bnxt_inta; + /* trim MSIX from the end if needed */ + for (i = bp->total_irqs; i > total; i--) { + map.index = i - 1; + map.virq = bp->irq_tbl[i - 1].vector; + pci_msix_free_irq(bp->pdev, map); + bp->total_irqs--; + } + return bp->total_irqs; } -static int bnxt_init_int_mode(struct bnxt *bp); - static int bnxt_setup_int_mode(struct bnxt *bp) { int rc; @@ -10579,10 +10656,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp) return rc ?: -ENODEV; } - if (bp->flags & BNXT_FLAG_USING_MSIX) - bnxt_setup_msix(bp); - else - bnxt_setup_inta(bp); + bnxt_setup_msix(bp); rc = bnxt_set_real_num_queues(bp); return rc; @@ -10670,10 +10744,9 @@ static int bnxt_get_num_msix(struct bnxt *bp) return bnxt_nq_rings_in_use(bp); } -static int bnxt_init_msix(struct bnxt *bp) +static int bnxt_init_int_mode(struct bnxt *bp) { - int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp; - struct msix_entry *msix_ent; + int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size; total_vecs = bnxt_get_num_msix(bp); max = bnxt_get_max_func_irqs(bp); @@ -10683,29 +10756,24 @@ static int bnxt_init_msix(struct bnxt *bp) if (!total_vecs) return 0; - msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); - if (!msix_ent) - return -ENOMEM; - - for (i = 0; i < total_vecs; i++) { - msix_ent[i].entry = i; - msix_ent[i].vector = 0; - } - if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) min = 2; - total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); + total_vecs = 
pci_alloc_irq_vectors(bp->pdev, min, total_vecs, + PCI_IRQ_MSIX); ulp_msix = bnxt_get_ulp_msix_num(bp); if (total_vecs < 0 || total_vecs < ulp_msix) { rc = -ENODEV; goto msix_setup_exit; } - bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); + tbl_size = total_vecs; + if (pci_msix_can_alloc_dyn(bp->pdev)) + tbl_size = max; + bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL); if (bp->irq_tbl) { for (i = 0; i < total_vecs; i++) - bp->irq_tbl[i].vector = msix_ent[i].vector; + bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i); bp->total_irqs = total_vecs; /* Trim rings based upon num of vectors allocated */ @@ -10723,61 +10791,28 @@ static int bnxt_init_msix(struct bnxt *bp) rc = -ENOMEM; goto msix_setup_exit; } - bp->flags |= BNXT_FLAG_USING_MSIX; - kfree(msix_ent); return 0; msix_setup_exit: - netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); + netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc); kfree(bp->irq_tbl); bp->irq_tbl = NULL; - pci_disable_msix(bp->pdev); - kfree(msix_ent); - return rc; -} - -static int bnxt_init_inta(struct bnxt *bp) -{ - bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL); - if (!bp->irq_tbl) - return -ENOMEM; - - bp->total_irqs = 1; - bp->rx_nr_rings = 1; - bp->tx_nr_rings = 1; - bp->cp_nr_rings = 1; - bp->flags |= BNXT_FLAG_SHARED_RINGS; - bp->irq_tbl[0].vector = bp->pdev->irq; - return 0; -} - -static int bnxt_init_int_mode(struct bnxt *bp) -{ - int rc = -ENODEV; - - if (bp->flags & BNXT_FLAG_MSIX_CAP) - rc = bnxt_init_msix(bp); - - if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { - /* fallback to INTA */ - rc = bnxt_init_inta(bp); - } + pci_free_irq_vectors(bp->pdev); return rc; } static void bnxt_clear_int_mode(struct bnxt *bp) { - if (bp->flags & BNXT_FLAG_USING_MSIX) - pci_disable_msix(bp->pdev); + pci_free_irq_vectors(bp->pdev); kfree(bp->irq_tbl); bp->irq_tbl = NULL; - bp->flags &= ~BNXT_FLAG_USING_MSIX; } int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) { bool irq_cleared = false; + bool irq_change = false; int tcs = bp->num_tc; int irqs_required; int rc; @@ -10796,15 +10831,21 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) } if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { - bnxt_ulp_irq_stop(bp); - bnxt_clear_int_mode(bp); - irq_cleared = true; + irq_change = true; + if (!pci_msix_can_alloc_dyn(bp->pdev)) { + bnxt_ulp_irq_stop(bp); + bnxt_clear_int_mode(bp); + irq_cleared = true; + } } rc = __bnxt_reserve_rings(bp); if (irq_cleared) { if (!rc) rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); + } else if (irq_change && !rc) { + if (bnxt_change_msix(bp, irqs_required) != irqs_required) + rc = -ENOSPC; } if (rc) { netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); @@ -10870,9 +10911,6 @@ static int bnxt_request_irq(struct bnxt *bp) #ifdef CONFIG_RFS_ACCEL rmap = bp->dev->rx_cpu_rmap; #endif - if (!(bp->flags & BNXT_FLAG_USING_MSIX)) - flags = IRQF_SHARED; - for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { int map_idx = bnxt_cp_num_to_irq_num(bp, i); struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; @@ -10937,29 +10975,22 @@ static void bnxt_del_napi(struct bnxt *bp) static void bnxt_init_napi(struct bnxt *bp) { - int i; + int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; unsigned int cp_nr_rings = bp->cp_nr_rings; struct bnxt_napi *bnapi; + int i; - if (bp->flags & BNXT_FLAG_USING_MSIX) { - int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; - - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) - poll_fn = bnxt_poll_p5; - 
else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - cp_nr_rings--; - for (i = 0; i < cp_nr_rings; i++) { - bnapi = bp->bnapi[i]; - netif_napi_add(bp->dev, &bnapi->napi, poll_fn); - } - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { - bnapi = bp->bnapi[cp_nr_rings]; - netif_napi_add(bp->dev, &bnapi->napi, - bnxt_poll_nitroa0); - } - } else { - bnapi = bp->bnapi[0]; - netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + poll_fn = bnxt_poll_p5; + else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + cp_nr_rings--; + for (i = 0; i < cp_nr_rings; i++) { + bnapi = bp->bnapi[i]; + netif_napi_add(bp->dev, &bnapi->napi, poll_fn); + } + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + bnapi = bp->bnapi[cp_nr_rings]; + netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); } } @@ -11947,20 +11978,6 @@ static int bnxt_update_phy_setting(struct bnxt *bp) return rc; } -/* Common routine to pre-map certain register block to different GRC window. - * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows - * in PF and 3 windows in VF that can be customized to map in different - * register blocks. - */ -static void bnxt_preset_reg_win(struct bnxt *bp) -{ - if (BNXT_PF(bp)) { - /* CAG registers map to GRC window #4 */ - writel(BNXT_CAG_REG_BASE, - bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); - } -} - static int bnxt_init_dflt_ring_mode(struct bnxt *bp); static int bnxt_reinit_after_abort(struct bnxt *bp) @@ -12065,7 +12082,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; - bnxt_preset_reg_win(bp); netif_carrier_off(bp->dev); if (irq_re_init) { /* Reserve rings now if none were reserved at driver probe. */ @@ -12078,12 +12094,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) rc = bnxt_reserve_rings(bp, irq_re_init); if (rc) return rc; - if ((bp->flags & BNXT_FLAG_RFS) && - !(bp->flags & BNXT_FLAG_USING_MSIX)) { - /* disable RFS if falling back to INTA */ - bp->dev->hw_features &= ~NETIF_F_NTUPLE; - bp->flags &= ~BNXT_FLAG_RFS; - } rc = bnxt_alloc_mem(bp, irq_re_init); if (rc) { @@ -12810,7 +12820,7 @@ bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) return bnxt_rfs_supported(bp); - if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) + if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false; hwr.grp = bp->rx_nr_rings; @@ -13793,6 +13803,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; struct bnxt_hw_rings hwr = {0}; int rx_rings = rx; + int rc; if (tcs) tx_sets = tcs; @@ -13825,7 +13836,23 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, } if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) hwr.cp_p5 = hwr.tx + rx; - return bnxt_hwrm_check_rings(bp, &hwr); + rc = bnxt_hwrm_check_rings(bp, &hwr); + if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) { + if (!bnxt_ulp_registered(bp->edev)) { + hwr.cp += bnxt_get_ulp_msix_num(bp); + hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp)); + } + if (hwr.cp > bp->total_irqs) { + int total_msix = bnxt_change_msix(bp, hwr.cp); + + if (total_msix < hwr.cp) { + netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n", + hwr.cp, total_msix); + rc = -ENOSPC; + } + } + } + return rc; } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -13963,6 +13990,19 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) if (rc) return -ENODEV; + rc = 
bnxt_alloc_crash_dump_mem(bp); + if (rc) + netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n", + rc); + if (!rc) { + rc = bnxt_hwrm_crash_dump_mem_cfg(bp); + if (rc) { + bnxt_free_crash_dump_mem(bp); + netdev_warn(bp->dev, + "hwrm crash dump mem failure rc: %d\n", rc); + } + } + if (bnxt_fw_pre_resv_vnics(bp)) bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; @@ -15154,7 +15194,8 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) struct bnxt *bp = netdev_priv(dev); struct bnxt_rx_ring_info *rxr, *clone; struct bnxt_cp_ring_info *cpr; - int rc; + struct bnxt_vnic_info *vnic; + int i, rc; rxr = &bp->rx_ring[idx]; clone = qmem; @@ -15179,11 +15220,16 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); - napi_enable(&rxr->bnapi->napi); - cpr = &rxr->bnapi->cp_ring; cpr->sw_stats->rx.rx_resets++; + for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { + vnic = &bp->vnic_info[i]; + vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; + bnxt_hwrm_vnic_update(bp, vnic, + VNIC_UPDATE_REQ_ENABLES_MRU_VALID); + } + return 0; err_free_hwrm_rx_ring: @@ -15195,9 +15241,17 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) { struct bnxt *bp = netdev_priv(dev); struct bnxt_rx_ring_info *rxr; + struct bnxt_vnic_info *vnic; + int i; + + for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { + vnic = &bp->vnic_info[i]; + vnic->mru = 0; + bnxt_hwrm_vnic_update(bp, vnic, + VNIC_UPDATE_REQ_ENABLES_MRU_VALID); + } rxr = &bp->rx_ring[idx]; - napi_disable(&rxr->bnapi->napi); bnxt_hwrm_rx_ring_free(bp, rxr, false); bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); rxr->rx_next_cons = 0; @@ -15257,6 +15311,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) bp->fw_health = NULL; bnxt_cleanup_pci(bp); bnxt_free_ctx_mem(bp); + bnxt_free_crash_dump_mem(bp); kfree(bp->rss_indir_tbl); bp->rss_indir_tbl = NULL; bnxt_free_port_stats(bp); @@ -15644,6 +15699,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_is_bridge(pdev)) return -ENODEV; + if (!pdev->msix_cap) { + dev_err(&pdev->dev, "MSIX capability not found, aborting\n"); + return -ENODEV; + } + /* Clear any pending DMA transactions from crash kernel * while loading driver in capture kernel. 
*/ @@ -15670,9 +15730,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (BNXT_PF(bp)) SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); - if (pdev->msix_cap) - bp->flags |= BNXT_FLAG_MSIX_CAP; - rc = bnxt_init_board(pdev, dev); if (rc < 0) goto init_err_free; @@ -15681,7 +15738,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->stat_ops = &bnxt_stat_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; - dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); @@ -15862,6 +15918,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; + if (BNXT_SUPPORTS_QUEUE_API(bp)) + dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; rc = register_netdev(dev); if (rc) @@ -15895,6 +15953,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->fw_health = NULL; bnxt_cleanup_pci(bp); bnxt_free_ctx_mem(bp); + bnxt_free_crash_dump_mem(bp); kfree(bp->rss_indir_tbl); bp->rss_indir_tbl = NULL; @@ -15986,6 +16045,8 @@ static int bnxt_resume(struct device *device) rc = -ENODEV; goto resume_exit; } + if (bp->fw_crash_mem) + bnxt_hwrm_crash_dump_mem_cfg(bp); bnxt_get_wol_settings(bp); if (netif_running(dev)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 059a6f81c1a871..69231e85140b2e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1217,12 +1217,15 @@ struct bnxt_napi { bool in_reset; }; +/* "TxRx", 2 hypens, plus maximum integer */ +#define BNXT_IRQ_NAME_EXTRA 17 + struct bnxt_irq { irq_handler_t handler; unsigned int vector; u8 requested:1; u8 have_cpumask:1; - char name[IFNAMSIZ + 2]; + char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA]; cpumask_var_t cpu_mask; }; @@ -1250,6 +1253,7 @@ struct bnxt_vnic_info { #define BNXT_MAX_CTX_PER_VNIC 8 u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC]; u16 fw_l2_ctx_id; + u16 mru; #define BNXT_MAX_UC_ADDRS 4 struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS]; /* index 0 always dev_addr */ @@ -1355,7 +1359,6 @@ struct bnxt_vf_info { u16 vlan; u16 func_qcfg_flags; u32 flags; -#define BNXT_VF_QOS 0x1 #define BNXT_VF_SPOOFCHK 0x2 #define BNXT_VF_LINK_FORCED 0x4 #define BNXT_VF_LINK_UP 0x8 @@ -1755,8 +1758,6 @@ struct bnxt_test_info { #define BNXT_GRCPF_REG_CHIMP_COMM 0x0 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 -#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 -#define BNXT_CAG_REG_BASE 0x300000 #define BNXT_GRC_REG_STATUS_P5 0x520 @@ -2199,8 +2200,6 @@ struct bnxt { #define BNXT_FLAG_STRIP_VLAN 0x20 #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \ BNXT_FLAG_LRO) - #define BNXT_FLAG_USING_MSIX 0x40 - #define BNXT_FLAG_MSIX_CAP 0x80 #define BNXT_FLAG_RFS 0x100 #define BNXT_FLAG_SHARED_RINGS 0x200 #define BNXT_FLAG_PORT_STATS 0x400 @@ -2437,6 +2436,7 @@ struct bnxt { #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37) #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38) #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39) + #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40) u32 fw_dbg_cap; @@ -2449,6 +2449,9 @@ struct bnxt { #define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \ ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX)) +#define BNXT_SUPPORTS_QUEUE_API(bp) \ + (BNXT_PF(bp) && 
BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \ + ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH)) u32 hwrm_spec_code; u16 hwrm_cmd_seq; @@ -2644,6 +2647,9 @@ struct bnxt { #endif u32 thermal_threshold_type; enum board_idx board_idx; + + struct bnxt_ctx_pg_info *fw_crash_mem; + u32 fw_crash_len; }; #define BNXT_NUM_RX_RING_STATS 8 @@ -2837,6 +2843,8 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); int bnxt_hwrm_func_qcaps(struct bnxt *bp); int bnxt_hwrm_fw_set_time(struct bnxt *); +int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u8 valid); int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index c067898820360e..4e2b938ed1f7e0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -372,20 +372,81 @@ static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) return rc; } +static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf, + u32 dump_len) +{ + u32 data_copied = 0; + u32 data_len; + int i; + + for (i = 0; i < rmem->nr_pages; i++) { + data_len = rmem->page_size; + if (data_copied + data_len > dump_len) + data_len = dump_len - data_copied; + memcpy(buf + data_copied, rmem->pg_arr[i], data_len); + data_copied += data_len; + if (data_copied >= dump_len) + break; + } + return data_copied; +} + +static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len) +{ + struct bnxt_ring_mem_info *rmem; + u32 offset = 0; + + if (!bp->fw_crash_mem) + return -ENOENT; + + rmem = &bp->fw_crash_mem->ring_mem; + + if (rmem->depth > 1) { + int i; + + for (i = 0; i < rmem->nr_pages; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + + pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i]; + offset += bnxt_copy_crash_data(&pg_tbl->ring_mem, + buf + offset, + dump_len - offset); + if (offset >= dump_len) + break; + } + } else { + bnxt_copy_crash_data(rmem, buf, dump_len); + } + + return 0; +} + +static bool bnxt_crash_dump_avail(struct bnxt *bp) +{ + u32 sig = 0; + + /* First 4 bytes(signature) of crash dump is always non-zero */ + bnxt_copy_crash_dump(bp, &sig, sizeof(sig)); + return !!sig; +} + int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len) { if (dump_type == BNXT_DUMP_CRASH) { + if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR) + return bnxt_copy_crash_dump(bp, buf, *dump_len); #ifdef CONFIG_TEE_BNXT_FW - return tee_bnxt_copy_coredump(buf, 0, *dump_len); -#else - return -EOPNOTSUPP; + else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR) + return tee_bnxt_copy_coredump(buf, 0, *dump_len); #endif + else + return -EOPNOTSUPP; } else { return __bnxt_get_coredump(bp, buf, dump_len); } } -static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len) +int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len) { struct hwrm_dbg_qcfg_output *resp; struct hwrm_dbg_qcfg_input *req; @@ -395,7 +456,8 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len) return -EOPNOTSUPP; if (dump_type == BNXT_DUMP_CRASH && - !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)) + !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR || + (bp->fw_dbg_cap & 
DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))) return -EOPNOTSUPP; rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG); @@ -403,8 +465,12 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len) return rc; req->fid = cpu_to_le16(0xffff); - if (dump_type == BNXT_DUMP_CRASH) - req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR); + if (dump_type == BNXT_DUMP_CRASH) { + if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR) + req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC); + else + req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST); + } resp = hwrm_req_hold(bp, req); rc = hwrm_req_send(bp, req); @@ -412,7 +478,10 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len) goto get_dump_len_exit; if (dump_type == BNXT_DUMP_CRASH) { - *dump_len = le32_to_cpu(resp->crashdump_size); + if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR) + *dump_len = BNXT_CRASH_DUMP_LEN; + else + *dump_len = le32_to_cpu(resp->crashdump_size); } else { /* Driver adds coredump header and "HWRM_VER_GET response" * segment additionally to coredump. @@ -434,10 +503,17 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type) { u32 len = 0; + if (dump_type == BNXT_DUMP_CRASH && + bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR && + bp->fw_crash_mem) { + if (!bnxt_crash_dump_avail(bp)) + return 0; + + return bp->fw_crash_len; + } + if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) { - if (dump_type == BNXT_DUMP_CRASH) - len = BNXT_CRASH_DUMP_LEN; - else + if (dump_type != BNXT_DUMP_CRASH) __bnxt_get_coredump(bp, NULL, &len); } return len; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h index b1a1b2fffb194d..a76d5c281413fd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h @@ -111,7 +111,15 @@ struct hwrm_dbg_cmn_output { #define HWRM_DBG_CMN_FLAGS_MORE 1 }; +#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \ + DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR +#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \ + DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR +#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \ + DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR + int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len); +int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len); u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c index 156c2404854fce..127b7015f6762c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c @@ -64,9 +64,9 @@ static const struct file_operations debugfs_dim_fops = { static void debugfs_dim_ring_init(struct dim *dim, int ring_idx, struct dentry *dd) { - static char qname[16]; + static char qname[12]; - snprintf(qname, 10, "%d", ring_idx); + snprintf(qname, sizeof(qname), "%d", ring_idx); debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 4cf9bf8b01b09d..f71cc8188b4e5b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -955,11 +955,6 @@ static int bnxt_set_channels(struct net_device *dev, } tx_xdp = req_rx_rings; } - rc = bnxt_check_rings(bp, 
req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); - if (rc) { - netdev_warn(dev, "Unable to allocate the requested rings\n"); - return rc; - } if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && @@ -968,6 +963,12 @@ static int bnxt_set_channels(struct net_device *dev, return -EINVAL; } + rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); + if (rc) { + netdev_warn(dev, "Unable to allocate the requested rings\n"); + return rc; + } + if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -4157,7 +4158,7 @@ static void bnxt_get_pkgver(struct net_device *dev) if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) { len = strlen(bp->fw_ver_str); - snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len, "/pkg %s", buf); } } @@ -4989,9 +4990,16 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) return -EINVAL; } - if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) { - netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); - return -EOPNOTSUPP; + if (dump->flag == BNXT_DUMP_CRASH) { + if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR && + (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) { + netdev_info(dev, + "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); + return -EOPNOTSUPP; + } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) { + netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n"); + return -EOPNOTSUPP; + } } bp->dump_flag = dump->flag; @@ -5036,11 +5044,8 @@ static int bnxt_get_ts_info(struct net_device *dev, struct bnxt_ptp_cfg *ptp; ptp = bp->ptp_cfg; - info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; - info->phc_index = -1; if (!ptp) return 0; @@ -5285,7 +5290,7 @@ void bnxt_ethtool_free(struct bnxt *bp) const struct ethtool_ops bnxt_ethtool_ops = { .cap_link_lanes_supported = 1, - .cap_rss_ctx_supported = 1, + .rxfh_per_ctx_key = 1, .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1, .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, .rxfh_priv_size = sizeof(struct bnxt_rss_ctx), diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index f219709f956355..f8ef6f1a1964e6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -403,6 +403,9 @@ struct cmd_nums { #define HWRM_FUNC_LAG_UPDATE 0x1b1UL #define HWRM_FUNC_LAG_FREE 0x1b2UL #define HWRM_FUNC_LAG_QCFG 0x1b3UL + #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL + #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL + #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -430,6 +433,9 @@ struct cmd_nums { #define HWRM_STAT_GENERIC_QSTATS 0x218UL #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL + #define HWRM_MFG_TESTS 0x21bUL + #define HWRM_PORT_POE_CFG 0x230UL + #define HWRM_PORT_POE_QCFG 0x231UL #define HWRM_UDCC_QCAPS 0x258UL #define HWRM_UDCC_CFG 0x259UL #define HWRM_UDCC_QCFG 0x25aUL @@ -439,6 +445,9 @@ struct cmd_nums { #define HWRM_UDCC_COMP_CFG 0x25eUL #define HWRM_UDCC_COMP_QCFG 0x25fUL #define HWRM_UDCC_COMP_QUERY 0x260UL + #define 
HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL + #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL #define HWRM_TF 0x2bcUL #define HWRM_TF_VERSION_GET 0x2bdUL #define HWRM_TF_SESSION_OPEN 0x2c6UL @@ -500,10 +509,8 @@ struct cmd_nums { #define HWRM_TFC_IF_TBL_GET 0x399UL #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL - #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL - #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL - #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL #define HWRM_SV 0x400UL + #define HWRM_DBG_SERDES_TEST 0xff0eUL #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL @@ -533,6 +540,9 @@ struct cmd_nums { #define HWRM_DBG_USEQ_RUN 0xff29UL #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL + #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL + #define HWRM_DBG_PTRACE 0xff2dUL + #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL #define HWRM_NVM_DEFRAG 0xffecUL @@ -582,6 +592,7 @@ struct ret_codes { #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL + #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL @@ -613,8 +624,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 3 -#define HWRM_VERSION_RSVD 44 -#define HWRM_VERSION_STR "1.10.3.44" +#define HWRM_VERSION_RSVD 68 +#define HWRM_VERSION_STR "1.10.3.68" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -850,7 +861,10 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL - #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL + #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -1691,7 +1705,7 @@ struct hwrm_func_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_qcaps_output (size:1088b/136B) */ +/* hwrm_func_qcaps_output (size:1152b/144B) */ struct hwrm_func_qcaps_output { __le16 error_code; __le16 req_type; @@ -1824,6 +1838,9 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL __le16 tunnel_disable_flag; #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL @@ -1845,7 +1862,9 @@ struct hwrm_func_qcaps_output { __le32 roce_vf_max_qp; __le32 roce_vf_max_srq; 
__le32 roce_vf_max_gid; - u8 unused_3[3]; + __le32 flags_ext3; + #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL + u8 unused_3[7]; u8 valid; }; @@ -2021,7 +2040,8 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 __le16 host_mtu; - u8 unused_3[2]; + __le16 flags2; + #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL u8 unused_4[2]; u8 port_kdnet_mode; #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL @@ -3671,33 +3691,38 @@ struct hwrm_func_backing_store_cfg_v2_input { __le16 target_id; __le64 resp_addr; __le16 type; - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL + #define 
FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID __le16 instance; __le32 flags; #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL @@ -3772,6 +3797,11 @@ struct hwrm_func_backing_store_qcfg_v2_input { #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID __le16 instance; @@ -3785,29 +3815,34 @@ struct hwrm_func_backing_store_qcfg_v2_output { __le16 seq_id; __le16 resp_len; __le16 type; - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL + #define 
FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID __le16 instance; __le32 flags; __le64 page_dir; @@ -3883,6 +3918,13 @@ struct ts_split_entries { __le32 rsvd2[2]; }; +/* ck_split_entries (size:128b/16B) */ +struct ck_split_entries { + __le32 num_quic_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + /* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */ struct hwrm_func_backing_store_qcaps_v2_input { __le16 req_type; @@ -3891,33 +3933,38 @@ struct hwrm_func_backing_store_qcaps_v2_input { __le16 target_id; __le64 resp_addr; __le16 type; - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL - #define 
FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID u8 rsvd[6]; }; @@ -3928,39 +3975,45 @@ struct hwrm_func_backing_store_qcaps_v2_output { __le16 seq_id; __le16 resp_len; __le16 type; - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 
0x19UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID __le16 entry_size; __le32 flags; #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL __le32 instance_bit_map; u8 ctx_init_value; u8 ctx_init_offset; @@ -4410,6 +4463,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL #define 
PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE __le32 preemphasis; @@ -4941,7 +4995,9 @@ struct hwrm_port_qstats_output { __le16 resp_len; __le16 tx_stat_size; __le16 rx_stat_size; - u8 unused_0[3]; + u8 flags; + #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL + u8 unused_0[2]; u8 valid; }; @@ -5074,6 +5130,7 @@ struct hwrm_port_qstats_ext_output { __le16 total_active_cos_queues; u8 flags; #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL + #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL u8 valid; }; @@ -6510,6 +6567,43 @@ struct hwrm_vnic_alloc_output { u8 valid; }; +/* hwrm_vnic_update_input (size:256b/32B) */ +struct hwrm_vnic_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 enables; + #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL + #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL + #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL + u8 vnic_state; + #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL + #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL + #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP + u8 metadata_format_type; + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 + __le16 mru; + u8 unused_1[4]; +}; + +/* hwrm_vnic_update_output (size:128b/16B) */ +struct hwrm_vnic_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_vnic_free_input (size:192b/24B) */ struct hwrm_vnic_free_input { __le16 req_type; @@ -6640,6 +6734,7 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL + #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; @@ -7484,23 +7579,24 @@ struct hwrm_cfa_l2_filter_cfg_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX - #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE - #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4 - #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4) - #define 
CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP __le32 enables; #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL @@ -8766,7 +8862,7 @@ struct ctx_hw_stats_ext { __le64 rx_tpa_events; }; -/* hwrm_stat_ctx_alloc_input (size:320b/40B) */ +/* hwrm_stat_ctx_alloc_input (size:384b/48B) */ struct hwrm_stat_ctx_alloc_input { __le16 req_type; __le16 cmpl_ring; @@ -8776,13 +8872,16 @@ struct hwrm_stat_ctx_alloc_input { __le64 stats_dma_addr; __le32 update_period_ms; u8 stat_ctx_flags; - #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL + #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL + #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL u8 unused_0; __le16 stats_dma_length; __le16 flags; #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL __le16 steering_tag; - __le32 unused_1; + __le32 stat_ctx_id; + __le16 alloc_seq_id; + u8 unused_1[6]; }; /* hwrm_stat_ctx_alloc_output (size:128b/16B) */ @@ -9650,10 +9749,13 @@ struct hwrm_dbg_qcaps_output { __le32 coredump_component_disable_caps; #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL __le32 flags; - #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL - #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL - #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL - #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL + #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL + #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL + #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL + #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL + #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL + #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL + #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL u8 unused_1[3]; u8 valid; }; @@ -10092,16 +10194,19 @@ struct hwrm_nvm_erase_dir_entry_output { u8 valid; }; -/* hwrm_nvm_get_dev_info_input (size:128b/16B) */ +/* hwrm_nvm_get_dev_info_input (size:192b/24B) */ struct hwrm_nvm_get_dev_info_input { __le16 req_type; __le16 cmpl_ring; __le16 seq_id; __le16 target_id; __le64 resp_addr; + u8 flags; + #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL + u8 unused_0[7]; }; -/* hwrm_nvm_get_dev_info_output (size:704b/88B) */ +/* hwrm_nvm_get_dev_info_output 
(size:768b/96B) */ struct hwrm_nvm_get_dev_info_output { __le16 error_code; __le16 req_type; @@ -10135,6 +10240,10 @@ struct hwrm_nvm_get_dev_info_output { __le16 netctrl_fw_minor; __le16 netctrl_fw_build; __le16 netctrl_fw_patch; + __le16 srt2_fw_major; + __le16 srt2_fw_minor; + __le16 srt2_fw_build; + __le16 srt2_fw_patch; u8 unused_0[7]; u8 valid; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 22898d3d088b05..7bb8a5d744308a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_hwrm.h" @@ -196,11 +197,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN); ivi->max_tx_rate = vf->max_tx_rate; ivi->min_tx_rate = vf->min_tx_rate; - ivi->vlan = vf->vlan; - if (vf->flags & BNXT_VF_QOS) - ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT; - else - ivi->qos = 0; + ivi->vlan = vf->vlan & VLAN_VID_MASK; + ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT; ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); ivi->trusted = bnxt_is_trusted_vf(bp, vf); if (!(vf->flags & BNXT_VF_LINK_FORCED)) @@ -256,21 +254,21 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, if (bp->hwrm_spec_code < 0x10201) return -ENOTSUPP; - if (vlan_proto != htons(ETH_P_8021Q)) + if (vlan_proto != htons(ETH_P_8021Q) && + (vlan_proto != htons(ETH_P_8021AD) || + !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP))) return -EPROTONOSUPPORT; rc = bnxt_vf_ndo_prep(bp, vf_id); if (rc) return rc; - /* TODO: needed to implement proper handling of user priority, - * currently fail the command if there is valid priority - */ - if (vlan_id > 4095 || qos) + if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES || + (!vlan_id && qos)) return -EINVAL; vf = &bp->pf.vf[vf_id]; - vlan_tag = vlan_id; + vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT; if (vlan_tag == vf->vlan) return 0; @@ -279,6 +277,10 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, req->fid = cpu_to_le16(vf->fw_fid); req->dflt_vlan = cpu_to_le16(vlan_tag); req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) { + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID); + req->tpid = vlan_proto; + } rc = hwrm_req_send(bp, req); if (!rc) vf->vlan = vlan_tag; @@ -900,11 +902,6 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) struct net_device *dev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(dev); - if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { - netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n"); - return 0; - } - rtnl_lock(); if (!netif_running(dev)) { netdev_warn(dev, "Reject SRIOV config request since if is down!\n"); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index b9e7d3e7b15d07..fdd6356f21efb3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -176,11 +176,17 @@ EXPORT_SYMBOL(bnxt_unregister_dev); static int bnxt_set_dflt_ulp_msix(struct bnxt *bp) { - u32 roce_msix = BNXT_VF(bp) ? - BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX; + int roce_msix = BNXT_MAX_ROCE_MSIX; - return ((bp->flags & BNXT_FLAG_ROCE_CAP) ? 
- min_t(u32, roce_msix, num_online_cpus()) : 0); + if (BNXT_VF(bp)) + roce_msix = BNXT_MAX_ROCE_MSIX_VF; + else if (bp->port_partition_type) + roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF; + + /* NQ MSIX vectors should match the number of CPUs plus 1 more for + * the CREQ MSIX, up to the default. + */ + return min_t(int, roce_msix, num_online_cpus() + 1); } int bnxt_send_msg(struct bnxt_en_dev *edev, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 4eafe6ec0abfad..4f4914f5c84c91 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -15,8 +15,10 @@ #define BNXT_MIN_ROCE_CP_RINGS 2 #define BNXT_MIN_ROCE_STAT_CTXS 1 -#define BNXT_MAX_ROCE_MSIX 9 -#define BNXT_MAX_VF_ROCE_MSIX 2 + +#define BNXT_MAX_ROCE_MSIX_VF 2 +#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5 +#define BNXT_MAX_ROCE_MSIX 64 struct hwrm_async_event_cmpl; struct bnxt; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index c2b4188a1ef1c5..a9040c42d2ff97 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -31,6 +31,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_VLAN_8021Q) #define BCM_VLAN 1 #endif @@ -3015,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk) return cnic_service_bnx2_queues(dev); } -static void cnic_service_bnx2_msix(struct tasklet_struct *t) +static void cnic_service_bnx2_msix(struct work_struct *work) { - struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task); + struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work); struct cnic_dev *dev = cp->dev; cp->last_status_idx = cnic_service_bnx2_queues(dev); @@ -3036,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev) prefetch(cp->status_blk.gen); prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); - tasklet_schedule(&cp->cnic_irq_task); + queue_work(system_bh_wq, &cp->cnic_irq_bh_work); } } @@ -3140,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) return last_status; } -static void cnic_service_bnx2x_bh(struct tasklet_struct *t) +static void cnic_service_bnx2x_bh_work(struct work_struct *work) { - struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task); + struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work); struct cnic_dev *dev = cp->dev; struct bnx2x *bp = netdev_priv(dev->netdev); u32 status_idx, new_status_idx; @@ -4428,7 +4429,7 @@ static void cnic_free_irq(struct cnic_dev *dev) if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { cp->disable_int_sync(dev); - tasklet_kill(&cp->cnic_irq_task); + cancel_work_sync(&cp->cnic_irq_bh_work); free_irq(ethdev->irq_arr[0].vector, dev); } } @@ -4441,7 +4442,7 @@ static int cnic_request_irq(struct cnic_dev *dev) err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); if (err) - tasklet_disable(&cp->cnic_irq_task); + disable_work_sync(&cp->cnic_irq_bh_work); return err; } @@ -4464,7 +4465,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev) CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); cp->last_status_idx = cp->status_blk.bnx2->status_idx; - tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix); + INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix); err = cnic_request_irq(dev); if (err) return err; @@ -4873,7 +4874,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev) struct cnic_eth_dev *ethdev = cp->ethdev; int err = 0; - tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh); + 
INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work); if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) err = cnic_request_irq(dev); diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index fedc84ada937d9..1a314a75d2d2a3 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -268,7 +268,7 @@ struct cnic_local { u32 bnx2x_igu_sb_id; u32 int_num; u32 last_status_idx; - struct tasklet_struct cnic_irq_task; + struct work_struct cnic_irq_bh_work; struct kcqe *completed_kcq[MAX_COMPLETED_KCQE]; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index c7e7dac057a336..f7be886570d887 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -37,7 +37,7 @@ #include #include -#include +#include #include "bcmgenet.h" diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 0ec5f01551f980..37881591774175 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6145,9 +6145,7 @@ static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info { struct tg3 *tp = netdev_priv(dev); - info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; if (tg3_flag(tp, PTP_CAPABLE)) { info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | @@ -6157,8 +6155,6 @@ static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info if (tp->ptp_clock) info->phc_index = ptp_clock_index(tp->ptp_clock); - else - info->phc_index = -1; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index ea71612f6b36fb..5740c98d8c9f03 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -13,6 +13,7 @@ #include #include #include +#include #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP) #define MACB_EXT_DESC @@ -1330,7 +1331,7 @@ struct macb { spinlock_t rx_fs_lock; unsigned int max_tuples; - struct tasklet_struct hresp_err_tasklet; + struct work_struct hresp_err_bh_work; int rx_bd_rd_prefetch; int tx_bd_rd_prefetch; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index dcd3f54ed0cf00..f06babec04a0bf 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1792,9 +1792,9 @@ static int macb_tx_poll(struct napi_struct *napi, int budget) return work_done; } -static void macb_hresp_error_task(struct tasklet_struct *t) +static void macb_hresp_error_task(struct work_struct *work) { - struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); + struct macb *bp = from_work(bp, work, hresp_err_bh_work); struct net_device *dev = bp->dev; struct macb_queue *queue; unsigned int q; @@ -1994,7 +1994,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } if (status & MACB_BIT(HRESP)) { - tasklet_schedule(&bp->hresp_err_tasklet); + queue_work(system_bh_wq, &bp->hresp_err_bh_work); netdev_err(dev, "DMA bus error: HRESP not OK\n"); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) @@ -3410,8 +3410,6 @@ static int gem_get_ts_info(struct net_device *dev, info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | 
SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; @@ -3423,7 +3421,8 @@ static int gem_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_ALL); - info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; + if (bp->ptp_clock) + info->phc_index = ptp_clock_index(bp->ptp_clock); return 0; } @@ -4184,6 +4183,8 @@ static int macb_init(struct platform_device *pdev) dev->ethtool_ops = &macb_ethtool_ops; } + netdev_sw_irq_coalesce_default_on(dev); + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; /* Set features */ @@ -5119,12 +5120,12 @@ static int macb_probe(struct platform_device *pdev) goto err_out_free_netdev; } - /* MTU range: 68 - 1500 or 10240 */ + /* MTU range: 68 - 1518 or 10240 */ dev->min_mtu = GEM_MTU_MIN_SIZE; if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN; else - dev->max_mtu = ETH_DATA_LEN; + dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN; if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); @@ -5172,7 +5173,7 @@ static int macb_probe(struct platform_device *pdev) goto err_out_unregister_mdio; } - tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); + INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task); netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), @@ -5216,7 +5217,7 @@ static void macb_remove(struct platform_device *pdev) mdiobus_free(bp->mii_bus); unregister_netdev(dev); - tasklet_kill(&bp->hresp_err_tasklet); + cancel_work_sync(&bp->hresp_err_bh_work); pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (!pm_runtime_suspended(&pdev->dev)) { diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index f66d22de5168d2..fc4f5aee6ab3f6 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -19,8 +19,7 @@ #define PCI_DRIVER_NAME "macb_pci" #define PLAT_DRIVER_NAME "macb" -#define CDNS_VENDOR_ID 0x17cd -#define CDNS_DEVICE_ID 0xe007 +#define PCI_DEVICE_ID_CDNS_MACB 0xe007 #define GEM_PCLK_RATE 50000000 #define GEM_HCLK_RATE 50000000 @@ -117,7 +116,7 @@ static void macb_remove(struct pci_dev *pdev) } static const struct pci_device_id dev_id_table[] = { - { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), }, + { PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_MACB) }, { 0, } }; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h index 2d06097d3f61e1..40f529d0bc4cda 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h @@ -43,6 +43,4 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct); int cn23xx_setup_octeon_vf_device(struct octeon_device *oct); u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); - -void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h index 8ed57134ee0ce1..129c8b84f549cd 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h @@ -86,7 +86,6 @@ u32 lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq); void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused); void 
lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused); -void cn6xxx_get_pcie_qlmport(struct octeon_device *oct); void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip, struct octeon_reg_list *reg_list); u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 5835965dbc32e6..c849e2c871a98f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -2496,37 +2496,31 @@ static int lio_set_intr_coalesce(struct net_device *netdev, return ret; } +#ifdef PTP_HARDWARE_TIMESTAMPING static int lio_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info) { struct lio *lio = GET_LIO(netdev); info->so_timestamping = -#ifdef PTP_HARDWARE_TIMESTAMPING SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | -#endif - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + SOF_TIMESTAMPING_TX_SOFTWARE; if (lio->ptp_clock) info->phc_index = ptp_clock_index(lio->ptp_clock); - else - info->phc_index = -1; -#ifdef PTP_HARDWARE_TIMESTAMPING info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); -#endif return 0; } +#endif /* Return register dump len. */ static int lio_get_regs_len(struct net_device *dev) @@ -3146,7 +3140,9 @@ static const struct ethtool_ops lio_ethtool_ops = { .set_coalesce = lio_set_intr_coalesce, .get_priv_flags = lio_get_priv_flags, .set_priv_flags = lio_set_priv_flags, +#ifdef PTP_HARDWARE_TIMESTAMPING .get_ts_info = lio_get_ts_info, +#endif }; static const struct ethtool_ops lio_vf_ethtool_ops = { @@ -3169,7 +3165,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = { .set_coalesce = lio_set_intr_coalesce, .get_priv_flags = lio_get_priv_flags, .set_priv_flags = lio_set_priv_flags, +#ifdef PTP_HARDWARE_TIMESTAMPING .get_ts_info = lio_get_ts_info, +#endif }; void liquidio_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index fb380b4f3e0207..d26364c2ac81ce 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -804,13 +804,6 @@ int octeon_init_consoles(struct octeon_device *oct); int octeon_add_console(struct octeon_device *oct, u32 console_num, char *dbg_enb); -/** write or read from a console */ -int octeon_console_write(struct octeon_device *oct, u32 console_num, - char *buffer, u32 write_request_size, u32 flags); -int octeon_console_write_avail(struct octeon_device *oct, u32 console_num); - -int octeon_console_read_avail(struct octeon_device *oct, u32 console_num); - /** Removes all attached consoles. 
*/ void octeon_remove_consoles(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index c9b19e624dce78..232ae72c0e37d1 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -395,8 +395,6 @@ int octeon_register_dispatch_fn(struct octeon_device *oct, void *octeon_get_dispatch_arg(struct octeon_device *oct, u16 opcode, u16 subcode); -void octeon_droq_print_stats(void); - u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq); int octeon_create_droq(struct octeon_device *oct, u32 q_no, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index bebf3bd349c6f2..a04f36a0e1a080 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -378,9 +378,6 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db, void *cmd, void *buf, u32 datasize, u32 reqtype); -void octeon_dump_soft_command(struct octeon_device *oct, - struct octeon_soft_command *sc); - void octeon_prepare_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc, u8 opcode, u8 subcode, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 6a04d253017692..d0ff0c170b1a0a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -844,8 +844,6 @@ static int nicvf_get_ts_info(struct net_device *netdev, return ethtool_op_get_ts_info(netdev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 8453defc296c24..b7531041c56dfd 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -359,8 +359,6 @@ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx); /* Register access APIs */ void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val); u64 nicvf_reg_read(struct nicvf *nic, u64 offset); -void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val); -u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset); void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, u64 qidx, u64 val); u64 nicvf_queue_reg_read(struct nicvf *nic, diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index cdea4939218578..84f16ababaee68 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -219,9 +219,7 @@ void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf); void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf); void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode); -void octeon_mdiobus_force_mod_depencency(void); void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable); -void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); unsigned bgx_get_map(int node); int bgx_get_lmac_count(int node, int bgx); const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid); diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h 
b/drivers/net/ethernet/chelsio/cxgb/common.h index e56eff7013953b..304bb282ab03d7 100644 --- a/drivers/net/ethernet/chelsio/cxgb/common.h +++ b/drivers/net/ethernet/chelsio/cxgb/common.h @@ -329,8 +329,6 @@ irqreturn_t t1_slow_intr_handler(adapter_t *adapter); int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); const struct board_info *t1_get_board_info(unsigned int board_id); -const struct board_info *t1_get_board_info_from_ids(unsigned int devid, - unsigned short ssid); int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data); int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, struct adapter_params *p); diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 7d7d3e0098df2e..3b7068832f95ef 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -1034,7 +1034,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA; + NETIF_F_RXCSUM | NETIF_F_HIGHDMA; + netdev->lltx = true; if (vlan_tso_capable(adapter)) { netdev->features |= diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h index ba15675d56dffa..64f93dcc676b0b 100644 --- a/drivers/net/ethernet/chelsio/cxgb/tp.h +++ b/drivers/net/ethernet/chelsio/cxgb/tp.h @@ -65,9 +65,7 @@ void t1_tp_intr_enable(struct petp *tp); void t1_tp_intr_clear(struct petp *tp); int t1_tp_intr_handler(struct petp *tp); -void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps); void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable); void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable); -int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size); int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk); #endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h index f04e81f337951c..a08fc762a43883 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h @@ -106,6 +106,4 @@ static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t, return &e->t3c_tid; } -int attach_t3cdev(struct t3cdev *dev); -void detach_t3cdev(struct t3cdev *dev); #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index fca9533bc01127..bbf7641a0fc799 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1958,11 +1958,6 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf); void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate); void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); -void t4_wol_magic_enable(struct adapter *adap, unsigned int port, - const u8 *addr); -int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, - u64 mask0, u64 mask1, unsigned int crc, bool enable); - int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state); int t4_fw_bye(struct adapter *adap, unsigned int mbox); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index 80c6627fe981f5..c80a93347a8cb1 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -122,7 +122,6 @@ void cxgb4_dcb_version_init(struct net_device *); void cxgb4_dcb_reset(struct net_device *dev); void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input); void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *); -void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *); extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops; static inline __u8 bitswap_1(unsigned char val) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 3d091947ae00f0..7f3f5afa864f4a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1556,12 +1556,9 @@ static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts struct adapter *adapter = pi->adapter; ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - - ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1575,8 +1572,6 @@ static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts if (adapter->ptp_clock) ts_info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - ts_info->phc_index = -1; return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c index 33b2c0c455093e..f6f745f5c02218 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c @@ -81,8 +81,7 @@ int cxgb_fcoe_enable(struct net_device *netdev) netdev->features |= NETIF_F_FCOE_CRC; netdev->vlan_features |= NETIF_F_FCOE_CRC; - netdev->features |= NETIF_F_FCOE_MTU; - netdev->vlan_features |= NETIF_F_FCOE_MTU; + netdev->fcoe_mtu = true; netdev_features_change(netdev); @@ -112,8 +111,7 @@ int cxgb_fcoe_disable(struct net_device *netdev) netdev->features &= ~NETIF_F_FCOE_CRC; netdev->vlan_features &= ~NETIF_F_FCOE_CRC; - netdev->features &= ~NETIF_F_FCOE_MTU; - netdev->vlan_features &= ~NETIF_F_FCOE_MTU; + netdev->fcoe_mtu = false; netdev_features_change(netdev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h index 9050568a034cdf..64663112cad8a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h @@ -242,7 +242,7 @@ struct cxgb4_next_header { * field's value to jump to next header such as IHL field * in IPv4 header. 
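Note on the get_ts_info() hunks in this series: drivers drop SOF_TIMESTAMPING_RX_SOFTWARE / SOF_TIMESTAMPING_SOFTWARE and the explicit "phc_index = -1", presumably because the ethtool core now pre-initialises struct kernel_ethtool_ts_info with those software defaults. Under that assumption, a driver callback reduces to declaring only its hardware capabilities; a minimal sketch (hypothetical foo_priv driver, not code from this patch):

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/ptp_clock_kernel.h>

/* Illustrative sketch: assumes the core fills in software timestamping
 * capabilities and defaults phc_index to -1 before calling the driver.
 */
static int foo_get_ts_info(struct net_device *dev,
			   struct kernel_ethtool_ts_info *info)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical priv */

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (priv->ptp_clock)
		info->phc_index = ptp_clock_index(priv->ptp_clock);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}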
*/ - struct tc_u32_sel sel; + struct tc_u32_sel_hdr sel; struct tc_u32_key key; /* location of jump to make */ const struct cxgb4_match_field *jump; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index a9599ba2697576..d8cafaa7ddb4f8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -508,7 +508,6 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo); unsigned int cxgb4_port_chan(const struct net_device *dev); unsigned int cxgb4_port_e2cchan(const struct net_device *dev); unsigned int cxgb4_port_viid(const struct net_device *dev); -unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid); unsigned int cxgb4_port_idx(const struct net_device *dev); unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, unsigned int *idx); diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 854d87e1125cbb..2e3973a32d9d1d 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -342,10 +342,10 @@ int cxgbi_ppm_release(struct cxgbi_ppm *ppm) } EXPORT_SYMBOL(cxgbi_ppm_release); -static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, - unsigned int *pcpu_ppmax) +static struct cxgbi_ppm_pool __percpu * +ppm_alloc_cpu_pool(unsigned int *total, unsigned int *pcpu_ppmax) { - struct cxgbi_ppm_pool *pools; + struct cxgbi_ppm_pool __percpu *pools; unsigned int ppmax = (*total) / num_possible_cpus(); unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3; unsigned int bmap; @@ -392,7 +392,7 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, unsigned int iscsi_edram_size) { struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); - struct cxgbi_ppm_pool *pool = NULL; + struct cxgbi_ppm_pool __percpu *pool = NULL; unsigned int pool_index_max = 0; unsigned int ppmax_pool = 0; unsigned int ppod_bmap_size; diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 1f495cfd7959b0..c2007cd864163b 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -16,13 +16,12 @@ #include #include #include +#include #include #include #include #include -#include - #define DRV_MODULE_NAME "ep93xx-eth" #define RX_QUEUE_ENTRIES 64 @@ -738,25 +737,6 @@ static const struct net_device_ops ep93xx_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; -static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) -{ - struct net_device *dev; - - dev = alloc_etherdev(sizeof(struct ep93xx_priv)); - if (dev == NULL) - return NULL; - - eth_hw_addr_set(dev, data->dev_addr); - - dev->ethtool_ops = &ep93xx_ethtool_ops; - dev->netdev_ops = &ep93xx_netdev_ops; - - dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; - - return dev; -} - - static void ep93xx_eth_remove(struct platform_device *pdev) { struct net_device *dev; @@ -786,27 +766,49 @@ static void ep93xx_eth_remove(struct platform_device *pdev) static int ep93xx_eth_probe(struct platform_device *pdev) { - struct ep93xx_eth_data *data; struct net_device *dev; struct ep93xx_priv *ep; struct resource *mem; + void __iomem *base_addr; + struct device_node *np; + u8 addr[ETH_ALEN]; + u32 phy_id; int irq; int err; if (pdev == NULL) return -ENODEV; - data = dev_get_platdata(&pdev->dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = 
platform_get_irq(pdev, 0); if (!mem || irq < 0) return -ENXIO; - dev = ep93xx_dev_alloc(data); + base_addr = ioremap(mem->start, resource_size(mem)); + if (!base_addr) + return dev_err_probe(&pdev->dev, -EIO, "Failed to ioremap ethernet registers\n"); + + np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!np) + return dev_err_probe(&pdev->dev, -ENODEV, "Please provide \"phy-handle\"\n"); + + err = of_property_read_u32(np, "reg", &phy_id); + of_node_put(np); + if (err) + return dev_err_probe(&pdev->dev, -ENOENT, "Failed to locate \"phy_id\"\n"); + + dev = alloc_etherdev(sizeof(struct ep93xx_priv)); if (dev == NULL) { err = -ENOMEM; goto err_out; } + + memcpy_fromio(addr, base_addr + 0x50, ETH_ALEN); + eth_hw_addr_set(dev, addr); + dev->ethtool_ops = &ep93xx_ethtool_ops; + dev->netdev_ops = &ep93xx_netdev_ops; + dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; + ep = netdev_priv(dev); ep->dev = dev; SET_NETDEV_DEV(dev, &pdev->dev); @@ -822,15 +824,10 @@ static int ep93xx_eth_probe(struct platform_device *pdev) goto err_out; } - ep->base_addr = ioremap(mem->start, resource_size(mem)); - if (ep->base_addr == NULL) { - dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); - err = -EIO; - goto err_out; - } + ep->base_addr = base_addr; ep->irq = irq; - ep->mii.phy_id = data->phy_id; + ep->mii.phy_id = phy_id; ep->mii.phy_id_mask = 0x1f; ep->mii.reg_num_mask = 0x1f; ep->mii.dev = dev; @@ -857,12 +854,18 @@ static int ep93xx_eth_probe(struct platform_device *pdev) return err; } +static const struct of_device_id ep93xx_eth_of_ids[] = { + { .compatible = "cirrus,ep9301-eth" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids); static struct platform_driver ep93xx_eth_driver = { .probe = ep93xx_eth_probe, .remove_new = ep93xx_eth_remove, .driver = { .name = "ep93xx-eth", + .of_match_table = ep93xx_eth_of_ids, }, }; diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 300ad05ee05bc2..0cc3644ee8554f 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -128,6 +128,40 @@ struct vxlan_offload { u8 flags; }; +struct enic_wq_stats { + u64 packets; /* pkts queued for Tx */ + u64 stopped; /* Tx ring almost full, queue stopped */ + u64 wake; /* Tx ring no longer full, queue woken up*/ + u64 tso; /* non-encap tso pkt */ + u64 encap_tso; /* encap tso pkt */ + u64 encap_csum; /* encap HW csum */ + u64 csum_partial; /* skb->ip_summed = CHECKSUM_PARTIAL */ + u64 csum_none; /* HW csum not required */ + u64 bytes; /* bytes queued for Tx */ + u64 add_vlan; /* HW adds vlan tag */ + u64 cq_work; /* Tx completions processed */ + u64 cq_bytes; /* Tx bytes processed */ + u64 null_pkt; /* skb length <= 0 */ + u64 skb_linear_fail; /* linearize failures */ + u64 desc_full_awake; /* TX ring full while queue awake */ +}; + +struct enic_rq_stats { + u64 packets; /* pkts received */ + u64 bytes; /* bytes received */ + u64 l4_rss_hash; /* hashed on l4 */ + u64 l3_rss_hash; /* hashed on l3 */ + u64 csum_unnecessary; /* HW verified csum */ + u64 csum_unnecessary_encap; /* HW verified csum on encap packet */ + u64 vlan_stripped; /* HW stripped vlan */ + u64 napi_complete; /* napi complete intr reenabled */ + u64 napi_repoll; /* napi poll again */ + u64 bad_fcs; /* bad pkts */ + u64 pkt_truncated; /* truncated pkts */ + u64 no_skb; /* out of skbs */ + u64 desc_skip; /* Rx pkt went into later buffer */ +}; + /* Per-instance private data structure */ struct enic { struct net_device *netdev; @@ -162,16 
+196,16 @@ struct enic { /* work queue cache line section */ ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; spinlock_t wq_lock[ENIC_WQ_MAX]; + struct enic_wq_stats wq_stats[ENIC_WQ_MAX]; unsigned int wq_count; u16 loop_enable; u16 loop_tag; /* receive queue cache line section */ ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; + struct enic_rq_stats rq_stats[ENIC_RQ_MAX]; unsigned int rq_count; struct vxlan_offload vxlan; - u64 rq_truncated_pkts; - u64 rq_bad_fcs; struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX]; /* interrupt resource cache line section */ diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index f2f1055880b29d..f7986f2b6a1794 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -32,6 +32,41 @@ struct enic_stat { .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\ } +#define ENIC_PER_RQ_STAT(stat) { \ + .name = "rq[%d]_"#stat, \ + .index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \ +} + +#define ENIC_PER_WQ_STAT(stat) { \ + .name = "wq[%d]_"#stat, \ + .index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \ +} + +static const struct enic_stat enic_per_rq_stats[] = { + ENIC_PER_RQ_STAT(l4_rss_hash), + ENIC_PER_RQ_STAT(l3_rss_hash), + ENIC_PER_RQ_STAT(csum_unnecessary_encap), + ENIC_PER_RQ_STAT(vlan_stripped), + ENIC_PER_RQ_STAT(napi_complete), + ENIC_PER_RQ_STAT(napi_repoll), + ENIC_PER_RQ_STAT(no_skb), + ENIC_PER_RQ_STAT(desc_skip), +}; + +#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats) + +static const struct enic_stat enic_per_wq_stats[] = { + ENIC_PER_WQ_STAT(encap_tso), + ENIC_PER_WQ_STAT(encap_csum), + ENIC_PER_WQ_STAT(add_vlan), + ENIC_PER_WQ_STAT(cq_work), + ENIC_PER_WQ_STAT(cq_bytes), + ENIC_PER_WQ_STAT(null_pkt), + ENIC_PER_WQ_STAT(skb_linear_fail), + ENIC_PER_WQ_STAT(desc_full_awake), +}; + +#define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats) static const struct enic_stat enic_tx_stats[] = { ENIC_TX_STAT(tx_frames_ok), ENIC_TX_STAT(tx_unicast_frames_ok), @@ -46,6 +81,8 @@ static const struct enic_stat enic_tx_stats[] = { ENIC_TX_STAT(tx_tso), }; +#define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats) + static const struct enic_stat enic_rx_stats[] = { ENIC_RX_STAT(rx_frames_ok), ENIC_RX_STAT(rx_frames_total), @@ -70,13 +107,13 @@ static const struct enic_stat enic_rx_stats[] = { ENIC_RX_STAT(rx_frames_to_max), }; +#define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats) + static const struct enic_stat enic_gen_stats[] = { ENIC_GEN_STAT(dma_map_error), }; -static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); -static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); -static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats); +#define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats) static void enic_intr_coal_set_rx(struct enic *enic, u32 timer) { @@ -141,22 +178,38 @@ static void enic_get_drvinfo(struct net_device *netdev, static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct enic *enic = netdev_priv(netdev); unsigned int i; + unsigned int j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < enic_n_tx_stats; i++) { + for (i = 0; i < NUM_ENIC_TX_STATS; i++) { memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } - for (i = 0; i < enic_n_rx_stats; i++) { + for (i = 0; i < NUM_ENIC_RX_STATS; i++) { memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } - for (i 
= 0; i < enic_n_gen_stats; i++) { + for (i = 0; i < NUM_ENIC_GEN_STATS; i++) { memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } + for (i = 0; i < enic->rq_count; i++) { + for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) { + snprintf(data, ETH_GSTRING_LEN, + enic_per_rq_stats[j].name, i); + data += ETH_GSTRING_LEN; + } + } + for (i = 0; i < enic->wq_count; i++) { + for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) { + snprintf(data, ETH_GSTRING_LEN, + enic_per_wq_stats[j].name, i); + data += ETH_GSTRING_LEN; + } + } break; } } @@ -242,9 +295,19 @@ static int enic_set_ringparam(struct net_device *netdev, static int enic_get_sset_count(struct net_device *netdev, int sset) { + struct enic *enic = netdev_priv(netdev); + unsigned int n_per_rq_stats; + unsigned int n_per_wq_stats; + unsigned int n_stats; + switch (sset) { case ETH_SS_STATS: - return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats; + n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count; + n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count; + n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS + + NUM_ENIC_GEN_STATS + + n_per_rq_stats + n_per_wq_stats; + return n_stats; default: return -EOPNOTSUPP; } @@ -256,6 +319,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct vnic_stats *vstats; unsigned int i; + unsigned int j; int err; err = enic_dev_stats_dump(enic, &vstats); @@ -266,12 +330,30 @@ static void enic_get_ethtool_stats(struct net_device *netdev, if (err == -ENOMEM) return; - for (i = 0; i < enic_n_tx_stats; i++) + for (i = 0; i < NUM_ENIC_TX_STATS; i++) *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; - for (i = 0; i < enic_n_rx_stats; i++) + for (i = 0; i < NUM_ENIC_RX_STATS; i++) *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index]; - for (i = 0; i < enic_n_gen_stats; i++) + for (i = 0; i < NUM_ENIC_GEN_STATS; i++) *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index]; + for (i = 0; i < enic->rq_count; i++) { + struct enic_rq_stats *rqstats = &enic->rq_stats[i]; + int index; + + for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) { + index = enic_per_rq_stats[j].index; + *(data++) = ((u64 *)rqstats)[index]; + } + } + for (i = 0; i < enic->wq_count; i++) { + struct enic_wq_stats *wqstats = &enic->wq_stats[i]; + int index; + + for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) { + index = enic_per_wq_stats[j].index; + *(data++) = ((u64 *)wqstats)[index]; + } + } } static u32 enic_get_msglevel(struct net_device *netdev) @@ -601,9 +683,7 @@ static int enic_set_rxfh(struct net_device *netdev, static int enic_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info) { - info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 5f26fc3ad65556..ffed14b63d41d1 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -46,6 +46,7 @@ #include #include #include +#include #include "cq_enet_desc.h" #include "vnic_dev.h" @@ -339,6 +340,10 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) { + struct enic *enic = vnic_dev_priv(wq->vdev); + + enic->wq_stats[wq->index].cq_work++; + 
enic->wq_stats[wq->index].cq_bytes += buf->len; enic_free_wq_buf(wq, buf); } @@ -355,8 +360,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && vnic_wq_desc_avail(&enic->wq[q_number]) >= - (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) + (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) { netif_wake_subqueue(enic->netdev, q_number); + enic->wq_stats[q_number].wake++; + } spin_unlock(&enic->wq_lock[q_number]); @@ -590,6 +597,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, if (!eop) err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + /* The enic_queue_wq_desc() above does not do HW checksum */ + enic->wq_stats[wq->index].csum_none++; + enic->wq_stats[wq->index].packets++; + enic->wq_stats[wq->index].bytes += skb->len; + return err; } @@ -622,6 +634,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, if (!eop) err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + enic->wq_stats[wq->index].csum_partial++; + enic->wq_stats[wq->index].packets++; + enic->wq_stats[wq->index].bytes += skb->len; + return err; } @@ -676,15 +692,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, unsigned int offset = 0; unsigned int hdr_len; dma_addr_t dma_addr; + unsigned int pkts; unsigned int len; skb_frag_t *frag; if (skb->encapsulation) { hdr_len = skb_inner_tcp_all_headers(skb); enic_preload_tcp_csum_encap(skb); + enic->wq_stats[wq->index].encap_tso++; } else { hdr_len = skb_tcp_all_headers(skb); enic_preload_tcp_csum(skb); + enic->wq_stats[wq->index].tso++; } /* Queue WQ_ENET_MAX_DESC_LEN length descriptors @@ -705,7 +724,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, } if (eop) - return 0; + goto tso_out_stats; /* Queue WQ_ENET_MAX_DESC_LEN length descriptors * for additional data fragments @@ -732,6 +751,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, } } +tso_out_stats: + /* calculate how many packets tso sent */ + len = skb->len - hdr_len; + pkts = len / mss; + if ((len % mss) > 0) + pkts++; + enic->wq_stats[wq->index].packets += pkts; + enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len)); + return 0; } @@ -764,6 +792,10 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, if (!eop) err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + enic->wq_stats[wq->index].encap_csum++; + enic->wq_stats[wq->index].packets++; + enic->wq_stats[wq->index].bytes += skb->len; + return err; } @@ -780,6 +812,7 @@ static inline int enic_queue_wq_skb(struct enic *enic, /* VLAN tag from trunking driver */ vlan_tag_insert = 1; vlan_tag = skb_vlan_tag_get(skb); + enic->wq_stats[wq->index].add_vlan++; } else if (enic->loop_enable) { vlan_tag = enic->loop_tag; loopback = 1; @@ -792,7 +825,7 @@ static inline int enic_queue_wq_skb(struct enic *enic, else if (skb->encapsulation) err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert, vlan_tag, loopback); - else if (skb->ip_summed == CHECKSUM_PARTIAL) + else if (skb->ip_summed == CHECKSUM_PARTIAL) err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, vlan_tag, loopback); else @@ -825,13 +858,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, unsigned int txq_map; struct netdev_queue *txq; + txq_map = skb_get_queue_mapping(skb) % enic->wq_count; + wq = &enic->wq[txq_map]; + if (skb->len <= 0) { dev_kfree_skb_any(skb); + 
enic->wq_stats[wq->index].null_pkt++; return NETDEV_TX_OK; } - txq_map = skb_get_queue_mapping(skb) % enic->wq_count; - wq = &enic->wq[txq_map]; txq = netdev_get_tx_queue(netdev, txq_map); /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, @@ -843,6 +878,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && skb_linearize(skb)) { dev_kfree_skb_any(skb); + enic->wq_stats[wq->index].skb_linear_fail++; return NETDEV_TX_OK; } @@ -854,14 +890,17 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, /* This is a hard error, log it */ netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); spin_unlock(&enic->wq_lock[txq_map]); + enic->wq_stats[wq->index].desc_full_awake++; return NETDEV_TX_BUSY; } if (enic_queue_wq_skb(enic, wq, skb)) goto error; - if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) + if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) { netif_tx_stop_queue(txq); + enic->wq_stats[wq->index].stopped++; + } skb_tx_timestamp(skb); if (!netdev_xmit_more() || netif_xmit_stopped(txq)) vnic_wq_doorbell(wq); @@ -878,7 +917,10 @@ static void enic_get_stats(struct net_device *netdev, { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; + u64 pkt_truncated = 0; + u64 bad_fcs = 0; int err; + int i; err = enic_dev_stats_dump(enic, &stats); /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump @@ -897,8 +939,17 @@ static void enic_get_stats(struct net_device *netdev, net_stats->rx_bytes = stats->rx.rx_bytes_ok; net_stats->rx_errors = stats->rx.rx_errors; net_stats->multicast = stats->rx.rx_multicast_frames_ok; - net_stats->rx_over_errors = enic->rq_truncated_pkts; - net_stats->rx_crc_errors = enic->rq_bad_fcs; + + for (i = 0; i < ENIC_RQ_MAX; i++) { + struct enic_rq_stats *rqs = &enic->rq_stats[i]; + + if (!enic->rq->ctrl) + break; + pkt_truncated += rqs->pkt_truncated; + bad_fcs += rqs->bad_fcs; + } + net_stats->rx_over_errors = pkt_truncated; + net_stats->rx_crc_errors = bad_fcs; net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; } @@ -1261,8 +1312,10 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) return 0; } skb = netdev_alloc_skb_ip_align(netdev, len); - if (!skb) + if (!skb) { + enic->rq_stats[rq->index].no_skb++; return -ENOMEM; + } dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); @@ -1313,6 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, struct net_device *netdev = enic->netdev; struct sk_buff *skb; struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index]; u8 type, color, eop, sop, ingress_port, vlan_stripped; u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; @@ -1323,8 +1377,11 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, u32 rss_hash; bool outer_csum_ok = true, encap = false; - if (skipped) + rqstats->packets++; + if (skipped) { + rqstats->desc_skip++; return; + } skb = buf->os_buf; @@ -1342,9 +1399,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, if (!fcs_ok) { if (bytes_written > 0) - enic->rq_bad_fcs++; + rqstats->bad_fcs++; else if (bytes_written == 0) - enic->rq_truncated_pkts++; + rqstats->pkt_truncated++; } dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, @@ -1359,7 +1416,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, /* Good receive */ - + rqstats->bytes += bytes_written; if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) { buf->os_buf = 
NULL; dma_unmap_single(&enic->pdev->dev, buf->dma_addr, @@ -1377,11 +1434,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6: case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX: skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4); + rqstats->l4_rss_hash++; break; case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4: case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6: case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX: skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3); + rqstats->l3_rss_hash++; break; } } @@ -1418,11 +1477,16 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, (ipv4_csum_ok || ipv6)) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = encap; + if (encap) + rqstats->csum_unnecessary_encap++; + else + rqstats->csum_unnecessary++; } - if (vlan_stripped) + if (vlan_stripped) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); - + rqstats->vlan_stripped++; + } skb_mark_napi_id(skb, &enic->napi[rq->index]); if (!(netdev->features & NETIF_F_GRO)) netif_receive_skb(skb); @@ -1435,7 +1499,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, /* Buffer overflow */ - + rqstats->pkt_truncated++; dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); @@ -1568,6 +1632,9 @@ static int enic_poll(struct napi_struct *napi, int budget) if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); + enic->rq_stats[0].napi_complete++; + } else { + enic->rq_stats[0].napi_repoll++; } return rq_work_done; @@ -1693,6 +1760,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) enic_set_int_moderation(enic, &enic->rq[rq]); vnic_intr_unmask(&enic->intr[intr]); + enic->rq_stats[rq].napi_complete++; + } else { + enic->rq_stats[rq].napi_repoll++; } return work_done; @@ -2502,6 +2572,54 @@ static void enic_clear_intr_mode(struct enic *enic) vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); } +static void enic_get_queue_stats_rx(struct net_device *dev, int idx, + struct netdev_queue_stats_rx *rxs) +{ + struct enic *enic = netdev_priv(dev); + struct enic_rq_stats *rqstats = &enic->rq_stats[idx]; + + rxs->bytes = rqstats->bytes; + rxs->packets = rqstats->packets; + rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated; + rxs->hw_drop_overruns = rqstats->pkt_truncated; + rxs->csum_unnecessary = rqstats->csum_unnecessary + + rqstats->csum_unnecessary_encap; +} + +static void enic_get_queue_stats_tx(struct net_device *dev, int idx, + struct netdev_queue_stats_tx *txs) +{ + struct enic *enic = netdev_priv(dev); + struct enic_wq_stats *wqstats = &enic->wq_stats[idx]; + + txs->bytes = wqstats->bytes; + txs->packets = wqstats->packets; + txs->csum_none = wqstats->csum_none; + txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum + + wqstats->tso; + txs->hw_gso_packets = wqstats->tso; + txs->stop = wqstats->stopped; + txs->wake = wqstats->wake; +} + +static void enic_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rxs, + struct netdev_queue_stats_tx *txs) +{ + rxs->bytes = 0; + rxs->packets = 0; + rxs->hw_drops = 0; + rxs->hw_drop_overruns = 0; + rxs->csum_unnecessary = 0; + txs->bytes = 0; + txs->packets = 0; + txs->csum_none = 0; + txs->needs_csum = 0; + txs->hw_gso_packets = 0; + txs->stop = 0; + txs->wake = 0; +} + static const struct net_device_ops enic_netdev_dynamic_ops = { .ndo_open = enic_open, .ndo_stop = enic_stop, @@ -2550,6 +2668,12 @@ static const struct 
net_device_ops enic_netdev_ops = { .ndo_features_check = enic_features_check, }; +static const struct netdev_stat_ops enic_netdev_stat_ops = { + .get_queue_stats_rx = enic_get_queue_stats_rx, + .get_queue_stats_tx = enic_get_queue_stats_tx, + .get_base_stats = enic_get_base_stats, +}; + static void enic_dev_deinit(struct enic *enic) { unsigned int i; @@ -2892,6 +3016,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &enic_netdev_dynamic_ops; else netdev->netdev_ops = &enic_netdev_ops; + netdev->stat_ops = &enic_netdev_stat_ops; netdev->watchdog_timeo = 2 * HZ; enic_set_ethtool_ops(netdev); diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c index bcfe52c11804aa..59ea48d4c9deda 100644 --- a/drivers/net/ethernet/davicom/dm9051.c +++ b/drivers/net/ethernet/davicom/dm9051.c @@ -1235,6 +1235,7 @@ static const struct of_device_id dm9051_match_table[] = { { .compatible = "davicom,dm9051" }, {} }; +MODULE_DEVICE_TABLE(of, dm9051_match_table); static const struct spi_device_id dm9051_id_table[] = { { "dm9051", 0 }, diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index cd3dc4b89518f0..0a161a4db2426a 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -49,7 +49,7 @@ #include #include #include -#include +#include MODULE_AUTHOR("Jeff Garzik "); MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver"); diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c index d5657ff15e3c4a..71ff9e6db209a8 100644 --- a/drivers/net/ethernet/dec/tulip/eeprom.c +++ b/drivers/net/ethernet/dec/tulip/eeprom.c @@ -13,7 +13,7 @@ #include #include #include "tulip.h" -#include +#include diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h index bd786dfbc066a4..5e010e1fa6f79c 100644 --- a/drivers/net/ethernet/dec/tulip/tulip.h +++ b/drivers/net/ethernet/dec/tulip/tulip.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index ecfad43df45a7c..27e01d780cd02e 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #ifdef CONFIG_SPARC diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 7bfeae04b52bab..d0ea9260787061 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1842,7 +1842,7 @@ static int rio_resume(struct device *device) return 0; } -static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume); #define RIO_PM_OPS (&rio_pm_ops) #else diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 61fe9625bed1f2..e48b861e4ce15d 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -966,9 +966,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, void be_link_status_update(struct be_adapter *adapter, u8 link_status); void be_parse_stats(struct be_adapter *adapter); int be_load_fw(struct be_adapter *adapter, u8 *func); -bool be_is_wol_supported(struct be_adapter *adapter); bool be_pause_supported(struct be_adapter *adapter); -u32 be_get_fw_log_level(struct be_adapter 
*adapter); int be_update_queues(struct be_adapter *adapter); int be_poll(struct napi_struct *napi, int budget); void be_eqd_update(struct be_adapter *adapter, bool force_update); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index e2085c68c0ee78..d70818f06be730 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2381,7 +2381,6 @@ struct be_cmd_req_manage_iface_filters { } __packed; u16 be_POST_stage_get(struct be_adapter *adapter); -int be_pci_fnum_get(struct be_adapter *adapter); int be_fw_wait_ready(struct be_adapter *adapter); int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, bool permanent, u32 if_handle, u32 pmac_id); @@ -2406,7 +2405,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q); int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, u8 *link_status, u32 dom); -int be_cmd_reset(struct be_adapter *adapter); int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); int lancer_cmd_get_pport_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); @@ -2488,7 +2486,6 @@ int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); int lancer_initiate_dump(struct be_adapter *adapter); int lancer_delete_dump(struct be_adapter *adapter); bool dump_present(struct be_adapter *adapter); -int lancer_test_and_set_rdy_state(struct be_adapter *adapter); int be_cmd_query_port_name(struct be_adapter *adapter); int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res); diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c index 9aa286ba1f00bb..228a638eae1698 100644 --- a/drivers/net/ethernet/engleder/tsnep_ethtool.c +++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c @@ -310,16 +310,12 @@ static int tsnep_ethtool_get_ts_info(struct net_device *netdev, struct tsnep_adapter *adapter = netdev_priv(netdev); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 4c546c3aef0fec..f3cc14cc757d27 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -50,6 +51,15 @@ #define FTGMAC_100MHZ 100000000 #define FTGMAC_25MHZ 25000000 +/* For NC-SI to register a fixed-link phy device */ +static struct fixed_phy_status ncsi_phy_status = { + .link = 1, + .speed = SPEED_100, + .duplex = DUPLEX_FULL, + .pause = 0, + .asym_pause = 0 +}; + struct ftgmac100 { /* Registers */ struct resource *res; @@ -1541,7 +1551,8 @@ static int ftgmac100_open(struct net_device *netdev) if (netdev->phydev) { /* If we have a PHY, start polling */ phy_start(netdev->phydev); - } else if (priv->use_ncsi) { + } + if (priv->use_ncsi) { /* If using NC-SI, set our carrier on and start the stack */ netif_carrier_on(netdev); @@ -1554,6 +1565,7 @@ static int ftgmac100_open(struct net_device *netdev) return 0; err_ncsi: + phy_stop(netdev->phydev); 
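Note on the ftgmac100 NC-SI hunks nearby: a software fixed-link PHY (100/Full, always up) is registered so phylib has an attached phydev even though link state is really owned by the NC-SI stack. The pattern, as an illustrative sketch only (error checking added here for clarity, not taken from the patch):

#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

/* Fixed status for NC-SI: report a permanently-up 100M full-duplex link. */
static struct fixed_phy_status example_ncsi_phy_status = {
	.link	= 1,
	.speed	= SPEED_100,
	.duplex	= DUPLEX_FULL,
};

static int example_attach_ncsi_phy(struct net_device *netdev,
				   void (*adjust_link)(struct net_device *))
{
	struct phy_device *phydev;

	phydev = fixed_phy_register(PHY_POLL, &example_ncsi_phy_status, NULL);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	/* Attach the fixed PHY to the MAC like any other PHY device. */
	return phy_connect_direct(netdev, phydev, adjust_link,
				  PHY_INTERFACE_MODE_MII);
}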
napi_disable(&priv->napi); netif_stop_queue(netdev); err_alloc: @@ -1587,7 +1599,7 @@ static int ftgmac100_stop(struct net_device *netdev) netif_napi_del(&priv->napi); if (netdev->phydev) phy_stop(netdev->phydev); - else if (priv->use_ncsi) + if (priv->use_ncsi) ncsi_stop_dev(priv->ndev); ftgmac100_stop_hw(priv); @@ -1725,6 +1737,9 @@ static void ftgmac100_phy_disconnect(struct net_device *netdev) phy_disconnect(netdev->phydev); if (of_phy_is_fixed_link(priv->dev->of_node)) of_phy_deregister_fixed_link(priv->dev->of_node); + + if (priv->use_ncsi) + fixed_phy_unregister(netdev->phydev); } static void ftgmac100_destroy_mdio(struct net_device *netdev) @@ -1802,6 +1817,7 @@ static int ftgmac100_probe(struct platform_device *pdev) struct resource *res; int irq; struct net_device *netdev; + struct phy_device *phydev; struct ftgmac100 *priv; struct device_node *np; int err = 0; @@ -1889,6 +1905,14 @@ static int ftgmac100_probe(struct platform_device *pdev) err = -EINVAL; goto err_phy_connect; } + + phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, NULL); + err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link, + PHY_INTERFACE_MODE_MII); + if (err) { + dev_err(&pdev->dev, "Connecting PHY failed\n"); + goto err_phy_connect; + } } else if (np && of_phy_is_fixed_link(np)) { struct phy_device *phy; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 4a55e521c17ecd..e15dd3d858df21 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -229,7 +229,7 @@ static int dpaa_netdev_init(struct net_device *net_dev, net_dev->max_mtu = dpaa_get_max_mtu(); net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_LLTX | NETIF_F_RXHASH); + NETIF_F_RXHASH); net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; /* The kernels enables GSO automatically, if we declare NETIF_F_SG. 
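Note on the NETIF_F_LLTX conversions in the cxgb, dpaa and dpaa2 hunks around here: lockless TX is no longer advertised through netdev->features but through a plain netdev field. A minimal sketch of the new convention, assuming "lltx" is the boolean member of struct net_device used by these hunks:

#include <linux/netdevice.h>

/* Illustrative only: driver init declaring it handles its own TX locking. */
static void example_netdev_init(struct net_device *net_dev)
{
	net_dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_RXCSUM;
	net_dev->lltx = true;	/* replaces the old NETIF_F_LLTX feature bit */
}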
@@ -239,6 +239,7 @@ static int dpaa_netdev_init(struct net_device *net_dev, net_dev->features |= NETIF_F_RXCSUM; net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + net_dev->lltx = true; /* we do not want shared skbs on TX */ net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; @@ -3163,8 +3164,9 @@ static void dpaa_napi_del(struct net_device *net_dev) for_each_possible_cpu(cpu) { percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); - netif_napi_del(&percpu_priv->np.napi); + __netif_napi_del(&percpu_priv->np.napi); } + synchronize_net(); } static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 6866807973daa1..29886a8ba73f33 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4594,12 +4594,13 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev) net_dev->priv_flags |= supported; net_dev->priv_flags &= ~not_supported; + net_dev->lltx = true; /* Features */ net_dev->features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | - NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO; + NETIF_F_HW_TC | NETIF_F_TSO; net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS; net_dev->hw_features = net_dev->features; net_dev->xdp_features = NETDEV_XDP_ACT_BASIC | diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 5c45f42232d326..032d8eadd003f6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -977,7 +977,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) return j; } -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK static void enetc_get_rx_tstamp(struct net_device *ndev, union enetc_rx_bd *rxbd, struct sk_buff *skb) @@ -1001,7 +1000,6 @@ static void enetc_get_rx_tstamp(struct net_device *ndev, shhwtstamps->hwtstamp = ns_to_ktime(tstamp); } } -#endif static void enetc_get_offloads(struct enetc_bdr *rx_ring, union enetc_rx_bd *rxbd, struct sk_buff *skb) @@ -1041,10 +1039,9 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring, __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); } -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK - if (priv->active_offloads & ENETC_F_RX_TSTAMP) + if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && + (priv->active_offloads & ENETC_F_RX_TSTAMP)) enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); -#endif } /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS, @@ -2305,12 +2302,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv) snprintf(v->name, sizeof(v->name), "%s-rxtx%d", priv->ndev->name, i); - err = request_irq(irq, enetc_msix, 0, v->name, v); + err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v); if (err) { dev_err(priv->dev, "request_irq() failed!\n"); goto irq_err; } - disable_irq(irq); v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); @@ -2882,7 +2878,6 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features) } EXPORT_SYMBOL_GPL(enetc_set_features); -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -2951,17 +2946,17 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } -#endif int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { struct enetc_ndev_priv *priv = netdev_priv(ndev); -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK - if (cmd == SIOCSHWTSTAMP) - return enetc_hwtstamp_set(ndev, rq); - if (cmd == SIOCGHWTSTAMP) - return enetc_hwtstamp_get(ndev, rq); -#endif + + if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) { + if (cmd == SIOCSHWTSTAMP) + return enetc_hwtstamp_set(ndev, rq); + if (cmd == SIOCGHWTSTAMP) + return enetc_hwtstamp_get(ndev, rq); + } if (!priv->phylink) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index a9c2ff22431c57..97524dfa234c7d 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -184,10 +184,9 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i) { int hw_idx = i; -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK - if (rx_ring->ext_en) + if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en) hw_idx = 2 * i; -#endif + return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]); } @@ -199,10 +198,8 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring, new_rxbd++; -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK - if (rx_ring->ext_en) + if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en) new_rxbd++; -#endif if (unlikely(++new_index == rx_ring->bd_count)) { new_rxbd = rx_ring->bd_base; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 5e684b23c5f512..2563eb8ac7b63a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -849,28 +849,26 @@ static int enetc_get_ts_info(struct net_device *ndev, if (phc_idx) { info->phc_index = *phc_idx; symbol_put(enetc_phc_index); - } else { - info->phc_index = -1; } -#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; + + return 0; + } + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + SOF_TIMESTAMPING_TX_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON) | (1 << HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_ALL); -#else - info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; -#endif + return 0; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 11b14555802c9a..8f6b0bf48139ac 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2017-2019 NXP */ -#include +#include #include #include #include diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index a19cb2a786fd28..1cca0425d49397 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -691,10 +691,19 @@ struct fec_enet_private { /* XDP BPF Program */ struct bpf_prog *xdp_prog; + struct { + int pps_enable; + u64 ns_sys, ns_phc; + u32 at_corr; + u8 at_inc_corr; + } ptp_saved_state; + u64 ethtool_stats[]; }; void fec_ptp_init(struct platform_device *pdev, int 
irq_idx); +void fec_ptp_restore_state(struct fec_enet_private *fep); +void fec_ptp_save_state(struct fec_enet_private *fep); void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config, diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a923cb95cdc62c..31ebf6a4f973bd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1077,6 +1077,8 @@ fec_restart(struct net_device *ndev) u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = FEC_ECR_ETHEREN; + fec_ptp_save_state(fep); + /* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. @@ -1244,8 +1246,10 @@ fec_restart(struct net_device *ndev) writel(ecntl, fep->hwp + FEC_ECNTRL); fec_enet_active_rxring(ndev); - if (fep->bufdesc_ex) + if (fep->bufdesc_ex) { fec_ptp_start_cyclecounter(ndev); + fec_ptp_restore_state(fep); + } /* Enable interrupts we wish to service */ if (fep->link) @@ -1336,6 +1340,8 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } + fec_ptp_save_state(fep); + /* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. @@ -1366,6 +1372,9 @@ fec_stop(struct net_device *ndev) val = readl(fep->hwp + FEC_ECNTRL); val |= FEC_ECR_EN1588; writel(val, fep->hwp + FEC_ECNTRL); + + fec_ptp_start_cyclecounter(ndev); + fec_ptp_restore_state(fep); } } @@ -2775,15 +2784,11 @@ static int fec_enet_get_ts_info(struct net_device *ndev, if (fep->bufdesc_ex) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (fep->ptp_clock) info->phc_index = ptp_clock_index(fep->ptp_clock); - else - info->phc_index = -1; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -4606,7 +4611,7 @@ fec_drv_remove(struct platform_device *pdev) free_netdev(ndev); } -static int __maybe_unused fec_suspend(struct device *dev) +static int fec_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); @@ -4659,7 +4664,7 @@ static int __maybe_unused fec_suspend(struct device *dev) return 0; } -static int __maybe_unused fec_resume(struct device *dev) +static int fec_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); @@ -4714,7 +4719,7 @@ static int __maybe_unused fec_resume(struct device *dev) return ret; } -static int __maybe_unused fec_runtime_suspend(struct device *dev) +static int fec_runtime_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); @@ -4725,7 +4730,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused fec_runtime_resume(struct device *dev) +static int fec_runtime_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); @@ -4746,14 +4751,14 @@ static int __maybe_unused fec_runtime_resume(struct device *dev) } static const struct dev_pm_ops fec_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) - 
SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) + RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) }; static struct platform_driver fec_driver = { .driver = { .name = DRIVER_NAME, - .pm = &fec_pm_ops, + .pm = pm_ptr(&fec_pm_ops), .of_match_table = fec_dt_ids, .suppress_bind_attrs = true, }, diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 2e4f3e1782a252..a4eb6edb850add 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -90,6 +90,30 @@ #define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL #define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL +/** + * fec_ptp_read - read raw cycle counter (to be used by time counter) + * @cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 fec_ptp_read(const struct cyclecounter *cc) +{ + struct fec_enet_private *fep = + container_of(cc, struct fec_enet_private, cc); + u32 tempval; + + tempval = readl(fep->hwp + FEC_ATIME_CTRL); + tempval |= FEC_T_CTRL_CAPTURE; + writel(tempval, fep->hwp + FEC_ATIME_CTRL); + + if (fep->quirks & FEC_QUIRK_BUG_CAPTURE) + udelay(1); + + return readl(fep->hwp + FEC_ATIME); +} + /** * fec_ptp_enable_pps * @fep: the fec_enet_private structure handle @@ -136,7 +160,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds * to current timer would be next second. */ - tempval = fep->cc.read(&fep->cc); + tempval = fec_ptp_read(&fep->cc); /* Convert the ptp local counter to 1588 timestamp */ ns = timecounter_cyc2time(&fep->tc, tempval); ts = ns_to_timespec64(ns); @@ -211,13 +235,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep) timecounter_read(&fep->tc); /* Get the current ptp hardware time counter */ - temp_val = readl(fep->hwp + FEC_ATIME_CTRL); - temp_val |= FEC_T_CTRL_CAPTURE; - writel(temp_val, fep->hwp + FEC_ATIME_CTRL); - if (fep->quirks & FEC_QUIRK_BUG_CAPTURE) - udelay(1); - - ptp_hc = readl(fep->hwp + FEC_ATIME); + ptp_hc = fec_ptp_read(&fep->cc); /* Convert the ptp local counter to 1588 timestamp */ curr_time = timecounter_cyc2time(&fep->tc, ptp_hc); @@ -271,30 +289,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer) return HRTIMER_NORESTART; } -/** - * fec_ptp_read - read raw cycle counter (to be used by time counter) - * @cc: the cyclecounter structure - * - * this function reads the cyclecounter registers and is called by the - * cyclecounter structure used to construct a ns counter from the - * arbitrary fixed point registers - */ -static u64 fec_ptp_read(const struct cyclecounter *cc) -{ - struct fec_enet_private *fep = - container_of(cc, struct fec_enet_private, cc); - u32 tempval; - - tempval = readl(fep->hwp + FEC_ATIME_CTRL); - tempval |= FEC_T_CTRL_CAPTURE; - writel(tempval, fep->hwp + FEC_ATIME_CTRL); - - if (fep->quirks & FEC_QUIRK_BUG_CAPTURE) - udelay(1); - - return readl(fep->hwp + FEC_ATIME); -} - /** * fec_ptp_start_cyclecounter - create the cycle counter from hw * @ndev: network device @@ -770,6 +764,56 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) schedule_delayed_work(&fep->time_keep, HZ); } +void fec_ptp_save_state(struct fec_enet_private *fep) +{ + unsigned long flags; + u32 atime_inc_corr; + + 
spin_lock_irqsave(&fep->tmreg_lock, flags); + + fep->ptp_saved_state.pps_enable = fep->pps_enable; + + fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc); + fep->ptp_saved_state.ns_sys = ktime_get_ns(); + + fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR); + atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK; + fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); +} + +/* Restore PTP functionality after a reset */ +void fec_ptp_restore_state(struct fec_enet_private *fep) +{ + u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; + unsigned long flags; + u32 counter; + u64 ns; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + /* Reset turned it off, so adjust our status flag */ + fep->pps_enable = 0; + + writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR); + atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET; + writel(atime_inc, fep->hwp + FEC_ATIME_INC); + + ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc; + counter = ns & fep->cc.mask; + writel(counter, fep->hwp + FEC_ATIME); + timecounter_init(&fep->tc, &fep->cc, ns); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + /* Restart PPS if needed */ + if (fep->ptp_saved_state.pps_enable) { + /* Re-enable PPS */ + fec_ptp_enable_pps(fep, 1); + } +} + void fec_ptp_stop(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 406e75e9e5ea20..f17a4e51151017 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1748,7 +1748,7 @@ static int fman_port_probe(struct platform_device *of_dev) struct resource res; struct resource *dev_res; u32 val; - int err = 0, lenp; + int err = 0; enum fman_port_type port_type; u16 port_speed; u8 port_id; @@ -1795,7 +1795,7 @@ static int fman_port_probe(struct platform_device *of_dev) if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) { port_type = FMAN_PORT_TYPE_TX; port_speed = 1000; - if (of_find_property(port_node, "fsl,fman-10g-port", &lenp)) + if (of_property_read_bool(port_node, "fsl,fman-10g-port")) port_speed = 10000; } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) { @@ -1808,7 +1808,7 @@ static int fman_port_probe(struct platform_device *of_dev) } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) { port_type = FMAN_PORT_TYPE_RX; port_speed = 1000; - if (of_find_property(port_node, "fsl,fman-10g-port", &lenp)) + if (of_property_read_bool(port_node, "fsl,fman-10g-port")) port_speed = 10000; } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) { diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig index 7f20840fde0742..57013bf14d7c5e 100644 --- a/drivers/net/ethernet/freescale/fs_enet/Kconfig +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -3,7 +3,7 @@ config FS_ENET tristate "Freescale Ethernet Driver" depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) select MII - select PHYLIB + select PHYLINK config FS_ENET_MPC5121_FEC def_bool y if (FS_ENET && PPC_MPC512x) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index cf392faa610521..3425c4a6abcbdb 100644 --- 
a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * @@ -9,10 +10,6 @@ * * Heavily based on original FEC driver by Dan Malek * and modifications by Joakim Tjernlund - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include @@ -29,17 +26,18 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include #include #include #include +#include #include #include @@ -72,6 +70,13 @@ static void fs_set_multicast_list(struct net_device *dev) (*fep->ops->set_multicast_list)(dev); } +static int fs_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + return phylink_mii_ioctl(fep->phylink, ifr, cmd); +} + static void skb_align(struct sk_buff *skb, int align) { int off = ((unsigned long)skb->data) & (align - 1); @@ -84,15 +89,14 @@ static void skb_align(struct sk_buff *skb, int align) static int fs_enet_napi(struct napi_struct *napi, int budget) { struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); - struct net_device *dev = fep->ndev; const struct fs_platform_info *fpi = fep->fpi; - cbd_t __iomem *bdp; + struct net_device *dev = fep->ndev; + int curidx, dirtyidx, received = 0; + int do_wake = 0, do_restart = 0; + int tx_left = TX_RING_SIZE; struct sk_buff *skb, *skbn; - int received = 0; + cbd_t __iomem *bdp; u16 pkt_len, sc; - int curidx; - int dirtyidx, do_wake, do_restart; - int tx_left = TX_RING_SIZE; spin_lock(&fep->tx_lock); bdp = fep->dirty_tx; @@ -100,7 +104,6 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) /* clear status bits for napi*/ (*fep->ops->napi_clear_event)(dev); - do_wake = do_restart = 0; while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) { dirtyidx = bdp - fep->tx_bd_base; @@ -109,12 +112,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) skb = fep->tx_skbuff[dirtyidx]; - /* - * Check for errors. - */ + /* Check for errors. */ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { - if (sc & BD_ENET_TX_HB) /* No heartbeat */ dev->stats.tx_heartbeat_errors++; if (sc & BD_ENET_TX_LC) /* Late collision */ @@ -130,16 +130,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) dev->stats.tx_errors++; do_restart = 1; } - } else + } else { dev->stats.tx_packets++; + } if (sc & BD_ENET_TX_READY) { dev_warn(fep->dev, "HEY! Enet xmit interrupt and TX_READY.\n"); } - /* - * Deferred means some collisions occurred during transmit, + /* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK. */ if (sc & BD_ENET_TX_DEF) @@ -153,25 +153,20 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), CBDR_DATLEN(bdp), DMA_TO_DEVICE); - /* - * Free the sk buffer associated with this last transmit. - */ + /* Free the sk buffer associated with this last transmit. */ if (skb) { dev_kfree_skb(skb); fep->tx_skbuff[dirtyidx] = NULL; } - /* - * Update pointer to next buffer descriptor to be transmitted. + /* Update pointer to next buffer descriptor to be transmitted. 
*/ if ((sc & BD_ENET_TX_WRAP) == 0) bdp++; else bdp = fep->tx_bd_base; - /* - * Since we have freed up a buffer, the ring is no longer - * full. + /* Since we have freed up a buffer, the ring is no longer full. */ if (++fep->tx_free == MAX_SKB_FRAGS) do_wake = 1; @@ -188,8 +183,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) if (do_wake) netif_wake_queue(dev); - /* - * First, grab all of the stats for the incoming packet. + /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ bdp = fep->cur_rx; @@ -198,16 +192,13 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) received < budget) { curidx = bdp - fep->rx_bd_base; - /* - * Since we have allocated space to hold a complete frame, + /* Since we have allocated space to hold a complete frame, * the last indicator should be set. */ if ((sc & BD_ENET_RX_LAST) == 0) dev_warn(fep->dev, "rcv is not +last\n"); - /* - * Check for errors. - */ + /* Check for errors. */ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { dev->stats.rx_errors++; @@ -228,9 +219,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) } else { skb = fep->rx_skbuff[curidx]; - /* - * Process the incoming frame. - */ + /* Process the incoming frame */ dev->stats.rx_packets++; pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ dev->stats.rx_bytes += pkt_len + 4; @@ -238,15 +227,15 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) if (pkt_len <= fpi->rx_copybreak) { /* +2 to make IP header L1 cache aligned */ skbn = netdev_alloc_skb(dev, pkt_len + 2); - if (skbn != NULL) { + if (skbn) { skb_reserve(skbn, 2); /* align IP header */ - skb_copy_from_linear_data(skb, - skbn->data, pkt_len); + skb_copy_from_linear_data(skb, skbn->data, + pkt_len); swap(skb, skbn); dma_sync_single_for_cpu(fep->dev, - CBDR_BUFADDR(bdp), - L1_CACHE_ALIGN(pkt_len), - DMA_FROM_DEVICE); + CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(pkt_len), + DMA_FROM_DEVICE); } } else { skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); @@ -256,20 +245,18 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) skb_align(skbn, ENET_RX_ALIGN); - dma_unmap_single(fep->dev, - CBDR_BUFADDR(bdp), - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE); + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); - dma = dma_map_single(fep->dev, - skbn->data, - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE); + dma = dma_map_single(fep->dev, skbn->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); CBDW_BUFADDR(bdp, dma); } } - if (skbn != NULL) { + if (skbn) { skb_put(skb, pkt_len); /* Make room */ skb->protocol = eth_type_trans(skb, dev); received++; @@ -284,9 +271,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) CBDW_DATLEN(bdp, 0); CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); - /* - * Update BD pointer to next entry. - */ + /* Update BD pointer to next entry */ if ((sc & BD_ENET_RX_WRAP) == 0) bdp++; else @@ -308,19 +293,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget) return budget; } -/* - * The interrupt handler. +/* The interrupt handler. * This is called from the MPC core interrupt. 
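/* Illustrative sketch (not part of this patch): a minimal, hypothetical
 * example of the hard-IRQ/NAPI hand-off the fs_enet interrupt handler above
 * follows.  The interrupt masks the ring events and schedules the poller;
 * the poll function re-arms them only once it has drained less than its
 * budget.  All demo_* names and the empty helpers are stand-ins.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_napi_priv {
	struct napi_struct napi;
};

static void demo_mask_events(struct demo_napi_priv *priv) { /* write event mask */ }
static void demo_unmask_events(struct demo_napi_priv *priv) { /* restore event mask */ }
static int demo_clean_rings(struct demo_napi_priv *priv, int budget) { return 0; }

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct demo_napi_priv *priv = netdev_priv(ndev);

	if (napi_schedule_prep(&priv->napi)) {
		demo_mask_events(priv);		/* quiet the source first */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_napi_priv *priv = container_of(napi, struct demo_napi_priv, napi);
	int done = demo_clean_rings(priv, budget);

	/* Re-enable events only when the rings are fully drained. */
	if (done < budget && napi_complete_done(napi, done))
		demo_unmask_events(priv);

	return done;
}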
*/ static irqreturn_t fs_enet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; + u32 int_events, int_clr_events; struct fs_enet_private *fep; - u32 int_events; - u32 int_clr_events; - int nr, napi_ok; - int handled; + int nr, napi_ok, handled; fep = netdev_priv(dev); @@ -342,12 +324,12 @@ fs_enet_interrupt(int irq, void *dev_id) (*fep->ops->napi_disable)(dev); (*fep->ops->clear_int_events)(dev, fep->ev_napi); - /* NOTE: it is possible for FCCs in NAPI mode */ - /* to submit a spurious interrupt while in poll */ + /* NOTE: it is possible for FCCs in NAPI mode + * to submit a spurious interrupt while in poll + */ if (napi_ok) __napi_schedule(&fep->napi); } - } handled = nr > 0; @@ -357,45 +339,40 @@ fs_enet_interrupt(int irq, void *dev_id) void fs_init_bds(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - cbd_t __iomem *bdp; struct sk_buff *skb; + cbd_t __iomem *bdp; int i; fs_cleanup_bds(dev); - fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->dirty_tx = fep->tx_bd_base; + fep->cur_tx = fep->tx_bd_base; fep->tx_free = fep->tx_ring; fep->cur_rx = fep->rx_bd_base; - /* - * Initialize the receive buffer descriptors. - */ + /* Initialize the receive buffer descriptors */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); - if (skb == NULL) + if (!skb) break; skb_align(skb, ENET_RX_ALIGN); fep->rx_skbuff[i] = skb; - CBDW_BUFADDR(bdp, - dma_map_single(fep->dev, skb->data, - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE)); + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); CBDW_DATLEN(bdp, 0); /* zero */ CBDW_SC(bdp, BD_ENET_RX_EMPTY | ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); } - /* - * if we failed, fillup remainder - */ + + /* if we failed, fillup remainder */ for (; i < fep->rx_ring; i++, bdp++) { fep->rx_skbuff[i] = NULL; CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); } - /* - * ...and the same for transmit. - */ + /* ...and the same for transmit. */ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { fep->tx_skbuff[i] = NULL; CBDW_BUFADDR(bdp, 0); @@ -411,32 +388,30 @@ void fs_cleanup_bds(struct net_device *dev) cbd_t __iomem *bdp; int i; - /* - * Reset SKB transmit buffers. - */ + /* Reset SKB transmit buffers. */ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { - if ((skb = fep->tx_skbuff[i]) == NULL) + skb = fep->tx_skbuff[i]; + if (!skb) continue; /* unmap */ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), - skb->len, DMA_TO_DEVICE); + skb->len, DMA_TO_DEVICE); fep->tx_skbuff[i] = NULL; dev_kfree_skb(skb); } - /* - * Reset SKB receive buffers - */ + /* Reset SKB receive buffers */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { - if ((skb = fep->rx_skbuff[i]) == NULL) + skb = fep->rx_skbuff[i]; + if (!skb) continue; /* unmap */ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE); + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); fep->rx_skbuff[i] = NULL; @@ -444,12 +419,8 @@ void fs_cleanup_bds(struct net_device *dev) } } -/**********************************************************************************/ - #ifdef CONFIG_FS_ENET_MPC5121_FEC -/* - * MPC5121 FEC requeries 4-byte alignment for TX data buffer! - */ +/* MPC5121 FEC requires 4-byte alignment for TX data buffer! 
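/* Illustrative sketch (not part of this patch): when the MAC requires
 * N-byte aligned TX buffers, a driver can fall back to copying a misaligned
 * frame into a freshly allocated, aligned skb, which is roughly what the
 * MPC5121 workaround above does.  This simplified helper assumes the caller
 * replaces the old skb with the returned one; the name is hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_align_tx_skb(struct net_device *dev,
					 struct sk_buff *skb, int align)
{
	struct sk_buff *new_skb;

	/* Over-allocate so the data pointer can be rounded up. */
	new_skb = netdev_alloc_skb(dev, skb->len + align);
	if (!new_skb)
		return NULL;		/* keep and retry the original skb */

	skb_reserve(new_skb, PTR_ALIGN(new_skb->data, align) - new_skb->data);
	skb_put_data(new_skb, skb->data, skb->len);

	dev_consume_skb_any(skb);	/* original copy is no longer needed */
	return new_skb;
}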
*/ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, struct sk_buff *skb) { @@ -481,15 +452,12 @@ static netdev_tx_t fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + int curidx, nr_frags, len; cbd_t __iomem *bdp; - int curidx; - u16 sc; - int nr_frags; skb_frag_t *frag; - int len; + u16 sc; #ifdef CONFIG_FS_ENET_MPC5121_FEC - int is_aligned = 1; - int i; + int i, is_aligned = 1; if (!IS_ALIGNED((unsigned long)skb->data, 4)) { is_aligned = 0; @@ -507,8 +475,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!is_aligned) { skb = tx_skb_align_workaround(dev, skb); if (!skb) { - /* - * We have lost packet due to memory allocation error + /* We have lost packet due to memory allocation error * in tx_skb_align_workaround(). Hopefully original * skb is still valid, so try transmit it later. */ @@ -519,9 +486,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock(&fep->tx_lock); - /* - * Fill in a Tx ring entry - */ + /* Fill in a Tx ring entry */ bdp = fep->cur_tx; nr_frags = skb_shinfo(skb)->nr_frags; @@ -529,8 +494,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); spin_unlock(&fep->tx_lock); - /* - * Ooops. All transmit buffers are full. Bail out. + /* Ooops. All transmit buffers are full. Bail out. * This should not happen, since the tx queue should be stopped. */ dev_warn(fep->dev, "tx queue full!.\n"); @@ -543,12 +507,12 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += len; if (nr_frags) len -= skb->data_len; + fep->tx_free -= nr_frags + 1; - /* - * Push the data cache so the CPM does not get stale memory data. + /* Push the data cache so the CPM does not get stale memory data. */ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, - skb->data, len, DMA_TO_DEVICE)); + skb->data, len, DMA_TO_DEVICE)); CBDW_DATLEN(bdp, len); fep->mapped_as_page[curidx] = 0; @@ -585,9 +549,11 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) /* note that while FEC does not have this bit * it marks it as available for software use - * yay for hw reuse :) */ + * yay for hw reuse :) + */ if (skb->len <= 60) sc |= BD_ENET_TX_PAD; + CBDC_SC(bdp, BD_ENET_TX_STATS); CBDS_SC(bdp, sc); @@ -599,6 +565,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) bdp++; else bdp = fep->tx_bd_base; + fep->cur_tx = bdp; if (fep->tx_free < MAX_SKB_FRAGS) @@ -623,15 +590,21 @@ static void fs_timeout_work(struct work_struct *work) dev->stats.tx_errors++; - spin_lock_irqsave(&fep->lock, flags); + /* In the event a timeout was detected, but the netdev is brought down + * shortly after, it no longer makes sense to try to recover from the + * timeout. netif_running() will return false when called from the + * .ndo_close() callback. Calling the following recovery code while + * called from .ndo_close() could deadlock on rtnl. 
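/* Illustrative sketch (not part of this patch) of the recovery flow the
 * timeout handler above adopts: .ndo_tx_timeout only queues work, and the
 * work item bails out if the interface has meanwhile been brought down,
 * because phylink_stop()/phylink_start() must run under the RTNL lock and a
 * concurrent .ndo_close() path also takes it.  Names are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/phylink.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct demo_tmo_priv {
	struct net_device *ndev;
	struct phylink *phylink;
	struct work_struct timeout_work;
};

static void demo_timeout_work(struct work_struct *work)
{
	struct demo_tmo_priv *priv = container_of(work, struct demo_tmo_priv,
						  timeout_work);
	struct net_device *ndev = priv->ndev;

	/* If the interface went down after the timeout fired, recovery is
	 * pointless and taking rtnl here risks deadlocking against close.
	 */
	if (!netif_running(ndev))
		return;

	rtnl_lock();
	phylink_stop(priv->phylink);	/* tear the link down, stop the MAC */
	phylink_start(priv->phylink);	/* bring it back up again */
	rtnl_unlock();

	netif_wake_queue(ndev);
}

static void demo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct demo_tmo_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->timeout_work);
}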
+ */ + if (!netif_running(dev)) + return; - if (dev->flags & IFF_UP) { - phy_stop(dev->phydev); - (*fep->ops->stop)(dev); - (*fep->ops->restart)(dev); - } + rtnl_lock(); + phylink_stop(fep->phylink); + phylink_start(fep->phylink); + rtnl_unlock(); - phy_start(dev->phydev); + spin_lock_irqsave(&fep->lock, flags); wake = fep->tx_free >= MAX_SKB_FRAGS && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); spin_unlock_irqrestore(&fep->lock, flags); @@ -647,82 +620,37 @@ static void fs_timeout(struct net_device *dev, unsigned int txqueue) schedule_work(&fep->timeout_work); } -/*----------------------------------------------------------------------------- - * generic link-change handler - should be sufficient for most cases - *-----------------------------------------------------------------------------*/ -static void generic_adjust_link(struct net_device *dev) +static void fs_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) { - struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; - int new_state = 0; - - if (phydev->link) { - /* adjust to duplex mode */ - if (phydev->duplex != fep->oldduplex) { - new_state = 1; - fep->oldduplex = phydev->duplex; - } - - if (phydev->speed != fep->oldspeed) { - new_state = 1; - fep->oldspeed = phydev->speed; - } - - if (!fep->oldlink) { - new_state = 1; - fep->oldlink = 1; - } - - if (new_state) - fep->ops->restart(dev); - } else if (fep->oldlink) { - new_state = 1; - fep->oldlink = 0; - fep->oldspeed = 0; - fep->oldduplex = -1; - } + struct net_device *ndev = to_net_dev(config->dev); + struct fs_enet_private *fep = netdev_priv(ndev); + unsigned long flags; - if (new_state && netif_msg_link(fep)) - phy_print_status(phydev); + spin_lock_irqsave(&fep->lock, flags); + fep->ops->restart(ndev, interface, speed, duplex); + spin_unlock_irqrestore(&fep->lock, flags); } - -static void fs_adjust_link(struct net_device *dev) +static void fs_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) { - struct fs_enet_private *fep = netdev_priv(dev); + struct net_device *ndev = to_net_dev(config->dev); + struct fs_enet_private *fep = netdev_priv(ndev); unsigned long flags; spin_lock_irqsave(&fep->lock, flags); - - if(fep->ops->adjust_link) - fep->ops->adjust_link(dev); - else - generic_adjust_link(dev); - + fep->ops->stop(ndev); spin_unlock_irqrestore(&fep->lock, flags); } -static int fs_init_phy(struct net_device *dev) +static void fs_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev; - phy_interface_t iface; - - fep->oldlink = 0; - fep->oldspeed = 0; - fep->oldduplex = -1; - - iface = fep->fpi->use_rmii ? - PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII; - - phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, - iface); - if (!phydev) { - dev_err(&dev->dev, "Could not attach to PHY\n"); - return -ENODEV; - } - - return 0; + /* Nothing to do */ } static int fs_enet_open(struct net_device *dev) @@ -731,8 +659,9 @@ static int fs_enet_open(struct net_device *dev) int r; int err; - /* to initialize the fep->cur_rx,... */ - /* not doing this, will cause a crash in fs_enet_napi */ + /* to initialize the fep->cur_rx,... 
+ * not doing this, will cause a crash in fs_enet_napi + */ fs_init_bds(fep->ndev); napi_enable(&fep->napi); @@ -746,13 +675,13 @@ static int fs_enet_open(struct net_device *dev) return -EINVAL; } - err = fs_init_phy(dev); + err = phylink_of_phy_connect(fep->phylink, fep->dev->of_node, 0); if (err) { free_irq(fep->interrupt, dev); napi_disable(&fep->napi); return err; } - phy_start(dev->phydev); + phylink_start(fep->phylink); netif_start_queue(dev); @@ -765,28 +694,25 @@ static int fs_enet_close(struct net_device *dev) unsigned long flags; netif_stop_queue(dev); - netif_carrier_off(dev); napi_disable(&fep->napi); - cancel_work_sync(&fep->timeout_work); - phy_stop(dev->phydev); + cancel_work(&fep->timeout_work); + phylink_stop(fep->phylink); spin_lock_irqsave(&fep->lock, flags); spin_lock(&fep->tx_lock); (*fep->ops->stop)(dev); spin_unlock(&fep->tx_lock); spin_unlock_irqrestore(&fep->lock, flags); + phylink_disconnect_phy(fep->phylink); /* release any irqs */ - phy_disconnect(dev->phydev); free_irq(fep->interrupt, dev); return 0; } -/*************************************************************************/ - static void fs_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) + struct ethtool_drvinfo *info) { strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); } @@ -799,7 +725,7 @@ static int fs_get_regs_len(struct net_device *dev) } static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *p) + void *p) { struct fs_enet_private *fep = netdev_priv(dev); unsigned long flags; @@ -818,12 +744,14 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, static u32 fs_get_msglevel(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + return fep->msg_enable; } static void fs_set_msglevel(struct net_device *dev, u32 value) { struct fs_enet_private *fep = netdev_priv(dev); + fep->msg_enable = value; } @@ -865,6 +793,22 @@ static int fs_set_tunable(struct net_device *dev, return ret; } +static int fs_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + return phylink_ethtool_ksettings_set(fep->phylink, cmd); +} + +static int fs_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + return phylink_ethtool_ksettings_get(fep->phylink, cmd); +} + static const struct ethtool_ops fs_ethtool_ops = { .get_drvinfo = fs_get_drvinfo, .get_regs_len = fs_get_regs_len, @@ -874,14 +818,12 @@ static const struct ethtool_ops fs_ethtool_ops = { .set_msglevel = fs_set_msglevel, .get_regs = fs_get_regs, .get_ts_info = ethtool_op_get_ts_info, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link_ksettings = fs_ethtool_get_link_ksettings, + .set_link_ksettings = fs_ethtool_set_link_ksettings, .get_tunable = fs_get_tunable, .set_tunable = fs_set_tunable, }; -/**************************************************************************************/ - #ifdef CONFIG_FS_ENET_HAS_FEC #define IS_FEC(ops) ((ops) == &fs_fec_ops) #else @@ -894,7 +836,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { .ndo_start_xmit = fs_enet_start_xmit, .ndo_tx_timeout = fs_timeout, .ndo_set_rx_mode = fs_set_multicast_list, - .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = fs_eth_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef 
CONFIG_NET_POLL_CONTROLLER @@ -902,17 +844,23 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; +static const struct phylink_mac_ops fs_enet_phylink_mac_ops = { + .mac_config = fs_mac_config, + .mac_link_down = fs_mac_link_down, + .mac_link_up = fs_mac_link_up, +}; + static int fs_enet_probe(struct platform_device *ofdev) { + int privsize, len, ret = -ENODEV; + struct fs_platform_info *fpi; + struct fs_enet_private *fep; + phy_interface_t phy_mode; const struct fs_ops *ops; struct net_device *ndev; - struct fs_enet_private *fep; - struct fs_platform_info *fpi; + struct phylink *phylink; const u32 *data; struct clk *clk; - int err; - const char *phy_connection_type; - int privsize, len, ret = -ENODEV; ops = device_get_match_data(&ofdev->dev); if (!ops) @@ -930,51 +878,36 @@ static int fs_enet_probe(struct platform_device *ofdev) fpi->cp_command = *data; } + ret = of_get_phy_mode(ofdev->dev.of_node, &phy_mode); + if (ret) { + /* For compatibility, if the mode isn't specified in DT, + * assume MII + */ + phy_mode = PHY_INTERFACE_MODE_MII; + } + fpi->rx_ring = RX_RING_SIZE; fpi->tx_ring = TX_RING_SIZE; fpi->rx_copybreak = 240; fpi->napi_weight = 17; - fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); - if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) { - err = of_phy_register_fixed_link(ofdev->dev.of_node); - if (err) - goto out_free_fpi; - - /* In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - fpi->phy_node = of_node_get(ofdev->dev.of_node); - } - - if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { - phy_connection_type = of_get_property(ofdev->dev.of_node, - "phy-connection-type", NULL); - if (phy_connection_type && !strcmp("rmii", phy_connection_type)) - fpi->use_rmii = 1; - } /* make clock lookup non-fatal (the driver is shared among platforms), * but require enable to succeed when a clock was specified/found, * keep a reference to the clock upon successful acquisition */ - clk = devm_clk_get(&ofdev->dev, "per"); - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - goto out_deregister_fixed_link; - - fpi->clk_per = clk; - } + clk = devm_clk_get_optional_enabled(&ofdev->dev, "per"); + if (IS_ERR(clk)) + goto out_free_fpi; privsize = sizeof(*fep) + - sizeof(struct sk_buff **) * + sizeof(struct sk_buff **) * (fpi->rx_ring + fpi->tx_ring) + sizeof(char) * fpi->tx_ring; ndev = alloc_etherdev(privsize); if (!ndev) { ret = -ENOMEM; - goto out_put; + goto out_free_fpi; } SET_NETDEV_DEV(ndev, &ofdev->dev); @@ -986,9 +919,29 @@ static int fs_enet_probe(struct platform_device *ofdev) fep->fpi = fpi; fep->ops = ops; + fep->phylink_config.dev = &ndev->dev; + fep->phylink_config.type = PHYLINK_NETDEV; + fep->phylink_config.mac_capabilities = MAC_10 | MAC_100; + + __set_bit(PHY_INTERFACE_MODE_MII, + fep->phylink_config.supported_interfaces); + + if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) + __set_bit(PHY_INTERFACE_MODE_RMII, + fep->phylink_config.supported_interfaces); + + phylink = phylink_create(&fep->phylink_config, dev_fwnode(fep->dev), + phy_mode, &fs_enet_phylink_mac_ops); + if (IS_ERR(phylink)) { + ret = PTR_ERR(phylink); + goto out_free_dev; + } + + fep->phylink = phylink; + ret = fep->ops->setup_data(ndev); if (ret) - goto out_free_dev; + goto out_phylink; fep->rx_skbuff = (struct sk_buff **)&fep[1]; fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; @@ -1018,8 +971,6 @@ static int fs_enet_probe(struct platform_device *ofdev) 
ndev->ethtool_ops = &fs_ethtool_ops; - netif_carrier_off(ndev); - ndev->features |= NETIF_F_SG; ret = register_netdev(ndev); @@ -1034,14 +985,10 @@ static int fs_enet_probe(struct platform_device *ofdev) fep->ops->free_bd(ndev); out_cleanup_data: fep->ops->cleanup_data(ndev); +out_phylink: + phylink_destroy(fep->phylink); out_free_dev: free_netdev(ndev); -out_put: - clk_disable_unprepare(fpi->clk_per); -out_deregister_fixed_link: - of_node_put(fpi->phy_node); - if (of_phy_is_fixed_link(ofdev->dev.of_node)) - of_phy_deregister_fixed_link(ofdev->dev.of_node); out_free_fpi: kfree(fpi); return ret; @@ -1057,10 +1004,7 @@ static void fs_enet_remove(struct platform_device *ofdev) fep->ops->free_bd(ndev); fep->ops->cleanup_data(ndev); dev_set_drvdata(fep->dev, NULL); - of_node_put(fep->fpi->phy_node); - clk_disable_unprepare(fep->fpi->clk_per); - if (of_phy_is_fixed_link(ofdev->dev.of_node)) - of_phy_deregister_fixed_link(ofdev->dev.of_node); + phylink_destroy(fep->phylink); free_netdev(ndev); } @@ -1114,9 +1058,9 @@ static struct platform_driver fs_enet_driver = { #ifdef CONFIG_NET_POLL_CONTROLLER static void fs_enet_netpoll(struct net_device *dev) { - disable_irq(dev->irq); - fs_enet_interrupt(dev->irq, dev); - enable_irq(dev->irq); + disable_irq(dev->irq); + fs_enet_interrupt(dev->irq, dev); + enable_irq(dev->irq); } #endif diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h index 21c07ac05225ff..36e4fcc29e36f1 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -3,11 +3,11 @@ #define FS_ENET_H #include -#include #include #include #include #include +#include #include #ifdef CONFIG_CPM1 @@ -77,8 +77,8 @@ struct fs_ops { void (*free_bd)(struct net_device *dev); void (*cleanup_data)(struct net_device *dev); void (*set_multicast_list)(struct net_device *dev); - void (*adjust_link)(struct net_device *dev); - void (*restart)(struct net_device *dev); + void (*restart)(struct net_device *dev, phy_interface_t interface, + int speed, int duplex); void (*stop)(struct net_device *dev); void (*napi_clear_event)(struct net_device *dev); void (*napi_enable)(struct net_device *dev); @@ -93,14 +93,6 @@ struct fs_ops { void (*tx_restart)(struct net_device *dev); }; -struct phy_info { - unsigned int id; - const char *name; - void (*startup) (struct net_device * dev); - void (*shutdown) (struct net_device * dev); - void (*ack_int) (struct net_device * dev); -}; - /* The FEC stores dest/src/type, data, and checksum for receive packets. 
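/* Illustrative sketch (not part of this patch): a condensed skeleton of the
 * phylib-to-phylink conversion performed above.  The driver supplies
 * phylink_mac_ops instead of an adjust_link callback, declares the
 * interfaces and MAC capabilities it supports, and creates phylink at probe
 * time.  Error handling is trimmed and all demo_* names are hypothetical.
 */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phylink.h>
#include <linux/property.h>

struct demo_pl_priv {
	struct phylink *phylink;
	struct phylink_config phylink_config;
};

static void demo_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	/* Nothing to do: the MAC is reprogrammed in mac_link_up(). */
}

static void demo_mac_link_up(struct phylink_config *config,
			     struct phy_device *phy, unsigned int mode,
			     phy_interface_t interface, int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	/* Program speed/duplex/RMII from the passed-in parameters instead of
	 * dereferencing dev->phydev, then (re)start the MAC.
	 */
}

static void demo_mac_link_down(struct phylink_config *config,
			       unsigned int mode, phy_interface_t interface)
{
	/* Quiesce the MAC. */
}

static const struct phylink_mac_ops demo_mac_ops = {
	.mac_config	= demo_mac_config,
	.mac_link_up	= demo_mac_link_up,
	.mac_link_down	= demo_mac_link_down,
};

static int demo_probe_phylink(struct device *dev, struct net_device *ndev,
			      phy_interface_t phy_mode)
{
	struct demo_pl_priv *priv = netdev_priv(ndev);

	priv->phylink_config.dev = &ndev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_capabilities = MAC_10 | MAC_100;
	__set_bit(PHY_INTERFACE_MODE_MII,
		  priv->phylink_config.supported_interfaces);

	priv->phylink = phylink_create(&priv->phylink_config, dev_fwnode(dev),
				       phy_mode, &demo_mac_ops);
	return PTR_ERR_OR_ZERO(priv->phylink);
}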
*/ #define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ @@ -122,15 +114,9 @@ struct fs_platform_info { u32 dpram_offset; - struct device_node *phy_node; - int rx_ring, tx_ring; /* number of buffers on rx */ int rx_copybreak; /* limit we copy small frames */ int napi_weight; /* NAPI weight */ - - int use_rmii; /* use RMII mode */ - - struct clk *clk_per; /* 'per' clock for register access */ }; struct fs_enet_private { @@ -154,14 +140,11 @@ struct fs_enet_private { cbd_t __iomem *cur_rx; cbd_t __iomem *cur_tx; int tx_free; - const struct phy_info *phy; u32 msg_enable; - struct mii_if_info mii_if; - unsigned int last_mii_status; + struct phylink *phylink; + struct phylink_config phylink_config; int interrupt; - int oldduplex, oldspeed, oldlink; /* current settings */ - /* event masks */ u32 ev_napi; /* mask of NAPI events */ u32 ev; /* event mask */ diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index e2ffac9eb2adbc..be63293511d97c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * FCC driver for Motorola MPC82xx (PQ2). * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include @@ -25,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -239,7 +235,8 @@ static void set_multicast_list(struct net_device *dev) set_promiscuous_mode(dev); } -static void restart(struct net_device *dev) +static void restart(struct net_device *dev, phy_interface_t interface, + int speed, int duplex) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; @@ -363,8 +360,8 @@ static void restart(struct net_device *dev) fs_init_bds(dev); /* adjust to speed (for RMII mode) */ - if (fpi->use_rmii) { - if (dev->phydev->speed == 100) + if (interface == PHY_INTERFACE_MODE_RMII) { + if (speed == SPEED_100) C8(fcccp, fcc_gfemr, 0x20); else S8(fcccp, fcc_gfemr, 0x20); @@ -386,11 +383,11 @@ static void restart(struct net_device *dev) W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC); - if (fpi->use_rmii) + if (interface == PHY_INTERFACE_MODE_RMII) S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); /* adjust to duplex mode */ - if (dev->phydev->duplex) + if (duplex == DUPLEX_FULL) S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); else C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index cdc89d83cf075a..f2ecd20027cfb3 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Freescale Ethernet controllers * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
*/ #include @@ -26,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -224,7 +220,8 @@ static void set_multicast_list(struct net_device *dev) set_promiscuous_mode(dev); } -static void restart(struct net_device *dev) +static void restart(struct net_device *dev, phy_interface_t interface, + int speed, int duplex) { struct fs_enet_private *fep = netdev_priv(dev); struct fec __iomem *fecp = fep->fec.fecp; @@ -306,13 +303,13 @@ static void restart(struct net_device *dev) * Only set MII/RMII mode - do not touch maximum frame length * configured before. */ - FS(fecp, r_cntrl, fpi->use_rmii ? - FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE); + FS(fecp, r_cntrl, interface == PHY_INTERFACE_MODE_RMII ? + FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE); #endif /* * adjust to duplex mode */ - if (dev->phydev->duplex) { + if (duplex == DUPLEX_FULL) { FC(fecp, r_cntrl, FEC_RCNTRL_DRT); FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ } else { diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index a64cb6270515a8..6c97191649de25 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx. * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include @@ -25,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -131,15 +127,14 @@ static int setup_data(struct net_device *dev) static int allocate_bd(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; + struct fs_platform_info *fpi = fep->fpi; - fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) * - sizeof(cbd_t), 8); - if (IS_ERR_VALUE(fep->ring_mem_addr)) + fpi->dpram_offset = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), 8); + if (IS_ERR_VALUE(fpi->dpram_offset)) return -ENOMEM; - fep->ring_base = (void __iomem __force*) - cpm_muram_addr(fep->ring_mem_addr); + fep->ring_base = cpm_muram_addr(fpi->dpram_offset); return 0; } @@ -147,9 +142,10 @@ static int allocate_bd(struct net_device *dev) static void free_bd(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; if (fep->ring_base) - cpm_muram_free(fep->ring_mem_addr); + cpm_muram_free(fpi->dpram_offset); } static void cleanup_data(struct net_device *dev) @@ -230,7 +226,8 @@ static void set_multicast_list(struct net_device *dev) * change. This only happens when switching between half and full * duplex. 
*/ -static void restart(struct net_device *dev) +static void restart(struct net_device *dev, phy_interface_t interface, + int speed, int duplex) { struct fs_enet_private *fep = netdev_priv(dev); scc_t __iomem *sccp = fep->scc.sccp; @@ -247,9 +244,9 @@ static void restart(struct net_device *dev) __fs_out8((u8 __iomem *)ep + i, 0); /* point to bds */ - W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); + W16(ep, sen_genscc.scc_rbase, fpi->dpram_offset); W16(ep, sen_genscc.scc_tbase, - fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring); + fpi->dpram_offset + sizeof(cbd_t) * fpi->rx_ring); /* Initialize function code registers for big-endian. */ @@ -341,7 +338,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); /* Set full duplex mode if needed */ - if (dev->phydev->duplex) + if (duplex == DUPLEX_FULL) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); /* Restore multicast and promiscuous settings */ diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index f965a2329055e8..2e210a00355843 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 7bb69727952a83..93d91e8ad0de53 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * @@ -6,10 +7,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
*/ #include diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2baef59f741dda..ecb1703ea150fe 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -754,6 +754,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; err = of_get_ethdev_address(np, dev); + if (err == -EPROBE_DEFER) + goto err_grp_init; if (err) { eth_hw_addr_random(dev); dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index f581402ad740b5..a99b95c4bcfbc5 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1455,12 +1455,8 @@ static int gfar_get_ts_info(struct net_device *dev, struct device_node *ptp_node; struct ptp_qoriq *ptp = NULL; - info->phc_index = -1; - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { - info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } @@ -1478,9 +1474,7 @@ static int gfar_get_ts_info(struct net_device *dev, info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + SOF_TIMESTAMPING_TX_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c index a7fbd4cd560ac0..ce97b76f9ae04f 100644 --- a/drivers/net/ethernet/fungible/funcore/fun_dev.c +++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c @@ -546,17 +546,14 @@ int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0, unsigned int id0, enum fun_admin_bind_type type1, unsigned int id1) { - struct { - struct fun_admin_bind_req req; - struct fun_admin_bind_entry entry[2]; - } cmd = { - .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND, - sizeof(cmd)), - .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0), - .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1), - }; + DEFINE_RAW_FLEX(struct fun_admin_bind_req, cmd, entry, 2); + + cmd->common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND, + __struct_size(cmd)); + cmd->entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0); + cmd->entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1); - return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0); + return fun_submit_admin_sync_cmd(fdev, &cmd->common, NULL, 0, 0); } EXPORT_SYMBOL_GPL(fun_bind); diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c index 7f081e6e8c877c..ba83dbf4ed222b 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c @@ -1042,12 +1042,9 @@ static int fun_set_rxfh(struct net_device *netdev, static int fun_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info) { - info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_RX_HARDWARE | + info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index 
= -1; info->tx_types = BIT(HWTSTAMP_TX_OFF); info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); return 0; diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 84ac004d395339..301fa1ea4f5167 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -784,6 +784,8 @@ struct gve_priv { u32 adminq_verify_driver_compatibility_cnt; u32 adminq_query_flow_rules_cnt; u32 adminq_cfg_flow_rule_cnt; + u32 adminq_cfg_rss_cnt; + u32 adminq_query_rss_cnt; /* Global stats */ u32 interface_up_cnt; /* count of times interface turned up since last reset */ @@ -831,6 +833,9 @@ struct gve_priv { u32 num_flow_rules; struct gve_flow_rules_cache flow_rules_cache; + + u16 rss_key_size; + u16 rss_lut_size; }; enum gve_service_task_flags_bit { @@ -1148,7 +1153,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv, int idx); void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx, struct gve_rx_alloc_rings_cfg *cfg); -int gve_rx_alloc_rings(struct gve_priv *priv); int gve_rx_alloc_rings_gqi(struct gve_priv *priv, struct gve_rx_alloc_rings_cfg *cfg); void gve_rx_free_rings_gqi(struct gve_priv *priv, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index c5bbc1b7524ed2..e44e8b139633fc 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -45,6 +45,7 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, struct gve_device_option_flow_steering **dev_op_flow_steering, + struct gve_device_option_rss_config **dev_op_rss_config, struct gve_device_option_modify_ring **dev_op_modify_ring) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); @@ -207,6 +208,23 @@ void gve_parse_device_option(struct gve_priv *priv, "Flow Steering"); *dev_op_flow_steering = (void *)(option + 1); break; + case GVE_DEV_OPT_ID_RSS_CONFIG: + if (option_length < sizeof(**dev_op_rss_config) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "RSS config", + (int)sizeof(**dev_op_rss_config), + GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_rss_config)) + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, + "RSS config"); + *dev_op_rss_config = (void *)(option + 1); + break; default: /* If we don't recognize the option just continue * without doing anything. 
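/* Illustrative sketch (not part of this patch) of the validation pattern
 * used when a new device option, such as the RSS config above, is added to
 * the describe-device parser: reject an option shorter than the structure
 * the driver knows or whose required-feature mask it cannot satisfy, and
 * only warn when the device reports a longer (newer) option.  The demo_*
 * structures and names below are hypothetical stand-ins.
 */
#include <asm/byteorder.h>
#include <linux/device.h>
#include <linux/types.h>

struct demo_opt_hdr {
	__be16 option_id;
	__be16 option_length;
	__be32 required_features_mask;
};

struct demo_opt_rss {
	__be32 supported_features_mask;
	__be16 hash_key_size;
	__be16 hash_lut_size;
};

static const struct demo_opt_rss *
demo_parse_rss_option(struct device *dev, const struct demo_opt_hdr *hdr,
		      u32 supported_feat_mask)
{
	u16 len = be16_to_cpu(hdr->option_length);
	u32 req = be32_to_cpu(hdr->required_features_mask);

	if (len < sizeof(struct demo_opt_rss) || (req & ~supported_feat_mask)) {
		dev_warn(dev, "malformed or unsupported RSS option (len %u, req 0x%x)\n",
			 len, req);
		return NULL;
	}

	if (len > sizeof(struct demo_opt_rss))
		dev_warn(dev, "RSS option larger than expected; ignoring extra bytes\n");

	/* The option payload follows the header directly. */
	return (const struct demo_opt_rss *)(hdr + 1);
}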
@@ -227,6 +245,7 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, struct gve_device_option_flow_steering **dev_op_flow_steering, + struct gve_device_option_rss_config **dev_op_rss_config, struct gve_device_option_modify_ring **dev_op_modify_ring) { const int num_options = be16_to_cpu(descriptor->num_device_options); @@ -249,7 +268,8 @@ gve_process_device_options(struct gve_priv *priv, dev_op_gqi_rda, dev_op_gqi_qpl, dev_op_dqo_rda, dev_op_jumbo_frames, dev_op_dqo_qpl, dev_op_buffer_sizes, - dev_op_flow_steering, dev_op_modify_ring); + dev_op_flow_steering, dev_op_rss_config, + dev_op_modify_ring); dev_opt = next_opt; } @@ -289,6 +309,8 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) priv->adminq_get_ptype_map_cnt = 0; priv->adminq_query_flow_rules_cnt = 0; priv->adminq_cfg_flow_rule_cnt = 0; + priv->adminq_cfg_rss_cnt = 0; + priv->adminq_query_rss_cnt = 0; /* Setup Admin queue with the device */ if (priv->pdev->revision < 0x1) { @@ -534,6 +556,12 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, case GVE_ADMINQ_CONFIGURE_FLOW_RULE: priv->adminq_cfg_flow_rule_cnt++; break; + case GVE_ADMINQ_CONFIGURE_RSS: + priv->adminq_cfg_rss_cnt++; + break; + case GVE_ADMINQ_QUERY_RSS: + priv->adminq_query_rss_cnt++; + break; default: dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); } @@ -867,6 +895,8 @@ static void gve_enable_supported_features(struct gve_priv *priv, *dev_op_buffer_sizes, const struct gve_device_option_flow_steering *dev_op_flow_steering, + const struct gve_device_option_rss_config + *dev_op_rss_config, const struct gve_device_option_modify_ring *dev_op_modify_ring) { @@ -931,6 +961,14 @@ static void gve_enable_supported_features(struct gve_priv *priv, priv->max_flow_rules); } } + + if (dev_op_rss_config && + (supported_features_mask & GVE_SUP_RSS_CONFIG_MASK)) { + priv->rss_key_size = + be16_to_cpu(dev_op_rss_config->hash_key_size); + priv->rss_lut_size = + be16_to_cpu(dev_op_rss_config->hash_lut_size); + } } int gve_adminq_describe_device(struct gve_priv *priv) @@ -939,6 +977,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; struct gve_device_option_modify_ring *dev_op_modify_ring = NULL; + struct gve_device_option_rss_config *dev_op_rss_config = NULL; struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL; struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL; struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL; @@ -973,6 +1012,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) &dev_op_jumbo_frames, &dev_op_dqo_qpl, &dev_op_buffer_sizes, &dev_op_flow_steering, + &dev_op_rss_config, &dev_op_modify_ring); if (err) goto free_device_descriptor; @@ -1035,7 +1075,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) gve_enable_supported_features(priv, supported_features_mask, dev_op_jumbo_frames, dev_op_dqo_qpl, dev_op_buffer_sizes, dev_op_flow_steering, - dev_op_modify_ring); + dev_op_rss_config, dev_op_modify_ring); free_device_descriptor: dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); @@ -1248,6 +1288,81 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv) return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); } +int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh) +{ + dma_addr_t lut_bus = 0, key_bus = 0; + 
u16 key_size = 0, lut_size = 0; + union gve_adminq_command cmd; + __be32 *lut = NULL; + u8 hash_alg = 0; + u8 *key = NULL; + int err = 0; + u16 i; + + switch (rxfh->hfunc) { + case ETH_RSS_HASH_NO_CHANGE: + break; + case ETH_RSS_HASH_TOP: + hash_alg = ETH_RSS_HASH_TOP; + break; + default: + return -EOPNOTSUPP; + } + + if (rxfh->indir) { + lut_size = priv->rss_lut_size; + lut = dma_alloc_coherent(&priv->pdev->dev, + lut_size * sizeof(*lut), + &lut_bus, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + for (i = 0; i < priv->rss_lut_size; i++) + lut[i] = cpu_to_be32(rxfh->indir[i]); + } + + if (rxfh->key) { + key_size = priv->rss_key_size; + key = dma_alloc_coherent(&priv->pdev->dev, + key_size, &key_bus, GFP_KERNEL); + if (!key) { + err = -ENOMEM; + goto out; + } + + memcpy(key, rxfh->key, key_size); + } + + /* Zero-valued fields in the cmd.configure_rss instruct the device to + * not update those fields. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS); + cmd.configure_rss = (struct gve_adminq_configure_rss) { + .hash_types = cpu_to_be16(BIT(GVE_RSS_HASH_TCPV4) | + BIT(GVE_RSS_HASH_UDPV4) | + BIT(GVE_RSS_HASH_TCPV6) | + BIT(GVE_RSS_HASH_UDPV6)), + .hash_alg = hash_alg, + .hash_key_size = cpu_to_be16(key_size), + .hash_lut_size = cpu_to_be16(lut_size), + .hash_key_addr = cpu_to_be64(key_bus), + .hash_lut_addr = cpu_to_be64(lut_bus), + }; + + err = gve_adminq_execute_cmd(priv, &cmd); + +out: + if (lut) + dma_free_coherent(&priv->pdev->dev, + lut_size * sizeof(*lut), + lut, lut_bus); + if (key) + dma_free_coherent(&priv->pdev->dev, + key_size, key, key_bus); + return err; +} + /* In the dma memory that the driver allocated for the device to query the flow rules, the device * will first write it with a struct of gve_query_flow_rules_descriptor. Next to it, the device * will write an array of rules or rule ids with the count that specified in the descriptor. 
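/* Illustrative sketch (not part of this patch) of the staging step the new
 * gve_adminq_configure_rss() performs for the indirection table: allocate a
 * DMA-coherent buffer, convert each entry to the device's big-endian
 * layout, and hand the bus address to the admin command.  The caller frees
 * the buffer with dma_free_coherent() once the command completes; the
 * helper name is hypothetical.
 */
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

static __be32 *demo_stage_rss_lut(struct device *dev, const u32 *indir,
				  u16 lut_size, dma_addr_t *lut_bus)
{
	__be32 *lut;
	u16 i;

	lut = dma_alloc_coherent(dev, lut_size * sizeof(*lut), lut_bus,
				 GFP_KERNEL);
	if (!lut)
		return NULL;

	for (i = 0; i < lut_size; i++)
		lut[i] = cpu_to_be32(indir[i]);	/* device expects big-endian */

	return lut;
}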
@@ -1325,3 +1440,66 @@ int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 sta dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); return err; } + +static int gve_adminq_process_rss_query(struct gve_priv *priv, + struct gve_query_rss_descriptor *descriptor, + struct ethtool_rxfh_param *rxfh) +{ + u32 total_memory_length; + u16 hash_lut_length; + void *rss_info_addr; + __be32 *lut; + u16 i; + + total_memory_length = be32_to_cpu(descriptor->total_length); + hash_lut_length = priv->rss_lut_size * sizeof(*rxfh->indir); + + if (sizeof(*descriptor) + priv->rss_key_size + hash_lut_length != total_memory_length) { + dev_err(&priv->dev->dev, + "rss query desc from device has invalid length parameter.\n"); + return -EINVAL; + } + + rxfh->hfunc = descriptor->hash_alg; + + rss_info_addr = (void *)(descriptor + 1); + if (rxfh->key) + memcpy(rxfh->key, rss_info_addr, priv->rss_key_size); + + rss_info_addr += priv->rss_key_size; + lut = (__be32 *)rss_info_addr; + if (rxfh->indir) { + for (i = 0; i < priv->rss_lut_size; i++) + rxfh->indir[i] = be32_to_cpu(lut[i]); + } + + return 0; +} + +int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh) +{ + struct gve_query_rss_descriptor *descriptor; + union gve_adminq_command cmd; + dma_addr_t descriptor_bus; + int err = 0; + + descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus); + if (!descriptor) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_RSS); + cmd.query_rss = (struct gve_adminq_query_rss) { + .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE), + .rss_descriptor_addr = cpu_to_be64(descriptor_bus), + }; + err = gve_adminq_execute_cmd(priv, &cmd); + if (err) + goto out; + + err = gve_adminq_process_rss_query(priv, descriptor, rxfh); + +out: + dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); + return err; +} diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index ed1370c9b19715..863683de96942e 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -20,12 +20,14 @@ enum gve_adminq_opcodes { GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7, GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8, GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9, + GVE_ADMINQ_CONFIGURE_RSS = 0xA, GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB, GVE_ADMINQ_REPORT_STATS = 0xC, GVE_ADMINQ_REPORT_LINK_SPEED = 0xD, GVE_ADMINQ_GET_PTYPE_MAP = 0xE, GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF, GVE_ADMINQ_QUERY_FLOW_RULES = 0x10, + GVE_ADMINQ_QUERY_RSS = 0x12, /* For commands that are larger than 56 bytes */ GVE_ADMINQ_EXTENDED_COMMAND = 0xFF, @@ -164,6 +166,14 @@ struct gve_device_option_flow_steering { static_assert(sizeof(struct gve_device_option_flow_steering) == 12); +struct gve_device_option_rss_config { + __be32 supported_features_mask; + __be16 hash_key_size; + __be16 hash_lut_size; +}; + +static_assert(sizeof(struct gve_device_option_rss_config) == 8); + /* Terminology: * * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA @@ -182,6 +192,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, GVE_DEV_OPT_ID_FLOW_STEERING = 0xb, + GVE_DEV_OPT_ID_RSS_CONFIG = 0xe, }; enum gve_dev_opt_req_feat_mask { @@ -194,6 +205,7 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0, + 
GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0, }; enum gve_sup_feature_mask { @@ -201,6 +213,7 @@ enum gve_sup_feature_mask { GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, GVE_SUP_FLOW_STEERING_MASK = 1 << 5, + GVE_SUP_RSS_CONFIG_MASK = 1 << 7, }; #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 @@ -214,6 +227,7 @@ enum gve_driver_capbility { gve_driver_capability_dqo_rda = 3, gve_driver_capability_alt_miss_compl = 4, gve_driver_capability_flexible_buffer_size = 5, + gve_driver_capability_flexible_rss_size = 6, }; #define GVE_CAP1(a) BIT((int)a) @@ -226,7 +240,8 @@ enum gve_driver_capbility { GVE_CAP1(gve_driver_capability_gqi_rda) | \ GVE_CAP1(gve_driver_capability_dqo_rda) | \ GVE_CAP1(gve_driver_capability_alt_miss_compl) | \ - GVE_CAP1(gve_driver_capability_flexible_buffer_size)) + GVE_CAP1(gve_driver_capability_flexible_buffer_size) | \ + GVE_CAP1(gve_driver_capability_flexible_rss_size)) #define GVE_DRIVER_CAPABILITY_FLAGS2 0x0 #define GVE_DRIVER_CAPABILITY_FLAGS3 0x0 @@ -509,6 +524,44 @@ struct gve_adminq_query_flow_rules { static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24); +enum gve_rss_hash_type { + GVE_RSS_HASH_IPV4, + GVE_RSS_HASH_TCPV4, + GVE_RSS_HASH_IPV6, + GVE_RSS_HASH_IPV6_EX, + GVE_RSS_HASH_TCPV6, + GVE_RSS_HASH_TCPV6_EX, + GVE_RSS_HASH_UDPV4, + GVE_RSS_HASH_UDPV6, + GVE_RSS_HASH_UDPV6_EX, +}; + +struct gve_adminq_configure_rss { + __be16 hash_types; + u8 hash_alg; + u8 reserved; + __be16 hash_key_size; + __be16 hash_lut_size; + __be64 hash_key_addr; + __be64 hash_lut_addr; +}; + +static_assert(sizeof(struct gve_adminq_configure_rss) == 24); + +struct gve_query_rss_descriptor { + __be32 total_length; + __be16 hash_types; + u8 hash_alg; + u8 reserved; +}; + +struct gve_adminq_query_rss { + __be64 available_length; + __be64 rss_descriptor_addr; +}; + +static_assert(sizeof(struct gve_adminq_query_rss) == 16); + union gve_adminq_command { struct { __be32 opcode; @@ -530,6 +583,8 @@ union gve_adminq_command { struct gve_adminq_verify_driver_compatibility verify_driver_compatibility; struct gve_adminq_query_flow_rules query_flow_rules; + struct gve_adminq_configure_rss configure_rss; + struct gve_adminq_query_rss query_rss; struct gve_adminq_extended_command extended_command; }; }; @@ -568,6 +623,8 @@ int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc); int gve_adminq_reset_flow_rules(struct gve_priv *priv); int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc); +int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh); +int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh); struct gve_ptype_lut; int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 5a8b490ab3ad08..bdfc6e77b2af56 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -75,7 +75,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt", "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt", - "adminq_query_flow_rules", "adminq_cfg_flow_rule", + "adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt", + "adminq_query_rss_cnt", }; 
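/* Illustrative sketch (not part of this patch): when both a stringset and
 * the stats-filling loop must be extended in lock-step, as with the two new
 * adminq RSS counters here, some drivers avoid the two lists drifting apart
 * by deriving both from a single table of name/offset pairs.  This is a
 * hypothetical alternative pattern, not the approach this driver takes.
 */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/stddef.h>

struct demo_eth_priv {
	u32 adminq_cfg_rss_cnt;
	u32 adminq_query_rss_cnt;
};

struct demo_stat {
	char name[ETH_GSTRING_LEN];
	size_t offset;			/* counter's offset within the priv struct */
};

static const struct demo_stat demo_stats[] = {
	{ "adminq_cfg_rss_cnt",   offsetof(struct demo_eth_priv, adminq_cfg_rss_cnt) },
	{ "adminq_query_rss_cnt", offsetof(struct demo_eth_priv, adminq_query_rss_cnt) },
};

static void demo_fill_stats(const struct demo_eth_priv *priv, u64 *data)
{
	int i;

	/* The fill order follows the string table by construction. */
	for (i = 0; i < ARRAY_SIZE(demo_stats); i++)
		data[i] = *(const u32 *)((const char *)priv + demo_stats[i].offset);
}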
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { @@ -453,6 +454,8 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = priv->adminq_get_ptype_map_cnt; data[i++] = priv->adminq_query_flow_rules_cnt; data[i++] = priv->adminq_cfg_flow_rule_cnt; + data[i++] = priv->adminq_cfg_rss_cnt; + data[i++] = priv->adminq_query_rss_cnt; } static void gve_get_channels(struct net_device *netdev, @@ -838,6 +841,41 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u return err; } +static u32 gve_get_rxfh_key_size(struct net_device *netdev) +{ + struct gve_priv *priv = netdev_priv(netdev); + + return priv->rss_key_size; +} + +static u32 gve_get_rxfh_indir_size(struct net_device *netdev) +{ + struct gve_priv *priv = netdev_priv(netdev); + + return priv->rss_lut_size; +} + +static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) +{ + struct gve_priv *priv = netdev_priv(netdev); + + if (!priv->rss_key_size || !priv->rss_lut_size) + return -EOPNOTSUPP; + + return gve_adminq_query_rss_config(priv, rxfh); +} + +static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct gve_priv *priv = netdev_priv(netdev); + + if (!priv->rss_key_size || !priv->rss_lut_size) + return -EOPNOTSUPP; + + return gve_adminq_configure_rss(priv, rxfh); +} + const struct ethtool_ops gve_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, @@ -851,6 +889,10 @@ const struct ethtool_ops gve_ethtool_ops = { .get_channels = gve_get_channels, .set_rxnfc = gve_set_rxnfc, .get_rxnfc = gve_get_rxnfc, + .get_rxfh_indir_size = gve_get_rxfh_indir_size, + .get_rxfh_key_size = gve_get_rxfh_key_size, + .get_rxfh = gve_get_rxfh, + .set_rxfh = gve_set_rxfh, .get_link = ethtool_op_get_link, .get_coalesce = gve_get_coalesce, .set_coalesce = gve_set_coalesce, diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index b91e7a06b97f7d..beb815e5289b12 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -947,6 +947,7 @@ static int hip04_mac_probe(struct platform_device *pdev) priv->tx_coalesce_timer.function = tx_done; priv->map = syscon_node_to_regmap(arg.np); + of_node_put(arg.np); if (IS_ERR(priv->map)) { dev_warn(d, "no syscon hisilicon,hip04-ppe\n"); ret = PTR_ERR(priv->map); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index f75668c4793519..58baac7103b380 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -734,7 +734,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb, return -ENODATA; phy = get_phy_device(mdio, addr, is_c45); - if (!phy || IS_ERR(phy)) + if (IS_ERR_OR_NULL(phy)) return -EIO; phy->irq = mdio->irq[addr]; @@ -933,6 +933,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) mac_cb->cpld_ctrl = NULL; } else { syscon = syscon_node_to_regmap(cpld_args.np); + of_node_put(cpld_args.np); if (IS_ERR_OR_NULL(syscon)) { dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); mac_cb->cpld_ctrl = NULL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 6c33195a1168f8..bd86efd92a5a7d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -13,8 +13,9 @@ #include #include #include -#include + #include + #include "hclge_cmd.h" #include "hclge_dcb.h" #include "hclge_main.h" @@ -6290,15 +6291,15 @@ static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs, static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, struct hclge_fd_rule *rule, u8 ip_proto) { - be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, - IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, - IPV6_SIZE); + ipv6_addr_be32_to_cpu(rule->tuples.src_ip, + fs->h_u.tcp_ip6_spec.ip6src); + ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip, + fs->m_u.tcp_ip6_spec.ip6src); - be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, - IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, - IPV6_SIZE); + ipv6_addr_be32_to_cpu(rule->tuples.dst_ip, + fs->h_u.tcp_ip6_spec.ip6dst); + ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip, + fs->m_u.tcp_ip6_spec.ip6dst); rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); @@ -6319,15 +6320,15 @@ static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs, struct hclge_fd_rule *rule) { - be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, - IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, - IPV6_SIZE); + ipv6_addr_be32_to_cpu(rule->tuples.src_ip, + fs->h_u.usr_ip6_spec.ip6src); + ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip, + fs->m_u.usr_ip6_spec.ip6src); - be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, - IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, - IPV6_SIZE); + ipv6_addr_be32_to_cpu(rule->tuples.dst_ip, + fs->h_u.usr_ip6_spec.ip6dst); + ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip, + fs->m_u.usr_ip6_spec.ip6dst); rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; @@ -6756,21 +6757,19 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, struct ethtool_tcpip6_spec *spec, struct ethtool_tcpip6_spec *spec_mask) { - cpu_to_be32_array(spec->ip6src, - rule->tuples.src_ip, IPV6_SIZE); - cpu_to_be32_array(spec->ip6dst, - rule->tuples.dst_ip, IPV6_SIZE); + ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip); + ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip); if (rule->unused_tuple & BIT(INNER_SRC_IP)) memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); else - cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, - IPV6_SIZE); + ipv6_addr_cpu_to_be32(spec_mask->ip6src, + rule->tuples_mask.src_ip); if (rule->unused_tuple & BIT(INNER_DST_IP)) memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); else - cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, - IPV6_SIZE); + ipv6_addr_cpu_to_be32(spec_mask->ip6dst, + rule->tuples_mask.dst_ip); spec->tclass = rule->tuples.ip_tos; spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 
@@ -6789,19 +6788,19 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, struct ethtool_usrip6_spec *spec, struct ethtool_usrip6_spec *spec_mask) { - cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); - cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); + ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip); + ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip); if (rule->unused_tuple & BIT(INNER_SRC_IP)) memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); else - cpu_to_be32_array(spec_mask->ip6src, - rule->tuples_mask.src_ip, IPV6_SIZE); + ipv6_addr_cpu_to_be32(spec_mask->ip6src, + rule->tuples_mask.src_ip); if (rule->unused_tuple & BIT(INNER_DST_IP)) memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); else - cpu_to_be32_array(spec_mask->ip6dst, - rule->tuples_mask.dst_ip, IPV6_SIZE); + ipv6_addr_cpu_to_be32(spec_mask->ip6dst, + rule->tuples_mask.dst_ip); spec->tclass = rule->tuples.ip_tos; spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? @@ -7019,7 +7018,7 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, } else { int i; - for (i = 0; i < IPV6_SIZE; i++) { + for (i = 0; i < IPV6_ADDR_WORDS; i++) { tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); } @@ -7274,14 +7273,14 @@ static int hclge_get_cls_key_ip(const struct flow_rule *flow, struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(flow, &match); - be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, - IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.src_ip, - match.mask->src.s6_addr32, IPV6_SIZE); - be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, - IPV6_SIZE); - be32_to_cpu_array(rule->tuples_mask.dst_ip, - match.mask->dst.s6_addr32, IPV6_SIZE); + ipv6_addr_be32_to_cpu(rule->tuples.src_ip, + match.key->src.s6_addr32); + ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip, + match.mask->src.s6_addr32); + ipv6_addr_be32_to_cpu(rule->tuples.dst_ip, + match.key->dst.s6_addr32); + ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip, + match.mask->dst.s6_addr32); } else { rule->unused_tuple |= BIT(INNER_SRC_IP); rule->unused_tuple |= BIT(INNER_DST_IP); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index b5178b0f88b34d..b9fc719880bb52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -8,7 +8,9 @@ #include #include #include + #include +#include #include "hclge_cmd.h" #include "hclge_ptp.h" @@ -718,15 +720,15 @@ struct hclge_fd_cfg { }; #define IPV4_INDEX 3 -#define IPV6_SIZE 4 + struct hclge_fd_rule_tuples { u8 src_mac[ETH_ALEN]; u8 dst_mac[ETH_ALEN]; /* Be compatible for ip address of both ipv4 and ipv6. * For ipv4 address, we store it in src/dst_ip[3]. 
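/* Illustrative sketch (not part of this patch) of what the
 * ipv6_addr_be32_to_cpu()/ipv6_addr_cpu_to_be32() conversions used above
 * amount to: an IPv6 address is handled as four 32-bit words and swapped
 * between the wire's big-endian layout and host order word by word.  The
 * helpers below are local stand-ins, not the kernel's implementation.
 */
#include <asm/byteorder.h>
#include <linux/types.h>

#define DEMO_IPV6_WORDS	4	/* an IPv6 address is 4 x 32-bit words */

static void demo_ipv6_be32_to_cpu(u32 *dst, const __be32 *src)
{
	int i;

	for (i = 0; i < DEMO_IPV6_WORDS; i++)
		dst[i] = be32_to_cpu(src[i]);
}

static void demo_ipv6_cpu_to_be32(__be32 *dst, const u32 *src)
{
	int i;

	for (i = 0; i < DEMO_IPV6_WORDS; i++)
		dst[i] = cpu_to_be32(src[i]);
}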
*/ - u32 src_ip[IPV6_SIZE]; - u32 dst_ip[IPV6_SIZE]; + u32 src_ip[IPV6_ADDR_WORDS]; + u32 dst_ip[IPV6_ADDR_WORDS]; u16 src_port; u16 dst_port; u16 vlan_tag1; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 5fff8ed388f8b6..5505caea88e981 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -389,16 +389,12 @@ int hclge_ptp_get_ts_info(struct hnae3_handle *handle, } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (hdev->ptp->clock) info->phc_index = ptp_clock_index(hdev->ptp->clock); - else - info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c index 65b9dcd3813756..6db415d8b9176c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c @@ -134,17 +134,17 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, reg += hclgevf_reg_get_header(reg); /* fetching per-VF registers values from VF PCIe register space */ - reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); + reg_um = ARRAY_SIZE(cmdq_reg_addr_list); reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg); for (i = 0; i < reg_um; i++) *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); - reg_um = sizeof(common_reg_addr_list) / sizeof(u32); + reg_um = ARRAY_SIZE(common_reg_addr_list); reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg); for (i = 0; i < reg_um; i++) *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); - reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); + reg_um = ARRAY_SIZE(ring_reg_addr_list); for (j = 0; j < hdev->num_tqps; j++) { reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg); for (i = 0; i < reg_um; i++) @@ -153,7 +153,7 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, HCLGEVF_RING_REG_OFFSET * j); } - reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); + reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list); for (j = 0; j < hdev->num_msi_used - 1; j++) { reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg); for (i = 0; i < reg_um; i++) diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index ed73707176c1af..8a047145f0c50b 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -575,6 +575,7 @@ static int hns_mdio_probe(struct platform_device *pdev) MDIO_SC_RESET_ST; } } + of_node_put(reg_args.np); } else { dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret); mdio_dev->subctrl_vbase = NULL; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 0304f03d40936c..c559dd4291d306 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -1471,7 +1471,6 @@ static void hinic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct hinic_dev *nic_dev = netdev_priv(netdev); - char *p = (char *)data; u16 i, j; switch (stringset) { @@ -1479,31 +1478,19 @@ static void hinic_get_strings(struct net_device *netdev, memcpy(data, *hinic_test_strings, 
sizeof(hinic_test_strings)); return; case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) { - memcpy(p, hinic_function_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) + ethtool_puts(&data, hinic_function_stats[i].name); - for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) { - memcpy(p, hinic_port_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) + ethtool_puts(&data, hinic_port_stats[i].name); - for (i = 0; i < nic_dev->num_qps; i++) { - for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) { - sprintf(p, hinic_tx_queue_stats[j].name, i); - p += ETH_GSTRING_LEN; - } - } + for (i = 0; i < nic_dev->num_qps; i++) + for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) + ethtool_sprintf(&data, hinic_tx_queue_stats[j].name, i); - for (i = 0; i < nic_dev->num_qps; i++) { - for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) { - sprintf(p, hinic_rx_queue_stats[j].name, i); - p += ETH_GSTRING_LEN; - } - } + for (i = 0; i < nic_dev->num_qps; i++) + for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) + ethtool_sprintf(&data, hinic_rx_queue_stats[j].name, i); return; default: diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 1e29e5c9a2dfbe..c41c3f1cc506ff 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3063,14 +3063,13 @@ static void ehea_shutdown_single_port(struct ehea_port *port) static int ehea_setup_ports(struct ehea_adapter *adapter) { struct device_node *lhea_dn; - struct device_node *eth_dn = NULL; + struct device_node *eth_dn; const u32 *dn_log_port_id; int i = 0; lhea_dn = adapter->ofdev->dev.of_node; - while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { - + for_each_child_of_node(lhea_dn, eth_dn) { dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", NULL); if (!dn_log_port_id) { @@ -3102,12 +3101,11 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter, u32 logical_port_id) { struct device_node *lhea_dn; - struct device_node *eth_dn = NULL; + struct device_node *eth_dn; const u32 *dn_log_port_id; lhea_dn = adapter->ofdev->dev.of_node; - while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { - + for_each_child_of_node(lhea_dn, eth_dn) { dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", NULL); if (dn_log_port_id) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index a19d098f2e2bf9..dac570f3c11036 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -96,11 +95,6 @@ MODULE_LICENSE("GPL"); static u32 busy_phy_map; static DEFINE_MUTEX(emac_phy_map_lock); -/* This is the wait queue used to wait on any event related to probe, that - * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc... - */ -static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait); - /* Having stable interface names is a doomed idea. However, it would be nice * if we didn't have completely random interface names at boot too :-) It's * just a matter of making everybody's life easier. Since we are doing @@ -116,9 +110,6 @@ static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait); #define EMAC_BOOT_LIST_SIZE 4 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE]; -/* How long should I wait for dependent devices ? 
*/ -#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5) - /* I don't want to litter system log with timeout errors * when we have brain-damaged PHY. */ @@ -418,8 +409,8 @@ static int emac_reset(struct emac_instance *dev) static void emac_hash_mc(struct emac_instance *dev) { + u32 __iomem *gaht_base = emac_gaht_base(dev); const int regs = EMAC_XAHT_REGS(dev); - u32 *gaht_base = emac_gaht_base(dev); u32 gaht_temp[EMAC_XAHT_MAX_REGS]; struct netdev_hw_addr *ha; int i; @@ -973,8 +964,6 @@ static void __emac_set_multicast_list(struct emac_instance *dev) * we need is just to stop RX channel. This seems to work on all * tested SoCs. --ebs * - * If we need the full reset, we might just trigger the workqueue - * and do it async... a bit nasty but should work --BenH */ dev->mcast_pending = 0; emac_rx_disable(dev); @@ -1228,18 +1217,10 @@ static void emac_print_link_status(struct emac_instance *dev) static int emac_open(struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); - int err, i; + int i; DBG(dev, "open" NL); - /* Setup error IRQ handler */ - err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev); - if (err) { - printk(KERN_ERR "%s: failed to request IRQ %d\n", - ndev->name, dev->emac_irq); - return err; - } - /* Allocate RX ring */ for (i = 0; i < NUM_RX_BUFF; ++i) if (emac_alloc_rx_skb(dev, i)) { @@ -1293,8 +1274,6 @@ static int emac_open(struct net_device *ndev) return 0; oom: emac_clean_rx_ring(dev); - free_irq(dev->emac_irq, dev); - return -ENOMEM; } @@ -1408,8 +1387,6 @@ static int emac_close(struct net_device *ndev) emac_clean_tx_ring(dev); emac_clean_rx_ring(dev); - free_irq(dev->emac_irq, dev); - netif_carrier_off(ndev); return 0; @@ -2390,7 +2367,9 @@ static int emac_check_deps(struct emac_instance *dev, if (deps[i].drvdata != NULL) there++; } - return there == EMAC_DEP_COUNT; + if (there != EMAC_DEP_COUNT) + return -EPROBE_DEFER; + return 0; } static void emac_put_deps(struct emac_instance *dev) @@ -2402,19 +2381,6 @@ static void emac_put_deps(struct emac_instance *dev) platform_device_put(dev->tah_dev); } -static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action, - void *data) -{ - /* We are only intereted in device addition */ - if (action == BUS_NOTIFY_BOUND_DRIVER) - wake_up_all(&emac_probe_wait); - return 0; -} - -static struct notifier_block emac_of_bus_notifier = { - .notifier_call = emac_of_bus_notify -}; - static int emac_wait_deps(struct emac_instance *dev) { struct emac_depentry deps[EMAC_DEP_COUNT]; @@ -2431,18 +2397,13 @@ static int emac_wait_deps(struct emac_instance *dev) deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph; if (dev->blist && dev->blist > emac_boot_list) deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu; - bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier); - wait_event_timeout(emac_probe_wait, - emac_check_deps(dev, deps), - EMAC_PROBE_DEP_TIMEOUT); - bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier); - err = emac_check_deps(dev, deps) ? 
0 : -ENODEV; + err = emac_check_deps(dev, deps); for (i = 0; i < EMAC_DEP_COUNT; i++) { of_node_put(deps[i].node); if (err) platform_device_put(deps[i].ofdev); } - if (err == 0) { + if (!err) { dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev; dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev; dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev; @@ -2456,22 +2417,21 @@ static int emac_wait_deps(struct emac_instance *dev) static int emac_read_uint_prop(struct device_node *np, const char *name, u32 *val, int fatal) { - int len; - const u32 *prop = of_get_property(np, name, &len); - if (prop == NULL || len < sizeof(u32)) { + int err; + + err = of_property_read_u32(np, name, val); + if (err) { if (fatal) - printk(KERN_ERR "%pOF: missing %s property\n", - np, name); - return -ENODEV; + pr_err("%pOF: missing %s property", np, name); + return err; } - *val = *prop; return 0; } static void emac_adjust_link(struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); - struct phy_device *phy = dev->phy_dev; + struct phy_device *phy = ndev->phydev; dev->phy.autoneg = phy->autoneg; dev->phy.speed = phy->speed; @@ -2522,22 +2482,20 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy, static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) { struct net_device *ndev = phy->dev; - struct emac_instance *dev = netdev_priv(ndev); phy->autoneg = AUTONEG_ENABLE; phy->advertising = advertise; - return emac_mdio_phy_start_aneg(phy, dev->phy_dev); + return emac_mdio_phy_start_aneg(phy, ndev->phydev); } static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) { struct net_device *ndev = phy->dev; - struct emac_instance *dev = netdev_priv(ndev); phy->autoneg = AUTONEG_DISABLE; phy->speed = speed; phy->duplex = fd; - return emac_mdio_phy_start_aneg(phy, dev->phy_dev); + return emac_mdio_phy_start_aneg(phy, ndev->phydev); } static int emac_mdio_poll_link(struct mii_phy *phy) @@ -2546,20 +2504,19 @@ static int emac_mdio_poll_link(struct mii_phy *phy) struct emac_instance *dev = netdev_priv(ndev); int res; - res = phy_read_status(dev->phy_dev); + res = phy_read_status(ndev->phydev); if (res) { dev_err(&dev->ofdev->dev, "link update failed (%d).", res); return ethtool_op_get_link(ndev); } - return dev->phy_dev->link; + return ndev->phydev->link; } static int emac_mdio_read_link(struct mii_phy *phy) { struct net_device *ndev = phy->dev; - struct emac_instance *dev = netdev_priv(ndev); - struct phy_device *phy_dev = dev->phy_dev; + struct phy_device *phy_dev = ndev->phydev; int res; res = phy_read_status(phy_dev); @@ -2576,10 +2533,9 @@ static int emac_mdio_read_link(struct mii_phy *phy) static int emac_mdio_init_phy(struct mii_phy *phy) { struct net_device *ndev = phy->dev; - struct emac_instance *dev = netdev_priv(ndev); - phy_start(dev->phy_dev); - return phy_init_hw(dev->phy_dev); + phy_start(ndev->phydev); + return phy_init_hw(ndev->phydev); } static const struct mii_phy_ops emac_dt_mdio_phy_ops = { @@ -2593,6 +2549,7 @@ static const struct mii_phy_ops emac_dt_mdio_phy_ops = { static int emac_dt_mdio_probe(struct emac_instance *dev) { struct device_node *mii_np; + struct mii_bus *bus; int res; mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio"); @@ -2606,23 +2563,23 @@ static int emac_dt_mdio_probe(struct emac_instance *dev) goto put_node; } - dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev); - if (!dev->mii_bus) { + bus = devm_mdiobus_alloc(&dev->ofdev->dev); + if (!bus) { res = -ENOMEM; goto put_node; } - dev->mii_bus->priv = dev->ndev; - dev->mii_bus->parent = 
dev->ndev->dev.parent; - dev->mii_bus->name = "emac_mdio"; - dev->mii_bus->read = &emac_mii_bus_read; - dev->mii_bus->write = &emac_mii_bus_write; - dev->mii_bus->reset = &emac_mii_bus_reset; - snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name); - res = of_mdiobus_register(dev->mii_bus, mii_np); + bus->priv = dev->ndev; + bus->parent = dev->ndev->dev.parent; + bus->name = "emac_mdio"; + bus->read = &emac_mii_bus_read; + bus->write = &emac_mii_bus_write; + bus->reset = &emac_mii_bus_reset; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name); + res = devm_of_mdiobus_register(&dev->ofdev->dev, bus, mii_np); if (res) { dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)", - dev->mii_bus->name, res); + bus->name, res); } put_node: @@ -2633,26 +2590,28 @@ static int emac_dt_mdio_probe(struct emac_instance *dev) static int emac_dt_phy_connect(struct emac_instance *dev, struct device_node *phy_handle) { + struct phy_device *phy_dev; + dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def), GFP_KERNEL); if (!dev->phy.def) return -ENOMEM; - dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link, - 0, dev->phy_mode); - if (!dev->phy_dev) { + phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link, 0, + dev->phy_mode); + if (!phy_dev) { dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n"); return -ENODEV; } - dev->phy.def->phy_id = dev->phy_dev->drv->phy_id; - dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask; - dev->phy.def->name = dev->phy_dev->drv->name; + dev->phy.def->phy_id = phy_dev->drv->phy_id; + dev->phy.def->phy_id_mask = phy_dev->drv->phy_id_mask; + dev->phy.def->name = phy_dev->drv->name; dev->phy.def->ops = &emac_dt_mdio_phy_ops; ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features, - dev->phy_dev->supported); - dev->phy.address = dev->phy_dev->mdio.addr; - dev->phy.mode = dev->phy_dev->interface; + phy_dev->supported); + dev->phy.address = phy_dev->mdio.addr; + dev->phy.mode = phy_dev->interface; return 0; } @@ -2668,8 +2627,6 @@ static int emac_dt_phy_probe(struct emac_instance *dev) res = emac_dt_mdio_probe(dev); if (!res) { res = emac_dt_phy_connect(dev, phy_handle); - if (res) - mdiobus_unregister(dev->mii_bus); } } @@ -2708,13 +2665,11 @@ static int emac_init_phy(struct emac_instance *dev) return res; res = of_phy_register_fixed_link(np); - dev->phy_dev = of_phy_find_device(np); - if (res || !dev->phy_dev) { - mdiobus_unregister(dev->mii_bus); + ndev->phydev = of_phy_find_device(np); + if (res || !ndev->phydev) return res ? res : -EINVAL; - } emac_adjust_link(dev->ndev); - put_device(&dev->phy_dev->mdio.dev); + put_device(&ndev->phydev->mdio.dev); } return 0; } @@ -3053,7 +3008,7 @@ static int emac_probe(struct platform_device *ofdev) /* Allocate our net_device structure */ err = -ENOMEM; - ndev = alloc_etherdev(sizeof(struct emac_instance)); + ndev = devm_alloc_etherdev(&ofdev->dev, sizeof(struct emac_instance)); if (!ndev) goto err_gone; @@ -3072,35 +3027,40 @@ static int emac_probe(struct platform_device *ofdev) /* Init various config data based on device-tree */ err = emac_init_config(dev); if (err) - goto err_free; + goto err_gone; - /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */ + /* Get interrupts. 
EMAC irq is mandatory */ dev->emac_irq = irq_of_parse_and_map(np, 0); - dev->wol_irq = irq_of_parse_and_map(np, 1); if (!dev->emac_irq) { printk(KERN_ERR "%pOF: Can't map main interrupt\n", np); err = -ENODEV; - goto err_free; + goto err_gone; + } + + /* Setup error IRQ handler */ + err = devm_request_irq(&ofdev->dev, dev->emac_irq, emac_irq, 0, "EMAC", + dev); + if (err) { + dev_err_probe(&ofdev->dev, err, "failed to request IRQ %d", + dev->emac_irq); + goto err_gone; } + ndev->irq = dev->emac_irq; /* Map EMAC regs */ // TODO : platform_get_resource() and devm_ioremap_resource() - dev->emacp = of_iomap(np, 0); - if (dev->emacp == NULL) { - printk(KERN_ERR "%pOF: Can't map device registers!\n", np); + dev->emacp = devm_of_iomap(&ofdev->dev, np, 0, NULL); + if (!dev->emacp) { + dev_err(&ofdev->dev, "can't map device registers"); err = -ENOMEM; - goto err_irq_unmap; + goto err_gone; } /* Wait for dependent devices */ err = emac_wait_deps(dev); - if (err) { - printk(KERN_ERR - "%pOF: Timeout waiting for dependent devices\n", np); - /* display more info about what's missing ? */ - goto err_reg_unmap; - } + if (err) + goto err_gone; dev->mal = platform_get_drvdata(dev->mal_dev); if (dev->mdio_dev != NULL) dev->mdio_instance = platform_get_drvdata(dev->mdio_dev); @@ -3187,7 +3147,7 @@ static int emac_probe(struct platform_device *ofdev) netif_carrier_off(ndev); - err = register_netdev(ndev); + err = devm_register_netdev(&ofdev->dev, ndev); if (err) { printk(KERN_ERR "%pOF: failed to register net device (%d)!\n", np, err); @@ -3200,10 +3160,6 @@ static int emac_probe(struct platform_device *ofdev) wmb(); platform_set_drvdata(ofdev, dev); - /* There's a new kid in town ! Let's tell everybody */ - wake_up_all(&emac_probe_wait); - - printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n", ndev->name, dev->cell_index, np, ndev->dev_addr); @@ -3232,24 +3188,9 @@ static int emac_probe(struct platform_device *ofdev) mal_unregister_commac(dev->mal, &dev->commac); err_rel_deps: emac_put_deps(dev); - err_reg_unmap: - iounmap(dev->emacp); - err_irq_unmap: - if (dev->wol_irq) - irq_dispose_mapping(dev->wol_irq); - if (dev->emac_irq) - irq_dispose_mapping(dev->emac_irq); - err_free: - free_netdev(ndev); err_gone: - /* if we were on the bootlist, remove us as we won't show up and - * wake up all waiters to notify them in case they were waiting - * on us - */ - if (blist) { + if (blist) *blist = NULL; - wake_up_all(&emac_probe_wait); - } return err; } @@ -3259,8 +3200,6 @@ static void emac_remove(struct platform_device *ofdev) DBG(dev, "remove" NL); - unregister_netdev(dev->ndev); - cancel_work_sync(&dev->reset_work); if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) @@ -3270,26 +3209,11 @@ static void emac_remove(struct platform_device *ofdev) if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) zmii_detach(dev->zmii_dev, dev->zmii_port); - if (dev->phy_dev) - phy_disconnect(dev->phy_dev); - - if (dev->mii_bus) - mdiobus_unregister(dev->mii_bus); - busy_phy_map &= ~(1 << dev->phy.address); DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map); mal_unregister_commac(dev->mal, &dev->commac); emac_put_deps(dev); - - iounmap(dev->emacp); - - if (dev->wol_irq) - irq_dispose_mapping(dev->wol_irq); - if (dev->emac_irq) - irq_dispose_mapping(dev->emac_irq); - - free_netdev(dev->ndev); } /* XXX Features in here should be replaced by properties... 
*/ @@ -3328,16 +3252,15 @@ static void __init emac_make_bootlist(void) /* Collect EMACs */ while((np = of_find_all_nodes(np)) != NULL) { - const u32 *idx; + u32 idx; if (of_match_node(emac_match, np) == NULL) continue; if (of_property_read_bool(np, "unused")) continue; - idx = of_get_property(np, "cell-index", NULL); - if (idx == NULL) + if (of_property_read_u32(np, "cell-index", &idx)) continue; - cell_indices[i] = *idx; + cell_indices[i] = idx; emac_boot_list[i++] = of_node_get(np); if (i >= EMAC_BOOT_LIST_SIZE) { of_node_put(np); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 295516b0766266..89fa1683ec3ca1 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -188,10 +188,6 @@ struct emac_instance { struct emac_instance *mdio_instance; struct mutex mdio_lock; - /* Device-tree based phy configuration */ - struct mii_bus *mii_bus; - struct phy_device *phy_dev; - /* ZMII infos if any */ u32 zmii_ph; u32 zmii_port; @@ -400,7 +396,7 @@ static inline int emac_has_feature(struct emac_instance *dev, ((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \ ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1))) -static inline u32 *emac_xaht_base(struct emac_instance *dev) +static inline u32 __iomem *emac_xaht_base(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; int offset; @@ -413,10 +409,10 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev) else offset = offsetof(struct emac_regs, u0.emac4.iaht1); - return (u32 *)((ptrdiff_t)p + offset); + return (u32 __iomem *)((__force ptrdiff_t)p + offset); } -static inline u32 *emac_gaht_base(struct emac_instance *dev) +static inline u32 __iomem *emac_gaht_base(struct emac_instance *dev) { /* GAHT registers always come after an identical number of * IAHT registers. 
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 4c9d9badd69867..b619a3ec245b24 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -39,7 +39,8 @@ #include "ibmveth.h" static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); -static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); +static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter, + bool reuse); static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); static struct kobj_type ktype_veth_pool; @@ -226,6 +227,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, for (i = 0; i < count; ++i) { union ibmveth_buf_desc desc; + free_index = pool->consumer_index; + index = pool->free_map[free_index]; + skb = NULL; + + BUG_ON(index == IBM_VETH_INVALID_MAP); + + /* are we allocating a new buffer or recycling an old one */ + if (pool->skbuff[index]) + goto reuse; + skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); if (!skb) { @@ -235,46 +246,46 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, break; } - free_index = pool->consumer_index; - pool->consumer_index++; - if (pool->consumer_index >= pool->size) - pool->consumer_index = 0; - index = pool->free_map[free_index]; - - BUG_ON(index == IBM_VETH_INVALID_MAP); - BUG_ON(pool->skbuff[index] != NULL); - dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, pool->buff_size, DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) goto failure; - pool->free_map[free_index] = IBM_VETH_INVALID_MAP; pool->dma_addr[index] = dma_addr; pool->skbuff[index] = skb; - correlator = ((u64)pool->index << 32) | index; - *(u64 *)skb->data = correlator; - - desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; - desc.fields.address = dma_addr; - if (rx_flush) { unsigned int len = min(pool->buff_size, - adapter->netdev->mtu + - IBMVETH_BUFF_OH); + adapter->netdev->mtu + + IBMVETH_BUFF_OH); ibmveth_flush_buffer(skb->data, len); } +reuse: + dma_addr = pool->dma_addr[index]; + desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; + desc.fields.address = dma_addr; + + correlator = ((u64)pool->index << 32) | index; + *(u64 *)pool->skbuff[index]->data = correlator; + lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); if (lpar_rc != H_SUCCESS) { + netdev_warn(adapter->netdev, + "%sadd_logical_lan failed %lu\n", + skb ? 
"" : "When recycling: ", lpar_rc); goto failure; - } else { - buffers_added++; - adapter->replenish_add_buff_success++; } + + pool->free_map[free_index] = IBM_VETH_INVALID_MAP; + pool->consumer_index++; + if (pool->consumer_index >= pool->size) + pool->consumer_index = 0; + + buffers_added++; + adapter->replenish_add_buff_success++; } mb(); @@ -282,17 +293,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, return; failure: - pool->free_map[free_index] = index; - pool->skbuff[index] = NULL; - if (pool->consumer_index == 0) - pool->consumer_index = pool->size - 1; - else - pool->consumer_index--; - if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) + + if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr)) dma_unmap_single(&adapter->vdev->dev, pool->dma_addr[index], pool->buff_size, DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); + dev_kfree_skb_any(pool->skbuff[index]); + pool->skbuff[index] = NULL; adapter->replenish_add_buff_failure++; mb(); @@ -365,7 +372,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, /* remove a buffer from a pool */ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, - u64 correlator) + u64 correlator, bool reuse) { unsigned int pool = correlator >> 32; unsigned int index = correlator & 0xffffffffUL; @@ -376,15 +383,23 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, BUG_ON(index >= adapter->rx_buff_pool[pool].size); skb = adapter->rx_buff_pool[pool].skbuff[index]; - BUG_ON(skb == NULL); - adapter->rx_buff_pool[pool].skbuff[index] = NULL; + /* if we are going to reuse the buffer then keep the pointers around + * but mark index as available. replenish will see the skb pointer and + * assume it is to be recycled. + */ + if (!reuse) { + /* remove the skb pointer to mark free. 
actual freeing is done + * by upper level networking after gro_recieve + */ + adapter->rx_buff_pool[pool].skbuff[index] = NULL; - dma_unmap_single(&adapter->vdev->dev, - adapter->rx_buff_pool[pool].dma_addr[index], - adapter->rx_buff_pool[pool].buff_size, - DMA_FROM_DEVICE); + dma_unmap_single(&adapter->vdev->dev, + adapter->rx_buff_pool[pool].dma_addr[index], + adapter->rx_buff_pool[pool].buff_size, + DMA_FROM_DEVICE); + } free_index = adapter->rx_buff_pool[pool].producer_index; adapter->rx_buff_pool[pool].producer_index++; @@ -411,51 +426,13 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada return adapter->rx_buff_pool[pool].skbuff[index]; } -/* recycle the current buffer on the rx queue */ -static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) +static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter, + bool reuse) { - u32 q_index = adapter->rx_queue.index; - u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; - unsigned int pool = correlator >> 32; - unsigned int index = correlator & 0xffffffffUL; - union ibmveth_buf_desc desc; - unsigned long lpar_rc; - int ret = 1; - - BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); - BUG_ON(index >= adapter->rx_buff_pool[pool].size); - - if (!adapter->rx_buff_pool[pool].active) { - ibmveth_rxq_harvest_buffer(adapter); - ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); - goto out; - } - - desc.fields.flags_len = IBMVETH_BUF_VALID | - adapter->rx_buff_pool[pool].buff_size; - desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; - - lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); - - if (lpar_rc != H_SUCCESS) { - netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " - "during recycle rc=%ld", lpar_rc); - ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); - ret = 0; - } - - if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { - adapter->rx_queue.index = 0; - adapter->rx_queue.toggle = !adapter->rx_queue.toggle; - } - -out: - return ret; -} + u64 cor; -static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) -{ - ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); + cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; + ibmveth_remove_buffer_from_pool(adapter, cor, reuse); if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { adapter->rx_queue.index = 0; @@ -1337,6 +1314,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) unsigned long lpar_rc; u16 mss = 0; +restart_poll: while (frames_processed < budget) { if (!ibmveth_rxq_pending_buffer(adapter)) break; @@ -1346,7 +1324,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) wmb(); /* suggested by larson1 */ adapter->rx_invalid_buffer++; netdev_dbg(netdev, "recycling invalid buffer\n"); - ibmveth_rxq_recycle_buffer(adapter); + ibmveth_rxq_harvest_buffer(adapter, true); } else { struct sk_buff *skb, *new_skb; int length = ibmveth_rxq_frame_length(adapter); @@ -1379,11 +1357,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) if (rx_flush) ibmveth_flush_buffer(skb->data, length + offset); - if (!ibmveth_rxq_recycle_buffer(adapter)) - kfree_skb(skb); + ibmveth_rxq_harvest_buffer(adapter, true); skb = new_skb; } else { - ibmveth_rxq_harvest_buffer(adapter); + ibmveth_rxq_harvest_buffer(adapter, false); skb_reserve(skb, offset); } @@ -1420,24 +1397,25 @@ static int 
ibmveth_poll(struct napi_struct *napi, int budget) ibmveth_replenish_task(adapter); - if (frames_processed < budget) { - napi_complete_done(napi, frames_processed); + if (frames_processed == budget) + goto out; - /* We think we are done - reenable interrupts, - * then check once more to make sure we are done. - */ - lpar_rc = h_vio_signal(adapter->vdev->unit_address, - VIO_IRQ_ENABLE); + if (!napi_complete_done(napi, frames_processed)) + goto out; - BUG_ON(lpar_rc != H_SUCCESS); + /* We think we are done - reenable interrupts, + * then check once more to make sure we are done. + */ + lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); + BUG_ON(lpar_rc != H_SUCCESS); - if (ibmveth_rxq_pending_buffer(adapter) && - napi_schedule(napi)) { - lpar_rc = h_vio_signal(adapter->vdev->unit_address, - VIO_IRQ_DISABLE); - } + if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) { + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_DISABLE); + goto restart_poll; } +out: return frames_processed; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 23ebeb143987ff..87e693a814331a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -117,6 +117,7 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb); static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter); static void flush_reset_queue(struct ibmvnic_adapter *adapter); +static void print_subcrq_error(struct device *dev, int rc, const char *func); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -2140,63 +2141,49 @@ static int ibmvnic_close(struct net_device *netdev) } /** - * build_hdr_data - creates L2/L3/L4 header data buffer + * get_hdr_lens - fills list of L2/L3/L4 hdr lens * @hdr_field: bitfield determining needed headers * @skb: socket buffer - * @hdr_len: array of header lengths - * @hdr_data: buffer to write the header to + * @hdr_len: array of header lengths to be filled * * Reads hdr_field to determine which headers are needed by firmware. * Builds a buffer containing these headers. Saves individual header * lengths and total buffer length to be used to build descriptors. 
+ * + * Return: total len of all headers */ -static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, - int *hdr_len, u8 *hdr_data) +static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb, + int *hdr_len) { int len = 0; - u8 *hdr; - if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) - hdr_len[0] = sizeof(struct vlan_ethhdr); - else - hdr_len[0] = sizeof(struct ethhdr); + + if ((hdr_field >> 6) & 1) { + hdr_len[0] = skb_mac_header_len(skb); + len += hdr_len[0]; + } + + if ((hdr_field >> 5) & 1) { + hdr_len[1] = skb_network_header_len(skb); + len += hdr_len[1]; + } + + if (!((hdr_field >> 4) & 1)) + return len; if (skb->protocol == htons(ETH_P_IP)) { - hdr_len[1] = ip_hdr(skb)->ihl * 4; if (ip_hdr(skb)->protocol == IPPROTO_TCP) hdr_len[2] = tcp_hdrlen(skb); else if (ip_hdr(skb)->protocol == IPPROTO_UDP) hdr_len[2] = sizeof(struct udphdr); } else if (skb->protocol == htons(ETH_P_IPV6)) { - hdr_len[1] = sizeof(struct ipv6hdr); if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) hdr_len[2] = tcp_hdrlen(skb); else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) hdr_len[2] = sizeof(struct udphdr); - } else if (skb->protocol == htons(ETH_P_ARP)) { - hdr_len[1] = arp_hdr_len(skb->dev); - hdr_len[2] = 0; } - memset(hdr_data, 0, 120); - if ((hdr_field >> 6) & 1) { - hdr = skb_mac_header(skb); - memcpy(hdr_data, hdr, hdr_len[0]); - len += hdr_len[0]; - } - - if ((hdr_field >> 5) & 1) { - hdr = skb_network_header(skb); - memcpy(hdr_data + len, hdr, hdr_len[1]); - len += hdr_len[1]; - } - - if ((hdr_field >> 4) & 1) { - hdr = skb_transport_header(skb); - memcpy(hdr_data + len, hdr, hdr_len[2]); - len += hdr_len[2]; - } - return len; + return len + hdr_len[2]; } /** @@ -2209,12 +2196,14 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, * * Creates header and, if needed, header extension descriptors and * places them in a descriptor array, scrq_arr + * + * Return: Number of header descs */ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, union sub_crq *scrq_arr) { - union sub_crq hdr_desc; + union sub_crq *hdr_desc; int tmp_len = len; int num_descs = 0; u8 *data, *cur; @@ -2223,28 +2212,26 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, while (tmp_len > 0) { cur = hdr_data + len - tmp_len; - memset(&hdr_desc, 0, sizeof(hdr_desc)); - if (cur != hdr_data) { - data = hdr_desc.hdr_ext.data; + hdr_desc = &scrq_arr[num_descs]; + if (num_descs) { + data = hdr_desc->hdr_ext.data; tmp = tmp_len > 29 ? 29 : tmp_len; - hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; - hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; - hdr_desc.hdr_ext.len = tmp; + hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD; + hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC; + hdr_desc->hdr_ext.len = tmp; } else { - data = hdr_desc.hdr.data; + data = hdr_desc->hdr.data; tmp = tmp_len > 24 ? 
24 : tmp_len; - hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; - hdr_desc.hdr.type = IBMVNIC_HDR_DESC; - hdr_desc.hdr.len = tmp; - hdr_desc.hdr.l2_len = (u8)hdr_len[0]; - hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); - hdr_desc.hdr.l4_len = (u8)hdr_len[2]; - hdr_desc.hdr.flag = hdr_field << 1; + hdr_desc->hdr.first = IBMVNIC_CRQ_CMD; + hdr_desc->hdr.type = IBMVNIC_HDR_DESC; + hdr_desc->hdr.len = tmp; + hdr_desc->hdr.l2_len = (u8)hdr_len[0]; + hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); + hdr_desc->hdr.l4_len = (u8)hdr_len[2]; + hdr_desc->hdr.flag = hdr_field << 1; } memcpy(data, cur, tmp); tmp_len -= tmp; - *scrq_arr = hdr_desc; - scrq_arr++; num_descs++; } @@ -2267,13 +2254,11 @@ static void build_hdr_descs_arr(struct sk_buff *skb, int *num_entries, u8 hdr_field) { int hdr_len[3] = {0, 0, 0}; - u8 hdr_data[140] = {0}; int tot_len; - tot_len = build_hdr_data(hdr_field, skb, hdr_len, - hdr_data); - *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, - indir_arr + 1); + tot_len = get_hdr_lens(hdr_field, skb, hdr_len); + *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb), + tot_len, hdr_len, indir_arr + 1); } static int ibmvnic_xmit_workarounds(struct sk_buff *skb, @@ -2350,8 +2335,29 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, } } +static int send_subcrq_direct(struct ibmvnic_adapter *adapter, + u64 remote_handle, u64 *entry) +{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + int rc; + + /* Make sure the hypervisor sees the complete request */ + dma_wmb(); + rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, + cpu_to_be64(remote_handle), + cpu_to_be64(entry[0]), cpu_to_be64(entry[1]), + cpu_to_be64(entry[2]), cpu_to_be64(entry[3])); + + if (rc) + print_subcrq_error(dev, rc, __func__); + + return rc; +} + static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, - struct ibmvnic_sub_crq_queue *tx_scrq) + struct ibmvnic_sub_crq_queue *tx_scrq, + bool indirect) { struct ibmvnic_ind_xmit_queue *ind_bufp; u64 dma_addr; @@ -2366,7 +2372,13 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, if (!entries) return 0; - rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); + + if (indirect) + rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); + else + rc = send_subcrq_direct(adapter, handle, + (u64 *)ind_bufp->indir_arr); + if (rc) ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); else @@ -2397,6 +2409,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned long lpar_rc; union sub_crq tx_crq; unsigned int offset; + bool use_scrq_send_direct = false; int num_entries = 1; unsigned char *dst; int bufidx = 0; @@ -2424,7 +2437,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_dropped++; tx_send_failed++; ret = NETDEV_TX_OK; - lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); if (lpar_rc != H_SUCCESS) goto tx_err; goto out; @@ -2442,7 +2455,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; - lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); if (lpar_rc != H_SUCCESS) goto tx_err; goto out; @@ -2456,6 +2469,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) memset(dst, 0, tx_pool->buf_size); data_dma_addr = ltb->addr + offset; + /* if we are 
going to send_subcrq_direct this then we need to + * update the checksum before copying the data into ltb. Essentially + * these packets force disable CSO so that we can guarantee that + * FW does not need header info and we can send direct. + */ + if (!skb_is_gso(skb) && !ind_bufp->index && !netdev_xmit_more()) { + use_scrq_send_direct = true; + if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_help(skb)) + use_scrq_send_direct = false; + } + if (skb_shinfo(skb)->nr_frags) { int cur, i; @@ -2475,9 +2500,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) skb_copy_from_linear_data(skb, dst, skb->len); } - /* post changes to long_term_buff *dst before VIOS accessing it */ - dma_wmb(); - tx_pool->consumer_index = (tx_pool->consumer_index + 1) % tx_pool->num_buffers; @@ -2540,6 +2562,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); hdrs += 2; + } else if (use_scrq_send_direct) { + /* See above comment, CSO disabled with direct xmit */ + tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD); + ind_bufp->index = 1; + tx_buff->num_entries = 1; + netdev_tx_sent_queue(txq, skb->len); + ind_bufp->indir_arr[0] = tx_crq; + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false); + if (lpar_rc != H_SUCCESS) + goto tx_err; + + goto early_exit; } if ((*hdrs >> 7) & 1) @@ -2549,7 +2583,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_buff->num_entries = num_entries; /* flush buffer if current entry can not fit */ if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { - lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); if (lpar_rc != H_SUCCESS) goto tx_flush_err; } @@ -2557,15 +2591,17 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) indir_arr[0] = tx_crq; memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], num_entries * sizeof(struct ibmvnic_generic_scrq)); + ind_bufp->index += num_entries; if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more() && ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { - lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); if (lpar_rc != H_SUCCESS) goto tx_err; } +early_exit: if (atomic_add_return(num_entries, &tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { netdev_dbg(netdev, "Stopping queue %d\n", queue_num); @@ -3527,9 +3563,8 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) } if (adapter->state != VNIC_CLOSING && - ((atomic_read(&adapter->rx_pool[scrq_num].available) < - adapter->req_rx_add_entries_per_subcrq / 2) || - frames_processed < budget)) + (atomic_read(&adapter->rx_pool[scrq_num].available) < + adapter->req_rx_add_entries_per_subcrq / 2)) replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); if (frames_processed < budget) { if (napi_complete_done(napi, frames_processed)) { @@ -4169,20 +4204,17 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *scrq) { struct device *dev = &adapter->vdev->dev; + int num_packets = 0, total_bytes = 0; struct ibmvnic_tx_pool *tx_pool; struct ibmvnic_tx_buff *txbuff; struct netdev_queue *txq; union sub_crq *next; - int index; - int i; + int index, i; restart_loop: while (pending_scrq(adapter, scrq)) { unsigned int pool = scrq->pool_index; int num_entries = 0; - int total_bytes = 0; - int num_packets = 
0; - next = ibmvnic_next_scrq(adapter, scrq); for (i = 0; i < next->tx_comp.num_comps; i++) { index = be32_to_cpu(next->tx_comp.correlators[i]); @@ -4218,8 +4250,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, /* remove tx_comp scrq*/ next->tx_comp.first = 0; - txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); - netdev_tx_completed_queue(txq, num_packets, total_bytes); if (atomic_sub_return(num_entries, &scrq->used) <= (adapter->req_tx_entries_per_subcrq / 2) && @@ -4244,6 +4274,9 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, goto restart_loop; } + txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); + netdev_tx_completed_queue(txq, num_packets, total_bytes); + return 0; } diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index aa139b67a55bbf..3a5bbda235cbb2 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -146,7 +146,7 @@ #include #include #include -#include +#include #define DRV_NAME "e100" diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 360ee26557f770..f103249b12facf 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6671,8 +6671,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { /* enable wakeup by the PHY */ retval = e1000_init_phy_wakeup(adapter, wufc); - if (retval) - return retval; + if (retval) { + e_err("Failed to enable wakeup\n"); + goto skip_phy_configurations; + } } else { /* enable wakeup by the MAC */ ew32(WUFC, wufc); @@ -6693,8 +6695,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) * or broadcast. */ retval = e1000_enable_ulp_lpt_lp(hw, !runtime); - if (retval) - return retval; + if (retval) { + e_err("Failed to enable ULP\n"); + goto skip_phy_configurations; + } } } @@ -6726,6 +6730,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) hw->phy.ops.release(hw); } +skip_phy_configurations: /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ @@ -6968,15 +6973,13 @@ static int e1000e_pm_suspend(struct device *dev) e1000e_pm_freeze(dev); rc = __e1000_shutdown(pdev, false); - if (rc) { - e1000e_pm_thaw(dev); - } else { + if (!rc) { /* Introduce S0ix implementation */ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS) e1000e_s0ix_entry_flow(adapter); } - return rc; + return 0; } static int e1000e_pm_resume(struct device *dev) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d546567e0286e4..2089a0e172bfaf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -4,6 +4,7 @@ #ifndef _I40E_H_ #define _I40E_H_ +#include #include #include #include diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 1d0d2e526adb45..f2506511bbfff4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2555,16 +2555,12 @@ static int i40e_get_ts_info(struct net_device *dev, return ethtool_op_get_ts_info(dev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (pf->ptp_clock) info->phc_index = ptp_clock_index(pf->ptp_clock); - else - info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); @@ -5641,6 +5637,26 @@ static int i40e_get_module_eeprom(struct net_device *netdev, return 0; } +static void i40e_eee_capability_to_kedata_supported(__le16 eee_capability_, + unsigned long *supported) +{ + const int eee_capability = le16_to_cpu(eee_capability_); + static const int lut[] = { + ETHTOOL_LINK_MODE_100baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + }; + + linkmode_zero(supported); + for (unsigned int i = ARRAY_SIZE(lut); i--; ) + if (eee_capability & BIT(i + 1)) + linkmode_set_bit(lut[i], supported); +} + static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -5648,7 +5664,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata) struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - int status = 0; + int status; /* Get initial PHY capabilities */ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL); @@ -5661,11 +5677,18 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata) if (phy_cfg.eee_capability == 0) return -EOPNOTSUPP; + i40e_eee_capability_to_kedata_supported(phy_cfg.eee_capability, + edata->supported); + linkmode_copy(edata->lp_advertised, edata->supported); + /* Get current configuration */ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL); if (status) return -EAGAIN; + linkmode_zero(edata->advertised); + if (phy_cfg.eee_capability) + linkmode_copy(edata->advertised, edata->supported); edata->eee_enabled = !!phy_cfg.eee_capability; edata->tx_lpi_enabled = pf->stats.tx_lpi_status; @@ -5681,10 +5704,11 @@ static int i40e_is_eee_param_supported(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ethtool_not_used { - u32 value; + bool value; const char *name; } param[] = { 
- {edata->tx_lpi_timer, "tx-timer"}, + {!!(edata->advertised[0] & ~edata->supported[0]), "advertise"}, + {!!edata->tx_lpi_timer, "tx-timer"}, {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"} }; int i; @@ -5710,7 +5734,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata) struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; __le16 eee_capability; - int status = 0; + int status; /* Deny parameters we don't support */ if (i40e_is_eee_param_supported(netdev, edata)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index cbcfada7b357a8..03205eb9f925f3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7264,6 +7264,26 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) } #endif /* CONFIG_I40E_DCB */ +static void i40e_print_link_message_eee(struct i40e_vsi *vsi, + const char *speed, const char *fc) +{ + struct ethtool_keee kedata; + + memzero_explicit(&kedata, sizeof(kedata)); + if (vsi->netdev->ethtool_ops->get_eee) + vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &kedata); + + if (!linkmode_empty(kedata.supported)) + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n", + speed, fc, + kedata.eee_enabled ? "Enabled" : "Disabled"); + else + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", + speed, fc); +} + /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message @@ -7395,9 +7415,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", speed, req_fec, fec, an, fc); } else { - netdev_info(vsi->netdev, - "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", - speed, fc); + i40e_print_link_message_eee(vsi, speed, fc); } } diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 23a6557fc3db2b..48cd1d06761c86 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -33,6 +33,7 @@ #include #include #include +#include #include "iavf_type.h" #include @@ -393,6 +394,8 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_VLAN_V2) #define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_CRC) +#define TC_U32_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_TC_U32) #define VLAN_V2_FILTERING_ALLOWED(_a) \ (VLAN_V2_ALLOWED((_a)) && \ ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \ @@ -437,6 +440,7 @@ struct iavf_adapter { #define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */ u16 fdir_active_fltr; + u16 raw_fdir_active_fltr; struct list_head fdir_list_head; spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */ @@ -444,6 +448,32 @@ struct iavf_adapter { spinlock_t adv_rss_lock; /* protect the RSS management list */ }; +/* Must be called with fdir_fltr_lock lock held */ +static inline bool iavf_fdir_max_reached(struct iavf_adapter *adapter) +{ + return adapter->fdir_active_fltr + adapter->raw_fdir_active_fltr >= + IAVF_MAX_FDIR_FILTERS; +} + +static inline void +iavf_inc_fdir_active_fltr(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr) +{ + if (iavf_is_raw_fdir(fltr)) + adapter->raw_fdir_active_fltr++; + else + adapter->fdir_active_fltr++; +} + +static inline void +iavf_dec_fdir_active_fltr(struct iavf_adapter *adapter, + struct 
iavf_fdir_fltr *fltr) +{ + if (iavf_is_raw_fdir(fltr)) + adapter->raw_fdir_active_fltr--; + else + adapter->fdir_active_fltr--; +} /* Ethtool Private Flags */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 52273f7eab2c4e..74a1e9fe182128 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -927,7 +927,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, spin_lock_bh(&adapter->fdir_fltr_lock); - rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); + rule = iavf_find_fdir_fltr(adapter, false, fsp->location); if (!rule) { ret = -EINVAL; goto release_lock; @@ -1072,6 +1072,9 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd, spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(fltr, &adapter->fdir_list_head, list) { + if (iavf_is_raw_fdir(fltr)) + continue; + if (cnt == cmd->rule_cnt) { val = -EMSGSIZE; goto release_lock; @@ -1263,15 +1266,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx return -EINVAL; spin_lock_bh(&adapter->fdir_fltr_lock); - if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { - spin_unlock_bh(&adapter->fdir_fltr_lock); - dev_err(&adapter->pdev->dev, - "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", - IAVF_MAX_FDIR_FILTERS); - return -ENOSPC; - } - - if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { + if (iavf_find_fdir_fltr(adapter, false, fsp->location)) { dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n"); spin_unlock_bh(&adapter->fdir_fltr_lock); return -EEXIST; @@ -1291,23 +1286,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx } err = iavf_add_fdir_fltr_info(adapter, fsp, fltr); - if (err) - goto ret; - - spin_lock_bh(&adapter->fdir_fltr_lock); - iavf_fdir_list_add_fltr(adapter, fltr); - adapter->fdir_active_fltr++; - - if (adapter->link_up) - fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; - else - fltr->state = IAVF_FDIR_FLTR_INACTIVE; - spin_unlock_bh(&adapter->fdir_fltr_lock); + if (!err) + err = iavf_fdir_add_fltr(adapter, fltr); - if (adapter->link_up) - iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER); -ret: - if (err && fltr) + if (err) kfree(fltr); mutex_unlock(&adapter->crit_lock); @@ -1324,34 +1306,11 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; - struct iavf_fdir_fltr *fltr = NULL; - int err = 0; if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; - spin_lock_bh(&adapter->fdir_fltr_lock); - fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); - if (fltr) { - if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { - fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; - } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { - list_del(&fltr->list); - kfree(fltr); - adapter->fdir_active_fltr--; - fltr = NULL; - } else { - err = -EBUSY; - } - } else if (adapter->fdir_active_fltr) { - err = -EINVAL; - } - spin_unlock_bh(&adapter->fdir_fltr_lock); - - if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) - iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER); - - return err; + return iavf_fdir_del_fltr(adapter, false, fsp->location); } /** diff --git 
a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 2d47b0b4640e4a..a1b3b44cc14a17 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -796,6 +796,9 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr * spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(tmp, &adapter->fdir_list_head, list) { + if (iavf_is_raw_fdir(fltr)) + continue; + if (tmp->flow_type != fltr->flow_type) continue; @@ -815,33 +818,52 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr * } /** - * iavf_find_fdir_fltr_by_loc - find filter with location + * iavf_find_fdir_fltr - find FDIR filter * @adapter: pointer to the VF adapter structure - * @loc: location to find. + * @is_raw: filter type, is raw (tc u32) or not (ethtool) + * @data: data to ID the filter, type dependent * - * Returns pointer to Flow Director filter if found or null + * Returns: pointer to Flow Director filter if found or NULL. Lock must be held. */ -struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc) +struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter, + bool is_raw, u32 data) { struct iavf_fdir_fltr *rule; - list_for_each_entry(rule, &adapter->fdir_list_head, list) - if (rule->loc == loc) + list_for_each_entry(rule, &adapter->fdir_list_head, list) { + if ((is_raw && rule->cls_u32_handle == data) || + (!is_raw && rule->loc == data)) return rule; + } return NULL; } /** - * iavf_fdir_list_add_fltr - add a new node to the flow director filter list + * iavf_fdir_add_fltr - add a new node to the flow director filter list * @adapter: pointer to the VF adapter structure * @fltr: filter node to add to structure + * + * Return: 0 on success or negative errno on failure. */ -void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +int iavf_fdir_add_fltr(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr) { struct iavf_fdir_fltr *rule, *parent = NULL; + spin_lock_bh(&adapter->fdir_fltr_lock); + if (iavf_fdir_max_reached(adapter)) { + spin_unlock_bh(&adapter->fdir_fltr_lock); + dev_err(&adapter->pdev->dev, + "Unable to add Flow Director filter (limit (%u) reached)\n", + IAVF_MAX_FDIR_FILTERS); + return -ENOSPC; + } + list_for_each_entry(rule, &adapter->fdir_list_head, list) { + if (iavf_is_raw_fdir(fltr)) + break; + if (rule->loc >= fltr->loc) break; parent = rule; @@ -851,4 +873,55 @@ void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr list_add(&fltr->list, &parent->list); else list_add(&fltr->list, &adapter->fdir_list_head); + + iavf_inc_fdir_active_fltr(adapter, fltr); + + if (adapter->link_up) + fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; + else + fltr->state = IAVF_FDIR_FLTR_INACTIVE; + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (adapter->link_up) + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER); + + return 0; +} + +/** + * iavf_fdir_del_fltr - delete a flow director filter from the list + * @adapter: pointer to the VF adapter structure + * @is_raw: filter type, is raw (tc u32) or not (ethtool) + * @data: data to ID the filter, type dependent + * + * Return: 0 on success or negative errno on failure. 
+ */ +int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data) +{ + struct iavf_fdir_fltr *fltr = NULL; + int err = 0; + + spin_lock_bh(&adapter->fdir_fltr_lock); + fltr = iavf_find_fdir_fltr(adapter, is_raw, data); + + if (fltr) { + if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { + fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; + } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { + list_del(&fltr->list); + iavf_dec_fdir_active_fltr(adapter, fltr); + kfree(fltr); + fltr = NULL; + } else { + err = -EBUSY; + } + } else if (adapter->fdir_active_fltr) { + err = -EINVAL; + } + + if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER); + + spin_unlock_bh(&adapter->fdir_fltr_lock); + return err; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h index d31bd923ba8cbf..e84a5351162f7d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -117,17 +117,26 @@ struct iavf_fdir_fltr { u32 flow_id; + u32 cls_u32_handle; /* for FDIR added via tc u32 */ u32 loc; /* Rule location inside the flow table */ u32 q_index; struct virtchnl_fdir_add vc_add_msg; }; +static inline bool iavf_is_raw_fdir(struct iavf_fdir_fltr *fltr) +{ + return !fltr->vc_add_msg.rule_cfg.proto_hdrs.count; +} + int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); -void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); -struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc); +int iavf_fdir_add_fltr(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr); +int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data); +struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter, + bool is_raw, u32 data); #endif /* _IAVF_FDIR_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index ff11bafb3b4fdd..f782402cd78986 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -4013,7 +4013,7 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter, /** * iavf_setup_tc_cls_flower - flower classifier offloads - * @adapter: board private structure + * @adapter: pointer to iavf adapter structure * @cls_flower: pointer to flow_cls_offload struct with flow info */ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, @@ -4031,6 +4031,154 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, } } +/** + * iavf_add_cls_u32 - Add U32 classifier offloads + * @adapter: pointer to iavf adapter structure + * @cls_u32: pointer to tc_cls_u32_offload struct with flow info + * + * Return: 0 on success or negative errno on failure. 
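For ETH_P_IP filters the function below seeds the raw match buffer with a synthetic Ethernet header, so every u32 key offset is shifted by ETH_HLEN before being copied into proto_hdrs. A small illustration of that layout, assuming a single key on the IPv4 destination address (the address, mask and example_* name are hypothetical):

	static void example_u32_key_layout(struct virtchnl_proto_hdrs *hdrs)
	{
		__be32 dst = cpu_to_be32(0xc0a80101);	/* 192.168.1.1 */
		__be32 mask = cpu_to_be32(0xffffffff);
		int off = ETH_HLEN + 16;	/* off_base + key->off (IPv4 daddr) */

		memcpy(&hdrs->raw.spec[off], &dst, sizeof(dst));
		memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
		hdrs->raw.pkt_len = off + sizeof(dst);
	}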
+ */ +static int iavf_add_cls_u32(struct iavf_adapter *adapter, + struct tc_cls_u32_offload *cls_u32) +{ + struct netlink_ext_ack *extack = cls_u32->common.extack; + struct virtchnl_fdir_rule *rule_cfg; + struct virtchnl_filter_action *vact; + struct virtchnl_proto_hdrs *hdrs; + struct ethhdr *spec_h, *mask_h; + const struct tc_action *act; + struct iavf_fdir_fltr *fltr; + struct tcf_exts *exts; + unsigned int q_index; + int i, status = 0; + int off_base = 0; + + if (cls_u32->knode.link_handle) { + NL_SET_ERR_MSG_MOD(extack, "Linking not supported"); + return -EOPNOTSUPP; + } + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) + return -ENOMEM; + + rule_cfg = &fltr->vc_add_msg.rule_cfg; + hdrs = &rule_cfg->proto_hdrs; + hdrs->count = 0; + + /* The parser lib at the PF expects the packet starting with MAC hdr */ + switch (ntohs(cls_u32->common.protocol)) { + case ETH_P_802_3: + break; + case ETH_P_IP: + spec_h = (struct ethhdr *)hdrs->raw.spec; + mask_h = (struct ethhdr *)hdrs->raw.mask; + spec_h->h_proto = htons(ETH_P_IP); + mask_h->h_proto = htons(0xFFFF); + off_base += ETH_HLEN; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported"); + status = -EOPNOTSUPP; + goto free_alloc; + } + + for (i = 0; i < cls_u32->knode.sel->nkeys; i++) { + __be32 val, mask; + int off; + + off = off_base + cls_u32->knode.sel->keys[i].off; + val = cls_u32->knode.sel->keys[i].val; + mask = cls_u32->knode.sel->keys[i].mask; + + if (off >= sizeof(hdrs->raw.spec)) { + NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed."); + status = -EINVAL; + goto free_alloc; + } + + memcpy(&hdrs->raw.spec[off], &val, sizeof(val)); + memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask)); + hdrs->raw.pkt_len = off + sizeof(val); + } + + /* Only one action is allowed */ + rule_cfg->action_set.count = 1; + vact = &rule_cfg->action_set.actions[0]; + exts = cls_u32->knode.exts; + + tcf_exts_for_each_action(i, act, exts) { + /* FDIR queue */ + if (is_tcf_skbedit_rx_queue_mapping(act)) { + q_index = tcf_skbedit_rx_queue_mapping(act); + if (q_index >= adapter->num_active_queues) { + status = -EINVAL; + goto free_alloc; + } + + vact->type = VIRTCHNL_ACTION_QUEUE; + vact->act_conf.queue.index = q_index; + break; + } + + /* Drop */ + if (is_tcf_gact_shot(act)) { + vact->type = VIRTCHNL_ACTION_DROP; + break; + } + + /* Unsupported action */ + NL_SET_ERR_MSG_MOD(extack, "Unsupported action."); + status = -EOPNOTSUPP; + goto free_alloc; + } + + fltr->vc_add_msg.vsi_id = adapter->vsi.id; + fltr->cls_u32_handle = cls_u32->knode.handle; + return iavf_fdir_add_fltr(adapter, fltr); + +free_alloc: + kfree(fltr); + return status; +} + +/** + * iavf_del_cls_u32 - Delete U32 classifier offloads + * @adapter: pointer to iavf adapter structure + * @cls_u32: pointer to tc_cls_u32_offload struct with flow info + * + * Return: 0 on success or negative errno on failure. + */ +static int iavf_del_cls_u32(struct iavf_adapter *adapter, + struct tc_cls_u32_offload *cls_u32) +{ + return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle); +} + +/** + * iavf_setup_tc_cls_u32 - U32 filter offloads + * @adapter: pointer to iavf adapter structure + * @cls_u32: pointer to tc_cls_u32_offload struct with flow info + * + * Return: 0 on success or negative errno on failure. 
+ */ +static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter, + struct tc_cls_u32_offload *cls_u32) +{ + if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter)) + return -EOPNOTSUPP; + + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return iavf_add_cls_u32(adapter, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return iavf_del_cls_u32(adapter, cls_u32); + default: + return -EOPNOTSUPP; + } +} + /** * iavf_setup_tc_block_cb - block callback for tc * @type: type of offload @@ -4050,6 +4198,8 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, switch (type) { case TC_SETUP_CLSFLOWER: return iavf_setup_tc_cls_flower(cb_priv, type_data); + case TC_SETUP_CLSU32: + return iavf_setup_tc_cls_u32(cb_priv, type_data); default: return -EOPNOTSUPP; } @@ -4332,8 +4482,8 @@ static void iavf_disable_fdir(struct iavf_adapter *adapter) fdir->state == IAVF_FDIR_FLTR_INACTIVE) { /* Delete filters not registered in PF */ list_del(&fdir->list); + iavf_dec_fdir_active_fltr(adapter, fdir); kfree(fdir); - adapter->fdir_active_fltr--; } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST || fdir->state == IAVF_FDIR_FLTR_ACTIVE) { @@ -4843,9 +4993,11 @@ int iavf_process_config(struct iavf_adapter *adapter) /* get HW VLAN features that can be toggled */ hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter); - /* Enable cloud filter if ADQ is supported */ - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) + /* Enable HW TC offload if ADQ or tc U32 is supported */ + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ || + TC_U32_SUPPORT(adapter)) hw_features |= NETIF_F_HW_TC; + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) hw_features |= NETIF_F_GSO_UDP_L4; diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 1e543f6a7c3033..7e810b65380ca3 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -142,6 +142,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | + VIRTCHNL_VF_OFFLOAD_TC_U32 | VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VIRTCHNL_VF_OFFLOAD_CRC | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | @@ -1961,8 +1962,8 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter) * list on PF is already cleared after a reset */ list_del(&f->list); + iavf_dec_fdir_active_fltr(adapter, f); kfree(f); - adapter->fdir_active_fltr--; } } spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -2135,8 +2136,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, dev_err(&adapter->pdev->dev, "%s\n", msg); list_del(&fdir->list); + iavf_dec_fdir_active_fltr(adapter, fdir); kfree(fdir); - adapter->fdir_active_fltr--; } } spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -2451,8 +2452,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, list) { if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { - dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", - fdir->loc); + if (!iavf_is_raw_fdir(fdir)) + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", + fdir->loc); + else + dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n", + TC_U32_USERHTID(fdir->cls_u32_handle)); fdir->state = IAVF_FDIR_FLTR_ACTIVE; fdir->flow_id = add_fltr->flow_id; } else { 
@@ -2460,8 +2465,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, add_fltr->status); iavf_print_fdir_fltr(adapter, fdir); list_del(&fdir->list); + iavf_dec_fdir_active_fltr(adapter, fdir); kfree(fdir); - adapter->fdir_active_fltr--; } } } @@ -2479,11 +2484,15 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || del_fltr->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { - dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", - fdir->loc); + if (!iavf_is_raw_fdir(fdir)) + dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", + fdir->loc); + else + dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n", + TC_U32_USERHTID(fdir->cls_u32_handle)); list_del(&fdir->list); + iavf_dec_fdir_active_fltr(adapter, fdir); kfree(fdir); - adapter->fdir_active_fltr--; } else { fdir->state = IAVF_FDIR_FLTR_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 03500e28ac99c9..3307d551f431a4 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -28,9 +28,13 @@ ice-y := ice_main.o \ ice_vlan_mode.o \ ice_flex_pipe.o \ ice_flow.o \ + ice_parser.o \ + ice_parser_rt.o \ ice_idc.o \ devlink/devlink.o \ devlink/devlink_port.o \ + ice_sf_eth.o \ + ice_sf_vsi_vlan_ops.o \ ice_ddp.o \ ice_fw_update.o \ ice_lag.o \ diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index 810a901d7afd83..415445cefdb2aa 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -6,9 +6,11 @@ #include "ice.h" #include "ice_lib.h" #include "devlink.h" +#include "devlink_port.h" #include "ice_eswitch.h" #include "ice_fw_update.h" #include "ice_dcb_lib.h" +#include "ice_sf_eth.h" /* context for devlink info version reporting */ struct ice_info_ctx { @@ -744,6 +746,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node struct ice_sched_node *tc_node, struct ice_pf *pf) { struct devlink_rate *rate_node = NULL; + struct ice_dynamic_port *sf; struct ice_vf *vf; int i; @@ -755,6 +758,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node /* create root node */ rate_node = devl_rate_node_create(devlink, node, node->name, NULL); } else if (node->vsi_handle && + pf->vsi[node->vsi_handle]->type == ICE_VSI_VF && pf->vsi[node->vsi_handle]->vf) { vf = pf->vsi[node->vsi_handle]->vf; if (!vf->devlink_port.devlink_rate) @@ -763,6 +767,16 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node */ devl_rate_leaf_create(&vf->devlink_port, node, node->parent->rate_node); + } else if (node->vsi_handle && + pf->vsi[node->vsi_handle]->type == ICE_VSI_SF && + pf->vsi[node->vsi_handle]->sf) { + sf = pf->vsi[node->vsi_handle]->sf; + if (!sf->devlink_port.devlink_rate) + /* leaf nodes doesn't have children + * so we don't set rate_node + */ + devl_rate_leaf_create(&sf->devlink_port, node, + node->parent->rate_node); } else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF && node->parent->rate_node) { rate_node = devl_rate_node_create(devlink, node, node->name, @@ -1277,8 +1291,12 @@ static const struct devlink_ops ice_devlink_ops = { .rate_leaf_parent_set = ice_devlink_set_parent, 
.rate_node_parent_set = ice_devlink_set_parent, + + .port_new = ice_devlink_port_new, }; +static const struct devlink_ops ice_sf_devlink_ops; + static int ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) @@ -1561,6 +1579,34 @@ struct ice_pf *ice_allocate_pf(struct device *dev) return devlink_priv(devlink); } +/** + * ice_allocate_sf - Allocate devlink and return SF structure pointer + * @dev: the device to allocate for + * @pf: pointer to the PF structure + * + * Allocate a devlink instance for SF. + * + * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error + */ +struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf) +{ + struct devlink *devlink; + int err; + + devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv), + dev); + if (!devlink) + return ERR_PTR(-ENOMEM); + + err = devl_nested_devlink_set(priv_to_devlink(pf), devlink); + if (err) { + devlink_free(devlink); + return ERR_PTR(err); + } + + return devlink_priv(devlink); +} + /** * ice_devlink_register - Register devlink interface for this PF * @pf: the PF to register the devlink for. diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h index d291c0e2e17bfe..1af3b0763fbbaf 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.h +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h @@ -5,6 +5,7 @@ #define _ICE_DEVLINK_H_ struct ice_pf *ice_allocate_pf(struct device *dev); +struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf); void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c index 62ef8e2fb5f1ba..928c8bdb66494a 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c @@ -5,6 +5,9 @@ #include "ice.h" #include "devlink.h" +#include "devlink_port.h" +#include "ice_lib.h" +#include "ice_fltr.h" static int ice_active_port_option = -1; @@ -485,3 +488,506 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf) devl_rate_leaf_destroy(&vf->devlink_port); devl_port_unregister(&vf->devlink_port); } + +/** + * ice_devlink_create_sf_dev_port - Register virtual port for a subfunction + * @sf_dev: the subfunction device to create a devlink port for + * + * Register virtual flavour devlink port for the subfunction auxiliary device + * created after activating a dynamically added devlink port. + * + * Return: zero on success or an error code on failure. + */ +int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev) +{ + struct devlink_port_attrs attrs = {}; + struct ice_dynamic_port *dyn_port; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + + dyn_port = sf_dev->dyn_port; + vsi = dyn_port->vsi; + + devlink_port = &sf_dev->priv->devlink_port; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL; + + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(sf_dev->priv); + + return devl_port_register(devlink, devlink_port, vsi->idx); +} + +/** + * ice_devlink_destroy_sf_dev_port - Destroy virtual port for a subfunction + * @sf_dev: the subfunction device to create a devlink port for + * + * Unregisters the virtual port associated with this subfunction. 
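Note that a subfunction ends up with two devlink ports: the PCI_SF-flavour port registered on the PF's devlink by ice_devlink_create_sf_port() further below, and this virtual-flavour port registered on the subfunction's own nested devlink instance (the one allocated by ice_allocate_sf() earlier in this patch). A minimal sketch of the expected pairing, assuming these helpers are driven from the SF auxiliary device probe/remove path in ice_sf_eth.c, which is not part of this hunk:

	static int example_sf_dev_probe(struct ice_sf_dev *sf_dev)
	{
		/* register the virtual port on the SF's own devlink instance */
		return ice_devlink_create_sf_dev_port(sf_dev);
	}

	static void example_sf_dev_remove(struct ice_sf_dev *sf_dev)
	{
		ice_devlink_destroy_sf_dev_port(sf_dev);
	}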
+ */ +void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev) +{ + devl_port_unregister(&sf_dev->priv->devlink_port); +} + +/** + * ice_activate_dynamic_port - Activate a dynamic port + * @dyn_port: dynamic port instance to activate + * @extack: extack for reporting error messages + * + * Activate the dynamic port based on its flavour. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_activate_dynamic_port(struct ice_dynamic_port *dyn_port, + struct netlink_ext_ack *extack) +{ + int err; + + if (dyn_port->active) + return 0; + + err = ice_sf_eth_activate(dyn_port, extack); + if (err) + return err; + + dyn_port->active = true; + + return 0; +} + +/** + * ice_deactivate_dynamic_port - Deactivate a dynamic port + * @dyn_port: dynamic port instance to deactivate + * + * Undo activation of a dynamic port. + */ +static void ice_deactivate_dynamic_port(struct ice_dynamic_port *dyn_port) +{ + if (!dyn_port->active) + return; + + ice_sf_eth_deactivate(dyn_port); + dyn_port->active = false; +} + +/** + * ice_dealloc_dynamic_port - Deallocate and remove a dynamic port + * @dyn_port: dynamic port instance to deallocate + * + * Free resources associated with a dynamically added devlink port. Will + * deactivate the port if its currently active. + */ +static void ice_dealloc_dynamic_port(struct ice_dynamic_port *dyn_port) +{ + struct devlink_port *devlink_port = &dyn_port->devlink_port; + struct ice_pf *pf = dyn_port->pf; + + ice_deactivate_dynamic_port(dyn_port); + + xa_erase(&pf->sf_nums, devlink_port->attrs.pci_sf.sf); + ice_eswitch_detach_sf(pf, dyn_port); + ice_vsi_free(dyn_port->vsi); + xa_erase(&pf->dyn_ports, dyn_port->vsi->idx); + kfree(dyn_port); +} + +/** + * ice_dealloc_all_dynamic_ports - Deallocate all dynamic devlink ports + * @pf: pointer to the pf structure + */ +void ice_dealloc_all_dynamic_ports(struct ice_pf *pf) +{ + struct ice_dynamic_port *dyn_port; + unsigned long index; + + xa_for_each(&pf->dyn_ports, index, dyn_port) + ice_dealloc_dynamic_port(dyn_port); +} + +/** + * ice_devlink_port_new_check_attr - Check that new port attributes are valid + * @pf: pointer to the PF structure + * @new_attr: the attributes for the new port + * @extack: extack for reporting error messages + * + * Check that the attributes for the new port are valid before continuing to + * allocate the devlink port. + * + * Return: zero on success or an error code on failure. 
+ */ +static int +ice_devlink_port_new_check_attr(struct ice_pf *pf, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack) +{ + if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) { + NL_SET_ERR_MSG_MOD(extack, "Flavour other than pcisf is not supported"); + return -EOPNOTSUPP; + } + + if (new_attr->controller_valid) { + NL_SET_ERR_MSG_MOD(extack, "Setting controller is not supported"); + return -EOPNOTSUPP; + } + + if (new_attr->port_index_valid) { + NL_SET_ERR_MSG_MOD(extack, "Driver does not support user defined port index assignment"); + return -EOPNOTSUPP; + } + + if (new_attr->pfnum != pf->hw.pf_id) { + NL_SET_ERR_MSG_MOD(extack, "Incorrect pfnum supplied"); + return -EINVAL; + } + + if (!pci_msix_can_alloc_dyn(pf->pdev)) { + NL_SET_ERR_MSG_MOD(extack, "Dynamic MSIX-X interrupt allocation is not supported"); + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * ice_devlink_port_del - devlink handler for port delete + * @devlink: pointer to devlink + * @port: devlink port to be deleted + * @extack: pointer to extack + * + * Deletes devlink port and deallocates all resources associated with + * created subfunction. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_del(struct devlink *devlink, struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + ice_dealloc_dynamic_port(dyn_port); + + return 0; +} + +/** + * ice_devlink_port_fn_hw_addr_set - devlink handler for mac address set + * @port: pointer to devlink port + * @hw_addr: hw address to set + * @hw_addr_len: hw address length + * @extack: extack for reporting error messages + * + * Sets mac address for the port, verifies arguments and copies address + * to the subfunction structure. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr, + int hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + + if (dyn_port->attached) { + NL_SET_ERR_MSG_MOD(extack, + "Ethernet address can be change only in detached state"); + return -EBUSY; + } + + if (hw_addr_len != ETH_ALEN || !is_valid_ether_addr(hw_addr)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid ethernet address"); + return -EADDRNOTAVAIL; + } + + ether_addr_copy(dyn_port->hw_addr, hw_addr); + + return 0; +} + +/** + * ice_devlink_port_fn_hw_addr_get - devlink handler for mac address get + * @port: pointer to devlink port + * @hw_addr: hw address to set + * @hw_addr_len: hw address length + * @extack: extack for reporting error messages + * + * Returns mac address for the port. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr, + int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + + ether_addr_copy(hw_addr, dyn_port->hw_addr); + *hw_addr_len = ETH_ALEN; + + return 0; +} + +/** + * ice_devlink_port_fn_state_set - devlink handler for port state set + * @port: pointer to devlink port + * @state: state to set + * @extack: extack for reporting error messages + * + * Activates or deactivates the port. + * + * Return: zero on success or an error code on failure. 
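The state set/get handlers below back the standard devlink port-function state control, so an existing subfunction port can be activated or deactivated from userspace. An illustrative invocation (the PCI address and port index are placeholders, not taken from this series):

	devlink port function set pci/0000:06:00.0/1 state active
	devlink port function set pci/0000:06:00.0/1 state inactive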
+ */ +static int +ice_devlink_port_fn_state_set(struct devlink_port *port, + enum devlink_port_fn_state state, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + + switch (state) { + case DEVLINK_PORT_FN_STATE_ACTIVE: + return ice_activate_dynamic_port(dyn_port, extack); + + case DEVLINK_PORT_FN_STATE_INACTIVE: + ice_deactivate_dynamic_port(dyn_port); + break; + } + + return 0; +} + +/** + * ice_devlink_port_fn_state_get - devlink handler for port state get + * @port: pointer to devlink port + * @state: admin configured state of the port + * @opstate: current port operational state + * @extack: extack for reporting error messages + * + * Gets port state. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_fn_state_get(struct devlink_port *port, + enum devlink_port_fn_state *state, + enum devlink_port_fn_opstate *opstate, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + + if (dyn_port->active) + *state = DEVLINK_PORT_FN_STATE_ACTIVE; + else + *state = DEVLINK_PORT_FN_STATE_INACTIVE; + + if (dyn_port->attached) + *opstate = DEVLINK_PORT_FN_OPSTATE_ATTACHED; + else + *opstate = DEVLINK_PORT_FN_OPSTATE_DETACHED; + + return 0; +} + +static const struct devlink_port_ops ice_devlink_port_sf_ops = { + .port_del = ice_devlink_port_del, + .port_fn_hw_addr_get = ice_devlink_port_fn_hw_addr_get, + .port_fn_hw_addr_set = ice_devlink_port_fn_hw_addr_set, + .port_fn_state_get = ice_devlink_port_fn_state_get, + .port_fn_state_set = ice_devlink_port_fn_state_set, +}; + +/** + * ice_reserve_sf_num - Reserve a subfunction number for this port + * @pf: pointer to the pf structure + * @new_attr: devlink port attributes requested + * @extack: extack for reporting error messages + * @sfnum: on success, the sf number reserved + * + * Reserve a subfunction number for this port. Only called for + * DEVLINK_PORT_FLAVOUR_PCI_SF ports. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_reserve_sf_num(struct ice_pf *pf, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, u32 *sfnum) +{ + int err; + + /* If user didn't request an explicit number, pick one */ + if (!new_attr->sfnum_valid) + return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b, + GFP_KERNEL); + + /* Otherwise, check and use the number provided */ + err = xa_insert(&pf->sf_nums, new_attr->sfnum, NULL, GFP_KERNEL); + if (err) { + if (err == -EBUSY) + NL_SET_ERR_MSG_MOD(extack, "Subfunction with given sfnum already exists"); + return err; + } + + *sfnum = new_attr->sfnum; + + return 0; +} + +/** + * ice_devlink_create_sf_port - Register PCI subfunction devlink port + * @dyn_port: the dynamic port instance structure for this subfunction + * + * Register PCI subfunction flavour devlink port for a dynamically added + * subfunction port. + * + * Return: zero on success or an error code on failure. 
+ */ +int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct ice_pf *pf; + + vsi = dyn_port->vsi; + pf = dyn_port->pf; + + devlink_port = &dyn_port->devlink_port; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_SF; + attrs.pci_sf.pf = pf->hw.pf_id; + attrs.pci_sf.sf = dyn_port->sfnum; + + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + return devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_port_sf_ops); +} + +/** + * ice_devlink_destroy_sf_port - Destroy the devlink_port for this SF + * @dyn_port: the dynamic port instance structure for this subfunction + * + * Unregisters the devlink_port structure associated with this SF. + */ +void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port) +{ + devl_rate_leaf_destroy(&dyn_port->devlink_port); + devl_port_unregister(&dyn_port->devlink_port); +} + +/** + * ice_alloc_dynamic_port - Allocate new dynamic port + * @pf: pointer to the pf structure + * @new_attr: devlink port attributes requested + * @extack: extack for reporting error messages + * @devlink_port: index of newly created devlink port + * + * Allocate a new dynamic port instance and prepare it for configuration + * with devlink. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_alloc_dynamic_port(struct ice_pf *pf, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + struct devlink_port **devlink_port) +{ + struct ice_dynamic_port *dyn_port; + struct ice_vsi *vsi; + u32 sfnum; + int err; + + err = ice_reserve_sf_num(pf, new_attr, extack, &sfnum); + if (err) + return err; + + dyn_port = kzalloc(sizeof(*dyn_port), GFP_KERNEL); + if (!dyn_port) { + err = -ENOMEM; + goto unroll_reserve_sf_num; + } + + vsi = ice_vsi_alloc(pf); + if (!vsi) { + NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VSI"); + err = -ENOMEM; + goto unroll_dyn_port_alloc; + } + + dyn_port->vsi = vsi; + dyn_port->pf = pf; + dyn_port->sfnum = sfnum; + eth_random_addr(dyn_port->hw_addr); + + err = xa_insert(&pf->dyn_ports, vsi->idx, dyn_port, GFP_KERNEL); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Port index reservation failed"); + goto unroll_vsi_alloc; + } + + err = ice_eswitch_attach_sf(pf, dyn_port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to attach SF to eswitch"); + goto unroll_xa_insert; + } + + *devlink_port = &dyn_port->devlink_port; + + return 0; + +unroll_xa_insert: + xa_erase(&pf->dyn_ports, vsi->idx); +unroll_vsi_alloc: + ice_vsi_free(vsi); +unroll_dyn_port_alloc: + kfree(dyn_port); +unroll_reserve_sf_num: + xa_erase(&pf->sf_nums, sfnum); + + return err; +} + +/** + * ice_devlink_port_new - devlink handler for the new port + * @devlink: pointer to devlink + * @new_attr: pointer to the port new attributes + * @extack: extack for reporting error messages + * @devlink_port: pointer to a new port + * + * Creates new devlink port, checks new port attributes and reject + * any unsupported parameters, allocates new subfunction for that port. + * + * Return: zero on success or an error code on failure. 
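With ice_devlink_port_new() wired up as the .port_new op in ice_devlink_ops above, subfunction ports can be created and removed with the generic devlink commands. A hedged example of the expected flow (PCI address, sfnum, MAC address and the resulting port index are placeholders):

	devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
	devlink port function set pci/0000:06:00.0/1 hw_addr 00:00:00:00:88:88
	devlink port del pci/0000:06:00.0/1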
+ */ +int +ice_devlink_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + struct devlink_port **devlink_port) +{ + struct ice_pf *pf = devlink_priv(devlink); + int err; + + err = ice_devlink_port_new_check_attr(pf, new_attr, extack); + if (err) + return err; + + return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port); +} diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h index 9223bcdb644428..d60efc340945e3 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h @@ -4,9 +4,55 @@ #ifndef _DEVLINK_PORT_H_ #define _DEVLINK_PORT_H_ +#include "../ice.h" +#include "../ice_sf_eth.h" + +/** + * struct ice_dynamic_port - Track dynamically added devlink port instance + * @hw_addr: the HW address for this port + * @active: true if the port has been activated + * @attached: true it the prot is attached + * @devlink_port: the associated devlink port structure + * @pf: pointer to the PF private structure + * @vsi: the VSI associated with this port + * @repr_id: the representor ID + * @sfnum: the subfunction ID + * @sf_dev: pointer to the subfunction device + * + * An instance of a dynamically added devlink port. Each port flavour + */ +struct ice_dynamic_port { + u8 hw_addr[ETH_ALEN]; + u8 active: 1; + u8 attached: 1; + struct devlink_port devlink_port; + struct ice_pf *pf; + struct ice_vsi *vsi; + unsigned long repr_id; + u32 sfnum; + /* Flavour-specific implementation data */ + union { + struct ice_sf_dev *sf_dev; + }; +}; + +void ice_dealloc_all_dynamic_ports(struct ice_pf *pf); + int ice_devlink_create_pf_port(struct ice_pf *pf); void ice_devlink_destroy_pf_port(struct ice_pf *pf); int ice_devlink_create_vf_port(struct ice_vf *vf); void ice_devlink_destroy_vf_port(struct ice_vf *vf); +int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port); +void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port); +int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev); +void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev); + +#define ice_devlink_port_to_dyn(port) \ + container_of(port, struct ice_dynamic_port, devlink_port) +int +ice_devlink_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + struct devlink_port **devlink_port); #endif /* _DEVLINK_PORT_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index ce8b5505b16da8..d6f80da30decf4 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -451,7 +451,12 @@ struct ice_vsi { struct_group_tagged(ice_vsi_cfg_params, params, struct ice_port_info *port_info; /* back pointer to port_info */ struct ice_channel *ch; /* VSI's channel structure, may be NULL */ - struct ice_vf *vf; /* VF associated with this VSI, may be NULL */ + union { + /* VF associated with this VSI, may be NULL */ + struct ice_vf *vf; + /* SF associated with this VSI, may be NULL */ + struct ice_dynamic_port *sf; + }; u32 flags; /* VSI flags used for rebuild and configuration */ enum ice_vsi_type type; /* the type of the VSI */ ); @@ -652,6 +657,9 @@ struct ice_pf { struct ice_eswitch eswitch; struct ice_esw_br_port *br_port; + struct xarray dyn_ports; + struct xarray sf_nums; + #define ICE_INVALID_AGG_NODE_ID 0 #define ICE_PF_AGG_NODE_ID_START 1 #define ICE_MAX_PF_AGG_NODES 32 @@ -918,6 +926,7 
@@ int ice_vsi_open(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); void ice_set_ethtool_repr_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); +void ice_set_ethtool_sf_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_rxq_count(struct ice_pf *pf); int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked); @@ -1003,6 +1012,14 @@ void ice_unload(struct ice_pf *pf); void ice_adv_lnk_speed_maps_init(void); int ice_init_dev(struct ice_pf *pf); void ice_deinit_dev(struct ice_pf *pf); +int ice_change_mtu(struct net_device *netdev, int new_mtu); +void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue); +int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp); +void ice_set_netdev_features(struct net_device *netdev); +int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); +int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +void ice_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); /** * ice_set_rdma_cap - enable RDMA support diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 66f02988d54964..0be1a98d7cc1b5 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -2632,12 +2632,16 @@ struct ice_aq_desc { /* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ #define ICE_AQ_LG_BUF 512 +#define ICE_AQ_FLAG_DD_S 0 +#define ICE_AQ_FLAG_CMP_S 1 #define ICE_AQ_FLAG_ERR_S 2 #define ICE_AQ_FLAG_LB_S 9 #define ICE_AQ_FLAG_RD_S 10 #define ICE_AQ_FLAG_BUF_S 12 #define ICE_AQ_FLAG_SI_S 13 +#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */ +#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */ #define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */ #define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */ #define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index c158749a80e05a..4a9a6899fc453c 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -325,6 +325,9 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; + case ICE_VSI_SF: + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; + break; default: return; } @@ -540,7 +543,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) ring->rx_buf_len = ring->vsi->rx_buf_len; - if (ring->vsi->type == ICE_VSI_PF) { + if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) { if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 66f29bac783a0c..27208a60cece51 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -10,6 +10,7 @@ #include "ice_type.h" #include "ice_nvm.h" #include "ice_flex_pipe.h" +#include "ice_parser.h" #include #include "ice_switch.h" #include "ice_fdir.h" diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index ffaa6511c45562..e3959ad442a23d 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -99,17 +99,6 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) return -ENOMEM; cq->sq.desc_buf.size = size; - cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, - sizeof(struct ice_sq_cd), GFP_KERNEL); - if (!cq->sq.cmd_buf) { - dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, - cq->sq.desc_buf.va, cq->sq.desc_buf.pa); - cq->sq.desc_buf.va = NULL; - cq->sq.desc_buf.pa = 0; - cq->sq.desc_buf.size = 0; - return -ENOMEM; - } - return 0; } @@ -188,7 +177,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) if (cq->rq_buf_size > ICE_AQ_LG_BUF) desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB); desc->opcode = 0; - /* This is in accordance with Admin queue design, there is no + /* This is in accordance with control queue design, there is no * register for buffer size configuration */ desc->datalen = cpu_to_le16(bi->size); @@ -338,8 +327,6 @@ do { \ (qi)->ring.r.ring##_bi[i].size = 0;\ } \ } \ - /* free the buffer info list */ \ - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ /* free DMA head */ \ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ } while (0) @@ -405,11 +392,11 @@ static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) } /** - * ice_init_rq - initialize ARQ + * ice_init_rq - initialize receive side of a control queue * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * - * The main initialization routine for the Admin Receive (Event) Queue. + * The main initialization routine for Receive side of a control queue. * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_rq_entries @@ -465,7 +452,7 @@ static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) } /** - * ice_shutdown_sq - shutdown the Control ATQ + * ice_shutdown_sq - shutdown the transmit side of a control queue * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * @@ -482,7 +469,7 @@ static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) goto shutdown_sq_out; } - /* Stop firmware AdminQ processing */ + /* Stop processing of the control queue */ wr32(hw, cq->sq.head, 0); wr32(hw, cq->sq.tail, 0); wr32(hw, cq->sq.len, 0); @@ -501,7 +488,7 @@ static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) } /** - * ice_aq_ver_check - Check the reported AQ API version. + * ice_aq_ver_check - Check the reported AQ API version * @hw: pointer to the hardware structure * * Checks if the driver should load on a given AQ API version. @@ -521,14 +508,20 @@ static bool ice_aq_ver_check(struct ice_hw *hw) } else if (hw->api_maj_ver == exp_fw_api_ver_major) { if (hw->api_min_ver > (exp_fw_api_ver_minor + 2)) dev_info(ice_hw_to_dev(hw), - "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n", + hw->api_maj_ver, hw->api_min_ver, + exp_fw_api_ver_major, exp_fw_api_ver_minor); else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor) dev_info(ice_hw_to_dev(hw), - "The driver for the device detected an older version of the NVM image than expected. 
Please update the NVM image.\n"); + "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + exp_fw_api_ver_major, exp_fw_api_ver_minor); } else { /* Major API version is older than expected, log a warning */ dev_info(ice_hw_to_dev(hw), - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + exp_fw_api_ver_major, exp_fw_api_ver_minor); } return true; } @@ -855,7 +848,7 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw) } /** - * ice_clean_sq - cleans Admin send queue (ATQ) + * ice_clean_sq - cleans send side of a control queue * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * @@ -865,21 +858,17 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { struct ice_ctl_q_ring *sq = &cq->sq; u16 ntc = sq->next_to_clean; - struct ice_sq_cd *details; struct ice_aq_desc *desc; desc = ICE_CTL_Q_DESC(*sq, ntc); - details = ICE_CTL_Q_DETAILS(*sq, ntc); while (rd32(hw, cq->sq.head) != ntc) { ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); memset(desc, 0, sizeof(*desc)); - memset(details, 0, sizeof(*details)); ntc++; if (ntc == sq->count) ntc = 0; desc = ICE_CTL_Q_DESC(*sq, ntc); - details = ICE_CTL_Q_DETAILS(*sq, ntc); } sq->next_to_clean = ntc; @@ -887,19 +876,44 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) return ICE_CTL_Q_DESC_UNUSED(sq); } +/** + * ice_ctl_q_str - Convert control queue type to string + * @qtype: the control queue type + * + * Return: A string name for the given control queue type. + */ +static const char *ice_ctl_q_str(enum ice_ctl_q qtype) +{ + switch (qtype) { + case ICE_CTL_Q_UNKNOWN: + return "Unknown CQ"; + case ICE_CTL_Q_ADMIN: + return "AQ"; + case ICE_CTL_Q_MAILBOX: + return "MBXQ"; + case ICE_CTL_Q_SB: + return "SBQ"; + default: + return "Unrecognized CQ"; + } +} + /** * ice_debug_cq * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue * @desc: pointer to control queue descriptor * @buf: pointer to command buffer * @buf_len: max length of buf + * @response: true if this is the writeback response * * Dumps debug log about control command with descriptor contents. 
*/ -static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) +static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq, + void *desc, void *buf, u16 buf_len, bool response) { struct ice_aq_desc *cq_desc = desc; - u16 len; + u16 datalen, flags; if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) && !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask)) @@ -908,48 +922,63 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) if (!desc) return; - len = le16_to_cpu(cq_desc->datalen); + datalen = le16_to_cpu(cq_desc->datalen); + flags = le16_to_cpu(cq_desc->flags); - ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - le16_to_cpu(cq_desc->opcode), - le16_to_cpu(cq_desc->flags), - le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); - ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", + ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1) 0x%08X 0x%08X\n\taddr (h,l) 0x%08X 0x%08X\n", + ice_ctl_q_str(cq->qtype), response ? "Response" : "Command", + le16_to_cpu(cq_desc->opcode), flags, datalen, + le16_to_cpu(cq_desc->retval), le32_to_cpu(cq_desc->cookie_high), - le32_to_cpu(cq_desc->cookie_low)); - ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->cookie_low), le32_to_cpu(cq_desc->params.generic.param0), - le32_to_cpu(cq_desc->params.generic.param1)); - ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->params.generic.param1), le32_to_cpu(cq_desc->params.generic.addr_high), le32_to_cpu(cq_desc->params.generic.addr_low)); - if (buf && cq_desc->datalen != 0) { - ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n"); - if (buf_len < len) - len = buf_len; - - ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len); + /* Dump buffer iff 1) one exists and 2) is either a response indicated + * by the DD and/or CMP flag set or a command with the RD flag set. + */ + if (buf && cq_desc->datalen && + (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) { + char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 "; + + sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ", + le32_to_cpu(cq_desc->params.generic.addr_high), + le32_to_cpu(cq_desc->params.generic.addr_low)); + ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix, + buf, + min_t(u16, buf_len, datalen)); } } /** - * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) + * ice_sq_done - poll until the last send on a control queue has completed * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * - * Returns true if the firmware has processed all descriptors on the - * admin send queue. Returns false if there are still requests pending. + * Use read_poll_timeout to poll the control queue head, checking until it + * matches next_to_use. According to the control queue designers, this has + * better timing reliability than the DD bit. + * + * Return: true if all the descriptors on the send side of a control queue + * are finished processing, false otherwise. */ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - /* AQ designers suggest use of head for better - * timing reliability than DD bit + u32 head; + + /* Wait a short time before the initial check, to allow hardware time + * for completion. 
*/ - return rd32(hw, cq->sq.head) == cq->sq.next_to_use; + udelay(5); + + return !rd32_poll_timeout(hw, cq->sq.head, + head, head == cq->sq.next_to_use, + 20, ICE_CTL_Q_SQ_CMD_TIMEOUT); } /** - * ice_sq_send_cmd - send command to Control Queue (ATQ) + * ice_sq_send_cmd - send command to a control queue * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @desc: prefilled descriptor describing the command @@ -957,8 +986,9 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure * - * This is the main send command routine for the ATQ. It runs the queue, - * cleans the queue, etc. + * Main command for the transmit side of a control queue. It puts the command + * on the queue, bumps the tail, waits for processing of the command, captures + * command status and results, etc. */ int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, @@ -968,8 +998,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_dma_mem *dma_buf = NULL; struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; - struct ice_sq_cd *details; - unsigned long timeout; int status = 0; u16 retval = 0; u32 val = 0; @@ -1013,12 +1041,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, goto sq_send_command_error; } - details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); - if (cd) - *details = *cd; - else - memset(details, 0, sizeof(*details)); - /* Call clean and check queue available function to reclaim the * descriptors that were processed by FW/MBX; the function returns the * number of desc available. The clean function called here could be @@ -1055,7 +1077,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, /* Debug desc and buffer */ ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); - ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); + ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false); (cq->sq.next_to_use)++; if (cq->sq.next_to_use == cq->sq.count) @@ -1063,20 +1085,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, wr32(hw, cq->sq.tail, cq->sq.next_to_use); ice_flush(hw); - /* Wait a short time before initial ice_sq_done() check, to allow - * hardware time for completion. + /* Wait for the command to complete. If it finishes within the + * timeout, copy the descriptor back to temp. 
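The open-coded wait removed just below is replaced by the rd32_poll_timeout()-based ice_sq_done() above, which polls the send-queue head roughly every 20 microseconds; a condensed view of the new wait, assuming rd32_poll_timeout() follows the usual read_poll_timeout() argument order (sleep interval, then total timeout, both in microseconds):

	/* poll cq->sq.head until it reaches next_to_use, or give up;
	 * returns 0 on success, -ETIMEDOUT otherwise
	 */
	err = rd32_poll_timeout(hw, cq->sq.head, head,
				head == cq->sq.next_to_use,
				20, ICE_CTL_Q_SQ_CMD_TIMEOUT);

This is also why ICE_CTL_Q_SQ_CMD_TIMEOUT changes from HZ (one second in jiffies) to USEC_PER_SEC (one second in microseconds) in ice_controlq.h further down.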
*/ - udelay(5); - - timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT; - do { - if (ice_sq_done(hw, cq)) - break; - - usleep_range(100, 150); - } while (time_before(jiffies, timeout)); - - /* if ready, copy the desc back to temp */ if (ice_sq_done(hw, cq)) { memcpy(desc, desc_on_ring, sizeof(*desc)); if (buf) { @@ -1108,12 +1119,11 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); - ice_debug_cq(hw, (void *)desc, buf, buf_size); + ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true); /* save writeback AQ if requested */ - if (details->wb_desc) - memcpy(details->wb_desc, desc_on_ring, - sizeof(*details->wb_desc)); + if (cd && cd->wb_desc) + memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc)); /* update the error if time out occurred */ if (!cmd_completed) { @@ -1154,9 +1164,9 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process * - * This function cleans one Admin Receive Queue element and returns - * the contents through e. It can also return how many events are - * left to process through 'pending'. + * Clean one element from the receive side of a control queue. On return 'e' + * contains contents of the message, and 'pending' contains the number of + * events left to process. */ int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, @@ -1212,7 +1222,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); - ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); + ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message size diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 1d54b1cdb1c5ab..ca97b7365a1b89 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -43,14 +43,13 @@ enum ice_ctl_q { }; /* Control Queue timeout settings - max delay 1s */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT USEC_PER_SEC #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ struct ice_ctl_q_ring { void *dma_head; /* Virtual address to DMA head */ struct ice_dma_mem desc_buf; /* descriptor ring memory */ - void *cmd_buf; /* command buffer memory */ union { struct ice_dma_mem *sq_bi; @@ -80,8 +79,6 @@ struct ice_sq_cd { struct ice_aq_desc *wb_desc; }; -#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i])) - /* rq event information */ struct ice_rq_event_info { struct ice_aq_desc desc; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index a94e7072b57069..a7c51083282400 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi) vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); break; case ICE_VSI_CHNL: + case ICE_VSI_SF: vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi)); vsi->tc_cfg.numtc = 1; break; diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index f182179529b7de..953262b88a586f 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -289,11 +289,11 @@ void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, * indicates a base offset of 10, and the index for the entry is 2, then * section handler function should set the offset to 10 + 2 = 12. */ -static void *ice_pkg_enum_entry(struct ice_seg *ice_seg, - struct ice_pkg_enum *state, u32 sect_type, - u32 *offset, - void *(*handler)(u32 sect_type, void *section, - u32 index, u32 *offset)) +void *ice_pkg_enum_entry(struct ice_seg *ice_seg, + struct ice_pkg_enum *state, u32 sect_type, + u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)) { void *entry; diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h index 622543f08b4313..97f27231747548 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.h +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -261,10 +261,17 @@ struct ice_meta_sect { #define ICE_SID_CDID_KEY_BUILDER_RSS 47 #define ICE_SID_CDID_REDIR_RSS 48 +#define ICE_SID_RXPARSER_CAM 50 +#define ICE_SID_RXPARSER_NOMATCH_CAM 51 +#define ICE_SID_RXPARSER_IMEM 52 #define ICE_SID_RXPARSER_MARKER_PTYPE 55 #define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_RXPARSER_PROTO_GRP 57 #define ICE_SID_RXPARSER_METADATA_INIT 58 #define ICE_SID_TXPARSER_BOOST_TCAM 66 +#define ICE_SID_RXPARSER_MARKER_GRP 72 +#define ICE_SID_RXPARSER_PG_SPILL 76 +#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 #define ICE_SID_XLT0_PE 80 #define ICE_SID_XLT_KEY_BUILDER_PE 81 @@ -276,6 +283,7 @@ struct ice_meta_sect { #define ICE_SID_CDID_KEY_BUILDER_PE 87 #define ICE_SID_CDID_REDIR_PE 88 +#define ICE_SID_RXPARSER_FLAG_REDIR 97 /* Label Metadata section IDs */ #define ICE_SID_LBL_FIRST 0x80000010 #define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 @@ -451,6 +459,11 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); +void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)); void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type); diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index e92be6f130a3da..cd95705d1e7fd6 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -9,6 +9,7 @@ #define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50 #define ICE_DPLL_PIN_IDX_INVALID 0xff #define ICE_DPLL_RCLK_NUM_PER_PF 1 +#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25 /** * enum ice_dpll_pin_type - enumerate ice pin types: @@ -30,6 +31,10 @@ static const char * const pin_type_name[] = { [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input", }; +static const struct dpll_pin_frequency ice_esync_range[] = { + DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ), +}; + /** * ice_dpll_is_reset - check if reset is in progress * @pf: private board structure @@ -394,8 +399,8 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, switch (pin_type) { case ICE_DPLL_PIN_TYPE_INPUT: - ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL, - NULL, &pin->flags[0], + ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, &pin->status, + NULL, NULL, &pin->flags[0], &pin->freq, &pin->phase_adjust); if (ret) goto err; @@ -430,7 +435,7 
@@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, goto err; parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL; - if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) { + if (ICE_AQC_GET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) { pin->state[pf->dplls.eec.dpll_idx] = parent == pf->dplls.eec.dpll_idx ? DPLL_PIN_STATE_CONNECTED : @@ -1098,6 +1103,214 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv, return 0; } +/** + * ice_dpll_output_esync_set - callback for setting embedded sync + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @freq: requested embedded sync frequency + * @extack: error reporting + * + * Dpll subsystem callback. Handler for setting embedded sync frequency value + * on output pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_output_esync_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 freq, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + u8 flags = 0; + int ret; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN) + flags = ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + if (freq == DPLL_PIN_FREQUENCY_1_HZ) { + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) { + ret = 0; + } else { + flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags, + 0, 0, 0); + } + } else { + if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)) { + ret = 0; + } else { + flags &= ~ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags, + 0, 0, 0); + } + } + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_output_esync_get - callback for getting embedded sync config + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @esync: on success holds embedded sync pin properties + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting embedded sync frequency value + * and capabilities on output pin. 
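Put concretely, for a 10 MHz output pin with embedded-sync ability the get callback below reports values along these lines (a summary of the flag handling in this hunk, not additional behavior):

	esync->range     = ice_esync_range;	/* a single 0 .. 1 Hz entry */
	esync->range_num = ARRAY_SIZE(ice_esync_range);
	/* ESYNC_EN set:   freq = DPLL_PIN_FREQUENCY_1_HZ, pulse = 25 (% high time) */
	/* ESYNC_EN clear: freq = 0, pulse = 0 */

Pins not clocked at 10 MHz, or without ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY, return -EOPNOTSUPP instead.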
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_output_esync_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + struct dpll_pin_esync *esync, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY) || + p->freq != DPLL_PIN_FREQUENCY_10_MHZ) { + mutex_unlock(&pf->dplls.lock); + return -EOPNOTSUPP; + } + esync->range = ice_esync_range; + esync->range_num = ARRAY_SIZE(ice_esync_range); + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) { + esync->freq = DPLL_PIN_FREQUENCY_1_HZ; + esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT; + } else { + esync->freq = 0; + esync->pulse = 0; + } + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_input_esync_set - callback for setting embedded sync + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @freq: requested embedded sync frequency + * @extack: error reporting + * + * Dpll subsystem callback. Handler for setting embedded sync frequency value + * on input pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_input_esync_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 freq, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + u8 flags_en = 0; + int ret; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN) + flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + if (freq == DPLL_PIN_FREQUENCY_1_HZ) { + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) { + ret = 0; + } else { + flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0, + flags_en, 0, 0); + } + } else { + if (!(p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)) { + ret = 0; + } else { + flags_en &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0, + flags_en, 0, 0); + } + } + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_input_esync_get - callback for getting embedded sync config + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @esync: on success holds embedded sync pin properties + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting embedded sync frequency value + * and capabilities on input pin. 
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + struct dpll_pin_esync *esync, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (!(p->status & ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP) || + p->freq != DPLL_PIN_FREQUENCY_10_MHZ) { + mutex_unlock(&pf->dplls.lock); + return -EOPNOTSUPP; + } + esync->range = ice_esync_range; + esync->range_num = ARRAY_SIZE(ice_esync_range); + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) { + esync->freq = DPLL_PIN_FREQUENCY_1_HZ; + esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT; + } else { + esync->freq = 0; + esync->pulse = 0; + } + mutex_unlock(&pf->dplls.lock); + + return 0; +} + /** * ice_dpll_rclk_state_on_pin_set - set a state on rclk pin * @pin: pointer to a pin @@ -1222,6 +1435,8 @@ static const struct dpll_pin_ops ice_dpll_input_ops = { .phase_adjust_get = ice_dpll_pin_phase_adjust_get, .phase_adjust_set = ice_dpll_input_phase_adjust_set, .phase_offset_get = ice_dpll_phase_offset_get, + .esync_set = ice_dpll_input_esync_set, + .esync_get = ice_dpll_input_esync_get, }; static const struct dpll_pin_ops ice_dpll_output_ops = { @@ -1232,6 +1447,8 @@ static const struct dpll_pin_ops ice_dpll_output_ops = { .direction_get = ice_dpll_output_direction, .phase_adjust_get = ice_dpll_pin_phase_adjust_get, .phase_adjust_set = ice_dpll_output_phase_adjust_set, + .esync_set = ice_dpll_output_esync_set, + .esync_get = ice_dpll_output_esync_get, }; static const struct dpll_device_ops ice_dpll_ops = { diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h index 93172e93995b94..c320f1bf7d6d66 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.h +++ b/drivers/net/ethernet/intel/ice/ice_dpll.h @@ -31,6 +31,7 @@ struct ice_dpll_pin { struct dpll_pin_properties prop; u32 freq; s32 phase_adjust; + u8 status; }; /** ice_dpll - store info required for DPLL control diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 3cfa071e371858..c0b3e70a7ea30a 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -452,11 +452,9 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf) ice_eswitch_start_all_tx_queues(pf); } -int -ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) +static int +ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id) { - struct devlink *devlink = priv_to_devlink(pf); - struct ice_repr *repr; int err; if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) @@ -470,13 +468,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) ice_eswitch_stop_reprs(pf); - devl_lock(devlink); - repr = ice_repr_add_vf(vf); - devl_unlock(devlink); - if (IS_ERR(repr)) { - err = PTR_ERR(repr); + err = repr->ops.add(repr); + if (err) goto err_create_repr; - } err = ice_eswitch_setup_repr(pf, repr); if (err) @@ -486,7 +480,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) if (err) goto err_xa_alloc; - vf->repr_id = repr->id; + *id = repr->id; ice_eswitch_start_reprs(pf); @@ -495,9 +489,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) err_xa_alloc: ice_eswitch_release_repr(pf, repr); 
err_setup_repr: - devl_lock(devlink); - ice_repr_rem_vf(repr); - devl_unlock(devlink); + repr->ops.rem(repr); err_create_repr: if (xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); @@ -506,14 +498,59 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) return err; } -void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) +/** + * ice_eswitch_attach_vf - attach VF to a eswitch + * @pf: pointer to PF structure + * @vf: pointer to VF structure to be attached + * + * During attaching port representor for VF is created. + * + * Return: zero on success or an error code on failure. + */ +int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id); + struct ice_repr *repr = ice_repr_create_vf(vf); struct devlink *devlink = priv_to_devlink(pf); + int err; - if (!repr) - return; + if (IS_ERR(repr)) + return PTR_ERR(repr); + + devl_lock(devlink); + err = ice_eswitch_attach(pf, repr, &vf->repr_id); + if (err) + ice_repr_destroy(repr); + devl_unlock(devlink); + + return err; +} + +/** + * ice_eswitch_attach_sf - attach SF to a eswitch + * @pf: pointer to PF structure + * @sf: pointer to SF structure to be attached + * + * During attaching port representor for SF is created. + * + * Return: zero on success or an error code on failure. + */ +int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) +{ + struct ice_repr *repr = ice_repr_create_sf(sf); + int err; + if (IS_ERR(repr)) + return PTR_ERR(repr); + + err = ice_eswitch_attach(pf, repr, &sf->repr_id); + if (err) + ice_repr_destroy(repr); + + return err; +} + +static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr) +{ ice_eswitch_stop_reprs(pf); xa_erase(&pf->eswitch.reprs, repr->id); @@ -521,10 +558,12 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) ice_eswitch_disable_switchdev(pf); ice_eswitch_release_repr(pf, repr); - devl_lock(devlink); - ice_repr_rem_vf(repr); + repr->ops.rem(repr); + ice_repr_destroy(repr); if (xa_empty(&pf->eswitch.reprs)) { + struct devlink *devlink = priv_to_devlink(pf); + /* since all port representors are destroyed, there is * no point in keeping the nodes */ @@ -533,9 +572,41 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) } else { ice_eswitch_start_reprs(pf); } +} + +/** + * ice_eswitch_detach_vf - detach VF from a eswitch + * @pf: pointer to PF structure + * @vf: pointer to VF structure to be detached + */ +void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) +{ + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id); + struct devlink *devlink = priv_to_devlink(pf); + + if (!repr) + return; + + devl_lock(devlink); + ice_eswitch_detach(pf, repr); devl_unlock(devlink); } +/** + * ice_eswitch_detach_sf - detach SF from a eswitch + * @pf: pointer to PF structure + * @sf: pointer to SF structure to be detached + */ +void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) +{ + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id); + + if (!repr) + return; + + ice_eswitch_detach(pf, repr); +} + /** * ice_eswitch_get_target - get netdev based on src_vsi from descriptor * @rx_ring: ring used to receive the packet diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index 78fd39a6935d35..20ce32dda69cbe 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -5,11 +5,13 @@ #define _ICE_ESWITCH_H_ #include 
+#include "devlink/devlink_port.h" #ifdef CONFIG_ICE_SWITCHDEV -void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); -int -ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); +void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf); +void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf); +int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf); +int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); int @@ -31,10 +33,20 @@ struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac); void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac); #else /* CONFIG_ICE_SWITCHDEV */ -static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } +static inline void +ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { } + +static inline void +ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { } + +static inline int +ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf) +{ + return -EOPNOTSUPP; +} static inline int -ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) +ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index bc79ba974e4958..d5cc934d135949 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3792,8 +3792,6 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) return ethtool_op_get_ts_info(dev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; @@ -4414,7 +4412,7 @@ ice_repr_get_drvinfo(struct net_device *netdev, { struct ice_repr *repr = ice_netdev_to_repr(netdev); - if (ice_check_vf_ready_for_cfg(repr->vf)) + if (repr->ops.ready(repr)) return; __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi); @@ -4426,8 +4424,7 @@ ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data) struct ice_repr *repr = ice_netdev_to_repr(netdev); /* for port representors only ETH_SS_STATS is supported */ - if (ice_check_vf_ready_for_cfg(repr->vf) || - stringset != ETH_SS_STATS) + if (repr->ops.ready(repr) || stringset != ETH_SS_STATS) return; __ice_get_strings(netdev, stringset, data, repr->src_vsi); @@ -4440,7 +4437,7 @@ ice_repr_get_ethtool_stats(struct net_device *netdev, { struct ice_repr *repr = ice_netdev_to_repr(netdev); - if (ice_check_vf_ready_for_cfg(repr->vf)) + if (repr->ops.ready(repr)) return; __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi); @@ -4725,6 +4722,7 @@ static const struct ethtool_ops ice_ethtool_ops = { ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, .cap_rss_sym_xor_supported = true, + .rxfh_per_ctx_key = true, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, .get_fec_stats = ice_get_fec_stats, diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 20d5db88c99fe5..ed95072ca6e316 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -2981,6 +2981,50 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, } 
/** + * ice_disable_fd_swap - set register appropriately to disable FD SWAP + * @hw: pointer to the HW struct + * @prof_id: profile ID + */ +static void +ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id) +{ + u16 swap_val, fvw_num; + unsigned int i; + + swap_val = ICE_SWAP_VALID; + fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE; + + /* Since the SWAP Flag in the Programming Desc doesn't work, + * here add method to disable the SWAP Option via setting + * certain SWAP and INSET register sets. + */ + for (i = 0; i < fvw_num ; i++) { + u32 raw_swap, raw_in; + unsigned int j; + + raw_swap = 0; + raw_in = 0; + + for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) { + raw_swap |= (swap_val++) << (j * BITS_PER_BYTE); + raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE); + } + + /* write the FDIR swap register set */ + wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap); + + ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n", + prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap); + + /* write the FDIR inset register set */ + wr32(hw, GLQF_FDINSET(prof_id, i), raw_in); + + ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n", + prof_id, i, GLQF_FDINSET(prof_id, i), raw_in); + } +} + +/* * ice_add_prof - add profile * @hw: pointer to the HW struct * @blk: hardware block @@ -2991,6 +3035,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, * @es: extraction sequence (length of array is determined by the block) * @masks: mask for extraction sequence * @symm: symmetric setting for RSS profiles + * @fd_swap: enable/disable FDIR paired src/dst fields swap option * * This function registers a profile, which matches a set of PTYPES with a * particular extraction sequence. While the hardware profile is allocated @@ -3000,7 +3045,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks, bool symm) + struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap) { u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); @@ -3020,7 +3065,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], status = ice_alloc_prof_id(hw, blk, &prof_id); if (status) goto err_ice_add_prof; - if (blk == ICE_BLK_FD) { + if (blk == ICE_BLK_FD && fd_swap) { /* For Flow Director block, the extraction sequence may * need to be altered in the case where there are paired * fields that have no match. This is necessary because @@ -3031,6 +3076,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], status = ice_update_fd_swap(hw, prof_id, es); if (status) goto err_ice_add_prof; + } else if (blk == ICE_BLK_FD) { + ice_disable_fd_swap(hw, prof_id); } status = ice_update_prof_masking(hw, blk, prof_id, masks); if (status) @@ -4098,6 +4145,54 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) return status; } +/** + * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI + * @hw: pointer to the HW struct + * @blk: HW block + * @dest_vsi: dest VSI + * @fdir_vsi: fdir programming VSI + * @hdl: profile handle + * + * Update the hardware tables to enable the FDIR profile indicated by @hdl for + * the VSI specified by @dest_vsi. On success, the flow will be enabled. + * + * Return: 0 on success or negative errno on failure. 
+ */ +int +ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, + u16 dest_vsi, u16 fdir_vsi, u64 hdl) +{ + u16 vsi_num; + int status; + + if (blk != ICE_BLK_FD) + return -EINVAL; + + vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); + status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); + if (status) { + ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n", + status); + return status; + } + + vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi); + status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); + if (status) { + ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n", + status); + goto err; + } + + return 0; + +err: + vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); + ice_rem_prof_id_flow(hw, blk, vsi_num, hdl); + + return status; +} + /** * ice_rem_prof_from_list - remove a profile from list * @hw: pointer to the HW struct diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index b39d7cdc381fa2..90b9b09931221d 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -6,6 +6,8 @@ #include "ice_type.h" +#define ICE_FDIR_REG_SET_SIZE 4 + int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_change_lock(struct ice_hw *hw); @@ -42,13 +44,16 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks, bool symm); + struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap); struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); +int +ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, + u16 dest_vsi, u16 fdir_vsi, u64 hdl); enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index fc2b58f56279d2..d97b751052f221 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -409,6 +409,29 @@ static const u32 ice_ptypes_gtpc_tid[] = { }; /* Packet types for GTPU */ +static const struct ice_ptype_attributes ice_attr_gtpu_session[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, 
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, +}; + static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, @@ -1400,7 +1423,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Add a HW profile for this flow profile */ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, params->attr, params->attr_cnt, params->es, - params->mask, symm); + params->mask, symm, true); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -1523,6 +1546,90 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, return status; } +#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13) +#define FLAG_GTP_EH_PDU BIT_ULL(14) + +#define HI_BYTE_IN_WORD GENMASK(15, 8) +#define LO_BYTE_IN_WORD GENMASK(7, 0) + +#define FLAG_GTPU_MSK \ + (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK) +#define FLAG_GTPU_UP \ + (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK) +#define FLAG_GTPU_DW FLAG_GTP_EH_PDU + +/** + * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info + * @hw: pointer to the HW struct + * @dest_vsi: dest VSI + * @fdir_vsi: fdir programming VSI + * @prof: stores parsed profile info from raw flow + * @blk: classification blk + * + * Return: 0 on success or negative errno on failure. + */ +int +ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi, + struct ice_parser_profile *prof, enum ice_block blk) +{ + u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + struct ice_flow_prof_params *params __free(kfree); + u8 fv_words = hw->blk[blk].es.fvw; + int status; + int i, idx; + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + for (i = 0; i < ICE_MAX_FV_WORDS; i++) { + params->es[i].prot_id = ICE_PROT_INVALID; + params->es[i].off = ICE_FV_OFFSET_INVAL; + } + + for (i = 0; i < prof->fv_num; i++) { + if (hw->blk[blk].es.reverse) + idx = fv_words - i - 1; + else + idx = i; + params->es[idx].prot_id = prof->fv[i].proto_id; + params->es[idx].off = prof->fv[i].offset; + params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) & + HI_BYTE_IN_WORD) | + (((prof->fv[i].msk) >> BITS_PER_BYTE) & + LO_BYTE_IN_WORD); + } + + switch (prof->flags) { + case FLAG_GTPU_DW: + params->attr = ice_attr_gtpu_down; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down); + break; + case FLAG_GTPU_UP: + params->attr = ice_attr_gtpu_up; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up); + break; + default: + if (prof->flags_msk & FLAG_GTPU_MSK) { + params->attr = ice_attr_gtpu_session; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session); + } + break; + } + + status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes, + params->attr, params->attr_cnt, + params->es, params->mask, false, false); + if (status) + return status; + + status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id); + if (status) + ice_rem_prof(hw, blk, id); + + return status; +} + /** * ice_flow_add_prof - Add a flow profile for packet segments and matched fields * @hw: pointer to the HW struct diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 2fd2e0cb483d6d..6cb7bb879c98ea 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -5,6 +5,7 @@ #define _ICE_FLOW_H_ #include "ice_flex_type.h" +#include "ice_parser.h" #define ICE_FLOW_ENTRY_HANDLE_INVAL 0 #define ICE_FLOW_FLD_OFF_INVAL 0xffff @@ -326,6 +327,7 @@ enum ice_rss_cfg_hdr_type { ICE_RSS_ANY_HEADERS }; +struct ice_vsi; struct ice_rss_hash_cfg { u32 addl_hdrs; /* protocol header fields */ u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */ @@ -445,6 +447,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, bool symm, struct ice_flow_prof **prof); int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); int +ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi, + struct ice_parser_profile *prof, enum ice_block blk); +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi, enum ice_flow_priority prio, void *data, u64 *entry_h); diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index f81db6c107c8ef..2702a0da5c3e46 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2018-2019, Intel Corporation. */ -#include +#include #include #include #include diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 2405e5ed912866..06e712cdc3d9ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -7,6 +7,7 @@ #include "ice_lib.h" #include "ice_fltr.h" #include "ice_dcb_lib.h" +#include "ice_type.h" #include "ice_vsi_vlan_ops.h" /** @@ -20,6 +21,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) return "ICE_VSI_PF"; case ICE_VSI_VF: return "ICE_VSI_VF"; + case ICE_VSI_SF: + return "ICE_VSI_SF"; case ICE_VSI_CTRL: return "ICE_VSI_CTRL"; case ICE_VSI_CHNL: @@ -135,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: + case ICE_VSI_SF: case ICE_VSI_CTRL: case ICE_VSI_LB: /* a user could change the values of num_[tr]x_desc using @@ -201,6 +205,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) max_t(int, vsi->alloc_rxq, vsi->alloc_txq)); break; + case ICE_VSI_SF: + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; + vsi->num_q_vectors = 1; + vsi->irq_dyn_alloc = true; + break; case ICE_VSI_VF: if (vf->num_req_qs) vf->num_vf_qs = vf->num_req_qs; @@ -423,7 +433,7 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) * This deallocates the VSI's queue resources, removes it from the PF's * VSI array if necessary, and deallocates the VSI */ -static void ice_vsi_free(struct ice_vsi *vsi) +void ice_vsi_free(struct ice_vsi *vsi) { struct ice_pf *pf = NULL; struct device *dev; @@ -559,6 +569,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) switch (vsi->type) { case ICE_VSI_PF: + case ICE_VSI_SF: /* Setup default MSIX irq handler for VSI */ vsi->irq_handler = ice_msix_clean_rings; break; @@ -595,7 +606,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) * * returns a pointer to a VSI on success, NULL on failure. 
*/ -static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) +struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL; @@ -889,6 +900,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) max_rss_size); vsi->rss_lut_type = ICE_LUT_PF; break; + case ICE_VSI_SF: + vsi->rss_table_size = ICE_LUT_VSI_SIZE; + vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); + vsi->rss_lut_type = ICE_LUT_VSI; + break; case ICE_VSI_VF: /* VF VSI will get a small RSS table. * For VSI_LUT, LUT size should be set to 64 bytes. @@ -1136,6 +1152,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; break; case ICE_VSI_VF: + case ICE_VSI_SF: /* VF VSI will gets a small RSS table which is a VSI LUT type */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; break; @@ -1214,6 +1231,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; + case ICE_VSI_SF: case ICE_VSI_CHNL: ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; break; @@ -2095,6 +2113,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) case ICE_VSI_CHNL: case ICE_VSI_LB: case ICE_VSI_PF: + case ICE_VSI_SF: max_agg_nodes = ICE_MAX_PF_AGG_NODES; agg_node_id_start = ICE_PF_AGG_NODE_ID_START; agg_node_iter = &pf->pf_agg_node[0]; @@ -2264,6 +2283,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi) switch (vsi->type) { case ICE_VSI_CTRL: + case ICE_VSI_SF: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2641,7 +2661,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); - if (vsi->netdev && vsi->type == ICE_VSI_PF) { + if (vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)) { if (netif_running(vsi->netdev)) { if (!locked) rtnl_lock(); @@ -2669,7 +2690,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); - if (vsi->type == ICE_VSI_PF && vsi->netdev) { + if (vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)) { if (netif_running(vsi->netdev)) { if (!locked) rtnl_lock(); @@ -2739,6 +2761,26 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL); } +/** + * ice_napi_add - register NAPI handler for the VSI + * @vsi: VSI for which NAPI handler is to be registered + * + * This function is only called in the driver's load path. Registering the NAPI + * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, + * reset/rebuild, etc.) 
+ */ +void ice_napi_add(struct ice_vsi *vsi) +{ + int v_idx; + + if (!vsi->netdev) + return; + + ice_for_each_q_vector(vsi, v_idx) + netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, + ice_napi_poll); +} + /** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 36d86535695dd5..1a6cfc8693ce47 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -45,6 +45,7 @@ struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); void ice_vsi_set_napi_queues(struct ice_vsi *vsi); +void ice_napi_add(struct ice_vsi *vsi); void ice_vsi_clear_napi_queues(struct ice_vsi *vsi); @@ -59,6 +60,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked); int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags); int ice_vsi_cfg(struct ice_vsi *vsi); +struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf); +void ice_vsi_free(struct ice_vsi *vsi); bool ice_is_reset_in_progress(unsigned long *state); int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ea780d468579fa..eeb48cc48e089f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -15,6 +15,7 @@ #include "ice_dcb_nl.h" #include "devlink/devlink.h" #include "devlink/devlink_port.h" +#include "ice_sf_eth.h" #include "ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the * ice tracepoint functions. This must be done exactly once across the @@ -2974,6 +2975,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) if (avail < cpus / 2) return -ENOMEM; + if (vsi->type == ICE_VSI_SF) + avail = vsi->alloc_txq; + vsi->num_xdp_txq = min_t(u16, avail, cpus); if (vsi->num_xdp_txq < cpus) @@ -3089,14 +3093,14 @@ static int ice_xdp_safe_mode(struct net_device __always_unused *dev, * @dev: netdevice * @xdp: XDP command */ -static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) +int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct ice_netdev_priv *np = netdev_priv(dev); struct ice_vsi *vsi = np->vsi; int ret; - if (vsi->type != ICE_VSI_PF) { - NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) { + NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI"); return -EINVAL; } @@ -3555,26 +3559,6 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) return 0; } -/** - * ice_napi_add - register NAPI handler for the VSI - * @vsi: VSI for which NAPI handler is to be registered - * - * This function is only called in the driver's load path. Registering the NAPI - * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, - * reset/rebuild, etc.) 
- */ -static void ice_napi_add(struct ice_vsi *vsi) -{ - int v_idx; - - if (!vsi->netdev) - return; - - ice_for_each_q_vector(vsi, v_idx) - netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, - ice_napi_poll); -} - /** * ice_set_ops - set netdev and ethtools ops for the given netdev * @vsi: the VSI associated with the new netdev @@ -3608,7 +3592,7 @@ static void ice_set_ops(struct ice_vsi *vsi) * ice_set_netdev_features - set features for the given netdev * @netdev: netdev instance */ -static void ice_set_netdev_features(struct net_device *netdev) +void ice_set_netdev_features(struct net_device *netdev) { struct ice_pf *pf = ice_netdev_to_pf(netdev); bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); @@ -3790,8 +3774,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) * * net_device_ops implementation for adding VLAN IDs */ -static int -ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi_vlan_ops *vlan_ops; @@ -3853,8 +3836,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) * * net_device_ops implementation for removing VLAN IDs */ -static int -ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi_vlan_ops *vlan_ops; @@ -4023,6 +4005,9 @@ static void ice_deinit_pf(struct ice_pf *pf) if (pf->ptp.clock) ptp_clock_unregister(pf->ptp.clock); + + xa_destroy(&pf->dyn_ports); + xa_destroy(&pf->sf_nums); } /** @@ -4116,6 +4101,9 @@ static int ice_init_pf(struct ice_pf *pf) hash_init(pf->vfs.table); ice_mbx_init_snapshot(&pf->hw); + xa_init(&pf->dyn_ports); + xa_init(&pf->sf_nums); + return 0; } @@ -5457,6 +5445,7 @@ static void ice_remove(struct pci_dev *pdev) ice_remove_arfs(pf); devl_lock(priv_to_devlink(pf)); + ice_dealloc_all_dynamic_ports(pf); ice_deinit_devlink(pf); ice_unload(pf); @@ -5944,8 +5933,16 @@ static int __init ice_module_init(void) goto err_dest_lag_wq; } + status = ice_sf_driver_register(); + if (status) { + pr_err("Failed to register SF driver, err %d\n", status); + goto err_sf_driver; + } + return 0; +err_sf_driver: + pci_unregister_driver(&ice_driver); err_dest_lag_wq: destroy_workqueue(ice_lag_wq); ice_debugfs_exit(); @@ -5963,6 +5960,7 @@ module_init(ice_module_init); */ static void __exit ice_module_exit(void) { + ice_sf_driver_unregister(); pci_unregister_driver(&ice_driver); ice_debugfs_exit(); destroy_workqueue(ice_wq); @@ -6764,7 +6762,8 @@ static int ice_up_complete(struct ice_vsi *vsi) if (vsi->port_info && (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && - vsi->netdev && vsi->type == ICE_VSI_PF) { + ((vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)))) { ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); @@ -7122,7 +7121,6 @@ void ice_update_pf_stats(struct ice_pf *pf) * @netdev: network interface device structure * @stats: main device statistics structure */ -static void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -7463,7 +7461,7 @@ int ice_vsi_open(struct ice_vsi *vsi) ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); - if (vsi->type == ICE_VSI_PF) { + if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { /* Notify the stack of the actual 
queue counts. */ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); if (err) @@ -7799,7 +7797,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) * * Returns 0 on success, negative on failure */ -static int ice_change_mtu(struct net_device *netdev, int new_mtu) +int ice_change_mtu(struct net_device *netdev, int new_mtu) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -8223,7 +8221,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, * @netdev: network interface device structure * @txqueue: Tx queue */ -static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) +void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_tx_ring *tx_ring = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h index a2562f04267f23..b9f383494b3fc1 100644 --- a/drivers/net/ethernet/intel/ice/ice_osdep.h +++ b/drivers/net/ethernet/intel/ice/ice_osdep.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #ifndef CONFIG_64BIT #include @@ -23,6 +24,9 @@ #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define rd32_poll_timeout(a, addr, val, cond, delay_us, timeout_us) \ + read_poll_timeout(rd32, val, cond, delay_us, timeout_us, false, a, addr) + #define ice_flush(a) rd32((a), GLGEN_STAT) #define ICE_M(m, s) ((m ## U) << (s)) @@ -39,11 +43,10 @@ struct device *ice_hw_to_dev(struct ice_hw *hw); #define ice_debug(hw, type, fmt, args...) \ dev_dbg(ice_hw_to_dev(hw), fmt, ##args) -#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ - print_hex_dump_debug(KBUILD_MODNAME " ", \ - DUMP_PREFIX_OFFSET, rowsize, \ - groupsize, buf, len, false) -#else +#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ + print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, \ + rowsize, groupsize, buf, len, false) +#else /* CONFIG_DYNAMIC_DEBUG */ #define ice_debug(hw, type, fmt, args...) 
\ do { \ if ((type) & (hw)->debug_mask) \ @@ -51,16 +54,15 @@ do { \ } while (0) #ifdef DEBUG -#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ +#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ do { \ if ((type) & (hw)->debug_mask) \ - print_hex_dump_debug(KBUILD_MODNAME, \ - DUMP_PREFIX_OFFSET, \ + print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET,\ rowsize, groupsize, buf, \ len, false); \ } while (0) -#else -#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ +#else /* DEBUG */ +#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ do { \ struct ice_hw *hw_l = hw; \ if ((type) & (hw_l)->debug_mask) { \ @@ -78,4 +80,10 @@ do { \ #endif /* DEBUG */ #endif /* CONFIG_DYNAMIC_DEBUG */ +#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ + _ice_debug_array(hw, type, KBUILD_MODNAME, rowsize, groupsize, buf, len) + +#define ice_debug_array_w_prefix(hw, type, prefix, buf, len) \ + _ice_debug_array(hw, type, prefix, 16, 1, buf, len) + #endif /* _ICE_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_parser.c b/drivers/net/ethernet/intel/ice/ice_parser.c new file mode 100644 index 00000000000000..664beb64f5570d --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_parser.c @@ -0,0 +1,2430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 Intel Corporation */ + +#include "ice_common.h" + +struct ice_pkg_sect_hdr { + __le16 count; + __le16 offset; +}; + +/** + * ice_parser_sect_item_get - parse an item from a section + * @sect_type: section type + * @section: section object + * @index: index of the item to get + * @offset: dummy as prototype of ice_pkg_enum_entry's last parameter + * + * Return: a pointer to the item or NULL. + */ +static void *ice_parser_sect_item_get(u32 sect_type, void *section, + u32 index, u32 __maybe_unused *offset) +{ + size_t data_off = ICE_SEC_DATA_OFFSET; + struct ice_pkg_sect_hdr *hdr; + size_t size; + + if (!section) + return NULL; + + switch (sect_type) { + case ICE_SID_RXPARSER_IMEM: + size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_METADATA_INIT: + size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_CAM: + size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_PG_SPILL: + size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_NOMATCH_CAM: + size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_NOMATCH_SPILL: + size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_BOOST_TCAM: + size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE; + break; + case ICE_SID_LBL_RXPARSER_TMEM: + data_off = ICE_SEC_LBL_DATA_OFFSET; + size = ICE_SID_LBL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_MARKER_PTYPE: + size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_MARKER_GRP: + size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_PROTO_GRP: + size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_FLAG_REDIR: + size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE; + break; + default: + return NULL; + } + + hdr = section; + if (index >= le16_to_cpu(hdr->count)) + return NULL; + + return section + data_off + index * size; +} + +/** + * ice_parser_create_table - create an item table from a section + * @hw: pointer to the hardware structure + * @sect_type: section type + * @item_size: item size in bytes + * @length: number of items in the table to create + * 
@parse_item: the function to parse the item + * @no_offset: ignore header offset, calculate index from 0 + * + * Return: a pointer to the allocated table or ERR_PTR. + */ +static void * +ice_parser_create_table(struct ice_hw *hw, u32 sect_type, + u32 item_size, u32 length, + void (*parse_item)(struct ice_hw *hw, u16 idx, + void *item, void *data, + int size), bool no_offset) +{ + struct ice_pkg_enum state = {}; + struct ice_seg *seg = hw->seg; + void *table, *data, *item; + u16 idx = 0; + + if (!seg) + return ERR_PTR(-EINVAL); + + table = kzalloc(item_size * length, GFP_KERNEL); + if (!table) + return ERR_PTR(-ENOMEM); + + do { + data = ice_pkg_enum_entry(seg, &state, sect_type, NULL, + ice_parser_sect_item_get); + seg = NULL; + if (data) { + struct ice_pkg_sect_hdr *hdr = state.sect; + + if (!no_offset) + idx = le16_to_cpu(hdr->offset) + + state.entry_idx; + + item = (void *)((uintptr_t)table + idx * item_size); + parse_item(hw, idx, item, data, item_size); + + if (no_offset) + idx++; + } + } while (data); + + return table; +} + +/*** ICE_SID_RXPARSER_IMEM section ***/ +static void ice_imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "boost main:\n"); + dev_info(dev, "\talu0 = %d\n", bm->alu0); + dev_info(dev, "\talu1 = %d\n", bm->alu1); + dev_info(dev, "\talu2 = %d\n", bm->alu2); + dev_info(dev, "\tpg = %d\n", bm->pg); +} + +static void ice_imem_bst_kb_dump(struct ice_hw *hw, + struct ice_bst_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "boost key builder:\n"); + dev_info(dev, "\tpriority = %d\n", kb->prio); + dev_info(dev, "\ttsr_ctrl = %d\n", kb->tsr_ctrl); +} + +static void ice_imem_np_kb_dump(struct ice_hw *hw, + struct ice_np_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "next proto key builder:\n"); + dev_info(dev, "\topc = %d\n", kb->opc); + dev_info(dev, "\tstart_or_reg0 = %d\n", kb->start_reg0); + dev_info(dev, "\tlen_or_reg1 = %d\n", kb->len_reg1); +} + +static void ice_imem_pg_kb_dump(struct ice_hw *hw, + struct ice_pg_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "parse graph key builder:\n"); + dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena); + dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena); + dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena); + dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena); + dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx); + dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx); + dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx); + dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx); + dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx); +} + +static void ice_imem_alu_dump(struct ice_hw *hw, + struct ice_alu *alu, int index) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "alu%d:\n", index); + dev_info(dev, "\topc = %d\n", alu->opc); + dev_info(dev, "\tsrc_start = %d\n", alu->src_start); + dev_info(dev, "\tsrc_len = %d\n", alu->src_len); + dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel); + dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key); + dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id); + dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id); + dev_info(dev, "\tinc0 = %d\n", alu->inc0); + dev_info(dev, "\tinc1 = %d\n", alu->inc1); + dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc); + dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset); + dev_info(dev, "\tbranch_addr = %d\n", 
alu->branch_addr); + dev_info(dev, "\timm = %d\n", alu->imm); + dev_info(dev, "\tdst_start = %d\n", alu->dst_start); + dev_info(dev, "\tdst_len = %d\n", alu->dst_len); + dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm); + dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm); +} + +/** + * ice_imem_dump - dump an imem item info + * @hw: pointer to the hardware structure + * @item: imem item to dump + */ +static void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "index = %d\n", item->idx); + ice_imem_bst_bm_dump(hw, &item->b_m); + ice_imem_bst_kb_dump(hw, &item->b_kb); + dev_info(dev, "pg priority = %d\n", item->pg_prio); + ice_imem_np_kb_dump(hw, &item->np_kb); + ice_imem_pg_kb_dump(hw, &item->pg_kb); + ice_imem_alu_dump(hw, &item->alu0, 0); + ice_imem_alu_dump(hw, &item->alu1, 1); + ice_imem_alu_dump(hw, &item->alu2, 2); +} + +#define ICE_IM_BM_ALU0 BIT(0) +#define ICE_IM_BM_ALU1 BIT(1) +#define ICE_IM_BM_ALU2 BIT(2) +#define ICE_IM_BM_PG BIT(3) + +/** + * ice_imem_bm_init - parse 4 bits of Boost Main + * @bm: pointer to the Boost Main structure + * @data: Boost Main data to be parsed + */ +static void ice_imem_bm_init(struct ice_bst_main *bm, u8 data) +{ + bm->alu0 = FIELD_GET(ICE_IM_BM_ALU0, data); + bm->alu1 = FIELD_GET(ICE_IM_BM_ALU1, data); + bm->alu2 = FIELD_GET(ICE_IM_BM_ALU2, data); + bm->pg = FIELD_GET(ICE_IM_BM_PG, data); +} + +#define ICE_IM_BKB_PRIO GENMASK(7, 0) +#define ICE_IM_BKB_TSR_CTRL BIT(8) + +/** + * ice_imem_bkb_init - parse 10 bits of Boost Main Build + * @bkb: pointer to the Boost Main Build structure + * @data: Boost Main Build data to be parsed + */ +static void ice_imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data) +{ + bkb->prio = FIELD_GET(ICE_IM_BKB_PRIO, data); + bkb->tsr_ctrl = FIELD_GET(ICE_IM_BKB_TSR_CTRL, data); +} + +#define ICE_IM_NPKB_OPC GENMASK(1, 0) +#define ICE_IM_NPKB_S_R0 GENMASK(9, 2) +#define ICE_IM_NPKB_L_R1 GENMASK(17, 10) + +/** + * ice_imem_npkb_init - parse 18 bits of Next Protocol Key Build + * @kb: pointer to the Next Protocol Key Build structure + * @data: Next Protocol Key Build data to be parsed + */ +static void ice_imem_npkb_init(struct ice_np_keybuilder *kb, u32 data) +{ + kb->opc = FIELD_GET(ICE_IM_NPKB_OPC, data); + kb->start_reg0 = FIELD_GET(ICE_IM_NPKB_S_R0, data); + kb->len_reg1 = FIELD_GET(ICE_IM_NPKB_L_R1, data); +} + +#define ICE_IM_PGKB_F0_ENA BIT_ULL(0) +#define ICE_IM_PGKB_F0_IDX GENMASK_ULL(6, 1) +#define ICE_IM_PGKB_F1_ENA BIT_ULL(7) +#define ICE_IM_PGKB_F1_IDX GENMASK_ULL(13, 8) +#define ICE_IM_PGKB_F2_ENA BIT_ULL(14) +#define ICE_IM_PGKB_F2_IDX GENMASK_ULL(20, 15) +#define ICE_IM_PGKB_F3_ENA BIT_ULL(21) +#define ICE_IM_PGKB_F3_IDX GENMASK_ULL(27, 22) +#define ICE_IM_PGKB_AR_IDX GENMASK_ULL(34, 28) + +/** + * ice_imem_pgkb_init - parse 35 bits of Parse Graph Key Build + * @kb: pointer to the Parse Graph Key Build structure + * @data: Parse Graph Key Build data to be parsed + */ +static void ice_imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data) +{ + kb->flag0_ena = FIELD_GET(ICE_IM_PGKB_F0_ENA, data); + kb->flag0_idx = FIELD_GET(ICE_IM_PGKB_F0_IDX, data); + kb->flag1_ena = FIELD_GET(ICE_IM_PGKB_F1_ENA, data); + kb->flag1_idx = FIELD_GET(ICE_IM_PGKB_F1_IDX, data); + kb->flag2_ena = FIELD_GET(ICE_IM_PGKB_F2_ENA, data); + kb->flag2_idx = FIELD_GET(ICE_IM_PGKB_F2_IDX, data); + kb->flag3_ena = FIELD_GET(ICE_IM_PGKB_F3_ENA, data); + kb->flag3_idx = FIELD_GET(ICE_IM_PGKB_F3_IDX, data); + kb->alu_reg_idx = 
FIELD_GET(ICE_IM_PGKB_AR_IDX, data); +} + +#define ICE_IM_ALU_OPC GENMASK_ULL(5, 0) +#define ICE_IM_ALU_SS GENMASK_ULL(13, 6) +#define ICE_IM_ALU_SL GENMASK_ULL(18, 14) +#define ICE_IM_ALU_SXS BIT_ULL(19) +#define ICE_IM_ALU_SXK GENMASK_ULL(23, 20) +#define ICE_IM_ALU_SRID GENMASK_ULL(30, 24) +#define ICE_IM_ALU_DRID GENMASK_ULL(37, 31) +#define ICE_IM_ALU_INC0 BIT_ULL(38) +#define ICE_IM_ALU_INC1 BIT_ULL(39) +#define ICE_IM_ALU_POO GENMASK_ULL(41, 40) +#define ICE_IM_ALU_PO GENMASK_ULL(49, 42) +#define ICE_IM_ALU_BA_S 50 /* offset for the 2nd 64-bits field */ +#define ICE_IM_ALU_BA GENMASK_ULL(57 - ICE_IM_ALU_BA_S, \ + 50 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_IMM GENMASK_ULL(73 - ICE_IM_ALU_BA_S, \ + 58 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_DFE BIT_ULL(74 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_DS GENMASK_ULL(80 - ICE_IM_ALU_BA_S, \ + 75 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_DL GENMASK_ULL(86 - ICE_IM_ALU_BA_S, \ + 81 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_FEI BIT_ULL(87 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_FSI GENMASK_ULL(95 - ICE_IM_ALU_BA_S, \ + 88 - ICE_IM_ALU_BA_S) + +/** + * ice_imem_alu_init - parse 96 bits of ALU entry + * @alu: pointer to the ALU entry structure + * @data: ALU entry data to be parsed + * @off: offset of the ALU entry data + */ +static void ice_imem_alu_init(struct ice_alu *alu, u8 *data, u8 off) +{ + u64 d64; + u8 idd; + + d64 = *((u64 *)data) >> off; + + alu->opc = FIELD_GET(ICE_IM_ALU_OPC, d64); + alu->src_start = FIELD_GET(ICE_IM_ALU_SS, d64); + alu->src_len = FIELD_GET(ICE_IM_ALU_SL, d64); + alu->shift_xlate_sel = FIELD_GET(ICE_IM_ALU_SXS, d64); + alu->shift_xlate_key = FIELD_GET(ICE_IM_ALU_SXK, d64); + alu->src_reg_id = FIELD_GET(ICE_IM_ALU_SRID, d64); + alu->dst_reg_id = FIELD_GET(ICE_IM_ALU_DRID, d64); + alu->inc0 = FIELD_GET(ICE_IM_ALU_INC0, d64); + alu->inc1 = FIELD_GET(ICE_IM_ALU_INC1, d64); + alu->proto_offset_opc = FIELD_GET(ICE_IM_ALU_POO, d64); + alu->proto_offset = FIELD_GET(ICE_IM_ALU_PO, d64); + + idd = (ICE_IM_ALU_BA_S + off) / BITS_PER_BYTE; + off = (ICE_IM_ALU_BA_S + off) % BITS_PER_BYTE; + d64 = *((u64 *)(&data[idd])) >> off; + + alu->branch_addr = FIELD_GET(ICE_IM_ALU_BA, d64); + alu->imm = FIELD_GET(ICE_IM_ALU_IMM, d64); + alu->dedicate_flags_ena = FIELD_GET(ICE_IM_ALU_DFE, d64); + alu->dst_start = FIELD_GET(ICE_IM_ALU_DS, d64); + alu->dst_len = FIELD_GET(ICE_IM_ALU_DL, d64); + alu->flags_extr_imm = FIELD_GET(ICE_IM_ALU_FEI, d64); + alu->flags_start_imm = FIELD_GET(ICE_IM_ALU_FSI, d64); +} + +#define ICE_IMEM_BM_S 0 +#define ICE_IMEM_BKB_S 4 +#define ICE_IMEM_BKB_IDD (ICE_IMEM_BKB_S / BITS_PER_BYTE) +#define ICE_IMEM_BKB_OFF (ICE_IMEM_BKB_S % BITS_PER_BYTE) +#define ICE_IMEM_PGP GENMASK(15, 14) +#define ICE_IMEM_NPKB_S 16 +#define ICE_IMEM_NPKB_IDD (ICE_IMEM_NPKB_S / BITS_PER_BYTE) +#define ICE_IMEM_NPKB_OFF (ICE_IMEM_NPKB_S % BITS_PER_BYTE) +#define ICE_IMEM_PGKB_S 34 +#define ICE_IMEM_PGKB_IDD (ICE_IMEM_PGKB_S / BITS_PER_BYTE) +#define ICE_IMEM_PGKB_OFF (ICE_IMEM_PGKB_S % BITS_PER_BYTE) +#define ICE_IMEM_ALU0_S 69 +#define ICE_IMEM_ALU0_IDD (ICE_IMEM_ALU0_S / BITS_PER_BYTE) +#define ICE_IMEM_ALU0_OFF (ICE_IMEM_ALU0_S % BITS_PER_BYTE) +#define ICE_IMEM_ALU1_S 165 +#define ICE_IMEM_ALU1_IDD (ICE_IMEM_ALU1_S / BITS_PER_BYTE) +#define ICE_IMEM_ALU1_OFF (ICE_IMEM_ALU1_S % BITS_PER_BYTE) +#define ICE_IMEM_ALU2_S 357 +#define ICE_IMEM_ALU2_IDD (ICE_IMEM_ALU2_S / BITS_PER_BYTE) +#define ICE_IMEM_ALU2_OFF (ICE_IMEM_ALU2_S % BITS_PER_BYTE) + +/** + * ice_imem_parse_item - parse 384 bits of IMEM entry + * @hw: pointer to the hardware structure + 
* @idx: index of IMEM entry + * @item: item of IMEM entry + * @data: IMEM entry data to be parsed + * @size: size of IMEM entry + */ +static void ice_imem_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_imem_item *ii = item; + u8 *buf = data; + + ii->idx = idx; + + ice_imem_bm_init(&ii->b_m, *(u8 *)buf); + ice_imem_bkb_init(&ii->b_kb, + *((u16 *)(&buf[ICE_IMEM_BKB_IDD])) >> + ICE_IMEM_BKB_OFF); + + ii->pg_prio = FIELD_GET(ICE_IMEM_PGP, *(u16 *)buf); + + ice_imem_npkb_init(&ii->np_kb, + *((u32 *)(&buf[ICE_IMEM_NPKB_IDD])) >> + ICE_IMEM_NPKB_OFF); + ice_imem_pgkb_init(&ii->pg_kb, + *((u64 *)(&buf[ICE_IMEM_PGKB_IDD])) >> + ICE_IMEM_PGKB_OFF); + + ice_imem_alu_init(&ii->alu0, + &buf[ICE_IMEM_ALU0_IDD], + ICE_IMEM_ALU0_OFF); + ice_imem_alu_init(&ii->alu1, + &buf[ICE_IMEM_ALU1_IDD], + ICE_IMEM_ALU1_OFF); + ice_imem_alu_init(&ii->alu2, + &buf[ICE_IMEM_ALU2_IDD], + ICE_IMEM_ALU2_OFF); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_imem_dump(hw, ii); +} + +/** + * ice_imem_table_get - create an imem table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated IMEM table. + */ +static struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM, + sizeof(struct ice_imem_item), + ICE_IMEM_TABLE_SIZE, + ice_imem_parse_item, false); +} + +/*** ICE_SID_RXPARSER_METADATA_INIT section ***/ +/** + * ice_metainit_dump - dump an metainit item info + * @hw: pointer to the hardware structure + * @item: metainit item to dump + */ +static void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "index = %d\n", item->idx); + + dev_info(dev, "tsr = %d\n", item->tsr); + dev_info(dev, "ho = %d\n", item->ho); + dev_info(dev, "pc = %d\n", item->pc); + dev_info(dev, "pg_rn = %d\n", item->pg_rn); + dev_info(dev, "cd = %d\n", item->cd); + + dev_info(dev, "gpr_a_ctrl = %d\n", item->gpr_a_ctrl); + dev_info(dev, "gpr_a_data_mdid = %d\n", item->gpr_a_data_mdid); + dev_info(dev, "gpr_a_data_start = %d\n", item->gpr_a_data_start); + dev_info(dev, "gpr_a_data_len = %d\n", item->gpr_a_data_len); + dev_info(dev, "gpr_a_id = %d\n", item->gpr_a_id); + + dev_info(dev, "gpr_b_ctrl = %d\n", item->gpr_b_ctrl); + dev_info(dev, "gpr_b_data_mdid = %d\n", item->gpr_b_data_mdid); + dev_info(dev, "gpr_b_data_start = %d\n", item->gpr_b_data_start); + dev_info(dev, "gpr_b_data_len = %d\n", item->gpr_b_data_len); + dev_info(dev, "gpr_b_id = %d\n", item->gpr_b_id); + + dev_info(dev, "gpr_c_ctrl = %d\n", item->gpr_c_ctrl); + dev_info(dev, "gpr_c_data_mdid = %d\n", item->gpr_c_data_mdid); + dev_info(dev, "gpr_c_data_start = %d\n", item->gpr_c_data_start); + dev_info(dev, "gpr_c_data_len = %d\n", item->gpr_c_data_len); + dev_info(dev, "gpr_c_id = %d\n", item->gpr_c_id); + + dev_info(dev, "gpr_d_ctrl = %d\n", item->gpr_d_ctrl); + dev_info(dev, "gpr_d_data_mdid = %d\n", item->gpr_d_data_mdid); + dev_info(dev, "gpr_d_data_start = %d\n", item->gpr_d_data_start); + dev_info(dev, "gpr_d_data_len = %d\n", item->gpr_d_data_len); + dev_info(dev, "gpr_d_id = %d\n", item->gpr_d_id); + + dev_info(dev, "flags = 0x%llx\n", (unsigned long long)(item->flags)); +} + +#define ICE_MI_TSR GENMASK_ULL(7, 0) +#define ICE_MI_HO GENMASK_ULL(16, 8) +#define ICE_MI_PC GENMASK_ULL(24, 17) +#define ICE_MI_PGRN GENMASK_ULL(35, 25) +#define ICE_MI_CD GENMASK_ULL(38, 36) +#define ICE_MI_GAC BIT_ULL(39) +#define ICE_MI_GADM GENMASK_ULL(44, 40) +#define 
ICE_MI_GADS GENMASK_ULL(48, 45) +#define ICE_MI_GADL GENMASK_ULL(53, 49) +#define ICE_MI_GAI GENMASK_ULL(59, 56) +#define ICE_MI_GBC BIT_ULL(60) +#define ICE_MI_GBDM_S 61 /* offset for the 2nd 64-bits field */ +#define ICE_MI_GBDM_IDD (ICE_MI_GBDM_S / BITS_PER_BYTE) +#define ICE_MI_GBDM_OFF (ICE_MI_GBDM_S % BITS_PER_BYTE) + +#define ICE_MI_GBDM_GENMASK_ULL(high, low) \ + GENMASK_ULL((high) - ICE_MI_GBDM_S, (low) - ICE_MI_GBDM_S) +#define ICE_MI_GBDM ICE_MI_GBDM_GENMASK_ULL(65, 61) +#define ICE_MI_GBDS ICE_MI_GBDM_GENMASK_ULL(69, 66) +#define ICE_MI_GBDL ICE_MI_GBDM_GENMASK_ULL(74, 70) +#define ICE_MI_GBI ICE_MI_GBDM_GENMASK_ULL(80, 77) +#define ICE_MI_GCC BIT_ULL(81 - ICE_MI_GBDM_S) +#define ICE_MI_GCDM ICE_MI_GBDM_GENMASK_ULL(86, 82) +#define ICE_MI_GCDS ICE_MI_GBDM_GENMASK_ULL(90, 87) +#define ICE_MI_GCDL ICE_MI_GBDM_GENMASK_ULL(95, 91) +#define ICE_MI_GCI ICE_MI_GBDM_GENMASK_ULL(101, 98) +#define ICE_MI_GDC BIT_ULL(102 - ICE_MI_GBDM_S) +#define ICE_MI_GDDM ICE_MI_GBDM_GENMASK_ULL(107, 103) +#define ICE_MI_GDDS ICE_MI_GBDM_GENMASK_ULL(111, 108) +#define ICE_MI_GDDL ICE_MI_GBDM_GENMASK_ULL(116, 112) +#define ICE_MI_GDI ICE_MI_GBDM_GENMASK_ULL(122, 119) +#define ICE_MI_FLAG_S 123 /* offset for the 3rd 64-bits field */ +#define ICE_MI_FLAG_IDD (ICE_MI_FLAG_S / BITS_PER_BYTE) +#define ICE_MI_FLAG_OFF (ICE_MI_FLAG_S % BITS_PER_BYTE) +#define ICE_MI_FLAG GENMASK_ULL(186 - ICE_MI_FLAG_S, \ + 123 - ICE_MI_FLAG_S) + +/** + * ice_metainit_parse_item - parse 192 bits of Metadata Init entry + * @hw: pointer to the hardware structure + * @idx: index of Metadata Init entry + * @item: item of Metadata Init entry + * @data: Metadata Init entry data to be parsed + * @size: size of Metadata Init entry + */ +static void ice_metainit_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_metainit_item *mi = item; + u8 *buf = data; + u64 d64; + + mi->idx = idx; + + d64 = *(u64 *)buf; + + mi->tsr = FIELD_GET(ICE_MI_TSR, d64); + mi->ho = FIELD_GET(ICE_MI_HO, d64); + mi->pc = FIELD_GET(ICE_MI_PC, d64); + mi->pg_rn = FIELD_GET(ICE_MI_PGRN, d64); + mi->cd = FIELD_GET(ICE_MI_CD, d64); + + mi->gpr_a_ctrl = FIELD_GET(ICE_MI_GAC, d64); + mi->gpr_a_data_mdid = FIELD_GET(ICE_MI_GADM, d64); + mi->gpr_a_data_start = FIELD_GET(ICE_MI_GADS, d64); + mi->gpr_a_data_len = FIELD_GET(ICE_MI_GADL, d64); + mi->gpr_a_id = FIELD_GET(ICE_MI_GAI, d64); + + mi->gpr_b_ctrl = FIELD_GET(ICE_MI_GBC, d64); + + d64 = *((u64 *)&buf[ICE_MI_GBDM_IDD]) >> ICE_MI_GBDM_OFF; + + mi->gpr_b_data_mdid = FIELD_GET(ICE_MI_GBDM, d64); + mi->gpr_b_data_start = FIELD_GET(ICE_MI_GBDS, d64); + mi->gpr_b_data_len = FIELD_GET(ICE_MI_GBDL, d64); + mi->gpr_b_id = FIELD_GET(ICE_MI_GBI, d64); + + mi->gpr_c_ctrl = FIELD_GET(ICE_MI_GCC, d64); + mi->gpr_c_data_mdid = FIELD_GET(ICE_MI_GCDM, d64); + mi->gpr_c_data_start = FIELD_GET(ICE_MI_GCDS, d64); + mi->gpr_c_data_len = FIELD_GET(ICE_MI_GCDL, d64); + mi->gpr_c_id = FIELD_GET(ICE_MI_GCI, d64); + + mi->gpr_d_ctrl = FIELD_GET(ICE_MI_GDC, d64); + mi->gpr_d_data_mdid = FIELD_GET(ICE_MI_GDDM, d64); + mi->gpr_d_data_start = FIELD_GET(ICE_MI_GDDS, d64); + mi->gpr_d_data_len = FIELD_GET(ICE_MI_GDDL, d64); + mi->gpr_d_id = FIELD_GET(ICE_MI_GDI, d64); + + d64 = *((u64 *)&buf[ICE_MI_FLAG_IDD]) >> ICE_MI_FLAG_OFF; + + mi->flags = FIELD_GET(ICE_MI_FLAG, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_metainit_dump(hw, mi); +} + +/** + * ice_metainit_table_get - create a metainit table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated 
Metadata initialization table. + */ +static struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT, + sizeof(struct ice_metainit_item), + ICE_METAINIT_TABLE_SIZE, + ice_metainit_parse_item, false); +} + +/** + * ice_bst_tcam_search - find a TCAM item with specific type + * @tcam_table: the TCAM table + * @lbl_table: the lbl table to search + * @type: the type we need to match against + * @start: start searching from this index + * + * Return: a pointer to the matching BOOST TCAM item or NULL. + */ +struct ice_bst_tcam_item * +ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, + struct ice_lbl_item *lbl_table, + enum ice_lbl_type type, u16 *start) +{ + u16 i = *start; + + for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) { + if (lbl_table[i].type == type) { + *start = i; + return &tcam_table[lbl_table[i].idx]; + } + } + + return NULL; +} + +/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL, + * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_CAM + * sections ***/ +static void ice_pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "key:\n"); + dev_info(dev, "\tvalid = %d\n", key->valid); + dev_info(dev, "\tnode_id = %d\n", key->node_id); + dev_info(dev, "\tflag0 = %d\n", key->flag0); + dev_info(dev, "\tflag1 = %d\n", key->flag1); + dev_info(dev, "\tflag2 = %d\n", key->flag2); + dev_info(dev, "\tflag3 = %d\n", key->flag3); + dev_info(dev, "\tboost_idx = %d\n", key->boost_idx); + dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg); + dev_info(dev, "\tnext_proto = 0x%08x\n", key->next_proto); +} + +static void ice_pg_nm_cam_key_dump(struct ice_hw *hw, + struct ice_pg_nm_cam_key *key) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "key:\n"); + dev_info(dev, "\tvalid = %d\n", key->valid); + dev_info(dev, "\tnode_id = %d\n", key->node_id); + dev_info(dev, "\tflag0 = %d\n", key->flag0); + dev_info(dev, "\tflag1 = %d\n", key->flag1); + dev_info(dev, "\tflag2 = %d\n", key->flag2); + dev_info(dev, "\tflag3 = %d\n", key->flag3); + dev_info(dev, "\tboost_idx = %d\n", key->boost_idx); + dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg); +} + +static void ice_pg_cam_action_dump(struct ice_hw *hw, + struct ice_pg_cam_action *action) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "action:\n"); + dev_info(dev, "\tnext_node = %d\n", action->next_node); + dev_info(dev, "\tnext_pc = %d\n", action->next_pc); + dev_info(dev, "\tis_pg = %d\n", action->is_pg); + dev_info(dev, "\tproto_id = %d\n", action->proto_id); + dev_info(dev, "\tis_mg = %d\n", action->is_mg); + dev_info(dev, "\tmarker_id = %d\n", action->marker_id); + dev_info(dev, "\tis_last_round = %d\n", action->is_last_round); + dev_info(dev, "\tho_polarity = %d\n", action->ho_polarity); + dev_info(dev, "\tho_inc = %d\n", action->ho_inc); +} + +/** + * ice_pg_cam_dump - dump an parse graph cam info + * @hw: pointer to the hardware structure + * @item: parse graph cam to dump + */ +static void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + ice_pg_cam_key_dump(hw, &item->key); + ice_pg_cam_action_dump(hw, &item->action); +} + +/** + * ice_pg_nm_cam_dump - dump an parse graph no match cam info + * @hw: pointer to the hardware structure + * @item: parse graph no match cam to dump + */ +static void ice_pg_nm_cam_dump(struct ice_hw *hw, + struct ice_pg_nm_cam_item *item) +{ + 
dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + ice_pg_nm_cam_key_dump(hw, &item->key); + ice_pg_cam_action_dump(hw, &item->action); +} + +#define ICE_PGCA_NN GENMASK_ULL(10, 0) +#define ICE_PGCA_NPC GENMASK_ULL(18, 11) +#define ICE_PGCA_IPG BIT_ULL(19) +#define ICE_PGCA_PID GENMASK_ULL(30, 23) +#define ICE_PGCA_IMG BIT_ULL(31) +#define ICE_PGCA_MID GENMASK_ULL(39, 32) +#define ICE_PGCA_ILR BIT_ULL(40) +#define ICE_PGCA_HOP BIT_ULL(41) +#define ICE_PGCA_HOI GENMASK_ULL(50, 42) + +/** + * ice_pg_cam_action_init - parse 55 bits of Parse Graph CAM Action + * @action: pointer to the Parse Graph CAM Action structure + * @data: Parse Graph CAM Action data to be parsed + */ +static void ice_pg_cam_action_init(struct ice_pg_cam_action *action, u64 data) +{ + action->next_node = FIELD_GET(ICE_PGCA_NN, data); + action->next_pc = FIELD_GET(ICE_PGCA_NPC, data); + action->is_pg = FIELD_GET(ICE_PGCA_IPG, data); + action->proto_id = FIELD_GET(ICE_PGCA_PID, data); + action->is_mg = FIELD_GET(ICE_PGCA_IMG, data); + action->marker_id = FIELD_GET(ICE_PGCA_MID, data); + action->is_last_round = FIELD_GET(ICE_PGCA_ILR, data); + action->ho_polarity = FIELD_GET(ICE_PGCA_HOP, data); + action->ho_inc = FIELD_GET(ICE_PGCA_HOI, data); +} + +#define ICE_PGNCK_VLD BIT_ULL(0) +#define ICE_PGNCK_NID GENMASK_ULL(11, 1) +#define ICE_PGNCK_F0 BIT_ULL(12) +#define ICE_PGNCK_F1 BIT_ULL(13) +#define ICE_PGNCK_F2 BIT_ULL(14) +#define ICE_PGNCK_F3 BIT_ULL(15) +#define ICE_PGNCK_BH BIT_ULL(16) +#define ICE_PGNCK_BI GENMASK_ULL(24, 17) +#define ICE_PGNCK_AR GENMASK_ULL(40, 25) + +/** + * ice_pg_nm_cam_key_init - parse 41 bits of Parse Graph NoMatch CAM Key + * @key: pointer to the Parse Graph NoMatch CAM Key structure + * @data: Parse Graph NoMatch CAM Key data to be parsed + */ +static void ice_pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data) +{ + key->valid = FIELD_GET(ICE_PGNCK_VLD, data); + key->node_id = FIELD_GET(ICE_PGNCK_NID, data); + key->flag0 = FIELD_GET(ICE_PGNCK_F0, data); + key->flag1 = FIELD_GET(ICE_PGNCK_F1, data); + key->flag2 = FIELD_GET(ICE_PGNCK_F2, data); + key->flag3 = FIELD_GET(ICE_PGNCK_F3, data); + + if (FIELD_GET(ICE_PGNCK_BH, data)) + key->boost_idx = FIELD_GET(ICE_PGNCK_BI, data); + else + key->boost_idx = 0; + + key->alu_reg = FIELD_GET(ICE_PGNCK_AR, data); +} + +#define ICE_PGCK_VLD BIT_ULL(0) +#define ICE_PGCK_NID GENMASK_ULL(11, 1) +#define ICE_PGCK_F0 BIT_ULL(12) +#define ICE_PGCK_F1 BIT_ULL(13) +#define ICE_PGCK_F2 BIT_ULL(14) +#define ICE_PGCK_F3 BIT_ULL(15) +#define ICE_PGCK_BH BIT_ULL(16) +#define ICE_PGCK_BI GENMASK_ULL(24, 17) +#define ICE_PGCK_AR GENMASK_ULL(40, 25) +#define ICE_PGCK_NPK_S 41 /* offset for the 2nd 64-bits field */ +#define ICE_PGCK_NPK_IDD (ICE_PGCK_NPK_S / BITS_PER_BYTE) +#define ICE_PGCK_NPK_OFF (ICE_PGCK_NPK_S % BITS_PER_BYTE) +#define ICE_PGCK_NPK GENMASK_ULL(72 - ICE_PGCK_NPK_S, \ + 41 - ICE_PGCK_NPK_S) + +/** + * ice_pg_cam_key_init - parse 73 bits of Parse Graph CAM Key + * @key: pointer to the Parse Graph CAM Key structure + * @data: Parse Graph CAM Key data to be parsed + */ +static void ice_pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data) +{ + u64 d64 = *(u64 *)data; + + key->valid = FIELD_GET(ICE_PGCK_VLD, d64); + key->node_id = FIELD_GET(ICE_PGCK_NID, d64); + key->flag0 = FIELD_GET(ICE_PGCK_F0, d64); + key->flag1 = FIELD_GET(ICE_PGCK_F1, d64); + key->flag2 = FIELD_GET(ICE_PGCK_F2, d64); + key->flag3 = FIELD_GET(ICE_PGCK_F3, d64); + + if (FIELD_GET(ICE_PGCK_BH, d64)) + key->boost_idx = FIELD_GET(ICE_PGCK_BI, d64); + else + key->boost_idx 
= 0; + + key->alu_reg = FIELD_GET(ICE_PGCK_AR, d64); + + d64 = *((u64 *)&data[ICE_PGCK_NPK_IDD]) >> ICE_PGCK_NPK_OFF; + + key->next_proto = FIELD_GET(ICE_PGCK_NPK, d64); +} + +#define ICE_PG_CAM_ACT_S 73 +#define ICE_PG_CAM_ACT_IDD (ICE_PG_CAM_ACT_S / BITS_PER_BYTE) +#define ICE_PG_CAM_ACT_OFF (ICE_PG_CAM_ACT_S % BITS_PER_BYTE) + +/** + * ice_pg_cam_parse_item - parse 128 bits of Parse Graph CAM Entry + * @hw: pointer to the hardware structure + * @idx: index of Parse Graph CAM Entry + * @item: item of Parse Graph CAM Entry + * @data: Parse Graph CAM Entry data to be parsed + * @size: size of Parse Graph CAM Entry + */ +static void ice_pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_pg_cam_item *ci = item; + u8 *buf = data; + u64 d64; + + ci->idx = idx; + + ice_pg_cam_key_init(&ci->key, buf); + + d64 = *((u64 *)&buf[ICE_PG_CAM_ACT_IDD]) >> ICE_PG_CAM_ACT_OFF; + ice_pg_cam_action_init(&ci->action, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_cam_dump(hw, ci); +} + +#define ICE_PG_SP_CAM_KEY_S 56 +#define ICE_PG_SP_CAM_KEY_IDD (ICE_PG_SP_CAM_KEY_S / BITS_PER_BYTE) + +/** + * ice_pg_sp_cam_parse_item - parse 136 bits of Parse Graph Spill CAM Entry + * @hw: pointer to the hardware structure + * @idx: index of Parse Graph Spill CAM Entry + * @item: item of Parse Graph Spill CAM Entry + * @data: Parse Graph Spill CAM Entry data to be parsed + * @size: size of Parse Graph Spill CAM Entry + */ +static void ice_pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_pg_cam_item *ci = item; + u8 *buf = data; + u64 d64; + + ci->idx = idx; + + d64 = *(u64 *)buf; + ice_pg_cam_action_init(&ci->action, d64); + + ice_pg_cam_key_init(&ci->key, &buf[ICE_PG_SP_CAM_KEY_IDD]); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_cam_dump(hw, ci); +} + +#define ICE_PG_NM_CAM_ACT_S 41 +#define ICE_PG_NM_CAM_ACT_IDD (ICE_PG_NM_CAM_ACT_S / BITS_PER_BYTE) +#define ICE_PG_NM_CAM_ACT_OFF (ICE_PG_NM_CAM_ACT_S % BITS_PER_BYTE) + +/** + * ice_pg_nm_cam_parse_item - parse 96 bits of Parse Graph NoMatch CAM Entry + * @hw: pointer to the hardware structure + * @idx: index of Parse Graph NoMatch CAM Entry + * @item: item of Parse Graph NoMatch CAM Entry + * @data: Parse Graph NoMatch CAM Entry data to be parsed + * @size: size of Parse Graph NoMatch CAM Entry + */ +static void ice_pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_pg_nm_cam_item *ci = item; + u8 *buf = data; + u64 d64; + + ci->idx = idx; + + d64 = *(u64 *)buf; + ice_pg_nm_cam_key_init(&ci->key, d64); + + d64 = *((u64 *)&buf[ICE_PG_NM_CAM_ACT_IDD]) >> ICE_PG_NM_CAM_ACT_OFF; + ice_pg_cam_action_init(&ci->action, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_nm_cam_dump(hw, ci); +} + +#define ICE_PG_NM_SP_CAM_ACT_S 56 +#define ICE_PG_NM_SP_CAM_ACT_IDD (ICE_PG_NM_SP_CAM_ACT_S / BITS_PER_BYTE) +#define ICE_PG_NM_SP_CAM_ACT_OFF (ICE_PG_NM_SP_CAM_ACT_S % BITS_PER_BYTE) + +/** + * ice_pg_nm_sp_cam_parse_item - parse 104 bits of Parse Graph NoMatch Spill + * CAM Entry + * @hw: pointer to the hardware structure + * @idx: index of Parse Graph NoMatch Spill CAM Entry + * @item: item of Parse Graph NoMatch Spill CAM Entry + * @data: Parse Graph NoMatch Spill CAM Entry data to be parsed + * @size: size of Parse Graph NoMatch Spill CAM Entry + */ +static void ice_pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx, + void *item, void *data, + int __maybe_unused size) 
+{ + struct ice_pg_nm_cam_item *ci = item; + u8 *buf = data; + u64 d64; + + ci->idx = idx; + + d64 = *(u64 *)buf; + ice_pg_cam_action_init(&ci->action, d64); + + d64 = *((u64 *)&buf[ICE_PG_NM_SP_CAM_ACT_IDD]) >> + ICE_PG_NM_SP_CAM_ACT_OFF; + ice_pg_nm_cam_key_init(&ci->key, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_nm_cam_dump(hw, ci); +} + +/** + * ice_pg_cam_table_get - create a parse graph cam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Parse Graph CAM table. + */ +static struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM, + sizeof(struct ice_pg_cam_item), + ICE_PG_CAM_TABLE_SIZE, + ice_pg_cam_parse_item, false); +} + +/** + * ice_pg_sp_cam_table_get - create a parse graph spill cam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Parse Graph Spill CAM table. + */ +static struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL, + sizeof(struct ice_pg_cam_item), + ICE_PG_SP_CAM_TABLE_SIZE, + ice_pg_sp_cam_parse_item, false); +} + +/** + * ice_pg_nm_cam_table_get - create a parse graph no match cam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Parse Graph No Match CAM table. + */ +static struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM, + sizeof(struct ice_pg_nm_cam_item), + ICE_PG_NM_CAM_TABLE_SIZE, + ice_pg_nm_cam_parse_item, false); +} + +/** + * ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Parse Graph No Match Spill CAM table. + */ +static struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL, + sizeof(struct ice_pg_nm_cam_item), + ICE_PG_NM_SP_CAM_TABLE_SIZE, + ice_pg_nm_sp_cam_parse_item, false); +} + +static bool __ice_pg_cam_match(struct ice_pg_cam_item *item, + struct ice_pg_cam_key *key) +{ + return (item->key.valid && + !memcmp(&item->key.val, &key->val, sizeof(key->val))); +} + +static bool __ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *item, + struct ice_pg_cam_key *key) +{ + return (item->key.valid && + !memcmp(&item->key.val, &key->val, sizeof(item->key.val))); +} + +/** + * ice_pg_cam_match - search parse graph cam table by key + * @table: parse graph cam table to search + * @size: cam table size + * @key: search key + * + * Return: a pointer to the matching PG CAM item or NULL. + */ +struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table, + int size, struct ice_pg_cam_key *key) +{ + int i; + + for (i = 0; i < size; i++) { + struct ice_pg_cam_item *item = &table[i]; + + if (__ice_pg_cam_match(item, key)) + return item; + } + + return NULL; +} + +/** + * ice_pg_nm_cam_match - search parse graph no match cam table by key + * @table: parse graph no match cam table to search + * @size: cam table size + * @key: search key + * + * Return: a pointer to the matching PG No Match CAM item or NULL. 
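+ *
+ * A minimal usage sketch (editor's illustration; @psr and @rt are
+ * hypothetical variables): the same full parse graph key used for the
+ * regular CAM lookup can be passed here, since only the leading
+ * no-match key fields take part in the memcmp() comparison.
+ *
+ *	struct ice_pg_nm_cam_item *item;
+ *
+ *	item = ice_pg_nm_cam_match(psr->pg_nm_cam_table,
+ *				   ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key);
+ *	if (!item)
+ *		item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table,
+ *					   ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ *					   &rt->pg_key);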
+ */ +struct ice_pg_nm_cam_item * +ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, + struct ice_pg_cam_key *key) +{ + int i; + + for (i = 0; i < size; i++) { + struct ice_pg_nm_cam_item *item = &table[i]; + + if (__ice_pg_nm_cam_match(item, key)) + return item; + } + + return NULL; +} + +/*** Ternary match ***/ +/* Perform a ternary match on a 1-byte pattern (@pat) given @key and @key_inv + * Rules (per bit): + * Key == 0 and Key_inv == 0 : Never match (Don't care) + * Key == 0 and Key_inv == 1 : Match on bit == 1 + * Key == 1 and Key_inv == 0 : Match on bit == 0 + * Key == 1 and Key_inv == 1 : Always match (Don't care) + * + * Return: true if all bits match, false otherwise. + */ +static bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat) +{ + u8 bit_key, bit_key_inv, bit_pat; + int i; + + for (i = 0; i < BITS_PER_BYTE; i++) { + bit_key = key & BIT(i); + bit_key_inv = key_inv & BIT(i); + bit_pat = pat & BIT(i); + + if (bit_key != 0 && bit_key_inv != 0) + continue; + + if ((bit_key == 0 && bit_key_inv == 0) || bit_key == bit_pat) + return false; + } + + return true; +} + +static bool ice_ternary_match(const u8 *key, const u8 *key_inv, + const u8 *pat, int len) +{ + int i; + + for (i = 0; i < len; i++) + if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i])) + return false; + + return true; +} + +/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/ +static void ice_bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "next proto key builder:\n"); + dev_info(dev, "\topc = %d\n", kb->opc); + dev_info(dev, "\tstart_reg0 = %d\n", kb->start_reg0); + dev_info(dev, "\tlen_reg1 = %d\n", kb->len_reg1); +} + +static void ice_bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "parse graph key builder:\n"); + dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena); + dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena); + dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena); + dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena); + dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx); + dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx); + dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx); + dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx); + dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx); +} + +static void ice_bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int idx) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "alu%d:\n", idx); + dev_info(dev, "\topc = %d\n", alu->opc); + dev_info(dev, "\tsrc_start = %d\n", alu->src_start); + dev_info(dev, "\tsrc_len = %d\n", alu->src_len); + dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel); + dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key); + dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id); + dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id); + dev_info(dev, "\tinc0 = %d\n", alu->inc0); + dev_info(dev, "\tinc1 = %d\n", alu->inc1); + dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc); + dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset); + dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr); + dev_info(dev, "\timm = %d\n", alu->imm); + dev_info(dev, "\tdst_start = %d\n", alu->dst_start); + dev_info(dev, "\tdst_len = %d\n", alu->dst_len); + dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm); + dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm); +} + +/** + * 
ice_bst_tcam_dump - dump a boost tcam info + * @hw: pointer to the hardware structure + * @item: boost tcam to dump + */ +static void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "addr = %d\n", item->addr); + + dev_info(dev, "key : "); + for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) + dev_info(dev, "%02x ", item->key[i]); + + dev_info(dev, "\n"); + + dev_info(dev, "key_inv: "); + for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) + dev_info(dev, "%02x ", item->key_inv[i]); + + dev_info(dev, "\n"); + + dev_info(dev, "hit_idx_grp = %d\n", item->hit_idx_grp); + dev_info(dev, "pg_prio = %d\n", item->pg_prio); + + ice_bst_np_kb_dump(hw, &item->np_kb); + ice_bst_pg_kb_dump(hw, &item->pg_kb); + + ice_bst_alu_dump(hw, &item->alu0, ICE_ALU0_IDX); + ice_bst_alu_dump(hw, &item->alu1, ICE_ALU1_IDX); + ice_bst_alu_dump(hw, &item->alu2, ICE_ALU2_IDX); +} + +static void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "index = %u\n", item->idx); + dev_info(dev, "type = %u\n", item->type); + dev_info(dev, "label = %s\n", item->label); +} + +#define ICE_BST_ALU_OPC GENMASK_ULL(5, 0) +#define ICE_BST_ALU_SS GENMASK_ULL(13, 6) +#define ICE_BST_ALU_SL GENMASK_ULL(18, 14) +#define ICE_BST_ALU_SXS BIT_ULL(19) +#define ICE_BST_ALU_SXK GENMASK_ULL(23, 20) +#define ICE_BST_ALU_SRID GENMASK_ULL(30, 24) +#define ICE_BST_ALU_DRID GENMASK_ULL(37, 31) +#define ICE_BST_ALU_INC0 BIT_ULL(38) +#define ICE_BST_ALU_INC1 BIT_ULL(39) +#define ICE_BST_ALU_POO GENMASK_ULL(41, 40) +#define ICE_BST_ALU_PO GENMASK_ULL(49, 42) +#define ICE_BST_ALU_BA_S 50 /* offset for the 2nd 64-bits field */ +#define ICE_BST_ALU_BA GENMASK_ULL(57 - ICE_BST_ALU_BA_S, \ + 50 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_IMM GENMASK_ULL(73 - ICE_BST_ALU_BA_S, \ + 58 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_DFE BIT_ULL(74 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_DS GENMASK_ULL(80 - ICE_BST_ALU_BA_S, \ + 75 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_DL GENMASK_ULL(86 - ICE_BST_ALU_BA_S, \ + 81 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_FEI BIT_ULL(87 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_FSI GENMASK_ULL(95 - ICE_BST_ALU_BA_S, \ + 88 - ICE_BST_ALU_BA_S) + +/** + * ice_bst_alu_init - parse 96 bits of ALU entry + * @alu: pointer to the ALU entry structure + * @data: ALU entry data to be parsed + * @off: offset of the ALU entry data + */ +static void ice_bst_alu_init(struct ice_alu *alu, u8 *data, u8 off) +{ + u64 d64; + u8 idd; + + d64 = *((u64 *)data) >> off; + + alu->opc = FIELD_GET(ICE_BST_ALU_OPC, d64); + alu->src_start = FIELD_GET(ICE_BST_ALU_SS, d64); + alu->src_len = FIELD_GET(ICE_BST_ALU_SL, d64); + alu->shift_xlate_sel = FIELD_GET(ICE_BST_ALU_SXS, d64); + alu->shift_xlate_key = FIELD_GET(ICE_BST_ALU_SXK, d64); + alu->src_reg_id = FIELD_GET(ICE_BST_ALU_SRID, d64); + alu->dst_reg_id = FIELD_GET(ICE_BST_ALU_DRID, d64); + alu->inc0 = FIELD_GET(ICE_BST_ALU_INC0, d64); + alu->inc1 = FIELD_GET(ICE_BST_ALU_INC1, d64); + alu->proto_offset_opc = FIELD_GET(ICE_BST_ALU_POO, d64); + alu->proto_offset = FIELD_GET(ICE_BST_ALU_PO, d64); + + idd = (ICE_BST_ALU_BA_S + off) / BITS_PER_BYTE; + off = (ICE_BST_ALU_BA_S + off) % BITS_PER_BYTE; + d64 = *((u64 *)(&data[idd])) >> off; + + alu->branch_addr = FIELD_GET(ICE_BST_ALU_BA, d64); + alu->imm = FIELD_GET(ICE_BST_ALU_IMM, d64); + alu->dedicate_flags_ena = FIELD_GET(ICE_BST_ALU_DFE, d64); + alu->dst_start = FIELD_GET(ICE_BST_ALU_DS, d64); + alu->dst_len = 
FIELD_GET(ICE_BST_ALU_DL, d64); + alu->flags_extr_imm = FIELD_GET(ICE_BST_ALU_FEI, d64); + alu->flags_start_imm = FIELD_GET(ICE_BST_ALU_FSI, d64); +} + +#define ICE_BST_PGKB_F0_ENA BIT_ULL(0) +#define ICE_BST_PGKB_F0_IDX GENMASK_ULL(6, 1) +#define ICE_BST_PGKB_F1_ENA BIT_ULL(7) +#define ICE_BST_PGKB_F1_IDX GENMASK_ULL(13, 8) +#define ICE_BST_PGKB_F2_ENA BIT_ULL(14) +#define ICE_BST_PGKB_F2_IDX GENMASK_ULL(20, 15) +#define ICE_BST_PGKB_F3_ENA BIT_ULL(21) +#define ICE_BST_PGKB_F3_IDX GENMASK_ULL(27, 22) +#define ICE_BST_PGKB_AR_IDX GENMASK_ULL(34, 28) + +/** + * ice_bst_pgkb_init - parse 35 bits of Parse Graph Key Build + * @kb: pointer to the Parse Graph Key Build structure + * @data: Parse Graph Key Build data to be parsed + */ +static void ice_bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data) +{ + kb->flag0_ena = FIELD_GET(ICE_BST_PGKB_F0_ENA, data); + kb->flag0_idx = FIELD_GET(ICE_BST_PGKB_F0_IDX, data); + kb->flag1_ena = FIELD_GET(ICE_BST_PGKB_F1_ENA, data); + kb->flag1_idx = FIELD_GET(ICE_BST_PGKB_F1_IDX, data); + kb->flag2_ena = FIELD_GET(ICE_BST_PGKB_F2_ENA, data); + kb->flag2_idx = FIELD_GET(ICE_BST_PGKB_F2_IDX, data); + kb->flag3_ena = FIELD_GET(ICE_BST_PGKB_F3_ENA, data); + kb->flag3_idx = FIELD_GET(ICE_BST_PGKB_F3_IDX, data); + kb->alu_reg_idx = FIELD_GET(ICE_BST_PGKB_AR_IDX, data); +} + +#define ICE_BST_NPKB_OPC GENMASK(1, 0) +#define ICE_BST_NPKB_S_R0 GENMASK(9, 2) +#define ICE_BST_NPKB_L_R1 GENMASK(17, 10) + +/** + * ice_bst_npkb_init - parse 18 bits of Next Protocol Key Build + * @kb: pointer to the Next Protocol Key Build structure + * @data: Next Protocol Key Build data to be parsed + */ +static void ice_bst_npkb_init(struct ice_np_keybuilder *kb, u32 data) +{ + kb->opc = FIELD_GET(ICE_BST_NPKB_OPC, data); + kb->start_reg0 = FIELD_GET(ICE_BST_NPKB_S_R0, data); + kb->len_reg1 = FIELD_GET(ICE_BST_NPKB_L_R1, data); +} + +#define ICE_BT_KEY_S 32 +#define ICE_BT_KEY_IDD (ICE_BT_KEY_S / BITS_PER_BYTE) +#define ICE_BT_KIV_S 192 +#define ICE_BT_KIV_IDD (ICE_BT_KIV_S / BITS_PER_BYTE) +#define ICE_BT_HIG_S 352 +#define ICE_BT_HIG_IDD (ICE_BT_HIG_S / BITS_PER_BYTE) +#define ICE_BT_PGP_S 360 +#define ICE_BT_PGP_IDD (ICE_BT_PGP_S / BITS_PER_BYTE) +#define ICE_BT_PGP_M GENMASK(361 - ICE_BT_PGP_S, 360 - ICE_BT_PGP_S) +#define ICE_BT_NPKB_S 362 +#define ICE_BT_NPKB_IDD (ICE_BT_NPKB_S / BITS_PER_BYTE) +#define ICE_BT_NPKB_OFF (ICE_BT_NPKB_S % BITS_PER_BYTE) +#define ICE_BT_PGKB_S 380 +#define ICE_BT_PGKB_IDD (ICE_BT_PGKB_S / BITS_PER_BYTE) +#define ICE_BT_PGKB_OFF (ICE_BT_PGKB_S % BITS_PER_BYTE) +#define ICE_BT_ALU0_S 415 +#define ICE_BT_ALU0_IDD (ICE_BT_ALU0_S / BITS_PER_BYTE) +#define ICE_BT_ALU0_OFF (ICE_BT_ALU0_S % BITS_PER_BYTE) +#define ICE_BT_ALU1_S 511 +#define ICE_BT_ALU1_IDD (ICE_BT_ALU1_S / BITS_PER_BYTE) +#define ICE_BT_ALU1_OFF (ICE_BT_ALU1_S % BITS_PER_BYTE) +#define ICE_BT_ALU2_S 607 +#define ICE_BT_ALU2_IDD (ICE_BT_ALU2_S / BITS_PER_BYTE) +#define ICE_BT_ALU2_OFF (ICE_BT_ALU2_S % BITS_PER_BYTE) + +/** + * ice_bst_parse_item - parse 704 bits of Boost TCAM entry + * @hw: pointer to the hardware structure + * @idx: index of Boost TCAM entry + * @item: item of Boost TCAM entry + * @data: Boost TCAM entry data to be parsed + * @size: size of Boost TCAM entry + */ +static void ice_bst_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_bst_tcam_item *ti = item; + u8 *buf = (u8 *)data; + int i; + + ti->addr = *(u16 *)buf; + + for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) { + ti->key[i] = buf[ICE_BT_KEY_IDD + i]; + 
ti->key_inv[i] = buf[ICE_BT_KIV_IDD + i]; + } + ti->hit_idx_grp = buf[ICE_BT_HIG_IDD]; + ti->pg_prio = buf[ICE_BT_PGP_IDD] & ICE_BT_PGP_M; + + ice_bst_npkb_init(&ti->np_kb, + *((u32 *)(&buf[ICE_BT_NPKB_IDD])) >> + ICE_BT_NPKB_OFF); + ice_bst_pgkb_init(&ti->pg_kb, + *((u64 *)(&buf[ICE_BT_PGKB_IDD])) >> + ICE_BT_PGKB_OFF); + + ice_bst_alu_init(&ti->alu0, &buf[ICE_BT_ALU0_IDD], ICE_BT_ALU0_OFF); + ice_bst_alu_init(&ti->alu1, &buf[ICE_BT_ALU1_IDD], ICE_BT_ALU1_OFF); + ice_bst_alu_init(&ti->alu2, &buf[ICE_BT_ALU2_IDD], ICE_BT_ALU2_OFF); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_bst_tcam_dump(hw, ti); +} + +/** + * ice_bst_tcam_table_get - create a boost tcam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Boost TCAM table. + */ +static struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM, + sizeof(struct ice_bst_tcam_item), + ICE_BST_TCAM_TABLE_SIZE, + ice_bst_parse_item, true); +} + +static void ice_parse_lbl_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_lbl_item *lbl_item = item; + struct ice_lbl_item *lbl_data = data; + + lbl_item->idx = lbl_data->idx; + memcpy(lbl_item->label, lbl_data->label, sizeof(lbl_item->label)); + + if (strstarts(lbl_item->label, ICE_LBL_BST_DVM)) + lbl_item->type = ICE_LBL_BST_TYPE_DVM; + else if (strstarts(lbl_item->label, ICE_LBL_BST_SVM)) + lbl_item->type = ICE_LBL_BST_TYPE_SVM; + else if (strstarts(lbl_item->label, ICE_LBL_TNL_VXLAN)) + lbl_item->type = ICE_LBL_BST_TYPE_VXLAN; + else if (strstarts(lbl_item->label, ICE_LBL_TNL_GENEVE)) + lbl_item->type = ICE_LBL_BST_TYPE_GENEVE; + else if (strstarts(lbl_item->label, ICE_LBL_TNL_UDP_ECPRI)) + lbl_item->type = ICE_LBL_BST_TYPE_UDP_ECPRI; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_lbl_dump(hw, lbl_item); +} + +/** + * ice_bst_lbl_table_get - create a boost label table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Boost label table. + */ +static struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM, + sizeof(struct ice_lbl_item), + ICE_BST_TCAM_TABLE_SIZE, + ice_parse_lbl_item, true); +} + +/** + * ice_bst_tcam_match - match a pattern on the boost tcam table + * @tcam_table: boost tcam table to search + * @pat: pattern to match + * + * Return: a pointer to the matching Boost TCAM item or NULL. 
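+ *
+ * Each key/key_inv byte pair encodes a ternary pattern following the
+ * per-bit rules documented above ice_ternary_match_byte(). A worked
+ * example (editor's illustration):
+ *
+ *	key = 0x0F, key_inv = 0xF0: bits 0-3 must be 0 (key bit set,
+ *	key_inv bit clear) and bits 4-7 must be 1 (key bit clear,
+ *	key_inv bit set), so only the pattern byte 0xF0 matches.
+ *
+ *	key = key_inv = 0xFF: every bit is "always match", i.e. the
+ *	whole byte is a don't-care.
+ *
+ *	key = key_inv = 0x00: the byte can never match.
+ *
+ * Entries whose hit_idx_grp is 0 are skipped.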
+ */ +struct ice_bst_tcam_item * +ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat) +{ + int i; + + for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) { + struct ice_bst_tcam_item *item = &tcam_table[i]; + + if (item->hit_idx_grp == 0) + continue; + if (ice_ternary_match(item->key, item->key_inv, pat, + ICE_BST_TCAM_KEY_SIZE)) + return item; + } + + return NULL; +} + +/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/ +/** + * ice_ptype_mk_tcam_dump - dump an ptype marker tcam info + * @hw: pointer to the hardware structure + * @item: ptype marker tcam to dump + */ +static void ice_ptype_mk_tcam_dump(struct ice_hw *hw, + struct ice_ptype_mk_tcam_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "address = %d\n", item->address); + dev_info(dev, "ptype = %d\n", item->ptype); + + dev_info(dev, "key :"); + for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++) + dev_info(dev, "%02x ", item->key[i]); + + dev_info(dev, "\n"); + + dev_info(dev, "key_inv:"); + for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++) + dev_info(dev, "%02x ", item->key_inv[i]); + + dev_info(dev, "\n"); +} + +static void ice_parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx, + void *item, void *data, int size) +{ + memcpy(item, data, size); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_ptype_mk_tcam_dump(hw, + (struct ice_ptype_mk_tcam_item *)item); +} + +/** + * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Marker PType TCAM table. + */ +static +struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE, + sizeof(struct ice_ptype_mk_tcam_item), + ICE_PTYPE_MK_TCAM_TABLE_SIZE, + ice_parse_ptype_mk_tcam_item, true); +} + +/** + * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table + * @table: ptype marker tcam table to search + * @pat: pattern to match + * @len: length of the pattern + * + * Return: a pointer to the matching Marker PType item or NULL. + */ +struct ice_ptype_mk_tcam_item * +ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table, + u8 *pat, int len) +{ + int i; + + for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) { + struct ice_ptype_mk_tcam_item *item = &table[i]; + + if (ice_ternary_match(item->key, item->key_inv, pat, len)) + return item; + } + + return NULL; +} + +/*** ICE_SID_RXPARSER_MARKER_GRP section ***/ +/** + * ice_mk_grp_dump - dump an marker group item info + * @hw: pointer to the hardware structure + * @item: marker group item to dump + */ +static void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "index = %d\n", item->idx); + + dev_info(dev, "markers: "); + for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++) + dev_info(dev, "%d ", item->markers[i]); + + dev_info(dev, "\n"); +} + +static void ice_mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_mk_grp_item *grp = item; + u8 *buf = data; + int i; + + grp->idx = idx; + + for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++) + grp->markers[i] = buf[i]; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_mk_grp_dump(hw, grp); +} + +/** + * ice_mk_grp_table_get - create a marker group table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Marker Group ID table. 
+ */ +static struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP, + sizeof(struct ice_mk_grp_item), + ICE_MK_GRP_TABLE_SIZE, + ice_mk_grp_parse_item, false); +} + +/*** ICE_SID_RXPARSER_PROTO_GRP section ***/ +static void ice_proto_off_dump(struct ice_hw *hw, + struct ice_proto_off *po, int idx) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "proto %d\n", idx); + dev_info(dev, "\tpolarity = %d\n", po->polarity); + dev_info(dev, "\tproto_id = %d\n", po->proto_id); + dev_info(dev, "\toffset = %d\n", po->offset); +} + +/** + * ice_proto_grp_dump - dump a proto group item info + * @hw: pointer to the hardware structure + * @item: proto group item to dump + */ +static void ice_proto_grp_dump(struct ice_hw *hw, + struct ice_proto_grp_item *item) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + + for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) + ice_proto_off_dump(hw, &item->po[i], i); +} + +#define ICE_PO_POL BIT(0) +#define ICE_PO_PID GENMASK(8, 1) +#define ICE_PO_OFF GENMASK(21, 12) + +/** + * ice_proto_off_parse - parse 22 bits of Protocol entry + * @po: pointer to the Protocol entry structure + * @data: Protocol entry data to be parsed + */ +static void ice_proto_off_parse(struct ice_proto_off *po, u32 data) +{ + po->polarity = FIELD_GET(ICE_PO_POL, data); + po->proto_id = FIELD_GET(ICE_PO_PID, data); + po->offset = FIELD_GET(ICE_PO_OFF, data); +} + +/** + * ice_proto_grp_parse_item - parse 192 bits of Protocol Group Table entry + * @hw: pointer to the hardware structure + * @idx: index of Protocol Group Table entry + * @item: item of Protocol Group Table entry + * @data: Protocol Group Table entry data to be parsed + * @size: size of Protocol Group Table entry + */ +static void ice_proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_proto_grp_item *grp = item; + u8 *buf = (u8 *)data; + u8 idd, off; + u32 d32; + int i; + + grp->idx = idx; + + for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) { + idd = (ICE_PROTO_GRP_ITEM_SIZE * i) / BITS_PER_BYTE; + off = (ICE_PROTO_GRP_ITEM_SIZE * i) % BITS_PER_BYTE; + d32 = *((u32 *)&buf[idd]) >> off; + ice_proto_off_parse(&grp->po[i], d32); + } + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_proto_grp_dump(hw, grp); +} + +/** + * ice_proto_grp_table_get - create a proto group table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Protocol Group table. 
+ */ +static struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP, + sizeof(struct ice_proto_grp_item), + ICE_PROTO_GRP_TABLE_SIZE, + ice_proto_grp_parse_item, false); +} + +/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/ +/** + * ice_flg_rd_dump - dump a flag redirect item info + * @hw: pointer to the hardware structure + * @item: flag redirect item to dump + */ +static void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "index = %d\n", item->idx); + dev_info(dev, "expose = %d\n", item->expose); + dev_info(dev, "intr_flg_id = %d\n", item->intr_flg_id); +} + +#define ICE_FRT_EXPO BIT(0) +#define ICE_FRT_IFID GENMASK(6, 1) + +/** + * ice_flg_rd_parse_item - parse 8 bits of Flag Redirect Table entry + * @hw: pointer to the hardware structure + * @idx: index of Flag Redirect Table entry + * @item: item of Flag Redirect Table entry + * @data: Flag Redirect Table entry data to be parsed + * @size: size of Flag Redirect Table entry + */ +static void ice_flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_flg_rd_item *rdi = item; + u8 d8 = *(u8 *)data; + + rdi->idx = idx; + rdi->expose = FIELD_GET(ICE_FRT_EXPO, d8); + rdi->intr_flg_id = FIELD_GET(ICE_FRT_IFID, d8); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_flg_rd_dump(hw, rdi); +} + +/** + * ice_flg_rd_table_get - create a flag redirect table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Flags Redirection table. + */ +static struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR, + sizeof(struct ice_flg_rd_item), + ICE_FLG_RD_TABLE_SIZE, + ice_flg_rd_parse_item, false); +} + +/** + * ice_flg_redirect - redirect a parser flag to packet flag + * @table: flag redirect table + * @psr_flg: parser flag to redirect + * + * Return: flag or 0 if @psr_flag = 0. 
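+ *
+ * Output bit i is set when table[i] is exposed and the parser flag
+ * selected by table[i].intr_flg_id is set in @psr_flg. For example
+ * (editor's illustration with made-up values): if table[5].expose is
+ * true and table[5].intr_flg_id is 9, then BIT(9) set in @psr_flg
+ * yields BIT(5) set in the returned packet flags; entries with
+ * expose == false never contribute.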
+ */ +u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg) +{ + u64 flg = 0; + int i; + + for (i = 0; i < ICE_FLG_RDT_SIZE; i++) { + struct ice_flg_rd_item *item = &table[i]; + + if (!item->expose) + continue; + + if (psr_flg & BIT(item->intr_flg_id)) + flg |= BIT(i); + } + + return flg; +} + +/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL, + * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS + * sections ***/ +static void ice_xlt_kb_entry_dump(struct ice_hw *hw, + struct ice_xlt_kb_entry *entry, int idx) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "key builder entry %d\n", idx); + dev_info(dev, "\txlt1_ad_sel = %d\n", entry->xlt1_ad_sel); + dev_info(dev, "\txlt2_ad_sel = %d\n", entry->xlt2_ad_sel); + + for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) + dev_info(dev, "\tflg%d_sel = %d\n", i, entry->flg0_14_sel[i]); + + dev_info(dev, "\txlt1_md_sel = %d\n", entry->xlt1_md_sel); + dev_info(dev, "\txlt2_md_sel = %d\n", entry->xlt2_md_sel); +} + +/** + * ice_xlt_kb_dump - dump a xlt key build info + * @hw: pointer to the hardware structure + * @kb: key build to dump + */ +static void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "xlt1_pm = %d\n", kb->xlt1_pm); + dev_info(dev, "xlt2_pm = %d\n", kb->xlt2_pm); + dev_info(dev, "prof_id_pm = %d\n", kb->prof_id_pm); + dev_info(dev, "flag15 lo = 0x%08x\n", (u32)kb->flag15); + dev_info(dev, "flag15 hi = 0x%08x\n", + (u32)(kb->flag15 >> (sizeof(u32) * BITS_PER_BYTE))); + + for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++) + ice_xlt_kb_entry_dump(hw, &kb->entries[i], i); +} + +#define ICE_XLT_KB_X1AS_S 32 /* offset for the 1st 64-bits field */ +#define ICE_XLT_KB_X1AS_IDD (ICE_XLT_KB_X1AS_S / BITS_PER_BYTE) +#define ICE_XLT_KB_X1AS_OFF (ICE_XLT_KB_X1AS_S % BITS_PER_BYTE) +#define ICE_XLT_KB_X1AS GENMASK_ULL(34 - ICE_XLT_KB_X1AS_S, \ + 32 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_X2AS GENMASK_ULL(37 - ICE_XLT_KB_X1AS_S, \ + 35 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL00 GENMASK_ULL(46 - ICE_XLT_KB_X1AS_S, \ + 38 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL01 GENMASK_ULL(55 - ICE_XLT_KB_X1AS_S, \ + 47 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL02 GENMASK_ULL(64 - ICE_XLT_KB_X1AS_S, \ + 56 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL03 GENMASK_ULL(73 - ICE_XLT_KB_X1AS_S, \ + 65 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL04 GENMASK_ULL(82 - ICE_XLT_KB_X1AS_S, \ + 74 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL05 GENMASK_ULL(91 - ICE_XLT_KB_X1AS_S, \ + 83 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL06_S 92 /* offset for the 2nd 64-bits field */ +#define ICE_XLT_KB_FL06_IDD (ICE_XLT_KB_FL06_S / BITS_PER_BYTE) +#define ICE_XLT_KB_FL06_OFF (ICE_XLT_KB_FL06_S % BITS_PER_BYTE) +#define ICE_XLT_KB_FL06 GENMASK_ULL(100 - ICE_XLT_KB_FL06_S, \ + 92 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL07 GENMASK_ULL(109 - ICE_XLT_KB_FL06_S, \ + 101 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL08 GENMASK_ULL(118 - ICE_XLT_KB_FL06_S, \ + 110 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL09 GENMASK_ULL(127 - ICE_XLT_KB_FL06_S, \ + 119 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL10 GENMASK_ULL(136 - ICE_XLT_KB_FL06_S, \ + 128 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL11 GENMASK_ULL(145 - ICE_XLT_KB_FL06_S, \ + 137 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL12_S 146 /* offset for the 3rd 64-bits field */ +#define ICE_XLT_KB_FL12_IDD (ICE_XLT_KB_FL12_S / BITS_PER_BYTE) +#define ICE_XLT_KB_FL12_OFF (ICE_XLT_KB_FL12_S % BITS_PER_BYTE) +#define ICE_XLT_KB_FL12 
GENMASK_ULL(154 - ICE_XLT_KB_FL12_S, \ + 146 - ICE_XLT_KB_FL12_S) +#define ICE_XLT_KB_FL13 GENMASK_ULL(163 - ICE_XLT_KB_FL12_S, \ + 155 - ICE_XLT_KB_FL12_S) +#define ICE_XLT_KB_FL14 GENMASK_ULL(181 - ICE_XLT_KB_FL12_S, \ + 164 - ICE_XLT_KB_FL12_S) +#define ICE_XLT_KB_X1MS GENMASK_ULL(186 - ICE_XLT_KB_FL12_S, \ + 182 - ICE_XLT_KB_FL12_S) +#define ICE_XLT_KB_X2MS GENMASK_ULL(191 - ICE_XLT_KB_FL12_S, \ + 187 - ICE_XLT_KB_FL12_S) + +/** + * ice_kb_entry_init - parse 192 bits of XLT Key Builder entry + * @entry: pointer to the XLT Key Builder entry structure + * @data: XLT Key Builder entry data to be parsed + */ +static void ice_kb_entry_init(struct ice_xlt_kb_entry *entry, u8 *data) +{ + u8 i = 0; + u64 d64; + + d64 = *((u64 *)&data[ICE_XLT_KB_X1AS_IDD]) >> ICE_XLT_KB_X1AS_OFF; + + entry->xlt1_ad_sel = FIELD_GET(ICE_XLT_KB_X1AS, d64); + entry->xlt2_ad_sel = FIELD_GET(ICE_XLT_KB_X2AS, d64); + + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL00, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL01, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL02, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL03, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL04, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL05, d64); + + d64 = *((u64 *)&data[ICE_XLT_KB_FL06_IDD]) >> ICE_XLT_KB_FL06_OFF; + + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL06, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL07, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL08, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL09, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL10, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL11, d64); + + d64 = *((u64 *)&data[ICE_XLT_KB_FL12_IDD]) >> ICE_XLT_KB_FL12_OFF; + + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL12, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL13, d64); + entry->flg0_14_sel[i] = FIELD_GET(ICE_XLT_KB_FL14, d64); + + entry->xlt1_md_sel = FIELD_GET(ICE_XLT_KB_X1MS, d64); + entry->xlt2_md_sel = FIELD_GET(ICE_XLT_KB_X2MS, d64); +} + +#define ICE_XLT_KB_X1PM_OFF 0 +#define ICE_XLT_KB_X2PM_OFF 1 +#define ICE_XLT_KB_PIPM_OFF 2 +#define ICE_XLT_KB_FL15_OFF 4 +#define ICE_XLT_KB_TBL_OFF 12 + +/** + * ice_parse_kb_data - parse 204 bits of XLT Key Build Table + * @hw: pointer to the hardware structure + * @kb: pointer to the XLT Key Build Table structure + * @data: XLT Key Build Table data to be parsed + */ +static void ice_parse_kb_data(struct ice_hw *hw, struct ice_xlt_kb *kb, + void *data) +{ + u8 *buf = data; + int i; + + kb->xlt1_pm = buf[ICE_XLT_KB_X1PM_OFF]; + kb->xlt2_pm = buf[ICE_XLT_KB_X2PM_OFF]; + kb->prof_id_pm = buf[ICE_XLT_KB_PIPM_OFF]; + + kb->flag15 = *(u64 *)&buf[ICE_XLT_KB_FL15_OFF]; + for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++) + ice_kb_entry_init(&kb->entries[i], + &buf[ICE_XLT_KB_TBL_OFF + + i * ICE_XLT_KB_TBL_ENTRY_SIZE]); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_xlt_kb_dump(hw, kb); +} + +static struct ice_xlt_kb *ice_xlt_kb_get(struct ice_hw *hw, u32 sect_type) +{ + struct ice_pkg_enum state = {}; + struct ice_seg *seg = hw->seg; + struct ice_xlt_kb *kb; + void *data; + + if (!seg) + return ERR_PTR(-EINVAL); + + kb = kzalloc(sizeof(*kb), GFP_KERNEL); + if (!kb) + return ERR_PTR(-ENOMEM); + + data = ice_pkg_enum_section(seg, &state, sect_type); + if (!data) { + ice_debug(hw, ICE_DBG_PARSER, "failed to find section type %d.\n", + sect_type); + kfree(kb); + return ERR_PTR(-EINVAL); + } + + ice_parse_kb_data(hw, kb, data); + + return kb; +} + +/** + * 
ice_xlt_kb_get_sw - create switch xlt key build + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Key Builder table for Switch. + */ +static struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw) +{ + return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_SW); +} + +/** + * ice_xlt_kb_get_acl - create acl xlt key build + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Key Builder table for ACL. + */ +static struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw) +{ + return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_ACL); +} + +/** + * ice_xlt_kb_get_fd - create fdir xlt key build + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Key Builder table for Flow Director. + */ +static struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw) +{ + return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_FD); +} + +/** + * ice_xlt_kb_get_rss - create rss xlt key build + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Key Builder table for RSS. + */ +static struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw) +{ + return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_RSS); +} + +#define ICE_XLT_KB_MASK GENMASK_ULL(5, 0) + +/** + * ice_xlt_kb_flag_get - aggregate 64 bits packet flag into 16 bits xlt flag + * @kb: xlt key build + * @pkt_flag: 64 bits packet flag + * + * Return: XLT flag or 0 if @pkt_flag = 0. + */ +u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag) +{ + struct ice_xlt_kb_entry *entry = &kb->entries[0]; + u16 flag = 0; + int i; + + /* check flag 15 */ + if (kb->flag15 & pkt_flag) + flag = BIT(ICE_XLT_KB_FLAG0_14_CNT); + + /* check flag 0 - 14 */ + for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) { + /* only check first entry */ + u16 idx = entry->flg0_14_sel[i] & ICE_XLT_KB_MASK; + + if (pkt_flag & BIT(idx)) + flag |= (u16)BIT(i); + } + + return flag; +} + +/*** Parser API ***/ +/** + * ice_parser_create - create a parser instance + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated parser instance or ERR_PTR + * in case of error. 
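+ *
+ * A minimal creation/teardown sketch (editor's illustration; error
+ * handling shortened):
+ *
+ *	struct ice_parser *psr;
+ *
+ *	psr = ice_parser_create(hw);
+ *	if (IS_ERR(psr))
+ *		return PTR_ERR(psr);
+ *	... run the parser, configure tunnel ports ...
+ *	ice_parser_destroy(psr);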
+ */ +struct ice_parser *ice_parser_create(struct ice_hw *hw) +{ + struct ice_parser *p; + void *err; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + p->hw = hw; + p->rt.psr = p; + + p->imem_table = ice_imem_table_get(hw); + if (IS_ERR(p->imem_table)) { + err = p->imem_table; + goto err; + } + + p->mi_table = ice_metainit_table_get(hw); + if (IS_ERR(p->mi_table)) { + err = p->mi_table; + goto err; + } + + p->pg_cam_table = ice_pg_cam_table_get(hw); + if (IS_ERR(p->pg_cam_table)) { + err = p->pg_cam_table; + goto err; + } + + p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw); + if (IS_ERR(p->pg_sp_cam_table)) { + err = p->pg_sp_cam_table; + goto err; + } + + p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw); + if (IS_ERR(p->pg_nm_cam_table)) { + err = p->pg_nm_cam_table; + goto err; + } + + p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw); + if (IS_ERR(p->pg_nm_sp_cam_table)) { + err = p->pg_nm_sp_cam_table; + goto err; + } + + p->bst_tcam_table = ice_bst_tcam_table_get(hw); + if (IS_ERR(p->bst_tcam_table)) { + err = p->bst_tcam_table; + goto err; + } + + p->bst_lbl_table = ice_bst_lbl_table_get(hw); + if (IS_ERR(p->bst_lbl_table)) { + err = p->bst_lbl_table; + goto err; + } + + p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw); + if (IS_ERR(p->ptype_mk_tcam_table)) { + err = p->ptype_mk_tcam_table; + goto err; + } + + p->mk_grp_table = ice_mk_grp_table_get(hw); + if (IS_ERR(p->mk_grp_table)) { + err = p->mk_grp_table; + goto err; + } + + p->proto_grp_table = ice_proto_grp_table_get(hw); + if (IS_ERR(p->proto_grp_table)) { + err = p->proto_grp_table; + goto err; + } + + p->flg_rd_table = ice_flg_rd_table_get(hw); + if (IS_ERR(p->flg_rd_table)) { + err = p->flg_rd_table; + goto err; + } + + p->xlt_kb_sw = ice_xlt_kb_get_sw(hw); + if (IS_ERR(p->xlt_kb_sw)) { + err = p->xlt_kb_sw; + goto err; + } + + p->xlt_kb_acl = ice_xlt_kb_get_acl(hw); + if (IS_ERR(p->xlt_kb_acl)) { + err = p->xlt_kb_acl; + goto err; + } + + p->xlt_kb_fd = ice_xlt_kb_get_fd(hw); + if (IS_ERR(p->xlt_kb_fd)) { + err = p->xlt_kb_fd; + goto err; + } + + p->xlt_kb_rss = ice_xlt_kb_get_rss(hw); + if (IS_ERR(p->xlt_kb_rss)) { + err = p->xlt_kb_rss; + goto err; + } + + return p; +err: + ice_parser_destroy(p); + return err; +} + +/** + * ice_parser_destroy - destroy a parser instance + * @psr: pointer to a parser instance + */ +void ice_parser_destroy(struct ice_parser *psr) +{ + kfree(psr->imem_table); + kfree(psr->mi_table); + kfree(psr->pg_cam_table); + kfree(psr->pg_sp_cam_table); + kfree(psr->pg_nm_cam_table); + kfree(psr->pg_nm_sp_cam_table); + kfree(psr->bst_tcam_table); + kfree(psr->bst_lbl_table); + kfree(psr->ptype_mk_tcam_table); + kfree(psr->mk_grp_table); + kfree(psr->proto_grp_table); + kfree(psr->flg_rd_table); + kfree(psr->xlt_kb_sw); + kfree(psr->xlt_kb_acl); + kfree(psr->xlt_kb_fd); + kfree(psr->xlt_kb_rss); + + kfree(psr); +} + +/** + * ice_parser_run - parse on a packet in binary and return the result + * @psr: pointer to a parser instance + * @pkt_buf: packet data + * @pkt_len: packet length + * @rslt: input/output parameter to save parser result. + * + * Return: 0 on success or errno. 
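+ *
+ * Usage sketch (editor's illustration; assumes @pkt_len does not
+ * exceed ICE_PARSER_MAX_PKT_LEN):
+ *
+ *	struct ice_parser_result rslt;
+ *	int err;
+ *
+ *	err = ice_parser_run(psr, pkt, pkt_len, &rslt);
+ *	if (!err && (hw->debug_mask & ICE_DBG_PARSER))
+ *		ice_parser_result_dump(hw, &rslt);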
+ */ +int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf, + int pkt_len, struct ice_parser_result *rslt) +{ + ice_parser_rt_reset(&psr->rt); + ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len); + + return ice_parser_rt_execute(&psr->rt, rslt); +} + +/** + * ice_parser_result_dump - dump a parser result info + * @hw: pointer to the hardware structure + * @rslt: parser result info to dump + */ +void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "ptype = %d\n", rslt->ptype); + for (i = 0; i < rslt->po_num; i++) + dev_info(dev, "proto = %d, offset = %d\n", + rslt->po[i].proto_id, rslt->po[i].offset); + + dev_info(dev, "flags_psr = 0x%016llx\n", rslt->flags_psr); + dev_info(dev, "flags_pkt = 0x%016llx\n", rslt->flags_pkt); + dev_info(dev, "flags_sw = 0x%04x\n", rslt->flags_sw); + dev_info(dev, "flags_fd = 0x%04x\n", rslt->flags_fd); + dev_info(dev, "flags_rss = 0x%04x\n", rslt->flags_rss); +} + +#define ICE_BT_VLD_KEY 0xFF +#define ICE_BT_INV_KEY 0xFE + +static void ice_bst_dvm_set(struct ice_parser *psr, enum ice_lbl_type type, + bool on) +{ + u16 i = 0; + + while (true) { + struct ice_bst_tcam_item *item; + u8 key; + + item = ice_bst_tcam_search(psr->bst_tcam_table, + psr->bst_lbl_table, + type, &i); + if (!item) + break; + + key = on ? ICE_BT_VLD_KEY : ICE_BT_INV_KEY; + item->key[ICE_BT_VM_OFF] = key; + item->key_inv[ICE_BT_VM_OFF] = key; + i++; + } +} + +/** + * ice_parser_dvm_set - configure double vlan mode for parser + * @psr: pointer to a parser instance + * @on: true to turn on; false to turn off + */ +void ice_parser_dvm_set(struct ice_parser *psr, bool on) +{ + ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_DVM, on); + ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_SVM, !on); +} + +static int ice_tunnel_port_set(struct ice_parser *psr, enum ice_lbl_type type, + u16 udp_port, bool on) +{ + u8 *buf = (u8 *)&udp_port; + u16 i = 0; + + while (true) { + struct ice_bst_tcam_item *item; + + item = ice_bst_tcam_search(psr->bst_tcam_table, + psr->bst_lbl_table, + type, &i); + if (!item) + break; + + /* found empty slot to add */ + if (on && item->key[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY && + item->key_inv[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY) { + item->key_inv[ICE_BT_TUN_PORT_OFF_L] = + buf[ICE_UDP_PORT_OFF_L]; + item->key_inv[ICE_BT_TUN_PORT_OFF_H] = + buf[ICE_UDP_PORT_OFF_H]; + + item->key[ICE_BT_TUN_PORT_OFF_L] = + ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_L]; + item->key[ICE_BT_TUN_PORT_OFF_H] = + ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_H]; + + return 0; + /* found a matched slot to delete */ + } else if (!on && + (item->key_inv[ICE_BT_TUN_PORT_OFF_L] == + buf[ICE_UDP_PORT_OFF_L] || + item->key_inv[ICE_BT_TUN_PORT_OFF_H] == + buf[ICE_UDP_PORT_OFF_H])) { + item->key_inv[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY; + item->key_inv[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY; + + item->key[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY; + item->key[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY; + + return 0; + } + i++; + } + + return -EINVAL; +} + +/** + * ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: vxlan tunnel port in UDP header + * @on: true to turn on; false to turn off + * + * Return: 0 on success or errno on failure. 
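+ *
+ * For example (editor's illustration), to add and later remove the
+ * IANA default VXLAN port:
+ *
+ *	err = ice_parser_vxlan_tunnel_set(psr, 4789, true);
+ *	...
+ *	err = ice_parser_vxlan_tunnel_set(psr, 4789, false);
+ *
+ * -EINVAL is returned when no suitable Boost TCAM slot is found.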
+ */ +int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_VXLAN, udp_port, on); +} + +/** + * ice_parser_geneve_tunnel_set - configure geneve tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: geneve tunnel port in UDP header + * @on: true to turn on; false to turn off + * + * Return: 0 on success or errno on failure. + */ +int ice_parser_geneve_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_GENEVE, udp_port, on); +} + +/** + * ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: ecpri tunnel port in UDP header + * @on: true to turn on; false to turn off + * + * Return: 0 on success or errno on failure. + */ +int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_UDP_ECPRI, + udp_port, on); +} + +/** + * ice_nearest_proto_id - find nearest protocol ID + * @rslt: pointer to a parser result instance + * @offset: a min value for the protocol offset + * @proto_id: the protocol ID (output) + * @proto_off: the protocol offset (output) + * + * From the protocols in @rslt, find the nearest protocol that has offset + * larger than @offset. + * + * Return: if true, the protocol's ID and offset + */ +static bool ice_nearest_proto_id(struct ice_parser_result *rslt, u16 offset, + u8 *proto_id, u16 *proto_off) +{ + u16 dist = U16_MAX; + u8 proto = 0; + int i; + + for (i = 0; i < rslt->po_num; i++) { + if (offset < rslt->po[i].offset) + continue; + if (offset - rslt->po[i].offset < dist) { + proto = rslt->po[i].proto_id; + dist = offset - rslt->po[i].offset; + } + } + + if (dist % 2) + return false; + + *proto_id = proto; + *proto_off = dist; + + return true; +} + +/* default flag mask to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2 + * In future, the flag masks should learn from DDP + */ +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW 0x4002 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL 0x0000 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD 0x6080 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS 0x6010 + +/** + * ice_parser_profile_init - initialize a FXP profile based on parser result + * @rslt: a instance of a parser result + * @pkt_buf: packet data buffer + * @msk_buf: packet mask buffer + * @buf_len: packet length + * @blk: FXP pipeline stage + * @prof: input/output parameter to save the profile + * + * Return: 0 on success or errno on failure. 
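+ *
+ * For each byte offset where the two-byte window of @msk_buf is
+ * non-zero, one field vector entry is added, carrying the ID and
+ * relative offset of the nearest preceding protocol header from @rslt
+ * plus the corresponding spec/mask words (-EINVAL once
+ * ICE_PARSER_FV_MAX entries would be exceeded). A typical call chain
+ * (editor's illustration):
+ *
+ *	err = ice_parser_run(psr, pkt_buf, buf_len, &rslt);
+ *	if (!err)
+ *		err = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+ *					      buf_len, ICE_BLK_FD, &prof);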
+ */ +int ice_parser_profile_init(struct ice_parser_result *rslt, + const u8 *pkt_buf, const u8 *msk_buf, + int buf_len, enum ice_block blk, + struct ice_parser_profile *prof) +{ + u8 proto_id = U8_MAX; + u16 proto_off = 0; + u16 off; + + memset(prof, 0, sizeof(*prof)); + set_bit(rslt->ptype, prof->ptypes); + if (blk == ICE_BLK_SW) { + prof->flags = rslt->flags_sw; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW; + } else if (blk == ICE_BLK_ACL) { + prof->flags = rslt->flags_acl; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL; + } else if (blk == ICE_BLK_FD) { + prof->flags = rslt->flags_fd; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD; + } else if (blk == ICE_BLK_RSS) { + prof->flags = rslt->flags_rss; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS; + } else { + return -EINVAL; + } + + for (off = 0; off < buf_len - 1; off++) { + if (msk_buf[off] == 0 && msk_buf[off + 1] == 0) + continue; + if (!ice_nearest_proto_id(rslt, off, &proto_id, &proto_off)) + continue; + if (prof->fv_num >= ICE_PARSER_FV_MAX) + return -EINVAL; + + prof->fv[prof->fv_num].proto_id = proto_id; + prof->fv[prof->fv_num].offset = proto_off; + prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off]; + prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off]; + prof->fv_num++; + } + + return 0; +} + +/** + * ice_parser_profile_dump - dump an FXP profile info + * @hw: pointer to the hardware structure + * @prof: profile info to dump + */ +void ice_parser_profile_dump(struct ice_hw *hw, + struct ice_parser_profile *prof) +{ + struct device *dev = ice_hw_to_dev(hw); + u16 i; + + dev_info(dev, "ptypes:\n"); + for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++) + if (test_bit(i, prof->ptypes)) + dev_info(dev, "\t%u\n", i); + + for (i = 0; i < prof->fv_num; i++) + dev_info(dev, "proto = %u, offset = %2u, spec = 0x%04x, mask = 0x%04x\n", + prof->fv[i].proto_id, prof->fv[i].offset, + prof->fv[i].spec, prof->fv[i].msk); + + dev_info(dev, "flags = 0x%04x\n", prof->flags); + dev_info(dev, "flags_msk = 0x%04x\n", prof->flags_msk); +} diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h new file mode 100644 index 00000000000000..6509d807627cee --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_parser.h @@ -0,0 +1,540 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef _ICE_PARSER_H_ +#define _ICE_PARSER_H_ + +#define ICE_SEC_DATA_OFFSET 4 +#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48 +#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16 +#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17 +#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12 +#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13 +#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88 +#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8 +#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1 + +#define ICE_SEC_LBL_DATA_OFFSET 2 +#define ICE_SID_LBL_ENTRY_SIZE 66 + +/*** ICE_SID_RXPARSER_IMEM section ***/ +#define ICE_IMEM_TABLE_SIZE 192 + +/* TCAM boost Master; if bit is set, and TCAM hit, TCAM output overrides iMEM + * output. 
+ */ +struct ice_bst_main { + bool alu0; + bool alu1; + bool alu2; + bool pg; +}; + +struct ice_bst_keybuilder { + u8 prio; /* 0-3: PG precedence within ALUs (3 highest) */ + bool tsr_ctrl; /* TCAM Search Register control */ +}; + +/* Next protocol Key builder */ +struct ice_np_keybuilder { + u8 opc; + u8 start_reg0; + u8 len_reg1; +}; + +enum ice_np_keybuilder_opcode { + ICE_NPKB_OPC_EXTRACT = 0, + ICE_NPKB_OPC_BUILD = 1, + ICE_NPKB_OPC_BYPASS = 2, +}; + +/* Parse Graph Key builder */ +struct ice_pg_keybuilder { + bool flag0_ena; + bool flag1_ena; + bool flag2_ena; + bool flag3_ena; + u8 flag0_idx; + u8 flag1_idx; + u8 flag2_idx; + u8 flag3_idx; + u8 alu_reg_idx; +}; + +enum ice_alu_idx { + ICE_ALU0_IDX = 0, + ICE_ALU1_IDX = 1, + ICE_ALU2_IDX = 2, +}; + +enum ice_alu_opcode { + ICE_ALU_PARK = 0, + ICE_ALU_MOV_ADD = 1, + ICE_ALU_ADD = 2, + ICE_ALU_MOV_AND = 4, + ICE_ALU_AND = 5, + ICE_ALU_AND_IMM = 6, + ICE_ALU_MOV_OR = 7, + ICE_ALU_OR = 8, + ICE_ALU_MOV_XOR = 9, + ICE_ALU_XOR = 10, + ICE_ALU_NOP = 11, + ICE_ALU_BR = 12, + ICE_ALU_BREQ = 13, + ICE_ALU_BRNEQ = 14, + ICE_ALU_BRGT = 15, + ICE_ALU_BRLT = 16, + ICE_ALU_BRGEQ = 17, + ICE_ALU_BRLEG = 18, + ICE_ALU_SETEQ = 19, + ICE_ALU_ANDEQ = 20, + ICE_ALU_OREQ = 21, + ICE_ALU_SETNEQ = 22, + ICE_ALU_ANDNEQ = 23, + ICE_ALU_ORNEQ = 24, + ICE_ALU_SETGT = 25, + ICE_ALU_ANDGT = 26, + ICE_ALU_ORGT = 27, + ICE_ALU_SETLT = 28, + ICE_ALU_ANDLT = 29, + ICE_ALU_ORLT = 30, + ICE_ALU_MOV_SUB = 31, + ICE_ALU_SUB = 32, + ICE_ALU_INVALID = 64, +}; + +enum ice_proto_off_opcode { + ICE_PO_OFF_REMAIN = 0, + ICE_PO_OFF_HDR_ADD = 1, + ICE_PO_OFF_HDR_SUB = 2, +}; + +struct ice_alu { + enum ice_alu_opcode opc; + u8 src_start; + u8 src_len; + bool shift_xlate_sel; + u8 shift_xlate_key; + u8 src_reg_id; + u8 dst_reg_id; + bool inc0; + bool inc1; + u8 proto_offset_opc; + u8 proto_offset; + u8 branch_addr; + u16 imm; + bool dedicate_flags_ena; + u8 dst_start; + u8 dst_len; + bool flags_extr_imm; + u8 flags_start_imm; +}; + +/* Parser program code (iMEM) */ +struct ice_imem_item { + u16 idx; + struct ice_bst_main b_m; + struct ice_bst_keybuilder b_kb; + u8 pg_prio; + struct ice_np_keybuilder np_kb; + struct ice_pg_keybuilder pg_kb; + struct ice_alu alu0; + struct ice_alu alu1; + struct ice_alu alu2; +}; + +/*** ICE_SID_RXPARSER_METADATA_INIT section ***/ +#define ICE_METAINIT_TABLE_SIZE 16 + +/* Metadata Initialization item */ +struct ice_metainit_item { + u16 idx; + + u8 tsr; /* TCAM Search key Register */ + u16 ho; /* Header Offset register */ + u16 pc; /* Program Counter register */ + u16 pg_rn; /* Parse Graph Root Node */ + u8 cd; /* Control Domain ID */ + + /* General Purpose Registers */ + bool gpr_a_ctrl; + u8 gpr_a_data_mdid; + u8 gpr_a_data_start; + u8 gpr_a_data_len; + u8 gpr_a_id; + + bool gpr_b_ctrl; + u8 gpr_b_data_mdid; + u8 gpr_b_data_start; + u8 gpr_b_data_len; + u8 gpr_b_id; + + bool gpr_c_ctrl; + u8 gpr_c_data_mdid; + u8 gpr_c_data_start; + u8 gpr_c_data_len; + u8 gpr_c_id; + + bool gpr_d_ctrl; + u8 gpr_d_data_mdid; + u8 gpr_d_data_start; + u8 gpr_d_data_len; + u8 gpr_d_id; + + u64 flags; /* Initial value for all flags */ +}; + +/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL, + * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_CAM + * sections ***/ +#define ICE_PG_CAM_TABLE_SIZE 2048 +#define ICE_PG_SP_CAM_TABLE_SIZE 128 +#define ICE_PG_NM_CAM_TABLE_SIZE 1024 +#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64 + +struct ice_pg_cam_key { + bool valid; + struct_group_attr(val, __packed, + u16 node_id; /* Node ID of protocol in parse graph */ + bool 
flag0; + bool flag1; + bool flag2; + bool flag3; + u8 boost_idx; /* Boost TCAM match index */ + u16 alu_reg; + u32 next_proto; /* next Protocol value (must be last) */ + ); +}; + +struct ice_pg_nm_cam_key { + bool valid; + struct_group_attr(val, __packed, + u16 node_id; + bool flag0; + bool flag1; + bool flag2; + bool flag3; + u8 boost_idx; + u16 alu_reg; + ); +}; + +struct ice_pg_cam_action { + u16 next_node; /* Parser Node ID for the next round */ + u8 next_pc; /* next Program Counter */ + bool is_pg; /* is protocol group */ + u8 proto_id; /* protocol ID or proto group ID */ + bool is_mg; /* is marker group */ + u8 marker_id; /* marker ID or marker group ID */ + bool is_last_round; + bool ho_polarity; /* header offset polarity */ + u16 ho_inc; +}; + +/* Parse Graph item */ +struct ice_pg_cam_item { + u16 idx; + struct ice_pg_cam_key key; + struct ice_pg_cam_action action; +}; + +/* Parse Graph No Match item */ +struct ice_pg_nm_cam_item { + u16 idx; + struct ice_pg_nm_cam_key key; + struct ice_pg_cam_action action; +}; + +struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table, + int size, struct ice_pg_cam_key *key); +struct ice_pg_nm_cam_item * +ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, + struct ice_pg_cam_key *key); + +/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/ +#define ICE_BST_TCAM_TABLE_SIZE 256 +#define ICE_BST_TCAM_KEY_SIZE 20 +#define ICE_BST_KEY_TCAM_SIZE 19 + +/* Boost TCAM item */ +struct ice_bst_tcam_item { + u16 addr; + u8 key[ICE_BST_TCAM_KEY_SIZE]; + u8 key_inv[ICE_BST_TCAM_KEY_SIZE]; + u8 hit_idx_grp; + u8 pg_prio; + struct ice_np_keybuilder np_kb; + struct ice_pg_keybuilder pg_kb; + struct ice_alu alu0; + struct ice_alu alu1; + struct ice_alu alu2; +}; + +#define ICE_LBL_LEN 64 +#define ICE_LBL_BST_DVM "BOOST_MAC_VLAN_DVM" +#define ICE_LBL_BST_SVM "BOOST_MAC_VLAN_SVM" +#define ICE_LBL_TNL_VXLAN "TNL_VXLAN" +#define ICE_LBL_TNL_GENEVE "TNL_GENEVE" +#define ICE_LBL_TNL_UDP_ECPRI "TNL_UDP_ECPRI" + +enum ice_lbl_type { + ICE_LBL_BST_TYPE_UNKNOWN, + ICE_LBL_BST_TYPE_DVM, + ICE_LBL_BST_TYPE_SVM, + ICE_LBL_BST_TYPE_VXLAN, + ICE_LBL_BST_TYPE_GENEVE, + ICE_LBL_BST_TYPE_UDP_ECPRI, +}; + +struct ice_lbl_item { + u16 idx; + char label[ICE_LBL_LEN]; + + /* must be at the end, not part of the DDP section */ + enum ice_lbl_type type; +}; + +struct ice_bst_tcam_item * +ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat); +struct ice_bst_tcam_item * +ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, + struct ice_lbl_item *lbl_table, + enum ice_lbl_type type, u16 *start); + +/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/ +#define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024 +#define ICE_PTYPE_MK_TCAM_KEY_SIZE 10 + +struct ice_ptype_mk_tcam_item { + u16 address; + u16 ptype; + u8 key[ICE_PTYPE_MK_TCAM_KEY_SIZE]; + u8 key_inv[ICE_PTYPE_MK_TCAM_KEY_SIZE]; +} __packed; + +struct ice_ptype_mk_tcam_item * +ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table, + u8 *pat, int len); +/*** ICE_SID_RXPARSER_MARKER_GRP section ***/ +#define ICE_MK_GRP_TABLE_SIZE 128 +#define ICE_MK_COUNT_PER_GRP 8 + +/* Marker Group item */ +struct ice_mk_grp_item { + int idx; + u8 markers[ICE_MK_COUNT_PER_GRP]; +}; + +/*** ICE_SID_RXPARSER_PROTO_GRP section ***/ +#define ICE_PROTO_COUNT_PER_GRP 8 +#define ICE_PROTO_GRP_TABLE_SIZE 192 +#define ICE_PROTO_GRP_ITEM_SIZE 22 +struct ice_proto_off { + bool polarity; /* true: positive, false: negative */ + u8 proto_id; + u16 offset; /* 10 bit protocol offset */ +}; + +/* Protocol 
Group item */ +struct ice_proto_grp_item { + u16 idx; + struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP]; +}; + +/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/ +#define ICE_FLG_RD_TABLE_SIZE 64 +#define ICE_FLG_RDT_SIZE 64 + +/* Flags Redirection item */ +struct ice_flg_rd_item { + u16 idx; + bool expose; + u8 intr_flg_id; /* Internal Flag ID */ +}; + +u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg); + +/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL, + * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS + * sections ***/ +#define ICE_XLT_KB_FLAG0_14_CNT 15 +#define ICE_XLT_KB_TBL_CNT 8 +#define ICE_XLT_KB_TBL_ENTRY_SIZE 24 + +struct ice_xlt_kb_entry { + u8 xlt1_ad_sel; + u8 xlt2_ad_sel; + u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT]; + u8 xlt1_md_sel; + u8 xlt2_md_sel; +}; + +/* XLT Key Builder */ +struct ice_xlt_kb { + u8 xlt1_pm; /* XLT1 Partition Mode */ + u8 xlt2_pm; /* XLT2 Partition Mode */ + u8 prof_id_pm; /* Profile ID Partition Mode */ + u64 flag15; + + struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT]; +}; + +u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag); + +/*** Parser API ***/ +#define ICE_GPR_HV_IDX 64 +#define ICE_GPR_HV_SIZE 32 +#define ICE_GPR_ERR_IDX 84 +#define ICE_GPR_FLG_IDX 104 +#define ICE_GPR_FLG_SIZE 16 + +#define ICE_GPR_TSR_IDX 108 /* TSR: TCAM Search Register */ +#define ICE_GPR_NN_IDX 109 /* NN: Next Parsing Cycle Node ID */ +#define ICE_GPR_HO_IDX 110 /* HO: Next Parsing Cycle hdr Offset */ +#define ICE_GPR_NP_IDX 111 /* NP: Next Parsing Cycle */ + +#define ICE_PARSER_MAX_PKT_LEN 504 +#define ICE_PARSER_PKT_REV 32 +#define ICE_PARSER_GPR_NUM 128 +#define ICE_PARSER_FLG_NUM 64 +#define ICE_PARSER_ERR_NUM 16 +#define ICE_BST_KEY_SIZE 10 +#define ICE_MARKER_ID_SIZE 9 +#define ICE_MARKER_MAX_SIZE \ + (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1) +#define ICE_MARKER_ID_NUM 8 +#define ICE_PO_PAIR_SIZE 256 + +struct ice_gpr_pu { + /* array of flags to indicate if GRP needs to be updated */ + bool gpr_val_upd[ICE_PARSER_GPR_NUM]; + u16 gpr_val[ICE_PARSER_GPR_NUM]; + u64 flg_msk; + u64 flg_val; + u16 err_msk; + u16 err_val; +}; + +enum ice_pg_prio { + ICE_PG_P0 = 0, + ICE_PG_P1 = 1, + ICE_PG_P2 = 2, + ICE_PG_P3 = 3, +}; + +struct ice_parser_rt { + struct ice_parser *psr; + u16 gpr[ICE_PARSER_GPR_NUM]; + u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV]; + u16 pkt_len; + u16 po; + u8 bst_key[ICE_BST_KEY_SIZE]; + struct ice_pg_cam_key pg_key; + struct ice_alu *alu0; + struct ice_alu *alu1; + struct ice_alu *alu2; + struct ice_pg_cam_action *action; + u8 pg_prio; + struct ice_gpr_pu pu; + u8 markers[ICE_MARKER_ID_SIZE]; + bool protocols[ICE_PO_PAIR_SIZE]; + u16 offsets[ICE_PO_PAIR_SIZE]; +}; + +struct ice_parser_proto_off { + u8 proto_id; /* hardware protocol ID */ + u16 offset; /* offset from the start of the protocol header */ +}; + +#define ICE_PARSER_PROTO_OFF_PAIR_SIZE 16 +#define ICE_PARSER_FLAG_PSR_SIZE 8 +#define ICE_PARSER_FV_SIZE 48 +#define ICE_PARSER_FV_MAX 24 +#define ICE_BT_TUN_PORT_OFF_H 16 +#define ICE_BT_TUN_PORT_OFF_L 15 +#define ICE_BT_VM_OFF 0 +#define ICE_UDP_PORT_OFF_H 1 +#define ICE_UDP_PORT_OFF_L 0 + +struct ice_parser_result { + u16 ptype; /* 16 bits hardware PTYPE */ + /* array of protocol and header offset pairs */ + struct ice_parser_proto_off po[ICE_PARSER_PROTO_OFF_PAIR_SIZE]; + int po_num; /* # of protocol-offset pairs must <= 16 */ + u64 flags_psr; /* parser flags */ + u64 flags_pkt; /* packet flags */ + u16 flags_sw; /* key builder flags for SW */ + u16 flags_acl; /* key builder flags for 
ACL */ + u16 flags_fd; /* key builder flags for FD */ + u16 flags_rss; /* key builder flags for RSS */ +}; + +void ice_parser_rt_reset(struct ice_parser_rt *rt); +void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, + int pkt_len); +int ice_parser_rt_execute(struct ice_parser_rt *rt, + struct ice_parser_result *rslt); + +struct ice_parser { + struct ice_hw *hw; /* pointer to the hardware structure */ + + struct ice_imem_item *imem_table; + struct ice_metainit_item *mi_table; + + struct ice_pg_cam_item *pg_cam_table; + struct ice_pg_cam_item *pg_sp_cam_table; + struct ice_pg_nm_cam_item *pg_nm_cam_table; + struct ice_pg_nm_cam_item *pg_nm_sp_cam_table; + + struct ice_bst_tcam_item *bst_tcam_table; + struct ice_lbl_item *bst_lbl_table; + struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table; + struct ice_mk_grp_item *mk_grp_table; + struct ice_proto_grp_item *proto_grp_table; + struct ice_flg_rd_item *flg_rd_table; + + struct ice_xlt_kb *xlt_kb_sw; + struct ice_xlt_kb *xlt_kb_acl; + struct ice_xlt_kb *xlt_kb_fd; + struct ice_xlt_kb *xlt_kb_rss; + + struct ice_parser_rt rt; +}; + +struct ice_parser *ice_parser_create(struct ice_hw *hw); +void ice_parser_destroy(struct ice_parser *psr); +void ice_parser_dvm_set(struct ice_parser *psr, bool on); +int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); +int ice_parser_geneve_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); +int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); +int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf, + int pkt_len, struct ice_parser_result *rslt); +void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt); + +struct ice_parser_fv { + u8 proto_id; /* hardware protocol ID */ + u16 offset; /* offset from the start of the protocol header */ + u16 spec; /* pattern to match */ + u16 msk; /* pattern mask */ +}; + +struct ice_parser_profile { + /* array of field vectors */ + struct ice_parser_fv fv[ICE_PARSER_FV_SIZE]; + int fv_num; /* # of field vectors must <= 48 */ + u16 flags; /* key builder flags */ + u16 flags_msk; /* key builder flag mask */ + + DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPE bitmap */ +}; + +int ice_parser_profile_init(struct ice_parser_result *rslt, + const u8 *pkt_buf, const u8 *msk_buf, + int buf_len, enum ice_block blk, + struct ice_parser_profile *prof); +void ice_parser_profile_dump(struct ice_hw *hw, + struct ice_parser_profile *prof); +#endif /* _ICE_PARSER_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c new file mode 100644 index 00000000000000..dedf5e854e4b76 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c @@ -0,0 +1,861 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 Intel Corporation */ + +#include "ice_common.h" + +static void ice_rt_tsr_set(struct ice_parser_rt *rt, u16 tsr) +{ + rt->gpr[ICE_GPR_TSR_IDX] = tsr; +} + +static void ice_rt_ho_set(struct ice_parser_rt *rt, u16 ho) +{ + rt->gpr[ICE_GPR_HO_IDX] = ho; + memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE); +} + +static void ice_rt_np_set(struct ice_parser_rt *rt, u16 pc) +{ + rt->gpr[ICE_GPR_NP_IDX] = pc; +} + +static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node) +{ + rt->gpr[ICE_GPR_NN_IDX] = node; +} + +static void +ice_rt_flag_set(struct ice_parser_rt *rt, unsigned int idx, bool set) +{ + struct ice_hw *hw = rt->psr->hw; + unsigned int word, id; + + word = idx / ICE_GPR_FLG_SIZE; + id = 
idx % ICE_GPR_FLG_SIZE; + + if (set) { + rt->gpr[ICE_GPR_FLG_IDX + word] |= (u16)BIT(id); + ice_debug(hw, ICE_DBG_PARSER, "Set parser flag %u\n", idx); + } else { + rt->gpr[ICE_GPR_FLG_IDX + word] &= ~(u16)BIT(id); + ice_debug(hw, ICE_DBG_PARSER, "Clear parser flag %u\n", idx); + } +} + +static void ice_rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val) +{ + struct ice_hw *hw = rt->psr->hw; + + if (idx == ICE_GPR_HO_IDX) + ice_rt_ho_set(rt, val); + else + rt->gpr[idx] = val; + + ice_debug(hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", idx, val); +} + +static void ice_rt_err_set(struct ice_parser_rt *rt, unsigned int idx, bool set) +{ + struct ice_hw *hw = rt->psr->hw; + + if (set) { + rt->gpr[ICE_GPR_ERR_IDX] |= (u16)BIT(idx); + ice_debug(hw, ICE_DBG_PARSER, "Set parser error %u\n", idx); + } else { + rt->gpr[ICE_GPR_ERR_IDX] &= ~(u16)BIT(idx); + ice_debug(hw, ICE_DBG_PARSER, "Reset parser error %u\n", idx); + } +} + +/** + * ice_parser_rt_reset - reset the parser runtime + * @rt: pointer to the parser runtime + */ +void ice_parser_rt_reset(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_metainit_item *mi; + unsigned int i; + + mi = &psr->mi_table[0]; + + memset(rt, 0, sizeof(*rt)); + rt->psr = psr; + + ice_rt_tsr_set(rt, mi->tsr); + ice_rt_ho_set(rt, mi->ho); + ice_rt_np_set(rt, mi->pc); + ice_rt_nn_set(rt, mi->pg_rn); + + for (i = 0; i < ICE_PARSER_FLG_NUM; i++) { + if (mi->flags & BIT(i)) + ice_rt_flag_set(rt, i, true); + } +} + +/** + * ice_parser_rt_pktbuf_set - set a packet into parser runtime + * @rt: pointer to the parser runtime + * @pkt_buf: buffer with packet data + * @pkt_len: packet buffer length + */ +void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, + int pkt_len) +{ + int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len); + u16 ho = rt->gpr[ICE_GPR_HO_IDX]; + + memcpy(rt->pkt_buf, pkt_buf, len); + rt->pkt_len = pkt_len; + + memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE); +} + +static void ice_bst_key_init(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + u8 tsr = (u8)rt->gpr[ICE_GPR_TSR_IDX]; + u16 ho = rt->gpr[ICE_GPR_HO_IDX]; + u8 *key = rt->bst_key; + int idd, i; + + idd = ICE_BST_TCAM_KEY_SIZE - 1; + if (imem->b_kb.tsr_ctrl) + key[idd] = tsr; + else + key[idd] = imem->b_kb.prio; + + idd = ICE_BST_KEY_TCAM_SIZE - 1; + for (i = idd; i >= 0; i--) { + int j; + + j = ho + idd - i; + if (j < ICE_PARSER_MAX_PKT_LEN) + key[i] = rt->pkt_buf[ho + idd - i]; + else + key[i] = 0; + } + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n"); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + key[0], key[1], key[2], key[3], key[4], + key[5], key[6], key[7], key[8], key[9]); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n"); +} + +static u16 ice_bit_rev_u16(u16 v, int len) +{ + return bitrev16(v) >> (BITS_PER_TYPE(v) - len); +} + +static u32 ice_bit_rev_u32(u32 v, int len) +{ + return bitrev32(v) >> (BITS_PER_TYPE(v) - len); +} + +static u32 ice_hv_bit_sel(struct ice_parser_rt *rt, int start, int len) +{ + int offset; + u32 buf[2]; + u64 val; + + offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16)); + + memcpy(buf, &rt->gpr[offset], sizeof(buf)); + + buf[0] = bitrev8x4(buf[0]); + buf[1] = bitrev8x4(buf[1]); + + val = *(u64 *)buf; + val >>= start % BITS_PER_TYPE(u16); + + return ice_bit_rev_u32(val, len); +} + +static u32 ice_pk_build(struct ice_parser_rt *rt, + struct ice_np_keybuilder *kb) +{ + if (kb->opc == ICE_NPKB_OPC_EXTRACT) + return 
ice_hv_bit_sel(rt, kb->start_reg0, kb->len_reg1); + else if (kb->opc == ICE_NPKB_OPC_BUILD) + return rt->gpr[kb->start_reg0] | + ((u32)rt->gpr[kb->len_reg1] << BITS_PER_TYPE(u16)); + else if (kb->opc == ICE_NPKB_OPC_BYPASS) + return 0; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported OP Code %u\n", + kb->opc); + return U32_MAX; +} + +static bool ice_flag_get(struct ice_parser_rt *rt, unsigned int index) +{ + int word = index / ICE_GPR_FLG_SIZE; + int id = index % ICE_GPR_FLG_SIZE; + + return !!(rt->gpr[ICE_GPR_FLG_IDX + word] & (u16)BIT(id)); +} + +static int ice_imem_pgk_init(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + memset(&rt->pg_key, 0, sizeof(rt->pg_key)); + rt->pg_key.next_proto = ice_pk_build(rt, &imem->np_kb); + if (rt->pg_key.next_proto == U32_MAX) + return -EINVAL; + + if (imem->pg_kb.flag0_ena) + rt->pg_key.flag0 = ice_flag_get(rt, imem->pg_kb.flag0_idx); + if (imem->pg_kb.flag1_ena) + rt->pg_key.flag1 = ice_flag_get(rt, imem->pg_kb.flag1_idx); + if (imem->pg_kb.flag2_ena) + rt->pg_key.flag2 = ice_flag_get(rt, imem->pg_kb.flag2_idx); + if (imem->pg_kb.flag3_ena) + rt->pg_key.flag3 = ice_flag_get(rt, imem->pg_kb.flag3_idx); + + rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx]; + rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n", + rt->pg_key.node_id, + rt->pg_key.flag0, + rt->pg_key.flag1, + rt->pg_key.flag2, + rt->pg_key.flag3, + rt->pg_key.boost_idx, + rt->pg_key.alu_reg, + rt->pg_key.next_proto); + + return 0; +} + +static void ice_imem_alu0_set(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + rt->alu0 = &imem->alu0; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n", + imem->idx); +} + +static void ice_imem_alu1_set(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + rt->alu1 = &imem->alu1; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n", + imem->idx); +} + +static void ice_imem_alu2_set(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + rt->alu2 = &imem->alu2; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n", + imem->idx); +} + +static void ice_imem_pgp_set(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + rt->pg_prio = imem->pg_prio; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n", + rt->pg_prio, imem->idx); +} + +static int ice_bst_pgk_init(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + memset(&rt->pg_key, 0, sizeof(rt->pg_key)); + rt->pg_key.boost_idx = bst->hit_idx_grp; + rt->pg_key.next_proto = ice_pk_build(rt, &bst->np_kb); + if (rt->pg_key.next_proto == U32_MAX) + return -EINVAL; + + if (bst->pg_kb.flag0_ena) + rt->pg_key.flag0 = ice_flag_get(rt, bst->pg_kb.flag0_idx); + if (bst->pg_kb.flag1_ena) + rt->pg_key.flag1 = ice_flag_get(rt, bst->pg_kb.flag1_idx); + if (bst->pg_kb.flag2_ena) + rt->pg_key.flag2 = ice_flag_get(rt, bst->pg_kb.flag2_idx); + if (bst->pg_kb.flag3_ena) + rt->pg_key.flag3 = ice_flag_get(rt, bst->pg_kb.flag3_idx); + + rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx]; + rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n", + rt->pg_key.node_id, + rt->pg_key.flag0, + rt->pg_key.flag1, + rt->pg_key.flag2, + rt->pg_key.flag3, + rt->pg_key.boost_idx, + 
rt->pg_key.alu_reg, + rt->pg_key.next_proto); + + return 0; +} + +static void ice_bst_alu0_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu0 = &bst->alu0; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n", + bst->addr); +} + +static void ice_bst_alu1_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu1 = &bst->alu1; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n", + bst->addr); +} + +static void ice_bst_alu2_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu2 = &bst->alu2; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n", + bst->addr); +} + +static void ice_bst_pgp_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->pg_prio = bst->pg_prio; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n", + rt->pg_prio, bst->addr); +} + +static struct ice_pg_cam_item *ice_rt_pg_cam_match(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_pg_cam_item *item; + + item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE, + &rt->pg_key); + if (!item) + item = ice_pg_cam_match(psr->pg_sp_cam_table, + ICE_PG_SP_CAM_TABLE_SIZE, &rt->pg_key); + return item; +} + +static +struct ice_pg_nm_cam_item *ice_rt_pg_nm_cam_match(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_pg_nm_cam_item *item; + + item = ice_pg_nm_cam_match(psr->pg_nm_cam_table, + ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key); + + if (!item) + item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table, + ICE_PG_NM_SP_CAM_TABLE_SIZE, + &rt->pg_key); + return item; +} + +static void ice_gpr_add(struct ice_parser_rt *rt, int idx, u16 val) +{ + rt->pu.gpr_val_upd[idx] = true; + rt->pu.gpr_val[idx] = val; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n", + idx, val); +} + +static void ice_pg_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n"); + + ice_gpr_add(rt, ICE_GPR_NP_IDX, rt->action->next_pc); + ice_gpr_add(rt, ICE_GPR_NN_IDX, rt->action->next_node); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n"); +} + +static void ice_flg_add(struct ice_parser_rt *rt, int idx, bool val) +{ + rt->pu.flg_msk |= BIT_ULL(idx); + if (val) + rt->pu.flg_val |= BIT_ULL(idx); + else + rt->pu.flg_val &= ~BIT_ULL(idx); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n", + idx, val); +} + +static void ice_flg_update(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + u32 hv_bit_sel; + int i; + + if (!alu->dedicate_flags_ena) + return; + + if (alu->flags_extr_imm) { + for (i = 0; i < alu->dst_len; i++) + ice_flg_add(rt, alu->dst_start + i, + !!(alu->flags_start_imm & BIT(i))); + } else { + for (i = 0; i < alu->dst_len; i++) { + hv_bit_sel = ice_hv_bit_sel(rt, + alu->flags_start_imm + i, + 1); + ice_flg_add(rt, alu->dst_start + i, !!hv_bit_sel); + } + } +} + +static void ice_po_update(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + if (alu->proto_offset_opc == ICE_PO_OFF_HDR_ADD) + rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] + alu->proto_offset); + else if (alu->proto_offset_opc == ICE_PO_OFF_HDR_SUB) + rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] - alu->proto_offset); + else if (alu->proto_offset_opc == ICE_PO_OFF_REMAIN) + rt->po = rt->gpr[ICE_GPR_HO_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n", + rt->po); +} + +static u16 
ice_reg_bit_sel(struct ice_parser_rt *rt, int reg_idx, + int start, int len) +{ + int offset; + u32 val; + + offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16)); + + memcpy(&val, &rt->gpr[offset], sizeof(val)); + + val = bitrev8x4(val); + val >>= start % BITS_PER_TYPE(u16); + + return ice_bit_rev_u16(val, len); +} + +static void ice_err_add(struct ice_parser_rt *rt, int idx, bool val) +{ + rt->pu.err_msk |= (u16)BIT(idx); + if (val) + rt->pu.flg_val |= (u64)BIT_ULL(idx); + else + rt->pu.flg_val &= ~(u64)BIT_ULL(idx); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n", + idx, val); +} + +static void ice_dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu, + bool val) +{ + u16 flg_idx; + + if (alu->dedicate_flags_ena) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n", + alu->opc); + return; + } + + if (alu->dst_reg_id == ICE_GPR_ERR_IDX) { + if (alu->dst_start >= ICE_PARSER_ERR_NUM) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n", + alu->dst_start); + return; + } + ice_err_add(rt, alu->dst_start, val); + } else if (alu->dst_reg_id >= ICE_GPR_FLG_IDX) { + flg_idx = (u16)(((alu->dst_reg_id - ICE_GPR_FLG_IDX) << 4) + + alu->dst_start); + + if (flg_idx >= ICE_PARSER_FLG_NUM) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n", + flg_idx); + return; + } + ice_flg_add(rt, flg_idx, val); + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n", + alu->dst_reg_id, alu->dst_start); + } +} + +static void ice_alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + u16 dst, src, shift, imm; + + if (alu->shift_xlate_sel) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_sel != 0 is not expected\n"); + return; + } + + ice_po_update(rt, alu); + ice_flg_update(rt, alu); + + dst = rt->gpr[alu->dst_reg_id]; + src = ice_reg_bit_sel(rt, alu->src_reg_id, + alu->src_start, alu->src_len); + shift = alu->shift_xlate_key; + imm = alu->imm; + + switch (alu->opc) { + case ICE_ALU_PARK: + break; + case ICE_ALU_MOV_ADD: + dst = (src << shift) + imm; + ice_gpr_add(rt, alu->dst_reg_id, dst); + break; + case ICE_ALU_ADD: + dst += (src << shift) + imm; + ice_gpr_add(rt, alu->dst_reg_id, dst); + break; + case ICE_ALU_ORLT: + if (src < imm) + ice_dst_reg_bit_set(rt, alu, true); + ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_OREQ: + if (src == imm) + ice_dst_reg_bit_set(rt, alu, true); + ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_SETEQ: + ice_dst_reg_bit_set(rt, alu, src == imm); + ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_MOV_XOR: + dst = (src << shift) ^ imm; + ice_gpr_add(rt, alu->dst_reg_id, dst); + break; + default: + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n", + alu->opc); + break; + } +} + +static void ice_alu0_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n"); + ice_alu_exe(rt, rt->alu0); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n"); +} + +static void ice_alu1_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n"); + ice_alu_exe(rt, rt->alu1); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n"); +} + +static void ice_alu2_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n"); + ice_alu_exe(rt, rt->alu2); + ice_debug(rt->psr->hw, 
ICE_DBG_PARSER, "Executing ALU2 done.\n"); +} + +static void ice_pu_exe(struct ice_parser_rt *rt) +{ + struct ice_gpr_pu *pu = &rt->pu; + unsigned int i; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n"); + + for (i = 0; i < ICE_PARSER_GPR_NUM; i++) { + if (pu->gpr_val_upd[i]) + ice_rt_gpr_set(rt, i, pu->gpr_val[i]); + } + + for (i = 0; i < ICE_PARSER_FLG_NUM; i++) { + if (pu->flg_msk & BIT(i)) + ice_rt_flag_set(rt, i, pu->flg_val & BIT(i)); + } + + for (i = 0; i < ICE_PARSER_ERR_NUM; i++) { + if (pu->err_msk & BIT(i)) + ice_rt_err_set(rt, i, pu->err_val & BIT(i)); + } + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n"); +} + +static void ice_alu_pg_exe(struct ice_parser_rt *rt) +{ + memset(&rt->pu, 0, sizeof(rt->pu)); + + switch (rt->pg_prio) { + case (ICE_PG_P0): + ice_pg_exe(rt); + ice_alu0_exe(rt); + ice_alu1_exe(rt); + ice_alu2_exe(rt); + break; + case (ICE_PG_P1): + ice_alu0_exe(rt); + ice_pg_exe(rt); + ice_alu1_exe(rt); + ice_alu2_exe(rt); + break; + case (ICE_PG_P2): + ice_alu0_exe(rt); + ice_alu1_exe(rt); + ice_pg_exe(rt); + ice_alu2_exe(rt); + break; + case (ICE_PG_P3): + ice_alu0_exe(rt); + ice_alu1_exe(rt); + ice_alu2_exe(rt); + ice_pg_exe(rt); + break; + } + + ice_pu_exe(rt); + + if (rt->action->ho_inc == 0) + return; + + if (rt->action->ho_polarity) + ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] + rt->action->ho_inc); + else + ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] - rt->action->ho_inc); +} + +static void ice_proto_off_update(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + + if (rt->action->is_pg) { + struct ice_proto_grp_item *proto_grp = + &psr->proto_grp_table[rt->action->proto_id]; + u16 po; + int i; + + for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) { + struct ice_proto_off *entry = &proto_grp->po[i]; + + if (entry->proto_id == U8_MAX) + break; + + if (!entry->polarity) + po = rt->po + entry->offset; + else + po = rt->po - entry->offset; + + rt->protocols[entry->proto_id] = true; + rt->offsets[entry->proto_id] = po; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n", + entry->proto_id, po); + } + } else { + rt->protocols[rt->action->proto_id] = true; + rt->offsets[rt->action->proto_id] = rt->po; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n", + rt->action->proto_id, rt->po); + } +} + +static void ice_marker_set(struct ice_parser_rt *rt, int idx) +{ + unsigned int byte = idx / BITS_PER_BYTE; + unsigned int bit = idx % BITS_PER_BYTE; + + rt->markers[byte] |= (u8)BIT(bit); +} + +static void ice_marker_update(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + + if (rt->action->is_mg) { + struct ice_mk_grp_item *mk_grp = + &psr->mk_grp_table[rt->action->marker_id]; + int i; + + for (i = 0; i < ICE_MARKER_ID_NUM; i++) { + u8 marker = mk_grp->markers[i]; + + if (marker == ICE_MARKER_MAX_SIZE) + break; + + ice_marker_set(rt, marker); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n", + marker); + } + } else { + if (rt->action->marker_id != ICE_MARKER_MAX_SIZE) + ice_marker_set(rt, rt->action->marker_id); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n", + rt->action->marker_id); + } +} + +static u16 ice_ptype_resolve(struct ice_parser_rt *rt) +{ + struct ice_ptype_mk_tcam_item *item; + struct ice_parser *psr = rt->psr; + + item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table, + rt->markers, ICE_MARKER_ID_SIZE); + if (item) + return item->ptype; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Could not resolve PTYPE\n"); + return 
U16_MAX; +} + +static void ice_proto_off_resolve(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + int i; + + for (i = 0; i < ICE_PO_PAIR_SIZE - 1; i++) { + if (rt->protocols[i]) { + rslt->po[rslt->po_num].proto_id = (u8)i; + rslt->po[rslt->po_num].offset = rt->offsets[i]; + rslt->po_num++; + } + } +} + +static void ice_result_resolve(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + struct ice_parser *psr = rt->psr; + + memset(rslt, 0, sizeof(*rslt)); + + memcpy(&rslt->flags_psr, &rt->gpr[ICE_GPR_FLG_IDX], + ICE_PARSER_FLAG_PSR_SIZE); + rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr); + rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt); + rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt); + rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt); + + ice_proto_off_resolve(rt, rslt); + rslt->ptype = ice_ptype_resolve(rt); +} + +/** + * ice_parser_rt_execute - parser execution routine + * @rt: pointer to the parser runtime + * @rslt: input/output parameter to save parser result + * + * Return: 0 on success or errno. + */ +int ice_parser_rt_execute(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + struct ice_pg_nm_cam_item *pg_nm_cam; + struct ice_parser *psr = rt->psr; + struct ice_pg_cam_item *pg_cam; + int status = 0; + u16 node; + u16 pc; + + node = rt->gpr[ICE_GPR_NN_IDX]; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node); + + while (true) { + struct ice_bst_tcam_item *bst; + struct ice_imem_item *imem; + + pc = rt->gpr[ICE_GPR_NP_IDX]; + imem = &psr->imem_table[pc]; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %u\n", + pc); + + ice_bst_key_init(rt, imem); + bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key); + if (!bst) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n"); + status = ice_imem_pgk_init(rt, imem); + if (status) + break; + ice_imem_alu0_set(rt, imem); + ice_imem_alu1_set(rt, imem); + ice_imem_alu2_set(rt, imem); + ice_imem_pgp_set(rt, imem); + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %u\n", + bst->addr); + if (imem->b_m.pg) { + status = ice_bst_pgk_init(rt, bst); + if (status) + break; + ice_bst_pgp_set(rt, bst); + } else { + status = ice_imem_pgk_init(rt, imem); + if (status) + break; + ice_imem_pgp_set(rt, imem); + } + + if (imem->b_m.alu0) + ice_bst_alu0_set(rt, bst); + else + ice_imem_alu0_set(rt, imem); + + if (imem->b_m.alu1) + ice_bst_alu1_set(rt, bst); + else + ice_imem_alu1_set(rt, imem); + + if (imem->b_m.alu2) + ice_bst_alu2_set(rt, bst); + else + ice_imem_alu2_set(rt, imem); + } + + rt->action = NULL; + pg_cam = ice_rt_pg_cam_match(rt); + if (!pg_cam) { + pg_nm_cam = ice_rt_pg_nm_cam_match(rt); + if (pg_nm_cam) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %u\n", + pg_nm_cam->idx); + rt->action = &pg_nm_cam->action; + } + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %u\n", + pg_cam->idx); + rt->action = &pg_cam->action; + } + + if (!rt->action) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n"); + status = -EINVAL; + break; + } + + ice_alu_pg_exe(rt); + ice_marker_update(rt); + ice_proto_off_update(rt); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %u\n", + rt->action->next_node); + + if (rt->action->is_last_round) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n"); + break; + } + + 
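/* Stop parsing once the header offset has moved past the end of the
+		 * packet buffer; otherwise run the next round.
+		 */
+		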
if (rt->gpr[ICE_GPR_HO_IDX] >= rt->pkt_len) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset (%u) is larger than packet len (%u), stop parsing\n", + rt->gpr[ICE_GPR_HO_IDX], rt->pkt_len); + break; + } + } + + ice_result_resolve(rt, rslt); + + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index bdda3401e3438b..970a99a52bf181 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -59,12 +59,13 @@ static void ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_repr *repr = np->repr; struct ice_eth_stats *eth_stats; struct ice_vsi *vsi; - if (ice_is_vf_disabled(np->repr->vf)) + if (repr->ops.ready(repr)) return; - vsi = np->repr->src_vsi; + vsi = repr->src_vsi; ice_update_vsi_stats(vsi); eth_stats = &vsi->eth_stats; @@ -93,7 +94,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev) } /** - * ice_repr_open - Enable port representor's network interface + * ice_repr_vf_open - Enable port representor's network interface * @netdev: network interface device structure * * The open entry point is called when a port representor's network @@ -102,7 +103,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev) * * Returns 0 on success */ -static int ice_repr_open(struct net_device *netdev) +static int ice_repr_vf_open(struct net_device *netdev) { struct ice_repr *repr = ice_netdev_to_repr(netdev); struct ice_vf *vf; @@ -118,8 +119,16 @@ static int ice_repr_open(struct net_device *netdev) return 0; } +static int ice_repr_sf_open(struct net_device *netdev) +{ + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + + return 0; +} + /** - * ice_repr_stop - Disable port representor's network interface + * ice_repr_vf_stop - Disable port representor's network interface * @netdev: network interface device structure * * The stop entry point is called when a port representor's network @@ -128,7 +137,7 @@ static int ice_repr_open(struct net_device *netdev) * * Returns 0 on success */ -static int ice_repr_stop(struct net_device *netdev) +static int ice_repr_vf_stop(struct net_device *netdev) { struct ice_repr *repr = ice_netdev_to_repr(netdev); struct ice_vf *vf; @@ -144,6 +153,14 @@ static int ice_repr_stop(struct net_device *netdev) return 0; } +static int ice_repr_sf_stop(struct net_device *netdev) +{ + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + /** * ice_repr_sp_stats64 - get slow path stats for port representor * @dev: network interface device structure @@ -245,10 +262,20 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type, } } -static const struct net_device_ops ice_repr_netdev_ops = { +static const struct net_device_ops ice_repr_vf_netdev_ops = { .ndo_get_stats64 = ice_repr_get_stats64, - .ndo_open = ice_repr_open, - .ndo_stop = ice_repr_stop, + .ndo_open = ice_repr_vf_open, + .ndo_stop = ice_repr_vf_stop, + .ndo_start_xmit = ice_eswitch_port_start_xmit, + .ndo_setup_tc = ice_repr_setup_tc, + .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats, + .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats, +}; + +static const struct net_device_ops ice_repr_sf_netdev_ops = { + .ndo_get_stats64 = ice_repr_get_stats64, + .ndo_open = ice_repr_sf_open, + .ndo_stop = ice_repr_sf_stop, .ndo_start_xmit = ice_eswitch_port_start_xmit, .ndo_setup_tc = ice_repr_setup_tc, .ndo_has_offload_stats 
= ice_repr_ndo_has_offload_stats, @@ -261,18 +288,20 @@ static const struct net_device_ops ice_repr_netdev_ops = { */ bool ice_is_port_repr_netdev(const struct net_device *netdev) { - return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops); + return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops || + netdev->netdev_ops == &ice_repr_sf_netdev_ops); } /** * ice_repr_reg_netdev - register port representor netdev * @netdev: pointer to port representor netdev + * @ops: new ops for netdev */ static int -ice_repr_reg_netdev(struct net_device *netdev) +ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops) { eth_hw_addr_random(netdev); - netdev->netdev_ops = &ice_repr_netdev_ops; + netdev->netdev_ops = ops; ice_set_ethtool_repr_ops(netdev); netdev->hw_features |= NETIF_F_HW_TC; @@ -283,57 +312,56 @@ ice_repr_reg_netdev(struct net_device *netdev) return register_netdev(netdev); } -static void ice_repr_remove_node(struct devlink_port *devlink_port) +static int ice_repr_ready_vf(struct ice_repr *repr) +{ + return !ice_check_vf_ready_for_cfg(repr->vf); +} + +static int ice_repr_ready_sf(struct ice_repr *repr) { - devl_rate_leaf_destroy(devlink_port); + return !repr->sf->active; } /** - * ice_repr_rem - remove representor from VF + * ice_repr_destroy - remove representor from VF * @repr: pointer to representor structure */ -static void ice_repr_rem(struct ice_repr *repr) +void ice_repr_destroy(struct ice_repr *repr) { free_percpu(repr->stats); free_netdev(repr->netdev); kfree(repr); } -/** - * ice_repr_rem_vf - remove representor from VF - * @repr: pointer to representor structure - */ -void ice_repr_rem_vf(struct ice_repr *repr) +static void ice_repr_rem_vf(struct ice_repr *repr) { - ice_repr_remove_node(&repr->vf->devlink_port); ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac); unregister_netdev(repr->netdev); ice_devlink_destroy_vf_port(repr->vf); ice_virtchnl_set_dflt_ops(repr->vf); - ice_repr_rem(repr); } -static void ice_repr_set_tx_topology(struct ice_pf *pf) +static void ice_repr_rem_sf(struct ice_repr *repr) { - struct devlink *devlink; + unregister_netdev(repr->netdev); + ice_devlink_destroy_sf_port(repr->sf); +} +static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink) +{ /* only export if ADQ and DCB disabled and eswitch enabled*/ if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) || !ice_is_switchdev_running(pf)) return; - devlink = priv_to_devlink(pf); ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); } /** - * ice_repr_add - add representor for generic VSI - * @pf: pointer to PF structure + * ice_repr_create - add representor for generic VSI * @src_vsi: pointer to VSI structure of device to represent - * @parent_mac: device MAC address */ -static struct ice_repr * -ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) +static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi) { struct ice_netdev_priv *np; struct ice_repr *repr; @@ -360,7 +388,10 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) np = netdev_priv(repr->netdev); np->repr = repr; - ether_addr_copy(repr->parent_mac, parent_mac); + repr->netdev->min_mtu = ETH_MIN_MTU; + repr->netdev->max_mtu = ICE_MAX_MTU; + + SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back)); return repr; @@ -371,34 +402,18 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) return ERR_PTR(err); } -struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) +static int 
ice_repr_add_vf(struct ice_repr *repr) { - struct ice_repr *repr; - struct ice_vsi *vsi; + struct ice_vf *vf = repr->vf; + struct devlink *devlink; int err; - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return ERR_PTR(-ENOENT); - err = ice_devlink_create_vf_port(vf); if (err) - return ERR_PTR(err); - - repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr); - if (IS_ERR(repr)) { - err = PTR_ERR(repr); - goto err_repr_add; - } - - repr->vf = vf; - - repr->netdev->min_mtu = ETH_MIN_MTU; - repr->netdev->max_mtu = ICE_MAX_MTU; + return err; - SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf)); SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port); - err = ice_repr_reg_netdev(repr->netdev); + err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops); if (err) goto err_netdev; @@ -407,17 +422,97 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) goto err_cfg_vsi; ice_virtchnl_set_repr_ops(vf); - ice_repr_set_tx_topology(vf->pf); - return repr; + devlink = priv_to_devlink(vf->pf); + ice_repr_set_tx_topology(vf->pf, devlink); + + return 0; err_cfg_vsi: unregister_netdev(repr->netdev); err_netdev: - ice_repr_rem(repr); -err_repr_add: ice_devlink_destroy_vf_port(vf); - return ERR_PTR(err); + return err; +} + +/** + * ice_repr_create_vf - add representor for VF VSI + * @vf: VF to create port representor on + * + * Set correct representor type for VF and functions pointer. + * + * Return: created port representor on success, error otherwise + */ +struct ice_repr *ice_repr_create_vf(struct ice_vf *vf) +{ + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct ice_repr *repr; + + if (!vsi) + return ERR_PTR(-EINVAL); + + repr = ice_repr_create(vsi); + if (IS_ERR(repr)) + return repr; + + repr->type = ICE_REPR_TYPE_VF; + repr->vf = vf; + repr->ops.add = ice_repr_add_vf; + repr->ops.rem = ice_repr_rem_vf; + repr->ops.ready = ice_repr_ready_vf; + + ether_addr_copy(repr->parent_mac, vf->hw_lan_addr); + + return repr; +} + +static int ice_repr_add_sf(struct ice_repr *repr) +{ + struct ice_dynamic_port *sf = repr->sf; + int err; + + err = ice_devlink_create_sf_port(sf); + if (err) + return err; + + SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port); + err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops); + if (err) + goto err_netdev; + + ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back)); + + return 0; + +err_netdev: + ice_devlink_destroy_sf_port(sf); + return err; +} + +/** + * ice_repr_create_sf - add representor for SF VSI + * @sf: SF to create port representor on + * + * Set correct representor type for SF and functions pointer. 
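+ * The representor itself comes from ice_repr_create(); only the type,
+ * the backing subfunction port and the add/rem/ready callbacks are
+ * SF-specific.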
+ * + * Return: created port representor on success, error otherwise + */ +struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf) +{ + struct ice_repr *repr = ice_repr_create(sf->vsi); + + if (IS_ERR(repr)) + return repr; + + repr->type = ICE_REPR_TYPE_SF; + repr->sf = sf; + repr->ops.add = ice_repr_add_sf; + repr->ops.rem = ice_repr_rem_sf; + repr->ops.ready = ice_repr_ready_sf; + + ether_addr_copy(repr->parent_mac, sf->hw_addr); + + return repr; } struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id) diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index 488661b2900b0c..35bd93165e1ea7 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -15,19 +15,35 @@ struct ice_repr_pcpu_stats { u64 tx_drops; }; +enum ice_repr_type { + ICE_REPR_TYPE_VF, + ICE_REPR_TYPE_SF, +}; + struct ice_repr { struct ice_vsi *src_vsi; - struct ice_vf *vf; struct net_device *netdev; struct metadata_dst *dst; struct ice_esw_br_port *br_port; struct ice_repr_pcpu_stats __percpu *stats; u32 id; u8 parent_mac[ETH_ALEN]; + enum ice_repr_type type; + union { + struct ice_vf *vf; + struct ice_dynamic_port *sf; + }; + struct { + int (*add)(struct ice_repr *repr); + void (*rem)(struct ice_repr *repr); + int (*ready)(struct ice_repr *repr); + } ops; }; -struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); -void ice_repr_rem_vf(struct ice_repr *repr); +struct ice_repr *ice_repr_create_vf(struct ice_vf *vf); +struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf); + +void ice_repr_destroy(struct ice_repr *repr); void ice_repr_start_tx_queues(struct ice_repr *repr); void ice_repr_stop_tx_queues(struct ice_repr *repr); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index ecf8f5d6029219..6ca13c5dcb14e7 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, if (!root) return -ENOMEM; - /* coverity[suspicious_sizeof] */ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], - sizeof(*root), GFP_KERNEL); + sizeof(*root->children), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); return -ENOMEM; @@ -186,10 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, if (!node) return -ENOMEM; if (hw->max_children[layer]) { - /* coverity[suspicious_sizeof] */ node->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[layer], - sizeof(*node), GFP_KERNEL); + sizeof(*node->children), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c new file mode 100644 index 00000000000000..75d7147e1c01c0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Intel Corporation. 
*/ +#include "ice.h" +#include "ice_lib.h" +#include "ice_txrx.h" +#include "ice_fltr.h" +#include "ice_sf_eth.h" +#include "devlink/devlink_port.h" +#include "devlink/devlink.h" + +static const struct net_device_ops ice_sf_netdev_ops = { + .ndo_open = ice_open, + .ndo_stop = ice_stop, + .ndo_start_xmit = ice_start_xmit, + .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, + .ndo_change_mtu = ice_change_mtu, + .ndo_get_stats64 = ice_get_stats64, + .ndo_tx_timeout = ice_tx_timeout, + .ndo_bpf = ice_xdp, + .ndo_xdp_xmit = ice_xdp_xmit, + .ndo_xsk_wakeup = ice_xsk_wakeup, +}; + +/** + * ice_sf_cfg_netdev - Allocate, configure and register a netdev + * @dyn_port: subfunction associated with configured netdev + * @devlink_port: subfunction devlink port to be linked with netdev + * + * Return: 0 on success, negative value on failure + */ +static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port, + struct devlink_port *devlink_port) +{ + struct ice_vsi *vsi = dyn_port->vsi; + struct ice_netdev_priv *np; + struct net_device *netdev; + int err; + + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, + vsi->alloc_rxq); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); + set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + ice_set_netdev_features(netdev); + + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY | + NETDEV_XDP_ACT_RX_SG; + netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; + + eth_hw_addr_set(netdev, dyn_port->hw_addr); + ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr); + netdev->netdev_ops = &ice_sf_netdev_ops; + SET_NETDEV_DEVLINK_PORT(netdev, devlink_port); + + err = register_netdev(netdev); + if (err) { + free_netdev(netdev); + vsi->netdev = NULL; + return -ENOMEM; + } + set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + +static void ice_sf_decfg_netdev(struct ice_vsi *vsi) +{ + unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); +} + +/** + * ice_sf_dev_probe - subfunction driver probe function + * @adev: pointer to the auxiliary device + * @id: pointer to the auxiliary_device id + * + * Configure VSI and netdev resources for the subfunction device. + * + * Return: zero on success or an error code on failure. 
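+ *
+ * A dedicated devlink instance is allocated for the subfunction and
+ * registered once the VSI, devlink port and netdev have been set up.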
+ */ +static int ice_sf_dev_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev); + struct ice_dynamic_port *dyn_port = sf_dev->dyn_port; + struct ice_vsi *vsi = dyn_port->vsi; + struct ice_pf *pf = dyn_port->pf; + struct device *dev = &adev->dev; + struct ice_sf_priv *priv; + struct devlink *devlink; + int err; + + vsi->type = ICE_VSI_SF; + vsi->port_info = pf->hw.port_info; + vsi->flags = ICE_VSI_FLAG_INIT; + + priv = ice_allocate_sf(&adev->dev, pf); + if (IS_ERR(priv)) { + dev_err(dev, "Subfunction devlink alloc failed"); + return PTR_ERR(priv); + } + + priv->dev = sf_dev; + sf_dev->priv = priv; + devlink = priv_to_devlink(priv); + + devl_lock(devlink); + + err = ice_vsi_cfg(vsi); + if (err) { + dev_err(dev, "Subfunction vsi config failed"); + goto err_free_devlink; + } + vsi->sf = dyn_port; + + ice_eswitch_update_repr(&dyn_port->repr_id, vsi); + + err = ice_devlink_create_sf_dev_port(sf_dev); + if (err) { + dev_err(dev, "Cannot add ice virtual devlink port for subfunction"); + goto err_vsi_decfg; + } + + err = ice_sf_cfg_netdev(dyn_port, &sf_dev->priv->devlink_port); + if (err) { + dev_err(dev, "Subfunction netdev config failed"); + goto err_devlink_destroy; + } + + err = devl_port_fn_devlink_set(&dyn_port->devlink_port, devlink); + if (err) { + dev_err(dev, "Can't link devlink instance to SF devlink port"); + goto err_netdev_decfg; + } + + ice_napi_add(vsi); + + devl_register(devlink); + devl_unlock(devlink); + + dyn_port->attached = true; + + return 0; + +err_netdev_decfg: + ice_sf_decfg_netdev(vsi); +err_devlink_destroy: + ice_devlink_destroy_sf_dev_port(sf_dev); +err_vsi_decfg: + ice_vsi_decfg(vsi); +err_free_devlink: + devl_unlock(devlink); + devlink_free(devlink); + return err; +} + +/** + * ice_sf_dev_remove - subfunction driver remove function + * @adev: pointer to the auxiliary device + * + * Deinitalize VSI and netdev resources for the subfunction device. + */ +static void ice_sf_dev_remove(struct auxiliary_device *adev) +{ + struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev); + struct ice_dynamic_port *dyn_port = sf_dev->dyn_port; + struct ice_vsi *vsi = dyn_port->vsi; + struct devlink *devlink; + + devlink = priv_to_devlink(sf_dev->priv); + devl_lock(devlink); + + ice_vsi_close(vsi); + + ice_sf_decfg_netdev(vsi); + ice_devlink_destroy_sf_dev_port(sf_dev); + devl_unregister(devlink); + devl_unlock(devlink); + devlink_free(devlink); + ice_vsi_decfg(vsi); + + dyn_port->attached = false; +} + +static const struct auxiliary_device_id ice_sf_dev_id_table[] = { + { .name = "ice.sf", }, + { }, +}; + +MODULE_DEVICE_TABLE(auxiliary, ice_sf_dev_id_table); + +static struct auxiliary_driver ice_sf_driver = { + .name = "sf", + .probe = ice_sf_dev_probe, + .remove = ice_sf_dev_remove, + .id_table = ice_sf_dev_id_table +}; + +static DEFINE_XARRAY_ALLOC1(ice_sf_aux_id); + +/** + * ice_sf_driver_register - Register new auxiliary subfunction driver + * + * Return: zero on success or an error code on failure. 
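+ *
+ * The driver binds to the "ice.sf" auxiliary devices created when an
+ * Ethernet subfunction port is activated.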
+ */ +int ice_sf_driver_register(void) +{ + return auxiliary_driver_register(&ice_sf_driver); +} + +/** + * ice_sf_driver_unregister - Unregister new auxiliary subfunction driver + * + */ +void ice_sf_driver_unregister(void) +{ + auxiliary_driver_unregister(&ice_sf_driver); +} + +/** + * ice_sf_dev_release - Release device associated with auxiliary device + * @device: pointer to the device + * + * Since most of the code for subfunction deactivation is handled in + * the remove handler, here just free tracking resources. + */ +static void ice_sf_dev_release(struct device *device) +{ + struct auxiliary_device *adev = to_auxiliary_dev(device); + struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev); + + xa_erase(&ice_sf_aux_id, adev->id); + kfree(sf_dev); +} + +/** + * ice_sf_eth_activate - Activate Ethernet subfunction port + * @dyn_port: the dynamic port instance for this subfunction + * @extack: extack for reporting error messages + * + * Activate the dynamic port as an Ethernet subfunction. Setup the netdev + * resources associated and initialize the auxiliary device. + * + * Return: zero on success or an error code on failure. + */ +int +ice_sf_eth_activate(struct ice_dynamic_port *dyn_port, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = dyn_port->pf; + struct ice_sf_dev *sf_dev; + struct pci_dev *pdev; + int err; + u32 id; + + err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b, + GFP_KERNEL); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF ID"); + return err; + } + + sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL); + if (!sf_dev) { + err = -ENOMEM; + NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF memory"); + goto xa_erase; + } + pdev = pf->pdev; + + sf_dev->dyn_port = dyn_port; + sf_dev->adev.id = id; + sf_dev->adev.name = "sf"; + sf_dev->adev.dev.release = ice_sf_dev_release; + sf_dev->adev.dev.parent = &pdev->dev; + + err = auxiliary_device_init(&sf_dev->adev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to initialize SF device"); + goto sf_dev_free; + } + + err = auxiliary_device_add(&sf_dev->adev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to add SF device"); + goto aux_dev_uninit; + } + + dyn_port->sf_dev = sf_dev; + + return 0; + +aux_dev_uninit: + auxiliary_device_uninit(&sf_dev->adev); +sf_dev_free: + kfree(sf_dev); +xa_erase: + xa_erase(&ice_sf_aux_id, id); + + return err; +} + +/** + * ice_sf_eth_deactivate - Deactivate Ethernet subfunction port + * @dyn_port: the dynamic port instance for this subfunction + * + * Deactivate the Ethernet subfunction, removing its auxiliary device and the + * associated resources. + */ +void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port) +{ + struct ice_sf_dev *sf_dev = dyn_port->sf_dev; + + auxiliary_device_delete(&sf_dev->adev); + auxiliary_device_uninit(&sf_dev->adev); +} diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.h b/drivers/net/ethernet/intel/ice/ice_sf_eth.h new file mode 100644 index 00000000000000..c558cad0a183bc --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024, Intel Corporation. 
*/ + +#ifndef _ICE_SF_ETH_H_ +#define _ICE_SF_ETH_H_ + +#include +#include "ice.h" + +struct ice_sf_dev { + struct auxiliary_device adev; + struct ice_dynamic_port *dyn_port; + struct ice_sf_priv *priv; +}; + +struct ice_sf_priv { + struct ice_sf_dev *dev; + struct devlink_port devlink_port; +}; + +static inline struct +ice_sf_dev *ice_adev_to_sf_dev(struct auxiliary_device *adev) +{ + return container_of(adev, struct ice_sf_dev, adev); +} + +int ice_sf_driver_register(void); +void ice_sf_driver_unregister(void); + +int ice_sf_eth_activate(struct ice_dynamic_port *dyn_port, + struct netlink_ext_ack *extack); +void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port); +#endif /* _ICE_SF_ETH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c new file mode 100644 index 00000000000000..3d7e96721cf92c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023, Intel Corporation. */ + +#include "ice_vsi_vlan_ops.h" +#include "ice_vsi_vlan_lib.h" +#include "ice_vlan_mode.h" +#include "ice.h" +#include "ice_sf_vsi_vlan_ops.h" + +void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + + if (ice_is_dvm_ena(&vsi->back->hw)) + vlan_ops = &vsi->outer_vlan_ops; + else + vlan_ops = &vsi->inner_vlan_ops; + + vlan_ops->add_vlan = ice_vsi_add_vlan; + vlan_ops->del_vlan = ice_vsi_del_vlan; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h new file mode 100644 index 00000000000000..8c44eafceea051 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023, Intel Corporation. 
*/ + +#ifndef _ICE_SF_VSI_VLAN_OPS_H_ +#define _ICE_SF_VSI_VLAN_OPS_H_ + +#include "ice_vsi_vlan_ops.h" + +struct ice_vsi; + +void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi); + +#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 55ef33208456a4..e34fe2516cccf2 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -175,7 +175,7 @@ void ice_free_vfs(struct ice_pf *pf) ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); - ice_eswitch_detach(pf, vf); + ice_eswitch_detach_vf(pf, vf); ice_dis_vf_qs(vf); if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { @@ -598,7 +598,7 @@ static int ice_start_vfs(struct ice_pf *pf) goto teardown; } - retval = ice_eswitch_attach(pf, vf); + retval = ice_eswitch_attach_vf(pf, vf); if (retval) { dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d", vf->vf_id, retval); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index c9bc3f1add5d37..8208055d6e7fc5 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2368,7 +2368,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); - if (ice_is_switchdev_running(vsi->back)) + if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF) ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 96037bef3e7848..45768796691fec 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -61,6 +61,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R) ICE_DBG_AQ_DESC | \ ICE_DBG_AQ_DESC_BUF | \ ICE_DBG_AQ_CMD) +#define ICE_DBG_PARSER BIT_ULL(28) #define ICE_DBG_USER BIT_ULL(31) @@ -158,6 +159,7 @@ enum ice_vsi_type { ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ ICE_VSI_CHNL = 4, ICE_VSI_LB = 6, + ICE_VSI_SF = 9, }; struct ice_link_status { diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 5635e9da2212ba..a69e91f88d8118 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -766,7 +766,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); - ice_eswitch_detach(pf, vf); + ice_eswitch_detach_vf(pf, vf); vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -782,7 +782,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); - ice_eswitch_attach(pf, vf); + ice_eswitch_attach_vf(pf, vf); mutex_unlock(&vf->cfg_lock); } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index fec16919ec1957..be4266899690ab 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -12,6 +12,7 @@ #include #include #include "ice_type.h" +#include "ice_flow.h" #include "ice_virtchnl_fdir.h" #include "ice_vsi_vlan_ops.h" @@ -52,6 +53,12 @@ struct ice_mdd_vf_events { u16 last_printed; }; +/* Structure to store fdir fv entry */ +struct ice_fdir_prof_info { + struct ice_parser_profile prof; + u64 fdir_active_cnt; +}; + /* VF operations */ struct ice_vf_ops { enum ice_disq_rst_src reset_type; 
@@ -91,6 +98,7 @@ struct ice_vf { u16 lan_vsi_idx; /* index into PF struct */ u16 ctrl_vsi_idx; struct ice_vf_fdir fdir; + struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; /* first vector index of this VF in the PF space */ int first_vector_idx; struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 1c6ce0c4ed4ee9..59f62306b9cb02 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -461,6 +461,10 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 && + vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index b4feb09276870f..14e3f0f89c78d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -26,6 +26,15 @@ enum ice_fdir_tunnel_type { ICE_FDIR_TUNNEL_TYPE_NONE = 0, ICE_FDIR_TUNNEL_TYPE_GTPU, ICE_FDIR_TUNNEL_TYPE_GTPU_EH, + ICE_FDIR_TUNNEL_TYPE_ECPRI, + ICE_FDIR_TUNNEL_TYPE_GTPU_INNER, + ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER, + ICE_FDIR_TUNNEL_TYPE_GRE, + ICE_FDIR_TUNNEL_TYPE_GTPOGRE, + ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER, + ICE_FDIR_TUNNEL_TYPE_GRE_INNER, + ICE_FDIR_TUNNEL_TYPE_L2TPV2, + ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER, }; struct virtchnl_fdir_fltr_conf { @@ -33,6 +42,11 @@ struct virtchnl_fdir_fltr_conf { enum ice_fdir_tunnel_type ttype; u64 inset_flag; u32 flow_id; + + struct ice_parser_profile *prof; + bool parser_ena; + u8 *pkt_buf; + u8 pkt_len; }; struct virtchnl_fdir_inset_map { @@ -786,6 +800,107 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, return ret; } +/** + * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary) + * @proto: virtchnl protocol headers + * + * Check if the FDIR rule is raw flow (protocol agnostic flow) or not. Note + * that common FDIR rule must have non-zero proto->count. Thus, we choose the + * tunnel_level and count of proto as the indicators. If both tunnel_level and + * count of proto are zero, this FDIR rule will be regarded as raw flow. + * + * Returns: true if headers describe raw flow, false otherwise. + */ +static bool +ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto) +{ + return (proto->tunnel_level == 0 && proto->count == 0); +} + +/** + * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule + * @vf: pointer to the VF info + * @proto: virtchnl protocol headers + * @conf: FDIR configuration for each filter + * + * Parse the virtual channel filter's raw flow and store it in @conf + * + * Return: 0 on success or negative errno on failure. 
+ */ +static int +ice_vc_fdir_parse_raw(struct ice_vf *vf, + struct virtchnl_proto_hdrs *proto, + struct virtchnl_fdir_fltr_conf *conf) +{ + u8 *pkt_buf, *msk_buf __free(kfree); + struct ice_parser_result rslt; + struct ice_pf *pf = vf->pf; + struct ice_parser *psr; + int status = -ENOMEM; + struct ice_hw *hw; + u16 udp_port = 0; + + pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); + msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); + if (!pkt_buf || !msk_buf) + goto err_mem_alloc; + + memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len); + memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len); + + hw = &pf->hw; + + /* Get raw profile info via Parser Lib */ + psr = ice_parser_create(hw); + if (IS_ERR(psr)) { + status = PTR_ERR(psr); + goto err_mem_alloc; + } + + ice_parser_dvm_set(psr, ice_is_dvm_ena(hw)); + + if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN)) + ice_parser_vxlan_tunnel_set(psr, udp_port, true); + + status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt); + if (status) + goto err_parser_destroy; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_parser_result_dump(hw, &rslt); + + conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL); + if (!conf->prof) { + status = -ENOMEM; + goto err_parser_destroy; + } + + status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, + proto->raw.pkt_len, ICE_BLK_FD, + conf->prof); + if (status) + goto err_parser_profile_init; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_parser_profile_dump(hw, conf->prof); + + /* Store raw flow info into @conf */ + conf->pkt_len = proto->raw.pkt_len; + conf->pkt_buf = pkt_buf; + conf->parser_ena = true; + + ice_parser_destroy(psr); + return 0; + +err_parser_profile_init: + kfree(conf->prof); +err_parser_destroy: + ice_parser_destroy(psr); +err_mem_alloc: + kfree(pkt_buf); + return status; +} + /** * ice_vc_fdir_parse_pattern * @vf: pointer to the VF info @@ -813,6 +928,10 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, return -EINVAL; } + /* For raw FDIR filters created by the parser */ + if (ice_vc_fdir_is_raw_flow(proto)) + return ice_vc_fdir_parse_raw(vf, proto, conf); + for (i = 0; i < proto->count; i++) { struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; struct ip_esp_hdr *esph; @@ -1101,8 +1220,10 @@ ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; int ret; - if (!ice_vc_validate_pattern(vf, proto)) - return -EINVAL; + /* For raw FDIR filters created by the parser */ + if (!ice_vc_fdir_is_raw_flow(proto)) + if (!ice_vc_validate_pattern(vf, proto)) + return -EINVAL; ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); if (ret) @@ -1295,11 +1416,15 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, return -ENOMEM; ice_fdir_get_prgm_desc(hw, input, &desc, add); - ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - if (ret) { - dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", - vf->vf_id, input->flow_type); - goto err_free_pkt; + if (conf->parser_ena) { + memcpy(pkt, conf->pkt_buf, conf->pkt_len); + } else { + ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + if (ret) { + dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", + vf->vf_id, input->flow_type); + goto err_free_pkt; + } } ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); @@ -1521,6 +1646,16 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, return ret; } +static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype) +{ + return (ttype 
== ICE_FDIR_TUNNEL_TYPE_GRE_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI || + ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER); +} + /** * ice_vc_add_fdir_fltr_post * @vf: pointer to the VF structure @@ -1781,6 +1916,158 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf) spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); } +/** + * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv contexts + * @fv_a: struct of parsed FDIR profile field vector + * @fv_b: struct of parsed FDIR profile field vector + * + * Check if the two parsed FDIR profile field vector contexts are different, + * including proto_id, offset and mask. + * + * Return: true if they differ, false otherwise. + */ +static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a, + struct ice_parser_fv *fv_b) +{ + return (fv_a->proto_id != fv_b->proto_id || + fv_a->offset != fv_b->offset || + fv_a->msk != fv_b->msk); +} + +/** + * ice_vc_parser_fv_save - save parsed FDIR profile fv context + * @fv: struct of parsed FDIR profile field vector + * @fv_src: parsed FDIR profile field vector context to save + * + * Save the parsed FDIR profile field vector context, including proto_id, + * offset and mask. + * + * Return: Void. + */ +static void ice_vc_parser_fv_save(struct ice_parser_fv *fv, + struct ice_parser_fv *fv_src) +{ + fv->proto_id = fv_src->proto_id; + fv->offset = fv_src->offset; + fv->msk = fv_src->msk; + fv->spec = 0; +} + +/** + * ice_vc_add_fdir_raw - add a raw FDIR filter for VF + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @v_ret: the final VIRTCHNL code + * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER + * @len: length of the stat + * + * Return: 0 on success or negative errno on failure.
+ */ +static int +ice_vc_add_fdir_raw(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + enum virtchnl_status_code *v_ret, + struct virtchnl_fdir_add *stat, int len) +{ + struct ice_vsi *vf_vsi, *ctrl_vsi; + struct ice_fdir_prof_info *pi; + struct ice_pf *pf = vf->pf; + int ret, ptg, id, i; + struct device *dev; + struct ice_hw *hw; + bool fv_found; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + *v_ret = VIRTCHNL_STATUS_ERR_PARAM; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + + id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX); + ptg = hw->blk[ICE_BLK_FD].xlt1.t[id]; + + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id); + return -ENODEV; + } + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n", + vf->vf_id); + return -ENODEV; + } + + fv_found = false; + + /* Check if profile info already exists, then update the counter */ + pi = &vf->fdir_prof_info[ptg]; + if (pi->fdir_active_cnt != 0) { + for (i = 0; i < ICE_MAX_FV_WORDS; i++) + if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i], + &conf->prof->fv[i])) + break; + if (i == ICE_MAX_FV_WORDS) { + fv_found = true; + pi->fdir_active_cnt++; + } + } + + /* HW profile setting is only required for the first time */ + if (!fv_found) { + ret = ice_flow_set_parser_prof(hw, vf_vsi->idx, + ctrl_vsi->idx, conf->prof, + ICE_BLK_FD); + + if (ret) { + *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "VF %d: insert hw prof failed\n", + vf->vf_id); + return ret; + } + } + + ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id); + if (ret) { + *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "VF %d: insert FDIR list failed\n", + vf->vf_id); + return ret; + } + + ret = ice_vc_fdir_set_irq_ctx(vf, conf, + VIRTCHNL_OP_ADD_FDIR_FILTER); + if (ret) { + dev_dbg(dev, "VF %d: set FDIR context failed\n", + vf->vf_id); + goto err_rem_entry; + } + + ret = ice_vc_fdir_write_fltr(vf, conf, true, false); + if (ret) { + dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n", + vf->vf_id, ret); + goto err_clr_irq; + } + + /* Save parsed profile fv info of the FDIR rule for the first time */ + if (!fv_found) { + for (i = 0; i < conf->prof->fv_num; i++) + ice_vc_parser_fv_save(&pi->prof.fv[i], + &conf->prof->fv[i]); + pi->prof.fv_num = conf->prof->fv_num; + pi->fdir_active_cnt = 1; + } + + return 0; + +err_clr_irq: + ice_vc_fdir_clear_irq_ctx(vf); +err_rem_entry: + ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); + return ret; +} + /** * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer * @vf: pointer to the VF info @@ -1846,7 +2133,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) len = sizeof(*stat); ret = ice_vc_validate_fdir_fltr(vf, fltr, conf); if (ret) { - v_ret = VIRTCHNL_STATUS_SUCCESS; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id); goto err_free_conf; @@ -1861,6 +2148,15 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) goto exit; } + /* For raw FDIR filters created by the parser */ + if (conf->parser_ena) { + ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len); + if (ret) + goto err_free_conf; + goto exit; + } + + is_tun = ice_fdir_is_tunnel(conf->ttype); ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun); if (ret) { v_ret = VIRTCHNL_STATUS_SUCCESS; @@ -1921,6 +2217,78 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) return ret; } +/** + * 
ice_vc_del_fdir_raw - delete a raw FDIR filter for VF + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @v_ret: the final VIRTCHNL code + * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER + * @len: length of the stat + * + * Return: 0 on success or negative errno on failure. + */ +static int +ice_vc_del_fdir_raw(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + enum virtchnl_status_code *v_ret, + struct virtchnl_fdir_del *stat, int len) +{ + struct ice_vsi *vf_vsi, *ctrl_vsi; + enum ice_block blk = ICE_BLK_FD; + struct ice_fdir_prof_info *pi; + struct ice_pf *pf = vf->pf; + struct device *dev; + struct ice_hw *hw; + unsigned long id; + u16 vsi_num; + int ptg; + int ret; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + *v_ret = VIRTCHNL_STATUS_ERR_PARAM; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + + id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX); + ptg = hw->blk[ICE_BLK_FD].xlt1.t[id]; + + ret = ice_vc_fdir_write_fltr(vf, conf, false, false); + if (ret) { + dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n", + vf->vf_id, ret); + return ret; + } + + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); + return -ENODEV; + } + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n", + vf->vf_id); + return -ENODEV; + } + + pi = &vf->fdir_prof_info[ptg]; + if (pi->fdir_active_cnt != 0) { + pi->fdir_active_cnt--; + /* Remove the profile id flow if no active FDIR rule left */ + if (!pi->fdir_active_cnt) { + vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx); + ice_rem_prof_id_flow(hw, blk, vsi_num, id); + + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); + ice_rem_prof_id_flow(hw, blk, vsi_num, id); + } + } + + conf->parser_ena = false; + return 0; +} + /** * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer * @vf: pointer to the VF info @@ -1933,7 +2301,10 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg; struct virtchnl_fdir_del *stat = NULL; struct virtchnl_fdir_fltr_conf *conf; + struct ice_vf_fdir *fdir = &vf->fdir; enum virtchnl_status_code v_ret; + struct ice_fdir_fltr *input; + enum ice_fltr_ptype flow; struct device *dev; struct ice_pf *pf; int is_tun = 0; @@ -1983,6 +2354,15 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) goto err_exit; } + /* For raw FDIR filters created by the parser */ + if (conf->parser_ena) { + ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len); + if (ret) + goto err_del_tmr; + goto exit; + } + + is_tun = ice_fdir_is_tunnel(conf->ttype); ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun); if (ret) { v_ret = VIRTCHNL_STATUS_SUCCESS; @@ -1992,6 +2372,13 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) goto err_del_tmr; } + /* Remove unused profiles to avoid unexpected behaviors */ + input = &conf->input; + flow = input->flow_type; + if (fdir->fdir_fltr_cnt[flow][is_tun] == 1) + ice_vc_fdir_rem_prof(vf, flow, is_tun); + +exit: kfree(stat); return ret; diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c index 7aae7fdcfcdb91..8c7a9b41fb63c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c @@ -3,6 +3,7 @@ #include "ice_pf_vsi_vlan_ops.h" #include "ice_vf_vsi_vlan_ops.h" +#include "ice_sf_vsi_vlan_ops.h" #include "ice_lib.h" #include 
"ice.h" @@ -77,6 +78,9 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi) case ICE_VSI_VF: ice_vf_vsi_init_vlan_ops(vsi); break; + case ICE_VSI_SF: + ice_sf_vsi_init_vlan_ops(vsi); + break; default: dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n", ice_vsi_type_str(vsi->type)); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 5dee829bfc47c5..334ae945d6404c 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -289,7 +289,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) { int err; - if (vsi->type != ICE_VSI_PF) + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) return -EINVAL; if (qid >= vsi->netdev->real_num_rx_queues || diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c index 3df9935685e962..6c913a703df647 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -97,8 +97,10 @@ static int idpf_intr_reg_init(struct idpf_vport *vport) intr->dyn_ctl = idpf_get_reg_addr(adapter, reg_vals[vec_id].dyn_ctl_reg); intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M; + intr->dyn_ctl_intena_msk_m = PF_GLINT_DYN_CTL_INTENA_MSK_M; intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S; intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S; + intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M; spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, IDPF_PF_ITR_IDX_SPACING); diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 0b6c8fd5bc90f7..4f20343e49a980 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -357,24 +357,11 @@ int idpf_intr_req(struct idpf_adapter *adapter) goto free_msix; } - if (adapter->req_vec_chunks) { - struct virtchnl2_vector_chunks *vchunks; - struct virtchnl2_alloc_vectors *ac; - - ac = adapter->req_vec_chunks; - vchunks = &ac->vchunks; - - num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, - vchunks); - if (num_vec_ids < v_actual) { - err = -EINVAL; - goto free_vecids; - } - } else { - int i; - - for (i = 0; i < v_actual; i++) - vecids[i] = i; + num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, + &adapter->req_vec_chunks->vchunks); + if (num_vec_ids < v_actual) { + err = -EINVAL; + goto free_vecids; } for (vector = 0; vector < v_actual; vector++) { diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index fe64febf7436f4..dfd7cf1d9aa0ad 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -2,6 +2,7 @@ /* Copyright (C) 2023 Intel Corporation */ #include +#include #include "idpf.h" @@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, /* record length, and DMA address */ dma_unmap_len_set(tx_buf, len, size); dma_unmap_addr_set(tx_buf, dma, dma); + tx_buf->type = LIBETH_SQE_FRAG; /* align size to end of page */ max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); @@ -237,14 +239,17 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, offsets, max_data, td_tag); - tx_desc++; - i++; - - if (i == tx_q->desc_count) { + if (unlikely(++i == tx_q->desc_count)) { + tx_buf = &tx_q->tx_buf[0]; tx_desc = &tx_q->base_tx[0]; i = 0; + } else { + tx_buf++; + tx_desc++; } + tx_buf->type = LIBETH_SQE_EMPTY; + dma += 
max_data; size -= max_data; @@ -257,12 +262,14 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets, size, td_tag); - tx_desc++; - i++; - if (i == tx_q->desc_count) { + if (unlikely(++i == tx_q->desc_count)) { + tx_buf = &tx_q->tx_buf[0]; tx_desc = &tx_q->base_tx[0]; i = 0; + } else { + tx_buf++; + tx_desc++; } size = skb_frag_size(frag); @@ -270,8 +277,6 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, DMA_TO_DEVICE); - - tx_buf = &tx_q->tx_buf[i]; } skb_tx_timestamp(first->skb); @@ -282,13 +287,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets, size, td_tag); - IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i); + first->type = LIBETH_SQE_SKB; + first->rs_idx = i; - /* set next_to_watch value indicating a packet is present */ - first->next_to_watch = tx_desc; + IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i); nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); - netdev_tx_sent_queue(nq, first->bytecount); + netdev_tx_sent_queue(nq, first->bytes); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); } @@ -306,8 +311,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq) struct idpf_base_tx_ctx_desc *ctx_desc; int ntu = txq->next_to_use; - memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf)); - txq->tx_buf[ntu].ctx_entry = true; + txq->tx_buf[ntu].type = LIBETH_SQE_CTX; ctx_desc = &txq->base_ctx[ntu]; @@ -371,6 +375,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, IDPF_TX_DESCS_FOR_CTX)) { idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.q_busy); + u64_stats_update_end(&tx_q->stats_sync); + return NETDEV_TX_BUSY; } @@ -396,11 +404,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, first->skb = skb; if (tso) { - first->gso_segs = offload.tso_segs; - first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len); + first->packets = offload.tso_segs; + first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len); } else { - first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); - first->gso_segs = 1; + first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + first->packets = 1; } idpf_tx_singleq_map(tx_q, first, &offload); @@ -420,10 +428,15 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget, int *cleaned) { - unsigned int total_bytes = 0, total_pkts = 0; + struct libeth_sq_napi_stats ss = { }; struct idpf_base_tx_desc *tx_desc; u32 budget = tx_q->clean_budget; s16 ntc = tx_q->next_to_clean; + struct libeth_cq_pp cp = { + .dev = tx_q->dev, + .ss = &ss, + .napi = napi_budget, + }; struct idpf_netdev_priv *np; struct idpf_tx_buf *tx_buf; struct netdev_queue *nq; @@ -441,47 +454,26 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget, * such. We can skip this descriptor since there is no buffer * to clean. */ - if (tx_buf->ctx_entry) { - /* Clear this flag here to avoid stale flag values when - * this buffer is used for actual data in the future. - * There are cases where the tx_buf struct / the flags - * field will not be cleared before being reused. 
- */ - tx_buf->ctx_entry = false; + if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) { + tx_buf->type = LIBETH_SQE_EMPTY; goto fetch_next_txq_desc; } - /* if next_to_watch is not set then no work pending */ - eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch; - if (!eop_desc) + if (unlikely(tx_buf->type != LIBETH_SQE_SKB)) break; - /* prevent any other reads prior to eop_desc */ + /* prevent any other reads prior to type */ smp_rmb(); + eop_desc = &tx_q->base_tx[tx_buf->rs_idx]; + /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->qw1 & cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE))) break; - /* clear next_to_watch to prevent false hangs */ - tx_buf->next_to_watch = NULL; - /* update the statistics for this packet */ - total_bytes += tx_buf->bytecount; - total_pkts += tx_buf->gso_segs; - - napi_consume_skb(tx_buf->skb, napi_budget); - - /* unmap skb header data */ - dma_unmap_single(tx_q->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - - /* clear tx_buf data */ - tx_buf->skb = NULL; - dma_unmap_len_set(tx_buf, len, 0); + libeth_tx_complete(tx_buf, &cp); /* unmap remaining buffers */ while (tx_desc != eop_desc) { @@ -495,13 +487,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget, } /* unmap any remaining paged data */ - if (dma_unmap_len(tx_buf, len)) { - dma_unmap_page(tx_q->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); - } + libeth_tx_complete(tx_buf, &cp); } /* update budget only if we did something */ @@ -521,11 +507,11 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget, ntc += tx_q->desc_count; tx_q->next_to_clean = ntc; - *cleaned += total_pkts; + *cleaned += ss.packets; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_add(&tx_q->q_stats.packets, total_pkts); - u64_stats_add(&tx_q->q_stats.bytes, total_bytes); + u64_stats_add(&tx_q->q_stats.packets, ss.packets); + u64_stats_add(&tx_q->q_stats.bytes, ss.bytes); u64_stats_update_end(&tx_q->stats_sync); np = netdev_priv(tx_q->netdev); @@ -533,7 +519,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget, dont_wake = np->state != __IDPF_VPORT_UP || !netif_carrier_ok(tx_q->netdev); - __netif_txq_completed_wake(nq, total_pkts, total_bytes, + __netif_txq_completed_wake(nq, ss.packets, ss.bytes, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, dont_wake); @@ -1134,8 +1120,10 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget) &work_done); /* If work not completed, return budget and polling will return */ - if (!clean_complete) + if (!clean_complete) { + idpf_vport_intr_set_wb_on_itr(q_vector); return budget; + } work_done = min_t(int, work_done, budget - 1); @@ -1144,6 +1132,8 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget) */ if (likely(napi_complete_done(napi, work_done))) idpf_vport_intr_update_itr_ena_irq(q_vector); + else + idpf_vport_intr_set_wb_on_itr(q_vector); return work_done; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 585c3dadd9bfac..d4e6f0e104872d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -2,10 +2,19 @@ /* Copyright (C) 2023 Intel Corporation */ #include +#include #include "idpf.h" #include "idpf_virtchnl.h" +struct idpf_tx_stash { + struct hlist_node hlist; + struct libeth_sqe buf; +}; + +#define idpf_tx_buf_compl_tag(buf) 
(*(u32 *)&(buf)->priv) +LIBETH_SQE_CHECK_PRIV(u32); + static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, unsigned int count); @@ -60,42 +69,21 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) } } -/** - * idpf_tx_buf_rel - Release a Tx buffer - * @tx_q: the queue that owns the buffer - * @tx_buf: the buffer to free - */ -static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q, - struct idpf_tx_buf *tx_buf) -{ - if (tx_buf->skb) { - if (dma_unmap_len(tx_buf, len)) - dma_unmap_single(tx_q->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - dev_kfree_skb_any(tx_buf->skb); - } else if (dma_unmap_len(tx_buf, len)) { - dma_unmap_page(tx_q->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - } - - tx_buf->next_to_watch = NULL; - tx_buf->skb = NULL; - tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; - dma_unmap_len_set(tx_buf, len, 0); -} - /** * idpf_tx_buf_rel_all - Free any empty Tx buffers * @txq: queue to be cleaned */ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) { + struct libeth_sq_napi_stats ss = { }; struct idpf_buf_lifo *buf_stack; - u16 i; + struct idpf_tx_stash *stash; + struct libeth_cq_pp cp = { + .dev = txq->dev, + .ss = &ss, + }; + struct hlist_node *tmp; + u32 i, tag; /* Buffers already cleared, nothing to do */ if (!txq->tx_buf) @@ -103,7 +91,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) /* Free all the Tx buffer sk_buffs */ for (i = 0; i < txq->desc_count; i++) - idpf_tx_buf_rel(txq, &txq->tx_buf[i]); + libeth_tx_complete(&txq->tx_buf[i], &cp); kfree(txq->tx_buf); txq->tx_buf = NULL; @@ -115,6 +103,20 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) if (!buf_stack->bufs) return; + /* + * If a Tx timeout occurred, there are potentially still bufs in the + * hash table, free them here. 
+ */ + hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash, + hlist) { + if (!stash) + continue; + + libeth_tx_complete(&stash->buf, &cp); + hash_del(&stash->hlist); + idpf_buf_lifo_push(buf_stack, stash); + } + for (i = 0; i < buf_stack->size; i++) kfree(buf_stack->bufs[i]); @@ -131,6 +133,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) static void idpf_tx_desc_rel(struct idpf_tx_queue *txq) { idpf_tx_buf_rel_all(txq); + netdev_tx_reset_subqueue(txq->netdev, txq->idx); if (!txq->desc_ring) return; @@ -203,10 +206,6 @@ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q) if (!tx_q->tx_buf) return -ENOMEM; - /* Initialize tx_bufs with invalid completion tags */ - for (i = 0; i < tx_q->desc_count; i++) - tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; - if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) return 0; @@ -1655,37 +1654,6 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) wake_up(&vport->sw_marker_wq); } -/** - * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of - * packet - * @tx_q: tx queue to clean buffer from - * @tx_buf: buffer to be cleaned - * @cleaned: pointer to stats struct to track cleaned packets/bytes - * @napi_budget: Used to determine if we are in netpoll - */ -static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q, - struct idpf_tx_buf *tx_buf, - struct idpf_cleaned_stats *cleaned, - int napi_budget) -{ - napi_consume_skb(tx_buf->skb, napi_budget); - - if (dma_unmap_len(tx_buf, len)) { - dma_unmap_single(tx_q->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - - dma_unmap_len_set(tx_buf, len, 0); - } - - /* clear tx_buf data */ - tx_buf->skb = NULL; - - cleaned->bytes += tx_buf->bytecount; - cleaned->packets += tx_buf->gso_segs; -} - /** * idpf_tx_clean_stashed_bufs - clean bufs that were stored for * out of order completions @@ -1696,33 +1664,28 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q, */ static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, u16 compl_tag, - struct idpf_cleaned_stats *cleaned, + struct libeth_sq_napi_stats *cleaned, int budget) { struct idpf_tx_stash *stash; struct hlist_node *tmp_buf; + struct libeth_cq_pp cp = { + .dev = txq->dev, + .ss = cleaned, + .napi = budget, + }; /* Buffer completion */ hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf, hlist, compl_tag) { - if (unlikely(stash->buf.compl_tag != (int)compl_tag)) + if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag)) continue; - if (stash->buf.skb) { - idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned, - budget); - } else if (dma_unmap_len(&stash->buf, len)) { - dma_unmap_page(txq->dev, - dma_unmap_addr(&stash->buf, dma), - dma_unmap_len(&stash->buf, len), - DMA_TO_DEVICE); - dma_unmap_len_set(&stash->buf, len, 0); - } + hash_del(&stash->hlist); + libeth_tx_complete(&stash->buf, &cp); /* Push shadow buf back onto stack */ idpf_buf_lifo_push(&txq->stash->buf_stack, stash); - - hash_del(&stash->hlist); } } @@ -1737,8 +1700,7 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq, { struct idpf_tx_stash *stash; - if (unlikely(!dma_unmap_addr(tx_buf, dma) && - !dma_unmap_len(tx_buf, len))) + if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) return 0; stash = idpf_buf_lifo_pop(&txq->stash->buf_stack); @@ -1751,29 +1713,27 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq, /* Store buffer params in shadow buffer */ stash->buf.skb = tx_buf->skb; - stash->buf.bytecount = 
tx_buf->bytecount; - stash->buf.gso_segs = tx_buf->gso_segs; + stash->buf.bytes = tx_buf->bytes; + stash->buf.packets = tx_buf->packets; + stash->buf.type = tx_buf->type; + stash->buf.nr_frags = tx_buf->nr_frags; dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma)); dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len)); - stash->buf.compl_tag = tx_buf->compl_tag; + idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf); /* Add buffer to buf_hash table to be freed later */ hash_add(txq->stash->sched_buf_hash, &stash->hlist, - stash->buf.compl_tag); - - memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); + idpf_tx_buf_compl_tag(&stash->buf)); - /* Reinitialize buf_id portion of tag */ - tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + tx_buf->type = LIBETH_SQE_EMPTY; return 0; } #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \ do { \ - (ntc)++; \ - if (unlikely(!(ntc))) { \ - ntc -= (txq)->desc_count; \ + if (unlikely(++(ntc) == (txq)->desc_count)) { \ + ntc = 0; \ buf = (txq)->tx_buf; \ desc = &(txq)->flex_tx[0]; \ } else { \ @@ -1797,69 +1757,71 @@ do { \ * Separate packet completion events will be reported on the completion queue, * and the buffers will be cleaned separately. The stats are not updated from * this function when using flow-based scheduling. + * + * Furthermore, in flow scheduling mode, check to make sure there are enough + * reserve buffers to stash the packet. If there are not, return early, which + * will leave next_to_clean pointing to the packet that failed to be stashed. + * + * Return: false in the scenario above, true otherwise. */ -static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, +static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, int napi_budget, - struct idpf_cleaned_stats *cleaned, + struct libeth_sq_napi_stats *cleaned, bool descs_only) { union idpf_tx_flex_desc *next_pending_desc = NULL; union idpf_tx_flex_desc *tx_desc; - s16 ntc = tx_q->next_to_clean; + u32 ntc = tx_q->next_to_clean; + struct libeth_cq_pp cp = { + .dev = tx_q->dev, + .ss = cleaned, + .napi = napi_budget, + }; struct idpf_tx_buf *tx_buf; + bool clean_complete = true; tx_desc = &tx_q->flex_tx[ntc]; next_pending_desc = &tx_q->flex_tx[end]; tx_buf = &tx_q->tx_buf[ntc]; - ntc -= tx_q->desc_count; while (tx_desc != next_pending_desc) { - union idpf_tx_flex_desc *eop_desc; + u32 eop_idx; /* If this entry in the ring was used as a context descriptor, - * it's corresponding entry in the buffer ring will have an - * invalid completion tag since no buffer was used. We can - * skip this descriptor since there is no buffer to clean. + * it's corresponding entry in the buffer ring is reserved. We + * can skip this descriptor since there is no buffer to clean. 
*/ - if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG)) + if (tx_buf->type <= LIBETH_SQE_CTX) goto fetch_next_txq_desc; - eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch; + if (unlikely(tx_buf->type != LIBETH_SQE_SKB)) + break; - /* clear next_to_watch to prevent false hangs */ - tx_buf->next_to_watch = NULL; + eop_idx = tx_buf->rs_idx; if (descs_only) { - if (idpf_stash_flow_sch_buffers(tx_q, tx_buf)) + if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) { + clean_complete = false; goto tx_splitq_clean_out; + } + + idpf_stash_flow_sch_buffers(tx_q, tx_buf); - while (tx_desc != eop_desc) { + while (ntc != eop_idx) { idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); - - if (dma_unmap_len(tx_buf, len)) { - if (idpf_stash_flow_sch_buffers(tx_q, - tx_buf)) - goto tx_splitq_clean_out; - } + idpf_stash_flow_sch_buffers(tx_q, tx_buf); } } else { - idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned, - napi_budget); + libeth_tx_complete(tx_buf, &cp); /* unmap remaining buffers */ - while (tx_desc != eop_desc) { + while (ntc != eop_idx) { idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); /* unmap any remaining paged data */ - if (dma_unmap_len(tx_buf, len)) { - dma_unmap_page(tx_q->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); - } + libeth_tx_complete(tx_buf, &cp); } } @@ -1868,8 +1830,9 @@ static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, } tx_splitq_clean_out: - ntc += tx_q->desc_count; tx_q->next_to_clean = ntc; + + return clean_complete; } #define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \ @@ -1895,57 +1858,68 @@ do { \ * this completion tag. */ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, - struct idpf_cleaned_stats *cleaned, + struct libeth_sq_napi_stats *cleaned, int budget) { u16 idx = compl_tag & txq->compl_tag_bufid_m; struct idpf_tx_buf *tx_buf = NULL; - u16 ntc = txq->next_to_clean; - u16 num_descs_cleaned = 0; - u16 orig_idx = idx; + struct libeth_cq_pp cp = { + .dev = txq->dev, + .ss = cleaned, + .napi = budget, + }; + u16 ntc, orig_idx = idx; tx_buf = &txq->tx_buf[idx]; - while (tx_buf->compl_tag == (int)compl_tag) { - if (tx_buf->skb) { - idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget); - } else if (dma_unmap_len(tx_buf, len)) { - dma_unmap_page(txq->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); - } + if (unlikely(tx_buf->type <= LIBETH_SQE_CTX || + idpf_tx_buf_compl_tag(tx_buf) != compl_tag)) + return false; + + if (tx_buf->type == LIBETH_SQE_SKB) + libeth_tx_complete(tx_buf, &cp); - memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); - tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); - num_descs_cleaned++; + while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) { + libeth_tx_complete(tx_buf, &cp); idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); } - /* If we didn't clean anything on the ring for this completion, there's - * nothing more to do. - */ - if (unlikely(!num_descs_cleaned)) - return false; - - /* Otherwise, if we did clean a packet on the ring directly, it's safe - * to assume that the descriptors starting from the original - * next_to_clean up until the previously cleaned packet can be reused. - * Therefore, we will go back in the ring and stash any buffers still - * in the ring into the hash table to be cleaned later. 
+ /* + * It's possible the packet we just cleaned was an out of order + * completion, which means we can stash the buffers starting from + * the original next_to_clean and reuse the descriptors. We need + * to compare the descriptor ring next_to_clean packet's "first" buffer + * to the "first" buffer of the packet we just cleaned to determine if + * this is the case. However, next_to_clean can point to either a + * reserved buffer that corresponds to a context descriptor used for the + * next_to_clean packet (TSO packet) or the "first" buffer (single + * packet). The orig_idx from the packet we just cleaned will always + * point to the "first" buffer. If next_to_clean points to a reserved + * buffer, let's bump ntc once and start the comparison from there. */ + ntc = txq->next_to_clean; tx_buf = &txq->tx_buf[ntc]; - while (tx_buf != &txq->tx_buf[orig_idx]) { - idpf_stash_flow_sch_buffers(txq, tx_buf); + + if (tx_buf->type == LIBETH_SQE_CTX) idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf); - } - /* Finally, update next_to_clean to reflect the work that was just done - * on the ring, if any. If the packet was only cleaned from the hash - * table, the ring will not be impacted, therefore we should not touch - * next_to_clean. The updated idx is used here + /* + * If ntc still points to a different "first" buffer, clean the + * descriptor ring and stash all of the buffers for later cleaning. If + * we cannot stash all of the buffers, next_to_clean will point to the + * "first" buffer of the packet that could not be stashed and cleaning + * will start there next time. + */ + if (unlikely(tx_buf != &txq->tx_buf[orig_idx] && + !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned, + true))) + return true; + + /* + * Otherwise, update next_to_clean to reflect the cleaning that was + * done above.
*/ txq->next_to_clean = idx; @@ -1965,7 +1939,7 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, */ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq, struct idpf_splitq_tx_compl_desc *desc, - struct idpf_cleaned_stats *cleaned, + struct libeth_sq_napi_stats *cleaned, int budget) { u16 compl_tag; @@ -1973,7 +1947,8 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq, if (!idpf_queue_has(FLOW_SCH_EN, txq)) { u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); - return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); + idpf_tx_splitq_clean(txq, head, budget, cleaned, false); + return; } compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag); @@ -2008,7 +1983,7 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget, ntc -= complq->desc_count; do { - struct idpf_cleaned_stats cleaned_stats = { }; + struct libeth_sq_napi_stats cleaned_stats = { }; struct idpf_tx_queue *tx_q; int rel_tx_qid; u16 hw_head; @@ -2157,29 +2132,6 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); } -/** - * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions - * @tx_q: the queue to be checked - * @size: number of descriptors we want to assure is available - * - * Returns 0 if stop is not needed - */ -int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size) -{ - struct netdev_queue *nq; - - if (likely(IDPF_DESC_UNUSED(tx_q) >= size)) - return 0; - - u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.q_busy); - u64_stats_update_end(&tx_q->stats_sync); - - nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); - - return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); -} - /** * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions * @tx_q: the queue to be checked @@ -2191,7 +2143,7 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, unsigned int descs_needed) { if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) - goto splitq_stop; + goto out; /* If there are too many outstanding completions expected on the * completion queue, stop the TX queue to give the device some time to @@ -2210,10 +2162,12 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, return 0; splitq_stop: + netif_stop_subqueue(tx_q->netdev, tx_q->idx); + +out: u64_stats_update_begin(&tx_q->stats_sync); u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); - netif_stop_subqueue(tx_q->netdev, tx_q->idx); return -EBUSY; } @@ -2236,7 +2190,11 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); tx_q->next_to_use = val; - idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); + if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) { + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.q_busy); + u64_stats_update_end(&tx_q->stats_sync); + } /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. 
(Only @@ -2307,6 +2265,12 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 idx) { + struct libeth_sq_napi_stats ss = { }; + struct libeth_cq_pp cp = { + .dev = txq->dev, + .ss = &ss, + }; + u64_stats_update_begin(&txq->stats_sync); u64_stats_inc(&txq->q_stats.dma_map_errs); u64_stats_update_end(&txq->stats_sync); @@ -2316,7 +2280,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *tx_buf; tx_buf = &txq->tx_buf[idx]; - idpf_tx_buf_rel(txq, tx_buf); + libeth_tx_complete(tx_buf, &cp); if (tx_buf == first) break; if (idx == 0) @@ -2395,6 +2359,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); tx_buf = first; + first->nr_frags = 0; params->compl_tag = (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; @@ -2405,7 +2370,9 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, if (dma_mapping_error(tx_q->dev, dma)) return idpf_tx_dma_map_error(tx_q, skb, first, i); - tx_buf->compl_tag = params->compl_tag; + first->nr_frags++; + idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag; + tx_buf->type = LIBETH_SQE_FRAG; /* record length, and DMA address */ dma_unmap_len_set(tx_buf, len, size); @@ -2459,14 +2426,15 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, max_data); - tx_desc++; - i++; - - if (i == tx_q->desc_count) { + if (unlikely(++i == tx_q->desc_count)) { + tx_buf = tx_q->tx_buf; tx_desc = &tx_q->flex_tx[0]; i = 0; tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); + } else { + tx_buf++; + tx_desc++; } /* Since this packet has a buffer that is going to span @@ -2479,8 +2447,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, * simply pass over these holes and finish cleaning the * rest of the packet. */ - memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); - tx_q->tx_buf[i].compl_tag = params->compl_tag; + tx_buf->type = LIBETH_SQE_EMPTY; /* Adjust the DMA offset and the remaining size of the * fragment. 
On the first iteration of this loop, @@ -2504,13 +2471,15 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, break; idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); - tx_desc++; - i++; - if (i == tx_q->desc_count) { + if (unlikely(++i == tx_q->desc_count)) { + tx_buf = tx_q->tx_buf; tx_desc = &tx_q->flex_tx[0]; i = 0; tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); + } else { + tx_buf++; + tx_desc++; } size = skb_frag_size(frag); @@ -2518,26 +2487,24 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, DMA_TO_DEVICE); - - tx_buf = &tx_q->tx_buf[i]; } /* record SW timestamp if HW timestamp is not available */ skb_tx_timestamp(skb); + first->type = LIBETH_SQE_SKB; + /* write last descriptor with RS and EOP bits */ + first->rs_idx = i; td_cmd |= params->eop_cmd; idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); i = idpf_tx_splitq_bump_ntu(tx_q, i); - /* set next_to_watch value indicating a packet is present */ - first->next_to_watch = tx_desc; - tx_q->txq_grp->num_completions_pending++; /* record bytecount for BQL */ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); - netdev_tx_sent_queue(nq, first->bytecount); + netdev_tx_sent_queue(nq, first->bytes); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); } @@ -2737,8 +2704,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) struct idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use; - memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); - txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + txq->tx_buf[i].type = LIBETH_SQE_CTX; /* grab the next descriptor */ desc = &txq->flex_ctx[i]; @@ -2822,12 +2788,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, first->skb = skb; if (tso) { - first->gso_segs = tx_params.offload.tso_segs; - first->bytecount = skb->len + - ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len); + first->packets = tx_params.offload.tso_segs; + first->bytes = skb->len + + ((first->packets - 1) * tx_params.offload.tso_hdr_len); } else { - first->gso_segs = 1; - first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); + first->packets = 1; + first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { @@ -3749,6 +3715,7 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) /* net_dim() updates ITR out-of-band using a work item */ idpf_net_dim(q_vector); + q_vector->wb_on_itr = false; intval = idpf_vport_intr_buildreg_itr(q_vector, IDPF_NO_ITR_UPDATE_IDX, 0); @@ -4051,8 +4018,10 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done); /* If work not completed, return budget and polling will return */ - if (!clean_complete) + if (!clean_complete) { + idpf_vport_intr_set_wb_on_itr(q_vector); return budget; + } work_done = min_t(int, work_done, budget - 1); @@ -4061,6 +4030,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) */ if (likely(napi_complete_done(napi, work_done))) idpf_vport_intr_update_itr_ena_irq(q_vector); + else + idpf_vport_intr_set_wb_on_itr(q_vector); /* Switch to poll mode in the tear-down path after sending disable * queues virtchnl message, as the interrupts will be disabled after diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 6215dbee554651..f0537826f8403f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ 
b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -127,11 +127,10 @@ do { \ */ #define IDPF_TX_COMPLQ_PENDING(txq) \ (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \ - 0 : U64_MAX) + \ + 0 : U32_MAX) + \ (txq)->num_completions_pending - (txq)->complq->num_completions) #define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16 -#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1 /* Adjust the generation for the completion tag and wrap if necessary */ #define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \ ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \ @@ -149,47 +148,7 @@ union idpf_tx_flex_desc { struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */ }; -/** - * struct idpf_tx_buf - * @next_to_watch: Next descriptor to clean - * @skb: Pointer to the skb - * @dma: DMA address - * @len: DMA length - * @bytecount: Number of bytes - * @gso_segs: Number of GSO segments - * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare - * with completion tag returned in buffer completion event. - * Because the completion tag is expected to be the same in all - * data descriptors for a given packet, and a single packet can - * span multiple buffers, we need this field to track all - * buffers associated with this completion tag independently of - * the buf_id. The tag consists of a N bit buf_id and M upper - * order "generation bits". See compl_tag_bufid_m and - * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1 - * to indicate the tag is not valid. - * @ctx_entry: Singleq only. Used to indicate the corresponding entry - * in the descriptor ring was used for a context descriptor and - * this buffer entry should be skipped. - */ -struct idpf_tx_buf { - void *next_to_watch; - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(len); - unsigned int bytecount; - unsigned short gso_segs; - - union { - int compl_tag; - - bool ctx_entry; - }; -}; - -struct idpf_tx_stash { - struct hlist_node hlist; - struct idpf_tx_buf buf; -}; +#define idpf_tx_buf libeth_sqe /** * struct idpf_buf_lifo - LIFO for managing OOO completions @@ -390,9 +349,11 @@ struct idpf_vec_regs { * struct idpf_intr_reg * @dyn_ctl: Dynamic control interrupt register * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable + * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask * @dyn_ctl_itridx_s: Register bit offset for ITR index * @dyn_ctl_itridx_m: Mask for ITR index * @dyn_ctl_intrvl_s: Register bit offset for ITR interval + * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature * @rx_itr: RX ITR register * @tx_itr: TX ITR register * @icr_ena: Interrupt cause register offset @@ -401,9 +362,11 @@ struct idpf_vec_regs { struct idpf_intr_reg { void __iomem *dyn_ctl; u32 dyn_ctl_intena_m; + u32 dyn_ctl_intena_msk_m; u32 dyn_ctl_itridx_s; u32 dyn_ctl_itridx_m; u32 dyn_ctl_intrvl_s; + u32 dyn_ctl_wb_on_itr_m; void __iomem *rx_itr; void __iomem *tx_itr; void __iomem *icr_ena; @@ -424,6 +387,7 @@ struct idpf_intr_reg { * @intr_reg: See struct idpf_intr_reg * @napi: napi handler * @total_events: Number of interrupts processed + * @wb_on_itr: whether WB on ITR is enabled * @tx_dim: Data for TX net_dim algorithm * @tx_itr_value: TX interrupt throttling rate * @tx_intr_mode: Dynamic ITR or not @@ -454,6 +418,7 @@ struct idpf_q_vector { __cacheline_group_begin_aligned(read_write); struct napi_struct napi; u16 total_events; + bool wb_on_itr; struct dim tx_dim; u16 tx_itr_value; @@ -472,7 +437,7 @@ struct idpf_q_vector { cpumask_var_t affinity_mask; __cacheline_group_end_aligned(cold); }; 
-libeth_cacheline_set_assert(struct idpf_q_vector, 104, +libeth_cacheline_set_assert(struct idpf_q_vector, 112, 424 + 2 * sizeof(struct dim), 8 + sizeof(cpumask_var_t)); @@ -496,11 +461,6 @@ struct idpf_tx_queue_stats { u64_stats_t dma_map_errs; }; -struct idpf_cleaned_stats { - u32 packets; - u32 bytes; -}; - #define IDPF_ITR_DYNAMIC 1 #define IDPF_ITR_MAX 0x1FE0 #define IDPF_ITR_20K 0x0032 @@ -688,7 +648,7 @@ struct idpf_tx_queue { void *desc_ring; }; - struct idpf_tx_buf *tx_buf; + struct libeth_sqe *tx_buf; struct idpf_txq_group *txq_grp; struct device *dev; void __iomem *tail; @@ -831,7 +791,7 @@ struct idpf_compl_queue { u32 next_to_use; u32 next_to_clean; - u32 num_completions; + aligned_u64 num_completions; __cacheline_group_end_aligned(read_write); __cacheline_group_begin_aligned(cold); @@ -963,7 +923,7 @@ struct idpf_txq_group { struct idpf_compl_queue *complq; - u32 num_completions_pending; + aligned_u64 num_completions_pending; }; static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector) @@ -1033,6 +993,25 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc, idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size); } +/** + * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts + * @q_vector: pointer to queue vector struct + */ +static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector) +{ + struct idpf_intr_reg *reg; + + if (q_vector->wb_on_itr) + return; + + q_vector->wb_on_itr = true; + reg = &q_vector->intr_reg; + + writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m | + (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s), + reg->dyn_ctl); +} + int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); void idpf_vport_init_num_qs(struct idpf_vport *vport, struct virtchnl2_create_vport *vport_msg); @@ -1064,7 +1043,6 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 ring_idx); unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb); -int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size); void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q); @@ -1073,4 +1051,12 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq, u16 cleaned_count); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); +static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, + u32 needed) +{ + return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, + IDPF_DESC_UNUSED(tx_q), + needed, needed); +} + #endif /* !_IDPF_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c index 629cb5cb7c9fc1..99b8dbaf4225c5 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -97,7 +97,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport) intr->dyn_ctl = idpf_get_reg_addr(adapter, reg_vals[vec_id].dyn_ctl_reg); intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M; + intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M; intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S; + intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M; spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, IDPF_VF_ITR_IDX_SPACING); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c 
b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 06b9970dffad0c..ca6ccbc139548b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2387,15 +2387,11 @@ static int igb_get_ts_info(struct net_device *dev, if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; switch (adapter->hw.mac.type) { case e1000_82575: info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + SOF_TIMESTAMPING_TX_SOFTWARE; return 0; case e1000_82576: case e1000_82580: @@ -2405,8 +2401,6 @@ static int igb_get_ts_info(struct net_device *dev, case e1000_i211: info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 7b83678ba83a69..6ad35a00a28736 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -282,7 +282,6 @@ enum igbvf_state_t { extern char igbvf_driver_name[]; -void igbvf_check_options(struct igbvf_adapter *); void igbvf_set_ethtool_ops(struct net_device *); int igbvf_up(struct igbvf_adapter *); diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index e5b31818d56585..7637d21445bf53 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -49,7 +49,6 @@ #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ -void e1000_init_mbx_ops_generic(struct e1000_hw *hw); s32 e1000_init_mbx_params_vf(struct e1000_hw *); #endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index c38b4d0f00cefa..eac0f966e0e4c5 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -259,6 +259,10 @@ struct igc_adapter { */ spinlock_t qbv_tx_lock; + bool strict_priority_enable; + u8 num_tc; + u16 queue_per_tc[IGC_MAX_TX_QUEUES]; + /* OS defined structs */ struct pci_dev *pdev; /* lock for statistics */ @@ -382,9 +386,11 @@ extern char igc_driver_name[]; #define IGC_FLAG_RX_LEGACY BIT(16) #define IGC_FLAG_TSN_QBV_ENABLED BIT(17) #define IGC_FLAG_TSN_QAV_ENABLED BIT(18) +#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19) -#define IGC_FLAG_TSN_ANY_ENABLED \ - (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \ + IGC_FLAG_TSN_LEGACY_ENABLED) #define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) #define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) @@ -681,6 +687,7 @@ enum igc_ring_flags_t { IGC_RING_FLAG_TX_DETECT_HANG, IGC_RING_FLAG_AF_XDP_ZC, IGC_RING_FLAG_TX_HWTSTAMP, + IGC_RING_FLAG_RX_ALLOC_FAILED, }; #define ring_uses_large_buffer(ring) \ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 511384f3ec5cb5..8e449904aa7dbd 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -4,6 +4,8 @@ #ifndef _IGC_DEFINES_H_ #define _IGC_DEFINES_H_ +#include + /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ #define REQ_TX_DESCRIPTOR_MULTIPLE 8 #define REQ_RX_DESCRIPTOR_MULTIPLE 8 @@ -176,7 +178,6 @@ /* PHY GPY 211 registers */ #define STANDARD_AN_REG_MASK 0x0007 /* MMD */ -#define 
ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ #define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ #define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ @@ -553,6 +554,15 @@ #define IGC_MAX_SR_QUEUES 2 +#define IGC_TXARB_TXQ_PRIO_0_MASK GENMASK(1, 0) +#define IGC_TXARB_TXQ_PRIO_1_MASK GENMASK(3, 2) +#define IGC_TXARB_TXQ_PRIO_2_MASK GENMASK(5, 4) +#define IGC_TXARB_TXQ_PRIO_3_MASK GENMASK(7, 6) +#define IGC_TXARB_TXQ_PRIO_0(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_0_MASK, (x)) +#define IGC_TXARB_TXQ_PRIO_1(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_1_MASK, (x)) +#define IGC_TXARB_TXQ_PRIO_2(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_2_MASK, (x)) +#define IGC_TXARB_TXQ_PRIO_3(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_3_MASK, (x)) + /* Receive Checksum Control */ #define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ #define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ @@ -641,6 +651,16 @@ #define IGC_MDIC_READY 0x10000000 #define IGC_MDIC_ERROR 0x40000000 +/* EEE Link Ability */ +#define IGC_EEE_2500BT_MASK BIT(0) +#define IGC_EEE_1000BT_MASK BIT(2) +#define IGC_EEE_100BT_MASK BIT(1) + +/* EEE Link-Partner Ability */ +#define IGC_LP_EEE_2500BT_MASK BIT(0) +#define IGC_LP_EEE_1000BT_MASK BIT(2) +#define IGC_LP_EEE_100BT_MASK BIT(1) + #define IGC_N0_QUEUE -1 #define IGC_MAX_MAC_HDR_LEN 127 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 3d3ef4e1547c6f..5b0c6f43376797 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1540,6 +1540,10 @@ static int igc_ethtool_set_channels(struct net_device *netdev, if (ch->other_count != NON_Q_VECTORS) return -EINVAL; + /* Do not allow channel reconfiguration when mqprio is enabled */ + if (adapter->strict_priority_enable) + return -EINVAL; + /* Verify the number of channels doesn't exceed hw limits */ max_combined = igc_get_max_rss_queues(adapter); if (count > max_combined) @@ -1565,15 +1569,11 @@ static int igc_ethtool_get_ts_info(struct net_device *dev, if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; switch (adapter->hw.mac.type) { case igc_i225: info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; @@ -1627,8 +1627,11 @@ static int igc_ethtool_get_eee(struct net_device *netdev, { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; - u32 eeer; + struct igc_phy_info *phy = &hw->phy; + u16 eee_advert, eee_lp_advert; + u32 eeer, ret_val; + /* EEE supported */ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, edata->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, @@ -1636,6 +1639,74 @@ static int igc_ethtool_get_eee(struct net_device *netdev, linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, edata->supported); + /* EEE Advertisement 1 - reg 7.60 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + IGC_ANEG_EEE_AB1, + &eee_advert); + if (ret_val) { + netdev_err(adapter->netdev, + "Failed to read IEEE 7.60 register\n"); + return -EINVAL; + } + + if (eee_advert & IGC_EEE_1000BT_MASK) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->advertised); + + if (eee_advert & IGC_EEE_100BT_MASK) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + edata->advertised); + + /* EEE Advertisement 2 - 
reg 7.62 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + IGC_ANEG_EEE_AB2, + &eee_advert); + if (ret_val) { + netdev_err(adapter->netdev, + "Failed to read IEEE 7.62 register\n"); + return -EINVAL; + } + + if (eee_advert & IGC_EEE_2500BT_MASK) + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + edata->advertised); + + /* EEE Link-Partner Ability 1 - reg 7.61 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + IGC_ANEG_EEE_LP_AB1, + &eee_lp_advert); + if (ret_val) { + netdev_err(adapter->netdev, + "Failed to read IEEE 7.61 register\n"); + return -EINVAL; + } + + if (eee_lp_advert & IGC_LP_EEE_1000BT_MASK) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->lp_advertised); + + if (eee_lp_advert & IGC_LP_EEE_100BT_MASK) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + edata->lp_advertised); + + /* EEE Link-Partner Ability 2 - reg 7.63 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + IGC_ANEG_EEE_LP_AB2, + &eee_lp_advert); + if (ret_val) { + netdev_err(adapter->netdev, + "Failed to read IEEE 7.63 register\n"); + return -EINVAL; + } + + if (eee_lp_advert & IGC_LP_EEE_2500BT_MASK) + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + edata->lp_advertised); + eeer = rd32(IGC_EEER); /* EEE status on negotiated link */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 0a095cdea4fb8c..6e70bca15db1d8 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2191,6 +2191,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; + set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); return false; } @@ -2207,6 +2208,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, __free_page(page); rx_ring->rx_stats.alloc_failed++; + set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); return false; } @@ -2658,6 +2660,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) if (!skb) { rx_ring->rx_stats.alloc_failed++; rx_buffer->pagecnt_bias++; + set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); break; } @@ -2738,6 +2741,7 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, skb = igc_construct_skb_zc(ring, xdp); if (!skb) { ring->rx_stats.alloc_failed++; + set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags); return; } @@ -5807,11 +5811,29 @@ static void igc_watchdog_task(struct work_struct *work) if (adapter->flags & IGC_FLAG_HAS_MSIX) { u32 eics = 0; - for (i = 0; i < adapter->num_q_vectors; i++) - eics |= adapter->q_vector[i]->eims_value; - wr32(IGC_EICS, eics); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + struct igc_ring *rx_ring; + + if (!q_vector->rx.ring) + continue; + + rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; + + if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { + eics |= q_vector->eims_value; + clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); + } + } + if (eics) + wr32(IGC_EICS, eics); } else { - wr32(IGC_ICS, IGC_ICS_RXDMT0); + struct igc_ring *rx_ring = adapter->rx_ring[0]; + + if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { + clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } } igc_ptp_tx_hang(adapter); @@ -6515,6 
+6537,13 @@ static int igc_tc_query_caps(struct igc_adapter *adapter, struct igc_hw *hw = &adapter->hw; switch (base->type) { + case TC_SETUP_QDISC_MQPRIO: { + struct tc_mqprio_caps *caps = base->caps; + + caps->validate_queue_counts = true; + + return 0; + } case TC_SETUP_QDISC_TAPRIO: { struct tc_taprio_caps *caps = base->caps; @@ -6532,6 +6561,65 @@ static int igc_tc_query_caps(struct igc_adapter *adapter, } } +static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc, + u16 *offset) +{ + int i; + + adapter->strict_priority_enable = true; + adapter->num_tc = num_tc; + + for (i = 0; i < num_tc; i++) + adapter->queue_per_tc[i] = offset[i]; +} + +static int igc_tsn_enable_mqprio(struct igc_adapter *adapter, + struct tc_mqprio_qopt_offload *mqprio) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (!mqprio->qopt.num_tc) { + adapter->strict_priority_enable = false; + goto apply; + } + + /* There are as many TCs as Tx queues. */ + if (mqprio->qopt.num_tc != adapter->num_tx_queues) { + NL_SET_ERR_MSG_FMT_MOD(mqprio->extack, + "Only %d traffic classes supported", + adapter->num_tx_queues); + return -EOPNOTSUPP; + } + + /* Only one queue per TC is supported. */ + for (i = 0; i < mqprio->qopt.num_tc; i++) { + if (mqprio->qopt.count[i] != 1) { + NL_SET_ERR_MSG_MOD(mqprio->extack, + "Only one queue per TC supported"); + return -EOPNOTSUPP; + } + } + + /* Preemption is not supported yet. */ + if (mqprio->preemptible_tcs) { + NL_SET_ERR_MSG_MOD(mqprio->extack, + "Preemption is not supported yet"); + return -EOPNOTSUPP; + } + + igc_save_mqprio_params(adapter, mqprio->qopt.num_tc, + mqprio->qopt.offset); + + mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; + +apply: + return igc_tsn_offload_apply(adapter); +} + static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -6551,6 +6639,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, case TC_SETUP_QDISC_CBS: return igc_tsn_enable_cbs(adapter, type_data); + case TC_SETUP_QDISC_MQPRIO: + return igc_tsn_enable_mqprio(adapter, type_data); + default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 861f3707686165..2801e5f24df902 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -240,7 +240,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) /* Read the MULTI GBT AN Control Register - reg 7.32 */ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | - ANEG_MULTIGBT_AN_CTRL, + IGC_ANEG_MULTIGBT_AN_CTRL, &aneg_multigbt_an_ctrl); if (ret_val) @@ -380,7 +380,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) ret_val = phy->ops.write_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | - ANEG_MULTIGBT_AN_CTRL, + IGC_ANEG_MULTIGBT_AN_CTRL, aneg_multigbt_an_ctrl); return ret_val; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index e5b893fc5b663d..12ddc5793651d8 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -238,6 +238,8 @@ #define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) #define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) +#define IGC_TXARB 0x3354 /* Tx Arbitration Control TxARB - RW */ + /* System Time Registers */ #define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ #define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ @@ -308,6 +310,16 @@ 
#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ #define IGC_EEE_SU 0x0E34 /* EEE Setup */ +/* MULTI GBT AN Control Register - reg. 7.32 */ +#define IGC_ANEG_MULTIGBT_AN_CTRL 0x0020 + +/* EEE ANeg Advertisement Register - reg 7.60 and reg 7.62 */ +#define IGC_ANEG_EEE_AB1 0x003c +#define IGC_ANEG_EEE_AB2 0x003e +/* EEE ANeg Link-Partner Advertisement Register - reg 7.61 and reg 7.63 */ +#define IGC_ANEG_EEE_LP_AB1 0x003d +#define IGC_ANEG_EEE_LP_AB2 0x003f + /* LTR registers */ #define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ #define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index d68fa7f3d5f078..1e44374ca1ffbb 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -46,6 +46,9 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) if (is_cbs_enabled(adapter)) new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + if (adapter->strict_priority_enable) + new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED; + return new_flags; } @@ -102,11 +105,32 @@ bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter) adapter->taprio_offload_enable; } +static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc) +{ + struct igc_hw *hw = &adapter->hw; + u32 txarb; + + txarb = rd32(IGC_TXARB); + + txarb &= ~(IGC_TXARB_TXQ_PRIO_0_MASK | + IGC_TXARB_TXQ_PRIO_1_MASK | + IGC_TXARB_TXQ_PRIO_2_MASK | + IGC_TXARB_TXQ_PRIO_3_MASK); + + txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]); + txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]); + txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]); + txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]); + + wr32(IGC_TXARB, txarb); +} + /* Returns the TSN specific registers to their default values after * the adapter is reset. */ static int igc_tsn_disable_offload(struct igc_adapter *adapter) { + u16 queue_per_tc[4] = { 3, 2, 1, 0 }; struct igc_hw *hw = &adapter->hw; u32 tqavctrl; int i; @@ -133,7 +157,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) wr32(IGC_QBVCYCLET_S, 0); wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + /* Reset mqprio TC configuration. */ + netdev_reset_tc(adapter->netdev); + + /* Restore the default Tx arbitration: Priority 0 has the highest + * priority and is assigned to queue 0 and so on and so forth. + */ + igc_tsn_tx_arb(adapter, queue_per_tc); + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED; return 0; } @@ -172,6 +205,40 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) if (igc_is_device_id_i226(hw)) igc_tsn_set_retx_qbvfullthreshold(adapter); + if (adapter->strict_priority_enable) { + int err; + + err = netdev_set_num_tc(adapter->netdev, adapter->num_tc); + if (err) + return err; + + for (i = 0; i < adapter->num_tc; i++) { + err = netdev_set_tc_queue(adapter->netdev, i, 1, + adapter->queue_per_tc[i]); + if (err) + return err; + } + + /* In case the card is configured with less than four queues. */ + for (; i < IGC_MAX_TX_QUEUES; i++) + adapter->queue_per_tc[i] = i; + + /* Configure queue priorities according to the user provided + * mapping. + */ + igc_tsn_tx_arb(adapter, adapter->queue_per_tc); + + /* Enable legacy TSN mode which will do strict priority without + * any other TSN features. 
+ */ + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN; + tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV; + wr32(IGC_TQAVCTRL, tqavctrl); + + return 0; + } + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; u32 txqctl = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index e85f7d2e88106e..f2709b10c2e5ab 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -317,7 +317,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; #ifdef IXGBE_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) + if (adapter->netdev->fcoe_mtu) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 4cac76254966da..9482e0cca8b7d2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -3196,16 +3196,12 @@ static int ixgbe_get_ts_info(struct net_device *dev, info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF) | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 18d63c8c2ff4df..955dced844a989 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -858,7 +858,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev) /* enable FCoE and notify stack */ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - netdev->features |= NETIF_F_FCOE_MTU; + netdev->fcoe_mtu = true; netdev_features_change(netdev); /* release existing queues and reallocate them */ @@ -898,7 +898,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev) /* disable FCoE and notify stack */ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - netdev->features &= ~NETIF_F_FCOE_MTU; + netdev->fcoe_mtu = false; netdev_features_change(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 0ee943db3dc92b..16fa621ce0ffb5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -981,7 +981,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); #ifdef IXGBE_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) { + if (adapter->netdev->fcoe_mtu) { struct ixgbe_ring_feature *f; f = &adapter->ring_feature[RING_F_FCOE]; if ((rxr_idx >= f->offset) && diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 8057cef61f397e..8b8404d8c94606 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5079,7 +5079,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) netif_set_tso_max_size(adapter->netdev, 32768); #ifdef IXGBE_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) + if (adapter->netdev->fcoe_mtu) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif @@ -5136,8 +5136,7 @@ static int 
ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) #ifdef IXGBE_FCOE /* FCoE traffic class uses FCOE jumbo frames */ - if ((dev->features & NETIF_F_FCOE_MTU) && - (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE && (pb == ixgbe_fcoe_get_tc(adapter))) tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; #endif @@ -5197,8 +5196,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) #ifdef IXGBE_FCOE /* FCoE traffic class uses FCOE jumbo frames */ - if ((dev->features & NETIF_F_FCOE_MTU) && - (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE && (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; #endif @@ -11096,8 +11094,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_FCOE_CRC; netdev->vlan_features |= NETIF_F_FSO | - NETIF_F_FCOE_CRC | - NETIF_F_FCOE_MTU; + NETIF_F_FCOE_CRC; } #endif /* IXGBE_FCOE */ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index fcfd0a075eee56..e71715f5da2287 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -495,7 +495,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf int err = 0; #ifdef CONFIG_FCOE - if (dev->features & NETIF_F_FCOE_MTU) + if (dev->fcoe_mtu) pf_max_frame = max_t(int, pf_max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); @@ -857,7 +857,7 @@ static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf) int pf_max_frame = dev->mtu + ETH_HLEN; #if IS_ENABLED(CONFIG_FCOE) - if (dev->features & NETIF_F_FCOE_MTU) + if (dev->fcoe_mtu) pf_max_frame = max_t(int, pf_max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif /* CONFIG_FCOE */ diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 9e69848153864b..7179271f63b655 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -95,7 +95,6 @@ struct ltq_etop_priv { struct mii_bus *mii_bus; struct ltq_etop_chan ch[MAX_DMA_CHAN]; - int tx_free[MAX_DMA_CHAN >> 1]; int tx_burst_len; int rx_burst_len; @@ -482,7 +481,9 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) unsigned long flags; u32 byte_offset; - len = skb->len < ETH_ZLEN ? 
ETH_ZLEN : skb->len; + if (skb_put_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + len = skb->len; if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { netdev_err(dev, "tx ring full\n"); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index f35ae2c88091d5..9e80899546d996 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2802,7 +2802,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) { struct mv643xx_eth_shared_platform_data *pd; - struct device_node *pnp, *np = pdev->dev.of_node; + struct device_node *np = pdev->dev.of_node; int ret; /* bail out if not registered from DT */ @@ -2816,10 +2816,9 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); - for_each_available_child_of_node(np, pnp) { + for_each_available_child_of_node_scoped(np, pnp) { ret = mv643xx_eth_shared_of_add_port(pdev, pnp); if (ret) { - of_node_put(pnp); mv643xx_eth_shared_of_remove(); return ret; } diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 9190eff6c0bba3..e1d003fdbc2ed2 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -104,7 +104,7 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops, return 0; } else { /* wait_event_timeout does not guarantee a delay of at - * least one whole jiffie, so timeout must be no less + * least one whole jiffy, so timeout must be no less * than two. */ timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 41894834fb53c5..d72b2d5f96db87 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1781,7 +1781,7 @@ static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, } /* Set TXQ descriptors fields relevant for CSUM calculation */ -static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, +static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto, int ip_hdr_len, int l4_proto) { u32 command; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index e809f91c08fb9d..9e02e4367bec81 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1088,7 +1088,7 @@ struct mvpp2 { unsigned int max_port_rxqs; /* Workqueue to gather hardware statistics */ - char queue_name[30]; + char queue_name[31]; struct workqueue_struct *stats_queue; /* Debugfs root entry */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 40aeaa7bd739fa..1641791a2d5b4e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1522,29 +1522,19 @@ static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx) return 0; } -int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx) +int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 port_ctx) { u32 rss_ctx; - int ret, i; + int ret; ret = mvpp22_rss_context_create(port, &rss_ctx); if (ret) return ret; - /* Find the first available context number in the port, starting from 1. - * Context 0 on each port is reserved for the default context. 
- */ - for (i = 1; i < MVPP22_N_RSS_TABLES; i++) { - if (port->rss_ctx[i] < 0) - break; - } - - if (i == MVPP22_N_RSS_TABLES) + if (WARN_ON_ONCE(port->rss_ctx[port_ctx] >= 0)) return -EINVAL; - port->rss_ctx[i] = rss_ctx; - *port_ctx = i; - + port->rss_ctx[port_ctx] = rss_ctx; return 0; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 663157dc8062b2..85c9c6e8067896 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -264,7 +264,7 @@ int mvpp22_port_rss_init(struct mvpp2_port *port); int mvpp22_port_rss_enable(struct mvpp2_port *port); int mvpp22_port_rss_disable(struct mvpp2_port *port); -int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx); +int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 rss_ctx); int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx); int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 0d62a33afa80ca..3880dcc0418b2d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5268,8 +5268,6 @@ static int mvpp2_ethtool_get_ts_info(struct net_device *dev, info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; @@ -5696,40 +5694,82 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev, return ret; } -static int mvpp2_ethtool_set_rxfh(struct net_device *dev, - struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack) +static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port, + const struct ethtool_rxfh_param *rxfh) { - struct mvpp2_port *port = netdev_priv(dev); - u32 *rss_context = &rxfh->rss_context; - int ret = 0; - if (!mvpp22_rss_is_supported(port)) - return -EOPNOTSUPP; + return false; if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && rxfh->hfunc != ETH_RSS_HASH_CRC32) - return -EOPNOTSUPP; + return false; if (rxfh->key) + return false; + + return true; +} + +static int mvpp2_create_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp2_ethtool_rxfh_okay(port, rxfh)) return -EOPNOTSUPP; - if (*rss_context && rxfh->rss_delete) - return mvpp22_port_rss_ctx_delete(port, *rss_context); + ctx->hfunc = ETH_RSS_HASH_CRC32; - if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { - ret = mvpp22_port_rss_ctx_create(port, rss_context); - if (ret) - return ret; - } + ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context); + if (ret) + return ret; - if (rxfh->indir) - ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context, + if (!rxfh->indir) + ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context, + ethtool_rxfh_context_indir(ctx)); + else + ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context, rxfh->indir); + return ret; +} +static int mvpp2_modify_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp2_ethtool_rxfh_okay(port, rxfh)) + return 
-EOPNOTSUPP; + + if (rxfh->indir) + ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context, + rxfh->indir); return ret; } +static int mvpp2_remove_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack) +{ + struct mvpp2_port *port = netdev_priv(dev); + + return mvpp22_port_rss_ctx_delete(port, rss_context); +} + +static int mvpp2_ethtool_set_rxfh(struct net_device *dev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack); +} + /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { @@ -5749,7 +5789,7 @@ static const struct net_device_ops mvpp2_netdev_ops = { }; static const struct ethtool_ops mvpp2_eth_tool_ops = { - .cap_rss_ctx_supported = true, + .rxfh_max_num_contexts = MVPP22_N_RSS_TABLES, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .nway_reset = mvpp2_ethtool_nway_reset, @@ -5772,6 +5812,9 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, .get_rxfh = mvpp2_ethtool_get_rxfh, .set_rxfh = mvpp2_ethtool_set_rxfh, + .create_rxfh_context = mvpp2_create_rxfh_context, + .modify_rxfh_context = mvpp2_modify_rxfh_context, + .remove_rxfh_context = mvpp2_remove_rxfh_context, }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that @@ -7417,8 +7460,6 @@ static int mvpp2_get_sram(struct platform_device *pdev, static int mvpp2_probe(struct platform_device *pdev) { - struct fwnode_handle *fwnode = pdev->dev.fwnode; - struct fwnode_handle *port_fwnode; struct mvpp2 *priv; struct resource *res; void __iomem *base; @@ -7591,7 +7632,7 @@ static int mvpp2_probe(struct platform_device *pdev) } /* Map DTS-active ports. 
Should be done before FIFO mvpp2_init */ - fwnode_for_each_available_child_node(fwnode, port_fwnode) { + device_for_each_child_node_scoped(&pdev->dev, port_fwnode) { if (!fwnode_property_read_u32(port_fwnode, "port-id", &i)) priv->port_map |= BIT(i); } @@ -7614,7 +7655,7 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_axi_clk; /* Initialize ports */ - fwnode_for_each_available_child_node(fwnode, port_fwnode) { + device_for_each_child_node_scoped(&pdev->dev, port_fwnode) { err = mvpp2_port_probe(pdev, port_fwnode, priv); if (err < 0) goto err_port_probe; @@ -7653,14 +7694,8 @@ static int mvpp2_probe(struct platform_device *pdev) return 0; err_port_probe: - fwnode_handle_put(port_fwnode); - - i = 0; - fwnode_for_each_available_child_node(fwnode, port_fwnode) { - if (priv->port_list[i]) - mvpp2_port_remove(priv->port_list[i]); - i++; - } + for (i = 0; i < priv->port_count; i++) + mvpp2_port_remove(priv->port_list[i]); err_axi_clk: clk_disable_unprepare(priv->axi_clk); err_mg_core_clk: @@ -7677,18 +7712,13 @@ static int mvpp2_probe(struct platform_device *pdev) static void mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = platform_get_drvdata(pdev); - struct fwnode_handle *fwnode = pdev->dev.fwnode; - int i = 0, poolnum = MVPP2_BM_POOLS_NUM; - struct fwnode_handle *port_fwnode; + int i, poolnum = MVPP2_BM_POOLS_NUM; mvpp2_dbgfs_cleanup(priv); - fwnode_for_each_available_child_node(fwnode, port_fwnode) { - if (priv->port_list[i]) { - mutex_destroy(&priv->port_list[i]->gather_stats_lock); - mvpp2_port_remove(priv->port_list[i]); - } - i++; + for (i = 0; i < priv->port_count; i++) { + mutex_destroy(&priv->port_list[i]->gather_stats_lock); + mvpp2_port_remove(priv->port_list[i]); } destroy_workqueue(priv->stats_queue); @@ -7711,7 +7741,7 @@ static void mvpp2_remove(struct platform_device *pdev) aggr_txq->descs_dma); } - if (is_acpi_node(port_fwnode)) + if (!dev_of_node(&pdev->dev)) return; clk_disable_unprepare(priv->axi_clk); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index ed2160cc5acb40..6ea2f3071fe8f7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1856,8 +1856,9 @@ struct cpt_flt_eng_info_req { struct cpt_flt_eng_info_rsp { struct mbox_msghdr hdr; - u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU]; - u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU]; +#define CPT_AF_MAX_FLT_INT_VECS 3 + u64 flt_eng_map[CPT_AF_MAX_FLT_INT_VECS]; + u64 rcvrd_eng_map[CPT_AF_MAX_FLT_INT_VECS]; u64 rsvd; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index ac7ee3f3598c90..1a97fb9032fa44 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2479,9 +2479,9 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, goto free_regions; } - mw->mbox_wq = alloc_workqueue(name, + mw->mbox_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, - num); + num, name); if (!mw->mbox_wq) { err = -ENOMEM; goto unmap_regions; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index db2db0738ee42a..5016ba82e1423a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -399,6 +399,7 @@ struct hw_cap { bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */ bool npc_hash_extract; /* Hash 
extract enabled ? */ bool npc_exact_match_enabled; /* Exact match supported ? */ + bool cpt_rxc; /* Is CPT-RXC supported */ }; struct rvu_hwinfo { @@ -689,6 +690,35 @@ static inline bool is_cnf10ka_a0(struct rvu *rvu) return false; } +static inline bool is_cn10ka_a0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A && + (pdev->revision & 0x0F) == 0x0) + return true; + return false; +} + +static inline bool is_cn10ka_a1(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A && + (pdev->revision & 0x0F) == 0x1) + return true; + return false; +} + +static inline bool is_cn10kb(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B) + return true; + return false; +} + static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu) { u64 npc_const3; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index daf4b951e90591..3c5bbaf12e594c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -19,6 +19,12 @@ /* Length of initial context fetch in 128 byte words */ #define CPT_CTX_ILEN 1ULL +/* Interrupt vector count of CPT RVU and RAS interrupts */ +#define CPT_10K_AF_RVU_RAS_INT_VEC_CNT 2 + +/* Default CPT_AF_RXC_CFG1:max_rxc_icb_cnt */ +#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL + #define cpt_get_eng_sts(e_min, e_max, rsp, etype) \ ({ \ u64 free_sts = 0, busy_sts = 0; \ @@ -37,6 +43,41 @@ (_rsp)->free_sts_##etype = free_sts; \ }) +#define MAX_AE GENMASK_ULL(47, 32) +#define MAX_IE GENMASK_ULL(31, 16) +#define MAX_SE GENMASK_ULL(15, 0) + +static u16 cpt_max_engines_get(struct rvu *rvu) +{ + u16 max_ses, max_ies, max_aes; + u64 reg; + + reg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS1); + max_ses = FIELD_GET(MAX_SE, reg); + max_ies = FIELD_GET(MAX_IE, reg); + max_aes = FIELD_GET(MAX_AE, reg); + + return max_ses + max_ies + max_aes; +} + +/* Number of flt interrupt vectors are depends on number of engines that the + * chip has. Each flt vector represents 64 engines. + */ +static int cpt_10k_flt_nvecs_get(struct rvu *rvu, u16 max_engs) +{ + int flt_vecs; + + flt_vecs = DIV_ROUND_UP(max_engs, 64); + + if (flt_vecs > CPT_10K_AF_INT_VEC_FLT_MAX) { + dev_warn_once(rvu->dev, "flt_vecs:%d exceeds the max vectors:%d\n", + flt_vecs, CPT_10K_AF_INT_VEC_FLT_MAX); + flt_vecs = CPT_10K_AF_INT_VEC_FLT_MAX; + } + + return flt_vecs; +} + static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr) { struct rvu_block *block = ptr; @@ -150,17 +191,26 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off) { struct rvu *rvu = block->rvu; int blkaddr = block->addr; - int i; + int i, flt_vecs; + u16 max_engs; + u8 nr; + + max_engs = cpt_max_engines_get(rvu); + flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs); /* Disable all CPT AF interrupts */ - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL); - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL); - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF); + for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) { + nr = (max_engs > 64) ? 
64 : max_engs; + max_engs -= nr; + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), + INTR_MASK(nr)); + } rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); - for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++) + /* CPT AF interrupt vectors are flt_int, rvu_int and ras_int. */ + for (i = 0; i < flt_vecs + CPT_10K_AF_RVU_RAS_INT_VEC_CNT; i++) if (rvu->irq_allocated[off + i]) { free_irq(pci_irq_vector(rvu->pdev, off + i), block); rvu->irq_allocated[off + i] = false; @@ -206,12 +256,18 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu) static int cpt_10k_register_interrupts(struct rvu_block *block, int off) { + int rvu_intr_vec, ras_intr_vec; struct rvu *rvu = block->rvu; int blkaddr = block->addr; irq_handler_t flt_fn; - int i, ret; + int i, ret, flt_vecs; + u16 max_engs; + u8 nr; - for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) { + max_engs = cpt_max_engines_get(rvu); + flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs); + + for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) { sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i); switch (i) { @@ -229,20 +285,24 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off) flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]); if (ret) goto err; - if (i == CPT_10K_AF_INT_VEC_FLT2) - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF); - else - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL); + + nr = (max_engs > 64) ? 64 : max_engs; + max_engs -= nr; + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), + INTR_MASK(nr)); } - ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU, + rvu_intr_vec = flt_vecs; + ras_intr_vec = rvu_intr_vec + 1; + + ret = rvu_cpt_do_register_interrupt(block, off + rvu_intr_vec, rvu_cpt_af_rvu_intr_handler, "CPTAF RVU"); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1); - ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS, + ret = rvu_cpt_do_register_interrupt(block, off + ras_intr_vec, rvu_cpt_af_ras_intr_handler, "CPTAF RAS"); if (ret) @@ -680,6 +740,7 @@ static bool validate_and_update_reg_offset(struct rvu *rvu, case CPT_AF_BLK_RST: case CPT_AF_CONSTANTS1: case CPT_AF_CTX_FLUSH_TIMER: + case CPT_AF_RXC_CFG1: return true; } @@ -732,6 +793,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) { + struct rvu_hwinfo *hw = rvu->hw; + if (is_rvu_otx2(rvu)) return; @@ -755,14 +818,16 @@ static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR); rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID); rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER); + rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0)); + rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1)); + if (!hw->cap.cpt_rxc) + return; rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME); rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG); rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS); rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS); rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG); - rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0)); - rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1)); } static void get_eng_sts(struct rvu *rvu, struct 
cpt_sts_rsp *rsp, int blkaddr) @@ -921,13 +986,17 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r struct rvu_block *block; unsigned long flags; int blkaddr, vec; + int flt_vecs; + u16 max_engs; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr; block = &rvu->hw->block[blkaddr]; - for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) { + max_engs = cpt_max_engines_get(rvu); + flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs); + for (vec = 0; vec < flt_vecs; vec++) { spin_lock_irqsave(&rvu->cpt_intr_lock, flags); rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec]; rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec]; @@ -943,10 +1012,11 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr) { struct cpt_rxc_time_cfg_req req, prev; + struct rvu_hwinfo *hw = rvu->hw; int timeout = 2000; u64 reg; - if (is_rvu_otx2(rvu)) + if (!hw->cap.cpt_rxc) return; /* Set time limit to minimum values, so that rxc entries will be @@ -1219,10 +1289,30 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc) return 0; } +#define MAX_RXC_ICB_CNT GENMASK_ULL(40, 32) + int rvu_cpt_init(struct rvu *rvu) { + struct rvu_hwinfo *hw = rvu->hw; + u64 reg_val; + /* Retrieve CPT PF number */ rvu->cpt_pf_num = get_cpt_pf_num(rvu); + if (is_block_implemented(rvu->hw, BLKADDR_CPT0) && !is_rvu_otx2(rvu) && + !is_cn10kb(rvu)) + hw->cap.cpt_rxc = true; + + if (hw->cap.cpt_rxc && !is_cn10ka_a0(rvu) && !is_cn10ka_a1(rvu)) { + /* Set CPT_AF_RXC_CFG1:max_rxc_icb_cnt to 0xc0 to not effect + * inline inbound peak performance + */ + reg_val = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1); + reg_val &= ~MAX_RXC_ICB_CNT; + reg_val |= FIELD_PREP(MAX_RXC_ICB_CNT, + CPT_DFLT_MAX_RXC_ICB_CNT); + rvu_write64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1, reg_val); + } + spin_lock_init(&rvu->cpt_intr_lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 4a4ef5bd9e0bd5..87ba77e5026a02 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -838,10 +838,10 @@ RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) { + char cgx[10], lmac[10], chan[10]; struct rvu *rvu = filp->private; struct pci_dev *pdev = NULL; struct mac_ops *mac_ops; - char cgx[10], lmac[10]; struct rvu_pfvf *pfvf; int pf, domain, blkid; u8 cgx_id, lmac_id; @@ -852,7 +852,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) /* There can be no CGX devices at all */ if (!mac_ops) return 0; - seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", + seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n", mac_ops->name); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) @@ -876,8 +876,11 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) &lmac_id); sprintf(cgx, "%s%d", mac_ops->name, cgx_id); sprintf(lmac, "LMAC%d", lmac_id); - seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n", - dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); + sprintf(chan, "%d", + rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0)); + seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n", + dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac, + chan); pci_dev_put(pdev); } diff --git 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index d56be5fb7eb4a2..2b299fa8515913 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -545,6 +545,7 @@ #define CPT_AF_CTX_PSH_PC (0x49450ull) #define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull) #define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3) +#define CPT_AF_RXC_CFG1 (0x50000ull) #define CPT_AF_RXC_TIME (0x50010ull) #define CPT_AF_RXC_TIME_CFG (0x50018ull) #define CPT_AF_RXC_DFRG (0x50020ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 5ef406c7e8a44a..fc8da209065705 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -71,13 +71,11 @@ enum cpt_af_int_vec_e { CPT_AF_INT_VEC_CNT = 0x4, }; -enum cpt_10k_af_int_vec_e { +enum cpt_cn10k_flt_int_vec_e { CPT_10K_AF_INT_VEC_FLT0 = 0x0, CPT_10K_AF_INT_VEC_FLT1 = 0x1, CPT_10K_AF_INT_VEC_FLT2 = 0x2, - CPT_10K_AF_INT_VEC_RVU = 0x3, - CPT_10K_AF_INT_VEC_RAS = 0x4, - CPT_10K_AF_INT_VEC_CNT = 0x5, + CPT_10K_AF_INT_VEC_FLT_MAX = 0x3, }; /* NPA Admin function Interrupt Vector Enumeration */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 0db62eb0dab3f0..32468c663605ef 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -962,8 +962,6 @@ static int otx2_get_ts_info(struct net_device *netdev, return ethtool_op_get_ts_info(netdev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 3eb85949677ad2..933e18ba2fb2c3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -687,7 +687,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { __be16 l3_proto = vlan_get_protocol(skb); struct udphdr *udph = udp_hdr(skb); - u16 iplen; + __be16 iplen; ext->lso_sb = skb_transport_offset(skb) + sizeof(struct udphdr); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 63ae01954dfc5e..22ca6ee9665eb4 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -633,7 +633,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) if (err) goto err_dl_port_register; - dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC; + dev->features |= NETIF_F_HW_TC; + dev->netns_local = true; dev->netdev_ops = &prestera_netdev_ops; dev->ethtool_ops = &prestera_ethtool_ops; SET_NETDEV_DEV(dev, sw->dev->dev); diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c index 1c5b85a86df120..930f180688e5ca 100644 --- a/drivers/net/ethernet/mediatek/airoha_eth.c +++ b/drivers/net/ethernet/mediatek/airoha_eth.c @@ -18,6 +18,7 @@ #include #define AIROHA_MAX_NUM_GDM_PORTS 1 +#define AIROHA_MAX_NUM_QDMA 2 #define 
AIROHA_MAX_NUM_RSTS 3 #define AIROHA_MAX_NUM_XSI_RSTS 5 #define AIROHA_MAX_MTU 2000 @@ -66,9 +67,11 @@ #define FE_RST_GDM3_MBI_ARB_MASK BIT(2) #define FE_RST_CORE_MASK BIT(0) +#define REG_FE_WAN_MAC_H 0x0030 #define REG_FE_LAN_MAC_H 0x0040 -#define REG_FE_LAN_MAC_LMIN 0x0044 -#define REG_FE_LAN_MAC_LMAX 0x0048 + +#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04) +#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08) #define REG_FE_CDM1_OQ_MAP0 0x0050 #define REG_FE_CDM1_OQ_MAP1 0x0054 @@ -727,7 +730,7 @@ struct airoha_queue_entry { }; struct airoha_queue { - struct airoha_eth *eth; + struct airoha_qdma *qdma; /* protect concurrent queue accesses */ spinlock_t lock; @@ -746,7 +749,7 @@ struct airoha_queue { }; struct airoha_tx_irq_queue { - struct airoha_eth *eth; + struct airoha_qdma *qdma; struct napi_struct napi; u32 *q; @@ -782,9 +785,30 @@ struct airoha_hw_stats { u64 rx_len[7]; }; +struct airoha_qdma { + struct airoha_eth *eth; + void __iomem *regs; + + /* protect concurrent irqmask accesses */ + spinlock_t irq_lock; + u32 irqmask[QDMA_INT_REG_MAX]; + int irq; + + struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ]; + + struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; + struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; + + /* descriptor and packet buffers for qdma hw forward */ + struct { + void *desc; + void *q; + } hfwd; +}; + struct airoha_gdm_port { + struct airoha_qdma *qdma; struct net_device *dev; - struct airoha_eth *eth; int id; struct airoha_hw_stats stats; @@ -794,31 +818,15 @@ struct airoha_eth { struct device *dev; unsigned long state; - - void __iomem *qdma_regs; void __iomem *fe_regs; - /* protect concurrent irqmask accesses */ - spinlock_t irq_lock; - u32 irqmask[QDMA_INT_REG_MAX]; - int irq; - struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; - struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS]; - struct net_device *napi_dev; - struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; - struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; - - struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ]; - /* descriptor and packet buffers for qdma hw forward */ - struct { - void *desc; - void *q; - } hfwd; + struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA]; + struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS]; }; static u32 airoha_rr(void __iomem *base, u32 offset) @@ -850,60 +858,72 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) #define airoha_fe_clear(eth, offset, val) \ airoha_rmw((eth)->fe_regs, (offset), (val), 0) -#define airoha_qdma_rr(eth, offset) \ - airoha_rr((eth)->qdma_regs, (offset)) -#define airoha_qdma_wr(eth, offset, val) \ - airoha_wr((eth)->qdma_regs, (offset), (val)) -#define airoha_qdma_rmw(eth, offset, mask, val) \ - airoha_rmw((eth)->qdma_regs, (offset), (mask), (val)) -#define airoha_qdma_set(eth, offset, val) \ - airoha_rmw((eth)->qdma_regs, (offset), 0, (val)) -#define airoha_qdma_clear(eth, offset, val) \ - airoha_rmw((eth)->qdma_regs, (offset), (val), 0) - -static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index, +#define airoha_qdma_rr(qdma, offset) \ + airoha_rr((qdma)->regs, (offset)) +#define airoha_qdma_wr(qdma, offset, val) \ + airoha_wr((qdma)->regs, (offset), (val)) +#define airoha_qdma_rmw(qdma, offset, mask, val) \ + airoha_rmw((qdma)->regs, (offset), (mask), (val)) +#define airoha_qdma_set(qdma, offset, val) \ + airoha_rmw((qdma)->regs, (offset), 0, (val)) +#define airoha_qdma_clear(qdma, offset, val) \ + airoha_rmw((qdma)->regs, (offset), (val), 0) + +static 
void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index, u32 clear, u32 set) { unsigned long flags; - if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask))) + if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask))) return; - spin_lock_irqsave(ð->irq_lock, flags); + spin_lock_irqsave(&qdma->irq_lock, flags); - eth->irqmask[index] &= ~clear; - eth->irqmask[index] |= set; - airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]); + qdma->irqmask[index] &= ~clear; + qdma->irqmask[index] |= set; + airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]); /* Read irq_enable register in order to guarantee the update above * completes in the spinlock critical section. */ - airoha_qdma_rr(eth, REG_INT_ENABLE(index)); + airoha_qdma_rr(qdma, REG_INT_ENABLE(index)); - spin_unlock_irqrestore(ð->irq_lock, flags); + spin_unlock_irqrestore(&qdma->irq_lock, flags); } -static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index, +static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index, u32 mask) { - airoha_qdma_set_irqmask(eth, index, 0, mask); + airoha_qdma_set_irqmask(qdma, index, 0, mask); } -static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index, +static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index, u32 mask) { - airoha_qdma_set_irqmask(eth, index, mask, 0); + airoha_qdma_set_irqmask(qdma, index, mask, 0); } -static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr) +static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) { - u32 val; + /* GDM1 port on EN7581 SoC is connected to the lan dsa switch. + * GDM{2,3,4} can be used as wan port connected to an external + * phy module. + */ + return port->id == 1; +} +static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr) +{ + struct airoha_eth *eth = port->qdma->eth; + u32 val, reg; + + reg = airhoa_is_lan_gdm_port(port) ? 
REG_FE_LAN_MAC_H + : REG_FE_WAN_MAC_H; val = (addr[0] << 16) | (addr[1] << 8) | addr[2]; - airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val); + airoha_fe_wr(eth, reg, val); val = (addr[3] << 16) | (addr[4] << 8) | addr[5]; - airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val); - airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val); + airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val); + airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val); } static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr, @@ -1383,8 +1403,9 @@ static int airoha_fe_init(struct airoha_eth *eth) static int airoha_qdma_fill_rx_queue(struct airoha_queue *q) { enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); - struct airoha_eth *eth = q->eth; - int qid = q - ð->q_rx[0]; + struct airoha_qdma *qdma = q->qdma; + struct airoha_eth *eth = qdma->eth; + int qid = q - &qdma->q_rx[0]; int nframes = 0; while (q->queued < q->ndesc - 1) { @@ -1420,7 +1441,8 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q) WRITE_ONCE(desc->msg2, 0); WRITE_ONCE(desc->msg3, 0); - airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK, + airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), + RX_RING_CPU_IDX_MASK, FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); } @@ -1450,8 +1472,9 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth, static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) { enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); - struct airoha_eth *eth = q->eth; - int qid = q - ð->q_rx[0]; + struct airoha_qdma *qdma = q->qdma; + struct airoha_eth *eth = qdma->eth; + int qid = q - &qdma->q_rx[0]; int done = 0; while (done < budget) { @@ -1513,7 +1536,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget) { struct airoha_queue *q = container_of(napi, struct airoha_queue, napi); - struct airoha_eth *eth = q->eth; int cur, done = 0; do { @@ -1522,14 +1544,14 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget) } while (cur && done < budget); if (done < budget && napi_complete(napi)) - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, + airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1, RX_DONE_INT_MASK); return done; } -static int airoha_qdma_init_rx_queue(struct airoha_eth *eth, - struct airoha_queue *q, int ndesc) +static int airoha_qdma_init_rx_queue(struct airoha_queue *q, + struct airoha_qdma *qdma, int ndesc) { const struct page_pool_params pp_params = { .order = 0, @@ -1538,15 +1560,16 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth, .dma_dir = DMA_FROM_DEVICE, .max_len = PAGE_SIZE, .nid = NUMA_NO_NODE, - .dev = eth->dev, + .dev = qdma->eth->dev, .napi = &q->napi, }; - int qid = q - ð->q_rx[0], thr; + struct airoha_eth *eth = qdma->eth; + int qid = q - &qdma->q_rx[0], thr; dma_addr_t dma_addr; q->buf_size = PAGE_SIZE / 2; q->ndesc = ndesc; - q->eth = eth; + q->qdma = qdma; q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), GFP_KERNEL); @@ -1568,14 +1591,15 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth, netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); - airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr); - airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK, + airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr); + airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), + RX_RING_SIZE_MASK, FIELD_PREP(RX_RING_SIZE_MASK, ndesc)); thr = clamp(ndesc >> 3, 1, 32); - airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), 
RX_RING_THR_MASK, + airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK, FIELD_PREP(RX_RING_THR_MASK, thr)); - airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, + airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); airoha_qdma_fill_rx_queue(q); @@ -1585,7 +1609,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth, static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q) { - struct airoha_eth *eth = q->eth; + struct airoha_eth *eth = q->qdma->eth; while (q->queued) { struct airoha_queue_entry *e = &q->entry[q->tail]; @@ -1599,11 +1623,11 @@ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q) } } -static int airoha_qdma_init_rx(struct airoha_eth *eth) +static int airoha_qdma_init_rx(struct airoha_qdma *qdma) { int i; - for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { int err; if (!(RX_DONE_INT_MASK & BIT(i))) { @@ -1611,7 +1635,7 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth) continue; } - err = airoha_qdma_init_rx_queue(eth, ð->q_rx[i], + err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma, RX_DSCP_NUM(i)); if (err) return err; @@ -1623,12 +1647,14 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth) static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) { struct airoha_tx_irq_queue *irq_q; + struct airoha_qdma *qdma; struct airoha_eth *eth; int id, done = 0; irq_q = container_of(napi, struct airoha_tx_irq_queue, napi); - eth = irq_q->eth; - id = irq_q - ð->q_tx_irq[0]; + qdma = irq_q->qdma; + id = irq_q - &qdma->q_tx_irq[0]; + eth = qdma->eth; while (irq_q->queued > 0 && done < budget) { u32 qid, last, val = irq_q->q[irq_q->head]; @@ -1645,10 +1671,10 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) last = FIELD_GET(IRQ_DESC_IDX_MASK, val); qid = FIELD_GET(IRQ_RING_IDX_MASK, val); - if (qid >= ARRAY_SIZE(eth->q_tx)) + if (qid >= ARRAY_SIZE(qdma->q_tx)) continue; - q = ð->q_tx[qid]; + q = &qdma->q_tx[qid]; if (!q->ndesc) continue; @@ -1697,28 +1723,29 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) int i, len = done >> 7; for (i = 0; i < len; i++) - airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id), + airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), IRQ_CLEAR_LEN_MASK, 0x80); - airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id), + airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), IRQ_CLEAR_LEN_MASK, (done & 0x7f)); } if (done < budget && napi_complete(napi)) - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, TX_DONE_INT_MASK(id)); return done; } -static int airoha_qdma_init_tx_queue(struct airoha_eth *eth, - struct airoha_queue *q, int size) +static int airoha_qdma_init_tx_queue(struct airoha_queue *q, + struct airoha_qdma *qdma, int size) { - int i, qid = q - ð->q_tx[0]; + struct airoha_eth *eth = qdma->eth; + int i, qid = q - &qdma->q_tx[0]; dma_addr_t dma_addr; spin_lock_init(&q->lock); q->ndesc = size; - q->eth = eth; + q->qdma = qdma; q->free_thr = 1 + MAX_SKB_FRAGS; q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), @@ -1738,20 +1765,20 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth, WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); } - airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr); - airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, + airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr); + airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, 
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); - airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, + airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); return 0; } -static int airoha_qdma_tx_irq_init(struct airoha_eth *eth, - struct airoha_tx_irq_queue *irq_q, - int size) +static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q, + struct airoha_qdma *qdma, int size) { - int id = irq_q - &eth->q_tx_irq[0]; + int id = irq_q - &qdma->q_tx_irq[0]; + struct airoha_eth *eth = qdma->eth; dma_addr_t dma_addr; netif_napi_add_tx(eth->napi_dev, &irq_q->napi, @@ -1763,30 +1790,30 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth, memset(irq_q->q, 0xff, size * sizeof(u32)); irq_q->size = size; - irq_q->eth = eth; + irq_q->qdma = qdma; - airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr); - airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK, + airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr); + airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK, FIELD_PREP(TX_IRQ_DEPTH_MASK, size)); - airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK, + airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK, FIELD_PREP(TX_IRQ_THR_MASK, 1)); return 0; } -static int airoha_qdma_init_tx(struct airoha_eth *eth) +static int airoha_qdma_init_tx(struct airoha_qdma *qdma) { int i, err; - for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) { - err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i], + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { + err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma, IRQ_QUEUE_LEN(i)); if (err) return err; } - for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { - err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i], + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { + err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma, TX_DSCP_NUM); if (err) return err; @@ -1797,7 +1824,7 @@ static int airoha_qdma_init_tx(struct airoha_eth *eth) static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) { - struct airoha_eth *eth = q->eth; + struct airoha_eth *eth = q->qdma->eth; spin_lock_bh(&q->lock); while (q->queued) { @@ -1814,34 +1841,35 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) spin_unlock_bh(&q->lock); } -static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth) +static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma) { + struct airoha_eth *eth = qdma->eth; dma_addr_t dma_addr; u32 status; int size; size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc); - eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, - GFP_KERNEL); - if (!eth->hfwd.desc) + qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, + GFP_KERNEL); + if (!qdma->hfwd.desc) return -ENOMEM; - airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr); + airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr); size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; - eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, - GFP_KERNEL); - if (!eth->hfwd.q) + qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, + GFP_KERNEL); + if (!qdma->hfwd.q) return -ENOMEM; - airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr); + airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr); - airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG, + airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG, HW_FWD_DSCP_PAYLOAD_SIZE_MASK, FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0)); - airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128)); - airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG, + airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG, LMGR_INIT_START | LMGR_SRAM_MODE_MASK | HW_FWD_DESC_NUM_MASK, FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) | @@ -1849,87 +1877,87 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth) return read_poll_timeout(airoha_qdma_rr, status, !(status & LMGR_INIT_START), USEC_PER_MSEC, - 30 * USEC_PER_MSEC, true, eth, + 30 * USEC_PER_MSEC, true, qdma, REG_LMGR_INIT_CFG); } -static void airoha_qdma_init_qos(struct airoha_eth *eth) +static void airoha_qdma_init_qos(struct airoha_qdma *qdma) { - airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK); - airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK); + airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK); + airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK); - airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG, + airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG, PSE_BUF_ESTIMATE_EN_MASK); - airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG, + airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG, EGRESS_RATE_METER_EN_MASK | EGRESS_RATE_METER_EQ_RATE_EN_MASK); /* 2047us x 31 = 63.457ms */ - airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG, + airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG, EGRESS_RATE_METER_WINDOW_SZ_MASK, FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f)); - airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG, + airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG, EGRESS_RATE_METER_TIMESLICE_MASK, FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff)); /* ratelimit init */ - airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK); + airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK); /* fast-tick 25us */ - airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK, + airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK, FIELD_PREP(GLB_FAST_TICK_MASK, 25)); - airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK, + airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK, FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40)); - airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK); - airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK, + airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK); + airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK, FIELD_PREP(EGRESS_FAST_TICK_MASK, 25)); - airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, + airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_SLOW_TICK_RATIO_MASK, FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40)); - airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK); - airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG, + airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK); + airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_MODE_MASK); - airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK, + airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK, FIELD_PREP(INGRESS_FAST_TICK_MASK, 125)); - airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, + airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_SLOW_TICK_RATIO_MASK, FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8)); - airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK); - airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK, + airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK); + airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK, FIELD_PREP(SLA_FAST_TICK_MASK, 25)); - airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, 
SLA_SLOW_TICK_RATIO_MASK, + airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK, FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40)); } -static int airoha_qdma_hw_init(struct airoha_eth *eth) +static int airoha_qdma_hw_init(struct airoha_qdma *qdma) { int i; /* clear pending irqs */ - for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) - airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff); + for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) + airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff); /* setup irqs */ - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK); - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK); - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK); + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK); + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK); + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK); /* setup irq binding */ - for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { - if (!eth->q_tx[i].ndesc) + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { + if (!qdma->q_tx[i].ndesc) continue; if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i)) - airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i), + airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i), TX_RING_IRQ_BLOCKING_CFG_MASK); else - airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i), + airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i), TX_RING_IRQ_BLOCKING_CFG_MASK); } - airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG, + airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_2B_OFFSET_MASK | FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) | GLOBAL_CFG_CPU_TXR_RR_MASK | @@ -1940,18 +1968,18 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth) GLOBAL_CFG_TX_WB_DONE_MASK | FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2)); - airoha_qdma_init_qos(eth); + airoha_qdma_init_qos(qdma); /* disable qdma rx delay interrupt */ - for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { - if (!eth->q_rx[i].ndesc) + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { + if (!qdma->q_rx[i].ndesc) continue; - airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i), + airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i), RX_DELAY_INT_MASK); } - airoha_qdma_set(eth, REG_TXQ_CNGST_CFG, + airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG, TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN); return 0; @@ -1959,150 +1987,180 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth) static irqreturn_t airoha_irq_handler(int irq, void *dev_instance) { - struct airoha_eth *eth = dev_instance; - u32 intr[ARRAY_SIZE(eth->irqmask)]; + struct airoha_qdma *qdma = dev_instance; + u32 intr[ARRAY_SIZE(qdma->irqmask)]; int i; - for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) { - intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i)); - intr[i] &= eth->irqmask[i]; - airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]); + for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) { + intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i)); + intr[i] &= qdma->irqmask[i]; + airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]); } - if (!test_bit(DEV_STATE_INITIALIZED, &eth->state)) + if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state)) return IRQ_NONE; if (intr[1] & RX_DONE_INT_MASK) { - airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1, + airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1, RX_DONE_INT_MASK); - for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { - if (!eth->q_rx[i].ndesc) + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { - if (!qdma->q_rx[i].ndesc) continue; if (intr[1] & BIT(i)) - napi_schedule(&eth->q_rx[i].napi); + napi_schedule(&qdma->q_rx[i].napi); } } if (intr[0] & INT_TX_MASK) { - for (i = 0; i <
ARRAY_SIZE(eth->q_tx_irq); i++) { - struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i]; + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { + struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i]; u32 status, head; if (!(intr[0] & TX_DONE_INT_MASK(i))) continue; - airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0, + airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0, TX_DONE_INT_MASK(i)); - status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i)); + status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i)); head = FIELD_GET(IRQ_HEAD_IDX_MASK, status); irq_q->head = head % irq_q->size; irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status); - napi_schedule(&eth->q_tx_irq[i].napi); + napi_schedule(&qdma->q_tx_irq[i].napi); } } return IRQ_HANDLED; } -static int airoha_qdma_init(struct airoha_eth *eth) +static int airoha_qdma_init(struct platform_device *pdev, + struct airoha_eth *eth, + struct airoha_qdma *qdma) { - int err; + int err, id = qdma - &eth->qdma[0]; + const char *res; - err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler, - IRQF_SHARED, KBUILD_MODNAME, eth); - if (err) - return err; + spin_lock_init(&qdma->irq_lock); + qdma->eth = eth; - err = airoha_qdma_init_rx(eth); + res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id); + if (!res) + return -ENOMEM; + + qdma->regs = devm_platform_ioremap_resource_byname(pdev, res); + if (IS_ERR(qdma->regs)) + return dev_err_probe(eth->dev, PTR_ERR(qdma->regs), + "failed to iomap qdma%d regs\n", id); + + qdma->irq = platform_get_irq(pdev, 4 * id); + if (qdma->irq < 0) + return qdma->irq; + + err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, qdma); if (err) return err; - err = airoha_qdma_init_tx(eth); + err = airoha_qdma_init_rx(qdma); if (err) return err; - err = airoha_qdma_init_hfwd_queues(eth); + err = airoha_qdma_init_tx(qdma); if (err) return err; - err = airoha_qdma_hw_init(eth); + err = airoha_qdma_init_hfwd_queues(qdma); if (err) return err; - set_bit(DEV_STATE_INITIALIZED, &eth->state); - - return 0; + return airoha_qdma_hw_init(qdma); } -static int airoha_hw_init(struct airoha_eth *eth) +static int airoha_hw_init(struct platform_device *pdev, + struct airoha_eth *eth) { - int err; + int err, i; /* disable xsi */ - reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts); + err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), + eth->xsi_rsts); + if (err) + return err; + + err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); + if (err) + return err; - reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); - msleep(20); - reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); msleep(20); + err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); + if (err) + return err; + msleep(20); err = airoha_fe_init(eth); if (err) return err; - return airoha_qdma_init(eth); + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { + err = airoha_qdma_init(pdev, eth, &eth->qdma[i]); + if (err) + return err; + } + + set_bit(DEV_STATE_INITIALIZED, &eth->state); + + return 0; } -static void airoha_hw_cleanup(struct airoha_eth *eth) +static void airoha_hw_cleanup(struct airoha_qdma *qdma) { int i; - for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { - if (!eth->q_rx[i].ndesc) + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { + if (!qdma->q_rx[i].ndesc) continue; - napi_disable(&eth->q_rx[i].napi); - netif_napi_del(&eth->q_rx[i].napi); - airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]); - if (eth->q_rx[i].page_pool) - page_pool_destroy(eth->q_rx[i].page_pool); + napi_disable(&qdma->q_rx[i].napi); +
netif_napi_del(&qdma->q_rx[i].napi); + airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]); + if (qdma->q_rx[i].page_pool) + page_pool_destroy(qdma->q_rx[i].page_pool); } - for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) { - napi_disable(&eth->q_tx_irq[i].napi); - netif_napi_del(&eth->q_tx_irq[i].napi); + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { + napi_disable(&qdma->q_tx_irq[i].napi); + netif_napi_del(&qdma->q_tx_irq[i].napi); } - for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { - if (!eth->q_tx[i].ndesc) + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { + if (!qdma->q_tx[i].ndesc) continue; - airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]); + airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); } } -static void airoha_qdma_start_napi(struct airoha_eth *eth) +static void airoha_qdma_start_napi(struct airoha_qdma *qdma) { int i; - for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) - napi_enable(&eth->q_tx_irq[i].napi); + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) + napi_enable(&qdma->q_tx_irq[i].napi); - for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { - if (!eth->q_rx[i].ndesc) + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { + if (!qdma->q_rx[i].ndesc) continue; - napi_enable(&eth->q_rx[i].napi); + napi_enable(&qdma->q_rx[i].napi); } } static void airoha_update_hw_stats(struct airoha_gdm_port *port) { - struct airoha_eth *eth = port->eth; + struct airoha_eth *eth = port->qdma->eth; u32 val, i = 0; spin_lock(&port->stats.lock); @@ -2247,23 +2305,24 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port) static int airoha_dev_open(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); - struct airoha_eth *eth = port->eth; + struct airoha_qdma *qdma = port->qdma; int err; netif_tx_start_all_queues(dev); - err = airoha_set_gdm_ports(eth, true); + err = airoha_set_gdm_ports(qdma->eth, true); if (err) return err; if (netdev_uses_dsa(dev)) - airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id), + airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id), GDM_STAG_EN_MASK); else - airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id), + airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id), GDM_STAG_EN_MASK); - airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK); - airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK); + airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG, + GLOBAL_CFG_TX_DMA_EN_MASK | + GLOBAL_CFG_RX_DMA_EN_MASK); return 0; } @@ -2271,16 +2330,17 @@ static int airoha_dev_open(struct net_device *dev) static int airoha_dev_stop(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); - struct airoha_eth *eth = port->eth; + struct airoha_qdma *qdma = port->qdma; int err; netif_tx_disable(dev); - err = airoha_set_gdm_ports(eth, false); + err = airoha_set_gdm_ports(qdma->eth, false); if (err) return err; - airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK); - airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK); + airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG, + GLOBAL_CFG_TX_DMA_EN_MASK | + GLOBAL_CFG_RX_DMA_EN_MASK); return 0; } @@ -2294,7 +2354,7 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p) if (err) return err; - airoha_set_macaddr(port->eth, dev->dev_addr); + airoha_set_macaddr(port, dev->dev_addr); return 0; } @@ -2303,7 +2363,7 @@ static int airoha_dev_init(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); - airoha_set_macaddr(port->eth, dev->dev_addr); + airoha_set_macaddr(port, dev->dev_addr); return 0; } @@ -2337,7 +2397,7 @@ static netdev_tx_t
airoha_dev_xmit(struct sk_buff *skb, struct airoha_gdm_port *port = netdev_priv(dev); u32 msg0 = 0, msg1, len = skb_headlen(skb); int i, qid = skb_get_queue_mapping(skb); - struct airoha_eth *eth = port->eth; + struct airoha_qdma *qdma = port->qdma; u32 nr_frags = 1 + sinfo->nr_frags; struct netdev_queue *txq; struct airoha_queue *q; @@ -2367,7 +2427,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) | FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f); - q = &eth->q_tx[qid]; + q = &qdma->q_tx[qid]; if (WARN_ON_ONCE(!q->ndesc)) goto error; @@ -2411,7 +2471,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, e->dma_addr = addr; e->dma_len = len; - airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, + airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), + TX_RING_CPU_IDX_MASK, FIELD_PREP(TX_RING_CPU_IDX_MASK, index)); data = skb_frag_address(frag); @@ -2448,7 +2509,7 @@ static void airoha_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct airoha_gdm_port *port = netdev_priv(dev); - struct airoha_eth *eth = port->eth; + struct airoha_eth *eth = port->qdma->eth; strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver)); strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info)); @@ -2529,6 +2590,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) { const __be32 *id_ptr = of_get_property(np, "reg", NULL); struct airoha_gdm_port *port; + struct airoha_qdma *qdma; struct net_device *dev; int err, index; u32 id; @@ -2558,6 +2620,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) return -ENOMEM; } + qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA]; dev->netdev_ops = &airoha_netdev_ops; dev->ethtool_ops = &airoha_ethtool_ops; dev->max_mtu = AIROHA_MAX_MTU; @@ -2567,6 +2630,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) NETIF_F_SG | NETIF_F_TSO; dev->features |= dev->hw_features; dev->dev.of_node = np; + dev->irq = qdma->irq; SET_NETDEV_DEV(dev, eth->dev); err = of_get_ethdev_address(np, dev); @@ -2582,8 +2646,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) port = netdev_priv(dev); u64_stats_init(&port->stats.syncp); spin_lock_init(&port->stats.lock); + port->qdma = qdma; port->dev = dev; - port->eth = eth; port->id = id; eth->ports[index] = port; @@ -2613,11 +2677,6 @@ static int airoha_probe(struct platform_device *pdev) return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs), "failed to iomap fe regs\n"); - eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0"); - if (IS_ERR(eth->qdma_regs)) - return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs), - "failed to iomap qdma regs\n"); - eth->rsts[0].id = "fe"; eth->rsts[1].id = "pdma"; eth->rsts[2].id = "qdma"; @@ -2642,11 +2701,6 @@ static int airoha_probe(struct platform_device *pdev) return err; } - spin_lock_init(&eth->irq_lock); - eth->irq = platform_get_irq(pdev, 0); - if (eth->irq < 0) - return eth->irq; - eth->napi_dev = alloc_netdev_dummy(0); if (!eth->napi_dev) return -ENOMEM; @@ -2656,11 +2710,13 @@ static int airoha_probe(struct platform_device *pdev) strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name)); platform_set_drvdata(pdev, eth); - err = airoha_hw_init(eth); + err = airoha_hw_init(pdev, eth); if (err) goto error; - airoha_qdma_start_napi(eth); + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) + airoha_qdma_start_napi(&eth->qdma[i]); +
for_each_child_of_node(pdev->dev.of_node, np) { if (!of_device_is_compatible(np, "airoha,eth-mac")) continue; @@ -2678,7 +2734,9 @@ static int airoha_probe(struct platform_device *pdev) return 0; error: - airoha_hw_cleanup(eth); + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) + airoha_hw_cleanup(&eth->qdma[i]); + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { struct airoha_gdm_port *port = eth->ports[i]; @@ -2696,7 +2754,9 @@ static void airoha_remove(struct platform_device *pdev) struct airoha_eth *eth = platform_get_drvdata(pdev); int i; - airoha_hw_cleanup(eth); + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) + airoha_hw_cleanup(&eth->qdma[i]); + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { struct airoha_gdm_port *port = eth->ports[i]; @@ -2715,6 +2775,7 @@ static const struct of_device_id of_airoha_match[] = { { .compatible = "airoha,en7581-eth" }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, of_airoha_match); static struct platform_driver airoha_driver = { .probe = airoha_probe, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index eb1708b43aa3e5..0d5225f1d3eef6 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -724,12 +724,8 @@ enum mtk_clks_map { MTK_CLK_ETHWARP_WOCPU2, MTK_CLK_ETHWARP_WOCPU1, MTK_CLK_ETHWARP_WOCPU0, - MTK_CLK_TOP_USXGMII_SBUS_0_SEL, - MTK_CLK_TOP_USXGMII_SBUS_1_SEL, MTK_CLK_TOP_SGM_0_SEL, MTK_CLK_TOP_SGM_1_SEL, - MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL, - MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL, MTK_CLK_TOP_ETH_GMII_SEL, MTK_CLK_TOP_ETH_REFCK_50M_SEL, MTK_CLK_TOP_ETH_SYS_200M_SEL, @@ -800,19 +796,9 @@ enum mtk_clks_map { BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \ BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \ BIT_ULL(MTK_CLK_CRYPTO) | \ - BIT_ULL(MTK_CLK_SGMII_TX_250M) | \ - BIT_ULL(MTK_CLK_SGMII_RX_250M) | \ - BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \ - BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \ BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \ BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \ BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \ - BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \ - BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \ - BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \ - BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \ - BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \ - BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \ BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \ BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \ BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \ diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 0acee405a7498a..ada852adc5f702 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -8,8 +8,11 @@ #include #include #include + #include #include +#include + #include "mtk_eth_soc.h" #include "mtk_ppe.h" #include "mtk_ppe_regs.h" @@ -338,7 +341,6 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth, { int type = mtk_get_ib1_pkt_type(eth, entry->ib1); u32 *src, *dest; - int i; switch (type) { case MTK_PPE_PKT_TYPE_IPV4_DSLITE: @@ -359,10 +361,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth, return -EINVAL; } - for (i = 0; i < 4; i++) - src[i] = be32_to_cpu(src_addr[i]); - for (i = 0; i < 4; i++) - dest[i] = be32_to_cpu(dest_addr[i]); + ipv6_addr_be32_to_cpu(src, src_addr); + ipv6_addr_be32_to_cpu(dest, dest_addr); return 0; } diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c index 1a97feca77f237..570ebf91f69354 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c +++
b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c @@ -3,6 +3,9 @@ #include #include + +#include + #include "mtk_eth_soc.h" struct mtk_flow_addr_info @@ -47,16 +50,14 @@ static const char *mtk_foe_pkt_type_str(int type) static void mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6) { - __be32 n_addr[4]; - int i; + __be32 n_addr[IPV6_ADDR_WORDS]; if (!ipv6) { seq_printf(m, "%pI4h", addr); return; } - for (i = 0; i < ARRAY_SIZE(n_addr); i++) - n_addr[i] = htonl(addr[i]); + ipv6_addr_cpu_to_be32(n_addr, addr); seq_printf(m, "%pI6", n_addr); } diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c index ea0884186d7644..c06e5ad18b0105 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include "mtk_wed_regs.h" #include "mtk_wed_wo.h" diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 943d6918c2ec99..cd17a3f4faf83e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -2036,20 +2036,20 @@ static int mlx4_en_get_module_info(struct net_device *dev, switch (data[0] /* identifier */) { case MLX4_MODULE_ID_QSFP: modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; break; case MLX4_MODULE_ID_QSFP_PLUS: if (data[1] >= 0x3) { /* revision id */ modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; } else { modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; } break; case MLX4_MODULE_ID_QSFP28: modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; break; case MLX4_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 685335832a9304..ea6070180c96b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -172,6 +172,16 @@ config MLX5_SW_STEERING help Build support for software-managed steering in the NIC. +config MLX5_HW_STEERING + bool "Mellanox Technologies hardware-managed steering" + depends on MLX5_CORE_EN && MLX5_ESWITCH + default y + help + Build support for Hardware-Managed Flow Steering (HMFS) in the NIC. + HMFS is a new approach to managing steering rules where STEs are + written to ICM by HW (as opposed to SW in software-managed steering), + which allows higher rate of rule insertion. 
+ config MLX5_SF bool "Mellanox Technologies subfunction device support using auxiliary device" depends on MLX5_CORE && MLX5_CORE_EN diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 1289475e7be7a0..5912f7e614f9c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -119,6 +119,27 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o steering/dr_action.o steering/fs_dr.o \ steering/dr_definer.o steering/dr_ptrn.o \ steering/dr_arg.o steering/dr_dbg.o lib/smfs.o + +# +# HW Steering +# +mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \ + steering/hws/mlx5hws_context.o \ + steering/hws/mlx5hws_pat_arg.o \ + steering/hws/mlx5hws_buddy.o \ + steering/hws/mlx5hws_pool.o \ + steering/hws/mlx5hws_table.o \ + steering/hws/mlx5hws_action.o \ + steering/hws/mlx5hws_rule.o \ + steering/hws/mlx5hws_matcher.o \ + steering/hws/mlx5hws_send.o \ + steering/hws/mlx5hws_definer.o \ + steering/hws/mlx5hws_bwc.o \ + steering/hws/mlx5hws_debug.o \ + steering/hws/mlx5hws_vport.o \ + steering/hws/mlx5hws_bwc_complex.o + + # # SF device # diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 20768ef2e9d2b2..a64d96effb9eac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -754,6 +754,8 @@ static const char *cmd_status_str(u8 status) return "bad resource"; case MLX5_CMD_STAT_RES_BUSY: return "resource busy"; + case MLX5_CMD_STAT_NOT_READY: + return "FW not ready"; case MLX5_CMD_STAT_LIM_ERR: return "limits exceeded"; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: @@ -787,6 +789,7 @@ static int cmd_status_to_err(u8 status) case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; + case MLX5_CMD_STAT_NOT_READY: return -EAGAIN; case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; case MLX5_CMD_STAT_IX_ERR: return -EINVAL; @@ -815,14 +818,16 @@ EXPORT_SYMBOL(mlx5_cmd_out_err); static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) { u16 opcode, op_mod; + u8 status; u16 uid; opcode = in_to_opcode(in); op_mod = MLX5_GET(mbox_in, in, op_mod); uid = MLX5_GET(mbox_in, in, uid); + status = MLX5_GET(mbox_out, out, status); if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY && - opcode != MLX5_CMD_OP_CREATE_UCTX) + opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY) mlx5_cmd_out_err(dev, opcode, op_mod, out); } @@ -1882,10 +1887,12 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, throttle_op = mlx5_cmd_is_throttle_opcode(opcode); if (throttle_op) { - /* atomic context may not sleep */ - if (callback) - return -EINVAL; - down(&dev->cmd.vars.throttle_sem); + if (callback) { + if (down_trylock(&dev->cmd.vars.throttle_sem)) + return -EBUSY; + } else { + down(&dev->cmd.vars.throttle_sem); + } } pages_queue = is_manage_pages(in); @@ -2091,10 +2098,19 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work) { struct mlx5_async_work *work = _work; struct mlx5_async_ctx *ctx; + struct mlx5_core_dev *dev; + u16 opcode; ctx = work->ctx; - status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out); + dev = ctx->dev; + opcode = work->opcode; + status = cmd_status_err(dev, status, work->opcode, work->op_mod, 
work->out); work->user_callback(status, work); + /* Can't access "work" from this point on. It could have been freed in + * the callback. + */ + if (mlx5_cmd_is_throttle_opcode(opcode)) + up(&dev->cmd.vars.throttle_sem); if (atomic_dec_and_test(&ctx->num_inflight)) complete(&ctx->inflight_done); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index ddf1b87f1bc0d6..9aed29fa490064 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -203,10 +203,10 @@ TRACE_EVENT(mlx5_fs_set_fte, fs_get_obj(__entry->fg, fte->node.parent); __entry->group_index = __entry->fg->id; __entry->index = fte->index; - __entry->action = fte->action.action; + __entry->action = fte->act_dests.action.action; __entry->mask_enable = __entry->fg->mask.match_criteria_enable; - __entry->flow_tag = fte->flow_context.flow_tag; - __entry->flow_source = fte->flow_context.flow_source; + __entry->flow_tag = fte->act_dests.flow_context.flow_tag; + __entry->flow_source = fte->act_dests.flow_context.flow_source; memcpy(__entry->mask_outer, MLX5_ADDR_OF(fte_match_param, &__entry->fg->mask.match_criteria, @@ -284,7 +284,7 @@ TRACE_EVENT(mlx5_fs_add_rule, TP_fast_assign( __entry->rule = rule; fs_get_obj(__entry->fte, rule->node.parent); - __entry->index = __entry->fte->dests_size - 1; + __entry->index = __entry->fte->act_dests.dests_size - 1; __entry->sw_action = rule->sw_action; memcpy(__entry->destination, &rule->dest_attr, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d9e241423bc567..57b7298a0e793c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -627,7 +627,7 @@ struct mlx5e_shampo_hd { struct mlx5e_dma_info *info; struct mlx5e_frag_page *pages; u16 curr_page_index; - u16 hd_per_wq; + u32 hd_per_wq; u16 hd_per_wqe; unsigned long *bitmap; u16 pi; @@ -1173,14 +1173,16 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param, struct kernel_ethtool_ringparam *kernel_param); int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, - struct ethtool_ringparam *param); + struct ethtool_ringparam *param, + struct netlink_ext_ack *extack); void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, struct ethtool_channels *ch); int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, struct ethtool_channels *ch); int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal, - struct kernel_ethtool_coalesce *kernel_coal); + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h index bb6b1a979ba132..62b3f7ff556211 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h @@ -25,6 +25,8 @@ struct mlx5_ct_fs_ops { struct mlx5_flow_attr *attr, struct flow_rule *flow_rule); void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule); + int (*ct_rule_update)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule, + struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr); size_t priv_size; }; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c index ae4f55be48cec3..64a82aafaacafd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c @@ -65,9 +65,30 @@ mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru kfree(dmfs_rule); } +static int mlx5_ct_fs_dmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule, + struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr) +{ + struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule, + struct mlx5_ct_fs_dmfs_rule, + fs_rule); + struct mlx5e_priv *priv = netdev_priv(fs->netdev); + struct mlx5_flow_handle *rule; + + rule = mlx5_tc_rule_insert(priv, spec, attr); + if (IS_ERR(rule)) + return PTR_ERR(rule); + mlx5_tc_rule_delete(priv, dmfs_rule->rule, dmfs_rule->attr); + + dmfs_rule->rule = rule; + dmfs_rule->attr = attr; + + return 0; +} + static struct mlx5_ct_fs_ops dmfs_ops = { .ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add, .ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del, + .ct_rule_update = mlx5_ct_fs_dmfs_ct_rule_update, .init = mlx5_ct_fs_dmfs_init, .destroy = mlx5_ct_fs_dmfs_destroy, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c index 8c531f4ec91292..1c062a2e8996e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c @@ -368,9 +368,35 @@ mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru kfree(smfs_rule); } +static int mlx5_ct_fs_smfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule, + struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr) +{ + struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule, + struct mlx5_ct_fs_smfs_rule, + fs_rule); + struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs); + struct mlx5dr_action *actions[3]; /* We only need to create 3 actions, see below. 
*/ + struct mlx5dr_rule *rule; + + actions[0] = smfs_rule->count_action; + actions[1] = attr->modify_hdr->action.dr_action; + actions[2] = fs_smfs->fwd_action; + + rule = mlx5_smfs_rule_create(smfs_rule->smfs_matcher->dr_matcher, spec, + ARRAY_SIZE(actions), actions, spec->flow_context.flow_source); + if (!rule) + return -EINVAL; + + mlx5_smfs_rule_destroy(smfs_rule->rule); + smfs_rule->rule = rule; + + return 0; +} + static struct mlx5_ct_fs_ops fs_smfs_ops = { .ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add, .ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del, + .ct_rule_update = mlx5_ct_fs_smfs_ct_rule_update, .init = mlx5_ct_fs_smfs_init, .destroy = mlx5_ct_fs_smfs_destroy, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 71a168746ebe21..dcfccaaa8d917b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -876,15 +876,14 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, } static int -mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv, - struct flow_rule *flow_rule, - struct mlx5_ct_entry *entry, - bool nat, u8 zone_restore_id) +mlx5_tc_ct_entry_update_rule(struct mlx5_tc_ct_priv *ct_priv, + struct flow_rule *flow_rule, + struct mlx5_ct_entry *entry, + bool nat, u8 zone_restore_id) { struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat]; struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr; struct mlx5e_mod_hdr_handle *mh; - struct mlx5_ct_fs_rule *rule; struct mlx5_flow_spec *spec; int err; @@ -902,29 +901,26 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv, err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id, nat, mlx5_tc_ct_entry_in_ct_nat_table(entry)); if (err) { - ct_dbg("Failed to create ct entry mod hdr"); + ct_dbg("Failed to create ct entry mod hdr, err: %d", err); goto err_mod_hdr; } mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule); mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK); - rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat); + err = ct_priv->fs_ops->ct_rule_update(ct_priv->fs, zone_rule->rule, spec, attr); + if (err) { + ct_dbg("Failed to update ct entry rule, nat: %d, err: %d", nat, err); goto err_rule; } - ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule); - zone_rule->rule = rule; mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh); zone_rule->mh = mh; mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id); kfree(old_attr); kvfree(spec); - ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone); + ct_dbg("Updated ct entry rule in zone %d", entry->tuple.zone); return 0; @@ -1141,23 +1137,23 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv, } static int -mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv, - struct flow_rule *flow_rule, - struct mlx5_ct_entry *entry, - u8 zone_restore_id) +mlx5_tc_ct_entry_update_rules(struct mlx5_tc_ct_priv *ct_priv, + struct flow_rule *flow_rule, + struct mlx5_ct_entry *entry, + u8 zone_restore_id) { int err = 0; if (mlx5_tc_ct_entry_in_ct_table(entry)) { - err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false, - zone_restore_id); + err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, false, + zone_restore_id); if (err) return err; } if 
(mlx5_tc_ct_entry_in_ct_nat_table(entry)) { - err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true, - zone_restore_id); + err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, true, + zone_restore_id); if (err && mlx5_tc_ct_entry_in_ct_table(entry)) mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); } @@ -1165,13 +1161,13 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv, } static int -mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule, - struct mlx5_ct_entry *entry, unsigned long cookie) +mlx5_tc_ct_block_flow_offload_update(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule, + struct mlx5_ct_entry *entry, unsigned long cookie) { struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv; int err; - err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id); + err = mlx5_tc_ct_entry_update_rules(ct_priv, flow_rule, entry, ft->zone_restore_id); if (!err) return 0; @@ -1216,7 +1212,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, entry->restore_cookie = meta_action->ct_metadata.cookie; spin_unlock_bh(&ct_priv->ht_lock); - err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie); + err = mlx5_tc_ct_block_flow_offload_update(ft, flow_rule, entry, cookie); mlx5_tc_ct_entry_put(entry); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 6cc23af66b5be0..efb34de4cb7adb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -109,6 +109,7 @@ struct mlx5e_tc_flow { struct completion init_done; struct completion del_hw_done; struct mlx5_flow_attr *attr; + struct mlx5_flow_attr *extra_split_attr; struct list_head attrs; u32 chain_mapping; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c index d4239e3b3c88ef..11f724ad90dbfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -23,6 +23,9 @@ struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify) struct mlx5e_tir_builder *builder; builder = kvzalloc(sizeof(*builder), GFP_KERNEL); + if (!builder) + return NULL; + builder->modify = modify; return builder; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 3d274599015be1..ca92e518be7669 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -67,7 +67,6 @@ static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work) return; spin_lock_bh(&x->lock); - xfrm_state_check_expire(x); if (x->km.state == XFRM_STATE_EXPIRED) { sa_entry->attrs.drop = true; spin_unlock_bh(&x->lock); @@ -75,6 +74,13 @@ static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work) mlx5e_accel_ipsec_fs_modify(sa_entry); return; } + + if (x->km.state != XFRM_STATE_VALID) { + spin_unlock_bh(&x->lock); + return; + } + + xfrm_state_check_expire(x); spin_unlock_bh(&x->lock); queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 797db853de3638..53cfa39188cb0e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c 
@@ -127,6 +127,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn, MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt, attrs->lft.hard_packet_limit); MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1); + MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1); } if (attrs->lft.soft_packet_limit != XFRM_INF) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 1cf3c54d343ebf..1966736f98b4af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -83,17 +83,15 @@ struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER]; ({ \ struct ptys2ethtool_config *cfg; \ const unsigned int modes[] = { __VA_ARGS__ }; \ - unsigned int i, bit, idx; \ + unsigned int i; \ cfg = &ptys2##table##_ethtool_table[reg_]; \ bitmap_zero(cfg->supported, \ __ETHTOOL_LINK_MODE_MASK_NBITS); \ bitmap_zero(cfg->advertised, \ __ETHTOOL_LINK_MODE_MASK_NBITS); \ for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ - bit = modes[i] % 64; \ - idx = modes[i] / 64; \ - __set_bit(bit, &cfg->supported[idx]); \ - __set_bit(bit, &cfg->advertised[idx]); \ + bitmap_set(cfg->supported, modes[i], 1); \ + bitmap_set(cfg->advertised, modes[i], 1); \ } \ }) @@ -364,35 +362,25 @@ static void mlx5e_get_ringparam(struct net_device *dev, } int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct netlink_ext_ack *extack) { struct mlx5e_params new_params; u8 log_rq_size; u8 log_sq_size; int err = 0; - if (param->rx_jumbo_pending) { - netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n", - __func__); - return -EINVAL; - } - if (param->rx_mini_pending) { - netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n", - __func__); - return -EINVAL; - } - if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { - netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n", - __func__, param->rx_pending, - 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); + NL_SET_ERR_MSG_FMT_MOD(extack, "rx (%d) < min (%d)", + param->rx_pending, + 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); return -EINVAL; } if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { - netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n", - __func__, param->tx_pending, - 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); + NL_SET_ERR_MSG_FMT_MOD(extack, "tx (%d) < min (%d)", + param->tx_pending, + 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); return -EINVAL; } @@ -428,7 +416,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - return mlx5e_ethtool_set_ringparam(priv, param); + return mlx5e_ethtool_set_ringparam(priv, param, extack); } void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, @@ -455,7 +443,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, unsigned int count = ch->combined_count; struct mlx5e_params new_params; bool arfs_enabled; - int rss_cnt; bool opened; int err = 0; @@ -509,17 +496,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, goto out; } - /* Don't allow changing the number of channels if non-default RSS contexts exist, - * the kernel doesn't protect against set_channels operations that break them. 
- */ - rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1; - if (rss_cnt) { - err = -EINVAL; - netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n", - __func__, rss_cnt); - goto out; - } - /* Don't allow changing the number of channels if MQPRIO mode channel offload is active, * because it defines a partition over the channels queues. */ @@ -567,12 +543,15 @@ static int mlx5e_set_channels(struct net_device *dev, int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal, - struct kernel_ethtool_coalesce *kernel_coal) + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct dim_cq_moder *rx_moder, *tx_moder; - if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) + if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) { + NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported"); return -EOPNOTSUPP; + } rx_moder = &priv->channels.params.rx_cq_moderation; coal->rx_coalesce_usecs = rx_moder->usec; @@ -596,7 +575,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); - return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); + return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack); } static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue, @@ -718,26 +697,34 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, int err = 0; if (!MLX5_CAP_GEN(mdev, cq_moderation) || - !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) + !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) { + NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported"); return -EOPNOTSUPP; + } if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME || coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) { - netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n", - __func__, MLX5E_MAX_COAL_TIME); + NL_SET_ERR_MSG_FMT_MOD( + extack, + "Max coalesce time %lu usecs, tx-usecs (%u) rx-usecs (%u)", + MLX5E_MAX_COAL_TIME, coal->tx_coalesce_usecs, + coal->rx_coalesce_usecs); return -ERANGE; } if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES || coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) { - netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n", - __func__, MLX5E_MAX_COAL_FRAMES); + NL_SET_ERR_MSG_FMT_MOD( + extack, + "Max coalesce frames %lu, tx-frames (%u) rx-frames (%u)", + MLX5E_MAX_COAL_FRAMES, coal->tx_max_coalesced_frames, + coal->rx_max_coalesced_frames); return -ERANGE; } if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) { - NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device"); + NL_SET_ERR_MSG_MOD(extack, "cqe-mode-rx/tx is not supported on this device"); return -EOPNOTSUPP; } @@ -1309,7 +1296,8 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) u32 i, ptys_modes = 0; for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (*ptys2legacy_ethtool_table[i].advertised == 0) + if (bitmap_empty(ptys2legacy_ethtool_table[i].advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS)) continue; if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised, link_modes, @@ -1323,18 +1311,18 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes) { u32 i, ptys_modes = 0; - unsigned long modes[2]; + __ETHTOOL_DECLARE_LINK_MODE_MASK(modes); for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) { - if 
(ptys2ext_ethtool_table[i].advertised[0] == 0 && - ptys2ext_ethtool_table[i].advertised[1] == 0) + if (bitmap_empty(ptys2ext_ethtool_table[i].advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS)) continue; - memset(modes, 0, sizeof(modes)); + bitmap_zero(modes, __ETHTOOL_LINK_MODE_MASK_NBITS); bitmap_and(modes, ptys2ext_ethtool_table[i].advertised, link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS); - if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] && - modes[1] == ptys2ext_ethtool_table[i].advertised[1]) + if (bitmap_equal(modes, ptys2ext_ethtool_table[i].advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= MLX5E_PROT_MASK(i); } return ptys_modes; @@ -2025,8 +2013,10 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev, if (size_read == -EINVAL) return -EINVAL; if (size_read < 0) { - netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n", - __func__, size_read); + NL_SET_ERR_MSG_FMT_MOD( + extack, + "Query module eeprom by page failed, read %u bytes, err %d\n", + i, size_read); return i; } @@ -2615,6 +2605,7 @@ static void mlx5e_get_ts_stats(struct net_device *netdev, const struct ethtool_ops mlx5e_ethtool_ops = { .cap_rss_ctx_supported = true, + .rxfh_per_ctx_key = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 16b67c457b6057..a5659c0c423613 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1016,30 +1016,31 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, static void mlx5e_free_rq(struct mlx5e_rq *rq) { - struct bpf_prog *old_prog; - - if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) { - old_prog = rcu_dereference_protected(rq->xdp_prog, - lockdep_is_held(&rq->priv->state_lock)); - if (old_prog) - bpf_prog_put(old_prog); - } + kvfree(rq->dim); + page_pool_destroy(rq->page_pool); switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + mlx5e_rq_free_shampo(rq); kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be)); mlx5e_free_mpwqe_rq_drop_page(rq); - mlx5e_rq_free_shampo(rq); break; default: /* MLX5_WQ_TYPE_CYCLIC */ mlx5e_free_wqe_alloc_info(rq); } - kvfree(rq->dim); - xdp_rxq_info_unreg(&rq->xdp_rxq); - page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); + + if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) { + struct bpf_prog *old_prog; + + old_prog = rcu_dereference_protected(rq->xdp_prog, + lockdep_is_held(&rq->priv->state_lock)); + if (old_prog) + bpf_prog_put(old_prog); + } + xdp_rxq_info_unreg(&rq->xdp_rxq); } int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter) @@ -4414,9 +4415,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, if (mlx5e_is_uplink_rep(priv)) { features = mlx5e_fix_uplink_rep_features(netdev, features); - features |= NETIF_F_NETNS_LOCAL; + netdev->netns_local = true; } else { - features &= ~NETIF_F_NETNS_LOCAL; + netdev->netns_local = false; } mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 8790d57dc6dbf0..92094bf60d5986 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -360,7 +360,7 @@ mlx5e_rep_set_ringparam(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); 
- return mlx5e_ethtool_set_ringparam(priv, param); + return mlx5e_ethtool_set_ringparam(priv, param, extack); } static void mlx5e_rep_get_channels(struct net_device *dev, @@ -386,7 +386,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); - return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); + return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack); } static int mlx5e_rep_set_coalesce(struct net_device *netdev, @@ -898,7 +898,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, netdev->hw_features |= NETIF_F_RXCSUM; netdev->features |= netdev->hw_features; - netdev->features |= NETIF_F_NETNS_LOCAL; + + netdev->netns_local = true; } static int mlx5e_init_rep(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index de9d01036c2807..8e24ba96c779ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -2346,6 +2346,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq stats->hds_nodata_packets++; stats->hds_nodata_bytes += head_size; } + } else { + stats->hds_nosplit_packets++; + stats->hds_nosplit_bytes += data_bcnt; } mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index e7a3290a708a57..611ec4b6f37092 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -144,6 +144,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, @@ -347,6 +349,8 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_gro_large_hds += rq_stats->gro_large_hds; s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets; s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes; + s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets; + s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes; s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; @@ -2062,6 +2066,8 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 4c5858c1dd8293..5961c569cfe01a 100644 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -156,6 +156,8 @@ struct mlx5e_sw_stats { u64 rx_gro_large_hds; u64 rx_hds_nodata_packets; u64 rx_hds_nodata_bytes; + u64 rx_hds_nosplit_packets; + u64 rx_hds_nosplit_bytes; u64 rx_mcast_packets; u64 rx_ecn_mark; u64 rx_removed_vlan_packets; @@ -356,6 +358,8 @@ struct mlx5e_rq_stats { u64 gro_large_hds; u64 hds_nodata_packets; u64 hds_nodata_bytes; + u64 hds_nosplit_packets; + u64 hds_nosplit_bytes; u64 mcast_packets; u64 ecn_mark; u64 removed_vlan_packets; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 30673292e15fee..6b3b1afe831214 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1739,11 +1739,119 @@ has_encap_dests(struct mlx5_flow_attr *attr) return false; } +static int +extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr) +{ + bool int_dest = false, ext_dest = false; + struct mlx5_esw_flow_attr *esw_attr; + int i; + + if (flow->attr != attr || + !list_is_first(&attr->list, &flow->attrs)) + return 0; + + if (flow_flag_test(flow, SLOW)) + return 0; + + esw_attr = attr->esw_attr; + if (!esw_attr->split_count || + esw_attr->split_count == esw_attr->out_count - 1) + return 0; + + if (esw_attr->dest_int_port && + (esw_attr->dests[esw_attr->split_count].flags & + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)) + return esw_attr->split_count + 1; + + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { + /* external dest with encap is considered as internal by firmware */ + if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK && + !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) + ext_dest = true; + else + int_dest = true; + + if (ext_dest && int_dest) + return esw_attr->split_count; + } + + return 0; +} + +static int +extra_split_attr_dests(struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, int split_count) +{ + struct mlx5e_post_act *post_act = get_post_action(flow->priv); + struct mlx5e_tc_flow_parse_attr *parse_attr, *parse_attr2; + struct mlx5_esw_flow_attr *esw_attr, *esw_attr2; + struct mlx5e_post_act_handle *handle; + struct mlx5_flow_attr *attr2; + int i, j, err; + + if (IS_ERR(post_act)) + return PTR_ERR(post_act); + + attr2 = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow)); + parse_attr2 = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); + if (!attr2 || !parse_attr2) { + err = -ENOMEM; + goto err_free; + } + attr2->parse_attr = parse_attr2; + + handle = mlx5e_tc_post_act_add(post_act, attr2); + if (IS_ERR(handle)) { + err = PTR_ERR(handle); + goto err_free; + } + + esw_attr = attr->esw_attr; + esw_attr2 = attr2->esw_attr; + esw_attr2->in_rep = esw_attr->in_rep; + + parse_attr = attr->parse_attr; + parse_attr2->filter_dev = parse_attr->filter_dev; + + for (i = split_count, j = 0; i < esw_attr->out_count; i++, j++) + esw_attr2->dests[j] = esw_attr->dests[i]; + + esw_attr2->out_count = j; + attr2->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + + err = mlx5e_tc_post_act_offload(post_act, handle); + if (err) + goto err_post_act_offload; + + err = mlx5e_tc_post_act_set_handle(flow->priv->mdev, handle, + &parse_attr->mod_hdr_acts); + if (err) + goto err_post_act_set_handle; + + esw_attr->out_count = split_count; + attr->extra_split_ft = mlx5e_tc_post_act_get_ft(post_act); + flow->extra_split_attr = attr2; + + attr2->post_act_handle = handle; + + return 0; + +err_post_act_set_handle: + 
mlx5e_tc_post_act_unoffload(post_act, handle); +err_post_act_offload: + mlx5e_tc_post_act_del(post_act, handle); +err_free: + kvfree(parse_attr2); + kfree(attr2); + return err; +} + static int post_process_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr, struct netlink_ext_ack *extack) { + int extra_split; bool vf_tun; int err = 0; @@ -1757,6 +1865,13 @@ post_process_attr(struct mlx5e_tc_flow *flow, goto err_out; } + extra_split = extra_split_attr_dests_needed(flow, attr); + if (extra_split > 0) { + err = extra_split_attr_dests(flow, attr, extra_split); + if (err) + goto err_out; + } + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr); if (err) @@ -1971,6 +2086,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow); free_flow_post_acts(flow); + if (flow->extra_split_attr) { + mlx5_free_flow_attr_actions(flow, flow->extra_split_attr); + kvfree(flow->extra_split_attr->parse_attr); + kfree(flow->extra_split_attr); + } mlx5_free_flow_attr_actions(flow, attr); kvfree(attr->esw_attr->rx_tun_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index c24bda56b2b5d2..e1b8cb78369f11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -86,6 +86,7 @@ struct mlx5_flow_attr { u32 dest_chain; struct mlx5_flow_table *ft; struct mlx5_flow_table *dest_ft; + struct mlx5_flow_table *extra_split_ft; u8 inner_match_level; u8 outer_match_level; u8 tun_ip_version; @@ -139,7 +140,7 @@ struct mlx5_rx_tun_attr { #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16 #define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0) -#define MLX5E_TC_MAX_INT_PORT_NUM (8) +#define MLX5E_TC_MAX_INT_PORT_NUM (32) #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index b09e9abd39f37f..f8c7912abe0e3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -642,7 +642,6 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, return; err_unmap: - mlx5e_dma_unmap_wqe_err(sq, 1); sq->stats->dropped++; dev_kfree_skb_any(skb); mlx5e_tx_flush(sq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index cb7e7e4104aff9..2505f90c0b39d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -835,28 +835,9 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx) mlx5_irq_release_vector(irq); } -static int mlx5_cpumask_default_spread(int numa_node, int index) +static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index) { - const struct cpumask *prev = cpu_none_mask; - const struct cpumask *mask; - int found_cpu = 0; - int i = 0; - int cpu; - - rcu_read_lock(); - for_each_numa_hop_mask(mask, numa_node) { - for_each_cpu_andnot(cpu, mask, prev) { - if (i++ == index) { - found_cpu = cpu; - goto spread_done; - } - } - prev = mask; - } - -spread_done: - rcu_read_unlock(); - return found_cpu; + return cpumask_local_spread(index, dev->priv.numa_node); } static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev) @@ -880,7 +861,7 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx) int cpu; rmap = 
mlx5_eq_table_get_pci_rmap(dev); - cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx); + cpu = mlx5_cpumask_default_spread(dev, vecidx); irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap); if (IS_ERR(irq)) return PTR_ERR(irq); @@ -915,7 +896,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) if (!mlx5_irq_pool_is_sf_pool(pool)) return comp_irq_request_pci(dev, vecidx); - af_desc.is_managed = 1; + af_desc.is_managed = false; cpumask_copy(&af_desc.mask, cpu_online_mask); cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus); irq = mlx5_irq_affinity_request(dev, pool, &af_desc); @@ -1145,7 +1126,7 @@ int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector) if (mask) cpu = cpumask_first(mask); else - cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector); + cpu = mlx5_cpumask_default_spread(dev, vector); return cpu; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 578466d69f2196..f44b4c7ebcfd73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -887,9 +887,6 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v bool enable); int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev, u16 vport_num); -void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw); -void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw); - #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 768199d2255a1d..f24f91d213f24c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -613,6 +613,13 @@ esw_setup_dests(struct mlx5_flow_destination *dest, } } + if (attr->extra_split_ft) { + flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[*i].ft = attr->extra_split_ft; + (*i)++; + } + out: return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 9b8599c200e2c0..676005854dad4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -463,7 +463,7 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev, int num_encap = 0; *extended_dest = false; - if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return 0; list_for_each_entry(dst, &fte->node.children, node.list) { @@ -502,17 +502,17 @@ mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context) execute_aso[0]); MLX5_SET(execute_aso, execute_aso, valid, 1); MLX5_SET(execute_aso, execute_aso, aso_object_id, - fte->action.exe_aso.object_id); + fte->act_dests.action.exe_aso.object_id); exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl); MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id, - fte->action.exe_aso.return_reg_id); + fte->act_dests.action.exe_aso.return_reg_id); MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type, - fte->action.exe_aso.type); + fte->act_dests.action.exe_aso.type); MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color, - 
fte->action.exe_aso.flow_meter.init_color); + fte->act_dests.action.exe_aso.flow_meter.init_color); MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id, - fte->action.exe_aso.flow_meter.meter_idx); + fte->act_dests.action.exe_aso.flow_meter.meter_idx); } static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, @@ -541,7 +541,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, else dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format); - inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size; + inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -553,7 +553,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, table_id, ft->id); MLX5_SET(set_fte_in, in, flow_index, fte->index); MLX5_SET(set_fte_in, in, ignore_flow_level, - !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL)); + !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL)); MLX5_SET(set_fte_in, in, vport_number, ft->vport); MLX5_SET(set_fte_in, in, other_vport, @@ -563,23 +563,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, group_id, group_id); MLX5_SET(flow_context, in_flow_context, flow_tag, - fte->flow_context.flow_tag); + fte->act_dests.flow_context.flow_tag); MLX5_SET(flow_context, in_flow_context, flow_source, - fte->flow_context.flow_source); + fte->act_dests.flow_context.flow_source); MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en, - !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN)); + !!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN)); MLX5_SET(flow_context, in_flow_context, extended_destination, extended_dest); - action = fte->action.action; + action = fte->act_dests.action.action; if (extended_dest) action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; MLX5_SET(flow_context, in_flow_context, action, action); - if (!extended_dest && fte->action.pkt_reformat) { - struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat; + if (!extended_dest && fte->act_dests.action.pkt_reformat) { + struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat; if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat); @@ -591,46 +591,46 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, goto err_out; } } else { - reformat_id = fte->action.pkt_reformat->id; + reformat_id = fte->act_dests.action.pkt_reformat->id; } } MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id); - if (fte->action.modify_hdr) { - if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { + if (fte->act_dests.action.modify_hdr) { + if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n"); err = -EOPNOTSUPP; goto err_out; } MLX5_SET(flow_context, in_flow_context, modify_header_id, - fte->action.modify_hdr->id); + fte->act_dests.action.modify_hdr->id); } MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type, - fte->action.crypto.type); + fte->act_dests.action.crypto.type); MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id, - fte->action.crypto.obj_id); + fte->act_dests.action.crypto.obj_id); vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); - MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype); - MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid); - MLX5_SET(vlan, 
vlan, prio, fte->action.vlan[0].prio); + MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype); + MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid); + MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio); vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2); - MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype); - MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid); - MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio); + MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype); + MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid); + MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, sizeof(fte->val)); in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { int list_size = 0; list_for_each_entry(dst, &fte->node.children, node.list) { @@ -706,7 +706,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, list_size); } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_flow_counter, ft->type)); @@ -731,8 +731,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, list_size); } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { - if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { + if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) { mlx5_cmd_set_fte_flow_meter(fte, in_flow_context); } else { err = -EOPNOTSUPP; @@ -1071,7 +1071,7 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns, static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns, enum fs_flow_table_type ft_type) { - return 0; + return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH; } static const struct mlx5_flow_cmds mlx5_flow_cmds = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 53e0e5137d3ff0..7eb7b3ffe3d849 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -124,4 +124,12 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void); int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode); int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect); + +static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft) +{ + if (ft->flags & MLX5_FLOW_TABLE_TERMINATION) + return true; + + return false; +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index a47d6419160d78..8505d5e241e1a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -605,12 +605,37 @@ static void modify_fte(struct fs_fte *fte) dev = get_dev(&fte->node); root = find_root(&ft->node); - err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte); + err = root->cmds->update_fte(root, ft, fg, fte->act_dests.modify_mask, fte); if (err) mlx5_core_warn(dev, "%s can't del rule fg id=%d fte_index=%d\n", __func__, fg->id, fte->index); - fte->modify_mask = 0; + 
fte->act_dests.modify_mask = 0; +} + +static void del_sw_hw_dup_rule(struct fs_node *node) +{ + struct mlx5_flow_rule *rule; + struct fs_fte *fte; + + fs_get_obj(rule, node); + fs_get_obj(fte, rule->node.parent); + trace_mlx5_fs_del_rule(rule); + + if (is_fwd_next_action(rule->sw_action)) { + mutex_lock(&rule->dest_attr.ft->lock); + list_del(&rule->next_ft); + mutex_unlock(&rule->dest_attr.ft->lock); + } + + /* If a pending rule is being deleted it means + * this is a NO APPEND rule, so there are no partial deletions, + * all the rules of the mlx5_flow_handle are going to be deleted + * and the rules aren't shared with any other mlx5_flow_handle instance + * so no need to do any bookkeeping like in del_sw_hw_rule(). + */ + + kfree(rule); } static void del_sw_hw_rule(struct fs_node *node) @@ -628,29 +653,29 @@ static void del_sw_hw_rule(struct fs_node *node) } if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) { - --fte->dests_size; - fte->modify_mask |= + --fte->act_dests.dests_size; + fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); - fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; + fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; goto out; } if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) { - --fte->dests_size; - fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); - fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; + --fte->act_dests.dests_size; + fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); + fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; goto out; } if (is_fwd_dest_type(rule->dest_attr.type)) { - --fte->dests_size; - --fte->fwd_dests; + --fte->act_dests.dests_size; + --fte->act_dests.fwd_dests; - if (!fte->fwd_dests) - fte->action.action &= + if (!fte->act_dests.fwd_dests) + fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - fte->modify_mask |= + fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); goto out; } @@ -658,12 +683,33 @@ static void del_sw_hw_rule(struct fs_node *node) kfree(rule); } +static void switch_to_pending_act_dests(struct fs_fte *fte) +{ + struct fs_node *iter; + + memcpy(&fte->act_dests, &fte->dup->act_dests, sizeof(fte->act_dests)); + + list_bulk_move_tail(&fte->node.children, + fte->dup->children.next, + fte->dup->children.prev); + + list_for_each_entry(iter, &fte->node.children, list) + iter->del_sw_func = del_sw_hw_rule; + + /* Make sure the fte isn't deleted + * as mlx5_del_flow_rules() decreases the refcount + * of the fte to trigger deletion. 
+ */ + tree_get_node(&fte->node); +} + static void del_hw_fte(struct fs_node *node) { struct mlx5_flow_root_namespace *root; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct mlx5_core_dev *dev; + bool pending_used = false; struct fs_fte *fte; int err; @@ -672,16 +718,35 @@ static void del_hw_fte(struct fs_node *node) fs_get_obj(ft, fg->node.parent); trace_mlx5_fs_del_fte(fte); - WARN_ON(fte->dests_size); + WARN_ON(fte->act_dests.dests_size); dev = get_dev(&ft->node); root = find_root(&ft->node); + + if (fte->dup && !list_empty(&fte->dup->children)) { + switch_to_pending_act_dests(fte); + pending_used = true; + } else { + /* Avoid double call to del_hw_fte */ + node->del_hw_func = NULL; + } + if (node->active) { - err = root->cmds->delete_fte(root, ft, fte); - if (err) - mlx5_core_warn(dev, - "flow steering can't delete fte in index %d of flow group id %d\n", - fte->index, fg->id); - node->active = false; + if (pending_used) { + err = root->cmds->update_fte(root, ft, fg, + fte->act_dests.modify_mask, fte); + if (err) + mlx5_core_warn(dev, + "flow steering can't update to pending rule in index %d of flow group id %d\n", + fte->index, fg->id); + fte->act_dests.modify_mask = 0; + } else { + err = root->cmds->delete_fte(root, ft, fte); + if (err) + mlx5_core_warn(dev, + "flow steering can't delete fte in index %d of flow group id %d\n", + fte->index, fg->id); + node->active = false; + } } } @@ -700,6 +765,7 @@ static void del_sw_fte(struct fs_node *node) rhash_fte); WARN_ON(err); ida_free(&fg->fte_allocator, fte->index - fg->start_index); + kvfree(fte->dup); kmem_cache_free(steering->ftes_cache, fte); } @@ -782,8 +848,8 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, memcpy(fte->val, &spec->match_value, sizeof(fte->val)); fte->node.type = FS_TYPE_FLOW_ENTRY; - fte->action = *flow_act; - fte->flow_context = spec->flow_context; + fte->act_dests.action = *flow_act; + fte->act_dests.flow_context = spec->flow_context; tree_init_node(&fte->node, del_hw_fte, del_sw_fte); @@ -1103,18 +1169,45 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio return err; } +static bool rule_is_pending(struct fs_fte *fte, struct mlx5_flow_rule *rule) +{ + struct mlx5_flow_rule *tmp_rule; + struct fs_node *iter; + + if (!fte->dup || list_empty(&fte->dup->children)) + return false; + + list_for_each_entry(iter, &fte->dup->children, list) { + tmp_rule = container_of(iter, struct mlx5_flow_rule, node); + + if (tmp_rule == rule) + return true; + } + + return false; +} + static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, struct mlx5_flow_destination *dest) { struct mlx5_flow_root_namespace *root; + struct fs_fte_action *act_dests; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; + bool pending = false; struct fs_fte *fte; int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); int err = 0; fs_get_obj(fte, rule->node.parent); - if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + + pending = rule_is_pending(fte, rule); + if (pending) + act_dests = &fte->dup->act_dests; + else + act_dests = &fte->act_dests; + + if (!(act_dests->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return -EINVAL; down_write_ref_node(&fte->node, false); fs_get_obj(fg, fte->node.parent); @@ -1122,8 +1215,9 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, memcpy(&rule->dest_attr, dest, sizeof(*dest)); root = find_root(&ft->node); - err = root->cmds->update_fte(root, ft, fg, - modify_mask, fte); + if (!pending) + err 
= root->cmds->update_fte(root, ft, fg, + modify_mask, fte); up_write_ref_node(&fte->node, false); return err; @@ -1453,6 +1547,16 @@ static struct mlx5_flow_handle *alloc_handle(int num_rules) return handle; } +static void destroy_flow_handle_dup(struct mlx5_flow_handle *handle, + int i) +{ + for (; --i >= 0;) { + list_del(&handle->rule[i]->node.list); + kfree(handle->rule[i]); + } + kfree(handle); +} + static void destroy_flow_handle(struct fs_fte *fte, struct mlx5_flow_handle *handle, struct mlx5_flow_destination *dest, @@ -1460,7 +1564,7 @@ static void destroy_flow_handle(struct fs_fte *fte, { for (; --i >= 0;) { if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) { - fte->dests_size--; + fte->act_dests.dests_size--; list_del(&handle->rule[i]->node.list); kfree(handle->rule[i]); } @@ -1468,6 +1572,61 @@ static void destroy_flow_handle(struct fs_fte *fte, kfree(handle); } +static struct mlx5_flow_handle * +create_flow_handle_dup(struct list_head *children, + struct mlx5_flow_destination *dest, + int dest_num, + struct fs_fte_action *act_dests) +{ + static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); + static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); + struct mlx5_flow_rule *rule = NULL; + struct mlx5_flow_handle *handle; + int i = 0; + int type; + + handle = alloc_handle((dest_num) ? dest_num : 1); + if (!handle) + return NULL; + + do { + rule = alloc_rule(dest + i); + if (!rule) + goto free_rules; + + /* Add dest to dests list- we need flow tables to be in the + * end of the list for forward to next prio rules. + */ + tree_init_node(&rule->node, NULL, del_sw_hw_dup_rule); + if (dest && + dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) + list_add(&rule->node.list, children); + else + list_add_tail(&rule->node.list, children); + + if (dest) { + act_dests->dests_size++; + + if (is_fwd_dest_type(dest[i].type)) + act_dests->fwd_dests++; + + type = dest[i].type == + MLX5_FLOW_DESTINATION_TYPE_COUNTER; + act_dests->modify_mask |= type ? 
count : dst; + } + handle->rule[i] = rule; + } while (++i < dest_num); + + return handle; + +free_rules: + destroy_flow_handle_dup(handle, i); + act_dests->dests_size = 0; + act_dests->fwd_dests = 0; + + return NULL; +} + static struct mlx5_flow_handle * create_flow_handle(struct fs_fte *fte, struct mlx5_flow_destination *dest, @@ -1510,10 +1669,10 @@ create_flow_handle(struct fs_fte *fte, else list_add_tail(&rule->node.list, &fte->node.children); if (dest) { - fte->dests_size++; + fte->act_dests.dests_size++; if (is_fwd_dest_type(dest[i].type)) - fte->fwd_dests++; + fte->act_dests.fwd_dests++; type = dest[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER; @@ -1774,17 +1933,17 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_context *flow_context, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act, &fte->action)) { + if (check_conflicting_actions(flow_act, &fte->act_dests.action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting actions\n"); return -EEXIST; } if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) && - fte->flow_context.flow_tag != flow_context->flow_tag) { + fte->act_dests.flow_context.flow_tag != flow_context->flow_tag) { mlx5_core_warn(get_dev(&fte->node), "FTE flow tag %u already exists with different flow tag %u\n", - fte->flow_context.flow_tag, + fte->act_dests.flow_context.flow_tag, flow_context->flow_tag); return -EEXIST; } @@ -1808,12 +1967,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, if (ret) return ERR_PTR(ret); - old_action = fte->action.action; - fte->action.action |= flow_act->action; + old_action = fte->act_dests.action.action; + fte->act_dests.action.action |= flow_act->action; handle = add_rule_fte(fte, fg, dest, dest_num, old_action != flow_act->action); if (IS_ERR(handle)) { - fte->action.action = old_action; + fte->act_dests.action.action = old_action; return handle; } trace_mlx5_fs_set_fte(fte, false); @@ -1961,6 +2120,62 @@ lookup_fte_locked(struct mlx5_flow_group *g, return fte_tmp; } +/* Native capability lacks support for adding an additional match with the same value + * to the same flow group. To accommodate the NO APPEND flag in these scenarios, + * we include the new rule in the existing flow table entry (fte) without immediate + * hardware commitment. When a request is made to delete the corresponding hardware rule, + * we then commit the pending rule to hardware. 
+ */ +static struct mlx5_flow_handle * +add_rule_dup_match_fte(struct fs_fte *fte, + const struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num) +{ + struct mlx5_flow_handle *handle; + struct fs_fte_dup *dup; + int i = 0; + + if (!fte->dup) { + dup = kvzalloc(sizeof(*dup), GFP_KERNEL); + if (!dup) + return ERR_PTR(-ENOMEM); + /* dup will be freed when the fte is freed + * this way we don't allocate / free dup on every rule deletion + * or creation + */ + INIT_LIST_HEAD(&dup->children); + fte->dup = dup; + } + + if (!list_empty(&fte->dup->children)) { + mlx5_core_warn(get_dev(&fte->node), + "Can have only a single duplicate rule\n"); + + return ERR_PTR(-EEXIST); + } + + fte->dup->act_dests.action = *flow_act; + fte->dup->act_dests.flow_context = spec->flow_context; + fte->dup->act_dests.dests_size = 0; + fte->dup->act_dests.fwd_dests = 0; + fte->dup->act_dests.modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); + + handle = create_flow_handle_dup(&fte->dup->children, + dest, dest_num, + &fte->dup->act_dests); + if (!handle) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < handle->num_rules; i++) { + tree_add_node(&handle->rule[i]->node, &fte->node); + trace_mlx5_fs_add_rule(handle->rule[i]); + } + + return handle; +} + static struct mlx5_flow_handle * try_add_to_existing_fg(struct mlx5_flow_table *ft, struct list_head *match_head, @@ -1971,6 +2186,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, int ft_version) { struct mlx5_flow_steering *steering = get_steering(&ft->node); + struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct mlx5_flow_group *g; struct mlx5_flow_handle *rule; struct match_list *iter; @@ -1984,7 +2200,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, return ERR_PTR(-ENOMEM); search_again_locked: - if (flow_act->flags & FLOW_ACT_NO_APPEND) + if (flow_act->flags & FLOW_ACT_NO_APPEND && + (root->cmds->get_capabilities(root, root->table_type) & + MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH)) goto skip_search; version = matched_fgs_get_version(match_head); /* Try to find an fte with identical match value and attempt update its @@ -1997,7 +2215,10 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, fte_tmp = lookup_fte_locked(g, spec->match_value, take_write); if (!fte_tmp) continue; - rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); + if (flow_act->flags & FLOW_ACT_NO_APPEND) + rule = add_rule_dup_match_fte(fte_tmp, spec, flow_act, dest, dest_num); + else + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); /* No error check needed here, because insert_fte() is not called */ up_write_ref_node(&fte_tmp->node, false); tree_put_node(&fte_tmp->node, false); @@ -2265,12 +2486,10 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) tree_remove_node(&handle->rule[i]->node, true); if (list_empty(&fte->node.children)) { fte->node.del_hw_func(&fte->node); - /* Avoid double call to del_hw_fte */ - fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); - } else if (fte->dests_size) { - if (fte->modify_mask) + } else if (fte->act_dests.dests_size) { + if (fte->act_dests.modify_mask) modify_fte(fte); up_write_ref_node(&fte->node, false); } else { @@ -3590,8 +3809,8 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) } EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn); -static struct mlx5_flow_root_namespace -*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type 
ns_type) +struct mlx5_flow_root_namespace * +mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type) { struct mlx5_flow_namespace *ns; @@ -3614,7 +3833,7 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr; int err; - root = get_root_namespace(dev, ns_type); + root = mlx5_get_root_namespace(dev, ns_type); if (!root) return ERR_PTR(-EOPNOTSUPP); @@ -3639,7 +3858,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root; - root = get_root_namespace(dev, modify_hdr->ns_type); + root = mlx5_get_root_namespace(dev, modify_hdr->ns_type); if (WARN_ON(!root)) return; root->cmds->modify_header_dealloc(root, modify_hdr); @@ -3655,7 +3874,7 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, struct mlx5_flow_root_namespace *root; int err; - root = get_root_namespace(dev, ns_type); + root = mlx5_get_root_namespace(dev, ns_type); if (!root) return ERR_PTR(-EOPNOTSUPP); @@ -3681,7 +3900,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root; - root = get_root_namespace(dev, pkt_reformat->ns_type); + root = mlx5_get_root_namespace(dev, pkt_reformat->ns_type); if (WARN_ON(!root)) return; root->cmds->packet_reformat_dealloc(root, pkt_reformat); @@ -3703,7 +3922,7 @@ mlx5_create_match_definer(struct mlx5_core_dev *dev, struct mlx5_flow_definer *definer; int id; - root = get_root_namespace(dev, ns_type); + root = mlx5_get_root_namespace(dev, ns_type); if (!root) return ERR_PTR(-EOPNOTSUPP); @@ -3727,7 +3946,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root; - root = get_root_namespace(dev, definer->ns_type); + root = mlx5_get_root_namespace(dev, definer->ns_type); if (WARN_ON(!root)) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 78eb6b7097e1a4..964937f17cf54b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -110,7 +110,9 @@ enum fs_flow_table_type { FS_FT_RDMA_RX = 0X7, FS_FT_RDMA_TX = 0X8, FS_FT_PORT_SEL = 0X9, - FS_FT_MAX_TYPE = FS_FT_PORT_SEL, + FS_FT_FDB_RX = 0xa, + FS_FT_FDB_TX = 0xb, + FS_FT_MAX_TYPE = FS_FT_FDB_TX, }; enum fs_flow_table_op_mod { @@ -131,6 +133,7 @@ enum mlx5_flow_steering_capabilty { MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0, MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1, MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2, + MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3, }; struct mlx5_flow_steering { @@ -228,20 +231,29 @@ struct mlx5_ft_underlay_qp { MLX5_BYTE_OFF(fte_match_param, \ MLX5_FTE_MATCH_PARAM_RESERVED))) +struct fs_fte_action { + int modify_mask; + u32 dests_size; + u32 fwd_dests; + struct mlx5_flow_context flow_context; + struct mlx5_flow_act action; +}; + +struct fs_fte_dup { + struct list_head children; + struct fs_fte_action act_dests; +}; + /* Type of children is mlx5_flow_rule */ struct fs_fte { struct fs_node node; struct mlx5_fs_dr_rule fs_dr_rule; u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; - u32 dests_size; - u32 fwd_dests; + struct fs_fte_action act_dests; + struct fs_fte_dup *dup; u32 index; - struct mlx5_flow_context flow_context; - struct mlx5_flow_act action; enum fs_fte_status status; - struct mlx5_fc *counter; struct rhash_head hash; - int modify_mask; }; /* Type of children is mlx5_flow_table/namespace */ @@ -368,7 +380,9 @@ 
struct mlx5_flow_root_namespace *find_root(struct fs_node *node); (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \ (type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \ (type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \ - (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\ + (type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ + (type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ + (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\ ) #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index b61b7d96611413..76ad46bf477d62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -224,6 +224,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, mcam_reg)) { mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128); mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F); + mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9180_0x91FF); } if (MLX5_CAP_GEN(dev, qcam_reg)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index b43ca0b762c306..4f55e55ecb5513 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -26,6 +26,7 @@ struct mlx5_fw_reset { struct work_struct reset_now_work; struct work_struct reset_abort_work; unsigned long reset_flags; + u8 reset_method; struct timer_list timer; struct completion done; int ret; @@ -95,7 +96,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level, } static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, - u8 *reset_type, u8 *reset_state) + u8 *reset_type, u8 *reset_state, u8 *reset_method) { u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; @@ -111,13 +112,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, *reset_type = MLX5_GET(mfrl_reg, out, reset_type); if (reset_state) *reset_state = MLX5_GET(mfrl_reg, out, reset_state); + if (reset_method) + *reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method); return 0; } int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type) { - return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL); + return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL); +} + +static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev, + u8 *reset_method) +{ + if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) { + *reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE; + return 0; + } + + return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method); } static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, @@ -125,7 +139,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, { u8 reset_state; - if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state)) + if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL)) goto out; if (!reset_state) @@ -398,7 +412,8 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id) return 0; } -static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev) +static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev, + u8 reset_method) { u16 dev_id; int err; @@ -409,9 +424,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev) } #if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) - err = 
mlx5_check_hotplug_interrupt(dev); - if (err) - return false; + if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) { + err = mlx5_check_hotplug_interrupt(dev); + if (err) + return false; + } #endif err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); @@ -427,8 +444,12 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) struct mlx5_core_dev *dev = fw_reset->dev; int err; - if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) || - !mlx5_is_reset_now_capable(dev)) { + err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method); + if (err) + mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err); + + if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) || + !mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) { err = mlx5_fw_reset_set_reset_sync_nack(dev); mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s", err ? "Failed" : "Sent"); @@ -444,21 +465,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n"); } -static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) +static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id) { struct pci_bus *bridge_bus = dev->pdev->bus; struct pci_dev *bridge = bridge_bus->self; unsigned long timeout; struct pci_dev *sdev; - u16 reg16, dev_id; int cap, err; + u16 reg16; - err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); - if (err) - return pcibios_err_to_errno(err); - err = mlx5_check_dev_ids(dev, dev_id); - if (err) - return err; cap = pci_find_capability(bridge, PCI_CAP_ID_EXP); if (!cap) return -EOPNOTSUPP; @@ -528,6 +543,44 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) return err; } +static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev) +{ + if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) + return -EOPNOTSUPP; + + return pci_reset_bus(dev->pdev); +} + +static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method) +{ + u16 dev_id; + int err; + + err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); + if (err) + return pcibios_err_to_errno(err); + err = mlx5_check_dev_ids(dev, dev_id); + if (err) + return err; + + switch (reset_method) { + case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE: + err = mlx5_pci_link_toggle(dev, dev_id); + if (err) + mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n"); + break; + case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET: + err = mlx5_pci_reset_bus(dev); + if (err) + mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n"); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + static void mlx5_sync_reset_now_event(struct work_struct *work) { struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, @@ -546,9 +599,9 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) goto done; } - err = mlx5_pci_link_toggle(dev); + err = mlx5_sync_pci_reset(dev, fw_reset->reset_method); if (err) { - mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err); + mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err); set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags); } @@ -610,9 +663,9 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work) mlx5_core_warn(dev, "Sync Reset, got reset action. 
rst_state = %u\n", rst_state); if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) { - err = mlx5_pci_link_toggle(dev); + err = mlx5_sync_pci_reset(dev, fw_reset->reset_method); if (err) { - mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err); + mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err); fw_reset->ret = err; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 26f8a11b89068e..9772327d512461 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -74,7 +74,7 @@ static int mlx5i_set_ringparam(struct net_device *dev, { struct mlx5e_priv *priv = mlx5i_epriv(dev); - return mlx5e_ethtool_set_ringparam(priv, param); + return mlx5e_ethtool_set_ringparam(priv, param, extack); } static void mlx5i_get_ringparam(struct net_device *dev, @@ -132,7 +132,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = mlx5i_epriv(netdev); - return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); + return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack); } static int mlx5i_get_ts_info(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index cf8045b926892e..8577db3308cc56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -445,6 +445,34 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports) return mlx5_cmd_modify_lag(dev0, ldev->ports, ports); } +static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev) +{ + struct net_device *ndev = NULL; + struct mlx5_lag *ldev; + unsigned long flags; + int i; + + spin_lock_irqsave(&lag_lock, flags); + ldev = mlx5_lag_dev(dev); + + if (!ldev) + goto unlock; + + for (i = 0; i < ldev->ports; i++) + if (ldev->tracker.netdev_state[i].tx_enabled) + ndev = ldev->pf[i].netdev; + if (!ndev) + ndev = ldev->pf[ldev->ports - 1].netdev; + + if (ndev) + dev_hold(ndev); + +unlock: + spin_unlock_irqrestore(&lag_lock, flags); + + return ndev; +} + void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker) { @@ -477,9 +505,18 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, } } - if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && - !(ldev->mode == MLX5_LAG_MODE_ROCE)) - mlx5_lag_drop_rule_setup(ldev, tracker); + if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { + struct net_device *ndev = mlx5_lag_active_backup_get_netdev(dev0); + + if(!(ldev->mode == MLX5_LAG_MODE_ROCE)) + mlx5_lag_drop_rule_setup(ldev, tracker); + /** Only sriov and roce lag should have tracker->tx_type set so + * no need to check the mode + */ + blocking_notifier_call_chain(&dev0->priv.lag_nh, + MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE, + ndev); + } } static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, @@ -613,6 +650,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, mlx5_core_err(dev0, "Failed to deactivate RoCE LAG; driver restart required\n"); } + BLOCKING_INIT_NOTIFIER_HEAD(&dev0->priv.lag_nh); return err; } @@ -1492,38 +1530,6 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev) mlx5_queue_bond_work(ldev, 0); } -struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) -{ - struct net_device *ndev = NULL; - struct mlx5_lag *ldev; - unsigned long flags; - int i; - - spin_lock_irqsave(&lag_lock, flags); - ldev = 
mlx5_lag_dev(dev); - - if (!(ldev && __mlx5_lag_is_roce(ldev))) - goto unlock; - - if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { - for (i = 0; i < ldev->ports; i++) - if (ldev->tracker.netdev_state[i].tx_enabled) - ndev = ldev->pf[i].netdev; - if (!ndev) - ndev = ldev->pf[ldev->ports - 1].netdev; - } else { - ndev = ldev->pf[MLX5_LAG_P1].netdev; - } - if (ndev) - dev_hold(ndev); - -unlock: - spin_unlock_irqrestore(&lag_lock, flags); - - return ndev; -} -EXPORT_SYMBOL(mlx5_lag_get_roce_netdev); - u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, struct net_device *slave) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 0361741632a6d6..b306ae79bf97a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -38,6 +38,10 @@ #include "lib/eq.h" #include "en.h" #include "clock.h" +#ifdef CONFIG_X86 +#include +#include +#endif /* CONFIG_X86 */ enum { MLX5_PIN_MODE_IN = 0x0, @@ -148,6 +152,87 @@ static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size) MLX5_REG_MTUTC, 0, 1); } +#ifdef CONFIG_X86 +static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0}; + int err; + + if (!MLX5_CAP_MCAM_REG3(dev, mtptm)) + return false; + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM, + 0, 0); + if (err) + return false; + + return !!MLX5_GET(mtptm_reg, out, psta); +} + +static int mlx5_mtctr_syncdevicetime(ktime_t *device_time, + struct system_counterval_t *sys_counterval, + void *ctx) +{ + u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0}; + struct mlx5_core_dev *mdev = ctx; + bool real_time_mode; + u64 host, device; + int err; + + real_time_mode = mlx5_real_time_mode(mdev); + + MLX5_SET(mtctr_reg, in, first_clock_timestamp_request, + MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK); + MLX5_SET(mtctr_reg, in, second_clock_timestamp_request, + real_time_mode ? 
MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK : + MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR, + 0, 0); + if (err) + return err; + + if (!MLX5_GET(mtctr_reg, out, first_clock_valid) || + !MLX5_GET(mtctr_reg, out, second_clock_valid)) + return -EINVAL; + + host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp); + *sys_counterval = (struct system_counterval_t) { + .cycles = host, + .cs_id = CSID_X86_ART, + .use_nsecs = true, + }; + + device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp); + if (real_time_mode) + *device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX)); + else + *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device); + + return 0; +} + +static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct system_time_snapshot history_begin = {0}; + struct mlx5_core_dev *mdev; + + mdev = container_of(clock, struct mlx5_core_dev, clock); + + if (!mlx5_is_ptm_source_time_available(mdev)) + return -EBUSY; + + ktime_get_snapshot(&history_begin); + + return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev, + &history_begin, cts); +} +#endif /* CONFIG_X86 */ + static u64 mlx5_read_time(struct mlx5_core_dev *dev, struct ptp_system_timestamp *sts, bool real_time) @@ -1034,6 +1119,12 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev) if (MLX5_CAP_MCAM_REG(mdev, mtutc)) mlx5_init_timer_max_freq_adjustment(mdev); +#ifdef CONFIG_X86 + if (MLX5_CAP_MCAM_REG3(mdev, mtptm) && + MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART)) + clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp; +#endif /* CONFIG_X86 */ + mlx5_timecounter_init(mdev); mlx5_init_clock_info(mdev); mlx5_init_overflow_period(clock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c index d0b595ba611014..432c98f2626db9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c @@ -24,6 +24,11 @@ pci_write_config_dword((dev)->pdev, (dev)->vsc_addr + (offset), (val)) #define VSC_MAX_RETRIES 2048 +/* Reading VSC registers can take relatively long time. + * Yield the cpu every 128 registers read. 
+ */ +#define VSC_GW_READ_BLOCK_COUNT 128 + enum { VSC_CTRL_OFFSET = 0x4, VSC_COUNTER_OFFSET = 0x8, @@ -273,6 +278,7 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data, { unsigned int next_read_addr = 0; unsigned int read_addr = 0; + unsigned int count = 0; while (read_addr < length) { if (mlx5_vsc_gw_read_fast(dev, read_addr, &next_read_addr, @@ -280,6 +286,10 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data, return read_addr; read_addr = next_read_addr; + if (++count == VSC_GW_READ_BLOCK_COUNT) { + cond_resched(); + count = 0; + } } return length; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2ec33c4a2a3ae7..220a9ac75c8ba0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -454,8 +454,8 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx) static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx) { + bool do_set = false, mem_page_fault = false; void *set_hca_cap; - bool do_set = false; int err; if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) || @@ -470,6 +470,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx) memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur, MLX5_ST_SZ_BYTES(odp_cap)); + /* For best performance, enable memory scheme ODP only when + * it has page prefetch enabled. + */ + if (MLX5_CAP_ODP_MAX(dev, mem_page_fault) && + MLX5_CAP_ODP_MAX(dev, memory_page_fault_scheme_cap.page_prefetch)) { + mem_page_fault = true; + do_set = true; + MLX5_SET(odp_cap, set_hca_cap, mem_page_fault, mem_page_fault); + goto set; + } + #define ODP_CAP_SET_MAX(dev, field) \ do { \ u32 _res = MLX5_CAP_ODP_MAX(dev, field); \ @@ -479,25 +490,28 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx) } \ } while (0) - ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive); - ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive); - ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive); - ODP_CAP_SET_MAX(dev, xrc_odp_caps.send); - ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive); - ODP_CAP_SET_MAX(dev, xrc_odp_caps.write); - ODP_CAP_SET_MAX(dev, xrc_odp_caps.read); - ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic); - ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive); - ODP_CAP_SET_MAX(dev, dc_odp_caps.send); - ODP_CAP_SET_MAX(dev, dc_odp_caps.receive); - ODP_CAP_SET_MAX(dev, dc_odp_caps.write); - ODP_CAP_SET_MAX(dev, dc_odp_caps.read); - ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic); - - if (!do_set) - return 0; - - return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.ud_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.rc_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.send); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.receive); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.write); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.read); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.atomic); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.send); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.receive); + ODP_CAP_SET_MAX(dev, 
transport_page_fault_scheme_cap.dc_odp_caps.write); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read); + ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic); + +set: + if (do_set) + err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP); + + mlx5_core_dbg(dev, "Using ODP %s scheme\n", + mem_page_fault ? "memory" : "transport"); + return err; } static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev) @@ -619,6 +633,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload)) MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_with_driver_unload, 1); + if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method)) + MLX5_SET(cmd_hca_cap, set_hca_cap, + pcie_reset_using_hotreset_method, 1); if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports)) MLX5_SET(cmd_hca_cap, @@ -923,6 +940,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, } mlx5_pci_vsc_init(dev); + + err = pci_enable_ptm(pdev, NULL); + if (err) + mlx5_core_info(dev, "PTM is not supported by PCIe\n"); + return 0; err_clr_master: @@ -939,6 +961,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev) * before removing the pci bars */ mlx5_drain_health_wq(dev); + pci_disable_ptm(dev->pdev); iounmap(dev->iseg); release_bar(dev->pdev); mlx5_pci_disable_device(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index d894a88fa9f283..972e8e9df585ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -608,6 +608,11 @@ enum { RELEASE_ALL_PAGES_MASK = 0x4000, }; +/* This limit is based on the capability of the firmware as it cannot release + * more than 50000 back to the host in one go. + */ +#define MAX_RECLAIM_NPAGES (-50000) + static int req_pages_handler(struct notifier_block *nb, unsigned long type, void *data) { @@ -639,7 +644,16 @@ static int req_pages_handler(struct notifier_block *nb, req->dev = dev; req->func_id = func_id; - req->npages = npages; + + /* npages > 0 means HCA asking host to allocate/give pages, + * npages < 0 means HCA asking host to reclaim back the pages allocated. + * Here we are restricting the maximum number of pages that can be + * reclaimed to be MAX_RECLAIM_NPAGES. Note that MAX_RECLAIM_NPAGES is + * a negative value. + * Since MAX_RECLAIM is negative, we are using max() to restrict + * req->npages (and not min ()). 
+ */ + req->npages = max_t(s32, npages, MAX_RECLAIM_NPAGES); req->ec_function = ec_function; req->release_all = release_all; INIT_WORK(&req->work, pages_work_handler); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 8c2a34a0d6be84..baefb9a3fa05ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -251,9 +251,9 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev, output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level); output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out, - flow_table_context.sw_owner_icm_root_1); + flow_table_context.sws.sw_owner_icm_root_1); output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out, - flow_table_context.sw_owner_icm_root_0); + flow_table_context.sws.sw_owner_icm_root_0); return 0; } @@ -480,15 +480,15 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev, */ if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, attr->icm_addr_rx); + sws.sw_owner_icm_root_0, attr->icm_addr_rx); } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, attr->icm_addr_tx); + sws.sw_owner_icm_root_0, attr->icm_addr_tx); } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) { MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_0, attr->icm_addr_rx); + sws.sw_owner_icm_root_0, attr->icm_addr_rx); MLX5_SET64(flow_table_context, ft_mdev, - sw_owner_icm_root_1, attr->icm_addr_tx); + sws.sw_owner_icm_root_1, attr->icm_addr_tx); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 50c2554c9ccf24..833cb68c744f2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -9,14 +9,6 @@ #include "fs_dr.h" #include "dr_types.h" -static bool dr_is_fw_term_table(struct mlx5_flow_table *ft) -{ - if (ft->flags & MLX5_FLOW_TABLE_TERMINATION) - return true; - - return false; -} - static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, @@ -70,7 +62,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns, u32 flags; int err; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr, next_ft); @@ -110,7 +102,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5dr_action *action = ft->fs_dr_table.miss_action; int err; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft); err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table); @@ -135,7 +127,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft); return set_miss_action(ns, ft, next_ft); @@ -154,7 +146,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns, match_criteria_enable); struct mlx5dr_match_parameters mask; - if (dr_is_fw_term_table(ft)) + if 
(mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg); @@ -179,7 +171,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *fg) { - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg); return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher); @@ -279,7 +271,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, int err = 0; int i; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte); actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions), @@ -306,12 +298,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, match_sz = sizeof(fte->val); /* Drop reformat action bit if destination vport set with reformat */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { list_for_each_entry(dst, &fte->node.children, node.list) { if (!contain_vport_reformat_action(dst)) continue; - fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; break; } } @@ -321,7 +313,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, * TX: modify header -> push vlan -> encap * RX: decap -> pop vlan -> modify header */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { enum mlx5dr_action_reformat_type decap_type = DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2; @@ -337,26 +329,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { bool is_decap; - if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { + if (fte->act_dests.action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { err = -EINVAL; mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n"); goto free_actions; } - is_decap = fte->action.pkt_reformat->reformat_type == + is_decap = fte->act_dests.action.pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; if (is_decap) actions[num_actions++] = - fte->action.pkt_reformat->action.dr_action; + fte->act_dests.action.pkt_reformat->action.dr_action; else delay_encap_set = true; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { tmp_action = mlx5dr_action_create_pop_vlan(); if (!tmp_action) { @@ -367,7 +359,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) { tmp_action = mlx5dr_action_create_pop_vlan(); if (!tmp_action) { @@ -378,12 +370,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) actions[num_actions++] = - fte->action.modify_hdr->action.dr_action; + 
fte->act_dests.action.modify_hdr->action.dr_action; - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]); if (!tmp_action) { err = -ENOMEM; goto free_actions; @@ -392,8 +384,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]); if (!tmp_action) { err = -ENOMEM; goto free_actions; @@ -404,11 +396,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, if (delay_encap_set) actions[num_actions++] = - fte->action.pkt_reformat->action.dr_action; + fte->act_dests.action.pkt_reformat->action.dr_action; /* The order of the actions below is not important */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { tmp_action = mlx5dr_action_create_drop(); if (!tmp_action) { err = -ENOMEM; @@ -418,9 +410,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, term_actions[num_term_actions++].dest = tmp_action; } - if (fte->flow_context.flow_tag) { + if (fte->act_dests.flow_context.flow_tag) { tmp_action = - mlx5dr_action_create_tag(fte->flow_context.flow_tag); + mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag); if (!tmp_action) { err = -ENOMEM; goto free_actions; @@ -429,7 +421,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { list_for_each_entry(dst, &fte->node.children, node.list) { enum mlx5_flow_destination_type type = dst->dest_attr.type; u32 id; @@ -510,7 +502,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { list_for_each_entry(dst, &fte->node.children, node.list) { u32 id; @@ -537,19 +529,21 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { - if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { + struct mlx5_flow_act *action = &fte->act_dests.action; + + if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) { err = -EOPNOTSUPP; goto free_actions; } tmp_action = mlx5dr_action_create_aso(domain, - fte->action.exe_aso.object_id, - fte->action.exe_aso.return_reg_id, - fte->action.exe_aso.type, - fte->action.exe_aso.flow_meter.init_color, - fte->action.exe_aso.flow_meter.meter_idx); + action->exe_aso.object_id, + action->exe_aso.return_reg_id, + action->exe_aso.type, + action->exe_aso.flow_meter.init_color, + action->exe_aso.flow_meter.meter_idx); if (!tmp_action) { err = -ENOMEM; goto free_actions; @@ -576,8 +570,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = term_actions->dest; } else if 
(num_term_actions > 1) { bool ignore_flow_level = - !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); - u32 flow_source = fte->flow_context.flow_source; + !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + u32 flow_source = fte->act_dests.flow_context.flow_source; if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { @@ -601,7 +595,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, ¶ms, num_actions, actions, - fte->flow_context.flow_source); + fte->act_dests.flow_context.flow_source); if (!rule) { err = -EINVAL; goto free_actions; @@ -740,7 +734,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, int err; int i; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte); err = mlx5dr_rule_destroy(rule->dr_rule); @@ -765,7 +759,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, struct fs_fte fte_tmp = {}; int ret; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte); /* Backup current dr rule details */ @@ -819,11 +813,11 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns) static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, enum fs_flow_table_type ft_type) { - u32 steering_caps = 0; + u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH; if (ft_type != FS_FT_FDB || MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5) - return 0; + return steering_caps; steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX; steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile new file mode 100644 index 00000000000000..c78512eed8d73a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +subdir-ccflags-y += -I$(src)/.. 
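The mlx5_fs_cmd_is_fw_term_table() helper that replaces the removed static dr_is_fw_term_table() is defined outside these hunks; judging from the removed body above, it presumably reduces to the same flag test, roughly:

static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft)
{
	/* termination tables keep going through the FW command path */
	return !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
}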
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h new file mode 100644 index 00000000000000..f39d636ff39aee --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h @@ -0,0 +1,926 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_H_ +#define MLX5HWS_H_ + +struct mlx5hws_context; +struct mlx5hws_table; +struct mlx5hws_matcher; +struct mlx5hws_rule; + +enum mlx5hws_table_type { + MLX5HWS_TABLE_TYPE_FDB, + MLX5HWS_TABLE_TYPE_MAX, +}; + +enum mlx5hws_matcher_resource_mode { + /* Allocate resources based on number of rules with minimal failure probability */ + MLX5HWS_MATCHER_RESOURCE_MODE_RULE, + /* Allocate fixed size hash table based on given column and rows */ + MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE, +}; + +enum mlx5hws_action_type { + MLX5HWS_ACTION_TYP_LAST, + MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2, + MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2, + MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2, + MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3, + MLX5HWS_ACTION_TYP_DROP, + MLX5HWS_ACTION_TYP_MISS, + MLX5HWS_ACTION_TYP_TBL, + MLX5HWS_ACTION_TYP_CTR, + MLX5HWS_ACTION_TYP_TAG, + MLX5HWS_ACTION_TYP_MODIFY_HDR, + MLX5HWS_ACTION_TYP_VPORT, + MLX5HWS_ACTION_TYP_POP_VLAN, + MLX5HWS_ACTION_TYP_PUSH_VLAN, + MLX5HWS_ACTION_TYP_ASO_METER, + MLX5HWS_ACTION_TYP_INSERT_HEADER, + MLX5HWS_ACTION_TYP_REMOVE_HEADER, + MLX5HWS_ACTION_TYP_RANGE, + MLX5HWS_ACTION_TYP_SAMPLER, + MLX5HWS_ACTION_TYP_DEST_ARRAY, + MLX5HWS_ACTION_TYP_MAX, +}; + +enum mlx5hws_action_flags { + MLX5HWS_ACTION_FLAG_HWS_FDB = 1 << 0, + /* Shared action can be used over a few threads, since the + * data is written only once at the creation of the action. 
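The action-type enum above is consumed as a MLX5HWS_ACTION_TYP_LAST-terminated array when building action templates (mlx5hws_action_template_create(), declared further down in this header). A minimal sketch for an FDB rule that counts and then jumps to a table; the example_ wrapper name is illustrative only:

static struct mlx5hws_action_template *example_count_then_goto_at(void)
{
	/* Order follows the supported FDB sequence: counter before the
	 * terminating destination, and the array is closed by TYP_LAST.
	 */
	static enum mlx5hws_action_type types[] = {
		MLX5HWS_ACTION_TYP_CTR,
		MLX5HWS_ACTION_TYP_TBL,
		MLX5HWS_ACTION_TYP_LAST,
	};

	return mlx5hws_action_template_create(types);
}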
+ */ + MLX5HWS_ACTION_FLAG_SHARED = 1 << 1, +}; + +enum mlx5hws_action_aso_meter_color { + MLX5HWS_ACTION_ASO_METER_COLOR_RED = 0x0, + MLX5HWS_ACTION_ASO_METER_COLOR_YELLOW = 0x1, + MLX5HWS_ACTION_ASO_METER_COLOR_GREEN = 0x2, + MLX5HWS_ACTION_ASO_METER_COLOR_UNDEFINED = 0x3, +}; + +enum mlx5hws_send_queue_actions { + /* Start executing all pending queued rules */ + MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0, + /* Start executing all pending queued rules wait till completion */ + MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1, +}; + +struct mlx5hws_context_attr { + u16 queues; + u16 queue_size; + bool bwc; /* add support for backward compatible API*/ +}; + +struct mlx5hws_table_attr { + enum mlx5hws_table_type type; + u32 level; +}; + +enum mlx5hws_matcher_flow_src { + MLX5HWS_MATCHER_FLOW_SRC_ANY = 0x0, + MLX5HWS_MATCHER_FLOW_SRC_WIRE = 0x1, + MLX5HWS_MATCHER_FLOW_SRC_VPORT = 0x2, +}; + +enum mlx5hws_matcher_insert_mode { + MLX5HWS_MATCHER_INSERT_BY_HASH = 0x0, + MLX5HWS_MATCHER_INSERT_BY_INDEX = 0x1, +}; + +enum mlx5hws_matcher_distribute_mode { + MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH = 0x0, + MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1, +}; + +struct mlx5hws_matcher_attr { + /* Processing priority inside table */ + u32 priority; + /* Provide all rules with unique rule_idx in num_log range to reduce locking */ + bool optimize_using_rule_idx; + /* Resource mode and corresponding size */ + enum mlx5hws_matcher_resource_mode mode; + /* Optimize insertion in case packet origin is the same for all rules */ + enum mlx5hws_matcher_flow_src optimize_flow_src; + /* Define the insertion and distribution modes for this matcher */ + enum mlx5hws_matcher_insert_mode insert_mode; + enum mlx5hws_matcher_distribute_mode distribute_mode; + /* Define whether the created matcher supports resizing into a bigger matcher */ + bool resizable; + union { + struct { + u8 sz_row_log; + u8 sz_col_log; + } table; + + struct { + u8 num_log; + } rule; + }; + /* Optional AT attach configuration - Max number of additional AT */ + u8 max_num_of_at_attach; +}; + +struct mlx5hws_rule_attr { + void *user_data; + /* Valid if matcher optimize_using_rule_idx is set or + * if matcher is configured to insert rules by index. + */ + u32 rule_idx; + u32 flow_source; + u16 queue_id; + u32 burst:1; +}; + +/* In actions that take offset, the offset is unique, pointing to a single + * resource and the user should not reuse the same index because data changing + * is not atomic. + */ +struct mlx5hws_rule_action { + struct mlx5hws_action *action; + union { + struct { + u32 value; + } tag; + + struct { + u32 offset; + } counter; + + struct { + u32 offset; + u8 *data; + } modify_header; + + struct { + u32 offset; + u8 hdr_idx; + u8 *data; + } reformat; + + struct { + __be32 vlan_hdr; + } push_vlan; + + struct { + u32 offset; + enum mlx5hws_action_aso_meter_color init_color; + } aso_meter; + }; +}; + +struct mlx5hws_action_reformat_header { + size_t sz; + void *data; +}; + +struct mlx5hws_action_insert_header { + struct mlx5hws_action_reformat_header hdr; + /* PRM start anchor to which header will be inserted */ + u8 anchor; + /* Header insertion offset in bytes, from the start + * anchor to the location where new header will be inserted. + */ + u8 offset; + /* Indicates this header insertion adds encapsulation header to the packet, + * requiring device to update offloaded fields (for example IPv4 total length). 
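A sketch of filling struct mlx5hws_matcher_attr for the RULE resource mode, where only the expected rule count (as a log2 value) is supplied and the hash-table geometry is left to the implementation; the concrete numbers are illustrative:

struct mlx5hws_matcher_attr attr = {
	.priority		= 0,
	.mode			= MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
	.optimize_flow_src	= MLX5HWS_MATCHER_FLOW_SRC_ANY,
	.insert_mode		= MLX5HWS_MATCHER_INSERT_BY_HASH,
	.distribute_mode	= MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH,
	.resizable		= true,		/* allow a later resize_set_target() */
	.rule.num_log		= 16,		/* room for roughly 2^16 rules */
};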
+ */ + bool encap; +}; + +struct mlx5hws_action_remove_header_attr { + /* PRM start anchor from which header will be removed */ + u8 anchor; + /* Header remove offset in bytes, from the start + * anchor to the location where remove header starts. + */ + u8 offset; + /* Indicates the removed header size in bytes */ + size_t size; +}; + +struct mlx5hws_action_mh_pattern { + /* Byte size of modify actions provided by "data" */ + size_t sz; + /* PRM format modify actions pattern */ + __be64 *data; +}; + +struct mlx5hws_action_dest_attr { + /* Required destination action to forward the packet */ + struct mlx5hws_action *dest; + /* Optional reformat action */ + struct mlx5hws_action *reformat; +}; + +/** + * mlx5hws_is_supported - Check whether HWS is supported + * + * @mdev: The device to check. + * + * Return: true if supported, false otherwise. + */ +static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev) +{ + u8 ignore_flow_level_rtc_valid; + u8 wqe_based_flow_table_update; + + wqe_based_flow_table_update = + MLX5_CAP_GEN(mdev, wqe_based_flow_table_update_cap); + ignore_flow_level_rtc_valid = + MLX5_CAP_FLOWTABLE(mdev, + flow_table_properties_nic_receive.ignore_flow_level_rtc_valid); + + return wqe_based_flow_table_update && ignore_flow_level_rtc_valid; +} + +/** + * mlx5hws_context_open - Open a context used for direct rule insertion + * using hardware steering. + * + * @mdev: The device to be used for HWS. + * @attr: Attributes used for context open. + * + * Return: pointer to mlx5hws_context on success NULL otherwise. + */ +struct mlx5hws_context * +mlx5hws_context_open(struct mlx5_core_dev *mdev, + struct mlx5hws_context_attr *attr); + +/** + * mlx5hws_context_close - Close a context used for direct hardware steering. + * + * @ctx: mlx5hws context to close. + * + * Return: zero on success non zero otherwise. + */ +int mlx5hws_context_close(struct mlx5hws_context *ctx); + +/** + * mlx5hws_context_set_peer - Set a peer context. + * Each context can have multiple contexts as peers. + * + * @ctx: The context in which the peer_ctx will be peered to it. + * @peer_ctx: The peer context. + * @peer_vhca_id: The peer context vhca id. + */ +void mlx5hws_context_set_peer(struct mlx5hws_context *ctx, + struct mlx5hws_context *peer_ctx, + u16 peer_vhca_id); + +/** + * mlx5hws_table_create - Create a new direct rule table. + * Each table can contain multiple matchers. + * + * @ctx: The context in which the new table will be opened. + * @attr: Attributes used for table creation. + * + * Return: pointer to mlx5hws_table on success NULL otherwise. + */ +struct mlx5hws_table * +mlx5hws_table_create(struct mlx5hws_context *ctx, + struct mlx5hws_table_attr *attr); + +/** + * mlx5hws_table_destroy - Destroy direct rule table. + * + * @tbl: Table to destroy. + * + * Return: zero on success non zero otherwise. + */ +int mlx5hws_table_destroy(struct mlx5hws_table *tbl); + +/** + * mlx5hws_table_get_id() - Get ID of the flow table. + * + * @tbl:Table to get ID of. + * + * Return: ID of the table. + */ +u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl); + +/** + * mlx5hws_table_set_default_miss - Set default miss table for mlx5hws_table + * by using another mlx5hws_table. + * Traffic which all table matchers miss will be forwarded to miss table. + * + * @tbl: Source table + * @miss_tbl: Target (miss) table, or NULL to remove current miss table + * + * Return: zero on success non zero otherwise. 
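Putting the context and table calls above together, a sketch of bringing up a context and chaining two FDB tables; the example_ wrapper is illustrative and error unwinding is trimmed for brevity:

static int example_hws_init(struct mlx5_core_dev *mdev)
{
	struct mlx5hws_context_attr ctx_attr = { .queues = 16, .queue_size = 256 };
	struct mlx5hws_table_attr tbl_attr = {
		.type = MLX5HWS_TABLE_TYPE_FDB,
		.level = 1,
	};
	struct mlx5hws_context *ctx;
	struct mlx5hws_table *tbl, *miss;
	int err;

	if (!mlx5hws_is_supported(mdev))
		return -EOPNOTSUPP;

	ctx = mlx5hws_context_open(mdev, &ctx_attr);
	if (!ctx)
		return -EINVAL;

	tbl = mlx5hws_table_create(ctx, &tbl_attr);
	tbl_attr.level = 2;
	miss = mlx5hws_table_create(ctx, &tbl_attr);

	/* traffic missed by every matcher in tbl falls through to miss */
	err = mlx5hws_table_set_default_miss(tbl, miss);

	mlx5hws_table_destroy(tbl);
	mlx5hws_table_destroy(miss);
	mlx5hws_context_close(ctx);
	return err;
}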
+ */ +int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl, + struct mlx5hws_table *miss_tbl); + +/** + * mlx5hws_match_template_create - Create a new match template based on items mask. + * The match template will be used for matcher creation. + * + * @ctx: The context in which the new template will be created. + * @match_param: Describe the mask based on PRM match parameters. + * @match_param_sz: Size of match param buffer. + * @match_criteria_enable: Bitmap for each sub-set in match_criteria buffer. + * + * Return: Pointer to mlx5hws_match_template on success, NULL otherwise. + */ +struct mlx5hws_match_template * +mlx5hws_match_template_create(struct mlx5hws_context *ctx, + u32 *match_param, + u32 match_param_sz, + u8 match_criteria_enable); + +/** + * mlx5hws_match_template_destroy - Destroy a match template. + * + * @mt: Match template to destroy. + * + * Return: Zero on success, non-zero otherwise. + */ +int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt); + +/** + * mlx5hws_action_template_create - Create a new action template based on an action_type array. + * + * @action_type: An array of actions based on the order of actions which will be provided + * with rule_actions to mlx5hws_rule_create. The last action is marked + * using MLX5HWS_ACTION_TYP_LAST. + * + * Return: Pointer to mlx5hws_action_template on success, NULL otherwise. + */ +struct mlx5hws_action_template * +mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]); + +/** + * mlx5hws_action_template_destroy - Destroy action template. + * + * @at: Action template to destroy. + * + * Return: zero on success non zero otherwise. + */ +int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at); + +/** + * mlx5hws_matcher_create - Create a new direct rule matcher. + * + * Each matcher can contain multiple rules. Matchers on the table will be + * processed by priority. Matching fields and mask are described by the + * match template. In some cases, multiple match templates can be used on + * the same matcher. + * + * @table: The table in which the new matcher will be opened. + * @mt: Array of match templates to be used on matcher. + * @num_of_mt: Number of match templates in mt array. + * @at: Array of action templates to be used on matcher. + * @num_of_at: Number of action templates in at array. + * @attr: Attributes used for matcher creation. + * + * Return: Pointer to mlx5hws_matcher on success, NULL otherwise. + * + */ +struct mlx5hws_matcher * +mlx5hws_matcher_create(struct mlx5hws_table *table, + struct mlx5hws_match_template *mt[], + u8 num_of_mt, + struct mlx5hws_action_template *at[], + u8 num_of_at, + struct mlx5hws_matcher_attr *attr); + +/** + * mlx5hws_matcher_destroy - Destroy a direct rule matcher. + * + * @matcher: Matcher to destroy. + * + * Return: Zero on success, non-zero otherwise. + */ +int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher); + +/** + * mlx5hws_matcher_attach_at - Attach a new action template to a direct rule matcher. + * + * @matcher: Matcher to attach the action template to. + * @at: Action template to be attached to the matcher. + * + * Return: Zero on success, non-zero otherwise. + */ +int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher, + struct mlx5hws_action_template *at); + +/** + * mlx5hws_matcher_resize_set_target - Link two matchers and enable moving rules. 
+ * + * Both matchers must be in the same table type, must be created with the + * 'resizable' property, and should have the same characteristics (e.g., same + * match templates and action templates). It is the user's responsibility to + * ensure that the destination matcher is allocated with the appropriate size. + * + * Once the function is completed, the user is: + * - Allowed to move rules from the source into the destination matcher. + * - No longer allowed to insert rules into the source matcher. + * + * The user is always allowed to insert rules into the destination matcher and + * to delete rules from any matcher. + * + * @src_matcher: Source matcher for moving rules from. + * @dst_matcher: Destination matcher for moving rules to. + * + * Return: Zero on successful move, non-zero otherwise. + */ +int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_matcher *dst_matcher); + +/** + * mlx5hws_matcher_resize_rule_move - Enqueue moving rule operation. + * + * This function enqueues the operation of moving a rule from the source + * matcher to the destination matcher. + * + * @src_matcher: Matcher that the rule belongs to. + * @rule: The rule to move. + * @attr: Rule attributes. + * + * Return: Zero on success, non-zero otherwise. + */ +int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr); + +/** + * mlx5hws_rule_create - Enqueue create rule operation. + * + * @matcher: The matcher in which the new rule will be created. + * @mt_idx: Match template index to create the match with. + * @match_param: The match parameter PRM buffer used for value matching. + * @at_idx: Action template index to apply the actions with. + * @rule_actions: Rule actions to be executed on match. + * @attr: Rule creation attributes. + * @rule_handle: A valid rule handle. The handle doesn't require any initialization. + * + * Return: Zero on successful enqueue, non-zero otherwise. + */ +int mlx5hws_rule_create(struct mlx5hws_matcher *matcher, + u8 mt_idx, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *attr, + struct mlx5hws_rule *rule_handle); + +/** + * mlx5hws_rule_destroy - Enqueue destroy rule operation. + * + * @rule: The rule destruction to enqueue. + * @attr: Rule destruction attributes. + * + * Return: Zero on successful enqueue, non-zero otherwise. + */ +int mlx5hws_rule_destroy(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr); + +/** + * mlx5hws_rule_action_update - Enqueue update actions on an existing rule. + * + * @rule: A valid rule handle to update. + * @at_idx: Action template index to update the actions with. + * @rule_actions: Rule actions to be executed on match. + * @attr: Rule update attributes. + * + * Return: Zero on successful enqueue, non-zero otherwise. + */ +int mlx5hws_rule_action_update(struct mlx5hws_rule *rule, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *attr); + +/** + * mlx5hws_action_get_type - Get action type. + * + * @action: The action to get the type of. + * + * Return: action type. + */ +enum mlx5hws_action_type +mlx5hws_action_get_type(struct mlx5hws_action *action); + +/** + * mlx5hws_action_create_dest_drop - Create a direct rule drop action. + * + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: Pointer to mlx5hws_action on success, NULL otherwise. 
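A sketch tying the template, matcher and rule calls together for the non-blocking path. The storage behind rule must be provided by the caller and stay valid until the completion of the enqueued operation has been polled; mt_idx/at_idx 0 refer to the single templates this matcher is created with, and the example_ wrapper is illustrative only:

static int example_rule_insert(struct mlx5hws_table *tbl,
			       struct mlx5hws_match_template *mt,
			       struct mlx5hws_action_template *at,
			       u32 *match_param,
			       struct mlx5hws_rule_action *ractions,
			       struct mlx5hws_rule *rule)
{
	struct mlx5hws_matcher_attr m_attr = {
		.mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
		.rule.num_log = 12,
	};
	struct mlx5hws_rule_attr r_attr = {
		.queue_id = 0,
		.user_data = rule,	/* echoed back in the completion */
	};
	struct mlx5hws_matcher *matcher;

	matcher = mlx5hws_matcher_create(tbl, &mt, 1, &at, 1, &m_attr);
	if (!matcher)
		return -EINVAL;

	/* asynchronous: the result arrives via mlx5hws_send_queue_poll() */
	return mlx5hws_rule_create(matcher, 0, match_param, 0,
				   ractions, &r_attr, rule);
}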
+ */ +struct mlx5hws_action * +mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, + u32 flags); + +/** + * mlx5hws_action_create_default_miss - Create a direct rule default miss action. + * Defaults are RX: Drop, TX: Wire. + * + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: Pointer to mlx5hws_action on success, NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, + u32 flags); + +/** + * mlx5hws_action_create_dest_table - Create direct rule goto table action. + * + * @ctx: The context in which the new action will be created. + * @tbl: Destination table. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx, + struct mlx5hws_table *tbl, + u32 flags); + +/** + * mlx5hws_action_create_dest_table_num - Create direct rule goto table number action. + * + * @ctx: The context in which the new action will be created. + * @tbl_num: Destination table number. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx, + u32 tbl_num, u32 flags); + +/** + * mlx5hws_action_create_dest_match_range - Create direct rule range match action. + * + * @ctx: The context in which the new action will be created. + * @field: Field to comapare the value. + * @hit_ft: Flow table to go to on hit. + * @miss_ft: Flow table to go to on miss. + * @min: Minimal value of the field to be considered as hit. + * @max: Maximal value of the field to be considered as hit. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx, + u32 field, + struct mlx5_flow_table *hit_ft, + struct mlx5_flow_table *miss_ft, + u32 min, u32 max, u32 flags); + +/** + * mlx5hws_action_create_flow_sampler - Create direct rule flow sampler action. + * + * @ctx: The context in which the new action will be created. + * @sampler_id: Flow sampler object ID. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx, + u32 sampler_id, u32 flags); + +/** + * mlx5hws_action_create_dest_vport - Create direct rule goto vport action. + * + * @ctx: The context in which the new action will be created. + * @vport_num: Destination vport number. + * @vhca_id_valid: Tells if the vhca_id parameter is valid. + * @vhca_id: VHCA ID of the destination vport. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx, + u16 vport_num, + bool vhca_id_valid, + u16 vhca_id, + u32 flags); + +/** + * mlx5hws_action_create_tag - Create direct rule TAG action. + * + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. 
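A sketch of creating two of the actions declared above and wiring them into the rule_actions array consumed by mlx5hws_rule_create(); per the implementation further down, the HWS_FDB flag is required for this API, and the tag value shown is arbitrary. The example_ wrapper is illustrative only:

static int example_tag_and_goto(struct mlx5hws_context *ctx,
				struct mlx5hws_table *dst_tbl,
				struct mlx5hws_rule_action *ractions)
{
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
	struct mlx5hws_action *tag, *fwd;

	tag = mlx5hws_action_create_tag(ctx, flags);
	fwd = mlx5hws_action_create_dest_table(ctx, dst_tbl, flags);
	if (!tag || !fwd)
		return -EINVAL;	/* cleanup of the non-NULL action omitted */

	ractions[0].action = tag;
	ractions[0].tag.value = 0xcafe;		/* per-rule flow tag */
	ractions[1].action = fwd;		/* terminating destination */

	return 0;
}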
+ */ +struct mlx5hws_action * +mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags); + +/** + * mlx5hws_action_create_counter - Create direct rule counter action. + * + * @ctx: The context in which the new action will be created. + * @obj_id: Direct rule counter object ID. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_counter(struct mlx5hws_context *ctx, + u32 obj_id, + u32 flags); + +/** + * mlx5hws_action_create_reformat - Create direct rule reformat action. + * + * @ctx: The context in which the new action will be created. + * @reformat_type: Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT. + * @num_of_hdrs: Number of provided headers in "hdrs" array. + * @hdrs: Headers array containing header information. + * @log_bulk_size: Number of unique values used with this reformat. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_reformat(struct mlx5hws_context *ctx, + enum mlx5hws_action_type reformat_type, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_size, + u32 flags); + +/** + * mlx5hws_action_create_modify_header - Create direct rule modify header action. + * + * @ctx: The context in which the new action will be created. + * @num_of_patterns: Number of provided patterns in "patterns" array. + * @patterns: Patterns array containing pattern information. + * @log_bulk_size: Number of unique values used with this pattern. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx, + u8 num_of_patterns, + struct mlx5hws_action_mh_pattern *patterns, + u32 log_bulk_size, + u32 flags); + +/** + * mlx5hws_action_create_aso_meter - Create direct rule ASO flow meter action. + * + * @ctx: The context in which the new action will be created. + * @obj_id: ASO object ID. + * @return_reg_c: Copy the ASO object value into this reg_c, + * after a packet hits a rule with this ASO object. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx, + u32 obj_id, + u8 return_reg_c, + u32 flags); + +/** + * mlx5hws_action_create_pop_vlan - Create direct rule pop vlan action. + * + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags); + +/** + * mlx5hws_action_create_push_vlan - Create direct rule push vlan action. + * + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags); + +/** + * mlx5hws_action_create_dest_array - Create a dest array action, this action can + * duplicate packets and forward to multiple destinations in the destination list. 
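A sketch of wrapping a PRM-format modify-header pattern (built elsewhere, for example with MLX5_SET(set_action_in, ...)) into an action; log_bulk_size 0 is assumed here to mean a single shared value, and the example_ wrapper is illustrative only:

static struct mlx5hws_action *
example_modify_hdr(struct mlx5hws_context *ctx, __be64 *prm_actions,
		   size_t num_actions)
{
	struct mlx5hws_action_mh_pattern pattern = {
		.data = prm_actions,
		.sz = num_actions * sizeof(*prm_actions),  /* byte size, per the struct doc */
	};

	return mlx5hws_action_create_modify_header(ctx, 1, &pattern, 0,
						   MLX5HWS_ACTION_FLAG_HWS_FDB);
}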
+ * + * @ctx: The context in which the new action will be created. + * @num_dest: The number of entries in the dests array. + * @dests: The destination array. Each contains a destination action and can + * have additional actions. + * @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest. + * @flow_source: Source port of the traffic for this action. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, + size_t num_dest, + struct mlx5hws_action_dest_attr *dests, + bool ignore_flow_level, + u32 flow_source, + u32 flags); + +/** + * mlx5hws_action_create_insert_header - Create insert header action. + * + * @ctx: The context in which the new action will be created. + * @num_of_hdrs: Number of provided headers in "hdrs" array. + * @hdrs: Headers array containing header information. + * @log_bulk_size: Number of unique values used with this insert header. + * @flags: Action creation flags. (enum mlx5hws_action_flags) + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx, + u8 num_of_hdrs, + struct mlx5hws_action_insert_header *hdrs, + u32 log_bulk_size, + u32 flags); + +/** + * mlx5hws_action_create_remove_header - Create remove header action. + * + * @ctx: The context in which the new action will be created. + * @attr: attributes that specify the remove header type, PRM start anchor and + * the PRM end anchor or the PRM start anchor and remove size in bytes. + * @flags: Action creation flags. (enum mlx5hws_action_flags) + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx, + struct mlx5hws_action_remove_header_attr *attr, + u32 flags); + +/** + * mlx5hws_action_create_last - Create direct rule LAST action. + * + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags. (enum mlx5hws_action_flags) + * + * Return: pointer to mlx5hws_action on success NULL otherwise. + */ +struct mlx5hws_action * +mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags); + +/** + * mlx5hws_action_destroy - Destroy direct rule action. + * + * @action: The action to destroy. + * + * Return: zero on success non zero otherwise. + */ +int mlx5hws_action_destroy(struct mlx5hws_action *action); + +enum mlx5hws_flow_op_status { + MLX5HWS_FLOW_OP_SUCCESS, + MLX5HWS_FLOW_OP_ERROR, +}; + +struct mlx5hws_flow_op_result { + enum mlx5hws_flow_op_status status; + void *user_data; +}; + +/** + * mlx5hws_send_queue_poll - Poll queue for rule creation and deletion completions. + * + * @ctx: The context to which the queue belongs. + * @queue_id: The id of the queue to poll. + * @res: Completion array. + * @res_nb: Maximum number of results to return. + * + * Return: negative number on failure, the number of completions otherwise. + */ +int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx, + u16 queue_id, + struct mlx5hws_flow_op_result res[], + u32 res_nb); + +/** + * mlx5hws_send_queue_action - Perform an action on the queue. + * + * @ctx: The context to which the queue belongs. + * @queue_id: The id of the queue to perform the action on. + * @actions: Actions to perform on the queue (enum mlx5hws_send_queue_actions) + * + * Return: zero on success non zero otherwise.
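A sketch of flushing a queue and reaping its completions with the two calls above; whether a synchronous drain still leaves completions to be polled is not spelled out here, so the sketch polls regardless. The example_ wrapper is illustrative only:

static int example_drain_queue(struct mlx5hws_context *ctx, u16 queue_id)
{
	struct mlx5hws_flow_op_result res[32];
	int ret, i;

	ret = mlx5hws_send_queue_action(ctx, queue_id,
					MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
	if (ret)
		return ret;

	ret = mlx5hws_send_queue_poll(ctx, queue_id, res, ARRAY_SIZE(res));
	if (ret < 0)
		return ret;

	for (i = 0; i < ret; i++)
		if (res[i].status == MLX5HWS_FLOW_OP_ERROR)
			return -EIO;	/* res[i].user_data identifies the failed op */

	return 0;
}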
+ */ +int mlx5hws_send_queue_action(struct mlx5hws_context *ctx, + u16 queue_id, + u32 actions); + +/** + * mlx5hws_debug_dump - Dump HWS info + * + * @ctx: The context which to dump the info from. + * + * Return: zero on success non zero otherwise. + */ +int mlx5hws_debug_dump(struct mlx5hws_context *ctx); + +struct mlx5hws_bwc_matcher; +struct mlx5hws_bwc_rule; + +struct mlx5hws_match_parameters { + size_t match_sz; + u32 *match_buf; /* Device spec format */ +}; + +/** + * mlx5hws_bwc_matcher_create - Create a new BWC direct rule matcher. + * + * This function does the following: + * - creates match template based on flow items + * - creates an empty action template + * - creates a usual mlx5hws_matcher with these mt and at, setting + * its size to minimal + * Notes: + * - table->ctx must have BWC support + * - complex rules are not supported + * + * @table: The table in which the new matcher will be opened + * @priority: Priority for this BWC matcher + * @match_criteria_enable: Bitmask that defines matching criteria + * @mask: Match parameters + * + * Return: pointer to mlx5hws_bwc_matcher on success or NULL otherwise. + */ +struct mlx5hws_bwc_matcher * +mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask); + +/** + * mlx5hws_bwc_matcher_destroy - Destroy BWC direct rule matcher. + * + * @bwc_matcher: Matcher to destroy + * + * Return: zero on success, non zero otherwise + */ +int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher); + +/** + * mlx5hws_bwc_rule_create - Create a new BWC rule. + * + * Unlike the usual rule creation function, this one is blocking: when the + * function returns, the rule is written to its place (no need to poll). + * This function does the following: + * - finds matching action template based on the provided rule_actions, or + * creates new action template if matching action template doesn't exist + * - updates corresponding BWC matcher stats + * - if needed, the function performs rehash: + * - creates a new matcher based on mt, at, new_sz + * - moves all the existing matcher rules to the new matcher + * - removes the old matcher + * - inserts new rule + * - polls till completion is received + * Notes: + * - matcher->tbl->ctx must have BWC support + * - separate BWC ctx queues are used + * + * @bwc_matcher: The BWC matcher in which the new rule will be created. + * @params: Match perameters + * @flow_source: Flow source for this rule + * @rule_actions: Rule action to be executed on match + * + * Return: valid BWC rule handle on success, NULL otherwise + */ +struct mlx5hws_bwc_rule * +mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_match_parameters *params, + u32 flow_source, + struct mlx5hws_rule_action rule_actions[]); + +/** + * mlx5hws_bwc_rule_destroy - Destroy BWC direct rule. + * + * @bwc_rule: Rule to destroy. + * + * Return: zero on success, non zero otherwise. + */ +int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule); + +/** + * mlx5hws_bwc_rule_action_update - Update actions on an existing BWC rule. + * + * @bwc_rule: Rule to update + * @rule_actions: Rule action to update with + * + * Return: zero on successful update, non zero otherwise. 
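For completeness, the blocking (BWC) path sketched end to end; MLX5_MATCH_OUTER_HEADERS is the pre-existing mlx5 match-criteria bit, mask/value/ractions are assumed to be prepared by the caller, and the example_ wrapper is illustrative only:

static int example_bwc_rule(struct mlx5hws_table *tbl,
			    struct mlx5hws_match_parameters *mask,
			    struct mlx5hws_match_parameters *value,
			    struct mlx5hws_rule_action *ractions)
{
	struct mlx5hws_bwc_matcher *matcher;
	struct mlx5hws_bwc_rule *rule;
	int err;

	matcher = mlx5hws_bwc_matcher_create(tbl, 0 /* priority */,
					     MLX5_MATCH_OUTER_HEADERS, mask);
	if (!matcher)
		return -EINVAL;

	/* blocking: the rule is already in place when this returns */
	rule = mlx5hws_bwc_rule_create(matcher, value, 0 /* flow_source */,
				       ractions);
	if (!rule) {
		mlx5hws_bwc_matcher_destroy(matcher);
		return -EINVAL;
	}

	err = mlx5hws_bwc_rule_destroy(rule);
	mlx5hws_bwc_matcher_destroy(matcher);
	return err;
}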
+ */ +int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_action rule_actions[]); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c new file mode 100644 index 00000000000000..b27bb41065326d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c @@ -0,0 +1,2604 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1 + +/* Header removal size limited to 128B (64 words) */ +#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128 + +/* This is the longest supported action sequence for FDB table: + * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term. + */ +static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = { + [MLX5HWS_TABLE_TYPE_FDB] = { + BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2), + BIT(MLX5HWS_ACTION_TYP_POP_VLAN), + BIT(MLX5HWS_ACTION_TYP_POP_VLAN), + BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR), + BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN), + BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN), + BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) | + BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3), + BIT(MLX5HWS_ACTION_TYP_CTR), + BIT(MLX5HWS_ACTION_TYP_TAG), + BIT(MLX5HWS_ACTION_TYP_ASO_METER), + BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR), + BIT(MLX5HWS_ACTION_TYP_TBL) | + BIT(MLX5HWS_ACTION_TYP_VPORT) | + BIT(MLX5HWS_ACTION_TYP_DROP) | + BIT(MLX5HWS_ACTION_TYP_SAMPLER) | + BIT(MLX5HWS_ACTION_TYP_RANGE) | + BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY), + BIT(MLX5HWS_ACTION_TYP_LAST), + }, +}; + +static const char * const mlx5hws_action_type_str[] = { + [MLX5HWS_ACTION_TYP_LAST] = "LAST", + [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2", + [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2", + [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2", + [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3", + [MLX5HWS_ACTION_TYP_DROP] = "DROP", + [MLX5HWS_ACTION_TYP_TBL] = "TBL", + [MLX5HWS_ACTION_TYP_CTR] = "CTR", + [MLX5HWS_ACTION_TYP_TAG] = "TAG", + [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR", + [MLX5HWS_ACTION_TYP_VPORT] = "VPORT", + [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS", + [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN", + [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN", + [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER", + [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY", + [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER", + [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER", + [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER", + [MLX5HWS_ACTION_TYP_RANGE] = "RANGE", +}; + +static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX, + "Missing mlx5hws_action_type_str"); + +const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type) +{ + return mlx5hws_action_type_str[action_type]; +} + +enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action) +{ + return action->type; +} + +static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx, + enum mlx5hws_context_shared_stc_type stc_type, + u8 tbl_type) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_action_shared_stc *shared_stc; + int ret; + + mutex_lock(&ctx->ctrl_lock); + if 
(ctx->common_res[tbl_type].shared_stc[stc_type]) { + ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++; + mutex_unlock(&ctx->ctrl_lock); + return 0; + } + + shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL); + if (!shared_stc) { + ret = -ENOMEM; + goto unlock_and_out; + } + switch (stc_type) { + case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3: + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE; + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + stc_attr.remove_header.decap = 0; + stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START; + stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4; + break; + case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP: + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS; + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START; + stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN; + break; + default: + mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type); + pr_warn("HWS: Invalid stc_type: %d\n", stc_type); + ret = -EINVAL; + goto unlock_and_out; + } + + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type, + &shared_stc->stc_chunk); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate shared decap l2 STC\n"); + goto free_shared_stc; + } + + ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc; + ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1; + + mutex_unlock(&ctx->ctrl_lock); + + return 0; + +free_shared_stc: + kfree(shared_stc); +unlock_and_out: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static int hws_action_get_shared_stc(struct mlx5hws_action *action, + enum mlx5hws_context_shared_stc_type stc_type) +{ + struct mlx5hws_context *ctx = action->ctx; + int ret; + + if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) { + pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type); + return -EINVAL; + } + + if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) { + pr_warn("HWS: Invalid action->flags: %d\n", action->flags); + return -EINVAL; + } + + ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB); + if (ret) { + mlx5hws_err(ctx, + "Failed to allocate memory for FDB shared STCs (type: %d)\n", + stc_type); + return ret; + } + + return 0; +} + +static void hws_action_put_shared_stc(struct mlx5hws_action *action, + enum mlx5hws_context_shared_stc_type stc_type) +{ + enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB; + struct mlx5hws_action_shared_stc *shared_stc; + struct mlx5hws_context *ctx = action->ctx; + + if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) { + pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type); + return; + } + + mutex_lock(&ctx->ctrl_lock); + if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) { + mutex_unlock(&ctx->ctrl_lock); + return; + } + + shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type]; + + mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk); + kfree(shared_stc); + ctx->common_res[tbl_type].shared_stc[stc_type] = NULL; + mutex_unlock(&ctx->ctrl_lock); +} + +static void hws_action_print_combo(struct mlx5hws_context *ctx, + enum mlx5hws_action_type *user_actions) +{ + mlx5hws_err(ctx, "Invalid action_type sequence"); + while (*user_actions != MLX5HWS_ACTION_TYP_LAST) { + mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions)); + user_actions++; + } + 
mlx5hws_err(ctx, "\n"); +} + +bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx, + enum mlx5hws_action_type *user_actions, + enum mlx5hws_table_type table_type) +{ + const u32 *order_arr = action_order_arr[table_type]; + u8 order_idx = 0; + u8 user_idx = 0; + bool valid_combo; + + if (table_type >= MLX5HWS_TABLE_TYPE_MAX) { + mlx5hws_err(ctx, "Invalid table_type %d", table_type); + return false; + } + + while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) { + /* User action order validated move to next user action */ + if (BIT(user_actions[user_idx]) & order_arr[order_idx]) + user_idx++; + + /* Iterate to the next supported action in the order */ + order_idx++; + } + + /* Combination is valid if all user action were processed */ + valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST; + if (!valid_combo) + hws_action_print_combo(ctx, user_actions); + + return valid_combo; +} + +static bool +hws_action_fixup_stc_attr(struct mlx5hws_context *ctx, + struct mlx5hws_cmd_stc_modify_attr *stc_attr, + struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr, + enum mlx5hws_table_type table_type, + bool is_mirror) +{ + bool use_fixup = false; + u32 fw_tbl_type; + u32 base_id; + + fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror); + + switch (stc_attr->action_type) { + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: + if (is_mirror && stc_attr->ste_table.ignore_tx) { + fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; + fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + fixup_stc_attr->stc_offset = stc_attr->stc_offset; + use_fixup = true; + break; + } + if (!is_mirror) + base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool, + &stc_attr->ste_table.ste); + else + base_id = + mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool, + &stc_attr->ste_table.ste); + + *fixup_stc_attr = *stc_attr; + fixup_stc_attr->ste_table.ste_obj_id = base_id; + use_fixup = true; + break; + + case MLX5_IFC_STC_ACTION_TYPE_TAG: + if (fw_tbl_type == FS_FT_FDB_TX) { + fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP; + fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + fixup_stc_attr->stc_offset = stc_attr->stc_offset; + use_fixup = true; + } + break; + + case MLX5_IFC_STC_ACTION_TYPE_ALLOW: + if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) { + fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT; + fixup_stc_attr->action_offset = stc_attr->action_offset; + fixup_stc_attr->stc_offset = stc_attr->stc_offset; + fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id; + fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number; + fixup_stc_attr->vport.eswitch_owner_vhca_id_valid = + ctx->caps->merged_eswitch; + use_fixup = true; + } + break; + + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: + if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK) + break; + + if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) { + /* The FW doesn't allow to go to wire in the TX/RX by JUMP_TO_VPORT */ + fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK; + fixup_stc_attr->action_offset = stc_attr->action_offset; + fixup_stc_attr->stc_offset = stc_attr->stc_offset; + fixup_stc_attr->vport.vport_num = 0; + fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id; + fixup_stc_attr->vport.eswitch_owner_vhca_id_valid = + stc_attr->vport.eswitch_owner_vhca_id_valid; + } + use_fixup = true; + break; + + default: + break; + } + + return 
use_fixup; +} + +int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx, + struct mlx5hws_cmd_stc_modify_attr *stc_attr, + u32 table_type, + struct mlx5hws_pool_chunk *stc) +__must_hold(&ctx->ctrl_lock) +{ + struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0}; + struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type]; + struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0}; + bool use_fixup; + u32 obj_0_id; + int ret; + + ret = mlx5hws_pool_chunk_alloc(stc_pool, stc); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate single action STC\n"); + return ret; + } + + stc_attr->stc_offset = stc->offset; + + /* Dynamic reparse not supported, overwrite and use default */ + if (!mlx5hws_context_cap_dynamic_reparse(ctx)) + stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + + obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc); + + /* According to table/action limitation change the stc_attr */ + use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false); + ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, + use_fixup ? &fixup_stc_attr : stc_attr); + if (ret) { + mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n", + stc_attr->action_type, table_type); + goto free_chunk; + } + + /* Modify the FDB peer */ + if (table_type == MLX5HWS_TABLE_TYPE_FDB) { + u32 obj_1_id; + + obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc); + + use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, + &fixup_stc_attr, + table_type, true); + ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id, + use_fixup ? &fixup_stc_attr : stc_attr); + if (ret) { + mlx5hws_err(ctx, + "Failed to modify peer STC action_type %d tbl_type %d\n", + stc_attr->action_type, table_type); + goto clean_obj_0; + } + } + + return 0; + +clean_obj_0: + cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; + cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + cleanup_stc_attr.stc_offset = stc->offset; + mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr); +free_chunk: + mlx5hws_pool_chunk_free(stc_pool, stc); + return ret; +} + +void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx, + u32 table_type, + struct mlx5hws_pool_chunk *stc) +__must_hold(&ctx->ctrl_lock) +{ + struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type]; + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + u32 obj_id; + + /* Modify the STC not to point to an object */ + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + stc_attr.stc_offset = stc->offset; + obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc); + mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr); + + if (table_type == MLX5HWS_TABLE_TYPE_FDB) { + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc); + mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr); + } + + mlx5hws_pool_chunk_free(stc_pool, stc); +} + +static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx, + __be64 pattern) +{ + u8 action_type = MLX5_GET(set_action_in, &pattern, action_type); + + switch (action_type) { + case MLX5_MODIFICATION_TYPE_SET: + return MLX5_IFC_STC_ACTION_TYPE_SET; + case MLX5_MODIFICATION_TYPE_ADD: + return MLX5_IFC_STC_ACTION_TYPE_ADD; + case MLX5_MODIFICATION_TYPE_COPY: + return MLX5_IFC_STC_ACTION_TYPE_COPY; + case MLX5_MODIFICATION_TYPE_ADD_FIELD: + return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD; + default: + mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type); + return MLX5_IFC_STC_ACTION_TYPE_NOP; + } +} + +static 
void hws_action_fill_stc_attr(struct mlx5hws_action *action, + u32 obj_id, + struct mlx5hws_cmd_stc_modify_attr *attr) +{ + attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + + switch (action->type) { + case MLX5HWS_ACTION_TYP_TAG: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + break; + case MLX5HWS_ACTION_TYP_DROP: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; + attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + break; + case MLX5HWS_ACTION_TYP_MISS: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW; + attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + break; + case MLX5HWS_ACTION_TYP_CTR: + attr->id = obj_id; + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0; + break; + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: + case MLX5HWS_ACTION_TYP_MODIFY_HDR: + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + if (action->modify_header.require_reparse) + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + + if (action->modify_header.num_of_actions == 1) { + attr->modify_action.data = action->modify_header.single_action; + attr->action_type = hws_action_get_mh_stc_type(action->ctx, + attr->modify_action.data); + + if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD || + attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET) + MLX5_SET(set_action_in, &attr->modify_action.data, data, 0); + } else { + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST; + attr->modify_header.arg_id = action->modify_header.arg_id; + attr->modify_header.pattern_id = action->modify_header.pat_id; + } + break; + case MLX5HWS_ACTION_TYP_TBL: + case MLX5HWS_ACTION_TYP_DEST_ARRAY: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT; + attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + attr->dest_table_id = obj_id; + break; + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + attr->remove_header.decap = 1; + attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START; + attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC; + break; + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: + case MLX5HWS_ACTION_TYP_INSERT_HEADER: + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + if (!action->reformat.require_reparse) + attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6; + attr->insert_header.encap = action->reformat.encap; + attr->insert_header.insert_anchor = action->reformat.anchor; + attr->insert_header.arg_id = action->reformat.arg_id; + attr->insert_header.header_size = action->reformat.header_size; + attr->insert_header.insert_offset = action->reformat.offset; + break; + case MLX5HWS_ACTION_TYP_ASO_METER: + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6; + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO; + attr->aso.aso_type = ASO_OPC_MOD_POLICER; + attr->aso.devx_obj_id = obj_id; + attr->aso.return_reg_id = action->aso.return_reg_id; + break; + case MLX5HWS_ACTION_TYP_VPORT: + attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT; + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT; + attr->vport.vport_num = action->vport.vport_num; + attr->vport.esw_owner_vhca_id = 
action->vport.esw_owner_vhca_id; + attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid; + break; + case MLX5HWS_ACTION_TYP_POP_VLAN: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START; + attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2; + break; + case MLX5HWS_ACTION_TYP_PUSH_VLAN: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + attr->insert_header.encap = 0; + attr->insert_header.is_inline = 1; + attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START; + attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS; + attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN; + break; + case MLX5HWS_ACTION_TYP_REMOVE_HEADER: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS; + attr->remove_header.decap = 0; /* the mode we support decap is 0 */ + attr->remove_words.start_anchor = action->remove_header.anchor; + /* the size is in already in words */ + attr->remove_words.num_of_words = action->remove_header.size; + attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5; + attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; + break; + default: + mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type); + } +} + +static int +hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_context *ctx = action->ctx; + int ret; + + hws_action_fill_stc_attr(action, obj_id, &stc_attr); + + /* Block unsupported parallel obj modify over the same base */ + mutex_lock(&ctx->ctrl_lock); + + /* Allocate STC for FDB */ + if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) { + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, + MLX5HWS_TABLE_TYPE_FDB, + &action->stc[MLX5HWS_TABLE_TYPE_FDB]); + if (ret) + goto out_err; + } + + mutex_unlock(&ctx->ctrl_lock); + + return 0; + +out_err: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static void +hws_action_destroy_stcs(struct mlx5hws_action *action) +{ + struct mlx5hws_context *ctx = action->ctx; + + /* Block unsupported parallel obj modify over the same base */ + mutex_lock(&ctx->ctrl_lock); + + if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) + mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB, + &action->stc[MLX5HWS_TABLE_TYPE_FDB]); + + mutex_unlock(&ctx->ctrl_lock); +} + +static bool hws_action_is_flag_hws_fdb(u32 flags) +{ + return flags & MLX5HWS_ACTION_FLAG_HWS_FDB; +} + +static bool +hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags) +{ + if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) { + mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n"); + return false; + } + + if ((flags & MLX5HWS_ACTION_FLAG_HWS_FDB) && !ctx->caps->eswitch_manager) { + mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n"); + return false; + } + + return true; +} + +static struct mlx5hws_action * +hws_action_create_generic_bulk(struct mlx5hws_context *ctx, + u32 flags, + enum mlx5hws_action_type action_type, + u8 bulk_sz) +{ + struct mlx5hws_action *action; + int i; + + if (!hws_action_is_flag_hws_fdb(flags)) { + mlx5hws_err(ctx, + "Action (type: %d) flags must specify only HWS FDB\n", action_type); + return NULL; + } + + if 
(!hws_action_validate_hws_action(ctx, flags)) + return NULL; + + action = kcalloc(bulk_sz, sizeof(*action), GFP_KERNEL); + if (!action) + return NULL; + + for (i = 0; i < bulk_sz; i++) { + action[i].ctx = ctx; + action[i].flags = flags; + action[i].type = action_type; + } + + return action; +} + +static struct mlx5hws_action * +hws_action_create_generic(struct mlx5hws_context *ctx, + u32 flags, + enum mlx5hws_action_type action_type) +{ + return hws_action_create_generic_bulk(ctx, flags, action_type, 1); +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx, + u32 table_id, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, table_id); + if (ret) + goto free_action; + + action->dest_obj.obj_id = table_id; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx, + struct mlx5hws_table *tbl, + u32 flags) +{ + return mlx5hws_action_create_dest_table_num(ctx, tbl->ft_id, flags); +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, 0); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, 0); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, 0); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +static struct mlx5hws_action * +hws_action_create_aso(struct mlx5hws_context *ctx, + enum mlx5hws_action_type action_type, + u32 obj_id, + u8 return_reg_id, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, action_type); + if (!action) + return NULL; + + action->aso.obj_id = obj_id; + action->aso.return_reg_id = return_reg_id; + + ret = hws_action_create_stcs(action, obj_id); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx, + u32 obj_id, + u8 return_reg_id, + u32 flags) +{ + return hws_action_create_aso(ctx, MLX5HWS_ACTION_TYP_ASO_METER, + obj_id, return_reg_id, flags); +} + +struct mlx5hws_action * +mlx5hws_action_create_counter(struct mlx5hws_context *ctx, + u32 obj_id, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, obj_id); + if (ret) + goto free_action; + + return action; + +free_action: 
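/*
 * Usage sketch (editorial, not from the patch): a caller holding an already
 * initialized mlx5hws_context could pair the simple constructors above with
 * mlx5hws_action_destroy(); "ctx" and "counter_obj_id" are assumed to exist
 * and error handling is trimmed:
 *
 *      struct mlx5hws_action *drop, *ctr;
 *
 *      drop = mlx5hws_action_create_dest_drop(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);
 *      ctr = mlx5hws_action_create_counter(ctx, counter_obj_id,
 *                                          MLX5HWS_ACTION_FLAG_HWS_FDB);
 *      if (!drop || !ctr)
 *              goto err;
 *      ...
 *      mlx5hws_action_destroy(ctr);
 *      mlx5hws_action_destroy(drop);
 */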
+ kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx, + u16 vport_num, + bool vhca_id_valid, + u16 vhca_id, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) { + mlx5hws_err(ctx, "Vport action is supported for FDB only\n"); + return NULL; + } + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT); + if (!action) + return NULL; + + if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) { + mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n"); + goto free_action; + } + + action->vport.vport_num = vport_num; + action->vport.esw_owner_vhca_id_valid = vhca_id_valid; + + if (vhca_id_valid) + action->vport.esw_owner_vhca_id = vhca_id; + + ret = hws_action_create_stcs(action, 0); + if (ret) { + mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num); + goto free_action; + } + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN); + if (!action) + return NULL; + + ret = hws_action_create_stcs(action, 0); + if (ret) { + mlx5hws_err(ctx, "Failed creating stc for push vlan\n"); + goto free_action; + } + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN); + if (!action) + return NULL; + + ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP); + if (ret) { + mlx5hws_err(ctx, "Failed to create remove stc for reformat\n"); + goto free_action; + } + + ret = hws_action_create_stcs(action, 0); + if (ret) { + mlx5hws_err(ctx, "Failed creating stc for pop vlan\n"); + goto free_shared; + } + + return action; + +free_shared: + hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP); +free_action: + kfree(action); + return NULL; +} + +static int +hws_action_handle_insert_with_ptr(struct mlx5hws_action *action, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_sz) +{ + size_t max_sz = 0; + u32 arg_id; + int ret, i; + + for (i = 0; i < num_of_hdrs; i++) { + if (hdrs[i].sz % W_SIZE != 0) { + mlx5hws_err(action->ctx, + "Header data size should be in WORD granularity\n"); + return -EINVAL; + } + max_sz = max(hdrs[i].sz, max_sz); + } + + /* Allocate single shared arg object for all headers */ + ret = mlx5hws_arg_create(action->ctx, + hdrs->data, + max_sz, + log_bulk_sz, + action->flags & MLX5HWS_ACTION_FLAG_SHARED, + &arg_id); + if (ret) + return ret; + + for (i = 0; i < num_of_hdrs; i++) { + action[i].reformat.arg_id = arg_id; + action[i].reformat.header_size = hdrs[i].sz; + action[i].reformat.num_of_hdrs = num_of_hdrs; + action[i].reformat.max_hdr_sz = max_sz; + action[i].reformat.require_reparse = true; + + if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 || + action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) { + action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START; + action[i].reformat.offset = 0; + action[i].reformat.encap = 1; + } + + ret = hws_action_create_stcs(&action[i], 0); + if (ret) { + mlx5hws_err(action->ctx, "Failed to create 
stc for reformat\n"); + goto free_stc; + } + } + + return 0; + +free_stc: + while (i--) + hws_action_destroy_stcs(&action[i]); + + mlx5hws_arg_destroy(action->ctx, arg_id); + return ret; +} + +static int +hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_sz) +{ + int ret; + + /* The action is remove-l2-header + insert-l3-header */ + ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3); + if (ret) { + mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n"); + return ret; + } + + /* Reuse the insert with pointer for the L2L3 header */ + ret = hws_action_handle_insert_with_ptr(action, + num_of_hdrs, + hdrs, + log_bulk_sz); + if (ret) + goto put_shared_stc; + + return 0; + +put_shared_stc: + hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3); + return ret; +} + +static void hws_action_prepare_decap_l3_actions(size_t data_sz, + u8 *mh_data, + int *num_of_actions) +{ + int actions; + u32 i; + + /* Remove L2L3 outer headers */ + MLX5_SET(stc_ste_param_remove, mh_data, action_type, + MLX5_MODIFICATION_TYPE_REMOVE); + MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1); + MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor, + MLX5_HEADER_ANCHOR_PACKET_START); + MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor, + MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4); + mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */ + actions = 1; + + /* Add the new header using inline action 4Byte at a time, the header + * is added in reversed order to the beginning of the packet to avoid + * incorrect parsing by the HW. Since header is 14B or 18B an extra + * two bytes are padded and later removed. 
+ */ + for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) { + MLX5_SET(stc_ste_param_insert, mh_data, action_type, + MLX5_MODIFICATION_TYPE_INSERT); + MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1); + MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor, + MLX5_HEADER_ANCHOR_PACKET_START); + MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2); + mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; + actions++; + } + + /* Remove first 2 extra bytes */ + MLX5_SET(stc_ste_param_remove_words, mh_data, action_type, + MLX5_MODIFICATION_TYPE_REMOVE_WORDS); + MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor, + MLX5_HEADER_ANCHOR_PACKET_START); + /* The hardware expects here size in words (2 bytes) */ + MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1); + actions++; + + *num_of_actions = actions; +} + +static int +hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_sz) +{ + u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0}; + struct mlx5hws_context *ctx = action->ctx; + u32 arg_id, pat_id; + int num_of_actions; + int mh_data_size; + int ret, i; + + for (i = 0; i < num_of_hdrs; i++) { + if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 && + hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) { + mlx5hws_err(ctx, "Data size is not supported for decap-l3\n"); + return -EINVAL; + } + } + + /* Create a full modify header action list in case shared */ + hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions); + if (action->flags & MLX5HWS_ACTION_FLAG_SHARED) + mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions); + + /* All DecapL3 cases require the same max arg size */ + ret = mlx5hws_arg_create_modify_header_arg(ctx, + (__be64 *)mh_data, + num_of_actions, + log_bulk_sz, + action->flags & MLX5HWS_ACTION_FLAG_SHARED, + &arg_id); + if (ret) + return ret; + + for (i = 0; i < num_of_hdrs; i++) { + memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE); + hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions); + mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE; + + ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n"); + goto free_stc_and_pat; + } + + action[i].modify_header.max_num_of_actions = num_of_actions; + action[i].modify_header.num_of_actions = num_of_actions; + action[i].modify_header.num_of_patterns = num_of_hdrs; + action[i].modify_header.arg_id = arg_id; + action[i].modify_header.pat_id = pat_id; + action[i].modify_header.require_reparse = + mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions); + + ret = hws_action_create_stcs(&action[i], 0); + if (ret) { + mlx5hws_pat_put_pattern(ctx, pat_id); + goto free_stc_and_pat; + } + } + + return 0; + +free_stc_and_pat: + while (i--) { + hws_action_destroy_stcs(&action[i]); + mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id); + } + + mlx5hws_arg_destroy(action->ctx, arg_id); + return ret; +} + +static int +hws_action_create_reformat_hws(struct mlx5hws_action *action, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 bulk_size) +{ + int ret; + + switch (action->type) { + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: + ret = hws_action_create_stcs(action, 0); + break; + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: + ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size); + break; + case 
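/*
 * Worked example (editorial): for a 14 byte inner L2 header (no VLAN) the
 * insert loop above runs 14 / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1 =
 * 14 / 4 + 1 = 4 times, so the full list is 1 remove + 4 inline inserts +
 * 1 remove_words = 6 actions, matching DECAP_L3_NUM_ACTIONS_W_NO_VLAN.
 * With a VLAN (18 bytes) it is 18 / 4 + 1 = 5 inserts, i.e. 7 actions
 * (DECAP_L3_NUM_ACTIONS_W_VLAN).
 */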
MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: + ret = hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size); + break; + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: + ret = hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size); + break; + default: + mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n"); + return -EINVAL; + } + + return ret; +} + +struct mlx5hws_action * +mlx5hws_action_create_reformat(struct mlx5hws_context *ctx, + enum mlx5hws_action_type reformat_type, + u8 num_of_hdrs, + struct mlx5hws_action_reformat_header *hdrs, + u32 log_bulk_size, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + if (!num_of_hdrs) { + mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n"); + return NULL; + } + + action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs); + if (!action) + return NULL; + + if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1)) { + mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags); + goto free_action; + } + + ret = hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size); + if (ret) { + mlx5hws_err(ctx, "Failed to create HWS reformat action\n"); + goto free_action; + } + + return action; + +free_action: + kfree(action); + return NULL; +} + +static int +hws_action_create_modify_header_hws(struct mlx5hws_action *action, + u8 num_of_patterns, + struct mlx5hws_action_mh_pattern *pattern, + u32 log_bulk_size) +{ + struct mlx5hws_context *ctx = action->ctx; + u16 num_actions, max_mh_actions = 0; + int i, ret, size_in_bytes; + u32 pat_id, arg_id = 0; + __be64 *new_pattern; + size_t pat_max_sz; + + pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE; + size_in_bytes = pat_max_sz * sizeof(__be64); + new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL); + if (!new_pattern) + return -ENOMEM; + + /* Calculate maximum number of mh actions for shared arg allocation */ + for (i = 0; i < num_of_patterns; i++) { + size_t new_num_actions; + size_t cur_num_actions; + u32 nope_location; + + cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE; + + mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions, + pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE, + &new_num_actions, &nope_location, + &new_pattern[i * pat_max_sz]); + + action[i].modify_header.nope_locations = nope_location; + action[i].modify_header.num_of_actions = new_num_actions; + + max_mh_actions = max(max_mh_actions, new_num_actions); + } + + if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) { + mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n", + max_mh_actions); + ret = -EINVAL; + goto free_new_pat; + } + + /* Allocate single shared arg for all patterns based on the max size */ + if (max_mh_actions > 1) { + ret = mlx5hws_arg_create_modify_header_arg(ctx, + pattern->data, + max_mh_actions, + log_bulk_size, + action->flags & + MLX5HWS_ACTION_FLAG_SHARED, + &arg_id); + if (ret) + goto free_new_pat; + } + + for (i = 0; i < num_of_patterns; i++) { + if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) { + mlx5hws_err(ctx, "Fail to verify pattern modify actions\n"); + ret = -EINVAL; + goto free_stc_and_pat; + } + num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE; + action[i].modify_header.num_of_patterns = num_of_patterns; + action[i].modify_header.max_num_of_actions = max_mh_actions; + + action[i].modify_header.require_reparse = + mlx5hws_pat_require_reparse(pattern[i].data, num_actions); + + if 
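/*
 * Usage sketch (editorial, not part of the patch): creating a shared
 * L2-to-tunnel-L2 encap action from a prebuilt encapsulation header.
 * "encap_hdr" and "encap_hdr_len" are placeholders; the length must be a
 * multiple of the word size enforced in hws_action_handle_insert_with_ptr():
 *
 *      struct mlx5hws_action_reformat_header hdr = {
 *              .sz = encap_hdr_len,
 *              .data = encap_hdr,
 *      };
 *      struct mlx5hws_action *encap;
 *
 *      encap = mlx5hws_action_create_reformat(ctx,
 *                                             MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
 *                                             1, &hdr, 0,
 *                                             MLX5HWS_ACTION_FLAG_HWS_FDB |
 *                                             MLX5HWS_ACTION_FLAG_SHARED);
 */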
(num_actions == 1) { + pat_id = 0; + /* Optimize single modify action to be used inline */ + action[i].modify_header.single_action = pattern[i].data[0]; + action[i].modify_header.single_action_type = + MLX5_GET(set_action_in, pattern[i].data, action_type); + } else { + /* Multiple modify actions require a pattern */ + if (unlikely(action[i].modify_header.nope_locations)) { + size_t pattern_sz; + + pattern_sz = action[i].modify_header.num_of_actions * + MLX5HWS_MODIFY_ACTION_SIZE; + ret = + mlx5hws_pat_get_pattern(ctx, + &new_pattern[i * pat_max_sz], + pattern_sz, &pat_id); + } else { + ret = mlx5hws_pat_get_pattern(ctx, + pattern[i].data, + pattern[i].sz, + &pat_id); + } + if (ret) { + mlx5hws_err(ctx, + "Failed to allocate pattern for modify header\n"); + goto free_stc_and_pat; + } + + action[i].modify_header.arg_id = arg_id; + action[i].modify_header.pat_id = pat_id; + } + /* Allocate STC for each action representing a header */ + ret = hws_action_create_stcs(&action[i], 0); + if (ret) { + if (pat_id) + mlx5hws_pat_put_pattern(ctx, pat_id); + goto free_stc_and_pat; + } + } + + kfree(new_pattern); + return 0; + +free_stc_and_pat: + while (i--) { + hws_action_destroy_stcs(&action[i]); + if (action[i].modify_header.pat_id) + mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id); + } + + if (arg_id) + mlx5hws_arg_destroy(ctx, arg_id); +free_new_pat: + kfree(new_pattern); + return ret; +} + +struct mlx5hws_action * +mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx, + u8 num_of_patterns, + struct mlx5hws_action_mh_pattern *patterns, + u32 log_bulk_size, + u32 flags) +{ + struct mlx5hws_action *action; + int ret; + + if (!num_of_patterns) { + mlx5hws_err(ctx, "Invalid number of patterns\n"); + return NULL; + } + action = hws_action_create_generic_bulk(ctx, flags, + MLX5HWS_ACTION_TYP_MODIFY_HDR, + num_of_patterns); + if (!action) + return NULL; + + if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) { + mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n"); + goto free_action; + } + + ret = hws_action_create_modify_header_hws(action, + num_of_patterns, + patterns, + log_bulk_size); + if (ret) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, + size_t num_dest, + struct mlx5hws_action_dest_attr *dests, + bool ignore_flow_level, + u32 flow_source, + u32 flags) +{ + struct mlx5hws_cmd_set_fte_dest *dest_list = NULL; + struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; + struct mlx5hws_cmd_set_fte_attr fte_attr = {0}; + struct mlx5hws_cmd_forward_tbl *fw_island; + struct mlx5hws_action *action; + u32 i /*, packet_reformat_id*/; + int ret; + + if (num_dest <= 1) { + mlx5hws_err(ctx, "Action must have multiple dests\n"); + return NULL; + } + + if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) { + ft_attr.type = FS_FT_FDB; + ft_attr.level = ctx->caps->fdb_ft.max_level - 1; + } else { + mlx5hws_err(ctx, "Action flags not supported\n"); + return NULL; + } + + dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL); + if (!dest_list) + return NULL; + + for (i = 0; i < num_dest; i++) { + enum mlx5hws_action_type action_type = dests[i].dest->type; + struct mlx5hws_action *reformat_action = dests[i].reformat; + + switch (action_type) { + case MLX5HWS_ACTION_TYP_TBL: + dest_list[i].destination_type = + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest_list[i].destination_id = 
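/*
 * Usage sketch (editorial): a one-action modify-header pattern. With a single
 * action the code above inlines it into the STE instead of allocating a
 * pattern/argument pair. Pattern size is in bytes (one action being 8 bytes).
 * The set_action_in field names and MLX5_ACTION_TYPE_SET are assumed from the
 * existing mlx5_ifc.h definitions; "dst_field" and "new_value" are
 * placeholders:
 *
 *      __be64 mh_data[1] = {};
 *      struct mlx5hws_action_mh_pattern pattern = {
 *              .data = mh_data,
 *              .sz = sizeof(mh_data),
 *      };
 *      struct mlx5hws_action *mh;
 *
 *      MLX5_SET(set_action_in, mh_data, action_type, MLX5_ACTION_TYPE_SET);
 *      MLX5_SET(set_action_in, mh_data, field, dst_field);
 *      MLX5_SET(set_action_in, mh_data, data, new_value);
 *      mh = mlx5hws_action_create_modify_header(ctx, 1, &pattern, 0,
 *                                               MLX5HWS_ACTION_FLAG_HWS_FDB |
 *                                               MLX5HWS_ACTION_FLAG_SHARED);
 */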
dests[i].dest->dest_obj.obj_id; + fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + fte_attr.ignore_flow_level = ignore_flow_level; + /* ToDo: In SW steering we have a handling of 'go to WIRE' + * destination here by upper layer setting 'is_wire_ft' flag + * if the destination is wire. + * This is because uplink should be last dest in the list. + */ + break; + case MLX5HWS_ACTION_TYP_VPORT: + dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest_list[i].destination_id = dests[i].dest->vport.vport_num; + fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + if (ctx->caps->merged_eswitch) { + dest_list[i].ext_flags |= + MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID; + dest_list[i].esw_owner_vhca_id = + dests[i].dest->vport.esw_owner_vhca_id; + } + break; + default: + mlx5hws_err(ctx, "Unsupported action in dest_array\n"); + goto free_dest_list; + } + + if (reformat_action) { + mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n"); + goto free_dest_list; + } + } + + fte_attr.dests_num = num_dest; + fte_attr.dests = dest_list; + + fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr); + if (!fw_island) + goto free_dest_list; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY); + if (!action) + goto destroy_fw_island; + + ret = hws_action_create_stcs(action, fw_island->ft_id); + if (ret) + goto free_action; + + action->dest_array.fw_island = fw_island; + action->dest_array.num_dest = num_dest; + action->dest_array.dest_list = dest_list; + + return action; + +free_action: + kfree(action); +destroy_fw_island: + mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island); +free_dest_list: + for (i = 0; i < num_dest; i++) { + if (dest_list[i].ext_reformat_id) + mlx5hws_cmd_packet_reformat_destroy(ctx->mdev, + dest_list[i].ext_reformat_id); + } + kfree(dest_list); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx, + u8 num_of_hdrs, + struct mlx5hws_action_insert_header *hdrs, + u32 log_bulk_size, + u32 flags) +{ + struct mlx5hws_action_reformat_header *reformat_hdrs; + struct mlx5hws_action *action; + int ret; + int i; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_INSERT_HEADER); + if (!action) + return NULL; + + reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL); + if (!reformat_hdrs) + goto free_action; + + for (i = 0; i < num_of_hdrs; i++) { + if (hdrs[i].offset % W_SIZE != 0) { + mlx5hws_err(ctx, "Header offset should be in WORD granularity\n"); + goto free_reformat_hdrs; + } + + action[i].reformat.anchor = hdrs[i].anchor; + action[i].reformat.encap = hdrs[i].encap; + action[i].reformat.offset = hdrs[i].offset; + + reformat_hdrs[i].sz = hdrs[i].hdr.sz; + reformat_hdrs[i].data = hdrs[i].hdr.data; + } + + ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, + reformat_hdrs, log_bulk_size); + if (ret) { + mlx5hws_err(ctx, "Failed to create HWS reformat action\n"); + goto free_reformat_hdrs; + } + + kfree(reformat_hdrs); + + return action; + +free_reformat_hdrs: + kfree(reformat_hdrs); +free_action: + kfree(action); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx, + struct mlx5hws_action_remove_header_attr *attr, + u32 flags) +{ + struct mlx5hws_action *action; + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER); + if (!action) + return NULL; + + /* support only remove anchor with size */ + if (attr->size 
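/*
 * Sketch (editorial, not from the patch): a two-destination array built from
 * actions created earlier; "ft_action" (a MLX5HWS_ACTION_TYP_TBL action) and
 * "vport_action" (a MLX5HWS_ACTION_TYP_VPORT action) are assumptions. Note
 * that mlx5hws_action_create_dest_array() only accepts the FDB+SHARED flag
 * combination:
 *
 *      struct mlx5hws_action_dest_attr dests[2] = {
 *              { .dest = ft_action },
 *              { .dest = vport_action },
 *      };
 *      struct mlx5hws_action *multi;
 *
 *      multi = mlx5hws_action_create_dest_array(ctx, ARRAY_SIZE(dests), dests,
 *                                               false, 0,
 *                                               MLX5HWS_ACTION_FLAG_HWS_FDB |
 *                                               MLX5HWS_ACTION_FLAG_SHARED);
 */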
% W_SIZE != 0) { + mlx5hws_err(ctx, + "Invalid size, HW supports header remove in WORD granularity\n"); + goto free_action; + } + + if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) { + mlx5hws_err(ctx, "Header removal size limited to %u bytes\n", + MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE); + goto free_action; + } + + action->remove_header.anchor = attr->anchor; + action->remove_header.size = attr->size / W_SIZE; + + if (hws_action_create_stcs(action, 0)) + goto free_action; + + return action; + +free_action: + kfree(action); + return NULL; +} + +static struct mlx5hws_definer * +hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx) +{ + struct mlx5hws_definer *definer; + __be32 *tag; + int ret; + + definer = kzalloc(sizeof(*definer), GFP_KERNEL); + if (!definer) + return NULL; + + definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4; + /* Set DW0 tag mask */ + tag = (__force __be32 *)definer->mask.jumbo; + tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16); + + mutex_lock(&ctx->ctrl_lock); + + ret = mlx5hws_definer_get_obj(ctx, definer); + if (ret < 0) { + mutex_unlock(&ctx->ctrl_lock); + kfree(definer); + return NULL; + } + + mutex_unlock(&ctx->ctrl_lock); + definer->obj_id = ret; + + return definer; +} + +static struct mlx5hws_matcher_action_ste * +hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer, + u32 miss_ft_id) +{ + struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0}; + struct mlx5hws_action_default_stc *default_stc; + struct mlx5hws_matcher_action_ste *table_ste; + struct mlx5hws_pool_attr pool_attr = {0}; + struct mlx5hws_pool *ste_pool, *stc_pool; + struct mlx5hws_pool_chunk *ste; + u32 *rtc_0_id, *rtc_1_id; + u32 obj_id; + int ret; + + /* Check if STE range is supported */ + if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) { + mlx5hws_err(ctx, "Range STE format not supported\n"); + return NULL; + } + + table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL); + if (!table_ste) + return NULL; + + mutex_lock(&ctx->ctrl_lock); + + pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB; + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; + pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL; + pool_attr.alloc_log_sz = 1; + table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr); + if (!table_ste->pool) { + mlx5hws_err(ctx, "Failed to allocate memory ste pool\n"); + goto free_ste; + } + + /* Allocate RTC */ + rtc_0_id = &table_ste->rtc_0_id; + rtc_1_id = &table_ste->rtc_1_id; + ste_pool = table_ste->pool; + ste = &table_ste->ste; + ste->order = 1; + + rtc_attr.log_size = 0; + rtc_attr.log_depth = 0; + rtc_attr.miss_ft_id = miss_ft_id; + rtc_attr.num_hash_definer = 1; + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH; + rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH; + rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer; + rtc_attr.fw_gen_wqe = true; + rtc_attr.is_scnd_range = true; + + obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + + rtc_attr.pd = ctx->pd_num; + rtc_attr.ste_base = obj_id; + rtc_attr.ste_offset = ste->offset; + rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx); + rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false); + + /* STC is a single resource (obj_id), use any STC for the ID */ + stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB]; + default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc; + obj_id = 
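/*
 * Usage sketch (editorial): mlx5hws_action_create_remove_header() above takes
 * the size in bytes, which must be a multiple of W_SIZE, and stores it in
 * words. Stripping a VLAN-sized chunk from the first VLAN anchor could look
 * roughly like this:
 *
 *      struct mlx5hws_action_remove_header_attr attr = {
 *              .anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START,
 *              .size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN,
 *      };
 *      struct mlx5hws_action *strip;
 *
 *      strip = mlx5hws_action_create_remove_header(ctx, &attr,
 *                                                  MLX5HWS_ACTION_FLAG_HWS_FDB);
 */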
mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit); + rtc_attr.stc_base = obj_id; + + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create RTC"); + goto pool_destroy; + } + + /* Create mirror RTC */ + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + rtc_attr.ste_base = obj_id; + rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true); + + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit); + rtc_attr.stc_base = obj_id; + + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create mirror RTC"); + goto destroy_rtc_0; + } + + mutex_unlock(&ctx->ctrl_lock); + + return table_ste; + +destroy_rtc_0: + mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id); +pool_destroy: + mlx5hws_pool_destroy(table_ste->pool); +free_ste: + mutex_unlock(&ctx->ctrl_lock); + kfree(table_ste); + return NULL; +} + +static void +hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx, + struct mlx5hws_matcher_action_ste *table_ste) +{ + mutex_lock(&ctx->ctrl_lock); + + mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id); + mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id); + mlx5hws_pool_destroy(table_ste->pool); + kfree(table_ste); + + mutex_unlock(&ctx->ctrl_lock); +} + +static int +hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx, + struct mlx5hws_matcher_action_ste *table_ste, + struct mlx5hws_action *hit_ft_action, + struct mlx5hws_definer *range_definer, + u32 min, u32 max) +{ + struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0}; + struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0}; + struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0}; + u32 no_use, used_rtc_0_id, used_rtc_1_id, ret; + struct mlx5hws_context_common_res *common_res; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_engine *queue; + __be32 *wqe_data_arr; + + mutex_lock(&ctx->ctrl_lock); + + /* Get the control queue */ + queue = &ctx->send_queue[ctx->queues - 1]; + if (unlikely(mlx5hws_send_engine_err(queue))) { + ret = -EIO; + goto error; + } + + /* Init default send STE attributes */ + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.send_attr.user_data = &no_use; + ste_attr.send_attr.rule = NULL; + ste_attr.send_attr.fence = 1; + ste_attr.send_attr.notify_hw = true; + ste_attr.rtc_0 = table_ste->rtc_0_id; + ste_attr.rtc_1 = table_ste->rtc_1_id; + ste_attr.used_id_rtc_0 = &used_rtc_0_id; + ste_attr.used_id_rtc_1 = &used_rtc_1_id; + + common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB]; + + /* init an empty match STE which will always hit */ + ste_attr.wqe_ctrl = &wqe_ctrl; + ste_attr.wqe_data = &match_wqe_data; + ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer; + + /* Fill WQE control data */ + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] = + htonl(common_res->default_stc->nop_ctr.offset); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = + htonl(common_res->default_stc->nop_dw5.offset); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = + htonl(common_res->default_stc->nop_dw6.offset); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = + htonl(common_res->default_stc->nop_dw7.offset); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |= + 
htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29); + wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = + htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset); + + wqe_data_arr = (__force __be32 *)&range_wqe_data; + + ste_attr.range_wqe_data = &range_wqe_data; + ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer); + + /* Fill range matching fields, + * min/max_value_2 corresponds to match_dw_0 in its definer, + * min_value_2 sets in DW0 in the STE and max_value_2 sets in DW1 in the STE. + */ + wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16); + wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16); + + /* Send WQEs to FW */ + mlx5hws_send_stes_fw(ctx, queue, &ste_attr); + + /* Poll for completion */ + ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1, + MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC); + if (ret) { + mlx5hws_err(ctx, "Failed to drain control queue"); + goto error; + } + + mutex_unlock(&ctx->ctrl_lock); + + return 0; + +error: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +struct mlx5hws_action * +mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx, + u32 field, + struct mlx5_flow_table *hit_ft, + struct mlx5_flow_table *miss_ft, + u32 min, u32 max, u32 flags) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_matcher_action_ste *table_ste; + struct mlx5hws_action *hit_ft_action; + struct mlx5hws_definer *definer; + struct mlx5hws_action *action; + u32 miss_ft_id = miss_ft->id; + u32 hit_ft_id = hit_ft->id; + int ret; + + if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN || + min > 0xffff || max > 0xffff) { + mlx5hws_err(ctx, "Invalid match range parameters\n"); + return NULL; + } + + action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE); + if (!action) + return NULL; + + definer = hws_action_create_dest_match_range_definer(ctx); + if (!definer) + goto free_action; + + table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id); + if (!table_ste) + goto destroy_definer; + + hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags); + if (!hit_ft_action) + goto destroy_table_ste; + + ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste, + hit_ft_action, + definer, min, max); + if (ret) + goto destroy_hit_ft_action; + + action->range.table_ste = table_ste; + action->range.definer = definer; + action->range.hit_ft_action = hit_ft_action; + + /* Allocate STC for jumps to STE */ + mutex_lock(&ctx->ctrl_lock); + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + stc_attr.ste_table.ste = table_ste->ste; + stc_attr.ste_table.ste_pool = table_ste->pool; + stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer; + + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB, + &action->stc[MLX5HWS_TABLE_TYPE_FDB]); + if (ret) + goto error_unlock; + + mutex_unlock(&ctx->ctrl_lock); + + return action; + +error_unlock: + mutex_unlock(&ctx->ctrl_lock); +destroy_hit_ft_action: + mlx5hws_action_destroy(hit_ft_action); +destroy_table_ste: + hws_action_destroy_dest_match_range_table(ctx, table_ste); +destroy_definer: + mlx5hws_definer_free(ctx, definer); +free_action: + kfree(action); + mlx5hws_err(ctx, "Failed to create action dest match range"); + return NULL; +} + +struct mlx5hws_action * +mlx5hws_action_create_last(struct 
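/*
 * Usage sketch (editorial, not part of the patch): steering packets whose
 * outer packet length falls inside [64, 1500] to "hit_ft" and everything else
 * to "miss_ft" (both assumed to be existing mlx5_flow_table objects). Only
 * MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN is accepted and both bounds are limited
 * to 16 bits by the checks above:
 *
 *      struct mlx5hws_action *range;
 *
 *      range = mlx5hws_action_create_dest_match_range(ctx,
 *                      MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN,
 *                      hit_ft, miss_ft, 64, 1500,
 *                      MLX5HWS_ACTION_FLAG_HWS_FDB);
 */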
mlx5hws_context *ctx, u32 flags) +{ + return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST); +} + +struct mlx5hws_action * +mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx, + u32 sampler_id, u32 flags) +{ + mlx5hws_err(ctx, "Flow sampler action - unsupported\n"); + return NULL; +} + +static void hws_action_destroy_hws(struct mlx5hws_action *action) +{ + u32 ext_reformat_id; + bool shared_arg; + u32 obj_id; + u32 i; + + switch (action->type) { + case MLX5HWS_ACTION_TYP_MISS: + case MLX5HWS_ACTION_TYP_TAG: + case MLX5HWS_ACTION_TYP_DROP: + case MLX5HWS_ACTION_TYP_CTR: + case MLX5HWS_ACTION_TYP_TBL: + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: + case MLX5HWS_ACTION_TYP_ASO_METER: + case MLX5HWS_ACTION_TYP_PUSH_VLAN: + case MLX5HWS_ACTION_TYP_REMOVE_HEADER: + case MLX5HWS_ACTION_TYP_VPORT: + hws_action_destroy_stcs(action); + break; + case MLX5HWS_ACTION_TYP_POP_VLAN: + hws_action_destroy_stcs(action); + hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP); + break; + case MLX5HWS_ACTION_TYP_DEST_ARRAY: + hws_action_destroy_stcs(action); + mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island); + for (i = 0; i < action->dest_array.num_dest; i++) { + ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id; + if (ext_reformat_id) + mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev, + ext_reformat_id); + } + kfree(action->dest_array.dest_list); + break; + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: + case MLX5HWS_ACTION_TYP_MODIFY_HDR: + shared_arg = false; + for (i = 0; i < action->modify_header.num_of_patterns; i++) { + hws_action_destroy_stcs(&action[i]); + if (action[i].modify_header.num_of_actions > 1) { + mlx5hws_pat_put_pattern(action[i].ctx, + action[i].modify_header.pat_id); + /* Save shared arg object to be freed after */ + obj_id = action[i].modify_header.arg_id; + shared_arg = true; + } + } + if (shared_arg) + mlx5hws_arg_destroy(action->ctx, obj_id); + break; + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: + hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3); + for (i = 0; i < action->reformat.num_of_hdrs; i++) + hws_action_destroy_stcs(&action[i]); + mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id); + break; + case MLX5HWS_ACTION_TYP_INSERT_HEADER: + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: + for (i = 0; i < action->reformat.num_of_hdrs; i++) + hws_action_destroy_stcs(&action[i]); + mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id); + break; + case MLX5HWS_ACTION_TYP_RANGE: + hws_action_destroy_stcs(action); + hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste); + mlx5hws_definer_free(action->ctx, action->range.definer); + mlx5hws_action_destroy(action->range.hit_ft_action); + break; + case MLX5HWS_ACTION_TYP_LAST: + break; + default: + pr_warn("HWS: Invalid action type: %d\n", action->type); + } +} + +int mlx5hws_action_destroy(struct mlx5hws_action *action) +{ + hws_action_destroy_hws(action); + + kfree(action); + return 0; +} + +int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type) +__must_hold(&ctx->ctrl_lock) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_action_default_stc *default_stc; + int ret; + + if (ctx->common_res[tbl_type].default_stc) { + ctx->common_res[tbl_type].default_stc->refcount++; + return 0; + } + + default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL); + if (!default_stc) + return -ENOMEM; + + stc_attr.action_type = 
MLX5_IFC_STC_ACTION_TYPE_NOP; + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type, + &default_stc->nop_ctr); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate default counter STC\n"); + goto free_default_stc; + } + + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5; + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type, + &default_stc->nop_dw5); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n"); + goto free_nop_ctr; + } + + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6; + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type, + &default_stc->nop_dw6); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n"); + goto free_nop_dw5; + } + + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7; + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type, + &default_stc->nop_dw7); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n"); + goto free_nop_dw6; + } + + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW; + + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type, + &default_stc->default_hit); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate default allow STC\n"); + goto free_nop_dw7; + } + + ctx->common_res[tbl_type].default_stc = default_stc; + ctx->common_res[tbl_type].default_stc->refcount++; + + return 0; + +free_nop_dw7: + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7); +free_nop_dw6: + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6); +free_nop_dw5: + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5); +free_nop_ctr: + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr); +free_default_stc: + kfree(default_stc); + return ret; +} + +void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type) +__must_hold(&ctx->ctrl_lock) +{ + struct mlx5hws_action_default_stc *default_stc; + + default_stc = ctx->common_res[tbl_type].default_stc; + + default_stc = ctx->common_res[tbl_type].default_stc; + if (--default_stc->refcount) + return; + + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit); + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7); + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6); + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5); + mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr); + kfree(default_stc); + ctx->common_res[tbl_type].default_stc = NULL; +} + +static void hws_action_modify_write(struct mlx5hws_send_engine *queue, + u32 arg_idx, + u8 *arg_data, + u16 num_of_actions, + u32 nope_locations) +{ + u8 *new_arg_data = NULL; + int i, j; + + if (unlikely(nope_locations)) { + new_arg_data = kcalloc(num_of_actions, + MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL); + if (unlikely(!new_arg_data)) + return; + + for (i = 0, j = 0; i < num_of_actions; i++, j++) { + memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE); + if (BIT(i) & nope_locations) + j++; + } + } + + mlx5hws_arg_write(queue, NULL, arg_idx, + new_arg_data ? 
new_arg_data : arg_data, + num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE); + + kfree(new_arg_data); +} + +void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions) +{ + u8 *e_src; + int i; + + /* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes + * copy from end of src to the start of dst. + * move to the end, 2 is the leftover from 14B or 18B + */ + if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN) + e_src = src + MLX5HWS_ACTION_HDR_LEN_L2; + else + e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN; + + /* Move dst over the first remove action + zero data */ + dst += MLX5HWS_ACTION_DOUBLE_SIZE; + /* Move dst over the first insert ctrl action */ + dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2; + /* Actions: + * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b. + * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b. + * the loop is without the last insertion. + */ + for (i = 0; i < num_of_actions - 3; i++) { + e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE; + memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */ + dst += MLX5HWS_ACTION_DOUBLE_SIZE; + } + /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */ + e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2; + dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2; + memcpy(dst, e_src, 2); +} + +static int +hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res, + enum mlx5hws_context_shared_stc_type stc_type) +{ + return common_res->shared_stc[stc_type]->stc_chunk.offset; +} + +static struct mlx5hws_actions_wqe_setter * +hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter, + u8 req_flags) +{ + /* Use a new setter if requested flags are taken */ + while (setter->flags & req_flags) + setter++; + + /* Use current setter in required flags are not used */ + return setter; +} + +static void +hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply, + enum mlx5hws_action_stc_idx stc_idx, + u8 action_idx) +{ + struct mlx5hws_action *action = apply->rule_action[action_idx].action; + + apply->wqe_ctrl->stc_ix[stc_idx] = + htonl(action->stc[apply->tbl_type].offset); +} + +static void +hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + + rule_action = &apply->rule_action[setter->idx_double]; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr; + + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; +} + +static void +hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + struct mlx5hws_action *action; + u32 arg_sz, arg_idx; + u8 *single_action; + __be32 stc_idx; + + rule_action = &apply->rule_action[setter->idx_double]; + action = rule_action->action; + + stc_idx = htonl(action->stc[apply->tbl_type].offset); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; + + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; + + if (action->modify_header.num_of_actions == 1) { + if (action->modify_header.single_action_type == + MLX5_MODIFICATION_TYPE_COPY || + action->modify_header.single_action_type == + MLX5_MODIFICATION_TYPE_ADD_FIELD) { + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0; + 
return; + } + + if (action->flags & MLX5HWS_ACTION_FLAG_SHARED) + single_action = (u8 *)&action->modify_header.single_action; + else + single_action = rule_action->modify_header.data; + + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = + *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data); + } else { + /* Argument offset multiple with number of args per these actions */ + arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions); + arg_idx = rule_action->modify_header.offset * arg_sz; + + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); + + if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) { + apply->require_dep = 1; + hws_action_modify_write(apply->queue, + action->modify_header.arg_id + arg_idx, + rule_action->modify_header.data, + action->modify_header.num_of_actions, + action->modify_header.nope_locations); + } + } +} + +static void +hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + struct mlx5hws_action *action; + u32 arg_idx, arg_sz; + __be32 stc_idx; + + rule_action = &apply->rule_action[setter->idx_double]; + action = rule_action->action + rule_action->reformat.hdr_idx; + + /* Argument offset multiple on args required for header size */ + arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz); + arg_idx = rule_action->reformat.offset * arg_sz; + + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); + + stc_idx = htonl(action->stc[apply->tbl_type].offset); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; + + if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) { + apply->require_dep = 1; + mlx5hws_arg_write(apply->queue, NULL, + action->reformat.arg_id + arg_idx, + rule_action->reformat.data, + action->reformat.header_size); + } +} + +static void +hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + struct mlx5hws_action *action; + u32 arg_sz, arg_idx; + __be32 stc_idx; + + rule_action = &apply->rule_action[setter->idx_double]; + action = rule_action->action + rule_action->reformat.hdr_idx; + + /* Argument offset multiple on args required for num of actions */ + arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions); + arg_idx = rule_action->reformat.offset * arg_sz; + + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); + + stc_idx = htonl(action->stc[apply->tbl_type].offset); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; + + if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) { + apply->require_dep = 1; + mlx5hws_arg_decapl3_write(apply->queue, + action->modify_header.arg_id + arg_idx, + rule_action->reformat.data, + action->modify_header.num_of_actions); + } +} + +static void +hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + u32 exe_aso_ctrl; + u32 offset; + + rule_action = &apply->rule_action[setter->idx_double]; + + switch (rule_action->action->type) { + case MLX5HWS_ACTION_TYP_ASO_METER: + /* exe_aso_ctrl format: + * [STC only and reserved bits 29b][init_color 2b][meter_id 1b] + */ + offset = 
rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ; + exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ; + exe_aso_ctrl |= rule_action->aso_meter.init_color << + MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET; + break; + default: + mlx5hws_err(rule_action->action->ctx, + "Unsupported ASO action type: %d\n", rule_action->action->type); + return; + } + + /* aso_object_offset format: [24B] */ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset); + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl); + + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; +} + +static void +hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + + rule_action = &apply->rule_action[setter->idx_single]; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value); + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single); +} + +static void +hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + struct mlx5hws_rule_action *rule_action; + + rule_action = &apply->rule_action[setter->idx_ctr]; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset); + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr); +} + +static void +hws_action_setter_single(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single); +} + +static void +hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = + htonl(hws_action_get_shared_stc_offset(apply->common_res, + MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP)); +} + +static void +hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0; + hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit); +} + +static void +hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = + htonl(apply->common_res->default_stc->default_hit.offset); +} + +static void +hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc); +} + +static void +hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = + htonl(hws_action_get_shared_stc_offset(apply->common_res, + MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3)); +} + +static void +hws_action_setter_range(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + /* Always jump to index zero */ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0; + 
hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit); +} + +int mlx5hws_action_template_process(struct mlx5hws_action_template *at) +{ + struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1; + enum mlx5hws_action_type *action_type = at->action_type_arr; + struct mlx5hws_actions_wqe_setter *setter = at->setters; + struct mlx5hws_actions_wqe_setter *pop_setter = NULL; + struct mlx5hws_actions_wqe_setter *last_setter; + int i; + + /* Note: Given action combination must be valid */ + + /* Check if action were already processed */ + if (at->num_of_action_stes) + return 0; + + for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++) + setter[i].set_hit = &hws_action_setter_hit_next_action; + + /* The same action template setters can be used with jumbo or match + * STE, to support both cases we reserve the first setter for cases + * with jumbo STE to allow jump to the first action STE. + * This extra setter can be reduced in some cases on rule creation. + */ + setter = start_setter; + last_setter = start_setter; + + for (i = 0; i < at->num_actions; i++) { + switch (action_type[i]) { + case MLX5HWS_ACTION_TYP_DROP: + case MLX5HWS_ACTION_TYP_TBL: + case MLX5HWS_ACTION_TYP_DEST_ARRAY: + case MLX5HWS_ACTION_TYP_VPORT: + case MLX5HWS_ACTION_TYP_MISS: + /* Hit action */ + last_setter->flags |= ASF_HIT; + last_setter->set_hit = &hws_action_setter_hit; + last_setter->idx_hit = i; + break; + + case MLX5HWS_ACTION_TYP_RANGE: + last_setter->flags |= ASF_HIT; + last_setter->set_hit = &hws_action_setter_range; + last_setter->idx_hit = i; + break; + + case MLX5HWS_ACTION_TYP_POP_VLAN: + /* Single remove header to header */ + if (pop_setter) { + /* We have 2 pops, use the shared */ + pop_setter->set_single = &hws_action_setter_single_double_pop; + break; + } + setter = hws_action_setter_find_first(last_setter, + ASF_SINGLE1 | ASF_MODIFY | + ASF_INSERT); + setter->flags |= ASF_SINGLE1 | ASF_REMOVE; + setter->set_single = &hws_action_setter_single; + setter->idx_single = i; + pop_setter = setter; + break; + + case MLX5HWS_ACTION_TYP_PUSH_VLAN: + /* Double insert inline */ + setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); + setter->flags |= ASF_DOUBLE | ASF_INSERT; + setter->set_double = &hws_action_setter_push_vlan; + setter->idx_double = i; + break; + + case MLX5HWS_ACTION_TYP_MODIFY_HDR: + /* Double modify header list */ + setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); + setter->flags |= ASF_DOUBLE | ASF_MODIFY; + setter->set_double = &hws_action_setter_modify_header; + setter->idx_double = i; + break; + + case MLX5HWS_ACTION_TYP_ASO_METER: + /* Double ASO action */ + setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE); + setter->flags |= ASF_DOUBLE; + setter->set_double = &hws_action_setter_aso; + setter->idx_double = i; + break; + + case MLX5HWS_ACTION_TYP_REMOVE_HEADER: + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: + /* Single remove header to header */ + setter = hws_action_setter_find_first(last_setter, + ASF_SINGLE1 | ASF_MODIFY); + setter->flags |= ASF_SINGLE1 | ASF_REMOVE; + setter->set_single = &hws_action_setter_single; + setter->idx_single = i; + break; + + case MLX5HWS_ACTION_TYP_INSERT_HEADER: + case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: + /* Double insert header with pointer */ + setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); + setter->flags |= ASF_DOUBLE | ASF_INSERT; + setter->set_double = &hws_action_setter_insert_ptr; + setter->idx_double = i; + break; + + case 
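/*
 * Worked example (editorial): processing the action list
 * { CTR, MODIFY_HDR, TBL, LAST } with the loop above. The counter takes the
 * ASF_CTR slot of the first usable setter, the modify header takes the
 * ASF_DOUBLE slot of that same setter, and the table becomes the ASF_HIT of
 * the last setter used, so num_of_action_stes ends up as 1. only_term stays
 * false because that setter carries ASF_DOUBLE | ASF_MODIFY beyond the
 * counter/hit flags.
 */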
MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: + /* Single remove + Double insert header with pointer */ + setter = hws_action_setter_find_first(last_setter, + ASF_SINGLE1 | ASF_DOUBLE); + setter->flags |= ASF_SINGLE1 | ASF_DOUBLE; + setter->set_double = &hws_action_setter_insert_ptr; + setter->idx_double = i; + setter->set_single = &hws_action_setter_common_decap; + setter->idx_single = i; + break; + + case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: + /* Double modify header list with remove and push inline */ + setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE); + setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT; + setter->set_double = &hws_action_setter_tnl_l3_to_l2; + setter->idx_double = i; + break; + + case MLX5HWS_ACTION_TYP_TAG: + /* Single TAG action, search for any room from the start */ + setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1); + setter->flags |= ASF_SINGLE1; + setter->set_single = &hws_action_setter_tag; + setter->idx_single = i; + break; + + case MLX5HWS_ACTION_TYP_CTR: + /* Control counter action + * TODO: Current counter executed first. Support is needed + * for single ation counter action which is done last. + * Example: Decap + CTR + */ + setter = hws_action_setter_find_first(start_setter, ASF_CTR); + setter->flags |= ASF_CTR; + setter->set_ctr = &hws_action_setter_ctrl_ctr; + setter->idx_ctr = i; + break; + default: + pr_warn("HWS: Invalid action type in processingaction template: action_type[%d]=%d\n", + i, action_type[i]); + return -EOPNOTSUPP; + } + + last_setter = max(setter, last_setter); + } + + /* Set default hit on the last STE if no hit action provided */ + if (!(last_setter->flags & ASF_HIT)) + last_setter->set_hit = &hws_action_setter_default_hit; + + at->num_of_action_stes = last_setter - start_setter + 1; + + /* Check if action template doesn't require any action DWs */ + at->only_term = (at->num_of_action_stes == 1) && + !(last_setter->flags & ~(ASF_CTR | ASF_HIT)); + + return 0; +} + +struct mlx5hws_action_template * +mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]) +{ + struct mlx5hws_action_template *at; + u8 num_actions = 0; + int i; + + at = kzalloc(sizeof(*at), GFP_KERNEL); + if (!at) + return NULL; + + while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST) + ; + + at->num_actions = num_actions - 1; + at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL); + if (!at->action_type_arr) + goto free_at; + + for (i = 0; i < num_actions; i++) + at->action_type_arr[i] = action_type[i]; + + return at; + +free_at: + kfree(at); + return NULL; +} + +int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at) +{ + kfree(at->action_type_arr); + kfree(at); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h new file mode 100644 index 00000000000000..bf5c1b2410060c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h @@ -0,0 +1,307 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_ACTION_H_ +#define MLX5HWS_ACTION_H_ + +/* Max number of STEs needed for a rule (including match) */ +#define MLX5HWS_ACTION_MAX_STE 20 + +/* Max number of internal subactions of ipv6_ext */ +#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4 + +enum mlx5hws_action_stc_idx { + MLX5HWS_ACTION_STC_IDX_CTRL = 0, + MLX5HWS_ACTION_STC_IDX_HIT = 1, + 
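/*
 * Usage sketch (editorial, not from the patch): building and releasing an
 * action template for "modify header, then counter, then jump to table". The
 * array must be terminated with MLX5HWS_ACTION_TYP_LAST, and the template is
 * presumably run through mlx5hws_action_template_process() before rules are
 * created against it:
 *
 *      enum mlx5hws_action_type types[] = {
 *              MLX5HWS_ACTION_TYP_MODIFY_HDR,
 *              MLX5HWS_ACTION_TYP_CTR,
 *              MLX5HWS_ACTION_TYP_TBL,
 *              MLX5HWS_ACTION_TYP_LAST,
 *      };
 *      struct mlx5hws_action_template *at;
 *
 *      at = mlx5hws_action_template_create(types);
 *      if (!at)
 *              return -ENOMEM;
 *      ...
 *      mlx5hws_action_template_destroy(at);
 */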
+ MLX5HWS_ACTION_STC_IDX_DW5 = 2,
+ MLX5HWS_ACTION_STC_IDX_DW6 = 3,
+ MLX5HWS_ACTION_STC_IDX_DW7 = 4,
+ MLX5HWS_ACTION_STC_IDX_MAX = 5,
+ /* STC Jumbo STE combo: CTR, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
+ /* STC combo1: CTR, SINGLE, DOUBLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
+ /* STC combo2: CTR, 3 x SINGLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
+ /* STC combo3: CTR, TRIPLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
+};
+
+enum mlx5hws_action_offset {
+ MLX5HWS_ACTION_OFFSET_DW0 = 0,
+ MLX5HWS_ACTION_OFFSET_DW5 = 5,
+ MLX5HWS_ACTION_OFFSET_DW6 = 6,
+ MLX5HWS_ACTION_OFFSET_DW7 = 7,
+ MLX5HWS_ACTION_OFFSET_HIT = 3,
+ MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
+};
+
+enum {
+ MLX5HWS_ACTION_DOUBLE_SIZE = 8,
+ MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
+ MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
+ MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
+ MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
+ DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
+ DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
+};
+
+enum mlx5hws_action_setter_flag {
+ ASF_SINGLE1 = 1 << 0,
+ ASF_SINGLE2 = 1 << 1,
+ ASF_SINGLE3 = 1 << 2,
+ ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
+ ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
+ ASF_INSERT = 1 << 3,
+ ASF_REMOVE = 1 << 4,
+ ASF_MODIFY = 1 << 5,
+ ASF_CTR = 1 << 6,
+ ASF_HIT = 1 << 7,
+};
+
+struct mlx5hws_action_default_stc {
+ struct mlx5hws_pool_chunk nop_ctr;
+ struct mlx5hws_pool_chunk nop_dw5;
+ struct mlx5hws_pool_chunk nop_dw6;
+ struct mlx5hws_pool_chunk nop_dw7;
+ struct mlx5hws_pool_chunk default_hit;
+ u32 refcount;
+};
+
+struct mlx5hws_action_shared_stc {
+ struct mlx5hws_pool_chunk stc_chunk;
+ u32 refcount;
+};
+
+struct mlx5hws_actions_apply_data {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_rule_action *rule_action;
+ __be32 *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ u32 jump_to_action_stc;
+ struct mlx5hws_context_common_res *common_res;
+ enum mlx5hws_table_type tbl_type;
+ u32 next_direct_idx;
+ u8 require_dep;
+};
+
+struct mlx5hws_actions_wqe_setter;
+
+typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter);
+
+struct mlx5hws_actions_wqe_setter {
+ mlx5hws_action_setter_fp set_single;
+ mlx5hws_action_setter_fp set_double;
+ mlx5hws_action_setter_fp set_triple;
+ mlx5hws_action_setter_fp set_hit;
+ mlx5hws_action_setter_fp set_ctr;
+ u8 idx_single;
+ u8 idx_double;
+ u8 idx_triple;
+ u8 idx_ctr;
+ u8 idx_hit;
+ u8 stage_idx;
+ u8 flags;
+};
+
+struct mlx5hws_action_template {
+ struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
+ enum mlx5hws_action_type *action_type_arr;
+ u8 num_of_action_stes;
+ u8 num_actions;
+ u8 only_term;
+};
+
+struct mlx5hws_action {
+ u8 type;
+ u8 flags;
+ struct mlx5hws_context *ctx;
+ union {
+ struct {
+ struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ union {
+ struct {
+ u32 pat_id;
+ u32 arg_id;
+ __be64 single_action;
+ u32 nope_locations;
+ u8 num_of_patterns;
+ u8 single_action_type;
+ u8 num_of_actions;
+ u8 max_num_of_actions;
+ u8 require_reparse;
+ } modify_header;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u16 max_hdr_sz;
+ u8 num_of_hdrs;
+ u8 anchor;
+ u8 e_anchor;
+ u8 offset;
+ bool encap;
+ u8 require_reparse;
+ } reformat;
+ struct {
+ u32 obj_id;
+ u8 return_reg_id;
+ } aso;
+ 
struct { + u16 vport_num; + u16 esw_owner_vhca_id; + bool esw_owner_vhca_id_valid; + } vport; + struct { + u32 obj_id; + } dest_obj; + struct { + struct mlx5hws_cmd_forward_tbl *fw_island; + size_t num_dest; + struct mlx5hws_cmd_set_fte_dest *dest_list; + } dest_array; + struct { + u8 type; + u8 start_anchor; + u8 end_anchor; + u8 num_of_words; + bool decap; + } insert_hdr; + struct { + /* PRM start anchor from which header will be removed */ + u8 anchor; + /* Header remove offset in bytes, from the start + * anchor to the location where remove header starts. + */ + u8 offset; + /* Indicates the removed header size in bytes */ + size_t size; + } remove_header; + struct { + struct mlx5hws_matcher_action_ste *table_ste; + struct mlx5hws_action *hit_ft_action; + struct mlx5hws_definer *definer; + } range; + }; + }; + + struct ibv_flow_action *flow_action; + u32 obj_id; + struct ibv_qp *qp; + }; +}; + +const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type); + +int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, + u8 tbl_type); + +void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, + u8 tbl_type); + +void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, + u16 num_of_actions); + +int mlx5hws_action_template_process(struct mlx5hws_action_template *at); + +bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx, + enum mlx5hws_action_type *user_actions, + enum mlx5hws_table_type table_type); + +int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx, + struct mlx5hws_cmd_stc_modify_attr *stc_attr, + u32 table_type, + struct mlx5hws_pool_chunk *stc); + +void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx, + u32 table_type, + struct mlx5hws_pool_chunk *stc); + +static inline void +mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = + htonl(apply->common_res->default_stc->nop_dw5.offset); +} + +static inline void +mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = + htonl(apply->common_res->default_stc->nop_dw6.offset); + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = + htonl(apply->common_res->default_stc->nop_dw7.offset); +} + +static inline void +mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter) +{ + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] = + htonl(apply->common_res->default_stc->nop_ctr.offset); +} + +static inline void +mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply, + struct mlx5hws_actions_wqe_setter *setter, + bool is_jumbo) +{ + u8 num_of_actions; + + /* Set control counter */ + if (setter->set_ctr) + setter->set_ctr(apply, setter); + else + mlx5hws_action_setter_default_ctr(apply, setter); + + if (!is_jumbo) { + if (unlikely(setter->set_triple)) { + /* Set triple on match */ + setter->set_triple(apply, setter); + num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3; + } else { + /* Set single and double on match */ + if (setter->set_single) + setter->set_single(apply, setter); + else + mlx5hws_action_setter_default_single(apply, setter); + + if 
(setter->set_double) + setter->set_double(apply, setter); + else + mlx5hws_action_setter_default_double(apply, setter); + + num_of_actions = setter->set_double ? + MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 : + MLX5HWS_ACTION_STC_IDX_LAST_COMBO2; + } + } else { + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0; + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0; + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0; + num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE; + } + + /* Set next/final hit action */ + setter->set_hit(apply, setter); + + /* Set number of actions */ + apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |= + htonl(num_of_actions << 29); +} + +#endif /* MLX5HWS_ACTION_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c new file mode 100644 index 00000000000000..e6ed66202a4040 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" +#include "mlx5hws_buddy.h" + +static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order) +{ + int i, s, ret = 0; + + buddy->max_order = max_order; + + buddy->bitmap = kcalloc(buddy->max_order + 1, + sizeof(*buddy->bitmap), + GFP_KERNEL); + if (!buddy->bitmap) + return -ENOMEM; + + buddy->num_free = kcalloc(buddy->max_order + 1, + sizeof(*buddy->num_free), + GFP_KERNEL); + if (!buddy->num_free) { + ret = -ENOMEM; + goto err_out_free_bits; + } + + for (i = 0; i <= (int)buddy->max_order; ++i) { + s = 1 << (buddy->max_order - i); + + buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL); + if (!buddy->bitmap[i]) { + ret = -ENOMEM; + goto err_out_free_num_free; + } + } + + bitmap_set(buddy->bitmap[buddy->max_order], 0, 1); + buddy->num_free[buddy->max_order] = 1; + + return 0; + +err_out_free_num_free: + for (i = 0; i <= (int)buddy->max_order; ++i) + bitmap_free(buddy->bitmap[i]); + + kfree(buddy->num_free); + +err_out_free_bits: + kfree(buddy->bitmap); + return ret; +} + +struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order) +{ + struct mlx5hws_buddy_mem *buddy; + + buddy = kzalloc(sizeof(*buddy), GFP_KERNEL); + if (!buddy) + return NULL; + + if (hws_buddy_init(buddy, max_order)) + goto free_buddy; + + return buddy; + +free_buddy: + kfree(buddy); + return NULL; +} + +void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy) +{ + int i; + + for (i = 0; i <= (int)buddy->max_order; ++i) + bitmap_free(buddy->bitmap[i]); + + kfree(buddy->num_free); + kfree(buddy->bitmap); +} + +static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy, + u32 start_order, + u32 *segment, + u32 *order) +{ + unsigned int seg, order_iter, m; + + for (order_iter = start_order; + order_iter <= buddy->max_order; ++order_iter) { + if (!buddy->num_free[order_iter]) + continue; + + m = 1 << (buddy->max_order - order_iter); + seg = find_first_bit(buddy->bitmap[order_iter], m); + + if (WARN(seg >= m, + "ICM Buddy: failed finding free mem for order %d\n", + order_iter)) + return -ENOMEM; + + break; + } + + if (order_iter > buddy->max_order) + return -ENOMEM; + + *segment = seg; + *order = order_iter; + return 0; +} + +int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order) +{ 
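+ /* Find a free segment of at least the requested order, splitting larger
+ * blocks as needed; returns the segment index or -ENOMEM
+ */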
+ u32 seg, order_iter, err; + + err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter); + if (err) + return err; + + bitmap_clear(buddy->bitmap[order_iter], seg, 1); + --buddy->num_free[order_iter]; + + while (order_iter > order) { + --order_iter; + seg <<= 1; + bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1); + ++buddy->num_free[order_iter]; + } + + seg <<= order; + + return seg; +} + +void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order) +{ + seg >>= order; + + while (test_bit(seg ^ 1, buddy->bitmap[order])) { + bitmap_clear(buddy->bitmap[order], seg ^ 1, 1); + --buddy->num_free[order]; + seg >>= 1; + ++order; + } + + bitmap_set(buddy->bitmap[order], seg, 1); + ++buddy->num_free[order]; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h new file mode 100644 index 00000000000000..338c44bbedaff6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_BUDDY_H_ +#define MLX5HWS_BUDDY_H_ + +struct mlx5hws_buddy_mem { + unsigned long **bitmap; + unsigned int *num_free; + u32 max_order; +}; + +struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order); + +void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy); + +int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order); + +void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order); + +#endif /* MLX5HWS_BUDDY_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c new file mode 100644 index 00000000000000..bd52b05db36704 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c @@ -0,0 +1,997 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx) +{ + /* assign random queue */ + return get_random_u8() % mlx5hws_bwc_queues(ctx); +} + +static u16 +hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id) +{ + return min(ctx->send_queue[queue_id].num_entries / 2, + MLX5HWS_BWC_MATCHER_REHASH_BURST_TH); +} + +static struct mutex * +hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx) +{ + return &ctx->bwc_send_queue_locks[idx]; +} + +static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx) +{ + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mutex *queue_lock; /* Protect the queue */ + int i; + + for (i = 0; i < bwc_queues; i++) { + queue_lock = hws_bwc_get_queue_lock(ctx, i); + mutex_lock(queue_lock); + } +} + +static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx) +{ + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mutex *queue_lock; /* Protect the queue */ + int i = bwc_queues; + + while (i--) { + queue_lock = hws_bwc_get_queue_lock(ctx, i); + mutex_unlock(queue_lock); + } +} + +static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr, + u32 priority, + u8 size_log) +{ + memset(attr, 0, sizeof(*attr)); + + attr->priority = priority; + attr->optimize_using_rule_idx = 0; + attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE; + attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY; + attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH; + 
attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH; + attr->rule.num_log = size_log; + attr->resizable = true; + attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM; +} + +int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask, + enum mlx5hws_action_type action_types[]) +{ + enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST }; + struct mlx5hws_context *ctx = table->ctx; + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mlx5hws_matcher_attr attr = {0}; + int i; + + bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL); + if (!bwc_matcher->rules) + goto err; + + for (i = 0; i < bwc_queues; i++) + INIT_LIST_HEAD(&bwc_matcher->rules[i]); + + hws_bwc_matcher_init_attr(&attr, + priority, + MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG); + + bwc_matcher->priority = priority; + bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG; + + /* create dummy action template */ + bwc_matcher->at[0] = + mlx5hws_action_template_create(action_types ? + action_types : init_action_types); + if (!bwc_matcher->at[0]) { + mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n"); + goto free_bwc_matcher_rules; + } + + bwc_matcher->num_of_at = 1; + + bwc_matcher->mt = mlx5hws_match_template_create(ctx, + mask->match_buf, + mask->match_sz, + match_criteria_enable); + if (!bwc_matcher->mt) { + mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n"); + goto free_at; + } + + bwc_matcher->matcher = mlx5hws_matcher_create(table, + &bwc_matcher->mt, 1, + &bwc_matcher->at[0], + bwc_matcher->num_of_at, + &attr); + if (!bwc_matcher->matcher) { + mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n"); + goto free_mt; + } + + return 0; + +free_mt: + mlx5hws_match_template_destroy(bwc_matcher->mt); +free_at: + mlx5hws_action_template_destroy(bwc_matcher->at[0]); +free_bwc_matcher_rules: + kfree(bwc_matcher->rules); +err: + return -EINVAL; +} + +struct mlx5hws_bwc_matcher * +mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask) +{ + struct mlx5hws_bwc_matcher *bwc_matcher; + bool is_complex; + int ret; + + if (!mlx5hws_context_bwc_supported(table->ctx)) { + mlx5hws_err(table->ctx, + "BWC matcher: context created w/o BWC API compatibility\n"); + return NULL; + } + + bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL); + if (!bwc_matcher) + return NULL; + + /* Check if the required match params can be all matched + * in single STE, otherwise complex matcher is needed. 
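+ * Note that complex matchers are not supported yet and will fail with -EOPNOTSUPP.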
+ */ + + is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask); + if (is_complex) + ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher, + table, + priority, + match_criteria_enable, + mask); + else + ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher, + table, + priority, + match_criteria_enable, + mask, + NULL); + if (ret) + goto free_bwc_matcher; + + return bwc_matcher; + +free_bwc_matcher: + kfree(bwc_matcher); + + return NULL; +} + +int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + int i; + + mlx5hws_matcher_destroy(bwc_matcher->matcher); + bwc_matcher->matcher = NULL; + + for (i = 0; i < bwc_matcher->num_of_at; i++) + mlx5hws_action_template_destroy(bwc_matcher->at[i]); + + mlx5hws_match_template_destroy(bwc_matcher->mt); + kfree(bwc_matcher->rules); + + return 0; +} + +int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + if (bwc_matcher->num_of_rules) + mlx5hws_err(bwc_matcher->matcher->tbl->ctx, + "BWC matcher destroy: matcher still has %d rules\n", + bwc_matcher->num_of_rules); + + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); + + kfree(bwc_matcher); + return 0; +} + +static int hws_bwc_queue_poll(struct mlx5hws_context *ctx, + u16 queue_id, + u32 *pending_rules, + bool drain) +{ + struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH]; + u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id); + bool got_comp = *pending_rules >= burst_th; + bool queue_full; + int err = 0; + int ret; + int i; + + /* Check if there are any completions at all */ + if (!got_comp && !drain) + return 0; + + queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]); + while (queue_full || ((got_comp || drain) && *pending_rules)) { + ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th); + if (unlikely(ret < 0)) { + mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n", + queue_id, ret); + return -EINVAL; + } + + if (ret) { + (*pending_rules) -= ret; + for (i = 0; i < ret; i++) { + if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) { + mlx5hws_err(ctx, + "BWC poll error: polling queue %d returned completion with error\n", + queue_id); + err = -EINVAL; + } + } + queue_full = false; + } + + got_comp = !!ret; + } + + return err; +} + +void +mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher, + u16 bwc_queue_idx, + u32 flow_source, + struct mlx5hws_rule_attr *rule_attr) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + + /* no use of INSERT_BY_INDEX in bwc rule */ + rule_attr->rule_idx = 0; + + /* notify HW at each rule insertion/deletion */ + rule_attr->burst = 0; + + /* We don't need user data, but the API requires it to exist */ + rule_attr->user_data = (void *)0xFACADE; + + rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx); + rule_attr->flow_source = flow_source; +} + +struct mlx5hws_bwc_rule * +mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_bwc_rule *bwc_rule; + + bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL); + if (unlikely(!bwc_rule)) + goto out_err; + + bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL); + if (unlikely(!bwc_rule->rule)) + goto free_rule; + + bwc_rule->bwc_matcher = bwc_matcher; + return bwc_rule; + +free_rule: + kfree(bwc_rule); +out_err: + return NULL; +} + +void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule) +{ + if (likely(bwc_rule->rule)) + kfree(bwc_rule->rule); + kfree(bwc_rule); +} + +static void 
hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + + bwc_matcher->num_of_rules++; + bwc_rule->bwc_queue_idx = idx; + list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]); +} + +static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + + bwc_matcher->num_of_rules--; + list_del_init(&bwc_rule->list_node); +} + +static int +hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_attr *attr) +{ + return mlx5hws_rule_destroy(bwc_rule->rule, attr); +} + +static int +hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_attr *rule_attr) +{ + struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_flow_op_result completion; + int ret; + + ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr); + if (unlikely(ret)) + return ret; + + do { + ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1); + } while (ret != 1); + + if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS || + (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED && + bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) { + mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n", + completion.status, bwc_rule->rule->status); + return -EINVAL; + } + + return 0; +} + +int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + u16 idx = bwc_rule->bwc_queue_idx; + struct mlx5hws_rule_attr attr; + struct mutex *queue_lock; /* Protect the queue */ + int ret; + + mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr); + + queue_lock = hws_bwc_get_queue_lock(ctx, idx); + + mutex_lock(queue_lock); + + ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr); + hws_bwc_rule_list_remove(bwc_rule); + + mutex_unlock(queue_lock); + + return ret; +} + +int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule) +{ + int ret; + + ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule); + + mlx5hws_bwc_rule_free(bwc_rule); + return ret; +} + +static int +hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *rule_attr) +{ + return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher, + 0, /* only one match template supported */ + match_param, + at_idx, + rule_actions, + rule_attr, + bwc_rule->rule); +} + +static int +hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *rule_attr) + +{ + struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx; + u32 expected_completions = 1; + int ret; + + ret = hws_bwc_rule_create_async(bwc_rule, match_param, + at_idx, rule_actions, + rule_attr); + if (unlikely(ret)) + return ret; + + ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true); + + return ret; +} + +static int +hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *rule_attr) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + u32 expected_completions = 1; + int 
ret; + + ret = mlx5hws_rule_action_update(bwc_rule->rule, + at_idx, + rule_actions, + rule_attr); + if (unlikely(ret)) + return ret; + + ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true); + if (unlikely(ret)) + mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret); + + return ret; +} + +static bool +hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps; + + return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >= + caps->ste_alloc_log_max - 1; +} + +static bool +hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher, + u32 num_of_rules) +{ + if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) + return false; + + if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >= + (1UL << bwc_matcher->size_log))) + return true; + + return false; +} + +static void +hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[], + enum mlx5hws_action_type action_types[]) +{ + int i = 0; + + for (i = 0; + rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST); + i++) { + action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type; + } + + action_types[i] = MLX5HWS_ACTION_TYP_LAST; +} + +static int +hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_rule_action rule_actions[]) +{ + enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS]; + + hws_bwc_rule_actions_to_action_types(rule_actions, action_types); + + bwc_matcher->at[bwc_matcher->num_of_at] = + mlx5hws_action_template_create(action_types); + + if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at])) + return -ENOMEM; + + bwc_matcher->num_of_at++; + return 0; +} + +static int +hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_cmd_query_caps *caps = ctx->caps; + + if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) { + mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n", + caps->rtc_log_depth_max); + return -ENOMEM; + } + + bwc_matcher->size_log = + min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP, + caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH); + + return 0; +} + +static int +hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_rule_action rule_actions[]) +{ + enum mlx5hws_action_type *action_type_arr; + int i, j; + + /* start from index 1 - first action template is a dummy */ + for (i = 1; i < bwc_matcher->num_of_at; i++) { + j = 0; + action_type_arr = bwc_matcher->at[i]->action_type_arr; + + while (rule_actions[j].action && + rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) { + if (action_type_arr[j] != rule_actions[j].action->type) + break; + j++; + } + + if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST && + (!rule_actions[j].action || + rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST)) + return i; + } + + return -1; +} + +static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mlx5hws_bwc_rule **bwc_rules; + struct mlx5hws_rule_attr rule_attr; + u32 *pending_rules; + int i, j, ret = 0; + bool all_done; + u16 burst_th; + + mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr); + + pending_rules = 
kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL); + if (!pending_rules) + return -ENOMEM; + + bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL); + if (!bwc_rules) { + ret = -ENOMEM; + goto free_pending_rules; + } + + for (i = 0; i < bwc_queues; i++) { + if (list_empty(&bwc_matcher->rules[i])) + bwc_rules[i] = NULL; + else + bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i], + struct mlx5hws_bwc_rule, + list_node); + } + + do { + all_done = true; + + for (i = 0; i < bwc_queues; i++) { + rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i); + burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id); + + for (j = 0; j < burst_th && bwc_rules[i]; j++) { + rule_attr.burst = !!((j + 1) % burst_th); + ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher, + bwc_rules[i]->rule, + &rule_attr); + if (unlikely(ret)) { + mlx5hws_err(ctx, + "Moving BWC rule failed during rehash (%d)\n", + ret); + goto free_bwc_rules; + } + + all_done = false; + pending_rules[i]++; + bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node, + &bwc_matcher->rules[i]) ? + NULL : list_next_entry(bwc_rules[i], list_node); + + ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id, + &pending_rules[i], false); + if (unlikely(ret)) + goto free_bwc_rules; + } + } + } while (!all_done); + + /* drain all the bwc queues */ + for (i = 0; i < bwc_queues; i++) { + if (pending_rules[i]) { + u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i); + + mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]); + ret = hws_bwc_queue_poll(ctx, queue_id, + &pending_rules[i], true); + if (unlikely(ret)) + goto free_bwc_rules; + } + } + +free_bwc_rules: + kfree(bwc_rules); +free_pending_rules: + kfree(pending_rules); + + return ret; +} + +static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + return hws_bwc_matcher_move_all_simple(bwc_matcher); +} + +static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_matcher_attr matcher_attr = {0}; + struct mlx5hws_matcher *old_matcher; + struct mlx5hws_matcher *new_matcher; + int ret; + + hws_bwc_matcher_init_attr(&matcher_attr, + bwc_matcher->priority, + bwc_matcher->size_log); + + old_matcher = bwc_matcher->matcher; + new_matcher = mlx5hws_matcher_create(old_matcher->tbl, + &bwc_matcher->mt, 1, + bwc_matcher->at, + bwc_matcher->num_of_at, + &matcher_attr); + if (!new_matcher) { + mlx5hws_err(ctx, "Rehash error: matcher creation failed\n"); + return -ENOMEM; + } + + ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher); + if (ret) { + mlx5hws_err(ctx, "Rehash error: failed setting resize target\n"); + return ret; + } + + ret = hws_bwc_matcher_move_all(bwc_matcher); + if (ret) { + mlx5hws_err(ctx, "Rehash error: moving rules failed\n"); + return -ENOMEM; + } + + bwc_matcher->matcher = new_matcher; + mlx5hws_matcher_destroy(old_matcher); + + return 0; +} + +static int +hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + u32 num_of_rules; + int ret; + + /* If the current matcher size is already at its max size, we can't + * do the rehash. Skip it and try adding the rule again - perhaps + * there was some change. + */ + if (hws_bwc_matcher_size_maxed_out(bwc_matcher)) + return 0; + + /* It is possible that other rule has already performed rehash. + * Need to check again if we really need rehash. + * If the reason for rehash was size, but not any more - skip rehash. 
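+ * (e.g. another thread may have already rehashed while we waited for the queue locks).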
+ */ + num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED); + if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules)) + return 0; + + /* Now we're done all the checking - do the rehash: + * - extend match RTC size + * - create new matcher + * - move all the rules to the new matcher + * - destroy the old matcher + */ + + ret = hws_bwc_matcher_extend_size(bwc_matcher); + if (ret) + return ret; + + return hws_bwc_matcher_move(bwc_matcher); +} + +static int +hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + /* Rehash by action template doesn't require any additional checking. + * The bwc_matcher already contains the new action template. + * Just do the usual rehash: + * - create new matcher + * - move all the rules to the new matcher + * - destroy the old matcher + */ + return hws_bwc_matcher_move(bwc_matcher); +} + +int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, + u32 *match_param, + struct mlx5hws_rule_action rule_actions[], + u32 flow_source, + u16 bwc_queue_idx) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_rule_attr rule_attr; + struct mutex *queue_lock; /* Protect the queue */ + u32 num_of_rules; + int ret = 0; + int at_idx; + + mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr); + + queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx); + + mutex_lock(queue_lock); + + /* check if rehash needed due to missing action template */ + at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + if (unlikely(at_idx < 0)) { + /* we need to extend BWC matcher action templates array */ + mutex_unlock(queue_lock); + hws_bwc_lock_all_queues(ctx); + + ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions); + if (unlikely(ret)) { + hws_bwc_unlock_all_queues(ctx); + return ret; + } + + /* action templates array was extended, we need the last idx */ + at_idx = bwc_matcher->num_of_at - 1; + + ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher, + bwc_matcher->at[at_idx]); + if (unlikely(ret)) { + /* Action template attach failed, possibly due to + * requiring more action STEs. + * Need to attempt creating new matcher with all + * the action templates, including the new one. 
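+ * hws_bwc_matcher_rehash_at() below recreates the matcher with the full action template array.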
+ */ + ret = hws_bwc_matcher_rehash_at(bwc_matcher); + if (unlikely(ret)) { + mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]); + bwc_matcher->at[at_idx] = NULL; + bwc_matcher->num_of_at--; + + hws_bwc_unlock_all_queues(ctx); + + mlx5hws_err(ctx, + "BWC rule insertion: rehash AT failed (%d)\n", ret); + return ret; + } + } + + hws_bwc_unlock_all_queues(ctx); + mutex_lock(queue_lock); + } + + /* check if number of rules require rehash */ + num_of_rules = bwc_matcher->num_of_rules; + + if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) { + mutex_unlock(queue_lock); + + hws_bwc_lock_all_queues(ctx); + ret = hws_bwc_matcher_rehash_size(bwc_matcher); + hws_bwc_unlock_all_queues(ctx); + + if (ret) { + mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n", + bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP, + bwc_matcher->size_log, + ret); + return ret; + } + + mutex_lock(queue_lock); + } + + ret = hws_bwc_rule_create_sync(bwc_rule, + match_param, + at_idx, + rule_actions, + &rule_attr); + if (likely(!ret)) { + hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx); + mutex_unlock(queue_lock); + return 0; /* rule inserted successfully */ + } + + /* At this point the rule wasn't added. + * It could be because there was collision, or some other problem. + * If we don't dive deeper than API, the only thing we know is that + * the status of completion is RTE_FLOW_OP_ERROR. + * Try rehash by size and insert rule again - last chance. + */ + + mutex_unlock(queue_lock); + + hws_bwc_lock_all_queues(ctx); + ret = hws_bwc_matcher_rehash_size(bwc_matcher); + hws_bwc_unlock_all_queues(ctx); + + if (ret) { + mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret); + return ret; + } + + /* Rehash done, but we still have that pesky rule to add */ + mutex_lock(queue_lock); + + ret = hws_bwc_rule_create_sync(bwc_rule, + match_param, + at_idx, + rule_actions, + &rule_attr); + + if (unlikely(ret)) { + mutex_unlock(queue_lock); + mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret); + return ret; + } + + hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx); + mutex_unlock(queue_lock); + + return 0; +} + +struct mlx5hws_bwc_rule * +mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_match_parameters *params, + u32 flow_source, + struct mlx5hws_rule_action rule_actions[]) +{ + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_bwc_rule *bwc_rule; + u16 bwc_queue_idx; + int ret; + + if (unlikely(!mlx5hws_context_bwc_supported(ctx))) { + mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n"); + return NULL; + } + + bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher); + if (unlikely(!bwc_rule)) + return NULL; + + bwc_queue_idx = hws_bwc_gen_queue_idx(ctx); + + ret = mlx5hws_bwc_rule_create_simple(bwc_rule, + params->match_buf, + rule_actions, + flow_source, + bwc_queue_idx); + if (unlikely(ret)) { + mlx5hws_bwc_rule_free(bwc_rule); + return NULL; + } + + return bwc_rule; +} + +static int +hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_action rule_actions[]) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_rule_attr rule_attr; + struct mutex *queue_lock; /* Protect the queue */ + int at_idx, ret; + u16 idx; + + idx = bwc_rule->bwc_queue_idx; + + mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr); + queue_lock = hws_bwc_get_queue_lock(ctx, 
idx); + + mutex_lock(queue_lock); + + /* check if rehash needed due to missing action template */ + at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + if (unlikely(at_idx < 0)) { + /* we need to extend BWC matcher action templates array */ + mutex_unlock(queue_lock); + hws_bwc_lock_all_queues(ctx); + + /* check again - perhaps other thread already did extend_at */ + at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + if (likely(at_idx < 0)) { + ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions); + if (unlikely(ret)) { + hws_bwc_unlock_all_queues(ctx); + mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret); + return -EINVAL; + } + + /* action templates array was extended, we need the last idx */ + at_idx = bwc_matcher->num_of_at - 1; + + ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher, + bwc_matcher->at[at_idx]); + if (unlikely(ret)) { + /* Action template attach failed, possibly due to + * requiring more action STEs. + * Need to attempt creating new matcher with all + * the action templates, including the new one. + */ + ret = hws_bwc_matcher_rehash_at(bwc_matcher); + if (unlikely(ret)) { + mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]); + bwc_matcher->at[at_idx] = NULL; + bwc_matcher->num_of_at--; + + hws_bwc_unlock_all_queues(ctx); + + mlx5hws_err(ctx, + "BWC rule update: rehash AT failed (%d)\n", + ret); + return ret; + } + } + } + + hws_bwc_unlock_all_queues(ctx); + mutex_lock(queue_lock); + } + + ret = hws_bwc_rule_update_sync(bwc_rule, + at_idx, + rule_actions, + &rule_attr); + mutex_unlock(queue_lock); + + if (unlikely(ret)) + mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret); + + return ret; +} + +int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_action rule_actions[]) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + + if (unlikely(!mlx5hws_context_bwc_supported(ctx))) { + mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n"); + return -EINVAL; + } + + return hws_bwc_rule_action_update(bwc_rule, rule_actions); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h new file mode 100644 index 00000000000000..4fe8c32d8fbe86 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_BWC_H_ +#define MLX5HWS_BWC_H_ + +#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1 +#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1 +#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70 +#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32 +#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255 + +#define MLX5HWS_BWC_MAX_ACTS 16 + +struct mlx5hws_bwc_matcher { + struct mlx5hws_matcher *matcher; + struct mlx5hws_match_template *mt; + struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM]; + u8 num_of_at; + u16 priority; + u8 size_log; + u32 num_of_rules; /* atomically accessed */ + struct list_head *rules; +}; + +struct mlx5hws_bwc_rule { + struct mlx5hws_bwc_matcher *bwc_matcher; + struct mlx5hws_rule *rule; + u16 bwc_queue_idx; + struct list_head list_node; +}; + +int +mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct 
mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr);
+
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+ /* Besides the control queue, half of the queues are
+ * regular HWS queues, and the other half are BWC queues.
+ */
+ return (ctx->queues - 1) / 2;
+}
+
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
+ return idx + mlx5hws_bwc_queues(ctx);
+}
+
+#endif /* MLX5HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
new file mode 100644
index 00000000000000..601fad5fc54a39
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_definer match_layout = {0};
+ struct mlx5hws_match_template *mt;
+ bool is_complex = false;
+ int ret;
+
+ if (!match_criteria_enable)
+ return false; /* empty matcher */
+
+ mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!mt) {
+ mlx5hws_err(ctx, "BWC: failed creating match template\n");
+ return false;
+ }
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+ /* The only case that we're interested in is E2BIG,
+ * which means that the match parameters need to be
+ * split into a complex matcher.
+ * For all other cases (good or bad) - just return false
+ * and let the usual match creation path handle it,
+ * both for good and bad flows.
+ */ + if (ret == -E2BIG) { + is_complex = true; + mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n"); + } else { + mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n"); + } + } + + mlx5hws_match_template_destroy(mt); + + return is_complex; +} + +int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask) +{ + mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n"); + return -EOPNOTSUPP; +} + +void +mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + /* nothing to do here */ +} + +int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_match_parameters *params, + u32 flow_source, + struct mlx5hws_rule_action rule_actions[], + u16 bwc_queue_idx) +{ + mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx, + "Complex rule is not supported yet\n"); + return -EOPNOTSUPP; +} + +int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule) +{ + return 0; +} + +int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mlx5hws_err(bwc_matcher->matcher->tbl->ctx, + "Moving complex rule is not supported yet\n"); + return -EOPNOTSUPP; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h new file mode 100644 index 00000000000000..068ee811860982 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_BWC_COMPLEX_H_ +#define MLX5HWS_BWC_COMPLEX_H_ + +bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask); + +int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table, + u32 priority, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask); + +void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher); + +int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher); + +int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_match_parameters *params, + u32 flow_source, + struct mlx5hws_rule_action rule_actions[], + u16 bwc_queue_idx); + +int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule); + +#endif /* MLX5HWS_BWC_COMPLEX_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c new file mode 100644 index 00000000000000..2c7b14172049b3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c @@ -0,0 +1,1300 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +static enum mlx5_ifc_flow_destination_type +hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type) +{ + switch (type) { + case MLX5_FLOW_DESTINATION_TYPE_VPORT: + return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: + return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; + case MLX5_FLOW_DESTINATION_TYPE_TIR: + return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR; + case 
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: + return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; + case MLX5_FLOW_DESTINATION_TYPE_UPLINK: + return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK; + case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE: + return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE; + case MLX5_FLOW_DESTINATION_TYPE_NONE: + case MLX5_FLOW_DESTINATION_TYPE_PORT: + case MLX5_FLOW_DESTINATION_TYPE_COUNTER: + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: + case MLX5_FLOW_DESTINATION_TYPE_RANGE: + default: + pr_warn("HWS: unknown flow dest type %d\n", type); + return 0; + } +}; + +static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev, + u32 object_type, + u32 object_id) +{ + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_create_attr *ft_attr, + u32 *table_id) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; + void *ft_ctx; + int ret; + + MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); + MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type); + + ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); + MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level); + MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid); + MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en); + MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en); + + ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out); + if (ret) + return ret; + + *table_id = MLX5_GET(create_flow_table_out, out, table_id); + + return 0; +} + +int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_modify_attr *ft_attr, + u32 table_id) +{ + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; + void *ft_ctx; + + MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); + MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type); + MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs); + MLX5_SET(modify_flow_table_in, in, table_id, table_id); + + ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context); + + MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action); + MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id); + MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0); + MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1); + + return mlx5_cmd_exec_in(mdev, modify_flow_table, in); +} + +int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev, + u32 table_id, + struct mlx5hws_cmd_ft_query_attr *ft_attr, + u64 *icm_addr_0, u64 *icm_addr_1) +{ + u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0}; + void *ft_ctx; + int ret; + + MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE); + MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type); + MLX5_SET(query_flow_table_in, in, table_id, table_id); + + ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out); + if (ret) + return ret; + + ft_ctx = 
MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context); + *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0); + *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1); + + return ret; +} + +int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev, + u8 fw_ft_type, u32 table_id) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; + + MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); + MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type); + MLX5_SET(destroy_flow_table_in, in, table_id, table_id); + + return mlx5_cmd_exec_in(mdev, destroy_flow_table, in); +} + +void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev, + u32 table_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id); +} + +static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_fg_attr *fg_attr, + u32 *group_id) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + u32 *in; + int ret; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); + MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type); + MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id); + + ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out); + if (ret) + goto out; + + *group_id = MLX5_GET(create_flow_group_out, out, group_id); + +out: + kvfree(in); + return ret; +} + +static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev, + u32 ft_id, u32 fg_id, u8 ft_type) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {}; + + MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET(destroy_flow_group_in, in, table_type, ft_type); + MLX5_SET(destroy_flow_group_in, in, table_id, ft_id); + MLX5_SET(destroy_flow_group_in, in, group_id, fg_id); + + return mlx5_cmd_exec_in(mdev, destroy_flow_group, in); +} + +int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 group_id, + struct mlx5hws_cmd_set_fte_attr *fte_attr) +{ + u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; + void *in_flow_context; + u32 dest_entry_sz; + u32 total_dest_sz; + u32 action_flags; + u8 *in_dests; + u32 inlen; + u32 *in; + int ret; + u32 i; + + dest_entry_sz = fte_attr->extended_dest ? 
+ MLX5_ST_SZ_BYTES(extended_dest_format) : + MLX5_ST_SZ_BYTES(dest_format); + total_dest_sz = dest_entry_sz * fte_attr->dests_num; + inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + MLX5_SET(set_fte_in, in, table_type, table_type); + MLX5_SET(set_fte_in, in, table_id, table_id); + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + MLX5_SET(flow_context, in_flow_context, group_id, group_id); + MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source); + MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest); + MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level); + + action_flags = fte_attr->action_flags; + MLX5_SET(flow_context, in_flow_context, action, action_flags); + + if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { + MLX5_SET(flow_context, in_flow_context, + packet_reformat_id, fte_attr->packet_reformat_id); + } + + if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) { + MLX5_SET(flow_context, in_flow_context, + encrypt_decrypt_type, fte_attr->encrypt_decrypt_type); + MLX5_SET(flow_context, in_flow_context, + encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id); + } + + if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination); + + for (i = 0; i < fte_attr->dests_num; i++) { + struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i]; + enum mlx5_ifc_flow_destination_type ifc_dest_type = + hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type); + + switch (dest->destination_type) { + case MLX5_FLOW_DESTINATION_TYPE_VPORT: + if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) { + MLX5_SET(dest_format, in_dests, + destination_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(dest_format, in_dests, + destination_eswitch_owner_vhca_id, + dest->esw_owner_vhca_id); + } + fallthrough; + case MLX5_FLOW_DESTINATION_TYPE_TIR: + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: + MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type); + MLX5_SET(dest_format, in_dests, destination_id, + dest->destination_id); + if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) { + MLX5_SET(dest_format, in_dests, packet_reformat, 1); + MLX5_SET(extended_dest_format, in_dests, packet_reformat_id, + dest->ext_reformat_id); + } + break; + default: + ret = -EOPNOTSUPP; + goto out; + } + + in_dests = in_dests + dest_entry_sz; + } + MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num); + } + + ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n"); + +out: + kfree(in); + return ret; +} + +int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id) +{ + u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {}; + + MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); + MLX5_SET(delete_fte_in, in, table_type, table_type); + MLX5_SET(delete_fte_in, in, table_id, table_id); + + return mlx5_cmd_exec_in(mdev, delete_fte, in); +} + +struct mlx5hws_cmd_forward_tbl * +mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_create_attr *ft_attr, + struct mlx5hws_cmd_set_fte_attr *fte_attr) +{ + struct mlx5hws_cmd_fg_attr fg_attr = {0}; + struct mlx5hws_cmd_forward_tbl *tbl; + int 
ret; + + tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); + if (!tbl) + return NULL; + + ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id); + if (ret) { + mlx5_core_err(mdev, "Failed to create FT\n"); + goto free_tbl; + } + + fg_attr.table_id = tbl->ft_id; + fg_attr.table_type = ft_attr->type; + + ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id); + if (ret) { + mlx5_core_err(mdev, "Failed to create FG\n"); + goto free_ft; + } + + ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type, + tbl->ft_id, tbl->fg_id, fte_attr); + if (ret) { + mlx5_core_err(mdev, "Failed to create FTE\n"); + goto free_fg; + } + + tbl->type = ft_attr->type; + return tbl; + +free_fg: + hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type); +free_ft: + mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id); +free_tbl: + kfree(tbl); + return NULL; +} + +void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_forward_tbl *tbl) +{ + mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id); + hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type); + mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id); + kfree(tbl); +} + +void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx, + u32 fw_ft_type, + enum mlx5hws_table_type type, + struct mlx5hws_cmd_ft_modify_attr *ft_attr) +{ + u32 default_miss_tbl; + + if (type != MLX5HWS_TABLE_TYPE_FDB) + return; + + ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; + ft_attr->type = fw_ft_type; + ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; + + default_miss_tbl = ctx->common_res[type].default_miss->ft_id; + if (!default_miss_tbl) { + pr_warn("HWS: no flow table ID for default miss\n"); + return; + } + + ft_attr->table_miss_id = default_miss_tbl; +} + +int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_rtc_create_attr *rtc_attr, + u32 *rtc_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0}; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_rtc_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_RTC); + + attr = MLX5_ADDR_OF(create_rtc_in, in, rtc); + MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ? 
+ MLX5_IFC_RTC_STE_FORMAT_11DW : + MLX5_IFC_RTC_STE_FORMAT_8DW); + + if (rtc_attr->is_scnd_range) { + MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE); + MLX5_SET(rtc, attr, num_match_ste, 2); + } + + MLX5_SET(rtc, attr, pd, rtc_attr->pd); + MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe); + MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode); + MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode); + MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); + MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth); + MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size); + MLX5_SET(rtc, attr, table_type, rtc_attr->table_type); + MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); + MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0); + MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1); + MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base); + MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base); + MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset); + MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id); + MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create RTC\n"); + goto out; + } + + *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id); +} + +int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_stc_create_attr *stc_attr, + u32 *stc_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_stc_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_STC); + MLX5_SET(general_obj_in_cmd_hdr, + attr, op_param.create.log_obj_range, stc_attr->log_obj_range); + + attr = MLX5_ADDR_OF(create_stc_in, in, stc); + MLX5_SET(stc, attr, table_type, stc_attr->table_type); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create STC\n"); + goto out; + } + + *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id); +} + +static int +hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_stc_modify_attr *stc_attr, + void *stc_param) +{ + switch (stc_attr->action_type) { + case MLX5_IFC_STC_ACTION_TYPE_COUNTER: + MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id); + break; + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR: + MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num); + break; + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT: + MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id); + break; + case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST: + MLX5_SET(stc_ste_param_header_modify_list, stc_param, + header_modify_pattern_id, stc_attr->modify_header.pattern_id); + MLX5_SET(stc_ste_param_header_modify_list, stc_param, + header_modify_argument_id, stc_attr->modify_header.arg_id); + break; + case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE: + 
MLX5_SET(stc_ste_param_remove, stc_param, action_type, + MLX5_MODIFICATION_TYPE_REMOVE); + MLX5_SET(stc_ste_param_remove, stc_param, decap, + stc_attr->remove_header.decap); + MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor, + stc_attr->remove_header.start_anchor); + MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor, + stc_attr->remove_header.end_anchor); + break; + case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT: + MLX5_SET(stc_ste_param_insert, stc_param, action_type, + MLX5_MODIFICATION_TYPE_INSERT); + MLX5_SET(stc_ste_param_insert, stc_param, encap, + stc_attr->insert_header.encap); + MLX5_SET(stc_ste_param_insert, stc_param, inline_data, + stc_attr->insert_header.is_inline); + MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor, + stc_attr->insert_header.insert_anchor); + /* HW gets the next 2 sizes in words */ + MLX5_SET(stc_ste_param_insert, stc_param, insert_size, + stc_attr->insert_header.header_size / W_SIZE); + MLX5_SET(stc_ste_param_insert, stc_param, insert_offset, + stc_attr->insert_header.insert_offset / W_SIZE); + MLX5_SET(stc_ste_param_insert, stc_param, insert_argument, + stc_attr->insert_header.arg_id); + break; + case MLX5_IFC_STC_ACTION_TYPE_COPY: + case MLX5_IFC_STC_ACTION_TYPE_SET: + case MLX5_IFC_STC_ACTION_TYPE_ADD: + case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD: + *(__be64 *)stc_param = stc_attr->modify_action.data; + break; + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK: + MLX5_SET(stc_ste_param_vport, stc_param, vport_number, + stc_attr->vport.vport_num); + MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id, + stc_attr->vport.esw_owner_vhca_id); + MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid, + stc_attr->vport.eswitch_owner_vhca_id_valid); + break; + case MLX5_IFC_STC_ACTION_TYPE_DROP: + case MLX5_IFC_STC_ACTION_TYPE_NOP: + case MLX5_IFC_STC_ACTION_TYPE_TAG: + case MLX5_IFC_STC_ACTION_TYPE_ALLOW: + break; + case MLX5_IFC_STC_ACTION_TYPE_ASO: + MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id, + stc_attr->aso.devx_obj_id); + MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id, + stc_attr->aso.return_reg_id); + MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type, + stc_attr->aso.aso_type); + break; + case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: + MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id, + stc_attr->ste_table.ste_obj_id); + MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id, + stc_attr->ste_table.match_definer_id); + MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size, + stc_attr->ste_table.log_hash_size); + break; + case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS: + MLX5_SET(stc_ste_param_remove_words, stc_param, action_type, + MLX5_MODIFICATION_TYPE_REMOVE_WORDS); + MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor, + stc_attr->remove_words.start_anchor); + MLX5_SET(stc_ste_param_remove_words, stc_param, + remove_size, stc_attr->remove_words.num_of_words); + break; + case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION: + MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id, + stc_attr->id); + break; + case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION: + MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id, + stc_attr->id); + break; + case MLX5_IFC_STC_ACTION_TYPE_TRAILER: + MLX5_SET(stc_ste_param_trailer, stc_param, command, + stc_attr->reformat_trailer.op); + MLX5_SET(stc_ste_param_trailer, stc_param, type, + stc_attr->reformat_trailer.type); + 
MLX5_SET(stc_ste_param_trailer, stc_param, length, + stc_attr->reformat_trailer.size); + break; + default: + mlx5_core_err(mdev, "Not supported type %d\n", stc_attr->action_type); + return -EINVAL; + } + return 0; +} + +int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev, + u32 stc_id, + struct mlx5hws_cmd_stc_modify_attr *stc_attr) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; + void *stc_param; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_stc_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_STC); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, stc_id); + MLX5_SET(general_obj_in_cmd_hdr, in, + op_param.query.obj_offset, stc_attr->stc_offset); + + attr = MLX5_ADDR_OF(create_stc_in, in, stc); + MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset); + MLX5_SET(stc, attr, action_type, stc_attr->action_type); + MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode); + MLX5_SET64(stc, attr, modify_field_select, + MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC); + + /* Set destination TIRN, TAG, FT ID, STE ID */ + stc_param = MLX5_ADDR_OF(stc, attr, stc_param); + ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param); + if (ret) + return ret; + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n", + stc_attr->action_type); + + return ret; +} + +int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev, + u16 log_obj_range, + u32 pd, + u32 *arg_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0}; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_arg_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, op_param.create.log_obj_range, log_obj_range); + + attr = MLX5_ADDR_OF(create_arg_in, in, arg); + MLX5_SET(arg, attr, access_pd, pd); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create ARG\n"); + goto out; + } + + *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev, + u32 arg_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id); +} + +int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev, + u32 pattern_length, + u8 *actions, + u32 *ptrn_id) +{ + u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + int num_of_actions; + u64 *pattern_data; + void *pattern; + void *attr; + int ret; + int i; + + if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) { + mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n", + pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY); + return -EINVAL; + } + + attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN); + + pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern); + /* Pattern_length is in ddwords */ + MLX5_SET(header_modify_pattern_in, pattern, pattern_length, 
pattern_length / (2 * DW_SIZE)); + + pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data); + memcpy(pattern_data, actions, pattern_length); + + num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE; + for (i = 0; i < num_of_actions; i++) { + int type; + + type = MLX5_GET(set_action_in, &pattern_data[i], action_type); + if (type != MLX5_MODIFICATION_TYPE_COPY && + type != MLX5_MODIFICATION_TYPE_ADD_FIELD) + /* Copy-type actions use all bytes for control */ + MLX5_SET(set_action_in, &pattern_data[i], data, 0); + } + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create header_modify_pattern\n"); + goto out; + } + + *ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev, + u32 ptrn_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id); +} + +int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ste_create_attr *ste_attr, + u32 *ste_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0}; + void *attr; + int ret; + + attr = MLX5_ADDR_OF(create_ste_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, MLX5_OBJ_TYPE_STE); + MLX5_SET(general_obj_in_cmd_hdr, + attr, op_param.create.log_obj_range, ste_attr->log_obj_range); + + attr = MLX5_ADDR_OF(create_ste_in, in, ste); + MLX5_SET(ste, attr, table_type, ste_attr->table_type); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create STE\n"); + goto out; + } + + *ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id); +} + +int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_definer_create_attr *def_attr, + u32 *definer_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0}; + void *ptr; + int ret; + + MLX5_SET(general_obj_in_cmd_hdr, + in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER); + + ptr = MLX5_ADDR_OF(create_definer_in, in, definer); + MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT); + + MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]); + MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]); + MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]); + MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]); + MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]); + MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]); + MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]); + MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]); + MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]); + + MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]); + MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]); + MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]); + MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]); + MLX5_SET(definer, ptr, 
format_select_byte4, def_attr->byte_selector[4]); + MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]); + MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]); + MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]); + + ptr = MLX5_ADDR_OF(definer, ptr, match_mask); + memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask)); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create Definer\n"); + goto out; + } + + *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev, + u32 definer_id) +{ + hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id); +} + +int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_packet_reformat_create_attr *attr, + u32 *reformat_id) +{ + u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0}; + size_t insz, cmd_data_sz, cmd_total_sz; + void *prctx; + void *pdata; + void *in; + int ret; + + cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in); + cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in); + cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data); + insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE); + in = kzalloc(insz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(alloc_packet_reformat_context_in, in, opcode, + MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT); + + prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, + packet_reformat_context); + pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data); + + MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type); + MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0); + MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz); + memcpy(pdata, attr->data, attr->data_sz); + + ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create packet reformat\n"); + goto out; + } + + *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id); +out: + kfree(in); + return ret; +} + +int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev, + u32 reformat_id) +{ + u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0}; + int ret; + + MLX5_SET(dealloc_packet_reformat_in, in, opcode, + MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT); + MLX5_SET(dealloc_packet_reformat_in, in, + packet_reformat_id, reformat_id); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed to destroy packet_reformat\n"); + + return ret; +} + +int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn) +{ + u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0}; + void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + int ret; + + MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); + MLX5_SET(modify_sq_in, in, sqn, sqn); + MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed to modify SQ\n"); + + return ret; +} + +int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_allow_other_vhca_access_attr *attr) +{ + 
u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0}; + void *key; + int ret; + + MLX5_SET(allow_other_vhca_access_in, + in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); + MLX5_SET(allow_other_vhca_access_in, + in, object_type_to_be_accessed, attr->obj_type); + MLX5_SET(allow_other_vhca_access_in, + in, object_id_to_be_accessed, attr->obj_id); + + key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); + memcpy(key, attr->access_key, sizeof(attr->access_key)); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) + mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n"); + + return ret; +} + +int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0}; + void *attr; + void *key; + int ret; + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, alias_attr->obj_type); + MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1); + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); + MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); + MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); + + key = MLX5_ADDR_OF(alias_context, attr, access_key); + memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n"); + goto out; + } + + *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + return ret; +} + +int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev, + u16 obj_type, + u32 obj_id) +{ + return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id); +} + +int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_generate_wqe_attr *attr, + struct mlx5_cqe64 *ret_cqe) +{ + u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0}; + u8 status; + void *ptr; + int ret; + + MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE); + MLX5_SET(generate_wqe_in, in, pdn, attr->pdn); + + ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl); + memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl)); + + ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl); + memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl)); + + ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0); + memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0)); + + if (attr->gta_data_1) { + ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1); + memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1)); + } + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (ret) { + mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n"); + return ret; + } + + status = MLX5_GET(generate_wqe_out, out, status); + if (status) { + mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status); + return -EINVAL; + } + + ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data); + memcpy(ret_cqe, ptr, sizeof(*ret_cqe)); + + return ret; +} + +int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_query_caps *caps) +{ + u32 
in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0}; + u32 out_size; + u32 *out; + int ret; + + out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); + out = kzalloc(out_size, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query device caps\n"); + goto out; + } + + caps->wqe_based_update = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.wqe_based_flow_table_update_cap); + + caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.eswitch_manager); + + caps->flex_protocols = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.flex_parser_protocols); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED) + caps->flex_parser_id_geneve_tlv_option_0 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED) + caps->flex_parser_id_mpls_over_gre = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre); + + if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED) + caps->flex_parser_id_mpls_over_udp = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label); + + caps->log_header_modify_argument_granularity = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.log_header_modify_argument_granularity); + + caps->log_header_modify_argument_granularity -= + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.log_header_modify_argument_granularity_offset); + + caps->log_header_modify_argument_max_alloc = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.log_header_modify_argument_max_alloc); + + caps->definer_format_sup = + MLX5_GET64(query_hca_cap_out, out, + capability.cmd_hca_cap.match_definer_format_supported); + + caps->vhca_id = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.vhca_id); + + caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.sq_ts_format); + + caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap.ipsec_offload); + + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query device caps 2\n"); + goto out; + } + + caps->full_dw_jumbo_support = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_8_6_ext); + + caps->format_select_gtpu_dw_0 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0); + + caps->format_select_gtpu_dw_1 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1); + + caps->format_select_gtpu_dw_2 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2); + + caps->format_select_gtpu_ext_dw_0 = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0); + + caps->supp_type_gen_wqe = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.generate_wqe_type); + + caps->flow_table_hash_type = + MLX5_GET(query_hca_cap_out, out, + capability.cmd_hca_cap_2.flow_table_hash_type); + + MLX5_SET(query_hca_cap_in, 
in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query flow table caps\n"); + goto out; + } + + caps->nic_ft.max_level = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level); + + caps->nic_ft.reparse = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse); + + caps->nic_ft.ignore_flow_level_rtc_valid = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid); + + caps->flex_parser_ok_bits_supp = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist); + + if (caps->wqe_based_update) { + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query WQE based FT caps\n"); + goto out; + } + + caps->rtc_reparse_mode = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_reparse_mode); + + caps->ste_format = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.ste_format); + + caps->rtc_index_mode = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_index_mode); + + caps->rtc_log_depth_max = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_log_depth_max); + + caps->ste_alloc_log_max = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.ste_alloc_log_max); + + caps->ste_alloc_log_gran = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.ste_alloc_log_granularity); + + caps->trivial_match_definer = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.trivial_match_definer); + + caps->stc_alloc_log_max = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.stc_alloc_log_max); + + caps->stc_alloc_log_gran = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.stc_alloc_log_granularity); + + caps->rtc_hash_split_table = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_hash_split_table); + + caps->rtc_linear_lookup_table = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_linear_lookup_table); + + caps->access_index_mode = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.access_index_mode); + + caps->linear_match_definer = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3); + + caps->rtc_max_hash_def_gen_wqe = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe); + + caps->supp_ste_format_gen_wqe = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.ste_format_gen_wqe); + + caps->fdb_tir_stc = + MLX5_GET(query_hca_cap_out, out, + capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc); + } + + if (caps->eswitch_manager) { + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query flow table esw caps\n"); + goto out; + } + + caps->fdb_ft.max_level = + 
MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level); + + caps->fdb_ft.reparse = + MLX5_GET(query_hca_cap_out, out, + capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse); + + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR); + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query eswitch capabilities\n"); + goto out; + } + + if (MLX5_GET(query_hca_cap_out, out, + capability.esw_cap.esw_manager_vport_number_valid)) + caps->eswitch_manager_vport_number = + MLX5_GET(query_hca_cap_out, out, + capability.esw_cap.esw_manager_vport_number); + + caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out, + capability.esw_cap.merged_eswitch); + } + + ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size); + if (ret) { + mlx5_core_err(mdev, "Failed to query device attributes\n"); + goto out; + } + + snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d", + fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev)); + + caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev); + +out: + kfree(out); + return ret; +} + +int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function, + u16 vport_number, u16 *gvmi) +{ + bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false; + u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {}; + int out_size; + void *out; + int err; + + out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); + out = kzalloc(out_size, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, other_function, other_function); + MLX5_SET(query_hca_cap_in, in, function_id, + mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func)); + MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func); + MLX5_SET(query_hca_cap_in, in, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR); + + err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out); + if (err) { + kfree(out); + return err; + } + + *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id); + + kfree(out); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h new file mode 100644 index 00000000000000..2fbcf4ff571a06 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_CMD_H_ +#define MLX5HWS_CMD_H_ + +#define WIRE_PORT 0xFFFF + +#define ACCESS_KEY_LEN 32 + +enum mlx5hws_cmd_ext_dest_flags { + MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0, + MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1, +}; + +struct mlx5hws_cmd_set_fte_dest { + u8 destination_type; + u32 destination_id; + enum mlx5hws_cmd_ext_dest_flags ext_flags; + u32 ext_reformat_id; + u16 esw_owner_vhca_id; +}; + +struct mlx5hws_cmd_set_fte_attr { + u32 action_flags; + bool ignore_flow_level; + u8 flow_source; + u8 extended_dest; + u8 encrypt_decrypt_type; + u32 encrypt_decrypt_obj_id; + u32 packet_reformat_id; + u32 dests_num; + struct mlx5hws_cmd_set_fte_dest *dests; +}; + +struct mlx5hws_cmd_ft_create_attr { + u8 type; + u8 level; + bool rtc_valid; + bool decap_en; + bool reformat_en; +}; + +struct mlx5hws_cmd_ft_modify_attr { + u8 type; + u32 rtc_id_0; + u32 rtc_id_1; 
+ u32 table_miss_id; + u8 table_miss_action; + u64 modify_fs; +}; + +struct mlx5hws_cmd_ft_query_attr { + u8 type; +}; + +struct mlx5hws_cmd_fg_attr { + u32 table_id; + u32 table_type; +}; + +struct mlx5hws_cmd_forward_tbl { + u8 type; + u32 ft_id; + u32 fg_id; + u32 refcount; +}; + +struct mlx5hws_cmd_rtc_create_attr { + u32 pd; + u32 stc_base; + u32 ste_base; + u32 ste_offset; + u32 miss_ft_id; + bool fw_gen_wqe; + u8 update_index_mode; + u8 access_index_mode; + u8 num_hash_definer; + u8 log_depth; + u8 log_size; + u8 table_type; + u8 match_definer_0; + u8 match_definer_1; + u8 reparse_mode; + bool is_frst_jumbo; + bool is_scnd_range; +}; + +struct mlx5hws_cmd_alias_obj_create_attr { + u32 obj_id; + u16 vhca_id; + u16 obj_type; + u8 access_key[ACCESS_KEY_LEN]; +}; + +struct mlx5hws_cmd_stc_create_attr { + u8 log_obj_range; + u8 table_type; +}; + +struct mlx5hws_cmd_stc_modify_attr { + u32 stc_offset; + u8 action_offset; + u8 reparse_mode; + enum mlx5_ifc_stc_action_type action_type; + union { + u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */ + struct { + u8 decap; + u16 start_anchor; + u16 end_anchor; + } remove_header; + struct { + u32 arg_id; + u32 pattern_id; + } modify_header; + struct { + __be64 data; + } modify_action; + struct { + u32 arg_id; + u32 header_size; + u8 is_inline; + u8 encap; + u16 insert_anchor; + u16 insert_offset; + } insert_header; + struct { + u8 aso_type; + u32 devx_obj_id; + u8 return_reg_id; + } aso; + struct { + u16 vport_num; + u16 esw_owner_vhca_id; + u8 eswitch_owner_vhca_id_valid; + } vport; + struct { + struct mlx5hws_pool_chunk ste; + struct mlx5hws_pool *ste_pool; + u32 ste_obj_id; /* Internal */ + u32 match_definer_id; + u8 log_hash_size; + bool ignore_tx; + } ste_table; + struct { + u16 start_anchor; + u16 num_of_words; + } remove_words; + struct { + u8 type; + u8 op; + u8 size; + } reformat_trailer; + + u32 dest_table_id; + u32 dest_tir_num; + }; +}; + +struct mlx5hws_cmd_ste_create_attr { + u8 log_obj_range; + u8 table_type; +}; + +struct mlx5hws_cmd_definer_create_attr { + u8 *dw_selector; + u8 *byte_selector; + u8 *match_mask; +}; + +struct mlx5hws_cmd_allow_other_vhca_access_attr { + u16 obj_type; + u32 obj_id; + u8 access_key[ACCESS_KEY_LEN]; +}; + +struct mlx5hws_cmd_packet_reformat_create_attr { + u8 type; + size_t data_sz; + void *data; + u8 reformat_param_0; +}; + +struct mlx5hws_cmd_query_ft_caps { + u8 max_level; + u8 reparse; + u8 ignore_flow_level_rtc_valid; +}; + +struct mlx5hws_cmd_generate_wqe_attr { + u8 *wqe_ctrl; + u8 *gta_ctrl; + u8 *gta_data_0; + u8 *gta_data_1; + u32 pdn; +}; + +struct mlx5hws_cmd_query_caps { + u32 flex_protocols; + u8 wqe_based_update; + u8 rtc_reparse_mode; + u16 ste_format; + u8 rtc_index_mode; + u8 ste_alloc_log_max; + u8 ste_alloc_log_gran; + u8 stc_alloc_log_max; + u8 stc_alloc_log_gran; + u8 rtc_log_depth_max; + u8 format_select_gtpu_dw_0; + u8 format_select_gtpu_dw_1; + u8 flow_table_hash_type; + u8 format_select_gtpu_dw_2; + u8 format_select_gtpu_ext_dw_0; + u8 access_index_mode; + u32 linear_match_definer; + bool full_dw_jumbo_support; + bool rtc_hash_split_table; + bool rtc_linear_lookup_table; + u32 supp_type_gen_wqe; + u8 rtc_max_hash_def_gen_wqe; + u16 supp_ste_format_gen_wqe; + struct mlx5hws_cmd_query_ft_caps nic_ft; + struct mlx5hws_cmd_query_ft_caps fdb_ft; + bool eswitch_manager; + bool merged_eswitch; + u32 eswitch_manager_vport_number; + u8 log_header_modify_argument_granularity; + u8 log_header_modify_argument_max_alloc; + u8 sq_ts_format; + u8 fdb_tir_stc; + u64 definer_format_sup; + u32 
trivial_match_definer; + u32 vhca_id; + u32 shared_vhca_id; + char fw_ver[64]; + bool ipsec_offload; + bool is_ecpf; + u8 flex_parser_ok_bits_supp; + u8 flex_parser_id_geneve_tlv_option_0; + u8 flex_parser_id_mpls_over_gre; + u8 flex_parser_id_mpls_over_udp; +}; + +int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_create_attr *ft_attr, + u32 *table_id); + +int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_modify_attr *ft_attr, + u32 table_id); + +int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev, + u32 obj_id, + struct mlx5hws_cmd_ft_query_attr *ft_attr, + u64 *icm_addr_0, u64 *icm_addr_1); + +int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev, + u8 fw_ft_type, u32 table_id); + +void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev, + u32 table_id); + +int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_rtc_create_attr *rtc_attr, + u32 *rtc_id); + +void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id); + +int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_stc_create_attr *stc_attr, + u32 *stc_id); + +int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev, + u32 stc_id, + struct mlx5hws_cmd_stc_modify_attr *stc_attr); + +void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id); + +int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_generate_wqe_attr *attr, + struct mlx5_cqe64 *ret_cqe); + +int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ste_create_attr *ste_attr, + u32 *ste_id); + +void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id); + +int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_definer_create_attr *def_attr, + u32 *definer_id); + +void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev, + u32 definer_id); + +int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev, + u16 log_obj_range, + u32 pd, + u32 *arg_id); + +void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev, + u32 arg_id); + +int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev, + u32 pattern_length, + u8 *actions, + u32 *ptrn_id); + +void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev, + u32 ptrn_id); + +int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_packet_reformat_create_attr *attr, + u32 *reformat_id); + +int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev, + u32 reformat_id); + +int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev, + u32 table_type, + u32 table_id, + u32 group_id, + struct mlx5hws_cmd_set_fte_attr *fte_attr); + +int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev, + u32 table_type, u32 table_id); + +struct mlx5hws_cmd_forward_tbl * +mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_ft_create_attr *ft_attr, + struct mlx5hws_cmd_set_fte_attr *fte_attr); + +void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_forward_tbl *tbl); + +int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id); + +int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev, + u16 obj_type, + u32 obj_id); + +int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn); + +int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_query_caps *caps); + +void 
mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx, + u32 fw_ft_type, + enum mlx5hws_table_type type, + struct mlx5hws_cmd_ft_modify_attr *ft_attr); + +int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev, + struct mlx5hws_cmd_allow_other_vhca_access_attr *attr); + +int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function, + u16 vport_number, u16 *gvmi); + +#endif /* MLX5HWS_CMD_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c new file mode 100644 index 00000000000000..00e4fdf4a558b3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */ + +#include "mlx5hws_internal.h" + +bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx) +{ + return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC); +} + +u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx) +{ + /* Prefer to use dynamic reparse, reparse only specific actions */ + if (mlx5hws_context_cap_dynamic_reparse(ctx)) + return MLX5_IFC_RTC_REPARSE_NEVER; + + /* Otherwise use less efficient static */ + return MLX5_IFC_RTC_REPARSE_ALWAYS; +} + +static int hws_context_pools_init(struct mlx5hws_context *ctx) +{ + struct mlx5hws_pool_attr pool_attr = {0}; + u8 max_log_sz; + int ret; + int i; + + ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache); + if (ret) + return ret; + + ret = mlx5hws_definer_init_cache(&ctx->definer_cache); + if (ret) + goto uninit_pat_cache; + + /* Create an STC pool per FT type */ + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC; + pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL; + max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max); + pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran); + + for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) { + pool_attr.table_type = i; + ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr); + if (!ctx->stc_pool[i]) { + mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i); + ret = -ENOMEM; + goto free_stc_pools; + } + } + + return 0; + +free_stc_pools: + for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) + if (ctx->stc_pool[i]) + mlx5hws_pool_destroy(ctx->stc_pool[i]); + + mlx5hws_definer_uninit_cache(ctx->definer_cache); +uninit_pat_cache: + mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache); + return ret; +} + +static void hws_context_pools_uninit(struct mlx5hws_context *ctx) +{ + int i; + + for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) { + if (ctx->stc_pool[i]) + mlx5hws_pool_destroy(ctx->stc_pool[i]); + } + + mlx5hws_definer_uninit_cache(ctx->definer_cache); + mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache); +} + +static int hws_context_init_pd(struct mlx5hws_context *ctx) +{ + int ret = 0; + + ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate PD\n"); + return ret; + } + + ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD; + + return 0; +} + +static int hws_context_uninit_pd(struct mlx5hws_context *ctx) +{ + if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD) + mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num); + + return 0; +} + +static void hws_context_check_hws_supp(struct mlx5hws_context *ctx) +{ + struct mlx5hws_cmd_query_caps *caps = ctx->caps; + + /* HWS not supported on device / FW */ + if (!caps->wqe_based_update) { + 
mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n"); + return; + } + + if (!caps->eswitch_manager) { + mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n"); + return; + } + + /* Current solution requires all rules to set reparse bit */ + if ((!caps->nic_ft.reparse || + (!caps->fdb_ft.reparse && caps->eswitch_manager)) || + !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) { + mlx5hws_err(ctx, "Required HWS reparse cap not supported\n"); + return; + } + + /* FW/HW must support 8DW STE */ + if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) { + mlx5hws_err(ctx, "Required HWS STE format not supported\n"); + return; + } + + /* Adding rules by hash and by offset are requirements */ + if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) || + !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) { + mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n"); + return; + } + + /* Support for SELECT definer ID is required */ + if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) { + mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n"); + return; + } + + ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT; +} + +static int hws_context_init_hws(struct mlx5hws_context *ctx, + struct mlx5hws_context_attr *attr) +{ + int ret; + + hws_context_check_hws_supp(ctx); + + if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) + return 0; + + ret = hws_context_init_pd(ctx); + if (ret) + return ret; + + ret = hws_context_pools_init(ctx); + if (ret) + goto uninit_pd; + + if (attr->bwc) + ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; + + ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size); + if (ret) + goto pools_uninit; + + INIT_LIST_HEAD(&ctx->tbl_list); + + return 0; + +pools_uninit: + hws_context_pools_uninit(ctx); +uninit_pd: + hws_context_uninit_pd(ctx); + return ret; +} + +static void hws_context_uninit_hws(struct mlx5hws_context *ctx) +{ + if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) + return; + + mlx5hws_send_queues_close(ctx); + hws_context_pools_uninit(ctx); + hws_context_uninit_pd(ctx); +} + +struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev, + struct mlx5hws_context_attr *attr) +{ + struct mlx5hws_context *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + ctx->mdev = mdev; + + mutex_init(&ctx->ctrl_lock); + xa_init(&ctx->peer_ctx_xa); + + ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL); + if (!ctx->caps) + goto free_ctx; + + ret = mlx5hws_cmd_query_caps(mdev, ctx->caps); + if (ret) + goto free_caps; + + ret = mlx5hws_vport_init_vports(ctx); + if (ret) + goto free_caps; + + ret = hws_context_init_hws(ctx, attr); + if (ret) + goto uninit_vports; + + mlx5hws_debug_init_dump(ctx); + + return ctx; + +uninit_vports: + mlx5hws_vport_uninit_vports(ctx); +free_caps: + kfree(ctx->caps); +free_ctx: + xa_destroy(&ctx->peer_ctx_xa); + mutex_destroy(&ctx->ctrl_lock); + kfree(ctx); + return NULL; +} + +int mlx5hws_context_close(struct mlx5hws_context *ctx) +{ + mlx5hws_debug_uninit_dump(ctx); + hws_context_uninit_hws(ctx); + mlx5hws_vport_uninit_vports(ctx); + kfree(ctx->caps); + xa_destroy(&ctx->peer_ctx_xa); + mutex_destroy(&ctx->ctrl_lock); + kfree(ctx); + return 0; +} + +void mlx5hws_context_set_peer(struct mlx5hws_context *ctx, + struct mlx5hws_context *peer_ctx, + u16 peer_vhca_id) +{ + mutex_lock(&ctx->ctrl_lock); + + if (xa_err(xa_store(&ctx->peer_ctx_xa, 
peer_vhca_id, peer_ctx, GFP_KERNEL))) + pr_warn("HWS: failed storing peer vhca ID in peer xarray\n"); + + mutex_unlock(&ctx->ctrl_lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h new file mode 100644 index 00000000000000..e5a7ce60433401 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_CONTEXT_H_ +#define MLX5HWS_CONTEXT_H_ + +enum mlx5hws_context_flags { + MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0, + MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1, + MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2, +}; + +enum mlx5hws_context_shared_stc_type { + MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0, + MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1, + MLX5HWS_CONTEXT_SHARED_STC_MAX = 2, +}; + +struct mlx5hws_context_common_res { + struct mlx5hws_action_default_stc *default_stc; + struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX]; + struct mlx5hws_cmd_forward_tbl *default_miss; +}; + +struct mlx5hws_context_debug_info { + struct dentry *steering_debugfs; + struct dentry *fdb_debugfs; +}; + +struct mlx5hws_context_vports { + u16 esw_manager_gvmi; + u16 uplink_gvmi; + struct xarray vport_gvmi_xa; +}; + +struct mlx5hws_context { + struct mlx5_core_dev *mdev; + struct mlx5hws_cmd_query_caps *caps; + u32 pd_num; + struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX]; + struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX]; + struct mlx5hws_pattern_cache *pattern_cache; + struct mlx5hws_definer_cache *definer_cache; + struct mutex ctrl_lock; /* control lock to protect the whole context */ + enum mlx5hws_context_flags flags; + struct mlx5hws_send_engine *send_queue; + size_t queues; + struct mutex *bwc_send_queue_locks; /* protect BWC queues */ + struct list_head tbl_list; + struct mlx5hws_context_debug_info debug_info; + struct xarray peer_ctx_xa; + struct mlx5hws_context_vports vports; +}; + +static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx) +{ + return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; +} + +bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx); + +u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx); + +#endif /* MLX5HWS_CONTEXT_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c new file mode 100644 index 00000000000000..2b8c5a4e1c4c0f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include +#include +#include +#include +#include "mlx5hws_internal.h" + +static int +hws_debug_dump_matcher_template_definer(struct seq_file *f, + void *parent_obj, + struct mlx5hws_definer *definer, + enum mlx5hws_debug_res_type type) +{ + int i; + + if (!definer) + return 0; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,", + type, + HWS_PTR_TO_ID(definer), + HWS_PTR_TO_ID(parent_obj), + definer->obj_id, + definer->type); + + for (i = 0; i < DW_SELECTORS; i++) + seq_printf(f, "0x%x%s", definer->dw_selector[i], + (i == DW_SELECTORS - 1) ? 
"," : "-"); + + for (i = 0; i < BYTE_SELECTORS; i++) + seq_printf(f, "0x%x%s", definer->byte_selector[i], + (i == BYTE_SELECTORS - 1) ? "," : "-"); + + for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++) + seq_printf(f, "%02x", definer->mask.jumbo[i]); + + seq_puts(f, "\n"); + + return 0; +} + +static int +hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher) +{ + enum mlx5hws_debug_res_type type; + int i, ret; + + for (i = 0; i < matcher->num_of_mt; i++) { + struct mlx5hws_match_template *mt = &matcher->mt[i]; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE, + HWS_PTR_TO_ID(mt), + HWS_PTR_TO_ID(matcher), + mt->fc_sz, + 0, 0); + + type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER; + ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type); + if (ret) + return ret; + } + + return 0; +} + +static int +hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher) +{ + enum mlx5hws_action_type action_type; + int i, j; + + for (i = 0; i < matcher->num_of_at; i++) { + struct mlx5hws_action_template *at = &matcher->at[i]; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d", + MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE, + HWS_PTR_TO_ID(at), + HWS_PTR_TO_ID(matcher), + at->only_term, + at->num_of_action_stes, + at->num_actions); + + for (j = 0; j < at->num_actions; j++) { + action_type = at->action_type_arr[j]; + seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type)); + } + + seq_puts(f, "\n"); + } + + return 0; +} + +static int +hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + + seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR, + HWS_PTR_TO_ID(matcher), + attr->priority, + attr->mode, + attr->table.sz_row_log, + attr->table.sz_col_log, + attr->optimize_using_rule_idx, + attr->optimize_flow_src, + attr->insert_mode, + attr->distribute_mode); + + return 0; +} + +static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher) +{ + enum mlx5hws_table_type tbl_type = matcher->tbl->type; + struct mlx5hws_cmd_ft_query_attr ft_attr = {0}; + struct mlx5hws_pool_chunk *ste; + struct mlx5hws_pool *ste_pool; + u64 icm_addr_0 = 0; + u64 icm_addr_1 = 0; + u32 ste_0_id = -1; + u32 ste_1_id = -1; + int ret; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx", + MLX5HWS_DEBUG_RES_TYPE_MATCHER, + HWS_PTR_TO_ID(matcher), + HWS_PTR_TO_ID(matcher->tbl), + matcher->num_of_mt, + matcher->end_ft_id, + matcher->col_matcher ? 
HWS_PTR_TO_ID(matcher->col_matcher) : 0); + + ste = &matcher->match_ste.ste; + ste_pool = matcher->match_ste.pool; + if (ste_pool) { + ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) + ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + } + + seq_printf(f, ",%d,%d,%d,%d", + matcher->match_ste.rtc_0_id, + (int)ste_0_id, + matcher->match_ste.rtc_1_id, + (int)ste_1_id); + + ste = &matcher->action_ste[0].ste; + ste_pool = matcher->action_ste[0].pool; + if (ste_pool) { + ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) + ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + else + ste_1_id = -1; + } else { + ste_0_id = -1; + ste_1_id = -1; + } + + ft_attr.type = matcher->tbl->fw_ft_type; + ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev, + matcher->end_ft_id, + &ft_attr, + &icm_addr_0, + &icm_addr_1); + if (ret) + return ret; + + seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n", + matcher->action_ste[0].rtc_0_id, + (int)ste_0_id, + matcher->action_ste[0].rtc_1_id, + (int)ste_1_id, + 0, + mlx5hws_debug_icm_to_idx(icm_addr_0), + mlx5hws_debug_icm_to_idx(icm_addr_1)); + + ret = hws_debug_dump_matcher_attr(f, matcher); + if (ret) + return ret; + + ret = hws_debug_dump_matcher_match_template(f, matcher); + if (ret) + return ret; + + ret = hws_debug_dump_matcher_action_template(f, matcher); + if (ret) + return ret; + + return 0; +} + +static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl) +{ + struct mlx5hws_cmd_ft_query_attr ft_attr = {0}; + struct mlx5hws_matcher *matcher; + u64 local_icm_addr_0 = 0; + u64 local_icm_addr_1 = 0; + u64 icm_addr_0 = 0; + u64 icm_addr_1 = 0; + int ret; + + seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d", + MLX5HWS_DEBUG_RES_TYPE_TABLE, + HWS_PTR_TO_ID(tbl), + HWS_PTR_TO_ID(tbl->ctx), + tbl->ft_id, + MLX5HWS_TABLE_TYPE_BASE + tbl->type, + tbl->fw_ft_type, + tbl->level, + 0); + + ft_attr.type = tbl->fw_ft_type; + ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev, + tbl->ft_id, + &ft_attr, + &icm_addr_0, + &icm_addr_1); + if (ret) + return ret; + + seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n", + mlx5hws_debug_icm_to_idx(icm_addr_0), + mlx5hws_debug_icm_to_idx(icm_addr_1), + mlx5hws_debug_icm_to_idx(local_icm_addr_0), + mlx5hws_debug_icm_to_idx(local_icm_addr_1), + HWS_PTR_TO_ID(tbl->default_miss.miss_tbl)); + + list_for_each_entry(matcher, &tbl->matchers_list, list_node) { + ret = hws_debug_dump_matcher(f, matcher); + if (ret) + return ret; + } + + return 0; +} + +static int +hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5hws_send_engine *send_queue; + struct mlx5hws_send_ring *send_ring; + struct mlx5hws_send_ring_cq *cq; + struct mlx5hws_send_ring_sq *sq; + int i; + + for (i = 0; i < (int)ctx->queues; i++) { + send_queue = &ctx->send_queue[i]; + seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE, + HWS_PTR_TO_ID(ctx), + i, + send_queue->used_entries, + send_queue->num_entries, + 1, /* one send ring per queue */ + send_queue->num_entries, + send_queue->err, + send_queue->completed.ci, + send_queue->completed.pi, + send_queue->completed.mask); + + send_ring = &send_queue->send_ring; + cq = &send_ring->send_cq; + sq = &send_ring->send_sq; + + seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING, + HWS_PTR_TO_ID(ctx), + 0, /* one send ring 
per send queue */ + i, + cq->mcq.cqn, + 0, + 0, + 0, + 0, + 0, + 0, + cq->mcq.cqe_sz, + sq->sqn, + 0, + 0, + 0); + } + + return 0; +} + +static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5hws_cmd_query_caps *caps = ctx->caps; + + seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS, + HWS_PTR_TO_ID(ctx), + caps->fw_ver, + caps->wqe_based_update, + caps->ste_format, + caps->ste_alloc_log_max, + caps->log_header_modify_argument_max_alloc); + + seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n", + caps->flex_protocols, + caps->rtc_reparse_mode, + caps->rtc_index_mode, + caps->ste_alloc_log_gran, + caps->stc_alloc_log_max, + caps->stc_alloc_log_gran, + caps->rtc_log_depth_max, + caps->format_select_gtpu_dw_0, + caps->format_select_gtpu_dw_1, + caps->format_select_gtpu_dw_2, + caps->format_select_gtpu_ext_dw_0, + caps->nic_ft.max_level, + caps->nic_ft.reparse, + caps->fdb_ft.max_level, + caps->fdb_ft.reparse, + caps->log_header_modify_argument_granularity, + caps->linear_match_definer, + "regc_3"); + + return 0; +} + +static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx) +{ + seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR, + HWS_PTR_TO_ID(ctx), + ctx->pd_num, + ctx->queues, + ctx->send_queue->num_entries, + "None", /* no shared gvmi */ + ctx->caps->vhca_id, + 0xffff); /* no shared gvmi */ + + return 0; +} + +static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5_core_dev *dev = ctx->mdev; + int ret; + + seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT, + HWS_PTR_TO_ID(ctx), + ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT, + pci_name(dev->pdev), + HWS_DEBUG_FORMAT_VERSION, + LINUX_VERSION_MAJOR, + LINUX_VERSION_PATCHLEVEL, + LINUX_VERSION_SUBLEVEL); + + ret = hws_debug_dump_context_attr(f, ctx); + if (ret) + return ret; + + ret = hws_debug_dump_context_caps(f, ctx); + if (ret) + return ret; + + return 0; +} + +static int hws_debug_dump_context_stc_resource(struct seq_file *f, + struct mlx5hws_context *ctx, + u32 tbl_type, + struct mlx5hws_pool_resource *resource) +{ + seq_printf(f, "%d,0x%llx,%u,%u\n", + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC, + HWS_PTR_TO_ID(ctx), + tbl_type, + resource->base_id); + + return 0; +} + +static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5hws_pool *stc_pool; + u32 table_type; + int ret; + int i; + + for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) { + stc_pool = ctx->stc_pool[i]; + table_type = MLX5HWS_TABLE_TYPE_BASE + i; + + if (!stc_pool) + continue; + + if (stc_pool->resource[0]) { + ret = hws_debug_dump_context_stc_resource(f, ctx, table_type, + stc_pool->resource[0]); + if (ret) + return ret; + } + + if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) { + ret = hws_debug_dump_context_stc_resource(f, ctx, table_type, + stc_pool->mirror_resource[0]); + if (ret) + return ret; + } + } + + return 0; +} + +static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx) +{ + struct mlx5hws_table *tbl; + int ret; + + ret = hws_debug_dump_context_info(f, ctx); + if (ret) + return ret; + + ret = hws_debug_dump_context_send_engine(f, ctx); + if (ret) + return ret; + + ret = hws_debug_dump_context_stc(f, ctx); + if (ret) + return ret; + + list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) { + ret = hws_debug_dump_table(f, tbl); + if 
(ret) + return ret; + } + + return 0; +} + +static int +hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx) +{ + int ret; + + if (!f || !ctx) + return -EINVAL; + + mutex_lock(&ctx->ctrl_lock); + ret = hws_debug_dump_context(f, ctx); + mutex_unlock(&ctx->ctrl_lock); + + return ret; +} + +static int hws_dump_show(struct seq_file *file, void *priv) +{ + return hws_debug_dump(file, file->private); +} +DEFINE_SHOW_ATTRIBUTE(hws_dump); + +void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx) +{ + struct mlx5_core_dev *dev = ctx->mdev; + char file_name[128]; + + ctx->debug_info.steering_debugfs = + debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev)); + ctx->debug_info.fdb_debugfs = + debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs); + + sprintf(file_name, "ctx_%p", ctx); + debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs, + ctx, &hws_dump_fops); +} + +void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx) +{ + debugfs_remove_recursive(ctx->debug_info.steering_debugfs); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h new file mode 100644 index 00000000000000..b93a536035d960 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_DEBUG_H_ +#define MLX5HWS_DEBUG_H_ + +#define HWS_DEBUG_FORMAT_VERSION "1.0" + +#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL) + +enum mlx5hws_debug_res_type { + MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004, + MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005, + + MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100, + + MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206, + MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207, +}; + +static inline u64 +mlx5hws_debug_icm_to_idx(u64 icm_addr) +{ + return (icm_addr >> 6) & 0xffffffff; +} + +void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx); +void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx); + +#endif /* MLX5HWS_DEBUG_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c new file mode 100644 index 00000000000000..d566d2ddf42435 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c @@ -0,0 +1,2146 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +/* Pattern tunnel Layer bits. */ +#define MLX5_FLOW_LAYER_VXLAN BIT(12) +#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13) +#define MLX5_FLOW_LAYER_GRE BIT(14) +#define MLX5_FLOW_LAYER_MPLS BIT(15) + +/* Pattern tunnel Layer bits (continued). 
*/ +#define MLX5_FLOW_LAYER_IPIP BIT(23) +#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24) +#define MLX5_FLOW_LAYER_NVGRE BIT(25) +#define MLX5_FLOW_LAYER_GENEVE BIT(26) + +#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39) + +/* Tunnel Masks. */ +#define MLX5_FLOW_LAYER_TUNNEL \ + (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \ + MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \ + MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \ + MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \ + MLX5_FLOW_ITEM_FLEX_TUNNEL) + +#define GTP_PDU_SC 0x85 +#define BAD_PORT 0xBAD +#define ETH_TYPE_IPV4_VXLAN 0x0800 +#define ETH_TYPE_IPV6_VXLAN 0x86DD +#define UDP_GTPU_PORT 2152 +#define UDP_PORT_MPLS 6635 +#define UDP_GENEVE_PORT 6081 +#define UDP_ROCEV2_PORT 4791 +#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS) + +#define STE_NO_VLAN 0x0 +#define STE_SVLAN 0x1 +#define STE_CVLAN 0x2 +#define STE_NO_L3 0x0 +#define STE_IPV4 0x1 +#define STE_IPV6 0x2 +#define STE_NO_L4 0x0 +#define STE_TCP 0x1 +#define STE_UDP 0x2 +#define STE_ICMP 0x3 +#define STE_ESP 0x3 + +#define IPV4 0x4 +#define IPV6 0x6 + +/* Setter function based on bit offset and mask, for 32bit DW */ +#define _HWS_SET32(p, v, byte_off, bit_off, mask) \ + do { \ + u32 _v = v; \ + *((__be32 *)(p) + ((byte_off) / 4)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \ + ((byte_off) / 4))) & \ + (~((mask) << (bit_off)))) | \ + (((_v) & (mask)) << \ + (bit_off))); \ + } while (0) + +/* Setter function based on bit offset and mask, for unaligned 32bit DW */ +#define HWS_SET32(p, v, byte_off, bit_off, mask) \ + do { \ + if (unlikely((bit_off) < 0)) { \ + u32 _bit_off = -1 * (bit_off); \ + u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \ + _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \ + _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \ + (bit_off) % BITS_IN_DW, second_dw_mask); \ + } else { \ + _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \ + } \ + } while (0) + +/* Getter for up to aligned 32bit DW */ +#define HWS_GET32(p, byte_off, bit_off, mask) \ + ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask)) + +#define HWS_CALC_FNAME(field, inner) \ + ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \ + MLX5HWS_DEFINER_FNAME_##field##_O) + +#define HWS_GET_MATCH_PARAM(match_param, hdr) \ + MLX5_GET(fte_match_param, match_param, hdr) + +#define HWS_IS_FLD_SET(match_param, hdr) \ + (!!(HWS_GET_MATCH_PARAM(match_param, hdr))) + +#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \ + BUILD_BUG_ON((sz_in_bits) % 32); \ + u32 sz = sz_in_bits; \ + u32 res = 0; \ + u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \ + while (!res && sz >= 32) { \ + res = *((match_param) + (dw_off++)); \ + sz -= 32; \ + } \ + res; \ + }) + +#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \ + (((sz_in_bits) > 32) ? 
HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \ + !!(HWS_GET_MATCH_PARAM(match_param, hdr))) + +#define HWS_GET64_MATCH_PARAM(match_param, hdr) \ + MLX5_GET64(fte_match_param, match_param, hdr) + +#define HWS_IS_FLD64_SET(match_param, hdr) \ + (!!(HWS_GET64_MATCH_PARAM(match_param, hdr))) + +#define HWS_CALC_HDR_SRC(fc, s_hdr) \ + do { \ + (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \ + (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \ + (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \ + } while (0) + +#define HWS_CALC_HDR_DST(fc, d_hdr) \ + do { \ + (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \ + (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \ + (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \ + } while (0) + +#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \ + do { \ + HWS_CALC_HDR_SRC(fc, s_hdr); \ + HWS_CALC_HDR_DST(fc, d_hdr); \ + (fc)->tag_set = &hws_definer_generic_set; \ + } while (0) + +#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \ + do { \ + if (HWS_IS_FLD_SET(match_param, s_hdr)) \ + HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \ + } while (0) + +struct mlx5hws_definer_sel_ctrl { + u8 allowed_full_dw; /* Full DW selectors cover all offsets */ + u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */ + u8 allowed_bytes; /* Bytes selectors, up to offset 255 */ + u8 used_full_dw; + u8 used_lim_dw; + u8 used_bytes; + u8 full_dw_selector[DW_SELECTORS]; + u8 lim_dw_selector[DW_SELECTORS_LIMITED]; + u8 byte_selector[BYTE_SELECTORS]; +}; + +struct mlx5hws_definer_conv_data { + struct mlx5hws_context *ctx; + struct mlx5hws_definer_fc *fc; + /* enum mlx5hws_definer_match_flag */ + u32 match_flags; +}; + +static void +hws_definer_ones_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_generic_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + /* Can be optimized */ + u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask); + + HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag)) + HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) + HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else + HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag)) + HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) + HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else + HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag, + bool inner) +{ + u32 second_cvlan_tag = inner ? + HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) : + HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag); + u32 second_svlan_tag = inner ? 
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) : + HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag); + + if (second_cvlan_tag) + HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else if (second_svlan_tag) + HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask); + else + HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + hws_definer_second_vlan_type_set(fc, match_param, tag, true); +} + +static void +hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + hws_definer_second_vlan_type_set(fc, match_param, tag, false); +} + +static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code); + u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type); + u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) | + (code << __mlx5_dw_bit_off(header_icmp, code)); + + HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code); + u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type); + u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) | + (code << __mlx5_dw_bit_off(header_icmp, code)); + + HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask); + + if (val == IPV4) + HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask); + else if (val == IPV6) + HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask); + else + HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag, + struct mlx5hws_context *peer_ctx) +{ + u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port); + u16 vport_gvmi = 0; + int ret; + + ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi); + if (ret) { + HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask); + mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port); + return; + } + + if (vport_gvmi) + HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +__must_hold(&fc->ctx->ctrl_lock) +{ + int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id); + struct mlx5hws_context *peer_ctx; + + if (id == fc->ctx->caps->vhca_id) + peer_ctx = fc->ctx; + else + peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id); + + if (!peer_ctx) { + HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask); + mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id); + return; + } + + hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx); +} + +static void +hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc, + void *match_param, + u8 *tag) +{ + hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx); 
+} + +static struct mlx5hws_definer_fc * +hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd, + u8 parser_id) +{ + struct mlx5hws_definer_fc *fc; + + switch (parser_id) { + case 0: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 1: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 2: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 3: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 4: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 5: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 6: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + case 7: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK]; + HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok); + fc->tag_set = &hws_definer_generic_set; + break; + default: + mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id); + return NULL; + } + + return fc; +} + +static struct mlx5hws_definer_fc * +hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd, + u8 parser_id) +{ + struct mlx5hws_definer_fc *fc; + + switch (parser_id) { + case 0: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0); + fc->tag_set = &hws_definer_generic_set; + break; + case 1: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1); + fc->tag_set = &hws_definer_generic_set; + break; + case 2: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2); + fc->tag_set = &hws_definer_generic_set; + break; + case 3: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3); + fc->tag_set = &hws_definer_generic_set; + break; + case 4: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4); + fc->tag_set = &hws_definer_generic_set; + break; + case 5: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5); + fc->tag_set = &hws_definer_generic_set; + break; + case 6: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6); + fc->tag_set = &hws_definer_generic_set; + break; + case 7: + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]; + HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7); + fc->tag_set = &hws_definer_generic_set; + break; + default: + mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id); + return NULL; + } + + return fc; +} + +static struct mlx5hws_definer_fc * +hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd, + bool *parser_is_used, + u32 id, + u32 value) +{ + if (id || value) { + if (id >= HWS_NUM_OF_FLEX_PARSERS) { + 
mlx5hws_err(cd->ctx, "Unsupported parser id\n"); + return NULL; + } + + if (parser_is_used[id]) { + mlx5hws_err(cd->ctx, "Parser id have been used\n"); + return NULL; + } + } + + parser_is_used[id] = true; + + return hws_definer_flex_parser_handler(cd, id); +} + +static int +hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd) +{ + u32 flags; + + flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1); + if (flags & (flags - 1)) + goto err_conflict; + + flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2); + + if (flags & (flags - 1)) + goto err_conflict; + + flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP); + if (flags & (flags - 1)) + goto err_conflict; + + flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 | + MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 | + MLX5HWS_DEFINER_MATCH_FLAG_TCP_O | + MLX5HWS_DEFINER_MATCH_FLAG_TCP_I); + if (flags & (flags - 1)) + goto err_conflict; + + return 0; + +err_conflict: + mlx5hws_err(cd->ctx, "Invalid definer fields combination\n"); + return -EINVAL; +} + +static int +hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + u32 *s_ipv6, *d_ipv6; + + if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) || + HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) || + HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) { + mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n"); + return -EINVAL; + } + + /* L2 Check ethertype */ + HWS_SET_HDR(fc, match_param, ETH_TYPE_O, + outer_headers.ethertype, + eth_l2_outer.l3_ethertype); + /* L2 Check SMAC 47_16 */ + HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O, + outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16); + /* L2 Check SMAC 15_0 */ + HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O, + outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0); + /* L2 Check DMAC 47_16 */ + HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O, + outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16); + /* L2 Check DMAC 15_0 */ + HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O, + outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0); + + /* L2 VLAN */ + HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O, + outer_headers.first_prio, eth_l2_outer.first_priority); + HWS_SET_HDR(fc, match_param, VLAN_CFI_O, + outer_headers.first_cfi, eth_l2_outer.first_cfi); + HWS_SET_HDR(fc, match_param, VLAN_ID_O, + outer_headers.first_vid, eth_l2_outer.first_vlan_id); + + /* L2 CVLAN and SVLAN */ + if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) || + HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier); + curr_fc->tag_set = &hws_definer_outer_vlan_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + } + + /* L3 Check IP header */ + HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O, + outer_headers.ip_protocol, + eth_l3_outer.protocol_next_header); + HWS_SET_HDR(fc, match_param, IP_TTL_O, + outer_headers.ttl_hoplimit, + eth_l3_outer.time_to_live_hop_limit); + + /* L3 Check 
IPv4/IPv6 addresses */ + s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param, + outer_headers.src_ipv4_src_ipv6.ipv6_layout); + d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout); + + /* Assume IPv6 is used if ipv6 bits are set */ + is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2]; + is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + + if (is_s_ipv6) { + /* Handle IPv6 source address */ + HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96, + ipv6_src_outer.ipv6_address_127_96); + HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64, + ipv6_src_outer.ipv6_address_95_64); + HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32, + ipv6_src_outer.ipv6_address_63_32); + HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv6_src_outer.ipv6_address_31_0); + } else { + /* Handle IPv4 source address */ + HWS_SET_HDR(fc, match_param, IPV4_SRC_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_outer.source_address); + } + if (is_d_ipv6) { + /* Handle IPv6 destination address */ + HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96, + ipv6_dst_outer.ipv6_address_127_96); + HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64, + ipv6_dst_outer.ipv6_address_95_64); + HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32, + ipv6_dst_outer.ipv6_address_63_32); + HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv6_dst_outer.ipv6_address_31_0); + } else { + /* Handle IPv4 destination address */ + HWS_SET_HDR(fc, match_param, IPV4_DST_O, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_outer.destination_address); + } + + /* L4 Handle TCP/UDP */ + HWS_SET_HDR(fc, match_param, L4_SPORT_O, + outer_headers.tcp_sport, eth_l4_outer.source_port); + HWS_SET_HDR(fc, match_param, L4_DPORT_O, + outer_headers.tcp_dport, eth_l4_outer.destination_port); + HWS_SET_HDR(fc, match_param, L4_SPORT_O, + outer_headers.udp_sport, eth_l4_outer.source_port); + HWS_SET_HDR(fc, match_param, L4_DPORT_O, + outer_headers.udp_dport, eth_l4_outer.destination_port); + HWS_SET_HDR(fc, match_param, TCP_FLAGS_O, + outer_headers.tcp_flags, eth_l4_outer.tcp_flags); + + /* L3 Handle DSCP, ECN and IHL */ + HWS_SET_HDR(fc, match_param, IP_DSCP_O, + outer_headers.ip_dscp, eth_l3_outer.dscp); + HWS_SET_HDR(fc, match_param, IP_ECN_O, + outer_headers.ip_ecn, eth_l3_outer.ecn); + HWS_SET_HDR(fc, match_param, IPV4_IHL_O, + outer_headers.ipv4_ihl, eth_l3_outer.ihl); + + /* Set IP fragmented bit */ + if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) { + smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) || + HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16); + dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) || + HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16); + if (smac_set == dmac_set) { + HWS_SET_HDR(fc, match_param, IP_FRAG_O, + outer_headers.frag, eth_l4_outer.ip_fragmented); + } else { + HWS_SET_HDR(fc, match_param, IP_FRAG_O, + outer_headers.frag, eth_l2_src_outer.ip_fragmented); + } + } + + /* L3_type set */ + if 
(HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type); + curr_fc->tag_set = &hws_definer_l3_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version); + } + + return 0; +} + +static int +hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + u32 *s_ipv6, *d_ipv6; + + if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) || + HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) || + HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) { + mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n"); + return -EINVAL; + } + + /* L2 Check ethertype */ + HWS_SET_HDR(fc, match_param, ETH_TYPE_I, + inner_headers.ethertype, + eth_l2_inner.l3_ethertype); + /* L2 Check SMAC 47_16 */ + HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I, + inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16); + /* L2 Check SMAC 15_0 */ + HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I, + inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0); + /* L2 Check DMAC 47_16 */ + HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I, + inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16); + /* L2 Check DMAC 15_0 */ + HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I, + inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0); + + /* L2 VLAN */ + HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I, + inner_headers.first_prio, eth_l2_inner.first_priority); + HWS_SET_HDR(fc, match_param, VLAN_CFI_I, + inner_headers.first_cfi, eth_l2_inner.first_cfi); + HWS_SET_HDR(fc, match_param, VLAN_ID_I, + inner_headers.first_vid, eth_l2_inner.first_vlan_id); + + /* L2 CVLAN and SVLAN */ + if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) || + HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier); + curr_fc->tag_set = &hws_definer_inner_vlan_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + } + /* L3 Check IP header */ + HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I, + inner_headers.ip_protocol, + eth_l3_inner.protocol_next_header); + HWS_SET_HDR(fc, match_param, IP_VERSION_I, + inner_headers.ip_version, + eth_l3_inner.ip_version); + HWS_SET_HDR(fc, match_param, IP_TTL_I, + inner_headers.ttl_hoplimit, + eth_l3_inner.time_to_live_hop_limit); + + /* L3 Check IPv4/IPv6 addresses */ + s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param, + inner_headers.src_ipv4_src_ipv6.ipv6_layout); + d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param, + inner_headers.dst_ipv4_dst_ipv6.ipv6_layout); + + /* Assume IPv6 is used if ipv6 bits are set */ + is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2]; + is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + + if (is_s_ipv6) { + /* Handle IPv6 source address */ + HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96, + ipv6_src_inner.ipv6_address_127_96); + HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64, + ipv6_src_inner.ipv6_address_95_64); + HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32, + ipv6_src_inner.ipv6_address_63_32); + HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I, + 
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv6_src_inner.ipv6_address_31_0); + } else { + /* Handle IPv4 source address */ + HWS_SET_HDR(fc, match_param, IPV4_SRC_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_inner.source_address); + } + if (is_d_ipv6) { + /* Handle IPv6 destination address */ + HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96, + ipv6_dst_inner.ipv6_address_127_96); + HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64, + ipv6_dst_inner.ipv6_address_95_64); + HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32, + ipv6_dst_inner.ipv6_address_63_32); + HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv6_dst_inner.ipv6_address_31_0); + } else { + /* Handle IPv4 destination address */ + HWS_SET_HDR(fc, match_param, IPV4_DST_I, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_inner.destination_address); + } + + /* L4 Handle TCP/UDP */ + HWS_SET_HDR(fc, match_param, L4_SPORT_I, + inner_headers.tcp_sport, eth_l4_inner.source_port); + HWS_SET_HDR(fc, match_param, L4_DPORT_I, + inner_headers.tcp_dport, eth_l4_inner.destination_port); + HWS_SET_HDR(fc, match_param, L4_SPORT_I, + inner_headers.udp_sport, eth_l4_inner.source_port); + HWS_SET_HDR(fc, match_param, L4_DPORT_I, + inner_headers.udp_dport, eth_l4_inner.destination_port); + HWS_SET_HDR(fc, match_param, TCP_FLAGS_I, + inner_headers.tcp_flags, eth_l4_inner.tcp_flags); + + /* L3 Handle DSCP, ECN and IHL */ + HWS_SET_HDR(fc, match_param, IP_DSCP_I, + inner_headers.ip_dscp, eth_l3_inner.dscp); + HWS_SET_HDR(fc, match_param, IP_ECN_I, + inner_headers.ip_ecn, eth_l3_inner.ecn); + HWS_SET_HDR(fc, match_param, IPV4_IHL_I, + inner_headers.ipv4_ihl, eth_l3_inner.ihl); + + /* Set IP fragmented bit */ + if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) { + if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) { + HWS_SET_HDR(fc, match_param, IP_FRAG_I, + inner_headers.frag, eth_l2_inner.ip_fragmented); + } else { + smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) || + HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16); + dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) || + HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16); + if (smac_set == dmac_set) { + HWS_SET_HDR(fc, match_param, IP_FRAG_I, + inner_headers.frag, eth_l4_inner.ip_fragmented); + } else { + HWS_SET_HDR(fc, match_param, IP_FRAG_I, + inner_headers.frag, eth_l2_src_inner.ip_fragmented); + } + } + } + + /* L3_type set */ + if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type); + curr_fc->tag_set = &hws_definer_l3_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version); + } + + return 0; +} + +static int +hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + + if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) 
|| + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) || + HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) || + HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) || + HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) || + HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) || + HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) { + mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n"); + return -EINVAL; + } + + /* Check GRE related fields */ + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C]; + HWS_CALC_HDR(curr_fc, + misc_parameters.gre_c_present, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present); + curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K]; + HWS_CALC_HDR(curr_fc, + misc_parameters.gre_k_present, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present); + curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S]; + HWS_CALC_HDR(curr_fc, + misc_parameters.gre_s_present, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present); + curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]; + HWS_CALC_HDR(curr_fc, + misc_parameters.gre_protocol, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol); + curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE | + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY; + HWS_SET_HDR(fc, match_param, GRE_OPT_KEY, + misc_parameters.gre_key.key, tunnel_header.tunnel_header_2); + } + + /* Check GENEVE related fields */ + if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI]; + HWS_CALC_HDR(curr_fc, + misc_parameters.geneve_vni, + tunnel_header.tunnel_header_1); + curr_fc->bit_mask = __mlx5_mask(header_geneve, vni); + curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN]; + HWS_CALC_HDR(curr_fc, + misc_parameters.geneve_opt_len, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len); + curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len); + } + + if (HWS_IS_FLD_SET(match_param, 
misc_parameters.geneve_protocol_type)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO]; + HWS_CALC_HDR(curr_fc, + misc_parameters.geneve_protocol_type, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type); + curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM]; + HWS_CALC_HDR(curr_fc, + misc_parameters.geneve_oam, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag); + curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag); + } + + HWS_SET_HDR(fc, match_param, SOURCE_QP, + misc_parameters.source_sqn, source_qp_gvmi.source_qp); + HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O, + misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label); + HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I, + misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label); + + /* L2 Second VLAN */ + HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O, + misc_parameters.outer_second_prio, eth_l2_outer.second_priority); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I, + misc_parameters.inner_second_prio, eth_l2_inner.second_priority); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O, + misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I, + misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O, + misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id); + HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I, + misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id); + + /* L2 Second CVLAN and SVLAN */ + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) || + HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier); + curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + } + + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) || + HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I]; + HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier); + curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set; + curr_fc->tag_mask_set = &hws_definer_ones_set; + } + + /* VXLAN VNI */ + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN; + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI]; + HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1); + curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni); + curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni); + } + + /* Flex protocol steering ok bits */ + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + + if (!caps->flex_parser_ok_bits_supp) { + mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n"); + return -EOPNOTSUPP; + } + + curr_fc = hws_definer_flex_parser_steering_ok_bits_handler( + cd, caps->flex_parser_id_geneve_tlv_option_0); + if (!curr_fc) + return -EINVAL; + + 
HWS_CALC_HDR_SRC(fc, misc_parameters.geneve_tlv_option_0_exist); + } + + if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI]; + HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi); + curr_fc->tag_mask_set = &hws_definer_ones_set; + curr_fc->tag_set = HWS_IS_FLD_SET(match_param, + misc_parameters.source_eswitch_owner_vhca_id) ? + &hws_definer_set_source_gvmi_vhca_id : + &hws_definer_set_source_gvmi; + } else { + if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) { + mlx5hws_err(cd->ctx, + "Unsupported source_eswitch_owner_vhca_id field usage\n"); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int +hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + + if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) || + HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) || + HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) { + mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n"); + return -EINVAL; + } + + HWS_SET_HDR(fc, match_param, MPLS0_O, + misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label); + HWS_SET_HDR(fc, match_param, MPLS0_I, + misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label); + HWS_SET_HDR(fc, match_param, REG_0, + misc_parameters_2.metadata_reg_c_0, registers.register_c_0); + HWS_SET_HDR(fc, match_param, REG_1, + misc_parameters_2.metadata_reg_c_1, registers.register_c_1); + HWS_SET_HDR(fc, match_param, REG_2, + misc_parameters_2.metadata_reg_c_2, registers.register_c_2); + HWS_SET_HDR(fc, match_param, REG_3, + misc_parameters_2.metadata_reg_c_3, registers.register_c_3); + HWS_SET_HDR(fc, match_param, REG_4, + misc_parameters_2.metadata_reg_c_4, registers.register_c_4); + HWS_SET_HDR(fc, match_param, REG_5, + misc_parameters_2.metadata_reg_c_5, registers.register_c_5); + HWS_SET_HDR(fc, match_param, REG_6, + misc_parameters_2.metadata_reg_c_6, registers.register_c_6); + HWS_SET_HDR(fc, match_param, REG_7, + misc_parameters_2.metadata_reg_c_7, registers.register_c_7); + HWS_SET_HDR(fc, match_param, REG_A, + misc_parameters_2.metadata_reg_a, metadata.general_purpose); + + if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n"); + return -EOPNOTSUPP; + } + + curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre); + if (!curr_fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_gre); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n"); + return -EOPNOTSUPP; + } + + curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp); + if (!curr_fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, 
misc_parameters_2.outer_first_mpls_over_udp); + } + + return 0; +} + +static int +hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param) +{ + struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps; + struct mlx5hws_definer_fc *fc = cd->fc; + struct mlx5hws_definer_fc *curr_fc; + bool vxlan_gpe_flex_parser_enabled; + + /* Check reserved and unsupported fields */ + if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) { + mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n"); + return -EINVAL; + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I; + HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM, + misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq); + HWS_SET_HDR(fc, match_param, TCP_ACK_NUM, + misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O; + HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM, + misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq); + HWS_SET_HDR(fc, match_param, TCP_ACK_NUM, + misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack); + } + + vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE; + + if (!vxlan_gpe_flex_parser_enabled) { + mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI]; + HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni, + tunnel_header.tunnel_header_1); + curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni); + curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE; + + if (!vxlan_gpe_flex_parser_enabled) { + mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO]; + HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol, + tunnel_header.tunnel_header_0); + curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol); + curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol); + curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE; + + if (!vxlan_gpe_flex_parser_enabled) { + mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS]; + HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags, + tunnel_header.tunnel_header_0); + curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags); + curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) || + HWS_IS_FLD_SET(match_param, 
misc_parameters_3.icmp_type) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n"); + return -EOPNOTSUPP; + } + + HWS_SET_HDR(fc, match_param, ICMP_DW3, + misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3); + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1]; + HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1); + curr_fc->tag_set = &hws_definer_icmp_dw1_set; + } + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n"); + return -EOPNOTSUPP; + } + + HWS_SET_HDR(fc, match_param, ICMP_DW3, + misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3); + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) || + HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) { + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1]; + HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1); + curr_fc->tag_set = &hws_definer_icmpv6_dw1_set; + } + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE; + + curr_fc = + hws_definer_flex_parser_handler(cd, + caps->flex_parser_id_geneve_tlv_option_0); + if (!curr_fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_3.geneve_tlv_option_0_data); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n"); + return -EOPNOTSUPP; + } + + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID]; + fc->tag_set = &hws_definer_generic_set; + fc->bit_mask = __mlx5_mask(header_gtp, teid); + fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n"); + return -EOPNOTSUPP; + } + + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE]; + fc->tag_set = &hws_definer_generic_set; + fc->bit_mask = __mlx5_mask(header_gtp, msg_type); + fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type); + fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n"); + return -EOPNOTSUPP; + } + + fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE]; + fc->tag_set = &hws_definer_generic_set; + fc->bit_mask = __mlx5_mask(header_gtp, msg_flags); + fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags); + fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE; + 
HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2]; + curr_fc->tag_set = &hws_definer_generic_set; + curr_fc->bit_mask = -1; + curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_2); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0]; + curr_fc->tag_set = &hws_definer_generic_set; + curr_fc->bit_mask = -1; + curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_first_ext_dw_0); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU; + + if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) { + mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n"); + return -EOPNOTSUPP; + } + + curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0]; + curr_fc->tag_set = &hws_definer_generic_set; + curr_fc->bit_mask = -1; + curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE; + HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_0); + } + + return 0; +} + +static int +hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {}; + struct mlx5hws_definer_fc *fc; + u32 id, value; + + if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) { + mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n"); + return -EINVAL; + } + + id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0); + value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0); + fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value); + if (!fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0); + + id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1); + value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1); + fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value); + if (!fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1); + + id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2); + value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2); + fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value); + if (!fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2); + + id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3); + value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3); + fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value); + if (!fc) + return -EINVAL; + + HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3); + + return 0; +} + +static int +hws_definer_conv_misc5(struct 
mlx5hws_definer_conv_data *cd, + u32 *match_param) +{ + struct mlx5hws_definer_fc *fc = cd->fc; + + if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) || + HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) || + HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) || + HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) || + HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) { + mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n"); + return -EINVAL; + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1; + HWS_SET_HDR(fc, match_param, TNL_HDR_0, + misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1; + HWS_SET_HDR(fc, match_param, TNL_HDR_1, + misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1); + } + + if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) { + cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2; + HWS_SET_HDR(fc, match_param, TNL_HDR_2, + misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2); + } + + HWS_SET_HDR(fc, match_param, TNL_HDR_3, + misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3); + + return 0; +} + +static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc) +{ + u32 fc_sz = 0; + int i; + + /* For empty matcher, ZERO_SIZE_PTR is returned */ + if (fc == ZERO_SIZE_PTR) + return 0; + + for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) + if (fc[i].tag_set) + fc_sz++; + return fc_sz; +} + +static struct mlx5hws_definer_fc * +hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc) +{ + struct mlx5hws_definer_fc *compressed_fc = NULL; + u32 definer_size = hws_definer_get_fc_size(fc); + u32 fc_sz = 0; + int i; + + compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL); + if (!compressed_fc) + return NULL; + + /* For empty matcher, ZERO_SIZE_PTR is returned */ + if (!definer_size) + return compressed_fc; + + for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) { + if (!fc[i].tag_set) + continue; + + fc[i].fname = i; + memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc)); + } + + return compressed_fc; +} + +static void +hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc) +{ + int i; + + /* nothing to do for empty matcher */ + if (fc == ZERO_SIZE_PTR) + return; + + for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) { + if (!fc[i].tag_set) + continue; + + HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask); + } +} + +static struct mlx5hws_definer_fc * +hws_definer_alloc_fc(struct mlx5hws_context *ctx, + size_t len) +{ + struct mlx5hws_definer_fc *fc; + int i; + + fc = kcalloc(len, sizeof(*fc), GFP_KERNEL); + if (!fc) + return NULL; + + for (i = 0; i < len; i++) + fc[i].ctx = ctx; + + return fc; +} + +static int +hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt, + u8 *hl) +{ + struct mlx5hws_definer_conv_data cd = {0}; + struct mlx5hws_definer_fc *fc; + int ret; + + fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX); + if (!fc) + return -ENOMEM; + + cd.fc = fc; + cd.ctx = ctx; + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) { + mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n"); + ret = -EOPNOTSUPP; + goto err_free_fc; + } + + if 
(mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) { + ret = hws_definer_conv_outer(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) { + ret = hws_definer_conv_inner(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) { + ret = hws_definer_conv_misc(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) { + ret = hws_definer_conv_misc2(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) { + ret = hws_definer_conv_misc3(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) { + ret = hws_definer_conv_misc4(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) { + ret = hws_definer_conv_misc5(&cd, mt->match_param); + if (ret) + goto err_free_fc; + } + + /* Check there is no conflicted fields set together */ + ret = hws_definer_check_match_flags(&cd); + if (ret) + goto err_free_fc; + + /* Allocate fc array on mt */ + mt->fc = hws_definer_alloc_compressed_fc(fc); + if (!mt->fc) { + mlx5hws_err(ctx, + "Convert match params: failed to set field copy to match template\n"); + ret = -ENOMEM; + goto err_free_fc; + } + mt->fc_sz = hws_definer_get_fc_size(fc); + + /* Fill in headers layout */ + hws_definer_set_hl(hl, fc); + + kfree(fc); + return 0; + +err_free_fc: + kfree(fc); + return ret; +} + +struct mlx5hws_definer_fc * +mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx, + u8 match_criteria_enable, + u32 *match_param, + int *fc_sz) +{ + struct mlx5hws_definer_fc *compressed_fc = NULL; + struct mlx5hws_definer_conv_data cd = {0}; + struct mlx5hws_definer_fc *fc; + int ret; + + fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX); + if (!fc) + return NULL; + + cd.fc = fc; + cd.ctx = ctx; + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) { + ret = hws_definer_conv_outer(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) { + ret = hws_definer_conv_inner(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) { + ret = hws_definer_conv_misc(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) { + ret = hws_definer_conv_misc2(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) { + ret = hws_definer_conv_misc3(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) { + ret = hws_definer_conv_misc4(&cd, match_param); + if (ret) + goto err_free_fc; + } + + if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) { + ret = hws_definer_conv_misc5(&cd, match_param); + if (ret) + goto err_free_fc; + } + + /* Allocate fc array on mt */ + compressed_fc = hws_definer_alloc_compressed_fc(fc); + if (!compressed_fc) { + mlx5hws_err(ctx, + "Convert to compressed fc: failed to set field copy to match template\n"); + goto err_free_fc; + } + *fc_sz = hws_definer_get_fc_size(fc); + +err_free_fc: + kfree(fc); + return compressed_fc; +} 
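+/* Descriptive note (editor-added comment, based on the function below):
+ * hws_definer_find_byte_in_tag() maps a byte offset in the union header
+ * layout (hl) to the matching byte offset inside the rule tag. It first
+ * scans the DW selectors (all of them for jumbo definers, only the match
+ * selectors otherwise) for the DW covering that byte, then falls back to
+ * the byte selectors, iterating in reverse. Returns -EINVAL when the
+ * requested byte is not covered by the definer.
+ */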
+ +static int +hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer, + u32 hl_byte_off, + u32 *tag_byte_off) +{ + int i, dw_to_scan; + u8 byte_offset; + + /* Avoid accessing unused DW selectors */ + dw_to_scan = mlx5hws_definer_is_jumbo(definer) ? + DW_SELECTORS : DW_SELECTORS_MATCH; + + /* Add offset since each DW covers multiple BYTEs */ + byte_offset = hl_byte_off % DW_SIZE; + for (i = 0; i < dw_to_scan; i++) { + if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) { + *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1); + return 0; + } + } + + /* Add offset to skip DWs in definer */ + byte_offset = DW_SIZE * DW_SELECTORS; + /* Iterate in reverse since the code uses bytes from 7 -> 0 */ + for (i = BYTE_SELECTORS; i-- > 0 ;) { + if (definer->byte_selector[i] == hl_byte_off) { + *tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1); + return 0; + } + } + + return -EINVAL; +} + +static int +hws_definer_fc_bind(struct mlx5hws_definer *definer, + struct mlx5hws_definer_fc *fc, + u32 fc_sz) +{ + u32 tag_offset = 0; + int ret, byte_diff; + u32 i; + + for (i = 0; i < fc_sz; i++) { + /* Map header layout byte offset to byte offset in tag */ + ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset); + if (ret) + return ret; + + /* Move setter based on the location in the definer */ + byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE; + fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE; + + /* Update offset in headers layout to offset in tag */ + fc->byte_off = tag_offset; + fc++; + } + + return 0; +} + +static bool +hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl, + u32 cur_dw, + u32 *data) +{ + u8 bytes_set; + int byte_idx; + bool ret; + int i; + + /* Reached end, nothing left to do */ + if (cur_dw == MLX5_ST_SZ_DW(definer_hl)) + return true; + + /* No data set, can skip to next DW */ + while (!*data) { + cur_dw++; + data++; + + /* Reached end, nothing left to do */ + if (cur_dw == MLX5_ST_SZ_DW(definer_hl)) + return true; + } + + /* Used all DW selectors and Byte selectors, no possible solution */ + if (ctrl->allowed_full_dw == ctrl->used_full_dw && + ctrl->allowed_lim_dw == ctrl->used_lim_dw && + ctrl->allowed_bytes == ctrl->used_bytes) + return false; + + /* Try to use limited DW selectors */ + if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) { + ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw; + + ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1); + if (ret) + return ret; + + ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0; + } + + /* Try to use DW selectors */ + if (ctrl->allowed_full_dw > ctrl->used_full_dw) { + ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw; + + ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1); + if (ret) + return ret; + + ctrl->full_dw_selector[--ctrl->used_full_dw] = 0; + } + + /* No byte selector for offset bigger than 255 */ + if (cur_dw * DW_SIZE > 255) + return false; + + bytes_set = !!(0x000000ff & *data) + + !!(0x0000ff00 & *data) + + !!(0x00ff0000 & *data) + + !!(0xff000000 & *data); + + /* Check if there are enough byte selectors left */ + if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes) + return false; + + /* Try to use Byte selectors */ + for (i = 0; i < DW_SIZE; i++) + if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) { + /* Use byte selectors high to low */ + byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1; + ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i; + ctrl->used_bytes++; + } + + ret = 
hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1); + if (ret) + return ret; + + for (i = 0; i < DW_SIZE; i++) + if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) { + ctrl->used_bytes--; + byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1; + ctrl->byte_selector[byte_idx] = 0; + } + + return false; +} + +static void +hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl, + struct mlx5hws_definer *definer) +{ + memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes); + memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw); + memcpy(definer->dw_selector + ctrl->allowed_full_dw, + ctrl->lim_dw_selector, ctrl->allowed_lim_dw); +} + +static int +hws_definer_find_best_match_fit(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer, + u8 *hl) +{ + struct mlx5hws_definer_sel_ctrl ctrl = {0}; + bool found; + + /* Try to create a match definer */ + ctrl.allowed_full_dw = DW_SELECTORS_MATCH; + ctrl.allowed_lim_dw = 0; + ctrl.allowed_bytes = BYTE_SELECTORS; + + found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl); + if (found) { + hws_definer_copy_sel_ctrl(&ctrl, definer); + definer->type = MLX5HWS_DEFINER_TYPE_MATCH; + return 0; + } + + /* Try to create a full/limited jumbo definer */ + ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS : + DW_SELECTORS_MATCH; + ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 : + DW_SELECTORS_LIMITED; + ctrl.allowed_bytes = BYTE_SELECTORS; + + found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl); + if (found) { + hws_definer_copy_sel_ctrl(&ctrl, definer); + definer->type = MLX5HWS_DEFINER_TYPE_JUMBO; + return 0; + } + + return -E2BIG; +} + +static void +hws_definer_create_tag_mask(u32 *match_param, + struct mlx5hws_definer_fc *fc, + u32 fc_sz, + u8 *tag) +{ + u32 i; + + for (i = 0; i < fc_sz; i++) { + if (fc->tag_mask_set) + fc->tag_mask_set(fc, match_param, tag); + else + fc->tag_set(fc, match_param, tag); + fc++; + } +} + +void mlx5hws_definer_create_tag(u32 *match_param, + struct mlx5hws_definer_fc *fc, + u32 fc_sz, + u8 *tag) +{ + u32 i; + + for (i = 0; i < fc_sz; i++) { + fc->tag_set(fc, match_param, tag); + fc++; + } +} + +int mlx5hws_definer_get_id(struct mlx5hws_definer *definer) +{ + return definer->obj_id; +} + +int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a, + struct mlx5hws_definer *definer_b) +{ + int i; + + /* Future: Optimize by comparing selectors with valid mask only */ + for (i = 0; i < BYTE_SELECTORS; i++) + if (definer_a->byte_selector[i] != definer_b->byte_selector[i]) + return 1; + + for (i = 0; i < DW_SELECTORS; i++) + if (definer_a->dw_selector[i] != definer_b->dw_selector[i]) + return 1; + + for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++) + if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i]) + return 1; + + return 0; +} + +int +mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt, + struct mlx5hws_definer *match_definer) +{ + u8 *match_hl; + int ret; + + /* Union header-layout (hl) is used for creating a single definer + * field layout used with different bitmasks for hash and match. + */ + match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL); + if (!match_hl) + return -ENOMEM; + + /* Convert all mt items to header layout (hl) + * and allocate the match and range field copy array (fc & fcr). 
+ */ + ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl); + if (ret) { + mlx5hws_err(ctx, "Failed to convert items to header layout\n"); + goto free_fc; + } + + /* Find the match definer layout for header layout match union */ + ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl); + if (ret) { + if (ret == -E2BIG) + mlx5hws_dbg(ctx, + "Failed to create match definer from header layout - E2BIG\n"); + else + mlx5hws_err(ctx, + "Failed to create match definer from header layout (%d)\n", + ret); + goto free_fc; + } + + kfree(match_hl); + return 0; + +free_fc: + kfree(mt->fc); + + kfree(match_hl); + return ret; +} + +int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache) +{ + struct mlx5hws_definer_cache *new_cache; + + new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL); + if (!new_cache) + return -ENOMEM; + + INIT_LIST_HEAD(&new_cache->list_head); + *cache = new_cache; + + return 0; +} + +void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache) +{ + kfree(cache); +} + +int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer) +{ + struct mlx5hws_definer_cache *cache = ctx->definer_cache; + struct mlx5hws_cmd_definer_create_attr def_attr = {0}; + struct mlx5hws_definer_cache_item *cached_definer; + u32 obj_id; + int ret; + + /* Search definer cache for requested definer */ + list_for_each_entry(cached_definer, &cache->list_head, list_node) { + if (mlx5hws_definer_compare(&cached_definer->definer, definer)) + continue; + + /* Reuse definer and set LRU (move to be first in the list) */ + list_del_init(&cached_definer->list_node); + list_add(&cached_definer->list_node, &cache->list_head); + cached_definer->refcount++; + return cached_definer->definer.obj_id; + } + + /* Allocate and create definer based on the bitmask tag */ + def_attr.match_mask = definer->mask.jumbo; + def_attr.dw_selector = definer->dw_selector; + def_attr.byte_selector = definer->byte_selector; + + ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id); + if (ret) + return -1; + + cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL); + if (!cached_definer) + goto free_definer_obj; + + memcpy(&cached_definer->definer, definer, sizeof(*definer)); + cached_definer->definer.obj_id = obj_id; + cached_definer->refcount = 1; + list_add(&cached_definer->list_node, &cache->list_head); + + return obj_id; + +free_definer_obj: + mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id); + return -1; +} + +static void +hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id) +{ + struct mlx5hws_definer_cache_item *cached_definer; + + list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) { + if (cached_definer->definer.obj_id != obj_id) + continue; + + /* Object found */ + if (--cached_definer->refcount) + return; + + list_del_init(&cached_definer->list_node); + mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id); + kfree(cached_definer); + return; + } + + /* Programming error, object must be part of cache */ + pr_warn("HWS: failed putting definer object\n"); +} + +static struct mlx5hws_definer * +hws_definer_alloc(struct mlx5hws_context *ctx, + struct mlx5hws_definer_fc *fc, + int fc_sz, + u32 *match_param, + struct mlx5hws_definer *layout, + bool bind_fc) +{ + struct mlx5hws_definer *definer; + int ret; + + definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL); + if (!definer) + return NULL; + + /* Align field copy array based on given layout */ + if (bind_fc) { + ret = 
hws_definer_fc_bind(definer, fc, fc_sz); + if (ret) { + mlx5hws_err(ctx, "Failed to bind field copy to definer\n"); + goto free_definer; + } + } + + /* Create the tag mask used for definer creation */ + hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo); + + ret = mlx5hws_definer_get_obj(ctx, definer); + if (ret < 0) + goto free_definer; + + definer->obj_id = ret; + return definer; + +free_definer: + kfree(definer); + return NULL; +} + +void mlx5hws_definer_free(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer) +{ + hws_definer_put_obj(ctx, definer->obj_id); + kfree(definer); +} + +static int +hws_definer_mt_match_init(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt, + struct mlx5hws_definer *match_layout) +{ + /* Create mandatory match definer */ + mt->definer = hws_definer_alloc(ctx, + mt->fc, + mt->fc_sz, + mt->match_param, + match_layout, + true); + if (!mt->definer) { + mlx5hws_err(ctx, "Failed to create match definer\n"); + return -EINVAL; + } + + return 0; +} + +static void +hws_definer_mt_match_uninit(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt) +{ + mlx5hws_definer_free(ctx, mt->definer); +} + +int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt) +{ + struct mlx5hws_definer match_layout = {0}; + int ret; + + ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout); + if (ret) { + mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n"); + return ret; + } + + /* Calculate definers needed for exact match */ + ret = hws_definer_mt_match_init(ctx, mt, &match_layout); + if (ret) { + mlx5hws_err(ctx, "Failed to init match definers\n"); + goto free_fc; + } + + return 0; + +free_fc: + kfree(mt->fc); + return ret; +} + +void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt) +{ + hws_definer_mt_match_uninit(ctx, mt); + kfree(mt->fc); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h new file mode 100644 index 00000000000000..2f6a7df4021c8d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_DEFINER_H_ +#define MLX5HWS_DEFINER_H_ + +/* Max available selectors */ +#define DW_SELECTORS 9 +#define BYTE_SELECTORS 8 + +/* Selectors based on match TAG */ +#define DW_SELECTORS_MATCH 6 +#define DW_SELECTORS_LIMITED 3 + +/* Selectors based on range TAG */ +#define DW_SELECTORS_RANGE 2 +#define BYTE_SELECTORS_RANGE 8 + +#define HWS_NUM_OF_FLEX_PARSERS 8 + +enum mlx5hws_definer_fname { + MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O, + MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I, + MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O, + MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I, + MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O, + MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I, + MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O, + MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I, + MLX5HWS_DEFINER_FNAME_ETH_TYPE_O, + MLX5HWS_DEFINER_FNAME_ETH_TYPE_I, + MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O, + MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I, + MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O, + MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I, + MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O, + MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I, + MLX5HWS_DEFINER_FNAME_VLAN_CFI_O, + MLX5HWS_DEFINER_FNAME_VLAN_CFI_I, + MLX5HWS_DEFINER_FNAME_VLAN_ID_O, + 
MLX5HWS_DEFINER_FNAME_VLAN_ID_I, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O, + MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I, + MLX5HWS_DEFINER_FNAME_IPV4_IHL_O, + MLX5HWS_DEFINER_FNAME_IPV4_IHL_I, + MLX5HWS_DEFINER_FNAME_IP_DSCP_O, + MLX5HWS_DEFINER_FNAME_IP_DSCP_I, + MLX5HWS_DEFINER_FNAME_IP_ECN_O, + MLX5HWS_DEFINER_FNAME_IP_ECN_I, + MLX5HWS_DEFINER_FNAME_IP_TTL_O, + MLX5HWS_DEFINER_FNAME_IP_TTL_I, + MLX5HWS_DEFINER_FNAME_IPV4_DST_O, + MLX5HWS_DEFINER_FNAME_IPV4_DST_I, + MLX5HWS_DEFINER_FNAME_IPV4_SRC_O, + MLX5HWS_DEFINER_FNAME_IPV4_SRC_I, + MLX5HWS_DEFINER_FNAME_IP_VERSION_O, + MLX5HWS_DEFINER_FNAME_IP_VERSION_I, + MLX5HWS_DEFINER_FNAME_IP_FRAG_O, + MLX5HWS_DEFINER_FNAME_IP_FRAG_I, + MLX5HWS_DEFINER_FNAME_IP_LEN_O, + MLX5HWS_DEFINER_FNAME_IP_LEN_I, + MLX5HWS_DEFINER_FNAME_IP_TOS_O, + MLX5HWS_DEFINER_FNAME_IP_TOS_I, + MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O, + MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I, + MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O, + MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O, + MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O, + MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O, + MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I, + MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I, + MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I, + MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I, + MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I, + MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O, + MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I, + MLX5HWS_DEFINER_FNAME_L4_SPORT_O, + MLX5HWS_DEFINER_FNAME_L4_SPORT_I, + MLX5HWS_DEFINER_FNAME_L4_DPORT_O, + MLX5HWS_DEFINER_FNAME_L4_DPORT_I, + MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I, + MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O, + MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM, + MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM, + MLX5HWS_DEFINER_FNAME_GTP_TEID, + MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE, + MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG, + MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR, + MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU, + MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI, + MLX5HWS_DEFINER_FNAME_GTPU_DW0, + MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0, + MLX5HWS_DEFINER_FNAME_GTPU_DW2, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7, + MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0, + MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS, + MLX5HWS_DEFINER_FNAME_VXLAN_VNI, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI, + MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN, + MLX5HWS_DEFINER_FNAME_GENEVE_OAM, + MLX5HWS_DEFINER_FNAME_GENEVE_PROTO, + MLX5HWS_DEFINER_FNAME_GENEVE_VNI, + MLX5HWS_DEFINER_FNAME_SOURCE_QP, + MLX5HWS_DEFINER_FNAME_SOURCE_GVMI, + MLX5HWS_DEFINER_FNAME_REG_0, + MLX5HWS_DEFINER_FNAME_REG_1, + MLX5HWS_DEFINER_FNAME_REG_2, + MLX5HWS_DEFINER_FNAME_REG_3, + 
MLX5HWS_DEFINER_FNAME_REG_4, + MLX5HWS_DEFINER_FNAME_REG_5, + MLX5HWS_DEFINER_FNAME_REG_6, + MLX5HWS_DEFINER_FNAME_REG_7, + MLX5HWS_DEFINER_FNAME_REG_8, + MLX5HWS_DEFINER_FNAME_REG_9, + MLX5HWS_DEFINER_FNAME_REG_10, + MLX5HWS_DEFINER_FNAME_REG_11, + MLX5HWS_DEFINER_FNAME_REG_A, + MLX5HWS_DEFINER_FNAME_REG_B, + MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT, + MLX5HWS_DEFINER_FNAME_GRE_C, + MLX5HWS_DEFINER_FNAME_GRE_K, + MLX5HWS_DEFINER_FNAME_GRE_S, + MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL, + MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY, + MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ, + MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM, + MLX5HWS_DEFINER_FNAME_INTEGRITY_O, + MLX5HWS_DEFINER_FNAME_INTEGRITY_I, + MLX5HWS_DEFINER_FNAME_ICMP_DW1, + MLX5HWS_DEFINER_FNAME_ICMP_DW2, + MLX5HWS_DEFINER_FNAME_ICMP_DW3, + MLX5HWS_DEFINER_FNAME_IPSEC_SPI, + MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER, + MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME, + MLX5HWS_DEFINER_FNAME_MPLS0_O, + MLX5HWS_DEFINER_FNAME_MPLS1_O, + MLX5HWS_DEFINER_FNAME_MPLS2_O, + MLX5HWS_DEFINER_FNAME_MPLS3_O, + MLX5HWS_DEFINER_FNAME_MPLS4_O, + MLX5HWS_DEFINER_FNAME_MPLS0_I, + MLX5HWS_DEFINER_FNAME_MPLS1_I, + MLX5HWS_DEFINER_FNAME_MPLS2_I, + MLX5HWS_DEFINER_FNAME_MPLS3_I, + MLX5HWS_DEFINER_FNAME_MPLS4_I, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK, + MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I, + MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6, + MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7, + MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE, + MLX5HWS_DEFINER_FNAME_IB_L4_QPN, + MLX5HWS_DEFINER_FNAME_IB_L4_A, + MLX5HWS_DEFINER_FNAME_RANDOM_NUM, + MLX5HWS_DEFINER_FNAME_PTYPE_L2_O, + MLX5HWS_DEFINER_FNAME_PTYPE_L2_I, + MLX5HWS_DEFINER_FNAME_PTYPE_L3_O, + MLX5HWS_DEFINER_FNAME_PTYPE_L3_I, + MLX5HWS_DEFINER_FNAME_PTYPE_L4_O, + MLX5HWS_DEFINER_FNAME_PTYPE_L4_I, + MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O, + MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I, + MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O, + MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I, + MLX5HWS_DEFINER_FNAME_TNL_HDR_0, + MLX5HWS_DEFINER_FNAME_TNL_HDR_1, + MLX5HWS_DEFINER_FNAME_TNL_HDR_2, + MLX5HWS_DEFINER_FNAME_TNL_HDR_3, + MLX5HWS_DEFINER_FNAME_MAX, +}; + +enum mlx5hws_definer_match_criteria { + MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0, + MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1, + MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 
3, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6, + MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7, +}; + +enum mlx5hws_definer_type { + MLX5HWS_DEFINER_TYPE_MATCH, + MLX5HWS_DEFINER_TYPE_JUMBO, +}; + +enum mlx5hws_definer_match_flag { + MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5, + + MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7, + + MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8, + MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9, + + MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10, + MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11, + MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12, + MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13, +}; + +struct mlx5hws_definer_fc { + struct mlx5hws_context *ctx; + /* Source */ + u32 s_byte_off; + int s_bit_off; + u32 s_bit_mask; + /* Destination */ + u32 byte_off; + int bit_off; + u32 bit_mask; + enum mlx5hws_definer_fname fname; + void (*tag_set)(struct mlx5hws_definer_fc *fc, + void *mach_param, + u8 *tag); + void (*tag_mask_set)(struct mlx5hws_definer_fc *fc, + void *mach_param, + u8 *tag); +}; + +struct mlx5_ifc_definer_hl_eth_l2_bits { + u8 dmac_47_16[0x20]; + u8 dmac_15_0[0x10]; + u8 l3_ethertype[0x10]; + u8 reserved_at_40[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 ip_fragmented[0x1]; + u8 qp_type[0x2]; + u8 encap_type[0x2]; + u8 port_number[0x2]; + u8 l3_type[0x2]; + u8 l4_type_bwc[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 first_priority[0x3]; + u8 first_cfi[0x1]; + u8 first_vlan_id[0xc]; + u8 l4_type[0x4]; + u8 reserved_at_64[0x2]; + u8 ipsec_layer[0x2]; + u8 l2_type[0x2]; + u8 force_lb[0x1]; + u8 l2_ok[0x1]; + u8 l3_ok[0x1]; + u8 l4_ok[0x1]; + u8 second_vlan_qualifier[0x2]; + u8 second_priority[0x3]; + u8 second_cfi[0x1]; + u8 second_vlan_id[0xc]; +}; + +struct mlx5_ifc_definer_hl_eth_l2_src_bits { + u8 smac_47_16[0x20]; + u8 smac_15_0[0x10]; + u8 loopback_syndrome[0x8]; + u8 l3_type[0x2]; + u8 l4_type_bwc[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 ip_fragmented[0x1]; + u8 functional_lb[0x1]; +}; + +struct mlx5_ifc_definer_hl_ib_l2_bits { + u8 sx_sniffer[0x1]; + u8 force_lb[0x1]; + u8 functional_lb[0x1]; + u8 reserved_at_3[0x3]; + u8 port_number[0x2]; + u8 sl[0x4]; + u8 qp_type[0x2]; + u8 lnh[0x2]; + u8 dlid[0x10]; + u8 vl[0x4]; + u8 lrh_packet_length[0xc]; + u8 slid[0x10]; +}; + +struct mlx5_ifc_definer_hl_eth_l3_bits { + u8 ip_version[0x4]; + u8 ihl[0x4]; + union { + u8 tos[0x8]; + struct { + u8 dscp[0x6]; + u8 ecn[0x2]; + }; + }; + u8 time_to_live_hop_limit[0x8]; + u8 protocol_next_header[0x8]; + u8 identification[0x10]; + union { + u8 ipv4_frag[0x10]; + struct { + u8 flags[0x3]; + u8 fragment_offset[0xd]; + }; + }; + u8 ipv4_total_length[0x10]; + u8 checksum[0x10]; + u8 reserved_at_60[0xc]; + u8 flow_label[0x14]; + u8 packet_length[0x10]; + u8 ipv6_payload_length[0x10]; +}; + +struct mlx5_ifc_definer_hl_eth_l4_bits { + u8 source_port[0x10]; + u8 destination_port[0x10]; + u8 data_offset[0x4]; + u8 l4_ok[0x1]; + u8 l3_ok[0x1]; + u8 ip_fragmented[0x1]; + u8 tcp_ns[0x1]; + union { + u8 tcp_flags[0x8]; + struct { + u8 tcp_cwr[0x1]; + u8 tcp_ece[0x1]; + u8 tcp_urg[0x1]; + u8 tcp_ack[0x1]; + u8 tcp_psh[0x1]; + u8 tcp_rst[0x1]; + u8 
tcp_syn[0x1]; + u8 tcp_fin[0x1]; + }; + }; + u8 first_fragment[0x1]; + u8 reserved_at_31[0xf]; +}; + +struct mlx5_ifc_definer_hl_src_qp_gvmi_bits { + u8 loopback_syndrome[0x8]; + u8 l3_type[0x2]; + u8 l4_type_bwc[0x2]; + u8 first_vlan_qualifier[0x2]; + u8 reserved_at_e[0x1]; + u8 functional_lb[0x1]; + u8 source_gvmi[0x10]; + u8 force_lb[0x1]; + u8 ip_fragmented[0x1]; + u8 source_is_requestor[0x1]; + u8 reserved_at_23[0x5]; + u8 source_qp[0x18]; +}; + +struct mlx5_ifc_definer_hl_ib_l4_bits { + u8 opcode[0x8]; + u8 qp[0x18]; + u8 se[0x1]; + u8 migreq[0x1]; + u8 ackreq[0x1]; + u8 fecn[0x1]; + u8 becn[0x1]; + u8 bth[0x1]; + u8 deth[0x1]; + u8 dcceth[0x1]; + u8 reserved_at_28[0x2]; + u8 pad_count[0x2]; + u8 tver[0x4]; + u8 p_key[0x10]; + u8 reserved_at_40[0x8]; + u8 deth_source_qp[0x18]; +}; + +enum mlx5hws_integrity_ok1_bits { + MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24, + MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25, + MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26, + MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27, + MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28, + MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29, + MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30, + MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31, +}; + +struct mlx5_ifc_definer_hl_oks1_bits { + union { + u8 oks1_bits[0x20]; + struct { + u8 second_ipv4_checksum_ok[0x1]; + u8 second_l4_checksum_ok[0x1]; + u8 first_ipv4_checksum_ok[0x1]; + u8 first_l4_checksum_ok[0x1]; + u8 second_l3_ok[0x1]; + u8 second_l4_ok[0x1]; + u8 first_l3_ok[0x1]; + u8 first_l4_ok[0x1]; + u8 flex_parser7_steering_ok[0x1]; + u8 flex_parser6_steering_ok[0x1]; + u8 flex_parser5_steering_ok[0x1]; + u8 flex_parser4_steering_ok[0x1]; + u8 flex_parser3_steering_ok[0x1]; + u8 flex_parser2_steering_ok[0x1]; + u8 flex_parser1_steering_ok[0x1]; + u8 flex_parser0_steering_ok[0x1]; + u8 second_ipv6_extension_header_vld[0x1]; + u8 first_ipv6_extension_header_vld[0x1]; + u8 l3_tunneling_ok[0x1]; + u8 l2_tunneling_ok[0x1]; + u8 second_tcp_ok[0x1]; + u8 second_udp_ok[0x1]; + u8 second_ipv4_ok[0x1]; + u8 second_ipv6_ok[0x1]; + u8 second_l2_ok[0x1]; + u8 vxlan_ok[0x1]; + u8 gre_ok[0x1]; + u8 first_tcp_ok[0x1]; + u8 first_udp_ok[0x1]; + u8 first_ipv4_ok[0x1]; + u8 first_ipv6_ok[0x1]; + u8 first_l2_ok[0x1]; + }; + }; +}; + +struct mlx5_ifc_definer_hl_oks2_bits { + u8 reserved_at_0[0xa]; + u8 second_mpls_ok[0x1]; + u8 second_mpls4_s_bit[0x1]; + u8 second_mpls4_qualifier[0x1]; + u8 second_mpls3_s_bit[0x1]; + u8 second_mpls3_qualifier[0x1]; + u8 second_mpls2_s_bit[0x1]; + u8 second_mpls2_qualifier[0x1]; + u8 second_mpls1_s_bit[0x1]; + u8 second_mpls1_qualifier[0x1]; + u8 second_mpls0_s_bit[0x1]; + u8 second_mpls0_qualifier[0x1]; + u8 first_mpls_ok[0x1]; + u8 first_mpls4_s_bit[0x1]; + u8 first_mpls4_qualifier[0x1]; + u8 first_mpls3_s_bit[0x1]; + u8 first_mpls3_qualifier[0x1]; + u8 first_mpls2_s_bit[0x1]; + u8 first_mpls2_qualifier[0x1]; + u8 first_mpls1_s_bit[0x1]; + u8 first_mpls1_qualifier[0x1]; + u8 first_mpls0_s_bit[0x1]; + u8 first_mpls0_qualifier[0x1]; +}; + +struct mlx5_ifc_definer_hl_voq_bits { + u8 reserved_at_0[0x18]; + u8 ecn_ok[0x1]; + u8 congestion[0x1]; + u8 profile[0x2]; + u8 internal_prio[0x4]; +}; + +struct mlx5_ifc_definer_hl_ipv4_src_dst_bits { + u8 source_address[0x20]; + u8 destination_address[0x20]; +}; + +struct mlx5_ifc_definer_hl_random_number_bits { + u8 random_number[0x10]; + u8 reserved[0x10]; +}; + +struct mlx5_ifc_definer_hl_ipv6_addr_bits { + u8 ipv6_address_127_96[0x20]; + u8 ipv6_address_95_64[0x20]; + u8 ipv6_address_63_32[0x20]; + u8 ipv6_address_31_0[0x20]; +}; + +struct 
mlx5_ifc_definer_tcp_icmp_header_bits { + union { + struct { + u8 icmp_dw1[0x20]; + u8 icmp_dw2[0x20]; + u8 icmp_dw3[0x20]; + }; + struct { + u8 tcp_seq[0x20]; + u8 tcp_ack[0x20]; + u8 tcp_win_urg[0x20]; + }; + }; +}; + +struct mlx5_ifc_definer_hl_tunnel_header_bits { + u8 tunnel_header_0[0x20]; + u8 tunnel_header_1[0x20]; + u8 tunnel_header_2[0x20]; + u8 tunnel_header_3[0x20]; +}; + +struct mlx5_ifc_definer_hl_ipsec_bits { + u8 spi[0x20]; + u8 sequence_number[0x20]; + u8 reserved[0x10]; + u8 ipsec_syndrome[0x8]; + u8 next_header[0x8]; +}; + +struct mlx5_ifc_definer_hl_metadata_bits { + u8 metadata_to_cqe[0x20]; + u8 general_purpose[0x20]; + u8 acomulated_hash[0x20]; +}; + +struct mlx5_ifc_definer_hl_flex_parser_bits { + u8 flex_parser_7[0x20]; + u8 flex_parser_6[0x20]; + u8 flex_parser_5[0x20]; + u8 flex_parser_4[0x20]; + u8 flex_parser_3[0x20]; + u8 flex_parser_2[0x20]; + u8 flex_parser_1[0x20]; + u8 flex_parser_0[0x20]; +}; + +struct mlx5_ifc_definer_hl_registers_bits { + u8 register_c_10[0x20]; + u8 register_c_11[0x20]; + u8 register_c_8[0x20]; + u8 register_c_9[0x20]; + u8 register_c_6[0x20]; + u8 register_c_7[0x20]; + u8 register_c_4[0x20]; + u8 register_c_5[0x20]; + u8 register_c_2[0x20]; + u8 register_c_3[0x20]; + u8 register_c_0[0x20]; + u8 register_c_1[0x20]; +}; + +struct mlx5_ifc_definer_hl_mpls_bits { + u8 mpls0_label[0x20]; + u8 mpls1_label[0x20]; + u8 mpls2_label[0x20]; + u8 mpls3_label[0x20]; + u8 mpls4_label[0x20]; +}; + +struct mlx5_ifc_definer_hl_bits { + struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer; + struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner; + struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer; + struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner; + struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2; + struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer; + struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner; + struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer; + struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner; + struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi; + struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4; + struct mlx5_ifc_definer_hl_oks1_bits oks1; + struct mlx5_ifc_definer_hl_oks2_bits oks2; + struct mlx5_ifc_definer_hl_voq_bits voq; + u8 reserved_at_480[0x380]; + struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer; + struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner; + struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer; + struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner; + struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer; + struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner; + u8 unsupported_dest_ib_l3[0x80]; + u8 unsupported_source_ib_l3[0x80]; + u8 unsupported_udp_misc_outer[0x20]; + u8 unsupported_udp_misc_inner[0x20]; + struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp; + struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header; + struct mlx5_ifc_definer_hl_mpls_bits mpls_outer; + struct mlx5_ifc_definer_hl_mpls_bits mpls_inner; + u8 unsupported_config_headers_outer[0x80]; + u8 unsupported_config_headers_inner[0x80]; + struct mlx5_ifc_definer_hl_random_number_bits random_number; + struct mlx5_ifc_definer_hl_ipsec_bits ipsec; + struct mlx5_ifc_definer_hl_metadata_bits metadata; + u8 unsupported_utc_timestamp[0x40]; + u8 unsupported_free_running_timestamp[0x40]; + struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser; + struct mlx5_ifc_definer_hl_registers_bits registers; + /* Reserved in case header layout on future HW */ + u8 
unsupported_reserved[0xd40]; +}; + +enum mlx5hws_definer_gtp { + MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04, +}; + +struct mlx5_ifc_header_gtp_bits { + u8 version[0x3]; + u8 proto_type[0x1]; + u8 reserved1[0x1]; + union { + u8 msg_flags[0x3]; + struct { + u8 ext_hdr_flag[0x1]; + u8 seq_num_flag[0x1]; + u8 pdu_flag[0x1]; + }; + }; + u8 msg_type[0x8]; + u8 msg_len[0x8]; + u8 teid[0x20]; +}; + +struct mlx5_ifc_header_opt_gtp_bits { + u8 seq_num[0x10]; + u8 pdu_num[0x8]; + u8 next_ext_hdr_type[0x8]; +}; + +struct mlx5_ifc_header_gtp_psc_bits { + u8 len[0x8]; + u8 pdu_type[0x4]; + u8 flags[0x4]; + u8 qfi[0x8]; + u8 reserved2[0x8]; +}; + +struct mlx5_ifc_header_ipv6_vtc_bits { + u8 version[0x4]; + union { + u8 tos[0x8]; + struct { + u8 dscp[0x6]; + u8 ecn[0x2]; + }; + }; + u8 flow_label[0x14]; +}; + +struct mlx5_ifc_header_ipv6_routing_ext_bits { + u8 next_hdr[0x8]; + u8 hdr_len[0x8]; + u8 type[0x8]; + u8 segments_left[0x8]; + union { + u8 flags[0x20]; + struct { + u8 last_entry[0x8]; + u8 flag[0x8]; + u8 tag[0x10]; + }; + }; +}; + +struct mlx5_ifc_header_vxlan_bits { + u8 flags[0x8]; + u8 reserved1[0x18]; + u8 vni[0x18]; + u8 reserved2[0x8]; +}; + +struct mlx5_ifc_header_vxlan_gpe_bits { + u8 flags[0x8]; + u8 rsvd0[0x10]; + u8 protocol[0x8]; + u8 vni[0x18]; + u8 rsvd1[0x8]; +}; + +struct mlx5_ifc_header_gre_bits { + union { + u8 c_rsvd0_ver[0x10]; + struct { + u8 gre_c_present[0x1]; + u8 reserved_at_1[0x1]; + u8 gre_k_present[0x1]; + u8 gre_s_present[0x1]; + u8 reserved_at_4[0x9]; + u8 version[0x3]; + }; + }; + u8 gre_protocol[0x10]; + u8 checksum[0x10]; + u8 reserved_at_30[0x10]; +}; + +struct mlx5_ifc_header_geneve_bits { + union { + u8 ver_opt_len_o_c_rsvd[0x10]; + struct { + u8 version[0x2]; + u8 opt_len[0x6]; + u8 o_flag[0x1]; + u8 c_flag[0x1]; + u8 reserved_at_a[0x6]; + }; + }; + u8 protocol_type[0x10]; + u8 vni[0x18]; + u8 reserved_at_38[0x8]; +}; + +struct mlx5_ifc_header_geneve_opt_bits { + u8 class[0x10]; + u8 type[0x8]; + u8 reserved[0x3]; + u8 len[0x5]; +}; + +struct mlx5_ifc_header_icmp_bits { + union { + u8 icmp_dw1[0x20]; + struct { + u8 type[0x8]; + u8 code[0x8]; + u8 cksum[0x10]; + }; + }; + union { + u8 icmp_dw2[0x20]; + struct { + u8 ident[0x10]; + u8 seq_nb[0x10]; + }; + }; +}; + +struct mlx5hws_definer { + enum mlx5hws_definer_type type; + u8 dw_selector[DW_SELECTORS]; + u8 byte_selector[BYTE_SELECTORS]; + struct mlx5hws_rule_match_tag mask; + u32 obj_id; +}; + +struct mlx5hws_definer_cache { + struct list_head list_head; +}; + +struct mlx5hws_definer_cache_item { + struct mlx5hws_definer definer; + u32 refcount; + struct list_head list_node; +}; + +static inline bool +mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer) +{ + return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO); +} + +void mlx5hws_definer_create_tag(u32 *match_param, + struct mlx5hws_definer_fc *fc, + u32 fc_sz, + u8 *tag); + +int mlx5hws_definer_get_id(struct mlx5hws_definer *definer); + +int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt); + +void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt); + +int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache); + +void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache); + +int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a, + struct mlx5hws_definer *definer_b); + +int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx, + struct mlx5hws_definer *definer); + +void mlx5hws_definer_free(struct mlx5hws_context *ctx, + struct mlx5hws_definer 
*definer); + +int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx, + struct mlx5hws_match_template *mt, + struct mlx5hws_definer *match_definer); + +struct mlx5hws_definer_fc * +mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx, + u8 match_criteria_enable, + u32 *match_param, + int *fc_sz); + +#endif /* MLX5HWS_DEFINER_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h new file mode 100644 index 00000000000000..5643be1cd5bf39 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_INTERNAL_H_ +#define MLX5HWS_INTERNAL_H_ + +#include +#include +#include "fs_core.h" +#include "wq.h" +#include "lib/mlx5.h" + +#include "mlx5hws_prm.h" +#include "mlx5hws.h" +#include "mlx5hws_pool.h" +#include "mlx5hws_vport.h" +#include "mlx5hws_context.h" +#include "mlx5hws_table.h" +#include "mlx5hws_send.h" +#include "mlx5hws_rule.h" +#include "mlx5hws_cmd.h" +#include "mlx5hws_action.h" +#include "mlx5hws_definer.h" +#include "mlx5hws_matcher.h" +#include "mlx5hws_debug.h" +#include "mlx5hws_pat_arg.h" +#include "mlx5hws_bwc.h" +#include "mlx5hws_bwc_complex.h" + +#define W_SIZE 2 +#define DW_SIZE 4 +#define BITS_IN_BYTE 8 +#define BITS_IN_DW (BITS_IN_BYTE * DW_SIZE) + +#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit))) + +#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg) +#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg) +#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg) + +#define MLX5HWS_TABLE_TYPE_BASE 2 +#define MLX5HWS_ACTION_STE_IDX_ANY 0 + +static inline bool is_mem_zero(const u8 *mem, size_t size) +{ + if (unlikely(!size)) { + pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__); + return true; + } + + return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0; +} + +static inline unsigned long align(unsigned long val, unsigned long align) +{ + return (val + align - 1) & ~(align - 1); +} + +#endif /* MLX5HWS_INTERNAL_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c new file mode 100644 index 00000000000000..61a1155d4b4fdb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c @@ -0,0 +1,1216 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +enum mlx5hws_matcher_rtc_type { + HWS_MATCHER_RTC_TYPE_MATCH, + HWS_MATCHER_RTC_TYPE_STE_ARRAY, + HWS_MATCHER_RTC_TYPE_MAX, +}; + +static const char * const mlx5hws_matcher_rtc_type_str[] = { + [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH", + [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY", + [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN", +}; + +static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type) +{ + if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX) + rtc_type = HWS_MATCHER_RTC_TYPE_MAX; + return mlx5hws_matcher_rtc_type_str[rtc_type]; +} + +static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules) +{ + /* Collision table concatenation is done only for large rule tables */ + return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH; +} + +static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules) 
+{ + if (hws_matcher_requires_col_tbl(log_num_of_rules)) + return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH; + + /* For small rule tables we use a single deep table to assure insertion */ + return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH); +} + +static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher) +{ + mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id); +} + +static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_table *tbl = matcher->tbl; + int ret; + + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n"); + return ret; + } + return 0; +} + +static int hws_matcher_connect(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + struct mlx5hws_matcher *prev = NULL; + struct mlx5hws_matcher *next = NULL; + struct mlx5hws_matcher *tmp_matcher; + int ret; + + /* Find location in matcher list */ + if (list_empty(&tbl->matchers_list)) { + list_add(&matcher->list_node, &tbl->matchers_list); + goto connect; + } + + list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) { + if (tmp_matcher->attr.priority > matcher->attr.priority) { + next = tmp_matcher; + break; + } + prev = tmp_matcher; + } + + if (next) + /* insert before next */ + list_add_tail(&matcher->list_node, &next->list_node); + else + /* insert after prev */ + list_add(&matcher->list_node, &prev->list_node); + +connect: + if (next) { + /* Connect to next RTC */ + ret = mlx5hws_table_ft_set_next_rtc(ctx, + matcher->end_ft_id, + tbl->fw_ft_type, + next->match_ste.rtc_0_id, + next->match_ste.rtc_1_id); + if (ret) { + mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n"); + goto remove_from_list; + } + } else { + /* Connect last matcher to next miss_tbl if exists */ + ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl); + if (ret) { + mlx5hws_err(ctx, "Failed connect new matcher to miss_tbl\n"); + goto remove_from_list; + } + } + + /* Connect to previous FT */ + ret = mlx5hws_table_ft_set_next_rtc(ctx, + prev ? prev->end_ft_id : tbl->ft_id, + tbl->fw_ft_type, + matcher->match_ste.rtc_0_id, + matcher->match_ste.rtc_1_id); + if (ret) { + mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n"); + goto remove_from_list; + } + + /* Reset prev matcher FT default miss (drop refcount) */ + ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? 
prev->end_ft_id : tbl->ft_id); + if (ret) { + mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n"); + goto remove_from_list; + } + + if (!prev) { + /* Update tables missing to current matcher in the table */ + ret = mlx5hws_table_update_connected_miss_tables(tbl); + if (ret) { + mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n"); + goto remove_from_list; + } + } + + return 0; + +remove_from_list: + list_del_init(&matcher->list_node); + return ret; +} + +static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher *next = NULL, *prev = NULL; + struct mlx5hws_table *tbl = matcher->tbl; + u32 prev_ft_id = tbl->ft_id; + int ret; + + if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) { + prev = list_prev_entry(matcher, list_node); + prev_ft_id = prev->end_ft_id; + } + + if (!list_is_last(&matcher->list_node, &tbl->matchers_list)) + next = list_next_entry(matcher, list_node); + + list_del_init(&matcher->list_node); + + if (next) { + /* Connect previous end FT to next RTC */ + ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx, + prev_ft_id, + tbl->fw_ft_type, + next->match_ste.rtc_0_id, + next->match_ste.rtc_1_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n"); + goto matcher_reconnect; + } + } else { + ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n"); + goto matcher_reconnect; + } + } + + /* Removing first matcher, update connected miss tables if exists */ + if (prev_ft_id == tbl->ft_id) { + ret = mlx5hws_table_update_connected_miss_tables(tbl); + if (ret) { + mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n"); + goto matcher_reconnect; + } + } + + ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n"); + goto matcher_reconnect; + } + + return 0; + +matcher_reconnect: + if (list_empty(&tbl->matchers_list) || !prev) + list_add(&matcher->list_node, &tbl->matchers_list); + else + /* insert after prev matcher */ + list_add(&matcher->list_node, &prev->list_node); + + return ret; +} + +static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher, + struct mlx5hws_cmd_rtc_create_attr *rtc_attr, + enum mlx5hws_matcher_rtc_type rtc_type, + bool is_mirror) +{ + struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste; + enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src; + bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH; + + if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) || + (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) { + /* Optimize FDB RTC */ + rtc_attr->log_size = 0; + rtc_attr->log_depth = 0; + } else { + /* Keep original values */ + rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order; + rtc_attr->log_depth = is_match_rtc ? 
matcher->attr.table.sz_col_log : 0; + } +} + +static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher, + enum mlx5hws_matcher_rtc_type rtc_type, + u8 action_ste_selector) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0}; + struct mlx5hws_match_template *mt = matcher->mt; + struct mlx5hws_context *ctx = matcher->tbl->ctx; + struct mlx5hws_action_default_stc *default_stc; + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_pool *ste_pool, *stc_pool; + struct mlx5hws_pool_chunk *ste; + u32 *rtc_0_id, *rtc_1_id; + u32 obj_id; + int ret; + + switch (rtc_type) { + case HWS_MATCHER_RTC_TYPE_MATCH: + rtc_0_id = &matcher->match_ste.rtc_0_id; + rtc_1_id = &matcher->match_ste.rtc_1_id; + ste_pool = matcher->match_ste.pool; + ste = &matcher->match_ste.ste; + ste->order = attr->table.sz_col_log + attr->table.sz_row_log; + + rtc_attr.log_size = attr->table.sz_row_log; + rtc_attr.log_depth = attr->table.sz_col_log; + rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt); + rtc_attr.is_scnd_range = 0; + rtc_attr.miss_ft_id = matcher->end_ft_id; + + if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) { + /* The usual Hash Table */ + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH; + + /* The first mt is used since all share the same definer */ + rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer); + } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) { + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; + rtc_attr.num_hash_definer = 1; + + if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) { + /* Hash Split Table */ + rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH; + rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer); + } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) { + /* Linear Lookup Table */ + rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR; + rtc_attr.match_definer_0 = ctx->caps->linear_match_definer; + } + } + + /* Match pool requires implicit allocation */ + ret = mlx5hws_pool_chunk_alloc(ste_pool, ste); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate STE for %s RTC", + hws_matcher_rtc_type_to_str(rtc_type)); + return ret; + } + break; + + case HWS_MATCHER_RTC_TYPE_STE_ARRAY: + action_ste = &matcher->action_ste[action_ste_selector]; + + rtc_0_id = &action_ste->rtc_0_id; + rtc_1_id = &action_ste->rtc_1_id; + ste_pool = action_ste->pool; + ste = &action_ste->ste; + ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) + + attr->table.sz_row_log; + rtc_attr.log_size = ste->order; + rtc_attr.log_depth = 0; + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; + /* The action STEs use the default always hit definer */ + rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer; + rtc_attr.is_frst_jumbo = false; + rtc_attr.miss_ft_id = 0; + break; + + default: + mlx5hws_err(ctx, "HWS Invalid RTC type\n"); + return -EINVAL; + } + + obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + + rtc_attr.pd = ctx->pd_num; + rtc_attr.ste_base = obj_id; + rtc_attr.ste_offset = ste->offset; + rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx); + rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false); + hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false); + + /* STC is a single resource (obj_id), use any STC for the ID */ + stc_pool = ctx->stc_pool[tbl->type]; + 
default_stc = ctx->common_res[tbl->type].default_stc; + obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit); + rtc_attr.stc_base = obj_id; + + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create matcher RTC of type %s", + hws_matcher_rtc_type_to_str(rtc_type)); + goto free_ste; + } + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + rtc_attr.ste_base = obj_id; + rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true); + + obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit); + rtc_attr.stc_base = obj_id; + hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true); + + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id); + if (ret) { + mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s", + hws_matcher_rtc_type_to_str(rtc_type)); + goto destroy_rtc_0; + } + } + + return 0; + +destroy_rtc_0: + mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id); +free_ste: + if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH) + mlx5hws_pool_chunk_free(ste_pool, ste); + return ret; +} + +static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher, + enum mlx5hws_matcher_rtc_type rtc_type, + u8 action_ste_selector) +{ + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_pool_chunk *ste; + struct mlx5hws_pool *ste_pool; + u32 rtc_0_id, rtc_1_id; + + switch (rtc_type) { + case HWS_MATCHER_RTC_TYPE_MATCH: + rtc_0_id = matcher->match_ste.rtc_0_id; + rtc_1_id = matcher->match_ste.rtc_1_id; + ste_pool = matcher->match_ste.pool; + ste = &matcher->match_ste.ste; + break; + case HWS_MATCHER_RTC_TYPE_STE_ARRAY: + action_ste = &matcher->action_ste[action_ste_selector]; + rtc_0_id = action_ste->rtc_0_id; + rtc_1_id = action_ste->rtc_1_id; + ste_pool = action_ste->pool; + ste = &action_ste->ste; + break; + default: + return; + } + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id); + + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id); + if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH) + mlx5hws_pool_chunk_free(ste_pool, ste); +} + +static int +hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps, + struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + + if (attr->table.sz_col_log > caps->rtc_log_depth_max) { + mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n", + caps->rtc_log_depth_max); + return -EOPNOTSUPP; + } + + if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) { + mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n", + caps->ste_alloc_log_max); + return -EOPNOTSUPP; + } + + if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) { + mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n", + caps->ste_alloc_log_gran); + return -EOPNOTSUPP; + } + + return 0; +} + +static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr, + struct mlx5hws_matcher *matcher) +{ + switch (matcher->attr.optimize_flow_src) { + case MLX5HWS_MATCHER_FLOW_SRC_VPORT: + attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG; + break; + case MLX5HWS_MATCHER_FLOW_SRC_WIRE: + attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR; + break; + default: + break; + } +} + +static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher, + struct mlx5hws_action_template *at) +{ + 
struct mlx5hws_context *ctx = matcher->tbl->ctx; + bool valid; + int ret; + + valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type); + if (!valid) { + mlx5hws_err(ctx, "Invalid combination in action template\n"); + return -EINVAL; + } + + /* Process action template to setters */ + ret = mlx5hws_action_template_process(at); + if (ret) { + mlx5hws_err(ctx, "Failed to process action template\n"); + return ret; + } + + return 0; +} + +static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher) +{ + struct mlx5hws_matcher_resize_data *resize_data; + + resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL); + if (!resize_data) + return -ENOMEM; + + resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes; + + resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc; + resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id; + resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id; + resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ? + src_matcher->action_ste[0].pool : + NULL; + resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc; + resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id; + resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id; + resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ? + src_matcher->action_ste[1].pool : + NULL; + + /* Place the new resized matcher on the dst matcher's list */ + list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data); + + /* Move all the previous resized matchers to the dst matcher's list */ + while (!list_empty(&src_matcher->resize_data)) { + resize_data = list_first_entry(&src_matcher->resize_data, + struct mlx5hws_matcher_resize_data, + list_node); + list_del_init(&resize_data->list_node); + list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data); + } + + return 0; +} + +static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_resize_data *resize_data; + + if (!mlx5hws_matcher_is_resizable(matcher)) + return; + + while (!list_empty(&matcher->resize_data)) { + resize_data = list_first_entry(&matcher->resize_data, + struct mlx5hws_matcher_resize_data, + list_node); + list_del_init(&resize_data->list_node); + + if (resize_data->max_stes) { + mlx5hws_action_free_single_stc(matcher->tbl->ctx, + matcher->tbl->type, + &resize_data->action_ste[1].stc); + mlx5hws_action_free_single_stc(matcher->tbl->ctx, + matcher->tbl->type, + &resize_data->action_ste[0].stc); + + if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, + resize_data->action_ste[1].rtc_1_id); + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, + resize_data->action_ste[0].rtc_1_id); + } + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, + resize_data->action_ste[1].rtc_0_id); + mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, + resize_data->action_ste[0].rtc_0_id); + if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) { + mlx5hws_pool_destroy(resize_data->action_ste[1].pool); + mlx5hws_pool_destroy(resize_data->action_ste[0].pool); + } + } + + kfree(resize_data); + } +} + +static int +hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_pool_attr pool_attr = {0}; + struct 
mlx5hws_context *ctx = tbl->ctx; + int ret; + + action_ste = &matcher->action_ste[action_ste_selector]; + + /* Allocate action STE mempool */ + pool_attr.table_type = tbl->type; + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; + pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL; + pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) + + matcher->attr.table.sz_row_log; + hws_matcher_set_pool_attr(&pool_attr, matcher); + action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr); + if (!action_ste->pool) { + mlx5hws_err(ctx, "Failed to create action ste pool\n"); + return -EINVAL; + } + + /* Allocate action RTC */ + ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); + if (ret) { + mlx5hws_err(ctx, "Failed to create action RTC\n"); + goto free_ste_pool; + } + + /* Allocate STC for jumps to STE */ + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + stc_attr.ste_table.ste = action_ste->ste; + stc_attr.ste_table.ste_pool = action_ste->pool; + stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer; + + ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type, + &action_ste->stc); + if (ret) { + mlx5hws_err(ctx, "Failed to create action jump to table STC\n"); + goto free_rtc; + } + + return 0; + +free_rtc: + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); +free_ste_pool: + mlx5hws_pool_destroy(action_ste->pool); + return ret; +} + +static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector) +{ + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_table *tbl = matcher->tbl; + + action_ste = &matcher->action_ste[action_ste_selector]; + + if (!action_ste->max_stes || + matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION || + mlx5hws_matcher_is_in_resize(matcher)) + return; + + mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc); + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector); + mlx5hws_pool_destroy(action_ste->pool); +} + +static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher) +{ + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt); + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + u32 required_stes; + u8 max_stes = 0; + int i, ret; + + if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION) + return 0; + + for (i = 0; i < matcher->num_of_at; i++) { + struct mlx5hws_action_template *at = &matcher->at[i]; + + ret = hws_matcher_check_and_process_at(matcher, at); + if (ret) { + mlx5hws_err(ctx, "Invalid at %d", i); + return ret; + } + + required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term); + max_stes = max(max_stes, required_stes); + + /* Future: Optimize reparse */ + } + + /* There are no additional STEs required for matcher */ + if (!max_stes) + return 0; + + matcher->action_ste[0].max_stes = max_stes; + matcher->action_ste[1].max_stes = max_stes; + + ret = hws_matcher_bind_at_idx(matcher, 0); + if (ret) + return ret; + + ret = hws_matcher_bind_at_idx(matcher, 1); + if (ret) + goto free_at_0; + + return 0; + +free_at_0: + hws_matcher_unbind_at_idx(matcher, 0); + return ret; +} + +static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher) +{ + hws_matcher_unbind_at_idx(matcher, 1); + hws_matcher_unbind_at_idx(matcher, 0); +} + +static int hws_matcher_bind_mt(struct 
mlx5hws_matcher *matcher) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + struct mlx5hws_pool_attr pool_attr = {0}; + int ret; + + /* Calculate match, range and hash definers */ + if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) { + ret = mlx5hws_definer_mt_init(ctx, matcher->mt); + if (ret) { + if (ret == -E2BIG) + mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n"); + return ret; + } + } + + /* Create an STE pool per matcher*/ + pool_attr.table_type = matcher->tbl->type; + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; + pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL; + pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log + + matcher->attr.table.sz_row_log; + hws_matcher_set_pool_attr(&pool_attr, matcher); + + matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr); + if (!matcher->match_ste.pool) { + mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n"); + ret = -EOPNOTSUPP; + goto uninit_match_definer; + } + + return 0; + +uninit_match_definer: + if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) + mlx5hws_definer_mt_uninit(ctx, matcher->mt); + return ret; +} + +static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher) +{ + mlx5hws_pool_destroy(matcher->match_ste.pool); + if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) + mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt); +} + +static int +hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps, + struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + struct mlx5hws_context *ctx = matcher->tbl->ctx; + + switch (attr->insert_mode) { + case MLX5HWS_MATCHER_INSERT_BY_HASH: + if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) { + mlx5hws_err(ctx, "Invalid matcher distribute mode\n"); + return -EOPNOTSUPP; + } + break; + + case MLX5HWS_MATCHER_INSERT_BY_INDEX: + if (attr->table.sz_col_log) { + mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n"); + return -EOPNOTSUPP; + } + + if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) { + /* Hash Split Table */ + if (!caps->rtc_hash_split_table) { + mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n"); + return -EOPNOTSUPP; + } + } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) { + /* Linear Lookup Table */ + if (!caps->rtc_linear_lookup_table || + !IS_BIT_SET(caps->access_index_mode, + MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) { + mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n"); + return -EOPNOTSUPP; + } + + if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) { + mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d", + MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX); + return -EOPNOTSUPP; + } + } else { + mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n"); + return -EOPNOTSUPP; + } + break; + + default: + mlx5hws_err(ctx, "Matcher has unsupported insert mode\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps, + struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher_attr *attr = &matcher->attr; + + if (hws_matcher_validate_insert_mode(caps, matcher)) + return -EOPNOTSUPP; + + if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) { + mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n"); + return -EOPNOTSUPP; + } + + /* Convert number of rules to the required depth */ + 
if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE && + attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) + attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log); + + matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0; + + return hws_matcher_check_attr_sz(caps, matcher); +} + +static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher) +{ + int ret; + + /* Select and create the definers for current matcher */ + ret = hws_matcher_bind_mt(matcher); + if (ret) + return ret; + + /* Calculate and verify action combination */ + ret = hws_matcher_bind_at(matcher); + if (ret) + goto unbind_mt; + + /* Create matcher end flow table anchor */ + ret = hws_matcher_create_end_ft(matcher); + if (ret) + goto unbind_at; + + /* Allocate the RTC for the new matcher */ + ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); + if (ret) + goto destroy_end_ft; + + /* Connect the matcher to the matcher list */ + ret = hws_matcher_connect(matcher); + if (ret) + goto destroy_rtc; + + return 0; + +destroy_rtc: + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); +destroy_end_ft: + hws_matcher_destroy_end_ft(matcher); +unbind_at: + hws_matcher_unbind_at(matcher); +unbind_mt: + hws_matcher_unbind_mt(matcher); + return ret; +} + +static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher) +{ + hws_matcher_resize_uninit(matcher); + hws_matcher_disconnect(matcher); + hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0); + hws_matcher_destroy_end_ft(matcher); + hws_matcher_unbind_at(matcher); + hws_matcher_unbind_mt(matcher); +} + +static int +hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + struct mlx5hws_matcher *col_matcher; + int ret; + + if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE || + matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) + return 0; + + if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log)) + return 0; + + col_matcher = kzalloc(sizeof(*matcher), GFP_KERNEL); + if (!col_matcher) + return -ENOMEM; + + INIT_LIST_HEAD(&col_matcher->resize_data); + + col_matcher->tbl = matcher->tbl; + col_matcher->mt = matcher->mt; + col_matcher->at = matcher->at; + col_matcher->num_of_at = matcher->num_of_at; + col_matcher->num_of_mt = matcher->num_of_mt; + col_matcher->attr.priority = matcher->attr.priority; + col_matcher->flags = matcher->flags; + col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION; + col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE; + col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src; + col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log; + col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH; + if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO) + col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO; + + col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach; + + ret = hws_matcher_process_attr(ctx->caps, col_matcher); + if (ret) + goto free_col_matcher; + + ret = hws_matcher_create_and_connect(col_matcher); + if (ret) + goto free_col_matcher; + + matcher->col_matcher = col_matcher; + + return 0; + +free_col_matcher: + kfree(col_matcher); + mlx5hws_err(ctx, "Failed to create assured collision matcher\n"); + return ret; +} + +static void +hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher) +{ + if (matcher->attr.mode != 
MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+	    matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+		return;
+
+	if (matcher->col_matcher) {
+		hws_matcher_destroy_and_disconnect(matcher->col_matcher);
+		kfree(matcher->col_matcher);
+	}
+}
+
+static int hws_matcher_init(struct mlx5hws_matcher *matcher)
+{
+	struct mlx5hws_context *ctx = matcher->tbl->ctx;
+	int ret;
+
+	INIT_LIST_HEAD(&matcher->resize_data);
+
+	mutex_lock(&ctx->ctrl_lock);
+
+	/* Allocate matcher resource and connect to the packet pipe */
+	ret = hws_matcher_create_and_connect(matcher);
+	if (ret)
+		goto unlock_err;
+
+	/* Create additional matcher for collision handling */
+	ret = hws_matcher_create_col_matcher(matcher);
+	if (ret)
+		goto destroy_and_disconnect;
+	mutex_unlock(&ctx->ctrl_lock);
+
+	return 0;
+
+destroy_and_disconnect:
+	hws_matcher_destroy_and_disconnect(matcher);
+unlock_err:
+	mutex_unlock(&ctx->ctrl_lock);
+	return ret;
+}
+
+static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
+{
+	struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+	mutex_lock(&ctx->ctrl_lock);
+	hws_matcher_destroy_col_matcher(matcher);
+	hws_matcher_destroy_and_disconnect(matcher);
+	mutex_unlock(&ctx->ctrl_lock);
+
+	return 0;
+}
+
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+			      struct mlx5hws_action_template *at)
+{
+	bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+	struct mlx5hws_context *ctx = matcher->tbl->ctx;
+	u32 required_stes;
+	int ret;
+
+	if (!matcher->attr.max_num_of_at_attach) {
+		mlx5hws_dbg(ctx, "Number of action templates (%d) exceeds the allowed limit\n",
+			    matcher->num_of_at);
+		return -EOPNOTSUPP;
+	}
+
+	ret = hws_matcher_check_and_process_at(matcher, at);
+	if (ret)
+		return ret;
+
+	required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+	if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+		mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
+			    required_stes,
+			    matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+		return -ENOMEM;
+	}
+
+	matcher->at[matcher->num_of_at] = *at;
+	matcher->num_of_at += 1;
+	matcher->attr.max_num_of_at_attach -= 1;
+
+	if (matcher->col_matcher)
+		matcher->col_matcher->num_of_at = matcher->num_of_at;
+
+	return 0;
+}
+
+static int
+hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
+			  struct mlx5hws_match_template *mt[],
+			  u8 num_of_mt,
+			  struct mlx5hws_action_template *at[],
+			  u8 num_of_at)
+{
+	struct mlx5hws_context *ctx = matcher->tbl->ctx;
+	int ret = 0;
+	int i;
+
+	if (!num_of_mt || !num_of_at) {
+		mlx5hws_err(ctx, "Number of action/match templates cannot be zero\n");
+		return -EOPNOTSUPP;
+	}
+
+	matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL);
+	if (!matcher->mt)
+		return -ENOMEM;
+
+	matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+			      sizeof(*matcher->at),
+			      GFP_KERNEL);
+	if (!matcher->at) {
+		mlx5hws_err(ctx, "Failed to allocate action template array\n");
+		ret = -ENOMEM;
+		goto free_mt;
+	}
+
+	for (i = 0; i < num_of_mt; i++)
+		matcher->mt[i] = *mt[i];
+
+	for (i = 0; i < num_of_at; i++)
+		matcher->at[i] = *at[i];
+
+	matcher->num_of_mt = num_of_mt;
+	matcher->num_of_at = num_of_at;
+
+	return 0;
+
+free_mt:
+	kfree(matcher->mt);
+	return ret;
+}
+
+static void
+hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
+{
+	kfree(matcher->at);
+	kfree(matcher->mt);
+}
+
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *tbl,
+		       struct mlx5hws_match_template *mt[],
+		       u8 num_of_mt,
+		       struct 
mlx5hws_action_template *at[], + u8 num_of_at, + struct mlx5hws_matcher_attr *attr) +{ + struct mlx5hws_context *ctx = tbl->ctx; + struct mlx5hws_matcher *matcher; + int ret; + + matcher = kzalloc(sizeof(*matcher), GFP_KERNEL); + if (!matcher) + return NULL; + + matcher->tbl = tbl; + matcher->attr = *attr; + + ret = hws_matcher_process_attr(tbl->ctx->caps, matcher); + if (ret) + goto free_matcher; + + ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at); + if (ret) + goto free_matcher; + + ret = hws_matcher_init(matcher); + if (ret) { + mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret); + goto unset_templates; + } + + return matcher; + +unset_templates: + hws_matcher_unset_templates(matcher); +free_matcher: + kfree(matcher); + return NULL; +} + +int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher) +{ + hws_matcher_uninit(matcher); + hws_matcher_unset_templates(matcher); + kfree(matcher); + return 0; +} + +struct mlx5hws_match_template * +mlx5hws_match_template_create(struct mlx5hws_context *ctx, + u32 *match_param, + u32 match_param_sz, + u8 match_criteria_enable) +{ + struct mlx5hws_match_template *mt; + + mt = kzalloc(sizeof(*mt), GFP_KERNEL); + if (!mt) + return NULL; + + mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!mt->match_param) + goto free_template; + + memcpy(mt->match_param, match_param, match_param_sz); + mt->match_criteria_enable = match_criteria_enable; + + return mt; + +free_template: + kfree(mt); + return NULL; +} + +int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt) +{ + kfree(mt->match_param); + kfree(mt); + return 0; +} + +static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_matcher *dst_matcher) +{ + struct mlx5hws_context *ctx = src_matcher->tbl->ctx; + int i; + + if (src_matcher->tbl->type != dst_matcher->tbl->type) { + mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n"); + return -EINVAL; + } + + if (!mlx5hws_matcher_is_resizable(src_matcher) || + !mlx5hws_matcher_is_resizable(dst_matcher)) { + mlx5hws_err(ctx, "Src/dst matcher is not resizable\n"); + return -EINVAL; + } + + if (mlx5hws_matcher_is_insert_by_idx(src_matcher) != + mlx5hws_matcher_is_insert_by_idx(dst_matcher)) { + mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n"); + return -EINVAL; + } + + if (mlx5hws_matcher_is_in_resize(src_matcher) || + mlx5hws_matcher_is_in_resize(dst_matcher)) { + mlx5hws_err(ctx, "Src/dst matcher is already in resize\n"); + return -EINVAL; + } + + /* Compare match templates - make sure the definers are equivalent */ + if (src_matcher->num_of_mt != dst_matcher->num_of_mt) { + mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n"); + return -EINVAL; + } + + if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes > + dst_matcher->action_ste[0].max_stes) { + mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n"); + return -EINVAL; + } + + for (i = 0; i < src_matcher->num_of_mt; i++) { + if (mlx5hws_definer_compare(src_matcher->mt[i].definer, + dst_matcher->mt[i].definer)) { + mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n"); + return -EINVAL; + } + } + + return 0; +} + +int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_matcher *dst_matcher) +{ + int ret = 0; + + mutex_lock(&src_matcher->tbl->ctx->ctrl_lock); + + ret = hws_matcher_resize_precheck(src_matcher, dst_matcher); + if (ret) + goto out; + + src_matcher->resize_dst = dst_matcher; + + ret = 
hws_matcher_resize_init(src_matcher); + if (ret) + src_matcher->resize_dst = NULL; + +out: + mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock); + return ret; +} + +int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher, + struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_context *ctx = src_matcher->tbl->ctx; + + if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) { + mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n"); + return -EINVAL; + } + + if (unlikely(src_matcher != rule->matcher)) { + mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n"); + return -EINVAL; + } + + return mlx5hws_rule_move_hws_add(rule, attr); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h new file mode 100644 index 00000000000000..125391d1a11483 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_MATCHER_H_ +#define MLX5HWS_MATCHER_H_ + +/* We calculated that concatenating a collision table to the main table with + * 3% of the main table rows will be enough resources for high insertion + * success probability. + * + * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ 5 + */ +#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5 +/* Threshold to determine if amount of rules require a collision table */ +#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10 +/* Required depth of an assured collision table */ +#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4 +/* Required depth of the main large table */ +#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2 + +enum mlx5hws_matcher_offset { + MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12, + MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13, +}; + +enum mlx5hws_matcher_flags { + MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2, + MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3, +}; + +struct mlx5hws_match_template { + struct mlx5hws_definer *definer; + struct mlx5hws_definer_fc *fc; + u32 *match_param; + u8 match_criteria_enable; + u16 fc_sz; +}; + +struct mlx5hws_matcher_match_ste { + struct mlx5hws_pool_chunk ste; + u32 rtc_0_id; + u32 rtc_1_id; + struct mlx5hws_pool *pool; +}; + +struct mlx5hws_matcher_action_ste { + struct mlx5hws_pool_chunk ste; + struct mlx5hws_pool_chunk stc; + u32 rtc_0_id; + u32 rtc_1_id; + struct mlx5hws_pool *pool; + u8 max_stes; +}; + +struct mlx5hws_matcher_resize_data_node { + struct mlx5hws_pool_chunk stc; + u32 rtc_0_id; + u32 rtc_1_id; + struct mlx5hws_pool *pool; +}; + +struct mlx5hws_matcher_resize_data { + struct mlx5hws_matcher_resize_data_node action_ste[2]; + u8 max_stes; + struct list_head list_node; +}; + +struct mlx5hws_matcher { + struct mlx5hws_table *tbl; + struct mlx5hws_matcher_attr attr; + struct mlx5hws_match_template *mt; + struct mlx5hws_action_template *at; + u8 num_of_at; + u8 num_of_mt; + /* enum mlx5hws_matcher_flags */ + u8 flags; + u32 end_ft_id; + struct mlx5hws_matcher *col_matcher; + struct mlx5hws_matcher *resize_dst; + struct mlx5hws_matcher_match_ste match_ste; + struct mlx5hws_matcher_action_ste action_ste[2]; + struct list_head list_node; + struct list_head resize_data; +}; + +static inline bool +mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt) +{ + return mlx5hws_definer_is_jumbo(mt->definer); +} + +static inline bool mlx5hws_matcher_is_resizable(struct 
mlx5hws_matcher *matcher) +{ + return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE); +} + +static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher) +{ + return !!matcher->resize_dst; +} + +static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher) +{ + return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX; +} + +#endif /* MLX5HWS_MATCHER_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c new file mode 100644 index 00000000000000..e084a5cbf81fd4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c @@ -0,0 +1,579 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +enum mlx5hws_arg_chunk_size +mlx5hws_arg_data_size_to_arg_log_size(u16 data_size) +{ + /* Return the roundup of log2(data_size) */ + if (data_size <= MLX5HWS_ARG_DATA_SIZE) + return MLX5HWS_ARG_CHUNK_SIZE_1; + if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2) + return MLX5HWS_ARG_CHUNK_SIZE_2; + if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4) + return MLX5HWS_ARG_CHUNK_SIZE_3; + if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8) + return MLX5HWS_ARG_CHUNK_SIZE_4; + + return MLX5HWS_ARG_CHUNK_SIZE_MAX; +} + +u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size) +{ + return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size)); +} + +enum mlx5hws_arg_chunk_size +mlx5hws_arg_get_arg_log_size(u16 num_of_actions) +{ + return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions * + MLX5HWS_MODIFY_ACTION_SIZE); +} + +u32 mlx5hws_arg_get_arg_size(u16 num_of_actions) +{ + return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions)); +} + +bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions) +{ + u16 i, field; + u8 action_id; + + for (i = 0; i < num_of_actions; i++) { + action_id = MLX5_GET(set_action_in, &actions[i], action_type); + + switch (action_id) { + case MLX5_MODIFICATION_TYPE_NOP: + field = MLX5_MODI_OUT_NONE; + break; + + case MLX5_MODIFICATION_TYPE_SET: + case MLX5_MODIFICATION_TYPE_ADD: + field = MLX5_GET(set_action_in, &actions[i], field); + break; + + case MLX5_MODIFICATION_TYPE_COPY: + case MLX5_MODIFICATION_TYPE_ADD_FIELD: + field = MLX5_GET(copy_action_in, &actions[i], dst_field); + break; + + default: + /* Insert/Remove/Unknown actions require reparse */ + return true; + } + + /* Below fields can change packet structure require a reparse */ + if (field == MLX5_MODI_OUT_ETHERTYPE || + field == MLX5_MODI_OUT_IPV6_NEXT_HDR) + return true; + } + + return false; +} + +/* Cache and cache element handling */ +int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache) +{ + struct mlx5hws_pattern_cache *new_cache; + + new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL); + if (!new_cache) + return -ENOMEM; + + INIT_LIST_HEAD(&new_cache->ptrn_list); + mutex_init(&new_cache->lock); + + *cache = new_cache; + + return 0; +} + +void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache) +{ + mutex_destroy(&cache->lock); + kfree(cache); +} + +static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions, + __be64 cur_actions[], + int num_of_actions, + __be64 actions[]) +{ + int i; + + if (cur_num_of_actions != num_of_actions) + return false; + + for (i = 0; i < num_of_actions; i++) { + u8 action_id = + MLX5_GET(set_action_in, &actions[i], action_type); + + if (action_id == MLX5_MODIFICATION_TYPE_COPY 
|| + action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) { + if (actions[i] != cur_actions[i]) + return false; + } else { + /* Compare just the control, not the values */ + if ((__force __be32)actions[i] != + (__force __be32)cur_actions[i]) + return false; + } + } + + return true; +} + +static struct mlx5hws_pattern_cache_item * +mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache, + u16 num_of_actions, + __be64 *actions) +{ + struct mlx5hws_pattern_cache_item *cached_pat = NULL; + + list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) { + if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions, + (__be64 *)cached_pat->mh_data.data, + num_of_actions, + actions)) + return cached_pat; + } + + return NULL; +} + +static struct mlx5hws_pattern_cache_item * +mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache, + u16 num_of_actions, + __be64 *actions) +{ + struct mlx5hws_pattern_cache_item *cached_pattern; + + cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions); + if (cached_pattern) { + /* LRU: move it to be first in the list */ + list_del_init(&cached_pattern->ptrn_list_node); + list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list); + cached_pattern->refcount++; + } + + return cached_pattern; +} + +static struct mlx5hws_pattern_cache_item * +mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache, + u32 pattern_id, + u16 num_of_actions, + __be64 *actions) +{ + struct mlx5hws_pattern_cache_item *cached_pattern; + + cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL); + if (!cached_pattern) + return NULL; + + cached_pattern->mh_data.num_of_actions = num_of_actions; + cached_pattern->mh_data.pattern_id = pattern_id; + cached_pattern->mh_data.data = + kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL); + if (!cached_pattern->mh_data.data) + goto free_cached_obj; + + list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list); + cached_pattern->refcount = 1; + + return cached_pattern; + +free_cached_obj: + kfree(cached_pattern); + return NULL; +} + +static struct mlx5hws_pattern_cache_item * +mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache, + u32 ptrn_id) +{ + struct mlx5hws_pattern_cache_item *cached_pattern = NULL; + + list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) { + if (cached_pattern->mh_data.pattern_id == ptrn_id) + return cached_pattern; + } + + return NULL; +} + +static void +mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern) +{ + list_del_init(&cached_pattern->ptrn_list_node); + + kfree(cached_pattern->mh_data.data); + kfree(cached_pattern); +} + +void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id) +{ + struct mlx5hws_pattern_cache *cache = ctx->pattern_cache; + struct mlx5hws_pattern_cache_item *cached_pattern; + + mutex_lock(&cache->lock); + cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id); + if (!cached_pattern) { + mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n"); + pr_warn("HWS: pattern ID %d is not found\n", ptrn_id); + goto out; + } + + if (--cached_pattern->refcount) + goto out; + + mlx5hws_pat_remove_pattern(cached_pattern); + mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id); + +out: + mutex_unlock(&cache->lock); +} + +int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx, + __be64 *pattern, size_t pattern_sz, + u32 *pattern_id) +{ + u16 num_of_actions = pattern_sz / 
MLX5HWS_MODIFY_ACTION_SIZE;
+	struct mlx5hws_pattern_cache_item *cached_pattern;
+	u32 ptrn_id = 0;
+	int ret = 0;
+
+	mutex_lock(&ctx->pattern_cache->lock);
+
+	cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
+								 num_of_actions,
+								 pattern);
+	if (cached_pattern) {
+		*pattern_id = cached_pattern->mh_data.pattern_id;
+		goto out_unlock;
+	}
+
+	ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
+						       pattern_sz,
+						       (u8 *)pattern,
+						       &ptrn_id);
+	if (ret) {
+		mlx5hws_err(ctx, "Failed to create pattern FW object\n");
+		goto out_unlock;
+	}
+
+	cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
+							  ptrn_id,
+							  num_of_actions,
+							  pattern);
+	if (!cached_pattern) {
+		mlx5hws_err(ctx, "Failed to add pattern to cache\n");
+		ret = -EINVAL;
+		goto clean_pattern;
+	}
+
+	mutex_unlock(&ctx->pattern_cache->lock);
+	*pattern_id = ptrn_id;
+
+	return ret;
+
+clean_pattern:
+	mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+out_unlock:
+	mutex_unlock(&ctx->pattern_cache->lock);
+	return ret;
+}
+
+static void
+mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
+			 void *comp_data,
+			 u32 arg_idx)
+{
+	send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+	send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
+	send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+	send_attr->id = arg_idx;
+	send_attr->user_data = comp_data;
+}
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+			       u32 arg_idx,
+			       u8 *arg_data,
+			       u16 num_of_actions)
+{
+	struct mlx5hws_send_engine_post_attr send_attr = {0};
+	struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
+	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
+	struct mlx5hws_send_engine_post_ctrl ctrl;
+	size_t wqe_len;
+
+	mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
+
+	ctrl = mlx5hws_send_engine_post_start(queue);
+	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+	memset(wqe_ctrl, 0, wqe_len);
+	mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+	mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
+					     num_of_actions);
+	mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+		       void *comp_data,
+		       u32 arg_idx,
+		       u8 *arg_data,
+		       size_t data_size)
+{
+	struct mlx5hws_send_engine_post_attr send_attr = {0};
+	struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
+	struct mlx5hws_send_engine_post_ctrl ctrl;
+	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+	int i, full_iter, leftover;
+	size_t wqe_len;
+
+	mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
+
+	/* Each WQE can hold 64B of data; this might require multiple iterations */
+	full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
+	leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);
+
+	for (i = 0; i < full_iter; i++) {
+		ctrl = mlx5hws_send_engine_post_start(queue);
+		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+		memset(wqe_ctrl, 0, wqe_len);
+		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+		memcpy(wqe_arg, arg_data, wqe_len);
+		send_attr.id = arg_idx++;
+		mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+
+		/* Move to next argument data */
+		arg_data += MLX5HWS_ARG_DATA_SIZE;
+	}
+
+	if (leftover) {
+		ctrl = mlx5hws_send_engine_post_start(queue);
+		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+		memset(wqe_ctrl, 0, wqe_len);
+		mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+		memcpy(wqe_arg, arg_data, leftover);
+		
send_attr.id = arg_idx; + mlx5hws_send_engine_post_end(&ctrl, &send_attr); + } +} + +int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx, + u32 arg_idx, + u8 *arg_data, + size_t data_size) +{ + struct mlx5hws_send_engine *queue; + int ret; + + mutex_lock(&ctx->ctrl_lock); + + /* Get the control queue */ + queue = &ctx->send_queue[ctx->queues - 1]; + + mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size); + + mlx5hws_send_engine_flush_queue(queue); + + /* Poll for completion */ + ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1, + MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC); + + if (ret) + mlx5hws_err(ctx, "Failed to drain arg queue\n"); + + mutex_unlock(&ctx->ctrl_lock); + + return ret; +} + +bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx, + u32 arg_size) +{ + if (arg_size < ctx->caps->log_header_modify_argument_granularity || + arg_size > ctx->caps->log_header_modify_argument_max_alloc) { + return false; + } + return true; +} + +int mlx5hws_arg_create(struct mlx5hws_context *ctx, + u8 *data, + size_t data_sz, + u32 log_bulk_sz, + bool write_data, + u32 *arg_id) +{ + u16 single_arg_log_sz; + u16 multi_arg_log_sz; + int ret; + u32 id; + + single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz); + multi_arg_log_sz = single_arg_log_sz + log_bulk_sz; + + if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) { + mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz); + return -EOPNOTSUPP; + } + + if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) { + mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz); + return -EOPNOTSUPP; + } + + /* Alloc bulk of args */ + ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id); + if (ret) { + mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz); + return ret; + } + + if (write_data) { + ret = mlx5hws_arg_write_inline_arg_data(ctx, id, + data, data_sz); + if (ret) { + mlx5hws_err(ctx, "Failed writing arg data\n"); + mlx5hws_cmd_arg_destroy(ctx->mdev, id); + return ret; + } + } + + *arg_id = id; + return ret; +} + +void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id) +{ + mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id); +} + +int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx, + __be64 *data, + u8 num_of_actions, + u32 log_bulk_sz, + bool write_data, + u32 *arg_id) +{ + size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE; + int ret; + + ret = mlx5hws_arg_create(ctx, + (u8 *)data, + data_sz, + log_bulk_sz, + write_data, + arg_id); + if (ret) + mlx5hws_err(ctx, "Failed creating modify header arg\n"); + + return ret; +} + +static int +hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern) +{ + /* Need to check field limitation here, but for now - return OK */ + return 0; +} + +#define INVALID_FIELD 0xffff + +static void +hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern, + u16 *src_field, u16 *dst_field) +{ + switch (action_type) { + case MLX5_ACTION_TYPE_SET: + case MLX5_ACTION_TYPE_ADD: + *src_field = MLX5_GET(set_action_in, pattern, field); + *dst_field = INVALID_FIELD; + break; + case MLX5_ACTION_TYPE_COPY: + *src_field = MLX5_GET(copy_action_in, pattern, src_field); + *dst_field = MLX5_GET(copy_action_in, pattern, dst_field); + break; + default: + pr_warn("HWS: invalid modify header action type %d\n", action_type); + } +} + +bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz) +{ + 
size_t i; + + for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) { + u8 action_type = + MLX5_GET(set_action_in, &pattern[i], action_type); + if (action_type >= MLX5_MODIFICATION_TYPE_MAX) { + mlx5hws_err(ctx, "Unsupported action id %d\n", action_type); + return false; + } + if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) { + mlx5hws_err(ctx, "Unsupported action number %zu\n", i); + return false; + } + } + + return true; +} + +void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, + size_t max_actions, size_t *new_size, + u32 *nope_location, __be64 *new_pat) +{ + u16 prev_src_field = 0, prev_dst_field = 0; + u16 src_field, dst_field; + u8 action_type; + size_t i, j; + + *new_size = num_actions; + *nope_location = 0; + + if (num_actions == 1) + return; + + for (i = 0, j = 0; i < num_actions; i++, j++) { + action_type = MLX5_GET(set_action_in, &pattern[i], action_type); + + hws_action_modify_get_target_fields(action_type, &pattern[i], + &src_field, &dst_field); + if (i % 2) { + if (action_type == MLX5_ACTION_TYPE_COPY && + (prev_src_field == src_field || + prev_dst_field == dst_field)) { + /* need Nope */ + *new_size += 1; + *nope_location |= BIT(i); + memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE); + MLX5_SET(set_action_in, &new_pat[j], + action_type, + MLX5_MODIFICATION_TYPE_NOP); + j++; + } else if (prev_src_field == src_field) { + /* need Nope*/ + *new_size += 1; + *nope_location |= BIT(i); + MLX5_SET(set_action_in, &new_pat[j], + action_type, + MLX5_MODIFICATION_TYPE_NOP); + j++; + } + } + memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE); + /* check if no more space */ + if (j > max_actions) { + *new_size = num_actions; + *nope_location = 0; + return; + } + + prev_src_field = src_field; + prev_dst_field = dst_field; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h new file mode 100644 index 00000000000000..27ca93385b0841 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_PAT_ARG_H_ +#define MLX5HWS_PAT_ARG_H_ + +/* Modify-header arg pool */ +enum mlx5hws_arg_chunk_size { + MLX5HWS_ARG_CHUNK_SIZE_1, + /* Keep MIN updated when changing */ + MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1, + MLX5HWS_ARG_CHUNK_SIZE_2, + MLX5HWS_ARG_CHUNK_SIZE_3, + MLX5HWS_ARG_CHUNK_SIZE_4, + MLX5HWS_ARG_CHUNK_SIZE_MAX, +}; + +enum { + MLX5HWS_MODIFY_ACTION_SIZE = 8, + MLX5HWS_ARG_DATA_SIZE = 64, +}; + +struct mlx5hws_pattern_cache { + struct mutex lock; /* Protect pattern list */ + struct list_head ptrn_list; +}; + +struct mlx5hws_pattern_cache_item { + struct { + u32 pattern_id; + u8 *data; + u16 num_of_actions; + } mh_data; + u32 refcount; + struct list_head ptrn_list_node; +}; + +enum mlx5hws_arg_chunk_size +mlx5hws_arg_get_arg_log_size(u16 num_of_actions); + +u32 mlx5hws_arg_get_arg_size(u16 num_of_actions); + +enum mlx5hws_arg_chunk_size +mlx5hws_arg_data_size_to_arg_log_size(u16 data_size); + +u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size); + +int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache); + +void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache); + +bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz); + +int mlx5hws_arg_create(struct mlx5hws_context *ctx, 
+ u8 *data, + size_t data_sz, + u32 log_bulk_sz, + bool write_data, + u32 *arg_id); + +void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id); + +int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx, + __be64 *data, + u8 num_of_actions, + u32 log_bulk_sz, + bool write_data, + u32 *modify_hdr_arg_id); + +int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx, + __be64 *pattern, + size_t pattern_sz, + u32 *ptrn_id); + +void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, + u32 ptrn_id); + +bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx, + u32 arg_size); + +bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions); + +void mlx5hws_arg_write(struct mlx5hws_send_engine *queue, + void *comp_data, + u32 arg_idx, + u8 *arg_data, + size_t data_size); + +void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue, + u32 arg_idx, + u8 *arg_data, + u16 num_of_actions); + +int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx, + u32 arg_idx, + u8 *arg_data, + size_t data_size); + +void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions, + size_t *new_size, u32 *nope_location, __be64 *new_pat); +#endif /* MLX5HWS_PAT_ARG_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c new file mode 100644 index 00000000000000..a8a63e3278be30 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c @@ -0,0 +1,640 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" +#include "mlx5hws_buddy.h" + +static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource) +{ + switch (resource->pool->type) { + case MLX5HWS_POOL_TYPE_STE: + mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id); + break; + case MLX5HWS_POOL_TYPE_STC: + mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id); + break; + default: + break; + } + + kfree(resource); +} + +static void hws_pool_resource_free(struct mlx5hws_pool *pool, + int resource_idx) +{ + hws_pool_free_one_resource(pool->resource[resource_idx]); + pool->resource[resource_idx] = NULL; + + if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) { + hws_pool_free_one_resource(pool->mirror_resource[resource_idx]); + pool->mirror_resource[resource_idx] = NULL; + } +} + +static struct mlx5hws_pool_resource * +hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range, + u32 fw_ft_type) +{ + struct mlx5hws_cmd_ste_create_attr ste_attr; + struct mlx5hws_cmd_stc_create_attr stc_attr; + struct mlx5hws_pool_resource *resource; + u32 obj_id = 0; + int ret; + + resource = kzalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + return NULL; + + switch (pool->type) { + case MLX5HWS_POOL_TYPE_STE: + ste_attr.log_obj_range = log_range; + ste_attr.table_type = fw_ft_type; + ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id); + break; + case MLX5HWS_POOL_TYPE_STC: + stc_attr.log_obj_range = log_range; + stc_attr.table_type = fw_ft_type; + ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id); + break; + default: + ret = -EINVAL; + } + + if (ret) { + mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n"); + goto free_resource; + } + + resource->pool = pool; + resource->range = 1 << log_range; + resource->base_id = obj_id; + + return resource; + +free_resource: + kfree(resource); + 
return NULL; +} + +static int +hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx) +{ + struct mlx5hws_pool_resource *resource; + u32 fw_ft_type, opt_log_range; + + fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false); + opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range; + resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type); + if (!resource) { + mlx5hws_err(pool->ctx, "Failed allocating resource\n"); + return -EINVAL; + } + + pool->resource[idx] = resource; + + if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) { + struct mlx5hws_pool_resource *mirror_resource; + + fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true); + opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range; + mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type); + if (!mirror_resource) { + mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n"); + hws_pool_free_one_resource(resource); + pool->resource[idx] = NULL; + return -EINVAL; + } + pool->mirror_resource[idx] = mirror_resource; + } + + return 0; +} + +static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range) +{ + unsigned long *cur_bmp; + + cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL); + if (!cur_bmp) + return NULL; + + bitmap_fill(cur_bmp, 1 << log_range); + + return cur_bmp; +} + +static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + struct mlx5hws_buddy_mem *buddy; + + buddy = pool->db.buddy_manager->buddies[chunk->resource_idx]; + if (!buddy) { + mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx); + return; + } + + mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order); +} + +static struct mlx5hws_buddy_mem * +hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx, + u32 order, bool *is_new_buddy) +{ + static struct mlx5hws_buddy_mem *buddy; + u32 new_buddy_size; + + buddy = pool->db.buddy_manager->buddies[idx]; + if (buddy) + return buddy; + + new_buddy_size = max(pool->alloc_log_sz, order); + *is_new_buddy = true; + buddy = mlx5hws_buddy_create(new_buddy_size); + if (!buddy) { + mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n", + new_buddy_size, idx); + return NULL; + } + + if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) { + mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n", + pool->type, new_buddy_size, idx); + mlx5hws_buddy_cleanup(buddy); + return NULL; + } + + pool->db.buddy_manager->buddies[idx] = buddy; + + return buddy; +} + +static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool, + int order, + u32 *buddy_idx, + int *seg) +{ + struct mlx5hws_buddy_mem *buddy; + bool new_mem = false; + int ret = 0; + int i; + + *seg = -1; + + /* Find the next free place from the buddy array */ + while (*seg == -1) { + for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { + buddy = hws_pool_buddy_get_next_buddy(pool, i, + order, + &new_mem); + if (!buddy) { + ret = -ENOMEM; + goto out; + } + + *seg = mlx5hws_buddy_alloc_mem(buddy, order); + if (*seg != -1) + goto found; + + if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) { + mlx5hws_err(pool->ctx, + "Fail to allocate seg for one resource pool\n"); + ret = -ENOMEM; + goto out; + } + + if (new_mem) { + /* We have new memory pool, should be place for us */ + mlx5hws_err(pool->ctx, + "No memory for order: %d with buddy no: %d\n", + order, i); + ret = -ENOMEM; + goto out; + } + } + } + +found: + 
*buddy_idx = i; +out: + return ret; +} + +static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + int ret = 0; + + /* Go over the buddies and find next free slot */ + ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order, + &chunk->resource_idx, + &chunk->offset); + if (ret) + mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n", + chunk->order); + + return ret; +} + +static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool) +{ + struct mlx5hws_buddy_mem *buddy; + int i; + + for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { + buddy = pool->db.buddy_manager->buddies[i]; + if (buddy) { + mlx5hws_buddy_cleanup(buddy); + kfree(buddy); + pool->db.buddy_manager->buddies[i] = NULL; + } + } + + kfree(pool->db.buddy_manager); +} + +static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range) +{ + pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL); + if (!pool->db.buddy_manager) + return -ENOMEM; + + if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) { + bool new_buddy; + + if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) { + mlx5hws_err(pool->ctx, + "Failed allocating memory on create log_sz: %d\n", log_range); + kfree(pool->db.buddy_manager); + return -ENOMEM; + } + } + + pool->p_db_uninit = &hws_pool_buddy_db_uninit; + pool->p_get_chunk = &hws_pool_buddy_db_get_chunk; + pool->p_put_chunk = &hws_pool_buddy_db_put_chunk; + + return 0; +} + +static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool, + u32 alloc_size, int idx) +{ + int ret = hws_pool_resource_alloc(pool, alloc_size, idx); + + if (ret) { + mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n", + pool->type, alloc_size, idx); + return ret; + } + + return 0; +} + +static struct mlx5hws_pool_elements * +hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx) +{ + struct mlx5hws_pool_elements *elem; + u32 alloc_size; + + alloc_size = pool->alloc_log_sz; + + elem = kzalloc(sizeof(*elem), GFP_KERNEL); + if (!elem) + return NULL; + + /* Sharing the same resource, also means that all the elements are with size 1 */ + if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) && + !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) { + /* Currently all chunks in size 1 */ + elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order); + if (!elem->bitmap) { + mlx5hws_err(pool->ctx, + "Failed to create bitmap type: %d: size %d index: %d\n", + pool->type, alloc_size, idx); + goto free_elem; + } + + elem->log_size = alloc_size - order; + } + + if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) { + mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n", + pool->type, alloc_size, idx); + goto free_db; + } + + pool->db.element_manager->elements[idx] = elem; + + return elem; + +free_db: + bitmap_free(elem->bitmap); +free_elem: + kfree(elem); + return NULL; +} + +static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg) +{ + unsigned int segment, size; + + size = 1 << elem->log_size; + + segment = find_first_bit(elem->bitmap, size); + if (segment >= size) { + elem->is_full = true; + return -ENOMEM; + } + + bitmap_clear(elem->bitmap, segment, 1); + *seg = segment; + return 0; +} + +static int +hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order, + u32 *idx, int *seg) +{ + struct mlx5hws_pool_elements *elem; + + elem = 
pool->db.element_manager->elements[0]; + if (!elem) + elem = hws_pool_element_create_new_elem(pool, order, 0); + if (!elem) + goto err_no_elem; + + if (hws_pool_element_find_seg(elem, seg) != 0) { + mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order); + return -ENOMEM; + } + + *idx = 0; + elem->num_of_elements++; + return 0; + +err_no_elem: + mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order); + return -ENOMEM; +} + +static int +hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order, + u32 *idx, int *seg) +{ + int ret, i; + + for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { + if (!pool->resource[i]) { + ret = hws_pool_create_resource_on_index(pool, order, i); + if (ret) + goto err_no_res; + *idx = i; + *seg = 0; /* One memory slot in that element */ + return 0; + } + } + + mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order); + return -ENOMEM; + +err_no_res: + mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order); + return -ENOMEM; +} + +static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + int ret; + + /* Go over all memory elements and find/allocate free slot */ + ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order, + &chunk->resource_idx, + &chunk->offset); + if (ret) + mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n", + chunk->order); + + return ret; +} + +static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + if (unlikely(!pool->resource[chunk->resource_idx])) + pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx); + + if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE) + hws_pool_resource_free(pool, chunk->resource_idx); +} + +static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool) +{ + (void)pool; +} + +/* This memory management works as the following: + * - At start doesn't allocate no mem at all. + * - When new request for chunk arrived: + * allocate resource and give it. + * - When free that chunk: + * the resource is freed. 
+ */ +static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool) +{ + pool->p_db_uninit = &hws_pool_general_element_db_uninit; + pool->p_get_chunk = &hws_pool_general_element_db_get_chunk; + pool->p_put_chunk = &hws_pool_general_element_db_put_chunk; + + return 0; +} + +static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool, + struct mlx5hws_pool_elements *elem, + struct mlx5hws_pool_chunk *chunk) +{ + if (unlikely(!pool->resource[chunk->resource_idx])) + pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx); + + hws_pool_resource_free(pool, chunk->resource_idx); + kfree(elem); + pool->db.element_manager->elements[chunk->resource_idx] = NULL; +} + +static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + struct mlx5hws_pool_elements *elem; + + if (unlikely(chunk->resource_idx)) + pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx); + + elem = pool->db.element_manager->elements[chunk->resource_idx]; + if (!elem) { + mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx); + return; + } + + bitmap_set(elem->bitmap, chunk->offset, 1); + elem->is_full = false; + elem->num_of_elements--; + + if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE && + !elem->num_of_elements) + hws_onesize_element_db_destroy_element(pool, elem, chunk); +} + +static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + int ret = 0; + + /* Go over all memory elements and find/allocate free slot */ + ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order, + &chunk->resource_idx, + &chunk->offset); + if (ret) + mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n", + chunk->order); + + return ret; +} + +static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool) +{ + struct mlx5hws_pool_elements *elem; + int i; + + for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { + elem = pool->db.element_manager->elements[i]; + if (elem) { + bitmap_free(elem->bitmap); + kfree(elem); + pool->db.element_manager->elements[i] = NULL; + } + } + kfree(pool->db.element_manager); +} + +/* This memory management works as the following: + * - At start doesn't allocate no mem at all. + * - When new request for chunk arrived: + * aloocate the first and only slot of memory/resource + * when it ended return error. 
+ */ +static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool) +{ + pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL); + if (!pool->db.element_manager) + return -ENOMEM; + + pool->p_db_uninit = &hws_onesize_element_db_uninit; + pool->p_get_chunk = &hws_onesize_element_db_get_chunk; + pool->p_put_chunk = &hws_onesize_element_db_put_chunk; + + return 0; +} + +static int hws_pool_db_init(struct mlx5hws_pool *pool, + enum mlx5hws_db_type db_type) +{ + int ret; + + if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE) + ret = hws_pool_general_element_db_init(pool); + else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE) + ret = hws_pool_onesize_element_db_init(pool); + else + ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz); + + if (ret) { + mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret); + return ret; + } + + return 0; +} + +static void hws_pool_db_unint(struct mlx5hws_pool *pool) +{ + pool->p_db_uninit(pool); +} + +int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + int ret; + + mutex_lock(&pool->lock); + ret = pool->p_get_chunk(pool, chunk); + mutex_unlock(&pool->lock); + + return ret; +} + +void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + mutex_lock(&pool->lock); + pool->p_put_chunk(pool, chunk); + mutex_unlock(&pool->lock); +} + +struct mlx5hws_pool * +mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr) +{ + enum mlx5hws_db_type res_db_type; + struct mlx5hws_pool *pool; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return NULL; + + pool->ctx = ctx; + pool->type = pool_attr->pool_type; + pool->alloc_log_sz = pool_attr->alloc_log_sz; + pool->flags = pool_attr->flags; + pool->tbl_type = pool_attr->table_type; + pool->opt_type = pool_attr->opt_type; + + /* Support general db */ + if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE | + MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) + res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE; + else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE | + MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS)) + res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE; + else + res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY; + + pool->alloc_log_sz = pool_attr->alloc_log_sz; + + if (hws_pool_db_init(pool, res_db_type)) + goto free_pool; + + mutex_init(&pool->lock); + + return pool; + +free_pool: + kfree(pool); + return NULL; +} + +int mlx5hws_pool_destroy(struct mlx5hws_pool *pool) +{ + int i; + + mutex_destroy(&pool->lock); + + for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) + if (pool->resource[i]) + hws_pool_resource_free(pool, i); + + hws_pool_db_unint(pool); + + kfree(pool); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h new file mode 100644 index 00000000000000..621298b352b244 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_POOL_H_ +#define MLX5HWS_POOL_H_ + +#define MLX5HWS_POOL_STC_LOG_SZ 15 + +#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100 + +enum mlx5hws_pool_type { + MLX5HWS_POOL_TYPE_STE, + MLX5HWS_POOL_TYPE_STC, +}; + +struct mlx5hws_pool_chunk { + u32 resource_idx; + /* Internal offset, relative to base index */ + int offset; + int 
order;
+};
+
+struct mlx5hws_pool_resource {
+	struct mlx5hws_pool *pool;
+	u32 base_id;
+	u32 range;
+};
+
+enum mlx5hws_pool_flags {
+	/* Only one resource in this pool */
+	MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
+	MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
+	/* Resources are not shared between chunks */
+	MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
+	/* All objects are of the same size */
+	MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
+	/* Managed by buddy allocator */
+	MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
+	/* Allocate pool_type memory on pool creation */
+	MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
+
+	/* These values should be used by the caller */
+	MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
+		MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+		MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
+	MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
+		MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+		MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
+	MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
+		MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+		MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
+		MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+};
+
+enum mlx5hws_pool_optimize {
+	MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+	MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+	MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+};
+
+struct mlx5hws_pool_attr {
+	enum mlx5hws_pool_type pool_type;
+	enum mlx5hws_table_type table_type;
+	enum mlx5hws_pool_flags flags;
+	enum mlx5hws_pool_optimize opt_type;
+	/* Allocation size once memory is depleted */
+	size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+	/* Used for allocating chunks of big memory; each element has its own resource in the FW */
+	MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
+	/* One resource only; all elements have the same, single size */
+	MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
+	/* Many resources; memory is allocated with a buddy mechanism */
+	MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_buddy_manager {
+	struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_elements {
+	u32 num_of_elements;
+	unsigned long *bitmap;
+	u32 log_size;
+	bool is_full;
+};
+
+struct mlx5hws_element_manager {
+	struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_db {
+	enum mlx5hws_db_type type;
+	union {
+		struct mlx5hws_element_manager *element_manager;
+		struct mlx5hws_buddy_manager *buddy_manager;
+	};
+};
+
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+					 struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+					  struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_unint_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+	struct mlx5hws_context *ctx;
+	enum mlx5hws_pool_type type;
+	enum mlx5hws_pool_flags flags;
+	struct mutex lock; /* protect the pool */
+	size_t alloc_log_sz;
+	enum mlx5hws_table_type tbl_type;
+	enum mlx5hws_pool_optimize opt_type;
+	struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+	struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+	/* DB */
+	struct mlx5hws_pool_db db;
+	/* Functions */
+	mlx5hws_pool_unint_db p_db_uninit;
+	mlx5hws_pool_db_get_chunk p_get_chunk;
+	mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+		    struct mlx5hws_pool_attr *pool_attr);
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+			     struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct 
mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk); + +static inline u32 +mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + return pool->resource[chunk->resource_idx]->base_id; +} + +static inline u32 +mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) +{ + return pool->mirror_resource[chunk->resource_idx]->base_id; +} +#endif /* MLX5HWS_POOL_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h new file mode 100644 index 00000000000000..de92cecbeb928a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h @@ -0,0 +1,514 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5_PRM_H_ +#define MLX5_PRM_H_ + +#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512 + +/* Action type of header modification. */ +enum { + MLX5_MODIFICATION_TYPE_SET = 0x1, + MLX5_MODIFICATION_TYPE_ADD = 0x2, + MLX5_MODIFICATION_TYPE_COPY = 0x3, + MLX5_MODIFICATION_TYPE_INSERT = 0x4, + MLX5_MODIFICATION_TYPE_REMOVE = 0x5, + MLX5_MODIFICATION_TYPE_NOP = 0x6, + MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7, + MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8, + MLX5_MODIFICATION_TYPE_MAX, +}; + +/* The field of packet to be modified. */ +enum mlx5_modification_field { + MLX5_MODI_OUT_NONE = -1, + MLX5_MODI_OUT_SMAC_47_16 = 1, + MLX5_MODI_OUT_SMAC_15_0, + MLX5_MODI_OUT_ETHERTYPE, + MLX5_MODI_OUT_DMAC_47_16, + MLX5_MODI_OUT_DMAC_15_0, + MLX5_MODI_OUT_IP_DSCP, + MLX5_MODI_OUT_TCP_FLAGS, + MLX5_MODI_OUT_TCP_SPORT, + MLX5_MODI_OUT_TCP_DPORT, + MLX5_MODI_OUT_IPV4_TTL, + MLX5_MODI_OUT_UDP_SPORT, + MLX5_MODI_OUT_UDP_DPORT, + MLX5_MODI_OUT_SIPV6_127_96, + MLX5_MODI_OUT_SIPV6_95_64, + MLX5_MODI_OUT_SIPV6_63_32, + MLX5_MODI_OUT_SIPV6_31_0, + MLX5_MODI_OUT_DIPV6_127_96, + MLX5_MODI_OUT_DIPV6_95_64, + MLX5_MODI_OUT_DIPV6_63_32, + MLX5_MODI_OUT_DIPV6_31_0, + MLX5_MODI_OUT_SIPV4, + MLX5_MODI_OUT_DIPV4, + MLX5_MODI_OUT_FIRST_VID, + MLX5_MODI_IN_SMAC_47_16 = 0x31, + MLX5_MODI_IN_SMAC_15_0, + MLX5_MODI_IN_ETHERTYPE, + MLX5_MODI_IN_DMAC_47_16, + MLX5_MODI_IN_DMAC_15_0, + MLX5_MODI_IN_IP_DSCP, + MLX5_MODI_IN_TCP_FLAGS, + MLX5_MODI_IN_TCP_SPORT, + MLX5_MODI_IN_TCP_DPORT, + MLX5_MODI_IN_IPV4_TTL, + MLX5_MODI_IN_UDP_SPORT, + MLX5_MODI_IN_UDP_DPORT, + MLX5_MODI_IN_SIPV6_127_96, + MLX5_MODI_IN_SIPV6_95_64, + MLX5_MODI_IN_SIPV6_63_32, + MLX5_MODI_IN_SIPV6_31_0, + MLX5_MODI_IN_DIPV6_127_96, + MLX5_MODI_IN_DIPV6_95_64, + MLX5_MODI_IN_DIPV6_63_32, + MLX5_MODI_IN_DIPV6_31_0, + MLX5_MODI_IN_SIPV4, + MLX5_MODI_IN_DIPV4, + MLX5_MODI_OUT_IPV6_HOPLIMIT, + MLX5_MODI_IN_IPV6_HOPLIMIT, + MLX5_MODI_META_DATA_REG_A, + MLX5_MODI_META_DATA_REG_B = 0x50, + MLX5_MODI_META_REG_C_0, + MLX5_MODI_META_REG_C_1, + MLX5_MODI_META_REG_C_2, + MLX5_MODI_META_REG_C_3, + MLX5_MODI_META_REG_C_4, + MLX5_MODI_META_REG_C_5, + MLX5_MODI_META_REG_C_6, + MLX5_MODI_META_REG_C_7, + MLX5_MODI_OUT_TCP_SEQ_NUM, + MLX5_MODI_IN_TCP_SEQ_NUM, + MLX5_MODI_OUT_TCP_ACK_NUM, + MLX5_MODI_IN_TCP_ACK_NUM = 0x5C, + MLX5_MODI_GTP_TEID = 0x6E, + MLX5_MODI_OUT_IP_ECN = 0x73, + MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75, + MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76, + MLX5_MODI_HASH_RESULT = 0x81, + MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a, + MLX5_MODI_IN_MPLS_LABEL_1, + MLX5_MODI_IN_MPLS_LABEL_2, + MLX5_MODI_IN_MPLS_LABEL_3, + MLX5_MODI_IN_MPLS_LABEL_4, + MLX5_MODI_OUT_IP_PROTOCOL = 0x4A, + MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A, + 
MLX5_MODI_META_REG_C_8 = 0x8F, + MLX5_MODI_META_REG_C_9 = 0x90, + MLX5_MODI_META_REG_C_10 = 0x91, + MLX5_MODI_META_REG_C_11 = 0x92, + MLX5_MODI_META_REG_C_12 = 0x93, + MLX5_MODI_META_REG_C_13 = 0x94, + MLX5_MODI_META_REG_C_14 = 0x95, + MLX5_MODI_META_REG_C_15 = 0x96, + MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D, + MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E, + MLX5_MODI_OUT_IPV4_IHL = 0x11F, + MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120, + MLX5_MODI_OUT_ESP_SPI = 0x5E, + MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82, + MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126, + MLX5_MODI_INVALID = INT_MAX, +}; + +enum { + MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1, + MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1, + MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1, + MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1, + MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1, +}; + +enum mlx5_ifc_rtc_update_mode { + MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0, + MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1, +}; + +enum mlx5_ifc_rtc_access_mode { + MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0, + MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1, +}; + +enum mlx5_ifc_rtc_ste_format { + MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4, + MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5, + MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7, +}; + +enum mlx5_ifc_rtc_reparse_mode { + MLX5_IFC_RTC_REPARSE_NEVER = 0x0, + MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1, + MLX5_IFC_RTC_REPARSE_BY_STC = 0x2, +}; + +#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16 + +struct mlx5_ifc_rtc_bits { + u8 modify_field_select[0x40]; + u8 reserved_at_40[0x40]; + u8 update_index_mode[0x2]; + u8 reparse_mode[0x2]; + u8 num_match_ste[0x4]; + u8 pd[0x18]; + u8 reserved_at_a0[0x9]; + u8 access_index_mode[0x3]; + u8 num_hash_definer[0x4]; + u8 update_method[0x1]; + u8 reserved_at_b1[0x2]; + u8 log_depth[0x5]; + u8 log_hash_size[0x8]; + u8 ste_format_0[0x8]; + u8 table_type[0x8]; + u8 ste_format_1[0x8]; + u8 reserved_at_d8[0x8]; + u8 match_definer_0[0x20]; + u8 stc_id[0x20]; + u8 ste_table_base_id[0x20]; + u8 ste_table_offset[0x20]; + u8 reserved_at_160[0x8]; + u8 miss_flow_table_id[0x18]; + u8 match_definer_1[0x20]; + u8 reserved_at_1a0[0x260]; +}; + +enum mlx5_ifc_stc_action_type { + MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00, + MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05, + MLX5_IFC_STC_ACTION_TYPE_SET = 0x06, + MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07, + MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08, + MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09, + MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b, + MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c, + MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e, + MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10, + MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11, + MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12, + MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13, + MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14, + MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82, + MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83, + MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85, + MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86, +}; + +enum mlx5_ifc_stc_reparse_mode { + MLX5_IFC_STC_REPARSE_IGNORE = 0x0, + MLX5_IFC_STC_REPARSE_NEVER = 0x1, + MLX5_IFC_STC_REPARSE_ALWAYS = 0x2, +}; + +struct mlx5_ifc_stc_ste_param_ste_table_bits { + u8 ste_obj_id[0x20]; + u8 match_definer_id[0x20]; + u8 reserved_at_40[0x3]; + u8 log_hash_size[0x5]; + u8 reserved_at_48[0x38]; +}; + +struct 
mlx5_ifc_stc_ste_param_tir_bits { + u8 reserved_at_0[0x8]; + u8 tirn[0x18]; + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_stc_ste_param_table_bits { + u8 reserved_at_0[0x8]; + u8 table_id[0x18]; + u8 reserved_at_20[0x60]; +}; + +struct mlx5_ifc_stc_ste_param_flow_counter_bits { + u8 flow_counter_id[0x20]; +}; + +enum { + MLX5_ASO_CT_NUM_PER_OBJ = 1, + MLX5_ASO_METER_NUM_PER_OBJ = 2, + MLX5_ASO_IPSEC_NUM_PER_OBJ = 1, + MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512, +}; + +struct mlx5_ifc_stc_ste_param_execute_aso_bits { + u8 aso_object_id[0x20]; + u8 return_reg_id[0x4]; + u8 aso_type[0x4]; + u8 reserved_at_28[0x18]; +}; + +struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits { + u8 ipsec_object_id[0x20]; +}; + +struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits { + u8 ipsec_object_id[0x20]; +}; + +struct mlx5_ifc_stc_ste_param_trailer_bits { + u8 reserved_at_0[0x8]; + u8 command[0x4]; + u8 reserved_at_c[0x2]; + u8 type[0x2]; + u8 reserved_at_10[0xa]; + u8 length[0x6]; +}; + +struct mlx5_ifc_stc_ste_param_header_modify_list_bits { + u8 header_modify_pattern_id[0x20]; + u8 header_modify_argument_id[0x20]; +}; + +enum mlx5_ifc_header_anchors { + MLX5_HEADER_ANCHOR_PACKET_START = 0x0, + MLX5_HEADER_ANCHOR_MAC = 0x1, + MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2, + MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07, + MLX5_HEADER_ANCHOR_ESP = 0x08, + MLX5_HEADER_ANCHOR_TCP_UDP = 0x09, + MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a, + MLX5_HEADER_ANCHOR_INNER_MAC = 0x13, + MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19, + MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a, + MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b, + MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c +}; + +struct mlx5_ifc_stc_ste_param_remove_bits { + u8 action_type[0x4]; + u8 decap[0x1]; + u8 reserved_at_5[0x5]; + u8 remove_start_anchor[0x6]; + u8 reserved_at_10[0x2]; + u8 remove_end_anchor[0x6]; + u8 reserved_at_18[0x8]; +}; + +struct mlx5_ifc_stc_ste_param_remove_words_bits { + u8 action_type[0x4]; + u8 reserved_at_4[0x6]; + u8 remove_start_anchor[0x6]; + u8 reserved_at_10[0x1]; + u8 remove_offset[0x7]; + u8 reserved_at_18[0x2]; + u8 remove_size[0x6]; +}; + +struct mlx5_ifc_stc_ste_param_insert_bits { + u8 action_type[0x4]; + u8 encap[0x1]; + u8 inline_data[0x1]; + u8 reserved_at_6[0x4]; + u8 insert_anchor[0x6]; + u8 reserved_at_10[0x1]; + u8 insert_offset[0x7]; + u8 reserved_at_18[0x1]; + u8 insert_size[0x7]; + u8 insert_argument[0x20]; +}; + +struct mlx5_ifc_stc_ste_param_vport_bits { + u8 eswitch_owner_vhca_id[0x10]; + u8 vport_number[0x10]; + u8 eswitch_owner_vhca_id_valid[0x1]; + u8 reserved_at_21[0x5f]; +}; + +union mlx5_ifc_stc_param_bits { + struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table; + struct mlx5_ifc_stc_ste_param_tir_bits tir; + struct mlx5_ifc_stc_ste_param_table_bits table; + struct mlx5_ifc_stc_ste_param_flow_counter_bits counter; + struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header; + struct mlx5_ifc_stc_ste_param_execute_aso_bits aso; + struct mlx5_ifc_stc_ste_param_remove_bits remove_header; + struct mlx5_ifc_stc_ste_param_insert_bits insert_header; + struct mlx5_ifc_set_action_in_bits add; + struct mlx5_ifc_set_action_in_bits set; + struct mlx5_ifc_copy_action_in_bits copy; + struct mlx5_ifc_stc_ste_param_vport_bits vport; + struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt; + struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt; + struct mlx5_ifc_stc_ste_param_trailer_bits trailer; + u8 reserved_at_0[0x80]; +}; + +enum { + MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0), +}; + +struct mlx5_ifc_stc_bits { + u8 
modify_field_select[0x40]; + u8 reserved_at_40[0x46]; + u8 reparse_mode[0x2]; + u8 table_type[0x8]; + u8 ste_action_offset[0x8]; + u8 action_type[0x8]; + u8 reserved_at_a0[0x60]; + union mlx5_ifc_stc_param_bits stc_param; + u8 reserved_at_180[0x280]; +}; + +struct mlx5_ifc_ste_bits { + u8 modify_field_select[0x40]; + u8 reserved_at_40[0x48]; + u8 table_type[0x8]; + u8 reserved_at_90[0x370]; +}; + +struct mlx5_ifc_definer_bits { + u8 modify_field_select[0x40]; + u8 reserved_at_40[0x50]; + u8 format_id[0x10]; + u8 reserved_at_60[0x60]; + u8 format_select_dw3[0x8]; + u8 format_select_dw2[0x8]; + u8 format_select_dw1[0x8]; + u8 format_select_dw0[0x8]; + u8 format_select_dw7[0x8]; + u8 format_select_dw6[0x8]; + u8 format_select_dw5[0x8]; + u8 format_select_dw4[0x8]; + u8 reserved_at_100[0x18]; + u8 format_select_dw8[0x8]; + u8 reserved_at_120[0x20]; + u8 format_select_byte3[0x8]; + u8 format_select_byte2[0x8]; + u8 format_select_byte1[0x8]; + u8 format_select_byte0[0x8]; + u8 format_select_byte7[0x8]; + u8 format_select_byte6[0x8]; + u8 format_select_byte5[0x8]; + u8 format_select_byte4[0x8]; + u8 reserved_at_180[0x40]; + u8 ctrl[0xa0]; + u8 match_mask[0x160]; +}; + +struct mlx5_ifc_arg_bits { + u8 rsvd0[0x88]; + u8 access_pd[0x18]; +}; + +struct mlx5_ifc_header_modify_pattern_in_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x40]; + + u8 pattern_length[0x8]; + u8 reserved_at_88[0x18]; + + u8 reserved_at_a0[0x60]; + + u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8]; +}; + +struct mlx5_ifc_create_rtc_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_rtc_bits rtc; +}; + +struct mlx5_ifc_create_stc_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_stc_bits stc; +}; + +struct mlx5_ifc_create_ste_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_ste_bits ste; +}; + +struct mlx5_ifc_create_definer_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_definer_bits definer; +}; + +struct mlx5_ifc_create_arg_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_arg_bits arg; +}; + +struct mlx5_ifc_create_header_modify_pattern_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_header_modify_pattern_in_bits pattern; +}; + +struct mlx5_ifc_generate_wqe_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + u8 reserved_at_20[0x10]; + u8 op_mode[0x10]; + u8 reserved_at_40[0x40]; + u8 reserved_at_80[0x8]; + u8 pdn[0x18]; + u8 reserved_at_a0[0x160]; + u8 wqe_ctrl[0x80]; + u8 wqe_gta_ctrl[0x180]; + u8 wqe_gta_data_0[0x200]; + u8 wqe_gta_data_1[0x200]; +}; + +struct mlx5_ifc_generate_wqe_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + u8 syndrome[0x20]; + u8 reserved_at_40[0x1c0]; + u8 cqe_data[0x200]; +}; + +enum mlx5_access_aso_opc_mod { + ASO_OPC_MOD_IPSEC = 0x0, + ASO_OPC_MOD_CONNECTION_TRACKING = 0x1, + ASO_OPC_MOD_POLICER = 0x2, + ASO_OPC_MOD_RACE_AVOIDANCE = 0x3, + ASO_OPC_MOD_FLOW_HIT = 0x4, +}; + +enum { + MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0), + MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1), +}; + +enum { + MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0, + MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1, +}; + +struct mlx5_ifc_alloc_packet_reformat_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 packet_reformat_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_packet_reformat_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 
reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 packet_reformat_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_dealloc_packet_reformat_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +#endif /* MLX5_PRM_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c new file mode 100644 index 00000000000000..8a011b958b431c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c @@ -0,0 +1,780 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +static void hws_rule_skip(struct mlx5hws_matcher *matcher, + struct mlx5hws_match_template *mt, + u32 flow_source, + bool *skip_rx, bool *skip_tx) +{ + /* By default FDB rules are added to both RX and TX */ + *skip_rx = false; + *skip_tx = false; + + if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) { + *skip_rx = true; + } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) { + *skip_tx = true; + } else { + /* If no flow source was set for current rule, + * check for flow source in matcher attributes. + */ + if (matcher->attr.optimize_flow_src) { + *skip_tx = + matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE; + *skip_rx = + matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT; + return; + } + } +} + +static void +hws_rule_update_copy_tag(struct mlx5hws_rule *rule, + struct mlx5hws_wqe_gta_data_seg_ste *wqe_data, + bool is_jumbo) +{ + struct mlx5hws_rule_match_tag *tag; + + if (!mlx5hws_matcher_is_resizable(rule->matcher)) { + tag = &rule->tag; + } else { + struct mlx5hws_wqe_gta_data_seg_ste *data_seg = + (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg; + tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action; + } + + if (is_jumbo) + memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ); + else + memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ); +} + +static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe, + struct mlx5hws_rule *rule, + struct mlx5hws_match_template *mt, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_table *tbl = matcher->tbl; + bool skip_rx, skip_tx; + + dep_wqe->rule = rule; + dep_wqe->user_data = attr->user_data; + dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ? + attr->rule_idx : 0; + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx); + + if (!skip_rx) { + dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id; + dep_wqe->retry_rtc_0 = matcher->col_matcher ? + matcher->col_matcher->match_ste.rtc_0_id : 0; + } else { + dep_wqe->rtc_0 = 0; + dep_wqe->retry_rtc_0 = 0; + } + + if (!skip_tx) { + dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id; + dep_wqe->retry_rtc_1 = matcher->col_matcher ? + matcher->col_matcher->match_ste.rtc_1_id : 0; + } else { + dep_wqe->rtc_1 = 0; + dep_wqe->retry_rtc_1 = 0; + } + } else { + pr_warn("HWS: invalid tbl->type: %d\n", tbl->type); + } +} + +static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr) +{ + struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst; + + if (rule->resize_info->rtc_0) { + ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id; + ste_attr->retry_rtc_0 = dst_matcher->col_matcher ? 
+ dst_matcher->col_matcher->match_ste.rtc_0_id : 0; + } + if (rule->resize_info->rtc_1) { + ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id; + ste_attr->retry_rtc_1 = dst_matcher->col_matcher ? + dst_matcher->col_matcher->match_ste.rtc_1_id : 0; + } +} + +static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue, + struct mlx5hws_rule *rule, + bool err, + void *user_data, + enum mlx5hws_rule_status rule_status_on_succ) +{ + enum mlx5hws_flow_op_status comp_status; + + if (!err) { + comp_status = MLX5HWS_FLOW_OP_SUCCESS; + rule->status = rule_status_on_succ; + } else { + comp_status = MLX5HWS_FLOW_OP_ERROR; + rule->status = MLX5HWS_RULE_STATUS_FAILED; + } + + mlx5hws_send_engine_inc_rule(queue); + mlx5hws_send_engine_gen_comp(queue, user_data, comp_status); +} + +static void +hws_rule_save_resize_info(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr, + bool is_update) +{ + if (!mlx5hws_matcher_is_resizable(rule->matcher)) + return; + + if (likely(!is_update)) { + rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL); + if (unlikely(!rule->resize_info)) { + pr_warn("HWS: resize info isn't allocated for rule\n"); + return; + } + + rule->resize_info->max_stes = + rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes; + rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ? + rule->matcher->action_ste[0].pool : + NULL; + rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ? + rule->matcher->action_ste[1].pool : + NULL; + } + + memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl, + sizeof(rule->resize_info->ctrl_seg)); + memcpy(rule->resize_info->data_seg, ste_attr->wqe_data, + sizeof(rule->resize_info->data_seg)); +} + +void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule) +{ + if (mlx5hws_matcher_is_resizable(rule->matcher) && + rule->resize_info) { + kfree(rule->resize_info); + rule->resize_info = NULL; + } +} + +static void +hws_rule_save_delete_info(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr) +{ + struct mlx5hws_match_template *mt = rule->matcher->mt; + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt); + + if (mlx5hws_matcher_is_resizable(rule->matcher)) + return; + + if (is_jumbo) + memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ); + else + memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ); +} + +static void +hws_rule_clear_delete_info(struct mlx5hws_rule *rule) +{ + /* nothing to do here */ +} + +static void +hws_rule_load_delete_info(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr) +{ + if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) { + ste_attr->wqe_tag = &rule->tag; + } else { + struct mlx5hws_wqe_gta_data_seg_ste *data_seg = + (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg; + struct mlx5hws_rule_match_tag *tag = + (struct mlx5hws_rule_match_tag *)(void *)data_seg->action; + ste_attr->wqe_tag = tag; + } +} + +static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule, + u8 action_ste_selector) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_matcher_action_ste *action_ste; + struct mlx5hws_pool_chunk ste = {0}; + int ret; + + action_ste = &matcher->action_ste[action_ste_selector]; + ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes)); + ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste); + if (unlikely(ret)) { + mlx5hws_err(matcher->tbl->ctx, + "Failed to allocate STE for rule actions"); + return ret; + 
} + rule->action_ste_idx = ste.offset; + + return 0; +} + +static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule, + u8 action_ste_selector) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_pool_chunk ste = {0}; + struct mlx5hws_pool *pool; + u8 max_stes; + + if (mlx5hws_matcher_is_resizable(matcher)) { + /* Free the original action pool if rule was resized */ + max_stes = rule->resize_info->max_stes; + pool = rule->resize_info->action_ste_pool[action_ste_selector]; + } else { + max_stes = matcher->action_ste[action_ste_selector].max_stes; + pool = matcher->action_ste[action_ste_selector].pool; + } + + /* This release is safe only when the rule match part was deleted */ + ste.order = ilog2(roundup_pow_of_two(max_stes)); + ste.offset = rule->action_ste_idx; + + mlx5hws_pool_chunk_free(pool, &ste); +} + +static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + int action_ste_idx; + int ret; + + ret = hws_rule_alloc_action_ste_idx(rule, 0); + if (unlikely(ret)) + return ret; + + action_ste_idx = rule->action_ste_idx; + + ret = hws_rule_alloc_action_ste_idx(rule, 1); + if (unlikely(ret)) { + hws_rule_free_action_ste_idx(rule, 0); + return ret; + } + + /* Both pools have to return the same index */ + if (unlikely(rule->action_ste_idx != action_ste_idx)) { + pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n"); + return -EINVAL; + } + + return 0; +} + +void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule) +{ + if (rule->action_ste_idx > -1) { + hws_rule_free_action_ste_idx(rule, 1); + hws_rule_free_action_ste_idx(rule, 0); + } +} + +static void hws_rule_create_init(struct mlx5hws_rule *rule, + struct mlx5hws_send_ste_attr *ste_attr, + struct mlx5hws_actions_apply_data *apply, + bool is_update) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + + /* Init rule before reuse */ + if (!is_update) { + /* In update we use these rtc's */ + rule->rtc_0 = 0; + rule->rtc_1 = 0; + rule->action_ste_selector = 0; + } else { + rule->action_ste_selector = !rule->action_ste_selector; + } + + rule->pending_wqes = 0; + rule->action_ste_idx = -1; + rule->status = MLX5HWS_RULE_STATUS_CREATING; + + /* Init default send STE attributes */ + ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; + ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + + /* Init default action apply */ + apply->tbl_type = tbl->type; + apply->common_res = &ctx->common_res[tbl->type]; + apply->jump_to_action_stc = matcher->action_ste[0].stc.offset; + apply->require_dep = 0; +} + +static void hws_rule_move_init(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + /* Save the old RTC IDs to be later used in match STE delete */ + rule->resize_info->rtc_0 = rule->rtc_0; + rule->resize_info->rtc_1 = rule->rtc_1; + rule->resize_info->rule_idx = attr->rule_idx; + + rule->rtc_0 = 0; + rule->rtc_1 = 0; + + rule->pending_wqes = 0; + rule->action_ste_idx = -1; + rule->action_ste_selector = 0; + rule->status = MLX5HWS_RULE_STATUS_CREATING; + rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING; +} + +bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule) +{ + return mlx5hws_matcher_is_in_resize(rule->matcher) && + rule->resize_info && + rule->resize_info->state != 
MLX5HWS_RULE_RESIZE_STATE_IDLE; +} + +static int hws_rule_create_hws(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr, + u8 mt_idx, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[]) +{ + struct mlx5hws_action_template *at = &rule->matcher->at[at_idx]; + struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx]; + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt); + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_context *ctx = matcher->tbl->ctx; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_ring_dep_wqe *dep_wqe; + struct mlx5hws_actions_wqe_setter *setter; + struct mlx5hws_actions_apply_data apply; + struct mlx5hws_send_engine *queue; + u8 total_stes, action_stes; + bool is_update; + int i, ret; + + is_update = !match_param; + + setter = &at->setters[at->num_of_action_stes]; + total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term); + action_stes = total_stes - 1; + + queue = &ctx->send_queue[attr->queue_id]; + if (unlikely(mlx5hws_send_engine_err(queue))) + return -EIO; + + hws_rule_create_init(rule, &ste_attr, &apply, is_update); + + /* Allocate dependent match WQE since rule might have dependent writes. + * The queued dependent WQE can be later aborted or kept as a dependency. + * dep_wqe buffers (ctrl, data) are also reused for all STE writes. + */ + dep_wqe = mlx5hws_send_add_new_dep_wqe(queue); + hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr); + + ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl; + ste_attr.wqe_data = &dep_wqe->wqe_data; + apply.wqe_ctrl = &dep_wqe->wqe_ctrl; + apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data; + apply.rule_action = rule_actions; + apply.queue = queue; + + if (action_stes) { + /* Allocate action STEs for rules that need more than match STE */ + if (!is_update) { + ret = hws_rule_alloc_action_ste(rule, attr); + if (ret) { + mlx5hws_err(ctx, "Failed to allocate action memory %d", ret); + mlx5hws_send_abort_new_dep_wqe(queue); + return ret; + } + } + /* Skip RX/TX based on the dep_wqe init */ + ste_attr.rtc_0 = dep_wqe->rtc_0 ? + matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0; + ste_attr.rtc_1 = dep_wqe->rtc_1 ? + matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0; + /* Action STEs are written to a specific index last to first */ + ste_attr.direct_index = rule->action_ste_idx + action_stes; + apply.next_direct_idx = ste_attr.direct_index; + } else { + apply.next_direct_idx = 0; + } + + for (i = total_stes; i-- > 0;) { + mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo); + + if (i == 0) { + /* Handle last match STE. + * For hash split / linear lookup RTCs, packets reaching any STE + * will always match and perform the specified actions, which + * makes the tag irrelevant. 
+ */ + if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update)) + mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz, + (u8 *)dep_wqe->wqe_data.action); + else if (is_update) + hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo); + + /* Rule has dependent WQEs, match dep_wqe is queued */ + if (action_stes || apply.require_dep) + break; + + /* Rule has no dependencies, abort dep_wqe and send WQE now */ + mlx5hws_send_abort_new_dep_wqe(queue); + ste_attr.wqe_tag_is_jumbo = is_jumbo; + ste_attr.send_attr.notify_hw = !attr->burst; + ste_attr.send_attr.user_data = dep_wqe->user_data; + ste_attr.send_attr.rule = dep_wqe->rule; + ste_attr.rtc_0 = dep_wqe->rtc_0; + ste_attr.rtc_1 = dep_wqe->rtc_1; + ste_attr.used_id_rtc_0 = &rule->rtc_0; + ste_attr.used_id_rtc_1 = &rule->rtc_1; + ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0; + ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1; + ste_attr.direct_index = dep_wqe->direct_index; + } else { + apply.next_direct_idx = --ste_attr.direct_index; + } + + mlx5hws_send_ste(queue, &ste_attr); + } + + /* Backup TAG on the rule for deletion and resize info for + * moving rules to a new matcher, only after insertion. + */ + if (!is_update) + hws_rule_save_delete_info(rule, &ste_attr); + + hws_rule_save_resize_info(rule, &ste_attr, is_update); + mlx5hws_send_engine_inc_rule(queue); + + if (!attr->burst) + mlx5hws_send_all_dep_wqe(queue); + + return 0; +} + +static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_context *ctx = rule->matcher->tbl->ctx; + struct mlx5hws_send_engine *queue; + + queue = &ctx->send_queue[attr->queue_id]; + + hws_rule_gen_comp(queue, rule, false, + attr->user_data, MLX5HWS_RULE_STATUS_DELETED); + + /* Rule failed now we can safely release action STEs */ + mlx5hws_rule_free_action_ste(rule); + + /* Clear complex tag */ + hws_rule_clear_delete_info(rule); + + /* Clear info that was saved for resizing */ + mlx5hws_rule_clear_resize_info(rule); + + /* If a rule that was indicated as burst (need to trigger HW) has failed + * insertion we won't ring the HW as nothing is being written to the WQ. + * In such case update the last WQE and ring the HW with that work + */ + if (attr->burst) + return; + + mlx5hws_send_all_dep_wqe(queue); + mlx5hws_send_engine_flush_queue(queue); +} + +static int hws_rule_destroy_hws(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt); + struct mlx5hws_context *ctx = rule->matcher->tbl->ctx; + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0}; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_engine *queue; + + queue = &ctx->send_queue[attr->queue_id]; + + if (unlikely(mlx5hws_send_engine_err(queue))) { + hws_rule_destroy_failed_hws(rule, attr); + return 0; + } + + /* Rule is not completed yet */ + if (rule->status == MLX5HWS_RULE_STATUS_CREATING) + return -EBUSY; + + /* Rule failed and doesn't require cleanup */ + if (rule->status == MLX5HWS_RULE_STATUS_FAILED) { + hws_rule_destroy_failed_hws(rule, attr); + return 0; + } + + if (rule->skip_delete) { + /* Rule shouldn't be deleted in HW. + * Generate completion as if write succeeded, and we can + * safely release action STEs and clear resize info. 
+ */ + hws_rule_gen_comp(queue, rule, false, + attr->user_data, MLX5HWS_RULE_STATUS_DELETED); + + mlx5hws_rule_free_action_ste(rule); + mlx5hws_rule_clear_resize_info(rule); + return 0; + } + + mlx5hws_send_engine_inc_rule(queue); + + /* Send dependent WQE */ + if (!attr->burst) + mlx5hws_send_all_dep_wqe(queue); + + rule->status = MLX5HWS_RULE_STATUS_DELETING; + + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + + ste_attr.send_attr.rule = rule; + ste_attr.send_attr.notify_hw = !attr->burst; + ste_attr.send_attr.user_data = attr->user_data; + + ste_attr.rtc_0 = rule->rtc_0; + ste_attr.rtc_1 = rule->rtc_1; + ste_attr.used_id_rtc_0 = &rule->rtc_0; + ste_attr.used_id_rtc_1 = &rule->rtc_1; + ste_attr.wqe_ctrl = &wqe_ctrl; + ste_attr.wqe_tag_is_jumbo = is_jumbo; + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE; + if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher))) + ste_attr.direct_index = attr->rule_idx; + + hws_rule_load_delete_info(rule, &ste_attr); + mlx5hws_send_ste(queue, &ste_attr); + hws_rule_clear_delete_info(rule); + + return 0; +} + +static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_context *ctx = rule->matcher->tbl->ctx; + + if (unlikely(!attr->user_data)) + return -EINVAL; + + /* Check if there is room in queue */ + if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id]))) + return -EBUSY; + + return 0; +} + +static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED)) + return -EINVAL; + + return hws_rule_enqueue_precheck(rule, attr); +} + +static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher))) + /* Matcher in resize - new rules are not allowed */ + return -EAGAIN; + + return hws_rule_enqueue_precheck(rule, attr); +} + +static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + struct mlx5hws_matcher *matcher = rule->matcher; + + if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) && + !matcher->attr.optimize_using_rule_idx && + !mlx5hws_matcher_is_insert_by_idx(matcher))) { + return -EOPNOTSUPP; + } + + if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED)) + return -EBUSY; + + return hws_rule_enqueue_precheck_create(rule, attr); +} + +int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule, + void *queue_ptr, + void *user_data) +{ + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt); + struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0}; + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_send_engine *queue = queue_ptr; + struct mlx5hws_send_ste_attr ste_attr = {0}; + + mlx5hws_send_all_dep_wqe(queue); + + rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING; + + ste_attr.send_attr.fence = 0; + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.send_attr.rule = rule; + ste_attr.send_attr.notify_hw = 1; + ste_attr.send_attr.user_data = user_data; + ste_attr.rtc_0 = rule->resize_info->rtc_0; + ste_attr.rtc_1 = rule->resize_info->rtc_1; + ste_attr.used_id_rtc_0 = 
&rule->resize_info->rtc_0; + ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1; + ste_attr.wqe_ctrl = &empty_wqe_ctrl; + ste_attr.wqe_tag_is_jumbo = is_jumbo; + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE; + + if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher))) + ste_attr.direct_index = rule->resize_info->rule_idx; + + hws_rule_load_delete_info(rule, &ste_attr); + mlx5hws_send_ste(queue, &ste_attr); + + return 0; +} + +int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt); + struct mlx5hws_context *ctx = rule->matcher->tbl->ctx; + struct mlx5hws_matcher *matcher = rule->matcher; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_engine *queue; + int ret; + + ret = hws_rule_enqueue_precheck_move(rule, attr); + if (unlikely(ret)) + return ret; + + queue = &ctx->send_queue[attr->queue_id]; + + ret = mlx5hws_send_engine_err(queue); + if (ret) + return ret; + + hws_rule_move_init(rule, attr); + hws_rule_move_get_rtc(rule, &ste_attr); + + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; + ste_attr.wqe_tag_is_jumbo = is_jumbo; + + ste_attr.send_attr.rule = rule; + ste_attr.send_attr.fence = 0; + ste_attr.send_attr.notify_hw = !attr->burst; + ste_attr.send_attr.user_data = attr->user_data; + + ste_attr.used_id_rtc_0 = &rule->rtc_0; + ste_attr.used_id_rtc_1 = &rule->rtc_1; + ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg; + ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg; + ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ? 
+ attr->rule_idx : 0; + + mlx5hws_send_ste(queue, &ste_attr); + mlx5hws_send_engine_inc_rule(queue); + + if (!attr->burst) + mlx5hws_send_all_dep_wqe(queue); + + return 0; +} + +int mlx5hws_rule_create(struct mlx5hws_matcher *matcher, + u8 mt_idx, + u32 *match_param, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *attr, + struct mlx5hws_rule *rule_handle) +{ + int ret; + + rule_handle->matcher = matcher; + + ret = hws_rule_enqueue_precheck_create(rule_handle, attr); + if (unlikely(ret)) + return ret; + + if (unlikely(!(matcher->num_of_mt >= mt_idx) || + !(matcher->num_of_at >= at_idx) || + !match_param)) { + pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n"); + return -EINVAL; + } + + ret = hws_rule_create_hws(rule_handle, + attr, + mt_idx, + match_param, + at_idx, + rule_actions); + + return ret; +} + +int mlx5hws_rule_destroy(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr) +{ + int ret; + + ret = hws_rule_enqueue_precheck(rule, attr); + if (unlikely(ret)) + return ret; + + ret = hws_rule_destroy_hws(rule, attr); + + return ret; +} + +int mlx5hws_rule_action_update(struct mlx5hws_rule *rule, + u8 at_idx, + struct mlx5hws_rule_action rule_actions[], + struct mlx5hws_rule_attr *attr) +{ + int ret; + + ret = hws_rule_enqueue_precheck_update(rule, attr); + if (unlikely(ret)) + return ret; + + ret = hws_rule_create_hws(rule, + attr, + 0, + NULL, + at_idx, + rule_actions); + + return ret; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h new file mode 100644 index 00000000000000..495cdd17e9f3cb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_RULE_H_ +#define MLX5HWS_RULE_H_ + +enum { + MLX5HWS_STE_CTRL_SZ = 20, + MLX5HWS_ACTIONS_SZ = 12, + MLX5HWS_MATCH_TAG_SZ = 32, + MLX5HWS_JUMBO_TAG_SZ = 44, +}; + +enum mlx5hws_rule_status { + MLX5HWS_RULE_STATUS_UNKNOWN, + MLX5HWS_RULE_STATUS_CREATING, + MLX5HWS_RULE_STATUS_CREATED, + MLX5HWS_RULE_STATUS_DELETING, + MLX5HWS_RULE_STATUS_DELETED, + MLX5HWS_RULE_STATUS_FAILING, + MLX5HWS_RULE_STATUS_FAILED, +}; + +enum mlx5hws_rule_move_state { + MLX5HWS_RULE_RESIZE_STATE_IDLE, + MLX5HWS_RULE_RESIZE_STATE_WRITING, + MLX5HWS_RULE_RESIZE_STATE_DELETING, +}; + +enum mlx5hws_rule_jumbo_match_tag_offset { + MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8, +}; + +struct mlx5hws_rule_match_tag { + union { + u8 jumbo[MLX5HWS_JUMBO_TAG_SZ]; + struct { + u8 reserved[MLX5HWS_ACTIONS_SZ]; + u8 match[MLX5HWS_MATCH_TAG_SZ]; + }; + }; +}; + +struct mlx5hws_rule_resize_info { + struct mlx5hws_pool *action_ste_pool[2]; + u32 rtc_0; + u32 rtc_1; + u32 rule_idx; + u8 state; + u8 max_stes; + u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */ + u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */ +}; + +struct mlx5hws_rule { + struct mlx5hws_matcher *matcher; + union { + struct mlx5hws_rule_match_tag tag; + struct mlx5hws_rule_resize_info *resize_info; + }; + u32 rtc_0; /* The RTC into which the STE was inserted */ + u32 rtc_1; /* The RTC into which the STE was inserted */ + int action_ste_idx; /* STE array index */ + u8 status; /* enum mlx5hws_rule_status */ + u8 action_ste_selector; /* For rule update - which action STE is in use */ + u8 pending_wqes; + bool skip_delete; /* 
For complex rules - another rule with same tag + * still exists, so don't actually delete this rule. + */ +}; + +void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule); + +int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule, + void *queue, void *user_data); + +int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule, + struct mlx5hws_rule_attr *attr); + +bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule); + +void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule); + +#endif /* MLX5HWS_RULE_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c new file mode 100644 index 00000000000000..0c7989184c3071 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c @@ -0,0 +1,1215 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" +#include "lib/clock.h" + +enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 }; + +struct mlx5hws_send_ring_dep_wqe * +mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq; + unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1); + + memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ); + + return &send_sq->dep_wqe[idx]; +} + +void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue) +{ + queue->send_ring.send_sq.head_dep_idx--; +} + +void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq; + struct mlx5hws_send_ste_attr ste_attr = {0}; + struct mlx5hws_send_ring_dep_wqe *dep_wqe; + + ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA; + ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE; + + /* Fence first from previous depend WQEs */ + ste_attr.send_attr.fence = 1; + + while (send_sq->head_dep_idx != send_sq->tail_dep_idx) { + dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)]; + + /* Notify HW on the last WQE */ + ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx); + ste_attr.send_attr.user_data = dep_wqe->user_data; + ste_attr.send_attr.rule = dep_wqe->rule; + + ste_attr.rtc_0 = dep_wqe->rtc_0; + ste_attr.rtc_1 = dep_wqe->rtc_1; + ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0; + ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1; + ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0; + ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1; + ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl; + ste_attr.wqe_data = &dep_wqe->wqe_data; + ste_attr.direct_index = dep_wqe->direct_index; + + mlx5hws_send_ste(queue, &ste_attr); + + /* Fencing is done only on the first WQE */ + ste_attr.send_attr.fence = 0; + } +} + +struct mlx5hws_send_engine_post_ctrl +mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_engine_post_ctrl ctrl; + + ctrl.queue = queue; + /* Currently only one send ring is supported */ + ctrl.send_ring = &queue->send_ring; + ctrl.num_wqebbs = 0; + + return ctrl; +} + +void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl, + char **buf, size_t *len) +{ + struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq; + unsigned int idx; + + idx = (send_sq->cur_post + 
ctrl->num_wqebbs) & send_sq->buf_mask; + + /* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used + * as buffer of more than one WQE_BB, since the two MLX5_SEND_WQE_BB + * can be on 2 different kernel memory pages. + */ + *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx); + *len = MLX5_SEND_WQE_BB; + + if (!ctrl->num_wqebbs) { + *buf += sizeof(struct mlx5hws_wqe_ctrl_seg); + *len -= sizeof(struct mlx5hws_wqe_ctrl_seg); + } + + ctrl->num_wqebbs++; +} + +static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq, + struct mlx5hws_wqe_ctrl_seg *doorbell_cseg) +{ + /* ensure wqe is visible to device before updating doorbell record */ + dma_wmb(); + + *sq->wq.db = cpu_to_be32(sq->cur_post); + + /* ensure doorbell record is visible to device before ringing the + * doorbell + */ + wmb(); + + mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map); + + /* Ensure doorbell is written on uar_page before poll_cq */ + WRITE_ONCE(doorbell_cseg, NULL); +} + +static void +hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data, + struct mlx5hws_rule_match_tag *tag, + bool is_jumbo) +{ + if (is_jumbo) { + /* Clear previous possibly dirty control */ + memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ); + memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ); + } else { + /* Clear previous possibly dirty control and actions */ + memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ); + memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ); + } +} + +void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl, + struct mlx5hws_send_engine_post_attr *attr) +{ + struct mlx5hws_wqe_ctrl_seg *wqe_ctrl; + struct mlx5hws_send_ring_sq *sq; + unsigned int idx; + u32 flags = 0; + + sq = &ctrl->send_ring->send_sq; + idx = sq->cur_post & sq->buf_mask; + sq->last_idx = idx; + + wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx); + + wqe_ctrl->opmod_idx_opcode = + cpu_to_be32((attr->opmod << 24) | + ((sq->cur_post & 0xffff) << 8) | + attr->opcode); + wqe_ctrl->qpn_ds = + cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 | + sq->sqn << 8); + wqe_ctrl->imm = cpu_to_be32(attr->id); + + flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0; + flags |= attr->fence ? 
MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0; + wqe_ctrl->flags = cpu_to_be32(flags); + + sq->wr_priv[idx].id = attr->id; + sq->wr_priv[idx].retry_id = attr->retry_id; + + sq->wr_priv[idx].rule = attr->rule; + sq->wr_priv[idx].user_data = attr->user_data; + sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs; + + if (attr->rule) { + sq->wr_priv[idx].rule->pending_wqes++; + sq->wr_priv[idx].used_id = attr->used_id; + } + + sq->cur_post += ctrl->num_wqebbs; + + if (attr->notify_hw) + hws_send_engine_post_ring(sq, wqe_ctrl); +} + +static void hws_send_wqe(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_engine_post_attr *send_attr, + struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl, + void *send_wqe_data, + void *send_wqe_tag, + bool is_jumbo, + u8 gta_opcode, + u32 direct_index) +{ + struct mlx5hws_wqe_gta_data_seg_ste *wqe_data; + struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl; + struct mlx5hws_send_engine_post_ctrl ctrl; + size_t wqe_len; + + ctrl = mlx5hws_send_engine_post_start(queue); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len); + + wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index); + memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix, + sizeof(send_wqe_ctrl->stc_ix)); + + if (send_wqe_data) + memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data)); + else + hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo); + + mlx5hws_send_engine_post_end(&ctrl, send_attr); +} + +void mlx5hws_send_ste(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ste_attr *ste_attr) +{ + struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr; + u8 notify_hw = send_attr->notify_hw; + u8 fence = send_attr->fence; + + if (ste_attr->rtc_1) { + send_attr->id = ste_attr->rtc_1; + send_attr->used_id = ste_attr->used_id_rtc_1; + send_attr->retry_id = ste_attr->retry_rtc_1; + send_attr->fence = fence; + send_attr->notify_hw = notify_hw && !ste_attr->rtc_0; + hws_send_wqe(queue, send_attr, + ste_attr->wqe_ctrl, + ste_attr->wqe_data, + ste_attr->wqe_tag, + ste_attr->wqe_tag_is_jumbo, + ste_attr->gta_opcode, + ste_attr->direct_index); + } + + if (ste_attr->rtc_0) { + send_attr->id = ste_attr->rtc_0; + send_attr->used_id = ste_attr->used_id_rtc_0; + send_attr->retry_id = ste_attr->retry_rtc_0; + send_attr->fence = fence && !ste_attr->rtc_1; + send_attr->notify_hw = notify_hw; + hws_send_wqe(queue, send_attr, + ste_attr->wqe_ctrl, + ste_attr->wqe_data, + ste_attr->wqe_tag, + ste_attr->wqe_tag_is_jumbo, + ste_attr->gta_opcode, + ste_attr->direct_index); + } + + /* Restore to original requested values */ + send_attr->notify_hw = notify_hw; + send_attr->fence = fence; +} + +static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_priv *priv, + u16 wqe_cnt) +{ + struct mlx5hws_send_engine_post_attr send_attr = {0}; + struct mlx5hws_wqe_gta_data_seg_ste *wqe_data; + struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl; + struct mlx5hws_send_engine_post_ctrl ctrl; + struct mlx5hws_send_ring_sq *send_sq; + unsigned int idx; + size_t wqe_len; + char *p; + + send_attr.rule = priv->rule; + send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS; + send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE; + send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg); + send_attr.notify_hw = 1; + send_attr.fence = 0; + send_attr.user_data = priv->user_data; + send_attr.id = priv->retry_id; + send_attr.used_id = priv->used_id; + + ctrl = mlx5hws_send_engine_post_start(queue); + 
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len); + mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len); + + send_sq = &ctrl.send_ring->send_sq; + idx = wqe_cnt & send_sq->buf_mask; + p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx); + + /* Copy old gta ctrl */ + memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg), + MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg)); + + idx = (wqe_cnt + 1) & send_sq->buf_mask; + p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx); + + /* Copy old gta data */ + memcpy(wqe_data, p, MLX5_SEND_WQE_BB); + + mlx5hws_send_engine_post_end(&ctrl, &send_attr); +} + +void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq; + struct mlx5hws_wqe_ctrl_seg *wqe_ctrl; + + wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx); + wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE); + + hws_send_engine_post_ring(sq, wqe_ctrl); +} + +static void +hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_priv *priv, + enum mlx5hws_flow_op_status *status) +{ + switch (priv->rule->resize_info->state) { + case MLX5HWS_RULE_RESIZE_STATE_WRITING: + if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) { + /* Backup original RTCs */ + u32 orig_rtc_0 = priv->rule->resize_info->rtc_0; + u32 orig_rtc_1 = priv->rule->resize_info->rtc_1; + + /* Delete partially failed move rule using resize_info */ + priv->rule->resize_info->rtc_0 = priv->rule->rtc_0; + priv->rule->resize_info->rtc_1 = priv->rule->rtc_1; + + /* Move rule to original RTC for future delete */ + priv->rule->rtc_0 = orig_rtc_0; + priv->rule->rtc_1 = orig_rtc_1; + } + /* Clean leftovers */ + mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data); + break; + + case MLX5HWS_RULE_RESIZE_STATE_DELETING: + if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) { + *status = MLX5HWS_FLOW_OP_ERROR; + } else { + *status = MLX5HWS_FLOW_OP_SUCCESS; + priv->rule->matcher = priv->rule->matcher->resize_dst; + } + priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE; + priv->rule->status = MLX5HWS_RULE_STATUS_CREATED; + break; + + default: + break; + } +} + +static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_priv *priv, + u16 wqe_cnt, + enum mlx5hws_flow_op_status *status) +{ + priv->rule->pending_wqes--; + + if (*status == MLX5HWS_FLOW_OP_ERROR) { + if (priv->retry_id) { + hws_send_engine_retry_post_send(queue, priv, wqe_cnt); + return; + } + /* Some part of the rule failed */ + priv->rule->status = MLX5HWS_RULE_STATUS_FAILING; + *priv->used_id = 0; + } else { + *priv->used_id = priv->id; + } + + /* Update rule status for the last completion */ + if (!priv->rule->pending_wqes) { + if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) { + hws_send_engine_update_rule_resize(queue, priv, status); + return; + } + + if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) { + /* Rule completely failed and doesn't require cleanup */ + if (!priv->rule->rtc_0 && !priv->rule->rtc_1) + priv->rule->status = MLX5HWS_RULE_STATUS_FAILED; + + *status = MLX5HWS_FLOW_OP_ERROR; + } else { + /* Increase the status, this only works on good flow as the enum + * is arrange it away creating -> created -> deleting -> deleted + */ + priv->rule->status++; + *status = MLX5HWS_FLOW_OP_SUCCESS; + /* Rule was deleted now we can safely release action STEs + * and clear resize info + */ + if (priv->rule->status == 
MLX5HWS_RULE_STATUS_DELETED) { + mlx5hws_rule_free_action_ste(priv->rule); + mlx5hws_rule_clear_resize_info(priv->rule); + } + } + } +} + +static void hws_send_engine_update(struct mlx5hws_send_engine *queue, + struct mlx5_cqe64 *cqe, + struct mlx5hws_send_ring_priv *priv, + struct mlx5hws_flow_op_result res[], + s64 *i, + u32 res_nb, + u16 wqe_cnt) +{ + enum mlx5hws_flow_op_status status; + + if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) && + likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) { + status = MLX5HWS_FLOW_OP_SUCCESS; + } else { + status = MLX5HWS_FLOW_OP_ERROR; + } + + if (priv->user_data) { + if (priv->rule) { + hws_send_engine_update_rule(queue, priv, wqe_cnt, &status); + /* Completion is provided on the last rule WQE */ + if (priv->rule->pending_wqes) + return; + } + + if (*i < res_nb) { + res[*i].user_data = priv->user_data; + res[*i].status = status; + (*i)++; + mlx5hws_send_engine_dec_rule(queue); + } else { + mlx5hws_send_engine_gen_comp(queue, priv->user_data, status); + } + } +} + +static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq, + struct mlx5_cqe64 *cqe64) +{ + if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) { + struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64; + + mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64)); + mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd); + mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, + 16, 1, err_cqe, + sizeof(*err_cqe), false); + return CQ_POLL_ERR; + } + + return CQ_OK; +} + +static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq) +{ + struct mlx5_cqe64 *cqe64; + int err; + + cqe64 = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe64) { + if (unlikely(cq->mdev->state == + MLX5_DEVICE_STATE_INTERNAL_ERROR)) { + mlx5_core_dbg_once(cq->mdev, + "Polling CQ while device is shutting down\n"); + return CQ_POLL_ERR; + } + return CQ_EMPTY; + } + + mlx5_cqwq_pop(&cq->wq); + err = mlx5hws_parse_cqe(cq, cqe64); + mlx5_cqwq_update_db_record(&cq->wq); + + return err; +} + +static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue, + struct mlx5hws_flow_op_result res[], + s64 *polled, + u32 res_nb) +{ + struct mlx5hws_send_ring *send_ring = &queue->send_ring; + struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq; + struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq; + struct mlx5hws_send_ring_priv *priv; + struct mlx5_cqe64 *cqe; + u8 cqe_opcode; + u16 wqe_cnt; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) + return; + + cqe_opcode = get_cqe_opcode(cqe); + if (cqe_opcode == MLX5_CQE_INVALID) + return; + + if (unlikely(cqe_opcode != MLX5_CQE_REQ)) + queue->err = true; + + wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask; + + while (cq->poll_wqe != wqe_cnt) { + priv = &sq->wr_priv[cq->poll_wqe]; + hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0); + cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask; + } + + priv = &sq->wr_priv[wqe_cnt]; + cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask; + hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt); + mlx5hws_cq_poll_one(cq); +} + +static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue, + struct mlx5hws_flow_op_result res[], + s64 *polled, + u32 res_nb) +{ + struct mlx5hws_completed_poll *comp = &queue->completed; + + while (comp->ci != comp->pi) { + if (*polled < res_nb) { + res[*polled].status = + comp->entries[comp->ci].status; + 
res[*polled].user_data = + comp->entries[comp->ci].user_data; + (*polled)++; + comp->ci = (comp->ci + 1) & comp->mask; + mlx5hws_send_engine_dec_rule(queue); + } else { + return; + } + } +} + +static int hws_send_engine_poll(struct mlx5hws_send_engine *queue, + struct mlx5hws_flow_op_result res[], + u32 res_nb) +{ + s64 polled = 0; + + hws_send_engine_poll_list(queue, res, &polled, res_nb); + + if (polled >= res_nb) + return polled; + + hws_send_engine_poll_cq(queue, res, &polled, res_nb); + + return polled; +} + +int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx, + u16 queue_id, + struct mlx5hws_flow_op_result res[], + u32 res_nb) +{ + return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb); +} + +static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev, + int numa_node, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_sq *sq, + void *sqc_data) +{ + void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq); + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5_wq_param param; + size_t buf_sz; + int err; + + sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; + sq->mdev = mdev; + + param.db_numa_node = numa_node; + param.buf_numa_node = numa_node; + err = mlx5_wq_cyc_create(mdev, ¶m, sqc_wq, wq, &sq->wq_ctrl); + if (err) + return err; + wq->db = &wq->db[MLX5_SND_DBR]; + + buf_sz = queue->num_entries * MAX_WQES_PER_RULE; + sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL); + if (!sq->dep_wqe) { + err = -ENOMEM; + goto destroy_wq_cyc; + } + + sq->wr_priv = kzalloc(sizeof(*sq->wr_priv) * buf_sz, GFP_KERNEL); + if (!sq->wr_priv) { + err = -ENOMEM; + goto free_dep_wqe; + } + + sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1; + + return 0; + +free_dep_wqe: + kfree(sq->dep_wqe); +destroy_wq_cyc: + mlx5_wq_destroy(&sq->wq_ctrl); + return err; +} + +static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq) +{ + if (!sq) + return; + kfree(sq->wr_priv); + kfree(sq->dep_wqe); + mlx5_wq_destroy(&sq->wq_ctrl); +} + +static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn, + void *sqc_data, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_sq *sq, + struct mlx5hws_send_ring_cq *cq) +{ + void *in, *sqc, *wq; + int inlen, err; + u8 ts_format; + + inlen = MLX5_ST_SZ_BYTES(create_sq_in) + + sizeof(u64) * sq->wq_ctrl.buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); + wq = MLX5_ADDR_OF(sqc, sqc, wq); + + memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc)); + MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn); + + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, flush_in_error_en, 1); + + ts_format = mlx5_is_real_time_sq(mdev) ? 
MLX5_TIMESTAMP_FORMAT_REAL_TIME : + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; + MLX5_SET(sqc, sqc, ts_format, ts_format); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index); + MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); + + mlx5_fill_page_frag_array(&sq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn); + + kvfree(in); + + return err; +} + +static void hws_send_ring_destroy_sq(struct mlx5_core_dev *mdev, + struct mlx5hws_send_ring_sq *sq) +{ + mlx5_core_destroy_sq(mdev, sq->sqn); +} + +static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn) +{ + void *in, *sqc; + int inlen, err; + + inlen = MLX5_ST_SZ_BYTES(modify_sq_in); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); + + err = mlx5_core_modify_sq(mdev, sqn, in); + + kvfree(in); + + return err; +} + +static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq) +{ + mlx5_core_destroy_sq(sq->mdev, sq->sqn); + mlx5_wq_destroy(&sq->wq_ctrl); + kfree(sq->wr_priv); + kfree(sq->dep_wqe); +} + +static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn, + void *sqc_data, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_sq *sq, + struct mlx5hws_send_ring_cq *cq) +{ + int err; + + err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq); + if (err) + return err; + + err = hws_send_ring_set_sq_rdy(mdev, sq->sqn); + if (err) + hws_send_ring_destroy_sq(mdev, sq); + + return err; +} + +static int hws_send_ring_open_sq(struct mlx5hws_context *ctx, + int numa_node, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_sq *sq, + struct mlx5hws_send_ring_cq *cq) +{ + size_t buf_sz, sq_log_buf_sz; + void *sqc_data, *wq; + int err; + + sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL); + if (!sqc_data) + return -ENOMEM; + + buf_sz = queue->num_entries * MAX_WQES_PER_RULE; + sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz)); + + wq = MLX5_ADDR_OF(sqc, sqc_data, wq); + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, ctx->pd_num); + MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz); + + err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data); + if (err) + goto err_free_sqc; + + err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data, + queue, sq, cq); + if (err) + goto err_free_sq; + + kvfree(sqc_data); + + return 0; +err_free_sq: + hws_send_ring_free_sq(sq); +err_free_sqc: + kvfree(sqc_data); + return err; +} + +static void hws_cq_complete(struct mlx5_core_cq *mcq, + struct mlx5_eqe *eqe) +{ + pr_err("CQ completion CQ: #%u\n", mcq->cqn); +} + +static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev, + int numa_node, + struct mlx5hws_send_engine *queue, + void *cqc_data, + struct mlx5hws_send_ring_cq *cq) +{ + struct mlx5_core_cq *mcq = &cq->mcq; + struct mlx5_wq_param param; + struct mlx5_cqe64 *cqe; + int err; + u32 i; + + param.buf_numa_node = numa_node; + param.db_numa_node = numa_node; + + err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl); + if (err) + return err; + + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + mcq->comp = hws_cq_complete; + + for (i = 0; i <
mlx5_cqwq_get_size(&cq->wq); i++) { + cqe = mlx5_cqwq_get_wqe(&cq->wq, i); + cqe->op_own = 0xf1; + } + + cq->mdev = mdev; + + return 0; +} + +static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev, + struct mlx5hws_send_engine *queue, + void *cqc_data, + struct mlx5hws_send_ring_cq *cq) +{ + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; + struct mlx5_core_cq *mcq = &cq->mcq; + void *in, *cqc; + int inlen, eqn; + int err; + + err = mlx5_comp_eqn_get(mdev, 0, &eqn); + if (err) + return err; + + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + sizeof(u64) * cq->wq_ctrl.buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc)); + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); + + err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); + + kvfree(in); + + return err; +} + +static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev, + struct mlx5hws_send_engine *queue, + int numa_node, + struct mlx5hws_send_ring_cq *cq) +{ + void *cqc_data; + int err; + + cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL); + if (!cqc_data) + return -ENOMEM; + + MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index); + MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries); + MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries)); + + err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq); + if (err) + goto err_out; + + err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq); + if (err) + goto err_free_cq; + + kvfree(cqc_data); + + return 0; + +err_free_cq: + mlx5_wq_destroy(&cq->wq_ctrl); +err_out: + kvfree(cqc_data); + return err; +} + +static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq) +{ + mlx5_core_destroy_cq(cq->mdev, &cq->mcq); + mlx5_wq_destroy(&cq->wq_ctrl); +} + +static void hws_send_ring_close(struct mlx5hws_send_engine *queue) +{ + hws_send_ring_close_sq(&queue->send_ring.send_sq); + hws_send_ring_close_cq(&queue->send_ring.send_cq); +} + +static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue) +{ + int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev)); + struct mlx5hws_send_ring *ring = &queue->send_ring; + int err; + + err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq); + if (err) + return err; + + err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq, + &ring->send_cq); + if (err) + goto close_cq; + + return err; + +close_cq: + hws_send_ring_close_cq(&ring->send_cq); + return err; +} + +void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue) +{ + hws_send_ring_close(queue); + kfree(queue->completed.entries); +} + +int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + u16 queue_size) +{ + int err; + + mutex_init(&queue->lock); + + queue->num_entries = roundup_pow_of_two(queue_size); + queue->used_entries = 0; + + queue->completed.entries = kcalloc(queue->num_entries, + sizeof(queue->completed.entries[0]), + GFP_KERNEL); + if (!queue->completed.entries) + return -ENOMEM; + + queue->completed.pi = 0; + queue->completed.ci = 0; + queue->completed.mask = queue->num_entries - 1; + err = mlx5hws_send_ring_open(ctx, 
queue); + if (err) + goto free_completed_entries; + + return 0; + +free_completed_entries: + kfree(queue->completed.entries); + return err; +} + +static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues) +{ + while (queues--) + mlx5hws_send_queue_close(&ctx->send_queue[queues]); +} + +static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx) +{ + int bwc_queues = ctx->queues - 1; + int i; + + if (!mlx5hws_context_bwc_supported(ctx)) + return; + + for (i = 0; i < bwc_queues; i++) + mutex_destroy(&ctx->bwc_send_queue_locks[i]); + kfree(ctx->bwc_send_queue_locks); +} + +void mlx5hws_send_queues_close(struct mlx5hws_context *ctx) +{ + hws_send_queues_bwc_locks_destroy(ctx); + __hws_send_queues_close(ctx, ctx->queues); + kfree(ctx->send_queue); +} + +static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx) +{ + /* Number of BWC queues is equal to number of the usual HWS queues */ + int bwc_queues = ctx->queues - 1; + int i; + + if (!mlx5hws_context_bwc_supported(ctx)) + return 0; + + ctx->queues += bwc_queues; + + ctx->bwc_send_queue_locks = kcalloc(bwc_queues, + sizeof(*ctx->bwc_send_queue_locks), + GFP_KERNEL); + + if (!ctx->bwc_send_queue_locks) + return -ENOMEM; + + for (i = 0; i < bwc_queues; i++) + mutex_init(&ctx->bwc_send_queue_locks[i]); + + return 0; +} + +int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, + u16 queues, + u16 queue_size) +{ + int err = 0; + u32 i; + + /* Open one extra queue for control path */ + ctx->queues = queues + 1; + + /* open a separate set of queues and locks for bwc API */ + err = hws_bwc_send_queues_init(ctx); + if (err) + return err; + + ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL); + if (!ctx->send_queue) { + err = -ENOMEM; + goto free_bwc_locks; + } + + for (i = 0; i < ctx->queues; i++) { + err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size); + if (err) + goto close_send_queues; + } + + return 0; + +close_send_queues: + __hws_send_queues_close(ctx, i); + + kfree(ctx->send_queue); + +free_bwc_locks: + hws_send_queues_bwc_locks_destroy(ctx); + + return err; +} + +int mlx5hws_send_queue_action(struct mlx5hws_context *ctx, + u16 queue_id, + u32 actions) +{ + struct mlx5hws_send_ring_sq *send_sq; + struct mlx5hws_send_engine *queue; + bool wait_comp = false; + s64 polled = 0; + + queue = &ctx->send_queue[queue_id]; + send_sq = &queue->send_ring.send_sq; + + switch (actions) { + case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC: + wait_comp = true; + fallthrough; + case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC: + if (send_sq->head_dep_idx != send_sq->tail_dep_idx) + /* Send dependent WQEs to drain the queue */ + mlx5hws_send_all_dep_wqe(queue); + else + /* Signal on the last posted WQE */ + mlx5hws_send_engine_flush_queue(queue); + + /* Poll queue until empty */ + while (wait_comp && !mlx5hws_send_engine_empty(queue)) + hws_send_engine_poll_cq(queue, NULL, &polled, 0); + + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +hws_send_wqe_fw(struct mlx5_core_dev *mdev, + u32 pd_num, + struct mlx5hws_send_engine_post_attr *send_attr, + struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl, + void *send_wqe_match_data, + void *send_wqe_match_tag, + void *send_wqe_range_data, + void *send_wqe_range_tag, + bool is_jumbo, + u8 gta_opcode) +{ + bool has_range = send_wqe_range_data || send_wqe_range_tag; + bool has_match = send_wqe_match_data || send_wqe_match_tag; + struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0}; + struct mlx5hws_wqe_gta_data_seg_ste 
gta_wqe_data1 = {0}; + struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0}; + struct mlx5hws_cmd_generate_wqe_attr attr = {0}; + struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0}; + struct mlx5_cqe64 cqe; + u32 flags = 0; + int ret; + + /* Set WQE control */ + wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode); + wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16); + flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0; + wqe_ctrl.flags = cpu_to_be32(flags); + wqe_ctrl.imm = cpu_to_be32(send_attr->id); + + /* Set GTA WQE CTRL */ + memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix)); + gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28); + + /* Set GTA match WQE DATA */ + if (has_match) { + if (send_wqe_match_data) + memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0)); + else + hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo); + + gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8); + attr.gta_data_0 = (u8 *)&gta_wqe_data0; + } + + /* Set GTA range WQE DATA */ + if (has_range) { + if (send_wqe_range_data) + memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1)); + else + hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false); + + gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8); + attr.gta_data_1 = (u8 *)&gta_wqe_data1; + } + + attr.pdn = pd_num; + attr.wqe_ctrl = (u8 *)&wqe_ctrl; + attr.gta_ctrl = (u8 *)&gta_wqe_ctrl; + +send_wqe: + ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe); + if (ret) { + mlx5_core_err(mdev, "Failed to write WQE using command"); + return ret; + } + + if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) && + (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) { + *send_attr->used_id = send_attr->id; + return 0; + } + + /* Retry if rule failed */ + if (send_attr->retry_id) { + wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id); + send_attr->id = send_attr->retry_id; + send_attr->retry_id = 0; + goto send_wqe; + } + + return -1; +} + +void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ste_attr *ste_attr) +{ + struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr; + struct mlx5hws_rule *rule = send_attr->rule; + struct mlx5_core_dev *mdev; + u16 queue_id; + u32 pdn; + int ret; + + queue_id = queue - ctx->send_queue; + mdev = ctx->mdev; + pdn = ctx->pd_num; + + /* Writing through FW can't HW fence, therefore we drain the queue */ + if (send_attr->fence) + mlx5hws_send_queue_action(ctx, + queue_id, + MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC); + + if (ste_attr->rtc_1) { + send_attr->id = ste_attr->rtc_1; + send_attr->used_id = ste_attr->used_id_rtc_1; + send_attr->retry_id = ste_attr->retry_rtc_1; + ret = hws_send_wqe_fw(mdev, pdn, send_attr, + ste_attr->wqe_ctrl, + ste_attr->wqe_data, + ste_attr->wqe_tag, + ste_attr->range_wqe_data, + ste_attr->range_wqe_tag, + ste_attr->wqe_tag_is_jumbo, + ste_attr->gta_opcode); + if (ret) + goto fail_rule; + } + + if (ste_attr->rtc_0) { + send_attr->id = ste_attr->rtc_0; + send_attr->used_id = ste_attr->used_id_rtc_0; + send_attr->retry_id = ste_attr->retry_rtc_0; + ret = hws_send_wqe_fw(mdev, pdn, send_attr, + ste_attr->wqe_ctrl, + ste_attr->wqe_data, + ste_attr->wqe_tag, + ste_attr->range_wqe_data, + ste_attr->range_wqe_tag, + ste_attr->wqe_tag_is_jumbo, + ste_attr->gta_opcode); + if (ret) + goto fail_rule; + } + + /* Increase the status, this only works on good flow as the enum + * is
arrange it away creating -> created -> deleting -> deleted + */ + if (likely(rule)) + rule->status++; + + mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS); + + return; + +fail_rule: + if (likely(rule)) + rule->status = !rule->rtc_0 && !rule->rtc_1 ? + MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING; + + mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h new file mode 100644 index 00000000000000..b50825d6dc53dc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_SEND_H_ +#define MLX5HWS_SEND_H_ + +/* As a single operation requires at least two WQEBBS. + * This means a maximum of 16 such operations per rule. + */ +#define MAX_WQES_PER_RULE 32 + +enum mlx5hws_wqe_opcode { + MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c, +}; + +enum mlx5hws_wqe_opmod { + MLX5HWS_WQE_OPMOD_GTA_STE = 0, + MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1, +}; + +enum mlx5hws_wqe_gta_opcode { + MLX5HWS_WQE_GTA_OP_ACTIVATE = 0, + MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1, +}; + +enum mlx5hws_wqe_gta_opmod { + MLX5HWS_WQE_GTA_OPMOD_STE = 0, + MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1, +}; + +enum mlx5hws_wqe_gta_sz { + MLX5HWS_WQE_SZ_GTA_CTRL = 48, + MLX5HWS_WQE_SZ_GTA_DATA = 64, +}; + +/* WQE Control segment. */ +struct mlx5hws_wqe_ctrl_seg { + __be32 opmod_idx_opcode; + __be32 qpn_ds; + __be32 flags; + __be32 imm; +}; + +struct mlx5hws_wqe_gta_ctrl_seg { + __be32 op_dirix; + __be32 stc_ix[5]; + __be32 rsvd0[6]; +}; + +struct mlx5hws_wqe_gta_data_seg_ste { + __be32 rsvd0_ctr_id; + __be32 rsvd1_definer; + __be32 rsvd2[3]; + union { + struct { + __be32 action[3]; + __be32 tag[8]; + }; + __be32 jumbo[11]; + }; +}; + +struct mlx5hws_wqe_gta_data_seg_arg { + __be32 action_args[8]; +}; + +struct mlx5hws_wqe_gta { + struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl; + union { + struct mlx5hws_wqe_gta_data_seg_ste seg_ste; + struct mlx5hws_wqe_gta_data_seg_arg seg_arg; + }; +}; + +struct mlx5hws_send_ring_cq { + struct mlx5_core_dev *mdev; + struct mlx5_cqwq wq; + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5_core_cq mcq; + u16 poll_wqe; +}; + +struct mlx5hws_send_ring_priv { + struct mlx5hws_rule *rule; + void *user_data; + u32 num_wqebbs; + u32 id; + u32 retry_id; + u32 *used_id; +}; + +struct mlx5hws_send_ring_dep_wqe { + struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl; + struct mlx5hws_wqe_gta_data_seg_ste wqe_data; + struct mlx5hws_rule *rule; + u32 rtc_0; + u32 rtc_1; + u32 retry_rtc_0; + u32 retry_rtc_1; + u32 direct_index; + void *user_data; +}; + +struct mlx5hws_send_ring_sq { + struct mlx5_core_dev *mdev; + u16 cur_post; + u16 buf_mask; + struct mlx5hws_send_ring_priv *wr_priv; + unsigned int last_idx; + struct mlx5hws_send_ring_dep_wqe *dep_wqe; + unsigned int head_dep_idx; + unsigned int tail_dep_idx; + u32 sqn; + struct mlx5_wq_cyc wq; + struct mlx5_wq_ctrl wq_ctrl; + void __iomem *uar_map; +}; + +struct mlx5hws_send_ring { + struct mlx5hws_send_ring_cq send_cq; + struct mlx5hws_send_ring_sq send_sq; +}; + +struct mlx5hws_completed_poll_entry { + void *user_data; + enum mlx5hws_flow_op_status status; +}; + +struct mlx5hws_completed_poll { + struct mlx5hws_completed_poll_entry *entries; + u16 ci; + u16 pi; + u16 mask; +}; + +struct mlx5hws_send_engine { + struct 
mlx5hws_send_ring send_ring; + struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */ + struct mlx5hws_completed_poll completed; + u16 used_entries; + u16 num_entries; + bool err; + struct mutex lock; /* Protects the send engine */ +}; + +struct mlx5hws_send_engine_post_ctrl { + struct mlx5hws_send_engine *queue; + struct mlx5hws_send_ring *send_ring; + size_t num_wqebbs; +}; + +struct mlx5hws_send_engine_post_attr { + u8 opcode; + u8 opmod; + u8 notify_hw; + u8 fence; + u8 match_definer_id; + u8 range_definer_id; + size_t len; + struct mlx5hws_rule *rule; + u32 id; + u32 retry_id; + u32 *used_id; + void *user_data; +}; + +struct mlx5hws_send_ste_attr { + u32 rtc_0; + u32 rtc_1; + u32 retry_rtc_0; + u32 retry_rtc_1; + u32 *used_id_rtc_0; + u32 *used_id_rtc_1; + bool wqe_tag_is_jumbo; + u8 gta_opcode; + u32 direct_index; + struct mlx5hws_send_engine_post_attr send_attr; + struct mlx5hws_rule_match_tag *wqe_tag; + struct mlx5hws_rule_match_tag *range_wqe_tag; + struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl; + struct mlx5hws_wqe_gta_data_seg_ste *wqe_data; + struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data; +}; + +struct mlx5hws_send_ring_dep_wqe * +mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue); + +void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue); + +void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue); + +void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue); + +int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + u16 queue_size); + +void mlx5hws_send_queues_close(struct mlx5hws_context *ctx); + +int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, + u16 queues, + u16 queue_size); + +int mlx5hws_send_queue_action(struct mlx5hws_context *ctx, + u16 queue_id, + u32 actions); + +int mlx5hws_send_test(struct mlx5hws_context *ctx, + u16 queues, + u16 queue_size); + +struct mlx5hws_send_engine_post_ctrl +mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue); + +void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl, + char **buf, size_t *len); + +void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl, + struct mlx5hws_send_engine_post_attr *attr); + +void mlx5hws_send_ste(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ste_attr *ste_attr); + +void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ste_attr *ste_attr); + +void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue); + +static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue) +{ + struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq; + struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq; + + return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe); +} + +static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue) +{ + return queue->used_entries >= queue->num_entries; +} + +static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue) +{ + queue->used_entries++; +} + +static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue) +{ + queue->used_entries--; +} + +static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue, + void *user_data, + int comp_status) +{ + struct mlx5hws_completed_poll *comp = &queue->completed; + + comp->entries[comp->pi].status = comp_status; + comp->entries[comp->pi].user_data = user_data; + + comp->pi = 
(comp->pi + 1) & comp->mask; +} + +static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue) +{ + return queue->err; +} + +#endif /* MLX5HWS_SEND_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c new file mode 100644 index 00000000000000..8c063a8d87d7dc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c @@ -0,0 +1,493 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl) +{ + return tbl->ft_id; +} + +static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl, + struct mlx5hws_cmd_ft_create_attr *ft_attr) +{ + ft_attr->type = tbl->fw_ft_type; + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) + ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1; + else + ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1; + ft_attr->rtc_valid = true; +} + +static void hws_table_set_cap_attr(struct mlx5hws_table *tbl, + struct mlx5hws_cmd_ft_create_attr *ft_attr) +{ + /* Enabling reformat_en or decap_en for the first flow table + * must be done when all VFs are down. + * However, HWS doesn't know when it is required to create the first FT. + * On the other hand, HWS doesn't use all these FT capabilities at all + * (the API doesn't even provide a way to specify these flags), so we'll + * just set these caps on all the flow tables. + * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint is N/A. + */ + if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) { + ft_attr->reformat_en = true; + ft_attr->decap_en = true; + } +} + +static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl) +{ + struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; + struct mlx5hws_cmd_set_fte_attr fte_attr = {0}; + struct mlx5hws_cmd_forward_tbl *default_miss; + struct mlx5hws_cmd_set_fte_dest dest = {0}; + struct mlx5hws_context *ctx = tbl->ctx; + u8 tbl_type = tbl->type; + + if (tbl->type != MLX5HWS_TABLE_TYPE_FDB) + return 0; + + if (ctx->common_res[tbl_type].default_miss) { + ctx->common_res[tbl_type].default_miss->refcount++; + return 0; + } + + ft_attr.type = tbl->fw_ft_type; + ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */ + ft_attr.rtc_valid = false; + + dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.destination_id = ctx->caps->eswitch_manager_vport_number; + + fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + fte_attr.dests_num = 1; + fte_attr.dests = &dest; + + default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr); + if (!default_miss) { + mlx5hws_err(ctx, "Failed to default miss table type: 0x%x\n", tbl_type); + return -EINVAL; + } + + /* ctx->ctrl_lock must be held here */ + ctx->common_res[tbl_type].default_miss = default_miss; + ctx->common_res[tbl_type].default_miss->refcount++; + + return 0; +} + +/* Called under ctx->ctrl_lock */ +static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl) +{ + struct mlx5hws_cmd_forward_tbl *default_miss; + struct mlx5hws_context *ctx = tbl->ctx; + u8 tbl_type = tbl->type; + + if (tbl->type != MLX5HWS_TABLE_TYPE_FDB) + return; + + default_miss = ctx->common_res[tbl_type].default_miss; + if (--default_miss->refcount) + return; + + mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss); + ctx->common_res[tbl_type].default_miss = NULL; +} + +static int 
hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + int ret; + + if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB)) + pr_warn("HWS: invalid table type %d\n", tbl->type); + + mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx, + tbl->fw_ft_type, + tbl->type, + &ft_attr); + + ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n"); + return ret; + } + + return 0; +} + +int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev, + struct mlx5hws_table *tbl, + u32 *ft_id) +{ + struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; + int ret; + + hws_table_init_next_ft_attr(tbl, &ft_attr); + hws_table_set_cap_attr(tbl, &ft_attr); + + ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed creating default ft\n"); + return ret; + } + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) { + /* Take/create ref over the default miss */ + ret = hws_table_up_default_fdb_miss_tbl(tbl); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n"); + goto free_ft_obj; + } + ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n"); + goto down_miss_tbl; + } + } + + return 0; + +down_miss_tbl: + hws_table_down_default_fdb_miss_tbl(tbl); +free_ft_obj: + mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id); + return ret; +} + +void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl, + u32 ft_id) +{ + mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id); + hws_table_down_default_fdb_miss_tbl(tbl); +} + +static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx, + struct mlx5hws_table *tbl) +{ + if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) { + mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int hws_table_init(struct mlx5hws_table *tbl) +{ + struct mlx5hws_context *ctx = tbl->ctx; + int ret; + + ret = hws_table_init_check_hws_support(ctx, tbl); + if (ret) + return ret; + + if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) { + pr_warn("HWS: invalid table type %d\n", tbl->type); + return -EOPNOTSUPP; + } + + mutex_lock(&ctx->ctrl_lock); + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to create flow table object\n"); + mutex_unlock(&ctx->ctrl_lock); + return ret; + } + + ret = mlx5hws_action_get_default_stc(ctx, tbl->type); + if (ret) + goto tbl_destroy; + + INIT_LIST_HEAD(&tbl->matchers_list); + INIT_LIST_HEAD(&tbl->default_miss.head); + + mutex_unlock(&ctx->ctrl_lock); + + return 0; + +tbl_destroy: + mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id); + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static void hws_table_uninit(struct mlx5hws_table *tbl) +{ + mutex_lock(&tbl->ctx->ctrl_lock); + mlx5hws_action_put_default_stc(tbl->ctx, tbl->type); + mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id); + mutex_unlock(&tbl->ctx->ctrl_lock); +} + +struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx, + struct mlx5hws_table_attr *attr) +{ + struct mlx5hws_table *tbl; + int ret; + + if (attr->type > MLX5HWS_TABLE_TYPE_FDB) { + mlx5hws_err(ctx, "Invalid table type %d\n", attr->type); + return NULL; + } + + tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); + if (!tbl) + return NULL; + + tbl->ctx = 
ctx; + tbl->type = attr->type; + tbl->level = attr->level; + + ret = hws_table_init(tbl); + if (ret) { + mlx5hws_err(ctx, "Failed to initialise table\n"); + goto free_tbl; + } + + mutex_lock(&ctx->ctrl_lock); + list_add(&tbl->tbl_list_node, &ctx->tbl_list); + mutex_unlock(&ctx->ctrl_lock); + + return tbl; + +free_tbl: + kfree(tbl); + return NULL; +} + +int mlx5hws_table_destroy(struct mlx5hws_table *tbl) +{ + struct mlx5hws_context *ctx = tbl->ctx; + int ret; + + mutex_lock(&ctx->ctrl_lock); + if (!list_empty(&tbl->matchers_list)) { + mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n"); + ret = -EBUSY; + goto unlock_err; + } + + if (!list_empty(&tbl->default_miss.head)) { + mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n"); + ret = -EBUSY; + goto unlock_err; + } + + list_del_init(&tbl->tbl_list_node); + mutex_unlock(&ctx->ctrl_lock); + + hws_table_uninit(tbl); + kfree(tbl); + + return 0; + +unlock_err: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} + +static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl) +{ + struct mlx5hws_matcher *matcher; + + if (list_empty(&tbl->matchers_list)) + return tbl->ft_id; + + matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node); + return matcher->end_ft_id; +} + +int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + int ret; + + /* Due to FW limitation, resetting the flow table to default action will + * disconnect RTC when ignore_flow_level_rtc_valid is not supported. + */ + if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) + return 0; + + if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) + return hws_table_connect_to_default_miss_tbl(tbl, ft_id); + + ft_attr.type = tbl->fw_ft_type; + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; + ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT; + + ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n"); + return ret; + } + + return 0; +} + +int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx, + u32 ft_id, + u32 fw_ft_type, + u32 rtc_0_id, + u32 rtc_1_id) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID; + ft_attr.type = fw_ft_type; + ft_attr.rtc_id_0 = rtc_0_id; + ft_attr.rtc_id_1 = rtc_1_id; + + return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id); +} + +static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx, + u32 ft_id, + u32 fw_ft_type, + u32 next_ft_id) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + + ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; + ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; + ft_attr.type = fw_ft_type; + ft_attr.table_miss_id = next_ft_id; + + return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id); +} + +int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl) +{ + struct mlx5hws_table *src_tbl; + int ret; + + if (list_empty(&dst_tbl->default_miss.head)) + return 0; + + list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) { + ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl); + if (ret) { + mlx5hws_err(dst_tbl->ctx, + "Failed to update source miss table, unexpected behavior\n"); + return ret; + } + } + + return 0; +} + +int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl, + struct 
mlx5hws_table *dst_tbl) +{ + struct mlx5hws_matcher *matcher; + u32 last_ft_id; + int ret; + + last_ft_id = hws_table_get_last_ft(src_tbl); + + if (dst_tbl) { + if (list_empty(&dst_tbl->matchers_list)) { + /* Connect src_tbl last_ft to dst_tbl start anchor */ + ret = hws_table_ft_set_next_ft(src_tbl->ctx, + last_ft_id, + src_tbl->fw_ft_type, + dst_tbl->ft_id); + if (ret) + return ret; + + /* Reset last_ft RTC to default RTC */ + ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx, + last_ft_id, + src_tbl->fw_ft_type, + 0, 0); + if (ret) + return ret; + } else { + /* Connect src_tbl last_ft to first matcher RTC */ + matcher = list_first_entry(&dst_tbl->matchers_list, + struct mlx5hws_matcher, + list_node); + ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx, + last_ft_id, + src_tbl->fw_ft_type, + matcher->match_ste.rtc_0_id, + matcher->match_ste.rtc_1_id); + if (ret) + return ret; + + /* Reset next miss FT to default */ + ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id); + if (ret) + return ret; + } + } else { + /* Reset next miss FT to default */ + ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id); + if (ret) + return ret; + + /* Reset last_ft RTC to default RTC */ + ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx, + last_ft_id, + src_tbl->fw_ft_type, + 0, 0); + if (ret) + return ret; + } + + src_tbl->default_miss.miss_tbl = dst_tbl; + + return 0; +} + +static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl, + struct mlx5hws_table *miss_tbl) +{ + if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) { + mlx5hws_err(tbl->ctx, "Default miss table is not supported\n"); + return -EOPNOTSUPP; + } + + if ((miss_tbl && miss_tbl->type != tbl->type)) { + mlx5hws_err(tbl->ctx, "Invalid arguments\n"); + return -EINVAL; + } + + return 0; +} + +int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl, + struct mlx5hws_table *miss_tbl) +{ + struct mlx5hws_context *ctx = tbl->ctx; + struct mlx5hws_table *old_miss_tbl; + int ret; + + ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl); + if (ret) + return ret; + + mutex_lock(&ctx->ctrl_lock); + + old_miss_tbl = tbl->default_miss.miss_tbl; + ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl); + if (ret) + goto out; + + if (old_miss_tbl) + list_del_init(&tbl->default_miss.next); + + old_miss_tbl = tbl->default_miss.miss_tbl; + if (old_miss_tbl) + list_del_init(&old_miss_tbl->default_miss.head); + + if (miss_tbl) + list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head); + + mutex_unlock(&ctx->ctrl_lock); + return 0; +out: + mutex_unlock(&ctx->ctrl_lock); + return ret; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h new file mode 100644 index 00000000000000..dd50420eec9eae --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_TABLE_H_ +#define MLX5HWS_TABLE_H_ + +struct mlx5hws_default_miss { + /* My miss table */ + struct mlx5hws_table *miss_tbl; + struct list_head next; + /* Tables missing to my table */ + struct list_head head; +}; + +struct mlx5hws_table { + struct mlx5hws_context *ctx; + u32 ft_id; + enum mlx5hws_table_type type; + u32 fw_ft_type; + u32 level; + struct list_head matchers_list; + struct list_head tbl_list_node; + struct mlx5hws_default_miss default_miss; +}; + +static inline +u32 
mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type, + u8 *ret_type) +{ + if (type != MLX5HWS_TABLE_TYPE_FDB) + return -EOPNOTSUPP; + + *ret_type = FS_FT_FDB; + + return 0; +} + +static inline +u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type, + bool is_mirror) +{ + if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) + return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX; + + return 0; +} + +int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev, + struct mlx5hws_table *tbl, + u32 *ft_id); + +void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl, + u32 ft_id); + +int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl, + struct mlx5hws_table *dst_tbl); + +int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl); + +int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id); + +int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx, + u32 ft_id, + u32 fw_ft_type, + u32 rtc_0_id, + u32 rtc_1_id); + +#endif /* MLX5HWS_TABLE_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c new file mode 100644 index 00000000000000..faf42421c43fd6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include "mlx5hws_internal.h" + +int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx) +{ + int ret; + + if (!ctx->caps->eswitch_manager) + return 0; + + xa_init(&ctx->vports.vport_gvmi_xa); + + /* Set gvmi for eswitch manager and uplink vports only. Rest of the vports + * (vport 0 of other function, VFs and SFs) will be queried dynamically. + */ + + ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi); + if (ret) + return ret; + + ctx->vports.uplink_gvmi = 0; + return 0; +} + +void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx) +{ + if (ctx->caps->eswitch_manager) + xa_destroy(&ctx->vports.vport_gvmi_xa); +} + +static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport) +{ + u16 vport_gvmi; + int ret; + + ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi); + if (ret) + return -EINVAL; + + ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport, + xa_mk_value(vport_gvmi), GFP_KERNEL); + if (ret) + mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret); + + return ret; +} + +static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport) +{ + return ctx->caps->is_ecpf ? 
vport == MLX5_VPORT_ECPF : + vport == MLX5_VPORT_PF; +} + +int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi) +{ + void *entry; + int ret; + + if (!ctx->caps->eswitch_manager) + return -EINVAL; + + if (hws_vport_is_esw_mgr_vport(ctx, vport)) { + *vport_gvmi = ctx->vports.esw_manager_gvmi; + return 0; + } + + if (vport == MLX5_VPORT_UPLINK) { + *vport_gvmi = ctx->vports.uplink_gvmi; + return 0; + } + +load_entry: + entry = xa_load(&ctx->vports.vport_gvmi_xa, vport); + + if (!xa_is_value(entry)) { + ret = hws_vport_add_gvmi(ctx, vport); + if (ret && ret != -EBUSY) + return ret; + goto load_entry; + } + + *vport_gvmi = (u16)xa_to_value(entry); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h new file mode 100644 index 00000000000000..0912fc166b3a63 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef MLX5HWS_VPORT_H_ +#define MLX5HWS_VPORT_H_ + +int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx); + +void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx); + +int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi); + +#endif /* MLX5HWS_VPORT_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index d61478c0c63261..e746cd9c68ed2d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -165,52 +165,22 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, return -ENODEV; } -static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev, - struct thermal_cooling_device *cdev) +static bool mlxsw_thermal_should_bind(struct thermal_zone_device *tzdev, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) { struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev); - struct device *dev = thermal->bus_info->dev; - int i, err; + const struct mlxsw_cooling_states *state = trip->priv; /* If the cooling device is one of ours bind it */ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) - return 0; + return false; - for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { - const struct mlxsw_cooling_states *state = &thermal->cooling_states[i]; + c->upper = state->max_state; + c->lower = state->min_state; - err = thermal_zone_bind_cooling_device(tzdev, i, cdev, - state->max_state, - state->min_state, - THERMAL_WEIGHT_DEFAULT); - if (err < 0) { - dev_err(dev, "Failed to bind cooling device to trip %d\n", i); - return err; - } - } - return 0; -} - -static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev, - struct thermal_cooling_device *cdev) -{ - struct mlxsw_thermal *thermal = thermal_zone_device_priv(tzdev); - struct device *dev = thermal->bus_info->dev; - int i; - int err; - - /* If the cooling device is our one unbind it */ - if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) - return 0; - - for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { - err = thermal_zone_unbind_cooling_device(tzdev, i, cdev); - if (err < 0) { - dev_err(dev, "Failed to unbind cooling device\n"); - return err; - } - } - return 0; + return true; } static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev, @@ -240,57 +210,27 @@ static struct 
thermal_zone_params mlxsw_thermal_params = { }; static struct thermal_zone_device_ops mlxsw_thermal_ops = { - .bind = mlxsw_thermal_bind, - .unbind = mlxsw_thermal_unbind, + .should_bind = mlxsw_thermal_should_bind, .get_temp = mlxsw_thermal_get_temp, }; -static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev, - struct thermal_cooling_device *cdev) +static bool mlxsw_thermal_module_should_bind(struct thermal_zone_device *tzdev, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) { struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev); + const struct mlxsw_cooling_states *state = trip->priv; struct mlxsw_thermal *thermal = tz->parent; - int i, j, err; /* If the cooling device is one of ours bind it */ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) - return 0; - - for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { - const struct mlxsw_cooling_states *state = &tz->cooling_states[i]; - - err = thermal_zone_bind_cooling_device(tzdev, i, cdev, - state->max_state, - state->min_state, - THERMAL_WEIGHT_DEFAULT); - if (err < 0) - goto err_thermal_zone_bind_cooling_device; - } - return 0; - -err_thermal_zone_bind_cooling_device: - for (j = i - 1; j >= 0; j--) - thermal_zone_unbind_cooling_device(tzdev, j, cdev); - return err; -} - -static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev, - struct thermal_cooling_device *cdev) -{ - struct mlxsw_thermal_module *tz = thermal_zone_device_priv(tzdev); - struct mlxsw_thermal *thermal = tz->parent; - int i; - int err; + return false; - /* If the cooling device is one of ours unbind it */ - if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) - return 0; + c->upper = state->max_state; + c->lower = state->min_state; - for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { - err = thermal_zone_unbind_cooling_device(tzdev, i, cdev); - WARN_ON(err); - } - return err; + return true; } static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, @@ -313,8 +253,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, } static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { - .bind = mlxsw_thermal_module_bind, - .unbind = mlxsw_thermal_module_unbind, + .should_bind = mlxsw_thermal_module_should_bind, .get_temp = mlxsw_thermal_module_temp_get, }; @@ -342,8 +281,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, } static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { - .bind = mlxsw_thermal_module_bind, - .unbind = mlxsw_thermal_module_unbind, + .should_bind = mlxsw_thermal_module_should_bind, .get_temp = mlxsw_thermal_gearbox_temp_get, }; @@ -411,7 +349,7 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = { static int mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz) { - char tz_name[THERMAL_NAME_LENGTH]; + char tz_name[40]; int err; if (module_tz->slot_index) @@ -445,17 +383,14 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev) thermal_zone_device_unregister(tzdev); } -static void -mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core, - struct mlxsw_thermal *thermal, +static int +mlxsw_thermal_module_init(struct mlxsw_thermal *thermal, struct mlxsw_thermal_area *area, u8 module) { struct mlxsw_thermal_module *module_tz; + int i; module_tz = &area->tz_module_arr[module]; - /* Skip if parent is already set (case of port split). 
*/ - if (module_tz->parent) - return; module_tz->module = module; module_tz->slot_index = area->slot_index; module_tz->parent = thermal; @@ -465,15 +400,15 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core, sizeof(thermal->trips)); memcpy(module_tz->cooling_states, default_cooling_states, sizeof(thermal->cooling_states)); + for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) + module_tz->trips[i].priv = &module_tz->cooling_states[i]; + + return mlxsw_thermal_module_tz_init(module_tz); } static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) { - if (module_tz && module_tz->tzdev) { - mlxsw_thermal_module_tz_fini(module_tz->tzdev); - module_tz->tzdev = NULL; - module_tz->parent = NULL; - } + mlxsw_thermal_module_tz_fini(module_tz->tzdev); } static int @@ -481,7 +416,6 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, struct mlxsw_thermal *thermal, struct mlxsw_thermal_area *area) { - struct mlxsw_thermal_module *module_tz; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; int i, err; @@ -503,22 +437,16 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, if (!area->tz_module_arr) return -ENOMEM; - for (i = 0; i < area->tz_module_num; i++) - mlxsw_thermal_module_init(dev, core, thermal, area, i); - for (i = 0; i < area->tz_module_num; i++) { - module_tz = &area->tz_module_arr[i]; - if (!module_tz->parent) - continue; - err = mlxsw_thermal_module_tz_init(module_tz); + err = mlxsw_thermal_module_init(thermal, area, i); if (err) - goto err_thermal_module_tz_init; + goto err_thermal_module_init; } return 0; -err_thermal_module_tz_init: - for (i = area->tz_module_num - 1; i >= 0; i--) +err_thermal_module_init: + for (i--; i >= 0; i--) mlxsw_thermal_module_fini(&area->tz_module_arr[i]); kfree(area->tz_module_arr); return err; @@ -579,7 +507,7 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, struct mlxsw_thermal_module *gearbox_tz; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; u8 gbox_num; - int i; + int i, j; int err; mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index); @@ -606,6 +534,9 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, sizeof(thermal->trips)); memcpy(gearbox_tz->cooling_states, default_cooling_states, sizeof(thermal->cooling_states)); + for (j = 0; j < MLXSW_THERMAL_NUM_TRIPS; j++) + gearbox_tz->trips[j].priv = &gearbox_tz->cooling_states[j]; + gearbox_tz->module = i; gearbox_tz->parent = thermal; gearbox_tz->slot_index = area->slot_index; @@ -722,6 +653,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core, thermal->bus_info = bus_info; memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips)); memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states)); + for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) + thermal->trips[i].priv = &thermal->cooling_states[i]; + thermal->line_cards[0].slot_index = 0; err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl); @@ -821,10 +755,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core, err_thermal_gearboxes_init: mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]); err_thermal_modules_init: - if (thermal->tzdev) { - thermal_zone_device_unregister(thermal->tzdev); - thermal->tzdev = NULL; - } + thermal_zone_device_unregister(thermal->tzdev); err_thermal_zone_device_register: err_thermal_cooling_device_register: for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) @@ -845,10 +776,7 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) thermal); mlxsw_thermal_gearboxes_fini(thermal, 
&thermal->line_cards[0]); mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]); - if (thermal->tzdev) { - thermal_zone_device_unregister(thermal->tzdev); - thermal->tzdev = NULL; - } + thermal_zone_device_unregister(thermal->tzdev); for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) thermal_cooling_device_unregister(thermal->cdevs[i].cdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f064789f324058..3f5e5d99251b75 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1676,9 +1676,11 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, netif_carrier_off(dev); - dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; + dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_TC; dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; + dev->lltx = true; + dev->netns_local = true; dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR; @@ -2784,7 +2786,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, .shaper_work = mlxsw_sp1_ptp_shaper_work, +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) .get_ts_info = mlxsw_sp1_ptp_get_ts_info, +#endif .get_stats_count = mlxsw_sp1_get_stats_count, .get_stats_strings = mlxsw_sp1_get_stats_strings, .get_stats = mlxsw_sp1_get_stats, @@ -2801,7 +2805,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, .shaper_work = mlxsw_sp2_ptp_shaper_work, +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) .get_ts_info = mlxsw_sp2_ptp_get_ts_info, +#endif .get_stats_count = mlxsw_sp2_get_stats_count, .get_stats_strings = mlxsw_sp2_get_stats_strings, .get_stats = mlxsw_sp2_get_stats, @@ -2818,7 +2824,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = { .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, .shaper_work = mlxsw_sp2_ptp_shaper_work, +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) .get_ts_info = mlxsw_sp2_ptp_get_ts_info, +#endif .get_stats_count = mlxsw_sp2_get_stats_count, .get_stats_strings = mlxsw_sp2_get_stats_strings, .get_stats = mlxsw_sp2_get_stats, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index 769095d4932d58..c8aa1452fbb973 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -11,14 +11,6 @@ struct mlxsw_sp; struct mlxsw_sp_port; struct mlxsw_sp_ptp_clock; -static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct kernel_ethtool_ts_info *info) -{ - info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; - return 0; -} - #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) struct mlxsw_sp_ptp_clock * @@ -151,12 +143,6 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work) { } -static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct kernel_ethtool_ts_info *info) -{ - return mlxsw_sp_ptp_get_ts_info_noptp(info); -} - static inline int mlxsw_sp1_get_stats_count(void) { return 0; @@ -226,12 +212,6 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } -static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - 
struct kernel_ethtool_ts_info *info) -{ - return mlxsw_sp_ptp_get_ts_info_noptp(info); -} - static inline int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, struct mlxsw_sp_port *mlxsw_sp_port, diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig index c002ede3640202..85519690b83778 100644 --- a/drivers/net/ethernet/meta/Kconfig +++ b/drivers/net/ethernet/meta/Kconfig @@ -23,6 +23,8 @@ config FBNIC depends on !S390 depends on MAX_SKB_FRAGS < 22 depends on PCI_MSI + select NET_DEVLINK + select PAGE_POOL select PHYLINK help This driver supports Meta Platforms Host Network Interface. diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile index 9373b558fdc927..ed4533a73c575f 100644 --- a/drivers/net/ethernet/meta/fbnic/Makefile +++ b/drivers/net/ethernet/meta/fbnic/Makefile @@ -8,7 +8,9 @@ obj-$(CONFIG_FBNIC) += fbnic.o fbnic-y := fbnic_devlink.o \ + fbnic_ethtool.o \ fbnic_fw.o \ + fbnic_hw_stats.o \ fbnic_irq.o \ fbnic_mac.o \ fbnic_netdev.o \ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index ad2689bfd6cb8d..0f9e8d79461c63 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -11,6 +11,7 @@ #include "fbnic_csr.h" #include "fbnic_fw.h" +#include "fbnic_hw_stats.h" #include "fbnic_mac.h" #include "fbnic_rpc.h" @@ -47,6 +48,9 @@ struct fbnic_dev { /* Number of TCQs/RCQs available on hardware */ u16 max_num_queues; + + /* Local copy of hardware statistics */ + struct fbnic_hw_stats hw_stats; }; /* Reserve entry 0 in the MSI-X "others" array until we have filled all @@ -132,6 +136,9 @@ void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data); void fbnic_free_irqs(struct fbnic_dev *fbd); int fbnic_alloc_irqs(struct fbnic_dev *fbd); +void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, + const size_t str_sz); + enum fbnic_boards { fbnic_board_asic }; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index a64360de055274..21db509acbc157 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -660,6 +660,43 @@ enum { #define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */ #define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */ +#define FBNIC_CSR_START_MAC_STAT 0x11a00 +#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */ +#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */ +#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L \ + 0x11a0a /* 0x46828 */ +#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H \ + 0x11a0b /* 0x4682c */ +#define FBNIC_MAC_STAT_RX_TOOLONG_L 0x11a0e /* 0x46838 */ +#define FBNIC_MAC_STAT_RX_TOOLONG_H 0x11a0f /* 0x4683c */ +#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L \ + 0x11a12 /* 0x46848 */ +#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H \ + 0x11a13 /* 0x4684c */ +#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_L \ + 0x11a14 /* 0x46850 */ +#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_H \ + 0x11a15 /* 0x46854 */ +#define FBNIC_MAC_STAT_RX_IFINERRORS_L 0x11a18 /* 0x46860 */ +#define FBNIC_MAC_STAT_RX_IFINERRORS_H 0x11a19 /* 0x46864 */ +#define FBNIC_MAC_STAT_RX_MULTICAST_L 0x11a1c /* 0x46870 */ +#define FBNIC_MAC_STAT_RX_MULTICAST_H 0x11a1d /* 0x46874 */ +#define FBNIC_MAC_STAT_RX_BROADCAST_L 0x11a1e /* 0x46878 */ +#define FBNIC_MAC_STAT_RX_BROADCAST_H 0x11a1f /* 0x4687c */ +#define FBNIC_MAC_STAT_TX_BYTE_COUNT_L 0x11a3e /* 0x468f8 */ +#define FBNIC_MAC_STAT_TX_BYTE_COUNT_H 0x11a3f /* 
0x468fc */ +#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_L \ + 0x11a42 /* 0x46908 */ +#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_H \ + 0x11a43 /* 0x4690c */ +#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L \ + 0x11a46 /* 0x46918 */ +#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H \ + 0x11a47 /* 0x4691c */ +#define FBNIC_MAC_STAT_TX_MULTICAST_L 0x11a4a /* 0x46928 */ +#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */ +#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */ +#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */ /* PUL User Registers */ #define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c index e87049dfd223a0..0072d612215e49 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) Meta Platforms, Inc. and affiliates. */ -#include +#include #include #include #include @@ -10,6 +10,56 @@ #define FBNIC_SN_STR_LEN 24 +static int fbnic_version_running_put(struct devlink_info_req *req, + struct fbnic_fw_ver *fw_ver, + char *ver_name) +{ + char running_ver[FBNIC_FW_VER_MAX_SIZE]; + int err; + + fbnic_mk_fw_ver_str(fw_ver->version, running_ver); + err = devlink_info_version_running_put(req, ver_name, running_ver); + if (err) + return err; + + if (strlen(fw_ver->commit) > 0) { + char commit_name[FBNIC_SN_STR_LEN]; + + snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name); + err = devlink_info_version_running_put(req, commit_name, + fw_ver->commit); + if (err) + return err; + } + + return 0; +} + +static int fbnic_version_stored_put(struct devlink_info_req *req, + struct fbnic_fw_ver *fw_ver, + char *ver_name) +{ + char stored_ver[FBNIC_FW_VER_MAX_SIZE]; + int err; + + fbnic_mk_fw_ver_str(fw_ver->version, stored_ver); + err = devlink_info_version_stored_put(req, ver_name, stored_ver); + if (err) + return err; + + if (strlen(fw_ver->commit) > 0) { + char commit_name[FBNIC_SN_STR_LEN]; + + snprintf(commit_name, FBNIC_SN_STR_LEN, "%s.commit", ver_name); + err = devlink_info_version_stored_put(req, commit_name, + fw_ver->commit); + if (err) + return err; + } + + return 0; +} + static int fbnic_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) @@ -17,6 +67,31 @@ static int fbnic_devlink_info_get(struct devlink *devlink, struct fbnic_dev *fbd = devlink_priv(devlink); int err; + err = fbnic_version_running_put(req, &fbd->fw_cap.running.mgmt, + DEVLINK_INFO_VERSION_GENERIC_FW); + if (err) + return err; + + err = fbnic_version_running_put(req, &fbd->fw_cap.running.bootloader, + DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER); + if (err) + return err; + + err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.mgmt, + DEVLINK_INFO_VERSION_GENERIC_FW); + if (err) + return err; + + err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.bootloader, + DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER); + if (err) + return err; + + err = fbnic_version_stored_put(req, &fbd->fw_cap.stored.undi, + DEVLINK_INFO_VERSION_GENERIC_FW_UNDI); + if (err) + return err; + if (fbd->dsn) { unsigned char serial[FBNIC_SN_STR_LEN]; u8 dsn[8]; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c new file mode 100644 index 00000000000000..5d980e17894141 --- /dev/null +++ 
b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -0,0 +1,75 @@ +#include +#include +#include + +#include "fbnic.h" +#include "fbnic_netdev.h" +#include "fbnic_tlv.h" + +static void +fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + + fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version, + sizeof(drvinfo->fw_version)); +} + +static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter) +{ + if (counter->reported) + *stat = counter->value; +} + +static void +fbnic_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *eth_mac_stats) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_mac_stats *mac_stats; + struct fbnic_dev *fbd = fbn->fbd; + const struct fbnic_mac *mac; + + mac_stats = &fbd->hw_stats.mac; + mac = fbd->mac; + + mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac); + + fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK, + &mac_stats->eth_mac.FramesTransmittedOK); + fbnic_set_counter(&eth_mac_stats->FramesReceivedOK, + &mac_stats->eth_mac.FramesReceivedOK); + fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors, + &mac_stats->eth_mac.FrameCheckSequenceErrors); + fbnic_set_counter(&eth_mac_stats->AlignmentErrors, + &mac_stats->eth_mac.AlignmentErrors); + fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK, + &mac_stats->eth_mac.OctetsTransmittedOK); + fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError, + &mac_stats->eth_mac.FramesLostDueToIntMACXmitError); + fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK, + &mac_stats->eth_mac.OctetsReceivedOK); + fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError, + &mac_stats->eth_mac.FramesLostDueToIntMACRcvError); + fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK, + &mac_stats->eth_mac.MulticastFramesXmittedOK); + fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK, + &mac_stats->eth_mac.BroadcastFramesXmittedOK); + fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK, + &mac_stats->eth_mac.MulticastFramesReceivedOK); + fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK, + &mac_stats->eth_mac.BroadcastFramesReceivedOK); + fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors, + &mac_stats->eth_mac.FrameTooLongErrors); +} + +static const struct ethtool_ops fbnic_ethtool_ops = { + .get_drvinfo = fbnic_get_drvinfo, + .get_eth_mac_stats = fbnic_get_eth_mac_stats, +}; + +void fbnic_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &fbnic_ethtool_ops; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index 0c6e1b4c119b62..8f7a2a19ddf802 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -789,3 +789,16 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd) count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN; } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts); } + +void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, + const size_t str_sz) +{ + struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt; + const char *delim = ""; + + if (mgmt->commit[0]) + delim = "_"; + + fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit, + fw_version, str_sz); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h index c65bca613665af..221faf8c67566c 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h @@
-53,10 +53,10 @@ int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership); int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll); void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd); -#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str) \ +#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \ do { \ const u32 __rev_id = _rev_id; \ - snprintf(_str, sizeof(_str), "%02lu.%02lu.%02lu-%03lu%s%s", \ + snprintf(_str, _str_sz, "%02lu.%02lu.%02lu-%03lu%s%s", \ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \ @@ -65,7 +65,7 @@ do { \ } while (0) #define fbnic_mk_fw_ver_str(_rev_id, _str) \ - fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str) + fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str)) #define FW_HEARTBEAT_PERIOD (10 * HZ) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c new file mode 100644 index 00000000000000..a0acc7606aa1aa --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c @@ -0,0 +1,27 @@ +#include "fbnic.h" + +u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset) +{ + u32 prev_upper, upper, lower, diff; + + prev_upper = rd32(fbd, reg + offset); + lower = rd32(fbd, reg); + upper = rd32(fbd, reg + offset); + + diff = upper - prev_upper; + if (!diff) + return ((u64)upper << 32) | lower; + + if (diff > 1) + dev_warn_once(fbd->dev, + "Stats inconsistent, upper 32b of %#010x updating too quickly\n", + reg * 4); + + /* Return only the upper bits as we cannot guarantee + * the accuracy of the lower bits. We will add them in + * when the counter slows down enough that we can get + * a snapshot with both upper values being the same + * between reads. 
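 + * Illustrative example (hypothetical values, not taken from hardware):
 + * if the low 32 bits wrap between the two reads of the upper word, we
 + * might observe prev_upper = 0x1, lower = 0xFFFFFFF0, upper = 0x2. The
 + * stale low word cannot safely be paired with the newer upper word,
 + * since ((u64)upper << 32) | lower would overshoot by nearly 2^32;
 + * returning just the shifted upper word keeps the reported value
 + * within one low-word wrap of the true count.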
+ */ + return ((u64)upper << 32); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h new file mode 100644 index 00000000000000..30348904b51072 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h @@ -0,0 +1,40 @@ +#include + +#include "fbnic_csr.h" + +struct fbnic_stat_counter { + u64 value; + union { + u32 old_reg_value_32; + u64 old_reg_value_64; + } u; + bool reported; +}; + +struct fbnic_eth_mac_stats { + struct fbnic_stat_counter FramesTransmittedOK; + struct fbnic_stat_counter FramesReceivedOK; + struct fbnic_stat_counter FrameCheckSequenceErrors; + struct fbnic_stat_counter AlignmentErrors; + struct fbnic_stat_counter OctetsTransmittedOK; + struct fbnic_stat_counter FramesLostDueToIntMACXmitError; + struct fbnic_stat_counter OctetsReceivedOK; + struct fbnic_stat_counter FramesLostDueToIntMACRcvError; + struct fbnic_stat_counter MulticastFramesXmittedOK; + struct fbnic_stat_counter BroadcastFramesXmittedOK; + struct fbnic_stat_counter MulticastFramesReceivedOK; + struct fbnic_stat_counter BroadcastFramesReceivedOK; + struct fbnic_stat_counter FrameTooLongErrors; +}; + +struct fbnic_mac_stats { + struct fbnic_eth_mac_stats eth_mac; +}; + +struct fbnic_hw_stats { + struct fbnic_mac_stats mac; +}; + +u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset); + +void fbnic_get_hw_stats(struct fbnic_dev *fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 7920e7af82d9c6..7b654d0a6dac66 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -403,6 +403,21 @@ static void fbnic_mac_init_regs(struct fbnic_dev *fbd) fbnic_mac_init_txb(fbd); } +static void __fbnic_mac_stat_rd64(struct fbnic_dev *fbd, bool reset, u32 reg, + struct fbnic_stat_counter *stat) +{ + u64 new_reg_value; + + new_reg_value = fbnic_stat_rd64(fbd, reg, 1); + if (!reset) + stat->value += new_reg_value - stat->u.old_reg_value_64; + stat->u.old_reg_value_64 = new_reg_value; + stat->reported = true; +} + +#define fbnic_mac_stat_rd64(fbd, reset, __stat, __CSR) \ + __fbnic_mac_stat_rd64(fbd, reset, FBNIC_##__CSR##_L, &(__stat)) + static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause) { u32 rxb_pause_ctrl; @@ -637,12 +652,47 @@ static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd, wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg); } +static void +fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset, + struct fbnic_eth_mac_stats *mac_stats) +{ + fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsReceivedOK, + MAC_STAT_RX_BYTE_COUNT); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->AlignmentErrors, + MAC_STAT_RX_ALIGN_ERROR); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameTooLongErrors, + MAC_STAT_RX_TOOLONG); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesReceivedOK, + MAC_STAT_RX_RECEIVED_OK); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameCheckSequenceErrors, + MAC_STAT_RX_PACKET_BAD_FCS); + fbnic_mac_stat_rd64(fbd, reset, + mac_stats->FramesLostDueToIntMACRcvError, + MAC_STAT_RX_IFINERRORS); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesReceivedOK, + MAC_STAT_RX_MULTICAST); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesReceivedOK, + MAC_STAT_RX_BROADCAST); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsTransmittedOK, + MAC_STAT_TX_BYTE_COUNT); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesTransmittedOK, + MAC_STAT_TX_TRANSMITTED_OK); + 
fbnic_mac_stat_rd64(fbd, reset, + mac_stats->FramesLostDueToIntMACXmitError, + MAC_STAT_TX_IFOUTERRORS); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesXmittedOK, + MAC_STAT_TX_MULTICAST); + fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesXmittedOK, + MAC_STAT_TX_BROADCAST); +} + static const struct fbnic_mac fbnic_mac_asic = { .init_regs = fbnic_mac_init_regs, .pcs_enable = fbnic_pcs_enable_asic, .pcs_disable = fbnic_pcs_disable_asic, .pcs_get_link = fbnic_pcs_get_link_asic, .pcs_get_link_event = fbnic_pcs_get_link_event_asic, + .get_eth_mac_stats = fbnic_mac_get_eth_mac_stats, .link_down = fbnic_mac_link_down_asic, .link_up = fbnic_mac_link_up_asic, }; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h index f53be6e6aef9d9..476239a9d381e1 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h @@ -78,6 +78,9 @@ struct fbnic_mac { bool (*pcs_get_link)(struct fbnic_dev *fbd); int (*pcs_get_link_event)(struct fbnic_dev *fbd); + void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset, + struct fbnic_eth_mac_stats *mac_stats); + void (*link_down)(struct fbnic_dev *fbd); void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause); }; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index b7ce6da6854337..a400616a24d416 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -4,6 +4,7 @@ #include #include #include +#include #include "fbnic.h" #include "fbnic_netdev.h" @@ -316,6 +317,74 @@ void fbnic_clear_rx_mode(struct net_device *netdev) __dev_mc_unsync(netdev, NULL); } +static void fbnic_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats64) +{ + u64 tx_bytes, tx_packets, tx_dropped = 0; + u64 rx_bytes, rx_packets, rx_dropped = 0; + struct fbnic_net *fbn = netdev_priv(dev); + struct fbnic_queue_stats *stats; + unsigned int start, i; + + stats = &fbn->tx_stats; + + tx_bytes = stats->bytes; + tx_packets = stats->packets; + tx_dropped = stats->dropped; + + stats64->tx_bytes = tx_bytes; + stats64->tx_packets = tx_packets; + stats64->tx_dropped = tx_dropped; + + for (i = 0; i < fbn->num_tx_queues; i++) { + struct fbnic_ring *txr = fbn->tx[i]; + + if (!txr) + continue; + + stats = &txr->stats; + do { + start = u64_stats_fetch_begin(&stats->syncp); + tx_bytes = stats->bytes; + tx_packets = stats->packets; + tx_dropped = stats->dropped; + } while (u64_stats_fetch_retry(&stats->syncp, start)); + + stats64->tx_bytes += tx_bytes; + stats64->tx_packets += tx_packets; + stats64->tx_dropped += tx_dropped; + } + + stats = &fbn->rx_stats; + + rx_bytes = stats->bytes; + rx_packets = stats->packets; + rx_dropped = stats->dropped; + + stats64->rx_bytes = rx_bytes; + stats64->rx_packets = rx_packets; + stats64->rx_dropped = rx_dropped; + + for (i = 0; i < fbn->num_rx_queues; i++) { + struct fbnic_ring *rxr = fbn->rx[i]; + + if (!rxr) + continue; + + stats = &rxr->stats; + do { + start = u64_stats_fetch_begin(&stats->syncp); + rx_bytes = stats->bytes; + rx_packets = stats->packets; + rx_dropped = stats->dropped; + } while (u64_stats_fetch_retry(&stats->syncp, start)); + + stats64->rx_bytes += rx_bytes; + stats64->rx_packets += rx_packets; + stats64->rx_dropped += rx_dropped; + } +} + static const struct net_device_ops fbnic_netdev_ops = { .ndo_open = fbnic_open, .ndo_stop = fbnic_stop, @@ -324,6 +393,72 @@ static const struct net_device_ops 
fbnic_netdev_ops = { .ndo_features_check = fbnic_features_check, .ndo_set_mac_address = fbnic_set_mac, .ndo_set_rx_mode = fbnic_set_rx_mode, + .ndo_get_stats64 = fbnic_get_stats64, +}; + +static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, + struct netdev_queue_stats_rx *rx) +{ + struct fbnic_net *fbn = netdev_priv(dev); + struct fbnic_ring *rxr = fbn->rx[idx]; + struct fbnic_queue_stats *stats; + unsigned int start; + u64 bytes, packets; + + if (!rxr) + return; + + stats = &rxr->stats; + do { + start = u64_stats_fetch_begin(&stats->syncp); + bytes = stats->bytes; + packets = stats->packets; + } while (u64_stats_fetch_retry(&stats->syncp, start)); + + rx->bytes = bytes; + rx->packets = packets; +} + +static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx, + struct netdev_queue_stats_tx *tx) +{ + struct fbnic_net *fbn = netdev_priv(dev); + struct fbnic_ring *txr = fbn->tx[idx]; + struct fbnic_queue_stats *stats; + unsigned int start; + u64 bytes, packets; + + if (!txr) + return; + + stats = &txr->stats; + do { + start = u64_stats_fetch_begin(&stats->syncp); + bytes = stats->bytes; + packets = stats->packets; + } while (u64_stats_fetch_retry(&stats->syncp, start)); + + tx->bytes = bytes; + tx->packets = packets; +} + +static void fbnic_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct fbnic_net *fbn = netdev_priv(dev); + + tx->bytes = fbn->tx_stats.bytes; + tx->packets = fbn->tx_stats.packets; + + rx->bytes = fbn->rx_stats.bytes; + rx->packets = fbn->rx_stats.packets; +} + +static const struct netdev_stat_ops fbnic_stat_ops = { + .get_queue_stats_rx = fbnic_get_queue_stats_rx, + .get_queue_stats_tx = fbnic_get_queue_stats_tx, + .get_base_stats = fbnic_get_base_stats, }; void fbnic_reset_queues(struct fbnic_net *fbn, @@ -384,6 +519,9 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd) fbd->netdev = netdev; netdev->netdev_ops = &fbnic_netdev_ops; + netdev->stat_ops = &fbnic_stat_ops; + + fbnic_set_ethtool_ops(netdev); fbn = netdev_priv(netdev); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h index 6bc0ebeb81826b..6c27da09a6122b 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h @@ -40,6 +40,9 @@ struct fbnic_net { u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN]; u32 rss_flow_hash[FBNIC_NUM_HASH_OPT]; + /* Storage for stats after ring destruction */ + struct fbnic_queue_stats tx_stats; + struct fbnic_queue_stats rx_stats; u64 link_down_events; struct list_head napis; @@ -55,6 +58,7 @@ int fbnic_netdev_register(struct net_device *netdev); void fbnic_netdev_unregister(struct net_device *netdev); void fbnic_reset_queues(struct fbnic_net *fbn, unsigned int tx, unsigned int rx); +void fbnic_set_ethtool_ops(struct net_device *dev); void __fbnic_set_rx_mode(struct net_device *netdev); void fbnic_clear_rx_mode(struct net_device *netdev); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index 0ed4c9fff5d807..6a6d7e22f1a722 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -273,6 +273,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring) err_free: dev_kfree_skb_any(skb); err_count: + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.dropped++; + u64_stats_update_end(&ring->stats.syncp); return NETDEV_TX_OK; } @@ -363,10 +366,19 @@ 
static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, txq = txring_txq(nv->napi.dev, ring); if (unlikely(discard)) { + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.dropped += total_packets; + u64_stats_update_end(&ring->stats.syncp); + netdev_tx_completed_queue(txq, total_packets, total_bytes); return; } + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.bytes += total_bytes; + ring->stats.packets += total_packets; + u64_stats_update_end(&ring->stats.syncp); + netif_txq_completed_wake(txq, total_packets, total_bytes, fbnic_desc_unused(ring), FBNIC_TX_DESC_WAKEUP); @@ -730,12 +742,12 @@ static bool fbnic_rcd_metadata_err(u64 rcd) static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt, int budget) { + unsigned int packets = 0, bytes = 0, dropped = 0; struct fbnic_ring *rcq = &qt->cmpl; struct fbnic_pkt_buff *pkt; s32 head0 = -1, head1 = -1; __le64 *raw_rcd, done; u32 head = rcq->head; - u64 packets = 0; done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0; raw_rcd = &rcq->desc[head & rcq->size_mask]; @@ -780,9 +792,11 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, fbnic_populate_skb_fields(nv, rcd, skb, qt); packets++; + bytes += skb->len; napi_gro_receive(&nv->napi, skb); } else { + dropped++; fbnic_put_pkt_buff(nv, pkt, 1); } @@ -799,6 +813,14 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, } } + u64_stats_update_begin(&rcq->stats.syncp); + rcq->stats.packets += packets; + rcq->stats.bytes += bytes; + /* Re-add ethernet header length (removed in fbnic_build_skb) */ + rcq->stats.bytes += ETH_HLEN * packets; + rcq->stats.dropped += dropped; + u64_stats_update_end(&rcq->stats.syncp); + /* Unmap and free processed buffers */ if (head0 >= 0) fbnic_clean_bdq(nv, budget, &qt->sub0, head0); @@ -865,12 +887,36 @@ static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data) return IRQ_HANDLED; } +static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn, + struct fbnic_ring *rxr) +{ + struct fbnic_queue_stats *stats = &rxr->stats; + + /* Capture stats from queues before disassociating them */ + fbn->rx_stats.bytes += stats->bytes; + fbn->rx_stats.packets += stats->packets; + fbn->rx_stats.dropped += stats->dropped; +} + +static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, + struct fbnic_ring *txr) +{ + struct fbnic_queue_stats *stats = &txr->stats; + + /* Capture stats from queues before disassociating them */ + fbn->tx_stats.bytes += stats->bytes; + fbn->tx_stats.packets += stats->packets; + fbn->tx_stats.dropped += stats->dropped; +} + static void fbnic_remove_tx_ring(struct fbnic_net *fbn, struct fbnic_ring *txr) { if (!(txr->flags & FBNIC_RING_F_STATS)) return; + fbnic_aggregate_ring_tx_counters(fbn, txr); + /* Remove pointer to the Tx ring */ WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr); fbn->tx[txr->q_idx] = NULL; @@ -882,6 +928,8 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn, if (!(rxr->flags & FBNIC_RING_F_STATS)) return; + fbnic_aggregate_ring_rx_counters(fbn, rxr); + /* Remove pointer to the Rx ring */ WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr); fbn->rx[rxr->q_idx] = NULL; @@ -974,6 +1022,7 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn, static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell, int q_idx, u8 flags) { + u64_stats_init(&ring->stats.syncp); ring->doorbell = doorbell; ring->q_idx = q_idx; ring->flags = flags; @@ -1012,14 +1061,14 @@ static int
fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn, nv->fbd = fbd; nv->v_idx = v_idx; - /* Record IRQ to NAPI struct */ - netif_napi_set_irq(&nv->napi, - pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); - /* Tie napi to netdev */ list_add(&nv->napis, &fbn->napis); netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll); + /* Record IRQ to NAPI struct */ + netif_napi_set_irq(&nv->napi, + pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); + /* Tie nv back to PCIe dev */ nv->dev = fbd->dev; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h index 4a206c0e71924e..2f91f68d11d57c 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -7,6 +7,7 @@ #include #include #include +#include #include struct fbnic_net; @@ -51,6 +52,13 @@ struct fbnic_pkt_buff { u16 nr_frags; }; +struct fbnic_queue_stats { + u64 packets; + u64 bytes; + u64 dropped; + struct u64_stats_sync syncp; +}; + /* Pagecnt bias is long max to reserve the last bit to catch overflow * cases where if we overcharge the bias it will flip over to be negative. */ @@ -77,6 +85,8 @@ struct fbnic_ring { u32 head, tail; /* Head/Tail of ring */ + struct fbnic_queue_stats stats; + /* Slow path fields follow */ dma_addr_t dma; /* Phys addr of descriptor memory */ size_t size; /* Size of descriptor ring in memory */ diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 43ba71e82260c1..ee046468652c75 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -46,18 +46,21 @@ config LAN743X tristate "LAN743x support" depends on PCI depends on PTP_1588_CLOCK_OPTIONAL - select PHYLIB select FIXED_PHY select CRC16 select CRC32 + select PHYLINK help - Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip + Support for the Microchip LAN743x and PCI11x1x families of PCI + Express Ethernet devices To compile this driver as a module, choose M here. The module will be called lan743x. +source "drivers/net/ethernet/microchip/lan865x/Kconfig" source "drivers/net/ethernet/microchip/lan966x/Kconfig" source "drivers/net/ethernet/microchip/sparx5/Kconfig" source "drivers/net/ethernet/microchip/vcap/Kconfig" +source "drivers/net/ethernet/microchip/fdma/Kconfig" endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile index bbd349264e6f61..3c65baed9fd876 100644 --- a/drivers/net/ethernet/microchip/Makefile +++ b/drivers/net/ethernet/microchip/Makefile @@ -9,6 +9,8 @@ obj-$(CONFIG_LAN743X) += lan743x.o lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o +obj-$(CONFIG_LAN865X) += lan865x/ obj-$(CONFIG_LAN966X_SWITCH) += lan966x/ obj-$(CONFIG_SPARX5_SWITCH) += sparx5/ obj-$(CONFIG_VCAP) += vcap/ +obj-$(CONFIG_FDMA) += fdma/ diff --git a/drivers/net/ethernet/microchip/fdma/Kconfig b/drivers/net/ethernet/microchip/fdma/Kconfig new file mode 100644 index 00000000000000..ec228c0613517e --- /dev/null +++ b/drivers/net/ethernet/microchip/fdma/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Microchip FDMA API configuration +# + +if NET_VENDOR_MICROCHIP + +config FDMA + bool "FDMA API" if COMPILE_TEST + help + Provides the basic FDMA functionality for multiple Microchip + switchcores. 
+ + Say Y here if you want to build the FDMA API that provides a common + set of functions and data structures for interacting with the Frame + DMA engine in multiple microchip switchcores. + +endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/fdma/Makefile b/drivers/net/ethernet/microchip/fdma/Makefile new file mode 100644 index 00000000000000..cc9a736be35732 --- /dev/null +++ b/drivers/net/ethernet/microchip/fdma/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for Microchip FDMA +# + +obj-$(CONFIG_FDMA) += fdma.o +fdma-y += fdma_api.o diff --git a/drivers/net/ethernet/microchip/fdma/fdma_api.c b/drivers/net/ethernet/microchip/fdma/fdma_api.c new file mode 100644 index 00000000000000..e78c3590da9e61 --- /dev/null +++ b/drivers/net/ethernet/microchip/fdma/fdma_api.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "fdma_api.h" + +#include +#include +#include + +/* Add a DB to a DCB, providing a callback for getting the DB dataptr. */ +static int __fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status, + int (*cb)(struct fdma *fdma, int dcb_idx, + int db_idx, u64 *dataptr)) +{ + struct fdma_db *db = fdma_db_get(fdma, dcb_idx, db_idx); + + db->status = status; + + return cb(fdma, dcb_idx, db_idx, &db->dataptr); +} + +/* Add a DB to a DCB, using the callback set in the fdma_ops struct. */ +int fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status) +{ + return __fdma_db_add(fdma, + dcb_idx, + db_idx, + status, + fdma->ops.dataptr_cb); +} + +/* Add a DCB with callbacks for getting the DB dataptr and the DCB nextptr. */ +int __fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status, + int (*dcb_cb)(struct fdma *fdma, int dcb_idx, u64 *nextptr), + int (*db_cb)(struct fdma *fdma, int dcb_idx, int db_idx, + u64 *dataptr)) +{ + struct fdma_dcb *dcb = fdma_dcb_get(fdma, dcb_idx); + int i, err; + + for (i = 0; i < fdma->n_dbs; i++) { + err = __fdma_db_add(fdma, dcb_idx, i, status, db_cb); + if (unlikely(err)) + return err; + } + + err = dcb_cb(fdma, dcb_idx, &fdma->last_dcb->nextptr); + if (unlikely(err)) + return err; + + fdma->last_dcb = dcb; + + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = info; + + return 0; +} +EXPORT_SYMBOL_GPL(__fdma_dcb_add); + +/* Add a DCB, using the preset callbacks in the fdma_ops struct. */ +int fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status) +{ + return __fdma_dcb_add(fdma, + dcb_idx, + info, status, + fdma->ops.nextptr_cb, + fdma->ops.dataptr_cb); +} +EXPORT_SYMBOL_GPL(fdma_dcb_add); + +/* Initialize the DCB's and DB's. */ +int fdma_dcbs_init(struct fdma *fdma, u64 info, u64 status) +{ + int i, err; + + fdma->last_dcb = fdma->dcbs; + fdma->db_index = 0; + fdma->dcb_index = 0; + + for (i = 0; i < fdma->n_dcbs; i++) { + err = fdma_dcb_add(fdma, i, info, status); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(fdma_dcbs_init); + +/* Allocate coherent DMA memory for FDMA. */ +int fdma_alloc_coherent(struct device *dev, struct fdma *fdma) +{ + fdma->dcbs = dma_alloc_coherent(dev, + fdma->size, + &fdma->dma, + GFP_KERNEL); + if (!fdma->dcbs) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(fdma_alloc_coherent); + +/* Allocate physical memory for FDMA. */ +int fdma_alloc_phys(struct fdma *fdma) +{ + fdma->dcbs = kzalloc(fdma->size, GFP_KERNEL); + if (!fdma->dcbs) + return -ENOMEM; + + fdma->dma = virt_to_phys(fdma->dcbs); + + return 0; +} +EXPORT_SYMBOL_GPL(fdma_alloc_phys); + +/* Free coherent DMA memory. 
*/ +void fdma_free_coherent(struct device *dev, struct fdma *fdma) +{ + dma_free_coherent(dev, fdma->size, fdma->dcbs, fdma->dma); +} +EXPORT_SYMBOL_GPL(fdma_free_coherent); + +/* Free virtual memory. */ +void fdma_free_phys(struct fdma *fdma) +{ + kfree(fdma->dcbs); +} +EXPORT_SYMBOL_GPL(fdma_free_phys); + +/* Get the size of the FDMA memory */ +u32 fdma_get_size(struct fdma *fdma) +{ + return ALIGN(sizeof(struct fdma_dcb) * fdma->n_dcbs, PAGE_SIZE); +} +EXPORT_SYMBOL_GPL(fdma_get_size); + +/* Get the size of the FDMA memory. This function is only applicable if the + * dataptr addresses and DCB's are in contiguous memory. + */ +u32 fdma_get_size_contiguous(struct fdma *fdma) +{ + return ALIGN(fdma->n_dcbs * sizeof(struct fdma_dcb) + + fdma->n_dcbs * fdma->n_dbs * fdma->db_size, + PAGE_SIZE); +} +EXPORT_SYMBOL_GPL(fdma_get_size_contiguous); diff --git a/drivers/net/ethernet/microchip/fdma/fdma_api.h b/drivers/net/ethernet/microchip/fdma/fdma_api.h new file mode 100644 index 00000000000000..d91affe8bd988e --- /dev/null +++ b/drivers/net/ethernet/microchip/fdma/fdma_api.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _FDMA_API_H_ +#define _FDMA_API_H_ + +#include +#include +#include + +/* This provides a common set of functions and data structures for interacting + * with the Frame DMA engine on multiple Microchip switchcores. + * + * Frame DMA DCB format: + * + * +---------------------------+ + * | Next Ptr | + * +---------------------------+ + * | Reserved | Info | + * +---------------------------+ + * | Data0 Ptr | + * +---------------------------+ + * | Reserved | Status0 | + * +---------------------------+ + * | Data1 Ptr | + * +---------------------------+ + * | Reserved | Status1 | + * +---------------------------+ + * | Data2 Ptr | + * +---------------------------+ + * | Reserved | Status2 | + * |-------------|-------------| + * | | + * | | + * | | + * | | + * | | + * |---------------------------| + * | Data14 Ptr | + * +-------------|-------------+ + * | Reserved | Status14 | + * +-------------|-------------+ + * + * The data pointers point to the actual frame data to be received or sent. The + * addresses of the data pointers can, as of this writing, be either a DMA address, + * a physical address or a mapped address.
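 + *
 + * A rough usage sketch (illustrative only, not part of this patch; the
 + * driver-side names and sizes below are placeholders):
 + *
 + *     struct fdma *fdma = &priv->rx_fdma;
 + *
 + *     fdma->priv = priv;
 + *     fdma->n_dcbs = 64;
 + *     fdma->n_dbs = 1;
 + *     fdma->db_size = 2048;
 + *     fdma->channel_id = 0;
 + *     fdma->ops.nextptr_cb = &fdma_nextptr_cb;
 + *     fdma->ops.dataptr_cb = &my_dataptr_cb;
 + *     fdma->size = fdma_get_size(fdma);
 + *
 + *     err = fdma_alloc_coherent(dev, fdma);
 + *     if (!err)
 + *             err = fdma_dcbs_init(fdma,
 + *                                  FDMA_DCB_INFO_DATAL(fdma->db_size),
 + *                                  FDMA_DCB_STATUS_INTR);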
+ * + */ + +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_INFO_TOKEN BIT(17) +#define FDMA_DCB_INFO_INTR BIT(18) +#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_DB_MAX 15 /* Max number of DB's on Sparx5 */ + +struct fdma; + +struct fdma_db { + u64 dataptr; + u64 status; +}; + +struct fdma_dcb { + u64 nextptr; + u64 info; + struct fdma_db db[FDMA_DB_MAX]; +}; + +struct fdma_ops { + /* User-provided callback to set the dataptr */ + int (*dataptr_cb)(struct fdma *fdma, int dcb_idx, int db_idx, u64 *ptr); + /* User-provided callback to set the nextptr */ + int (*nextptr_cb)(struct fdma *fdma, int dcb_idx, u64 *ptr); +}; + +struct fdma { + void *priv; + + /* Virtual addresses */ + struct fdma_dcb *dcbs; + struct fdma_dcb *last_dcb; + + /* DMA address */ + dma_addr_t dma; + + /* Size of DCB + DB memory */ + int size; + + /* Indexes used to access the next-to-be-used DCB or DB */ + int db_index; + int dcb_index; + + /* Number of DCB's and DB's */ + u32 n_dcbs; + u32 n_dbs; + + /* Size of DB's */ + u32 db_size; + + /* Channel id this FDMA object operates on */ + u32 channel_id; + + struct fdma_ops ops; +}; + +/* Advance the DCB index and wrap if required. */ +static inline void fdma_dcb_advance(struct fdma *fdma) +{ + fdma->dcb_index++; + if (fdma->dcb_index >= fdma->n_dcbs) + fdma->dcb_index = 0; +} + +/* Advance the DB index. */ +static inline void fdma_db_advance(struct fdma *fdma) +{ + fdma->db_index++; +} + +/* Reset the db index to zero. */ +static inline void fdma_db_reset(struct fdma *fdma) +{ + fdma->db_index = 0; +} + +/* Check if a DCB can be reused in case of multiple DB's per DCB. */ +static inline bool fdma_dcb_is_reusable(struct fdma *fdma) +{ + return fdma->db_index != fdma->n_dbs; +} + +/* Check if the FDMA has marked this DB as done. */ +static inline bool fdma_db_is_done(struct fdma_db *db) +{ + return db->status & FDMA_DCB_STATUS_DONE; +} + +/* Get the length of a DB. */ +static inline int fdma_db_len_get(struct fdma_db *db) +{ + return FDMA_DCB_STATUS_BLOCKL(db->status); +} + +/* Set the length of a DB. */ +static inline void fdma_dcb_len_set(struct fdma_dcb *dcb, u32 len) +{ + dcb->info = FDMA_DCB_INFO_DATAL(len); +} + +/* Get a DB by index. */ +static inline struct fdma_db *fdma_db_get(struct fdma *fdma, int dcb_idx, + int db_idx) +{ + return &fdma->dcbs[dcb_idx].db[db_idx]; +} + +/* Get the next DB. */ +static inline struct fdma_db *fdma_db_next_get(struct fdma *fdma) +{ + return fdma_db_get(fdma, fdma->dcb_index, fdma->db_index); +} + +/* Get a DCB by index. */ +static inline struct fdma_dcb *fdma_dcb_get(struct fdma *fdma, int dcb_idx) +{ + return &fdma->dcbs[dcb_idx]; +} + +/* Get the next DCB. */ +static inline struct fdma_dcb *fdma_dcb_next_get(struct fdma *fdma) +{ + return fdma_dcb_get(fdma, fdma->dcb_index); +} + +/* Check if the FDMA has frames ready for extraction. 
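 + *
 + * A minimal RX drain sketch (illustrative only, not part of this patch;
 + * real drivers also handle DMA syncing, SKB construction and the NAPI
 + * budget):
 + *
 + *     while (fdma_has_frames(fdma)) {
 + *             struct fdma_db *db = fdma_db_next_get(fdma);
 + *
 + *             // hand the frame at db->dataptr to the stack here
 + *
 + *             fdma_db_advance(fdma);
 + *             if (fdma_dcb_is_reusable(fdma))
 + *                     continue;
 + *             fdma_dcb_add(fdma, fdma->dcb_index,
 + *                          FDMA_DCB_INFO_DATAL(fdma->db_size),
 + *                          FDMA_DCB_STATUS_INTR);
 + *             fdma_db_reset(fdma);
 + *             fdma_dcb_advance(fdma);
 + *     }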
*/ +static inline bool fdma_has_frames(struct fdma *fdma) +{ + return fdma_db_is_done(fdma_db_next_get(fdma)); +} + +/* Get a nextptr by index */ +static inline int fdma_nextptr_cb(struct fdma *fdma, int dcb_idx, u64 *nextptr) +{ + *nextptr = fdma->dma + (sizeof(struct fdma_dcb) * dcb_idx); + return 0; +} + +/* Get the DMA address of a dataptr, by index. This function is only applicable + * if the dataptr addresses and DCB's are in contiguous memory and the driver + * supports XDP. + */ +static inline u64 fdma_dataptr_get_contiguous(struct fdma *fdma, int dcb_idx, + int db_idx) +{ + return fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) + + (dcb_idx * fdma->n_dbs + db_idx) * fdma->db_size + + XDP_PACKET_HEADROOM; +} + +/* Get the virtual address of a dataptr, by index. This function is only + * applicable if the dataptr addresses and DCB's are in contiguous memory and + * the driver supports XDP. + */ +static inline void *fdma_dataptr_virt_get_contiguous(struct fdma *fdma, + int dcb_idx, int db_idx) +{ + return (u8 *)fdma->dcbs + (sizeof(struct fdma_dcb) * fdma->n_dcbs) + + (dcb_idx * fdma->n_dbs + db_idx) * fdma->db_size + + XDP_PACKET_HEADROOM; +} + +/* Check if this DCB is the last used DCB. */ +static inline bool fdma_is_last(struct fdma *fdma, struct fdma_dcb *dcb) +{ + return dcb == fdma->last_dcb; +} + +int fdma_dcbs_init(struct fdma *fdma, u64 info, u64 status); +int fdma_db_add(struct fdma *fdma, int dcb_idx, int db_idx, u64 status); +int fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status); +int __fdma_dcb_add(struct fdma *fdma, int dcb_idx, u64 info, u64 status, + int (*dcb_cb)(struct fdma *fdma, int dcb_idx, u64 *nextptr), + int (*db_cb)(struct fdma *fdma, int dcb_idx, int db_idx, + u64 *dataptr)); + +int fdma_alloc_coherent(struct device *dev, struct fdma *fdma); +int fdma_alloc_phys(struct fdma *fdma); + +void fdma_free_coherent(struct device *dev, struct fdma *fdma); +void fdma_free_phys(struct fdma *fdma); + +u32 fdma_get_size(struct fdma *fdma); +u32 fdma_get_size_contiguous(struct fdma *fdma); + +#endif diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 3a63ec09141385..1a1cbd034eda0a 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1034,16 +1034,12 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev, struct lan743x_adapter *adapter = netdev_priv(netdev); ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (adapter->ptp.ptp_clock) ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock); - else - ts_info->phc_index = -1; ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | @@ -1058,61 +1054,55 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev, struct ethtool_keee *eee) { struct lan743x_adapter *adapter = netdev_priv(netdev); - struct phy_device *phydev = netdev->phydev; - u32 buf; - int ret; - - if (!phydev) - return -EIO; - if (!phydev->drv) { - netif_err(adapter, drv, adapter->netdev, - "Missing PHY Driver\n"); - return -EIO; - } - ret = phy_ethtool_get_eee(phydev, eee); - if (ret < 0) - return ret; + eee->tx_lpi_timer = lan743x_csr_read(adapter, + MAC_EEE_TX_LPI_REQ_DLY_CNT); - buf = lan743x_csr_read(adapter, MAC_CR); - if (buf & MAC_CR_EEE_EN_) { - /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ 
- buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); - eee->tx_lpi_timer = buf; - } else { - eee->tx_lpi_timer = 0; - } - - return 0; + return phylink_ethtool_get_eee(adapter->phylink, eee); } static int lan743x_ethtool_set_eee(struct net_device *netdev, struct ethtool_keee *eee) { - struct lan743x_adapter *adapter; - struct phy_device *phydev; - u32 buf = 0; + struct lan743x_adapter *adapter = netdev_priv(netdev); + u32 tx_lpi_timer; - if (!netdev) - return -EINVAL; - adapter = netdev_priv(netdev); - if (!adapter) - return -EINVAL; - phydev = netdev->phydev; - if (!phydev) - return -EIO; - if (!phydev->drv) { - netif_err(adapter, drv, adapter->netdev, - "Missing PHY Driver\n"); - return -EIO; - } + tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); + if (tx_lpi_timer != eee->tx_lpi_timer) { + u32 mac_cr = lan743x_csr_read(adapter, MAC_CR); + + /* Software should only change this field when Energy Efficient + * Ethernet Enable (EEEEN) is cleared. + * This function will trigger an autonegotiation restart and + * eee will be reenabled during link up if eee was negotiated. + */ + lan743x_mac_eee_enable(adapter, false); + lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, + eee->tx_lpi_timer); - if (eee->eee_enabled) { - buf = (u32)eee->tx_lpi_timer; - lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf); + if (mac_cr & MAC_CR_EEE_EN_) + lan743x_mac_eee_enable(adapter, true); } - return phy_ethtool_set_eee(phydev, eee); + return phylink_ethtool_set_eee(adapter->phylink, eee); +} + +static int +lan743x_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return phylink_ethtool_ksettings_set(adapter->phylink, cmd); +} + +static int +lan743x_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return phylink_ethtool_ksettings_get(adapter->phylink, cmd); } #ifdef CONFIG_PM @@ -1124,8 +1114,7 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev, wol->supported = 0; wol->wolopts = 0; - if (netdev->phydev) - phy_ethtool_get_wol(netdev->phydev, wol); + phylink_ethtool_get_wol(adapter->phylink, wol); if (wol->supported != adapter->phy_wol_supported) netif_warn(adapter, drv, adapter->netdev, @@ -1166,7 +1155,7 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev, !(adapter->phy_wol_supported & WAKE_MAGICSECURE)) phy_wol.wolopts &= ~WAKE_MAGIC; - ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol); + ret = phylink_ethtool_set_wol(adapter->phylink, wol); if (ret && (ret != -EOPNOTSUPP)) return ret; @@ -1355,44 +1344,16 @@ static void lan743x_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { struct lan743x_adapter *adapter = netdev_priv(dev); - struct lan743x_phy *phy = &adapter->phy; - if (phy->fc_request_control & FLOW_CTRL_TX) - pause->tx_pause = 1; - if (phy->fc_request_control & FLOW_CTRL_RX) - pause->rx_pause = 1; - pause->autoneg = phy->fc_autoneg; + phylink_ethtool_get_pauseparam(adapter->phylink, pause); } static int lan743x_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { struct lan743x_adapter *adapter = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; - struct lan743x_phy *phy = &adapter->phy; - - if (!phydev) - return -ENODEV; - - if (!phy_validate_pause(phydev, pause)) - return -EINVAL; - - phy->fc_request_control = 0; - if (pause->rx_pause) - 
phy->fc_request_control |= FLOW_CTRL_RX; - if (pause->tx_pause) - phy->fc_request_control |= FLOW_CTRL_TX; - - phy->fc_autoneg = pause->autoneg; - - if (pause->autoneg == AUTONEG_DISABLE) - lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause, - pause->rx_pause); - else - phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); - - return 0; + return phylink_ethtool_set_pauseparam(adapter->phylink, pause); } const struct ethtool_ops lan743x_ethtool_ops = { @@ -1417,8 +1378,8 @@ const struct ethtool_ops lan743x_ethtool_ops = { .get_ts_info = lan743x_ethtool_get_ts_info, .get_eee = lan743x_ethtool_get_eee, .set_eee = lan743x_ethtool_set_eee, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link_ksettings = lan743x_ethtool_get_link_ksettings, + .set_link_ksettings = lan743x_ethtool_set_link_ksettings, .get_regs_len = lan743x_get_regs_len, .get_regs = lan743x_get_regs, .get_pauseparam = lan743x_get_pauseparam, diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e418539565b18d..4dc5adcda6a381 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "lan743x_main.h" #include "lan743x_ethtool.h" @@ -992,6 +993,42 @@ static int lan743x_sgmii_write(struct lan743x_adapter *adapter, return ret; } +static int lan743x_get_lsd(int speed, int duplex, u8 mss) +{ + int lsd; + + switch (speed) { + case SPEED_2500: + if (mss == MASTER_SLAVE_STATE_SLAVE) + lsd = LINK_2500_SLAVE; + else + lsd = LINK_2500_MASTER; + break; + case SPEED_1000: + if (mss == MASTER_SLAVE_STATE_SLAVE) + lsd = LINK_1000_SLAVE; + else + lsd = LINK_1000_MASTER; + break; + case SPEED_100: + if (duplex == DUPLEX_FULL) + lsd = LINK_100FD; + else + lsd = LINK_100HD; + break; + case SPEED_10: + if (duplex == DUPLEX_FULL) + lsd = LINK_10FD; + else + lsd = LINK_10HD; + break; + default: + lsd = -EINVAL; + } + + return lsd; +} + static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter, u16 baud) { @@ -1041,26 +1078,7 @@ static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter, VR_MII_BAUD_RATE_1P25GBPS); } -static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter, - bool *status) -{ - int ret; - - ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, - VR_MII_GEN2_4_MPLL_CTRL1); - if (ret < 0) - return ret; - - if (ret == VR_MII_MPLL_MULTIPLIER_125 || - ret == VR_MII_MPLL_MULTIPLIER_50) - *status = true; - else - *status = false; - - return 0; -} - -static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter) +static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter) { enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd; int mii_ctrl; @@ -1147,68 +1165,11 @@ static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state) return 0; } -static int lan743x_sgmii_config(struct lan743x_adapter *adapter) +static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - struct phy_device *phydev = netdev->phydev; - enum lan743x_sgmii_lsd lsd = POWER_DOWN; int mii_ctl; - bool status; int ret; - switch (phydev->speed) { - case SPEED_2500: - if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER) - lsd = LINK_2500_MASTER; - else - lsd = LINK_2500_SLAVE; - break; - case SPEED_1000: - if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER) - lsd = LINK_1000_MASTER; - 
else - lsd = LINK_1000_SLAVE; - break; - case SPEED_100: - if (phydev->duplex) - lsd = LINK_100FD; - else - lsd = LINK_100HD; - break; - case SPEED_10: - if (phydev->duplex) - lsd = LINK_10FD; - else - lsd = LINK_10HD; - break; - default: - netif_err(adapter, drv, adapter->netdev, - "Invalid speed %d\n", phydev->speed); - return -EINVAL; - } - - adapter->sgmii_lsd = lsd; - ret = lan743x_sgmii_aneg_update(adapter); - if (ret < 0) { - netif_err(adapter, drv, adapter->netdev, - "error %d SGMII cfg failed\n", ret); - return ret; - } - - ret = lan743x_is_sgmii_2_5G_mode(adapter, &status); - if (ret < 0) { - netif_err(adapter, drv, adapter->netdev, - "error %d SGMII get mode failed\n", ret); - return ret; - } - - if (status) - netif_dbg(adapter, drv, adapter->netdev, - "SGMII 2.5G mode enable\n"); - else - netif_dbg(adapter, drv, adapter->netdev, - "SGMII 1G mode enable\n"); - /* SGMII/1000/2500BASE-X PCS power down */ mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR); if (mii_ctl < 0) @@ -1229,11 +1190,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter) if (ret < 0) return ret; - ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP); - if (ret < 0) - return ret; - - return 0; + return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP); } static void lan743x_mac_set_address(struct lan743x_adapter *adapter, @@ -1389,103 +1346,11 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter) 50000, 1000000); } -static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, - u16 local_adv, u16 remote_adv) -{ - struct lan743x_phy *phy = &adapter->phy; - u8 cap; - - if (phy->fc_autoneg) - cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv); - else - cap = phy->fc_request_control; - - lan743x_mac_flow_ctrl_set_enables(adapter, - cap & FLOW_CTRL_TX, - cap & FLOW_CTRL_RX); -} - static int lan743x_phy_init(struct lan743x_adapter *adapter) { return lan743x_phy_reset(adapter); } -static void lan743x_phy_link_status_change(struct net_device *netdev) -{ - struct lan743x_adapter *adapter = netdev_priv(netdev); - struct phy_device *phydev = netdev->phydev; - u32 data; - - phy_print_status(phydev); - if (phydev->state == PHY_RUNNING) { - int remote_advertisement = 0; - int local_advertisement = 0; - - data = lan743x_csr_read(adapter, MAC_CR); - - /* set duplex mode */ - if (phydev->duplex) - data |= MAC_CR_DPX_; - else - data &= ~MAC_CR_DPX_; - - /* set bus speed */ - switch (phydev->speed) { - case SPEED_10: - data &= ~MAC_CR_CFG_H_; - data &= ~MAC_CR_CFG_L_; - break; - case SPEED_100: - data &= ~MAC_CR_CFG_H_; - data |= MAC_CR_CFG_L_; - break; - case SPEED_1000: - data |= MAC_CR_CFG_H_; - data &= ~MAC_CR_CFG_L_; - break; - case SPEED_2500: - data |= MAC_CR_CFG_H_; - data |= MAC_CR_CFG_L_; - break; - } - lan743x_csr_write(adapter, MAC_CR, data); - - local_advertisement = - linkmode_adv_to_mii_adv_t(phydev->advertising); - remote_advertisement = - linkmode_adv_to_mii_adv_t(phydev->lp_advertising); - - lan743x_phy_update_flowcontrol(adapter, local_advertisement, - remote_advertisement); - lan743x_ptp_update_latency(adapter, phydev->speed); - if (phydev->interface == PHY_INTERFACE_MODE_SGMII || - phydev->interface == PHY_INTERFACE_MODE_1000BASEX || - phydev->interface == PHY_INTERFACE_MODE_2500BASEX) - lan743x_sgmii_config(adapter); - - data = lan743x_csr_read(adapter, MAC_CR); - if (phydev->enable_tx_lpi) - data |= MAC_CR_EEE_EN_; - else - data &= ~MAC_CR_EEE_EN_; - lan743x_csr_write(adapter, MAC_CR, data); - } -} - -static void lan743x_phy_close(struct 
lan743x_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct phy_device *phydev = netdev->phydev; - - phy_stop(netdev->phydev); - phy_disconnect(netdev->phydev); - - /* using phydev here as phy_disconnect NULLs netdev->phydev */ - if (phy_is_pseudo_fixed_link(phydev)) - fixed_phy_unregister(phydev); - -} - static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) { u32 id_rev; @@ -1502,65 +1367,9 @@ static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) adapter->phy_interface = PHY_INTERFACE_MODE_MII; else adapter->phy_interface = PHY_INTERFACE_MODE_RGMII; -} - -static int lan743x_phy_open(struct lan743x_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct lan743x_phy *phy = &adapter->phy; - struct fixed_phy_status fphy_status = { - .link = 1, - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - }; - struct phy_device *phydev; - int ret = -EIO; - - /* try devicetree phy, or fixed link */ - phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node, - lan743x_phy_link_status_change); - - if (!phydev) { - /* try internal phy */ - phydev = phy_find_first(adapter->mdiobus); - if (!phydev) { - if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == - ID_REV_ID_LAN7431_) { - phydev = fixed_phy_register(PHY_POLL, - &fphy_status, NULL); - if (IS_ERR(phydev)) { - netdev_err(netdev, "No PHY/fixed_PHY found\n"); - return PTR_ERR(phydev); - } - } else { - goto return_error; - } - } - - lan743x_phy_interface_select(adapter); - - ret = phy_connect_direct(netdev, phydev, - lan743x_phy_link_status_change, - adapter->phy_interface); - if (ret) - goto return_error; - } - - /* MAC doesn't support 1000T Half */ - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - /* support both flow controls */ - phy_support_asym_pause(phydev); - phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); - phy->fc_autoneg = phydev->autoneg; - - phy_start(phydev); - phy_start_aneg(phydev); - phy_attached_info(phydev); - return 0; -return_error: - return ret; + netif_dbg(adapter, drv, adapter->netdev, + "selected phy interface: 0x%X\n", adapter->phy_interface); } static void lan743x_rfe_open(struct lan743x_adapter *adapter) @@ -3061,6 +2870,336 @@ static int lan743x_rx_open(struct lan743x_rx *rx) return ret; } +static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter) +{ + u32 sgmii_ctl; + int ret; + + ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL, + MASTER_SLAVE_STATE_MASTER); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d link-speed-duplex(LSD) invalid\n", ret); + return ret; + } + + adapter->sgmii_lsd = ret; + netif_dbg(adapter, drv, adapter->netdev, + "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd); + + /* LINK_STATUS_SOURCE from the External PHY via SGMII */ + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + + ret = lan743x_serdes_clock_and_aneg_update(adapter); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d sgmii aneg update failed\n", ret); + return ret; + } + + return lan743x_pcs_power_reset(adapter); +} + +static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter) +{ + u32 sgmii_ctl; + int ret; + + ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL, + MASTER_SLAVE_STATE_MASTER); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d link-speed-duplex(LSD) invalid\n", ret); + return ret; + } + + adapter->sgmii_lsd = ret; 
+ netif_dbg(adapter, drv, adapter->netdev, + "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd); + + /* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */ + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + + ret = lan743x_serdes_clock_and_aneg_update(adapter); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d 1000basex aneg update failed\n", ret); + return ret; + } + + return lan743x_pcs_power_reset(adapter); +} + +static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter) +{ + u32 sgmii_ctl; + int ret; + + ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL, + MASTER_SLAVE_STATE_MASTER); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d link-speed-duplex(LSD) invalid\n", ret); + return ret; + } + + adapter->sgmii_lsd = ret; + netif_dbg(adapter, drv, adapter->netdev, + "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd); + + /* LINK_STATUS_SOURCE from 2500BASE-X PCS link status */ + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_; + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + + ret = lan743x_serdes_clock_and_aneg_update(adapter); + if (ret < 0) { + netif_err(adapter, drv, adapter->netdev, + "error %d 2500basex aneg update failed\n", ret); + return ret; + } + + return lan743x_pcs_power_reset(adapter); +} + +void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable) +{ + u32 mac_cr; + + mac_cr = lan743x_csr_read(adapter, MAC_CR); + if (enable) + mac_cr |= MAC_CR_EEE_EN_; + else + mac_cr &= ~MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, mac_cr); +} + +static void lan743x_phylink_mac_config(struct phylink_config *config, + unsigned int link_an_mode, + const struct phylink_link_state *state) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret; + + switch (state->interface) { + case PHY_INTERFACE_MODE_2500BASEX: + ret = lan743x_phylink_2500basex_config(adapter); + if (ret < 0) + netif_err(adapter, drv, adapter->netdev, + "2500BASEX config failed. Error %d\n", ret); + else + netif_dbg(adapter, drv, adapter->netdev, + "2500BASEX mode selected and configured\n"); + break; + case PHY_INTERFACE_MODE_1000BASEX: + ret = lan743x_phylink_1000basex_config(adapter); + if (ret < 0) + netif_err(adapter, drv, adapter->netdev, + "1000BASEX config failed. Error %d\n", ret); + else + netif_dbg(adapter, drv, adapter->netdev, + "1000BASEX mode selected and configured\n"); + break; + case PHY_INTERFACE_MODE_SGMII: + ret = lan743x_phylink_sgmii_config(adapter); + if (ret < 0) + netif_err(adapter, drv, adapter->netdev, + "SGMII config failed. 
Error %d\n", ret); + else + netif_dbg(adapter, drv, adapter->netdev, + "SGMII mode selected and configured\n"); + break; + default: + netif_dbg(adapter, drv, adapter->netdev, + "RGMII/GMII/MII(0x%X) mode enable\n", + state->interface); + break; + } +} + +static void lan743x_phylink_mac_link_down(struct phylink_config *config, + unsigned int link_an_mode, + phy_interface_t interface) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + netif_tx_stop_all_queues(to_net_dev(config->dev)); + lan743x_mac_eee_enable(adapter, false); +} + +static void lan743x_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int mac_cr; + u8 cap; + + mac_cr = lan743x_csr_read(adapter, MAC_CR); + /* Pre-initialize register bits. + * Resulting value corresponds to SPEED_10 + */ + mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_); + if (speed == SPEED_2500) + mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_; + else if (speed == SPEED_1000) + mac_cr |= MAC_CR_CFG_H_; + else if (speed == SPEED_100) + mac_cr |= MAC_CR_CFG_L_; + + lan743x_csr_write(adapter, MAC_CR, mac_cr); + + lan743x_ptp_update_latency(adapter, speed); + + /* Flow Control operation */ + cap = 0; + if (tx_pause) + cap |= FLOW_CTRL_TX; + if (rx_pause) + cap |= FLOW_CTRL_RX; + + lan743x_mac_flow_ctrl_set_enables(adapter, + cap & FLOW_CTRL_TX, + cap & FLOW_CTRL_RX); + + if (phydev) + lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi); + + netif_tx_wake_all_queues(netdev); +} + +static const struct phylink_mac_ops lan743x_phylink_mac_ops = { + .mac_config = lan743x_phylink_mac_config, + .mac_link_down = lan743x_phylink_mac_link_down, + .mac_link_up = lan743x_phylink_mac_link_up, +}; + +static int lan743x_phylink_create(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct phylink *pl; + + adapter->phylink_config.dev = &netdev->dev; + adapter->phylink_config.type = PHYLINK_NETDEV; + adapter->phylink_config.mac_managed_pm = false; + + adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | + MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; + + lan743x_phy_interface_select(adapter); + + switch (adapter->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + __set_bit(PHY_INTERFACE_MODE_SGMII, + adapter->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + adapter->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + adapter->phylink_config.supported_interfaces); + adapter->phylink_config.mac_capabilities |= MAC_2500FD; + break; + case PHY_INTERFACE_MODE_GMII: + __set_bit(PHY_INTERFACE_MODE_GMII, + adapter->phylink_config.supported_interfaces); + break; + case PHY_INTERFACE_MODE_MII: + __set_bit(PHY_INTERFACE_MODE_MII, + adapter->phylink_config.supported_interfaces); + break; + default: + phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces); + } + + pl = phylink_create(&adapter->phylink_config, NULL, + adapter->phy_interface, &lan743x_phylink_mac_ops); + + if (IS_ERR(pl)) { + netdev_err(netdev, "Could not create phylink (%pe)\n", pl); + return PTR_ERR(pl); + } + + adapter->phylink = pl; + netdev_dbg(netdev, "lan743x phylink created"); + + return 0; +} + +static bool lan743x_phy_handle_exists(struct device_node *dn) +{ + dn = 
of_parse_phandle(dn, "phy-handle", 0); + of_node_put(dn); + return dn != NULL; +} + +static int lan743x_phylink_connect(struct lan743x_adapter *adapter) +{ + struct device_node *dn = adapter->pdev->dev.of_node; + struct net_device *dev = adapter->netdev; + struct phy_device *phydev; + int ret; + + if (dn) + ret = phylink_of_phy_connect(adapter->phylink, dn, 0); + + if (!dn || (ret && !lan743x_phy_handle_exists(dn))) { + phydev = phy_find_first(adapter->mdiobus); + if (phydev) { + /* attach the mac to the phy */ + ret = phylink_connect_phy(adapter->phylink, phydev); + } else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) == + ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) { + struct phylink_link_state state; + unsigned long caps; + + caps = adapter->phylink_config.mac_capabilities; + if (caps & MAC_2500FD) { + state.speed = SPEED_2500; + state.duplex = DUPLEX_FULL; + } else if (caps & MAC_1000FD) { + state.speed = SPEED_1000; + state.duplex = DUPLEX_FULL; + } else { + state.speed = SPEED_UNKNOWN; + state.duplex = DUPLEX_UNKNOWN; + } + + ret = phylink_set_fixed_link(adapter->phylink, &state); + if (ret) { + netdev_err(dev, "Could not set fixed link\n"); + return ret; + } + } else { + netdev_err(dev, "no PHY found\n"); + return -ENXIO; + } + } + + if (ret) { + netdev_err(dev, "Could not attach PHY (%d)\n", ret); + return ret; + } + + phylink_start(adapter->phylink); + + return 0; +} + +static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter) +{ + phylink_stop(adapter->phylink); + phylink_disconnect_phy(adapter->phylink); +} + static int lan743x_netdev_close(struct net_device *netdev) { struct lan743x_adapter *adapter = netdev_priv(netdev); @@ -3074,7 +3213,7 @@ static int lan743x_netdev_close(struct net_device *netdev) lan743x_ptp_close(adapter); - lan743x_phy_close(adapter); + lan743x_phylink_disconnect(adapter); lan743x_mac_close(adapter); @@ -3097,13 +3236,13 @@ static int lan743x_netdev_open(struct net_device *netdev) if (ret) goto close_intr; - ret = lan743x_phy_open(adapter); + ret = lan743x_phylink_connect(adapter); if (ret) goto close_mac; ret = lan743x_ptp_open(adapter); if (ret) - goto close_phy; + goto close_mac; lan743x_rfe_open(adapter); @@ -3119,6 +3258,9 @@ static int lan743x_netdev_open(struct net_device *netdev) goto close_tx; } + if (netdev->phydev) + phy_support_eee(netdev->phydev); + #ifdef CONFIG_PM if (adapter->netdev->phydev) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; @@ -3143,9 +3285,8 @@ static int lan743x_netdev_open(struct net_device *netdev) lan743x_rx_close(&adapter->rx[index]); } lan743x_ptp_close(adapter); - -close_phy: - lan743x_phy_close(adapter); + if (adapter->phylink) + lan743x_phylink_disconnect(adapter); close_mac: lan743x_mac_close(adapter); @@ -3174,11 +3315,14 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb, static int lan743x_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { + struct lan743x_adapter *adapter = netdev_priv(netdev); + if (!netif_running(netdev)) return -EINVAL; if (cmd == SIOCSHWTSTAMP) return lan743x_ptp_ioctl(netdev, ifr, cmd); - return phy_mii_ioctl(netdev->phydev, ifr, cmd); + + return phylink_mii_ioctl(adapter->phylink, ifr, cmd); } static void lan743x_netdev_set_multicast(struct net_device *netdev) @@ -3283,10 +3427,17 @@ static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter) mdiobus_unregister(adapter->mdiobus); } +static void lan743x_destroy_phylink(struct lan743x_adapter *adapter) +{ + phylink_destroy(adapter->phylink); + adapter->phylink = NULL; +} + 
static void lan743x_full_cleanup(struct lan743x_adapter *adapter) { unregister_netdev(adapter->netdev); + lan743x_destroy_phylink(adapter); lan743x_mdiobus_cleanup(adapter); lan743x_hardware_cleanup(adapter); lan743x_pci_cleanup(adapter); @@ -3500,14 +3651,21 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, NETIF_F_HW_CSUM | NETIF_F_RXCSUM; adapter->netdev->hw_features = adapter->netdev->features; - /* carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); + ret = lan743x_phylink_create(adapter); + if (ret < 0) { + netif_err(adapter, probe, netdev, + "failed to setup phylink (%d)\n", ret); + goto cleanup_mdiobus; + } ret = register_netdev(adapter->netdev); if (ret < 0) - goto cleanup_mdiobus; + goto cleanup_phylink; return 0; +cleanup_phylink: + lan743x_destroy_phylink(adapter); + cleanup_mdiobus: lan743x_mdiobus_cleanup(adapter); @@ -3763,6 +3921,7 @@ static int lan743x_pm_resume(struct device *dev) MAC_WK_SRC_WK_FR_SAVED_; lan743x_csr_write(adapter, MAC_WK_SRC, data); + rtnl_lock(); /* open netdev when netdev is at running state while resume. * For instance, it is true when system wakesup after pm-suspend * However, it is false when system wakes up after suspend GUI menu @@ -3771,6 +3930,7 @@ static int lan743x_pm_resume(struct device *dev) lan743x_netdev_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); return 0; } diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 3b2585a384e2c5..8ef897c114d3ce 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -5,6 +5,7 @@ #define _LAN743X_H #include +#include #include "lan743x_ptp.h" #define DRIVER_AUTHOR "Bryan Whitehead " @@ -1083,6 +1084,8 @@ struct lan743x_adapter { u32 flags; u32 hw_cfg; phy_interface_t phy_interface; + struct phylink *phylink; + struct phylink_config phylink_config; }; #define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel)) @@ -1203,5 +1206,6 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter); void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter, bool tx_enable, bool rx_enable); int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr); +void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable); #endif /* _LAN743X_H */ diff --git a/drivers/net/ethernet/microchip/lan865x/Kconfig b/drivers/net/ethernet/microchip/lan865x/Kconfig new file mode 100644 index 00000000000000..7f2a4e7e1915fb --- /dev/null +++ b/drivers/net/ethernet/microchip/lan865x/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Microchip LAN865x Driver Support +# + +if NET_VENDOR_MICROCHIP + +config LAN865X + tristate "LAN865x support" + depends on SPI + select OA_TC6 + help + Support for the Microchip LAN8650/1 Rev.B0/B1 MACPHY Ethernet chip. It + uses OPEN Alliance 10BASE-T1x Serial Interface specification. + + To compile this driver as a module, choose M here. The module will be + called lan865x. 
+ +endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/lan865x/Makefile b/drivers/net/ethernet/microchip/lan865x/Makefile new file mode 100644 index 00000000000000..9f5dd89c1eb87b --- /dev/null +++ b/drivers/net/ethernet/microchip/lan865x/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Microchip LAN865x Driver +# + +obj-$(CONFIG_LAN865X) += lan865x.o diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c new file mode 100644 index 00000000000000..dd436bdff0f86d --- /dev/null +++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Microchip's LAN865x 10BASE-T1S MAC-PHY driver + * + * Author: Parthiban Veerasooran + */ + +#include +#include +#include +#include + +#define DRV_NAME "lan8650" + +/* MAC Network Control Register */ +#define LAN865X_REG_MAC_NET_CTL 0x00010000 +#define MAC_NET_CTL_TXEN BIT(3) /* Transmit Enable */ +#define MAC_NET_CTL_RXEN BIT(2) /* Receive Enable */ + +/* MAC Network Configuration Reg */ +#define LAN865X_REG_MAC_NET_CFG 0x00010001 +#define MAC_NET_CFG_PROMISCUOUS_MODE BIT(4) +#define MAC_NET_CFG_MULTICAST_MODE BIT(6) +#define MAC_NET_CFG_UNICAST_MODE BIT(7) + +/* MAC Hash Register Bottom */ +#define LAN865X_REG_MAC_L_HASH 0x00010020 +/* MAC Hash Register Top */ +#define LAN865X_REG_MAC_H_HASH 0x00010021 +/* MAC Specific Addr 1 Bottom Reg */ +#define LAN865X_REG_MAC_L_SADDR1 0x00010022 +/* MAC Specific Addr 1 Top Reg */ +#define LAN865X_REG_MAC_H_SADDR1 0x00010023 + +struct lan865x_priv { + struct work_struct multicast_work; + struct net_device *netdev; + struct spi_device *spi; + struct oa_tc6 *tc6; +}; + +static int lan865x_set_hw_macaddr_low_bytes(struct oa_tc6 *tc6, const u8 *mac) +{ + u32 regval; + + regval = (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0]; + + return oa_tc6_write_register(tc6, LAN865X_REG_MAC_L_SADDR1, regval); +} + +static int lan865x_set_hw_macaddr(struct lan865x_priv *priv, const u8 *mac) +{ + int restore_ret; + u32 regval; + int ret; + + /* Configure MAC address low bytes */ + ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6, mac); + if (ret) + return ret; + + /* Prepare and configure MAC address high bytes */ + regval = (mac[5] << 8) | mac[4]; + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_SADDR1, + regval); + if (!ret) + return 0; + + /* Restore the old MAC address low bytes from netdev if the new MAC + * address high bytes setting failed. 
+ */ + restore_ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6, + priv->netdev->dev_addr); + if (restore_ret) + return restore_ret; + + return ret; +} + +static const struct ethtool_ops lan865x_ethtool_ops = { + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static int lan865x_set_mac_address(struct net_device *netdev, void *addr) +{ + struct lan865x_priv *priv = netdev_priv(netdev); + struct sockaddr *address = addr; + int ret; + + ret = eth_prepare_mac_addr_change(netdev, addr); + if (ret < 0) + return ret; + + if (ether_addr_equal(address->sa_data, netdev->dev_addr)) + return 0; + + ret = lan865x_set_hw_macaddr(priv, address->sa_data); + if (ret) + return ret; + + eth_commit_mac_addr_change(netdev, addr); + + return 0; +} + +static u32 get_address_bit(u8 addr[ETH_ALEN], u32 bit) +{ + return ((addr[bit / 8]) >> (bit % 8)) & 1; +} + +static u32 lan865x_hash(u8 addr[ETH_ALEN]) +{ + u32 hash_index = 0; + + for (int i = 0; i < 6; i++) { + u32 hash = 0; + + for (int j = 0; j < 8; j++) + hash ^= get_address_bit(addr, (j * 6) + i); + + hash_index |= (hash << i); + } + + return hash_index; +} + +static int lan865x_set_specific_multicast_addr(struct lan865x_priv *priv) +{ + struct netdev_hw_addr *ha; + u32 hash_lo = 0; + u32 hash_hi = 0; + int ret; + + netdev_for_each_mc_addr(ha, priv->netdev) { + u32 bit_num = lan865x_hash(ha->addr); + + if (bit_num >= BIT(5)) + hash_hi |= (1 << (bit_num - BIT(5))); + else + hash_lo |= (1 << bit_num); + } + + /* Enabling specific multicast addresses */ + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, hash_hi); + if (ret) { + netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n", + ret); + return ret; + } + + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, hash_lo); + if (ret) + netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n", + ret); + + return ret; +} + +static int lan865x_set_all_multicast_addr(struct lan865x_priv *priv) +{ + int ret; + + /* Enabling all multicast addresses */ + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, + 0xffffffff); + if (ret) { + netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n", + ret); + return ret; + } + + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, + 0xffffffff); + if (ret) + netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n", + ret); + + return ret; +} + +static int lan865x_clear_all_multicast_addr(struct lan865x_priv *priv) +{ + int ret; + + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, 0); + if (ret) { + netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n", + ret); + return ret; + } + + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, 0); + if (ret) + netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n", + ret); + + return ret; +} + +static void lan865x_multicast_work_handler(struct work_struct *work) +{ + struct lan865x_priv *priv = container_of(work, struct lan865x_priv, + multicast_work); + u32 regval = 0; + int ret; + + if (priv->netdev->flags & IFF_PROMISC) { + /* Enabling promiscuous mode */ + regval |= MAC_NET_CFG_PROMISCUOUS_MODE; + regval &= (~MAC_NET_CFG_MULTICAST_MODE); + regval &= (~MAC_NET_CFG_UNICAST_MODE); + } else if (priv->netdev->flags & IFF_ALLMULTI) { + /* Enabling all multicast mode */ + if (lan865x_set_all_multicast_addr(priv)) + return; + + regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE); + regval |= MAC_NET_CFG_MULTICAST_MODE; + regval &= (~MAC_NET_CFG_UNICAST_MODE); + } else if 
(!netdev_mc_empty(priv->netdev)) { + /* Enabling specific multicast mode */ + if (lan865x_set_specific_multicast_addr(priv)) + return; + + regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE); + regval |= MAC_NET_CFG_MULTICAST_MODE; + regval &= (~MAC_NET_CFG_UNICAST_MODE); + } else { + /* Enabling local mac address only */ + if (lan865x_clear_all_multicast_addr(priv)) + return; + } + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CFG, regval); + if (ret) + netdev_err(priv->netdev, "Failed to enable promiscuous/multicast/normal mode: %d\n", + ret); +} + +static void lan865x_set_multicast_list(struct net_device *netdev) +{ + struct lan865x_priv *priv = netdev_priv(netdev); + + schedule_work(&priv->multicast_work); +} + +static netdev_tx_t lan865x_send_packet(struct sk_buff *skb, + struct net_device *netdev) +{ + struct lan865x_priv *priv = netdev_priv(netdev); + + return oa_tc6_start_xmit(priv->tc6, skb); +} + +static int lan865x_hw_disable(struct lan865x_priv *priv) +{ + u32 regval; + + if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, ®val)) + return -ENODEV; + + regval &= ~(MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN); + + if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval)) + return -ENODEV; + + return 0; +} + +static int lan865x_net_close(struct net_device *netdev) +{ + struct lan865x_priv *priv = netdev_priv(netdev); + int ret; + + netif_stop_queue(netdev); + phy_stop(netdev->phydev); + ret = lan865x_hw_disable(priv); + if (ret) { + netdev_err(netdev, "Failed to disable the hardware: %d\n", ret); + return ret; + } + + return 0; +} + +static int lan865x_hw_enable(struct lan865x_priv *priv) +{ + u32 regval; + + if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, ®val)) + return -ENODEV; + + regval |= MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN; + + if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval)) + return -ENODEV; + + return 0; +} + +static int lan865x_net_open(struct net_device *netdev) +{ + struct lan865x_priv *priv = netdev_priv(netdev); + int ret; + + ret = lan865x_hw_enable(priv); + if (ret) { + netdev_err(netdev, "Failed to enable hardware: %d\n", ret); + return ret; + } + + phy_start(netdev->phydev); + + return 0; +} + +static const struct net_device_ops lan865x_netdev_ops = { + .ndo_open = lan865x_net_open, + .ndo_stop = lan865x_net_close, + .ndo_start_xmit = lan865x_send_packet, + .ndo_set_rx_mode = lan865x_set_multicast_list, + .ndo_set_mac_address = lan865x_set_mac_address, +}; + +static int lan865x_probe(struct spi_device *spi) +{ + struct net_device *netdev; + struct lan865x_priv *priv; + int ret; + + netdev = alloc_etherdev(sizeof(struct lan865x_priv)); + if (!netdev) + return -ENOMEM; + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->spi = spi; + spi_set_drvdata(spi, priv); + INIT_WORK(&priv->multicast_work, lan865x_multicast_work_handler); + + priv->tc6 = oa_tc6_init(spi, netdev); + if (!priv->tc6) { + ret = -ENODEV; + goto free_netdev; + } + + /* As per the point s3 in the below errata, SPI receive Ethernet frame + * transfer may halt when starting the next frame in the same data block + * (chunk) as the end of a previous frame. The RFA field should be + * configured to 01b or 10b for proper operation. In these modes, only + * one receive Ethernet frame will be placed in a single data block. + * When the RFA field is written to 01b, received frames will be forced + * to only start in the first word of the data block payload (SWO=0). 
As + * recommended, enable zero align receive frame feature for proper + * operation. + * + * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/Errata/LAN8650-1-Errata-80001075.pdf + */ + ret = oa_tc6_zero_align_receive_frame_enable(priv->tc6); + if (ret) { + dev_err(&spi->dev, "Failed to set ZARFE: %d\n", ret); + goto oa_tc6_exit; + } + + /* Get the MAC address from the SPI device tree node */ + if (device_get_ethdev_address(&spi->dev, netdev)) + eth_hw_addr_random(netdev); + + ret = lan865x_set_hw_macaddr(priv, netdev->dev_addr); + if (ret) { + dev_err(&spi->dev, "Failed to configure MAC: %d\n", ret); + goto oa_tc6_exit; + } + + netdev->if_port = IF_PORT_10BASET; + netdev->irq = spi->irq; + netdev->netdev_ops = &lan865x_netdev_ops; + netdev->ethtool_ops = &lan865x_ethtool_ops; + + ret = register_netdev(netdev); + if (ret) { + dev_err(&spi->dev, "Register netdev failed (ret = %d)", ret); + goto oa_tc6_exit; + } + + return 0; + +oa_tc6_exit: + oa_tc6_exit(priv->tc6); +free_netdev: + free_netdev(priv->netdev); + return ret; +} + +static void lan865x_remove(struct spi_device *spi) +{ + struct lan865x_priv *priv = spi_get_drvdata(spi); + + cancel_work_sync(&priv->multicast_work); + unregister_netdev(priv->netdev); + oa_tc6_exit(priv->tc6); + free_netdev(priv->netdev); +} + +static const struct spi_device_id spidev_spi_ids[] = { + { .name = "lan8650" }, + {}, +}; + +static const struct of_device_id lan865x_dt_ids[] = { + { .compatible = "microchip,lan8650" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, lan865x_dt_ids); + +static struct spi_driver lan865x_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = lan865x_dt_ids, + }, + .probe = lan865x_probe, + .remove = lan865x_remove, + .id_table = spidev_spi_ids, +}; +module_spi_driver(lan865x_driver); + +MODULE_DESCRIPTION(DRV_NAME " 10Base-T1S MACPHY Ethernet Driver"); +MODULE_AUTHOR("Parthiban Veerasooran "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig index f9ebffc04eb855..f663b6e12466e5 100644 --- a/drivers/net/ethernet/microchip/lan966x/Kconfig +++ b/drivers/net/ethernet/microchip/lan966x/Kconfig @@ -8,6 +8,7 @@ config LAN966X_SWITCH select PHYLINK select PAGE_POOL select VCAP + select FDMA help This driver supports the Lan966x network switch device. diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile index 3b6ac331691d0b..4cdbe263502cbc 100644 --- a/drivers/net/ethernet/microchip/lan966x/Makefile +++ b/drivers/net/ethernet/microchip/lan966x/Makefile @@ -20,3 +20,4 @@ lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o # Provide include files ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap +ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c index aec7066d83b3cc..2474dfd330f46e 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c @@ -549,16 +549,13 @@ static int lan966x_get_ts_info(struct net_device *dev, phc = &lan966x->phc[LAN966X_PHC_PORT]; - info->phc_index = phc->clock ? 
ptp_clock_index(phc->clock) : -1; - if (info->phc_index == -1) { - info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + if (phc->clock) { + info->phc_index = ptp_clock_index(phc->clock); + } else { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c index 3960534ac2ad81..50267071810429 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -6,31 +6,55 @@ #include "lan966x_main.h" -static int lan966x_fdma_channel_active(struct lan966x *lan966x) -{ - return lan_rd(lan966x, FDMA_CH_ACTIVE); -} - -static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx, - struct lan966x_db *db) +static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) { + struct lan966x *lan966x = (struct lan966x *)fdma->priv; + struct lan966x_rx *rx = &lan966x->rx; struct page *page; page = page_pool_dev_alloc_pages(rx->page_pool); if (unlikely(!page)) - return NULL; + return -ENOMEM; + + rx->page[dcb][db] = page; + *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM; + + return 0; +} - db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM; +static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) +{ + struct lan966x *lan966x = (struct lan966x *)fdma->priv; + + *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr; - return page; + return 0; +} + +static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) +{ + struct lan966x *lan966x = (struct lan966x *)fdma->priv; + + *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM; + + return 0; +} + +static int lan966x_fdma_channel_active(struct lan966x *lan966x) +{ + return lan_rd(lan966x, FDMA_CH_ACTIVE); } static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) { + struct fdma *fdma = &rx->fdma; int i, j; - for (i = 0; i < FDMA_DCB_MAX; ++i) { - for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) + for (i = 0; i < fdma->n_dcbs; ++i) { + for (j = 0; j < fdma->n_dbs; ++j) page_pool_put_full_page(rx->page_pool, rx->page[i][j], false); } @@ -38,41 +62,23 @@ static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx) { + struct fdma *fdma = &rx->fdma; struct page *page; - page = rx->page[rx->dcb_index][rx->db_index]; + page = rx->page[fdma->dcb_index][fdma->db_index]; if (unlikely(!page)) return; page_pool_recycle_direct(rx->page_pool, page); } -static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx, - struct lan966x_rx_dcb *dcb, - u64 nextptr) -{ - struct lan966x_db *db; - int i; - - for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) { - db = &dcb->db[i]; - db->status = FDMA_DCB_STATUS_INTR; - } - - dcb->nextptr = FDMA_DCB_INVALID_DATA; - dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order); - - rx->last_entry->nextptr = nextptr; - rx->last_entry = dcb; -} - static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; struct page_pool_params pp_params = { .order = rx->page_order, .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, - .pool_size = FDMA_DCB_MAX, 
+ .pool_size = rx->fdma.n_dcbs, .nid = NUMA_NO_NODE, .dev = lan966x->dev, .dma_dir = DMA_FROM_DEVICE, @@ -104,84 +110,41 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx) static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; - struct lan966x_rx_dcb *dcb; - struct lan966x_db *db; - struct page *page; - int i, j; - int size; + struct fdma *fdma = &rx->fdma; + int err; if (lan966x_fdma_rx_alloc_page_pool(rx)) return PTR_ERR(rx->page_pool); - /* calculate how many pages are needed to allocate the dcbs */ - size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - - rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL); - if (!rx->dcbs) - return -ENOMEM; - - rx->last_entry = rx->dcbs; - rx->db_index = 0; - rx->dcb_index = 0; - - /* Now for each dcb allocate the dbs */ - for (i = 0; i < FDMA_DCB_MAX; ++i) { - dcb = &rx->dcbs[i]; - dcb->info = 0; - - /* For each db allocate a page and map it to the DB dataptr. */ - for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) { - db = &dcb->db[j]; - page = lan966x_fdma_rx_alloc_page(rx, db); - if (!page) - return -ENOMEM; - - db->status = 0; - rx->page[i][j] = page; - } + err = fdma_alloc_coherent(lan966x->dev, fdma); + if (err) + return err; - lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i); - } + fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_INTR); return 0; } -static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx) -{ - rx->dcb_index++; - rx->dcb_index &= FDMA_DCB_MAX - 1; -} - -static void lan966x_fdma_rx_free(struct lan966x_rx *rx) -{ - struct lan966x *lan966x = rx->lan966x; - u32 size; - - /* Now it is possible to do the cleanup of dcb */ - size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma); -} - static void lan966x_fdma_rx_start(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; + struct fdma *fdma = &rx->fdma; u32 mask; /* When activating a channel, first is required to write the first DCB * address and then to activate it */ - lan_wr(lower_32_bits((u64)rx->dma), lan966x, - FDMA_DCB_LLP(rx->channel_id)); - lan_wr(upper_32_bits((u64)rx->dma), lan966x, - FDMA_DCB_LLP1(rx->channel_id)); + lan_wr(lower_32_bits((u64)fdma->dma), lan966x, + FDMA_DCB_LLP(fdma->channel_id)); + lan_wr(upper_32_bits((u64)fdma->dma), lan966x, + FDMA_DCB_LLP1(fdma->channel_id)); - lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) | FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | FDMA_CH_CFG_CH_INJ_PORT_SET(0) | FDMA_CH_CFG_CH_MEM_SET(1), - lan966x, FDMA_CH_CFG(rx->channel_id)); + lan966x, FDMA_CH_CFG(fdma->channel_id)); /* Start fdma */ lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), @@ -191,13 +154,13 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx) /* Enable interrupts */ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); - mask |= BIT(rx->channel_id); + mask |= BIT(fdma->channel_id); lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), FDMA_INTR_DB_ENA_INTR_DB_ENA, lan966x, FDMA_INTR_DB_ENA); /* Activate the channel */ - lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)), + lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)), FDMA_CH_ACTIVATE_CH_ACTIVATE, lan966x, FDMA_CH_ACTIVATE); } @@ -205,18 +168,19 @@ static void lan966x_fdma_rx_start(struct lan966x_rx *rx) static void lan966x_fdma_rx_disable(struct 
lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; + struct fdma *fdma = &rx->fdma; u32 val; /* Disable the channel */ - lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)), + lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)), FDMA_CH_DISABLE_CH_DISABLE, lan966x, FDMA_CH_DISABLE); readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, - val, !(val & BIT(rx->channel_id)), + val, !(val & BIT(fdma->channel_id)), READL_SLEEP_US, READL_TIMEOUT_US); - lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)), + lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)), FDMA_CH_DB_DISCARD_DB_DISCARD, lan966x, FDMA_CH_DB_DISCARD); } @@ -225,50 +189,27 @@ static void lan966x_fdma_rx_reload(struct lan966x_rx *rx) { struct lan966x *lan966x = rx->lan966x; - lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)), + lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)), FDMA_CH_RELOAD_CH_RELOAD, lan966x, FDMA_CH_RELOAD); } -static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx, - struct lan966x_tx_dcb *dcb) -{ - dcb->nextptr = FDMA_DCB_INVALID_DATA; - dcb->info = 0; -} - static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; - struct lan966x_tx_dcb *dcb; - struct lan966x_db *db; - int size; - int i, j; + struct fdma *fdma = &tx->fdma; + int err; - tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf), + tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf), GFP_KERNEL); if (!tx->dcbs_buf) return -ENOMEM; - /* calculate how many pages are needed to allocate the dcbs */ - size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL); - if (!tx->dcbs) + err = fdma_alloc_coherent(lan966x->dev, fdma); + if (err) goto out; - /* Now for each dcb allocate the db */ - for (i = 0; i < FDMA_DCB_MAX; ++i) { - dcb = &tx->dcbs[i]; - - for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) { - db = &dcb->db[j]; - db->dataptr = 0; - db->status = 0; - } - - lan966x_fdma_tx_add_dcb(tx, dcb); - } + fdma_dcbs_init(fdma, 0, 0); return 0; @@ -280,33 +221,30 @@ static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx) static void lan966x_fdma_tx_free(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; - int size; kfree(tx->dcbs_buf); - - size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma); + fdma_free_coherent(lan966x->dev, &tx->fdma); } static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; + struct fdma *fdma = &tx->fdma; u32 mask; /* When activating a channel, first is required to write the first DCB * address and then to activate it */ - lan_wr(lower_32_bits((u64)tx->dma), lan966x, - FDMA_DCB_LLP(tx->channel_id)); - lan_wr(upper_32_bits((u64)tx->dma), lan966x, - FDMA_DCB_LLP1(tx->channel_id)); + lan_wr(lower_32_bits((u64)fdma->dma), lan966x, + FDMA_DCB_LLP(fdma->channel_id)); + lan_wr(upper_32_bits((u64)fdma->dma), lan966x, + FDMA_DCB_LLP1(fdma->channel_id)); - lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) | FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | FDMA_CH_CFG_CH_INJ_PORT_SET(0) | FDMA_CH_CFG_CH_MEM_SET(1), - lan966x, FDMA_CH_CFG(tx->channel_id)); + lan966x, FDMA_CH_CFG(fdma->channel_id)); /* Start fdma */ lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), @@ -316,13 +254,13 @@ static void 
lan966x_fdma_tx_activate(struct lan966x_tx *tx) /* Enable interrupts */ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); - mask |= BIT(tx->channel_id); + mask |= BIT(fdma->channel_id); lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), FDMA_INTR_DB_ENA_INTR_DB_ENA, lan966x, FDMA_INTR_DB_ENA); /* Activate the channel */ - lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)), + lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)), FDMA_CH_ACTIVATE_CH_ACTIVATE, lan966x, FDMA_CH_ACTIVATE); } @@ -330,23 +268,23 @@ static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) static void lan966x_fdma_tx_disable(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; + struct fdma *fdma = &tx->fdma; u32 val; /* Disable the channel */ - lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)), + lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)), FDMA_CH_DISABLE_CH_DISABLE, lan966x, FDMA_CH_DISABLE); readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, - val, !(val & BIT(tx->channel_id)), + val, !(val & BIT(fdma->channel_id)), READL_SLEEP_US, READL_TIMEOUT_US); - lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)), + lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)), FDMA_CH_DB_DISCARD_DB_DISCARD, lan966x, FDMA_CH_DB_DISCARD); tx->activated = false; - tx->last_in_use = -1; } static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) @@ -354,7 +292,7 @@ static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) struct lan966x *lan966x = tx->lan966x; /* Write the registers to reload the channel */ - lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)), + lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)), FDMA_CH_RELOAD_CH_RELOAD, lan966x, FDMA_CH_RELOAD); } @@ -393,23 +331,24 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) struct lan966x_tx *tx = &lan966x->tx; struct lan966x_rx *rx = &lan966x->rx; struct lan966x_tx_dcb_buf *dcb_buf; + struct fdma *fdma = &tx->fdma; struct xdp_frame_bulk bq; - struct lan966x_db *db; unsigned long flags; bool clear = false; + struct fdma_db *db; int i; xdp_frame_bulk_init(&bq); spin_lock_irqsave(&lan966x->tx_lock, flags); - for (i = 0; i < FDMA_DCB_MAX; ++i) { + for (i = 0; i < fdma->n_dcbs; ++i) { dcb_buf = &tx->dcbs_buf[i]; if (!dcb_buf->used) continue; - db = &tx->dcbs[i].db[0]; - if (!(db->status & FDMA_DCB_STATUS_DONE)) + db = fdma_db_get(fdma, i, 0); + if (!fdma_db_is_done(db)) continue; dcb_buf->dev->stats.tx_packets++; @@ -449,27 +388,16 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) spin_unlock_irqrestore(&lan966x->tx_lock, flags); } -static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx) -{ - struct lan966x_db *db; - - /* Check if there is any data */ - db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; - if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE))) - return false; - - return true; -} - static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port) { struct lan966x *lan966x = rx->lan966x; + struct fdma *fdma = &rx->fdma; struct lan966x_port *port; - struct lan966x_db *db; + struct fdma_db *db; struct page *page; - db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; - page = rx->page[rx->dcb_index][rx->db_index]; + db = fdma_db_next_get(fdma); + page = rx->page[fdma->dcb_index][fdma->db_index]; if (unlikely(!page)) return FDMA_ERROR; @@ -494,16 +422,17 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx, u64 src_port) { struct 
lan966x *lan966x = rx->lan966x; - struct lan966x_db *db; + struct fdma *fdma = &rx->fdma; struct sk_buff *skb; + struct fdma_db *db; struct page *page; u64 timestamp; /* Get the received frame and unmap it */ - db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; - page = rx->page[rx->dcb_index][rx->db_index]; + db = fdma_db_next_get(fdma); + page = rx->page[fdma->dcb_index][fdma->db_index]; - skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order); + skb = build_skb(page_address(page), fdma->db_size); if (unlikely(!skb)) goto free_page; @@ -546,21 +475,19 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) { struct lan966x *lan966x = container_of(napi, struct lan966x, napi); struct lan966x_rx *rx = &lan966x->rx; - int dcb_reload = rx->dcb_index; - struct lan966x_rx_dcb *old_dcb; - struct lan966x_db *db; + int old_dcb, dcb_reload, counter = 0; + struct fdma *fdma = &rx->fdma; bool redirect = false; struct sk_buff *skb; - struct page *page; - int counter = 0; u64 src_port; - u64 nextptr; + + dcb_reload = fdma->dcb_index; lan966x_fdma_tx_clear_buf(lan966x, weight); /* Get all received skb */ while (counter < weight) { - if (!lan966x_fdma_rx_more_frames(rx)) + if (!fdma_has_frames(fdma)) break; counter++; @@ -570,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) break; case FDMA_ERROR: lan966x_fdma_rx_free_page(rx); - lan966x_fdma_rx_advance_dcb(rx); + fdma_dcb_advance(fdma); goto allocate_new; case FDMA_REDIRECT: redirect = true; fallthrough; case FDMA_TX: - lan966x_fdma_rx_advance_dcb(rx); + fdma_dcb_advance(fdma); continue; case FDMA_DROP: lan966x_fdma_rx_free_page(rx); - lan966x_fdma_rx_advance_dcb(rx); + fdma_dcb_advance(fdma); continue; } skb = lan966x_fdma_rx_get_frame(rx, src_port); - lan966x_fdma_rx_advance_dcb(rx); + fdma_dcb_advance(fdma); if (!skb) goto allocate_new; @@ -594,20 +521,14 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) allocate_new: /* Allocate new pages and map them */ - while (dcb_reload != rx->dcb_index) { - db = &rx->dcbs[dcb_reload].db[rx->db_index]; - page = lan966x_fdma_rx_alloc_page(rx, db); - if (unlikely(!page)) - break; - rx->page[dcb_reload][rx->db_index] = page; - - old_dcb = &rx->dcbs[dcb_reload]; + while (dcb_reload != fdma->dcb_index) { + old_dcb = dcb_reload; dcb_reload++; - dcb_reload &= FDMA_DCB_MAX - 1; + dcb_reload &= fdma->n_dcbs - 1; + + fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_INTR); - nextptr = rx->dma + ((unsigned long)old_dcb - - (unsigned long)rx->dcbs); - lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr); lan966x_fdma_rx_reload(rx); } @@ -650,56 +571,30 @@ irqreturn_t lan966x_fdma_irq_handler(int irq, void *args) static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx) { struct lan966x_tx_dcb_buf *dcb_buf; + struct fdma *fdma = &tx->fdma; int i; - for (i = 0; i < FDMA_DCB_MAX; ++i) { + for (i = 0; i < fdma->n_dcbs; ++i) { dcb_buf = &tx->dcbs_buf[i]; - if (!dcb_buf->used && i != tx->last_in_use) + if (!dcb_buf->used && + !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i])) return i; } return -1; } -static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx, - int next_to_use, int len, - dma_addr_t dma_addr) -{ - struct lan966x_tx_dcb *next_dcb; - struct lan966x_db *next_db; - - next_dcb = &tx->dcbs[next_to_use]; - next_dcb->nextptr = FDMA_DCB_INVALID_DATA; - - next_db = &next_dcb->db[0]; - next_db->dataptr = dma_addr; - next_db->status = FDMA_DCB_STATUS_SOF | - FDMA_DCB_STATUS_EOF | - FDMA_DCB_STATUS_INTR | - 
FDMA_DCB_STATUS_BLOCKO(0) | - FDMA_DCB_STATUS_BLOCKL(len); -} - -static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use) +static void lan966x_fdma_tx_start(struct lan966x_tx *tx) { struct lan966x *lan966x = tx->lan966x; - struct lan966x_tx_dcb *dcb; if (likely(lan966x->tx.activated)) { - /* Connect current dcb to the next db */ - dcb = &tx->dcbs[tx->last_in_use]; - dcb->nextptr = tx->dma + (next_to_use * - sizeof(struct lan966x_tx_dcb)); - lan966x_fdma_tx_reload(tx); } else { /* Because it is first time, then just activate */ lan966x->tx.activated = true; lan966x_fdma_tx_activate(tx); } - - /* Move to next dcb because this last in use */ - tx->last_in_use = next_to_use; } int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) @@ -752,11 +647,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) next_dcb_buf->data.xdpf = xdpf; next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES; - - /* Setup next dcb */ - lan966x_fdma_tx_setup_dcb(tx, next_to_use, - xdpf->len + IFH_LEN_BYTES, - dma_addr); } else { page = ptr; @@ -773,11 +663,6 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) next_dcb_buf->data.page = page; next_dcb_buf->len = len + IFH_LEN_BYTES; - - /* Setup next dcb */ - lan966x_fdma_tx_setup_dcb(tx, next_to_use, - len + IFH_LEN_BYTES, - dma_addr + XDP_PACKET_HEADROOM); } /* Fill up the buffer */ @@ -788,8 +673,19 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) next_dcb_buf->ptp = false; next_dcb_buf->dev = port->dev; + __fdma_dcb_add(&tx->fdma, + next_to_use, + 0, + FDMA_DCB_STATUS_INTR | + FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len), + &fdma_nextptr_cb, + &lan966x_fdma_xdp_tx_dataptr_cb); + /* Start the transmission */ - lan966x_fdma_tx_start(tx, next_to_use); + lan966x_fdma_tx_start(tx); out: spin_unlock(&lan966x->tx_lock); @@ -847,9 +743,6 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) goto release; } - /* Setup next dcb */ - lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr); - /* Fill up the buffer */ next_dcb_buf = &tx->dcbs_buf[next_to_use]; next_dcb_buf->use_skb = true; @@ -861,12 +754,21 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) next_dcb_buf->ptp = false; next_dcb_buf->dev = dev; + fdma_dcb_add(&tx->fdma, + next_to_use, + 0, + FDMA_DCB_STATUS_INTR | + FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len)); + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) next_dcb_buf->ptp = true; /* Start the transmission */ - lan966x_fdma_tx_start(tx, next_to_use); + lan966x_fdma_tx_start(tx); return NETDEV_TX_OK; @@ -908,14 +810,11 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x) static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) { struct page_pool *page_pool; - dma_addr_t rx_dma; - void *rx_dcbs; - u32 size; + struct fdma fdma_rx_old; int err; /* Store these for later to free them */ - rx_dma = lan966x->rx.dma; - rx_dcbs = lan966x->rx.dcbs; + memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma)); page_pool = lan966x->rx.page_pool; napi_synchronize(&lan966x->napi); @@ -931,9 +830,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) goto restore; lan966x_fdma_rx_start(&lan966x->rx); - size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; - size = 
ALIGN(size, PAGE_SIZE); - dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma); + fdma_free_coherent(lan966x->dev, &fdma_rx_old); page_pool_destroy(page_pool); @@ -943,8 +840,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) return err; restore: lan966x->rx.page_pool = page_pool; - lan966x->rx.dma = rx_dma; - lan966x->rx.dcbs = rx_dcbs; + memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma)); lan966x_fdma_rx_start(&lan966x->rx); return err; @@ -1034,11 +930,24 @@ int lan966x_fdma_init(struct lan966x *lan966x) return 0; lan966x->rx.lan966x = lan966x; - lan966x->rx.channel_id = FDMA_XTR_CHANNEL; + lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL; + lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX; + lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS; + lan966x->rx.fdma.priv = lan966x; + lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma); + lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order; + lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb; + lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb; lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x); lan966x->tx.lan966x = lan966x; - lan966x->tx.channel_id = FDMA_INJ_CHANNEL; - lan966x->tx.last_in_use = -1; + lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL; + lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX; + lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS; + lan966x->tx.fdma.priv = lan966x; + lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma); + lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order; + lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb; + lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb; err = lan966x_fdma_rx_alloc(&lan966x->rx); if (err) @@ -1046,7 +955,7 @@ int lan966x_fdma_init(struct lan966x *lan966x) err = lan966x_fdma_tx_alloc(&lan966x->tx); if (err) { - lan966x_fdma_rx_free(&lan966x->rx); + fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma); return err; } @@ -1067,7 +976,7 @@ void lan966x_fdma_deinit(struct lan966x *lan966x) napi_disable(&lan966x->napi); lan966x_fdma_rx_free_pages(&lan966x->rx); - lan966x_fdma_rx_free(&lan966x->rx); + fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma); page_pool_destroy(lan966x->rx.page_pool); lan966x_fdma_tx_free(&lan966x->tx); } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index ec672af12e2515..534d4716d5f7d4 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -816,7 +816,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p, NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_TC; dev->hw_features |= NETIF_F_HW_TC; - dev->priv_flags |= IFF_SEE_ALL_HWTSTAMP_REQUESTS; + dev->see_all_hwtstamp_requests = true; dev->needed_headroom = IFH_LEN_BYTES; eth_hw_addr_gen(dev, lan966x->base_mac, p + 1); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index f8bebbcf77b2df..25cb2f61986f69 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -76,15 +77,6 @@ #define FDMA_RX_DCB_MAX_DBS 1 #define FDMA_TX_DCB_MAX_DBS 1 -#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) - -#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) -#define FDMA_DCB_STATUS_SOF BIT(16) -#define FDMA_DCB_STATUS_EOF BIT(17) -#define FDMA_DCB_STATUS_INTR BIT(18) -#define FDMA_DCB_STATUS_DONE 
BIT(19) -#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) -#define FDMA_DCB_INVALID_DATA 0x1 #define FDMA_XTR_CHANNEL 6 #define FDMA_INJ_CHANNEL 0 @@ -199,49 +191,14 @@ enum vcap_is1_port_sel_rt { struct lan966x_port; -struct lan966x_db { - u64 dataptr; - u64 status; -}; - -struct lan966x_rx_dcb { - u64 nextptr; - u64 info; - struct lan966x_db db[FDMA_RX_DCB_MAX_DBS]; -}; - -struct lan966x_tx_dcb { - u64 nextptr; - u64 info; - struct lan966x_db db[FDMA_TX_DCB_MAX_DBS]; -}; - struct lan966x_rx { struct lan966x *lan966x; - /* Pointer to the array of hardware dcbs. */ - struct lan966x_rx_dcb *dcbs; - - /* Pointer to the last address in the dcbs. */ - struct lan966x_rx_dcb *last_entry; + struct fdma fdma; /* For each DB, there is a page */ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; - /* Represents the db_index, it can have a value between 0 and - * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS - * it means that the DCB can be reused. - */ - int db_index; - - /* Represents the index in the dcbs. It has a value between 0 and - * FDMA_DCB_MAX - */ - int dcb_index; - - /* Represents the dma address to the dcbs array */ - dma_addr_t dma; - /* Represents the page order that is used to allocate the pages for the * RX buffers. This value is calculated based on max MTU of the devices. */ @@ -252,8 +209,6 @@ struct lan966x_rx { */ u32 max_mtu; - u8 channel_id; - struct page_pool *page_pool; }; @@ -275,18 +230,11 @@ struct lan966x_tx_dcb_buf { struct lan966x_tx { struct lan966x *lan966x; - /* Pointer to the dcb list */ - struct lan966x_tx_dcb *dcbs; - u16 last_in_use; - - /* Represents the DMA address to the first entry of the dcb entries. */ - dma_addr_t dma; + struct fdma fdma; /* Array of dcbs that are given to the HW */ struct lan966x_tx_dcb_buf *dcbs_buf; - u8 channel_id; - bool activated; }; diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index f58c506bda228c..3f04992eace6ab 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -10,6 +10,7 @@ config SPARX5_SWITCH select PHY_SPARX5_SERDES select RESET_CONTROLLER select VCAP + select FDMA help This driver supports the Sparx5 network switch device. diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index b68fe9c9a656dd..288de95add188d 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -18,3 +18,4 @@ sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o # Provide include files ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap +ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index 4f800c1a435d98..d898a7238b4879 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -1194,16 +1194,13 @@ static int sparx5_get_ts_info(struct net_device *dev, phc = &sparx5->phc[SPARX5_PHC_PORT]; - info->phc_index = phc->clock ? 
ptp_clock_index(phc->clock) : -1; - if (info->phc_index == -1) { - info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + if (phc->clock) { + info->phc_index = ptp_clock_index(phc->clock); + } else { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c index 1915998f60796a..61df874b762356 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -21,107 +21,51 @@ #define FDMA_XTR_CHANNEL 6 #define FDMA_INJ_CHANNEL 0 -#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) -#define FDMA_DCB_INFO_TOKEN BIT(17) -#define FDMA_DCB_INFO_INTR BIT(18) -#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24)) - -#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) -#define FDMA_DCB_STATUS_SOF BIT(16) -#define FDMA_DCB_STATUS_EOF BIT(17) -#define FDMA_DCB_STATUS_INTR BIT(18) -#define FDMA_DCB_STATUS_DONE BIT(19) -#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) -#define FDMA_DCB_INVALID_DATA 0x1 - #define FDMA_XTR_BUFFER_SIZE 2048 #define FDMA_WEIGHT 4 -/* Frame DMA DCB format - * - * +---------------------------+ - * | Next Ptr | - * +---------------------------+ - * | Reserved | Info | - * +---------------------------+ - * | Data0 Ptr | - * +---------------------------+ - * | Reserved | Status0 | - * +---------------------------+ - * | Data1 Ptr | - * +---------------------------+ - * | Reserved | Status1 | - * +---------------------------+ - * | Data2 Ptr | - * +---------------------------+ - * | Reserved | Status2 | - * |-------------|-------------| - * | | - * | | - * | | - * | | - * | | - * |---------------------------| - * | Data14 Ptr | - * +-------------|-------------+ - * | Reserved | Status14 | - * +-------------|-------------+ - */ - -/* For each hardware DB there is an entry in this list and when the HW DB - * entry is used, this SW DB entry is moved to the back of the list - */ -struct sparx5_db { - struct list_head list; - void *cpu_addr; -}; - -static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx, - struct sparx5_rx_dcb_hw *dcb, - u64 nextptr) +static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) { - int idx = 0; - - /* Reset the status of the DB */ - for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) { - struct sparx5_db_hw *db = &dcb->db[idx]; + *dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) + + ((dcb * fdma->n_dbs + db) * fdma->db_size); - db->status = FDMA_DCB_STATUS_INTR; - } - dcb->nextptr = FDMA_DCB_INVALID_DATA; - dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); - rx->last_entry->nextptr = nextptr; - rx->last_entry = dcb; + return 0; } -static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx, - struct sparx5_tx_dcb_hw *dcb, - u64 nextptr) +static int sparx5_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db, + u64 *dataptr) { - int idx = 0; + struct sparx5 *sparx5 = fdma->priv; + struct sparx5_rx *rx = &sparx5->rx; + struct sk_buff *skb; - /* Reset the status of the DB */ - for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) { - struct sparx5_db_hw *db = &dcb->db[idx]; + skb = __netdev_alloc_skb(rx->ndev, fdma->db_size, 
GFP_ATOMIC); + if (unlikely(!skb)) + return -ENOMEM; - db->status = FDMA_DCB_STATUS_DONE; - } - dcb->nextptr = FDMA_DCB_INVALID_DATA; - dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); + *dataptr = virt_to_phys(skb->data); + + rx->skb[dcb][db] = skb; + + return 0; } static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx) { + struct fdma *fdma = &rx->fdma; + /* Write the buffer address in the LLP and LLP1 regs */ - spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5, - FDMA_DCB_LLP(rx->channel_id)); - spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id)); + spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(fdma->channel_id)); + spx5_wr(((u64)fdma->dma) >> 32, sparx5, + FDMA_DCB_LLP1(fdma->channel_id)); /* Set the number of RX DBs to be used, and DB end-of-frame interrupt */ - spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) | FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE), - sparx5, FDMA_CH_CFG(rx->channel_id)); + sparx5, FDMA_CH_CFG(fdma->channel_id)); /* Set the RX Watermark to max */ spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM, @@ -133,22 +77,24 @@ static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx) sparx5, FDMA_PORT_CTRL(0)); /* Enable RX channel DB interrupt */ - spx5_rmw(BIT(rx->channel_id), - BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + spx5_rmw(BIT(fdma->channel_id), + BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, sparx5, FDMA_INTR_DB_ENA); /* Activate the RX channel */ - spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE); + spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE); } static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx) { + struct fdma *fdma = &rx->fdma; + /* Deactivate the RX channel */ - spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + spx5_rmw(0, BIT(fdma->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, sparx5, FDMA_CH_ACTIVATE); /* Disable RX channel DB interrupt */ - spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + spx5_rmw(0, BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, sparx5, FDMA_INTR_DB_ENA); /* Stop RX fdma */ @@ -158,75 +104,55 @@ static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *r static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx) { + struct fdma *fdma = &tx->fdma; + /* Write the buffer address in the LLP and LLP1 regs */ - spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5, - FDMA_DCB_LLP(tx->channel_id)); - spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id)); + spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(fdma->channel_id)); + spx5_wr(((u64)fdma->dma) >> 32, sparx5, + FDMA_DCB_LLP1(fdma->channel_id)); /* Set the number of TX DBs to be used, and DB end-of-frame interrupt */ - spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) | FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE), - sparx5, FDMA_CH_CFG(tx->channel_id)); + sparx5, FDMA_CH_CFG(fdma->channel_id)); /* Start TX fdma */ spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP, sparx5, FDMA_PORT_CTRL(0)); /* Activate the channel */ - spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE); + spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE); } static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, 
struct sparx5_tx *tx) { /* Disable the channel */ - spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + spx5_rmw(0, BIT(tx->fdma.channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, sparx5, FDMA_CH_ACTIVATE); } -static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx) +static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma) { /* Reload the RX channel */ - spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD); -} - -static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx) -{ - /* Reload the TX channel */ - spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD); -} - -static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx) -{ - return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE, - GFP_ATOMIC); + spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD); } static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx) { - struct sparx5_db_hw *db_hw; - unsigned int packet_size; + struct fdma *fdma = &rx->fdma; struct sparx5_port *port; - struct sk_buff *new_skb; + struct fdma_db *db_hw; struct frame_info fi; struct sk_buff *skb; - dma_addr_t dma_addr; /* Check if the DCB is done */ - db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index]; - if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE))) - return false; - skb = rx->skb[rx->dcb_index][rx->db_index]; - /* Replace the DB entry with a new SKB */ - new_skb = sparx5_fdma_rx_alloc_skb(rx); - if (unlikely(!new_skb)) + db_hw = fdma_db_next_get(fdma); + if (unlikely(!fdma_db_is_done(db_hw))) return false; - /* Map the new skb data and set the new skb */ - dma_addr = virt_to_phys(new_skb->data); - rx->skb[rx->dcb_index][rx->db_index] = new_skb; - db_hw->dataptr = dma_addr; - packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status); - skb_put(skb, packet_size); + skb = rx->skb[fdma->dcb_index][fdma->db_index]; + skb_put(skb, fdma_db_len_get(db_hw)); /* Now do the normal processing of the skb */ sparx5_ifh_parse((u32 *)skb->data, &fi); /* Map to port netdev */ @@ -259,84 +185,62 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight) { struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi); struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx); + struct fdma *fdma = &rx->fdma; int counter = 0; while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) { - struct sparx5_rx_dcb_hw *old_dcb; - - rx->db_index++; + fdma_db_advance(fdma); counter++; /* Check if the DCB can be reused */ - if (rx->db_index != FDMA_RX_DCB_MAX_DBS) + if (fdma_dcb_is_reusable(fdma)) continue; - /* As the DCB can be reused, just advance the dcb_index - * pointer and set the nextptr in the DCB - */ - rx->db_index = 0; - old_dcb = &rx->dcb_entries[rx->dcb_index]; - rx->dcb_index++; - rx->dcb_index &= FDMA_DCB_MAX - 1; - sparx5_fdma_rx_add_dcb(rx, old_dcb, - rx->dma + - ((unsigned long)old_dcb - - (unsigned long)rx->dcb_entries)); + fdma_dcb_add(fdma, fdma->dcb_index, + FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_INTR); + fdma_db_reset(fdma); + fdma_dcb_advance(fdma); } if (counter < weight) { napi_complete_done(&rx->napi, counter); - spx5_rmw(BIT(rx->channel_id), - BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + spx5_rmw(BIT(fdma->channel_id), + BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, sparx5, FDMA_INTR_DB_ENA); } if (counter) - sparx5_fdma_rx_reload(sparx5, rx); + sparx5_fdma_reload(sparx5, fdma); return counter; } -static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx, - struct 
sparx5_tx_dcb_hw *dcb) -{ - struct sparx5_tx_dcb_hw *next_dcb; - - next_dcb = dcb; - next_dcb++; - /* Handle wrap-around */ - if ((unsigned long)next_dcb >= - ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb))) - next_dcb = tx->first_entry; - return next_dcb; -} - int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb) { - struct sparx5_tx_dcb_hw *next_dcb_hw; struct sparx5_tx *tx = &sparx5->tx; + struct fdma *fdma = &tx->fdma; static bool first_time = true; - struct sparx5_db_hw *db_hw; - struct sparx5_db *db; + void *virt_addr; - next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry); - db_hw = &next_dcb_hw->db[0]; - if (!(db_hw->status & FDMA_DCB_STATUS_DONE)) + fdma_dcb_advance(fdma); + if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0))) return -EINVAL; - db = list_first_entry(&tx->db_list, struct sparx5_db, list); - list_move_tail(&db->list, &tx->db_list); - next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA; - tx->curr_entry->nextptr = tx->dma + - ((unsigned long)next_dcb_hw - - (unsigned long)tx->first_entry); - tx->curr_entry = next_dcb_hw; - memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE); - memcpy(db->cpu_addr, ifh, IFH_LEN * 4); - memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len); - db_hw->status = FDMA_DCB_STATUS_SOF | - FDMA_DCB_STATUS_EOF | - FDMA_DCB_STATUS_BLOCKO(0) | - FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4); + + /* Get the virtual address of the dataptr for the next DB */ + virt_addr = ((u8 *)fdma->dcbs + + (sizeof(struct fdma_dcb) * fdma->n_dcbs) + + ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size)); + + memcpy(virt_addr, ifh, IFH_LEN * 4); + memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len); + + fdma_dcb_add(fdma, fdma->dcb_index, 0, + FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4)); + if (first_time) { sparx5_fdma_tx_activate(sparx5, tx); first_time = false; } else { - sparx5_fdma_tx_reload(sparx5, tx); + sparx5_fdma_reload(sparx5, fdma); } return NETDEV_TX_OK; } @@ -344,43 +248,16 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb) static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5) { struct sparx5_rx *rx = &sparx5->rx; - struct sparx5_rx_dcb_hw *dcb; - int idx, jdx; - int size; - - size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); - if (!rx->dcb_entries) - return -ENOMEM; - rx->dma = virt_to_phys(rx->dcb_entries); - rx->last_entry = rx->dcb_entries; - rx->db_index = 0; - rx->dcb_index = 0; - /* Now for each dcb allocate the db */ - for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { - dcb = &rx->dcb_entries[idx]; - dcb->info = 0; - /* For each db allocate an skb and map skb data pointer to the DB - * dataptr. 
In this way when the frame is received the skb->data - * will contain the frame, so no memcpy is needed - */ - for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) { - struct sparx5_db_hw *db_hw = &dcb->db[jdx]; - dma_addr_t dma_addr; - struct sk_buff *skb; - - skb = sparx5_fdma_rx_alloc_skb(rx); - if (!skb) - return -ENOMEM; - - dma_addr = virt_to_phys(skb->data); - db_hw->dataptr = dma_addr; - db_hw->status = 0; - rx->skb[idx][jdx] = skb; - } - sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx); - } + struct fdma *fdma = &rx->fdma; + int err; + + err = fdma_alloc_phys(fdma); + if (err) + return err; + + fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_INTR); + netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, FDMA_WEIGHT); napi_enable(&rx->napi); @@ -391,57 +268,33 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5) static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5) { struct sparx5_tx *tx = &sparx5->tx; - struct sparx5_tx_dcb_hw *dcb; - int idx, jdx; - int size; - - size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); - if (!tx->curr_entry) - return -ENOMEM; - tx->dma = virt_to_phys(tx->curr_entry); - tx->first_entry = tx->curr_entry; - INIT_LIST_HEAD(&tx->db_list); - /* Now for each dcb allocate the db */ - for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { - dcb = &tx->curr_entry[idx]; - dcb->info = 0; - /* TX databuffers must be 16byte aligned */ - for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) { - struct sparx5_db_hw *db_hw = &dcb->db[jdx]; - struct sparx5_db *db; - dma_addr_t phys; - void *cpu_addr; - - cpu_addr = devm_kzalloc(sparx5->dev, - FDMA_XTR_BUFFER_SIZE, - GFP_KERNEL); - if (!cpu_addr) - return -ENOMEM; - phys = virt_to_phys(cpu_addr); - db_hw->dataptr = phys; - db_hw->status = 0; - db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL); - if (!db) - return -ENOMEM; - db->cpu_addr = cpu_addr; - list_add_tail(&db->list, &tx->db_list); - } - sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx); - /* Let the curr_entry to point to the last allocated entry */ - if (idx == FDMA_DCB_MAX - 1) - tx->curr_entry = dcb; - } + struct fdma *fdma = &tx->fdma; + int err; + + err = fdma_alloc_phys(fdma); + if (err) + return err; + + fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size), + FDMA_DCB_STATUS_DONE); + return 0; } static void sparx5_fdma_rx_init(struct sparx5 *sparx5, struct sparx5_rx *rx, int channel) { + struct fdma *fdma = &rx->fdma; int idx; - rx->channel_id = channel; + fdma->channel_id = channel; + fdma->n_dcbs = FDMA_DCB_MAX; + fdma->n_dbs = FDMA_RX_DCB_MAX_DBS; + fdma->priv = sparx5; + fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE); + fdma->size = fdma_get_size(&sparx5->rx.fdma); + fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb; + fdma->ops.nextptr_cb = &fdma_nextptr_cb; /* Fetch a netdev for SKB and NAPI use, any will do */ for (idx = 0; idx < SPX5_PORTS; ++idx) { struct sparx5_port *port = sparx5->ports[idx]; @@ -456,7 +309,16 @@ static void sparx5_fdma_rx_init(struct sparx5 *sparx5, static void sparx5_fdma_tx_init(struct sparx5 *sparx5, struct sparx5_tx *tx, int channel) { - tx->channel_id = channel; + struct fdma *fdma = &tx->fdma; + + fdma->channel_id = channel; + fdma->n_dcbs = FDMA_DCB_MAX; + fdma->n_dbs = FDMA_TX_DCB_MAX_DBS; + fdma->priv = sparx5; + fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE); + fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma); + fdma->ops.dataptr_cb = 
&sparx5_fdma_tx_dataptr_cb; + fdma->ops.nextptr_cb = &fdma_nextptr_cb; } irqreturn_t sparx5_fdma_handler(int irq, void *args) @@ -594,5 +456,7 @@ int sparx5_fdma_stop(struct sparx5 *sparx5) read_poll_timeout(sparx5_fdma_port_ctrl, val, FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0, 500, 10000, 0, sparx5); + fdma_free_phys(&sparx5->rx.fdma); + fdma_free_phys(&sparx5->tx.fdma); return 0; } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 1982ae03b4feb8..3309060b1e4c6e 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -20,6 +20,8 @@ #include #include +#include + #include "sparx5_main_regs.h" /* Target chip type */ @@ -100,23 +102,6 @@ enum sparx5_vlan_port_type { struct sparx5; -struct sparx5_db_hw { - u64 dataptr; - u64 status; -}; - -struct sparx5_rx_dcb_hw { - u64 nextptr; - u64 info; - struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS]; -}; - -struct sparx5_tx_dcb_hw { - u64 nextptr; - u64 info; - struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS]; -}; - /* Frame DMA receive state: * For each DB, there is a SKB, and the skb data pointer is mapped in * the DB. Once a frame is received the skb is given to the upper layers @@ -124,14 +109,10 @@ struct sparx5_tx_dcb_hw { * When the db_index reached FDMA_RX_DCB_MAX_DBS the DB is reused. */ struct sparx5_rx { - struct sparx5_rx_dcb_hw *dcb_entries; - struct sparx5_rx_dcb_hw *last_entry; + struct fdma fdma; struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; - int db_index; - int dcb_index; dma_addr_t dma; struct napi_struct napi; - u32 channel_id; struct net_device *ndev; u64 packets; }; @@ -140,11 +121,7 @@ struct sparx5_rx { * DCBs are chained using the DCBs nextptr field. 
*/ struct sparx5_tx { - struct sparx5_tx_dcb_hw *curr_entry; - struct sparx5_tx_dcb_hw *first_entry; - struct list_head db_list; - dma_addr_t dma; - u32 channel_id; + struct fdma fdma; u64 packets; u64 dropped; }; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index f3f5fb4204689b..70427643f777c0 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -45,8 +45,12 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info) fwd = (fwd >> 5); info->src_port = FIELD_GET(GENMASK(7, 1), fwd); + /* + * Bit 270-271 are occasionally unexpectedly set by the hardware, + * clear bits before extracting timestamp + */ info->timestamp = - ((u64)xtr_hdr[2] << 24) | + ((u64)(xtr_hdr[2] & GENMASK(5, 0)) << 24) | ((u64)xtr_hdr[3] << 16) | ((u64)xtr_hdr[4] << 8) | ((u64)xtr_hdr[5] << 0); diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index ddb8f68d80a206..ca4ed58f1206dd 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto release_region; - err = dma_set_max_seg_size(&pdev->dev, UINT_MAX); - if (err) { - dev_err(&pdev->dev, "Failed to set dma device segment size\n"); - goto release_region; - } + dma_set_max_seg_size(&pdev->dev, UINT_MAX); err = -ENOMEM; gc = vzalloc(sizeof(*gc)); diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 3d151700f6580c..c47266d1c7c279 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -511,7 +511,7 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb, } /* Release pre-allocated RX buffers */ -static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc) +void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc) { struct device *dev; int i; @@ -608,7 +608,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size, *datasize = mtu + ETH_HLEN; } -static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu) +int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues) { struct device *dev; struct page *page; @@ -622,7 +622,7 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu) dev = mpc->ac->gdma_dev->gdma_context->dev; - num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE; + num_rxb = num_queues * mpc->rx_queue_size; WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n"); mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL); @@ -682,7 +682,7 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu) int err; /* Pre-allocate buffers to prevent failure in mana_attach later */ - err = mana_pre_alloc_rxbufs(mpc, new_mtu); + err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues); if (err) { netdev_err(ndev, "Insufficient memory for new MTU\n"); return err; @@ -1911,15 +1911,17 @@ static int mana_create_txq(struct mana_port_context *apc, return -ENOMEM; /* The minimum size of the WQE is 32 bytes, hence - * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs + * apc->tx_queue_size represents the maximum number of WQEs * the SQ can store. This value is then used to size other queues * to prevent overflow. 
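/* Editor's aside (illustrative, not part of the patch): a minimal sketch of
 * the page-alignment argument spelled out in the rest of this comment,
 * assuming MANA pages are 4 KiB and ring sizes are the powers of two
 * enforced by the new ethtool ringparam path. The helper name is
 * hypothetical.
 */
static inline bool example_mana_txq_size_is_page_aligned(u32 tx_queue_size)
{
	/* 128 entries (the minimum) * 32-byte WQEs = 4096 bytes = one page;
	 * larger power-of-two ring sizes only add whole pages.
	 */
	return is_power_of_2(tx_queue_size) && tx_queue_size >= 128 &&
	       (tx_queue_size * 32) % SZ_4K == 0;
}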
+ * Also note that the txq_size is always going to be MANA_PAGE_ALIGNED, + * as min val of apc->tx_queue_size is 128 and that would make + * txq_size 128*32 = 4096 and the other higher values of apc->tx_queue_size + * are always power of two */ - txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32; - BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size)); + txq_size = apc->tx_queue_size * 32; - cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE; - cq_size = MANA_PAGE_ALIGN(cq_size); + cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE; gc = gd->gdma_context; @@ -2159,10 +2161,11 @@ static int mana_push_wqe(struct mana_rxq *rxq) static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) { + struct mana_port_context *mpc = netdev_priv(rxq->ndev); struct page_pool_params pprm = {}; int ret; - pprm.pool_size = RX_BUFFERS_PER_QUEUE; + pprm.pool_size = mpc->rx_queue_size; pprm.nid = gc->numa_node; pprm.napi = &rxq->rx_cq.napi; pprm.netdev = rxq->ndev; @@ -2194,13 +2197,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, gc = gd->gdma_context; - rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE), + rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size), GFP_KERNEL); if (!rxq) return NULL; rxq->ndev = ndev; - rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; + rxq->num_rx_buf = apc->rx_queue_size; rxq->rxq_idx = rxq_idx; rxq->rxobj = INVALID_MANA_HANDLE; @@ -2748,6 +2751,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, apc->ndev = ndev; apc->max_queues = gc->max_num_queues; apc->num_queues = gc->max_num_queues; + apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE; + apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE; apc->port_handle = INVALID_MANA_HANDLE; apc->pf_filter_handle = INVALID_MANA_HANDLE; apc->port_idx = port_idx; diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index 146d5db1792fdb..dc386437753846 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -345,27 +345,101 @@ static int mana_set_channels(struct net_device *ndev, struct mana_port_context *apc = netdev_priv(ndev); unsigned int new_count = channels->combined_count; unsigned int old_count = apc->num_queues; - int err, err2; + int err; + + err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count); + if (err) { + netdev_err(ndev, "Insufficient memory for new allocations"); + return err; + } err = mana_detach(ndev, false); if (err) { netdev_err(ndev, "mana_detach failed: %d\n", err); - return err; + goto out; } apc->num_queues = new_count; err = mana_attach(ndev); - if (!err) - return 0; + if (err) { + apc->num_queues = old_count; + netdev_err(ndev, "mana_attach failed: %d\n", err); + } + +out: + mana_pre_dealloc_rxbufs(apc); + return err; +} + +static void mana_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct mana_port_context *apc = netdev_priv(ndev); + + ring->rx_pending = apc->rx_queue_size; + ring->tx_pending = apc->tx_queue_size; + ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE; + ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE; +} + +static int mana_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct mana_port_context *apc = netdev_priv(ndev); + u32 new_tx, new_rx; + u32 old_tx, old_rx; + int err; - netdev_err(ndev, 
"mana_attach failed: %d\n", err); + old_tx = apc->tx_queue_size; + old_rx = apc->rx_queue_size; - /* Try to roll it back to the old configuration. */ - apc->num_queues = old_count; - err2 = mana_attach(ndev); - if (err2) - netdev_err(ndev, "mana re-attach failed: %d\n", err2); + if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) { + NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d", ring->tx_pending, + MIN_TX_BUFFERS_PER_QUEUE); + return -EINVAL; + } + + if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) { + NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d", ring->rx_pending, + MIN_RX_BUFFERS_PER_QUEUE); + return -EINVAL; + } + + new_rx = roundup_pow_of_two(ring->rx_pending); + new_tx = roundup_pow_of_two(ring->tx_pending); + netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n", + new_tx, new_rx); + + /* pre-allocating new buffers to prevent failures in mana_attach() later */ + apc->rx_queue_size = new_rx; + err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues); + apc->rx_queue_size = old_rx; + if (err) { + netdev_err(ndev, "Insufficient memory for new allocations\n"); + return err; + } + err = mana_detach(ndev, false); + if (err) { + netdev_err(ndev, "mana_detach failed: %d\n", err); + goto out; + } + + apc->tx_queue_size = new_tx; + apc->rx_queue_size = new_rx; + + err = mana_attach(ndev); + if (err) { + netdev_err(ndev, "mana_attach failed: %d\n", err); + apc->tx_queue_size = old_tx; + apc->rx_queue_size = old_rx; + } +out: + mana_pre_dealloc_rxbufs(apc); return err; } @@ -380,4 +454,6 @@ const struct ethtool_ops mana_ethtool_ops = { .set_rxfh = mana_set_rxfh, .get_channels = mana_get_channels, .set_channels = mana_set_channels, + .get_ringparam = mana_get_ringparam, + .set_ringparam = mana_set_ringparam, }; diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index b3c28260adf880..e172638b060102 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -582,17 +582,13 @@ EXPORT_SYMBOL(ocelot_hwstamp_set); int ocelot_get_ts_info(struct ocelot *ocelot, int port, struct kernel_ethtool_ts_info *info) { - info->phc_index = ocelot->ptp_clock ? 
- ptp_clock_index(ocelot->ptp_clock) : -1; - if (info->phc_index == -1) { - info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + if (ocelot->ptp_clock) { + info->phc_index = ptp_clock_index(ocelot->ptp_clock); + } else { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index df2ab5cbd49bd1..3a02eef58cc6d9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -4537,8 +4537,8 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) u64 *prog; int err; - prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64), - GFP_KERNEL); + prog = kmemdup_array(nfp_prog->prog, nfp_prog->prog_len, sizeof(u64), + GFP_KERNEL); if (!prog) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c index cc54faca2283b9..515069d5637b0d 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 182ba0a8b095bc..6e0929af0f725b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -821,14 +821,13 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, snprintf(r_vec->name, sizeof(r_vec->name), "%s-rxtx-%d", nfp_net_name(nn), idx); - err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, - r_vec); + err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN, + r_vec->name, r_vec); if (err) { nfp_net_napi_del(&nn->dp, r_vec); nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector); return err; } - disable_irq(r_vec->irq_vector); irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c index 2dd37557185e9a..7276e44a21d09e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c @@ -41,6 +41,8 @@ struct nfp_dump_tl { ); char data[]; }; +static_assert(offsetof(struct nfp_dump_tl, data) == sizeof(struct nfp_dump_tl_hdr), + "struct member likely outside of struct_group_tagged()"); /* NFP CPP parameters */ struct nfp_dumpspec_cpp_isl_id { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index eee0bfc41074ee..227e7a5d712e37 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -248,7 +248,6 @@ nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features) features = netdev_intersect_features(features, lower_features); features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC); - features |= NETIF_F_LLTX; return features; } @@ -386,7 +385,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, 
netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS); netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; - netdev->features |= NETIF_F_LLTX; + netdev->lltx = true; if (nfp_app_has_tc(app)) { netdev->features |= NETIF_F_HW_TC; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 3f10c5365c80eb..7c2200b49ce42c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -15,7 +15,7 @@ * abstraction builds upon this BAR interface. */ -#include +#include #include #include #include diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index a8286d0032d1e6..669f9f8fb5079e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -9,7 +9,7 @@ * Rolf Neugebauer */ -#include +#include #include #include #include diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c index 508ae6b571ca61..addf02c63b1a09 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -9,7 +9,7 @@ * Rolf Neugebauer */ -#include +#include #include #include #include diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c index f05dd34ab89ff2..cfa4db5d3f8572 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c @@ -15,7 +15,7 @@ */ #include -#include +#include #include #include #include diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 7136bc48530ba0..0bd6477292a63c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -7,7 +7,7 @@ * Jason McMullan */ -#include +#include #include #include #include @@ -278,7 +278,7 @@ struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp) res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP); if (IS_ERR(res)) - return (void *)res; + return ERR_CAST(res); state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 2260c2403a83a1..68862ac062d2b8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -10,7 +10,7 @@ * Francois H. 
Theron */ -#include +#include #include #include #include diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c new file mode 100644 index 00000000000000..f9c0dcd965c2e7 --- /dev/null +++ b/drivers/net/ethernet/oa_tc6.c @@ -0,0 +1,1361 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * OPEN Alliance 10BASE‑T1x MAC‑PHY Serial Interface framework + * + * Author: Parthiban Veerasooran + */ + +#include +#include +#include +#include +#include + +/* OPEN Alliance TC6 registers */ +/* Standard Capabilities Register */ +#define OA_TC6_REG_STDCAP 0x0002 +#define STDCAP_DIRECT_PHY_REG_ACCESS BIT(8) + +/* Reset Control and Status Register */ +#define OA_TC6_REG_RESET 0x0003 +#define RESET_SWRESET BIT(0) /* Software Reset */ + +/* Configuration Register #0 */ +#define OA_TC6_REG_CONFIG0 0x0004 +#define CONFIG0_SYNC BIT(15) +#define CONFIG0_ZARFE_ENABLE BIT(12) + +/* Status Register #0 */ +#define OA_TC6_REG_STATUS0 0x0008 +#define STATUS0_RESETC BIT(6) /* Reset Complete */ +#define STATUS0_HEADER_ERROR BIT(5) +#define STATUS0_LOSS_OF_FRAME_ERROR BIT(4) +#define STATUS0_RX_BUFFER_OVERFLOW_ERROR BIT(3) +#define STATUS0_TX_PROTOCOL_ERROR BIT(0) + +/* Buffer Status Register */ +#define OA_TC6_REG_BUFFER_STATUS 0x000B +#define BUFFER_STATUS_TX_CREDITS_AVAILABLE GENMASK(15, 8) +#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE GENMASK(7, 0) + +/* Interrupt Mask Register #0 */ +#define OA_TC6_REG_INT_MASK0 0x000C +#define INT_MASK0_HEADER_ERR_MASK BIT(5) +#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK BIT(4) +#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK BIT(3) +#define INT_MASK0_TX_PROTOCOL_ERR_MASK BIT(0) + +/* PHY Clause 22 registers base address and mask */ +#define OA_TC6_PHY_STD_REG_ADDR_BASE 0xFF00 +#define OA_TC6_PHY_STD_REG_ADDR_MASK 0x1F + +/* Control command header */ +#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL BIT(31) +#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ BIT(29) +#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR GENMASK(27, 24) +#define OA_TC6_CTRL_HEADER_ADDR GENMASK(23, 8) +#define OA_TC6_CTRL_HEADER_LENGTH GENMASK(7, 1) +#define OA_TC6_CTRL_HEADER_PARITY BIT(0) + +/* Data header */ +#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL BIT(31) +#define OA_TC6_DATA_HEADER_DATA_VALID BIT(21) +#define OA_TC6_DATA_HEADER_START_VALID BIT(20) +#define OA_TC6_DATA_HEADER_START_WORD_OFFSET GENMASK(19, 16) +#define OA_TC6_DATA_HEADER_END_VALID BIT(14) +#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET GENMASK(13, 8) +#define OA_TC6_DATA_HEADER_PARITY BIT(0) + +/* Data footer */ +#define OA_TC6_DATA_FOOTER_EXTENDED_STS BIT(31) +#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD BIT(30) +#define OA_TC6_DATA_FOOTER_CONFIG_SYNC BIT(29) +#define OA_TC6_DATA_FOOTER_RX_CHUNKS GENMASK(28, 24) +#define OA_TC6_DATA_FOOTER_DATA_VALID BIT(21) +#define OA_TC6_DATA_FOOTER_START_VALID BIT(20) +#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET GENMASK(19, 16) +#define OA_TC6_DATA_FOOTER_END_VALID BIT(14) +#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET GENMASK(13, 8) +#define OA_TC6_DATA_FOOTER_TX_CREDITS GENMASK(5, 1) + +/* PHY – Clause 45 registers memory map selector (MMS) as per table 6 in the + * OPEN Alliance specification. 
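/* Editor's aside (illustrative, not part of the patch): the MMS values below
 * end up in the upper half of the 32-bit register address used by the
 * control commands, which is how the Clause 45 MDIO hooks later in this file
 * build their addresses with "(mms << 16) | regnum". The helper name is
 * hypothetical.
 */
static inline u32 example_oa_tc6_c45_address(u32 mms, u16 regnum)
{
	/* e.g. MMS 5 (Auto-Negotiation), register 0x0001 -> 0x00050001 */
	return (mms << 16) | regnum;
}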
+ */ +#define OA_TC6_PHY_C45_PCS_MMS2 2 /* MMD 3 */ +#define OA_TC6_PHY_C45_PMA_PMD_MMS3 3 /* MMD 1 */ +#define OA_TC6_PHY_C45_VS_PLCA_MMS4 4 /* MMD 31 */ +#define OA_TC6_PHY_C45_AUTO_NEG_MMS5 5 /* MMD 7 */ +#define OA_TC6_PHY_C45_POWER_UNIT_MMS6 6 /* MMD 13 */ + +#define OA_TC6_CTRL_HEADER_SIZE 4 +#define OA_TC6_CTRL_REG_VALUE_SIZE 4 +#define OA_TC6_CTRL_IGNORED_SIZE 4 +#define OA_TC6_CTRL_MAX_REGISTERS 128 +#define OA_TC6_CTRL_SPI_BUF_SIZE (OA_TC6_CTRL_HEADER_SIZE +\ + (OA_TC6_CTRL_MAX_REGISTERS *\ + OA_TC6_CTRL_REG_VALUE_SIZE) +\ + OA_TC6_CTRL_IGNORED_SIZE) +#define OA_TC6_CHUNK_PAYLOAD_SIZE 64 +#define OA_TC6_DATA_HEADER_SIZE 4 +#define OA_TC6_CHUNK_SIZE (OA_TC6_DATA_HEADER_SIZE +\ + OA_TC6_CHUNK_PAYLOAD_SIZE) +#define OA_TC6_MAX_TX_CHUNKS 48 +#define OA_TC6_SPI_DATA_BUF_SIZE (OA_TC6_MAX_TX_CHUNKS *\ + OA_TC6_CHUNK_SIZE) +#define STATUS0_RESETC_POLL_DELAY 1000 +#define STATUS0_RESETC_POLL_TIMEOUT 1000000 + +/* Internal structure for MAC-PHY drivers */ +struct oa_tc6 { + struct device *dev; + struct net_device *netdev; + struct phy_device *phydev; + struct mii_bus *mdiobus; + struct spi_device *spi; + struct mutex spi_ctrl_lock; /* Protects spi control transfer */ + void *spi_ctrl_tx_buf; + void *spi_ctrl_rx_buf; + void *spi_data_tx_buf; + void *spi_data_rx_buf; + struct sk_buff *ongoing_tx_skb; + struct sk_buff *waiting_tx_skb; + struct sk_buff *rx_skb; + struct task_struct *spi_thread; + wait_queue_head_t spi_wq; + u16 tx_skb_offset; + u16 spi_data_tx_buf_offset; + u16 tx_credits; + u8 rx_chunks_available; + bool rx_buf_overflow; + bool int_flag; +}; + +enum oa_tc6_header_type { + OA_TC6_CTRL_HEADER, + OA_TC6_DATA_HEADER, +}; + +enum oa_tc6_register_op { + OA_TC6_CTRL_REG_READ = 0, + OA_TC6_CTRL_REG_WRITE = 1, +}; + +enum oa_tc6_data_valid_info { + OA_TC6_DATA_INVALID, + OA_TC6_DATA_VALID, +}; + +enum oa_tc6_data_start_valid_info { + OA_TC6_DATA_START_INVALID, + OA_TC6_DATA_START_VALID, +}; + +enum oa_tc6_data_end_valid_info { + OA_TC6_DATA_END_INVALID, + OA_TC6_DATA_END_VALID, +}; + +static int oa_tc6_spi_transfer(struct oa_tc6 *tc6, + enum oa_tc6_header_type header_type, u16 length) +{ + struct spi_transfer xfer = { 0 }; + struct spi_message msg; + + if (header_type == OA_TC6_DATA_HEADER) { + xfer.tx_buf = tc6->spi_data_tx_buf; + xfer.rx_buf = tc6->spi_data_rx_buf; + } else { + xfer.tx_buf = tc6->spi_ctrl_tx_buf; + xfer.rx_buf = tc6->spi_ctrl_rx_buf; + } + xfer.len = length; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(tc6->spi, &msg); +} + +static int oa_tc6_get_parity(u32 p) +{ + /* Public domain code snippet, lifted from + * http://www-graphics.stanford.edu/~seander/bithacks.html + */ + p ^= p >> 1; + p ^= p >> 2; + p = (p & 0x11111111U) * 0x11111111U; + + /* Odd parity is used here */ + return !((p >> 28) & 1); +} + +static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length, + enum oa_tc6_register_op reg_op) +{ + u32 header; + + header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL, + OA_TC6_CTRL_HEADER) | + FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) | + FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) | + FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) | + FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1); + header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY, + oa_tc6_get_parity(header)); + + return cpu_to_be32(header); +} + +static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[], + u8 length) +{ + __be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE; + + for (int i = 0; i < length; i++) + 
*tx_buf++ = cpu_to_be32(value[i]); +} + +static u16 oa_tc6_calculate_ctrl_buf_size(u8 length) +{ + /* Control command consists 4 bytes header + 4 bytes register value for + * each register + 4 bytes ignored value. + */ + return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length + + OA_TC6_CTRL_IGNORED_SIZE; +} + +static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address, + u32 value[], u8 length, + enum oa_tc6_register_op reg_op) +{ + __be32 *tx_buf = tc6->spi_ctrl_tx_buf; + + *tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op); + + if (reg_op == OA_TC6_CTRL_REG_WRITE) + oa_tc6_update_ctrl_write_data(tc6, value, length); +} + +static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size) +{ + u8 *tx_buf = tc6->spi_ctrl_tx_buf; + u8 *rx_buf = tc6->spi_ctrl_rx_buf; + + rx_buf += OA_TC6_CTRL_IGNORED_SIZE; + + /* The echoed control write must match with the one that was + * transmitted. + */ + if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE)) + return -EPROTO; + + return 0; +} + +static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size) +{ + u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE; + u32 *tx_buf = tc6->spi_ctrl_tx_buf; + + /* The echoed control read header must match with the one that was + * transmitted. + */ + if (*tx_buf != *rx_buf) + return -EPROTO; + + return 0; +} + +static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[], + u8 length) +{ + __be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE + + OA_TC6_CTRL_HEADER_SIZE; + + for (int i = 0; i < length; i++) + value[i] = be32_to_cpu(*rx_buf++); +} + +static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[], + u8 length, enum oa_tc6_register_op reg_op) +{ + u16 size; + int ret; + + /* Prepare control command and copy to SPI control buffer */ + oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op); + + size = oa_tc6_calculate_ctrl_buf_size(length); + + /* Perform SPI transfer */ + ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size); + if (ret) { + dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n", + ret); + return ret; + } + + /* Check echoed/received control write command reply for errors */ + if (reg_op == OA_TC6_CTRL_REG_WRITE) + return oa_tc6_check_ctrl_write_reply(tc6, size); + + /* Check echoed/received control read command reply for errors */ + ret = oa_tc6_check_ctrl_read_reply(tc6, size); + if (ret) + return ret; + + oa_tc6_copy_ctrl_read_data(tc6, value, length); + + return 0; +} + +/** + * oa_tc6_read_registers - function for reading multiple consecutive registers. + * @tc6: oa_tc6 struct. + * @address: address of the first register to be read in the MAC-PHY. + * @value: values to be read from the starting register address @address. + * @length: number of consecutive registers to be read from @address. + * + * Maximum of 128 consecutive registers can be read starting at @address. + * + * Return: 0 on success otherwise failed. + */ +int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[], + u8 length) +{ + int ret; + + if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) { + dev_err(&tc6->spi->dev, "Invalid register length parameter\n"); + return -EINVAL; + } + + mutex_lock(&tc6->spi_ctrl_lock); + ret = oa_tc6_perform_ctrl(tc6, address, value, length, + OA_TC6_CTRL_REG_READ); + mutex_unlock(&tc6->spi_ctrl_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(oa_tc6_read_registers); + +/** + * oa_tc6_read_register - function for reading a MAC-PHY register. 
+ * @tc6: oa_tc6 struct. + * @address: register address of the MAC-PHY to be read. + * @value: value read from the @address register address of the MAC-PHY. + * + * Return: 0 on success otherwise failed. + */ +int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value) +{ + return oa_tc6_read_registers(tc6, address, value, 1); +} +EXPORT_SYMBOL_GPL(oa_tc6_read_register); + +/** + * oa_tc6_write_registers - function for writing multiple consecutive registers. + * @tc6: oa_tc6 struct. + * @address: address of the first register to be written in the MAC-PHY. + * @value: values to be written from the starting register address @address. + * @length: number of consecutive registers to be written from @address. + * + * Maximum of 128 consecutive registers can be written starting at @address. + * + * Return: 0 on success otherwise failed. + */ +int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[], + u8 length) +{ + int ret; + + if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) { + dev_err(&tc6->spi->dev, "Invalid register length parameter\n"); + return -EINVAL; + } + + mutex_lock(&tc6->spi_ctrl_lock); + ret = oa_tc6_perform_ctrl(tc6, address, value, length, + OA_TC6_CTRL_REG_WRITE); + mutex_unlock(&tc6->spi_ctrl_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(oa_tc6_write_registers); + +/** + * oa_tc6_write_register - function for writing a MAC-PHY register. + * @tc6: oa_tc6 struct. + * @address: register address of the MAC-PHY to be written. + * @value: value to be written in the @address register address of the MAC-PHY. + * + * Return: 0 on success otherwise failed. + */ +int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value) +{ + return oa_tc6_write_registers(tc6, address, &value, 1); +} +EXPORT_SYMBOL_GPL(oa_tc6_write_register); + +static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6) +{ + u32 regval; + int ret; + + ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, ®val); + if (ret) + return ret; + + if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS)) + return -ENODEV; + + return 0; +} + +static void oa_tc6_handle_link_change(struct net_device *netdev) +{ + phy_print_status(netdev->phydev); +} + +static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum) +{ + struct oa_tc6 *tc6 = bus->priv; + u32 regval; + bool ret; + + ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE | + (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK), + ®val); + if (ret) + return ret; + + return regval; +} + +static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + struct oa_tc6 *tc6 = bus->priv; + + return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE | + (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK), + val); +} + +static int oa_tc6_get_phy_c45_mms(int devnum) +{ + switch (devnum) { + case MDIO_MMD_PCS: + return OA_TC6_PHY_C45_PCS_MMS2; + case MDIO_MMD_PMAPMD: + return OA_TC6_PHY_C45_PMA_PMD_MMS3; + case MDIO_MMD_VEND2: + return OA_TC6_PHY_C45_VS_PLCA_MMS4; + case MDIO_MMD_AN: + return OA_TC6_PHY_C45_AUTO_NEG_MMS5; + case MDIO_MMD_POWER_UNIT: + return OA_TC6_PHY_C45_POWER_UNIT_MMS6; + default: + return -EOPNOTSUPP; + } +} + +static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum, + int regnum) +{ + struct oa_tc6 *tc6 = bus->priv; + u32 regval; + int ret; + + ret = oa_tc6_get_phy_c45_mms(devnum); + if (ret < 0) + return ret; + + ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, ®val); + if (ret) + return ret; + + return regval; +} + +static int oa_tc6_mdiobus_write_c45(struct mii_bus 
*bus, int addr, int devnum, + int regnum, u16 val) +{ + struct oa_tc6 *tc6 = bus->priv; + int ret; + + ret = oa_tc6_get_phy_c45_mms(devnum); + if (ret < 0) + return ret; + + return oa_tc6_write_register(tc6, (ret << 16) | regnum, val); +} + +static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6) +{ + int ret; + + tc6->mdiobus = mdiobus_alloc(); + if (!tc6->mdiobus) { + netdev_err(tc6->netdev, "MDIO bus alloc failed\n"); + return -ENOMEM; + } + + tc6->mdiobus->priv = tc6; + tc6->mdiobus->read = oa_tc6_mdiobus_read; + tc6->mdiobus->write = oa_tc6_mdiobus_write; + /* OPEN Alliance 10BASE-T1x compliance MAC-PHYs will have both C22 and + * C45 registers space. If the PHY is discovered via C22 bus protocol it + * assumes it uses C22 protocol and always uses C22 registers indirect + * access to access C45 registers. This is because, we don't have a + * clean separation between C22/C45 register space and C22/C45 MDIO bus + * protocols. Resulting, PHY C45 registers direct access can't be used + * which can save multiple SPI bus access. To support this feature, PHY + * drivers can set .read_mmd/.write_mmd in the PHY driver to call + * .read_c45/.write_c45. Ex: drivers/net/phy/microchip_t1s.c + */ + tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45; + tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45; + tc6->mdiobus->name = "oa-tc6-mdiobus"; + tc6->mdiobus->parent = tc6->dev; + + snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s", + dev_name(&tc6->spi->dev)); + + ret = mdiobus_register(tc6->mdiobus); + if (ret) { + netdev_err(tc6->netdev, "Could not register MDIO bus\n"); + mdiobus_free(tc6->mdiobus); + return ret; + } + + return 0; +} + +static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6) +{ + mdiobus_unregister(tc6->mdiobus); + mdiobus_free(tc6->mdiobus); +} + +static int oa_tc6_phy_init(struct oa_tc6 *tc6) +{ + int ret; + + ret = oa_tc6_check_phy_reg_direct_access_capability(tc6); + if (ret) { + netdev_err(tc6->netdev, + "Direct PHY register access is not supported by the MAC-PHY\n"); + return ret; + } + + ret = oa_tc6_mdiobus_register(tc6); + if (ret) + return ret; + + tc6->phydev = phy_find_first(tc6->mdiobus); + if (!tc6->phydev) { + netdev_err(tc6->netdev, "No PHY found\n"); + oa_tc6_mdiobus_unregister(tc6); + return -ENODEV; + } + + tc6->phydev->is_internal = true; + ret = phy_connect_direct(tc6->netdev, tc6->phydev, + &oa_tc6_handle_link_change, + PHY_INTERFACE_MODE_INTERNAL); + if (ret) { + netdev_err(tc6->netdev, "Can't attach PHY to %s\n", + tc6->mdiobus->id); + oa_tc6_mdiobus_unregister(tc6); + return ret; + } + + phy_attached_info(tc6->netdev->phydev); + + return 0; +} + +static void oa_tc6_phy_exit(struct oa_tc6 *tc6) +{ + phy_disconnect(tc6->phydev); + oa_tc6_mdiobus_unregister(tc6); +} + +static int oa_tc6_read_status0(struct oa_tc6 *tc6) +{ + u32 regval; + int ret; + + ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, ®val); + if (ret) { + dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n", + ret); + return 0; + } + + return regval; +} + +static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6) +{ + u32 regval = RESET_SWRESET; + int ret; + + ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval); + if (ret) + return ret; + + /* Poll for soft reset complete for every 1ms until 1s timeout */ + ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval, + regval & STATUS0_RESETC, + STATUS0_RESETC_POLL_DELAY, + STATUS0_RESETC_POLL_TIMEOUT); + if (ret) + return -ENODEV; + + /* Clear the reset complete status */ + return oa_tc6_write_register(tc6, 
OA_TC6_REG_STATUS0, regval); +} + +static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6) +{ + u32 regval; + int ret; + + ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, ®val); + if (ret) + return ret; + + regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK | + INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK | + INT_MASK0_LOSS_OF_FRAME_ERR_MASK | + INT_MASK0_HEADER_ERR_MASK); + + return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval); +} + +static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6) +{ + u32 value; + int ret; + + ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value); + if (ret) + return ret; + + /* Enable configuration synchronization for data transfer */ + value |= CONFIG0_SYNC; + + return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value); +} + +static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6) +{ + if (tc6->rx_skb) { + tc6->netdev->stats.rx_dropped++; + kfree_skb(tc6->rx_skb); + tc6->rx_skb = NULL; + } +} + +static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6) +{ + if (tc6->ongoing_tx_skb) { + tc6->netdev->stats.tx_dropped++; + kfree_skb(tc6->ongoing_tx_skb); + tc6->ongoing_tx_skb = NULL; + } +} + +static int oa_tc6_process_extended_status(struct oa_tc6 *tc6) +{ + u32 value; + int ret; + + ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value); + if (ret) { + netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n", + ret); + return ret; + } + + /* Clear the error interrupts status */ + ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value); + if (ret) { + netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n", + ret); + return ret; + } + + if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) { + tc6->rx_buf_overflow = true; + oa_tc6_cleanup_ongoing_rx_skb(tc6); + net_err_ratelimited("%s: Receive buffer overflow error\n", + tc6->netdev->name); + return -EAGAIN; + } + if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) { + netdev_err(tc6->netdev, "Transmit protocol error\n"); + return -ENODEV; + } + /* TODO: Currently loss of frame and header errors are treated as + * non-recoverable errors. They will be handled in the next version. + */ + if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) { + netdev_err(tc6->netdev, "Loss of frame error\n"); + return -ENODEV; + } + if (FIELD_GET(STATUS0_HEADER_ERROR, value)) { + netdev_err(tc6->netdev, "Header error\n"); + return -ENODEV; + } + + return 0; +} + +static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer) +{ + /* Process rx chunk footer for the following, + * 1. tx credits + * 2. errors if any from MAC-PHY + * 3. receive chunks available + */ + tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer); + tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS, + footer); + + if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) { + int ret = oa_tc6_process_extended_status(tc6); + + if (ret) + return ret; + } + + /* TODO: Currently received header bad and configuration unsync errors + * are treated as non-recoverable errors. They will be handled in the + * next version. 
+ */ + if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) { + netdev_err(tc6->netdev, "Rxd header bad error\n"); + return -ENODEV; + } + + if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) { + netdev_err(tc6->netdev, "Config unsync error\n"); + return -ENODEV; + } + + return 0; +} + +static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6) +{ + tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev); + tc6->netdev->stats.rx_packets++; + tc6->netdev->stats.rx_bytes += tc6->rx_skb->len; + + netif_rx(tc6->rx_skb); + + tc6->rx_skb = NULL; +} + +static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length) +{ + memcpy(skb_put(tc6->rx_skb, length), payload, length); +} + +static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6) +{ + tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu + + ETH_HLEN + ETH_FCS_LEN); + if (!tc6->rx_skb) { + tc6->netdev->stats.rx_dropped++; + return -ENOMEM; + } + + return 0; +} + +static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload, + u16 size) +{ + int ret; + + ret = oa_tc6_allocate_rx_skb(tc6); + if (ret) + return ret; + + oa_tc6_update_rx_skb(tc6, payload, size); + + oa_tc6_submit_rx_skb(tc6); + + return 0; +} + +static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size) +{ + int ret; + + ret = oa_tc6_allocate_rx_skb(tc6); + if (ret) + return ret; + + oa_tc6_update_rx_skb(tc6, payload, size); + + return 0; +} + +static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size) +{ + oa_tc6_update_rx_skb(tc6, payload, size); + + oa_tc6_submit_rx_skb(tc6); +} + +static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload, + u32 footer) +{ + oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE); +} + +static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data, + u32 footer) +{ + u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET, + footer) * sizeof(u32); + u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET, + footer); + bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer); + bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer); + u16 size; + + /* Restart the new rx frame after receiving rx buffer overflow error */ + if (start_valid && tc6->rx_buf_overflow) + tc6->rx_buf_overflow = false; + + if (tc6->rx_buf_overflow) + return 0; + + /* Process the chunk with complete rx frame */ + if (start_valid && end_valid && start_byte_offset < end_byte_offset) { + size = end_byte_offset + 1 - start_byte_offset; + return oa_tc6_prcs_complete_rx_frame(tc6, + &data[start_byte_offset], + size); + } + + /* Process the chunk with only rx frame start */ + if (start_valid && !end_valid) { + size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset; + return oa_tc6_prcs_rx_frame_start(tc6, + &data[start_byte_offset], + size); + } + + /* Process the chunk with only rx frame end */ + if (end_valid && !start_valid) { + size = end_byte_offset + 1; + oa_tc6_prcs_rx_frame_end(tc6, data, size); + return 0; + } + + /* Process the chunk with previous rx frame end and next rx frame + * start. + */ + if (start_valid && end_valid && start_byte_offset > end_byte_offset) { + /* After rx buffer overflow error received, there might be a + * possibility of getting an end valid of a previously + * incomplete rx frame along with the new rx frame start valid. 
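/* Editor's aside (worked example, not part of the patch): a footer with both
 * START_VALID and END_VALID set, START_WORD_OFFSET = 4 and
 * END_BYTE_OFFSET = 11 describes a chunk whose payload bytes 0..11 finish
 * the previous frame and whose bytes 16..63 start the next one, i.e.
 * start_byte_offset (4 * 4 = 16) > end_byte_offset (11), which is the
 * branch being handled here.
 */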
+ */ + if (tc6->rx_skb) { + size = end_byte_offset + 1; + oa_tc6_prcs_rx_frame_end(tc6, data, size); + } + size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset; + return oa_tc6_prcs_rx_frame_start(tc6, + &data[start_byte_offset], + size); + } + + /* Process the chunk with ongoing rx frame data */ + oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer); + + return 0; +} + +static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset) +{ + u8 *rx_buf = tc6->spi_data_rx_buf; + __be32 footer; + + footer = *((__be32 *)&rx_buf[footer_offset]); + + return be32_to_cpu(footer); +} + +static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length) +{ + u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE; + u32 footer; + int ret; + + /* All the rx chunks in the receive SPI data buffer are examined here */ + for (int i = 0; i < no_of_rx_chunks; i++) { + /* Last 4 bytes in each received chunk consist footer info */ + footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE + + OA_TC6_CHUNK_PAYLOAD_SIZE); + + ret = oa_tc6_process_rx_chunk_footer(tc6, footer); + if (ret) + return ret; + + /* If there is a data valid chunks then process it for the + * information needed to determine the validity and the location + * of the receive frame data. + */ + if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) { + u8 *payload = tc6->spi_data_rx_buf + i * + OA_TC6_CHUNK_SIZE; + + ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload, + footer); + if (ret) + return ret; + } + } + + return 0; +} + +static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid, + bool end_valid, u8 end_byte_offset) +{ + u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL, + OA_TC6_DATA_HEADER) | + FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) | + FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) | + FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) | + FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET, + end_byte_offset); + + header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY, + oa_tc6_get_parity(header)); + + return cpu_to_be32(header); +} + +static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6) +{ + enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID; + __be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset; + u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset; + u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset; + enum oa_tc6_data_start_valid_info start_valid; + u8 end_byte_offset = 0; + u16 length_to_copy; + + /* Initial value is assigned here to avoid more than 80 characters in + * the declaration place. + */ + start_valid = OA_TC6_DATA_START_INVALID; + + /* Set start valid if the current tx chunk contains the start of the tx + * ethernet frame. + */ + if (!tc6->tx_skb_offset) + start_valid = OA_TC6_DATA_START_VALID; + + /* If the remaining tx skb length is more than the chunk payload size of + * 64 bytes then copy only 64 bytes and leave the ongoing tx skb for + * next tx chunk. + */ + length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE); + + /* Copy the tx skb data to the tx chunk payload buffer */ + memcpy(tx_buf + 1, tx_skb_data, length_to_copy); + tc6->tx_skb_offset += length_to_copy; + + /* Set end valid if the current tx chunk contains the end of the tx + * ethernet frame. 
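/* Editor's aside (worked example, not part of the patch): for a linearized
 * 100-byte frame, the first chunk carries bytes 0..63 with START_VALID set;
 * the second chunk copies the remaining 36 bytes, so length_to_copy = 36,
 * end_byte_offset = 35 and END_VALID is set by the branch below.
 */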
+ */ + if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) { + end_valid = OA_TC6_DATA_END_VALID; + end_byte_offset = length_to_copy - 1; + tc6->tx_skb_offset = 0; + tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len; + tc6->netdev->stats.tx_packets++; + kfree_skb(tc6->ongoing_tx_skb); + tc6->ongoing_tx_skb = NULL; + } + + *tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid, + end_valid, end_byte_offset); + tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE; +} + +static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6) +{ + u16 used_tx_credits; + + /* Get tx skbs and convert them into tx chunks based on the tx credits + * available. + */ + for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits; + used_tx_credits++) { + if (!tc6->ongoing_tx_skb) { + tc6->ongoing_tx_skb = tc6->waiting_tx_skb; + tc6->waiting_tx_skb = NULL; + } + if (!tc6->ongoing_tx_skb) + break; + oa_tc6_add_tx_skb_to_spi_buf(tc6); + } + + return used_tx_credits * OA_TC6_CHUNK_SIZE; +} + +static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6, + u16 needed_empty_chunks) +{ + __be32 header; + + header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID, + OA_TC6_DATA_START_INVALID, + OA_TC6_DATA_END_INVALID, 0); + + while (needed_empty_chunks--) { + __be32 *tx_buf = tc6->spi_data_tx_buf + + tc6->spi_data_tx_buf_offset; + + *tx_buf = header; + tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE; + } +} + +static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len) +{ + u16 tx_chunks = len / OA_TC6_CHUNK_SIZE; + u16 needed_empty_chunks; + + /* If there are more chunks to receive than to transmit, we need to add + * enough empty tx chunks to allow the reception of the excess rx + * chunks. + */ + if (tx_chunks >= tc6->rx_chunks_available) + return len; + + needed_empty_chunks = tc6->rx_chunks_available - tx_chunks; + + oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks); + + return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len; +} + +static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6) +{ + int ret; + + while (true) { + u16 spi_len = 0; + + tc6->spi_data_tx_buf_offset = 0; + + if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb) + spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6); + + spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len); + + if (tc6->int_flag) { + tc6->int_flag = false; + if (spi_len == 0) { + oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1); + spi_len = OA_TC6_CHUNK_SIZE; + } + } + + if (spi_len == 0) + break; + + ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len); + if (ret) { + netdev_err(tc6->netdev, "SPI data transfer failed: %d\n", + ret); + return ret; + } + + ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len); + if (ret) { + if (ret == -EAGAIN) + continue; + + oa_tc6_cleanup_ongoing_tx_skb(tc6); + oa_tc6_cleanup_ongoing_rx_skb(tc6); + netdev_err(tc6->netdev, "Device error: %d\n", ret); + return ret; + } + + if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev)) + netif_wake_queue(tc6->netdev); + } + + return 0; +} + +static int oa_tc6_spi_thread_handler(void *data) +{ + struct oa_tc6 *tc6 = data; + int ret; + + while (likely(!kthread_should_stop())) { + /* This kthread will be waken up if there is a tx skb or mac-phy + * interrupt to perform spi transfer with tx chunks. 
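/* Editor's aside (worked example, not part of the patch): the padding logic
 * above sizes each full-duplex transfer to cover the pending receive chunks.
 * With OA_TC6_CHUNK_SIZE = 68 bytes, two tx chunks queued (len = 136) and
 * five rx chunks available, three empty tx chunks are appended and the
 * transfer length becomes 3 * 68 + 136 = 340 bytes.
 */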
+ */ + wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb || + tc6->int_flag || + kthread_should_stop()); + + if (kthread_should_stop()) + break; + + ret = oa_tc6_try_spi_transfer(tc6); + if (ret) + return ret; + } + + return 0; +} + +static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6) +{ + u32 value; + int ret; + + /* Initially tx credits and rx chunks available to be updated from the + * register as there is no data transfer performed yet. Later they will + * be updated from the rx footer. + */ + ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value); + if (ret) + return ret; + + tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value); + tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE, + value); + + return 0; +} + +static irqreturn_t oa_tc6_macphy_isr(int irq, void *data) +{ + struct oa_tc6 *tc6 = data; + + /* MAC-PHY interrupt can occur for the following reasons. + * - availability of tx credits if it was 0 before and not reported in + * the previous rx footer. + * - availability of rx chunks if it was 0 before and not reported in + * the previous rx footer. + * - extended status event not reported in the previous rx footer. + */ + tc6->int_flag = true; + /* Wake spi kthread to perform spi transfer */ + wake_up_interruptible(&tc6->spi_wq); + + return IRQ_HANDLED; +} + +/** + * oa_tc6_zero_align_receive_frame_enable - function to enable zero align + * receive frame feature. + * @tc6: oa_tc6 struct. + * + * Return: 0 on success otherwise failed. + */ +int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6) +{ + u32 regval; + int ret; + + ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, ®val); + if (ret) + return ret; + + /* Set Zero-Align Receive Frame Enable */ + regval |= CONFIG0_ZARFE_ENABLE; + + return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval); +} +EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable); + +/** + * oa_tc6_start_xmit - function for sending the tx skb which consists ethernet + * frame. + * @tc6: oa_tc6 struct. + * @skb: socket buffer in which the ethernet frame is stored. + * + * Return: NETDEV_TX_OK if the transmit ethernet frame skb added in the tx_skb_q + * otherwise returns NETDEV_TX_BUSY. + */ +netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb) +{ + if (tc6->waiting_tx_skb) { + netif_stop_queue(tc6->netdev); + return NETDEV_TX_BUSY; + } + + if (skb_linearize(skb)) { + dev_kfree_skb_any(skb); + tc6->netdev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + tc6->waiting_tx_skb = skb; + + /* Wake spi kthread to perform spi transfer */ + wake_up_interruptible(&tc6->spi_wq); + + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(oa_tc6_start_xmit); + +/** + * oa_tc6_init - allocates and initializes oa_tc6 structure. + * @spi: device with which data will be exchanged. + * @netdev: network device interface structure. + * + * Return: pointer reference to the oa_tc6 structure if the MAC-PHY + * initialization is successful otherwise NULL. 
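/* Editor's sketch (illustrative, not part of the patch): how a MAC-PHY
 * driver might consume this framework. Every driver-side name below is
 * hypothetical; only the oa_tc6_*() calls are provided by this file.
 */
static netdev_tx_t example_macphy_start_xmit(struct sk_buff *skb,
					     struct net_device *netdev)
{
	struct oa_tc6 *tc6 = *(struct oa_tc6 **)netdev_priv(netdev);

	return oa_tc6_start_xmit(tc6, skb);
}

static int example_macphy_probe(struct spi_device *spi)
{
	struct net_device *netdev;
	struct oa_tc6 *tc6;

	netdev = devm_alloc_etherdev(&spi->dev, sizeof(struct oa_tc6 *));
	if (!netdev)
		return -ENOMEM;

	tc6 = oa_tc6_init(spi, netdev);
	if (!tc6)
		return -ENODEV;
	*(struct oa_tc6 **)netdev_priv(netdev) = tc6;

	/* netdev_ops wiring and register_netdev() omitted for brevity */
	return oa_tc6_zero_align_receive_frame_enable(tc6);
}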
+ */ +struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev) +{ + struct oa_tc6 *tc6; + int ret; + + tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL); + if (!tc6) + return NULL; + + tc6->spi = spi; + tc6->netdev = netdev; + SET_NETDEV_DEV(netdev, &spi->dev); + mutex_init(&tc6->spi_ctrl_lock); + + /* Set the SPI controller to pump at realtime priority */ + tc6->spi->rt = true; + spi_setup(tc6->spi); + + tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev, + OA_TC6_CTRL_SPI_BUF_SIZE, + GFP_KERNEL); + if (!tc6->spi_ctrl_tx_buf) + return NULL; + + tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev, + OA_TC6_CTRL_SPI_BUF_SIZE, + GFP_KERNEL); + if (!tc6->spi_ctrl_rx_buf) + return NULL; + + tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev, + OA_TC6_SPI_DATA_BUF_SIZE, + GFP_KERNEL); + if (!tc6->spi_data_tx_buf) + return NULL; + + tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev, + OA_TC6_SPI_DATA_BUF_SIZE, + GFP_KERNEL); + if (!tc6->spi_data_rx_buf) + return NULL; + + ret = oa_tc6_sw_reset_macphy(tc6); + if (ret) { + dev_err(&tc6->spi->dev, + "MAC-PHY software reset failed: %d\n", ret); + return NULL; + } + + ret = oa_tc6_unmask_macphy_error_interrupts(tc6); + if (ret) { + dev_err(&tc6->spi->dev, + "MAC-PHY error interrupts unmask failed: %d\n", ret); + return NULL; + } + + ret = oa_tc6_phy_init(tc6); + if (ret) { + dev_err(&tc6->spi->dev, + "MAC internal PHY initialization failed: %d\n", ret); + return NULL; + } + + ret = oa_tc6_enable_data_transfer(tc6); + if (ret) { + dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n", + ret); + goto phy_exit; + } + + ret = oa_tc6_update_buffer_status_from_register(tc6); + if (ret) { + dev_err(&tc6->spi->dev, + "Failed to update buffer status: %d\n", ret); + goto phy_exit; + } + + init_waitqueue_head(&tc6->spi_wq); + + tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6, + "oa-tc6-spi-thread"); + if (IS_ERR(tc6->spi_thread)) { + dev_err(&tc6->spi->dev, "Failed to create SPI thread\n"); + goto phy_exit; + } + + sched_set_fifo(tc6->spi_thread); + + ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr, + IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev), + tc6); + if (ret) { + dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n", + ret); + goto kthread_stop; + } + + /* oa_tc6_sw_reset_macphy() function resets and clears the MAC-PHY reset + * complete status. IRQ is also asserted on reset completion and it is + * remain asserted until MAC-PHY receives a data chunk. So performing an + * empty data chunk transmission will deassert the IRQ. Refer section + * 7.7 and 9.2.8.8 in the OPEN Alliance specification for more details. + */ + tc6->int_flag = true; + wake_up_interruptible(&tc6->spi_wq); + + return tc6; + +kthread_stop: + kthread_stop(tc6->spi_thread); +phy_exit: + oa_tc6_phy_exit(tc6); + return NULL; +} +EXPORT_SYMBOL_GPL(oa_tc6_init); + +/** + * oa_tc6_exit - exit function. + * @tc6: oa_tc6 struct. 
+ */ +void oa_tc6_exit(struct oa_tc6 *tc6) +{ + oa_tc6_phy_exit(tc6); + kthread_stop(tc6->spi_thread); + dev_kfree_skb_any(tc6->ongoing_tx_skb); + dev_kfree_skb_any(tc6->waiting_tx_skb); + dev_kfree_skb_any(tc6->rx_skb); +} +EXPORT_SYMBOL_GPL(oa_tc6_exit); + +MODULE_DESCRIPTION("OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface Lib"); +MODULE_AUTHOR("Parthiban Veerasooran "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 1cc0010871932b..a36d422b517339 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -163,7 +163,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; #include #include /* Processor type for cache alignment. */ #include -#include +#include #include static const char version[] = diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 640ac01689fb5e..c0515dc63246b2 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -102,7 +102,7 @@ static int gx_fix; #include #include #include /* Processor type for cache alignment. */ -#include +#include #include /* These identify the driver base version and may not be removed. */ diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 62ba269da90265..cb4e12df7719df 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1699,8 +1699,9 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netif_napi_add(dev, &mac->napi, pasemi_mac_poll); - dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | - NETIF_F_HIGHDMA | NETIF_F_GSO; + dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_GSO; + dev->lltx = true; mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); if (!mac->dma_pdev) { diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig index 3f7519e435b813..01fe76786f771a 100644 --- a/drivers/net/ethernet/pensando/Kconfig +++ b/drivers/net/ethernet/pensando/Kconfig @@ -23,6 +23,7 @@ config IONIC depends on PTP_1588_CLOCK_OPTIONAL select NET_DEVLINK select DIMLIB + select PAGE_POOL help This enables the support for the Pensando family of Ethernet adapters.
More specific information on this driver can be diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c index 59e5a9f2110520..c98b4e75e28858 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c @@ -123,7 +123,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) struct ionic_cq *cq = &qcq->cq; qcq_dentry = debugfs_create_dir(q->name, lif->dentry); - if (IS_ERR_OR_NULL(qcq_dentry)) + if (IS_ERR(qcq_dentry)) return; qcq->dentry = qcq_dentry; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index f2f07bf8854560..c8c710cfe70cc4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -181,10 +181,7 @@ struct ionic_queue; struct ionic_qcq; #define IONIC_MAX_BUF_LEN ((u16)-1) -#define IONIC_PAGE_SIZE PAGE_SIZE -#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2) -#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\ - __GFP_COMP | __GFP_MEMALLOC) +#define IONIC_PAGE_SIZE MIN(PAGE_SIZE, IONIC_MAX_BUF_LEN) #define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \ (VLAN_ETH_HLEN + \ @@ -238,9 +235,8 @@ struct ionic_queue { unsigned int index; unsigned int num_descs; unsigned int max_sg_elems; + u64 features; - unsigned int type; - unsigned int hw_index; unsigned int hw_type; bool xdp_flush; union { @@ -249,11 +245,6 @@ struct ionic_queue { struct ionic_rxq_desc *rxq; struct ionic_admin_cmd *adminq; }; - union { - void __iomem *cmb_base; - struct ionic_txq_desc __iomem *cmb_txq; - struct ionic_rxq_desc __iomem *cmb_rxq; - }; union { void *sg_base; struct ionic_txq_sg_desc *txq_sgl; @@ -261,7 +252,17 @@ struct ionic_queue { struct ionic_rxq_sg_desc *rxq_sgl; }; struct xdp_rxq_info *xdp_rxq_info; + struct bpf_prog *xdp_prog; + struct page_pool *page_pool; struct ionic_queue *partner; + + union { + void __iomem *cmb_base; + struct ionic_txq_desc __iomem *cmb_txq; + struct ionic_rxq_desc __iomem *cmb_rxq; + }; + unsigned int type; + unsigned int hw_index; dma_addr_t base_pa; dma_addr_t cmb_base_pa; dma_addr_t sg_base_pa; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 4619fd74f3e3d2..dda22fa4448cff 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -989,8 +989,6 @@ static int ionic_get_ts_info(struct net_device *netdev, info->phc_index = ptp_clock_index(lif->phc->ptp); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 86774d9922d84d..40496587b2b318 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "ionic.h" #include "ionic_bus.h" @@ -46,8 +47,9 @@ static int ionic_start_queues(struct ionic_lif *lif); static void ionic_stop_queues(struct ionic_lif *lif); static void ionic_lif_queue_identify(struct ionic_lif *lif); -static int ionic_xdp_queues_config(struct ionic_lif *lif); -static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q); +static void ionic_xdp_rxqs_prog_update(struct 
ionic_lif *lif); +static void ionic_unregister_rxq_info(struct ionic_queue *q); +static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id); static void ionic_dim_work(struct work_struct *work) { @@ -380,6 +382,7 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) if (!(qcq->flags & IONIC_QCQ_F_INITED)) return; + ionic_unregister_rxq_info(&qcq->q); if (qcq->flags & IONIC_QCQ_F_INTR) { ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_SET); @@ -437,9 +440,10 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) qcq->sg_base_pa = 0; } - ionic_xdp_unregister_rxq_info(&qcq->q); - ionic_qcq_intr_free(lif, qcq); + page_pool_destroy(qcq->q.page_pool); + qcq->q.page_pool = NULL; + ionic_qcq_intr_free(lif, qcq); vfree(qcq->q.info); qcq->q.info = NULL; } @@ -553,7 +557,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, unsigned int cq_desc_size, unsigned int sg_desc_size, unsigned int desc_info_size, - unsigned int pid, struct ionic_qcq **qcq) + unsigned int pid, struct bpf_prog *xdp_prog, + struct ionic_qcq **qcq) { struct ionic_dev *idev = &lif->ionic->idev; struct device *dev = lif->ionic->dev; @@ -579,6 +584,31 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, goto err_out_free_qcq; } + if (type == IONIC_QTYPE_RXQ) { + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, + .pool_size = num_descs, + .nid = NUMA_NO_NODE, + .dev = lif->ionic->dev, + .napi = &new->napi, + .dma_dir = DMA_FROM_DEVICE, + .max_len = PAGE_SIZE, + .netdev = lif->netdev, + }; + + if (xdp_prog) + pp_params.dma_dir = DMA_BIDIRECTIONAL; + + new->q.page_pool = page_pool_create(&pp_params); + if (IS_ERR(new->q.page_pool)) { + netdev_err(lif->netdev, "Cannot create page_pool\n"); + err = PTR_ERR(new->q.page_pool); + new->q.page_pool = NULL; + goto err_out_free_q_info; + } + } + new->q.type = type; new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; @@ -586,12 +616,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, desc_size, sg_desc_size, pid); if (err) { netdev_err(lif->netdev, "Cannot initialize queue\n"); - goto err_out_free_q_info; + goto err_out_free_page_pool; } err = ionic_alloc_qcq_interrupt(lif, new); if (err) - goto err_out_free_q_info; + goto err_out_free_page_pool; err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); if (err) { @@ -712,6 +742,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, devm_free_irq(dev, new->intr.vector, &new->napi); ionic_intr_free(lif->ionic, new->intr.index); } +err_out_free_page_pool: + page_pool_destroy(new->q.page_pool); err_out_free_q_info: vfree(new->q.info); err_out_free_qcq: @@ -734,7 +766,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) sizeof(struct ionic_admin_comp), 0, sizeof(struct ionic_admin_desc_info), - lif->kern_pid, &lif->adminqcq); + lif->kern_pid, NULL, &lif->adminqcq); if (err) return err; ionic_debugfs_add_qcq(lif, lif->adminqcq); @@ -747,7 +779,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) sizeof(union ionic_notifyq_comp), 0, sizeof(struct ionic_admin_desc_info), - lif->kern_pid, &lif->notifyqcq); + lif->kern_pid, NULL, &lif->notifyqcq); if (err) goto err_out; ionic_debugfs_add_qcq(lif, lif->notifyqcq); @@ -925,6 +957,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi); else netif_napi_add(lif->netdev, &qcq->napi, 
ionic_txrx_napi); + err = ionic_register_rxq_info(q, qcq->napi.napi_id); + if (err) { + netif_napi_del(&qcq->napi); + return err; + } qcq->flags |= IONIC_QCQ_F_INITED; @@ -960,7 +997,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_tx_desc_info), - lif->kern_pid, &txq); + lif->kern_pid, NULL, &txq); if (err) goto err_qcq_alloc; @@ -1020,7 +1057,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_rx_desc_info), - lif->kern_pid, &rxq); + lif->kern_pid, NULL, &rxq); if (err) goto err_qcq_alloc; @@ -1037,7 +1074,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) goto err_qcq_init; if (test_bit(IONIC_LIF_F_UP, lif->state)) { - ionic_rx_fill(&rxq->q); + ionic_rx_fill(&rxq->q, NULL); err = ionic_qcq_enable(rxq); if (err) goto err_qcq_enable; @@ -2046,7 +2083,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_tx_desc_info), - lif->kern_pid, &lif->txqcqs[i]); + lif->kern_pid, NULL, &lif->txqcqs[i]); if (err) goto err_out; @@ -2078,7 +2115,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_rx_desc_info), - lif->kern_pid, &lif->rxqcqs[i]); + lif->kern_pid, lif->xdp_prog, + &lif->rxqcqs[i]); if (err) goto err_out; @@ -2143,9 +2181,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif) int derr = 0; int i, err; - err = ionic_xdp_queues_config(lif); - if (err) - return err; + ionic_xdp_rxqs_prog_update(lif); for (i = 0; i < lif->nxqs; i++) { if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { @@ -2154,7 +2190,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif) goto err_out; } - ionic_rx_fill(&lif->rxqcqs[i]->q); + ionic_rx_fill(&lif->rxqcqs[i]->q, + READ_ONCE(lif->rxqcqs[i]->q.xdp_prog)); err = ionic_qcq_enable(lif->rxqcqs[i]); if (err) goto err_out; @@ -2167,7 +2204,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif) } if (lif->hwstamp_rxq) { - ionic_rx_fill(&lif->hwstamp_rxq->q); + ionic_rx_fill(&lif->hwstamp_rxq->q, NULL); err = ionic_qcq_enable(lif->hwstamp_rxq); if (err) goto err_out_hwstamp_rx; @@ -2192,7 +2229,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif) derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr); } - ionic_xdp_queues_config(lif); + ionic_xdp_rxqs_prog_update(lif); return err; } @@ -2651,7 +2688,7 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif) ionic_vf_start(ionic); } -static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q) +static void ionic_unregister_rxq_info(struct ionic_queue *q) { struct xdp_rxq_info *xi; @@ -2665,7 +2702,7 @@ static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q) kfree(xi); } -static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) +static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) { struct xdp_rxq_info *rxq_info; int err; @@ -2676,15 +2713,15 @@ static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_ err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id); if (err) { - dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n", - q->index, err); + netdev_err(q->lif->netdev, "q%d 
xdp_rxq_info_reg failed, err %d\n", + q->index, err); goto err_out; } - err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL); + err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool); if (err) { - dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n", - q->index, err); + netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n", + q->index, err); xdp_rxq_info_unreg(rxq_info); goto err_out; } @@ -2698,44 +2735,20 @@ static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_ return err; } -static int ionic_xdp_queues_config(struct ionic_lif *lif) +static void ionic_xdp_rxqs_prog_update(struct ionic_lif *lif) { + struct bpf_prog *xdp_prog; unsigned int i; - int err; if (!lif->rxqcqs) - return 0; - - /* There's no need to rework memory if not going to/from NULL program. - * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info - * This way we don't need to keep an *xdp_prog in every queue struct. - */ - if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info) - return 0; + return; + xdp_prog = READ_ONCE(lif->xdp_prog); for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { struct ionic_queue *q = &lif->rxqcqs[i]->q; - if (q->xdp_rxq_info) { - ionic_xdp_unregister_rxq_info(q); - continue; - } - - err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id); - if (err) { - dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n", - i, err); - goto err_out; - } + WRITE_ONCE(q->xdp_prog, xdp_prog); } - - return 0; - -err_out: - for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) - ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q); - - return err; } static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) @@ -2765,11 +2778,17 @@ static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) if (!netif_running(netdev)) { old_prog = xchg(&lif->xdp_prog, bpf->prog); + } else if (lif->xdp_prog && bpf->prog) { + old_prog = xchg(&lif->xdp_prog, bpf->prog); + ionic_xdp_rxqs_prog_update(lif); } else { + struct ionic_queue_params qparams; + + ionic_init_queue_params(lif, &qparams); + qparams.xdp_prog = bpf->prog; mutex_lock(&lif->queue_lock); - ionic_stop_queues_reconfig(lif); + ionic_reconfigure_queues(lif, &qparams); old_prog = xchg(&lif->xdp_prog, bpf->prog); - ionic_start_queues_reconfig(lif); mutex_unlock(&lif->queue_lock); } @@ -2871,13 +2890,23 @@ static int ionic_cmb_reconfig(struct ionic_lif *lif, static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) { - /* only swapping the queues, not the napi, flags, or other stuff */ + /* only swapping the queues and napi, not flags or other stuff */ + swap(a->napi, b->napi); + + if (a->q.type == IONIC_QTYPE_RXQ) { + swap(a->q.page_pool, b->q.page_pool); + a->q.page_pool->p.napi = &a->napi; + if (b->q.page_pool) /* is NULL when increasing queue count */ + b->q.page_pool->p.napi = &b->napi; + } + swap(a->q.features, b->q.features); swap(a->q.num_descs, b->q.num_descs); swap(a->q.desc_size, b->q.desc_size); swap(a->q.base, b->q.base); swap(a->q.base_pa, b->q.base_pa); swap(a->q.info, b->q.info); + swap(a->q.xdp_prog, b->q.xdp_prog); swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info); swap(a->q.partner, b->q.partner); swap(a->q_base, b->q_base); @@ -2928,7 +2957,8 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, } if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs || - qparam->rxq_features != lif->rxq_features) { + 
qparam->rxq_features != lif->rxq_features || + qparam->xdp_prog != lif->xdp_prog) { rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, sizeof(struct ionic_qcq *), GFP_KERNEL); if (!rx_qcqs) { @@ -2959,7 +2989,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 4, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_tx_desc_info), - lif->kern_pid, &lif->txqcqs[i]); + lif->kern_pid, NULL, &lif->txqcqs[i]); if (err) goto err_out; } @@ -2968,7 +2998,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_tx_desc_info), - lif->kern_pid, &tx_qcqs[i]); + lif->kern_pid, NULL, &tx_qcqs[i]); if (err) goto err_out; } @@ -2990,7 +3020,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 4, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_rx_desc_info), - lif->kern_pid, &lif->rxqcqs[i]); + lif->kern_pid, NULL, &lif->rxqcqs[i]); if (err) goto err_out; } @@ -2999,11 +3029,12 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, sizeof(struct ionic_rx_desc_info), - lif->kern_pid, &rx_qcqs[i]); + lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]); if (err) goto err_out; rx_qcqs[i]->q.features = qparam->rxq_features; + rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog; } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 3e1005293c4a1e..e01756fb7fdded 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -268,6 +268,7 @@ struct ionic_queue_params { unsigned int ntxq_descs; unsigned int nrxq_descs; u64 rxq_features; + struct bpf_prog *xdp_prog; bool intr_split; bool cmb_tx; bool cmb_rx; @@ -280,6 +281,7 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif, qparam->ntxq_descs = lif->ntxq_descs; qparam->nrxq_descs = lif->nrxq_descs; qparam->rxq_features = lif->rxq_features; + qparam->xdp_prog = lif->xdp_prog; qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state); qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index 1ee2f285cb42cd..52811487767740 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -312,8 +312,8 @@ static int ionic_lif_filter_add(struct ionic_lif *lif, int err = 0; ctx.cmd.rx_filter_add = *ac; - ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD, - ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index), + ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD; + ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index); spin_lock_bh(&lif->rx_filters.lock); f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index fc79baad456149..0eeda7e502db26 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "ionic.h" #include "ionic_lif.h" @@ -118,108 +119,57 @@ static void 
*ionic_rx_buf_va(struct ionic_buf_info *buf_info) static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info) { - return buf_info->dma_addr + buf_info->page_offset; + return page_pool_get_dma_addr(buf_info->page) + buf_info->page_offset; } -static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info) +static void __ionic_rx_put_buf(struct ionic_queue *q, + struct ionic_buf_info *buf_info, + bool recycle_direct) { - return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset); -} - -static int ionic_rx_page_alloc(struct ionic_queue *q, - struct ionic_buf_info *buf_info) -{ - struct device *dev = q->dev; - dma_addr_t dma_addr; - struct page *page; - - page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); - if (unlikely(!page)) { - net_err_ratelimited("%s: %s page alloc failed\n", - dev_name(dev), q->name); - q_to_rx_stats(q)->alloc_err++; - return -ENOMEM; - } - - dma_addr = dma_map_page(dev, page, 0, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, dma_addr))) { - __free_pages(page, 0); - net_err_ratelimited("%s: %s dma map failed\n", - dev_name(dev), q->name); - q_to_rx_stats(q)->dma_map_err++; - return -EIO; - } - - buf_info->dma_addr = dma_addr; - buf_info->page = page; - buf_info->page_offset = 0; - - return 0; -} - -static void ionic_rx_page_free(struct ionic_queue *q, - struct ionic_buf_info *buf_info) -{ - struct device *dev = q->dev; - - if (unlikely(!buf_info)) { - net_err_ratelimited("%s: %s invalid buf_info in free\n", - dev_name(dev), q->name); - return; - } - if (!buf_info->page) return; - dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - __free_pages(buf_info->page, 0); + page_pool_put_full_page(q->page_pool, buf_info->page, recycle_direct); buf_info->page = NULL; + buf_info->len = 0; + buf_info->page_offset = 0; } -static bool ionic_rx_buf_recycle(struct ionic_queue *q, - struct ionic_buf_info *buf_info, u32 len) -{ - u32 size; - - /* don't re-use pages allocated in low-mem condition */ - if (page_is_pfmemalloc(buf_info->page)) - return false; - - /* don't re-use buffers from non-local numa nodes */ - if (page_to_nid(buf_info->page) != numa_mem_id()) - return false; - - size = ALIGN(len, q->xdp_rxq_info ? 
IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ); - buf_info->page_offset += size; - if (buf_info->page_offset >= IONIC_PAGE_SIZE) - return false; - get_page(buf_info->page); +static void ionic_rx_put_buf(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + __ionic_rx_put_buf(q, buf_info, false); +} - return true; +static void ionic_rx_put_buf_direct(struct ionic_queue *q, + struct ionic_buf_info *buf_info) +{ + __ionic_rx_put_buf(q, buf_info, true); } static void ionic_rx_add_skb_frag(struct ionic_queue *q, struct sk_buff *skb, struct ionic_buf_info *buf_info, - u32 off, u32 len, + u32 headroom, u32 len, bool synced) { if (!synced) - dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info), - off, len, DMA_FROM_DEVICE); + page_pool_dma_sync_for_cpu(q->page_pool, + buf_info->page, + buf_info->page_offset + headroom, + len); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf_info->page, buf_info->page_offset + off, - len, - IONIC_PAGE_SIZE); + buf_info->page, buf_info->page_offset + headroom, + len, buf_info->len); - if (!ionic_rx_buf_recycle(q, buf_info, len)) { - dma_unmap_page(q->dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - buf_info->page = NULL; - } + /* napi_gro_frags() will release/recycle the + * page_pool buffers from the frags list + */ + buf_info->page = NULL; + buf_info->len = 0; + buf_info->page_offset = 0; } static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q, @@ -244,12 +194,13 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q, q_to_rx_stats(q)->alloc_err++; return NULL; } + skb_mark_for_recycle(skb); if (headroom) frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN); else - frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); + frag_len = min_t(u16, len, IONIC_PAGE_SIZE); if (unlikely(!buf_info->page)) goto err_bad_buf_page; @@ -260,7 +211,7 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q, for (i = 0; i < num_sg_elems; i++, buf_info++) { if (unlikely(!buf_info->page)) goto err_bad_buf_page; - frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); + frag_len = min_t(u16, len, buf_info->len); ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced); len -= frag_len; } @@ -277,11 +228,13 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev, struct ionic_rx_desc_info *desc_info, unsigned int headroom, unsigned int len, + unsigned int num_sg_elems, bool synced) { struct ionic_buf_info *buf_info; struct device *dev = q->dev; struct sk_buff *skb; + int i; buf_info = &desc_info->bufs[0]; @@ -292,54 +245,52 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev, q_to_rx_stats(q)->alloc_err++; return NULL; } - - if (unlikely(!buf_info->page)) { - dev_kfree_skb(skb); - return NULL; - } + skb_mark_for_recycle(skb); if (!synced) - dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info), - headroom, len, DMA_FROM_DEVICE); + page_pool_dma_sync_for_cpu(q->page_pool, + buf_info->page, + buf_info->page_offset + headroom, + len); + skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len); - dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info), - headroom, len, DMA_FROM_DEVICE); skb_put(skb, len); skb->protocol = eth_type_trans(skb, netdev); + /* recycle the Rx buffer now that we're done with it */ + ionic_rx_put_buf_direct(q, buf_info); + buf_info++; + for (i = 0; i < num_sg_elems; i++, buf_info++) + ionic_rx_put_buf_direct(q, buf_info); + return skb; } static void ionic_xdp_tx_desc_clean(struct ionic_queue *q, - struct 
ionic_tx_desc_info *desc_info) + struct ionic_tx_desc_info *desc_info, + bool in_napi) { - unsigned int nbufs = desc_info->nbufs; - struct ionic_buf_info *buf_info; - struct device *dev = q->dev; - int i; + struct xdp_frame_bulk bq; - if (!nbufs) + if (!desc_info->nbufs) return; - buf_info = desc_info->bufs; - dma_unmap_single(dev, buf_info->dma_addr, - buf_info->len, DMA_TO_DEVICE); - if (desc_info->act == XDP_TX) - __free_pages(buf_info->page, 0); - buf_info->page = NULL; + xdp_frame_bulk_init(&bq); + rcu_read_lock(); /* need for xdp_return_frame_bulk */ - buf_info++; - for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) { - dma_unmap_page(dev, buf_info->dma_addr, - buf_info->len, DMA_TO_DEVICE); - if (desc_info->act == XDP_TX) - __free_pages(buf_info->page, 0); - buf_info->page = NULL; + if (desc_info->act == XDP_TX) { + if (likely(in_napi)) + xdp_return_frame_rx_napi(desc_info->xdpf); + else + xdp_return_frame(desc_info->xdpf); + } else if (desc_info->act == XDP_REDIRECT) { + ionic_tx_desc_unmap_bufs(q, desc_info); + xdp_return_frame_bulk(desc_info->xdpf, &bq); } - if (desc_info->act == XDP_REDIRECT) - xdp_return_frame(desc_info->xdpf); + xdp_flush_frame_bulk(&bq); + rcu_read_unlock(); desc_info->nbufs = 0; desc_info->xdpf = NULL; @@ -363,9 +314,17 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, buf_info = desc_info->bufs; stats = q_to_tx_stats(q); - dma_addr = ionic_tx_map_single(q, frame->data, len); - if (!dma_addr) - return -EIO; + if (act == XDP_TX) { + dma_addr = page_pool_get_dma_addr(page) + + off + XDP_PACKET_HEADROOM; + dma_sync_single_for_device(q->dev, dma_addr, + len, DMA_TO_DEVICE); + } else /* XDP_REDIRECT */ { + dma_addr = ionic_tx_map_single(q, frame->data, len); + if (!dma_addr) + return -EIO; + } + buf_info->dma_addr = dma_addr; buf_info->len = len; buf_info->page = page; @@ -387,10 +346,21 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, frag = sinfo->frags; elem = ionic_tx_sg_elems(q); for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) { - dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); - if (!dma_addr) { - ionic_tx_desc_unmap_bufs(q, desc_info); - return -EIO; + if (act == XDP_TX) { + struct page *pg = skb_frag_page(frag); + + dma_addr = page_pool_get_dma_addr(pg) + + skb_frag_off(frag); + dma_sync_single_for_device(q->dev, dma_addr, + skb_frag_size(frag), + DMA_TO_DEVICE); + } else { + dma_addr = ionic_tx_map_frag(q, frag, 0, + skb_frag_size(frag)); + if (dma_mapping_error(q->dev, dma_addr)) { + ionic_tx_desc_unmap_bufs(q, desc_info); + return -EIO; + } } bi->dma_addr = dma_addr; bi->len = skb_frag_size(frag); @@ -481,15 +451,13 @@ int ionic_xdp_xmit(struct net_device *netdev, int n, return nxmit; } -static void ionic_xdp_rx_put_bufs(struct ionic_queue *q, - struct ionic_buf_info *buf_info, - int nbufs) +static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q, + struct ionic_buf_info *buf_info, + int nbufs) { int i; for (i = 0; i < nbufs; i++) { - dma_unmap_page(q->dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); buf_info->page = NULL; buf_info++; } @@ -516,11 +484,9 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN); xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info), XDP_PACKET_HEADROOM, frag_len, false); - - dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info), - XDP_PACKET_HEADROOM, frag_len, - DMA_FROM_DEVICE); - + page_pool_dma_sync_for_cpu(rxq->page_pool, 
buf_info->page, + buf_info->page_offset + XDP_PACKET_HEADROOM, + frag_len); prefetchw(&xdp_buf.data_hard_start); /* We limit MTU size to one buffer if !xdp_has_frags, so @@ -542,15 +508,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, do { if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) { err = -ENOSPC; - goto out_xdp_abort; + break; } frag = &sinfo->frags[sinfo->nr_frags]; sinfo->nr_frags++; bi++; - frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi)); - dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi), - 0, frag_len, DMA_FROM_DEVICE); + frag_len = min_t(u16, remain_len, bi->len); + page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page, + buf_info->page_offset, + frag_len); skb_frag_fill_page_desc(frag, bi->page, 0, frag_len); sinfo->xdp_frags_size += frag_len; remain_len -= frag_len; @@ -569,14 +536,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, return false; /* false = we didn't consume the packet */ case XDP_DROP: - ionic_rx_page_free(rxq, buf_info); + ionic_rx_put_buf_direct(rxq, buf_info); stats->xdp_drop++; break; case XDP_TX: xdpf = xdp_convert_buff_to_frame(&xdp_buf); - if (!xdpf) - goto out_xdp_abort; + if (!xdpf) { + err = -ENOSPC; + break; + } txq = rxq->partner; nq = netdev_get_tx_queue(netdev, txq->index); @@ -588,7 +557,8 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, ionic_q_space_avail(txq), 1, 1)) { __netif_tx_unlock(nq); - goto out_xdp_abort; + err = -EIO; + break; } err = ionic_xdp_post_frame(txq, xdpf, XDP_TX, @@ -598,49 +568,47 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, __netif_tx_unlock(nq); if (unlikely(err)) { netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); - goto out_xdp_abort; + break; } - ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs); + ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs); stats->xdp_tx++; - - /* the Tx completion will free the buffers */ break; case XDP_REDIRECT: err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog); if (unlikely(err)) { netdev_dbg(netdev, "xdp_do_redirect err %d\n", err); - goto out_xdp_abort; + break; } - ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs); + ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs); rxq->xdp_flush = true; stats->xdp_redirect++; break; case XDP_ABORTED: default: - goto out_xdp_abort; + err = -EIO; + break; } - return true; - -out_xdp_abort: - trace_xdp_exception(netdev, xdp_prog, xdp_action); - ionic_rx_page_free(rxq, buf_info); - stats->xdp_aborted++; + if (err) { + ionic_rx_put_buf_direct(rxq, buf_info); + trace_xdp_exception(netdev, xdp_prog, xdp_action); + stats->xdp_aborted++; + } return true; } static void ionic_rx_clean(struct ionic_queue *q, struct ionic_rx_desc_info *desc_info, - struct ionic_rxq_comp *comp) + struct ionic_rxq_comp *comp, + struct bpf_prog *xdp_prog) { struct net_device *netdev = q->lif->netdev; struct ionic_qcq *qcq = q_to_qcq(q); struct ionic_rx_stats *stats; - struct bpf_prog *xdp_prog; - unsigned int headroom; + unsigned int headroom = 0; struct sk_buff *skb; bool synced = false; bool use_copybreak; @@ -648,7 +616,14 @@ static void ionic_rx_clean(struct ionic_queue *q, stats = q_to_rx_stats(q); - if (comp->status) { + if (unlikely(comp->status)) { + /* Most likely status==2 and the pkt received was bigger + * than the buffer available: comp->len will show the + * pkt size received that didn't fit the advertised desc.len + */ + dev_dbg(q->dev, "q%d drop comp->status %d comp->len %d desc->len %d\n", + q->index, comp->status, comp->len, q->rxq[q->head_idx].len); + stats->dropped++; return; } @@ -657,18 +632,18 @@ 
static void ionic_rx_clean(struct ionic_queue *q, stats->pkts++; stats->bytes += len; - xdp_prog = READ_ONCE(q->lif->xdp_prog); if (xdp_prog) { if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len)) return; synced = true; + headroom = XDP_PACKET_HEADROOM; } - headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0; use_copybreak = len <= q->lif->rx_copybreak; if (use_copybreak) skb = ionic_rx_copybreak(netdev, q, desc_info, - headroom, len, synced); + headroom, len, + comp->num_sg_elems, synced); else skb = ionic_rx_build_skb(q, desc_info, headroom, len, comp->num_sg_elems, synced); @@ -744,7 +719,7 @@ static void ionic_rx_clean(struct ionic_queue *q, napi_gro_frags(&qcq->napi); } -bool ionic_rx_service(struct ionic_cq *cq) +static bool __ionic_rx_service(struct ionic_cq *cq, struct bpf_prog *xdp_prog) { struct ionic_rx_desc_info *desc_info; struct ionic_queue *q = cq->bound_q; @@ -766,11 +741,16 @@ bool ionic_rx_service(struct ionic_cq *cq) q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); /* clean the related q entry, only one per qc completion */ - ionic_rx_clean(q, desc_info, comp); + ionic_rx_clean(q, desc_info, comp, xdp_prog); return true; } +bool ionic_rx_service(struct ionic_cq *cq) +{ + return __ionic_rx_service(cq, NULL); +} + static inline void ionic_write_cmb_desc(struct ionic_queue *q, void *desc) { @@ -781,7 +761,7 @@ static inline void ionic_write_cmb_desc(struct ionic_queue *q, memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0])); } -void ionic_rx_fill(struct ionic_queue *q) +void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog) { struct net_device *netdev = q->lif->netdev; struct ionic_rx_desc_info *desc_info; @@ -789,6 +769,9 @@ void ionic_rx_fill(struct ionic_queue *q) struct ionic_buf_info *buf_info; unsigned int fill_threshold; struct ionic_rxq_desc *desc; + unsigned int first_frag_len; + unsigned int first_buf_len; + unsigned int headroom = 0; unsigned int remain_len; unsigned int frag_len; unsigned int nfrags; @@ -806,35 +789,43 @@ void ionic_rx_fill(struct ionic_queue *q) len = netdev->mtu + VLAN_ETH_HLEN; - for (i = n_fill; i; i--) { - unsigned int headroom; - unsigned int buf_len; + if (xdp_prog) { + /* Always alloc the full size buffer, but only need + * the actual frag_len in the descriptor + * XDP uses space in the first buffer, so account for + * head room, tail room, and ip header in the first frag size. + */ + headroom = XDP_PACKET_HEADROOM; + first_buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN + headroom; + first_frag_len = min_t(u16, len + headroom, first_buf_len); + } else { + /* Use MTU size if smaller than max buffer size */ + first_frag_len = min_t(u16, len, IONIC_PAGE_SIZE); + first_buf_len = first_frag_len; + } + for (i = n_fill; i; i--) { + /* fill main descriptor - buf[0] */ nfrags = 0; remain_len = len; desc = &q->rxq[q->head_idx]; desc_info = &q->rx_info[q->head_idx]; buf_info = &desc_info->bufs[0]; - if (!buf_info->page) { /* alloc a new buffer? 
*/ - if (unlikely(ionic_rx_page_alloc(q, buf_info))) { - desc->addr = 0; - desc->len = 0; - return; - } + buf_info->len = first_buf_len; + frag_len = first_frag_len - headroom; + + /* get a new buffer if we can't reuse one */ + if (!buf_info->page) + buf_info->page = page_pool_alloc(q->page_pool, + &buf_info->page_offset, + &buf_info->len, + GFP_ATOMIC); + if (unlikely(!buf_info->page)) { + buf_info->len = 0; + return; } - /* fill main descriptor - buf[0] - * XDP uses space in the first buffer, so account for - * head room, tail room, and ip header in the first frag size. - */ - headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0; - if (q->xdp_rxq_info) - buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN; - else - buf_len = ionic_rx_buf_size(buf_info); - frag_len = min_t(u16, len, buf_len); - desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom); desc->len = cpu_to_le16(frag_len); remain_len -= frag_len; @@ -844,16 +835,26 @@ void ionic_rx_fill(struct ionic_queue *q) /* fill sg descriptors - buf[1..n] */ sg_elem = q->rxq_sgl[q->head_idx].elems; for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) { - if (!buf_info->page) { /* alloc a new sg buffer? */ - if (unlikely(ionic_rx_page_alloc(q, buf_info))) { - sg_elem->addr = 0; - sg_elem->len = 0; + frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE); + + /* Recycle any leftover buffers that are too small to reuse */ + if (unlikely(buf_info->page && buf_info->len < frag_len)) + ionic_rx_put_buf_direct(q, buf_info); + + /* Get new buffer if needed */ + if (!buf_info->page) { + buf_info->len = frag_len; + buf_info->page = page_pool_alloc(q->page_pool, + &buf_info->page_offset, + &buf_info->len, + GFP_ATOMIC); + if (unlikely(!buf_info->page)) { + buf_info->len = 0; return; } } sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info)); - frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info)); sg_elem->len = cpu_to_le16(frag_len); remain_len -= frag_len; buf_info++; @@ -883,17 +884,12 @@ void ionic_rx_fill(struct ionic_queue *q) void ionic_rx_empty(struct ionic_queue *q) { struct ionic_rx_desc_info *desc_info; - struct ionic_buf_info *buf_info; unsigned int i, j; for (i = 0; i < q->num_descs; i++) { desc_info = &q->rx_info[i]; - for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) { - buf_info = &desc_info->bufs[j]; - if (buf_info->page) - ionic_rx_page_free(q, buf_info); - } - + for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) + ionic_rx_put_buf(q, &desc_info->bufs[j]); desc_info->nbufs = 0; } @@ -974,6 +970,32 @@ static void ionic_xdp_do_flush(struct ionic_cq *cq) } } +static unsigned int ionic_rx_cq_service(struct ionic_cq *cq, + unsigned int work_to_do) +{ + struct ionic_queue *q = cq->bound_q; + unsigned int work_done = 0; + struct bpf_prog *xdp_prog; + + if (work_to_do == 0) + return 0; + + xdp_prog = READ_ONCE(q->xdp_prog); + while (__ionic_rx_service(cq, xdp_prog)) { + if (cq->tail_idx == cq->num_descs - 1) + cq->done_color = !cq->done_color; + + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + + if (++work_done >= work_to_do) + break; + } + ionic_rx_fill(q, xdp_prog); + ionic_xdp_do_flush(cq); + + return work_done; +} + int ionic_rx_napi(struct napi_struct *napi, int budget) { struct ionic_qcq *qcq = napi_to_qcq(napi); @@ -984,12 +1006,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) if (unlikely(!budget)) return budget; - work_done = ionic_cq_service(cq, budget, - ionic_rx_service, NULL, NULL); - - ionic_rx_fill(cq->bound_q); + work_done = ionic_rx_cq_service(cq, budget); - 
ionic_xdp_do_flush(cq); if (work_done < budget && napi_complete_done(napi, work_done)) { ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR); flags |= IONIC_INTR_CRED_UNMASK; @@ -1030,12 +1048,8 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) if (unlikely(!budget)) return budget; - rx_work_done = ionic_cq_service(rxcq, budget, - ionic_rx_service, NULL, NULL); - - ionic_rx_fill(rxcq->bound_q); + rx_work_done = ionic_rx_cq_service(rxcq, budget); - ionic_xdp_do_flush(rxcq); if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { ionic_dim_update(rxqcq, 0); flags |= IONIC_INTR_CRED_UNMASK; @@ -1166,7 +1180,7 @@ static void ionic_tx_clean(struct ionic_queue *q, struct sk_buff *skb; if (desc_info->xdpf) { - ionic_xdp_tx_desc_clean(q->partner, desc_info); + ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi); stats->clean++; if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index))) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h index 9e73e324e7a119..b2b9a2dc9eb848 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h @@ -4,9 +4,11 @@ #ifndef _IONIC_TXRX_H_ #define _IONIC_TXRX_H_ +struct bpf_prog; + void ionic_tx_flush(struct ionic_cq *cq); -void ionic_rx_fill(struct ionic_queue *q); +void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog); void ionic_rx_empty(struct ionic_queue *q); void ionic_tx_empty(struct ionic_queue *q); int ionic_rx_napi(struct napi_struct *napi, int budget); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index ed24d6af7487b0..9cff0a8ffb2c55 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -3185,8 +3185,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter, struct list_head *head; bool ret = false; - dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; - + dev = ifa->ifa_dev->dev; if (dev == NULL) goto out; @@ -3379,7 +3378,7 @@ netxen_inetaddr_event(struct notifier_block *this, struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; unsigned long ip_event; - dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; + dev = ifa->ifa_dev->dev; ip_event = (event == NETDEV_UP) ? 
NX_IP_UP : NX_IP_DOWN; recheck: if (dev == NULL) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 63e3dac4d5f75b..9d6399a5c780d1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -326,25 +326,18 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *i struct qede_ptp *ptp = edev->ptp; if (!ptp) { - info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (ptp->clock) info->phc_index = ptp_clock_index(ptp->clock); - else - info->phc_index = -1; info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index b25102fded7b20..3d0b5cd978cbb0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1608,7 +1608,6 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *, struct qlcnic_host_tx_ring *); int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); -void qlcnic_watchdog_task(struct work_struct *work); void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring, u8 ring_id); void qlcnic_set_multi(struct net_device *netdev); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index bcef8ab715bfa1..d7cdea8f604d08 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2042,12 +2042,14 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode) int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable) { - int err; - u32 word; struct qlcnic_cmd_args cmd; - const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, - 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, - 0x255b0ec26d5a56daULL }; + static const u64 key[] = { + 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, + 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, + 0x255b0ec26d5a56daULL + }; + u32 word; + int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); if (err) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 23cd47d588e53f..a55fe6ac06c733 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -539,7 +539,6 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *); void qlcnic_83xx_get_func_no(struct qlcnic_adapter *); int qlcnic_83xx_cam_lock(struct qlcnic_adapter *); void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *); -int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32); void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *); void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *); void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); @@ -577,8 +576,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8); int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *, struct qlcnic_adapter *, u32); void 
qlcnic_free_mbx_args(struct qlcnic_cmd_args *); -void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *, - struct qlcnic_info *); int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *, struct ethtool_coalesce *); int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *); @@ -590,7 +587,6 @@ irqreturn_t qlcnic_83xx_intr(int, void *); irqreturn_t qlcnic_83xx_tmp_intr(int, void *); void qlcnic_83xx_check_vf(struct qlcnic_adapter *, const struct pci_device_id *); -int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *); int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *); void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *); void qlcnic_83xx_register_map(struct qlcnic_hardware_context *); @@ -602,8 +598,6 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int); int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *); int qlcnic_83xx_lock_flash(struct qlcnic_adapter *); void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *); -int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *); -int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int); int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *); int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *); int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); @@ -616,13 +610,9 @@ void qlcnic_83xx_idc_exit(struct qlcnic_adapter *); void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32); int qlcnic_83xx_lock_driver(struct qlcnic_adapter *); void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *); -int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *); int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *); int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int); int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *); -int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *, - struct qlcnic_info *, u8); -int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 90df4a0909fa2c..b3588a1ebc25f4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -4146,7 +4146,7 @@ qlcnic_inetaddr_event(struct notifier_block *this, struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; - dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; + dev = ifa->ifa_dev->dev; recheck: if (dev == NULL) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index f1e40aade127bf..4f0ddcedfa9790 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -286,7 +286,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->needs_free_netdev = true; rmnet_dev->ethtool_ops = &rmnet_ethtool_ops; - rmnet_dev->features |= NETIF_F_LLTX; + rmnet_dev->lltx = true; /* This perm addr will be used as interface identifier by IPv6 */ rmnet_dev->addr_assign_type = NET_ADDR_RANDOM; diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 03015b665f4ea5..8a8ea51c639e9c 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -120,4 +120,23 @@ config R8169_LEDS Optional support for controlling the NIC LED's with the netdev LED trigger. +config RTASE + tristate "Realtek Automotive Switch 9054/9068/9072/9075/9068/9071 PCIe Interface support" + depends on PCI + select CRC32 + select PAGE_POOL + help + Say Y here and it will be compiled and linked with the kernel + if you have a Realtek Ethernet adapter belonging to the + following families: + RTL9054 5GBit Ethernet + RTL9068 5GBit Ethernet + RTL9072 5GBit Ethernet + RTL9075 5GBit Ethernet + RTL9068 5GBit Ethernet + RTL9071 5GBit Ethernet + + To compile this driver as a module, choose M here: the module + will be called rtase. This is recommended. + endif # NET_VENDOR_REALTEK diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile index 635491d8826e56..046adf503ff492 100644 --- a/drivers/net/ethernet/realtek/Makefile +++ b/drivers/net/ethernet/realtek/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_ATP) += atp.o r8169-y += r8169_main.o r8169_firmware.o r8169_phy_config.o r8169-$(CONFIG_R8169_LEDS) += r8169_leds.o obj-$(CONFIG_R8169) += r8169.o +obj-$(CONFIG_RTASE) += rtase/ diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 00882ffc7a029e..e2db944e6fa8bd 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -69,6 +69,7 @@ enum mac_version { RTL_GIGA_MAC_VER_61, RTL_GIGA_MAC_VER_63, RTL_GIGA_MAC_VER_65, + RTL_GIGA_MAC_VER_66, RTL_GIGA_MAC_NONE }; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 3507c2e28110d2..0cc9baaecb1bcb 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include @@ -56,6 +56,7 @@ #define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" #define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" #define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw" +#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw" #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -138,6 +139,7 @@ static const struct { /* reserve 62 for CFG_METHOD_4 in the vendor driver */ [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2}, + [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3}, }; static const struct pci_device_id rtl8169_pci_tbl[] = { @@ -576,7 +578,34 @@ struct rtl8169_counters { __le64 rx_broadcast; __le32 rx_multicast; __le16 tx_aborted; - __le16 tx_underun; + __le16 tx_underrun; + 
/* new since RTL8125 */ + __le64 tx_octets; + __le64 rx_octets; + __le64 rx_multicast64; + __le64 tx_unicast64; + __le64 tx_broadcast64; + __le64 tx_multicast64; + __le32 tx_pause_on; + __le32 tx_pause_off; + __le32 tx_pause_all; + __le32 tx_deferred; + __le32 tx_late_collision; + __le32 tx_all_collision; + __le32 tx_aborted32; + __le32 align_errors32; + __le32 rx_frame_too_long; + __le32 rx_runt; + __le32 rx_pause_on; + __le32 rx_pause_off; + __le32 rx_pause_all; + __le32 rx_unknown_opcode; + __le32 rx_mac_error; + __le32 tx_underrun32; + __le32 rx_mac_missed; + __le32 rx_tcam_dropped; + __le32 tdu; + __le32 rdu; }; struct rtl8169_tc_offsets { @@ -679,6 +708,7 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2); MODULE_FIRMWARE(FIRMWARE_8125A_3); MODULE_FIRMWARE(FIRMWARE_8125B_2); MODULE_FIRMWARE(FIRMWARE_8126A_2); +MODULE_FIRMWARE(FIRMWARE_8126A_3); static inline struct device *tp_to_dev(struct rtl8169_private *tp) { @@ -1201,7 +1231,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: r8168g_mdio_write(tp, location, val); break; default: @@ -1216,7 +1246,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: return r8168g_mdio_read(tp, location); default: return r8169_mdio_read(tp, location); @@ -1425,7 +1455,7 @@ static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66: if (enable) RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); else @@ -1592,7 +1622,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) break; case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66: if (wolopts) rtl_mod_config2(tp, 0, PME_SIGNAL); else @@ -1841,7 +1871,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, data[9] = le64_to_cpu(counters->rx_broadcast); data[10] = le32_to_cpu(counters->rx_multicast); data[11] = le16_to_cpu(counters->tx_aborted); - data[12] = le16_to_cpu(counters->tx_underun); + data[12] = le16_to_cpu(counters->tx_underrun); } static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2071,6 +2101,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_61: case RTL_GIGA_MAC_VER_63: case RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_66: tp->tx_lpi_timer = timer_val; RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val); break; @@ -2199,6 +2230,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) enum mac_version ver; } mac_info[] = { /* 8126A family. */ + { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 }, { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 }, /* 8125B family. 
*/ @@ -2470,6 +2502,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) break; case RTL_GIGA_MAC_VER_63: case RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_66: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST | RX_PAUSE_SLOT_ON); break; @@ -2656,7 +2689,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61: rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); break; - case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); @@ -2899,7 +2932,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: rtl_eri_set_bits(tp, 0xd4, 0x0c00); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); break; default: @@ -2913,7 +2946,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: rtl_eri_clear_bits(tp, 0xd4, 0x1f00); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0); break; default: @@ -2940,6 +2973,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) rtl_mod_config5(tp, 0, ASPM_en); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_66: val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN; RTL_W8(tp, INT_CFG0_8125, val8); break; @@ -2950,7 +2984,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66: /* reset ephy tx/rx disable timer */ r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0); /* chip can trigger L1.2 */ @@ -2962,7 +2996,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) } else { switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_66: r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0); break; default: @@ -2971,6 +3005,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_66: val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN; RTL_W8(tp, INT_CFG0_8125, val8); break; @@ -3690,10 +3725,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) /* disable new tx descriptor format */ r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); - if (tp->mac_version == RTL_GIGA_MAC_VER_65) + if (tp->mac_version == RTL_GIGA_MAC_VER_65 || + tp->mac_version == RTL_GIGA_MAC_VER_66) RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02); - if (tp->mac_version == RTL_GIGA_MAC_VER_65) + if (tp->mac_version == RTL_GIGA_MAC_VER_65 || + tp->mac_version == RTL_GIGA_MAC_VER_66) r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); else if (tp->mac_version == RTL_GIGA_MAC_VER_63) r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); @@ -3711,7 +3748,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); - if (tp->mac_version == RTL_GIGA_MAC_VER_65) + if (tp->mac_version == RTL_GIGA_MAC_VER_65 || + tp->mac_version == RTL_GIGA_MAC_VER_66) r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000); else r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); @@ -3825,6 +3863,7 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a, + [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a, }; if (hw_configs[tp->mac_version]) @@ -3845,6 +3884,7 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp) break; case RTL_GIGA_MAC_VER_63: case RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_66: for (i = 0xa00; i < 0xa80; i += 4) RTL_W32(tp, i, 0); RTL_W16(tp, INT_CFG1_8125, 0x0000); @@ -4073,7 +4113,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp) RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66: rtl_enable_rxdvgate(tp); fsleep(2000); break; @@ -4224,7 +4264,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66: padto = max_t(unsigned int, padto, ETH_ZLEN); break; default: @@ -5257,7 +5297,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65: + case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_66: rtl_hw_init_8125(tp); break; default: diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 1f74317beb8878..cf29b120848269 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -1060,6 +1060,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); rtl8168g_enable_gphy_10m(phydev); + rtl8168g_disable_aldps(phydev); rtl8125a_config_eee_phy(phydev); } @@ -1099,6 +1100,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); rtl8125_legacy_force_mode(phydev); + rtl8168g_disable_aldps(phydev); rtl8125b_config_eee_phy(phydev); } @@ -1159,6 +1161,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config, + [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config, }; if (phy_configs[ver]) diff --git a/drivers/net/ethernet/realtek/rtase/Makefile b/drivers/net/ethernet/realtek/rtase/Makefile new file mode 100644 index 00000000000000..ba3d8550f9e68a --- /dev/null +++ b/drivers/net/ethernet/realtek/rtase/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# Copyright(c) 2024 Realtek Semiconductor Corp. All rights reserved. + +# +# Makefile for the Realtek PCIe driver +# + +obj-$(CONFIG_RTASE) += rtase.o + +rtase-objs := rtase_main.o diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h new file mode 100644 index 00000000000000..583c33930f886f --- /dev/null +++ b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * rtase is the Linux device driver released for Realtek Automotive Switch + * controllers with PCI-Express interface. + * + * Copyright(c) 2024 Realtek Semiconductor Corp. 
+ */ + +#ifndef RTASE_H +#define RTASE_H + +#define RTASE_HW_VER_MASK 0x7C800000 + +#define RTASE_RX_DMA_BURST_256 4 +#define RTASE_TX_DMA_BURST_UNLIMITED 7 + +#define RTASE_RX_BUF_SIZE (PAGE_SIZE - \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define RTASE_MAX_JUMBO_SIZE (RTASE_RX_BUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN) + +/* 3 means InterFrameGap = the shortest one */ +#define RTASE_INTERFRAMEGAP 0x03 + +#define RTASE_REGS_SIZE 256 +#define RTASE_PCI_REGS_SIZE 0x100 + +#define RTASE_MULTICAST_FILTER_MASK GENMASK(30, 26) + +#define RTASE_VLAN_FILTER_ENTRY_NUM 32 +#define RTASE_NUM_TX_QUEUE 8 +#define RTASE_NUM_RX_QUEUE 4 + +#define RTASE_TXQ_CTRL 1 +#define RTASE_FUNC_TXQ_NUM 1 +#define RTASE_FUNC_RXQ_NUM 1 +#define RTASE_INTERRUPT_NUM 1 + +#define RTASE_MITI_TIME_COUNT_MASK GENMASK(3, 0) +#define RTASE_MITI_TIME_UNIT_MASK GENMASK(7, 4) +#define RTASE_MITI_DEFAULT_TIME 128 +#define RTASE_MITI_MAX_TIME 491520 +#define RTASE_MITI_PKT_NUM_COUNT_MASK GENMASK(11, 8) +#define RTASE_MITI_PKT_NUM_UNIT_MASK GENMASK(13, 12) +#define RTASE_MITI_DEFAULT_PKT_NUM 64 +#define RTASE_MITI_MAX_PKT_NUM_IDX 3 +#define RTASE_MITI_MAX_PKT_NUM_UNIT 16 +#define RTASE_MITI_MAX_PKT_NUM 240 +#define RTASE_MITI_COUNT_BIT_NUM 4 + +#define RTASE_NUM_MSIX 4 + +#define RTASE_DWORD_MOD 16 + +/*****************************************************************************/ +enum rtase_registers { + RTASE_MAC0 = 0x0000, + RTASE_MAC4 = 0x0004, + RTASE_MAR0 = 0x0008, + RTASE_MAR1 = 0x000C, + RTASE_DTCCR0 = 0x0010, + RTASE_DTCCR4 = 0x0014, +#define RTASE_COUNTER_RESET BIT(0) +#define RTASE_COUNTER_DUMP BIT(3) + + RTASE_FCR = 0x0018, +#define RTASE_FCR_RXQ_MASK GENMASK(5, 4) + + RTASE_LBK_CTRL = 0x001A, +#define RTASE_LBK_ATLD BIT(1) +#define RTASE_LBK_CLR BIT(0) + + RTASE_TX_DESC_ADDR0 = 0x0020, + RTASE_TX_DESC_ADDR4 = 0x0024, + RTASE_TX_DESC_COMMAND = 0x0028, +#define RTASE_TX_DESC_CMD_CS BIT(15) +#define RTASE_TX_DESC_CMD_WE BIT(14) + + RTASE_BOOT_CTL = 0x6004, + RTASE_CLKSW_SET = 0x6018, + + RTASE_CHIP_CMD = 0x0037, +#define RTASE_STOP_REQ BIT(7) +#define RTASE_STOP_REQ_DONE BIT(6) +#define RTASE_RE BIT(3) +#define RTASE_TE BIT(2) + + RTASE_IMR0 = 0x0038, + RTASE_ISR0 = 0x003C, +#define RTASE_TOK7 BIT(30) +#define RTASE_TOK6 BIT(28) +#define RTASE_TOK5 BIT(26) +#define RTASE_TOK4 BIT(24) +#define RTASE_FOVW BIT(6) +#define RTASE_RDU BIT(4) +#define RTASE_TOK BIT(2) +#define RTASE_ROK BIT(0) + + RTASE_IMR1 = 0x0800, + RTASE_ISR1 = 0x0802, +#define RTASE_Q_TOK BIT(4) +#define RTASE_Q_RDU BIT(1) +#define RTASE_Q_ROK BIT(0) + + RTASE_EPHY_ISR = 0x6014, + RTASE_EPHY_IMR = 0x6016, + + RTASE_TX_CONFIG_0 = 0x0040, +#define RTASE_TX_INTER_FRAME_GAP_MASK GENMASK(25, 24) + /* DMA burst value (0-7) is shift this many bits */ +#define RTASE_TX_DMA_MASK GENMASK(10, 8) + + RTASE_RX_CONFIG_0 = 0x0044, +#define RTASE_RX_SINGLE_FETCH BIT(14) +#define RTASE_RX_SINGLE_TAG BIT(13) +#define RTASE_RX_MX_DMA_MASK GENMASK(10, 8) +#define RTASE_ACPT_FLOW BIT(7) +#define RTASE_ACCEPT_ERR BIT(5) +#define RTASE_ACCEPT_RUNT BIT(4) +#define RTASE_ACCEPT_BROADCAST BIT(3) +#define RTASE_ACCEPT_MULTICAST BIT(2) +#define RTASE_ACCEPT_MYPHYS BIT(1) +#define RTASE_ACCEPT_ALLPHYS BIT(0) +#define RTASE_ACCEPT_MASK (RTASE_ACPT_FLOW | RTASE_ACCEPT_ERR | \ + RTASE_ACCEPT_RUNT | RTASE_ACCEPT_BROADCAST | \ + RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_MYPHYS | \ + RTASE_ACCEPT_ALLPHYS) + + RTASE_RX_CONFIG_1 = 0x0046, +#define RTASE_RX_MAX_FETCH_DESC_MASK GENMASK(15, 11) +#define RTASE_RX_NEW_DESC_FORMAT_EN BIT(8) +#define RTASE_OUTER_VLAN_DETAG_EN BIT(7) +#define 
RTASE_INNER_VLAN_DETAG_EN BIT(6) +#define RTASE_PCIE_NEW_FLOW BIT(2) +#define RTASE_PCIE_RELOAD_EN BIT(0) + + RTASE_EEM = 0x0050, +#define RTASE_EEM_UNLOCK 0xC0 + + RTASE_TDFNR = 0x0057, + RTASE_TPPOLL = 0x0090, + RTASE_PDR = 0x00B0, + RTASE_FIFOR = 0x00D3, +#define RTASE_TX_FIFO_EMPTY BIT(5) +#define RTASE_RX_FIFO_EMPTY BIT(4) + + RTASE_RMS = 0x00DA, + RTASE_CPLUS_CMD = 0x00E0, +#define RTASE_FORCE_RXFLOW_EN BIT(11) +#define RTASE_FORCE_TXFLOW_EN BIT(10) +#define RTASE_RX_CHKSUM BIT(5) + + RTASE_Q0_RX_DESC_ADDR0 = 0x00E4, + RTASE_Q0_RX_DESC_ADDR4 = 0x00E8, + RTASE_Q1_RX_DESC_ADDR0 = 0x4000, + RTASE_Q1_RX_DESC_ADDR4 = 0x4004, + RTASE_MTPS = 0x00EC, +#define RTASE_TAG_NUM_SEL_MASK GENMASK(10, 8) + + RTASE_MISC = 0x00F2, +#define RTASE_RX_DV_GATE_EN BIT(3) + + RTASE_TFUN_CTRL = 0x0400, +#define RTASE_TX_NEW_DESC_FORMAT_EN BIT(0) + + RTASE_TX_CONFIG_1 = 0x203E, +#define RTASE_TC_MODE_MASK GENMASK(11, 10) + + RTASE_TOKSEL = 0x2046, + RTASE_RFIFONFULL = 0x4406, + RTASE_INT_MITI_TX = 0x0A00, + RTASE_INT_MITI_RX = 0x0A80, + + RTASE_VLAN_ENTRY_0 = 0xAC80, +}; + +enum rtase_desc_status_bit { + RTASE_DESC_OWN = BIT(31), /* Descriptor is owned by NIC */ + RTASE_RING_END = BIT(30), /* End of descriptor ring */ +}; + +enum rtase_sw_flag_content { + RTASE_SWF_MSI_ENABLED = BIT(1), + RTASE_SWF_MSIX_ENABLED = BIT(2), +}; + +#define RSVD_MASK 0x3FFFC000 + +struct rtase_tx_desc { + __le32 opts1; + __le32 opts2; + __le64 addr; + __le32 opts3; + __le32 reserved1; + __le32 reserved2; + __le32 reserved3; +} __packed; + +/*------ offset 0 of tx descriptor ------*/ +#define RTASE_TX_FIRST_FRAG BIT(29) /* Tx First segment of a packet */ +#define RTASE_TX_LAST_FRAG BIT(28) /* Tx Final segment of a packet */ +#define RTASE_GIANT_SEND_V4 BIT(26) /* TCP Giant Send Offload V4 (GSOv4) */ +#define RTASE_GIANT_SEND_V6 BIT(25) /* TCP Giant Send Offload V6 (GSOv6) */ +#define RTASE_TX_VLAN_TAG BIT(17) /* Add VLAN tag */ + +/*------ offset 4 of tx descriptor ------*/ +#define RTASE_TX_UDPCS_C BIT(31) /* Calculate UDP/IP checksum */ +#define RTASE_TX_TCPCS_C BIT(30) /* Calculate TCP/IP checksum */ +#define RTASE_TX_IPCS_C BIT(29) /* Calculate IP checksum */ +#define RTASE_TX_IPV6F_C BIT(28) /* Indicate it is an IPv6 packet */ + +union rtase_rx_desc { + struct { + __le64 header_buf_addr; + __le32 reserved1; + __le32 opts_header_len; + __le64 addr; + __le32 reserved2; + __le32 opts1; + } __packed desc_cmd; + + struct { + __le32 reserved1; + __le32 reserved2; + __le32 rss; + __le32 opts4; + __le32 reserved3; + __le32 opts3; + __le32 opts2; + __le32 opts1; + } __packed desc_status; +} __packed; + +/*------ offset 28 of rx descriptor ------*/ +#define RTASE_RX_FIRST_FRAG BIT(25) /* Rx First segment of a packet */ +#define RTASE_RX_LAST_FRAG BIT(24) /* Rx Final segment of a packet */ +#define RTASE_RX_RES BIT(20) +#define RTASE_RX_RUNT BIT(19) +#define RTASE_RX_RWT BIT(18) +#define RTASE_RX_CRC BIT(16) +#define RTASE_RX_V6F BIT(31) +#define RTASE_RX_V4F BIT(30) +#define RTASE_RX_UDPT BIT(29) +#define RTASE_RX_TCPT BIT(28) +#define RTASE_RX_IPF BIT(26) /* IP checksum failed */ +#define RTASE_RX_UDPF BIT(25) /* UDP/IP checksum failed */ +#define RTASE_RX_TCPF BIT(24) /* TCP/IP checksum failed */ +#define RTASE_RX_VLAN_TAG BIT(16) /* VLAN tag available */ + +#define RTASE_NUM_DESC 1024 +#define RTASE_TX_BUDGET_DEFAULT 256 +#define RTASE_TX_RING_DESC_SIZE (RTASE_NUM_DESC * sizeof(struct rtase_tx_desc)) +#define RTASE_RX_RING_DESC_SIZE (RTASE_NUM_DESC * sizeof(union rtase_rx_desc)) +#define RTASE_TX_STOP_THRS (MAX_SKB_FRAGS + 1) 
+#define RTASE_TX_START_THRS (2 * RTASE_TX_STOP_THRS) +#define RTASE_VLAN_TAG_MASK GENMASK(15, 0) +#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0) + +#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10) + +struct rtase_int_vector { + struct rtase_private *tp; + unsigned int irq; + char name[RTASE_IVEC_NAME_SIZE]; + u16 index; + u16 imr_addr; + u16 isr_addr; + u32 imr; + struct list_head ring_list; + struct napi_struct napi; + int (*poll)(struct napi_struct *napi, int budget); +}; + +struct rtase_ring { + struct rtase_int_vector *ivec; + void *desc; + dma_addr_t phy_addr; + u32 cur_idx; + u32 dirty_idx; + u16 index; + + struct sk_buff *skbuff[RTASE_NUM_DESC]; + void *data_buf[RTASE_NUM_DESC]; + union { + u32 len[RTASE_NUM_DESC]; + dma_addr_t data_phy_addr[RTASE_NUM_DESC]; + } mis; + + struct list_head ring_entry; + int (*ring_handler)(struct rtase_ring *ring, int budget); + u64 alloc_fail; +}; + +struct rtase_stats { + u64 tx_dropped; + u64 rx_dropped; + u64 multicast; + u64 rx_errors; + u64 rx_length_errors; + u64 rx_crc_errors; +}; + +struct rtase_private { + void __iomem *mmio_addr; + u32 sw_flag; + + struct pci_dev *pdev; + struct net_device *dev; + u32 rx_buf_sz; + + struct page_pool *page_pool; + struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE]; + struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE]; + struct rtase_counters *tally_vaddr; + dma_addr_t tally_paddr; + + u32 vlan_filter_ctrl; + u16 vlan_filter_vid[RTASE_VLAN_FILTER_ENTRY_NUM]; + + struct msix_entry msix_entry[RTASE_NUM_MSIX]; + struct rtase_int_vector int_vector[RTASE_NUM_MSIX]; + + struct rtase_stats stats; + + u16 tx_queue_ctrl; + u16 func_tx_queue_num; + u16 func_rx_queue_num; + u16 int_nums; + u16 tx_int_mit; + u16 rx_int_mit; +}; + +#define RTASE_LSO_64K 64000 + +#define RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2 (16 * 4) + +#define RTASE_TCPHO_MASK GENMASK(24, 18) + +#define RTASE_MSS_MASK GENMASK(28, 18) + +#endif /* RTASE_H */ diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c new file mode 100644 index 00000000000000..f8777b7663d35d --- /dev/null +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -0,0 +1,2288 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * rtase is the Linux device driver released for Realtek Automotive Switch + * controllers with PCI-Express interface. + * + * Copyright(c) 2024 Realtek Semiconductor Corp. + * + * Below is a simplified block diagram of the chip and its relevant interfaces. + * + * ************************* + * * * + * * CPU network device * + * * * + * * +-------------+ * + * * | PCIE Host | * + * ***********++************ + * || + * PCIE + * || + * ********************++********************** + * * | PCIE Endpoint | * + * * +---------------+ * + * * | GMAC | * + * * +--++--+ Realtek * + * * || RTL90xx Series * + * * || * + * * +-------------++----------------+ * + * * | | MAC | | * + * * | +-----+ | * + * * | | * + * * | Ethernet Switch Core | * + * * | | * + * * | +-----+ +-----+ | * + * * | | MAC |...........| MAC | | * + * * +---+-----+-----------+-----+---+ * + * * | PHY |...........| PHY | * + * * +--++-+ +--++-+ * + * *************||****************||*********** + * + * The block of the Realtek RTL90xx series is our entire chip architecture, + * the GMAC is connected to the switch core, and there is no PHY in between. + * In addition, this driver is mainly used to control GMAC, but does not + * control the switch core, so it is not the same as DSA. Linux only plays + * the role of a normal leaf node in this model. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rtase.h" + +#define RTK_OPTS1_DEBUG_VALUE 0x0BADBEEF +#define RTK_MAGIC_NUMBER 0x0BADBADBADBADBAD + +static const struct pci_device_id rtase_pci_tbl[] = { + {PCI_VDEVICE(REALTEK, 0x906A)}, + {} +}; + +MODULE_DEVICE_TABLE(pci, rtase_pci_tbl); + +MODULE_AUTHOR("Realtek ARD Software Team"); +MODULE_DESCRIPTION("Network Driver for the PCIe interface of Realtek Automotive Ethernet Switch"); +MODULE_LICENSE("Dual BSD/GPL"); + +struct rtase_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underrun; +} __packed; + +static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8) +{ + writeb(val8, tp->mmio_addr + reg); +} + +static void rtase_w16(const struct rtase_private *tp, u16 reg, u16 val16) +{ + writew(val16, tp->mmio_addr + reg); +} + +static void rtase_w32(const struct rtase_private *tp, u16 reg, u32 val32) +{ + writel(val32, tp->mmio_addr + reg); +} + +static u8 rtase_r8(const struct rtase_private *tp, u16 reg) +{ + return readb(tp->mmio_addr + reg); +} + +static u16 rtase_r16(const struct rtase_private *tp, u16 reg) +{ + return readw(tp->mmio_addr + reg); +} + +static u32 rtase_r32(const struct rtase_private *tp, u16 reg) +{ + return readl(tp->mmio_addr + reg); +} + +static void rtase_free_desc(struct rtase_private *tp) +{ + struct pci_dev *pdev = tp->pdev; + u32 i; + + for (i = 0; i < tp->func_tx_queue_num; i++) { + if (!tp->tx_ring[i].desc) + continue; + + dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE, + tp->tx_ring[i].desc, + tp->tx_ring[i].phy_addr); + tp->tx_ring[i].desc = NULL; + } + + for (i = 0; i < tp->func_rx_queue_num; i++) { + if (!tp->rx_ring[i].desc) + continue; + + dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE, + tp->rx_ring[i].desc, + tp->rx_ring[i].phy_addr); + tp->rx_ring[i].desc = NULL; + } +} + +static int rtase_alloc_desc(struct rtase_private *tp) +{ + struct pci_dev *pdev = tp->pdev; + u32 i; + + /* rx and tx descriptors need 256-byte alignment. + * dma_alloc_coherent provides more.
+ */ + for (i = 0; i < tp->func_tx_queue_num; i++) { + tp->tx_ring[i].desc = + dma_alloc_coherent(&pdev->dev, + RTASE_TX_RING_DESC_SIZE, + &tp->tx_ring[i].phy_addr, + GFP_KERNEL); + if (!tp->tx_ring[i].desc) + goto err_out; + } + + for (i = 0; i < tp->func_rx_queue_num; i++) { + tp->rx_ring[i].desc = + dma_alloc_coherent(&pdev->dev, + RTASE_RX_RING_DESC_SIZE, + &tp->rx_ring[i].phy_addr, + GFP_KERNEL); + if (!tp->rx_ring[i].desc) + goto err_out; + } + + return 0; + +err_out: + rtase_free_desc(tp); + return -ENOMEM; +} + +static void rtase_unmap_tx_skb(struct pci_dev *pdev, u32 len, + struct rtase_tx_desc *desc) +{ + dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len, + DMA_TO_DEVICE); + desc->opts1 = cpu_to_le32(RTK_OPTS1_DEBUG_VALUE); + desc->opts2 = 0x00; + desc->addr = cpu_to_le64(RTK_MAGIC_NUMBER); +} + +static void rtase_tx_clear_range(struct rtase_ring *ring, u32 start, u32 n) +{ + struct rtase_tx_desc *desc_base = ring->desc; + struct rtase_private *tp = ring->ivec->tp; + u32 i; + + for (i = 0; i < n; i++) { + u32 entry = (start + i) % RTASE_NUM_DESC; + struct rtase_tx_desc *desc = desc_base + entry; + u32 len = ring->mis.len[entry]; + struct sk_buff *skb; + + if (len == 0) + continue; + + rtase_unmap_tx_skb(tp->pdev, len, desc); + ring->mis.len[entry] = 0; + skb = ring->skbuff[entry]; + if (!skb) + continue; + + tp->stats.tx_dropped++; + dev_kfree_skb_any(skb); + ring->skbuff[entry] = NULL; + } +} + +static void rtase_tx_clear(struct rtase_private *tp) +{ + struct rtase_ring *ring; + u16 i; + + for (i = 0; i < tp->func_tx_queue_num; i++) { + ring = &tp->tx_ring[i]; + rtase_tx_clear_range(ring, ring->dirty_idx, RTASE_NUM_DESC); + ring->cur_idx = 0; + ring->dirty_idx = 0; + } +} + +static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32 rx_buf_sz) +{ + u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END; + + desc->desc_status.opts2 = 0; + /* force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->desc_cmd.opts1, + cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz)); +} + +static u32 rtase_tx_avail(struct rtase_ring *ring) +{ + return READ_ONCE(ring->dirty_idx) + RTASE_NUM_DESC - + READ_ONCE(ring->cur_idx); +} + +static int tx_handler(struct rtase_ring *ring, int budget) +{ + const struct rtase_private *tp = ring->ivec->tp; + struct net_device *dev = tp->dev; + u32 dirty_tx, tx_left; + u32 bytes_compl = 0; + u32 pkts_compl = 0; + int workdone = 0; + + dirty_tx = ring->dirty_idx; + tx_left = READ_ONCE(ring->cur_idx) - dirty_tx; + + while (tx_left > 0) { + u32 entry = dirty_tx % RTASE_NUM_DESC; + struct rtase_tx_desc *desc = ring->desc + + sizeof(struct rtase_tx_desc) * entry; + u32 status; + + status = le32_to_cpu(desc->opts1); + + if (status & RTASE_DESC_OWN) + break; + + rtase_unmap_tx_skb(tp->pdev, ring->mis.len[entry], desc); + ring->mis.len[entry] = 0; + if (ring->skbuff[entry]) { + pkts_compl++; + bytes_compl += ring->skbuff[entry]->len; + napi_consume_skb(ring->skbuff[entry], budget); + ring->skbuff[entry] = NULL; + } + + dirty_tx++; + tx_left--; + workdone++; + + if (workdone == RTASE_TX_BUDGET_DEFAULT) + break; + } + + if (ring->dirty_idx != dirty_tx) { + dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); + WRITE_ONCE(ring->dirty_idx, dirty_tx); + + netif_subqueue_completed_wake(dev, ring->index, pkts_compl, + bytes_compl, + rtase_tx_avail(ring), + RTASE_TX_START_THRS); + + if (ring->cur_idx != dirty_tx) + rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index)); + } + + return 0; +} + +static void rtase_tx_desc_init(struct 
rtase_private *tp, u16 idx) +{ + struct rtase_ring *ring = &tp->tx_ring[idx]; + struct rtase_tx_desc *desc; + u32 i; + + memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE); + memset(ring->skbuff, 0x0, sizeof(ring->skbuff)); + ring->cur_idx = 0; + ring->dirty_idx = 0; + ring->index = idx; + ring->alloc_fail = 0; + + for (i = 0; i < RTASE_NUM_DESC; i++) { + ring->mis.len[i] = 0; + if ((RTASE_NUM_DESC - 1) == i) { + desc = ring->desc + sizeof(struct rtase_tx_desc) * i; + desc->opts1 = cpu_to_le32(RTASE_RING_END); + } + } + + ring->ring_handler = tx_handler; + if (idx < 4) { + ring->ivec = &tp->int_vector[idx]; + list_add_tail(&ring->ring_entry, + &tp->int_vector[idx].ring_list); + } else { + ring->ivec = &tp->int_vector[0]; + list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list); + } +} + +static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping, + u32 rx_buf_sz) +{ + desc->desc_cmd.addr = cpu_to_le64(mapping); + + rtase_mark_to_asic(desc, rx_buf_sz); +} + +static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc) +{ + desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER); + desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN | RSVD_MASK); +} + +static int rtase_alloc_rx_data_buf(struct rtase_ring *ring, + void **p_data_buf, + union rtase_rx_desc *desc, + dma_addr_t *rx_phy_addr) +{ + struct rtase_int_vector *ivec = ring->ivec; + const struct rtase_private *tp = ivec->tp; + dma_addr_t mapping; + struct page *page; + + page = page_pool_dev_alloc_pages(tp->page_pool); + if (!page) { + ring->alloc_fail++; + goto err_out; + } + + *p_data_buf = page_address(page); + mapping = page_pool_get_dma_addr(page); + *rx_phy_addr = mapping; + rtase_map_to_asic(desc, mapping, tp->rx_buf_sz); + + return 0; + +err_out: + rtase_make_unusable_by_asic(desc); + + return -ENOMEM; +} + +static u32 rtase_rx_ring_fill(struct rtase_ring *ring, u32 ring_start, + u32 ring_end) +{ + union rtase_rx_desc *desc_base = ring->desc; + u32 cur; + + for (cur = ring_start; ring_end - cur > 0; cur++) { + u32 i = cur % RTASE_NUM_DESC; + union rtase_rx_desc *desc = desc_base + i; + int ret; + + if (ring->data_buf[i]) + continue; + + ret = rtase_alloc_rx_data_buf(ring, &ring->data_buf[i], desc, + &ring->mis.data_phy_addr[i]); + if (ret) + break; + } + + return cur - ring_start; +} + +static void rtase_mark_as_last_descriptor(union rtase_rx_desc *desc) +{ + desc->desc_cmd.opts1 |= cpu_to_le32(RTASE_RING_END); +} + +static void rtase_rx_ring_clear(struct page_pool *page_pool, + struct rtase_ring *ring) +{ + union rtase_rx_desc *desc; + struct page *page; + u32 i; + + for (i = 0; i < RTASE_NUM_DESC; i++) { + desc = ring->desc + sizeof(union rtase_rx_desc) * i; + page = virt_to_head_page(ring->data_buf[i]); + + if (ring->data_buf[i]) + page_pool_put_full_page(page_pool, page, true); + + rtase_make_unusable_by_asic(desc); + } +} + +static int rtase_fragmented_frame(u32 status) +{ + return (status & (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG)) != + (RTASE_RX_FIRST_FRAG | RTASE_RX_LAST_FRAG); +} + +static void rtase_rx_csum(const struct rtase_private *tp, struct sk_buff *skb, + const union rtase_rx_desc *desc) +{ + u32 opts2 = le32_to_cpu(desc->desc_status.opts2); + + /* rx csum offload */ + if (((opts2 & RTASE_RX_V4F) && !(opts2 & RTASE_RX_IPF)) || + (opts2 & RTASE_RX_V6F)) { + if (((opts2 & RTASE_RX_TCPT) && !(opts2 & RTASE_RX_TCPF)) || + ((opts2 & RTASE_RX_UDPT) && !(opts2 & RTASE_RX_UDPF))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } else { + skb->ip_summed = 
CHECKSUM_NONE; + } +} + +static void rtase_rx_vlan_skb(union rtase_rx_desc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->desc_status.opts2); + + if (!(opts2 & RTASE_RX_VLAN_TAG)) + return; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + swab16(opts2 & RTASE_VLAN_TAG_MASK)); +} + +static void rtase_rx_skb(const struct rtase_ring *ring, struct sk_buff *skb) +{ + struct rtase_int_vector *ivec = ring->ivec; + + napi_gro_receive(&ivec->napi, skb); +} + +static int rx_handler(struct rtase_ring *ring, int budget) +{ + union rtase_rx_desc *desc_base = ring->desc; + u32 pkt_size, cur_rx, delta, entry, status; + struct rtase_private *tp = ring->ivec->tp; + struct net_device *dev = tp->dev; + union rtase_rx_desc *desc; + struct sk_buff *skb; + int workdone = 0; + + cur_rx = ring->cur_idx; + entry = cur_rx % RTASE_NUM_DESC; + desc = &desc_base[entry]; + + while (workdone < budget) { + status = le32_to_cpu(desc->desc_status.opts1); + + if (status & RTASE_DESC_OWN) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the rx descriptor until + * we know the status of RTASE_DESC_OWN + */ + dma_rmb(); + + if (unlikely(status & RTASE_RX_RES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + + tp->stats.rx_errors++; + + if (status & (RTASE_RX_RWT | RTASE_RX_RUNT)) + tp->stats.rx_length_errors++; + + if (status & RTASE_RX_CRC) + tp->stats.rx_crc_errors++; + + if (dev->features & NETIF_F_RXALL) + goto process_pkt; + + rtase_mark_to_asic(desc, tp->rx_buf_sz); + goto skip_process_pkt; + } + +process_pkt: + pkt_size = status & RTASE_RX_PKT_SIZE_MASK; + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. 
+ */ + if (unlikely(rtase_fragmented_frame(status))) { + tp->stats.rx_dropped++; + tp->stats.rx_length_errors++; + rtase_mark_to_asic(desc, tp->rx_buf_sz); + goto skip_process_pkt; + } + + dma_sync_single_for_cpu(&tp->pdev->dev, + ring->mis.data_phy_addr[entry], + tp->rx_buf_sz, DMA_FROM_DEVICE); + + skb = build_skb(ring->data_buf[entry], PAGE_SIZE); + if (!skb) { + tp->stats.rx_dropped++; + rtase_mark_to_asic(desc, tp->rx_buf_sz); + goto skip_process_pkt; + } + ring->data_buf[entry] = NULL; + + if (dev->features & NETIF_F_RXCSUM) + rtase_rx_csum(tp, skb, desc); + + skb_put(skb, pkt_size); + skb_mark_for_recycle(skb); + skb->protocol = eth_type_trans(skb, dev); + + if (skb->pkt_type == PACKET_MULTICAST) + tp->stats.multicast++; + + rtase_rx_vlan_skb(desc, skb); + rtase_rx_skb(ring, skb); + + dev_sw_netstats_rx_add(dev, pkt_size); + +skip_process_pkt: + workdone++; + cur_rx++; + entry = cur_rx % RTASE_NUM_DESC; + desc = ring->desc + sizeof(union rtase_rx_desc) * entry; + } + + ring->cur_idx = cur_rx; + delta = rtase_rx_ring_fill(ring, ring->dirty_idx, ring->cur_idx); + ring->dirty_idx += delta; + + return workdone; +} + +static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx) +{ + struct rtase_ring *ring = &tp->rx_ring[idx]; + u16 i; + + memset(ring->desc, 0x0, RTASE_RX_RING_DESC_SIZE); + memset(ring->data_buf, 0x0, sizeof(ring->data_buf)); + ring->cur_idx = 0; + ring->dirty_idx = 0; + ring->index = idx; + ring->alloc_fail = 0; + + for (i = 0; i < RTASE_NUM_DESC; i++) + ring->mis.data_phy_addr[i] = 0; + + ring->ring_handler = rx_handler; + ring->ivec = &tp->int_vector[idx]; + list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list); +} + +static void rtase_rx_clear(struct rtase_private *tp) +{ + u32 i; + + for (i = 0; i < tp->func_rx_queue_num; i++) + rtase_rx_ring_clear(tp->page_pool, &tp->rx_ring[i]); + + page_pool_destroy(tp->page_pool); + tp->page_pool = NULL; +} + +static int rtase_init_ring(const struct net_device *dev) +{ + struct rtase_private *tp = netdev_priv(dev); + struct page_pool_params pp_params = { 0 }; + struct page_pool *page_pool; + u32 num; + u16 i; + + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.order = 0; + pp_params.pool_size = RTASE_NUM_DESC * tp->func_rx_queue_num; + pp_params.nid = dev_to_node(&tp->pdev->dev); + pp_params.dev = &tp->pdev->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pp_params.max_len = PAGE_SIZE; + pp_params.offset = 0; + + page_pool = page_pool_create(&pp_params); + if (IS_ERR(page_pool)) { + netdev_err(tp->dev, "failed to create page pool\n"); + return -ENOMEM; + } + + tp->page_pool = page_pool; + + for (i = 0; i < tp->func_tx_queue_num; i++) + rtase_tx_desc_init(tp, i); + + for (i = 0; i < tp->func_rx_queue_num; i++) { + rtase_rx_desc_init(tp, i); + + num = rtase_rx_ring_fill(&tp->rx_ring[i], 0, RTASE_NUM_DESC); + if (num != RTASE_NUM_DESC) + goto err_out; + + rtase_mark_as_last_descriptor(tp->rx_ring[i].desc + + sizeof(union rtase_rx_desc) * + (RTASE_NUM_DESC - 1)); + } + + return 0; + +err_out: + rtase_rx_clear(tp); + return -ENOMEM; +} + +static void rtase_interrupt_mitigation(const struct rtase_private *tp) +{ + u32 i; + + for (i = 0; i < tp->func_tx_queue_num; i++) + rtase_w16(tp, RTASE_INT_MITI_TX + i * 2, tp->tx_int_mit); + + for (i = 0; i < tp->func_rx_queue_num; i++) + rtase_w16(tp, RTASE_INT_MITI_RX + i * 2, tp->rx_int_mit); +} + +static void rtase_tally_counter_addr_fill(const struct rtase_private *tp) +{ + rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr)); + rtase_w32(tp, 
RTASE_DTCCR0, lower_32_bits(tp->tally_paddr)); +} + +static void rtase_tally_counter_clear(const struct rtase_private *tp) +{ + u32 cmd = lower_32_bits(tp->tally_paddr); + + rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(tp->tally_paddr)); + rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_RESET); +} + +static void rtase_desc_addr_fill(const struct rtase_private *tp) +{ + const struct rtase_ring *ring; + u16 i, cmd, val; + int err; + + for (i = 0; i < tp->func_tx_queue_num; i++) { + ring = &tp->tx_ring[i]; + + rtase_w32(tp, RTASE_TX_DESC_ADDR0, + lower_32_bits(ring->phy_addr)); + rtase_w32(tp, RTASE_TX_DESC_ADDR4, + upper_32_bits(ring->phy_addr)); + + cmd = i | RTASE_TX_DESC_CMD_WE | RTASE_TX_DESC_CMD_CS; + rtase_w16(tp, RTASE_TX_DESC_COMMAND, cmd); + + err = read_poll_timeout(rtase_r16, val, + !(val & RTASE_TX_DESC_CMD_CS), 10, + 1000, false, tp, + RTASE_TX_DESC_COMMAND); + + if (err == -ETIMEDOUT) + netdev_err(tp->dev, + "error occurred in fill tx descriptor\n"); + } + + for (i = 0; i < tp->func_rx_queue_num; i++) { + ring = &tp->rx_ring[i]; + + if (i == 0) { + rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR0, + lower_32_bits(ring->phy_addr)); + rtase_w32(tp, RTASE_Q0_RX_DESC_ADDR4, + upper_32_bits(ring->phy_addr)); + } else { + rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR0 + ((i - 1) * 8)), + lower_32_bits(ring->phy_addr)); + rtase_w32(tp, (RTASE_Q1_RX_DESC_ADDR4 + ((i - 1) * 8)), + upper_32_bits(ring->phy_addr)); + } + } +} + +static void rtase_hw_set_features(const struct net_device *dev, + netdev_features_t features) +{ + const struct rtase_private *tp = netdev_priv(dev); + u16 rx_config, val; + + rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0); + if (features & NETIF_F_RXALL) + rx_config |= (RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT); + else + rx_config &= ~(RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT); + + rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config); + + val = rtase_r16(tp, RTASE_CPLUS_CMD); + if (features & NETIF_F_RXCSUM) + rtase_w16(tp, RTASE_CPLUS_CMD, val | RTASE_RX_CHKSUM); + else + rtase_w16(tp, RTASE_CPLUS_CMD, val & ~RTASE_RX_CHKSUM); + + rx_config = rtase_r16(tp, RTASE_RX_CONFIG_1); + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= (RTASE_INNER_VLAN_DETAG_EN | + RTASE_OUTER_VLAN_DETAG_EN); + else + rx_config &= ~(RTASE_INNER_VLAN_DETAG_EN | + RTASE_OUTER_VLAN_DETAG_EN); + + rtase_w16(tp, RTASE_RX_CONFIG_1, rx_config); +} + +static void rtase_hw_set_rx_packet_filter(struct net_device *dev) +{ + u32 mc_filter[2] = { 0xFFFFFFFF, 0xFFFFFFFF }; + struct rtase_private *tp = netdev_priv(dev); + u16 rx_mode; + + rx_mode = rtase_r16(tp, RTASE_RX_CONFIG_0) & ~RTASE_ACCEPT_MASK; + rx_mode |= RTASE_ACCEPT_BROADCAST | RTASE_ACCEPT_MYPHYS; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= RTASE_ACCEPT_MULTICAST | RTASE_ACCEPT_ALLPHYS; + } else if (dev->flags & IFF_ALLMULTI) { + rx_mode |= RTASE_ACCEPT_MULTICAST; + } else { + struct netdev_hw_addr *hw_addr; + + mc_filter[0] = 0; + mc_filter[1] = 0; + + netdev_for_each_mc_addr(hw_addr, dev) { + u32 bit_nr = eth_hw_addr_crc(hw_addr); + u32 idx = u32_get_bits(bit_nr, BIT(31)); + u32 bit = u32_get_bits(bit_nr, + RTASE_MULTICAST_FILTER_MASK); + + mc_filter[idx] |= BIT(bit); + rx_mode |= RTASE_ACCEPT_MULTICAST; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= RTASE_ACCEPT_ERR | RTASE_ACCEPT_RUNT; + + rtase_w32(tp, RTASE_MAR0, swab32(mc_filter[1])); + rtase_w32(tp, RTASE_MAR1, swab32(mc_filter[0])); + rtase_w16(tp, RTASE_RX_CONFIG_0, rx_mode); +} + +static void rtase_irq_dis_and_clear(const struct rtase_private *tp) +{ + const struct rtase_int_vector *ivec = 
&tp->int_vector[0]; + u32 val1; + u16 val2; + u8 i; + + rtase_w32(tp, ivec->imr_addr, 0); + val1 = rtase_r32(tp, ivec->isr_addr); + rtase_w32(tp, ivec->isr_addr, val1); + + for (i = 1; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + rtase_w16(tp, ivec->imr_addr, 0); + val2 = rtase_r16(tp, ivec->isr_addr); + rtase_w16(tp, ivec->isr_addr, val2); + } +} + +static void rtase_poll_timeout(const struct rtase_private *tp, u32 cond, + u32 sleep_us, u64 timeout_us, u16 reg) +{ + int err; + u8 val; + + err = read_poll_timeout(rtase_r8, val, val & cond, sleep_us, + timeout_us, false, tp, reg); + + if (err == -ETIMEDOUT) + netdev_err(tp->dev, "poll reg 0x00%x timeout\n", reg); +} + +static void rtase_nic_reset(const struct net_device *dev) +{ + const struct rtase_private *tp = netdev_priv(dev); + u16 rx_config; + u8 val; + + rx_config = rtase_r16(tp, RTASE_RX_CONFIG_0); + rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config & ~RTASE_ACCEPT_MASK); + + val = rtase_r8(tp, RTASE_MISC); + rtase_w8(tp, RTASE_MISC, val | RTASE_RX_DV_GATE_EN); + + val = rtase_r8(tp, RTASE_CHIP_CMD); + rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_STOP_REQ); + mdelay(2); + + rtase_poll_timeout(tp, RTASE_STOP_REQ_DONE, 100, 150000, + RTASE_CHIP_CMD); + + rtase_poll_timeout(tp, RTASE_TX_FIFO_EMPTY, 100, 100000, + RTASE_FIFOR); + + rtase_poll_timeout(tp, RTASE_RX_FIFO_EMPTY, 100, 100000, + RTASE_FIFOR); + + val = rtase_r8(tp, RTASE_CHIP_CMD); + rtase_w8(tp, RTASE_CHIP_CMD, val & ~(RTASE_TE | RTASE_RE)); + val = rtase_r8(tp, RTASE_CHIP_CMD); + rtase_w8(tp, RTASE_CHIP_CMD, val & ~RTASE_STOP_REQ); + + rtase_w16(tp, RTASE_RX_CONFIG_0, rx_config); +} + +static void rtase_hw_reset(const struct net_device *dev) +{ + const struct rtase_private *tp = netdev_priv(dev); + + rtase_irq_dis_and_clear(tp); + + rtase_nic_reset(dev); +} + +static void rtase_set_rx_queue(const struct rtase_private *tp) +{ + u16 reg_data; + + reg_data = rtase_r16(tp, RTASE_FCR); + switch (tp->func_rx_queue_num) { + case 1: + u16p_replace_bits(&reg_data, 0x1, RTASE_FCR_RXQ_MASK); + break; + case 2: + u16p_replace_bits(&reg_data, 0x2, RTASE_FCR_RXQ_MASK); + break; + case 4: + u16p_replace_bits(&reg_data, 0x3, RTASE_FCR_RXQ_MASK); + break; + } + rtase_w16(tp, RTASE_FCR, reg_data); +} + +static void rtase_set_tx_queue(const struct rtase_private *tp) +{ + u16 reg_data; + + reg_data = rtase_r16(tp, RTASE_TX_CONFIG_1); + switch (tp->tx_queue_ctrl) { + case 1: + u16p_replace_bits(&reg_data, 0x0, RTASE_TC_MODE_MASK); + break; + case 2: + u16p_replace_bits(&reg_data, 0x1, RTASE_TC_MODE_MASK); + break; + case 3: + case 4: + u16p_replace_bits(&reg_data, 0x2, RTASE_TC_MODE_MASK); + break; + default: + u16p_replace_bits(&reg_data, 0x3, RTASE_TC_MODE_MASK); + break; + } + rtase_w16(tp, RTASE_TX_CONFIG_1, reg_data); +} + +static void rtase_hw_config(struct net_device *dev) +{ + const struct rtase_private *tp = netdev_priv(dev); + u32 reg_data32; + u16 reg_data16; + + rtase_hw_reset(dev); + + /* set rx dma burst */ + reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_0); + reg_data16 &= ~(RTASE_RX_SINGLE_TAG | RTASE_RX_SINGLE_FETCH); + u16p_replace_bits(&reg_data16, RTASE_RX_DMA_BURST_256, + RTASE_RX_MX_DMA_MASK); + rtase_w16(tp, RTASE_RX_CONFIG_0, reg_data16); + + /* new rx descriptor */ + reg_data16 = rtase_r16(tp, RTASE_RX_CONFIG_1); + reg_data16 |= RTASE_RX_NEW_DESC_FORMAT_EN | RTASE_PCIE_NEW_FLOW; + u16p_replace_bits(&reg_data16, 0xF, RTASE_RX_MAX_FETCH_DESC_MASK); + rtase_w16(tp, RTASE_RX_CONFIG_1, reg_data16); + + rtase_set_rx_queue(tp); + + rtase_interrupt_mitigation(tp); + + /* set tx dma burst size and interframe
gap time */ + reg_data32 = rtase_r32(tp, RTASE_TX_CONFIG_0); + u32p_replace_bits(&reg_data32, RTASE_TX_DMA_BURST_UNLIMITED, + RTASE_TX_DMA_MASK); + u32p_replace_bits(&reg_data32, RTASE_INTERFRAMEGAP, + RTASE_TX_INTER_FRAME_GAP_MASK); + rtase_w32(tp, RTASE_TX_CONFIG_0, reg_data32); + + /* new tx descriptor */ + reg_data16 = rtase_r16(tp, RTASE_TFUN_CTRL); + rtase_w16(tp, RTASE_TFUN_CTRL, reg_data16 | + RTASE_TX_NEW_DESC_FORMAT_EN); + + /* tx fetch desc number */ + rtase_w8(tp, RTASE_TDFNR, 0x10); + + /* tag num select */ + reg_data16 = rtase_r16(tp, RTASE_MTPS); + u16p_replace_bits(&reg_data16, 0x4, RTASE_TAG_NUM_SEL_MASK); + rtase_w16(tp, RTASE_MTPS, reg_data16); + + rtase_set_tx_queue(tp); + + rtase_w16(tp, RTASE_TOKSEL, 0x5555); + + rtase_tally_counter_addr_fill(tp); + rtase_desc_addr_fill(tp); + rtase_hw_set_features(dev, dev->features); + + /* enable flow control */ + reg_data16 = rtase_r16(tp, RTASE_CPLUS_CMD); + reg_data16 |= (RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN); + rtase_w16(tp, RTASE_CPLUS_CMD, reg_data16); + /* set near fifo threshold - rx missed issue. */ + rtase_w16(tp, RTASE_RFIFONFULL, 0x190); + + rtase_w16(tp, RTASE_RMS, tp->rx_buf_sz); + + rtase_hw_set_rx_packet_filter(dev); +} + +static void rtase_nic_enable(const struct net_device *dev) +{ + const struct rtase_private *tp = netdev_priv(dev); + u16 rcr = rtase_r16(tp, RTASE_RX_CONFIG_1); + u8 val; + + rtase_w16(tp, RTASE_RX_CONFIG_1, rcr & ~RTASE_PCIE_RELOAD_EN); + rtase_w16(tp, RTASE_RX_CONFIG_1, rcr | RTASE_PCIE_RELOAD_EN); + + val = rtase_r8(tp, RTASE_CHIP_CMD); + rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_TE | RTASE_RE); + + val = rtase_r8(tp, RTASE_MISC); + rtase_w8(tp, RTASE_MISC, val & ~RTASE_RX_DV_GATE_EN); +} + +static void rtase_enable_hw_interrupt(const struct rtase_private *tp) +{ + const struct rtase_int_vector *ivec = &tp->int_vector[0]; + u32 i; + + rtase_w32(tp, ivec->imr_addr, ivec->imr); + + for (i = 1; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + rtase_w16(tp, ivec->imr_addr, ivec->imr); + } +} + +static void rtase_hw_start(const struct net_device *dev) +{ + const struct rtase_private *tp = netdev_priv(dev); + + rtase_nic_enable(dev); + rtase_enable_hw_interrupt(tp); +} + +/* the interrupt handler does RXQ0 and TXQ0, TXQ4~7 interrupt status + */ +static irqreturn_t rtase_interrupt(int irq, void *dev_instance) +{ + const struct rtase_private *tp; + struct rtase_int_vector *ivec; + u32 status; + + ivec = dev_instance; + tp = ivec->tp; + status = rtase_r32(tp, ivec->isr_addr); + + rtase_w32(tp, ivec->imr_addr, 0x0); + rtase_w32(tp, ivec->isr_addr, status & ~RTASE_FOVW); + + if (napi_schedule_prep(&ivec->napi)) + __napi_schedule(&ivec->napi); + + return IRQ_HANDLED; +} + +/* the interrupt handler does RXQ1&TXQ1 or RXQ2&TXQ2 or RXQ3&TXQ3 interrupt + * status according to interrupt vector + */ +static irqreturn_t rtase_q_interrupt(int irq, void *dev_instance) +{ + const struct rtase_private *tp; + struct rtase_int_vector *ivec; + u16 status; + + ivec = dev_instance; + tp = ivec->tp; + status = rtase_r16(tp, ivec->isr_addr); + + rtase_w16(tp, ivec->imr_addr, 0x0); + rtase_w16(tp, ivec->isr_addr, status); + + if (napi_schedule_prep(&ivec->napi)) + __napi_schedule(&ivec->napi); + + return IRQ_HANDLED; +} + +static int rtase_poll(struct napi_struct *napi, int budget) +{ + const struct rtase_int_vector *ivec; + const struct rtase_private *tp; + struct rtase_ring *ring; + int total_workdone = 0; + + ivec = container_of(napi, struct rtase_int_vector, napi); + tp = ivec->tp; + + list_for_each_entry(ring,
&ivec->ring_list, ring_entry) + total_workdone += ring->ring_handler(ring, budget); + + if (total_workdone >= budget) + return budget; + + if (napi_complete_done(napi, total_workdone)) { + if (!ivec->index) + rtase_w32(tp, ivec->imr_addr, ivec->imr); + else + rtase_w16(tp, ivec->imr_addr, ivec->imr); + } + + return total_workdone; +} + +static int rtase_open(struct net_device *dev) +{ + struct rtase_private *tp = netdev_priv(dev); + const struct pci_dev *pdev = tp->pdev; + struct rtase_int_vector *ivec; + u16 i = 0, j; + int ret; + + ivec = &tp->int_vector[0]; + tp->rx_buf_sz = RTASE_RX_BUF_SIZE; + + ret = rtase_alloc_desc(tp); + if (ret) + return ret; + + ret = rtase_init_ring(dev); + if (ret) + goto err_free_all_allocated_mem; + + rtase_hw_config(dev); + + if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) { + ret = request_irq(ivec->irq, rtase_interrupt, 0, + dev->name, ivec); + if (ret) + goto err_free_all_allocated_irq; + + /* request other interrupts to handle multiqueue */ + for (i = 1; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + snprintf(ivec->name, sizeof(ivec->name), "%s_int%i", + tp->dev->name, i); + ret = request_irq(ivec->irq, rtase_q_interrupt, 0, + ivec->name, ivec); + if (ret) + goto err_free_all_allocated_irq; + } + } else { + ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name, + ivec); + if (ret) + goto err_free_all_allocated_mem; + } + + rtase_hw_start(dev); + + for (i = 0; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + napi_enable(&ivec->napi); + } + + netif_carrier_on(dev); + netif_wake_queue(dev); + + return 0; + +err_free_all_allocated_irq: + for (j = 0; j < i; j++) + free_irq(tp->int_vector[j].irq, &tp->int_vector[j]); + +err_free_all_allocated_mem: + rtase_free_desc(tp); + + return ret; +} + +static void rtase_down(struct net_device *dev) +{ + struct rtase_private *tp = netdev_priv(dev); + struct rtase_int_vector *ivec; + struct rtase_ring *ring, *tmp; + u32 i; + + for (i = 0; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + napi_disable(&ivec->napi); + list_for_each_entry_safe(ring, tmp, &ivec->ring_list, + ring_entry) + list_del(&ring->ring_entry); + } + + netif_tx_disable(dev); + + netif_carrier_off(dev); + + rtase_hw_reset(dev); + + rtase_tx_clear(tp); + + rtase_rx_clear(tp); +} + +static int rtase_close(struct net_device *dev) +{ + struct rtase_private *tp = netdev_priv(dev); + const struct pci_dev *pdev = tp->pdev; + u32 i; + + rtase_down(dev); + + if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) { + for (i = 0; i < tp->int_nums; i++) + free_irq(tp->int_vector[i].irq, &tp->int_vector[i]); + + } else { + free_irq(pdev->irq, &tp->int_vector[0]); + } + + rtase_free_desc(tp); + + return 0; +} + +static u32 rtase_tx_vlan_tag(const struct rtase_private *tp, + const struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? 
+ (RTASE_TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb))) : 0x00; +} + +static u32 rtase_tx_csum(struct sk_buff *skb, const struct net_device *dev) +{ + u32 csum_cmd = 0; + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + csum_cmd = RTASE_TX_IPCS_C; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + csum_cmd = RTASE_TX_IPV6F_C; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + csum_cmd |= RTASE_TX_TCPCS_C; + else if (ip_protocol == IPPROTO_UDP) + csum_cmd |= RTASE_TX_UDPCS_C; + + csum_cmd |= u32_encode_bits(skb_transport_offset(skb), + RTASE_TCPHO_MASK); + + return csum_cmd; +} + +static int rtase_xmit_frags(struct rtase_ring *ring, struct sk_buff *skb, + u32 opts1, u32 opts2) +{ + const struct skb_shared_info *info = skb_shinfo(skb); + const struct rtase_private *tp = ring->ivec->tp; + const u8 nr_frags = info->nr_frags; + struct rtase_tx_desc *txd = NULL; + u32 cur_frag, entry; + + entry = ring->cur_idx; + for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) { + const skb_frag_t *frag = &info->frags[cur_frag]; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % RTASE_NUM_DESC; + + txd = ring->desc + sizeof(struct rtase_tx_desc) * entry; + len = skb_frag_size(frag); + addr = skb_frag_address(frag); + mapping = dma_map_single(&tp->pdev->dev, addr, len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { + if (unlikely(net_ratelimit())) + netdev_err(tp->dev, + "Failed to map TX fragments DMA!\n"); + + goto err_out; + } + + if (((entry + 1) % RTASE_NUM_DESC) == 0) + status = (opts1 | len | RTASE_RING_END); + else + status = opts1 | len; + + if (cur_frag == (nr_frags - 1)) { + ring->skbuff[entry] = skb; + status |= RTASE_TX_LAST_FRAG; + } + + ring->mis.len[entry] = len; + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts2); + + /* make sure the operating fields have been updated */ + dma_wmb(); + txd->opts1 = cpu_to_le32(status); + } + + return cur_frag; + +err_out: + rtase_tx_clear_range(ring, ring->cur_idx + 1, cur_frag); + return -EIO; +} + +static netdev_tx_t rtase_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + struct rtase_private *tp = netdev_priv(dev); + u32 q_idx, entry, len, opts1, opts2; + struct netdev_queue *tx_queue; + bool stop_queue, door_bell; + u32 mss = shinfo->gso_size; + struct rtase_tx_desc *txd; + struct rtase_ring *ring; + dma_addr_t mapping; + int frags; + + /* multiqueues */ + q_idx = skb_get_queue_mapping(skb); + ring = &tp->tx_ring[q_idx]; + tx_queue = netdev_get_tx_queue(dev, q_idx); + + if (unlikely(!rtase_tx_avail(ring))) { + if (net_ratelimit()) + netdev_err(dev, + "BUG! 
Tx Ring full when queue awake!\n"); + + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + entry = ring->cur_idx % RTASE_NUM_DESC; + txd = ring->desc + sizeof(struct rtase_tx_desc) * entry; + + opts1 = RTASE_DESC_OWN; + opts2 = rtase_tx_vlan_tag(tp, skb); + + /* tcp segmentation offload (or tcp large send) */ + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts1 |= RTASE_GIANT_SEND_V4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + goto err_dma_0; + + tcp_v6_gso_csum_prep(skb); + opts1 |= RTASE_GIANT_SEND_V6; + } else { + WARN_ON_ONCE(1); + } + + opts1 |= u32_encode_bits(skb_transport_offset(skb), + RTASE_TCPHO_MASK); + opts2 |= u32_encode_bits(mss, RTASE_MSS_MASK); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + opts2 |= rtase_tx_csum(skb, dev); + } + + frags = rtase_xmit_frags(ring, skb, opts1, opts2); + if (unlikely(frags < 0)) + goto err_dma_0; + + if (frags) { + len = skb_headlen(skb); + opts1 |= RTASE_TX_FIRST_FRAG; + } else { + len = skb->len; + ring->skbuff[entry] = skb; + opts1 |= RTASE_TX_FIRST_FRAG | RTASE_TX_LAST_FRAG; + } + + if (((entry + 1) % RTASE_NUM_DESC) == 0) + opts1 |= (len | RTASE_RING_END); + else + opts1 |= len; + + mapping = dma_map_single(&tp->pdev->dev, skb->data, len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { + if (unlikely(net_ratelimit())) + netdev_err(dev, "Failed to map TX DMA!\n"); + + goto err_dma_1; + } + + ring->mis.len[entry] = len; + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts2); + txd->opts1 = cpu_to_le32(opts1 & ~RTASE_DESC_OWN); + + /* make sure the operating fields have been updated */ + dma_wmb(); + + door_bell = __netdev_tx_sent_queue(tx_queue, skb->len, + netdev_xmit_more()); + + txd->opts1 = cpu_to_le32(opts1); + + skb_tx_timestamp(skb); + + /* tx needs to see descriptor changes before updated cur_idx */ + smp_wmb(); + + WRITE_ONCE(ring->cur_idx, ring->cur_idx + frags + 1); + + stop_queue = !netif_subqueue_maybe_stop(dev, ring->index, + rtase_tx_avail(ring), + RTASE_TX_STOP_THRS, + RTASE_TX_START_THRS); + + if (door_bell || stop_queue) + rtase_w8(tp, RTASE_TPPOLL, BIT(ring->index)); + + return NETDEV_TX_OK; + +err_dma_1: + ring->skbuff[entry] = NULL; + rtase_tx_clear_range(ring, ring->cur_idx + 1, frags); + +err_dma_0: + tp->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static void rtase_set_rx_mode(struct net_device *dev) +{ + rtase_hw_set_rx_packet_filter(dev); +} + +static void rtase_enable_eem_write(const struct rtase_private *tp) +{ + u8 val; + + val = rtase_r8(tp, RTASE_EEM); + rtase_w8(tp, RTASE_EEM, val | RTASE_EEM_UNLOCK); +} + +static void rtase_disable_eem_write(const struct rtase_private *tp) +{ + u8 val; + + val = rtase_r8(tp, RTASE_EEM); + rtase_w8(tp, RTASE_EEM, val & ~RTASE_EEM_UNLOCK); +} + +static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr) +{ + u32 rar_low, rar_high; + + rar_low = (u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24); + + rar_high = (u32)addr[4] | ((u32)addr[5] << 8); + + rtase_enable_eem_write(tp); + rtase_w32(tp, RTASE_MAC0, rar_low); + rtase_w32(tp, RTASE_MAC4, rar_high); + rtase_disable_eem_write(tp); + rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR); +} + +static int rtase_set_mac_address(struct net_device *dev, void *p) +{ + struct rtase_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtase_rar_set(tp, dev->dev_addr); + + return 0; +} + +static 
int rtase_change_mtu(struct net_device *dev, int new_mtu) +{ + dev->mtu = new_mtu; + + netdev_update_features(dev); + + return 0; +} + +static void rtase_wait_for_quiescence(const struct net_device *dev) +{ + struct rtase_private *tp = netdev_priv(dev); + struct rtase_int_vector *ivec; + u32 i; + + for (i = 0; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + synchronize_irq(ivec->irq); + /* wait for any pending NAPI task to complete */ + napi_disable(&ivec->napi); + } + + rtase_irq_dis_and_clear(tp); + + for (i = 0; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + napi_enable(&ivec->napi); + } +} + +static void rtase_sw_reset(struct net_device *dev) +{ + struct rtase_private *tp = netdev_priv(dev); + int ret; + + netif_stop_queue(dev); + netif_carrier_off(dev); + rtase_hw_reset(dev); + + /* let's wait a bit for any (async) irq to land */ + rtase_wait_for_quiescence(dev); + rtase_tx_clear(tp); + rtase_rx_clear(tp); + + ret = rtase_init_ring(dev); + if (ret) { + netdev_err(dev, "unable to init ring\n"); + rtase_free_desc(tp); + return; + } + + rtase_hw_config(dev); + /* always link, so start to transmit & receive */ + rtase_hw_start(dev); + + netif_carrier_on(dev); + netif_wake_queue(dev); +} + +static void rtase_dump_tally_counter(const struct rtase_private *tp) +{ + dma_addr_t paddr = tp->tally_paddr; + u32 cmd = lower_32_bits(paddr); + u32 val; + int err; + + rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(paddr)); + rtase_w32(tp, RTASE_DTCCR0, cmd); + rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_DUMP); + + err = read_poll_timeout(rtase_r32, val, !(val & RTASE_COUNTER_DUMP), + 10, 250, false, tp, RTASE_DTCCR0); + + if (err == -ETIMEDOUT) + netdev_err(tp->dev, "error occurred in dump tally counter\n"); +} + +static void rtase_dump_state(const struct net_device *dev) +{ + const struct rtase_private *tp = netdev_priv(dev); + int max_reg_size = RTASE_PCI_REGS_SIZE; + const struct rtase_counters *counters; + const struct rtase_ring *ring; + u32 dword_rd; + int n = 0; + + ring = &tp->tx_ring[0]; + netdev_err(dev, "Tx descriptor info:\n"); + netdev_err(dev, "Tx curIdx = 0x%x\n", ring->cur_idx); + netdev_err(dev, "Tx dirtyIdx = 0x%x\n", ring->dirty_idx); + netdev_err(dev, "Tx phyAddr = %pad\n", &ring->phy_addr); + + ring = &tp->rx_ring[0]; + netdev_err(dev, "Rx descriptor info:\n"); + netdev_err(dev, "Rx curIdx = 0x%x\n", ring->cur_idx); + netdev_err(dev, "Rx dirtyIdx = 0x%x\n", ring->dirty_idx); + netdev_err(dev, "Rx phyAddr = %pad\n", &ring->phy_addr); + + netdev_err(dev, "Device Registers:\n"); + netdev_err(dev, "Chip Command = 0x%02x\n", + rtase_r8(tp, RTASE_CHIP_CMD)); + netdev_err(dev, "IMR = %08x\n", rtase_r32(tp, RTASE_IMR0)); + netdev_err(dev, "ISR = %08x\n", rtase_r32(tp, RTASE_ISR0)); + netdev_err(dev, "Boot Ctrl Reg(0xE004) = %04x\n", + rtase_r16(tp, RTASE_BOOT_CTL)); + netdev_err(dev, "EPHY ISR(0xE014) = %04x\n", + rtase_r16(tp, RTASE_EPHY_ISR)); + netdev_err(dev, "EPHY IMR(0xE016) = %04x\n", + rtase_r16(tp, RTASE_EPHY_IMR)); + netdev_err(dev, "CLKSW SET REG(0xE018) = %04x\n", + rtase_r16(tp, RTASE_CLKSW_SET)); + + netdev_err(dev, "Dump PCI Registers:\n"); + + while (n < max_reg_size) { + if ((n % RTASE_DWORD_MOD) == 0) + netdev_err(tp->dev, "0x%03x:\n", n); + + pci_read_config_dword(tp->pdev, n, &dword_rd); + netdev_err(tp->dev, "%08x\n", dword_rd); + n += 4; + } + + netdev_err(dev, "Dump tally counter:\n"); + counters = tp->tally_vaddr; + rtase_dump_tally_counter(tp); + + netdev_err(dev, "tx_packets %lld\n", + le64_to_cpu(counters->tx_packets)); + netdev_err(dev,
"rx_packets %lld\n", + le64_to_cpu(counters->rx_packets)); + netdev_err(dev, "tx_errors %lld\n", + le64_to_cpu(counters->tx_errors)); + netdev_err(dev, "rx_errors %d\n", + le32_to_cpu(counters->rx_errors)); + netdev_err(dev, "rx_missed %d\n", + le16_to_cpu(counters->rx_missed)); + netdev_err(dev, "align_errors %d\n", + le16_to_cpu(counters->align_errors)); + netdev_err(dev, "tx_one_collision %d\n", + le32_to_cpu(counters->tx_one_collision)); + netdev_err(dev, "tx_multi_collision %d\n", + le32_to_cpu(counters->tx_multi_collision)); + netdev_err(dev, "rx_unicast %lld\n", + le64_to_cpu(counters->rx_unicast)); + netdev_err(dev, "rx_broadcast %lld\n", + le64_to_cpu(counters->rx_broadcast)); + netdev_err(dev, "rx_multicast %d\n", + le32_to_cpu(counters->rx_multicast)); + netdev_err(dev, "tx_aborted %d\n", + le16_to_cpu(counters->tx_aborted)); + netdev_err(dev, "tx_underrun %d\n", + le16_to_cpu(counters->tx_underrun)); +} + +static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + rtase_dump_state(dev); + rtase_sw_reset(dev); +} + +static void rtase_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + const struct rtase_private *tp = netdev_priv(dev); + const struct rtase_counters *counters; + + counters = tp->tally_vaddr; + + dev_fetch_sw_netstats(stats, dev->tstats); + + /* fetch additional counter values missing in stats collected by driver + * from tally counter + */ + rtase_dump_tally_counter(tp); + stats->rx_errors = tp->stats.rx_errors; + stats->tx_errors = le64_to_cpu(counters->tx_errors); + stats->rx_dropped = tp->stats.rx_dropped; + stats->tx_dropped = tp->stats.tx_dropped; + stats->multicast = tp->stats.multicast; + stats->rx_length_errors = tp->stats.rx_length_errors; +} + +static netdev_features_t rtase_fix_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t features_fix = features; + + /* TSO is not supported for jumbo frames */ + if (dev->mtu > ETH_DATA_LEN) + features_fix &= ~NETIF_F_ALL_TSO; + + return features_fix; +} + +static int rtase_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t features_set = features; + + features_set &= NETIF_F_RXALL | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_RX; + + if (features_set ^ dev->features) + rtase_hw_set_features(dev, features_set); + + return 0; +} + +static const struct net_device_ops rtase_netdev_ops = { + .ndo_open = rtase_open, + .ndo_stop = rtase_close, + .ndo_start_xmit = rtase_start_xmit, + .ndo_set_rx_mode = rtase_set_rx_mode, + .ndo_set_mac_address = rtase_set_mac_address, + .ndo_change_mtu = rtase_change_mtu, + .ndo_tx_timeout = rtase_tx_timeout, + .ndo_get_stats64 = rtase_get_stats64, + .ndo_fix_features = rtase_fix_features, + .ndo_set_features = rtase_set_features, +}; + +static void rtase_get_mac_address(struct net_device *dev) +{ + struct rtase_private *tp = netdev_priv(dev); + u8 mac_addr[ETH_ALEN] __aligned(2) = {}; + u32 i; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = rtase_r8(tp, RTASE_MAC0 + i); + + if (!is_valid_ether_addr(mac_addr)) { + eth_hw_addr_random(dev); + netdev_warn(dev, "Random ether addr %pM\n", dev->dev_addr); + } else { + eth_hw_addr_set(dev, mac_addr); + ether_addr_copy(dev->perm_addr, dev->dev_addr); + } + + rtase_rar_set(tp, dev->dev_addr); +} + +static int rtase_get_settings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, +
supported); + cmd->base.speed = SPEED_5000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_MII; + cmd->base.autoneg = AUTONEG_DISABLE; + + return 0; +} + +static void rtase_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + const struct rtase_private *tp = netdev_priv(dev); + u16 value = rtase_r16(tp, RTASE_CPLUS_CMD); + + pause->autoneg = AUTONEG_DISABLE; + pause->tx_pause = !!(value & RTASE_FORCE_TXFLOW_EN); + pause->rx_pause = !!(value & RTASE_FORCE_RXFLOW_EN); +} + +static int rtase_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + const struct rtase_private *tp = netdev_priv(dev); + u16 value = rtase_r16(tp, RTASE_CPLUS_CMD); + + if (pause->autoneg) + return -EOPNOTSUPP; + + value &= ~(RTASE_FORCE_TXFLOW_EN | RTASE_FORCE_RXFLOW_EN); + + if (pause->tx_pause) + value |= RTASE_FORCE_TXFLOW_EN; + + if (pause->rx_pause) + value |= RTASE_FORCE_RXFLOW_EN; + + rtase_w16(tp, RTASE_CPLUS_CMD, value); + return 0; +} + +static void rtase_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *stats) +{ + struct rtase_private *tp = netdev_priv(dev); + const struct rtase_counters *counters; + + counters = tp->tally_vaddr; + + rtase_dump_tally_counter(tp); + + stats->FramesTransmittedOK = le64_to_cpu(counters->tx_packets); + stats->FramesReceivedOK = le64_to_cpu(counters->rx_packets); + stats->FramesLostDueToIntMACXmitError = + le64_to_cpu(counters->tx_errors); + stats->BroadcastFramesReceivedOK = le64_to_cpu(counters->rx_broadcast); +} + +static const struct ethtool_ops rtase_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_link_ksettings = rtase_get_settings, + .get_pauseparam = rtase_get_pauseparam, + .set_pauseparam = rtase_set_pauseparam, + .get_eth_mac_stats = rtase_get_eth_mac_stats, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static void rtase_init_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &rtase_netdev_ops; + dev->ethtool_ops = &rtase_ethtool_ops; +} + +static void rtase_reset_interrupt(struct pci_dev *pdev, + const struct rtase_private *tp) +{ + if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) + pci_disable_msix(pdev); + else + pci_disable_msi(pdev); +} + +static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp) +{ + int ret, irq; + u16 i; + + memset(tp->msix_entry, 0x0, RTASE_NUM_MSIX * + sizeof(struct msix_entry)); + + for (i = 0; i < RTASE_NUM_MSIX; i++) + tp->msix_entry[i].entry = i; + + ret = pci_enable_msix_exact(pdev, tp->msix_entry, tp->int_nums); + + if (ret) + return ret; + + for (i = 0; i < tp->int_nums; i++) { + irq = pci_irq_vector(pdev, i); + if (!irq) { + pci_disable_msix(pdev); + return irq; + } + + tp->int_vector[i].irq = irq; + } + + return 0; +} + +static int rtase_alloc_interrupt(struct pci_dev *pdev, + struct rtase_private *tp) +{ + int ret; + + ret = rtase_alloc_msix(pdev, tp); + if (ret) { + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, + "unable to alloc interrupt.(MSI)\n"); + return ret; + } + + tp->sw_flag |= RTASE_SWF_MSI_ENABLED; + } else { + tp->sw_flag |= RTASE_SWF_MSIX_ENABLED; + } + + return 0; +} + +static void rtase_init_hardware(const struct rtase_private *tp) +{ + u16 i; + + for (i = 0; i < RTASE_VLAN_FILTER_ENTRY_NUM; i++) + rtase_w32(tp, RTASE_VLAN_ENTRY_0 + i * 4, 0); +} + +static void rtase_init_int_vector(struct rtase_private *tp) +{ + u16 i; + + /* interrupt vector 0 */ + tp->int_vector[0].tp = tp; + tp->int_vector[0].index = 0; + tp->int_vector[0].imr_addr = RTASE_IMR0; + tp->int_vector[0].isr_addr = 
RTASE_ISR0; + tp->int_vector[0].imr = RTASE_ROK | RTASE_RDU | RTASE_TOK | + RTASE_TOK4 | RTASE_TOK5 | RTASE_TOK6 | + RTASE_TOK7; + tp->int_vector[0].poll = rtase_poll; + + memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name)); + INIT_LIST_HEAD(&tp->int_vector[0].ring_list); + + netif_napi_add(tp->dev, &tp->int_vector[0].napi, + tp->int_vector[0].poll); + + /* interrupt vector 1 ~ 3 */ + for (i = 1; i < tp->int_nums; i++) { + tp->int_vector[i].tp = tp; + tp->int_vector[i].index = i; + tp->int_vector[i].imr_addr = RTASE_IMR1 + (i - 1) * 4; + tp->int_vector[i].isr_addr = RTASE_ISR1 + (i - 1) * 4; + tp->int_vector[i].imr = RTASE_Q_ROK | RTASE_Q_RDU | + RTASE_Q_TOK; + tp->int_vector[i].poll = rtase_poll; + + memset(tp->int_vector[i].name, 0x0, + sizeof(tp->int_vector[0].name)); + INIT_LIST_HEAD(&tp->int_vector[i].ring_list); + + netif_napi_add(tp->dev, &tp->int_vector[i].napi, + tp->int_vector[i].poll); + } +} + +static u16 rtase_calc_time_mitigation(u32 time_us) +{ + u8 msb, time_count, time_unit; + u16 int_miti; + + time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME); + + msb = fls(time_us); + if (msb >= RTASE_MITI_COUNT_BIT_NUM) { + time_unit = msb - RTASE_MITI_COUNT_BIT_NUM; + time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM); + } else { + time_unit = 0; + time_count = time_us; + } + + int_miti = u16_encode_bits(time_count, RTASE_MITI_TIME_COUNT_MASK) | + u16_encode_bits(time_unit, RTASE_MITI_TIME_UNIT_MASK); + + return int_miti; +} + +static u16 rtase_calc_packet_num_mitigation(u16 pkt_num) +{ + u8 msb, pkt_num_count, pkt_num_unit; + u16 int_miti; + + pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM); + + if (pkt_num > 60) { + pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX; + pkt_num_count = pkt_num / RTASE_MITI_MAX_PKT_NUM_UNIT; + } else { + msb = fls(pkt_num); + if (msb >= RTASE_MITI_COUNT_BIT_NUM) { + pkt_num_unit = msb - RTASE_MITI_COUNT_BIT_NUM; + pkt_num_count = pkt_num >> (msb - + RTASE_MITI_COUNT_BIT_NUM); + } else { + pkt_num_unit = 0; + pkt_num_count = pkt_num; + } + } + + int_miti = u16_encode_bits(pkt_num_count, + RTASE_MITI_PKT_NUM_COUNT_MASK) | + u16_encode_bits(pkt_num_unit, + RTASE_MITI_PKT_NUM_UNIT_MASK); + + return int_miti; +} + +static void rtase_init_software_variable(struct pci_dev *pdev, + struct rtase_private *tp) +{ + u16 int_miti; + + tp->tx_queue_ctrl = RTASE_TXQ_CTRL; + tp->func_tx_queue_num = RTASE_FUNC_TXQ_NUM; + tp->func_rx_queue_num = RTASE_FUNC_RXQ_NUM; + tp->int_nums = RTASE_INTERRUPT_NUM; + + int_miti = rtase_calc_time_mitigation(RTASE_MITI_DEFAULT_TIME) | + rtase_calc_packet_num_mitigation(RTASE_MITI_DEFAULT_PKT_NUM); + tp->tx_int_mit = int_miti; + tp->rx_int_mit = int_miti; + + tp->sw_flag = 0; + + rtase_init_int_vector(tp); + + /* MTU range: 60 - hw-specific max */ + tp->dev->min_mtu = ETH_ZLEN; + tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE; +} + +static bool rtase_check_mac_version_valid(struct rtase_private *tp) +{ + u32 hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK; + bool known_ver = false; + + switch (hw_ver) { + case 0x00800000: + case 0x04000000: + case 0x04800000: + known_ver = true; + break; + } + + return known_ver; +} + +static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out, + void __iomem **ioaddr_out) +{ + struct net_device *dev; + void __iomem *ioaddr; + int ret = -ENOMEM; + + /* dev zeroed in alloc_etherdev */ + dev = alloc_etherdev_mq(sizeof(struct rtase_private), + RTASE_FUNC_TXQ_NUM); + if (!dev) + goto err_out; + + SET_NETDEV_DEV(dev, &pdev->dev); + + ret = 
pci_enable_device(pdev); + if (ret < 0) + goto err_out_free_dev; + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + ret = -ENODEV; + goto err_out_disable; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, 2) < RTASE_REGS_SIZE) { + ret = -ENODEV; + goto err_out_disable; + } + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret < 0) + goto err_out_disable; + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "no usable dma addressing method\n"); + goto err_out_free_res; + } + + pci_set_master(pdev); + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!ioaddr) { + ret = -EIO; + goto err_out_free_res; + } + + *ioaddr_out = ioaddr; + *dev_out = dev; + + return ret; + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable: + pci_disable_device(pdev); + +err_out_free_dev: + free_netdev(dev); + +err_out: + *ioaddr_out = NULL; + *dev_out = NULL; + + return ret; +} + +static void rtase_release_board(struct pci_dev *pdev, struct net_device *dev, + void __iomem *ioaddr) +{ + const struct rtase_private *tp = netdev_priv(dev); + + rtase_rar_set(tp, tp->dev->perm_addr); + iounmap(ioaddr); + + if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) + pci_disable_msix(pdev); + else + pci_disable_msi(pdev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +static int rtase_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct rtase_int_vector *ivec; + void __iomem *ioaddr = NULL; + struct rtase_private *tp; + int ret, i; + + if (!pdev->is_physfn && pdev->is_virtfn) { + dev_err(&pdev->dev, + "This module does not support a virtual function."); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n"); + + ret = rtase_init_board(pdev, &dev, &ioaddr); + if (ret != 0) + return ret; + + tp = netdev_priv(dev); + tp->mmio_addr = ioaddr; + tp->dev = dev; + tp->pdev = pdev; + + /* identify chip attached to board */ + if (!rtase_check_mac_version_valid(tp)) + return dev_err_probe(&pdev->dev, -ENODEV, + "unknown chip version, contact rtase maintainers (see MAINTAINERS file)\n"); + + rtase_init_software_variable(pdev, tp); + rtase_init_hardware(tp); + + ret = rtase_alloc_interrupt(pdev, tp); + if (ret < 0) { + dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n"); + goto err_out_1; + } + + rtase_init_netdev_ops(dev); + + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_IP_CSUM | NETIF_F_HIGHDMA | + NETIF_F_RXCSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_IPV6_CSUM | + NETIF_F_TSO6; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_RXALL | NETIF_F_RXFCS | + NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + netif_set_tso_max_size(dev, RTASE_LSO_64K); + netif_set_tso_max_segs(dev, RTASE_NIC_MAX_PHYS_BUF_COUNT_LSO2); + + rtase_get_mac_address(dev); + + tp->tally_vaddr = dma_alloc_coherent(&pdev->dev, + sizeof(*tp->tally_vaddr), + &tp->tally_paddr, + GFP_KERNEL); + if (!tp->tally_vaddr) { + ret = -ENOMEM; + goto err_out; + } + + rtase_tally_counter_clear(tp); + + pci_set_drvdata(pdev, dev); + + 
netif_carrier_off(dev); + + ret = register_netdev(dev); + if (ret != 0) + goto err_out; + + netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq); + + return 0; + +err_out: + if (tp->tally_vaddr) { + dma_free_coherent(&pdev->dev, + sizeof(*tp->tally_vaddr), + tp->tally_vaddr, + tp->tally_paddr); + + tp->tally_vaddr = NULL; + } + +err_out_1: + for (i = 0; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + netif_napi_del(&ivec->napi); + } + + rtase_release_board(pdev, dev, ioaddr); + + return ret; +} + +static void rtase_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtase_private *tp = netdev_priv(dev); + struct rtase_int_vector *ivec; + u32 i; + + unregister_netdev(dev); + + for (i = 0; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + netif_napi_del(&ivec->napi); + } + + rtase_reset_interrupt(pdev, tp); + if (tp->tally_vaddr) { + dma_free_coherent(&pdev->dev, + sizeof(*tp->tally_vaddr), + tp->tally_vaddr, + tp->tally_paddr); + tp->tally_vaddr = NULL; + } + + rtase_release_board(pdev, dev, tp->mmio_addr); + pci_set_drvdata(pdev, NULL); +} + +static void rtase_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + const struct rtase_private *tp; + + tp = netdev_priv(dev); + + if (netif_running(dev)) + rtase_close(dev); + + rtase_reset_interrupt(pdev, tp); +} + +static int rtase_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + + if (netif_running(dev)) { + netif_device_detach(dev); + rtase_hw_reset(dev); + } + + return 0; +} + +static int rtase_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct rtase_private *tp = netdev_priv(dev); + int ret; + + /* restore last modified mac address */ + rtase_rar_set(tp, dev->dev_addr); + + if (!netif_running(dev)) + goto out; + + rtase_wait_for_quiescence(dev); + + rtase_tx_clear(tp); + rtase_rx_clear(tp); + + ret = rtase_init_ring(dev); + if (ret) { + netdev_err(dev, "unable to init ring\n"); + rtase_free_desc(tp); + return -ENOMEM; + } + + rtase_hw_config(dev); + /* always link, so start to transmit & receive */ + rtase_hw_start(dev); + + netif_device_attach(dev); +out: + + return 0; +} + +static const struct dev_pm_ops rtase_pm_ops = { + SYSTEM_SLEEP_PM_OPS(rtase_suspend, rtase_resume) +}; + +static struct pci_driver rtase_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rtase_pci_tbl, + .probe = rtase_init_one, + .remove = rtase_remove_one, + .shutdown = rtase_shutdown, + .driver.pm = pm_ptr(&rtase_pm_ops), +}; + +module_pci_driver(rtase_pci_driver); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 9893c91af1050f..a7de5cf6b31743 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1052,6 +1052,7 @@ struct ravb_hw_info { netdev_features_t net_features; int stats_len; u32 tccr_mask; + u32 tx_max_frame_size; u32 rx_max_frame_size; u32 rx_buffer_size; u32 rx_desc_size; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c02fb296bf7d7a..d2a6518532f37a 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -555,8 +555,16 @@ static void ravb_emac_init_gbeth(struct net_device *ndev) static void ravb_emac_init_rcar(struct net_device *ndev) { - /* Receive frame limit set register */ - ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); + struct ravb_private *priv = 
netdev_priv(ndev); + + /* Set receive frame length + * + * The length set here describes the frame from the destination address + * up to and including the CRC data. However only the frame data, + * excluding the CRC, are transferred to memory. To allow for the + * largest frames add the CRC length to the maximum Rx descriptor size. + */ + ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR); /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ ravb_write(ndev, ECMR_ZPF | ECMR_DM | @@ -1744,8 +1752,6 @@ static int ravb_get_ts_info(struct net_device *ndev, info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; @@ -1756,6 +1762,8 @@ static int ravb_get_ts_info(struct net_device *ndev, (1 << HWTSTAMP_FILTER_ALL); if (hw_info->gptp || hw_info->ccc_gac) info->phc_index = ptp_clock_index(priv->ptp.clock); + else + info->phc_index = 0; return 0; } @@ -2674,6 +2682,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = { .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .tx_max_frame_size = SZ_2K, .rx_max_frame_size = SZ_2K, .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), @@ -2696,6 +2705,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = { .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .tx_max_frame_size = SZ_2K, .rx_max_frame_size = SZ_2K, .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), @@ -2721,6 +2731,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = { .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .tx_max_frame_size = SZ_2K, .rx_max_frame_size = SZ_2K, .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), @@ -2770,6 +2781,7 @@ static const struct ravb_hw_info gbeth_hw_info = { .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), .tccr_mask = TCCR_TSRQ0, + .tx_max_frame_size = 1522, .rx_max_frame_size = SZ_8K, .rx_buffer_size = SZ_2K, .rx_desc_size = sizeof(struct ravb_rx_desc), @@ -2981,7 +2993,7 @@ static int ravb_probe(struct platform_device *pdev) priv->avb_link_active_low = of_property_read_bool(np, "renesas,ether-link-active-low"); - ndev->max_mtu = info->rx_max_frame_size - + ndev->max_mtu = info->tx_max_frame_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index ff50e20856ec15..b80aa27a7214d4 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1815,8 +1815,6 @@ static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c index 0e6cea42f00771..f9f63c61d79237 100644 --- a/drivers/net/ethernet/renesas/rtsn.c +++ b/drivers/net/ethernet/renesas/rtsn.c @@ 
-1219,8 +1219,6 @@ static int rtsn_get_ts_info(struct net_device *ndev, info->phc_index = ptp_clock_index(priv->ptp_priv->clock); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index e097ce3e69ea3b..84fa911c78db55 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2575,7 +2575,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx); rocker_carrier_init(rocker_port); - dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; + dev->features |= NETIF_F_SG; + dev->netns_local = true; /* MTU range: 68 - 9000 */ dev->min_mtu = ROCKER_PORT_MIN_MTU; diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index c672f92d65e976..9319a2675e7b65 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec) { struct net_device *dev = ecard_get_drvdata(ec); + ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); ecard_set_drvdata(ec, NULL); unregister_netdev(dev); + del_timer_sync(&priv(dev)->timer); free_netdev(dev); ecard_release_resources(ec); } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 7d69302ffa0af0..de131fc5fa0bbc 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4302,3 +4302,130 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .sensor_event = efx_mcdi_sensor_event, .rx_recycle_ring_size = efx_ef10_recycle_ring_size, }; + +const struct efx_nic_type efx_x4_nic_type = { + .is_vf = false, + .mem_bar = efx_ef10_pf_mem_bar, + .mem_map_size = efx_ef10_mem_map_size, + .probe = efx_ef10_probe_pf, + .remove = efx_ef10_remove, + .dimension_resources = efx_ef10_dimension_resources, + .init = efx_ef10_init_nic, + .fini = efx_ef10_fini_nic, + .map_reset_reason = efx_ef10_map_reset_reason, + .map_reset_flags = efx_ef10_map_reset_flags, + .reset = efx_ef10_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_fini_dmaq, + .prepare_flr = efx_ef10_prepare_flr, + .finish_flr = efx_port_dummy_op_void, + .describe_stats = efx_ef10_describe_stats, + .update_stats = efx_ef10_update_stats_pf, + .start_stats = efx_mcdi_mac_start_stats, + .pull_stats = efx_mcdi_mac_pull_stats, + .stop_stats = efx_mcdi_mac_stop_stats, + .push_irq_moderation = efx_ef10_push_irq_moderation, + .reconfigure_mac = efx_ef10_mac_reconfigure, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = efx_ef10_get_wol, + .set_wol = efx_ef10_set_wol, + .resume_wol = efx_port_dummy_op_void, + .get_fec_stats = efx_ef10_get_fec_stats, + .test_chip = efx_ef10_test_chip, + .test_nvram = efx_mcdi_nvram_test_all, + .mcdi_request = efx_ef10_mcdi_request, + .mcdi_poll_response = efx_ef10_mcdi_poll_response, + .mcdi_read_response = efx_ef10_mcdi_read_response, + .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, + .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef10_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .irq_handle_msi = 
efx_ef10_msi_interrupt, + .tx_probe = efx_ef10_tx_probe, + .tx_init = efx_ef10_tx_init, + .tx_write = efx_ef10_tx_write, + .tx_limit_len = efx_ef10_tx_limit_len, + .tx_enqueue = __efx_enqueue_skb, + .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, + .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, + .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, + .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, + .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, + .rx_probe = efx_mcdi_rx_probe, + .rx_init = efx_mcdi_rx_init, + .rx_remove = efx_mcdi_rx_remove, + .rx_write = efx_ef10_rx_write, + .rx_defer_refill = efx_ef10_rx_defer_refill, + .rx_packet = __efx_rx_packet, + .ev_probe = efx_mcdi_ev_probe, + .ev_init = efx_ef10_ev_init, + .ev_fini = efx_mcdi_ev_fini, + .ev_remove = efx_mcdi_ev_remove, + .ev_process = efx_ef10_ev_process, + .ev_read_ack = efx_ef10_ev_read_ack, + .ev_test_generate = efx_ef10_ev_test_generate, + .filter_table_probe = efx_ef10_filter_table_probe, + .filter_table_restore = efx_mcdi_filter_table_restore, + .filter_table_remove = efx_ef10_filter_table_remove, + .filter_insert = efx_mcdi_filter_insert, + .filter_remove_safe = efx_mcdi_filter_remove_safe, + .filter_get_safe = efx_mcdi_filter_get_safe, + .filter_clear_rx = efx_mcdi_filter_clear_rx, + .filter_count_rx_used = efx_mcdi_filter_count_rx_used, + .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = efx_ef10_mtd_probe, + .mtd_rename = efx_mcdi_mtd_rename, + .mtd_read = efx_mcdi_mtd_read, + .mtd_erase = efx_mcdi_mtd_erase, + .mtd_write = efx_mcdi_mtd_write, + .mtd_sync = efx_mcdi_mtd_sync, +#endif + .ptp_write_host_time = efx_ef10_ptp_write_host_time, + .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, + .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, + .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, + .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, + .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, + .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, +#ifdef CONFIG_SFC_SRIOV + /* currently set to the VF versions of these functions + * because SRIOV will be reimplemented later. 
+ */ + .vswitching_probe = efx_ef10_vswitching_probe_vf, + .vswitching_restore = efx_ef10_vswitching_restore_vf, + .vswitching_remove = efx_ef10_vswitching_remove_vf, +#endif + .get_mac_address = efx_ef10_get_mac_address_pf, + .set_mac_address = efx_ef10_set_mac_address, + .tso_versions = efx_ef10_tso_versions, + + .get_phys_port_id = efx_ef10_get_phys_port_id, + .revision = EFX_REV_X4, + .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, + .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, + .can_rx_scatter = true, + .always_rx_scatter = true, + .option_descriptors = true, + .min_interrupt_mode = EFX_INT_MODE_MSIX, + .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, + .offload_features = EF10_OFFLOAD_FEATURES, + .mcdi_max_ver = 2, + .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, + .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_ALL, + .check_caps = ef10_check_caps, + .print_additional_fwver = efx_ef10_print_additional_fwver, + .sensor_event = efx_mcdi_sensor_event, + .rx_recycle_ring_size = efx_ef10_recycle_ring_size, +}; + diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c index 896ffca4aee2b3..5c2551369812cb 100644 --- a/drivers/net/ethernet/sfc/ef100_ethtool.c +++ b/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -37,7 +37,6 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev, /* Ethtool options available */ const struct ethtool_ops ef100_ethtool_ops = { - .cap_rss_ctx_supported = true, .get_drvinfo = efx_ethtool_get_drvinfo, .get_msglevel = efx_ethtool_get_msglevel, .set_msglevel = efx_ethtool_set_msglevel, @@ -59,6 +58,7 @@ const struct ethtool_ops ef100_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, + .rxfh_per_ctx_key = true, .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c index 0b3083ef0eada4..e923e1796369d0 100644 --- a/drivers/net/ethernet/sfc/ef100_rep.c +++ b/drivers/net/ethernet/sfc/ef100_rep.c @@ -233,8 +233,8 @@ static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx, net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops; net_dev->min_mtu = EFX_MIN_MTU; net_dev->max_mtu = EFX_MAX_MTU; - net_dev->features |= NETIF_F_LLTX; - net_dev->hw_features |= NETIF_F_LLTX; + net_dev->lltx = true; + return efv; fail1: free_netdev(net_dev); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 6f1a01ded7d4ce..36b3b57e20558a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -821,6 +821,10 @@ static const struct pci_device_id efx_pci_table[] = { .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */ + .driver_data = (unsigned long)&efx_x4_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */ + .driver_data = (unsigned long)&efx_x4_nic_type}, {0} /* end of list */ }; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 7c887160e2ef8c..bb1930818beba4 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -230,17 +230,11 @@ 
static int efx_ethtool_get_ts_info(struct net_device *net_dev, { struct efx_nic *efx = efx_netdev_priv(net_dev); - /* Software capabilities */ - ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE); - ts_info->phc_index = -1; - efx_ptp_get_ts_info(efx, ts_info); return 0; } const struct ethtool_ops efx_ethtool_ops = { - .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USECS_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, @@ -268,6 +262,7 @@ const struct ethtool_ops efx_ethtool_ops = { .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, + .rxfh_per_ctx_key = true, .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 1db64fc6e9097d..9fa5c4c713abd3 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -211,4 +211,6 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, extern const struct efx_nic_type efx_hunt_a0_nic_type; extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; +extern const struct efx_nic_type efx_x4_nic_type; + #endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index 466df5348b29f4..7ec4ac7b7ff5c5 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -21,6 +21,7 @@ enum { */ EFX_REV_HUNT_A0 = 4, EFX_REV_EF100 = 5, + EFX_REV_X4 = 6, }; static inline int efx_nic_rev(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 6fd2fdbaa41819..aaacdcfa54ae11 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -884,7 +884,7 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data), timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART); timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR); timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR); - timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND), + timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND); timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); /* Ignore seconds */ diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index cf195162e27087..a0966f87966452 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -725,7 +725,6 @@ void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method) mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); - mutex_lock(&efx->rss_lock); efx->type->fini(efx); } @@ -786,9 +785,6 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) " VFs may not function\n", rc); #endif - if (efx->type->rx_restore_rss_contexts) - efx->type->rx_restore_rss_contexts(efx); - mutex_unlock(&efx->rss_lock); efx->type->filter_table_restore(efx); up_write(&efx->filter_sem); if (efx->type->sriov_reset) @@ -806,7 +802,6 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) fail: efx->port_initialized = false; - mutex_unlock(&efx->rss_lock); up_write(&efx->filter_sem); mutex_unlock(&efx->mac_lock); @@ -1016,9 +1011,7 @@ int efx_siena_init_struct(struct efx_nic *efx, efx->type->rx_hash_offset - efx->type->rx_prefix_size; 
efx->rx_packet_ts_offset = efx->type->rx_ts_offset - efx->type->rx_prefix_size; - INIT_LIST_HEAD(&efx->rss_context.list); efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - mutex_init(&efx->rss_lock); efx->vport_id = EVB_PORT_ID_ASSIGNED; spin_lock_init(&efx->stats_lock); efx->vi_stride = EFX_DEFAULT_VI_STRIDE; diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c index 4c182d4edfc29d..c5ad84db96137f 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool.c +++ b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -230,17 +230,11 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev, { struct efx_nic *efx = netdev_priv(net_dev); - /* Software capabilities */ - ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE); - ts_info->phc_index = -1; - efx_siena_ptp_get_ts_info(efx, ts_info); return 0; } const struct ethtool_ops efx_siena_ethtool_ops = { - .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USECS_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c index 5f0a8127e967f7..075fef64de680c 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool_common.c +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c @@ -820,27 +820,16 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev, return 0; case ETHTOOL_GRXFH: { - struct efx_rss_context *ctx = &efx->rss_context; __u64 data; - mutex_lock(&efx->rss_lock); - if (info->flow_type & FLOW_RSS && info->rss_context) { - ctx = efx_siena_find_rss_context_entry(efx, - info->rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } - } - data = 0; - if (!efx_rss_active(ctx)) /* No RSS */ - goto out_setdata_unlock; + if (!efx_rss_active(&efx->rss_context)) /* No RSS */ + goto out_setdata; - switch (info->flow_type & ~FLOW_RSS) { + switch (info->flow_type) { case UDP_V4_FLOW: case UDP_V6_FLOW: - if (ctx->rx_hash_udp_4tuple) + if (efx->rss_context.rx_hash_udp_4tuple) data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | RXH_IP_SRC | RXH_IP_DST); else @@ -862,10 +851,8 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev, default: break; } -out_setdata_unlock: +out_setdata: info->data = data; -out_unlock: - mutex_unlock(&efx->rss_lock); return rc; } @@ -1164,47 +1151,12 @@ u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev) return efx->type->rx_hash_key_size; } -static int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, - struct ethtool_rxfh_param *rxfh) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_rss_context *ctx; - int rc = 0; - - if (!efx->type->rx_pull_rss_context_config) - return -EOPNOTSUPP; - - mutex_lock(&efx->rss_lock); - ctx = efx_siena_find_rss_context_entry(efx, rxfh->rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } - rc = efx->type->rx_pull_rss_context_config(efx, ctx); - if (rc) - goto out_unlock; - - rxfh->hfunc = ETH_RSS_HASH_TOP; - if (rxfh->indir) - memcpy(rxfh->indir, ctx->rx_indir_table, - sizeof(ctx->rx_indir_table)); - if (rxfh->key) - memcpy(rxfh->key, ctx->rx_hash_key, - efx->type->rx_hash_key_size); -out_unlock: - mutex_unlock(&efx->rss_lock); - return rc; -} - int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, struct ethtool_rxfh_param *rxfh) { struct efx_nic *efx = netdev_priv(net_dev); int rc; - if (rxfh->rss_context) - return efx_siena_ethtool_get_rxfh_context(net_dev, rxfh); - rc = efx->type->rx_pull_rss_config(efx); 
if (rc) return rc; @@ -1219,70 +1171,6 @@ int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, return 0; } -static int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev, - struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack) -{ - struct efx_nic *efx = netdev_priv(net_dev); - u32 *rss_context = &rxfh->rss_context; - struct efx_rss_context *ctx; - u32 *indir = rxfh->indir; - bool allocated = false; - u8 *key = rxfh->key; - int rc; - - if (!efx->type->rx_push_rss_context_config) - return -EOPNOTSUPP; - - mutex_lock(&efx->rss_lock); - - if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { - if (rxfh->rss_delete) { - /* alloc + delete == Nothing to do */ - rc = -EINVAL; - goto out_unlock; - } - ctx = efx_siena_alloc_rss_context_entry(efx); - if (!ctx) { - rc = -ENOMEM; - goto out_unlock; - } - ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - /* Initialise indir table and key to defaults */ - efx_siena_set_default_rx_indir_table(efx, ctx); - netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); - allocated = true; - } else { - ctx = efx_siena_find_rss_context_entry(efx, *rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } - } - - if (rxfh->rss_delete) { - /* delete this context */ - rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); - if (!rc) - efx_siena_free_rss_context_entry(ctx); - goto out_unlock; - } - - if (!key) - key = ctx->rx_hash_key; - if (!indir) - indir = ctx->rx_indir_table; - - rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); - if (rc && allocated) - efx_siena_free_rss_context_entry(ctx); - else - *rss_context = ctx->user_id; -out_unlock: - mutex_unlock(&efx->rss_lock); - return rc; -} - int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) @@ -1296,9 +1184,6 @@ int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->rss_context) - efx_siena_ethtool_set_rxfh_context(net_dev, rxfh, extack); - if (!indir && !key) return 0; diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h index 94152f595acd3f..3fa7c652ae9b7c 100644 --- a/drivers/net/ethernet/sfc/siena/net_driver.h +++ b/drivers/net/ethernet/sfc/siena/net_driver.h @@ -707,20 +707,14 @@ struct vfdi_status; /* The reserved RSS context value */ #define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff /** - * struct efx_rss_context - A user-defined RSS context for filtering - * @list: node of linked list on which this struct is stored - * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or - * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC. - * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID. - * @user_id: the rss_context ID exposed to userspace over ethtool. + * struct efx_rss_context - An RSS context for filtering + * @context_id: 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID. 
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled * @rx_hash_key: Toeplitz hash key for this RSS context * @indir_table: Indirection table for this RSS context */ struct efx_rss_context { - struct list_head list; u32 context_id; - u32 user_id; bool rx_hash_udp_4tuple; u8 rx_hash_key[40]; u32 rx_indir_table[128]; @@ -851,9 +845,7 @@ enum efx_xdp_tx_queues_mode { * @rx_packet_ts_offset: Offset of timestamp from start of packet data * (valid only if channel->sync_timestamps_enabled; always negative) * @rx_scatter: Scatter mode enabled for receives - * @rss_context: Main RSS context. Its @list member is the head of the list of - * RSS contexts created by user requests - * @rss_lock: Protects custom RSS context software state in @rss_context.list + * @rss_context: Main RSS context * @vport_id: The function's vport ID, only relevant for PFs * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired @@ -1018,7 +1010,6 @@ struct efx_nic { int rx_packet_ts_offset; bool rx_scatter; struct efx_rss_context rss_context; - struct mutex rss_lock; u32 vport_id; unsigned int_error_count; @@ -1220,10 +1211,6 @@ struct efx_udp_tunnel { * @tx_enqueue: Add an SKB to TX queue * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC - * @rx_push_rss_context_config: Write RSS hash key and indirection table for - * user RSS context to the NIC - * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user - * RSS context back from the NIC * @rx_probe: Allocate resources for RX queue * @rx_init: Initialise RX queue on the NIC * @rx_remove: Free resources for RX queue @@ -1366,13 +1353,6 @@ struct efx_nic_type { int (*rx_push_rss_config)(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key); int (*rx_pull_rss_config)(struct efx_nic *efx); - int (*rx_push_rss_context_config)(struct efx_nic *efx, - struct efx_rss_context *ctx, - const u32 *rx_indir_table, - const u8 *key); - int (*rx_pull_rss_context_config)(struct efx_nic *efx, - struct efx_rss_context *ctx); - void (*rx_restore_rss_contexts)(struct efx_nic *efx); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index c473a4b6dd44d6..85005196b4c506 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -897,7 +897,7 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data), timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART); timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR); timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR); - timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND), + timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND); timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); /* Ignore seconds */ diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c index 219fb358a646d3..082e35c6caaaed 100644 --- a/drivers/net/ethernet/sfc/siena/rx_common.c +++ b/drivers/net/ethernet/sfc/siena/rx_common.c @@ -558,62 +558,6 @@ efx_siena_rx_packet_gro(struct efx_channel *channel, napi_gro_frags(napi); } -/* RSS contexts. 
We're using linked lists and crappy O(n) algorithms, because - * (a) this is an infrequent control-plane operation and (b) n is small (max 64) - */ -struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx) -{ - struct list_head *head = &efx->rss_context.list; - struct efx_rss_context *ctx, *new; - u32 id = 1; /* Don't use zero, that refers to the master RSS context */ - - WARN_ON(!mutex_is_locked(&efx->rss_lock)); - - /* Search for first gap in the numbering */ - list_for_each_entry(ctx, head, list) { - if (ctx->user_id != id) - break; - id++; - /* Check for wrap. If this happens, we have nearly 2^32 - * allocated RSS contexts, which seems unlikely. - */ - if (WARN_ON_ONCE(!id)) - return NULL; - } - - /* Create the new entry */ - new = kmalloc(sizeof(*new), GFP_KERNEL); - if (!new) - return NULL; - new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - new->rx_hash_udp_4tuple = false; - - /* Insert the new entry into the gap */ - new->user_id = id; - list_add_tail(&new->list, &ctx->list); - return new; -} - -struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx, - u32 id) -{ - struct list_head *head = &efx->rss_context.list; - struct efx_rss_context *ctx; - - WARN_ON(!mutex_is_locked(&efx->rss_lock)); - - list_for_each_entry(ctx, head, list) - if (ctx->user_id == id) - return ctx; - return NULL; -} - -void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx) -{ - list_del(&ctx->list); - kfree(ctx); -} - void efx_siena_set_default_rx_indir_table(struct efx_nic *efx, struct efx_rss_context *ctx) { diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h index 6b37f83ecb30b8..f90a8320d3969a 100644 --- a/drivers/net/ethernet/sfc/siena/rx_common.h +++ b/drivers/net/ethernet/sfc/siena/rx_common.h @@ -78,10 +78,6 @@ efx_siena_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, unsigned int n_frags, u8 *eh, __wsum csum); -struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx); -struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx, - u32 id); -void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx); void efx_siena_set_default_rx_indir_table(struct efx_nic *efx, struct efx_rss_context *ctx); diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c index c4408842432363..a421b012350694 100644 --- a/drivers/net/ethernet/sfc/tc_counters.c +++ b/drivers/net/ethernet/sfc/tc_counters.c @@ -249,7 +249,7 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index( &ctr->linkage, efx_tc_counter_id_ht_params); kfree(ctr); - return (void *)cnt; /* it's an ERR_PTR */ + return ERR_CAST(cnt); } ctr->cnt = cnt; refcount_set(&ctr->ref, 1); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 907498848028b4..a5e23e2da90f44 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2355,7 +2355,7 @@ static int smc_drv_probe(struct platform_device *pdev) * the resource supplies a trigger, override the irqflags with * the trigger flags from the resource. 
*/ - irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); + irq_resflags = irq_get_trigger_type(ndev->irq); if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) irq_flags = irq_resflags & IRQF_TRIGGER_MASK; diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 15cb96c2506d73..f30d4b17c7fbe0 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include "smsc9420.h" #define DRV_NAME "smsc9420" diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index cd36ff4da68c66..684489156dcee7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -29,6 +29,7 @@ /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 +#define DWMAC_CORE_3_70 0x37 #define DWMAC_CORE_4_00 0x40 #define DWMAC_CORE_4_10 0x41 #define DWMAC_CORE_5_00 0x50 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 9e40c28d453ab1..bfe6e2d631bdf5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -8,15 +8,90 @@ #include #include #include "stmmac.h" +#include "dwmac_dma.h" +#include "dwmac1000.h" + +/* Normal Loongson Tx Summary */ +#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000 +/* Normal Loongson Rx Summary */ +#define DMA_INTR_ENA_NIE_RX_LOONGSON 0x00020000 + +#define DMA_INTR_NORMAL_LOONGSON (DMA_INTR_ENA_NIE_TX_LOONGSON | \ + DMA_INTR_ENA_NIE_RX_LOONGSON | \ + DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE) + +/* Abnormal Loongson Tx Summary */ +#define DMA_INTR_ENA_AIE_TX_LOONGSON 0x00010000 +/* Abnormal Loongson Rx Summary */ +#define DMA_INTR_ENA_AIE_RX_LOONGSON 0x00008000 + +#define DMA_INTR_ABNORMAL_LOONGSON (DMA_INTR_ENA_AIE_TX_LOONGSON | \ + DMA_INTR_ENA_AIE_RX_LOONGSON | \ + DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE) + +#define DMA_INTR_DEFAULT_MASK_LOONGSON (DMA_INTR_NORMAL_LOONGSON | \ + DMA_INTR_ABNORMAL_LOONGSON) + +/* Normal Loongson Tx Interrupt Summary */ +#define DMA_STATUS_NIS_TX_LOONGSON 0x00040000 +/* Normal Loongson Rx Interrupt Summary */ +#define DMA_STATUS_NIS_RX_LOONGSON 0x00020000 + +/* Abnormal Loongson Tx Interrupt Summary */ +#define DMA_STATUS_AIS_TX_LOONGSON 0x00010000 +/* Abnormal Loongson Rx Interrupt Summary */ +#define DMA_STATUS_AIS_RX_LOONGSON 0x00008000 + +/* Fatal Loongson Tx Bus Error Interrupt */ +#define DMA_STATUS_FBI_TX_LOONGSON 0x00002000 +/* Fatal Loongson Rx Bus Error Interrupt */ +#define DMA_STATUS_FBI_RX_LOONGSON 0x00001000 + +#define DMA_STATUS_MSK_COMMON_LOONGSON (DMA_STATUS_NIS_TX_LOONGSON | \ + DMA_STATUS_NIS_RX_LOONGSON | \ + DMA_STATUS_AIS_TX_LOONGSON | \ + DMA_STATUS_AIS_RX_LOONGSON | \ + DMA_STATUS_FBI_TX_LOONGSON | \ + DMA_STATUS_FBI_RX_LOONGSON) + +#define DMA_STATUS_MSK_RX_LOONGSON (DMA_STATUS_ERI | DMA_STATUS_RWT | \ + DMA_STATUS_RPS | DMA_STATUS_RU | \ + DMA_STATUS_RI | DMA_STATUS_OVF | \ + DMA_STATUS_MSK_COMMON_LOONGSON) + +#define DMA_STATUS_MSK_TX_LOONGSON (DMA_STATUS_ETI | DMA_STATUS_UNF | \ + DMA_STATUS_TJT | DMA_STATUS_TU | \ + DMA_STATUS_TPS | DMA_STATUS_TI | \ + DMA_STATUS_MSK_COMMON_LOONGSON) + +#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03 +#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13 +#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */ +#define CHANNEL_NUM 8 + +struct loongson_data { + u32 loongson_id; + struct device *dev; 
+}; + +struct stmmac_pci_info { + int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); +}; -static int loongson_default_data(struct plat_stmmacenet_data *plat) +static void loongson_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) { + /* Get bus_id, this can be overwritten later */ + plat->bus_id = pci_dev_id(pdev); + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->has_gmac = 1; plat->force_sf_dma_mode = 1; /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; + plat->multicast_filter_bins = 256; + + plat->mac_interface = PHY_INTERFACE_MODE_NA; /* Set default value for unicast filter entries */ plat->unicast_filter_entries = 1; @@ -24,10 +99,6 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat) /* Set the maxmtu to a default of JUMBO_LEN */ plat->maxmtu = JUMBO_LEN; - /* Set default number of RX and TX queues to use */ - plat->tx_queues_to_use = 1; - plat->rx_queues_to_use = 1; - /* Disable Priority config by default */ plat->tx_queues_cfg[0].use_prio = false; plat->rx_queues_cfg[0].use_prio = false; @@ -35,30 +106,424 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat) /* Disable RX queues routing by default */ plat->rx_queues_cfg[0].pkt_route = 0x0; + plat->clk_ref_rate = 125000000; + plat->clk_ptp_rate = 125000000; + /* Default to phy auto-detection */ plat->phy_addr = -1; plat->dma_cfg->pbl = 32; plat->dma_cfg->pblx8 = true; +} + +static int loongson_gmac_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct loongson_data *ld; + int i; + + ld = plat->bsp_priv; + + loongson_default_data(pdev, plat); + + if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) { + plat->rx_queues_to_use = CHANNEL_NUM; + plat->tx_queues_to_use = CHANNEL_NUM; + + /* Only channel 0 supports checksum, + * so turn off checksum to enable multiple channels. + */ + for (i = 1; i < CHANNEL_NUM; i++) + plat->tx_queues_cfg[i].coe_unsupported = 1; + } else { + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; + } + + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; - plat->multicast_filter_bins = 256; return 0; } -static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static struct stmmac_pci_info loongson_gmac_pci_info = { + .setup = loongson_gmac_data, +}; + +static void loongson_gnet_fix_speed(void *priv, unsigned int speed, + unsigned int mode) { - struct plat_stmmacenet_data *plat; - struct stmmac_resources res; - struct device_node *np; - int ret, i, phy_mode; + struct loongson_data *ld = (struct loongson_data *)priv; + struct net_device *ndev = dev_get_drvdata(ld->dev); + struct stmmac_priv *ptr = netdev_priv(ndev); + + /* The integrated PHY has a weird problem with switching from the low + * speeds to 1000Mbps mode. The speedup procedure requires the PHY-link + * re-negotiation. 
+ */ + if (speed == SPEED_1000) { + if (readl(ptr->ioaddr + MAC_CTRL_REG) & + GMAC_CONTROL_PS) + /* Work around hardware bug, restart autoneg */ + phy_restart_aneg(ndev->phydev); + } +} - np = dev_of_node(&pdev->dev); +static int loongson_gnet_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct loongson_data *ld; + int i; - if (!np) { - pr_info("dwmac_loongson_pci: No OF node\n"); - return -ENODEV; + ld = plat->bsp_priv; + + loongson_default_data(pdev, plat); + + if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) { + plat->rx_queues_to_use = CHANNEL_NUM; + plat->tx_queues_to_use = CHANNEL_NUM; + + /* Only channel 0 supports checksum, + * so turn off checksum to enable multiple channels. + */ + for (i = 1; i < CHANNEL_NUM; i++) + plat->tx_queues_cfg[i].coe_unsupported = 1; + } else { + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; } + plat->phy_interface = PHY_INTERFACE_MODE_GMII; + plat->mdio_bus_data->phy_mask = ~(u32)BIT(2); + plat->fix_mac_speed = loongson_gnet_fix_speed; + + return 0; +} + +static struct stmmac_pci_info loongson_gnet_pci_info = { + .setup = loongson_gnet_data, +}; + +static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 chan) +{ + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan)); + + if (dma_cfg->pblx8) + value |= DMA_BUS_MODE_MAXPBL; + + value |= DMA_BUS_MODE_USP; + value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK); + value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT); + value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + + /* Set the Fixed burst mode */ + if (dma_cfg->fixed_burst) + value |= DMA_BUS_MODE_FB; + + /* Mixed Burst has no effect when fb is set */ + if (dma_cfg->mixed_burst) + value |= DMA_BUS_MODE_MB; + + if (dma_cfg->atds) + value |= DMA_BUS_MODE_ATDS; + + if (dma_cfg->aal) + value |= DMA_BUS_MODE_AAL; + + writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr + + DMA_CHAN_INTR_ENA(chan)); +} + +static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_extra_stats *x, + u32 chan, u32 dir) +{ + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); + u32 abnor_intr_status; + u32 nor_intr_status; + u32 fb_intr_status; + u32 intr_status; + int ret = 0; + + /* read the status register (CSR5) */ + intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); + + if (dir == DMA_DIR_RX) + intr_status &= DMA_STATUS_MSK_RX_LOONGSON; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_STATUS_MSK_TX_LOONGSON; + + nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON | + DMA_STATUS_NIS_RX_LOONGSON); + abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON | + DMA_STATUS_AIS_RX_LOONGSON); + fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON | + DMA_STATUS_FBI_RX_LOONGSON); + + /* ABNORMAL interrupts */ + if (unlikely(abnor_intr_status)) { + if (unlikely(intr_status & DMA_STATUS_UNF)) { + ret = tx_hard_error_bump_tc; + x->tx_undeflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TJT)) + x->tx_jabber_irq++; + if (unlikely(intr_status & DMA_STATUS_OVF)) + x->rx_overflow_irq++; + if (unlikely(intr_status & DMA_STATUS_RU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_STATUS_RWT)) + x->rx_watchdog_irq++; + if
(unlikely(intr_status & DMA_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & DMA_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(fb_intr_status)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(nor_intr_status)) { + if (likely(intr_status & DMA_STATUS_RI)) { + u32 value = readl(ioaddr + DMA_INTR_ENA); + /* to schedule NAPI on real RIE event. */ + if (likely(value & DMA_INTR_ENA_RIE)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_rx; + } + } + if (likely(intr_status & DMA_STATUS_TI)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_STATUS_ERI)) + x->rx_early_irq++; + } + /* Optional hardware blocks, interrupts should be disabled */ + if (unlikely(intr_status & + (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) + pr_warn("%s: unexpected status %08x\n", __func__, intr_status); + + /* Clear the interrupt by writing a logic 1 to the CSR5[19-0] */ + writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan)); + + return ret; +} + +static struct mac_device_info *loongson_dwmac_setup(void *apriv) +{ + struct stmmac_priv *priv = apriv; + struct mac_device_info *mac; + struct stmmac_dma_ops *dma; + struct loongson_data *ld; + struct pci_dev *pdev; + + ld = priv->plat->bsp_priv; + pdev = to_pci_dev(priv->device); + + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + if (!mac) + return NULL; + + dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL); + if (!dma) + return NULL; + + /* The Loongson GMAC and GNET devices are based on the DW GMAC + * v3.50a and v3.73a IP-cores. But the HW designers have changed the + * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the + * network controllers with the multi-channels feature + * available to emphasize the differences: multiple DMA-channels, + * AV feature and GMAC_INT_STATUS CSR flags layout. Get back the + * original value so the correct HW-interface would be selected. + */ + if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) { + priv->synopsys_id = DWMAC_CORE_3_70; + *dma = dwmac1000_dma_ops; + dma->init_chan = loongson_dwmac_dma_init_channel; + dma->dma_interrupt = loongson_dwmac_dma_interrupt; + mac->dma = dma; + } + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + + /* Pre-initialize the respective "mac" fields as it's done in + * dwmac1000_setup() + */ + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + /* Loongson GMAC doesn't support the flow control. LS2K2000 + * GNET doesn't support the half-duplex link mode. 
+ */ + if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) { + mac->link.caps = MAC_10 | MAC_100 | MAC_1000; + } else { + if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) + mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; + else + mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10FD | MAC_100FD | MAC_1000FD; + } + + mac->link.duplex = GMAC_CONTROL_DM; + mac->link.speed10 = GMAC_CONTROL_PS; + mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->link.speed1000 = 0; + mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->mii.addr = GMAC_MII_ADDR; + mac->mii.data = GMAC_MII_DATA; + mac->mii.addr_shift = 11; + mac->mii.addr_mask = 0x0000F800; + mac->mii.reg_shift = 6; + mac->mii.reg_mask = 0x000007C0; + mac->mii.clk_csr_shift = 2; + mac->mii.clk_csr_mask = GENMASK(5, 2); + + return mac; +} + +static int loongson_dwmac_msi_config(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int i, ret, vecs; + + vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1); + ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI); + if (ret < 0) { + dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n"); + return ret; + } + + res->irq = pci_irq_vector(pdev, 0); + + for (i = 0; i < plat->rx_queues_to_use; i++) { + res->rx_irq[CHANNEL_NUM - 1 - i] = + pci_irq_vector(pdev, 1 + i * 2); + } + + for (i = 0; i < plat->tx_queues_to_use; i++) { + res->tx_irq[CHANNEL_NUM - 1 - i] = + pci_irq_vector(pdev, 2 + i * 2); + } + + plat->flags |= STMMAC_FLAG_MULTI_MSI_EN; + + return 0; +} + +static void loongson_dwmac_msi_clear(struct pci_dev *pdev) +{ + pci_free_irq_vectors(pdev); +} + +static int loongson_dwmac_dt_config(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + struct device_node *np = dev_of_node(&pdev->dev); + int ret; + + plat->mdio_node = of_get_child_by_name(np, "mdio"); + if (plat->mdio_node) { + dev_info(&pdev->dev, "Found MDIO subnode\n"); + plat->mdio_bus_data->needs_reset = true; + } + + ret = of_alias_get_id(np, "ethernet"); + if (ret >= 0) + plat->bus_id = ret; + + res->irq = of_irq_get_byname(np, "macirq"); + if (res->irq < 0) { + dev_err(&pdev->dev, "IRQ macirq not found\n"); + ret = -ENODEV; + goto err_put_node; + } + + res->wol_irq = of_irq_get_byname(np, "eth_wake_irq"); + if (res->wol_irq < 0) { + dev_info(&pdev->dev, + "IRQ eth_wake_irq not found, using macirq\n"); + res->wol_irq = res->irq; + } + + res->lpi_irq = of_irq_get_byname(np, "eth_lpi"); + if (res->lpi_irq < 0) { + dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); + ret = -ENODEV; + goto err_put_node; + } + + ret = device_get_phy_mode(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "phy_mode not found\n"); + ret = -ENODEV; + goto err_put_node; + } + + plat->phy_interface = ret; + + return 0; + +err_put_node: + of_node_put(plat->mdio_node); + + return ret; +} + +static void loongson_dwmac_dt_clear(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + of_node_put(plat->mdio_node); +} + +static int loongson_dwmac_acpi_config(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + if (!pdev->irq) + return -EINVAL; + + res->irq = pdev->irq; + + return 0; +} + +static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct plat_stmmacenet_data *plat; + struct stmmac_pci_info *info; + struct stmmac_resources res; + struct loongson_data *ld; + int ret, i; + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if 
(!plat) return -ENOMEM; @@ -69,25 +534,23 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (!plat->mdio_bus_data) return -ENOMEM; - plat->mdio_node = of_get_child_by_name(np, "mdio"); - if (plat->mdio_node) { - dev_info(&pdev->dev, "Found MDIO subnode\n"); - plat->mdio_bus_data->needs_reset = true; - } - plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL); - if (!plat->dma_cfg) { - ret = -ENOMEM; - goto err_put_node; - } + if (!plat->dma_cfg) + return -ENOMEM; + + ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL); + if (!ld) + return -ENOMEM; /* Enable pci device */ ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); - goto err_put_node; + return ret; } + pci_set_master(pdev); + /* Get the base address of device */ for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) == 0) @@ -98,59 +561,43 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id break; } - plat->bus_id = of_alias_get_id(np, "ethernet"); - if (plat->bus_id < 0) - plat->bus_id = pci_dev_id(pdev); - - phy_mode = device_get_phy_mode(&pdev->dev); - if (phy_mode < 0) { - dev_err(&pdev->dev, "phy_mode not found\n"); - ret = phy_mode; - goto err_disable_device; - } - - plat->phy_interface = phy_mode; - plat->mac_interface = PHY_INTERFACE_MODE_GMII; - - pci_set_master(pdev); - - loongson_default_data(plat); - pci_enable_msi(pdev); memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[0]; - res.irq = of_irq_get_byname(np, "macirq"); - if (res.irq < 0) { - dev_err(&pdev->dev, "IRQ macirq not found\n"); - ret = -ENODEV; - goto err_disable_msi; - } + plat->bsp_priv = ld; + plat->setup = loongson_dwmac_setup; + ld->dev = &pdev->dev; + ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff; - res.wol_irq = of_irq_get_byname(np, "eth_wake_irq"); - if (res.wol_irq < 0) { - dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n"); - res.wol_irq = res.irq; - } + info = (struct stmmac_pci_info *)id->driver_data; + ret = info->setup(pdev, plat); + if (ret) + goto err_disable_device; - res.lpi_irq = of_irq_get_byname(np, "eth_lpi"); - if (res.lpi_irq < 0) { - dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); - ret = -ENODEV; - goto err_disable_msi; - } + if (dev_of_node(&pdev->dev)) + ret = loongson_dwmac_dt_config(pdev, plat, &res); + else + ret = loongson_dwmac_acpi_config(pdev, plat, &res); + if (ret) + goto err_disable_device; + + /* Use the common MAC IRQ if per-channel MSIs allocation failed */ + if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) + loongson_dwmac_msi_config(pdev, plat, &res); ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) - goto err_disable_msi; + goto err_plat_clear; - return ret; + return 0; -err_disable_msi: - pci_disable_msi(pdev); +err_plat_clear: + if (dev_of_node(&pdev->dev)) + loongson_dwmac_dt_clear(pdev, plat); + if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) + loongson_dwmac_msi_clear(pdev); err_disable_device: pci_disable_device(pdev); -err_put_node: - of_node_put(plat->mdio_node); return ret; } @@ -158,11 +605,18 @@ static void loongson_dwmac_remove(struct pci_dev *pdev) { struct net_device *ndev = dev_get_drvdata(&pdev->dev); struct stmmac_priv *priv = netdev_priv(ndev); + struct loongson_data *ld; int i; - of_node_put(priv->plat->mdio_node); + ld = priv->plat->bsp_priv; stmmac_dvr_remove(&pdev->dev); + if (dev_of_node(&pdev->dev)) + loongson_dwmac_dt_clear(pdev, priv->plat); + + if (ld->loongson_id == 
DWMAC_CORE_LS_MULTICHAN) + loongson_dwmac_msi_clear(pdev); + for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) == 0) continue; @@ -170,7 +624,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev) break; } - pci_disable_msi(pdev); pci_disable_device(pdev); } @@ -213,7 +666,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend, loongson_dwmac_resume); static const struct pci_device_id loongson_dwmac_id_table[] = { - { PCI_VDEVICE(LOONGSON, 0x7a03) }, + { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) }, + { PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) }, {} }; MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); @@ -232,4 +686,5 @@ module_pci_driver(loongson_dwmac_driver); MODULE_DESCRIPTION("Loongson DWMAC PCI driver"); MODULE_AUTHOR("Qing Zhang "); +MODULE_AUTHOR("Yanteng Si "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 7ae04d8d291c82..50073bdade46e4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1116,6 +1116,161 @@ static const struct rk_gmac_ops rk3568_ops = { }, }; +/* VCCIO0_1_3_IOC */ +#define RK3576_VCCIO0_1_3_IOC_CON2 0X6408 +#define RK3576_VCCIO0_1_3_IOC_CON3 0X640c +#define RK3576_VCCIO0_1_3_IOC_CON4 0X6410 +#define RK3576_VCCIO0_1_3_IOC_CON5 0X6414 + +#define RK3576_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15) +#define RK3576_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15) +#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7) +#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7) + +#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8) +#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) + +/* SDGMAC_GRF */ +#define RK3576_GRF_GMAC_CON0 0X0020 +#define RK3576_GRF_GMAC_CON1 0X0024 + +#define RK3576_GMAC_RMII_MODE GRF_BIT(3) +#define RK3576_GMAC_RGMII_MODE GRF_CLR_BIT(3) + +#define RK3576_GMAC_CLK_SELECT_IO GRF_BIT(7) +#define RK3576_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(7) + +#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5) +#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5) + +#define RK3576_GMAC_CLK_RGMII_DIV1 \ + (GRF_CLR_BIT(6) | GRF_CLR_BIT(5)) +#define RK3576_GMAC_CLK_RGMII_DIV5 \ + (GRF_BIT(6) | GRF_BIT(5)) +#define RK3576_GMAC_CLK_RGMII_DIV50 \ + (GRF_BIT(6) | GRF_CLR_BIT(5)) + +#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4) +#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4) + +static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + unsigned int offset_con; + + if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) { + dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n"); + return; + } + + offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 : + RK3576_GRF_GMAC_CON0; + + regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RGMII_MODE); + + offset_con = bsp_priv->id == 1 ? 
RK3576_VCCIO0_1_3_IOC_CON4 : + RK3576_VCCIO0_1_3_IOC_CON2; + + /* m0 && m1 delay enabled */ + regmap_write(bsp_priv->php_grf, offset_con, + DELAY_ENABLE(RK3576, tx_delay, rx_delay)); + regmap_write(bsp_priv->php_grf, offset_con + 0x4, + DELAY_ENABLE(RK3576, tx_delay, rx_delay)); + + /* m0 && m1 delay value */ + regmap_write(bsp_priv->php_grf, offset_con, + RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) | + RK3576_GMAC_CLK_RX_DL_CFG(rx_delay)); + regmap_write(bsp_priv->php_grf, offset_con + 0x4, + RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) | + RK3576_GMAC_CLK_RX_DL_CFG(rx_delay)); +} + +static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + unsigned int offset_con; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 : + RK3576_GRF_GMAC_CON0; + + regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE); +} + +static void rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + unsigned int val = 0, offset_con; + + switch (speed) { + case 10: + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) + val = RK3576_GMAC_CLK_RMII_DIV20; + else + val = RK3576_GMAC_CLK_RGMII_DIV50; + break; + case 100: + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) + val = RK3576_GMAC_CLK_RMII_DIV2; + else + val = RK3576_GMAC_CLK_RGMII_DIV5; + break; + case 1000: + if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII) + val = RK3576_GMAC_CLK_RGMII_DIV1; + else + goto err; + break; + default: + goto err; + } + + offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 : + RK3576_GRF_GMAC_CON0; + + regmap_write(bsp_priv->grf, offset_con, val); + + return; +err: + dev_err(dev, "unknown speed value for GMAC speed=%d", speed); +} + +static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input, + bool enable) +{ + unsigned int val = input ? RK3576_GMAC_CLK_SELECT_IO : + RK3576_GMAC_CLK_SELECT_CRU; + unsigned int offset_con; + + val |= enable ? RK3576_GMAC_CLK_RMII_NOGATE : + RK3576_GMAC_CLK_RMII_GATE; + + offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 : + RK3576_GRF_GMAC_CON0; + + regmap_write(bsp_priv->grf, offset_con, val); +} + +static const struct rk_gmac_ops rk3576_ops = { + .set_to_rgmii = rk3576_set_to_rgmii, + .set_to_rmii = rk3576_set_to_rmii, + .set_rgmii_speed = rk3576_set_gmac_speed, + .set_rmii_speed = rk3576_set_gmac_speed, + .set_clock_selection = rk3576_set_clock_selection, + .regs_valid = true, + .regs = { + 0x2a220000, /* gmac0 */ + 0x2a230000, /* gmac1 */ + 0x0, /* sentinel */ + }, +}; + /* sys_grf */ #define RK3588_GRF_GMAC_CON7 0X031c #define RK3588_GRF_GMAC_CON8 0X0320 @@ -1141,8 +1296,8 @@ static const struct rk_gmac_ops rk3568_ops = { #define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id)) #define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id)) -#define RK3588_GMAC_CLK_SELET_CRU(id) GRF_BIT(5 * (id) + 4) -#define RK3588_GMAC_CLK_SELET_IO(id) GRF_CLR_BIT(5 * (id) + 4) +#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4) +#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4) #define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2) #define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2) @@ -1240,8 +1395,8 @@ static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed) static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input, bool enable) { - unsigned int val = input ? 
RK3588_GMAC_CLK_SELET_IO(bsp_priv->id) : - RK3588_GMAC_CLK_SELET_CRU(bsp_priv->id); + unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) : + RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id); val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) : RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id); @@ -1908,6 +2063,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = { { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, { .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops }, + { .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops }, { .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops }, { .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops }, { .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index e1b761dcfa1dd5..4a0ae92b3055c2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -299,7 +299,7 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr) * Called from stmmac via stmmac_dma_ops->init */ static void sun8i_dwmac_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); @@ -395,7 +395,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv, writel(v, ioaddr + EMAC_TX_CTL1); } -static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr) +static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan) { u32 v; @@ -774,8 +774,8 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv) static int get_ephy_nodes(struct stmmac_priv *priv) { struct sunxi_priv_data *gmac = priv->plat->bsp_priv; - struct device_node *mdio_mux, *iphynode; struct device_node *mdio_internal; + struct device_node *mdio_mux; int ret; mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux"); @@ -793,7 +793,7 @@ static int get_ephy_nodes(struct stmmac_priv *priv) } /* Seek for internal PHY */ - for_each_child_of_node(mdio_internal, iphynode) { + for_each_child_of_node_scoped(mdio_internal, iphynode) { gmac->ephy_clk = of_clk_get(iphynode, 0); if (IS_ERR(gmac->ephy_clk)) continue; @@ -801,14 +801,12 @@ static int get_ephy_nodes(struct stmmac_priv *priv) if (IS_ERR(gmac->rst_ephy)) { ret = PTR_ERR(gmac->rst_ephy); if (ret == -EPROBE_DEFER) { - of_node_put(iphynode); of_node_put(mdio_internal); return ret; } continue; } dev_info(priv->device, "Found internal PHY node\n"); - of_node_put(iphynode); of_node_put(mdio_internal); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index adccdd816ea913..118a22406a2e93 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -70,15 +70,17 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) writel(value, ioaddr + DMA_AXI_BUS_MODE); } -static void dwmac1000_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) +static void dwmac1000_dma_init_channel(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) { - u32 value = readl(ioaddr + DMA_BUS_MODE); int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 value; - 
/* - * Set the DMA PBL (Programmable Burst Length) mode. + value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan)); + + /* Set the DMA PBL (Programmable Burst Length) mode. * * Note: before stmmac core 3.50 this mode bit was 4xPBL, and * post 3.5 mode bit acts as 8*PBL. @@ -98,16 +100,16 @@ static void dwmac1000_dma_init(void __iomem *ioaddr, if (dma_cfg->mixed_burst) value |= DMA_BUS_MODE_MB; - if (atds) + if (dma_cfg->atds) value |= DMA_BUS_MODE_ATDS; if (dma_cfg->aal) value |= DMA_BUS_MODE_AAL; - writel(value, ioaddr + DMA_BUS_MODE); + writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan)); /* Mask interrupts by writing to CSR7 */ - writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan)); } static void dwmac1000_dma_init_rx(struct stmmac_priv *priv, @@ -116,7 +118,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv, dma_addr_t dma_rx_phy, u32 chan) { /* RX descriptor base address list must be written into DMA CSR3 */ - writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR); + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan)); } static void dwmac1000_dma_init_tx(struct stmmac_priv *priv, @@ -125,7 +127,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv, dma_addr_t dma_tx_phy, u32 chan) { /* TX descriptor base address list must be written into DMA CSR4 */ - writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR); + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); } static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) @@ -153,7 +155,7 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv, void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode) { - u32 csr6 = readl(ioaddr + DMA_CONTROL); + u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel)); if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable RX store and forward mode\n"); @@ -175,14 +177,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv, /* Configure flow control based on rx fifo size */ csr6 = dwmac1000_configure_fc(csr6, fifosz); - writel(csr6, ioaddr + DMA_CONTROL); + writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel)); } static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv, void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode) { - u32 csr6 = readl(ioaddr + DMA_CONTROL); + u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel)); if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable TX store and forward mode\n"); @@ -209,7 +211,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv, csr6 |= DMA_CONTROL_TTC_256; } - writel(csr6, ioaddr + DMA_CONTROL); + writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel)); } static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv, @@ -271,12 +273,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr, static void dwmac1000_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr, u32 riwt, u32 queue) { - writel(riwt, ioaddr + DMA_RX_WATCHDOG); + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue)); } const struct stmmac_dma_ops dwmac1000_dma_ops = { .reset = dwmac_dma_reset, - .init = dwmac1000_dma_init, + .init_chan = dwmac1000_dma_init_channel, .init_rx_chan = dwmac1000_dma_init_rx, .init_tx_chan = dwmac1000_dma_init_tx, .axi = dwmac1000_dma_axi, @@ -294,3 +296,4 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = { .get_hw_feature = dwmac1000_get_hw_feature, .rx_watchdog = dwmac1000_rx_watchdog, }; +EXPORT_SYMBOL_GPL(dwmac1000_dma_ops); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index b402fb54f6131b..82957db47c9911 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -19,7 +19,7 @@ #include "dwmac_dma.h" static void dwmac100_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT), diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 31c387cc5f269d..e65a65666cc1de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "stmmac.h" #include "stmmac_pcs.h" #include "dwmac4.h" @@ -58,10 +59,6 @@ static void dwmac4_core_init(struct mac_device_info *hw, if (hw->pcs) value |= GMAC_PCS_IRQ_DEFAULT; - /* Enable FPE interrupt */ - if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26) - value |= GMAC_INT_FPE_EN; - writel(value, ioaddr + GMAC_INT_EN); if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE) @@ -475,7 +472,7 @@ static int dwmac4_write_vlan_filter(struct net_device *dev, u8 index, u32 data) { void __iomem *ioaddr = (void __iomem *)dev->base_addr; - int i, timeout = 10; + int ret; u32 val; if (index >= hw->num_vlan) @@ -491,16 +488,15 @@ static int dwmac4_write_vlan_filter(struct net_device *dev, writel(val, ioaddr + GMAC_VLAN_TAG); - for (i = 0; i < timeout; i++) { - val = readl(ioaddr + GMAC_VLAN_TAG); - if (!(val & GMAC_VLAN_TAG_CTRL_OB)) - return 0; - udelay(1); + ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val, + !(val & GMAC_VLAN_TAG_CTRL_OB), + 1000, 500000); + if (ret) { + netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n"); + return -EBUSY; } - netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n"); - - return -EBUSY; + return 0; } static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev, @@ -1268,6 +1264,9 @@ const struct stmmac_ops dwmac410_ops = { .fpe_configure = dwmac5_fpe_configure, .fpe_send_mpacket = dwmac5_fpe_send_mpacket, .fpe_irq_status = dwmac5_fpe_irq_status, + .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size, + .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size, + .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class, .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, @@ -1320,6 +1319,9 @@ const struct stmmac_ops dwmac510_ops = { .fpe_configure = dwmac5_fpe_configure, .fpe_send_mpacket = dwmac5_fpe_send_mpacket, .fpe_irq_status = dwmac5_fpe_irq_status, + .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size, + .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size, + .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class, .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 1c5802e0d7f4e2..e99401bcc1f84b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -186,10 +186,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p) static void 
dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { - p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); + u32 flags = (RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); if (!disable_rx_ic) - p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); + flags |= RDES3_INT_ON_COMPLETION_EN; + + p->des3 |= cpu_to_le32(flags); } static int dwmac4_get_tx_ls(struct dma_desc *p) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 84d3a8551b0322..e0165358c4ac88 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -153,7 +153,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv, } static void dwmac4_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index e02cebc3f1b736..08add508db8441 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -575,11 +575,11 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, u32 num_txq, u32 num_rxq, - bool enable) + bool tx_enable, bool pmac_enable) { u32 value; - if (enable) { + if (tx_enable) { cfg->fpe_csr = EFPE; value = readl(ioaddr + GMAC_RXQ_CTRL1); value &= ~GMAC_RXQCTRL_FPRQ; @@ -589,6 +589,21 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, cfg->fpe_csr = 0; } writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS); + + value = readl(ioaddr + GMAC_INT_EN); + + if (pmac_enable) { + if (!(value & GMAC_INT_FPE_EN)) { + /* Dummy read to clear any pending masked interrupts */ + readl(ioaddr + MAC_FPE_CTRL_STS); + + value |= GMAC_INT_FPE_EN; + } + } else { + value &= ~GMAC_INT_FPE_EN; + } + + writel(value, ioaddr + GMAC_INT_EN); } int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) @@ -605,22 +620,22 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) if (value & TRSP) { status |= FPE_EVENT_TRSP; - netdev_info(dev, "FPE: Respond mPacket is transmitted\n"); + netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n"); } if (value & TVER) { status |= FPE_EVENT_TVER; - netdev_info(dev, "FPE: Verify mPacket is transmitted\n"); + netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n"); } if (value & RRSP) { status |= FPE_EVENT_RRSP; - netdev_info(dev, "FPE: Respond mPacket is received\n"); + netdev_dbg(dev, "FPE: Respond mPacket is received\n"); } if (value & RVER) { status |= FPE_EVENT_RVER; - netdev_info(dev, "FPE: Verify mPacket is received\n"); + netdev_dbg(dev, "FPE: Verify mPacket is received\n"); } return status; @@ -638,3 +653,72 @@ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, writel(value, ioaddr + MAC_FPE_CTRL_STS); } + +int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr) +{ + return FIELD_GET(DWMAC5_ADD_FRAG_SZ, readl(ioaddr + MTL_FPE_CTRL_STS)); +} + +void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size) +{ + u32 value; + + value = readl(ioaddr + MTL_FPE_CTRL_STS); + writel(u32_replace_bits(value, add_frag_size, DWMAC5_ADD_FRAG_SZ), + ioaddr + MTL_FPE_CTRL_STS); +} + +#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping" +#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]" + +int 
dwmac5_fpe_map_preemption_class(struct net_device *ndev, + struct netlink_ext_ack *extack, u32 pclass) +{ + u32 val, offset, count, queue_weight, preemptible_txqs = 0; + struct stmmac_priv *priv = netdev_priv(ndev); + u32 num_tc = ndev->num_tc; + + if (!pclass) + goto update_mapping; + + /* DWMAC CORE4+ can not program TC:TXQ mapping to hardware. + * + * Synopsys Databook: + * "The number of Tx DMA channels is equal to the number of Tx queues, + * and is direct one-to-one mapping." + */ + for (u32 tc = 0; tc < num_tc; tc++) { + count = ndev->tc_to_txq[tc].count; + offset = ndev->tc_to_txq[tc].offset; + + if (pclass & BIT(tc)) + preemptible_txqs |= GENMASK(offset + count - 1, offset); + + /* This is 1:1 mapping, go to next TC */ + if (count == 1) + continue; + + if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) { + NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG); + return -EINVAL; + } + + queue_weight = priv->plat->tx_queues_cfg[offset].weight; + + for (u32 i = 1; i < count; i++) { + if (priv->plat->tx_queues_cfg[offset + i].weight != + queue_weight) { + NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG, + queue_weight, tc); + return -EINVAL; + } + } + } + +update_mapping: + val = readl(priv->ioaddr + MTL_FPE_CTRL_STS); + writel(u32_replace_bits(val, preemptible_txqs, DWMAC5_PREEMPTION_CLASS), + priv->ioaddr + MTL_FPE_CTRL_STS); + + return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index bf33a51d229e9d..6c6eb6790e836a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -39,6 +39,12 @@ #define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10)) #define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10)) +#define MTL_FPE_CTRL_STS 0x00000c90 +/* Preemption Classification */ +#define DWMAC5_PREEMPTION_CLASS GENMASK(15, 8) +/* Additional Fragment Size of preempted frames */ +#define DWMAC5_ADD_FRAG_SZ GENMASK(1, 0) + #define MTL_RXP_CONTROL_STATUS 0x00000ca0 #define RXPI BIT(31) #define NPE GENMASK(23, 16) @@ -104,10 +110,14 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, u32 sub_second_inc, u32 systime_flags); void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, u32 num_txq, u32 num_rxq, - bool enable); + bool tx_enable, bool pmac_enable); void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, enum stmmac_mpacket_type type); int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev); +int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr); +void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size); +int dwmac5_fpe_map_preemption_class(struct net_device *ndev, + struct netlink_ext_ack *extack, u32 pclass); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 72672391675f6e..5d9c18f5bbf587 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -22,6 +22,31 @@ #define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ #define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ +/* Following DMA defines are channels oriented */ +#define DMA_CHAN_BASE_OFFSET 0x100 + +static inline u32 dma_chan_base_addr(u32 base, u32 chan) +{ + return base + chan * DMA_CHAN_BASE_OFFSET; +} + +#define DMA_CHAN_BUS_MODE(chan) dma_chan_base_addr(DMA_BUS_MODE, chan) +#define DMA_CHAN_XMT_POLL_DEMAND(chan) \ + dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan) 
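/* A minimal sketch, not part of the patch, of how the channel-oriented
 * macros above compose: each DMA channel's CSR block is placed
 * DMA_CHAN_BASE_OFFSET (0x100) above the previous one, so the legacy
 * single-channel offsets are simply the chan == 0 case. The 0x101c value
 * is the DMA_INTR_ENA offset quoted in the surrounding context lines;
 * everything else here is illustrative only.
 */
static inline u32 sketch_dma_chan_intr_ena(u32 chan)
{
	/* DMA_CHAN_INTR_ENA(0) == 0x101c, DMA_CHAN_INTR_ENA(1) == 0x111c */
	return 0x0000101c + chan * 0x100;
}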
+#define DMA_CHAN_RCV_POLL_DEMAND(chan) \ + dma_chan_base_addr(DMA_RCV_POLL_DEMAND, chan) +#define DMA_CHAN_RCV_BASE_ADDR(chan) \ + dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan) +#define DMA_CHAN_TX_BASE_ADDR(chan) \ + dma_chan_base_addr(DMA_TX_BASE_ADDR, chan) +#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan) +#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan) +#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan) +#define DMA_CHAN_MISSED_FRAME_CTR(chan) \ + dma_chan_base_addr(DMA_MISSED_FRAME_CTR, chan) +#define DMA_CHAN_RX_WATCHDOG(chan) \ + dma_chan_base_addr(DMA_RX_WATCHDOG, chan) + /* SW Reset */ #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ @@ -152,7 +177,7 @@ #define NUM_DWMAC1000_DMA_REGS 23 #define NUM_DWMAC4_DMA_REGS 27 -void dwmac_enable_dma_transmission(void __iomem *ioaddr); +void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan); void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx); void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 85e18f9a22f920..4846bf49c576a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -28,65 +28,65 @@ int dwmac_dma_reset(void __iomem *ioaddr) } /* CSR1 enables the transmit DMA to check for new descriptor */ -void dwmac_enable_dma_transmission(void __iomem *ioaddr) +void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan) { - writel(1, ioaddr + DMA_XMT_POLL_DEMAND); + writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan)); } void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx) { - u32 value = readl(ioaddr + DMA_INTR_ENA); + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); if (rx) value |= DMA_INTR_DEFAULT_RX; if (tx) value |= DMA_INTR_DEFAULT_TX; - writel(value, ioaddr + DMA_INTR_ENA); + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); } void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx) { - u32 value = readl(ioaddr + DMA_INTR_ENA); + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); if (rx) value &= ~DMA_INTR_DEFAULT_RX; if (tx) value &= ~DMA_INTR_DEFAULT_TX; - writel(value, ioaddr + DMA_INTR_ENA); + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); } void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value |= DMA_CONTROL_ST; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value &= ~DMA_CONTROL_ST; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value |= DMA_CONTROL_SR; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); 
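/* These start/stop helpers keep the same read-modify-write pattern as
 * before; only the register moves from the shared DMA_CONTROL (CSR6) to
 * the per-channel DMA_CHAN_CONTROL(chan) copy, with DMA_CONTROL_ST
 * gating the TX engine and DMA_CONTROL_SR gating RX.
 */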
value &= ~DMA_CONTROL_SR; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } #ifdef DWMAC_DMA_DEBUG @@ -165,7 +165,7 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; /* read the status register (CSR5) */ - u32 intr_status = readl(ioaddr + DMA_STATUS); + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); #ifdef DWMAC_DMA_DEBUG /* Enable it to monitor DMA rx/tx status in case of critical problems */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index f196cd99d51049..f519d43738b080 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -846,42 +846,41 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= { { false, "UNKNOWN", "Unknown Error" }, /* 31 */ }; -#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error" -#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error" - +static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error"; +static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error"; static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = { - { true, "TDPES0", DPP_TX_ERR }, - { true, "TDPES1", DPP_TX_ERR }, - { true, "TDPES2", DPP_TX_ERR }, - { true, "TDPES3", DPP_TX_ERR }, - { true, "TDPES4", DPP_TX_ERR }, - { true, "TDPES5", DPP_TX_ERR }, - { true, "TDPES6", DPP_TX_ERR }, - { true, "TDPES7", DPP_TX_ERR }, - { true, "TDPES8", DPP_TX_ERR }, - { true, "TDPES9", DPP_TX_ERR }, - { true, "TDPES10", DPP_TX_ERR }, - { true, "TDPES11", DPP_TX_ERR }, - { true, "TDPES12", DPP_TX_ERR }, - { true, "TDPES13", DPP_TX_ERR }, - { true, "TDPES14", DPP_TX_ERR }, - { true, "TDPES15", DPP_TX_ERR }, - { true, "RDPES0", DPP_RX_ERR }, - { true, "RDPES1", DPP_RX_ERR }, - { true, "RDPES2", DPP_RX_ERR }, - { true, "RDPES3", DPP_RX_ERR }, - { true, "RDPES4", DPP_RX_ERR }, - { true, "RDPES5", DPP_RX_ERR }, - { true, "RDPES6", DPP_RX_ERR }, - { true, "RDPES7", DPP_RX_ERR }, - { true, "RDPES8", DPP_RX_ERR }, - { true, "RDPES9", DPP_RX_ERR }, - { true, "RDPES10", DPP_RX_ERR }, - { true, "RDPES11", DPP_RX_ERR }, - { true, "RDPES12", DPP_RX_ERR }, - { true, "RDPES13", DPP_RX_ERR }, - { true, "RDPES14", DPP_RX_ERR }, - { true, "RDPES15", DPP_RX_ERR }, + { true, "TDPES0", dpp_tx_err }, + { true, "TDPES1", dpp_tx_err }, + { true, "TDPES2", dpp_tx_err }, + { true, "TDPES3", dpp_tx_err }, + { true, "TDPES4", dpp_tx_err }, + { true, "TDPES5", dpp_tx_err }, + { true, "TDPES6", dpp_tx_err }, + { true, "TDPES7", dpp_tx_err }, + { true, "TDPES8", dpp_tx_err }, + { true, "TDPES9", dpp_tx_err }, + { true, "TDPES10", dpp_tx_err }, + { true, "TDPES11", dpp_tx_err }, + { true, "TDPES12", dpp_tx_err }, + { true, "TDPES13", dpp_tx_err }, + { true, "TDPES14", dpp_tx_err }, + { true, "TDPES15", dpp_tx_err }, + { true, "RDPES0", dpp_rx_err }, + { true, "RDPES1", dpp_rx_err }, + { true, "RDPES2", dpp_rx_err }, + { true, "RDPES3", dpp_rx_err }, + { true, "RDPES4", dpp_rx_err }, + { true, "RDPES5", dpp_rx_err }, + { true, "RDPES6", dpp_rx_err }, + { true, "RDPES7", dpp_rx_err }, + { true, "RDPES8", dpp_rx_err }, + { true, "RDPES9", dpp_rx_err }, + { true, "RDPES10", dpp_rx_err }, + { true, "RDPES11", dpp_rx_err }, + { true, "RDPES12", dpp_rx_err }, + { true, "RDPES13", dpp_rx_err }, + { true, "RDPES14", dpp_rx_err }, + { true, "RDPES15", dpp_rx_err }, }; static void 
dwxgmac3_handle_dma_err(struct net_device *ndev, @@ -1505,13 +1504,14 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en, writel(value, ioaddr + XGMAC_RX_CONFIG); } -static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, - u32 num_txq, - u32 num_rxq, bool enable) +static void dwxgmac3_fpe_configure(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, + bool tx_enable, bool pmac_enable) { u32 value; - if (!enable) { + if (!tx_enable) { value = readl(ioaddr + XGMAC_FPE_CTRL_STS); value &= ~XGMAC_EFPE; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index fc82862a612c74..389aad7b5c1ee3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -56,10 +56,12 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p) static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { - p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN); + u32 flags = XGMAC_RDES3_OWN; if (!disable_rx_ic) - p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC); + flags |= XGMAC_RDES3_IOC; + + p->des3 |= cpu_to_le32(flags); } static int dwxgmac2_get_tx_ls(struct dma_desc *p) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index dd2ab6185c40e8..7840bc403788ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -20,7 +20,7 @@ static int dwxgmac2_dma_reset(void __iomem *ioaddr) } static void dwxgmac2_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 29367105df5482..88cce28b2f9805 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -171,7 +171,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac4_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = &dwmac510_tc_ops, + .tc = &dwmac4_tc_ops, .mmc = &dwmac_mmc_ops, .est = &dwmac510_est_ops, .setup = dwmac4_setup, @@ -252,7 +252,7 @@ static const struct stmmac_hwif_entry { .mac = &dwxgmac210_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = &dwmac510_tc_ops, + .tc = &dwxgmac_tc_ops, .mmc = &dwxgmac_mmc_ops, .est = &dwmac510_est_ops, .setup = dwxgmac2_setup, @@ -273,7 +273,7 @@ static const struct stmmac_hwif_entry { .mac = &dwxlgmac2_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = &dwmac510_tc_ops, + .tc = &dwxgmac_tc_ops, .mmc = &dwxgmac_mmc_ops, .est = &dwmac510_est_ops, .setup = dwxlgmac2_setup, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e53c3236277498..d5a9f01ecac53f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -7,6 +7,7 @@ #include #include +#include #define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) 
\ ({ \ @@ -28,6 +29,8 @@ struct stmmac_extra_stats; struct stmmac_priv; struct stmmac_safety_stats; +struct stmmac_fpe_cfg; +enum stmmac_mpacket_type; struct dma_desc; struct dma_extended_desc; struct dma_edesc; @@ -175,8 +178,7 @@ struct dma_features; struct stmmac_dma_ops { /* DMA core initialization */ int (*reset)(void __iomem *ioaddr); - void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, - int atds); + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg); void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, u32 chan); void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -198,7 +200,7 @@ struct stmmac_dma_ops { /* To track extra statistic (if supported) */ void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x, void __iomem *ioaddr); - void (*enable_dma_transmission) (void __iomem *ioaddr); + void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan); void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx); void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -420,11 +422,16 @@ struct stmmac_ops { void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, u32 num_txq, u32 num_rxq, - bool enable); + bool tx_enable, bool pmac_enable); void (*fpe_send_mpacket)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, enum stmmac_mpacket_type type); int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev); + int (*fpe_get_add_frag_size)(const void __iomem *ioaddr); + void (*fpe_set_add_frag_size)(void __iomem *ioaddr, u32 add_frag_size); + int (*fpe_map_preemption_class)(struct net_device *ndev, + struct netlink_ext_ack *extack, + u32 pclass); }; #define stmmac_core_init(__priv, __args...) \ @@ -529,6 +536,12 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args) #define stmmac_fpe_irq_status(__priv, __args...) \ stmmac_do_callback(__priv, mac, fpe_irq_status, __args) +#define stmmac_fpe_get_add_frag_size(__priv, __args...) \ + stmmac_do_callback(__priv, mac, fpe_get_add_frag_size, __args) +#define stmmac_fpe_set_add_frag_size(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_set_add_frag_size, __args) +#define stmmac_fpe_map_preemption_class(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_map_preemption_class, __args) /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { @@ -616,6 +629,8 @@ struct stmmac_tc_ops { struct tc_etf_qopt_offload *qopt); int (*query_caps)(struct stmmac_priv *priv, struct tc_query_caps_base *base); + int (*setup_mqprio)(struct stmmac_priv *priv, + struct tc_mqprio_qopt_offload *qopt); }; #define stmmac_tc_init(__priv, __args...) \ @@ -632,6 +647,8 @@ struct stmmac_tc_ops { stmmac_do_callback(__priv, tc, setup_etf, __args) #define stmmac_tc_query_caps(__priv, __args...) \ stmmac_do_callback(__priv, tc, query_caps, __args) +#define stmmac_tc_setup_mqprio(__priv, __args...) 
\ + stmmac_do_callback(__priv, tc, setup_mqprio, __args) struct stmmac_counters; @@ -675,7 +692,9 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops; extern const struct stmmac_ops dwmac410_ops; extern const struct stmmac_dma_ops dwmac410_dma_ops; extern const struct stmmac_ops dwmac510_ops; +extern const struct stmmac_tc_ops dwmac4_tc_ops; extern const struct stmmac_tc_ops dwmac510_tc_ops; +extern const struct stmmac_tc_ops dwxgmac_tc_ops; extern const struct stmmac_ops dwxgmac210_ops; extern const struct stmmac_ops dwxlgmac2_ops; extern const struct stmmac_dma_ops dwxgmac210_dma_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index b23b920eedb1cd..ea135203ff2e60 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -146,6 +146,32 @@ struct stmmac_channel { u32 index; }; +/* FPE link-partner hand-shaking mPacket type */ +enum stmmac_mpacket_type { + MPACKET_VERIFY = 0, + MPACKET_RESPONSE = 1, +}; + +#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3 +#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128 + +struct stmmac_fpe_cfg { + /* Serialize access to MAC Merge state between ethtool requests + * and link state updates. + */ + spinlock_t lock; + + u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */ + + enum ethtool_mm_verify_status status; + struct timer_list verify_timer; + bool verify_enabled; + int verify_retries; + bool pmac_enabled; + u32 verify_time; + bool tx_enabled; +}; + struct stmmac_tc_entry { bool in_use; bool in_hw; @@ -339,11 +365,8 @@ struct stmmac_priv { struct workqueue_struct *wq; struct work_struct service_task; - /* Workqueue for handling FPE hand-shaking */ - unsigned long fpe_task_state; - struct workqueue_struct *fpe_wq; - struct work_struct fpe_task; - char wq_name[IFNAMSIZ + 4]; + /* Frame Preemption feature (FPE) */ + struct stmmac_fpe_cfg fpe_cfg; /* TC Handling */ unsigned int tc_entries_max; @@ -397,7 +420,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv); int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled); -void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable); +void stmmac_fpe_apply(struct stmmac_priv *priv); static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 7008219fd88db0..2a37592a628101 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -19,6 +19,7 @@ #include "stmmac.h" #include "dwmac_dma.h" #include "dwxgmac2.h" +#include "dwmac5.h" #define REG_SPACE_SIZE 0x1060 #define GMAC4_REG_SPACE_SIZE 0x116C @@ -438,13 +439,6 @@ static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) } -static int stmmac_check_if_running(struct net_device *dev) -{ - if (!netif_running(dev)) - return -EBUSY; - return 0; -} - static int stmmac_ethtool_get_regs_len(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -1207,13 +1201,13 @@ static int stmmac_get_ts_info(struct net_device *dev, info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (priv->ptp_clock) info->phc_index = 
ptp_clock_index(priv->ptp_clock); + else + info->phc_index = 0; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1270,10 +1264,101 @@ static int stmmac_set_tunable(struct net_device *dev, return ret; } +static int stmmac_get_mm(struct net_device *ndev, + struct ethtool_mm_state *state) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long flags; + u32 frag_size; + + if (!priv->dma_cap.fpesel) + return -EOPNOTSUPP; + + spin_lock_irqsave(&priv->fpe_cfg.lock, flags); + + state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS; + state->verify_enabled = priv->fpe_cfg.verify_enabled; + state->pmac_enabled = priv->fpe_cfg.pmac_enabled; + state->verify_time = priv->fpe_cfg.verify_time; + state->tx_enabled = priv->fpe_cfg.tx_enabled; + state->verify_status = priv->fpe_cfg.status; + state->rx_min_frag_size = ETH_ZLEN; + + /* FPE active if common tx_enabled and + * (verification success or disabled(forced)) + */ + if (state->tx_enabled && + (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED || + state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED)) + state->tx_active = true; + else + state->tx_active = false; + + frag_size = stmmac_fpe_get_add_frag_size(priv, priv->ioaddr); + state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size); + + spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags); + + return 0; +} + +static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg, + struct netlink_ext_ack *extack) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; + unsigned long flags; + u32 frag_size; + int err; + + err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size, + &frag_size, extack); + if (err) + return err; + + /* Wait for the verification that's currently in progress to finish */ + timer_shutdown_sync(&fpe_cfg->verify_timer); + + spin_lock_irqsave(&fpe_cfg->lock, flags); + + fpe_cfg->verify_enabled = cfg->verify_enabled; + fpe_cfg->pmac_enabled = cfg->pmac_enabled; + fpe_cfg->verify_time = cfg->verify_time; + fpe_cfg->tx_enabled = cfg->tx_enabled; + + if (!cfg->verify_enabled) + fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED; + + stmmac_fpe_set_add_frag_size(priv, priv->ioaddr, frag_size); + stmmac_fpe_apply(priv); + + spin_unlock_irqrestore(&fpe_cfg->lock, flags); + + return 0; +} + +static void stmmac_get_mm_stats(struct net_device *ndev, + struct ethtool_mm_stats *s) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + struct stmmac_counters *mmc = &priv->mmc; + + if (!priv->dma_cap.rmon) + return; + + stmmac_mmc_read(priv, priv->mmcaddr, mmc); + + s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr; + s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr; + s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr; + s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr; + s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr; + s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr; +} + static const struct ethtool_ops stmmac_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, - .begin = stmmac_check_if_running, .get_drvinfo = stmmac_ethtool_getdrvinfo, .get_msglevel = stmmac_ethtool_getmsglevel, .set_msglevel = stmmac_ethtool_setmsglevel, @@ -1309,6 +1394,9 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .set_tunable = stmmac_set_tunable, .get_link_ksettings = stmmac_ethtool_get_link_ksettings, .set_link_ksettings = stmmac_ethtool_set_link_ksettings, + .get_mm = 
stmmac_get_mm, + .set_mm = stmmac_set_mm, + .get_mm_stats = stmmac_get_mm_stats, }; void stmmac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f3a1b179aaeaca..e2140482270a88 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -968,18 +968,31 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) { - struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; - enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; - enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; - bool *hs_enable = &fpe_cfg->hs_enable; + struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; + unsigned long flags; - if (is_up && *hs_enable) { - stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, - MPACKET_VERIFY); + timer_shutdown_sync(&fpe_cfg->verify_timer); + + spin_lock_irqsave(&fpe_cfg->lock, flags); + + if (is_up && fpe_cfg->pmac_enabled) { + /* VERIFY process requires pmac enabled when NIC comes up */ + stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + false, true); + + /* New link => maybe new partner => new verification process */ + stmmac_fpe_apply(priv); } else { - *lo_state = FPE_STATE_OFF; - *lp_state = FPE_STATE_OFF; + /* No link => turn off EFPE */ + stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + false, false); } + + spin_unlock_irqrestore(&fpe_cfg->lock, flags); } static void stmmac_mac_link_down(struct phylink_config *config, @@ -2022,7 +2035,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, rx_q->queue_index = queue; rx_q->priv_data = priv; - pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? 
PP_FLAG_DMA_SYNC_DEV : 0); pp_params.pool_size = dma_conf->dma_rx_size; num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); pp_params.order = ilog2(num_pages); @@ -2367,9 +2380,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) if (txfifosz == 0) txfifosz = priv->dma_cap.tx_fifo_size; - /* Adjust for real per queue fifo size */ - rxfifosz /= rx_channels_count; - txfifosz /= tx_channels_count; + /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */ + if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { + rxfifosz /= rx_channels_count; + txfifosz /= tx_channels_count; + } if (priv->plat->force_thresh_dma_mode) { txmode = tc; @@ -2553,7 +2568,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) true, priv->mode, true, true, xdp_desc.len); - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); xsk_tx_metadata_to_compl(meta, &tx_q->tx_skbuff_dma[entry].xsk_meta); @@ -3003,7 +3018,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; u32 chan = 0; - int atds = 0; int ret = 0; if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { @@ -3012,7 +3026,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) - atds = 1; + priv->plat->dma_cfg->atds = 1; ret = stmmac_reset(priv, priv->ioaddr); if (ret) { @@ -3021,7 +3035,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) } /* DMA Configuration */ - stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); if (priv->plat->axi) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); @@ -3357,27 +3371,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) } } -static int stmmac_fpe_start_wq(struct stmmac_priv *priv) -{ - char *name; - - clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); - clear_bit(__FPE_REMOVING, &priv->fpe_task_state); - - name = priv->wq_name; - sprintf(name, "%s-fpe", priv->dev->name); - - priv->fpe_wq = create_singlethread_workqueue(name); - if (!priv->fpe_wq) { - netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); - - return -ENOMEM; - } - netdev_info(priv->dev, "FPE workqueue start"); - - return 0; -} - /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. @@ -3532,13 +3525,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) stmmac_set_hw_vlan_mode(priv, priv->hw); - if (priv->dma_cap.fpesel) { - stmmac_fpe_start_wq(priv); - - if (priv->plat->fpe_cfg->enable) - stmmac_fpe_handshake(priv, true); - } - return 0; } @@ -4035,18 +4021,6 @@ static int stmmac_open(struct net_device *dev) return ret; } -static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) -{ - set_bit(__FPE_REMOVING, &priv->fpe_task_state); - - if (priv->fpe_wq) { - destroy_workqueue(priv->fpe_wq); - priv->fpe_wq = NULL; - } - - netdev_info(priv->dev, "FPE workqueue stop"); -} - /** * stmmac_release - close entry point of the driver * @dev : device pointer. 
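/* A worked example (with made-up sizes) of the FIFO split in
 * stmmac_dma_operation_mode() above: on DW QoS Eth (gmac4) and DW XGMAC
 * the Tx/Rx FIFOs are shared by all queues and must be divided per
 * channel, while the older single-queue cores keep the whole FIFO.
 */
static int sketch_per_queue_rxfifo(int rxfifosz, u32 rx_channels_count,
				   bool has_gmac4, bool has_xgmac)
{
	if (has_gmac4 || has_xgmac)
		rxfifosz /= rx_channels_count;	/* e.g. 16384 / 4 == 4096 */

	return rxfifosz;
}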
@@ -4094,10 +4068,10 @@ static int stmmac_release(struct net_device *dev) stmmac_release_ptp(priv); - pm_runtime_put(priv->device); - if (priv->dma_cap.fpesel) - stmmac_fpe_stop_wq(priv); + timer_shutdown_sync(&priv->fpe_cfg.verify_timer); + + pm_runtime_put(priv->device); return 0; } @@ -4754,7 +4728,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); @@ -4981,7 +4955,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, u64_stats_update_end(&txq_stats->q_syncp); } - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); tx_q->cur_tx = entry; @@ -5981,45 +5955,31 @@ static int stmmac_set_features(struct net_device *netdev, static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) { - struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; - enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; - enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; - bool *hs_enable = &fpe_cfg->hs_enable; + struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; - if (status == FPE_EVENT_UNKNOWN || !*hs_enable) - return; + /* This is interrupt context, just spin_lock() */ + spin_lock(&fpe_cfg->lock); - /* If LP has sent verify mPacket, LP is FPE capable */ - if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { - if (*lp_state < FPE_STATE_CAPABLE) - *lp_state = FPE_STATE_CAPABLE; + if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN) + goto unlock_out; - /* If user has requested FPE enable, quickly response */ - if (*hs_enable) - stmmac_fpe_send_mpacket(priv, priv->ioaddr, - fpe_cfg, - MPACKET_RESPONSE); - } - - /* If Local has sent verify mPacket, Local is FPE capable */ - if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { - if (*lo_state < FPE_STATE_CAPABLE) - *lo_state = FPE_STATE_CAPABLE; - } + /* LP has sent verify mPacket */ + if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) + stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, + MPACKET_RESPONSE); - /* If LP has sent response mPacket, LP is entering FPE ON */ - if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) - *lp_state = FPE_STATE_ENTERING_ON; + /* Local has sent verify mPacket */ + if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER && + fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) + fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING; - /* If Local has sent response mPacket, Local is entering FPE ON */ - if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) - *lo_state = FPE_STATE_ENTERING_ON; + /* LP has sent response mPacket */ + if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP && + fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING) + fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED; - if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && - !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && - priv->fpe_wq) { - queue_work(priv->fpe_wq, &priv->fpe_task); - } +unlock_out: + spin_unlock(&fpe_cfg->lock); } static void stmmac_common_interrupt(struct stmmac_priv *priv) @@ -6256,6 +6216,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, switch (type) { case TC_QUERY_CAPS: return stmmac_tc_query_caps(priv, priv, type_data); + case TC_SETUP_QDISC_MQPRIO: + return 
stmmac_tc_setup_mqprio(priv, priv, type_data); case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &stmmac_block_cb_list, @@ -7375,68 +7337,87 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) return ret; } -#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" -static void stmmac_fpe_lp_task(struct work_struct *work) +/** + * stmmac_fpe_verify_timer - Timer for MAC Merge verification + * @t: timer_list struct containing private info + * + * Verify the MAC Merge capability in the local TX direction, by + * transmitting Verify mPackets up to 3 times. Wait until link + * partner responds with a Response mPacket, otherwise fail. + */ +static void stmmac_fpe_verify_timer(struct timer_list *t) { - struct stmmac_priv *priv = container_of(work, struct stmmac_priv, - fpe_task); - struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; - enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; - enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; - bool *hs_enable = &fpe_cfg->hs_enable; - bool *enable = &fpe_cfg->enable; - int retries = 20; - - while (retries-- > 0) { - /* Bail out immediately if FPE handshake is OFF */ - if (*lo_state == FPE_STATE_OFF || !*hs_enable) - break; - - if (*lo_state == FPE_STATE_ENTERING_ON && - *lp_state == FPE_STATE_ENTERING_ON) { - stmmac_fpe_configure(priv, priv->ioaddr, - fpe_cfg, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, - *enable); + struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer); + struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv, + fpe_cfg); + unsigned long flags; + bool rearm = false; - netdev_info(priv->dev, "configured FPE\n"); + spin_lock_irqsave(&fpe_cfg->lock, flags); - *lo_state = FPE_STATE_ON; - *lp_state = FPE_STATE_ON; - netdev_info(priv->dev, "!!! 
BOTH FPE stations ON\n"); - break; - } - - if ((*lo_state == FPE_STATE_CAPABLE || - *lo_state == FPE_STATE_ENTERING_ON) && - *lp_state != FPE_STATE_ON) { - netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, - *lo_state, *lp_state); + switch (fpe_cfg->status) { + case ETHTOOL_MM_VERIFY_STATUS_INITIAL: + case ETHTOOL_MM_VERIFY_STATUS_VERIFYING: + if (fpe_cfg->verify_retries != 0) { stmmac_fpe_send_mpacket(priv, priv->ioaddr, - fpe_cfg, - MPACKET_VERIFY); + fpe_cfg, MPACKET_VERIFY); + rearm = true; + } else { + fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED; } - /* Sleep then retry */ - msleep(500); + + fpe_cfg->verify_retries--; + break; + + case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED: + stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + true, true); + break; + + default: + break; + } + + if (rearm) { + mod_timer(&fpe_cfg->verify_timer, + jiffies + msecs_to_jiffies(fpe_cfg->verify_time)); } - clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); + spin_unlock_irqrestore(&fpe_cfg->lock, flags); } -void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) +static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg) { - if (priv->plat->fpe_cfg->hs_enable != enable) { - if (enable) { - stmmac_fpe_send_mpacket(priv, priv->ioaddr, - priv->plat->fpe_cfg, - MPACKET_VERIFY); - } else { - priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; - priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; - } + if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled && + fpe_cfg->verify_enabled && + fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED && + fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) { + timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0); + mod_timer(&fpe_cfg->verify_timer, jiffies); + } +} - priv->plat->fpe_cfg->hs_enable = enable; +void stmmac_fpe_apply(struct stmmac_priv *priv) +{ + struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; + + /* If verification is disabled, configure FPE right away. + * Otherwise let the timer code do it. + */ + if (!fpe_cfg->verify_enabled) { + stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + fpe_cfg->tx_enabled, + fpe_cfg->pmac_enabled); + } else { + fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL; + fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES; + + if (netif_running(priv->dev)) + stmmac_fpe_verify_timer_arm(fpe_cfg); } } @@ -7554,9 +7535,6 @@ int stmmac_dvr_probe(struct device *device, INIT_WORK(&priv->service_task, stmmac_service_task); - /* Initialize Link Partner FPE workqueue */ - INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); - /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances */ @@ -7721,6 +7699,12 @@ int stmmac_dvr_probe(struct device *device, mutex_init(&priv->lock); + priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES; + priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS; + priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED; + timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0); + spin_lock_init(&priv->fpe_cfg.lock); + /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be * changed at run-time and it is fixed. 
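[Editor's note, not part of the patch] To make the verification flow above easier to follow, here is a small stand-alone model of the retry loop that stmmac_fpe_verify_timer() implements: send a Verify mPacket up to three times and fail if the link partner never answers with a Response mPacket. The enum mirrors the ethtool MAC Merge verify states; the loop structure, variable names and the simulated link-partner response are illustrative only, not the driver's code.

    #include <stdio.h>
    #include <stdbool.h>

    enum mm_verify_status {
        MM_VERIFY_INITIAL,
        MM_VERIFY_VERIFYING,
        MM_VERIFY_SUCCEEDED,
        MM_VERIFY_FAILED,
    };

    int main(void)
    {
        enum mm_verify_status status = MM_VERIFY_INITIAL;
        int retries = 3;               /* like STMMAC_FPE_MM_MAX_VERIFY_RETRIES */
        bool lp_responded = false;     /* flip to true to simulate a Response mPacket */

        while (status == MM_VERIFY_INITIAL || status == MM_VERIFY_VERIFYING) {
            if (lp_responded) {        /* in the driver this comes from the FPE IRQ path */
                status = MM_VERIFY_SUCCEEDED;
                break;
            }
            if (retries-- == 0) {
                status = MM_VERIFY_FAILED;
                break;
            }
            printf("send Verify mPacket, %d retries left\n", retries);
            status = MM_VERIFY_VERIFYING;
        }

        printf("final status: %d\n", status);
        return 0;
    }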
Viceversa the driver'll try to @@ -7894,16 +7878,8 @@ int stmmac_suspend(struct device *dev) } rtnl_unlock(); - if (priv->dma_cap.fpesel) { - /* Disable FPE */ - stmmac_fpe_configure(priv, priv->ioaddr, - priv->plat->fpe_cfg, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, false); - - stmmac_fpe_handshake(priv, false); - stmmac_fpe_stop_wq(priv); - } + if (priv->dma_cap.fpesel) + timer_shutdown_sync(&priv->fpe_cfg.verify_timer); priv->speed = SPEED_UNKNOWN; return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 996f2bcd07a243..75ad2da1a37f1a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -282,16 +282,6 @@ static int tc_init(struct stmmac_priv *priv) if (ret) return -ENOMEM; - if (!priv->plat->fpe_cfg) { - priv->plat->fpe_cfg = devm_kzalloc(priv->device, - sizeof(*priv->plat->fpe_cfg), - GFP_KERNEL); - if (!priv->plat->fpe_cfg) - return -ENOMEM; - } else { - memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg)); - } - /* Fail silently as we can still use remaining features, e.g. CBS */ if (!dma_cap->frpsel) return 0; @@ -396,6 +386,7 @@ static int tc_setup_cbs(struct stmmac_priv *priv, return ret; priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; + return 0; } /* Final adjustments for HW */ @@ -941,9 +932,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv, struct tc_taprio_qopt_offload *qopt) { u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; + struct netlink_ext_ack *extack = qopt->mqprio.extack; struct timespec64 time, current_time, qopt_time; ktime_t current_time_ns; - bool fpe = false; int i, ret = 0; u64 ctr; @@ -1028,16 +1019,12 @@ static int tc_taprio_configure(struct stmmac_priv *priv, switch (qopt->entries[i].command) { case TC_TAPRIO_CMD_SET_GATES: - if (fpe) - return -EINVAL; break; case TC_TAPRIO_CMD_SET_AND_HOLD: gates |= BIT(0); - fpe = true; break; case TC_TAPRIO_CMD_SET_AND_RELEASE: gates &= ~BIT(0); - fpe = true; break; default: return -EOPNOTSUPP; @@ -1068,16 +1055,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv, tc_taprio_map_maxsdu_txq(priv, qopt); - if (fpe && !priv->dma_cap.fpesel) { - mutex_unlock(&priv->est_lock); - return -EOPNOTSUPP; - } - - /* Actual FPE register configuration will be done after FPE handshake - * is success. 
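[Editor's note, not part of the patch] For context on the preemptible_tcs value handed to stmmac_fpe_map_preemption_class() in the hunks around here: it is a bitmask of traffic classes, with bit n set meaning TC n should go out on the preemptible MAC. A tiny stand-alone sketch with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long preemptible_tcs = 0x06;  /* hypothetical: TC1 and TC2 preemptible */
        int num_tc = 4;                        /* hypothetical TC count */

        for (int tc = 0; tc < num_tc; tc++)
            printf("tc%d: %s\n", tc,
                   (preemptible_tcs & (1UL << tc)) ? "preemptible" : "express");
        return 0;
    }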
- */ - priv->plat->fpe_cfg->enable = fpe; - ret = stmmac_est_configure(priv, priv, priv->est, priv->plat->clk_ptp_rate); mutex_unlock(&priv->est_lock); @@ -1086,12 +1063,10 @@ static int tc_taprio_configure(struct stmmac_priv *priv, goto disable; } - netdev_info(priv->dev, "configured EST\n"); - - if (fpe) { - stmmac_fpe_handshake(priv, true); - netdev_info(priv->dev, "start FPE handshake\n"); - } + ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack, + qopt->mqprio.preemptible_tcs); + if (ret) + goto disable; return 0; @@ -1109,16 +1084,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, mutex_unlock(&priv->est_lock); } - priv->plat->fpe_cfg->enable = false; - stmmac_fpe_configure(priv, priv->ioaddr, - priv->plat->fpe_cfg, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, - false); - netdev_info(priv->dev, "disabled FPE\n"); - - stmmac_fpe_handshake(priv, false); - netdev_info(priv->dev, "stop FPE handshake\n"); + stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0); return ret; } @@ -1174,6 +1140,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv, return err; } +static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) +{ + if (!qopt->mqprio.preemptible_tcs) + return tc_setup_taprio(priv, qopt); + + NL_SET_ERR_MSG_MOD(qopt->mqprio.extack, + "taprio with FPE is not implemented for this MAC"); + + return -EOPNOTSUPP; +} + static int tc_setup_etf(struct stmmac_priv *priv, struct tc_etf_qopt_offload *qopt) { @@ -1198,6 +1176,13 @@ static int tc_query_caps(struct stmmac_priv *priv, struct tc_query_caps_base *base) { switch (base->type) { + case TC_SETUP_QDISC_MQPRIO: { + struct tc_mqprio_caps *caps = base->caps; + + caps->validate_queue_counts = true; + + return 0; + } case TC_SETUP_QDISC_TAPRIO: { struct tc_taprio_caps *caps = base->caps; @@ -1214,6 +1199,81 @@ static int tc_query_caps(struct stmmac_priv *priv, } } +static void stmmac_reset_tc_mqprio(struct net_device *ndev, + struct netlink_ext_ack *extack) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + netdev_reset_tc(ndev); + netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); + stmmac_fpe_map_preemption_class(priv, ndev, extack, 0); +} + +static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv, + struct tc_mqprio_qopt_offload *mqprio) +{ + struct netlink_ext_ack *extack = mqprio->extack; + struct tc_mqprio_qopt *qopt = &mqprio->qopt; + u32 offset, count, num_stack_tx_queues = 0; + struct net_device *ndev = priv->dev; + u32 num_tc = qopt->num_tc; + int err; + + if (!num_tc) { + stmmac_reset_tc_mqprio(ndev, extack); + return 0; + } + + err = netdev_set_num_tc(ndev, num_tc); + if (err) + return err; + + for (u32 tc = 0; tc < num_tc; tc++) { + offset = qopt->offset[tc]; + count = qopt->count[tc]; + num_stack_tx_queues += count; + + err = netdev_set_tc_queue(ndev, tc, count, offset); + if (err) + goto err_reset_tc; + } + + err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); + if (err) + goto err_reset_tc; + + err = stmmac_fpe_map_preemption_class(priv, ndev, extack, + mqprio->preemptible_tcs); + if (err) + goto err_reset_tc; + + return 0; + +err_reset_tc: + stmmac_reset_tc_mqprio(ndev, extack); + + return err; +} + +static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv, + struct tc_mqprio_qopt_offload *mqprio) +{ + NL_SET_ERR_MSG_MOD(mqprio->extack, + "mqprio HW offload is not implemented for this MAC"); + return -EOPNOTSUPP; +} + +const struct stmmac_tc_ops dwmac4_tc_ops = { + .init = tc_init, + 
.setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, + .setup_cls = tc_setup_cls, + .setup_taprio = tc_setup_taprio_without_fpe, + .setup_etf = tc_setup_etf, + .query_caps = tc_query_caps, + .setup_mqprio = tc_setup_mqprio_unimplemented, +}; + const struct stmmac_tc_ops dwmac510_tc_ops = { .init = tc_init, .setup_cls_u32 = tc_setup_cls_u32, @@ -1222,4 +1282,16 @@ const struct stmmac_tc_ops dwmac510_tc_ops = { .setup_taprio = tc_setup_taprio, .setup_etf = tc_setup_etf, .query_caps = tc_query_caps, + .setup_mqprio = tc_setup_dwmac510_mqprio, +}; + +const struct stmmac_tc_ops dwxgmac_tc_ops = { + .init = tc_init, + .setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, + .setup_cls = tc_setup_cls, + .setup_taprio = tc_setup_taprio_without_fpe, + .setup_etf = tc_setup_etf, + .query_caps = tc_query_caps, + .setup_mqprio = tc_setup_mqprio_unimplemented, }; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 2f30715e9b67f0..1e887d951a0417 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -114,37 +114,23 @@ static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct vnet *vp = (struct vnet *)netdev_priv(dev); struct vnet_port *port; - char *p = (char *)buf; switch (stringset) { case ETH_SS_STATS: memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); - p += sizeof(ethtool_stats_keys); + buf += sizeof(ethtool_stats_keys); rcu_read_lock(); list_for_each_entry_rcu(port, &vp->port_list, list) { - snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM", - port->q_index, port->switch_port ? "s" : "q", - port->raddr); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets", - port->q_index); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets", - port->q_index); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes", - port->q_index); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes", - port->q_index); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "p%u.event_up", - port->q_index); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset", - port->q_index); - p += ETH_GSTRING_LEN; + ethtool_sprintf(&buf, "p%u.%s-%pM", port->q_index, + port->switch_port ? "s" : "q", + port->raddr); + ethtool_sprintf(&buf, "p%u.rx_packets", port->q_index); + ethtool_sprintf(&buf, "p%u.tx_packets", port->q_index); + ethtool_sprintf(&buf, "p%u.rx_bytes", port->q_index); + ethtool_sprintf(&buf, "p%u.tx_bytes", port->q_index); + ethtool_sprintf(&buf, "p%u.event_up", port->q_index); + ethtool_sprintf(&buf, "p%u.event_reset", port->q_index); } rcu_read_unlock(); break; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index ede5f7890fb4b0..fc77f424f90bb1 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1671,7 +1671,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, #endif #ifdef BDX_LLTX - netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */ + netif_trans_update(ndev); /* dev->lltx driver :( */ #endif ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; @@ -2019,7 +2019,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * set multicast list callback has to use priv->tx_lock.
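[Editor's note, not part of the patch] A brief stand-alone illustration of what tc_setup_dwmac510_mqprio() above does with the per-TC offset/count arrays: each TC claims a contiguous range of hardware queues and the counts are summed to derive the number of stack TX queues. All values are hypothetical, and the netdev helpers are only referenced in comments.

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_tc = 3;
        unsigned int offset[3] = { 0, 2, 4 };
        unsigned int count[3]  = { 2, 2, 4 };
        unsigned int num_stack_tx_queues = 0;

        for (unsigned int tc = 0; tc < num_tc; tc++) {
            /* the driver calls netdev_set_tc_queue(ndev, tc, count[tc], offset[tc]) here */
            printf("tc%u -> queues %u..%u\n", tc, offset[tc],
                   offset[tc] + count[tc] - 1);
            num_stack_tx_queues += count[tc];
        }

        /* then netif_set_real_num_tx_queues(ndev, num_stack_tx_queues) */
        printf("real number of stack TX queues: %u\n", num_stack_tx_queues);
        return 0;
    }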
*/ #ifdef BDX_LLTX - ndev->features |= NETIF_F_LLTX; + ndev->lltx = true; #endif /* MTU range: 60 - 16384 */ ndev->min_mtu = ETH_ZLEN; diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h index 909e7296cecfa4..47a2d3e5f8ede6 100644 --- a/drivers/net/ethernet/tehuti/tehuti.h +++ b/drivers/net/ethernet/tehuti/tehuti.h @@ -260,7 +260,7 @@ struct bdx_priv { int tx_update_mark; int tx_noupd; #endif - spinlock_t tx_lock; /* NETIF_F_LLTX mode */ + spinlock_t tx_lock; /* dev->lltx mode */ /* rarely used */ u8 port; diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index b60976947da500..9032444435e90c 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -427,9 +427,9 @@ static void am65_cpsw_get_channels(struct net_device *ndev, { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); - ch->max_rx = AM65_CPSW_MAX_RX_QUEUES; - ch->max_tx = AM65_CPSW_MAX_TX_QUEUES; - ch->rx_count = AM65_CPSW_MAX_RX_QUEUES; + ch->max_rx = AM65_CPSW_MAX_QUEUES; + ch->max_tx = AM65_CPSW_MAX_QUEUES; + ch->rx_count = common->rx_ch_num_flows; ch->tx_count = common->tx_ch_num; } @@ -447,9 +447,8 @@ static int am65_cpsw_set_channels(struct net_device *ndev, if (common->usage_count) return -EBUSY; - am65_cpsw_nuss_remove_tx_chns(common); - - return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count); + return am65_cpsw_nuss_update_tx_rx_chns(common, chs->tx_count, + chs->rx_count); } static void @@ -714,8 +713,6 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = am65_cpts_phc_index(common->cpts); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); @@ -915,80 +912,64 @@ static void am65_cpsw_get_mm_stats(struct net_device *ndev, s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD); } -static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *extack) -{ - struct am65_cpsw_common *common = am65_ndev_to_common(ndev); - struct am65_cpsw_tx_chn *tx_chn; - - tx_chn = &common->tx_chns[0]; - - coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000; - coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000; - - return 0; -} - static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue, struct ethtool_coalesce *coal) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); + struct am65_cpsw_rx_flow *rx_flow; struct am65_cpsw_tx_chn *tx_chn; - if (queue >= AM65_CPSW_MAX_TX_QUEUES) + if (queue >= AM65_CPSW_MAX_QUEUES) return -EINVAL; tx_chn = &common->tx_chns[queue]; - coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000; + rx_flow = &common->rx_chns.flows[queue]; + coal->rx_coalesce_usecs = rx_flow->rx_pace_timeout / 1000; + return 0; } -static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, +static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { - struct am65_cpsw_common *common = am65_ndev_to_common(ndev); - struct am65_cpsw_tx_chn *tx_chn; - - tx_chn = &common->tx_chns[0]; - - if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20) - return -EINVAL; - - if 
(coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) - return -EINVAL; - - common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000; - tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000; - - return 0; + return am65_cpsw_get_per_queue_coalesce(ndev, 0, coal); } static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue, struct ethtool_coalesce *coal) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); + struct am65_cpsw_rx_flow *rx_flow; struct am65_cpsw_tx_chn *tx_chn; - if (queue >= AM65_CPSW_MAX_TX_QUEUES) + if (queue >= AM65_CPSW_MAX_QUEUES) return -EINVAL; tx_chn = &common->tx_chns[queue]; - - if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) { - dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n", - queue); - coal->tx_coalesce_usecs = 20; - } + if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) + return -EINVAL; tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000; + rx_flow = &common->rx_chns.flows[queue]; + if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20) + return -EINVAL; + + rx_flow->rx_pace_timeout = coal->rx_coalesce_usecs * 1000; + return 0; } +static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + return am65_cpsw_set_per_queue_coalesce(ndev, 0, coal); +} + const struct ethtool_ops am65_cpsw_ethtool_ops_slave = { .begin = am65_cpsw_ethtool_op_begin, .complete = am65_cpsw_ethtool_op_complete, diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index b06b8872b4eb7f..d253727b160f5f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -138,7 +138,7 @@ AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN) #define AM65_CPSW_ALE_AGEOUT_DEFAULT 30 -/* Number of TX/RX descriptors */ +/* Number of TX/RX descriptors per channel/flow */ #define AM65_CPSW_MAX_TX_DESC 500 #define AM65_CPSW_MAX_RX_DESC 500 @@ -150,6 +150,7 @@ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) #define AM65_CPSW_DEFAULT_TX_CHNS 8 +#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS 1 /* CPPI streaming packet interface */ #define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF @@ -331,7 +332,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev, } static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, - struct page *page) + struct page *page, u32 flow_idx) { struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; struct cppi5_host_desc_t *desc_rx; @@ -364,7 +365,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, swdata = cppi5_hdesc_get_swdata(desc_rx); *((void **)swdata) = page_address(page); - return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma); + return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx, + desc_rx, desc_dma); } void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common) @@ -399,22 +401,27 @@ static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port); static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common) { struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct am65_cpsw_rx_flow *flow; struct xdp_rxq_info *rxq; - int i; + int id, port; - for (i = 0; i < common->port_num; i++) { - if (!common->ports[i].ndev) - continue; + for (id = 0; id < common->rx_ch_num_flows; id++) { + flow = &rx_chn->flows[id]; - rxq = &common->ports[i].xdp_rxq; + for (port = 0; port < common->port_num; port++) { + if (!common->ports[port].ndev) + continue; 
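[Editor's note, not part of the patch] As a side note on the per-queue coalesce handlers above, a minimal sketch of the 20 us floor and the microsecond-to-nanosecond conversion used for the pace timeout. The helper name and the sample values are made up for the example; only the validation rule and the *1000 scaling come from the hunk.

    #include <stdio.h>
    #include <errno.h>

    /* Reject a non-zero value below 20 us, otherwise store the pace
     * timeout in nanoseconds, as the driver does for tx/rx_pace_timeout. */
    static int set_pace_timeout(unsigned int usecs, unsigned long *pace_ns)
    {
        if (usecs && usecs < 20)
            return -EINVAL;

        *pace_ns = (unsigned long)usecs * 1000;
        return 0;
    }

    int main(void)
    {
        unsigned long ns = 0;
        int err;

        err = set_pace_timeout(50, &ns);
        printf("50us -> err=%d pace_ns=%lu\n", err, ns);

        err = set_pace_timeout(10, &ns);
        printf("10us -> err=%d (rejected, below the 20us floor)\n", err);
        return 0;
    }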
- if (xdp_rxq_info_is_reg(rxq)) - xdp_rxq_info_unreg(rxq); - } + rxq = &common->ports[port].xdp_rxq[id]; + + if (xdp_rxq_info_is_reg(rxq)) + xdp_rxq_info_unreg(rxq); + } - if (rx_chn->page_pool) { - page_pool_destroy(rx_chn->page_pool); - rx_chn->page_pool = NULL; + if (flow->page_pool) { + page_pool_destroy(flow->page_pool); + flow->page_pool = NULL; + } } } @@ -428,31 +435,44 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common) .nid = dev_to_node(common->dev), .dev = common->dev, .dma_dir = DMA_BIDIRECTIONAL, - .napi = &common->napi_rx, + /* .napi set dynamically */ }; + struct am65_cpsw_rx_flow *flow; struct xdp_rxq_info *rxq; struct page_pool *pool; - int i, ret; - - pool = page_pool_create(&pp_params); - if (IS_ERR(pool)) - return PTR_ERR(pool); + int id, port, ret; + + for (id = 0; id < common->rx_ch_num_flows; id++) { + flow = &rx_chn->flows[id]; + pp_params.napi = &flow->napi_rx; + pool = page_pool_create(&pp_params); + if (IS_ERR(pool)) { + ret = PTR_ERR(pool); + goto err; + } - rx_chn->page_pool = pool; + flow->page_pool = pool; - for (i = 0; i < common->port_num; i++) { - if (!common->ports[i].ndev) - continue; + /* using same page pool is allowed as no running rx handlers + * simultaneously for both ndevs + */ + for (port = 0; port < common->port_num; port++) { + if (!common->ports[port].ndev) + continue; - rxq = &common->ports[i].xdp_rxq; + rxq = &common->ports[port].xdp_rxq[id]; - ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0); - if (ret) - goto err; + ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev, + id, flow->napi_rx.napi_id); + if (ret) + goto err; - ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); - if (ret) - goto err; + ret = xdp_rxq_info_reg_mem_model(rxq, + MEM_TYPE_PAGE_POOL, + pool); + if (ret) + goto err; + } } return 0; @@ -497,25 +517,27 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch desc_idx); } -static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn, +static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow, struct page *page, bool allow_direct, int desc_idx) { - page_pool_put_full_page(rx_chn->page_pool, page, allow_direct); - rx_chn->pages[desc_idx] = NULL; + page_pool_put_full_page(flow->page_pool, page, allow_direct); + flow->pages[desc_idx] = NULL; } static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma) { - struct am65_cpsw_rx_chn *rx_chn = data; + struct am65_cpsw_rx_flow *flow = data; struct cppi5_host_desc_t *desc_rx; + struct am65_cpsw_rx_chn *rx_chn; dma_addr_t buf_dma; u32 buf_dma_len; void *page_addr; void **swdata; int desc_idx; + rx_chn = &flow->common->rx_chns; desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); page_addr = *swdata; @@ -526,7 +548,7 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma) desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx, rx_chn->dsize_log2); - am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx); + am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx); } static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn, @@ -602,7 +624,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) struct am65_cpsw_host *host_p = am65_common_get_host(common); struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; struct am65_cpsw_tx_chn *tx_chn = common->tx_chns; - int port_idx, i, ret, tx; + int port_idx, i, ret, tx, flow_idx; + struct am65_cpsw_rx_flow 
*flow; u32 val, port_mask; struct page *page; @@ -670,27 +693,26 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) return ret; } - for (i = 0; i < rx_chn->descs_num; i++) { - page = page_pool_dev_alloc_pages(rx_chn->page_pool); - if (!page) { - ret = -ENOMEM; - if (i) + for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) { + flow = &rx_chn->flows[flow_idx]; + for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) { + page = page_pool_dev_alloc_pages(flow->page_pool); + if (!page) { + dev_err(common->dev, "cannot allocate page in flow %d\n", + flow_idx); + ret = -ENOMEM; goto fail_rx; + } + flow->pages[i] = page; - return ret; - } - rx_chn->pages[i] = page; - - ret = am65_cpsw_nuss_rx_push(common, page); - if (ret < 0) { - dev_err(common->dev, - "cannot submit page to channel rx: %d\n", - ret); - am65_cpsw_put_page(rx_chn, page, false, i); - if (i) + ret = am65_cpsw_nuss_rx_push(common, page, flow_idx); + if (ret < 0) { + dev_err(common->dev, + "cannot submit page to rx channel flow %d, error %d\n", + flow_idx, ret); + am65_cpsw_put_page(flow, page, false, i); goto fail_rx; - - return ret; + } } } @@ -700,6 +722,14 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) goto fail_rx; } + for (i = 0; i < common->rx_ch_num_flows ; i++) { + napi_enable(&rx_chn->flows[i].napi_rx); + if (rx_chn->flows[i].irq_disabled) { + rx_chn->flows[i].irq_disabled = false; + enable_irq(rx_chn->flows[i].irq); + } + } + for (tx = 0; tx < common->tx_ch_num; tx++) { ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn); if (ret) { @@ -711,12 +741,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) napi_enable(&tx_chn[tx].napi_tx); } - napi_enable(&common->napi_rx); - if (common->rx_irq_disabled) { - common->rx_irq_disabled = false; - enable_irq(rx_chn->irq); - } - dev_dbg(common->dev, "cpsw_nuss started\n"); return 0; @@ -727,11 +751,24 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) tx--; } + for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) { + flow = &rx_chn->flows[flow_idx]; + if (!flow->irq_disabled) { + disable_irq(flow->irq); + flow->irq_disabled = true; + } + napi_disable(&flow->napi_rx); + } + k3_udma_glue_disable_rx_chn(rx_chn->rx_chn); fail_rx: - k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn, - am65_cpsw_nuss_rx_cleanup, 0); + for (i = 0; i < common->rx_ch_num_flows; i++) + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i], + am65_cpsw_nuss_rx_cleanup, 0); + + am65_cpsw_destroy_xdp_rxqs(common); + return ret; } @@ -780,12 +817,12 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) dev_err(common->dev, "rx teardown timeout\n"); } - napi_disable(&common->napi_rx); - hrtimer_cancel(&common->rx_hrtimer); - - for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++) - k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn, - am65_cpsw_nuss_rx_cleanup, !!i); + for (i = 0; i < common->rx_ch_num_flows; i++) { + napi_disable(&rx_chn->flows[i].napi_rx); + hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer); + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i], + am65_cpsw_nuss_rx_cleanup, 0); + } k3_udma_glue_disable_rx_chn(rx_chn->rx_chn); @@ -794,10 +831,6 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) writel(0, common->cpsw_base + AM65_CPSW_REG_CTL); writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN); - for (i = 0; i < rx_chn->descs_num; i++) { - if (rx_chn->pages[i]) - am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i); - } 
am65_cpsw_destroy_xdp_rxqs(common); dev_dbg(common->dev, "cpsw_nuss stopped\n"); @@ -868,7 +901,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) goto runtime_put; } - ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES); + ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows); if (ret) { dev_err(common->dev, "cannot set real number of rx queues\n"); goto runtime_put; @@ -992,12 +1025,12 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, return ret; } -static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, +static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, struct am65_cpsw_port *port, struct xdp_buff *xdp, int desc_idx, int cpu, int *len) { - struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct am65_cpsw_common *common = flow->common; struct am65_cpsw_ndev_priv *ndev_priv; struct net_device *ndev = port->ndev; struct am65_cpsw_ndev_stats *stats; @@ -1026,7 +1059,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, ret = AM65_CPSW_XDP_PASS; goto out; case XDP_TX: - tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES]; + tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES]; netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); xdpf = xdp_convert_buff_to_frame(xdp); @@ -1068,7 +1101,8 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, } page = virt_to_head_page(xdp->data); - am65_cpsw_put_page(rx_chn, page, true, desc_idx); + am65_cpsw_put_page(flow, page, true, desc_idx); + out: return ret; } @@ -1106,11 +1140,12 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info) } } -static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, - u32 flow_idx, int cpu, int *xdp_state) +static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, + int cpu, int *xdp_state) { - struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns; u32 buf_dma_len, pkt_len, port_id = 0, csum_info; + struct am65_cpsw_common *common = flow->common; struct am65_cpsw_ndev_priv *ndev_priv; struct am65_cpsw_ndev_stats *stats; struct cppi5_host_desc_t *desc_rx; @@ -1120,6 +1155,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, struct am65_cpsw_port *port; int headroom, desc_idx, ret; struct net_device *ndev; + u32 flow_idx = flow->id; struct sk_buff *skb; struct xdp_buff xdp; void *page_addr; @@ -1174,10 +1210,10 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, } if (port->xdp_prog) { - xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq); + xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]); xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM, pkt_len, false); - *xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx, + *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx, cpu, &pkt_len); if (*xdp_state != AM65_CPSW_XDP_PASS) goto allocate; @@ -1195,7 +1231,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, skb_mark_for_recycle(skb); skb->protocol = eth_type_trans(skb, ndev); am65_cpsw_nuss_rx_csum(skb, csum_info); - napi_gro_receive(&common->napi_rx, skb); + napi_gro_receive(&flow->napi_rx, skb); stats = this_cpu_ptr(ndev_priv->stats); @@ -1205,24 +1241,24 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, u64_stats_update_end(&stats->syncp); allocate: - new_page = page_pool_dev_alloc_pages(rx_chn->page_pool); + new_page = page_pool_dev_alloc_pages(flow->page_pool); if (unlikely(!new_page)) { dev_err(dev, "page alloc 
failed\n"); return -ENOMEM; } - rx_chn->pages[desc_idx] = new_page; + flow->pages[desc_idx] = new_page; if (netif_dormant(ndev)) { - am65_cpsw_put_page(rx_chn, new_page, true, desc_idx); + am65_cpsw_put_page(flow, new_page, true, desc_idx); ndev->stats.rx_dropped++; return 0; } requeue: - ret = am65_cpsw_nuss_rx_push(common, new_page); + ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx); if (WARN_ON(ret < 0)) { - am65_cpsw_put_page(rx_chn, new_page, true, desc_idx); + am65_cpsw_put_page(flow, new_page, true, desc_idx); ndev->stats.rx_errors++; ndev->stats.rx_dropped++; } @@ -1232,38 +1268,32 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer) { - struct am65_cpsw_common *common = - container_of(timer, struct am65_cpsw_common, rx_hrtimer); + struct am65_cpsw_rx_flow *flow = container_of(timer, + struct am65_cpsw_rx_flow, + rx_hrtimer); - enable_irq(common->rx_chns.irq); + enable_irq(flow->irq); return HRTIMER_NORESTART; } static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) { - struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx); - int flow = AM65_CPSW_MAX_RX_FLOWS; + struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx); + struct am65_cpsw_common *common = flow->common; int cpu = smp_processor_id(); int xdp_state_or = 0; int cur_budget, ret; int xdp_state; int num_rx = 0; - /* process every flow */ - while (flow--) { - cur_budget = budget - num_rx; - - while (cur_budget--) { - ret = am65_cpsw_nuss_rx_packets(common, flow, cpu, - &xdp_state); - xdp_state_or |= xdp_state; - if (ret) - break; - num_rx++; - } - - if (num_rx >= budget) + /* process only this flow */ + cur_budget = budget; + while (cur_budget--) { + ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state); + xdp_state_or |= xdp_state; + if (ret) break; + num_rx++; } if (xdp_state_or & AM65_CPSW_XDP_REDIRECT) @@ -1272,14 +1302,14 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget); if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) { - if (common->rx_irq_disabled) { - common->rx_irq_disabled = false; - if (unlikely(common->rx_pace_timeout)) { - hrtimer_start(&common->rx_hrtimer, - ns_to_ktime(common->rx_pace_timeout), + if (flow->irq_disabled) { + flow->irq_disabled = false; + if (unlikely(flow->rx_pace_timeout)) { + hrtimer_start(&flow->rx_hrtimer, + ns_to_ktime(flow->rx_pace_timeout), HRTIMER_MODE_REL_PINNED); } else { - enable_irq(common->rx_chns.irq); + enable_irq(flow->irq); } } } @@ -1527,11 +1557,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget) static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id) { - struct am65_cpsw_common *common = dev_id; + struct am65_cpsw_rx_flow *flow = dev_id; - common->rx_irq_disabled = true; + flow->irq_disabled = true; disable_irq_nosync(irq); - napi_schedule(&common->napi_rx); + napi_schedule(&flow->napi_rx); return IRQ_HANDLED; } @@ -2176,7 +2206,7 @@ static void am65_cpsw_nuss_free_tx_chns(void *data) } } -void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) +static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) { struct device *dev = common->dev; int i; @@ -2191,15 +2221,9 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) devm_free_irq(dev, tx_chn->irq, tx_chn); netif_napi_del(&tx_chn->napi_tx); - - if 
(!IS_ERR_OR_NULL(tx_chn->desc_pool)) - k3_cppi_desc_pool_destroy(tx_chn->desc_pool); - - if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) - k3_udma_glue_release_tx_chn(tx_chn->tx_chn); - - memset(tx_chn, 0, sizeof(*tx_chn)); } + + am65_cpsw_nuss_free_tx_chns(common); } static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) @@ -2331,19 +2355,22 @@ static void am65_cpsw_nuss_free_rx_chns(void *data) k3_udma_glue_release_rx_chn(rx_chn->rx_chn); } -static void am65_cpsw_nuss_remove_rx_chns(void *data) +static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common) { - struct am65_cpsw_common *common = data; struct device *dev = common->dev; struct am65_cpsw_rx_chn *rx_chn; + struct am65_cpsw_rx_flow *flows; + int i; rx_chn = &common->rx_chns; + flows = rx_chn->flows; devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common); - if (!(rx_chn->irq < 0)) - devm_free_irq(dev, rx_chn->irq, common); - - netif_napi_del(&common->napi_rx); + for (i = 0; i < common->rx_ch_num_flows; i++) { + if (!(flows[i].irq < 0)) + devm_free_irq(dev, flows[i].irq, &flows[i]); + netif_napi_del(&flows[i].napi_rx); + } am65_cpsw_nuss_free_rx_chns(common); @@ -2356,6 +2383,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 }; u32 max_desc_num = AM65_CPSW_MAX_RX_DESC; struct device *dev = common->dev; + struct am65_cpsw_rx_flow *flow; u32 hdesc_size, hdesc_size_out; u32 fdqring_id; int i, ret = 0; @@ -2364,12 +2392,21 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) AM65_CPSW_NAV_SW_DATA_SIZE); rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE; - rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS; + rx_cfg.flow_id_num = common->rx_ch_num_flows; rx_cfg.flow_id_base = common->rx_flow_id_base; /* init all flows */ rx_chn->dev = dev; - rx_chn->descs_num = max_desc_num; + rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num; + + for (i = 0; i < common->rx_ch_num_flows; i++) { + flow = &rx_chn->flows[i]; + flow->page_pool = NULL; + flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC, + sizeof(*flow->pages), GFP_KERNEL); + if (!flow->pages) + return -ENOMEM; + } rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg); if (IS_ERR(rx_chn->rx_chn)) { @@ -2392,13 +2429,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) rx_chn->dsize_log2 = __fls(hdesc_size_out); WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2)); - rx_chn->page_pool = NULL; - - rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num, - sizeof(*rx_chn->pages), GFP_KERNEL); - if (!rx_chn->pages) - return -ENOMEM; - common->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base); @@ -2422,6 +2452,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG, }; + flow = &rx_chn->flows[i]; + flow->id = i; + flow->common = common; + rx_flow_cfg.ring_rxfdq0_id = fdqring_id; rx_flow_cfg.rx_cfg.size = max_desc_num; rx_flow_cfg.rxfdq_cfg.size = max_desc_num; @@ -2438,30 +2472,37 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn, i); - rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); - - if (rx_chn->irq < 0) { + flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); + if (flow->irq <= 0) { dev_err(dev, "Failed to get rx dma irq %d\n", - rx_chn->irq); - ret = rx_chn->irq; + flow->irq); + ret = flow->irq; goto 
err; } - } - - netif_napi_add(common->dma_ndev, &common->napi_rx, - am65_cpsw_nuss_rx_poll); - hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); - common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback; - ret = devm_request_irq(dev, rx_chn->irq, - am65_cpsw_nuss_rx_irq, - IRQF_TRIGGER_HIGH, dev_name(dev), common); - if (ret) { - dev_err(dev, "failure requesting rx irq %u, %d\n", - rx_chn->irq, ret); - goto err; + snprintf(flow->name, + sizeof(flow->name), "%s-rx%d", + dev_name(dev), i); + netif_napi_add(common->dma_ndev, &flow->napi_rx, + am65_cpsw_nuss_rx_poll); + hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback; + + ret = devm_request_irq(dev, flow->irq, + am65_cpsw_nuss_rx_irq, + IRQF_TRIGGER_HIGH, + flow->name, flow); + if (ret) { + dev_err(dev, "failure requesting rx %d irq %u, %d\n", + i, flow->irq, ret); + goto err; + } } + /* setup classifier to route priorities to flows */ + cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows); + err: i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common); if (i) { @@ -2705,8 +2746,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) /* alloc netdev */ port->ndev = devm_alloc_etherdev_mqs(common->dev, sizeof(struct am65_cpsw_ndev_priv), - AM65_CPSW_MAX_TX_QUEUES, - AM65_CPSW_MAX_RX_QUEUES); + AM65_CPSW_MAX_QUEUES, + AM65_CPSW_MAX_QUEUES); if (!port->ndev) { dev_err(dev, "error allocating slave net_device %u\n", port->port_id); @@ -2777,7 +2818,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) } phylink = phylink_create(&port->slave.phylink_config, - of_node_to_fwnode(port->slave.port_np), + of_fwnode_handle(port->slave.port_np), port->slave.phy_if, &am65_cpsw_phylink_mac_ops); if (IS_ERR(phylink)) @@ -3303,9 +3344,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn); } - for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++) - k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan, - am65_cpsw_nuss_rx_cleanup, !!i); + for (i = 0; i < common->rx_ch_num_flows; i++) + k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, + &rx_chan->flows[i], + am65_cpsw_nuss_rx_cleanup, 0); k3_udma_glue_disable_rx_chn(rx_chan->rx_chn); @@ -3346,12 +3388,21 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) return ret; } -int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx) +int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common, + int num_tx, int num_rx) { int ret; + am65_cpsw_nuss_remove_tx_chns(common); + am65_cpsw_nuss_remove_rx_chns(common); + common->tx_ch_num = num_tx; + common->rx_ch_num_flows = num_rx; ret = am65_cpsw_nuss_init_tx_chns(common); + if (ret) + return ret; + + ret = am65_cpsw_nuss_init_rx_chns(common); return ret; } @@ -3481,6 +3532,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) common->rx_flow_id_base = -1; init_completion(&common->tdown_complete); common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS; + common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS; common->pf_p0_rx_ptype_rrobin = false; common->default_vlan = 1; @@ -3672,8 +3724,10 @@ static int am65_cpsw_nuss_resume(struct device *dev) return ret; /* If RX IRQ was disabled before suspend, keep it disabled */ - if (common->rx_irq_disabled) - disable_irq(common->rx_chns.irq); + for (i = 0; i < common->rx_ch_num_flows; i++) { + if 
(common->rx_chns.flows[i].irq_disabled) + disable_irq(common->rx_chns.flows[i].irq); + } am65_cpts_resume(common->cpts); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index e2ce2be320bd6c..dc8d544230dc81 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -21,9 +21,7 @@ struct am65_cpts; #define HOST_PORT_NUM 0 -#define AM65_CPSW_MAX_TX_QUEUES 8 -#define AM65_CPSW_MAX_RX_QUEUES 1 -#define AM65_CPSW_MAX_RX_FLOWS 1 +#define AM65_CPSW_MAX_QUEUES 8 /* both TX & RX */ #define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014 @@ -58,7 +56,7 @@ struct am65_cpsw_port { struct am65_cpsw_qos qos; struct devlink_port devlink_port; struct bpf_prog *xdp_prog; - struct xdp_rxq_info xdp_rxq; + struct xdp_rxq_info xdp_rxq[AM65_CPSW_MAX_QUEUES]; /* Only for suspend resume context */ u32 vid_context; }; @@ -94,16 +92,27 @@ struct am65_cpsw_tx_chn { u32 rate_mbps; }; +struct am65_cpsw_rx_flow { + u32 id; + struct napi_struct napi_rx; + struct am65_cpsw_common *common; + int irq; + bool irq_disabled; + struct hrtimer rx_hrtimer; + unsigned long rx_pace_timeout; + struct page_pool *page_pool; + struct page **pages; + char name[32]; +}; + struct am65_cpsw_rx_chn { struct device *dev; struct device *dma_dev; struct k3_cppi_desc_pool *desc_pool; struct k3_udma_glue_rx_channel *rx_chn; - struct page_pool *page_pool; - struct page **pages; u32 descs_num; unsigned char dsize_log2; - int irq; + struct am65_cpsw_rx_flow flows[AM65_CPSW_MAX_QUEUES]; }; #define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0) @@ -145,16 +154,12 @@ struct am65_cpsw_common { u32 tx_ch_rate_msk; u32 rx_flow_id_base; - struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES]; + struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_QUEUES]; struct completion tdown_complete; atomic_t tdown_cnt; + int rx_ch_num_flows; struct am65_cpsw_rx_chn rx_chns; - struct napi_struct napi_rx; - - bool rx_irq_disabled; - struct hrtimer rx_hrtimer; - unsigned long rx_pace_timeout; u32 nuss_ver; u32 cpsw_ver; @@ -203,8 +208,8 @@ struct am65_cpsw_ndev_priv { #define am65_common_get_host(common) (&(common)->host) #define am65_common_get_port(common, id) (&(common)->ports[(id) - 1]) -#define am65_cpsw_napi_to_common(pnapi) \ - container_of(pnapi, struct am65_cpsw_common, napi_rx) +#define am65_cpsw_napi_to_rx_flow(pnapi) \ + container_of(pnapi, struct am65_cpsw_rx_flow, napi_rx) #define am65_cpsw_napi_to_tx_chn(pnapi) \ container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx) @@ -215,8 +220,8 @@ struct am65_cpsw_ndev_priv { extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave; void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common); -void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common); -int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx); +int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common, + int num_tx, int num_rx); bool am65_cpsw_port_dev_check(const struct net_device *dev); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 64bf22cd860c9a..8d02d2b2142937 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -45,6 +46,24 @@ #define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C #define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg))) +#define ALE_POLICER_PORT_OUI 0x100 +#define ALE_POLICER_DA_SA 0x104 +#define ALE_POLICER_VLAN 0x108 +#define ALE_POLICER_ETHERTYPE_IPSA 
0x10c +#define ALE_POLICER_IPDA 0x110 +#define ALE_POLICER_PIR 0x118 +#define ALE_POLICER_CIR 0x11c +#define ALE_POLICER_TBL_CTL 0x120 +#define ALE_POLICER_CTL 0x124 +#define ALE_POLICER_TEST_CTL 0x128 +#define ALE_POLICER_HIT_STATUS 0x12c +#define ALE_THREAD_DEF 0x134 +#define ALE_THREAD_CTL 0x138 +#define ALE_THREAD_VAL 0x13c + +#define ALE_POLICER_TBL_WRITE_ENABLE BIT(31) +#define ALE_POLICER_TBL_INDEX_MASK GENMASK(4, 0) + #define AM65_CPSW_ALE_THREAD_DEF_REG 0x134 /* ALE_AGING_TIMER */ @@ -76,7 +95,8 @@ enum { * @dev_id: ALE version/SoC id * @features: features supported by ALE * @tbl_entries: number of ALE entries - * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg. + * @reg_fields: pointer to array of register field configuration + * @num_fields: number of fields in the reg_fields array * @nu_switch_ale: NU Switch ALE * @vlan_entry_tbl: ALE vlan entry fields description tbl */ @@ -84,7 +104,8 @@ struct cpsw_ale_dev_id { const char *dev_id; u32 features; u32 tbl_entries; - u32 major_ver_mask; + const struct reg_field *reg_fields; + int num_fields; bool nu_switch_ale; const struct ale_entry_fld *vlan_entry_tbl; }; @@ -102,7 +123,7 @@ struct cpsw_ale_dev_id { #define ALE_UCAST_TOUCHED 3 #define ALE_TABLE_SIZE_MULTIPLIER 1024 -#define ALE_STATUS_SIZE_MASK 0x1f +#define ALE_POLICER_SIZE_MULTIPLIER 8 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) { @@ -1292,25 +1313,111 @@ void cpsw_ale_stop(struct cpsw_ale *ale) cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); } +static const struct reg_field ale_fields_cpsw[] = { + /* CPSW_ALE_IDVER_REG */ + [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7), + [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 15), +}; + +static const struct reg_field ale_fields_cpsw_nu[] = { + /* CPSW_ALE_IDVER_REG */ + [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7), + [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 10), + /* CPSW_ALE_STATUS_REG */ + [ALE_ENTRIES] = REG_FIELD(ALE_STATUS, 0, 7), + [ALE_POLICERS] = REG_FIELD(ALE_STATUS, 8, 15), + /* CPSW_ALE_POLICER_PORT_OUI_REG */ + [POL_PORT_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 31, 31), + [POL_TRUNK_ID] = REG_FIELD(ALE_POLICER_PORT_OUI, 30, 30), + [POL_PORT_NUM] = REG_FIELD(ALE_POLICER_PORT_OUI, 25, 25), + [POL_PRI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 19, 19), + [POL_PRI_VAL] = REG_FIELD(ALE_POLICER_PORT_OUI, 16, 18), + [POL_OUI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 15, 15), + [POL_OUI_INDEX] = REG_FIELD(ALE_POLICER_PORT_OUI, 0, 5), + + /* CPSW_ALE_POLICER_DA_SA_REG */ + [POL_DST_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 31, 31), + [POL_DST_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 16, 21), + [POL_SRC_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 15, 15), + [POL_SRC_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 0, 5), + + /* CPSW_ALE_POLICER_VLAN_REG */ + [POL_OVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 31, 31), + [POL_OVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 16, 21), + [POL_IVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 15, 15), + [POL_IVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 0, 5), + + /* CPSW_ALE_POLICER_ETHERTYPE_IPSA_REG */ + [POL_ETHERTYPE_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 31, 31), + [POL_ETHERTYPE_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 16, 21), + [POL_IPSRC_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 15, 15), + [POL_IPSRC_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 0, 5), + + /* CPSW_ALE_POLICER_IPDA_REG */ + [POL_IPDST_MEN] = REG_FIELD(ALE_POLICER_IPDA, 31, 31), + [POL_IPDST_INDEX] = REG_FIELD(ALE_POLICER_IPDA, 16, 21), + + /* CPSW_ALE_POLICER_TBL_CTL_REG */ + /** + * REG_FIELDS not defined for this as fields 
cannot be correctly + * used independently + */ + + /* CPSW_ALE_POLICER_CTL_REG */ + [POL_EN] = REG_FIELD(ALE_POLICER_CTL, 31, 31), + [POL_RED_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 29, 29), + [POL_YELLOW_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 28, 28), + [POL_YELLOW_THRESH] = REG_FIELD(ALE_POLICER_CTL, 24, 26), + [POL_POL_MATCH_MODE] = REG_FIELD(ALE_POLICER_CTL, 22, 23), + [POL_PRIORITY_THREAD_EN] = REG_FIELD(ALE_POLICER_CTL, 21, 21), + [POL_MAC_ONLY_DEF_DIS] = REG_FIELD(ALE_POLICER_CTL, 20, 20), + + /* CPSW_ALE_POLICER_TEST_CTL_REG */ + [POL_TEST_CLR] = REG_FIELD(ALE_POLICER_TEST_CTL, 31, 31), + [POL_TEST_CLR_RED] = REG_FIELD(ALE_POLICER_TEST_CTL, 30, 30), + [POL_TEST_CLR_YELLOW] = REG_FIELD(ALE_POLICER_TEST_CTL, 29, 29), + [POL_TEST_CLR_SELECTED] = REG_FIELD(ALE_POLICER_TEST_CTL, 28, 28), + [POL_TEST_ENTRY] = REG_FIELD(ALE_POLICER_TEST_CTL, 0, 4), + + /* CPSW_ALE_POLICER_HIT_STATUS_REG */ + [POL_STATUS_HIT] = REG_FIELD(ALE_POLICER_HIT_STATUS, 31, 31), + [POL_STATUS_HIT_RED] = REG_FIELD(ALE_POLICER_HIT_STATUS, 30, 30), + [POL_STATUS_HIT_YELLOW] = REG_FIELD(ALE_POLICER_HIT_STATUS, 29, 29), + + /* CPSW_ALE_THREAD_DEF_REG */ + [ALE_DEFAULT_THREAD_EN] = REG_FIELD(ALE_THREAD_DEF, 15, 15), + [ALE_DEFAULT_THREAD_VAL] = REG_FIELD(ALE_THREAD_DEF, 0, 5), + + /* CPSW_ALE_THREAD_CTL_REG */ + [ALE_THREAD_CLASS_INDEX] = REG_FIELD(ALE_THREAD_CTL, 0, 4), + + /* CPSW_ALE_THREAD_VAL_REG */ + [ALE_THREAD_ENABLE] = REG_FIELD(ALE_THREAD_VAL, 15, 15), + [ALE_THREAD_VALUE] = REG_FIELD(ALE_THREAD_VAL, 0, 5), +}; + static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = { { /* am3/4/5, dra7. dm814x, 66ak2hk-gbe */ .dev_id = "cpsw", .tbl_entries = 1024, - .major_ver_mask = 0xff, + .reg_fields = ale_fields_cpsw, + .num_fields = ARRAY_SIZE(ale_fields_cpsw), .vlan_entry_tbl = vlan_entry_cpsw, }, { /* 66ak2h_xgbe */ .dev_id = "66ak2h-xgbe", .tbl_entries = 2048, - .major_ver_mask = 0xff, + .reg_fields = ale_fields_cpsw, + .num_fields = ARRAY_SIZE(ale_fields_cpsw), .vlan_entry_tbl = vlan_entry_cpsw, }, { .dev_id = "66ak2el", .features = CPSW_ALE_F_STATUS_REG, - .major_ver_mask = 0x7, + .reg_fields = ale_fields_cpsw_nu, + .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu), .nu_switch_ale = true, .vlan_entry_tbl = vlan_entry_nu, }, @@ -1318,7 +1425,8 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = { .dev_id = "66ak2g", .features = CPSW_ALE_F_STATUS_REG, .tbl_entries = 64, - .major_ver_mask = 0x7, + .reg_fields = ale_fields_cpsw_nu, + .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu), .nu_switch_ale = true, .vlan_entry_tbl = vlan_entry_nu, }, @@ -1326,20 +1434,23 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = { .dev_id = "am65x-cpsw2g", .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING, .tbl_entries = 64, - .major_ver_mask = 0x7, + .reg_fields = ale_fields_cpsw_nu, + .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu), .nu_switch_ale = true, .vlan_entry_tbl = vlan_entry_nu, }, { .dev_id = "j721e-cpswxg", .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING, - .major_ver_mask = 0x7, + .reg_fields = ale_fields_cpsw_nu, + .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu), .vlan_entry_tbl = vlan_entry_k3_cpswxg, }, { .dev_id = "am64-cpswxg", .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING, - .major_ver_mask = 0x7, + .reg_fields = ale_fields_cpsw_nu, + .num_fields = ARRAY_SIZE(ale_fields_cpsw_nu), .vlan_entry_tbl = vlan_entry_k3_cpswxg, .tbl_entries = 512, }, @@ -1361,47 +1472,81 @@ cpsw_ale_dev_id *cpsw_ale_match_id(const struct cpsw_ale_dev_id *id, return NULL; } +static const struct 
regmap_config ale_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .name = "cpsw-ale", +}; + +static int cpsw_ale_regfield_init(struct cpsw_ale *ale) +{ + const struct reg_field *reg_fields = ale->params.reg_fields; + struct device *dev = ale->params.dev; + struct regmap *regmap = ale->regmap; + int i; + + for (i = 0; i < ale->params.num_fields; i++) { + ale->fields[i] = devm_regmap_field_alloc(dev, regmap, + reg_fields[i]); + if (IS_ERR(ale->fields[i])) { + dev_err(dev, "Unable to allocate regmap field %d\n", i); + return PTR_ERR(ale->fields[i]); + } + } + + return 0; +} + struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) { + u32 ale_entries, rev_major, rev_minor, policers; const struct cpsw_ale_dev_id *ale_dev_id; struct cpsw_ale *ale; - u32 rev, ale_entries; + int ret; ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id); if (!ale_dev_id) return ERR_PTR(-EINVAL); params->ale_entries = ale_dev_id->tbl_entries; - params->major_ver_mask = ale_dev_id->major_ver_mask; params->nu_switch_ale = ale_dev_id->nu_switch_ale; + params->reg_fields = ale_dev_id->reg_fields; + params->num_fields = ale_dev_id->num_fields; ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL); if (!ale) return ERR_PTR(-ENOMEM); + ale->regmap = devm_regmap_init_mmio(params->dev, params->ale_regs, + &ale_regmap_cfg); + if (IS_ERR(ale->regmap)) { + dev_err(params->dev, "Couldn't create CPSW ALE regmap\n"); + return ERR_PTR(-ENOMEM); + } + + ale->params = *params; + ret = cpsw_ale_regfield_init(ale); + if (ret) + return ERR_PTR(ret); ale->p0_untag_vid_mask = devm_bitmap_zalloc(params->dev, VLAN_N_VID, GFP_KERNEL); if (!ale->p0_untag_vid_mask) return ERR_PTR(-ENOMEM); - ale->params = *params; ale->ageout = ale->params.ale_ageout * HZ; ale->features = ale_dev_id->features; ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl; - rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER); - ale->version = - (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) | - ALE_VERSION_MINOR(rev); + regmap_field_read(ale->fields[MINOR_VER], &rev_minor); + regmap_field_read(ale->fields[MAJOR_VER], &rev_major); + ale->version = rev_major << 8 | rev_minor; dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n", - ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask), - ALE_VERSION_MINOR(rev)); + rev_major, rev_minor); if (ale->features & CPSW_ALE_F_STATUS_REG && !ale->params.ale_entries) { - ale_entries = - readl_relaxed(ale->params.ale_regs + ALE_STATUS) & - ALE_STATUS_SIZE_MASK; + regmap_field_read(ale->fields[ALE_ENTRIES], &ale_entries); /* ALE available on newer NetCP switches has introduced * a register, ALE_STATUS, to indicate the size of ALE * table which shows the size as a multiple of 1024 entries. @@ -1415,8 +1560,20 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) ale_entries *= ALE_TABLE_SIZE_MULTIPLIER; ale->params.ale_entries = ale_entries; } + + if (ale->features & CPSW_ALE_F_STATUS_REG && + !ale->params.num_policers) { + regmap_field_read(ale->fields[ALE_POLICERS], &policers); + if (!policers) + return ERR_PTR(-EINVAL); + + policers *= ALE_POLICER_SIZE_MULTIPLIER; + ale->params.num_policers = policers; + } + dev_info(ale->params.dev, - "ALE Table size %ld\n", ale->params.ale_entries); + "ALE Table size %ld, Policers %ld\n", ale->params.ale_entries, + ale->params.num_policers); /* set default bits for existing h/w */ ale->port_mask_bits = ale->params.ale_ports; @@ -1480,3 +1637,97 @@ u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale) { return ale ? 
ale->params.ale_entries : 0; } + +/* Reads the specified policer index into ALE POLICER registers */ +static void cpsw_ale_policer_read_idx(struct cpsw_ale *ale, u32 idx) +{ + idx &= ALE_POLICER_TBL_INDEX_MASK; + writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL); +} + +/* Writes the ALE POLICER registers into the specified policer index */ +static void cpsw_ale_policer_write_idx(struct cpsw_ale *ale, u32 idx) +{ + idx &= ALE_POLICER_TBL_INDEX_MASK; + idx |= ALE_POLICER_TBL_WRITE_ENABLE; + writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL); +} + +/* enables/disables the custom thread value for the specified policer index */ +static void cpsw_ale_policer_thread_idx_enable(struct cpsw_ale *ale, u32 idx, + u32 thread_id, bool enable) +{ + regmap_field_write(ale->fields[ALE_THREAD_CLASS_INDEX], idx); + regmap_field_write(ale->fields[ALE_THREAD_VALUE], thread_id); + regmap_field_write(ale->fields[ALE_THREAD_ENABLE], enable ? 1 : 0); +} + +/* Disable all policer entries and thread mappings */ +static void cpsw_ale_policer_reset(struct cpsw_ale *ale) +{ + int i; + + for (i = 0; i < ale->params.num_policers ; i++) { + cpsw_ale_policer_read_idx(ale, i); + regmap_field_write(ale->fields[POL_PORT_MEN], 0); + regmap_field_write(ale->fields[POL_PRI_MEN], 0); + regmap_field_write(ale->fields[POL_OUI_MEN], 0); + regmap_field_write(ale->fields[POL_DST_MEN], 0); + regmap_field_write(ale->fields[POL_SRC_MEN], 0); + regmap_field_write(ale->fields[POL_OVLAN_MEN], 0); + regmap_field_write(ale->fields[POL_IVLAN_MEN], 0); + regmap_field_write(ale->fields[POL_ETHERTYPE_MEN], 0); + regmap_field_write(ale->fields[POL_IPSRC_MEN], 0); + regmap_field_write(ale->fields[POL_IPDST_MEN], 0); + regmap_field_write(ale->fields[POL_EN], 0); + regmap_field_write(ale->fields[POL_RED_DROP_EN], 0); + regmap_field_write(ale->fields[POL_YELLOW_DROP_EN], 0); + regmap_field_write(ale->fields[POL_PRIORITY_THREAD_EN], 0); + + cpsw_ale_policer_thread_idx_enable(ale, i, 0, 0); + } +} + +/* Default classifier is to map 8 user priorities to N receive channels */ +void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch) +{ + int pri, idx; + /* IEEE802.1D-2004, Standard for Local and metropolitan area networks + * Table G-2 - Traffic type acronyms + * Table G-3 - Defining traffic types + * User priority values 1 and 2 effectively communicate a lower + * priority than 0. In the below table 0 is assigned to higher priority + * thread than 1 and 2 wherever possible. + * The below table maps which thread the user priority needs to be + * sent to for a given number of threads (RX channels). Upper threads + * have higher priority. + * e.g. if number of threads is 8 then user priority 0 will map to + * pri_thread_map[8-1][0] i.e. 
thread 2 + */ + int pri_thread_map[8][8] = { { 0, 0, 0, 0, 0, 0, 0, 0, }, + { 0, 0, 0, 0, 1, 1, 1, 1, }, + { 0, 0, 0, 0, 1, 1, 2, 2, }, + { 1, 0, 0, 1, 2, 2, 3, 3, }, + { 1, 0, 0, 1, 2, 3, 4, 4, }, + { 1, 0, 0, 2, 3, 4, 5, 5, }, + { 1, 0, 0, 2, 3, 4, 5, 6, }, + { 2, 0, 1, 3, 4, 5, 6, 7, } }; + + cpsw_ale_policer_reset(ale); + + /* use first 8 classifiers to map 8 (DSCP/PCP) priorities to channels */ + for (pri = 0; pri < 8; pri++) { + idx = pri; + + /* Classifier 'idx' match on priority 'pri' */ + cpsw_ale_policer_read_idx(ale, idx); + regmap_field_write(ale->fields[POL_PRI_VAL], pri); + regmap_field_write(ale->fields[POL_PRI_MEN], 1); + cpsw_ale_policer_write_idx(ale, idx); + + /* Map Classifier 'idx' to thread provided by the map */ + cpsw_ale_policer_thread_idx_enable(ale, idx, + pri_thread_map[num_rx_ch - 1][pri], + 1); + } +} diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 6779ee111d57b2..87b7d1b3a34a90 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -8,11 +8,14 @@ #ifndef __TI_CPSW_ALE_H__ #define __TI_CPSW_ALE_H__ +struct reg_fields; + struct cpsw_ale_params { struct device *dev; void __iomem *ale_regs; unsigned long ale_ageout; /* in secs */ unsigned long ale_entries; + unsigned long num_policers; unsigned long ale_ports; /* NU Switch has specific handling as number of bits in ALE entries * are different than other versions of ALE. Also there are specific @@ -20,19 +23,70 @@ struct cpsw_ale_params { * to identify this hardware. */ bool nu_switch_ale; - /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So - * pass it from caller. - */ - u32 major_ver_mask; + const struct reg_field *reg_fields; + int num_fields; const char *dev_id; unsigned long bus_freq; }; struct ale_entry_fld; +struct regmap; + +enum ale_fields { + MINOR_VER, + MAJOR_VER, + ALE_ENTRIES, + ALE_POLICERS, + POL_PORT_MEN, + POL_TRUNK_ID, + POL_PORT_NUM, + POL_PRI_MEN, + POL_PRI_VAL, + POL_OUI_MEN, + POL_OUI_INDEX, + POL_DST_MEN, + POL_DST_INDEX, + POL_SRC_MEN, + POL_SRC_INDEX, + POL_OVLAN_MEN, + POL_OVLAN_INDEX, + POL_IVLAN_MEN, + POL_IVLAN_INDEX, + POL_ETHERTYPE_MEN, + POL_ETHERTYPE_INDEX, + POL_IPSRC_MEN, + POL_IPSRC_INDEX, + POL_IPDST_MEN, + POL_IPDST_INDEX, + POL_EN, + POL_RED_DROP_EN, + POL_YELLOW_DROP_EN, + POL_YELLOW_THRESH, + POL_POL_MATCH_MODE, + POL_PRIORITY_THREAD_EN, + POL_MAC_ONLY_DEF_DIS, + POL_TEST_CLR, + POL_TEST_CLR_RED, + POL_TEST_CLR_YELLOW, + POL_TEST_CLR_SELECTED, + POL_TEST_ENTRY, + POL_STATUS_HIT, + POL_STATUS_HIT_RED, + POL_STATUS_HIT_YELLOW, + ALE_DEFAULT_THREAD_EN, + ALE_DEFAULT_THREAD_VAL, + ALE_THREAD_CLASS_INDEX, + ALE_THREAD_ENABLE, + ALE_THREAD_VALUE, + /* terminator */ + ALE_FIELDS_MAX, +}; struct cpsw_ale { struct cpsw_ale_params params; struct timer_list timer; + struct regmap *regmap; + struct regmap_field *fields[ALE_FIELDS_MAX]; unsigned long ageout; u32 version; u32 features; @@ -140,5 +194,6 @@ int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask, int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask); void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask, bool add); +void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch); #endif diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index 53ed23d6872209..21d55a180ef6e8 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -725,8 +725,6 @@ int cpsw_get_ts_info(struct 
net_device *ndev, struct kernel_ethtool_ts_info *inf SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = cpsw->cpts->phc_index; info->tx_types = @@ -741,10 +739,7 @@ int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *inf int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; + SOF_TIMESTAMPING_TX_SOFTWARE; info->tx_types = 0; info->rx_filters = 0; return 0; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 2baa198ebfa090..557cc71b9dd22c 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1407,7 +1407,8 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) cpsw->slaves[i].ndev = ndev; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC; + ndev->netns_local = true; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 75c294ce6fb65d..5d6d1cf78e93f2 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -53,78 +53,6 @@ #define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n)) #define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10) -enum { - ICSS_IEP_GLOBAL_CFG_REG, - ICSS_IEP_GLOBAL_STATUS_REG, - ICSS_IEP_COMPEN_REG, - ICSS_IEP_SLOW_COMPEN_REG, - ICSS_IEP_COUNT_REG0, - ICSS_IEP_COUNT_REG1, - ICSS_IEP_CAPTURE_CFG_REG, - ICSS_IEP_CAPTURE_STAT_REG, - - ICSS_IEP_CAP6_RISE_REG0, - ICSS_IEP_CAP6_RISE_REG1, - - ICSS_IEP_CAP7_RISE_REG0, - ICSS_IEP_CAP7_RISE_REG1, - - ICSS_IEP_CMP_CFG_REG, - ICSS_IEP_CMP_STAT_REG, - ICSS_IEP_CMP0_REG0, - ICSS_IEP_CMP0_REG1, - ICSS_IEP_CMP1_REG0, - ICSS_IEP_CMP1_REG1, - - ICSS_IEP_CMP8_REG0, - ICSS_IEP_CMP8_REG1, - ICSS_IEP_SYNC_CTRL_REG, - ICSS_IEP_SYNC0_STAT_REG, - ICSS_IEP_SYNC1_STAT_REG, - ICSS_IEP_SYNC_PWIDTH_REG, - ICSS_IEP_SYNC0_PERIOD_REG, - ICSS_IEP_SYNC1_DELAY_REG, - ICSS_IEP_SYNC_START_REG, - ICSS_IEP_MAX_REGS, -}; - -/** - * struct icss_iep_plat_data - Plat data to handle SoC variants - * @config: Regmap configuration data - * @reg_offs: register offsets to capture offset differences across SoCs - * @flags: Flags to represent IEP properties - */ -struct icss_iep_plat_data { - const struct regmap_config *config; - u32 reg_offs[ICSS_IEP_MAX_REGS]; - u32 flags; -}; - -struct icss_iep { - struct device *dev; - void __iomem *base; - const struct icss_iep_plat_data *plat_data; - struct regmap *map; - struct device_node *client_np; - unsigned long refclk_freq; - int clk_tick_time; /* one refclk tick time in ns */ - struct ptp_clock_info ptp_info; - struct ptp_clock *ptp_clock; - struct mutex ptp_clk_mutex; /* PHC access serializer */ - u32 def_inc; - s16 slow_cmp_inc; - u32 slow_cmp_count; - const struct icss_iep_clockops *ops; - void *clockops_data; - u32 cycle_time_ns; - u32 perout_enabled; - bool pps_enabled; - int cap_cmp_irq; - u64 period; - u32 latch_enable; - struct work_struct work; -}; - /** * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter * @iep: Pointer to structure representing IEP. 
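Stepping back to the ALE classifier hunk in cpsw_ale.c above: the comment around pri_thread_map[][] explains that, for a given number of RX channels, each of the eight user priorities is steered to one receive thread, with higher-numbered threads carrying higher priority (so with eight channels, priority 0 resolves to thread 2 and priority 7 to thread 7). The following standalone sketch is an illustration only, not part of the patch; the helper name is made up, and it simply mirrors the table from cpsw_ale_classifier_setup_default() so the mapping can be checked in isolation.

	/* Illustration only: reproduces pri_thread_map[] from the hunk above. */
	static unsigned int example_pri_to_thread(unsigned int pri,
						  unsigned int num_rx_ch)
	{
		static const unsigned char map[8][8] = {
			{ 0, 0, 0, 0, 0, 0, 0, 0 },
			{ 0, 0, 0, 0, 1, 1, 1, 1 },
			{ 0, 0, 0, 0, 1, 1, 2, 2 },
			{ 1, 0, 0, 1, 2, 2, 3, 3 },
			{ 1, 0, 0, 1, 2, 3, 4, 4 },
			{ 1, 0, 0, 2, 3, 4, 5, 5 },
			{ 1, 0, 0, 2, 3, 4, 5, 6 },
			{ 2, 0, 1, 3, 4, 5, 6, 7 },
		};

		/* num_rx_ch is 1..8; pri is the 3-bit user priority */
		return map[(num_rx_ch - 1) & 7][pri & 7];
	}

In the patch itself, this looked-up value is what gets programmed into the per-classifier thread register via cpsw_ale_policer_thread_idx_enable().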
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.h b/drivers/net/ethernet/ti/icssg/icss_iep.h index 803a4b71489300..0bdca0155abd65 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.h +++ b/drivers/net/ethernet/ti/icssg/icss_iep.h @@ -12,7 +12,78 @@ #include #include -struct icss_iep; +enum { + ICSS_IEP_GLOBAL_CFG_REG, + ICSS_IEP_GLOBAL_STATUS_REG, + ICSS_IEP_COMPEN_REG, + ICSS_IEP_SLOW_COMPEN_REG, + ICSS_IEP_COUNT_REG0, + ICSS_IEP_COUNT_REG1, + ICSS_IEP_CAPTURE_CFG_REG, + ICSS_IEP_CAPTURE_STAT_REG, + + ICSS_IEP_CAP6_RISE_REG0, + ICSS_IEP_CAP6_RISE_REG1, + + ICSS_IEP_CAP7_RISE_REG0, + ICSS_IEP_CAP7_RISE_REG1, + + ICSS_IEP_CMP_CFG_REG, + ICSS_IEP_CMP_STAT_REG, + ICSS_IEP_CMP0_REG0, + ICSS_IEP_CMP0_REG1, + ICSS_IEP_CMP1_REG0, + ICSS_IEP_CMP1_REG1, + + ICSS_IEP_CMP8_REG0, + ICSS_IEP_CMP8_REG1, + ICSS_IEP_SYNC_CTRL_REG, + ICSS_IEP_SYNC0_STAT_REG, + ICSS_IEP_SYNC1_STAT_REG, + ICSS_IEP_SYNC_PWIDTH_REG, + ICSS_IEP_SYNC0_PERIOD_REG, + ICSS_IEP_SYNC1_DELAY_REG, + ICSS_IEP_SYNC_START_REG, + ICSS_IEP_MAX_REGS, +}; + +/** + * struct icss_iep_plat_data - Plat data to handle SoC variants + * @config: Regmap configuration data + * @reg_offs: register offsets to capture offset differences across SoCs + * @flags: Flags to represent IEP properties + */ +struct icss_iep_plat_data { + const struct regmap_config *config; + u32 reg_offs[ICSS_IEP_MAX_REGS]; + u32 flags; +}; + +struct icss_iep { + struct device *dev; + void __iomem *base; + const struct icss_iep_plat_data *plat_data; + struct regmap *map; + struct device_node *client_np; + unsigned long refclk_freq; + int clk_tick_time; /* one refclk tick time in ns */ + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; + struct mutex ptp_clk_mutex; /* PHC access serializer */ + u32 def_inc; + s16 slow_cmp_inc; + u32 slow_cmp_count; + const struct icss_iep_clockops *ops; + void *clockops_data; + u32 cycle_time_ns; + u32 perout_enabled; + bool pps_enabled; + int cap_cmp_irq; + u64 period; + u32 latch_enable; + struct work_struct work; +}; + extern const struct icss_iep_clockops prueth_iep_clockops; /* Firmware specific clock operations */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c index 9ec504d976d6fd..833ca86d0b717f 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c +++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c @@ -290,6 +290,7 @@ void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac) mac[2] << 16 | mac[3] << 24)); regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8)); } +EXPORT_SYMBOL_GPL(icssg_class_set_host_mac_addr); void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac) { diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index b9d8a93d16805d..fdebeb2f84e00c 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -660,14 +660,15 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev { struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc; struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; struct netdev_queue *netif_txq; struct prueth_tx_chn *tx_chn; dma_addr_t desc_dma, buf_dma; + u32 pkt_len, dst_tag_id; int i, ret = 0, q_idx; bool in_tx_ts = 0; int tx_ts_cookie; void **swdata; - u32 pkt_len; u32 *epib; pkt_len = skb_headlen(skb); @@ -712,9 +713,20 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device 
*ndev /* set dst tag to indicate internal qid at the firmware which is at * bit8..bit15. bit0..bit7 indicates port num for directed - * packets in case of switch mode operation + * packets in case of switch mode operation and port num 0 + * for undirected packets in case of HSR offload mode */ - cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8))); + dst_tag_id = emac->port_id | (q_idx << 8); + + if (prueth->is_hsr_offload_mode && + (ndev->features & NETIF_F_HW_HSR_DUP)) + dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG; + + if (prueth->is_hsr_offload_mode && + (ndev->features & NETIF_F_HW_HSR_TAG_INS)) + epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS; + + cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id); k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); swdata = cppi5_hdesc_get_swdata(first_desc); diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index dae52a83a3786f..72ace151d8e9c9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -107,7 +107,7 @@ static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = { }, }; -static void icssg_config_mii_init_switch(struct prueth_emac *emac) +static void icssg_config_mii_init_fw_offload(struct prueth_emac *emac) { struct prueth *prueth = emac->prueth; int mii = prueth_emac_slice(emac); @@ -278,7 +278,7 @@ static int emac_r30_is_done(struct prueth_emac *emac) return 1; } -static int prueth_switch_buffer_setup(struct prueth_emac *emac) +static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac) { struct icssg_buffer_pool_cfg __iomem *bpool_cfg; struct icssg_rxq_ctx __iomem *rxq_ctx; @@ -424,7 +424,7 @@ static void icssg_init_emac_mode(struct prueth *prueth) icssg_class_set_host_mac_addr(prueth->miig_rt, mac); } -static void icssg_init_switch_mode(struct prueth *prueth) +static void icssg_init_fw_offload_mode(struct prueth *prueth) { u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET; int i; @@ -455,8 +455,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) struct icssg_flow_cfg __iomem *flow_cfg; int ret; - if (prueth->is_switch_mode) - icssg_init_switch_mode(prueth); + if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) + icssg_init_fw_offload_mode(prueth); else icssg_init_emac_mode(prueth); @@ -472,8 +472,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT); icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if); - if (prueth->is_switch_mode) - icssg_config_mii_init_switch(emac); + if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) + icssg_config_mii_init_fw_offload(emac); else icssg_config_mii_init(emac); icssg_config_ipg(emac); @@ -498,8 +498,8 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) writeb(0, config + SPL_PKT_DEFAULT_PRIORITY); writeb(0, config + QUEUE_NUM_UNTAGGED); - if (prueth->is_switch_mode) - ret = prueth_switch_buffer_setup(emac); + if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) + ret = prueth_fw_offload_buffer_setup(emac); else ret = prueth_emac_buffer_setup(emac); if (ret) @@ -531,7 +531,9 @@ static const struct icssg_r30_cmd emac_r32_bitmask[] = { {{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx ENABLE*/ {{EMAC_NONE, 0xbfff0000, EMAC_NONE, 
EMAC_NONE}}, /* Preemption on Tx DISABLE*/ {{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}}, /* VLAN AWARE*/ - {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNWARE*/ + {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}}, /* VLAN UNWARE*/ + {{0xffff2000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* HSR_RX_OFFLOAD_ENABLE */ + {{0xdfff0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}} /* HSR_RX_OFFLOAD_DISABLE */ }; int icssg_set_port_state(struct prueth_emac *emac, diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h index 1ac60283923bd3..92c2deaa306835 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.h +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -80,6 +80,8 @@ enum icssg_port_state_cmd { ICSSG_EMAC_PORT_PREMPT_TX_DISABLE, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE, ICSSG_EMAC_PORT_VLAN_AWARE_DISABLE, + ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE, + ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE, ICSSG_EMAC_PORT_MAX_COMMANDS }; diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c index 5688f054cec5f9..b715af21d23aca 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c +++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c @@ -68,9 +68,13 @@ static int emac_nway_reset(struct net_device *ndev) static int emac_get_sset_count(struct net_device *ndev, int stringset) { + struct prueth_emac *emac = netdev_priv(ndev); switch (stringset) { case ETH_SS_STATS: - return ICSSG_NUM_ETHTOOL_STATS; + if (emac->prueth->pa_stats) + return ICSSG_NUM_ETHTOOL_STATS; + else + return ICSSG_NUM_ETHTOOL_STATS - ICSSG_NUM_PA_STATS; default: return -EOPNOTSUPP; } @@ -78,18 +82,18 @@ static int emac_get_sset_count(struct net_device *ndev, int stringset) static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { + struct prueth_emac *emac = netdev_priv(ndev); u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) { - if (!icssg_all_stats[i].standard_stats) { - memcpy(p, icssg_all_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - } + for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) + if (!icssg_all_miig_stats[i].standard_stats) + ethtool_puts(&p, icssg_all_miig_stats[i].name); + if (emac->prueth->pa_stats) + for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) + ethtool_puts(&p, icssg_all_pa_stats[i].name); break; default: break; @@ -104,9 +108,13 @@ static void emac_get_ethtool_stats(struct net_device *ndev, emac_update_hardware_stats(emac); - for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) - if (!icssg_all_stats[i].standard_stats) + for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) + if (!icssg_all_miig_stats[i].standard_stats) *(data++) = emac->stats[i]; + + if (emac->prueth->pa_stats) + for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) + *(data++) = emac->pa_stats[i]; } static int emac_get_ts_info(struct net_device *ndev, @@ -118,8 +126,6 @@ static int emac_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index e3451beed32385..5fd9902ab181e9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -13,6 +13,7 @@ #include #include #include 
+#include #include #include #include @@ -40,6 +41,11 @@ #define DEFAULT_PORT_MASK 1 #define DEFAULT_UNTAG_MASK 1 +#define NETIF_PRUETH_HSR_OFFLOAD_FEATURES (NETIF_F_HW_HSR_FWD | \ + NETIF_F_HW_HSR_DUP | \ + NETIF_F_HW_HSR_TAG_INS | \ + NETIF_F_HW_HSR_TAG_RM) + /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) @@ -118,6 +124,19 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static struct icssg_firmwares icssg_hsr_firmwares[] = { + { + .pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf", + .rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf", + .txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf", + }, + { + .pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf", + .rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf", + .txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf", + } +}; + static struct icssg_firmwares icssg_switch_firmwares[] = { { .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf", @@ -152,6 +171,8 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac) if (prueth->is_switch_mode) firmwares = icssg_switch_firmwares; + else if (prueth->is_hsr_offload_mode) + firmwares = icssg_hsr_firmwares; else firmwares = icssg_emac_firmwares; @@ -365,7 +386,8 @@ static void prueth_iep_settime(void *clockops_data, u64 ns) sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0); sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32; sc_desc.iepcount_set = ns % cycletime; - sc_desc.CMP0_current = cycletime - 4; //Count from 0 to (cycle time)-4 + /* Count from 0 to (cycle time) - emac->iep->def_inc */ + sc_desc.CMP0_current = cycletime - emac->iep->def_inc; memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc)); @@ -470,6 +492,36 @@ static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr) return 0; } +static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + + icssg_fdb_add_del(emac, addr, prueth->default_vlan, + ICSSG_FDB_ENTRY_P0_MEMBERSHIP | + ICSSG_FDB_ENTRY_P1_MEMBERSHIP | + ICSSG_FDB_ENTRY_P2_MEMBERSHIP | + ICSSG_FDB_ENTRY_BLOCK, true); + + icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id), + BIT(emac->port_id), true); + return 0; +} + +static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + + icssg_fdb_add_del(emac, addr, prueth->default_vlan, + ICSSG_FDB_ENTRY_P0_MEMBERSHIP | + ICSSG_FDB_ENTRY_P1_MEMBERSHIP | + ICSSG_FDB_ENTRY_P2_MEMBERSHIP | + ICSSG_FDB_ENTRY_BLOCK, false); + + return 0; +} + /** * emac_ndo_open - EMAC device open * @ndev: network adapter device @@ -630,7 +682,10 @@ static int emac_ndo_stop(struct net_device *ndev) icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac)); - __dev_mc_unsync(ndev, icssg_prueth_del_mcast); + if (emac->prueth->is_hsr_offload_mode) + __dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast); + else + __dev_mc_unsync(ndev, icssg_prueth_del_mcast); atomic_set(&emac->tdown_cnt, emac->tx_ch_num); /* ensure new tdown_cnt value is visible */ @@ -708,7 +763,12 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work) return; } - __dev_mc_sync(ndev, icssg_prueth_add_mcast, icssg_prueth_del_mcast); + if (emac->prueth->is_hsr_offload_mode) + __dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast, + icssg_prueth_hsr_del_mcast); + else + __dev_mc_sync(ndev, icssg_prueth_add_mcast, + icssg_prueth_del_mcast); } /** @@ -725,6 +785,29 @@ 
static void emac_ndo_set_rx_mode(struct net_device *ndev) queue_work(emac->cmd_wq, &emac->rx_mode_work); } +static netdev_features_t emac_ndo_fix_features(struct net_device *ndev, + netdev_features_t features) +{ + /* hsr tag insertion offload and hsr dup offload are tightly coupled in + * firmware implementation. Both these features need to be enabled / + * disabled together. + */ + if (!(ndev->features & (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_TAG_INS))) + if ((features & NETIF_F_HW_HSR_DUP) || + (features & NETIF_F_HW_HSR_TAG_INS)) + features |= NETIF_F_HW_HSR_DUP | + NETIF_F_HW_HSR_TAG_INS; + + if ((ndev->features & NETIF_F_HW_HSR_DUP) || + (ndev->features & NETIF_F_HW_HSR_TAG_INS)) + if (!(features & NETIF_F_HW_HSR_DUP) || + !(features & NETIF_F_HW_HSR_TAG_INS)) + features &= ~(NETIF_F_HW_HSR_DUP | + NETIF_F_HW_HSR_TAG_INS); + + return features; +} + static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, @@ -736,6 +819,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_eth_ioctl = icssg_ndo_ioctl, .ndo_get_stats64 = icssg_ndo_get_stats64, .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, + .ndo_fix_features = emac_ndo_fix_features, }; static int prueth_netdev_init(struct prueth *prueth, @@ -857,12 +941,14 @@ static int prueth_netdev_init(struct prueth *prueth, } ether_addr_copy(emac->mac_addr, ndev->dev_addr); + ndev->dev.of_node = eth_node; ndev->min_mtu = PRUETH_MIN_PKT_SIZE; ndev->max_mtu = PRUETH_MAX_MTU; ndev->netdev_ops = &emac_netdev_ops; ndev->ethtool_ops = &icssg_ethtool_ops; ndev->hw_features = NETIF_F_SG; ndev->features = ndev->hw_features; + ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES; netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC, @@ -951,7 +1037,7 @@ static void prueth_emac_restart(struct prueth *prueth) netif_device_attach(emac1->ndev); } -static void icssg_enable_switch_mode(struct prueth *prueth) +static void icssg_change_mode(struct prueth *prueth) { struct prueth_emac *emac; int mac; @@ -960,6 +1046,13 @@ static void icssg_enable_switch_mode(struct prueth *prueth) for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { emac = prueth->emac[mac]; + if (prueth->is_hsr_offload_mode) { + if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM) + icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE); + else + icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE); + } + if (netif_running(emac->ndev)) { icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, ICSSG_FDB_ENTRY_P0_MEMBERSHIP | @@ -971,8 +1064,13 @@ static void icssg_enable_switch_mode(struct prueth *prueth) BIT(emac->port_id) | DEFAULT_PORT_MASK, BIT(emac->port_id) | DEFAULT_UNTAG_MASK, true); + if (prueth->is_hsr_offload_mode) + icssg_vtbl_modify(emac, DEFAULT_VID, + DEFAULT_PORT_MASK, + DEFAULT_UNTAG_MASK, true); icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); - icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); + if (prueth->is_switch_mode) + icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); } } } @@ -1010,7 +1108,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev, prueth->is_switch_mode = true; prueth->default_vlan = 1; emac->port_vlan = prueth->default_vlan; - icssg_enable_switch_mode(prueth); + icssg_change_mode(prueth); } } @@ -1038,6 +1136,61 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev) prueth->hw_bridge_dev = NULL; } +static int prueth_hsr_port_link(struct net_device *ndev) +{ + 
struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + struct prueth_emac *emac0; + struct prueth_emac *emac1; + + emac0 = prueth->emac[PRUETH_MAC0]; + emac1 = prueth->emac[PRUETH_MAC1]; + + if (prueth->is_switch_mode) + return -EOPNOTSUPP; + + prueth->hsr_members |= BIT(emac->port_id); + if (!prueth->is_hsr_offload_mode) { + if (prueth->hsr_members & BIT(PRUETH_PORT_MII0) && + prueth->hsr_members & BIT(PRUETH_PORT_MII1)) { + if (!(emac0->ndev->features & + NETIF_PRUETH_HSR_OFFLOAD_FEATURES) && + !(emac1->ndev->features & + NETIF_PRUETH_HSR_OFFLOAD_FEATURES)) + return -EOPNOTSUPP; + prueth->is_hsr_offload_mode = true; + prueth->default_vlan = 1; + emac0->port_vlan = prueth->default_vlan; + emac1->port_vlan = prueth->default_vlan; + icssg_change_mode(prueth); + netdev_dbg(ndev, "Enabling HSR offload mode\n"); + } + } + + return 0; +} + +static void prueth_hsr_port_unlink(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + struct prueth_emac *emac0; + struct prueth_emac *emac1; + + emac0 = prueth->emac[PRUETH_MAC0]; + emac1 = prueth->emac[PRUETH_MAC1]; + + prueth->hsr_members &= ~BIT(emac->port_id); + if (prueth->is_hsr_offload_mode) { + prueth->is_hsr_offload_mode = false; + emac0->port_vlan = 0; + emac1->port_vlan = 0; + prueth->hsr_dev = NULL; + prueth_emac_restart(prueth); + netdev_dbg(ndev, "Disabling HSR Offload mode\n"); + } +} + /* netdev notifier */ static int prueth_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) @@ -1045,6 +1198,8 @@ static int prueth_netdevice_event(struct notifier_block *unused, struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; int ret = NOTIFY_DONE; if (ndev->netdev_ops != &emac_netdev_ops) @@ -1054,6 +1209,25 @@ static int prueth_netdevice_event(struct notifier_block *unused, case NETDEV_CHANGEUPPER: info = ptr; + if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) && + is_hsr_master(info->upper_dev)) { + if (info->linking) { + if (!prueth->hsr_dev) { + prueth->hsr_dev = info->upper_dev; + icssg_class_set_host_mac_addr(prueth->miig_rt, + prueth->hsr_dev->dev_addr); + } else { + if (prueth->hsr_dev != info->upper_dev) { + netdev_dbg(ndev, "Both interfaces must be linked to same upper device\n"); + return -EOPNOTSUPP; + } + } + prueth_hsr_port_link(ndev); + } else { + prueth_hsr_port_unlink(ndev); + } + } + if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack); @@ -1181,6 +1355,12 @@ static int prueth_probe(struct platform_device *pdev) return -ENODEV; } + prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats"); + if (IS_ERR(prueth->pa_stats)) { + dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n"); + prueth->pa_stats = NULL; + } + if (eth0_node) { ret = prueth_get_cores(prueth, ICSS_SLICE0, false); if (ret) @@ -1271,8 +1451,8 @@ static int prueth_probe(struct platform_device *pdev) goto exit_iep; } - if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL)) - prueth->emac[PRUETH_MAC0]->half_duplex = 1; + prueth->emac[PRUETH_MAC0]->half_duplex = + of_property_read_bool(eth0_node, "ti,half-duplex-capable"); prueth->emac[PRUETH_MAC0]->iep = prueth->iep0; } @@ -1285,8 +1465,8 @@ static int prueth_probe(struct 
platform_device *pdev) goto netdev_exit; } - if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL)) - prueth->emac[PRUETH_MAC1]->half_duplex = 1; + prueth->emac[PRUETH_MAC1]->half_duplex = + of_property_read_bool(eth1_node, "ti,half-duplex-capable"); prueth->emac[PRUETH_MAC1]->iep = prueth->iep0; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index f678d656a3ed3a..bba6da2e6bd8f9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -50,13 +50,18 @@ #define ICSSG_MAX_RFLOWS 8 /* per slice */ +#define ICSSG_NUM_PA_STATS 4 +#define ICSSG_NUM_MIIG_STATS 60 /* Number of ICSSG related stats */ -#define ICSSG_NUM_STATS 60 +#define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS) #define ICSSG_NUM_STANDARD_STATS 31 #define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS) #define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */ +#define PRUETH_UNDIRECTED_PKT_DST_TAG 0 +#define PRUETH_UNDIRECTED_PKT_TAG_INS BIT(30) + /* Firmware status codes */ #define ICSS_HS_FW_READY 0x55555555 #define ICSS_HS_FW_DEAD 0xDEAD0000 /* lower 16 bits contain error code */ @@ -190,7 +195,8 @@ struct prueth_emac { int port_vlan; struct delayed_work stats_work; - u64 stats[ICSSG_NUM_STATS]; + u64 stats[ICSSG_NUM_MIIG_STATS]; + u64 pa_stats[ICSSG_NUM_PA_STATS]; /* RX IRQ Coalescing Related */ struct hrtimer rx_hrtimer; @@ -230,6 +236,7 @@ struct icssg_firmwares { * @registered_netdevs: list of registered netdevs * @miig_rt: regmap to mii_g_rt block * @mii_rt: regmap to mii_rt block + * @pa_stats: regmap to pa_stats block * @pru_id: ID for each of the PRUs * @pdev: pointer to ICSSG platform device * @pdata: pointer to platform data for ICSSG driver @@ -239,11 +246,14 @@ struct icssg_firmwares { * @iep1: pointer to IEP1 device * @vlan_tbl: VLAN-FID table pointer * @hw_bridge_dev: pointer to HW bridge net device + * @hsr_dev: pointer to the HSR net device * @br_members: bitmask of bridge member ports + * @hsr_members: bitmask of hsr member ports * @prueth_netdevice_nb: netdevice notifier block * @prueth_switchdev_nb: switchdev notifier block * @prueth_switchdev_bl_nb: switchdev blocking notifier block * @is_switch_mode: flag to indicate if device is in Switch mode + * @is_hsr_offload_mode: flag to indicate if device is in hsr offload mode * @is_switchmode_supported: indicates platform support for switch mode * @switch_id: ID for mapping switch ports to bridge * @default_vlan: Default VLAN for host @@ -263,6 +273,7 @@ struct prueth { struct net_device *registered_netdevs[PRUETH_NUM_MACS]; struct regmap *miig_rt; struct regmap *mii_rt; + struct regmap *pa_stats; enum pruss_pru_id pru_id[PRUSS_NUM_PRUS]; struct platform_device *pdev; @@ -274,11 +285,14 @@ struct prueth { struct prueth_vlan_tbl *vlan_tbl; struct net_device *hw_bridge_dev; + struct net_device *hsr_dev; u8 br_members; + u8 hsr_members; struct notifier_block prueth_netdevice_nb; struct notifier_block prueth_switchdev_nb; struct notifier_block prueth_switchdev_bl_nb; bool is_switch_mode; + bool is_hsr_offload_mode; bool is_switchmode_supported; unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN]; int default_vlan; diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c index e180c1166170af..292f04d29f4f7b 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -847,6 +847,7 @@ static int 
prueth_netdev_init(struct prueth *prueth, } ether_addr_copy(emac->mac_addr, ndev->dev_addr); + ndev->dev.of_node = eth_node; ndev->min_mtu = PRUETH_MIN_PKT_SIZE; ndev->max_mtu = PRUETH_MAX_MTU; ndev->netdev_ops = &emac_netdev_ops; @@ -1045,8 +1046,8 @@ static int prueth_probe(struct platform_device *pdev) goto exit_iep; } - if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL)) - prueth->emac[PRUETH_MAC0]->half_duplex = 1; + prueth->emac[PRUETH_MAC0]->half_duplex = + of_property_read_bool(eth0_node, "ti,half-duplex-capable"); prueth->emac[PRUETH_MAC0]->iep = prueth->iep0; } @@ -1059,8 +1060,8 @@ static int prueth_probe(struct platform_device *pdev) goto netdev_exit; } - if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL)) - prueth->emac[PRUETH_MAC1]->half_duplex = 1; + prueth->emac[PRUETH_MAC1]->half_duplex = + of_property_read_bool(eth1_node, "ti,half-duplex-capable"); prueth->emac[PRUETH_MAC1]->iep = prueth->iep1; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c index 2fb150c13078b7..8800bd3a8d074c 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.c +++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c @@ -11,6 +11,7 @@ #define ICSSG_TX_PACKET_OFFSET 0xA0 #define ICSSG_TX_BYTE_OFFSET 0xEC +#define ICSSG_FW_STATS_BASE 0x0248 static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */ 0xb18, /* Slice 1 stats start */ @@ -22,24 +23,34 @@ void emac_update_hardware_stats(struct prueth_emac *emac) int slice = prueth_emac_slice(emac); u32 base = stats_base[slice]; u32 tx_pkt_cnt = 0; - u32 val; + u32 val, reg; int i; - for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) { + for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) { regmap_read(prueth->miig_rt, - base + icssg_all_stats[i].offset, + base + icssg_all_miig_stats[i].offset, &val); regmap_write(prueth->miig_rt, - base + icssg_all_stats[i].offset, + base + icssg_all_miig_stats[i].offset, val); - if (icssg_all_stats[i].offset == ICSSG_TX_PACKET_OFFSET) + if (icssg_all_miig_stats[i].offset == ICSSG_TX_PACKET_OFFSET) tx_pkt_cnt = val; emac->stats[i] += val; - if (icssg_all_stats[i].offset == ICSSG_TX_BYTE_OFFSET) + if (icssg_all_miig_stats[i].offset == ICSSG_TX_BYTE_OFFSET) emac->stats[i] -= tx_pkt_cnt * 8; } + + if (prueth->pa_stats) { + for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) { + reg = ICSSG_FW_STATS_BASE + + icssg_all_pa_stats[i].offset * + PRUETH_NUM_MACS + slice * sizeof(u32); + regmap_read(prueth->pa_stats, reg, &val); + emac->pa_stats[i] += val; + } + } } void icssg_stats_work_handler(struct work_struct *work) @@ -57,9 +68,16 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name) { int i; - for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) { - if (!strcmp(icssg_all_stats[i].name, stat_name)) - return emac->stats[icssg_all_stats[i].offset / sizeof(u32)]; + for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) { + if (!strcmp(icssg_all_miig_stats[i].name, stat_name)) + return emac->stats[icssg_all_miig_stats[i].offset / sizeof(u32)]; + } + + if (emac->prueth->pa_stats) { + for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) { + if (!strcmp(icssg_all_pa_stats[i].name, stat_name)) + return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)]; + } } netdev_err(emac->ndev, "Invalid stats %s\n", stat_name); diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h index 999a4a91276c3f..e88b919f532cdb 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.h +++ 
b/drivers/net/ethernet/ti/icssg/icssg_stats.h @@ -77,82 +77,114 @@ struct miig_stats_regs { u32 tx_bytes; }; -#define ICSSG_STATS(field, stats_type) \ +#define ICSSG_MIIG_STATS(field, stats_type) \ { \ #field, \ offsetof(struct miig_stats_regs, field), \ stats_type \ } -struct icssg_stats { +struct icssg_miig_stats { char name[ETH_GSTRING_LEN]; u32 offset; bool standard_stats; }; -static const struct icssg_stats icssg_all_stats[] = { +static const struct icssg_miig_stats icssg_all_miig_stats[] = { /* Rx */ - ICSSG_STATS(rx_packets, true), - ICSSG_STATS(rx_broadcast_frames, false), - ICSSG_STATS(rx_multicast_frames, true), - ICSSG_STATS(rx_crc_errors, true), - ICSSG_STATS(rx_mii_error_frames, false), - ICSSG_STATS(rx_odd_nibble_frames, false), - ICSSG_STATS(rx_frame_max_size, true), - ICSSG_STATS(rx_max_size_error_frames, false), - ICSSG_STATS(rx_frame_min_size, true), - ICSSG_STATS(rx_min_size_error_frames, false), - ICSSG_STATS(rx_over_errors, true), - ICSSG_STATS(rx_class0_hits, false), - ICSSG_STATS(rx_class1_hits, false), - ICSSG_STATS(rx_class2_hits, false), - ICSSG_STATS(rx_class3_hits, false), - ICSSG_STATS(rx_class4_hits, false), - ICSSG_STATS(rx_class5_hits, false), - ICSSG_STATS(rx_class6_hits, false), - ICSSG_STATS(rx_class7_hits, false), - ICSSG_STATS(rx_class8_hits, false), - ICSSG_STATS(rx_class9_hits, false), - ICSSG_STATS(rx_class10_hits, false), - ICSSG_STATS(rx_class11_hits, false), - ICSSG_STATS(rx_class12_hits, false), - ICSSG_STATS(rx_class13_hits, false), - ICSSG_STATS(rx_class14_hits, false), - ICSSG_STATS(rx_class15_hits, false), - ICSSG_STATS(rx_smd_frags, false), - ICSSG_STATS(rx_bucket1_size, true), - ICSSG_STATS(rx_bucket2_size, true), - ICSSG_STATS(rx_bucket3_size, true), - ICSSG_STATS(rx_bucket4_size, true), - ICSSG_STATS(rx_64B_frames, true), - ICSSG_STATS(rx_bucket1_frames, true), - ICSSG_STATS(rx_bucket2_frames, true), - ICSSG_STATS(rx_bucket3_frames, true), - ICSSG_STATS(rx_bucket4_frames, true), - ICSSG_STATS(rx_bucket5_frames, true), - ICSSG_STATS(rx_bytes, true), - ICSSG_STATS(rx_tx_total_bytes, false), + ICSSG_MIIG_STATS(rx_packets, true), + ICSSG_MIIG_STATS(rx_broadcast_frames, false), + ICSSG_MIIG_STATS(rx_multicast_frames, true), + ICSSG_MIIG_STATS(rx_crc_errors, true), + ICSSG_MIIG_STATS(rx_mii_error_frames, false), + ICSSG_MIIG_STATS(rx_odd_nibble_frames, false), + ICSSG_MIIG_STATS(rx_frame_max_size, true), + ICSSG_MIIG_STATS(rx_max_size_error_frames, false), + ICSSG_MIIG_STATS(rx_frame_min_size, true), + ICSSG_MIIG_STATS(rx_min_size_error_frames, false), + ICSSG_MIIG_STATS(rx_over_errors, true), + ICSSG_MIIG_STATS(rx_class0_hits, false), + ICSSG_MIIG_STATS(rx_class1_hits, false), + ICSSG_MIIG_STATS(rx_class2_hits, false), + ICSSG_MIIG_STATS(rx_class3_hits, false), + ICSSG_MIIG_STATS(rx_class4_hits, false), + ICSSG_MIIG_STATS(rx_class5_hits, false), + ICSSG_MIIG_STATS(rx_class6_hits, false), + ICSSG_MIIG_STATS(rx_class7_hits, false), + ICSSG_MIIG_STATS(rx_class8_hits, false), + ICSSG_MIIG_STATS(rx_class9_hits, false), + ICSSG_MIIG_STATS(rx_class10_hits, false), + ICSSG_MIIG_STATS(rx_class11_hits, false), + ICSSG_MIIG_STATS(rx_class12_hits, false), + ICSSG_MIIG_STATS(rx_class13_hits, false), + ICSSG_MIIG_STATS(rx_class14_hits, false), + ICSSG_MIIG_STATS(rx_class15_hits, false), + ICSSG_MIIG_STATS(rx_smd_frags, false), + ICSSG_MIIG_STATS(rx_bucket1_size, true), + ICSSG_MIIG_STATS(rx_bucket2_size, true), + ICSSG_MIIG_STATS(rx_bucket3_size, true), + ICSSG_MIIG_STATS(rx_bucket4_size, true), + ICSSG_MIIG_STATS(rx_64B_frames, true), + 
ICSSG_MIIG_STATS(rx_bucket1_frames, true), + ICSSG_MIIG_STATS(rx_bucket2_frames, true), + ICSSG_MIIG_STATS(rx_bucket3_frames, true), + ICSSG_MIIG_STATS(rx_bucket4_frames, true), + ICSSG_MIIG_STATS(rx_bucket5_frames, true), + ICSSG_MIIG_STATS(rx_bytes, true), + ICSSG_MIIG_STATS(rx_tx_total_bytes, false), /* Tx */ - ICSSG_STATS(tx_packets, true), - ICSSG_STATS(tx_broadcast_frames, false), - ICSSG_STATS(tx_multicast_frames, false), - ICSSG_STATS(tx_odd_nibble_frames, false), - ICSSG_STATS(tx_underflow_errors, false), - ICSSG_STATS(tx_frame_max_size, true), - ICSSG_STATS(tx_max_size_error_frames, false), - ICSSG_STATS(tx_frame_min_size, true), - ICSSG_STATS(tx_min_size_error_frames, false), - ICSSG_STATS(tx_bucket1_size, true), - ICSSG_STATS(tx_bucket2_size, true), - ICSSG_STATS(tx_bucket3_size, true), - ICSSG_STATS(tx_bucket4_size, true), - ICSSG_STATS(tx_64B_frames, true), - ICSSG_STATS(tx_bucket1_frames, true), - ICSSG_STATS(tx_bucket2_frames, true), - ICSSG_STATS(tx_bucket3_frames, true), - ICSSG_STATS(tx_bucket4_frames, true), - ICSSG_STATS(tx_bucket5_frames, true), - ICSSG_STATS(tx_bytes, true), + ICSSG_MIIG_STATS(tx_packets, true), + ICSSG_MIIG_STATS(tx_broadcast_frames, false), + ICSSG_MIIG_STATS(tx_multicast_frames, false), + ICSSG_MIIG_STATS(tx_odd_nibble_frames, false), + ICSSG_MIIG_STATS(tx_underflow_errors, false), + ICSSG_MIIG_STATS(tx_frame_max_size, true), + ICSSG_MIIG_STATS(tx_max_size_error_frames, false), + ICSSG_MIIG_STATS(tx_frame_min_size, true), + ICSSG_MIIG_STATS(tx_min_size_error_frames, false), + ICSSG_MIIG_STATS(tx_bucket1_size, true), + ICSSG_MIIG_STATS(tx_bucket2_size, true), + ICSSG_MIIG_STATS(tx_bucket3_size, true), + ICSSG_MIIG_STATS(tx_bucket4_size, true), + ICSSG_MIIG_STATS(tx_64B_frames, true), + ICSSG_MIIG_STATS(tx_bucket1_frames, true), + ICSSG_MIIG_STATS(tx_bucket2_frames, true), + ICSSG_MIIG_STATS(tx_bucket3_frames, true), + ICSSG_MIIG_STATS(tx_bucket4_frames, true), + ICSSG_MIIG_STATS(tx_bucket5_frames, true), + ICSSG_MIIG_STATS(tx_bytes, true), +}; + +/** + * struct pa_stats_regs - ICSSG Firmware maintained PA Stats register + * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI + * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues + * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter + * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter + */ +struct pa_stats_regs { + u32 fw_rx_cnt; + u32 fw_tx_cnt; + u32 fw_tx_pre_overflow; + u32 fw_tx_exp_overflow; +}; + +#define ICSSG_PA_STATS(field) \ +{ \ + #field, \ + offsetof(struct pa_stats_regs, field), \ +} + +struct icssg_pa_stats { + char name[ETH_GSTRING_LEN]; + u32 offset; +}; + +static const struct icssg_pa_stats icssg_all_pa_stats[] = { + ICSSG_PA_STATS(fw_rx_cnt), + ICSSG_PA_STATS(fw_tx_cnt), + ICSSG_PA_STATS(fw_tx_pre_overflow), + ICSSG_PA_STATS(fw_tx_exp_overflow), }; #endif /* __NET_TI_ICSSG_STATS_H */ diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index d286709ca3b93a..63e686f0b11974 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2012,8 +2012,6 @@ static int keystone_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = gbe_intf->gbe_dev->cpts->phc_index; info->tx_types = @@ -2030,10 +2028,7 @@ static int keystone_get_ts_info(struct net_device *ndev, struct 
kernel_ethtool_ts_info *info) { info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; + SOF_TIMESTAMPING_TX_SOFTWARE; info->tx_types = 0; info->rx_filters = 0; return 0; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 87e67121477cb5..a4937c18d7cb5b 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2277,10 +2277,11 @@ spider_net_setup_netdev(struct spider_net_card *card) netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM; if (SPIDER_NET_RX_CSUM_DEFAULT) netdev->features |= NETIF_F_RXCSUM; - netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; + netdev->features |= NETIF_F_IP_CSUM; /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | * NETIF_F_HW_VLAN_CTAG_FILTER */ + netdev->lltx = true; /* MTU range: 64 - 2294 */ netdev->min_mtu = SPIDER_NET_MIN_MTU; diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index edd8b59680e538..a04d4073def942 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -377,8 +377,8 @@ static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb, int ret; bool first = true; - if (txb->len < 60) - pad = 60 - txb->len; + if (txb->len < ETH_ZLEN) + pad = ETH_ZLEN - txb->len; while (1) { mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad)); @@ -451,7 +451,7 @@ static void mse102x_tx_work(struct work_struct *work) if (ret == -ETIMEDOUT) { if (netif_msg_timer(mse)) - netdev_err(mse->ndev, "tx work timeout\n"); + netdev_err_once(mse->ndev, "tx work timeout\n"); mse->stats.tx_timeout++; } @@ -485,8 +485,8 @@ static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np) if (ret) { eth_hw_addr_random(ndev); - netdev_err(ndev, "Using random MAC address: %pM\n", - ndev->dev_addr); + dev_warn(ndev->dev.parent, "Using random MAC address: %pM\n", + ndev->dev_addr); } } @@ -622,8 +622,6 @@ static const struct ethtool_ops mse102x_ethtool_ops = { /* driver bus management functions */ -#ifdef CONFIG_PM_SLEEP - static int mse102x_suspend(struct device *dev) { struct mse102x_net *mse = dev_get_drvdata(dev); @@ -649,9 +647,8 @@ static int mse102x_resume(struct device *dev) return 0; } -#endif -static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume); static int mse102x_probe_spi(struct spi_device *spi) { @@ -736,9 +733,6 @@ static void mse102x_remove_spi(struct spi_device *spi) struct mse102x_net *mse = dev_get_drvdata(&spi->dev); struct mse102x_net_spi *mses = to_mse102x_spi(mse); - if (netif_msg_drv(mse)) - dev_info(&spi->dev, "remove\n"); - mse102x_remove_device_debugfs(mses); unregister_netdev(mse->ndev); } @@ -761,7 +755,7 @@ static struct spi_driver mse102x_driver = { .driver = { .name = DRV_NAME, .of_match_table = mse102x_match_table, - .pm = &mse102x_pm_ops, + .pm = pm_sleep_ptr(&mse102x_pm_ops), }, .probe = mse102x_probe_spi, .remove = mse102x_remove_spi, diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 85cdbdd44fec70..e46ccebcfd22fd 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -41,10 +41,9 @@ config TXGBE tristate "Wangxun(R) 10GbE PCI Express adapters support" depends on PCI depends on COMMON_CLK + depends on I2C_DESIGNWARE_PLATFORM select 
MARVELL_10G_PHY select REGMAP - select I2C - select I2C_DESIGNWARE_PLATFORM select PHYLINK select HWMON if TXGBE=y select SFP diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 1eecba984f3b8b..2b3d6586f44a53 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -251,10 +251,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, rx_buffer->page_offset; /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif + net_prefetch(page_addr); /* allocate a skb to store the frags */ skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index d6b2b3c781b6f2..cd1372da92a94c 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -103,8 +103,7 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum) if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) *checksum += local_buffer[i]; - if (eeprom_ptrs) - kvfree(eeprom_ptrs); + kvfree(eeprom_ptrs); *checksum = TXGBE_EEPROM_SUM - *checksum; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 5f502265f0a63e..67b61afdde96ce 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -688,8 +688,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) mii_bus->parent = &pdev->dev; mii_bus->phy_mask = GENMASK(31, 1); mii_bus->priv = wx; - snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", - (pdev->bus->number << 8) | pdev->devfn); + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", pci_dev_id(pdev)); ret = devm_mdiobus_register(&pdev->dev, mii_bus); if (ret) { diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 1223fcc1a8daee..d64b8abcf01867 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -29,26 +29,26 @@ /* Configuration options */ /* Accept all incoming packets. Default: disabled (cleared) */ -#define XAE_OPTION_PROMISC (1 << 0) +#define XAE_OPTION_PROMISC BIT(0) /* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */ -#define XAE_OPTION_JUMBO (1 << 1) +#define XAE_OPTION_JUMBO BIT(1) /* VLAN Rx & Tx frame support. Default: disabled (cleared) */ -#define XAE_OPTION_VLAN (1 << 2) +#define XAE_OPTION_VLAN BIT(2) /* Enable recognition of flow control frames on Rx. Default: enabled (set) */ -#define XAE_OPTION_FLOW_CONTROL (1 << 4) +#define XAE_OPTION_FLOW_CONTROL BIT(4) /* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not * stripped. Default: disabled (set) */ -#define XAE_OPTION_FCS_STRIP (1 << 5) +#define XAE_OPTION_FCS_STRIP BIT(5) /* Generate FCS field and add PAD automatically for outgoing frames. * Default: enabled (set) */ -#define XAE_OPTION_FCS_INSERT (1 << 6) +#define XAE_OPTION_FCS_INSERT BIT(6) /* Enable Length/Type error checking for incoming frames. When this option is * set, the MAC will filter frames that have a mismatched type/length field @@ -56,13 +56,13 @@ * types of frames are encountered. When this option is cleared, the MAC will * allow these types of frames to be received. 
Default: enabled (set) */ -#define XAE_OPTION_LENTYPE_ERR (1 << 7) +#define XAE_OPTION_LENTYPE_ERR BIT(7) /* Enable the transmitter. Default: enabled (set) */ -#define XAE_OPTION_TXEN (1 << 11) +#define XAE_OPTION_TXEN BIT(11) /* Enable the receiver. Default: enabled (set) */ -#define XAE_OPTION_RXEN (1 << 12) +#define XAE_OPTION_RXEN BIT(12) /* Default options set when device is initialized or reset */ #define XAE_OPTION_DEFAULTS \ @@ -156,6 +156,7 @@ #define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */ #define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */ #define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */ +#define XAE_STATS_OFFSET 0x00000200 /* Statistics counters */ #define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */ #define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */ #define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */ @@ -163,6 +164,7 @@ #define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */ #define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */ #define XAE_ID_OFFSET 0x000004F8 /* Identification register */ +#define XAE_ABILITY_OFFSET 0x000004FC /* Ability Register offset */ #define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */ #define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */ #define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */ @@ -173,6 +175,8 @@ #define XAE_FFE_OFFSET 0x0000070C /* Frame Filter Enable */ #define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */ #define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */ +#define XAE_AM0_OFFSET 0x00000750 /* Frame Filter Mask Value Bytes 3-0 */ +#define XAE_AM1_OFFSET 0x00000754 /* Frame Filter Mask Value Bytes 7-4 */ #define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */ #define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */ @@ -284,6 +288,16 @@ #define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */ #define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */ +/* Bit masks for Axi Ethernet ability register */ +#define XAE_ABILITY_PFC BIT(16) +#define XAE_ABILITY_FRAME_FILTER BIT(10) +#define XAE_ABILITY_HALF_DUPLEX BIT(9) +#define XAE_ABILITY_STATS BIT(8) +#define XAE_ABILITY_2_5G BIT(3) +#define XAE_ABILITY_1G BIT(2) +#define XAE_ABILITY_100M BIT(1) +#define XAE_ABILITY_10M BIT(0) + /* Bit masks for Axi Ethernet MDIO interface MC register */ #define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */ #define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */ @@ -327,11 +341,12 @@ #define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Axi Ethernet Synthesis features */ -#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0) -#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1) -#define XAE_FEATURE_FULL_RX_CSUM (1 << 2) -#define XAE_FEATURE_FULL_TX_CSUM (1 << 3) -#define XAE_FEATURE_DMA_64BIT (1 << 4) +#define XAE_FEATURE_PARTIAL_RX_CSUM BIT(0) +#define XAE_FEATURE_PARTIAL_TX_CSUM BIT(1) +#define XAE_FEATURE_FULL_RX_CSUM BIT(2) +#define XAE_FEATURE_FULL_TX_CSUM BIT(3) +#define XAE_FEATURE_DMA_64BIT BIT(4) +#define XAE_FEATURE_STATS BIT(5) #define XAE_NO_CSUM_OFFLOAD 0 @@ -345,6 +360,61 @@ #define XLNX_MII_STD_SELECT_REG 0x11 #define XLNX_MII_STD_SELECT_SGMII BIT(0) +/* enum temac_stat - TEMAC statistics counters + * + * Index of statistics counters within the TEMAC. This must match the + * order/offset of hardware registers exactly. 
+ */ +enum temac_stat { + STAT_RX_BYTES = 0, + STAT_TX_BYTES, + STAT_UNDERSIZE_FRAMES, + STAT_FRAGMENT_FRAMES, + STAT_RX_64_BYTE_FRAMES, + STAT_RX_65_127_BYTE_FRAMES, + STAT_RX_128_255_BYTE_FRAMES, + STAT_RX_256_511_BYTE_FRAMES, + STAT_RX_512_1023_BYTE_FRAMES, + STAT_RX_1024_MAX_BYTE_FRAMES, + STAT_RX_OVERSIZE_FRAMES, + STAT_TX_64_BYTE_FRAMES, + STAT_TX_65_127_BYTE_FRAMES, + STAT_TX_128_255_BYTE_FRAMES, + STAT_TX_256_511_BYTE_FRAMES, + STAT_TX_512_1023_BYTE_FRAMES, + STAT_TX_1024_MAX_BYTE_FRAMES, + STAT_TX_OVERSIZE_FRAMES, + STAT_RX_GOOD_FRAMES, + STAT_RX_FCS_ERRORS, + STAT_RX_BROADCAST_FRAMES, + STAT_RX_MULTICAST_FRAMES, + STAT_RX_CONTROL_FRAMES, + STAT_RX_LENGTH_ERRORS, + STAT_RX_VLAN_FRAMES, + STAT_RX_PAUSE_FRAMES, + STAT_RX_CONTROL_OPCODE_ERRORS, + STAT_TX_GOOD_FRAMES, + STAT_TX_BROADCAST_FRAMES, + STAT_TX_MULTICAST_FRAMES, + STAT_TX_UNDERRUN_ERRORS, + STAT_TX_CONTROL_FRAMES, + STAT_TX_VLAN_FRAMES, + STAT_TX_PAUSE_FRAMES, + STAT_TX_SINGLE_COLLISION_FRAMES, + STAT_TX_MULTIPLE_COLLISION_FRAMES, + STAT_TX_DEFERRED_FRAMES, + STAT_TX_LATE_COLLISIONS, + STAT_TX_EXCESS_COLLISIONS, + STAT_TX_EXCESS_DEFERRAL, + STAT_RX_ALIGNMENT_ERRORS, + STAT_TX_PFC_FRAMES, + STAT_RX_PFC_FRAMES, + STAT_USER_DEFINED0, + STAT_USER_DEFINED1, + STAT_USER_DEFINED2, + STAT_COUNT, +}; + /** * struct axidma_bd - Axi Dma buffer descriptor layout * @next: MM2S/S2MM Next Descriptor Pointer @@ -435,6 +505,16 @@ struct skbuf_dma_descriptor { * @tx_packets: TX packet count for statistics * @tx_bytes: TX byte count for statistics * @tx_stat_sync: Synchronization object for TX stats + * @hw_stat_base: Base offset for statistics counters. This may be nonzero if + * the statistics counteres were reset or wrapped around. + * @hw_last_counter: Last-seen value of each statistic counter + * @reset_in_progress: Set while we are performing a reset and statistics + * counters may be invalid + * @hw_stats_seqcount: Sequence counter for @hw_stat_base, @hw_last_counter, + * and @reset_in_progress. + * @stats_lock: Lock for @hw_stats_seqcount + * @stats_work: Work for reading the hardware statistics counters often enough + * to catch overflows. * @dma_err_task: Work structure to process Axi DMA errors * @stopping: Set when @dma_err_task shouldn't do anything because we are * about to stop the device. @@ -449,8 +529,6 @@ struct skbuf_dma_descriptor { * supported, the maximum frame size would be 9k. Else it is * 1522 bytes (assuming support for basic VLAN) * @rxmem: Stores rx memory size for jumbo frame handling. - * @csum_offload_on_tx_path: Stores the checksum selection on TX side. - * @csum_offload_on_rx_path: Stores the checksum selection on RX side. * @coalesce_count_rx: Store the irq coalesce on RX side. * @coalesce_usec_rx: IRQ coalesce delay for RX * @coalesce_count_tx: Store the irq coalesce on TX side. 
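The new kernel-doc above for @hw_stat_base, @hw_last_counter and @stats_work implies a particular accumulation scheme: the MAC exposes free-running 32-bit counters, and the driver keeps a 64-bit software base plus the last raw sample, folding new readings in with wrap-safe unsigned subtraction. A minimal sketch of that arithmetic follows; it is simplified and standalone (not the driver's actual helper) and uses stdint types rather than the kernel's u32/u64.

	#include <stdint.h>

	/* Illustration only: wrap-safe accumulation of a free-running 32-bit
	 * hardware counter into a 64-bit software total. The uint32_t
	 * subtraction wraps modulo 2^32, so the delta stays correct across a
	 * single counter wrap, provided the counter is sampled at least once
	 * per 2^32 events (hence the periodic stats_work above).
	 */
	static inline uint64_t stat_total(uint64_t base, uint32_t last,
					  uint32_t now)
	{
		return base + (uint32_t)(now - last);
	}

This is the computation axienet_stat() performs in the xilinx_axienet_main.c hunks below, with hw_stats_seqcount letting readers detect a concurrent rebase of the counters during a reset.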
@@ -508,6 +586,13 @@ struct axienet_local { u64_stats_t tx_bytes; struct u64_stats_sync tx_stat_sync; + u64 hw_stat_base[STAT_COUNT]; + u32 hw_last_counter[STAT_COUNT]; + seqcount_mutex_t hw_stats_seqcount; + struct mutex stats_lock; + struct delayed_work stats_work; + bool reset_in_progress; + struct work_struct dma_err_task; bool stopping; @@ -522,9 +607,6 @@ struct axienet_local { u32 max_frm_size; u32 rxmem; - int csum_offload_on_tx_path; - int csum_offload_on_rx_path; - u32 coalesce_count_rx; u32 coalesce_usec_rx; u32 coalesce_count_tx; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 9eb300fc359096..fc35fcb22d94fd 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -415,6 +415,7 @@ static void axienet_set_mac_address(struct net_device *ndev, static int netdev_set_mac_address(struct net_device *ndev, void *p) { struct sockaddr *addr = p; + axienet_set_mac_address(ndev, addr->sa_data); return 0; } @@ -436,24 +437,27 @@ static void axienet_set_multicast_list(struct net_device *ndev) u32 reg, af0reg, af1reg; struct axienet_local *lp = netdev_priv(ndev); - if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || - netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { - /* We must make the kernel realize we had to move into - * promiscuous mode. If it was a promiscuous mode request - * the flag is already set. If not we set it. - */ - ndev->flags |= IFF_PROMISC; - reg = axienet_ior(lp, XAE_FMI_OFFSET); + reg = axienet_ior(lp, XAE_FMI_OFFSET); + reg &= ~XAE_FMI_PM_MASK; + if (ndev->flags & IFF_PROMISC) reg |= XAE_FMI_PM_MASK; + else + reg &= ~XAE_FMI_PM_MASK; + axienet_iow(lp, XAE_FMI_OFFSET, reg); + + if (ndev->flags & IFF_ALLMULTI || + netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { + reg &= 0xFFFFFF00; axienet_iow(lp, XAE_FMI_OFFSET, reg); - dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); + axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */ + axienet_iow(lp, XAE_AF1_OFFSET, 0); + axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */ + axienet_iow(lp, XAE_AM1_OFFSET, 0); + axienet_iow(lp, XAE_FFE_OFFSET, 1); + i = 1; } else if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; - reg = axienet_ior(lp, XAE_FMI_OFFSET); - reg &= ~XAE_FMI_PM_MASK; - axienet_iow(lp, XAE_FMI_OFFSET, reg); - netdev_for_each_mc_addr(ha, ndev) { if (i >= XAE_MULTICAST_CAM_TABLE_NUM) break; @@ -466,25 +470,21 @@ static void axienet_set_multicast_list(struct net_device *ndev) af1reg = (ha->addr[4]); af1reg |= (ha->addr[5] << 8); - reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; + reg &= 0xFFFFFF00; reg |= i; axienet_iow(lp, XAE_FMI_OFFSET, reg); axienet_iow(lp, XAE_AF0_OFFSET, af0reg); axienet_iow(lp, XAE_AF1_OFFSET, af1reg); + axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff); + axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff); axienet_iow(lp, XAE_FFE_OFFSET, 1); i++; } - } else { - reg = axienet_ior(lp, XAE_FMI_OFFSET); - reg &= ~XAE_FMI_PM_MASK; - - axienet_iow(lp, XAE_FMI_OFFSET, reg); - dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); } for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { - reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; + reg &= 0xFFFFFF00; reg |= i; axienet_iow(lp, XAE_FMI_OFFSET, reg); axienet_iow(lp, XAE_FFE_OFFSET, 0); @@ -519,11 +519,55 @@ static void axienet_setoptions(struct net_device *ndev, u32 options) lp->options |= options; } +static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat) +{ + u32 counter; + + if 
(lp->reset_in_progress) + return lp->hw_stat_base[stat]; + + counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); + return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]); +} + +static void axienet_stats_update(struct axienet_local *lp, bool reset) +{ + enum temac_stat stat; + + write_seqcount_begin(&lp->hw_stats_seqcount); + lp->reset_in_progress = reset; + for (stat = 0; stat < STAT_COUNT; stat++) { + u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); + + lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat]; + lp->hw_last_counter[stat] = counter; + } + write_seqcount_end(&lp->hw_stats_seqcount); +} + +static void axienet_refresh_stats(struct work_struct *work) +{ + struct axienet_local *lp = container_of(work, struct axienet_local, + stats_work.work); + + mutex_lock(&lp->stats_lock); + axienet_stats_update(lp, false); + mutex_unlock(&lp->stats_lock); + + /* Just less than 2^32 bytes at 2.5 GBit/s */ + schedule_delayed_work(&lp->stats_work, 13 * HZ); +} + static int __axienet_device_reset(struct axienet_local *lp) { u32 value; int ret; + /* Save statistics counters in case they will be reset */ + mutex_lock(&lp->stats_lock); + if (lp->features & XAE_FEATURE_STATS) + axienet_stats_update(lp, true); + /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset * process of Axi DMA takes a while to complete as all pending * commands/transfers will be flushed or completed during this @@ -538,7 +582,7 @@ static int __axienet_device_reset(struct axienet_local *lp) XAXIDMA_TX_CR_OFFSET); if (ret) { dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); - return ret; + goto out; } /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ @@ -548,10 +592,29 @@ static int __axienet_device_reset(struct axienet_local *lp) XAE_IS_OFFSET); if (ret) { dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); - return ret; + goto out; } - return 0; + /* Update statistics counters with new values */ + if (lp->features & XAE_FEATURE_STATS) { + enum temac_stat stat; + + write_seqcount_begin(&lp->hw_stats_seqcount); + lp->reset_in_progress = false; + for (stat = 0; stat < STAT_COUNT; stat++) { + u32 counter = + axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); + + lp->hw_stat_base[stat] += + lp->hw_last_counter[stat] - counter; + lp->hw_last_counter[stat] = counter; + } + write_seqcount_end(&lp->hw_stats_seqcount); + } + +out: + mutex_unlock(&lp->stats_lock); + return ret; } /** @@ -614,8 +677,7 @@ static int axienet_device_reset(struct net_device *ndev) lp->options |= XAE_OPTION_VLAN; lp->options &= (~XAE_OPTION_JUMBO); - if ((ndev->mtu > XAE_MTU) && - (ndev->mtu <= XAE_JUMBO_MTU)) { + if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) { lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE; @@ -674,15 +736,15 @@ static int axienet_device_reset(struct net_device *ndev) * * Would either be called after a successful transmit operation, or after * there was an error when setting up the chain. - * Returns the number of descriptors handled. + * Returns the number of packets handled. 
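axienet_stats_update() and axienet_stat() above rely on a seqcount_mutex_t so that statistics readers never block: writers (the refresh work and the reset path) are serialized by stats_lock, and a reader simply retries if it raced with an update. A hedged sketch of the same pattern, with illustrative names rather than the driver's:

#include <linux/mutex.h>
#include <linux/seqlock.h>
#include <linux/types.h>

struct demo_stats {
        struct mutex lock;              /* serializes all writers */
        seqcount_mutex_t seq;           /* associated with @lock */
        u64 base;
        u32 last;
};

static void demo_stats_init(struct demo_stats *st)
{
        mutex_init(&st->lock);
        seqcount_mutex_init(&st->seq, &st->lock);
}

/* Writer: fold a new raw counter value in, under the mutex. */
static void demo_stats_fold(struct demo_stats *st, u32 raw)
{
        mutex_lock(&st->lock);
        write_seqcount_begin(&st->seq);
        st->base += raw - st->last;
        st->last = raw;
        write_seqcount_end(&st->seq);
        mutex_unlock(&st->lock);
}

/* Reader: lockless, retried if a writer was active in the meantime. */
static u64 demo_stats_read(struct demo_stats *st, u32 raw)
{
        unsigned int start;
        u64 val;

        do {
                start = read_seqcount_begin(&st->seq);
                val = st->base + (u32)(raw - st->last);
        } while (read_seqcount_retry(&st->seq, start));

        return val;
}

Because the write side is short and mutex-serialized, read paths such as ndo_get_stats64() and the ethtool callbacks later in the patch never have to take stats_lock themselves.
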
*/ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, int nr_bds, bool force, u32 *sizep, int budget) { struct axidma_bd *cur_p; unsigned int status; + int i, packets = 0; dma_addr_t phys; - int i; for (i = 0; i < nr_bds; i++) { cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; @@ -701,8 +763,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); - if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) + if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) { napi_consume_skb(cur_p->skb, budget); + packets++; + } cur_p->app0 = 0; cur_p->app1 = 0; @@ -718,7 +782,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; } - return i; + if (!force) { + lp->tx_bd_ci += i; + if (lp->tx_bd_ci >= lp->tx_bd_num) + lp->tx_bd_ci %= lp->tx_bd_num; + } + + return packets; } /** @@ -891,13 +961,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget) u32 size = 0; int packets; - packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget); + packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false, + &size, budget); if (packets) { - lp->tx_bd_ci += packets; - if (lp->tx_bd_ci >= lp->tx_bd_num) - lp->tx_bd_ci %= lp->tx_bd_num; - u64_stats_update_begin(&lp->tx_stat_sync); u64_stats_add(&lp->tx_packets, packets); u64_stats_add(&lp->tx_bytes, size); @@ -1126,9 +1193,7 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget) csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { skb->ip_summed = CHECKSUM_UNNECESSARY; } - } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && - skb->protocol == htons(ETH_P_IP) && - skb->len > 64) { + } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); skb->ip_summed = CHECKSUM_COMPLETE; } @@ -1222,9 +1287,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) u32 cr = lp->tx_dma_cr; cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); - - napi_schedule(&lp->napi_tx); + if (napi_schedule_prep(&lp->napi_tx)) { + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + __napi_schedule(&lp->napi_tx); + } } return IRQ_HANDLED; @@ -1266,9 +1332,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) u32 cr = lp->rx_dma_cr; cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - - napi_schedule(&lp->napi_rx); + if (napi_schedule_prep(&lp->napi_rx)) { + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + __napi_schedule(&lp->napi_rx); + } } return IRQ_HANDLED; @@ -1297,7 +1364,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev) ndev->stats.rx_missed_errors++; if (pending & XAE_INT_RXRJECT_MASK) - ndev->stats.rx_frame_errors++; + ndev->stats.rx_dropped++; axienet_iow(lp, XAE_IS_OFFSET, pending); return IRQ_HANDLED; @@ -1516,8 +1583,6 @@ static int axienet_open(struct net_device *ndev) int ret; struct axienet_local *lp = netdev_priv(ndev); - dev_dbg(&ndev->dev, "%s\n", __func__); - /* When we do an Axi Ethernet reset, it resets the complete core * including the MDIO. MDIO must be disabled before resetting. * Hold MDIO bus lock to avoid MDIO accesses during the reset. 
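The TX/RX interrupt handlers above now gate the interrupt-mask write on napi_schedule_prep(), so device interrupts are only masked when this handler is really the one arming NAPI; if NAPI is already scheduled or being disabled, nothing is touched. A minimal sketch of that pattern (the private structure and the register helper are assumptions, not the driver's code):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_priv {
        struct napi_struct napi;
        void __iomem *regs;             /* mapped at probe time */
};

/* Assumed helper: mask this queue's completion/delay interrupts. */
static void demo_mask_irqs(struct demo_priv *priv)
{
        /* device-specific register write would go here */
}

static irqreturn_t demo_irq(int irq, void *data)
{
        struct demo_priv *priv = data;

        /* Only mask hardware interrupts if we actually scheduled NAPI;
         * otherwise the poll loop (or napi_disable()) owns that state.
         */
        if (napi_schedule_prep(&priv->napi)) {
                demo_mask_irqs(priv);
                __napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}
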
@@ -1534,6 +1599,9 @@ static int axienet_open(struct net_device *ndev) phylink_start(lp->phylink); + /* Start the statistics refresh work */ + schedule_delayed_work(&lp->stats_work, 0); + if (lp->use_dmaengine) { /* Enable interrupts for Axi Ethernet core (if defined) */ if (lp->eth_irq > 0) { @@ -1558,6 +1626,7 @@ static int axienet_open(struct net_device *ndev) if (lp->eth_irq > 0) free_irq(lp->eth_irq, ndev); err_phy: + cancel_delayed_work_sync(&lp->stats_work); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); return ret; @@ -1578,8 +1647,6 @@ static int axienet_stop(struct net_device *ndev) struct axienet_local *lp = netdev_priv(ndev); int i; - dev_dbg(&ndev->dev, "axienet_close()\n"); - if (!lp->use_dmaengine) { WRITE_ONCE(lp->stopping, true); flush_work(&lp->dma_err_task); @@ -1588,6 +1655,8 @@ static int axienet_stop(struct net_device *ndev) napi_disable(&lp->napi_rx); } + cancel_delayed_work_sync(&lp->stats_work); + phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); @@ -1662,6 +1731,7 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu) static void axienet_poll_controller(struct net_device *ndev) { struct axienet_local *lp = netdev_priv(ndev); + disable_irq(lp->tx_irq); disable_irq(lp->rx_irq); axienet_rx_irq(lp->tx_irq, ndev); @@ -1700,6 +1770,35 @@ axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = u64_stats_read(&lp->tx_packets); stats->tx_bytes = u64_stats_read(&lp->tx_bytes); } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); + + if (!(lp->features & XAE_FEATURE_STATS)) + return; + + do { + start = read_seqcount_begin(&lp->hw_stats_seqcount); + stats->rx_length_errors = + axienet_stat(lp, STAT_RX_LENGTH_ERRORS); + stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS); + stats->rx_frame_errors = + axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS); + stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) + + axienet_stat(lp, STAT_FRAGMENT_FRAMES) + + stats->rx_length_errors + + stats->rx_crc_errors + + stats->rx_frame_errors; + stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); + + stats->tx_aborted_errors = + axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS); + stats->tx_fifo_errors = + axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS); + stats->tx_window_errors = + axienet_stat(lp, STAT_TX_LATE_COLLISIONS); + stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) + + stats->tx_aborted_errors + + stats->tx_fifo_errors + + stats->tx_window_errors; + } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); } static const struct net_device_ops axienet_netdev_ops = { @@ -1992,6 +2091,213 @@ static int axienet_ethtools_nway_reset(struct net_device *dev) return phylink_ethtool_nway_reset(lp->phylink); } +static void axienet_ethtools_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct axienet_local *lp = netdev_priv(dev); + unsigned int start; + + do { + start = read_seqcount_begin(&lp->hw_stats_seqcount); + data[0] = axienet_stat(lp, STAT_RX_BYTES); + data[1] = axienet_stat(lp, STAT_TX_BYTES); + data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES); + data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES); + data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES); + data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES); + data[8] = axienet_stat(lp, STAT_USER_DEFINED0); + data[9] = axienet_stat(lp, STAT_USER_DEFINED1); + data[10] = axienet_stat(lp, STAT_USER_DEFINED2); + } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); +} + +static const char 
axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = { + "Received bytes", + "Transmitted bytes", + "RX Good VLAN Tagged Frames", + "TX Good VLAN Tagged Frames", + "TX Good PFC Frames", + "RX Good PFC Frames", + "User Defined Counter 0", + "User Defined Counter 1", + "User Defined Counter 2", +}; + +static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, axienet_ethtool_stats_strings, + sizeof(axienet_ethtool_stats_strings)); + break; + } +} + +static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset) +{ + struct axienet_local *lp = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + if (lp->features & XAE_FEATURE_STATS) + return ARRAY_SIZE(axienet_ethtool_stats_strings); + fallthrough; + default: + return -EOPNOTSUPP; + } +} + +static void +axienet_ethtools_get_pause_stats(struct net_device *dev, + struct ethtool_pause_stats *pause_stats) +{ + struct axienet_local *lp = netdev_priv(dev); + unsigned int start; + + if (!(lp->features & XAE_FEATURE_STATS)) + return; + + do { + start = read_seqcount_begin(&lp->hw_stats_seqcount); + pause_stats->tx_pause_frames = + axienet_stat(lp, STAT_TX_PAUSE_FRAMES); + pause_stats->rx_pause_frames = + axienet_stat(lp, STAT_RX_PAUSE_FRAMES); + } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); +} + +static void +axienet_ethtool_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct axienet_local *lp = netdev_priv(dev); + unsigned int start; + + if (!(lp->features & XAE_FEATURE_STATS)) + return; + + do { + start = read_seqcount_begin(&lp->hw_stats_seqcount); + mac_stats->FramesTransmittedOK = + axienet_stat(lp, STAT_TX_GOOD_FRAMES); + mac_stats->SingleCollisionFrames = + axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES); + mac_stats->MultipleCollisionFrames = + axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES); + mac_stats->FramesReceivedOK = + axienet_stat(lp, STAT_RX_GOOD_FRAMES); + mac_stats->FrameCheckSequenceErrors = + axienet_stat(lp, STAT_RX_FCS_ERRORS); + mac_stats->AlignmentErrors = + axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS); + mac_stats->FramesWithDeferredXmissions = + axienet_stat(lp, STAT_TX_DEFERRED_FRAMES); + mac_stats->LateCollisions = + axienet_stat(lp, STAT_TX_LATE_COLLISIONS); + mac_stats->FramesAbortedDueToXSColls = + axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS); + mac_stats->MulticastFramesXmittedOK = + axienet_stat(lp, STAT_TX_MULTICAST_FRAMES); + mac_stats->BroadcastFramesXmittedOK = + axienet_stat(lp, STAT_TX_BROADCAST_FRAMES); + mac_stats->FramesWithExcessiveDeferral = + axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL); + mac_stats->MulticastFramesReceivedOK = + axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); + mac_stats->BroadcastFramesReceivedOK = + axienet_stat(lp, STAT_RX_BROADCAST_FRAMES); + mac_stats->InRangeLengthErrors = + axienet_stat(lp, STAT_RX_LENGTH_ERRORS); + } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); +} + +static void +axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct axienet_local *lp = netdev_priv(dev); + unsigned int start; + + if (!(lp->features & XAE_FEATURE_STATS)) + return; + + do { + start = read_seqcount_begin(&lp->hw_stats_seqcount); + ctrl_stats->MACControlFramesTransmitted = + axienet_stat(lp, STAT_TX_CONTROL_FRAMES); + ctrl_stats->MACControlFramesReceived = + axienet_stat(lp, STAT_RX_CONTROL_FRAMES); + ctrl_stats->UnsupportedOpcodesReceived = + 
axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS); + } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); +} + +static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = { + { 64, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 16384 }, + { }, +}; + +static void +axienet_ethtool_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct axienet_local *lp = netdev_priv(dev); + unsigned int start; + + if (!(lp->features & XAE_FEATURE_STATS)) + return; + + do { + start = read_seqcount_begin(&lp->hw_stats_seqcount); + rmon_stats->undersize_pkts = + axienet_stat(lp, STAT_UNDERSIZE_FRAMES); + rmon_stats->oversize_pkts = + axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES); + rmon_stats->fragments = + axienet_stat(lp, STAT_FRAGMENT_FRAMES); + + rmon_stats->hist[0] = + axienet_stat(lp, STAT_RX_64_BYTE_FRAMES); + rmon_stats->hist[1] = + axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES); + rmon_stats->hist[2] = + axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES); + rmon_stats->hist[3] = + axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES); + rmon_stats->hist[4] = + axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES); + rmon_stats->hist[5] = + axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES); + rmon_stats->hist[6] = + rmon_stats->oversize_pkts; + + rmon_stats->hist_tx[0] = + axienet_stat(lp, STAT_TX_64_BYTE_FRAMES); + rmon_stats->hist_tx[1] = + axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES); + rmon_stats->hist_tx[2] = + axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES); + rmon_stats->hist_tx[3] = + axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES); + rmon_stats->hist_tx[4] = + axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES); + rmon_stats->hist_tx[5] = + axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES); + rmon_stats->hist_tx[6] = + axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES); + } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); + + *ranges = axienet_rmon_ranges; +} + static const struct ethtool_ops axienet_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS, @@ -2008,6 +2314,13 @@ static const struct ethtool_ops axienet_ethtool_ops = { .get_link_ksettings = axienet_ethtools_get_link_ksettings, .set_link_ksettings = axienet_ethtools_set_link_ksettings, .nway_reset = axienet_ethtools_nway_reset, + .get_ethtool_stats = axienet_ethtools_get_ethtool_stats, + .get_strings = axienet_ethtools_get_strings, + .get_sset_count = axienet_ethtools_get_sset_count, + .get_pause_stats = axienet_ethtools_get_pause_stats, + .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats, + .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats, + .get_rmon_stats = axienet_ethtool_get_rmon_stats, }; static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) @@ -2280,6 +2593,10 @@ static int axienet_probe(struct platform_device *pdev) u64_stats_init(&lp->rx_stat_sync); u64_stats_init(&lp->tx_stat_sync); + mutex_init(&lp->stats_lock); + seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock); + INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats); + lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); if (!lp->axi_clk) { /* For backward compatibility, if named AXI clock is not present, @@ -2320,42 +2637,35 @@ static int axienet_probe(struct platform_device *pdev) /* Setup checksum offload, but default to off if not specified */ lp->features = 0; + if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS) + lp->features |= 
XAE_FEATURE_STATS; + ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); if (!ret) { switch (value) { case 1: - lp->csum_offload_on_tx_path = - XAE_FEATURE_PARTIAL_TX_CSUM; lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; - /* Can checksum TCP/UDP over IPv4. */ - ndev->features |= NETIF_F_IP_CSUM; + /* Can checksum any contiguous range */ + ndev->features |= NETIF_F_HW_CSUM; break; case 2: - lp->csum_offload_on_tx_path = - XAE_FEATURE_FULL_TX_CSUM; lp->features |= XAE_FEATURE_FULL_TX_CSUM; /* Can checksum TCP/UDP over IPv4. */ ndev->features |= NETIF_F_IP_CSUM; break; - default: - lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; } } ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); if (!ret) { switch (value) { case 1: - lp->csum_offload_on_rx_path = - XAE_FEATURE_PARTIAL_RX_CSUM; lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; + ndev->features |= NETIF_F_RXCSUM; break; case 2: - lp->csum_offload_on_rx_path = - XAE_FEATURE_FULL_RX_CSUM; lp->features |= XAE_FEATURE_FULL_RX_CSUM; + ndev->features |= NETIF_F_RXCSUM; break; - default: - lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; } } /* For supporting jumbo frames, the Axi Ethernet hardware must have @@ -2405,7 +2715,7 @@ static int axienet_probe(struct platform_device *pdev) goto cleanup_clk; } - if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) { + if (!of_property_present(pdev->dev.of_node, "dmas")) { /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 56df37f8d50a4d..aef316278eb456 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1026,9 +1026,7 @@ static int ixp4xx_get_ts_info(struct net_device *dev, if (info->phc_index < 0) { info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + SOF_TIMESTAMPING_TX_SOFTWARE; return 0; } info->so_timestamping = diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 838e85ddec6710..7f611c74eb629b 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1194,7 +1194,6 @@ static void geneve_setup(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &geneve_type); - dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; @@ -1215,6 +1214,7 @@ static void geneve_setup(struct net_device *dev) netif_keep_dst(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; + dev->lltx = true; eth_hw_addr_random(dev); } diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 2e94d10348cceb..a60bfb1abb7f01 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1356,7 +1356,7 @@ static void gtp_link_setup(struct net_device *dev) dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->priv_flags |= IFF_NO_QUEUE; - dev->features |= NETIF_F_LLTX; + dev->lltx = true; netif_keep_dst(dev); dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 6ed38a3cdd734e..3bf6785f905739 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -37,8 +37,6 @@ #include #include -#define SIXPACK_VERSION "Revision: 0.3.0" - /* sixpack priority commands */ #define SIXP_SEOF 0x40 /* start and end of a 6pack frame */ #define 
SIXP_TX_URUN 0x48 /* transmit overrun */ @@ -88,22 +86,18 @@ struct sixpack { struct net_device *dev; /* easy for intr handling */ /* These are pointers to the malloc()ed frame buffers. */ - unsigned char *rbuff; /* receiver buffer */ int rcount; /* received chars counter */ unsigned char *xbuff; /* transmitter buffer */ unsigned char *xhead; /* next byte to XMIT */ int xleft; /* bytes left in XMIT queue */ - unsigned char raw_buf[4]; - unsigned char cooked_buf[400]; + u8 raw_buf[4]; + u8 cooked_buf[400]; unsigned int rx_count; unsigned int rx_count_cooked; spinlock_t rxlock; - int mtu; /* Our mtu (to spot changes!) */ - int buffsize; /* Max buffers sizes */ - unsigned long flags; /* Flag values/ mode etc */ unsigned char mode; /* 6pack mode */ @@ -113,8 +107,8 @@ struct sixpack { unsigned char slottime; unsigned char duplex; unsigned char led_state; - unsigned char status; - unsigned char status1; + u8 status; + u8 status1; unsigned char status2; unsigned char tx_enable; unsigned char tnc_state; @@ -128,7 +122,7 @@ struct sixpack { #define AX25_6PACK_HEADER_LEN 0 -static void sixpack_decode(struct sixpack *, const unsigned char[], int); +static void sixpack_decode(struct sixpack *, const u8 *, size_t); static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char); /* @@ -167,7 +161,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len) unsigned char *msg, *p = icp; int actual, count; - if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */ + if (len > AX25_MTU + 73) { msg = "oversized transmit packet!"; goto out_drop; } @@ -333,7 +327,7 @@ static void sp_bump(struct sixpack *sp, char cmd) { struct sk_buff *skb; int count; - unsigned char *ptr; + u8 *ptr; count = sp->rcount + 1; @@ -431,7 +425,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct sixpack *sp; - int count1; + size_t count1; if (!count) return; @@ -544,7 +538,7 @@ static inline int tnc_init(struct sixpack *sp) */ static int sixpack_open(struct tty_struct *tty) { - char *rbuff = NULL, *xbuff = NULL; + char *xbuff = NULL; struct net_device *dev; struct sixpack *sp; unsigned long len; @@ -574,10 +568,8 @@ static int sixpack_open(struct tty_struct *tty) len = dev->mtu * 2; - rbuff = kmalloc(len + 4, GFP_KERNEL); xbuff = kmalloc(len + 4, GFP_KERNEL); - - if (rbuff == NULL || xbuff == NULL) { + if (xbuff == NULL) { err = -ENOBUFS; goto out_free; } @@ -586,11 +578,8 @@ static int sixpack_open(struct tty_struct *tty) sp->tty = tty; - sp->rbuff = rbuff; sp->xbuff = xbuff; - sp->mtu = AX25_MTU + 73; - sp->buffsize = len; sp->rcount = 0; sp->rx_count = 0; sp->rx_count_cooked = 0; @@ -631,7 +620,6 @@ static int sixpack_open(struct tty_struct *tty) out_free: kfree(xbuff); - kfree(rbuff); free_netdev(dev); @@ -676,7 +664,6 @@ static void sixpack_close(struct tty_struct *tty) del_timer_sync(&sp->resync_t); /* Free all 6pack frame buffers after unreg. 
*/ - kfree(sp->rbuff); kfree(sp->xbuff); free_netdev(sp->dev); @@ -756,21 +743,14 @@ static struct tty_ldisc_ops sp_ldisc = { /* Initialize 6pack control device -- register 6pack line discipline */ -static const char msg_banner[] __initconst = KERN_INFO \ - "AX.25: 6pack driver, " SIXPACK_VERSION "\n"; -static const char msg_regfail[] __initconst = KERN_ERR \ - "6pack: can't register line discipline (err = %d)\n"; - static int __init sixpack_init_driver(void) { int status; - printk(msg_banner); - /* Register the provided line protocol discipline */ status = tty_register_ldisc(&sp_ldisc); if (status) - printk(msg_regfail, status); + pr_err("6pack: can't register line discipline (err = %d)\n", status); return status; } @@ -820,9 +800,9 @@ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw, /* decode 4 sixpack-encoded bytes into 3 data bytes */ -static void decode_data(struct sixpack *sp, unsigned char inbyte) +static void decode_data(struct sixpack *sp, u8 inbyte) { - unsigned char *buf; + u8 *buf; if (sp->rx_count != 3) { sp->raw_buf[sp->rx_count++] = inbyte; @@ -848,9 +828,9 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) /* identify and execute a 6pack priority command byte */ -static void decode_prio_command(struct sixpack *sp, unsigned char cmd) +static void decode_prio_command(struct sixpack *sp, u8 cmd) { - int actual; + ssize_t actual; if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */ @@ -898,9 +878,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) /* identify and execute a standard 6pack command byte */ -static void decode_std_command(struct sixpack *sp, unsigned char cmd) +static void decode_std_command(struct sixpack *sp, u8 cmd) { - unsigned char checksum = 0, rest = 0; + u8 checksum = 0, rest = 0; short i; switch (cmd & SIXP_CMD_MASK) { /* normal command */ @@ -948,10 +928,10 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd) /* decode a 6pack packet */ static void -sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count) +sixpack_decode(struct sixpack *sp, const u8 *pre_rbuff, size_t count) { - unsigned char inbyte; - int count1; + size_t count1; + u8 inbyte; for (count1 = 0; count1 < count; count1++) { inbyte = pre_rbuff[count1]; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 83a16d10eedbc7..bac1bb69d63a11 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -458,7 +458,7 @@ static void bpq_setup(struct net_device *dev) dev->needs_free_netdev = true; dev->flags = 0; - dev->features = NETIF_F_LLTX; /* Allow recursion */ + dev->lltx = true; /* Allow recursion */ #if IS_ENABLED(CONFIG_AX25) dev->header_ops = &ax25_header_ops; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 810977952f950b..e690b95b1bbb4c 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -882,7 +882,7 @@ struct nvsp_message { #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_CHANNEL_MAX 64 -#define VRSS_CHANNEL_DEFAULT 8 +#define VRSS_CHANNEL_DEFAULT 16 #define RNDIS_MAX_PKT_DEFAULT 8 #define RNDIS_PKT_ALIGN_DEFAULT 8 diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 4a9522689fa4fa..e01c5997a551c6 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) xdp.command = XDP_SETUP_PROG; 
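The NETIF_F_LLTX removals in this series (geneve and gtp earlier, bpqether above, and several more virtual devices below) all follow the same recipe: drop the feature bit and set the new dev->lltx private flag in the device setup path. A hedged sketch of what a converted setup function looks like (device type and feature choices here are illustrative):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void demo_dev_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
        dev->priv_flags |= IFF_NO_QUEUE;

        /* Formerly: dev->features |= NETIF_F_LLTX; */
        dev->lltx = true;               /* driver handles its own TX locking */
}
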
xdp.prog = prog; - ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp); + ret = dev_xdp_propagate(vf_netdev, &xdp); if (ret && prog) bpf_prog_put(prog); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 44142245343d95..153b97f8ec0df9 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -987,7 +987,8 @@ struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev) dev_info->bprog = prog; } } else { - dev_info->num_chn = VRSS_CHANNEL_DEFAULT; + dev_info->num_chn = max(VRSS_CHANNEL_DEFAULT, + netif_get_num_default_rss_queues()); dev_info->send_sections = NETVSC_DEFAULT_TX; dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE; dev_info->recv_sections = NETVSC_DEFAULT_RX; diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 95da876c561384..1075e24b11defc 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -101,6 +101,7 @@ config IEEE802154_CA8210_DEBUGFS config IEEE802154_MCR20A tristate "MCR20A transceiver driver" + select REGMAP_SPI depends on IEEE802154_DRIVERS && MAC802154 depends on SPI help diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index a94d8dd71aad9a..2b7034193a00b8 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 433fb583920310..020d392a98b69d 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -1302,16 +1302,13 @@ mcr20a_probe(struct spi_device *spi) irq_type = IRQF_TRIGGER_FALLING; ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr, - irq_type, dev_name(&spi->dev), lp); + irq_type | IRQF_NO_AUTOEN, dev_name(&spi->dev), lp); if (ret) { dev_err(&spi->dev, "could not request_irq for mcr20a\n"); ret = -ENODEV; goto free_dev; } - /* disable_irq by default and wait for starting hardware */ - disable_irq(spi->irq); - ret = ieee802154_register_hw(hw); if (ret) { dev_crit(&spi->dev, "ieee802154_register_hw failed\n"); diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index 65fd14da0f86f2..c572da9e9bc465 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -242,11 +242,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data) int ret; clk = clk_get(dev, "core"); - if (IS_ERR(clk)) { - dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n"); - - return ERR_CAST(clk); - } + if (IS_ERR(clk)) + return dev_err_cast_probe(dev, clk, "error getting core clock\n"); ret = clk_set_rate(clk, data->core_clock_rate); if (ret) { diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index fef4eff7753a7a..b1afcb8740de12 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -2,6 +2,8 @@ /* Copyright (c) 2014 Mahesh Bandewar */ +#include + #include "ipvlan.h" static u32 ipvlan_jhash_secret __read_mostly; @@ -420,7 +422,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) int err, ret = NET_XMIT_DROP; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, - .flowi4_tos = RT_TOS(ip4h->tos), + .flowi4_tos = ip4h->tos & INET_DSCP_MASK, .flowi4_flags = FLOWI_FLAG_ANYSRC, .flowi4_mark = skb->mark, .daddr = ip4h->daddr, diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 094f44dac5c85f..ee2c3cf4df365e 100644 --- 
a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -114,7 +114,7 @@ static void ipvlan_port_destroy(struct net_device *dev) NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL) #define IPVLAN_ALWAYS_ON \ - (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED) + (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_VLAN_CHALLENGED) #define IPVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ @@ -141,6 +141,7 @@ static int ipvlan_init(struct net_device *dev) dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->hw_enc_features |= dev->features; + dev->lltx = true; netif_inherit_tso_max(dev, phy_dev); dev->hard_header_len = phy_dev->hard_header_len; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 2b486e7c749ca3..1993b90b1a5f90 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -171,6 +171,8 @@ static void gen_lo_setup(struct net_device *dev, dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ dev->flags = IFF_LOOPBACK; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; + dev->lltx = true; + dev->netns_local = true; netif_keep_dst(dev); dev->hw_features = NETIF_F_GSO_SOFTWARE; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST @@ -179,8 +181,6 @@ static void gen_lo_setup(struct net_device *dev, | NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA - | NETIF_F_LLTX - | NETIF_F_NETNS_LOCAL | NETIF_F_VLAN_CHALLENGED | NETIF_F_LOOPBACK; dev->ethtool_ops = eth_ops; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 2da70bc3dd8695..12d1b205f6d117 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3550,7 +3550,8 @@ static int macsec_dev_init(struct net_device *dev) return err; dev->features = real_dev->features & MACSEC_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->lltx = true; dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; macsec_set_head_tail_room(dev); @@ -3581,7 +3582,6 @@ static netdev_features_t macsec_fix_features(struct net_device *dev, features &= (real_dev->features & MACSEC_FEATURES) | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; - features |= NETIF_F_LLTX; return features; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 24298a33e0e948..cf18e66de142c9 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -900,7 +900,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) -#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX) +#define ALWAYS_ON_FEATURES ALWAYS_ON_OFFLOADS #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ @@ -932,6 +932,7 @@ static int macvlan_init(struct net_device *dev) dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->vlan_features |= ALWAYS_ON_OFFLOADS; dev->hw_enc_features |= dev->features; + dev->lltx = true; netif_inherit_tso_max(dev, lowerdev); dev->hard_header_len = lowerdev->hard_header_len; macvlan_set_lockdep_class(dev); @@ -1213,7 +1214,8 @@ void macvlan_common_setup(struct net_device *dev) dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); - dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN; + dev->priv_flags |= IFF_UNICAST_FLT; + dev->change_proto_down = true; dev->netdev_ops = &macvlan_netdev_ops; dev->needs_free_netdev = true; dev->priv_destructor = 
macvlan_dev_free; diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c index 8e989c157caaf5..1bc87a0626860f 100644 --- a/drivers/net/mctp/mctp-i3c.c +++ b/drivers/net/mctp/mctp-i3c.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c index f39bbe255497df..e63720ec32384b 100644 --- a/drivers/net/mctp/mctp-serial.c +++ b/drivers/net/mctp/mctp-serial.c @@ -64,18 +64,18 @@ struct mctp_serial { u16 txfcs, rxfcs, rxfcs_rcvd; unsigned int txlen, rxlen; unsigned int txpos, rxpos; - unsigned char txbuf[BUFSIZE], + u8 txbuf[BUFSIZE], rxbuf[BUFSIZE]; }; -static bool needs_escape(unsigned char c) +static bool needs_escape(u8 c) { return c == BYTE_ESC || c == BYTE_FRAME; } -static int next_chunk_len(struct mctp_serial *dev) +static unsigned int next_chunk_len(struct mctp_serial *dev) { - int i; + unsigned int i; /* either we have no bytes to send ... */ if (dev->txpos == dev->txlen) @@ -99,7 +99,7 @@ static int next_chunk_len(struct mctp_serial *dev) return i; } -static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len) +static ssize_t write_chunk(struct mctp_serial *dev, u8 *buf, size_t len) { return dev->tty->ops->write(dev->tty, buf, len); } @@ -108,9 +108,10 @@ static void mctp_serial_tx_work(struct work_struct *work) { struct mctp_serial *dev = container_of(work, struct mctp_serial, tx_work); - unsigned char c, buf[3]; unsigned long flags; - int len, txlen; + ssize_t txlen; + unsigned int len; + u8 c, buf[3]; spin_lock_irqsave(&dev->lock, flags); @@ -293,7 +294,7 @@ static void mctp_serial_rx(struct mctp_serial *dev) dev->netdev->stats.rx_bytes += dev->rxlen; } -static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c) +static void mctp_serial_push_header(struct mctp_serial *dev, u8 c) { switch (dev->rxpos) { case 0: @@ -323,7 +324,7 @@ static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c) } } -static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c) +static void mctp_serial_push_trailer(struct mctp_serial *dev, u8 c) { switch (dev->rxpos) { case 0: @@ -347,7 +348,7 @@ static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c) } } -static void mctp_serial_push(struct mctp_serial *dev, unsigned char c) +static void mctp_serial_push(struct mctp_serial *dev, u8 c) { switch (dev->rxstate) { case STATE_IDLE: @@ -394,7 +395,7 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c, const u8 *f, size_t len) { struct mctp_serial *dev = tty->disc_data; - int i; + size_t i; if (!netif_running(dev->netdev)) return; diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index fd02f5cbc853a6..b156493d708415 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -104,7 +105,7 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, return rc; } - dev_dbg(&mdio->dev, "registered phy %p fwnode at address %i\n", + dev_dbg(&mdio->dev, "registered phy fwnode %pfw at address %i\n", child, addr); return 0; } diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c index de08419d0c9852..b70e6d1ad42944 100644 --- a/drivers/net/mdio/mdio-mux-mmioreg.c +++ b/drivers/net/mdio/mdio-mux-mmioreg.c @@ -96,7 +96,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child, static int 
mdio_mux_mmioreg_probe(struct platform_device *pdev) { - struct device_node *np2, *np = pdev->dev.of_node; + struct device_node *np = pdev->dev.of_node; struct mdio_mux_mmioreg_state *s; struct resource res; const __be32 *iprop; @@ -109,52 +109,42 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) return -ENOMEM; ret = of_address_to_resource(np, 0, &res); - if (ret) { - dev_err(&pdev->dev, "could not obtain memory map for node %pOF\n", - np); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, + "could not obtain memory map for node %pOF\n", np); s->phys = res.start; s->iosize = resource_size(&res); if (s->iosize != sizeof(uint8_t) && s->iosize != sizeof(uint16_t) && - s->iosize != sizeof(uint32_t)) { - dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n"); - return -EINVAL; - } + s->iosize != sizeof(uint32_t)) + return dev_err_probe(&pdev->dev, -EINVAL, + "only 8/16/32-bit registers are supported\n"); iprop = of_get_property(np, "mux-mask", &len); - if (!iprop || len != sizeof(uint32_t)) { - dev_err(&pdev->dev, "missing or invalid mux-mask property\n"); - return -ENODEV; - } - if (be32_to_cpup(iprop) >= BIT(s->iosize * 8)) { - dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n"); - return -EINVAL; - } + if (!iprop || len != sizeof(uint32_t)) + return dev_err_probe(&pdev->dev, -ENODEV, + "missing or invalid mux-mask property\n"); + if (be32_to_cpup(iprop) >= BIT(s->iosize * 8)) + return dev_err_probe(&pdev->dev, -EINVAL, + "only 8/16/32-bit registers are supported\n"); s->mask = be32_to_cpup(iprop); /* * Verify that the 'reg' property of each child MDIO bus does not * set any bits outside of the 'mask'. */ - for_each_available_child_of_node(np, np2) { + for_each_available_child_of_node_scoped(np, np2) { u64 reg; - if (of_property_read_reg(np2, 0, ®, NULL)) { - dev_err(&pdev->dev, "mdio-mux child node %pOF is " - "missing a 'reg' property\n", np2); - of_node_put(np2); - return -ENODEV; - } - if ((u32)reg & ~s->mask) { - dev_err(&pdev->dev, "mdio-mux child node %pOF has " - "a 'reg' value with unmasked bits\n", - np2); - of_node_put(np2); - return -ENODEV; - } + if (of_property_read_reg(np2, 0, ®, NULL)) + return dev_err_probe(&pdev->dev, -ENODEV, + "mdio-mux child node %pOF is missing a 'reg' property\n", + np2); + if ((u32)reg & ~s->mask) + return dev_err_probe(&pdev->dev, -ENODEV, + "mdio-mux child node %pOF has a 'reg' value with unmasked bits\n", + np2); } ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node, diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index 08e607f62e102f..2f4fc664d2e121 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -390,7 +390,7 @@ EXPORT_SYMBOL(of_phy_get_and_connect); bool of_phy_is_fixed_link(struct device_node *np) { struct device_node *dn; - int len, err; + int err; const char *managed; /* New binding */ @@ -405,8 +405,7 @@ bool of_phy_is_fixed_link(struct device_node *np) return true; /* Old binding */ - if (of_get_property(np, "fixed-link", &len) && - len == (5 * sizeof(__be32))) + if (of_property_count_u32_elems(np, "fixed-link") == 5) return true; return false; diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 963d8b4af28d79..54c8b9d5b5fcf4 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -731,10 +731,10 @@ struct failover *net_failover_create(struct net_device *standby_dev) IFF_TX_SKB_SHARING); /* don't acquire failover netdev's netif_tx_lock when transmitting */ - failover_dev->features |= 
NETIF_F_LLTX; + failover_dev->lltx = true; /* Don't allow failover devices to change network namespaces. */ - failover_dev->features |= NETIF_F_NETNS_LOCAL; + failover_dev->netns_local = true; failover_dev->hw_features = FAILOVER_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 9c09293b52588d..01cf33fa75036c 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -37,8 +37,9 @@ #include #include #include +#include -MODULE_AUTHOR("Maintainer: Matt Mackall "); +MODULE_AUTHOR("Matt Mackall "); MODULE_DESCRIPTION("Console driver for network interfaces"); MODULE_LICENSE("GPL"); @@ -72,9 +73,16 @@ __setup("netconsole=", option_setup); /* Linked list of all configured targets */ static LIST_HEAD(target_list); +/* target_cleanup_list is used to track targets that need to be cleaned outside + * of target_list_lock. It should be cleaned in the same function it is + * populated. + */ +static LIST_HEAD(target_cleanup_list); /* This needs to be a spinlock because write_msg() cannot sleep */ static DEFINE_SPINLOCK(target_list_lock); +/* This needs to be a mutex because netpoll_cleanup might sleep */ +static DEFINE_MUTEX(target_cleanup_list_lock); /* * Console driver for extended netconsoles. Registered on the first use to @@ -210,6 +218,33 @@ static struct netconsole_target *alloc_and_init(void) return nt; } +/* Clean up every target in the cleanup_list and move the clean targets back to + * the main target_list. + */ +static void netconsole_process_cleanups_core(void) +{ + struct netconsole_target *nt, *tmp; + unsigned long flags; + + /* The cleanup needs RTNL locked */ + ASSERT_RTNL(); + + mutex_lock(&target_cleanup_list_lock); + list_for_each_entry_safe(nt, tmp, &target_cleanup_list, list) { + /* all entries in the cleanup_list needs to be disabled */ + WARN_ON_ONCE(nt->enabled); + do_netpoll_cleanup(&nt->np); + /* moved the cleaned target to target_list. Need to hold both + * locks + */ + spin_lock_irqsave(&target_list_lock, flags); + list_move(&nt->list, &target_list); + spin_unlock_irqrestore(&target_list_lock, flags); + } + WARN_ON_ONCE(!list_empty(&target_cleanup_list)); + mutex_unlock(&target_cleanup_list_lock); +} + #ifdef CONFIG_NETCONSOLE_DYNAMIC /* @@ -246,6 +281,19 @@ static struct netconsole_target *to_target(struct config_item *item) struct netconsole_target, group); } +/* Do the list cleanup with the rtnl lock hold. rtnl lock is necessary because + * netdev might be cleaned-up by calling __netpoll_cleanup(), + */ +static void netconsole_process_cleanups(void) +{ + /* rtnl lock is called here, because it has precedence over + * target_cleanup_list_lock mutex and target_cleanup_list + */ + rtnl_lock(); + netconsole_process_cleanups_core(); + rtnl_unlock(); +} + /* Get rid of possible trailing newline, returning the new length */ static void trim_newline(char *s, size_t maxlen) { @@ -336,14 +384,14 @@ static ssize_t enabled_store(struct config_item *item, struct netconsole_target *nt = to_target(item); unsigned long flags; bool enabled; - int err; + ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); - err = kstrtobool(buf, &enabled); - if (err) + ret = kstrtobool(buf, &enabled); + if (ret) goto out_unlock; - err = -EINVAL; + ret = -EINVAL; if (enabled == nt->enabled) { pr_info("network logging has already %s\n", nt->enabled ? 
"started" : "stopped"); @@ -365,8 +413,8 @@ static ssize_t enabled_store(struct config_item *item, */ netpoll_print_options(&nt->np); - err = netpoll_setup(&nt->np); - if (err) + ret = netpoll_setup(&nt->np); + if (ret) goto out_unlock; nt->enabled = true; @@ -376,17 +424,23 @@ static ssize_t enabled_store(struct config_item *item, * otherwise we might end up in write_msg() with * nt->np.dev == NULL and nt->enabled == true */ + mutex_lock(&target_cleanup_list_lock); spin_lock_irqsave(&target_list_lock, flags); nt->enabled = false; + /* Remove the target from the list, while holding + * target_list_lock + */ + list_move(&nt->list, &target_cleanup_list); spin_unlock_irqrestore(&target_list_lock, flags); - netpoll_cleanup(&nt->np); + mutex_unlock(&target_cleanup_list_lock); } - mutex_unlock(&dynamic_netconsole_mutex); - return strnlen(buf, count); + ret = strnlen(buf, count); + /* Deferred cleanup */ + netconsole_process_cleanups(); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); - return err; + return ret; } static ssize_t release_store(struct config_item *item, const char *buf, @@ -394,27 +448,26 @@ static ssize_t release_store(struct config_item *item, const char *buf, { struct netconsole_target *nt = to_target(item); bool release; - int err; + ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); - err = -EINVAL; + ret = -EINVAL; goto out_unlock; } - err = kstrtobool(buf, &release); - if (err) + ret = kstrtobool(buf, &release); + if (ret) goto out_unlock; nt->release = release; - mutex_unlock(&dynamic_netconsole_mutex); - return strnlen(buf, count); + ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); - return err; + return ret; } static ssize_t extended_store(struct config_item *item, const char *buf, @@ -422,27 +475,25 @@ static ssize_t extended_store(struct config_item *item, const char *buf, { struct netconsole_target *nt = to_target(item); bool extended; - int err; + ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); - err = -EINVAL; + ret = -EINVAL; goto out_unlock; } - err = kstrtobool(buf, &extended); - if (err) + ret = kstrtobool(buf, &extended); + if (ret) goto out_unlock; nt->extended = extended; - - mutex_unlock(&dynamic_netconsole_mutex); - return strnlen(buf, count); + ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); - return err; + return ret; } static ssize_t dev_name_store(struct config_item *item, const char *buf, @@ -469,7 +520,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); - int rv = -EINVAL; + ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { @@ -478,21 +529,20 @@ static ssize_t local_port_store(struct config_item *item, const char *buf, goto out_unlock; } - rv = kstrtou16(buf, 10, &nt->np.local_port); - if (rv < 0) + ret = kstrtou16(buf, 10, &nt->np.local_port); + if (ret < 0) goto out_unlock; - mutex_unlock(&dynamic_netconsole_mutex); - return strnlen(buf, count); + ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); - return rv; + return ret; } static ssize_t remote_port_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); - int rv = -EINVAL; + 
ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { @@ -501,20 +551,20 @@ static ssize_t remote_port_store(struct config_item *item, goto out_unlock; } - rv = kstrtou16(buf, 10, &nt->np.remote_port); - if (rv < 0) + ret = kstrtou16(buf, 10, &nt->np.remote_port); + if (ret < 0) goto out_unlock; - mutex_unlock(&dynamic_netconsole_mutex); - return strnlen(buf, count); + ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); - return rv; + return ret; } static ssize_t local_ip_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); + ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { @@ -541,17 +591,17 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf, goto out_unlock; } - mutex_unlock(&dynamic_netconsole_mutex); - return strnlen(buf, count); + ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); - return -EINVAL; + return ret; } static ssize_t remote_ip_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); + ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { @@ -578,11 +628,10 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf, goto out_unlock; } - mutex_unlock(&dynamic_netconsole_mutex); - return strnlen(buf, count); + ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); - return -EINVAL; + return ret; } static ssize_t remote_mac_store(struct config_item *item, const char *buf, @@ -590,6 +639,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf, { struct netconsole_target *nt = to_target(item); u8 remote_mac[ETH_ALEN]; + ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { @@ -604,11 +654,10 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf, goto out_unlock; memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); - mutex_unlock(&dynamic_netconsole_mutex); - return strnlen(buf, count); + ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); - return -EINVAL; + return ret; } struct userdatum { @@ -685,7 +734,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf, struct userdatum *udm = to_userdatum(item); struct netconsole_target *nt; struct userdata *ud; - int ret; + ssize_t ret; if (count > MAX_USERDATA_VALUE_LENGTH) return -EMSGSIZE; @@ -700,9 +749,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf, ud = to_userdata(item->ci_parent); nt = userdata_to_target(ud); update_userdata(nt); - - mutex_unlock(&dynamic_netconsole_mutex); - return count; + ret = count; out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; @@ -778,7 +825,7 @@ static struct configfs_group_operations userdata_ops = { .drop_item = userdatum_drop, }; -static struct config_item_type userdata_type = { +static const struct config_item_type userdata_type = { .ct_item_ops = &userdatum_ops, .ct_group_ops = &userdata_ops, .ct_attrs = userdata_attrs, @@ -950,7 +997,7 @@ static int netconsole_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { unsigned long flags; - struct netconsole_target *nt; + struct netconsole_target *nt, *tmp; struct net_device *dev = netdev_notifier_info_to_dev(ptr); bool stopped = false; @@ -958,9 +1005,9 @@ static int netconsole_netdev_event(struct notifier_block *this, event == 
NETDEV_RELEASE || event == NETDEV_JOIN)) goto done; + mutex_lock(&target_cleanup_list_lock); spin_lock_irqsave(&target_list_lock, flags); -restart: - list_for_each_entry(nt, &target_list, list) { + list_for_each_entry_safe(nt, tmp, &target_list, list) { netconsole_target_get(nt); if (nt->np.dev == dev) { switch (event) { @@ -970,25 +1017,16 @@ static int netconsole_netdev_event(struct notifier_block *this, case NETDEV_RELEASE: case NETDEV_JOIN: case NETDEV_UNREGISTER: - /* rtnl_lock already held - * we might sleep in __netpoll_cleanup() - */ nt->enabled = false; - spin_unlock_irqrestore(&target_list_lock, flags); - - __netpoll_cleanup(&nt->np); - - spin_lock_irqsave(&target_list_lock, flags); - netdev_put(nt->np.dev, &nt->np.dev_tracker); - nt->np.dev = NULL; + list_move(&nt->list, &target_cleanup_list); stopped = true; - netconsole_target_put(nt); - goto restart; } } netconsole_target_put(nt); } spin_unlock_irqrestore(&target_list_lock, flags); + mutex_unlock(&target_cleanup_list_lock); + if (stopped) { const char *msg = "had an event"; @@ -1007,6 +1045,11 @@ static int netconsole_netdev_event(struct notifier_block *this, dev->name, msg); } + /* Process target_cleanup_list entries. By the end, target_cleanup_list + * should be empty + */ + netconsole_process_cleanups_core(); + done: return NOTIFY_DONE; } @@ -1215,11 +1258,18 @@ static struct netconsole_target *alloc_param_target(char *target_config, goto fail; err = netpoll_setup(&nt->np); - if (err) - goto fail; - + if (err) { + pr_err("Not enabling netconsole for %s%d. Netpoll setup failed\n", + NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count); + if (!IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) + /* only fail if dynamic reconfiguration is set, + * otherwise, keep the target in the list, but disabled. + */ + goto fail; + } else { + nt->enabled = true; + } populate_configfs_item(nt, cmdline_count); - nt->enabled = true; return nt; diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index a1f91ff8ec5688..41e80f78b31602 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -1414,7 +1414,6 @@ static ssize_t nsim_nexthop_bucket_activity_write(struct file *file, static const struct file_operations nsim_nexthop_bucket_activity_fops = { .open = simple_open, .write = nsim_nexthop_bucket_activity_write, - .llseek = no_llseek, .owner = THIS_MODULE, }; diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index 16789cd446e9e4..059269557d9264 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -65,6 +65,7 @@ static struct netkit *netkit_priv(const struct net_device *dev) static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct netkit *nk = netkit_priv(dev); enum netkit_action ret = READ_ONCE(nk->policy); netdev_tx_t ret_dev = NET_XMIT_SUCCESS; @@ -72,6 +73,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device *peer; int len = skb->len; + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); rcu_read_lock(); peer = rcu_dereference(nk->peer); if (unlikely(!peer || !(peer->flags & IFF_UP) || @@ -110,6 +112,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) break; } rcu_read_unlock(); + bpf_net_ctx_clear(bpf_net_ctx); return ret_dev; } @@ -255,11 +258,13 @@ static void netkit_setup(struct net_device *dev) dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->priv_flags |= IFF_PHONY_HEADROOM; dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_DISABLE_NETPOLL; 
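The netconsole rework above replaces the old "unlock, clean up, restart the scan" dance in the notifier with a two-list scheme: under the spinlock a dying target is only moved to target_cleanup_list, and the sleeping netpoll teardown happens later under a mutex (plus RTNL in the real driver). A generic sketch of that deferred-cleanup pattern with illustrative names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct demo_target {
        struct list_head list;
};

static LIST_HEAD(demo_active);
static LIST_HEAD(demo_cleanup);
static DEFINE_SPINLOCK(demo_active_lock);       /* protects demo_active; also taken in atomic context */
static DEFINE_MUTEX(demo_cleanup_lock);         /* protects demo_cleanup; cleanup may sleep */

static void demo_register(struct demo_target *t)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_active_lock, flags);
        list_add_tail(&t->list, &demo_active);
        spin_unlock_irqrestore(&demo_active_lock, flags);
}

/* Detach a dying entry without sleeping under the spinlock; the mutex
 * orders this against the cleanup pass below.
 */
static void demo_retire(struct demo_target *t)
{
        unsigned long flags;

        mutex_lock(&demo_cleanup_lock);
        spin_lock_irqsave(&demo_active_lock, flags);
        list_move(&t->list, &demo_cleanup);
        spin_unlock_irqrestore(&demo_active_lock, flags);
        mutex_unlock(&demo_cleanup_lock);
}

/* Run the teardown that may sleep, outside the spinlock. */
static void demo_process_cleanups(void)
{
        struct demo_target *t, *tmp;

        mutex_lock(&demo_cleanup_lock);
        list_for_each_entry_safe(t, tmp, &demo_cleanup, list) {
                /* sleeping teardown (do_netpoll_cleanup() in the real code) */
                list_del_init(&t->list);
        }
        mutex_unlock(&demo_cleanup_lock);
}

Unlike this simplified sketch, the patch moves cleaned targets back onto target_list so they can be re-enabled, but the locking order (mutex outside, spinlock inside) is the same.
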
+ dev->lltx = true; dev->ethtool_ops = &netkit_ethtool_ops; dev->netdev_ops = &netkit_netdev_ops; - dev->features |= netkit_features | NETIF_F_LLTX; + dev->features |= netkit_features; dev->hw_features = netkit_features; dev->hw_enc_features = netkit_features; dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index e5a0987a263e55..8bfd4ee5a8c4fb 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -63,13 +63,13 @@ static void nlmon_setup(struct net_device *dev) { dev->type = ARPHRD_NETLINK; dev->priv_flags |= IFF_NO_QUEUE; + dev->lltx = true; dev->netdev_ops = &nlmon_ops; dev->ethtool_ops = &nlmon_ethtool_ops; dev->needs_free_netdev = true; - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | - NETIF_F_HIGHDMA | NETIF_F_LLTX; + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; dev->flags = IFF_NOARP; dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS; diff --git a/drivers/net/pcs/pcs-xpcs-wx.c b/drivers/net/pcs/pcs-xpcs-wx.c index 19c75886f070ea..5f5cd3596cb846 100644 --- a/drivers/net/pcs/pcs-xpcs-wx.c +++ b/drivers/net/pcs/pcs-xpcs-wx.c @@ -109,7 +109,7 @@ static void txgbe_pma_config_1g(struct dw_xpcs *xpcs) txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0); val = txgbe_read_pma(xpcs, TXGBE_RX_GEN_CTL3); val = u16_replace_bits(val, 0x4, TXGBE_RX_GEN_CTL3_LOS_TRSHLD0); - txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); + txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL3, val); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x20); txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0x46); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 7fddc8306d8226..01b235b3bb7e80 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -44,6 +44,9 @@ config LED_TRIGGER_PHY Mbps OR Gbps OR link for any speed known to the PHY. +config OPEN_ALLIANCE_HELPERS + bool + config PHYLIB_LEDS def_bool OF depends on LEDS_CLASS=y || LEDS_CLASS=PHYLIB @@ -109,6 +112,13 @@ config ADIN1100_PHY Currently supports the: - ADIN1100 - Robust,Industrial, Low Power 10BASE-T1L Ethernet PHY +config AMCC_QT2025_PHY + tristate "AMCC QT2025 PHY" + depends on RUST_PHYLIB_ABSTRACTIONS + depends on RUST_FW_LOADER_ABSTRACTIONS + help + Adds support for the Applied Micro Circuits Corporation QT2025 PHY. 
+ source "drivers/net/phy/aquantia/Kconfig" config AX88796B_PHY @@ -414,6 +424,7 @@ config DP83TD510_PHY config DP83TG720_PHY tristate "Texas Instruments DP83TG720 Ethernet 1000Base-T1 PHY" + select OPEN_ALLIANCE_HELPERS help The DP83TG720S-Q1 is an automotive Ethernet physical layer transceiver compliant with IEEE 802.3bp and Open Alliance diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 202ed7f450da6f..90f886844381d0 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -2,7 +2,7 @@ # Makefile for Linux PHY drivers libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ - linkmode.o + linkmode.o phy_link_topology.o mdio-bus-y += mdio_bus.o mdio_device.o ifdef CONFIG_MDIO_DEVICE @@ -22,6 +22,7 @@ endif obj-$(CONFIG_MDIO_DEVRES) += mdio_devres.o libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o +libphy-$(CONFIG_OPEN_ALLIANCE_HELPERS) += open_alliance_helpers.o obj-$(CONFIG_PHYLINK) += phylink.o obj-$(CONFIG_PHYLIB) += libphy.o @@ -36,6 +37,7 @@ obj-$(CONFIG_ADIN_PHY) += adin.o obj-$(CONFIG_ADIN1100_PHY) += adin1100.o obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o obj-$(CONFIG_AMD_PHY) += amd.o +obj-$(CONFIG_AMCC_QT2025_PHY) += qt2025.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia/ ifdef CONFIG_AX88796B_RUST_PHY obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c index 3cdc8c6b30b648..8d076b9609fdbd 100644 --- a/drivers/net/phy/air_en8811h.c +++ b/drivers/net/phy/air_en8811h.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #define EN8811H_PHY_ID 0x03a2a411 diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c index 524627a36c6fc2..dab3af80593f51 100644 --- a/drivers/net/phy/aquantia/aquantia_firmware.c +++ b/drivers/net/phy/aquantia/aquantia_firmware.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include "aquantia.h" @@ -353,26 +353,32 @@ int aqr_firmware_load(struct phy_device *phydev) { int ret; - ret = aqr_wait_reset_complete(phydev); - if (ret) - return ret; - - /* Check if the firmware is not already loaded by pooling - * the current version returned by the PHY. If 0 is returned, - * no firmware is loaded. + /* Check if the firmware is not already loaded by polling + * the current version returned by the PHY. */ - ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID); - if (ret > 0) - goto exit; - - ret = aqr_firmware_load_nvmem(phydev); - if (!ret) - goto exit; - - ret = aqr_firmware_load_fs(phydev); - if (ret) + ret = aqr_wait_reset_complete(phydev); + switch (ret) { + case 0: + /* Some firmware is loaded => do nothing */ + return 0; + case -ETIMEDOUT: + /* VEND1_GLOBAL_FW_ID still reads 0 after 2 seconds of polling. + * We don't have full confidence that no firmware is loaded (in + * theory it might just not have loaded yet), but we will + * assume that, and load a new image. 
+ */ + ret = aqr_firmware_load_nvmem(phydev); + if (!ret) + return ret; + + ret = aqr_firmware_load_fs(phydev); + if (ret) + return ret; + break; + default: + /* PHY read error, propagate it to the caller */ return ret; + } -exit: return 0; } diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c index 0516ac02c3f81e..201c8df93fad94 100644 --- a/drivers/net/phy/aquantia/aquantia_leds.c +++ b/drivers/net/phy/aquantia/aquantia_leds.c @@ -120,7 +120,8 @@ int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index, int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable) { return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index), - VEND1_GLOBAL_LED_DRIVE_VDD, enable); + VEND1_GLOBAL_LED_DRIVE_VDD, + enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0); } int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes) diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c index e982e9ce44a596..4d156d406bab9a 100644 --- a/drivers/net/phy/aquantia/aquantia_main.c +++ b/drivers/net/phy/aquantia/aquantia_main.c @@ -435,6 +435,9 @@ static int aqr107_set_tunable(struct phy_device *phydev, } } +#define AQR_FW_WAIT_SLEEP_US 20000 +#define AQR_FW_WAIT_TIMEOUT_US 2000000 + /* If we configure settings whilst firmware is still initializing the chip, * then these settings may be overwritten. Therefore make sure chip * initialization has completed. Use presence of the firmware ID as @@ -444,11 +447,19 @@ static int aqr107_set_tunable(struct phy_device *phydev, */ int aqr_wait_reset_complete(struct phy_device *phydev) { - int val; + int ret, val; + + ret = read_poll_timeout(phy_read_mmd, val, val != 0, + AQR_FW_WAIT_SLEEP_US, AQR_FW_WAIT_TIMEOUT_US, + false, phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_FW_ID); + if (val < 0) { + phydev_err(phydev, "Failed to read VEND1_GLOBAL_FW_ID: %pe\n", + ERR_PTR(val)); + return val; + } - return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, - VEND1_GLOBAL_FW_ID, val, val != 0, - 20000, 2000000, false); + return ret; } static void aqr107_chip_info(struct phy_device *phydev) @@ -478,7 +489,7 @@ static int aqr107_config_init(struct phy_device *phydev) { struct aqr107_priv *priv = phydev->priv; u32 led_active_low; - int ret, index = 0; + int ret; /* Check that the PHY interface type is compatible */ if (phydev->interface != PHY_INTERFACE_MODE_SGMII && @@ -505,10 +516,9 @@ static int aqr107_config_init(struct phy_device *phydev) /* Restore LED polarity state after reset */ for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) { - ret = aqr_phy_led_active_low_set(phydev, index, led_active_low); + ret = aqr_phy_led_active_low_set(phydev, led_active_low, true); if (ret) return ret; - index++; } return 0; diff --git a/drivers/net/phy/ax88796b_rust.rs b/drivers/net/phy/ax88796b_rust.rs index 5c92572962dce7..8c7eb009d9fc0f 100644 --- a/drivers/net/phy/ax88796b_rust.rs +++ b/drivers/net/phy/ax88796b_rust.rs @@ -6,7 +6,7 @@ //! 
C version of this driver: [`drivers/net/phy/ax88796b.c`](./ax88796b.c) use kernel::{ c_str, - net::phy::{self, DeviceId, Driver}, + net::phy::{self, reg::C22, DeviceId, Driver}, prelude::*, uapi, }; @@ -24,7 +24,6 @@ license: "GPL", } -const MII_BMCR: u16 = uapi::MII_BMCR as u16; const BMCR_SPEED100: u16 = uapi::BMCR_SPEED100 as u16; const BMCR_FULLDPLX: u16 = uapi::BMCR_FULLDPLX as u16; @@ -33,7 +32,7 @@ // Toggle BMCR_RESET bit off to accommodate broken AX8796B PHY implementation // such as used on the Individual Computers' X-Surf 100 Zorro card. fn asix_soft_reset(dev: &mut phy::Device) -> Result { - dev.write(uapi::MII_BMCR as u16, 0)?; + dev.write(C22::BMCR, 0)?; dev.genphy_soft_reset() } @@ -55,7 +54,7 @@ fn read_status(dev: &mut phy::Device) -> Result { } // If MII_LPA is 0, phy_resolve_aneg_linkmode() will fail to resolve // linkmode so use MII_BMCR as default values. - let ret = dev.read(MII_BMCR)?; + let ret = dev.read(C22::BMCR)?; if ret & BMCR_SPEED100 != 0 { dev.set_speed(uapi::SPEED_100); diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c index 874a1b64b115f5..208e8f561e0696 100644 --- a/drivers/net/phy/bcm-phy-ptp.c +++ b/drivers/net/phy/bcm-phy-ptp.c @@ -4,7 +4,7 @@ * Copyright (C) 2022 Jonathan Lemon */ -#include +#include #include #include #include diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c index 551e37786c2daa..92aa3a2b9744c2 100644 --- a/drivers/net/phy/dp83td510.c +++ b/drivers/net/phy/dp83td510.c @@ -58,6 +58,10 @@ static const u16 dp83td510_mse_sqi_map[] = { 0x0000 /* 24dB =< SNR */ }; +struct dp83td510_priv { + bool alcd_test_active; +}; + /* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY * * I assume that this PHY is using a variation of Spread Spectrum Time Domain @@ -169,6 +173,10 @@ static const u16 dp83td510_mse_sqi_map[] = { #define DP83TD510E_UNKN_030E 0x30e #define DP83TD510E_030E_VAL 0x2520 +#define DP83TD510E_ALCD_STAT 0xa9f +#define DP83TD510E_ALCD_COMPLETE BIT(15) +#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0) + static int dp83td510_config_intr(struct phy_device *phydev) { int ret; @@ -325,8 +333,23 @@ static int dp83td510_get_sqi_max(struct phy_device *phydev) */ static int dp83td510_cable_test_start(struct phy_device *phydev) { + struct dp83td510_priv *priv = phydev->priv; int ret; + /* If link partner is active, we won't be able to use TDR, since + * we can't force link partner to be silent. The autonegotiation + * pulses will be too frequent and the TDR sequence will be + * too long. So, TDR will always fail. Since the link is established + * we already know that the cable is working, so we can get some + * extra information line the cable length using ALCD. + */ + if (phydev->link) { + priv->alcd_test_active = true; + return 0; + } + + priv->alcd_test_active = false; + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL, DP83TD510E_CTRL_HW_RESET); if (ret) @@ -402,8 +425,8 @@ static int dp83td510_cable_test_start(struct phy_device *phydev) } /** - * dp83td510_cable_test_get_status - Get the status of the cable test for the - * DP83TD510 PHY. + * dp83td510_cable_test_get_tdr_status - Get the status of the TDR test for the + * DP83TD510 PHY. * @phydev: Pointer to the phy_device structure. * @finished: Pointer to a boolean that indicates whether the test is finished. * @@ -411,13 +434,11 @@ static int dp83td510_cable_test_start(struct phy_device *phydev) * * Returns: 0 on success or a negative error code on failure. 
*/ -static int dp83td510_cable_test_get_status(struct phy_device *phydev, - bool *finished) +static int dp83td510_cable_test_get_tdr_status(struct phy_device *phydev, + bool *finished) { int ret, stat; - *finished = false; - ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG); if (ret < 0) return ret; @@ -459,6 +480,77 @@ static int dp83td510_cable_test_get_status(struct phy_device *phydev, return phy_init_hw(phydev); } +/** + * dp83td510_cable_test_get_alcd_status - Get the status of the ALCD test for the + * DP83TD510 PHY. + * @phydev: Pointer to the phy_device structure. + * @finished: Pointer to a boolean that indicates whether the test is finished. + * + * The function sets the @finished flag to true if the test is complete. + * The function reads the cable length and reports it to the user. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int dp83td510_cable_test_get_alcd_status(struct phy_device *phydev, + bool *finished) +{ + unsigned int location; + int ret, phy_sts; + + phy_sts = phy_read(phydev, DP83TD510E_PHY_STS); + + if (!(phy_sts & DP83TD510E_LINK_STATUS)) { + /* If the link is down, we can't do any thing usable now */ + ethnl_cable_test_result_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC, + ETHTOOL_A_CABLE_INF_SRC_ALCD); + *finished = true; + return 0; + } + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_ALCD_STAT); + if (ret < 0) + return ret; + + if (!(ret & DP83TD510E_ALCD_COMPLETE)) + return 0; + + location = FIELD_GET(DP83TD510E_ALCD_CABLE_LENGTH, ret) * 100; + + ethnl_cable_test_fault_length_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A, + location, + ETHTOOL_A_CABLE_INF_SRC_ALCD); + + ethnl_cable_test_result_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_OK, + ETHTOOL_A_CABLE_INF_SRC_ALCD); + *finished = true; + + return 0; +} + +/** + * dp83td510_cable_test_get_status - Get the status of the cable test for the + * DP83TD510 PHY. + * @phydev: Pointer to the phy_device structure. + * @finished: Pointer to a boolean that indicates whether the test is finished. + * + * The function sets the @finished flag to true if the test is complete. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int dp83td510_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + struct dp83td510_priv *priv = phydev->priv; + *finished = false; + + if (priv->alcd_test_active) + return dp83td510_cable_test_get_alcd_status(phydev, finished); + + return dp83td510_cable_test_get_tdr_status(phydev, finished); +} + static int dp83td510_get_features(struct phy_device *phydev) { /* This PHY can't respond on MDIO bus if no RMII clock is enabled. 
@@ -477,12 +569,27 @@ static int dp83td510_get_features(struct phy_device *phydev) return 0; } +static int dp83td510_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct dp83td510_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + return 0; +} + static struct phy_driver dp83td510_driver[] = { { PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID), .name = "TI DP83TD510E", .flags = PHY_POLL_CABLE_TEST, + .probe = dp83td510_probe, .config_aneg = dp83td510_config_aneg, .read_status = dp83td510_read_status, .get_features = dp83td510_get_features, diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c index c706429b225a20..0ef4d7dba06563 100644 --- a/drivers/net/phy/dp83tg720.c +++ b/drivers/net/phy/dp83tg720.c @@ -3,10 +3,13 @@ * Copyright (c) 2023 Pengutronix, Oleksij Rempel */ #include +#include #include #include #include +#include "open_alliance_helpers.h" + #define DP83TG720S_PHY_ID 0x2000a284 /* MDIO_MMD_VEND2 registers */ @@ -14,6 +17,17 @@ #define DP83TG720S_STS_MII_INT BIT(7) #define DP83TG720S_LINK_STATUS BIT(0) +/* TDR Configuration Register (0x1E) */ +#define DP83TG720S_TDR_CFG 0x1e +/* 1b = TDR start, 0b = No TDR */ +#define DP83TG720S_TDR_START BIT(15) +/* 1b = TDR auto on link down, 0b = Manual TDR start */ +#define DP83TG720S_CFG_TDR_AUTO_RUN BIT(14) +/* 1b = TDR done, 0b = TDR in progress */ +#define DP83TG720S_TDR_DONE BIT(1) +/* 1b = TDR fail, 0b = TDR success */ +#define DP83TG720S_TDR_FAIL BIT(0) + #define DP83TG720S_PHY_RESET 0x1f #define DP83TG720S_HW_RESET BIT(15) @@ -22,18 +36,155 @@ /* Power Mode 0 is Normal mode */ #define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0) +/* Open Aliance 1000BaseT1 compatible HDD.TDR Fault Status Register */ +#define DP83TG720S_TDR_FAULT_STATUS 0x30f + +/* Register 0x0301: TDR Configuration 2 */ +#define DP83TG720S_TDR_CFG2 0x301 + +/* Register 0x0303: TDR Configuration 3 */ +#define DP83TG720S_TDR_CFG3 0x303 + +/* Register 0x0304: TDR Configuration 4 */ +#define DP83TG720S_TDR_CFG4 0x304 + +/* Register 0x0405: Unknown Register */ +#define DP83TG720S_UNKNOWN_0405 0x405 + +/* Register 0x0576: TDR Master Link Down Control */ +#define DP83TG720S_TDR_MASTER_LINK_DOWN 0x576 + #define DP83TG720S_RGMII_DELAY_CTRL 0x602 /* In RGMII mode, Enable or disable the internal delay for RXD */ #define DP83TG720S_RGMII_RX_CLK_SEL BIT(1) /* In RGMII mode, Enable or disable the internal delay for TXD */ #define DP83TG720S_RGMII_TX_CLK_SEL BIT(0) +/* Register 0x083F: Unknown Register */ +#define DP83TG720S_UNKNOWN_083F 0x83f + #define DP83TG720S_SQI_REG_1 0x871 #define DP83TG720S_SQI_OUT_WORST GENMASK(7, 5) #define DP83TG720S_SQI_OUT GENMASK(3, 1) #define DP83TG720_SQI_MAX 7 +/** + * dp83tg720_cable_test_start - Start the cable test for the DP83TG720 PHY. + * @phydev: Pointer to the phy_device structure. + * + * This sequence is based on the documented procedure for the DP83TG720 PHY. + * + * Returns: 0 on success, a negative error code on failure. + */ +static int dp83tg720_cable_test_start(struct phy_device *phydev) +{ + int ret; + + /* Initialize the PHY to run the TDR test as described in the + * "DP83TG720S-Q1: Configuring for Open Alliance Specification + * Compliance (Rev. B)" application note. + * Most of the registers are not documented. Some of register names + * are guessed by comparing the register offsets with the DP83TD510E. 
+ */ + + /* Force master link down */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + DP83TG720S_TDR_MASTER_LINK_DOWN, 0x0400); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG2, + 0xa008); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG3, + 0x0928); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG4, + 0x0004); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_UNKNOWN_0405, + 0x6400); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_UNKNOWN_083F, + 0x3003); + if (ret) + return ret; + + /* Start the TDR */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG, + DP83TG720S_TDR_START); + if (ret) + return ret; + + return 0; +} + +/** + * dp83tg720_cable_test_get_status - Get the status of the cable test for the + * DP83TG720 PHY. + * @phydev: Pointer to the phy_device structure. + * @finished: Pointer to a boolean that indicates whether the test is finished. + * + * The function sets the @finished flag to true if the test is complete. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int dp83tg720_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int ret, stat; + + *finished = false; + + /* Read the TDR status */ + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG); + if (ret < 0) + return ret; + + /* Check if the TDR test is done */ + if (!(ret & DP83TG720S_TDR_DONE)) + return 0; + + /* Check for TDR test failure */ + if (!(ret & DP83TG720S_TDR_FAIL)) { + int location; + + /* Read fault status */ + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + DP83TG720S_TDR_FAULT_STATUS); + if (ret < 0) + return ret; + + /* Get fault type */ + stat = oa_1000bt1_get_ethtool_cable_result_code(ret); + + /* Determine fault location */ + location = oa_1000bt1_get_tdr_distance(ret); + if (location > 0) + ethnl_cable_test_fault_length(phydev, + ETHTOOL_A_CABLE_PAIR_A, + location); + } else { + /* Active link partner or other issues */ + stat = ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } + + *finished = true; + + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat); + + return phy_init_hw(phydev); +} + static int dp83tg720_config_aneg(struct phy_device *phydev) { int ret; @@ -195,12 +346,15 @@ static struct phy_driver dp83tg720_driver[] = { PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID), .name = "TI DP83TG720S", + .flags = PHY_POLL_CABLE_TEST, .config_aneg = dp83tg720_config_aneg, .read_status = dp83tg720_read_status, .get_features = genphy_c45_pma_read_ext_abilities, .config_init = dp83tg720_config_init, .get_sqi = dp83tg720_get_sqi, .get_sqi_max = dp83tg720_get_sqi_max, + .cable_test_start = dp83tg720_cable_test_start, + .cable_test_get_status = dp83tg720_cable_test_get_status, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c index b88398e6872bcf..0b777cdd7078b1 100644 --- a/drivers/net/phy/marvell-88x2222.c +++ b/drivers/net/phy/marvell-88x2222.c @@ -553,6 +553,8 @@ static const struct sfp_upstream_ops sfp_phy_ops = { .link_down = mv2222_sfp_link_down, .attach = phy_sfp_attach, .detach = phy_sfp_detach, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, }; static int mv2222_probe(struct phy_device *phydev) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index b89fbffa6a931a..9964bf3dea2fb2 100644 --- 
a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -3613,6 +3613,8 @@ static const struct sfp_upstream_ops m88e1510_sfp_ops = { .module_remove = m88e1510_sfp_remove, .attach = phy_sfp_attach, .detach = phy_sfp_detach, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, }; static int m88e1510_probe(struct phy_device *phydev) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index ad43e280930c35..6642eb642d4bdd 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -503,6 +503,8 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) static const struct sfp_upstream_ops mv3310_sfp_ops = { .attach = phy_sfp_attach, .detach = phy_sfp_detach, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, .module_insert = mv3310_sfp_insert, }; diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index a35528497a5762..a5ef8fe5070467 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -12,6 +12,7 @@ #define PHY_ID_LAN87XX 0x0007c150 #define PHY_ID_LAN937X 0x0007c180 +#define PHY_ID_LAN887X 0x0007c1f0 /* External Register Control Register */ #define LAN87XX_EXT_REG_CTL (0x14) @@ -94,8 +95,155 @@ /* SQI defines */ #define LAN87XX_MAX_SQI 0x07 +/* Chiptop registers */ +#define LAN887X_PMA_EXT_ABILITY_2 0x12 +#define LAN887X_PMA_EXT_ABILITY_2_1000T1 BIT(1) +#define LAN887X_PMA_EXT_ABILITY_2_100T1 BIT(0) + +/* DSP 100M registers */ +#define LAN887x_CDR_CONFIG1_100 0x0405 +#define LAN887x_LOCK1_EQLSR_CONFIG_100 0x0411 +#define LAN887x_SLV_HD_MUFAC_CONFIG_100 0x0417 +#define LAN887x_PLOCK_MUFAC_CONFIG_100 0x041c +#define LAN887x_PROT_DISABLE_100 0x0425 +#define LAN887x_KF_LOOP_SAT_CONFIG_100 0x0454 + +/* DSP 1000M registers */ +#define LAN887X_LOCK1_EQLSR_CONFIG 0x0811 +#define LAN887X_LOCK3_EQLSR_CONFIG 0x0813 +#define LAN887X_PROT_DISABLE 0x0825 +#define LAN887X_FFE_GAIN6 0x0843 +#define LAN887X_FFE_GAIN7 0x0844 +#define LAN887X_FFE_GAIN8 0x0845 +#define LAN887X_FFE_GAIN9 0x0846 +#define LAN887X_ECHO_DELAY_CONFIG 0x08ec +#define LAN887X_FFE_MAX_CONFIG 0x08ee + +/* PCS 1000M registers */ +#define LAN887X_SCR_CONFIG_3 0x8043 +#define LAN887X_INFO_FLD_CONFIG_5 0x8048 + +/* T1 afe registers */ +#define LAN887X_ZQCAL_CONTROL_1 0x8080 +#define LAN887X_AFE_PORT_TESTBUS_CTRL2 0x8089 +#define LAN887X_AFE_PORT_TESTBUS_CTRL4 0x808b +#define LAN887X_AFE_PORT_TESTBUS_CTRL6 0x808d +#define LAN887X_TX_AMPLT_1000T1_REG 0x80b0 +#define LAN887X_INIT_COEFF_DFE1_100 0x0422 + +/* PMA registers */ +#define LAN887X_DSP_PMA_CONTROL 0x810e +#define LAN887X_DSP_PMA_CONTROL_LNK_SYNC BIT(4) + +/* PCS 100M registers */ +#define LAN887X_IDLE_ERR_TIMER_WIN 0x8204 +#define LAN887X_IDLE_ERR_CNT_THRESH 0x8213 + +/* Misc registers */ +#define LAN887X_REG_REG26 0x001a +#define LAN887X_REG_REG26_HW_INIT_SEQ_EN BIT(8) + +/* Mis registers */ +#define LAN887X_MIS_CFG_REG0 0xa00 +#define LAN887X_MIS_CFG_REG0_RCLKOUT_DIS BIT(5) +#define LAN887X_MIS_CFG_REG0_MAC_MODE_SEL GENMASK(1, 0) + +#define LAN887X_MAC_MODE_RGMII 0x01 +#define LAN887X_MAC_MODE_SGMII 0x03 + +#define LAN887X_MIS_DLL_CFG_REG0 0xa01 +#define LAN887X_MIS_DLL_CFG_REG1 0xa02 + +#define LAN887X_MIS_DLL_DELAY_EN BIT(15) +#define LAN887X_MIS_DLL_EN BIT(0) +#define LAN887X_MIS_DLL_CONF (LAN887X_MIS_DLL_DELAY_EN |\ + LAN887X_MIS_DLL_EN) + +#define LAN887X_MIS_CFG_REG2 0xa03 +#define LAN887X_MIS_CFG_REG2_FE_LPBK_EN BIT(2) + +#define LAN887X_MIS_PKT_STAT_REG0 0xa06 +#define LAN887X_MIS_PKT_STAT_REG1 
0xa07 +#define LAN887X_MIS_PKT_STAT_REG3 0xa09 +#define LAN887X_MIS_PKT_STAT_REG4 0xa0a +#define LAN887X_MIS_PKT_STAT_REG5 0xa0b +#define LAN887X_MIS_PKT_STAT_REG6 0xa0c + +/* Chiptop common registers */ +#define LAN887X_COMMON_LED3_LED2 0xc05 +#define LAN887X_COMMON_LED2_MODE_SEL_MASK GENMASK(4, 0) +#define LAN887X_LED_LINK_ACT_ANY_SPEED 0x0 + +/* MX chip top registers */ +#define LAN887X_CHIP_HARD_RST 0xf03e +#define LAN887X_CHIP_HARD_RST_RESET BIT(0) + +#define LAN887X_CHIP_SOFT_RST 0xf03f +#define LAN887X_CHIP_SOFT_RST_RESET BIT(0) + +#define LAN887X_SGMII_CTL 0xf01a +#define LAN887X_SGMII_CTL_SGMII_MUX_EN BIT(0) + +#define LAN887X_SGMII_PCS_CFG 0xf034 +#define LAN887X_SGMII_PCS_CFG_PCS_ENA BIT(9) + +#define LAN887X_EFUSE_READ_DAT9 0xf209 +#define LAN887X_EFUSE_READ_DAT9_SGMII_DIS BIT(9) +#define LAN887X_EFUSE_READ_DAT9_MAC_MODE GENMASK(1, 0) + +#define LAN887X_CALIB_CONFIG_100 0x437 +#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL BIT(5) +#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE BIT(4) +#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE BIT(3) +#define LAN887X_CALIB_CONFIG_100_VAL \ + (LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE |\ + LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE |\ + LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL) + +#define LAN887X_MAX_PGA_GAIN_100 0x44f +#define LAN887X_MIN_PGA_GAIN_100 0x450 +#define LAN887X_START_CBL_DIAG_100 0x45a +#define LAN887X_CBL_DIAG_DONE BIT(1) +#define LAN887X_CBL_DIAG_START BIT(0) +#define LAN887X_CBL_DIAG_STOP 0x0 + +#define LAN887X_CBL_DIAG_TDR_THRESH_100 0x45b +#define LAN887X_CBL_DIAG_AGC_THRESH_100 0x45c +#define LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100 0x45d +#define LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100 0x45e +#define LAN887X_CBL_DIAG_CYC_CONFIG_100 0x45f +#define LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100 0x460 +#define LAN887X_CBL_DIAG_MIN_PGA_GAIN_100 0x462 +#define LAN887X_CBL_DIAG_AGC_GAIN_100 0x497 +#define LAN887X_CBL_DIAG_POS_PEAK_VALUE_100 0x499 +#define LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100 0x49a +#define LAN887X_CBL_DIAG_POS_PEAK_TIME_100 0x49c +#define LAN887X_CBL_DIAG_NEG_PEAK_TIME_100 0x49d + +#define MICROCHIP_CABLE_NOISE_MARGIN 20 +#define MICROCHIP_CABLE_TIME_MARGIN 89 +#define MICROCHIP_CABLE_MIN_TIME_DIFF 96 +#define MICROCHIP_CABLE_MAX_TIME_DIFF \ + (MICROCHIP_CABLE_MIN_TIME_DIFF + MICROCHIP_CABLE_TIME_MARGIN) + #define DRIVER_AUTHOR "Nisar Sayed " -#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver" +#define DRIVER_DESC "Microchip LAN87XX/LAN937x/LAN887x T1 PHY driver" + +/* TEST_MODE_NORMAL: Non-hybrid results to calculate cable status(open/short/ok) + * TEST_MODE_HYBRID: Hybrid results to calculate distance to fault + */ +enum cable_diag_mode { + TEST_MODE_NORMAL, + TEST_MODE_HYBRID +}; + +/* CD_TEST_INIT: Cable test is initated + * CD_TEST_DONE: Cable test is done + */ +enum cable_diag_state { + CD_TEST_INIT, + CD_TEST_DONE +}; struct access_ereg_val { u8 mode; @@ -105,6 +253,32 @@ struct access_ereg_val { u16 mask; }; +struct lan887x_hw_stat { + const char *string; + u8 mmd; + u16 reg; + u8 bits; +}; + +static const struct lan887x_hw_stat lan887x_hw_stats[] = { + { "TX Good Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG0, 14}, + { "RX Good Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG1, 14}, + { "RX ERR Count detected by PCS", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG3, 16}, + { "TX CRC ERR Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG4, 8}, + { "RX CRC ERR Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG5, 8}, + { "RX ERR Count for SGMII MII2GMII", MDIO_MMD_VEND1, 
LAN887X_MIS_PKT_STAT_REG6, 8}, +}; + +struct lan887x_regwr_map { + u8 mmd; + u16 reg; + u16 val; +}; + +struct lan887x_priv { + u64 stats[ARRAY_SIZE(lan887x_hw_stats)]; +}; + static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank) { u8 prev_bank; @@ -860,6 +1034,802 @@ static int lan87xx_get_sqi_max(struct phy_device *phydev) return LAN87XX_MAX_SQI; } +static int lan887x_rgmii_init(struct phy_device *phydev) +{ + int ret; + + /* SGMII mux disable */ + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_SGMII_CTL, + LAN887X_SGMII_CTL_SGMII_MUX_EN); + if (ret < 0) + return ret; + + /* Select MAC_MODE as RGMII */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0, + LAN887X_MIS_CFG_REG0_MAC_MODE_SEL, + LAN887X_MAC_MODE_RGMII); + if (ret < 0) + return ret; + + /* Disable PCS */ + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_SGMII_PCS_CFG, + LAN887X_SGMII_PCS_CFG_PCS_ENA); + if (ret < 0) + return ret; + + /* LAN887x Errata: RGMII rx clock active in SGMII mode + * Disabled it for SGMII mode + * Re-enabling it for RGMII mode + */ + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_MIS_CFG_REG0, + LAN887X_MIS_CFG_REG0_RCLKOUT_DIS); +} + +static int lan887x_sgmii_init(struct phy_device *phydev) +{ + int ret; + + /* SGMII mux enable */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_SGMII_CTL, + LAN887X_SGMII_CTL_SGMII_MUX_EN); + if (ret < 0) + return ret; + + /* Select MAC_MODE as SGMII */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0, + LAN887X_MIS_CFG_REG0_MAC_MODE_SEL, + LAN887X_MAC_MODE_SGMII); + if (ret < 0) + return ret; + + /* LAN887x Errata: RGMII rx clock active in SGMII mode. + * So disabling it for SGMII mode + */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0, + LAN887X_MIS_CFG_REG0_RCLKOUT_DIS); + if (ret < 0) + return ret; + + /* Enable PCS */ + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_SGMII_PCS_CFG, + LAN887X_SGMII_PCS_CFG_PCS_ENA); +} + +static int lan887x_config_rgmii_en(struct phy_device *phydev) +{ + int txc; + int rxc; + int ret; + + ret = lan887x_rgmii_init(phydev); + if (ret < 0) + return ret; + + /* Control bit to enable/disable TX DLL delay line in signal path */ + txc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG0); + if (txc < 0) + return txc; + + /* Control bit to enable/disable RX DLL delay line in signal path */ + rxc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG1); + if (rxc < 0) + return rxc; + + /* Configures the phy to enable RX/TX delay + * RGMII - TX & RX delays are either added by MAC or not needed, + * phy should not add + * RGMII_ID - Configures phy to enable TX & RX delays, MAC shouldn't add + * RGMII_RX_ID - Configures the PHY to enable the RX delay. + * The MAC shouldn't add the RX delay + * RGMII_TX_ID - Configures the PHY to enable the TX delay. 
+ * The MAC shouldn't add the TX delay in this case + */ + switch (phydev->interface) { + case PHY_INTERFACE_MODE_RGMII: + txc &= ~LAN887X_MIS_DLL_CONF; + rxc &= ~LAN887X_MIS_DLL_CONF; + break; + case PHY_INTERFACE_MODE_RGMII_ID: + txc |= LAN887X_MIS_DLL_CONF; + rxc |= LAN887X_MIS_DLL_CONF; + break; + case PHY_INTERFACE_MODE_RGMII_RXID: + txc &= ~LAN887X_MIS_DLL_CONF; + rxc |= LAN887X_MIS_DLL_CONF; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + txc |= LAN887X_MIS_DLL_CONF; + rxc &= ~LAN887X_MIS_DLL_CONF; + break; + default: + WARN_ONCE(1, "Invalid phydev interface %d\n", phydev->interface); + return 0; + } + + /* Configures the PHY to enable/disable RX delay in signal path */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG1, + LAN887X_MIS_DLL_CONF, rxc); + if (ret < 0) + return ret; + + /* Configures the PHY to enable/disable the TX delay in signal path */ + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG0, + LAN887X_MIS_DLL_CONF, txc); +} + +static int lan887x_config_phy_interface(struct phy_device *phydev) +{ + int interface_mode; + int sgmii_dis; + int ret; + + /* Read sku efuse data for interfaces supported by sku */ + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_EFUSE_READ_DAT9); + if (ret < 0) + return ret; + + /* If interface_mode is 1 then efuse sets RGMII operations. + * If interface mode is 3 then efuse sets SGMII operations. + */ + interface_mode = ret & LAN887X_EFUSE_READ_DAT9_MAC_MODE; + /* SGMII disable is set for RGMII operations */ + sgmii_dis = ret & LAN887X_EFUSE_READ_DAT9_SGMII_DIS; + + switch (phydev->interface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + /* Reject RGMII settings for SGMII only sku */ + ret = -EOPNOTSUPP; + + if (!((interface_mode & LAN887X_MAC_MODE_SGMII) == + LAN887X_MAC_MODE_SGMII)) + ret = lan887x_config_rgmii_en(phydev); + break; + case PHY_INTERFACE_MODE_SGMII: + /* Reject SGMII setting for RGMII only sku */ + ret = -EOPNOTSUPP; + + if (!sgmii_dis) + ret = lan887x_sgmii_init(phydev); + break; + default: + /* Reject setting for unsupported interfaces */ + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int lan887x_get_features(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_pma_read_abilities(phydev); + if (ret < 0) + return ret; + + /* Enable twisted pair */ + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported); + + /* First patch only supports 100Mbps and 1000Mbps force-mode. + * T1 Auto-Negotiation (Clause 98 of IEEE 802.3) will be added later. 
+ */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); + + return 0; +} + +static int lan887x_phy_init(struct phy_device *phydev) +{ + int ret; + + /* Clear loopback */ + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_MIS_CFG_REG2, + LAN887X_MIS_CFG_REG2_FE_LPBK_EN); + if (ret < 0) + return ret; + + /* Configure default behavior of led to link and activity for any + * speed + */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_COMMON_LED3_LED2, + LAN887X_COMMON_LED2_MODE_SEL_MASK, + LAN887X_LED_LINK_ACT_ANY_SPEED); + if (ret < 0) + return ret; + + /* PHY interface setup */ + return lan887x_config_phy_interface(phydev); +} + +static int lan887x_phy_config(struct phy_device *phydev, + const struct lan887x_regwr_map *reg_map, int cnt) +{ + int ret; + + for (int i = 0; i < cnt; i++) { + ret = phy_write_mmd(phydev, reg_map[i].mmd, + reg_map[i].reg, reg_map[i].val); + if (ret < 0) + return ret; + } + + return 0; +} + +static int lan887x_phy_setup(struct phy_device *phydev) +{ + static const struct lan887x_regwr_map phy_cfg[] = { + /* PORT_AFE writes */ + {MDIO_MMD_PMAPMD, LAN887X_ZQCAL_CONTROL_1, 0x4008}, + {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL2, 0x0000}, + {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL6, 0x0040}, + /* 100T1_PCS_VENDOR writes */ + {MDIO_MMD_PCS, LAN887X_IDLE_ERR_CNT_THRESH, 0x0008}, + {MDIO_MMD_PCS, LAN887X_IDLE_ERR_TIMER_WIN, 0x800d}, + /* 100T1 DSP writes */ + {MDIO_MMD_VEND1, LAN887x_CDR_CONFIG1_100, 0x0ab1}, + {MDIO_MMD_VEND1, LAN887x_LOCK1_EQLSR_CONFIG_100, 0x5274}, + {MDIO_MMD_VEND1, LAN887x_SLV_HD_MUFAC_CONFIG_100, 0x0d74}, + {MDIO_MMD_VEND1, LAN887x_PLOCK_MUFAC_CONFIG_100, 0x0aea}, + {MDIO_MMD_VEND1, LAN887x_PROT_DISABLE_100, 0x0360}, + {MDIO_MMD_VEND1, LAN887x_KF_LOOP_SAT_CONFIG_100, 0x0c30}, + /* 1000T1 DSP writes */ + {MDIO_MMD_VEND1, LAN887X_LOCK1_EQLSR_CONFIG, 0x2a78}, + {MDIO_MMD_VEND1, LAN887X_LOCK3_EQLSR_CONFIG, 0x1368}, + {MDIO_MMD_VEND1, LAN887X_PROT_DISABLE, 0x1354}, + {MDIO_MMD_VEND1, LAN887X_FFE_GAIN6, 0x3C84}, + {MDIO_MMD_VEND1, LAN887X_FFE_GAIN7, 0x3ca5}, + {MDIO_MMD_VEND1, LAN887X_FFE_GAIN8, 0x3ca5}, + {MDIO_MMD_VEND1, LAN887X_FFE_GAIN9, 0x3ca5}, + {MDIO_MMD_VEND1, LAN887X_ECHO_DELAY_CONFIG, 0x0024}, + {MDIO_MMD_VEND1, LAN887X_FFE_MAX_CONFIG, 0x227f}, + /* 1000T1 PCS writes */ + {MDIO_MMD_PCS, LAN887X_SCR_CONFIG_3, 0x1e00}, + {MDIO_MMD_PCS, LAN887X_INFO_FLD_CONFIG_5, 0x0fa1}, + }; + + return lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg)); +} + +static int lan887x_100M_setup(struct phy_device *phydev) +{ + int ret; + + /* (Re)configure the speed/mode dependent T1 settings */ + if (phydev->master_slave_set == MASTER_SLAVE_CFG_MASTER_FORCE || + phydev->master_slave_set == MASTER_SLAVE_CFG_MASTER_PREFERRED){ + static const struct lan887x_regwr_map phy_cfg[] = { + {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x00b8}, + {MDIO_MMD_PMAPMD, LAN887X_TX_AMPLT_1000T1_REG, 0x0038}, + {MDIO_MMD_VEND1, LAN887X_INIT_COEFF_DFE1_100, 0x000f}, + }; + + ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg)); + } else { + static const struct lan887x_regwr_map phy_cfg[] = { + {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x0038}, + {MDIO_MMD_VEND1, LAN887X_INIT_COEFF_DFE1_100, 0x0014}, + }; + + ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg)); + } + if (ret < 0) + return ret; + + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26, + LAN887X_REG_REG26_HW_INIT_SEQ_EN); +} + +static int lan887x_1000M_setup(struct phy_device *phydev) +{ + static const struct 
lan887x_regwr_map phy_cfg[] = { + {MDIO_MMD_PMAPMD, LAN887X_TX_AMPLT_1000T1_REG, 0x003f}, + {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x00b8}, + }; + int ret; + + /* (Re)configure the speed/mode dependent T1 settings */ + ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg)); + if (ret < 0) + return ret; + + return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, LAN887X_DSP_PMA_CONTROL, + LAN887X_DSP_PMA_CONTROL_LNK_SYNC); +} + +static int lan887x_link_setup(struct phy_device *phydev) +{ + int ret = -EINVAL; + + if (phydev->speed == SPEED_1000) + ret = lan887x_1000M_setup(phydev); + else if (phydev->speed == SPEED_100) + ret = lan887x_100M_setup(phydev); + + return ret; +} + +/* LAN887x Errata: speed configuration changes require soft reset + * and chip soft reset + */ +static int lan887x_phy_reset(struct phy_device *phydev) +{ + int ret, val; + + /* Clear 1000M link sync */ + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, LAN887X_DSP_PMA_CONTROL, + LAN887X_DSP_PMA_CONTROL_LNK_SYNC); + if (ret < 0) + return ret; + + /* Clear 100M link sync */ + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26, + LAN887X_REG_REG26_HW_INIT_SEQ_EN); + if (ret < 0) + return ret; + + /* Chiptop soft-reset to allow the speed/mode change */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_SOFT_RST, + LAN887X_CHIP_SOFT_RST_RESET); + if (ret < 0) + return ret; + + /* CL22 soft-reset to let the link re-train */ + ret = phy_modify(phydev, MII_BMCR, BMCR_RESET, BMCR_RESET); + if (ret < 0) + return ret; + + /* Wait for reset complete or timeout if > 10ms */ + return phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET), + 5000, 10000, true); +} + +static int lan887x_phy_reconfig(struct phy_device *phydev) +{ + int ret; + + linkmode_zero(phydev->advertising); + + ret = genphy_c45_pma_setup_forced(phydev); + if (ret < 0) + return ret; + + return lan887x_link_setup(phydev); +} + +static int lan887x_config_aneg(struct phy_device *phydev) +{ + int ret; + + /* LAN887x Errata: speed configuration changes require soft reset + * and chip soft reset + */ + ret = lan887x_phy_reset(phydev); + if (ret < 0) + return ret; + + return lan887x_phy_reconfig(phydev); +} + +static int lan887x_probe(struct phy_device *phydev) +{ + struct lan887x_priv *priv; + + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + return lan887x_phy_setup(phydev); +} + +static u64 lan887x_get_stat(struct phy_device *phydev, int i) +{ + struct lan887x_hw_stat stat = lan887x_hw_stats[i]; + struct lan887x_priv *priv = phydev->priv; + int val; + u64 ret; + + if (stat.mmd) + val = phy_read_mmd(phydev, stat.mmd, stat.reg); + else + val = phy_read(phydev, stat.reg); + + if (val < 0) { + ret = U64_MAX; + } else { + val = val & ((1 << stat.bits) - 1); + priv->stats[i] += val; + ret = priv->stats[i]; + } + + return ret; +} + +static void lan887x_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + for (int i = 0; i < ARRAY_SIZE(lan887x_hw_stats); i++) + data[i] = lan887x_get_stat(phydev, i); +} + +static int lan887x_get_sset_count(struct phy_device *phydev) +{ + return ARRAY_SIZE(lan887x_hw_stats); +} + +static void lan887x_get_strings(struct phy_device *phydev, u8 *data) +{ + for (int i = 0; i < ARRAY_SIZE(lan887x_hw_stats); i++) + ethtool_puts(&data, lan887x_hw_stats[i].string); +} + +static int lan887x_cd_reset(struct phy_device *phydev, + enum cable_diag_state cd_done) +{ + u16 val; + int rc; + + /* Chip 
hard-reset */ + rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_HARD_RST, + LAN887X_CHIP_HARD_RST_RESET); + if (rc < 0) + return rc; + + /* Wait for reset to complete */ + rc = phy_read_poll_timeout(phydev, MII_PHYSID2, val, + ((val & GENMASK(15, 4)) == + (PHY_ID_LAN887X & GENMASK(15, 4))), + 5000, 50000, true); + if (rc < 0) + return rc; + + if (cd_done == CD_TEST_DONE) { + /* Cable diagnostics complete. Restore PHY. */ + rc = lan887x_phy_setup(phydev); + if (rc < 0) + return rc; + + rc = lan887x_phy_init(phydev); + if (rc < 0) + return rc; + + rc = lan887x_phy_reconfig(phydev); + if (rc < 0) + return rc; + } + + return 0; +} + +static int lan887x_cable_test_prep(struct phy_device *phydev, + enum cable_diag_mode mode) +{ + static const struct lan887x_regwr_map values[] = { + {MDIO_MMD_VEND1, LAN887X_MAX_PGA_GAIN_100, 0x1f}, + {MDIO_MMD_VEND1, LAN887X_MIN_PGA_GAIN_100, 0x0}, + {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TDR_THRESH_100, 0x1}, + {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_AGC_THRESH_100, 0x3c}, + {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100, 0x0}, + {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100, 0x46}, + {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_CYC_CONFIG_100, 0x5a}, + {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100, 0x44d5}, + {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_PGA_GAIN_100, 0x0}, + + }; + int rc; + + rc = lan887x_cd_reset(phydev, CD_TEST_INIT); + if (rc < 0) + return rc; + + /* Forcing DUT to master mode, as we don't care about + * mode during diagnostics + */ + rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, + MDIO_PMA_PMD_BT1_CTRL_CFG_MST); + if (rc < 0) + return rc; + + rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x80b0, 0x0038); + if (rc < 0) + return rc; + + rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_CALIB_CONFIG_100, 0, + LAN887X_CALIB_CONFIG_100_VAL); + if (rc < 0) + return rc; + + for (int i = 0; i < ARRAY_SIZE(values); i++) { + rc = phy_write_mmd(phydev, values[i].mmd, values[i].reg, + values[i].val); + if (rc < 0) + return rc; + + if (mode && + values[i].reg == LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100) { + rc = phy_write_mmd(phydev, values[i].mmd, + values[i].reg, 0xa); + if (rc < 0) + return rc; + } + } + + if (mode == TEST_MODE_HYBRID) { + rc = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, + LAN887X_AFE_PORT_TESTBUS_CTRL4, + BIT(0), BIT(0)); + if (rc < 0) + return rc; + } + + /* HW_INIT 100T1, Get DUT running in 100T1 mode */ + rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26, + LAN887X_REG_REG26_HW_INIT_SEQ_EN, + LAN887X_REG_REG26_HW_INIT_SEQ_EN); + if (rc < 0) + return rc; + + /* Cable diag requires hard reset and is sensitive regarding the delays. + * Hard reset is expected into and out of cable diag. + * Wait for 50ms + */ + msleep(50); + + /* Start cable diag */ + return phy_write_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_START_CBL_DIAG_100, + LAN887X_CBL_DIAG_START); +} + +static int lan887x_cable_test_chk(struct phy_device *phydev, + enum cable_diag_mode mode) +{ + int val; + int rc; + + if (mode == TEST_MODE_HYBRID) { + /* Cable diag requires hard reset and is sensitive regarding the delays. + * Hard reset is expected into and out of cable diag. + * Wait for cable diag to complete. + * Minimum wait time is 50ms if the condition is not a match. 
+ */ + rc = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, + LAN887X_START_CBL_DIAG_100, val, + ((val & LAN887X_CBL_DIAG_DONE) == + LAN887X_CBL_DIAG_DONE), + 50000, 500000, false); + if (rc < 0) + return rc; + } else { + rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_START_CBL_DIAG_100); + if (rc < 0) + return rc; + + if ((rc & LAN887X_CBL_DIAG_DONE) != LAN887X_CBL_DIAG_DONE) + return -EAGAIN; + } + + /* Stop cable diag */ + return phy_write_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_START_CBL_DIAG_100, + LAN887X_CBL_DIAG_STOP); +} + +static int lan887x_cable_test_start(struct phy_device *phydev) +{ + int rc, ret; + + rc = lan887x_cable_test_prep(phydev, TEST_MODE_NORMAL); + if (rc < 0) { + ret = lan887x_cd_reset(phydev, CD_TEST_DONE); + if (ret < 0) + return ret; + + return rc; + } + + return 0; +} + +static int lan887x_cable_test_report(struct phy_device *phydev) +{ + int pos_peak_cycle, pos_peak_cycle_hybrid, pos_peak_in_phases; + int pos_peak_time, pos_peak_time_hybrid, neg_peak_time; + int neg_peak_cycle, neg_peak_in_phases; + int pos_peak_in_phases_hybrid; + int gain_idx, gain_idx_hybrid; + int pos_peak_phase_hybrid; + int pos_peak, neg_peak; + int distance; + int detect; + int length; + int ret; + int rc; + + /* Read non-hybrid results */ + gain_idx = phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_CBL_DIAG_AGC_GAIN_100); + if (gain_idx < 0) { + rc = gain_idx; + goto error; + } + + pos_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_CBL_DIAG_POS_PEAK_VALUE_100); + if (pos_peak < 0) { + rc = pos_peak; + goto error; + } + + neg_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100); + if (neg_peak < 0) { + rc = neg_peak; + goto error; + } + + pos_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_CBL_DIAG_POS_PEAK_TIME_100); + if (pos_peak_time < 0) { + rc = pos_peak_time; + goto error; + } + + neg_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_CBL_DIAG_NEG_PEAK_TIME_100); + if (neg_peak_time < 0) { + rc = neg_peak_time; + goto error; + } + + /* Calculate non-hybrid values */ + pos_peak_cycle = (pos_peak_time >> 7) & 0x7f; + pos_peak_in_phases = (pos_peak_cycle * 96) + (pos_peak_time & 0x7f); + neg_peak_cycle = (neg_peak_time >> 7) & 0x7f; + neg_peak_in_phases = (neg_peak_cycle * 96) + (neg_peak_time & 0x7f); + + /* Deriving the status of cable */ + if (pos_peak > MICROCHIP_CABLE_NOISE_MARGIN && + neg_peak > MICROCHIP_CABLE_NOISE_MARGIN && gain_idx >= 0) { + if (pos_peak_in_phases > neg_peak_in_phases && + ((pos_peak_in_phases - neg_peak_in_phases) >= + MICROCHIP_CABLE_MIN_TIME_DIFF) && + ((pos_peak_in_phases - neg_peak_in_phases) < + MICROCHIP_CABLE_MAX_TIME_DIFF) && + pos_peak_in_phases > 0) { + detect = LAN87XX_CABLE_TEST_SAME_SHORT; + } else if (neg_peak_in_phases > pos_peak_in_phases && + ((neg_peak_in_phases - pos_peak_in_phases) >= + MICROCHIP_CABLE_MIN_TIME_DIFF) && + ((neg_peak_in_phases - pos_peak_in_phases) < + MICROCHIP_CABLE_MAX_TIME_DIFF) && + neg_peak_in_phases > 0) { + detect = LAN87XX_CABLE_TEST_OPEN; + } else { + detect = LAN87XX_CABLE_TEST_OK; + } + } else { + detect = LAN87XX_CABLE_TEST_OK; + } + + if (detect == LAN87XX_CABLE_TEST_OK) { + distance = 0; + goto get_len; + } + + /* Re-initialize PHY and start cable diag test */ + rc = lan887x_cable_test_prep(phydev, TEST_MODE_HYBRID); + if (rc < 0) + goto cd_stop; + + /* Wait for cable diag test completion */ + rc = lan887x_cable_test_chk(phydev, TEST_MODE_HYBRID); + if (rc < 0) + goto cd_stop; + + /* Read hybrid results */ + gain_idx_hybrid = 
phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_CBL_DIAG_AGC_GAIN_100); + if (gain_idx_hybrid < 0) { + rc = gain_idx_hybrid; + goto error; + } + + pos_peak_time_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_CBL_DIAG_POS_PEAK_TIME_100); + if (pos_peak_time_hybrid < 0) { + rc = pos_peak_time_hybrid; + goto error; + } + + /* Calculate hybrid values to derive cable length to fault */ + pos_peak_cycle_hybrid = (pos_peak_time_hybrid >> 7) & 0x7f; + pos_peak_phase_hybrid = pos_peak_time_hybrid & 0x7f; + pos_peak_in_phases_hybrid = pos_peak_cycle_hybrid * 96 + + pos_peak_phase_hybrid; + + /* Distance to fault calculation. + * distance = (peak_in_phases - peak_in_phases_hybrid) * + * propagationconstant. + * constant to convert number of phases to meters + * propagationconstant = 0.015953 + * (0.6811 * 2.9979 * 156.2499 * 0.0001 * 0.5) + * Applying constant 1.5953 as ethtool further devides by 100 to + * convert to meters. + */ + if (detect == LAN87XX_CABLE_TEST_OPEN) { + distance = (((pos_peak_in_phases - pos_peak_in_phases_hybrid) + * 15953) / 10000); + } else if (detect == LAN87XX_CABLE_TEST_SAME_SHORT) { + distance = (((neg_peak_in_phases - pos_peak_in_phases_hybrid) + * 15953) / 10000); + } else { + distance = 0; + } + +get_len: + rc = lan887x_cd_reset(phydev, CD_TEST_DONE); + if (rc < 0) + return rc; + + length = ((u32)distance & GENMASK(15, 0)); + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + lan87xx_cable_test_report_trans(detect)); + ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, length); + + return 0; + +cd_stop: + /* Stop cable diag */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, + LAN887X_START_CBL_DIAG_100, + LAN887X_CBL_DIAG_STOP); + if (ret < 0) + return ret; + +error: + /* Cable diag test failed */ + ret = lan887x_cd_reset(phydev, CD_TEST_DONE); + if (ret < 0) + return ret; + + /* Return error in failure case */ + return rc; +} + +static int lan887x_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int rc; + + rc = lan887x_cable_test_chk(phydev, TEST_MODE_NORMAL); + if (rc < 0) { + /* Let PHY statemachine poll again */ + if (rc == -EAGAIN) + return 0; + return rc; + } + + /* Cable diag test complete */ + *finished = true; + + /* Retrieve test status and cable length to fault */ + return lan887x_cable_test_report(phydev); +} + static struct phy_driver microchip_t1_phy_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX), @@ -894,6 +1864,23 @@ static struct phy_driver microchip_t1_phy_driver[] = { .get_sqi_max = lan87xx_get_sqi_max, .cable_test_start = lan87xx_cable_test_start, .cable_test_get_status = lan87xx_cable_test_get_status, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_LAN887X), + .name = "Microchip LAN887x T1 PHY", + .flags = PHY_POLL_CABLE_TEST, + .probe = lan887x_probe, + .get_features = lan887x_get_features, + .config_init = lan887x_phy_init, + .config_aneg = lan887x_config_aneg, + .get_stats = lan887x_get_stats, + .get_sset_count = lan887x_get_sset_count, + .get_strings = lan887x_get_strings, + .suspend = genphy_suspend, + .resume = genphy_resume, + .read_status = genphy_c45_read_status, + .cable_test_start = lan887x_cable_test_start, + .cable_test_get_status = lan887x_cable_test_get_status, } }; @@ -902,6 +1889,7 @@ module_phy_driver(microchip_t1_phy_driver); static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = { { PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) }, { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) }, + { PHY_ID_MATCH_MODEL(PHY_ID_LAN887X) }, { } }; diff --git a/drivers/net/phy/microchip_t1s.c 
b/drivers/net/phy/microchip_t1s.c index 534ca7d1b061dd..3614839a8e5191 100644 --- a/drivers/net/phy/microchip_t1s.c +++ b/drivers/net/phy/microchip_t1s.c @@ -268,6 +268,34 @@ static int lan86xx_read_status(struct phy_device *phydev) return 0; } +/* OPEN Alliance 10BASE-T1x compliance MAC-PHYs will have both C22 and + * C45 registers space. If the PHY is discovered via C22 bus protocol it assumes + * it uses C22 protocol and always uses C22 registers indirect access to access + * C45 registers. This is because, we don't have a clean separation between + * C22/C45 register space and C22/C45 MDIO bus protocols. Resulting, PHY C45 + * registers direct access can't be used which can save multiple SPI bus access. + * To support this feature, set .read_mmd/.write_mmd in the PHY driver to call + * .read_c45/.write_c45 in the OPEN Alliance framework + * drivers/net/ethernet/oa_tc6.c + */ +static int lan865x_phy_read_mmd(struct phy_device *phydev, int devnum, + u16 regnum) +{ + struct mii_bus *bus = phydev->mdio.bus; + int addr = phydev->mdio.addr; + + return __mdiobus_c45_read(bus, addr, devnum, regnum); +} + +static int lan865x_phy_write_mmd(struct phy_device *phydev, int devnum, + u16 regnum, u16 val) +{ + struct mii_bus *bus = phydev->mdio.bus; + int addr = phydev->mdio.addr; + + return __mdiobus_c45_write(bus, addr, devnum, regnum, val); +} + static struct phy_driver microchip_t1s_driver[] = { { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1), @@ -285,6 +313,8 @@ static struct phy_driver microchip_t1s_driver[] = { .features = PHY_BASIC_T1S_P2MP_FEATURES, .config_init = lan865x_revb0_config_init, .read_status = lan86xx_read_status, + .read_mmd = lan865x_phy_read_mmd, + .write_mmd = lan865x_phy_write_mmd, .get_plca_cfg = genphy_c45_plca_get_cfg, .set_plca_cfg = genphy_c45_plca_set_cfg, .get_plca_status = genphy_c45_plca_get_status, diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c index 7a11fdb687cc41..0e91f5d1a4fdbd 100644 --- a/drivers/net/phy/motorcomm.c +++ b/drivers/net/phy/motorcomm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Motorcomm 8511/8521/8531/8531S PHY driver. + * Motorcomm 8511/8521/8531/8531S/8821 PHY driver. 
* * Author: Peter Geis * Author: Frank @@ -16,8 +16,8 @@ #define PHY_ID_YT8521 0x0000011a #define PHY_ID_YT8531 0x4f51e91b #define PHY_ID_YT8531S 0x4f51e91a - -/* YT8521/YT8531S Register Overview +#define PHY_ID_YT8821 0x4f51ea19 +/* YT8521/YT8531S/YT8821 Register Overview * UTP Register space | FIBER Register space * ------------------------------------------------------------ * | UTP MII | FIBER MII | @@ -46,12 +46,12 @@ /* Specific Status Register */ #define YTPHY_SPECIFIC_STATUS_REG 0x11 -#define YTPHY_SSR_SPEED_MODE_OFFSET 14 - -#define YTPHY_SSR_SPEED_MODE_MASK (BIT(15) | BIT(14)) -#define YTPHY_SSR_SPEED_10M 0x0 -#define YTPHY_SSR_SPEED_100M 0x1 -#define YTPHY_SSR_SPEED_1000M 0x2 +#define YTPHY_SSR_SPEED_MASK ((0x3 << 14) | BIT(9)) +#define YTPHY_SSR_SPEED_10M ((0x0 << 14)) +#define YTPHY_SSR_SPEED_100M ((0x1 << 14)) +#define YTPHY_SSR_SPEED_1000M ((0x2 << 14)) +#define YTPHY_SSR_SPEED_10G ((0x3 << 14)) +#define YTPHY_SSR_SPEED_2500M ((0x0 << 14) | BIT(9)) #define YTPHY_SSR_DUPLEX_OFFSET 13 #define YTPHY_SSR_DUPLEX BIT(13) #define YTPHY_SSR_PAGE_RECEIVED BIT(12) @@ -270,12 +270,89 @@ #define YT8531_SCR_CLK_SRC_REF_25M 4 #define YT8531_SCR_CLK_SRC_SSC_25M 5 +#define YT8821_SDS_EXT_CSR_CTRL_REG 0x23 +#define YT8821_SDS_EXT_CSR_VCO_LDO_EN BIT(15) +#define YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN BIT(8) + +#define YT8821_UTP_EXT_PI_CTRL_REG 0x56 +#define YT8821_UTP_EXT_PI_RST_N_FIFO BIT(5) +#define YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE BIT(4) +#define YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE BIT(3) +#define YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE BIT(2) +#define YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE BIT(1) +#define YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE BIT(0) + +#define YT8821_UTP_EXT_VCT_CFG6_CTRL_REG 0x97 +#define YT8821_UTP_EXT_FECHO_AMP_TH_HUGE GENMASK(15, 8) + +#define YT8821_UTP_EXT_ECHO_CTRL_REG 0x336 +#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000 GENMASK(14, 8) + +#define YT8821_UTP_EXT_GAIN_CTRL_REG 0x340 +#define YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000 GENMASK(6, 0) + +#define YT8821_UTP_EXT_RPDN_CTRL_REG 0x34E +#define YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 BIT(15) +#define YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 BIT(7) +#define YT8821_UTP_EXT_RPDN_IPR_SHT_2500 GENMASK(6, 0) + +#define YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG 0x36A +#define YT8821_UTP_EXT_TH_20DB_2500 GENMASK(15, 0) + +#define YT8821_UTP_EXT_TRACE_CTRL_REG 0x372 +#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 GENMASK(14, 8) +#define YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500 GENMASK(6, 0) + +#define YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG 0x374 +#define YT8821_UTP_EXT_ALPHA_SHT_2500 GENMASK(14, 8) +#define YT8821_UTP_EXT_IPR_LNG_2500 GENMASK(6, 0) + +#define YT8821_UTP_EXT_PLL_CTRL_REG 0x450 +#define YT8821_UTP_EXT_PLL_SPARE_CFG GENMASK(7, 0) + +#define YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG 0x466 +#define YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG GENMASK(14, 8) +#define YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG GENMASK(6, 0) + +#define YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG 0x467 +#define YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG GENMASK(14, 8) +#define YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG GENMASK(6, 0) + +#define YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG 0x468 +#define YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG GENMASK(14, 8) +#define YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG GENMASK(6, 0) + +#define YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG 0x469 +#define YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG GENMASK(14, 8) +#define YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG GENMASK(6, 0) + +#define YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG 0x4B3 +#define YT8821_UTP_EXT_MU_COARSE_FR_F_FFE GENMASK(14, 12) +#define 
YT8821_UTP_EXT_MU_COARSE_FR_F_FBE GENMASK(10, 8) + +#define YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG 0x4B5 +#define YT8821_UTP_EXT_MU_FINE_FR_F_FFE GENMASK(14, 12) +#define YT8821_UTP_EXT_MU_FINE_FR_F_FBE GENMASK(10, 8) + +#define YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG 0x4D2 +#define YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER GENMASK(7, 4) +#define YT8821_UTP_EXT_VGA_LPF1_CAP_2500 GENMASK(3, 0) + +#define YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG 0x4D3 +#define YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER GENMASK(7, 4) +#define YT8821_UTP_EXT_VGA_LPF2_CAP_2500 GENMASK(3, 0) + +#define YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG 0x660 +#define YT8821_UTP_EXT_NFR_TX_ABILITY BIT(3) /* Extended Register end */ #define YTPHY_DTS_OUTPUT_CLK_DIS 0 #define YTPHY_DTS_OUTPUT_CLK_25M 25000000 #define YTPHY_DTS_OUTPUT_CLK_125M 125000000 +#define YT8821_CHIP_MODE_AUTO_BX2500_SGMII 0 +#define YT8821_CHIP_MODE_FORCE_BX2500 1 + struct yt8521_priv { /* combo_advertising is used for case of YT8521 in combo mode, * this means that yt8521 may work in utp or fiber mode which depends @@ -1187,8 +1264,7 @@ static int yt8521_adjust_status(struct phy_device *phydev, int status, else duplex = DUPLEX_FULL; /* for fiber, it always DUPLEX_FULL */ - speed_mode = (status & YTPHY_SSR_SPEED_MODE_MASK) >> - YTPHY_SSR_SPEED_MODE_OFFSET; + speed_mode = status & YTPHY_SSR_SPEED_MASK; switch (speed_mode) { case YTPHY_SSR_SPEED_10M: @@ -2252,6 +2328,572 @@ static int yt8521_get_features(struct phy_device *phydev) return ret; } +/** + * yt8821_get_features - read mmd register to get 2.5G capability + * @phydev: target phy_device struct + * + * Returns: 0 or negative errno code + */ +static int yt8821_get_features(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_pma_read_ext_abilities(phydev); + if (ret < 0) + return ret; + + return genphy_read_abilities(phydev); +} + +/** + * yt8821_get_rate_matching - read register to get phy chip mode + * @phydev: target phy_device struct + * @iface: PHY data interface type + * + * Returns: rate matching type or negative errno code + */ +static int yt8821_get_rate_matching(struct phy_device *phydev, + phy_interface_t iface) +{ + int val; + + val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG); + if (val < 0) + return val; + + if (FIELD_GET(YT8521_CCR_MODE_SEL_MASK, val) == + YT8821_CHIP_MODE_FORCE_BX2500) + return RATE_MATCH_PAUSE; + + return RATE_MATCH_NONE; +} + +/** + * yt8821_aneg_done() - determines the auto negotiation result + * @phydev: a pointer to a &struct phy_device + * + * Returns: 0(no link)or 1(utp link) or negative errno code + */ +static int yt8821_aneg_done(struct phy_device *phydev) +{ + return yt8521_aneg_done_paged(phydev, YT8521_RSSR_UTP_SPACE); +} + +/** + * yt8821_serdes_init() - serdes init + * @phydev: a pointer to a &struct phy_device + * + * Returns: 0 or negative errno code + */ +static int yt8821_serdes_init(struct phy_device *phydev) +{ + int old_page; + int ret = 0; + u16 mask; + u16 set; + + old_page = phy_select_page(phydev, YT8521_RSSR_FIBER_SPACE); + if (old_page < 0) { + phydev_err(phydev, "Failed to select page: %d\n", + old_page); + goto err_restore_page; + } + + ret = __phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_SDS_EXT_CSR_VCO_LDO_EN | + YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN; + set = YT8821_SDS_EXT_CSR_VCO_LDO_EN; + ret = ytphy_modify_ext(phydev, YT8821_SDS_EXT_CSR_CTRL_REG, mask, + set); + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + +/** + * yt8821_utp_init() - utp init + * 
@phydev: a pointer to a &struct phy_device + * + * Returns: 0 or negative errno code + */ +static int yt8821_utp_init(struct phy_device *phydev) +{ + int old_page; + int ret = 0; + u16 mask; + u16 save; + u16 set; + + old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE); + if (old_page < 0) { + phydev_err(phydev, "Failed to select page: %d\n", + old_page); + goto err_restore_page; + } + + mask = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 | + YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 | + YT8821_UTP_EXT_RPDN_IPR_SHT_2500; + set = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 | + YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500; + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_RPDN_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER | + YT8821_UTP_EXT_VGA_LPF1_CAP_2500; + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG, + mask, 0); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER | + YT8821_UTP_EXT_VGA_LPF2_CAP_2500; + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG, + mask, 0); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 | + YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500; + set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500, 0x5a) | + FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500, 0x3c); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_TRACE_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_IPR_LNG_2500; + set = FIELD_PREP(YT8821_UTP_EXT_IPR_LNG_2500, 0x6c); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000; + set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000, 0x2a); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_ECHO_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000; + set = FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000, 0x22); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_GAIN_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_TH_20DB_2500; + set = FIELD_PREP(YT8821_UTP_EXT_TH_20DB_2500, 0x8000); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_MU_COARSE_FR_F_FFE | + YT8821_UTP_EXT_MU_COARSE_FR_F_FBE; + set = FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FFE, 0x7) | + FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FBE, 0x7); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_MU_FINE_FR_F_FFE | + YT8821_UTP_EXT_MU_FINE_FR_F_FBE; + set = FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FFE, 0x2) | + FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FBE, 0x2); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + /* save YT8821_UTP_EXT_PI_CTRL_REG's val for use later */ + ret = ytphy_read_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG); + if (ret < 0) + goto err_restore_page; + + save = ret; + + mask = YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE | + YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE | + YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE | + YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE | + YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE; + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG, + mask, 0); + if (ret < 0) + goto err_restore_page; + + /* restore 
YT8821_UTP_EXT_PI_CTRL_REG's val */ + ret = ytphy_write_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG, save); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_FECHO_AMP_TH_HUGE; + set = FIELD_PREP(YT8821_UTP_EXT_FECHO_AMP_TH_HUGE, 0x38); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_VCT_CFG6_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_NFR_TX_ABILITY; + set = YT8821_UTP_EXT_NFR_TX_ABILITY; + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_PLL_SPARE_CFG; + set = FIELD_PREP(YT8821_UTP_EXT_PLL_SPARE_CFG, 0xe9); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PLL_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG | + YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG; + set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG, 0x64) | + FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG, 0x64); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG | + YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG; + set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG, 0x64) | + FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG, 0x64); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG | + YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG; + set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG, 0x64) | + FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG, 0x64); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG | + YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG; + set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG, 0x64) | + FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG, 0x64); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG, + mask, set); + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + +/** + * yt8821_auto_sleep_config() - phy auto sleep config + * @phydev: a pointer to a &struct phy_device + * @enable: true enable auto sleep, false disable auto sleep + * + * Returns: 0 or negative errno code + */ +static int yt8821_auto_sleep_config(struct phy_device *phydev, + bool enable) +{ + int old_page; + int ret = 0; + + old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE); + if (old_page < 0) { + phydev_err(phydev, "Failed to select page: %d\n", + old_page); + goto err_restore_page; + } + + ret = ytphy_modify_ext(phydev, + YT8521_EXTREG_SLEEP_CONTROL1_REG, + YT8521_ESC1R_SLEEP_SW, + enable ? 
1 : 0); + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + +/** + * yt8821_soft_reset() - soft reset utp and serdes + * @phydev: a pointer to a &struct phy_device + * + * Returns: 0 or negative errno code + */ +static int yt8821_soft_reset(struct phy_device *phydev) +{ + return ytphy_modify_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG, + YT8521_CCR_SW_RST, 0); +} + +/** + * yt8821_config_init() - phy initializatioin + * @phydev: a pointer to a &struct phy_device + * + * Returns: 0 or negative errno code + */ +static int yt8821_config_init(struct phy_device *phydev) +{ + u8 mode = YT8821_CHIP_MODE_AUTO_BX2500_SGMII; + int ret; + u16 set; + + if (phydev->interface == PHY_INTERFACE_MODE_2500BASEX) + mode = YT8821_CHIP_MODE_FORCE_BX2500; + + set = FIELD_PREP(YT8521_CCR_MODE_SEL_MASK, mode); + ret = ytphy_modify_ext_with_lock(phydev, + YT8521_CHIP_CONFIG_REG, + YT8521_CCR_MODE_SEL_MASK, + set); + if (ret < 0) + return ret; + + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + phydev->possible_interfaces); + + if (mode == YT8821_CHIP_MODE_AUTO_BX2500_SGMII) { + __set_bit(PHY_INTERFACE_MODE_SGMII, + phydev->possible_interfaces); + + phydev->rate_matching = RATE_MATCH_NONE; + } else if (mode == YT8821_CHIP_MODE_FORCE_BX2500) { + phydev->rate_matching = RATE_MATCH_PAUSE; + } + + ret = yt8821_serdes_init(phydev); + if (ret < 0) + return ret; + + ret = yt8821_utp_init(phydev); + if (ret < 0) + return ret; + + /* disable auto sleep */ + ret = yt8821_auto_sleep_config(phydev, false); + if (ret < 0) + return ret; + + /* soft reset */ + return yt8821_soft_reset(phydev); +} + +/** + * yt8821_adjust_status() - update speed and duplex to phydev + * @phydev: a pointer to a &struct phy_device + * @val: read from YTPHY_SPECIFIC_STATUS_REG + */ +static void yt8821_adjust_status(struct phy_device *phydev, int val) +{ + int speed, duplex; + int speed_mode; + + duplex = FIELD_GET(YTPHY_SSR_DUPLEX, val); + speed_mode = val & YTPHY_SSR_SPEED_MASK; + switch (speed_mode) { + case YTPHY_SSR_SPEED_10M: + speed = SPEED_10; + break; + case YTPHY_SSR_SPEED_100M: + speed = SPEED_100; + break; + case YTPHY_SSR_SPEED_1000M: + speed = SPEED_1000; + break; + case YTPHY_SSR_SPEED_2500M: + speed = SPEED_2500; + break; + default: + speed = SPEED_UNKNOWN; + break; + } + + phydev->speed = speed; + phydev->duplex = duplex; +} + +/** + * yt8821_update_interface() - update interface per current speed + * @phydev: a pointer to a &struct phy_device + */ +static void yt8821_update_interface(struct phy_device *phydev) +{ + if (!phydev->link) + return; + + switch (phydev->speed) { + case SPEED_2500: + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + break; + case SPEED_1000: + case SPEED_100: + case SPEED_10: + phydev->interface = PHY_INTERFACE_MODE_SGMII; + break; + default: + phydev_warn(phydev, "phy speed err :%d\n", phydev->speed); + break; + } +} + +/** + * yt8821_read_status() - determines the negotiated speed and duplex + * @phydev: a pointer to a &struct phy_device + * + * Returns: 0 or negative errno code + */ +static int yt8821_read_status(struct phy_device *phydev) +{ + int link; + int ret; + int val; + + ret = ytphy_write_ext_with_lock(phydev, + YT8521_REG_SPACE_SELECT_REG, + YT8521_RSSR_UTP_SPACE); + if (ret < 0) + return ret; + + ret = genphy_read_status(phydev); + if (ret < 0) + return ret; + + if (phydev->autoneg_complete) { + ret = genphy_c45_read_lpa(phydev); + if (ret < 0) + return ret; + } + + ret = phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG); + if (ret < 0) + return ret; + + val = ret; + + link = val 
& YTPHY_SSR_LINK; + if (link) + yt8821_adjust_status(phydev, val); + + if (link) { + if (phydev->link == 0) + phydev_dbg(phydev, + "%s, phy addr: %d, link up\n", + __func__, phydev->mdio.addr); + phydev->link = 1; + } else { + if (phydev->link == 1) + phydev_dbg(phydev, + "%s, phy addr: %d, link down\n", + __func__, phydev->mdio.addr); + phydev->link = 0; + } + + val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG); + if (val < 0) + return val; + + if (FIELD_GET(YT8521_CCR_MODE_SEL_MASK, val) == + YT8821_CHIP_MODE_AUTO_BX2500_SGMII) + yt8821_update_interface(phydev); + + return 0; +} + +/** + * yt8821_modify_utp_fiber_bmcr - bits modify a PHY's BMCR register + * @phydev: the phy_device struct + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * NOTE: Convenience function which allows a PHY's BMCR register to be + * modified as new register value = (old register value & ~mask) | set. + * + * Returns: 0 or negative errno code + */ +static int yt8821_modify_utp_fiber_bmcr(struct phy_device *phydev, + u16 mask, u16 set) +{ + int ret; + + ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_UTP_SPACE, + mask, set); + if (ret < 0) + return ret; + + return yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_FIBER_SPACE, + mask, set); +} + +/** + * yt8821_suspend() - suspend the hardware + * @phydev: a pointer to a &struct phy_device + * + * Returns: 0 or negative errno code + */ +static int yt8821_suspend(struct phy_device *phydev) +{ + int wol_config; + + wol_config = ytphy_read_ext_with_lock(phydev, + YTPHY_WOL_CONFIG_REG); + if (wol_config < 0) + return wol_config; + + /* if wol enable, do nothing */ + if (wol_config & YTPHY_WCR_ENABLE) + return 0; + + return yt8821_modify_utp_fiber_bmcr(phydev, 0, BMCR_PDOWN); +} + +/** + * yt8821_resume() - resume the hardware + * @phydev: a pointer to a &struct phy_device + * + * Returns: 0 or negative errno code + */ +static int yt8821_resume(struct phy_device *phydev) +{ + int wol_config; + int ret; + + /* disable auto sleep */ + ret = yt8821_auto_sleep_config(phydev, false); + if (ret < 0) + return ret; + + wol_config = ytphy_read_ext_with_lock(phydev, + YTPHY_WOL_CONFIG_REG); + if (wol_config < 0) + return wol_config; + + /* if wol enable, do nothing */ + if (wol_config & YTPHY_WCR_ENABLE) + return 0; + + return yt8821_modify_utp_fiber_bmcr(phydev, BMCR_PDOWN, 0); +} + static struct phy_driver motorcomm_phy_drvs[] = { { PHY_ID_MATCH_EXACT(PHY_ID_YT8511), @@ -2307,11 +2949,28 @@ static struct phy_driver motorcomm_phy_drvs[] = { .suspend = yt8521_suspend, .resume = yt8521_resume, }, + { + PHY_ID_MATCH_EXACT(PHY_ID_YT8821), + .name = "YT8821 2.5Gbps PHY", + .get_features = yt8821_get_features, + .read_page = yt8521_read_page, + .write_page = yt8521_write_page, + .get_wol = ytphy_get_wol, + .set_wol = ytphy_set_wol, + .config_aneg = genphy_config_aneg, + .aneg_done = yt8821_aneg_done, + .config_init = yt8821_config_init, + .get_rate_matching = yt8821_get_rate_matching, + .read_status = yt8821_read_status, + .soft_reset = yt8821_soft_reset, + .suspend = yt8821_suspend, + .resume = yt8821_resume, + }, }; module_phy_driver(motorcomm_phy_drvs); -MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S PHY driver"); +MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S/8821 PHY driver"); MODULE_AUTHOR("Peter Geis"); MODULE_AUTHOR("Frank"); MODULE_LICENSE("GPL"); @@ -2321,6 +2980,7 @@ static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = { { PHY_ID_MATCH_EXACT(PHY_ID_YT8521) }, { PHY_ID_MATCH_EXACT(PHY_ID_YT8531) }, { 
PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) }, + { PHY_ID_MATCH_EXACT(PHY_ID_YT8821) }, { /* sentinel */ } }; diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index c1ddae36a2ae61..738a8822fcf014 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include "mscc.h" #include "mscc_ptp.h" diff --git a/drivers/net/phy/open_alliance_helpers.c b/drivers/net/phy/open_alliance_helpers.c new file mode 100644 index 00000000000000..36a70451d7da0c --- /dev/null +++ b/drivers/net/phy/open_alliance_helpers.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * open_alliance_helpers.c - OPEN Alliance specific PHY diagnostic helpers + * + * This file contains helper functions for implementing advanced diagnostic + * features as specified by the OPEN Alliance for automotive Ethernet PHYs. + * These helpers include functionality for Time Delay Reflection (TDR), dynamic + * channel quality assessment, and other PHY diagnostics. + * + * For more information on the specifications, refer to the OPEN Alliance + * documentation: https://opensig.org/automotive-ethernet-specifications/ + * Currently following specifications are partially or fully implemented: + * - Advanced diagnostic features for 1000BASE-T1 automotive Ethernet PHYs. + * TC12 - advanced PHY features. + * https://opensig.org/wp-content/uploads/2024/03/Advanced_PHY_features_for_automotive_Ethernet_v2.0_fin.pdf + */ + +#include +#include + +#include "open_alliance_helpers.h" + +/** + * oa_1000bt1_get_ethtool_cable_result_code - Convert TDR status to ethtool + * result code + * @reg_value: Value read from the TDR register + * + * This function takes a register value from the HDD.TDR register and converts + * the TDR status to the corresponding ethtool cable test result code. + * + * Return: The appropriate ethtool result code based on the TDR status + */ +int oa_1000bt1_get_ethtool_cable_result_code(u16 reg_value) +{ + u8 tdr_status = FIELD_GET(OA_1000BT1_HDD_TDR_STATUS_MASK, reg_value); + u8 dist_val = FIELD_GET(OA_1000BT1_HDD_TDR_DISTANCE_MASK, reg_value); + + switch (tdr_status) { + case OA_1000BT1_HDD_TDR_STATUS_CABLE_OK: + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + case OA_1000BT1_HDD_TDR_STATUS_OPEN: + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + case OA_1000BT1_HDD_TDR_STATUS_SHORT: + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + case OA_1000BT1_HDD_TDR_STATUS_NOISE: + return ETHTOOL_A_CABLE_RESULT_CODE_NOISE; + default: + if (dist_val == OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE) + return ETHTOOL_A_CABLE_RESULT_CODE_RESOLUTION_NOT_POSSIBLE; + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } +} +EXPORT_SYMBOL_GPL(oa_1000bt1_get_ethtool_cable_result_code); + +/** + * oa_1000bt1_get_tdr_distance - Get distance to the main fault from TDR + * register value + * @reg_value: Value read from the TDR register + * + * This function takes a register value from the HDD.TDR register and extracts + * the distance to the main fault detected by the TDR feature. The distance is + * measured in centimeters and ranges from 0 to 3100 centimeters. If the + * distance is not available (0x3f), the function returns -ERANGE. + * + * Return: The distance to the main fault in centimeters, or -ERANGE if the + * resolution is not possible. 
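A minimal sketch of how a 1000BASE-T1 PHY driver might consume these helpers from its cable_test_get_status() hook. Only the two oa_1000bt1_* helpers come from this patch; the register read path and the MY_PHY_HDD_TDR_REG offset are hypothetical stand-ins, since the OPEN Alliance specification leaves the HDD.TDR register location to the vendor.

static int my_phy_cable_test_get_status(struct phy_device *phydev,
					bool *finished)
{
	int reg, dist, ret;

	/* Vendor-specific location of the HDD.TDR register (assumption) */
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, MY_PHY_HDD_TDR_REG);
	if (reg < 0)
		return reg;

	*finished = true;

	/* Map the TDR status bits to an ethtool cable-test result code */
	ret = ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
				      oa_1000bt1_get_ethtool_cable_result_code(reg));
	if (ret)
		return ret;

	/* Report the fault distance unless resolution was not possible */
	dist = oa_1000bt1_get_tdr_distance(reg);
	if (dist >= 0)
		ret = ethnl_cable_test_fault_length(phydev,
						    ETHTOOL_A_CABLE_PAIR_A,
						    dist);

	return ret;
}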
+ */ +int oa_1000bt1_get_tdr_distance(u16 reg_value) +{ + u8 dist_val = FIELD_GET(OA_1000BT1_HDD_TDR_DISTANCE_MASK, reg_value); + + if (dist_val == OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE) + return -ERANGE; + + return dist_val * 100; +} +EXPORT_SYMBOL_GPL(oa_1000bt1_get_tdr_distance); diff --git a/drivers/net/phy/open_alliance_helpers.h b/drivers/net/phy/open_alliance_helpers.h new file mode 100644 index 00000000000000..8b7d97bc6f1861 --- /dev/null +++ b/drivers/net/phy/open_alliance_helpers.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef OPEN_ALLIANCE_HELPERS_H +#define OPEN_ALLIANCE_HELPERS_H + +/* + * These defines reflect the TDR (Time Delay Reflection) diagnostic feature + * for 1000BASE-T1 automotive Ethernet PHYs as specified by the OPEN Alliance. + * + * The register values are part of the HDD.TDR register, which provides + * information about the cable status and faults. The exact register offset + * is device-specific and should be provided by the driver. + */ +#define OA_1000BT1_HDD_TDR_ACTIVATION_MASK GENMASK(1, 0) +#define OA_1000BT1_HDD_TDR_ACTIVATION_OFF 1 +#define OA_1000BT1_HDD_TDR_ACTIVATION_ON 2 + +#define OA_1000BT1_HDD_TDR_STATUS_MASK GENMASK(7, 4) +#define OA_1000BT1_HDD_TDR_STATUS_SHORT 3 +#define OA_1000BT1_HDD_TDR_STATUS_OPEN 6 +#define OA_1000BT1_HDD_TDR_STATUS_NOISE 5 +#define OA_1000BT1_HDD_TDR_STATUS_CABLE_OK 7 +#define OA_1000BT1_HDD_TDR_STATUS_TEST_IN_PROGRESS 8 +#define OA_1000BT1_HDD_TDR_STATUS_TEST_NOT_POSSIBLE 13 + +/* + * OA_1000BT1_HDD_TDR_DISTANCE_MASK: + * This mask is used to extract the distance to the first/main fault + * detected by the TDR feature. Each bit represents an approximate distance + * of 1 meter, ranging from 0 to 31 meters. The exact interpretation of the + * bits may vary, but generally: + * 000000 = no error + * 000001 = error about 0-1m away + * 000010 = error between 1-2m away + * ... 
+ * 011111 = error about 30-31m away + * 111111 = resolution not possible / out of distance + */ +#define OA_1000BT1_HDD_TDR_DISTANCE_MASK GENMASK(13, 8) +#define OA_1000BT1_HDD_TDR_DISTANCE_NO_ERROR 0 +#define OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE 0x3f + +int oa_1000bt1_get_ethtool_cable_result_code(u16 reg_value); +int oa_1000bt1_get_tdr_distance(u16 reg_value); + +#endif /* OPEN_ALLIANCE_HELPERS_H */ + diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 785182fa5fe01f..4f3e742907cb62 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -342,14 +342,19 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - mii_data->val_out = mdiobus_c45_read( - phydev->mdio.bus, prtad, devad, - mii_data->reg_num); + ret = mdiobus_c45_read(phydev->mdio.bus, prtad, devad, + mii_data->reg_num); + } else { - mii_data->val_out = mdiobus_read( - phydev->mdio.bus, mii_data->phy_id, - mii_data->reg_num); + ret = mdiobus_read(phydev->mdio.bus, mii_data->phy_id, + mii_data->reg_num); } + + if (ret < 0) + return ret; + + mii_data->val_out = ret; + return 0; case SIOCSMIIREG: @@ -1089,7 +1094,10 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE) return -EINVAL; - if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising)) + if (autoneg == AUTONEG_ENABLE && + (linkmode_empty(advertising) || + !linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported))) return -EINVAL; if (autoneg == AUTONEG_DISABLE && diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 6bb2793de0a94a..560e338b307a40 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -279,6 +280,15 @@ static struct phy_driver genphy_driver; static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); +static bool phy_drv_wol_enabled(struct phy_device *phydev) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + + phy_ethtool_get_wol(phydev, &wol); + + return wol.wolopts != 0; +} + static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { struct device_driver *drv = phydev->mdio.dev.driver; @@ -288,6 +298,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) if (!drv || !phydrv->suspend) return false; + /* If the PHY on the mido bus is not attached but has WOL enabled + * we cannot suspend the PHY. + */ + if (!netdev && phy_drv_wol_enabled(phydev)) + return false; + /* PHY not attached? May suspend if the PHY has not already been * suspended as part of a prior call to phy_disconnect() -> * phy_detach() -> phy_suspend() because the parent netdev might be the @@ -1369,6 +1385,48 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(phy_standalone); +/** + * phy_sfp_connect_phy - Connect the SFP module's PHY to the upstream PHY + * @upstream: pointer to the upstream phy device + * @phy: pointer to the SFP module's phy device + * + * This helper allows keeping track of PHY devices on the link. It adds the + * SFP module's phy to the phy namespace of the upstream phy + * + * Return: 0 on success, otherwise a negative error code. 
+ */ +int phy_sfp_connect_phy(void *upstream, struct phy_device *phy) +{ + struct phy_device *phydev = upstream; + struct net_device *dev = phydev->attached_dev; + + if (dev) + return phy_link_topo_add_phy(dev, phy, PHY_UPSTREAM_PHY, phydev); + + return 0; +} +EXPORT_SYMBOL(phy_sfp_connect_phy); + +/** + * phy_sfp_disconnect_phy - Disconnect the SFP module's PHY from the upstream PHY + * @upstream: pointer to the upstream phy device + * @phy: pointer to the SFP module's phy device + * + * This helper allows keeping track of PHY devices on the link. It removes the + * SFP module's phy to the phy namespace of the upstream phy. As the module phy + * will be destroyed, re-inserting the same module will add a new phy with a + * new index. + */ +void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy) +{ + struct phy_device *phydev = upstream; + struct net_device *dev = phydev->attached_dev; + + if (dev) + phy_link_topo_del_phy(dev, phy); +} +EXPORT_SYMBOL(phy_sfp_disconnect_phy); + /** * phy_sfp_attach - attach the SFP bus to the PHY upstream network device * @upstream: pointer to the phy device @@ -1511,6 +1569,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (phydev->sfp_bus_attached) dev->sfp_bus = phydev->sfp_bus; + + err = phy_link_topo_add_phy(dev, phydev, PHY_UPSTREAM_MAC, dev); + if (err) + goto error; } /* Some Ethernet drivers try to connect to a PHY device before @@ -1938,6 +2000,7 @@ void phy_detach(struct phy_device *phydev) if (dev) { phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; + phy_link_topo_del_phy(dev, phydev); } phydev->phylink = NULL; @@ -1975,7 +2038,6 @@ EXPORT_SYMBOL(phy_detach); int phy_suspend(struct phy_device *phydev) { - struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; struct net_device *netdev = phydev->attached_dev; const struct phy_driver *phydrv = phydev->drv; int ret; @@ -1983,8 +2045,7 @@ int phy_suspend(struct phy_device *phydev) if (phydev->suspended || !phydrv) return 0; - phy_ethtool_get_wol(phydev, &wol); - phydev->wol_enabled = wol.wolopts || + phydev->wol_enabled = phy_drv_wol_enabled(phydev) || (netdev && netdev->ethtool->wol_enabled); /* If the device has WOL enabled, we cannot suspend the PHY */ if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND)) @@ -2095,22 +2156,20 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable); /** * genphy_config_advert - sanitize and advertise auto-negotiation parameters * @phydev: target phy_device struct + * @advert: auto-negotiation parameters to advertise * * Description: Writes MII_ADVERTISE with the appropriate values, * after sanitizing the values to make sure we only advertise * what is supported. Returns < 0 on error, 0 if the PHY's advertisement * hasn't changed, and > 0 if it has changed. 
*/ -static int genphy_config_advert(struct phy_device *phydev) +static int genphy_config_advert(struct phy_device *phydev, + const unsigned long *advert) { int err, bmsr, changed = 0; u32 adv; - /* Only allow advertising what this PHY supports */ - linkmode_and(phydev->advertising, phydev->advertising, - phydev->supported); - - adv = linkmode_adv_to_mii_adv_t(phydev->advertising); + adv = linkmode_adv_to_mii_adv_t(advert); /* Setup standard advertisement */ err = phy_modify_changed(phydev, MII_ADVERTISE, @@ -2133,7 +2192,7 @@ static int genphy_config_advert(struct phy_device *phydev) if (!(bmsr & BMSR_ESTATEN)) return changed; - adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); + adv = linkmode_adv_to_mii_ctrl1000_t(advert); err = phy_modify_changed(phydev, MII_CTRL1000, ADVERTISE_1000FULL | ADVERTISE_1000HALF, @@ -2357,6 +2416,9 @@ EXPORT_SYMBOL(genphy_check_and_restart_aneg); */ int __genphy_config_aneg(struct phy_device *phydev, bool changed) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert); + const struct phy_setting *set; + unsigned long *advert; int err; err = genphy_c45_an_config_eee_aneg(phydev); @@ -2371,10 +2433,25 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed) else if (err) changed = true; - if (AUTONEG_ENABLE != phydev->autoneg) + if (phydev->autoneg == AUTONEG_ENABLE) { + /* Only allow advertising what this PHY supports */ + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); + advert = phydev->advertising; + } else if (phydev->speed < SPEED_1000) { return genphy_setup_forced(phydev); + } else { + linkmode_zero(fixed_advert); + + set = phy_lookup_setting(phydev->speed, phydev->duplex, + phydev->supported, true); + if (set) + linkmode_set_bit(set->bit, fixed_advert); + + advert = fixed_advert; + } - err = genphy_config_advert(phydev); + err = genphy_config_advert(phydev, advert); if (err < 0) /* error */ return err; else if (err) @@ -3330,7 +3407,7 @@ static int of_phy_led(struct phy_device *phydev, static int of_phy_leds(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; - struct device_node *leds, *led; + struct device_node *leds; int err; if (!IS_ENABLED(CONFIG_OF_MDIO)) @@ -3343,10 +3420,9 @@ static int of_phy_leds(struct phy_device *phydev) if (!leds) return 0; - for_each_available_child_of_node(leds, led) { + for_each_available_child_of_node_scoped(leds, led) { err = of_phy_led(phydev, led); if (err) { - of_node_put(led); of_node_put(leds); phy_leds_unregister(phydev); return err; diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c new file mode 100644 index 00000000000000..4a5d73002a1a85 --- /dev/null +++ b/drivers/net/phy/phy_link_topology.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Infrastructure to handle all PHY devices connected to a given netdev, + * either directly or indirectly attached. 
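Each tracked PHY is stored as a phy_device_node in the topology's xarray, keyed by phy->phyindex (indices are allocated cyclically starting at 1, see XA_FLAGS_ALLOC1 below). A minimal lookup sketch built on that layout; the helper name is illustrative and not part of this patch.

/* Illustrative lookup by index, assuming the struct layout used below */
static inline struct phy_device *
example_link_topo_get_phy(struct net_device *dev, u32 phyindex)
{
	struct phy_link_topology *topo = dev->link_topo;
	struct phy_device_node *pdn;

	if (!topo)
		return NULL;

	pdn = xa_load(&topo->phys, phyindex);
	return pdn ? pdn->phy : NULL;
}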
+ * + * Copyright (c) 2023 Maxime Chevallier + */ + +#include +#include +#include +#include + +static int netdev_alloc_phy_link_topology(struct net_device *dev) +{ + struct phy_link_topology *topo; + + topo = kzalloc(sizeof(*topo), GFP_KERNEL); + if (!topo) + return -ENOMEM; + + xa_init_flags(&topo->phys, XA_FLAGS_ALLOC1); + topo->next_phy_index = 1; + + dev->link_topo = topo; + + return 0; +} + +int phy_link_topo_add_phy(struct net_device *dev, + struct phy_device *phy, + enum phy_upstream upt, void *upstream) +{ + struct phy_link_topology *topo = dev->link_topo; + struct phy_device_node *pdn; + int ret; + + if (!topo) { + ret = netdev_alloc_phy_link_topology(dev); + if (ret) + return ret; + + topo = dev->link_topo; + } + + pdn = kzalloc(sizeof(*pdn), GFP_KERNEL); + if (!pdn) + return -ENOMEM; + + pdn->phy = phy; + switch (upt) { + case PHY_UPSTREAM_MAC: + pdn->upstream.netdev = (struct net_device *)upstream; + if (phy_on_sfp(phy)) + pdn->parent_sfp_bus = pdn->upstream.netdev->sfp_bus; + break; + case PHY_UPSTREAM_PHY: + pdn->upstream.phydev = (struct phy_device *)upstream; + if (phy_on_sfp(phy)) + pdn->parent_sfp_bus = pdn->upstream.phydev->sfp_bus; + break; + default: + ret = -EINVAL; + goto err; + } + pdn->upstream_type = upt; + + /* Attempt to re-use a previously allocated phy_index */ + if (phy->phyindex) + ret = xa_insert(&topo->phys, phy->phyindex, pdn, GFP_KERNEL); + else + ret = xa_alloc_cyclic(&topo->phys, &phy->phyindex, pdn, + xa_limit_32b, &topo->next_phy_index, + GFP_KERNEL); + + if (ret) + goto err; + + return 0; + +err: + kfree(pdn); + return ret; +} +EXPORT_SYMBOL_GPL(phy_link_topo_add_phy); + +void phy_link_topo_del_phy(struct net_device *dev, + struct phy_device *phy) +{ + struct phy_link_topology *topo = dev->link_topo; + struct phy_device_node *pdn; + + if (!topo) + return; + + pdn = xa_erase(&topo->phys, phy->phyindex); + + /* We delete the PHY from the topology, however we don't re-set the + * phy->phyindex field. If the PHY isn't gone, we can re-assign it the + * same index next time it's added back to the topology + */ + + kfree(pdn); +} +EXPORT_SYMBOL_GPL(phy_link_topo_del_phy); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 51c526d227fab3..4309317de3d143 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1635,6 +1635,48 @@ static int phylink_register_sfp(struct phylink *pl, return ret; } +/** + * phylink_set_fixed_link() - set the fixed link + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @state: a pointer to a struct phylink_link_state. + * + * This function is used when the link parameters are known and do not change, + * making it suitable for certain types of network connections. + * + * Returns: zero on success or negative error code. 
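A minimal usage sketch, assuming a MAC driver that created its phylink instance for MLO_AN_PHY and has not started it yet (both conditions are enforced by the function below); the wrapper itself is hypothetical.

/* Illustrative only: pin a stopped phylink instance to 1G full duplex */
static int example_use_fixed_link(struct phylink *pl)
{
	struct phylink_link_state state = {
		.speed	= SPEED_1000,
		.duplex	= DUPLEX_FULL,
	};

	/* Rejected unless cfg_link_an_mode == MLO_AN_PHY and the
	 * instance is still stopped.
	 */
	return phylink_set_fixed_link(pl, &state);
}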
+ */ +int phylink_set_fixed_link(struct phylink *pl, + const struct phylink_link_state *state) +{ + const struct phy_setting *s; + unsigned long *adv; + + if (pl->cfg_link_an_mode != MLO_AN_PHY || !state || + !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) + return -EINVAL; + + s = phy_lookup_setting(state->speed, state->duplex, + pl->supported, true); + if (!s) + return -EINVAL; + + adv = pl->link_config.advertising; + linkmode_zero(adv); + linkmode_set_bit(s->bit, adv); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv); + + pl->link_config.speed = state->speed; + pl->link_config.duplex = state->duplex; + pl->link_config.link = 1; + pl->link_config.an_complete = 1; + + pl->cfg_link_an_mode = MLO_AN_FIXED; + pl->cur_link_an_mode = pl->cfg_link_an_mode; + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_set_fixed_link); + /** * phylink_create() - create a phylink instance * @config: a pointer to the target &struct phylink_config @@ -3423,7 +3465,8 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) return ret; } -static void phylink_sfp_disconnect_phy(void *upstream) +static void phylink_sfp_disconnect_phy(void *upstream, + struct phy_device *phydev) { phylink_disconnect_phy(upstream); } diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c index c8f83e5f78ab9c..105602581a0336 100644 --- a/drivers/net/phy/qcom/at803x.c +++ b/drivers/net/phy/qcom/at803x.c @@ -770,6 +770,8 @@ static const struct sfp_upstream_ops at8031_sfp_ops = { .attach = phy_sfp_attach, .detach = phy_sfp_detach, .module_insert = at8031_sfp_insert, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, }; static int at8031_parse_dt(struct phy_device *phydev) diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c index 672c6929119a62..bd8a51ec0ecd6a 100644 --- a/drivers/net/phy/qcom/qca807x.c +++ b/drivers/net/phy/qcom/qca807x.c @@ -699,6 +699,8 @@ static const struct sfp_upstream_ops qca807x_sfp_ops = { .detach = phy_sfp_detach, .module_insert = qca807x_sfp_insert, .module_remove = qca807x_sfp_remove, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, }; static int qca807x_probe(struct phy_device *phydev) @@ -733,16 +735,6 @@ static int qca807x_probe(struct phy_device *phydev) "qcom,dac-disable-bias-current-tweak"); #if IS_ENABLED(CONFIG_GPIOLIB) - /* Make sure we don't have mixed leds node and gpio-controller - * to prevent registering leds and having gpio-controller usage - * conflicting with them. - */ - if (of_find_property(node, "leds", NULL) && - of_find_property(node, "gpio-controller", NULL)) { - phydev_err(phydev, "Invalid property detected. 
LEDs and gpio-controller are mutually exclusive."); - return -EINVAL; - } - /* Do not register a GPIO controller unless flagged for it */ if (of_property_read_bool(node, "gpio-controller")) { ret = qca807x_gpio(phydev); diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c index 5d083ef0250eaa..a05d0df6fa16b8 100644 --- a/drivers/net/phy/qcom/qca83xx.c +++ b/drivers/net/phy/qcom/qca83xx.c @@ -15,7 +15,6 @@ #define QCA8327_A_PHY_ID 0x004dd033 #define QCA8327_B_PHY_ID 0x004dd034 #define QCA8337_PHY_ID 0x004dd036 -#define QCA8K_PHY_ID_MASK 0xffffffff #define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0) @@ -216,8 +215,7 @@ static int qca8327_suspend(struct phy_device *phydev) static struct phy_driver qca83xx_driver[] = { { /* QCA8337 */ - .phy_id = QCA8337_PHY_ID, - .phy_id_mask = QCA8K_PHY_ID_MASK, + PHY_ID_MATCH_EXACT(QCA8337_PHY_ID), .name = "Qualcomm Atheros 8337 internal PHY", /* PHY_GBIT_FEATURES */ .probe = qca83xx_probe, @@ -231,8 +229,7 @@ static struct phy_driver qca83xx_driver[] = { .resume = qca83xx_resume, }, { /* QCA8327-A from switch QCA8327-AL1A */ - .phy_id = QCA8327_A_PHY_ID, - .phy_id_mask = QCA8K_PHY_ID_MASK, + PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID), .name = "Qualcomm Atheros 8327-A internal PHY", /* PHY_GBIT_FEATURES */ .link_change_notify = qca83xx_link_change_notify, @@ -247,8 +244,7 @@ static struct phy_driver qca83xx_driver[] = { .resume = qca83xx_resume, }, { /* QCA8327-B from switch QCA8327-BL1A */ - .phy_id = QCA8327_B_PHY_ID, - .phy_id_mask = QCA8K_PHY_ID_MASK, + PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID), .name = "Qualcomm Atheros 8327-B internal PHY", /* PHY_GBIT_FEATURES */ .link_change_notify = qca83xx_link_change_notify, diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs new file mode 100644 index 00000000000000..1ab065798175b4 --- /dev/null +++ b/drivers/net/phy/qt2025.rs @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) Tehuti Networks Ltd. +// Copyright (C) 2024 FUJITA Tomonori + +//! Applied Micro Circuits Corporation QT2025 PHY driver +//! +//! This driver is based on the vendor driver `QT2025_phy.c`. This source +//! and firmware can be downloaded on the EN-9320SFP+ support site. +//! +//! The QT2025 PHY integrates an Intel 8051 micro-controller. + +use kernel::c_str; +use kernel::error::code; +use kernel::firmware::Firmware; +use kernel::net::phy::{ + self, + reg::{Mmd, C45}, + Driver, +}; +use kernel::prelude::*; +use kernel::sizes::{SZ_16K, SZ_8K}; + +kernel::module_phy_driver! { + drivers: [PhyQT2025], + device_table: [ + phy::DeviceId::new_with_driver::(), + ], + name: "qt2025_phy", + author: "FUJITA Tomonori ", + description: "AMCC QT2025 PHY driver", + license: "GPL", + firmware: ["qt2025-2.0.3.3.fw"], +} + +struct PhyQT2025; + +#[vtable] +impl Driver for PhyQT2025 { + const NAME: &'static CStr = c_str!("QT2025 10Gpbs SFP+"); + const PHY_DEVICE_ID: phy::DeviceId = phy::DeviceId::new_with_exact_mask(0x0043a400); + + fn probe(dev: &mut phy::Device) -> Result<()> { + // Check the hardware revision code. + // Only 0x3b works with this driver and firmware. + let hw_rev = dev.read(C45::new(Mmd::PMAPMD, 0xd001))?; + if (hw_rev >> 8) != 0xb3 { + return Err(code::ENODEV); + } + + // `MICRO_RESETN`: hold the micro-controller in reset while configuring. + dev.write(C45::new(Mmd::PMAPMD, 0xc300), 0x0000)?; + // `SREFCLK_FREQ`: configure clock frequency of the micro-controller. + dev.write(C45::new(Mmd::PMAPMD, 0xc302), 0x0004)?; + // Non loopback mode. 
+ dev.write(C45::new(Mmd::PMAPMD, 0xc319), 0x0038)?; + // `CUS_LAN_WAN_CONFIG`: select between LAN and WAN (WIS) mode. + dev.write(C45::new(Mmd::PMAPMD, 0xc31a), 0x0098)?; + // The following writes use standardized registers (3.38 through + // 3.41 5/10/25GBASE-R PCS test pattern seed B) for something else. + // We don't know what. + dev.write(C45::new(Mmd::PCS, 0x0026), 0x0e00)?; + dev.write(C45::new(Mmd::PCS, 0x0027), 0x0893)?; + dev.write(C45::new(Mmd::PCS, 0x0028), 0xa528)?; + dev.write(C45::new(Mmd::PCS, 0x0029), 0x0003)?; + // Configure transmit and recovered clock. + dev.write(C45::new(Mmd::PMAPMD, 0xa30a), 0x06e1)?; + // `MICRO_RESETN`: release the micro-controller from the reset state. + dev.write(C45::new(Mmd::PMAPMD, 0xc300), 0x0002)?; + // The micro-controller will start running from the boot ROM. + dev.write(C45::new(Mmd::PCS, 0xe854), 0x00c0)?; + + let fw = Firmware::request(c_str!("qt2025-2.0.3.3.fw"), dev.as_ref())?; + if fw.data().len() > SZ_16K + SZ_8K { + return Err(code::EFBIG); + } + + // The 24kB of program memory space is accessible by MDIO. + // The first 16kB of memory is located in the address range 3.8000h - 3.BFFFh. + // The next 8kB of memory is located at 4.8000h - 4.9FFFh. + let mut dst_offset = 0; + let mut dst_mmd = Mmd::PCS; + for (src_idx, val) in fw.data().iter().enumerate() { + if src_idx == SZ_16K { + // Start writing to the next register with no offset + dst_offset = 0; + dst_mmd = Mmd::PHYXS; + } + + dev.write(C45::new(dst_mmd, 0x8000 + dst_offset), (*val).into())?; + + dst_offset += 1; + } + // The micro-controller will start running from SRAM. + dev.write(C45::new(Mmd::PCS, 0xe854), 0x0040)?; + + // TODO: sleep here until the hw becomes ready. + Ok(()) + } + + fn read_status(dev: &mut phy::Device) -> Result { + dev.genphy_read_status::() + } +} diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 25e5bfbb6f89b8..c15d2f66ef0dc2 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -527,6 +527,9 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index, { int val; + if (index >= RTL8211F_LED_COUNT) + return -EINVAL; + val = phy_read_paged(phydev, 0xd04, RTL8211F_LEDCR); if (val < 0) return val; diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 2f44fc51848f44..f13c00b5b449cf 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -487,7 +487,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) bus->socket_ops->stop(bus->sfp); bus->socket_ops->detach(bus->sfp); if (bus->phydev && ops && ops->disconnect_phy) - ops->disconnect_phy(bus->upstream); + ops->disconnect_phy(bus->upstream, bus->phydev); } bus->registered = false; } @@ -722,6 +722,28 @@ void sfp_bus_del_upstream(struct sfp_bus *bus) } EXPORT_SYMBOL_GPL(sfp_bus_del_upstream); +/** + * sfp_get_name() - Get the SFP device name + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * + * Gets the SFP device's name, if @bus has a registered socket. Callers must + * hold RTNL, and the returned name is only valid until RTNL is released. 
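A minimal caller sketch under the stated RTNL rule; the netdev and the logging around it are assumptions, only sfp_get_name() itself comes from this patch.

/* Illustrative only: log which SFP cage backs this upstream */
static void example_log_sfp_name(struct net_device *dev, struct sfp_bus *bus)
{
	const char *name;

	rtnl_lock();
	name = sfp_get_name(bus);	/* may be NULL if no socket registered */
	if (name)
		netdev_info(dev, "SFP device: %s\n", name);
	rtnl_unlock();
}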
+ * + * Returns: + * - The name of the SFP device registered with sfp_register_socket() + * - %NULL if no device was registered on @bus + */ +const char *sfp_get_name(struct sfp_bus *bus) +{ + ASSERT_RTNL(); + + if (bus->sfp_dev) + return dev_name(bus->sfp_dev); + + return NULL; +} +EXPORT_SYMBOL_GPL(sfp_get_name); + /* Socket driver entry points */ int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev) { @@ -743,7 +765,7 @@ void sfp_remove_phy(struct sfp_bus *bus) const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); if (ops && ops->disconnect_phy) - ops->disconnect_phy(bus->upstream); + ops->disconnect_phy(bus->upstream, bus->phydev); bus->phydev = NULL; } EXPORT_SYMBOL_GPL(sfp_remove_phy); diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 3b5fcaf0dd36db..2377179de017b5 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -10,8 +10,10 @@ #include #include #include +#include /* Vitesse Extended Page Magic Register(s) */ +#define MII_VSC73XX_EXT_PAGE_1E 0x01 #define MII_VSC82X4_EXT_PAGE_16E 0x10 #define MII_VSC82X4_EXT_PAGE_17E 0x11 #define MII_VSC82X4_EXT_PAGE_18E 0x12 @@ -60,6 +62,28 @@ /* Vitesse Extended Page Access Register */ #define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f +/* Vitesse VSC73XX Extended Control Register */ +#define MII_VSC73XX_PHY_CTRL_EXT3 0x14 + +#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN BIT(4) +#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT GENMASK(3, 2) +#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_STA BIT(1) +#define MII_VSC73XX_DOWNSHIFT_MAX 5 +#define MII_VSC73XX_DOWNSHIFT_INVAL 1 + +/* VSC73XX PHY_BYPASS_CTRL register*/ +#define MII_VSC73XX_PHY_BYPASS_CTRL MII_DCOUNTER +#define MII_VSC73XX_PBC_TX_DIS BIT(15) +#define MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS BIT(7) +#define MII_VSC73XX_PBC_PAIR_SWAP_DIS BIT(5) +#define MII_VSC73XX_PBC_POL_INV_DIS BIT(4) +#define MII_VSC73XX_PBC_PARALLEL_DET_DIS BIT(3) +#define MII_VSC73XX_PBC_AUTO_NP_EXCHANGE_DIS BIT(1) + +/* VSC73XX PHY_AUX_CTRL_STAT register */ +#define MII_VSC73XX_PHY_AUX_CTRL_STAT MII_NCONFIG +#define MII_VSC73XX_PACS_NO_MDI_X_IND BIT(13) + /* Vitesse VSC8601 Extended PHY Control Register 1 */ #define MII_VSC8601_EPHY_CTL 0x17 #define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8) @@ -128,6 +152,74 @@ static int vsc73xx_write_page(struct phy_device *phydev, int page) return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page); } +static int vsc73xx_get_downshift(struct phy_device *phydev, u8 *data) +{ + int val, enable, cnt; + + val = phy_read_paged(phydev, MII_VSC73XX_EXT_PAGE_1E, + MII_VSC73XX_PHY_CTRL_EXT3); + if (val < 0) + return val; + + enable = FIELD_GET(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN, val); + cnt = FIELD_GET(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT, val) + 2; + + *data = enable ? 
cnt : DOWNSHIFT_DEV_DISABLE; + + return 0; +} + +static int vsc73xx_set_downshift(struct phy_device *phydev, u8 cnt) +{ + u16 mask, val; + int ret; + + if (cnt > MII_VSC73XX_DOWNSHIFT_MAX) + return -E2BIG; + else if (cnt == MII_VSC73XX_DOWNSHIFT_INVAL) + return -EINVAL; + + mask = MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN; + + if (!cnt) { + val = 0; + } else { + mask |= MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT; + val = MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN | + FIELD_PREP(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT, + cnt - 2); + } + + ret = phy_modify_paged(phydev, MII_VSC73XX_EXT_PAGE_1E, + MII_VSC73XX_PHY_CTRL_EXT3, mask, val); + if (ret < 0) + return ret; + + return genphy_soft_reset(phydev); +} + +static int vsc73xx_get_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return vsc73xx_get_downshift(phydev, data); + default: + return -EOPNOTSUPP; + } +} + +static int vsc73xx_set_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, const void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return vsc73xx_set_downshift(phydev, *(const u8 *)data); + default: + return -EOPNOTSUPP; + } +} + static void vsc73xx_config_init(struct phy_device *phydev) { /* Receiver init */ @@ -137,6 +229,12 @@ static void vsc73xx_config_init(struct phy_device *phydev) /* Config LEDs 0x61 */ phy_modify(phydev, MII_TPISTATUS, 0xff00, 0x0061); + + /* Enable downshift by default */ + vsc73xx_set_downshift(phydev, MII_VSC73XX_DOWNSHIFT_MAX); + + /* Set Auto MDI-X by default */ + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; } static int vsc738x_config_init(struct phy_device *phydev) @@ -237,6 +335,75 @@ static int vsc739x_config_init(struct phy_device *phydev) return 0; } +static int vsc73xx_mdix_set(struct phy_device *phydev, u8 mdix) +{ + int ret; + u16 val; + + val = phy_read(phydev, MII_VSC73XX_PHY_BYPASS_CTRL); + + switch (mdix) { + case ETH_TP_MDI: + val |= MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS | + MII_VSC73XX_PBC_PAIR_SWAP_DIS | + MII_VSC73XX_PBC_POL_INV_DIS; + break; + case ETH_TP_MDI_X: + /* When MDI-X auto configuration is disabled, is possible + * to force only MDI mode. Let's use autoconfig for forced + * MDIX mode. + */ + case ETH_TP_MDI_AUTO: + val &= ~(MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS | + MII_VSC73XX_PBC_PAIR_SWAP_DIS | + MII_VSC73XX_PBC_POL_INV_DIS); + break; + default: + return -EINVAL; + } + + ret = phy_write(phydev, MII_VSC73XX_PHY_BYPASS_CTRL, val); + if (ret) + return ret; + + return genphy_restart_aneg(phydev); +} + +static int vsc73xx_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = vsc73xx_mdix_set(phydev, phydev->mdix_ctrl); + if (ret) + return ret; + + return genphy_config_aneg(phydev); +} + +static int vsc73xx_mdix_get(struct phy_device *phydev, u8 *mdix) +{ + u16 reg_val; + + reg_val = phy_read(phydev, MII_VSC73XX_PHY_AUX_CTRL_STAT); + if (reg_val & MII_VSC73XX_PACS_NO_MDI_X_IND) + *mdix = ETH_TP_MDI; + else + *mdix = ETH_TP_MDI_X; + + return 0; +} + +static int vsc73xx_read_status(struct phy_device *phydev) +{ + int ret; + + ret = vsc73xx_mdix_get(phydev, &phydev->mdix); + if (ret < 0) + return ret; + + return genphy_read_status(phydev); +} + /* This adds a skew for both TX and RX clocks, so the skew should only be * applied to "rgmii-id" interfaces. It may not work as expected * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. 
@@ -434,32 +601,48 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id_mask = 0x000ffff0, /* PHY_GBIT_FEATURES */ .config_init = vsc738x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_status = vsc73xx_read_status, .read_page = vsc73xx_read_page, .write_page = vsc73xx_write_page, + .get_tunable = vsc73xx_get_tunable, + .set_tunable = vsc73xx_set_tunable, }, { .phy_id = PHY_ID_VSC7388, .name = "Vitesse VSC7388", .phy_id_mask = 0x000ffff0, /* PHY_GBIT_FEATURES */ .config_init = vsc738x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_status = vsc73xx_read_status, .read_page = vsc73xx_read_page, .write_page = vsc73xx_write_page, + .get_tunable = vsc73xx_get_tunable, + .set_tunable = vsc73xx_set_tunable, }, { .phy_id = PHY_ID_VSC7395, .name = "Vitesse VSC7395", .phy_id_mask = 0x000ffff0, /* PHY_GBIT_FEATURES */ .config_init = vsc739x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_status = vsc73xx_read_status, .read_page = vsc73xx_read_page, .write_page = vsc73xx_write_page, + .get_tunable = vsc73xx_get_tunable, + .set_tunable = vsc73xx_set_tunable, }, { .phy_id = PHY_ID_VSC7398, .name = "Vitesse VSC7398", .phy_id_mask = 0x000ffff0, /* PHY_GBIT_FEATURES */ .config_init = vsc739x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_status = vsc73xx_read_status, .read_page = vsc73xx_read_page, .write_page = vsc73xx_write_page, + .get_tunable = vsc73xx_get_tunable, + .set_tunable = vsc73xx_set_tunable, }, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index c33c3db3cc0896..a940b9a67107a9 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index 4d2ff63f2ee2f6..d93aeacc0dba9a 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c @@ -16,7 +16,7 @@ #include #include -#include +#include /* * State for a Deflate (de)compressor. 
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index eb9acfcaeb0974..4583e15ad03a0b 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -44,7 +44,7 @@ #include #include #include -#include +#include #include #include #include @@ -1631,7 +1631,7 @@ static void ppp_setup(struct net_device *dev) dev->netdev_ops = &ppp_netdev_ops; SET_NETDEV_DEVTYPE(dev, &ppp_type); - dev->features |= NETIF_F_LLTX; + dev->lltx = true; dev->hard_header_len = PPP_HDRLEN; dev->mtu = PPP_MRU; @@ -2269,7 +2269,7 @@ static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb) if (!pchb) goto out_rcu; - spin_lock(&pchb->downl); + spin_lock_bh(&pchb->downl); if (!pchb->chan) { /* channel got unregistered */ kfree_skb(skb); @@ -2281,7 +2281,7 @@ static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb) kfree_skb(skb); outl: - spin_unlock(&pchb->downl); + spin_unlock_bh(&pchb->downl); out_rcu: rcu_read_unlock(); diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index 208f6e24f37c49..bcc1eaedf58fb5 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c @@ -56,7 +56,7 @@ #include #include #include -#include +#include #include "ppp_mppe.h" diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 45bf59ac8f5711..644e99fc3623f5 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -43,7 +43,7 @@ #include #include #include -#include +#include #include #define PPP_VERSION "2.4.2" diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c index 2ea75686a31907..5c4e88be46ee33 100644 --- a/drivers/net/pse-pd/tps23881.c +++ b/drivers/net/pse-pd/tps23881.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -737,6 +738,7 @@ static int tps23881_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct tps23881_priv *priv; + struct gpio_desc *reset; int ret; u8 val; @@ -749,6 +751,25 @@ static int tps23881_i2c_probe(struct i2c_client *client) if (!priv) return -ENOMEM; + reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(reset)) + return dev_err_probe(&client->dev, PTR_ERR(reset), "Failed to get reset GPIO\n"); + + if (reset) { + /* TPS23880 datasheet (Rev G) indicates minimum reset pulse is 5us */ + usleep_range(5, 10); + gpiod_set_value_cansleep(reset, 0); /* De-assert reset */ + + /* TPS23880 datasheet indicates the minimum time after power on reset + * should be 20ms, but the document describing how to load SRAM ("How + * to Load TPS2388x SRAM and Parity Code over I2C" (Rev E)) + * indicates we should delay that programming by at least 50ms. So + * we'll wait the entire 50ms here to ensure we're safe to go to the + * SRAM loading procedure.
+ */ + msleep(50); + } + ret = i2c_smbus_read_byte_data(client, TPS23881_REG_DEVID); if (ret < 0) return ret; diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 4eececc945138b..318a0ef1af50da 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -515,7 +515,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) /* MTU range: 68 - 4082 */ ndev->min_mtu = ETH_MIN_MTU; ndev->max_mtu = RIONET_MAX_MTU; - ndev->features = NETIF_F_LLTX; + ndev->lltx = true; SET_NETDEV_DEV(ndev, &mport->dev); ndev->ethtool_ops = &rionet_ethtool_ops; diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index 18df7ca6619814..252cd757d3a2e5 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -77,7 +77,7 @@ #include #include #include -#include +#include static unsigned char *encode(unsigned char *cp, unsigned short n); static long decode(unsigned char **cpp); diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index d591e33268e5f0..55aa8d0c8e1f27 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -893,7 +893,7 @@ static const struct mii_phy_ops bcm5201_phy_ops = { .read_link = genmii_read_link, }; -static struct mii_phy_def bcm5201_phy_def = { +static const struct mii_phy_def bcm5201_phy_def = { .phy_id = 0x00406210, .phy_id_mask = 0xfffffff0, .name = "BCM5201", @@ -912,7 +912,7 @@ static const struct mii_phy_ops bcm5221_phy_ops = { .read_link = genmii_read_link, }; -static struct mii_phy_def bcm5221_phy_def = { +static const struct mii_phy_def bcm5221_phy_def = { .phy_id = 0x004061e0, .phy_id_mask = 0xfffffff0, .name = "BCM5221", @@ -930,7 +930,8 @@ static const struct mii_phy_ops bcm5241_phy_ops = { .poll_link = genmii_poll_link, .read_link = genmii_read_link, }; -static struct mii_phy_def bcm5241_phy_def = { + +static const struct mii_phy_def bcm5241_phy_def = { .phy_id = 0x0143bc30, .phy_id_mask = 0xfffffff0, .name = "BCM5241", @@ -949,7 +950,7 @@ static const struct mii_phy_ops bcm5400_phy_ops = { .read_link = bcm54xx_read_link, }; -static struct mii_phy_def bcm5400_phy_def = { +static const struct mii_phy_def bcm5400_phy_def = { .phy_id = 0x00206040, .phy_id_mask = 0xfffffff0, .name = "BCM5400", @@ -968,7 +969,7 @@ static const struct mii_phy_ops bcm5401_phy_ops = { .read_link = bcm54xx_read_link, }; -static struct mii_phy_def bcm5401_phy_def = { +static const struct mii_phy_def bcm5401_phy_def = { .phy_id = 0x00206050, .phy_id_mask = 0xfffffff0, .name = "BCM5401", @@ -987,7 +988,7 @@ static const struct mii_phy_ops bcm5411_phy_ops = { .read_link = bcm54xx_read_link, }; -static struct mii_phy_def bcm5411_phy_def = { +static const struct mii_phy_def bcm5411_phy_def = { .phy_id = 0x00206070, .phy_id_mask = 0xfffffff0, .name = "BCM5411", @@ -1007,7 +1008,7 @@ static const struct mii_phy_ops bcm5421_phy_ops = { .enable_fiber = bcm5421_enable_fiber, }; -static struct mii_phy_def bcm5421_phy_def = { +static const struct mii_phy_def bcm5421_phy_def = { .phy_id = 0x002060e0, .phy_id_mask = 0xfffffff0, .name = "BCM5421", @@ -1026,7 +1027,7 @@ static const struct mii_phy_ops bcm5421k2_phy_ops = { .read_link = bcm54xx_read_link, }; -static struct mii_phy_def bcm5421k2_phy_def = { +static const struct mii_phy_def bcm5421k2_phy_def = { .phy_id = 0x002062e0, .phy_id_mask = 0xfffffff0, .name = "BCM5421-K2", @@ -1045,7 +1046,7 @@ static const struct mii_phy_ops bcm5461_phy_ops = { .enable_fiber = bcm5461_enable_fiber, }; -static struct mii_phy_def bcm5461_phy_def = { +static const struct mii_phy_def bcm5461_phy_def 
= { .phy_id = 0x002060c0, .phy_id_mask = 0xfffffff0, .name = "BCM5461", @@ -1064,7 +1065,7 @@ static const struct mii_phy_ops bcm5462V_phy_ops = { .read_link = bcm54xx_read_link, }; -static struct mii_phy_def bcm5462V_phy_def = { +static const struct mii_phy_def bcm5462V_phy_def = { .phy_id = 0x002060d0, .phy_id_mask = 0xfffffff0, .name = "BCM5462-Vesta", @@ -1094,7 +1095,7 @@ static const struct mii_phy_ops marvell88e1111_phy_ops = { /* two revs in darwin for the 88e1101 ... I could use a datasheet * to get the proper names... */ -static struct mii_phy_def marvell88e1101v1_phy_def = { +static const struct mii_phy_def marvell88e1101v1_phy_def = { .phy_id = 0x01410c20, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1101v1", @@ -1102,7 +1103,8 @@ static struct mii_phy_def marvell88e1101v1_phy_def = { .magic_aneg = 1, .ops = &marvell88e1101_phy_ops }; -static struct mii_phy_def marvell88e1101v2_phy_def = { + +static const struct mii_phy_def marvell88e1101v2_phy_def = { .phy_id = 0x01410c60, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1101v2", @@ -1110,7 +1112,8 @@ static struct mii_phy_def marvell88e1101v2_phy_def = { .magic_aneg = 1, .ops = &marvell88e1101_phy_ops }; -static struct mii_phy_def marvell88e1111_phy_def = { + +static const struct mii_phy_def marvell88e1111_phy_def = { .phy_id = 0x01410cc0, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1111", @@ -1127,7 +1130,7 @@ static const struct mii_phy_ops generic_phy_ops = { .read_link = genmii_read_link }; -static struct mii_phy_def genmii_phy_def = { +static const struct mii_phy_def genmii_phy_def = { .phy_id = 0x00000000, .phy_id_mask = 0x00000000, .name = "Generic MII", @@ -1136,7 +1139,7 @@ static struct mii_phy_def genmii_phy_def = { .ops = &generic_phy_ops }; -static struct mii_phy_def* mii_phy_table[] = { +static const struct mii_phy_def *mii_phy_table[] = { &bcm5201_phy_def, &bcm5221_phy_def, &bcm5241_phy_def, @@ -1156,9 +1159,9 @@ static struct mii_phy_def* mii_phy_table[] = { int sungem_phy_probe(struct mii_phy *phy, int mii_id) { + const struct mii_phy_def *def; int rc; u32 id; - struct mii_phy_def* def; int i; /* We do not reset the mii_phy structure as the driver diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 77574f7a3bd459..5aa41d5f7765a6 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1162,7 +1162,6 @@ static const struct file_operations tap_fops = { .read_iter = tap_read_iter, .write_iter = tap_write_iter, .poll = tap_poll, - .llseek = no_llseek, .unlocked_ioctl = tap_ioctl, .compat_ioctl = compat_ptr_ioctl, }; diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index ab1935a4aa2cd6..18191d5a8bd4d3 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -2189,12 +2189,12 @@ static void team_setup(struct net_device *dev) * Let this up to underlay drivers. */ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; - - dev->features |= NETIF_F_LLTX; - dev->features |= NETIF_F_GRO; + dev->lltx = true; /* Don't allow team devices to change network namespaces. 
*/ - dev->features |= NETIF_F_NETNS_LOCAL; + dev->netns_local = true; + + dev->features |= NETIF_F_GRO; dev->hw_features = TEAM_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 1d06c560c5e653..9a0f6eb3201661 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -990,10 +990,11 @@ static int tun_net_init(struct net_device *dev) dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; - dev->features = dev->hw_features | NETIF_F_LLTX; + dev->features = dev->hw_features; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); + dev->lltx = true; tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); @@ -1129,7 +1130,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; } - /* NETIF_F_LLTX requires to do our own update of trans_start */ + /* dev->lltx requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); txq_trans_cond_update(queue); @@ -3451,6 +3452,12 @@ static int tun_chr_fasync(int fd, struct file *file, int on) struct tun_file *tfile = file->private_data; int ret; + if (on) { + ret = file_f_owner_allocate(file); + if (ret) + goto out; + } + if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) goto out; @@ -3536,7 +3543,6 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) static const struct file_operations tun_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read_iter = tun_chr_read_iter, .write_iter = tun_chr_write_iter, .poll = tun_chr_poll, diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 6d61052353f078..a6469235d904e7 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -418,7 +418,8 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb) case USB_CDC_NOTIFY_NETWORK_CONNECTION: netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", event->wValue ? 
"on" : "off"); - usbnet_link_change(dev, !!event->wValue, 0); + if (netif_carrier_ok(dev->net) != !!event->wValue) + usbnet_link_change(dev, !!event->wValue, 0); break; case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n", diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index b0c0c9dd6a028d..5d4a1fd2b5244c 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c @@ -17,7 +17,7 @@ #include #include -#include +#include /* diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 673d3aa8379267..3d239b8d1a1bcb 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -30,7 +30,7 @@ static const char driver_name[] = "sierra_net"; #include #include #include -#include +#include #include #define SWI_USB_REQUEST_GET_FW_ATTR 0x06 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 18eb5ba436df69..2506aa8c603ec0 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -464,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, void usbnet_defer_kevent (struct usbnet *dev, int work) { set_bit (work, &dev->flags); - if (!schedule_work (&dev->kevent)) - netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]); - else - netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]); + if (!usbnet_going_away(dev)) { + if (!schedule_work(&dev->kevent)) + netdev_dbg(dev->net, + "kevent %s may have been dropped\n", + usbnet_event_names[work]); + else + netdev_dbg(dev->net, + "kevent %s scheduled\n", usbnet_event_names[work]); + } } EXPORT_SYMBOL_GPL(usbnet_defer_kevent); @@ -535,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) tasklet_schedule (&dev->bh); break; case 0: - __usbnet_queue_skb(&dev->rxq, skb, rx_start); + if (!usbnet_going_away(dev)) + __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); @@ -843,9 +849,18 @@ int usbnet_stop (struct net_device *net) /* deferred work (timer, softirq, task) must also stop */ dev->flags = 0; - del_timer_sync (&dev->delay); - tasklet_kill (&dev->bh); + del_timer_sync(&dev->delay); + tasklet_kill(&dev->bh); cancel_work_sync(&dev->kevent); + + /* We have cyclic dependencies. Those calls are needed + * to break a cycle. 
We cannot fall into the gaps because + * we have a flag + */ + tasklet_kill(&dev->bh); + del_timer_sync(&dev->delay); + cancel_work_sync(&dev->kevent); + if (!pm) usb_autopm_put_interface(dev->intf); @@ -1171,7 +1186,8 @@ usbnet_deferred_kevent (struct work_struct *work) status); } else { clear_bit (EVENT_RX_HALT, &dev->flags); - tasklet_schedule (&dev->bh); + if (!usbnet_going_away(dev)) + tasklet_schedule(&dev->bh); } } @@ -1196,7 +1212,8 @@ usbnet_deferred_kevent (struct work_struct *work) usb_autopm_put_interface(dev->intf); fail_lowmem: if (resched) - tasklet_schedule (&dev->bh); + if (!usbnet_going_away(dev)) + tasklet_schedule(&dev->bh); } } @@ -1559,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t) } else if (netif_running (dev->net) && netif_device_present (dev->net) && netif_carrier_ok(dev->net) && + !usbnet_going_away(dev) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_PAUSED, &dev->flags) && !test_bit(EVENT_RX_HALT, &dev->flags)) { @@ -1606,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (!dev) return; + usbnet_mark_going_away(dev); xdev = interface_to_usbdev (intf); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 426e68a950672e..18148e068aa006 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1696,11 +1696,12 @@ static void veth_setup(struct net_device *dev) dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_PHONY_HEADROOM; + dev->priv_flags |= IFF_DISABLE_NETPOLL; + dev->lltx = true; dev->netdev_ops = &veth_netdev_ops; dev->xdp_metadata_ops = &veth_xdp_metadata_ops; dev->ethtool_ops = &veth_ethtool_ops; - dev->features |= NETIF_F_LLTX; dev->features |= VETH_FEATURES; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5a1c1ec5a64b8e..f8131f92a39288 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1807,6 +1807,11 @@ static struct sk_buff *receive_small(struct net_device *dev, struct page *page = virt_to_head_page(buf); struct sk_buff *skb; + /* We passed the address of virtnet header to virtio-core, + * so truncate the padding. 
+ */ + buf -= VIRTNET_RX_PAD + xdp_headroom; + len -= vi->hdr_len; u64_stats_add(&stats->bytes, len); @@ -2422,8 +2427,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, if (unlikely(!buf)) return -ENOMEM; - virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom, - vi->hdr_len + GOOD_PACKET_LEN); + buf += VIRTNET_RX_PAD + xdp_headroom; + + virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { @@ -2884,6 +2890,25 @@ static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim) net_dim_work_cancel(dim); } +static void virtnet_update_settings(struct virtnet_info *vi) +{ + u32 speed; + u8 duplex; + + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) + return; + + virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); + + if (ethtool_validate_speed(speed)) + vi->speed = speed; + + virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); + + if (ethtool_validate_duplex(duplex)) + vi->duplex = duplex; +} + static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -2902,6 +2927,15 @@ static int virtnet_open(struct net_device *dev) goto err_enable_qp; } + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { + if (vi->status & VIRTIO_NET_S_LINK_UP) + netif_carrier_on(vi->dev); + virtio_config_driver_enable(vi->vdev); + } else { + vi->status = VIRTIO_NET_S_LINK_UP; + netif_carrier_on(dev); + } + return 0; err_enable_qp: @@ -3380,12 +3414,22 @@ static int virtnet_close(struct net_device *dev) disable_delayed_refill(vi); /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); + /* Prevent the config change callback from changing carrier + * after close + */ + virtio_config_driver_disable(vi->vdev); + /* Stop getting status/speed updates: we don't care until next + * open + */ + cancel_work_sync(&vi->config_work); for (i = 0; i < vi->max_queue_pairs; i++) { virtnet_disable_queue_pair(vi, i); virtnet_cancel_dim(vi, &vi->rq[i].dim); } + netif_carrier_off(dev); + return 0; } @@ -5094,25 +5138,6 @@ static void virtnet_init_settings(struct net_device *dev) vi->duplex = DUPLEX_UNKNOWN; } -static void virtnet_update_settings(struct virtnet_info *vi) -{ - u32 speed; - u8 duplex; - - if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) - return; - - virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); - - if (ethtool_validate_speed(speed)) - vi->speed = speed; - - virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); - - if (ethtool_validate_duplex(duplex)) - vi->duplex = duplex; -} - static u32 virtnet_get_rxfh_key_size(struct net_device *dev) { return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; @@ -6521,6 +6546,9 @@ static int virtnet_probe(struct virtio_device *vdev) goto free_failover; } + /* Disable config change notification until ndo_open. */ + virtio_config_driver_disable(vi->vdev); + virtio_device_ready(vdev); virtnet_set_queues(vi, vi->curr_queue_pairs); @@ -6570,19 +6598,11 @@ static int virtnet_probe(struct virtio_device *vdev) vi->device_stats_cap = le64_to_cpu(v); } - rtnl_unlock(); - - err = virtnet_cpu_notif_add(vi); - if (err) { - pr_debug("virtio_net: registering cpu notifier failed\n"); - goto free_unregister_netdev; - } - /* Assume link up if device can't report link status, otherwise get link status from config. 
*/ netif_carrier_off(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { - schedule_work(&vi->config_work); + virtnet_config_changed_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; virtnet_update_settings(vi); @@ -6594,6 +6614,14 @@ static int virtnet_probe(struct virtio_device *vdev) set_bit(guest_offloads[i], &vi->guest_offloads); vi->guest_offloads_capable = vi->guest_offloads; + rtnl_unlock(); + + err = virtnet_cpu_notif_add(vi); + if (err) { + pr_debug("virtio_net: registering cpu notifier failed\n"); + goto free_unregister_netdev; + } + pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 040f0bb36c0ea4..4087f72f0d2be8 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -37,6 +37,7 @@ #include #include #include +#include #define DRV_NAME "vrf" #define DRV_VERSION "1.1" @@ -520,7 +521,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, /* needed to match OIF rule */ fl4.flowi4_l3mdev = vrf_dev->ifindex; fl4.flowi4_iif = LOOPBACK_IFINDEX; - fl4.flowi4_tos = RT_TOS(ip4h->tos); + fl4.flowi4_tos = ip4h->tos & INET_DSCP_MASK; fl4.flowi4_flags = FLOWI_FLAG_ANYSRC; fl4.flowi4_proto = ip4h->protocol; fl4.daddr = ip4h->daddr; @@ -607,7 +608,9 @@ static void vrf_finish_direct(struct sk_buff *skb) eth_zero_addr(eth->h_dest); eth->h_proto = skb->protocol; + rcu_read_lock_bh(); dev_queue_xmit_nit(skb, vrf_dev); + rcu_read_unlock_bh(); skb_pull(skb, ETH_HLEN); } @@ -1634,10 +1637,10 @@ static void vrf_setup(struct net_device *dev) eth_hw_addr_random(dev); /* don't acquire vrf device's netif_tx_lock when transmitting */ - dev->features |= NETIF_F_LLTX; + dev->lltx = true; /* don't allow vrf devices to change network namespaces. 
*/ - dev->features |= NETIF_F_NETNS_LOCAL; + dev->netns_local = true; /* does not make sense for a VLAN to be added to a vrf device */ dev->features |= NETIF_F_VLAN_CHALLENGED; diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index 4c260074c091f1..53fb76d574c6e2 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c @@ -83,13 +83,13 @@ static void vsockmon_setup(struct net_device *dev) { dev->type = ARPHRD_VSOCKMON; dev->priv_flags |= IFF_NO_QUEUE; + dev->lltx = true; dev->netdev_ops = &vsockmon_ops; dev->ethtool_ops = &vsockmon_ethtool_ops; dev->needs_free_netdev = true; - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | - NETIF_F_HIGHDMA | NETIF_F_LLTX; + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; dev->flags = IFF_NOARP; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index ba59e92ab941de..53dcb9fffc04fe 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -277,8 +277,7 @@ static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); return; errout: - if (err < 0) - rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); + rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); } static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, @@ -2690,11 +2689,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *rdst, *fdst = NULL; const struct ip_tunnel_info *info; - bool did_rsc = false; struct vxlan_fdb *f; struct ethhdr *eth; __be32 vni = 0; u32 nhid = 0; + bool did_rsc; info = skb_tunnel_info(skb); @@ -3322,7 +3321,6 @@ static void vxlan_setup(struct net_device *dev) dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &vxlan_type); - dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; @@ -3332,7 +3330,9 @@ static void vxlan_setup(struct net_device *dev) dev->hw_features |= NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; netif_keep_dst(dev); - dev->priv_flags |= IFF_NO_QUEUE | IFF_CHANGE_PROTO_DOWN; + dev->priv_flags |= IFF_NO_QUEUE; + dev->change_proto_down = true; + dev->lltx = true; /* MTU range: 68 - 65535 */ dev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 3feb36ee5bfb44..45e9b908dbfb05 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -289,7 +289,7 @@ static void wg_setup(struct net_device *dev) dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->priv_flags |= IFF_NO_QUEUE; - dev->features |= NETIF_F_LLTX; + dev->lltx = true; dev->features |= WG_NETDEV_FEATURES; dev->hw_features |= WG_NETDEV_FEATURES; dev->hw_enc_features |= WG_NETDEV_FEATURES; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index b93a64bf819018..35bfe7232e95ed 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -3,7 +3,7 @@ * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -1774,7 +1774,7 @@ static ssize_t ath10k_write_simulate_radar(struct file *file, if (!arvif->is_started) return -EINVAL; - ieee80211_radar_detected(ar->hw); + ieee80211_radar_detected(ar->hw, NULL); return count; } diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a5da32e87106c7..646e1737d4c47c 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1437,7 +1437,7 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar) * by indicating that radar was detected. */ ath10k_warn(ar, "failed to start CAC: %d\n", ret); - ieee80211_radar_detected(ar->hw); + ieee80211_radar_detected(ar->hw, NULL); } } diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index fe234459836497..4861179b221786 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -3990,7 +3990,7 @@ static void ath10k_radar_detected(struct ath10k *ar) if (ar->dfs_block_radar_events) ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); else - ieee80211_radar_detected(ar->hw); + ieee80211_radar_detected(ar->hw, NULL); } static void ath10k_radar_confirmation_work(struct work_struct *work) diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index b655967a465bba..09c37e19a16802 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -399,6 +399,7 @@ struct ath11k_vif { u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; struct delayed_work connection_loss_work; + struct work_struct bcn_tx_work; int num_legacy_stations; int rtscts_prot_mode; int txpower; @@ -406,11 +407,17 @@ struct ath11k_vif { bool wpaie_present; bool bcca_zero_sent; bool do_not_send_tmpl; - struct ieee80211_chanctx_conf chanctx; struct ath11k_arp_ns_offload arp_ns_offload; struct ath11k_rekey_data rekey_data; struct ath11k_reg_tpc_power_info reg_tpc_info; + + /* Must be last - ends in a flexible-array member. + * + * FIXME: Driver should not copy struct ieee80211_chanctx_conf, + * especially because it has a flexible array. Find a better way. + */ + struct ieee80211_chanctx_conf chanctx; }; struct ath11k_vif_iter { diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index 2f6dd69d3be276..65d2bc0687c884 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -1305,18 +1305,6 @@ struct htt_ppdu_stats_user_rate { #define HTT_TX_INFO_PEERID(_flags) \ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags) -struct htt_tx_ppdu_stats_info { - struct htt_tlv tlv_hdr; - u32 tx_success_bytes; - u32 tx_retry_bytes; - u32 tx_failed_bytes; - u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */ - u16 tx_success_msdus; - u16 tx_retry_msdus; - u16 tx_failed_msdus; - u16 tx_duration; /* united in us */ -} __packed; - enum htt_ppdu_stats_usr_compln_status { HTT_PPDU_STATS_USER_STATUS_OK, HTT_PPDU_STATS_USER_STATUS_FILTERED, @@ -1364,17 +1352,6 @@ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status { u32 success_bytes; } __packed; -struct htt_ppdu_stats_usr_cmn_array { - struct htt_tlv tlv_hdr; - u32 num_ppdu_stats; - /* tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info - * elements. 
- * tx_ppdu_stats_info is variable length, with length = - * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info) - */ - struct htt_tx_ppdu_stats_info tx_ppdu_info[]; -} __packed; - struct htt_ppdu_user_stats { u16 peer_id; u32 tlv_flags; diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 86485580dd8953..c087d8a0f5b255 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -2697,7 +2697,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, if (unlikely(push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { dev_kfree_skb_any(msdu); - ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; + ab->soc_stats.hal_reo_error[ring_id]++; continue; } diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 7c0ef6916dd258..f8068d2e848c33 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -6599,6 +6599,16 @@ static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif) return ret; } +static void ath11k_mac_bcn_tx_work(struct work_struct *work) +{ + struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, + bcn_tx_work); + + mutex_lock(&arvif->ar->conf_mutex); + ath11k_mac_bcn_tx_event(arvif); + mutex_unlock(&arvif->ar->conf_mutex); +} + static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -6637,6 +6647,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, arvif->vif = vif; INIT_LIST_HEAD(&arvif->list); + INIT_WORK(&arvif->bcn_tx_work, ath11k_mac_bcn_tx_work); INIT_DELAYED_WORK(&arvif->connection_loss_work, ath11k_mac_vif_sta_connection_loss_work); @@ -6879,6 +6890,7 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, int i; cancel_delayed_work_sync(&arvif->connection_loss_work); + cancel_work_sync(&arvif->bcn_tx_work); mutex_lock(&ar->conf_mutex); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 38f175dd15578a..87abfa54752953 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -7404,7 +7404,9 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s rcu_read_unlock(); return; } - ath11k_mac_bcn_tx_event(arvif); + + queue_work(ab->workqueue, &arvif->bcn_tx_work); + rcu_read_unlock(); } @@ -8356,7 +8358,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff if (ar->dfs_block_radar_events) ath11k_info(ab, "DFS Radar detected, but ignored as requested\n"); else - ieee80211_radar_detected(ar->hw); + ieee80211_radar_detected(ar->hw, NULL); exit: rcu_read_unlock(); diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index cdfd43a7321a9e..7f2e9a9b40977d 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -287,7 +287,6 @@ struct ath12k_vif { int txpower; bool rsnie_present; bool wpaie_present; - struct ieee80211_chanctx_conf chanctx; u32 key_cipher; u8 tx_encap_type; u8 vdev_stats_id; @@ -295,6 +294,13 @@ struct ath12k_vif { bool ps; struct ath12k_vif_cache *cache; struct ath12k_rekey_data rekey_data; + + /* Must be last - ends in a flexible-array member. + * + * FIXME: Driver should not copy struct ieee80211_chanctx_conf, + * especially because it has a flexible array. Find a better way. 
+ */ + struct ieee80211_chanctx_conf chanctx; }; struct ath12k_vif_iter { diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c index ce80e7b5175b4b..f1b7e74aefe426 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c +++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c @@ -1117,6 +1117,336 @@ ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len, stats_req->buf_len = len; } +static void +ath12k_htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n", + le32_to_cpu(htt_stats_buf->tcl2fw_entry_count)); + len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n", + le32_to_cpu(htt_stats_buf->not_to_fw)); + len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n", + le32_to_cpu(htt_stats_buf->invalid_pdev_vdev_peer)); + len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n", + le32_to_cpu(htt_stats_buf->tcl_res_invalid_addrx)); + len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n", + le32_to_cpu(htt_stats_buf->wbm2fw_entry_count)); + len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n", + le32_to_cpu(htt_stats_buf->invalid_pdev)); + len += scnprintf(buf + len, buf_len - len, "tcl_res_addrx_timeout = %u\n", + le32_to_cpu(htt_stats_buf->tcl_res_addrx_timeout)); + len += scnprintf(buf + len, buf_len - len, "invalid_vdev = %u\n", + le32_to_cpu(htt_stats_buf->invalid_vdev)); + len += scnprintf(buf + len, buf_len - len, "invalid_tcl_exp_frame_desc = %u\n", + le32_to_cpu(htt_stats_buf->invalid_tcl_exp_frame_desc)); + len += scnprintf(buf + len, buf_len - len, "vdev_id_mismatch_count = %u\n\n", + le32_to_cpu(htt_stats_buf->vdev_id_mismatch_cnt)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n", + le32_to_cpu(htt_stats_buf->m1_packets)); + len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n", + le32_to_cpu(htt_stats_buf->m2_packets)); + len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n", + le32_to_cpu(htt_stats_buf->m3_packets)); + len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n", + le32_to_cpu(htt_stats_buf->m4_packets)); + len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n", + le32_to_cpu(htt_stats_buf->g1_packets)); + len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n", + le32_to_cpu(htt_stats_buf->g2_packets)); + len += scnprintf(buf + len, buf_len 
- len, "rc4_packets = %u\n", + le32_to_cpu(htt_stats_buf->rc4_packets)); + len += scnprintf(buf + len, buf_len - len, "eap_packets = %u\n", + le32_to_cpu(htt_stats_buf->eap_packets)); + len += scnprintf(buf + len, buf_len - len, "eapol_start_packets = %u\n", + le32_to_cpu(htt_stats_buf->eapol_start_packets)); + len += scnprintf(buf + len, buf_len - len, "eapol_logoff_packets = %u\n", + le32_to_cpu(htt_stats_buf->eapol_logoff_packets)); + len += scnprintf(buf + len, buf_len - len, "eapol_encap_asf_packets = %u\n\n", + le32_to_cpu(htt_stats_buf->eapol_encap_asf_packets)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_classify_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n", + le32_to_cpu(htt_stats_buf->arp_packets)); + len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n", + le32_to_cpu(htt_stats_buf->igmp_packets)); + len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n", + le32_to_cpu(htt_stats_buf->dhcp_packets)); + len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n", + le32_to_cpu(htt_stats_buf->host_inspected)); + len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n", + le32_to_cpu(htt_stats_buf->htt_included)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_mcs)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_nss)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_preamble_type)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_chainmask)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_guard_interval)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_retries)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_bw_info)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_power)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n", + le32_to_cpu(htt_stats_buf->htt_valid_key_flags)); + len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n", + le32_to_cpu(htt_stats_buf->htt_valid_no_encryption)); + len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n", + le32_to_cpu(htt_stats_buf->fse_entry_count)); + len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n", + le32_to_cpu(htt_stats_buf->fse_priority_be)); + len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n", + le32_to_cpu(htt_stats_buf->fse_priority_high)); + len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n", + le32_to_cpu(htt_stats_buf->fse_priority_low)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_be)); + len += scnprintf(buf + len, buf_len - len, 
"fse_traffic_ptrn_over_sub = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_over_sub)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_bursty)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_interactive)); + len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n", + le32_to_cpu(htt_stats_buf->fse_traffic_ptrn_periodic)); + len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n", + le32_to_cpu(htt_stats_buf->fse_hwqueue_alloc)); + len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n", + le32_to_cpu(htt_stats_buf->fse_hwqueue_created)); + len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n", + le32_to_cpu(htt_stats_buf->fse_hwqueue_send_to_host)); + len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n", + le32_to_cpu(htt_stats_buf->mcast_entry)); + len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n", + le32_to_cpu(htt_stats_buf->bcast_entry)); + len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n", + le32_to_cpu(htt_stats_buf->htt_update_peer_cache)); + len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n", + le32_to_cpu(htt_stats_buf->htt_learning_frame)); + len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n", + le32_to_cpu(htt_stats_buf->fse_invalid_peer)); + len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n", + le32_to_cpu(htt_stats_buf->mec_notify)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n", + le32_to_cpu(htt_stats_buf->ap_bss_peer_not_found)); + len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n", + le32_to_cpu(htt_stats_buf->ap_bcast_mcast_no_peer)); + len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n", + le32_to_cpu(htt_stats_buf->sta_delete_in_progress)); + len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n", + le32_to_cpu(htt_stats_buf->ibss_no_bss_peer)); + len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n", + le32_to_cpu(htt_stats_buf->invalid_vdev_type)); + len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n", + le32_to_cpu(htt_stats_buf->invalid_ast_peer_entry)); + len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n", + le32_to_cpu(htt_stats_buf->peer_entry_invalid)); + len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n", + le32_to_cpu(htt_stats_buf->ethertype_not_ip)); + len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n", + le32_to_cpu(htt_stats_buf->eapol_lookup_failed)); + len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n", + le32_to_cpu(htt_stats_buf->qpeer_not_allow_data)); + len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n", + le32_to_cpu(htt_stats_buf->fse_tid_override)); + len += scnprintf(buf + len, buf_len - 
len, "ipv6_jumbogram_zero_length = %u\n", + le32_to_cpu(htt_stats_buf->ipv6_jumbogram_zero_length)); + len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n", + le32_to_cpu(htt_stats_buf->qos_to_non_qos_in_prog)); + len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_eapol = %u\n", + le32_to_cpu(htt_stats_buf->ap_bcast_mcast_eapol)); + len += scnprintf(buf + len, buf_len - len, "unicast_on_ap_bss_peer = %u\n", + le32_to_cpu(htt_stats_buf->unicast_on_ap_bss_peer)); + len += scnprintf(buf + len, buf_len - len, "ap_vdev_invalid = %u\n", + le32_to_cpu(htt_stats_buf->ap_vdev_invalid)); + len += scnprintf(buf + len, buf_len - len, "incomplete_llc = %u\n", + le32_to_cpu(htt_stats_buf->incomplete_llc)); + len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m3 = %u\n", + le32_to_cpu(htt_stats_buf->eapol_duplicate_m3)); + len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m4 = %u\n\n", + le32_to_cpu(htt_stats_buf->eapol_duplicate_m4)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "eok = %u\n", + le32_to_cpu(htt_stats_buf->eok)); + len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n", + le32_to_cpu(htt_stats_buf->classify_done)); + len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n", + le32_to_cpu(htt_stats_buf->lookup_failed)); + len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n", + le32_to_cpu(htt_stats_buf->send_host_dhcp)); + len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n", + le32_to_cpu(htt_stats_buf->send_host_mcast)); + len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n", + le32_to_cpu(htt_stats_buf->send_host_unknown_dest)); + len += scnprintf(buf + len, buf_len - len, "send_host = %u\n", + le32_to_cpu(htt_stats_buf->send_host)); + len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n", + le32_to_cpu(htt_stats_buf->status_invalid)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n", + le32_to_cpu(htt_stats_buf->enqueued_pkts)); + len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n", + le32_to_cpu(htt_stats_buf->to_tqm)); + len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n", + le32_to_cpu(htt_stats_buf->to_tqm_bypass)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = 
stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n", + le32_to_cpu(htt_stats_buf->discarded_pkts)); + len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n", + le32_to_cpu(htt_stats_buf->local_frames)); + len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n", + le32_to_cpu(htt_stats_buf->is_ext_msdu)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_de_compl_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n", + le32_to_cpu(htt_stats_buf->tcl_dummy_frame)); + len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n", + le32_to_cpu(htt_stats_buf->tqm_dummy_frame)); + len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n", + le32_to_cpu(htt_stats_buf->tqm_notify_frame)); + len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n", + le32_to_cpu(htt_stats_buf->fw2wbm_enq)); + len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n", + le32_to_cpu(htt_stats_buf->tqm_bypass_frame)); + + stats_req->buf_len = len; +} + static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, u16 tag, u16 len, const void *tag_buf, void *user_data) @@ -1198,6 +1528,30 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, case HTT_STATS_TX_TQM_PDEV_TAG: ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req); break; + case HTT_STATS_TX_DE_CMN_TAG: + ath12k_htt_print_tx_de_cmn_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG: + ath12k_htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG: + ath12k_htt_print_tx_de_classify_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG: + ath12k_htt_print_tx_de_classify_failed_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG: + ath12k_htt_print_tx_de_classify_status_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG: + ath12k_htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG: + ath12k_htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_DE_COMPL_STATS_TAG: + ath12k_htt_print_tx_de_compl_stats_tlv(tag_buf, len, stats_req); + break; default: break; } diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h index 6294a082cf8a2d..d52b26b23e6539 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h +++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h @@ -128,6 +128,7 @@ enum ath12k_dbg_htt_ext_stats_type { ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, + ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, /* keep this last */ ATH12K_DBG_HTT_NUM_EXT_STATS, @@ -143,6 +144,13 @@ enum 
ath12k_dbg_htt_tlv_tag { HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13, HTT_STATS_TX_TQM_CMN_TAG = 14, HTT_STATS_TX_TQM_PDEV_TAG = 15, + HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17, + HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18, + HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19, + HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20, + HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21, + HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22, + HTT_STATS_TX_DE_CMN_TAG = 23, HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36, HTT_STATS_TX_SCHED_CMN_TAG = 37, HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39, @@ -150,6 +158,7 @@ enum ath12k_dbg_htt_tlv_tag { HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44, HTT_STATS_HW_INTR_MISC_TAG = 54, HTT_STATS_HW_PDEV_ERRS_TAG = 56, + HTT_STATS_TX_DE_COMPL_STATS_TAG = 65, HTT_STATS_WHAL_TX_TAG = 66, HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67, HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86, @@ -564,4 +573,121 @@ struct ath12k_htt_tx_tqm_pdev_stats_tlv { __le32 sched_nonudp_notify2; } __packed; +struct ath12k_htt_tx_de_cmn_stats_tlv { + __le32 mac_id__word; + __le32 tcl2fw_entry_count; + __le32 not_to_fw; + __le32 invalid_pdev_vdev_peer; + __le32 tcl_res_invalid_addrx; + __le32 wbm2fw_entry_count; + __le32 invalid_pdev; + __le32 tcl_res_addrx_timeout; + __le32 invalid_vdev; + __le32 invalid_tcl_exp_frame_desc; + __le32 vdev_id_mismatch_cnt; +} __packed; + +struct ath12k_htt_tx_de_eapol_packets_stats_tlv { + __le32 m1_packets; + __le32 m2_packets; + __le32 m3_packets; + __le32 m4_packets; + __le32 g1_packets; + __le32 g2_packets; + __le32 rc4_packets; + __le32 eap_packets; + __le32 eapol_start_packets; + __le32 eapol_logoff_packets; + __le32 eapol_encap_asf_packets; +} __packed; + +struct ath12k_htt_tx_de_classify_stats_tlv { + __le32 arp_packets; + __le32 igmp_packets; + __le32 dhcp_packets; + __le32 host_inspected; + __le32 htt_included; + __le32 htt_valid_mcs; + __le32 htt_valid_nss; + __le32 htt_valid_preamble_type; + __le32 htt_valid_chainmask; + __le32 htt_valid_guard_interval; + __le32 htt_valid_retries; + __le32 htt_valid_bw_info; + __le32 htt_valid_power; + __le32 htt_valid_key_flags; + __le32 htt_valid_no_encryption; + __le32 fse_entry_count; + __le32 fse_priority_be; + __le32 fse_priority_high; + __le32 fse_priority_low; + __le32 fse_traffic_ptrn_be; + __le32 fse_traffic_ptrn_over_sub; + __le32 fse_traffic_ptrn_bursty; + __le32 fse_traffic_ptrn_interactive; + __le32 fse_traffic_ptrn_periodic; + __le32 fse_hwqueue_alloc; + __le32 fse_hwqueue_created; + __le32 fse_hwqueue_send_to_host; + __le32 mcast_entry; + __le32 bcast_entry; + __le32 htt_update_peer_cache; + __le32 htt_learning_frame; + __le32 fse_invalid_peer; + __le32 mec_notify; +} __packed; + +struct ath12k_htt_tx_de_classify_failed_stats_tlv { + __le32 ap_bss_peer_not_found; + __le32 ap_bcast_mcast_no_peer; + __le32 sta_delete_in_progress; + __le32 ibss_no_bss_peer; + __le32 invalid_vdev_type; + __le32 invalid_ast_peer_entry; + __le32 peer_entry_invalid; + __le32 ethertype_not_ip; + __le32 eapol_lookup_failed; + __le32 qpeer_not_allow_data; + __le32 fse_tid_override; + __le32 ipv6_jumbogram_zero_length; + __le32 qos_to_non_qos_in_prog; + __le32 ap_bcast_mcast_eapol; + __le32 unicast_on_ap_bss_peer; + __le32 ap_vdev_invalid; + __le32 incomplete_llc; + __le32 eapol_duplicate_m3; + __le32 eapol_duplicate_m4; +} __packed; + +struct ath12k_htt_tx_de_classify_status_stats_tlv { + __le32 eok; + __le32 classify_done; + __le32 lookup_failed; + __le32 send_host_dhcp; + __le32 send_host_mcast; + __le32 send_host_unknown_dest; + __le32 send_host; + __le32 status_invalid; +} __packed; + 
+struct ath12k_htt_tx_de_enqueue_packets_stats_tlv { + __le32 enqueued_pkts; + __le32 to_tqm; + __le32 to_tqm_bypass; +} __packed; + +struct ath12k_htt_tx_de_enqueue_discard_stats_tlv { + __le32 discarded_pkts; + __le32 local_frames; + __le32 is_ext_msdu; +} __packed; + +struct ath12k_htt_tx_de_compl_stats_tlv { + __le32 tcl_dummy_frame; + __le32 tqm_dummy_frame; + __le32 tqm_notify_frame; + __le32 fw2wbm_enq; + __le32 tqm_bypass_frame; +} __packed; + #endif diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index b77497c14ac45d..2fb18b83b3eec4 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -1495,18 +1495,6 @@ struct htt_ppdu_stats_user_rate { #define HTT_TX_INFO_PEERID(_flags) \ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M) -struct htt_tx_ppdu_stats_info { - struct htt_tlv tlv_hdr; - __le32 tx_success_bytes; - __le32 tx_retry_bytes; - __le32 tx_failed_bytes; - __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */ - __le16 tx_success_msdus; - __le16 tx_retry_msdus; - __le16 tx_failed_msdus; - __le16 tx_duration; /* united in us */ -} __packed; - enum htt_ppdu_stats_usr_compln_status { HTT_PPDU_STATS_USER_STATUS_OK, HTT_PPDU_STATS_USER_STATUS_FILTERED, diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 14236d0a0c89db..91e3393f7b5f40 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -2681,7 +2681,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { dev_kfree_skb_any(msdu); - ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; + ab->soc_stats.hal_reo_error[ring_id]++; continue; } diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index 7b0b6a7f4701ab..ec1bda95e555dd 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -926,6 +926,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .supports_dynamic_smps_6ghz = true, .iova_mask = 0, + + .supports_aspm = false, }, { .name = "wcn7850 hw2.0", @@ -1004,6 +1006,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .supports_dynamic_smps_6ghz = false, .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1, + + .supports_aspm = true, }, { .name = "qcn9274 hw2.0", @@ -1078,6 +1082,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .supports_dynamic_smps_6ghz = true, .iova_mask = 0, + + .supports_aspm = false, }, }; diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index b1d302c48326b8..8d52182e28aef4 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -189,6 +189,7 @@ struct ath12k_hw_params { bool tcl_ring_retry:1; bool reoq_lut_support:1; bool supports_shadow_regs:1; + bool supports_aspm:1; u32 num_tcl_banks; u32 max_tx_ring; diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index ce41c8153080cd..137394c364603b 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -2196,9 +2196,8 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu * length. 
*/ - ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] & - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >> - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK; + ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); if (ampdu_factor) { if (sta->deflink.vht_cap.vht_supported) @@ -3664,7 +3663,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, struct ath12k *ar, *prev_ar; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_scan_request *req = &hw_req->req; - struct ath12k_wmi_scan_req_arg arg = {}; + struct ath12k_wmi_scan_req_arg *arg = NULL; int ret; int i; bool create = true; @@ -3746,42 +3745,47 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, if (ret) goto exit; - ath12k_wmi_start_scan_init(ar, &arg); - arg.vdev_id = arvif->vdev_id; - arg.scan_id = ATH12K_SCAN_ID; + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) { + ret = -ENOMEM; + goto exit; + } + + ath12k_wmi_start_scan_init(ar, arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH12K_SCAN_ID; if (req->ie_len) { - arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); - if (!arg.extraie.ptr) { + arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); + if (!arg->extraie.ptr) { ret = -ENOMEM; goto exit; } - arg.extraie.len = req->ie_len; + arg->extraie.len = req->ie_len; } if (req->n_ssids) { - arg.num_ssids = req->n_ssids; - for (i = 0; i < arg.num_ssids; i++) - arg.ssid[i] = req->ssids[i]; + arg->num_ssids = req->n_ssids; + for (i = 0; i < arg->num_ssids; i++) + arg->ssid[i] = req->ssids[i]; } else { - arg.scan_f_passive = 1; + arg->scan_f_passive = 1; } if (req->n_channels) { - arg.num_chan = req->n_channels; - arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), - GFP_KERNEL); - - if (!arg.chan_list) { + arg->num_chan = req->n_channels; + arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), + GFP_KERNEL); + if (!arg->chan_list) { ret = -ENOMEM; goto exit; } - for (i = 0; i < arg.num_chan; i++) - arg.chan_list[i] = req->channels[i]->center_freq; + for (i = 0; i < arg->num_chan; i++) + arg->chan_list[i] = req->channels[i]->center_freq; } - ret = ath12k_start_scan(ar, &arg); + ret = ath12k_start_scan(ar, arg); if (ret) { ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); @@ -3791,14 +3795,15 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, /* Add a margin to account for event/command processing */ ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout, - msecs_to_jiffies(arg.max_scan_time + + msecs_to_jiffies(arg->max_scan_time + ATH12K_MAC_SCAN_TIMEOUT_MSECS)); exit: - kfree(arg.chan_list); - - if (req->ie_len) - kfree(arg.extraie.ptr); + if (arg) { + kfree(arg->chan_list); + kfree(arg->extraie.ptr); + kfree(arg); + } mutex_unlock(&ar->conf_mutex); diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 9e0b9e329bda32..bd269aa1740bcd 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -954,7 +954,8 @@ static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab) static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci) { - if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags)) + if (ab_pci->ab->hw_params->supports_aspm && + test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags)) pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC, ab_pci->link_ctl & diff --git 
a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 9f6be557365e90..2cd3ff9b0164c8 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -1538,6 +1538,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST, sizeof(*cmd)); cmd->req_type = cpu_to_le32(type); + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI bss chan info req type %d\n", type); @@ -6788,7 +6789,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff if (ar->dfs_block_radar_events) ath12k_info(ab, "DFS Radar detected, but ignored as requested\n"); else - ieee80211_radar_detected(ath12k_ar_to_hw(ar)); + ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL); exit: rcu_read_unlock(); diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index f1f52175a52b33..6a913f9b831580 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -3121,6 +3121,7 @@ struct wmi_pdev_bss_chan_info_req_cmd { __le32 tlv_header; /* ref wmi_bss_chan_info_req_type */ __le32 req_type; + __le32 pdev_id; } __packed; struct wmi_ap_ps_peer_cmd { @@ -4085,7 +4086,6 @@ struct wmi_vdev_stopped_event { } __packed; struct wmi_pdev_bss_chan_info_event { - __le32 pdev_id; __le32 freq; /* Units in MHz */ __le32 noise_floor; /* units are dBm */ /* rx clear - how often the channel was unused */ @@ -4103,6 +4103,7 @@ struct wmi_pdev_bss_chan_info_event { /*rx_cycle cnt for my bss in 64bits format */ __le32 rx_bss_cycle_count_low; __le32 rx_bss_cycle_count_high; + __le32 pdev_id; } __packed; #define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index abe41330fb6929..4d88b02ffa7951 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -59,7 +59,7 @@ #include #include -#include +#include #include #include "base.h" diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index eea4bda776080b..d81b2ad0b095fc 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -44,7 +44,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include +#include #include "ath5k.h" #include "base.h" diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index 3f4ce4e9c5320d..90e0859a8e5015 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -24,7 +24,7 @@ * Protocol Control Unit Functions * \*********************************/ -#include +#include #include "ath5k.h" #include "reg.h" diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 7ee4e1616f45ac..4825f9cb9cb857 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include "ath5k.h" #include "reg.h" diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c index 9fdb5283b39c32..c67f163c0858d7 100644 --- a/drivers/net/wireless/ath/ath5k/reset.c +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -25,7 +25,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include +#include #include /* To determine if a card is pci-e */ #include diff --git 
a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c index fb5144e2d86c40..f8a94d764be604 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -21,7 +21,7 @@ #include "hif-ops.h" #include "trace.h" -#include +#include #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 944f46cdf34c00..73c38a6b488098 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include +#include #include #include "hw.h" #include "ar9003_phy.h" diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c index a5eb43f3032013..004ca5f536bec5 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c +++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c @@ -65,7 +65,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, dev_info(&pdev->dev, "fixup device configuration\n"); - mem = pcim_iomap(pdev, 0, 0); + mem = pci_iomap(pdev, 0, 0); if (!mem) { dev_err(&pdev->dev, "ioremap error\n"); return -EINVAL; @@ -103,7 +103,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, pci_write_config_word(pdev, PCI_COMMAND, cmd); pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, bar0); - pcim_iounmap(pdev, mem); + pci_iounmap(pdev, mem); pci_disable_device(pdev); @@ -200,11 +200,9 @@ static int owl_probe(struct pci_dev *pdev, const char *eeprom_name; int err = 0; - if (pcim_enable_device(pdev)) + if (pci_enable_device(pdev)) return -EIO; - pcim_pin_device(pdev); - ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index fb270df75eb2d3..4b331c85509cf6 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -32,11 +32,8 @@ static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) { for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) { - if (sort[j] > sort[j - 1]) { - nfval = sort[j]; - sort[j] = sort[j - 1]; - sort[j - 1] = nfval; - } + if (sort[j] > sort[j - 1]) + swap(sort[j], sort[j - 1]); } } nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1]; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index d84e3ee7b5d902..eff894958a7384 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include "ath9k.h" @@ -1325,11 +1325,11 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; int i = 0; - data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all + + data[i++] = ((u64)sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all + sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all + sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all + sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all); - data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all + + data[i++] = ((u64)sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all + 
sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all + sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all + sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all); @@ -1380,8 +1380,6 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy = debugfs_create_dir("ath9k", sc->hw->wiphy->debugfsdir); - if (IS_ERR(sc->debug.debugfs_phy)) - return -ENOMEM; #ifdef CONFIG_ATH_DEBUG debugfs_create_file("debug", 0600, sc->debug.debugfs_phy, diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c index 11349218bc21c7..3689e12db9f74f 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.c +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -280,7 +280,7 @@ ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe) if (!pd->add_pulse(pd, pe, NULL)) return; DFS_STAT_INC(sc, radar_detected); - ieee80211_radar_detected(sc->hw); + ieee80211_radar_detected(sc->hw, NULL); } /* diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 8e18e9b4ef4835..426caa057396d2 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -116,7 +116,7 @@ static ssize_t write_file_simulate_radar(struct file *file, { struct ath_softc *sc = file->private_data; - ieee80211_radar_detected(sc->hw); + ieee80211_radar_detected(sc->hw, NULL); return count; } diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 27b860b0c7694a..3e16cfe059f37a 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include +#include #include "hw.h" #include "ar9002_phy.h" diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index d85472ee4d85f3..c139ac49ccf690 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include +#include #include "hw.h" #include "ar9002_phy.h" diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 84b31caf8ca6fe..5ba467cb7425ce 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include +#include #include "hw.h" #include "ar9002_phy.h" diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 0c7841f952287f..7265766cddbdeb 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include +#include #include "htc.h" MODULE_FIRMWARE(HTC_7010_MODULE_FW); @@ -716,8 +716,7 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb) } resubmit: - skb_reset_tail_pointer(skb); - skb_trim(skb, 0); + __skb_set_length(skb, 0); usb_anchor_urb(urb, &hif_dev->rx_submitted); ret = usb_submit_urb(urb, GFP_ATOMIC); @@ -754,8 +753,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) case -ESHUTDOWN: goto free_skb; default: - skb_reset_tail_pointer(skb); - skb_trim(skb, 0); + __skb_set_length(skb, 0); goto resubmit; } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c index f7c6d9bc931196..9437d69877cc56 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c @@ -486,8 +486,6 @@ int ath9k_htc_init_debug(struct ath_hw *ah) priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME, priv->hw->wiphy->debugfsdir); - if (IS_ERR(priv->debug.debugfs_phy)) - return -ENOMEM; ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 5982e0db45f93e..c3a6368bfc68ab 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include "hw.h" #include "hw-ops.h" @@ -2732,7 +2732,7 @@ static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out, if (ah->caps.gpio_requested & BIT(gpio)) return; - err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label); + err = devm_gpio_request_one(ah->dev, gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label); if (err) { ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n", gpio, err); @@ -2801,10 +2801,8 @@ void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio) WARN_ON(gpio >= ah->caps.num_gpio_pins); - if (ah->caps.gpio_requested & BIT(gpio)) { - gpio_free(gpio); + if (ah->caps.gpio_requested & BIT(gpio)) ah->caps.gpio_requested &= ~BIT(gpio); - } } EXPORT_SYMBOL(ath9k_hw_gpio_free); diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c index 6cdbee5beb077a..20ceed0dd4be16 100644 --- a/drivers/net/wireless/ath/carl9170/mac.c +++ b/drivers/net/wireless/ath/carl9170/mac.c @@ -36,7 +36,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include +#include #include "carl9170.h" #include "cmd.h" diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c index 85955572a705ec..b301e6fbce6cbd 100644 --- a/drivers/net/wireless/ath/hw.c +++ b/drivers/net/wireless/ath/hw.c @@ -15,7 +15,7 @@ */ #include -#include +#include #include "ath.h" #include "reg.h" diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c index 21a93fec284d65..0ae436bd9b665c 100644 --- a/drivers/net/wireless/ath/key.c +++ b/drivers/net/wireless/ath/key.c @@ -16,7 +16,7 @@ */ #include -#include +#include #include #include "ath.h" diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 8e56dcf9309d13..25b4ef9d3c9a10 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include "b43.h" #include "main.h" diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c index 71a7cd8dc7877a..fb70a1e3544be2 100644 --- a/drivers/net/wireless/broadcom/b43/tables_lpphy.c +++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c @@ -1066,7 +1066,7 @@ static const u32 lpphy_papd_mult_table[] = { 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28, }; -static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = { { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, }, @@ -1197,7 +1197,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = { { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, }, }; -static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = { { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, @@ -1328,7 +1328,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = { { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 72, }, }; -static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = { { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, }, @@ -1459,7 +1459,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = { { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, }; -static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = { { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, }, { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, }, @@ -1599,7 +1599,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = { { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, }, }; -static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = { { .gm = 4, .pga = 15, .pad = 15, .dac = 0, 
.bb_mult = 90, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, }, { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, }, @@ -1730,7 +1730,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = { { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, }, }; -static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = { { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, }, { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, }, @@ -1861,7 +1861,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = { { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, }; -static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = { { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 152, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 147, }, { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 143, }, @@ -1992,7 +1992,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = { { .gm = 255, .pga = 111, .pad = 29, .dac = 0, .bb_mult = 64, }, }; -static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = { { .gm = 7, .pga = 99, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 96, .pad = 255, .dac = 0, .bb_mult = 64, }, { .gm = 7, .pga = 93, .pad = 255, .dac = 0, .bb_mult = 64, }, @@ -2123,7 +2123,7 @@ static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = { { .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, }, }; -static struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = { +static const struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = { { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 152, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 147, }, { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 143, }, @@ -2392,7 +2392,7 @@ void lpphy_write_gain_table(struct b43_wldev *dev, int offset, } void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count, - struct lpphy_tx_gain_table_entry *table) + const struct lpphy_tx_gain_table_entry *table) { int i; diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.h b/drivers/net/wireless/broadcom/b43/tables_lpphy.h index 62002098bbdac9..1971ccccabf8a4 100644 --- a/drivers/net/wireless/broadcom/b43/tables_lpphy.h +++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.h @@ -36,7 +36,7 @@ struct lpphy_tx_gain_table_entry { void lpphy_write_gain_table(struct b43_wldev *dev, int offset, struct lpphy_tx_gain_table_entry data); void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count, - struct lpphy_tx_gain_table_entry *table); + const struct lpphy_tx_gain_table_entry *table); void lpphy_rev0_1_table_init(struct b43_wldev *dev); void lpphy_rev2plus_table_init(struct b43_wldev *dev); diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 441d6440671baf..2370a2e6a2e3cc 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include "b43legacy.h" #include "main.h" diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c index 0c3d119d12199c..1e8495f50c16ae 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c @@ -123,7 +123,7 @@ static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data) { *data = addr; - return brcmf_fil_iovar_int_get(ifp, "btc_params", data); + return brcmf_fil_iovar_int_query(ifp, "btc_params", data); } /** diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d4cc5fa92341d5..349aa3439502cb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -663,8 +663,8 @@ static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr) /* interface_create version 3+ */ /* get supported version from firmware side */ iface_create_ver = 0; - err = brcmf_fil_bsscfg_int_get(ifp, "interface_create", - &iface_create_ver); + err = brcmf_fil_bsscfg_int_query(ifp, "interface_create", + &iface_create_ver); if (err) { brcmf_err("fail to get supported version, err=%d\n", err); return -EOPNOTSUPP; @@ -756,8 +756,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) /* interface_create version 3+ */ /* get supported version from firmware side */ iface_create_ver = 0; - err = brcmf_fil_bsscfg_int_get(ifp, "interface_create", - &iface_create_ver); + err = brcmf_fil_bsscfg_int_query(ifp, "interface_create", + &iface_create_ver); if (err) { brcmf_err("fail to get supported version, err=%d\n", err); return -EOPNOTSUPP; @@ -1135,7 +1135,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, offset = offsetof(struct brcmf_scan_params_v2_le, channel_list) + n_channels * sizeof(u16); offset = roundup(offset, sizeof(u32)); - length += sizeof(ssid_le) * n_ssids, + length += sizeof(ssid_le) * n_ssids; ptr = (char *)params_le + offset; for (i = 0; i < n_ssids; i++) { memset(&ssid_le, 0, sizeof(ssid_le)); @@ -2101,7 +2101,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) if (!sme->crypto.n_akm_suites) return 0; - err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val); + err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), + "wpa_auth", &val); if (err) { bphy_err(drvr, "could not get wpa_auth (%d)\n", err); return err; @@ -2680,7 +2681,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev); struct brcmf_pub *drvr = cfg->pub; - s32 qdbm = 0; + s32 qdbm; s32 err; brcmf_dbg(TRACE, "Enter\n"); @@ -3067,7 +3068,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, struct brcmf_scb_val_le scbval; struct brcmf_pktcnt_le pktcnt; s32 err; - u32 rate = 0; + u32 rate; u32 rssi; /* Get the current tx rate */ @@ -7046,8 +7047,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; - err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info", - &chaninfo); + err = brcmf_fil_bsscfg_int_query(ifp, "per_chan_info", + &chaninfo); if (!err) { if (chaninfo & WL_CHAN_RADAR) channel->flags |= @@ -7081,7 +7082,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) /* verify support for bw_cap command */ val = WLC_BAND_5G; - err = 
brcmf_fil_iovar_int_get(ifp, "bw_cap", &val); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &val); if (!err) { /* only set 2G bandwidth using bw_cap command */ @@ -7157,11 +7158,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) int err; band = WLC_BAND_2G; - err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band); if (!err) { bw_cap[NL80211_BAND_2GHZ] = band; band = WLC_BAND_5G; - err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band); if (!err) { bw_cap[NL80211_BAND_5GHZ] = band; return; @@ -7170,7 +7171,6 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) return; } brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n"); - mimo_bwcap = 0; err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap); if (err) /* assume 20MHz if firmware does not give a clue */ @@ -7266,10 +7266,10 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) struct brcmf_pub *drvr = cfg->pub; struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); struct wiphy *wiphy = cfg_to_wiphy(cfg); - u32 nmode = 0; + u32 nmode; u32 vhtmode = 0; u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT }; - u32 rxchain = 0; + u32 rxchain; u32 nchain; int err; s32 i; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index bf91b1e1368f03..da72fd2d541ff7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -691,7 +691,7 @@ static int brcmf_net_mon_open(struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pub *drvr = ifp->drvr; - u32 monitor = 0; + u32 monitor; int err; brcmf_dbg(TRACE, "Enter\n"); @@ -1184,7 +1184,6 @@ static ssize_t bus_reset_write(struct file *file, const char __user *user_buf, static const struct file_operations bus_reset_fops = { .open = simple_open, - .llseek = no_llseek, .write = bus_reset_write, }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index ea76b8d334019a..39226b9c0fa834 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -48,20 +48,20 @@ /** * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info * - * @pktslots: dynamic allocated array for ordering AMPDU packets. * @flow_id: AMPDU flow identifier. * @cur_idx: last AMPDU index from firmware. * @exp_idx: expected next AMPDU index. * @max_idx: maximum amount of packets per AMPDU. * @pend_pkts: number of packets currently in @pktslots. + * @pktslots: array for ordering AMPDU packets. 
*/ struct brcmf_ampdu_rx_reorder { - struct sk_buff **pktslots; u8 flow_id; u8 cur_idx; u8 exp_idx; u8 max_idx; u8 pend_pkts; + struct sk_buff *pktslots[]; }; /* Forward decls for struct brcmf_pub (see below) */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index f23310a77a5d11..0d9ae197fa1ec3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -184,7 +184,7 @@ static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv) static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp, enum brcmf_feat_id id, char *name) { - u32 data = 0; + u32 data; int err; /* we need to know firmware error */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h index 9ca1b2aadcb537..eed439b840109f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h @@ -7,7 +7,7 @@ #ifndef FWEH_H_ #define FWEH_H_ -#include +#include #include #include #include diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h index a315a7fac6a06f..31e080e4da6697 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h @@ -96,15 +96,22 @@ static inline s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data) { s32 err; - __le32 data_le = cpu_to_le32(*data); - err = brcmf_fil_cmd_data_get(ifp, cmd, &data_le, sizeof(data_le)); + err = brcmf_fil_cmd_data_get(ifp, cmd, data, sizeof(*data)); if (err == 0) - *data = le32_to_cpu(data_le); + *data = le32_to_cpu(*(__le32 *)data); brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data); return err; } +static inline +s32 brcmf_fil_cmd_int_query(struct brcmf_if *ifp, u32 cmd, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_cmd_int_get(ifp, cmd, data); +} s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data, u32 len); @@ -120,14 +127,21 @@ s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data) static inline s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data) { - __le32 data_le = cpu_to_le32(*data); s32 err; - err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le)); + err = brcmf_fil_iovar_data_get(ifp, name, data, sizeof(*data)); if (err == 0) - *data = le32_to_cpu(data_le); + *data = le32_to_cpu(*(__le32 *)data); return err; } +static inline +s32 brcmf_fil_iovar_int_query(struct brcmf_if *ifp, const char *name, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_iovar_int_get(ifp, name, data); +} s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, @@ -145,15 +159,21 @@ s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data) static inline s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data) { - __le32 data_le = cpu_to_le32(*data); s32 err; - err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le, - sizeof(data_le)); + err = brcmf_fil_bsscfg_data_get(ifp, name, data, sizeof(*data)); if (err == 0) - *data = le32_to_cpu(data_le); + *data = le32_to_cpu(*(__le32 *)data); return err; } +static inline +s32 brcmf_fil_bsscfg_int_query(struct brcmf_if 
*ifp, const char *name, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_bsscfg_int_get(ifp, name, data); +} s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id, void *data, u32 len); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 36af81975855c5..0949e7975ff106 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -1673,7 +1673,6 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) struct sk_buff_head reorder_list; struct sk_buff *pnext; u8 flags; - u32 buf_size; reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder; flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET]; @@ -1708,15 +1707,13 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) } /* from here on we need a flow reorder instance */ if (rfi == NULL) { - buf_size = sizeof(*rfi); max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET]; - buf_size += (max_idx + 1) * sizeof(pkt); - /* allocate space for flow reorder info */ brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n", flow_id, max_idx); - rfi = kzalloc(buf_size, GFP_ATOMIC); + rfi = kzalloc(struct_size(rfi, pktslots, max_idx + 1), + GFP_ATOMIC); if (rfi == NULL) { bphy_err(drvr, "failed to alloc buffer\n"); brcmf_netif_rx(ifp, pkt); @@ -1724,7 +1721,6 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) } ifp->drvr->reorder_flows[flow_id] = rfi; - rfi->pktslots = (struct sk_buff **)(rfi + 1); rfi->max_idx = max_idx; } if (flags & BRCMF_RXREORDER_NEW_HOLE) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index ce482a3877e90a..5dee54819fbdfb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 1461dc453ac22e..7b936668c1b66d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c index 2f890807430379..08841b9a5b81f4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c @@ -3,7 +3,7 @@ * Copyright (c) 2019 Broadcom */ -#include +#include #include #include diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c index 33d17b779201d2..a767cbb791858c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c @@ -351,9 +351,7 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid) { struct ampdu_info *ampdu = wlc->ampdu; u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false); - u32 txunfl_ratio; u8 max_mpdu; - u32 current_ampdu_cnt = 0; u16 max_pld_size; u32 new_txunfl; struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid); @@ -389,26 +387,8 @@ static int 
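/*
 * Illustrative aside, not part of the patch: the brcmfmac core.h/fwsignal.c
 * hunks above replace a hand-computed "header + (max_idx + 1) * sizeof(ptr)"
 * allocation plus a (rfi + 1) pointer cast with a flexible array member sized
 * via struct_size().  A minimal sketch of that idiom with a hypothetical
 * struct; assumes <linux/slab.h> and <linux/overflow.h>.
 */
struct reorder_example {
	u8 max_idx;
	struct sk_buff *slots[];	/* flex array replaces the (rfi + 1) cast */
};

static struct reorder_example *reorder_alloc(u8 max_idx)
{
	struct reorder_example *r;

	/* struct_size() does the header + element arithmetic with overflow checks */
	r = kzalloc(struct_size(r, slots, max_idx + 1), GFP_ATOMIC);
	if (!r)
		return NULL;

	r->max_idx = max_idx;
	return r;
}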
brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid) if (fifo->accum_txfunfl < 10) return 0; - brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d tx_underflows %d\n", - current_ampdu_cnt, fifo->accum_txfunfl); + brcms_dbg_ht(wlc->hw->d11core, "tx_underflows %d\n", fifo->accum_txfunfl); - /* - compute the current ratio of tx unfl per ampdu. - When the current ampdu count becomes too - big while the ratio remains small, we reset - the current count in order to not - introduce too big of a latency in detecting a - large amount of tx underflows later. - */ - - txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl; - - if (txunfl_ratio > ampdu->tx_max_funl) { - if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT) - fifo->accum_txfunfl = 0; - - return 0; - } max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index d86f28b8bc60bf..5b1d35601bbd14 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -1611,10 +1611,9 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) if (le32_to_cpu(hdr->idx) == idx) { pdata = wl->fw.fw_bin[i]->data + le32_to_cpu(hdr->offset); - *pbuf = kvmalloc(len, GFP_KERNEL); + *pbuf = kvmemdup(pdata, len, GFP_KERNEL); if (*pbuf == NULL) - goto fail; - memcpy(*pbuf, pdata, len); + return -ENOMEM; return 0; } } @@ -1622,7 +1621,6 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) brcms_err(wl->wlc->hw->d11core, "ERROR: ucode buf tag:%d can not be found!\n", idx); *pbuf = NULL; -fail: return -ENODATA; } diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h index 9065ca5b020857..bad080d33c07fc 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw.h +++ b/drivers/net/wireless/intel/ipw2x00/libipw.h @@ -345,14 +345,19 @@ struct libipw_hdr_2addr { } __packed; struct libipw_hdr_3addr { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; - __le16 seq_ctl; + /* New members MUST be added within the __struct_group() macro below. 
*/ + __struct_group(libipw_hdr_3addr_hdr, hdr, __packed, + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + ); u8 payload[]; } __packed; +static_assert(offsetof(struct libipw_hdr_3addr, payload) == sizeof(struct libipw_hdr_3addr_hdr), + "struct member likely outside of __struct_group()"); struct libipw_hdr_4addr { __le16 frame_ctl; @@ -400,7 +405,7 @@ struct libipw_info_element { */ struct libipw_auth { - struct libipw_hdr_3addr header; + struct libipw_hdr_3addr_hdr header; __le16 algorithm; __le16 transaction; __le16 status; @@ -417,7 +422,7 @@ struct libipw_channel_switch { } __packed; struct libipw_action { - struct libipw_hdr_3addr header; + struct libipw_hdr_3addr_hdr header; u8 category; u8 action; union { @@ -430,7 +435,7 @@ struct libipw_action { } __packed; struct libipw_disassoc { - struct libipw_hdr_3addr header; + struct libipw_hdr_3addr_hdr header; __le16 reason; } __packed; @@ -438,13 +443,13 @@ struct libipw_disassoc { #define libipw_deauth libipw_disassoc struct libipw_probe_request { - struct libipw_hdr_3addr header; + struct libipw_hdr_3addr_hdr header; /* SSID, supported rates */ u8 variable[]; } __packed; struct libipw_probe_response { - struct libipw_hdr_3addr header; + struct libipw_hdr_3addr_hdr header; __le32 time_stamp[2]; __le16 beacon_interval; __le16 capability; @@ -456,16 +461,8 @@ struct libipw_probe_response { /* Alias beacon for probe_response */ #define libipw_beacon libipw_probe_response -struct libipw_assoc_request { - struct libipw_hdr_3addr header; - __le16 capability; - __le16 listen_interval; - /* SSID, supported rates, RSN */ - u8 variable[]; -} __packed; - struct libipw_reassoc_request { - struct libipw_hdr_3addr header; + struct libipw_hdr_3addr_hdr header; __le16 capability; __le16 listen_interval; u8 current_ap[ETH_ALEN]; @@ -473,7 +470,7 @@ struct libipw_reassoc_request { } __packed; struct libipw_assoc_response { - struct libipw_hdr_3addr header; + struct libipw_hdr_3addr_hdr header; __le16 capability; __le16 status; __le16 aid; @@ -588,13 +585,6 @@ struct libipw_channel_map { u8 map; } __packed; -struct libipw_ibss_dfs { - struct libipw_info_element ie; - u8 owner[ETH_ALEN]; - u8 recovery_interval; - struct libipw_channel_map channel_map[]; -}; - struct libipw_csa { u8 mode; u8 channel; diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c index 903de34028efbf..dbc7153d0a3d07 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c @@ -509,7 +509,7 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee, int i, idx, ret = 0; int group_key = 0; const char *alg, *module; - struct lib80211_crypto_ops *ops; + const struct lib80211_crypto_ops *ops; struct lib80211_crypt_data **crypt; struct libipw_security sec = { diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 1fab7849f56d1a..14d2331ee6cb97 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include "common.h" @@ -527,7 +527,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) struct ieee80211_hdr *header; struct ieee80211_rx_status rx_status = {}; struct il_rx_pkt *pkt = rxb_addr(rxb); - struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt); + struct il3945_rx_frame_stats_hdr *rx_stats = IL_RX_STATS(pkt); struct 
il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt); u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg); diff --git a/drivers/net/wireless/intel/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h index 82e4a4878bc25f..ffbe11902628c0 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.h +++ b/drivers/net/wireless/intel/iwlegacy/3945.h @@ -157,8 +157,10 @@ struct il3945_ibss_seq { }; #define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\ - x->u.rx_frame.stats.payload + \ - x->u.rx_frame.stats.phy_count)) + container_of(&x->u.rx_frame.stats, \ + struct il3945_rx_frame_stats, \ + hdr)->payload + \ + x->u.rx_frame.stats.phy_count)) #define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\ IL_RX_HDR(x)->payload + \ le16_to_cpu(IL_RX_HDR(x)->len))) diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 1600c344edbb20..fcccde7bb65922 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -1769,7 +1769,7 @@ il4965_tx_skb(struct il_priv *il, /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_cmd = txq->cmd[q->write_ptr]; out_meta = &txq->meta[q->write_ptr]; - tx_cmd = &out_cmd->cmd.tx; + tx_cmd = container_of(&out_cmd->cmd.tx, struct il_tx_cmd, __hdr); memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); memset(tx_cmd, 0, sizeof(struct il_tx_cmd)); diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c index c34729f576cdad..b63e29590b0487 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965.c +++ b/drivers/net/wireless/intel/iwlegacy/4965.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include "common.h" #include "4965.h" diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h index 28cf4e832152cd..4a9fa8b83f0ffc 100644 --- a/drivers/net/wireless/intel/iwlegacy/commands.h +++ b/drivers/net/wireless/intel/iwlegacy/commands.h @@ -201,9 +201,6 @@ struct il_cmd_header { * 15 unsolicited RX or uCode-originated notification */ __le16 sequence; - - /* command or response/notification data follows immediately */ - u8 data[]; } __packed; /** @@ -1160,23 +1157,33 @@ struct il_wep_cmd { #define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) struct il3945_rx_frame_stats { - u8 phy_count; - u8 id; - u8 rssi; - u8 agc; - __le16 sig_avg; - __le16 noise_diff; + /* New members MUST be added within the __struct_group() macro below. */ + __struct_group(il3945_rx_frame_stats_hdr, hdr, __packed, + u8 phy_count; + u8 id; + u8 rssi; + u8 agc; + __le16 sig_avg; + __le16 noise_diff; + ); u8 payload[]; } __packed; +static_assert(offsetof(struct il3945_rx_frame_stats, payload) == sizeof(struct il3945_rx_frame_stats_hdr), + "struct member likely outside of __struct_group()"); struct il3945_rx_frame_hdr { - __le16 channel; - __le16 phy_flags; - u8 reserved1; - u8 rate; - __le16 len; + /* New members MUST be added within the __struct_group() macro below. 
*/ + __struct_group(il3945_rx_frame_hdr_hdr, hdr, __packed, + __le16 channel; + __le16 phy_flags; + u8 reserved1; + u8 rate; + __le16 len; + ); u8 payload[]; } __packed; +static_assert(offsetof(struct il3945_rx_frame_hdr, payload) == sizeof(struct il3945_rx_frame_hdr_hdr), + "struct member likely outside of __struct_group()"); struct il3945_rx_frame_end { __le32 status; @@ -1193,8 +1200,8 @@ struct il3945_rx_frame_end { * stats.phy_count */ struct il3945_rx_frame { - struct il3945_rx_frame_stats stats; - struct il3945_rx_frame_hdr hdr; + struct il3945_rx_frame_stats_hdr stats; + struct il3945_rx_frame_hdr_hdr hdr; struct il3945_rx_frame_end end; } __packed; @@ -1352,67 +1359,69 @@ struct il_rx_mpdu_res_start { */ struct il3945_tx_cmd { - /* - * MPDU byte count: - * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, - * + 8 byte IV for CCM or TKIP (not used for WEP) - * + Data payload - * + 8-byte MIC (not used for CCM/WEP) - * NOTE: Does not include Tx command bytes, post-MAC pad bytes, - * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i - * Range: 14-2342 bytes. - */ - __le16 len; - - /* - * MPDU or MSDU byte count for next frame. - * Used for fragmentation and bursting, but not 11n aggregation. - * Same as "len", but for next frame. Set to 0 if not applicable. - */ - __le16 next_frame_len; - - __le32 tx_flags; /* TX_CMD_FLG_* */ - - u8 rate; - - /* Index of recipient station in uCode's station table */ - u8 sta_id; - u8 tid_tspec; - u8 sec_ctl; - u8 key[16]; - union { - u8 byte[8]; - __le16 word[4]; - __le32 dw[2]; - } tkip_mic; - __le32 next_frame_info; - union { - __le32 life_time; - __le32 attempt; - } stop_time; - u8 supp_rates[2]; - u8 rts_retry_limit; /*byte 50 */ - u8 data_retry_limit; /*byte 51 */ - union { - __le16 pm_frame_timeout; - __le16 attempt_duration; - } timeout; - - /* - * Duration of EDCA burst Tx Opportunity, in 32-usec units. - * Set this if txop time is not specified by HCCA protocol (e.g. by AP). - */ - __le16 driver_txop; + /* New members MUST be added within the __struct_group() macro below. */ + __struct_group(il3945_tx_cmd_hdr, __hdr, __packed, + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. + * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + u8 rate; + + /* Index of recipient station in uCode's station table */ + u8 sta_id; + u8 tid_tspec; + u8 sec_ctl; + u8 key[16]; + union { + u8 byte[8]; + __le16 word[4]; + __le32 dw[2]; + } tkip_mic; + __le32 next_frame_info; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + u8 supp_rates[2]; + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). 
+ */ + __le16 driver_txop; + ); /* * MAC header goes here, followed by 2 bytes padding if MAC header * length is 26 or 30 bytes, followed by payload data */ - union { - DECLARE_FLEX_ARRAY(u8, payload); - DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr); - }; + struct ieee80211_hdr hdr[]; } __packed; +static_assert(offsetof(struct il3945_tx_cmd, hdr) == sizeof(struct il3945_tx_cmd_hdr), + "struct member likely outside of __struct_group()"); /* * C_TX = 0x1c (response) @@ -1438,83 +1447,87 @@ struct il_dram_scratch { } __packed; struct il_tx_cmd { - /* - * MPDU byte count: - * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, - * + 8 byte IV for CCM or TKIP (not used for WEP) - * + Data payload - * + 8-byte MIC (not used for CCM/WEP) - * NOTE: Does not include Tx command bytes, post-MAC pad bytes, - * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i - * Range: 14-2342 bytes. - */ - __le16 len; - - /* - * MPDU or MSDU byte count for next frame. - * Used for fragmentation and bursting, but not 11n aggregation. - * Same as "len", but for next frame. Set to 0 if not applicable. - */ - __le16 next_frame_len; - - __le32 tx_flags; /* TX_CMD_FLG_* */ - - /* uCode may modify this field of the Tx command (in host DRAM!). - * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ - struct il_dram_scratch scratch; - - /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ - __le32 rate_n_flags; /* RATE_MCS_* */ - - /* Index of destination station in uCode's station table */ - u8 sta_id; - - /* Type of security encryption: CCM or TKIP */ - u8 sec_ctl; /* TX_CMD_SEC_* */ - - /* - * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial - * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for - * data frames, this field may be used to selectively reduce initial - * rate (via non-0 value) for special frames (e.g. management), while - * still supporting rate scaling for all frames. - */ - u8 initial_rate_idx; - u8 reserved; - u8 key[16]; - __le16 next_frame_flags; - __le16 reserved2; - union { - __le32 life_time; - __le32 attempt; - } stop_time; - - /* Host DRAM physical address pointer to "scratch" in this command. - * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */ - __le32 dram_lsb_ptr; - u8 dram_msb_ptr; - - u8 rts_retry_limit; /*byte 50 */ - u8 data_retry_limit; /*byte 51 */ - u8 tid_tspec; - union { - __le16 pm_frame_timeout; - __le16 attempt_duration; - } timeout; - - /* - * Duration of EDCA burst Tx Opportunity, in 32-usec units. - * Set this if txop time is not specified by HCCA protocol (e.g. by AP). - */ - __le16 driver_txop; + /* New members MUST be added within the __struct_group() macro below. */ + __struct_group(il_tx_cmd_hdr, __hdr, __packed, + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. + * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + /* uCode may modify this field of the Tx command (in host DRAM!). + * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. 
*/ + struct il_dram_scratch scratch; + + /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* Index of destination station in uCode's station table */ + u8 sta_id; + + /* Type of security encryption: CCM or TKIP */ + u8 sec_ctl; /* TX_CMD_SEC_* */ + + /* + * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial + * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for + * data frames, this field may be used to selectively reduce initial + * rate (via non-0 value) for special frames (e.g. management), while + * still supporting rate scaling for all frames. + */ + u8 initial_rate_idx; + u8 reserved; + u8 key[16]; + __le16 next_frame_flags; + __le16 reserved2; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + + /* Host DRAM physical address pointer to "scratch" in this command. + * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */ + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + u8 tid_tspec; + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). + */ + __le16 driver_txop; + ); /* * MAC header goes here, followed by 2 bytes padding if MAC header * length is 26 or 30 bytes, followed by payload data */ - u8 payload[0]; struct ieee80211_hdr hdr[]; } __packed; +static_assert(offsetof(struct il_tx_cmd, hdr) == sizeof(struct il_tx_cmd_hdr), + "struct member likely outside of __struct_group()"); /* TX command response is sent after *3945* transmission attempts. * @@ -2502,7 +2515,7 @@ struct il3945_scan_cmd { /* For active scans (set to all-0s for passive scans). * Does not include payload. Must specify Tx rate; no rate scaling. */ - struct il3945_tx_cmd tx_cmd; + struct il3945_tx_cmd_hdr tx_cmd; /* For directed active scans (set to all-0s otherwise) */ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; @@ -2546,7 +2559,7 @@ struct il_scan_cmd { /* For active scans (set to all-0s for passive scans). * Does not include payload. Must specify Tx rate; no rate scaling. 
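/*
 * Illustrative aside, not part of the patch: the iwlegacy commands.h hunks
 * above wrap the fixed leading members of the Tx command in __struct_group()
 * so that a separate "header" type can be embedded elsewhere (scan and beacon
 * commands) without dragging in the trailing flexible payload, and pin the
 * layout with a static_assert.  A minimal sketch with hypothetical names;
 * assumes <linux/stddef.h> for __struct_group() and <linux/types.h>.
 */
struct example_cmd {
	/* New members MUST be added within the __struct_group() below. */
	__struct_group(example_cmd_hdr, __hdr, __packed,
		__le16 len;
		u8 rate;
		u8 sta_id;
	);
	u8 payload[];			/* frame data follows the fixed header */
} __packed;
static_assert(offsetof(struct example_cmd, payload) ==
	      sizeof(struct example_cmd_hdr),
	      "struct member likely outside of __struct_group()");

/* Containers that only need the fixed part embed the generated header type. */
struct example_scan_cmd {
	struct example_cmd_hdr tx_cmd;	/* no flexible payload[] here */
	u8 direct_scan[4];
} __packed;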
*/ - struct il_tx_cmd tx_cmd; + struct il_tx_cmd_hdr tx_cmd; /* For directed active scans (set to all-0s otherwise) */ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX]; @@ -2662,7 +2675,7 @@ struct il4965_beacon_notif { */ struct il3945_tx_beacon_cmd { - struct il3945_tx_cmd tx; + struct il3945_tx_cmd_hdr tx; __le16 tim_idx; u8 tim_size; u8 reserved1; @@ -2670,7 +2683,7 @@ struct il3945_tx_beacon_cmd { } __packed; struct il_tx_beacon_cmd { - struct il_tx_cmd tx; + struct il_tx_cmd_hdr tx; __le16 tim_idx; u8 tim_size; u8 reserved1; diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index 69687fcf963fc1..2147781b5fffb0 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -553,7 +553,7 @@ struct il_device_cmd { u8 val8; u16 val16; u32 val32; - struct il_tx_cmd tx; + struct il_tx_cmd_hdr tx; u8 payload[DEF_CMD_PAYLOAD_SIZE]; } __packed cmd; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index 3b6b8b410be580..fa1be8c54d3c1a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_BZ_UCODE_API_MAX 92 +#define IWL_BZ_UCODE_API_MAX 93 /* Lowest firmware API version supported */ #define IWL_BZ_UCODE_API_MIN 90 @@ -148,6 +148,17 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; +const struct iwl_cfg_trans_params iwl_gl_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_BZ, + .base_params = &iwl_bz_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, +}; + const char iwl_bz_name[] = "Intel(R) TBD Bz device"; const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz"; const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index 4ccb0b7bdc20f5..f1dd1c29f3054d 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_SC_UCODE_API_MAX 92 +#define IWL_SC_UCODE_API_MAX 93 /* Lowest firmware API version supported */ #define IWL_SC_UCODE_API_MIN 90 diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 71f67a019cf60f..5ca85d90a8d64c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "iwl-io.h" #include "iwl-trans.h" #include "iwl-modparams.h" diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 65b7c68e5ca773..e0b14be25b0238 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1325,8 +1325,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, iwlwifi_mod_params.amsdu_size); } - trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED; - trans_cfg.command_groups = iwl_dvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 
e9d2717362cf9a..7f67e602940ca6 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "iwl-trans.h" #include "iwl-io.h" diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 8c8880b4482701..a7cea0a55b35af 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -357,6 +357,11 @@ int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc) } mcc_val = wifi_pkg->package.elements[1].integer.value; + if (mcc_val != BIOS_MCC_CHINA) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "ACPI WRDD is supported only for CN\n"); + goto out_free; + } mcc[0] = (mcc_val >> 8) & 0xff; mcc[1] = mcc_val & 0xff; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index b97a43353779f1..ddc84430d89594 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -95,7 +95,7 @@ enum iwl_bt_ci_compliance { }; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ /** - * struct iwl_bt_coex_profile_notif - notification about BT coex + * struct iwl_bt_coex_prof_old_notif - notification about BT coex * @mbox_msg: message from BT to WiFi * @msg_idx: the index of the message * @bt_ci_compliance: enum %iwl_bt_ci_compliance @@ -110,7 +110,7 @@ enum iwl_bt_ci_compliance { * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that * BT is utilizing) when the RSSI is mid/high (>= -65 dBm) */ -struct iwl_bt_coex_profile_notif { +struct iwl_bt_coex_prof_old_notif { __le32 mbox_msg[4]; __le32 msg_idx; __le32 bt_ci_compliance; @@ -126,4 +126,29 @@ struct iwl_bt_coex_profile_notif { * BT_COEX_PROFILE_NTFY_API_S_VER_5 */ +/** + * enum iwl_bt_coex_subcmd_ids - coex configuration command IDs + */ +enum iwl_bt_coex_subcmd_ids { + /** + *@PROFILE_NOTIF: &struct iwl_bt_coex_profile_notif + */ + PROFILE_NOTIF = 0xFF, +}; + +#define COEX_NUM_BAND 3 +#define COEX_NUM_CHAINS 2 + +/** + * struct iwl_bt_coex_profile_notif - notification about BT coex + * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is + * utilizing) when the RSSI is low (<= -65 dBm) + * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that + * BT is utilizing) when the RSSI is mid/high (>= -65 dBm) + */ +struct iwl_bt_coex_profile_notif { + u8 wifi_loss_low_rssi[COEX_NUM_BAND][COEX_NUM_CHAINS]; + u8 wifi_loss_mid_high_rssi[COEX_NUM_BAND][COEX_NUM_CHAINS]; +} __packed; /* BT_COEX_BT_PROFILE_NTF_API_S_VER_1 */ + #endif /* __iwl_fw_api_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 7544c4cb1a30ab..2f40e69db3187b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2022, 2024 Intel Corporation */ #ifndef __iwl_fw_api_commands_h__ #define __iwl_fw_api_commands_h__ @@ -25,6 +25,8 @@ * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids * @LOCATION_GROUP: location group, uses command IDs from * &enum iwl_location_subcmd_ids + * @BT_COEX_GROUP: bt coex group, uses command IDs from + * &enum iwl_bt_coex_subcmd_ids * 
@PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from * &enum iwl_prot_offload_subcmd_ids * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from @@ -43,6 +45,7 @@ enum iwl_mvm_command_groups { SCAN_GROUP = 0x6, NAN_GROUP = 0x7, LOCATION_GROUP = 0x8, + BT_COEX_GROUP = 0x9, PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, @@ -144,8 +147,8 @@ enum iwl_legacy_cmds { /** * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or * &struct iwl_tx_cmd_gen3, - * response in &struct iwl_mvm_tx_resp or - * &struct iwl_mvm_tx_resp_v3 + * response in &struct iwl_tx_resp or + * &struct iwl_tx_resp_v3 */ TX_CMD = 0x1c, @@ -398,7 +401,7 @@ enum iwl_legacy_cmds { REDUCE_TX_POWER_CMD = 0x9f, /** - * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif + * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif_v4 */ MISSED_BEACONS_NOTIFICATION = 0xa2, @@ -467,7 +470,7 @@ enum iwl_legacy_cmds { MARKER_CMD = 0xcb, /** - * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif + * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_prof_old_notif */ BT_PROFILE_NOTIFICATION = 0xce, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 855cd13a181ea3..550de6db18340b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -476,6 +476,8 @@ enum iwl_fw_ini_region_device_memory_subtype { * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START: start handling override preset * request * @IWL_FW_INI_TIME_SCAN_FAILURE: failed scan channel list + * @IWL_FW_INI_TIME_ESR_LINK_UP: EMLSR is active (several links are activated) + * @IWL_FW_INI_TIME_ESR_LINK_DOWN: EMLSR is inactive (only one active link left) * @IWL_FW_INI_TIME_POINT_NUM: number of time points */ enum iwl_fw_ini_time_point { @@ -509,6 +511,8 @@ enum iwl_fw_ini_time_point { IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ, IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START, IWL_FW_INI_TIME_SCAN_FAILURE, + IWL_FW_INI_TIME_ESR_LINK_UP, + IWL_FW_INI_TIME_ESR_LINK_DOWN, IWL_FW_INI_TIME_POINT_NUM, }; /* FW_TLV_DEBUG_TIME_POINT_API_E */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index ca6fa66d1917a1..c46e24fc6a1e46 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -42,7 +42,7 @@ enum iwl_mac_conf_subcmd_ids { */ LINK_CONFIG_CMD = 0x9, /** - * @STA_CONFIG_CMD: &struct iwl_mvm_sta_cfg_cmd + * @STA_CONFIG_CMD: &struct iwl_sta_cfg_cmd */ STA_CONFIG_CMD = 0xA, /** @@ -50,7 +50,7 @@ enum iwl_mac_conf_subcmd_ids { */ AUX_STA_CMD = 0xB, /** - * @STA_REMOVE_CMD: &struct iwl_mvm_remove_sta_cmd + * @STA_REMOVE_CMD: &struct iwl_remove_sta_cmd */ STA_REMOVE_CMD = 0xC, /** @@ -61,6 +61,14 @@ enum iwl_mac_conf_subcmd_ids { * @ROC_CMD: &struct iwl_roc_req */ ROC_CMD = 0xE, + /** + * @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif + */ + MISSED_BEACONS_NOTIF = 0xF6, + /** + * @EMLSR_TRANS_FAIL_NOTIF: &struct iwl_esr_trans_fail_notif + */ + EMLSR_TRANS_FAIL_NOTIF = 0xF7, /** * @ROC_NOTIF: &struct iwl_roc_notif */ @@ -446,6 +454,9 @@ enum iwl_link_ctx_flags { * @listen_lmac: indicates whether the link should be allocated on the Listen * Lmac or on the Main Lmac. Cannot be changed on an active Link. * Relevant only for eSR. + * @block_tx: tell the firmware that this link can't Tx. 
This should be used + * only when a link is de-activated because of CSA with mode = 1. + * Available since version 5. * @reserved1: in version 2, listen_lmac became reserved * @cck_rates: basic rates available for CCK * @ofdm_rates: basic rates available for OFDM @@ -472,7 +483,9 @@ enum iwl_link_ctx_flags { * @bssid_index: index of the associated VAP * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame * @spec_link_id: link_id as the AP knows it - * @reserved2: alignment + * @ul_mu_data_disable: OM Control UL MU Data Disable RX Support (bit 44) in + * HE MAC Capabilities information field as defined in figure 9-897 in + * IEEE802.11REVme-D5.0 * @ibss_bssid_addr: bssid for ibss * @reserved_for_ibss_bssid_addr: reserved * @reserved3: reserved for future use @@ -488,7 +501,10 @@ struct iwl_link_config_cmd { __le32 active; union { __le32 listen_lmac; - __le32 reserved1; + struct { + u8 block_tx; + u8 reserved1[3]; + }; }; __le32 cck_rates; __le32 ofdm_rates; @@ -515,17 +531,17 @@ struct iwl_link_config_cmd { u8 bssid_index; u8 bss_color; u8 spec_link_id; - u8 reserved2; + u8 ul_mu_data_disable; u8 ibss_bssid_addr[6]; __le16 reserved_for_ibss_bssid_addr; __le32 reserved3[8]; -} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3 */ +} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5 */ /* Currently FW supports link ids in the range 0-3 and can have * at most two active links for each vif. */ -#define IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM 2 -#define IWL_MVM_FW_MAX_LINK_ID 3 +#define IWL_FW_MAX_ACTIVE_LINKS_NUM 2 +#define IWL_FW_MAX_LINK_ID 3 /** * enum iwl_fw_sta_type - FW station types @@ -547,7 +563,7 @@ enum iwl_fw_sta_type { }; /* STATION_TYPE_E_VER_1 */ /** - * struct iwl_mvm_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's + * struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's * station table * ( STA_CONFIG_CMD = 0xA ) * @@ -579,7 +595,7 @@ enum iwl_fw_sta_type { * capa * @htc_flags: which features are supported in HTC */ -struct iwl_mvm_sta_cfg_cmd { +struct iwl_sta_cfg_cmd { __le32 sta_id; __le32 link_id; u8 peer_mld_address[ETH_ALEN]; @@ -620,13 +636,13 @@ struct iwl_mvm_aux_sta_cmd { } __packed; /* AUX_STA_CMD_API_S_VER_1 */ /** - * struct iwl_mvm_remove_sta_cmd - a cmd structure to remove a sta added by + * struct iwl_remove_sta_cmd - a cmd structure to remove a sta added by * STA_CONFIG_CMD or AUX_STA_CONFIG_CMD * ( STA_REMOVE_CMD = 0xC ) * * @sta_id: index of station to remove */ -struct iwl_mvm_remove_sta_cmd { +struct iwl_remove_sta_cmd { __le32 sta_id; } __packed; /* REMOVE_STA_API_S_VER_1 */ @@ -663,4 +679,51 @@ struct iwl_mvm_esr_mode_notif { __le32 action; } __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */ +/** + * struct iwl_missed_beacons_notif - sent when by the firmware upon beacon loss + * ( MISSED_BEACONS_NOTIF = 0xF6 ) + * @link_id: fw link ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. + * @consec_missed_beacons: number of consecutive missed beacons + * @other_link_id: used in EMLSR only. The fw link ID for + * &consec_missed_beacons_other_link. IWL_MVM_FW_LINK_ID_INVALID (0xff) if + * invalid. + * @consec_missed_beacons_other_link: number of consecutive missed beacons on + * &other_link_id. 
+ */ +struct iwl_missed_beacons_notif { + __le32 link_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 other_link_id; + __le32 consec_missed_beacons_other_link; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_5 */ + +/* + * enum iwl_esr_trans_fail_code: to be used to parse the notif below + * + * @ESR_TRANS_FAILED_TX_STATUS_ERROR: failed to TX EML OMN frame + * @ESR_TRANSITION_FAILED_TX_TIMEOUT: timeout on the EML OMN frame + * @ESR_TRANSITION_FAILED_BEACONS_NOT_HEARD: can't get a beacon on the new link + */ +enum iwl_esr_trans_fail_code { + ESR_TRANS_FAILED_TX_STATUS_ERROR, + ESR_TRANSITION_FAILED_TX_TIMEOUT, + ESR_TRANSITION_FAILED_BEACONS_NOT_HEARD, +}; + +/** + * struct iwl_esr_trans_fail_notif - FW reports a failure in EMLSR transition + * + * @link_id: the link_id that still works after the failure + * @activation: true if the link was activated, false otherwise + * @err_code: see &enum iwl_esr_trans_fail_code + */ +struct iwl_esr_trans_fail_notif { + __le32 link_id; + __le32 activation; + __le32 err_code; +} __packed; /* ESR_TRANSITION_FAILED_NTFY_API_S_VER_1 */ + #endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index bcbbf8c4a29743..977ca4ac166d8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -16,7 +16,7 @@ #define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) #define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) -#define IWL_MVM_STATION_COUNT_MAX 16 +#define IWL_STATION_COUNT_MAX 16 #define IWL_MVM_INVALID_STA 0xFF enum iwl_ac { @@ -378,7 +378,7 @@ struct iwl_missed_beacons_notif_ver_3 { } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ /** - * struct iwl_missed_beacons_notif - information on missed beacons + * struct iwl_missed_beacons_notif_v4 - information on missed beacons * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) * @link_id: fw link ID * @consec_missed_beacons_since_last_rx: number of consecutive missed @@ -387,7 +387,7 @@ struct iwl_missed_beacons_notif_ver_3 { * @num_expected_beacons: number of expected beacons * @num_recvd_beacons: number of received beacons */ -struct iwl_missed_beacons_notif { +struct iwl_missed_beacons_notif_v4 { __le32 link_id; __le32 consec_missed_beacons_since_last_rx; __le32 consec_missed_beacons; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 6e6a92d173cc2b..df0680eae30cbf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -285,18 +285,12 @@ enum iwl_dev_tx_power_cmd_mode { * @set_mode: see &enum iwl_dev_tx_power_cmd_mode * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in 1/8 dBms. 
- * @dev_24: device TX power restriction in 1/8 dBms - * @dev_52_low: device TX power restriction upper band - low - * @dev_52_high: device TX power restriction upper band - high */ struct iwl_dev_tx_power_common { __le32 set_mode; __le32 mac_context_id; __le16 pwr_restriction; - __le16 dev_24; - __le16 dev_52_low; - __le16 dev_52_high; -}; +} __packed; /** * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3 @@ -412,8 +406,20 @@ struct iwl_dev_tx_power_cmd_v8 { __le32 tpc_vlp_backoff_level; } __packed; /* TX_REDUCED_POWER_API_S_VER_8 */ +/* + * @dev_24: device TX power restriction in 1/8 dBms + * @dev_52_low: device TX power restriction upper band - low + * @dev_52_high: device TX power restriction upper band - high + */ +struct iwl_dev_tx_power_cmd_per_band { + __le16 dev_24; + __le16 dev_52_low; + __le16 dev_52_high; +} __packed; + /** - * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) + * struct iwl_dev_tx_power_cmd_v3_v8 - TX power reduction command (multiversion) + * @per_band: per band restrictions * @common: common part of the command * @v3: version 3 part of the command * @v4: version 4 part of the command @@ -422,8 +428,9 @@ struct iwl_dev_tx_power_cmd_v8 { * @v7: version 7 part of the command * @v8: version 8 part of the command */ -struct iwl_dev_tx_power_cmd { +struct iwl_dev_tx_power_cmd_v3_v8 { struct iwl_dev_tx_power_common common; + struct iwl_dev_tx_power_cmd_per_band per_band; union { struct iwl_dev_tx_power_cmd_v3 v3; struct iwl_dev_tx_power_cmd_v4 v4; @@ -434,6 +441,60 @@ struct iwl_dev_tx_power_cmd_v8 { }; }; +/** + * struct iwl_dev_tx_power_cmd_v9 - TX power reduction cmd + * @reserved: reserved (padding) + * @per_chain: per chain restrictions + * @per_chain_restriction_changed: whether per_chain_restriction has changed + * from the last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved1: reserved (padding) + * @timer_period: timer in milliseconds. if it expires, FW will change to default + * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER + */ +struct iwl_dev_tx_power_cmd_v9 { + __le16 reserved; + __le16 per_chain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; + u8 per_chain_restriction_changed; + u8 reserved1[3]; + __le32 timer_period; +} __packed; /* TX_REDUCED_POWER_API_S_VER_9 */ + +/** + * struct iwl_dev_tx_power_cmd_v10 - TX power reduction cmd + * @per_chain: per chain restrictions + * @per_chain_restriction_changed: whether per_chain_restriction has changed + * from the last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved: reserved (padding) + * @timer_period: timer in milliseconds. if it expires, FW will change to default + * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER + * @flags: reduce power flags. 
+ */ +struct iwl_dev_tx_power_cmd_v10 { + __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; + u8 per_chain_restriction_changed; + u8 reserved; + __le32 timer_period; + __le32 flags; +} __packed; /* TX_REDUCED_POWER_API_S_VER_10 */ + +/* + * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) + * @common: common part of the command + * @v9: version 9 part of the command + * @v10: version 10 part of the command + */ +struct iwl_dev_tx_power_cmd { + struct iwl_dev_tx_power_common common; + union { + struct iwl_dev_tx_power_cmd_v9 v9; + struct iwl_dev_tx_power_cmd_v10 v10; + }; +} __packed; /* TX_REDUCED_POWER_API_S_VER_9_VER10 */ + #define IWL_NUM_GEO_PROFILES 3 #define IWL_NUM_GEO_PROFILES_V3 8 #define IWL_NUM_BANDS_PER_CHAIN_V1 2 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 8598031567bba5..f486d624500beb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -731,39 +731,46 @@ enum iwl_umac_scan_general_params_flags2 { * struct iwl_scan_channel_cfg_umac * @flags: bitmap - 0-19: directed scan to i'th ssid. * @channel_num: channel number 1-13 etc. - * @band: band of channel: 0 for 2GHz, 1 for 5GHz - * @iter_count: repetition count for the channel. - * @iter_interval: interval between two scan iterations on one channel. + * @v1: command version 1 + * @v1.iter_count: repetition count for the channel. + * @v1.iter_interval: interval between two scan iterations on one channel. + * @v2: command versions 2-4 + * @v2.band: band of channel: 0 for 2GHz, 1 for 5GHz + * @v2.iter_count: repetition count for the channel. + * @v2.iter_interval: interval between two scan iterations on one channel. + * @v5: command versions 5 and up + * @v5.iter_count: repetition count for the channel. + * @v5.iter_interval: interval between two scan iterations on one channel. + * @v5.psd_20: highest PSD value for all APs known so far + * on this channel. 
*/ struct iwl_scan_channel_cfg_umac { #define IWL_CHAN_CFG_FLAGS_BAND_POS 30 __le32 flags; + u8 channel_num; /* All versions are of the same size, so use a union without adjusting * the command size later */ union { struct { - u8 channel_num; u8 iter_count; __le16 iter_interval; - } v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */ + } __packed v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */ struct { - u8 channel_num; u8 band; u8 iter_count; u8 iter_interval; - } v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2 - * SCAN_CHANNEL_CONFIG_API_S_VER_3 - * SCAN_CHANNEL_CONFIG_API_S_VER_4 - */ + } __packed v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2 + * SCAN_CHANNEL_CONFIG_API_S_VER_3 + * SCAN_CHANNEL_CONFIG_API_S_VER_4 + */ struct { - u8 channel_num; u8 psd_20; u8 iter_count; u8 iter_interval; - } v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */ - }; + } __packed v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */ + } __packed; } __packed; /** @@ -1132,6 +1139,19 @@ struct iwl_umac_scan_abort { __le32 flags; } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ +/** + * enum iwl_umac_scan_abort_status + * + * @IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS: scan was successfully aborted + * @IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS: scan abort is in progress + * @IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND: nothing to abort + */ +enum iwl_umac_scan_abort_status { + IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS = 0, + IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS, + IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND, +}; + /** * struct iwl_umac_scan_complete * @uid: scan id, &enum iwl_umac_scan_uid_offsets diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index 2271b19213fab3..0a9f14fb04bef5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 - 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -327,14 +327,14 @@ struct mvm_statistics_load { __le32 air_time[MAC_INDEX_AUX]; __le32 byte_count[MAC_INDEX_AUX]; __le32 pkt_count[MAC_INDEX_AUX]; - u8 avg_energy[IWL_MVM_STATION_COUNT_MAX]; + u8 avg_energy[IWL_STATION_COUNT_MAX]; } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */ struct mvm_statistics_load_v1 { __le32 air_time[NUM_MAC_INDEX]; __le32 byte_count[NUM_MAC_INDEX]; __le32 pkt_count[NUM_MAC_INDEX]; - u8 avg_energy[IWL_MVM_STATION_COUNT_MAX]; + u8 avg_energy[IWL_STATION_COUNT_MAX]; } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */ struct mvm_statistics_rx { @@ -594,7 +594,7 @@ struct iwl_stats_ntfy_per_sta { } __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */ #define IWL_STATS_MAX_PHY_OPERATIONAL 3 -#define IWL_STATS_MAX_FW_LINKS (IWL_MVM_FW_MAX_LINK_ID + 1) +#define IWL_STATS_MAX_FW_LINKS (IWL_FW_MAX_LINK_ID + 1) /** * struct iwl_system_statistics_notif_oper @@ -608,7 +608,7 @@ struct iwl_system_statistics_notif_oper { __le32 time_stamp; struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS]; struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; - struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX]; + struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX]; } __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */ /** @@ -651,7 +651,7 @@ struct iwl_statistics_operational_ntfy { __le32 flags; struct iwl_stats_ntfy_per_mac 
per_mac[MAC_INDEX_AUX]; struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; - struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX]; + struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX]; __le64 rx_time; __le64 tx_time; __le64 on_time_rf; @@ -699,7 +699,7 @@ struct iwl_statistics_operational_ntfy_ver_14 { __le64 tx_time; __le64 on_time_rf; __le64 on_time_scan; - __le32 average_energy[IWL_MVM_STATION_COUNT_MAX]; + __le32 average_energy[IWL_STATION_COUNT_MAX]; __le32 reserved; } __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_14 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index c5277e2f8cd48c..f3bf2e087a40ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -486,7 +486,7 @@ struct agg_tx_status { #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) /** - * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet + * struct iwl_tx_resp_v3 - notifies that fw is TXing a packet * ( REPLY_TX = 0x1c ) * @frame_count: 1 no aggregation, >1 aggregation * @bt_kill_count: num of times blocked by bluetooth (unused for agg) @@ -517,7 +517,7 @@ struct agg_tx_status { * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. */ -struct iwl_mvm_tx_resp_v3 { +struct iwl_tx_resp_v3 { u8 frame_count; u8 bt_kill_count; u8 failure_rts; @@ -543,7 +543,7 @@ struct iwl_mvm_tx_resp_v3 { } __packed; /* TX_RSP_API_S_VER_3 */ /** - * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet + * struct iwl_tx_resp - notifies that fw is TXing a packet * ( REPLY_TX = 0x1c ) * @frame_count: 1 no aggregation, >1 aggregation * @bt_kill_count: num of times blocked by bluetooth (unused for agg) @@ -575,7 +575,7 @@ struct iwl_mvm_tx_resp_v3 { * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. */ -struct iwl_mvm_tx_resp { +struct iwl_tx_resp { u8 frame_count; u8 bt_kill_count; u8 failure_rts; @@ -823,7 +823,7 @@ struct iwl_mac_beacon_cmd { */ struct iwl_beacon_notif { - struct iwl_mvm_tx_resp beacon_notify_hdr; + struct iwl_tx_resp beacon_notify_hdr; __le64 tsf; __le32 ibss_mgr_status; } __packed; @@ -836,7 +836,7 @@ struct iwl_beacon_notif { * @gp2: last beacon time in gp2 */ struct iwl_extended_beacon_notif_v5 { - struct iwl_mvm_tx_resp beacon_notify_hdr; + struct iwl_tx_resp beacon_notify_hdr; __le64 tsf; __le32 ibss_mgr_status; __le32 gp2; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c index 560a91998cc488..4d9a1f83ef8c2d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c @@ -634,3 +634,19 @@ int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, GET_BIOS_TABLE(dsm, fwrt, func, value); } IWL_EXPORT_SYMBOL(iwl_bios_get_dsm); + +bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc) +{ + /* Some kind of regulatory mess means we need to currently disallow + * puncturing in the US and Canada unless enabled in BIOS. 
+ */ + switch (mcc) { + case IWL_MCC_US: + return puncturing & IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK; + case IWL_MCC_CANADA: + return puncturing & IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK; + default: + return true; + } +} +IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h index e2c056f483c1c7..81787501d4a4f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h @@ -45,6 +45,8 @@ #define IWL_WTAS_ENABLE_IEC_MSK 0x4 #define IWL_WTAS_USA_UHB_MSK BIT(16) +#define BIOS_MCC_CHINA 0x434e + /* * The profile for revision 2 is a superset of revision 1, which is in * turn a superset of revision 0. So we can store all revisions @@ -217,4 +219,6 @@ static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes, return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK : IWL_PPAG_REV3_MASK); } + +bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc); #endif /* __fw_regulatory_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index fb982d4fe85100..091fb6fd7c787c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -638,7 +638,7 @@ int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc) goto out; } - if (data->mcc != UEFI_MCC_CHINA) { + if (data->mcc != BIOS_MCC_CHINA) { ret = -EINVAL; IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n"); goto out; @@ -729,3 +729,32 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, kfree(data); return ret; } + +int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_var_puncturing_data *data; + /* default value is not enabled if there is any issue in reading + * uefi variable or revision is not supported + */ + int puncturing = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, + IWL_UEFI_PUNCTURING_NAME, + "UefiCnvWlanPuncturing", + sizeof(*data), NULL); + if (IS_ERR(data)) + return puncturing; + + if (data->revision != IWL_UEFI_PUNCTURING_REVISION) { + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PUNCTURING rev:%d\n", + data->revision); + } else { + puncturing = data->puncturing & IWL_UEFI_PUNCTURING_REV0_MASK; + IWL_DEBUG_RADIO(fwrt, "Loaded puncturing bits from UEFI: %d\n", + puncturing); + } + + kfree(data); + return puncturing; +} +IWL_EXPORT_SYMBOL(iwl_uefi_get_puncturing); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index 1f8884ca8997c4..e525d449e656e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -22,6 +22,7 @@ #define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV" #define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg" #define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM" +#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing" #define IWL_SGOM_MAP_SIZE 339 @@ -38,6 +39,7 @@ #define IWL_UEFI_ECKV_REVISION 0 #define IWL_UEFI_WBEM_REVISION 0 #define IWL_UEFI_DSM_REVISION 4 +#define IWL_UEFI_PUNCTURING_REVISION 0 struct pnvm_sku_package { u8 rev; @@ -149,8 +151,6 @@ struct uefi_cnv_var_splc { u32 default_pwr_limit; } __packed; -#define UEFI_MCC_CHINA 0x434e - /* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI * @revision: the revision of the table * @mcc: country identifier as defined in ISO/IEC 3166-1 Alpha 2 code @@ -194,6 +194,25 @@ struct uefi_cnv_wlan_wbem_data { u32 
wbem_320mhz_per_mcc; } __packed; +enum iwl_uefi_cnv_puncturing_flags { + IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK = BIT(0), + IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK = BIT(1), +}; + +#define IWL_UEFI_PUNCTURING_REV0_MASK (IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK | \ + IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK) +/** + * struct uefi_cnv_var_puncturing_data - controlling channel + * puncturing for few countries. + * @revision: the revision of the table + * @puncturing: enablement of channel puncturing per mcc + * see &enum iwl_uefi_cnv_puncturing_flags. + */ +struct uefi_cnv_var_puncturing_data { + u8 revision; + u32 puncturing; +} __packed; + /* * This is known to be broken on v4.19 and to work on v5.4. Until we * figure out why this is the case and how to make it work, simply @@ -224,6 +243,7 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); int iwl_uefi_get_uats_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt); #else /* CONFIG_EFI */ static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) { @@ -320,5 +340,11 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans, { return 0; } + +static inline +int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt) +{ + return 0; +} #endif /* CONFIG_EFI */ #endif /* __iwl_fw_uefi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index b2abd4fd194449..34c91deca57b1b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -504,6 +504,7 @@ extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg; extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index aaaabd67f9593c..2abfc986701f8e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1205,7 +1205,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(u32)) goto invalid_tlv_len; if (le32_to_cpup((const __le32 *)tlv_data) > - IWL_MVM_STATION_COUNT_MAX) { + IWL_STATION_COUNT_MAX) { IWL_ERR(drv, "%d is an invalid number of station\n", le32_to_cpup((const __le32 *)tlv_data)); @@ -1479,7 +1479,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; - fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX; + fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX; fw->ucode_capa.num_beacons = 1; /* dump all fw memory areas by default */ fw->dbg.dump_mask = 0xffffffff; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 0ef48effeefb4e..e95ffe3035473c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -386,7 +386,6 @@ struct iwl_dump_sanitize_ops { * @cmd_queue: the index of the 
command queue. * Must be set before start_fw. * @cmd_fifo: the fifo for host commands - * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue. * @no_reclaim_cmds: Some devices erroneously don't set the * SEQ_RX_FRAME bit on some notifications, this is the * list of such notifications to filter. Max length is @@ -412,7 +411,6 @@ struct iwl_trans_config { u8 cmd_queue; u8 cmd_fifo; - unsigned int cmd_q_wdg_timeout; const u8 *no_reclaim_cmds; unsigned int n_no_reclaim_cmds; diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h index 4900de3cc0d30b..2081775e0ec916 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h +++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h @@ -283,6 +283,16 @@ struct iwl_mei_colloc_info { u8 bssid[ETH_ALEN]; }; +/** + * enum iwl_mei_sap_version - SAP version + * @IWL_MEI_SAP_VERSION_3: SAP version 3 + * @IWL_MEI_SAP_VERSION_4: SAP version 4 + */ +enum iwl_mei_sap_version { + IWL_MEI_SAP_VERSION_3 = 3, + IWL_MEI_SAP_VERSION_4 = 4, +}; + /* * struct iwl_mei_ops - driver's operations called by iwlmei * Operations will not be called more than once concurrently. diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c index 1dd9106c65136e..dce0b7cf7b265c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/main.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2021-2023 Intel Corporation + * Copyright (C) 2021-2024 Intel Corporation */ #include @@ -58,7 +58,6 @@ bool iwl_mei_is_connected(void) } EXPORT_SYMBOL_GPL(iwl_mei_is_connected); -#define SAP_VERSION 3 #define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */ struct iwl_sap_q_ctrl_blk { @@ -110,16 +109,19 @@ struct iwl_sap_shared_mem_ctrl_blk { #define SAP_H2M_DATA_Q_SZ 48256 #define SAP_M2H_DATA_Q_SZ 24128 -#define SAP_H2M_NOTIF_Q_SZ 2240 +#define SAP_H2M_NOTIF_Q_SZ_VER3 2240 +#define SAP_H2M_NOTIF_Q_SZ_VER4 32768 #define SAP_M2H_NOTIF_Q_SZ 62720 -#define _IWL_MEI_SAP_SHARED_MEM_SZ \ +#define _IWL_MEI_SAP_SHARED_MEM_SZ_VER3 \ (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \ - SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \ + SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER3 + \ SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4) -#define IWL_MEI_SAP_SHARED_MEM_SZ \ - (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE)) +#define _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 \ + (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \ + SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER4 + \ + SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4) struct iwl_mei_shared_mem_ptrs { struct iwl_sap_shared_mem_ctrl_blk *ctrl; @@ -206,6 +208,7 @@ struct iwl_mei { * @mac_address: interface MAC address. * @nvm_address: NVM MAC address. * @priv: A pointer to iwlwifi. + * @sap_version: The SAP version to use. enum iwl_mei_sap_version. * * This used to cache the configurations coming from iwlwifi's way. 
The data * is cached here so that we can buffer the configuration even if we don't have @@ -220,6 +223,7 @@ struct iwl_mei_cache { u16 mcc; u8 mac_address[6]; u8 nvm_address[6]; + enum iwl_mei_sap_version sap_version; void *priv; }; @@ -238,14 +242,17 @@ static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev) #define HBM_DMA_BUF_ID_WLAN 1 -static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) +static int iwl_mei_alloc_mem_for_version(struct mei_cl_device *cldev, + enum iwl_mei_sap_version version) { struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; + u32 mem_size = roundup(version == IWL_MEI_SAP_VERSION_4 ? + _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 : + _IWL_MEI_SAP_SHARED_MEM_SZ_VER3, PAGE_SIZE); - mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, - IWL_MEI_SAP_SHARED_MEM_SZ); - + iwl_mei_cache.sap_version = version; + mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, mem_size); if (IS_ERR(mem->ctrl)) { int ret = PTR_ERR(mem->ctrl); @@ -254,11 +261,30 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) return ret; } - memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ); + memset(mem->ctrl, 0, mem_size); return 0; } +static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) +{ + int ret; + + /* + * SAP version 4 uses a larger Host to MEI notif queue. + * Since it is unknown at this stage which SAP version is used by the + * CSME firmware on this platform, try to allocate the version 4 first. + * If the CSME firmware uses version 3, this allocation is expected to + * fail because the CSME firmware allocated less memory for our driver. + */ + ret = iwl_mei_alloc_mem_for_version(cldev, IWL_MEI_SAP_VERSION_4); + if (ret) + ret = iwl_mei_alloc_mem_for_version(cldev, + IWL_MEI_SAP_VERSION_3); + + return ret; +} + static void iwl_mei_init_shared_mem(struct iwl_mei *mei) { struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; @@ -277,7 +303,9 @@ static void iwl_mei_init_shared_mem(struct iwl_mei *mei) h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = cpu_to_le32(SAP_H2M_DATA_Q_SZ); h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = - cpu_to_le32(SAP_H2M_NOTIF_Q_SZ); + iwl_mei_cache.sap_version == IWL_MEI_SAP_VERSION_3 ? 
+ cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER3) : + cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER4); m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = cpu_to_le32(SAP_M2H_DATA_Q_SZ); m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = @@ -647,7 +675,7 @@ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev, return; } - if (rsp->supported_version != SAP_VERSION) { + if (rsp->supported_version != iwl_mei_cache.sap_version) { dev_err(&cldev->dev, "didn't get the expected version: got %d\n", rsp->supported_version); @@ -1281,7 +1309,7 @@ static int iwl_mei_send_start(struct mei_cl_device *cldev) .hdr.type = cpu_to_le32(SAP_ME_MSG_START), .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), .hdr.len = cpu_to_le32(sizeof(msg)), - .supported_versions[0] = SAP_VERSION, + .supported_versions[0] = iwl_mei_cache.sap_version, .init_data_seq_num = cpu_to_le16(0x100), .init_notif_seq_num = cpu_to_le16(0x800), }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 8873904f51ecb8..59751f123571be 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -13,7 +13,7 @@ iwlmvm-y += ptp.o iwlmvm-y += time-sync.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-$(CONFIG_PM) += d3.o +iwlmvm-$(CONFIG_PM_SLEEP) += d3.o iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o subdir-ccflags-y += -I $(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index ad3e14a0d0434f..b607961970e970 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -208,7 +208,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, } struct iwl_bt_iterator_data { - struct iwl_bt_coex_profile_notif *notif; + struct iwl_bt_coex_prof_old_notif *notif; struct iwl_mvm *mvm; struct ieee80211_chanctx_conf *primary; struct ieee80211_chanctx_conf *secondary; @@ -266,10 +266,26 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool have_wifi_loss_rate = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, - BT_PROFILE_NOTIFICATION, 0) > 4; + BT_PROFILE_NOTIFICATION, 0) > 4 || + iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP, + PROFILE_NOTIF, 0) >= 1; + u8 wifi_loss_mid_high_rssi; + u8 wifi_loss_low_rssi; u8 wifi_loss_rate; - if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF) + if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP, + PROFILE_NOTIF, 0) >= 1) { + /* For now, we consider 2.4 GHz band / ANT_A only */ + wifi_loss_mid_high_rssi = + mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0]; + wifi_loss_low_rssi = + mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0]; + } else { + wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi; + wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi; + } + + if (wifi_loss_low_rssi == BT_OFF) return true; if (primary) @@ -286,20 +302,20 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, * we will get an update on this and exit eSR. */ if (!link_rssi) - wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi; + wifi_loss_rate = wifi_loss_mid_high_rssi; else if (mvmvif->esr_active) /* RSSI needs to get really low to disable eSR... */ wifi_loss_rate = link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ? 
- mvm->last_bt_notif.wifi_loss_low_rssi : - mvm->last_bt_notif.wifi_loss_mid_high_rssi; + wifi_loss_low_rssi : + wifi_loss_mid_high_rssi; else /* ...And really high before we enable it back */ wifi_loss_rate = link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ? - mvm->last_bt_notif.wifi_loss_low_rssi : - mvm->last_bt_notif.wifi_loss_mid_high_rssi; + wifi_loss_low_rssi : + wifi_loss_mid_high_rssi; return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH; } @@ -509,6 +525,35 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id); } +/* must be called under rcu_read_lock */ +static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = _data; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + for (int link_id = 0; + link_id < IEEE80211_MLD_MAX_NUM_LINKS; + link_id++) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference_check(vif->link_conf[link_id], + lockdep_is_held(&mvm->mutex)); + struct ieee80211_chanctx_conf *chanctx_conf = + rcu_dereference_check(link_conf->chanctx_conf, + lockdep_is_held(&mvm->mutex)); + + if ((!chanctx_conf || + chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) + continue; + + iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id); + } +} + static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) { struct iwl_bt_iterator_data data = { @@ -591,11 +636,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) } } -void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) +void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; + struct iwl_bt_coex_prof_old_notif *notif = (void *)pkt->data; IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); @@ -612,6 +657,22 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, iwl_mvm_bt_coex_notif_handle(mvm); } +void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + const struct iwl_rx_packet *pkt = rxb_addr(rxb); + const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data; + + lockdep_assert_held(&mvm->mutex); + + mvm->last_bt_wifi_loss = *notif; + + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_bt_coex_notif_iterator, + mvm); +} + void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data rssi_event) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index c4c1e67b9ac76e..ddf484027d4fe6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -16,6 +16,8 @@ #define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0 #define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30 #define IWL_MVM_TPT_COUNT_WINDOW_SEC 5 +#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5 +#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 11 #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) @@ -109,7 +111,7 @@ #define IWL_MVM_FTM_INITIATOR_SECURE_LTF false #define IWL_MVM_FTM_RESP_NDP_SUPPORT true #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true -#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5 +#define 
IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7 #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true @@ -125,7 +127,6 @@ #define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ #define IWL_MVM_MIN_BEACON_INTERVAL_TU 16 #define IWL_MVM_AUTO_EML_ENABLE true -#define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7 #define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67 #define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 99a541d442bb18..49a6aff42376b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -3768,7 +3768,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) } const struct file_operations iwl_dbgfs_d3_test_ops = { - .llseek = no_llseek, .open = iwl_mvm_d3_test_open, .read = iwl_mvm_d3_test_read, .release = iwl_mvm_d3_test_release, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index afd90a52d4ecbe..55245f913286b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -772,6 +772,7 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_ftm_iter_data target; target.bssid = bssid; + target.cipher = cipher; ieee80211_iter_keys(mvm->hw, vif, iter, &target); } else { memcpy(tk, entry->tk, sizeof(entry->tk)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 08c4898c8f1a33..08546e673cf51e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -863,7 +863,10 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { u32 cmd_id = REDUCE_TX_POWER_CMD; - struct iwl_dev_tx_power_cmd cmd = { + struct iwl_dev_tx_power_cmd_v3_v8 cmd = { + .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), + }; + struct iwl_dev_tx_power_cmd cmd_v9_v10 = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; __le16 *per_chain; @@ -871,8 +874,19 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) u16 len = 0; u32 n_subbands; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3); + void *cmd_data = &cmd; - if (cmd_ver >= 7) { + if (cmd_ver == 10) { + len = sizeof(cmd_v9_v10.v10); + n_subbands = IWL_NUM_SUB_BANDS_V2; + per_chain = &cmd_v9_v10.v10.per_chain[0][0][0]; + cmd_v9_v10.v10.flags = + cpu_to_le32(mvm->fwrt.reduced_power_flags); + } else if (cmd_ver == 9) { + len = sizeof(cmd_v9_v10.v9); + n_subbands = IWL_NUM_SUB_BANDS_V1; + per_chain = &cmd_v9_v10.v9.per_chain[0][0]; + } else if (cmd_ver >= 7) { len = sizeof(cmd.v7); n_subbands = IWL_NUM_SUB_BANDS_V2; per_chain = cmd.v7.per_chain[0][0]; @@ -899,9 +913,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) per_chain = cmd.v3.per_chain[0][0]; } - /* all structs have the same common part, add it */ + /* all structs have the same common part, add its length */ len += sizeof(cmd.common); + if (cmd_ver < 9) + len += sizeof(cmd.per_band); + else + cmd_data = &cmd_v9_v10; + ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain, IWL_NUM_CHAIN_TABLES, n_subbands, prof_a, prof_b); @@ -913,7 +932,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) 
iwl_mei_set_power_limit(per_chain); IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); - return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data); } int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) @@ -1464,7 +1483,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL); } - for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++) + for (i = 0; i < IWL_FW_MAX_LINK_ID + 1; i++) RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL); mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index a9929aa49913af..2b06521680020c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -17,7 +17,8 @@ HOW(EXIT_COEX) \ HOW(EXIT_BANDWIDTH) \ HOW(EXIT_CSA) \ - HOW(EXIT_LINK_USAGE) + HOW(EXIT_LINK_USAGE) \ + HOW(EXIT_FAIL_ENTRY) static const char *const iwl_mvm_esr_states_names[] = { #define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x, @@ -233,10 +234,15 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, WARN_ON_ONCE(active == link_info->active); /* When deactivating a link session protection should - * be stopped + * be stopped. Also let the firmware know if we can't Tx. */ - if (!active && vif->type == NL80211_IFTYPE_STATION) + if (!active && vif->type == NL80211_IFTYPE_STATION) { iwl_mvm_stop_session_protection(mvm, vif); + if (link_info->csa_block_tx) { + cmd.block_tx = 1; + link_info->csa_block_tx = false; + } + } } cmd.link_id = cpu_to_le32(link_info->fw_link_id); @@ -258,7 +264,7 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid) memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN); - iwl_mvm_set_fw_basic_rates(mvm, vif, link_conf, + iwl_mvm_set_fw_basic_rates(mvm, vif, link_info, &cmd.cck_rates, &cmd.ofdm_rates); cmd.cck_short_preamble = cpu_to_le32(link_conf->use_short_preamble); @@ -293,6 +299,17 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, (link_conf->uora_ocw_range >> 3) & 0x7; } + /* ap_sta may be NULL if we're disconnecting */ + if (changes & LINK_CONTEXT_MODIFY_HE_PARAMS && mvmvif->ap_sta) { + struct ieee80211_link_sta *link_sta = + link_sta_dereference_check(mvmvif->ap_sta, link_id); + + if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he && + link_sta->he_cap.he_cap_elem.mac_cap_info[5] & + IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX) + cmd.ul_mu_data_disable = 1; + } + /* TODO how to set ndp_fdbk_buff_th_exp? */ if (iwl_mvm_set_fw_mu_edca_params(mvm, mvmvif->link[link_id], diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index dfcc96f18b4f7c..a7a10e716e6517 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -413,19 +413,18 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, } void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, + struct iwl_mvm_vif_link_info *link_info, __le32 *cck_rates, __le32 *ofdm_rates) { - struct ieee80211_chanctx_conf *chanctx; + struct iwl_mvm_phy_ctxt *phy_ctxt; u8 cck_ack_rates = 0, ofdm_ack_rates = 0; + enum nl80211_band band = NL80211_BAND_2GHZ; - rcu_read_lock(); - chanctx = rcu_dereference(link_conf->chanctx_conf); - iwl_mvm_ack_rates(mvm, vif, chanctx ? 
chanctx->def.chan->band - : NL80211_BAND_2GHZ, - &cck_ack_rates, &ofdm_ack_rates); + phy_ctxt = link_info->phy_ctxt; + if (phy_ctxt && phy_ctxt->channel) + band = phy_ctxt->channel->band; - rcu_read_unlock(); + iwl_mvm_ack_rates(mvm, vif, band, &cck_ack_rates, &ofdm_ack_rates); *cck_rates = cpu_to_le32((u32)cck_ack_rates); *ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); @@ -563,7 +562,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, else eth_broadcast_addr(cmd->bssid_addr); - iwl_mvm_set_fw_basic_rates(mvm, vif, &vif->bss_conf, &cmd->cck_rates, + iwl_mvm_set_fw_basic_rates(mvm, vif, &mvmvif->deflink, &cmd->cck_rates, &cmd->ofdm_rates); cmd->cck_short_preamble = @@ -1528,7 +1527,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) { - struct iwl_mvm_tx_resp *beacon_notify_hdr = + struct iwl_tx_resp *beacon_notify_hdr = &beacon_v5->beacon_notify_hdr; if (unlikely(pkt_len < sizeof(*beacon_v5))) @@ -1586,11 +1585,11 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, } } -void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) +static void +iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm, + const struct iwl_missed_beacons_notif *mb, + struct iwl_rx_packet *pkt) { - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacons_notif *mb = (void *)pkt->data; struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; struct iwl_fw_dbg_trigger_tlv *trigger; u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; @@ -1604,6 +1603,16 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, MISSED_BEACONS_NOTIFICATION, 0); + u8 new_notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + MISSED_BEACONS_NOTIF, 0); + + /* If the firmware uses the new notification (from MAC_CONF_GROUP), + * refer to that notification's version. + * Note that the new notification from MAC_CONF_GROUP starts from + * version 5. + */ + if (new_notif_ver) + notif_ver = new_notif_ver; /* before version four the ID in the notification refers to mac ID */ if (notif_ver < 4) { @@ -1620,13 +1629,11 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, } IWL_DEBUG_INFO(mvm, - "missed bcn %s_id=%u, consecutive=%u (%u, %u, %u)\n", + "missed bcn %s_id=%u, consecutive=%u (%u)\n", notif_ver < 4 ? 
"mac" : "link", id, le32_to_cpu(mb->consec_missed_beacons), - le32_to_cpu(mb->consec_missed_beacons_since_last_rx), - le32_to_cpu(mb->num_recvd_beacons), - le32_to_cpu(mb->num_expected_beacons)); + le32_to_cpu(mb->consec_missed_beacons_since_last_rx)); if (!vif) return; @@ -1656,10 +1663,27 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, "missed_beacons:%d, missed_beacons_since_rx:%d\n", rx_missed_bcon, rx_missed_bcon_since_rx); } - } else if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH && - link_id >= 0 && hweight16(vif->active_links) > 1) { - iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_MISSED_BEACON, - iwl_mvm_get_other_link(vif, link_id)); + } else if (link_id >= 0 && hweight16(vif->active_links) > 1) { + u32 scnd_lnk_bcn_lost = 0; + + if (notif_ver >= 5 && + !IWL_FW_CHECK(mvm, + le32_to_cpu(mb->other_link_id) == IWL_MVM_FW_LINK_ID_INVALID, + "No data for other link id but we are in EMLSR active_links: 0x%x\n", + vif->active_links)) + scnd_lnk_bcn_lost = + le32_to_cpu(mb->consec_missed_beacons_other_link); + + /* Exit EMLSR if we lost more than + * IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH beacons on boths links + * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH on any link. + */ + if ((rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS && + scnd_lnk_bcn_lost >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) || + rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH) + iwl_mvm_exit_esr(mvm, vif, + IWL_MVM_ESR_EXIT_MISSED_BEACON, + iwl_mvm_get_primary_link(vif)); } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) { if (!iwl_mvm_has_new_tx_api(mvm)) ieee80211_beacon_loss(vif); @@ -1687,6 +1711,31 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); } +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + iwl_mvm_handle_missed_beacons_notif(mvm, (const void *)pkt->data, pkt); +} + +void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + const struct iwl_missed_beacons_notif_v4 *mb_v4 = + (const void *)pkt->data; + struct iwl_missed_beacons_notif mb = { + .link_id = mb_v4->link_id, + .consec_missed_beacons = mb_v4->consec_missed_beacons, + .consec_missed_beacons_since_last_rx = + mb_v4->consec_missed_beacons_since_last_rx, + .other_link_id = cpu_to_le32(IWL_MVM_FW_LINK_ID_INVALID), + }; + + iwl_mvm_handle_missed_beacons_notif(mvm, &mb, pkt); +} + void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 625ccf566e1c2a..a327893c6dce47 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -165,12 +165,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, mvm->lar_regdom_set = true; mvm->mcc_src = src_id; - /* Some kind of regulatory mess means we need to currently disallow - * puncturing in the US and Canada. Do that here, at least until we - * figure out the new chanctx APIs for puncturing. 
- */ - if (resp->mcc == cpu_to_le16(IWL_MCC_US) || - resp->mcc == cpu_to_le16(IWL_MCC_CANADA)) + if (!iwl_puncturing_is_allowed_in_bios(mvm->bios_enable_puncturing, + le16_to_cpu(resp->mcc))) ieee80211_hw_set(mvm->hw, DISALLOW_PUNCTURING); else __clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mvm->hw->flags); @@ -639,10 +635,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_P2P_GO_OPPPS | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | - NL80211_FEATURE_DYNAMIC_SMPS | - NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; + /* when firmware supports RLC/SMPS offload, do not set these + * driver features, since it's no longer supported by driver. + */ + if (!iwl_mvm_has_rlc_offload(mvm)) + hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS | + NL80211_FEATURE_DYNAMIC_SMPS; + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; @@ -838,20 +839,10 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw, if (ieee80211_is_mgmt(hdr->frame_control)) sta = NULL; - /* If there is no sta, and it's not offchannel - send through AP */ + /* this shouldn't even happen: just drop */ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && - !offchannel) { - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(info->control.vif); - u8 ap_sta_id = READ_ONCE(mvmvif->deflink.ap_sta_id); - - if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { - /* mac80211 holds rcu read lock */ - sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); - if (IS_ERR_OR_NULL(sta)) - goto drop; - } - } + !offchannel) + goto drop; if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED && !ieee80211_is_probe_resp(hdr->frame_control)) { @@ -1241,7 +1232,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) mvm->nvm_data = NULL; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /* fast_resume will be cleared by iwl_mvm_fast_resume */ fast_resume = mvm->fast_resume; @@ -1263,7 +1254,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); } } -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* @@ -1480,19 +1471,33 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { u32 cmd_id = REDUCE_TX_POWER_CMD; int len; - struct iwl_dev_tx_power_cmd cmd = { + struct iwl_dev_tx_power_cmd_v3_v8 cmd = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), .common.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .common.pwr_restriction = cpu_to_le16(8 * tx_power), }; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); - - if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - - if (cmd_ver == 8) + struct iwl_dev_tx_power_cmd cmd_v9_v10; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3); + u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ? 
+ IWL_DEV_MAX_TX_POWER : 8 * tx_power; + void *cmd_data = &cmd; + + cmd.common.pwr_restriction = cpu_to_le16(u_tx_power); + + if (cmd_ver > 8) { + /* Those fields sit on the same place for v9 and v10 */ + cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC); + cmd_v9_v10.common.mac_context_id = + cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id); + cmd_v9_v10.common.pwr_restriction = cpu_to_le16(u_tx_power); + cmd_data = &cmd_v9_v10; + } + + if (cmd_ver == 10) + len = sizeof(cmd_v9_v10.v10); + else if (cmd_ver == 9) + len = sizeof(cmd_v9_v10.v9); + else if (cmd_ver == 8) len = sizeof(cmd.v8); else if (cmd_ver == 7) len = sizeof(cmd.v7); @@ -1507,10 +1512,14 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, else len = sizeof(cmd.v3); - /* all structs have the same common part, add it */ + /* all structs have the same common part, add its length */ len += sizeof(cmd.common); - return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); + if (cmd_ver < 9) + len += sizeof(cmd.per_band); + + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data); + } static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c index 8a38fc4b0b0f97..455f5f41750642 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c @@ -144,7 +144,7 @@ static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw, if (sta != data->sta || key->link_id >= 0) return; - err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cmd), &cmd); + err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); if (err) data->err = err; @@ -162,8 +162,8 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, .new_sta_mask = new_sta_mask, }; - ieee80211_iter_keys_rcu(mvm->hw, vif, iwl_mvm_mld_update_sta_key, - &data); + ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mld_update_sta_key, + &data); return data.err; } @@ -402,7 +402,7 @@ void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm, if (!sec_key_ver) return; - ieee80211_iter_keys_rcu(mvm->hw, vif, - iwl_mvm_sec_key_remove_ap_iter, - (void *)(uintptr_t)link_id); + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_sec_key_remove_ap_iter, + (void *)(uintptr_t)link_id); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 3c99396ad36922..f2378e0fb2fb38 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -223,7 +223,7 @@ static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm, spin_unlock_bh(&mvmsta->mpdu_counters[q].lock); } - IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n"); + IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n"); } static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, @@ -269,6 +269,9 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, */ iwl_mvm_restart_mpdu_count(mvm, mvmvif); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP, + NULL); + return ret; } @@ -456,6 +459,9 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, /* Start a new counting window */ iwl_mvm_restart_mpdu_count(mvm, mvmvif); + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_DOWN, + NULL); + return ret; } @@ -1335,6 +1341,22 @@ iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw, else selected = primary; + /* + * remembers to tell the firmware that this link can't tx + * Note that this 
logic seems to be unrelated to esr, but it + * really is needed only when esr is active. When we have a + * single link, the firmware will handle all this on its own. + * In multi-link scenarios, we can learn about the CSA from + * another link and this logic is too complex for the firmware + * to track. + * Since we want to de-activate the link that got a CSA, we + * need to tell the firmware not to send any frame on that link + * as the firmware may not be aware that link is under a CSA + * with mode=1 (no Tx allowed). + */ + if (chsw->block_tx && mvmvif->link[chsw->link_id]) + mvmvif->link[chsw->link_id]->csa_block_tx = true; + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected); mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index d5a204e520764b..28a9d90ad1cd11 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -46,7 +46,7 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm, - struct iwl_mvm_sta_cfg_cmd *cmd) + struct iwl_sta_cfg_cmd *cmd) { int ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD), @@ -63,7 +63,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, const u8 *addr, int link_id) { - struct iwl_mvm_sta_cfg_cmd cmd; + struct iwl_sta_cfg_cmd cmd; lockdep_assert_held(&mvm->mutex); @@ -94,7 +94,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm, */ static int iwl_mvm_mld_rm_sta_from_fw(struct iwl_mvm *mvm, u32 sta_id) { - struct iwl_mvm_remove_sta_cmd rm_sta_cmd = { + struct iwl_remove_sta_cmd rm_sta_cmd = { .sta_id = cpu_to_le32(sta_id), }; int ret; @@ -216,7 +216,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); + iwl_mvm_get_wd_timeout(mvm, vif); u16 *queue; lockdep_assert_held(&mvm->mutex); @@ -254,7 +254,7 @@ int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *msta = &mvm_link->mcast_sta; static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; const u8 *maddr = _maddr; - unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); + unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif); lockdep_assert_held(&mvm->mutex); @@ -438,7 +438,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif_link_info *link_info = mvm_vif->link[link_conf->link_id]; - struct iwl_mvm_sta_cfg_cmd cmd = { + struct iwl_sta_cfg_cmd cmd = { .sta_id = cpu_to_le32(mvm_link_sta->sta_id), .station_type = cpu_to_le32(mvm_sta->sta_type), }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 22f48b66d79c77..ef07cff203b0d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -299,6 +299,7 @@ struct iwl_probe_resp_data { * @active: indicates the link is active in FW (for sanity checking) * @cab_queue: content-after-beacon (multicast) queue * @listen_lmac: indicates this link is allocated to the listen LMAC + * @csa_block_tx: we got CSA with mode=1 * @mcast_sta: multicast station * @phy_ctxt: 
phy context allocated to this link, if any * @bf_data: beacon filtering data @@ -324,6 +325,7 @@ struct iwl_mvm_vif_link_info { bool he_ru_2mhz_block; bool active; bool listen_lmac; + bool csa_block_tx; u16 cab_queue; /* Assigned while mac80211 has the link in a channel context, @@ -368,6 +370,7 @@ struct iwl_mvm_vif_link_info { * preventing the enablement of EMLSR * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link + * @IWL_MVM_ESR_EXIT_FAIL_ENTRY: Exit EMLSR due to entry failure */ enum iwl_mvm_esr_state { IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1, @@ -382,6 +385,7 @@ enum iwl_mvm_esr_state { IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000, IWL_MVM_ESR_EXIT_CSA = 0x100000, IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000, + IWL_MVM_ESR_EXIT_FAIL_ENTRY = 0x400000, }; #define IWL_MVM_BLOCK_ESR_REASONS 0xffff @@ -508,7 +512,7 @@ struct iwl_mvm_vif { bool bf_enabled; bool ba_enabled; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /* WoWLAN GTK rekey data */ struct { u8 kck[NL80211_KCK_EXT_LEN]; @@ -770,7 +774,6 @@ struct iwl_mvm_tcm { * @num_stored: number of mpdus stored in the buffer * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection - * @last_sub_index: track ASMDU sub frame index for duplication detection * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state */ @@ -779,7 +782,6 @@ struct iwl_mvm_reorder_buffer { u16 num_stored; int queue; u16 last_amsdu; - u8 last_sub_index; bool valid; spinlock_t lock; } ____cacheline_aligned_in_smp; @@ -1074,8 +1076,8 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; - struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX]; - struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX]; + struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_STATION_COUNT_MAX]; + struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX]; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -1164,7 +1166,7 @@ struct iwl_mvm { struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; - struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_MVM_FW_MAX_LINK_ID + 1]; + struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_FW_MAX_LINK_ID + 1]; /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; @@ -1176,7 +1178,7 @@ struct iwl_mvm { struct ieee80211_vif *p2p_device_vif; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; @@ -1200,8 +1202,11 @@ struct iwl_mvm { wait_queue_head_t rx_sync_waitq; - /* BT-Coex */ - struct iwl_bt_coex_profile_notif last_bt_notif; + /* BT-Coex - only one of those will be used */ + union { + struct iwl_bt_coex_prof_old_notif last_bt_notif; + struct iwl_bt_coex_profile_notif last_bt_wifi_loss; + }; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; u8 bt_tx_prio; @@ -1365,6 +1370,7 @@ struct iwl_mvm { struct iwl_mvm_acs_survey *acs_survey; bool statistics_clear; + u32 bios_enable_puncturing; }; /* Extract MVM priv from op_mode and _hw */ @@ -1700,9 +1706,9 @@ static inline struct agg_tx_status * iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) { if (iwl_mvm_has_new_tx_api(mvm)) - return &((struct iwl_mvm_tx_resp *)tx_resp)->status; + return &((struct iwl_tx_resp *)tx_resp)->status; else - return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status; + return ((struct iwl_tx_resp_v3 *)tx_resp)->status; } static inline bool 
iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) @@ -1745,7 +1751,7 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, if (iwl_mvm_is_esr_supported(trans) || (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && CSR_HW_RFID_IS_CDB(trans->hw_rf_id))) - return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM; + return IWL_FW_MAX_ACTIVE_LINKS_NUM; return 1; } @@ -1764,6 +1770,13 @@ static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, return iwl_mvm_ac_to_tx_fifo[ac]; } +static inline bool iwl_mvm_has_rlc_offload(struct iwl_mvm *mvm) +{ + return iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), + 0) >= 3; +} + struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ @@ -2003,7 +2016,7 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, + struct iwl_mvm_vif_link_info *link_info, __le32 *cck_rates, __le32 *ofdm_rates); void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -2062,6 +2075,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, @@ -2291,7 +2306,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); extern const struct file_operations iwl_dbgfs_d3_test_ops; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_fast_suspend(struct iwl_mvm *mvm); @@ -2322,6 +2337,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); +void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -2570,8 +2587,7 @@ u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool tdls, bool cmd_q); + struct ieee80211_vif *vif); void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 836ca22597bc1e..80ec59c58ae4d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -611,6 +611,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, char mcc[3]; struct ieee80211_regdomain *regd; int wgds_tbl_idx; + bool changed = false; lockdep_assert_held(&mvm->mutex); @@ -630,10 +631,15 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, IWL_DEBUG_LAR(mvm, "RX: received chub update mcc cmd (mcc '%s' src %d)\n", mcc, src); - regd = 
iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); + regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, &changed); if (IS_ERR_OR_NULL(regd)) return; + if (!changed) { + IWL_DEBUG_LAR(mvm, "RX: No change in the regulatory data\n"); + goto out; + } + wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (wgds_tbl_idx < 1) IWL_DEBUG_INFO(mvm, @@ -644,5 +650,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, wgds_tbl_idx); regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); + +out: kfree(regd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index b9daaffd9c7f53..4dd4a9d5c71fc7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -159,6 +159,43 @@ static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm, iwl_mvm_get_primary_link(vif)); } +static void iwl_mvm_rx_esr_trans_fail_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_esr_trans_fail_notif *notif = (void *)pkt->data; + struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + u8 fw_link_id = le32_to_cpu(notif->link_id); + struct ieee80211_bss_conf *bss_conf; + + if (IS_ERR_OR_NULL(vif)) + return; + + IWL_DEBUG_INFO(mvm, "Failed to %s eSR on link %d, reason %d\n", + le32_to_cpu(notif->activation) ? "enter" : "exit", + le32_to_cpu(notif->link_id), + le32_to_cpu(notif->err_code)); + + /* we couldn't go back to single link, disconnect */ + if (!le32_to_cpu(notif->activation)) { + iwl_mvm_connection_loss(mvm, vif, "emlsr exit failed"); + return; + } + + bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, fw_link_id, false); + if (IWL_FW_CHECK(mvm, !bss_conf, + "FW reported failure to activate EMLSR on a non-existing link: %d\n", + fw_link_id)) + return; + + /* + * We failed to activate the second link and enter EMLSR, we need to go + * back to single link. + */ + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_FAIL_ENTRY, + bss_conf->link_id); +} + static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { @@ -261,6 +298,12 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_thermal_dual_chain_request *req = (void *)pkt->data; + /* firmware is expected to handle that in RLC offload mode */ + if (IWL_FW_CHECK(mvm, iwl_mvm_has_rlc_offload(mvm), + "Got THERMAL_DUAL_CHAIN_REQUEST (0x%x) in RLC offload mode\n", + req->event)) + return; + /* * We could pass it to the iterator data, but also need to remember * it for new interfaces that are added while in this state. 
@@ -325,7 +368,7 @@ struct iwl_rx_handlers { */ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC, - struct iwl_mvm_tx_resp), + struct iwl_tx_resp), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC, struct iwl_mvm_ba_notif), @@ -333,9 +376,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC, struct iwl_tlc_update_notif), - RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, + RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif, RX_HANDLER_ASYNC_LOCKED_WIPHY, - struct iwl_bt_coex_profile_notif), + struct iwl_bt_coex_prof_old_notif), + RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_bt_coex_profile_notif), RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, @@ -385,10 +431,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_umac_scan_iter_complete_notif), - RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, + RX_HANDLER(MISSED_BEACONS_NOTIFICATION, + iwl_mvm_rx_missed_beacons_notif_legacy, RX_HANDLER_ASYNC_LOCKED_WIPHY, - struct iwl_missed_beacons_notif), + struct iwl_missed_beacons_notif_v4), + RX_HANDLER_GRP(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF, + iwl_mvm_rx_missed_beacons_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_missed_beacons_notif), RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC, struct iwl_error_resp), RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, @@ -472,6 +523,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF, iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_channel_survey_notif), + RX_HANDLER_GRP(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF, + iwl_mvm_rx_esr_trans_fail_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_esr_trans_fail_notif), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -602,6 +657,7 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(STA_REMOVE_CMD), HCMD_NAME(STA_DISABLE_TX_CMD), HCMD_NAME(ROC_CMD), + HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF), HCMD_NAME(ROC_NOTIF), HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF), HCMD_NAME(MISSED_VAP_NOTIF), @@ -713,6 +769,13 @@ static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = { HCMD_NAME(TAS_CONFIG), }; +/* Please keep this array *SORTED* by hex value. 
+ * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = { + HCMD_NAME(PROFILE_NOTIF), +}; + static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), @@ -722,6 +785,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names), [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names), + [BT_COEX_GROUP] = HCMD_ARR(iwl_mvm_bt_coex_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), @@ -1223,12 +1287,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; /* - * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station + * We use IWL_STATION_COUNT_MAX to check the validity of the station * index all over the driver - check that its value corresponds to the * array size. */ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != - IWL_MVM_STATION_COUNT_MAX); + IWL_STATION_COUNT_MAX); /******************************** * 1. Allocating and configuring HW data @@ -1287,6 +1351,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; + mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt); if (iwl_mvm_has_new_tx_api(mvm)) { /* @@ -1386,10 +1451,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, driver_data[2]); - /* Set a short watchdog for the command queue */ - trans_cfg.cmd_q_wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, NULL, false, true); - snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), "%.31s", fw->fw_version); @@ -2094,6 +2155,7 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode, iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); } +#ifdef CONFIG_PM_SLEEP static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -2102,11 +2164,13 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; iwl_mvm_stop_device(mvm); -#ifdef CONFIG_PM mvm->fast_resume = false; -#endif mutex_unlock(&mvm->mutex); } +#else +static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) +{} +#endif #define IWL_MVM_COMMON_OPS \ /* these could be differentiated */ \ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index ce264b386029ca..7cab5373c8ae14 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -159,7 +159,11 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, .phy_id = cpu_to_le32(ctxt->id), }; - if (ctxt->rlc_disabled) + /* From version 3, RLC is offloaded to firmware, so the driver no + * longer needs to send cmd.rlc, note that we are not using any + * other fields in the command - don't send it. 
+ */ + if (iwl_mvm_has_rlc_offload(mvm) || ctxt->rlc_disabled) return 0; if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 151289e1330802..1a0b5f8d43390e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -4,7 +4,7 @@ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ -#include +#include #include #include #include "iwl-trans.h" @@ -738,8 +738,8 @@ static void iwl_mvm_stats_energy_iter(void *_data, u8 *energy = _data; u32 sta_id = mvmsta->deflink.sta_id; - if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT_MAX, "sta_id %d >= %d", - sta_id, IWL_MVM_STATION_COUNT_MAX)) + if (WARN_ONCE(sta_id >= IWL_STATION_COUNT_MAX, "sta_id %d >= %d", + sta_id, IWL_STATION_COUNT_MAX)) return; if (energy[sta_id]) @@ -991,7 +991,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) spin_lock_bh(&mvmsta->mpdu_counters[q].lock); /* The link IDs that doesn't exist will contain 0 */ - for (int link = 0; link < IWL_MVM_FW_MAX_LINK_ID; link++) { + for (int link = 0; link < IWL_FW_MAX_LINK_ID; link++) { total_tx += mvmsta->mpdu_counters[q].per_link[link].tx; total_rx += mvmsta->mpdu_counters[q].per_link[link].rx; } @@ -1009,8 +1009,8 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) spin_unlock_bh(&mvmsta->mpdu_counters[q].lock); } - IWL_DEBUG_STATS(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n", - total_tx, total_rx); + IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n", + total_tx, total_rx); /* If we don't have enough MPDUs - exit EMLSR */ if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH && @@ -1020,6 +1020,9 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) return; } + IWL_DEBUG_INFO(mvm, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n", + sec_link, sec_link_tx, sec_link_rx); + /* Calculate the percentage of the secondary link TX/RX */ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0; sec_link_rx_perc = total_rx ? 
sec_link_rx * 100 / total_rx : 0; @@ -1039,7 +1042,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { - u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; + u8 average_energy[IWL_STATION_COUNT_MAX]; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_system_statistics_notif_oper *stats; int i; @@ -1098,7 +1101,7 @@ static void iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; + u8 average_energy[IWL_STATION_COUNT_MAX]; __le32 air_time[MAC_INDEX_AUX]; __le32 rx_bytes[MAC_INDEX_AUX]; __le32 flags = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 1a210d0c22b3c5..65f8933c34b420 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -729,8 +729,6 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; u8 tid = ieee80211_get_tid(hdr); - u8 sub_frame_idx = desc->amsdu_info & - IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; struct iwl_mvm_reorder_buf_entry *entries; u32 sta_mask; int index; @@ -843,10 +841,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, __skb_queue_tail(&entries[index].frames, skb); buffer->num_stored++; - if (amsdu) { + if (amsdu) buffer->last_amsdu = sn; - buffer->last_sub_index = sub_frame_idx; - } /* * We cannot trust NSSN for AMSDU sub-frames that are not the last. @@ -2542,7 +2538,7 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, goto out; } - if (WARN(tid != baid_data->tid || sta_id > IWL_MVM_STATION_COUNT_MAX || + if (WARN(tid != baid_data->tid || sta_id > IWL_STATION_COUNT_MAX || !(baid_data->sta_mask & BIT(sta_id)), "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n", baid, baid_data->sta_mask, baid_data->tid, sta_id, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 1cc9c426bb1599..3ce9150213a744 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1594,7 +1594,7 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, for (i = 0; i < n_channels; i++) { channel_cfg[i].flags = cpu_to_le32(flags); - channel_cfg[i].v1.channel_num = channels[i]->hw_value; + channel_cfg[i].channel_num = channels[i]->hw_value; if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { enum nl80211_band band = channels[i]->band; @@ -1626,13 +1626,13 @@ iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm, &cp->channel_config[i]; cfg->flags = cpu_to_le32(flags); - cfg->v2.channel_num = channels[i]->hw_value; + cfg->channel_num = channels[i]->hw_value; cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band); cfg->v2.iter_count = 1; cfg->v2.iter_interval = 0; iwl_mvm_scan_ch_add_n_aps_override(vif_type, - cfg->v2.channel_num, + cfg->channel_num, cfg->v2.band, bitmap, bitmap_n_entries); } @@ -1656,7 +1656,7 @@ iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm, u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band); cfg->flags = cpu_to_le32(flags | n_aps_flag); - cfg->v2.channel_num = channels[i]->hw_value; + cfg->channel_num = channels[i]->hw_value; if (cfg80211_channel_is_psc(channels[i])) cfg->flags = 0; @@ -1789,7 +1789,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, !params->n_6ghz_params && params->n_ssids) continue; - 
cfg->v1.channel_num = params->channels[i]->hw_value; + cfg->channel_num = params->channels[i]->hw_value; if (version < 17) cfg->v2.band = PHY_BAND_6; else @@ -2477,7 +2477,7 @@ iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm, if (!cfg80211_channel_is_psc(channel)) continue; - cfg->v5.channel_num = channel->hw_value; + cfg->channel_num = channel->hw_value; cfg->v5.iter_count = 1; cfg->v5.iter_interval = 0; @@ -3313,13 +3313,23 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, mvm->scan_start); } -static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) +static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type, bool *wait) { - struct iwl_umac_scan_abort cmd = {}; + struct iwl_umac_scan_abort abort_cmd = {}; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC), + .len = { sizeof(abort_cmd), }, + .data = { &abort_cmd, }, + .flags = CMD_SEND_IN_RFKILL, + }; + int uid, ret; + u32 status = IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND; lockdep_assert_held(&mvm->mutex); + *wait = true; + /* We should always get a valid index here, because we already * checked that this type of scan was running in the generic * code. @@ -3328,17 +3338,28 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) if (WARN_ON_ONCE(uid < 0)) return uid; - cmd.uid = cpu_to_le32(uid); + abort_cmd.uid = cpu_to_le32(uid); IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); - ret = iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC), - CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); + + IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d, status=%u\n", ret, status); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; - IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret); + /* Handle the case that the FW is no longer familiar with the scan that + * is to be stopped. In such a case, it is expected that the scan + * complete notification was already received but not yet processed. + * In such a case, there is no need to wait for a scan complete + * notification and the flow should continue similar to the case that + * the scan was really aborted. 
+ */ + if (status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND) { + mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; + *wait = false; + } + return ret; } @@ -3348,6 +3369,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, SCAN_OFFLOAD_COMPLETE, }; int ret; + bool wait = true; lockdep_assert_held(&mvm->mutex); @@ -3359,7 +3381,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - ret = iwl_mvm_umac_scan_abort(mvm, type); + ret = iwl_mvm_umac_scan_abort(mvm, type, &wait); else ret = iwl_mvm_lmac_scan_abort(mvm); @@ -3367,6 +3389,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); return ret; + } else if (!wait) { + IWL_DEBUG_SCAN(mvm, "no need to wait for scan type %d\n", type); + iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); + return 0; } return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 15e64d94d6ea79..b6c99cd6d9e5df 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -29,7 +29,7 @@ int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) int sta_id; u32 reserved_ids = 0; - BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32); + BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32); WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); lockdep_assert_held(&mvm->mutex); @@ -900,7 +900,7 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif); int queue = -1; lockdep_assert_held(&mvm->mutex); @@ -1080,7 +1080,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) return; mvmsta = iwl_mvm_sta_from_mac80211(sta); - wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif); ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); @@ -1330,7 +1330,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif); int queue = -1; u16 queue_tmp; unsigned long disable_agg_tids = 0; @@ -1622,7 +1622,7 @@ void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int wdg = - iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); + iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif); int i; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvm_sta->deflink.sta_id, @@ -2359,7 +2359,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int queue; int ret; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); + iwl_mvm_get_wd_timeout(mvm, vif); struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_VO, .sta_id = mvmvif->deflink.bcast_sta.sta_id, @@ -2568,7 +2568,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) .aggregate = false, 
.frame_limit = IWL_FRAME_LIMIT, }; - unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); + unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif); int ret; lockdep_assert_held(&mvm->mutex); @@ -3207,7 +3207,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); + iwl_mvm_get_wd_timeout(mvm, vif); int queue, ret; bool alloc_queue = true; enum iwl_mvm_queue_status queue_status; @@ -4326,7 +4326,7 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u16 queue; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); + iwl_mvm_get_wd_timeout(mvm, vif); bool mld = iwl_mvm_has_mld_api(mvm->fw); u32 type = mld ? STATION_TYPE_PEER : IWL_STA_LINK; @@ -4455,10 +4455,10 @@ void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count, sizeof(queue_counter->per_link)); queue_counter->window_start = jiffies; - IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n"); + IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n"); } - for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++) + for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++) total_mpdus += tx ? queue_counter->per_link[i].tx : queue_counter->per_link[i].rx; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 0dc83d6afb3c56..4a3799ae7c184d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -12,7 +12,7 @@ #include #include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */ -#include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */ +#include "fw-api.h" /* IWL_STATION_COUNT_MAX */ #include "rs.h" struct iwl_mvm; @@ -361,7 +361,7 @@ struct iwl_mvm_mpdu_counter { */ struct iwl_mvm_tpt_counter { spinlock_t lock; - struct iwl_mvm_mpdu_counter per_link[IWL_MVM_FW_MAX_LINK_ID]; + struct iwl_mvm_mpdu_counter per_link[IWL_FW_MAX_LINK_ID]; unsigned long window_start; } ____cacheline_aligned_in_smp; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index a8c42ce3b63008..72fa7ac86516cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -114,16 +114,14 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id, mvm->aux_sta.tfd_queue_msk); - if (mvm->mld_api_is_used) { - iwl_mvm_mld_rm_aux_sta(mvm); - mutex_unlock(&mvm->mutex); - return; - } - /* In newer version of this command an aux station is added only * in cases of dedicated tx queue and need to be removed in end - * of use */ - if (iwl_mvm_has_new_station_api(mvm->fw)) + * of use. For the even newer mld api, use the appropriate + * function. 
+ */ + if (mvm->mld_api_is_used) + iwl_mvm_mld_rm_aux_sta(mvm); + else if (iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 7ff5ea5e7aca57..ca026b5256ce33 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1203,6 +1203,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, bool is_ampdu = false; int hdrlen; + if (WARN_ON_ONCE(!sta)) + return -1; + mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); @@ -1210,9 +1213,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; - if (WARN_ON_ONCE(!mvmsta)) - return -1; - if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) return -1; @@ -1343,7 +1343,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta; struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; struct ieee80211_vif *vif; @@ -1352,9 +1352,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct sk_buff *orig_skb = skb; const u8 *addr3; - if (WARN_ON_ONCE(!mvmsta)) + if (WARN_ON_ONCE(!sta)) return -1; + mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) return -1; @@ -1678,7 +1680,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, * For 22000-series and lower, this is just 12 bits. For later, 16 bits. 
*/ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, - struct iwl_mvm_tx_resp *tx_resp) + struct iwl_tx_resp *tx_resp) { u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + tx_resp->frame_count); @@ -1694,8 +1696,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct ieee80211_sta *sta; u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); - /* struct iwl_mvm_tx_resp_v3 is almost the same */ - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + /* struct iwl_tx_resp_v3 is almost the same */ + struct iwl_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); struct agg_tx_status *agg_status = @@ -1952,7 +1954,7 @@ static const char *iwl_get_agg_tx_status(u16 status) static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + struct iwl_tx_resp *tx_resp = (void *)pkt->data; struct agg_tx_status *frame_status = iwl_mvm_get_agg_status(mvm, tx_resp); int i; @@ -1986,7 +1988,7 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + struct iwl_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); u16 sequence = le16_to_cpu(pkt->hdr.sequence); @@ -2027,7 +2029,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + struct iwl_tx_resp *tx_resp = (void *)pkt->data; if (tx_resp->frame_count == 1) iwl_mvm_rx_tx_cmd_single(mvm, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 0e5fa8374103f4..1d1364d03f028d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -297,6 +297,10 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (vif->type != NL80211_IFTYPE_STATION) return; + /* SMPS is handled by firmware */ + if (iwl_mvm_has_rlc_offload(mvm)) + return; + mvmvif = iwl_mvm_vif_from_mac80211(vif); if (WARN_ON_ONCE(!mvmvif->link[link_id])) @@ -743,58 +747,20 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm) } unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool tdls, bool cmd_q) + struct ieee80211_vif *vif) { - struct iwl_fw_dbg_trigger_tlv *trigger; - struct iwl_fw_dbg_trigger_txq_timer *txq_timer; - unsigned int default_timeout = cmd_q ? - IWL_DEF_WD_TIMEOUT : + unsigned int default_timeout = mvm->trans->trans_cfg->base_params->wd_timeout; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { - /* - * We can't know when the station is asleep or awake, so we - * must disable the queue hang detection. 
- */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && - vif && vif->type == NL80211_IFTYPE_AP) - return IWL_WATCHDOG_DISABLED; - return default_timeout; - } - - trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); - txq_timer = (void *)trigger->data; - - if (tdls) - return le32_to_cpu(txq_timer->tdls); - - if (cmd_q) - return le32_to_cpu(txq_timer->command_queue); - - if (WARN_ON(!vif)) - return default_timeout; - - switch (ieee80211_vif_type_p2p(vif)) { - case NL80211_IFTYPE_ADHOC: - return le32_to_cpu(txq_timer->ibss); - case NL80211_IFTYPE_STATION: - return le32_to_cpu(txq_timer->bss); - case NL80211_IFTYPE_AP: - return le32_to_cpu(txq_timer->softap); - case NL80211_IFTYPE_P2P_CLIENT: - return le32_to_cpu(txq_timer->p2p_client); - case NL80211_IFTYPE_P2P_GO: - return le32_to_cpu(txq_timer->p2p_go); - case NL80211_IFTYPE_P2P_DEVICE: - return le32_to_cpu(txq_timer->p2p_device); - case NL80211_IFTYPE_MONITOR: - return default_timeout; - default: - WARN_ON(1); - return mvm->trans->trans_cfg->base_params->wd_timeout; - } + /* + * We can't know when the station is asleep or awake, so we + * must disable the queue hang detection. + */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && + vif->type == NL80211_IFTYPE_AP) + return IWL_WATCHDOG_DISABLED; + return default_timeout; } void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 84fd93278450b0..805fb249a0c6a2 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -500,9 +500,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, /* Bz devices */ - {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 18dda89b7985b4..8903a5692dfbbd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -526,6 +526,8 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, keep_ram_busy = !iwl_pcie_set_ltr(trans); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n", + iwl_read32(trans, CSR_FUNC_SCRATCH)); iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_ROM_START); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 719ddc4b72c52c..3b9943eb69341e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1967,7 +1967,6 @@ void iwl_trans_pcie_configure(struct iwl_trans *trans, trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue; trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo; - trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs; 
trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; @@ -3567,6 +3566,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie); + /* Set a short watchdog for the command queue */ + trans_pcie->txqs.cmd.wdg_timeout = IWL_DEF_WD_TIMEOUT; + trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); if (!trans_pcie->txqs.tso_hdr_page) { ret = -ENOMEM; diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index b700c213d10c4f..afe9bcd3ad46d7 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include "decl.h" #include "cfg.h" diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h index 3c193074662b62..d7be232f573940 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.h +++ b/drivers/net/wireless/marvell/libertas/cmd.h @@ -116,11 +116,6 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, int8_t p2, int usesnr); -int lbs_set_data_rate(struct lbs_private *priv, u8 rate); - -int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv, - uint16_t cmd_action); - int lbs_set_tx_power(struct lbs_private *priv, s16 dbm); int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep); diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index 74cb7551f4275c..f2aa659e77148e 100644 --- a/drivers/net/wireless/marvell/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include "cfg.h" diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h index 631b5da09f86a0..a5d4c09fb91899 100644 --- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h +++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h @@ -484,12 +484,9 @@ void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd, void lbtf_cmd_response_rx(struct lbtf_private *priv); /* main.c */ -struct chan_freq_power *lbtf_get_region_cfp_table(u8 region, - int *cfp_no); struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev, const struct lbtf_ops *ops); int lbtf_remove_card(struct lbtf_private *priv); -int lbtf_start_card(struct lbtf_private *priv); int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb); void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail); void lbtf_bcn_sent(struct lbtf_private *priv); diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c index b90f922f1cdc63..032b93a41d9956 100644 --- a/drivers/net/wireless/marvell/mwifiex/11h.c +++ b/drivers/net/wireless/marvell/mwifiex/11h.c @@ -117,12 +117,12 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work) dfs_cac_work); chandef = priv->dfs_chandef; - if (priv->wdev.cac_started) { + if (priv->wdev.links[0].cac_started) { mwifiex_dbg(priv->adapter, MSG, "CAC timer finished; No radar detected\n"); cfg80211_cac_event(priv->netdev, &chandef, NL80211_RADAR_CAC_FINISHED, - GFP_KERNEL); + GFP_KERNEL, 0); } } @@ -174,7 +174,7 @@ int mwifiex_stop_radar_detection(struct 
mwifiex_private *priv, */ void mwifiex_abort_cac(struct mwifiex_private *priv) { - if (priv->wdev.cac_started) { + if (priv->wdev.links[0].cac_started) { if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef)) mwifiex_dbg(priv->adapter, ERROR, "failed to stop CAC in FW\n"); @@ -182,7 +182,8 @@ void mwifiex_abort_cac(struct mwifiex_private *priv) "Aborting delayed work for CAC.\n"); cancel_delayed_work_sync(&priv->dfs_cac_work); cfg80211_cac_event(priv->netdev, &priv->dfs_chandef, - NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); + NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, + 0); } } @@ -221,7 +222,7 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv, cfg80211_cac_event(priv->netdev, &priv->dfs_chandef, NL80211_RADAR_DETECTED, - GFP_KERNEL); + GFP_KERNEL, 0); } break; default: diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index c0c635e74bc505..66f0f5377ac181 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -881,8 +881,6 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter) struct mwifiex_private *priv; for (i = 0; i < adapter->priv_num; i++) { - if (!adapter->priv[i]) - continue; priv = adapter->priv[i]; tx_win_size = priv->add_ba_param.tx_win_size; diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h index 7738ebe1fec172..773bd5c0f007da 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.h +++ b/drivers/net/wireless/marvell/mwifiex/11n.h @@ -108,9 +108,7 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream( for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv) - ba_stream_num += list_count_nodes( - &priv->tx_ba_stream_tbl_ptr); + ba_stream_num += list_count_nodes(&priv->tx_ba_stream_tbl_ptr); } if (adapter->fw_api_ver == MWIFIEX_FW_V15) { diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 10690e82358b89..cb948ca343736f 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -810,8 +810,6 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags) for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (!priv) - continue; spin_lock_bh(&priv->rx_reorder_tbl_lock); list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) @@ -834,8 +832,6 @@ static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter, dev_dbg(adapter->dev, "Update rxwinsize %d\n", coex_flag); for (i = 0; i < adapter->priv_num; i++) { - if (!adapter->priv[i]) - continue; priv = adapter->priv[i]; rx_win_size = priv->add_ba_param.rx_win_size; if (coex_flag) { @@ -882,17 +878,16 @@ void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter) u8 count = 0; for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - priv = adapter->priv[i]; - if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) { - if (priv->media_connected) - count++; - } - if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { - if (priv->bss_started) - count++; - } + priv = adapter->priv[i]; + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) { + if (priv->media_connected) + count++; } + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { + if (priv->bss_started) + count++; + } + if (count >= MWIFIEX_BSS_COEX_COUNT) break; } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 
bf35c92f91d7e9..fca3eea7ee8421 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -221,6 +221,26 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, return 0; } + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { + if (ieee80211_is_auth(mgmt->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "auth: send auth to %pM\n", mgmt->da); + if (ieee80211_is_deauth(mgmt->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "auth: send deauth to %pM\n", mgmt->da); + if (ieee80211_is_disassoc(mgmt->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "assoc: send disassoc to %pM\n", mgmt->da); + if (ieee80211_is_assoc_resp(mgmt->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "assoc: send assoc resp to %pM\n", + mgmt->da); + if (ieee80211_is_reassoc_resp(mgmt->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "assoc: send reassoc resp to %pM\n", + mgmt->da); + } + pkt_len = len + ETH_ALEN; skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN + MWIFIEX_MGMT_FRAME_HEADER_SIZE + @@ -268,6 +288,8 @@ mwifiex_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy, if (mask != priv->mgmt_frame_mask) { priv->mgmt_frame_mask = mask; + if (priv->host_mlme_reg) + priv->mgmt_frame_mask |= HOST_MLME_MGMT_MASK; mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask, false); @@ -503,6 +525,9 @@ mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy, wiphy_dbg(wiphy, "set default mgmt key, key index=%d\n", key_index); + if (priv->adapter->host_mlme_enabled) + return 0; + memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key)); encrypt_key.key_len = WLAN_KEY_LEN_CCMP; encrypt_key.key_index = key_index; @@ -848,6 +873,7 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv) struct mwifiex_adapter *adapter = priv->adapter; unsigned long flags; + priv->host_mlme_reg = false; priv->mgmt_frame_mask = 0; if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, HostCmd_ACT_GEN_SET, 0, @@ -1880,7 +1906,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_sta_node *sta_node; u8 deauth_mac[ETH_ALEN]; - if (!priv->bss_started && priv->wdev.cac_started) { + if (!priv->bss_started && priv->wdev.links[0].cac_started) { mwifiex_dbg(priv->adapter, INFO, "%s: abort CAC!\n", __func__); mwifiex_abort_cac(priv); } @@ -3482,7 +3508,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) + if (priv->netdev) netif_device_detach(priv->netdev); } @@ -3554,7 +3580,7 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) + if (priv->netdev) netif_device_attach(priv->netdev); } @@ -3633,6 +3659,9 @@ static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev, if (!ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info)) return -EOPNOTSUPP; + if (priv->adapter->host_mlme_enabled) + return 0; + return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG, HostCmd_ACT_GEN_SET, 0, data, true); } @@ -3947,12 +3976,43 @@ mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy, } } +static int +mwifiex_cfg80211_uap_add_station(struct mwifiex_private *priv, const u8 *mac, + struct station_parameters *params) +{ + struct mwifiex_sta_info add_sta; + int ret; + + memcpy(add_sta.peer_mac, mac, ETH_ALEN); + add_sta.params = params; + + 
ret = mwifiex_send_cmd(priv, HostCmd_CMD_ADD_NEW_STATION, + HostCmd_ACT_ADD_STA, 0, (void *)&add_sta, true); + + if (!ret) { + struct station_info *sinfo; + + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + + cfg80211_new_sta(priv->netdev, mac, sinfo, GFP_KERNEL); + kfree(sinfo); + } + + return ret; +} + static int mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + if (priv->adapter->host_mlme_enabled && + (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)) + return mwifiex_cfg80211_uap_add_station(priv, mac, params); + if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) return -EOPNOTSUPP; @@ -3978,7 +4038,7 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, return -EBUSY; } - if (priv->wdev.cac_started) + if (priv->wdev.links[0].cac_started) return -EBUSY; if (cfg80211_chandef_identical(&params->chandef, @@ -4145,7 +4205,7 @@ static int mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef, - u32 cac_time_ms) + u32 cac_time_ms, int link_id) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct mwifiex_radar_params radar_params; @@ -4190,6 +4250,10 @@ mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev, int ret; struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + if (priv->adapter->host_mlme_enabled && + (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)) + return 0; + /* we support change_station handler only for TDLS peers*/ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) return -EOPNOTSUPP; @@ -4206,8 +4270,307 @@ mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev, return ret; } +static int +mwifiex_cfg80211_authenticate(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_auth_request *req) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct mwifiex_adapter *adapter = priv->adapter; + struct sk_buff *skb; + u16 pkt_len, auth_alg; + int ret; + struct mwifiex_ieee80211_mgmt *mgmt; + struct mwifiex_txinfo *tx_info; + u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT; + u8 trans = 1, status_code = 0; + u8 *varptr = NULL; + + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { + mwifiex_dbg(priv->adapter, ERROR, "Interface role is AP\n"); + return -EFAULT; + } + + if (priv->wdev.iftype != NL80211_IFTYPE_STATION) { + mwifiex_dbg(priv->adapter, ERROR, + "Interface type is not correct (type %d)\n", + priv->wdev.iftype); + return -EINVAL; + } + + if (priv->auth_alg != WLAN_AUTH_SAE && + (priv->auth_flag & HOST_MLME_AUTH_PENDING)) { + mwifiex_dbg(priv->adapter, ERROR, "Pending auth on going\n"); + return -EBUSY; + } + + if (!priv->host_mlme_reg) { + priv->host_mlme_reg = true; + priv->mgmt_frame_mask |= HOST_MLME_MGMT_MASK; + mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, + HostCmd_ACT_GEN_SET, 0, + &priv->mgmt_frame_mask, false); + } + + switch (req->auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + auth_alg = WLAN_AUTH_OPEN; + break; + case NL80211_AUTHTYPE_SHARED_KEY: + auth_alg = WLAN_AUTH_SHARED_KEY; + break; + case NL80211_AUTHTYPE_FT: + auth_alg = WLAN_AUTH_FT; + break; + case NL80211_AUTHTYPE_NETWORK_EAP: + auth_alg = WLAN_AUTH_LEAP; + break; + case NL80211_AUTHTYPE_SAE: + auth_alg = WLAN_AUTH_SAE; + break; + default: + mwifiex_dbg(priv->adapter, ERROR, + "unsupported auth type=%d\n", req->auth_type); + return 
-EOPNOTSUPP; + } + + if (!priv->auth_flag) { + ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, + req->bss->channel, + AUTH_TX_DEFAULT_WAIT_TIME); + + if (!ret) { + priv->roc_cfg.cookie = get_random_u32() | 1; + priv->roc_cfg.chan = *req->bss->channel; + } else { + return -EFAULT; + } + } + + priv->sec_info.authentication_mode = auth_alg; + + mwifiex_cancel_scan(adapter); + + pkt_len = (u16)req->ie_len + req->auth_data_len + + MWIFIEX_MGMT_HEADER_LEN + MWIFIEX_AUTH_BODY_LEN; + if (req->auth_data_len >= 4) + pkt_len -= 4; + + skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + + pkt_len + sizeof(pkt_len)); + if (!skb) { + mwifiex_dbg(priv->adapter, ERROR, + "allocate skb failed for management frame\n"); + return -ENOMEM; + } + + tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); + tx_info->bss_num = priv->bss_num; + tx_info->bss_type = priv->bss_type; + tx_info->pkt_len = pkt_len; + + skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len)); + memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len)); + memcpy(skb_push(skb, sizeof(tx_control)), + &tx_control, sizeof(tx_control)); + memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type)); + + mgmt = (struct mwifiex_ieee80211_mgmt *)skb_put(skb, pkt_len); + memset(mgmt, 0, pkt_len); + mgmt->frame_control = + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); + memcpy(mgmt->da, req->bss->bssid, ETH_ALEN); + memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN); + memcpy(mgmt->bssid, req->bss->bssid, ETH_ALEN); + eth_broadcast_addr(mgmt->addr4); + + if (req->auth_data_len >= 4) { + if (req->auth_type == NL80211_AUTHTYPE_SAE) { + __le16 *pos = (__le16 *)req->auth_data; + + trans = le16_to_cpu(pos[0]); + status_code = le16_to_cpu(pos[1]); + } + memcpy((u8 *)(&mgmt->auth.variable), req->auth_data + 4, + req->auth_data_len - 4); + varptr = (u8 *)&mgmt->auth.variable + + (req->auth_data_len - 4); + } + + mgmt->auth.auth_alg = cpu_to_le16(auth_alg); + mgmt->auth.auth_transaction = cpu_to_le16(trans); + mgmt->auth.status_code = cpu_to_le16(status_code); + + if (req->ie && req->ie_len) { + if (!varptr) + varptr = (u8 *)&mgmt->auth.variable; + memcpy((u8 *)varptr, req->ie, req->ie_len); + } + + priv->auth_flag = HOST_MLME_AUTH_PENDING; + priv->auth_alg = auth_alg; + + skb->priority = WMM_HIGHEST_PRIORITY; + __net_timestamp(skb); + + mwifiex_dbg(priv->adapter, MSG, + "auth: send authentication to %pM\n", req->bss->bssid); + + mwifiex_queue_tx_pkt(priv, skb); + + return 0; +} + +static int +mwifiex_cfg80211_associate(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_assoc_request *req) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct mwifiex_adapter *adapter = priv->adapter; + int ret; + struct cfg80211_ssid req_ssid; + const u8 *ssid_ie; + + if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) { + mwifiex_dbg(adapter, ERROR, + "%s: reject infra assoc request in non-STA role\n", + dev->name); + return -EINVAL; + } + + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) || + test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) { + mwifiex_dbg(adapter, ERROR, + "%s: Ignore association.\t" + "Card removed or FW in bad state\n", + dev->name); + return -EFAULT; + } + + if (priv->auth_alg == WLAN_AUTH_SAE) + priv->auth_flag = HOST_MLME_AUTH_DONE; + + if (priv->auth_flag && !(priv->auth_flag & HOST_MLME_AUTH_DONE)) + return -EBUSY; + + if (priv->roc_cfg.cookie) { + ret = 
mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE, + &priv->roc_cfg.chan, 0); + if (!ret) + memset(&priv->roc_cfg, 0, + sizeof(struct mwifiex_roc_cfg)); + else + return -EFAULT; + } + + if (!mwifiex_stop_bg_scan(priv)) + cfg80211_sched_scan_stopped_locked(priv->wdev.wiphy, 0); + + memset(&req_ssid, 0, sizeof(struct cfg80211_ssid)); + rcu_read_lock(); + ssid_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); + + if (!ssid_ie) + goto ssid_err; + + req_ssid.ssid_len = ssid_ie[1]; + if (req_ssid.ssid_len > IEEE80211_MAX_SSID_LEN) { + mwifiex_dbg(adapter, ERROR, "invalid SSID - aborting\n"); + goto ssid_err; + } + + memcpy(req_ssid.ssid, ssid_ie + 2, req_ssid.ssid_len); + if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) { + mwifiex_dbg(adapter, ERROR, "invalid SSID - aborting\n"); + goto ssid_err; + } + rcu_read_unlock(); + + /* As this is new association, clear locally stored + * keys and security related flags + */ + priv->sec_info.wpa_enabled = false; + priv->sec_info.wpa2_enabled = false; + priv->wep_key_curr_index = 0; + priv->sec_info.encryption_mode = 0; + priv->sec_info.is_authtype_auto = 0; + if (mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1)) { + mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n"); + return -EFAULT; + } + + if (req->crypto.n_ciphers_pairwise) + priv->sec_info.encryption_mode = + req->crypto.ciphers_pairwise[0]; + + if (req->crypto.cipher_group) + priv->sec_info.encryption_mode = req->crypto.cipher_group; + + if (req->ie) + mwifiex_set_gen_ie(priv, req->ie, req->ie_len); + + memcpy(priv->cfg_bssid, req->bss->bssid, ETH_ALEN); + + mwifiex_dbg(adapter, MSG, + "assoc: send association to %pM\n", req->bss->bssid); + + cfg80211_ref_bss(adapter->wiphy, req->bss); + ret = mwifiex_bss_start(priv, req->bss, &req_ssid); + if (ret) { + priv->auth_flag = 0; + priv->auth_alg = WLAN_AUTH_NONE; + eth_zero_addr(priv->cfg_bssid); + } + + if (priv->assoc_rsp_size) { + priv->req_bss = req->bss; + adapter->assoc_resp_received = true; + queue_work(adapter->host_mlme_workqueue, + &adapter->host_mlme_work); + } + + cfg80211_put_bss(priv->adapter->wiphy, req->bss); + + return 0; + +ssid_err: + rcu_read_unlock(); + return -EFAULT; +} + +static int +mwifiex_cfg80211_deauthenticate(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_deauth_request *req) +{ + return mwifiex_cfg80211_disconnect(wiphy, dev, req->reason_code); +} + +static int +mwifiex_cfg80211_disassociate(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_disassoc_request *req) +{ + return mwifiex_cfg80211_disconnect(wiphy, dev, req->reason_code); +} + +static int +mwifiex_cfg80211_probe_client(struct wiphy *wiphy, + struct net_device *dev, const u8 *peer, + u64 *cookie) +{ + /* hostapd looks for NL80211_CMD_PROBE_CLIENT support; otherwise, + * it requires monitor-mode support (which mwifiex doesn't support). + * Provide fake probe_client support to work around this. 
+ */ + return -EOPNOTSUPP; +} + /* station cfg80211 operations */ -static struct cfg80211_ops mwifiex_cfg80211_ops = { +static const struct cfg80211_ops mwifiex_cfg80211_ops = { .add_virtual_intf = mwifiex_add_virtual_intf, .del_virtual_intf = mwifiex_del_virtual_intf, .change_virtual_intf = mwifiex_cfg80211_change_virtual_intf, @@ -4342,24 +4705,58 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA]; u8 *country_code; u32 thr, retry; + struct cfg80211_ops *ops; + + ops = devm_kmemdup(adapter->dev, &mwifiex_cfg80211_ops, + sizeof(mwifiex_cfg80211_ops), GFP_KERNEL); + if (!ops) + return -ENOMEM; /* create a new wiphy for use with cfg80211 */ - wiphy = wiphy_new(&mwifiex_cfg80211_ops, - sizeof(struct mwifiex_adapter *)); + wiphy = wiphy_new(ops, sizeof(struct mwifiex_adapter *)); if (!wiphy) { mwifiex_dbg(adapter, ERROR, "%s: creating new wiphy\n", __func__); return -ENOMEM; } + if (adapter->host_mlme_enabled) { + ops->auth = mwifiex_cfg80211_authenticate; + ops->assoc = mwifiex_cfg80211_associate; + ops->deauth = mwifiex_cfg80211_deauthenticate; + ops->disassoc = mwifiex_cfg80211_disassociate; + ops->disconnect = NULL; + ops->connect = NULL; + ops->probe_client = mwifiex_cfg80211_probe_client; + } wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; - wiphy->mgmt_stypes = mwifiex_mgmt_stypes; + if (adapter->host_mlme_enabled) { + memcpy(adapter->mwifiex_mgmt_stypes, + mwifiex_mgmt_stypes, + NUM_NL80211_IFTYPES * + sizeof(struct ieee80211_txrx_stypes)); + + adapter->mwifiex_mgmt_stypes[NL80211_IFTYPE_AP].tx = 0xffff; + adapter->mwifiex_mgmt_stypes[NL80211_IFTYPE_AP].rx = + BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4); + wiphy->mgmt_stypes = adapter->mwifiex_mgmt_stypes; + } else { + wiphy->mgmt_stypes = mwifiex_mgmt_stypes; + } wiphy->max_remain_on_channel_duration = 5000; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_AP); + wiphy->max_num_akm_suites = CFG80211_MAX_NUM_AKM_SUITES; + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info)) wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); @@ -4411,14 +4808,18 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) ether_addr_copy(wiphy->perm_addr, adapter->perm_addr); wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; - wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | - WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | + wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_HAS_CHANNEL_SWITCH | WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_PS_ON_BY_DEFAULT; + if (adapter->host_mlme_enabled) + wiphy->flags |= WIPHY_FLAG_REPORTS_OBSS; + else + wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME; + if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_TDLS_EXTERNAL_SETUP; @@ -4448,6 +4849,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_NEED_OBSS_SCAN; + if (adapter->host_mlme_enabled) + wiphy->features |= NL80211_FEATURE_SAE; + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info)) wiphy->features |= NL80211_FEATURE_HT_IBSS; diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c 
b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 9eff29a255445c..1cff001bdc5145 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -5,7 +5,7 @@ * Copyright 2011-2020 NXP */ -#include +#include #include "decl.h" #include "ioctl.h" #include "util.h" @@ -482,7 +482,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter) if ((adapter->event_cause & EVENT_ID_MASK) == EVENT_RADAR_DETECTED) { for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && mwifiex_is_11h_active(priv)) { + if (mwifiex_is_11h_active(priv)) { adapter->event_cause |= ((priv->bss_num & 0xff) << 16) | ((priv->bss_type & 0xff) << 24); @@ -635,6 +635,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, case HostCmd_CMD_UAP_STA_DEAUTH: case HOST_CMD_APCMD_SYS_RESET: case HOST_CMD_APCMD_STA_LIST: + case HostCmd_CMD_CHAN_REPORT_REQUEST: + case HostCmd_CMD_ADD_NEW_STATION: ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); @@ -924,6 +926,24 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) return ret; } +void mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter) +{ + struct cfg80211_rx_assoc_resp_data assoc_resp = { + .uapsd_queues = -1, + }; + struct mwifiex_private *priv = + mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + + if (priv->assoc_rsp_size) { + assoc_resp.links[0].bss = priv->req_bss; + assoc_resp.buf = priv->assoc_rsp_buf; + assoc_resp.len = priv->assoc_rsp_size; + cfg80211_rx_assoc_resp(priv->netdev, + &assoc_resp); + priv->assoc_rsp_size = 0; + } +} + /* * This function handles the timeout of command sending. * @@ -1672,6 +1692,13 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, if (adapter->fw_api_ver == MWIFIEX_FW_V15) adapter->scan_chan_gap_enabled = true; + if (adapter->key_api_major_ver != KEY_API_VER_MAJOR_V2) + adapter->host_mlme_enabled = false; + + mwifiex_dbg(adapter, MSG, "host_mlme: %s, key_api: %d\n", + adapter->host_mlme_enabled ? 
"enable" : "disable", + adapter->key_api_major_ver); + return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index 326ffb05d79138..84603f1e7f6e0f 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -31,6 +31,29 @@ * + sizeof(tx_control) */ +#define FRMCTL_LEN 2 +#define DURATION_LEN 2 +#define SEQCTL_LEN 2 +/* special FW 4 address management header */ +#define MWIFIEX_MGMT_HEADER_LEN (FRMCTL_LEN + DURATION_LEN + ETH_ALEN + \ + ETH_ALEN + ETH_ALEN + SEQCTL_LEN + ETH_ALEN) + +#define AUTH_ALG_LEN 2 +#define AUTH_TRANSACTION_LEN 2 +#define AUTH_STATUS_LEN 2 +#define MWIFIEX_AUTH_BODY_LEN (AUTH_ALG_LEN + AUTH_TRANSACTION_LEN + \ + AUTH_STATUS_LEN) + +#define HOST_MLME_AUTH_PENDING BIT(0) +#define HOST_MLME_AUTH_DONE BIT(1) + +#define HOST_MLME_MGMT_MASK (BIT(IEEE80211_STYPE_AUTH >> 4) | \ + BIT(IEEE80211_STYPE_DEAUTH >> 4) | \ + BIT(IEEE80211_STYPE_DISASSOC >> 4)) +#define AUTH_TX_DEFAULT_WAIT_TIME 2400 + +#define WLAN_AUTH_NONE 0xFFFF + #define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2 #define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16 #define MWIFIEX_MAX_TDLS_PEER_SUPPORTED 8 diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 3adc447b715f68..d03129d5d24e3d 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -210,6 +210,9 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_RANDOM_MAC (PROPRIETARY_TLV_BASE_ID + 236) #define TLV_TYPE_CHAN_ATTR_CFG (PROPRIETARY_TLV_BASE_ID + 237) #define TLV_TYPE_MAX_CONN (PROPRIETARY_TLV_BASE_ID + 279) +#define TLV_TYPE_HOST_MLME (PROPRIETARY_TLV_BASE_ID + 307) +#define TLV_TYPE_UAP_STA_FLAGS (PROPRIETARY_TLV_BASE_ID + 313) +#define TLV_TYPE_SAE_PWE_MODE (PROPRIETARY_TLV_BASE_ID + 339) #define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 @@ -405,6 +408,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_STA_CONFIGURE 0x023f #define HostCmd_CMD_CHAN_REGION_CFG 0x0242 #define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251 +#define HostCmd_CMD_ADD_NEW_STATION 0x025f #define PROTOCOL_NO_SECURITY 0x01 #define PROTOCOL_STATIC_WEP 0x02 @@ -415,6 +419,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define KEY_MGMT_NONE 0x04 #define KEY_MGMT_PSK 0x02 #define KEY_MGMT_EAP 0x01 +#define KEY_MGMT_PSK_SHA256 0x100 +#define KEY_MGMT_SAE 0x400 #define CIPHER_TKIP 0x04 #define CIPHER_AES_CCMP 0x08 #define VALID_CIPHER_BITMAP 0x0c @@ -500,6 +506,9 @@ enum mwifiex_channel_flags { #define HostCmd_ACT_GET_TX 0x0008 #define HostCmd_ACT_GET_BOTH 0x000c +#define HostCmd_ACT_REMOVE_STA 0x0 +#define HostCmd_ACT_ADD_STA 0x1 + #define RF_ANTENNA_AUTO 0xFFFF #define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \ @@ -744,6 +753,25 @@ struct uap_rxpd { u8 flags; } __packed; +struct mwifiex_auth { + __le16 auth_alg; + __le16 auth_transaction; + __le16 status_code; + /* possibly followed by Challenge text */ + u8 variable[]; +} __packed; + +struct mwifiex_ieee80211_mgmt { + __le16 frame_control; + __le16 duration; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + __le16 seq_ctrl; + u8 addr4[ETH_ALEN]; + struct mwifiex_auth auth; +} __packed; + struct mwifiex_fw_chan_stats { u8 chan_num; u8 bandcfg; @@ -803,6 +831,11 @@ struct mwifiex_ie_types_ssid_param_set { u8 ssid[]; } __packed; +struct mwifiex_ie_types_host_mlme { + struct mwifiex_ie_types_header header; + u8 host_mlme; +} __packed; + struct mwifiex_ie_types_num_probes { struct mwifiex_ie_types_header header; __le16 num_probes; 
@@ -906,6 +939,13 @@ struct mwifiex_ie_types_tdls_idle_timeout { __le16 value; } __packed; +#define MWIFIEX_AUTHTYPE_SAE 6 + +struct mwifiex_ie_types_sae_pwe_mode { + struct mwifiex_ie_types_header header; + u8 pwe[]; +} __packed; + struct mwifiex_ie_types_rsn_param_set { struct mwifiex_ie_types_header header; u8 rsn_ie[]; @@ -1587,7 +1627,7 @@ struct host_cmd_ds_802_11_scan_rsp { struct host_cmd_ds_802_11_scan_ext { u32 reserved; - u8 tlv_buffer[1]; + u8 tlv_buffer[]; } __packed; struct mwifiex_ie_types_bss_mode { @@ -2298,6 +2338,20 @@ struct host_cmd_ds_sta_configure { u8 tlv_buffer[]; } __packed; +struct mwifiex_ie_types_sta_flag { + struct mwifiex_ie_types_header header; + __le32 sta_flags; +} __packed; + +struct host_cmd_ds_add_station { + __le16 action; + __le16 aid; + u8 peer_mac[ETH_ALEN]; + __le32 listen_interval; + __le16 cap_info; + u8 tlv[]; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2376,6 +2430,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_chan_region_cfg reg_cfg; struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl; struct host_cmd_ds_sta_configure sta_cfg; + struct host_cmd_ds_add_station sta_info; } params; } __packed; diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index c9c58419c37bfb..8b61e45cd66781 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -81,6 +81,9 @@ int mwifiex_init_priv(struct mwifiex_private *priv) priv->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR; priv->data_avg_factor = DEFAULT_DATA_AVG_FACTOR; + priv->auth_flag = 0; + priv->auth_alg = WLAN_AUTH_NONE; + priv->sec_info.wep_enabled = 0; priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM; priv->sec_info.encryption_mode = 0; @@ -220,6 +223,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->cmd_resp_received = false; adapter->event_received = false; adapter->data_received = false; + adapter->assoc_resp_received = false; + adapter->priv_link_lost = NULL; + adapter->host_mlme_link_lost = false; clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); @@ -362,15 +368,13 @@ static void mwifiex_invalidate_lists(struct mwifiex_adapter *adapter) list_del(&adapter->bss_prio_tbl[i].bss_prio_head); for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - priv = adapter->priv[i]; - for (j = 0; j < MAX_NUM_TID; ++j) - list_del(&priv->wmm.tid_tbl_ptr[j].ra_list); - list_del(&priv->tx_ba_stream_tbl_ptr); - list_del(&priv->rx_reorder_tbl_ptr); - list_del(&priv->sta_list); - list_del(&priv->auto_tdls_list); - } + priv = adapter->priv[i]; + for (j = 0; j < MAX_NUM_TID; ++j) + list_del(&priv->wmm.tid_tbl_ptr[j].ra_list); + list_del(&priv->tx_ba_stream_tbl_ptr); + list_del(&priv->rx_reorder_tbl_ptr); + list_del(&priv->sta_list); + list_del(&priv->auto_tdls_list); } } @@ -419,13 +423,11 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) spin_lock_init(&adapter->mwifiex_cmd_lock); spin_lock_init(&adapter->queue_lock); for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - priv = adapter->priv[i]; - spin_lock_init(&priv->wmm.ra_list_spinlock); - spin_lock_init(&priv->curr_bcn_buf_lock); - spin_lock_init(&priv->sta_list_spinlock); - spin_lock_init(&priv->auto_tdls_lock); - } + priv = adapter->priv[i]; + spin_lock_init(&priv->wmm.ra_list_spinlock); + spin_lock_init(&priv->curr_bcn_buf_lock); + spin_lock_init(&priv->sta_list_spinlock); + spin_lock_init(&priv->auto_tdls_lock); } /* Initialize cmd_free_q 
*/ @@ -449,8 +451,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) } for (i = 0; i < adapter->priv_num; i++) { - if (!adapter->priv[i]) - continue; priv = adapter->priv[i]; for (j = 0; j < MAX_NUM_TID; ++j) INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[j].ra_list); @@ -500,31 +500,24 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter) mwifiex_init_adapter(adapter); for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - priv = adapter->priv[i]; + priv = adapter->priv[i]; - /* Initialize private structure */ - ret = mwifiex_init_priv(priv); - if (ret) - return -1; - } + /* Initialize private structure */ + ret = mwifiex_init_priv(priv); + if (ret) + return -1; } if (adapter->mfg_mode) { adapter->hw_status = MWIFIEX_HW_STATUS_READY; ret = -EINPROGRESS; } else { for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - ret = mwifiex_sta_init_cmd(adapter->priv[i], - first_sta, true); - if (ret == -1) - return -1; - - first_sta = false; - } - - + ret = mwifiex_sta_init_cmd(adapter->priv[i], + first_sta, true); + if (ret == -1) + return -1; + first_sta = false; } } @@ -631,13 +624,11 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) /* Clean up Tx/Rx queues and delete BSS priority table */ for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - priv = adapter->priv[i]; + priv = adapter->priv[i]; - mwifiex_clean_auto_tdls(priv); - mwifiex_abort_cac(priv); - mwifiex_free_priv(priv); - } + mwifiex_clean_auto_tdls(priv); + mwifiex_abort_cac(priv); + mwifiex_free_priv(priv); } atomic_set(&adapter->tx_queued, 0); diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index e8825f302de8a3..516159b721d34e 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -158,6 +158,11 @@ struct mwifiex_bss_info { u8 bssid[ETH_ALEN]; }; +struct mwifiex_sta_info { + u8 peer_mac[ETH_ALEN]; + struct station_parameters *params; +}; + #define MAX_NUM_TID 8 #define MAX_RX_WINSIZE 64 diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index 9d98a1908dd603..6d8f1d1d7ca4e9 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -382,7 +382,9 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, struct mwifiex_ie_types_ss_param_set *ss_tlv; struct mwifiex_ie_types_rates_param_set *rates_tlv; struct mwifiex_ie_types_auth_type *auth_tlv; + struct mwifiex_ie_types_sae_pwe_mode *sae_pwe_tlv; struct mwifiex_ie_types_chan_list_param_set *chan_tlv; + struct mwifiex_ie_types_host_mlme *host_mlme_tlv; u8 rates[MWIFIEX_SUPPORTED_RATES]; u32 rates_size; u16 tmp_cap; @@ -460,6 +462,24 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, pos += sizeof(auth_tlv->header) + le16_to_cpu(auth_tlv->header.len); + if (priv->sec_info.authentication_mode == WLAN_AUTH_SAE) { + auth_tlv->auth_type = cpu_to_le16(MWIFIEX_AUTHTYPE_SAE); + if (bss_desc->bcn_rsnx_ie && + bss_desc->bcn_rsnx_ie->ieee_hdr.len && + (bss_desc->bcn_rsnx_ie->data[0] & + WLAN_RSNX_CAPA_SAE_H2E)) { + sae_pwe_tlv = + (struct mwifiex_ie_types_sae_pwe_mode *)pos; + sae_pwe_tlv->header.type = + cpu_to_le16(TLV_TYPE_SAE_PWE_MODE); + sae_pwe_tlv->header.len = + cpu_to_le16(sizeof(sae_pwe_tlv->pwe[0])); + sae_pwe_tlv->pwe[0] = bss_desc->bcn_rsnx_ie->data[0]; + pos += sizeof(sae_pwe_tlv->header) + + sizeof(sae_pwe_tlv->pwe[0]); + } + } + if (IS_SUPPORT_MULTI_BANDS(priv->adapter) && 
!(ISSUPP_11NENABLED(priv->adapter->fw_cap_info) && (!bss_desc->disable_11n) && @@ -491,6 +511,16 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, sizeof(struct mwifiex_chan_scan_param_set); } + if (priv->adapter->host_mlme_enabled) { + host_mlme_tlv = (struct mwifiex_ie_types_host_mlme *)pos; + host_mlme_tlv->header.type = cpu_to_le16(TLV_TYPE_HOST_MLME); + host_mlme_tlv->header.len = + cpu_to_le16(sizeof(host_mlme_tlv->host_mlme)); + host_mlme_tlv->host_mlme = 1; + pos += sizeof(host_mlme_tlv->header) + + sizeof(host_mlme_tlv->host_mlme); + } + if (!priv->wps.session_enable) { if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled) rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos); @@ -641,7 +671,21 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, goto done; } - assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params; + if (adapter->host_mlme_enabled) { + struct ieee80211_mgmt *hdr; + + hdr = (struct ieee80211_mgmt *)&resp->params; + if (!memcmp(hdr->bssid, + priv->attempted_bss_desc->mac_address, + ETH_ALEN)) + assoc_rsp = (struct ieee_types_assoc_rsp *) + &hdr->u.assoc_resp; + else + assoc_rsp = + (struct ieee_types_assoc_rsp *)&resp->params; + } else { + assoc_rsp = (struct ieee_types_assoc_rsp *)&resp->params; + } cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap); status_code = le16_to_cpu(assoc_rsp->status_code); @@ -680,6 +724,9 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, mwifiex_dbg(priv->adapter, ERROR, "ASSOC_RESP: UNSPECIFIED failure\n"); } + + if (priv->adapter->host_mlme_enabled) + priv->assoc_rsp_size = 0; } else { ret = status_code; } @@ -778,7 +825,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, priv->adapter->dbg.num_cmd_assoc_success++; - mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n"); + mwifiex_dbg(priv->adapter, MSG, "assoc: associated with %pM\n", + priv->attempted_bss_desc->mac_address); /* Add the ra_list here for infra mode as there will be only 1 ra always */ @@ -1491,6 +1539,20 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac) if (!priv->media_connected) return 0; + if (priv->adapter->host_mlme_enabled) { + priv->auth_flag = 0; + priv->auth_alg = WLAN_AUTH_NONE; + priv->host_mlme_reg = false; + priv->mgmt_frame_mask = 0; + if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, + HostCmd_ACT_GEN_SET, 0, + &priv->mgmt_frame_mask, false)) { + mwifiex_dbg(priv->adapter, ERROR, + "could not unregister mgmt frame rx\n"); + return -1; + } + } + switch (priv->bss_mode) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: @@ -1520,8 +1582,7 @@ void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv) - mwifiex_deauthenticate(priv, NULL); + mwifiex_deauthenticate(priv, NULL); } } EXPORT_SYMBOL_GPL(mwifiex_deauthenticate_all); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index d99127dc466ecb..96d1f6039fbca3 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -127,10 +127,8 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) /* Free private structures */ for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - mwifiex_free_curr_bcn(adapter->priv[i]); - kfree(adapter->priv[i]); - } + mwifiex_free_curr_bcn(adapter->priv[i]); + kfree(adapter->priv[i]); } if (adapter->nd_info) { @@ -530,6 +528,11 @@ static void 
mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) destroy_workqueue(adapter->rx_workqueue); adapter->rx_workqueue = NULL; } + + if (adapter->host_mlme_workqueue) { + destroy_workqueue(adapter->host_mlme_workqueue); + adapter->host_mlme_workqueue = NULL; + } } /* @@ -802,6 +805,10 @@ mwifiex_bypass_tx_queue(struct mwifiex_private *priv, "bypass txqueue; eth type %#x, mgmt %d\n", ntohs(eth_hdr->h_proto), mwifiex_is_skb_mgmt_frame(skb)); + if (eth_hdr->h_proto == htons(ETH_P_PAE)) + mwifiex_dbg(priv->adapter, MSG, + "key: send EAPOL to %pM\n", + eth_hdr->h_dest); return true; } @@ -1162,7 +1169,7 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) } for (i = 0; i < adapter->priv_num; i++) { - if (!adapter->priv[i] || !adapter->priv[i]->netdev) + if (!adapter->priv[i]->netdev) continue; priv = adapter->priv[i]; p += sprintf(p, "\n[interface : \"%s\"]\n", @@ -1201,7 +1208,7 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL); if (debug_info) { for (i = 0; i < adapter->priv_num; i++) { - if (!adapter->priv[i] || !adapter->priv[i]->netdev) + if (!adapter->priv[i]->netdev) continue; priv = adapter->priv[i]; mwifiex_get_debug_info(priv, debug_info); @@ -1384,6 +1391,35 @@ int is_command_pending(struct mwifiex_adapter *adapter) return !is_cmd_pend_q_empty; } +/* This is the host mlme work queue function. + * It handles the host mlme operations. + */ +static void mwifiex_host_mlme_work_queue(struct work_struct *work) +{ + struct mwifiex_adapter *adapter = + container_of(work, struct mwifiex_adapter, host_mlme_work); + + if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) + return; + + /* Check for host mlme disconnection */ + if (adapter->host_mlme_link_lost) { + if (adapter->priv_link_lost) { + mwifiex_reset_connect_state(adapter->priv_link_lost, + WLAN_REASON_DEAUTH_LEAVING, + true); + adapter->priv_link_lost = NULL; + } + adapter->host_mlme_link_lost = false; + } + + /* Check for host mlme Assoc Resp */ + if (adapter->assoc_resp_received) { + mwifiex_process_assoc_resp(adapter); + adapter->assoc_resp_received = false; + } +} + /* * This is the RX work queue function. * @@ -1434,7 +1470,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) /* Stop data */ for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) { + if (priv->netdev) { mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); @@ -1459,8 +1495,6 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (!priv) - continue; rtnl_lock(); if (priv->netdev && priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) { @@ -1558,6 +1592,18 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter) INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue); } + if (adapter->host_mlme_enabled) { + adapter->host_mlme_workqueue = + alloc_workqueue("MWIFIEX_HOST_MLME_WORK_QUEUE", + WQ_HIGHPRI | + WQ_MEM_RECLAIM | + WQ_UNBOUND, 0); + if (!adapter->host_mlme_workqueue) + goto err_kmalloc; + INIT_WORK(&adapter->host_mlme_work, + mwifiex_host_mlme_work_queue); + } + /* Register the device. Fill up the private data structure with * relevant information from the card. 
Some code extracted from * mwifiex_register_dev() @@ -1721,6 +1767,18 @@ mwifiex_add_card(void *card, struct completion *fw_done, goto err_registerdev; } + if (adapter->host_mlme_enabled) { + adapter->host_mlme_workqueue = + alloc_workqueue("MWIFIEX_HOST_MLME_WORK_QUEUE", + WQ_HIGHPRI | + WQ_MEM_RECLAIM | + WQ_UNBOUND, 0); + if (!adapter->host_mlme_workqueue) + goto err_kmalloc; + INIT_WORK(&adapter->host_mlme_work, + mwifiex_host_mlme_work_queue); + } + if (mwifiex_init_hw_fw(adapter, true)) { pr_err("%s: firmware init failed\n", __func__); goto err_init_fw; diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index c5164ae41b5475..566adce3413cb6 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -424,6 +424,8 @@ struct mwifiex_bssdescriptor { u16 wpa_offset; struct ieee_types_generic *bcn_rsn_ie; u16 rsn_offset; + struct ieee_types_generic *bcn_rsnx_ie; + u16 rsnx_offset; struct ieee_types_generic *bcn_wapi_ie; u16 wapi_offset; u8 *beacon_buf; @@ -525,6 +527,8 @@ struct mwifiex_private { u8 bss_priority; u8 bss_num; u8 bss_started; + u8 auth_flag; + u16 auth_alg; u8 frame_type; u8 curr_addr[ETH_ALEN]; u8 media_connected; @@ -608,6 +612,7 @@ struct mwifiex_private { #define MWIFIEX_ASSOC_RSP_BUF_SIZE 500 u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE]; u32 assoc_rsp_size; + struct cfg80211_bss *req_bss; #define MWIFIEX_GENIE_BUF_SIZE 256 u8 gen_ie_buf[MWIFIEX_GENIE_BUF_SIZE]; @@ -647,6 +652,7 @@ struct mwifiex_private { u16 gen_idx; u8 ap_11n_enabled; u8 ap_11ac_enabled; + bool host_mlme_reg; u32 mgmt_frame_mask; struct mwifiex_roc_cfg roc_cfg; bool scan_aborting; @@ -793,7 +799,7 @@ struct mwifiex_auto_tdls_peer { u8 mac_addr[ETH_ALEN]; u8 tdls_status; int rssi; - long rssi_jiffies; + unsigned long rssi_jiffies; u8 failure_count; u8 do_discover; u8 do_setup; @@ -876,6 +882,8 @@ struct mwifiex_adapter { struct work_struct main_work; struct workqueue_struct *rx_workqueue; struct work_struct rx_work; + struct workqueue_struct *host_mlme_workqueue; + struct work_struct host_mlme_work; bool rx_work_enabled; bool rx_processing; bool delay_main_work; @@ -907,6 +915,9 @@ struct mwifiex_adapter { u8 cmd_resp_received; u8 event_received; u8 data_received; + u8 assoc_resp_received; + struct mwifiex_private *priv_link_lost; + u8 host_mlme_link_lost; u16 seq_num; struct cmd_ctrl_node *cmd_pool; struct cmd_ctrl_node *curr_cmd; @@ -996,6 +1007,8 @@ struct mwifiex_adapter { bool is_up; bool ext_scan; + bool host_mlme_enabled; + struct ieee80211_txrx_stypes mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES]; u8 fw_api_ver; u8 key_api_major_ver, key_api_minor_ver; u8 max_p2p_conn, max_sta_conn; @@ -1061,6 +1074,9 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb); int mwifiex_uap_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb); +void mwifiex_host_mlme_disconnect(struct mwifiex_private *priv, + u16 reason_code, u8 *sa); + int mwifiex_process_mgmt_packet(struct mwifiex_private *priv, struct sk_buff *skb); @@ -1092,6 +1108,7 @@ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter); int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter); +void mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter); int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb); int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, @@ -1286,14 +1303,12 @@ 
mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter, int i; for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) - continue; + if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) + continue; - if ((adapter->priv[i]->bss_num == bss_num) && - (adapter->priv[i]->bss_type == bss_type)) - break; - } + if ((adapter->priv[i]->bss_num == bss_num) && + (adapter->priv[i]->bss_type == bss_type)) + break; } return ((i < adapter->priv_num) ? adapter->priv[i] : NULL); } @@ -1309,11 +1324,9 @@ mwifiex_get_priv(struct mwifiex_adapter *adapter, int i; for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) { - if (bss_role == MWIFIEX_BSS_ROLE_ANY || - GET_BSS_ROLE(adapter->priv[i]) == bss_role) - break; - } + if (bss_role == MWIFIEX_BSS_ROLE_ANY || + GET_BSS_ROLE(adapter->priv[i]) == bss_role) + break; } return ((i < adapter->priv_num) ? adapter->priv[i] : NULL); @@ -1331,12 +1344,10 @@ mwifiex_get_unused_bss_num(struct mwifiex_adapter *adapter, u8 bss_type) memset(index, 0, sizeof(index)); for (i = 0; i < adapter->priv_num; i++) - if (adapter->priv[i]) { - if (adapter->priv[i]->bss_type == bss_type && - !(adapter->priv[i]->bss_mode == - NL80211_IFTYPE_UNSPECIFIED)) { - index[adapter->priv[i]->bss_num] = 1; - } + if (adapter->priv[i]->bss_type == bss_type && + !(adapter->priv[i]->bss_mode == + NL80211_IFTYPE_UNSPECIFIED)) { + index[adapter->priv[i]->bss_num] = 1; } for (j = 0; j < MWIFIEX_MAX_BSS_NUM; j++) if (!index[j]) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 0326b121747cb2..cab889af4c4a62 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1371,6 +1371,12 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, bss_entry->rsn_offset = (u16) (current_ptr - bss_entry->beacon_buf); break; + case WLAN_EID_RSNX: + bss_entry->bcn_rsnx_ie = + (struct ieee_types_generic *)current_ptr; + bss_entry->rsnx_offset = + (u16)(current_ptr - bss_entry->beacon_buf); + break; case WLAN_EID_BSS_AC_ACCESS_DELAY: bss_entry->bcn_wapi_ie = (struct ieee_types_generic *) current_ptr; @@ -2045,8 +2051,6 @@ void mwifiex_cancel_scan(struct mwifiex_adapter *adapter) spin_unlock_bh(&adapter->mwifiex_cmd_lock); for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (!priv) - continue; if (priv->scan_request) { struct cfg80211_scan_info info = { .aborted = true, @@ -2530,8 +2534,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, ext_scan_resp = &resp->params.ext_scan; tlv = (void *)ext_scan_resp->tlv_buffer; - buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN - - 1); + buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN); while (buf_left >= sizeof(struct mwifiex_ie_types_header)) { type = le16_to_cpu(tlv->type); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index bda9b2b8a1f370..490ffd981164d0 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -332,6 +332,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = { .can_auto_tdls = false, .can_ext_scan = false, .fw_ready_extra_delay = false, + .host_mlme = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { @@ -348,6 +349,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { .can_auto_tdls = false, .can_ext_scan = 
true, .fw_ready_extra_delay = false, + .host_mlme = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { @@ -364,6 +366,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { .can_auto_tdls = false, .can_ext_scan = true, .fw_ready_extra_delay = false, + .host_mlme = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { @@ -380,6 +383,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { .can_auto_tdls = false, .can_ext_scan = true, .fw_ready_extra_delay = false, + .host_mlme = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = { @@ -397,6 +401,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = { .can_auto_tdls = false, .can_ext_scan = true, .fw_ready_extra_delay = false, + .host_mlme = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = { @@ -414,6 +419,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = { .can_auto_tdls = false, .can_ext_scan = true, .fw_ready_extra_delay = true, + .host_mlme = true, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = { @@ -432,6 +438,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = { .can_auto_tdls = false, .can_ext_scan = true, .fw_ready_extra_delay = false, + .host_mlme = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { @@ -448,6 +455,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { .can_auto_tdls = true, .can_ext_scan = true, .fw_ready_extra_delay = false, + .host_mlme = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = { @@ -465,6 +473,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = { .can_auto_tdls = true, .can_ext_scan = true, .fw_ready_extra_delay = false, + .host_mlme = false, }; static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = { @@ -481,6 +490,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = { .can_auto_tdls = false, .can_ext_scan = true, .fw_ready_extra_delay = false, + .host_mlme = false, }; static struct memory_type_mapping generic_mem_type_map[] = { @@ -574,6 +584,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) card->can_auto_tdls = data->can_auto_tdls; card->can_ext_scan = data->can_ext_scan; card->fw_ready_extra_delay = data->fw_ready_extra_delay; + card->host_mlme = data->host_mlme; INIT_WORK(&card->work, mwifiex_sdio_work); } @@ -2511,6 +2522,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl); } + adapter->host_mlme_enabled = card->host_mlme; + return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index cb63ad55d675f2..65d142286c46ec 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -256,6 +256,7 @@ struct sdio_mmc_card { bool can_auto_tdls; bool can_ext_scan; bool fw_ready_extra_delay; + bool host_mlme; struct mwifiex_sdio_mpa_tx mpa_tx; struct mwifiex_sdio_mpa_rx mpa_rx; @@ -280,6 +281,7 @@ struct mwifiex_sdio_device { bool can_auto_tdls; bool can_ext_scan; bool fw_ready_extra_delay; + bool host_mlme; }; /* diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 7b69d27e0c0e57..9c53825f222d1b 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -1398,6 +1398,8 @@ int 
mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_UAP_STA_DEAUTH: break; + case HostCmd_CMD_ADD_NEW_STATION: + break; case HOST_CMD_APCMD_SYS_RESET: break; case HostCmd_CMD_MEF_CFG: diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index df9cdd10a494f3..b5f3821a6a8f21 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -135,6 +135,9 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code, priv->media_connected = false; + priv->auth_flag = 0; + priv->auth_alg = WLAN_AUTH_NONE; + priv->scan_block = false; priv->port_open = false; @@ -222,8 +225,12 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code, priv->cfg_bssid, reason_code); if (priv->bss_mode == NL80211_IFTYPE_STATION || priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { - cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, - !from_ap, GFP_KERNEL); + if (adapter->host_mlme_enabled && adapter->host_mlme_link_lost) + mwifiex_host_mlme_disconnect(adapter->priv_link_lost, + reason_code, NULL); + else + cfg80211_disconnected(priv->netdev, reason_code, NULL, + 0, !from_ap, GFP_KERNEL); } eth_zero_addr(priv->cfg_bssid); @@ -746,7 +753,15 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) if (priv->media_connected) { reason_code = get_unaligned_le16(adapter->event_body); - mwifiex_reset_connect_state(priv, reason_code, true); + if (adapter->host_mlme_enabled) { + adapter->priv_link_lost = priv; + adapter->host_mlme_link_lost = true; + queue_work(adapter->host_mlme_workqueue, + &adapter->host_mlme_work); + } else { + mwifiex_reset_connect_state(priv, reason_code, + true); + } } break; @@ -999,10 +1014,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_REMAIN_ON_CHAN_EXPIRED: mwifiex_dbg(adapter, EVENT, "event: Remain on channel expired\n"); - cfg80211_remain_on_channel_expired(&priv->wdev, - priv->roc_cfg.cookie, - &priv->roc_cfg.chan, - GFP_ATOMIC); + + if (adapter->host_mlme_enabled && + (priv->auth_flag & HOST_MLME_AUTH_PENDING)) { + priv->auth_flag = 0; + priv->auth_alg = WLAN_AUTH_NONE; + } else { + cfg80211_remain_on_channel_expired(&priv->wdev, + priv->roc_cfg.cookie, + &priv->roc_cfg.chan, + GFP_ATOMIC); + } memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg)); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 32a27fad7b79a9..d3cba6895f8ce4 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -339,7 +339,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, ret = mwifiex_associate(priv, bss_desc); } - if (bss) + if (bss && !priv->adapter->host_mlme_enabled) cfg80211_put_bss(priv->adapter->wiphy, bss); } else { /* Adhoc mode */ @@ -503,8 +503,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) if (disconnect_on_suspend) { for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv) - mwifiex_deauthenticate(priv, NULL); + mwifiex_deauthenticate(priv, NULL); } } diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c index 70c2790b8e3514..9d0ef04ebe02a7 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c @@ -36,7 +36,7 @@ void mwifiex_process_sta_txpd(struct 
mwifiex_private *priv, struct txpd *local_tx_pd; struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); unsigned int pad; - u16 pkt_type, pkt_offset; + u16 pkt_type, pkt_length, pkt_offset; int hroom = adapter->intf_hdr_len; pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; @@ -49,9 +49,10 @@ void mwifiex_process_sta_txpd(struct mwifiex_private *priv, memset(local_tx_pd, 0, sizeof(struct txpd)); local_tx_pd->bss_num = priv->bss_num; local_tx_pd->bss_type = priv->bss_type; - local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len - - (sizeof(struct txpd) + - pad))); + pkt_length = (u16)(skb->len - (sizeof(struct txpd) + pad)); + if (pkt_type == PKT_TYPE_MGMT) + pkt_length -= MWIFIEX_MGMT_FRAME_HEADER_SIZE; + local_tx_pd->tx_pkt_length = cpu_to_le16(pkt_length); local_tx_pd->priority = (u8) skb->priority; local_tx_pd->pkt_delay_2ms = diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 6c60621b6cccb5..7823e67694e888 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -1439,8 +1439,8 @@ void mwifiex_check_auto_tdls(struct timer_list *t) spin_lock_bh(&priv->auto_tdls_lock); list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) { - if ((jiffies - tdls_peer->rssi_jiffies) > - (MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) { + if (time_after(jiffies, tdls_peer->rssi_jiffies + + MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) { tdls_peer->rssi = 0; tdls_peer->do_discover = true; priv->check_tdls_tx = true; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 491e366119096e..1c0ceac6b27fb3 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -46,31 +46,26 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv, bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST; + bss_config->protocol = 0; + if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1) + bss_config->protocol |= PROTOCOL_WPA; + if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2) + bss_config->protocol |= PROTOCOL_WPA2; + + bss_config->key_mgmt = 0; for (i = 0; i < params->crypto.n_akm_suites; i++) { switch (params->crypto.akm_suites[i]) { case WLAN_AKM_SUITE_8021X: - if (params->crypto.wpa_versions & - NL80211_WPA_VERSION_1) { - bss_config->protocol = PROTOCOL_WPA; - bss_config->key_mgmt = KEY_MGMT_EAP; - } - if (params->crypto.wpa_versions & - NL80211_WPA_VERSION_2) { - bss_config->protocol |= PROTOCOL_WPA2; - bss_config->key_mgmt = KEY_MGMT_EAP; - } + bss_config->key_mgmt |= KEY_MGMT_EAP; break; case WLAN_AKM_SUITE_PSK: - if (params->crypto.wpa_versions & - NL80211_WPA_VERSION_1) { - bss_config->protocol = PROTOCOL_WPA; - bss_config->key_mgmt = KEY_MGMT_PSK; - } - if (params->crypto.wpa_versions & - NL80211_WPA_VERSION_2) { - bss_config->protocol |= PROTOCOL_WPA2; - bss_config->key_mgmt = KEY_MGMT_PSK; - } + bss_config->key_mgmt |= KEY_MGMT_PSK; + break; + case WLAN_AKM_SUITE_PSK_SHA256: + bss_config->key_mgmt |= KEY_MGMT_PSK_SHA256; + break; + case WLAN_AKM_SUITE_SAE: + bss_config->key_mgmt |= KEY_MGMT_SAE; break; default: break; @@ -751,6 +746,28 @@ mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action, return 0; } +/* This function prepares AP start up command with or without host MLME + */ +static void mwifiex_cmd_uap_bss_start(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd) +{ + struct mwifiex_ie_types_host_mlme *tlv; + int size; + + cmd->command = 
cpu_to_le16(HostCmd_CMD_UAP_BSS_START); + size = S_DS_GEN; + + if (priv->adapter->host_mlme_enabled) { + tlv = (struct mwifiex_ie_types_host_mlme *)((u8 *)cmd + size); + tlv->header.type = cpu_to_le16(TLV_TYPE_HOST_MLME); + tlv->header.len = cpu_to_le16(sizeof(tlv->host_mlme)); + tlv->host_mlme = 1; + size += sizeof(struct mwifiex_ie_types_host_mlme); + } + + cmd->size = cpu_to_le16(size); +} + /* This function prepares AP specific deauth command with mac supplied in * function parameter. */ @@ -768,6 +785,144 @@ static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv, return 0; } +/* This function prepares AP specific add station command. + */ +static int mwifiex_cmd_uap_add_station(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + u16 cmd_action, void *data_buf) +{ + struct host_cmd_ds_add_station *new_sta = &cmd->params.sta_info; + struct mwifiex_sta_info *add_sta = (struct mwifiex_sta_info *)data_buf; + struct station_parameters *params = add_sta->params; + struct mwifiex_sta_node *sta_ptr; + u8 *pos; + u8 qos_capa; + u16 header_len = sizeof(struct mwifiex_ie_types_header); + u16 tlv_len; + int size; + struct mwifiex_ie_types_data *tlv; + struct mwifiex_ie_types_sta_flag *sta_flag; + int i; + + cmd->command = cpu_to_le16(HostCmd_CMD_ADD_NEW_STATION); + new_sta->action = cpu_to_le16(cmd_action); + size = sizeof(struct host_cmd_ds_add_station) + S_DS_GEN; + + if (cmd_action == HostCmd_ACT_ADD_STA) + sta_ptr = mwifiex_add_sta_entry(priv, add_sta->peer_mac); + else + sta_ptr = mwifiex_get_sta_entry(priv, add_sta->peer_mac); + + if (!sta_ptr) + return -1; + + memcpy(new_sta->peer_mac, add_sta->peer_mac, ETH_ALEN); + + if (cmd_action == HostCmd_ACT_REMOVE_STA) { + cmd->size = cpu_to_le16(size); + return 0; + } + + new_sta->aid = cpu_to_le16(params->aid); + new_sta->listen_interval = cpu_to_le32(params->listen_interval); + new_sta->cap_info = cpu_to_le16(params->capability); + + pos = new_sta->tlv; + + if (params->sta_flags_set & NL80211_STA_FLAG_WME) + sta_ptr->is_wmm_enabled = 1; + sta_flag = (struct mwifiex_ie_types_sta_flag *)pos; + sta_flag->header.type = cpu_to_le16(TLV_TYPE_UAP_STA_FLAGS); + sta_flag->header.len = cpu_to_le16(sizeof(__le32)); + sta_flag->sta_flags = cpu_to_le32(params->sta_flags_set); + pos += sizeof(struct mwifiex_ie_types_sta_flag); + size += sizeof(struct mwifiex_ie_types_sta_flag); + + if (params->ext_capab_len) { + tlv = (struct mwifiex_ie_types_data *)pos; + tlv->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY); + tlv_len = params->ext_capab_len; + tlv->header.len = cpu_to_le16(tlv_len); + memcpy(tlv->data, params->ext_capab, tlv_len); + pos += (header_len + tlv_len); + size += (header_len + tlv_len); + } + + if (params->link_sta_params.supported_rates_len) { + tlv = (struct mwifiex_ie_types_data *)pos; + tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES); + tlv_len = params->link_sta_params.supported_rates_len; + tlv->header.len = cpu_to_le16(tlv_len); + memcpy(tlv->data, + params->link_sta_params.supported_rates, tlv_len); + pos += (header_len + tlv_len); + size += (header_len + tlv_len); + } + + if (params->uapsd_queues || params->max_sp) { + tlv = (struct mwifiex_ie_types_data *)pos; + tlv->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA); + tlv_len = sizeof(qos_capa); + tlv->header.len = cpu_to_le16(tlv_len); + qos_capa = params->uapsd_queues | (params->max_sp << 5); + memcpy(tlv->data, &qos_capa, tlv_len); + pos += (header_len + tlv_len); + size += (header_len + tlv_len); + sta_ptr->is_wmm_enabled = 1; + } + + if 
(params->link_sta_params.ht_capa) { + tlv = (struct mwifiex_ie_types_data *)pos; + tlv->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY); + tlv_len = sizeof(struct ieee80211_ht_cap); + tlv->header.len = cpu_to_le16(tlv_len); + memcpy(tlv->data, params->link_sta_params.ht_capa, tlv_len); + pos += (header_len + tlv_len); + size += (header_len + tlv_len); + sta_ptr->is_11n_enabled = 1; + sta_ptr->max_amsdu = + le16_to_cpu(params->link_sta_params.ht_capa->cap_info) & + IEEE80211_HT_CAP_MAX_AMSDU ? + MWIFIEX_TX_DATA_BUF_SIZE_8K : + MWIFIEX_TX_DATA_BUF_SIZE_4K; + } + + if (params->link_sta_params.vht_capa) { + tlv = (struct mwifiex_ie_types_data *)pos; + tlv->header.type = cpu_to_le16(WLAN_EID_VHT_CAPABILITY); + tlv_len = sizeof(struct ieee80211_vht_cap); + tlv->header.len = cpu_to_le16(tlv_len); + memcpy(tlv->data, params->link_sta_params.vht_capa, tlv_len); + pos += (header_len + tlv_len); + size += (header_len + tlv_len); + sta_ptr->is_11ac_enabled = 1; + } + + if (params->link_sta_params.opmode_notif_used) { + tlv = (struct mwifiex_ie_types_data *)pos; + tlv->header.type = cpu_to_le16(WLAN_EID_OPMODE_NOTIF); + tlv_len = sizeof(u8); + tlv->header.len = cpu_to_le16(tlv_len); + memcpy(tlv->data, &params->link_sta_params.opmode_notif, + tlv_len); + pos += (header_len + tlv_len); + size += (header_len + tlv_len); + } + + for (i = 0; i < MAX_NUM_TID; i++) { + if (sta_ptr->is_11n_enabled) + sta_ptr->ampdu_sta[i] = + priv->aggr_prio_tbl[i].ampdu_user; + else + sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED; + } + + memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq)); + cmd->size = cpu_to_le16(size); + + return 0; +} + /* This function prepares the AP specific commands before sending them * to the firmware. * This is a generic function which calls specific command preparation @@ -785,6 +940,8 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no, return -1; break; case HostCmd_CMD_UAP_BSS_START: + mwifiex_cmd_uap_bss_start(priv, cmd); + break; case HostCmd_CMD_UAP_BSS_STOP: case HOST_CMD_APCMD_SYS_RESET: case HOST_CMD_APCMD_STA_LIST: @@ -800,6 +957,11 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no, data_buf)) return -1; break; + case HostCmd_CMD_ADD_NEW_STATION: + if (mwifiex_cmd_uap_add_station(priv, cmd, cmd_action, + data_buf)) + return -1; + break; default: mwifiex_dbg(priv->adapter, ERROR, "PREP_CMD: unknown cmd %#x\n", cmd_no); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 515e6db410f28a..6085cd50970d46 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -745,8 +745,6 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter) if (adapter->usb_mc_status) { for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (!priv) - continue; if ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP && !priv->bss_started) || (priv->bss_role == MWIFIEX_BSS_ROLE_STA && @@ -758,8 +756,6 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter) } else { for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (!priv) - continue; if ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->bss_started) || (priv->bss_role == MWIFIEX_BSS_ROLE_STA && @@ -770,8 +766,7 @@ static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter) } for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv) - priv->usb_port = active_port; + priv->usb_port = active_port; } for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++)
{ if (active_port == card->port[i].tx_data_ep) diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 745b1d925b2176..42c04bf858da37 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -370,6 +370,45 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len, return 0; } + +/* This function sends deauth packet to the kernel. */ +void mwifiex_host_mlme_disconnect(struct mwifiex_private *priv, + u16 reason_code, u8 *sa) +{ + u8 frame_buf[100]; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)frame_buf; + + memset(frame_buf, 0, sizeof(frame_buf)); + mgmt->frame_control = cpu_to_le16(IEEE80211_STYPE_DEAUTH); + mgmt->duration = 0; + mgmt->seq_ctrl = 0; + mgmt->u.deauth.reason_code = cpu_to_le16(reason_code); + + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) { + eth_broadcast_addr(mgmt->da); + memcpy(mgmt->sa, + priv->curr_bss_params.bss_descriptor.mac_address, + ETH_ALEN); + memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN); + priv->auth_flag = 0; + priv->auth_alg = WLAN_AUTH_NONE; + } else { + memcpy(mgmt->da, priv->curr_addr, ETH_ALEN); + memcpy(mgmt->sa, sa, ETH_ALEN); + memcpy(mgmt->bssid, priv->curr_addr, ETH_ALEN); + } + + if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) { + wiphy_lock(priv->wdev.wiphy); + cfg80211_rx_mlme_mgmt(priv->netdev, frame_buf, 26); + wiphy_unlock(priv->wdev.wiphy); + } else { + cfg80211_rx_mgmt(&priv->wdev, + priv->bss_chandef.chan->center_freq, + 0, frame_buf, 26, 0); + } +} + /* * This function processes the received management packet and send it * to the kernel. @@ -417,6 +456,71 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, pkt_len -= ETH_ALEN; rx_pd->rx_pkt_length = cpu_to_le16(pkt_len); + if (priv->host_mlme_reg && + (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) && + (ieee80211_is_auth(ieee_hdr->frame_control) || + ieee80211_is_deauth(ieee_hdr->frame_control) || + ieee80211_is_disassoc(ieee_hdr->frame_control))) { + if (ieee80211_is_auth(ieee_hdr->frame_control)) { + if (priv->auth_flag & HOST_MLME_AUTH_PENDING) { + if (priv->auth_alg != WLAN_AUTH_SAE) { + priv->auth_flag &= + ~HOST_MLME_AUTH_PENDING; + priv->auth_flag |= + HOST_MLME_AUTH_DONE; + } + } else { + return 0; + } + + mwifiex_dbg(priv->adapter, MSG, + "auth: receive authentication from %pM\n", + ieee_hdr->addr3); + } else { + if (!priv->wdev.connected) + return 0; + + if (ieee80211_is_deauth(ieee_hdr->frame_control)) { + mwifiex_dbg(priv->adapter, MSG, + "auth: receive deauth from %pM\n", + ieee_hdr->addr3); + priv->auth_flag = 0; + priv->auth_alg = WLAN_AUTH_NONE; + } else { + mwifiex_dbg + (priv->adapter, MSG, + "assoc: receive disassoc from %pM\n", + ieee_hdr->addr3); + } + } + + cfg80211_rx_mlme_mgmt(priv->netdev, skb->data, pkt_len); + } + + if (priv->adapter->host_mlme_enabled && + (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)) { + if (ieee80211_is_auth(ieee_hdr->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "auth: receive auth from %pM\n", + ieee_hdr->addr2); + if (ieee80211_is_deauth(ieee_hdr->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "auth: receive deauth from %pM\n", + ieee_hdr->addr2); + if (ieee80211_is_disassoc(ieee_hdr->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "assoc: receive disassoc from %pM\n", + ieee_hdr->addr2); + if (ieee80211_is_assoc_req(ieee_hdr->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "assoc: receive assoc req from %pM\n", + ieee_hdr->addr2); + if 
(ieee80211_is_reassoc_req(ieee_hdr->frame_control)) + mwifiex_dbg(priv->adapter, MSG, + "assoc: receive reassoc req from %pM\n", + ieee_hdr->addr2); + } + cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq, CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len, 0); diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index 8558995e8fc73d..bcb61dab7dc86c 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -454,8 +454,6 @@ int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (!priv) - continue; if (adapter->if_ops.is_port_ready && !adapter->if_ops.is_port_ready(priv)) continue; @@ -477,8 +475,6 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; ++i) { priv = adapter->priv[i]; - if (!priv) - continue; if (!priv->port_open && (priv->bss_mode != NL80211_IFTYPE_ADHOC)) continue; @@ -1491,9 +1487,6 @@ void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; ++i) { priv = adapter->priv[i]; - if (!priv) - continue; - if (adapter->if_ops.is_port_ready && !adapter->if_ops.is_port_ready(priv)) continue; diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index b130e057370f43..bab9ef37a1ab80 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -587,6 +587,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image, } struct mwl8k_cmd_pkt { + /* New members MUST be added within the __struct_group() macro below. */ __struct_group(mwl8k_cmd_pkt_hdr, hdr, __packed, __le16 code; __le16 length; @@ -596,6 +597,8 @@ struct mwl8k_cmd_pkt { ); char payload[]; } __packed; +static_assert(offsetof(struct mwl8k_cmd_pkt, payload) == sizeof(struct mwl8k_cmd_pkt_hdr), + "struct member likely outside of __struct_group()"); /* * Firmware loading. 
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index bb291fe314fb44..9d5561f441347b 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -929,14 +929,19 @@ void mt76_update_survey(struct mt76_phy *phy) } EXPORT_SYMBOL_GPL(mt76_update_survey); -void mt76_set_channel(struct mt76_phy *phy) +int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef, + bool offchannel) { struct mt76_dev *dev = phy->dev; - struct ieee80211_hw *hw = phy->hw; - struct cfg80211_chan_def *chandef = &hw->conf.chandef; - bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; int timeout = HZ / 5; + int ret; + cancel_delayed_work_sync(&phy->mac_work); + + mutex_lock(&dev->mutex); + set_bit(MT76_RESET, &phy->state); + + mt76_worker_disable(&dev->tx_worker); wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout); mt76_update_survey(phy); @@ -946,14 +951,34 @@ void mt76_set_channel(struct mt76_phy *phy) phy->chandef = *chandef; phy->chan_state = mt76_channel_state(phy, chandef->chan); + phy->offchannel = offchannel; if (!offchannel) phy->main_chan = chandef->chan; if (chandef->chan != phy->main_chan) memset(phy->chan_state, 0, sizeof(*phy->chan_state)); + mt76_worker_enable(&dev->tx_worker); + + ret = dev->drv->set_channel(phy); + + clear_bit(MT76_RESET, &phy->state); + mt76_worker_schedule(&dev->tx_worker); + + mutex_unlock(&dev->mutex); + + return ret; } -EXPORT_SYMBOL_GPL(mt76_set_channel); + +int mt76_update_channel(struct mt76_phy *phy) +{ + struct ieee80211_hw *hw = phy->hw; + struct cfg80211_chan_def *chandef = &hw->conf.chandef; + bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; + + return mt76_set_channel(phy, chandef, offchannel); +} +EXPORT_SYMBOL_GPL(mt76_update_channel); int mt76_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) @@ -1484,21 +1509,32 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct mt76_phy *phy = hw->priv; struct mt76_dev *dev = phy->dev; + enum mt76_sta_event ev; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) return mt76_sta_add(phy, vif, sta); - if (old_state == IEEE80211_STA_AUTH && - new_state == IEEE80211_STA_ASSOC && - dev->drv->sta_assoc) - dev->drv->sta_assoc(dev, vif, sta); - if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) mt76_sta_remove(dev, vif, sta); - return 0; + if (!dev->drv->sta_event) + return 0; + + if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) + ev = MT76_STA_EVENT_ASSOC; + else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTHORIZED) + ev = MT76_STA_EVENT_AUTHORIZE; + else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) + ev = MT76_STA_EVENT_DISASSOC; + else + return 0; + + return dev->drv->sta_event(dev, vif, sta, ev); } EXPORT_SYMBOL_GPL(mt76_sta_state); @@ -1521,6 +1557,7 @@ void mt76_wcid_init(struct mt76_wcid *wcid) { INIT_LIST_HEAD(&wcid->tx_list); skb_queue_head_init(&wcid->tx_pending); + skb_queue_head_init(&wcid->tx_offchannel); INIT_LIST_HEAD(&wcid->list); idr_init(&wcid->pktid); @@ -1529,7 +1566,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init); void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid) { - struct mt76_phy *phy = dev->phys[wcid->phy_idx]; + struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx); struct ieee80211_hw *hw; struct sk_buff_head list; struct sk_buff *skb; @@ -1697,14 
+1734,15 @@ int mt76_get_rate(struct mt76_dev *dev, struct ieee80211_supported_band *sband, int idx, bool cck) { + bool is_2g = sband->band == NL80211_BAND_2GHZ; int i, offset = 0, len = sband->n_bitrates; if (cck) { - if (sband != &dev->phy.sband_2g.sband) + if (!is_2g) return 0; idx &= ~BIT(2); /* short preamble */ - } else if (sband == &dev->phy.sband_2g.sband) { + } else if (is_2g) { offset = 4; } diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c index a8cafa39a56dda..98da82b74094dd 100644 --- a/drivers/net/wireless/mediatek/mt76/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -73,6 +73,8 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, bool wait_resp, struct sk_buff **ret_skb) { + unsigned int retry = 0; + struct sk_buff *orig_skb = NULL; unsigned long expires; int ret, seq; @@ -81,6 +83,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, mutex_lock(&dev->mcu.mutex); + if (dev->mcu_ops->mcu_skb_prepare_msg) { + ret = dev->mcu_ops->mcu_skb_prepare_msg(dev, skb, cmd, &seq); + if (ret < 0) + goto out; + } + +retry: + orig_skb = skb_get(skb); ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq); if (ret < 0) goto out; @@ -94,6 +104,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, do { skb = mt76_mcu_get_response(dev, expires); + if (!skb && !test_bit(MT76_MCU_RESET, &dev->phy.state) && + retry++ < dev->mcu_ops->max_retry) { + dev_err(dev->dev, "Retry message %08x (seq %d)\n", + cmd, seq); + skb = orig_skb; + goto retry; + } + ret = dev->mcu_ops->mcu_parse_response(dev, cmd, skb, seq); if (!ret && ret_skb) *ret_skb = skb; @@ -101,7 +119,9 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, dev_kfree_skb(skb); } while (ret == -EAGAIN); + out: + dev_kfree_skb(orig_skb); mutex_unlock(&dev->mcu.mutex); return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 4a58a78d5ed25d..0b75a45ad2e821 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -162,8 +162,8 @@ enum mt76_dfs_state { struct mt76_queue_buf { dma_addr_t addr; - u16 len; - bool skip_unmap; + u16 len:15, + skip_unmap:1; }; struct mt76_tx_info { @@ -230,11 +230,14 @@ struct mt76_queue { }; struct mt76_mcu_ops { + unsigned int max_retry; u32 headroom; u32 tailroom; int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data, int len, bool wait_resp); + int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, int *seq); int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb, int cmd, int *seq); int (*mcu_parse_response)(struct mt76_dev *dev, int cmd, @@ -347,6 +350,7 @@ struct mt76_wcid { u8 hw_key_idx2; u8 sta:1; + u8 sta_disabled:1; u8 amsdu:1; u8 phy_idx:2; u8 link_id:4; @@ -361,6 +365,7 @@ struct mt76_wcid { struct list_head tx_list; struct sk_buff_head tx_pending; + struct sk_buff_head tx_offchannel; struct list_head list; struct idr pktid; @@ -466,6 +471,12 @@ enum { MT76_STATE_WED_RESET, }; +enum mt76_sta_event { + MT76_STA_EVENT_ASSOC, + MT76_STA_EVENT_AUTHORIZE, + MT76_STA_EVENT_DISASSOC, +}; + struct mt76_hw_cap { bool has_2ghz; bool has_5ghz; @@ -487,6 +498,7 @@ struct mt76_driver_ops { u8 mcs_rates; void (*update_survey)(struct mt76_phy *phy); + int (*set_channel)(struct mt76_phy *phy); int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid 
*wcid, @@ -511,8 +523,8 @@ struct mt76_driver_ops { int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); - void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev); void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -768,6 +780,7 @@ struct mt76_phy { struct cfg80211_chan_def chandef; struct ieee80211_channel *main_chan; + bool offchannel; struct mt76_channel_state *chan_state; enum mt76_dfs_state dfs_state; @@ -1370,7 +1383,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw, enum ieee80211_frame_release_type reason, bool more_data); bool mt76_has_tx_pending(struct mt76_phy *phy); -void mt76_set_channel(struct mt76_phy *phy); +int mt76_update_channel(struct mt76_phy *phy); void mt76_update_survey(struct mt76_phy *phy); void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time); int mt76_get_survey(struct ieee80211_hw *hw, int idx, @@ -1484,6 +1497,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); void mt76_testmode_tx_pending(struct mt76_phy *phy); void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q, struct mt76_queue_entry *e); +int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef, + bool offchannel); /* usb */ static inline bool mt76u_urb_error(struct urb *urb) diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index c223f7c19e6da1..6457ee06bb5a15 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -107,7 +107,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) struct sk_buff *skb; int i, nframes; - if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) + if (dev->mphy.offchannel) return; data.dev = dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index ea017f22fff22c..863e5770df51d5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -29,7 +29,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) struct ieee80211_sta *sta; struct mt7603_sta *msta; struct mt76_wcid *wcid; - u8 tid = 0, hwq = 0; + u8 qid, tid = 0, hwq = 0; void *priv; int idx; u32 val; @@ -57,7 +57,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) if (ieee80211_is_data_qos(hdr->frame_control)) { tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TAG1D_MASK; - u8 qid = tid_to_ac[tid]; + qid = tid_to_ac[tid]; hwq = wmm_queue_map[qid]; skb_set_queue_mapping(skb, qid); } else if (ieee80211_is_data(hdr->frame_control)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c index d951cb81df833c..f5a6b03bc61d01 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c @@ -181,6 +181,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev) is_mt7688(dev)) dev->mphy.antenna_mask = 1; + dev->mphy.chainmask = dev->mphy.antenna_mask; mt76_eeprom_override(&dev->mphy); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index 6c55c72f28a282..86617a3e4328b0 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -15,9 +15,10 @@ const struct mt76_driver_ops mt7603_drv_ops = { .rx_poll_complete = mt7603_rx_poll_complete, .sta_ps = mt7603_sta_ps, .sta_add = mt7603_sta_add, - .sta_assoc = mt7603_sta_assoc, + .sta_event = mt7603_sta_event, .sta_remove = mt7603_sta_remove, .update_survey = mt7603_update_channel, + .set_channel = mt7603_set_channel, }; static void @@ -456,11 +457,13 @@ mt7603_init_txpower(struct mt7603_dev *dev, int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7); u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK]; bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1); + u8 ext_pa_pwr; int max_offset, cur_offset; int i; - if (ext_pa && is_mt7603(dev)) - target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7); + ext_pa_pwr = eeprom[MT_EE_TX_POWER_TSSI_OFF]; + if (ext_pa && is_mt7603(dev) && ext_pa_pwr != 0 && ext_pa_pwr != 0xff) + target_power = ext_pa_pwr & ~BIT(7); if (target_power & BIT(6)) target_power = -(target_power & GENMASK(5, 0)); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index f35fa643c0da23..574f74ad325da7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -133,30 +133,24 @@ void mt7603_init_edcca(struct mt7603_dev *dev) mt7603_edcca_set_strict(dev, false); } -static int -mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def) +int mt7603_set_channel(struct mt76_phy *mphy) { - struct mt7603_dev *dev = hw->priv; + struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76); + struct cfg80211_chan_def *def = &mphy->chandef; + u8 *rssi_data = (u8 *)dev->mt76.eeprom.data; int idx, ret; u8 bw = MT_BW_20; bool failed = false; - ieee80211_stop_queues(hw); - cancel_delayed_work_sync(&dev->mphy.mac_work); tasklet_disable(&dev->mt76.pre_tbtt_tasklet); - mutex_lock(&dev->mt76.mutex); - set_bit(MT76_RESET, &dev->mphy.state); - mt7603_beacon_set_timer(dev, -1, 0); - mt76_set_channel(&dev->mphy); mt7603_mac_stop(dev); if (def->width == NL80211_CHAN_WIDTH_40) bw = MT_BW_40; - dev->mphy.chandef = *def; mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw); ret = mt7603_mcu_set_channel(dev); if (ret) { @@ -180,10 +174,6 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def) mt7603_mac_set_timing(dev); mt7603_mac_start(dev); - clear_bit(MT76_RESET, &dev->mphy.state); - - mt76_txq_schedule_all(&dev->mphy); - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, msecs_to_jiffies(MT7603_WATCHDOG_TIME)); @@ -199,17 +189,14 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def) mt7603_init_edcca(dev); out: - if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)) + if (!mphy->offchannel) mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int); - mutex_unlock(&dev->mt76.mutex); tasklet_enable(&dev->mt76.pre_tbtt_tasklet); if (failed) mt7603_mac_work(&dev->mphy.mac_work.work); - ieee80211_wake_queues(hw); - return ret; } @@ -227,7 +214,7 @@ static int mt7603_set_sar_specs(struct ieee80211_hw *hw, if (err) return err; - return mt7603_set_channel(hw, &mphy->chandef); + return mt76_update_channel(mphy); } static int @@ -238,7 +225,7 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed) if (changed & (IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER)) - ret = mt7603_set_channel(hw, &hw->conf.chandef); + ret = mt76_update_channel(&dev->mphy); if 
(changed & IEEE80211_CONF_CHANGE_MONITOR) { mutex_lock(&dev->mt76.mutex); @@ -368,13 +355,19 @@ mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, return ret; } -void -mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +int +mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev) { struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); - mt7603_wtbl_update_cap(dev, sta); + if (ev == MT76_STA_EVENT_ASSOC) { + mutex_lock(&dev->mt76.mutex); + mt7603_wtbl_update_cap(dev, sta); + mutex_unlock(&dev->mt76.mutex); + } + + return 0; } void diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 9e58df7042ad16..55a034ccbacd94 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -213,6 +213,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev); void mt7603_pse_client_reset(struct mt7603_dev *dev); +int mt7603_set_channel(struct mt76_phy *mphy); int mt7603_mcu_set_channel(struct mt7603_dev *dev); int mt7603_mcu_set_eeprom(struct mt7603_dev *dev); void mt7603_mcu_exit(struct mt7603_dev *dev); @@ -245,8 +246,8 @@ void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); +int mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev); void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index f7722f67db576f..66ba3be2734344 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev) name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM; + hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev, mt7615_hwmon_groups); return PTR_ERR_OR_ZERO(hwmon); @@ -319,7 +322,7 @@ void mt7615_init_work(struct mt7615_dev *dev) mt7615_mcu_set_eeprom(dev); mt7615_mac_init(dev); mt7615_phy_init(dev); - mt7615_mcu_del_wtbl_all(dev); + mt76_connac_mcu_del_wtbl_all(&dev->mt76); mt7615_check_offload_capability(dev); } EXPORT_SYMBOL_GPL(mt7615_init_work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 50e262c1622f93..37697538800757 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -282,19 +282,14 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid); } -int mt7615_set_channel(struct mt7615_phy *phy) +int mt7615_set_channel(struct mt76_phy *mphy) { + struct mt7615_phy *phy = mphy->priv; struct mt7615_dev *dev = phy->dev; bool ext_phy = phy != &dev->phy; int ret; - cancel_delayed_work_sync(&phy->mt76->mac_work); - - mt7615_mutex_acquire(dev); - - set_bit(MT76_RESET, &phy->mt76->state); - - mt76_set_channel(phy->mt76); + mt76_connac_pm_wake(mphy, &dev->pm); if 
(is_mt7615(&dev->mt76) && dev->flash_eeprom) { ret = mt7615_mcu_apply_rx_dcoc(phy); @@ -325,11 +320,8 @@ int mt7615_set_channel(struct mt7615_phy *phy) phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy)); out: - clear_bit(MT76_RESET, &phy->mt76->state); + mt76_connac_power_save_sched(mphy, &dev->pm); - mt7615_mutex_release(dev); - - mt76_worker_schedule(&dev->mt76.tx_worker); if (!mt76_testmode_enabled(phy->mt76)) { unsigned long timeout = mt7615_get_macwork_timeout(dev); @@ -339,6 +331,7 @@ int mt7615_set_channel(struct mt7615_phy *phy) return ret; } +EXPORT_SYMBOL_GPL(mt7615_set_channel); static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -425,11 +418,7 @@ static int mt7615_set_sar_specs(struct ieee80211_hw *hw, if (mt7615_firmware_offload(phy->dev)) return mt76_connac_mcu_set_rate_txpower(phy->mt76); - ieee80211_stop_queues(hw); - err = mt7615_set_channel(phy); - ieee80211_wake_queues(hw); - - return err; + return mt76_update_channel(phy->mt76); } static int mt7615_config(struct ieee80211_hw *hw, u32 changed) @@ -448,9 +437,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed) mt7615_mutex_release(dev); } #endif - ieee80211_stop_queues(hw); - ret = mt7615_set_channel(phy); - ieee80211_wake_queues(hw); + ret = mt76_update_channel(phy->mt76); } mt7615_mutex_acquire(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index d50d967828be0d..96e34277fece9b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -394,7 +394,7 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb) if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC) return; - ieee80211_radar_detected(mphy->hw); + ieee80211_radar_detected(mphy->hw, NULL); dev->hw_pattern++; } @@ -847,6 +847,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, struct wtbl_req_hdr *wtbl_hdr; struct mt7615_sta *msta; bool new_entry = true; + int conn_state; int cmd, err; msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta; @@ -863,8 +864,9 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, else mvif->sta_added = true; } + conn_state = enable ? 
CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT; mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta, - enable, new_entry); + conn_state, new_entry); if (enable && sta) mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0, MT76_STA_INFO_STATE_ASSOC); @@ -1878,16 +1880,6 @@ int mt7615_mcu_set_dbdc(struct mt7615_dev *dev) sizeof(req), true); } -int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev) -{ - struct wtbl_req_hdr req = { - .operation = WTBL_RESET_ALL, - }; - - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WTBL_UPDATE), - &req, sizeof(req), true); -} - int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val) { struct { @@ -2151,7 +2143,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) if (cmd == MCU_EXT_CMD(SET_RX_PATH) || phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + else if (phy->mt76->offchannel) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, NL80211_IFTYPE_AP)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index 87a956ea3ad74f..dbb2c82407df27 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -182,6 +182,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, .sta_add = mt7615_mac_sta_add, .sta_remove = mt7615_mac_sta_remove, .update_survey = mt7615_update_channel, + .set_channel = mt7615_set_channel, }; struct mt76_bus_ops *bus_ops; struct ieee80211_ops *ops; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index a20322aae96725..530da48ce3ea9f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -399,7 +399,6 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, struct ieee80211_tx_rate *rates); void mt7615_pm_wake_work(struct work_struct *work); void mt7615_pm_power_save_work(struct work_struct *work); -int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev); int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd); int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, const struct ieee80211_tx_queue_params *params); @@ -457,7 +456,7 @@ void mt7615_roc_work(struct work_struct *work); void mt7615_roc_timer(struct timer_list *timer); void mt7615_init_txpower(struct mt7615_dev *dev, struct ieee80211_supported_band *sband); -int mt7615_set_channel(struct mt7615_phy *phy); +int mt7615_set_channel(struct mt76_phy *mphy); void mt7615_init_work(struct mt7615_dev *dev); int mt7615_mcu_restart(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c index 9692890ba51b7b..aebfc4576aa494 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -87,6 +87,7 @@ static int mt7663s_probe(struct sdio_func *func, .sta_add = mt7615_mac_sta_add, .sta_remove = mt7615_mac_sta_remove, .update_survey = mt7615_update_channel, + .set_channel = mt7615_set_channel, }; static const struct mt76_bus_ops mt7663s_ops = { .rr = mt76s_rr, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c index a3d1cfa729edad..03f5af84424bdd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c 
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c @@ -141,7 +141,7 @@ mt7615_tm_init(struct mt7615_phy *phy) mt7615_mcu_set_sku_en(phy, phy->mt76->test.state == MT76_TM_STATE_OFF); mutex_unlock(&dev->mt76.mutex); - mt7615_set_channel(phy); + mt76_update_channel(phy->mt76); mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0); mutex_lock(&dev->mt76.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index 9335ca0776febe..5020af52c68c13 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -123,6 +123,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf, .sta_add = mt7615_mac_sta_add, .sta_remove = mt7615_mac_sta_remove, .update_survey = mt7615_update_channel, + .set_channel = mt7615_set_channel, }; static struct mt76_bus_ops bus_ops = { .rr = mt7663u_rr, diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h index 5f132115ebfc4e..eb4765365b8ce4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h @@ -355,4 +355,11 @@ enum tx_port_idx { MT_TX_PORT_IDX_MCU }; +enum tx_frag_idx { + MT_TX_FRAG_NONE, + MT_TX_FRAG_FIRST, + MT_TX_FRAG_MID, + MT_TX_FRAG_LAST +}; + #endif /* __MT76_CONNAC2_MAC_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h index 353e6606984093..db0c29e65185ca 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -28,8 +28,6 @@ enum { #define MT_RXD0_MESH BIT(18) #define MT_RXD0_MHCP BIT(19) #define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16) -#define MT_RXD0_NORMAL_IP_SUM BIT(23) -#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24) #define MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16) #define MT_RXD0_SW_PKT_TYPE_MAP 0x380F @@ -80,6 +78,8 @@ enum { #define MT_RXD3_NORMAL_BEACON_UC BIT(21) #define MT_RXD3_NORMAL_CO_ANT BIT(22) #define MT_RXD3_NORMAL_FCS_ERR BIT(24) +#define MT_RXD3_NORMAL_IP_SUM BIT(26) +#define MT_RXD3_NORMAL_UDP_TCP_SUM BIT(27) #define MT_RXD3_NORMAL_VLAN2ETH BIT(31) /* RXD DW4 */ @@ -197,6 +197,13 @@ enum tx_mgnt_type { MT_TX_ADDBA, }; +enum tx_frag_idx { + MT_TX_FRAG_NONE, + MT_TX_FRAG_FIRST, + MT_TX_FRAG_MID, + MT_TX_FRAG_LAST +}; + #define MT_CT_INFO_APPLY_TXD BIT(0) #define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1) #define MT_CT_INFO_MGMT_FRAME BIT(2) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index b841bf628d0267..a3db65254e37fd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -391,6 +391,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi, bool multicast = is_multicast_ether_addr(hdr->addr1); u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; __le16 fc = hdr->frame_control; + __le16 sc = hdr->seq_ctrl; u8 fc_type, fc_stype; u32 val; @@ -432,6 +433,13 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi, info->flags & IEEE80211_TX_CTL_USE_MINRATE) val |= MT_TXD2_FIX_RATE; + if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc)) + val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST); + else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc)) + val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID); + else if 
(!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc)) + val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST); + txwi[2] |= cpu_to_le32(val); if (ieee80211_is_beacon(fc)) { @@ -440,7 +448,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi, } if (info->flags & IEEE80211_TX_CTL_INJECTED) { - u16 seqno = le16_to_cpu(hdr->seq_ctrl); + u16 seqno = le16_to_cpu(sc); if (ieee80211_is_back_req(hdr->frame_control)) { struct ieee80211_bar *bar; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 4dce03ddbfa442..864246f9408899 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif, }; struct sk_buff *skb; - if (wcid && !wcid->sta) + if (wcid && !wcid->sta && !wcid->sta_disabled) hdr.muar_idx = 0xe; mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo, @@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv); void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta, - bool enable, bool newly) + int conn_state, bool newly) { struct sta_rec_basic *basic; struct tlv *tlv; @@ -382,13 +382,9 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, basic = (struct sta_rec_basic *)tlv; basic->extra_info = cpu_to_le16(EXTRA_INFO_VER); - if (enable) { - if (newly) - basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW); - basic->conn_state = CONN_STATE_PORT_SECURE; - } else { - basic->conn_state = CONN_STATE_DISCONNECT; - } + if (newly && conn_state != CONN_STATE_DISCONNECT) + basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW); + basic->conn_state = conn_state; if (!link_sta) { basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC); @@ -1051,15 +1047,18 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy, struct wtbl_req_hdr *wtbl_hdr; struct tlv *sta_wtbl; struct sk_buff *skb; + int conn_state; skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid); if (IS_ERR(skb)) return PTR_ERR(skb); + conn_state = info->enable ? CONN_STATE_PORT_SECURE : + CONN_STATE_DISCONNECT; link_sta = info->sta ? &info->sta->deflink : NULL; if (info->sta || !info->offload_fw) mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, - link_sta, info->enable, + link_sta, conn_state, info->newly); if (info->sta && info->enable) mt76_connac_mcu_sta_tlv(phy, skb, info->sta, @@ -2850,6 +2849,17 @@ int mt76_connac_mcu_restart(struct mt76_dev *dev) } EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart); +int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev) +{ + struct wtbl_req_hdr req = { + .operation = WTBL_RESET_ALL, + }; + + return mt76_mcu_send_msg(dev, MCU_EXT_CMD(WTBL_UPDATE), + &req, sizeof(req), true); +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_del_wtbl_all); + int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index, u8 rx_sel, u8 val) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 4242d436de2621..1b0e80dfc346b8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -115,21 +115,26 @@ struct mt76_connac2_mcu_uni_txd { } __packed __aligned(4); struct mt76_connac2_mcu_rxd { - __le32 rxd[6]; + /* New members MUST be added within the struct_group() macro below. 
*/ + struct_group_tagged(mt76_connac2_mcu_rxd_hdr, hdr, + __le32 rxd[6]; - __le16 len; - __le16 pkt_type_id; + __le16 len; + __le16 pkt_type_id; - u8 eid; - u8 seq; - u8 option; - u8 rsv; - u8 ext_eid; - u8 rsv1[2]; - u8 s2d_index; + u8 eid; + u8 seq; + u8 option; + u8 rsv; + u8 ext_eid; + u8 rsv1[2]; + u8 s2d_index; + ); u8 tlv[]; }; +static_assert(offsetof(struct mt76_connac2_mcu_rxd, tlv) == sizeof(struct mt76_connac2_mcu_rxd_hdr), + "struct member likely outside of struct_group_tagged()"); struct mt76_connac2_patch_hdr { char build_date[16]; @@ -1898,7 +1903,7 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif); void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta, - bool enable, bool newly); + int state, bool newly); void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_sta *sta, void *sta_wtbl, @@ -2032,6 +2037,7 @@ void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, void *sta_wtbl, void *wtbl_tlv); int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter); int mt76_connac_mcu_restart(struct mt76_dev *dev); +int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev); int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index, u8 rx_sel, u8 val); int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index bcd24c9072ec9e..4de45a56812d67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include "mt76x0.h" #include "eeprom.h" #include "../mt76x02_phy.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c index 07380cce8755ea..4aa2dcedc87448 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -8,16 +8,15 @@ #include #include "mt76x0.h" -static void -mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) +int mt76x0_set_channel(struct mt76_phy *mphy) { - cancel_delayed_work_sync(&dev->cal_work); + struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76); + mt76x02_pre_tbtt_enable(dev, false); if (mt76_is_mmio(&dev->mt76)) tasklet_disable(&dev->dfs_pd.dfs_tasklet); - mt76_set_channel(&dev->mphy); - mt76x0_phy_set_channel(dev, chandef); + mt76x0_phy_set_channel(dev, &mphy->chandef); mt76x02_mac_cc_reset(dev); mt76x02_edcca_init(dev); @@ -28,8 +27,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) } mt76x02_pre_tbtt_enable(dev, true); - mt76_txq_schedule_all(&dev->mphy); + return 0; } +EXPORT_SYMBOL_GPL(mt76x0_set_channel); int mt76x0_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar) @@ -61,13 +61,10 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed) { struct mt76x02_dev *dev = hw->priv; - mutex_lock(&dev->mt76.mutex); + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) + mt76_update_channel(&dev->mphy); - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ieee80211_stop_queues(hw); - mt76x0_set_channel(dev, &hw->conf.chandef); - ieee80211_wake_queues(hw); - } + mutex_lock(&dev->mt76.mutex); if (changed & IEEE80211_CONF_CHANGE_POWER) { struct mt76_phy *mphy = &dev->mphy; diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index 99dcb8feb9f771..50f75534496837 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -49,6 +49,7 @@ void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset); void mt76x0_mac_stop(struct mt76x02_dev *dev); int mt76x0_config(struct ieee80211_hw *hw, u32 changed); +int mt76x0_set_channel(struct mt76_phy *mphy); int mt76x0_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 2ecee7c5c80d9f..1eb955f3ca130b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -159,6 +159,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) MT_DRV_SW_RX_AIRTIME, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, + .set_channel = mt76x0_set_channel, .tx_prepare_skb = mt76x02_tx_prepare_skb, .tx_complete_skb = mt76x02_tx_complete_skb, .rx_skb = mt76x02_queue_rx_skb, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 390f502e97f03a..b031c500b74156 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -217,6 +217,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, .drv_flags = MT_DRV_SW_RX_AIRTIME, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, + .set_channel = mt76x0_set_channel, .tx_prepare_skb = mt76x02u_tx_prepare_skb, .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index 024a5c0a5a5750..7a07636d09c65f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c @@ -630,7 +630,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t) radar_detected = mt76x02_dfs_check_detection(dev); if (radar_detected) { /* sw detector rx radar pattern */ - ieee80211_radar_detected(dev->mt76.hw); + ieee80211_radar_detected(dev->mt76.hw, NULL); mt76x02_dfs_detector_reset(dev); return; @@ -658,7 +658,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t) /* hw detector rx radar pattern */ dfs_pd->stats[i].hw_pattern++; - ieee80211_radar_detected(dev->mt76.hw); + ieee80211_radar_detected(dev->mt76.hw, NULL); mt76x02_dfs_detector_reset(dev); return; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index 5d402cf2951cb3..a5e3392c0b48f9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -4,7 +4,7 @@ * Copyright (C) 2018 Lorenzo Bianconi */ -#include +#include #include "mt76x02_eeprom.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 35b7ebc2c9c625..4a49a3036a46b2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -22,7 +22,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t) struct sk_buff *skb; int i; - if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) + if (dev->mphy.offchannel) return; 
__skb_queue_head_init(&data.q); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 29b9a15f8dbe79..0e1ede9314d8f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -188,10 +188,7 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work) struct sk_buff *skb; int nbeacons; - if (!dev->mt76.beacon_mask) - return; - - if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) + if (!dev->mt76.beacon_mask || dev->mphy.offchannel) return; __skb_queue_head_init(&data.q); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index 1fe5f5a02f9377..156b16c17b2b44 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -5,7 +5,7 @@ #include #include -#include +#include #include "mt76x2.h" #include "eeprom.h" diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index be1217329a7723..f051721bb00eb9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -47,6 +47,8 @@ void mt76x2_phy_power_on(struct mt76x02_dev *dev); void mt76x2_stop_hardware(struct mt76x02_dev *dev); int mt76x2_eeprom_init(struct mt76x02_dev *dev); int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel); +int mt76x2e_set_channel(struct mt76_phy *phy); +int mt76x2u_set_channel(struct mt76_phy *phy); void mt76x2_phy_set_antenna(struct mt76x02_dev *dev); int mt76x2_phy_start(struct mt76x02_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 30959746e92427..67c9d1caa0bd63 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -25,6 +25,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) MT_DRV_SW_RX_AIRTIME, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, + .set_channel = mt76x2e_set_channel, .tx_prepare_skb = mt76x02_tx_prepare_skb, .tx_complete_skb = mt76x02_tx_complete_skb, .rx_skb = mt76x02_queue_rx_skb, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 6accea55131906..eb70130d2711ac 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -32,33 +32,25 @@ mt76x2_stop(struct ieee80211_hw *hw, bool suspend) mt76x2_stop_hardware(dev); } -static void -mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) +int mt76x2e_set_channel(struct mt76_phy *phy) { - cancel_delayed_work_sync(&dev->cal_work); + struct mt76x02_dev *dev = container_of(phy->dev, struct mt76x02_dev, mt76); + tasklet_disable(&dev->mt76.pre_tbtt_tasklet); tasklet_disable(&dev->dfs_pd.dfs_tasklet); - mutex_lock(&dev->mt76.mutex); - set_bit(MT76_RESET, &dev->mphy.state); - - mt76_set_channel(&dev->mphy); - mt76x2_mac_stop(dev, true); - mt76x2_phy_set_channel(dev, chandef); + mt76x2_phy_set_channel(dev, &phy->chandef); mt76x02_mac_cc_reset(dev); mt76x02_dfs_init_params(dev); mt76x2_mac_resume(dev); - clear_bit(MT76_RESET, &dev->mphy.state); - mutex_unlock(&dev->mt76.mutex); - tasklet_enable(&dev->dfs_pd.dfs_tasklet); tasklet_enable(&dev->mt76.pre_tbtt_tasklet); - 
mt76_txq_schedule_all(&dev->mphy); + return 0; } static int @@ -95,11 +87,8 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed) mutex_unlock(&dev->mt76.mutex); - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ieee80211_stop_queues(hw); - mt76x2_set_channel(dev, &hw->conf.chandef); - ieee80211_wake_queues(hw); - } + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) + mt76_update_channel(&dev->mphy); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index e92bb871f2318a..e832ad53e2393b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -32,6 +32,7 @@ static int mt76x2u_probe(struct usb_interface *intf, .drv_flags = MT_DRV_SW_RX_AIRTIME, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, + .set_channel = mt76x2u_set_channel, .tx_prepare_skb = mt76x02u_tx_prepare_skb, .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index ba0241c3667262..83e7061b10e2d2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -31,32 +31,20 @@ static void mt76x2u_stop(struct ieee80211_hw *hw, bool suspend) mt76x2u_stop_hw(dev); } -static int -mt76x2u_set_channel(struct mt76x02_dev *dev, - struct cfg80211_chan_def *chandef) +int mt76x2u_set_channel(struct mt76_phy *mphy) { + struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76); int err; - cancel_delayed_work_sync(&dev->cal_work); mt76x02_pre_tbtt_enable(dev, false); - - mutex_lock(&dev->mt76.mutex); - set_bit(MT76_RESET, &dev->mphy.state); - - mt76_set_channel(&dev->mphy); - mt76x2_mac_stop(dev, false); - err = mt76x2u_phy_set_channel(dev, chandef); + err = mt76x2u_phy_set_channel(dev, &mphy->chandef); mt76x02_mac_cc_reset(dev); mt76x2_mac_resume(dev); - clear_bit(MT76_RESET, &dev->mphy.state); - mutex_unlock(&dev->mt76.mutex); - mt76x02_pre_tbtt_enable(dev, true); - mt76_txq_schedule_all(&dev->mphy); return err; } @@ -93,11 +81,8 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed) mutex_unlock(&dev->mt76.mutex); - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ieee80211_stop_queues(hw); - err = mt76x2u_set_channel(dev, &hw->conf.chandef); - ieee80211_wake_queues(hw); - } + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) + mt76_update_channel(&dev->mphy); return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index a978f434dc5e64..6bef96e3d2a3d9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy) name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM; cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops); if (!IS_ERR(cdev)) { @@ -398,6 +400,7 @@ mt7915_init_wiphy(struct mt7915_phy *phy) ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, SUPPORTS_TX_FRAG); hw->max_tx_fragments = 4; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 8008ce3fa6c7ea..cf77ce0c875991 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -1448,6 +1448,7 @@ mt7915_mac_full_reset(struct mt7915_dev *dev) dev->recovery.hw_full_reset = true; + set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); ieee80211_stop_queues(mt76_hw(dev)); if (ext_phy) @@ -1462,26 +1463,27 @@ mt7915_mac_full_reset(struct mt7915_dev *dev) if (!mt7915_mac_restart(dev)) break; } - mutex_unlock(&dev->mt76.mutex); if (i == 10) dev_err(dev->mt76.dev, "chip full reset failed\n"); - ieee80211_restart_hw(mt76_hw(dev)); - if (ext_phy) - ieee80211_restart_hw(ext_phy->hw); + spin_lock_bh(&dev->mt76.sta_poll_lock); + while (!list_empty(&dev->mt76.sta_poll_list)) + list_del_init(dev->mt76.sta_poll_list.next); + spin_unlock_bh(&dev->mt76.sta_poll_lock); - ieee80211_wake_queues(mt76_hw(dev)); - if (ext_phy) - ieee80211_wake_queues(ext_phy->hw); + memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask)); + dev->mt76.vif_mask = 0; + i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA); + dev->mt76.global_wcid.idx = i; dev->recovery.hw_full_reset = false; - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, - MT7915_WATCHDOG_TIME); + + mutex_unlock(&dev->mt76.mutex); + + ieee80211_restart_hw(mt76_hw(dev)); if (ext_phy) - ieee80211_queue_delayed_work(ext_phy->hw, - &ext_phy->mac_work, - MT7915_WATCHDOG_TIME); + ieee80211_restart_hw(ext_phy->hw); } /* system error recovery */ @@ -1537,12 +1539,14 @@ void mt7915_mac_reset_work(struct work_struct *work) set_bit(MT76_RESET, &phy2->mt76->state); cancel_delayed_work_sync(&phy2->mt76->mac_work); } + + mutex_lock(&dev->mt76.mutex); + mt76_worker_disable(&dev->mt76.tx_worker); mt76_for_each_q_rx(&dev->mt76, i) napi_disable(&dev->mt76.napi[i]); napi_disable(&dev->mt76.tx_napi); - mutex_lock(&dev->mt76.mutex); if (mtk_wed_device_active(&dev->mt76.mmio.wed)) mtk_wed_device_stop(&dev->mt76.mmio.wed); @@ -1692,6 +1696,11 @@ void mt7915_reset(struct mt7915_dev *dev) return; } + if ((READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) { + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + } + queue_work(dev->mt76.wq, &dev->reset_work); wake_up(&dev->reset_wait); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 049223df9beb15..d75e8dea1fbdc8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -245,7 +245,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); - idx = MT7915_WTBL_RESERVED - mvif->mt76.idx; + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev)); + if (idx < 0) + return -ENOSPC; INIT_LIST_HEAD(&mvif->sta.rc_list); INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); @@ -272,7 +274,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, memset(&mvif->cap, -1, sizeof(mvif->cap)); mt7915_mcu_add_bss_info(phy, vif, true); - mt7915_mcu_add_sta(dev, vif, NULL, true); + mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, true); rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); out: @@ -291,7 +293,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, int idx = msta->wcid.idx; mt7915_mcu_add_bss_info(phy, vif, false); - mt7915_mcu_add_sta(dev, vif, NULL, false); + mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false); + mt76_wcid_mask_clear(dev->mt76.wcid_mask, 
mvif->sta.wcid.idx); mutex_lock(&dev->mt76.mutex); mt76_testmode_reset(phy->mt76, true); @@ -317,18 +320,12 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, mt76_wcid_cleanup(&dev->mt76, &msta->wcid); } -int mt7915_set_channel(struct mt7915_phy *phy) +int mt7915_set_channel(struct mt76_phy *mphy) { + struct mt7915_phy *phy = mphy->priv; struct mt7915_dev *dev = phy->dev; int ret; - cancel_delayed_work_sync(&phy->mt76->mac_work); - - mutex_lock(&dev->mt76.mutex); - set_bit(MT76_RESET, &phy->mt76->state); - - mt76_set_channel(phy->mt76); - if (dev->cal) { ret = mt7915_mcu_apply_tx_dpd(phy); if (ret) @@ -347,11 +344,6 @@ int mt7915_set_channel(struct mt7915_phy *phy) phy->noise = 0; out: - clear_bit(MT76_RESET, &phy->mt76->state); - mutex_unlock(&dev->mt76.mutex); - - mt76_txq_schedule_all(phy->mt76); - if (!mt76_testmode_enabled(phy->mt76)) ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work, @@ -374,6 +366,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, int idx = key->keyidx; int err = 0; + if (sta && !wcid->sta) + return -EOPNOTSUPP; + /* The hardware does not support per-STA RX GTK, fallback * to software mode for these. */ @@ -464,11 +459,9 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) mutex_unlock(&dev->mt76.mutex); } #endif - ieee80211_stop_queues(hw); - ret = mt7915_set_channel(phy); + ret = mt76_update_channel(phy->mt76); if (ret) return ret; - ieee80211_wake_queues(hw); } if (changed & (IEEE80211_CONF_CHANGE_POWER | @@ -564,8 +557,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw, MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS | MT_WF_RFCR_DROP_RTS | - MT_WF_RFCR_DROP_CTL_RSV | - MT_WF_RFCR_DROP_NDPA); + MT_WF_RFCR_DROP_CTL_RSV); *total_flags = flags; rxfilter = phy->rxfilter; @@ -633,7 +625,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, if (set_bss_info == 1) mt7915_mcu_add_bss_info(phy, vif, true); if (set_sta == 1) - mt7915_mcu_add_sta(dev, vif, NULL, true); + mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false); if (changed & BSS_CHANGED_ERP_CTS_PROT) mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot); @@ -668,7 +660,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, if (set_bss_info == 0) mt7915_mcu_add_bss_info(phy, vif, false); if (set_sta == 0) - mt7915_mcu_add_sta(dev, vif, NULL, false); + mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false); mutex_unlock(&dev->mt76.mutex); } @@ -706,7 +698,7 @@ mt7915_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, err = mt7915_mcu_add_bss_info(phy, vif, true); if (err) goto out; - err = mt7915_mcu_add_sta(dev, vif, NULL, true); + err = mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false); out: mutex_unlock(&dev->mt76.mutex); @@ -720,7 +712,7 @@ mt7915_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct mt7915_dev *dev = mt7915_hw_dev(hw); mutex_lock(&dev->mt76.mutex); - mt7915_mcu_add_sta(dev, vif, NULL, false); + mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false); mutex_unlock(&dev->mt76.mutex); } @@ -743,8 +735,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; bool ext_phy = mvif->phy != &dev->phy; - int ret, idx; - u32 addr; + int idx; idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA); if (idx < 0) @@ -753,25 +744,61 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct 
ieee80211_vif *vif, INIT_LIST_HEAD(&msta->rc_list); INIT_LIST_HEAD(&msta->wcid.poll_list); msta->vif = mvif; - msta->wcid.sta = 1; + msta->wcid.sta_disabled = 1; msta->wcid.idx = idx; msta->wcid.phy_idx = ext_phy; - msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; msta->jiffies = jiffies; ewma_avg_signal_init(&msta->avg_ack_signal); mt7915_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, true); - ret = mt7915_mcu_add_sta(dev, vif, sta, true); - if (ret) - return ret; + return 0; +} - addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30); - mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0); +int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + int i, ret; + u32 addr; + + switch (ev) { + case MT76_STA_EVENT_ASSOC: + ret = mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_CONNECT, true); + if (ret) + return ret; + + addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30); + mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0); + + ret = mt7915_mcu_add_rate_ctrl(dev, vif, sta, false); + if (ret) + return ret; + + msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + msta->wcid.sta = 1; + msta->wcid.sta_disabled = 0; + + return 0; + + case MT76_STA_EVENT_AUTHORIZE: + return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false); - return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false); + case MT76_STA_EVENT_DISASSOC: + for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++) + mt7915_mac_twt_teardown_flow(dev, msta, i); + + mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, false); + msta->wcid.sta_disabled = 1; + msta->wcid.sta = 0; + return 0; + } + + return 0; } void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, @@ -779,16 +806,10 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, { struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - int i; - - mt7915_mcu_add_sta(dev, vif, sta, false); mt7915_mac_wtbl_update(dev, msta->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++) - mt7915_mac_twt_teardown_flow(dev, msta, i); - spin_lock_bh(&mdev->sta_poll_lock); if (!list_empty(&msta->wcid.poll_list)) list_del_init(&msta->wcid.poll_list); @@ -895,22 +916,6 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return ret; } -static int -mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST, - IEEE80211_STA_NONE); -} - -static int -mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE, - IEEE80211_STA_NOTEXIST); -} - static int mt7915_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) @@ -1089,8 +1094,7 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, struct rate_info *txrate = &msta->wcid.rate; struct rate_info rxrate = {}; - if (is_mt7915(&phy->dev->mt76) && - !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) { + if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) { sinfo->rxrate = rxrate; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } @@ -1164,6 +1168,10 @@ static void 
mt7915_sta_rc_update(struct ieee80211_hw *hw, { struct mt7915_phy *phy = mt7915_hw_phy(hw); struct mt7915_dev *dev = phy->dev; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + + if (!msta->wcid.sta) + return; mt7915_sta_rc_work(&changed, sta); ieee80211_queue_work(hw, &dev->rc_work); @@ -1207,6 +1215,9 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw, else clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags); + if (!msta->wcid.sta) + return; + mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta); } @@ -1223,6 +1234,9 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw, else clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + if (!msta->wcid.sta) + return; + mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta); } @@ -1578,6 +1592,12 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw, mutex_unlock(&dev->mt76.mutex); } +static int +mt7915_set_frag_threshold(struct ieee80211_hw *hw, u32 val) +{ + return 0; +} + static int mt7915_set_radar_background(struct ieee80211_hw *hw, struct cfg80211_chan_def *chandef) @@ -1660,6 +1680,17 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw, } #endif +static void +mt7915_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct mt7915_phy *phy = mt7915_hw_phy(hw); + + ieee80211_wake_queues(hw); + ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, + MT7915_WATCHDOG_TIME); +} + const struct ieee80211_ops mt7915_ops = { .add_chanctx = ieee80211_emulate_add_chanctx, .remove_chanctx = ieee80211_emulate_remove_chanctx, @@ -1676,8 +1707,7 @@ const struct ieee80211_ops mt7915_ops = { .bss_info_changed = mt7915_bss_info_changed, .start_ap = mt7915_start_ap, .stop_ap = mt7915_stop_ap, - .sta_add = mt7915_sta_add, - .sta_remove = mt7915_sta_remove, + .sta_state = mt76_sta_state, .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, .sta_rc_update = mt7915_sta_rc_update, .set_key = mt7915_set_key, @@ -1708,6 +1738,7 @@ const struct ieee80211_ops mt7915_ops = { .sta_set_decap_offload = mt7915_sta_set_decap_offload, .add_twt_setup = mt7915_mac_add_twt_setup, .twt_teardown_request = mt7915_twt_teardown_request, + .set_frag_threshold = mt7915_set_frag_threshold, CFG80211_TESTMODE_CMD(mt76_testmode_cmd) CFG80211_TESTMODE_DUMP(mt76_testmode_dump) #ifdef CONFIG_MAC80211_DEBUGFS @@ -1718,4 +1749,5 @@ const struct ieee80211_ops mt7915_ops = { .net_fill_forward_path = mt7915_net_fill_forward_path, .net_setup_tc = mt76_wed_net_setup_tc, #endif + .reconfig_complete = mt7915_reconfig_complete, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 2185cd24e2e1cd..87d0dd040001c5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -157,12 +157,21 @@ static int mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd, struct sk_buff *skb, int seq) { + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); struct mt76_connac2_mcu_rxd *rxd; int ret = 0; if (!skb) { dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", cmd, seq); + + if (!test_and_set_bit(MT76_MCU_RESET, &dev->mphy.state)) { + dev->recovery.restart = true; + wake_up(&dev->mt76.mcu.wait); + queue_work(dev->mt76.wq, &dev->reset_work); + wake_up(&dev->reset_wait); + } + return -ETIMEDOUT; } @@ -191,11 +200,6 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, { struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); enum mt76_mcuq_id qid; - 
int ret; - - ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, wait_seq); - if (ret) - return ret; if (cmd == MCU_CMD(FW_SCATTER)) qid = MT_MCUQ_FWDL; @@ -293,7 +297,7 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) &dev->rdd2_chandef, GFP_ATOMIC); else - ieee80211_radar_detected(mphy->hw); + ieee80211_radar_detected(mphy->hw, NULL); dev->hw_pattern++; } @@ -690,13 +694,17 @@ int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev, { struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv; struct mt7915_vif *mvif = msta->vif; + int ret; + mt76_worker_disable(&dev->mt76.tx_worker); if (enable && !params->amsdu) msta->wcid.amsdu = false; + ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + MCU_EXT_CMD(STA_REC_UPDATE), + enable, true); + mt76_worker_enable(&dev->mt76.tx_worker); - return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, - MCU_EXT_CMD(STA_REC_UPDATE), - enable, true); + return ret; } int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev, @@ -1653,7 +1661,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, } int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable) + struct ieee80211_sta *sta, int conn_state, bool newly) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct ieee80211_link_sta *link_sta; @@ -1670,13 +1678,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, return PTR_ERR(skb); /* starec basic */ - mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable, - !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx])); - if (!enable) - goto out; - + mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, + conn_state, newly); /* tag order is in accordance with firmware dependency. 
*/ - if (sta) { + if (sta && conn_state != CONN_STATE_DISCONNECT) { /* starec bfer */ mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta); /* starec ht */ @@ -1687,12 +1692,17 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, mt76_connac_mcu_sta_uapsd(skb, vif, sta); } - ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta); - if (ret) { - dev_kfree_skb(skb); - return ret; + if (newly || conn_state != CONN_STATE_DISCONNECT) { + ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta); + if (ret) { + dev_kfree_skb(skb); + return ret; + } } + if (conn_state == CONN_STATE_DISCONNECT) + goto out; + if (sta) { /* starec amsdu */ mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta); @@ -2352,6 +2362,8 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev) if (ret) return ret; + mt76_connac_mcu_del_wtbl_all(&dev->mt76); + if ((mtk_wed_device_active(&dev->mt76.mmio.wed) && is_mt7915(&dev->mt76)) || !mtk_wed_get_rx_capa(&dev->mt76.mmio.wed)) @@ -2376,7 +2388,9 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev) int mt7915_mcu_init(struct mt7915_dev *dev) { static const struct mt76_mcu_ops mt7915_mcu_ops = { + .max_retry = 3, .headroom = sizeof(struct mt76_connac2_mcu_txd), + .mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message, .mcu_skb_send_msg = mt7915_mcu_send_message, .mcu_parse_response = mt7915_mcu_parse_response, }; @@ -2747,7 +2761,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL || + else if (phy->mt76->offchannel || phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index b41ac4aaced7fb..49476a4182fd14 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -29,7 +29,7 @@ struct mt7915_mcu_thermal_ctrl { } __packed; struct mt7915_mcu_thermal_notify { - struct mt76_connac2_mcu_rxd rxd; + struct mt76_connac2_mcu_rxd_hdr rxd; struct mt7915_mcu_thermal_ctrl ctrl; __le32 temperature; @@ -37,7 +37,7 @@ struct mt7915_mcu_thermal_notify { } __packed; struct mt7915_mcu_csa_notify { - struct mt76_connac2_mcu_rxd rxd; + struct mt76_connac2_mcu_rxd_hdr rxd; u8 omac_idx; u8 csa_count; @@ -46,7 +46,7 @@ struct mt7915_mcu_csa_notify { } __packed; struct mt7915_mcu_bcc_notify { - struct mt76_connac2_mcu_rxd rxd; + struct mt76_connac2_mcu_rxd_hdr rxd; u8 band_idx; u8 omac_idx; @@ -55,7 +55,7 @@ struct mt7915_mcu_bcc_notify { } __packed; struct mt7915_mcu_rdd_report { - struct mt76_connac2_mcu_rxd rxd; + struct mt76_connac2_mcu_rxd_hdr rxd; u8 band_idx; u8 long_detected; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index d6ecd698cdcd08..44e112b8b5b368 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -927,8 +927,10 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev, .rx_check = mt7915_rx_check, .rx_poll_complete = mt7915_rx_poll_complete, .sta_add = mt7915_mac_sta_add, + .sta_event = mt7915_mac_sta_event, .sta_remove = mt7915_mac_sta_remove, .update_survey = mt7915_update_channel, + .set_channel = mt7915_set_channel, }; struct mt7915_dev *dev; struct mt76_dev *mdev; diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index a30d08eb0656a2..ac0b1f0eb27c14 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -444,7 +444,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, struct ieee80211_vif *vif, int enable); int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable); + struct ieee80211_sta *sta, int conn_state, bool newly); int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev, struct ieee80211_ampdu_params *params, bool add); @@ -463,7 +463,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool changed); int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int mt7915_set_channel(struct mt7915_phy *phy); +int mt7915_set_channel(struct mt76_phy *mphy); int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd); int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif); int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req); @@ -560,6 +560,8 @@ void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, void mt7915_mac_set_timing(struct mt7915_phy *phy); int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev); void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7915_mac_work(struct work_struct *work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c index 0d76ae31b37669..d534fff5c952bd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c @@ -404,6 +404,7 @@ static void mt7915_tm_init(struct mt7915_phy *phy, bool en) { struct mt7915_dev *dev = phy->dev; + int state; if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) return; @@ -415,7 +416,8 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en) mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en); mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en); - mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, en); + state = en ? 
CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT; + mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, state, true); if (!en) mt7915_tm_set_tam_arb(phy, en, 0); @@ -425,7 +427,7 @@ static void mt7915_tm_update_channel(struct mt7915_phy *phy) { mutex_unlock(&phy->dev->mt76.mutex); - mt7915_set_channel(phy); + mt76_update_channel(phy->mt76); mutex_lock(&phy->dev->mt76.mutex); mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index ef0c721d26e332..d1d64fa7d35d02 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy) name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM; hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7921_hwmon_groups); @@ -83,7 +85,7 @@ mt7921_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev) } /* UNII-4 */ - if (IS_UNII_INVALID(0, 5850, 5925)) + if (IS_UNII_INVALID(0, 5845, 5925)) ch->flags |= IEEE80211_CHAN_DISABLED; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 23b228804289be..a7f5bfbc02ed1f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -455,37 +455,30 @@ static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw, return mt7921_abort_roc(phy, mvif); } -static int mt7921_set_channel(struct mt792x_phy *phy) +int mt7921_set_channel(struct mt76_phy *mphy) { + struct mt792x_phy *phy = mphy->priv; struct mt792x_dev *dev = phy->dev; int ret; - cancel_delayed_work_sync(&phy->mt76->mac_work); - - mt792x_mutex_acquire(dev); - set_bit(MT76_RESET, &phy->mt76->state); - - mt76_set_channel(phy->mt76); - + mt76_connac_pm_wake(mphy, &dev->pm); ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH)); if (ret) goto out; mt792x_mac_set_timeing(phy); - mt792x_mac_reset_counters(phy); phy->noise = 0; out: - clear_bit(MT76_RESET, &phy->mt76->state); - mt792x_mutex_release(dev); + mt76_connac_power_save_sched(mphy, &dev->pm); - mt76_worker_schedule(&dev->mt76.tx_worker); - ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work, + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, MT792x_WATCHDOG_TIME); return ret; } +EXPORT_SYMBOL_GPL(mt7921_set_channel); static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -620,11 +613,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed) int ret = 0; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ieee80211_stop_queues(hw); - ret = mt7921_set_channel(phy); + ret = mt76_update_channel(phy->mt76); if (ret) return ret; - ieee80211_wake_queues(hw); } mt792x_mutex_acquire(dev); @@ -831,13 +822,16 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(mt7921_mac_sta_add); -void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev) { struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + if (ev != 
MT76_STA_EVENT_ASSOC) + return 0; + mt792x_mutex_acquire(dev); if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) @@ -853,8 +847,10 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC); mt792x_mutex_release(dev); + + return 0; } -EXPORT_SYMBOL_GPL(mt7921_mac_sta_assoc); +EXPORT_SYMBOL_GPL(mt7921_mac_sta_event); void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index 394fcd7993450b..02c1de8620a7f2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -890,7 +890,7 @@ int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd) if (cmd == MCU_EXT_CMD(SET_RX_PATH) || dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; - else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + else if (phy->mt76->offchannel) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef, NL80211_IFTYPE_AP)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 6c5392c5d207e6..16c89815c0b8a3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -186,6 +186,7 @@ int __mt7921_start(struct mt792x_phy *phy); int mt7921_register_device(struct mt792x_dev *dev); void mt7921_unregister_device(struct mt792x_dev *dev); int mt7921_run_firmware(struct mt792x_dev *dev); +int mt7921_set_channel(struct mt76_phy *mphy); int mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, bool enable); int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, @@ -244,8 +245,8 @@ int mt7921_mac_init(struct mt792x_dev *dev); bool mt7921_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask); int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); +int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev); void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7921_mac_reset_work(struct work_struct *work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index a7430216a80df0..67723c22aea6ce 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -244,9 +244,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev, .rx_skb = mt7921_queue_rx_skb, .rx_poll_complete = mt792x_rx_poll_complete, .sta_add = mt7921_mac_sta_add, - .sta_assoc = mt7921_mac_sta_assoc, + .sta_event = mt7921_mac_sta_event, .sta_remove = mt7921_mac_sta_remove, .update_survey = mt792x_update_channel, + .set_channel = mt7921_set_channel, }; static const struct mt792x_hif_ops mt7921_pcie_ops = { .init_reset = mt7921e_init_reset, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c index 004d942ee11a86..95f526f7bb991d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c 
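For context on the callback change in the hunks above: the mt7921/mt7925 drivers drop the one-shot .sta_assoc driver op in favour of a .sta_event op that receives an enum mt76_sta_event, and mt7915 switches its mac80211 ops to the shared mt76_sta_state handler. A minimal sketch of such a driver hook, assuming (as mt7921_mac_sta_event() does in this series) that only the association event needs work, is shown below; the function name is made up purely for illustration:

    /* Illustrative sketch only: act on the association event and ignore
     * any other event the mt76 core reports; returning 0 lets the shared
     * mt76_sta_state() path continue through the remaining transitions.
     */
    static int example_mac_sta_event(struct mt76_dev *mdev,
                                     struct ieee80211_vif *vif,
                                     struct ieee80211_sta *sta,
                                     enum mt76_sta_event ev)
    {
            if (ev != MT76_STA_EVENT_ASSOC)
                    return 0;

            /* push association-time state (rates, WTBL, power save) to firmware */
            return 0;
    }
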
@@ -100,9 +100,10 @@ static int mt7921s_probe(struct sdio_func *func, .rx_skb = mt7921_queue_rx_skb, .rx_check = mt7921_rx_check, .sta_add = mt7921_mac_sta_add, - .sta_assoc = mt7921_mac_sta_assoc, + .sta_event = mt7921_mac_sta_event, .sta_remove = mt7921_mac_sta_remove, .update_survey = mt792x_update_channel, + .set_channel = mt7921_set_channel, }; static const struct mt76_bus_ops mt7921s_ops = { .rr = mt76s_rr, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index 8b7c03c47598de..8aa4f0203208ab 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -151,9 +151,10 @@ static int mt7921u_probe(struct usb_interface *usb_intf, .rx_skb = mt7921_queue_rx_skb, .rx_check = mt7921_rx_check, .sta_add = mt7921_mac_sta_add, - .sta_assoc = mt7921_mac_sta_assoc, + .sta_event = mt7921_mac_sta_event, .sta_remove = mt7921_mac_sta_remove, .update_survey = mt792x_update_channel, + .set_channel = mt7921_set_channel, }; static const struct mt792x_hif_ops hif_ops = { .mcu_init = mt7921u_mcu_init, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c index cf36750cf70923..634c42bbf23f67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c @@ -352,7 +352,7 @@ mt7925_mac_fill_rx_rate(struct mt792x_dev *dev, static int mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) { - u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM; struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; bool hdr_trans, unicast, insert_ccmp_hdr = false; u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info; @@ -362,7 +362,6 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) struct mt792x_phy *phy = &dev->phy; struct ieee80211_supported_band *sband; u32 csum_status = *(u32 *)skb->cb; - u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); @@ -420,7 +419,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) if (!sband->channels) return -EINVAL; - if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask && + if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask && !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) skb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 8c0768bf9343b3..791c8b00e11264 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -439,6 +439,19 @@ static void mt7925_roc_iter(void *priv, u8 *mac, mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id); } +void mt7925_roc_abort_sync(struct mt792x_dev *dev) +{ + struct mt792x_phy *phy = &dev->phy; + + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + ieee80211_iterate_interfaces(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_roc_iter, (void *)phy); +} +EXPORT_SYMBOL_GPL(mt7925_roc_abort_sync); + void mt7925_roc_work(struct work_struct *work) { struct mt792x_phy *phy; @@ -1078,23 +1091,26 @@ static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev, mt792x_mutex_release(dev); } -void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, - 
struct ieee80211_sta *sta) +int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev) { + struct ieee80211_link_sta *link_sta = &sta->deflink; + + if (ev != MT76_STA_EVENT_ASSOC) + return 0; + if (ieee80211_vif_is_mld(vif)) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - struct ieee80211_link_sta *link_sta; link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id); - mt7925_mac_set_links(mdev, vif); - - mt7925_mac_link_sta_assoc(mdev, vif, link_sta); - } else { - mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink); } + + mt7925_mac_link_sta_assoc(mdev, vif, link_sta); + + return 0; } -EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc); +EXPORT_SYMBOL_GPL(mt7925_mac_sta_event); static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, @@ -1109,6 +1125,8 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev, msta = (struct mt792x_sta *)link_sta->sta->drv_priv; mlink = mt792x_sta_to_link(msta, link_id); + mt7925_roc_abort_sync(dev); + mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid); mt76_connac_pm_wake(&dev->mphy, &dev->pm); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 9dc22fbe25d335..748ea6adbc6b39 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -638,6 +638,9 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name) for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) { clc = (const struct mt7925_clc *)(clc_base + offset); + if (clc->idx > ARRAY_SIZE(phy->clc)) + break; + /* do not init buf again if chip reset triggered */ if (phy->clc[clc->idx]) continue; @@ -1770,16 +1773,19 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy, struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv; struct mt76_dev *dev = phy->dev; struct sk_buff *skb; + int conn_state; skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid, MT7925_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); + conn_state = info->enable ? 
CONN_STATE_PORT_SECURE : + CONN_STATE_DISCONNECT; if (info->link_sta) mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->link_sta, - info->enable, info->newly); + conn_state, info->newly); if (info->link_sta && info->enable) { mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta); mt7925_mcu_sta_ht_tlv(skb, info->link_sta); @@ -2171,12 +2177,12 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy, tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req)); req = (struct bss_rlm_tlv *)tlv; - req->control_channel = chandef->chan->hw_value, - req->center_chan = ieee80211_frequency_to_channel(freq1), - req->center_chan2 = ieee80211_frequency_to_channel(freq2), - req->tx_streams = hweight8(phy->antenna_mask), - req->ht_op_info = 4, /* set HT 40M allowed */ - req->rx_streams = hweight8(phy->antenna_mask), + req->control_channel = chandef->chan->hw_value; + req->center_chan = ieee80211_frequency_to_channel(freq1); + req->center_chan2 = ieee80211_frequency_to_channel(freq2); + req->tx_streams = hweight8(phy->antenna_mask); + req->ht_op_info = 4; /* set HT 40M allowed */ + req->rx_streams = hweight8(phy->antenna_mask); req->band = band; switch (chandef->width) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h index 669f3a079d0485..f5c02e5f506633 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -219,8 +219,8 @@ int mt7925_mac_init(struct mt792x_dev *dev); int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask); -void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); +int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum mt76_sta_event ev); void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7925_mac_reset_work(struct work_struct *work); @@ -307,6 +307,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, enum mt7925_roc_req type, u8 token_id); int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, u8 token_id); +void mt7925_roc_abort_sync(struct mt792x_dev *dev); int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *wait_seq); int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index 6e4f4e78c3505d..9aec675450f267 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -279,7 +279,7 @@ static int mt7925_pci_probe(struct pci_dev *pdev, .rx_skb = mt7925_queue_rx_skb, .rx_poll_complete = mt792x_rx_poll_complete, .sta_add = mt7925_mac_sta_add, - .sta_assoc = mt7925_mac_sta_assoc, + .sta_event = mt7925_mac_sta_event, .sta_remove = mt7925_mac_sta_remove, .update_survey = mt792x_update_channel, }; @@ -449,6 +449,8 @@ static int mt7925_pci_suspend(struct device *device) cancel_delayed_work_sync(&pm->ps_work); cancel_work_sync(&pm->wake_work); + mt7925_roc_abort_sync(dev); + err = mt792x_mcu_drv_pmctrl(dev); if (err < 0) goto restore_suspend; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c index 
1e0f094fc9059d..682db1bab21c6a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c @@ -142,7 +142,7 @@ static int mt7925u_probe(struct usb_interface *usb_intf, .rx_skb = mt7925_queue_rx_skb, .rx_check = mt7925_rx_check, .sta_add = mt7925_mac_sta_add, - .sta_assoc = mt7925_mac_sta_assoc, + .sta_event = mt7925_mac_sta_event, .sta_remove = mt7925_mac_sta_remove, .update_survey = mt792x_update_channel, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h index 7fa74d59cc48f8..ab12616ec2b87c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x.h +++ b/drivers/net/wireless/mediatek/mt76/mt792x.h @@ -68,7 +68,7 @@ struct mt792x_fw_features { enum { MT792x_CLC_POWER, - MT792x_CLC_CHAN, + MT792x_CLC_POWER_EXT, MT792x_CLC_MAX_NUM, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index 283df84f1b4335..5e96973226bbb5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -42,6 +42,7 @@ static const struct ieee80211_iface_combination if_comb[] = { BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160), + .beacon_int_min_gcd = 100, } }; @@ -941,8 +942,12 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy) cap = &phy->mt76->sband_5g.sband.vht_cap.cap; *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | - IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | - FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, sts - 1); + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + + if (is_mt7996(phy->mt76->dev)) + *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3); + else + *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 4); *cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | @@ -987,9 +992,15 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy, IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; elem->phy_cap_info[2] |= c; - c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | - IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 | - IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4; + c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE; + + if (is_mt7996(phy->mt76->dev)) + c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4; + else + c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5; + elem->phy_cap_info[4] |= c; /* do not support NG16 due to spec D4.0 changes subcarrier idx */ @@ -1011,8 +1022,6 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy, return; elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; - if (vif == NL80211_IFTYPE_AP) - elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, sts - 1) | @@ -1020,6 +1029,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy, sts - 1); elem->phy_cap_info[5] |= c; + if (vif != NL80211_IFTYPE_AP) + return; + + elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; + c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB; elem->phy_cap_info[6] |= c; @@ -1179,12 +1193,12 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, IEEE80211_EHT_MAC_CAP0_OM_CONTROL; eht_cap_elem->phy_cap_info[0] = - IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ | 
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; - val = max_t(u8, sts - 1, 3); + /* Set the maximum capability regardless of the antenna configuration. */ + val = is_mt7992(phy->mt76->dev) ? 4 : 3; eht_cap_elem->phy_cap_info[0] |= u8_encode_bits(u8_get_bits(val, BIT(0)), IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK); @@ -1193,30 +1207,36 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)), IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) | u8_encode_bits(val, - IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) | - u8_encode_bits(val, - IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK); eht_cap_elem->phy_cap_info[2] = u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) | - u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) | - u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK); + u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK); + + if (band == NL80211_BAND_6GHZ) { + eht_cap_elem->phy_cap_info[0] |= + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + + eht_cap_elem->phy_cap_info[1] |= + u8_encode_bits(val, + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); + + eht_cap_elem->phy_cap_info[2] |= + u8_encode_bits(sts - 1, + IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK); + } eht_cap_elem->phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | - IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK; eht_cap_elem->phy_cap_info[4] = u8_encode_bits(min_t(int, sts - 1, 2), IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK); eht_cap_elem->phy_cap_info[5] = - IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US, IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) | u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)), @@ -1230,14 +1250,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) | u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK); - eht_cap_elem->phy_cap_info[7] = - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; - val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) | u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX); #define SET_EHT_MAX_NSS(_bw, _val) do { \ @@ -1248,8 +1260,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, SET_EHT_MAX_NSS(80, val); SET_EHT_MAX_NSS(160, val); - SET_EHT_MAX_NSS(320, val); + if (band == NL80211_BAND_6GHZ) + SET_EHT_MAX_NSS(320, val); #undef SET_EHT_MAX_NSS + + if (iftype != NL80211_IFTYPE_AP) + return; + + eht_cap_elem->phy_cap_info[3] |= + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK; + + eht_cap_elem->phy_cap_info[7] = + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + 
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ; + + if (band != NL80211_BAND_6GHZ) + return; + + eht_cap_elem->phy_cap_info[7] |= + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; } static void diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index bc7111a71f98ca..0d21414e2c884a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -435,7 +435,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); u32 rxd4 = le32_to_cpu(rxd[4]); - u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM; u32 csum_status = *(u32 *)skb->cb; u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP; bool is_mesh = (rxd0 & mesh_mask) == mesh_mask; @@ -497,7 +497,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, if (!sband->channels) return -EINVAL; - if ((rxd0 & csum_mask) == csum_mask && + if ((rxd3 & csum_mask) == csum_mask && !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -746,7 +746,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); bool multicast = is_multicast_ether_addr(hdr->addr1); u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; - __le16 fc = hdr->frame_control; + __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl; u8 fc_type, fc_stype; u32 val; @@ -780,6 +780,15 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi, val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); + if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc)) + val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST); + else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc)) + val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID); + else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc)) + val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST); + else + val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE); + txwi[2] |= cpu_to_le32(val); txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast)); @@ -789,7 +798,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi, } if (info->flags & IEEE80211_TX_CTL_INJECTED) { - u16 seqno = le16_to_cpu(hdr->seq_ctrl); + u16 seqno = le16_to_cpu(sc); if (ieee80211_is_back_req(hdr->frame_control)) { struct ieee80211_bar *bar; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index bce0820382194d..39f071ece35e6e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -206,7 +206,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw, mvif->mt76.omac_idx = idx; mvif->phy = phy; mvif->mt76.band_idx = band_idx; - mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP; + mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 
0 : 3; ret = mt7996_mcu_add_dev_info(phy, vif, true); if (ret) @@ -291,22 +291,19 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw, mt76_wcid_cleanup(&dev->mt76, &msta->wcid); } -int mt7996_set_channel(struct mt7996_phy *phy) +int mt7996_set_channel(struct mt76_phy *mphy) { - struct mt7996_dev *dev = phy->dev; + struct mt7996_phy *phy = mphy->priv; int ret; - cancel_delayed_work_sync(&phy->mt76->mac_work); - - mutex_lock(&dev->mt76.mutex); - set_bit(MT76_RESET, &phy->mt76->state); - - mt76_set_channel(phy->mt76); - ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_SWITCH); if (ret) goto out; + ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH); + if (ret) + goto out; + ret = mt7996_dfs_init_radar_detector(phy); mt7996_mac_cca_stats_reset(phy); @@ -314,13 +311,7 @@ int mt7996_set_channel(struct mt7996_phy *phy) phy->noise = 0; out: - clear_bit(MT76_RESET, &phy->mt76->state); - mutex_unlock(&dev->mt76.mutex); - - mt76_txq_schedule_all(phy->mt76); - - ieee80211_queue_delayed_work(phy->mt76->hw, - &phy->mt76->mac_work, + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, MT7996_WATCHDOG_TIME); return ret; @@ -360,14 +351,14 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_SMS4: break; case WLAN_CIPHER_SUITE_AES_CMAC: - wcid_keyidx = &wcid->hw_key_idx2; - key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; - fallthrough; case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: - if (key->keyidx == 6 || key->keyidx == 7) + if (key->keyidx == 6 || key->keyidx == 7) { + wcid_keyidx = &wcid->hw_key_idx2; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; break; + } fallthrough; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: @@ -411,11 +402,9 @@ static int mt7996_config(struct ieee80211_hw *hw, u32 changed) int ret; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ieee80211_stop_queues(hw); - ret = mt7996_set_channel(phy); + ret = mt76_update_channel(phy->mt76); if (ret) return ret; - ieee80211_wake_queues(hw); } if (changed & (IEEE80211_CONF_CHANGE_POWER | diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 2e4fa9f48dfbee..6c445a9dbc03d8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -371,7 +371,7 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb) &dev->rdd2_chandef, GFP_ATOMIC); else - ieee80211_radar_detected(mphy->hw); + ieee80211_radar_detected(mphy->hw, NULL); dev->hw_pattern++; } @@ -735,7 +735,7 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb) static struct tlv * mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len) { - struct tlv *ptlv = skb_put(skb, len); + struct tlv *ptlv = skb_put_zero(skb, len); ptlv->tag = cpu_to_le16(tag); ptlv->len = cpu_to_le16(len); @@ -822,7 +822,7 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct bss_info_uni_mbssid *mbssid; struct tlv *tlv; - if (!vif->bss_conf.bssid_indicator) + if (!vif->bss_conf.bssid_indicator && enable) return; tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid)); @@ -1429,10 +1429,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif, if (bfee) return vif->bss_conf.eht_su_beamformee && - EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]); + EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]); else return vif->bss_conf.eht_su_beamformer && 
- EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]); + EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]); } if (sta->deflink.he_cap.has_he) { @@ -1544,6 +1544,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map); u8 snd_dim, sts; + if (!vc) + return; + bf->tx_mode = MT_PHY_TYPE_HE_SU; mt7996_mcu_sta_sounding_rate(bf); @@ -1653,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_phy *phy = mvif->phy; - int tx_ant = hweight8(phy->mt76->chainmask) - 1; + int tx_ant = hweight16(phy->mt76->chainmask) - 1; struct sta_rec_bf *bf; struct tlv *tlv; static const u8 matrix[4][4] = { @@ -2160,6 +2163,7 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta; struct mt7996_sta *msta; struct sk_buff *skb; + int conn_state; int ret; msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta; @@ -2172,8 +2176,9 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, return PTR_ERR(skb); /* starec basic */ + conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT; mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, - enable, newly); + conn_state, newly); if (!enable) goto out; @@ -3460,7 +3465,7 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag) if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL || + else if (phy->mt76->offchannel || phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, @@ -3923,8 +3928,9 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action) tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en)); req_mod_en = (struct bf_mod_en_ctrl *)tlv; - req_mod_en->bf_num = 3; - req_mod_en->bf_bitmap = GENMASK(2, 0); + req_mod_en->bf_num = mt7996_band_valid(dev, MT_BAND2) ? 3 : 2; + req_mod_en->bf_bitmap = mt7996_band_valid(dev, MT_BAND2) ? 
+ GENMASK(2, 0) : GENMASK(1, 0); break; } default: diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index 928a9663b49e0c..40e45fb2b62607 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -620,6 +620,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev, .sta_add = mt7996_mac_sta_add, .sta_remove = mt7996_mac_sta_remove, .update_survey = mt7996_update_channel, + .set_channel = mt7996_set_channel, }; struct mt7996_dev *dev; struct mt76_dev *mdev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 177cfff31120ed..ab8c9070630b0f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -468,7 +468,7 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif, struct ieee80211_he_obss_pd *he_obss_pd); int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool changed); -int mt7996_set_channel(struct mt7996_phy *phy); +int mt7996_set_channel(struct mt76_phy *mphy); int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag); int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif); int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev, diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 5cf6edee4d1342..ce193e625666be 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -313,6 +313,9 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb, return idx; wcid = (struct mt76_wcid *)sta->drv_priv; + if (!wcid->sta) + return idx; + q->entry[idx].wcid = wcid->idx; if (!non_aql) @@ -330,6 +333,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct sk_buff_head *head; if (mt76_testmode_enabled(phy)) { ieee80211_free_txskb(phy->hw, skb); @@ -345,9 +349,15 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx); - spin_lock_bh(&wcid->tx_pending.lock); - __skb_queue_tail(&wcid->tx_pending, skb); - spin_unlock_bh(&wcid->tx_pending.lock); + if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || + (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) + head = &wcid->tx_offchannel; + else + head = &wcid->tx_pending; + + spin_lock_bh(&head->lock); + __skb_queue_tail(head, skb); + spin_unlock_bh(&head->lock); spin_lock_bh(&phy->tx_lock); if (list_empty(&wcid->tx_list)) @@ -478,7 +488,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, return idx; do { - if (test_bit(MT76_RESET, &phy->state)) + if (test_bit(MT76_RESET, &phy->state) || phy->offchannel) return -EBUSY; if (stop || mt76_txq_stopped(q)) @@ -522,7 +532,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) while (1) { int n_frames = 0; - if (test_bit(MT76_RESET, &phy->state)) + if (test_bit(MT76_RESET, &phy->state) || phy->offchannel) return -EBUSY; if (dev->queue_ops->tx_cleanup && @@ -568,7 +578,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid) { int len; - if (qid >= 4) + if (qid >= 4 || phy->offchannel) return; local_bh_disable(); @@ -586,7 +596,8 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid) 
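Aside on the mt76/tx.c changes around this point: each wcid now keeps two pending-frame queues. A short sketch of the selection rule added to mt76_tx(), pulled out into a stand-alone helper purely for illustration (no such helper exists in the patch), is:

    /* Off-channel frames and frames flagged to ignore the configured rate
     * mask go to wcid->tx_offchannel so they can still be scheduled while
     * phy->offchannel is set; everything else stays on wcid->tx_pending,
     * which the scheduler skips until the phy is back on its channel.
     */
    static struct sk_buff_head *
    example_select_tx_queue(struct mt76_wcid *wcid,
                            struct ieee80211_tx_info *info)
    {
            if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
                (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
                    return &wcid->tx_offchannel;

            return &wcid->tx_pending;
    }
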
EXPORT_SYMBOL_GPL(mt76_txq_schedule); static int -mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid) +mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid, + struct sk_buff_head *head) { struct mt76_dev *dev = phy->dev; struct ieee80211_sta *sta; @@ -594,8 +605,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid) struct sk_buff *skb; int ret = 0; - spin_lock(&wcid->tx_pending.lock); - while ((skb = skb_peek(&wcid->tx_pending)) != NULL) { + spin_lock(&head->lock); + while ((skb = skb_peek(head)) != NULL) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int qid = skb_get_queue_mapping(skb); @@ -607,13 +618,13 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid) qid = MT_TXQ_PSD; q = phy->q_tx[qid]; - if (mt76_txq_stopped(q)) { + if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) { ret = -1; break; } - __skb_unlink(skb, &wcid->tx_pending); - spin_unlock(&wcid->tx_pending.lock); + __skb_unlink(skb, head); + spin_unlock(&head->lock); sta = wcid_to_sta(wcid); spin_lock(&q->lock); @@ -621,15 +632,17 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid) dev->queue_ops->kick(dev, q); spin_unlock(&q->lock); - spin_lock(&wcid->tx_pending.lock); + spin_lock(&head->lock); } - spin_unlock(&wcid->tx_pending.lock); + spin_unlock(&head->lock); return ret; } static void mt76_txq_schedule_pending(struct mt76_phy *phy) { + LIST_HEAD(tx_list); + if (list_empty(&phy->tx_list)) return; @@ -637,22 +650,27 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy) rcu_read_lock(); spin_lock(&phy->tx_lock); - while (!list_empty(&phy->tx_list)) { - struct mt76_wcid *wcid = NULL; + list_splice_init(&phy->tx_list, &tx_list); + while (!list_empty(&tx_list)) { + struct mt76_wcid *wcid; int ret; - wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list); + wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list); list_del_init(&wcid->tx_list); spin_unlock(&phy->tx_lock); - ret = mt76_txq_schedule_pending_wcid(phy, wcid); + ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel); + if (ret >= 0 && !phy->offchannel) + ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending); spin_lock(&phy->tx_lock); - if (ret) { - if (list_empty(&wcid->tx_list)) - list_add_tail(&wcid->tx_list, &phy->tx_list); + if (!skb_queue_empty(&wcid->tx_pending) && + !skb_queue_empty(&wcid->tx_offchannel) && + list_empty(&wcid->tx_list)) + list_add_tail(&wcid->tx_list, &phy->tx_list); + + if (ret < 0) break; - } } spin_unlock(&phy->tx_lock); diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h index 81e559ec1c7b3b..cda9c267516ec9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.h +++ b/drivers/net/wireless/mediatek/mt7601u/dma.h @@ -7,7 +7,7 @@ #ifndef __MT7601U_DMA_H #define __MT7601U_DMA_H -#include +#include #include #define MT_DMA_HDR_LEN 4 diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c index 625bebe6053887..d4d31a54655692 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "mt7601u.h" #include "eeprom.h" #include "mac.h" diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index 
3c48e1a57b24cd..bba53307b960d9 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -384,6 +384,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss, struct wilc_join_bss_param *param; u8 rates_len = 0; int ies_len; + u64 ies_tsf; int ret; param = kzalloc(sizeof(*param), GFP_KERNEL); @@ -399,6 +400,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss, return NULL; } ies_len = ies->len; + ies_tsf = ies->tsf; rcu_read_unlock(); param->beacon_period = cpu_to_le16(bss->beacon_interval); @@ -454,7 +456,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *)&noa_attr, sizeof(noa_attr)); if (ret > 0) { - param->tsf_lo = cpu_to_le32(ies->tsf); + param->tsf_lo = cpu_to_le32(ies_tsf); param->noa_enabled = 1; param->idx = noa_attr.index; if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) { diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 0043f7a0fdf97b..b4da05d5a498a0 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -174,19 +174,18 @@ static int wilc_sdio_probe(struct sdio_func *func, wilc->bus_data = sdio_priv; wilc->dev = &func->dev; - wilc->rtc_clk = devm_clk_get_optional(&func->card->dev, "rtc"); + wilc->rtc_clk = devm_clk_get_optional_enabled(&func->card->dev, "rtc"); if (IS_ERR(wilc->rtc_clk)) { ret = PTR_ERR(wilc->rtc_clk); goto dispose_irq; } - clk_prepare_enable(wilc->rtc_clk); wilc_sdio_init(wilc, false); ret = wilc_load_mac_from_nv(wilc); if (ret) { pr_err("Can not retrieve MAC address from chip\n"); - goto clk_disable_unprepare; + goto dispose_irq; } wilc_sdio_deinit(wilc); @@ -195,14 +194,12 @@ static int wilc_sdio_probe(struct sdio_func *func, NL80211_IFTYPE_STATION, false); if (IS_ERR(vif)) { ret = PTR_ERR(vif); - goto clk_disable_unprepare; + goto dispose_irq; } dev_info(&func->dev, "Driver Initializing success\n"); return 0; -clk_disable_unprepare: - clk_disable_unprepare(wilc->rtc_clk); dispose_irq: irq_dispose_mapping(wilc->dev_irq_num); wilc_netdev_cleanup(wilc); @@ -217,7 +214,6 @@ static void wilc_sdio_remove(struct sdio_func *func) struct wilc *wilc = sdio_get_drvdata(func); struct wilc_sdio *sdio_priv = wilc->bus_data; - clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); kfree(sdio_priv->cmd53_buf); kfree(sdio_priv); @@ -977,6 +973,9 @@ static int wilc_sdio_suspend(struct device *dev) dev_info(dev, "sdio suspend\n"); + if (!wilc->initialized) + return 0; + if (!IS_ERR(wilc->rtc_clk)) clk_disable_unprepare(wilc->rtc_clk); @@ -999,6 +998,13 @@ static int wilc_sdio_resume(struct device *dev) struct wilc *wilc = sdio_get_drvdata(func); dev_info(dev, "sdio resume\n"); + + if (!wilc->initialized) + return 0; + + if (!IS_ERR(wilc->rtc_clk)) + clk_prepare_enable(wilc->rtc_clk); + wilc_sdio_init(wilc, true); wilc_sdio_enable_interrupt(wilc); diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 5ff940c53ad9c0..05b577b1068ea3 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -228,12 +228,11 @@ static int wilc_bus_probe(struct spi_device *spi) if (ret < 0) goto netdev_cleanup; - wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc"); + wilc->rtc_clk = devm_clk_get_optional_enabled(&spi->dev, "rtc"); if (IS_ERR(wilc->rtc_clk)) { ret = PTR_ERR(wilc->rtc_clk); goto netdev_cleanup; } - 
clk_prepare_enable(wilc->rtc_clk); dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n", enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off"); @@ -266,7 +265,6 @@ static int wilc_bus_probe(struct spi_device *spi) return 0; power_down: - clk_disable_unprepare(wilc->rtc_clk); wilc_wlan_power(wilc, false); netdev_cleanup: wilc_netdev_cleanup(wilc); @@ -280,7 +278,6 @@ static void wilc_bus_remove(struct spi_device *spi) struct wilc *wilc = spi_get_drvdata(spi); struct wilc_spi *spi_priv = wilc->bus_data; - clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); kfree(spi_priv); } diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c index 15334940287d86..56d1139ba8bcce 100644 --- a/drivers/net/wireless/purelifi/plfxlc/usb.c +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include "mac.h" diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 663d77770fce70..8b97accf6638fc 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -837,7 +837,7 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev, static int qtnf_start_radar_detection(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_chan_def *chandef, - u32 cac_time_ms) + u32 cac_time_ms, int link_id) { struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); int ret; diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 76b07db284f892..71840f41b73ce1 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -520,21 +520,21 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif, cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL); break; case QLINK_RADAR_CAC_FINISHED: - if (!vif->wdev.cac_started) + if (!vif->wdev.links[0].cac_started) break; cfg80211_cac_event(vif->netdev, &chandef, - NL80211_RADAR_CAC_FINISHED, GFP_KERNEL); + NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0); break; case QLINK_RADAR_CAC_ABORTED: - if (!vif->wdev.cac_started) + if (!vif->wdev.links[0].cac_started) break; cfg80211_cac_event(vif->netdev, &chandef, - NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); + NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, 0); break; case QLINK_RADAR_CAC_STARTED: - if (vif->wdev.cac_started) + if (vif->wdev.links[0].cac_started) break; if (!wiphy_ext_feature_isset(wiphy, @@ -542,7 +542,7 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif, break; cfg80211_cac_event(vif->netdev, &chandef, - NL80211_RADAR_CAC_STARTED, GFP_KERNEL); + NL80211_RADAR_CAC_STARTED, GFP_KERNEL, 0); break; default: pr_warn("%s: unhandled radar event %u\n", diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig index 44ad94757a03e9..14d0343368ac00 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig +++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig @@ -20,9 +20,8 @@ config RTL8XXXU memory footprint than the vendor drivers and benefits from the in kernel mac80211 stack. - It can coexist with drivers from drivers/staging/rtl8723au, - drivers/staging/rtl8192u, and drivers/net/wireless/rtlwifi, - but you will need to control which module you wish to load. + It can coexist with the rtlwifi driver but you will need + to control which module you wish to load. 
To compile this driver as a module, choose M here: the module will be called rtl8xxxu. If unsure, say N. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c index 043fa364e70148..7891c988dd5f03 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c @@ -8110,6 +8110,12 @@ static const struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817f, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x819a, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8754, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817c, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192cu_fops}, /* Tested by Larry Finger */ {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig index 22838ede03cd80..02b0d698413bec 100644 --- a/drivers/net/wireless/realtek/rtw88/Kconfig +++ b/drivers/net/wireless/realtek/rtw88/Kconfig @@ -12,6 +12,7 @@ if RTW88 config RTW88_CORE tristate + select WANT_DEV_COREDUMP config RTW88_PCI tristate diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index de3332eb7a227a..a99776af56c27f 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) struct rtw_coex_stat *coex_stat = &coex->stat; struct rtw_efuse *efuse = &rtwdev->efuse; u8 table_case, tdma_case; - bool wl_cpt_test = false, bt_cpt_test = false; rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); @@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); if (efuse->share_ant) { /* Shared-Ant */ - if (wl_cpt_test) { - if (coex_stat->wl_gl_busy) { - table_case = 20; - tdma_case = 17; - } else { - table_case = 10; - tdma_case = 15; - } - } else if (bt_cpt_test) { - table_case = 26; - tdma_case = 26; - } else { - if (coex_stat->wl_gl_busy && - coex_stat->wl_noisy_level == 0) - table_case = 14; - else - table_case = 10; + if (coex_stat->wl_gl_busy && + coex_stat->wl_noisy_level == 0) + table_case = 14; + else + table_case = 10; - if (coex_stat->wl_gl_busy) - tdma_case = 15; - else - tdma_case = 20; - } + if (coex_stat->wl_gl_busy) + tdma_case = 15; + else + tdma_case = 20; } else { /* Non-Shared-Ant */ table_case = 112; @@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) tdma_case = 120; } - if (wl_cpt_test) - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]); - else - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 5b2036798159af..c26a6905fd15ad 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -43,6 +43,62 @@ struct rtw_debugfs_priv { }; }; +struct 
rtw_debugfs { + struct rtw_debugfs_priv mac_0; + struct rtw_debugfs_priv mac_1; + struct rtw_debugfs_priv mac_2; + struct rtw_debugfs_priv mac_3; + struct rtw_debugfs_priv mac_4; + struct rtw_debugfs_priv mac_5; + struct rtw_debugfs_priv mac_6; + struct rtw_debugfs_priv mac_7; + struct rtw_debugfs_priv mac_10; + struct rtw_debugfs_priv mac_11; + struct rtw_debugfs_priv mac_12; + struct rtw_debugfs_priv mac_13; + struct rtw_debugfs_priv mac_14; + struct rtw_debugfs_priv mac_15; + struct rtw_debugfs_priv mac_16; + struct rtw_debugfs_priv mac_17; + struct rtw_debugfs_priv bb_8; + struct rtw_debugfs_priv bb_9; + struct rtw_debugfs_priv bb_a; + struct rtw_debugfs_priv bb_b; + struct rtw_debugfs_priv bb_c; + struct rtw_debugfs_priv bb_d; + struct rtw_debugfs_priv bb_e; + struct rtw_debugfs_priv bb_f; + struct rtw_debugfs_priv bb_18; + struct rtw_debugfs_priv bb_19; + struct rtw_debugfs_priv bb_1a; + struct rtw_debugfs_priv bb_1b; + struct rtw_debugfs_priv bb_1c; + struct rtw_debugfs_priv bb_1d; + struct rtw_debugfs_priv bb_1e; + struct rtw_debugfs_priv bb_1f; + struct rtw_debugfs_priv bb_2c; + struct rtw_debugfs_priv bb_2d; + struct rtw_debugfs_priv bb_40; + struct rtw_debugfs_priv bb_41; + struct rtw_debugfs_priv rf_dump; + struct rtw_debugfs_priv tx_pwr_tbl; + struct rtw_debugfs_priv write_reg; + struct rtw_debugfs_priv h2c; + struct rtw_debugfs_priv rf_write; + struct rtw_debugfs_priv rf_read; + struct rtw_debugfs_priv read_reg; + struct rtw_debugfs_priv fix_rate; + struct rtw_debugfs_priv dump_cam; + struct rtw_debugfs_priv rsvd_page; + struct rtw_debugfs_priv phy_info; + struct rtw_debugfs_priv coex_enable; + struct rtw_debugfs_priv coex_info; + struct rtw_debugfs_priv edcca_enable; + struct rtw_debugfs_priv fw_crash; + struct rtw_debugfs_priv force_lowest_basic_rate; + struct rtw_debugfs_priv dm_cap; +}; + static const char * const rtw_dm_cap_strs[] = { [RTW_DM_CAP_NA] = "NA", [RTW_DM_CAP_TXGAPK] = "TXGAPK", @@ -524,7 +580,7 @@ static int rtw_debug_get_bb_page(struct seq_file *m, void *v) return 0; } -static int rtw_debug_get_rf_dump(struct seq_file *m, void *v) +static int rtw_debugfs_get_rf_dump(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; @@ -1074,139 +1130,102 @@ static int rtw_debugfs_get_dm_cap(struct seq_file *m, void *v) return 0; } -#define rtw_debug_impl_mac(page, addr) \ -static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \ +#define rtw_debug_priv_mac(addr) \ +{ \ .cb_read = rtw_debug_get_mac_page, \ .cb_data = addr, \ } -rtw_debug_impl_mac(0, 0x0000); -rtw_debug_impl_mac(1, 0x0100); -rtw_debug_impl_mac(2, 0x0200); -rtw_debug_impl_mac(3, 0x0300); -rtw_debug_impl_mac(4, 0x0400); -rtw_debug_impl_mac(5, 0x0500); -rtw_debug_impl_mac(6, 0x0600); -rtw_debug_impl_mac(7, 0x0700); -rtw_debug_impl_mac(10, 0x1000); -rtw_debug_impl_mac(11, 0x1100); -rtw_debug_impl_mac(12, 0x1200); -rtw_debug_impl_mac(13, 0x1300); -rtw_debug_impl_mac(14, 0x1400); -rtw_debug_impl_mac(15, 0x1500); -rtw_debug_impl_mac(16, 0x1600); -rtw_debug_impl_mac(17, 0x1700); - -#define rtw_debug_impl_bb(page, addr) \ -static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = { \ +#define rtw_debug_priv_bb(addr) \ +{ \ .cb_read = rtw_debug_get_bb_page, \ .cb_data = addr, \ } -rtw_debug_impl_bb(8, 0x0800); -rtw_debug_impl_bb(9, 0x0900); -rtw_debug_impl_bb(a, 0x0a00); -rtw_debug_impl_bb(b, 0x0b00); -rtw_debug_impl_bb(c, 0x0c00); -rtw_debug_impl_bb(d, 0x0d00); -rtw_debug_impl_bb(e, 0x0e00); -rtw_debug_impl_bb(f, 0x0f00); 
-rtw_debug_impl_bb(18, 0x1800); -rtw_debug_impl_bb(19, 0x1900); -rtw_debug_impl_bb(1a, 0x1a00); -rtw_debug_impl_bb(1b, 0x1b00); -rtw_debug_impl_bb(1c, 0x1c00); -rtw_debug_impl_bb(1d, 0x1d00); -rtw_debug_impl_bb(1e, 0x1e00); -rtw_debug_impl_bb(1f, 0x1f00); -rtw_debug_impl_bb(2c, 0x2c00); -rtw_debug_impl_bb(2d, 0x2d00); -rtw_debug_impl_bb(40, 0x4000); -rtw_debug_impl_bb(41, 0x4100); - -static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = { - .cb_read = rtw_debug_get_rf_dump, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_tx_pwr_tbl = { - .cb_read = rtw_debugfs_get_tx_pwr_tbl, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_write_reg = { - .cb_write = rtw_debugfs_set_write_reg, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_h2c = { - .cb_write = rtw_debugfs_set_h2c, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_rf_write = { - .cb_write = rtw_debugfs_set_rf_write, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_rf_read = { - .cb_write = rtw_debugfs_set_rf_read, - .cb_read = rtw_debugfs_get_rf_read, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_read_reg = { - .cb_write = rtw_debugfs_set_read_reg, - .cb_read = rtw_debugfs_get_read_reg, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_fix_rate = { - .cb_write = rtw_debugfs_set_fix_rate, - .cb_read = rtw_debugfs_get_fix_rate, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = { - .cb_write = rtw_debugfs_set_single_input, - .cb_read = rtw_debugfs_get_dump_cam, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = { - .cb_write = rtw_debugfs_set_rsvd_page, - .cb_read = rtw_debugfs_get_rsvd_page, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_phy_info = { - .cb_read = rtw_debugfs_get_phy_info, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_coex_enable = { - .cb_write = rtw_debugfs_set_coex_enable, - .cb_read = rtw_debugfs_get_coex_enable, -}; - -static struct rtw_debugfs_priv rtw_debug_priv_coex_info = { - .cb_read = rtw_debugfs_get_coex_info, -}; +#define rtw_debug_priv_get(name) \ +{ \ + .cb_read = rtw_debugfs_get_ ##name, \ +} -static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = { - .cb_write = rtw_debugfs_set_edcca_enable, - .cb_read = rtw_debugfs_get_edcca_enable, -}; +#define rtw_debug_priv_set(name) \ +{ \ + .cb_write = rtw_debugfs_set_ ##name, \ +} -static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = { - .cb_write = rtw_debugfs_set_fw_crash, - .cb_read = rtw_debugfs_get_fw_crash, -}; +#define rtw_debug_priv_set_and_get(name) \ +{ \ + .cb_write = rtw_debugfs_set_ ##name, \ + .cb_read = rtw_debugfs_get_ ##name, \ +} -static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = { - .cb_write = rtw_debugfs_set_force_lowest_basic_rate, - .cb_read = rtw_debugfs_get_force_lowest_basic_rate, -}; +#define rtw_debug_priv_set_single_and_get(name) \ +{ \ + .cb_write = rtw_debugfs_set_single_input, \ + .cb_read = rtw_debugfs_get_ ##name, \ +} -static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { - .cb_write = rtw_debugfs_set_dm_cap, - .cb_read = rtw_debugfs_get_dm_cap, +static const struct rtw_debugfs rtw_debugfs_templ = { + .mac_0 = rtw_debug_priv_mac(0x0000), + .mac_1 = rtw_debug_priv_mac(0x0100), + .mac_2 = rtw_debug_priv_mac(0x0200), + .mac_3 = rtw_debug_priv_mac(0x0300), + .mac_4 = rtw_debug_priv_mac(0x0400), + .mac_5 = rtw_debug_priv_mac(0x0500), + .mac_6 = rtw_debug_priv_mac(0x0600), + .mac_7 = rtw_debug_priv_mac(0x0700), + .mac_10 = rtw_debug_priv_mac(0x1000), + .mac_11 = rtw_debug_priv_mac(0x1100), + .mac_12 = rtw_debug_priv_mac(0x1200), + 
.mac_13 = rtw_debug_priv_mac(0x1300), + .mac_14 = rtw_debug_priv_mac(0x1400), + .mac_15 = rtw_debug_priv_mac(0x1500), + .mac_16 = rtw_debug_priv_mac(0x1600), + .mac_17 = rtw_debug_priv_mac(0x1700), + .bb_8 = rtw_debug_priv_bb(0x0800), + .bb_9 = rtw_debug_priv_bb(0x0900), + .bb_a = rtw_debug_priv_bb(0x0a00), + .bb_b = rtw_debug_priv_bb(0x0b00), + .bb_c = rtw_debug_priv_bb(0x0c00), + .bb_d = rtw_debug_priv_bb(0x0d00), + .bb_e = rtw_debug_priv_bb(0x0e00), + .bb_f = rtw_debug_priv_bb(0x0f00), + .bb_18 = rtw_debug_priv_bb(0x1800), + .bb_19 = rtw_debug_priv_bb(0x1900), + .bb_1a = rtw_debug_priv_bb(0x1a00), + .bb_1b = rtw_debug_priv_bb(0x1b00), + .bb_1c = rtw_debug_priv_bb(0x1c00), + .bb_1d = rtw_debug_priv_bb(0x1d00), + .bb_1e = rtw_debug_priv_bb(0x1e00), + .bb_1f = rtw_debug_priv_bb(0x1f00), + .bb_2c = rtw_debug_priv_bb(0x2c00), + .bb_2d = rtw_debug_priv_bb(0x2d00), + .bb_40 = rtw_debug_priv_bb(0x4000), + .bb_41 = rtw_debug_priv_bb(0x4100), + .rf_dump = rtw_debug_priv_get(rf_dump), + .tx_pwr_tbl = rtw_debug_priv_get(tx_pwr_tbl), + .write_reg = rtw_debug_priv_set(write_reg), + .h2c = rtw_debug_priv_set(h2c), + .rf_write = rtw_debug_priv_set(rf_write), + .rf_read = rtw_debug_priv_set_and_get(rf_read), + .read_reg = rtw_debug_priv_set_and_get(read_reg), + .fix_rate = rtw_debug_priv_set_and_get(fix_rate), + .dump_cam = rtw_debug_priv_set_single_and_get(dump_cam), + .rsvd_page = rtw_debug_priv_set_and_get(rsvd_page), + .phy_info = rtw_debug_priv_get(phy_info), + .coex_enable = rtw_debug_priv_set_and_get(coex_enable), + .coex_info = rtw_debug_priv_get(coex_info), + .edcca_enable = rtw_debug_priv_set_and_get(edcca_enable), + .fw_crash = rtw_debug_priv_set_and_get(fw_crash), + .force_lowest_basic_rate = rtw_debug_priv_set_and_get(force_lowest_basic_rate), + .dm_cap = rtw_debug_priv_set_and_get(dm_cap), }; #define rtw_debugfs_add_core(name, mode, fopname, parent) \ do { \ - rtw_debug_priv_ ##name.rtwdev = rtwdev; \ + struct rtw_debugfs_priv *priv = &rtwdev->debugfs->name; \ + priv->rtwdev = rtwdev; \ if (IS_ERR(debugfs_create_file(#name, mode, \ - parent, &rtw_debug_priv_ ##name,\ + parent, priv, \ &file_ops_ ##fopname))) \ pr_debug("Unable to initialize debugfs:%s\n", \ #name); \ @@ -1219,12 +1238,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { #define rtw_debugfs_add_r(name) \ rtw_debugfs_add_core(name, S_IFREG | 0444, single_r, debugfs_topdir) -void rtw_debugfs_init(struct rtw_dev *rtwdev) +static +void rtw_debugfs_add_basic(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir) { - struct dentry *debugfs_topdir; - - debugfs_topdir = debugfs_create_dir("rtw88", - rtwdev->hw->wiphy->debugfsdir); rtw_debugfs_add_w(write_reg); rtw_debugfs_add_rw(read_reg); rtw_debugfs_add_w(rf_write); @@ -1236,6 +1252,17 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) rtw_debugfs_add_r(coex_info); rtw_debugfs_add_rw(coex_enable); rtw_debugfs_add_w(h2c); + rtw_debugfs_add_r(rf_dump); + rtw_debugfs_add_r(tx_pwr_tbl); + rtw_debugfs_add_rw(edcca_enable); + rtw_debugfs_add_rw(fw_crash); + rtw_debugfs_add_rw(force_lowest_basic_rate); + rtw_debugfs_add_rw(dm_cap); +} + +static +void rtw_debugfs_add_sec0(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir) +{ rtw_debugfs_add_r(mac_0); rtw_debugfs_add_r(mac_1); rtw_debugfs_add_r(mac_2); @@ -1252,6 +1279,11 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) rtw_debugfs_add_r(bb_d); rtw_debugfs_add_r(bb_e); rtw_debugfs_add_r(bb_f); +} + +static +void rtw_debugfs_add_sec1(struct rtw_dev *rtwdev, struct dentry *debugfs_topdir) +{ rtw_debugfs_add_r(mac_10); 
rtw_debugfs_add_r(mac_11); rtw_debugfs_add_r(mac_12); @@ -1274,14 +1306,29 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) rtw_debugfs_add_r(bb_40); rtw_debugfs_add_r(bb_41); } - rtw_debugfs_add_r(rf_dump); - rtw_debugfs_add_r(tx_pwr_tbl); - rtw_debugfs_add_rw(edcca_enable); - rtw_debugfs_add_rw(fw_crash); - rtw_debugfs_add_rw(force_lowest_basic_rate); - rtw_debugfs_add_rw(dm_cap); } +void rtw_debugfs_init(struct rtw_dev *rtwdev) +{ + struct dentry *debugfs_topdir; + + rtwdev->debugfs = kmemdup(&rtw_debugfs_templ, sizeof(rtw_debugfs_templ), + GFP_KERNEL); + if (!rtwdev->debugfs) + return; + + debugfs_topdir = debugfs_create_dir("rtw88", + rtwdev->hw->wiphy->debugfsdir); + + rtw_debugfs_add_basic(rtwdev, debugfs_topdir); + rtw_debugfs_add_sec0(rtwdev, debugfs_topdir); + rtw_debugfs_add_sec1(rtwdev, debugfs_topdir); +} + +void rtw_debugfs_deinit(struct rtw_dev *rtwdev) +{ + kfree(rtwdev->debugfs); +} #endif /* CONFIG_RTW88_DEBUGFS */ #ifdef CONFIG_RTW88_DEBUG diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index eb69006c463ed4..6570e84d8d2440 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -25,6 +25,7 @@ enum rtw_debug_mask { RTW_DBG_HW_SCAN = 0x00010000, RTW_DBG_STATE = 0x00020000, RTW_DBG_SDIO = 0x00040000, + RTW_DBG_USB = 0x00080000, RTW_DBG_UNEXP = 0x80000000, RTW_DBG_ALL = 0xffffffff @@ -33,11 +34,13 @@ enum rtw_debug_mask { #ifdef CONFIG_RTW88_DEBUGFS void rtw_debugfs_init(struct rtw_dev *rtwdev); +void rtw_debugfs_deinit(struct rtw_dev *rtwdev); void rtw_debugfs_get_simple_phy_info(struct seq_file *m); #else static inline void rtw_debugfs_init(struct rtw_dev *rtwdev) {} +static inline void rtw_debugfs_deinit(struct rtw_dev *rtwdev) {} #endif /* CONFIG_RTW88_DEBUGFS */ diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index ab7d414d0ba679..b9b0114e253b43 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -1468,10 +1468,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, val |= BIT_ENSWBCN >> 8; rtw_write8(rtwdev, REG_CR + 1, val); - val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); - bckp[1] = val; - val &= ~(BIT_EN_BCNQ_DL >> 16); - rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) { + val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); + bckp[1] = val; + val &= ~(BIT_EN_BCNQ_DL >> 16); + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); + } ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size); if (ret) { @@ -1496,7 +1498,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, rsvd_pg_head = rtwdev->fifo.rsvd_boundary; rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, rsvd_pg_head | BIT_BCN_VALID_V1); - rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); rtw_write8(rtwdev, REG_CR + 1, bckp[0]); return ret; diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h index 830d7532f2a35a..96aeda26014e20 100644 --- a/drivers/net/wireless/realtek/rtw88/hci.h +++ b/drivers/net/wireless/realtek/rtw88/hci.h @@ -18,6 +18,7 @@ struct rtw_hci_ops { void (*deep_ps)(struct rtw_dev *rtwdev, bool enter); void (*link_ps)(struct rtw_dev *rtwdev, bool enter); void (*interface_cfg)(struct rtw_dev *rtwdev); + void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable); int 
(*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size); int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size); @@ -72,6 +73,12 @@ static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev) rtwdev->hci.ops->interface_cfg(rtwdev); } +static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable) +{ + if (rtwdev->hci.ops->dynamic_rx_agg) + rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable); +} + static inline int rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size) { diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 63326b35273895..b39e90fb66b41d 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -167,6 +167,12 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw, mutex_lock(&rtwdev->mutex); + rtwvif->mac_id = rtw_acquire_macid(rtwdev); + if (rtwvif->mac_id >= RTW_MAX_MAC_ID_NUM) { + mutex_unlock(&rtwdev->mutex); + return -ENOSPC; + } + port = find_first_zero_bit(rtwdev->hw_port, RTW_PORT_NUM); if (port >= RTW_PORT_NUM) { mutex_unlock(&rtwdev->mutex); @@ -214,7 +220,8 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw, mutex_unlock(&rtwdev->mutex); - rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port); + rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM mac_id %d on port %d\n", + vif->addr, rtwvif->mac_id, rtwvif->port); return 0; } @@ -225,7 +232,8 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw, struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; u32 config = 0; - rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port); + rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM mac_id %d on port %d\n", + vif->addr, rtwvif->mac_id, rtwvif->port); mutex_lock(&rtwdev->mutex); @@ -242,6 +250,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw, config |= PORT_SET_BCN_CTRL; rtw_vif_port_config(rtwdev, rtwvif, config); clear_bit(rtwvif->port, rtwdev->hw_port); + rtw_release_macid(rtwdev, rtwvif->mac_id); rtw_recalc_lps(rtwdev, NULL); mutex_unlock(&rtwdev->mutex); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 7ab7a988b123f1..bbdef38c7e341c 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -212,6 +212,7 @@ static void rtw_watch_dog_work(struct work_struct *work) struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_watch_dog_iter_data data = {}; bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + u32 tx_unicast_mbps, rx_unicast_mbps; bool ps_active; mutex_lock(&rtwdev->mutex); @@ -236,10 +237,11 @@ static void rtw_watch_dog_work(struct work_struct *work) else ps_active = false; - ewma_tp_add(&stats->tx_ewma_tp, - (u32)(stats->tx_unicast >> RTW_TP_SHIFT)); - ewma_tp_add(&stats->rx_ewma_tp, - (u32)(stats->rx_unicast >> RTW_TP_SHIFT)); + tx_unicast_mbps = stats->tx_unicast >> RTW_TP_SHIFT; + rx_unicast_mbps = stats->rx_unicast >> RTW_TP_SHIFT; + + ewma_tp_add(&stats->tx_ewma_tp, tx_unicast_mbps); + ewma_tp_add(&stats->rx_ewma_tp, rx_unicast_mbps); stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp); stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp); @@ -259,6 +261,9 @@ static void rtw_watch_dog_work(struct work_struct *work) rtw_phy_dynamic_mechanism(rtwdev); + rtw_hci_dynamic_rx_agg(rtwdev, + tx_unicast_mbps >= 1 || rx_unicast_mbps >= 1); + data.rtwdev = rtwdev; /* 
rtw_iterate_vifs internally uses an atomic iterator which is needed * to avoid taking local->iflist_mtx mutex @@ -306,17 +311,6 @@ static void rtw_ips_work(struct work_struct *work) mutex_unlock(&rtwdev->mutex); } -static u8 rtw_acquire_macid(struct rtw_dev *rtwdev) -{ - unsigned long mac_id; - - mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); - if (mac_id < RTW_MAX_MAC_ID_NUM) - set_bit(mac_id, rtwdev->mac_id_map); - - return mac_id; -} - static void rtw_sta_rc_work(struct work_struct *work) { struct rtw_sta_info *si = container_of(work, struct rtw_sta_info, @@ -335,12 +329,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; int i; - si->mac_id = rtw_acquire_macid(rtwdev); - if (si->mac_id >= RTW_MAX_MAC_ID_NUM) - return -ENOSPC; + if (vif->type == NL80211_IFTYPE_STATION) { + si->mac_id = rtwvif->mac_id; + } else { + si->mac_id = rtw_acquire_macid(rtwdev); + if (si->mac_id >= RTW_MAX_MAC_ID_NUM) + return -ENOSPC; + } - if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc == 0) - rtwvif->mac_id = si->mac_id; si->rtwdev = rtwdev; si->sta = sta; si->vif = vif; @@ -365,11 +361,13 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, bool fw_exist) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + struct ieee80211_vif *vif = si->vif; int i; cancel_work_sync(&si->rc_work); - rtw_release_macid(rtwdev, si->mac_id); + if (vif->type != NL80211_IFTYPE_STATION) + rtw_release_macid(rtwdev, si->mac_id); if (fw_exist) rtw_fw_media_status_report(rtwdev, si->mac_id, false); @@ -609,6 +607,8 @@ static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) rtw_bf_disassoc(rtwdev, vif, NULL); rtw_vif_assoc_changed(rtwvif, NULL); rtw_txq_cleanup(rtwdev, vif->txq); + + rtw_release_macid(rtwdev, rtwvif->mac_id); } void rtw_fw_recovery(struct rtw_dev *rtwdev) @@ -1313,20 +1313,21 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw; + int ret = 0; fw = &rtwdev->fw; wait_for_completion(&fw->completion); if (!fw->firmware) - return -EINVAL; + ret = -EINVAL; if (chip->wow_fw_name) { fw = &rtwdev->wow_fw; wait_for_completion(&fw->completion); if (!fw->firmware) - return -EINVAL; + ret = -EINVAL; } - return 0; + return ret; } static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, @@ -2005,7 +2006,7 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0; efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0; efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0; - efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0; + efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ? 
1 : 0; if (!is_valid_ether_addr(efuse->addr)) { eth_random_addr(efuse->addr); @@ -2133,7 +2134,6 @@ int rtw_core_init(struct rtw_dev *rtwdev) rtwdev->sec.total_cam_num = 32; rtwdev->hal.current_channel = 1; rtwdev->dm_info.fix_rate = U8_MAX; - set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map); rtw_stats_init(rtwdev); @@ -2299,6 +2299,7 @@ void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) ieee80211_unregister_hw(hw); rtw_unset_supported_band(hw, chip); + rtw_debugfs_deinit(rtwdev); } EXPORT_SYMBOL(rtw_unregister_hw); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 49a3fd4fb7dcdc..945117afe1438b 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -50,6 +50,7 @@ extern const struct ieee80211_ops rtw_ops; #define RTW_MAX_CHANNEL_NUM_5G 49 struct rtw_dev; +struct rtw_debugfs; enum rtw_hci_type { RTW_HCI_TYPE_PCIE, @@ -622,6 +623,7 @@ struct rtw_rx_pkt_stat { bool crc_err; bool decrypted; bool is_c2h; + bool channel_invalid; s32 signal_power; u16 pkt_len; @@ -740,7 +742,6 @@ struct rtw_txq { unsigned long flags; }; -#define RTW_BC_MC_MACID 1 DECLARE_EWMA(rssi, 10, 16); struct rtw_sta_info { @@ -803,7 +804,7 @@ struct rtw_bf_info { struct rtw_vif { enum rtw_net_type net_type; u16 aid; - u8 mac_id; /* for STA mode only */ + u8 mac_id; u8 mac_addr[ETH_ALEN]; u8 bssid[ETH_ALEN]; u8 port; @@ -1785,6 +1786,8 @@ struct rtw_efuse { bool share_ant; u8 bt_setting; + u8 usb_mode_switch; + struct { u8 hci; u8 bw; @@ -2051,7 +2054,7 @@ struct rtw_dev { bool beacon_loss; struct completion lps_leave_check; - struct dentry *debugfs; + struct rtw_debugfs *debugfs; u8 sta_cnt; u32 rts_threshold; @@ -2127,6 +2130,17 @@ static inline bool rtw_chip_has_tx_stbc(struct rtw_dev *rtwdev) return rtwdev->chip->tx_stbc; } +static inline u8 rtw_acquire_macid(struct rtw_dev *rtwdev) +{ + unsigned long mac_id; + + mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); + if (mac_id < RTW_MAX_MAC_ID_NUM) + set_bit(mac_id, rtwdev->mac_id_map); + + return mac_id; +} + static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id) { clear_bit(mac_id, rtwdev->mac_id_map); diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index a5b9d6c7be37e6..0b9b8807af2cb0 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1088,6 +1088,7 @@ static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, /* remove rx_desc */ skb_pull(new, pkt_offset); + rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat); rtw_rx_stats(rtwdev, pkt_stat.vif, new); memcpy(new->cb, &rx_status, sizeof(rx_status)); ieee80211_rx_napi(rtwdev->hw, NULL, new, napi); @@ -1600,6 +1601,7 @@ static struct rtw_hci_ops rtw_pci_ops = { .deep_ps = rtw_pci_deep_ps, .link_ps = rtw_pci_link_ps, .interface_cfg = rtw_pci_interface_cfg, + .dynamic_rx_agg = NULL, .read8 = rtw_pci_read8, .read16 = rtw_pci_read16, diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 02ef9a77316b48..4d9b8668e8b047 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -15,6 +15,7 @@ #define BIT_WLOCK_1C_B6 BIT(5) #define REG_SYS_PW_CTRL 0x0004 #define BIT_PFM_WOWL BIT(3) +#define BIT_APFM_OFFMAC BIT(9) #define REG_SYS_CLK_CTRL 0x0008 #define BIT_CPU_CLK_EN BIT(14) @@ -133,6 +134,14 @@ #define REG_PMC_DBG_CTRL1 0xa8 #define 
BITS_PMC_BT_IQK_STS GENMASK(22, 21) +#define REG_PAD_CTRL2 0x00C4 +#define BIT_RSM_EN_V1 BIT(16) +#define BIT_NO_PDN_CHIPOFF_V1 BIT(17) +#define BIT_MASK_USB23_SW_MODE_V1 GENMASK(19, 18) +#define BIT_USB3_USB2_TRANSITION BIT(20) +#define BIT_USB_MODE_U2 1 +#define BIT_USB_MODE_U3 2 + #define REG_EFUSE_ACCESS 0x00CF #define EFUSE_ACCESS_ON 0x69 #define EFUSE_ACCESS_OFF 0x00 @@ -313,6 +322,12 @@ #define REG_RXDMA_DPR 0x028C #define REG_RXDMA_MODE 0x0290 #define BIT_DMA_MODE BIT(1) +#define BIT_DMA_BURST_CNT GENMASK(3, 2) +#define BIT_DMA_BURST_SIZE GENMASK(5, 4) +#define BIT_DMA_BURST_SIZE_64 2 +#define BIT_DMA_BURST_SIZE_512 1 +#define BIT_DMA_BURST_SIZE_1024 0 + #define REG_RXPKTNUM 0x02B0 #define REG_INT_MIG 0x0304 @@ -568,6 +583,8 @@ #define BIT_WL_SECURITY_CLK BIT(15) #define BIT_DDMA_EN BIT(8) +#define REG_SW_MDIO 0x10C0 + #define REG_H2C_PKT_READADDR 0x10D0 #define REG_H2C_PKT_WRITEADDR 0x10D4 #define REG_FW_DBG6 0x10F8 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c index e2c7d9f876836a..a019f4085e7389 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c @@ -31,8 +31,6 @@ static const struct usb_device_id rtw_8821cu_id_table[] = { .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, - { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 2456ff24281801..6edb17aea90e0e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -46,6 +46,7 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) map = (struct rtw8822b_efuse *)log_map; + efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(7)); efuse->rfe_option = map->rfe_option; efuse->rf_board_option = map->rf_board_option; efuse->crystal_cap = map->xtal_k; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h index 2dc3a6660f06a5..cf85e63966a1c7 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h @@ -72,7 +72,9 @@ struct rtw8822bs_efuse { struct rtw8822b_efuse { __le16 rtl_id; - u8 res0[0x0e]; + u8 res0[4]; + u8 usb_mode; + u8 res1[0x09]; /* power index for four RF paths */ struct rtw_txpwr_idx txpwr_idx_table[4]; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 62376d1cca22fc..1dbe1cdbc3fd45 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -49,6 +49,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) map = (struct rtw8822c_efuse *)log_map; + efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(7)); efuse->rfe_option = map->rfe_option; efuse->rf_board_option = map->rf_board_option; efuse->crystal_cap = map->xtal_k & XCAP_MASK; @@ -2575,9 +2576,10 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, rx_power[RF_PATH_B] -= 110; 
channel = GET_PHY_STAT_P0_CHANNEL(phy_status); - if (channel == 0) - channel = rtwdev->hal.current_channel; - rtw_set_rx_freq_band(pkt_stat, channel); + if (channel != 0) + rtw_set_rx_freq_band(pkt_stat, channel); + else + pkt_stat->channel_invalid = true; pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A]; pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B]; @@ -2611,12 +2613,14 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, else rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status); - if (rxsc >= 9 && rxsc <= 12) + if (rxsc == 0) + bw = rtwdev->hal.current_band_width; + else if (rxsc >= 1 && rxsc <= 8) + bw = RTW_CHANNEL_WIDTH_20; + else if (rxsc >= 9 && rxsc <= 12) bw = RTW_CHANNEL_WIDTH_40; - else if (rxsc >= 13) - bw = RTW_CHANNEL_WIDTH_80; else - bw = RTW_CHANNEL_WIDTH_20; + bw = RTW_CHANNEL_WIDTH_80; channel = GET_PHY_STAT_P1_CHANNEL(phy_status); rtw_set_rx_freq_band(pkt_stat, channel); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h index 1bc0e7f5d6bb1f..e2b383d633cd23 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h @@ -59,16 +59,18 @@ struct rtw8822ce_efuse { struct rtw8822c_efuse { __le16 rtl_id; - u8 res0[0x0e]; + u8 res0[4]; + u8 usb_mode; + u8 res1[0x09]; /* power index for four RF paths */ struct rtw_txpwr_idx txpwr_idx_table[4]; u8 channel_plan; /* 0xb8 */ u8 xtal_k; - u8 res1; + u8 res2; u8 iqk_lck; - u8 res2[5]; /* 0xbc */ + u8 res3[5]; /* 0xbc */ u8 rf_board_option; u8 rf_feature_option; u8 rf_bt_setting; @@ -80,21 +82,21 @@ struct rtw8822c_efuse { u8 rf_antenna_option; /* 0xc9 */ u8 rfe_option; u8 country_code[2]; - u8 res3[3]; + u8 res4[3]; u8 path_a_thermal; /* 0xd0 */ u8 path_b_thermal; - u8 res4[2]; + u8 res5[2]; u8 rx_gain_gap_2g_ofdm; - u8 res5; - u8 rx_gain_gap_2g_cck; u8 res6; - u8 rx_gain_gap_5gl; + u8 rx_gain_gap_2g_cck; u8 res7; - u8 rx_gain_gap_5gm; + u8 rx_gain_gap_5gl; u8 res8; - u8 rx_gain_gap_5gh; + u8 rx_gain_gap_5gm; u8 res9; - u8 res10[0x42]; + u8 rx_gain_gap_5gh; + u8 res10; + u8 res11[0x42]; union { struct rtw8822ce_efuse e; struct rtw8822cu_efuse u; diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c index 84aedabdf28530..66f9419588cf3a 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.c +++ b/drivers/net/wireless/realtek/rtw88/rx.c @@ -146,6 +146,47 @@ static void rtw_set_rx_freq_by_pktstat(struct rtw_rx_pkt_stat *pkt_stat, rx_status->band = pkt_stat->band; } +void rtw_update_rx_freq_from_ie(struct rtw_dev *rtwdev, struct sk_buff *skb, + struct ieee80211_rx_status *rx_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + int channel = rtwdev->hal.current_channel; + size_t hdr_len, ielen; + int channel_number; + u8 *variable; + + if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + goto fill_rx_status; + + if (ieee80211_is_beacon(mgmt->frame_control)) { + variable = mgmt->u.beacon.variable; + hdr_len = offsetof(struct ieee80211_mgmt, + u.beacon.variable); + } else if (ieee80211_is_probe_resp(mgmt->frame_control)) { + variable = mgmt->u.probe_resp.variable; + hdr_len = offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + } else { + goto fill_rx_status; + } + + if (skb->len > hdr_len) + ielen = skb->len - hdr_len; + else + goto fill_rx_status; + + channel_number = cfg80211_get_ies_channel_number(variable, ielen, + NL80211_BAND_2GHZ); + if (channel_number != -1) + channel = channel_number; + 
+fill_rx_status: + rtw_set_rx_freq_band(pkt_stat, channel); + rtw_set_rx_freq_by_pktstat(pkt_stat, rx_status); +} +EXPORT_SYMBOL(rtw_update_rx_freq_from_ie); + void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev, struct rtw_rx_pkt_stat *pkt_stat, struct ieee80211_hdr *hdr, diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h index d3668c4efc24d5..9f001911298777 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.h +++ b/drivers/net/wireless/realtek/rtw88/rx.h @@ -41,7 +41,7 @@ enum rtw_rx_desc_enc { #define GET_RX_DESC_TSFL(rxdesc) \ le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0)) #define GET_RX_DESC_BW(rxdesc) \ - (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(31, 24))) + (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(5, 4))) void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct sk_buff *skb); @@ -50,5 +50,18 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *rx_status, u8 *phy_status); +void rtw_update_rx_freq_from_ie(struct rtw_dev *rtwdev, struct sk_buff *skb, + struct ieee80211_rx_status *rx_status, + struct rtw_rx_pkt_stat *pkt_stat); + +static inline +void rtw_update_rx_freq_for_invalid(struct rtw_dev *rtwdev, struct sk_buff *skb, + struct ieee80211_rx_status *rx_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + if (pkt_stat->channel_invalid) + rtw_update_rx_freq_from_ie(rtwdev, skb, rx_status, pkt_stat); +} + #endif diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c index 0cae5746f540fa..21d0754dd7f6ac 100644 --- a/drivers/net/wireless/realtek/rtw88/sdio.c +++ b/drivers/net/wireless/realtek/rtw88/sdio.c @@ -948,6 +948,7 @@ static void rtw_sdio_rx_skb(struct rtw_dev *rtwdev, struct sk_buff *skb, skb_put(skb, pkt_stat->pkt_len); skb_reserve(skb, pkt_offset); + rtw_update_rx_freq_for_invalid(rtwdev, skb, rx_status, pkt_stat); rtw_rx_stats(rtwdev, pkt_stat->vif, skb); ieee80211_rx_irqsafe(rtwdev->hw, skb); @@ -1156,6 +1157,7 @@ static struct rtw_hci_ops rtw_sdio_ops = { .deep_ps = rtw_sdio_deep_ps, .link_ps = rtw_sdio_link_ps, .interface_cfg = rtw_sdio_interface_cfg, + .dynamic_rx_agg = NULL, .read8 = rtw_sdio_read8, .read16 = rtw_sdio_read16, diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index c02ac673be321d..dae7ca14886507 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -46,7 +46,8 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) le32_encode_bits(pkt_info->ls, RTW_TX_DESC_W0_LS) | le32_encode_bits(pkt_info->dis_qselseq, RTW_TX_DESC_W0_DISQSELSEQ); - tx_desc->w1 = le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) | + tx_desc->w1 = le32_encode_bits(pkt_info->mac_id, RTW_TX_DESC_W1_MACID) | + le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) | le32_encode_bits(pkt_info->rate_id, RTW_TX_DESC_W1_RATE_ID) | le32_encode_bits(pkt_info->sec_type, RTW_TX_DESC_W1_SEC_TYPE) | le32_encode_bits(pkt_info->pkt_offset, RTW_TX_DESC_W1_PKT_OFFSET) | @@ -401,14 +402,18 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, const struct rtw_chip_info *chip = rtwdev->chip; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_vif *vif = info->control.vif; struct rtw_sta_info *si; - struct ieee80211_vif *vif = NULL; + struct rtw_vif *rtwvif; __le16 fc = hdr->frame_control; bool bmc; if (sta) { 
si = (struct rtw_sta_info *)sta->drv_priv; - vif = si->vif; + pkt_info->mac_id = si->mac_id; + } else if (vif) { + rtwvif = (struct rtw_vif *)vif->drv_priv; + pkt_info->mac_id = rtwvif->mac_id; } if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc)) diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h index 3241896062577b..3d544fd7f60f51 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.h +++ b/drivers/net/wireless/realtek/rtw88/tx.h @@ -27,6 +27,7 @@ struct rtw_tx_desc { #define RTW_TX_DESC_W0_BMC BIT(24) #define RTW_TX_DESC_W0_LS BIT(26) #define RTW_TX_DESC_W0_DISQSELSEQ BIT(31) +#define RTW_TX_DESC_W1_MACID GENMASK(7, 0) #define RTW_TX_DESC_W1_QSEL GENMASK(12, 8) #define RTW_TX_DESC_W1_RATE_ID GENMASK(20, 16) #define RTW_TX_DESC_W1_SEC_TYPE GENMASK(23, 22) diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index a55ca5a2422759..e83ab6fb83f5b7 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -14,6 +14,11 @@ #include "ps.h" #include "usb.h" +static bool rtw_switch_usb_mode = true; +module_param_named(switch_usb_mode, rtw_switch_usb_mode, bool, 0644); +MODULE_PARM_DESC(switch_usb_mode, + "Set to N to disable switching to USB 3 mode to avoid potential interference in the 2.4 GHz band (default: Y)"); + #define RTW_USB_MAX_RXQ_LEN 512 struct rtw_usb_txcb { @@ -541,11 +546,12 @@ static void rtw_usb_rx_handler(struct work_struct *work) struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_work); struct rtw_dev *rtwdev = rtwusb->rtwdev; const struct rtw_chip_info *chip = rtwdev->chip; - struct rtw_rx_pkt_stat pkt_stat; + u32 pkt_desc_sz = chip->rx_pkt_desc_sz; struct ieee80211_rx_status rx_status; + u32 pkt_offset, next_pkt, urb_len; + struct rtw_rx_pkt_stat pkt_stat; + struct sk_buff *next_skb; struct sk_buff *skb; - u32 pkt_desc_sz = chip->rx_pkt_desc_sz; - u32 pkt_offset; u8 *rx_desc; int limit; @@ -554,28 +560,48 @@ static void rtw_usb_rx_handler(struct work_struct *work) if (!skb) break; - rx_desc = skb->data; - chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, - &rx_status); - pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz + - pkt_stat.shift; - - if (pkt_stat.is_c2h) { - skb_put(skb, pkt_stat.pkt_len + pkt_offset); - rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb); - continue; - } - if (skb_queue_len(&rtwusb->rx_queue) >= RTW_USB_MAX_RXQ_LEN) { dev_dbg_ratelimited(rtwdev->dev, "failed to get rx_queue, overflow\n"); dev_kfree_skb_any(skb); continue; } - skb_put(skb, pkt_stat.pkt_len); - skb_reserve(skb, pkt_offset); - memcpy(skb->cb, &rx_status, sizeof(rx_status)); - ieee80211_rx_irqsafe(rtwdev->hw, skb); + urb_len = skb->len; + + do { + rx_desc = skb->data; + chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, + &rx_status); + pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz + + pkt_stat.shift; + + next_pkt = round_up(pkt_stat.pkt_len + pkt_offset, 8); + + if (urb_len >= next_pkt + pkt_desc_sz) + next_skb = skb_clone(skb, GFP_KERNEL); + else + next_skb = NULL; + + if (pkt_stat.is_c2h) { + skb_trim(skb, pkt_stat.pkt_len + pkt_offset); + rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb); + } else { + skb_pull(skb, pkt_offset); + skb_trim(skb, pkt_stat.pkt_len); + rtw_update_rx_freq_for_invalid(rtwdev, skb, + &rx_status, + &pkt_stat); + rtw_rx_stats(rtwdev, pkt_stat.vif, skb); + memcpy(skb->cb, &rx_status, sizeof(rx_status)); + ieee80211_rx_irqsafe(rtwdev->hw, skb); + } + + skb = next_skb; + if (skb) + skb_pull(skb, next_pkt); + + 
urb_len -= next_pkt; + } while (skb); } } @@ -619,6 +645,7 @@ static void rtw_usb_read_port_complete(struct urb *urb) if (skb) dev_kfree_skb_any(skb); } else { + skb_put(skb, urb->actual_length); skb_queue_tail(&rtwusb->rx_queue, skb); queue_work(rtwusb->rxwq, &rtwusb->rx_work); } @@ -713,9 +740,69 @@ static void rtw_usb_link_ps(struct rtw_dev *rtwdev, bool enter) /* empty function for rtw_hci_ops */ } +static void rtw_usb_init_burst_pkt_len(struct rtw_dev *rtwdev) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + enum usb_device_speed speed = rtwusb->udev->speed; + u8 rxdma, burst_size; + + rxdma = BIT_DMA_BURST_CNT | BIT_DMA_MODE; + + if (speed == USB_SPEED_SUPER) + burst_size = BIT_DMA_BURST_SIZE_1024; + else if (speed == USB_SPEED_HIGH) + burst_size = BIT_DMA_BURST_SIZE_512; + else + burst_size = BIT_DMA_BURST_SIZE_64; + + u8p_replace_bits(&rxdma, burst_size, BIT_DMA_BURST_SIZE); + + rtw_write8(rtwdev, REG_RXDMA_MODE, rxdma); + rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN); +} + static void rtw_usb_interface_cfg(struct rtw_dev *rtwdev) { - /* empty function for rtw_hci_ops */ + rtw_usb_init_burst_pkt_len(rtwdev); +} + +static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable) +{ + u8 size, timeout; + u16 val16; + + rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC); + rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN); + rtw_write8_clr(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7)); + + if (enable) { + size = 0x5; + timeout = 0x20; + } else { + size = 0x0; + timeout = 0x1; + } + val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) | + u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1); + + rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16); +} + +static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable) +{ + switch (rtwdev->chip->id) { + case RTW_CHIP_TYPE_8822C: + case RTW_CHIP_TYPE_8822B: + case RTW_CHIP_TYPE_8821C: + rtw_usb_dynamic_rx_agg_v1(rtwdev, enable); + break; + case RTW_CHIP_TYPE_8723D: + /* Doesn't like aggregation. */ + break; + case RTW_CHIP_TYPE_8703B: + /* Likely not found in USB devices. */ + break; + } } static struct rtw_hci_ops rtw_usb_ops = { @@ -727,6 +814,7 @@ static struct rtw_hci_ops rtw_usb_ops = { .deep_ps = rtw_usb_deep_ps, .link_ps = rtw_usb_link_ps, .interface_cfg = rtw_usb_interface_cfg, + .dynamic_rx_agg = rtw_usb_dynamic_rx_agg, .write8 = rtw_usb_write8, .write16 = rtw_usb_write16, @@ -841,6 +929,77 @@ static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev, usb_set_intfdata(intf, NULL); } +static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev) +{ + enum usb_device_speed cur_speed; + u8 id = rtwdev->chip->id; + bool can_switch; + u32 pad_ctrl2; + + if (rtw_read8(rtwdev, REG_SYS_CFG2 + 3) == 0x20) + cur_speed = USB_SPEED_SUPER; + else + cur_speed = USB_SPEED_HIGH; + + if (cur_speed == USB_SPEED_SUPER) + return 0; + + pad_ctrl2 = rtw_read32(rtwdev, REG_PAD_CTRL2); + + can_switch = !!(pad_ctrl2 & (BIT_MASK_USB23_SW_MODE_V1 | + BIT_USB3_USB2_TRANSITION)); + + if (!can_switch) { + rtw_dbg(rtwdev, RTW_DBG_USB, + "Switching to USB 3 mode unsupported by the chip\n"); + return 0; + } + + /* At this point cur_speed is USB_SPEED_HIGH. If we already tried + * to switch don't try again - it's a USB 2 port. 
+ */ + if (u32_get_bits(pad_ctrl2, BIT_MASK_USB23_SW_MODE_V1) == BIT_USB_MODE_U3) + return 0; + + /* Enable IO wrapper timeout */ + if (id == RTW_CHIP_TYPE_8822B || id == RTW_CHIP_TYPE_8821C) + rtw_write8_clr(rtwdev, REG_SW_MDIO + 3, BIT(0)); + + u32p_replace_bits(&pad_ctrl2, BIT_USB_MODE_U3, BIT_MASK_USB23_SW_MODE_V1); + pad_ctrl2 |= BIT_RSM_EN_V1; + + rtw_write32(rtwdev, REG_PAD_CTRL2, pad_ctrl2); + rtw_write8(rtwdev, REG_PAD_CTRL2 + 1, 4); + + rtw_write16_set(rtwdev, REG_SYS_PW_CTRL, BIT_APFM_OFFMAC); + usleep_range(1000, 1001); + rtw_write32_set(rtwdev, REG_PAD_CTRL2, BIT_NO_PDN_CHIPOFF_V1); + + return 1; +} + +static int rtw_usb_switch_mode(struct rtw_dev *rtwdev) +{ + u8 id = rtwdev->chip->id; + + if (id != RTW_CHIP_TYPE_8822C && id != RTW_CHIP_TYPE_8822B) + return 0; + + if (!rtwdev->efuse.usb_mode_switch) { + rtw_dbg(rtwdev, RTW_DBG_USB, + "Switching to USB 3 mode disabled by chip's efuse\n"); + return 0; + } + + if (!rtw_switch_usb_mode) { + rtw_dbg(rtwdev, RTW_DBG_USB, + "Switching to USB 3 mode disabled by module parameter\n"); + return 0; + } + + return rtw_usb_switch_mode_new(rtwdev); +} + int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct rtw_dev *rtwdev; @@ -896,6 +1055,14 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) goto err_destroy_rxwq; } + ret = rtw_usb_switch_mode(rtwdev); + if (ret) { + /* Not a fail, but we do need to skip rtw_register_hw. */ + rtw_dbg(rtwdev, RTW_DBG_USB, "switching to USB 3 mode\n"); + ret = 0; + goto err_destroy_rxwq; + } + ret = rtw_register_hw(rtwdev, rtwdev->hw); if (ret) { rtw_err(rtwdev, "failed to register hw\n"); diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig index 3c9f864805b150..d2a3361669d7a6 100644 --- a/drivers/net/wireless/realtek/rtw89/Kconfig +++ b/drivers/net/wireless/realtek/rtw89/Kconfig @@ -12,6 +12,7 @@ if RTW89 config RTW89_CORE tristate + select WANT_DEV_COREDUMP config RTW89_PCI tristate @@ -28,6 +29,9 @@ config RTW89_8852B_COMMON config RTW89_8852B tristate +config RTW89_8852BT + tristate + config RTW89_8852C tristate @@ -68,6 +72,18 @@ config RTW89_8852BE 802.11ax PCIe wireless network (Wi-Fi 6) adapter +config RTW89_8852BTE + tristate "Realtek 8852BE-VT PCI wireless network (Wi-Fi 6) adapter" + depends on PCI + select RTW89_CORE + select RTW89_PCI + select RTW89_8852BT + select RTW89_8852B_COMMON + help + Select this option will enable support for 8852BE-VT chipset + + 802.11ax PCIe wireless network (Wi-Fi 6) adapter + config RTW89_8852CE tristate "Realtek 8852CE PCI wireless network (Wi-Fi 6E) adapter" depends on PCI diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile index 1f1050a7a89dc6..c751013e811e8f 100644 --- a/drivers/net/wireless/realtek/rtw89/Makefile +++ b/drivers/net/wireless/realtek/rtw89/Makefile @@ -52,6 +52,14 @@ rtw89_8852b-objs := rtw8852b.o \ obj-$(CONFIG_RTW89_8852BE) += rtw89_8852be.o rtw89_8852be-objs := rtw8852be.o +obj-$(CONFIG_RTW89_8852BT) += rtw89_8852bt.o +rtw89_8852bt-objs := rtw8852bt.o \ + rtw8852bt_rfk.o \ + rtw8852bt_rfk_table.o + +obj-$(CONFIG_RTW89_8852BTE) += rtw89_8852bte.o +rtw89_8852bte-objs := rtw8852bte.o + obj-$(CONFIG_RTW89_8852C) += rtw89_8852c.o rtw89_8852c-objs := rtw8852c.o \ rtw8852c_table.o \ diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 4557c6e035a974..4476fc7e53db74 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ 
b/drivers/net/wireless/realtek/rtw89/cam.c @@ -384,20 +384,24 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, break; case WLAN_CIPHER_SUITE_CCMP: hw_key_type = RTW89_SEC_KEY_TYPE_CCMP128; - key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + if (!chip->hw_mgmt_tx_encrypt) + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; break; case WLAN_CIPHER_SUITE_CCMP_256: hw_key_type = RTW89_SEC_KEY_TYPE_CCMP256; - key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + if (!chip->hw_mgmt_tx_encrypt) + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; ext_key = true; break; case WLAN_CIPHER_SUITE_GCMP: hw_key_type = RTW89_SEC_KEY_TYPE_GCMP128; - key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + if (!chip->hw_mgmt_tx_encrypt) + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; break; case WLAN_CIPHER_SUITE_GCMP_256: hw_key_type = RTW89_SEC_KEY_TYPE_GCMP256; - key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + if (!chip->hw_mgmt_tx_encrypt) + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; ext_key = true; break; case WLAN_CIPHER_SUITE_AES_CMAC: diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c index 7f90d93dcdc0c0..7070c85e2c2883 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.c +++ b/drivers/net/wireless/realtek/rtw89/chan.c @@ -124,12 +124,12 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan, } bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx, + enum rtw89_chanctx_idx idx, const struct rtw89_chan *new) { struct rtw89_hal *hal = &rtwdev->hal; - struct rtw89_chan *chan = &hal->sub[idx].chan; - struct rtw89_chan_rcd *rcd = &hal->sub[idx].rcd; + struct rtw89_chan *chan = &hal->chanctx[idx].chan; + struct rtw89_chan_rcd *rcd = &hal->chanctx[idx].rcd; bool band_changed; rcd->prev_primary_channel = chan->primary_channel; @@ -153,7 +153,7 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev, lockdep_assert_held(&rtwdev->mutex); - for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) { + for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) { chan = rtw89_chan_get(rtwdev, idx); ret = iterator(chan, data); if (ret) @@ -164,36 +164,36 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev, } static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx, + enum rtw89_chanctx_idx idx, const struct cfg80211_chan_def *chandef, bool from_stack) { struct rtw89_hal *hal = &rtwdev->hal; - hal->sub[idx].chandef = *chandef; + hal->chanctx[idx].chandef = *chandef; if (from_stack) set_bit(idx, hal->entity_map); } void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx, + enum rtw89_chanctx_idx idx, const struct cfg80211_chan_def *chandef) { __rtw89_config_entity_chandef(rtwdev, idx, chandef, true); } void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx, + enum rtw89_chanctx_idx idx, const struct cfg80211_chan_def *chandef) { struct rtw89_hal *hal = &rtwdev->hal; - enum rtw89_sub_entity_idx cur; + enum rtw89_chanctx_idx cur; if (chandef) { - cur = atomic_cmpxchg(&hal->roc_entity_idx, - RTW89_SUB_ENTITY_IDLE, idx); - if (cur != RTW89_SUB_ENTITY_IDLE) { + cur = atomic_cmpxchg(&hal->roc_chanctx_idx, + RTW89_CHANCTX_IDLE, idx); + if (cur != RTW89_CHANCTX_IDLE) { rtw89_debug(rtwdev, RTW89_DBG_TXRX, "ROC still processing on entity %d\n", idx); return; @@ -201,12 +201,12 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev, hal->roc_chandef = *chandef; } else { - cur = 
atomic_cmpxchg(&hal->roc_entity_idx, idx, - RTW89_SUB_ENTITY_IDLE); + cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx, + RTW89_CHANCTX_IDLE); if (cur == idx) return; - if (cur == RTW89_SUB_ENTITY_IDLE) + if (cur == RTW89_CHANCTX_IDLE) rtw89_debug(rtwdev, RTW89_DBG_TXRX, "ROC already finished on entity %d\n", idx); else @@ -220,7 +220,7 @@ static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev) struct cfg80211_chan_def chandef = {0}; rtw89_get_default_chandef(&chandef); - __rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &chandef, false); + __rtw89_config_entity_chandef(rtwdev, RTW89_CHANCTX_0, &chandef, false); } void rtw89_entity_init(struct rtw89_dev *rtwdev) @@ -228,9 +228,9 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev) struct rtw89_hal *hal = &rtwdev->hal; hal->entity_pause = false; - bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY); + bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX); bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES); - atomic_set(&hal->roc_entity_idx, RTW89_SUB_ENTITY_IDLE); + atomic_set(&hal->roc_chanctx_idx, RTW89_CHANCTX_IDLE); rtw89_config_default_chandef(rtwdev); } @@ -242,8 +242,8 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif; int idx; - for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) { - cfg = hal->sub[idx].cfg; + for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) { + cfg = hal->chanctx[idx].cfg; if (!cfg) { /* doesn't run with chanctx ops; one channel at most */ w->active_chanctxs = 1; @@ -262,7 +262,7 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev, enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) { - DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_SUB_ENTITY) = {}; + DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_CHANCTX) = {}; struct rtw89_hal *hal = &rtwdev->hal; const struct cfg80211_chan_def *chandef; struct rtw89_entity_weight w = {}; @@ -272,23 +272,23 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) lockdep_assert_held(&rtwdev->mutex); - bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY); + bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_CHANCTX); rtw89_entity_calculate_weight(rtwdev, &w); switch (w.active_chanctxs) { default: rtw89_warn(rtwdev, "unknown ent chanctxs weight: %d\n", w.active_chanctxs); - bitmap_zero(recalc_map, NUM_OF_RTW89_SUB_ENTITY); + bitmap_zero(recalc_map, NUM_OF_RTW89_CHANCTX); fallthrough; case 0: rtw89_config_default_chandef(rtwdev); - set_bit(RTW89_SUB_ENTITY_0, recalc_map); + set_bit(RTW89_CHANCTX_0, recalc_map); fallthrough; case 1: mode = RTW89_ENTITY_MODE_SCC; break; - case 2 ... NUM_OF_RTW89_SUB_ENTITY: + case 2 ... 
NUM_OF_RTW89_CHANCTX: if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) { rtw89_debug(rtwdev, RTW89_DBG_CHAN, "unhandled ent: %d chanctxs %d roles\n", @@ -304,7 +304,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) break; } - for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_SUB_ENTITY) { + for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_CHANCTX) { chandef = rtw89_chandef_get(rtwdev, idx); rtw89_get_channel_params(chandef, &chan); if (chan.channel == 0) { @@ -650,7 +650,7 @@ static int rtw89_mcc_fill_role(struct rtw89_dev *rtwdev, role->duration = role->beacon_interval / 2; - chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); role->is_2ghz = chan->band_type == RTW89_BAND_2G; role->is_go = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_GO; role->is_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; @@ -678,10 +678,10 @@ static void rtw89_mcc_fill_bt_role(struct rtw89_dev *rtwdev) } struct rtw89_mcc_fill_role_selector { - struct rtw89_vif *bind_vif[NUM_OF_RTW89_SUB_ENTITY]; + struct rtw89_vif *bind_vif[NUM_OF_RTW89_CHANCTX]; }; -static_assert((u8)NUM_OF_RTW89_SUB_ENTITY >= NUM_OF_RTW89_MCC_ROLES); +static_assert((u8)NUM_OF_RTW89_CHANCTX >= NUM_OF_RTW89_MCC_ROLES); static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *mcc_role, @@ -719,14 +719,14 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev) if (!rtwvif->chanctx_assigned) continue; - if (sel.bind_vif[rtwvif->sub_entity_idx]) { + if (sel.bind_vif[rtwvif->chanctx_idx]) { rtw89_warn(rtwdev, "MCC skip extra vif <%d> on chanctx[%d]\n", - rtwvif->mac_id, rtwvif->sub_entity_idx); + rtwvif->mac_id, rtwvif->chanctx_idx); continue; } - sel.bind_vif[rtwvif->sub_entity_idx] = rtwvif; + sel.bind_vif[rtwvif->chanctx_idx] = rtwvif; } ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel); @@ -1390,7 +1390,7 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro const struct rtw89_chan *chan; int ret; - chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx); + chan = rtw89_chan_get(rtwdev, role->rtwvif->chanctx_idx); req.central_ch_seg0 = chan->channel; req.primary_ch = chan->primary_channel; req.bandwidth = chan->band_width; @@ -1448,7 +1448,7 @@ void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role, slot_arg->duration = role->duration; slot_arg->role_num = 1; - chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx); + chan = rtw89_chan_get(rtwdev, role->rtwvif->chanctx_idx); slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_WIFI; slot_arg->roles[0].is_master = role == ref; @@ -1934,22 +1934,53 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev) return 0; } +struct rtw89_mcc_stop_sel { + u8 mac_id; + u8 slot_idx; +}; + +static void rtw89_mcc_stop_sel_fill(struct rtw89_mcc_stop_sel *sel, + const struct rtw89_mcc_role *mcc_role) +{ + sel->mac_id = mcc_role->rtwvif->mac_id; + sel->slot_idx = mcc_role->slot_idx; +} + +static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + struct rtw89_mcc_stop_sel *sel = data; + + if (!mcc_role->rtwvif->chanctx_assigned) + return 0; + + rtw89_mcc_stop_sel_fill(sel, mcc_role); + return 1; /* break iteration */ +} + static void rtw89_mcc_stop(struct rtw89_dev *rtwdev) { struct rtw89_mcc_info *mcc = &rtwdev->mcc; struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_stop_sel sel; int ret; - rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC stop\n"); + /* by default, stop at ref */ + rtw89_mcc_stop_sel_fill(&sel, ref); + rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_stop_sel_iterator, &sel); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop at \n", sel.mac_id); if (rtw89_concurrent_via_mrc(rtwdev)) { - ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group); + ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group, sel.slot_idx); if (ret) rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MRC h2c failed to trigger del: %d\n", ret); } else { ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group, - ref->rtwvif->mac_id, true); + sel.mac_id, true); if (ret) rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC h2c failed to trigger stop: %d\n", ret); @@ -2339,9 +2370,9 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev) rtw89_queue_chanctx_work(rtwdev); } -static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx1, - enum rtw89_sub_entity_idx idx2) +static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_idx idx1, + enum rtw89_chanctx_idx idx2) { struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_vif *rtwvif; @@ -2350,25 +2381,25 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev, if (idx1 == idx2) return; - hal->sub[idx1].cfg->idx = idx2; - hal->sub[idx2].cfg->idx = idx1; + hal->chanctx[idx1].cfg->idx = idx2; + hal->chanctx[idx2].cfg->idx = idx1; - swap(hal->sub[idx1], hal->sub[idx2]); + swap(hal->chanctx[idx1], hal->chanctx[idx2]); rtw89_for_each_rtwvif(rtwdev, rtwvif) { if (!rtwvif->chanctx_assigned) continue; - if (rtwvif->sub_entity_idx == idx1) - rtwvif->sub_entity_idx = idx2; - else if (rtwvif->sub_entity_idx == idx2) - rtwvif->sub_entity_idx = idx1; + if (rtwvif->chanctx_idx == idx1) + rtwvif->chanctx_idx = idx2; + else if (rtwvif->chanctx_idx == idx2) + rtwvif->chanctx_idx = idx1; } - cur = atomic_read(&hal->roc_entity_idx); + cur = atomic_read(&hal->roc_chanctx_idx); if (cur == idx1) - atomic_set(&hal->roc_entity_idx, idx2); + atomic_set(&hal->roc_chanctx_idx, idx2); else if (cur == idx2) - atomic_set(&hal->roc_entity_idx, idx1); + atomic_set(&hal->roc_chanctx_idx, idx1); } int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev, @@ -2379,14 +2410,14 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev, const struct rtw89_chip_info *chip = rtwdev->chip; u8 idx; - idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY); + idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_CHANCTX); if (idx >= chip->support_chanctx_num) return -ENOENT; rtw89_config_entity_chandef(rtwdev, idx, &ctx->def); cfg->idx = idx; cfg->ref_count = 0; - hal->sub[idx].cfg = cfg; + hal->chanctx[idx].cfg = cfg; return 0; } @@ -2419,19 +2450,19 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev, struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv; struct rtw89_entity_weight w = {}; - rtwvif->sub_entity_idx = cfg->idx; + rtwvif->chanctx_idx = cfg->idx; rtwvif->chanctx_assigned = true; cfg->ref_count++; - if (cfg->idx == RTW89_SUB_ENTITY_0) + if (cfg->idx == RTW89_CHANCTX_0) goto out; rtw89_entity_calculate_weight(rtwdev, &w); if (w.active_chanctxs != 1) goto out; - /* put the first active chanctx at RTW89_SUB_ENTITY_0 */ - rtw89_swap_sub_entity(rtwdev, cfg->idx, RTW89_SUB_ENTITY_0); + /* put the first active chanctx at RTW89_CHANCTX_0 */ + rtw89_swap_chanctx(rtwdev, cfg->idx, RTW89_CHANCTX_0); out: return rtw89_set_channel(rtwdev); @@ -2443,47 +2474,60 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev, { struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv; 
struct rtw89_hal *hal = &rtwdev->hal; - struct rtw89_entity_weight w = {}; - enum rtw89_sub_entity_idx roll; + enum rtw89_chanctx_idx roll; enum rtw89_entity_mode cur; + enum rtw89_entity_mode new; + int ret; - rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0; + rtwvif->chanctx_idx = RTW89_CHANCTX_0; rtwvif->chanctx_assigned = false; cfg->ref_count--; if (cfg->ref_count != 0) goto out; - if (cfg->idx != RTW89_SUB_ENTITY_0) + if (cfg->idx != RTW89_CHANCTX_0) goto out; - roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY, + roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_CHANCTX, cfg->idx + 1); /* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */ - if (roll == NUM_OF_RTW89_SUB_ENTITY) + if (roll == NUM_OF_RTW89_CHANCTX) goto out; - /* RTW89_SUB_ENTITY_0 is going to release, and another exists. - * Make another roll down to RTW89_SUB_ENTITY_0 to replace. + /* RTW89_CHANCTX_0 is going to release, and another exists. + * Make another roll down to RTW89_CHANCTX_0 to replace. */ - rtw89_swap_sub_entity(rtwdev, cfg->idx, roll); + rtw89_swap_chanctx(rtwdev, cfg->idx, roll); out: - rtw89_entity_calculate_weight(rtwdev, &w); + if (!hal->entity_pause) { + cur = rtw89_get_entity_mode(rtwdev); + switch (cur) { + case RTW89_ENTITY_MODE_MCC: + rtw89_mcc_stop(rtwdev); + break; + default: + break; + } + } + + ret = rtw89_set_channel(rtwdev); + if (ret) + return; + + if (hal->entity_pause) + return; - cur = rtw89_get_entity_mode(rtwdev); - switch (cur) { + new = rtw89_get_entity_mode(rtwdev); + switch (new) { case RTW89_ENTITY_MODE_MCC: - /* If still multi-roles, re-plan MCC for chanctx changes. - * Otherwise, just stop MCC. - */ - rtw89_mcc_stop(rtwdev); - if (w.active_roles == NUM_OF_RTW89_MCC_ROLES) - rtw89_mcc_start(rtwdev); + /* re-plan MCC for chanctx changes. 
*/ + ret = rtw89_mcc_start(rtwdev); + if (ret) + rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret); break; default: break; } - - rtw89_set_channel(rtwdev); } diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h index 5278ff8c513b39..c6d31984e57536 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.h +++ b/drivers/net/wireless/realtek/rtw89/chan.h @@ -76,17 +76,17 @@ static inline void rtw89_set_entity_mode(struct rtw89_dev *rtwdev, void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan, enum rtw89_band band, enum rtw89_bandwidth bandwidth); bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx, + enum rtw89_chanctx_idx idx, const struct rtw89_chan *new); int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev, int (*iterator)(const struct rtw89_chan *chan, void *data), void *data); void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx, + enum rtw89_chanctx_idx idx, const struct cfg80211_chan_def *chandef); void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx, + enum rtw89_chanctx_idx idx, const struct cfg80211_chan_def *chandef); void rtw89_entity_init(struct rtw89_dev *rtwdev); enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index 24929ef534e08b..df51b29142aa26 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -129,6 +129,13 @@ static const u32 cxtbl[] = { static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = { /* firmware version must be in decreasing order for each chip */ + {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0), + .fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7, + .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7, + .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7, + .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7, + .fwevntrptl = 1, .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6, + }, {RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0), .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7, .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7, @@ -1351,6 +1358,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, pfinfo = &pfwinfo->rpt_ctrl.finfo.v8; pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8); break; + } else if (ver->fcxbtcrpt == 7) { + pfinfo = &pfwinfo->rpt_ctrl.finfo.v7; + pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7); + break; } else { goto err; } @@ -1655,6 +1666,38 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, pfwinfo->event[BTF_EVNT_RPT]); dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout; + } else if (ver->fcxbtcrpt == 7) { + prpt->v7 = pfwinfo->rpt_ctrl.finfo.v7; + pfwinfo->rpt_en_map = le32_to_cpu(prpt->v7.rpt_info.en); + wl->ver_info.fw_coex = le32_to_cpu(prpt->v7.rpt_info.cx_ver); + wl->ver_info.fw = le32_to_cpu(prpt->v7.rpt_info.fw_ver); + + for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++) + memcpy(&dm->gnt.band[i], &prpt->v7.gnt_val[i][0], + sizeof(dm->gnt.band[i])); + + btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] = + le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_TX_V105]); + btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] = + le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_RX_V105]); + btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] = + le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_TX_V105]); + btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] = + 
le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_RX_V105]); + + val1 = le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]); + if (val1 > btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]) + val1 -= btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]; /* diff */ + + btc->cx.cnt_bt[BTC_BCNT_POLUT_DIFF] = val1; + btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW] = + le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]); + + val1 = pfwinfo->event[BTF_EVNT_RPT]; + _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_HANG, 0); + _chk_btc_err(rtwdev, BTC_DCNT_RPT_HANG, val1); + _chk_btc_err(rtwdev, BTC_DCNT_WL_FW_VER_MATCH, 0); + _chk_btc_err(rtwdev, BTC_DCNT_BTTX_HANG, 0); } else if (ver->fcxbtcrpt == 8) { prpt->v8 = pfwinfo->rpt_ctrl.finfo.v8; pfwinfo->rpt_en_map = le32_to_cpu(prpt->v8.rpt_info.en); @@ -2397,7 +2440,7 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev, if (val == fwinfo->rpt_en_map) return; - if (btc->ver->fcxbtcrpt == 8) { + if (btc->ver->fcxbtcrpt == 7 || btc->ver->fcxbtcrpt == 8) { r.v8.type = SET_REPORT_EN; r.v8.fver = btc->ver->fcxbtcrpt; r.v8.len = sizeof(r.v8.map); @@ -2567,6 +2610,10 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type) rtw89_fw_h2c_cxdrv_role_v1(rtwdev, type); else if (ver->fwlrole == 2) rtw89_fw_h2c_cxdrv_role_v2(rtwdev, type); + else if (ver->fwlrole == 7) + rtw89_fw_h2c_cxdrv_role_v7(rtwdev, type); + else if (ver->fwlrole == 8) + rtw89_fw_h2c_cxdrv_role_v8(rtwdev, type); break; case CXDRVINFO_CTRL: if (ver->drvinfo_type == 1) @@ -2748,7 +2795,7 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map, rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt); } -#define BTC_TDMA_WLROLE_MAX 2 +#define BTC_TDMA_WLROLE_MAX 3 static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable) { @@ -2998,10 +3045,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7; struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_btc_wl_active_role *r; struct rtw89_btc_wl_active_role_v1 *r1; struct rtw89_btc_wl_active_role_v2 *r2; + struct rtw89_btc_wl_active_role_v7 *r7; struct rtw89_btc_wl_rlink *rlink; u8 en = 0, i, ch = 0, bw = 0; u8 mode, connect_cnt; @@ -3018,6 +3067,9 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) } else if (ver->fwlrole == 2) { mode = wl_rinfo_v2->link_mode; connect_cnt = wl_rinfo_v2->connect_cnt; + } else if (ver->fwlrole == 7) { + mode = wl_rinfo_v7->link_mode; + connect_cnt = wl_rinfo_v7->connect_cnt; } else if (ver->fwlrole == 8) { mode = wl_rinfo_v8->link_mode; connect_cnt = wl_rinfo_v8->connect_cnt; @@ -3036,6 +3088,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) r = &wl_rinfo->active_role[i]; r1 = &wl_rinfo_v1->active_role_v1[i]; r2 = &wl_rinfo_v2->active_role_v2[i]; + r7 = &wl_rinfo_v7->active_role[i]; rlink = &wl_rinfo_v8->rlink[i][0]; if (ver->fwlrole == 0 && @@ -3056,6 +3109,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) ch = r2->ch; bw = r2->bw; break; + } else if (ver->fwlrole == 7 && + (r7->role == RTW89_WIFI_ROLE_P2P_GO || + r7->role == RTW89_WIFI_ROLE_P2P_CLIENT)) { + ch = r7->ch; + bw = r7->bw; + break; } else if (ver->fwlrole == 8 && (rlink->role == RTW89_WIFI_ROLE_P2P_GO || rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) { @@ -3071,6 +3130,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) r = &wl_rinfo->active_role[i]; r1 = 
&wl_rinfo_v1->active_role_v1[i]; r2 = &wl_rinfo_v2->active_role_v2[i]; + r7 = &wl_rinfo_v7->active_role[i]; rlink = &wl_rinfo_v8->rlink[i][0]; if (ver->fwlrole == 0 && @@ -3088,6 +3148,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) ch = r2->ch; bw = r2->bw; break; + } else if (ver->fwlrole == 7 && + r7->connected && r7->band == RTW89_BAND_2G) { + ch = r7->ch; + bw = r7->bw; + break; } else if (ver->fwlrole == 8 && rlink->connected && rlink->rf_band == RTW89_BAND_2G) { ch = rlink->ch; @@ -3146,6 +3211,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7; struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc; @@ -3164,6 +3230,8 @@ static bool _check_freerun(struct rtw89_dev *rtwdev) connect_cnt = wl_rinfo_v1->connect_cnt; else if (ver->fwlrole == 2) connect_cnt = wl_rinfo_v2->connect_cnt; + else if (ver->fwlrole == 7) + connect_cnt = wl_rinfo_v7->connect_cnt; else if (ver->fwlrole == 8) connect_cnt = wl_rinfo_v8->connect_cnt; @@ -4082,6 +4150,8 @@ static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec, dbcc_chg = wl->role_info_v1.dbcc_chg; else if (btc->ver->fwlrole == 2) dbcc_chg = wl->role_info_v2.dbcc_chg; + else if (btc->ver->fwlrole == 7) + dbcc_chg = wl->role_info_v7.dbcc_chg; else if (btc->ver->fwlrole == 8) dbcc_chg = wl->role_info_v8.dbcc_chg; @@ -4754,6 +4824,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7; struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info; struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; @@ -4775,6 +4846,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev) wl_rinfo.link_mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) wl_rinfo.link_mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 7) + wl_rinfo.link_mode = wl_rinfo_v7->link_mode; else if (ver->fwlrole == 8) wl_rinfo.link_mode = wl_rinfo_v8->link_mode; else @@ -4790,6 +4863,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev) wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy; } else if (ver->fwlrole == 2) { wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy; + } else if (ver->fwlrole == 7) { + wl_rinfo.dbcc_2g_phy = wl_rinfo_v7->dbcc_2g_phy; } else if (ver->fwlrole == 8) { wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy; } else { @@ -4835,37 +4910,56 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev) struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info; struct rtw89_btc_wl_info *wl = &btc->cx.wl; - struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7; + struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8; const struct rtw89_chip_info *chip = rtwdev->chip; - const struct rtw89_btc_ver *ver = btc->ver; struct rtw89_btc_bt_info *bt = &btc->cx.bt; 
struct rtw89_btc_dm *dm = &btc->dm; - u8 is_preagc, val; + u8 is_preagc, val, link_mode, dbcc_2g_phy; + u8 role_ver = rtwdev->btc.ver->fwlrole; + bool dbcc_en; if (btc->manual_ctrl) return; - if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC) + if (role_ver == 2) { + dbcc_en = rinfo_v2->dbcc_en; + link_mode = rinfo_v2->link_mode; + dbcc_2g_phy = rinfo_v2->dbcc_2g_phy; + } else if (role_ver == 7) { + dbcc_en = rinfo_v7->dbcc_en; + link_mode = rinfo_v7->link_mode; + dbcc_2g_phy = rinfo_v7->dbcc_2g_phy; + } else if (role_ver == 8) { + dbcc_en = rinfo_v8->dbcc_en; + link_mode = rinfo_v8->link_mode; + dbcc_2g_phy = rinfo_v8->dbcc_2g_phy; + } else { + return; + } + + if (link_mode == BTC_WLINK_25G_MCC) { is_preagc = BTC_PREAGC_BB_FWCTRL; - else if (!(bt->run_patch_code && bt->enable.now)) + } else if (!(bt->run_patch_code && bt->enable.now)) { is_preagc = BTC_PREAGC_DISABLE; - else if (wl_rinfo->link_mode == BTC_WLINK_5G) + } else if (link_mode == BTC_WLINK_5G) { is_preagc = BTC_PREAGC_DISABLE; - else if (wl_rinfo->link_mode == BTC_WLINK_NOLINK || - btc->cx.bt.link_info.profile_cnt.now == 0) + } else if (link_mode == BTC_WLINK_NOLINK || + btc->cx.bt.link_info.profile_cnt.now == 0) { is_preagc = BTC_PREAGC_DISABLE; - else if (dm->tdma_now.type != CXTDMA_OFF && + } else if (dm->tdma_now.type != CXTDMA_OFF && !bt_linfo->hfp_desc.exist && !bt_linfo->hid_desc.exist && - dm->fddt_train == BTC_FDDT_DISABLE) + dm->fddt_train == BTC_FDDT_DISABLE) { is_preagc = BTC_PREAGC_DISABLE; - else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en && - wl_rinfo->dbcc_2g_phy != RTW89_PHY_1) + } else if (dbcc_en && (dbcc_2g_phy != RTW89_PHY_1)) { is_preagc = BTC_PREAGC_DISABLE; - else if (btc->ant_type == BTC_ANT_SHARED) + } else if (btc->ant_type == BTC_ANT_SHARED) { is_preagc = BTC_PREAGC_DISABLE; - else + } else { is_preagc = BTC_PREAGC_ENABLE; + } if (dm->wl_pre_agc_rb != dm->wl_pre_agc && dm->wl_pre_agc_rb != BTC_PREAGC_NOTFOUND) { @@ -4968,6 +5062,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7; struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_txtime_data data = {.rtwdev = rtwdev}; u8 mode, igno_bt, tx_retry; @@ -4984,6 +5079,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev) mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 7) + mode = wl_rinfo_v7->link_mode; else if (ver->fwlrole == 8) mode = wl_rinfo_v8->link_mode; else @@ -5043,6 +5140,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7; struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_btc_bt_info *bt = &btc->cx.bt; bool bt_hi_lna_rx = false; @@ -5054,6 +5152,8 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev) mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 7) + mode = wl_rinfo_v7->link_mode; else if (ver->fwlrole == 8) mode = wl_rinfo_v8->link_mode; else @@ -5359,15 +5459,26 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev 
*rtwdev) struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_bt_info *bt = &btc->cx.bt; struct rtw89_btc_dm *dm = &btc->dm; - struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7; + u32 dur, mrole_type, mrole_noa_duration; u16 policy_type = BTC_CXP_OFF_BT; - u32 dur; + + if (btc->ver->fwlrole == 2) { + mrole_type = rinfo_v2->mrole_type; + mrole_noa_duration = rinfo_v2->mrole_noa_duration; + } else if (btc->ver->fwlrole == 7) { + mrole_type = rinfo_v7->mrole_type; + mrole_noa_duration = rinfo_v7->mrole_noa_duration; + } else { + return; + } if (btc->ant_type == BTC_ANT_DEDICATED) { policy_type = BTC_CXP_OFF_EQ0; } else { /* shared-antenna */ - switch (wl_rinfo->mrole_type) { + switch (mrole_type) { case BTC_WLMROLE_STA_GC: dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION; dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_P2P_CLIENT; @@ -5385,7 +5496,7 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev) case BTC_WLMROLE_STA_GO_NOA: dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION; dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_NONE; - dur = wl_rinfo->mrole_noa_duration; + dur = mrole_noa_duration; if (wl->status.map._4way) { dm->wl_scc.ebt_null = 0; @@ -5567,6 +5678,14 @@ _update_rssi_state(struct rtw89_dev *rtwdev, u8 pre_state, u8 rssi, u8 thresh) return next_state; } +static void _wl_req_mac(struct rtw89_dev *rtwdev, u8 mac) +{ + if (mac == RTW89_MAC_0) + rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC); + else + rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC); +} + static void _update_dbcc_band(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { @@ -6065,12 +6184,20 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch, u8 *phy, u8 *role, u8 *dbcc_2g_phy) { struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl; - struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8; + struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7; + struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8; bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false; - u8 j, k, dbcc_2g_cid, dbcc_2g_cid2; + u8 j, k, dbcc_2g_cid, dbcc_2g_cid2, connect_cnt; + + if (rtwdev->btc.ver->fwlrole == 7) + connect_cnt = rinfo_v7->connect_cnt; + else if (rtwdev->btc.ver->fwlrole == 8) + connect_cnt = rinfo_v8->connect_cnt; + else + return BTC_WLINK_NOLINK; /* find out the 2G-PHY by connect-id ->ch */ - for (j = 0; j < wl_rinfo->connect_cnt; j++) { + for (j = 0; j < connect_cnt; j++) { if (ch[j].center_ch <= 14) { is_2g_ch_exist = true; break; @@ -6085,11 +6212,11 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch, *dbcc_2g_phy = phy[dbcc_2g_cid]; /* connect_cnt <= 2 */ - if (wl_rinfo->connect_cnt < BTC_TDMA_WLROLE_MAX) + if (connect_cnt < BTC_TDMA_WLROLE_MAX) return (_get_role_link_mode((role[dbcc_2g_cid]))); /* find the other-port in the 2G-PHY, ex: PHY-0:6G, PHY1: mcc/scc */ - for (k = 0; k < wl_rinfo->connect_cnt; k++) { + for (k = 0; k < connect_cnt; k++) { if (k == dbcc_2g_cid) continue; @@ -6116,29 +6243,54 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch, static void _update_role_link_mode(struct rtw89_dev *rtwdev, bool client_joined, u32 noa) { - struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &rtwdev->btc.cx.wl.role_info_v8; + struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &rtwdev->btc.cx.wl.role_info_v8; + struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = 
&rtwdev->btc.cx.wl.role_info_v7; + u8 role_ver = rtwdev->btc.ver->fwlrole; u32 type = BTC_WLMROLE_NONE, dur = 0; - u32 wl_role = wl_rinfo->role_map; + u8 link_mode, connect_cnt; + u32 wl_role; + + if (role_ver == 7) { + wl_role = rinfo_v7->role_map; + link_mode = rinfo_v7->link_mode; + connect_cnt = rinfo_v7->connect_cnt; + } else if (role_ver == 8) { + wl_role = rinfo_v8->role_map; + link_mode = rinfo_v8->link_mode; + connect_cnt = rinfo_v8->connect_cnt; + } else { + return; + } /* if no client_joined, don't care P2P-GO/AP role */ if (((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) || (wl_role & BIT(RTW89_WIFI_ROLE_AP))) && !client_joined) { - if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC) { - wl_rinfo->link_mode = BTC_WLINK_2G_STA; - wl_rinfo->connect_cnt--; - } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO || - wl_rinfo->link_mode == BTC_WLINK_2G_AP) { - wl_rinfo->link_mode = BTC_WLINK_NOLINK; - wl_rinfo->connect_cnt--; + if (link_mode == BTC_WLINK_2G_SCC) { + if (role_ver == 7) { + rinfo_v7->link_mode = BTC_WLINK_2G_STA; + rinfo_v7->connect_cnt--; + } else if (role_ver == 8) { + rinfo_v8->link_mode = BTC_WLINK_2G_STA; + rinfo_v8->connect_cnt--; + } + } else if (link_mode == BTC_WLINK_2G_GO || + link_mode == BTC_WLINK_2G_AP) { + if (role_ver == 7) { + rinfo_v7->link_mode = BTC_WLINK_NOLINK; + rinfo_v7->connect_cnt--; + } else if (role_ver == 8) { + rinfo_v8->link_mode = BTC_WLINK_NOLINK; + rinfo_v8->connect_cnt--; + } } } /* Identify 2-Role type */ - if (wl_rinfo->connect_cnt >= 2 && - (wl_rinfo->link_mode == BTC_WLINK_2G_SCC || - wl_rinfo->link_mode == BTC_WLINK_2G_MCC || - wl_rinfo->link_mode == BTC_WLINK_25G_MCC || - wl_rinfo->link_mode == BTC_WLINK_5G)) { + if (connect_cnt >= 2 && + (link_mode == BTC_WLINK_2G_SCC || + link_mode == BTC_WLINK_2G_MCC || + link_mode == BTC_WLINK_25G_MCC || + link_mode == BTC_WLINK_5G)) { if ((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) || (wl_role & BIT(RTW89_WIFI_ROLE_AP))) type = noa ? BTC_WLMROLE_STA_GO_NOA : BTC_WLMROLE_STA_GO; @@ -6150,8 +6302,167 @@ static void _update_role_link_mode(struct rtw89_dev *rtwdev, dur = noa; } - wl_rinfo->mrole_type = type; - wl_rinfo->mrole_noa_duration = dur; + if (role_ver == 7) { + rinfo_v7->mrole_type = type; + rinfo_v7->mrole_noa_duration = dur; + } else if (role_ver == 8) { + rinfo_v8->mrole_type = type; + rinfo_v8->mrole_noa_duration = dur; + } +} + +static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid) +{ + struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER]; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_role_info_v7 *wl_rinfo = &wl->role_info_v7; + struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; + struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info; + struct rtw89_btc_wl_active_role_v7 *act_role = NULL; + u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_MAX, phy_dbcc; + bool b2g = false, b5g = false, client_joined = false, client_inc_2g = false; + u8 client_cnt_last[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {}; + u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {}; + u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {}; + u8 mac = RTW89_MAC_0, dbcc_2g_phy = RTW89_PHY_0; + u32 noa_duration = 0; + + memset(wl_rinfo, 0, sizeof(*wl_rinfo)); + + for (i = 0; i < RTW89_PORT_NUM; i++) { + if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_MAX) + continue; + + act_role = &wl_rinfo->active_role[i]; + act_role->role = wl_linfo[i].role; + + /* check if role connect? 
*/ + if (wl_linfo[i].connected == MLME_NO_LINK) { + act_role->connected = 0; + continue; + } else if (wl_linfo[i].connected == MLME_LINKING) { + continue; + } + + cnt++; + act_role->connected = 1; + act_role->pid = wl_linfo[i].pid; + act_role->phy = wl_linfo[i].phy; + act_role->band = wl_linfo[i].band; + act_role->ch = wl_linfo[i].ch; + act_role->bw = wl_linfo[i].bw; + act_role->noa = wl_linfo[i].noa; + act_role->noa_dur = wl_linfo[i].noa_duration; + cid_ch[cnt - 1] = wl_linfo[i].chdef; + cid_phy[cnt - 1] = wl_linfo[i].phy; + cid_role[cnt - 1] = wl_linfo[i].role; + wl_rinfo->role_map |= BIT(wl_linfo[i].role); + + if (rid == i) + phy_now = act_role->phy; + + if (wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO || + wl_linfo[i].role == RTW89_WIFI_ROLE_AP) { + if (wl_linfo[i].client_cnt > 1) + client_joined = true; + if (client_cnt_last[i] < wl_linfo[i].client_cnt && + wl_linfo[i].chdef.band == RTW89_BAND_2G) + client_inc_2g = true; + act_role->client_cnt = wl_linfo[i].client_cnt; + } else { + act_role->client_cnt = 0; + } + + if (act_role->noa && act_role->noa_dur > 0) + noa_duration = act_role->noa_dur; + + if (rtwdev->dbcc_en) { + phy_dbcc = wl_linfo[i].phy; + wl_dinfo->role[phy_dbcc] |= BIT(wl_linfo[i].role); + wl_dinfo->op_band[phy_dbcc] = wl_linfo[i].chdef.band; + } + + if (wl_linfo[i].chdef.band != RTW89_BAND_2G) { + cnt_5g++; + b5g = true; + } else { + if (((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO || + wl_linfo[i].role == RTW89_WIFI_ROLE_AP) && + client_joined) || + wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_CLIENT) + wl_rinfo->p2p_2g = 1; + + if ((wl_linfo[i].mode & BIT(BTC_WL_MODE_11B)) || + (wl_linfo[i].mode & BIT(BTC_WL_MODE_11G))) + wl->bg_mode = 1; + else if (wl_linfo[i].mode & BIT(BTC_WL_MODE_HE)) + wl->he_mode = true; + + cnt_2g++; + b2g = true; + } + + if (act_role->band == RTW89_BAND_5G && act_role->ch >= 100) + wl->is_5g_hi_channel = 1; + else + wl->is_5g_hi_channel = 0; + } + + wl_rinfo->connect_cnt = cnt; + wl->client_cnt_inc_2g = client_inc_2g; + + if (cnt == 0) { + mode = BTC_WLINK_NOLINK; + wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE); + } else if (!b2g && b5g) { + mode = BTC_WLINK_5G; + } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) { + mode = BTC_WLINK_2G_NAN; + } else if (cnt > BTC_TDMA_WLROLE_MAX) { + mode = BTC_WLINK_OTHER; + } else if (rtwdev->dbcc_en) { + mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy); + + /* correct 2G-located PHY band for gnt ctrl */ + if (dbcc_2g_phy < RTW89_PHY_MAX) + wl_dinfo->op_band[dbcc_2g_phy] = RTW89_BAND_2G; + } else if (b2g && b5g && cnt == 2) { + mode = BTC_WLINK_25G_MCC; + } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */ + if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1])) + mode = BTC_WLINK_2G_SCC; + else + mode = BTC_WLINK_2G_MCC; + } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */ + mode = _get_role_link_mode(cid_role[0]); + } else { + mode = BTC_WLINK_NOLINK; + } + + wl_rinfo->link_mode = mode; + _update_role_link_mode(rtwdev, client_joined, noa_duration); + + /* todo DBCC related event */ + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] wl_info phy_now=%d\n", phy_now); + + if (wl_rinfo->dbcc_en != rtwdev->dbcc_en) { + wl_rinfo->dbcc_chg = 1; + wl_rinfo->dbcc_en = rtwdev->dbcc_en; + btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++; + } + + if (rtwdev->dbcc_en) { + wl_rinfo->dbcc_2g_phy = dbcc_2g_phy; + + if (dbcc_2g_phy == RTW89_PHY_1) + mac = RTW89_MAC_1; + + _update_dbcc_band(rtwdev, RTW89_PHY_0); + _update_dbcc_band(rtwdev, RTW89_PHY_1); + } + _wl_req_mac(rtwdev, mac); + _fw_set_drv_info(rtwdev, 
CXDRVINFO_ROLE); } static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id, @@ -6496,6 +6807,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7; struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; u8 mode, igno_bt, always_freerun; @@ -6511,6 +6823,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 7) + mode = wl_rinfo_v7->link_mode; else if (ver->fwlrole == 8) mode = wl_rinfo_v8->link_mode; else @@ -6657,7 +6971,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) _action_wl_2g_scc(rtwdev); else if (ver->fwlrole == 1) _action_wl_2g_scc_v1(rtwdev); - else if (ver->fwlrole == 2) + else if (ver->fwlrole == 2 || ver->fwlrole == 7) _action_wl_2g_scc_v2(rtwdev); else if (ver->fwlrole == 8) _action_wl_2g_scc_v8(rtwdev); @@ -7169,7 +7483,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif struct rtw89_sta *rtwsta, enum btc_role_state state) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta); struct rtw89_btc *btc = &rtwdev->btc; @@ -7250,6 +7564,9 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif } else if (ver->fwlrole == 2) { *wlinfo = r; _update_wl_info_v2(rtwdev); + } else if (ver->fwlrole == 7) { + *wlinfo = r; + _update_wl_info_v7(rtwdev, r.pid); } else if (ver->fwlrole == 8) { wlinfo = &wl->rlink_info[r.pid][rlink_id]; *wlinfo = r; @@ -7856,6 +8173,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7; struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; u8 mode; @@ -7870,6 +8188,8 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m) mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 7) + mode = wl_rinfo_v7->link_mode; else if (ver->fwlrole == 8) mode = wl_rinfo_v8->link_mode; else @@ -10288,6 +10608,108 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m) cnt[BTC_NCNT_CUSTOMERIZE]); } +static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo; + struct rtw89_btc_fbtc_rpt_ctrl_v7 *prptctrl = NULL; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_cx *cx = &rtwdev->btc.cx; + struct rtw89_btc_dm *dm = &rtwdev->btc.dm; + struct rtw89_btc_wl_info *wl = &cx->wl; + u32 *cnt = rtwdev->btc.dm.cnt_notify; + u32 cnt_sum = 0; + u8 i; + + if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) + return; + + seq_printf(m, "%s", "\n\r========== [Statistics] =========="); + + pcinfo = &pfwinfo->rpt_ctrl.cinfo; + if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF && + 
!wl->status.map.rf_off) { + prptctrl = &pfwinfo->rpt_ctrl.finfo.v7; + + seq_printf(m, + "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d)," + "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ", + "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, + le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h, + le16_to_cpu(prptctrl->rpt_info.cnt_c2h), + le16_to_cpu(prptctrl->rpt_info.len_c2h), + rtwdev->btc.ver->info_buf); + + seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", + pfwinfo->event[BTF_EVNT_RPT], + le16_to_cpu(prptctrl->rpt_info.cnt), + le32_to_cpu(prptctrl->rpt_info.en)); + + if (dm->error.map.wl_fw_hang) + seq_puts(m, " (WL FW Hang!!)"); + + seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ", + "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); + + seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)", + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); + + seq_printf(m, + "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]", + "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT], + wl->rfk_info.proc_time); + + seq_printf(m, ", bt_rfk[req:%d]", + le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); + + seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]", + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); + } else { + seq_printf(m, + "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)", + "[summary]", + pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, + pfwinfo->cnt_c2h, + wl->status.map.lps, wl->status.map.rf_off); + } + + for (i = 0; i < BTC_NCNT_NUM; i++) + cnt_sum += dm->cnt_notify[i]; + + seq_printf(m, + "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", + cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + + seq_printf(m, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); + + seq_printf(m, + "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ", + "[notify_cnt]", + cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], + cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW], + cnt[BTC_NCNT_SPECIAL_PACKET]); + + seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE], + rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW], + cnt[BTC_NCNT_COUNTRYCODE]); +} + static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m) { struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo; @@ -10440,6 +10862,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m) _show_summary_v5(rtwdev, m); else if (ver->fcxbtcrpt == 105) _show_summary_v105(rtwdev, m); + else if (ver->fcxbtcrpt == 7) + _show_summary_v7(rtwdev, m); else if (ver->fcxbtcrpt == 8) _show_summary_v8(rtwdev, m); } diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h index 0e5f268616f756..de53b56632f7c6 100644 --- 
a/drivers/net/wireless/realtek/rtw89/coex.h +++ b/drivers/net/wireless/realtek/rtw89/coex.h @@ -193,6 +193,8 @@ enum btc_wa_type { BTC_WA_5G_HI_CH_RX = BIT(0), BTC_WA_NULL_AP = BIT(1), BTC_WA_HFP_ZB = BIT(2), /* HFP PTA req bit4 define issue */ + BTC_WA_HFP_LAG = BIT(3), /* 52BT WL break BT Rx lag issue */ + BTC_WA_INIT_SCAN = BIT(4) /* 52A/C/D init scan move to wl slot WA */ }; enum btc_3cx_type { @@ -289,9 +291,10 @@ void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev); static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, - enum rtw89_rf_path_bit paths) + enum rtw89_rf_path_bit paths, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 phy_map; phy_map = FIELD_PREP(BTC_RFK_PATH_MAP, paths) | @@ -303,9 +306,10 @@ static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev, static inline u8 rtw89_btc_path_phymap(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, - enum rtw89_rf_path path) + enum rtw89_rf_path path, + enum rtw89_chanctx_idx chanctx_idx) { - return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path)); + return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path), chanctx_idx); } /* return bt req len in TU */ diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 7019f7d482a881..4553810634c66b 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -346,8 +346,8 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) struct rtw89_hal *hal = &rtwdev->hal; const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_chan *chan; - enum rtw89_sub_entity_idx sub_entity_idx; - enum rtw89_sub_entity_idx roc_idx; + enum rtw89_chanctx_idx chanctx_idx; + enum rtw89_chanctx_idx roc_idx; enum rtw89_phy_idx phy_idx; enum rtw89_entity_mode mode; bool entity_active; @@ -360,22 +360,22 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) switch (mode) { case RTW89_ENTITY_MODE_SCC: case RTW89_ENTITY_MODE_MCC: - sub_entity_idx = RTW89_SUB_ENTITY_0; + chanctx_idx = RTW89_CHANCTX_0; break; case RTW89_ENTITY_MODE_MCC_PREPARE: - sub_entity_idx = RTW89_SUB_ENTITY_1; + chanctx_idx = RTW89_CHANCTX_1; break; default: WARN(1, "Invalid ent mode: %d\n", mode); return; } - roc_idx = atomic_read(&hal->roc_entity_idx); - if (roc_idx != RTW89_SUB_ENTITY_IDLE) - sub_entity_idx = roc_idx; + roc_idx = atomic_read(&hal->roc_chanctx_idx); + if (roc_idx != RTW89_CHANCTX_IDLE) + chanctx_idx = roc_idx; phy_idx = RTW89_PHY_0; - chan = rtw89_chan_get(rtwdev, sub_entity_idx); + chan = rtw89_chan_get(rtwdev, chanctx_idx); chip->ops->set_txpwr(rtwdev, chan, phy_idx); } @@ -385,8 +385,8 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev) const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_chan_rcd *chan_rcd; const struct rtw89_chan *chan; - enum rtw89_sub_entity_idx sub_entity_idx; - enum rtw89_sub_entity_idx roc_idx; + enum rtw89_chanctx_idx chanctx_idx; + enum rtw89_chanctx_idx roc_idx; enum rtw89_mac_idx mac_idx; enum rtw89_phy_idx phy_idx; struct rtw89_channel_help_params bak; @@ -399,25 +399,25 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev) switch (mode) { case RTW89_ENTITY_MODE_SCC: case RTW89_ENTITY_MODE_MCC: - sub_entity_idx = RTW89_SUB_ENTITY_0; + chanctx_idx = RTW89_CHANCTX_0; break; case RTW89_ENTITY_MODE_MCC_PREPARE: - sub_entity_idx = RTW89_SUB_ENTITY_1; + chanctx_idx = RTW89_CHANCTX_1; break; default: WARN(1, 
"Invalid ent mode: %d\n", mode); return -EINVAL; } - roc_idx = atomic_read(&hal->roc_entity_idx); - if (roc_idx != RTW89_SUB_ENTITY_IDLE) - sub_entity_idx = roc_idx; + roc_idx = atomic_read(&hal->roc_chanctx_idx); + if (roc_idx != RTW89_CHANCTX_IDLE) + chanctx_idx = roc_idx; mac_idx = RTW89_MAC_0; phy_idx = RTW89_PHY_0; - chan = rtw89_chan_get(rtwdev, sub_entity_idx); - chan_rcd = rtw89_chan_rcd_get(rtwdev, sub_entity_idx); + chan = rtw89_chan_get(rtwdev, chanctx_idx); + chan_rcd = rtw89_chan_rcd_get(rtwdev, chanctx_idx); rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx); @@ -429,7 +429,7 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev) if (!entity_active || chan_rcd->band_changed) { rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type); - rtw89_chip_rfk_band_changed(rtwdev, phy_idx); + rtw89_chip_rfk_band_changed(rtwdev, phy_idx, chan); } rtw89_set_entity_state(rtwdev, true); @@ -441,7 +441,7 @@ void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { const struct cfg80211_chan_def *chandef; - chandef = rtw89_chandef_get(rtwdev, rtwvif->sub_entity_idx); + chandef = rtw89_chandef_get(rtwdev, rtwvif->chanctx_idx); rtw89_get_channel_params(chandef, chan); } @@ -602,15 +602,28 @@ static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev, return rtwsta->mac_id; } +static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 fc = hdr->frame_control; + + desc_info->hdr_llc_len = ieee80211_hdrlen(fc); + desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */ +} + static void rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { + const struct rtw89_chip_info *chip = rtwdev->chip; struct ieee80211_vif *vif = tx_req->vif; struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); + struct sk_buff *skb = tx_req->skb; u8 qsel, ch_dma; qsel = desc_info->hiq ? 
RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT; @@ -629,6 +642,11 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, desc_info->dis_data_fb = true; desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req, chan); + if (chip->hw_mgmt_tx_encrypt && IEEE80211_SKB_CB(skb)->control.hw_key) { + rtw89_core_tx_update_sec_key(rtwdev, tx_req); + rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb); + } + rtw89_debug(rtwdev, RTW89_DBG_TXRX, "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n", desc_info->data_rate, chan->channel, chan->band_type, @@ -769,7 +787,7 @@ static u16 rtw89_core_get_data_rate(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta = tx_req->sta; struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern; - enum rtw89_sub_entity_idx idx = rtwvif->sub_entity_idx; + enum rtw89_chanctx_idx idx = rtwvif->chanctx_idx; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx); u16 lowest_rate; @@ -862,17 +880,6 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev, return PACKET_MAX; } -static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev, - struct rtw89_tx_desc_info *desc_info, - struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = (void *)skb->data; - __le16 fc = hdr->frame_control; - - desc_info->hdr_llc_len = ieee80211_hdrlen(fc); - desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */ -} - static void rtw89_core_tx_wake(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) @@ -1449,16 +1456,20 @@ static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev, return -EINVAL; } - /* For WiFi 7 chips, RXWD.mac_id of PPDU status is not set by hardware, - * so update mac_id by rxinfo_user[].mac_id. - */ - for (i = 0; i < usr_num && chip_gen == RTW89_CHIP_BE; i++) { + for (i = 0; i < usr_num; i++) { user = &rxinfo->user[i]; if (!le32_get_bits(user->w0, RTW89_RXINFO_USER_MAC_ID_VALID)) continue; - - phy_ppdu->mac_id = - le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID); + /* For WiFi 7 chips, RXWD.mac_id of PPDU status is not set + * by hardware, so update mac_id by rxinfo_user[].mac_id. 
+ */ + if (chip_gen == RTW89_CHIP_BE) + phy_ppdu->mac_id = + le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID); + phy_ppdu->has_data = + le32_get_bits(user->w0, RTW89_RXINFO_USER_DATA); + phy_ppdu->has_bcn = + le32_get_bits(user->w0, RTW89_RXINFO_USER_BCN); break; } @@ -1480,6 +1491,26 @@ static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev, return 0; } +static u8 rtw89_get_data_rate_nss(struct rtw89_dev *rtwdev, u16 data_rate) +{ + u8 data_rate_mode; + + data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate); + switch (data_rate_mode) { + case DATA_RATE_MODE_NON_HT: + return 1; + case DATA_RATE_MODE_HT: + return rtw89_get_data_ht_nss(rtwdev, data_rate) + 1; + case DATA_RATE_MODE_VHT: + case DATA_RATE_MODE_HE: + case DATA_RATE_MODE_EHT: + return rtw89_get_data_nss(rtwdev, data_rate) + 1; + default: + rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); + return 0; + } +} + static void rtw89_core_rx_process_phy_ppdu_iter(void *data, struct ieee80211_sta *sta) { @@ -1509,10 +1540,14 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data, ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]); } - if (phy_ppdu->ofdm.has) { + if (phy_ppdu->ofdm.has && (phy_ppdu->has_data || phy_ppdu->has_bcn)) { ewma_snr_add(&rtwsta->avg_snr, phy_ppdu->ofdm.avg_snr); - ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min); - ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max); + if (rtw89_get_data_rate_nss(rtwdev, phy_ppdu->rate) == 1) { + ewma_evm_add(&rtwsta->evm_1ss, phy_ppdu->ofdm.evm_min); + } else { + ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min); + ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max); + } } } @@ -1548,11 +1583,27 @@ static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev, return ie_len; } +static void rtw89_core_parse_phy_status_ie01_v2(struct rtw89_dev *rtwdev, + const struct rtw89_phy_sts_iehdr *iehdr, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + const struct rtw89_phy_sts_ie01_v2 *ie; + u8 *rpl_fd = phy_ppdu->rpl_fd; + + ie = (const struct rtw89_phy_sts_ie01_v2 *)iehdr; + rpl_fd[RF_PATH_A] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A); + rpl_fd[RF_PATH_B] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B); + rpl_fd[RF_PATH_C] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C); + rpl_fd[RF_PATH_D] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D); + + phy_ppdu->bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX); +} + static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, const struct rtw89_phy_sts_iehdr *iehdr, struct rtw89_rx_phy_ppdu *phy_ppdu) { - const struct rtw89_phy_sts_ie0 *ie = (const struct rtw89_phy_sts_ie0 *)iehdr; + const struct rtw89_phy_sts_ie01 *ie = (const struct rtw89_phy_sts_ie01 *)iehdr; s16 cfo; u32 t; @@ -1563,12 +1614,17 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC); } + if (!phy_ppdu->hdr_2_en) + phy_ppdu->rx_path_en = + le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RX_PATH_EN); + if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) return; if (!phy_ppdu->to_self) return; + phy_ppdu->rpl_avg = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD); phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR); phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX); phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN); @@ -1584,6 +1640,39 @@ static void 
rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, } rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu); + + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) + rtw89_core_parse_phy_status_ie01_v2(rtwdev, iehdr, phy_ppdu); +} + +static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev, + const struct rtw89_phy_sts_iehdr *iehdr, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + const struct rtw89_phy_sts_ie00 *ie = (const struct rtw89_phy_sts_ie00 *)iehdr; + u16 tmp_rpl; + + tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL); + phy_ppdu->rpl_avg = tmp_rpl >> 1; +} + +static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev, + const struct rtw89_phy_sts_iehdr *iehdr, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + const struct rtw89_phy_sts_ie00_v2 *ie; + u8 *rpl_path = phy_ppdu->rpl_path; + u16 tmp_rpl[RF_PATH_MAX]; + u8 i; + + ie = (const struct rtw89_phy_sts_ie00_v2 *)iehdr; + tmp_rpl[RF_PATH_A] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A); + tmp_rpl[RF_PATH_B] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B); + tmp_rpl[RF_PATH_C] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C); + tmp_rpl[RF_PATH_D] = le32_get_bits(ie->w5, RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D); + + for (i = 0; i < RF_PATH_MAX; i++) + rpl_path[i] = tmp_rpl[i] >> 1; } static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, @@ -1595,6 +1684,11 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE); switch (ie) { + case RTW89_PHYSTS_IE00_CMN_CCK: + rtw89_core_parse_phy_status_ie00(rtwdev, iehdr, phy_ppdu); + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) + rtw89_core_parse_phy_status_ie00_v2(rtwdev, iehdr, phy_ppdu); + break; case RTW89_PHYSTS_IE01_CMN_OFDM: rtw89_core_parse_phy_status_ie01(rtwdev, iehdr, phy_ppdu); break; @@ -1605,6 +1699,13 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, return 0; } +static void rtw89_core_update_phy_ppdu_hdr_v2(struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + const struct rtw89_phy_sts_hdr_v2 *hdr = phy_ppdu->buf + PHY_STS_HDR_LEN; + + phy_ppdu->rx_path_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_V2_W0_PATH_EN); +} + static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu) { const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf; @@ -1616,6 +1717,10 @@ static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu) rssi[RF_PATH_B] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_B); rssi[RF_PATH_C] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_C); rssi[RF_PATH_D] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_D); + + phy_ppdu->hdr_2_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_HDR_2_EN); + if (phy_ppdu->hdr_2_en) + rtw89_core_update_phy_ppdu_hdr_v2(phy_ppdu); } static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev, @@ -1668,6 +1773,7 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev, } } + rtw89_chip_convert_rpl_to_rssi(rtwdev, phy_ppdu); rtw89_phy_antdiv_parse(rtwdev, phy_ppdu); return 0; @@ -1959,7 +2065,7 @@ static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev, struct ieee80211_rx_status *status) { const struct rtw89_chan_rcd *rcd = - rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0); + rtw89_chan_rcd_get(rtwdev, RTW89_CHANCTX_0); u16 chan = rcd->prev_primary_channel; u8 band = rtw89_hw_to_nl80211_band(rcd->prev_band_type); @@ -2363,7 +2469,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, struct ieee80211_rx_status *rx_status) { const 
struct cfg80211_chan_def *chandef = - rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0); + rtw89_chandef_get(rtwdev, RTW89_CHANCTX_0); u16 data_rate; u8 data_rate_mode; bool eht = false; @@ -2856,7 +2962,7 @@ static void rtw89_core_sta_pending_tx_iter(void *data, struct sk_buff *skb, *tmp; int qsel, ret; - if (rtwvif->sub_entity_idx != rtwvif_target->sub_entity_idx) + if (rtwvif->chanctx_idx != rtwvif_target->chanctx_idx) return; if (skb_queue_len(&rtwsta->roc_queue) == 0) @@ -2950,11 +3056,11 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) "roc send null-1 failed: %d\n", ret); rtw89_for_each_rtwvif(rtwdev, tmp) - if (tmp->sub_entity_idx == rtwvif->sub_entity_idx) + if (tmp->chanctx_idx == rtwvif->chanctx_idx) tmp->offchan = true; cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT); - rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, &roc_chan); + rtw89_config_roc_chandef(rtwdev, rtwvif->chanctx_idx, &roc_chan); rtw89_set_channel(rtwdev); rtw89_write32_clr(rtwdev, rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), @@ -2987,7 +3093,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtwdev->hal.rx_fltr); roc->state = RTW89_ROC_IDLE; - rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, NULL); + rtw89_config_roc_chandef(rtwdev, rtwvif->chanctx_idx, NULL); rtw89_chanctx_proceed(rtwdev); ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, false); if (ret) @@ -2995,7 +3101,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) "roc send null-0 failed: %d\n", ret); rtw89_for_each_rtwvif(rtwdev, tmp) - if (tmp->sub_entity_idx == rtwvif->sub_entity_idx) + if (tmp->chanctx_idx == rtwvif->chanctx_idx) tmp->offchan = false; rtw89_core_handle_sta_pending_tx(rtwdev, rtwvif); @@ -3189,6 +3295,7 @@ static void rtw89_track_work(struct work_struct *work) rtw89_phy_edcca_track(rtwdev); rtw89_tas_track(rtwdev); rtw89_chanctx_track(rtwdev); + rtw89_core_rfkill_poll(rtwdev, false); if (rtwdev->lps_enabled && !rtwdev->btc.lps) rtw89_enter_lps_track(rtwdev); @@ -3367,6 +3474,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev, ewma_rssi_init(&rtwsta->avg_rssi); ewma_snr_init(&rtwsta->avg_snr); + ewma_evm_init(&rtwsta->evm_1ss); for (i = 0; i < ant_num; i++) { ewma_rssi_init(&rtwsta->rssi[i]); ewma_evm_init(&rtwsta->evm_min[i]); @@ -3384,7 +3492,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev, rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, BTC_ROLE_MSTS_STA_CONN_START); - rtw89_chip_rfk_channel(rtwdev); + rtw89_chip_rfk_channel(rtwdev, rtwvif); } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { rtwsta->mac_id = rtw89_acquire_mac_id(rtwdev); if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM) @@ -3491,7 +3599,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta); const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); int ret; if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { @@ -4226,9 +4334,14 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev) u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; - u8 mac_id_num = chip->support_macid_num; + u8 mac_id_num; u8 mac_id; + if (rtwdev->support_mlo) + mac_id_num = chip->support_macid_num / chip->support_link_num; + else + mac_id_num = chip->support_macid_num; + mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num); 
if (mac_id == mac_id_num) return RTW89_MAX_MAC_ID_NUM; @@ -4278,6 +4391,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) rtw89_init_wait(&rtwdev->mcc.wait); rtw89_init_wait(&rtwdev->mac.fw_ofld_wait); + rtw89_init_wait(&rtwdev->wow.wait); + rtw89_init_wait(&rtwdev->mac.ps_wait); INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work); INIT_WORK(&rtwdev->ips_work, rtw89_ips_work); @@ -4333,7 +4448,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, const u8 *mac_addr, bool hw_scan) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); rtwdev->scanning = true; rtw89_leave_lps(rtwdev); @@ -4342,7 +4457,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, ether_addr_copy(rtwvif->mac_addr, mac_addr); rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type); - rtw89_chip_rfk_scan(rtwdev, true); + rtw89_chip_rfk_scan(rtwdev, rtwvif, true); rtw89_hci_recalc_int_mit(rtwdev); rtw89_phy_config_edcca(rtwdev, true); @@ -4360,7 +4475,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, ether_addr_copy(rtwvif->mac_addr, vif->addr); rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); - rtw89_chip_rfk_scan(rtwdev, false); + rtw89_chip_rfk_scan(rtwdev, rtwvif, false); rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0); rtw89_phy_config_edcca(rtwdev, false); @@ -4470,6 +4585,70 @@ static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev) return 0; } +static bool rtw89_chip_has_rfkill(struct rtw89_dev *rtwdev) +{ + return !!rtwdev->chip->rfkill_init; +} + +static void rtw89_core_rfkill_init(struct rtw89_dev *rtwdev) +{ + const struct rtw89_rfkill_regs *regs = rtwdev->chip->rfkill_init; + + rtw89_write16_mask(rtwdev, regs->pinmux.addr, + regs->pinmux.mask, regs->pinmux.data); + rtw89_write16_mask(rtwdev, regs->mode.addr, + regs->mode.mask, regs->mode.data); +} + +static bool rtw89_core_rfkill_get(struct rtw89_dev *rtwdev) +{ + const struct rtw89_reg_def *reg = &rtwdev->chip->rfkill_get; + + return !rtw89_read8_mask(rtwdev, reg->addr, reg->mask); +} + +static void rtw89_rfkill_polling_init(struct rtw89_dev *rtwdev) +{ + if (!rtw89_chip_has_rfkill(rtwdev)) + return; + + rtw89_core_rfkill_init(rtwdev); + rtw89_core_rfkill_poll(rtwdev, true); + wiphy_rfkill_start_polling(rtwdev->hw->wiphy); +} + +static void rtw89_rfkill_polling_deinit(struct rtw89_dev *rtwdev) +{ + if (!rtw89_chip_has_rfkill(rtwdev)) + return; + + wiphy_rfkill_stop_polling(rtwdev->hw->wiphy); +} + +void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force) +{ + bool prev, blocked; + + if (!rtw89_chip_has_rfkill(rtwdev)) + return; + + prev = test_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags); + blocked = rtw89_core_rfkill_get(rtwdev); + + if (!force && prev == blocked) + return; + + rtw89_info(rtwdev, "rfkill hardware state changed to %s\n", + blocked ? 
"disable" : "enable"); + + if (blocked) + set_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags); + else + clear_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags); + + wiphy_rfkill_set_hw_state(rtwdev->hw->wiphy, blocked); +} + int rtw89_chip_info_setup(struct rtw89_dev *rtwdev) { int ret; @@ -4580,6 +4759,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) if (chip->chip_gen == RTW89_CHIP_BE) hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; + if (rtwdev->support_mlo) + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID; @@ -4587,6 +4769,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) #ifdef CONFIG_PM hw->wiphy->wowlan = rtwdev->chip->wowlan_stub; + hw->wiphy->max_sched_scan_ssids = RTW89_SCANOFLD_MAX_SSID; #endif hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL); @@ -4625,6 +4808,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) goto err_unregister_hw; } + rtw89_rfkill_polling_init(rtwdev); + return 0; err_unregister_hw: @@ -4639,6 +4824,7 @@ static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev) { struct ieee80211_hw *hw = rtwdev->hw; + rtw89_rfkill_polling_deinit(rtwdev); ieee80211_unregister_hw(hw); rtw89_core_clr_supported_band(rtwdev); } @@ -4662,6 +4848,8 @@ EXPORT_SYMBOL(rtw89_core_register); void rtw89_core_unregister(struct rtw89_dev *rtwdev) { rtw89_core_unregister_hw(rtwdev); + + rtw89_debugfs_deinit(rtwdev); } EXPORT_SYMBOL(rtw89_core_unregister); @@ -4676,6 +4864,7 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, struct ieee80211_ops *ops; u32 driver_data_size; int fw_format = -1; + bool support_mlo; bool no_chanctx; firmware = rtw89_early_fw_feature_recognize(device, chip, &early_fw, &fw_format); @@ -4704,6 +4893,14 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, if (!hw) goto err; + /* TODO: When driver MLO arch. is done, determine whether to support MLO + * according to the following conditions. + * 1. run with chanctx_ops + * 2. chip->support_link_num != 0 + * 3. FW feature supports AP_LINK_PS + */ + support_mlo = false; + hw->wiphy->iface_combinations = rtw89_iface_combs; if (no_chanctx || chip->support_chanctx_num == 1) @@ -4718,9 +4915,12 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, rtwdev->chip = chip; rtwdev->fw.req.firmware = firmware; rtwdev->fw.fw_format = fw_format; + rtwdev->support_mlo = support_mlo; - rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n", + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s chanctx\n", no_chanctx ? "without" : "with"); + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s MLO cap\n", + support_mlo ? 
"with" : "without"); return rtwdev; diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 11fa003a9788c4..4ed9034fdb4641 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -21,6 +21,7 @@ struct rtw89_efuse_block_cfg; struct rtw89_h2c_rf_tssi; struct rtw89_fw_txpwr_track_cfg; struct rtw89_phy_rfk_log_fmt; +struct rtw89_debugfs; extern const struct ieee80211_ops rtw89_ops; @@ -796,16 +797,24 @@ struct rtw89_rx_phy_ppdu { u8 chan_idx; u8 ie; u16 rate; + u8 rpl_avg; + u8 rpl_path[RF_PATH_MAX]; + u8 rpl_fd[RF_PATH_MAX]; + u8 bw_idx; + u8 rx_path_en; struct { bool has; u8 avg_snr; u8 evm_max; u8 evm_min; } ofdm; + bool has_data; + bool has_bcn; bool ldpc; bool stbc; bool to_self; bool valid; + bool hdr_2_en; }; enum rtw89_mac_idx { @@ -820,12 +829,12 @@ enum rtw89_phy_idx { RTW89_PHY_MAX }; -enum rtw89_sub_entity_idx { - RTW89_SUB_ENTITY_0 = 0, - RTW89_SUB_ENTITY_1 = 1, +enum rtw89_chanctx_idx { + RTW89_CHANCTX_0 = 0, + RTW89_CHANCTX_1 = 1, - NUM_OF_RTW89_SUB_ENTITY, - RTW89_SUB_ENTITY_IDLE = NUM_OF_RTW89_SUB_ENTITY, + NUM_OF_RTW89_CHANCTX, + RTW89_CHANCTX_IDLE = NUM_OF_RTW89_CHANCTX, }; enum rtw89_rf_path { @@ -925,10 +934,12 @@ enum rtw89_sc_offset { RTW89_SC_40_LOWER = 10, }; +/* only mgd features can be added to the enum */ enum rtw89_wow_flags { RTW89_WOW_FLAG_EN_MAGIC_PKT, RTW89_WOW_FLAG_EN_REKEY_PKT, RTW89_WOW_FLAG_EN_DISCONNECT, + RTW89_WOW_FLAG_EN_PATTERN, RTW89_WOW_FLAG_NUM, }; @@ -1585,6 +1596,23 @@ struct rtw89_btc_wl_active_role_v2 { u32 noa_duration; /* ms */ }; +struct rtw89_btc_wl_active_role_v7 { + u8 connected; + u8 pid; + u8 phy; + u8 noa; + + u8 band; + u8 client_ps; + u8 bw; + u8 role; + + u8 ch; + u8 noa_dur; + u8 client_cnt; + u8 rsvd2; +} __packed; + struct rtw89_btc_wl_role_info_bpos { u16 none: 1; u16 station: 1; @@ -1666,6 +1694,22 @@ struct rtw89_btc_wl_rlink { /* H2C info, struct size must be n*4 bytes */ } __packed; #define RTW89_BE_BTC_WL_MAX_ROLE_NUMBER 6 +struct rtw89_btc_wl_role_info_v7 { /* struct size must be n*4 bytes */ + u8 connect_cnt; + u8 link_mode; + u8 link_mode_chg; + u8 p2p_2g; + + struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER]; + + u32 role_map; + u32 mrole_type; /* btc_wl_mrole_type */ + u32 mrole_noa_duration; /* ms */ + u32 dbcc_en; + u32 dbcc_chg; + u32 dbcc_2g_phy; /* which phy operate in 2G, HW_PHY_0 or HW_PHY_1 */ +} __packed; + struct rtw89_btc_wl_role_info_v8 { /* H2C info, struct size must be n*4 bytes */ u8 connect_cnt; u8 link_mode; @@ -1829,6 +1873,7 @@ struct rtw89_btc_wl_info { struct rtw89_btc_wl_role_info role_info; struct rtw89_btc_wl_role_info_v1 role_info_v1; struct rtw89_btc_wl_role_info_v2 role_info_v2; + struct rtw89_btc_wl_role_info_v7 role_info_v7; struct rtw89_btc_wl_role_info_v8 role_info_v8; struct rtw89_btc_wl_scan_info scan_info; struct rtw89_btc_wl_dbcc_info dbcc_info; @@ -1846,8 +1891,10 @@ struct rtw89_btc_wl_info { bool is_5g_hi_channel; bool pta_reg_mac_chg; bool bg_mode; + bool he_mode; bool scbd_change; bool fw_ver_mismatch; + bool client_cnt_inc_2g; u32 scbd; }; @@ -2198,6 +2245,19 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 { struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info; } __packed; +struct rtw89_btc_fbtc_rpt_ctrl_v7 { + u8 fver; + u8 rsvd0; + u8 rsvd1; + u8 rsvd2; + + u8 gnt_val[RTW89_PHY_MAX][4]; + __le16 bt_cnt[BTC_BCNT_STA_MAX_V105]; + + struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info; + struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info; +} __packed; + struct 
rtw89_btc_fbtc_rpt_ctrl_v8 { u8 fver; u8 rsvd0; @@ -2216,6 +2276,7 @@ union rtw89_btc_fbtc_rpt_ctrl_ver_info { struct rtw89_btc_fbtc_rpt_ctrl_v4 v4; struct rtw89_btc_fbtc_rpt_ctrl_v5 v5; struct rtw89_btc_fbtc_rpt_ctrl_v105 v105; + struct rtw89_btc_fbtc_rpt_ctrl_v7 v7; struct rtw89_btc_fbtc_rpt_ctrl_v8 v8; }; @@ -3306,6 +3367,7 @@ struct rtw89_sta { struct ewma_rssi avg_rssi; struct ewma_rssi rssi[RF_PATH_MAX]; struct ewma_snr avg_snr; + struct ewma_evm evm_1ss; struct ewma_evm evm_min[RF_PATH_MAX]; struct ewma_evm evm_max[RF_PATH_MAX]; struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS]; @@ -3403,7 +3465,7 @@ struct rtw89_vif { struct rtw89_dev *rtwdev; struct rtw89_roc roc; bool chanctx_assigned; /* only valid when running with chanctx_ops */ - enum rtw89_sub_entity_idx sub_entity_idx; + enum rtw89_chanctx_idx chanctx_idx; enum rtw89_reg_6ghz_power reg_6ghz_power; struct rtw89_reg_6ghz_tpe reg_6ghz_tpe; @@ -3537,10 +3599,12 @@ struct rtw89_chip_ops { void (*rfk_hw_init)(struct rtw89_dev *rtwdev); void (*rfk_init)(struct rtw89_dev *rtwdev); void (*rfk_init_late)(struct rtw89_dev *rtwdev); - void (*rfk_channel)(struct rtw89_dev *rtwdev); + void (*rfk_channel)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); void (*rfk_band_changed)(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx); - void (*rfk_scan)(struct rtw89_dev *rtwdev, bool start); + enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan); + void (*rfk_scan)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool start); void (*rfk_track)(struct rtw89_dev *rtwdev); void (*power_trim)(struct rtw89_dev *rtwdev); void (*set_txpwr)(struct rtw89_dev *rtwdev, @@ -3555,11 +3619,15 @@ struct rtw89_chip_ops { void (*query_ppdu)(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct ieee80211_rx_status *status); + void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu); void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en, enum rtw89_phy_idx phy_idx); void (*cfg_txrx_path)(struct rtw89_dev *rtwdev); void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx); + void (*digital_pwr_comp)(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx); int (*pwr_on_func)(struct rtw89_dev *rtwdev); int (*pwr_off_func)(struct rtw89_dev *rtwdev); void (*query_rxdesc)(struct rtw89_dev *rtwdev, @@ -3671,6 +3739,7 @@ struct rtw89_scan_option { u16 slow_pd; u16 norm_cy; u8 opch_end; + u16 delay; u64 prohib_chan; enum rtw89_phy_idx band; enum rtw89_scan_be_operation operation; @@ -3909,16 +3978,22 @@ struct rtw89_txpwr_conf { const void *data; }; +static inline bool rtw89_txpwr_entcpy(void *entry, const void *cursor, u8 size, + const struct rtw89_txpwr_conf *conf) +{ + u8 valid_size = min(size, conf->ent_sz); + + memcpy(entry, cursor, valid_size); + return true; +} + #define rtw89_txpwr_conf_valid(conf) (!!(conf)->data) #define rtw89_for_each_in_txpwr_conf(entry, cursor, conf) \ - for (typecheck(const void *, cursor), (cursor) = (conf)->data, \ - memcpy(&(entry), cursor, \ - min_t(u8, sizeof(entry), (conf)->ent_sz)); \ + for (typecheck(const void *, cursor), (cursor) = (conf)->data; \ (cursor) < (conf)->data + (conf)->num_ents * (conf)->ent_sz; \ - (cursor) += (conf)->ent_sz, \ - memcpy(&(entry), cursor, \ - min_t(u8, sizeof(entry), (conf)->ent_sz))) + (cursor) += (conf)->ent_sz) \ + if (rtw89_txpwr_entcpy(&(entry), cursor, sizeof(entry), conf)) struct rtw89_txpwr_byrate_data { struct rtw89_txpwr_conf conf; @@ -4066,6 +4141,11 @@ struct 
rtw89_rrsr_cfgs { struct rtw89_reg3_def rsc; }; +struct rtw89_rfkill_regs { + struct rtw89_reg3_def pinmux; + struct rtw89_reg3_def mode; +}; + struct rtw89_dig_regs { u32 seg0_pd_reg; u32 pd_lower_bound_mask; @@ -4166,6 +4246,7 @@ struct rtw89_chip_info { u8 wde_qempty_mgq_grpsel; u32 rf_base_addr[2]; u8 support_macid_num; + u8 support_link_num; u8 support_chanctx_num; u8 support_bands; u16 support_bandwidths; @@ -4174,6 +4255,7 @@ struct rtw89_chip_info { bool ul_tb_waveform_ctrl; bool ul_tb_pwr_diff; bool hw_sec_hdr; + bool hw_mgmt_tx_encrypt; u8 rf_path_num; u8 tx_nss; u8 rx_nss; @@ -4257,6 +4339,8 @@ struct rtw89_chip_info { const struct rtw89_rrsr_cfgs *rrsr_cfgs; struct rtw89_reg_def bss_clr_vld; u32 bss_clr_map_reg; + const struct rtw89_rfkill_regs *rfkill_init; + struct rtw89_reg_def rfkill_get; u32 dma_ch_mask; const struct rtw89_edcca_regs *edcca_regs; const struct wiphy_wowlan_support *wowlan_stub; @@ -4327,6 +4411,8 @@ struct rtw89_mac_info { /* see RTW89_FW_OFLD_WAIT_COND series for wait condition */ struct rtw89_wait_info fw_ofld_wait; + /* see RTW89_PS_WAIT_COND series for wait condition */ + struct rtw89_wait_info ps_wait; }; enum rtw89_fwdl_check_type { @@ -4356,7 +4442,9 @@ enum rtw89_fw_feature { RTW89_FW_FEATURE_NO_LPS_PG, RTW89_FW_FEATURE_BEACON_FILTER, RTW89_FW_FEATURE_MACID_PAUSE_SLEEP, + RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0, RTW89_FW_FEATURE_WOW_REASON_V1, + RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0, }; struct rtw89_fw_suit { @@ -4519,7 +4607,7 @@ struct rtw89_tas_info { }; struct rtw89_chanctx_cfg { - enum rtw89_sub_entity_idx idx; + enum rtw89_chanctx_idx idx; int ref_count; }; @@ -4544,7 +4632,7 @@ enum rtw89_entity_mode { RTW89_ENTITY_MODE_UNHANDLED = -ESRCH, }; -struct rtw89_sub_entity { +struct rtw89_chanctx { struct cfg80211_chan_def chandef; struct rtw89_chan chan; struct rtw89_chan_rcd rcd; @@ -4577,11 +4665,11 @@ struct rtw89_hal { bool ant_diversity_fixed; bool support_cckpd; bool support_igi; - atomic_t roc_entity_idx; + atomic_t roc_chanctx_idx; DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES); - DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY); - struct rtw89_sub_entity sub[NUM_OF_RTW89_SUB_ENTITY]; + DECLARE_BITMAP(entity_map, NUM_OF_RTW89_CHANCTX); + struct rtw89_chanctx chanctx[NUM_OF_RTW89_CHANCTX]; struct cfg80211_chan_def roc_chandef; bool entity_active; @@ -4615,6 +4703,7 @@ enum rtw89_flags { RTW89_FLAG_WOWLAN, RTW89_FLAG_FORBIDDEN_TRACK_WROK, RTW89_FLAG_CHANGING_INTERFACE, + RTW89_FLAG_HW_RFKILL_STATE, NUM_OF_RTW89_FLAGS, }; @@ -5293,6 +5382,13 @@ struct rtw89_wow_param { u8 gtk_alg; u8 ptk_keyidx; u8 akm; + + /* see RTW89_WOW_WAIT_COND series for wait condition */ + struct rtw89_wait_info wait; + + bool pno_inited; + struct list_head pno_pkt_list; + struct cfg80211_sched_scan_request *nd_config; }; struct rtw89_mcc_limit { @@ -5399,6 +5495,7 @@ struct rtw89_dev { const struct ieee80211_ops *ops; bool dbcc_en; + bool support_mlo; enum rtw89_mlo_dbcc_mode mlo_dbcc_mode; struct rtw89_hw_scan_info scan_info; const struct rtw89_chip_info *chip; @@ -5505,6 +5602,8 @@ struct rtw89_dev { struct napi_struct napi; int napi_budget_countdown; + struct rtw89_debugfs *debugfs; + /* HCI related data, keep last */ u8 priv[] __aligned(sizeof(void *)); }; @@ -6028,33 +6127,33 @@ void rtw89_chip_set_channel_done(struct rtw89_dev *rtwdev, static inline const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx) + enum rtw89_chanctx_idx idx) { struct rtw89_hal *hal = &rtwdev->hal; - enum rtw89_sub_entity_idx 
roc_idx = atomic_read(&hal->roc_entity_idx); + enum rtw89_chanctx_idx roc_idx = atomic_read(&hal->roc_chanctx_idx); if (roc_idx == idx) return &hal->roc_chandef; - return &hal->sub[idx].chandef; + return &hal->chanctx[idx].chandef; } static inline const struct rtw89_chan *rtw89_chan_get(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx) + enum rtw89_chanctx_idx idx) { struct rtw89_hal *hal = &rtwdev->hal; - return &hal->sub[idx].chan; + return &hal->chanctx[idx].chan; } static inline const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev, - enum rtw89_sub_entity_idx idx) + enum rtw89_chanctx_idx idx) { struct rtw89_hal *hal = &rtwdev->hal; - return &hal->sub[idx].rcd; + return &hal->chanctx[idx].rcd; } static inline @@ -6064,9 +6163,9 @@ const struct rtw89_chan *rtw89_scan_chan_get(struct rtw89_dev *rtwdev) struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); if (rtwvif) - return rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + return rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); else - return rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + return rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); } static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev) @@ -6140,29 +6239,32 @@ static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev) chip->ops->rfk_init_late(rtwdev); } -static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev) +static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) { const struct rtw89_chip_info *chip = rtwdev->chip; if (chip->ops->rfk_channel) - chip->ops->rfk_channel(rtwdev); + chip->ops->rfk_channel(rtwdev, rtwvif); } static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { const struct rtw89_chip_info *chip = rtwdev->chip; if (chip->ops->rfk_band_changed) - chip->ops->rfk_band_changed(rtwdev, phy_idx); + chip->ops->rfk_band_changed(rtwdev, phy_idx, chan); } -static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, bool start) +static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool start) { const struct rtw89_chip_info *chip = rtwdev->chip; if (chip->ops->rfk_scan) - chip->ops->rfk_scan(rtwdev, start); + chip->ops->rfk_scan(rtwdev, rtwvif, start); } static inline void rtw89_chip_rfk_track(struct rtw89_dev *rtwdev) @@ -6219,6 +6321,15 @@ static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev, chip->ops->query_ppdu(rtwdev, phy_ppdu, status); } +static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->convert_rpl_to_rssi) + chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu); +} + static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, enum rtw89_phy_idx phy_idx) { @@ -6250,6 +6361,15 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif->mac_idx); } +static inline void rtw89_chip_digital_pwr_comp(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->digital_pwr_comp) + chip->ops->digital_pwr_comp(rtwdev, phy_idx); +} + static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev, const struct rtw89_txpwr_table *tbl) { @@ -6503,6 +6623,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev, void 
rtw89_core_set_tid_config(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, struct cfg80211_tid_config *tid_config); +void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force); void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks); int rtw89_core_init(struct rtw89_dev *rtwdev); void rtw89_core_deinit(struct rtw89_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index 9e1353cce9ccd6..29f85210f91964 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -52,6 +52,27 @@ struct rtw89_debugfs_priv { }; }; +struct rtw89_debugfs { + struct rtw89_debugfs_priv read_reg; + struct rtw89_debugfs_priv write_reg; + struct rtw89_debugfs_priv read_rf; + struct rtw89_debugfs_priv write_rf; + struct rtw89_debugfs_priv rf_reg_dump; + struct rtw89_debugfs_priv txpwr_table; + struct rtw89_debugfs_priv mac_reg_dump; + struct rtw89_debugfs_priv mac_mem_dump; + struct rtw89_debugfs_priv mac_dbg_port_dump; + struct rtw89_debugfs_priv send_h2c; + struct rtw89_debugfs_priv early_h2c; + struct rtw89_debugfs_priv fw_crash; + struct rtw89_debugfs_priv btc_info; + struct rtw89_debugfs_priv btc_manual; + struct rtw89_debugfs_priv fw_log_manual; + struct rtw89_debugfs_priv phy_info; + struct rtw89_debugfs_priv stations; + struct rtw89_debugfs_priv disable_dm; +}; + static const u16 rtw89_rate_info_bw_to_mhz_map[] = { [RATE_INFO_BW_20] = 20, [RATE_INFO_BW_40] = 40, @@ -851,7 +872,7 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); - chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan); @@ -3463,9 +3484,9 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp, return count; } -static ssize_t rtw89_debug_fw_log_manual_set(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) { struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; @@ -3505,7 +3526,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) struct rtw89_hal *hal = &rtwdev->hal; u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num; bool ant_asterisk = hal->tx_path_diversity || hal->ant_diversity; - u8 evm_min, evm_max; + u8 evm_min, evm_max, evm_1ss; u8 rssi; u8 snr; int i; @@ -3574,7 +3595,8 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) } seq_puts(m, "]\n"); - seq_puts(m, "EVM: ["); + evm_1ss = ewma_evm_read(&rtwsta->evm_1ss); + seq_printf(m, "EVM: [%2u.%02u, ", evm_1ss >> 2, (evm_1ss & 0x3) * 25); for (i = 0; i < (hal->ant_diversity ? 
2 : 1); i++) { evm_min = ewma_evm_read(&rtwsta->evm_min[i]); evm_max = ewma_evm_read(&rtwsta->evm_max[i]); @@ -3853,92 +3875,55 @@ rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf, return count; } -static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = { - .cb_read = rtw89_debug_priv_read_reg_get, - .cb_write = rtw89_debug_priv_read_reg_select, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_write_reg = { - .cb_write = rtw89_debug_priv_write_reg_set, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_read_rf = { - .cb_read = rtw89_debug_priv_read_rf_get, - .cb_write = rtw89_debug_priv_read_rf_select, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_write_rf = { - .cb_write = rtw89_debug_priv_write_rf_set, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_rf_reg_dump = { - .cb_read = rtw89_debug_priv_rf_reg_dump_get, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_txpwr_table = { - .cb_read = rtw89_debug_priv_txpwr_table_get, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_mac_reg_dump = { - .cb_read = rtw89_debug_priv_mac_reg_dump_get, - .cb_write = rtw89_debug_priv_mac_reg_dump_select, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_mac_mem_dump = { - .cb_read = rtw89_debug_priv_mac_mem_dump_get, - .cb_write = rtw89_debug_priv_mac_mem_dump_select, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_mac_dbg_port_dump = { - .cb_read = rtw89_debug_priv_mac_dbg_port_dump_get, - .cb_write = rtw89_debug_priv_mac_dbg_port_dump_select, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_send_h2c = { - .cb_write = rtw89_debug_priv_send_h2c_set, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = { - .cb_read = rtw89_debug_priv_early_h2c_get, - .cb_write = rtw89_debug_priv_early_h2c_set, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = { - .cb_read = rtw89_debug_priv_fw_crash_get, - .cb_write = rtw89_debug_priv_fw_crash_set, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = { - .cb_read = rtw89_debug_priv_btc_info_get, -}; - -static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = { - .cb_write = rtw89_debug_priv_btc_manual_set, -}; +#define rtw89_debug_priv_get(name) \ +{ \ + .cb_read = rtw89_debug_priv_ ##name## _get, \ +} -static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = { - .cb_write = rtw89_debug_fw_log_manual_set, -}; +#define rtw89_debug_priv_set(name) \ +{ \ + .cb_write = rtw89_debug_priv_ ##name## _set, \ +} -static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = { - .cb_read = rtw89_debug_priv_phy_info_get, -}; +#define rtw89_debug_priv_select_and_get(name) \ +{ \ + .cb_write = rtw89_debug_priv_ ##name## _select, \ + .cb_read = rtw89_debug_priv_ ##name## _get, \ +} -static struct rtw89_debugfs_priv rtw89_debug_priv_stations = { - .cb_read = rtw89_debug_priv_stations_get, -}; +#define rtw89_debug_priv_set_and_get(name) \ +{ \ + .cb_write = rtw89_debug_priv_ ##name## _set, \ + .cb_read = rtw89_debug_priv_ ##name## _get, \ +} -static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = { - .cb_read = rtw89_debug_priv_disable_dm_get, - .cb_write = rtw89_debug_priv_disable_dm_set, +static const struct rtw89_debugfs rtw89_debugfs_templ = { + .read_reg = rtw89_debug_priv_select_and_get(read_reg), + .write_reg = rtw89_debug_priv_set(write_reg), + .read_rf = rtw89_debug_priv_select_and_get(read_rf), + .write_rf = rtw89_debug_priv_set(write_rf), + .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump), + .txpwr_table 
= rtw89_debug_priv_get(txpwr_table), + .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump), + .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump), + .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump), + .send_h2c = rtw89_debug_priv_set(send_h2c), + .early_h2c = rtw89_debug_priv_set_and_get(early_h2c), + .fw_crash = rtw89_debug_priv_set_and_get(fw_crash), + .btc_info = rtw89_debug_priv_get(btc_info), + .btc_manual = rtw89_debug_priv_set(btc_manual), + .fw_log_manual = rtw89_debug_priv_set(fw_log_manual), + .phy_info = rtw89_debug_priv_get(phy_info), + .stations = rtw89_debug_priv_get(stations), + .disable_dm = rtw89_debug_priv_set_and_get(disable_dm), }; #define rtw89_debugfs_add(name, mode, fopname, parent) \ do { \ - rtw89_debug_priv_ ##name.rtwdev = rtwdev; \ - if (!debugfs_create_file(#name, mode, \ - parent, &rtw89_debug_priv_ ##name, \ - &file_ops_ ##fopname)) \ + struct rtw89_debugfs_priv *priv = &rtwdev->debugfs->name; \ + priv->rtwdev = rtwdev; \ + if (IS_ERR(debugfs_create_file(#name, mode, parent, priv, \ + &file_ops_ ##fopname))) \ pr_debug("Unable to initialize debugfs:%s\n", #name); \ } while (0) @@ -3949,13 +3934,9 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = { #define rtw89_debugfs_add_r(name) \ rtw89_debugfs_add(name, S_IFREG | 0444, single_r, debugfs_topdir) -void rtw89_debugfs_init(struct rtw89_dev *rtwdev) +static +void rtw89_debugfs_add_sec0(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir) { - struct dentry *debugfs_topdir; - - debugfs_topdir = debugfs_create_dir("rtw89", - rtwdev->hw->wiphy->debugfsdir); - rtw89_debugfs_add_rw(read_reg); rtw89_debugfs_add_w(write_reg); rtw89_debugfs_add_rw(read_rf); @@ -3965,6 +3946,11 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev) rtw89_debugfs_add_rw(mac_reg_dump); rtw89_debugfs_add_rw(mac_mem_dump); rtw89_debugfs_add_rw(mac_dbg_port_dump); +} + +static +void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir) +{ rtw89_debugfs_add_w(send_h2c); rtw89_debugfs_add_rw(early_h2c); rtw89_debugfs_add_rw(fw_crash); @@ -3975,6 +3961,27 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev) rtw89_debugfs_add_r(stations); rtw89_debugfs_add_rw(disable_dm); } + +void rtw89_debugfs_init(struct rtw89_dev *rtwdev) +{ + struct dentry *debugfs_topdir; + + rtwdev->debugfs = kmemdup(&rtw89_debugfs_templ, + sizeof(rtw89_debugfs_templ), GFP_KERNEL); + if (!rtwdev->debugfs) + return; + + debugfs_topdir = debugfs_create_dir("rtw89", + rtwdev->hw->wiphy->debugfsdir); + + rtw89_debugfs_add_sec0(rtwdev, debugfs_topdir); + rtw89_debugfs_add_sec1(rtwdev, debugfs_topdir); +} + +void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) +{ + kfree(rtwdev->debugfs); +} #endif #ifdef CONFIG_RTW89_DEBUGMSG diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h index 800ea59873a1d8..fc690f7c55dc7a 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.h +++ b/drivers/net/wireless/realtek/rtw89/debug.h @@ -49,8 +49,10 @@ enum rtw89_debug_mac_reg_sel { #ifdef CONFIG_RTW89_DEBUGFS void rtw89_debugfs_init(struct rtw89_dev *rtwdev); +void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev); #else static inline void rtw89_debugfs_init(struct rtw89_dev *rtwdev) {} +static inline void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) {} #endif #define rtw89_info(rtwdev, a...) 
dev_info((rtwdev)->dev, ##a) diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index fbe08c162b93de..d9b0e7ebe619a3 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -670,6 +670,10 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = { __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), + __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), + __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), + __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), + __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), @@ -679,8 +683,10 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = { __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), + __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), + __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), }; static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, @@ -2491,7 +2497,7 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_h2c_lps_ch_info *h2c; u32 len = sizeof(*h2c); @@ -2810,7 +2816,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); struct sk_buff *skb; u8 pads[RTW89_PPE_BW_NUM]; u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; @@ -2943,9 +2949,9 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; struct rtw89_h2c_cctlinfo_ud_g7 *h2c; u8 pads[RTW89_PPE_BW_NUM]; @@ -3206,7 +3212,7 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); struct rtw89_h2c_bcn_upd *h2c; struct sk_buff *skb_beacon; @@ -3285,7 +3291,7 @@ EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); struct rtw89_h2c_bcn_upd_be *h2c; struct sk_buff *skb_beacon; @@ -4319,6 +4325,52 @@ int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) return ret; } +int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; + struct rtw89_h2c_cxrole_v7 *h2c; + u32 len = sizeof(*h2c); + struct sk_buff *skb; + int ret; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); + return -ENOMEM; + } + skb_put(skb, len); + h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; + + h2c->hdr.type = type; + h2c->hdr.ver = btc->ver->fwlrole; + h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; + memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); + h2c->_u32.role_map = cpu_to_le32(role->role_map); + h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); + h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); + h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); + h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); + h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, BTFC_SET, + SET_DRV_INFO, 0, 0, + len); + + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return ret; +} + int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) { struct rtw89_btc *btc = &rtwdev->btc; @@ -4337,6 +4389,7 @@ int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; h2c->hdr.type = type; + h2c->hdr.ver = btc->ver->fwlrole; h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); h2c->_u32.role_map = cpu_to_le32(role->role_map); @@ -4417,7 +4470,7 @@ int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { - rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); + rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); return -ENOMEM; } skb_put(skb, len); @@ -4802,16 +4855,20 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, return 0; } -int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, - struct rtw89_scan_option *option, - struct rtw89_vif *rtwvif) +#define RTW89_SCAN_DELAY_TSF_UNIT 104800 +int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, + struct rtw89_scan_option *option, + struct rtw89_vif *rtwvif, + bool wowlan) { struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; struct rtw89_chan *op = &rtwdev->scan_info.op_chan; + enum rtw89_scan_mode scan_mode 
= RTW89_SCAN_IMMEDIATE; struct rtw89_h2c_scanofld *h2c; u32 len = sizeof(*h2c); struct sk_buff *skb; unsigned int cond; + u64 tsf = 0; int ret; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); @@ -4822,6 +4879,17 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, skb_put(skb, len); h2c = (struct rtw89_h2c_scanofld *)skb->data; + if (option->delay) { + ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf); + if (ret) { + rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); + scan_mode = RTW89_SCAN_IMMEDIATE; + } else { + scan_mode = RTW89_SCAN_DELAY; + tsf += option->delay * RTW89_SCAN_DELAY_TSF_UNIT; + } + } + h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | @@ -4830,9 +4898,11 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | le32_encode_bits(option->target_ch_mode, RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | - le32_encode_bits(RTW89_SCAN_IMMEDIATE, - RTW89_H2C_SCANOFLD_W1_START_MODE) | - le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); + le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | + le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); + + h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | + le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); if (option->target_ch_mode) { h2c->w1 |= le32_encode_bits(op->band_width, @@ -4845,6 +4915,11 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); } + h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), + RTW89_H2C_SCANOFLD_W3_TSF_HIGH); + h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), + RTW89_H2C_SCANOFLD_W4_TSF_LOW); + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, H2C_FUNC_SCANOFLD, 1, 1, @@ -4888,7 +4963,8 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, struct rtw89_scan_option *option, - struct rtw89_vif *rtwvif) + struct rtw89_vif *rtwvif, + bool wowlan) { struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; @@ -4902,6 +4978,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; u8 opch_size = sizeof(*opch) * option->num_opch; u8 probe_id[NUM_NL80211_BANDS]; + u8 cfg_len = sizeof(*h2c); unsigned int cond; void *ptr; int ret; @@ -4910,7 +4987,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, rtw89_scan_get_6g_disabled_chan(rtwdev, option); - len = sizeof(*h2c) + macc_role_size + opch_size; + len = cfg_len + macc_role_size + opch_size; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); @@ -4923,11 +5000,13 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); - list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { - if (pkt_info->wildcard_6ghz) { - /* Provide wildcard as template */ - probe_id[NL80211_BAND_6GHZ] = pkt_info->id; - break; + if (!wowlan) { + list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { + if (pkt_info->wildcard_6ghz) { + /* Provide wildcard as template */ + probe_id[NL80211_BAND_6GHZ] = 
pkt_info->id; + break; + } } } @@ -4958,7 +5037,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | le32_encode_bits(probe_id[NL80211_BAND_6GHZ], RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | - le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); + le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); @@ -4966,7 +5045,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); - if (req->no_cck) { + if (!wowlan && req->no_cck) { h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | @@ -4975,10 +5054,24 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, le32_encode_bits(RTW89_HW_RATE_OFDM6, RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); } - ptr += sizeof(*h2c); + + if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { + cfg_len = offsetofend(typeof(*h2c), w8); + goto flex_member; + } + + h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), + RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | + le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), + RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | + le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), + RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); + +flex_member: + ptr += cfg_len; for (i = 0; i < option->num_macc_role; i++) { - macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i]; + macc_role = ptr; macc_role->w0 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | @@ -5132,14 +5225,21 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; + struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; struct rtw89_fw_h2c_rfk_pre_info *h2c; u8 tbl_sel = rfk_mcc->table_idx; u32 len = sizeof(*h2c); struct sk_buff *skb; + u8 ver = U8_MAX; u8 tbl, path; u32 val32; int ret; + if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { + len = sizeof(*h2c_v0); + ver = 0; + } + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); @@ -5148,41 +5248,53 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, skb_put(skb, len); h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; - h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); + h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { - h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]); - h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]); + h2c->common.dbcc.ch[path][tbl] = + cpu_to_le32(rfk_mcc->ch[tbl]); + h2c->common.dbcc.band[path][tbl] = + cpu_to_le32(rfk_mcc->band[tbl]); } } for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { - h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]); - h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]); + h2c->common.tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]); + h2c->common.tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]); } - h2c->phy_idx = cpu_to_le32(phy_idx); - h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]); 
- h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]); - h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]); - - val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); - h2c->ktbl_sel0 = cpu_to_le32(val32); - val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); - h2c->ktbl_sel1 = cpu_to_le32(val32); - val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); - h2c->rfmod0 = cpu_to_le32(val32); - val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); - h2c->rfmod1 = cpu_to_le32(val32); + h2c->common.phy_idx = cpu_to_le32(phy_idx); - if (rtw89_is_mlo_1_1(rtwdev)) - h2c->mlo_1_1 = cpu_to_le32(1); + if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ + h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; + + h2c_v0->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]); + h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]); + h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]); + + val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); + h2c_v0->ktbl_sel0 = cpu_to_le32(val32); + val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); + h2c_v0->ktbl_sel1 = cpu_to_le32(val32); + val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); + h2c_v0->rfmod0 = cpu_to_le32(val32); + val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); + h2c_v0->rfmod1 = cpu_to_le32(val32); + + if (rtw89_is_mlo_1_1(rtwdev)) + h2c_v0->mlo_1_1 = cpu_to_le32(1); + + h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); - h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); + goto done; + } + if (rtw89_is_mlo_1_1(rtwdev)) + h2c->mlo_1_1 = cpu_to_le32(1); +done: rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, @@ -5202,10 +5314,8 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, } int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, - enum rtw89_tssi_mode tssi_mode) + const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - RTW89_SUB_ENTITY_0); struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_h2c_rf_tssi *h2c; u32 len = sizeof(*h2c); @@ -5249,7 +5359,8 @@ int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, return ret; } -int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { struct rtw89_h2c_rf_iqk *h2c; u32 len = sizeof(*h2c); @@ -5284,10 +5395,9 @@ int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) return ret; } -int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - RTW89_SUB_ENTITY_0); struct rtw89_h2c_rf_dpk *h2c; u32 len = sizeof(*h2c); struct sk_buff *skb; @@ -5327,10 +5437,9 @@ int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) return ret; } -int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - RTW89_SUB_ENTITY_0); struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_h2c_rf_txgapk *h2c; u32 len = sizeof(*h2c); @@ -5371,7 +5480,8 @@ int 
rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) return ret; } -int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { struct rtw89_h2c_rf_dack *h2c; u32 len = sizeof(*h2c); @@ -5407,10 +5517,9 @@ int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) return ret; } -int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - RTW89_SUB_ENTITY_0); struct rtw89_h2c_rf_rxdck *h2c; u32 len = sizeof(*h2c); struct sk_buff *skb; @@ -5926,6 +6035,56 @@ static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, return ret; } +static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, + int chan_type, int ssid_num, + struct rtw89_mac_chinfo *ch_info) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_pktofld_info *info; + u8 probe_count = 0; + + ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; + ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; + ch_info->bw = RTW89_SCAN_WIDTH; + ch_info->tx_pkt = true; + ch_info->cfg_tx_pwr = false; + ch_info->tx_pwr_idx = 0; + ch_info->tx_null = false; + ch_info->pause_data = false; + ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; + + if (ssid_num) { + list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { + if (info->channel_6ghz && + ch_info->pri_ch != info->channel_6ghz) + continue; + else if (info->channel_6ghz && probe_count != 0) + ch_info->period += RTW89_CHANNEL_TIME_6G; + + if (info->wildcard_6ghz) + continue; + + ch_info->pkt_id[probe_count++] = info->id; + if (probe_count >= RTW89_SCANOFLD_MAX_SSID) + break; + } + ch_info->num_pkt = probe_count; + } + + switch (chan_type) { + case RTW89_CHAN_DFS: + if (ch_info->ch_band != RTW89_BAND_6G) + ch_info->period = max_t(u8, ch_info->period, + RTW89_DFS_CHAN_TIME); + ch_info->dwell_time = RTW89_DWELL_TIME; + break; + case RTW89_CHAN_ACTIVE: + break; + default: + rtw89_err(rtwdev, "Channel type out of bound\n"); + } +} + static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, int ssid_num, struct rtw89_mac_chinfo *ch_info) @@ -6004,6 +6163,45 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, } } +static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, + int ssid_num, + struct rtw89_mac_chinfo_be *ch_info) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_pktofld_info *info; + u8 probe_count = 0, i; + + ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; + ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; + ch_info->bw = RTW89_SCAN_WIDTH; + ch_info->tx_null = false; + ch_info->pause_data = false; + ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; + + if (ssid_num) { + list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { + ch_info->pkt_id[probe_count++] = info->id; + if (probe_count >= RTW89_SCANOFLD_MAX_SSID) + break; + } + } + + for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) + ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; + + switch (chan_type) { + case RTW89_CHAN_DFS: + ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); + ch_info->dwell_time = RTW89_DWELL_TIME; + break; + case RTW89_CHAN_ACTIVE: + break; + default: + rtw89_warn(rtwdev, "Channel type out of bound\n"); + break; + } +} + static 
void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, int ssid_num, struct rtw89_mac_chinfo_be *ch_info) @@ -6066,8 +6264,58 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, } } -int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, - struct rtw89_vif *rtwvif, bool connected) +int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; + struct rtw89_mac_chinfo *ch_info, *tmp; + struct ieee80211_channel *channel; + struct list_head chan_list; + int list_len; + enum rtw89_chan_type type; + int ret = 0; + u32 idx; + + INIT_LIST_HEAD(&chan_list); + for (idx = 0, list_len = 0; + idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; + idx++, list_len++) { + channel = nd_config->channels[idx]; + ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); + if (!ch_info) { + ret = -ENOMEM; + goto out; + } + + ch_info->period = RTW89_CHANNEL_TIME; + ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); + ch_info->central_ch = channel->hw_value; + ch_info->pri_ch = channel->hw_value; + ch_info->is_psc = cfg80211_channel_is_psc(channel); + + if (channel->flags & + (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) + type = RTW89_CHAN_DFS; + else + type = RTW89_CHAN_ACTIVE; + + rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); + list_add_tail(&ch_info->list, &chan_list); + } + ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); + +out: + list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { + list_del(&ch_info->list); + kfree(ch_info); + } + + return ret; +} + +int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool connected) { struct cfg80211_scan_request *req = rtwvif->scan_req; struct rtw89_mac_chinfo *ch_info, *tmp; @@ -6143,6 +6391,58 @@ int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, return ret; } +int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; + struct rtw89_mac_chinfo_be *ch_info, *tmp; + struct ieee80211_channel *channel; + struct list_head chan_list; + enum rtw89_chan_type type; + int list_len, ret; + u32 idx; + + INIT_LIST_HEAD(&chan_list); + + for (idx = 0, list_len = 0; + idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; + idx++, list_len++) { + channel = nd_config->channels[idx]; + ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); + if (!ch_info) { + ret = -ENOMEM; + goto out; + } + + ch_info->period = RTW89_CHANNEL_TIME; + ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); + ch_info->central_ch = channel->hw_value; + ch_info->pri_ch = channel->hw_value; + ch_info->is_psc = cfg80211_channel_is_psc(channel); + + if (channel->flags & + (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) + type = RTW89_CHAN_DFS; + else + type = RTW89_CHAN_ACTIVE; + + rtw89_pno_scan_add_chan_be(rtwdev, type, + nd_config->n_match_sets, ch_info); + list_add_tail(&ch_info->list, &chan_list); + } + + ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list); + +out: + list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { + list_del(&ch_info->list); + kfree(ch_info); + } + + return ret; +} + int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool connected) { @@ -6352,7 +6652,7 
@@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID; } - ret = mac->scan_offload(rtwdev, &opt, rtwvif); + ret = mac->scan_offload(rtwdev, &opt, rtwvif, false); out: return ret; } @@ -6602,6 +6902,57 @@ int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, return ret; } +int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool enable) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; + struct rtw89_h2c_cfg_nlo *h2c; + u32 len = sizeof(*h2c); + struct sk_buff *skb; + int ret, i; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); + return -ENOMEM; + } + + skb_put(skb, len); + h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; + + h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | + le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | + le32_encode_bits(rtwvif->mac_id, RTW89_H2C_NLO_W0_MACID); + + if (enable) { + h2c->nlo_cnt = nd_config->n_match_sets; + for (i = 0 ; i < nd_config->n_match_sets; i++) { + h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; + memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, + nd_config->match_sets[i].ssid.ssid_len); + } + } + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_WOW, + H2C_FUNC_NLO, 0, 1, + len); + + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; + +fail: + dev_kfree_skb_any(skb); + return ret; +} + int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable) { @@ -6816,20 +7167,46 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, goto fail; } return 0; - fail: dev_kfree_skb_any(skb); return ret; } +int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool enable) +{ + struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; + struct rtw89_h2c_fwips *h2c; + u32 len = sizeof(*h2c); + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); + return -ENOMEM; + } + skb_put(skb, len); + h2c = (struct rtw89_h2c_fwips *)skb->data; + + h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | + le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_PS, + H2C_FUNC_IPS_CFG, 0, 1, + len); + + return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); +} + int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) { - struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; + struct rtw89_wait_info *wait = &rtwdev->wow.wait; struct rtw89_h2c_wow_aoac *h2c; u32 len = sizeof(*h2c); struct sk_buff *skb; - unsigned int cond; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { @@ -6848,8 +7225,7 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) H2C_FUNC_AOAC_REPORT_REQ, 1, 0, len); - cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ); - return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); + return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); } /* Return < 0, if failures happen during waiting for the condition. 
@@ -7341,7 +7717,7 @@ int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); } -int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx) +int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) { struct rtw89_wait_info *wait = &rtwdev->mcc.wait; struct rtw89_h2c_mrc_del *h2c; @@ -7358,7 +7734,8 @@ int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx) skb_put(skb, len); h2c = (struct rtw89_h2c_mrc_del *)skb->data; - h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX); + h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | + le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index c3b4324c621c16..ad47e77d740b25 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -1898,6 +1898,24 @@ struct rtw89_h2c_wow_global { #define RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO GENMASK(23, 16) #define RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO GENMASK(31, 24) +#define RTW89_MAX_SUPPORT_NL_NUM 16 +struct rtw89_h2c_cfg_nlo { + __le32 w0; + u8 nlo_cnt; + u8 rsvd[3]; + __le32 patterncheck; + __le32 rsvd1; + __le32 rsvd2; + u8 ssid_len[RTW89_MAX_SUPPORT_NL_NUM]; + u8 chiper[RTW89_MAX_SUPPORT_NL_NUM]; + u8 rsvd3[24]; + u8 ssid[RTW89_MAX_SUPPORT_NL_NUM][IEEE80211_MAX_SSID_LEN]; +} __packed; + +#define RTW89_H2C_NLO_W0_ENABLE BIT(0) +#define RTW89_H2C_NLO_W0_IGNORE_CIPHER BIT(2) +#define RTW89_H2C_NLO_W0_MACID GENMASK(31, 24) + static inline void RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(void *h2c, u32 val) { le32p_replace_bits((__le32 *)h2c, val, BIT(0)); @@ -2089,10 +2107,15 @@ enum rtw89_btc_cxdrvinfo { enum rtw89_scan_mode { RTW89_SCAN_IMMEDIATE, + RTW89_SCAN_DELAY, }; enum rtw89_scan_type { RTW89_SCAN_ONCE, + RTW89_SCAN_NORMAL, + RTW89_SCAN_NORMAL_SLOW, + RTW89_SCAN_SEAMLESS, + RTW89_SCAN_MAX, }; static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val) @@ -2124,6 +2147,30 @@ struct rtw89_h2c_cxctrl_v7 { #define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr) #define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7) +struct rtw89_btc_wl_role_info_v7_u8 { + u8 connect_cnt; + u8 link_mode; + u8 link_mode_chg; + u8 p2p_2g; + + struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER]; +} __packed; + +struct rtw89_btc_wl_role_info_v7_u32 { + __le32 role_map; + __le32 mrole_type; + __le32 mrole_noa_duration; + __le32 dbcc_en; + __le32 dbcc_chg; + __le32 dbcc_2g_phy; +} __packed; + +struct rtw89_h2c_cxrole_v7 { + struct rtw89_h2c_cxhdr_v7 hdr; + struct rtw89_btc_wl_role_info_v7_u8 _u8; + struct rtw89_btc_wl_role_info_v7_u32 _u32; +} __packed; + struct rtw89_btc_wl_role_info_v8_u8 { u8 connect_cnt; u8 link_mode; @@ -2145,7 +2192,7 @@ struct rtw89_btc_wl_role_info_v8_u32 { } __packed; struct rtw89_h2c_cxrole_v8 { - struct rtw89_h2c_cxhdr hdr; + struct rtw89_h2c_cxhdr_v7 hdr; struct rtw89_btc_wl_role_info_v8_u8 _u8; struct rtw89_btc_wl_role_info_v8_u32 _u32; } __packed; @@ -2664,6 +2711,8 @@ struct rtw89_h2c_scanofld { #define RTW89_H2C_SCANOFLD_W1_PROBE_REQ_PKT_ID GENMASK(31, 24) #define RTW89_H2C_SCANOFLD_W2_NORM_PD GENMASK(15, 0) #define RTW89_H2C_SCANOFLD_W2_SLOW_PD GENMASK(23, 16) +#define RTW89_H2C_SCANOFLD_W3_TSF_HIGH GENMASK(31, 0) +#define RTW89_H2C_SCANOFLD_W4_TSF_LOW GENMASK(31, 0) struct rtw89_h2c_scanofld_be_macc_role { __le32 w0; @@ -2711,7 
+2760,9 @@ struct rtw89_h2c_scanofld_be { __le32 w6; __le32 w7; __le32 w8; - struct rtw89_h2c_scanofld_be_macc_role role[]; + __le32 w9; /* Added after SCAN_OFFLOAD_BE_V1 */ + /* struct rtw89_h2c_scanofld_be_macc_role (flexible number) */ + /* struct rtw89_h2c_scanofld_be_opch (flexible number) */ } __packed; #define RTW89_H2C_SCANOFLD_BE_W0_OP GENMASK(1, 0) @@ -2742,6 +2793,16 @@ struct rtw89_h2c_scanofld_be { #define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ GENMASK(7, 0) #define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ GENMASK(15, 8) #define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ GENMASK(23, 16) +#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG GENMASK(7, 0) +#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC GENMASK(15, 8) +#define RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP GENMASK(23, 16) + +struct rtw89_h2c_fwips { + __le32 w0; +} __packed; + +#define RTW89_H2C_FW_IPS_W0_MACID GENMASK(7, 0) +#define RTW89_H2C_FW_IPS_W0_ENABLE BIT(8) static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val) { @@ -3741,17 +3802,28 @@ enum rtw89_fw_element_id { RTW89_FW_ELEMENT_ID_NUM, }; -#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS \ +#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ \ (BIT(RTW89_FW_ELEMENT_ID_TXPWR_BYRATE) | \ BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ) | \ BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ) | \ - BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ) | \ BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ) | \ BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ) | \ - BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ) | \ BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT) | \ BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU)) +#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS \ + (BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ)) + +#define RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ \ + (BIT(RTW89_FW_ELEMENT_ID_BB_REG) | \ + BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \ + BIT(RTW89_FW_ELEMENT_ID_RADIO_B) | \ + BIT(RTW89_FW_ELEMENT_ID_RF_NCTL) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_TRK) | \ + BITS_OF_RTW89_TXPWR_FW_ELEMENTS_NO_6GHZ) + #define RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS (BIT(RTW89_FW_ELEMENT_ID_BBMCU0) | \ BIT(RTW89_FW_ELEMENT_ID_BB_REG) | \ BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \ @@ -3935,6 +4007,7 @@ enum rtw89_wow_h2c_func { H2C_FUNC_WOW_GLOBAL = 0x2, H2C_FUNC_GTK_OFLD = 0x3, H2C_FUNC_ARP_OFLD = 0x4, + H2C_FUNC_NLO = 0x7, H2C_FUNC_WAKEUP_CTRL = 0x8, H2C_FUNC_WOW_CAM_UPD = 0xC, H2C_FUNC_AOAC_REPORT_REQ = 0xD, @@ -3942,13 +4015,27 @@ enum rtw89_wow_h2c_func { NUM_OF_RTW89_WOW_H2C_FUNC, }; -#define RTW89_WOW_WAIT_COND(func) \ - (NUM_OF_RTW89_WOW_H2C_FUNC + (func)) +#define RTW89_WOW_WAIT_COND(tag, func) \ + ((tag) * NUM_OF_RTW89_WOW_H2C_FUNC + (func)) + +#define RTW89_WOW_WAIT_COND_AOAC \ + RTW89_WOW_WAIT_COND(0 /* don't care */, H2C_FUNC_AOAC_REPORT_REQ) /* CLASS 2 - PS */ #define H2C_CL_MAC_PS 0x2 -#define H2C_FUNC_MAC_LPS_PARM 0x0 -#define H2C_FUNC_P2P_ACT 0x1 +enum rtw89_ps_h2c_func { + H2C_FUNC_MAC_LPS_PARM = 0x0, + H2C_FUNC_P2P_ACT = 0x1, + H2C_FUNC_IPS_CFG = 0x3, + + NUM_OF_RTW89_PS_H2C_FUNC, +}; + +#define RTW89_PS_WAIT_COND(tag, func) \ + ((tag) * NUM_OF_RTW89_PS_H2C_FUNC + (func)) + +#define RTW89_PS_WAIT_COND_IPS_CFG \ + RTW89_PS_WAIT_COND(0 /* don't care */, H2C_FUNC_IPS_CFG) /* CLASS 3 - FW download */ #define H2C_CL_MAC_FWDL 0x3 @@ -4095,7 +4182,7 @@ struct rtw89_fw_h2c_rf_get_mccch { #define NUM_OF_RTW89_FW_RFK_PATH 2 #define NUM_OF_RTW89_FW_RFK_TBL 3 -struct rtw89_fw_h2c_rfk_pre_info { +struct rtw89_fw_h2c_rfk_pre_info_common { struct { __le32 
ch[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL]; __le32 band[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL]; @@ -4108,6 +4195,11 @@ struct rtw89_fw_h2c_rfk_pre_info { } __packed tbl; __le32 phy_idx; +} __packed; + +struct rtw89_fw_h2c_rfk_pre_info_v0 { + struct rtw89_fw_h2c_rfk_pre_info_common common; + __le32 cur_band; __le32 cur_bw; __le32 cur_center_ch; @@ -4127,6 +4219,11 @@ struct rtw89_fw_h2c_rfk_pre_info { } __packed mlo; } __packed; +struct rtw89_fw_h2c_rfk_pre_info { + struct rtw89_fw_h2c_rfk_pre_info_common common; + __le32 mlo_1_1; +} __packed; + struct rtw89_h2c_rf_tssi { __le16 len; u8 phy; @@ -4366,6 +4463,7 @@ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type); +int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type); @@ -4378,12 +4476,14 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, struct list_head *chan_list); int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, struct list_head *chan_list); -int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, - struct rtw89_scan_option *opt, - struct rtw89_vif *vif); +int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, + struct rtw89_scan_option *opt, + struct rtw89_vif *vif, + bool wowlan); int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, struct rtw89_scan_option *opt, - struct rtw89_vif *vif); + struct rtw89_vif *vif, + bool wowlan); int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, struct rtw89_fw_h2c_rf_reg_info *info, u16 len, u8 page); @@ -4391,12 +4491,17 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev); int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, - enum rtw89_tssi_mode tssi_mode); -int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); -int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); -int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); -int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); -int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); + const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode); +int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan); +int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan); +int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan); +int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan); +int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan); int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, bool rack, bool dack); @@ -4420,6 +4525,8 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, struct rtw89_lps_parm *lps_param); int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif 
*rtwvif); +int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool enable); struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len); struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len); int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, @@ -4434,10 +4541,14 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, bool enable); void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); -int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, - struct rtw89_vif *rtwvif, bool connected); +int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool connected); +int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif); int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool connected); +int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif); int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev); int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, const struct rtw89_pkt_drop_params *params); @@ -4450,6 +4561,8 @@ int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable); int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable); +int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool enable); int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable); int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, @@ -4488,7 +4601,7 @@ int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, const struct rtw89_fw_mrc_add_arg *arg); int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, const struct rtw89_fw_mrc_start_arg *arg); -int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx); +int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx); int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, const struct rtw89_fw_mrc_req_tsf_arg *arg, struct rtw89_mac_mrc_tsf_rpt *rpt); diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index e2399796aeb1e0..c70a23a763b0ee 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1625,6 +1625,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = { .wde_size18 = {RTW89_WDE_PG_64, 0, 2048,}, /* 8852C PCIE SCC */ .wde_size19 = {RTW89_WDE_PG_64, 3328, 0,}, + .wde_size23 = {RTW89_WDE_PG_64, 1022, 2,}, /* PCIE */ .ple_size0 = {RTW89_PLE_PG_128, 1520, 16,}, .ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,}, @@ -1635,6 +1636,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = { .ple_size6 = {RTW89_PLE_PG_128, 496, 16,}, /* DLFW */ .ple_size8 = {RTW89_PLE_PG_128, 64, 960,}, + .ple_size9 = {RTW89_PLE_PG_128, 2288, 16,}, /* 8852C DLFW */ .ple_size18 = {RTW89_PLE_PG_128, 2544, 16,}, /* 8852C PCIE SCC */ @@ -1652,6 +1654,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = { .wde_qt17 = {0, 0, 0, 0,}, /* 8852C PCIE SCC */ .wde_qt18 = {3228, 60, 0, 40,}, + .wde_qt23 = {958, 48, 0, 16,}, .ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,}, .ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,}, /* PCIE SCC */ @@ -1671,12 +1674,16 @@ const struct rtw89_mac_size_set rtw89_mac_size = { .ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,}, 
/* 8852C PCIE SCC */ .ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,}, + .ple_qt57 = {147, 0, 16, 20, 13, 13, 178, 0, 32, 14, 8, 0,}, /* PCIE 64 */ .ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,}, + .ple_qt59 = {147, 0, 32, 20, 1860, 13, 2025, 0, 1879, 14, 24, 0,}, /* 8852A PCIE WOW */ .ple_qt_52a_wow = {264, 0, 32, 20, 64, 13, 1005, 0, 64, 128, 120,}, /* 8852B PCIE WOW */ .ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,}, + /* 8852BT PCIE WOW */ + .ple_qt_52bt_wow = {147, 0, 32, 20, 1860, 13, 1929, 0, 1879, 14, 24, 0,}, /* 8851B PCIE WOW */ .ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,}, .ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,}, @@ -2025,11 +2032,16 @@ int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow) void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable) { + const struct rtw89_chip_info *chip = rtwdev->chip; u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC; if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) return; + /* 8852C enable B_AX_UC_MGNT_DEC by default */ + if (chip->chip_id == RTL8852C) + msk32 = B_AX_BMC_MGNT_DEC; + if (enable) rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32); else @@ -2254,6 +2266,8 @@ static int sec_eng_init_ax(struct rtw89_dev *rtwdev) /* init TX encryption */ val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC); val |= (B_AX_MC_DEC | B_AX_BC_DEC); + if (chip->chip_id == RTL8852C) + val |= B_AX_UC_MGNT_DEC; if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || chip->chip_id == RTL8851B) val &= ~B_AX_TX_PARTIAL_MODE; @@ -2728,6 +2742,7 @@ bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) { + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 val, reg; int ret; @@ -2766,6 +2781,12 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU); } + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_VHT_0, mac_idx); + rtw89_write32_mask(rtwdev, reg, + B_AX_AMPDU_MAX_LEN_VHT_MASK, 0x3FF80); + } + return 0; } @@ -3781,7 +3802,7 @@ static int rtw89_mac_enable_cpu_ax(struct rtw89_dev *rtwdev, u8 boot_reason, rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val); - if (rtwdev->chip->chip_id == RTL8852B) + if (rtw89_is_rtl885xb(rtwdev)) rtw89_write32_mask(rtwdev, R_AX_SEC_CTRL, B_AX_SEC_IDMEM_SIZE_CONFIG_MASK, 0x2); @@ -4774,14 +4795,14 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb, case RTW89_SCAN_ENTER_OP_NOTIFY: case RTW89_SCAN_ENTER_CH_NOTIFY: if (rtw89_is_op_chan(rtwdev, band, chan)) { - rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx, + rtw89_assign_entity_chan(rtwdev, rtwvif->chanctx_idx, &rtwdev->scan_info.op_chan); rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); ieee80211_wake_queues(rtwdev->hw); } else { rtw89_chan_create(&new, chan, chan, band, RTW89_CHANNEL_WIDTH_20); - rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx, + rtw89_assign_entity_chan(rtwdev, rtwvif->chanctx_idx, &new); } break; @@ -4866,6 +4887,7 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le { /* N.B. This will run in interrupt context. 
*/ struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait; + struct rtw89_wait_info *ps_wait = &rtwdev->mac.ps_wait; const struct rtw89_c2h_done_ack *c2h = (const struct rtw89_c2h_done_ack *)skb_c2h->data; u8 h2c_cat = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CAT); @@ -4886,6 +4908,18 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le switch (h2c_class) { default: return; + case H2C_CL_MAC_PS: + switch (h2c_func) { + default: + return; + case H2C_FUNC_IPS_CFG: + cond = RTW89_PS_WAIT_COND_IPS_CFG; + break; + } + + data.err = !!h2c_return; + rtw89_complete_cond(ps_wait, cond, &data); + return; case H2C_CL_MAC_FW_OFLD: switch (h2c_func) { default: @@ -5144,11 +5178,10 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; - struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; + struct rtw89_wait_info *wait = &rtw_wow->wait; const struct rtw89_c2h_wow_aoac_report *c2h = (const struct rtw89_c2h_wow_aoac_report *)skb->data; struct rtw89_completion_data data = {}; - unsigned int cond; aoac_rpt->rpt_ver = c2h->rpt_ver; aoac_rpt->sec_type = c2h->sec_type; @@ -5166,8 +5199,7 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn); memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk)); - cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ); - rtw89_complete_cond(wait, cond, &data); + rtw89_complete_cond(wait, RTW89_WOW_WAIT_COND_AOAC, &data); } static void @@ -6513,8 +6545,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { .is_txq_empty = mac_is_txq_empty_ax, - .add_chan_list = rtw89_hw_scan_add_chan_list, - .scan_offload = rtw89_fw_h2c_scan_offload, + .add_chan_list = rtw89_hw_scan_add_chan_list_ax, + .add_chan_list_pno = rtw89_pno_scan_add_chan_list_ax, + .scan_offload = rtw89_fw_h2c_scan_offload_ax, .wow_config_mac = rtw89_wow_config_mac_ax, }; diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index d5895516b3ed52..67c2a45071244d 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -421,7 +421,6 @@ enum rtw89_mac_c2h_mrc_func { enum rtw89_mac_c2h_wow_func { RTW89_MAC_C2H_FUNC_AOAC_REPORT, - RTW89_MAC_C2H_FUNC_READ_WOW_CAM, NUM_OF_RTW89_MAC_C2H_FUNC_WOW, }; @@ -885,12 +884,14 @@ struct rtw89_mac_size_set { const struct rtw89_dle_size wde_size9; const struct rtw89_dle_size wde_size18; const struct rtw89_dle_size wde_size19; + const struct rtw89_dle_size wde_size23; const struct rtw89_dle_size ple_size0; const struct rtw89_dle_size ple_size0_v1; const struct rtw89_dle_size ple_size3_v1; const struct rtw89_dle_size ple_size4; const struct rtw89_dle_size ple_size6; const struct rtw89_dle_size ple_size8; + const struct rtw89_dle_size ple_size9; const struct rtw89_dle_size ple_size18; const struct rtw89_dle_size ple_size19; const struct rtw89_wde_quota wde_qt0; @@ -900,6 +901,7 @@ struct rtw89_mac_size_set { const struct rtw89_wde_quota wde_qt7; const struct rtw89_wde_quota wde_qt17; const struct rtw89_wde_quota wde_qt18; + const struct rtw89_wde_quota wde_qt23; const struct rtw89_ple_quota ple_qt0; const struct rtw89_ple_quota ple_qt1; const struct rtw89_ple_quota ple_qt4; @@ -911,9 +913,12 @@ struct rtw89_mac_size_set { const struct rtw89_ple_quota ple_qt45; const struct rtw89_ple_quota ple_qt46; const struct rtw89_ple_quota 
ple_qt47; + const struct rtw89_ple_quota ple_qt57; const struct rtw89_ple_quota ple_qt58; + const struct rtw89_ple_quota ple_qt59; const struct rtw89_ple_quota ple_qt_52a_wow; const struct rtw89_ple_quota ple_qt_52b_wow; + const struct rtw89_ple_quota ple_qt_52bt_wow; const struct rtw89_ple_quota ple_qt_51b_wow; const struct rtw89_rsvd_quota ple_rsvd_qt0; const struct rtw89_rsvd_quota ple_rsvd_qt1; @@ -1000,9 +1005,12 @@ struct rtw89_mac_gen_def { int (*add_chan_list)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool connected); + int (*add_chan_list_pno)(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif); int (*scan_offload)(struct rtw89_dev *rtwdev, struct rtw89_scan_option *option, - struct rtw89_vif *rtwvif); + struct rtw89_vif *rtwvif, + bool wowlan); int (*wow_config_mac)(struct rtw89_dev *rtwdev, bool enable_wow); }; diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 1508693032cb20..48ad0d0f76bff4 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -90,7 +90,7 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed) rtw89_leave_ips(rtwdev); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, + rtw89_config_entity_chandef(rtwdev, RTW89_CHANCTX_0, &hw->conf.chandef); rtw89_set_channel(rtwdev); } @@ -126,7 +126,9 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, rtwvif->rtwdev = rtwdev; rtwvif->roc.state = RTW89_ROC_IDLE; rtwvif->offchan = false; - list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list); + if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) + list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list); + INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work); INIT_DELAYED_WORK(&rtwvif->roc.roc_work, rtw89_roc_work); rtw89_leave_ps_mode(rtwdev); @@ -144,7 +146,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, rtwvif->bcn_hit_cond = 0; rtwvif->mac_idx = RTW89_MAC_0; rtwvif->phy_idx = RTW89_PHY_0; - rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0; + rtwvif->chanctx_idx = RTW89_CHANCTX_0; rtwvif->chanctx_assigned = false; rtwvif->hit_rule = 0; rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; @@ -313,7 +315,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev, { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); u8 slot_time; u8 sifs; @@ -503,7 +505,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw, mutex_lock(&rtwdev->mutex); - chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); if (chan->band_type == RTW89_BAND_6G) { mutex_unlock(&rtwdev->mutex); return -EOPNOTSUPP; @@ -519,7 +521,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw, rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE); rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true); rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); - rtw89_chip_rfk_channel(rtwdev); + rtw89_chip_rfk_channel(rtwdev, rtwvif); rtw89_queue_chanctx_work(rtwdev); mutex_unlock(&rtwdev->mutex); @@ -783,7 +785,7 @@ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta rtwsta->use_cfg_mask = true; rtwsta->mask = *br_data->mask; - rtw89_phy_ra_updata_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED); + rtw89_phy_ra_update_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED); } static void 
rtw89_ra_mask_info_update(struct rtw89_dev *rtwdev, @@ -925,7 +927,7 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw, { struct rtw89_dev *rtwdev = hw->priv; - rtw89_phy_ra_updata_sta(rtwdev, sta, changed); + rtw89_phy_ra_update_sta(rtwdev, sta, changed); } static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw, @@ -1147,6 +1149,22 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw, } #endif +static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw) +{ + struct rtw89_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + + /* wl_disable GPIO get floating when entering LPS */ + if (test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) + goto out; + + rtw89_core_rfkill_poll(rtwdev, false); + +out: + mutex_unlock(&rtwdev->mutex); +} + const struct ieee80211_ops rtw89_ops = { .tx = rtw89_ops_tx, .wake_tx_queue = rtw89_ops_wake_tx_queue, @@ -1193,5 +1211,6 @@ const struct ieee80211_ops rtw89_ops = { .set_wakeup = rtw89_ops_set_wakeup, .set_rekey_data = rtw89_set_rekey_data, #endif + .rfkill_poll = rtw89_ops_rfkill_poll, }; EXPORT_SYMBOL(rtw89_ops); diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c index f212b67771d50d..31f0a5225b115e 100644 --- a/drivers/net/wireless/realtek/rtw89/mac_be.c +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -2599,6 +2599,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = { .is_txq_empty = mac_is_txq_empty_be, .add_chan_list = rtw89_hw_scan_add_chan_list_be, + .add_chan_list_pno = rtw89_pno_scan_add_chan_list_be, .scan_offload = rtw89_fw_h2c_scan_offload_be, .wow_config_mac = rtw89_wow_config_mac_be, diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index ad11d1414874ab..c7165e757842be 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -302,7 +302,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern; struct rtw89_ra_info *ra = &rtwsta->ra; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif); const u64 *high_rate_masks = rtw89_ra_mask_ht_rates; u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi); @@ -341,8 +341,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, mode |= RTW89_RA_MODE_VHT; csi_mode = RTW89_RA_RPT_MODE_VHT; - /* MCS9, MCS8, MCS7 */ - ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1); + /* MCS9 (non-20MHz), MCS8, MCS7 */ + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1); + else + ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1); high_rate_masks = rtw89_ra_mask_vht_rates; if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) stbc_en = 1; @@ -353,8 +356,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, csi_mode = RTW89_RA_RPT_MODE_HT; ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) | ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) | - (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) | - (sta->deflink.ht_cap.mcs.rx_mask[0] << 12); + ((u64)sta->deflink.ht_cap.mcs.rx_mask[1] << 24) | + ((u64)sta->deflink.ht_cap.mcs.rx_mask[0] << 12); high_rate_masks = rtw89_ra_mask_ht_rates; if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) stbc_en = 1; @@ -462,7 +465,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ra->csi_mode = csi_mode; } -void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct 
ieee80211_sta *sta, +void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, u32 changed) { struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; @@ -528,7 +531,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_phy_rate_pattern next_pattern = {0}; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = { RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0), RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0), @@ -610,17 +613,17 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n"); } -static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta) +static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta) { struct rtw89_dev *rtwdev = (struct rtw89_dev *)data; - rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED); + rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED); } void rtw89_phy_ra_update(struct rtw89_dev *rtwdev) { ieee80211_iterate_stations_atomic(rtwdev->hw, - rtw89_phy_ra_updata_sta_iter, + rtw89_phy_ra_update_sta_iter, rtwdev); } @@ -3081,6 +3084,7 @@ EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait); int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode, unsigned int ms) { @@ -3088,7 +3092,7 @@ int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev, rtw89_phy_rfk_report_prep(rtwdev); - ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode); + ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode); if (ret) return ret; @@ -3098,13 +3102,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait); int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms) { int ret; rtw89_phy_rfk_report_prep(rtwdev); - ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx); + ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan); if (ret) return ret; @@ -3114,13 +3119,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait); int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms) { int ret; rtw89_phy_rfk_report_prep(rtwdev); - ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx); + ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan); if (ret) return ret; @@ -3130,13 +3136,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait); int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms) { int ret; rtw89_phy_rfk_report_prep(rtwdev); - ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx); + ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan); if (ret) return ret; @@ -3146,13 +3153,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait); int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms) { int ret; rtw89_phy_rfk_report_prep(rtwdev); - ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx); + ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan); if (ret) return ret; @@ -3162,13 +3170,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait); int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms) { int ret; rtw89_phy_rfk_report_prep(rtwdev); - ret = 
rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx); + ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan); if (ret) return ret; @@ -4285,7 +4294,7 @@ void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, - rtwvif->sub_entity_idx); + rtwvif->chanctx_idx); struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; if (!chip->ul_tb_waveform_ctrl) @@ -5367,7 +5376,7 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev) static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev) { struct rtw89_dig_info *dig = &rtwdev->dig; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); bool is_linked = rtwdev->total_sta_assoc > 0; const u16 *fa_th_src = NULL; @@ -5392,7 +5401,7 @@ static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev) memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th)); } -static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20; +static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20; static const u8 igi_max_performance_mode = 0x5a; static const u8 dynamic_pd_threshold_max; @@ -5611,7 +5620,7 @@ static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev) static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, bool enable) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; enum rtw89_bandwidth cbw = chan->band_width; struct rtw89_dig_info *dig = &rtwdev->dig; @@ -5690,38 +5699,47 @@ void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev) } #define IGI_RSSI_MIN 10 +#define ABS_IGI_MIN 0xc void rtw89_phy_dig(struct rtw89_dev *rtwdev) { struct rtw89_dig_info *dig = &rtwdev->dig; bool is_linked = rtwdev->total_sta_assoc > 0; + u8 igi_min; if (unlikely(dig->bypass_dig)) { dig->bypass_dig = false; return; } + rtw89_phy_dig_update_rssi_info(rtwdev); + if (!dig->is_linked_pre && is_linked) { rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n"); rtw89_phy_dig_update_para(rtwdev); + dig->igi_fa_rssi = dig->igi_rssi; } else if (dig->is_linked_pre && !is_linked) { rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n"); rtw89_phy_dig_update_para(rtwdev); + dig->igi_fa_rssi = dig->igi_rssi; } dig->is_linked_pre = is_linked; rtw89_phy_dig_igi_offset_by_env(rtwdev); - rtw89_phy_dig_update_rssi_info(rtwdev); - dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ? 
- dig->igi_rssi - IGI_RSSI_MIN : 0; - dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX; - dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst; + igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0); + dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode); + dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN); - dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min, - dig->dyn_igi_max); + if (dig->dyn_igi_max >= dig->dyn_igi_min) { + dig->igi_fa_rssi += dig->fa_rssi_ofst; + dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min, + dig->dyn_igi_max); + } else { + dig->igi_fa_rssi = dig->dyn_igi_max; + } rtw89_debug(rtwdev, RTW89_DBG_DIG, - "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n", + "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n", dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min, dig->igi_fa_rssi); diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index d8df553b9cb005..6dd8ec46939acd 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -894,7 +894,7 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta); void rtw89_phy_ra_update(struct rtw89_dev *rtwdev); -void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, +void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, u32 changed); void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, @@ -907,22 +907,28 @@ int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev, unsigned int ms); int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode, unsigned int ms); int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms); int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms); int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms); int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms); int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan, unsigned int ms); void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index 92074b73ebebdc..aebd6404f80250 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -98,10 +98,10 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif); } -static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id) +static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { struct rtw89_lps_parm lps_param = { - .macid = mac_id, + .macid = rtwvif->mac_id, .psmode = RTW89_MAC_AX_PS_MODE_ACTIVE, .lastrpwm = RTW89_LAST_RPWM_ACTIVE, }; @@ -109,6 +109,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id) rtw89_fw_h2c_lps_parm(rtwdev, &lps_param); rtw89_fw_leave_lps_check(rtwdev, 0); rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON); + 
rtw89_chip_digital_pwr_comp(rtwdev, rtwvif->phy_idx); } void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev) @@ -137,7 +138,7 @@ static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT) return; - __rtw89_leave_lps(rtwdev, rtwvif->mac_id); + __rtw89_leave_lps(rtwdev, rtwvif); } void rtw89_leave_lps(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 7df36f3bff0b01..69678eab230939 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -107,6 +107,15 @@ #define B_AX_DBG_SEL0_16BIT BIT(11) #define B_AX_DBG_SEL0 GENMASK(7, 0) +#define R_AX_GPIO_EXT_CTRL 0x0060 +#define B_AX_GPIO_MOD_15_TO_8_MASK GENMASK(31, 24) +#define B_AX_GPIO_MOD_9 BIT(25) +#define B_AX_GPIO_IO_SEL_15_TO_8_MASK GENMASK(23, 16) +#define B_AX_GPIO_IO_SEL_9 BIT(17) +#define B_AX_GPIO_OUT_15_TO_8_MASK GENMASK(15, 8) +#define B_AX_GPIO_IN_15_TO_8_MASK GENMASK(7, 0) +#define B_AX_GPIO_IN_9 BIT(1) + #define R_AX_SYS_SDIO_CTRL 0x0070 #define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15) #define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14) @@ -267,6 +276,9 @@ #define R_AX_GPIO0_7_FUNC_SEL 0x02D0 +#define R_AX_GPIO8_15_FUNC_SEL 0x02D4 +#define B_AX_PINMUX_GPIO9_FUNC_SEL_MASK GENMASK(7, 4) + #define R_AX_EECS_EESK_FUNC_SEL 0x02D8 #define B_AX_PINMUX_EESK_FUNC_SEL_MASK GENMASK(7, 4) @@ -706,6 +718,14 @@ B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \ B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \ B_AX_HDT_DMA_PROCESS_ERR_INT_EN) +#define B_AX_HOST_DISP_IMR_SET_V01 (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \ + B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \ + B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \ + B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \ + B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \ + B_AX_HDT_DMA_PROCESS_ERR_INT_EN | \ + B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN | \ + B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN) #define B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN BIT(31) #define B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN BIT(30) @@ -1096,6 +1116,7 @@ #define B_AX_WDE_BUFMGN_FRZTMR_MODE BIT(0) #define R_AX_WDE_ERR_IMR 0x8C38 +#define B_AX_WDE_DATCHN_UAPG_ERR_INT_EN BIT(30) #define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27) #define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26) #define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25) @@ -1135,6 +1156,29 @@ B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN) +#define B_AX_WDE_IMR_CLR_V01 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \ + B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \ + B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \ + B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \ + B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \ + B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \ + B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN | \ + B_AX_WDE_DATCHN_UAPG_ERR_INT_EN) #define B_AX_WDE_IMR_SET (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \ @@ -1154,6 +1198,28 @@ 
B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \ B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN) +#define B_AX_WDE_IMR_SET_V01 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \ + B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \ + B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \ + B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \ + B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \ + B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \ + B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN) #define B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29) #define B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28) @@ -2440,6 +2506,10 @@ #define B_AX_RTS_TXTIME_TH_MASK GENMASK(15, 8) #define B_AX_RTS_LEN_TH_MASK GENMASK(7, 0) +#define R_AX_AGG_LEN_VHT_0 0xC618 +#define R_AX_AGG_LEN_VHT_0_C1 0xE618 +#define B_AX_AMPDU_MAX_LEN_VHT_MASK GENMASK(19, 0) + #define S_AX_CTS2S_TH_SEC_256B 1 #define R_AX_SIFS_SETTING 0xC624 #define R_AX_SIFS_SETTING_C1 0xE624 @@ -3098,7 +3168,9 @@ B_AX_OFDM_CCA_TIMEOUT_INT_EN | \ B_AX_DATA_ON_TIMEOUT_INT_EN | \ B_AX_STS_ON_TIMEOUT_INT_EN | \ - B_AX_CSI_ON_TIMEOUT_INT_EN) + B_AX_CSI_ON_TIMEOUT_INT_EN | \ + B_AX_PHYINTF_TIMEOUT_THR_MSAK) +#define B_AX_PHYINFO_IMR_SET (B_AX_PHY_TXON_TIMEOUT_INT_EN | 0x7) #define R_AX_PHYINFO_ERR_ISR 0xCCFC #define R_AX_PHYINFO_ERR_ISR_C1 0xECFC @@ -3854,6 +3926,15 @@ #define R_BE_EFUSE_CTRL_1_V1 0x0034 #define B_BE_EF_DATA_MASK GENMASK(31, 0) +#define R_BE_GPIO_EXT_CTRL 0x0060 +#define B_BE_GPIO_MOD_15_TO_8_MASK GENMASK(31, 24) +#define B_BE_GPIO_MOD_9 BIT(25) +#define B_BE_GPIO_IO_SEL_15_TO_8_MASK GENMASK(23, 16) +#define B_BE_GPIO_IO_SEL_9 BIT(17) +#define B_BE_GPIO_OUT_15_TO_8_MASK GENMASK(15, 8) +#define B_BE_GPIO_IN_15_TO_8_MASK GENMASK(7, 0) +#define B_BE_GPIO_IN_9 BIT(1) + #define R_BE_WL_BT_PWR_CTRL 0x0068 #define B_BE_ISO_BD2PP BIT(31) #define B_BE_LDOV12B_EN BIT(30) @@ -4299,6 +4380,9 @@ #define B_BE_REG_CK40M_EN BIT(1) #define B_BE_REG_CK640M_EN BIT(0) +#define R_BE_GPIO8_15_FUNC_SEL 0x02D4 +#define B_BE_PINMUX_GPIO9_FUNC_SEL_MASK GENMASK(7, 4) + #define R_BE_WLAN_XTAL_SI_CTRL 0x0270 #define B_BE_WL_XTAL_SI_CMD_POLL BIT(31) #define B_BE_WL_XTAL_SI_CHIPID_MASK GENMASK(30, 28) @@ -5964,6 +6048,9 @@ #define R_BE_WP_PAGE_INFO1 0xB7AC #define B_BE_WP_AVAL_PG_MASK GENMASK(28, 16) +#define R_BE_LTPC_T0_PATH0 0xBA28 +#define R_BE_LTPC_T0_PATH1 0xBB28 + #define R_BE_CMAC_SHARE_FUNC_EN 0x0E000 #define B_BE_CMAC_SHARE_CRPRT BIT(31) #define B_BE_CMAC_SHARE_EN BIT(30) diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index a251b0e3b16e93..a7720a1f17a743 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -800,7 +800,7 @@ static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev) const struct rtw89_reg_6ghz_tpe *tmp; const struct rtw89_chan *chan; - chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); if (chan->band_type != RTW89_BAND_6G) continue; @@ -872,7 +872,7 @@ static bool 
__rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) u8 index; rtw89_for_each_rtwvif(rtwdev, rtwvif) { - chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); if (chan->band_type != RTW89_BAND_6G) continue; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index 40cf84a79c464c..1679bd408ef3f3 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -185,6 +185,15 @@ static const struct rtw89_rrsr_cfgs rtw8851b_rrsr_cfgs = { .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2}, }; +static const struct rtw89_rfkill_regs rtw8851b_rfkill_regs = { + .pinmux = {R_AX_GPIO8_15_FUNC_SEL, + B_AX_PINMUX_GPIO9_FUNC_SEL_MASK, + 0xf}, + .mode = {R_AX_GPIO_EXT_CTRL + 2, + (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16, + 0x0}, +}; + static const struct rtw89_dig_regs rtw8851b_dig_regs = { .seg0_pd_reg = R_SEG0R_PD_V1, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, @@ -1578,28 +1587,31 @@ static void rtw8851b_rfk_init(struct rtw89_dev *rtwdev) rtw8851b_aack(rtwdev); rtw8851b_rck(rtwdev); rtw8851b_dack(rtwdev); - rtw8851b_rx_dck(rtwdev, RTW89_PHY_0); + rtw8851b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0); } -static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev) +static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx; + enum rtw89_phy_idx phy_idx = rtwvif->phy_idx; - rtw8851b_rx_dck(rtwdev, phy_idx); - rtw8851b_iqk(rtwdev, phy_idx); - rtw8851b_tssi(rtwdev, phy_idx, true); - rtw8851b_dpk(rtwdev, phy_idx); + rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx); + rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx); + rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx); + rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx); } static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { - rtw8851b_tssi_scan(rtwdev, phy_idx); + rtw8851b_tssi_scan(rtwdev, phy_idx, chan); } -static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, bool start) +static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool start) { - rtw8851b_wifi_scan_notify(rtwdev, start, RTW89_PHY_0); + rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx); } static void rtw8851b_rfk_track(struct rtw89_dev *rtwdev) @@ -1801,7 +1813,7 @@ rtw8851b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) static void rtw8851b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, enum rtw89_phy_idx phy_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); rtw89_phy_write_reg3_tbl(rtwdev, en ? 
&rtw8851b_btc_preagc_en_defs_tbl : &rtw8851b_btc_preagc_dis_defs_tbl); @@ -1824,7 +1836,7 @@ static void rtw8851b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, static void rtw8851b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, enum rtw89_phy_idx phy_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, @@ -1869,7 +1881,7 @@ static void rtw8851b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, static void rtw8851b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, enum rtw89_rf_path_bit rx_path) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); u32 rst_mask0; if (rx_path == RF_A) { @@ -2375,9 +2387,11 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = { .get_thermal = rtw8851b_get_thermal, .ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx, .query_ppdu = rtw8851b_query_ppdu, + .convert_rpl_to_rssi = NULL, .ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8851b_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset, + .digital_pwr_comp = NULL, .pwr_on_func = rtw8851b_pwr_on_func, .pwr_off_func = rtw8851b_pwr_off_func, .query_rxdesc = rtw89_core_query_rxdesc, @@ -2452,6 +2466,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .dig_regs = &rtw8851b_dig_regs, .tssi_dbw_table = NULL, .support_macid_num = RTW89_MAX_MAC_ID_NUM, + .support_link_num = 0, .support_chanctx_num = 0, .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -2463,6 +2478,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .ul_tb_waveform_ctrl = true, .ul_tb_pwr_diff = false, .hw_sec_hdr = false, + .hw_mgmt_tx_encrypt = false, .rf_path_num = 1, .tx_nss = 1, .rx_nss = 1, @@ -2524,6 +2540,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .rrsr_cfgs = &rtw8851b_rrsr_cfgs, .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP_V1, + .rfkill_init = &rtw8851b_rfkill_regs, + .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9}, .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) | BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) | BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c index a221f94627f5b4..364e3635422569 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c @@ -521,9 +521,10 @@ static void _dac_cal(struct rtw89_dev *rtwdev, bool force) } static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, bool is_afe) + enum rtw89_rf_path path, bool is_afe, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n", path, @@ -574,7 +575,8 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf _rxbb_ofst_swap(rtwdev, path, rf_mode); } -static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe) +static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe, + enum rtw89_chanctx_idx chanctx_idx) { u32 rf_reg5; u8 path; @@ -584,7 +586,7 @@ static void _rx_dck(struct 
rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_af 0x2, rtwdev->hal.cv); for (path = 0; path < RF_PATH_NUM_8851B; path++) { - _rx_dck_info(rtwdev, phy, path, is_afe); + _rx_dck_info(rtwdev, phy, path, is_afe, chanctx_idx); rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); @@ -1481,9 +1483,9 @@ static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, } static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - u8 path) + u8 path, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u8 idx = 0; @@ -1586,10 +1588,11 @@ static void _iqk_init(struct rtw89_dev *rtwdev) } static void _doiqk(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx); u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR]; u32 backup_bb_val[BACKUP_BB_REGS_NR]; @@ -1602,7 +1605,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, iqk_info->version = RTW8851B_IQK_VER; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); - _iqk_get_ch_info(rtwdev, phy_idx, path); + _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx); _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); @@ -1618,9 +1621,10 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, BTC_WRFK_ONESHOT_STOP); } -static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + bool force, enum rtw89_chanctx_idx chanctx_idx) { - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); } static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg, @@ -1746,9 +1750,9 @@ static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) } static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 kidx = dpk->cur_idx[path]; @@ -2449,7 +2453,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy, u8 kpath) + enum rtw89_phy_idx phy, u8 kpath, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {}; @@ -2465,7 +2470,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, continue; _dpk_bkup_kip(rtwdev, dpk_kip_reg, kip_bkup, path); _dpk_bkup_rf(rtwdev, dpk_rf_reg, rf_bkup, path); - _dpk_information(rtwdev, phy, path); + _dpk_information(rtwdev, phy, path, chanctx_idx); _dpk_init(rtwdev, path); if (rtwdev->is_tssi_mode[path]) @@ -2505,13 +2510,14 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, _dpk_kip_pwr_clk_onoff(rtwdev, false); } -static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) +static 
void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force, + enum rtw89_chanctx_idx chanctx_idx) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n", DPK_VER_8851B, rtwdev->hal.cv); - _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy)); + _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx); } static void _dpk_track(struct rtw89_dev *rtwdev) @@ -2617,9 +2623,8 @@ static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) } static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_sys_defs_tbl); @@ -2650,7 +2655,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { #define RTW8851B_TSSI_GET_VAL(ptr, idx) \ ({ \ @@ -2664,7 +2669,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph __val; \ }) struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 subband = chan->subband_type; const s8 *thm_up_a = NULL; @@ -2755,9 +2759,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx } static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, @@ -2766,9 +2769,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy } static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, bool all) + enum rtw89_rf_path path, bool all, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, @@ -2944,10 +2947,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) } static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u32 gidx, gidx_1st, gidx_2nd; u8 ch = chan->channel; s8 de_1st; @@ -2980,10 +2982,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u32 tgidx, tgidx_1st, tgidx_2nd; u8 ch = chan->channel; s8 tde_1st; @@ -3017,10 +3018,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph return val; } -static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static void 
_tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 gidx; s8 ofdm_de; @@ -3033,7 +3034,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) { gidx = _tssi_get_cck_group(rtwdev, ch); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = tssi_info->tssi_cck[i][gidx] + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3049,8 +3050,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK)); - ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = ofdm_de + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3096,10 +3097,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p } static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, enum rtw89_rf_path path) + enum rtw89_phy_idx phy, enum rtw89_rf_path path, + const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 channel = chan->channel; u8 band; @@ -3255,9 +3256,10 @@ void rtw8851b_dack(struct rtw89_dev *rtwdev) _dac_cal(rtwdev, false); } -void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); @@ -3265,30 +3267,32 @@ void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); _iqk_init(rtwdev); - _iqk(rtwdev, phy_idx, false); + _iqk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); } -void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); - _rx_dck(rtwdev, phy_idx, false); + _rx_dck(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); } -void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); @@ -3297,7 +3301,7 @@ 
void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) rtwdev->dpk.is_dpk_enable = true; rtwdev->dpk.is_dpk_reload_en = false; - _dpk(rtwdev, phy_idx, false); + _dpk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); @@ -3308,9 +3312,11 @@ void rtw8851b_dpk_track(struct rtw89_dev *rtwdev) _dpk_track(rtwdev); } -void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en) +void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, chanctx_idx); u8 i; rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); @@ -3319,26 +3325,26 @@ void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e _tssi_disable(rtwdev, phy); for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) { - _tssi_set_sys(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy, i, chan); _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); _tssi_set_dck(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); _tssi_set_dac_gain_tbl(rtwdev, phy, i); - _tssi_slope_cal_org(rtwdev, phy, i); - _tssi_alignment_default(rtwdev, phy, i, true); + _tssi_slope_cal_org(rtwdev, phy, i, chan); + _tssi_alignment_default(rtwdev, phy, i, true, chan); _tssi_set_tssi_slope(rtwdev, phy, i); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); } -void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 channel = chan->channel; u32 i; @@ -3348,20 +3354,21 @@ void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) _tssi_disable(rtwdev, phy); for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) { - _tssi_set_sys(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); - _tssi_slope_cal_org(rtwdev, phy, i); - _tssi_alignment_default(rtwdev, phy, i, true); + _tssi_set_sys(rtwdev, phy, i, chan); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); + _tssi_slope_cal_org(rtwdev, phy, i, chan); + _tssi_alignment_default(rtwdev, phy, i, true, chan); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); } static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, bool enable) + enum rtw89_phy_idx phy, bool enable, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 channel = chan->channel; rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n", @@ -3379,7 +3386,7 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev, rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); - _tssi_alimentk_done(rtwdev, phy, RF_PATH_A); + _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan); rtw89_debug(rtwdev, RTW89_DBG_RFK, "======>%s 
2 SCAN_END Set 0x5818[7:0]=0x%x\n", @@ -3391,12 +3398,13 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev, } void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { if (scan_start) - rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true); + rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx); else - rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false); + rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx); } static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h index b66a23d6d36732..ea7df628256bed 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h @@ -12,15 +12,21 @@ void rtw8851b_lck_init(struct rtw89_dev *rtwdev); void rtw8851b_lck_track(struct rtw89_dev *rtwdev); void rtw8851b_rck(struct rtw89_dev *rtwdev); void rtw8851b_dack(struct rtw89_dev *rtwdev); -void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); -void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); +void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8851b_dpk_init(struct rtw89_dev *rtwdev); -void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8851b_dpk_track(struct rtw89_dev *rtwdev); -void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en); -void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx); +void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan); void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 08e148328c6227..dde96bd63021ff 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -337,6 +337,11 @@ static const struct rtw89_pwr_cfg rtw8852a_pwroff[] = { PWR_INTF_MSK_PCIE, PWR_BASE_MAC, PWR_CMD_WRITE, BIT(0), 0}, + {0x0092, + PWR_CV_MSK_ALL, + PWR_INTF_MSK_PCIE, + PWR_BASE_MAC, + PWR_CMD_WRITE, BIT(4), BIT(4)}, {0x0005, PWR_CV_MSK_ALL, PWR_INTF_MSK_PCIE, @@ -478,6 +483,15 @@ static const struct rtw89_rrsr_cfgs rtw8852a_rrsr_cfgs = { .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2}, }; +static const struct rtw89_rfkill_regs rtw8852a_rfkill_regs = { + .pinmux = {R_AX_GPIO8_15_FUNC_SEL, + B_AX_PINMUX_GPIO9_FUNC_SEL_MASK, + 0xf}, + .mode = {R_AX_GPIO_EXT_CTRL + 2, + (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16, + 0x0}, +}; + static const struct rtw89_dig_regs rtw8852a_dig_regs = { .seg0_pd_reg = R_SEG0R_PD, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, @@ -1332,29 +1346,32 @@ 
static void rtw8852a_rfk_init(struct rtw89_dev *rtwdev) rtwdev->is_tssi_mode[RF_PATH_B] = false; rtw8852a_rck(rtwdev); - rtw8852a_dack(rtwdev); - rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true); + rtw8852a_dack(rtwdev, RTW89_CHANCTX_0); + rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true, RTW89_CHANCTX_0); } -static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev) +static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx; + enum rtw89_phy_idx phy_idx = rtwvif->phy_idx; - rtw8852a_rx_dck(rtwdev, phy_idx, true); - rtw8852a_iqk(rtwdev, phy_idx); - rtw8852a_tssi(rtwdev, phy_idx); - rtw8852a_dpk(rtwdev, phy_idx); + rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx); + rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx); + rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx); + rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx); } static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { - rtw8852a_tssi_scan(rtwdev, phy_idx); + rtw8852a_tssi_scan(rtwdev, phy_idx, chan); } -static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, bool start) +static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool start) { - rtw8852a_wifi_scan_notify(rtwdev, start, RTW89_PHY_0); + rtw8852a_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx); } static void rtw8852a_rfk_track(struct rtw89_dev *rtwdev) @@ -1534,10 +1551,8 @@ static void rtw8852a_start_pmac_tx(struct rtw89_dev *rtwdev, void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev, struct rtw8852a_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx) + enum rtw89_phy_idx idx, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - if (!tx_info->en_pmac_tx) { rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx); rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx); @@ -1559,7 +1574,7 @@ void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev, void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx) + enum rtw89_phy_idx idx, const struct rtw89_chan *chan) { struct rtw8852a_bb_pmac_info tx_info = {0}; @@ -1569,7 +1584,7 @@ void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, tx_info.tx_cnt = tx_cnt; tx_info.period = period; tx_info.tx_time = tx_time; - rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx); + rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan); } void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, @@ -2098,9 +2113,11 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { .get_thermal = rtw8852a_get_thermal, .ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx, .query_ppdu = rtw8852a_query_ppdu, + .convert_rpl_to_rssi = NULL, .ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx, .cfg_txrx_path = NULL, .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset, + .digital_pwr_comp = NULL, .pwr_on_func = NULL, .pwr_off_func = NULL, .query_rxdesc = rtw89_core_query_rxdesc, @@ -2167,6 +2184,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .dig_regs = &rtw8852a_dig_regs, .tssi_dbw_table = NULL, .support_macid_num = RTW89_MAX_MAC_ID_NUM, + .support_link_num = 0, .support_chanctx_num = 1, .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -2178,6 +2196,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .ul_tb_waveform_ctrl = false, .ul_tb_pwr_diff = false, .hw_sec_hdr 
= false, + .hw_mgmt_tx_encrypt = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, @@ -2240,6 +2259,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .rrsr_cfgs = &rtw8852a_rrsr_cfgs, .bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP, + .rfkill_init = &rtw8852a_rfkill_regs, + .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9}, .dma_ch_mask = 0, .edcca_regs = &rtw8852a_edcca_regs, #ifdef CONFIG_PM diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h index ea82fed7b7bec1..d6c1acd09238b8 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h @@ -97,10 +97,10 @@ extern const struct rtw89_chip_info rtw8852a_chip_info; void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev); void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev, struct rtw8852a_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx); + enum rtw89_phy_idx idx, const struct rtw89_chan *chan); void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx); + enum rtw89_phy_idx idx, const struct rtw89_chan *chan); void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, enum rtw89_phy_idx idx); void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c index d86429e4a35ffe..9db8713ac99be7 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c @@ -493,11 +493,12 @@ static void _dack(struct rtw89_dev *rtwdev) _dack_s1(rtwdev); } -static void _dac_cal(struct rtw89_dev *rtwdev, bool force) +static void _dac_cal(struct rtw89_dev *rtwdev, bool force, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dack_info *dack = &rtwdev->dack; u32 rf0_0, rf1_0; - u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB); + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx); dack->dack_done = false; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n"); @@ -799,12 +800,13 @@ static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype) } static bool _iqk_one_shot(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, u8 path, u8 ktype) + enum rtw89_phy_idx phy_idx, u8 path, u8 ktype, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; bool fail = false; u32 iqk_cmd = 0x0; - u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path); + u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path, chanctx_idx); u32 addr_rfc_ctl = 0x0; if (path == RF_PATH_A) @@ -888,7 +890,8 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, } static bool _rxk_group_sel(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0}; @@ -927,7 +930,7 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev, rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp); rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1); rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); - fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK, chanctx_idx); rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail); } @@ -952,7 +955,8 
@@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev, } static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u8 group = 0x0; @@ -991,7 +995,7 @@ static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, B_CFIR_LUT_GP, group); rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN); rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); - fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK, chanctx_idx); switch (iqk_info->iqk_band[path]) { case RTW89_BAND_2G: @@ -1040,7 +1044,8 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) } static bool _txk_group_sel(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED}; static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED}; @@ -1083,7 +1088,7 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev, rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp); rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); - fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK, chanctx_idx); rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail); } @@ -1098,7 +1103,8 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev, } static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u8 group = 0x2; @@ -1131,7 +1137,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group); rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt); rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); - fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK, chanctx_idx); if (!fail) { tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); iqk_info->nb_txcfir[path] = tmp | 0x2; @@ -1179,7 +1185,8 @@ static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path) } static bool _iqk_lok(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u32 rf0 = 0x0; @@ -1210,11 +1217,11 @@ static bool _iqk_lok(struct rtw89_dev *rtwdev, rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN); rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP); rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt); - tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE); + tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE, chanctx_idx); iqk_info->lok_cor_fail[0][path] = tmp; fsleep(10); rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt); - tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE); + tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE, chanctx_idx); iqk_info->lok_fin_fail[0][path] = tmp; fail = _lok_finetune_check(rtwdev, path); return fail; @@ -1321,7 +1328,8 @@ static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, } static -void _iqk_by_path(struct rtw89_dev *rtwdev, 
enum rtw89_phy_idx phy_idx, u8 path) +void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; bool lok_is_fail = false; @@ -1333,30 +1341,35 @@ void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) for (i = 0; i < 3; i++) { _lok_res_table(rtwdev, path, ibias++); _iqk_txk_setting(rtwdev, path); - lok_is_fail = _iqk_lok(rtwdev, phy_idx, path); + lok_is_fail = _iqk_lok(rtwdev, phy_idx, path, chanctx_idx); if (!lok_is_fail) break; } if (iqk_info->is_nbiqk) - iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path); + iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path, + chanctx_idx); else - iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path); + iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path, + chanctx_idx); _iqk_rxclk_setting(rtwdev, path); _iqk_rxk_setting(rtwdev, path); if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G) - iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path); + iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path, + chanctx_idx); else - iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path); + iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path, + chanctx_idx); _iqk_info_iqk(rtwdev, phy_idx, path); } static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, u8 path) + enum rtw89_phy_idx phy, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u32 reg_rf18 = 0x0, reg_35c = 0x0; u8 idx = 0; u8 get_empty_table = false; @@ -1413,9 +1426,9 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, } static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, - u8 path) + u8 path, enum rtw89_chanctx_idx chanctx_idx) { - _iqk_by_path(rtwdev, phy_idx, path); + _iqk_by_path(rtwdev, phy_idx, path, chanctx_idx); } static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) @@ -1513,7 +1526,8 @@ static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, rtw89_rfk_parser(rtwdev, tbl); } -static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path) +static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u8 phy_idx = 0x0; @@ -1525,10 +1539,10 @@ static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path) else phy_idx = RTW89_PHY_1; - _iqk_get_ch_info(rtwdev, phy_idx, path); + _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx); _iqk_macbb_setting(rtwdev, phy_idx, path); _iqk_preset(rtwdev, path); - _iqk_start_iqk(rtwdev, phy_idx, path); + _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx); _iqk_restore(rtwdev, path); _iqk_afebb_restore(rtwdev, phy_idx, path); } @@ -1607,12 +1621,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev) } static void _doiqk(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u32 backup_bb_val[BACKUP_BB_REGS_NR]; u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR]; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx); 
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); @@ -1622,12 +1637,12 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, iqk_info->version = RTW8852A_IQK_VER; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); - _iqk_get_ch_info(rtwdev, phy_idx, path); + _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx); _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); _iqk_macbb_setting(rtwdev, phy_idx, path); _iqk_preset(rtwdev, path); - _iqk_start_iqk(rtwdev, phy_idx, path); + _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx); _iqk_restore(rtwdev, path); _iqk_afebb_restore(rtwdev, phy_idx, path); _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); @@ -1635,18 +1650,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); } -static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force, + enum rtw89_chanctx_idx chanctx_idx) { switch (_kpath(rtwdev, phy_idx)) { case RF_A: - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); break; case RF_B: - _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx); break; case RF_AB: - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); - _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx); break; default: break; @@ -1656,9 +1672,10 @@ static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool forc #define RXDCK_VER_8852A 0xe static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, bool is_afe) + enum rtw89_rf_path path, bool is_afe, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path); + u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx); u32 ori_val; rtw89_debug(rtwdev, RTW89_DBG_RFK, @@ -1704,7 +1721,7 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - bool is_afe) + bool is_afe, enum rtw89_chanctx_idx chanctx_idx) { u8 path, kpath, dck_tune; u32 rf_reg5; @@ -1732,7 +1749,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0); rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); - _set_rx_dck(rtwdev, phy, path, is_afe); + _set_rx_dck(rtwdev, phy, path, is_afe, chanctx_idx); rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune); rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); @@ -1800,9 +1817,10 @@ static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg, } static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, enum rtw8852a_dpk_id id) + enum rtw89_rf_path path, enum rtw8852a_dpk_id id, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path); + u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx); u16 dpk_cmd = 0x0; u32 val; int ret; @@ -1841,18 +1859,19 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx 
phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, + enum rtw89_chanctx_idx chanctx_idx) { rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3); - _set_rx_dck(rtwdev, phy, path, false); + _set_rx_dck(rtwdev, phy, path, false, chanctx_idx); } static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 kidx = dpk->cur_idx[path]; dpk->bp[path][kidx].band = chan->band_type; @@ -1967,7 +1986,8 @@ static void _dpk_kip_restore(struct rtw89_dev *rtwdev, static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, + enum rtw89_chanctx_idx chanctx_idx) { u8 cur_rxbb; @@ -1997,7 +2017,7 @@ static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); - _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); + _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK, chanctx_idx); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD)); @@ -2186,10 +2206,11 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev, } static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, u8 kidx) + enum rtw89_rf_path path, u8 kidx, + enum rtw89_chanctx_idx chanctx_idx) { _dpk_tpg_sel(rtwdev, path, kidx); - _dpk_one_shot(rtwdev, phy, path, SYNC); + _dpk_one_shot(rtwdev, phy, path, SYNC, chanctx_idx); return _dpk_sync_check(rtwdev, path); /*1= fail*/ } @@ -2242,10 +2263,10 @@ static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, - u8 kidx) + u8 kidx, enum rtw89_chanctx_idx chanctx_idx) { _dpk_table_select(rtwdev, path, kidx, 1); - _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS); + _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS, chanctx_idx); } #define DPK_TXAGC_LOWER 0x2e @@ -2322,7 +2343,7 @@ static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check) static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kidx, u8 init_txagc, - bool loss_only) + bool loss_only, enum rtw89_chanctx_idx chanctx_idx) { #define DPK_AGC_ADJ_LMT 6 #define DPK_DGAIN_UPPER 1922 @@ -2330,7 +2351,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, #define DPK_RXBB_UPPER 0x1f #define DPK_RXBB_LOWER 0 #define DPK_GL_CRIT 7 - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0; u8 agc_cnt = 0; bool limited_rxbb = false; @@ -2344,7 +2365,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, do { switch (step) { case DPK_AGC_STEP_SYNC_DGAIN: - if (_dpk_sync(rtwdev, phy, path, kidx)) { + if (_dpk_sync(rtwdev, phy, path, kidx, chanctx_idx)) { tmp_txagc = DPK_TXAGC_INVAL; goout = true; break; @@ -2380,7 +2401,8 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, if (chan->band_width < RTW89_CHANNEL_WIDTH_80) _dpk_bypass_rxcfir(rtwdev, path, true); else - _dpk_lbk_rxiqk(rtwdev, phy, path); + _dpk_lbk_rxiqk(rtwdev, phy, path, + chanctx_idx); } if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER) 
step = DPK_AGC_STEP_SYNC_DGAIN; @@ -2391,7 +2413,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, break; case DPK_AGC_STEP_GAIN_LOSS_IDX: - _dpk_gainloss(rtwdev, phy, path, kidx); + _dpk_gainloss(rtwdev, phy, path, kidx, chanctx_idx); tmp_gl_idx = _dpk_gainloss_read(rtwdev); if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) || @@ -2475,11 +2497,12 @@ static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) } static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, u8 kidx, u8 gain) + enum rtw89_rf_path path, u8 kidx, u8 gain, + enum rtw89_chanctx_idx chanctx_idx) { _dpk_set_mdpd_para(rtwdev, 0x0); _dpk_table_select(rtwdev, path, kidx, 1); - _dpk_one_shot(rtwdev, phy, path, MDPK_IDL); + _dpk_one_shot(rtwdev, phy, path, MDPK_IDL, chanctx_idx); } static void _dpk_fill_result(struct rtw89_dev *rtwdev, @@ -2518,10 +2541,10 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, } static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); bool is_reload = false; u8 idx, cur_band, cur_ch; @@ -2545,7 +2568,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, u8 gain) + enum rtw89_rf_path path, u8 gain, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 txagc = 0, kidx = dpk->cur_idx[path]; @@ -2558,16 +2582,16 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, _rf_direct_cntrl(rtwdev, path, false); txagc = _dpk_set_tx_pwr(rtwdev, gain, path); _dpk_rf_setting(rtwdev, gain, path, kidx); - _dpk_rx_dck(rtwdev, phy, path); + _dpk_rx_dck(rtwdev, phy, path, chanctx_idx); _dpk_kip_setting(rtwdev, path, kidx); _dpk_manual_txcfir(rtwdev, path, true); - txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false); + txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx); if (txagc == DPK_TXAGC_INVAL) is_fail = true; _dpk_get_thermal(rtwdev, kidx, path); - _dpk_idl_mpa(rtwdev, phy, path, kidx, gain); + _dpk_idl_mpa(rtwdev, phy, path, kidx, gain, chanctx_idx); rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); _dpk_fill_result(rtwdev, path, kidx, gain, txagc); _dpk_manual_txcfir(rtwdev, path, false); @@ -2584,7 +2608,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy, u8 kpath) + enum rtw89_phy_idx phy, u8 kpath, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u32 backup_bb_val[BACKUP_BB_REGS_NR]; @@ -2599,7 +2624,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, if (!(kpath & BIT(path))) continue; - reloaded[path] = _dpk_reload_check(rtwdev, phy, path); + reloaded[path] = _dpk_reload_check(rtwdev, phy, path, + chanctx_idx); if (!reloaded[path] && dpk->bp[path][0].ch != 0) dpk->cur_idx[path] = !dpk->cur_idx[path]; else @@ -2624,7 +2650,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, _dpk_tssi_pause(rtwdev, path, true); _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path); _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); 
- _dpk_information(rtwdev, phy, path); + _dpk_information(rtwdev, phy, path, chanctx_idx); } _dpk_bb_afe_setting(rtwdev, phy, path, kpath); @@ -2633,7 +2659,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, if (!(kpath & BIT(path)) || reloaded[path]) continue; - is_fail = _dpk_main(rtwdev, phy, path, 1); + is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx); _dpk_onoff(rtwdev, path, is_fail); } @@ -2652,10 +2678,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, } } -static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_fem_info *fem = &rtwdev->fem; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) { rtw89_debug(rtwdev, RTW89_DBG_RFK, @@ -2682,17 +2709,19 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) } } -static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) +static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + bool force, enum rtw89_chanctx_idx chanctx_idx) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", RTW8852A_DPK_VER, rtwdev->hal.cv, RTW8852A_RF_REL_VERSION); - if (_dpk_bypass_check(rtwdev, phy)) + if (_dpk_bypass_check(rtwdev, phy, chanctx_idx)) _dpk_force_bypass(rtwdev, phy); else - _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy)); + _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), + chanctx_idx); } static void _dpk_onoff(struct rtw89_dev *rtwdev, @@ -2815,9 +2844,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev) } static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; if (band == RTW89_BAND_2G) @@ -2826,9 +2854,9 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1); } -static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl); @@ -2838,9 +2866,9 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) } static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, @@ -2869,7 +2897,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { #define __get_val(ptr, idx) \ ({ \ @@ -2883,7 +2911,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph __val; \ 
}) struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 subband = chan->subband_type; const s8 *thm_up_a = NULL; @@ -3076,9 +3103,8 @@ static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, } static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 subband = chan->subband_type; switch (subband) { @@ -3252,10 +3278,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) } static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u32 gidx, gidx_1st, gidx_2nd; s8 de_1st = 0; @@ -3290,10 +3315,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u32 tgidx, tgidx_1st, tgidx_2nd; s8 tde_1st = 0; @@ -3328,11 +3352,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, } static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy) + enum rtw89_phy_idx phy, const struct rtw89_chan *chan) { #define __DE_MASK 0x003ff000 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858}; static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860}; static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838}; @@ -3352,7 +3375,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, for (i = 0; i < RF_PATH_NUM_8852A; i++) { gidx = _tssi_get_cck_group(rtwdev, ch); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = tssi_info->tssi_cck[i][gidx] + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3368,8 +3391,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, rtw89_phy_read32_mask(rtwdev, r_cck_long[i], __DE_MASK)); - ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = ofdm_de + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3458,10 +3481,10 @@ static void _tssi_track(struct rtw89_dev *rtwdev) } } -static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel, ch_tmp; u8 bw = chan->band_width; u8 band = chan->band_type; @@ -3497,24 +3520,25 @@ static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) } static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - u8 path, 
s16 pwr_dbm, u8 enable) + u8 path, s16 pwr_dbm, u8 enable, const struct rtw89_chan *chan) { rtw8852a_bb_set_plcp_tx(rtwdev); rtw8852a_bb_cfg_tx_path(rtwdev, path); rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy); - rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy); + rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy, chan); } -static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); const struct rtw89_chip_info *mac_reg = rtwdev->chip; u8 ch = chan->channel, ch_tmp; u8 bw = chan->band_width; u8 band = chan->band_type; u32 tx_en; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0, chanctx_idx); s8 power; s16 xdbm; u32 i, tx_counter = 0; @@ -3546,9 +3570,9 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) _wait_rx_mode(rtwdev, _kpath(rtwdev, phy)); tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); - _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true); + _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true, chan); mdelay(15); - _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false); + _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false, chan); tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) - tx_counter; @@ -3600,19 +3624,21 @@ void rtw8852a_rck(struct rtw89_dev *rtwdev) _rck(rtwdev, path); } -void rtw8852a_dack(struct rtw89_dev *rtwdev) +void rtw8852a_dack(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); - _dac_cal(rtwdev, false); + _dac_cal(rtwdev, false, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); } -void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { u32 tx_en; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); @@ -3620,34 +3646,35 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) _iqk_init(rtwdev); if (rtwdev->dbcc_en) - _iqk_dbcc(rtwdev, phy_idx); + _iqk_dbcc(rtwdev, phy_idx, chanctx_idx); else - _iqk(rtwdev, phy_idx, false); + _iqk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); } void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, - bool is_afe) + bool is_afe, enum rtw89_chanctx_idx chanctx_idx) { u32 tx_en; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); - _rx_dck(rtwdev, phy_idx, is_afe); + _rx_dck(rtwdev, phy_idx, is_afe, chanctx_idx); 
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); } -void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { u32 tx_en; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); @@ -3655,7 +3682,7 @@ void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) rtwdev->dpk.is_dpk_enable = true; rtwdev->dpk.is_dpk_reload_en = false; - _dpk(rtwdev, phy_idx, false); + _dpk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); @@ -3666,8 +3693,10 @@ void rtw8852a_dpk_track(struct rtw89_dev *rtwdev) _dpk_track(rtwdev); } -void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx) { + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 i; rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", @@ -3676,26 +3705,27 @@ void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) _tssi_disable(rtwdev, phy); for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) { - _tssi_rf_setting(rtwdev, phy, i); - _tssi_set_sys(rtwdev, phy); - _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); + _tssi_rf_setting(rtwdev, phy, i, chan); + _tssi_set_sys(rtwdev, phy, chan); + _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i, chan); _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); _tssi_set_dck(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); _tssi_set_dac_gain_tbl(rtwdev, phy, i); _tssi_slope_cal_org(rtwdev, phy, i); _tssi_set_rf_gap_tbl(rtwdev, phy, i); _tssi_set_slope(rtwdev, phy, i); - _tssi_pak(rtwdev, phy, i); + _tssi_pak(rtwdev, phy, i, chan); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); - _tssi_high_power(rtwdev, phy); - _tssi_pre_tx(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); + _tssi_high_power(rtwdev, phy, chan); + _tssi_pre_tx(rtwdev, phy, chanctx_idx); } -void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { u8 i; @@ -3710,14 +3740,14 @@ void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) _tssi_disable(rtwdev, phy); for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) { - _tssi_rf_setting(rtwdev, phy, i); - _tssi_set_sys(rtwdev, phy); - _tssi_set_tmeter_tbl(rtwdev, phy, i); - _tssi_pak(rtwdev, phy, i); + _tssi_rf_setting(rtwdev, phy, i, chan); + _tssi_set_sys(rtwdev, phy, chan); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); + _tssi_pak(rtwdev, phy, i, chan); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); } void rtw8852a_tssi_track(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h index fa058ccc8616d5..8761f2cc935954 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h @@ -8,14 +8,19 @@ #include "core.h" void rtw8852a_rck(struct 
rtw89_dev *rtwdev); -void rtw8852a_dack(struct rtw89_dev *rtwdev); -void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852a_dack(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_idx chanctx_idx); +void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, - bool is_afe); -void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); + bool is_afe, enum rtw89_chanctx_idx chanctx_idx); +void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852a_dpk_track(struct rtw89_dev *rtwdev); -void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); -void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx); +void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan); void rtw8852a_tssi_track(struct rtw89_dev *rtwdev); void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, enum rtw89_phy_idx phy_idx); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index a22847a311ad47..12be52f76427a1 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -150,6 +150,15 @@ static const struct rtw89_rrsr_cfgs rtw8852b_rrsr_cfgs = { .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2}, }; +static const struct rtw89_rfkill_regs rtw8852b_rfkill_regs = { + .pinmux = {R_AX_GPIO8_15_FUNC_SEL, + B_AX_PINMUX_GPIO9_FUNC_SEL_MASK, + 0xf}, + .mode = {R_AX_GPIO_EXT_CTRL + 2, + (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16, + 0x0}, +}; + static const struct rtw89_dig_regs rtw8852b_dig_regs = { .seg0_pd_reg = R_SEG0R_PD_V1, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, @@ -549,29 +558,32 @@ static void rtw8852b_rfk_init(struct rtw89_dev *rtwdev) rtw8852b_dpk_init(rtwdev); rtw8852b_rck(rtwdev); - rtw8852b_dack(rtwdev); - rtw8852b_rx_dck(rtwdev, RTW89_PHY_0); + rtw8852b_dack(rtwdev, RTW89_CHANCTX_0); + rtw8852b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0); } -static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev) +static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx; + enum rtw89_phy_idx phy_idx = rtwvif->phy_idx; - rtw8852b_rx_dck(rtwdev, phy_idx); - rtw8852b_iqk(rtwdev, phy_idx); - rtw8852b_tssi(rtwdev, phy_idx, true); - rtw8852b_dpk(rtwdev, phy_idx); + rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx); + rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx); + rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx); + rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx); } static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { - rtw8852b_tssi_scan(rtwdev, phy_idx); + rtw8852b_tssi_scan(rtwdev, phy_idx, chan); } -static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, bool start) +static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool start) { - rtw8852b_wifi_scan_notify(rtwdev, start, RTW89_PHY_0); + rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx); } static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev) 
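The hunks above and below follow one calling convention: the per-chip RF calibration entry points now take the channel-context index (or a resolved struct rtw89_chan) from the caller and look up the operating channel with rtw89_chan_get(), instead of assuming the first channel context. A minimal C sketch of a caller, assuming the rtw89 headers from this series are already included; the function name example_rfk_on_channel is hypothetical and only illustrates the pattern, it is not part of the patch:

/* assumes the usual rtw89 core/chanctx headers and rtw8852b prototypes */
static void example_rfk_on_channel(struct rtw89_dev *rtwdev,
                                   struct rtw89_vif *rtwvif)
{
        /* derive PHY and channel context from the vif instead of PHY_0 */
        enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
        enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
        const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);

        /* calibrations receive the chanctx index explicitly */
        rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
        rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
        rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx);
        rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);

        /* scan-time TSSI takes a caller-resolved channel snapshot instead */
        rtw8852b_tssi_scan(rtwdev, phy_idx, chan);
}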
@@ -729,9 +741,11 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = { .get_thermal = rtw8852bx_get_thermal, .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx, .query_ppdu = rtw8852bx_query_ppdu, + .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi, .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset, + .digital_pwr_comp = NULL, .pwr_on_func = rtw8852b_pwr_on_func, .pwr_off_func = rtw8852b_pwr_off_func, .query_rxdesc = rtw89_core_query_rxdesc, @@ -807,6 +821,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .dig_regs = &rtw8852b_dig_regs, .tssi_dbw_table = NULL, .support_macid_num = RTW89_MAX_MAC_ID_NUM, + .support_link_num = 0, .support_chanctx_num = 0, .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -818,6 +833,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .ul_tb_waveform_ctrl = true, .ul_tb_pwr_diff = false, .hw_sec_hdr = false, + .hw_mgmt_tx_encrypt = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, @@ -880,6 +896,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .rrsr_cfgs = &rtw8852b_rrsr_cfgs, .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP_V1, + .rfkill_init = &rtw8852b_rfkill_regs, + .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9}, .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) | BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) | BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c index 1745c2882acf5f..ede0ca5426aebc 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c @@ -1445,10 +1445,8 @@ static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev, static void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev, struct rtw8852bx_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx) + enum rtw89_phy_idx idx, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - if (!tx_info->en_pmac_tx) { rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx); rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx); @@ -1473,7 +1471,7 @@ void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev, static void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx) + enum rtw89_phy_idx idx, const struct rtw89_chan *chan) { struct rtw8852bx_bb_pmac_info tx_info = {0}; @@ -1484,7 +1482,7 @@ void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, tx_info.period = period; tx_info.tx_time = tx_time; - rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx); + rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan); } static @@ -1623,9 +1621,9 @@ static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, static void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path) + enum rtw89_rf_path_bit rx_path, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u32 rst_mask0; u32 rst_mask1; @@ -1713,9 +1711,10 @@ static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev, static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); enum rtw89_rf_path_bit rx_path = 
hal->antenna_rx ? hal->antenna_rx : RF_AB; - rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan); rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path); if (rtwdev->hal.rx_nss == 1) { @@ -1948,6 +1947,19 @@ static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev, rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status); } +static void __rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + u8 delta = phy_ppdu->rpl_avg - phy_ppdu->rssi_avg; + u8 *rssi = phy_ppdu->rssi; + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) + rssi[i] += delta; + + phy_ppdu->rssi_avg = phy_ppdu->rpl_avg; +} + static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; @@ -2030,6 +2042,7 @@ const struct rtw8852bx_info rtw8852bx_info = { .ctrl_nbtg_bt_tx = __rtw8852bx_ctrl_nbtg_bt_tx, .ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx, .query_ppdu = __rtw8852bx_query_ppdu, + .convert_rpl_to_rssi = __rtw8852bx_convert_rpl_to_rssi, .read_efuse = __rtw8852bx_read_efuse, .read_phycap = __rtw8852bx_read_phycap, .power_trim = __rtw8852bx_power_trim, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h index 801e7ab9f4fa23..3dce5422f41e57 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h @@ -121,13 +121,14 @@ struct rtw8852bx_info { void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev); void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path); void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path); + enum rtw89_rf_path_bit rx_path, + const struct rtw89_chan *chan); void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev); void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm, enum rtw89_phy_idx idx); void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable, u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx); + enum rtw89_phy_idx idx, const struct rtw89_chan *chan); void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, struct rtw8852bx_bb_tssi_bak *bak); void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, @@ -145,6 +146,8 @@ struct rtw8852bx_info { void (*query_ppdu)(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct ieee80211_rx_status *status); + void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu); int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map, enum rtw89_efuse_block block); int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map); @@ -207,9 +210,10 @@ void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path) static inline void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path) + enum rtw89_rf_path_bit rx_path, + const struct rtw89_chan *chan) { - rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path, chan); } static inline @@ -228,9 +232,10 @@ void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, static inline void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx) + enum rtw89_phy_idx idx, const struct rtw89_chan *chan) { - rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx); + rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx, + chan); } static 
inline @@ -290,6 +295,13 @@ void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev, rtw8852bx_info.query_ppdu(rtwdev, phy_ppdu, status); } +static inline +void rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + rtw8852bx_info.convert_rpl_to_rssi(rtwdev, phy_ppdu); +} + static inline int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map, enum rtw89_efuse_block block) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c index 12354612441c4b..ef47a5facc8361 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c @@ -1382,9 +1382,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u _iqk_info_iqk(rtwdev, phy_idx, path); } -static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path) +static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u32 reg_rf18; u32 reg_35c; @@ -1608,12 +1609,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx } static void _doiqk(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u32 backup_bb_val[BACKUP_BB_REGS_NR]; u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR]; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); @@ -1623,7 +1625,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, iqk_info->version = RTW8852B_IQK_VER; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); - _iqk_get_ch_info(rtwdev, phy_idx, path); + _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx); _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); @@ -1638,20 +1640,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); } -static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force, + enum rtw89_chanctx_idx chanctx_idx) { u8 kpath = _kpath(rtwdev, phy_idx); switch (kpath) { case RF_A: - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); break; case RF_B: - _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx); break; case RF_AB: - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); - _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx); break; default: break; @@ -1761,9 +1764,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 
RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 kidx = dpk->cur_idx[path]; @@ -1786,9 +1789,10 @@ static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, u8 kpath) + enum rtw89_rf_path path, u8 kpath, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl); @@ -1803,9 +1807,10 @@ static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, u8 kpath) + enum rtw89_rf_path path, u8 kpath, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl); @@ -2217,9 +2222,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check) static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kidx, u8 init_txagc, - bool loss_only) + bool loss_only, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 step = DPK_AGC_STEP_SYNC_DGAIN; u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0; u8 goout = 0, agc_cnt = 0, limited_rxbb = 0; @@ -2416,9 +2421,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_dpk_info *dpk = &rtwdev->dpk; bool is_reload = false; u8 idx, cur_band, cur_ch; @@ -2443,7 +2448,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, u8 gain) + enum rtw89_rf_path path, u8 gain, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 txagc = 0x38, kidx = dpk->cur_idx[path]; @@ -2464,7 +2470,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, _dpk_kip_set_rxagc(rtwdev, phy, path); _dpk_table_select(rtwdev, path, kidx, gain); - txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false); + txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc); if (txagc == 0xff) { @@ -2491,7 +2497,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy, u8 kpath) + enum rtw89_phy_idx phy, u8 kpath, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120}; @@ -2503,7 +2510,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, if (dpk->is_dpk_reload_en) { for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { - 
reloaded[path] = _dpk_reload_check(rtwdev, phy, path); + reloaded[path] = _dpk_reload_check(rtwdev, phy, path, + chanctx_idx); if (!reloaded[path] && dpk->bp[path][0].ch) dpk->cur_idx[path] = !dpk->cur_idx[path]; else @@ -2519,19 +2527,19 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path); _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); - _dpk_information(rtwdev, phy, path); + _dpk_information(rtwdev, phy, path, chanctx_idx); if (rtwdev->is_tssi_mode[path]) _dpk_tssi_pause(rtwdev, path, true); } - _dpk_bb_afe_setting(rtwdev, phy, path, kpath); + _dpk_bb_afe_setting(rtwdev, phy, path, kpath, chanctx_idx); for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { - is_fail = _dpk_main(rtwdev, phy, path, 1); + is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx); _dpk_onoff(rtwdev, path, is_fail); } - _dpk_bb_afe_restore(rtwdev, phy, path, kpath); + _dpk_bb_afe_restore(rtwdev, phy, path, kpath, chanctx_idx); _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) { @@ -2543,9 +2551,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, } } -static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_fem_info *fem = &rtwdev->fem; if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) { @@ -2577,17 +2586,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) } } -static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) +static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force, + enum rtw89_chanctx_idx chanctx_idx) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", RTW8852B_DPK_VER, rtwdev->hal.cv, RTW8852B_RF_REL_VERSION); - if (_dpk_bypass_check(rtwdev, phy)) + if (_dpk_bypass_check(rtwdev, phy, chanctx_idx)) _dpk_force_bypass(rtwdev, phy); else - _dpk_cal_select(rtwdev, force, phy, RF_AB); + _dpk_cal_select(rtwdev, force, phy, RF_AB, chanctx_idx); } static void _dpk_track(struct rtw89_dev *rtwdev) @@ -2722,9 +2732,8 @@ static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) } static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; if (band == RTW89_BAND_2G) @@ -2734,9 +2743,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl); @@ -2778,7 +2786,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct 
rtw89_chan *chan) { #define RTW8852B_TSSI_GET_VAL(ptr, idx) \ ({ \ @@ -2792,7 +2800,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph __val; \ }) struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 subband = chan->subband_type; const s8 *thm_up_a = NULL; @@ -2944,9 +2951,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx } static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; if (path == RF_PATH_A) @@ -2960,9 +2966,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy } static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, bool all) + enum rtw89_rf_path path, bool all, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; const struct rtw89_rfk_tbl *tbl = NULL; u8 ch = chan->channel; @@ -3231,10 +3237,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) } static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u32 gidx, gidx_1st, gidx_2nd; s8 de_1st; @@ -3267,10 +3272,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u32 tgidx, tgidx_1st, tgidx_2nd; s8 tde_1st; @@ -3304,10 +3308,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph return val; } -static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 gidx; s8 ofdm_de; @@ -3320,7 +3324,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) { gidx = _tssi_get_cck_group(rtwdev, ch); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = tssi_info->tssi_cck[i][gidx] + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3336,8 +3340,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK)); - ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = ofdm_de + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3383,10 +3387,10 
@@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p } static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, enum rtw89_rf_path path) + enum rtw89_phy_idx phy, enum rtw89_rf_path path, + const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 channel = chan->channel; u8 band; @@ -3420,7 +3424,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm, - u8 enable) + u8 enable, const struct rtw89_chan *chan) { enum rtw89_rf_path_bit rx_path; @@ -3436,11 +3440,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, if (enable) { rtw8852bx_bb_set_plcp_tx(rtwdev); rtw8852bx_bb_cfg_tx_path(rtwdev, path); - rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan); rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy); } - rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); + rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan); } static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev, @@ -3494,7 +3498,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel) static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, const s16 *power, - u32 *tssi_cw_rpt) + u32 *tssi_cw_rpt, const struct rtw89_chan *chan) { u32 tx_counter, tx_counter_tmp; const int retry = 100; @@ -3513,9 +3517,10 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy _tssi_trigger[path], tmp, path); if (j == 0) - _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true); + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true, chan); else - _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true); + _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true, + chan); tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); tx_counter_tmp -= tx_counter; @@ -3546,14 +3551,14 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n", k, path); - _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan); return false; } tssi_cw_rpt[j] = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT); - _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan); tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); tx_counter_tmp -= tx_counter; @@ -3567,14 +3572,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy } static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4, 0x78e4, 0x49c0, 0x0d18, 0x0d80}; static const s16 power_2g[4] = {48, 20, 4, 4}; static const s16 power_5g[4] = {48, 20, 4, 4}; struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3; u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0}; u8 channel = chan->channel; @@ -3635,7 
+3639,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2); rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2); - ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt); + ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan); if (!ok) goto out; @@ -3755,18 +3759,19 @@ void rtw8852b_rck(struct rtw89_dev *rtwdev) _rck(rtwdev, path); } -void rtw8852b_dack(struct rtw89_dev *rtwdev) +void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); _dac_cal(rtwdev, false); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); } -void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); @@ -3774,15 +3779,16 @@ void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); _iqk_init(rtwdev); - _iqk(rtwdev, phy_idx, false); + _iqk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); } -void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); @@ -3795,9 +3801,10 @@ void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); } -void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); @@ -3806,7 +3813,7 @@ void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) rtwdev->dpk.is_dpk_enable = true; rtwdev->dpk.is_dpk_reload_en = false; - _dpk(rtwdev, phy_idx, false); + _dpk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); @@ -3817,9 +3824,11 @@ void rtw8852b_dpk_track(struct rtw89_dev *rtwdev) _dpk_track(rtwdev); } -void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en) +void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx); u32 tx_en; u8 i; @@ -3829,34 +3838,34 @@ void rtw8852b_tssi(struct rtw89_dev *rtwdev, 
enum rtw89_phy_idx phy, bool hwtx_e _tssi_disable(rtwdev, phy); for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) { - _tssi_rf_setting(rtwdev, phy, i); - _tssi_set_sys(rtwdev, phy, i); + _tssi_rf_setting(rtwdev, phy, i, chan); + _tssi_set_sys(rtwdev, phy, i, chan); _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); _tssi_set_dck(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); _tssi_set_dac_gain_tbl(rtwdev, phy, i); - _tssi_slope_cal_org(rtwdev, phy, i); - _tssi_alignment_default(rtwdev, phy, i, true); + _tssi_slope_cal_org(rtwdev, phy, i, chan); + _tssi_alignment_default(rtwdev, phy, i, true, chan); _tssi_set_tssi_slope(rtwdev, phy, i); rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL); _tmac_tx_pause(rtwdev, phy, true); if (hwtx_en) - _tssi_alimentk(rtwdev, phy, i); + _tssi_alimentk(rtwdev, phy, i, chan); _tmac_tx_pause(rtwdev, phy, false); rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); } -void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; u8 channel = chan->channel; u8 band; @@ -3879,24 +3888,25 @@ void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) _tssi_disable(rtwdev, phy); for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) { - _tssi_rf_setting(rtwdev, phy, i); - _tssi_set_sys(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_rf_setting(rtwdev, phy, i, chan); + _tssi_set_sys(rtwdev, phy, i, chan); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); if (tssi_info->alignment_done[i][band]) - _tssi_alimentk_done(rtwdev, phy, i); + _tssi_alimentk_done(rtwdev, phy, i, chan); else - _tssi_alignment_default(rtwdev, phy, i, true); + _tssi_alignment_default(rtwdev, phy, i, true, chan); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); } static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, bool enable) + enum rtw89_phy_idx phy, bool enable, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 channel = chan->channel; rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n", @@ -3904,7 +3914,7 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev, if (enable) { if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) - rtw8852b_tssi(rtwdev, phy, true); + rtw8852b_tssi(rtwdev, phy, true, chanctx_idx); return; } @@ -3921,8 +3931,8 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev, rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); - _tssi_alimentk_done(rtwdev, phy, RF_PATH_A); - _tssi_alimentk_done(rtwdev, phy, RF_PATH_B); + _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan); + _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan); rtw89_debug(rtwdev, RTW89_DBG_RFK, "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n", @@ -3935,12 +3945,13 @@ 
static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev, } void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { if (scan_start) - rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true); + rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx); else - rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false); + rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx); } static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h index f528320656001c..c31ba446e6e096 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h @@ -8,16 +8,22 @@ #include "core.h" void rtw8852b_rck(struct rtw89_dev *rtwdev); -void rtw8852b_dack(struct rtw89_dev *rtwdev); -void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); -void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx); +void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); +void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852b_dpk_init(struct rtw89_dev *rtwdev); -void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852b_dpk_track(struct rtw89_dev *rtwdev); -void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en); -void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx); +void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan); void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c new file mode 100644 index 00000000000000..7dfdcb5964e117 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c @@ -0,0 +1,848 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "coex.h" +#include "fw.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852bt.h" +#include "rtw8852bt_rfk.h" +#include "rtw8852b_common.h" + +#define RTW8852BT_FW_FORMAT_MAX 0 +#define RTW8852BT_FW_BASENAME "rtw89/rtw8852bt_fw" +#define RTW8852BT_MODULE_FIRMWARE \ + RTW8852BT_FW_BASENAME ".bin" + +static const struct rtw89_hfc_ch_cfg rtw8852bt_hfc_chcfg_pcie[] = { + {16, 742, grp_0}, /* ACH 0 */ + {16, 742, grp_0}, /* ACH 1 */ + {16, 742, grp_0}, /* ACH 2 */ + {16, 742, grp_0}, /* ACH 3 */ + {0, 0, grp_0}, /* ACH 4 */ + {0, 0, grp_0}, /* ACH 5 */ + {0, 0, grp_0}, /* ACH 6 */ + {0, 0, grp_0}, /* ACH 7 */ + {15, 743, grp_0}, /* B0MGQ */ + {15, 743, grp_0}, /* B0HIQ */ + {0, 0, grp_0}, /* B1MGQ */ + {0, 0, grp_0}, /* B1HIQ */ + {40, 0, 0} /* FWCMDQ */ 
+}; + +static const struct rtw89_hfc_pub_cfg rtw8852bt_hfc_pubcfg_pcie = { + 958, /* Group 0 */ + 0, /* Group 1 */ + 958, /* Public Max */ + 0 /* WP threshold */ +}; + +static const struct rtw89_hfc_param_ini rtw8852bt_hfc_param_ini_pcie[] = { + [RTW89_QTA_SCC] = {rtw8852bt_hfc_chcfg_pcie, &rtw8852bt_hfc_pubcfg_pcie, + &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH}, + [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie, + RTW89_HCIFC_POH}, + [RTW89_QTA_INVALID] = {NULL}, +}; + +static const struct rtw89_dle_mem rtw8852bt_dle_mem_pcie[] = { + [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size23, + &rtw89_mac_size.ple_size9, &rtw89_mac_size.wde_qt23, + &rtw89_mac_size.wde_qt23, &rtw89_mac_size.ple_qt57, + &rtw89_mac_size.ple_qt59}, + [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size23, + &rtw89_mac_size.ple_size9, &rtw89_mac_size.wde_qt23, + &rtw89_mac_size.wde_qt23, &rtw89_mac_size.ple_qt57, + &rtw89_mac_size.ple_qt_52bt_wow}, + [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4, + &rtw89_mac_size.ple_size4, &rtw89_mac_size.wde_qt4, + &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13, + &rtw89_mac_size.ple_qt13}, + [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL, + NULL}, +}; + +static const u32 rtw8852bt_h2c_regs[RTW89_H2CREG_MAX] = { + R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2, + R_AX_H2CREG_DATA3 +}; + +static const u32 rtw8852bt_c2h_regs[RTW89_C2HREG_MAX] = { + R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, R_AX_C2HREG_DATA2, + R_AX_C2HREG_DATA3 +}; + +static const u32 rtw8852bt_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3, +}; + +static const struct rtw89_page_regs rtw8852bt_page_regs = { + .hci_fc_ctrl = R_AX_HCI_FC_CTRL, + .ch_page_ctrl = R_AX_CH_PAGE_CTRL, + .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL, + .ach_page_info = R_AX_ACH0_PAGE_INFO, + .pub_page_info3 = R_AX_PUB_PAGE_INFO3, + .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1, + .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2, + .pub_page_info1 = R_AX_PUB_PAGE_INFO1, + .pub_page_info2 = R_AX_PUB_PAGE_INFO2, + .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1, + .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2, + .wp_page_info1 = R_AX_WP_PAGE_INFO1, +}; + +static const struct rtw89_reg_def rtw8852bt_dcfo_comp = { + R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK +}; + +static const struct rtw89_imr_info rtw8852bt_imr_info = { + .wdrls_imr_set = B_AX_WDRLS_IMR_SET, + .wsec_imr_reg = R_AX_SEC_DEBUG, + .wsec_imr_set = B_AX_IMR_ERROR, + .mpdu_tx_imr_set = 0, + .mpdu_rx_imr_set = 0, + .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET, + .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_ERR_IMR_ISR, + .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR, + .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET, + .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_ERR_IMR_ISR_B1, + .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR, + .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET, + .wde_imr_clr = B_AX_WDE_IMR_CLR_V01, + .wde_imr_set = B_AX_WDE_IMR_SET_V01, + .ple_imr_clr = B_AX_PLE_IMR_CLR, + .ple_imr_set = B_AX_PLE_IMR_SET, + .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR, + .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET_V01, + .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR, + .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET, + .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR, + .other_disp_imr_set = 0, + .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR_ISR, + .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR, + .bbrpt_err_imr_set = 0, + .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR, + .ptcl_imr_clr = B_AX_PTCL_IMR_CLR_ALL, + 
.ptcl_imr_set = B_AX_PTCL_IMR_SET, + .cdma_imr_0_reg = R_AX_DLE_CTRL, + .cdma_imr_0_clr = B_AX_DLE_IMR_CLR, + .cdma_imr_0_set = B_AX_DLE_IMR_SET, + .cdma_imr_1_reg = 0, + .cdma_imr_1_clr = 0, + .cdma_imr_1_set = 0, + .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR, + .phy_intf_imr_clr = B_AX_PHYINFO_IMR_EN_ALL, + .phy_intf_imr_set = B_AX_PHYINFO_IMR_SET, + .rmac_imr_reg = R_AX_RMAC_ERR_ISR, + .rmac_imr_clr = B_AX_RMAC_IMR_CLR, + .rmac_imr_set = B_AX_RMAC_IMR_SET, + .tmac_imr_reg = R_AX_TMAC_ERR_IMR_ISR, + .tmac_imr_clr = B_AX_TMAC_IMR_CLR, + .tmac_imr_set = B_AX_TMAC_IMR_SET, +}; + +static const struct rtw89_rrsr_cfgs rtw8852bt_rrsr_cfgs = { + .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0}, + .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2}, +}; + +static const struct rtw89_rfkill_regs rtw8852bt_rfkill_regs = { + .pinmux = {R_AX_GPIO8_15_FUNC_SEL, + B_AX_PINMUX_GPIO9_FUNC_SEL_MASK, + 0xf}, + .mode = {R_AX_GPIO_EXT_CTRL + 2, + (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16, + 0x0}, +}; + +static const struct rtw89_dig_regs rtw8852bt_dig_regs = { + .seg0_pd_reg = R_SEG0R_PD_V1, + .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, + .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, + .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK}, + .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK}, + .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1}, + .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1}, + .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1}, + .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1}, + .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2, + B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK}, + .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2, + B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK}, + .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2, + B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK}, + .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2, + B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK}, +}; + +static const struct rtw89_edcca_regs rtw8852bt_edcca_regs = { + .edcca_level = R_SEG0R_EDCCA_LVL_V1, + .edcca_mask = B_EDCCA_LVL_MSK0, + .edcca_p_mask = B_EDCCA_LVL_MSK1, + .ppdu_level = R_SEG0R_EDCCA_LVL_V1, + .ppdu_mask = B_EDCCA_LVL_MSK3, + .rpt_a = R_EDCCA_RPT_A, + .rpt_b = R_EDCCA_RPT_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST, + .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M, +}; + +static const struct rtw89_btc_rf_trx_para rtw89_btc_8852bt_rf_ul[] = { + {255, 0, 0, 7}, /* 0 -> original */ + {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */ + {255, 0, 0, 7}, /* 2 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* 3- >reserved for shared-antenna */ + {255, 0, 0, 7}, /* 4 ->reserved for shared-antenna */ + {255, 1, 0, 7}, /* the below id is for non-shared-antenna free-run */ + {6, 1, 0, 7}, + {13, 1, 0, 7}, + {13, 1, 0, 7} +}; + +static const struct rtw89_btc_rf_trx_para rtw89_btc_8852bt_rf_dl[] = { + {255, 0, 0, 7}, /* 0 -> original */ + {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */ + {255, 0, 0, 7}, /* 2 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* 3- >reserved for shared-antenna */ + {255, 0, 0, 7}, /* 4 ->reserved for shared-antenna */ + {255, 1, 0, 7}, 
/* the below id is for non-shared-antenna free-run */ + {255, 1, 0, 7}, + {255, 1, 0, 7}, + {255, 1, 0, 7} +}; + +static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852bt_mon_reg[] = { + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda28), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda2c), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda10), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda20), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xcef4), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x8424), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd200), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd220), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x4aa4), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x4778), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x476c), +}; + +static const u8 rtw89_btc_8852bt_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {70, 60, 50, 40}; +static const u8 rtw89_btc_8852bt_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20}; + +static int rtw8852bt_pwr_on_func(struct rtw89_dev *rtwdev) +{ + u32 val32; + u32 ret; + + rtw89_write32_set(rtwdev, R_AX_LDO_AON_CTRL0, B_AX_PD_REGU_L); + rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN | + B_AX_AFSM_PCIE_SUS_EN); + rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC); + rtw89_write32_set(rtwdev, R_AX_WLLPS_CTRL, B_AX_DIS_WLBT_LPSEN_LOPC); + rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN); + rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS); + rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_OCP_L1_MASK, 7); + + ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR, + 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL); + if (ret) + return ret; + + rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON); + rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC); + + ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFN_ONMAC), + 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL); + if (ret) + return ret; + + rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); + rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); + rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); + rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); + rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); + rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1); + rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3); + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, + XTAL_SI_GND_SHDN_WL, XTAL_SI_GND_SHDN_WL); + if (ret) + return ret; + + rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3); + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, + XTAL_SI_SHDN_WL, XTAL_SI_SHDN_WL); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI, + XTAL_SI_OFF_WEI); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_EI, + XTAL_SI_OFF_EI); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_RFC2RF); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_WEI, + XTAL_SI_PON_WEI); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_EI, + XTAL_SI_PON_EI); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 
0, XTAL_SI_SRAM2RFC); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_SRAM_CTRL, 0, XTAL_SI_SRAM_DIS); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP); + if (ret) + return ret; + + rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK); + rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE); + rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15); + + fsleep(1000); + + rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14); + rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK); + + if (!rtwdev->efuse.valid || rtwdev->efuse.power_k_valid) + goto func_en; + + rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VOL_L1_MASK, 0x9); + rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VREFPFM_L_MASK, 0xA); + +func_en: + rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN, + B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MPDU_PROC_EN | + B_AX_WD_RLS_EN | B_AX_DLE_WDE_EN | B_AX_TXPKT_CTRL_EN | + B_AX_STA_SCH_EN | B_AX_DLE_PLE_EN | B_AX_PKT_BUF_EN | + B_AX_DMAC_TBL_EN | B_AX_PKT_IN_EN | B_AX_DLE_CPUIO_EN | + B_AX_DISPATCHER_EN | B_AX_BBRPT_EN | B_AX_MAC_SEC_EN | + B_AX_DMACREG_GCKEN); + rtw89_write32_set(rtwdev, R_AX_CMAC_FUNC_EN, + B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN | + B_AX_FORCE_CMACREG_GCKEN | B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | + B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN | B_AX_TMAC_EN | + B_AX_RMAC_EN); + + rtw89_write32_mask(rtwdev, R_AX_EECS_EESK_FUNC_SEL, + B_AX_PINMUX_EESK_FUNC_SEL_MASK, 0x1); + + return 0; +} + +static int rtw8852bt_pwr_off_func(struct rtw89_dev *rtwdev) +{ + u32 val32; + u32 ret; + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF, + XTAL_SI_RFC2RF); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_EI); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_WEI); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0, XTAL_SI_RF00); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0, XTAL_SI_RF10); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_SRAM2RFC, + XTAL_SI_SRAM2RFC); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_EI); + if (ret) + return ret; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_WEI); + if (ret) + return ret; + + rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON); + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB); + rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3); + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SHDN_WL); + if (ret) + return ret; + + rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3); + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_GND_SHDN_WL); + if (ret) + return ret; + + rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_OFFMAC); + + ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFM_OFFMAC), + 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL); + if (ret) + return ret; + + rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION); + 
rtw89_write32_set(rtwdev, R_AX_SYS_SWR_CTRL1, B_AX_SYM_CTRL_SPS_PWMFREQ); + rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x3); + rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS); + + return 0; +} + +static void rtw8852bt_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band, + enum rtw89_phy_idx phy_idx, bool en) +{ + if (en) { + rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, + B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, + B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); + if (band == RTW89_BAND_2G) + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0); + } else { + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1); + rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, + B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, + B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); + fsleep(1); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx); + } +} + +static void rtw8852bt_bb_reset(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, + B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI, 0x1); + rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, + B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI, 0x1); + rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); + rtw8852bx_bb_reset_all(rtwdev, phy_idx); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, + B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI, 3); + rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, + B_P1_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI, 0x3); + rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); +} + +static void rtw8852bt_set_channel(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_mac_idx mac_idx, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_set_channel_mac(rtwdev, chan, mac_idx); + rtw8852bx_set_channel_bb(rtwdev, chan, phy_idx); + rtw8852bt_set_channel_rf(rtwdev, chan, phy_idx); +} + +static void rtw8852bt_tssi_cont_en(struct rtw89_dev *rtwdev, bool en, + enum rtw89_rf_path path) +{ + static const u32 tssi_trk[2] = {R_P0_TSSI_TRK, R_P1_TSSI_TRK}; + + if (en) + rtw89_phy_write32_mask(rtwdev, tssi_trk[path], B_P0_TSSI_TRK_EN, 0x0); + else + rtw89_phy_write32_mask(rtwdev, tssi_trk[path], B_P0_TSSI_TRK_EN, 0x1); +} + +static void rtw8852bt_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, + u8 phy_idx, const struct rtw89_chan *chan) +{ + if (!rtwdev->dbcc_en) { + rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A); + rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_B); + rtw8852bt_tssi_scan(rtwdev, phy_idx, chan); + } else { + if (phy_idx == RTW89_PHY_0) + rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A); + else + rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_B); + } +} + +static void rtw8852bt_adc_en(struct rtw89_dev *rtwdev, bool en) +{ + if (en) + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0); + else + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0xf); +} + +static void rtw8852bt_set_channel_help(struct rtw89_dev *rtwdev, bool enter, + struct rtw89_channel_help_params *p, + const struct rtw89_chan *chan, + enum rtw89_mac_idx mac_idx, + enum rtw89_phy_idx phy_idx) +{ + if 
(enter) { + rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL); + rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false); + rtw8852bt_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0, chan); + rtw8852bt_adc_en(rtwdev, false); + fsleep(40); + rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, false); + } else { + rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); + rtw8852bt_adc_en(rtwdev, true); + rtw8852bt_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0, chan); + rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, true); + rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en); + } +} + +static void rtw8852bt_rfk_init(struct rtw89_dev *rtwdev) +{ + rtwdev->is_tssi_mode[RF_PATH_A] = false; + rtwdev->is_tssi_mode[RF_PATH_B] = false; + + rtw8852bt_dpk_init(rtwdev); + rtw8852bt_rck(rtwdev); + rtw8852bt_dack(rtwdev, RTW89_CHANCTX_0); + rtw8852bt_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0); +} + +static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx; + enum rtw89_phy_idx phy_idx = rtwvif->phy_idx; + + rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx); + rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx); + rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx); + rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx); +} + +static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) +{ + rtw8852bt_tssi_scan(rtwdev, phy_idx, chan); +} + +static void rtw8852bt_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool start) +{ + rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx); +} + +static void rtw8852bt_rfk_track(struct rtw89_dev *rtwdev) +{ + rtw8852bt_dpk_track(rtwdev); +} + +static void rtw8852bt_btc_set_rfe(struct rtw89_dev *rtwdev) +{ + const struct rtw89_btc_ver *ver = rtwdev->btc.ver; + union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo; + + if (ver->fcxinit == 7) { + md->md_v7.rfe_type = rtwdev->efuse.rfe_type; + md->md_v7.kt_ver = rtwdev->hal.cv; + md->md_v7.kt_ver_adie = rtwdev->hal.acv; + md->md_v7.bt_solo = 0; + md->md_v7.bt_pos = BTC_BT_BTG; + md->md_v7.switch_type = BTC_SWITCH_INTERNAL; + md->md_v7.wa_type = 0; + + md->md_v7.ant.type = BTC_ANT_SHARED; + md->md_v7.ant.num = 2; + md->md_v7.ant.isolation = 10; + md->md_v7.ant.diversity = 0; + /* WL 1-stream+1-Ant is located at 0:s0(path-A) or 1:s1(path-B) */ + md->md_v7.ant.single_pos = RF_PATH_A; + md->md_v7.ant.btg_pos = RF_PATH_B; + + if (md->md_v7.rfe_type == 0) { + rtwdev->btc.dm.error.map.rfe_type0 = true; + return; + } + + md->md_v7.ant.num = (md->md_v7.rfe_type % 2) ? 
2 : 3; + md->md_v7.ant.stream_cnt = 2; + md->md_v7.wa_type |= BTC_WA_INIT_SCAN; + + if (md->md_v7.ant.num == 2) { + md->md_v7.ant.type = BTC_ANT_SHARED; + md->md_v7.bt_pos = BTC_BT_BTG; + md->md_v7.wa_type |= BTC_WA_HFP_LAG; + } else { + md->md_v7.ant.type = BTC_ANT_DEDICATED; + md->md_v7.bt_pos = BTC_BT_ALONE; + } + } else { + return; + } +} + +static void +rtw8852bt_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val) +{ + u16 ctrl_all_time = u32_get_bits(txpwr_val, GENMASK(15, 0)); + u16 ctrl_gnt_bt = u32_get_bits(txpwr_val, GENMASK(31, 16)); + + switch (ctrl_all_time) { + case 0xffff: + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL, + B_AX_FORCE_PWR_BY_RATE_EN, 0x0); + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL, + B_AX_FORCE_PWR_BY_RATE_VALUE_MASK, 0x0); + break; + default: + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL, + B_AX_FORCE_PWR_BY_RATE_VALUE_MASK, + ctrl_all_time); + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_RATE_CTRL, + B_AX_FORCE_PWR_BY_RATE_EN, 0x1); + break; + } + + switch (ctrl_gnt_bt) { + case 0xffff: + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL, + B_AX_TXAGC_BT_EN, 0x0); + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL, + B_AX_TXAGC_BT_MASK, 0x0); + break; + default: + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL, + B_AX_TXAGC_BT_MASK, ctrl_gnt_bt); + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_AX_PWR_COEXT_CTRL, + B_AX_TXAGC_BT_EN, 0x1); + break; + } +} + +static const struct rtw89_chip_ops rtw8852bt_chip_ops = { + .enable_bb_rf = rtw8852bx_mac_enable_bb_rf, + .disable_bb_rf = rtw8852bx_mac_disable_bb_rf, + .bb_preinit = NULL, + .bb_postinit = NULL, + .bb_reset = rtw8852bt_bb_reset, + .bb_sethw = rtw8852bx_bb_sethw, + .read_rf = rtw89_phy_read_rf_v1, + .write_rf = rtw89_phy_write_rf_v1, + .set_channel = rtw8852bt_set_channel, + .set_channel_help = rtw8852bt_set_channel_help, + .read_efuse = rtw8852bx_read_efuse, + .read_phycap = rtw8852bx_read_phycap, + .fem_setup = NULL, + .rfe_gpio = NULL, + .rfk_hw_init = NULL, + .rfk_init = rtw8852bt_rfk_init, + .rfk_init_late = NULL, + .rfk_channel = rtw8852bt_rfk_channel, + .rfk_band_changed = rtw8852bt_rfk_band_changed, + .rfk_scan = rtw8852bt_rfk_scan, + .rfk_track = rtw8852bt_rfk_track, + .power_trim = rtw8852bx_power_trim, + .set_txpwr = rtw8852bx_set_txpwr, + .set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl, + .init_txpwr_unit = rtw8852bx_init_txpwr_unit, + .get_thermal = rtw8852bx_get_thermal, + .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx, + .query_ppdu = rtw8852bx_query_ppdu, + .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi, + .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx, + .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path, + .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset, + .digital_pwr_comp = NULL, + .pwr_on_func = rtw8852bt_pwr_on_func, + .pwr_off_func = rtw8852bt_pwr_off_func, + .query_rxdesc = rtw89_core_query_rxdesc, + .fill_txdesc = rtw89_core_fill_txdesc, + .fill_txdesc_fwcmd = rtw89_core_fill_txdesc, + .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path, + .mac_cfg_gnt = rtw89_mac_cfg_gnt, + .stop_sch_tx = rtw89_mac_stop_sch_tx, + .resume_sch_tx = rtw89_mac_resume_sch_tx, + .h2c_dctl_sec_cam = NULL, + .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl, + .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl, + .h2c_ampdu_cmac_tbl = NULL, + .h2c_default_dmac_tbl = NULL, + .h2c_update_beacon = rtw89_fw_h2c_update_beacon, + .h2c_ba_cam = 
rtw89_fw_h2c_ba_cam, + + .btc_set_rfe = rtw8852bt_btc_set_rfe, + .btc_init_cfg = rtw8852bx_btc_init_cfg, + .btc_set_wl_pri = rtw8852bx_btc_set_wl_pri, + .btc_set_wl_txpwr_ctrl = rtw8852bt_btc_set_wl_txpwr_ctrl, + .btc_get_bt_rssi = rtw8852bx_btc_get_bt_rssi, + .btc_update_bt_cnt = rtw8852bx_btc_update_bt_cnt, + .btc_wl_s1_standby = rtw8852bx_btc_wl_s1_standby, + .btc_set_wl_rx_gain = rtw8852bx_btc_set_wl_rx_gain, + .btc_set_policy = rtw89_btc_set_policy_v1, +}; + +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support rtw_wowlan_stub_8852bt = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .n_patterns = RTW89_MAX_PATTERN_NUM, + .pattern_max_len = RTW89_MAX_PATTERN_SIZE, + .pattern_min_len = 1, +}; +#endif + +const struct rtw89_chip_info rtw8852bt_chip_info = { + .chip_id = RTL8852BT, + .chip_gen = RTW89_CHIP_AX, + .ops = &rtw8852bt_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, + .fw_basename = RTW8852BT_FW_BASENAME, + .fw_format_max = RTW8852BT_FW_FORMAT_MAX, + .try_ce_fw = true, + .bbmcu_nr = 0, + .needed_fw_elms = RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ, + .fifo_size = 458752, + .small_fifo_size = true, + .dle_scc_rsvd_size = 98304, + .max_amsdu_limit = 5000, + .dis_2g_40m_ul_ofdma = true, + .rsvd_ple_ofst = 0x6f800, + .hfc_param_ini = rtw8852bt_hfc_param_ini_pcie, + .dle_mem = rtw8852bt_dle_mem_pcie, + .wde_qempty_acq_grpnum = 4, + .wde_qempty_mgq_grpsel = 4, + .rf_base_addr = {0xe000, 0xf000}, + .pwr_on_seq = NULL, + .pwr_off_seq = NULL, + .bb_table = NULL, + .bb_gain_table = NULL, + .rf_table = {}, + .nctl_table = NULL, + .nctl_post_table = NULL, + .dflt_parms = NULL, + .rfe_parms_conf = NULL, + .txpwr_factor_rf = 2, + .txpwr_factor_mac = 1, + .dig_table = NULL, + .dig_regs = &rtw8852bt_dig_regs, + .tssi_dbw_table = NULL, + .support_macid_num = RTW89_MAX_MAC_ID_NUM, + .support_link_num = 0, + .support_chanctx_num = 1, + .support_rnr = false, + .support_bands = BIT(NL80211_BAND_2GHZ) | + BIT(NL80211_BAND_5GHZ), + .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), + .support_unii4 = true, + .ul_tb_waveform_ctrl = true, + .ul_tb_pwr_diff = false, + .hw_sec_hdr = false, + .hw_mgmt_tx_encrypt = false, + .rf_path_num = 2, + .tx_nss = 2, + .rx_nss = 2, + .acam_num = 128, + .bcam_num = 10, + .scam_num = 128, + .bacam_num = 2, + .bacam_dynamic_num = 4, + .bacam_ver = RTW89_BACAM_V0, + .ppdu_max_usr = 4, + .sec_ctrl_efuse_size = 4, + .physical_efuse_size = 1216, + .logical_efuse_size = 2048, + .limit_efuse_size = 1280, + .dav_phy_efuse_size = 96, + .dav_log_efuse_size = 16, + .efuse_blocks = NULL, + .phycap_addr = 0x580, + .phycap_size = 128, + .para_ver = 0, + .wlcx_desired = 0x070e0000, + .btcx_desired = 0x7, + .scbd = 0x1, + .mailbox = 0x1, + + .afh_guard_ch = 6, + .wl_rssi_thres = rtw89_btc_8852bt_wl_rssi_thres, + .bt_rssi_thres = rtw89_btc_8852bt_bt_rssi_thres, + .rssi_tol = 2, + .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852bt_mon_reg), + .mon_reg = rtw89_btc_8852bt_mon_reg, + .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852bt_rf_ul), + .rf_para_ulink = rtw89_btc_8852bt_rf_ul, + .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852bt_rf_dl), + .rf_para_dlink = rtw89_btc_8852bt_rf_dl, + .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) | + BIT(RTW89_PS_MODE_CLK_GATED) | + BIT(RTW89_PS_MODE_PWR_GATED), + .low_power_hci_modes = 0, + .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD, + .hci_func_en_addr = R_AX_HCI_FUNC_EN, + .h2c_desc_size = sizeof(struct rtw89_txwd_body), + .txwd_body_size = 
sizeof(struct rtw89_txwd_body), + .txwd_info_size = sizeof(struct rtw89_txwd_info), + .h2c_ctrl_reg = R_AX_H2CREG_CTRL, + .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, + .h2c_regs = rtw8852bt_h2c_regs, + .c2h_ctrl_reg = R_AX_C2HREG_CTRL, + .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, + .c2h_regs = rtw8852bt_c2h_regs, + .page_regs = &rtw8852bt_page_regs, + .wow_reason_reg = rtw8852bt_wow_wakeup_regs, + .cfo_src_fd = true, + .cfo_hw_comp = true, + .dcfo_comp = &rtw8852bt_dcfo_comp, + .dcfo_comp_sft = 10, + .imr_info = &rtw8852bt_imr_info, + .imr_dmac_table = NULL, + .imr_cmac_table = NULL, + .rrsr_cfgs = &rtw8852bt_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0}, + .bss_clr_map_reg = R_BSS_CLR_MAP_V1, + .rfkill_init = &rtw8852bt_rfkill_regs, + .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9}, + .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) | + BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) | + BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI), + .edcca_regs = &rtw8852bt_edcca_regs, +#ifdef CONFIG_PM + .wowlan_stub = &rtw_wowlan_stub_8852bt, +#endif + .xtal_info = NULL, +}; +EXPORT_SYMBOL(rtw8852bt_chip_info); + +MODULE_FIRMWARE(RTW8852BT_MODULE_FIRMWARE); +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BT driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h index 6177f36ad667b8..b76b36aaf02509 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h @@ -10,4 +10,6 @@ #define RF_PATH_NUM_8852BT 2 #define BB_PATH_NUM_8852BT 2 +extern const struct rtw89_chip_info rtw8852bt_chip_info; + #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c index fa0e49d581126d..336a83e1d46be9 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c @@ -1525,9 +1525,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u lok_result, txk_result, rxk_result); } -static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path) +static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u8 get_empty_table = false; u32 reg_rf18; @@ -1755,12 +1756,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx } static void _doiqk(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u32 backup_bb_val[BACKUP_BB_REGS_NR]; u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR]; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); @@ -1770,7 +1772,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, iqk_info->version = RTW8852BT_IQK_VER; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); - _iqk_get_ch_info(rtwdev, phy_idx, path); + 
_iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx); _rfk_backup_bb_reg(rtwdev, backup_bb_val); _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); @@ -1785,20 +1787,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); } -static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force, + enum rtw89_chanctx_idx chanctx_idx) { u8 kpath = _kpath(rtwdev, phy_idx); switch (kpath) { case RF_A: - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); break; case RF_B: - _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx); break; case RF_AB: - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); - _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx); break; default: break; @@ -1824,7 +1827,7 @@ static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool o BIT(24), val); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, - kidx, dpk->is_dpk_enable & off_reverse ? "enable" : "disable"); + kidx, str_enable_disable(dpk->is_dpk_enable & off_reverse)); } static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, @@ -1863,7 +1866,7 @@ static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, id == 0x14 ? "PWR_CAL" : id == 0x15 ? "DPK_RXAGC" : id == 0x16 ? "KIP_PRESET" : - id == 0x17 ? "KIP_RESOTRE" : + id == 0x17 ? "KIP_RESTORE" : "DPK_TXAGC", dpk_cmd); } @@ -1879,9 +1882,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 kidx = dpk->cur_idx[path]; @@ -2277,9 +2280,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kidx, u8 init_txagc, - bool loss_only) + bool loss_only, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0; u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0; @@ -2504,9 +2507,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 idx, cur_band, cur_ch; bool is_reload = false; @@ -2549,7 +2552,8 @@ void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool i } static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, u8 gain) + enum rtw89_rf_path path, u8 gain, 
+ enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 txagc = 0x38, kidx = dpk->cur_idx[path]; @@ -2569,7 +2573,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, _dpk_kip_set_rxagc(rtwdev, phy, path); _dpk_table_select(rtwdev, path, kidx, gain); - txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false); + txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx); _rfk_get_thermal(rtwdev, kidx, path); @@ -2601,7 +2605,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _dpk_cal_select(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, u8 kpath) + enum rtw89_phy_idx phy, u8 kpath, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u32 backup_kip_val[BACKUP_KIP_REGS_NR]; @@ -2611,7 +2616,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, u8 path; for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) { - reloaded[path] = _dpk_reload_check(rtwdev, phy, path); + reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx); if (!reloaded[path] && dpk->bp[path][0].ch != 0) dpk->cur_idx[path] = !dpk->cur_idx[path]; else @@ -2623,7 +2628,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) { _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); - _dpk_information(rtwdev, phy, path); + _dpk_information(rtwdev, phy, path, chanctx_idx); if (rtwdev->is_tssi_mode[path]) _dpk_tssi_pause(rtwdev, path, true); } @@ -2631,7 +2636,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, _rfk_bb_afe_setting(rtwdev, phy, path, kpath); for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) - _dpk_main(rtwdev, phy, path, 1); + _dpk_main(rtwdev, phy, path, 1, chanctx_idx); _rfk_bb_afe_restore(rtwdev, phy, path, kpath); @@ -2646,9 +2651,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, } } -static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_fem_info *fem = &rtwdev->fem; if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) { @@ -2817,9 +2823,8 @@ static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) } static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; if (band == RTW89_BAND_2G) @@ -2829,9 +2834,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl); @@ -2878,7 +2882,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { #define RTW8852BT_TSSI_GET_VAL(ptr, idx) \ ({ \ @@ -2893,7 
+2897,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph }) struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk; struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 subband = chan->subband_type; const s8 *thm_up_a = NULL; @@ -3047,9 +3050,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx } static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; if (path == RF_PATH_A) @@ -3063,9 +3065,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy } static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path, bool all) + enum rtw89_rf_path path, bool all, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; const struct rtw89_rfk_tbl *tbl = NULL; u8 ch = chan->channel; @@ -3310,10 +3312,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) } static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u32 gidx, gidx_1st, gidx_2nd; s8 de_1st; @@ -3346,10 +3347,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u32 tgidx, tgidx_1st, tgidx_2nd; s8 tde_1st; @@ -3383,10 +3383,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph return val; } -static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 gidx; s8 ofdm_de; @@ -3399,7 +3399,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) { gidx = _tssi_get_cck_group(rtwdev, ch); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = tssi_info->tssi_cck[i][gidx] + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3415,8 +3415,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK)); - ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = ofdm_de + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3463,10 +3463,10 @@ static void 
_tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p } static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, enum rtw89_rf_path path) + enum rtw89_phy_idx phy, enum rtw89_rf_path path, + const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 channel = chan->channel; u8 band; @@ -3500,7 +3500,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm, - u8 enable) + u8 enable, const struct rtw89_chan *chan) { enum rtw89_rf_path_bit rx_path; @@ -3516,11 +3516,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, if (enable) { rtw8852bx_bb_set_plcp_tx(rtwdev); rtw8852bx_bb_cfg_tx_path(rtwdev, path); - rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan); rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy); } - rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); + rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan); } static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev, @@ -3574,7 +3574,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel) static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, const s16 *power, - u32 *tssi_cw_rpt) + u32 *tssi_cw_rpt, const struct rtw89_chan *chan) { u32 tx_counter, tx_counter_tmp; const int retry = 100; @@ -3593,9 +3593,11 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy _tssi_trigger[path], tmp, path); if (j == 0) - _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true); + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true, + chan); else - _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true); + _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true, + chan); tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); tx_counter_tmp -= tx_counter; @@ -3626,7 +3628,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n", k, path); - _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan); return false; } @@ -3634,7 +3636,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT); - _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan); tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); tx_counter_tmp -= tx_counter; @@ -3648,14 +3650,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy } static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4, 0x78e4, 0x49c0, 0x0d18, 0x0d80}; static const s16 power_2g[4] = {48, 20, 4, -8}; static const s16 power_5g[4] = {48, 20, 4, 4}; struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3; u32 
tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {}; u8 channel = chan->channel; @@ -3701,7 +3702,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2); rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2); - ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt); + ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan); if (!ok) goto out; @@ -3833,18 +3834,19 @@ void rtw8852bt_rck(struct rtw89_dev *rtwdev) _rck(rtwdev, path); } -void rtw8852bt_dack(struct rtw89_dev *rtwdev) +void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); _dac_cal(rtwdev, false); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); } -void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); @@ -3852,15 +3854,16 @@ void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); _iqk_init(rtwdev); - _iqk(rtwdev, phy_idx, false); + _iqk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); } -void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); @@ -3873,15 +3876,16 @@ void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); } -void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER); - if (_dpk_bypass_check(rtwdev, phy_idx)) + if (_dpk_bypass_check(rtwdev, phy_idx, chanctx_idx)) _dpk_force_bypass(rtwdev, phy_idx); else - _dpk_cal_select(rtwdev, phy_idx, RF_AB); + _dpk_cal_select(rtwdev, phy_idx, RF_AB, chanctx_idx); } void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev) @@ -3889,10 +3893,12 @@ void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev) _dpk_track(rtwdev); } -void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en) +void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx) { + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B}; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx); u32 reg_backup[2] = {}; u32 tx_en; u8 i; @@ -3905,36 +3911,36 @@ void rtw8852bt_tssi(struct rtw89_dev 
*rtwdev, enum rtw89_phy_idx phy, bool hwtx_ _tssi_disable(rtwdev, phy); for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) { - _tssi_rf_setting(rtwdev, phy, i); - _tssi_set_sys(rtwdev, phy, i); + _tssi_rf_setting(rtwdev, phy, i, chan); + _tssi_set_sys(rtwdev, phy, i, chan); _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); _tssi_set_dck(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); _tssi_set_dac_gain_tbl(rtwdev, phy, i); - _tssi_slope_cal_org(rtwdev, phy, i); - _tssi_alignment_default(rtwdev, phy, i, true); + _tssi_slope_cal_org(rtwdev, phy, i, chan); + _tssi_alignment_default(rtwdev, phy, i, true, chan); _tssi_set_tssi_slope(rtwdev, phy, i); rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL); _tmac_tx_pause(rtwdev, phy, true); if (hwtx_en) - _tssi_alimentk(rtwdev, phy, i); + _tssi_alimentk(rtwdev, phy, i, chan); _tmac_tx_pause(rtwdev, phy, false); rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); _tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); } -void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; u8 channel = chan->channel; u8 band; @@ -3957,24 +3963,25 @@ void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) _tssi_disable(rtwdev, phy); for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) { - _tssi_rf_setting(rtwdev, phy, i); - _tssi_set_sys(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_rf_setting(rtwdev, phy, i, chan); + _tssi_set_sys(rtwdev, phy, i, chan); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); if (tssi_info->alignment_done[i][band]) - _tssi_alimentk_done(rtwdev, phy, i); + _tssi_alimentk_done(rtwdev, phy, i, chan); else - _tssi_alignment_default(rtwdev, phy, i, true); + _tssi_alignment_default(rtwdev, phy, i, true, chan); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); } static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, bool enable) + enum rtw89_phy_idx phy, bool enable, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 channel = chan->channel; rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n", @@ -3996,8 +4003,8 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev, rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); - _tssi_alimentk_done(rtwdev, phy, RF_PATH_A); - _tssi_alimentk_done(rtwdev, phy, RF_PATH_B); + _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan); + _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan); rtw89_debug(rtwdev, RTW89_DBG_RFK, "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n", @@ -4010,10 +4017,237 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev, } void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx 
phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { if (scan_start) - rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true); + rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx); else - rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false); + rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx); +} + +static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + enum rtw89_bandwidth bw, bool dav) +{ + u32 rf_reg18; + u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__); + + rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK); + if (rf_reg18 == INV_RF_DATA) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]Invalid RF_0x18 for Path-%d\n", path); + return; + } + rf_reg18 &= ~RR_CFGCH_BW; + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + case RTW89_CHANNEL_WIDTH_10: + case RTW89_CHANNEL_WIDTH_20: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M); + break; + case RTW89_CHANNEL_WIDTH_40: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M); + break; + case RTW89_CHANNEL_WIDTH_80: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M); + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n"); + } + + rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | + RR_CFGCH_BW2) & RFREG_MASK; + rf_reg18 |= RR_CFGCH_BW2; + rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n", + bw, path, reg18_addr, + rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK)); +} + +static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_bandwidth bw) +{ + _bw_setting(rtwdev, RF_PATH_A, bw, true); + _bw_setting(rtwdev, RF_PATH_B, bw, true); + _bw_setting(rtwdev, RF_PATH_A, bw, false); + _bw_setting(rtwdev, RF_PATH_B, bw, false); +} + +static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val) +{ + u32 tmp; + int ret; + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val); + + ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000, + false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n"); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); + return !!ret; +} + +static void _lck_check(struct rtw89_dev *rtwdev) +{ + u32 tmp; + + if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n"); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0); + } + + udelay(10); + + if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n"); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); + tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); + _set_s0_arfc18(rtwdev, tmp); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); + } + + if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n"); + + tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp); + tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, 
RFREG_MASK); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); + tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); + _set_s0_arfc18(rtwdev, tmp); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n", + rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK), + rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK)); + } +} + +static void _set_ch(struct rtw89_dev *rtwdev, u32 val) +{ + bool timeout; + u32 bak; + + bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1); + timeout = _set_s0_arfc18(rtwdev, val); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak); + if (!timeout) + _lck_check(rtwdev); +} + +static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + u8 central_ch, bool dav) +{ + u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; + bool is_2g_ch = central_ch <= 14; + u32 rf_reg18; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__); + + rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK); + rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | + RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH); + rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch); + + if (!is_2g_ch) + rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) | + FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G); + + rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | + RR_CFGCH_BW2) & RFREG_MASK; + rf_reg18 |= RR_CFGCH_BW2; + + if (path == RF_PATH_A && dav) + _set_ch(rtwdev, rf_reg18); + else + rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18); + + rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0); + rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n", + central_ch, path, reg18_addr, + rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK)); +} + +static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch) +{ + _ch_setting(rtwdev, RF_PATH_A, central_ch, true); + _ch_setting(rtwdev, RF_PATH_B, central_ch, true); + _ch_setting(rtwdev, RF_PATH_A, central_ch, false); + _ch_setting(rtwdev, RF_PATH_B, central_ch, false); +} + +static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw, + enum rtw89_rf_path path) +{ + rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12); + + if (bw == RTW89_CHANNEL_WIDTH_20) + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b); + else if (bw == RTW89_CHANNEL_WIDTH_40) + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13); + else if (bw == RTW89_CHANNEL_WIDTH_80) + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb); + else + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", + path, rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB)); + + rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0); +} + +static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_bandwidth bw) +{ + u8 kpath, path; + + kpath = 
_kpath(rtwdev, phy); + + for (path = 0; path < RF_PATH_NUM_8852BT; path++) { + if (!(kpath & BIT(path))) + continue; + + _set_rxbb_bw(rtwdev, bw, path); + } +} + +static void rtw8852bt_ctrl_bw_ch(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, u8 central_ch, + enum rtw89_band band, enum rtw89_bandwidth bw) +{ + _ctrl_ch(rtwdev, central_ch); + _ctrl_bw(rtwdev, phy, bw); + _rxbb_bw(rtwdev, phy, bw); +} + +void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bt_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type, + chan->band_width); +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h index 09918835c6e85d..e34560b4905f8e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h @@ -8,15 +8,24 @@ #include "core.h" void rtw8852bt_rck(struct rtw89_dev *rtwdev); -void rtw8852bt_dack(struct rtw89_dev *rtwdev); -void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); -void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx); +void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); +void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev); -void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev); -void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en); -void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx); +void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan); void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); +void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c new file mode 100644 index 00000000000000..70294811964686 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "pci.h" +#include "reg.h" +#include "rtw8852bt.h" + +static const struct rtw89_pci_info rtw8852bt_pci_info = { + .gen_def = &rtw89_pci_gen_ax, + .txbd_trunc_mode = MAC_AX_BD_TRUNC, + .rxbd_trunc_mode = MAC_AX_BD_TRUNC, + .rxbd_mode = MAC_AX_RXBD_PKT, + .tag_mode = MAC_AX_TAG_MULTI, + .tx_burst = MAC_AX_TX_BURST_2048B, + .rx_burst = MAC_AX_RX_BURST_128B, + .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS, + .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS, + .multi_tag_num = MAC_AX_TAG_NUM_8, + .lbc_en = MAC_AX_PCIE_ENABLE, + .lbc_tmr = MAC_AX_LBC_TMR_2MS, + .autok_en = MAC_AX_PCIE_DISABLE, + .io_rcy_en = MAC_AX_PCIE_DISABLE, + .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS, +
.rx_ring_eq_is_full = false, + .check_rx_tag = false, + + .init_cfg_reg = R_AX_PCIE_INIT_CFG1, + .txhci_en_bit = B_AX_TXHCI_EN, + .rxhci_en_bit = B_AX_RXHCI_EN, + .rxbd_mode_bit = B_AX_RXBD_MODE, + .exp_ctrl_reg = R_AX_PCIE_EXP_CTRL, + .max_tag_num_mask = B_AX_MAX_TAG_NUM, + .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR, + .txbd_rwptr_clr2_reg = 0, + .dma_io_stop = {R_AX_PCIE_DMA_STOP1, B_AX_STOP_PCIEIO}, + .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1}, + .dma_stop2 = {0}, + .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1}, + .dma_busy2_reg = 0, + .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1, + + .rpwm_addr = R_AX_PCIE_HRPWM, + .cpwm_addr = R_AX_CPWM, + .mit_addr = R_AX_INT_MIT_RX, + .wp_sel_addr = 0, + .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) | + BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) | + BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11), + .bd_idx_addr_low_power = NULL, + .dma_addr_set = &rtw89_pci_ch_dma_addr_set, + .bd_ram_table = &rtw89_bd_ram_table_single, + + .ltr_set = rtw89_pci_ltr_set, + .fill_txaddr_info = rtw89_pci_fill_txaddr_info, + .config_intr_mask = rtw89_pci_config_intr_mask, + .enable_intr = rtw89_pci_enable_intr, + .disable_intr = rtw89_pci_disable_intr, + .recognize_intrs = rtw89_pci_recognize_intrs, +}; + +static const struct rtw89_driver_info rtw89_8852bte_info = { + .chip = &rtw8852bt_chip_info, + .quirks = NULL, + .bus = { + .pci = &rtw8852bt_pci_info, + }, +}; + +static const struct pci_device_id rtw89_8852bte_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb520), + .driver_data = (kernel_ulong_t)&rtw89_8852bte_info, + }, + {}, +}; +MODULE_DEVICE_TABLE(pci, rtw89_8852bte_id_table); + +static struct pci_driver rtw89_8852bte_driver = { + .name = "rtw89_8852bte", + .id_table = rtw89_8852bte_id_table, + .probe = rtw89_pci_probe, + .remove = rtw89_pci_remove, + .driver.pm = &rtw89_pm_ops, +}; +module_pci_driver(rtw89_8852bte_driver); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BE-VT driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 193168dc7b6c35..1c6e89ab0f4bcb 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -14,10 +14,10 @@ #include "rtw8852c_table.h" #include "util.h" -#define RTW8852C_FW_FORMAT_MAX 0 +#define RTW8852C_FW_FORMAT_MAX 1 #define RTW8852C_FW_BASENAME "rtw89/rtw8852c_fw" #define RTW8852C_MODULE_FIRMWARE \ - RTW8852C_FW_BASENAME ".bin" + RTW8852C_FW_BASENAME "-" __stringify(RTW8852C_FW_FORMAT_MAX) ".bin" static const struct rtw89_hfc_ch_cfg rtw8852c_hfc_chcfg_pcie[] = { {13, 1614, grp_0}, /* ACH 0 */ @@ -147,6 +147,15 @@ static const struct rtw89_rrsr_cfgs rtw8852c_rrsr_cfgs = { .rsc = {R_AX_PTCL_RRSR1, B_AX_RSC_MASK, 2}, }; +static const struct rtw89_rfkill_regs rtw8852c_rfkill_regs = { + .pinmux = {R_AX_GPIO8_15_FUNC_SEL, + B_AX_PINMUX_GPIO9_FUNC_SEL_MASK, + 0xf}, + .mode = {R_AX_GPIO_EXT_CTRL + 2, + (B_AX_GPIO_MOD_9 | B_AX_GPIO_IO_SEL_9) >> 16, + 0x0}, +}; + static const struct rtw89_dig_regs rtw8852c_dig_regs = { .seg0_pd_reg = R_SEG0R_PD, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, @@ -1808,7 +1817,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter, RTW89_SCH_TX_SEL_ALL); rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false); rtw8852c_dfs_en(rtwdev, false); - rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx); + rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx, 
chan); rtw8852c_adc_en(rtwdev, false); fsleep(40); rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, false); @@ -1816,7 +1825,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter, rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true); rtw8852c_adc_en(rtwdev, true); rtw8852c_dfs_en(rtwdev, true); - rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx); + rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx, chan); rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, true); rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en); } @@ -1833,31 +1842,34 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) rtw8852c_dpk_init(rtwdev); rtw8852c_rck(rtwdev); - rtw8852c_dack(rtwdev); + rtw8852c_dack(rtwdev, RTW89_CHANCTX_0); rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false); } -static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) +static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx; + enum rtw89_phy_idx phy_idx = rtwvif->phy_idx; rtw8852c_mcc_get_ch_info(rtwdev, phy_idx); rtw8852c_rx_dck(rtwdev, phy_idx, false); - rtw8852c_iqk(rtwdev, phy_idx); - rtw8852c_tssi(rtwdev, phy_idx); - rtw8852c_dpk(rtwdev, phy_idx); + rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx); + rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx); + rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx); rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); } static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { - rtw8852c_tssi_scan(rtwdev, phy_idx); + rtw8852c_tssi_scan(rtwdev, phy_idx, chan); } -static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start) +static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool start) { - rtw8852c_wifi_scan_notify(rtwdev, start, RTW89_PHY_0); + rtw8852c_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx); } static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev) @@ -2108,7 +2120,7 @@ rtw8852c_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); u8 band = chan->band_type; u32 rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; u32 rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; @@ -2840,10 +2852,12 @@ static const struct rtw89_chanctx_listener rtw8852c_chanctx_listener = { #ifdef CONFIG_PM static const struct wiphy_wowlan_support rtw_wowlan_stub_8852c = { - .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_NET_DETECT, .n_patterns = RTW89_MAX_PATTERN_NUM, .pattern_max_len = RTW89_MAX_PATTERN_SIZE, .pattern_min_len = 1, + .max_nd_match_sets = RTW89_SCANOFLD_MAX_SSID, }; #endif @@ -2876,9 +2890,11 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .get_thermal = rtw8852c_get_thermal, .ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx, .query_ppdu = rtw8852c_query_ppdu, + .convert_rpl_to_rssi = NULL, .ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8852c_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, + .digital_pwr_comp = NULL, .pwr_on_func = rtw8852c_pwr_on_func, .pwr_off_func = rtw8852c_pwr_off_func, .query_rxdesc = rtw89_core_query_rxdesc, @@ -2946,6 +2962,7 
@@ const struct rtw89_chip_info rtw8852c_chip_info = { .dig_regs = &rtw8852c_dig_regs, .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table, .support_macid_num = RTW89_MAX_MAC_ID_NUM, + .support_link_num = 0, .support_chanctx_num = 2, .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -2959,6 +2976,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .ul_tb_waveform_ctrl = false, .ul_tb_pwr_diff = true, .hw_sec_hdr = true, + .hw_mgmt_tx_encrypt = true, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, @@ -3022,6 +3040,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .rrsr_cfgs = &rtw8852c_rrsr_cfgs, .bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP, + .rfkill_init = &rtw8852c_rfkill_regs, + .rfkill_get = {R_AX_GPIO_EXT_CTRL, B_AX_GPIO_IN_9}, .dma_ch_mask = 0, .edcca_regs = &rtw8852c_edcca_regs, #ifdef CONFIG_PM diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 743f7014bf3e88..211c051c296762 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -5,6 +5,7 @@ #include "chan.h" #include "coex.h" #include "debug.h" +#include "fw.h" #include "phy.h" #include "reg.h" #include "rtw8852c.h" @@ -584,11 +585,12 @@ static void _drck(struct rtw89_dev *rtwdev) rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); } -static void _dac_cal(struct rtw89_dev *rtwdev, bool force) +static void _dac_cal(struct rtw89_dev *rtwdev, bool force, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dack_info *dack = &rtwdev->dack; u32 rf0_0, rf1_0; - u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB); + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx); dack->dack_done = false; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n"); @@ -1321,9 +1323,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u } static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy, u8 path) + enum rtw89_phy_idx phy, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); @@ -1516,12 +1519,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev) } static void _doiqk(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy_idx, u8 path) + enum rtw89_phy_idx phy_idx, u8 path, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u32 backup_bb_val[BACKUP_BB_REGS_NR]; u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR]; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); @@ -1531,7 +1535,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, iqk_info->version = RTW8852C_IQK_VER; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); - _iqk_get_ch_info(rtwdev, phy_idx, path); + _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx); _rfk_backup_bb_reg(rtwdev, backup_bb_val); _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); _iqk_macbb_setting(rtwdev, phy_idx, path); @@ -1544,18 +1548,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force, rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, 
BTC_WRFK_ONESHOT_STOP); } -static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force, + enum rtw89_chanctx_idx chanctx_idx) { switch (_kpath(rtwdev, phy_idx)) { case RF_A: - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); break; case RF_B: - _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx); break; case RF_AB: - _doiqk(rtwdev, force, phy_idx, RF_PATH_A); - _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx); break; default: break; @@ -1901,9 +1906,9 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 kidx = dpk->cur_idx[path]; @@ -2495,9 +2500,9 @@ static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_dpk_info *dpk = &rtwdev->dpk; bool is_reload = false; u8 idx, cur_band, cur_ch; @@ -2689,7 +2694,8 @@ static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_byb } static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, - enum rtw89_phy_idx phy, u8 kpath) + enum rtw89_phy_idx phy, u8 kpath, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8}; @@ -2705,7 +2711,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, if (!(kpath & BIT(path))) continue; - reloaded[path] = _dpk_reload_check(rtwdev, phy, path); + reloaded[path] = _dpk_reload_check(rtwdev, phy, path, + chanctx_idx); if (!reloaded[path] && dpk->bp[path][0].ch != 0) dpk->cur_idx[path] = !dpk->cur_idx[path]; else @@ -2722,7 +2729,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, path, dpk->cur_idx[path]); _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path); _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); - _dpk_information(rtwdev, phy, path); + _dpk_information(rtwdev, phy, path, chanctx_idx); _dpk_init(rtwdev, path); if (rtwdev->is_tssi_mode[path]) _dpk_tssi_pause(rtwdev, path, true); @@ -2755,10 +2762,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, _dpk_kip_pwr_clk_onoff(rtwdev, false); } -static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_fem_info *fem = &rtwdev->fem; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 band = chan->band_type; if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) { @@ -2790,17 +2798,18 @@ 
static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) } } -static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) +static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force, + enum rtw89_chanctx_idx chanctx_idx) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", RTW8852C_DPK_VER, rtwdev->hal.cv, RTW8852C_RF_REL_VERSION); - if (_dpk_bypass_check(rtwdev, phy)) + if (_dpk_bypass_check(rtwdev, phy, chanctx_idx)) _dpk_force_bypass(rtwdev, phy); else - _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy)); + _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx); if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1) rtw8852c_rx_dck(rtwdev, phy, false); @@ -2891,9 +2900,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev) } static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_bandwidth bw = chan->band_width; enum rtw89_band band = chan->band_type; u32 clk = 0x0; @@ -2945,9 +2953,8 @@ static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, } static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; if (path == RF_PATH_A) { @@ -2972,7 +2979,7 @@ static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx } static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { #define RTW8852C_TSSI_GET_VAL(ptr, idx) \ ({ \ @@ -2985,8 +2992,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph } \ __val; \ }) + struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk; struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 subband = chan->subband_type; const s8 *thm_up_a = NULL; @@ -3001,56 +3008,88 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph switch (subband) { default: case RTW89_CH_2G: - thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p; - thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n; - thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p; - thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n; + thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0] : + rtw89_8852c_trk_cfg.delta_swingidx_2ga_p; + thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0] : + rtw89_8852c_trk_cfg.delta_swingidx_2ga_n; + thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0] : + rtw89_8852c_trk_cfg.delta_swingidx_2gb_p; + thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0] : + rtw89_8852c_trk_cfg.delta_swingidx_2gb_n; break; case RTW89_CH_5G_BAND_1: - thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0]; - thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0]; - thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0]; - thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0]; + thm_up_a = trk ? 
trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0] : + rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0]; + thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0] : + rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0]; + thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0] : + rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0]; + thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0] : + rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0]; break; case RTW89_CH_5G_BAND_3: - thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1]; - thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1]; - thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1]; - thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1]; + thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1] : + rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1]; + thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1] : + rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1]; + thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1] : + rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1]; + thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1] : + rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1]; break; case RTW89_CH_5G_BAND_4: - thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2]; - thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2]; - thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2]; - thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2]; + thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2] : + rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2]; + thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2] : + rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2]; + thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2] : + rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2]; + thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2] : + rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2]; break; case RTW89_CH_6G_BAND_IDX0: case RTW89_CH_6G_BAND_IDX1: - thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0]; - thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0]; - thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0]; - thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0]; + thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0] : + rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0]; + thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0] : + rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0]; + thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0] : + rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0]; + thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0] : + rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0]; break; case RTW89_CH_6G_BAND_IDX2: case RTW89_CH_6G_BAND_IDX3: - thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1]; - thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1]; - thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1]; - thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1]; + thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1] : + rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1]; + thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1] : + rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1]; + thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1] : + rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1]; + thm_down_b = trk ? 
trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1] : + rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1]; break; case RTW89_CH_6G_BAND_IDX4: case RTW89_CH_6G_BAND_IDX5: - thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2]; - thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2]; - thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2]; - thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2]; + thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2] : + rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2]; + thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2] : + rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2]; + thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2] : + rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2]; + thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2] : + rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2]; break; case RTW89_CH_6G_BAND_IDX6: case RTW89_CH_6G_BAND_IDX7: - thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3]; - thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3]; - thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3]; - thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3]; + thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3] : + rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3]; + thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3] : + rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3]; + thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3] : + rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3]; + thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3] : + rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3]; break; } @@ -3158,9 +3197,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph } static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; if (path == RF_PATH_A) { @@ -3175,9 +3213,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy } static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; const struct rtw89_rfk_tbl *tbl; @@ -3586,10 +3624,9 @@ static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch) } static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; u8 ch = chan->channel; u32 gidx, gidx_1st, gidx_2nd; @@ -3650,10 +3687,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); enum rtw89_band band = chan->band_type; u8 ch = chan->channel; u32 tgidx, tgidx_1st, tgidx_2nd; @@ -3715,10 +3751,9 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, } static void 
_tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy) + enum rtw89_phy_idx phy, const struct rtw89_chan *chan) { struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 ch = chan->channel; u8 gidx; s8 ofdm_de; @@ -3741,7 +3776,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, for (i = path; i < path_max; i++) { gidx = _tssi_get_cck_group(rtwdev, ch); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = tssi_info->tssi_cck[i][gidx] + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3757,8 +3792,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK)); - ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); - trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan); val = ofdm_de + trim_de; rtw89_debug(rtwdev, RTW89_DBG_TSSI, @@ -3781,7 +3816,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, } static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en, - enum rtw89_rf_path path) + enum rtw89_rf_path path, const struct rtw89_chan *chan) { static const u32 tssi_trk[2] = {0x5818, 0x7818}; static const u32 tssi_en[2] = {0x5820, 0x7820}; @@ -3790,25 +3825,26 @@ static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en, rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0); rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0); if (rtwdev->dbcc_en && path == RF_PATH_B) - _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1); + _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1, chan); else - _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0); + _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0, chan); } else { rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1); rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1); } } -void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx) +void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx, + const struct rtw89_chan *chan) { if (!rtwdev->dbcc_en) { - rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A); - rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B); + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan); + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan); } else { if (phy_idx == RTW89_PHY_0) - rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A); + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan); else - rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B); + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan); } } @@ -4079,10 +4115,10 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i mode = rtw89_get_entity_mode(rtwdev); switch (mode) { case RTW89_ENTITY_MODE_MCC_PREPARE: - chan_idx = RTW89_SUB_ENTITY_1; + chan_idx = RTW89_CHANCTX_1; break; default: - chan_idx = RTW89_SUB_ENTITY_0; + chan_idx = RTW89_CHANCTX_0; break; } @@ -4112,26 +4148,27 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev) _rck(rtwdev, path); } -void rtw8852c_dack(struct rtw89_dev *rtwdev) +void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx) { - u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); - _dac_cal(rtwdev, false); + _dac_cal(rtwdev, false, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, 
phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); } -void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { u32 tx_en; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); _iqk_init(rtwdev); - _iqk(rtwdev, phy_idx, false); + _iqk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); @@ -4202,10 +4239,11 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_chanctx_idx chanctx_idx = RTW89_CHANCTX_0; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck; enum rtw89_phy_idx phy_idx = RTW89_PHY_0; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); u8 dck_channel; u8 cur_thermal; u32 tx_en; @@ -4259,16 +4297,17 @@ void rtw8852c_dpk_init(struct rtw89_dev *rtwdev) dpk->is_dpk_reload_en = false; } -void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx) { u32 tx_en; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); - _dpk(rtwdev, phy_idx, false); + _dpk(rtwdev, phy_idx, false, chanctx_idx); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); @@ -4279,8 +4318,10 @@ void rtw8852c_dpk_track(struct rtw89_dev *rtwdev) _dpk_track(rtwdev); } -void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx) { + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); @@ -4298,23 +4339,24 @@ void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) _tssi_disable(rtwdev, phy); for (i = path; i < path_max; i++) { - _tssi_set_sys(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy, i, chan); _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); - _tssi_set_dck(rtwdev, phy, i); + _tssi_set_dck(rtwdev, phy, i, chan); _tssi_set_bbgain_split(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); - _tssi_slope_cal_org(rtwdev, phy, i); - _tssi_set_aligk_default(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); + _tssi_slope_cal_org(rtwdev, phy, i, chan); + _tssi_set_aligk_default(rtwdev, phy, i, chan); _tssi_set_slope(rtwdev, phy, i); _tssi_run_slope(rtwdev, phy, i); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); } -void 
rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan) { u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; @@ -4339,15 +4381,15 @@ void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) _tssi_disable(rtwdev, phy); for (i = path; i < path_max; i++) { - _tssi_set_sys(rtwdev, phy, i); - _tssi_set_dck(rtwdev, phy, i); - _tssi_set_tmeter_tbl(rtwdev, phy, i); - _tssi_slope_cal_org(rtwdev, phy, i); - _tssi_set_aligk_default(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy, i, chan); + _tssi_set_dck(rtwdev, phy, i, chan); + _tssi_set_tmeter_tbl(rtwdev, phy, i, chan); + _tssi_slope_cal_org(rtwdev, phy, i, chan); + _tssi_set_aligk_default(rtwdev, phy, i, chan); } _tssi_enable(rtwdev, phy); - _tssi_set_efuse_to_de(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy, chan); } static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev, @@ -4422,7 +4464,7 @@ void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev, dpk->is_dpk_enable = true; for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) _dpk_onoff(rtwdev, path, false); - rtw8852c_dpk(rtwdev, RTW89_PHY_0); + rtw8852c_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0); break; default: break; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index 6605137e61aa43..306dd0a0be735e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -9,16 +9,21 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_rck(struct rtw89_dev *rtwdev); -void rtw8852c_dack(struct rtw89_dev *rtwdev); -void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx); +void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe); void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev); void rtw8852c_dpk_init(struct rtw89_dev *rtwdev); -void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + enum rtw89_chanctx_idx chanctx_idx); void rtw8852c_dpk_track(struct rtw89_dev *rtwdev); -void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); -void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); -void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx); +void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_chanctx_idx chanctx_idx); +void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + const struct rtw89_chan *chan); +void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx, + const struct rtw89_chan *chan); void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, enum rtw89_phy_idx phy_idx); void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c index 2af568a3264d35..63b1ff2f98ed31 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c @@ -13,10 +13,10 @@ #include "rtw8922a_rfk.h" #include "util.h" -#define RTW8922A_FW_FORMAT_MAX 0 +#define RTW8922A_FW_FORMAT_MAX 1 
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw" #define RTW8922A_MODULE_FIRMWARE \ - RTW8922A_FW_BASENAME ".bin" + RTW8922A_FW_BASENAME "-" __stringify(RTW8922A_FW_FORMAT_MAX) ".bin" #define HE_N_USER_MAX_8922A 4 @@ -165,6 +165,15 @@ static const struct rtw89_rrsr_cfgs rtw8922a_rrsr_cfgs = { .rsc = {R_BE_PTCL_RRSR1, B_BE_RSC_MASK, 2}, }; +static const struct rtw89_rfkill_regs rtw8922a_rfkill_regs = { + .pinmux = {R_BE_GPIO8_15_FUNC_SEL, + B_BE_PINMUX_GPIO9_FUNC_SEL_MASK, + 0xf}, + .mode = {R_BE_GPIO_EXT_CTRL + 2, + (B_BE_GPIO_MOD_9 | B_BE_GPIO_IO_SEL_9) >> 16, + 0x0}, +}; + static const struct rtw89_dig_regs rtw8922a_dig_regs = { .seg0_pd_reg = R_SEG0R_PD_V2, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, @@ -1680,9 +1689,63 @@ static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev, return 0; } +#define DIGITAL_PWR_COMP_REG_NUM 22 +static const u32 rtw8922a_digital_pwr_comp_val[][DIGITAL_PWR_COMP_REG_NUM] = { + {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A, + 0x0BB80708, 0x17701194, 0x02020100, 0x03030303, 0x01000303, + 0x05030302, 0x06060605, 0x06050300, 0x0A090807, 0x02000B0B, + 0x09080604, 0x0D0D0C0B, 0x08060400, 0x110F0C0B, 0x05001111, + 0x0D0C0907, 0x12121210}, + {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A, + 0x0BB80708, 0x17701194, 0x04030201, 0x05050505, 0x01000505, + 0x07060504, 0x09090908, 0x09070400, 0x0E0D0C0B, 0x03000E0E, + 0x0D0B0907, 0x1010100F, 0x0B080500, 0x1512100D, 0x05001515, + 0x100D0B08, 0x15151512}, +}; + +static void rtw8922a_set_digital_pwr_comp(struct rtw89_dev *rtwdev, + bool enable, u8 nss, + enum rtw89_rf_path path) +{ + static const u32 ltpc_t0[2] = {R_BE_LTPC_T0_PATH0, R_BE_LTPC_T0_PATH1}; + const u32 *digital_pwr_comp; + u32 addr, val; + u32 i; + + if (nss == 1) + digital_pwr_comp = rtw8922a_digital_pwr_comp_val[0]; + else + digital_pwr_comp = rtw8922a_digital_pwr_comp_val[1]; + + addr = ltpc_t0[path]; + for (i = 0; i < DIGITAL_PWR_COMP_REG_NUM; i++, addr += 4) { + val = enable ? 
digital_pwr_comp[i] : 0; + rtw89_phy_write32(rtwdev, addr, val); + } +} + +static void rtw8922a_digital_pwr_comp(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); + bool enable = chan->band_type != RTW89_BAND_2G; + u8 path; + + if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) { + if (phy_idx == RTW89_PHY_0) + path = RF_PATH_A; + else + path = RF_PATH_B; + rtw8922a_set_digital_pwr_comp(rtwdev, enable, 1, path); + } else { + rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_A); + rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_B); + } +} + static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) { rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x1); @@ -1801,11 +1864,13 @@ static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev, } static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev, - enum rtw89_mlo_dbcc_mode mode) + enum rtw89_mlo_dbcc_mode mode, + enum rtw89_phy_idx phy_idx) { if (!rtwdev->dbcc_en) return; + rtw8922a_digital_pwr_comp(rtwdev, phy_idx); rtw8922a_ctrl_mlo(rtwdev, mode); } @@ -1912,7 +1977,7 @@ static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter, rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter); if (!enter) { - rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode); + rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode, phy_idx); rtw8922a_post_set_channel_rf(rtwdev, phy_idx); } } @@ -1928,10 +1993,12 @@ static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev) static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev) { + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); + rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5); - rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58); - rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32); + rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, chan, 58); + rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32); } static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) @@ -1953,10 +2020,12 @@ static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) } } -static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev) +static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - enum rtw89_phy_idx phy_idx = RTW89_PHY_0; - u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); + enum rtw89_phy_idx phy_idx = rtwvif->phy_idx; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx); u32 tx_en; rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START); @@ -1964,23 +2033,25 @@ static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev) _wait_rx_mode(rtwdev, RF_AB); rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5); - rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54); - rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84); - rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6); - rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34); - rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32); + rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, chan, 54); + rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, chan, 
84); + rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_NORMAL, 6); + rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, chan, 34); + rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP); } static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) + enum rtw89_phy_idx phy_idx, + const struct rtw89_chan *chan) { - rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6); + rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_SCAN, 6); } -static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, bool start) +static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool start) { } @@ -2109,7 +2180,7 @@ static void rtw8922a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); enum rtw89_band band = chan->band_type; struct rtw89_hal *hal = &rtwdev->hal; u8 ntx_path = RF_PATH_AB; @@ -2426,6 +2497,38 @@ static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev, rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status); } +static void rtw8922a_convert_rpl_to_rssi(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + /* Mapping to BW: 5, 10, 20, 40, 80, 160, 80_80 */ + static const u8 bw_compensate[] = {0, 0, 0, 6, 12, 18, 0}; + u8 *rssi = phy_ppdu->rssi; + u8 compensate = 0; + u16 rpl_tmp; + u8 i; + + if (phy_ppdu->bw_idx < ARRAY_SIZE(bw_compensate)) + compensate = bw_compensate[phy_ppdu->bw_idx]; + + for (i = 0; i < RF_PATH_NUM_8922A; i++) { + if (!(phy_ppdu->rx_path_en & BIT(i))) { + rssi[i] = 0; + phy_ppdu->rpl_path[i] = 0; + phy_ppdu->rpl_fd[i] = 0; + } + if (phy_ppdu->rate >= RTW89_HW_RATE_OFDM6) { + rpl_tmp = phy_ppdu->rpl_fd[i]; + if (rpl_tmp) + rpl_tmp += compensate; + + phy_ppdu->rpl_path[i] = rpl_tmp; + } + rssi[i] = phy_ppdu->rpl_path[i]; + } + + phy_ppdu->rssi_avg = phy_ppdu->rpl_avg; +} + static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev) { rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE, @@ -2445,10 +2548,12 @@ static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev) #ifdef CONFIG_PM static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = { - .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_NET_DETECT, .n_patterns = RTW89_MAX_PATTERN_NUM, .pattern_max_len = RTW89_MAX_PATTERN_SIZE, .pattern_min_len = 1, + .max_nd_match_sets = RTW89_SCANOFLD_MAX_SSID, }; #endif @@ -2481,9 +2586,11 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = { .get_thermal = rtw8922a_get_thermal, .ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx, .query_ppdu = rtw8922a_query_ppdu, + .convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi, .ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8922a_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = NULL, + .digital_pwr_comp = rtw8922a_digital_pwr_comp, .pwr_on_func = rtw8922a_pwr_on_func, .pwr_off_func = rtw8922a_pwr_off_func, .query_rxdesc = rtw89_core_query_rxdesc_v2, @@ -2549,6 +2656,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .dig_regs = &rtw8922a_dig_regs, .tssi_dbw_table = NULL, .support_macid_num = 32, + .support_link_num = 2, .support_chanctx_num = 2, .support_rnr = true, .support_bands = 
BIT(NL80211_BAND_2GHZ) | @@ -2562,6 +2670,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .ul_tb_waveform_ctrl = false, .ul_tb_pwr_diff = false, .hw_sec_hdr = true, + .hw_mgmt_tx_encrypt = true, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, @@ -2624,6 +2733,8 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .rrsr_cfgs = &rtw8922a_rrsr_cfgs, .bss_clr_vld = {R_BSS_CLR_VLD_V2, B_BSS_CLR_VLD0_V2}, .bss_clr_map_reg = R_BSS_CLR_MAP_V2, + .rfkill_init = &rtw8922a_rfkill_regs, + .rfkill_get = {R_BE_GPIO_EXT_CTRL, B_BE_GPIO_IN_9}, .dma_ch_mask = 0, .edcca_regs = &rtw8922a_edcca_regs, #ifdef CONFIG_PM diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c index 0ebcb06ae84837..28907df7407d58 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c @@ -256,7 +256,7 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev) { struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {}; - enum rtw89_sub_entity_idx sub_entity_idx; + enum rtw89_chanctx_idx chanctx_idx; const struct rtw89_chan *chan; enum rtw89_entity_mode mode; u8 s0_tbl, s1_tbl; @@ -265,14 +265,14 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev) mode = rtw89_get_entity_mode(rtwdev); switch (mode) { case RTW89_ENTITY_MODE_MCC_PREPARE: - sub_entity_idx = RTW89_SUB_ENTITY_1; + chanctx_idx = RTW89_CHANCTX_1; break; default: - sub_entity_idx = RTW89_SUB_ENTITY_0; + chanctx_idx = RTW89_CHANCTX_0; break; } - chan = rtw89_chan_get(rtwdev, sub_entity_idx); + chan = rtw89_chan_get(rtwdev, chanctx_idx); for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) { struct rtw89_rfk_chan_desc *p = &desc[tbl_sel]; diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c index 1b2a400406ae13..27826d909785c8 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.c +++ b/drivers/net/wireless/realtek/rtw89/sar.c @@ -366,7 +366,7 @@ static void rtw89_tas_state_update(struct rtw89_dev *rtwdev) if (src == RTW89_SAR_SOURCE_NONE) return; - chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); ret = sar_hdl->query_sar_config(rtwdev, chan->freq, &cfg); if (ret) return; diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index 3882938c089318..b2e47829983fee 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -14,6 +14,7 @@ #define DATA_RATE_HT_IDX_MASK GENMASK(4, 0) #define DATA_RATE_HT_IDX_MASK_V1 GENMASK(4, 0) #define DATA_RATE_MODE_HT 0x1 +#define DATA_RATE_HT_NSS_MASK GENMASK(4, 3) #define DATA_RATE_VHT_HE_NSS_MASK GENMASK(6, 4) #define DATA_RATE_VHT_HE_IDX_MASK GENMASK(3, 0) #define DATA_RATE_NSS_MASK_V1 GENMASK(7, 5) @@ -51,6 +52,11 @@ static inline u8 rtw89_get_data_mcs(struct rtw89_dev *rtwdev, u16 hw_rate) return u16_get_bits(hw_rate, DATA_RATE_VHT_HE_IDX_MASK); } +static inline u8 rtw89_get_data_ht_nss(struct rtw89_dev *rtwdev, u16 hw_rate) +{ + return u16_get_bits(hw_rate, DATA_RATE_HT_NSS_MASK); +} + static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate) { if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) @@ -408,7 +414,7 @@ struct rtw89_rxinfo_user { #define RTW89_RXINFO_USER_DATA BIT(1) #define RTW89_RXINFO_USER_CTRL BIT(2) #define RTW89_RXINFO_USER_MGMT BIT(3) -#define RTW89_RXINFO_USER_BCM BIT(4) +#define RTW89_RXINFO_USER_BCN BIT(4) #define 
RTW89_RXINFO_USER_MACID GENMASK(15, 8) struct rtw89_rxinfo { @@ -435,6 +441,7 @@ struct rtw89_phy_sts_hdr { } __packed; #define RTW89_PHY_STS_HDR_W0_IE_MAP GENMASK(4, 0) +#define RTW89_PHY_STS_HDR_W0_HDR_2_EN BIT(5) #define RTW89_PHY_STS_HDR_W0_VALID BIT(7) #define RTW89_PHY_STS_HDR_W0_LEN GENMASK(15, 8) #define RTW89_PHY_STS_HDR_W0_RSSI_AVG GENMASK(31, 24) @@ -443,6 +450,13 @@ struct rtw89_phy_sts_hdr { #define RTW89_PHY_STS_HDR_W1_RSSI_C GENMASK(23, 16) #define RTW89_PHY_STS_HDR_W1_RSSI_D GENMASK(31, 24) +struct rtw89_phy_sts_hdr_v2 { + __le32 w0; + __le32 w1; +} __packed; + +#define RTW89_PHY_STS_HDR_V2_W0_PATH_EN GENMASK(20, 16) + struct rtw89_phy_sts_iehdr { __le32 w0; }; @@ -546,13 +560,43 @@ struct rtw89_phy_sts_iehdr { #define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16) #define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21) -struct rtw89_phy_sts_ie0 { +struct rtw89_phy_sts_ie00 { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; +} __packed; + +#define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7) + +struct rtw89_phy_sts_ie00_v2 { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; + __le32 w4; + __le32 w5; + __le32 w6; + __le32 w7; +} __packed; + +#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A GENMASK(8, 0) +#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B GENMASK(17, 9) +#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C GENMASK(26, 18) +#define RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D GENMASK(8, 0) + +struct rtw89_phy_sts_ie01 { __le32 w0; __le32 w1; __le32 w2; + __le32 w3; + __le32 w4; + __le32 w5; } __packed; #define RTW89_PHY_STS_IE01_W0_CH_IDX GENMASK(23, 16) +#define RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD GENMASK(15, 8) +#define RTW89_PHY_STS_IE01_W0_RX_PATH_EN GENMASK(31, 28) #define RTW89_PHY_STS_IE01_W1_FD_CFO GENMASK(19, 8) #define RTW89_PHY_STS_IE01_W1_PREMB_CFO GENMASK(31, 20) #define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0) @@ -561,6 +605,25 @@ struct rtw89_phy_sts_ie0 { #define RTW89_PHY_STS_IE01_W2_LDPC BIT(28) #define RTW89_PHY_STS_IE01_W2_STBC BIT(30) +struct rtw89_phy_sts_ie01_v2 { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; + __le32 w4; + __le32 w5; + __le32 w6; + __le32 w7; + __le32 w8; + __le32 w9; +} __packed; + +#define RTW89_PHY_STS_IE01_V2_W5_BW_IDX GENMASK(31, 29) +#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A GENMASK(11, 4) +#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B GENMASK(23, 16) +#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C GENMASK(11, 4) +#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D GENMASK(23, 16) + enum rtw89_tx_channel { RTW89_TXCH_ACH0 = 0, RTW89_TXCH_ACH1 = 1, diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h index e82e7df052d889..e669544cafd3f9 100644 --- a/drivers/net/wireless/realtek/rtw89/util.h +++ b/drivers/net/wireless/realtek/rtw89/util.h @@ -16,6 +16,24 @@ #define rtw89_for_each_rtwvif(rtwdev, rtwvif) \ list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list) +/* Before adding rtwvif to the list, we need to check whether it already exists, because + * in some cases, such as SER L2 happening during the WoWLAN flow, calling reconfig + * twice causes the vif to be added to the list twice. + */ +static inline bool rtw89_rtwvif_in_list(struct rtw89_dev *rtwdev, + struct rtw89_vif *new) +{ + struct rtw89_vif *rtwvif; + + lockdep_assert_held(&rtwdev->mutex); + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + if (rtwvif == new) + return true; + + return false; +} + /* The result of negative dividend and positive divisor is undefined, but it * should be one case of round-down or round-up. So, make it round-down if the * result is round-up.
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index 9882064ef68d33..86e24e07780d9b 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -687,17 +687,30 @@ static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev) __rtw89_enter_ps_mode(rtwdev, rtwvif); } -static void rtw89_wow_enter_lps(struct rtw89_dev *rtwdev) +static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev) { struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; - rtw89_enter_lps(rtwdev, rtwvif, false); + if (rtw89_wow_mgd_linked(rtwdev)) + rtw89_enter_lps(rtwdev, rtwvif, false); + else if (rtw89_wow_no_link(rtwdev)) + rtw89_fw_h2c_fwips(rtwdev, rtwvif, true); } -static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev) +static void rtw89_wow_leave_ps(struct rtw89_dev *rtwdev, bool enable_wow) { - rtw89_leave_lps(rtwdev); + struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + + if (rtw89_wow_mgd_linked(rtwdev)) { + rtw89_leave_lps(rtwdev); + } else if (rtw89_wow_no_link(rtwdev)) { + if (enable_wow) + rtw89_leave_ips(rtwdev); + else + rtw89_fw_h2c_fwips(rtwdev, rtwvif, false); + } } static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow) @@ -781,17 +794,22 @@ static void rtw89_wow_vif_iter(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - /* Current wowlan function support setting of only one STATION vif. - * So when one suitable vif is found, stop the iteration. + /* Current WoWLAN function supports setting only one vif, in + * infra mode or no link mode. When one suitable vif is found, + * stop the iteration.
*/ if (rtw_wow->wow_vif || vif->type != NL80211_IFTYPE_STATION) return; switch (rtwvif->net_type) { case RTW89_NET_TYPE_INFRA: - rtw_wow->wow_vif = vif; + if (rtw_wow_has_mgd_features(rtwdev)) + rtw_wow->wow_vif = vif; break; case RTW89_NET_TYPE_NO_LINK: + if (rtw_wow->pno_inited) + rtw_wow->wow_vif = vif; + break; default: break; } @@ -1025,6 +1043,23 @@ static void rtw89_wow_clear_wakeups(struct rtw89_dev *rtwdev) rtw_wow->wow_vif = NULL; rtw89_core_release_all_bits_map(rtw_wow->flags, RTW89_WOW_FLAG_NUM); rtw_wow->pattern_cnt = 0; + rtw_wow->pno_inited = false; +} + +static void rtw89_wow_init_pno(struct rtw89_dev *rtwdev, + struct cfg80211_sched_scan_request *nd_config) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + + if (!nd_config->n_match_sets || !nd_config->n_channels) + return; + + rtw_wow->nd_config = nd_config; + rtw_wow->pno_inited = true; + + INIT_LIST_HEAD(&rtw_wow->pno_pkt_list); + + rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: net-detect is enabled\n"); } static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev, @@ -1037,6 +1072,11 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev, set_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags); if (wowlan->magic_pkt) set_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags); + if (wowlan->n_patterns && wowlan->patterns) + set_bit(RTW89_WOW_FLAG_EN_PATTERN, rtw_wow->flags); + + if (wowlan->nd_config) + rtw89_wow_init_pno(rtwdev, wowlan->nd_config); rtw89_for_each_rtwvif(rtwdev, rtwvif) rtw89_wow_vif_iter(rtwdev, rtwvif); @@ -1048,6 +1088,34 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev, return rtw89_wow_parse_patterns(rtwdev, rtwvif, wowlan); } +static int rtw89_wow_cfg_wake_pno(struct rtw89_dev *rtwdev, bool wow) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct ieee80211_vif *wow_vif = rtw_wow->wow_vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + int ret; + + ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif, true); + if (ret) { + rtw89_err(rtwdev, "failed to config pno\n"); + return ret; + } + + ret = rtw89_fw_h2c_wow_wakeup_ctrl(rtwdev, rtwvif, wow); + if (ret) { + rtw89_err(rtwdev, "failed to fw wow wakeup ctrl\n"); + return ret; + } + + ret = rtw89_fw_h2c_wow_global(rtwdev, rtwvif, wow); + if (ret) { + rtw89_err(rtwdev, "failed to fw wow global\n"); + return ret; + } + + return 0; +} + static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow) { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; @@ -1308,100 +1376,239 @@ static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev) return ret; } -static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev) +static void rtw89_fw_release_pno_pkt_list(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; - struct rtw89_vif *rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv; + struct list_head *pkt_list = &rtw_wow->pno_pkt_list; + struct rtw89_pktofld_info *info, *tmp; + + list_for_each_entry_safe(info, tmp, pkt_list, list) { + rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); + list_del(&info->list); + kfree(info); + } +} + +static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; + u8 num = nd_config->n_match_sets, i; + struct rtw89_pktofld_info *info; + struct sk_buff *skb; int ret; - rtw89_wow_pattern_write(rtwdev); - rtw89_wow_construct_key_info(rtwdev); + for (i = 0; i < num; i++) { + skb = 
ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, + nd_config->match_sets[i].ssid.ssid, + nd_config->match_sets[i].ssid.ssid_len, + nd_config->ie_len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, nd_config->ie, nd_config->ie_len); + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + kfree_skb(skb); + rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif); + return -ENOMEM; + } - ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true); - if (ret) { - rtw89_err(rtwdev, "wow: failed to enable keep alive\n"); - return ret; + ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); + if (ret) { + kfree_skb(skb); + kfree(info); + rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif); + return ret; + } + + list_add_tail(&info->list, &rtw_wow->pno_pkt_list); + kfree_skb(skb); } - ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, true); - if (ret) { - rtw89_err(rtwdev, "wow: failed to enable disconnect detect\n"); - goto out; + return 0; +} + +static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable) +{ + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct ieee80211_vif *wow_vif = rtw_wow->wow_vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + int interval = rtw_wow->nd_config->scan_plans[0].interval; + struct rtw89_scan_option opt = {}; + int ret; + + if (enable) { + ret = rtw89_pno_scan_update_probe_req(rtwdev, rtwvif); + if (ret) { + rtw89_err(rtwdev, "Update probe request failed\n"); + return ret; + } + + ret = mac->add_chan_list_pno(rtwdev, rtwvif); + if (ret) { + rtw89_err(rtwdev, "Update channel list failed\n"); + return ret; + } } - ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true); - if (ret) { - rtw89_err(rtwdev, "wow: failed to enable GTK offload\n"); - goto out; + opt.enable = enable; + opt.repeat = RTW89_SCAN_NORMAL; + opt.norm_pd = max(interval, 1) * 10; /* in unit of 100ms */ + opt.delay = max(rtw_wow->nd_config->delay, 1); + + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { + opt.operation = enable ? 
RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; + opt.scan_mode = RTW89_SCAN_MODE_SA; + opt.band = RTW89_PHY_0; + opt.num_macc_role = 0; + opt.mlo_mode = rtwdev->mlo_dbcc_mode; + opt.num_opch = 0; + opt.opch_end = RTW89_CHAN_INVALID; } - ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true); - if (ret) - rtw89_warn(rtwdev, "wow: failed to enable arp offload\n"); + mac->scan_offload(rtwdev, &opt, rtwvif, true); - ret = rtw89_wow_cfg_wake(rtwdev, true); - if (ret) { - rtw89_err(rtwdev, "wow: failed to config wake\n"); - goto out; + return 0; +} + +static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct ieee80211_vif *wow_vif = rtw_wow->wow_vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + int ret; + + if (rtw89_wow_no_link(rtwdev)) { + ret = rtw89_pno_scan_offload(rtwdev, false); + if (ret) { + rtw89_err(rtwdev, "wow: failed to disable pno scan offload\n"); + return ret; + } + + ret = rtw89_pno_scan_offload(rtwdev, true); + if (ret) { + rtw89_err(rtwdev, "wow: failed to enable pno scan offload\n"); + return ret; + } + } else { + rtw89_wow_pattern_write(rtwdev); + rtw89_wow_construct_key_info(rtwdev); + + ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true); + if (ret) { + rtw89_err(rtwdev, "wow: failed to enable keep alive\n"); + return ret; + } + + ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, true); + if (ret) { + rtw89_err(rtwdev, "wow: failed to enable disconnect detect\n"); + return ret; + } + + ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true); + if (ret) { + rtw89_err(rtwdev, "wow: failed to enable GTK offload\n"); + return ret; + } + + ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true); + if (ret) + rtw89_warn(rtwdev, "wow: failed to enable arp offload\n"); + } + + if (rtw89_wow_no_link(rtwdev)) { + ret = rtw89_wow_cfg_wake_pno(rtwdev, true); + if (ret) { + rtw89_err(rtwdev, "wow: failed to config wake PNO\n"); + return ret; + } + } else { + ret = rtw89_wow_cfg_wake(rtwdev, true); + if (ret) { + rtw89_err(rtwdev, "wow: failed to config wake\n"); + return ret; + } } ret = rtw89_wow_check_fw_status(rtwdev, true); if (ret) { rtw89_err(rtwdev, "wow: failed to check enable fw ready\n"); - goto out; + return ret; } -out: - return ret; + return 0; } static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev) { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; - struct rtw89_vif *rtwvif = (struct rtw89_vif *)rtw_wow->wow_vif->drv_priv; + struct ieee80211_vif *wow_vif = rtw_wow->wow_vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; int ret; - rtw89_wow_pattern_clear(rtwdev); + if (rtw89_wow_no_link(rtwdev)) { + ret = rtw89_pno_scan_offload(rtwdev, false); + if (ret) { + rtw89_err(rtwdev, "wow: failed to disable pno scan offload\n"); + return ret; + } - ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, false); - if (ret) { - rtw89_err(rtwdev, "wow: failed to disable keep alive\n"); - goto out; - } + ret = rtw89_fw_h2c_cfg_pno(rtwdev, rtwvif, false); + if (ret) { + rtw89_err(rtwdev, "wow: failed to disable pno\n"); + return ret; + } - ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false); - if (ret) { - rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n"); - goto out; - } + rtw89_fw_release_pno_pkt_list(rtwdev, rtwvif); + } else { + rtw89_wow_pattern_clear(rtwdev); - ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false); - if (ret) { - rtw89_err(rtwdev, "wow: failed to disable GTK offload\n"); - goto out; - } + ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, false); + if (ret) 
{ + rtw89_err(rtwdev, "wow: failed to disable keep alive\n"); + return ret; + } - ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false); - if (ret) - rtw89_warn(rtwdev, "wow: failed to disable arp offload\n"); + ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false); + if (ret) { + rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n"); + return ret; + } + + ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false); + if (ret) { + rtw89_err(rtwdev, "wow: failed to disable GTK offload\n"); + return ret; + } + + ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false); + if (ret) + rtw89_warn(rtwdev, "wow: failed to disable arp offload\n"); + + rtw89_wow_key_clear(rtwdev); + rtw89_fw_release_general_pkt_list(rtwdev, true); + } - rtw89_wow_key_clear(rtwdev); - rtw89_fw_release_general_pkt_list(rtwdev, true); ret = rtw89_wow_cfg_wake(rtwdev, false); if (ret) { rtw89_err(rtwdev, "wow: failed to disable config wake\n"); - goto out; + return ret; } ret = rtw89_wow_check_fw_status(rtwdev, false); if (ret) { rtw89_err(rtwdev, "wow: failed to check disable fw ready\n"); - goto out; + return ret; } -out: - return ret; + return 0; } static int rtw89_wow_enable(struct rtw89_dev *rtwdev) @@ -1430,7 +1637,7 @@ static int rtw89_wow_enable(struct rtw89_dev *rtwdev) goto out; } - rtw89_wow_enter_lps(rtwdev); + rtw89_wow_enter_ps(rtwdev); ret = rtw89_wow_enable_trx_post(rtwdev); if (ret) { @@ -1455,7 +1662,7 @@ static int rtw89_wow_disable(struct rtw89_dev *rtwdev) goto out; } - rtw89_wow_leave_lps(rtwdev); + rtw89_wow_leave_ps(rtwdev, false); ret = rtw89_wow_fw_stop(rtwdev); if (ret) { @@ -1480,6 +1687,12 @@ static int rtw89_wow_disable(struct rtw89_dev *rtwdev) return ret; } +static void rtw89_wow_restore_ps(struct rtw89_dev *rtwdev) +{ + if (rtw89_wow_no_link(rtwdev)) + rtw89_enter_ips(rtwdev); +} + int rtw89_wow_resume(struct rtw89_dev *rtwdev) { int ret; @@ -1504,6 +1717,7 @@ int rtw89_wow_resume(struct rtw89_dev *rtwdev) if (ret) rtw89_err(rtwdev, "failed to disable wow\n"); + rtw89_wow_restore_ps(rtwdev); out: rtw89_wow_clear_wakeups(rtwdev); return ret; @@ -1519,7 +1733,7 @@ int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan) return ret; } - rtw89_wow_leave_lps(rtwdev); + rtw89_wow_leave_ps(rtwdev, true); ret = rtw89_wow_enable(rtwdev); if (ret) { diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h index 0d90add0e88d99..3fbc2b87c058ac 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.h +++ b/drivers/net/wireless/realtek/rtw89/wow.h @@ -95,6 +95,29 @@ static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev) } #ifdef CONFIG_PM +static inline bool rtw89_wow_mgd_linked(struct rtw89_dev *rtwdev) +{ + struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + + return rtwvif->net_type == RTW89_NET_TYPE_INFRA; +} + +static inline bool rtw89_wow_no_link(struct rtw89_dev *rtwdev) +{ + struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + + return rtwvif->net_type == RTW89_NET_TYPE_NO_LINK; +} + +static inline bool rtw_wow_has_mgd_features(struct rtw89_dev *rtwdev) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + + return !bitmap_empty(rtw_wow->flags, RTW89_WOW_FLAG_NUM); +} + int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan); int rtw89_wow_resume(struct rtw89_dev *rtwdev); void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb); diff 
--git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h index a6a28640ad4098..bbc1200dbb62ca 100644 --- a/drivers/net/wireless/rsi/rsi_debugfs.h +++ b/drivers/net/wireless/rsi/rsi_debugfs.h @@ -39,7 +39,6 @@ struct rsi_dbg_files { struct rsi_debugfs { struct dentry *subdir; - struct rsi_dbg_ops *dfs_get_ops; struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES]; }; int rsi_init_dbgfs(struct rsi_hw *adapter); diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c index 34d95f458e1a92..a9f090e15cbbea 100644 --- a/drivers/net/wireless/ti/wl18xx/event.c +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -142,7 +142,7 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) wl18xx_radar_type_decode(mbox->radar_type)); if (!wl->radar_debug_mode) - ieee80211_radar_detected(wl->hw); + ieee80211_radar_detected(wl->hw, NULL); } if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index d86e6ff4523db4..f0e528abb1b46f 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -71,7 +71,7 @@ MODULE_PARM_DESC(mlo, "Support MLO"); static bool multi_radio; module_param(multi_radio, bool, 0444); -MODULE_PARM_DESC(mlo, "Support Multiple Radios per wiphy"); +MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy"); /** * enum hwsim_regtest - the type of regulatory tests we offer @@ -1146,7 +1146,7 @@ static int hwsim_write_simulate_radar(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; - ieee80211_radar_detected(data->hw); + ieee80211_radar_detected(data->hw, NULL); return 0; } diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index a8a94edf2a707a..9ae10f65f2af0b 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include "zd_def.h" #include "zd_mac.h" diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c index 26ca719fa0de43..5dcb9a84a12e35 100644 --- a/drivers/net/wwan/qcom_bam_dmux.c +++ b/drivers/net/wwan/qcom_bam_dmux.c @@ -823,17 +823,17 @@ static int bam_dmux_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq, IRQF_ONESHOT, NULL, dmux); if (ret) - return ret; + goto err_disable_pm; ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq, IRQF_ONESHOT, NULL, dmux); if (ret) - return ret; + goto err_disable_pm; ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL, &dmux->pc_state); if (ret) - return ret; + goto err_disable_pm; /* Check if remote finished initialization before us */ if (dmux->pc_state) { @@ -844,6 +844,11 @@ static int bam_dmux_probe(struct platform_device *pdev) } return 0; + +err_disable_pm: + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + return ret; } static void bam_dmux_remove(struct platform_device *pdev) diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c index 8d864d4ed77f5e..79f17100f70b1c 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -53,6 +53,7 @@ #define RGU_RESET_DELAY_MS 10 #define PORT_RESET_DELAY_MS 2000 +#define FASTBOOT_RESET_DELAY_MS 2000 #define EX_HS_TIMEOUT_MS 5000 #define EX_HS_POLL_DELAY_MS 10 @@ -167,19 +168,52 @@ static int 
t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name) } kfree(buffer.pointer); +#else + struct device *dev = &t7xx_dev->pdev->dev; + int ret; + ret = pci_reset_function(t7xx_dev->pdev); + if (ret) { + dev_err(dev, "Failed to reset device, error:%d\n", ret); + return ret; + } #endif return 0; } -int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev) +static void t7xx_host_event_notify(struct t7xx_pci_dev *t7xx_dev, unsigned int event_id) { - return t7xx_acpi_reset(t7xx_dev, "_RST"); + u32 value; + + value = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + value &= ~HOST_EVENT_MASK; + value |= FIELD_PREP(HOST_EVENT_MASK, event_id); + iowrite32(value, IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); } -int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev) +int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type) { - return t7xx_acpi_reset(t7xx_dev, "MRST._RST"); + int ret = 0; + + pci_save_state(t7xx_dev->pdev); + t7xx_pci_reprobe_early(t7xx_dev); + t7xx_mode_update(t7xx_dev, T7XX_RESET); + + if (type == FLDR) { + ret = t7xx_acpi_reset(t7xx_dev, "_RST"); + } else if (type == PLDR) { + ret = t7xx_acpi_reset(t7xx_dev, "MRST._RST"); + } else if (type == FASTBOOT) { + t7xx_host_event_notify(t7xx_dev, FASTBOOT_DL_NOTIFY); + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); + msleep(FASTBOOT_RESET_DELAY_MS); + } + + pci_restore_state(t7xx_dev->pdev); + if (ret) + return ret; + + return t7xx_pci_reprobe(t7xx_dev, true); } static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev) @@ -188,16 +222,15 @@ static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev) val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); if (val & MISC_RESET_TYPE_PLDR) - t7xx_acpi_reset(t7xx_dev, "MRST._RST"); + t7xx_reset_device(t7xx_dev, PLDR); else if (val & MISC_RESET_TYPE_FLDR) - t7xx_acpi_fldr_func(t7xx_dev); + t7xx_reset_device(t7xx_dev, FLDR); } static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data) { struct t7xx_pci_dev *t7xx_dev = data; - t7xx_mode_update(t7xx_dev, T7XX_RESET); msleep(RGU_RESET_DELAY_MS); t7xx_reset_device_via_pmic(t7xx_dev); return IRQ_HANDLED; diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h index b39e945a92e017..39ed0000fbba28 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h @@ -78,14 +78,19 @@ struct t7xx_modem { spinlock_t exp_lock; /* Protects exception events */ }; +enum reset_type { + FLDR, + PLDR, + FASTBOOT, +}; + void t7xx_md_exception_handshake(struct t7xx_modem *md); void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id); int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev); int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev); void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev); void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev); -int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev); -int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev); +int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type); int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev); #endif /* __T7XX_MODEM_OPS_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 10a8c1080b102a..e556e5bd49abcf 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -69,6 +69,7 @@ static ssize_t t7xx_mode_store(struct device *dev, { struct t7xx_pci_dev *t7xx_dev; struct pci_dev *pdev; + enum t7xx_mode mode; int index = 0; pdev = to_pci_dev(dev); @@ 
-76,12 +77,22 @@ static ssize_t t7xx_mode_store(struct device *dev, if (!t7xx_dev) return -ENODEV; + mode = READ_ONCE(t7xx_dev->mode); + index = sysfs_match_string(t7xx_mode_names, buf); + if (index == mode) + return -EBUSY; + if (index == T7XX_FASTBOOT_SWITCHING) { + if (mode == T7XX_FASTBOOT_DOWNLOAD) + return count; + WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING); + pm_runtime_resume(dev); + t7xx_reset_device(t7xx_dev, FASTBOOT); } else if (index == T7XX_RESET) { - WRITE_ONCE(t7xx_dev->mode, T7XX_RESET); - t7xx_acpi_pldr_func(t7xx_dev); + pm_runtime_resume(dev); + t7xx_reset_device(t7xx_dev, PLDR); } return count; @@ -446,7 +457,7 @@ static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3) if (is_d3) { t7xx_mhccif_init(t7xx_dev); - return t7xx_pci_pm_reinit(t7xx_dev); + t7xx_pci_pm_reinit(t7xx_dev); } return 0; @@ -481,6 +492,33 @@ static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event) return ret; } +int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev) +{ + enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode); + int ret; + + if (mode == T7XX_FASTBOOT_DOWNLOAD) + pm_runtime_put_noidle(&t7xx_dev->pdev->dev); + + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); + if (ret) + return ret; + + return 0; +} + +int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot) +{ + int ret; + + ret = t7xx_pcie_reinit(t7xx_dev, boot); + if (ret) + return ret; + + t7xx_clear_rgu_irq(t7xx_dev); + return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START); +} + static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check) { struct t7xx_pci_dev *t7xx_dev; @@ -507,16 +545,11 @@ static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check) if (prev_state == PM_RESUME_REG_STATE_L3 || (prev_state == PM_RESUME_REG_STATE_INIT && atr_reg_val == ATR_SRC_ADDR_INVALID)) { - ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); - if (ret) - return ret; - - ret = t7xx_pcie_reinit(t7xx_dev, true); + ret = t7xx_pci_reprobe_early(t7xx_dev); if (ret) return ret; - t7xx_clear_rgu_irq(t7xx_dev); - return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START); + return t7xx_pci_reprobe(t7xx_dev, true); } if (prev_state == PM_RESUME_REG_STATE_EXP || diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h index 49a11586d8d844..cd8ea17c26441c 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.h +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -133,4 +133,7 @@ int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_en void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev); void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode); +int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot); +int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev); + #endif /* __T7XX_PCI_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c index 7d6388bf1d7c31..35743e7de0c306 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -553,7 +553,6 @@ static int t7xx_proxy_alloc(struct t7xx_modem *md) md->port_prox = port_prox; port_prox->dev = dev; - t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY); return 0; } diff --git a/drivers/net/wwan/t7xx/t7xx_port_trace.c b/drivers/net/wwan/t7xx/t7xx_port_trace.c index 6a3f3638586557..4ed8b4e29bf1d9 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_trace.c +++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c @@ -59,6 +59,7 @@ static void 
t7xx_trace_port_uninit(struct t7xx_port *port) relay_close(relaych); debugfs_remove_recursive(debugfs_dir); + port->log.relaych = NULL; } static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb) diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c index 9889ca4621cf58..3931c7a13f5ab2 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -213,16 +213,6 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm fsm_finish_command(ctl, cmd, 0); } -static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id) -{ - u32 value; - - value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); - value &= ~HOST_EVENT_MASK; - value |= FIELD_PREP(HOST_EVENT_MASK, event_id); - iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); -} - static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status) { struct t7xx_modem *md = ctl->md; @@ -264,8 +254,14 @@ static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) { + enum t7xx_mode mode; + ctl->curr_state = FSM_STATE_STOPPED; + mode = READ_ONCE(ctl->md->t7xx_dev->mode); + if (mode == T7XX_FASTBOOT_DOWNLOAD || mode == T7XX_FASTBOOT_DUMP) + return 0; + t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED); return t7xx_md_reset(ctl->md->t7xx_dev); } @@ -284,8 +280,6 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma { struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev; - enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode); - int err; if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { fsm_finish_command(ctl, cmd, -EINVAL); @@ -296,21 +290,10 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); t7xx_cldma_stop(md_ctrl); - if (mode == T7XX_FASTBOOT_SWITCHING) - t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY); - t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP); /* Wait for the DRM disable to take effect */ msleep(FSM_DRM_DISABLE_DELAY_MS); - if (mode == T7XX_FASTBOOT_SWITCHING) { - t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); - } else { - err = t7xx_acpi_fldr_func(t7xx_dev); - if (err) - t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); - } - fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); } @@ -414,7 +397,9 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command case T7XX_DEV_STAGE_LK: dev_dbg(dev, "LK_STAGE Entered\n"); + t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY); t7xx_lk_stage_event_handling(ctl, status); + break; case T7XX_DEV_STAGE_LINUX: @@ -436,6 +421,9 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command } finish_command: + if (ret) + t7xx_mode_update(md->t7xx_dev, T7XX_UNKNOWN); + fsm_finish_command(ctl, cmd, ret); } diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index ff96f22648efde..45ddce35f6d2c6 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -95,7 +95,7 @@ static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data, static void xenvif_flush_hash(struct xenvif *vif) { - struct xenvif_hash_cache_entry *entry; + struct xenvif_hash_cache_entry *entry, *n; unsigned long flags; 
if (xenvif_hash_cache_size == 0) @@ -103,8 +103,7 @@ static void xenvif_flush_hash(struct xenvif *vif) spin_lock_irqsave(&vif->hash.cache.lock, flags); - list_for_each_entry_rcu(entry, &vif->hash.cache.list, link, - lockdep_is_held(&vif->hash.cache.lock)) { + list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) { list_del_rcu(&entry->link); vif->hash.cache.count--; kfree_rcu(entry, rcu); diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c index e83f65596a88a0..93094418fd247e 100644 --- a/drivers/nfc/nfcmrvl/fw_dnld.c +++ b/drivers/nfc/nfcmrvl/fw_dnld.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include #include #include diff --git a/drivers/nfc/nxp-nci/firmware.c b/drivers/nfc/nxp-nci/firmware.c index 119bf305c64285..381b5bb754779c 100644 --- a/drivers/nfc/nxp-nci/firmware.c +++ b/drivers/nfc/nxp-nci/firmware.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "nxp-nci.h" diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index a8aced0b801004..049662ffdf9729 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index a187f0e0b0f7d1..ffd7367ce11945 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -254,7 +254,6 @@ struct pn533_acr122_ccid_hdr { * byte for reposnse msg */ u8 params[3]; - u8 data[]; /* payload */ } __packed; struct pn533_acr122_apdu_hdr { diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index e2a6575b9ff7f8..a0dfb3f98d5a9f 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c index d702bee7808266..ed6f4adc613033 100644 --- a/drivers/ntb/core.c +++ b/drivers/ntb/core.c @@ -72,7 +72,7 @@ MODULE_VERSION(DRIVER_VERSION); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESCRIPTION); -static struct bus_type ntb_bus; +static const struct bus_type ntb_bus; static void ntb_dev_release(struct device *dev); int __ntb_register_client(struct ntb_client *client, struct module *mod, @@ -298,7 +298,7 @@ static void ntb_dev_release(struct device *dev) complete(&ntb->released); } -static struct bus_type ntb_bus = { +static const struct bus_type ntb_bus = { .name = "ntb", .probe = ntb_probe, .remove = ntb_remove, diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c index b640aa0bf45e61..00f0e78f685bf7 100644 --- a/drivers/ntb/hw/epf/ntb_hw_epf.c +++ b/drivers/ntb/hw/epf/ntb_hw_epf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * Host side endpoint driver to implement Non-Transparent Bridge functionality * * Copyright (C) 2020 Texas Instruments diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index 48dfb1a69a77e5..6fc9dfe8247477 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -2547,7 +2547,7 @@ static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev) */ /* - * idt_check_setup() - Check whether the IDT PCIe-swtich is properly + * idt_check_setup() - Check whether the IDT PCIe-switch is properly * pre-initialized * @pdev: Pointer to the PCI device descriptor * diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 9ab836d0d4f12d..079b8cd7978573 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -778,7 +778,7 
@@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev) ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->ntb.pdev), debugfs_dir); - if (!ndev->debugfs_dir) + if (IS_ERR(ndev->debugfs_dir)) ndev->debugfs_info = NULL; else ndev->debugfs_info = diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index 31946387badf0e..ad1786be2554b3 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -1554,6 +1554,7 @@ static void switchtec_ntb_remove(struct device *dev) switchtec_ntb_deinit_db_msg_irq(sndev); switchtec_ntb_deinit_shared_mw(sndev); switchtec_ntb_deinit_crosslink(sndev); + cancel_work_sync(&sndev->check_link_status_work); kfree(sndev); dev_info(dev, "ntb device unregistered\n"); } diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 77e55debeed61b..a22ea4a4b202bd 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -314,7 +314,7 @@ static void ntb_transport_bus_remove(struct device *dev) put_device(dev); } -static struct bus_type ntb_transport_bus = { +static const struct bus_type ntb_transport_bus = { .name = "ntb_transport", .match = ntb_transport_bus_match, .probe = ntb_transport_bus_probe, @@ -377,6 +377,8 @@ EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev); * @device_name: Name of NTB client device * * Register an NTB client device with the NTB transport layer + * + * Returns: %0 on success or -errno code on error */ int ntb_transport_register_client_dev(char *device_name) { @@ -807,16 +809,29 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) } static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, - struct device *dma_dev, size_t align) + struct device *ntb_dev, size_t align) { dma_addr_t dma_addr; void *alloc_addr, *virt_addr; int rc; - alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size, - &dma_addr, GFP_KERNEL); + /* + * The buffer here is allocated against the NTB device. The reason to + * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer + * backing the NTB BAR for the remote host to write to. During receive + * processing, the data is being copied out of the receive buffer to + * the kernel skbuff. When a DMA device is being used, dma_map_page() + * is called on the kvaddr of the receive buffer (from dma_alloc_*()) + * and remapped against the DMA device. It appears to be a double + * DMA mapping of buffers, but first is mapped to the NTB device and + * second is to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary + * in order for the later dma_map_page() to not fail. 
+ */ + alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size, + &dma_addr, GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); if (!alloc_addr) { - dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n", + dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n", mw->alloc_size); return -ENOMEM; } @@ -845,7 +860,7 @@ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, return 0; err: - dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr); + dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr); return rc; } @@ -1966,9 +1981,9 @@ static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node) /** * ntb_transport_create_queue - Create a new NTB transport layer queue - * @rx_handler: receive callback function - * @tx_handler: transmit callback function - * @event_handler: event callback function + * @data: pointer for callback data + * @client_dev: &struct device pointer + * @handlers: pointer to various ntb queue (callback) handlers * * Create a new NTB transport layer queue and provide the queue with a callback * routine for both transmit and receive. The receive callback routine will be diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 553f1f46bc664f..72bc1d017a46ee 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -1227,7 +1227,7 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf, "\tOut buffer addr 0x%pK\n", peer->outbuf); pos += scnprintf(buf + pos, buf_size - pos, - "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr); + "\tOut buff phys addr %pap\n", &peer->out_phys_addr); pos += scnprintf(buf + pos, buf_size - pos, "\tOut buffer size %pa\n", &peer->outbuf_size); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index d6d558f94d6bb2..55cfbf1e0a9561 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1612,9 +1612,6 @@ static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id) { int i; - if (!pmem_id) - return -ENODEV; - for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); @@ -1790,9 +1787,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, case -EINVAL: dev_dbg(&nd_region->dev, "invalid label(s)\n"); break; - case -ENODEV: - dev_dbg(&nd_region->dev, "label not found\n"); - break; default: dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc); break; @@ -1937,12 +1931,16 @@ static int cmp_dpa(const void *a, const void *b) static struct device **scan_labels(struct nd_region *nd_region) { int i, count = 0; - struct device *dev, **devs = NULL; + struct device *dev, **devs; struct nd_label_ent *label_ent, *e; struct nd_mapping *nd_mapping = &nd_region->mapping[0]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; + devs = kcalloc(2, sizeof(dev), GFP_KERNEL); + if (!devs) + return NULL; + /* "safe" because create_namespace_pmem() might list_move() label_ent */ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; @@ -1961,12 +1959,14 @@ static struct device **scan_labels(struct nd_region *nd_region) goto err; if (i < count) continue; - __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); - if (!__devs) - goto err; - memcpy(__devs, devs, sizeof(dev) * count); - kfree(devs); - devs = __devs; + if (count) { + __devs = kcalloc(count + 2, sizeof(dev), 
GFP_KERNEL); + if (!__devs) + goto err; + memcpy(__devs, devs, sizeof(dev) * count); + kfree(devs); + devs = __devs; + } dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); if (IS_ERR(dev)) { @@ -1974,9 +1974,6 @@ static struct device **scan_labels(struct nd_region *nd_region) case -EAGAIN: /* skip invalid labels */ continue; - case -ENODEV: - /* fallthrough to seed creation */ - break; default: goto err; } @@ -1993,11 +1990,6 @@ static struct device **scan_labels(struct nd_region *nd_region) /* Publish a zero-sized namespace for userspace to configure. */ nd_mapping_free_labels(nd_mapping); - - devs = kcalloc(2, sizeof(dev), GFP_KERNEL); - if (!devs) - goto err; - nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); if (!nspm) goto err; @@ -2036,11 +2028,10 @@ static struct device **scan_labels(struct nd_region *nd_region) return devs; err: - if (devs) { - for (i = 0; devs[i]; i++) - namespace_pmem_release(devs[i]); - kfree(devs); - } + for (i = 0; devs[i]; i++) + namespace_pmem_release(devs[i]); + kfree(devs); + return NULL; } diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c index 35c8fbbba10ed3..f55d60922b87d3 100644 --- a/drivers/nvdimm/nd_virtio.c +++ b/drivers/nvdimm/nd_virtio.c @@ -44,6 +44,15 @@ static int virtio_pmem_flush(struct nd_region *nd_region) unsigned long flags; int err, err1; + /* + * Don't bother to submit the request to the device if the device is + * not activated. + */ + if (vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_NEEDS_RESET) { + dev_info(&vdev->dev, "virtio pmem device needs a reset\n"); + return -EIO; + } + might_sleep(); req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 403384f25ce3f1..b4a1cf70e8b703 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -47,7 +47,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, priv); - is_volatile = !!of_find_property(np, "volatile", NULL); + is_volatile = of_property_read_bool(np, "volatile"); dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n", is_volatile ? 
"volatile" : "non-volatile", np); diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c index a3455f1d67fae2..9b7126e1a19d9e 100644 --- a/drivers/nvme/common/auth.c +++ b/drivers/nvme/common/auth.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c index 6f7e7a8fa5ae47..ed5167f942d89b 100644 --- a/drivers/nvme/common/keyring.c +++ b/drivers/nvme/common/keyring.c @@ -20,6 +20,28 @@ key_serial_t nvme_keyring_id(void) } EXPORT_SYMBOL_GPL(nvme_keyring_id); +static bool nvme_tls_psk_revoked(struct key *psk) +{ + return test_bit(KEY_FLAG_REVOKED, &psk->flags) || + test_bit(KEY_FLAG_INVALIDATED, &psk->flags); +} + +struct key *nvme_tls_key_lookup(key_serial_t key_id) +{ + struct key *key = key_lookup(key_id); + + if (IS_ERR(key)) { + pr_err("key id %08x not found\n", key_id); + return key; + } + if (nvme_tls_psk_revoked(key)) { + pr_err("key id %08x revoked\n", key_id); + return ERR_PTR(-EKEYREVOKED); + } + return key; +} +EXPORT_SYMBOL_GPL(nvme_tls_key_lookup); + static void nvme_tls_psk_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); @@ -36,14 +58,12 @@ static bool nvme_tls_psk_match(const struct key *key, pr_debug("%s: no key description\n", __func__); return false; } - match_len = strlen(key->description); - pr_debug("%s: id %s len %zd\n", __func__, key->description, match_len); - if (!match_data->raw_data) { pr_debug("%s: no match data\n", __func__); return false; } match_id = match_data->raw_data; + match_len = strlen(match_id); pr_debug("%s: match '%s' '%s' len %zd\n", __func__, match_id, key->description, match_len); return !memcmp(key->description, match_id, match_len); @@ -71,7 +91,7 @@ static struct key_type nvme_tls_psk_key_type = { static struct key *nvme_tls_psk_lookup(struct key *keyring, const char *hostnqn, const char *subnqn, - int hmac, bool generated) + u8 hmac, u8 psk_ver, bool generated) { char *identity; size_t identity_len = (NVMF_NQN_SIZE) * 2 + 11; @@ -82,8 +102,8 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring, if (!identity) return ERR_PTR(-ENOMEM); - snprintf(identity, identity_len, "NVMe0%c%02d %s %s", - generated ? 'G' : 'R', hmac, hostnqn, subnqn); + snprintf(identity, identity_len, "NVMe%u%c%02u %s %s", + psk_ver, generated ? 'G' : 'R', hmac, hostnqn, subnqn); if (!keyring) keyring = nvme_keyring; @@ -107,21 +127,38 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring, /* * NVMe PSK priority list * - * 'Retained' PSKs (ie 'generated == false') - * should be preferred to 'generated' PSKs, - * and SHA-384 should be preferred to SHA-256. + * 'Retained' PSKs (ie 'generated == false') should be preferred to 'generated' + * PSKs, PSKs with hash (psk_ver 1) should be preferred to PSKs without hash + * (psk_ver 0), and SHA-384 should be preferred to SHA-256. 
*/ static struct nvme_tls_psk_priority_list { bool generated; + u8 psk_ver; enum nvme_tcp_tls_cipher cipher; } nvme_tls_psk_prio[] = { { .generated = false, + .psk_ver = 1, + .cipher = NVME_TCP_TLS_CIPHER_SHA384, }, + { .generated = false, + .psk_ver = 1, + .cipher = NVME_TCP_TLS_CIPHER_SHA256, }, + { .generated = false, + .psk_ver = 0, .cipher = NVME_TCP_TLS_CIPHER_SHA384, }, { .generated = false, + .psk_ver = 0, + .cipher = NVME_TCP_TLS_CIPHER_SHA256, }, + { .generated = true, + .psk_ver = 1, + .cipher = NVME_TCP_TLS_CIPHER_SHA384, }, + { .generated = true, + .psk_ver = 1, .cipher = NVME_TCP_TLS_CIPHER_SHA256, }, { .generated = true, + .psk_ver = 0, .cipher = NVME_TCP_TLS_CIPHER_SHA384, }, { .generated = true, + .psk_ver = 0, .cipher = NVME_TCP_TLS_CIPHER_SHA256, }, }; @@ -137,10 +174,11 @@ key_serial_t nvme_tls_psk_default(struct key *keyring, for (prio = 0; prio < ARRAY_SIZE(nvme_tls_psk_prio); prio++) { bool generated = nvme_tls_psk_prio[prio].generated; + u8 ver = nvme_tls_psk_prio[prio].psk_ver; enum nvme_tcp_tls_cipher cipher = nvme_tls_psk_prio[prio].cipher; tls_key = nvme_tls_psk_lookup(keyring, hostnqn, subnqn, - cipher, generated); + cipher, ver, generated); if (!IS_ERR(tls_key)) { tls_key_id = tls_key->serial; key_put(tls_key); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index a3caef75aa0a83..486afe59818454 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -41,6 +41,7 @@ config NVME_HWMON config NVME_FABRICS select NVME_CORE + select NVME_KEYRING if NVME_TCP_TLS tristate config NVME_RDMA @@ -94,7 +95,6 @@ config NVME_TCP config NVME_TCP_TLS bool "NVMe over Fabrics TCP TLS encryption support" depends on NVME_TCP - select NVME_KEYRING select NET_HANDSHAKE select KEYS help @@ -109,6 +109,7 @@ config NVME_HOST_AUTH bool "NVMe over Fabrics In-Band Authentication in host side" depends on NVME_CORE select NVME_AUTH + select NVME_KEYRING if NVME_TCP_TLS help This provides support for NVMe over Fabrics In-Band Authentication in host side. diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index 371e14f0a20392..5ea0e21709da37 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include "nvme.h" diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 983909a600adb8..43d73d31c66f36 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4,6 +4,7 @@ * Copyright (c) 2011-2014, Intel Corporation. 
*/ +#include #include #include #include @@ -21,7 +22,7 @@ #include #include #include -#include +#include #include "nvme.h" #include "fabrics.h" @@ -987,8 +988,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); cmnd->rw.reftag = 0; - cmnd->rw.apptag = 0; - cmnd->rw.appmask = 0; + cmnd->rw.lbat = 0; + cmnd->rw.lbatm = 0; if (ns->head->ms) { /* @@ -2467,11 +2468,6 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) if (ret) return ret; - /* Flush write to device (required if transport is PCI) */ - ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); - if (ret) - return ret; - /* CAP value may change after initial CC write */ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); if (ret) @@ -4040,6 +4036,35 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) } } +/** + * struct async_scan_info - keeps track of controller & NSIDs to scan + * @ctrl: Controller on which namespaces are being scanned + * @next_nsid: Index of next NSID to scan in ns_list + * @ns_list: Pointer to list of NSIDs to scan + * + * Note: There is a single async_scan_info structure shared by all instances + * of nvme_scan_ns_async() scanning a given controller, so the atomic + * operations on next_nsid are critical to ensure each instance scans a unique + * NSID. + */ +struct async_scan_info { + struct nvme_ctrl *ctrl; + atomic_t next_nsid; + __le32 *ns_list; +}; + +static void nvme_scan_ns_async(void *data, async_cookie_t cookie) +{ + struct async_scan_info *scan_info = data; + int idx; + u32 nsid; + + idx = (u32)atomic_fetch_inc(&scan_info->next_nsid); + nsid = le32_to_cpu(scan_info->ns_list[idx]); + + nvme_scan_ns(scan_info->ctrl, nsid); +} + static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid) { @@ -4066,11 +4091,15 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) __le32 *ns_list; u32 prev = 0; int ret = 0, i; + ASYNC_DOMAIN(domain); + struct async_scan_info scan_info; ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); if (!ns_list) return -ENOMEM; + scan_info.ctrl = ctrl; + scan_info.ns_list = ns_list; for (;;) { struct nvme_command cmd = { .identify.opcode = nvme_admin_identify, @@ -4086,19 +4115,23 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) goto free; } + atomic_set(&scan_info.next_nsid, 0); for (i = 0; i < nr_entries; i++) { u32 nsid = le32_to_cpu(ns_list[i]); if (!nsid) /* end of the list? 
*/ goto out; - nvme_scan_ns(ctrl, nsid); + async_schedule_domain(nvme_scan_ns_async, &scan_info, + &domain); while (++prev < nsid) nvme_ns_remove_by_nsid(ctrl, prev); } + async_synchronize_full_domain(&domain); } out: nvme_remove_invalid_namespaces(ctrl, prev); free: + async_synchronize_full_domain(&domain); kfree(ns_list); return ret; } @@ -4568,7 +4601,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, set->flags = BLK_MQ_F_SHOULD_MERGE; if (ctrl->ops->flags & NVME_F_BLOCKING) set->flags |= BLK_MQ_F_BLOCKING; - set->cmd_size = cmd_size, + set->cmd_size = cmd_size; set->driver_data = ctrl; set->nr_hw_queues = ctrl->queue_count - 1; set->timeout = NVME_IO_TIMEOUT; @@ -4678,7 +4711,6 @@ static void nvme_free_ctrl(struct device *dev) if (!subsys || ctrl->instance != subsys->instance) ida_free(&nvme_instance_ida, ctrl->instance); - key_put(ctrl->tls_key); nvme_free_cels(ctrl); nvme_mpath_uninit(ctrl); cleanup_srcu_struct(&ctrl->srcu); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index f5f545fa010354..432efcbf9e2f5f 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -665,7 +665,7 @@ static struct key *nvmf_parse_key(int key_id) return ERR_PTR(-EINVAL); } - key = key_lookup(key_id); + key = nvme_tls_key_lookup(key_id); if (IS_ERR(key)) pr_err("key id %08x not found\n", key_id); else diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c index 1d1b6441a3398d..105d6cb41c7264 100644 --- a/drivers/nvme/host/fault_inject.c +++ b/drivers/nvme/host/fault_inject.c @@ -6,6 +6,7 @@ */ #include +#include #include "nvme.h" static DECLARE_FAULT_ATTR(fail_default_attr); diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 8df73a0b3980cd..89a1a1043d63b0 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include "nvme.h" diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index f1d58e70933f54..b9b79ccfabf8ab 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -3,7 +3,7 @@ * Copyright (c) 2011-2014, Intel Corporation. * Copyright (c) 2017-2021 Christoph Hellwig. */ -#include +#include #include /* for force_successful_syscall_return */ #include #include @@ -119,9 +119,14 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, struct request_queue *q = req->q; struct nvme_ns *ns = q->queuedata; struct block_device *bdev = ns ? 
ns->disk->part0 : NULL; + bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk); + bool has_metadata = meta_buffer && meta_len; struct bio *bio = NULL; int ret; + if (has_metadata && !supports_metadata) + return -EINVAL; + if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) { struct iov_iter iter; @@ -143,15 +148,14 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, goto out; bio = req->bio; - if (bdev) { + if (bdev) bio_set_dev(bio, bdev); - if (meta_buffer && meta_len) { - ret = bio_integrity_map_user(bio, meta_buffer, meta_len, - meta_seed); - if (ret) - goto out_unmap; - req->cmd_flags |= REQ_INTEGRITY; - } + + if (has_metadata) { + ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len, + meta_seed); + if (ret) + goto out_unmap; } return ret; @@ -260,8 +264,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.control = cpu_to_le16(io.control); c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); c.rw.reftag = cpu_to_le32(io.reftag); - c.rw.apptag = cpu_to_le16(io.apptag); - c.rw.appmask = cpu_to_le16(io.appmask); + c.rw.lbat = cpu_to_le16(io.apptag); + c.rw.lbatm = cpu_to_le16(io.appmask); return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata, meta_len, lower_32_bits(io.slba), NULL, 0, 0); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 518e22dd4f9bea..48e7a8906d0121 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -421,6 +421,9 @@ static bool nvme_available_path(struct nvme_ns_head *head) { struct nvme_ns *ns; + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) + return NULL; + list_for_each_entry_rcu(ns, &head->list, siblings) { if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) continue; @@ -648,7 +651,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) rc = device_add_disk(&head->subsys->dev, head->disk, nvme_ns_attr_groups); if (rc) { - clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); + clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); return; } nvme_add_ns_head_cdev(head); @@ -969,11 +972,16 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) { if (!head->disk) return; - kblockd_schedule_work(&head->requeue_work); - if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { nvme_cdev_del(&head->cdev, &head->cdev_device); del_gendisk(head->disk); } + /* + * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared + * to allow multipath to fail all I/O. 
+ */ + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); } void nvme_mpath_remove_disk(struct nvme_ns_head *head) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index da57947130cc7a..313a4f978a2cf3 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -90,6 +90,11 @@ enum nvme_quirks { */ NVME_QUIRK_NO_DEEPEST_PS = (1 << 5), + /* + * Problems seen with concurrent commands + */ + NVME_QUIRK_QDEPTH_ONE = (1 << 6), + /* * Set MEDIUM priority on SQ creation */ @@ -372,7 +377,7 @@ struct nvme_ctrl { struct nvme_dhchap_key *ctrl_key; u16 transaction; #endif - struct key *tls_key; + key_serial_t tls_pskid; /* Power saving configuration */ u64 ps_max_latency_us; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c0533f3f64cbad..7990c3f22ecf66 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2563,15 +2563,8 @@ static int nvme_pci_enable(struct nvme_dev *dev) else dev->io_sqes = NVME_NVM_IOSQES; - /* - * Temporary fix for the Apple controller found in the MacBook8,1 and - * some MacBook7,1 to avoid controller resets and data loss. - */ - if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { + if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) { dev->q_depth = 2; - dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " - "set queue depth=%u to work around controller resets\n", - dev->q_depth); } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && (pdev->device == 0xa821 || pdev->device == 0xa822) && NVME_CAP_MQES(dev->ctrl.cap) == 0) { @@ -3442,6 +3435,8 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_BOGUS_NID, }, { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ + .driver_data = NVME_QUIRK_QDEPTH_ONE }, { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_BOGUS_NID, }, @@ -3576,7 +3571,12 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), - .driver_data = NVME_QUIRK_SINGLE_VECTOR }, + /* + * Fix for the Apple controller found in the MacBook8,1 and + * some MacBook7,1 to avoid controller resets and data loss. 
+ */ + .driver_data = NVME_QUIRK_SINGLE_VECTOR | + NVME_QUIRK_QDEPTH_ONE }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), .driver_data = NVME_QUIRK_SINGLE_VECTOR | diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index 7347ddf85f00b3..dc7922f226004f 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -5,7 +5,7 @@ */ #include #include -#include +#include #include "nvme.h" diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 2eb33842f9711a..24a2759798d01e 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -1363,8 +1363,8 @@ static void nvme_rdma_set_sig_domain(struct blk_integrity *bi, if (control & NVME_RW_PRINFO_PRCHK_REF) domain->sig.dif.ref_remap = true; - domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); - domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); + domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat); + domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm); domain->sig.dif.app_escape = true; if (pi_type == NVME_NS_DPS_PI_TYPE3) domain->sig.dif.ref_escape = true; @@ -1496,7 +1496,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, req->metadata_sgl->sg_table.sgl = (struct scatterlist *)(req->metadata_sgl + 1); ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, - blk_rq_count_integrity_sg(rq->q, rq->bio), + rq->nr_integrity_segments, req->metadata_sgl->sg_table.sgl, NVME_INLINE_METADATA_SG_CNT); if (unlikely(ret)) { @@ -1504,8 +1504,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, goto out_unmap_sg; } - req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, - rq->bio, req->metadata_sgl->sg_table.sgl); + req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq, + req->metadata_sgl->sg_table.sgl); *pi_count = ib_dma_map_sg(ibdev, req->metadata_sgl->sg_table.sgl, req->metadata_sgl->nents, @@ -1876,6 +1876,8 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) */ priv.hrqsize = cpu_to_le16(queue->queue_size); priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); + /* cntlid should only be set when creating an I/O queue */ + priv.cntlid = cpu_to_le16(ctrl->ctrl.cntlid); } ret = rdma_connect_locked(queue->cm_id, &param); diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index ba05faaac562dc..b68a9e5f1ea395 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -664,19 +664,6 @@ static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); #endif -#ifdef CONFIG_NVME_TCP_TLS -static ssize_t tls_key_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - if (!ctrl->tls_key) - return 0; - return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key)); -} -static DEVICE_ATTR_RO(tls_key); -#endif - static struct attribute *nvme_dev_attrs[] = { &dev_attr_reset_controller.attr, &dev_attr_rescan_controller.attr, @@ -703,9 +690,6 @@ static struct attribute *nvme_dev_attrs[] = { #ifdef CONFIG_NVME_HOST_AUTH &dev_attr_dhchap_secret.attr, &dev_attr_dhchap_ctrl_secret.attr, -#endif -#ifdef CONFIG_NVME_TCP_TLS - &dev_attr_tls_key.attr, #endif &dev_attr_adm_passthru_err_log_enabled.attr, NULL @@ -737,11 +721,6 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
return 0; #endif -#ifdef CONFIG_NVME_TCP_TLS - if (a == &dev_attr_tls_key.attr && - (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))) - return 0; -#endif return a->mode; } @@ -752,8 +731,78 @@ const struct attribute_group nvme_dev_attrs_group = { }; EXPORT_SYMBOL_GPL(nvme_dev_attrs_group); +#ifdef CONFIG_NVME_TCP_TLS +static ssize_t tls_key_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (!ctrl->tls_pskid) + return 0; + return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid); +} +static DEVICE_ATTR_RO(tls_key); + +static ssize_t tls_configured_key_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct key *key = ctrl->opts->tls_key; + + return sysfs_emit(buf, "%08x\n", key_serial(key)); +} +static DEVICE_ATTR_RO(tls_configured_key); + +static ssize_t tls_keyring_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct key *keyring = ctrl->opts->keyring; + + return sysfs_emit(buf, "%s\n", keyring->description); +} +static DEVICE_ATTR_RO(tls_keyring); + +static struct attribute *nvme_tls_attrs[] = { + &dev_attr_tls_key.attr, + &dev_attr_tls_configured_key.attr, + &dev_attr_tls_keyring.attr, + NULL, +}; + +static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp")) + return 0; + + if (a == &dev_attr_tls_key.attr && + !ctrl->opts->tls) + return 0; + if (a == &dev_attr_tls_configured_key.attr && + !ctrl->opts->tls_key) + return 0; + if (a == &dev_attr_tls_keyring.attr && + !ctrl->opts->keyring) + return 0; + + return a->mode; +} + +const struct attribute_group nvme_tls_attrs_group = { + .attrs = nvme_tls_attrs, + .is_visible = nvme_tls_attrs_are_visible, +}; +#endif + const struct attribute_group *nvme_dev_attr_groups[] = { &nvme_dev_attrs_group, +#ifdef CONFIG_NVME_TCP_TLS + &nvme_tls_attrs_group, +#endif NULL, }; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index a2a47d3ab99f0d..89c44413c59394 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -165,6 +165,7 @@ struct nvme_tcp_queue { bool hdr_digest; bool data_digest; + bool tls_enabled; struct ahash_request *rcv_hash; struct ahash_request *snd_hash; __le32 exp_ddgst; @@ -213,7 +214,21 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) return queue - queue->ctrl->queues; } -static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl) +/* + * Check if the queue is TLS encrypted + */ +static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) +{ + if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) + return 0; + + return queue->tls_enabled; +} + +/* + * Check if TLS is configured for the controller. 
+ */ +static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl) { if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) return 0; @@ -368,7 +383,7 @@ static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue) static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) { - return !nvme_tcp_tls(&queue->ctrl->ctrl) && + return !nvme_tcp_queue_tls(queue) && nvme_tcp_queue_has_pending(queue); } @@ -1051,7 +1066,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) else msg.msg_flags |= MSG_MORE; - if (!sendpage_ok(page)) + if (!sendpages_ok(page, len, offset)) msg.msg_flags &= ~MSG_SPLICE_PAGES; bvec_set_page(&bvec, page, len, offset); @@ -1427,7 +1442,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) memset(&msg, 0, sizeof(msg)); iov.iov_base = icresp; iov.iov_len = sizeof(*icresp); - if (nvme_tcp_tls(&queue->ctrl->ctrl)) { + if (nvme_tcp_queue_tls(queue)) { msg.msg_control = cbuf; msg.msg_controllen = sizeof(cbuf); } @@ -1439,7 +1454,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) goto free_icresp; } ret = -ENOTCONN; - if (nvme_tcp_tls(&queue->ctrl->ctrl)) { + if (nvme_tcp_queue_tls(queue)) { ctype = tls_get_record_type(queue->sock->sk, (struct cmsghdr *)cbuf); if (ctype != TLS_RECORD_TYPE_DATA) { @@ -1581,13 +1596,16 @@ static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid) goto out_complete; } - tls_key = key_lookup(pskid); + tls_key = nvme_tls_key_lookup(pskid); if (IS_ERR(tls_key)) { dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n", qid, pskid); queue->tls_err = -ENOKEY; } else { - ctrl->ctrl.tls_key = tls_key; + queue->tls_enabled = true; + if (qid == 0) + ctrl->ctrl.tls_pskid = key_serial(tls_key); + key_put(tls_key); queue->tls_err = 0; } @@ -1768,7 +1786,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, } /* If PSKs are configured try to start TLS */ - if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) { + if (nvme_tcp_tls_configured(nctrl) && pskid) { ret = nvme_tcp_start_tls(nctrl, queue, pskid); if (ret) goto err_init_connect; @@ -1829,6 +1847,8 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) mutex_lock(&queue->queue_lock); if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) __nvme_tcp_stop_queue(queue); + /* Stopping the queue will disable TLS */ + queue->tls_enabled = false; mutex_unlock(&queue->queue_lock); } @@ -1925,16 +1945,17 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) int ret; key_serial_t pskid = 0; - if (nvme_tcp_tls(ctrl)) { + if (nvme_tcp_tls_configured(ctrl)) { if (ctrl->opts->tls_key) pskid = key_serial(ctrl->opts->tls_key); - else + else { pskid = nvme_tls_psk_default(ctrl->opts->keyring, ctrl->opts->host->nqn, ctrl->opts->subsysnqn); - if (!pskid) { - dev_err(ctrl->device, "no valid PSK found\n"); - return -ENOKEY; + if (!pskid) { + dev_err(ctrl->device, "no valid PSK found\n"); + return -ENOKEY; + } } } @@ -1957,13 +1978,14 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) { int i, ret; - if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) { + if (nvme_tcp_tls_configured(ctrl) && !ctrl->tls_pskid) { dev_err(ctrl->device, "no PSK negotiated\n"); return -ENOKEY; } + for (i = 1; i < ctrl->queue_count; i++) { ret = nvme_tcp_alloc_queue(ctrl, i, - key_serial(ctrl->tls_key)); + ctrl->tls_pskid); if (ret) goto out_free_queues; } @@ -2144,6 +2166,11 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, if (remove) nvme_unquiesce_admin_queue(ctrl); nvme_tcp_destroy_admin_queue(ctrl, 
remove); + if (ctrl->tls_pskid) { + dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n", + ctrl->tls_pskid); + ctrl->tls_pskid = 0; + } } static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c index 0288315f005028..87c437fc070d12 100644 --- a/drivers/nvme/host/trace.c +++ b/drivers/nvme/host/trace.c @@ -4,7 +4,7 @@ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH */ -#include +#include #include "trace.h" static const char *nvme_trace_delete_sq(struct trace_seq *p, u8 *cdw10) diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 85006b2df8ae08..081f0473cd9ea1 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include "nvmet.h" u32 nvmet_get_log_page_len(struct nvme_command *cmd) @@ -1015,8 +1015,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_admin_cmd(req); - if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) return nvmet_parse_discovery_cmd(req); diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index 8bc3f431c77f60..29f8639cfe7f69 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include "nvmet.h" @@ -25,6 +25,18 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, unsigned char key_hash; char *dhchap_secret; + if (!strlen(secret)) { + if (set_ctrl) { + kfree(host->dhchap_ctrl_secret); + host->dhchap_ctrl_secret = NULL; + host->dhchap_ctrl_key_hash = 0; + } else { + kfree(host->dhchap_secret); + host->dhchap_secret = NULL; + host->dhchap_key_hash = 0; + } + return 0; + } if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1) return -EINVAL; if (key_hash > 3) { diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 1eff8ca6a5f12a..ade285308450d9 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include @@ -578,8 +578,8 @@ static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi, if (control & NVME_RW_PRINFO_PRCHK_REF) domain->sig.dif.ref_remap = true; - domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); - domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); + domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat); + domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm); domain->sig.dif.app_escape = true; if (pi_type == NVME_NS_DPS_PI_TYPE3) domain->sig.dif.ref_escape = true; diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c index 8d1806a828879a..9a3548179a8e36 100644 --- a/drivers/nvme/target/trace.c +++ b/drivers/nvme/target/trace.c @@ -4,7 +4,7 @@ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH */ -#include +#include #include "trace.h" static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10) diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index 283134498fbc33..d2c384f58028dc 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -363,8 +363,7 @@ config NVMEM_SUNXI_SID config NVMEM_U_BOOT_ENV tristate "U-Boot environment variables support" depends on OF && MTD - select CRC32 - select GENERIC_NET_UTILS + select NVMEM_LAYOUT_U_BOOT_ENV help U-Boot stores its setup as environment variables. 
This driver adds support for verifying & exporting such data. It also exposes variables diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c index cf920542f939ea..1ba49449769874 100644 --- a/drivers/nvmem/imx-ocotp-ele.c +++ b/drivers/nvmem/imx-ocotp-ele.c @@ -14,8 +14,9 @@ #include enum fuse_type { - FUSE_FSB = 1, - FUSE_ELE = 2, + FUSE_FSB = BIT(0), + FUSE_ELE = BIT(1), + FUSE_ECC = BIT(2), FUSE_INVALID = -1 }; @@ -93,7 +94,10 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz continue; } - *buf++ = readl_relaxed(reg + (i << 2)); + if (type & FUSE_ECC) + *buf++ = readl_relaxed(reg + (i << 2)) & GENMASK(15, 0); + else + *buf++ = readl_relaxed(reg + (i << 2)); } memcpy(val, (u8 *)p, bytes); @@ -155,8 +159,30 @@ static const struct ocotp_devtype_data imx93_ocotp_data = { }, }; +static const struct ocotp_devtype_data imx95_ocotp_data = { + .reg_off = 0x8000, + .reg_read = imx_ocotp_reg_read, + .size = 2048, + .num_entry = 12, + .entry = { + { 0, 1, FUSE_FSB | FUSE_ECC }, + { 7, 1, FUSE_FSB | FUSE_ECC }, + { 9, 3, FUSE_FSB | FUSE_ECC }, + { 12, 24, FUSE_FSB }, + { 36, 2, FUSE_FSB | FUSE_ECC }, + { 38, 14, FUSE_FSB }, + { 63, 1, FUSE_ELE }, + { 128, 16, FUSE_ELE }, + { 188, 1, FUSE_ELE }, + { 317, 2, FUSE_FSB | FUSE_ECC }, + { 320, 7, FUSE_FSB }, + { 328, 184, FUSE_FSB } + }, +}; + static const struct of_device_id imx_ele_ocotp_dt_ids[] = { { .compatible = "fsl,imx93-ocotp", .data = &imx93_ocotp_data, }, + { .compatible = "fsl,imx95-ocotp", .data = &imx95_ocotp_data, }, {}, }; MODULE_DEVICE_TABLE(of, imx_ele_ocotp_dt_ids); diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c index 77a4119efea8f8..65d39e19f6eca4 100644 --- a/drivers/nvmem/layouts.c +++ b/drivers/nvmem/layouts.c @@ -123,7 +123,7 @@ static int nvmem_layout_bus_populate(struct nvmem_device *nvmem, int ret; /* Make sure it has a compatible property */ - if (!of_get_property(layout_dn, "compatible", NULL)) { + if (!of_property_present(layout_dn, "compatible")) { pr_debug("%s() - skipping %pOF, no compatible prop\n", __func__, layout_dn); return 0; diff --git a/drivers/nvmem/layouts/Kconfig b/drivers/nvmem/layouts/Kconfig index 9c6e672fc3509f..5e586dfebe47b1 100644 --- a/drivers/nvmem/layouts/Kconfig +++ b/drivers/nvmem/layouts/Kconfig @@ -26,6 +26,17 @@ config NVMEM_LAYOUT_ONIE_TLV If unsure, say N. +config NVMEM_LAYOUT_U_BOOT_ENV + tristate "U-Boot environment variables layout" + select CRC32 + select GENERIC_NET_UTILS + help + U-Boot stores its setup as environment variables. This driver adds + support for verifying & exporting such data. It also exposes variables + as NVMEM cells so they can be referenced by other drivers. + + If unsure, say N. 
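As an aside (not part of this patch): the help text above says the parsed environment variables are exposed as NVMEM cells that other drivers can reference. A minimal sketch of such a consumer, assuming a hypothetical consumer-side cell id "mac-address" and using only the standard NVMEM consumer API:

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* Hypothetical consumer: read a MAC address exposed as an NVMEM cell. */
static int example_get_env_mac(struct device *dev, u8 mac[ETH_ALEN])
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;
	int ret = 0;

	cell = nvmem_cell_get(dev, "mac-address");	/* assumed cell id */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len == ETH_ALEN)
		ether_addr_copy(mac, buf);
	else
		ret = -EINVAL;

	kfree(buf);
	return ret;
}

Note that the u-boot-env layout's ethaddr read_post_process hook converts the printable "xx:xx:xx:xx:xx:xx" value into a 6-byte binary MAC, so a consumer reading that cell sees ETH_ALEN bytes rather than the raw string.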
+ endmenu endif diff --git a/drivers/nvmem/layouts/Makefile b/drivers/nvmem/layouts/Makefile index 2974bd7d33eda3..4940c9db066564 100644 --- a/drivers/nvmem/layouts/Makefile +++ b/drivers/nvmem/layouts/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_NVMEM_LAYOUT_SL28_VPD) += sl28vpd.o obj-$(CONFIG_NVMEM_LAYOUT_ONIE_TLV) += onie-tlv.o +obj-$(CONFIG_NVMEM_LAYOUT_U_BOOT_ENV) += u-boot-env.o diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c new file mode 100644 index 00000000000000..731e6f4f12b2bf --- /dev/null +++ b/drivers/nvmem/layouts/u-boot-env.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 - 2023 Rafał Miłecki + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "u-boot-env.h" + +struct u_boot_env_image_single { + __le32 crc32; + uint8_t data[]; +} __packed; + +struct u_boot_env_image_redundant { + __le32 crc32; + u8 mark; + uint8_t data[]; +} __packed; + +struct u_boot_env_image_broadcom { + __le32 magic; + __le32 len; + __le32 crc32; + DECLARE_FLEX_ARRAY(uint8_t, data); +} __packed; + +static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index, + unsigned int offset, void *buf, size_t bytes) +{ + u8 mac[ETH_ALEN]; + + if (bytes != 3 * ETH_ALEN - 1) + return -EINVAL; + + if (!mac_pton(buf, mac)) + return -EINVAL; + + if (index) + eth_addr_add(mac, index); + + ether_addr_copy(buf, mac); + + return 0; +} + +static int u_boot_env_parse_cells(struct device *dev, struct nvmem_device *nvmem, uint8_t *buf, + size_t data_offset, size_t data_len) +{ + char *data = buf + data_offset; + char *var, *value, *eq; + + for (var = data; + var < data + data_len && *var; + var = value + strlen(value) + 1) { + struct nvmem_cell_info info = {}; + + eq = strchr(var, '='); + if (!eq) + break; + *eq = '\0'; + value = eq + 1; + + info.name = devm_kstrdup(dev, var, GFP_KERNEL); + if (!info.name) + return -ENOMEM; + info.offset = data_offset + value - data; + info.bytes = strlen(value); + info.np = of_get_child_by_name(dev->of_node, info.name); + if (!strcmp(var, "ethaddr")) { + info.raw_len = strlen(value); + info.bytes = ETH_ALEN; + info.read_post_process = u_boot_env_read_post_process_ethaddr; + } + + nvmem_add_one_cell(nvmem, &info); + } + + return 0; +} + +int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem, + enum u_boot_env_format format) +{ + size_t crc32_data_offset; + size_t crc32_data_len; + size_t crc32_offset; + __le32 *crc32_addr; + size_t data_offset; + size_t data_len; + size_t dev_size; + uint32_t crc32; + uint32_t calc; + uint8_t *buf; + int bytes; + int err; + + dev_size = nvmem_dev_size(nvmem); + + buf = kzalloc(dev_size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + + bytes = nvmem_device_read(nvmem, 0, dev_size, buf); + if (bytes < 0) { + err = bytes; + goto err_kfree; + } else if (bytes != dev_size) { + err = -EIO; + goto err_kfree; + } + + switch (format) { + case U_BOOT_FORMAT_SINGLE: + crc32_offset = offsetof(struct u_boot_env_image_single, crc32); + crc32_data_offset = offsetof(struct u_boot_env_image_single, data); + data_offset = offsetof(struct u_boot_env_image_single, data); + break; + case U_BOOT_FORMAT_REDUNDANT: + crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32); + crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data); + data_offset = offsetof(struct u_boot_env_image_redundant, data); + break; + case U_BOOT_FORMAT_BROADCOM: + crc32_offset = offsetof(struct 
u_boot_env_image_broadcom, crc32); + crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data); + data_offset = offsetof(struct u_boot_env_image_broadcom, data); + break; + } + + if (dev_size < data_offset) { + dev_err(dev, "Device too small for u-boot-env\n"); + err = -EIO; + goto err_kfree; + } + + crc32_addr = (__le32 *)(buf + crc32_offset); + crc32 = le32_to_cpu(*crc32_addr); + crc32_data_len = dev_size - crc32_data_offset; + data_len = dev_size - data_offset; + + calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L; + if (calc != crc32) { + dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32); + err = -EINVAL; + goto err_kfree; + } + + buf[dev_size - 1] = '\0'; + err = u_boot_env_parse_cells(dev, nvmem, buf, data_offset, data_len); + +err_kfree: + kfree(buf); +err_out: + return err; +} +EXPORT_SYMBOL_GPL(u_boot_env_parse); + +static int u_boot_env_add_cells(struct nvmem_layout *layout) +{ + struct device *dev = &layout->dev; + enum u_boot_env_format format; + + format = (uintptr_t)device_get_match_data(dev); + + return u_boot_env_parse(dev, layout->nvmem, format); +} + +static int u_boot_env_probe(struct nvmem_layout *layout) +{ + layout->add_cells = u_boot_env_add_cells; + + return nvmem_layout_register(layout); +} + +static void u_boot_env_remove(struct nvmem_layout *layout) +{ + nvmem_layout_unregister(layout); +} + +static const struct of_device_id u_boot_env_of_match_table[] = { + { .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, }, + { .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, }, + { .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, }, + { .compatible = "brcm,env", .data = (void *)U_BOOT_FORMAT_BROADCOM, }, + {}, +}; + +static struct nvmem_layout_driver u_boot_env_layout = { + .driver = { + .name = "u-boot-env-layout", + .of_match_table = u_boot_env_of_match_table, + }, + .probe = u_boot_env_probe, + .remove = u_boot_env_remove, +}; +module_nvmem_layout_driver(u_boot_env_layout); + +MODULE_AUTHOR("Rafał Miłecki"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table); +MODULE_DESCRIPTION("NVMEM layout driver for U-Boot environment variables"); diff --git a/drivers/nvmem/layouts/u-boot-env.h b/drivers/nvmem/layouts/u-boot-env.h new file mode 100644 index 00000000000000..dd5f280ac047b6 --- /dev/null +++ b/drivers/nvmem/layouts/u-boot-env.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H +#define _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H + +enum u_boot_env_format { + U_BOOT_FORMAT_SINGLE, + U_BOOT_FORMAT_REDUNDANT, + U_BOOT_FORMAT_BROADCOM, +}; + +int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem, + enum u_boot_env_format format); + +#endif /* ifndef _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H */ diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c index 38f5d9df39cd53..30d55b111a644c 100644 --- a/drivers/nvmem/sunplus-ocotp.c +++ b/drivers/nvmem/sunplus-ocotp.c @@ -159,7 +159,6 @@ static int sp_ocotp_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct nvmem_device *nvmem; struct sp_ocotp_priv *otp; - struct resource *res; int ret; otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL); @@ -168,13 +167,11 @@ static int sp_ocotp_probe(struct platform_device *pdev) otp->dev = dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hb_gpio"); - otp->base[HB_GPIO] = devm_ioremap_resource(dev, res); + 
otp->base[HB_GPIO] = devm_platform_ioremap_resource_byname(pdev, "hb_gpio"); if (IS_ERR(otp->base[HB_GPIO])) return PTR_ERR(otp->base[HB_GPIO]); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otprx"); - otp->base[OTPRX] = devm_ioremap_resource(dev, res); + otp->base[OTPRX] = devm_platform_ioremap_resource_byname(pdev, "otprx"); if (IS_ERR(otp->base[OTPRX])) return PTR_ERR(otp->base[OTPRX]); diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c index 593f0bf4a395db..ced414fc9e6069 100644 --- a/drivers/nvmem/u-boot-env.c +++ b/drivers/nvmem/u-boot-env.c @@ -3,23 +3,15 @@ * Copyright (C) 2022 Rafał Miłecki */ -#include -#include -#include #include #include #include -#include #include #include #include #include -enum u_boot_env_format { - U_BOOT_FORMAT_SINGLE, - U_BOOT_FORMAT_REDUNDANT, - U_BOOT_FORMAT_BROADCOM, -}; +#include "layouts/u-boot-env.h" struct u_boot_env { struct device *dev; @@ -29,24 +21,6 @@ struct u_boot_env { struct mtd_info *mtd; }; -struct u_boot_env_image_single { - __le32 crc32; - uint8_t data[]; -} __packed; - -struct u_boot_env_image_redundant { - __le32 crc32; - u8 mark; - uint8_t data[]; -} __packed; - -struct u_boot_env_image_broadcom { - __le32 magic; - __le32 len; - __le32 crc32; - DECLARE_FLEX_ARRAY(uint8_t, data); -} __packed; - static int u_boot_env_read(void *context, unsigned int offset, void *val, size_t bytes) { @@ -69,141 +43,6 @@ static int u_boot_env_read(void *context, unsigned int offset, void *val, return 0; } -static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index, - unsigned int offset, void *buf, size_t bytes) -{ - u8 mac[ETH_ALEN]; - - if (bytes != 3 * ETH_ALEN - 1) - return -EINVAL; - - if (!mac_pton(buf, mac)) - return -EINVAL; - - if (index) - eth_addr_add(mac, index); - - ether_addr_copy(buf, mac); - - return 0; -} - -static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf, - size_t data_offset, size_t data_len) -{ - struct nvmem_device *nvmem = priv->nvmem; - struct device *dev = priv->dev; - char *data = buf + data_offset; - char *var, *value, *eq; - - for (var = data; - var < data + data_len && *var; - var = value + strlen(value) + 1) { - struct nvmem_cell_info info = {}; - - eq = strchr(var, '='); - if (!eq) - break; - *eq = '\0'; - value = eq + 1; - - info.name = devm_kstrdup(dev, var, GFP_KERNEL); - if (!info.name) - return -ENOMEM; - info.offset = data_offset + value - data; - info.bytes = strlen(value); - info.np = of_get_child_by_name(dev->of_node, info.name); - if (!strcmp(var, "ethaddr")) { - info.raw_len = strlen(value); - info.bytes = ETH_ALEN; - info.read_post_process = u_boot_env_read_post_process_ethaddr; - } - - nvmem_add_one_cell(nvmem, &info); - } - - return 0; -} - -static int u_boot_env_parse(struct u_boot_env *priv) -{ - struct nvmem_device *nvmem = priv->nvmem; - struct device *dev = priv->dev; - size_t crc32_data_offset; - size_t crc32_data_len; - size_t crc32_offset; - __le32 *crc32_addr; - size_t data_offset; - size_t data_len; - size_t dev_size; - uint32_t crc32; - uint32_t calc; - uint8_t *buf; - int bytes; - int err; - - dev_size = nvmem_dev_size(nvmem); - - buf = kzalloc(dev_size, GFP_KERNEL); - if (!buf) { - err = -ENOMEM; - goto err_out; - } - - bytes = nvmem_device_read(nvmem, 0, dev_size, buf); - if (bytes < 0) { - err = bytes; - goto err_kfree; - } else if (bytes != dev_size) { - err = -EIO; - goto err_kfree; - } - - switch (priv->format) { - case U_BOOT_FORMAT_SINGLE: - crc32_offset = offsetof(struct u_boot_env_image_single, crc32); - 
crc32_data_offset = offsetof(struct u_boot_env_image_single, data); - data_offset = offsetof(struct u_boot_env_image_single, data); - break; - case U_BOOT_FORMAT_REDUNDANT: - crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32); - crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data); - data_offset = offsetof(struct u_boot_env_image_redundant, data); - break; - case U_BOOT_FORMAT_BROADCOM: - crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32); - crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data); - data_offset = offsetof(struct u_boot_env_image_broadcom, data); - break; - } - - if (dev_size < data_offset) { - dev_err(dev, "Device too small for u-boot-env\n"); - err = -EIO; - goto err_kfree; - } - - crc32_addr = (__le32 *)(buf + crc32_offset); - crc32 = le32_to_cpu(*crc32_addr); - crc32_data_len = dev_size - crc32_data_offset; - data_len = dev_size - data_offset; - - calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L; - if (calc != crc32) { - dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32); - err = -EINVAL; - goto err_kfree; - } - - buf[dev_size - 1] = '\0'; - err = u_boot_env_add_cells(priv, buf, data_offset, data_len); - -err_kfree: - kfree(buf); -err_out: - return err; -} - static int u_boot_env_probe(struct platform_device *pdev) { struct nvmem_config config = { @@ -235,7 +74,7 @@ static int u_boot_env_probe(struct platform_device *pdev) if (IS_ERR(priv->nvmem)) return PTR_ERR(priv->nvmem); - return u_boot_env_parse(priv); + return u_boot_env_parse(dev, priv->nvmem, priv->format); } static const struct of_device_id u_boot_env_of_match_table[] = { diff --git a/drivers/of/.kunitconfig b/drivers/of/.kunitconfig index 5a8fee11978c4a..4c53d2c7a2757e 100644 --- a/drivers/of/.kunitconfig +++ b/drivers/of/.kunitconfig @@ -1,3 +1,4 @@ CONFIG_KUNIT=y CONFIG_OF=y CONFIG_OF_KUNIT_TEST=y +CONFIG_OF_OVERLAY_KUNIT_TEST=y diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index dd726c7056bf13..0e2d608c3e207d 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -107,6 +107,16 @@ config OF_OVERLAY While this option is selected automatically when needed, you can enable it manually to improve device tree unit test coverage. +config OF_OVERLAY_KUNIT_TEST + tristate "Device Tree overlay KUnit tests" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + select OF_OVERLAY + help + This option builds KUnit unit tests for the device tree overlay code. + + If unsure, say N here, but this option is safe to enable. 
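For illustration only (not part of this patch), here is a rough sketch of a test case written against the test-managed helper that of_kunit_helpers.c adds later in this series. The overlay blob symbols and the test function name are made up; a real blob would be generated from a .dtso by the dtbo wrapping rule, and the helper's prototype comes from the new KUnit device tree support headers:

#include <kunit/test.h>
#include <linux/of.h>
#include <linux/types.h>

/* Hypothetical overlay blob symbols produced by the dtbo build rule. */
extern uint8_t __dtbo_example_begin[];
extern uint8_t __dtbo_example_end[];

static void example_overlay_apply(struct kunit *test)
{
	u32 size = __dtbo_example_end - __dtbo_example_begin;
	int ovcs_id = 0;

	/*
	 * of_overlay_fdt_apply_kunit() registers a cleanup action, so the
	 * overlay is removed automatically when this test case finishes.
	 */
	KUNIT_ASSERT_EQ(test, 0,
			of_overlay_fdt_apply_kunit(test, __dtbo_example_begin,
						   size, &ovcs_id));
}

The same pattern, via the of_overlay_apply_kunit() wrapper and a built-in kunit_overlay_test blob, is what the new overlay_test.c further down exercises.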
+ config OF_NUMA bool diff --git a/drivers/of/Makefile b/drivers/of/Makefile index 251d3353214825..379a0afcbdc0bf 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -19,6 +19,9 @@ obj-y += kexec.o endif endif +obj-$(CONFIG_KUNIT) += of_kunit_helpers.o obj-$(CONFIG_OF_KUNIT_TEST) += of_test.o +obj-$(CONFIG_OF_OVERLAY_KUNIT_TEST) += overlay-test.o +overlay-test-y := overlay_test.o kunit_overlay_test.dtbo.o obj-$(CONFIG_OF_UNITTEST) += unittest-data/ diff --git a/drivers/of/address.c b/drivers/of/address.c index d669ce25b5f9c1..286f0c161e332f 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -197,6 +198,23 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns, #endif /* CONFIG_PCI */ +static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size) +{ + u64 end = start; + + if (overflows_type(start, r->start)) + return -EOVERFLOW; + if (size && check_add_overflow(end, size - 1, &end)) + return -EOVERFLOW; + if (overflows_type(end, r->end)) + return -EOVERFLOW; + + r->start = start; + r->end = end; + + return 0; +} + /* * of_pci_range_to_resource - Create a resource from an of_pci_range * @range: the PCI range that describes the resource @@ -215,6 +233,7 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns, int of_pci_range_to_resource(struct of_pci_range *range, struct device_node *np, struct resource *res) { + u64 start; int err; res->flags = range->flags; res->parent = res->child = res->sibling = NULL; @@ -231,18 +250,11 @@ int of_pci_range_to_resource(struct of_pci_range *range, err = -EINVAL; goto invalid_range; } - res->start = port; + start = port; } else { - if ((sizeof(resource_size_t) < 8) && - upper_32_bits(range->cpu_addr)) { - err = -EINVAL; - goto invalid_range; - } - - res->start = range->cpu_addr; + start = range->cpu_addr; } - res->end = res->start + range->size - 1; - return 0; + return __of_address_resource_bounds(res, start, range->size); invalid_range: res->start = (resource_size_t)OF_BAD_ADDR; @@ -258,8 +270,8 @@ EXPORT_SYMBOL(of_pci_range_to_resource); * @res: pointer to a valid resource that will be updated to * reflect the values contained in the range. * - * Returns ENOENT if the entry is not found or EINVAL if the range cannot be - * converted to resource. + * Returns -ENOENT if the entry is not found or -EOVERFLOW if the range + * cannot be converted to resource. */ int of_range_to_resource(struct device_node *np, int index, struct resource *res) { @@ -1061,12 +1073,10 @@ static int __of_address_to_resource(struct device_node *dev, int index, int bar_ if (of_mmio_is_nonposted(dev)) flags |= IORESOURCE_MEM_NONPOSTED; - r->start = taddr; - r->end = taddr + size - 1; r->flags = flags; r->name = name ? 
name : dev->full_name; - return 0; + return __of_address_resource_bounds(r, taddr, size); } /** diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 68103ad230eebd..4d528c10df3a9a 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -34,7 +34,7 @@ /* * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by - * cmd_dt_S_dtb in scripts/Makefile.lib + * cmd_wrap_S_dtb in scripts/Makefile.dtbs */ extern uint8_t __dtb_empty_root_begin[]; extern uint8_t __dtb_empty_root_end[]; diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 8fd63100ba8f08..a494f56a0d0ee4 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -357,8 +357,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar addr = of_get_property(device, "reg", &addr_len); /* Prevent out-of-bounds read in case of longer interrupt parent address size */ - if (addr_len > (3 * sizeof(__be32))) - addr_len = 3 * sizeof(__be32); + if (addr_len > sizeof(addr_buf)) + addr_len = sizeof(addr_buf); if (addr) memcpy(addr_buf, addr, addr_len); @@ -429,9 +429,8 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) of_property_read_string_index(dev, "interrupt-names", index, &name); - r->start = r->end = irq; - r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq)); - r->name = name ? name : of_node_full_name(dev); + *r = DEFINE_RES_IRQ_NAMED(irq, name ?: of_node_full_name(dev)); + r->flags |= irq_get_trigger_type(irq); } return irq; @@ -716,8 +715,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, * @np: device node for @dev * @token: bus type for this domain * - * Parse the msi-parent property (both the simple and the complex - * versions), and returns the corresponding MSI domain. + * Parse the msi-parent property and returns the corresponding MSI domain. * * Returns: the MSI domain for this device (or NULL on failure). 
*/ @@ -725,33 +723,14 @@ struct irq_domain *of_msi_get_domain(struct device *dev, struct device_node *np, enum irq_domain_bus_token token) { - struct device_node *msi_np; + struct of_phandle_iterator it; struct irq_domain *d; + int err; - /* Check for a single msi-parent property */ - msi_np = of_parse_phandle(np, "msi-parent", 0); - if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) { - d = irq_find_matching_host(msi_np, token); - if (!d) - of_node_put(msi_np); - return d; - } - - if (token == DOMAIN_BUS_PLATFORM_MSI) { - /* Check for the complex msi-parent version */ - struct of_phandle_args args; - int index = 0; - - while (!of_parse_phandle_with_args(np, "msi-parent", - "#msi-cells", - index, &args)) { - d = irq_find_matching_host(args.np, token); - if (d) - return d; - - of_node_put(args.np); - index++; - } + of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0) { + d = irq_find_matching_host(it.node, token); + if (d) + return d; } return NULL; diff --git a/drivers/of/kunit_overlay_test.dtso b/drivers/of/kunit_overlay_test.dtso new file mode 100644 index 00000000000000..85f20b4b4c1622 --- /dev/null +++ b/drivers/of/kunit_overlay_test.dtso @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&{/} { + kunit-test { + compatible = "test,empty"; + }; +}; diff --git a/drivers/of/of_kunit_helpers.c b/drivers/of/of_kunit_helpers.c new file mode 100644 index 00000000000000..287d6c91bb3721 --- /dev/null +++ b/drivers/of/of_kunit_helpers.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test managed DeviceTree APIs + */ + +#include +#include + +#include +#include +#include + +#if defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE) + +static void of_overlay_fdt_apply_kunit_exit(void *ovcs_id) +{ + of_overlay_remove(ovcs_id); +} + +/** + * of_overlay_fdt_apply_kunit() - Test managed of_overlay_fdt_apply() + * @test: test context + * @overlay_fdt: device tree overlay to apply + * @overlay_fdt_size: size in bytes of @overlay_fdt + * @ovcs_id: identifier of overlay, used to remove the overlay + * + * Just like of_overlay_fdt_apply(), except the overlay is managed by the test + * case and is automatically removed with of_overlay_remove() after the test + * case concludes. + * + * Return: 0 on success, negative errno on failure + */ +int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt, + u32 overlay_fdt_size, int *ovcs_id) +{ + int ret; + int *copy_id; + + copy_id = kunit_kmalloc(test, sizeof(*copy_id), GFP_KERNEL); + if (!copy_id) + return -ENOMEM; + + ret = of_overlay_fdt_apply(overlay_fdt, overlay_fdt_size, + ovcs_id, NULL); + if (ret) + return ret; + + *copy_id = *ovcs_id; + + return kunit_add_action_or_reset(test, of_overlay_fdt_apply_kunit_exit, + copy_id); +} +EXPORT_SYMBOL_GPL(of_overlay_fdt_apply_kunit); + +#endif + +KUNIT_DEFINE_ACTION_WRAPPER(of_node_put_wrapper, of_node_put, struct device_node *); + +/** + * of_node_put_kunit() - Test managed of_node_put() + * @test: test context + * @node: node to pass to `of_node_put()` + * + * Just like of_node_put(), except the node is managed by the test case and is + * automatically put with of_node_put() after the test case concludes. 
+ */ +void of_node_put_kunit(struct kunit *test, struct device_node *node) +{ + if (kunit_add_action(test, of_node_put_wrapper, node)) { + KUNIT_FAIL(test, + "Can't allocate a kunit resource to put of_node\n"); + } +} +EXPORT_SYMBOL_GPL(of_node_put_kunit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Test managed DeviceTree APIs"); diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c index 5949829a1b001b..2ec20886d176c5 100644 --- a/drivers/of/of_numa.c +++ b/drivers/of/of_numa.c @@ -10,6 +10,7 @@ #include #include #include +#include #include @@ -44,7 +45,7 @@ static int __init of_numa_parse_memory_nodes(void) struct device_node *np = NULL; struct resource rsrc; u32 nid; - int i, r; + int i, r = -EINVAL; for_each_node_by_type(np, "memory") { r = of_property_read_u32(np, "numa-node-id", &nid); @@ -71,7 +72,7 @@ static int __init of_numa_parse_memory_nodes(void) } } - return 0; + return r; } static int __init of_numa_parse_distance_map_v1(struct device_node *map) diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 4d861a75d69462..cbdecccca09745 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -472,7 +472,6 @@ static int add_changeset_node(struct overlay_changeset *ovcs, static int build_changeset_next_level(struct overlay_changeset *ovcs, struct target *target, const struct device_node *overlay_node) { - struct device_node *child; struct property *prop; int ret; @@ -485,12 +484,11 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs, } } - for_each_child_of_node(overlay_node, child) { + for_each_child_of_node_scoped(overlay_node, child) { ret = add_changeset_node(ovcs, target, child); if (ret) { pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n", target->np, child, ret); - of_node_put(child); return ret; } } @@ -1078,16 +1076,12 @@ EXPORT_SYMBOL_GPL(of_overlay_fdt_apply); */ static int find_node(struct device_node *tree, struct device_node *np) { - struct device_node *child; - if (tree == np) return 1; - for_each_child_of_node(tree, child) { - if (find_node(child, np)) { - of_node_put(child); + for_each_child_of_node_scoped(tree, child) { + if (find_node(child, np)) return 1; - } } return 0; diff --git a/drivers/of/overlay_test.c b/drivers/of/overlay_test.c new file mode 100644 index 00000000000000..19a292cdeee31b --- /dev/null +++ b/drivers/of/overlay_test.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit tests for device tree overlays + */ +#include +#include +#include +#include +#include + +#include +#include + +static const char * const kunit_node_name = "kunit-test"; +static const char * const kunit_compatible = "test,empty"; + +/* Test that of_overlay_apply_kunit() adds a node to the live tree */ +static void of_overlay_apply_kunit_apply(struct kunit *test) +{ + struct device_node *np; + + KUNIT_ASSERT_EQ(test, 0, + of_overlay_apply_kunit(test, kunit_overlay_test)); + + np = of_find_node_by_name(NULL, kunit_node_name); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np); + of_node_put(np); +} + +/* + * Test that of_overlay_apply_kunit() creates platform devices with the + * expected device_node + */ +static void of_overlay_apply_kunit_platform_device(struct kunit *test) +{ + struct platform_device *pdev; + struct device_node *np; + + KUNIT_ASSERT_EQ(test, 0, + of_overlay_apply_kunit(test, kunit_overlay_test)); + + np = of_find_node_by_name(NULL, kunit_node_name); + of_node_put_kunit(test, np); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np); + + pdev = of_find_device_by_node(np); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, pdev); + if 
(pdev) + put_device(&pdev->dev); +} + +static int of_overlay_bus_match_compatible(struct device *dev, const void *data) +{ + return of_device_is_compatible(dev->of_node, data); +} + +/* Test that of_overlay_apply_kunit() cleans up after the test is finished */ +static void of_overlay_apply_kunit_cleanup(struct kunit *test) +{ + struct kunit fake; + struct platform_device *pdev; + struct device *dev; + struct device_node *np; + + if (!IS_ENABLED(CONFIG_OF_EARLY_FLATTREE)) + kunit_skip(test, "requires CONFIG_OF_EARLY_FLATTREE for root node"); + + kunit_init_test(&fake, "fake test", NULL); + KUNIT_ASSERT_EQ(test, fake.status, KUNIT_SUCCESS); + + KUNIT_ASSERT_EQ(test, 0, + of_overlay_apply_kunit(&fake, kunit_overlay_test)); + + np = of_find_node_by_name(NULL, kunit_node_name); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np); + of_node_put_kunit(test, np); + + pdev = of_find_device_by_node(np); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev); + put_device(&pdev->dev); /* Not derefing 'pdev' after this */ + + /* Remove overlay */ + kunit_cleanup(&fake); + + /* The node and device should be removed */ + np = of_find_node_by_name(NULL, kunit_node_name); + KUNIT_EXPECT_PTR_EQ(test, NULL, np); + of_node_put(np); + + dev = bus_find_device(&platform_bus_type, NULL, kunit_compatible, + of_overlay_bus_match_compatible); + KUNIT_EXPECT_PTR_EQ(test, NULL, dev); + put_device(dev); +} + +static struct kunit_case of_overlay_apply_kunit_test_cases[] = { + KUNIT_CASE(of_overlay_apply_kunit_apply), + KUNIT_CASE(of_overlay_apply_kunit_platform_device), + KUNIT_CASE(of_overlay_apply_kunit_cleanup), + {} +}; + +/* + * Test suite for test managed device tree overlays. + */ +static struct kunit_suite of_overlay_apply_kunit_suite = { + .name = "of_overlay_apply_kunit", + .test_cases = of_overlay_apply_kunit_test_cases, +}; + +kunit_test_suites( + &of_overlay_apply_kunit_suite, +); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("KUnit tests for device tree overlays"); diff --git a/drivers/of/platform.c b/drivers/of/platform.c index ef622d41eb5b2e..9bafcff3e628cb 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -338,7 +338,6 @@ static int of_platform_bus_create(struct device_node *bus, struct device *parent, bool strict) { const struct of_dev_auxdata *auxdata; - struct device_node *child; struct platform_device *dev; const char *bus_id = NULL; void *platform_data = NULL; @@ -382,13 +381,11 @@ static int of_platform_bus_create(struct device_node *bus, if (!dev || !of_match_node(matches, bus)) return 0; - for_each_child_of_node(bus, child) { + for_each_child_of_node_scoped(bus, child) { pr_debug(" create child: %pOF\n", child); rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict); - if (rc) { - of_node_put(child); + if (rc) break; - } } of_node_set_flag(bus, OF_POPULATED_BUS); return rc; @@ -459,7 +456,6 @@ int of_platform_populate(struct device_node *root, const struct of_dev_auxdata *lookup, struct device *parent) { - struct device_node *child; int rc = 0; root = root ? 
of_node_get(root) : of_find_node_by_path("/"); @@ -470,12 +466,10 @@ int of_platform_populate(struct device_node *root, pr_debug(" starting at: %pOF\n", root); device_links_supplier_sync_state_pause(); - for_each_child_of_node(root, child) { + for_each_child_of_node_scoped(root, child) { rc = of_platform_bus_create(child, matches, lookup, parent, true); - if (rc) { - of_node_put(child); + if (rc) break; - } } device_links_supplier_sync_state_resume(); @@ -732,11 +726,14 @@ static int of_platform_notify(struct notifier_block *nb, struct of_reconfig_data *rd = arg; struct platform_device *pdev_parent, *pdev; bool children_left; + struct device_node *parent; switch (of_reconfig_get_state_change(action, rd)) { case OF_RECONFIG_CHANGE_ADD: - /* verify that the parent is a bus */ - if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) + parent = rd->dn->parent; + /* verify that the parent is a bus (or the root node) */ + if (!of_node_is_root(parent) && + !of_node_check_flag(parent, OF_POPULATED_BUS)) return NOTIFY_OK; /* not for us */ /* already populated? (driver using of_populate manually) */ @@ -749,7 +746,7 @@ static int of_platform_notify(struct notifier_block *nb, */ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE; /* pdev_parent may be NULL when no bus platform device */ - pdev_parent = of_find_device_by_node(rd->dn->parent); + pdev_parent = of_find_device_by_node(parent); pdev = of_platform_device_create(rd->dn, NULL, pdev_parent ? &pdev_parent->dev : NULL); platform_device_put(pdev_parent); diff --git a/drivers/of/property.c b/drivers/of/property.c index 164d77cb944585..11b922fde7af16 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -452,12 +452,17 @@ EXPORT_SYMBOL_GPL(of_property_read_string); /** * of_property_match_string() - Find string in a list and return index - * @np: pointer to node containing string list property + * @np: pointer to the node containing the string list property * @propname: string list property name - * @string: pointer to string to search for in string list + * @string: pointer to the string to search for in the string list * - * This function searches a string list property and returns the index - * of a specific string value. + * Search for an exact match of string in a device node property which is a + * string of lists. + * + * Return: the index of the first occurrence of the string on success, -EINVAL + * if the property does not exist, -ENODATA if the property does not have a + * value, and -EILSEQ if the string is not null-terminated within the length of + * the property data. */ int of_property_match_string(const struct device_node *np, const char *propname, const char *string) @@ -773,16 +778,11 @@ EXPORT_SYMBOL(of_graph_get_port_parent); struct device_node *of_graph_get_remote_port_parent( const struct device_node *node) { - struct device_node *np, *pp; - /* Get remote endpoint node. */ - np = of_graph_get_remote_endpoint(node); - - pp = of_graph_get_port_parent(np); + struct device_node *np __free(device_node) = + of_graph_get_remote_endpoint(node); - of_node_put(np); - - return pp; + return of_graph_get_port_parent(np); } EXPORT_SYMBOL(of_graph_get_remote_port_parent); @@ -1064,19 +1064,15 @@ static void of_link_to_phandle(struct device_node *con_np, struct device_node *sup_np, u8 flags) { - struct device_node *tmp_np = of_node_get(sup_np); + struct device_node *tmp_np __free(device_node) = of_node_get(sup_np); /* Check that sup_np and its ancestors are available. 
*/ while (tmp_np) { - if (of_fwnode_handle(tmp_np)->dev) { - of_node_put(tmp_np); + if (of_fwnode_handle(tmp_np)->dev) break; - } - if (!of_device_is_available(tmp_np)) { - of_node_put(tmp_np); + if (!of_device_is_available(tmp_np)) return; - } tmp_np = of_get_next_parent(tmp_np); } @@ -1440,16 +1436,13 @@ static int of_link_property(struct device_node *con_np, const char *prop_name) } while ((phandle = s->parse_prop(con_np, prop_name, i))) { - struct device_node *con_dev_np; + struct device_node *con_dev_np __free(device_node) = + s->get_con_dev ? s->get_con_dev(con_np) : of_node_get(con_np); - con_dev_np = s->get_con_dev - ? s->get_con_dev(con_np) - : of_node_get(con_np); matched = true; i++; of_link_to_phandle(con_dev_np, phandle, s->fwlink_flags); of_node_put(phandle); - of_node_put(con_dev_np); } s++; } diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c index 2780928764a4f1..5cf96776dd7d31 100644 --- a/drivers/of/resolver.c +++ b/drivers/of/resolver.c @@ -150,7 +150,7 @@ static int node_name_cmp(const struct device_node *dn1, static int adjust_local_phandle_references(struct device_node *local_fixups, struct device_node *overlay, int phandle_delta) { - struct device_node *child, *overlay_child; + struct device_node *overlay_child; struct property *prop_fix, *prop; int err, i, count; unsigned int off; @@ -194,7 +194,7 @@ static int adjust_local_phandle_references(struct device_node *local_fixups, * The roots of the subtrees are the overlay's __local_fixups__ node * and the overlay's root node. */ - for_each_child_of_node(local_fixups, child) { + for_each_child_of_node_scoped(local_fixups, child) { for_each_child_of_node(overlay, overlay_child) if (!node_name_cmp(child, overlay_child)) { @@ -202,17 +202,13 @@ static int adjust_local_phandle_references(struct device_node *local_fixups, break; } - if (!overlay_child) { - of_node_put(child); + if (!overlay_child) return -EINVAL; - } err = adjust_local_phandle_references(child, overlay_child, phandle_delta); - if (err) { - of_node_put(child); + if (err) return err; - } } return 0; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index c830f346df4526..daf9a2dddd7e0d 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -900,8 +900,8 @@ static void __init of_unittest_changeset(void) unittest(!of_find_node_by_path("/testcase-data/changeset/n2/n21"), "'%pOF' still present after revert\n", n21); - ppremove = of_find_property(parent, "prop-remove", NULL); - unittest(ppremove, "failed to find removed prop after revert\n"); + unittest(of_property_present(parent, "prop-remove"), + "failed to find removed prop after revert\n"); ret = of_property_read_string(parent, "prop-update", &propstr); unittest(!ret, "failed to find updated prop after revert\n"); @@ -1861,7 +1861,7 @@ static int __init unittest_data_add(void) struct device_node *unittest_data_node = NULL, *np; /* * __dtbo_testcases_begin[] and __dtbo_testcases_end[] are magically - * created by cmd_dt_S_dtbo in scripts/Makefile.lib + * created by cmd_wrap_S_dtbo in scripts/Makefile.dtbs */ extern uint8_t __dtbo_testcases_begin[]; extern uint8_t __dtbo_testcases_end[]; @@ -3525,7 +3525,7 @@ static void __init of_unittest_lifecycle(void) /* * __dtbo_##overlay_name##_begin[] and __dtbo_##overlay_name##_end[] are - * created by cmd_dt_S_dtbo in scripts/Makefile.lib + * created by cmd_wrap_S_dtbo in scripts/Makefile.dtbs */ #define OVERLAY_INFO_EXTERN(overlay_name) \ diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 
ec0056a4bb135e..5f0fb3ea385b2d 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -405,7 +405,7 @@ static struct platform_driver ti_opp_supply_driver = { .probe = ti_opp_supply_probe, .driver = { .name = "ti_opp_supply", - .of_match_table = of_match_ptr(ti_opp_supply_of_match), + .of_match_table = ti_opp_supply_of_match, }, }; module_platform_driver(ti_opp_supply_driver); diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 633266447e2ffa..16f4496bca9511 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -483,7 +483,7 @@ static struct attribute *paths_subsys_attrs[] = { ATTRIBUTE_GROUPS(paths_subsys); /* Specific kobject type for our PDC paths */ -static struct kobj_type ktype_pdcspath = { +static const struct kobj_type ktype_pdcspath = { .sysfs_ops = &pdcspath_attr_ops, .default_groups = paths_subsys_groups, }; diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index aa4d1833f4422b..0d94e4a967d81d 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -143,6 +143,15 @@ config PCI_IOV If unsure, say N. +config PCI_NPEM + bool "Native PCIe Enclosure Management" + depends on LEDS_CLASS=y + help + Support for Native PCIe Enclosure Management. It allows managing LED + indications in storage enclosures. Enclosure must support following + indications: OK, Locate, Fail, Rebuild, other indications are + optional. + config PCI_PRI bool "PCI PRI support" select PCI_ATS diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 8ddad57934a627..374c5c06d92f9d 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o obj-$(CONFIG_VGA_ARB) += vgaarb.o obj-$(CONFIG_PCI_DOE) += doe.o obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o +obj-$(CONFIG_PCI_NPEM) += npem.o # Endpoint library must be initialized before its users obj-$(CONFIG_PCI_ENDPOINT) += endpoint/ diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index c570892b209095..6afff1f1b14301 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -47,6 +47,39 @@ bool pci_ats_supported(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_ats_supported); +/** + * pci_prepare_ats - Setup the PS for ATS + * @dev: the PCI device + * @ps: the IOMMU page shift + * + * This must be done by the IOMMU driver on the PF before any VFs are created to + * ensure that the VF can have ATS enabled. + * + * Returns 0 on success, or negative on failure. + */ +int pci_prepare_ats(struct pci_dev *dev, int ps) +{ + u16 ctrl; + + if (!pci_ats_supported(dev)) + return -EINVAL; + + if (WARN_ON(dev->ats_enabled)) + return -EBUSY; + + if (ps < PCI_ATS_MIN_STU) + return -EINVAL; + + if (dev->is_virtfn) + return 0; + + dev->ats_stu = ps; + ctrl = PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU); + pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl); + return 0; +} +EXPORT_SYMBOL_GPL(pci_prepare_ats); + /** * pci_enable_ats - enable the ATS capability * @dev: the PCI device @@ -455,8 +488,8 @@ void pci_restore_pasid_state(struct pci_dev *pdev) * pci_pasid_features - Check which PASID features are supported * @pdev: PCI device structure * - * Returns a negative value when no PASI capability is present. - * Otherwise is returns a bitmask with supported features. Current + * Return a negative value when no PASID capability is present. + * Otherwise return a bitmask with supported features. 
Current * features reported are: * PCI_PASID_CAP_EXEC - Execute permission supported * PCI_PASID_CAP_PRIV - Privileged mode supported diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 4d2c188f583527..9800b768105402 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -196,7 +196,7 @@ config PCIE_MEDIATEK config PCIE_MEDIATEK_GEN3 tristate "MediaTek Gen3 PCIe controller" - depends on ARCH_MEDIATEK || COMPILE_TEST + depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST depends on PCI_MSI help Adds support for PCIe Gen3 MAC controller for MediaTek SoCs. diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index 1d5a70c9055ed6..8a0044bb398924 100644 --- a/drivers/pci/controller/cadence/Kconfig +++ b/drivers/pci/controller/cadence/Kconfig @@ -38,7 +38,7 @@ config PCIE_CADENCE_PLAT_EP select PCIE_CADENCE_EP select PCIE_CADENCE_PLAT help - Say Y here if you want to support the Cadence PCIe platform controller in + Say Y here if you want to support the Cadence PCIe platform controller in endpoint mode. This PCIe controller may be embedded into many different vendors SoCs. diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 85718246016b73..284f2e0e4d2615 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -7,6 +7,8 @@ */ #include +#include +#include #include #include #include @@ -22,6 +24,8 @@ #include "../../pci.h" #include "pcie-cadence.h" +#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie) + #define ENABLE_REG_SYS_2 0x108 #define STATUS_REG_SYS_2 0x508 #define STATUS_CLR_REG_SYS_2 0x708 @@ -44,6 +48,7 @@ enum link_status { #define J721E_MODE_RC BIT(7) #define LANE_COUNT(n) ((n) << 8) +#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0) #define GENERATION_SEL_MASK GENMASK(1, 0) struct j721e_pcie { @@ -52,6 +57,7 @@ struct j721e_pcie { u32 mode; u32 num_lanes; u32 max_lanes; + struct gpio_desc *reset_gpio; void __iomem *user_cfg_base; void __iomem *intd_cfg_base; u32 linkdown_irq_regfield; @@ -220,6 +226,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, return ret; } +static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie, + struct regmap *syscon) +{ + struct device *dev = pcie->cdns_pcie->dev; + struct device_node *node = dev->of_node; + u32 mask = ACSPCIE_PAD_DISABLE_MASK; + struct of_phandle_args args; + u32 val; + int ret; + + ret = of_parse_phandle_with_fixed_args(node, + "ti,syscon-acspcie-proxy-ctrl", + 1, 0, &args); + if (ret) { + dev_err(dev, + "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n"); + return ret; + } + + /* Clear PAD IO disable bits to enable refclk output */ + val = ~(args.args[0]); + ret = regmap_update_bits(syscon, 0, mask, val); + if (ret) { + dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret); + return ret; + } + + return 0; +} + static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) { struct device *dev = pcie->cdns_pcie->dev; @@ -259,7 +295,13 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) return ret; } - return 0; + /* Enable ACSPCIE refclk output if the optional property exists */ + syscon = syscon_regmap_lookup_by_phandle_optional(node, + "ti,syscon-acspcie-proxy-ctrl"); + if (!syscon) + return 0; + + return j721e_enable_acspcie_refclk(pcie, syscon); } static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn, @@ -482,20 +524,20 @@ static int j721e_pcie_probe(struct 
platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync failed\n"); + dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n"); goto err_get_sync; } ret = j721e_pcie_ctrl_init(pcie); if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync failed\n"); + dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n"); goto err_get_sync; } ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0, "j721e-pcie-link-down-irq", pcie); if (ret < 0) { - dev_err(dev, "failed to request link state IRQ %d\n", irq); + dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq); goto err_get_sync; } @@ -505,42 +547,40 @@ static int j721e_pcie_probe(struct platform_device *pdev) case PCI_MODE_RC: gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod)) { - ret = PTR_ERR(gpiod); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to get reset GPIO\n"); + ret = dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get reset GPIO\n"); goto err_get_sync; } + pcie->reset_gpio = gpiod; ret = cdns_pcie_init_phy(dev, cdns_pcie); if (ret) { - dev_err(dev, "Failed to init phy\n"); + dev_err_probe(dev, ret, "Failed to init phy\n"); goto err_get_sync; } clk = devm_clk_get_optional(dev, "pcie_refclk"); if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - dev_err(dev, "failed to get pcie_refclk\n"); + ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n"); goto err_pcie_setup; } ret = clk_prepare_enable(clk); if (ret) { - dev_err(dev, "failed to enable pcie_refclk\n"); + dev_err_probe(dev, ret, "failed to enable pcie_refclk\n"); goto err_pcie_setup; } pcie->refclk = clk; /* - * "Power Sequencing and Reset Signal Timings" table in - * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0 - * indicates PERST# should be deasserted after minimum of 100us - * once REFCLK is stable. The REFCLK to the connector in RC - * mode is selected while enabling the PHY. So deassert PERST# - * after 100 us. + * The "Power Sequencing and Reset Signal Timings" table of the + * PCI Express Card Electromechanical Specification, Revision + * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST# + * should be deasserted after minimum of 100us once REFCLK is + * stable. The REFCLK to the connector in RC mode is selected + * while enabling the PHY. So deassert PERST# after 100 us. */ if (gpiod) { - usleep_range(100, 200); + fsleep(PCIE_T_PERST_CLK_US); gpiod_set_value_cansleep(gpiod, 1); } @@ -554,7 +594,7 @@ static int j721e_pcie_probe(struct platform_device *pdev) case PCI_MODE_EP: ret = cdns_pcie_init_phy(dev, cdns_pcie); if (ret) { - dev_err(dev, "Failed to init phy\n"); + dev_err_probe(dev, ret, "Failed to init phy\n"); goto err_get_sync; } @@ -589,6 +629,87 @@ static void j721e_pcie_remove(struct platform_device *pdev) pm_runtime_disable(dev); } +static int j721e_pcie_suspend_noirq(struct device *dev) +{ + struct j721e_pcie *pcie = dev_get_drvdata(dev); + + if (pcie->mode == PCI_MODE_RC) { + gpiod_set_value_cansleep(pcie->reset_gpio, 0); + clk_disable_unprepare(pcie->refclk); + } + + cdns_pcie_disable_phy(pcie->cdns_pcie); + + return 0; +} + +static int j721e_pcie_resume_noirq(struct device *dev) +{ + struct j721e_pcie *pcie = dev_get_drvdata(dev); + struct cdns_pcie *cdns_pcie = pcie->cdns_pcie; + int ret; + + ret = j721e_pcie_ctrl_init(pcie); + if (ret < 0) + return ret; + + j721e_pcie_config_link_irq(pcie); + + /* + * This is not called explicitly in the probe, it is called by + * cdns_pcie_init_phy(). 
+ */ + ret = cdns_pcie_enable_phy(pcie->cdns_pcie); + if (ret < 0) + return ret; + + if (pcie->mode == PCI_MODE_RC) { + struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie); + + ret = clk_prepare_enable(pcie->refclk); + if (ret < 0) + return ret; + + /* + * The "Power Sequencing and Reset Signal Timings" table of the + * PCI Express Card Electromechanical Specification, Revision + * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST# + * should be deasserted after minimum of 100us once REFCLK is + * stable. The REFCLK to the connector in RC mode is selected + * while enabling the PHY. So deassert PERST# after 100 us. + */ + if (pcie->reset_gpio) { + fsleep(PCIE_T_PERST_CLK_US); + gpiod_set_value_cansleep(pcie->reset_gpio, 1); + } + + ret = cdns_pcie_host_link_setup(rc); + if (ret < 0) { + clk_disable_unprepare(pcie->refclk); + return ret; + } + + /* + * Reset internal status of BARs to force reinitialization in + * cdns_pcie_host_init(). + */ + for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++) + rc->avail_ib_bar[bar] = true; + + ret = cdns_pcie_host_init(rc); + if (ret) { + clk_disable_unprepare(pcie->refclk); + return ret; + } + } + + return 0; +} + +static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops, + j721e_pcie_suspend_noirq, + j721e_pcie_resume_noirq); + static struct platform_driver j721e_pcie_driver = { .probe = j721e_pcie_probe, .remove_new = j721e_pcie_remove, @@ -596,6 +717,7 @@ static struct platform_driver j721e_pcie_driver = { .name = "j721e-pcie", .of_match_table = of_j721e_pcie_match, .suppress_bind_attrs = true, + .pm = pm_sleep_ptr(&j721e_pcie_pm_ops), }, }; builtin_platform_driver(j721e_pcie_driver); diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index 5b14f7ee3c798e..8af95e9da7cec6 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -485,8 +485,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) return cdns_pcie_host_map_dma_ranges(rc); } -static int cdns_pcie_host_init(struct device *dev, - struct cdns_pcie_rc *rc) +int cdns_pcie_host_init(struct cdns_pcie_rc *rc) { int err; @@ -497,6 +496,30 @@ static int cdns_pcie_host_init(struct device *dev, return cdns_pcie_host_init_address_translation(rc); } +int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct device *dev = rc->pcie.dev; + int ret; + + if (rc->quirk_detect_quiet_flag) + cdns_pcie_detect_quiet_min_delay_set(&rc->pcie); + + cdns_pcie_host_enable_ptm_response(pcie); + + ret = cdns_pcie_start_link(pcie); + if (ret) { + dev_err(dev, "Failed to start link\n"); + return ret; + } + + ret = cdns_pcie_host_start_link(rc); + if (ret) + dev_dbg(dev, "PCIe link never came up\n"); + + return 0; +} + int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) { struct device *dev = rc->pcie.dev; @@ -533,25 +556,14 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) return PTR_ERR(rc->cfg_base); rc->cfg_res = res; - if (rc->quirk_detect_quiet_flag) - cdns_pcie_detect_quiet_min_delay_set(&rc->pcie); - - cdns_pcie_host_enable_ptm_response(pcie); - - ret = cdns_pcie_start_link(pcie); - if (ret) { - dev_err(dev, "Failed to start link\n"); - return ret; - } - - ret = cdns_pcie_host_start_link(rc); + ret = cdns_pcie_host_link_setup(rc); if (ret) - dev_dbg(dev, "PCIe link never came up\n"); + return ret; for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) rc->avail_ib_bar[bar] = true; - ret = 
cdns_pcie_host_init(dev, rc); + ret = cdns_pcie_host_init(rc); if (ret) return ret; diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index 7a66a2f815dcea..f5eeff834ec192 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -314,7 +314,6 @@ struct cdns_pcie { /** * struct cdns_pcie_rc - private data for this PCIe Root Complex driver * @pcie: Cadence PCIe controller - * @dev: pointer to PCIe device * @cfg_res: start/end offsets in the physical system memory to map PCI * configuration space accesses * @cfg_base: IO mapped window to access the PCI configuration space of a @@ -521,10 +520,22 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie) } #ifdef CONFIG_PCIE_CADENCE_HOST +int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc); +int cdns_pcie_host_init(struct cdns_pcie_rc *rc); int cdns_pcie_host_setup(struct cdns_pcie_rc *rc); void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, int where); #else +static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc) +{ + return 0; +} + +static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc) +{ + return 0; +} + static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) { return 0; diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 4c38181acffab4..b6d6778b0698b2 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -265,12 +265,16 @@ config PCIE_DW_PLAT_EP order to enable device-specific features PCI_DW_PLAT_EP must be selected. +config PCIE_QCOM_COMMON + bool + config PCIE_QCOM bool "Qualcomm PCIe controller (host mode)" depends on OF && (ARCH_QCOM || COMPILE_TEST) depends on PCI_MSI select PCIE_DW_HOST select CRC8 + select PCIE_QCOM_COMMON help Say Y here to enable PCIe controller support on Qualcomm SoCs. The PCIe controller uses the DesignWare core plus Qualcomm-specific @@ -281,6 +285,7 @@ config PCIE_QCOM_EP depends on OF && (ARCH_QCOM || COMPILE_TEST) depends on PCI_ENDPOINT select PCIE_DW_EP + select PCIE_QCOM_COMMON help Say Y here to enable support for the PCIe controllers on Qualcomm SoCs to work in endpoint mode. 
The PCIe controller uses the DesignWare core diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index ec215b3d619163..a8308d9ea9861f 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o +obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 4fe3b0cb72ec51..5c62e1a3ba5291 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -850,14 +850,21 @@ static int dra7xx_pcie_probe(struct platform_device *pdev) dra7xx->mode = mode; ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler, - IRQF_SHARED, "dra7xx-pcie-main", dra7xx); + IRQF_SHARED | IRQF_ONESHOT, + "dra7xx-pcie-main", dra7xx); if (ret) { dev_err(dev, "failed to request irq\n"); - goto err_gpio; + goto err_deinit; } return 0; +err_deinit: + if (dra7xx->mode == DW_PCIE_RC_TYPE) + dw_pcie_host_deinit(&dra7xx->pci->pp); + else + dw_pcie_ep_deinit(&dra7xx->pci->ep); + err_gpio: err_get_sync: pm_runtime_put(dev); diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 964d67756eb2be..808d1f10541733 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -54,9 +55,9 @@ #define IMX95_PE0_GEN_CTRL_3 0x1058 #define IMX95_PCIE_LTSSM_EN BIT(0) -#define to_imx6_pcie(x) dev_get_drvdata((x)->dev) +#define to_imx_pcie(x) dev_get_drvdata((x)->dev) -enum imx6_pcie_variants { +enum imx_pcie_variants { IMX6Q, IMX6SX, IMX6QP, @@ -64,6 +65,7 @@ enum imx6_pcie_variants { IMX8MQ, IMX8MM, IMX8MP, + IMX8Q, IMX95, IMX8MQ_EP, IMX8MM_EP, @@ -71,25 +73,25 @@ enum imx6_pcie_variants { IMX95_EP, }; -#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) -#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1) -#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) -#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3) -#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4) -#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5) -#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6) -#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7) +#define IMX_PCIE_FLAG_IMX_PHY BIT(0) +#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1) +#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) +#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3) +#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4) +#define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5) +#define IMX_PCIE_FLAG_HAS_SERDES BIT(6) +#define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7) +#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8) -#define imx6_check_flag(pci, val) (pci->drvdata->flags & val) +#define imx_check_flag(pci, val) (pci->drvdata->flags & val) -#define IMX6_PCIE_MAX_CLKS 6 +#define IMX_PCIE_MAX_CLKS 6 +#define IMX_PCIE_MAX_INSTANCES 2 -#define IMX6_PCIE_MAX_INSTANCES 2 +struct imx_pcie; -struct imx6_pcie; - -struct imx6_pcie_drvdata { - enum imx6_pcie_variants variant; +struct imx_pcie_drvdata { + enum imx_pcie_variants variant; enum dw_pcie_device_mode mode; u32 flags; int dbi_length; @@ -98,17 +100,19 @@ struct imx6_pcie_drvdata { const u32 clks_cnt; const u32 ltssm_off; const u32 ltssm_mask; - const u32 
mode_off[IMX6_PCIE_MAX_INSTANCES]; - const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES]; + const u32 mode_off[IMX_PCIE_MAX_INSTANCES]; + const u32 mode_mask[IMX_PCIE_MAX_INSTANCES]; const struct pci_epc_features *epc_features; - int (*init_phy)(struct imx6_pcie *pcie); + int (*init_phy)(struct imx_pcie *pcie); + int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable); + int (*core_reset)(struct imx_pcie *pcie, bool assert); }; -struct imx6_pcie { +struct imx_pcie { struct dw_pcie *pci; struct gpio_desc *reset_gpiod; bool link_is_up; - struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS]; + struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS]; struct regmap *iomuxc_gpr; u16 msi_ctrl; u32 controller_id; @@ -129,7 +133,7 @@ struct imx6_pcie { /* power domain for pcie phy */ struct device *pd_pcie_phy; struct phy *phy; - const struct imx6_pcie_drvdata *drvdata; + const struct imx_pcie_drvdata *drvdata; }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ @@ -184,28 +188,28 @@ struct imx6_pcie { #define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5) #define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3) -static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) +static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie) { - WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ && - imx6_pcie->drvdata->variant != IMX8MQ_EP && - imx6_pcie->drvdata->variant != IMX8MM && - imx6_pcie->drvdata->variant != IMX8MM_EP && - imx6_pcie->drvdata->variant != IMX8MP && - imx6_pcie->drvdata->variant != IMX8MP_EP); - return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14; + WARN_ON(imx_pcie->drvdata->variant != IMX8MQ && + imx_pcie->drvdata->variant != IMX8MQ_EP && + imx_pcie->drvdata->variant != IMX8MM && + imx_pcie->drvdata->variant != IMX8MM_EP && + imx_pcie->drvdata->variant != IMX8MP && + imx_pcie->drvdata->variant != IMX8MP_EP); + return imx_pcie->controller_id == 1 ? 
IOMUXC_GPR16 : IOMUXC_GPR14; } -static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie) +static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie) { - regmap_update_bits(imx6_pcie->iomuxc_gpr, + regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_0, IMX95_PCIE_PHY_CR_PARA_SEL, IMX95_PCIE_PHY_CR_PARA_SEL); - regmap_update_bits(imx6_pcie->iomuxc_gpr, + regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_PHY_GEN_CTRL, IMX95_PCIE_REF_USE_PAD, 0); - regmap_update_bits(imx6_pcie->iomuxc_gpr, + regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_0, IMX95_PCIE_REF_CLKEN, IMX95_PCIE_REF_CLKEN); @@ -213,9 +217,9 @@ static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie) return 0; } -static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) +static void imx_pcie_configure_type(struct imx_pcie *imx_pcie) { - const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata; + const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; unsigned int mask, val, mode, id; if (drvdata->mode == DW_PCIE_EP_TYPE) @@ -223,7 +227,11 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) else mode = PCI_EXP_TYPE_ROOT_PORT; - id = imx6_pcie->controller_id; + id = imx_pcie->controller_id; + + /* If mode_mask is 0, then generic PHY driver is used to set the mode */ + if (!drvdata->mode_mask[0]) + return; /* If mode_mask[id] is zero, means each controller have its individual gpr */ if (!drvdata->mode_mask[id]) @@ -232,12 +240,12 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) mask = drvdata->mode_mask[id]; val = mode << (ffs(mask) - 1); - regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val); + regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val); } -static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val) +static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val) { - struct dw_pcie *pci = imx6_pcie->pci; + struct dw_pcie *pci = imx_pcie->pci; bool val; u32 max_iterations = 10; u32 wait_counter = 0; @@ -256,9 +264,9 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val) return -ETIMEDOUT; } -static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) +static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr) { - struct dw_pcie *pci = imx6_pcie->pci; + struct dw_pcie *pci = imx_pcie->pci; u32 val; int ret; @@ -268,24 +276,24 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) val |= PCIE_PHY_CTRL_CAP_ADR; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); - ret = pcie_phy_poll_ack(imx6_pcie, true); + ret = pcie_phy_poll_ack(imx_pcie, true); if (ret) return ret; val = PCIE_PHY_CTRL_DATA(addr); dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); - return pcie_phy_poll_ack(imx6_pcie, false); + return pcie_phy_poll_ack(imx_pcie, false); } /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ -static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data) +static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data) { - struct dw_pcie *pci = imx6_pcie->pci; + struct dw_pcie *pci = imx_pcie->pci; u32 phy_ctl; int ret; - ret = pcie_phy_wait_ack(imx6_pcie, addr); + ret = pcie_phy_wait_ack(imx_pcie, addr); if (ret) return ret; @@ -293,7 +301,7 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data) phy_ctl = PCIE_PHY_CTRL_RD; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); - ret = pcie_phy_poll_ack(imx6_pcie, true); + ret = pcie_phy_poll_ack(imx_pcie, true); if (ret) return ret; @@ 
-302,18 +310,18 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data) /* deassert Read signal */ dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); - return pcie_phy_poll_ack(imx6_pcie, false); + return pcie_phy_poll_ack(imx_pcie, false); } -static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) +static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data) { - struct dw_pcie *pci = imx6_pcie->pci; + struct dw_pcie *pci = imx_pcie->pci; u32 var; int ret; /* write addr */ /* cap addr */ - ret = pcie_phy_wait_ack(imx6_pcie, addr); + ret = pcie_phy_wait_ack(imx_pcie, addr); if (ret) return ret; @@ -324,7 +332,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) var |= PCIE_PHY_CTRL_CAP_DAT; dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); - ret = pcie_phy_poll_ack(imx6_pcie, true); + ret = pcie_phy_poll_ack(imx_pcie, true); if (ret) return ret; @@ -333,7 +341,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* wait for ack de-assertion */ - ret = pcie_phy_poll_ack(imx6_pcie, false); + ret = pcie_phy_poll_ack(imx_pcie, false); if (ret) return ret; @@ -342,7 +350,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* wait for ack */ - ret = pcie_phy_poll_ack(imx6_pcie, true); + ret = pcie_phy_poll_ack(imx_pcie, true); if (ret) return ret; @@ -351,7 +359,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); /* wait for ack de-assertion */ - ret = pcie_phy_poll_ack(imx6_pcie, false); + ret = pcie_phy_poll_ack(imx_pcie, false); if (ret) return ret; @@ -360,74 +368,74 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) return 0; } -static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie) +static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie) { /* TODO: Currently this code assumes external oscillator is being used */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, - imx6_pcie_grp_offset(imx6_pcie), + regmap_update_bits(imx_pcie->iomuxc_gpr, + imx_pcie_grp_offset(imx_pcie), IMX8MQ_GPR_PCIE_REF_USE_PAD, IMX8MQ_GPR_PCIE_REF_USE_PAD); /* * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is * supplied by 3.3V, the VREG_BYPASS should be cleared to zero. 
*/ - if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000) - regmap_update_bits(imx6_pcie->iomuxc_gpr, - imx6_pcie_grp_offset(imx6_pcie), + if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000) + regmap_update_bits(imx_pcie->iomuxc_gpr, + imx_pcie_grp_offset(imx_pcie), IMX8MQ_GPR_PCIE_VREG_BYPASS, 0); return 0; } -static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie) +static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie) { - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); return 0; } -static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) +static int imx_pcie_init_phy(struct imx_pcie *imx_pcie) { - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); /* configure constant input signal to the pcie ctrl and phy */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_LOS_LEVEL, 9 << 4); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_DEEMPH_GEN1, - imx6_pcie->tx_deemph_gen1 << 0); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + imx_pcie->tx_deemph_gen1 << 0); + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, - imx6_pcie->tx_deemph_gen2_3p5db << 6); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + imx_pcie->tx_deemph_gen2_3p5db << 6); + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, - imx6_pcie->tx_deemph_gen2_6db << 12); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + imx_pcie->tx_deemph_gen2_6db << 12); + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_SWING_FULL, - imx6_pcie->tx_swing_full << 18); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, + imx_pcie->tx_swing_full << 18); + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8, IMX6Q_GPR8_TX_SWING_LOW, - imx6_pcie->tx_swing_low << 25); + imx_pcie->tx_swing_low << 25); return 0; } -static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie) +static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie) { - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2); - return imx6_pcie_init_phy(imx6_pcie); + return imx_pcie_init_phy(imx_pcie); } -static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) +static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie) { u32 val; - struct device *dev = imx6_pcie->pci->dev; + struct device *dev = imx_pcie->pci->dev; - if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, + if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr, IOMUXC_GPR22, val, val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED, PHY_PLL_LOCK_WAIT_USLEEP_MAX, @@ -435,19 +443,19 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) dev_err(dev, "PCIe PLL lock timeout\n"); } -static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) +static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie) { unsigned long phy_rate = 0; int mult, div; u16 val; int i; - if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) + if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) return 0; - for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++) - 
if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0) - phy_rate = clk_get_rate(imx6_pcie->clks[i].clk); + for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) + if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0) + phy_rate = clk_get_rate(imx_pcie->clks[i].clk); switch (phy_rate) { case 125000000: @@ -465,46 +473,46 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) div = 1; break; default: - dev_err(imx6_pcie->pci->dev, + dev_err(imx_pcie->pci->dev, "Unsupported PHY reference clock rate %lu\n", phy_rate); return -EINVAL; } - pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); + pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK << PCIE_PHY_MPLL_MULTIPLIER_SHIFT); val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT; val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD; - pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); + pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); - pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val); + pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val); val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT); val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT; val |= PCIE_PHY_ATEOVRD_EN; - pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val); + pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val); return 0; } -static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) +static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie) { u16 tmp; - if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) + if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) return; - pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); + pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp); tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); + pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp); usleep_range(2000, 3000); - pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); + pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp); tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); + pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp); } #ifdef CONFIG_ARM @@ -543,22 +551,22 @@ static int imx6q_pcie_abort_handler(unsigned long addr, } #endif -static int imx6_pcie_attach_pd(struct device *dev) +static int imx_pcie_attach_pd(struct device *dev) { - struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + struct imx_pcie *imx_pcie = dev_get_drvdata(dev); struct device_link *link; /* Do nothing when in a single power domain */ if (dev->pm_domain) return 0; - imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); - if (IS_ERR(imx6_pcie->pd_pcie)) - return PTR_ERR(imx6_pcie->pd_pcie); + imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); + if (IS_ERR(imx_pcie->pd_pcie)) + return PTR_ERR(imx_pcie->pd_pcie); /* Do nothing when power domain missing */ - if (!imx6_pcie->pd_pcie) + if (!imx_pcie->pd_pcie) return 0; - link = device_link_add(dev, imx6_pcie->pd_pcie, + link = device_link_add(dev, imx_pcie->pd_pcie, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); @@ -567,11 +575,11 @@ static int imx6_pcie_attach_pd(struct device *dev) return -EINVAL; } - imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy"); - if (IS_ERR(imx6_pcie->pd_pcie_phy)) - return PTR_ERR(imx6_pcie->pd_pcie_phy); + imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy"); + if (IS_ERR(imx_pcie->pd_pcie_phy)) + return PTR_ERR(imx_pcie->pd_pcie_phy); - link = device_link_add(dev, 
imx6_pcie->pd_pcie_phy, + link = device_link_add(dev, imx_pcie->pd_pcie_phy, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); @@ -583,21 +591,20 @@ static int imx6_pcie_attach_pd(struct device *dev) return 0; } -static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) +static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { - unsigned int offset; - int ret = 0; + if (enable) + regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN); - switch (imx6_pcie->drvdata->variant) { - case IMX6SX: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); - break; - case IMX6QP: - case IMX6Q: + return 0; +} + +static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) +{ + if (enable) { /* power up core phy and enable ref clock */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); + regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); /* * the async reset input need ref clock to sync internally, * when the ref clock comes after reset, internal synced @@ -605,71 +612,51 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) * add one ~10us delay here. */ usleep_range(10, 100); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); - break; - case IMX7D: - case IMX95: - case IMX95_EP: - break; - case IMX8MM: - case IMX8MM_EP: - case IMX8MQ: - case IMX8MQ_EP: - case IMX8MP: - case IMX8MP_EP: - offset = imx6_pcie_grp_offset(imx6_pcie); - /* - * Set the over ride low and enabled - * make sure that REF_CLK is turned on. - */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, offset, - IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE, - 0); - regmap_update_bits(imx6_pcie->iomuxc_gpr, offset, - IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN, - IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); - break; + regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); + } else { + regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); + regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); } - return ret; + return 0; } -static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie) +static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { - switch (imx6_pcie->drvdata->variant) { - case IMX6QP: - case IMX6Q: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_REF_CLK_EN, 0); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_TEST_PD, - IMX6Q_GPR1_PCIE_TEST_PD); - break; - case IMX7D: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, - IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); - break; - default: - break; + int offset = imx_pcie_grp_offset(imx_pcie); + + if (enable) { + regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); + regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); } + + return 0; +} + +static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) +{ + if (!enable) + regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); + return 0; } -static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie) +static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie) { - struct dw_pcie *pci = imx6_pcie->pci; + struct dw_pcie *pci = imx_pcie->pci; struct device *dev = pci->dev; int ret; - ret = 
clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks); + ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); if (ret) return ret; - ret = imx6_pcie_enable_ref_clk(imx6_pcie); - if (ret) { - dev_err(dev, "unable to enable pcie ref clock\n"); - goto err_ref_clk; + if (imx_pcie->drvdata->enable_ref_clk) { + ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true); + if (ret) { + dev_err(dev, "Failed to enable PCIe REFCLK\n"); + goto err_ref_clk; + } } /* allow the clocks to stabilize */ @@ -677,99 +664,120 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie) return 0; err_ref_clk: - clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks); + clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); return ret; } -static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) +static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie) { - imx6_pcie_disable_ref_clk(imx6_pcie); - clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks); + if (imx_pcie->drvdata->enable_ref_clk) + imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); + clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); } -static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) +static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) { - reset_control_assert(imx6_pcie->pciephy_reset); - reset_control_assert(imx6_pcie->apps_reset); + if (assert) + regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN); - switch (imx6_pcie->drvdata->variant) { - case IMX6SX: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_TEST_POWERDOWN, - IMX6SX_GPR12_PCIE_TEST_POWERDOWN); - /* Force PCIe PHY reset */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, - IMX6SX_GPR5_PCIE_BTNRST_RESET, - IMX6SX_GPR5_PCIE_BTNRST_RESET); - break; - case IMX6QP: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_SW_RST, - IMX6Q_GPR1_PCIE_SW_RST); - break; - case IMX6Q: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); - break; - default: - break; - } + /* Force PCIe PHY reset */ + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET, + assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0); + return 0; +} - /* Some boards don't have PCIe reset GPIO. */ - gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 1); +static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) +{ + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST, + assert ? IMX6Q_GPR1_PCIE_SW_RST : 0); + if (!assert) + usleep_range(200, 500); + + return 0; } -static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) +static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) { - struct dw_pcie *pci = imx6_pcie->pci; - struct device *dev = pci->dev; + if (!assert) + return 0; - reset_control_deassert(imx6_pcie->pciephy_reset); + regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); + regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); - switch (imx6_pcie->drvdata->variant) { - case IMX7D: - /* Workaround for ERR010728, failure of PCI-e PLL VCO to - * oscillate, especially when cold. This turns off "Duty-cycle - * Corrector" and other mysterious undocumented things. 
- */ - if (likely(imx6_pcie->phy_base)) { - /* De-assert DCC_FB_EN */ - writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, - imx6_pcie->phy_base + PCIE_PHY_CMN_REG4); - /* Assert RX_EQS and RX_EQS_SEL */ - writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL - | PCIE_PHY_CMN_REG24_RX_EQ, - imx6_pcie->phy_base + PCIE_PHY_CMN_REG24); - /* Assert ATT_MODE */ - writel(PCIE_PHY_CMN_REG26_ATT_MODE, - imx6_pcie->phy_base + PCIE_PHY_CMN_REG26); - } else { - dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n"); - } + return 0; +} - imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); - break; - case IMX6SX: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, - IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); - break; - case IMX6QP: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_SW_RST, 0); +static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) +{ + struct dw_pcie *pci = imx_pcie->pci; + struct device *dev = pci->dev; - usleep_range(200, 500); - break; - default: - break; + if (assert) + return 0; + + /* + * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023): + * + * PCIe: PLL may fail to lock under corner conditions. + * + * Initial VCO oscillation may fail under corner conditions such as + * cold temperature which will cause the PCIe PLL fail to lock in the + * initialization phase. + * + * The Duty-cycle Corrector calibration must be disabled. + * + * 1. De-assert the G_RST signal by clearing + * SRC_PCIEPHY_RCR[PCIEPHY_G_RST]. + * 2. De-assert DCC_FB_EN by writing data “0x29” to the register + * address 0x306d0014 (PCIE_PHY_CMN_REG4). + * 3. Assert RX_EQS, RX_EQ_SEL by writing data “0x48” to the register + * address 0x306d0090 (PCIE_PHY_CMN_REG24). + * 4. Assert ATT_MODE by writing data “0xbc” to the register + * address 0x306d0098 (PCIE_PHY_CMN_REG26). + * 5. De-assert the CMN_RST signal by clearing register bit + * SRC_PCIEPHY_RCR[PCIEPHY_BTN] + */ + + if (likely(imx_pcie->phy_base)) { + /* De-assert DCC_FB_EN */ + writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4); + /* Assert RX_EQS and RX_EQS_SEL */ + writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ, + imx_pcie->phy_base + PCIE_PHY_CMN_REG24); + /* Assert ATT_MODE */ + writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26); + } else { + dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n"); } + imx7d_pcie_wait_for_phy_pll_lock(imx_pcie); + return 0; +} + +static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) +{ + reset_control_assert(imx_pcie->pciephy_reset); + reset_control_assert(imx_pcie->apps_reset); + + if (imx_pcie->drvdata->core_reset) + imx_pcie->drvdata->core_reset(imx_pcie, true); + + /* Some boards don't have PCIe reset GPIO. */ + gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1); +} + +static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie) +{ + reset_control_deassert(imx_pcie->pciephy_reset); + + if (imx_pcie->drvdata->core_reset) + imx_pcie->drvdata->core_reset(imx_pcie, false); /* Some boards don't have PCIe reset GPIO. 
*/ - if (imx6_pcie->reset_gpiod) { + if (imx_pcie->reset_gpiod) { msleep(100); - gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 0); + gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0); /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ msleep(100); } @@ -777,9 +785,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) return 0; } -static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) +static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie) { - struct dw_pcie *pci = imx6_pcie->pci; + struct dw_pcie *pci = imx_pcie->pci; struct device *dev = pci->dev; u32 tmp; unsigned int retries; @@ -796,33 +804,38 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) return -ETIMEDOUT; } -static void imx6_pcie_ltssm_enable(struct device *dev) +static void imx_pcie_ltssm_enable(struct device *dev) { - struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata; + struct imx_pcie *imx_pcie = dev_get_drvdata(dev); + const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; + u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP); + u32 tmp; + tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP); + phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp)); if (drvdata->ltssm_mask) - regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask, + regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask, drvdata->ltssm_mask); - reset_control_deassert(imx6_pcie->apps_reset); + reset_control_deassert(imx_pcie->apps_reset); } -static void imx6_pcie_ltssm_disable(struct device *dev) +static void imx_pcie_ltssm_disable(struct device *dev) { - struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata; + struct imx_pcie *imx_pcie = dev_get_drvdata(dev); + const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata; + phy_set_speed(imx_pcie->phy, 0); if (drvdata->ltssm_mask) - regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, + regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask, 0); - reset_control_assert(imx6_pcie->apps_reset); + reset_control_assert(imx_pcie->apps_reset); } -static int imx6_pcie_start_link(struct dw_pcie *pci) +static int imx_pcie_start_link(struct dw_pcie *pci) { - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + struct imx_pcie *imx_pcie = to_imx_pcie(pci); struct device *dev = pci->dev; u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 tmp; @@ -841,18 +854,18 @@ static int imx6_pcie_start_link(struct dw_pcie *pci) dw_pcie_dbi_ro_wr_dis(pci); /* Start LTSSM. 
*/ - imx6_pcie_ltssm_enable(dev); + imx_pcie_ltssm_enable(dev); ret = dw_pcie_wait_for_link(pci); if (ret) goto err_reset_phy; - if (pci->link_gen > 1) { + if (pci->max_link_speed > 1) { /* Allow faster modes after the link is up */ dw_pcie_dbi_ro_wr_en(pci); tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); tmp &= ~PCI_EXP_LNKCAP_SLS; - tmp |= pci->link_gen; + tmp |= pci->max_link_speed; dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); /* @@ -864,8 +877,8 @@ static int imx6_pcie_start_link(struct dw_pcie *pci) dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); dw_pcie_dbi_ro_wr_dis(pci); - if (imx6_pcie->drvdata->flags & - IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) { + if (imx_pcie->drvdata->flags & + IMX_PCIE_FLAG_IMX_SPEED_CHANGE) { /* * On i.MX7, DIRECT_SPEED_CHANGE behaves differently * from i.MX6 family when no link speed transition @@ -875,7 +888,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci) * failure. */ - ret = imx6_pcie_wait_for_speed_change(imx6_pcie); + ret = imx_pcie_wait_for_speed_change(imx_pcie); if (ret) { dev_err(dev, "Failed to bring link up!\n"); goto err_reset_phy; @@ -890,37 +903,37 @@ static int imx6_pcie_start_link(struct dw_pcie *pci) dev_info(dev, "Link: Only Gen1 is enabled\n"); } - imx6_pcie->link_is_up = true; + imx_pcie->link_is_up = true; tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS); return 0; err_reset_phy: - imx6_pcie->link_is_up = false; + imx_pcie->link_is_up = false; dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); - imx6_pcie_reset_phy(imx6_pcie); + imx_pcie_reset_phy(imx_pcie); return 0; } -static void imx6_pcie_stop_link(struct dw_pcie *pci) +static void imx_pcie_stop_link(struct dw_pcie *pci) { struct device *dev = pci->dev; /* Turn off PCIe LTSSM */ - imx6_pcie_ltssm_disable(dev); + imx_pcie_ltssm_disable(dev); } -static int imx6_pcie_host_init(struct dw_pcie_rp *pp) +static int imx_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + struct imx_pcie *imx_pcie = to_imx_pcie(pci); int ret; - if (imx6_pcie->vpcie) { - ret = regulator_enable(imx6_pcie->vpcie); + if (imx_pcie->vpcie) { + ret = regulator_enable(imx_pcie->vpcie); if (ret) { dev_err(dev, "failed to enable vpcie regulator: %d\n", ret); @@ -928,83 +941,105 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) } } - imx6_pcie_assert_core_reset(imx6_pcie); + imx_pcie_assert_core_reset(imx_pcie); - if (imx6_pcie->drvdata->init_phy) - imx6_pcie->drvdata->init_phy(imx6_pcie); + if (imx_pcie->drvdata->init_phy) + imx_pcie->drvdata->init_phy(imx_pcie); - imx6_pcie_configure_type(imx6_pcie); + imx_pcie_configure_type(imx_pcie); - ret = imx6_pcie_clk_enable(imx6_pcie); + ret = imx_pcie_clk_enable(imx_pcie); if (ret) { dev_err(dev, "unable to enable pcie clocks: %d\n", ret); goto err_reg_disable; } - if (imx6_pcie->phy) { - ret = phy_init(imx6_pcie->phy); + if (imx_pcie->phy) { + ret = phy_init(imx_pcie->phy); if (ret) { dev_err(dev, "pcie PHY power up failed\n"); goto err_clk_disable; } - } - if (imx6_pcie->phy) { - ret = phy_power_on(imx6_pcie->phy); + ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); + if (ret) { + dev_err(dev, "unable to set PCIe PHY mode\n"); + goto err_phy_exit; + } + + ret = phy_power_on(imx_pcie->phy); if (ret) { dev_err(dev, "waiting for PHY ready 
timeout!\n"); - goto err_phy_off; + goto err_phy_exit; } } - ret = imx6_pcie_deassert_core_reset(imx6_pcie); + ret = imx_pcie_deassert_core_reset(imx_pcie); if (ret < 0) { dev_err(dev, "pcie deassert core reset failed: %d\n", ret); goto err_phy_off; } - imx6_setup_phy_mpll(imx6_pcie); + imx_setup_phy_mpll(imx_pcie); return 0; err_phy_off: - if (imx6_pcie->phy) - phy_exit(imx6_pcie->phy); + phy_power_off(imx_pcie->phy); +err_phy_exit: + phy_exit(imx_pcie->phy); err_clk_disable: - imx6_pcie_clk_disable(imx6_pcie); + imx_pcie_clk_disable(imx_pcie); err_reg_disable: - if (imx6_pcie->vpcie) - regulator_disable(imx6_pcie->vpcie); + if (imx_pcie->vpcie) + regulator_disable(imx_pcie->vpcie); return ret; } -static void imx6_pcie_host_exit(struct dw_pcie_rp *pp) +static void imx_pcie_host_exit(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + struct imx_pcie *imx_pcie = to_imx_pcie(pci); - if (imx6_pcie->phy) { - if (phy_power_off(imx6_pcie->phy)) + if (imx_pcie->phy) { + if (phy_power_off(imx_pcie->phy)) dev_err(pci->dev, "unable to power off PHY\n"); - phy_exit(imx6_pcie->phy); + phy_exit(imx_pcie->phy); } - imx6_pcie_clk_disable(imx6_pcie); + imx_pcie_clk_disable(imx_pcie); - if (imx6_pcie->vpcie) - regulator_disable(imx6_pcie->vpcie); + if (imx_pcie->vpcie) + regulator_disable(imx_pcie->vpcie); } -static const struct dw_pcie_host_ops imx6_pcie_host_ops = { - .init = imx6_pcie_host_init, - .deinit = imx6_pcie_host_exit, +static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr) +{ + struct imx_pcie *imx_pcie = to_imx_pcie(pcie); + struct dw_pcie_rp *pp = &pcie->pp; + struct resource_entry *entry; + + if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP)) + return cpu_addr; + + entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); + if (!entry) + return cpu_addr; + + return cpu_addr - entry->offset; +} + +static const struct dw_pcie_host_ops imx_pcie_host_ops = { + .init = imx_pcie_host_init, + .deinit = imx_pcie_host_exit, }; static const struct dw_pcie_ops dw_pcie_ops = { - .start_link = imx6_pcie_start_link, - .stop_link = imx6_pcie_stop_link, + .start_link = imx_pcie_start_link, + .stop_link = imx_pcie_stop_link, + .cpu_addr_fixup = imx_pcie_cpu_addr_fixup, }; -static void imx6_pcie_ep_init(struct dw_pcie_ep *ep) +static void imx_pcie_ep_init(struct dw_pcie_ep *ep) { enum pci_barno bar; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -1013,7 +1048,7 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep) dw_pcie_ep_reset_bar(pci, bar); } -static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, +static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, unsigned int type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -1060,35 +1095,35 @@ static const struct pci_epc_features imx95_pcie_epc_features = { }; static const struct pci_epc_features* -imx6_pcie_ep_get_features(struct dw_pcie_ep *ep) +imx_pcie_ep_get_features(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + struct imx_pcie *imx_pcie = to_imx_pcie(pci); - return imx6_pcie->drvdata->epc_features; + return imx_pcie->drvdata->epc_features; } static const struct dw_pcie_ep_ops pcie_ep_ops = { - .init = imx6_pcie_ep_init, - .raise_irq = imx6_pcie_ep_raise_irq, - .get_features = imx6_pcie_ep_get_features, + .init = imx_pcie_ep_init, + .raise_irq = imx_pcie_ep_raise_irq, + .get_features = imx_pcie_ep_get_features, 
}; -static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, +static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, struct platform_device *pdev) { int ret; unsigned int pcie_dbi2_offset; struct dw_pcie_ep *ep; - struct dw_pcie *pci = imx6_pcie->pci; + struct dw_pcie *pci = imx_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = pci->dev; - imx6_pcie_host_init(pp); + imx_pcie_host_init(pp); ep = &pci->ep; ep->ops = &pcie_ep_ops; - switch (imx6_pcie->drvdata->variant) { + switch (imx_pcie->drvdata->variant) { case IMX8MQ_EP: case IMX8MM_EP: case IMX8MP_EP: @@ -1110,9 +1145,11 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, if (device_property_match_string(dev, "reg-names", "dbi2") >= 0) pci->dbi_base2 = NULL; - if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT)) + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT)) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + ep->page_size = imx_pcie->drvdata->epc_features->align; + ret = dw_pcie_ep_init(ep); if (ret) { dev_err(dev, "failed to initialize endpoint\n"); @@ -1129,30 +1166,30 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, pci_epc_init_notify(ep->epc); /* Start LTSSM. */ - imx6_pcie_ltssm_enable(dev); + imx_pcie_ltssm_enable(dev); return 0; } -static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie) +static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie) { - struct device *dev = imx6_pcie->pci->dev; + struct device *dev = imx_pcie->pci->dev; /* Some variants have a turnoff reset in DT */ - if (imx6_pcie->turnoff_reset) { - reset_control_assert(imx6_pcie->turnoff_reset); - reset_control_deassert(imx6_pcie->turnoff_reset); + if (imx_pcie->turnoff_reset) { + reset_control_assert(imx_pcie->turnoff_reset); + reset_control_deassert(imx_pcie->turnoff_reset); goto pm_turnoff_sleep; } /* Others poke directly at IOMUXC registers */ - switch (imx6_pcie->drvdata->variant) { + switch (imx_pcie->drvdata->variant) { case IMX6SX: case IMX6QP: - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF, IMX6SX_GPR12_PCIE_PM_TURN_OFF); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0); break; default: @@ -1171,73 +1208,73 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie) usleep_range(1000, 10000); } -static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save) +static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) { u8 offset; u16 val; - struct dw_pcie *pci = imx6_pcie->pci; + struct dw_pcie *pci = imx_pcie->pci; if (pci_msi_enabled()) { offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); if (save) { val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); - imx6_pcie->msi_ctrl = val; + imx_pcie->msi_ctrl = val; } else { dw_pcie_dbi_ro_wr_en(pci); - val = imx6_pcie->msi_ctrl; + val = imx_pcie->msi_ctrl; dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); dw_pcie_dbi_ro_wr_dis(pci); } } } -static int imx6_pcie_suspend_noirq(struct device *dev) +static int imx_pcie_suspend_noirq(struct device *dev) { - struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - struct dw_pcie_rp *pp = &imx6_pcie->pci->pp; + struct imx_pcie *imx_pcie = dev_get_drvdata(dev); + struct dw_pcie_rp *pp = &imx_pcie->pci->pp; - if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) + if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; - 
imx6_pcie_msi_save_restore(imx6_pcie, true); - imx6_pcie_pm_turnoff(imx6_pcie); - imx6_pcie_stop_link(imx6_pcie->pci); - imx6_pcie_host_exit(pp); + imx_pcie_msi_save_restore(imx_pcie, true); + imx_pcie_pm_turnoff(imx_pcie); + imx_pcie_stop_link(imx_pcie->pci); + imx_pcie_host_exit(pp); return 0; } -static int imx6_pcie_resume_noirq(struct device *dev) +static int imx_pcie_resume_noirq(struct device *dev) { int ret; - struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - struct dw_pcie_rp *pp = &imx6_pcie->pci->pp; + struct imx_pcie *imx_pcie = dev_get_drvdata(dev); + struct dw_pcie_rp *pp = &imx_pcie->pci->pp; - if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) + if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; - ret = imx6_pcie_host_init(pp); + ret = imx_pcie_host_init(pp); if (ret) return ret; - imx6_pcie_msi_save_restore(imx6_pcie, false); + imx_pcie_msi_save_restore(imx_pcie, false); dw_pcie_setup_rc(pp); - if (imx6_pcie->link_is_up) - imx6_pcie_start_link(imx6_pcie->pci); + if (imx_pcie->link_is_up) + imx_pcie_start_link(imx_pcie->pci); return 0; } -static const struct dev_pm_ops imx6_pcie_pm_ops = { - NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq, - imx6_pcie_resume_noirq) +static const struct dev_pm_ops imx_pcie_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq, + imx_pcie_resume_noirq) }; -static int imx6_pcie_probe(struct platform_device *pdev) +static int imx_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_pcie *pci; - struct imx6_pcie *imx6_pcie; + struct imx_pcie *imx_pcie; struct device_node *np; struct resource *dbi_base; struct device_node *node = dev->of_node; @@ -1245,8 +1282,8 @@ static int imx6_pcie_probe(struct platform_device *pdev) u16 val; int i; - imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); - if (!imx6_pcie) + imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL); + if (!imx_pcie) return -ENOMEM; pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); @@ -1255,10 +1292,10 @@ static int imx6_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - pci->pp.ops = &imx6_pcie_host_ops; + pci->pp.ops = &imx_pcie_host_ops; - imx6_pcie->pci = pci; - imx6_pcie->drvdata = of_device_get_match_data(dev); + imx_pcie->pci = pci; + imx_pcie->drvdata = of_device_get_match_data(dev); /* Find the PHY if one is defined, only imx7d uses it */ np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); @@ -1270,9 +1307,9 @@ static int imx6_pcie_probe(struct platform_device *pdev) dev_err(dev, "Unable to map PCIe PHY\n"); return ret; } - imx6_pcie->phy_base = devm_ioremap_resource(dev, &res); - if (IS_ERR(imx6_pcie->phy_base)) - return PTR_ERR(imx6_pcie->phy_base); + imx_pcie->phy_base = devm_ioremap_resource(dev, &res); + if (IS_ERR(imx_pcie->phy_base)) + return PTR_ERR(imx_pcie->phy_base); } pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); @@ -1280,72 +1317,72 @@ static int imx6_pcie_probe(struct platform_device *pdev) return PTR_ERR(pci->dbi_base); /* Fetch GPIOs */ - imx6_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(imx6_pcie->reset_gpiod)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->reset_gpiod), + imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(imx_pcie->reset_gpiod)) + return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod), "unable to get reset gpio\n"); - gpiod_set_consumer_name(imx6_pcie->reset_gpiod, "PCIe reset"); + 
gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); - if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS) + if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS) return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); - for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++) - imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i]; + for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) + imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i]; /* Fetch clocks */ - ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks); + ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks); if (ret) return ret; - if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) { - imx6_pcie->phy = devm_phy_get(dev, "pcie-phy"); - if (IS_ERR(imx6_pcie->phy)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy), + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) { + imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); + if (IS_ERR(imx_pcie->phy)) + return dev_err_probe(dev, PTR_ERR(imx_pcie->phy), "failed to get pcie phy\n"); } - if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) { - imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); - if (IS_ERR(imx6_pcie->apps_reset)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset), + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) { + imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps"); + if (IS_ERR(imx_pcie->apps_reset)) + return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset), "failed to get pcie apps reset control\n"); } - if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) { - imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); - if (IS_ERR(imx6_pcie->pciephy_reset)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset), + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) { + imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); + if (IS_ERR(imx_pcie->pciephy_reset)) + return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset), "Failed to get PCIEPHY reset control\n"); } - switch (imx6_pcie->drvdata->variant) { + switch (imx_pcie->drvdata->variant) { case IMX8MQ: case IMX8MQ_EP: case IMX7D: if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) - imx6_pcie->controller_id = 1; + imx_pcie->controller_id = 1; break; default: break; } /* Grab turnoff reset */ - imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); - if (IS_ERR(imx6_pcie->turnoff_reset)) { + imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); + if (IS_ERR(imx_pcie->turnoff_reset)) { dev_err(dev, "Failed to get TURNOFF reset control\n"); - return PTR_ERR(imx6_pcie->turnoff_reset); + return PTR_ERR(imx_pcie->turnoff_reset); } - if (imx6_pcie->drvdata->gpr) { + if (imx_pcie->drvdata->gpr) { /* Grab GPR config register range */ - imx6_pcie->iomuxc_gpr = - syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr); - if (IS_ERR(imx6_pcie->iomuxc_gpr)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr), + imx_pcie->iomuxc_gpr = + syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr); + if (IS_ERR(imx_pcie->iomuxc_gpr)) + return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), "unable to find iomuxc registers\n"); } - if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) { + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) { void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app"); if (IS_ERR(off)) @@ -1358,59 +1395,59 @@ 
static int imx6_pcie_probe(struct platform_device *pdev) .reg_stride = 4, }; - imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, ®map_config); - if (IS_ERR(imx6_pcie->iomuxc_gpr)) - return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr), + imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, ®map_config); + if (IS_ERR(imx_pcie->iomuxc_gpr)) + return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr), "unable to find iomuxc registers\n"); } /* Grab PCIe PHY Tx Settings */ if (of_property_read_u32(node, "fsl,tx-deemph-gen1", - &imx6_pcie->tx_deemph_gen1)) - imx6_pcie->tx_deemph_gen1 = 0; + &imx_pcie->tx_deemph_gen1)) + imx_pcie->tx_deemph_gen1 = 0; if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", - &imx6_pcie->tx_deemph_gen2_3p5db)) - imx6_pcie->tx_deemph_gen2_3p5db = 0; + &imx_pcie->tx_deemph_gen2_3p5db)) + imx_pcie->tx_deemph_gen2_3p5db = 0; if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", - &imx6_pcie->tx_deemph_gen2_6db)) - imx6_pcie->tx_deemph_gen2_6db = 20; + &imx_pcie->tx_deemph_gen2_6db)) + imx_pcie->tx_deemph_gen2_6db = 20; if (of_property_read_u32(node, "fsl,tx-swing-full", - &imx6_pcie->tx_swing_full)) - imx6_pcie->tx_swing_full = 127; + &imx_pcie->tx_swing_full)) + imx_pcie->tx_swing_full = 127; if (of_property_read_u32(node, "fsl,tx-swing-low", - &imx6_pcie->tx_swing_low)) - imx6_pcie->tx_swing_low = 127; + &imx_pcie->tx_swing_low)) + imx_pcie->tx_swing_low = 127; /* Limit link speed */ - pci->link_gen = 1; - of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen); - - imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); - if (IS_ERR(imx6_pcie->vpcie)) { - if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV) - return PTR_ERR(imx6_pcie->vpcie); - imx6_pcie->vpcie = NULL; + pci->max_link_speed = 1; + of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed); + + imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); + if (IS_ERR(imx_pcie->vpcie)) { + if (PTR_ERR(imx_pcie->vpcie) != -ENODEV) + return PTR_ERR(imx_pcie->vpcie); + imx_pcie->vpcie = NULL; } - imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph"); - if (IS_ERR(imx6_pcie->vph)) { - if (PTR_ERR(imx6_pcie->vph) != -ENODEV) - return PTR_ERR(imx6_pcie->vph); - imx6_pcie->vph = NULL; + imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph"); + if (IS_ERR(imx_pcie->vph)) { + if (PTR_ERR(imx_pcie->vph) != -ENODEV) + return PTR_ERR(imx_pcie->vph); + imx_pcie->vph = NULL; } - platform_set_drvdata(pdev, imx6_pcie); + platform_set_drvdata(pdev, imx_pcie); - ret = imx6_pcie_attach_pd(dev); + ret = imx_pcie_attach_pd(dev); if (ret) return ret; - if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { - ret = imx6_add_pcie_ep(imx6_pcie, pdev); + if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { + ret = imx_add_pcie_ep(imx_pcie, pdev); if (ret < 0) return ret; } else { @@ -1430,24 +1467,25 @@ static int imx6_pcie_probe(struct platform_device *pdev) return 0; } -static void imx6_pcie_shutdown(struct platform_device *pdev) +static void imx_pcie_shutdown(struct platform_device *pdev) { - struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev); + struct imx_pcie *imx_pcie = platform_get_drvdata(pdev); /* bring down link, so bootloader gets clean state in case of reboot */ - imx6_pcie_assert_core_reset(imx6_pcie); + imx_pcie_assert_core_reset(imx_pcie); } static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"}; static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"}; static const char * const imx8mq_clks[] = {"pcie_bus", 
"pcie", "pcie_phy", "pcie_aux"}; static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"}; +static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"}; -static const struct imx6_pcie_drvdata drvdata[] = { +static const struct imx_pcie_drvdata drvdata[] = { [IMX6Q] = { .variant = IMX6Q, - .flags = IMX6_PCIE_FLAG_IMX6_PHY | - IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, + .flags = IMX_PCIE_FLAG_IMX_PHY | + IMX_PCIE_FLAG_IMX_SPEED_CHANGE, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", .clk_names = imx6q_clks, @@ -1456,13 +1494,15 @@ static const struct imx6_pcie_drvdata drvdata[] = { .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, - .init_phy = imx6_pcie_init_phy, + .init_phy = imx_pcie_init_phy, + .enable_ref_clk = imx6q_pcie_enable_ref_clk, + .core_reset = imx6q_pcie_core_reset, }, [IMX6SX] = { .variant = IMX6SX, - .flags = IMX6_PCIE_FLAG_IMX6_PHY | - IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE | - IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, + .flags = IMX_PCIE_FLAG_IMX_PHY | + IMX_PCIE_FLAG_IMX_SPEED_CHANGE | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx6q-iomuxc-gpr", .clk_names = imx6sx_clks, .clks_cnt = ARRAY_SIZE(imx6sx_clks), @@ -1471,12 +1511,14 @@ static const struct imx6_pcie_drvdata drvdata[] = { .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .init_phy = imx6sx_pcie_init_phy, + .enable_ref_clk = imx6sx_pcie_enable_ref_clk, + .core_reset = imx6sx_pcie_core_reset, }, [IMX6QP] = { .variant = IMX6QP, - .flags = IMX6_PCIE_FLAG_IMX6_PHY | - IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE | - IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, + .flags = IMX_PCIE_FLAG_IMX_PHY | + IMX_PCIE_FLAG_IMX_SPEED_CHANGE | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", .clk_names = imx6q_clks, @@ -1485,24 +1527,28 @@ static const struct imx6_pcie_drvdata drvdata[] = { .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, - .init_phy = imx6_pcie_init_phy, + .init_phy = imx_pcie_init_phy, + .enable_ref_clk = imx6q_pcie_enable_ref_clk, + .core_reset = imx6qp_pcie_core_reset, }, [IMX7D] = { .variant = IMX7D, - .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND | - IMX6_PCIE_FLAG_HAS_APP_RESET | - IMX6_PCIE_FLAG_HAS_PHY_RESET, + .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | + IMX_PCIE_FLAG_HAS_APP_RESET | + IMX_PCIE_FLAG_HAS_PHY_RESET, .gpr = "fsl,imx7d-iomuxc-gpr", .clk_names = imx6q_clks, .clks_cnt = ARRAY_SIZE(imx6q_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .init_phy = imx7d_pcie_init_phy, + .enable_ref_clk = imx7d_pcie_enable_ref_clk, + .core_reset = imx7d_pcie_core_reset, }, [IMX8MQ] = { .variant = IMX8MQ, - .flags = IMX6_PCIE_FLAG_HAS_APP_RESET | - IMX6_PCIE_FLAG_HAS_PHY_RESET, + .flags = IMX_PCIE_FLAG_HAS_APP_RESET | + IMX_PCIE_FLAG_HAS_PHY_RESET, .gpr = "fsl,imx8mq-iomuxc-gpr", .clk_names = imx8mq_clks, .clks_cnt = ARRAY_SIZE(imx8mq_clks), @@ -1511,32 +1557,42 @@ static const struct imx6_pcie_drvdata drvdata[] = { .mode_off[1] = IOMUXC_GPR12, .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, .init_phy = imx8mq_pcie_init_phy, + .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, [IMX8MM] = { .variant = IMX8MM, - .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND | - IMX6_PCIE_FLAG_HAS_PHYDRV | - IMX6_PCIE_FLAG_HAS_APP_RESET, + .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | + IMX_PCIE_FLAG_HAS_PHYDRV | + IMX_PCIE_FLAG_HAS_APP_RESET, .gpr = "fsl,imx8mm-iomuxc-gpr", .clk_names = imx8mm_clks, .clks_cnt = ARRAY_SIZE(imx8mm_clks), 
.mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, [IMX8MP] = { .variant = IMX8MP, - .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND | - IMX6_PCIE_FLAG_HAS_PHYDRV | - IMX6_PCIE_FLAG_HAS_APP_RESET, + .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND | + IMX_PCIE_FLAG_HAS_PHYDRV | + IMX_PCIE_FLAG_HAS_APP_RESET, .gpr = "fsl,imx8mp-iomuxc-gpr", .clk_names = imx8mm_clks, .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, + .enable_ref_clk = imx8mm_pcie_enable_ref_clk, + }, + [IMX8Q] = { + .variant = IMX8Q, + .flags = IMX_PCIE_FLAG_HAS_PHYDRV | + IMX_PCIE_FLAG_CPU_ADDR_FIXUP, + .clk_names = imx8q_clks, + .clks_cnt = ARRAY_SIZE(imx8q_clks), }, [IMX95] = { .variant = IMX95, - .flags = IMX6_PCIE_FLAG_HAS_SERDES, + .flags = IMX_PCIE_FLAG_HAS_SERDES, .clk_names = imx8mq_clks, .clks_cnt = ARRAY_SIZE(imx8mq_clks), .ltssm_off = IMX95_PE0_GEN_CTRL_3, @@ -1547,8 +1603,8 @@ static const struct imx6_pcie_drvdata drvdata[] = { }, [IMX8MQ_EP] = { .variant = IMX8MQ_EP, - .flags = IMX6_PCIE_FLAG_HAS_APP_RESET | - IMX6_PCIE_FLAG_HAS_PHY_RESET, + .flags = IMX_PCIE_FLAG_HAS_APP_RESET | + IMX_PCIE_FLAG_HAS_PHY_RESET, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mq-iomuxc-gpr", .clk_names = imx8mq_clks, @@ -1559,10 +1615,12 @@ static const struct imx6_pcie_drvdata drvdata[] = { .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, .epc_features = &imx8m_pcie_epc_features, .init_phy = imx8mq_pcie_init_phy, + .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, [IMX8MM_EP] = { .variant = IMX8MM_EP, - .flags = IMX6_PCIE_FLAG_HAS_PHYDRV, + .flags = IMX_PCIE_FLAG_HAS_APP_RESET | + IMX_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mm-iomuxc-gpr", .clk_names = imx8mm_clks, @@ -1570,10 +1628,12 @@ static const struct imx6_pcie_drvdata drvdata[] = { .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .epc_features = &imx8m_pcie_epc_features, + .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, [IMX8MP_EP] = { .variant = IMX8MP_EP, - .flags = IMX6_PCIE_FLAG_HAS_PHYDRV, + .flags = IMX_PCIE_FLAG_HAS_APP_RESET | + IMX_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mp-iomuxc-gpr", .clk_names = imx8mm_clks, @@ -1581,11 +1641,12 @@ static const struct imx6_pcie_drvdata drvdata[] = { .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .epc_features = &imx8m_pcie_epc_features, + .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, [IMX95_EP] = { .variant = IMX95_EP, - .flags = IMX6_PCIE_FLAG_HAS_SERDES | - IMX6_PCIE_FLAG_SUPPORT_64BIT, + .flags = IMX_PCIE_FLAG_HAS_SERDES | + IMX_PCIE_FLAG_SUPPORT_64BIT, .clk_names = imx8mq_clks, .clks_cnt = ARRAY_SIZE(imx8mq_clks), .ltssm_off = IMX95_PE0_GEN_CTRL_3, @@ -1598,7 +1659,7 @@ static const struct imx6_pcie_drvdata drvdata[] = { }, }; -static const struct of_device_id imx6_pcie_of_match[] = { +static const struct of_device_id imx_pcie_of_match[] = { { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], }, { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], }, { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], }, @@ -1606,6 +1667,7 @@ static const struct of_device_id imx6_pcie_of_match[] = { { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], }, { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], }, { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], }, + { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], }, { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], }, { 
.compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], }, { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], }, @@ -1614,19 +1676,19 @@ static const struct of_device_id imx6_pcie_of_match[] = { {}, }; -static struct platform_driver imx6_pcie_driver = { +static struct platform_driver imx_pcie_driver = { .driver = { .name = "imx6q-pcie", - .of_match_table = imx6_pcie_of_match, + .of_match_table = imx_pcie_of_match, .suppress_bind_attrs = true, - .pm = &imx6_pcie_pm_ops, + .pm = &imx_pcie_pm_ops, .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, - .probe = imx6_pcie_probe, - .shutdown = imx6_pcie_shutdown, + .probe = imx_pcie_probe, + .shutdown = imx_pcie_shutdown, }; -static void imx6_pcie_quirk(struct pci_dev *dev) +static void imx_pcie_quirk(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; struct dw_pcie_rp *pp = bus->sysdata; @@ -1636,33 +1698,33 @@ static void imx6_pcie_quirk(struct pci_dev *dev) return; /* Make sure we only quirk devices associated with this driver */ - if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver) + if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver) return; if (pci_is_root_bus(bus)) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + struct imx_pcie *imx_pcie = to_imx_pcie(pci); /* * Limit config length to avoid the kernel reading beyond * the register set and causing an abort on i.MX 6Quad */ - if (imx6_pcie->drvdata->dbi_length) { - dev->cfg_size = imx6_pcie->drvdata->dbi_length; + if (imx_pcie->drvdata->dbi_length) { + dev->cfg_size = imx_pcie->drvdata->dbi_length; dev_info(&dev->dev, "Limiting cfg_size to %d\n", dev->cfg_size); } } } DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, - PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk); + PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk); -static int __init imx6_pcie_init(void) +static int __init imx_pcie_init(void) { #ifdef CONFIG_ARM struct device_node *np; - np = of_find_matching_node(NULL, imx6_pcie_of_match); + np = of_find_matching_node(NULL, imx_pcie_of_match); if (!np) return -ENODEV; of_node_put(np); @@ -1678,6 +1740,6 @@ static int __init imx6_pcie_init(void) "external abort on non-linefetch"); #endif - return platform_driver_register(&imx6_pcie_driver); + return platform_driver_register(&imx_pcie_driver); } -device_initcall(imx6_pcie_init); +device_initcall(imx_pcie_init); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 52c6420ae2003c..2219b1a866faf2 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -189,12 +189,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) (int)data->hwirq, msg->address_hi, msg->address_lo); } -static int ks_pcie_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static void ks_pcie_msi_mask(struct irq_data *data) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data); @@ -247,7 +241,6 @@ static struct irq_chip ks_pcie_msi_irq_chip = { .name = "KEYSTONE-PCI-MSI", .irq_ack = ks_pcie_msi_irq_ack, .irq_compose_msi_msg = ks_pcie_compose_msi_msg, - .irq_set_affinity = ks_pcie_msi_set_affinity, .irq_mask = ks_pcie_msi_mask, .irq_unmask = ks_pcie_msi_unmask, }; @@ -577,7 +570,7 @@ static void ks_pcie_quirk(struct pci_dev *dev) */ if (pci_match_id(am6_pci_devids, bridge)) { bridge_dev = pci_get_host_bridge_device(dev); - if (!bridge_dev && !bridge_dev->parent) + if (!bridge_dev || 
!bridge_dev->parent) return; ks_pcie = dev_get_drvdata(bridge_dev->parent); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index a0822d5371bc55..3e41865c72904e 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -48,8 +48,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = { }; static struct msi_domain_info dw_pcie_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX | + MSI_FLAG_MULTI_PCI_MSI, .chip = &dw_pcie_msi_irq_chip, }; @@ -116,12 +117,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) (int)d->hwirq, msg->address_hi, msg->address_lo); } -static int dw_pci_msi_set_affinity(struct irq_data *d, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static void dw_pci_bottom_mask(struct irq_data *d) { struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); @@ -177,7 +172,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = { .name = "DWPCI-MSI", .irq_ack = dw_pci_bottom_ack, .irq_compose_msi_msg = dw_pci_setup_msi_msg, - .irq_set_affinity = dw_pci_msi_set_affinity, .irq_mask = dw_pci_bottom_mask, .irq_unmask = dw_pci_bottom_unmask, }; diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 1b5aba1f0c92f9..6d6cbc8b5b2c67 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -112,6 +112,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci) pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); + pci->dbi_phys_addr = res->start; } /* DBI2 is mainly useful for the endpoint controller */ @@ -134,6 +135,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci) pci->atu_base = devm_ioremap_resource(pci->dev, res); if (IS_ERR(pci->atu_base)) return PTR_ERR(pci->atu_base); + pci->atu_phys_addr = res->start; } else { pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; } @@ -166,8 +168,8 @@ int dw_pcie_get_resources(struct dw_pcie *pci) return ret; } - if (pci->link_gen < 1) - pci->link_gen = of_pci_get_max_link_speed(np); + if (pci->max_link_speed < 1) + pci->max_link_speed = of_pci_get_max_link_speed(np); of_property_read_u32(np, "num-lanes", &pci->num_lanes); @@ -687,16 +689,27 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci) } EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup); -static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen) +static void dw_pcie_link_set_max_speed(struct dw_pcie *pci) { u32 cap, ctrl2, link_speed; u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); + + /* + * Even if the platform doesn't want to limit the maximum link speed, + * just cache the hardware default value so that the vendor drivers can + * use it to do any link specific configuration. 
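 *
 * Annotation (not part of the patch): concretely, when no limit was
 * requested (max_link_speed still 0) the code below only caches
 * FIELD_GET(PCI_EXP_LNKCAP_SLS, cap), the controller's maximum
 * supported speed from Link Capabilities (e.g. 4 for 16.0 GT/s), and
 * returns without touching Link Control 2. Only when a limit was set,
 * via the DT "max-link-speed" property or by the glue driver, does it
 * fall through and write the matching PCI_EXP_LNKCTL2_TLS_* encoding.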
+ */ + if (pci->max_link_speed < 1) { + pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap); + return; + } + ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2); ctrl2 &= ~PCI_EXP_LNKCTL2_TLS; - switch (pcie_link_speed[link_gen]) { + switch (pcie_link_speed[pci->max_link_speed]) { case PCIE_SPEED_2_5GT: link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT; break; @@ -1058,8 +1071,7 @@ void dw_pcie_setup(struct dw_pcie *pci) { u32 val; - if (pci->link_gen > 0) - dw_pcie_link_set_max_speed(pci, pci->link_gen); + dw_pcie_link_set_max_speed(pci); /* Configure Gen1 N_FTS */ if (pci->n_fts[0]) { diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 53c4c8f399c883..347ab74ac35aa4 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -125,6 +125,19 @@ #define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16) #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24 #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24) +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1 + +#define GEN3_EQ_CONTROL_OFF 0x8A8 +#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0) +#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4) +#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8) +#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24) + +#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC +#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0) +#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5) +#define GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA GENMASK(13, 10) +#define GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA GENMASK(17, 14) #define PCIE_PORT_MULTI_LANE_CTRL 0x8C0 #define PORT_MLTI_UPCFG_SUPPORT BIT(7) @@ -197,6 +210,24 @@ #define PCIE_PL_CHK_REG_ERR_ADDR 0xB28 +/* + * 16.0 GT/s (Gen 4) lane margining register definitions + */ +#define GEN4_LANE_MARGINING_1_OFF 0xB80 +#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24) +#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16) +#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8) +#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0) + +#define GEN4_LANE_MARGINING_2_OFF 0xB84 +#define MARGINING_IND_ERROR_SAMPLER BIT(28) +#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27) +#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26) +#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25) +#define MARGINING_VOLTAGE_SUPPORTED BIT(24) +#define MARGINING_MAXLANES GENMASK(20, 16) +#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8) +#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0) /* * iATU Unroll-specific register definitions * From 4.80 core version the address translation will be made by unroll @@ -407,8 +438,10 @@ struct dw_pcie_ops { struct dw_pcie { struct device *dev; void __iomem *dbi_base; + resource_size_t dbi_phys_addr; void __iomem *dbi_base2; void __iomem *atu_base; + resource_size_t atu_phys_addr; size_t atu_size; u32 num_ib_windows; u32 num_ob_windows; @@ -421,7 +454,7 @@ struct dw_pcie { u32 type; unsigned long caps; int num_lanes; - int link_gen; + int max_link_speed; u8 n_fts[2]; struct dw_edma_chip edma; struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS]; diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index acbe4f6d3291d8..676d2aba4fbdec 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -132,7 +132,7 @@ static void intel_pcie_link_setup(struct intel_pcie *pcie) static void intel_pcie_init_n_fts(struct dw_pcie *pci) { - switch (pci->link_gen) { + switch (pci->max_link_speed) { case 3: pci->n_fts[1] = 
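/*
 * Annotation (not part of the patch): the GEN3_EQ_CONTROL_OFF,
 * GEN3_EQ_FB_MODE_DIR_CHANGE_OFF and GEN4_LANE_MARGINING_* definitions
 * added to pcie-designware.h above are consumed further down by the new
 * shared pcie-qcom-common.c helpers and by the tegra194 conversion from
 * open-coded shifts to FIELD_PREP(). As a worked example of that
 * pattern: GEN3_EQ_FMDC_N_EVALS is GENMASK(9, 5), so
 * FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) evaluates to 0xd << 5 = 0x1a0,
 * i.e. the value lands in bits 9:5 of the register.
 */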
PORT_AFR_N_FTS_GEN3; break; @@ -252,7 +252,7 @@ static int intel_pcie_wait_l2(struct intel_pcie *pcie) int ret; struct dw_pcie *pci = &pcie->pci; - if (pci->link_gen < 3) + if (pci->max_link_speed < 3) return 0; /* Send PME_TURN_OFF message */ diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 0a29136491b891..85a2c77b1835af 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -420,11 +420,11 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, "unable to get a valid reset gpio\n"); } - pcie->num_slots++; - if (pcie->num_slots > MAX_PCI_SLOTS) { + if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) { dev_err(dev, "Too many PCI slots!\n"); return -EINVAL; } + pcie->num_slots++; ret = of_pci_get_devfn(child); if (ret < 0) { diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c new file mode 100644 index 00000000000000..3aad19b56da8f6 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-qcom-common.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include + +#include "pcie-designware.h" +#include "pcie-qcom-common.h" + +void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci) +{ + u32 reg; + + /* + * GEN3_RELATED_OFF register is repurposed to apply equalization + * settings at various data transmission rates through registers namely + * GEN3_EQ_*. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF + * determines the data rate for which these equalization settings are + * applied. + */ + reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; + reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; + reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK, + GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT); + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg); + + reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF); + reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 | + GEN3_EQ_FMDC_N_EVALS | + GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA | + GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA); + reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) | + FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) | + FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) | + FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5); + dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg); + + reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); + reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE | + GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE | + GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL | + GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC); + dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg); +} +EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization); + +void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci) +{ + u32 reg; + + reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF); + reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET | + MARGINING_NUM_VOLTAGE_STEPS | + MARGINING_MAX_TIMING_OFFSET | + MARGINING_NUM_TIMING_STEPS); + reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) | + FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) | + FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) | + FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10); + dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg); + + reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF); + reg |= MARGINING_IND_ERROR_SAMPLER | + MARGINING_SAMPLE_REPORTING_METHOD | + MARGINING_IND_LEFT_RIGHT_TIMING | + MARGINING_VOLTAGE_SUPPORTED; + reg &= 
~(MARGINING_IND_UP_DOWN_VOLTAGE | + MARGINING_MAXLANES | + MARGINING_SAMPLE_RATE_TIMING | + MARGINING_SAMPLE_RATE_VOLTAGE); + reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) | + FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) | + FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f); + dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg); +} +EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining); diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h new file mode 100644 index 00000000000000..7d88d29e476611 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-qcom-common.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _PCIE_QCOM_COMMON_H +#define _PCIE_QCOM_COMMON_H + +struct dw_pcie; + +void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci); +void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci); + +#endif diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index a9b263f749b6aa..e588fcc5458936 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -25,6 +25,7 @@ #include "../../pci.h" #include "pcie-designware.h" +#include "pcie-qcom-common.h" /* PARF registers */ #define PARF_SYS_CTRL 0x00 @@ -498,6 +499,11 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) goto err_disable_resources; } + if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) { + qcom_pcie_common_set_16gt_equalization(pci); + qcom_pcie_common_set_16gt_lane_margining(pci); + } + /* * The physical address of the MMIO region which is exposed as the BAR * should be written to MHI BASE registers. @@ -659,11 +665,9 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data) struct dw_pcie *pci = &pcie_ep->pci; struct device *dev = pci->dev; u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS); - u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK); u32 dstate, val; writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR); - status &= mask; if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) { dev_dbg(dev, "Received Linkdown event\n"); @@ -693,7 +697,8 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data) dw_pcie_ep_linkup(&pci->ep); pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP; } else { - dev_err(dev, "Received unknown event: %d\n", status); + dev_WARN_ONCE(dev, 1, "Received unknown event. 
INT_STATUS: 0x%08x\n", + status); } return IRQ_HANDLED; @@ -724,8 +729,15 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data) static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, struct qcom_pcie_ep *pcie_ep) { + struct device *dev = pcie_ep->pci.dev; + char *name; int ret; + name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d", + pcie_ep->pci.ep.epc->domain_nr); + if (!name) + return -ENOMEM; + pcie_ep->global_irq = platform_get_irq_byname(pdev, "global"); if (pcie_ep->global_irq < 0) return pcie_ep->global_irq; @@ -733,18 +745,23 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL, qcom_pcie_ep_global_irq_thread, IRQF_ONESHOT, - "global_irq", pcie_ep); + name, pcie_ep); if (ret) { dev_err(&pdev->dev, "Failed to request Global IRQ\n"); return ret; } + name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d", + pcie_ep->pci.ep.epc->domain_nr); + if (!name) + return -ENOMEM; + pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset); irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL, qcom_pcie_ep_perst_irq_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - "perst_irq", pcie_ep); + name, pcie_ep); if (ret) { dev_err(&pdev->dev, "Failed to request PERST IRQ\n"); disable_irq(pcie_ep->global_irq); @@ -858,21 +875,15 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev) if (ret) return ret; - ret = qcom_pcie_enable_resources(pcie_ep); - if (ret) { - dev_err(dev, "Failed to enable resources: %d\n", ret); - return ret; - } - ret = dw_pcie_ep_init(&pcie_ep->pci.ep); if (ret) { dev_err(dev, "Failed to initialize endpoint: %d\n", ret); - goto err_disable_resources; + return ret; } ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep); if (ret) - goto err_disable_resources; + goto err_ep_deinit; name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); if (!name) { @@ -889,8 +900,8 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev) disable_irq(pcie_ep->global_irq); disable_irq(pcie_ep->perst_irq); -err_disable_resources: - qcom_pcie_disable_resources(pcie_ep); +err_ep_deinit: + dw_pcie_ep_deinit(&pcie_ep->pci.ep); return ret; } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 6f953e32d99070..ef44a82be058b2 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -35,6 +35,7 @@ #include "../../pci.h" #include "pcie-designware.h" +#include "pcie-qcom-common.h" /* PARF registers */ #define PARF_SYS_CTRL 0x00 @@ -45,15 +46,24 @@ #define PARF_PHY_REFCLK 0x4c #define PARF_CONFIG_BITS 0x50 #define PARF_DBI_BASE_ADDR 0x168 +#define PARF_SLV_ADDR_SPACE_SIZE 0x16c #define PARF_MHI_CLOCK_RESET_CTRL 0x174 #define PARF_AXI_MSTR_WR_ADDR_HALT 0x178 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 #define PARF_Q2A_FLUSH 0x1ac #define PARF_LTSSM 0x1b0 +#define PARF_INT_ALL_STATUS 0x224 +#define PARF_INT_ALL_CLEAR 0x228 +#define PARF_INT_ALL_MASK 0x22c #define PARF_SID_OFFSET 0x234 #define PARF_BDF_TRANSLATE_CFG 0x24c -#define PARF_SLV_ADDR_SPACE_SIZE 0x358 +#define PARF_DBI_BASE_ADDR_V2 0x350 +#define PARF_DBI_BASE_ADDR_V2_HI 0x354 +#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358 +#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c #define PARF_NO_SNOOP_OVERIDE 0x3d4 +#define PARF_ATU_BASE_ADDR 0x634 +#define PARF_ATU_BASE_ADDR_HI 0x638 #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_TABLE_N 
0x2000 #define PARF_BDF_TO_SID_CFG 0x2c00 @@ -108,7 +118,7 @@ #define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x) /* PARF_SLV_ADDR_SPACE_SIZE register value */ -#define SLV_ADDR_SPACE_SZ 0x10000000 +#define SLV_ADDR_SPACE_SZ 0x80000000 /* PARF_MHI_CLOCK_RESET_CTRL register fields */ #define AHB_CLK_EN BIT(0) @@ -121,6 +131,9 @@ /* PARF_LTSSM register fields */ #define LTSSM_EN BIT(8) +/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ +#define PARF_INT_ALL_LINK_UP BIT(13) + /* PARF_NO_SNOOP_OVERIDE register fields */ #define WR_NO_SNOOP_OVERIDE_EN BIT(1) #define RD_NO_SNOOP_OVERIDE_EN BIT(3) @@ -284,6 +297,11 @@ static int qcom_pcie_start_link(struct dw_pcie *pci) { struct qcom_pcie *pcie = to_qcom_pcie(pci); + if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) { + qcom_pcie_common_set_16gt_equalization(pci); + qcom_pcie_common_set_16gt_lane_margining(pci); + } + /* Enable Link Training state machine */ if (pcie->cfg->ops->ltssm_enable) pcie->cfg->ops->ltssm_enable(pcie); @@ -325,6 +343,50 @@ static void qcom_pcie_clear_hpc(struct dw_pcie *pci) dw_pcie_dbi_ro_wr_dis(pci); } +static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + + if (pci->dbi_phys_addr) { + /* + * PARF_DBI_BASE_ADDR register is in CPU domain and require to + * be programmed with CPU physical address. + */ + writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + + PARF_DBI_BASE_ADDR); + writel(SLV_ADDR_SPACE_SZ, pcie->parf + + PARF_SLV_ADDR_SPACE_SIZE); + } +} + +static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + + if (pci->dbi_phys_addr) { + /* + * PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are + * in CPU domain and require to be programmed with CPU + * physical addresses. 
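 *
 * Annotation (not part of the patch): the physical addresses written
 * below come from the new dbi_phys_addr and atu_phys_addr fields that
 * dw_pcie_get_resources() now records next to the ioremapped virtual
 * addresses (see the pcie-designware.c hunk earlier in this patch);
 * the removed code simply wrote 0 to PARF_DBI_BASE_ADDR instead.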
+ */ + writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + + PARF_DBI_BASE_ADDR_V2); + writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf + + PARF_DBI_BASE_ADDR_V2_HI); + + if (pci->atu_phys_addr) { + writel(lower_32_bits(pci->atu_phys_addr), pcie->parf + + PARF_ATU_BASE_ADDR); + writel(upper_32_bits(pci->atu_phys_addr), pcie->parf + + PARF_ATU_BASE_ADDR_HI); + } + + writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2); + writel(SLV_ADDR_SPACE_SZ, pcie->parf + + PARF_SLV_ADDR_SPACE_SIZE_V2_HI); + } +} + static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) { u32 val; @@ -541,8 +603,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) { - /* change DBI base address */ - writel(0, pcie->parf + PARF_DBI_BASE_ADDR); + qcom_pcie_configure_dbi_base(pcie); if (IS_ENABLED(CONFIG_PCI_MSI)) { u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); @@ -629,8 +690,7 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); - /* change DBI base address */ - writel(0, pcie->parf + PARF_DBI_BASE_ADDR); + qcom_pcie_configure_dbi_base(pcie); /* MAC PHY_POWERDOWN MUX DISABLE */ val = readl(pcie->parf + PARF_SYS_CTRL); @@ -812,13 +872,11 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; - writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE); - val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); - writel(0, pcie->parf + PARF_DBI_BASE_ADDR); + qcom_pcie_configure_dbi_atu_base(pcie); writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | @@ -914,8 +972,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); - /* change DBI base address */ - writel(0, pcie->parf + PARF_DBI_BASE_ADDR); + qcom_pcie_configure_dbi_atu_base(pcie); /* MAC PHY_POWERDOWN MUX DISABLE */ val = readl(pcie->parf + PARF_SYS_CTRL); @@ -1124,14 +1181,11 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) u32 val; int i; - writel(SLV_ADDR_SPACE_SZ, - pcie->parf + PARF_SLV_ADDR_SPACE_SIZE); - val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; writel(val, pcie->parf + PARF_PHY_CTRL); - writel(0, pcie->parf + PARF_DBI_BASE_ADDR); + qcom_pcie_configure_dbi_atu_base(pcie); writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, @@ -1489,6 +1543,29 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie) qcom_pcie_link_transition_count); } +static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data) +{ + struct qcom_pcie *pcie = data; + struct dw_pcie_rp *pp = &pcie->pci->pp; + struct device *dev = pcie->pci->dev; + u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS); + + writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR); + + if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) { + dev_dbg(dev, "Received Link up event. Starting enumeration!\n"); + /* Rescan the bus to enumerate endpoint devices */ + pci_lock_rescan_remove(); + pci_rescan_bus(pp->bridge->bus); + pci_unlock_rescan_remove(); + } else { + dev_WARN_ONCE(dev, 1, "Received unknown event. 
INT_STATUS: 0x%08x\n", + status); + } + + return IRQ_HANDLED; +} + static int qcom_pcie_probe(struct platform_device *pdev) { const struct qcom_pcie_cfg *pcie_cfg; @@ -1499,7 +1576,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) struct dw_pcie_rp *pp; struct resource *res; struct dw_pcie *pci; - int ret; + int ret, irq; + char *name; pcie_cfg = of_device_get_match_data(dev); if (!pcie_cfg || !pcie_cfg->ops) { @@ -1620,6 +1698,27 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_phy_exit; } + name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d", + pci_domain_nr(pp->bridge->bus)); + if (!name) { + ret = -ENOMEM; + goto err_host_deinit; + } + + irq = platform_get_irq_byname_optional(pdev, "global"); + if (irq > 0) { + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + qcom_pcie_global_irq_thread, + IRQF_ONESHOT, name, pcie); + if (ret) { + dev_err_probe(&pdev->dev, ret, + "Failed to request Global IRQ\n"); + goto err_host_deinit; + } + + writel_relaxed(PARF_INT_ALL_LINK_UP, pcie->parf + PARF_INT_ALL_MASK); + } + qcom_pcie_icc_opp_update(pcie); if (pcie->mhi) @@ -1627,6 +1726,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) return 0; +err_host_deinit: + dw_pcie_host_deinit(pp); err_phy_exit: phy_exit(pcie->phy); err_pm_runtime_put: diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index f0f3ebd1a033eb..3a5511c3f7d970 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -141,10 +141,10 @@ static int rcar_gen4_pcie_start_link(struct dw_pcie *dw) } /* - * Require direct speed change with retrying here if the link_gen is - * PCIe Gen2 or higher. + * Require direct speed change with retrying here if the max_link_speed + * is PCIe Gen2 or higher. */ - changes = min_not_zero(dw->link_gen, RCAR_MAX_LINK_SPEED) - 1; + changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1; /* * Since dw_pcie_setup_rc() sets it once, PCIe Gen2 will be trained. 
@@ -606,7 +606,12 @@ static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar, static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar) { /* The check_addr values are magical numbers in the datasheet */ - const u32 check_addr[] = { 0x00101018, 0x00101118, 0x00101021, 0x00101121}; + static const u32 check_addr[] = { + 0x00101018, + 0x00101118, + 0x00101021, + 0x00101121, + }; struct dw_pcie *dw = &rcar->dw; const struct firmware *fw; unsigned int i, timeout; diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 201dced209f082..ff986ced56b2ee 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -233,7 +233,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev) } if (of_property_read_bool(np, "st,pcie-is-gen1")) - pci->link_gen = 1; + pci->max_link_speed = 1; platform_set_drvdata(pdev, spear13xx_pcie); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 4bf7b433417a3b..c1394f2ab63ff1 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -177,17 +177,12 @@ #define N_FTS_VAL 52 #define FTS_VAL 52 -#define GEN3_EQ_CONTROL_OFF 0x8a8 -#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8 -#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8) -#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0) - #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0 -#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3 -#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0) -#define AMBA_ERROR_RESPONSE_CRS_OKAY 0 -#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1 -#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2 +#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3 +#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0) +#define AMBA_ERROR_RESPONSE_RRS_OKAY 0 +#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1 +#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2 #define MSIX_ADDR_MATCH_LOW_OFF 0x940 #define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0) @@ -861,9 +856,9 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); - val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; - val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); - val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; + val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC; + val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff); + val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE; dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); @@ -872,10 +867,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); - val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; - val |= (pcie->of_data->gen4_preset_vec << - GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); - val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; + val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC; + val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, + pcie->of_data->gen4_preset_vec); + val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE; dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); @@ -907,11 +902,11 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp) dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); - /* Enable as 0xFFFF0001 response for CRS */ + /* Enable as 0xFFFF0001 response for RRS */ val = dw_pcie_readl_dbi(pci, 
PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT); - val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT); - val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 << - AMBA_ERROR_RESPONSE_CRS_SHIFT); + val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT); + val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 << + AMBA_ERROR_RESPONSE_RRS_SHIFT); dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val); /* Clear Slot Clock Configuration bit if SRNS configuration */ diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index 32951f7d6d6d6a..0e088e74155d3b 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -360,8 +360,8 @@ static struct irq_chip mobiveil_msi_irq_chip = { }; static struct msi_domain_info mobiveil_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, .chip = &mobiveil_msi_irq_chip, }; @@ -378,16 +378,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) (int)data->hwirq, msg->address_hi, msg->address_lo); } -static int mobiveil_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static struct irq_chip mobiveil_msi_bottom_irq_chip = { .name = "Mobiveil MSI", .irq_compose_msi_msg = mobiveil_compose_msi_msg, - .irq_set_affinity = mobiveil_msi_set_affinity, }; static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 8b3e1a079cf3b8..a598a98247ce91 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -50,7 +50,7 @@ #define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7) #define PIO_COMPLETION_STATUS_OK 0 #define PIO_COMPLETION_STATUS_UR 1 -#define PIO_COMPLETION_STATUS_CRS 2 +#define PIO_COMPLETION_STATUS_RRS 2 #define PIO_COMPLETION_STATUS_CA 4 #define PIO_NON_POSTED_REQ BIT(10) #define PIO_ERR_STATUS BIT(11) @@ -262,7 +262,7 @@ enum { #define MSI_IRQ_NUM 32 -#define CFG_RD_CRS_VAL 0xffff0001 +#define CFG_RD_RRS_VAL 0xffff0001 struct advk_pcie { struct platform_device *pdev; @@ -649,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_pcie_train_link(pcie); } -static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val) +static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val) { struct device *dev = &pcie->pdev->dev; u32 reg; @@ -669,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3 * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only * means a PIO write error, and for PIO read it is successful with * a read value of 0xFFFFFFFF. - * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7) + * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7) * only means a PIO write error, and for PIO read it is successful * with a read value of 0xFFFF0001. 
* 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means @@ -694,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3 strcomp_status = "UR"; ret = -EOPNOTSUPP; break; - case PIO_COMPLETION_STATUS_CRS: - if (allow_crs && val) { - /* PCIe r4.0, sec 2.3.2, says: - * If CRS Software Visibility is enabled: + case PIO_COMPLETION_STATUS_RRS: + if (allow_rrs && val) { + /* PCIe r6.0, sec 2.3.2, says: + * If Configuration RRS Software Visibility is enabled: * For a Configuration Read Request that includes both * bytes of the Vendor ID field of a device Function's * Configuration Space Header, the Root Complex must @@ -706,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3 * all '1's for any additional bytes included in the * request. * - * So CRS in this case is not an error status. + * So RRS in this case is not an error status. */ - *val = CFG_RD_CRS_VAL; + *val = CFG_RD_RRS_VAL; strcomp_status = NULL; ret = 0; break; } - /* PCIe r4.0, sec 2.3.2, says: - * If CRS Software Visibility is not enabled, the Root Complex + /* PCIe r6.0, sec 2.3.2, says: + * If RRS Software Visibility is not enabled, the Root Complex * must re-issue the Configuration Request as a new Request. - * If CRS Software Visibility is enabled: For a Configuration + * If RRS Software Visibility is enabled: For a Configuration * Write Request or for any other Configuration Read Request, * the Root Complex must re-issue the Configuration Request as * a new Request. * A Root Complex implementation may choose to limit the number - * of Configuration Request/CRS Completion Status loops before + * of Configuration Request/RRS Completion Status loops before * determining that something is wrong with the target of the * Request and taking appropriate action, e.g., complete the * Request to the host as a failed transaction. @@ -729,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3 * So return -EAGAIN and caller (pci-aardvark.c driver) will * re-issue request again up to the PIO_RETRY_CNT retries. 
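 *
 * Annotation (not part of the patch): CFG_RD_RRS_VAL (0xffff0001),
 * returned in this case, encodes exactly the fabricated completion the
 * quoted spec text describes: 0x0001 in the two Vendor ID bytes and all
 * 1's in the remaining bytes, which the PCI core recognises as "device
 * exists but is not ready yet" and retries.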
*/ - strcomp_status = "CRS"; + strcomp_status = "RRS"; ret = -EAGAIN; break; case PIO_COMPLETION_STATUS_CA: @@ -920,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, case PCI_EXP_RTCTL: { u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl); - /* Only emulation of PMEIE and CRSSVE bits is provided */ - rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE; + /* Only emulation of PMEIE and RRS_SVE bits is provided */ + rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE; bridge->pcie_conf.rootctl = cpu_to_le16(rootctl); break; } @@ -1075,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS); /* Indicates supports for Completion Retry Status */ - bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); + bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV); bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff; bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16; @@ -1141,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, { struct advk_pcie *pcie = bus->sysdata; int retry_count; - bool allow_crs; + bool allow_rrs; u32 reg; int ret; @@ -1153,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, size, val); /* - * Completion Retry Status is possible to return only when reading all - * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and - * CRSSVE flag on Root Bridge is enabled. + * Configuration Request Retry Status (RRS) is possible to return + * only when reading both bytes from PCI_VENDOR_ID at once and + * RRS_SVE flag on Root Port is enabled. */ - allow_crs = (where == PCI_VENDOR_ID) && (size == 4) && + allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) && (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & - PCI_EXP_RTCTL_CRSSVE); + PCI_EXP_RTCTL_RRS_SVE); if (advk_pcie_pio_is_running(pcie)) - goto try_crs; + goto try_rrs; /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); @@ -1189,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, ret = advk_pcie_wait_pio(pcie); if (ret < 0) - goto try_crs; + goto try_rrs; retry_count += ret; /* Check PIO status and get the read result */ - ret = advk_pcie_check_pio_status(pcie, allow_crs, val); + ret = advk_pcie_check_pio_status(pcie, allow_rrs, val); } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT); if (ret < 0) @@ -1207,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, return PCIBIOS_SUCCESSFUL; -try_crs: +try_rrs: /* - * If it is possible, return Completion Retry Status so that caller - * tries to issue the request again instead of failing. + * If it is possible, return Configuration Request Retry Status so + * that caller tries to issue the request again instead of failing. 
*/ - if (allow_crs) { - *val = CFG_RD_CRS_VAL; + if (allow_rrs) { + *val = CFG_RD_RRS_VAL; return PCIBIOS_SUCCESSFUL; } @@ -1304,12 +1304,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data, msg->data = data->hwirq; } -static int advk_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static void advk_msi_irq_mask(struct irq_data *d) { struct advk_pcie *pcie = d->domain->host_data; @@ -1353,7 +1347,6 @@ static void advk_msi_top_irq_unmask(struct irq_data *d) static struct irq_chip advk_msi_bottom_irq_chip = { .name = "MSI", .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, - .irq_set_affinity = advk_msi_set_affinity, .irq_mask = advk_msi_irq_mask, .irq_unmask = advk_msi_irq_unmask, }; @@ -1451,7 +1444,8 @@ static struct irq_chip advk_msi_irq_chip = { static struct msi_domain_info advk_msi_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI | + MSI_FLAG_PCI_MSIX, .chip = &advk_msi_irq_chip, }; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 038d974a318eaf..d7517c3976e7f1 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -1629,11 +1629,6 @@ static void tegra_msi_irq_unmask(struct irq_data *d) spin_unlock_irqrestore(&msi->mask_lock, flags); } -static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct tegra_msi *msi = irq_data_get_irq_chip_data(data); @@ -1648,7 +1643,6 @@ static struct irq_chip tegra_msi_bottom_chip = { .irq_ack = tegra_msi_irq_ack, .irq_mask = tegra_msi_irq_mask, .irq_unmask = tegra_msi_irq_unmask, - .irq_set_affinity = tegra_msi_set_affinity, .irq_compose_msi_msg = tegra_compose_msi_msg, }; @@ -1697,8 +1691,8 @@ static const struct irq_domain_ops tegra_msi_domain_ops = { }; static struct msi_domain_info tegra_msi_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, .chip = &tegra_msi_top_chip, }; diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 8e457fa450a2c4..1e2ebbfa36d194 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -171,17 +171,17 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, /* * The v1 controller has a bug in its Configuration Request Retry - * Status (CRS) logic: when CRS Software Visibility is enabled and + * Status (RRS) logic: when RRS Software Visibility is enabled and * we read the Vendor and Device ID of a non-existent device, the * controller fabricates return data of 0xFFFF0001 ("device exists * but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE) * ("device does not exist"). This causes the PCI core to retry * the read until it times out. Avoid this by not claiming to - * support CRS SV. + * support RRS SV. 
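 *
 * Annotation (not part of the patch): "RRS" here, as in the aardvark
 * and tegra194 hunks above, is the newer PCIe specification's name for
 * what earlier revisions and the old code called CRS (Configuration
 * Request Retry Status); within this X-Gene workaround the change is
 * purely a rename, the register bit and the behaviour are unchanged.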
*/ if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL)) - *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16); if (size <= 2) *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index 16336a525c1677..e36a6e158d23c1 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -81,8 +81,8 @@ static struct irq_chip altera_msi_irq_chip = { }; static struct msi_domain_info altera_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, .chip = &altera_msi_irq_chip, }; @@ -99,16 +99,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) (int)data->hwirq, msg->address_hi, msg->address_lo); } -static int altera_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static struct irq_chip altera_msi_bottom_irq_chip = { .name = "Altera MSI", .irq_compose_msi_msg = altera_compose_msi_msg, - .irq_set_affinity = altera_msi_set_affinity, }; static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index ef73baefaeb90b..650b2dd81c4822 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -55,12 +55,11 @@ #define TLP_READ_TAG 0x1d #define TLP_WRITE_TAG 0x10 #define RP_DEVFN 0 -#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) #define TLP_CFG_DW0(pcie, cfg) \ (((cfg) << 24) | \ TLP_PAYLOAD_SIZE) #define TLP_CFG_DW1(pcie, tag, be) \ - (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) + (((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ (((bus) << 24) | ((devfn) << 16) | (offset)) #define TLP_COMP_STATUS(s) (((s) >> 13) & 7) diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index c08683febdd40a..9321280f6edbab 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -75,15 +75,19 @@ #define PCIE_MEM_WIN0_HI(win) \ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8) +/* + * NOTE: You may see the term "BAR" in a number of register names used by + * this driver. The term is an artifact of when the HW core was an + * endpoint device (EP). Now it is a root complex (RC) and anywhere a + * register has the term "BAR" it is related to an inbound window. 
+ */ + +#define PCIE_BRCM_MAX_INBOUND_WINS 16 #define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c #define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f -#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034 -#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f -#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038 +#define PCIE_MISC_RC_BAR4_CONFIG_LO 0x40d4 -#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c -#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f #define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044 #define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048 @@ -122,7 +126,6 @@ #define PCIE_MEM_WIN0_LIMIT_HI(win) \ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8) -#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000 @@ -131,9 +134,13 @@ (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \ PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK) -#define PCIE_INTR2_CPU_BASE 0x4300 +#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP 0x40ac +#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK BIT(0) +#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP 0x410c + #define PCIE_MSI_INTR2_BASE 0x4500 -/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */ + +/* Offsets from INTR2_CPU and MSI_INTR2 BASE offsets */ #define MSI_INT_STATUS 0x0 #define MSI_INT_CLR 0x8 #define MSI_INT_MASK_SET 0x10 @@ -184,9 +191,11 @@ #define SSC_STATUS_PLL_LOCK_MASK 0x800 #define PCIE_BRCM_MAX_MEMC 3 -#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX]) -#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA]) -#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1]) +#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX]) +#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA]) +#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1]) +#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG]) +#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE]) /* Rescal registers */ #define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700 @@ -205,27 +214,33 @@ enum { RGR1_SW_INIT_1, EXT_CFG_INDEX, EXT_CFG_DATA, + PCIE_HARD_DEBUG, + PCIE_INTR2_CPU_BASE, }; -enum { - RGR1_SW_INIT_1_INIT_MASK, - RGR1_SW_INIT_1_INIT_SHIFT, -}; - -enum pcie_type { +enum pcie_soc_base { GENERIC, - BCM7425, - BCM7435, + BCM2711, BCM4908, BCM7278, - BCM2711, + BCM7425, + BCM7435, + BCM7712, +}; + +struct inbound_win { + u64 size; + u64 pci_offset; + u64 cpu_addr; }; struct pcie_cfg_data { const int *offsets; - const enum pcie_type type; - void (*perst_set)(struct brcm_pcie *pcie, u32 val); - void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); + const enum pcie_soc_base soc_base; + const bool has_phy; + u8 num_inbound_wins; + int (*perst_set)(struct brcm_pcie *pcie, u32 val); + int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); }; struct subdev_regulators { @@ -262,21 +277,25 @@ struct brcm_pcie { u64 msi_target_addr; struct brcm_msi *msi; const int *reg_offsets; - enum pcie_type type; + enum pcie_soc_base soc_base; struct reset_control *rescal; struct reset_control *perst_reset; + struct reset_control *bridge_reset; + struct reset_control *swinit_reset; int num_memc; u64 memc_size[PCIE_BRCM_MAX_MEMC]; u32 hw_rev; - void (*perst_set)(struct brcm_pcie *pcie, u32 val); - void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); + int (*perst_set)(struct brcm_pcie *pcie, u32 val); + int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); struct 
subdev_regulators *sr; bool ep_wakeup_capable; + bool has_phy; + u8 num_inbound_wins; }; static inline bool is_bmips(const struct brcm_pcie *pcie) { - return pcie->type == BCM7435 || pcie->type == BCM7425; + return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425; } /* @@ -394,7 +413,7 @@ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) } static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie, - unsigned int win, u64 cpu_addr, + u8 win, u64 cpu_addr, u64 pcie_addr, u64 size) { u32 cpu_addr_mb_high, limit_addr_mb_high; @@ -445,8 +464,8 @@ static struct irq_chip brcm_msi_irq_chip = { }; static struct msi_domain_info brcm_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, .chip = &brcm_msi_irq_chip, }; @@ -484,12 +503,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq; } -static int brcm_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static void brcm_msi_ack_irq(struct irq_data *data) { struct brcm_msi *msi = irq_data_get_irq_chip_data(data); @@ -502,7 +515,6 @@ static void brcm_msi_ack_irq(struct irq_data *data) static struct irq_chip brcm_msi_bottom_irq_chip = { .name = "BRCM STB MSI", .irq_compose_msi_msg = brcm_msi_compose_msi_msg, - .irq_set_affinity = brcm_msi_set_affinity, .irq_ack = brcm_msi_ack_irq, }; @@ -649,7 +661,7 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie) BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR); if (msi->legacy) { - msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE; + msi->intr_base = msi->base + INTR2_CPU_BASE(pcie); msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR; msi->legacy_shift = 24; } else { @@ -730,17 +742,33 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus, return base + DATA_ADDR(pcie); } -static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val) +static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val) { - u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK; + u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK; u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT; + int ret = 0; + + if (pcie->bridge_reset) { + if (val) + ret = reset_control_assert(pcie->bridge_reset); + else + ret = reset_control_deassert(pcie->bridge_reset); + + if (ret) + dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n", + val ? 
"assert" : "deassert", ret); + + return ret; + } tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); tmp = (tmp & ~mask) | ((val << shift) & mask); writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); + + return ret; } -static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val) +static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val) { u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK; u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT; @@ -748,20 +776,29 @@ static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val) tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); tmp = (tmp & ~mask) | ((val << shift) & mask); writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); + + return 0; } -static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val) +static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val) { + int ret; + if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n")) - return; + return -EINVAL; if (val) - reset_control_assert(pcie->perst_reset); + ret = reset_control_assert(pcie->perst_reset); else - reset_control_deassert(pcie->perst_reset); + ret = reset_control_deassert(pcie->perst_reset); + + if (ret) + dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n", + val ? "assert" : "deassert", ret); + return ret; } -static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val) +static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val) { u32 tmp; @@ -769,34 +806,77 @@ static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val) tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL); u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK); writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL); + + return 0; } -static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val) +static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val) { u32 tmp; tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK); writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); + + return 0; } -static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, - u64 *rc_bar2_size, - u64 *rc_bar2_offset) +static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size, + u64 cpu_addr, u64 pci_offset) +{ + b->size = size; + b->cpu_addr = cpu_addr; + b->pci_offset = pci_offset; + (*count)++; +} + +static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, + struct inbound_win inbound_wins[]) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + u64 pci_offset, cpu_addr, size = 0, tot_size = 0; struct resource_entry *entry; struct device *dev = pcie->dev; u64 lowest_pcie_addr = ~(u64)0; int ret, i = 0; - u64 size = 0; + u8 n = 0; + + /* + * The HW registers (and PCIe) use order-1 numbering for BARs. As such, + * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1]. + */ + struct inbound_win *b_begin = &inbound_wins[1]; + struct inbound_win *b = b_begin; + + /* + * STB chips beside 7712 disable the first inbound window default. + * Rather being mapped to system memory it is mapped to the + * internal registers of the SoC. This feature is deprecated, has + * security considerations, and is not implemented in our modern + * SoCs. 
+ */ + if (pcie->soc_base != BCM7712) + add_inbound_win(b++, &n, 0, 0, 0); resource_list_for_each_entry(entry, &bridge->dma_ranges) { - u64 pcie_beg = entry->res->start - entry->offset; + u64 pcie_start = entry->res->start - entry->offset; + u64 cpu_start = entry->res->start; + + size = resource_size(entry->res); + tot_size += size; + if (pcie_start < lowest_pcie_addr) + lowest_pcie_addr = pcie_start; + /* + * 7712 and newer chips may have many BARs, with each + * offering a non-overlapping viewport to system memory. + * That being said, each BARs size must still be a power of + * two. + */ + if (pcie->soc_base == BCM7712) + add_inbound_win(b++, &n, size, cpu_start, pcie_start); - size += entry->res->end - entry->res->start + 1; - if (pcie_beg < lowest_pcie_addr) - lowest_pcie_addr = pcie_beg; + if (n > pcie->num_inbound_wins) + break; } if (lowest_pcie_addr == ~(u64)0) { @@ -804,13 +884,20 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, return -EINVAL; } + /* + * 7712 and newer chips do not have an internal memory mapping system + * that enables multiple memory controllers. As such, it can return + * now w/o doing special configuration. + */ + if (pcie->soc_base == BCM7712) + return n; + ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1, PCIE_BRCM_MAX_MEMC); - if (ret <= 0) { /* Make an educated guess */ pcie->num_memc = 1; - pcie->memc_size[0] = 1ULL << fls64(size - 1); + pcie->memc_size[0] = 1ULL << fls64(tot_size - 1); } else { pcie->num_memc = ret; } @@ -819,10 +906,15 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, for (i = 0, size = 0; i < pcie->num_memc; i++) size += pcie->memc_size[i]; - /* System memory starts at this address in PCIe-space */ - *rc_bar2_offset = lowest_pcie_addr; - /* The sum of all memc views must also be a power of 2 */ - *rc_bar2_size = 1ULL << fls64(size - 1); + /* Our HW mandates that the window size must be a power of 2 */ + size = 1ULL << fls64(size - 1); + + /* + * For STB chips, the BAR2 cpu_addr is hardwired to the start + * of system memory, so we set it to 0. + */ + cpu_addr = 0; + pci_offset = lowest_pcie_addr; /* * We validate the inbound memory view even though we should trust @@ -857,44 +949,119 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, * outbound memory @ 3GB). So instead it will start at the 1x * multiple of its size */ - if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) || - (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) { - dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n", - *rc_bar2_size, *rc_bar2_offset); + if (!size || (pci_offset & (size - 1)) || + (pci_offset < SZ_4G && pci_offset > SZ_2G)) { + dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n", + size, pci_offset); return -EINVAL; } - return 0; + /* Enable inbound window 2, the main inbound window for STB chips */ + add_inbound_win(b++, &n, size, cpu_addr, pci_offset); + + /* + * Disable inbound window 3. On some chips presents the same + * window as #2 but the data appears in a settable endianness. 
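
The window sizing a few lines above leans on the 1ULL << fls64(x - 1) idiom twice (for the guessed memc size and for the summed size). A small standalone model of it, with fls64() re-implemented via a compiler builtin purely for illustration, shows how an arbitrary total is rounded up to the power-of-two size the hardware requires:

#include <assert.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's fls64(): 1-based index of the
 * highest set bit, or 0 when the argument is 0. */
static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

/* Round a nonzero size up to the next power of two, as the
 * inbound-window code does. */
static uint64_t roundup_pow2(uint64_t size)
{
	return 1ULL << fls64(size - 1);
}

int main(void)
{
	assert(roundup_pow2(1) == 1);
	assert(roundup_pow2(0x40000000ULL) == 0x40000000ULL);	/* 1 GB stays 1 GB */
	assert(roundup_pow2(0xC0000000ULL) == 0x100000000ULL);	/* 3 GB rounds to 4 GB */
	return 0;
}
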
+ */ + add_inbound_win(b++, &n, 0, 0, 0); + + return n; +} + +static u32 brcm_bar_reg_offset(int bar) +{ + if (bar <= 3) + return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1); + else + return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4); +} + +static u32 brcm_ubus_reg_offset(int bar) +{ + if (bar <= 3) + return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1); + else + return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4); +} + +static void set_inbound_win_registers(struct brcm_pcie *pcie, + const struct inbound_win *inbound_wins, + u8 num_inbound_wins) +{ + void __iomem *base = pcie->base; + int i; + + for (i = 1; i <= num_inbound_wins; i++) { + u64 pci_offset = inbound_wins[i].pci_offset; + u64 cpu_addr = inbound_wins[i].cpu_addr; + u64 size = inbound_wins[i].size; + u32 reg_offset = brcm_bar_reg_offset(i); + u32 tmp = lower_32_bits(pci_offset); + + u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size), + PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK); + + /* Write low */ + writel_relaxed(tmp, base + reg_offset); + /* Write high */ + writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4); + + /* + * Most STB chips: + * Do nothing. + * 7712: + * All of their BARs need to be set. + */ + if (pcie->soc_base == BCM7712) { + /* BUS remap register settings */ + reg_offset = brcm_ubus_reg_offset(i); + tmp = lower_32_bits(cpu_addr) & ~0xfff; + tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK; + writel_relaxed(tmp, base + reg_offset); + tmp = upper_32_bits(cpu_addr); + writel_relaxed(tmp, base + reg_offset + 4); + } + } } static int brcm_pcie_setup(struct brcm_pcie *pcie) { - u64 rc_bar2_offset, rc_bar2_size; + struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS]; void __iomem *base = pcie->base; struct pci_host_bridge *bridge; struct resource_entry *entry; u32 tmp, burst, aspm_support; - int num_out_wins = 0; - int ret, memc; + u8 num_out_wins = 0; + int num_inbound_wins = 0; + int memc, ret; /* Reset the bridge */ - pcie->bridge_sw_init_set(pcie, 1); + ret = pcie->bridge_sw_init_set(pcie, 1); + if (ret) + return ret; /* Ensure that PERST# is asserted; some bootloaders may deassert it. 
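
As a quick sanity check on the brcm_bar_reg_offset()/brcm_ubus_reg_offset() helpers introduced a little above (an editorial sketch, not part of the patch): the 8-byte stride from the BAR1 and BAR4 base registers reproduces exactly the hard-coded BAR2/BAR3 offsets this patch removes, which is why the individual defines could go away.

#include <assert.h>
#include <stdint.h>

#define PCIE_MISC_RC_BAR1_CONFIG_LO	0x402c
#define PCIE_MISC_RC_BAR4_CONFIG_LO	0x40d4

/* Same arithmetic as brcm_bar_reg_offset(): BAR1..BAR3 step 8 bytes from
 * the BAR1 register, BAR4 and up step 8 bytes from the BAR4 register. */
static uint32_t bar_reg_offset(int bar)
{
	if (bar <= 3)
		return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
	return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
}

int main(void)
{
	assert(bar_reg_offset(2) == 0x4034);	/* was PCIE_MISC_RC_BAR2_CONFIG_LO */
	assert(bar_reg_offset(3) == 0x403c);	/* was PCIE_MISC_RC_BAR3_CONFIG_LO */
	assert(bar_reg_offset(4) == 0x40d4);	/* PCIE_MISC_RC_BAR4_CONFIG_LO */
	return 0;
}
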
*/ - if (pcie->type == BCM2711) - pcie->perst_set(pcie, 1); + if (pcie->soc_base == BCM2711) { + ret = pcie->perst_set(pcie, 1); + if (ret) { + pcie->bridge_sw_init_set(pcie, 0); + return ret; + } + } usleep_range(100, 200); /* Take the bridge out of reset */ - pcie->bridge_sw_init_set(pcie, 0); + ret = pcie->bridge_sw_init_set(pcie, 0); + if (ret) + return ret; - tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + tmp = readl(base + HARD_DEBUG(pcie)); if (is_bmips(pcie)) tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; else tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; - writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + writel(tmp, base + HARD_DEBUG(pcie)); /* Wait for SerDes to be stable */ usleep_range(100, 200); @@ -905,9 +1072,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) */ if (is_bmips(pcie)) burst = 0x1; /* 256 bytes */ - else if (pcie->type == BCM2711) + else if (pcie->soc_base == BCM2711) burst = 0x0; /* 128 bytes */ - else if (pcie->type == BCM7278) + else if (pcie->soc_base == BCM7278) burst = 0x3; /* 512 bytes */ else burst = 0x2; /* 512 bytes */ @@ -924,17 +1091,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK); writel(tmp, base + PCIE_MISC_MISC_CTRL); - ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size, - &rc_bar2_offset); - if (ret) - return ret; + num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins); + if (num_inbound_wins < 0) + return num_inbound_wins; - tmp = lower_32_bits(rc_bar2_offset); - u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size), - PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK); - writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO); - writel(upper_32_bits(rc_bar2_offset), - base + PCIE_MISC_RC_BAR2_CONFIG_HI); + set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins); + + if (!brcm_pcie_rc_mode(pcie)) { + dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n"); + return -EINVAL; + } tmp = readl(base + PCIE_MISC_MISC_CTRL); for (memc = 0; memc < pcie->num_memc; memc++) { @@ -956,25 +1122,12 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) * 4GB or when the inbound area is smaller than 4GB (taking into * account the rounding-up we're forced to perform). 
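
A compact model of the decision made just below (illustrative only; the predicate mirrors the driver code, but the helper name does not exist in it): the 32-bit MSI target address can only be used when the inbound window either sits entirely above the first 4 GB of PCIe address space or ends below it, so the target address is guaranteed to fall outside the window.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SZ_4G	0x100000000ULL

/* True when a sub-4GB MSI target address cannot collide with the main
 * inbound window. */
static bool msi_target_fits_below_4g(uint64_t win_pci_offset, uint64_t win_size)
{
	return win_pci_offset >= SZ_4G || win_pci_offset + win_size < SZ_4G;
}

int main(void)
{
	assert(msi_target_fits_below_4g(SZ_4G, SZ_4G));		/* window above 4 GB */
	assert(msi_target_fits_below_4g(0, 0x80000000ULL));	/* 2 GB window */
	assert(!msi_target_fits_below_4g(0, SZ_4G));		/* window fills first 4 GB */
	return 0;
}
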
*/ - if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G) + if (inbound_wins[2].pci_offset >= SZ_4G || + (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G) pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB; else pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB; - if (!brcm_pcie_rc_mode(pcie)) { - dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n"); - return -EINVAL; - } - - /* disable the PCIe->GISB memory window (RC_BAR1) */ - tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO); - tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK; - writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO); - - /* disable the PCIe->SCB memory window (RC_BAR3) */ - tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO); - tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK; - writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO); /* Don't advertise L0s capability if 'aspm-no-l0s' */ aspm_support = PCIE_LINK_STATE_L1; @@ -1025,7 +1178,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) num_out_wins++; } - /* PCIe->SCB endian mode for BAR */ + /* PCIe->SCB endian mode for inbound window */ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK); @@ -1045,6 +1198,10 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie) const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8; u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */ + /* 7712 does not have this (RGR1) timer */ + if (pcie->soc_base == BCM7712) + return; + /* Each unit in timeout register is 1/216,000,000 seconds */ writel(216 * timeout_us, pcie->base + REG_OFFSET); } @@ -1063,7 +1220,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie) } /* Start out assuming safe mode (both mode bits cleared) */ - clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie)); clkreq_cntl &= ~PCIE_CLKREQ_MASK; if (strcmp(mode, "no-l1ss") == 0) { @@ -1106,7 +1263,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie) dev_err(pcie->dev, err_msg); mode = "safe"; } - writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie)); dev_info(pcie->dev, "clkreq-mode set to %s\n", mode); } @@ -1120,7 +1277,9 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) int ret, i; /* Unassert the fundamental reset */ - pcie->perst_set(pcie, 0); + ret = pcie->perst_set(pcie, 0); + if (ret) + return ret; /* * Wait for 100ms after PERST# deassertion; see PCIe CEM specification @@ -1304,23 +1463,25 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start) static inline int brcm_phy_start(struct brcm_pcie *pcie) { - return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0; + return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0; } static inline int brcm_phy_stop(struct brcm_pcie *pcie) { - return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0; + return pcie->has_phy ? 
brcm_phy_cntl(pcie, 0) : 0; } -static void brcm_pcie_turn_off(struct brcm_pcie *pcie) +static int brcm_pcie_turn_off(struct brcm_pcie *pcie) { void __iomem *base = pcie->base; - int tmp; + int tmp, ret; if (brcm_pcie_link_up(pcie)) brcm_pcie_enter_l23(pcie); /* Assert fundamental reset */ - pcie->perst_set(pcie, 1); + ret = pcie->perst_set(pcie, 1); + if (ret) + return ret; /* Deassert request for L23 in case it was asserted */ tmp = readl(base + PCIE_MISC_PCIE_CTRL); @@ -1328,12 +1489,14 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie) writel(tmp, base + PCIE_MISC_PCIE_CTRL); /* Turn off SerDes */ - tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + tmp = readl(base + HARD_DEBUG(pcie)); u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); - writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + writel(tmp, base + HARD_DEBUG(pcie)); /* Shutdown PCIe bridge */ - pcie->bridge_sw_init_set(pcie, 1); + ret = pcie->bridge_sw_init_set(pcie, 1); + + return ret; } static int pci_dev_may_wakeup(struct pci_dev *dev, void *data) @@ -1351,9 +1514,12 @@ static int brcm_pcie_suspend_noirq(struct device *dev) { struct brcm_pcie *pcie = dev_get_drvdata(dev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); - int ret; + int ret, rret; + + ret = brcm_pcie_turn_off(pcie); + if (ret) + return ret; - brcm_pcie_turn_off(pcie); /* * If brcm_phy_stop() returns an error, just dev_err(). If we * return the error it will cause the suspend to fail and this is a @@ -1382,7 +1548,10 @@ static int brcm_pcie_suspend_noirq(struct device *dev) pcie->sr->supplies); if (ret) { dev_err(dev, "Could not turn off regulators\n"); - reset_control_reset(pcie->rescal); + rret = reset_control_reset(pcie->rescal); + if (rret) + dev_err(dev, "failed to reset 'rascal' controller ret=%d\n", + rret); return ret; } } @@ -1397,7 +1566,7 @@ static int brcm_pcie_resume_noirq(struct device *dev) struct brcm_pcie *pcie = dev_get_drvdata(dev); void __iomem *base; u32 tmp; - int ret; + int ret, rret; base = pcie->base; ret = clk_prepare_enable(pcie->clk); @@ -1416,9 +1585,9 @@ static int brcm_pcie_resume_noirq(struct device *dev) pcie->bridge_sw_init_set(pcie, 0); /* SERDES_IDDQ = 0 */ - tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + tmp = readl(base + HARD_DEBUG(pcie)); u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); - writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + writel(tmp, base + HARD_DEBUG(pcie)); /* wait for serdes to be stable */ udelay(100); @@ -1459,7 +1628,9 @@ static int brcm_pcie_resume_noirq(struct device *dev) if (pcie->sr) regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies); err_reset: - reset_control_rearm(pcie->rescal); + rret = reset_control_rearm(pcie->rescal); + if (rret) + dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret); err_disable_clk: clk_disable_unprepare(pcie->clk); return ret; @@ -1487,74 +1658,111 @@ static void brcm_pcie_remove(struct platform_device *pdev) } static const int pcie_offsets[] = { - [RGR1_SW_INIT_1] = 0x9210, - [EXT_CFG_INDEX] = 0x9000, - [EXT_CFG_DATA] = 0x9004, + [RGR1_SW_INIT_1] = 0x9210, + [EXT_CFG_INDEX] = 0x9000, + [EXT_CFG_DATA] = 0x9004, + [PCIE_HARD_DEBUG] = 0x4204, + [PCIE_INTR2_CPU_BASE] = 0x4300, }; -static const int pcie_offsets_bmips_7425[] = { - [RGR1_SW_INIT_1] = 0x8010, - [EXT_CFG_INDEX] = 0x8300, - [EXT_CFG_DATA] = 0x8304, +static const int pcie_offsets_bcm7278[] = { + [RGR1_SW_INIT_1] = 0xc010, + [EXT_CFG_INDEX] = 0x9000, + [EXT_CFG_DATA] = 0x9004, + 
[PCIE_HARD_DEBUG] = 0x4204, + [PCIE_INTR2_CPU_BASE] = 0x4300, }; -static const struct pcie_cfg_data generic_cfg = { - .offsets = pcie_offsets, - .type = GENERIC, - .perst_set = brcm_pcie_perst_set_generic, - .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +static const int pcie_offsets_bcm7425[] = { + [RGR1_SW_INIT_1] = 0x8010, + [EXT_CFG_INDEX] = 0x8300, + [EXT_CFG_DATA] = 0x8304, + [PCIE_HARD_DEBUG] = 0x4204, + [PCIE_INTR2_CPU_BASE] = 0x4300, }; -static const struct pcie_cfg_data bcm7425_cfg = { - .offsets = pcie_offsets_bmips_7425, - .type = BCM7425, +static const int pcie_offsets_bcm7712[] = { + [EXT_CFG_INDEX] = 0x9000, + [EXT_CFG_DATA] = 0x9004, + [PCIE_HARD_DEBUG] = 0x4304, + [PCIE_INTR2_CPU_BASE] = 0x4400, +}; + +static const struct pcie_cfg_data generic_cfg = { + .offsets = pcie_offsets, + .soc_base = GENERIC, .perst_set = brcm_pcie_perst_set_generic, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, + .num_inbound_wins = 3, }; -static const struct pcie_cfg_data bcm7435_cfg = { +static const struct pcie_cfg_data bcm2711_cfg = { .offsets = pcie_offsets, - .type = BCM7435, + .soc_base = BCM2711, .perst_set = brcm_pcie_perst_set_generic, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, + .num_inbound_wins = 3, }; static const struct pcie_cfg_data bcm4908_cfg = { .offsets = pcie_offsets, - .type = BCM4908, + .soc_base = BCM4908, .perst_set = brcm_pcie_perst_set_4908, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const int pcie_offset_bcm7278[] = { - [RGR1_SW_INIT_1] = 0xc010, - [EXT_CFG_INDEX] = 0x9000, - [EXT_CFG_DATA] = 0x9004, + .num_inbound_wins = 3, }; static const struct pcie_cfg_data bcm7278_cfg = { - .offsets = pcie_offset_bcm7278, - .type = BCM7278, + .offsets = pcie_offsets_bcm7278, + .soc_base = BCM7278, .perst_set = brcm_pcie_perst_set_7278, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, + .num_inbound_wins = 3, }; -static const struct pcie_cfg_data bcm2711_cfg = { +static const struct pcie_cfg_data bcm7425_cfg = { + .offsets = pcie_offsets_bcm7425, + .soc_base = BCM7425, + .perst_set = brcm_pcie_perst_set_generic, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, + .num_inbound_wins = 3, +}; + +static const struct pcie_cfg_data bcm7435_cfg = { .offsets = pcie_offsets, - .type = BCM2711, + .soc_base = BCM7435, .perst_set = brcm_pcie_perst_set_generic, .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, + .num_inbound_wins = 3, +}; + +static const struct pcie_cfg_data bcm7216_cfg = { + .offsets = pcie_offsets_bcm7278, + .soc_base = BCM7278, + .perst_set = brcm_pcie_perst_set_7278, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, + .has_phy = true, + .num_inbound_wins = 3, +}; + +static const struct pcie_cfg_data bcm7712_cfg = { + .offsets = pcie_offsets_bcm7712, + .perst_set = brcm_pcie_perst_set_7278, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, + .soc_base = BCM7712, + .num_inbound_wins = 10, }; static const struct of_device_id brcm_pcie_match[] = { { .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg }, { .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg }, { .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg }, + { .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg }, { .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg }, - { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg }, - { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg }, - { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg }, { .compatible = 
"brcm,bcm7425-pcie", .data = &bcm7425_cfg }, + { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg }, + { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg }, + { .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg }, {}, }; @@ -1596,9 +1804,11 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie->dev = &pdev->dev; pcie->np = np; pcie->reg_offsets = data->offsets; - pcie->type = data->type; + pcie->soc_base = data->soc_base; pcie->perst_set = data->perst_set; pcie->bridge_sw_init_set = data->bridge_sw_init_set; + pcie->has_phy = data->has_phy; + pcie->num_inbound_wins = data->num_inbound_wins; pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) @@ -1613,25 +1823,52 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc"); - ret = clk_prepare_enable(pcie->clk); - if (ret) { - dev_err(&pdev->dev, "could not enable clock\n"); - return ret; - } pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal"); - if (IS_ERR(pcie->rescal)) { - clk_disable_unprepare(pcie->clk); + if (IS_ERR(pcie->rescal)) return PTR_ERR(pcie->rescal); - } + pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst"); - if (IS_ERR(pcie->perst_reset)) { - clk_disable_unprepare(pcie->clk); + if (IS_ERR(pcie->perst_reset)) return PTR_ERR(pcie->perst_reset); + + pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge"); + if (IS_ERR(pcie->bridge_reset)) + return PTR_ERR(pcie->bridge_reset); + + pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit"); + if (IS_ERR(pcie->swinit_reset)) + return PTR_ERR(pcie->swinit_reset); + + ret = clk_prepare_enable(pcie->clk); + if (ret) + return dev_err_probe(&pdev->dev, ret, "could not enable clock\n"); + + pcie->bridge_sw_init_set(pcie, 0); + + if (pcie->swinit_reset) { + ret = reset_control_assert(pcie->swinit_reset); + if (ret) { + clk_disable_unprepare(pcie->clk); + return dev_err_probe(&pdev->dev, ret, + "could not assert reset 'swinit'\n"); + } + + /* HW team recommends 1us for proper sync and propagation of reset */ + udelay(1); + + ret = reset_control_deassert(pcie->swinit_reset); + if (ret) { + clk_disable_unprepare(pcie->clk); + return dev_err_probe(&pdev->dev, ret, + "could not de-assert reset 'swinit'\n"); + } } ret = reset_control_reset(pcie->rescal); - if (ret) - dev_err(&pdev->dev, "failed to deassert 'rescal'\n"); + if (ret) { + clk_disable_unprepare(pcie->clk); + return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n"); + } ret = brcm_phy_start(pcie); if (ret) { @@ -1645,7 +1882,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) goto fail; pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION); - if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { + if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n"); ret = -ENODEV; goto fail; @@ -1660,7 +1897,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) } } - bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops; + bridge->ops = pcie->soc_base == BCM7425 ? 
&brcm7425_pcie_ops : &brcm_pcie_ops; bridge->sysdata = pcie; platform_set_drvdata(pdev, pcie); @@ -1678,6 +1915,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) fail: __brcm_pcie_remove(pcie); + return ret; } diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 97f739a2c9f8f8..22134e95574bd8 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -54,7 +54,7 @@ #define CFG_RD_SUCCESS 0 #define CFG_RD_UR 1 -#define CFG_RD_CRS 2 +#define CFG_RD_RRS 2 #define CFG_RD_CA 3 #define CFG_RETRY_STATUS 0xffff0001 #define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ @@ -485,31 +485,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie, u32 status; /* - * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only + * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only * affects config reads of the Vendor ID. For config writes or any * other config reads, the Root may automatically reissue the * configuration request again as a new request. * * For config reads, this hardware returns CFG_RETRY_STATUS data - * when it receives a CRS completion, regardless of the address of - * the read or the CRS Software Visibility Enable bit. As a + * when it receives a RRS completion, regardless of the address of + * the read or the RRS Software Visibility Enable bit. As a * partial workaround for this, we retry in software any read that * returns CFG_RETRY_STATUS. * * Note that a non-Vendor ID config register may have a value of * CFG_RETRY_STATUS. If we read that, we can't distinguish it from - * a CRS completion, so we will incorrectly retry the read and + * a RRS completion, so we will incorrectly retry the read and * eventually return the wrong data (0xffffffff). */ data = readl(cfg_data_p); while (data == CFG_RETRY_STATUS && timeout--) { /* - * CRS state is set in CFG_RD status register + * RRS state is set in CFG_RD status register * This will handle the case where CFG_RETRY_STATUS is * valid config data. 
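
A standalone rendition of the disambiguation described above (names and the fixed retry budget are illustrative, not the driver's): the retry-status pattern is only treated as "try again" while the separate CFG_RD status register also reports RRS; once that register reports success, the very same bit pattern is accepted as genuine register contents.

#include <assert.h>
#include <stdint.h>

#define CFG_RETRY_STATUS	0xffff0001u
#define CFG_RD_SUCCESS		0
#define CFG_RD_RRS		2

/* One simulated config read: the data register value plus what the
 * CFG_RD status register reports for it. */
struct cfg_read {
	uint32_t data;
	uint32_t rd_status;
};

/* Keep retrying while the data looks like the retry pattern *and* the
 * status register confirms an RRS completion; give up after 'budget'
 * attempts and report an error response. */
static uint32_t cfg_retry(const struct cfg_read *reads, int budget)
{
	for (int i = 0; i < budget; i++) {
		if (reads[i].data != CFG_RETRY_STATUS ||
		    reads[i].rd_status != CFG_RD_RRS)
			return reads[i].data;
	}
	return 0xffffffffu;
}

int main(void)
{
	const struct cfg_read busy_then_ready[] = {
		{ CFG_RETRY_STATUS, CFG_RD_RRS },
		{ CFG_RETRY_STATUS, CFG_RD_RRS },
		{ 0x000014e4, CFG_RD_SUCCESS },		/* device came up */
	};
	const struct cfg_read genuine_data[] = {
		{ CFG_RETRY_STATUS, CFG_RD_SUCCESS },	/* register really holds this */
	};

	assert(cfg_retry(busy_then_ready, 3) == 0x000014e4);
	assert(cfg_retry(genuine_data, 1) == CFG_RETRY_STATUS);
	return 0;
}
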
*/ status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS); - if (status != CFG_RD_CRS) + if (status != CFG_RD_RRS) return data; udelay(1); @@ -556,8 +556,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val) break; case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL: - /* Don't advertise CRS SV support */ - *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + /* Don't advertise RRS SV support */ + *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16); break; default: diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index b7e8e24f6a4085..66ce4b5d309bb6 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -6,7 +6,9 @@ * Author: Jianjun Wang */ +#include #include +#include #include #include #include @@ -15,6 +17,8 @@ #include #include #include +#include +#include #include #include #include @@ -29,6 +33,12 @@ #define PCI_CLASS(class) (class << 8) #define PCIE_RC_MODE BIT(0) +#define PCIE_EQ_PRESET_01_REG 0x100 +#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0) +#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8) +#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16) +#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24) + #define PCIE_CFGNUM_REG 0x140 #define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0)) #define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8)) @@ -68,6 +78,14 @@ #define PCIE_MSI_SET_ENABLE_REG 0x190 #define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0) +#define PCIE_PIPE4_PIE8_REG 0x338 +#define PCIE_K_FINETUNE_MAX GENMASK(5, 0) +#define PCIE_K_FINETUNE_ERR GENMASK(7, 6) +#define PCIE_K_PRESET_TO_USE GENMASK(18, 8) +#define PCIE_K_PHYPARAM_QUERY BIT(19) +#define PCIE_K_QUERY_TIMEOUT BIT(20) +#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21) + #define PCIE_MSI_SET_BASE_REG 0xc00 #define PCIE_MSI_SET_OFFSET 0x10 #define PCIE_MSI_SET_STATUS_OFFSET 0x04 @@ -100,6 +118,26 @@ #define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0) #define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2) +#define MAX_NUM_PHY_RESETS 3 + +/* Time in ms needed to complete PCIe reset on EN7581 SoC */ +#define PCIE_EN7581_RESET_TIME_MS 100 + +struct mtk_gen3_pcie; + +/** + * struct mtk_gen3_pcie_pdata - differentiate between host generations + * @power_up: pcie power_up callback + * @phy_resets: phy reset lines SoC data. 
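
To make the shape of this per-SoC abstraction concrete, here is a standalone sketch (plain C, not kernel code; only the compatible strings, the power_up callback role and the PHY reset-line names are taken from this patch, everything else is illustrative): probe looks up a constant descriptor for the matched compatible and calls through it.

#include <stdio.h>
#include <string.h>

struct soc_pdata {
	const char *compatible;
	int (*power_up)(void);
	const char *phy_reset_ids[3];
	int num_resets;
};

static int mt8192_power_up(void)
{
	puts("mt8192-style power up");
	return 0;
}

static int en7581_power_up(void)
{
	puts("en7581-style power up");
	return 0;
}

static const struct soc_pdata socs[] = {
	{ "mediatek,mt8192-pcie", mt8192_power_up, { "phy" }, 1 },
	{ "airoha,en7581-pcie", en7581_power_up,
	  { "phy-lane0", "phy-lane1", "phy-lane2" }, 3 },
};

int main(void)
{
	const char *compat = "airoha,en7581-pcie";	/* as if matched from DT */

	for (size_t i = 0; i < sizeof(socs) / sizeof(socs[0]); i++)
		if (!strcmp(socs[i].compatible, compat))
			return socs[i].power_up();
	return 1;
}
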
+ */ +struct mtk_gen3_pcie_pdata { + int (*power_up)(struct mtk_gen3_pcie *pcie); + struct { + const char *id[MAX_NUM_PHY_RESETS]; + int num_resets; + } phy_resets; +}; + /** * struct mtk_msi_set - MSI information for each set * @base: IO mapped register base @@ -118,7 +156,7 @@ struct mtk_msi_set { * @base: IO mapped register base * @reg_base: physical register base * @mac_reset: MAC reset control - * @phy_reset: PHY reset control + * @phy_resets: PHY reset controllers * @phy: PHY controller block * @clks: PCIe clocks * @num_clks: PCIe clocks count for this port @@ -131,13 +169,14 @@ struct mtk_msi_set { * @msi_sets: MSI sets information * @lock: lock protecting IRQ bit map * @msi_irq_in_use: bit map for assigned MSI IRQ + * @soc: pointer to SoC-dependent operations */ struct mtk_gen3_pcie { struct device *dev; void __iomem *base; phys_addr_t reg_base; struct reset_control *mac_reset; - struct reset_control *phy_reset; + struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS]; struct phy *phy; struct clk_bulk_data *clks; int num_clks; @@ -151,6 +190,8 @@ struct mtk_gen3_pcie { struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM]; struct mutex lock; DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM); + + const struct mtk_gen3_pcie_pdata *soc; }; /* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */ @@ -424,12 +465,6 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) return 0; } -static int mtk_pcie_set_affinity(struct irq_data *data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static void mtk_pcie_msi_irq_mask(struct irq_data *data) { pci_msi_mask_irq(data); @@ -450,8 +485,9 @@ static struct irq_chip mtk_msi_irq_chip = { }; static struct msi_domain_info mtk_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX | + MSI_FLAG_MULTI_PCI_MSI, .chip = &mtk_msi_irq_chip, }; @@ -517,7 +553,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = { .irq_mask = mtk_msi_bottom_irq_mask, .irq_unmask = mtk_msi_bottom_irq_unmask, .irq_compose_msi_msg = mtk_compose_msi_msg, - .irq_set_affinity = mtk_pcie_set_affinity, .name = "MSI", }; @@ -618,7 +653,6 @@ static struct irq_chip mtk_intx_irq_chip = { .irq_mask = mtk_intx_mask, .irq_unmask = mtk_intx_unmask, .irq_eoi = mtk_intx_eoi, - .irq_set_affinity = mtk_pcie_set_affinity, .name = "INTx", }; @@ -775,10 +809,10 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie) static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) { + int i, ret, num_resets = pcie->soc->phy_resets.num_resets; struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); struct resource *regs; - int ret; regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); if (!regs) @@ -791,12 +825,12 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) pcie->reg_base = regs->start; - pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy"); - if (IS_ERR(pcie->phy_reset)) { - ret = PTR_ERR(pcie->phy_reset); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get PHY reset\n"); + for (i = 0; i < num_resets; i++) + pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i]; + ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets); + if (ret) { + dev_err(dev, "failed to get PHY bulk reset\n"); return ret; } @@ -827,13 +861,96 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie 
*pcie) return 0; } +static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie) +{ + struct device *dev = pcie->dev; + int err; + u32 val; + + /* + * Wait for the time needed to complete the bulk assert in + * mtk_pcie_setup for EN7581 SoC. + */ + mdelay(PCIE_EN7581_RESET_TIME_MS); + + err = phy_init(pcie->phy); + if (err) { + dev_err(dev, "failed to initialize PHY\n"); + return err; + } + + err = phy_power_on(pcie->phy); + if (err) { + dev_err(dev, "failed to power on PHY\n"); + goto err_phy_on; + } + + err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); + if (err) { + dev_err(dev, "failed to deassert PHYs\n"); + goto err_phy_deassert; + } + + /* + * Wait for the time needed to complete the bulk de-assert above. + * This time is specific for EN7581 SoC. + */ + mdelay(PCIE_EN7581_RESET_TIME_MS); + + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + + err = clk_bulk_prepare(pcie->num_clks, pcie->clks); + if (err) { + dev_err(dev, "failed to prepare clock\n"); + goto err_clk_prepare; + } + + val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) | + FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) | + FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) | + FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41); + writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG); + + val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT | + FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) | + FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) | + FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf); + writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG); + + err = clk_bulk_enable(pcie->num_clks, pcie->clks); + if (err) { + dev_err(dev, "failed to prepare clock\n"); + goto err_clk_enable; + } + + return 0; + +err_clk_enable: + clk_bulk_unprepare(pcie->num_clks, pcie->clks); +err_clk_prepare: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); +err_phy_deassert: + phy_power_off(pcie->phy); +err_phy_on: + phy_exit(pcie->phy); + + return err; +} + static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie) { struct device *dev = pcie->dev; int err; /* PHY power on and enable pipe clock */ - reset_control_deassert(pcie->phy_reset); + err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); + if (err) { + dev_err(dev, "failed to deassert PHYs\n"); + return err; + } err = phy_init(pcie->phy); if (err) { @@ -869,7 +986,7 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie) err_phy_on: phy_exit(pcie->phy); err_phy_init: - reset_control_assert(pcie->phy_reset); + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); return err; } @@ -884,7 +1001,7 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie) phy_power_off(pcie->phy); phy_exit(pcie->phy); - reset_control_assert(pcie->phy_reset); + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); } static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) @@ -895,16 +1012,22 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) if (err) return err; + /* + * Deassert the line in order to avoid unbalance in deassert_count + * counter since the bulk is shared. + */ + reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); /* * The controller may have been left out of reset by the bootloader * so make sure that we get a clean start by asserting resets here. 
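
The deassert-before-assert sequence above exists because these PHY reset lines are requested as a shared bulk: for a shared reset control an assert must be paired with an earlier deassert, and the line is only physically toggled when the reference count crosses zero. The toy model below is an editorial illustration of that balancing rule, written from memory of the reset framework's documented behaviour rather than its actual implementation.

#include <assert.h>

/* Toy bookkeeping for one shared reset line. */
struct shared_reset {
	int deassert_count;
	int line_asserted;	/* 1 = held in reset */
};

static int shared_deassert(struct shared_reset *r)
{
	if (++r->deassert_count == 1)
		r->line_asserted = 0;	/* first user releases the line */
	return 0;
}

static int shared_assert(struct shared_reset *r)
{
	if (r->deassert_count == 0)
		return -1;		/* unbalanced: nothing to pair with */
	if (--r->deassert_count == 0)
		r->line_asserted = 1;	/* last user puts it back in reset */
	return 0;
}

int main(void)
{
	struct shared_reset phy = { .deassert_count = 0, .line_asserted = 1 };

	assert(shared_assert(&phy) < 0);	/* asserting first would be unbalanced */
	assert(shared_deassert(&phy) == 0);	/* so deassert once ... */
	assert(shared_assert(&phy) == 0);	/* ... and the assert pairs up */
	assert(phy.line_asserted == 1);
	return 0;
}
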
*/ - reset_control_assert(pcie->phy_reset); + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); + reset_control_assert(pcie->mac_reset); usleep_range(10, 20); /* Don't touch the hardware registers before power up */ - err = mtk_pcie_power_up(pcie); + err = pcie->soc->power_up(pcie); if (err) return err; @@ -939,6 +1062,7 @@ static int mtk_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(host); pcie->dev = dev; + pcie->soc = device_get_match_data(dev); platform_set_drvdata(pdev, pcie); err = mtk_pcie_setup(pcie); @@ -1054,7 +1178,7 @@ static int mtk_pcie_resume_noirq(struct device *dev) struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); int err; - err = mtk_pcie_power_up(pcie); + err = pcie->soc->power_up(pcie); if (err) return err; @@ -1074,8 +1198,27 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = { mtk_pcie_resume_noirq) }; +static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = { + .power_up = mtk_pcie_power_up, + .phy_resets = { + .id[0] = "phy", + .num_resets = 1, + }, +}; + +static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = { + .power_up = mtk_pcie_en7581_power_up, + .phy_resets = { + .id[0] = "phy-lane0", + .id[1] = "phy-lane1", + .id[2] = "phy-lane2", + .num_resets = 3, + }, +}; + static const struct of_device_id mtk_pcie_of_match[] = { - { .compatible = "mediatek,mt8192-pcie" }, + { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 }, + { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 }, {}, }; MODULE_DEVICE_TABLE(of, mtk_pcie_of_match); diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 7fc0d7709b7fbd..7f7d04c2ea573f 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -211,7 +211,6 @@ struct mtk_pcie_port { * @base: IO mapped register base * @cfg: IO mapped register map for PCIe config * @free_ck: free-run reference clock - * @mem: non-prefetchable memory resource * @ports: pointer to PCIe port information * @soc: pointer to SoC-dependent operations */ @@ -407,12 +406,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) (int)data->hwirq, msg->address_hi, msg->address_lo); } -static int mtk_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static void mtk_msi_ack_irq(struct irq_data *data) { struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); @@ -424,7 +417,6 @@ static void mtk_msi_ack_irq(struct irq_data *data) static struct irq_chip mtk_msi_bottom_irq_chip = { .name = "MTK MSI", .irq_compose_msi_msg = mtk_compose_msi_msg, - .irq_set_affinity = mtk_msi_set_affinity, .irq_ack = mtk_msi_ack_irq, }; @@ -486,8 +478,8 @@ static struct irq_chip mtk_msi_irq_chip = { }; static struct msi_domain_info mtk_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, .chip = &mtk_msi_irq_chip, }; diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index c01efc6ea64f60..3dd653f3d78414 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -658,11 +658,6 @@ static void rcar_msi_irq_unmask(struct irq_data *d) spin_unlock_irqrestore(&msi->mask_lock, flags); } -static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) -{ 
- return -EINVAL; -} - static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct rcar_msi *msi = irq_data_get_irq_chip_data(data); @@ -678,7 +673,6 @@ static struct irq_chip rcar_msi_bottom_chip = { .irq_ack = rcar_msi_irq_ack, .irq_mask = rcar_msi_irq_mask, .irq_unmask = rcar_msi_irq_unmask, - .irq_set_affinity = rcar_msi_set_affinity, .irq_compose_msi_msg = rcar_compose_msi_msg, }; @@ -725,8 +719,8 @@ static const struct irq_domain_ops rcar_msi_domain_ops = { }; static struct msi_domain_info rcar_msi_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, .chip = &rcar_msi_top_chip, }; diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c index 5be5dfd8398f2a..dd117f07fc95b6 100644 --- a/drivers/pci/controller/pcie-xilinx-dma-pl.c +++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c @@ -71,10 +71,24 @@ /* Phy Status/Control Register definitions */ #define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11) +#define QDMA_BRIDGE_BASE_OFF 0xcd8 /* Number of MSI IRQs */ #define XILINX_NUM_MSI_IRQS 64 +enum xilinx_pl_dma_version { + XDMA, + QDMA, +}; + +/** + * struct xilinx_pl_dma_variant - PL DMA PCIe variant information + * @version: DMA version + */ +struct xilinx_pl_dma_variant { + enum xilinx_pl_dma_version version; +}; + struct xilinx_msi { struct irq_domain *msi_domain; unsigned long *bitmap; @@ -88,6 +102,7 @@ struct xilinx_msi { * struct pl_dma_pcie - PCIe port information * @dev: Device pointer * @reg_base: IO Mapped Register Base + * @cfg_base: IO Mapped Configuration Base * @irq: Interrupt number * @cfg: Holds mappings of config space window * @phys_reg_base: Physical address of reg base @@ -97,10 +112,12 @@ struct xilinx_msi { * @msi: MSI information * @intx_irq: INTx error interrupt number * @lock: Lock protecting shared register access + * @variant: PL DMA PCIe version check pointer */ struct pl_dma_pcie { struct device *dev; void __iomem *reg_base; + void __iomem *cfg_base; int irq; struct pci_config_window *cfg; phys_addr_t phys_reg_base; @@ -110,16 +127,23 @@ struct pl_dma_pcie { struct xilinx_msi msi; int intx_irq; raw_spinlock_t lock; + const struct xilinx_pl_dma_variant *variant; }; static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg) { + if (port->variant->version == QDMA) + return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF); + return readl(port->reg_base + reg); } static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg) { - writel(val, port->reg_base + reg); + if (port->variant->version == QDMA) + writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF); + else + writel(val, port->reg_base + reg); } static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port) @@ -173,6 +197,9 @@ static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus, if (!xilinx_pl_dma_pcie_valid_device(bus, devfn)) return NULL; + if (port->variant->version == QDMA) + return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); + return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); } @@ -355,8 +382,8 @@ static struct irq_chip xilinx_msi_irq_chip = { }; static struct msi_domain_info xilinx_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | 
MSI_FLAG_MULTI_PCI_MSI, .chip = &xilinx_msi_irq_chip, }; @@ -370,16 +397,9 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) msg->data = data->hwirq; } -static int xilinx_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static struct irq_chip xilinx_irq_chip = { .name = "pl_dma:MSI", .irq_compose_msi_msg = xilinx_compose_msi_msg, - .irq_set_affinity = xilinx_msi_set_affinity, }; static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, @@ -731,6 +751,15 @@ static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port, port->reg_base = port->cfg->win; + if (port->variant->version == QDMA) { + port->cfg_base = port->cfg->win; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); + port->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(port->reg_base)) + return PTR_ERR(port->reg_base); + port->phys_reg_base = res->start; + } + err = xilinx_request_msi_irq(port); if (err) { pci_ecam_free(port->cfg); @@ -760,6 +789,8 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev) if (!bus) return -ENODEV; + port->variant = of_device_get_match_data(dev); + err = xilinx_pl_dma_pcie_parse_dt(port, bus->res); if (err) { dev_err(dev, "Parsing DT failed\n"); @@ -791,9 +822,22 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev) return err; } +static const struct xilinx_pl_dma_variant xdma_host = { + .version = XDMA, +}; + +static const struct xilinx_pl_dma_variant qdma_host = { + .version = QDMA, +}; + static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = { { .compatible = "xlnx,xdma-host-3.00", + .data = &xdma_host, + }, + { + .compatible = "xlnx,qdma-host-3.00", + .data = &qdma_host, }, {} }; diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 0408f4d612b5af..a8ae14474dd0a4 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -80,8 +81,8 @@ #define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) #define MSGF_MISC_SR_FATAL_DEV BIT(23) #define MSGF_MISC_SR_LINK_DOWN BIT(24) -#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) -#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) +#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25) +#define MSGF_MISC_SR_LINK_BWIDTH BIT(26) #define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ MSGF_MISC_SR_RXMSG_OVER | \ @@ -96,8 +97,8 @@ MSGF_MISC_SR_NON_FATAL_DEV | \ MSGF_MISC_SR_FATAL_DEV | \ MSGF_MISC_SR_LINK_DOWN | \ - MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ - MSGF_MSIC_SR_LINK_BWIDTH) + MSGF_MISC_SR_LINK_AUTO_BWIDTH | \ + MSGF_MISC_SR_LINK_BWIDTH) /* Legacy interrupt status mask bits */ #define MSGF_LEG_SR_INTA BIT(0) @@ -157,6 +158,7 @@ struct nwl_pcie { void __iomem *breg_base; void __iomem *pcireg_base; void __iomem *ecam_base; + struct phy *phy[4]; phys_addr_t phys_breg_base; /* Physical Bridge Register Base */ phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */ phys_addr_t phys_ecam_base; /* Physical Configuration Base */ @@ -267,42 +269,42 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) return IRQ_NONE; if (misc_stat & MSGF_MISC_SR_RXMSG_OVER) - dev_err(dev, "Received Message FIFO Overflow\n"); + dev_err_ratelimited(dev, "Received Message FIFO Overflow\n"); if (misc_stat & MSGF_MISC_SR_SLAVE_ERR) - dev_err(dev, "Slave error\n"); + dev_err_ratelimited(dev, "Slave error\n"); if (misc_stat & MSGF_MISC_SR_MASTER_ERR) - 
dev_err(dev, "Master error\n"); + dev_err_ratelimited(dev, "Master error\n"); if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR) - dev_err(dev, "In Misc Ingress address translation error\n"); + dev_err_ratelimited(dev, "In Misc Ingress address translation error\n"); if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR) - dev_err(dev, "In Misc Egress address translation error\n"); + dev_err_ratelimited(dev, "In Misc Egress address translation error\n"); if (misc_stat & MSGF_MISC_SR_FATAL_AER) - dev_err(dev, "Fatal Error in AER Capability\n"); + dev_err_ratelimited(dev, "Fatal Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER) - dev_err(dev, "Non-Fatal Error in AER Capability\n"); + dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_CORR_AER) - dev_err(dev, "Correctable Error in AER Capability\n"); + dev_err_ratelimited(dev, "Correctable Error in AER Capability\n"); if (misc_stat & MSGF_MISC_SR_UR_DETECT) - dev_err(dev, "Unsupported request Detected\n"); + dev_err_ratelimited(dev, "Unsupported request Detected\n"); if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV) - dev_err(dev, "Non-Fatal Error Detected\n"); + dev_err_ratelimited(dev, "Non-Fatal Error Detected\n"); if (misc_stat & MSGF_MISC_SR_FATAL_DEV) - dev_err(dev, "Fatal Error Detected\n"); + dev_err_ratelimited(dev, "Fatal Error Detected\n"); - if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) + if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH) dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n"); - if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) + if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH) dev_info(dev, "Link Bandwidth Management Status bit set\n"); /* Clear misc interrupt status */ @@ -371,7 +373,7 @@ static void nwl_mask_intx_irq(struct irq_data *data) u32 mask; u32 val; - mask = 1 << (data->hwirq - 1); + mask = 1 << data->hwirq; raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); @@ -385,7 +387,7 @@ static void nwl_unmask_intx_irq(struct irq_data *data) u32 mask; u32 val; - mask = 1 << (data->hwirq - 1); + mask = 1 << data->hwirq; raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); @@ -425,8 +427,8 @@ static struct irq_chip nwl_msi_irq_chip = { }; static struct msi_domain_info nwl_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, .chip = &nwl_msi_irq_chip, }; #endif @@ -441,16 +443,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) msg->data = data->hwirq; } -static int nwl_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static struct irq_chip nwl_irq_chip = { .name = "Xilinx MSI", .irq_compose_msi_msg = nwl_compose_msi_msg, - .irq_set_affinity = nwl_msi_set_affinity, }; static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, @@ -521,6 +516,60 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) return 0; } +static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i) +{ + int err = phy_power_off(pcie->phy[i]); + + if (err) + dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i, + err); +} + +static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i) +{ + int err = 
phy_exit(pcie->phy[i]); + + if (err) + dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err); +} + +static int nwl_pcie_phy_enable(struct nwl_pcie *pcie) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) { + ret = phy_init(pcie->phy[i]); + if (ret) + goto err; + + ret = phy_power_on(pcie->phy[i]); + if (ret) { + nwl_pcie_phy_exit(pcie, i); + goto err; + } + } + + return 0; + +err: + while (i--) { + nwl_pcie_phy_power_off(pcie, i); + nwl_pcie_phy_exit(pcie, i); + } + + return ret; +} + +static void nwl_pcie_phy_disable(struct nwl_pcie *pcie) +{ + int i; + + for (i = ARRAY_SIZE(pcie->phy); i--;) { + nwl_pcie_phy_power_off(pcie, i); + nwl_pcie_phy_exit(pcie, i); + } +} + static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) { struct device *dev = pcie->dev; @@ -732,6 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, { struct device *dev = pcie->dev; struct resource *res; + int i; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); pcie->breg_base = devm_ioremap_resource(dev, res); @@ -759,6 +809,18 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, irq_set_chained_handler_and_data(pcie->irq_intx, nwl_pcie_leg_handler, pcie); + + for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) { + pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i); + if (PTR_ERR(pcie->phy[i]) == -ENODEV) { + pcie->phy[i] = NULL; + break; + } + + if (IS_ERR(pcie->phy[i])) + return PTR_ERR(pcie->phy[i]); + } + return 0; } @@ -779,6 +841,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) return -ENODEV; pcie = pci_host_bridge_priv(bridge); + platform_set_drvdata(pdev, pcie); pcie->dev = dev; @@ -798,16 +861,22 @@ static int nwl_pcie_probe(struct platform_device *pdev) return err; } + err = nwl_pcie_phy_enable(pcie); + if (err) { + dev_err(dev, "could not enable PHYs\n"); + goto err_clk; + } + err = nwl_pcie_bridge_init(pcie); if (err) { dev_err(dev, "HW Initialization failed\n"); - return err; + goto err_phy; } err = nwl_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); - return err; + goto err_phy; } bridge->sysdata = pcie; @@ -817,11 +886,27 @@ static int nwl_pcie_probe(struct platform_device *pdev) err = nwl_pcie_enable_msi(pcie); if (err < 0) { dev_err(dev, "failed to enable MSI support: %d\n", err); - return err; + goto err_phy; } } - return pci_host_probe(bridge); + err = pci_host_probe(bridge); + if (!err) + return 0; + +err_phy: + nwl_pcie_phy_disable(pcie); +err_clk: + clk_disable_unprepare(pcie->clk); + return err; +} + +static void nwl_pcie_remove(struct platform_device *pdev) +{ + struct nwl_pcie *pcie = platform_get_drvdata(pdev); + + nwl_pcie_phy_disable(pcie); + clk_disable_unprepare(pcie->clk); } static struct platform_driver nwl_pcie_driver = { @@ -831,5 +916,6 @@ static struct platform_driver nwl_pcie_driver = { .of_match_table = nwl_pcie_of_match, }, .probe = nwl_pcie_probe, + .remove_new = nwl_pcie_remove, }; builtin_platform_driver(nwl_pcie_driver); diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index cb6e9f7b0152cb..0b534f73a94208 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -208,11 +208,6 @@ static struct irq_chip xilinx_msi_top_chip = { .irq_ack = xilinx_msi_top_irq_ack, }; -static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct xilinx_pcie *pcie = 
irq_data_get_irq_chip_data(data); @@ -225,7 +220,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) static struct irq_chip xilinx_msi_bottom_chip = { .name = "Xilinx MSI", - .irq_set_affinity = xilinx_msi_set_affinity, .irq_compose_msi_msg = xilinx_compose_msi_msg, }; @@ -271,7 +265,8 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = { }; static struct msi_domain_info xilinx_msi_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY, .chip = &xilinx_msi_top_chip, }; diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c index a18923d7cea6e7..8533dc618d45f0 100644 --- a/drivers/pci/controller/plda/pcie-plda-host.c +++ b/drivers/pci/controller/plda/pcie-plda-host.c @@ -76,17 +76,10 @@ static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) (int)data->hwirq, msg->address_hi, msg->address_lo); } -static int plda_msi_set_affinity(struct irq_data *irq_data, - const struct cpumask *mask, bool force) -{ - return -EINVAL; -} - static struct irq_chip plda_msi_bottom_irq_chip = { .name = "PLDA MSI", .irq_ack = plda_msi_bottom_irq_ack, .irq_compose_msi_msg = plda_compose_msi_msg, - .irq_set_affinity = plda_msi_set_affinity, }; static int plda_irq_msi_domain_alloc(struct irq_domain *domain, @@ -146,8 +139,8 @@ static struct irq_chip plda_msi_irq_chip = { }; static struct msi_domain_info plda_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, .chip = &plda_msi_irq_chip, }; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index a726de0af011f9..264a180403a0ec 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -204,22 +204,11 @@ static void vmd_irq_disable(struct irq_data *data) raw_spin_unlock_irqrestore(&list_lock, flags); } -/* - * XXX: Stubbed until we develop acceptable way to not create conflicts with - * other devices sharing the same vector. - */ -static int vmd_irq_set_affinity(struct irq_data *data, - const struct cpumask *dest, bool force) -{ - return -EINVAL; -} - static struct irq_chip vmd_msi_controller = { .name = "VMD-MSI", .irq_enable = vmd_irq_enable, .irq_disable = vmd_irq_disable, .irq_compose_msi_msg = vmd_compose_msi_msg, - .irq_set_affinity = vmd_irq_set_affinity, }; static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, @@ -326,7 +315,7 @@ static struct msi_domain_ops vmd_msi_domain_ops = { static struct msi_domain_info vmd_msi_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX, + MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, .ops = &vmd_msi_domain_ops, .chip = &vmd_msi_controller, }; @@ -1053,9 +1042,9 @@ static void vmd_remove(struct pci_dev *dev) static void vmd_shutdown(struct pci_dev *dev) { - struct vmd_dev *vmd = pci_get_drvdata(dev); + struct vmd_dev *vmd = pci_get_drvdata(dev); - vmd_remove_irq_domain(vmd); + vmd_remove_irq_domain(vmd); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c index c7affbbf73ab3f..b133967faef840 100644 --- a/drivers/pci/devres.c +++ b/drivers/pci/devres.c @@ -730,7 +730,7 @@ EXPORT_SYMBOL(pcim_iounmap); * Mapping and region will get automatically released on driver detach. 
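The MSI changes above (xilinx-dma-pl, xilinx-nwl, xilinx, plda-host and vmd) all follow the same pattern: drop the per-driver .irq_set_affinity stub that returned -EINVAL and advertise MSI_FLAG_NO_AFFINITY on the MSI domain, so affinity requests are refused by the MSI core instead of by each driver. A minimal sketch of the resulting shape, with hypothetical "acme" names and doorbell address:

#include <linux/irq.h>
#include <linux/msi.h>

static void acme_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	msg->address_hi = 0;
	msg->address_lo = 0xfe440000;		/* hypothetical MSI doorbell */
	msg->data = data->hwirq;
}

/* No .irq_set_affinity stub needed any more */
static struct irq_chip acme_msi_bottom_chip = {
	.name			= "acme MSI",
	.irq_compose_msi_msg	= acme_compose_msi_msg,
};

static struct irq_chip acme_msi_top_chip = {
	.name	= "acme PCIe MSI",
};

/*
 * MSI_FLAG_NO_AFFINITY tells the MSI core that this domain cannot steer
 * vectors to particular CPUs; set_affinity requests then fail in the core
 * rather than in a stub callback duplicated across drivers.
 */
static struct msi_domain_info acme_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
	.chip	= &acme_msi_top_chip,
};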
If * desired, release manually only with pcim_iounmap_region(). */ -static void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, +void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, const char *name) { int ret; @@ -763,6 +763,7 @@ static void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, return IOMEM_ERR_PTR(ret); } +EXPORT_SYMBOL(pcim_iomap_region); /** * pcim_iounmap_region - Unmap and release a PCI BAR @@ -785,7 +786,7 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar) } /** - * pcim_iomap_regions - Request and iomap PCI BARs + * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED) * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to request and iomap * @name: Name associated with the requests @@ -793,6 +794,9 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar) * Returns: 0 on success, negative error code on failure. * * Request and iomap regions specified by @mask. + * + * This function is DEPRECATED. Do not use it in new code. + * Use pcim_iomap_region() instead. */ int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name) { @@ -865,6 +869,7 @@ int pcim_request_region(struct pci_dev *pdev, int bar, const char *name) { return _pcim_request_region(pdev, bar, name, 0); } +EXPORT_SYMBOL(pcim_request_region); /** * pcim_request_region_exclusive - Request a PCI BAR exclusively diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 84309dfe0c6840..17f00710925508 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -838,6 +838,10 @@ void pci_epc_destroy(struct pci_epc *epc) { pci_ep_cfs_remove_epc_group(epc->group); device_unregister(&epc->dev); + +#ifdef CONFIG_PCI_DOMAINS_GENERIC + pci_bus_release_domain_nr(&epc->dev, epc->domain_nr); +#endif } EXPORT_SYMBOL_GPL(pci_epc_destroy); @@ -900,6 +904,16 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, epc->dev.release = pci_epc_release; epc->ops = ops; +#ifdef CONFIG_PCI_DOMAINS_GENERIC + epc->domain_nr = pci_bus_find_domain_nr(NULL, dev); +#else + /* + * TODO: If the architecture doesn't support generic PCI + * domains, then a custom implementation has to be used. + */ + WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n"); +#endif + ret = dev_set_name(&epc->dev, "%s", dev_name(dev)); if (ret) goto put_dev; diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO index 9d428b0ea524f5..92e6e20e859515 100644 --- a/drivers/pci/hotplug/TODO +++ b/drivers/pci/hotplug/TODO @@ -51,11 +51,6 @@ ibmphp: shpchp: -* There is only a single implementation of struct hpc_ops. Can the struct be - removed and its functions invoked directly? This has already been done in - pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify - if there was a specific reason not to apply the same change to shpchp. - * The hardirq handler shpc_isr() queues events on a workqueue. It can be simplified by converting it to threaded IRQ handling. Use pciehp as a template. diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index c94b40e64baf22..47a3ed16159a40 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -328,7 +328,7 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot) } else { /* Did not get a match on the target PCI device. Check * if the current IRQ table entry is a PCI-to-PCI - * bridge device. If so, and it's secondary bus + * bridge device. 
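With pcim_iomap_region() and pcim_request_region() now exported and pcim_iomap_regions() marked deprecated, new drivers can request and map a single BAR directly. A minimal probe sketch, assuming a hypothetical device whose registers live in BAR 0:

#include <linux/err.h>
#include <linux/pci.h>

#define ACME_REG_BAR	0		/* hypothetical register BAR */

struct acme_priv {
	void __iomem *regs;
};

static int acme_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct acme_priv *priv;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Request and ioremap one BAR; released automatically on detach */
	priv->regs = pcim_iomap_region(pdev, ACME_REG_BAR, "acme");
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	pci_set_drvdata(pdev, priv);
	return 0;
}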
If so, and its secondary bus * matches the bus number for the target device, I need * to save the bridge's slot number. If I can not find * an entry for the target device, I will have to diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index e9f1fb333a718e..718bc6cf12cb3c 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c @@ -138,7 +138,7 @@ static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 o if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1) return -1; - if (vendID == 0xffffffff) + if (PCI_POSSIBLE_ERROR(vendID)) return -1; return pci_bus_read_config_dword(bus, devfn, offset, value); } @@ -253,7 +253,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num *dev_num = tdevice; ctrl->pci_bus->number = tbus; pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); - if (!nobridge || (work == 0xffffffff)) + if (!nobridge || PCI_POSSIBLE_ERROR(work)) return 0; dbg("bus_num %d devfn %d\n", *bus_num, *dev_num); diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 7333b305f2a578..055518ee354dc9 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -112,7 +112,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { - /* if the slot exits it always contains a function */ + /* if the slot exists it always contains a function */ *value = 1; return 0; } diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 3a97f455336e67..f0e2d2d54d712a 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -72,7 +72,6 @@ struct slot { u8 latch_save; u8 pwr_save; struct controller *ctrl; - const struct hpc_ops *hpc_ops; struct hotplug_slot hotplug_slot; struct list_head slot_list; struct delayed_work work; /* work for button event */ @@ -94,7 +93,6 @@ struct controller { int slot_num_inc; /* 1 or -1 */ struct pci_dev *pci_dev; struct list_head slot_list; - const struct hpc_ops *hpc_ops; wait_queue_head_t queue; /* sleep & wake process */ u8 slot_device_offset; u32 pcix_misc2_reg; /* for amd pogo errata */ @@ -300,24 +298,22 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot) pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp); } -struct hpc_ops { - int (*power_on_slot)(struct slot *slot); - int (*slot_enable)(struct slot *slot); - int (*slot_disable)(struct slot *slot); - int (*set_bus_speed_mode)(struct slot *slot, enum pci_bus_speed speed); - int (*get_power_status)(struct slot *slot, u8 *status); - int (*get_attention_status)(struct slot *slot, u8 *status); - int (*set_attention_status)(struct slot *slot, u8 status); - int (*get_latch_status)(struct slot *slot, u8 *status); - int (*get_adapter_status)(struct slot *slot, u8 *status); - int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); - int (*get_prog_int)(struct slot *slot, u8 *prog_int); - int (*query_power_fault)(struct slot *slot); - void (*green_led_on)(struct slot *slot); - void (*green_led_off)(struct slot *slot); - void (*green_led_blink)(struct slot *slot); - void (*release_ctlr)(struct controller *ctrl); - int (*check_cmd_status)(struct controller *ctrl); -}; +int shpchp_power_on_slot(struct slot *slot); +int shpchp_slot_enable(struct slot *slot); +int shpchp_slot_disable(struct slot *slot); +int 
shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed speed); +int shpchp_get_power_status(struct slot *slot, u8 *status); +int shpchp_get_attention_status(struct slot *slot, u8 *status); +int shpchp_set_attention_status(struct slot *slot, u8 status); +int shpchp_get_latch_status(struct slot *slot, u8 *status); +int shpchp_get_adapter_status(struct slot *slot, u8 *status); +int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *speed); +int shpchp_get_prog_int(struct slot *slot, u8 *prog_int); +int shpchp_query_power_fault(struct slot *slot); +void shpchp_green_led_on(struct slot *slot); +void shpchp_green_led_off(struct slot *slot); +void shpchp_green_led_blink(struct slot *slot); +void shpchp_release_ctlr(struct controller *ctrl); +int shpchp_check_cmd_status(struct controller *ctrl); #endif /* _SHPCHP_H */ diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 56c7795ed890f0..a92e28b7290825 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -81,7 +81,6 @@ static int init_slots(struct controller *ctrl) slot->ctrl = ctrl; slot->bus = ctrl->pci_dev->subordinate->number; slot->device = ctrl->slot_device_offset + i; - slot->hpc_ops = ctrl->hpc_ops; slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number); @@ -150,7 +149,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) __func__, slot_name(slot)); slot->attention_save = status; - slot->hpc_ops->set_attention_status(slot, status); + shpchp_set_attention_status(slot, status); return 0; } @@ -183,7 +182,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_power_status(slot, value); + retval = shpchp_get_power_status(slot, value); if (retval < 0) *value = slot->pwr_save; @@ -198,7 +197,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_attention_status(slot, value); + retval = shpchp_get_attention_status(slot, value); if (retval < 0) *value = slot->attention_save; @@ -213,7 +212,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_latch_status(slot, value); + retval = shpchp_get_latch_status(slot, value); if (retval < 0) *value = slot->latch_save; @@ -228,7 +227,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", __func__, slot_name(slot)); - retval = slot->hpc_ops->get_adapter_status(slot, value); + retval = shpchp_get_adapter_status(slot, value); if (retval < 0) *value = slot->presence_save; @@ -293,7 +292,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_cleanup_slots: cleanup_slots(ctrl); err_out_release_ctlr: - ctrl->hpc_ops->release_ctlr(ctrl); + shpchp_release_ctlr(ctrl); err_out_free_ctrl: kfree(ctrl); err_out_none: @@ -306,7 +305,7 @@ static void shpc_remove(struct pci_dev *dev) dev->shpc_managed = 0; shpchp_remove_ctrl_files(ctrl); - ctrl->hpc_ops->release_ctlr(ctrl); + shpchp_release_ctlr(ctrl); kfree(ctrl); } diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index 6a6705e0cf1785..e6c6f23bae2741 100644 --- 
a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -51,7 +51,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl) ctrl_dbg(ctrl, "Attention button interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); - p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); + shpchp_get_adapter_status(p_slot, &p_slot->presence_save); /* * Button pressed - See if need to TAKE ACTION!!! @@ -75,8 +75,8 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl) ctrl_dbg(ctrl, "Switch interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); - p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); - p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + shpchp_get_adapter_status(p_slot, &p_slot->presence_save); + shpchp_get_latch_status(p_slot, &getstatus); ctrl_dbg(ctrl, "Card present %x Power status %x\n", p_slot->presence_save, p_slot->pwr_save); @@ -116,7 +116,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl) /* * Save the presence state */ - p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); + shpchp_get_adapter_status(p_slot, &p_slot->presence_save); if (p_slot->presence_save) { /* * Card Present @@ -148,7 +148,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl) p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); - if (!(p_slot->hpc_ops->query_power_fault(p_slot))) { + if (!(shpchp_query_power_fault(p_slot))) { /* * Power fault Cleared */ @@ -181,7 +181,7 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot, int rc = 0; ctrl_dbg(ctrl, "Change speed to %d\n", speed); - rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed); + rc = shpchp_set_bus_speed_mode(p_slot, speed); if (rc) { ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n", __func__); @@ -241,14 +241,14 @@ static int board_added(struct slot *p_slot) __func__, p_slot->device, ctrl->slot_device_offset, hp_slot); /* Power on slot without connecting to bus */ - rc = p_slot->hpc_ops->power_on_slot(p_slot); + rc = shpchp_power_on_slot(p_slot); if (rc) { ctrl_err(ctrl, "Failed to power on slot\n"); return -1; } if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) { - rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz); + rc = shpchp_set_bus_speed_mode(p_slot, PCI_SPEED_33MHz); if (rc) { ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n", __func__); @@ -256,14 +256,14 @@ static int board_added(struct slot *p_slot) } /* turn on board, blink green LED, turn off Amber LED */ - rc = p_slot->hpc_ops->slot_enable(p_slot); + rc = shpchp_slot_enable(p_slot); if (rc) { ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); return rc; } } - rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); + rc = shpchp_get_adapter_speed(p_slot, &asp); if (rc) { ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n"); return WRONG_BUS_FREQUENCY; @@ -285,7 +285,7 @@ static int board_added(struct slot *p_slot) return rc; /* turn on board, blink green LED, turn off Amber LED */ - rc = p_slot->hpc_ops->slot_enable(p_slot); + rc = shpchp_slot_enable(p_slot); if (rc) { ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); return rc; @@ -313,13 +313,13 @@ static int board_added(struct slot *p_slot) p_slot->is_a_board = 0x01; p_slot->pwr_save = 1; - p_slot->hpc_ops->green_led_on(p_slot); + shpchp_green_led_on(p_slot); return 0; 
err_exit: /* turn off slot, turn on Amber LED, turn off Green LED */ - rc = p_slot->hpc_ops->slot_disable(p_slot); + rc = shpchp_slot_disable(p_slot); if (rc) { ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n", __func__); @@ -352,14 +352,14 @@ static int remove_board(struct slot *p_slot) p_slot->status = 0x01; /* turn off slot, turn on Amber LED, turn off Green LED */ - rc = p_slot->hpc_ops->slot_disable(p_slot); + rc = shpchp_slot_disable(p_slot); if (rc) { ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n", __func__); return rc; } - rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); + rc = shpchp_set_attention_status(p_slot, 0); if (rc) { ctrl_err(ctrl, "Issue of Set Attention command failed\n"); return rc; @@ -401,7 +401,7 @@ static void shpchp_pushbutton_thread(struct work_struct *work) case POWERON_STATE: mutex_unlock(&p_slot->lock); if (shpchp_enable_slot(p_slot)) - p_slot->hpc_ops->green_led_off(p_slot); + shpchp_green_led_off(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; @@ -446,10 +446,10 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) static void update_slot_info(struct slot *slot) { - slot->hpc_ops->get_power_status(slot, &slot->pwr_save); - slot->hpc_ops->get_attention_status(slot, &slot->attention_save); - slot->hpc_ops->get_latch_status(slot, &slot->latch_save); - slot->hpc_ops->get_adapter_status(slot, &slot->presence_save); + shpchp_get_power_status(slot, &slot->pwr_save); + shpchp_get_attention_status(slot, &slot->attention_save); + shpchp_get_latch_status(slot, &slot->latch_save); + shpchp_get_adapter_status(slot, &slot->presence_save); } /* @@ -462,7 +462,7 @@ static void handle_button_press_event(struct slot *p_slot) switch (p_slot->state) { case STATIC_STATE: - p_slot->hpc_ops->get_power_status(p_slot, &getstatus); + shpchp_get_power_status(p_slot, &getstatus); if (getstatus) { p_slot->state = BLINKINGOFF_STATE; ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n", @@ -473,8 +473,8 @@ static void handle_button_press_event(struct slot *p_slot) slot_name(p_slot)); } /* blink green LED and turn off amber */ - p_slot->hpc_ops->green_led_blink(p_slot); - p_slot->hpc_ops->set_attention_status(p_slot, 0); + shpchp_green_led_blink(p_slot); + shpchp_set_attention_status(p_slot, 0); queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); break; @@ -489,10 +489,10 @@ static void handle_button_press_event(struct slot *p_slot) slot_name(p_slot)); cancel_delayed_work(&p_slot->work); if (p_slot->state == BLINKINGOFF_STATE) - p_slot->hpc_ops->green_led_on(p_slot); + shpchp_green_led_on(p_slot); else - p_slot->hpc_ops->green_led_off(p_slot); - p_slot->hpc_ops->set_attention_status(p_slot, 0); + shpchp_green_led_off(p_slot); + shpchp_set_attention_status(p_slot, 0); ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n", slot_name(p_slot)); p_slot->state = STATIC_STATE; @@ -526,8 +526,8 @@ static void interrupt_event_handler(struct work_struct *work) break; case INT_POWER_FAULT: ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__); - p_slot->hpc_ops->set_attention_status(p_slot, 1); - p_slot->hpc_ops->green_led_off(p_slot); + shpchp_set_attention_status(p_slot, 1); + shpchp_green_led_off(p_slot); break; default: update_slot_info(p_slot); @@ -547,17 +547,17 @@ static int shpchp_enable_slot (struct slot *p_slot) /* Check to see if (latch closed, card present, power off) */ mutex_lock(&p_slot->ctrl->crit_sect); - rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); + rc = 
shpchp_get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); goto out; } - rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + rc = shpchp_get_latch_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); goto out; } - rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); + rc = shpchp_get_power_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Already enabled on slot(%s)\n", slot_name(p_slot)); @@ -567,10 +567,10 @@ static int shpchp_enable_slot (struct slot *p_slot) p_slot->is_a_board = 1; /* We have to save the presence info for these slots */ - p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); - p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save)); + shpchp_get_adapter_status(p_slot, &p_slot->presence_save); + shpchp_get_power_status(p_slot, &p_slot->pwr_save); ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); - p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + shpchp_get_latch_status(p_slot, &getstatus); if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD && p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458) @@ -584,9 +584,8 @@ static int shpchp_enable_slot (struct slot *p_slot) retval = board_added(p_slot); if (retval) { - p_slot->hpc_ops->get_adapter_status(p_slot, - &(p_slot->presence_save)); - p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + shpchp_get_adapter_status(p_slot, &p_slot->presence_save); + shpchp_get_latch_status(p_slot, &getstatus); } update_slot_info(p_slot); @@ -608,17 +607,17 @@ static int shpchp_disable_slot (struct slot *p_slot) /* Check to see if (latch closed, card present, power on) */ mutex_lock(&p_slot->ctrl->crit_sect); - rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); + rc = shpchp_get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); goto out; } - rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); + rc = shpchp_get_latch_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); goto out; } - rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); + rc = shpchp_get_power_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "Already disabled on slot(%s)\n", slot_name(p_slot)); diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 48e4daefc44af4..012b9e3fe5b0e2 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -167,7 +167,6 @@ static irqreturn_t shpc_isr(int irq, void *dev_id); static void start_int_poll_timer(struct controller *ctrl, int sec); -static int hpc_check_cmd_status(struct controller *ctrl); static inline u8 shpc_readb(struct controller *ctrl, int reg) { @@ -317,7 +316,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd) if (retval) goto out; - cmd_status = hpc_check_cmd_status(slot->ctrl); + cmd_status = shpchp_check_cmd_status(slot->ctrl); if (cmd_status) { ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n", cmd, cmd_status); @@ -328,7 +327,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd) return retval; } -static int hpc_check_cmd_status(struct controller *ctrl) +int shpchp_check_cmd_status(struct controller *ctrl) { int retval = 0; u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F; @@ -357,7 +356,7 @@ 
static int hpc_check_cmd_status(struct controller *ctrl) } -static int hpc_get_attention_status(struct slot *slot, u8 *status) +int shpchp_get_attention_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); @@ -381,7 +380,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status) return 0; } -static int hpc_get_power_status(struct slot *slot, u8 *status) +int shpchp_get_power_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); @@ -406,7 +405,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status) } -static int hpc_get_latch_status(struct slot *slot, u8 *status) +int shpchp_get_latch_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); @@ -416,7 +415,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status) return 0; } -static int hpc_get_adapter_status(struct slot *slot, u8 *status) +int shpchp_get_adapter_status(struct slot *slot, u8 *status) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); @@ -427,7 +426,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status) return 0; } -static int hpc_get_prog_int(struct slot *slot, u8 *prog_int) +int shpchp_get_prog_int(struct slot *slot, u8 *prog_int) { struct controller *ctrl = slot->ctrl; @@ -436,7 +435,7 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int) return 0; } -static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) +int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) { int retval = 0; struct controller *ctrl = slot->ctrl; @@ -444,7 +443,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) u8 m66_cap = !!(slot_reg & MHZ66_CAP); u8 pi, pcix_cap; - retval = hpc_get_prog_int(slot, &pi); + retval = shpchp_get_prog_int(slot, &pi); if (retval) return retval; @@ -489,7 +488,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) return retval; } -static int hpc_query_power_fault(struct slot *slot) +int shpchp_query_power_fault(struct slot *slot) { struct controller *ctrl = slot->ctrl; u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot)); @@ -498,7 +497,7 @@ static int hpc_query_power_fault(struct slot *slot) return !(slot_reg & POWER_FAULT); } -static int hpc_set_attention_status(struct slot *slot, u8 value) +int shpchp_set_attention_status(struct slot *slot, u8 value) { u8 slot_cmd = 0; @@ -520,22 +519,22 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) } -static void hpc_set_green_led_on(struct slot *slot) +void shpchp_green_led_on(struct slot *slot) { shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON); } -static void hpc_set_green_led_off(struct slot *slot) +void shpchp_green_led_off(struct slot *slot) { shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF); } -static void hpc_set_green_led_blink(struct slot *slot) +void shpchp_green_led_blink(struct slot *slot) { shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK); } -static void hpc_release_ctlr(struct controller *ctrl) +void shpchp_release_ctlr(struct controller *ctrl) { int i; u32 slot_reg, serr_int; @@ -575,7 +574,7 @@ static void hpc_release_ctlr(struct controller *ctrl) release_mem_region(ctrl->mmio_base, ctrl->mmio_size); } -static int hpc_power_on_slot(struct slot *slot) +int shpchp_power_on_slot(struct slot *slot) { 
int retval; @@ -586,7 +585,7 @@ static int hpc_power_on_slot(struct slot *slot) return retval; } -static int hpc_slot_enable(struct slot *slot) +int shpchp_slot_enable(struct slot *slot) { int retval; @@ -599,7 +598,7 @@ static int hpc_slot_enable(struct slot *slot) return retval; } -static int hpc_slot_disable(struct slot *slot) +int shpchp_slot_disable(struct slot *slot) { int retval; @@ -681,7 +680,7 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl) } -static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value) +int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value) { int retval; struct controller *ctrl = slot->ctrl; @@ -871,28 +870,6 @@ static int shpc_get_max_bus_speed(struct controller *ctrl) return retval; } -static const struct hpc_ops shpchp_hpc_ops = { - .power_on_slot = hpc_power_on_slot, - .slot_enable = hpc_slot_enable, - .slot_disable = hpc_slot_disable, - .set_bus_speed_mode = hpc_set_bus_speed_mode, - .set_attention_status = hpc_set_attention_status, - .get_power_status = hpc_get_power_status, - .get_attention_status = hpc_get_attention_status, - .get_latch_status = hpc_get_latch_status, - .get_adapter_status = hpc_get_adapter_status, - - .get_adapter_speed = hpc_get_adapter_speed, - .get_prog_int = hpc_get_prog_int, - - .query_power_fault = hpc_query_power_fault, - .green_led_on = hpc_set_green_led_on, - .green_led_off = hpc_set_green_led_off, - .green_led_blink = hpc_set_green_led_blink, - - .release_ctlr = hpc_release_ctlr, -}; - int shpc_init(struct controller *ctrl, struct pci_dev *pdev) { int rc = -1, num_slots = 0; @@ -978,8 +955,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) /* Setup wait queue */ init_waitqueue_head(&ctrl->queue); - ctrl->hpc_ops = &shpchp_hpc_ops; - /* Return PCI Controller Info */ slot_config = shpc_readl(ctrl, SLOT_CONFIG); ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8; diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c index a715a4803c9572..9fb7cacc15cdef 100644 --- a/drivers/pci/iomap.c +++ b/drivers/pci/iomap.c @@ -156,7 +156,7 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc); * the different IOMAP ranges. * * But if the architecture does not use the generic iomap code, and if - * it has _not_ defined it's own private pci_iounmap function, we define + * it has _not_ defined its own private pci_iounmap function, we define * it here. * * NOTE! This default implementation assumes that if the architecture diff --git a/drivers/pci/npem.c b/drivers/pci/npem.c new file mode 100644 index 00000000000000..97507e0df769b7 --- /dev/null +++ b/drivers/pci/npem.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe Enclosure management driver created for LED interfaces based on + * indications. It says *what indications* blink but does not specify *how* + * they blink - it is hardware defined. + * + * The driver name refers to Native PCIe Enclosure Management. It is + * first indication oriented standard with specification. + * + * Native PCIe Enclosure Management (NPEM) + * PCIe Base Specification r6.1 sec 6.28, 7.9.19 + * + * _DSM Definitions for PCIe SSD Status LED + * PCI Firmware Specification, r3.3 sec 4.7 + * + * Two backends are supported to manipulate indications: Direct NPEM register + * access (npem_ops) and indirect access through the ACPI _DSM (dsm_ops). + * _DSM is used if supported, else NPEM. + * + * Copyright (c) 2021-2022 Dell Inc. 
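The register backend described in this header is discovered through the new NPEM extended capability. A minimal sketch of that discovery, mirroring the checks done later in this file (the helper name is hypothetical):

#include <linux/pci.h>

static bool acme_dev_has_npem(struct pci_dev *pdev)
{
	u32 cap;
	u16 pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_NPEM);
	if (!pos)
		return false;

	/* Capability register must be readable and report NPEM support */
	if (pci_read_config_dword(pdev, pos + PCI_NPEM_CAP, &cap))
		return false;

	return cap & PCI_NPEM_CAP_CAPABLE;
}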
+ * Copyright (c) 2023-2024 Intel Corporation + * Mariusz Tkaczyk + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pci.h" + +struct indication { + u32 bit; + const char *name; +}; + +static const struct indication npem_indications[] = { + {PCI_NPEM_IND_OK, "enclosure:ok"}, + {PCI_NPEM_IND_LOCATE, "enclosure:locate"}, + {PCI_NPEM_IND_FAIL, "enclosure:fail"}, + {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"}, + {PCI_NPEM_IND_PFA, "enclosure:pfa"}, + {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"}, + {PCI_NPEM_IND_ICA, "enclosure:ica"}, + {PCI_NPEM_IND_IFA, "enclosure:ifa"}, + {PCI_NPEM_IND_IDT, "enclosure:idt"}, + {PCI_NPEM_IND_DISABLED, "enclosure:disabled"}, + {PCI_NPEM_IND_SPEC_0, "enclosure:specific_0"}, + {PCI_NPEM_IND_SPEC_1, "enclosure:specific_1"}, + {PCI_NPEM_IND_SPEC_2, "enclosure:specific_2"}, + {PCI_NPEM_IND_SPEC_3, "enclosure:specific_3"}, + {PCI_NPEM_IND_SPEC_4, "enclosure:specific_4"}, + {PCI_NPEM_IND_SPEC_5, "enclosure:specific_5"}, + {PCI_NPEM_IND_SPEC_6, "enclosure:specific_6"}, + {PCI_NPEM_IND_SPEC_7, "enclosure:specific_7"}, + {0, NULL} +}; + +/* _DSM PCIe SSD LED States correspond to NPEM register values */ +static const struct indication dsm_indications[] = { + {PCI_NPEM_IND_OK, "enclosure:ok"}, + {PCI_NPEM_IND_LOCATE, "enclosure:locate"}, + {PCI_NPEM_IND_FAIL, "enclosure:fail"}, + {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"}, + {PCI_NPEM_IND_PFA, "enclosure:pfa"}, + {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"}, + {PCI_NPEM_IND_ICA, "enclosure:ica"}, + {PCI_NPEM_IND_IFA, "enclosure:ifa"}, + {PCI_NPEM_IND_IDT, "enclosure:idt"}, + {PCI_NPEM_IND_DISABLED, "enclosure:disabled"}, + {0, NULL} +}; + +#define for_each_indication(ind, inds) \ + for (ind = inds; ind->bit; ind++) + +/* + * The driver has internal list of supported indications. Ideally, the driver + * should not touch bits that are not defined and for which LED devices are + * not exposed but in reality, it needs to turn them off. + * + * Otherwise, there will be no possibility to turn off indications turned on by + * other utilities or turned on by default and it leads to bad user experience. + * + * Additionally, it excludes NPEM commands like RESET or ENABLE. 
+ */ +static u32 reg_to_indications(u32 caps, const struct indication *inds) +{ + const struct indication *ind; + u32 supported_indications = 0; + + for_each_indication(ind, inds) + supported_indications |= ind->bit; + + return caps & supported_indications; +} + +/** + * struct npem_led - LED details + * @indication: indication details + * @npem: NPEM device + * @name: LED name + * @led: LED device + */ +struct npem_led { + const struct indication *indication; + struct npem *npem; + char name[LED_MAX_NAME_SIZE]; + struct led_classdev led; +}; + +/** + * struct npem_ops - backend specific callbacks + * @get_active_indications: get active indications + * npem: NPEM device + * inds: response buffer + * @set_active_indications: set new indications + * npem: npem device + * inds: bit mask to set + * @inds: supported indications array, set of indications is backend specific + * @name: backend name + */ +struct npem_ops { + int (*get_active_indications)(struct npem *npem, u32 *inds); + int (*set_active_indications)(struct npem *npem, u32 inds); + const struct indication *inds; + const char *name; +}; + +/** + * struct npem - NPEM device properties + * @dev: PCI device this driver is attached to + * @ops: backend specific callbacks + * @lock: serializes concurrent access to NPEM device by multiple LED devices + * @pos: cached offset of NPEM Capability Register in Configuration Space; + * only used if NPEM registers are accessed directly and not through _DSM + * @supported_indications: cached bit mask of supported indications; + * non-indication and reserved bits in the NPEM Capability Register are + * cleared in this bit mask + * @active_indications: cached bit mask of active indications; + * non-indication and reserved bits in the NPEM Control Register are + * cleared in this bit mask + * @active_inds_initialized: whether @active_indications has been initialized; + * On Dell platforms, it is required that IPMI drivers are loaded before + * the GET_STATE_DSM method is invoked: They use an IPMI OpRegion to + * get/set the active LEDs. By initializing @active_indications lazily + * (on first access to an LED), IPMI drivers are given a chance to load. + * If they are not loaded in time, users will see various errors on LED + * access in dmesg. Once they are loaded, the errors go away and LED + * access becomes possible. 
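Because all backend-specific behaviour sits behind struct npem_ops, adding another access method only means supplying the two callbacks plus an indication table. A purely hypothetical example backend (placeholder bodies, not a real firmware interface):

static int acme_get_active_indications(struct npem *npem, u32 *inds)
{
	*inds = 0;	/* query the hypothetical firmware interface here */
	return 0;
}

static int acme_set_active_indications(struct npem *npem, u32 inds)
{
	/* program the hypothetical firmware interface here */
	return 0;
}

static const struct npem_ops acme_ops = {
	.get_active_indications	= acme_get_active_indications,
	.set_active_indications	= acme_set_active_indications,
	.name			= "ACME enclosure LED management",
	.inds			= npem_indications,
};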
+ * @led_cnt: size of @leds array + * @leds: array containing LED class devices of all supported LEDs + */ +struct npem { + struct pci_dev *dev; + const struct npem_ops *ops; + struct mutex lock; + u16 pos; + u32 supported_indications; + u32 active_indications; + unsigned int active_inds_initialized:1; + int led_cnt; + struct npem_led leds[]; +}; + +static int npem_read_reg(struct npem *npem, u16 reg, u32 *val) +{ + int ret = pci_read_config_dword(npem->dev, npem->pos + reg, val); + + return pcibios_err_to_errno(ret); +} + +static int npem_write_ctrl(struct npem *npem, u32 reg) +{ + int pos = npem->pos + PCI_NPEM_CTRL; + int ret = pci_write_config_dword(npem->dev, pos, reg); + + return pcibios_err_to_errno(ret); +} + +static int npem_get_active_indications(struct npem *npem, u32 *inds) +{ + u32 ctrl; + int ret; + + ret = npem_read_reg(npem, PCI_NPEM_CTRL, &ctrl); + if (ret) + return ret; + + /* If PCI_NPEM_CTRL_ENABLE is not set then no indication should blink */ + if (!(ctrl & PCI_NPEM_CTRL_ENABLE)) { + *inds = 0; + return 0; + } + + *inds = ctrl & npem->supported_indications; + + return 0; +} + +static int npem_set_active_indications(struct npem *npem, u32 inds) +{ + int ctrl, ret, ret_val; + u32 cc_status; + + lockdep_assert_held(&npem->lock); + + /* This bit is always required */ + ctrl = inds | PCI_NPEM_CTRL_ENABLE; + + ret = npem_write_ctrl(npem, ctrl); + if (ret) + return ret; + + /* + * For the case where a NPEM command has not completed immediately, + * it is recommended that software not continuously "spin" on polling + * the status register, but rather poll under interrupt at a reduced + * rate; for example at 10 ms intervals. + * + * PCIe r6.1 sec 6.28 "Implementation Note: Software Polling of NPEM + * Command Completed" + */ + ret = read_poll_timeout(npem_read_reg, ret_val, + ret_val || (cc_status & PCI_NPEM_STATUS_CC), + 10 * USEC_PER_MSEC, USEC_PER_SEC, false, npem, + PCI_NPEM_STATUS, &cc_status); + if (ret) + return ret; + if (ret_val) + return ret_val; + + /* + * All writes to control register, including writes that do not change + * the register value, are NPEM commands and should eventually result + * in a command completion indication in the NPEM Status Register. + * + * PCIe Base Specification r6.1 sec 7.9.19.3 + * + * Register may not be updated, or other conflicting bits may be + * cleared. Spec is not strict here. Read NPEM Control register after + * write to keep cache in-sync. 
+ */ + return npem_get_active_indications(npem, &npem->active_indications); +} + +static const struct npem_ops npem_ops = { + .get_active_indications = npem_get_active_indications, + .set_active_indications = npem_set_active_indications, + .name = "Native PCIe Enclosure Management", + .inds = npem_indications, +}; + +#define DSM_GUID GUID_INIT(0x5d524d9d, 0xfff9, 0x4d4b, 0x8c, 0xb7, 0x74, 0x7e,\ + 0xd5, 0x1e, 0x19, 0x4d) +#define GET_SUPPORTED_STATES_DSM 1 +#define GET_STATE_DSM 2 +#define SET_STATE_DSM 3 + +static const guid_t dsm_guid = DSM_GUID; + +static bool npem_has_dsm(struct pci_dev *pdev) +{ + acpi_handle handle; + + handle = ACPI_HANDLE(&pdev->dev); + if (!handle) + return false; + + return acpi_check_dsm(handle, &dsm_guid, 0x1, + BIT(GET_SUPPORTED_STATES_DSM) | + BIT(GET_STATE_DSM) | BIT(SET_STATE_DSM)); +} + +struct dsm_output { + u16 status; + u8 function_specific_err; + u8 vendor_specific_err; + u32 state; +}; + +/** + * dsm_evaluate() - send DSM PCIe SSD Status LED command + * @pdev: PCI device + * @dsm_func: DSM LED Function + * @output: buffer to copy DSM Response + * @value_to_set: value for SET_STATE_DSM function + * + * To not bother caller with ACPI context, the returned _DSM Output Buffer is + * copied. + */ +static int dsm_evaluate(struct pci_dev *pdev, u64 dsm_func, + struct dsm_output *output, u32 value_to_set) +{ + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + union acpi_object *out_obj, arg3[2]; + union acpi_object *arg3_p = NULL; + + if (dsm_func == SET_STATE_DSM) { + arg3[0].type = ACPI_TYPE_PACKAGE; + arg3[0].package.count = 1; + arg3[0].package.elements = &arg3[1]; + + arg3[1].type = ACPI_TYPE_BUFFER; + arg3[1].buffer.length = 4; + arg3[1].buffer.pointer = (u8 *)&value_to_set; + + arg3_p = arg3; + } + + out_obj = acpi_evaluate_dsm_typed(handle, &dsm_guid, 0x1, dsm_func, + arg3_p, ACPI_TYPE_BUFFER); + if (!out_obj) + return -EIO; + + if (out_obj->buffer.length < sizeof(struct dsm_output)) { + ACPI_FREE(out_obj); + return -EIO; + } + + memcpy(output, out_obj->buffer.pointer, sizeof(struct dsm_output)); + + ACPI_FREE(out_obj); + return 0; +} + +static int dsm_get(struct pci_dev *pdev, u64 dsm_func, u32 *buf) +{ + struct dsm_output output; + int ret = dsm_evaluate(pdev, dsm_func, &output, 0); + + if (ret) + return ret; + + if (output.status != 0) + return -EIO; + + *buf = output.state; + return 0; +} + +static int dsm_get_active_indications(struct npem *npem, u32 *buf) +{ + int ret = dsm_get(npem->dev, GET_STATE_DSM, buf); + + /* Filter out not supported indications in response */ + *buf &= npem->supported_indications; + return ret; +} + +static int dsm_set_active_indications(struct npem *npem, u32 value) +{ + struct dsm_output output; + int ret = dsm_evaluate(npem->dev, SET_STATE_DSM, &output, value); + + if (ret) + return ret; + + switch (output.status) { + case 4: + /* + * Not all bits are set. If this bit is set, the platform + * disregarded some or all of the request state changes. OSPM + * should check the resulting PCIe SSD Status LED States to see + * what, if anything, has changed. + * + * PCI Firmware Specification, r3.3 Table 4-19. 
+ */ + if (output.function_specific_err != 1) + return -EIO; + fallthrough; + case 0: + break; + default: + return -EIO; + } + + npem->active_indications = output.state; + + return 0; +} + +static const struct npem_ops dsm_ops = { + .get_active_indications = dsm_get_active_indications, + .set_active_indications = dsm_set_active_indications, + .name = "_DSM PCIe SSD Status LED Management", + .inds = dsm_indications, +}; + +static int npem_initialize_active_indications(struct npem *npem) +{ + int ret; + + lockdep_assert_held(&npem->lock); + + if (npem->active_inds_initialized) + return 0; + + ret = npem->ops->get_active_indications(npem, + &npem->active_indications); + if (ret) + return ret; + + npem->active_inds_initialized = true; + return 0; +} + +/* + * The status of each indicator is cached on first brightness_ get/set time + * and updated at write time. brightness_get() is only responsible for + * reflecting the last written/cached value. + */ +static enum led_brightness brightness_get(struct led_classdev *led) +{ + struct npem_led *nled = container_of(led, struct npem_led, led); + struct npem *npem = nled->npem; + int ret, val = 0; + + ret = mutex_lock_interruptible(&npem->lock); + if (ret) + return ret; + + ret = npem_initialize_active_indications(npem); + if (ret) + goto out; + + if (npem->active_indications & nled->indication->bit) + val = 1; + +out: + mutex_unlock(&npem->lock); + return val; +} + +static int brightness_set(struct led_classdev *led, + enum led_brightness brightness) +{ + struct npem_led *nled = container_of(led, struct npem_led, led); + struct npem *npem = nled->npem; + u32 indications; + int ret; + + ret = mutex_lock_interruptible(&npem->lock); + if (ret) + return ret; + + ret = npem_initialize_active_indications(npem); + if (ret) + goto out; + + if (brightness == 0) + indications = npem->active_indications & ~(nled->indication->bit); + else + indications = npem->active_indications | nled->indication->bit; + + ret = npem->ops->set_active_indications(npem, indications); + +out: + mutex_unlock(&npem->lock); + return ret; +} + +static void npem_free(struct npem *npem) +{ + struct npem_led *nled; + int cnt; + + if (!npem) + return; + + for (cnt = 0; cnt < npem->led_cnt; cnt++) { + nled = &npem->leds[cnt]; + + if (nled->name[0]) + led_classdev_unregister(&nled->led); + } + + mutex_destroy(&npem->lock); + kfree(npem); +} + +static int pci_npem_set_led_classdev(struct npem *npem, struct npem_led *nled) +{ + struct led_classdev *led = &nled->led; + struct led_init_data init_data = {}; + char *name = nled->name; + int ret; + + init_data.devicename = pci_name(npem->dev); + init_data.default_label = nled->indication->name; + + ret = led_compose_name(&npem->dev->dev, &init_data, name); + if (ret) + return ret; + + led->name = name; + led->brightness_set_blocking = brightness_set; + led->brightness_get = brightness_get; + led->max_brightness = 1; + led->default_trigger = "none"; + led->flags = 0; + + ret = led_classdev_register(&npem->dev->dev, led); + if (ret) + /* Clear the name to indicate that it is not registered. 
*/ + name[0] = 0; + return ret; +} + +static int pci_npem_init(struct pci_dev *dev, const struct npem_ops *ops, + int pos, u32 caps) +{ + u32 supported = reg_to_indications(caps, ops->inds); + int supported_cnt = hweight32(supported); + const struct indication *indication; + struct npem_led *nled; + struct npem *npem; + int led_idx = 0; + int ret; + + npem = kzalloc(struct_size(npem, leds, supported_cnt), GFP_KERNEL); + if (!npem) + return -ENOMEM; + + npem->supported_indications = supported; + npem->led_cnt = supported_cnt; + npem->pos = pos; + npem->dev = dev; + npem->ops = ops; + + mutex_init(&npem->lock); + + for_each_indication(indication, npem_indications) { + if (!(npem->supported_indications & indication->bit)) + continue; + + nled = &npem->leds[led_idx++]; + nled->indication = indication; + nled->npem = npem; + + ret = pci_npem_set_led_classdev(npem, nled); + if (ret) { + npem_free(npem); + return ret; + } + } + + dev->npem = npem; + return 0; +} + +void pci_npem_remove(struct pci_dev *dev) +{ + npem_free(dev->npem); +} + +void pci_npem_create(struct pci_dev *dev) +{ + const struct npem_ops *ops = &npem_ops; + int pos = 0, ret; + u32 cap; + + if (npem_has_dsm(dev)) { + /* + * OS should use the DSM for LED control if it is available + * PCI Firmware Spec r3.3 sec 4.7. + */ + ret = dsm_get(dev, GET_SUPPORTED_STATES_DSM, &cap); + if (ret) + return; + + ops = &dsm_ops; + } else { + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_NPEM); + if (pos == 0) + return; + + if (pci_read_config_dword(dev, pos + PCI_NPEM_CAP, &cap) != 0 || + (cap & PCI_NPEM_CAP_CAPABLE) == 0) + return; + } + + pci_info(dev, "Configuring %s\n", ops->name); + + ret = pci_npem_init(dev, ops, pos, cap); + if (ret) + pci_err(dev, "Failed to register %s, err: %d\n", ops->name, + ret); +} diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 9cc447da9475d8..af370628e58393 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -1541,3 +1542,184 @@ static int __init acpi_pci_init(void) return 0; } arch_initcall(acpi_pci_init); + +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) + +/* + * Try to assign the IRQ number when probing a new device + */ +int pcibios_alloc_irq(struct pci_dev *dev) +{ + if (!acpi_disabled) + acpi_pci_irq_enable(dev); + + return 0; +} + +struct acpi_pci_generic_root_info { + struct acpi_pci_root_info common; + struct pci_config_window *cfg; /* config space mapping */ +}; + +int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +{ + struct pci_config_window *cfg = bus->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct acpi_pci_root *root = acpi_driver_data(adev); + + return root->segment; +} + +int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +{ + struct pci_config_window *cfg; + struct acpi_device *adev; + struct device *bus_dev; + + if (acpi_disabled) + return 0; + + cfg = bridge->bus->sysdata; + + /* + * On Hyper-V there is no corresponding ACPI device for a root bridge, + * therefore ->parent is set as NULL by the driver. And set 'adev' as + * NULL in this case because there is no proper ACPI device. 
+ */ + if (!cfg->parent) + adev = NULL; + else + adev = to_acpi_device(cfg->parent); + + bus_dev = &bridge->bus->dev; + + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev))); + + return 0; +} + +static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) +{ + struct resource_entry *entry, *tmp; + int status; + + status = acpi_pci_probe_root_resources(ci); + resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { + if (!(entry->res->flags & IORESOURCE_WINDOW)) + resource_list_destroy_entry(entry); + } + return status; +} + +/* + * Lookup the bus range for the domain in MCFG, and set up config space + * mapping. + */ +static struct pci_config_window * +pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) +{ + struct device *dev = &root->device->dev; + struct resource *bus_res = &root->secondary; + u16 seg = root->segment; + const struct pci_ecam_ops *ecam_ops; + struct resource cfgres; + struct acpi_device *adev; + struct pci_config_window *cfg; + int ret; + + ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); + if (ret) { + dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); + return NULL; + } + + adev = acpi_resource_consumer(&cfgres); + if (adev) + dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres, + dev_name(&adev->dev)); + else + dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n", + &cfgres); + + cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); + if (IS_ERR(cfg)) { + dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, + PTR_ERR(cfg)); + return NULL; + } + + return cfg; +} + +/* release_info: free resources allocated by init_info */ +static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) +{ + struct acpi_pci_generic_root_info *ri; + + ri = container_of(ci, struct acpi_pci_generic_root_info, common); + pci_ecam_free(ri->cfg); + kfree(ci->ops); + kfree(ri); +} + +/* Interface called from ACPI code to setup PCI host controller */ +struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +{ + struct acpi_pci_generic_root_info *ri; + struct pci_bus *bus, *child; + struct acpi_pci_root_ops *root_ops; + struct pci_host_bridge *host; + + ri = kzalloc(sizeof(*ri), GFP_KERNEL); + if (!ri) + return NULL; + + root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); + if (!root_ops) { + kfree(ri); + return NULL; + } + + ri->cfg = pci_acpi_setup_ecam_mapping(root); + if (!ri->cfg) { + kfree(ri); + kfree(root_ops); + return NULL; + } + + root_ops->release_info = pci_acpi_generic_release_info; + root_ops->prepare_resources = pci_acpi_root_prepare_resources; + root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; + bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); + if (!bus) + return NULL; + + /* If we must preserve the resource configuration, claim now */ + host = pci_find_host_bridge(bus); + if (host->preserve_config) + pci_bus_claim_resources(bus); + + /* + * Assign whatever was left unassigned. If we didn't claim above, + * this will reassign everything. 
+ */ + pci_assign_unassigned_root_bus_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + return bus; +} + +void pcibios_add_bus(struct pci_bus *bus) +{ + acpi_pci_add_bus(bus); +} + +void pcibios_remove_bus(struct pci_bus *bus) +{ + acpi_pci_remove_bus(bus); +} + +#endif diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index 9334b2dd47641b..6658c1edd464ea 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -257,8 +257,8 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = */ .rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE | PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE | - PCI_EXP_RTCTL_CRSSVE), - .ro = PCI_EXP_RTCAP_CRSVIS << 16, + PCI_EXP_RTCTL_RRS_SVE), + .ro = PCI_EXP_RTCAP_RRS_SV << 16, }, [PCI_EXP_RTSTA / 4] = { diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index f412ef73a6e4b1..35270172c83318 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1670,7 +1670,7 @@ static void pci_dma_cleanup(struct device *dev) iommu_device_unuse_default_domain(dev); } -struct bus_type pci_bus_type = { +const struct bus_type pci_bus_type = { .name = "pci", .match = pci_bus_match, .uevent = pci_uevent, diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 40cfa716392fbd..5d0f4db1cab786 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -31,6 +31,10 @@ #include #include "pci.h" +#ifndef ARCH_PCI_DEV_GROUPS +#define ARCH_PCI_DEV_GROUPS +#endif + static int sysfs_initialized; /* = 0 */ /* show configuration fields */ @@ -1624,6 +1628,7 @@ const struct attribute_group *pci_dev_groups[] = { &pci_dev_acpi_attr_group, #endif &pci_dev_resource_resize_group, + ARCH_PCI_DEV_GROUPS NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index ffaaca0978cbc9..7d85c04fbba2ae 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1283,7 +1283,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) { int delay = 1; bool retrain = false; - struct pci_dev *bridge; + struct pci_dev *root, *bridge; + + root = pcie_find_root_port(dev); if (pci_is_pcie(dev)) { bridge = pci_upstream_bridge(dev); @@ -1292,16 +1294,23 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) } /* - * After reset, the device should not silently discard config - * requests, but it may still indicate that it needs more time by - * responding to them with CRS completions. The Root Port will - * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete - * the read (except when CRS SV is enabled and the read was for the - * Vendor ID; in that case it synthesizes 0x0001 data). + * The caller has already waited long enough after a reset that the + * device should respond to config requests, but it may respond + * with Request Retry Status (RRS) if it needs more time to + * initialize. * - * Wait for the device to return a non-CRS completion. Read the - * Command register instead of Vendor ID so we don't have to - * contend with the CRS SV value. + * If the device is below a Root Port with Configuration RRS + * Software Visibility enabled, reading the Vendor ID returns a + * special data value if the device responded with RRS. Read the + * Vendor ID until we get non-RRS status. + * + * If there's no Root Port or Configuration RRS Software Visibility + * is not enabled, the device may still respond with RRS, but + * hardware may retry the config request. 
If no retries receive + * Successful Completion, hardware generally synthesizes ~0 + * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor + * ID for VFs and non-existent devices also returns ~0, so read the + * Command register until it returns something other than ~0. */ for (;;) { u32 id; @@ -1311,9 +1320,15 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) return -ENOTTY; } - pci_read_config_dword(dev, PCI_COMMAND, &id); - if (!PCI_POSSIBLE_ERROR(id)) - break; + if (root && root->config_rrs_sv) { + pci_read_config_dword(dev, PCI_VENDOR_ID, &id); + if (!pci_bus_rrs_vendor_id(id)) + break; + } else { + pci_read_config_dword(dev, PCI_COMMAND, &id); + if (!PCI_POSSIBLE_ERROR(id)) + break; + } if (delay > timeout) { pci_warn(dev, "not ready %dms after %s; giving up\n", @@ -1324,7 +1339,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) if (delay > PCI_RESET_WAIT) { if (retrain) { retrain = false; - if (pcie_failed_link_retrain(bridge)) { + if (pcie_failed_link_retrain(bridge) == 0) { delay = 1; continue; } @@ -4718,7 +4733,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); } - return pcie_wait_for_link_status(pdev, use_lt, !use_lt); + rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); + + /* + * Clear LBMS after a manual retrain so that the bit can be used + * to track link speed or width changes made by hardware itself + * in attempt to correct unreliable link operation. + */ + pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); + return rc; } /** @@ -5672,8 +5695,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_restore(dev); - if (dev->subordinate) + if (dev->subordinate) { + pci_bridge_wait_for_secondary_bus(dev, "bus reset"); pci_bus_restore_locked(dev->subordinate); + } } } @@ -5707,8 +5732,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot) if (!dev->slot || dev->slot != slot) continue; pci_dev_restore(dev); - if (dev->subordinate) + if (dev->subordinate) { + pci_bridge_wait_for_secondary_bus(dev, "slot reset"); pci_bus_restore_locked(dev->subordinate); + } } } @@ -6802,16 +6829,16 @@ static int of_pci_bus_find_domain_nr(struct device *parent) return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL); } -static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent) +static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr) { - if (bus->domain_nr < 0) + if (domain_nr < 0) return; /* Release domain from IDA where it was allocated. 
*/ - if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr) - ida_free(&pci_domain_nr_static_ida, bus->domain_nr); + if (of_get_pci_domain_nr(parent->of_node) == domain_nr) + ida_free(&pci_domain_nr_static_ida, domain_nr); else - ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr); + ida_free(&pci_domain_nr_dynamic_ida, domain_nr); } int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) @@ -6820,11 +6847,11 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) acpi_pci_bus_find_domain_nr(bus); } -void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent) +void pci_bus_release_domain_nr(struct device *parent, int domain_nr) { if (!acpi_disabled) return; - of_pci_bus_release_domain_nr(bus, parent); + of_pci_bus_release_domain_nr(parent, domain_nr); } #endif diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 79c8398f39384c..14d00ce45bfa95 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -13,9 +13,24 @@ #define PCIE_LINK_RETRAIN_TIMEOUT_MS 1000 -/* Power stable to PERST# inactive from PCIe card Electromechanical Spec */ +/* + * Power stable to PERST# inactive. + * + * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express + * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol + * "T_PVPERL". + */ #define PCIE_T_PVPERL_MS 100 +/* + * REFCLK stable before PERST# inactive. + * + * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express + * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol + * "T_PERST-CLK". + */ +#define PCIE_T_PERST_CLK_US 100 + /* * End of conventional reset (PERST# de-asserted) to first configuration * request (device able to respond with a "Request Retry Status" completion), @@ -124,7 +139,6 @@ void pcie_clear_device_status(struct pci_dev *dev); void pcie_clear_root_pme_status(struct pci_dev *dev); bool pci_check_pme_status(struct pci_dev *dev); void pci_pme_wakeup_bus(struct pci_bus *bus); -int __pci_pme_wakeup(struct pci_dev *dev, void *ign); void pci_pme_restore(struct pci_dev *dev); bool pci_dev_need_resume(struct pci_dev *dev); void pci_dev_adjust_pme(struct pci_dev *dev); @@ -139,6 +153,11 @@ bool pci_bridge_d3_possible(struct pci_dev *dev); void pci_bridge_d3_update(struct pci_dev *dev); int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type); +static inline bool pci_bus_rrs_vendor_id(u32 l) +{ + return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG; +} + static inline void pci_wakeup_event(struct pci_dev *dev) { /* Wait 100 ms before the system can be put into a sleep state. 
*/ @@ -169,7 +188,6 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev) } void pci_vpd_init(struct pci_dev *dev); -void pci_vpd_release(struct pci_dev *dev); extern const struct attribute_group pci_dev_vpd_attr_group; /* PCI Virtual Channel */ @@ -290,10 +308,10 @@ void pci_put_host_bridge_device(struct device *dev); int pci_configure_extended_tags(struct pci_dev *dev, void *ign); bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, - int crs_timeout); + int rrs_timeout); bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, - int crs_timeout); -int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout); + int rrs_timeout); +int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout); int pci_setup_device(struct pci_dev *dev); int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, @@ -398,6 +416,14 @@ static inline void pci_doe_destroy(struct pci_dev *pdev) { } static inline void pci_doe_disconnected(struct pci_dev *pdev) { } #endif +#ifdef CONFIG_PCI_NPEM +void pci_npem_create(struct pci_dev *dev); +void pci_npem_remove(struct pci_dev *dev); +#else +static inline void pci_npem_create(struct pci_dev *dev) { } +static inline void pci_npem_remove(struct pci_dev *dev) { } +#endif + /** * pci_dev_set_io_state - Set the new error state if possible. * @@ -606,7 +632,7 @@ void pci_acs_init(struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); int pci_dev_specific_enable_acs(struct pci_dev *dev); int pci_dev_specific_disable_acs_redir(struct pci_dev *dev); -bool pcie_failed_link_retrain(struct pci_dev *dev); +int pcie_failed_link_retrain(struct pci_dev *dev); #else static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) @@ -621,9 +647,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) { return -ENOTTY; } -static inline bool pcie_failed_link_retrain(struct pci_dev *dev) +static inline int pcie_failed_link_retrain(struct pci_dev *dev) { - return false; + return -ENOTTY; } #endif @@ -887,8 +913,6 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) #endif int pcim_intx(struct pci_dev *dev, int enable); - -int pcim_request_region(struct pci_dev *pdev, int bar, const char *name); int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name); void pcim_release_region(struct pci_dev *pdev, int bar); diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index f81b2303bf6a04..91acc7b17f6854 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -430,7 +430,7 @@ static int aer_inject(struct aer_error_inj *einj) else rperr->root_status |= PCI_ERR_ROOT_COR_RCV; rperr->source_id &= 0xffff0000; - rperr->source_id |= (einj->bus << 8) | devfn; + rperr->source_id |= PCI_DEVID(einj->bus, devfn); } if (einj->uncor_status) { if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV) @@ -443,7 +443,7 @@ static int aer_inject(struct aer_error_inj *einj) rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV; rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV; rperr->source_id &= 0x0000ffff; - rperr->source_id |= ((einj->bus << 8) | devfn) << 16; + rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16; } spin_unlock_irqrestore(&inject_lock, flags); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index b14b9876c0303f..4f68414c308609 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1061,7 +1061,7 @@ static int 
pci_register_host_bridge(struct pci_host_bridge *bridge) free: #ifdef CONFIG_PCI_DOMAINS_GENERIC - pci_bus_release_domain_nr(bus, parent); + pci_bus_release_domain_nr(parent, bus->domain_nr); #endif kfree(bus); return err; @@ -1203,15 +1203,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, } EXPORT_SYMBOL(pci_add_new_bus); -static void pci_enable_crs(struct pci_dev *pdev) +static void pci_enable_rrs_sv(struct pci_dev *pdev) { u16 root_cap = 0; - /* Enable CRS Software Visibility if supported */ + /* Enable Configuration RRS Software Visibility if supported */ pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap); - if (root_cap & PCI_EXP_RTCAP_CRSVIS) + if (root_cap & PCI_EXP_RTCAP_RRS_SV) { pcie_capability_set_word(pdev, PCI_EXP_RTCTL, - PCI_EXP_RTCTL_CRSSVE); + PCI_EXP_RTCTL_RRS_SVE); + pdev->config_rrs_sv = 1; + } } static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, @@ -1326,7 +1328,7 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); - pci_enable_crs(dev); + pci_enable_rrs_sv(dev); if ((secondary || subordinate) && !pcibios_assign_all_busses() && !is_cardbus && !broken) { @@ -2343,28 +2345,23 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus) } EXPORT_SYMBOL(pci_alloc_dev); -static bool pci_bus_crs_vendor_id(u32 l) -{ - return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG; -} - -static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l, +static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l, int timeout) { int delay = 1; - if (!pci_bus_crs_vendor_id(*l)) - return true; /* not a CRS completion */ + if (!pci_bus_rrs_vendor_id(*l)) + return true; /* not a Configuration RRS completion */ if (!timeout) - return false; /* CRS, but caller doesn't want to wait */ + return false; /* RRS, but caller doesn't want to wait */ /* * We got the reserved Vendor ID that indicates a completion with - * Configuration Request Retry Status (CRS). Retry until we get a + * Configuration Request Retry Status (RRS). Retry until we get a * valid Vendor ID or we time out. */ - while (pci_bus_crs_vendor_id(*l)) { + while (pci_bus_rrs_vendor_id(*l)) { if (delay > timeout) { pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n", pci_domain_nr(bus), bus->number, @@ -2403,8 +2400,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, *l == 0x0000ffff || *l == 0xffff0000) return false; - if (pci_bus_crs_vendor_id(*l)) - return pci_bus_wait_crs(bus, devfn, l, timeout); + if (pci_bus_rrs_vendor_id(*l)) + return pci_bus_wait_rrs(bus, devfn, l, timeout); return true; } @@ -2593,6 +2590,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dev->match_driver = false; ret = device_add(&dev->dev); WARN_ON(ret < 0); + + pci_npem_create(dev); } struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c index f07758c9edaddc..a23a4312574b9f 100644 --- a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c +++ b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c @@ -66,6 +66,11 @@ static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = { .compatible = "pci17cb,1101", .data = "wlan", }, + { + /* ATH11K in WCN6855 package. */ + .compatible = "pci17cb,1103", + .data = "wlan", + }, { /* ATH12K in WCN7850 package. 
*/ .compatible = "pci17cb,1107", diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a2ce4e08edf5a3..dccb60c1d9cc3d 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -66,7 +66,7 @@ * apply this erratum workaround to any downstream ports as long as they * support Link Active reporting and have the Link Control 2 register. * Restrict the speed to 2.5GT/s then with the Target Link Speed field, - * request a retrain and wait 200ms for the data link to go up. + * request a retrain and check the result. * * If this turns out successful and we know by the Vendor:Device ID it is * safe to do so, then lift the restriction, letting the devices negotiate @@ -74,33 +74,45 @@ * firmware may have already arranged and lift it with ports that already * report their data link being up. * - * Return TRUE if the link has been successfully retrained, otherwise FALSE. + * Otherwise revert the speed to the original setting and request a retrain + * again to remove any residual state, ignoring the result as it's supposed + * to fail anyway. + * + * Return 0 if the link has been successfully retrained. Return an error + * if retraining was not needed or we attempted a retrain and it failed. */ -bool pcie_failed_link_retrain(struct pci_dev *dev) +int pcie_failed_link_retrain(struct pci_dev *dev) { static const struct pci_device_id ids[] = { { PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */ {} }; u16 lnksta, lnkctl2; + int ret = -ENOTTY; if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) || !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting) - return false; + return ret; pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) == PCI_EXP_LNKSTA_LBMS) { + u16 oldlnkctl2 = lnkctl2; + pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n"); lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT; pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); - if (pcie_retrain_link(dev, false)) { + ret = pcie_retrain_link(dev, false); + if (ret) { pci_info(dev, "retraining failed\n"); - return false; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, + oldlnkctl2); + pcie_retrain_link(dev, true); + return ret; } pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); @@ -117,13 +129,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev) lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS; pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); - if (pcie_retrain_link(dev, false)) { + ret = pcie_retrain_link(dev, false); + if (ret) { pci_info(dev, "retraining failed\n"); - return false; + return ret; } } - return true; + return ret; } static ktime_t fixup_debug_start(struct pci_dev *dev, @@ -3608,6 +3621,8 @@ DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */ quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2, + quirk_broken_intx_masking); /* * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) @@ -4246,6 +4261,10 @@ static void quirk_dma_func0_alias(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); +/* Some Glenfly chips use function 0 as the PCIe Requester ID for DMA */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d40, 
quirk_dma_func0_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d41, quirk_dma_func0_alias); + static void quirk_dma_func1_alias(struct pci_dev *dev) { if (PCI_FUNC(dev->devfn) != 1) @@ -5070,6 +5089,8 @@ static const struct pci_dev_acs_enabled { /* QCOM QDF2xxx root ports */ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs }, { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs }, + /* QCOM SA8775P root port */ + { PCI_VENDOR_ID_QCOM, 0x0115, pci_quirk_qcom_rp_acs }, /* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */ { PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs }, /* Intel PCH root ports */ diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 4770cb87e3f0a1..e4ce1145aa3ee0 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -50,6 +50,8 @@ static void pci_destroy_dev(struct pci_dev *dev) if (!dev->dev.kobj.parent) return; + pci_npem_remove(dev); + device_del(&dev->dev); down_write(&pci_bus_sem); @@ -179,7 +181,7 @@ void pci_remove_root_bus(struct pci_bus *bus) #ifdef CONFIG_PCI_DOMAINS_GENERIC /* Release domain_nr if it was dynamically allocated */ if (host_bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET) - pci_bus_release_domain_nr(bus, host_bridge->dev.parent); + pci_bus_release_domain_nr(host_bridge->dev.parent, bus->domain_nr); #endif pci_remove_bus(bus); diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c index 485a642b9304da..e4300f5f304f3c 100644 --- a/drivers/pci/vpd.c +++ b/drivers/pci/vpd.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include "pci.h" #define PCI_VPD_LRDT_TAG_SIZE 3 diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 948b763dc451ec..d018f36f3a8935 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c index de7046e6b9c49d..b93eb6f43b98fa 100644 --- a/drivers/peci/controller/peci-aspeed.c +++ b/drivers/peci/controller/peci-aspeed.c @@ -2,7 +2,7 @@ // Copyright (c) 2012-2017 ASPEED Technology Inc. // Copyright (c) 2018-2021 Intel Corporation -#include +#include #include #include diff --git a/drivers/peci/request.c b/drivers/peci/request.c index 8d6dd7b6b55953..87eefe66743f2d 100644 --- a/drivers/peci/request.c +++ b/drivers/peci/request.c @@ -8,7 +8,7 @@ #include #include -#include +#include #include "internal.h" diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index aa9530b4064f70..bab8ba64162ffd 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -48,6 +48,13 @@ config ARM_CMN Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh Network interconnect. +config ARM_NI + tristate "Arm NI-700 PMU support" + depends on ARM64 || COMPILE_TEST + help + Support for PMU events monitoring on the Arm NI-700 Network-on-Chip + interconnect and family. 
+ config ARM_PMU depends on ARM || ARM64 bool "ARM PMU framework" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index d43df81d52f78e..8268f38e42c5ae 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_ARM_CCI_PMU) += arm-cci.o obj-$(CONFIG_ARM_CCN) += arm-ccn.o obj-$(CONFIG_ARM_CMN) += arm-cmn.o obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o +obj-$(CONFIG_ARM_NI) += arm-ni.o obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c index 38a2947ae8130c..c6ff1bc7d336b8 100644 --- a/drivers/perf/alibaba_uncore_drw_pmu.c +++ b/drivers/perf/alibaba_uncore_drw_pmu.c @@ -400,7 +400,7 @@ static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data) } /* clear common counter intr status */ - clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1); + clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status); writel(clr_status, drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR); } diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index f322e5ca1114b9..1d4d01e1275e05 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -47,46 +47,79 @@ * implementations, we'll have to introduce per cpu-type tables. */ enum m1_pmu_events { - M1_PMU_PERFCTR_UNKNOWN_01 = 0x01, - M1_PMU_PERFCTR_CPU_CYCLES = 0x02, - M1_PMU_PERFCTR_INSTRUCTIONS = 0x8c, - M1_PMU_PERFCTR_UNKNOWN_8d = 0x8d, - M1_PMU_PERFCTR_UNKNOWN_8e = 0x8e, - M1_PMU_PERFCTR_UNKNOWN_8f = 0x8f, - M1_PMU_PERFCTR_UNKNOWN_90 = 0x90, - M1_PMU_PERFCTR_UNKNOWN_93 = 0x93, - M1_PMU_PERFCTR_UNKNOWN_94 = 0x94, - M1_PMU_PERFCTR_UNKNOWN_95 = 0x95, - M1_PMU_PERFCTR_UNKNOWN_96 = 0x96, - M1_PMU_PERFCTR_UNKNOWN_97 = 0x97, - M1_PMU_PERFCTR_UNKNOWN_98 = 0x98, - M1_PMU_PERFCTR_UNKNOWN_99 = 0x99, - M1_PMU_PERFCTR_UNKNOWN_9a = 0x9a, - M1_PMU_PERFCTR_UNKNOWN_9b = 0x9b, - M1_PMU_PERFCTR_UNKNOWN_9c = 0x9c, - M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f, - M1_PMU_PERFCTR_UNKNOWN_bf = 0xbf, - M1_PMU_PERFCTR_UNKNOWN_c0 = 0xc0, - M1_PMU_PERFCTR_UNKNOWN_c1 = 0xc1, - M1_PMU_PERFCTR_UNKNOWN_c4 = 0xc4, - M1_PMU_PERFCTR_UNKNOWN_c5 = 0xc5, - M1_PMU_PERFCTR_UNKNOWN_c6 = 0xc6, - M1_PMU_PERFCTR_UNKNOWN_c8 = 0xc8, - M1_PMU_PERFCTR_UNKNOWN_ca = 0xca, - M1_PMU_PERFCTR_UNKNOWN_cb = 0xcb, - M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5, - M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6, - M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7, - M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8, - M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd, - M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT, + M1_PMU_PERFCTR_RETIRE_UOP = 0x1, + M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE = 0x2, + M1_PMU_PERFCTR_L1I_TLB_FILL = 0x4, + M1_PMU_PERFCTR_L1D_TLB_FILL = 0x5, + M1_PMU_PERFCTR_MMU_TABLE_WALK_INSTRUCTION = 0x7, + M1_PMU_PERFCTR_MMU_TABLE_WALK_DATA = 0x8, + M1_PMU_PERFCTR_L2_TLB_MISS_INSTRUCTION = 0xa, + M1_PMU_PERFCTR_L2_TLB_MISS_DATA = 0xb, + M1_PMU_PERFCTR_MMU_VIRTUAL_MEMORY_FAULT_NONSPEC = 0xd, + M1_PMU_PERFCTR_SCHEDULE_UOP = 0x52, + M1_PMU_PERFCTR_INTERRUPT_PENDING = 0x6c, + M1_PMU_PERFCTR_MAP_STALL_DISPATCH = 0x70, + M1_PMU_PERFCTR_MAP_REWIND = 0x75, + M1_PMU_PERFCTR_MAP_STALL = 0x76, + M1_PMU_PERFCTR_MAP_INT_UOP = 0x7c, + M1_PMU_PERFCTR_MAP_LDST_UOP = 0x7d, + M1_PMU_PERFCTR_MAP_SIMD_UOP = 0x7e, + M1_PMU_PERFCTR_FLUSH_RESTART_OTHER_NONSPEC = 0x84, + M1_PMU_PERFCTR_INST_ALL = 0x8c, + M1_PMU_PERFCTR_INST_BRANCH = 0x8d, + M1_PMU_PERFCTR_INST_BRANCH_CALL = 0x8e, + M1_PMU_PERFCTR_INST_BRANCH_RET = 0x8f, + M1_PMU_PERFCTR_INST_BRANCH_TAKEN = 
0x90, + M1_PMU_PERFCTR_INST_BRANCH_INDIR = 0x93, + M1_PMU_PERFCTR_INST_BRANCH_COND = 0x94, + M1_PMU_PERFCTR_INST_INT_LD = 0x95, + M1_PMU_PERFCTR_INST_INT_ST = 0x96, + M1_PMU_PERFCTR_INST_INT_ALU = 0x97, + M1_PMU_PERFCTR_INST_SIMD_LD = 0x98, + M1_PMU_PERFCTR_INST_SIMD_ST = 0x99, + M1_PMU_PERFCTR_INST_SIMD_ALU = 0x9a, + M1_PMU_PERFCTR_INST_LDST = 0x9b, + M1_PMU_PERFCTR_INST_BARRIER = 0x9c, + M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f, + M1_PMU_PERFCTR_L1D_TLB_ACCESS = 0xa0, + M1_PMU_PERFCTR_L1D_TLB_MISS = 0xa1, + M1_PMU_PERFCTR_L1D_CACHE_MISS_ST = 0xa2, + M1_PMU_PERFCTR_L1D_CACHE_MISS_LD = 0xa3, + M1_PMU_PERFCTR_LD_UNIT_UOP = 0xa6, + M1_PMU_PERFCTR_ST_UNIT_UOP = 0xa7, + M1_PMU_PERFCTR_L1D_CACHE_WRITEBACK = 0xa8, + M1_PMU_PERFCTR_LDST_X64_UOP = 0xb1, + M1_PMU_PERFCTR_LDST_XPG_UOP = 0xb2, + M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_SUCC = 0xb3, + M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_FAIL = 0xb4, + M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC = 0xbf, + M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC = 0xc0, + M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC = 0xc1, + M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC = 0xc4, + M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC = 0xc5, + M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC = 0xc6, + M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC = 0xc8, + M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC = 0xca, + M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC = 0xcb, + M1_PMU_PERFCTR_L1I_TLB_MISS_DEMAND = 0xd4, + M1_PMU_PERFCTR_MAP_DISPATCH_BUBBLE = 0xd6, + M1_PMU_PERFCTR_L1I_CACHE_MISS_DEMAND = 0xdb, + M1_PMU_PERFCTR_FETCH_RESTART = 0xde, + M1_PMU_PERFCTR_ST_NT_UOP = 0xe5, + M1_PMU_PERFCTR_LD_NT_UOP = 0xe6, + M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5, + M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6, + M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7, + M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8, + M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd, + M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT, /* * From this point onwards, these are not actual HW events, * but attributes that get stored in hw->config_base. */ - M1_PMU_CFG_COUNT_USER = BIT(8), - M1_PMU_CFG_COUNT_KERNEL = BIT(9), + M1_PMU_CFG_COUNT_USER = BIT(8), + M1_PMU_CFG_COUNT_KERNEL = BIT(9), }; /* @@ -96,46 +129,45 @@ enum m1_pmu_events { * counters had strange affinities. */ static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = { - [0 ... 
M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1, - [M1_PMU_PERFCTR_UNKNOWN_01] = BIT(7), - [M1_PMU_PERFCTR_CPU_CYCLES] = ANY_BUT_0_1 | BIT(0), - [M1_PMU_PERFCTR_INSTRUCTIONS] = BIT(7) | BIT(1), - [M1_PMU_PERFCTR_UNKNOWN_8d] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_8e] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_8f] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_90] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_93] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_94] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_95] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_96] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_97] = BIT(7), - [M1_PMU_PERFCTR_UNKNOWN_98] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_99] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_9a] = BIT(7), - [M1_PMU_PERFCTR_UNKNOWN_9b] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_9c] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7), - [M1_PMU_PERFCTR_UNKNOWN_bf] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_c0] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_c1] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_c4] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_c5] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_c6] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_c8] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_ca] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_cb] = ONLY_5_6_7, - [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6, - [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6, - [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6, - [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7, - [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6, + [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1, + [M1_PMU_PERFCTR_RETIRE_UOP] = BIT(7), + [M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE] = ANY_BUT_0_1 | BIT(0), + [M1_PMU_PERFCTR_INST_ALL] = BIT(7) | BIT(1), + [M1_PMU_PERFCTR_INST_BRANCH] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_BRANCH_CALL] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_BRANCH_RET] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_BRANCH_TAKEN] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_BRANCH_INDIR] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_BRANCH_COND] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_INT_LD] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_INT_ST] = BIT(7), + [M1_PMU_PERFCTR_INST_INT_ALU] = BIT(7), + [M1_PMU_PERFCTR_INST_SIMD_LD] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_SIMD_ST] = ONLY_5_6_7, + [M1_PMU_PERFCTR_INST_SIMD_ALU] = BIT(7), + [M1_PMU_PERFCTR_INST_LDST] = BIT(7), + [M1_PMU_PERFCTR_INST_BARRIER] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7), + [M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6, + [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6, + [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6, + [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7, + [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6, }; static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, - [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CPU_CYCLES, - [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INSTRUCTIONS, - /* No idea about the rest yet */ + [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE, + [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_ALL, }; /* sysfs definitions */ @@ -154,8 +186,8 @@ static ssize_t 
m1_pmu_events_sysfs_show(struct device *dev, PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config) static struct attribute *m1_pmu_event_attrs[] = { - M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES), - M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS), + M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE), + M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INST_ALL), NULL, }; @@ -400,7 +432,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu) regs = get_irq_regs(); - for (idx = 0; idx < cpu_pmu->num_events; idx++) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, M1_PMU_NR_COUNTERS) { struct perf_event *event = cpuc->events[idx]; struct perf_sample_data data; @@ -560,7 +592,7 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags) cpu_pmu->reset = m1_pmu_reset; cpu_pmu->set_event_filter = m1_pmu_set_event_filter; - cpu_pmu->num_events = M1_PMU_NR_COUNTERS; + bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS); cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group; return 0; diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index c932d9d355cf0b..397a46410f7cb7 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -24,14 +24,6 @@ #define CMN_NI_NODE_ID GENMASK_ULL(31, 16) #define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32) -#define CMN_NODEID_DEVID(reg) ((reg) & 3) -#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1) -#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1) -#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3) -#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7) -#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits))) -#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1)) - #define CMN_CHILD_INFO 0x0080 #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) @@ -43,6 +35,9 @@ #define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION) #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4) +/* Currently XPs are the node type we can have most of; others top out at 128 */ +#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS + /* The CFG node has various info besides the discovery tree */ #define CMN_CFGM_PERIPH_ID_01 0x0008 #define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0) @@ -50,24 +45,28 @@ #define CMN_CFGM_PERIPH_ID_23 0x0010 #define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4) -#define CMN_CFGM_INFO_GLOBAL 0x900 +#define CMN_CFGM_INFO_GLOBAL 0x0900 #define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63) #define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52) #define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50) +#define CMN_INFO_DEVICE_ISO_ENABLE BIT_ULL(44) -#define CMN_CFGM_INFO_GLOBAL_1 0x908 +#define CMN_CFGM_INFO_GLOBAL_1 0x0908 #define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2) #define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0) /* XPs also have some local topology info which has uses too */ #define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p)) -#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0) +#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(5, 0) #define CMN_MAX_PORTS 6 #define CI700_CONNECT_INFO_P2_5_OFFSET 0x10 /* PMU registers occupy the 3rd 4KB page of each node's region */ #define CMN_PMU_OFFSET 0x2000 +/* ...except when they don't :( */ +#define CMN_S3_DTM_OFFSET 0xa000 +#define CMN_S3_PMU_OFFSET 0xd900 /* For most nodes, this is all there is */ #define CMN_PMU_EVENT_SEL 0x000 @@ -78,7 +77,8 @@ /* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */ #define CMN__PMU_OCCUP1_ID 
GENMASK_ULL(34, 32) -/* HN-Ps are weird... */ +/* Some types are designed to coexist with another device in the same node */ +#define CMN_CCLA_PMU_EVENT_SEL 0x008 #define CMN_HNP_PMU_EVENT_SEL 0x008 /* DTMs live in the PMU space of XP registers */ @@ -123,27 +123,28 @@ /* The DTC node is where the magic happens */ #define CMN_DT_DTC_CTL 0x0a00 #define CMN_DT_DTC_CTL_DT_EN BIT(0) +#define CMN_DT_DTC_CTL_CG_DISABLE BIT(10) /* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */ #define _CMN_DT_CNT_REG(n) ((((n) / 2) * 4 + (n) % 2) * 4) -#define CMN_DT_PMEVCNT(n) (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n)) -#define CMN_DT_PMCCNTR (CMN_PMU_OFFSET + 0x40) +#define CMN_DT_PMEVCNT(dtc, n) ((dtc)->pmu_base + _CMN_DT_CNT_REG(n)) +#define CMN_DT_PMCCNTR(dtc) ((dtc)->pmu_base + 0x40) -#define CMN_DT_PMEVCNTSR(n) (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n)) -#define CMN_DT_PMCCNTRSR (CMN_PMU_OFFSET + 0x90) +#define CMN_DT_PMEVCNTSR(dtc, n) ((dtc)->pmu_base + 0x50 + _CMN_DT_CNT_REG(n)) +#define CMN_DT_PMCCNTRSR(dtc) ((dtc)->pmu_base + 0x90) -#define CMN_DT_PMCR (CMN_PMU_OFFSET + 0x100) +#define CMN_DT_PMCR(dtc) ((dtc)->pmu_base + 0x100) #define CMN_DT_PMCR_PMU_EN BIT(0) #define CMN_DT_PMCR_CNTR_RST BIT(5) #define CMN_DT_PMCR_OVFL_INTR_EN BIT(6) -#define CMN_DT_PMOVSR (CMN_PMU_OFFSET + 0x118) -#define CMN_DT_PMOVSR_CLR (CMN_PMU_OFFSET + 0x120) +#define CMN_DT_PMOVSR(dtc) ((dtc)->pmu_base + 0x118) +#define CMN_DT_PMOVSR_CLR(dtc) ((dtc)->pmu_base + 0x120) -#define CMN_DT_PMSSR (CMN_PMU_OFFSET + 0x128) +#define CMN_DT_PMSSR(dtc) ((dtc)->pmu_base + 0x128) #define CMN_DT_PMSSR_SS_STATUS(n) BIT(n) -#define CMN_DT_PMSRR (CMN_PMU_OFFSET + 0x130) +#define CMN_DT_PMSRR(dtc) ((dtc)->pmu_base + 0x130) #define CMN_DT_PMSRR_SS_REQ BIT(0) #define CMN_DT_NUM_COUNTERS 8 @@ -198,10 +199,11 @@ enum cmn_model { CMN650 = 2, CMN700 = 4, CI700 = 8, + CMNS3 = 16, /* ...and then we can use bitmap tricks for commonality */ CMN_ANY = -1, NOT_CMN600 = -2, - CMN_650ON = CMN650 | CMN700, + CMN_650ON = CMN650 | CMN700 | CMNS3, }; /* Actual part numbers and revision IDs defined by the hardware */ @@ -210,6 +212,7 @@ enum cmn_part { PART_CMN650 = 0x436, PART_CMN700 = 0x43c, PART_CI700 = 0x43a, + PART_CMN_S3 = 0x43e, }; /* CMN-600 r0px shouldn't exist in silicon, thankfully */ @@ -261,6 +264,7 @@ enum cmn_node_type { CMN_TYPE_HNS = 0x200, CMN_TYPE_HNS_MPAM_S, CMN_TYPE_HNS_MPAM_NS, + CMN_TYPE_APB = 0x1000, /* Not a real node type */ CMN_TYPE_WP = 0x7770 }; @@ -280,8 +284,11 @@ struct arm_cmn_node { u16 id, logid; enum cmn_node_type type; + /* XP properties really, but replicated to children for convenience */ u8 dtm; s8 dtc; + u8 portid_bits:4; + u8 deviceid_bits:4; /* DN/HN-F/CXHA */ struct { u8 val : 4; @@ -307,8 +314,9 @@ struct arm_cmn_dtm { struct arm_cmn_dtc { void __iomem *base; + void __iomem *pmu_base; int irq; - int irq_friend; + s8 irq_friend; bool cc_active; struct perf_event *counters[CMN_DT_NUM_COUNTERS]; @@ -357,49 +365,33 @@ struct arm_cmn { static int arm_cmn_hp_state; struct arm_cmn_nodeid { - u8 x; - u8 y; u8 port; u8 dev; }; static int arm_cmn_xyidbits(const struct arm_cmn *cmn) { - return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2); + return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1)); } -static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) +static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn) { struct arm_cmn_nodeid nid; - if (cmn->num_xps == 1) { - nid.x = 0; - nid.y = 0; - nid.port = CMN_NODEID_1x1_PID(id); - nid.dev = CMN_NODEID_DEVID(id); - } else { - int 
bits = arm_cmn_xyidbits(cmn); - - nid.x = CMN_NODEID_X(id, bits); - nid.y = CMN_NODEID_Y(id, bits); - if (cmn->ports_used & 0xc) { - nid.port = CMN_NODEID_EXT_PID(id); - nid.dev = CMN_NODEID_EXT_DEVID(id); - } else { - nid.port = CMN_NODEID_PID(id); - nid.dev = CMN_NODEID_DEVID(id); - } - } + nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1); + nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1); return nid; } static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn, const struct arm_cmn_node *dn) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); - int xp_idx = cmn->mesh_x * nid.y + nid.x; + int id = dn->id >> (dn->portid_bits + dn->deviceid_bits); + int bits = arm_cmn_xyidbits(cmn); + int x = id >> bits; + int y = id & ((1U << bits) - 1); - return cmn->xps + xp_idx; + return cmn->xps + cmn->mesh_x * y + x; } static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, enum cmn_node_type type) @@ -423,15 +415,27 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn) return CMN700; case PART_CI700: return CI700; + case PART_CMN_S3: + return CMNS3; default: return 0; }; } +static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn) +{ + if (cmn->part == PART_CMN_S3) { + if (dn->type == CMN_TYPE_XP) + return CMN_S3_DTM_OFFSET; + return CMN_S3_PMU_OFFSET; + } + return CMN_PMU_OFFSET; +} + static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn, const struct arm_cmn_node *xp, int port) { - int offset = CMN_MXP__CONNECT_INFO(port); + int offset = CMN_MXP__CONNECT_INFO(port) - arm_cmn_pmu_offset(cmn, xp); if (port >= 2) { if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650) @@ -444,7 +448,7 @@ static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn, offset += CI700_CONNECT_INFO_P2_5_OFFSET; } - return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset); + return readl_relaxed(xp->pmu_base + offset); } static struct dentry *arm_cmn_debugfs; @@ -478,20 +482,25 @@ static const char *arm_cmn_device_type(u8 type) case 0x17: return "RN-F_C_E|"; case 0x18: return " RN-F_E |"; case 0x19: return "RN-F_E_E|"; + case 0x1a: return " HN-S |"; + case 0x1b: return " LCN |"; case 0x1c: return " MTSX |"; case 0x1d: return " HN-V |"; case 0x1e: return " CCG |"; + case 0x20: return " RN-F_F |"; + case 0x21: return "RN-F_F_E|"; + case 0x22: return " SN-F_F |"; default: return " ???? 
|"; } } -static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) +static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d) { struct arm_cmn *cmn = s->private; struct arm_cmn_node *dn; + u16 id = xp->id | d | (p << xp->deviceid_bits); for (dn = cmn->dns; dn->type; dn++) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); int pad = dn->logid < 10; if (dn->type == CMN_TYPE_XP) @@ -500,7 +509,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) if (dn->type < CMN_TYPE_HNI) continue; - if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d) + if (dn->id != id) continue; seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid); @@ -521,6 +530,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) y = cmn->mesh_y; while (y--) { int xp_base = cmn->mesh_x * y; + struct arm_cmn_node *xp = cmn->xps + xp_base; u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION]; for (x = 0; x < cmn->mesh_x; x++) @@ -528,16 +538,14 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) seq_printf(s, "\n%-2d |", y); for (x = 0; x < cmn->mesh_x; x++) { - struct arm_cmn_node *xp = cmn->xps + xp_base + x; - for (p = 0; p < CMN_MAX_PORTS; p++) - port[p][x] = arm_cmn_device_connect_info(cmn, xp, p); + port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p); seq_printf(s, " XP #%-3d|", xp_base + x); } seq_puts(s, "\n |"); for (x = 0; x < cmn->mesh_x; x++) { - s8 dtc = cmn->xps[xp_base + x].dtc; + s8 dtc = xp[x].dtc; if (dtc < 0) seq_puts(s, " DTC ?? |"); @@ -554,10 +562,10 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) seq_puts(s, arm_cmn_device_type(port[p][x])); seq_puts(s, "\n 0|"); for (x = 0; x < cmn->mesh_x; x++) - arm_cmn_show_logid(s, x, y, p, 0); + arm_cmn_show_logid(s, xp + x, p, 0); seq_puts(s, "\n 1|"); for (x = 0; x < cmn->mesh_x; x++) - arm_cmn_show_logid(s, x, y, p, 1); + arm_cmn_show_logid(s, xp + x, p, 1); } seq_puts(s, "\n-----+"); } @@ -585,7 +593,7 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {} struct arm_cmn_hw_event { struct arm_cmn_node *dn; - u64 dtm_idx[4]; + u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)]; s8 dtc_idx[CMN_MAX_DTCS]; u8 num_dns; u8 dtm_offset; @@ -599,6 +607,7 @@ struct arm_cmn_hw_event { bool wide_sel; enum cmn_filter_select filter_sel; }; +static_assert(sizeof(struct arm_cmn_hw_event) <= offsetof(struct hw_perf_event, target)); #define for_each_hw_dn(hw, dn, i) \ for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++) @@ -609,7 +618,6 @@ struct arm_cmn_hw_event { static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event) { - BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target)); return (struct arm_cmn_hw_event *)&event->hw; } @@ -790,8 +798,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event) #define CMN_EVENT_CCRA(_name, _event) \ CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event) -#define CMN_EVENT_CCHA(_name, _event) \ - CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event) +#define CMN_EVENT_CCHA(_model, _name, _event) \ + CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event) #define CMN_EVENT_CCLA(_name, _event) \ CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event) #define CMN_EVENT_CCLA_RNI(_name, _event) \ @@ -1149,42 +1157,43 @@ static struct attribute *arm_cmn_event_attrs[] = { CMN_EVENT_CCRA(wdb_alloc, 0x59), CMN_EVENT_CCRA(ssb_alloc, 0x5a), - 
CMN_EVENT_CCHA(rddatbyp, 0x61), - CMN_EVENT_CCHA(chirsp_up_stall, 0x62), - CMN_EVENT_CCHA(chidat_up_stall, 0x63), - CMN_EVENT_CCHA(snppcrd_link0_stall, 0x64), - CMN_EVENT_CCHA(snppcrd_link1_stall, 0x65), - CMN_EVENT_CCHA(snppcrd_link2_stall, 0x66), - CMN_EVENT_CCHA(reqtrk_occ, 0x67), - CMN_EVENT_CCHA(rdb_occ, 0x68), - CMN_EVENT_CCHA(rdbyp_occ, 0x69), - CMN_EVENT_CCHA(wdb_occ, 0x6a), - CMN_EVENT_CCHA(snptrk_occ, 0x6b), - CMN_EVENT_CCHA(sdb_occ, 0x6c), - CMN_EVENT_CCHA(snphaz_occ, 0x6d), - CMN_EVENT_CCHA(reqtrk_alloc, 0x6e), - CMN_EVENT_CCHA(rdb_alloc, 0x6f), - CMN_EVENT_CCHA(rdbyp_alloc, 0x70), - CMN_EVENT_CCHA(wdb_alloc, 0x71), - CMN_EVENT_CCHA(snptrk_alloc, 0x72), - CMN_EVENT_CCHA(sdb_alloc, 0x73), - CMN_EVENT_CCHA(snphaz_alloc, 0x74), - CMN_EVENT_CCHA(pb_rhu_req_occ, 0x75), - CMN_EVENT_CCHA(pb_rhu_req_alloc, 0x76), - CMN_EVENT_CCHA(pb_rhu_pcie_req_occ, 0x77), - CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc, 0x78), - CMN_EVENT_CCHA(pb_pcie_wr_req_occ, 0x79), - CMN_EVENT_CCHA(pb_pcie_wr_req_alloc, 0x7a), - CMN_EVENT_CCHA(pb_pcie_reg_req_occ, 0x7b), - CMN_EVENT_CCHA(pb_pcie_reg_req_alloc, 0x7c), - CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ, 0x7d), - CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc, 0x7e), - CMN_EVENT_CCHA(pb_rhu_dat_occ, 0x7f), - CMN_EVENT_CCHA(pb_rhu_dat_alloc, 0x80), - CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ, 0x81), - CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc, 0x82), - CMN_EVENT_CCHA(pb_pcie_wr_dat_occ, 0x83), - CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc, 0x84), + CMN_EVENT_CCHA(CMN_ANY, rddatbyp, 0x61), + CMN_EVENT_CCHA(CMN_ANY, chirsp_up_stall, 0x62), + CMN_EVENT_CCHA(CMN_ANY, chidat_up_stall, 0x63), + CMN_EVENT_CCHA(CMN_ANY, snppcrd_link0_stall, 0x64), + CMN_EVENT_CCHA(CMN_ANY, snppcrd_link1_stall, 0x65), + CMN_EVENT_CCHA(CMN_ANY, snppcrd_link2_stall, 0x66), + CMN_EVENT_CCHA(CMN_ANY, reqtrk_occ, 0x67), + CMN_EVENT_CCHA(CMN_ANY, rdb_occ, 0x68), + CMN_EVENT_CCHA(CMN_ANY, rdbyp_occ, 0x69), + CMN_EVENT_CCHA(CMN_ANY, wdb_occ, 0x6a), + CMN_EVENT_CCHA(CMN_ANY, snptrk_occ, 0x6b), + CMN_EVENT_CCHA(CMN_ANY, sdb_occ, 0x6c), + CMN_EVENT_CCHA(CMN_ANY, snphaz_occ, 0x6d), + CMN_EVENT_CCHA(CMN_ANY, reqtrk_alloc, 0x6e), + CMN_EVENT_CCHA(CMN_ANY, rdb_alloc, 0x6f), + CMN_EVENT_CCHA(CMN_ANY, rdbyp_alloc, 0x70), + CMN_EVENT_CCHA(CMN_ANY, wdb_alloc, 0x71), + CMN_EVENT_CCHA(CMN_ANY, snptrk_alloc, 0x72), + CMN_EVENT_CCHA(CMN_ANY, sdb_alloc, 0x73), + CMN_EVENT_CCHA(CMN_ANY, snphaz_alloc, 0x74), + CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_occ, 0x75), + CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_alloc, 0x76), + CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_occ, 0x77), + CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_alloc, 0x78), + CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_occ, 0x79), + CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_alloc, 0x7a), + CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_occ, 0x7b), + CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_alloc, 0x7c), + CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_occ, 0x7d), + CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_alloc, 0x7e), + CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_occ, 0x7f), + CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_alloc, 0x80), + CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_occ, 0x81), + CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_alloc, 0x82), + CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_occ, 0x83), + CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_alloc, 0x84), + CMN_EVENT_CCHA(CMNS3, chirsp1_up_stall, 0x85), CMN_EVENT_CCLA(rx_cxs, 0x21), CMN_EVENT_CCLA(tx_cxs, 0x22), @@ -1271,15 +1280,11 @@ static ssize_t arm_cmn_format_show(struct device *dev, struct device_attribute *attr, char *buf) { struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr); -
int lo = __ffs(fmt->field), hi = __fls(fmt->field); - - if (lo == hi) - return sysfs_emit(buf, "config:%d\n", lo); if (!fmt->config) - return sysfs_emit(buf, "config:%d-%d\n", lo, hi); + return sysfs_emit(buf, "config:%*pbl\n", 64, &fmt->field); - return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi); + return sysfs_emit(buf, "config%d:%*pbl\n", fmt->config, 64, &fmt->field); } #define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \ @@ -1415,7 +1420,7 @@ static u32 arm_cmn_wp_config(struct perf_event *event, int wp_idx) static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state) { if (!cmn->state) - writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR); + writel_relaxed(0, CMN_DT_PMCR(&cmn->dtc[0])); cmn->state |= state; } @@ -1424,7 +1429,7 @@ static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state) cmn->state &= ~state; if (!cmn->state) writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, - cmn->dtc[0].base + CMN_DT_PMCR); + CMN_DT_PMCR(&cmn->dtc[0])); } static void arm_cmn_pmu_enable(struct pmu *pmu) @@ -1459,18 +1464,19 @@ static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw, static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc) { - u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR); + void __iomem *pmccntr = CMN_DT_PMCCNTR(dtc); + u64 val = readq_relaxed(pmccntr); - writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR); + writeq_relaxed(CMN_CC_INIT, pmccntr); return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1); } static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx) { - u32 val, pmevcnt = CMN_DT_PMEVCNT(idx); + void __iomem *pmevcnt = CMN_DT_PMEVCNT(dtc, idx); + u32 val = readl_relaxed(pmevcnt); - val = readl_relaxed(dtc->base + pmevcnt); - writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt); + writel_relaxed(CMN_COUNTER_INIT, pmevcnt); return val - CMN_COUNTER_INIT; } @@ -1481,7 +1487,7 @@ static void arm_cmn_init_counter(struct perf_event *event) u64 count; for_each_hw_dtc_idx(hw, i, idx) { - writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx)); + writel_relaxed(CMN_COUNTER_INIT, CMN_DT_PMEVCNT(&cmn->dtc[i], idx)); cmn->dtc[i].counters[idx] = event; } @@ -1564,9 +1570,12 @@ static void arm_cmn_event_start(struct perf_event *event, int flags) int i; if (type == CMN_TYPE_DTC) { - i = hw->dtc_idx[0]; - writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR); - cmn->dtc[i].cc_active = true; + struct arm_cmn_dtc *dtc = cmn->dtc + hw->dtc_idx[0]; + + writel_relaxed(CMN_DT_DTC_CTL_DT_EN | CMN_DT_DTC_CTL_CG_DISABLE, + dtc->base + CMN_DT_DTC_CTL); + writeq_relaxed(CMN_CC_INIT, CMN_DT_PMCCNTR(dtc)); + dtc->cc_active = true; } else if (type == CMN_TYPE_WP) { u64 val = CMN_EVENT_WP_VAL(event); u64 mask = CMN_EVENT_WP_MASK(event); @@ -1595,8 +1604,10 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) int i; if (type == CMN_TYPE_DTC) { - i = hw->dtc_idx[0]; - cmn->dtc[i].cc_active = false; + struct arm_cmn_dtc *dtc = cmn->dtc + hw->dtc_idx[0]; + + dtc->cc_active = false; + writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL); } else if (type == CMN_TYPE_WP) { for_each_hw_dn(hw, dn, i) { void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); @@ -1784,7 +1795,8 @@ static int arm_cmn_event_init(struct perf_event *event) /* ...but the DTM may depend on which port we're watching */ if (cmn->multi_dtm) hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; - } else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) { + } else if (type == CMN_TYPE_XP && + (cmn->part == PART_CMN700 
|| cmn->part == PART_CMN_S3)) { hw->wide_sel = true; } @@ -1815,10 +1827,7 @@ static int arm_cmn_event_init(struct perf_event *event) } if (!hw->num_dns) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid); - - dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", - nodeid, nid.x, nid.y, nid.port, nid.dev, type); + dev_dbg(cmn->dev, "invalid node 0x%x type 0x%x\n", nodeid, type); return -EINVAL; } @@ -1921,7 +1930,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) arm_cmn_claim_wp_idx(dtm, event, d, wp_idx, i); writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); } else { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + struct arm_cmn_nodeid nid = arm_cmn_nid(dn); if (cmn->multi_dtm) nid.port %= 2; @@ -2010,7 +2019,7 @@ static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_nod cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node); node = dev_to_node(cmn->dev); - if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node) + if (cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node) arm_cmn_migrate(cmn, cpu); return 0; } @@ -2043,7 +2052,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id) irqreturn_t ret = IRQ_NONE; for (;;) { - u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR); + u32 status = readl_relaxed(CMN_DT_PMOVSR(dtc)); u64 delta; int i; @@ -2065,7 +2074,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id) } } - writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR); + writel_relaxed(status, CMN_DT_PMOVSR_CLR(dtc)); if (!dtc->irq_friend) return ret; @@ -2119,15 +2128,16 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id { struct arm_cmn_dtc *dtc = cmn->dtc + idx; - dtc->base = dn->pmu_base - CMN_PMU_OFFSET; + dtc->pmu_base = dn->pmu_base; + dtc->base = dtc->pmu_base - arm_cmn_pmu_offset(cmn, dn); dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx); if (dtc->irq < 0) return dtc->irq; writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL); - writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); - writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR); - writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); + writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, CMN_DT_PMCR(dtc)); + writeq_relaxed(0, CMN_DT_PMCCNTR(dtc)); + writel_relaxed(0x1ff, CMN_DT_PMOVSR_CLR(dtc)); return 0; } @@ -2168,10 +2178,12 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) continue; xp = arm_cmn_node_to_xp(cmn, dn); + dn->portid_bits = xp->portid_bits; + dn->deviceid_bits = xp->deviceid_bits; dn->dtc = xp->dtc; dn->dtm = xp->dtm; if (cmn->multi_dtm) - dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; + dn->dtm += arm_cmn_nid(dn).port / 2; if (dn->type == CMN_TYPE_DTC) { int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); @@ -2213,7 +2225,7 @@ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_c node->id = FIELD_GET(CMN_NI_NODE_ID, reg); node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg); - node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET; + node->pmu_base = cmn->base + offset + arm_cmn_pmu_offset(cmn, node); if (node->type == CMN_TYPE_CFG) level = 0; @@ -2271,7 +2283,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23); cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); + /* + * With the device isolation feature, if firmware has neglected to enable + * an XP port then we 
risk locking up if we try to access anything behind + * it; however we also have no way to tell from Non-Secure whether any + * given port is disabled or not, so the only way to win is not to play... + */ reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL); + if (reg & CMN_INFO_DEVICE_ISO_ENABLE) { + dev_err(cmn->dev, "Device isolation enabled, not continuing due to risk of lockup\n"); + return -ENODEV; + } cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN; cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg); cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg); @@ -2341,18 +2363,27 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) arm_cmn_init_dtm(dtm++, xp, 0); /* * Keeping track of connected ports will let us filter out - * unnecessary XP events easily. We can also reliably infer the - * "extra device ports" configuration for the node ID format - * from this, since in that case we will see at least one XP - * with port 2 connected, for the HN-D. + * unnecessary XP events easily, and also infer the per-XP + * part of the node ID format. */ for (int p = 0; p < CMN_MAX_PORTS; p++) if (arm_cmn_device_connect_info(cmn, xp, p)) xp_ports |= BIT(p); - if (cmn->multi_dtm && (xp_ports & 0xc)) + if (cmn->num_xps == 1) { + xp->portid_bits = 3; + xp->deviceid_bits = 2; + } else if (xp_ports > 0x3) { + xp->portid_bits = 2; + xp->deviceid_bits = 1; + } else { + xp->portid_bits = 1; + xp->deviceid_bits = 2; + } + + if (cmn->multi_dtm && (xp_ports > 0x3)) arm_cmn_init_dtm(dtm++, xp, 1); - if (cmn->multi_dtm && (xp_ports & 0x30)) + if (cmn->multi_dtm && (xp_ports > 0xf)) arm_cmn_init_dtm(dtm++, xp, 2); cmn->ports_used |= xp_ports; @@ -2407,10 +2438,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_CXHA: case CMN_TYPE_CCRA: case CMN_TYPE_CCHA: - case CMN_TYPE_CCLA: case CMN_TYPE_HNS: dn++; break; + case CMN_TYPE_CCLA: + dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL; + dn++; + break; /* Nothing to see here */ case CMN_TYPE_MPAM_S: case CMN_TYPE_MPAM_NS: @@ -2418,6 +2452,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_CXLA: case CMN_TYPE_HNS_MPAM_S: case CMN_TYPE_HNS_MPAM_NS: + case CMN_TYPE_APB: break; /* * Split "optimised" combination nodes into separate @@ -2428,7 +2463,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_HNP: case CMN_TYPE_CCLA_RNI: dn[1] = dn[0]; - dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL; + dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL; dn[1].type = arm_cmn_subtype(dn->type); dn += 2; break; @@ -2603,6 +2638,7 @@ static const struct of_device_id arm_cmn_of_match[] = { { .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 }, { .compatible = "arm,cmn-650" }, { .compatible = "arm,cmn-700" }, + { .compatible = "arm,cmn-s3" }, { .compatible = "arm,ci-700" }, {} }; diff --git a/drivers/perf/arm-ni.c b/drivers/perf/arm-ni.c new file mode 100644 index 00000000000000..90fcfe693439ef --- /dev/null +++ b/drivers/perf/arm-ni.c @@ -0,0 +1,781 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022-2024 Arm Limited +// NI-700 Network-on-Chip PMU driver + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Common registers */ +#define NI_NODE_TYPE 0x000 +#define NI_NODE_TYPE_NODE_ID GENMASK(31, 16) +#define NI_NODE_TYPE_NODE_TYPE GENMASK(15, 0) + +#define NI_CHILD_NODE_INFO 0x004 +#define NI_CHILD_PTR(n) (0x008 + (n) * 4) + +#define NI700_PMUSELA 0x00c + +/* Config node */ 
+#define NI_PERIPHERAL_ID0 0xfe0 +#define NI_PIDR0_PART_7_0 GENMASK(7, 0) +#define NI_PERIPHERAL_ID1 0xfe4 +#define NI_PIDR1_PART_11_8 GENMASK(3, 0) +#define NI_PERIPHERAL_ID2 0xfe8 +#define NI_PIDR2_VERSION GENMASK(7, 4) + +/* PMU node */ +#define NI_PMEVCNTR(n) (0x008 + (n) * 8) +#define NI_PMCCNTR_L 0x0f8 +#define NI_PMCCNTR_U 0x0fc +#define NI_PMEVTYPER(n) (0x400 + (n) * 4) +#define NI_PMEVTYPER_NODE_TYPE GENMASK(12, 9) +#define NI_PMEVTYPER_NODE_ID GENMASK(8, 0) +#define NI_PMCNTENSET 0xc00 +#define NI_PMCNTENCLR 0xc20 +#define NI_PMINTENSET 0xc40 +#define NI_PMINTENCLR 0xc60 +#define NI_PMOVSCLR 0xc80 +#define NI_PMOVSSET 0xcc0 +#define NI_PMCFGR 0xe00 +#define NI_PMCR 0xe04 +#define NI_PMCR_RESET_CCNT BIT(2) +#define NI_PMCR_RESET_EVCNT BIT(1) +#define NI_PMCR_ENABLE BIT(0) + +#define NI_NUM_COUNTERS 8 +#define NI_CCNT_IDX 31 + +/* Event attributes */ +#define NI_CONFIG_TYPE GENMASK_ULL(15, 0) +#define NI_CONFIG_NODEID GENMASK_ULL(31, 16) +#define NI_CONFIG_EVENTID GENMASK_ULL(47, 32) + +#define NI_EVENT_TYPE(event) FIELD_GET(NI_CONFIG_TYPE, (event)->attr.config) +#define NI_EVENT_NODEID(event) FIELD_GET(NI_CONFIG_NODEID, (event)->attr.config) +#define NI_EVENT_EVENTID(event) FIELD_GET(NI_CONFIG_EVENTID, (event)->attr.config) + +enum ni_part { + PART_NI_700 = 0x43b, + PART_NI_710AE = 0x43d, +}; + +enum ni_node_type { + NI_GLOBAL, + NI_VOLTAGE, + NI_POWER, + NI_CLOCK, + NI_ASNI, + NI_AMNI, + NI_PMU, + NI_HSNI, + NI_HMNI, + NI_PMNI, +}; + +struct arm_ni_node { + void __iomem *base; + enum ni_node_type type; + u16 id; + u32 num_components; +}; + +struct arm_ni_unit { + void __iomem *pmusela; + enum ni_node_type type; + u16 id; + bool ns; + union { + __le64 pmusel; + u8 event[8]; + }; +}; + +struct arm_ni_cd { + void __iomem *pmu_base; + u16 id; + int num_units; + int irq; + int cpu; + struct hlist_node cpuhp_node; + struct pmu pmu; + struct arm_ni_unit *units; + struct perf_event *evcnt[NI_NUM_COUNTERS]; + struct perf_event *ccnt; +}; + +struct arm_ni { + struct device *dev; + void __iomem *base; + enum ni_part part; + int id; + int num_cds; + struct arm_ni_cd cds[] __counted_by(num_cds); +}; + +#define cd_to_ni(cd) container_of((cd), struct arm_ni, cds[(cd)->id]) +#define pmu_to_cd(p) container_of((p), struct arm_ni_cd, pmu) + +#define cd_for_each_unit(cd, u) \ + for (struct arm_ni_unit *u = cd->units; u < cd->units + cd->num_units; u++) + +static int arm_ni_hp_state; + +struct arm_ni_event_attr { + struct device_attribute attr; + enum ni_node_type type; +}; + +#define NI_EVENT_ATTR(_name, _type) \ + (&((struct arm_ni_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, arm_ni_event_show, NULL), \ + .type = _type, \ + }})[0].attr.attr) + +static ssize_t arm_ni_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ni_event_attr *eattr = container_of(attr, typeof(*eattr), attr); + + if (eattr->type == NI_PMU) + return sysfs_emit(buf, "type=0x%x\n", eattr->type); + + return sysfs_emit(buf, "type=0x%x,eventid=?,nodeid=?\n", eattr->type); +} + +static umode_t arm_ni_event_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int unused) +{ + struct device *dev = kobj_to_dev(kobj); + struct arm_ni_cd *cd = pmu_to_cd(dev_get_drvdata(dev)); + struct arm_ni_event_attr *eattr; + + eattr = container_of(attr, typeof(*eattr), attr.attr); + + cd_for_each_unit(cd, unit) { + if (unit->type == eattr->type && unit->ns) + return attr->mode; + } + + return 0; +} + +static struct attribute *arm_ni_event_attrs[] = { + NI_EVENT_ATTR(asni, NI_ASNI), + 
NI_EVENT_ATTR(amni, NI_AMNI), + NI_EVENT_ATTR(cycles, NI_PMU), + NI_EVENT_ATTR(hsni, NI_HSNI), + NI_EVENT_ATTR(hmni, NI_HMNI), + NI_EVENT_ATTR(pmni, NI_PMNI), + NULL +}; + +static const struct attribute_group arm_ni_event_attrs_group = { + .name = "events", + .attrs = arm_ni_event_attrs, + .is_visible = arm_ni_event_attr_is_visible, +}; + +struct arm_ni_format_attr { + struct device_attribute attr; + u64 field; +}; + +#define NI_FORMAT_ATTR(_name, _fld) \ + (&((struct arm_ni_format_attr[]) {{ \ + .attr = __ATTR(_name, 0444, arm_ni_format_show, NULL), \ + .field = _fld, \ + }})[0].attr.attr) + +static ssize_t arm_ni_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ni_format_attr *fmt = container_of(attr, typeof(*fmt), attr); + + return sysfs_emit(buf, "config:%*pbl\n", 64, &fmt->field); +} + +static struct attribute *arm_ni_format_attrs[] = { + NI_FORMAT_ATTR(type, NI_CONFIG_TYPE), + NI_FORMAT_ATTR(nodeid, NI_CONFIG_NODEID), + NI_FORMAT_ATTR(eventid, NI_CONFIG_EVENTID), + NULL +}; + +static const struct attribute_group arm_ni_format_attrs_group = { + .name = "format", + .attrs = arm_ni_format_attrs, +}; + +static ssize_t arm_ni_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ni_cd *cd = pmu_to_cd(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(cd->cpu)); +} + +static struct device_attribute arm_ni_cpumask_attr = + __ATTR(cpumask, 0444, arm_ni_cpumask_show, NULL); + +static ssize_t arm_ni_identifier_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ni *ni = cd_to_ni(pmu_to_cd(dev_get_drvdata(dev))); + u32 reg = readl_relaxed(ni->base + NI_PERIPHERAL_ID2); + int version = FIELD_GET(NI_PIDR2_VERSION, reg); + + return sysfs_emit(buf, "%03x%02x\n", ni->part, version); +} + +static struct device_attribute arm_ni_identifier_attr = + __ATTR(identifier, 0444, arm_ni_identifier_show, NULL); + +static struct attribute *arm_ni_other_attrs[] = { + &arm_ni_cpumask_attr.attr, + &arm_ni_identifier_attr.attr, + NULL +}; + +static const struct attribute_group arm_ni_other_attr_group = { + .attrs = arm_ni_other_attrs, + NULL +}; + +static const struct attribute_group *arm_ni_attr_groups[] = { + &arm_ni_event_attrs_group, + &arm_ni_format_attrs_group, + &arm_ni_other_attr_group, + NULL +}; + +static void arm_ni_pmu_enable(struct pmu *pmu) +{ + writel_relaxed(NI_PMCR_ENABLE, pmu_to_cd(pmu)->pmu_base + NI_PMCR); +} + +static void arm_ni_pmu_disable(struct pmu *pmu) +{ + writel_relaxed(0, pmu_to_cd(pmu)->pmu_base + NI_PMCR); +} + +struct arm_ni_val { + unsigned int evcnt; + unsigned int ccnt; +}; + +static bool arm_ni_val_count_event(struct perf_event *evt, struct arm_ni_val *val) +{ + if (is_software_event(evt)) + return true; + + if (NI_EVENT_TYPE(evt) == NI_PMU) { + val->ccnt++; + return val->ccnt <= 1; + } + + val->evcnt++; + return val->evcnt <= NI_NUM_COUNTERS; +} + +static int arm_ni_validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct arm_ni_val val = { 0 }; + + if (leader == event) + return 0; + + arm_ni_val_count_event(event, &val); + if (!arm_ni_val_count_event(leader, &val)) + return -EINVAL; + + for_each_sibling_event(sibling, leader) { + if (!arm_ni_val_count_event(sibling, &val)) + return -EINVAL; + } + return 0; +} + +static int arm_ni_event_init(struct perf_event *event) +{ + struct arm_ni_cd *cd = pmu_to_cd(event->pmu); + + if (event->attr.type != event->pmu->type) + return -ENOENT; 
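A minimal user-space sketch, not part of this patch, of how the format fields declared above (type in config[15:0], nodeid in config[31:16], eventid in config[47:32]) pack into a raw perf event config for this PMU; the node id and event number are invented for illustration, and the PMU instance name follows the "arm_ni_%d_cd_%d" pattern registered later in this driver.

/* Illustrative only: pack type/nodeid/eventid per the format attrs above. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t type    = 0x4;		/* NI_ASNI, per the node-type enum above */
	uint64_t nodeid  = 0x12;	/* hypothetical node id */
	uint64_t eventid = 0x1;		/* hypothetical event number */
	uint64_t config  = type | (nodeid << 16) | (eventid << 32);

	/* e.g.: perf stat -e arm_ni_0_cd_0/type=0x4,nodeid=0x12,eventid=0x1/ */
	printf("raw config = 0x%" PRIx64 "\n", config);
	return 0;
}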
+ + if (is_sampling_event(event)) + return -EINVAL; + + event->cpu = cd->cpu; + if (NI_EVENT_TYPE(event) == NI_PMU) + return arm_ni_validate_group(event); + + cd_for_each_unit(cd, unit) { + if (unit->type == NI_EVENT_TYPE(event) && + unit->id == NI_EVENT_NODEID(event) && unit->ns) { + event->hw.config_base = (unsigned long)unit; + return arm_ni_validate_group(event); + } + } + return -EINVAL; +} + +static u64 arm_ni_read_ccnt(struct arm_ni_cd *cd) +{ + u64 l, u_old, u_new; + int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */ + + u_new = readl_relaxed(cd->pmu_base + NI_PMCCNTR_U); + do { + u_old = u_new; + l = readl_relaxed(cd->pmu_base + NI_PMCCNTR_L); + u_new = readl_relaxed(cd->pmu_base + NI_PMCCNTR_U); + } while (u_new != u_old && --retries); + WARN_ON(!retries); + + return (u_new << 32) | l; +} + +static void arm_ni_event_read(struct perf_event *event) +{ + struct arm_ni_cd *cd = pmu_to_cd(event->pmu); + struct hw_perf_event *hw = &event->hw; + u64 count, prev; + bool ccnt = hw->idx == NI_CCNT_IDX; + + do { + prev = local64_read(&hw->prev_count); + if (ccnt) + count = arm_ni_read_ccnt(cd); + else + count = readl_relaxed(cd->pmu_base + NI_PMEVCNTR(hw->idx)); + } while (local64_cmpxchg(&hw->prev_count, prev, count) != prev); + + count -= prev; + if (!ccnt) + count = (u32)count; + local64_add(count, &event->count); +} + +static void arm_ni_event_start(struct perf_event *event, int flags) +{ + struct arm_ni_cd *cd = pmu_to_cd(event->pmu); + + writel_relaxed(1U << event->hw.idx, cd->pmu_base + NI_PMCNTENSET); +} + +static void arm_ni_event_stop(struct perf_event *event, int flags) +{ + struct arm_ni_cd *cd = pmu_to_cd(event->pmu); + + writel_relaxed(1U << event->hw.idx, cd->pmu_base + NI_PMCNTENCLR); + if (flags & PERF_EF_UPDATE) + arm_ni_event_read(event); +} + +static void arm_ni_init_ccnt(struct arm_ni_cd *cd) +{ + local64_set(&cd->ccnt->hw.prev_count, S64_MIN); + lo_hi_writeq_relaxed(S64_MIN, cd->pmu_base + NI_PMCCNTR_L); +} + +static void arm_ni_init_evcnt(struct arm_ni_cd *cd, int idx) +{ + local64_set(&cd->evcnt[idx]->hw.prev_count, S32_MIN); + writel_relaxed(S32_MIN, cd->pmu_base + NI_PMEVCNTR(idx)); +} + +static int arm_ni_event_add(struct perf_event *event, int flags) +{ + struct arm_ni_cd *cd = pmu_to_cd(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ni_unit *unit; + enum ni_node_type type = NI_EVENT_TYPE(event); + u32 reg; + + if (type == NI_PMU) { + if (cd->ccnt) + return -ENOSPC; + hw->idx = NI_CCNT_IDX; + cd->ccnt = event; + arm_ni_init_ccnt(cd); + } else { + hw->idx = 0; + while (cd->evcnt[hw->idx]) { + if (++hw->idx == NI_NUM_COUNTERS) + return -ENOSPC; + } + cd->evcnt[hw->idx] = event; + unit = (void *)hw->config_base; + unit->event[hw->idx] = NI_EVENT_EVENTID(event); + arm_ni_init_evcnt(cd, hw->idx); + lo_hi_writeq_relaxed(le64_to_cpu(unit->pmusel), unit->pmusela); + + reg = FIELD_PREP(NI_PMEVTYPER_NODE_TYPE, type) | + FIELD_PREP(NI_PMEVTYPER_NODE_ID, NI_EVENT_NODEID(event)); + writel_relaxed(reg, cd->pmu_base + NI_PMEVTYPER(hw->idx)); + } + if (flags & PERF_EF_START) + arm_ni_event_start(event, 0); + return 0; +} + +static void arm_ni_event_del(struct perf_event *event, int flags) +{ + struct arm_ni_cd *cd = pmu_to_cd(event->pmu); + struct hw_perf_event *hw = &event->hw; + + arm_ni_event_stop(event, PERF_EF_UPDATE); + + if (hw->idx == NI_CCNT_IDX) + cd->ccnt = NULL; + else + cd->evcnt[hw->idx] = NULL; +} + +static irqreturn_t arm_ni_handle_irq(int irq, void *dev_id) +{ + struct arm_ni_cd *cd = dev_id; + irqreturn_t ret = 
IRQ_NONE; + u32 reg = readl_relaxed(cd->pmu_base + NI_PMOVSCLR); + + if (reg & (1U << NI_CCNT_IDX)) { + ret = IRQ_HANDLED; + if (!(WARN_ON(!cd->ccnt))) { + arm_ni_event_read(cd->ccnt); + arm_ni_init_ccnt(cd); + } + } + for (int i = 0; i < NI_NUM_COUNTERS; i++) { + if (!(reg & (1U << i))) + continue; + ret = IRQ_HANDLED; + if (!(WARN_ON(!cd->evcnt[i]))) { + arm_ni_event_read(cd->evcnt[i]); + arm_ni_init_evcnt(cd, i); + } + } + writel_relaxed(reg, cd->pmu_base + NI_PMOVSCLR); + return ret; +} + +static int arm_ni_init_cd(struct arm_ni *ni, struct arm_ni_node *node, u64 res_start) +{ + struct arm_ni_cd *cd = ni->cds + node->id; + const char *name; + int err; + + cd->id = node->id; + cd->num_units = node->num_components; + cd->units = devm_kcalloc(ni->dev, cd->num_units, sizeof(*(cd->units)), GFP_KERNEL); + if (!cd->units) + return -ENOMEM; + + for (int i = 0; i < cd->num_units; i++) { + u32 reg = readl_relaxed(node->base + NI_CHILD_PTR(i)); + void __iomem *unit_base = ni->base + reg; + struct arm_ni_unit *unit = cd->units + i; + + reg = readl_relaxed(unit_base + NI_NODE_TYPE); + unit->type = FIELD_GET(NI_NODE_TYPE_NODE_TYPE, reg); + unit->id = FIELD_GET(NI_NODE_TYPE_NODE_ID, reg); + + switch (unit->type) { + case NI_PMU: + reg = readl_relaxed(unit_base + NI_PMCFGR); + if (!reg) { + dev_info(ni->dev, "No access to PMU %d\n", cd->id); + devm_kfree(ni->dev, cd->units); + return 0; + } + unit->ns = true; + cd->pmu_base = unit_base; + break; + case NI_ASNI: + case NI_AMNI: + case NI_HSNI: + case NI_HMNI: + case NI_PMNI: + unit->pmusela = unit_base + NI700_PMUSELA; + writel_relaxed(1, unit->pmusela); + if (readl_relaxed(unit->pmusela) != 1) + dev_info(ni->dev, "No access to node 0x%04x%04x\n", unit->id, unit->type); + else + unit->ns = true; + break; + default: + /* + * e.g. FMU - thankfully bits 3:2 of FMU_ERR_FR0 are RES0 so + * can't alias any of the leaf node types we're looking for. 
+ */ + dev_dbg(ni->dev, "Mystery node 0x%04x%04x\n", unit->id, unit->type); + break; + } + } + + res_start += cd->pmu_base - ni->base; + if (!devm_request_mem_region(ni->dev, res_start, SZ_4K, dev_name(ni->dev))) { + dev_err(ni->dev, "Failed to request PMU region 0x%llx\n", res_start); + return -EBUSY; + } + + writel_relaxed(NI_PMCR_RESET_CCNT | NI_PMCR_RESET_EVCNT, + cd->pmu_base + NI_PMCR); + writel_relaxed(U32_MAX, cd->pmu_base + NI_PMCNTENCLR); + writel_relaxed(U32_MAX, cd->pmu_base + NI_PMOVSCLR); + writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENSET); + + cd->irq = platform_get_irq(to_platform_device(ni->dev), cd->id); + if (cd->irq < 0) + return cd->irq; + + err = devm_request_irq(ni->dev, cd->irq, arm_ni_handle_irq, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(ni->dev), cd); + if (err) + return err; + + cd->cpu = cpumask_local_spread(0, dev_to_node(ni->dev)); + cd->pmu = (struct pmu) { + .module = THIS_MODULE, + .parent = ni->dev, + .attr_groups = arm_ni_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = arm_ni_pmu_enable, + .pmu_disable = arm_ni_pmu_disable, + .event_init = arm_ni_event_init, + .add = arm_ni_event_add, + .del = arm_ni_event_del, + .start = arm_ni_event_start, + .stop = arm_ni_event_stop, + .read = arm_ni_event_read, + }; + + name = devm_kasprintf(ni->dev, GFP_KERNEL, "arm_ni_%d_cd_%d", ni->id, cd->id); + if (!name) + return -ENOMEM; + + err = cpuhp_state_add_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node); + if (err) + return err; + + err = perf_pmu_register(&cd->pmu, name, -1); + if (err) + cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node); + + return err; +} + +static void arm_ni_probe_domain(void __iomem *base, struct arm_ni_node *node) +{ + u32 reg = readl_relaxed(base + NI_NODE_TYPE); + + node->base = base; + node->type = FIELD_GET(NI_NODE_TYPE_NODE_TYPE, reg); + node->id = FIELD_GET(NI_NODE_TYPE_NODE_ID, reg); + node->num_components = readl_relaxed(base + NI_CHILD_NODE_INFO); +} + +static int arm_ni_probe(struct platform_device *pdev) +{ + struct arm_ni_node cfg, vd, pd, cd; + struct arm_ni *ni; + struct resource *res; + void __iomem *base; + static atomic_t id; + int num_cds; + u32 reg, part; + + /* + * We want to map the whole configuration space for ease of discovery, + * but the PMU pages are the only ones for which we can honestly claim + * exclusive ownership, so we'll request them explicitly once found. 
+ */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!base) + return -ENOMEM; + + arm_ni_probe_domain(base, &cfg); + if (cfg.type != NI_GLOBAL) + return -ENODEV; + + reg = readl_relaxed(cfg.base + NI_PERIPHERAL_ID0); + part = FIELD_GET(NI_PIDR0_PART_7_0, reg); + reg = readl_relaxed(cfg.base + NI_PERIPHERAL_ID1); + part |= FIELD_GET(NI_PIDR1_PART_11_8, reg) << 8; + + switch (part) { + case PART_NI_700: + case PART_NI_710AE: + break; + default: + dev_WARN(&pdev->dev, "Unknown part number: 0x%03x, this may go badly\n", part); + break; + } + + num_cds = 0; + for (int v = 0; v < cfg.num_components; v++) { + reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v)); + arm_ni_probe_domain(base + reg, &vd); + for (int p = 0; p < vd.num_components; p++) { + reg = readl_relaxed(vd.base + NI_CHILD_PTR(p)); + arm_ni_probe_domain(base + reg, &pd); + num_cds += pd.num_components; + } + } + + ni = devm_kzalloc(&pdev->dev, struct_size(ni, cds, num_cds), GFP_KERNEL); + if (!ni) + return -ENOMEM; + + ni->dev = &pdev->dev; + ni->base = base; + ni->num_cds = num_cds; + ni->part = part; + ni->id = atomic_fetch_inc(&id); + + for (int v = 0; v < cfg.num_components; v++) { + reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v)); + arm_ni_probe_domain(base + reg, &vd); + for (int p = 0; p < vd.num_components; p++) { + reg = readl_relaxed(vd.base + NI_CHILD_PTR(p)); + arm_ni_probe_domain(base + reg, &pd); + for (int c = 0; c < pd.num_components; c++) { + int ret; + + reg = readl_relaxed(pd.base + NI_CHILD_PTR(c)); + arm_ni_probe_domain(base + reg, &cd); + ret = arm_ni_init_cd(ni, &cd, res->start); + if (ret) + return ret; + } + } + } + + return 0; +} + +static void arm_ni_remove(struct platform_device *pdev) +{ + struct arm_ni *ni = platform_get_drvdata(pdev); + + for (int i = 0; i < ni->num_cds; i++) { + struct arm_ni_cd *cd = ni->cds + i; + + if (!cd->pmu_base) + continue; + + writel_relaxed(0, cd->pmu_base + NI_PMCR); + writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR); + perf_pmu_unregister(&cd->pmu); + cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node); + } +} + +#ifdef CONFIG_OF +static const struct of_device_id arm_ni_of_match[] = { + { .compatible = "arm,ni-700" }, + {} +}; +MODULE_DEVICE_TABLE(of, arm_ni_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id arm_ni_acpi_match[] = { + { "ARMHCB70" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, arm_ni_acpi_match); +#endif + +static struct platform_driver arm_ni_driver = { + .driver = { + .name = "arm-ni", + .of_match_table = of_match_ptr(arm_ni_of_match), + .acpi_match_table = ACPI_PTR(arm_ni_acpi_match), + }, + .probe = arm_ni_probe, + .remove = arm_ni_remove, +}; + +static void arm_ni_pmu_migrate(struct arm_ni_cd *cd, unsigned int cpu) +{ + perf_pmu_migrate_context(&cd->pmu, cd->cpu, cpu); + irq_set_affinity(cd->irq, cpumask_of(cpu)); + cd->cpu = cpu; +} + +static int arm_ni_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct arm_ni_cd *cd; + int node; + + cd = hlist_entry_safe(cpuhp_node, struct arm_ni_cd, cpuhp_node); + node = dev_to_node(cd_to_ni(cd)->dev); + if (cpu_to_node(cd->cpu) != node && cpu_to_node(cpu) == node) + arm_ni_pmu_migrate(cd, cpu); + return 0; +} + +static int arm_ni_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct arm_ni_cd *cd; + unsigned int target; + int node; + + cd = hlist_entry_safe(cpuhp_node, struct arm_ni_cd, cpuhp_node); + if (cpu != cd->cpu) + return 0; + + node = 
dev_to_node(cd_to_ni(cd)->dev); + target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target < nr_cpu_ids) + arm_ni_pmu_migrate(cd, target); + return 0; +} + +static int __init arm_ni_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/arm/ni:online", + arm_ni_pmu_online_cpu, + arm_ni_pmu_offline_cpu); + if (ret < 0) + return ret; + + arm_ni_hp_state = ret; + + ret = platform_driver_register(&arm_ni_driver); + if (ret) + cpuhp_remove_multi_state(arm_ni_hp_state); + return ret; +} + +static void __exit arm_ni_exit(void) +{ + platform_driver_unregister(&arm_ni_driver); + cpuhp_remove_multi_state(arm_ni_hp_state); +} + +module_init(arm_ni_init); +module_exit(arm_ni_exit); + +MODULE_AUTHOR("Robin Murphy "); +MODULE_DESCRIPTION("Arm NI-700 PMU driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 8458fe2cebb4fb..398cce3d76fc44 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -522,7 +522,7 @@ static void armpmu_enable(struct pmu *pmu) { struct arm_pmu *armpmu = to_arm_pmu(pmu); struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events); + bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS); /* For task-bound events we may be called on other CPUs */ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) @@ -742,7 +742,7 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd) struct perf_event *event; int idx; - for (idx = 0; idx < armpmu->num_events; idx++) { + for_each_set_bit(idx, armpmu->cntr_mask, ARMPMU_MAX_HWEVENTS) { event = hw_events->events[idx]; if (!event) continue; @@ -772,7 +772,7 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, { struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb); struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events); + bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS); if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) return NOTIFY_DONE; @@ -924,8 +924,9 @@ int armpmu_register(struct arm_pmu *pmu) if (ret) goto out_destroy; - pr_info("enabled with %s PMU driver, %d counters available%s\n", - pmu->name, pmu->num_events, + pr_info("enabled with %s PMU driver, %d (%*pb) counters available%s\n", + pmu->name, bitmap_weight(pmu->cntr_mask, ARMPMU_MAX_HWEVENTS), + ARMPMU_MAX_HWEVENTS, &pmu->cntr_mask, has_nmi ? 
", using NMIs" : ""); kvm_host_pmu_init(pmu); diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 4b1a9a92ea117b..118170a5cedefa 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c @@ -59,7 +59,7 @@ static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq) static bool pmu_has_irq_affinity(struct device_node *node) { - return !!of_find_property(node, "interrupt-affinity", NULL); + return of_property_present(node, "interrupt-affinity"); } static int pmu_parse_irq_affinity(struct device *dev, int i) diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index d246840797b676..0afe02f879b45a 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -451,13 +451,6 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { .attrs = armv8_pmuv3_caps_attrs, }; -/* - * Perf Events' indices - */ -#define ARMV8_IDX_CYCLE_COUNTER 0 -#define ARMV8_IDX_COUNTER0 1 -#define ARMV8_IDX_CYCLE_COUNTER_USER 32 - /* * We unconditionally enable ARMv8.5-PMU long event counter support * (64-bit events) where supported. Indicate if this arm_pmu has long @@ -489,19 +482,12 @@ static bool armv8pmu_event_is_chained(struct perf_event *event) return !armv8pmu_event_has_user_read(event) && armv8pmu_event_is_64bit(event) && !armv8pmu_has_long_event(cpu_pmu) && - (idx != ARMV8_IDX_CYCLE_COUNTER); + (idx < ARMV8_PMU_MAX_GENERAL_COUNTERS); } /* * ARMv8 low level PMU access */ - -/* - * Perf Event to low level counters mapping - */ -#define ARMV8_IDX_TO_COUNTER(x) \ - (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) - static u64 armv8pmu_pmcr_read(void) { return read_pmcr(); @@ -514,21 +500,19 @@ static void armv8pmu_pmcr_write(u64 val) write_pmcr(val); } -static int armv8pmu_has_overflowed(u32 pmovsr) +static int armv8pmu_has_overflowed(u64 pmovsr) { - return pmovsr & ARMV8_PMU_OVERFLOWED_MASK; + return !!(pmovsr & ARMV8_PMU_OVERFLOWED_MASK); } -static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) +static int armv8pmu_counter_has_overflowed(u64 pmnc, int idx) { - return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); + return !!(pmnc & BIT(idx)); } static u64 armv8pmu_read_evcntr(int idx) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - - return read_pmevcntrn(counter); + return read_pmevcntrn(idx); } static u64 armv8pmu_read_hw_counter(struct perf_event *event) @@ -557,7 +541,7 @@ static bool armv8pmu_event_needs_bias(struct perf_event *event) return false; if (armv8pmu_has_long_event(cpu_pmu) || - idx == ARMV8_IDX_CYCLE_COUNTER) + idx >= ARMV8_PMU_MAX_GENERAL_COUNTERS) return true; return false; @@ -585,8 +569,10 @@ static u64 armv8pmu_read_counter(struct perf_event *event) int idx = hwc->idx; u64 value; - if (idx == ARMV8_IDX_CYCLE_COUNTER) + if (idx == ARMV8_PMU_CYCLE_IDX) value = read_pmccntr(); + else if (idx == ARMV8_PMU_INSTR_IDX) + value = read_pmicntr(); else value = armv8pmu_read_hw_counter(event); @@ -595,9 +581,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event) static void armv8pmu_write_evcntr(int idx, u64 value) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); - - write_pmevcntrn(counter, value); + write_pmevcntrn(idx, value); } static void armv8pmu_write_hw_counter(struct perf_event *event, @@ -620,15 +604,16 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) value = armv8pmu_bias_long_counter(event, value); - if (idx == ARMV8_IDX_CYCLE_COUNTER) + if (idx == ARMV8_PMU_CYCLE_IDX) write_pmccntr(value); + else if (idx == ARMV8_PMU_INSTR_IDX) + write_pmicntr(value); else 
armv8pmu_write_hw_counter(event, value); } static void armv8pmu_write_evtype(int idx, unsigned long val) { - u32 counter = ARMV8_IDX_TO_COUNTER(idx); unsigned long mask = ARMV8_PMU_EVTYPE_EVENT | ARMV8_PMU_INCLUDE_EL2 | ARMV8_PMU_EXCLUDE_EL0 | @@ -638,7 +623,7 @@ static void armv8pmu_write_evtype(int idx, unsigned long val) mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH; val &= mask; - write_pmevtypern(counter, val); + write_pmevtypern(idx, val); } static void armv8pmu_write_event_type(struct perf_event *event) @@ -658,24 +643,26 @@ static void armv8pmu_write_event_type(struct perf_event *event) armv8pmu_write_evtype(idx - 1, hwc->config_base); armv8pmu_write_evtype(idx, chain_evt); } else { - if (idx == ARMV8_IDX_CYCLE_COUNTER) + if (idx == ARMV8_PMU_CYCLE_IDX) write_pmccfiltr(hwc->config_base); + else if (idx == ARMV8_PMU_INSTR_IDX) + write_pmicfiltr(hwc->config_base); else armv8pmu_write_evtype(idx, hwc->config_base); } } -static u32 armv8pmu_event_cnten_mask(struct perf_event *event) +static u64 armv8pmu_event_cnten_mask(struct perf_event *event) { - int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); - u32 mask = BIT(counter); + int counter = event->hw.idx; + u64 mask = BIT(counter); if (armv8pmu_event_is_chained(event)) mask |= BIT(counter - 1); return mask; } -static void armv8pmu_enable_counter(u32 mask) +static void armv8pmu_enable_counter(u64 mask) { /* * Make sure event configuration register writes are visible before we @@ -688,7 +675,7 @@ static void armv8pmu_enable_counter(u32 mask) static void armv8pmu_enable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - u32 mask = armv8pmu_event_cnten_mask(event); + u64 mask = armv8pmu_event_cnten_mask(event); kvm_set_pmu_events(mask, attr); @@ -697,7 +684,7 @@ static void armv8pmu_enable_event_counter(struct perf_event *event) armv8pmu_enable_counter(mask); } -static void armv8pmu_disable_counter(u32 mask) +static void armv8pmu_disable_counter(u64 mask) { write_pmcntenclr(mask); /* @@ -710,7 +697,7 @@ static void armv8pmu_disable_counter(u32 mask) static void armv8pmu_disable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - u32 mask = armv8pmu_event_cnten_mask(event); + u64 mask = armv8pmu_event_cnten_mask(event); kvm_clr_pmu_events(mask); @@ -719,18 +706,17 @@ static void armv8pmu_disable_event_counter(struct perf_event *event) armv8pmu_disable_counter(mask); } -static void armv8pmu_enable_intens(u32 mask) +static void armv8pmu_enable_intens(u64 mask) { write_pmintenset(mask); } static void armv8pmu_enable_event_irq(struct perf_event *event) { - u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); - armv8pmu_enable_intens(BIT(counter)); + armv8pmu_enable_intens(BIT(event->hw.idx)); } -static void armv8pmu_disable_intens(u32 mask) +static void armv8pmu_disable_intens(u64 mask) { write_pmintenclr(mask); isb(); @@ -741,13 +727,12 @@ static void armv8pmu_disable_intens(u32 mask) static void armv8pmu_disable_event_irq(struct perf_event *event) { - u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); - armv8pmu_disable_intens(BIT(counter)); + armv8pmu_disable_intens(BIT(event->hw.idx)); } -static u32 armv8pmu_getreset_flags(void) +static u64 armv8pmu_getreset_flags(void) { - u32 value; + u64 value; /* Read */ value = read_pmovsclr(); @@ -786,9 +771,12 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); /* Clear any unused counters to avoid leaking their contents */ - 
for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) { - if (i == ARMV8_IDX_CYCLE_COUNTER) + for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask, + ARMPMU_MAX_HWEVENTS) { + if (i == ARMV8_PMU_CYCLE_IDX) write_pmccntr(0); + else if (i == ARMV8_PMU_INSTR_IDX) + write_pmicntr(0); else armv8pmu_write_evcntr(i, 0); } @@ -842,7 +830,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu) static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) { - u32 pmovsr; + u64 pmovsr; struct perf_sample_data data; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; @@ -869,7 +857,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) * to prevent skews in group events. */ armv8pmu_stop(cpu_pmu); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -908,7 +896,7 @@ static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc, { int idx; - for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS) { if (!test_and_set_bit(idx, cpuc->used_mask)) return idx; } @@ -924,7 +912,9 @@ static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc, * Chaining requires two consecutive event counters, where * the lower idx must be even. */ - for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS) { + if (!(idx & 0x1)) + continue; if (!test_and_set_bit(idx, cpuc->used_mask)) { /* Check if the preceding even counter is available */ if (!test_and_set_bit(idx - 1, cpuc->used_mask)) @@ -946,14 +936,27 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, /* Always prefer to place a cycle counter into the cycle counter. */ if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) && !armv8pmu_event_get_threshold(&event->attr)) { - if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) - return ARMV8_IDX_CYCLE_COUNTER; + if (!test_and_set_bit(ARMV8_PMU_CYCLE_IDX, cpuc->used_mask)) + return ARMV8_PMU_CYCLE_IDX; else if (armv8pmu_event_is_64bit(event) && armv8pmu_event_want_user_access(event) && !armv8pmu_has_long_event(cpu_pmu)) return -EAGAIN; } + /* + * Always prefer to place a instruction counter into the instruction counter, + * but don't expose the instruction counter to userspace access as userspace + * may not know how to handle it. + */ + if ((evtype == ARMV8_PMUV3_PERFCTR_INST_RETIRED) && + !armv8pmu_event_get_threshold(&event->attr) && + test_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask) && + !armv8pmu_event_want_user_access(event)) { + if (!test_and_set_bit(ARMV8_PMU_INSTR_IDX, cpuc->used_mask)) + return ARMV8_PMU_INSTR_IDX; + } + /* * Otherwise use events counters */ @@ -978,15 +981,7 @@ static int armv8pmu_user_event_idx(struct perf_event *event) if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event)) return 0; - /* - * We remap the cycle counter index to 32 to - * match the offset applied to the rest of - * the counter indices. 
- */ - if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER) - return ARMV8_IDX_CYCLE_COUNTER_USER; - - return event->hw.idx; + return event->hw.idx + 1; } /* @@ -1061,14 +1056,16 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, static void armv8pmu_reset(void *info) { struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; - u64 pmcr; + u64 pmcr, mask; + + bitmap_to_arr64(&mask, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS); /* The counter and interrupt enable registers are unknown at reset. */ - armv8pmu_disable_counter(U32_MAX); - armv8pmu_disable_intens(U32_MAX); + armv8pmu_disable_counter(mask); + armv8pmu_disable_intens(mask); /* Clear the counters we flip at guest entry/exit */ - kvm_clr_pmu_events(U32_MAX); + kvm_clr_pmu_events(mask); /* * Initialize & Reset PMNC. Request overflow interrupt for @@ -1089,14 +1086,14 @@ static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu, if (event->attr.type == PERF_TYPE_HARDWARE && event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) { - if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED, - armpmu->pmceid_bitmap)) - return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED; - if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED, armpmu->pmceid_bitmap)) return ARMV8_PMUV3_PERFCTR_BR_RETIRED; + if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED, + armpmu->pmceid_bitmap)) + return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED; + return HW_OP_UNSUPPORTED; } @@ -1211,10 +1208,15 @@ static void __armv8pmu_probe_pmu(void *info) probe->present = true; /* Read the nb of CNTx counters supported from PMNC */ - cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read()); + bitmap_set(cpu_pmu->cntr_mask, + 0, FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read())); /* Add the CPU cycles counter */ - cpu_pmu->num_events += 1; + set_bit(ARMV8_PMU_CYCLE_IDX, cpu_pmu->cntr_mask); + + /* Add the CPU instructions counter */ + if (pmuv3_has_icntr()) + set_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask); pmceid[0] = pmceid_raw[0] = read_pmceid0(); pmceid[1] = pmceid_raw[1] = read_pmceid1(); diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 9100d82bfabc0d..3569050f9cf375 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -41,7 +41,7 @@ /* * Cache if the event is allowed to trace Context information. - * This allows us to perform the check, i.e, perfmon_capable(), + * This allows us to perform the check, i.e, perf_allow_kernel(), * in the context of the event owner, once, during the event_init(). 
*/ #define SPE_PMU_HW_FLAGS_CX 0x00001 @@ -50,7 +50,7 @@ static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_C static void set_spe_event_has_cx(struct perf_event *event) { - if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable()) + if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel(&event->attr)) event->hw.flags |= SPE_PMU_HW_FLAGS_CX; } @@ -745,9 +745,8 @@ static int arm_spe_pmu_event_init(struct perf_event *event) set_spe_event_has_cx(event); reg = arm_spe_event_to_pmscr(event); - if (!perfmon_capable() && - (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))) - return -EACCES; + if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)) + return perf_allow_kernel(&event->attr); return 0; } diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c index 0bb685b4bac579..b09615bb2bb251 100644 --- a/drivers/perf/arm_v6_pmu.c +++ b/drivers/perf/arm_v6_pmu.c @@ -64,6 +64,7 @@ enum armv6_counters { ARMV6_CYCLE_COUNTER = 0, ARMV6_COUNTER0, ARMV6_COUNTER1, + ARMV6_NUM_COUNTERS }; /* @@ -254,7 +255,7 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu) */ armv6_pmcr_write(pmcr); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV6_NUM_COUNTERS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -391,7 +392,8 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->start = armv6pmu_start; cpu_pmu->stop = armv6pmu_stop; cpu_pmu->map_event = armv6_map_event; - cpu_pmu->num_events = 3; + + bitmap_set(cpu_pmu->cntr_mask, 0, ARMV6_NUM_COUNTERS); } static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu) diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c index 928ac3d626ede6..420cadd108e798 100644 --- a/drivers/perf/arm_v7_pmu.c +++ b/drivers/perf/arm_v7_pmu.c @@ -649,24 +649,12 @@ static struct attribute_group armv7_pmuv2_events_attr_group = { /* * Perf Events' indices */ -#define ARMV7_IDX_CYCLE_COUNTER 0 -#define ARMV7_IDX_COUNTER0 1 -#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ - (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) - -#define ARMV7_MAX_COUNTERS 32 -#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) - +#define ARMV7_IDX_CYCLE_COUNTER 31 +#define ARMV7_IDX_COUNTER_MAX 31 /* * ARMv7 low level PMNC access */ -/* - * Perf Event to low level counters mapping - */ -#define ARMV7_IDX_TO_COUNTER(x) \ - (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK) - /* * Per-CPU PMNC: config reg */ @@ -725,19 +713,17 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc) static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) { - return idx >= ARMV7_IDX_CYCLE_COUNTER && - idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); + return test_bit(idx, cpu_pmu->cntr_mask); } static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) { - return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); + return pmnc & BIT(idx); } static inline void armv7_pmnc_select_counter(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); + asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (idx)); isb(); } @@ -787,29 +773,25 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val) static inline void armv7_pmnc_enable_counter(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(idx))); } static inline void armv7_pmnc_disable_counter(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, 
%0, c9, c12, 2" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(idx))); } static inline void armv7_pmnc_enable_intens(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(idx))); } static inline void armv7_pmnc_disable_intens(int idx) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); - asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(idx))); isb(); /* Clear the overflow flag in case an interrupt is pending. */ - asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter))); + asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(idx))); isb(); } @@ -853,15 +835,12 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); pr_info("CCNT =0x%08x\n", val); - for (cnt = ARMV7_IDX_COUNTER0; - cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { + for_each_set_bit(cnt, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) { armv7_pmnc_select_counter(cnt); asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); - pr_info("CNT[%d] count =0x%08x\n", - ARMV7_IDX_TO_COUNTER(cnt), val); + pr_info("CNT[%d] count =0x%08x\n", cnt, val); asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); - pr_info("CNT[%d] evtsel=0x%08x\n", - ARMV7_IDX_TO_COUNTER(cnt), val); + pr_info("CNT[%d] evtsel=0x%08x\n", cnt, val); } } #endif @@ -958,7 +937,7 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) */ regs = get_irq_regs(); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -1027,7 +1006,7 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, * For anything other than a cycle counter, try and use * the events counters */ - for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) { if (!test_and_set_bit(idx, cpuc->used_mask)) return idx; } @@ -1073,7 +1052,7 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event, static void armv7pmu_reset(void *info) { struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; - u32 idx, nb_cnt = cpu_pmu->num_events, val; + u32 idx, val; if (cpu_pmu->secure_access) { asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val)); @@ -1082,7 +1061,7 @@ static void armv7pmu_reset(void *info) } /* The counter and interrupt enable registers are unknown at reset. 
*/ - for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) { armv7_pmnc_disable_counter(idx); armv7_pmnc_disable_intens(idx); } @@ -1161,20 +1140,22 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu) static void armv7_read_num_pmnc_events(void *info) { - int *nb_cnt = info; + int nb_cnt; + struct arm_pmu *cpu_pmu = info; /* Read the nb of CNTx counters supported from PMNC */ - *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; + nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; + bitmap_set(cpu_pmu->cntr_mask, 0, nb_cnt); /* Add the CPU cycles counter */ - *nb_cnt += 1; + set_bit(ARMV7_IDX_CYCLE_COUNTER, cpu_pmu->cntr_mask); } static int armv7_probe_num_events(struct arm_pmu *arm_pmu) { return smp_call_function_any(&arm_pmu->supported_cpus, armv7_read_num_pmnc_events, - &arm_pmu->num_events, 1); + arm_pmu, 1); } static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) @@ -1524,7 +1505,7 @@ static void krait_pmu_reset(void *info) { u32 vval, fval; struct arm_pmu *cpu_pmu = info; - u32 idx, nb_cnt = cpu_pmu->num_events; + u32 idx; armv7pmu_reset(info); @@ -1538,7 +1519,7 @@ static void krait_pmu_reset(void *info) venum_post_pmresr(vval, fval); /* Reset PMxEVNCTCR to sane default */ - for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) { armv7_pmnc_select_counter(idx); asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); } @@ -1562,7 +1543,7 @@ static int krait_event_to_bit(struct perf_event *event, unsigned int region, * Lower bits are reserved for use by the counters (see * armv7pmu_get_event_idx() for more info) */ - bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1; + bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX); return bit; } @@ -1845,7 +1826,7 @@ static void scorpion_pmu_reset(void *info) { u32 vval, fval; struct arm_pmu *cpu_pmu = info; - u32 idx, nb_cnt = cpu_pmu->num_events; + u32 idx; armv7pmu_reset(info); @@ -1860,7 +1841,7 @@ static void scorpion_pmu_reset(void *info) venum_post_pmresr(vval, fval); /* Reset PMxEVNCTCR to sane default */ - for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) { armv7_pmnc_select_counter(idx); asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); } @@ -1883,7 +1864,7 @@ static int scorpion_event_to_bit(struct perf_event *event, unsigned int region, * Lower bits are reserved for use by the counters (see * armv7pmu_get_event_idx() for more info) */ - bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1; + bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX); return bit; } diff --git a/drivers/perf/arm_xscale_pmu.c b/drivers/perf/arm_xscale_pmu.c index 3d8b72d6b37fdf..638fea9b1263d3 100644 --- a/drivers/perf/arm_xscale_pmu.c +++ b/drivers/perf/arm_xscale_pmu.c @@ -53,6 +53,8 @@ enum xscale_counters { XSCALE_COUNTER2, XSCALE_COUNTER3, }; +#define XSCALE1_NUM_COUNTERS 3 +#define XSCALE2_NUM_COUNTERS 5 static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, @@ -168,7 +170,7 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu) regs = get_irq_regs(); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE1_NUM_COUNTERS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -364,7 +366,8 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->start = xscale1pmu_start; 
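A standalone sketch, not part of this patch, of the counter-mask pattern these conversions share: a plain counter count becomes a bitmap, loops become walks over set bits, and bitmap_weight() recovers the old count. The mask below is invented (six general-purpose counters plus a cycle counter at a fixed high index).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical mask: counters 0-5 plus a cycle counter at bit 31 */
	uint32_t cntr_mask = 0x8000003fu;
	int weight = 0;

	/* open-coded equivalent of for_each_set_bit()/bitmap_weight() */
	for (int idx = 0; idx < 32; idx++) {
		if (cntr_mask & (1u << idx)) {
			weight++;
			printf("counter index %d is implemented\n", idx);
		}
	}
	printf("number of counters = %d\n", weight);
	return 0;
}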
cpu_pmu->stop = xscale1pmu_stop; cpu_pmu->map_event = xscale_map_event; - cpu_pmu->num_events = 3; + + bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE1_NUM_COUNTERS); return 0; } @@ -500,7 +503,7 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu) regs = get_irq_regs(); - for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE2_NUM_COUNTERS) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -719,7 +722,8 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->start = xscale2pmu_start; cpu_pmu->stop = xscale2pmu_stop; cpu_pmu->map_event = xscale_map_event; - cpu_pmu->num_events = 5; + + bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE2_NUM_COUNTERS); return 0; } diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index c5e328f2384194..4ca50f9b6dfed8 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -107,6 +107,7 @@ struct dwc_pcie_vendor_id { static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = { {.vendor_id = PCI_VENDOR_ID_ALIBABA }, + {.vendor_id = PCI_VENDOR_ID_QCOM }, {} /* terminator */ }; @@ -556,10 +557,10 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev) { struct platform_device *plat_dev; struct dwc_pcie_dev_info *dev_info; - u32 bdf; + u32 sbdf; - bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); - plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf, + sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn); + plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf, pdev, sizeof(*pdev)); if (IS_ERR(plat_dev)) @@ -611,15 +612,15 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) struct pci_dev *pdev = plat_dev->dev.platform_data; struct dwc_pcie_pmu *pcie_pmu; char *name; - u32 bdf, val; + u32 sbdf, val; u16 vsec; int ret; vsec = pci_find_vsec_capability(pdev, pdev->vendor, DWC_PCIE_VSEC_RAS_DES_ID); pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); - bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); - name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf); + sbdf = plat_dev->id; + name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf); if (!name) return -ENOMEM; @@ -650,7 +651,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state, &pcie_pmu->cpuhp_node); if (ret) { - pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf); + pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf); return ret; } @@ -663,7 +664,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); if (ret) { - pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf); + pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf); return ret; } ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu, @@ -726,7 +727,6 @@ static struct platform_driver dwc_pcie_pmu_driver = { static int __init dwc_pcie_pmu_init(void) { struct pci_dev *pdev = NULL; - bool found = false; int ret; for_each_pci_dev(pdev) { @@ -738,11 +738,7 @@ static int __init dwc_pcie_pmu_init(void) pci_dev_put(pdev); return ret; } - - found = true; } - if (!found) - return -ENODEV; ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/dwc_pcie_pmu:online", diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c index f06027574a241a..c5394d007b61be 100644 --- a/drivers/perf/hisilicon/hisi_pcie_pmu.c +++ 
b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -141,6 +141,22 @@ static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char } static DEVICE_ATTR_RO(bus); +static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%#04x\n", pcie_pmu->bdf_min); +} +static DEVICE_ATTR_RO(bdf_min); + +static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%#04x\n", pcie_pmu->bdf_max); +} +static DEVICE_ATTR_RO(bdf_max); + static struct hisi_pcie_reg_pair hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off) { @@ -208,7 +224,7 @@ static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event) { u64 port, trig_len, thr_len, len_mode; - u64 reg = HISI_PCIE_INIT_SET; + u64 reg = 0; /* Config HISI_PCIE_EVENT_CTRL according to event. */ reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event)); @@ -452,10 +468,24 @@ static void hisi_pcie_pmu_set_period(struct perf_event *event) struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + u64 orig_cnt, cnt; + + orig_cnt = hisi_pcie_pmu_read_counter(event); local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL); hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL); hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL); + + /* + * The counter maybe unwritable if the target event is unsupported. + * Check this by comparing the counts after setting the period. If + * the counts stay unchanged after setting the period then update + * the hwc->prev_count correctly. Otherwise the final counts user + * get maybe totally wrong. 
+ */ + cnt = hisi_pcie_pmu_read_counter(event); + if (orig_cnt == cnt) + local64_set(&hwc->prev_count, cnt); } static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) @@ -749,6 +779,8 @@ static const struct attribute_group hisi_pcie_pmu_format_group = { static struct attribute *hisi_pcie_pmu_bus_attrs[] = { &dev_attr_bus.attr, + &dev_attr_bdf_max.attr, + &dev_attr_bdf_min.attr, NULL }; diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index 0a02e85a895138..7644147d50b46a 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -39,7 +39,6 @@ void arch_perf_update_userpage(struct perf_event *event, userpg->cap_user_time_short = 0; userpg->cap_user_rdpmc = riscv_perf_user_access(event); -#ifdef CONFIG_RISCV_PMU /* * The counters are 64-bit but the priv spec doesn't mandate all the * bits to be implemented: that's why, counter width can vary based on @@ -47,7 +46,6 @@ void arch_perf_update_userpage(struct perf_event *event, */ if (userpg->cap_user_rdpmc) userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1; -#endif do { rd = sched_clock_read_begin(&seq); diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c index 04487ad7fba0b5..93c8e0fdb58985 100644 --- a/drivers/perf/riscv_pmu_legacy.c +++ b/drivers/perf/riscv_pmu_legacy.c @@ -22,13 +22,13 @@ static int pmu_legacy_ctr_get_idx(struct perf_event *event) struct perf_event_attr *attr = &event->attr; if (event->attr.type != PERF_TYPE_HARDWARE) - return -EOPNOTSUPP; + return -ENOENT; if (attr->config == PERF_COUNT_HW_CPU_CYCLES) return RISCV_PMU_LEGACY_CYCLE; else if (attr->config == PERF_COUNT_HW_INSTRUCTIONS) return RISCV_PMU_LEGACY_INSTRET; else - return -EOPNOTSUPP; + return -ENOENT; } /* For legacy config & counter index are same */ diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 25b1b699b3e279..391ca1422caec1 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -60,7 +60,7 @@ asm volatile(ALTERNATIVE( \ #define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY) PMU_FORMAT_ATTR(event, "config:0-47"); -PMU_FORMAT_ATTR(firmware, "config:63"); +PMU_FORMAT_ATTR(firmware, "config:62-63"); static bool sbi_v2_available; static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available); @@ -309,7 +309,7 @@ static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata) ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0); } else if (ret.error == SBI_ERR_NOT_SUPPORTED) { /* This event cannot be monitored by any counter */ - edata->event_idx = -EINVAL; + edata->event_idx = -ENOENT; } } @@ -507,7 +507,6 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) { u32 type = event->attr.type; u64 config = event->attr.config; - int bSoftware; u64 raw_config_val; int ret; @@ -528,22 +527,36 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) break; case PERF_TYPE_RAW: /* - * As per SBI specification, the upper 16 bits must be unused for - * a raw event. Use the MSB (63b) to distinguish between hardware - * raw event and firmware events. + * As per SBI specification, the upper 16 bits must be unused + * for a raw event. 
+ * Bits 63:62 are used to distinguish between raw events + * 00 - Hardware raw event + * 10 - SBI firmware events + * 11 - Risc-V platform specific firmware event */ - bSoftware = config >> 63; raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK; - if (bSoftware) { + switch (config >> 62) { + case 0: + ret = RISCV_PMU_RAW_EVENT_IDX; + *econfig = raw_config_val; + break; + case 2: ret = (raw_config_val & 0xFFFF) | (SBI_PMU_EVENT_TYPE_FW << 16); - } else { - ret = RISCV_PMU_RAW_EVENT_IDX; + break; + case 3: + /* + * For Risc-V platform specific firmware events + * Event code - 0xFFFF + * Event data - raw event encoding + */ + ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT; *econfig = raw_config_val; + break; } break; default: - ret = -EINVAL; + ret = -ENOENT; break; } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index dfab1c66b3e510..f73abff416bedb 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -95,6 +95,7 @@ source "drivers/phy/mediatek/Kconfig" source "drivers/phy/microchip/Kconfig" source "drivers/phy/motorola/Kconfig" source "drivers/phy/mscc/Kconfig" +source "drivers/phy/nuvoton/Kconfig" source "drivers/phy/qualcomm/Kconfig" source "drivers/phy/ralink/Kconfig" source "drivers/phy/realtek/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 5fcbce5f9ab1f8..ebc399560da4f6 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -25,6 +25,7 @@ obj-y += allwinner/ \ microchip/ \ motorola/ \ mscc/ \ + nuvoton/ \ qualcomm/ \ ralink/ \ realtek/ \ diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c index cc29b08e49ebcb..462c61a24ec5cf 100644 --- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c +++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c @@ -113,11 +113,10 @@ static const struct phy_ops cygnus_pcie_phy_ops = { static int cygnus_pcie_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *node = dev->of_node, *child; + struct device_node *node = dev->of_node; struct cygnus_pcie_phy_core *core; struct phy_provider *provider; unsigned cnt = 0; - int ret; if (of_get_child_count(node) == 0) { dev_err(dev, "PHY no child node\n"); @@ -136,35 +135,31 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) mutex_init(&core->lock); - for_each_available_child_of_node(node, child) { + for_each_available_child_of_node_scoped(node, child) { unsigned int id; struct cygnus_pcie_phy *p; if (of_property_read_u32(child, "reg", &id)) { dev_err(dev, "missing reg property for %pOFn\n", child); - ret = -EINVAL; - goto put_child; + return -EINVAL; } if (id >= MAX_NUM_PHYS) { dev_err(dev, "invalid PHY id: %u\n", id); - ret = -EINVAL; - goto put_child; + return -EINVAL; } if (core->phys[id].phy) { dev_err(dev, "duplicated PHY id: %u\n", id); - ret = -EINVAL; - goto put_child; + return -EINVAL; } p = &core->phys[id]; p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); if (IS_ERR(p->phy)) { dev_err(dev, "failed to create PHY\n"); - ret = PTR_ERR(p->phy); - goto put_child; + return PTR_ERR(p->phy); } p->core = core; @@ -184,9 +179,6 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); return 0; -put_child: - of_node_put(child); - return ret; } static const struct of_device_id cygnus_pcie_phy_match_table[] = { diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c index ed9e18791ec949..228100357054d9 100644 --- a/drivers/phy/broadcom/phy-brcm-sata.c +++ 
b/drivers/phy/broadcom/phy-brcm-sata.c @@ -751,11 +751,11 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) { const char *rxaeq_mode; struct device *dev = &pdev->dev; - struct device_node *dn = dev->of_node, *child; + struct device_node *dn = dev->of_node; const struct of_device_id *of_id; struct brcm_sata_phy *priv; struct phy_provider *provider; - int ret, count = 0; + int count = 0; if (of_get_child_count(dn) == 0) return -ENODEV; @@ -782,26 +782,23 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) return PTR_ERR(priv->ctrl_base); } - for_each_available_child_of_node(dn, child) { + for_each_available_child_of_node_scoped(dn, child) { unsigned int id; struct brcm_sata_port *port; if (of_property_read_u32(child, "reg", &id)) { dev_err(dev, "missing reg property in node %pOFn\n", child); - ret = -EINVAL; - goto put_child; + return -EINVAL; } if (id >= MAX_PORTS) { dev_err(dev, "invalid reg: %u\n", id); - ret = -EINVAL; - goto put_child; + return -EINVAL; } if (priv->phys[id].phy) { dev_err(dev, "already registered port %u\n", id); - ret = -EINVAL; - goto put_child; + return -EINVAL; } port = &priv->phys[id]; @@ -822,8 +819,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); if (IS_ERR(port->phy)) { dev_err(dev, "failed to create PHY\n"); - ret = PTR_ERR(port->phy); - goto put_child; + return PTR_ERR(port->phy); } phy_set_drvdata(port->phy, port); @@ -839,9 +835,6 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) dev_info(dev, "registered %d port(s)\n", count); return 0; -put_child: - of_node_put(child); - return ret; } static struct platform_driver brcm_sata_phy_driver = { diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index d4eb93ce823238..aeec6eb6be237a 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -310,7 +310,7 @@ static const struct clk_parent_data pll_mux_parent_data[][SIERRA_NUM_CMN_PLLC_PA }, }; -static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = { +static const u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = { [CMN_PLLLC] = { 0, 1 }, [CMN_PLLLC1] = { 1, 0 }, }; @@ -362,14 +362,14 @@ struct cdns_sierra_data { u32 id_value; u8 block_offset_shift; u8 reg_offset_shift; - struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] - [NUM_SSC_MODE]; - struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] - [NUM_SSC_MODE]; - struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] - [NUM_SSC_MODE]; - struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] - [NUM_SSC_MODE]; + const struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + const struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + const struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + const struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; }; struct cdns_regmap_cdb_context { @@ -539,12 +539,12 @@ static int cdns_sierra_phy_init(struct phy *gphy) struct cdns_sierra_inst *ins = phy_get_drvdata(gphy); struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent); const struct cdns_sierra_data *init_data = phy->init_data; - struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; + const struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; enum cdns_sierra_phy_type phy_type = ins->phy_type; + const 
struct cdns_sierra_vals *phy_pma_ln_vals; enum cdns_sierra_ssc_mode ssc = ins->ssc_mode; - struct cdns_sierra_vals *phy_pma_ln_vals; + const struct cdns_sierra_vals *pcs_cmn_vals; const struct cdns_reg_pairs *reg_pairs; - struct cdns_sierra_vals *pcs_cmn_vals; struct regmap *regmap; u32 num_regs; int i, j; @@ -1244,12 +1244,12 @@ static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp, static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp) { + const struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; const struct cdns_sierra_data *init_data = sp->init_data; - struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; + const struct cdns_sierra_vals *phy_pma_ln_vals; + const struct cdns_sierra_vals *pcs_cmn_vals; enum cdns_sierra_phy_type phy_t1, phy_t2; - struct cdns_sierra_vals *phy_pma_ln_vals; const struct cdns_reg_pairs *reg_pairs; - struct cdns_sierra_vals *pcs_cmn_vals; int i, j, node, mlane, num_lanes, ret; enum cdns_sierra_ssc_mode ssc; struct regmap *regmap; @@ -1366,7 +1366,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) unsigned int id_value; int ret, node = 0; void __iomem *base; - struct device_node *dn = dev->of_node, *child; + struct device_node *dn = dev->of_node; if (of_get_child_count(dn) == 0) return -ENODEV; @@ -1438,7 +1438,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) sp->autoconf = of_property_read_bool(dn, "cdns,autoconf"); - for_each_available_child_of_node(dn, child) { + for_each_available_child_of_node_scoped(dn, child) { struct phy *gphy; if (!(of_node_name_eq(child, "phy") || @@ -1452,7 +1452,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) dev_err(dev, "failed to get reset %s\n", child->full_name); ret = PTR_ERR(sp->phys[node].lnk_rst); - of_node_put(child); goto put_control; } @@ -1461,7 +1460,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "missing property in node %s\n", child->name); - of_node_put(child); reset_control_put(sp->phys[node].lnk_rst); goto put_control; } @@ -1475,7 +1473,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) gphy = devm_phy_create(dev, child, &noop_ops); if (IS_ERR(gphy)) { ret = PTR_ERR(gphy); - of_node_put(child); reset_control_put(sp->phys[node].lnk_rst); goto put_control; } @@ -1544,11 +1541,11 @@ static void cdns_sierra_phy_remove(struct platform_device *pdev) } /* SGMII PHY PMA lane configuration */ -static struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = { +static const struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = { {0x9010, SIERRA_PHY_PMA_XCVR_CTRL} }; -static struct cdns_sierra_vals sgmii_phy_pma_ln_vals = { +static const struct cdns_sierra_vals sgmii_phy_pma_ln_vals = { .reg_pairs = sgmii_phy_pma_ln_regs, .num_regs = ARRAY_SIZE(sgmii_phy_pma_ln_regs), }; @@ -1598,22 +1595,22 @@ static const struct cdns_reg_pairs sgmii_100_no_ssc_plllc1_opt3_ln_regs[] = { {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG} }; -static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = { +static const struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = { .reg_pairs = sgmii_100_no_ssc_plllc1_opt3_cmn_regs, .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_cmn_regs), }; -static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = { +static const struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = { .reg_pairs = sgmii_100_no_ssc_plllc1_opt3_ln_regs, .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_ln_regs), }; /* QSGMII PHY PMA lane configuration */ -static 
struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = { +static const struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = { {0x9010, SIERRA_PHY_PMA_XCVR_CTRL} }; -static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = { +static const struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = { .reg_pairs = qsgmii_phy_pma_ln_regs, .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs), }; @@ -1664,22 +1661,22 @@ static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = { {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG} }; -static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = { +static const struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = { .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs, .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs), }; -static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = { +static const struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = { .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs, .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs), }; /* PCIE PHY PCS common configuration */ -static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = { +static const struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = { {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1} }; -static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = { +static const struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = { .reg_pairs = pcie_phy_pcs_cmn_regs, .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs), }; @@ -1745,12 +1742,12 @@ static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = { {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} }; -static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = { +static const struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = { .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs, .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs), }; -static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = { +static const struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = { .reg_pairs = ml_pcie_100_no_ssc_ln_regs, .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs), }; @@ -1810,7 +1807,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = { {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} }; -static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = { +static const struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = { .reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs, .num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs), }; @@ -1886,12 +1883,12 @@ static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = { {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} }; -static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = { +static const struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = { .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs, .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs), }; -static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = { +static const struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = { .reg_pairs = ml_pcie_100_int_ssc_ln_regs, .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs), }; @@ -1954,7 +1951,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = { {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} }; -static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = { +static const struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = { .reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs, .num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs), }; @@ -2024,12 +2021,12 @@ static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = { {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} }; -static struct 
cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = { +static const struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = { .reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs, .num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs), }; -static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = { +static const struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = { .reg_pairs = ml_pcie_100_ext_ssc_ln_regs, .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs), }; @@ -2092,7 +2089,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = { {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} }; -static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = { +static const struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = { .reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs, .num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs), }; @@ -2152,12 +2149,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = { {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} }; -static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = { +static const struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = { .reg_pairs = cdns_pcie_cmn_regs_no_ssc, .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc), }; -static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = { +static const struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = { .reg_pairs = cdns_pcie_ln_regs_no_ssc, .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc), }; @@ -2227,12 +2224,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = { {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} }; -static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = { +static const struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = { .reg_pairs = cdns_pcie_cmn_regs_int_ssc, .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc), }; -static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = { +static const struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = { .reg_pairs = cdns_pcie_ln_regs_int_ssc, .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc), }; @@ -2296,12 +2293,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = { {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} }; -static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = { +static const struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = { .reg_pairs = cdns_pcie_cmn_regs_ext_ssc, .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc), }; -static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = { +static const struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = { .reg_pairs = cdns_pcie_ln_regs_ext_ssc, .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc), }; @@ -2413,12 +2410,12 @@ static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = { {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG} }; -static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = { +static const struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = { .reg_pairs = cdns_usb_cmn_regs_ext_ssc, .num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc), }; -static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = { +static const struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = { .reg_pairs = cdns_usb_ln_regs_ext_ssc, .num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc), }; @@ -2443,7 +2440,7 @@ static const struct cdns_reg_pairs sgmii_pma_cmn_vals[] = { {0x0013, SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG}, }; -static struct cdns_sierra_vals sgmii_cmn_vals = { +static const struct cdns_sierra_vals sgmii_cmn_vals = { .reg_pairs = sgmii_pma_cmn_vals, .num_regs = ARRAY_SIZE(sgmii_pma_cmn_vals), }; @@ -2489,7 +2486,7 @@ static const struct cdns_reg_pairs sgmii_ln_regs[] = { {0x321F, 
SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG}, }; -static struct cdns_sierra_vals sgmii_pma_ln_vals = { +static const struct cdns_sierra_vals sgmii_pma_ln_vals = { .reg_pairs = sgmii_ln_regs, .num_regs = ARRAY_SIZE(sgmii_ln_regs), }; diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 56ce82a47f88c7..8bbbbb87bb22e5 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -285,7 +285,7 @@ static const int refclk_driver_parent_index[] = { CDNS_TORRENT_RECEIVED_REFCLK }; -static u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 }; +static const u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 }; enum cdns_torrent_phy_type { TYPE_NONE, @@ -351,6 +351,7 @@ struct cdns_torrent_phy { void __iomem *sd_base; /* SD0801 registers base */ u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */ u32 dp_pll; + u32 protocol_bitmask; struct reset_control *phy_rst; struct reset_control *apb_rst; struct device *dev; @@ -422,17 +423,17 @@ struct cdns_reg_pairs { }; struct cdns_torrent_vals { - struct cdns_reg_pairs *reg_pairs; + const struct cdns_reg_pairs *reg_pairs; u32 num_regs; }; struct cdns_torrent_vals_entry { u32 key; - struct cdns_torrent_vals *vals; + const struct cdns_torrent_vals *vals; }; struct cdns_torrent_vals_table { - struct cdns_torrent_vals_entry *entries; + const struct cdns_torrent_vals_entry *entries; u32 num_entries; }; @@ -454,12 +455,12 @@ struct cdns_regmap_cdb_context { u8 reg_offset_shift; }; -static struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl, - enum cdns_torrent_ref_clk refclk0, - enum cdns_torrent_ref_clk refclk1, - enum cdns_torrent_phy_type link0, - enum cdns_torrent_phy_type link1, - enum cdns_torrent_ssc_mode ssc) +static const struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl, + enum cdns_torrent_ref_clk refclk0, + enum cdns_torrent_ref_clk refclk1, + enum cdns_torrent_phy_type link0, + enum cdns_torrent_phy_type link1, + enum cdns_torrent_ssc_mode ssc) { int i; u32 key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc); @@ -2306,16 +2307,16 @@ static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy) static int cdns_torrent_phy_init(struct phy *phy) { struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + const struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; const struct cdns_torrent_data *init_data = cdns_phy->init_data; - struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; + const struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate; - struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; struct cdns_torrent_inst *inst = phy_get_drvdata(phy); enum cdns_torrent_phy_type phy_type = inst->phy_type; + const struct cdns_torrent_vals *phy_pma_cmn_vals; enum cdns_torrent_ssc_mode ssc = inst->ssc_mode; - struct cdns_torrent_vals *phy_pma_cmn_vals; - struct cdns_torrent_vals *pcs_cmn_vals; - struct cdns_reg_pairs *reg_pairs; + const struct cdns_torrent_vals *pcs_cmn_vals; + const struct cdns_reg_pairs *reg_pairs; struct regmap *regmap; u32 num_regs; int i, j; @@ -2463,166 +2464,216 @@ static const struct phy_ops cdns_torrent_phy_ops = { static int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) { + const struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; const struct cdns_torrent_data *init_data = cdns_phy->init_data; - 
struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; + const struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; enum cdns_torrent_ref_clk ref_clk1 = cdns_phy->ref_clk1_rate; enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate; - struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; + const struct cdns_torrent_vals *phy_pma_cmn_vals; + const struct cdns_torrent_vals *pcs_cmn_vals; enum cdns_torrent_phy_type phy_t1, phy_t2; - struct cdns_torrent_vals *phy_pma_cmn_vals; - struct cdns_torrent_vals *pcs_cmn_vals; + const struct cdns_reg_pairs *reg_pairs; int i, j, node, mlane, num_lanes, ret; - struct cdns_reg_pairs *reg_pairs; + struct device *dev = cdns_phy->dev; enum cdns_torrent_ssc_mode ssc; struct regmap *regmap; - u32 num_regs; + u32 num_regs, num_protocols, protocol; - /* Maximum 2 links (subnodes) are supported */ - if (cdns_phy->nsubnodes != 2) + num_protocols = hweight32(cdns_phy->protocol_bitmask); + /* Maximum 2 protocols are supported */ + if (num_protocols > 2) { + dev_err(dev, "at most 2 protocols are supported\n"); return -EINVAL; + } + + + /** + * Get PHY types directly from subnodes if only 2 subnodes exist. + * It is possible for phy_t1 to be the same as phy_t2 for special + * configurations such as PCIe Multilink. + */ + if (cdns_phy->nsubnodes == 2) { + phy_t1 = cdns_phy->phys[0].phy_type; + phy_t2 = cdns_phy->phys[1].phy_type; + } else { + /** + * Both PHY types / protocols should be unique. + * If they are the same, it should be expressed with either + * a) Single-Link (1 Sub-node) - handled via PHY APIs + * OR + * b) Double-Link (2 Sub-nodes) - handled above + */ + if (num_protocols != 2) { + dev_err(dev, "incorrect representation of link\n"); + return -EINVAL; + } - phy_t1 = cdns_phy->phys[0].phy_type; - phy_t2 = cdns_phy->phys[1].phy_type; + phy_t1 = fns(cdns_phy->protocol_bitmask, 0); + phy_t2 = fns(cdns_phy->protocol_bitmask, 1); + } /** - * First configure the PHY for first link with phy_t1. Get the array - * values as [phy_t1][phy_t2][ssc]. + * Configure all links with the protocol phy_t1 first followed by + * configuring all links with the protocol phy_t2. + * + * When phy_t1 = phy_t2, it is a single protocol and configuration + * is performed with a single iteration of the protocol and multiple + * iterations over the sub-nodes (links). + * + * When phy_t1 != phy_t2, there are two protocols and configuration + * is performed by iterating over all sub-nodes matching the first + * protocol and configuring them first, followed by iterating over + * all sub-nodes matching the second protocol and configuring them + * next. */ - for (node = 0; node < cdns_phy->nsubnodes; node++) { - if (node == 1) { + for (protocol = 0; protocol < num_protocols; protocol++) { + /** + * For the case where num_protocols is 1, + * phy_t1 = phy_t2 and the swap is unnecessary. + * + * Swapping phy_t1 and phy_t2 is only required when the + * number of protocols is 2 and there are 2 or more links. + */ + if (protocol == 1) { /** - * If first link with phy_t1 is configured, then - * configure the PHY for second link with phy_t2. + * If first protocol with phy_t1 is configured, then + * configure the PHY for second protocol with phy_t2. * Get the array values as [phy_t2][phy_t1][ssc]. 
*/ swap(phy_t1, phy_t2); swap(ref_clk, ref_clk1); } - mlane = cdns_phy->phys[node].mlane; - ssc = cdns_phy->phys[node].ssc_mode; - num_lanes = cdns_phy->phys[node].num_lanes; + for (node = 0; node < cdns_phy->nsubnodes; node++) { + if (cdns_phy->phys[node].phy_type != phy_t1) + continue; - /** - * PHY configuration specific registers: - * link_cmn_vals depend on combination of PHY types being - * configured and are common for both PHY types, so array - * values should be same for [phy_t1][phy_t2][ssc] and - * [phy_t2][phy_t1][ssc]. - * xcvr_diag_vals also depend on combination of PHY types - * being configured, but these can be different for particular - * PHY type and are per lane. - */ - link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl, - CLK_ANY, CLK_ANY, - phy_t1, phy_t2, ANY_SSC); - if (link_cmn_vals) { - reg_pairs = link_cmn_vals->reg_pairs; - num_regs = link_cmn_vals->num_regs; - regmap = cdns_phy->regmap_common_cdb; + mlane = cdns_phy->phys[node].mlane; + ssc = cdns_phy->phys[node].ssc_mode; + num_lanes = cdns_phy->phys[node].num_lanes; /** - * First array value in link_cmn_vals must be of - * PHY_PLL_CFG register + * PHY configuration specific registers: + * link_cmn_vals depend on combination of PHY types being + * configured and are common for both PHY types, so array + * values should be same for [phy_t1][phy_t2][ssc] and + * [phy_t2][phy_t1][ssc]. + * xcvr_diag_vals also depend on combination of PHY types + * being configured, but these can be different for particular + * PHY type and are per lane. */ - regmap_field_write(cdns_phy->phy_pll_cfg, - reg_pairs[0].val); + link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl, + CLK_ANY, CLK_ANY, + phy_t1, phy_t2, ANY_SSC); + if (link_cmn_vals) { + reg_pairs = link_cmn_vals->reg_pairs; + num_regs = link_cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + + /** + * First array value in link_cmn_vals must be of + * PHY_PLL_CFG register + */ + regmap_field_write(cdns_phy->phy_pll_cfg, + reg_pairs[0].val); + + for (i = 1; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } - for (i = 1; i < num_regs; i++) - regmap_write(regmap, reg_pairs[i].off, - reg_pairs[i].val); - } + xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl, + CLK_ANY, CLK_ANY, + phy_t1, phy_t2, ANY_SSC); + if (xcvr_diag_vals) { + reg_pairs = xcvr_diag_vals->reg_pairs; + num_regs = xcvr_diag_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } - xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl, - CLK_ANY, CLK_ANY, - phy_t1, phy_t2, ANY_SSC); - if (xcvr_diag_vals) { - reg_pairs = xcvr_diag_vals->reg_pairs; - num_regs = xcvr_diag_vals->num_regs; - for (i = 0; i < num_lanes; i++) { - regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; - for (j = 0; j < num_regs; j++) - regmap_write(regmap, reg_pairs[j].off, - reg_pairs[j].val); + /* PHY PCS common registers configurations */ + pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl, + CLK_ANY, CLK_ANY, + phy_t1, phy_t2, ANY_SSC); + if (pcs_cmn_vals) { + reg_pairs = pcs_cmn_vals->reg_pairs; + num_regs = pcs_cmn_vals->num_regs; + regmap = cdns_phy->regmap_phy_pcs_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); } - } - /* PHY PCS common registers configurations */ - 
pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl, - CLK_ANY, CLK_ANY, - phy_t1, phy_t2, ANY_SSC); - if (pcs_cmn_vals) { - reg_pairs = pcs_cmn_vals->reg_pairs; - num_regs = pcs_cmn_vals->num_regs; - regmap = cdns_phy->regmap_phy_pcs_common_cdb; - for (i = 0; i < num_regs; i++) - regmap_write(regmap, reg_pairs[i].off, - reg_pairs[i].val); - } + /* PHY PMA common registers configurations */ + phy_pma_cmn_vals = + cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl, + CLK_ANY, CLK_ANY, phy_t1, phy_t2, + ANY_SSC); + if (phy_pma_cmn_vals) { + reg_pairs = phy_pma_cmn_vals->reg_pairs; + num_regs = phy_pma_cmn_vals->num_regs; + regmap = cdns_phy->regmap_phy_pma_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } - /* PHY PMA common registers configurations */ - phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl, - CLK_ANY, CLK_ANY, - phy_t1, phy_t2, ANY_SSC); - if (phy_pma_cmn_vals) { - reg_pairs = phy_pma_cmn_vals->reg_pairs; - num_regs = phy_pma_cmn_vals->num_regs; - regmap = cdns_phy->regmap_phy_pma_common_cdb; - for (i = 0; i < num_regs; i++) - regmap_write(regmap, reg_pairs[i].off, - reg_pairs[i].val); - } + /* PMA common registers configurations */ + cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl, + ref_clk, ref_clk1, + phy_t1, phy_t2, ssc); + if (cmn_vals) { + reg_pairs = cmn_vals->reg_pairs; + num_regs = cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } - /* PMA common registers configurations */ - cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl, - ref_clk, ref_clk1, - phy_t1, phy_t2, ssc); - if (cmn_vals) { - reg_pairs = cmn_vals->reg_pairs; - num_regs = cmn_vals->num_regs; - regmap = cdns_phy->regmap_common_cdb; - for (i = 0; i < num_regs; i++) - regmap_write(regmap, reg_pairs[i].off, - reg_pairs[i].val); - } + /* PMA TX lane registers configurations */ + tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl, + ref_clk, ref_clk1, + phy_t1, phy_t2, ssc); + if (tx_ln_vals) { + reg_pairs = tx_ln_vals->reg_pairs; + num_regs = tx_ln_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } - /* PMA TX lane registers configurations */ - tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl, - ref_clk, ref_clk1, - phy_t1, phy_t2, ssc); - if (tx_ln_vals) { - reg_pairs = tx_ln_vals->reg_pairs; - num_regs = tx_ln_vals->num_regs; - for (i = 0; i < num_lanes; i++) { - regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; - for (j = 0; j < num_regs; j++) - regmap_write(regmap, reg_pairs[j].off, - reg_pairs[j].val); + /* PMA RX lane registers configurations */ + rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl, + ref_clk, ref_clk1, + phy_t1, phy_t2, ssc); + if (rx_ln_vals) { + reg_pairs = rx_ln_vals->reg_pairs; + num_regs = rx_ln_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } } - } - /* PMA RX lane registers configurations */ - rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl, - ref_clk, ref_clk1, - phy_t1, phy_t2, ssc); - if (rx_ln_vals) { - reg_pairs = rx_ln_vals->reg_pairs; - num_regs = 
rx_ln_vals->num_regs; - for (i = 0; i < num_lanes; i++) { - regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane]; - for (j = 0; j < num_regs; j++) - regmap_write(regmap, reg_pairs[j].off, - reg_pairs[j].val); + if (phy_t1 == TYPE_DP) { + ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2); + if (ret) + return ret; } - } - if (phy_t1 == TYPE_DP) { - ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2); - if (ret) - return ret; + reset_control_deassert(cdns_phy->phys[node].lnk_rst); } - - reset_control_deassert(cdns_phy->phys[node].lnk_rst); } /* Take the PHY out of reset */ @@ -2826,6 +2877,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) dev_set_drvdata(dev, cdns_phy); cdns_phy->dev = dev; cdns_phy->init_data = data; + cdns_phy->protocol_bitmask = 0; cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cdns_phy->sd_base)) @@ -3010,6 +3062,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) } cdns_phy->phys[node].phy = gphy; + cdns_phy->protocol_bitmask |= BIT(cdns_phy->phys[node].phy_type); phy_set_drvdata(gphy, &cdns_phy->phys[node]); node++; @@ -3079,21 +3132,21 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev) } /* SGMII and QSGMII link configuration */ -static struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = { +static const struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = { {0x0002, PHY_PLL_CFG} }; -static struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = { {0x0003, XCVR_DIAG_HSCLK_DIV}, {0x0113, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = { +static const struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = { .reg_pairs = sgmii_qsgmii_link_cmn_regs, .num_regs = ARRAY_SIZE(sgmii_qsgmii_link_cmn_regs), }; -static struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = { .reg_pairs = sgmii_qsgmii_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(sgmii_qsgmii_xcvr_diag_ln_regs), }; @@ -3155,73 +3208,73 @@ static DEFINE_NOIRQ_DEV_PM_OPS(cdns_torrent_phy_pm_ops, cdns_torrent_phy_resume_noirq); /* USB and DP link configuration */ -static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = { +static const struct cdns_reg_pairs usb_dp_link_cmn_regs[] = { {0x0002, PHY_PLL_CFG}, {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0} }; -static struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0041, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = { {0x0001, XCVR_DIAG_HSCLK_SEL}, {0x0009, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals usb_dp_link_cmn_vals = { +static const struct cdns_torrent_vals usb_dp_link_cmn_vals = { .reg_pairs = usb_dp_link_cmn_regs, .num_regs = ARRAY_SIZE(usb_dp_link_cmn_regs), }; -static struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = { .reg_pairs = usb_dp_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(usb_dp_xcvr_diag_ln_regs), }; -static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = { .reg_pairs = dp_usb_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs), }; /* USXGMII and SGMII/QSGMII link configuration */ -static struct cdns_reg_pairs 
usxgmii_sgmii_link_cmn_regs[] = { +static const struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = { {0x0002, PHY_PLL_CFG}, {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0}, {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} }; -static struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0001, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = { {0x0111, XCVR_DIAG_HSCLK_SEL}, {0x0103, XCVR_DIAG_HSCLK_DIV}, {0x0A9B, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = { +static const struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = { .reg_pairs = usxgmii_sgmii_link_cmn_regs, .num_regs = ARRAY_SIZE(usxgmii_sgmii_link_cmn_regs), }; -static struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = { .reg_pairs = usxgmii_sgmii_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(usxgmii_sgmii_xcvr_diag_ln_regs), }; -static struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = { .reg_pairs = sgmii_usxgmii_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(sgmii_usxgmii_xcvr_diag_ln_regs), }; /* Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0}, {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0}, {0x061B, CMN_PLL0_VCOCAL_INIT_TMR}, @@ -3233,13 +3286,13 @@ static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { {0x0138, CMN_PLL0_LOCK_PLLCNT_START} }; -static struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = { .reg_pairs = ml_usxgmii_pll0_156_25_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(ml_usxgmii_pll0_156_25_no_ssc_cmn_regs), }; /* Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, @@ -3248,13 +3301,13 @@ static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = { .reg_pairs = ml_sgmii_pll1_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(ml_sgmii_pll1_100_no_ssc_cmn_regs), }; /* TI J7200, Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { {0x0014, CMN_SSM_BIAS_TMR}, {0x0028, CMN_PLLSM0_PLLPRE_TMR}, {0x00A4, CMN_PLLSM0_PLLLOCK_TMR}, @@ -3280,13 +3333,13 @@ static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { {0x0138, CMN_PLL0_LOCK_PLLCNT_START} }; -static struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = { .reg_pairs = 
j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs), }; /* TI J7200, Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { {0x0028, CMN_PLLSM1_PLLPRE_TMR}, {0x00A4, CMN_PLLSM1_PLLLOCK_TMR}, {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, @@ -3297,42 +3350,42 @@ static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = { .reg_pairs = j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs), }; /* PCIe and USXGMII link configuration */ -static struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = { +static const struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = { {0x0003, PHY_PLL_CFG}, {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, {0x0400, CMN_PDIAG_PLL1_CLK_SEL_M0} }; -static struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0012, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = { {0x0011, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0089, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = { +static const struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = { .reg_pairs = pcie_usxgmii_link_cmn_regs, .num_regs = ARRAY_SIZE(pcie_usxgmii_link_cmn_regs), }; -static struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = { .reg_pairs = pcie_usxgmii_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(pcie_usxgmii_xcvr_diag_ln_regs), }; -static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = { .reg_pairs = usxgmii_pcie_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(usxgmii_pcie_xcvr_diag_ln_regs), }; @@ -3340,7 +3393,7 @@ static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = { /* * Multilink USXGMII, using PLL1, 156.25 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = { {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, {0x0014, CMN_PLL1_DSM_FBH_OVRD_M0}, {0x0005, CMN_PLL1_DSM_FBL_OVRD_M0}, @@ -3355,7 +3408,7 @@ static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = { {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = { {0x00F3, TX_PSC_A0}, {0x04A2, TX_PSC_A2}, {0x04A2, TX_PSC_A3 }, @@ -3363,7 +3416,7 @@ static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = { {0x0000, XCVR_DIAG_PSC_OVRD} }; -static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = { {0x091D, RX_PSC_A0}, {0x0900, RX_PSC_A2}, {0x0100, RX_PSC_A3}, @@ -3381,55 +3434,55 @@ static struct cdns_reg_pairs 
ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = { {0x018C, RX_CDRLF_CNFG} }; -static struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = { .reg_pairs = ml_usxgmii_pll1_156_25_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(ml_usxgmii_pll1_156_25_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = { .reg_pairs = ml_usxgmii_156_25_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = { .reg_pairs = ml_usxgmii_156_25_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_rx_ln_regs), }; /* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */ -static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = { +static const struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = { {0x0040, PHY_PMA_CMN_CTRL1}, }; -static struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = { +static const struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = { .reg_pairs = ti_usxgmii_phy_pma_cmn_regs, .num_regs = ARRAY_SIZE(ti_usxgmii_phy_pma_cmn_regs), }; /* Single USXGMII link configuration */ -static struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = { +static const struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = { {0x0000, PHY_PLL_CFG}, {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0} }; -static struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0001, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = { +static const struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = { .reg_pairs = sl_usxgmii_link_cmn_regs, .num_regs = ARRAY_SIZE(sl_usxgmii_link_cmn_regs), }; -static struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = { .reg_pairs = sl_usxgmii_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(sl_usxgmii_xcvr_diag_ln_regs), }; /* Single link USXGMII, 156.25 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = { {0x0014, CMN_SSM_BIAS_TMR}, {0x0028, CMN_PLLSM0_PLLPRE_TMR}, {0x00A4, CMN_PLLSM0_PLLLOCK_TMR}, @@ -3467,7 +3520,7 @@ static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = { {0x0138, CMN_PLL1_LOCK_PLLCNT_START} }; -static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = { {0x07A2, TX_RCVDET_ST_TMR}, {0x00F3, TX_PSC_A0}, {0x04A2, TX_PSC_A2}, @@ -3476,7 +3529,7 @@ static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = { {0x0000, XCVR_DIAG_PSC_OVRD} }; -static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = { {0x0014, RX_SDCAL0_INIT_TMR}, {0x0062, RX_SDCAL0_ITER_TMR}, {0x0014, RX_SDCAL1_INIT_TMR}, @@ -3498,68 +3551,68 @@ static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = { {0x018C, RX_CDRLF_CNFG} }; -static struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = { .reg_pairs = 
sl_usxgmii_156_25_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sl_usxgmii_156_25_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = { .reg_pairs = usxgmii_156_25_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = { .reg_pairs = usxgmii_156_25_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_rx_ln_regs), }; /* PCIe and DP link configuration */ -static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = { +static const struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = { {0x0003, PHY_PLL_CFG}, {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1} }; -static struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0012, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = { {0x0001, XCVR_DIAG_HSCLK_SEL}, {0x0009, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals pcie_dp_link_cmn_vals = { +static const struct cdns_torrent_vals pcie_dp_link_cmn_vals = { .reg_pairs = pcie_dp_link_cmn_regs, .num_regs = ARRAY_SIZE(pcie_dp_link_cmn_regs), }; -static struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = { .reg_pairs = pcie_dp_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(pcie_dp_xcvr_diag_ln_regs), }; -static struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = { .reg_pairs = dp_pcie_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(dp_pcie_xcvr_diag_ln_regs), }; /* DP Multilink, 100 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = { {0x007F, CMN_TXPUCAL_TUNE}, {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = { {0x00FB, TX_PSC_A0}, {0x04AA, TX_PSC_A2}, {0x04AA, TX_PSC_A3}, {0x000F, XCVR_DIAG_BIDI_CTRL} }; -static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = { {0x0000, RX_PSC_A0}, {0x0000, RX_PSC_A2}, {0x0000, RX_PSC_A3}, @@ -3569,43 +3622,43 @@ static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = { {0x0000, RX_REE_PERGCSM_CTRL} }; -static struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = { .reg_pairs = dp_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(dp_100_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = { .reg_pairs = dp_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(dp_100_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = { .reg_pairs = dp_100_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(dp_100_no_ssc_rx_ln_regs), }; /* Single DisplayPort(DP) link configuration */ -static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = { +static const struct cdns_reg_pairs sl_dp_link_cmn_regs[] = { {0x0000, PHY_PLL_CFG}, 
}; -static struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals sl_dp_link_cmn_vals = { +static const struct cdns_torrent_vals sl_dp_link_cmn_vals = { .reg_pairs = sl_dp_link_cmn_regs, .num_regs = ARRAY_SIZE(sl_dp_link_cmn_regs), }; -static struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = { .reg_pairs = sl_dp_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(sl_dp_xcvr_diag_ln_regs), }; /* Single DP, 19.2 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = { {0x0014, CMN_SSM_BIAS_TMR}, {0x0027, CMN_PLLSM0_PLLPRE_TMR}, {0x00A1, CMN_PLLSM0_PLLLOCK_TMR}, @@ -3642,7 +3695,7 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = { {0x0003, CMN_PLL1_VCOCAL_TCTRL} }; -static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = { {0x0780, TX_RCVDET_ST_TMR}, {0x00FB, TX_PSC_A0}, {0x04AA, TX_PSC_A2}, @@ -3650,7 +3703,7 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = { {0x000F, XCVR_DIAG_BIDI_CTRL} }; -static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = { {0x0000, RX_PSC_A0}, {0x0000, RX_PSC_A2}, {0x0000, RX_PSC_A3}, @@ -3660,23 +3713,23 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = { {0x0000, RX_REE_PERGCSM_CTRL} }; -static struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = { .reg_pairs = sl_dp_19_2_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = { .reg_pairs = sl_dp_19_2_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = { .reg_pairs = sl_dp_19_2_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_rx_ln_regs), }; /* Single DP, 25 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = { {0x0019, CMN_SSM_BIAS_TMR}, {0x0032, CMN_PLLSM0_PLLPRE_TMR}, {0x00D1, CMN_PLLSM0_PLLLOCK_TMR}, @@ -3713,7 +3766,7 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = { {0x0003, CMN_PLL1_VCOCAL_TCTRL} }; -static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = { {0x09C4, TX_RCVDET_ST_TMR}, {0x00FB, TX_PSC_A0}, {0x04AA, TX_PSC_A2}, @@ -3721,7 +3774,7 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = { {0x000F, XCVR_DIAG_BIDI_CTRL} }; -static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = { {0x0000, RX_PSC_A0}, {0x0000, RX_PSC_A2}, {0x0000, RX_PSC_A3}, @@ -3731,35 +3784,35 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = { {0x0000, RX_REE_PERGCSM_CTRL} }; -static struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = { .reg_pairs = sl_dp_25_no_ssc_cmn_regs, .num_regs = 
ARRAY_SIZE(sl_dp_25_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = { .reg_pairs = sl_dp_25_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = { .reg_pairs = sl_dp_25_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_rx_ln_regs), }; /* Single DP, 100 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = { {0x0003, CMN_PLL0_VCOCAL_TCTRL}, {0x0003, CMN_PLL1_VCOCAL_TCTRL} }; -static struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = { {0x00FB, TX_PSC_A0}, {0x04AA, TX_PSC_A2}, {0x04AA, TX_PSC_A3}, {0x000F, XCVR_DIAG_BIDI_CTRL} }; -static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = { {0x0000, RX_PSC_A0}, {0x0000, RX_PSC_A2}, {0x0000, RX_PSC_A3}, @@ -3769,92 +3822,92 @@ static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = { {0x0000, RX_REE_PERGCSM_CTRL} }; -static struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = { .reg_pairs = sl_dp_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = { .reg_pairs = sl_dp_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = { .reg_pairs = sl_dp_100_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_rx_ln_regs), }; /* USB and SGMII/QSGMII link configuration */ -static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = { +static const struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = { {0x0002, PHY_PLL_CFG}, {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}, {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} }; -static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0041, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = { {0x0011, XCVR_DIAG_HSCLK_SEL}, {0x0003, XCVR_DIAG_HSCLK_DIV}, {0x009B, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = { +static const struct cdns_torrent_vals usb_sgmii_link_cmn_vals = { .reg_pairs = usb_sgmii_link_cmn_regs, .num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs), }; -static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = { .reg_pairs = usb_sgmii_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs), }; -static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = { .reg_pairs = sgmii_usb_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs), }; /* PCIe and USB Unique SSC link configuration */ -static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = { +static const struct cdns_reg_pairs 
pcie_usb_link_cmn_regs[] = { {0x0003, PHY_PLL_CFG}, {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, {0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0} }; -static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0012, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = { {0x0011, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x00C9, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals pcie_usb_link_cmn_vals = { +static const struct cdns_torrent_vals pcie_usb_link_cmn_vals = { .reg_pairs = pcie_usb_link_cmn_regs, .num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs), }; -static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = { .reg_pairs = pcie_usb_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs), }; -static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = { .reg_pairs = usb_pcie_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs), }; /* USB 100 MHz Ref clk, internal SSC */ -static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = { {0x0004, CMN_PLL0_DSM_DIAG_M0}, {0x0004, CMN_PLL0_DSM_DIAG_M1}, {0x0004, CMN_PLL1_DSM_DIAG_M0}, @@ -3907,47 +3960,47 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = { {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = { +static const struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = { .reg_pairs = usb_100_int_ssc_cmn_regs, .num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs), }; /* Single USB link configuration */ -static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = { +static const struct cdns_reg_pairs sl_usb_link_cmn_regs[] = { {0x0000, PHY_PLL_CFG}, {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0} }; -static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0041, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals sl_usb_link_cmn_vals = { +static const struct cdns_torrent_vals sl_usb_link_cmn_vals = { .reg_pairs = sl_usb_link_cmn_regs, .num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs), }; -static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = { .reg_pairs = sl_usb_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs), }; /* USB PHY PCS common configuration */ -static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = { +static const struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = { {0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0}, {0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0}, {0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1} }; -static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = { +static const struct cdns_torrent_vals usb_phy_pcs_cmn_vals = { .reg_pairs = usb_phy_pcs_cmn_regs, .num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs), }; /* USB 100 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = { {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, @@ -3957,19 +4010,19 @@ 
static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = { {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} }; -static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = { .reg_pairs = sl_usb_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs), }; -static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = { {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}, {0x007F, CMN_TXPUCAL_TUNE}, {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = { {0x02FF, TX_PSC_A0}, {0x06AF, TX_PSC_A1}, {0x06AE, TX_PSC_A2}, @@ -3979,7 +4032,7 @@ static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = { {0x0003, XCVR_DIAG_PSC_OVRD} }; -static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = { {0x0D1D, RX_PSC_A0}, {0x0D1D, RX_PSC_A1}, {0x0D00, RX_PSC_A2}, @@ -4002,23 +4055,23 @@ static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = { {0x0003, RX_CDRLF_CNFG3} }; -static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = { .reg_pairs = usb_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = { .reg_pairs = usb_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = { .reg_pairs = usb_100_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs), }; /* Single link USB, 100 MHz Ref clk, internal SSC */ -static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = { {0x0004, CMN_PLL0_DSM_DIAG_M0}, {0x0004, CMN_PLL1_DSM_DIAG_M0}, {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, @@ -4059,48 +4112,48 @@ static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = { {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} }; -static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = { .reg_pairs = sl_usb_100_int_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs), }; /* PCIe and SGMII/QSGMII Unique SSC link configuration */ -static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = { +static const struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = { {0x0003, PHY_PLL_CFG}, {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} }; -static struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0001, XCVR_DIAG_HSCLK_DIV}, {0x0012, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = { {0x0011, XCVR_DIAG_HSCLK_SEL}, {0x0003, XCVR_DIAG_HSCLK_DIV}, {0x009B, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = { +static const struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = { .reg_pairs = pcie_sgmii_link_cmn_regs, .num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs), }; -static struct 
cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = { .reg_pairs = pcie_sgmii_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs), }; -static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = { .reg_pairs = sgmii_pcie_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs), }; /* SGMII 100 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = { {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, @@ -4108,17 +4161,17 @@ static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = { {0x0003, CMN_PLL1_VCOCAL_TCTRL} }; -static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = { .reg_pairs = sl_sgmii_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs), }; -static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = { {0x007F, CMN_TXPUCAL_TUNE}, {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = { {0x00F3, TX_PSC_A0}, {0x04A2, TX_PSC_A2}, {0x04A2, TX_PSC_A3}, @@ -4127,7 +4180,7 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = { {0x0002, XCVR_DIAG_PSC_OVRD} }; -static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { {0x00F3, TX_PSC_A0}, {0x04A2, TX_PSC_A2}, {0x04A2, TX_PSC_A3}, @@ -4137,7 +4190,7 @@ static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { {0x4000, XCVR_DIAG_RXCLK_CTRL} }; -static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { {0x091D, RX_PSC_A0}, {0x0900, RX_PSC_A2}, {0x0100, RX_PSC_A3}, @@ -4155,28 +4208,28 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { {0x018C, RX_CDRLF_CNFG}, }; -static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = { .reg_pairs = sgmii_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = { .reg_pairs = sgmii_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = { .reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = { .reg_pairs = sgmii_100_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs), }; /* TI J7200, multilink SGMII */ -static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = { {0x07A2, TX_RCVDET_ST_TMR}, {0x00F3, TX_PSC_A0}, {0x04A2, TX_PSC_A2}, @@ -4187,12 +4240,12 @@ static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = { {0x4000, XCVR_DIAG_RXCLK_CTRL} 
}; -static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = { .reg_pairs = j7200_sgmii_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_tx_ln_regs), }; -static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = { {0x0014, RX_SDCAL0_INIT_TMR}, {0x0062, RX_SDCAL0_ITER_TMR}, {0x0014, RX_SDCAL1_INIT_TMR}, @@ -4214,13 +4267,13 @@ static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = { {0x018C, RX_CDRLF_CNFG} }; -static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = { .reg_pairs = j7200_sgmii_100_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_rx_ln_regs), }; /* SGMII 100 MHz Ref clk, internal SSC */ -static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = { {0x0004, CMN_PLL0_DSM_DIAG_M0}, {0x0004, CMN_PLL0_DSM_DIAG_M1}, {0x0004, CMN_PLL1_DSM_DIAG_M0}, @@ -4271,13 +4324,13 @@ static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = { {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = { +static const struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = { .reg_pairs = sgmii_100_int_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs), }; /* QSGMII 100 MHz Ref clk, no SSC */ -static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = { {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, @@ -4285,17 +4338,17 @@ static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = { {0x0003, CMN_PLL1_VCOCAL_TCTRL} }; -static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = { .reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs), }; -static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = { {0x007F, CMN_TXPUCAL_TUNE}, {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = { {0x00F3, TX_PSC_A0}, {0x04A2, TX_PSC_A2}, {0x04A2, TX_PSC_A3}, @@ -4305,7 +4358,7 @@ static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = { {0x0002, XCVR_DIAG_PSC_OVRD} }; -static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { {0x00F3, TX_PSC_A0}, {0x04A2, TX_PSC_A2}, {0x04A2, TX_PSC_A3}, @@ -4316,7 +4369,7 @@ static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { {0x4000, XCVR_DIAG_RXCLK_CTRL} }; -static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { {0x091D, RX_PSC_A0}, {0x0900, RX_PSC_A2}, {0x0100, RX_PSC_A3}, @@ -4334,28 +4387,28 @@ static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { {0x018C, RX_CDRLF_CNFG}, }; -static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = { .reg_pairs = qsgmii_100_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals 
qsgmii_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = { .reg_pairs = qsgmii_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = { .reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs), }; -static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = { .reg_pairs = qsgmii_100_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs), }; /* TI J7200, multilink QSGMII */ -static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = { +static const struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = { {0x07A2, TX_RCVDET_ST_TMR}, {0x00F3, TX_PSC_A0}, {0x04A2, TX_PSC_A2}, @@ -4367,12 +4420,12 @@ static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = { {0x4000, XCVR_DIAG_RXCLK_CTRL} }; -static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = { +static const struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = { .reg_pairs = j7200_qsgmii_100_no_ssc_tx_ln_regs, .num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_tx_ln_regs), }; -static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = { {0x0014, RX_SDCAL0_INIT_TMR}, {0x0062, RX_SDCAL0_ITER_TMR}, {0x0014, RX_SDCAL1_INIT_TMR}, @@ -4394,13 +4447,13 @@ static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = { {0x018C, RX_CDRLF_CNFG} }; -static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = { .reg_pairs = j7200_qsgmii_100_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_rx_ln_regs), }; /* QSGMII 100 MHz Ref clk, internal SSC */ -static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = { {0x0004, CMN_PLL0_DSM_DIAG_M0}, {0x0004, CMN_PLL0_DSM_DIAG_M1}, {0x0004, CMN_PLL1_DSM_DIAG_M0}, @@ -4451,35 +4504,35 @@ static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = { {0x007F, CMN_TXPDCAL_TUNE} }; -static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = { +static const struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = { .reg_pairs = qsgmii_100_int_ssc_cmn_regs, .num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs), }; /* Single SGMII/QSGMII link configuration */ -static struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = { +static const struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = { {0x0000, PHY_PLL_CFG}, {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0} }; -static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = { +static const struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = { {0x0000, XCVR_DIAG_HSCLK_SEL}, {0x0003, XCVR_DIAG_HSCLK_DIV}, {0x0013, XCVR_DIAG_PLLDRC_CTRL} }; -static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = { +static const struct cdns_torrent_vals sl_sgmii_link_cmn_vals = { .reg_pairs = sl_sgmii_link_cmn_regs, .num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs), }; -static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = { +static const struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = { .reg_pairs = sl_sgmii_xcvr_diag_ln_regs, .num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs), }; /* Multi link PCIe, 100 MHz Ref clk, internal SSC */ -static struct 
cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = { {0x0004, CMN_PLL0_DSM_DIAG_M0}, {0x0004, CMN_PLL0_DSM_DIAG_M1}, {0x0004, CMN_PLL1_DSM_DIAG_M0}, @@ -4528,13 +4581,13 @@ static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = { {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} }; -static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = { +static const struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = { .reg_pairs = pcie_100_int_ssc_cmn_regs, .num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs), }; /* Single link PCIe, 100 MHz Ref clk, internal SSC */ -static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = { {0x0004, CMN_PLL0_DSM_DIAG_M0}, {0x0004, CMN_PLL0_DSM_DIAG_M1}, {0x0004, CMN_PLL1_DSM_DIAG_M0}, @@ -4583,35 +4636,35 @@ static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = { {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} }; -static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = { +static const struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = { .reg_pairs = sl_pcie_100_int_ssc_cmn_regs, .num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs), }; /* PCIe, 100 MHz Ref clk, no SSC & external SSC */ -static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = { +static const struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = { {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0} }; -static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = { +static const struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = { {0x0019, RX_REE_TAP1_CLIP}, {0x0019, RX_REE_TAP2TON_CLIP}, {0x0001, RX_DIAG_ACYA} }; -static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = { +static const struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = { .reg_pairs = pcie_100_ext_no_ssc_cmn_regs, .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs), }; -static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = { +static const struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = { .reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs, .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs), }; -static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = { +static const struct cdns_torrent_vals_entry link_cmn_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &pcie_dp_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals}, @@ -4647,7 +4700,7 @@ static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_link_cmn_vals}, }; -static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { +static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &dp_pcie_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals}, @@ -4683,7 +4736,7 @@ static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_xcvr_diag_ln_vals}, }; -static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = { +static const struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals}, 
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals}, @@ -4691,7 +4744,7 @@ static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals}, }; -static struct cdns_torrent_vals_entry cmn_vals_entries[] = { +static const struct cdns_torrent_vals_entry cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals}, @@ -4773,7 +4826,7 @@ static struct cdns_torrent_vals_entry cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_pll0_156_25_no_ssc_cmn_vals}, }; -static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { +static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals}, @@ -4855,7 +4908,7 @@ static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals}, }; -static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { +static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals}, @@ -4966,14 +5019,14 @@ static const struct cdns_torrent_data cdns_map_torrent = { }, }; -static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = { +static const struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &ti_usxgmii_phy_pma_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &ti_usxgmii_phy_pma_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &ti_usxgmii_phy_pma_cmn_vals}, }; -static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = { +static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals}, @@ -5089,7 +5142,7 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = { }; /* TI J7200 (Torrent SD0805) */ -static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { +static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals}, @@ -5171,7 +5224,7 @@ static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals}, }; -static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { +static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), 
&sl_dp_19_2_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals}, @@ -5253,7 +5306,7 @@ static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals}, }; -static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { +static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals}, diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c index c138cd4807d6f9..c843923252aace 100644 --- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c +++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c @@ -138,7 +138,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct hisi_inno_phy_priv *priv; struct phy_provider *provider; - struct device_node *child; int i = 0; int ret; @@ -162,24 +161,20 @@ static int hisi_inno_phy_probe(struct platform_device *pdev) priv->type = (uintptr_t) of_device_get_match_data(dev); - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { struct reset_control *rst; struct phy *phy; rst = of_reset_control_get_exclusive(child, NULL); - if (IS_ERR(rst)) { - of_node_put(child); + if (IS_ERR(rst)) return PTR_ERR(rst); - } priv->ports[i].utmi_rst = rst; priv->ports[i].priv = priv; phy = devm_phy_create(dev, child, &hisi_inno_phy_ops); - if (IS_ERR(phy)) { - of_node_put(child); + if (IS_ERR(phy)) return PTR_ERR(phy); - } phy_set_bus_width(phy, 8); phy_set_drvdata(phy, &priv->ports[i]); @@ -187,7 +182,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev) if (i >= INNO_PHY_PORT_NUM) { dev_warn(dev, "Support %d ports in maximum\n", i); - of_node_put(child); break; } } diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c index da5e8f4057490c..fefc02d921e699 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c @@ -244,8 +244,8 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = { GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H), GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII), - ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_2500BASEX), - ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, -1, COMPHY_FW_MODE_XFI), + ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX), + ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, 0x1, COMPHY_FW_MODE_XFI), ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI), /* lane 5 */ ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI), diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 25b86bbb9cec04..3f7095ec5978e3 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -1577,12 +1577,11 @@ static int mtk_tphy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct device_node *child_np; struct phy_provider *provider; struct resource *sif_res; struct mtk_tphy *tphy; struct resource res; - int port, retval; + int port; 
tphy = devm_kzalloc(dev, sizeof(*tphy), GFP_KERNEL); if (!tphy) @@ -1623,25 +1622,23 @@ static int mtk_tphy_probe(struct platform_device *pdev) } port = 0; - for_each_child_of_node(np, child_np) { + for_each_child_of_node_scoped(np, child_np) { struct mtk_phy_instance *instance; struct clk_bulk_data *clks; struct device *subdev; struct phy *phy; + int retval; instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); - if (!instance) { - retval = -ENOMEM; - goto put_child; - } + if (!instance) + return -ENOMEM; tphy->phys[port] = instance; phy = devm_phy_create(dev, child_np, &mtk_tphy_ops); if (IS_ERR(phy)) { dev_err(dev, "failed to create phy\n"); - retval = PTR_ERR(phy); - goto put_child; + return PTR_ERR(phy); } subdev = &phy->dev; @@ -1649,14 +1646,12 @@ static int mtk_tphy_probe(struct platform_device *pdev) if (retval) { dev_err(subdev, "failed to get address resource(id-%d)\n", port); - goto put_child; + return retval; } instance->port_base = devm_ioremap_resource(subdev, &res); - if (IS_ERR(instance->port_base)) { - retval = PTR_ERR(instance->port_base); - goto put_child; - } + if (IS_ERR(instance->port_base)) + return PTR_ERR(instance->port_base); instance->phy = phy; instance->index = port; @@ -1668,19 +1663,16 @@ static int mtk_tphy_probe(struct platform_device *pdev) clks[1].id = "da_ref"; /* analog clock */ retval = devm_clk_bulk_get_optional(subdev, TPHY_CLKS_CNT, clks); if (retval) - goto put_child; + return retval; retval = phy_type_syscon_get(instance, child_np); if (retval) - goto put_child; + return retval; } provider = devm_of_phy_provider_register(dev, mtk_phy_xlate); return PTR_ERR_OR_ZERO(provider); -put_child: - of_node_put(child_np); - return retval; } static struct platform_driver mtk_tphy_driver = { diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c index 064fd09417275e..7c248f5cfca5dc 100644 --- a/drivers/phy/mediatek/phy-mtk-xsphy.c +++ b/drivers/phy/mediatek/phy-mtk-xsphy.c @@ -432,12 +432,11 @@ static int mtk_xsphy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct device_node *child_np; struct phy_provider *provider; struct resource *glb_res; struct mtk_xsphy *xsphy; struct resource res; - int port, retval; + int port; xsphy = devm_kzalloc(dev, sizeof(*xsphy), GFP_KERNEL); if (!xsphy) @@ -471,37 +470,34 @@ static int mtk_xsphy_probe(struct platform_device *pdev) device_property_read_u32(dev, "mediatek,src-coef", &xsphy->src_coef); port = 0; - for_each_child_of_node(np, child_np) { + for_each_child_of_node_scoped(np, child_np) { struct xsphy_instance *inst; struct phy *phy; + int retval; inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL); - if (!inst) { - retval = -ENOMEM; - goto put_child; - } + if (!inst) + return -ENOMEM; xsphy->phys[port] = inst; phy = devm_phy_create(dev, child_np, &mtk_xsphy_ops); if (IS_ERR(phy)) { dev_err(dev, "failed to create phy\n"); - retval = PTR_ERR(phy); - goto put_child; + return PTR_ERR(phy); } retval = of_address_to_resource(child_np, 0, &res); if (retval) { dev_err(dev, "failed to get address resource(id-%d)\n", port); - goto put_child; + return retval; } inst->port_base = devm_ioremap_resource(&phy->dev, &res); if (IS_ERR(inst->port_base)) { dev_err(dev, "failed to remap phy regs\n"); - retval = PTR_ERR(inst->port_base); - goto put_child; + return PTR_ERR(inst->port_base); } inst->phy = phy; @@ -512,17 +508,12 @@ static int mtk_xsphy_probe(struct platform_device *pdev) inst->ref_clk = devm_clk_get(&phy->dev, 
"ref"); if (IS_ERR(inst->ref_clk)) { dev_err(dev, "failed to get ref_clk(id-%d)\n", port); - retval = PTR_ERR(inst->ref_clk); - goto put_child; + return PTR_ERR(inst->ref_clk); } } provider = devm_of_phy_provider_register(dev, mtk_phy_xlate); return PTR_ERR_OR_ZERO(provider); - -put_child: - of_node_put(child_np); - return retval; } static struct platform_driver mtk_xsphy_driver = { diff --git a/drivers/phy/nuvoton/Kconfig b/drivers/phy/nuvoton/Kconfig new file mode 100644 index 00000000000000..d02cae2db315a8 --- /dev/null +++ b/drivers/phy/nuvoton/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# PHY drivers for Nuvoton MA35 platforms +# +config PHY_MA35_USB + tristate "Nuvoton MA35 USB2.0 PHY driver" + depends on ARCH_MA35 || COMPILE_TEST + depends on OF + select GENERIC_PHY + help + Enable this to support the USB2.0 PHY on the Nuvoton MA35 + series SoCs. diff --git a/drivers/phy/nuvoton/Makefile b/drivers/phy/nuvoton/Makefile new file mode 100644 index 00000000000000..2937e392189816 --- /dev/null +++ b/drivers/phy/nuvoton/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_PHY_MA35_USB) += phy-ma35d1-usb2.o diff --git a/drivers/phy/nuvoton/phy-ma35d1-usb2.c b/drivers/phy/nuvoton/phy-ma35d1-usb2.c new file mode 100644 index 00000000000000..9a459b700ed48c --- /dev/null +++ b/drivers/phy/nuvoton/phy-ma35d1-usb2.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Nuvoton Technology Corp. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* USB PHY Miscellaneous Control Register */ +#define MA35_SYS_REG_USBPMISCR 0x60 +#define PHY0POR BIT(0) /* PHY Power-On Reset Control Bit */ +#define PHY0SUSPEND BIT(1) /* PHY Suspend; 0: suspend, 1: operaion */ +#define PHY0COMN BIT(2) /* PHY Common Block Power-Down Control */ +#define PHY0DEVCKSTB BIT(10) /* PHY 60 MHz UTMI clock stable bit */ + +struct ma35_usb_phy { + struct clk *clk; + struct device *dev; + struct regmap *sysreg; +}; + +static int ma35_usb_phy_power_on(struct phy *phy) +{ + struct ma35_usb_phy *p_phy = phy_get_drvdata(phy); + unsigned int val; + int ret; + + ret = clk_prepare_enable(p_phy->clk); + if (ret < 0) { + dev_err(p_phy->dev, "Failed to enable PHY clock: %d\n", ret); + return ret; + } + + regmap_read(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, &val); + if (val & PHY0SUSPEND) { + /* + * USB PHY0 is in operation mode already + * make sure USB PHY 60 MHz UTMI Interface Clock ready + */ + ret = regmap_read_poll_timeout(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, val, + val & PHY0DEVCKSTB, 10, 1000); + if (ret == 0) + return 0; + } + + /* + * reset USB PHY0. 
+ * wait until USB PHY0 60 MHz UTMI Interface Clock ready + */ + regmap_update_bits(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, 0x7, (PHY0POR | PHY0SUSPEND)); + udelay(20); + + /* make USB PHY0 enter operation mode */ + regmap_update_bits(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, 0x7, PHY0SUSPEND); + + /* make sure USB PHY 60 MHz UTMI Interface Clock ready */ + ret = regmap_read_poll_timeout(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, val, + val & PHY0DEVCKSTB, 10, 1000); + if (ret == -ETIMEDOUT) { + dev_err(p_phy->dev, "Check PHY clock, Timeout: %d\n", ret); + clk_disable_unprepare(p_phy->clk); + return ret; + } + + return 0; +} + +static int ma35_usb_phy_power_off(struct phy *phy) +{ + struct ma35_usb_phy *p_phy = phy_get_drvdata(phy); + + clk_disable_unprepare(p_phy->clk); + return 0; +} + +static const struct phy_ops ma35_usb_phy_ops = { + .power_on = ma35_usb_phy_power_on, + .power_off = ma35_usb_phy_power_off, + .owner = THIS_MODULE, +}; + +static int ma35_usb_phy_probe(struct platform_device *pdev) +{ + struct phy_provider *provider; + struct ma35_usb_phy *p_phy; + struct phy *phy; + + p_phy = devm_kzalloc(&pdev->dev, sizeof(*p_phy), GFP_KERNEL); + if (!p_phy) + return -ENOMEM; + + p_phy->dev = &pdev->dev; + platform_set_drvdata(pdev, p_phy); + + p_phy->sysreg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "nuvoton,sys"); + if (IS_ERR(p_phy->sysreg)) + return dev_err_probe(&pdev->dev, PTR_ERR(p_phy->sysreg), + "Failed to get SYS registers\n"); + + p_phy->clk = of_clk_get(pdev->dev.of_node, 0); + if (IS_ERR(p_phy->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(p_phy->clk), + "failed to find usb_phy clock\n"); + + phy = devm_phy_create(&pdev->dev, NULL, &ma35_usb_phy_ops); + if (IS_ERR(phy)) + return dev_err_probe(&pdev->dev, PTR_ERR(phy), "Failed to create PHY\n"); + + phy_set_drvdata(phy, p_phy); + + provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + return dev_err_probe(&pdev->dev, PTR_ERR(provider), + "Failed to register PHY provider\n"); + return 0; +} + +static const struct of_device_id ma35_usb_phy_of_match[] = { + { .compatible = "nuvoton,ma35d1-usb2-phy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, ma35_usb_phy_of_match); + +static struct platform_driver ma35_usb_phy_driver = { + .probe = ma35_usb_phy_probe, + .driver = { + .name = "ma35d1-usb2-phy", + .of_match_table = ma35_usb_phy_of_match, + }, +}; +module_platform_driver(ma35_usb_phy_driver); + +MODULE_DESCRIPTION("Nuvoton ma35d1 USB2.0 PHY driver"); +MODULE_AUTHOR("Hui-Ping Chen "); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/phy-airoha-pcie.c b/drivers/phy/phy-airoha-pcie.c index bd3edaa986c8bd..1e410eb410580c 100644 --- a/drivers/phy/phy-airoha-pcie.c +++ b/drivers/phy/phy-airoha-pcie.c @@ -18,6 +18,9 @@ #define LEQ_LEN_CTRL_MAX_VAL 7 #define FREQ_LOCK_MAX_ATTEMPT 10 +/* PCIe-PHY initialization time in ms needed by the hw to complete */ +#define PHY_HW_INIT_TIME_MS 30 + enum airoha_pcie_port_gen { PCIE_PORT_GEN1 = 1, PCIE_PORT_GEN2, @@ -1181,7 +1184,8 @@ static int airoha_pcie_phy_init(struct phy *phy) airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0, PCIE_DA_XPON_CDR_PR_PWDB); - usleep_range(100, 200); + /* Wait for the PCIe PHY to complete initialization before returning */ + msleep(PHY_HW_INIT_TIME_MS); return 0; } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 7b00945f7191d7..a8adc3214bfe70 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ 
-2190,24 +2190,25 @@ static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp) void __iomem *serdes = qmp->dp_serdes; const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; - qmp_configure(serdes, cfg->dp_serdes_tbl, cfg->dp_serdes_tbl_num); + qmp_configure(qmp->dev, serdes, cfg->dp_serdes_tbl, + cfg->dp_serdes_tbl_num); switch (dp_opts->link_rate) { case 1620: - qmp_configure(serdes, cfg->serdes_tbl_rbr, - cfg->serdes_tbl_rbr_num); + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_rbr, + cfg->serdes_tbl_rbr_num); break; case 2700: - qmp_configure(serdes, cfg->serdes_tbl_hbr, - cfg->serdes_tbl_hbr_num); + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr, + cfg->serdes_tbl_hbr_num); break; case 5400: - qmp_configure(serdes, cfg->serdes_tbl_hbr2, - cfg->serdes_tbl_hbr2_num); + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr2, + cfg->serdes_tbl_hbr2_num); break; case 8100: - qmp_configure(serdes, cfg->serdes_tbl_hbr3, - cfg->serdes_tbl_hbr3_num); + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr3, + cfg->serdes_tbl_hbr3_num); break; default: /* Other link rates aren't supported */ @@ -2807,8 +2808,8 @@ static int qmp_combo_dp_power_on(struct phy *phy) qmp_combo_dp_serdes_init(qmp); - qmp_configure_lane(tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1); - qmp_configure_lane(tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2); + qmp_configure_lane(qmp->dev, tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1); + qmp_configure_lane(qmp->dev, tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2); /* Configure special DP tx tunings */ cfg->configure_dp_tx(qmp); @@ -2850,7 +2851,7 @@ static int qmp_combo_usb_power_on(struct phy *phy) unsigned int val; int ret; - qmp_configure(serdes, cfg->serdes_tbl, cfg->serdes_tbl_num); + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl, cfg->serdes_tbl_num); ret = clk_prepare_enable(qmp->pipe_clk); if (ret) { @@ -2859,16 +2860,17 @@ static int qmp_combo_usb_power_on(struct phy *phy) } /* Tx, Rx, and PCS configurations */ - qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); - qmp_configure_lane(tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); + qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_configure_lane(qmp->dev, tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); - qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); - qmp_configure_lane(rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); + qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); + qmp_configure_lane(qmp->dev, rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); - qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); + qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); if (pcs_usb) - qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num); + qmp_configure(qmp->dev, pcs_usb, cfg->pcs_usb_tbl, + cfg->pcs_usb_tbl_num); if (cfg->has_pwrdn_delay) usleep_range(10, 20); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-common.h b/drivers/phy/qualcomm/phy-qcom-qmp-common.h index 7993842105093f..b945fc14cecec4 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-common.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-common.h @@ -9,6 +9,7 @@ struct qmp_phy_init_tbl { unsigned int offset; unsigned int val; + char *name; /* * mask of lanes for which this register is written * for cases when second lane needs different values @@ -20,6 +21,7 @@ struct qmp_phy_init_tbl { { \ .offset = o, \ .val = v, \ + .name = #o, \ .lane_mask = 0xff, \ } @@ -27,13 +29,13 @@ struct qmp_phy_init_tbl { { \ .offset = o, \ .val = v, \ + .name = #o, \ .lane_mask = l, \ } -static inline void qmp_configure_lane(void __iomem *base, - const struct 
qmp_phy_init_tbl tbl[], - int num, - u8 lane_mask) +static inline void qmp_configure_lane(struct device *dev, void __iomem *base, + const struct qmp_phy_init_tbl tbl[], + int num, u8 lane_mask) { int i; const struct qmp_phy_init_tbl *t = tbl; @@ -45,15 +47,16 @@ static inline void qmp_configure_lane(void __iomem *base, if (!(t->lane_mask & lane_mask)) continue; + dev_dbg(dev, "Writing Reg: %s Offset: 0x%04x Val: 0x%02x\n", + t->name, t->offset, t->val); writel(t->val, base + t->offset); } } -static inline void qmp_configure(void __iomem *base, - const struct qmp_phy_init_tbl tbl[], - int num) +static inline void qmp_configure(struct device *dev, void __iomem *base, + const struct qmp_phy_init_tbl tbl[], int num) { - qmp_configure_lane(base, tbl, num, 0xff); + qmp_configure_lane(dev, base, tbl, num, 0xff); } #endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c index 0442b31205638c..a7c65cfe31dfb8 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c @@ -288,7 +288,7 @@ static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy) unsigned int val; int ret; - qmp_configure(serdes, serdes_tbl, serdes_tbl_num); + qmp_configure(qmp->dev, serdes, serdes_tbl, serdes_tbl_num); qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET); qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL], @@ -431,9 +431,9 @@ static int qmp_pcie_msm8996_power_on(struct phy *phy) } /* Tx, Rx, and PCS configurations */ - qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); - qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); - qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); + qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); + qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); /* * Pull out PHY from POWER DOWN state. 
@@ -725,7 +725,6 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev) { struct qcom_qmp *qmp; struct device *dev = &pdev->dev; - struct device_node *child; struct phy_provider *phy_provider; void __iomem *serdes; const struct qmp_phy_cfg *cfg = NULL; @@ -773,13 +772,13 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev) return -ENOMEM; id = 0; - for_each_available_child_of_node(dev->of_node, child) { + for_each_available_child_of_node_scoped(dev->of_node, child) { /* Create per-lane phy */ ret = qmp_pcie_msm8996_create(dev, child, id, serdes, cfg); if (ret) { dev_err(dev, "failed to create lane%d phy, %d\n", id, ret); - goto err_node_put; + return ret; } /* @@ -790,7 +789,7 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev) if (ret) { dev_err(qmp->dev, "failed to register pipe clock source\n"); - goto err_node_put; + return ret; } id++; @@ -799,10 +798,6 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev) phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); return PTR_ERR_OR_ZERO(phy_provider); - -err_node_put: - of_node_put(child); - return ret; } static struct platform_driver qmp_pcie_msm8996_driver = { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c index 06cd9787e70029..f71787fb4d7e91 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -1242,6 +1242,10 @@ static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL, 0x0f), }; +static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x1c), +}; + static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01), QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x88), @@ -3654,6 +3658,41 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x2_pciephy_cfg = { .ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl, .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl), }, + + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = sm8550_qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l), + .regs = pciephy_v6_regs_layout, + + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS_4_20, + .has_nocsr_reset = true, +}; + +static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = { + .lanes = 4, + + .offsets = &qmp_pcie_offsets_v6_20, + + .tbls = { + .serdes = x1e80100_qmp_gen4x2_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_serdes_tbl), + .tx = x1e80100_qmp_gen4x2_pcie_tx_tbl, + .tx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_tx_tbl), + .rx = x1e80100_qmp_gen4x2_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_rx_tbl), + .pcs = x1e80100_qmp_gen4x2_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_tbl), + .pcs_misc = x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl), + .ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl, + .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl), + }, + + .serdes_4ln_tbl = x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl, + .serdes_4ln_num = ARRAY_SIZE(x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl), + .reset_list = sdm845_pciephy_reset_l, .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), .vreg_list = sm8550_qmp_phy_vreg_l, @@ 
-3669,18 +3708,30 @@ static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_ { const struct qmp_phy_cfg *cfg = qmp->cfg; const struct qmp_pcie_offsets *offs = cfg->offsets; - void __iomem *tx3, *rx3, *tx4, *rx4; + void __iomem *serdes, *tx3, *rx3, *tx4, *rx4, *pcs, *pcs_misc, *ln_shrd; + serdes = qmp->port_b + offs->serdes; tx3 = qmp->port_b + offs->tx; rx3 = qmp->port_b + offs->rx; tx4 = qmp->port_b + offs->tx2; rx4 = qmp->port_b + offs->rx2; + pcs = qmp->port_b + offs->pcs; + pcs_misc = qmp->port_b + offs->pcs_misc; + ln_shrd = qmp->port_b + offs->ln_shrd; - qmp_configure_lane(tx3, tbls->tx, tbls->tx_num, 1); - qmp_configure_lane(rx3, tbls->rx, tbls->rx_num, 1); + qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num); + qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num); - qmp_configure_lane(tx4, tbls->tx, tbls->tx_num, 2); - qmp_configure_lane(rx4, tbls->rx, tbls->rx_num, 2); + qmp_configure_lane(qmp->dev, tx3, tbls->tx, tbls->tx_num, 1); + qmp_configure_lane(qmp->dev, rx3, tbls->rx, tbls->rx_num, 1); + + qmp_configure_lane(qmp->dev, tx4, tbls->tx, tbls->tx_num, 2); + qmp_configure_lane(qmp->dev, rx4, tbls->rx, tbls->rx_num, 2); + + qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num); + qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num); + + qmp_configure(qmp->dev, ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num); } static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls) @@ -3698,25 +3749,26 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c if (!tbls) return; - qmp_configure(serdes, tbls->serdes, tbls->serdes_num); + qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num); - qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1); - qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1); + qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1); + qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1); if (cfg->lanes >= 2) { - qmp_configure_lane(tx2, tbls->tx, tbls->tx_num, 2); - qmp_configure_lane(rx2, tbls->rx, tbls->rx_num, 2); + qmp_configure_lane(qmp->dev, tx2, tbls->tx, tbls->tx_num, 2); + qmp_configure_lane(qmp->dev, rx2, tbls->rx, tbls->rx_num, 2); } - qmp_configure(pcs, tbls->pcs, tbls->pcs_num); - qmp_configure(pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num); + qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num); + qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num); if (cfg->lanes >= 4 && qmp->tcsr_4ln_config) { - qmp_configure(serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num); + qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl, + cfg->serdes_4ln_num); qmp_pcie_init_port_b(qmp, tbls); } - qmp_configure(ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num); + qmp_configure(qmp->dev, ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num); } static int qmp_pcie_init(struct phy *phy) @@ -4423,6 +4475,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = { }, { .compatible = "qcom,x1e80100-qmp-gen4x2-pcie-phy", .data = &x1e80100_qmp_gen4x2_pciephy_cfg, + }, { + .compatible = "qcom,x1e80100-qmp-gen4x4-pcie-phy", + .data = &x1e80100_qmp_gen4x4_pciephy_cfg, }, { }, }; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c index a57e8a4657f4e4..d964bdfe870029 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c @@ -1527,7 +1527,7 @@ static void qmp_ufs_serdes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tb { void __iomem *serdes = 
qmp->serdes; - qmp_configure(serdes, tbls->serdes, tbls->serdes_num); + qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num); } static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls) @@ -1536,12 +1536,12 @@ static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbl void __iomem *tx = qmp->tx; void __iomem *rx = qmp->rx; - qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1); - qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1); + qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1); + qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1); if (cfg->lanes >= 2) { - qmp_configure_lane(qmp->tx2, tbls->tx, tbls->tx_num, 2); - qmp_configure_lane(qmp->rx2, tbls->rx, tbls->rx_num, 2); + qmp_configure_lane(qmp->dev, qmp->tx2, tbls->tx, tbls->tx_num, 2); + qmp_configure_lane(qmp->dev, qmp->rx2, tbls->rx, tbls->rx_num, 2); } } @@ -1549,7 +1549,7 @@ static void qmp_ufs_pcs_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls { void __iomem *pcs = qmp->pcs; - qmp_configure(pcs, tbls->pcs, tbls->pcs_num); + qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num); } static int qmp_ufs_get_gear_overlay(struct qmp_ufs *qmp, const struct qmp_phy_cfg *cfg) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index 9b0eb87b16804a..2fd49355aa3764 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -1649,7 +1649,7 @@ static int qmp_usb_serdes_init(struct qmp_usb *qmp) const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; int serdes_tbl_num = cfg->serdes_tbl_num; - qmp_configure(serdes, serdes_tbl, serdes_tbl_num); + qmp_configure(qmp->dev, serdes, serdes_tbl, serdes_tbl_num); return 0; } @@ -1730,13 +1730,13 @@ static int qmp_usb_power_on(struct phy *phy) } /* Tx, Rx, and PCS configurations */ - qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); - qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); + qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); - qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); + qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); if (pcs_usb) - qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num); + qmp_configure(qmp->dev, pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num); if (cfg->has_pwrdn_delay) usleep_range(10, 20); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c index 5cbc5fd529ebe1..d4fa1063ea61dc 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c @@ -526,7 +526,8 @@ static int qmp_usbc_power_on(struct phy *phy) unsigned int val; int ret; - qmp_configure(qmp->serdes, cfg->serdes_tbl, cfg->serdes_tbl_num); + qmp_configure(qmp->dev, qmp->serdes, cfg->serdes_tbl, + cfg->serdes_tbl_num); ret = clk_prepare_enable(qmp->pipe_clk); if (ret) { @@ -535,13 +536,13 @@ static int qmp_usbc_power_on(struct phy *phy) } /* Tx, Rx, and PCS configurations */ - qmp_configure_lane(qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); - qmp_configure_lane(qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); + qmp_configure_lane(qmp->dev, qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); + qmp_configure_lane(qmp->dev, qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); - qmp_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); - qmp_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); + qmp_configure_lane(qmp->dev, qmp->tx2, cfg->tx_tbl, 
cfg->tx_tbl_num, 2); + qmp_configure_lane(qmp->dev, qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); - qmp_configure(qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); + qmp_configure(qmp->dev, qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); /* Pull PHY out of reset state */ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index 7594f64eb7373e..58e1233051526a 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -19,12 +19,14 @@ #include #include #include +#include #include #include #include /******* USB2.0 Host registers (original offset is +0x200) *******/ #define USB2_INT_ENABLE 0x000 +#define USB2_AHB_BUS_CTR 0x008 #define USB2_USBCTR 0x00c #define USB2_SPD_RSM_TIMSET 0x10c #define USB2_OC_TIMSET 0x110 @@ -40,6 +42,10 @@ #define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) /* For EHCI */ #define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) /* For OHCI */ +/* AHB_BUS_CTR */ +#define USB2_AHB_BUS_CTR_MBL_MASK GENMASK(1, 0) +#define USB2_AHB_BUS_CTR_MBL_INCR4 2 + /* USBCTR */ #define USB2_USBCTR_DIRPD BIT(2) #define USB2_USBCTR_PLL_RST BIT(1) @@ -111,6 +117,7 @@ struct rcar_gen3_chan { struct extcon_dev *extcon; struct rcar_gen3_phy rphys[NUM_OF_PHYS]; struct regulator *vbus; + struct reset_control *rstc; struct work_struct work; struct mutex lock; /* protects rphys[...].powered */ enum usb_dr_mode dr_mode; @@ -125,6 +132,7 @@ struct rcar_gen3_chan { struct rcar_gen3_phy_drv_data { const struct phy_ops *phy_usb2_ops; bool no_adp_ctrl; + bool init_bus; }; /* @@ -575,6 +583,12 @@ static const struct rcar_gen3_phy_drv_data rz_g2l_phy_usb2_data = { .no_adp_ctrl = true, }; +static const struct rcar_gen3_phy_drv_data rz_g3s_phy_usb2_data = { + .phy_usb2_ops = &rcar_gen3_phy_usb2_ops, + .no_adp_ctrl = true, + .init_bus = true, +}; + static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = { { .compatible = "renesas,usb2-phy-r8a77470", @@ -596,6 +610,10 @@ static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = { .compatible = "renesas,rzg2l-usb2-phy", .data = &rz_g2l_phy_usb2_data, }, + { + .compatible = "renesas,usb2-phy-r9a08g045", + .data = &rz_g3s_phy_usb2_data, + }, { .compatible = "renesas,rcar-gen3-usb2-phy", .data = &rcar_gen3_phy_usb2_data, @@ -650,6 +668,35 @@ static enum usb_dr_mode rcar_gen3_get_dr_mode(struct device_node *np) return candidate; } +static int rcar_gen3_phy_usb2_init_bus(struct rcar_gen3_chan *channel) +{ + struct device *dev = channel->dev; + int ret; + u32 val; + + channel->rstc = devm_reset_control_array_get_shared(dev); + if (IS_ERR(channel->rstc)) + return PTR_ERR(channel->rstc); + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + ret = reset_control_deassert(channel->rstc); + if (ret) + goto rpm_put; + + val = readl(channel->base + USB2_AHB_BUS_CTR); + val &= ~USB2_AHB_BUS_CTR_MBL_MASK; + val |= USB2_AHB_BUS_CTR_MBL_INCR4; + writel(val, channel->base + USB2_AHB_BUS_CTR); + +rpm_put: + pm_runtime_put(dev); + + return ret; +} + static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) { const struct rcar_gen3_phy_drv_data *phy_data; @@ -703,6 +750,15 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) goto error; } + platform_set_drvdata(pdev, channel); + channel->dev = dev; + + if (phy_data->init_bus) { + ret = rcar_gen3_phy_usb2_init_bus(channel); + if (ret) + goto error; + } + channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl; if (phy_data->no_adp_ctrl) 
channel->obint_enable_bits = USB2_OBINT_IDCHG_EN; @@ -733,9 +789,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) channel->vbus = NULL; } - platform_set_drvdata(pdev, channel); - channel->dev = dev; - provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate); if (IS_ERR(provider)) { dev_err(dev, "Failed to register PHY provider\n"); @@ -762,6 +815,7 @@ static void rcar_gen3_phy_usb2_remove(struct platform_device *pdev) if (channel->is_otg_channel) device_remove_file(&pdev->dev, &dev_attr_role); + reset_control_assert(channel->rstc); pm_runtime_disable(&pdev->dev); }; diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index 946c01210ac8c0..9f084697dd05ce 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -8,6 +8,7 @@ */ #include #include +#include #include #include #include @@ -15,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -190,6 +192,8 @@ #define LN3_TX_SER_RATE_SEL_HBR2 BIT(3) #define LN3_TX_SER_RATE_SEL_HBR3 BIT(2) +#define HDMI20_MAX_RATE 600000000 + struct lcpll_config { u32 bit_rate; u8 lcvco_mode_en; @@ -272,6 +276,12 @@ struct rk_hdptx_phy { struct clk_bulk_data *clks; int nr_clks; struct reset_control_bulk_data rsts[RST_MAX]; + + /* clk provider */ + struct clk_hw hw; + unsigned long rate; + + atomic_t usage_count; }; static const struct ropll_config ropll_tmds_cfg[] = { @@ -759,6 +769,8 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx, struct ropll_config rc = {0}; int i; + hdptx->rate = rate * 100; + for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++) if (rate == ropll_tmds_cfg[i].bit_rate) { cfg = &ropll_tmds_cfg[i]; @@ -822,19 +834,6 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx, static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx, unsigned int rate) { - u32 val; - int ret; - - ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val); - if (ret) - return ret; - - if (!(val & HDPTX_O_PLL_LOCK_DONE)) { - ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate); - if (ret) - return ret; - } - rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_sb_init_seq); regmap_write(hdptx->regmap, LNTOP_REG(0200), 0x06); @@ -856,10 +855,68 @@ static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx, return rk_hdptx_post_enable_lane(hdptx); } +static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx, + unsigned int rate) +{ + u32 status; + int ret; + + if (atomic_inc_return(&hdptx->usage_count) > 1) + return 0; + + ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status); + if (ret) + goto dec_usage; + + if (status & HDPTX_O_PLL_LOCK_DONE) + dev_warn(hdptx->dev, "PLL locked by unknown consumer!\n"); + + if (rate) { + ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate); + if (ret) + goto dec_usage; + } + + return 0; + +dec_usage: + atomic_dec(&hdptx->usage_count); + return ret; +} + +static int rk_hdptx_phy_consumer_put(struct rk_hdptx_phy *hdptx, bool force) +{ + u32 status; + int ret; + + ret = atomic_dec_return(&hdptx->usage_count); + if (ret > 0) + return 0; + + if (ret < 0) { + dev_warn(hdptx->dev, "Usage count underflow!\n"); + ret = -EINVAL; + } else { + ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status); + if (!ret) { + if (status & HDPTX_O_PLL_LOCK_DONE) + rk_hdptx_phy_disable(hdptx); + return 0; + } else if (force) { + return 0; + } + } + + atomic_inc(&hdptx->usage_count); + return ret; +} + static int 
rk_hdptx_phy_power_on(struct phy *phy) { struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy); - int ret, bus_width = phy_get_bus_width(hdptx->phy); + int bus_width = phy_get_bus_width(hdptx->phy); + int ret; + /* * FIXME: Temporary workaround to pass pixel_clk_rate * from the HDMI bridge driver until phy_configure_opts_hdmi @@ -870,15 +927,13 @@ static int rk_hdptx_phy_power_on(struct phy *phy) dev_dbg(hdptx->dev, "%s bus_width=%x rate=%u\n", __func__, bus_width, rate); - ret = pm_runtime_resume_and_get(hdptx->dev); - if (ret) { - dev_err(hdptx->dev, "Failed to resume phy: %d\n", ret); + ret = rk_hdptx_phy_consumer_get(hdptx, rate); + if (ret) return ret; - } ret = rk_hdptx_ropll_tmds_mode_config(hdptx, rate); if (ret) - pm_runtime_put(hdptx->dev); + rk_hdptx_phy_consumer_put(hdptx, true); return ret; } @@ -886,16 +941,8 @@ static int rk_hdptx_phy_power_on(struct phy *phy) static int rk_hdptx_phy_power_off(struct phy *phy) { struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy); - u32 val; - int ret; - ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val); - if (ret == 0 && (val & HDPTX_O_PLL_LOCK_DONE)) - rk_hdptx_phy_disable(hdptx); - - pm_runtime_put(hdptx->dev); - - return ret; + return rk_hdptx_phy_consumer_put(hdptx, false); } static const struct phy_ops rk_hdptx_phy_ops = { @@ -904,6 +951,99 @@ static const struct phy_ops rk_hdptx_phy_ops = { .owner = THIS_MODULE, }; +static struct rk_hdptx_phy *to_rk_hdptx_phy(struct clk_hw *hw) +{ + return container_of(hw, struct rk_hdptx_phy, hw); +} + +static int rk_hdptx_phy_clk_prepare(struct clk_hw *hw) +{ + struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw); + + return rk_hdptx_phy_consumer_get(hdptx, hdptx->rate / 100); +} + +static void rk_hdptx_phy_clk_unprepare(struct clk_hw *hw) +{ + struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw); + + rk_hdptx_phy_consumer_put(hdptx, true); +} + +static unsigned long rk_hdptx_phy_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw); + + return hdptx->rate; +} + +static long rk_hdptx_phy_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + u32 bit_rate = rate / 100; + int i; + + if (rate > HDMI20_MAX_RATE) + return rate; + + for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++) + if (bit_rate == ropll_tmds_cfg[i].bit_rate) + break; + + if (i == ARRAY_SIZE(ropll_tmds_cfg) && + !rk_hdptx_phy_clk_pll_calc(bit_rate, NULL)) + return -EINVAL; + + return rate; +} + +static int rk_hdptx_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw); + + return rk_hdptx_ropll_tmds_cmn_config(hdptx, rate / 100); +} + +static const struct clk_ops hdptx_phy_clk_ops = { + .prepare = rk_hdptx_phy_clk_prepare, + .unprepare = rk_hdptx_phy_clk_unprepare, + .recalc_rate = rk_hdptx_phy_clk_recalc_rate, + .round_rate = rk_hdptx_phy_clk_round_rate, + .set_rate = rk_hdptx_phy_clk_set_rate, +}; + +static int rk_hdptx_phy_clk_register(struct rk_hdptx_phy *hdptx) +{ + struct device *dev = hdptx->dev; + const char *name, *pname; + struct clk *refclk; + int ret, id; + + refclk = devm_clk_get(dev, "ref"); + if (IS_ERR(refclk)) + return dev_err_probe(dev, PTR_ERR(refclk), + "Failed to get ref clock\n"); + + id = of_alias_get_id(dev->of_node, "hdptxphy"); + name = id > 0 ? 
"clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0"; + pname = __clk_get_name(refclk); + + hdptx->hw.init = CLK_HW_INIT(name, pname, &hdptx_phy_clk_ops, + CLK_GET_RATE_NOCACHE); + + ret = devm_clk_hw_register(dev, &hdptx->hw); + if (ret) + return dev_err_probe(dev, ret, "Failed to register clock\n"); + + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &hdptx->hw); + if (ret) + return dev_err_probe(dev, ret, + "Failed to register clk provider\n"); + return 0; +} + static int rk_hdptx_phy_runtime_suspend(struct device *dev) { struct rk_hdptx_phy *hdptx = dev_get_drvdata(dev); @@ -976,6 +1116,10 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(hdptx->grf), "Could not get GRF syscon\n"); + ret = devm_pm_runtime_enable(dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable runtime PM\n"); + hdptx->phy = devm_phy_create(dev, NULL, &rk_hdptx_phy_ops); if (IS_ERR(hdptx->phy)) return dev_err_probe(dev, PTR_ERR(hdptx->phy), @@ -985,10 +1129,6 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) phy_set_drvdata(hdptx->phy, hdptx); phy_set_bus_width(hdptx->phy, 8); - ret = devm_pm_runtime_enable(dev); - if (ret) - return dev_err_probe(dev, ret, "Failed to enable runtime PM\n"); - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) return dev_err_probe(dev, PTR_ERR(phy_provider), @@ -998,7 +1138,7 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) reset_control_deassert(hdptx->rsts[RST_CMN].rstc); reset_control_deassert(hdptx->rsts[RST_INIT].rstc); - return 0; + return rk_hdptx_phy_clk_register(hdptx); } static const struct dev_pm_ops rk_hdptx_phy_pm_ops = { diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index 9cbf9014295036..c421b495eb0fe4 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -607,7 +607,7 @@ exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(struct exynos5_usbdrd_phy *phy_drd) reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); reg &= ~SECPMACTL_PMA_REF_FREQ_SEL; - reg |= FIELD_PREP_CONST(SECPMACTL_PMA_REF_FREQ_SEL, 1); + reg |= FIELD_PREP(SECPMACTL_PMA_REF_FREQ_SEL, 1); /* SFR reset */ reg |= (SECPMACTL_PMA_LOW_PWR | SECPMACTL_PMA_APB_SW_RST); reg &= ~(SECPMACTL_PMA_ROPLL_REF_CLK_SEL | @@ -1123,19 +1123,19 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd) reg &= ~SSPPLLCTL_FSEL; switch (phy_drd->extrefclk) { case EXYNOS5_FSEL_50MHZ: - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 7); + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 7); break; case EXYNOS5_FSEL_26MHZ: - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 6); + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 6); break; case EXYNOS5_FSEL_24MHZ: - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 2); + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 2); break; case EXYNOS5_FSEL_20MHZ: - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 1); + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 1); break; case EXYNOS5_FSEL_19MHZ2: - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 0); + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 0); break; default: dev_warn(phy_drd->dev, "unsupported ref clk: %#.2x\n", diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 673449607c02cc..3bf3aff4b1c746 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -644,7 +645,6 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy, struct device_node *node = 
am654_phy->of_node; struct device *dev = am654_phy->dev; struct serdes_am654_clk_mux *mux; - struct device_node *regmap_node; const char **parent_names; struct clk_init_data *init; unsigned int num_parents; @@ -652,7 +652,6 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy, const __be32 *addr; unsigned int reg; struct clk *clk; - int ret = 0; mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); if (!mux) @@ -660,41 +659,30 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy, init = &mux->clk_data; - regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0); - if (!regmap_node) { - dev_err(dev, "Fail to get serdes-clk node\n"); - ret = -ENODEV; - goto out_put_node; - } + struct device_node *regmap_node __free(device_node) = + of_parse_phandle(node, "ti,serdes-clk", 0); + if (!regmap_node) + return dev_err_probe(dev, -ENODEV, "Fail to get serdes-clk node\n"); regmap = syscon_node_to_regmap(regmap_node->parent); - if (IS_ERR(regmap)) { - dev_err(dev, "Fail to get Syscon regmap\n"); - ret = PTR_ERR(regmap); - goto out_put_node; - } + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "Fail to get Syscon regmap\n"); num_parents = of_clk_get_parent_count(node); - if (num_parents < 2) { - dev_err(dev, "SERDES clock must have parents\n"); - ret = -EINVAL; - goto out_put_node; - } + if (num_parents < 2) + return dev_err_probe(dev, -EINVAL, "SERDES clock must have parents\n"); parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), GFP_KERNEL); - if (!parent_names) { - ret = -ENOMEM; - goto out_put_node; - } + if (!parent_names) + return -ENOMEM; of_clk_parent_fill(node, parent_names, num_parents); addr = of_get_address(regmap_node, 0, NULL, NULL); - if (!addr) { - ret = -EINVAL; - goto out_put_node; - } + if (!addr) + return -EINVAL; reg = be32_to_cpu(*addr); @@ -710,16 +698,12 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy, mux->hw.init = init; clk = devm_clk_register(dev, &mux->hw); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - goto out_put_node; - } + if (IS_ERR(clk)) + return PTR_ERR(clk); am654_phy->clks[clock_num] = clk; -out_put_node: - of_node_put(regmap_node); - return ret; + return 0; } static const struct of_device_id serdes_am654_id_table[] = { diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index b30bf740e2e0df..103b266fec7717 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -468,11 +468,9 @@ static int phy_gmii_sel_probe(struct platform_device *pdev) priv->regmap = syscon_node_to_regmap(node->parent); if (IS_ERR(priv->regmap)) { priv->regmap = device_node_to_regmap(node); - if (IS_ERR(priv->regmap)) { - ret = PTR_ERR(priv->regmap); - dev_err(dev, "Failed to get syscon %d\n", ret); - return ret; - } + if (IS_ERR(priv->regmap)) + return dev_err_probe(dev, PTR_ERR(priv->regmap), + "Failed to get syscon\n"); priv->no_offset = true; } @@ -485,11 +483,9 @@ static int phy_gmii_sel_probe(struct platform_device *pdev) priv->phy_provider = devm_of_phy_provider_register(dev, phy_gmii_sel_of_xlate); - if (IS_ERR(priv->phy_provider)) { - ret = PTR_ERR(priv->phy_provider); - dev_err(dev, "Failed to create phy provider %d\n", ret); - return ret; - } + if (IS_ERR(priv->phy_provider)) + return dev_err_probe(dev, PTR_ERR(priv->phy_provider), + "Failed to create phy provider\n"); return 0; } diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index 7f626c59702503..a6c0c5607ffd7b 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ 
b/drivers/phy/ti/phy-j721e-wiz.c @@ -1179,14 +1179,13 @@ static int wiz_clock_probe(struct wiz *wiz, struct device_node *node) ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i], clk_mux_sel[i].table); + of_node_put(clk_node); if (ret) { dev_err_probe(dev, ret, "Failed to register %s clock\n", node_name); - of_node_put(clk_node); goto err; } - of_node_put(clk_node); } for (i = 0; i < wiz->clk_div_sel_num; i++) { @@ -1199,14 +1198,12 @@ static int wiz_clock_probe(struct wiz *wiz, struct device_node *node) ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i], clk_div_sel[i].table); + of_node_put(clk_node); if (ret) { dev_err_probe(dev, ret, "Failed to register %s clock\n", node_name); - of_node_put(clk_node); goto err; } - - of_node_put(clk_node); } return 0; @@ -1407,7 +1404,7 @@ MODULE_DEVICE_TABLE(of, wiz_id_table); static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz) { - struct device_node *serdes, *subnode; + struct device_node *serdes; serdes = of_get_child_by_name(dev->of_node, "serdes"); if (!serdes) { @@ -1415,7 +1412,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz) return -EINVAL; } - for_each_child_of_node(serdes, subnode) { + for_each_child_of_node_scoped(serdes, subnode) { u32 reg, num_lanes = 1, phy_type = PHY_NONE; int ret, i; @@ -1425,7 +1422,6 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz) ret = of_property_read_u32(subnode, "reg", &reg); if (ret) { - of_node_put(subnode); dev_err(dev, "%s: Reading \"reg\" from \"%s\" failed: %d\n", __func__, subnode->name, ret); @@ -1578,8 +1574,8 @@ static int wiz_probe(struct platform_device *pdev) phy_reset_dev = &wiz->wiz_phy_reset_dev; phy_reset_dev->dev = dev; - phy_reset_dev->ops = &wiz_phy_reset_ops, - phy_reset_dev->owner = THIS_MODULE, + phy_reset_dev->ops = &wiz_phy_reset_ops; + phy_reset_dev->owner = THIS_MODULE; phy_reset_dev->of_node = node; /* Reset for each of the lane and one for the entire SERDES */ phy_reset_dev->nr_resets = num_lanes + 1; diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c index 751fecd466e3e3..c3ae9d7948d7ea 100644 --- a/drivers/phy/ti/phy-tusb1210.c +++ b/drivers/phy/ti/phy-tusb1210.c @@ -411,12 +411,6 @@ static int tusb1210_psy_get_prop(struct power_supply *psy, return 0; } -static const enum power_supply_usb_type tusb1210_psy_usb_types[] = { - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_UNKNOWN, -}; - static const enum power_supply_property tusb1210_psy_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_USB_TYPE, @@ -426,8 +420,9 @@ static const enum power_supply_property tusb1210_psy_props[] = { static const struct power_supply_desc tusb1210_psy_desc = { .name = "tusb1211-charger-detect", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = tusb1210_psy_usb_types, - .num_usb_types = ARRAY_SIZE(tusb1210_psy_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .properties = tusb1210_psy_props, .num_properties = ARRAY_SIZE(tusb1210_psy_props), .get_property = tusb1210_psy_get_prop, diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 7e4f93a3bc7ac9..354536de564b67 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -194,6 +194,13 @@ config PINCTRL_DIGICOLOR select PINMUX select GENERIC_PINCONF +config PINCTRL_EP93XX + bool + depends on ARCH_EP93XX || COMPILE_TEST + select PINMUX + select GENERIC_PINCONF + select MFD_SYSCON + config
PINCTRL_EQUILIBRIUM tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC" depends on OF && HAS_IOMEM @@ -213,6 +220,21 @@ config PINCTRL_EQUILIBRIUM desired pin functions, configure GPIO attributes for LGM SoC pins. Pin muxing and pin config settings are retrieved from device tree. +config PINCTRL_EYEQ5 + bool "Mobileye EyeQ5 pinctrl driver" + depends on OF + depends on MACH_EYEQ5 || COMPILE_TEST + select PINMUX + select GENERIC_PINCONF + select AUXILIARY_BUS + default MACH_EYEQ5 + help + Pin controller driver for the Mobileye EyeQ5 platform. It does both + pin config & pin muxing. It does not handle GPIO. + + Pin muxing supports two functions for each pin: first is GPIO, second + is pin-dependent. Pin config is about bias & drive strength. + config PINCTRL_GEMINI bool depends on ARCH_GEMINI @@ -583,6 +605,7 @@ source "drivers/pinctrl/qcom/Kconfig" source "drivers/pinctrl/realtek/Kconfig" source "drivers/pinctrl/renesas/Kconfig" source "drivers/pinctrl/samsung/Kconfig" +source "drivers/pinctrl/sophgo/Kconfig" source "drivers/pinctrl/spear/Kconfig" source "drivers/pinctrl/sprd/Kconfig" source "drivers/pinctrl/starfive/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index cc809669405ab6..97823f52b972a3 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -23,6 +23,8 @@ obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o +obj-$(CONFIG_PINCTRL_EP93XX) += pinctrl-ep93xx.o +obj-$(CONFIG_PINCTRL_EYEQ5) += pinctrl-eyeq5.o obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o @@ -73,6 +75,7 @@ obj-y += qcom/ obj-$(CONFIG_ARCH_REALTEK) += realtek/ obj-$(CONFIG_PINCTRL_RENESAS) += renesas/ obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/ +obj-y += sophgo/ obj-$(CONFIG_PINCTRL_SPEAR) += spear/ obj-y += sprd/ obj-$(CONFIG_SOC_STARFIVE) += starfive/ diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 184641e221d48d..cc1fe0555e196a 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -1280,6 +1280,7 @@ static const struct of_device_id bcm2835_pinctrl_match[] = { }, {} }; +MODULE_DEVICE_TABLE(of, bcm2835_pinctrl_match); static int bcm2835_pinctrl_probe(struct platform_device *pdev) { diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c index 898b197c373848..2932d7aba72550 100644 --- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c +++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c @@ -1063,12 +1063,9 @@ static int madera_pin_probe(struct platform_device *pdev) if (pdata->gpio_configs) { ret = pinctrl_register_mappings(pdata->gpio_configs, pdata->n_gpio_configs); - if (ret) { - dev_err(priv->dev, - "Failed to register pdata mappings (%d)\n", - ret); - return ret; - } + if (ret) + return dev_err_probe(priv->dev, ret, + "Failed to register pdata mappings\n"); } ret = pinctrl_enable(priv->pctl); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 314ab93d769182..4061890a174835 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1971,7 +1971,7 @@ static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev) static void pinctrl_init_debugfs(void) { debugfs_root = debugfs_create_dir("pinctrl", NULL); - if 
(IS_ERR(debugfs_root) || !debugfs_root) { + if (IS_ERR(debugfs_root)) { pr_warn("failed to create debugfs directory\n"); debugfs_root = NULL; return; diff --git a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c index 2991047535bce6..8f15c4c4dc4412 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c +++ b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c @@ -130,7 +130,7 @@ static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev, cfg[j++] = pinconf_to_config_packed(IMX_SCMI_PIN_DAISY_CFG, input_val); } - configs = kmemdup(cfg, ncfg * sizeof(unsigned long), GFP_KERNEL); + configs = kmemdup_array(cfg, ncfg, sizeof(unsigned long), GFP_KERNEL); new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN; new_map[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_id); diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 9c2680df082c31..d05c2c478e7950 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -804,14 +804,14 @@ int imx_pinctrl_probe(struct platform_device *pdev, } EXPORT_SYMBOL_GPL(imx_pinctrl_probe); -static int __maybe_unused imx_pinctrl_suspend(struct device *dev) +static int imx_pinctrl_suspend(struct device *dev) { struct imx_pinctrl *ipctl = dev_get_drvdata(dev); return pinctrl_force_sleep(ipctl->pctl); } -static int __maybe_unused imx_pinctrl_resume(struct device *dev) +static int imx_pinctrl_resume(struct device *dev) { struct imx_pinctrl *ipctl = dev_get_drvdata(dev); @@ -819,8 +819,7 @@ static int __maybe_unused imx_pinctrl_resume(struct device *dev) } const struct dev_pm_ops imx_pinctrl_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend, - imx_pinctrl_resume) + LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend, imx_pinctrl_resume) }; EXPORT_SYMBOL_GPL(imx_pinctrl_pm_ops); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c index 529eebe46298d2..e59e4fc8019381 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx8mq.c +++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c @@ -341,7 +341,7 @@ static struct platform_driver imx8mq_pinctrl_driver = { .driver = { .name = "imx8mq-pinctrl", .of_match_table = imx8mq_pinctrl_of_match, - .pm = &imx_pinctrl_pm_ops, + .pm = pm_sleep_ptr(&imx_pinctrl_pm_ops), .suppress_bind_attrs = true, }, .probe = imx8mq_pinctrl_probe, diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 4e87f5b875c0e8..4533c4d0a9e750 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -560,9 +560,10 @@ static DEFINE_RAW_SPINLOCK(byt_lock); static void __iomem *byt_gpio_reg(struct intel_pinctrl *vg, unsigned int offset, int reg) { - struct intel_community *comm = intel_get_community(vg, offset); + const struct intel_community *comm; u32 reg_offset; + comm = intel_get_community(vg, offset); if (!comm) return NULL; @@ -1541,10 +1542,8 @@ static int byt_gpio_probe(struct intel_pinctrl *vg) } ret = devm_gpiochip_add_data(vg->dev, gc, vg); - if (ret) { + if (ret) dev_err(vg->dev, "failed adding byt-gpio chip\n"); - return ret; - } return ret; } diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 89bd7ce6711a13..928607a21d36db 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -70,6 +70,12 @@ #define PADCFG0_PMODE_SHIFT 10 #define PADCFG0_PMODE_MASK GENMASK(13, 10) #define PADCFG0_PMODE_GPIO 0 
+#define PADCFG0_GPIODIS_SHIFT 8 +#define PADCFG0_GPIODIS_MASK GENMASK(9, 8) +#define PADCFG0_GPIODIS_NONE 0 +#define PADCFG0_GPIODIS_OUTPUT 1 +#define PADCFG0_GPIODIS_INPUT 2 +#define PADCFG0_GPIODIS_FULL 3 #define PADCFG0_GPIORXDIS BIT(9) #define PADCFG0_GPIOTXDIS BIT(8) #define PADCFG0_GPIORXSTATE BIT(1) @@ -108,13 +114,30 @@ struct intel_community_context { #define pin_to_padno(c, p) ((p) - (c)->pin_base) #define padgroup_offset(g, p) ((p) - (g)->base) -struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned int pin) +#define for_each_intel_pin_community(pctrl, community) \ + for (unsigned int __ci = 0; \ + __ci < pctrl->ncommunities && (community = &pctrl->communities[__ci]); \ + __ci++) \ + +#define for_each_intel_community_pad_group(community, grp) \ + for (unsigned int __gi = 0; \ + __gi < community->ngpps && (grp = &community->gpps[__gi]); \ + __gi++) \ + +#define for_each_intel_pad_group(pctrl, community, grp) \ + for_each_intel_pin_community(pctrl, community) \ + for_each_intel_community_pad_group(community, grp) + +#define for_each_intel_gpio_group(pctrl, community, grp) \ + for_each_intel_pad_group(pctrl, community, grp) \ + if (grp->gpio_base == INTEL_GPIO_BASE_NOMAP) {} else + +const struct intel_community *intel_get_community(const struct intel_pinctrl *pctrl, + unsigned int pin) { - struct intel_community *community; - int i; + const struct intel_community *community; - for (i = 0; i < pctrl->ncommunities; i++) { - community = &pctrl->communities[i]; + for_each_intel_pin_community(pctrl, community) { if (pin >= community->pin_base && pin < community->pin_base + community->npins) return community; @@ -129,11 +152,9 @@ static const struct intel_padgroup * intel_community_get_padgroup(const struct intel_community *community, unsigned int pin) { - int i; - - for (i = 0; i < community->ngpps; i++) { - const struct intel_padgroup *padgrp = &community->gpps[i]; + const struct intel_padgroup *padgrp; + for_each_intel_community_pad_group(community, padgrp) { if (pin >= padgrp->base && pin < padgrp->base + padgrp->size) return padgrp; } @@ -161,7 +182,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, return community->pad_regs + reg + padno * nregs * 4; } -static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned int pin) +static bool intel_pad_owned_by_host(const struct intel_pinctrl *pctrl, unsigned int pin) { const struct intel_community *community; const struct intel_padgroup *padgrp; @@ -186,7 +207,7 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned int pi return !(readl(padown) & PADOWN_MASK(gpp_offset)); } -static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned int pin) +static bool intel_pad_acpi_mode(const struct intel_pinctrl *pctrl, unsigned int pin) { const struct intel_community *community; const struct intel_padgroup *padgrp; @@ -212,7 +233,6 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned int pin) /** * enum - Locking variants of the pad configuration - * * @PAD_UNLOCKED: pad is fully controlled by the configuration registers * @PAD_LOCKED: pad configuration registers, except TX state, are locked * @PAD_LOCKED_TX: pad configuration TX state is locked @@ -229,9 +249,9 @@ enum { PAD_LOCKED_FULL = PAD_LOCKED | PAD_LOCKED_TX, }; -static int intel_pad_locked(struct intel_pinctrl *pctrl, unsigned int pin) +static int intel_pad_locked(const struct intel_pinctrl *pctrl, unsigned int pin) { - struct intel_community *community; + const struct 
intel_community *community; const struct intel_padgroup *padgrp; unsigned int offset, gpp_offset; u32 value; @@ -267,19 +287,19 @@ static int intel_pad_locked(struct intel_pinctrl *pctrl, unsigned int pin) return ret; } -static bool intel_pad_is_unlocked(struct intel_pinctrl *pctrl, unsigned int pin) +static bool intel_pad_is_unlocked(const struct intel_pinctrl *pctrl, unsigned int pin) { return (intel_pad_locked(pctrl, pin) & PAD_LOCKED) == PAD_UNLOCKED; } -static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned int pin) +static bool intel_pad_usable(const struct intel_pinctrl *pctrl, unsigned int pin) { return intel_pad_owned_by_host(pctrl, pin) && intel_pad_is_unlocked(pctrl, pin); } int intel_get_groups_count(struct pinctrl_dev *pctldev) { - struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->soc->ngroups; } @@ -287,7 +307,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_groups_count, PINCTRL_INTEL); const char *intel_get_group_name(struct pinctrl_dev *pctldev, unsigned int group) { - struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->soc->groups[group].grp.name; } @@ -296,7 +316,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_group_name, PINCTRL_INTEL); int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, const unsigned int **pins, unsigned int *npins) { - struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); *pins = pctrl->soc->groups[group].grp.pins; *npins = pctrl->soc->groups[group].grp.npins; @@ -364,7 +384,7 @@ static const struct pinctrl_ops intel_pinctrl_ops = { int intel_get_functions_count(struct pinctrl_dev *pctldev) { - struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->soc->nfunctions; } @@ -372,7 +392,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_functions_count, PINCTRL_INTEL); const char *intel_get_function_name(struct pinctrl_dev *pctldev, unsigned int function) { - struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); return pctrl->soc->functions[function].func.name; } @@ -381,7 +401,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_function_name, PINCTRL_INTEL); int intel_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function, const char * const **groups, unsigned int * const ngroups) { - struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); *groups = pctrl->soc->functions[function].func.groups; *ngroups = pctrl->soc->functions[function].func.ngroups; @@ -429,19 +449,49 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, return 0; } -static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) -{ - u32 value; +/** + * enum - Possible pad physical connections + * @PAD_CONNECT_NONE: pad is fully disconnected + * @PAD_CONNECT_INPUT: pad is in input only mode + * @PAD_CONNECT_OUTPUT: pad is in output only mode + * @PAD_CONNECT_FULL: pad is fully connected + */ +enum { + PAD_CONNECT_NONE = 0, + PAD_CONNECT_INPUT = 1, + PAD_CONNECT_OUTPUT = 2, + PAD_CONNECT_FULL = PAD_CONNECT_INPUT | PAD_CONNECT_OUTPUT, +}; - value = readl(padcfg0); - if (input) { +static int __intel_gpio_get_direction(u32 value) +{ + switch ((value 
& PADCFG0_GPIODIS_MASK) >> PADCFG0_GPIODIS_SHIFT) { + case PADCFG0_GPIODIS_FULL: + return PAD_CONNECT_NONE; + case PADCFG0_GPIODIS_OUTPUT: + return PAD_CONNECT_INPUT; + case PADCFG0_GPIODIS_INPUT: + return PAD_CONNECT_OUTPUT; + case PADCFG0_GPIODIS_NONE: + return PAD_CONNECT_FULL; + default: + return -ENOTSUPP; + }; +} + +static u32 __intel_gpio_set_direction(u32 value, bool input, bool output) +{ + if (input) value &= ~PADCFG0_GPIORXDIS; - value |= PADCFG0_GPIOTXDIS; - } else { - value &= ~PADCFG0_GPIOTXDIS; + else value |= PADCFG0_GPIORXDIS; - } - writel(value, padcfg0); + + if (output) + value &= ~PADCFG0_GPIOTXDIS; + else + value |= PADCFG0_GPIOTXDIS; + + return value; } static int __intel_gpio_get_gpio_mode(u32 value) @@ -465,8 +515,7 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) value |= PADCFG0_PMODE_GPIO; /* Disable TX buffer and enable RX (this will be input) */ - value &= ~PADCFG0_GPIORXDIS; - value |= PADCFG0_GPIOTXDIS; + value = __intel_gpio_set_direction(value, true, false); /* Disable SCI/SMI/NMI generation */ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); @@ -512,12 +561,18 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, { struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); void __iomem *padcfg0; + u32 value; padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); guard(raw_spinlock_irqsave)(&pctrl->lock); - __intel_gpio_set_direction(padcfg0, input); + value = readl(padcfg0); + if (input) + value = __intel_gpio_set_direction(value, true, false); + else + value = __intel_gpio_set_direction(value, false, true); + writel(value, padcfg0); return 0; } @@ -612,6 +667,23 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin, return 0; } +static int intel_config_get_high_impedance(struct intel_pinctrl *pctrl, unsigned int pin, + enum pin_config_param param, u32 *arg) +{ + void __iomem *padcfg0; + u32 value; + + padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); + + scoped_guard(raw_spinlock_irqsave, &pctrl->lock) + value = readl(padcfg0); + + if (__intel_gpio_get_direction(value) != PAD_CONNECT_NONE) + return -EINVAL; + + return 0; +} + static int intel_config_get_debounce(struct intel_pinctrl *pctrl, unsigned int pin, enum pin_config_param param, u32 *arg) { @@ -655,6 +727,12 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin, return ret; break; + case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: + ret = intel_config_get_high_impedance(pctrl, pin, param, &arg); + if (ret) + return ret; + break; + case PIN_CONFIG_INPUT_DEBOUNCE: ret = intel_config_get_debounce(pctrl, pin, param, &arg); if (ret) @@ -753,11 +831,34 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin, return 0; } +static void intel_gpio_set_high_impedance(struct intel_pinctrl *pctrl, unsigned int pin) +{ + void __iomem *padcfg0; + u32 value; + + padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); + + guard(raw_spinlock_irqsave)(&pctrl->lock); + + value = readl(padcfg0); + value = __intel_gpio_set_direction(value, false, false); + writel(value, padcfg0); +} + static int intel_config_set_debounce(struct intel_pinctrl *pctrl, unsigned int pin, unsigned int debounce) { void __iomem *padcfg0, *padcfg2; u32 value0, value2; + unsigned long v; + + if (debounce) { + v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC); + if (v < 3 || v > 15) + return -EINVAL; + } else { + v = 0; + } padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2); if (!padcfg2) @@ -770,21 +871,15 @@ static int 
intel_config_set_debounce(struct intel_pinctrl *pctrl, value0 = readl(padcfg0); value2 = readl(padcfg2); - /* Disable glitch filter and debouncer */ - value0 &= ~PADCFG0_PREGFRXSEL; - value2 &= ~(PADCFG2_DEBEN | PADCFG2_DEBOUNCE_MASK); - - if (debounce) { - unsigned long v; - - v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC); - if (v < 3 || v > 15) - return -EINVAL; - + value2 = (value2 & ~PADCFG2_DEBOUNCE_MASK) | (v << PADCFG2_DEBOUNCE_SHIFT); + if (v) { /* Enable glitch filter and debouncer */ value0 |= PADCFG0_PREGFRXSEL; - value2 |= v << PADCFG2_DEBOUNCE_SHIFT; value2 |= PADCFG2_DEBEN; + } else { + /* Disable glitch filter and debouncer */ + value0 &= ~PADCFG0_PREGFRXSEL; + value2 &= ~PADCFG2_DEBEN; } writel(value0, padcfg0); @@ -812,6 +907,10 @@ static int intel_config_set(struct pinctrl_dev *pctldev, unsigned int pin, return ret; break; + case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: + intel_gpio_set_high_impedance(pctrl, pin); + break; + case PIN_CONFIG_INPUT_DEBOUNCE: ret = intel_config_set_debounce(pctrl, pin, pinconf_to_config_argument(configs[i])); @@ -854,34 +953,21 @@ static const struct pinctrl_desc intel_pinctrl_desc = { * Return: a pin number and pointers to the community and pad group, which * the pin belongs to, or negative error code if translation can't be done. */ -static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset, +static int intel_gpio_to_pin(const struct intel_pinctrl *pctrl, unsigned int offset, const struct intel_community **community, const struct intel_padgroup **padgrp) { - int i; - - for (i = 0; i < pctrl->ncommunities; i++) { - const struct intel_community *comm = &pctrl->communities[i]; - int j; - - for (j = 0; j < comm->ngpps; j++) { - const struct intel_padgroup *pgrp = &comm->gpps[j]; + const struct intel_community *comm; + const struct intel_padgroup *grp; - if (pgrp->gpio_base == INTEL_GPIO_BASE_NOMAP) - continue; + for_each_intel_gpio_group(pctrl, comm, grp) { + if (offset >= grp->gpio_base && offset < grp->gpio_base + grp->size) { + if (community) + *community = comm; + if (padgrp) + *padgrp = grp; - if (offset >= pgrp->gpio_base && - offset < pgrp->gpio_base + pgrp->size) { - int pin; - - pin = pgrp->base + offset - pgrp->gpio_base; - if (community) - *community = comm; - if (padgrp) - *padgrp = pgrp; - - return pin; - } + return grp->base + offset - grp->gpio_base; } } @@ -897,7 +983,7 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset, * * Return: a GPIO offset, or negative error code if translation can't be done. 
*/ -static int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin) +static int intel_pin_to_gpio(const struct intel_pinctrl *pctrl, int pin) { const struct intel_community *community; const struct intel_padgroup *padgrp; @@ -929,7 +1015,7 @@ static int intel_gpio_get(struct gpio_chip *chip, unsigned int offset) return -EINVAL; padcfg0 = readl(reg); - if (!(padcfg0 & PADCFG0_GPIOTXDIS)) + if (__intel_gpio_get_direction(padcfg0) & PAD_CONNECT_OUTPUT) return !!(padcfg0 & PADCFG0_GPIOTXSTATE); return !!(padcfg0 & PADCFG0_GPIORXSTATE); @@ -982,10 +1068,10 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) if (padcfg0 & PADCFG0_PMODE_MASK) return -EINVAL; - if (padcfg0 & PADCFG0_GPIOTXDIS) - return GPIO_LINE_DIRECTION_IN; + if (__intel_gpio_get_direction(padcfg0) & PAD_CONNECT_OUTPUT) + return GPIO_LINE_DIRECTION_OUT; - return GPIO_LINE_DIRECTION_OUT; + return GPIO_LINE_DIRECTION_IN; } static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) @@ -1171,15 +1257,16 @@ static const struct irq_chip intel_gpio_irq_chip = { GPIOCHIP_IRQ_RESOURCE_HELPERS, }; -static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, - const struct intel_community *community) +static irqreturn_t intel_gpio_irq(int irq, void *data) { - struct gpio_chip *gc = &pctrl->chip; - unsigned int gpp; + const struct intel_community *community; + const struct intel_padgroup *padgrp; + struct intel_pinctrl *pctrl = data; int ret = 0; - for (gpp = 0; gpp < community->ngpps; gpp++) { - const struct intel_padgroup *padgrp = &community->gpps[gpp]; + /* Need to check all communities for pending interrupts */ + for_each_intel_pad_group(pctrl, community, padgrp) { + struct gpio_chip *gc = &pctrl->chip; unsigned long pending, enabled; unsigned int gpp, gpp_offset; void __iomem *reg, *is; @@ -1203,36 +1290,17 @@ static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, ret += pending ? 
1 : 0; } - return ret; -} - -static irqreturn_t intel_gpio_irq(int irq, void *data) -{ - const struct intel_community *community; - struct intel_pinctrl *pctrl = data; - unsigned int i; - int ret = 0; - - /* Need to check all communities for pending interrupts */ - for (i = 0; i < pctrl->ncommunities; i++) { - community = &pctrl->communities[i]; - ret += intel_gpio_community_irq_handler(pctrl, community); - } - return IRQ_RETVAL(ret); } static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) { - int i; + const struct intel_community *community; - for (i = 0; i < pctrl->ncommunities; i++) { - const struct intel_community *community; + for_each_intel_pin_community(pctrl, community) { void __iomem *reg, *is; unsigned int gpp; - community = &pctrl->communities[i]; - for (gpp = 0; gpp < community->ngpps; gpp++) { reg = community->regs + community->ie_offset + gpp * 4; is = community->regs + community->is_offset + gpp * 4; @@ -1257,36 +1325,17 @@ static int intel_gpio_irq_init_hw(struct gpio_chip *gc) return 0; } -static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl, - const struct intel_community *community) -{ - int ret = 0, i; - - for (i = 0; i < community->ngpps; i++) { - const struct intel_padgroup *gpp = &community->gpps[i]; - - if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP) - continue; - - ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), - gpp->gpio_base, gpp->base, - gpp->size); - if (ret) - return ret; - } - - return ret; -} - static int intel_gpio_add_pin_ranges(struct gpio_chip *gc) { struct intel_pinctrl *pctrl = gpiochip_get_data(gc); - int ret, i; - - for (i = 0; i < pctrl->ncommunities; i++) { - struct intel_community *community = &pctrl->communities[i]; + const struct intel_community *community; + const struct intel_padgroup *grp; + int ret; - ret = intel_gpio_add_community_ranges(pctrl, community); + for_each_intel_gpio_group(pctrl, community, grp) { + ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), + grp->gpio_base, grp->base, + grp->size); if (ret) { dev_err(pctrl->dev, "failed to add GPIO pin range\n"); return ret; @@ -1299,20 +1348,12 @@ static int intel_gpio_add_pin_ranges(struct gpio_chip *gc) static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl) { const struct intel_community *community; + const struct intel_padgroup *grp; unsigned int ngpio = 0; - int i, j; - for (i = 0; i < pctrl->ncommunities; i++) { - community = &pctrl->communities[i]; - for (j = 0; j < community->ngpps; j++) { - const struct intel_padgroup *gpp = &community->gpps[j]; - - if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP) - continue; - - if (gpp->gpio_base + gpp->size > ngpio) - ngpio = gpp->gpio_base + gpp->size; - } + for_each_intel_gpio_group(pctrl, community, grp) { + if (grp->gpio_base + grp->size > ngpio) + ngpio = grp->gpio_base + grp->size; } return ngpio; @@ -1682,7 +1723,8 @@ EXPORT_SYMBOL_NS_GPL(intel_pinctrl_get_soc_data, PINCTRL_INTEL); static bool __intel_gpio_is_direct_irq(u32 value) { - return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) && + return (value & PADCFG0_GPIROUTIOXAPIC) && + (__intel_gpio_get_direction(value) == PAD_CONNECT_INPUT) && (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO); } diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index 6981e2fab93f34..4d4e1257afdf75 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h @@ -264,7 +264,8 @@ int intel_pinctrl_probe_by_uid(struct platform_device 
*pdev); extern const struct dev_pm_ops intel_pinctrl_pm_ops; -struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned int pin); +const struct intel_community *intel_get_community(const struct intel_pinctrl *pctrl, + unsigned int pin); int intel_get_groups_count(struct pinctrl_dev *pctldev); const char *intel_get_group_name(struct pinctrl_dev *pctldev, unsigned int group); diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c index 1fb0bba8b386b9..bcce97f3b8975e 100644 --- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c +++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c @@ -211,7 +211,7 @@ static void __iomem *lp_gpio_reg(struct gpio_chip *chip, unsigned int offset, int reg) { struct intel_pinctrl *lg = gpiochip_get_data(chip); - struct intel_community *comm; + const struct intel_community *comm; int reg_offset; comm = intel_get_community(lg, offset); diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index e12316c4269805..87e958d827bf93 100644 --- a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -1044,11 +1044,8 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev) hw->nbase = hw->soc->nbase_names; - if (of_find_property(hw->dev->of_node, - "mediatek,rsel-resistance-in-si-unit", NULL)) - hw->rsel_si_unit = true; - else - hw->rsel_si_unit = false; + hw->rsel_si_unit = of_property_read_bool(hw->dev->of_node, + "mediatek,rsel-resistance-in-si-unit"); spin_lock_init(&hw->lock); diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-c3.c b/drivers/pinctrl/meson/pinctrl-amlogic-c3.c index 04f1e87bae990a..776d32465ab9ba 100644 --- a/drivers/pinctrl/meson/pinctrl-amlogic-c3.c +++ b/drivers/pinctrl/meson/pinctrl-amlogic-c3.c @@ -375,7 +375,7 @@ static const unsigned int spi_a_mosi_a_pins[] = { GPIOA_3 }; static const unsigned int gen_clk_a4_pins[] = { GPIOA_4 }; static const unsigned int clk12_24_a_pins[] = { GPIOA_5 }; -static struct meson_pmx_group c3_periphs_groups[] = { +static const struct meson_pmx_group c3_periphs_groups[] = { GPIO_GROUP(GPIOE_0), GPIO_GROUP(GPIOE_1), GPIO_GROUP(GPIOE_2), @@ -987,7 +987,7 @@ static const char * const lcd_groups[] = { "lcd_clk_a", "lcd_clk_x", "lcd_hs", "lcd_vs", }; -static struct meson_pmx_func c3_periphs_functions[] = { +static const struct meson_pmx_func c3_periphs_functions[] = { FUNCTION(gpio_periphs), FUNCTION(uart_a), FUNCTION(uart_b), @@ -1036,7 +1036,7 @@ static struct meson_pmx_func c3_periphs_functions[] = { FUNCTION(lcd), }; -static struct meson_bank c3_periphs_banks[] = { +static const struct meson_bank c3_periphs_banks[] = { /* name first last irq pullen pull dir out in ds */ BANK_DS("X", GPIOX_0, GPIOX_13, 40, 53, 0x03, 0, 0x04, 0, 0x02, 0, 0x01, 0, 0x00, 0, 0x07, 0), @@ -1054,7 +1054,7 @@ static struct meson_bank c3_periphs_banks[] = { 0x73, 0, 0x74, 0, 0x72, 0, 0x71, 0, 0x70, 0, 0x77, 0), }; -static struct meson_pmx_bank c3_periphs_pmx_banks[] = { +static const struct meson_pmx_bank c3_periphs_pmx_banks[] = { /* name first last reg offset */ BANK_PMX("B", GPIOB_0, GPIOB_14, 0x00, 0), BANK_PMX("X", GPIOX_0, GPIOX_13, 0x03, 0), @@ -1065,12 +1065,12 @@ static struct meson_pmx_bank c3_periphs_pmx_banks[] = { BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0x02, 0), }; -static struct meson_axg_pmx_data c3_periphs_pmx_banks_data = { +static const struct meson_axg_pmx_data c3_periphs_pmx_banks_data = { .pmx_banks = c3_periphs_pmx_banks, .num_pmx_banks = ARRAY_SIZE(c3_periphs_pmx_banks), }; -static 
struct meson_pinctrl_data c3_periphs_pinctrl_data = { +static const struct meson_pinctrl_data c3_periphs_pinctrl_data = { .name = "periphs-banks", .pins = c3_periphs_pins, .groups = c3_periphs_groups, diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-t7.c b/drivers/pinctrl/meson/pinctrl-amlogic-t7.c index 0aed5de3f0682f..cfd98b9dcb6878 100644 --- a/drivers/pinctrl/meson/pinctrl-amlogic-t7.c +++ b/drivers/pinctrl/meson/pinctrl-amlogic-t7.c @@ -535,7 +535,7 @@ static const unsigned int i2c0_sck_h_pins[] = { GPIOH_7 }; /* Bank H func3 */ static const unsigned int pcieck_reqn_h_pins[] = { GPIOH_2 }; -static struct meson_pmx_group t7_periphs_groups[] = { +static const struct meson_pmx_group t7_periphs_groups[] = { GPIO_GROUP(GPIOB_0), GPIO_GROUP(GPIOB_1), GPIO_GROUP(GPIOB_2), @@ -1443,7 +1443,7 @@ static const char * const mic_mute_groups[] = { "mic_mute_key", "mic_mute_led", }; -static struct meson_pmx_func t7_periphs_functions[] = { +static const struct meson_pmx_func t7_periphs_functions[] = { FUNCTION(gpio_periphs), FUNCTION(emmc), FUNCTION(nor), @@ -1524,7 +1524,7 @@ static struct meson_pmx_func t7_periphs_functions[] = { FUNCTION(mic_mute), }; -static struct meson_bank t7_periphs_banks[] = { +static const struct meson_bank t7_periphs_banks[] = { /* name first last irq pullen pull dir out in ds */ BANK_DS("D", GPIOD_0, GPIOD_12, 57, 69, 0x03, 0, 0x04, 0, 0x02, 0, 0x01, 0, 0x00, 0, 0x07, 0), @@ -1552,7 +1552,7 @@ static struct meson_bank t7_periphs_banks[] = { 0x83, 0, 0x84, 0, 0x82, 0, 0x81, 0, 0x80, 0, 0x87, 0), }; -static struct meson_pmx_bank t7_periphs_pmx_banks[] = { +static const struct meson_pmx_bank t7_periphs_pmx_banks[] = { /* name first last reg offset */ BANK_PMX("D", GPIOD_0, GPIOD_12, 0x0a, 0), BANK_PMX("E", GPIOE_0, GPIOE_6, 0x0c, 0), @@ -1568,12 +1568,12 @@ static struct meson_pmx_bank t7_periphs_pmx_banks[] = { BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0x09, 0), }; -static struct meson_axg_pmx_data t7_periphs_pmx_banks_data = { +static const struct meson_axg_pmx_data t7_periphs_pmx_banks_data = { .pmx_banks = t7_periphs_pmx_banks, .num_pmx_banks = ARRAY_SIZE(t7_periphs_pmx_banks), }; -static struct meson_pinctrl_data t7_periphs_pinctrl_data = { +static const struct meson_pinctrl_data t7_periphs_pinctrl_data = { .name = "periphs-banks", .pins = t7_periphs_pins, .groups = t7_periphs_groups, diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c index d2ac9ca72a3ee5..20c4323d4223d5 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-a1.c +++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c @@ -339,7 +339,7 @@ static const unsigned int tst_out11_pins[] = { GPIOA_11 }; static const unsigned int mute_key_pins[] = { GPIOA_4 }; static const unsigned int mute_en_pins[] = { GPIOA_5 }; -static struct meson_pmx_group meson_a1_periphs_groups[] = { +static const struct meson_pmx_group meson_a1_periphs_groups[] = { GPIO_GROUP(GPIOP_0), GPIO_GROUP(GPIOP_1), GPIO_GROUP(GPIOP_2), @@ -832,7 +832,7 @@ static const char * const mute_groups[] = { "mute_key", "mute_en", }; -static struct meson_pmx_func meson_a1_periphs_functions[] = { +static const struct meson_pmx_func meson_a1_periphs_functions[] = { FUNCTION(gpio_periphs), FUNCTION(psram), FUNCTION(pwm_a), @@ -875,7 +875,7 @@ static struct meson_pmx_func meson_a1_periphs_functions[] = { FUNCTION(mute), }; -static struct meson_bank meson_a1_periphs_banks[] = { +static const struct meson_bank meson_a1_periphs_banks[] = { /* name first last irq pullen pull dir out in ds*/ BANK_DS("P", GPIOP_0, GPIOP_12, 0, 
12, 0x3, 0, 0x4, 0, 0x2, 0, 0x1, 0, 0x0, 0, 0x5, 0), @@ -889,7 +889,7 @@ static struct meson_bank meson_a1_periphs_banks[] = { 0x42, 0, 0x41, 0, 0x40, 0, 0x45, 0), }; -static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = { +static const struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = { /* name first lask reg offset */ BANK_PMX("P", GPIOP_0, GPIOP_12, 0x0, 0), BANK_PMX("B", GPIOB_0, GPIOB_6, 0x2, 0), @@ -898,12 +898,12 @@ static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = { BANK_PMX("A", GPIOA_0, GPIOA_11, 0x8, 0), }; -static struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = { +static const struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = { .pmx_banks = meson_a1_periphs_pmx_banks, .num_pmx_banks = ARRAY_SIZE(meson_a1_periphs_pmx_banks), }; -static struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = { +static const struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = { .name = "periphs-banks", .pins = meson_a1_periphs_pins, .groups = meson_a1_periphs_groups, diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c index cad411d9072779..00c3829216d6a4 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c +++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c @@ -27,10 +27,10 @@ static int meson_axg_pmx_get_bank(struct meson_pinctrl *pc, unsigned int pin, - struct meson_pmx_bank **bank) + const struct meson_pmx_bank **bank) { int i; - struct meson_axg_pmx_data *pmx = pc->data->pmx_data; + const struct meson_axg_pmx_data *pmx = pc->data->pmx_data; for (i = 0; i < pmx->num_pmx_banks; i++) if (pin >= pmx->pmx_banks[i].first && @@ -42,7 +42,7 @@ static int meson_axg_pmx_get_bank(struct meson_pinctrl *pc, return -EINVAL; } -static int meson_pmx_calc_reg_and_offset(struct meson_pmx_bank *bank, +static int meson_pmx_calc_reg_and_offset(const struct meson_pmx_bank *bank, unsigned int pin, unsigned int *reg, unsigned int *offset) { @@ -59,10 +59,10 @@ static int meson_pmx_calc_reg_and_offset(struct meson_pmx_bank *bank, static int meson_axg_pmx_update_function(struct meson_pinctrl *pc, unsigned int pin, unsigned int func) { + const struct meson_pmx_bank *bank; int ret; int reg; int offset; - struct meson_pmx_bank *bank; ret = meson_axg_pmx_get_bank(pc, pin, &bank); if (ret) @@ -82,8 +82,8 @@ static int meson_axg_pmx_set_mux(struct pinctrl_dev *pcdev, int i; int ret; struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev); - struct meson_pmx_func *func = &pc->data->funcs[func_num]; - struct meson_pmx_group *group = &pc->data->groups[group_num]; + const struct meson_pmx_func *func = &pc->data->funcs[func_num]; + const struct meson_pmx_group *group = &pc->data->groups[group_num]; struct meson_pmx_axg_data *pmx_data = (struct meson_pmx_axg_data *)group->data; diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h index 67147ebaef1bee..63b9d471e98025 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h +++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h @@ -17,7 +17,7 @@ struct meson_pmx_bank { }; struct meson_axg_pmx_data { - struct meson_pmx_bank *pmx_banks; + const struct meson_pmx_bank *pmx_banks; unsigned int num_pmx_banks; }; diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c index 8f4e7154b73fa3..fa2df489639013 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c @@ -352,7 +352,7 @@ static const unsigned int tdmb_dout2_pins[] = {GPIOA_12}; 
static const unsigned int tdmb_din3_pins[] = {GPIOA_13}; static const unsigned int tdmb_dout3_pins[] = {GPIOA_13}; -static struct meson_pmx_group meson_axg_periphs_groups[] = { +static const struct meson_pmx_group meson_axg_periphs_groups[] = { GPIO_GROUP(GPIOZ_0), GPIO_GROUP(GPIOZ_1), GPIO_GROUP(GPIOZ_2), @@ -675,7 +675,7 @@ static const unsigned int jtag_ao_tms_pins[] = {GPIOAO_7}; /* gen_clk */ static const unsigned int gen_clk_ee_pins[] = {GPIOAO_13}; -static struct meson_pmx_group meson_axg_aobus_groups[] = { +static const struct meson_pmx_group meson_axg_aobus_groups[] = { GPIO_GROUP(GPIOAO_0), GPIO_GROUP(GPIOAO_1), GPIO_GROUP(GPIOAO_2), @@ -955,7 +955,7 @@ static const char * const gen_clk_ee_groups[] = { "gen_clk_ee", }; -static struct meson_pmx_func meson_axg_periphs_functions[] = { +static const struct meson_pmx_func meson_axg_periphs_functions[] = { FUNCTION(gpio_periphs), FUNCTION(emmc), FUNCTION(nor), @@ -987,7 +987,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = { FUNCTION(tdmc), }; -static struct meson_pmx_func meson_axg_aobus_functions[] = { +static const struct meson_pmx_func meson_axg_aobus_functions[] = { FUNCTION(gpio_aobus), FUNCTION(uart_ao_a), FUNCTION(uart_ao_b), @@ -1003,7 +1003,7 @@ static struct meson_pmx_func meson_axg_aobus_functions[] = { FUNCTION(gen_clk_ee), }; -static struct meson_bank meson_axg_periphs_banks[] = { +static const struct meson_bank meson_axg_periphs_banks[] = { /* name first last irq pullen pull dir out in */ BANK("Z", GPIOZ_0, GPIOZ_10, 14, 24, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0), BANK("BOOT", BOOT_0, BOOT_14, 25, 39, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0), @@ -1012,12 +1012,12 @@ static struct meson_bank meson_axg_periphs_banks[] = { BANK("Y", GPIOY_0, GPIOY_15, 84, 99, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0), }; -static struct meson_bank meson_axg_aobus_banks[] = { +static const struct meson_bank meson_axg_aobus_banks[] = { /* name first last irq pullen pull dir out in */ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; -static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = { +static const struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = { /* name first lask reg offset */ BANK_PMX("Z", GPIOZ_0, GPIOZ_10, 0x2, 0), BANK_PMX("BOOT", BOOT_0, BOOT_14, 0x0, 0), @@ -1026,21 +1026,21 @@ static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = { BANK_PMX("Y", GPIOY_0, GPIOY_15, 0x8, 0), }; -static struct meson_axg_pmx_data meson_axg_periphs_pmx_banks_data = { +static const struct meson_axg_pmx_data meson_axg_periphs_pmx_banks_data = { .pmx_banks = meson_axg_periphs_pmx_banks, .num_pmx_banks = ARRAY_SIZE(meson_axg_periphs_pmx_banks), }; -static struct meson_pmx_bank meson_axg_aobus_pmx_banks[] = { +static const struct meson_pmx_bank meson_axg_aobus_pmx_banks[] = { BANK_PMX("AO", GPIOAO_0, GPIOAO_13, 0x0, 0), }; -static struct meson_axg_pmx_data meson_axg_aobus_pmx_banks_data = { +static const struct meson_axg_pmx_data meson_axg_aobus_pmx_banks_data = { .pmx_banks = meson_axg_aobus_pmx_banks, .num_pmx_banks = ARRAY_SIZE(meson_axg_aobus_pmx_banks), }; -static struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = { +static const struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = { .name = "periphs-banks", .pins = meson_axg_periphs_pins, .groups = meson_axg_periphs_groups, @@ -1054,7 +1054,7 @@ static struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = { .pmx_data = &meson_axg_periphs_pmx_banks_data, }; -static struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = { +static const struct 
meson_pinctrl_data meson_axg_aobus_pinctrl_data = { .name = "aobus-banks", .pins = meson_axg_aobus_pins, .groups = meson_axg_aobus_groups, diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c index 32830269a5b43b..e2788bfc5874ef 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c +++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c @@ -436,7 +436,7 @@ static const unsigned int tdm_c_dout1_z_pins[] = { GPIOZ_3 }; static const unsigned int tdm_c_dout2_z_pins[] = { GPIOZ_4 }; static const unsigned int tdm_c_dout3_z_pins[] = { GPIOZ_5 }; -static struct meson_pmx_group meson_g12a_periphs_groups[] = { +static const struct meson_pmx_group meson_g12a_periphs_groups[] = { GPIO_GROUP(GPIOZ_0), GPIO_GROUP(GPIOZ_1), GPIO_GROUP(GPIOZ_2), @@ -860,7 +860,7 @@ static const unsigned int tdm_ao_b_dout2_pins[] = { GPIOAO_6 }; /* mclk0_ao */ static const unsigned int mclk0_ao_pins[] = { GPIOAO_9 }; -static struct meson_pmx_group meson_g12a_aobus_groups[] = { +static const struct meson_pmx_group meson_g12a_aobus_groups[] = { GPIO_GROUP(GPIOAO_0), GPIO_GROUP(GPIOAO_1), GPIO_GROUP(GPIOAO_2), @@ -1253,7 +1253,7 @@ static const char * const mclk0_ao_groups[] = { "mclk0_ao", }; -static struct meson_pmx_func meson_g12a_periphs_functions[] = { +static const struct meson_pmx_func meson_g12a_periphs_functions[] = { FUNCTION(gpio_periphs), FUNCTION(emmc), FUNCTION(nor), @@ -1295,7 +1295,7 @@ static struct meson_pmx_func meson_g12a_periphs_functions[] = { FUNCTION(tdm_c), }; -static struct meson_pmx_func meson_g12a_aobus_functions[] = { +static const struct meson_pmx_func meson_g12a_aobus_functions[] = { FUNCTION(gpio_aobus), FUNCTION(uart_ao_a), FUNCTION(uart_ao_b), @@ -1317,7 +1317,7 @@ static struct meson_pmx_func meson_g12a_aobus_functions[] = { FUNCTION(mclk0_ao), }; -static struct meson_bank meson_g12a_periphs_banks[] = { +static const struct meson_bank meson_g12a_periphs_banks[] = { /* name first last irq pullen pull dir out in ds */ BANK_DS("Z", GPIOZ_0, GPIOZ_15, IRQID_GPIOZ_0, IRQID_GPIOZ_15, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0, 5, 0), @@ -1333,7 +1333,7 @@ static struct meson_bank meson_g12a_periphs_banks[] = { 2, 0, 2, 0, 6, 0, 7, 0, 8, 0, 2, 0), }; -static struct meson_bank meson_g12a_aobus_banks[] = { +static const struct meson_bank meson_g12a_aobus_banks[] = { /* name first last irq pullen pull dir out in ds */ BANK_DS("AO", GPIOAO_0, GPIOAO_11, IRQID_GPIOAO_0, IRQID_GPIOAO_11, 3, 0, 2, 0, 0, 0, 4, 0, 1, 0, 0, 0), @@ -1342,7 +1342,7 @@ static struct meson_bank meson_g12a_aobus_banks[] = { 3, 16, 2, 16, 0, 16, 4, 16, 1, 16, 1, 0), }; -static struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = { +static const struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = { /* name first last reg offset */ BANK_PMX("Z", GPIOZ_0, GPIOZ_15, 0x6, 0), BANK_PMX("H", GPIOH_0, GPIOH_8, 0xb, 0), @@ -1352,17 +1352,17 @@ static struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = { BANK_PMX("X", GPIOX_0, GPIOX_19, 0x3, 0), }; -static struct meson_axg_pmx_data meson_g12a_periphs_pmx_banks_data = { +static const struct meson_axg_pmx_data meson_g12a_periphs_pmx_banks_data = { .pmx_banks = meson_g12a_periphs_pmx_banks, .num_pmx_banks = ARRAY_SIZE(meson_g12a_periphs_pmx_banks), }; -static struct meson_pmx_bank meson_g12a_aobus_pmx_banks[] = { +static const struct meson_pmx_bank meson_g12a_aobus_pmx_banks[] = { BANK_PMX("AO", GPIOAO_0, GPIOAO_11, 0x0, 0), BANK_PMX("E", GPIOE_0, GPIOE_2, 0x1, 16), }; -static struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = { +static const 
struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = { .pmx_banks = meson_g12a_aobus_pmx_banks, .num_pmx_banks = ARRAY_SIZE(meson_g12a_aobus_pmx_banks), }; @@ -1375,7 +1375,7 @@ static int meson_g12a_aobus_parse_dt_extra(struct meson_pinctrl *pc) return 0; } -static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = { +static const struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = { .name = "periphs-banks", .pins = meson_g12a_periphs_pins, .groups = meson_g12a_periphs_groups, @@ -1389,7 +1389,7 @@ static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = { .pmx_data = &meson_g12a_periphs_pmx_banks_data, }; -static struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = { +static const struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = { .name = "aobus-banks", .pins = meson_g12a_aobus_pins, .groups = meson_g12a_aobus_groups, diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index 2867f397fec67e..4e8b9d7c2e4b2d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -307,7 +307,7 @@ static const unsigned int spdif_out_ao_13_pins[] = { GPIOAO_13 }; static const unsigned int ao_cec_pins[] = { GPIOAO_12 }; static const unsigned int ee_cec_pins[] = { GPIOAO_12 }; -static struct meson_pmx_group meson_gxbb_periphs_groups[] = { +static const struct meson_pmx_group meson_gxbb_periphs_groups[] = { GPIO_GROUP(GPIOZ_0), GPIO_GROUP(GPIOZ_1), GPIO_GROUP(GPIOZ_2), @@ -541,7 +541,7 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = { GROUP(sdcard_clk, 2, 11), }; -static struct meson_pmx_group meson_gxbb_aobus_groups[] = { +static const struct meson_pmx_group meson_gxbb_aobus_groups[] = { GPIO_GROUP(GPIOAO_0), GPIO_GROUP(GPIOAO_1), GPIO_GROUP(GPIOAO_2), @@ -798,7 +798,7 @@ static const char * const cec_ao_groups[] = { "ao_cec", "ee_cec", }; -static struct meson_pmx_func meson_gxbb_periphs_functions[] = { +static const struct meson_pmx_func meson_gxbb_periphs_functions[] = { FUNCTION(gpio_periphs), FUNCTION(emmc), FUNCTION(nor), @@ -829,7 +829,7 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = { FUNCTION(tsin_b), }; -static struct meson_pmx_func meson_gxbb_aobus_functions[] = { +static const struct meson_pmx_func meson_gxbb_aobus_functions[] = { FUNCTION(gpio_aobus), FUNCTION(uart_ao), FUNCTION(uart_ao_b), @@ -845,7 +845,7 @@ static struct meson_pmx_func meson_gxbb_aobus_functions[] = { FUNCTION(cec_ao), }; -static struct meson_bank meson_gxbb_periphs_banks[] = { +static const struct meson_bank meson_gxbb_periphs_banks[] = { /* name first last irq pullen pull dir out in */ BANK("X", GPIOX_0, GPIOX_22, 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0), BANK("Y", GPIOY_0, GPIOY_16, 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0), @@ -857,12 +857,12 @@ static struct meson_bank meson_gxbb_periphs_banks[] = { BANK("CLK", GPIOCLK_0, GPIOCLK_3, 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28), }; -static struct meson_bank meson_gxbb_aobus_banks[] = { +static const struct meson_bank meson_gxbb_aobus_banks[] = { /* name first last irq pullen pull dir out in */ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; -static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = { +static const struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = { .name = "periphs-banks", .pins = meson_gxbb_periphs_pins, .groups = meson_gxbb_periphs_groups, @@ -875,7 +875,7 @@ static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = { 
.pmx_ops = &meson8_pmx_ops, }; -static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = { +static const struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = { .name = "aobus-banks", .pins = meson_gxbb_aobus_pins, .groups = meson_gxbb_aobus_groups, diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index a2f25fa02852e3..9171de657f9780 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -301,7 +301,7 @@ static const unsigned int spdif_out_ao_9_pins[] = { GPIOAO_9 }; static const unsigned int ao_cec_pins[] = { GPIOAO_8 }; static const unsigned int ee_cec_pins[] = { GPIOAO_8 }; -static struct meson_pmx_group meson_gxl_periphs_groups[] = { +static const struct meson_pmx_group meson_gxl_periphs_groups[] = { GPIO_GROUP(GPIOZ_0), GPIO_GROUP(GPIOZ_1), GPIO_GROUP(GPIOZ_2), @@ -527,7 +527,7 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = { GROUP(pwm_f_clk, 8, 30), }; -static struct meson_pmx_group meson_gxl_aobus_groups[] = { +static const struct meson_pmx_group meson_gxl_aobus_groups[] = { GPIO_GROUP(GPIOAO_0), GPIO_GROUP(GPIOAO_1), GPIO_GROUP(GPIOAO_2), @@ -763,7 +763,7 @@ static const char * const cec_ao_groups[] = { "ao_cec", "ee_cec", }; -static struct meson_pmx_func meson_gxl_periphs_functions[] = { +static const struct meson_pmx_func meson_gxl_periphs_functions[] = { FUNCTION(gpio_periphs), FUNCTION(emmc), FUNCTION(nor), @@ -793,7 +793,7 @@ static struct meson_pmx_func meson_gxl_periphs_functions[] = { FUNCTION(tsin_b), }; -static struct meson_pmx_func meson_gxl_aobus_functions[] = { +static const struct meson_pmx_func meson_gxl_aobus_functions[] = { FUNCTION(gpio_aobus), FUNCTION(uart_ao), FUNCTION(uart_ao_b), @@ -807,7 +807,7 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = { FUNCTION(cec_ao), }; -static struct meson_bank meson_gxl_periphs_banks[] = { +static const struct meson_bank meson_gxl_periphs_banks[] = { /* name first last irq pullen pull dir out in */ BANK("X", GPIOX_0, GPIOX_18, 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0), BANK("DV", GPIODV_0, GPIODV_29, 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0), @@ -818,12 +818,12 @@ static struct meson_bank meson_gxl_periphs_banks[] = { BANK("CLK", GPIOCLK_0, GPIOCLK_1, 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28), }; -static struct meson_bank meson_gxl_aobus_banks[] = { +static const struct meson_bank meson_gxl_aobus_banks[] = { /* name first last irq pullen pull dir out in */ BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; -static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = { +static const struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = { .name = "periphs-banks", .pins = meson_gxl_periphs_pins, .groups = meson_gxl_periphs_groups, @@ -836,7 +836,7 @@ static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = { .pmx_ops = &meson8_pmx_ops, }; -static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = { +static const struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = { .name = "aobus-banks", .pins = meson_gxl_aobus_pins, .groups = meson_gxl_aobus_groups, diff --git a/drivers/pinctrl/meson/pinctrl-meson-s4.c b/drivers/pinctrl/meson/pinctrl-meson-s4.c index 60c7d5003e8a8c..872948699e9fee 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-s4.c +++ b/drivers/pinctrl/meson/pinctrl-meson-s4.c @@ -411,7 +411,7 @@ static const unsigned int s2_demod_gpio0_pins[] = { GPIOZ_12 }; static const unsigned int gen_clk_z9_pins[] = { GPIOZ_9 }; static const 
unsigned int gen_clk_z12_pins[] = { GPIOZ_12 }; -static struct meson_pmx_group meson_s4_periphs_groups[] = { +static const struct meson_pmx_group meson_s4_periphs_groups[] = { GPIO_GROUP(GPIOE_0), GPIO_GROUP(GPIOE_1), @@ -1100,7 +1100,7 @@ static const char * const s2_demod_groups[] = { "s2_demod_gpio3", "s2_demod_gpio2", "s2_demod_gpio1", "s2_demod_gpio0", }; -static struct meson_pmx_func meson_s4_periphs_functions[] = { +static const struct meson_pmx_func meson_s4_periphs_functions[] = { FUNCTION(gpio_periphs), FUNCTION(i2c0), FUNCTION(i2c1), @@ -1160,7 +1160,7 @@ static struct meson_pmx_func meson_s4_periphs_functions[] = { FUNCTION(s2_demod), }; -static struct meson_bank meson_s4_periphs_banks[] = { +static const struct meson_bank meson_s4_periphs_banks[] = { /* name first last irq pullen pull dir out in */ BANK_DS("B", GPIOB_0, GPIOB_13, 0, 13, 0x63, 0, 0x64, 0, 0x62, 0, 0x61, 0, 0x60, 0, 0x67, 0), @@ -1180,7 +1180,7 @@ static struct meson_bank meson_s4_periphs_banks[] = { 0x83, 0, 0x84, 0, 0x82, 0, 0x81, 0, 0x80, 0, 0x87, 0), }; -static struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = { +static const struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = { /*name first lask reg offset*/ BANK_PMX("B", GPIOB_0, GPIOB_13, 0x00, 0), BANK_PMX("C", GPIOC_0, GPIOC_7, 0x9, 0), @@ -1192,12 +1192,12 @@ static struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = { BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0xf, 0) }; -static struct meson_axg_pmx_data meson_s4_periphs_pmx_banks_data = { +static const struct meson_axg_pmx_data meson_s4_periphs_pmx_banks_data = { .pmx_banks = meson_s4_periphs_pmx_banks, .num_pmx_banks = ARRAY_SIZE(meson_s4_periphs_pmx_banks), }; -static struct meson_pinctrl_data meson_s4_periphs_pinctrl_data = { +static const struct meson_pinctrl_data meson_s4_periphs_pinctrl_data = { .name = "periphs-banks", .pins = meson_s4_periphs_pins, .groups = meson_s4_periphs_groups, diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index ef002b9dd46492..253a0cc57e396d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -70,7 +70,7 @@ static const unsigned int meson_bit_strides[] = { * Return: 0 on success, a negative value on error */ static int meson_get_bank(struct meson_pinctrl *pc, unsigned int pin, - struct meson_bank **bank) + const struct meson_bank **bank) { int i; @@ -94,11 +94,12 @@ static int meson_get_bank(struct meson_pinctrl *pc, unsigned int pin, * @reg: the computed register offset * @bit: the computed bit */ -static void meson_calc_reg_and_bit(struct meson_bank *bank, unsigned int pin, +static void meson_calc_reg_and_bit(const struct meson_bank *bank, + unsigned int pin, enum meson_reg_type reg_type, unsigned int *reg, unsigned int *bit) { - struct meson_reg_desc *desc = &bank->regs[reg_type]; + const struct meson_reg_desc *desc = &bank->regs[reg_type]; *bit = (desc->bit + pin - bank->first) * meson_bit_strides[reg_type]; *reg = (desc->reg + (*bit / 32)) * 4; @@ -181,7 +182,7 @@ static int meson_pinconf_set_gpio_bit(struct meson_pinctrl *pc, unsigned int reg_type, bool arg) { - struct meson_bank *bank; + const struct meson_bank *bank; unsigned int reg, bit; int ret; @@ -198,7 +199,7 @@ static int meson_pinconf_get_gpio_bit(struct meson_pinctrl *pc, unsigned int pin, unsigned int reg_type) { - struct meson_bank *bank; + const struct meson_bank *bank; unsigned int reg, bit, val; int ret; @@ -261,7 +262,7 @@ static int meson_pinconf_set_output_drive(struct meson_pinctrl *pc, static int 
meson_pinconf_disable_bias(struct meson_pinctrl *pc, unsigned int pin) { - struct meson_bank *bank; + const struct meson_bank *bank; unsigned int reg, bit = 0; int ret; @@ -280,7 +281,7 @@ static int meson_pinconf_disable_bias(struct meson_pinctrl *pc, static int meson_pinconf_enable_bias(struct meson_pinctrl *pc, unsigned int pin, bool pull_up) { - struct meson_bank *bank; + const struct meson_bank *bank; unsigned int reg, bit, val = 0; int ret; @@ -308,7 +309,7 @@ static int meson_pinconf_set_drive_strength(struct meson_pinctrl *pc, unsigned int pin, u16 drive_strength_ua) { - struct meson_bank *bank; + const struct meson_bank *bank; unsigned int reg, bit, ds_val; int ret; @@ -399,7 +400,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin, static int meson_pinconf_get_pull(struct meson_pinctrl *pc, unsigned int pin) { - struct meson_bank *bank; + const struct meson_bank *bank; unsigned int reg, bit, val; int ret, conf; @@ -435,7 +436,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc, unsigned int pin, u16 *drive_strength_ua) { - struct meson_bank *bank; + const struct meson_bank *bank; unsigned int reg, bit; unsigned int val; int ret; @@ -528,7 +529,7 @@ static int meson_pinconf_group_set(struct pinctrl_dev *pcdev, unsigned long *configs, unsigned num_configs) { struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev); - struct meson_pmx_group *group = &pc->data->groups[num_group]; + const struct meson_pmx_group *group = &pc->data->groups[num_group]; int i; dev_dbg(pc->dev, "set pinconf for group %s\n", group->name); @@ -587,8 +588,8 @@ static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value) static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio) { struct meson_pinctrl *pc = gpiochip_get_data(chip); + const struct meson_bank *bank; unsigned int reg, bit, val; - struct meson_bank *bank; int ret; ret = meson_get_bank(pc, gpio, &bank); diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h index 34fc4e8612e47c..7883ea31a001ad 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.h +++ b/drivers/pinctrl/meson/pinctrl-meson.h @@ -110,15 +110,15 @@ struct meson_bank { struct meson_pinctrl_data { const char *name; const struct pinctrl_pin_desc *pins; - struct meson_pmx_group *groups; - struct meson_pmx_func *funcs; + const struct meson_pmx_group *groups; + const struct meson_pmx_func *funcs; unsigned int num_pins; unsigned int num_groups; unsigned int num_funcs; - struct meson_bank *banks; + const struct meson_bank *banks; unsigned int num_banks; const struct pinmux_ops *pmx_ops; - void *pmx_data; + const void *pmx_data; int (*parse_dt)(struct meson_pinctrl *pc); }; diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c index 7f22aa0f8e3690..10adf52edda64c 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c +++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c @@ -32,7 +32,7 @@ static void meson8_pmx_disable_other_groups(struct meson_pinctrl *pc, unsigned int pin, int sel_group) { - struct meson_pmx_group *group; + const struct meson_pmx_group *group; struct meson8_pmx_data *pmx_data; int i, j; @@ -57,8 +57,8 @@ static int meson8_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num, unsigned group_num) { struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev); - struct meson_pmx_func *func = &pc->data->funcs[func_num]; - struct meson_pmx_group *group = &pc->data->groups[group_num]; + const struct meson_pmx_func *func = 
&pc->data->funcs[func_num]; + const struct meson_pmx_group *group = &pc->data->groups[group_num]; struct meson8_pmx_data *pmx_data = (struct meson8_pmx_data *)group->data; int i, ret = 0; diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c index dd17100efdcfc2..3da7f3799c3f61 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8.c +++ b/drivers/pinctrl/meson/pinctrl-meson8.c @@ -405,7 +405,7 @@ static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 }; static const unsigned int hdmi_cec_ao_pins[] = { GPIOAO_12 }; -static struct meson_pmx_group meson8_cbus_groups[] = { +static const struct meson_pmx_group meson8_cbus_groups[] = { GPIO_GROUP(GPIOX_0), GPIO_GROUP(GPIOX_1), GPIO_GROUP(GPIOX_2), @@ -745,7 +745,7 @@ static struct meson_pmx_group meson8_cbus_groups[] = { GROUP(sdxc_cmd_b, 2, 4), }; -static struct meson_pmx_group meson8_aobus_groups[] = { +static const struct meson_pmx_group meson8_aobus_groups[] = { GPIO_GROUP(GPIOAO_0), GPIO_GROUP(GPIOAO_1), GPIO_GROUP(GPIOAO_2), @@ -1015,7 +1015,7 @@ static const char * const hdmi_cec_ao_groups[] = { "hdmi_cec_ao" }; -static struct meson_pmx_func meson8_cbus_functions[] = { +static const struct meson_pmx_func meson8_cbus_functions[] = { FUNCTION(gpio_periphs), FUNCTION(sd_a), FUNCTION(sdxc_a), @@ -1051,7 +1051,7 @@ static struct meson_pmx_func meson8_cbus_functions[] = { FUNCTION(spdif), }; -static struct meson_pmx_func meson8_aobus_functions[] = { +static const struct meson_pmx_func meson8_aobus_functions[] = { FUNCTION(gpio_aobus), FUNCTION(uart_ao), FUNCTION(remote), @@ -1063,7 +1063,7 @@ static struct meson_pmx_func meson8_aobus_functions[] = { FUNCTION(hdmi_cec_ao), }; -static struct meson_bank meson8_cbus_banks[] = { +static const struct meson_bank meson8_cbus_banks[] = { /* name first last irq pullen pull dir out in */ BANK("X", GPIOX_0, GPIOX_21, 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0), BANK("Y", GPIOY_0, GPIOY_16, 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0), @@ -1074,12 +1074,12 @@ static struct meson_bank meson8_cbus_banks[] = { BANK("BOOT", BOOT_0, BOOT_18, 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0), }; -static struct meson_bank meson8_aobus_banks[] = { +static const struct meson_bank meson8_aobus_banks[] = { /* name first last irq pullen pull dir out in */ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; -static struct meson_pinctrl_data meson8_cbus_pinctrl_data = { +static const struct meson_pinctrl_data meson8_cbus_pinctrl_data = { .name = "cbus-banks", .pins = meson8_cbus_pins, .groups = meson8_cbus_groups, @@ -1092,7 +1092,7 @@ static struct meson_pinctrl_data meson8_cbus_pinctrl_data = { .pmx_ops = &meson8_pmx_ops, }; -static struct meson_pinctrl_data meson8_aobus_pinctrl_data = { +static const struct meson_pinctrl_data meson8_aobus_pinctrl_data = { .name = "ao-bank", .pins = meson8_aobus_pins, .groups = meson8_aobus_groups, diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index 6cd4b3ec1b4026..a71e1f41358af3 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -349,7 +349,7 @@ static const unsigned int eth_ref_clk_pins[] = { DIF_3_N }; static const unsigned int eth_mdc_pins[] = { DIF_4_P }; static const unsigned int eth_mdio_en_pins[] = { DIF_4_N }; -static struct meson_pmx_group meson8b_cbus_groups[] = { +static const struct meson_pmx_group meson8b_cbus_groups[] = { GPIO_GROUP(GPIOX_0), GPIO_GROUP(GPIOX_1), GPIO_GROUP(GPIOX_2), @@ -603,7 +603,7 @@ static struct 
meson_pmx_group meson8b_cbus_groups[] = { GROUP(eth_rxd2, 7, 23), }; -static struct meson_pmx_group meson8b_aobus_groups[] = { +static const struct meson_pmx_group meson8b_aobus_groups[] = { GPIO_GROUP(GPIOAO_0), GPIO_GROUP(GPIOAO_1), GPIO_GROUP(GPIOAO_2), @@ -869,7 +869,7 @@ static const char * const tsin_b_groups[] = { "tsin_d0_b", "tsin_clk_b", "tsin_sop_b", "tsin_d_valid_b" }; -static struct meson_pmx_func meson8b_cbus_functions[] = { +static const struct meson_pmx_func meson8b_cbus_functions[] = { FUNCTION(gpio_periphs), FUNCTION(sd_a), FUNCTION(sdxc_a), @@ -903,7 +903,7 @@ static struct meson_pmx_func meson8b_cbus_functions[] = { FUNCTION(clk_24m), }; -static struct meson_pmx_func meson8b_aobus_functions[] = { +static const struct meson_pmx_func meson8b_aobus_functions[] = { FUNCTION(gpio_aobus), FUNCTION(uart_ao), FUNCTION(uart_ao_b), @@ -917,7 +917,7 @@ static struct meson_pmx_func meson8b_aobus_functions[] = { FUNCTION(hdmi_cec), }; -static struct meson_bank meson8b_cbus_banks[] = { +static const struct meson_bank meson8b_cbus_banks[] = { /* name first last irq pullen pull dir out in */ BANK("X0..11", GPIOX_0, GPIOX_11, 97, 108, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0), BANK("X16..21", GPIOX_16, GPIOX_21, 113, 118, 4, 16, 4, 16, 0, 16, 1, 16, 2, 16), @@ -938,12 +938,12 @@ static struct meson_bank meson8b_cbus_banks[] = { BANK("DIF", DIF_0_P, DIF_4_N, -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12), }; -static struct meson_bank meson8b_aobus_banks[] = { +static const struct meson_bank meson8b_aobus_banks[] = { /* name first lastc irq pullen pull dir out in */ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; -static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = { +static const struct meson_pinctrl_data meson8b_cbus_pinctrl_data = { .name = "cbus-banks", .pins = meson8b_cbus_pins, .groups = meson8b_cbus_groups, @@ -956,7 +956,7 @@ static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = { .pmx_ops = &meson8_pmx_ops, }; -static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = { +static const struct meson_pinctrl_data meson8b_aobus_pinctrl_data = { .name = "aobus-banks", .pins = meson8b_aobus_pins, .groups = meson8b_aobus_groups, diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c index 1947da73e51210..dce601d993728c 100644 --- a/drivers/pinctrl/mvebu/pinctrl-dove.c +++ b/drivers/pinctrl/mvebu/pinctrl-dove.c @@ -767,7 +767,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev) struct resource fb_res; struct mvebu_mpp_ctrl_data *mpp_data; void __iomem *base; - int i; + int i, ret; pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev); @@ -783,13 +783,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev) clk_prepare_enable(clk); base = devm_platform_get_and_ioremap_resource(pdev, 0, &mpp_res); - if (IS_ERR(base)) - return PTR_ERR(base); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + goto err_probe; + } mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols, sizeof(*mpp_data), GFP_KERNEL); - if (!mpp_data) - return -ENOMEM; + if (!mpp_data) { + ret = -ENOMEM; + goto err_probe; + } dove_pinctrl_info.control_data = mpp_data; for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++) @@ -808,8 +812,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev) } mpp4_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mpp4_base)) - return PTR_ERR(mpp4_base); + if (IS_ERR(mpp4_base)) { + ret = PTR_ERR(mpp4_base); + goto err_probe; + } res = platform_get_resource(pdev, 
IORESOURCE_MEM, 2); if (!res) { @@ -820,8 +826,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev) } pmu_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pmu_base)) - return PTR_ERR(pmu_base); + if (IS_ERR(pmu_base)) { + ret = PTR_ERR(pmu_base); + goto err_probe; + } gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config"); if (IS_ERR(gconfmap)) { @@ -831,12 +839,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev) adjust_resource(&fb_res, (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14); gc_base = devm_ioremap_resource(&pdev->dev, &fb_res); - if (IS_ERR(gc_base)) - return PTR_ERR(gc_base); + if (IS_ERR(gc_base)) { + ret = PTR_ERR(gc_base); + goto err_probe; + } + gconfmap = devm_regmap_init_mmio(&pdev->dev, gc_base, &gc_regmap_config); - if (IS_ERR(gconfmap)) - return PTR_ERR(gconfmap); + if (IS_ERR(gconfmap)) { + ret = PTR_ERR(gconfmap); + goto err_probe; + } } /* Warn on any missing DT resource */ @@ -844,6 +857,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev) dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n"); return mvebu_pinctrl_probe(pdev); +err_probe: + clk_disable_unprepare(clk); + return ret; } static struct platform_driver dove_pinctrl_driver = { diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 47f62c89955ab6..68750b6f8e57a5 100644 --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@ -716,8 +716,7 @@ static int abx500_dt_add_map_configs(struct pinctrl_map **map, if (*num_maps == *reserved_maps) return -ENOSPC; - dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs), - GFP_KERNEL); + dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs), GFP_KERNEL); if (!dup_configs) return -ENOMEM; diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index fa78d5ecc6855c..f4f10c60c1d23b 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -601,8 +601,7 @@ static int nmk_dt_add_map_configs(struct pinctrl_map **map, if (*num_maps == *reserved_maps) return -ENOSPC; - dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs), - GFP_KERNEL); + dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs), GFP_KERNEL); if (!dup_configs) return -ENOMEM; diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c index a377d36b0eb072..471f644c5eef2c 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c @@ -241,6 +241,7 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type) npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_BOTH: + npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio); npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio); break; case IRQ_TYPE_LEVEL_LOW: @@ -315,8 +316,8 @@ static struct irq_chip npcmgpio_irqchip = { GPIOCHIP_IRQ_RESOURCE_HELPERS, }; -static const int gpi36_pins[] = { 58 }; -static const int gpi35_pins[] = { 58 }; +static const int gpi36_pins[] = { 36 }; +static const int gpi35_pins[] = { 35 }; static const int tp_jtag3_pins[] = { 44, 62, 45, 46 }; static const int tp_uart_pins[] = { 50, 51 }; @@ -437,7 +438,6 @@ static const int smb4_pins[] = { 28, 29 }; static const int smb4b_pins[] = { 18, 19 }; static const int smb4c_pins[] = { 20, 21 }; static const 
int smb4d_pins[] = { 22, 23 }; -static const int smb4den_pins[] = { 17 }; static const int smb5_pins[] = { 26, 27 }; static const int smb5b_pins[] = { 13, 12 }; static const int smb5c_pins[] = { 15, 14 }; @@ -515,7 +515,7 @@ static const int rg2_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212, static const int rg2mdio_pins[] = { 216, 217 }; static const int ddr_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212, - 213, 214, 215, 216, 217 }; + 213, 214, 215, 216, 217, 250 }; static const int iox1_pins[] = { 0, 1, 2, 3 }; static const int iox2_pins[] = { 4, 5, 6, 7 }; @@ -570,7 +570,6 @@ static const int spi3cs3_pins[] = { 189 }; static const int ddc_pins[] = { 204, 205, 206, 207 }; static const int lpc_pins[] = { 95, 161, 163, 164, 165, 166, 167 }; -static const int lpcclk_pins[] = { 168 }; static const int espi_pins[] = { 95, 161, 163, 164, 165, 166, 167, 168 }; static const int lkgpo0_pins[] = { 16 }; @@ -699,7 +698,6 @@ struct npcm8xx_pingroup { NPCM8XX_GRP(smb4b), \ NPCM8XX_GRP(smb4c), \ NPCM8XX_GRP(smb4d), \ - NPCM8XX_GRP(smb4den), \ NPCM8XX_GRP(smb5), \ NPCM8XX_GRP(smb5b), \ NPCM8XX_GRP(smb5c), \ @@ -808,7 +806,6 @@ struct npcm8xx_pingroup { NPCM8XX_GRP(spi3cs3), \ NPCM8XX_GRP(spi0cs1), \ NPCM8XX_GRP(lpc), \ - NPCM8XX_GRP(lpcclk), \ NPCM8XX_GRP(espi), \ NPCM8XX_GRP(lkgpo0), \ NPCM8XX_GRP(lkgpo1), \ @@ -948,7 +945,6 @@ NPCM8XX_SFUNC(smb4); NPCM8XX_SFUNC(smb4b); NPCM8XX_SFUNC(smb4c); NPCM8XX_SFUNC(smb4d); -NPCM8XX_SFUNC(smb4den); NPCM8XX_SFUNC(smb5); NPCM8XX_SFUNC(smb5b); NPCM8XX_SFUNC(smb5c); @@ -1056,7 +1052,6 @@ NPCM8XX_SFUNC(spi3cs2); NPCM8XX_SFUNC(spi3cs3); NPCM8XX_SFUNC(spi0cs1); NPCM8XX_SFUNC(lpc); -NPCM8XX_SFUNC(lpcclk); NPCM8XX_SFUNC(espi); NPCM8XX_SFUNC(lkgpo0); NPCM8XX_SFUNC(lkgpo1); @@ -1172,7 +1167,6 @@ static struct npcm8xx_func npcm8xx_funcs[] = { NPCM8XX_MKFUNC(smb4b), NPCM8XX_MKFUNC(smb4c), NPCM8XX_MKFUNC(smb4d), - NPCM8XX_MKFUNC(smb4den), NPCM8XX_MKFUNC(smb5), NPCM8XX_MKFUNC(smb5b), NPCM8XX_MKFUNC(smb5c), @@ -1280,7 +1274,6 @@ static struct npcm8xx_func npcm8xx_funcs[] = { NPCM8XX_MKFUNC(spi3cs3), NPCM8XX_MKFUNC(spi0cs1), NPCM8XX_MKFUNC(lpc), - NPCM8XX_MKFUNC(lpcclk), NPCM8XX_MKFUNC(espi), NPCM8XX_MKFUNC(lkgpo0), NPCM8XX_MKFUNC(lkgpo1), @@ -1347,7 +1340,7 @@ static const struct npcm8xx_pincfg pincfg[] = { NPCM8XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(16, lkgpo0, FLOCKR1, 0, smb7b, I2CSEGSEL, 27, tp_gpio2b, MFSEL7, 10, none, NONE, 0, none, NONE, 0, SLEW), - NPCM8XX_PINCFG(17, pspi, MFSEL3, 13, cp1gpio5, MFSEL6, 7, smb4den, I2CSEGSEL, 23, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(17, pspi, MFSEL3, 13, cp1gpio5, MFSEL6, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(18, pspi, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(19, pspi, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(20, hgpio0, MFSEL2, 24, smb15, MFSEL3, 8, smb4c, I2CSEGSEL, 15, none, NONE, 0, none, NONE, 0, SLEW), @@ -1365,6 +1358,8 @@ static const struct npcm8xx_pincfg pincfg[] = { NPCM8XX_PINCFG(32, spi0cs1, MFSEL1, 3, smb14b, MFSEL7, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(33, i3c4, MFSEL6, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(34, i3c4, MFSEL6, 10, none, NONE, 0, none, NONE, 0, none, NONE, 
0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(35, gpi35, MFSEL5, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), + NPCM8XX_PINCFG(36, gpi36, MFSEL5, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(37, smb3c, I2CSEGSEL, 12, smb23, MFSEL5, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(38, smb3c, I2CSEGSEL, 12, smb23, MFSEL5, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(39, smb3b, I2CSEGSEL, 11, smb22, MFSEL5, 30, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), @@ -1438,10 +1433,10 @@ static const struct npcm8xx_pincfg pincfg[] = { NPCM8XX_PINCFG(107, i3c5, MFSEL3, 22, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(108, sg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(109, sg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), - NPCM8XX_PINCFG(110, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, 0), - NPCM8XX_PINCFG(111, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, 0), - NPCM8XX_PINCFG(112, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), - NPCM8XX_PINCFG(113, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), + NPCM8XX_PINCFG(110, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(111, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(112, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(113, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(114, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(115, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(116, smb1, MFSEL1, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), @@ -1490,13 +1485,13 @@ static const struct npcm8xx_pincfg pincfg[] = { NPCM8XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(161, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), - NPCM8XX_PINCFG(162, serirq, MFSEL1, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM8XX_PINCFG(162, clkrun, MFSEL3, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM8XX_PINCFG(163, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(164, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(165, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(166, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(167, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), - NPCM8XX_PINCFG(168, lpcclk, MFSEL1, 31, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), + NPCM8XX_PINCFG(168, serirq, MFSEL1, 31, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(169, scipme, MFSEL3, 0, 
smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(170, smi, MFSEL1, 22, smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(171, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), @@ -1515,22 +1510,22 @@ static const struct npcm8xx_pincfg pincfg[] = { NPCM8XX_PINCFG(184, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(185, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(186, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), - NPCM8XX_PINCFG(187, gpo187, MFSEL7, 24, smb14b, MFSEL7, 26, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, 0), + NPCM8XX_PINCFG(187, gpo187, MFSEL7, 24, smb14b, MFSEL7, 26, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(188, gpio1889, MFSEL7, 25, spi3cs2, MFSEL4, 18, spi3quad, MFSEL4, 20, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(189, gpio1889, MFSEL7, 25, spi3cs3, MFSEL4, 19, spi3quad, MFSEL4, 20, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(190, nprd_smi, FLOCKR1, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), - NPCM8XX_PINCFG(191, spi1d23, MFSEL5, 3, spi1cs2, MFSEL5, 4, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, DSTR(0, 2)), /* XX */ - NPCM8XX_PINCFG(192, spi1d23, MFSEL5, 3, spi1cs3, MFSEL5, 5, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, DSTR(0, 2)), /* XX */ + NPCM8XX_PINCFG(191, spi1d23, MFSEL5, 3, spi1cs2, MFSEL5, 4, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, SLEW), /* XX */ + NPCM8XX_PINCFG(192, spi1d23, MFSEL5, 3, spi1cs3, MFSEL5, 5, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, SLEW), /* XX */ NPCM8XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), - NPCM8XX_PINCFG(194, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)), - NPCM8XX_PINCFG(195, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)), - NPCM8XX_PINCFG(196, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)), - NPCM8XX_PINCFG(197, smb0den, I2CSEGSEL, 22, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)), - NPCM8XX_PINCFG(198, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)), - NPCM8XX_PINCFG(199, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)), + NPCM8XX_PINCFG(194, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(195, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(196, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(197, smb0den, I2CSEGSEL, 22, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(198, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(199, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO), NPCM8XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 
GPO), - NPCM8XX_PINCFG(202, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)), + NPCM8XX_PINCFG(202, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(203, faninx, MFSEL3, 3, spi1cs0, MFSEL3, 4, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM8XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), /* DSCNT */ NPCM8XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW), /* DSCNT */ @@ -1553,10 +1548,10 @@ static const struct npcm8xx_pincfg pincfg[] = { NPCM8XX_PINCFG(226, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(227, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(228, spixcs1, MFSEL4, 28, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), - NPCM8XX_PINCFG(229, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), - NPCM8XX_PINCFG(230, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM8XX_PINCFG(229, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW), + NPCM8XX_PINCFG(230, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW), NPCM8XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 12) | SLEW), - NPCM8XX_PINCFG(233, spi1cs1, MFSEL5, 0, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEWLPC), /* slewlpc ? */ + NPCM8XX_PINCFG(233, spi1cs1, MFSEL5, 0, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), /* slewlpc ? 
*/ NPCM8XX_PINCFG(234, pwm10, MFSEL6, 13, smb20, MFSEL5, 28, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(235, pwm11, MFSEL6, 14, smb20, MFSEL5, 28, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(240, i3c0, MFSEL5, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), @@ -1567,7 +1562,8 @@ static const struct npcm8xx_pincfg pincfg[] = { NPCM8XX_PINCFG(245, i3c2, MFSEL5, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(246, i3c3, MFSEL5, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), NPCM8XX_PINCFG(247, i3c3, MFSEL5, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), - NPCM8XX_PINCFG(251, jm2, MFSEL5, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), + NPCM8XX_PINCFG(250, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM8XX_PINCFG(251, jm2, MFSEL5, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), NPCM8XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */ NPCM8XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */ NPCM8XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */ @@ -1610,6 +1606,8 @@ static const struct pinctrl_pin_desc npcm8xx_pins[] = { PINCTRL_PIN(32, "GPIO32/SMB14B_SCL/SPI0_nCS1"), PINCTRL_PIN(33, "GPIO33/I3C4_SCL"), PINCTRL_PIN(34, "GPIO34/I3C4_SDA"), + PINCTRL_PIN(35, "MCBPCK/GPI35_AHB2PCI_DIS"), + PINCTRL_PIN(36, "SYSBPCK/GPI36"), PINCTRL_PIN(37, "GPIO37/SMB3C_SDA/SMB23_SDA"), PINCTRL_PIN(38, "GPIO38/SMB3C_SCL/SMB23_SCL"), PINCTRL_PIN(39, "GPIO39/SMB3B_SDA/SMB22_SDA"), @@ -2044,7 +2042,7 @@ static int npcm8xx_gpio_request_enable(struct pinctrl_dev *pctldev, const unsigned int *pin = &offset; int mode = fn_gpio; - if (pin[0] >= 183 && pin[0] <= 189) + if ((pin[0] >= 183 && pin[0] <= 189) || pin[0] == 35 || pin[0] == 36) mode = pincfg[pin[0]].fn0; npcm8xx_setfunc(npcm->gcr_regmap, &offset, 1, mode); diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c index f2609a35c312c8..501eb296c76050 100644 --- a/drivers/pinctrl/nxp/pinctrl-s32cc.c +++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c @@ -2,7 +2,7 @@ /* * Core driver for the S32 CC (Common Chassis) pin controller * - * Copyright 2017-2022 NXP + * Copyright 2017-2022,2024 NXP * Copyright (C) 2022 SUSE LLC * Copyright 2015-2016 Freescale Semiconductor, Inc. */ @@ -39,6 +39,11 @@ #define S32_MSCR_ODE BIT(20) #define S32_MSCR_OBE BIT(21) +enum s32_write_type { + S32_PINCONF_UPDATE_ONLY, + S32_PINCONF_OVERWRITE, +}; + static struct regmap_config s32_regmap_config = { .reg_bits = 32, .val_bits = 32, @@ -431,16 +436,15 @@ static int s32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, unsigned int offset, bool input) { - unsigned int config; + /* Always enable IBE for GPIOs. This allows us to read the + * actual line value and compare it with the one set. 
+ */ + unsigned int config = S32_MSCR_IBE; unsigned int mask = S32_MSCR_IBE | S32_MSCR_OBE; - if (input) { - /* Disable output buffer and enable input buffer */ - config = S32_MSCR_IBE; - } else { - /* Disable input buffer and enable output buffer */ - config = S32_MSCR_OBE; - } + /* Enable output buffer */ + if (!input) + config |= S32_MSCR_OBE; return s32_regmap_update(pctldev, offset, mask, config); } @@ -511,6 +515,10 @@ static int s32_parse_pincfg(unsigned long pincfg, unsigned int *mask, *config |= S32_MSCR_ODE; *mask |= S32_MSCR_ODE; break; + case PIN_CONFIG_DRIVE_PUSH_PULL: + *config &= ~S32_MSCR_ODE; + *mask |= S32_MSCR_ODE; + break; case PIN_CONFIG_OUTPUT_ENABLE: if (arg) *config |= S32_MSCR_OBE; @@ -549,10 +557,11 @@ static int s32_parse_pincfg(unsigned long pincfg, unsigned int *mask, return 0; } -static int s32_pinconf_mscr_update(struct pinctrl_dev *pctldev, +static int s32_pinconf_mscr_write(struct pinctrl_dev *pctldev, unsigned int pin_id, unsigned long *configs, - unsigned int num_configs) + unsigned int num_configs, + enum s32_write_type write_type) { struct s32_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); unsigned int config = 0, mask = 0; @@ -571,10 +580,20 @@ static int s32_pinconf_mscr_update(struct pinctrl_dev *pctldev, return ret; } + /* If the MSCR configuration has to be written, + * the SSS field should not be touched. + */ + if (write_type == S32_PINCONF_OVERWRITE) + mask = (unsigned int)~S32_MSCR_SSS_MASK; + if (!config && !mask) return 0; - dev_dbg(ipctl->dev, "update: pin %u cfg 0x%x\n", pin_id, config); + if (write_type == S32_PINCONF_OVERWRITE) + dev_dbg(ipctl->dev, "set: pin %u cfg 0x%x\n", pin_id, config); + else + dev_dbg(ipctl->dev, "update: pin %u cfg 0x%x\n", pin_id, + config); return s32_regmap_update(pctldev, pin_id, mask, config); } @@ -590,8 +609,8 @@ static int s32_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id, unsigned long *configs, unsigned int num_configs) { - return s32_pinconf_mscr_update(pctldev, pin_id, configs, - num_configs); + return s32_pinconf_mscr_write(pctldev, pin_id, configs, + num_configs, S32_PINCONF_UPDATE_ONLY); } static int s32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int selector, @@ -604,8 +623,8 @@ static int s32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int selecto grp = &info->groups[selector]; for (i = 0; i < grp->data.npins; i++) { - ret = s32_pinconf_mscr_update(pctldev, grp->data.pins[i], - configs, num_configs); + ret = s32_pinconf_mscr_write(pctldev, grp->data.pins[i], + configs, num_configs, S32_PINCONF_OVERWRITE); if (ret) return ret; } diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index a499b8af5c1f19..0b13d7f17b3256 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -44,6 +44,7 @@ static const struct pin_config_item conf_items[] = { PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true), PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false), PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false), + PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_UV, "input schmitt threshold", "uV", true), PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false), PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true), PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false), @@ -177,6 +178,7 @@ static const struct pinconf_generic_params dt_params[] = { { "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 }, { "input-schmitt-disable", 
PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 }, { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, + { "input-schmitt-microvolts", PIN_CONFIG_INPUT_SCHMITT_UV, 0 }, { "low-power-disable", PIN_CONFIG_MODE_LOW_POWER, 0 }, { "low-power-enable", PIN_CONFIG_MODE_LOW_POWER, 1 }, { "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 }, diff --git a/drivers/pinctrl/pinctrl-ep93xx.c b/drivers/pinctrl/pinctrl-ep93xx.c new file mode 100644 index 00000000000000..abafbbb8fd6a50 --- /dev/null +++ b/drivers/pinctrl/pinctrl-ep93xx.c @@ -0,0 +1,1434 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for the EP93xx pin controller + * based on linux/drivers/pinctrl/pinmux-gemini.c + * + * Copyright (C) 2022 Nikita Shubin + * + * This is a group-only pin controller. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include "pinctrl-utils.h" + +#define DRIVER_NAME "pinctrl-ep93xx" + +enum ep93xx_pinctrl_model { + EP93XX_9301_PINCTRL, + EP93XX_9307_PINCTRL, + EP93XX_9312_PINCTRL, +}; + +struct ep93xx_pmx { + struct device *dev; + struct pinctrl_dev *pctl; + struct ep93xx_regmap_adev *aux_dev; + struct regmap *map; + enum ep93xx_pinctrl_model model; +}; + +static void ep93xx_pinctrl_update_bits(struct ep93xx_pmx *pmx, unsigned int reg, + unsigned int mask, unsigned int val) +{ + struct ep93xx_regmap_adev *aux = pmx->aux_dev; + + aux->update_bits(aux->map, aux->lock, reg, mask, val); +} + +struct ep93xx_pin_group { + struct pingroup grp; + u32 mask; + u32 value; +}; + +#define PMX_GROUP(_name, _pins, _mask, _value) \ + { \ + .grp = PINCTRL_PINGROUP(_name, _pins, ARRAY_SIZE(_pins)), \ + .mask = _mask, \ + .value = _value, \ + } + +#define EP93XX_SYSCON_DEVCFG 0x80 + +/* + * There are several system configuration options selectable by the DeviceCfg and SysCfg + * registers. These registers provide the selection of several pin multiplexing options and also + * provide software access to the system reset configuration options. Please refer to the + * descriptions of the registers, “DeviceCfg” on page 5-25 and “SysCfg” on page 5-34, for a + * detailed explanation. 
+ */ +#define EP93XX_SYSCON_DEVCFG_D1ONG BIT(30) +#define EP93XX_SYSCON_DEVCFG_D0ONG BIT(29) +#define EP93XX_SYSCON_DEVCFG_IONU2 BIT(28) +#define EP93XX_SYSCON_DEVCFG_GONK BIT(27) +#define EP93XX_SYSCON_DEVCFG_TONG BIT(26) +#define EP93XX_SYSCON_DEVCFG_MONG BIT(25) +#define EP93XX_SYSCON_DEVCFG_A2ONG BIT(22) +#define EP93XX_SYSCON_DEVCFG_A1ONG BIT(21) +#define EP93XX_SYSCON_DEVCFG_HONIDE BIT(11) +#define EP93XX_SYSCON_DEVCFG_GONIDE BIT(10) +#define EP93XX_SYSCON_DEVCFG_PONG BIT(9) +#define EP93XX_SYSCON_DEVCFG_EONIDE BIT(8) +#define EP93XX_SYSCON_DEVCFG_I2SONSSP BIT(7) +#define EP93XX_SYSCON_DEVCFG_I2SONAC97 BIT(6) +#define EP93XX_SYSCON_DEVCFG_RASONP3 BIT(4) + +#define PADS_MASK (GENMASK(30, 25) | BIT(22) | BIT(21) | GENMASK(11, 6) | BIT(4)) +#define PADS_MAXBIT 30 + +/* Ordered by bit index */ +static const char * const ep93xx_padgroups[] = { + NULL, NULL, NULL, NULL, + "RasOnP3", + NULL, + "I2SonAC97", + "I2SonSSP", + "EonIDE", + "PonG", + "GonIDE", + "HonIDE", + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + "A1onG", + "A2onG", + NULL, NULL, + "MonG", + "TonG", + "GonK", + "IonU2", + "D0onG", + "D1onG", +}; + +/* ep9301, ep9302 */ +static const struct pinctrl_pin_desc ep9301_pins[] = { + PINCTRL_PIN(1, "CSn[7]"), + PINCTRL_PIN(2, "CSn[6]"), + PINCTRL_PIN(3, "CSn[3]"), + PINCTRL_PIN(4, "CSn[2]"), + PINCTRL_PIN(5, "CSn[1]"), + PINCTRL_PIN(6, "AD[25]"), + PINCTRL_PIN(7, "vdd_ring"), + PINCTRL_PIN(8, "gnd_ring"), + PINCTRL_PIN(9, "AD[24]"), + PINCTRL_PIN(10, "SDCLK"), + PINCTRL_PIN(11, "AD[23]"), + PINCTRL_PIN(12, "vdd_core"), + PINCTRL_PIN(13, "gnd_core"), + PINCTRL_PIN(14, "SDWEn"), + PINCTRL_PIN(15, "SDCSn[3]"), + PINCTRL_PIN(16, "SDCSn[2]"), + PINCTRL_PIN(17, "SDCSn[1]"), + PINCTRL_PIN(18, "SDCSn[0]"), + PINCTRL_PIN(19, "vdd_ring"), + PINCTRL_PIN(20, "gnd_ring"), + PINCTRL_PIN(21, "RASn"), + PINCTRL_PIN(22, "CASn"), + PINCTRL_PIN(23, "DQMn[1]"), + PINCTRL_PIN(24, "DQMn[0]"), + PINCTRL_PIN(25, "AD[22]"), + PINCTRL_PIN(26, "AD[21]"), + PINCTRL_PIN(27, "vdd_ring"), + PINCTRL_PIN(28, "gnd_ring"), + PINCTRL_PIN(29, "DA[15]"), + PINCTRL_PIN(30, "AD[7]"), + PINCTRL_PIN(31, "DA[14]"), + PINCTRL_PIN(32, "AD[6]"), + PINCTRL_PIN(33, "DA[13]"), + PINCTRL_PIN(34, "vdd_core"), + PINCTRL_PIN(35, "gnd_core"), + PINCTRL_PIN(36, "AD[5]"), + PINCTRL_PIN(37, "DA[12]"), + PINCTRL_PIN(38, "AD[4]"), + PINCTRL_PIN(39, "DA[11]"), + PINCTRL_PIN(40, "AD[3]"), + PINCTRL_PIN(41, "vdd_ring"), + PINCTRL_PIN(42, "gnd_ring"), + PINCTRL_PIN(43, "DA[10]"), + PINCTRL_PIN(44, "AD[2]"), + PINCTRL_PIN(45, "DA[9]"), + PINCTRL_PIN(46, "AD[1]"), + PINCTRL_PIN(47, "DA[8]"), + PINCTRL_PIN(48, "AD[0]"), + PINCTRL_PIN(49, "vdd_ring"), + PINCTRL_PIN(50, "gnd_ring"), + PINCTRL_PIN(51, "NC"), + PINCTRL_PIN(52, "NC"), + PINCTRL_PIN(53, "vdd_ring"), + PINCTRL_PIN(54, "gnd_ring"), + PINCTRL_PIN(55, "AD[15]"), + PINCTRL_PIN(56, "DA[7]"), + PINCTRL_PIN(57, "vdd_core"), + PINCTRL_PIN(58, "gnd_core"), + PINCTRL_PIN(59, "AD[14]"), + PINCTRL_PIN(60, "DA[6]"), + PINCTRL_PIN(61, "AD[13]"), + PINCTRL_PIN(62, "DA[5]"), + PINCTRL_PIN(63, "AD[12]"), + PINCTRL_PIN(64, "DA[4]"), + PINCTRL_PIN(65, "AD[11]"), + PINCTRL_PIN(66, "vdd_ring"), + PINCTRL_PIN(67, "gnd_ring"), + PINCTRL_PIN(68, "DA[3]"), + PINCTRL_PIN(69, "AD[10]"), + PINCTRL_PIN(70, "DA[2]"), + PINCTRL_PIN(71, "AD[9]"), + PINCTRL_PIN(72, "DA[1]"), + PINCTRL_PIN(73, "AD[8]"), + PINCTRL_PIN(74, "DA[0]"), + PINCTRL_PIN(75, "DSRn"), + PINCTRL_PIN(76, "DTRn"), + PINCTRL_PIN(77, "TCK"), + PINCTRL_PIN(78, "TDI"), + PINCTRL_PIN(79, "TDO"), + PINCTRL_PIN(80, "TMS"), + PINCTRL_PIN(81, 
"vdd_ring"), + PINCTRL_PIN(82, "gnd_ring"), + PINCTRL_PIN(83, "BOOT[1]"), + PINCTRL_PIN(84, "BOOT[0]"), + PINCTRL_PIN(85, "gnd_ring"), + PINCTRL_PIN(86, "NC"), + PINCTRL_PIN(87, "EECLK"), + PINCTRL_PIN(88, "EEDAT"), + PINCTRL_PIN(89, "ASYNC"), + PINCTRL_PIN(90, "vdd_core"), + PINCTRL_PIN(91, "gnd_core"), + PINCTRL_PIN(92, "ASDO"), + PINCTRL_PIN(93, "SCLK1"), + PINCTRL_PIN(94, "SFRM1"), + PINCTRL_PIN(95, "SSPRX1"), + PINCTRL_PIN(96, "SSPTX1"), + PINCTRL_PIN(97, "GRLED"), + PINCTRL_PIN(98, "RDLED"), + PINCTRL_PIN(99, "vdd_ring"), + PINCTRL_PIN(100, "gnd_ring"), + PINCTRL_PIN(101, "INT[3]"), + PINCTRL_PIN(102, "INT[1]"), + PINCTRL_PIN(103, "INT[0]"), + PINCTRL_PIN(104, "RTSn"), + PINCTRL_PIN(105, "USBm[0]"), + PINCTRL_PIN(106, "USBp[0]"), + PINCTRL_PIN(107, "ABITCLK"), + PINCTRL_PIN(108, "CTSn"), + PINCTRL_PIN(109, "RXD[0]"), + PINCTRL_PIN(110, "RXD[1]"), + PINCTRL_PIN(111, "vdd_ring"), + PINCTRL_PIN(112, "gnd_ring"), + PINCTRL_PIN(113, "TXD[0]"), + PINCTRL_PIN(114, "TXD[1]"), + PINCTRL_PIN(115, "CGPIO[0]"), + PINCTRL_PIN(116, "gnd_core"), + PINCTRL_PIN(117, "PLL_GND"), + PINCTRL_PIN(118, "XTALI"), + PINCTRL_PIN(119, "XTALO"), + PINCTRL_PIN(120, "PLL_VDD"), + PINCTRL_PIN(121, "vdd_core"), + PINCTRL_PIN(122, "gnd_ring"), + PINCTRL_PIN(123, "vdd_ring"), + PINCTRL_PIN(124, "RSTOn"), + PINCTRL_PIN(125, "PRSTn"), + PINCTRL_PIN(126, "CSn[0]"), + PINCTRL_PIN(127, "gnd_core"), + PINCTRL_PIN(128, "vdd_core"), + PINCTRL_PIN(129, "gnd_ring"), + PINCTRL_PIN(130, "vdd_ring"), + PINCTRL_PIN(131, "ADC[4]"), + PINCTRL_PIN(132, "ADC[3]"), + PINCTRL_PIN(133, "ADC[2]"), + PINCTRL_PIN(134, "ADC[1]"), + PINCTRL_PIN(135, "ADC[0]"), + PINCTRL_PIN(136, "ADC_VDD"), + PINCTRL_PIN(137, "RTCXTALI"), + PINCTRL_PIN(138, "RTCXTALO"), + PINCTRL_PIN(139, "ADC_GND"), + PINCTRL_PIN(140, "EGPIO[11]"), + PINCTRL_PIN(141, "EGPIO[10]"), + PINCTRL_PIN(142, "EGPIO[9]"), + PINCTRL_PIN(143, "EGPIO[8]"), + PINCTRL_PIN(144, "EGPIO[7]"), + PINCTRL_PIN(145, "EGPIO[6]"), + PINCTRL_PIN(146, "EGPIO[5]"), + PINCTRL_PIN(147, "EGPIO[4]"), + PINCTRL_PIN(148, "EGPIO[3]"), + PINCTRL_PIN(149, "gnd_ring"), + PINCTRL_PIN(150, "vdd_ring"), + PINCTRL_PIN(151, "EGPIO[2]"), + PINCTRL_PIN(152, "EGPIO[1]"), + PINCTRL_PIN(153, "EGPIO[0]"), + PINCTRL_PIN(154, "ARSTn"), + PINCTRL_PIN(155, "TRSTn"), + PINCTRL_PIN(156, "ASDI"), + PINCTRL_PIN(157, "USBm[2]"), + PINCTRL_PIN(158, "USBp[2]"), + PINCTRL_PIN(159, "WAITn"), + PINCTRL_PIN(160, "EGPIO[15]"), + PINCTRL_PIN(161, "gnd_ring"), + PINCTRL_PIN(162, "vdd_ring"), + PINCTRL_PIN(163, "EGPIO[14]"), + PINCTRL_PIN(164, "EGPIO[13]"), + PINCTRL_PIN(165, "EGPIO[12]"), + PINCTRL_PIN(166, "gnd_core"), + PINCTRL_PIN(167, "vdd_core"), + PINCTRL_PIN(168, "FGPIO[3]"), + PINCTRL_PIN(169, "FGPIO[2]"), + PINCTRL_PIN(170, "FGPIO[1]"), + PINCTRL_PIN(171, "gnd_ring"), + PINCTRL_PIN(172, "vdd_ring"), + PINCTRL_PIN(173, "CLD"), + PINCTRL_PIN(174, "CRS"), + PINCTRL_PIN(175, "TXERR"), + PINCTRL_PIN(176, "TXEN"), + PINCTRL_PIN(177, "MIITXD[0]"), + PINCTRL_PIN(178, "MIITXD[1]"), + PINCTRL_PIN(179, "MIITXD[2]"), + PINCTRL_PIN(180, "MIITXD[3]"), + PINCTRL_PIN(181, "TXCLK"), + PINCTRL_PIN(182, "RXERR"), + PINCTRL_PIN(183, "RXDVAL"), + PINCTRL_PIN(184, "MIIRXD[0]"), + PINCTRL_PIN(185, "MIIRXD[1]"), + PINCTRL_PIN(186, "MIIRXD[2]"), + PINCTRL_PIN(187, "gnd_ring"), + PINCTRL_PIN(188, "vdd_ring"), + PINCTRL_PIN(189, "MIIRXD[3]"), + PINCTRL_PIN(190, "RXCLK"), + PINCTRL_PIN(191, "MDIO"), + PINCTRL_PIN(192, "MDC"), + PINCTRL_PIN(193, "RDn"), + PINCTRL_PIN(194, "WRn"), + PINCTRL_PIN(195, "AD[16]"), + PINCTRL_PIN(196, "AD[17]"), + PINCTRL_PIN(197, 
"gnd_core"), + PINCTRL_PIN(198, "vdd_core"), + PINCTRL_PIN(199, "HGPIO[2]"), + PINCTRL_PIN(200, "HGPIO[3]"), + PINCTRL_PIN(201, "HGPIO[4]"), + PINCTRL_PIN(202, "HGPIO[5]"), + PINCTRL_PIN(203, "gnd_ring"), + PINCTRL_PIN(204, "vdd_ring"), + PINCTRL_PIN(205, "AD[18]"), + PINCTRL_PIN(206, "AD[19]"), + PINCTRL_PIN(207, "AD[20]"), + PINCTRL_PIN(208, "SDCLKEN"), +}; + +static const unsigned int ssp_ep9301_pins[] = { + 93, 94, 95, 96, +}; + +static const unsigned int ac97_ep9301_pins[] = { + 89, 92, 107, 154, 156, +}; + +/* + * Note: The EP9307 processor has one PWM with one output, PWMOUT. + * Note: The EP9301, EP9302, EP9312, and EP9315 processors each have two PWMs with + * two outputs, PWMOUT and PWMO1. PWMO1 is an alternate function for EGPIO14. + */ +/* The GPIO14E (14) pin overlap with pwm1 */ +static const unsigned int pwm_9301_pins[] = { 163 }; + +static const unsigned int gpio1a_9301_pins[] = { 163 }; + +/* ep9301/9302 have only 0 pin of GPIO C Port exposed */ +static const unsigned int gpio2a_9301_pins[] = { 115 }; + +/* ep9301/9302 have only 4,5 pin of GPIO E Port exposed */ +static const unsigned int gpio4a_9301_pins[] = { 97, 98 }; + +/* ep9301/9302 have only 4,5 pin of GPIO G Port exposed */ +static const unsigned int gpio6a_9301_pins[] = { 87, 88 }; + +static const unsigned int gpio7a_9301_pins[] = { 199, 200, 201, 202 }; + +/* Groups for the ep9301/ep9302 SoC/package */ +static const struct ep93xx_pin_group ep9301_pin_groups[] = { + PMX_GROUP("ssp", ssp_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0), + PMX_GROUP("i2s_on_ssp", ssp_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, + EP93XX_SYSCON_DEVCFG_I2SONSSP), + PMX_GROUP("ac97", ac97_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0), + PMX_GROUP("i2s_on_ac97", ac97_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, + EP93XX_SYSCON_DEVCFG_I2SONAC97), + PMX_GROUP("pwm1", pwm_9301_pins, EP93XX_SYSCON_DEVCFG_PONG, EP93XX_SYSCON_DEVCFG_PONG), + PMX_GROUP("gpio1agrp", gpio1a_9301_pins, EP93XX_SYSCON_DEVCFG_PONG, 0), + PMX_GROUP("gpio2agrp", gpio2a_9301_pins, EP93XX_SYSCON_DEVCFG_GONK, + EP93XX_SYSCON_DEVCFG_GONK), + PMX_GROUP("gpio4agrp", gpio4a_9301_pins, EP93XX_SYSCON_DEVCFG_EONIDE, + EP93XX_SYSCON_DEVCFG_EONIDE), + PMX_GROUP("gpio6agrp", gpio6a_9301_pins, EP93XX_SYSCON_DEVCFG_GONIDE, + EP93XX_SYSCON_DEVCFG_GONIDE), + PMX_GROUP("gpio7agrp", gpio7a_9301_pins, EP93XX_SYSCON_DEVCFG_HONIDE, + EP93XX_SYSCON_DEVCFG_HONIDE), +}; + +static const struct pinctrl_pin_desc ep9307_pins[] = { + /* Row A */ + PINCTRL_PIN(0, "CSn[1]"), /* A1 */ + PINCTRL_PIN(1, "CSn[7]"), /* A2 */ + PINCTRL_PIN(2, "SDCLKEN"), /* A3 */ + PINCTRL_PIN(3, "DA[31]"), /* A4 */ + PINCTRL_PIN(4, "DA[29]"), /* A5 */ + PINCTRL_PIN(5, "DA[27]"), /* A6 */ + PINCTRL_PIN(6, "HGPIO[2]"), /* A7 */ + PINCTRL_PIN(7, "RDn"), /* A8 */ + PINCTRL_PIN(8, "MIIRXD[3]"), /* A9 */ + PINCTRL_PIN(9, "RXDVAL"), /* A10 */ + PINCTRL_PIN(10, "MIITXD[1]"), /* A11 */ + PINCTRL_PIN(11, "CRS"), /* A12 */ + PINCTRL_PIN(12, "FGPIO[7]"), /* A13 */ + PINCTRL_PIN(13, "FGPIO[0]"), /* A14 */ + PINCTRL_PIN(14, "WAITn"), /* A15 */ + PINCTRL_PIN(15, "USBm[2]"), /* A16 */ + PINCTRL_PIN(16, "ASDI"), /* A17 */ + /* Row B */ + PINCTRL_PIN(17, "AD[25]"), /* B1 */ + PINCTRL_PIN(18, "CSn[2]"), /* B2 */ + PINCTRL_PIN(19, "CSn[6]"), /* B3 */ + PINCTRL_PIN(20, "AD[20]"), /* B4 */ + PINCTRL_PIN(21, "DA[30]"), /* B5 */ + PINCTRL_PIN(22, "AD[18]"), /* B6 */ + PINCTRL_PIN(23, "HGPIO[3]"), /* B7 */ + PINCTRL_PIN(24, "AD[17]"), /* B8 */ + PINCTRL_PIN(25, "RXCLK"), /* B9 */ + PINCTRL_PIN(26, "MIIRXD[1]"), /* B10 */ + PINCTRL_PIN(27, 
"MIITXD[2]"), /* B11 */ + PINCTRL_PIN(28, "TXEN"), /* B12 */ + PINCTRL_PIN(29, "FGPIO[5]"), /* B13 */ + PINCTRL_PIN(30, "EGPIO[15]"), /* B14 */ + PINCTRL_PIN(31, "USBp[2]"), /* B15 */ + PINCTRL_PIN(32, "ARSTn"), /* B16 */ + PINCTRL_PIN(33, "ADC_VDD"), /* B17 */ + /* Row C */ + PINCTRL_PIN(34, "AD[23]"), /* C1 */ + PINCTRL_PIN(35, "DA[26]"), /* C2 */ + PINCTRL_PIN(36, "CSn[3]"), /* C3 */ + PINCTRL_PIN(37, "DA[25]"), /* C4 */ + PINCTRL_PIN(38, "AD[24]"), /* C5 */ + PINCTRL_PIN(39, "AD[19]"), /* C6 */ + PINCTRL_PIN(40, "HGPIO[5]"), /* C7 */ + PINCTRL_PIN(41, "WRn"), /* C8 */ + PINCTRL_PIN(42, "MDIO"), /* C9 */ + PINCTRL_PIN(43, "MIIRXD[2]"), /* C10 */ + PINCTRL_PIN(44, "TXCLK"), /* C11 */ + PINCTRL_PIN(45, "MIITXD[0]"), /* C12 */ + PINCTRL_PIN(46, "CLD"), /* C13 */ + PINCTRL_PIN(47, "EGPIO[13]"), /* C14 */ + PINCTRL_PIN(48, "TRSTn"), /* C15 */ + PINCTRL_PIN(49, "Xp"), /* C16 */ + PINCTRL_PIN(50, "Xm"), /* C17 */ + /* Row D */ + PINCTRL_PIN(51, "SDCSn[3]"), /* D1 */ + PINCTRL_PIN(52, "DA[23]"), /* D2 */ + PINCTRL_PIN(53, "SDCLK"), /* D3 */ + PINCTRL_PIN(54, "DA[24]"), /* D4 */ + PINCTRL_PIN(55, "HGPIO[7]"), /* D5 */ + PINCTRL_PIN(56, "HGPIO[6]"), /* D6 */ + PINCTRL_PIN(57, "A[28]"), /* D7 */ + PINCTRL_PIN(58, "HGPIO[4]"), /* D8 */ + PINCTRL_PIN(59, "AD[16]"), /* D9 */ + PINCTRL_PIN(60, "MDC"), /* D10 */ + PINCTRL_PIN(61, "RXERR"), /* D11 */ + PINCTRL_PIN(62, "MIITXD[3]"), /* D12 */ + PINCTRL_PIN(63, "EGPIO[12]"), /* D13 */ + PINCTRL_PIN(64, "EGPIO[1]"), /* D14 */ + PINCTRL_PIN(65, "EGPIO[0]"), /* D15 */ + PINCTRL_PIN(66, "Ym"), /* D16 */ + PINCTRL_PIN(67, "Yp"), /* D17 */ + /* Row E */ + PINCTRL_PIN(68, "SDCSn[2]"), /* E1 */ + PINCTRL_PIN(69, "SDWEN"), /* E2 */ + PINCTRL_PIN(70, "DA[22]"), /* E3 */ + PINCTRL_PIN(71, "AD[3]"), /* E4 */ + PINCTRL_PIN(72, "DA[15]"), /* E5 */ + PINCTRL_PIN(73, "AD[21]"), /* E6 */ + PINCTRL_PIN(74, "DA[17]"), /* E7 */ + PINCTRL_PIN(75, "vddr"), /* E8 */ + PINCTRL_PIN(76, "vddr"), /* E9 */ + PINCTRL_PIN(77, "vddr"), /* E10 */ + PINCTRL_PIN(78, "MIIRXD[0]"), /* E11 */ + PINCTRL_PIN(79, "TXERR"), /* E12 */ + PINCTRL_PIN(80, "EGPIO[2]"), /* E13 */ + PINCTRL_PIN(81, "EGPIO[4]"), /* E14 */ + PINCTRL_PIN(82, "EGPIO[3]"), /* E15 */ + PINCTRL_PIN(83, "sXp"), /* E16 */ + PINCTRL_PIN(84, "sXm"), /* E17 */ + /* Row F */ + PINCTRL_PIN(85, "RASn"), /* F1 */ + PINCTRL_PIN(86, "SDCSn[1]"), /* F2 */ + PINCTRL_PIN(87, "SDCSn[0]"), /* F3 */ + PINCTRL_PIN(88, "DQMn[3]"), /* F4 */ + PINCTRL_PIN(89, "AD[5]"), /* F5 */ + PINCTRL_PIN(90, "gndr"), /* F6 */ + PINCTRL_PIN(91, "gndr"), /* F7 */ + PINCTRL_PIN(92, "gndr"), /* F8 */ + PINCTRL_PIN(93, "vddc"), /* F9 */ + PINCTRL_PIN(94, "vddc"), /* F10 */ + PINCTRL_PIN(95, "gndr"), /* F11 */ + PINCTRL_PIN(96, "EGPIO[7]"), /* F12 */ + PINCTRL_PIN(97, "EGPIO[5]"), /* F13 */ + PINCTRL_PIN(98, "ADC GND"), /* F14 */ + PINCTRL_PIN(99, "EGPIO[6]"), /* F15 */ + PINCTRL_PIN(100, "sYm"), /* F16 */ + PINCTRL_PIN(101, "syp"), /* F17 */ + /* Row G */ + PINCTRL_PIN(102, "DQMn[0]"), /* G1 */ + PINCTRL_PIN(103, "CASn"), /* G2 */ + PINCTRL_PIN(104, "DA[21]"), /* G3 */ + PINCTRL_PIN(105, "AD[22]"), /* G4 */ + PINCTRL_PIN(106, "vddr"), /* G5 */ + PINCTRL_PIN(107, "gndr"), /* G6 */ + PINCTRL_PIN(108, "gndr"), /* G12 */ + PINCTRL_PIN(109, "EGPIO[9]"), /* G13 */ + PINCTRL_PIN(110, "EGPIO[10]"), /* G14 */ + PINCTRL_PIN(111, "EGPIO[11]"), /* G15 */ + PINCTRL_PIN(112, "RTCXTALO"), /* G16 */ + PINCTRL_PIN(113, "RTCXTALI"), /* G17 */ + /* Row H */ + PINCTRL_PIN(114, "DA[18]"), /* H1 */ + PINCTRL_PIN(115, "DA[20]"), /* H2 */ + PINCTRL_PIN(116, "DA[19]"), /* H3 */ + 
PINCTRL_PIN(117, "DA[16]"), /* H4 */ + PINCTRL_PIN(118, "vddr"), /* H5 */ + PINCTRL_PIN(119, "vddc"), /* H6 */ + PINCTRL_PIN(120, "gndc"), /* H7 */ + PINCTRL_PIN(121, "gndc"), /* H9 */ + PINCTRL_PIN(122, "gndc"), /* H10 */ + PINCTRL_PIN(123, "gndr"), /* H12 */ + PINCTRL_PIN(124, "vddr"), /* H13 */ + PINCTRL_PIN(125, "EGPIO[8]"), /* H14 */ + PINCTRL_PIN(126, "PRSTN"), /* H15 */ + PINCTRL_PIN(127, "COL[7]"), /* H16 */ + PINCTRL_PIN(128, "RSTON"), /* H17 */ + /* Row J */ + PINCTRL_PIN(129, "AD[6]"), /* J1 */ + PINCTRL_PIN(130, "DA[14]"), /* J2 */ + PINCTRL_PIN(131, "AD[7]"), /* J3 */ + PINCTRL_PIN(132, "DA[13]"), /* J4 */ + PINCTRL_PIN(133, "vddr"), /* J5 */ + PINCTRL_PIN(134, "vddc"), /* J6 */ + PINCTRL_PIN(135, "gndc"), /* J8 */ + PINCTRL_PIN(136, "gndc"), /* J10 */ + PINCTRL_PIN(137, "vddc"), /* J12 */ + PINCTRL_PIN(138, "vddr"), /* J13 */ + PINCTRL_PIN(139, "COL[5]"), /* J14 */ + PINCTRL_PIN(140, "COL[6]"), /* J15 */ + PINCTRL_PIN(141, "CSn[0]"), /* J16 */ + PINCTRL_PIN(142, "COL[3]"), /* J17 */ + /* Row K */ + PINCTRL_PIN(143, "AD[4]"), /* K1 */ + PINCTRL_PIN(144, "DA[12]"), /* K2 */ + PINCTRL_PIN(145, "DA[10]"), /* K3 */ + PINCTRL_PIN(146, "DA[11]"), /* K4 */ + PINCTRL_PIN(147, "vddr"), /* K5 */ + PINCTRL_PIN(148, "gndr"), /* K6 */ + PINCTRL_PIN(149, "gndc"), /* K8 */ + PINCTRL_PIN(150, "gndc"), /* K9 */ + PINCTRL_PIN(151, "gndc"), /* K10 */ + PINCTRL_PIN(152, "vddc"), /* K12 */ + PINCTRL_PIN(153, "COL[4]"), /* K13 */ + PINCTRL_PIN(154, "PLL_VDD"), /* K14 */ + PINCTRL_PIN(155, "COL[2]"), /* K15 */ + PINCTRL_PIN(156, "COL[1]"), /* K16 */ + PINCTRL_PIN(157, "COL[0]"), /* K17 */ + /* Row L */ + PINCTRL_PIN(158, "DA[9]"), /* L1 */ + PINCTRL_PIN(159, "AD[2]"), /* L2 */ + PINCTRL_PIN(160, "AD[1]"), /* L3 */ + PINCTRL_PIN(161, "DA[8]"), /* L4 */ + PINCTRL_PIN(162, "BLANK"), /* L5 */ + PINCTRL_PIN(163, "gndr"), /* L6 */ + PINCTRL_PIN(164, "gndr"), /* L7 */ + PINCTRL_PIN(165, "ROW[7]"), /* L8 */ + PINCTRL_PIN(166, "ROW[5]"), /* L9 */ + PINCTRL_PIN(167, "PLL GND"), /* L10 */ + PINCTRL_PIN(168, "XTALI"), /* L11 */ + PINCTRL_PIN(169, "XTALO"), /* L12 */ + /* Row M */ + PINCTRL_PIN(170, "BRIGHT"), /* M1 */ + PINCTRL_PIN(171, "AD[0]"), /* M2 */ + PINCTRL_PIN(172, "DQMn[1]"), /* M3 */ + PINCTRL_PIN(173, "DQMn[2]"), /* M4 */ + PINCTRL_PIN(174, "P[17]"), /* M5 */ + PINCTRL_PIN(175, "gndr"), /* M6 */ + PINCTRL_PIN(176, "gndr"), /* M7 */ + PINCTRL_PIN(177, "vddc"), /* M8 */ + PINCTRL_PIN(178, "vddc"), /* M9 */ + PINCTRL_PIN(179, "gndr"), /* M10 */ + PINCTRL_PIN(180, "gndr"), /* M11 */ + PINCTRL_PIN(181, "ROW[6]"), /* M12 */ + PINCTRL_PIN(182, "ROW[4]"), /* M13 */ + PINCTRL_PIN(183, "ROW[1]"), /* M14 */ + PINCTRL_PIN(184, "ROW[0]"), /* M15 */ + PINCTRL_PIN(185, "ROW[3]"), /* M16 */ + PINCTRL_PIN(186, "ROW[2]"), /* M17 */ + /* Row N */ + PINCTRL_PIN(187, "P[14]"), /* N1 */ + PINCTRL_PIN(188, "P[16]"), /* N2 */ + PINCTRL_PIN(189, "P[15]"), /* N3 */ + PINCTRL_PIN(190, "P[13]"), /* N4 */ + PINCTRL_PIN(191, "P[12]"), /* N5 */ + PINCTRL_PIN(192, "DA[5]"), /* N6 */ + PINCTRL_PIN(193, "vddr"), /* N7 */ + PINCTRL_PIN(194, "vddr"), /* N8 */ + PINCTRL_PIN(195, "vddr"), /* N9 */ + PINCTRL_PIN(196, "vddr"), /* N10 */ + PINCTRL_PIN(197, "EECLK"), /* N11 */ + PINCTRL_PIN(198, "ASDO"), /* N12 */ + PINCTRL_PIN(199, "CTSn"), /* N13 */ + PINCTRL_PIN(200, "RXD[0]"), /* N14 */ + PINCTRL_PIN(201, "TXD[0]"), /* N15 */ + PINCTRL_PIN(202, "TXD[1]"), /* N16 */ + PINCTRL_PIN(203, "TXD[2]"), /* N17 */ + /* Row P */ + PINCTRL_PIN(204, "SPCLK"), /* P1 */ + PINCTRL_PIN(205, "P[10]"), /* P2 */ + PINCTRL_PIN(206, "P[11]"), /* P3 */ + 
PINCTRL_PIN(207, "P[3]"), /* P4 */ + PINCTRL_PIN(208, "AD[15]"), /* P5 */ + PINCTRL_PIN(209, "AD[13]"), /* P6 */ + PINCTRL_PIN(210, "AD[12]"), /* P7 */ + PINCTRL_PIN(211, "DA[2]"), /* P8 */ + PINCTRL_PIN(212, "AD[8]"), /* P9 */ + PINCTRL_PIN(213, "TCK"), /* P10 */ + PINCTRL_PIN(214, "BOOT[1]"), /* P11 */ + PINCTRL_PIN(215, "EEDAT"), /* P12 */ + PINCTRL_PIN(216, "GRLED"), /* P13 */ + PINCTRL_PIN(217, "RDLED"), /* P14 */ + PINCTRL_PIN(218, "GGPIO[2]"), /* P15 */ + PINCTRL_PIN(219, "RXD[1]"), /* P16 */ + PINCTRL_PIN(220, "RXD[2]"), /* P17 */ + /* Row R */ + PINCTRL_PIN(221, "P[9]"), /* R1 */ + PINCTRL_PIN(222, "HSYNC"), /* R2 */ + PINCTRL_PIN(223, "P[6]"), /* R3 */ + PINCTRL_PIN(224, "P[5]"), /* R4 */ + PINCTRL_PIN(225, "P[0]"), /* R5 */ + PINCTRL_PIN(226, "AD[14]"), /* R6 */ + PINCTRL_PIN(227, "DA[4]"), /* R7 */ + PINCTRL_PIN(228, "DA[1]"), /* R8 */ + PINCTRL_PIN(229, "DTRn"), /* R9 */ + PINCTRL_PIN(230, "TDI"), /* R10 */ + PINCTRL_PIN(231, "BOOT[0]"), /* R11 */ + PINCTRL_PIN(232, "ASYNC"), /* R12 */ + PINCTRL_PIN(233, "SSPTX[1]"), /* R13 */ + PINCTRL_PIN(234, "PWMOUT"), /* R14 */ + PINCTRL_PIN(235, "USBm[0]"), /* R15 */ + PINCTRL_PIN(236, "ABITCLK"), /* R16 */ + PINCTRL_PIN(237, "USBp[0]"), /* R17 */ + /* Row T */ + PINCTRL_PIN(238, "NC"), /* T1 */ + PINCTRL_PIN(239, "NC"), /* T2 */ + PINCTRL_PIN(240, "V_CSYNC"), /* T3 */ + PINCTRL_PIN(241, "P[7]"), /* T4 */ + PINCTRL_PIN(242, "P[2]"), /* T5 */ + PINCTRL_PIN(243, "DA[7]"), /* T6 */ + PINCTRL_PIN(244, "AD[11]"), /* T7 */ + PINCTRL_PIN(245, "AD[9]"), /* T8 */ + PINCTRL_PIN(246, "DSRn"), /* T9 */ + PINCTRL_PIN(247, "TMS"), /* T10 */ + PINCTRL_PIN(248, "gndr"), /* T11 */ + PINCTRL_PIN(249, "SFRM[1]"), /* T12 */ + PINCTRL_PIN(250, "INT[2]"), /* T13 */ + PINCTRL_PIN(251, "INT[0]"), /* T14 */ + PINCTRL_PIN(252, "USBp[1]"), /* T15 */ + PINCTRL_PIN(253, "NC"), /* T16 */ + PINCTRL_PIN(254, "NC"), /* T17 */ + /* Row U */ + PINCTRL_PIN(255, "NC"), /* U1 */ + PINCTRL_PIN(256, "NC"), /* U2 */ + PINCTRL_PIN(257, "P[8]"), /* U3 */ + PINCTRL_PIN(258, "P[4]"), /* U4 */ + PINCTRL_PIN(259, "P[1]"), /* U5 */ + PINCTRL_PIN(260, "DA[6]"), /* U6 */ + PINCTRL_PIN(261, "DA[3]"), /* U7 */ + PINCTRL_PIN(262, "AD[10]"), /* U8 */ + PINCTRL_PIN(263, "DA[0]"), /* U9 */ + PINCTRL_PIN(264, "TDO"), /* U10 */ + PINCTRL_PIN(265, "NC"), /* U11 */ + PINCTRL_PIN(266, "SCLK[1]"), /* U12 */ + PINCTRL_PIN(267, "SSPRX[1]"), /* U13 */ + PINCTRL_PIN(268, "INT[1]"), /* U14 */ + PINCTRL_PIN(269, "RTSn"), /* U15 */ + PINCTRL_PIN(270, "USBm[1]"), /* U16 */ + PINCTRL_PIN(271, "NC"), /* U17 */ +}; + +static const unsigned int ssp_ep9307_pins[] = { + 233, 249, 266, 267, +}; + +static const unsigned int ac97_ep9307_pins[] = { + 16, 32, 198, 232, 236, +}; + +/* I can't find info on those - it's some internal state */ +static const unsigned int raster_on_sdram0_pins[] = { +}; + +static const unsigned int raster_on_sdram3_pins[] = { +}; + +/* ROW[N] */ +static const unsigned int gpio2a_9307_pins[] = { + 165, 166, 181, 182, 183, 184, 185, 186, +}; + +/* COL[N] */ +static const unsigned int gpio3a_9307_pins[] = { + 127, 139, 140, 142, 153, 155, 156, 157, +}; + +static const unsigned int keypad_9307_pins[] = { + 127, 139, 140, 142, 153, 155, 156, 157, + 165, 166, 181, 182, 183, 184, 185, 186, +}; + +/* ep9307 have only 4,5 pin of GPIO E Port exposed */ +static const unsigned int gpio4a_9307_pins[] = { 216, 217 }; + +/* ep9307 have only 2 pin of GPIO G Port exposed */ +static const unsigned int gpio6a_9307_pins[] = { 219 }; + +static const unsigned int gpio7a_9307_pins[] = { 7, 24, 41, 56, 57, 59 }; 
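A minimal sketch of how the mask/value pair carried by each PMX_GROUP() entry is applied, assuming only the helpers already defined earlier in this new file (ep93xx_pinctrl_update_bits(), EP93XX_SYSCON_DEVCFG and struct ep93xx_pin_group); the function name below is hypothetical and is not part of the patch — the driver's actual pinmux callbacks sit later in the file, outside this excerpt:

	/*
	 * Hypothetical example, not taken from the patch: apply one group's
	 * mask/value pair to the DeviceCfg register.  Only the bits in
	 * grp->mask are modified; every other DeviceCfg setting is preserved
	 * by the read-modify-write done in ep93xx_pinctrl_update_bits().
	 */
	static void ep93xx_example_apply_group(struct ep93xx_pmx *pmx,
					       const struct ep93xx_pin_group *grp)
	{
		ep93xx_pinctrl_update_bits(pmx, EP93XX_SYSCON_DEVCFG,
					   grp->mask, grp->value);
	}

For instance, in the tables above and below, selecting "i2s_on_ssp" sets the I2SonSSP bit (mask and value are both EP93XX_SYSCON_DEVCFG_I2SONSSP), while the plain "ssp" group clears the same bit (same mask, value 0).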
+ +static const struct ep93xx_pin_group ep9307_pin_groups[] = { + PMX_GROUP("ssp", ssp_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0), + PMX_GROUP("i2s_on_ssp", ssp_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, + EP93XX_SYSCON_DEVCFG_I2SONSSP), + PMX_GROUP("ac97", ac97_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0), + PMX_GROUP("i2s_on_ac97", ac97_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, + EP93XX_SYSCON_DEVCFG_I2SONAC97), + PMX_GROUP("rasteronsdram0grp", raster_on_sdram0_pins, EP93XX_SYSCON_DEVCFG_RASONP3, 0), + PMX_GROUP("rasteronsdram3grp", raster_on_sdram3_pins, EP93XX_SYSCON_DEVCFG_RASONP3, + EP93XX_SYSCON_DEVCFG_RASONP3), + PMX_GROUP("gpio2agrp", gpio2a_9307_pins, EP93XX_SYSCON_DEVCFG_GONK, + EP93XX_SYSCON_DEVCFG_GONK), + PMX_GROUP("gpio3agrp", gpio3a_9307_pins, EP93XX_SYSCON_DEVCFG_GONK, + EP93XX_SYSCON_DEVCFG_GONK), + PMX_GROUP("keypadgrp", keypad_9307_pins, EP93XX_SYSCON_DEVCFG_GONK, 0), + PMX_GROUP("gpio4agrp", gpio4a_9307_pins, EP93XX_SYSCON_DEVCFG_EONIDE, + EP93XX_SYSCON_DEVCFG_EONIDE), + PMX_GROUP("gpio6agrp", gpio6a_9307_pins, EP93XX_SYSCON_DEVCFG_GONIDE, + EP93XX_SYSCON_DEVCFG_GONIDE), + PMX_GROUP("gpio7agrp", gpio7a_9307_pins, EP93XX_SYSCON_DEVCFG_HONIDE, + EP93XX_SYSCON_DEVCFG_HONIDE), +}; + +/* ep9312, ep9315 */ +static const struct pinctrl_pin_desc ep9312_pins[] = { + /* Row A */ + PINCTRL_PIN(0, "CSN[7]"), /* A1 */ + PINCTRL_PIN(1, "DA[28]"), /* A2 */ + PINCTRL_PIN(2, "AD[18]"), /* A3 */ + PINCTRL_PIN(3, "DD[8]"), /* A4 */ + PINCTRL_PIN(4, "DD[4]"), /* A5 */ + PINCTRL_PIN(5, "AD[17]"), /* A6 */ + PINCTRL_PIN(6, "RDN"), /* A7 */ + PINCTRL_PIN(7, "RXCLK"), /* A8 */ + PINCTRL_PIN(8, "MIIRXD[0]"), /* A9 */ + PINCTRL_PIN(9, "RXDVAL"), /* A10 */ + PINCTRL_PIN(10, "MIITXD[2]"), /* A11 */ + PINCTRL_PIN(11, "TXERR"), /* A12 */ + PINCTRL_PIN(12, "CLD"), /* A13 */ + PINCTRL_PIN(13, "NC"), /* A14 */ + PINCTRL_PIN(14, "NC"), /* A15 */ + PINCTRL_PIN(15, "NC"), /* A16 */ + PINCTRL_PIN(16, "EGPIO[12]"), /* A17 */ + PINCTRL_PIN(17, "EGPIO[15]"), /* A18 */ + PINCTRL_PIN(18, "NC"), /* A19 */ + PINCTRL_PIN(19, "NC"), /* A20 */ + /* Row B */ + PINCTRL_PIN(20, "CSN[2]"), /* B1 */ + PINCTRL_PIN(21, "DA[31]"), /* B2 */ + PINCTRL_PIN(22, "DA[30]"), /* B3 */ + PINCTRL_PIN(23, "DA[27]"), /* B4 */ + PINCTRL_PIN(24, "DD[7]"), /* B5 */ + PINCTRL_PIN(25, "DD[3]"), /* B6 */ + PINCTRL_PIN(26, "WRN"), /* B7 */ + PINCTRL_PIN(27, "MDIO"), /* B8 */ + PINCTRL_PIN(28, "MIIRXD[1]"), /* B9 */ + PINCTRL_PIN(29, "RXERR"), /* B10 */ + PINCTRL_PIN(30, "MIITXD[1]"), /* B11 */ + PINCTRL_PIN(31, "CRS"), /* B12 */ + PINCTRL_PIN(32, "NC"), /* B13 */ + PINCTRL_PIN(33, "NC"), /* B14 */ + PINCTRL_PIN(34, "NC"), /* B15 */ + PINCTRL_PIN(35, "NC"), /* B16 */ + PINCTRL_PIN(36, "EGPIO[13]"), /* B17 */ + PINCTRL_PIN(37, "NC"), /* B18 */ + PINCTRL_PIN(38, "WAITN"), /* B19 */ + PINCTRL_PIN(39, "TRSTN"), /* B20 */ + /* Row C */ + PINCTRL_PIN(40, "CSN[1]"), /* C1 */ + PINCTRL_PIN(41, "CSN[3]"), /* C2 */ + PINCTRL_PIN(42, "AD[20]"), /* C3 */ + PINCTRL_PIN(43, "DA[29]"), /* C4 */ + PINCTRL_PIN(44, "DD[10]"), /* C5 */ + PINCTRL_PIN(45, "DD[6]"), /* C6 */ + PINCTRL_PIN(46, "DD[2]"), /* C7 */ + PINCTRL_PIN(47, "MDC"), /* C8 */ + PINCTRL_PIN(48, "MIIRXD[3]"), /* C9 */ + PINCTRL_PIN(49, "TXCLK"), /* C10 */ + PINCTRL_PIN(50, "MIITXD[0]"), /* C11 */ + PINCTRL_PIN(51, "NC"), /* C12 */ + PINCTRL_PIN(52, "NC"), /* C13 */ + PINCTRL_PIN(53, "NC"), /* C14 */ + PINCTRL_PIN(54, "NC"), /* C15 */ + PINCTRL_PIN(55, "NC"), /* C16 */ + PINCTRL_PIN(56, "NC"), /* C17 */ + PINCTRL_PIN(57, "USBP[2]"), /* C18 */ + PINCTRL_PIN(58, "IORDY"), /* C19 */ +
PINCTRL_PIN(59, "DMACKN"), /* C20 */ + /* Row D */ + PINCTRL_PIN(60, "AD[24]"), /* D1 */ + PINCTRL_PIN(61, "DA[25]"), /* D2 */ + PINCTRL_PIN(62, "DD[11]"), /* D3 */ + PINCTRL_PIN(63, "SDCLKEN"), /* D4 */ + PINCTRL_PIN(64, "AD[19]"), /* D5 */ + PINCTRL_PIN(65, "DD[9]"), /* D6 */ + PINCTRL_PIN(66, "DD[5]"), /* D7 */ + PINCTRL_PIN(67, "AD[16]"), /* D8 */ + PINCTRL_PIN(68, "MIIRXD[2]"), /* D9 */ + PINCTRL_PIN(69, "MIITXD[3]"), /* D10 */ + PINCTRL_PIN(70, "TXEN"), /* D11 */ + PINCTRL_PIN(71, "NC"), /* D12 */ + PINCTRL_PIN(72, "NC"), /* D13 */ + PINCTRL_PIN(73, "NC"), /* D14 */ + PINCTRL_PIN(74, "EGPIO[14]"), /* D15 */ + PINCTRL_PIN(75, "NC"), /* D16 */ + PINCTRL_PIN(76, "USBM[2]"), /* D17 */ + PINCTRL_PIN(77, "ARSTN"), /* D18 */ + PINCTRL_PIN(78, "DIORN"), /* D19 */ + PINCTRL_PIN(79, "EGPIO[1]"), /* D20 */ + /* Row E */ + PINCTRL_PIN(80, "AD[23]"), /* E1 */ + PINCTRL_PIN(81, "DA[23]"), /* E2 */ + PINCTRL_PIN(82, "DA[26]"), /* E3 */ + PINCTRL_PIN(83, "CSN[6]"), /* E4 */ + PINCTRL_PIN(84, "GND"), /* E5 */ + PINCTRL_PIN(85, "GND"), /* E6 */ + PINCTRL_PIN(86, "CVDD"), /* E7 */ + PINCTRL_PIN(87, "CVDD"), /* E8 */ + PINCTRL_PIN(88, "RVDD"), /* E9 */ + PINCTRL_PIN(89, "GND"), /* E10 */ + PINCTRL_PIN(90, "GND"), /* E11 */ + PINCTRL_PIN(91, "RVDD"), /* E12 */ + PINCTRL_PIN(92, "CVDD"), /* E13 */ + PINCTRL_PIN(93, "CVDD"), /* E14 */ + PINCTRL_PIN(94, "GND"), /* E15 */ + PINCTRL_PIN(95, "ASDI"), /* E16 */ + PINCTRL_PIN(96, "DIOWN"), /* E17 */ + PINCTRL_PIN(97, "EGPIO[0]"), /* E18 */ + PINCTRL_PIN(98, "EGPIO[3]"), /* E19 */ + PINCTRL_PIN(99, "EGPIO[5]"), /* E20 */ + /* Row F */ + PINCTRL_PIN(100, "SDCSN[3]"), /* F1 */ + PINCTRL_PIN(101, "DA[22]"), /* F2 */ + PINCTRL_PIN(102, "DA[24]"), /* F3 */ + PINCTRL_PIN(103, "AD[25]"), /* F4 */ + PINCTRL_PIN(104, "RVDD"), /* F5 */ + PINCTRL_PIN(105, "GND"), /* F6 */ + PINCTRL_PIN(106, "CVDD"), /* F7 */ + PINCTRL_PIN(107, "CVDD"), /* F14 */ + PINCTRL_PIN(108, "GND"), /* F15 */ + PINCTRL_PIN(109, "GND"), /* F16 */ + PINCTRL_PIN(110, "EGPIO[2]"), /* F17 */ + PINCTRL_PIN(111, "EGPIO[4]"), /* F18 */ + PINCTRL_PIN(112, "EGPIO[6]"), /* F19 */ + PINCTRL_PIN(113, "EGPIO[8]"), /* F20 */ + /* Row G */ + PINCTRL_PIN(114, "SDCSN[0]"), /* G1 */ + PINCTRL_PIN(115, "SDCSN[1]"), /* G2 */ + PINCTRL_PIN(116, "SDWEN"), /* G3 */ + PINCTRL_PIN(117, "SDCLK"), /* G4 */ + PINCTRL_PIN(118, "RVDD"), /* G5 */ + PINCTRL_PIN(119, "RVDD"), /* G6 */ + PINCTRL_PIN(120, "RVDD"), /* G15 */ + PINCTRL_PIN(121, "RVDD"), /* G16 */ + PINCTRL_PIN(122, "EGPIO[7]"), /* G17 */ + PINCTRL_PIN(123, "EGPIO[9]"), /* G18 */ + PINCTRL_PIN(124, "EGPIO[10]"), /* G19 */ + PINCTRL_PIN(125, "EGPIO[11]"), /* G20 */ + /* Row H */ + PINCTRL_PIN(126, "DQMN[3]"), /* H1 */ + PINCTRL_PIN(127, "CASN"), /* H2 */ + PINCTRL_PIN(128, "RASN"), /* H3 */ + PINCTRL_PIN(129, "SDCSN[2]"), /* H4 */ + PINCTRL_PIN(130, "CVDD"), /* H5 */ + PINCTRL_PIN(131, "GND"), /* H8 */ + PINCTRL_PIN(132, "GND"), /* H9 */ + PINCTRL_PIN(133, "GND"), /* H10 */ + PINCTRL_PIN(134, "GND"), /* H11 */ + PINCTRL_PIN(135, "GND"), /* H12 */ + PINCTRL_PIN(136, "GND"), /* H13 */ + PINCTRL_PIN(137, "RVDD"), /* H16 */ + PINCTRL_PIN(138, "RTCXTALO"), /* H17 */ + PINCTRL_PIN(139, "ADC_VDD"), /* H18 */ + PINCTRL_PIN(140, "ADC_GND"), /* H19 */ + PINCTRL_PIN(141, "XP"), /* H20 */ + /* Row J */ + PINCTRL_PIN(142, "DA[21]"), /* J1 */ + PINCTRL_PIN(143, "DQMN[0]"), /* J2 */ + PINCTRL_PIN(144, "DQMN[1]"), /* J3 */ + PINCTRL_PIN(145, "DQMN[2]"), /* J4 */ + PINCTRL_PIN(146, "GND"), /* J5 */ + PINCTRL_PIN(147, "GND"), /* J8 */ + PINCTRL_PIN(148, "GND"), /* J9 */ + PINCTRL_PIN(149, 
"GND"), /* J10 */ + PINCTRL_PIN(150, "GND"), /* J11 */ + PINCTRL_PIN(151, "GND"), /* J12 */ + PINCTRL_PIN(152, "GND"), /* J13 */ + PINCTRL_PIN(153, "CVDD"), /* J16 */ + PINCTRL_PIN(154, "RTCXTALI"), /* J17 */ + PINCTRL_PIN(155, "XM"), /* J18 */ + PINCTRL_PIN(156, "YP"), /* J19 */ + PINCTRL_PIN(157, "YM"), /* J20 */ + /* Row K */ + PINCTRL_PIN(158, "AD[22]"), /* K1 */ + PINCTRL_PIN(159, "DA[20]"), /* K2 */ + PINCTRL_PIN(160, "AD[21]"), /* K3 */ + PINCTRL_PIN(161, "DA[19]"), /* K4 */ + PINCTRL_PIN(162, "RVDD"), /* K5 */ + PINCTRL_PIN(163, "GND"), /* K8 */ + PINCTRL_PIN(164, "GND"), /* K9 */ + PINCTRL_PIN(165, "GND"), /* K10 */ + PINCTRL_PIN(166, "GND"), /* K11 */ + PINCTRL_PIN(167, "GND"), /* K12 */ + PINCTRL_PIN(168, "GND"), /* K13 */ + PINCTRL_PIN(169, "CVDD"), /* K16 */ + PINCTRL_PIN(170, "SYM"), /* K17 */ + PINCTRL_PIN(171, "SYP"), /* K18 */ + PINCTRL_PIN(172, "SXM"), /* K19 */ + PINCTRL_PIN(173, "SXP"), /* K20 */ + /* Row L */ + PINCTRL_PIN(174, "DA[18]"), /* L1 */ + PINCTRL_PIN(175, "DA[17]"), /* L2 */ + PINCTRL_PIN(176, "DA[16]"), /* L3 */ + PINCTRL_PIN(177, "DA[15]"), /* L4 */ + PINCTRL_PIN(178, "GND"), /* L5 */ + PINCTRL_PIN(179, "GND"), /* L8 */ + PINCTRL_PIN(180, "GND"), /* L9 */ + PINCTRL_PIN(181, "GND"), /* L10 */ + PINCTRL_PIN(182, "GND"), /* L11 */ + PINCTRL_PIN(183, "GND"), /* L12 */ + PINCTRL_PIN(184, "GND"), /* L13 */ + PINCTRL_PIN(185, "CVDD"), /* L16 */ + PINCTRL_PIN(186, "COL[5]"), /* L17 */ + PINCTRL_PIN(187, "COL[7]"), /* L18 */ + PINCTRL_PIN(188, "RSTON"), /* L19 */ + PINCTRL_PIN(189, "PRSTN"), /* L20 */ + /* Row M */ + PINCTRL_PIN(190, "AD[7]"), /* M1 */ + PINCTRL_PIN(191, "DA[14]"), /* M2 */ + PINCTRL_PIN(192, "AD[6]"), /* M3 */ + PINCTRL_PIN(193, "AD[5]"), /* M4 */ + PINCTRL_PIN(194, "CVDD"), /* M5 */ + PINCTRL_PIN(195, "GND"), /* M8 */ + PINCTRL_PIN(196, "GND"), /* M9 */ + PINCTRL_PIN(197, "GND"), /* M10 */ + PINCTRL_PIN(198, "GND"), /* M11 */ + PINCTRL_PIN(199, "GND"), /* M12 */ + PINCTRL_PIN(200, "GND"), /* M13 */ + PINCTRL_PIN(201, "GND"), /* M16 */ + PINCTRL_PIN(202, "COL[4]"), /* M17 */ + PINCTRL_PIN(203, "COL[3]"), /* M18 */ + PINCTRL_PIN(204, "COL[6]"), /* M19 */ + PINCTRL_PIN(205, "CSN[0]"), /* M20 */ + /* Row N */ + PINCTRL_PIN(206, "DA[13]"), /* N1 */ + PINCTRL_PIN(207, "DA[12]"), /* N2 */ + PINCTRL_PIN(208, "DA[11]"), /* N3 */ + PINCTRL_PIN(209, "AD[3]"), /* N4 */ + PINCTRL_PIN(210, "CVDD"), /* N5 */ + PINCTRL_PIN(211, "CVDD"), /* N6 */ + PINCTRL_PIN(212, "GND"), /* N8 */ + PINCTRL_PIN(213, "GND"), /* N9 */ + PINCTRL_PIN(214, "GND"), /* N10 */ + PINCTRL_PIN(215, "GND"), /* N11 */ + PINCTRL_PIN(216, "GND"), /* N12 */ + PINCTRL_PIN(217, "GND"), /* N13 */ + PINCTRL_PIN(218, "GND"), /* N15 */ + PINCTRL_PIN(219, "GND"), /* N16 */ + PINCTRL_PIN(220, "XTALO"), /* N17 */ + PINCTRL_PIN(221, "COL[0]"), /* N18 */ + PINCTRL_PIN(222, "COL[1]"), /* N19 */ + PINCTRL_PIN(223, "COL[2]"), /* N20 */ + /* Row P */ + PINCTRL_PIN(224, "AD[4]"), /* P1 */ + PINCTRL_PIN(225, "DA[10]"), /* P2 */ + PINCTRL_PIN(226, "DA[9]"), /* P3 */ + PINCTRL_PIN(227, "BRIGHT"), /* P4 */ + PINCTRL_PIN(228, "RVDD"), /* P5 */ + PINCTRL_PIN(229, "RVDD"), /* P6 */ + PINCTRL_PIN(230, "RVDD"), /* P15 */ + PINCTRL_PIN(231, "RVDD"), /* P16 */ + PINCTRL_PIN(232, "XTALI"), /* P17 */ + PINCTRL_PIN(233, "PLL_VDD"), /* P18 */ + PINCTRL_PIN(234, "ROW[6]"), /* P19 */ + PINCTRL_PIN(235, "ROW[7]"), /* P20 */ + /* Row R */ + PINCTRL_PIN(236, "AD[2]"), /* R1 */ + PINCTRL_PIN(237, "AD[1]"), /* R2 */ + PINCTRL_PIN(238, "P[17]"), /* R3 */ + PINCTRL_PIN(239, "P[14]"), /* R4 */ + PINCTRL_PIN(240, "RVDD"), /* R5 */ + 
PINCTRL_PIN(241, "RVDD"), /* R6 */ + PINCTRL_PIN(242, "GND"), /* R7 */ + PINCTRL_PIN(243, "CVDD"), /* R8 */ + PINCTRL_PIN(244, "CVDD"), /* R13 */ + PINCTRL_PIN(245, "GND"), /* R14 */ + PINCTRL_PIN(246, "RVDD"), /* R15 */ + PINCTRL_PIN(247, "RVDD"), /* R16 */ + PINCTRL_PIN(248, "ROW[0]"), /* R17 */ + PINCTRL_PIN(249, "ROW[3]"), /* R18 */ + PINCTRL_PIN(250, "PLL_GND"), /* R19 */ + PINCTRL_PIN(251, "ROW[5]"), /* R20 */ + /* Row T */ + PINCTRL_PIN(252, "DA[8]"), /* T1 */ + PINCTRL_PIN(253, "BLANK"), /* T2 */ + PINCTRL_PIN(254, "P[13]"), /* T3 */ + PINCTRL_PIN(255, "SPCLK"), /* T4 */ + PINCTRL_PIN(256, "V_CSYNC"), /* T5 */ + PINCTRL_PIN(257, "DD[14]"), /* T6 */ + PINCTRL_PIN(258, "GND"), /* T7 */ + PINCTRL_PIN(259, "CVDD"), /* T8 */ + PINCTRL_PIN(260, "RVDD"), /* T9 */ + PINCTRL_PIN(261, "GND"), /* T10 */ + PINCTRL_PIN(262, "GND"), /* T11 */ + PINCTRL_PIN(263, "RVDD"), /* T12 */ + PINCTRL_PIN(264, "CVDD"), /* T13 */ + PINCTRL_PIN(265, "GND"), /* T14 */ + PINCTRL_PIN(266, "INT[0]"), /* T15 */ + PINCTRL_PIN(267, "USBM[1]"), /* T16 */ + PINCTRL_PIN(268, "RXD[0]"), /* T17 */ + PINCTRL_PIN(269, "TXD[2]"), /* T18 */ + PINCTRL_PIN(270, "ROW[2]"), /* T19 */ + PINCTRL_PIN(271, "ROW[4]"), /* T20 */ + /* Row U */ + PINCTRL_PIN(272, "AD[0]"), /* U1 */ + PINCTRL_PIN(273, "P[15]"), /* U2 */ + PINCTRL_PIN(274, "P[10]"), /* U3 */ + PINCTRL_PIN(275, "P[7]"), /* U4 */ + PINCTRL_PIN(276, "P[6]"), /* U5 */ + PINCTRL_PIN(277, "P[4]"), /* U6 */ + PINCTRL_PIN(278, "P[0]"), /* U7 */ + PINCTRL_PIN(279, "AD[13]"), /* U8 */ + PINCTRL_PIN(280, "DA[3]"), /* U9 */ + PINCTRL_PIN(281, "DA[0]"), /* U10 */ + PINCTRL_PIN(282, "DSRN"), /* U11 */ + PINCTRL_PIN(283, "BOOT[1]"), /* U12 */ + PINCTRL_PIN(284, "NC"), /* U13 */ + PINCTRL_PIN(285, "SSPRX1"), /* U14 */ + PINCTRL_PIN(286, "INT[1]"), /* U15 */ + PINCTRL_PIN(287, "PWMOUT"), /* U16 */ + PINCTRL_PIN(288, "USBM[0]"), /* U17 */ + PINCTRL_PIN(289, "RXD[1]"), /* U18 */ + PINCTRL_PIN(290, "TXD[1]"), /* U19 */ + PINCTRL_PIN(291, "ROW[1]"), /* U20 */ + /* Row V */ + PINCTRL_PIN(292, "P[16]"), /* V1 */ + PINCTRL_PIN(293, "P[11]"), /* V2 */ + PINCTRL_PIN(294, "P[8]"), /* V3 */ + PINCTRL_PIN(295, "DD[15]"), /* V4 */ + PINCTRL_PIN(296, "DD[13]"), /* V5 */ + PINCTRL_PIN(297, "P[1]"), /* V6 */ + PINCTRL_PIN(298, "AD[14]"), /* V7 */ + PINCTRL_PIN(299, "AD[12]"), /* V8 */ + PINCTRL_PIN(300, "DA[2]"), /* V9 */ + PINCTRL_PIN(301, "IDECS0N"), /* V10 */ + PINCTRL_PIN(302, "IDEDA[2]"), /* V11 */ + PINCTRL_PIN(303, "TDI"), /* V12 */ + PINCTRL_PIN(304, "GND"), /* V13 */ + PINCTRL_PIN(305, "ASYNC"), /* V14 */ + PINCTRL_PIN(306, "SSPTX1"), /* V15 */ + PINCTRL_PIN(307, "INT[2]"), /* V16 */ + PINCTRL_PIN(308, "RTSN"), /* V17 */ + PINCTRL_PIN(309, "USBP[0]"), /* V18 */ + PINCTRL_PIN(310, "CTSN"), /* V19 */ + PINCTRL_PIN(311, "TXD[0]"), /* V20 */ + /* Row W */ + PINCTRL_PIN(312, "P[12]"), /* W1 */ + PINCTRL_PIN(313, "P[9]"), /* W2 */ + PINCTRL_PIN(314, "DD[0]"), /* W3 */ + PINCTRL_PIN(315, "P[5]"), /* W4 */ + PINCTRL_PIN(316, "P[3]"), /* W5 */ + PINCTRL_PIN(317, "DA[7]"), /* W6 */ + PINCTRL_PIN(318, "DA[5]"), /* W7 */ + PINCTRL_PIN(319, "AD[11]"), /* W8 */ + PINCTRL_PIN(320, "AD[9]"), /* W9 */ + PINCTRL_PIN(321, "IDECS1N"), /* W10 */ + PINCTRL_PIN(322, "IDEDA[1]"), /* W11 */ + PINCTRL_PIN(323, "TCK"), /* W12 */ + PINCTRL_PIN(324, "TMS"), /* W13 */ + PINCTRL_PIN(325, "EECLK"), /* W14 */ + PINCTRL_PIN(326, "SCLK1"), /* W15 */ + PINCTRL_PIN(327, "GRLED"), /* W16 */ + PINCTRL_PIN(328, "INT[3]"), /* W17 */ + PINCTRL_PIN(329, "SLA[1]"), /* W18 */ + PINCTRL_PIN(330, "SLA[0]"), /* W19 */ + PINCTRL_PIN(331, 
"RXD[2]"), /* W20 */ + /* Row Y */ + PINCTRL_PIN(332, "HSYNC"), /* Y1 */ + PINCTRL_PIN(333, "DD[1]"), /* Y2 */ + PINCTRL_PIN(334, "DD[12]"), /* Y3 */ + PINCTRL_PIN(335, "P[2]"), /* Y4 */ + PINCTRL_PIN(336, "AD[15]"), /* Y5 */ + PINCTRL_PIN(337, "DA[6]"), /* Y6 */ + PINCTRL_PIN(338, "DA[4]"), /* Y7 */ + PINCTRL_PIN(339, "AD[10]"), /* Y8 */ + PINCTRL_PIN(340, "DA[1]"), /* Y9 */ + PINCTRL_PIN(341, "AD[8]"), /* Y10 */ + PINCTRL_PIN(342, "IDEDA[0]"), /* Y11 */ + PINCTRL_PIN(343, "DTRN"), /* Y12 */ + PINCTRL_PIN(344, "TDO"), /* Y13 */ + PINCTRL_PIN(345, "BOOT[0]"), /* Y14 */ + PINCTRL_PIN(346, "EEDAT"), /* Y15 */ + PINCTRL_PIN(347, "ASDO"), /* Y16 */ + PINCTRL_PIN(348, "SFRM1"), /* Y17 */ + PINCTRL_PIN(349, "RDLED"), /* Y18 */ + PINCTRL_PIN(350, "USBP[1]"), /* Y19 */ + PINCTRL_PIN(351, "ABITCLK"), /* Y20 */ +}; + +static const unsigned int ssp_ep9312_pins[] = { + 285, 306, 326, 348, +}; + +static const unsigned int ac97_ep9312_pins[] = { + 77, 95, 305, 347, 351, +}; + +static const unsigned int pwm_ep9312_pins[] = { 74 }; + +static const unsigned int gpio1a_ep9312_pins[] = { 74 }; + +static const unsigned int gpio2a_9312_pins[] = { + 234, 235, 248, 249, 251, 270, 271, 291, +}; + +static const unsigned int gpio3a_9312_pins[] = { + 186, 187, 202, 203, 204, 221, 222, 223, +}; + +static const unsigned int keypad_9312_pins[] = { + 186, 187, 202, 203, 204, 221, 222, 223, + 234, 235, 248, 249, 251, 270, 271, 291, +}; + +static const unsigned int gpio4a_9312_pins[] = { + 78, 301, 302, 321, 322, 342, +}; + +static const unsigned int gpio6a_9312_pins[] = { + 257, 295, 296, 334, +}; + +static const unsigned int gpio7a_9312_pins[] = { + 4, 24, 25, 45, 46, 66, 314, 333, +}; + +static const unsigned int ide_9312_pins[] = { + 78, 301, 302, 321, 322, 342, 257, 295, + 296, 334, 4, 24, 25, 45, 46, 66, + 314, 333, +}; + +static const struct ep93xx_pin_group ep9312_pin_groups[] = { + PMX_GROUP("ssp", ssp_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0), + PMX_GROUP("i2s_on_ssp", ssp_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, + EP93XX_SYSCON_DEVCFG_I2SONSSP), + PMX_GROUP("pwm1", pwm_ep9312_pins, EP93XX_SYSCON_DEVCFG_PONG, + EP93XX_SYSCON_DEVCFG_PONG), + PMX_GROUP("gpio1agrp", gpio1a_ep9312_pins, EP93XX_SYSCON_DEVCFG_PONG, 0), + PMX_GROUP("ac97", ac97_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0), + PMX_GROUP("i2s_on_ac97", ac97_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, + EP93XX_SYSCON_DEVCFG_I2SONAC97), + PMX_GROUP("rasteronsdram0grp", raster_on_sdram0_pins, EP93XX_SYSCON_DEVCFG_RASONP3, 0), + PMX_GROUP("rasteronsdram3grp", raster_on_sdram3_pins, EP93XX_SYSCON_DEVCFG_RASONP3, + EP93XX_SYSCON_DEVCFG_RASONP3), + PMX_GROUP("gpio2agrp", gpio2a_9312_pins, EP93XX_SYSCON_DEVCFG_GONK, + EP93XX_SYSCON_DEVCFG_GONK), + PMX_GROUP("gpio3agrp", gpio3a_9312_pins, EP93XX_SYSCON_DEVCFG_GONK, + EP93XX_SYSCON_DEVCFG_GONK), + PMX_GROUP("keypadgrp", keypad_9312_pins, EP93XX_SYSCON_DEVCFG_GONK, 0), + PMX_GROUP("gpio4agrp", gpio4a_9312_pins, EP93XX_SYSCON_DEVCFG_EONIDE, + EP93XX_SYSCON_DEVCFG_EONIDE), + PMX_GROUP("gpio6agrp", gpio6a_9312_pins, EP93XX_SYSCON_DEVCFG_GONIDE, + EP93XX_SYSCON_DEVCFG_GONIDE), + PMX_GROUP("gpio7agrp", gpio7a_9312_pins, EP93XX_SYSCON_DEVCFG_HONIDE, + EP93XX_SYSCON_DEVCFG_HONIDE), + PMX_GROUP("idegrp", ide_9312_pins, EP93XX_SYSCON_DEVCFG_EONIDE | + EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE, 0), +}; + +static int ep93xx_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + + switch (pmx->model) { + case EP93XX_9301_PINCTRL: + return 
ARRAY_SIZE(ep9301_pin_groups); + case EP93XX_9307_PINCTRL: + return ARRAY_SIZE(ep9307_pin_groups); + case EP93XX_9312_PINCTRL: + return ARRAY_SIZE(ep9312_pin_groups); + default: + return 0; + } +} + +static const char *ep93xx_get_group_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + + switch (pmx->model) { + case EP93XX_9301_PINCTRL: + return ep9301_pin_groups[selector].grp.name; + case EP93XX_9307_PINCTRL: + return ep9307_pin_groups[selector].grp.name; + case EP93XX_9312_PINCTRL: + return ep9312_pin_groups[selector].grp.name; + default: + return NULL; + } +} + +static int ep93xx_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int selector, + const unsigned int **pins, + unsigned int *num_pins) +{ + struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + + switch (pmx->model) { + case EP93XX_9301_PINCTRL: + *pins = ep9301_pin_groups[selector].grp.pins; + *num_pins = ep9301_pin_groups[selector].grp.npins; + break; + case EP93XX_9307_PINCTRL: + *pins = ep9307_pin_groups[selector].grp.pins; + *num_pins = ep9307_pin_groups[selector].grp.npins; + break; + case EP93XX_9312_PINCTRL: + *pins = ep9312_pin_groups[selector].grp.pins; + *num_pins = ep9312_pin_groups[selector].grp.npins; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct pinctrl_ops ep93xx_pctrl_ops = { + .get_groups_count = ep93xx_get_groups_count, + .get_group_name = ep93xx_get_group_name, + .get_group_pins = ep93xx_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_all, + .dt_free_map = pinconf_generic_dt_free_map, +}; + +static const char * const spigrps[] = { "ssp" }; +static const char * const ac97grps[] = { "ac97" }; +static const char * const i2sgrps[] = { "i2s_on_ssp", "i2s_on_ac97" }; +static const char * const pwm1grps[] = { "pwm1" }; +static const char * const gpiogrps[] = { "gpio1agrp", "gpio2agrp", "gpio3agrp", + "gpio4agrp", "gpio6agrp", "gpio7agrp" }; +static const char * const rastergrps[] = { "rasteronsdram0grp", "rasteronsdram3grp"}; +static const char * const keypadgrps[] = { "keypadgrp"}; +static const char * const idegrps[] = { "idegrp"}; + +static const struct pinfunction ep93xx_pmx_functions[] = { + PINCTRL_PINFUNCTION("spi", spigrps, ARRAY_SIZE(spigrps)), + PINCTRL_PINFUNCTION("ac97", ac97grps, ARRAY_SIZE(ac97grps)), + PINCTRL_PINFUNCTION("i2s", i2sgrps, ARRAY_SIZE(i2sgrps)), + PINCTRL_PINFUNCTION("pwm", pwm1grps, ARRAY_SIZE(pwm1grps)), + PINCTRL_PINFUNCTION("keypad", keypadgrps, ARRAY_SIZE(keypadgrps)), + PINCTRL_PINFUNCTION("pata", idegrps, ARRAY_SIZE(idegrps)), + PINCTRL_PINFUNCTION("lcd", rastergrps, ARRAY_SIZE(rastergrps)), + PINCTRL_PINFUNCTION("gpio", gpiogrps, ARRAY_SIZE(gpiogrps)), +}; + +static int ep93xx_pmx_set_mux(struct pinctrl_dev *pctldev, + unsigned int selector, + unsigned int group) +{ + struct ep93xx_pmx *pmx; + const struct pinfunction *func; + const struct ep93xx_pin_group *grp; + u32 before, after, expected; + unsigned long tmp; + int i; + + pmx = pinctrl_dev_get_drvdata(pctldev); + + switch (pmx->model) { + case EP93XX_9301_PINCTRL: + grp = &ep9301_pin_groups[group]; + break; + case EP93XX_9307_PINCTRL: + grp = &ep9307_pin_groups[group]; + break; + case EP93XX_9312_PINCTRL: + grp = &ep9312_pin_groups[group]; + break; + default: + dev_err(pmx->dev, "invalid SoC type\n"); + return -ENODEV; + } + + func = &ep93xx_pmx_functions[selector]; + + dev_dbg(pmx->dev, + "ACTIVATE function \"%s\" with group \"%s\" (mask=0x%x, value=0x%x)\n", + func->name, 
grp->grp.name, grp->mask, grp->value); + + regmap_read(pmx->map, EP93XX_SYSCON_DEVCFG, &before); + ep93xx_pinctrl_update_bits(pmx, EP93XX_SYSCON_DEVCFG, + grp->mask, grp->value); + regmap_read(pmx->map, EP93XX_SYSCON_DEVCFG, &after); + + dev_dbg(pmx->dev, "before=0x%x, after=0x%x, mask=0x%lx\n", + before, after, PADS_MASK); + + /* Which bits changed */ + before &= PADS_MASK; + after &= PADS_MASK; + expected = before & ~grp->mask; + expected |= grp->value; + expected &= PADS_MASK; + + /* Print changed states */ + tmp = expected ^ after; + for_each_set_bit(i, &tmp, PADS_MAXBIT) { + bool enabled = expected & BIT(i); + + dev_err(pmx->dev, + "pin group %s could not be %s: probably a hardware limitation\n", + ep93xx_padgroups[i], str_enabled_disabled(enabled)); + dev_err(pmx->dev, + "DeviceCfg before: %08x, after %08x, expected %08x\n", + before, after, expected); + } + + return tmp ? -EINVAL : 0; +}; + +static int ep93xx_pmx_get_funcs_count(struct pinctrl_dev *pctldev) +{ + return ARRAY_SIZE(ep93xx_pmx_functions); +} + +static const char *ep93xx_pmx_get_func_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + return ep93xx_pmx_functions[selector].name; +} + +static int ep93xx_pmx_get_groups(struct pinctrl_dev *pctldev, + unsigned int selector, + const char * const **groups, + unsigned int * const num_groups) +{ + *groups = ep93xx_pmx_functions[selector].groups; + *num_groups = ep93xx_pmx_functions[selector].ngroups; + return 0; +} + +static const struct pinmux_ops ep93xx_pmx_ops = { + .get_functions_count = ep93xx_pmx_get_funcs_count, + .get_function_name = ep93xx_pmx_get_func_name, + .get_function_groups = ep93xx_pmx_get_groups, + .set_mux = ep93xx_pmx_set_mux, +}; + +static struct pinctrl_desc ep93xx_pmx_desc = { + .name = DRIVER_NAME, + .pctlops = &ep93xx_pctrl_ops, + .pmxops = &ep93xx_pmx_ops, + .owner = THIS_MODULE, +}; + +static int ep93xx_pmx_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev); + struct device *dev = &adev->dev; + struct ep93xx_pmx *pmx; + + /* Create state holders etc for this driver */ + pmx = devm_kzalloc(dev, sizeof(*pmx), GFP_KERNEL); + if (!pmx) + return -ENOMEM; + + pmx->dev = dev; + pmx->map = rdev->map; + pmx->aux_dev = rdev; + pmx->model = (enum ep93xx_pinctrl_model)(uintptr_t)id->driver_data; + switch (pmx->model) { + case EP93XX_9301_PINCTRL: + ep93xx_pmx_desc.pins = ep9301_pins; + ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9301_pins); + dev_info(dev, "detected 9301/9302 chip variant\n"); + break; + case EP93XX_9307_PINCTRL: + ep93xx_pmx_desc.pins = ep9307_pins; + ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9307_pins); + dev_info(dev, "detected 9307 chip variant\n"); + break; + case EP93XX_9312_PINCTRL: + ep93xx_pmx_desc.pins = ep9312_pins; + ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9312_pins); + dev_info(dev, "detected 9312/9315 chip variant\n"); + break; + default: + return dev_err_probe(dev, -EINVAL, "unknown pin control model: %u\n", pmx->model); + } + + /* using parent of_node to match in get_pinctrl_dev_from_of_node() */ + device_set_node(dev, dev_fwnode(adev->dev.parent)); + pmx->pctl = devm_pinctrl_register(dev, &ep93xx_pmx_desc, pmx); + if (IS_ERR(pmx->pctl)) + return dev_err_probe(dev, PTR_ERR(pmx->pctl), "could not register pinmux driver\n"); + + return 0; +}; + +static const struct auxiliary_device_id ep93xx_pinctrl_ids[] = { + { + .name = "soc_ep93xx.pinctrl-ep9301", + .driver_data = (kernel_ulong_t)EP93XX_9301_PINCTRL, + }, + { + .name = 
"soc_ep93xx.pinctrl-ep9307", + .driver_data = (kernel_ulong_t)EP93XX_9307_PINCTRL, + }, + { + .name = "soc_ep93xx.pinctrl-ep9312", + .driver_data = (kernel_ulong_t)EP93XX_9312_PINCTRL, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(auxiliary, ep93xx_pinctrl_ids); + +static struct auxiliary_driver ep93xx_pmx_driver = { + .probe = ep93xx_pmx_probe, + .id_table = ep93xx_pinctrl_ids, +}; +module_auxiliary_driver(ep93xx_pmx_driver); diff --git a/drivers/pinctrl/pinctrl-eyeq5.c b/drivers/pinctrl/pinctrl-eyeq5.c new file mode 100644 index 00000000000000..5f6af934a51695 --- /dev/null +++ b/drivers/pinctrl/pinctrl-eyeq5.c @@ -0,0 +1,575 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Pinctrl driver for the Mobileye EyeQ5 platform. + * + * The registers are located in a syscon region called OLB. There are two pin + * banks, each being controlled by 5 registers (see enum eq5p_regs) for + * pull-down, pull-up, drive strength and muxing. + * + * For each pin, muxing is between two functions: (0) GPIO or (1) another one + * that is pin-dependent. Functions are declared statically in this driver. + * + * We create pinctrl groups that are 1:1 equivalent to pins: each group has a + * single pin, and its index/selector is the pin number. + * + * We use eq5p_ as prefix, as-in "EyeQ5 Pinctrl", but way shorter. + * + * Copyright (C) 2024 Mobileye Vision Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "core.h" +#include "pinctrl-utils.h" + +struct eq5p_pinctrl { + struct pinctrl_desc desc; + void __iomem *base; +}; + +enum eq5p_bank { + EQ5P_BANK_A, + EQ5P_BANK_B, + + EQ5P_BANK_COUNT, +}; + +enum eq5p_regs { + EQ5P_PD, + EQ5P_PU, + EQ5P_DS_LOW, + EQ5P_DS_HIGH, + EQ5P_IOCR, + + EQ5P_REG_COUNT, +}; + +static const unsigned int eq5p_regs[EQ5P_BANK_COUNT][EQ5P_REG_COUNT] = { + [EQ5P_BANK_A] = {0x0C0, 0x0C4, 0x0D0, 0x0D4, 0x0B0}, + [EQ5P_BANK_B] = {0x0C8, 0x0CC, 0x0D8, 0x0DC, 0x0B4}, +}; + +/* + * Drive strength; two bits per pin. + */ +#define EQ5P_DS_MASK GENMASK(1, 0) + +/* + * Comments to the right of each pin are the "signal name" in the datasheet. 
+ */ +static const struct pinctrl_pin_desc eq5p_pins[] = { + /* Bank A */ + PINCTRL_PIN(0, "PA0"), /* A0_TIMER0_CK */ + PINCTRL_PIN(1, "PA1"), /* A1_TIMER0_EOC */ + PINCTRL_PIN(2, "PA2"), /* A2_TIMER1_CK */ + PINCTRL_PIN(3, "PA3"), /* A3_TIMER1_EOC */ + PINCTRL_PIN(4, "PA4"), /* A4_TIMER2_CK */ + PINCTRL_PIN(5, "PA5"), /* A5_TIMER2_EOC */ + PINCTRL_PIN(6, "PA6"), /* A6_TIMER5_EXT_INCAP1 */ + PINCTRL_PIN(7, "PA7"), /* A7_TIMER5_EXT_INCAP2 */ + PINCTRL_PIN(8, "PA8"), /* A8_TIMER5_EXT_OUTCMP1 */ + PINCTRL_PIN(9, "PA9"), /* A9_TIMER5_EXT_OUTCMP2 */ + PINCTRL_PIN(10, "PA10"), /* A10_UART_0_TX */ + PINCTRL_PIN(11, "PA11"), /* A11_UART_0_RX */ + PINCTRL_PIN(12, "PA12"), /* A12_UART_1_TX */ + PINCTRL_PIN(13, "PA13"), /* A13_UART_1_RX */ + PINCTRL_PIN(14, "PA14"), /* A14_CAN_0_TX */ + PINCTRL_PIN(15, "PA15"), /* A15_CAN_0_RX */ + PINCTRL_PIN(16, "PA16"), /* A16_CAN_1_TX */ + PINCTRL_PIN(17, "PA17"), /* A17_CAN_1_RX */ + PINCTRL_PIN(18, "PA18"), /* A18_SPI_0_DO */ + PINCTRL_PIN(19, "PA19"), /* A19_SPI_0_DI */ + PINCTRL_PIN(20, "PA20"), /* A20_SPI_0_CK */ + PINCTRL_PIN(21, "PA21"), /* A21_SPI_0_CS0 */ + PINCTRL_PIN(22, "PA22"), /* A22_SPI_0_CS1 */ + PINCTRL_PIN(23, "PA23"), /* A23_SPI_1_DO */ + PINCTRL_PIN(24, "PA24"), /* A24_SPI_1_DI */ + PINCTRL_PIN(25, "PA25"), /* A25_SPI_1_CK */ + PINCTRL_PIN(26, "PA26"), /* A26_SPI_1_CS0 */ + PINCTRL_PIN(27, "PA27"), /* A27_SPI_1_CS1 */ + PINCTRL_PIN(28, "PA28"), /* A28_REF_CLK0 */ + +#define EQ5P_PIN_OFFSET_BANK_B 29 + + /* Bank B */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 0, "PB0"), /* B0_TIMER3_CK */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 1, "PB1"), /* B1_TIMER3_EOC */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 2, "PB2"), /* B2_TIMER4_CK */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 3, "PB3"), /* B3_TIMER4_EOC */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 4, "PB4"), /* B4_TIMER6_EXT_INCAP1 */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 5, "PB5"), /* B5_TIMER6_EXT_INCAP2 */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 6, "PB6"), /* B6_TIMER6_EXT_OUTCMP1 */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 7, "PB7"), /* B7_TIMER6_EXT_OUTCMP2 */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 8, "PB8"), /* B8_UART_2_TX */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 9, "PB9"), /* B9_UART_2_RX */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 10, "PB10"), /* B10_CAN_2_TX */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 11, "PB11"), /* B11_CAN_2_RX */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 12, "PB12"), /* B12_SPI_2_DO */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 13, "PB13"), /* B13_SPI_2_DI */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 14, "PB14"), /* B14_SPI_2_CK */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 15, "PB15"), /* B15_SPI_2_CS0 */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 16, "PB16"), /* B16_SPI_2_CS1 */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 17, "PB17"), /* B17_SPI_3_DO */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 18, "PB18"), /* B18_SPI_3_DI */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 19, "PB19"), /* B19_SPI_3_CK */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 20, "PB20"), /* B20_SPI_3_CS0 */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 21, "PB21"), /* B21_SPI_3_CS1 */ + PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 22, "PB22"), /* B22_MCLK0 */ +}; + +static const char * const gpio_groups[] = { + /* Bank A */ + "PA0", "PA1", "PA2", "PA3", "PA4", "PA5", "PA6", "PA7", + "PA8", "PA9", "PA10", "PA11", "PA12", "PA13", "PA14", "PA15", + "PA16", "PA17", "PA18", "PA19", "PA20", "PA21", "PA22", "PA23", + "PA24", "PA25", "PA26", "PA27", "PA28", + + /* Bank B */ + "PB0", "PB1", "PB2", "PB3", "PB4", "PB5", "PB6", "PB7", + "PB8", "PB9", "PB10", "PB11", "PB12", 
"PB13", "PB14", "PB15", + "PB16", "PB17", "PB18", "PB19", "PB20", "PB21", "PB22", +}; + +/* Groups of functions on bank A */ +static const char * const timer0_groups[] = { "PA0", "PA1" }; +static const char * const timer1_groups[] = { "PA2", "PA3" }; +static const char * const timer2_groups[] = { "PA4", "PA5" }; +static const char * const timer5_groups[] = { "PA6", "PA7", "PA8", "PA9" }; +static const char * const uart0_groups[] = { "PA10", "PA11" }; +static const char * const uart1_groups[] = { "PA12", "PA13" }; +static const char * const can0_groups[] = { "PA14", "PA15" }; +static const char * const can1_groups[] = { "PA16", "PA17" }; +static const char * const spi0_groups[] = { "PA18", "PA19", "PA20", "PA21", "PA22" }; +static const char * const spi1_groups[] = { "PA23", "PA24", "PA25", "PA26", "PA27" }; +static const char * const refclk0_groups[] = { "PA28" }; + +/* Groups of functions on bank B */ +static const char * const timer3_groups[] = { "PB0", "PB1" }; +static const char * const timer4_groups[] = { "PB2", "PB3" }; +static const char * const timer6_groups[] = { "PB4", "PB5", "PB6", "PB7" }; +static const char * const uart2_groups[] = { "PB8", "PB9" }; +static const char * const can2_groups[] = { "PB10", "PB11" }; +static const char * const spi2_groups[] = { "PB12", "PB13", "PB14", "PB15", "PB16" }; +static const char * const spi3_groups[] = { "PB17", "PB18", "PB19", "PB20", "PB21" }; +static const char * const mclk0_groups[] = { "PB22" }; + +static const struct pinfunction eq5p_functions[] = { + /* GPIO having a fixed index is depended upon, see GPIO_FUNC_SELECTOR. */ + PINCTRL_PINFUNCTION("gpio", gpio_groups, ARRAY_SIZE(gpio_groups)), +#define GPIO_FUNC_SELECTOR 0 + + /* Bank A functions */ + PINCTRL_PINFUNCTION("timer0", timer0_groups, ARRAY_SIZE(timer0_groups)), + PINCTRL_PINFUNCTION("timer1", timer1_groups, ARRAY_SIZE(timer1_groups)), + PINCTRL_PINFUNCTION("timer2", timer2_groups, ARRAY_SIZE(timer2_groups)), + PINCTRL_PINFUNCTION("timer5", timer5_groups, ARRAY_SIZE(timer5_groups)), + PINCTRL_PINFUNCTION("uart0", uart0_groups, ARRAY_SIZE(uart0_groups)), + PINCTRL_PINFUNCTION("uart1", uart1_groups, ARRAY_SIZE(uart1_groups)), + PINCTRL_PINFUNCTION("can0", can0_groups, ARRAY_SIZE(can0_groups)), + PINCTRL_PINFUNCTION("can1", can1_groups, ARRAY_SIZE(can1_groups)), + PINCTRL_PINFUNCTION("spi0", spi0_groups, ARRAY_SIZE(spi0_groups)), + PINCTRL_PINFUNCTION("spi1", spi1_groups, ARRAY_SIZE(spi1_groups)), + PINCTRL_PINFUNCTION("refclk0", refclk0_groups, ARRAY_SIZE(refclk0_groups)), + + /* Bank B functions */ + PINCTRL_PINFUNCTION("timer3", timer3_groups, ARRAY_SIZE(timer3_groups)), + PINCTRL_PINFUNCTION("timer4", timer4_groups, ARRAY_SIZE(timer4_groups)), + PINCTRL_PINFUNCTION("timer6", timer6_groups, ARRAY_SIZE(timer6_groups)), + PINCTRL_PINFUNCTION("uart2", uart2_groups, ARRAY_SIZE(uart2_groups)), + PINCTRL_PINFUNCTION("can2", can2_groups, ARRAY_SIZE(can2_groups)), + PINCTRL_PINFUNCTION("spi2", spi2_groups, ARRAY_SIZE(spi2_groups)), + PINCTRL_PINFUNCTION("spi3", spi3_groups, ARRAY_SIZE(spi3_groups)), + PINCTRL_PINFUNCTION("mclk0", mclk0_groups, ARRAY_SIZE(mclk0_groups)), +}; + +static void eq5p_update_bits(const struct eq5p_pinctrl *pctrl, + enum eq5p_bank bank, enum eq5p_regs reg, + u32 mask, u32 val) +{ + void __iomem *ptr = pctrl->base + eq5p_regs[bank][reg]; + + writel((readl(ptr) & ~mask) | (val & mask), ptr); +} + +static bool eq5p_test_bit(const struct eq5p_pinctrl *pctrl, + enum eq5p_bank bank, enum eq5p_regs reg, int offset) +{ + u32 val = readl(pctrl->base + 
eq5p_regs[bank][reg]); + + if (WARN_ON(offset > 31)) + return false; + + return (val & BIT(offset)) != 0; +} + +static enum eq5p_bank eq5p_pin_to_bank(unsigned int pin) +{ + if (pin < EQ5P_PIN_OFFSET_BANK_B) + return EQ5P_BANK_A; + else + return EQ5P_BANK_B; +} + +static unsigned int eq5p_pin_to_offset(unsigned int pin) +{ + if (pin < EQ5P_PIN_OFFSET_BANK_B) + return pin; + else + return pin - EQ5P_PIN_OFFSET_BANK_B; +} + +static int eq5p_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + return ARRAY_SIZE(eq5p_pins); +} + +static const char *eq5p_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + return pctldev->desc->pins[selector].name; +} + +static int eq5p_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int selector, + const unsigned int **pins, + unsigned int *num_pins) +{ + *pins = &pctldev->desc->pins[selector].number; + *num_pins = 1; + return 0; +} + +static int eq5p_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *config) +{ + enum pin_config_param param = pinconf_to_config_param(*config); + struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + unsigned int offset = eq5p_pin_to_offset(pin); + enum eq5p_bank bank = eq5p_pin_to_bank(pin); + u32 val_ds, arg; + bool pd, pu; + + pd = eq5p_test_bit(pctrl, bank, EQ5P_PD, offset); + pu = eq5p_test_bit(pctrl, bank, EQ5P_PU, offset); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + arg = !(pd || pu); + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + arg = pd; + break; + case PIN_CONFIG_BIAS_PULL_UP: + arg = pu; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + offset *= 2; /* two bits per pin */ + if (offset >= 32) { + val_ds = readl(pctrl->base + eq5p_regs[bank][EQ5P_DS_HIGH]); + offset -= 32; + } else { + val_ds = readl(pctrl->base + eq5p_regs[bank][EQ5P_DS_LOW]); + } + arg = (val_ds >> offset) & EQ5P_DS_MASK; + break; + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + return 0; +} + +static void eq5p_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, + unsigned int pin) +{ + struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const char *pin_name = pctrl->desc.pins[pin].name; + unsigned int offset = eq5p_pin_to_offset(pin); + enum eq5p_bank bank = eq5p_pin_to_bank(pin); + const char *func_name, *bias; + unsigned long ds_config; + u32 drive_strength; + bool pd, pu; + int i, j; + + /* + * First, let's get the function name. All pins have only two functions: + * GPIO (IOCR == 0) and something else (IOCR == 1). + */ + if (eq5p_test_bit(pctrl, bank, EQ5P_IOCR, offset)) { + func_name = NULL; + for (i = 0; i < ARRAY_SIZE(eq5p_functions); i++) { + if (i == GPIO_FUNC_SELECTOR) + continue; + + for (j = 0; j < eq5p_functions[i].ngroups; j++) { + /* Groups and pins are the same thing for us. */ + const char *x = eq5p_functions[i].groups[j]; + + if (strcmp(x, pin_name) == 0) { + func_name = eq5p_functions[i].name; + break; + } + } + + if (func_name) + break; + } + + /* + * We have not found the function attached to this pin, this + * should never occur as all pins have exactly two functions. + */ + if (!func_name) + func_name = "unknown"; + } else { + func_name = eq5p_functions[GPIO_FUNC_SELECTOR].name; + } + + /* Second, we retrieve the bias. 
*/ + pd = eq5p_test_bit(pctrl, bank, EQ5P_PD, offset); + pu = eq5p_test_bit(pctrl, bank, EQ5P_PU, offset); + if (pd && pu) + bias = "both"; + else if (pd && !pu) + bias = "pulldown"; + else if (!pd && pu) + bias = "pullup"; + else + bias = "none"; + + /* Third, we get the drive strength. */ + ds_config = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, 0); + eq5p_pinconf_get(pctldev, pin, &ds_config); + drive_strength = pinconf_to_config_argument(ds_config); + + seq_printf(s, "function=%s bias=%s drive_strength=%d", + func_name, bias, drive_strength); +} + +static const struct pinctrl_ops eq5p_pinctrl_ops = { + .get_groups_count = eq5p_pinctrl_get_groups_count, + .get_group_name = eq5p_pinctrl_get_group_name, + .get_group_pins = eq5p_pinctrl_get_group_pins, + .pin_dbg_show = eq5p_pinctrl_pin_dbg_show, + .dt_node_to_map = pinconf_generic_dt_node_to_map_pin, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int eq5p_pinmux_get_functions_count(struct pinctrl_dev *pctldev) +{ + return ARRAY_SIZE(eq5p_functions); +} + +static const char *eq5p_pinmux_get_function_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + return eq5p_functions[selector].name; +} + +static int eq5p_pinmux_get_function_groups(struct pinctrl_dev *pctldev, + unsigned int selector, + const char * const **groups, + unsigned int *num_groups) +{ + *groups = eq5p_functions[selector].groups; + *num_groups = eq5p_functions[selector].ngroups; + return 0; +} + +static int eq5p_pinmux_set_mux(struct pinctrl_dev *pctldev, + unsigned int func_selector, unsigned int pin) +{ + struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const char *func_name = eq5p_functions[func_selector].name; + const char *group_name = pctldev->desc->pins[pin].name; + bool is_gpio = func_selector == GPIO_FUNC_SELECTOR; + unsigned int offset = eq5p_pin_to_offset(pin); + enum eq5p_bank bank = eq5p_pin_to_bank(pin); + u32 mask, val; + + dev_dbg(pctldev->dev, "func=%s group=%s\n", func_name, group_name); + + mask = BIT(offset); + val = is_gpio ? 0 : mask; + eq5p_update_bits(pctrl, bank, EQ5P_IOCR, mask, val); + return 0; +} + +static int eq5p_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int pin) +{ + /* Pin numbers and group selectors are the same thing in our case. 
*/ + return eq5p_pinmux_set_mux(pctldev, GPIO_FUNC_SELECTOR, pin); +} + +static const struct pinmux_ops eq5p_pinmux_ops = { + .get_functions_count = eq5p_pinmux_get_functions_count, + .get_function_name = eq5p_pinmux_get_function_name, + .get_function_groups = eq5p_pinmux_get_function_groups, + .set_mux = eq5p_pinmux_set_mux, + .gpio_request_enable = eq5p_pinmux_gpio_request_enable, + .strict = true, +}; + +static int eq5p_pinconf_set_drive_strength(struct pinctrl_dev *pctldev, + unsigned int pin, u32 arg) +{ + struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + unsigned int offset = eq5p_pin_to_offset(pin); + enum eq5p_bank bank = eq5p_pin_to_bank(pin); + unsigned int reg; + u32 mask, val; + + if (arg & ~EQ5P_DS_MASK) { + dev_err(pctldev->dev, "Unsupported drive strength: %u\n", arg); + return -EINVAL; + } + + offset *= 2; /* two bits per pin */ + + if (offset >= 32) { + reg = EQ5P_DS_HIGH; + offset -= 32; + } else { + reg = EQ5P_DS_LOW; + } + + mask = EQ5P_DS_MASK << offset; + val = arg << offset; + eq5p_update_bits(pctrl, bank, reg, mask, val); + return 0; +} + +static int eq5p_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int num_configs) +{ + struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const char *pin_name = pctldev->desc->pins[pin].name; + unsigned int offset = eq5p_pin_to_offset(pin); + enum eq5p_bank bank = eq5p_pin_to_bank(pin); + struct device *dev = pctldev->dev; + u32 val = BIT(offset); + unsigned int i; + + for (i = 0; i < num_configs; i++) { + enum pin_config_param param = pinconf_to_config_param(configs[i]); + u32 arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + dev_dbg(dev, "pin=%s bias_disable\n", pin_name); + + eq5p_update_bits(pctrl, bank, EQ5P_PD, val, 0); + eq5p_update_bits(pctrl, bank, EQ5P_PU, val, 0); + break; + + case PIN_CONFIG_BIAS_PULL_DOWN: + dev_dbg(dev, "pin=%s bias_pull_down arg=%u\n", + pin_name, arg); + + if (arg == 0) /* cannot connect to GND */ + return -ENOTSUPP; + + eq5p_update_bits(pctrl, bank, EQ5P_PD, val, val); + eq5p_update_bits(pctrl, bank, EQ5P_PU, val, 0); + break; + + case PIN_CONFIG_BIAS_PULL_UP: + dev_dbg(dev, "pin=%s bias_pull_up arg=%u\n", + pin_name, arg); + + if (arg == 0) /* cannot connect to VDD */ + return -ENOTSUPP; + + eq5p_update_bits(pctrl, bank, EQ5P_PD, val, 0); + eq5p_update_bits(pctrl, bank, EQ5P_PU, val, val); + break; + + case PIN_CONFIG_DRIVE_STRENGTH: + dev_dbg(dev, "pin=%s drive_strength arg=%u\n", + pin_name, arg); + + eq5p_pinconf_set_drive_strength(pctldev, pin, arg); + break; + + default: + dev_err(dev, "Unsupported pinconf %u\n", param); + return -ENOTSUPP; + } + } + + return 0; +} + +static const struct pinconf_ops eq5p_pinconf_ops = { + .is_generic = true, + .pin_config_get = eq5p_pinconf_get, + .pin_config_set = eq5p_pinconf_set, + /* Pins and groups are equivalent in this driver. 
*/ + .pin_config_group_get = eq5p_pinconf_get, + .pin_config_group_set = eq5p_pinconf_set, +}; + +static int eq5p_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct device *dev = &adev->dev; + struct pinctrl_dev *pctldev; + struct eq5p_pinctrl *pctrl; + int ret; + + pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + + pctrl->base = (void __iomem *)dev_get_platdata(dev); + pctrl->desc.name = dev_name(dev); + pctrl->desc.pins = eq5p_pins; + pctrl->desc.npins = ARRAY_SIZE(eq5p_pins); + pctrl->desc.pctlops = &eq5p_pinctrl_ops; + pctrl->desc.pmxops = &eq5p_pinmux_ops; + pctrl->desc.confops = &eq5p_pinconf_ops; + pctrl->desc.owner = THIS_MODULE; + + ret = devm_pinctrl_register_and_init(dev, &pctrl->desc, pctrl, &pctldev); + if (ret) + return dev_err_probe(dev, ret, "failed registering pinctrl device\n"); + + ret = pinctrl_enable(pctldev); + if (ret) + return dev_err_probe(dev, ret, "failed enabling pinctrl device\n"); + + return 0; +} + +static const struct auxiliary_device_id eq5p_id_table[] = { + { .name = "clk_eyeq.pinctrl" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, eq5p_id_table); + +static struct auxiliary_driver eq5p_driver = { + .probe = eq5p_probe, + .id_table = eq5p_id_table, +}; +module_auxiliary_driver(eq5p_driver); diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c index a898e40451fe4b..0f6b55fec31de7 100644 --- a/drivers/pinctrl/pinctrl-k210.c +++ b/drivers/pinctrl/pinctrl-k210.c @@ -925,7 +925,6 @@ static int k210_fpioa_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct k210_fpioa_data *pdata; - int ret; dev_info(dev, "K210 FPIOA pin controller\n"); @@ -940,46 +939,28 @@ static int k210_fpioa_probe(struct platform_device *pdev) if (IS_ERR(pdata->fpioa)) return PTR_ERR(pdata->fpioa); - pdata->clk = devm_clk_get(dev, "ref"); + pdata->clk = devm_clk_get_enabled(dev, "ref"); if (IS_ERR(pdata->clk)) return PTR_ERR(pdata->clk); - ret = clk_prepare_enable(pdata->clk); - if (ret) - return ret; - - pdata->pclk = devm_clk_get_optional(dev, "pclk"); - if (!IS_ERR(pdata->pclk)) { - ret = clk_prepare_enable(pdata->pclk); - if (ret) - goto disable_clk; - } + pdata->pclk = devm_clk_get_optional_enabled(dev, "pclk"); + if (IS_ERR(pdata->pclk)) + return PTR_ERR(pdata->pclk); pdata->sysctl_map = syscon_regmap_lookup_by_phandle_args(np, "canaan,k210-sysctl-power", 1, &pdata->power_offset); - if (IS_ERR(pdata->sysctl_map)) { - ret = PTR_ERR(pdata->sysctl_map); - goto disable_pclk; - } + if (IS_ERR(pdata->sysctl_map)) + return PTR_ERR(pdata->sysctl_map); k210_fpioa_init_ties(pdata); pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata); - if (IS_ERR(pdata->pctl)) { - ret = PTR_ERR(pdata->pctl); - goto disable_pclk; - } + if (IS_ERR(pdata->pctl)) + return PTR_ERR(pdata->pctl); return 0; - -disable_pclk: - clk_disable_unprepare(pdata->pclk); -disable_clk: - clk_disable_unprepare(pdata->clk); - - return ret; } static const struct of_device_id k210_fpioa_dt_ids[] = { diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 6878bc86faa2ce..5c1bc4d5b662ed 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -84,6 +84,27 @@ }, \ } +#define PIN_BANK_IOMUX_FLAGS_OFFSET_PULL_FLAGS(id, pins, label, iom0, \ + iom1, iom2, iom3, \ + offset0, offset1, \ + offset2, offset3, pull0, \ + pull1, pull2, pull3) \ + { \ + .bank_num = id, \ + .nr_pins = pins, \ + .name = 
label, \ + .iomux = { \ + { .type = iom0, .offset = offset0 }, \ + { .type = iom1, .offset = offset1 }, \ + { .type = iom2, .offset = offset2 }, \ + { .type = iom3, .offset = offset3 }, \ + }, \ + .pull_type[0] = pull0, \ + .pull_type[1] = pull1, \ + .pull_type[2] = pull2, \ + .pull_type[3] = pull3, \ + } + #define PIN_BANK_DRV_FLAGS(id, pins, label, type0, type1, type2, type3) \ { \ .bank_num = id, \ @@ -1120,6 +1141,11 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin) if (bank->recalced_mask & BIT(pin)) rockchip_get_recalced_mux(bank, pin, ®, &bit, &mask); + if (ctrl->type == RK3576) { + if ((bank->bank_num == 0) && (pin >= RK_PB4) && (pin <= RK_PB7)) + reg += 0x1ff4; /* GPIO0_IOC_GPIO0B_IOMUX_SEL_H */ + } + if (ctrl->type == RK3588) { if (bank->bank_num == 0) { if ((pin >= RK_PB4) && (pin <= RK_PD7)) { @@ -1234,6 +1260,11 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux) if (bank->recalced_mask & BIT(pin)) rockchip_get_recalced_mux(bank, pin, ®, &bit, &mask); + if (ctrl->type == RK3576) { + if ((bank->bank_num == 0) && (pin >= RK_PB4) && (pin <= RK_PB7)) + reg += 0x1ff4; /* GPIO0_IOC_GPIO0B_IOMUX_SEL_H */ + } + if (ctrl->type == RK3588) { if (bank->bank_num == 0) { if ((pin >= RK_PB4) && (pin <= RK_PD7)) { @@ -2038,6 +2069,142 @@ static int rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, return 0; } +#define RK3576_DRV_BITS_PER_PIN 4 +#define RK3576_DRV_PINS_PER_REG 4 +#define RK3576_DRV_GPIO0_AL_OFFSET 0x10 +#define RK3576_DRV_GPIO0_BH_OFFSET 0x2014 +#define RK3576_DRV_GPIO1_OFFSET 0x6020 +#define RK3576_DRV_GPIO2_OFFSET 0x6040 +#define RK3576_DRV_GPIO3_OFFSET 0x6060 +#define RK3576_DRV_GPIO4_AL_OFFSET 0x6080 +#define RK3576_DRV_GPIO4_CL_OFFSET 0xA090 +#define RK3576_DRV_GPIO4_DL_OFFSET 0xB098 + +static int rk3576_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + *regmap = info->regmap_base; + + if (bank->bank_num == 0 && pin_num < 12) + *reg = RK3576_DRV_GPIO0_AL_OFFSET; + else if (bank->bank_num == 0) + *reg = RK3576_DRV_GPIO0_BH_OFFSET - 0xc; + else if (bank->bank_num == 1) + *reg = RK3576_DRV_GPIO1_OFFSET; + else if (bank->bank_num == 2) + *reg = RK3576_DRV_GPIO2_OFFSET; + else if (bank->bank_num == 3) + *reg = RK3576_DRV_GPIO3_OFFSET; + else if (bank->bank_num == 4 && pin_num < 16) + *reg = RK3576_DRV_GPIO4_AL_OFFSET; + else if (bank->bank_num == 4 && pin_num < 24) + *reg = RK3576_DRV_GPIO4_CL_OFFSET - 0x10; + else if (bank->bank_num == 4) + *reg = RK3576_DRV_GPIO4_DL_OFFSET - 0x18; + else + dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num); + + *reg += ((pin_num / RK3576_DRV_PINS_PER_REG) * 4); + *bit = pin_num % RK3576_DRV_PINS_PER_REG; + *bit *= RK3576_DRV_BITS_PER_PIN; + + return 0; +} + +#define RK3576_PULL_BITS_PER_PIN 2 +#define RK3576_PULL_PINS_PER_REG 8 +#define RK3576_PULL_GPIO0_AL_OFFSET 0x20 +#define RK3576_PULL_GPIO0_BH_OFFSET 0x2028 +#define RK3576_PULL_GPIO1_OFFSET 0x6110 +#define RK3576_PULL_GPIO2_OFFSET 0x6120 +#define RK3576_PULL_GPIO3_OFFSET 0x6130 +#define RK3576_PULL_GPIO4_AL_OFFSET 0x6140 +#define RK3576_PULL_GPIO4_CL_OFFSET 0xA148 +#define RK3576_PULL_GPIO4_DL_OFFSET 0xB14C + +static int rk3576_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + *regmap = info->regmap_base; + + if (bank->bank_num == 0 && pin_num < 12) + *reg = 
RK3576_PULL_GPIO0_AL_OFFSET; + else if (bank->bank_num == 0) + *reg = RK3576_PULL_GPIO0_BH_OFFSET - 0x4; + else if (bank->bank_num == 1) + *reg = RK3576_PULL_GPIO1_OFFSET; + else if (bank->bank_num == 2) + *reg = RK3576_PULL_GPIO2_OFFSET; + else if (bank->bank_num == 3) + *reg = RK3576_PULL_GPIO3_OFFSET; + else if (bank->bank_num == 4 && pin_num < 16) + *reg = RK3576_PULL_GPIO4_AL_OFFSET; + else if (bank->bank_num == 4 && pin_num < 24) + *reg = RK3576_PULL_GPIO4_CL_OFFSET - 0x8; + else if (bank->bank_num == 4) + *reg = RK3576_PULL_GPIO4_DL_OFFSET - 0xc; + else + dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num); + + *reg += ((pin_num / RK3576_PULL_PINS_PER_REG) * 4); + *bit = pin_num % RK3576_PULL_PINS_PER_REG; + *bit *= RK3576_PULL_BITS_PER_PIN; + + return 0; +} + +#define RK3576_SMT_BITS_PER_PIN 1 +#define RK3576_SMT_PINS_PER_REG 8 +#define RK3576_SMT_GPIO0_AL_OFFSET 0x30 +#define RK3576_SMT_GPIO0_BH_OFFSET 0x2040 +#define RK3576_SMT_GPIO1_OFFSET 0x6210 +#define RK3576_SMT_GPIO2_OFFSET 0x6220 +#define RK3576_SMT_GPIO3_OFFSET 0x6230 +#define RK3576_SMT_GPIO4_AL_OFFSET 0x6240 +#define RK3576_SMT_GPIO4_CL_OFFSET 0xA248 +#define RK3576_SMT_GPIO4_DL_OFFSET 0xB24C + +static int rk3576_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, + struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + *regmap = info->regmap_base; + + if (bank->bank_num == 0 && pin_num < 12) + *reg = RK3576_SMT_GPIO0_AL_OFFSET; + else if (bank->bank_num == 0) + *reg = RK3576_SMT_GPIO0_BH_OFFSET - 0x4; + else if (bank->bank_num == 1) + *reg = RK3576_SMT_GPIO1_OFFSET; + else if (bank->bank_num == 2) + *reg = RK3576_SMT_GPIO2_OFFSET; + else if (bank->bank_num == 3) + *reg = RK3576_SMT_GPIO3_OFFSET; + else if (bank->bank_num == 4 && pin_num < 16) + *reg = RK3576_SMT_GPIO4_AL_OFFSET; + else if (bank->bank_num == 4 && pin_num < 24) + *reg = RK3576_SMT_GPIO4_CL_OFFSET - 0x8; + else if (bank->bank_num == 4) + *reg = RK3576_SMT_GPIO4_DL_OFFSET - 0xc; + else + dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num); + + *reg += ((pin_num / RK3576_SMT_PINS_PER_REG) * 4); + *bit = pin_num % RK3576_SMT_PINS_PER_REG; + *bit *= RK3576_SMT_BITS_PER_PIN; + + return 0; +} + #define RK3588_PMU1_IOC_REG (0x0000) #define RK3588_PMU2_IOC_REG (0x4000) #define RK3588_BUS_IOC_REG (0x8000) @@ -2332,6 +2499,10 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank, rmask_bits = RK3568_DRV_BITS_PER_PIN; ret = (1 << (strength + 1)) - 1; goto config; + } else if (ctrl->type == RK3576) { + rmask_bits = RK3576_DRV_BITS_PER_PIN; + ret = ((strength & BIT(2)) >> 2) | ((strength & BIT(0)) << 2) | (strength & BIT(1)); + goto config; } if (ctrl->type == RV1126) { @@ -2469,6 +2640,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) case RK3368: case RK3399: case RK3568: + case RK3576: case RK3588: pull_type = bank->pull_type[pin_num / 8]; data >>= bit; @@ -2528,6 +2700,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, case RK3368: case RK3399: case RK3568: + case RK3576: case RK3588: pull_type = bank->pull_type[pin_num / 8]; ret = -EINVAL; @@ -2793,6 +2966,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl, case RK3368: case RK3399: case RK3568: + case RK3576: case RK3588: return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT); } @@ -3949,6 +4123,37 @@ static struct rockchip_pin_ctrl rk3568_pin_ctrl = { .schmitt_calc_reg = rk3568_calc_schmitt_reg_and_bit, }; +#define RK3576_PIN_BANK(ID, LABEL, 
OFFSET0, OFFSET1, OFFSET2, OFFSET3) \ + PIN_BANK_IOMUX_FLAGS_OFFSET_PULL_FLAGS(ID, 32, LABEL, \ + IOMUX_WIDTH_4BIT, \ + IOMUX_WIDTH_4BIT, \ + IOMUX_WIDTH_4BIT, \ + IOMUX_WIDTH_4BIT, \ + OFFSET0, OFFSET1, \ + OFFSET2, OFFSET3, \ + PULL_TYPE_IO_1V8_ONLY, \ + PULL_TYPE_IO_1V8_ONLY, \ + PULL_TYPE_IO_1V8_ONLY, \ + PULL_TYPE_IO_1V8_ONLY) + +static struct rockchip_pin_bank rk3576_pin_banks[] = { + RK3576_PIN_BANK(0, "gpio0", 0, 0x8, 0x2004, 0x200C), + RK3576_PIN_BANK(1, "gpio1", 0x4020, 0x4028, 0x4030, 0x4038), + RK3576_PIN_BANK(2, "gpio2", 0x4040, 0x4048, 0x4050, 0x4058), + RK3576_PIN_BANK(3, "gpio3", 0x4060, 0x4068, 0x4070, 0x4078), + RK3576_PIN_BANK(4, "gpio4", 0x4080, 0x4088, 0xA390, 0xB398), +}; + +static struct rockchip_pin_ctrl rk3576_pin_ctrl __maybe_unused = { + .pin_banks = rk3576_pin_banks, + .nr_banks = ARRAY_SIZE(rk3576_pin_banks), + .label = "RK3576-GPIO", + .type = RK3576, + .pull_calc_reg = rk3576_calc_pull_reg_and_bit, + .drv_calc_reg = rk3576_calc_drv_reg_and_bit, + .schmitt_calc_reg = rk3576_calc_schmitt_reg_and_bit, +}; + static struct rockchip_pin_bank rk3588_pin_banks[] = { RK3588_PIN_BANK_FLAGS(0, 32, "gpio0", IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY), @@ -4005,6 +4210,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = { .data = &rk3399_pin_ctrl }, { .compatible = "rockchip,rk3568-pinctrl", .data = &rk3568_pin_ctrl }, + { .compatible = "rockchip,rk3576-pinctrl", + .data = &rk3576_pin_ctrl }, { .compatible = "rockchip,rk3588-pinctrl", .data = &rk3588_pin_ctrl }, {}, diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h index 849266f8b19131..6ebbb0a88ce797 100644 --- a/drivers/pinctrl/pinctrl-rockchip.h +++ b/drivers/pinctrl/pinctrl-rockchip.h @@ -197,6 +197,7 @@ enum rockchip_pinctrl_type { RK3368, RK3399, RK3568, + RK3576, RK3588, }; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 4da3c3f422b691..2ec599e383e4b2 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1913,7 +1913,8 @@ static int pcs_probe(struct platform_device *pdev) dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size); - if (pinctrl_enable(pcs->pctl)) + ret = pinctrl_enable(pcs->pctl); + if (ret) goto free; return 0; diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c index 6313be370eb7d4..d2c5321dd025f8 100644 --- a/drivers/pinctrl/pinctrl-stmfx.c +++ b/drivers/pinctrl/pinctrl-stmfx.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -369,14 +370,14 @@ static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev, return; if (dir == GPIO_LINE_DIRECTION_OUT) { - seq_printf(s, "output %s ", val ? "high" : "low"); + seq_printf(s, "output %s ", str_high_low(val)); if (type) seq_printf(s, "open drain %s internal pull-up ", pupd ? "with" : "without"); else seq_puts(s, "push pull no pull "); } else { - seq_printf(s, "input %s ", val ? "high" : "low"); + seq_printf(s, "input %s ", str_high_low(val)); if (type) seq_printf(s, "with internal pull-%s ", pupd ? 
"up" : "down"); diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c index d81d7b46116cc5..b880e44b822132 100644 --- a/drivers/pinctrl/pinctrl-utils.c +++ b/drivers/pinctrl/pinctrl-utils.c @@ -70,8 +70,8 @@ int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev, if (WARN_ON(*num_maps == *reserved_maps)) return -ENOSPC; - dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs), - GFP_KERNEL); + dup_configs = kmemdup_array(configs, num_configs, + sizeof(*dup_configs), GFP_KERNEL); if (!dup_configs) return -ENOMEM; diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c index 0e8de27d0de80d..caa8a2ca3e6817 100644 --- a/drivers/pinctrl/pinctrl-zynq.c +++ b/drivers/pinctrl/pinctrl-zynq.c @@ -1202,6 +1202,7 @@ static const struct of_device_id zynq_pinctrl_of_match[] = { { .compatible = "xlnx,pinctrl-zynq" }, { } }; +MODULE_DEVICE_TABLE(of, zynq_pinctrl_of_match); static struct platform_driver zynq_pinctrl_driver = { .driver = { diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index aae71a37219b26..02033ea1c64384 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -442,8 +442,7 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting) gname = pctlops->get_group_name(pctldev, setting->data.mux.group); dev_err_probe(pctldev->dev, ret, - "could not request pin %d (%s) from group %s " - " on device %s\n", + "could not request pin %d (%s) from group %s on device %s\n", pins[i], pname, gname, pinctrl_dev_get_name(pctldev)); goto err_pin_request; @@ -526,9 +525,7 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting) gname = pctlops->get_group_name(pctldev, setting->data.mux.group); dev_warn(pctldev->dev, - "not freeing pin %d (%s) as part of " - "deactivating group %s - it is already " - "used for some other setting", + "not freeing pin %d (%s) as part of deactivating group %s - it is already used for some other setting", pins[i], desc->name, gname); } } diff --git a/drivers/pinctrl/realtek/pinctrl-rtd.c b/drivers/pinctrl/realtek/pinctrl-rtd.c index 208896593b61ec..2440604863327d 100644 --- a/drivers/pinctrl/realtek/pinctrl-rtd.c +++ b/drivers/pinctrl/realtek/pinctrl-rtd.c @@ -533,7 +533,7 @@ static const struct pinconf_ops rtd_pinconf_ops = { .pin_config_group_set = rtd_pin_config_group_set, }; -static struct regmap_config rtd_pinctrl_regmap_config = { +static const struct regmap_config rtd_pinctrl_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 632180570b7048..5a403915fed2c6 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -51,17 +52,15 @@ #define PIN_CFG_IO_VMC_QSPI BIT(7) #define PIN_CFG_IO_VMC_ETH0 BIT(8) #define PIN_CFG_IO_VMC_ETH1 BIT(9) -#define PIN_CFG_FILONOFF BIT(10) -#define PIN_CFG_FILNUM BIT(11) -#define PIN_CFG_FILCLKSEL BIT(12) -#define PIN_CFG_IOLH_C BIT(13) -#define PIN_CFG_SOFT_PS BIT(14) -#define PIN_CFG_OEN BIT(15) -#define PIN_CFG_NOGPIO_INT BIT(16) -#define PIN_CFG_NOD BIT(17) /* N-ch Open Drain */ -#define PIN_CFG_SMT BIT(18) /* Schmitt-trigger input control */ -#define PIN_CFG_ELC BIT(19) -#define PIN_CFG_IOLH_RZV2H BIT(20) +#define PIN_CFG_NF BIT(10) /* Digital noise filter */ +#define PIN_CFG_IOLH_C BIT(11) +#define PIN_CFG_SOFT_PS BIT(12) +#define PIN_CFG_OEN BIT(13) +#define PIN_CFG_NOGPIO_INT BIT(14) +#define 
PIN_CFG_NOD BIT(15) /* N-ch Open Drain */ +#define PIN_CFG_SMT BIT(16) /* Schmitt-trigger input control */ +#define PIN_CFG_ELC BIT(17) +#define PIN_CFG_IOLH_RZV2H BIT(18) #define RZG2L_SINGLE_PIN BIT_ULL(63) /* Dedicated pin */ #define RZG2L_VARIABLE_CFG BIT_ULL(62) /* Variable cfg for port pins */ @@ -69,9 +68,7 @@ #define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \ (PIN_CFG_IOLH_##group | \ PIN_CFG_PUPD | \ - PIN_CFG_FILONOFF | \ - PIN_CFG_FILNUM | \ - PIN_CFG_FILCLKSEL) + PIN_CFG_NF) #define RZG2L_MPXED_PIN_FUNCS (RZG2L_MPXED_COMMON_PIN_FUNCS(A) | \ PIN_CFG_SR) @@ -84,10 +81,7 @@ PIN_CFG_SR | \ PIN_CFG_SMT) -#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | \ - PIN_CFG_FILONOFF | \ - PIN_CFG_FILNUM | \ - PIN_CFG_FILCLKSEL) +#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | PIN_CFG_NF) #define PIN_CFG_PIN_MAP_MASK GENMASK_ULL(61, 54) #define PIN_CFG_PIN_REG_MASK GENMASK_ULL(53, 46) @@ -394,13 +388,13 @@ static const u64 r9a09g057_variable_pin_cfg[] = { #ifdef CONFIG_RISCV static const u64 r9a07g043f_variable_pin_cfg[] = { RZG2L_VARIABLE_PIN_CFG_PACK(20, 0, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD | - PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL | + PIN_CFG_NF | PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), RZG2L_VARIABLE_PIN_CFG_PACK(20, 1, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD | - PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL | + PIN_CFG_NF | PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), RZG2L_VARIABLE_PIN_CFG_PACK(20, 2, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD | - PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL | + PIN_CFG_NF | PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), RZG2L_VARIABLE_PIN_CFG_PACK(20, 3, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), @@ -431,7 +425,7 @@ static const u64 r9a07g043f_variable_pin_cfg[] = { RZG2L_VARIABLE_PIN_CFG_PACK(24, 4, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_NOGPIO_INT), RZG2L_VARIABLE_PIN_CFG_PACK(24, 5, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD | - PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL | + PIN_CFG_NF | PIN_CFG_NOGPIO_INT), }; #endif @@ -528,8 +522,7 @@ static int rzg2l_map_add_config(struct pinctrl_map *map, { unsigned long *cfgs; - cfgs = kmemdup(configs, num_configs * sizeof(*cfgs), - GFP_KERNEL); + cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL); if (!cfgs) return -ENOMEM; @@ -1261,7 +1254,9 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_OUTPUT_ENABLE: - if (!pctrl->data->oen_read || !(cfg & PIN_CFG_OEN)) + if (!(cfg & PIN_CFG_OEN)) + return -EINVAL; + if (!pctrl->data->oen_read) return -EOPNOTSUPP; arg = pctrl->data->oen_read(pctrl, _pin); if (!arg) @@ -1390,9 +1385,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev, for (i = 0; i < num_configs; i++) { param = pinconf_to_config_param(_configs[i]); + arg = pinconf_to_config_argument(_configs[i]); switch (param) { case PIN_CONFIG_INPUT_ENABLE: - arg = pinconf_to_config_argument(_configs[i]); if (!(cfg & PIN_CFG_IEN)) return -EINVAL; @@ -1401,8 +1396,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_OUTPUT_ENABLE: - arg = pinconf_to_config_argument(_configs[i]); - if (!pctrl->data->oen_write || !(cfg & PIN_CFG_OEN)) + if (!(cfg & PIN_CFG_OEN)) + return -EINVAL; + if (!pctrl->data->oen_write) return -EOPNOTSUPP; ret = pctrl->data->oen_write(pctrl, _pin, !!arg); if (ret) @@ -1410,12 +1406,10 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_POWER_SOURCE: - settings.power_source = 
pinconf_to_config_argument(_configs[i]); + settings.power_source = arg; break; case PIN_CONFIG_SLEW_RATE: - arg = pinconf_to_config_argument(_configs[i]); - if (!(cfg & PIN_CFG_SR) || arg > 1) return -EINVAL; @@ -1436,8 +1430,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_DRIVE_STRENGTH: - arg = pinconf_to_config_argument(_configs[i]); - if (!(cfg & PIN_CFG_IOLH_A) || hwcfg->drive_strength_ua) return -EINVAL; @@ -1457,12 +1449,10 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev, !hwcfg->drive_strength_ua) return -EINVAL; - settings.drive_strength_ua = pinconf_to_config_argument(_configs[i]); + settings.drive_strength_ua = arg; break; case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: - arg = pinconf_to_config_argument(_configs[i]); - if (!(cfg & PIN_CFG_IOLH_B) || !hwcfg->iolh_groupb_oi[0]) return -EINVAL; @@ -1480,7 +1470,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev, if (!(cfg & PIN_CFG_IOLH_RZV2H)) return -EINVAL; - arg = pinconf_to_config_argument(_configs[i]); if (arg > 3) return -EINVAL; rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, arg); @@ -1883,8 +1872,7 @@ static const u64 r9a07g043_gpio_configs[] = { #ifdef CONFIG_RISCV /* Below additional port pins (P19 - P28) are exclusively available on RZ/Five SoC only */ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x06, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD | - PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL | - PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */ + PIN_CFG_NF | PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x07), /* P20 */ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x08, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD | PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P21 */ @@ -1892,8 +1880,7 @@ static const u64 r9a07g043_gpio_configs[] = { PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P22 */ RZG2L_GPIO_PORT_SPARSE_PACK_VARIABLE(0x3e, 0x0a), /* P23 */ RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x0b), /* P24 */ - RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_FILONOFF | - PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL | + RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NF | PIN_CFG_NOGPIO_INT), /* P25 */ 0x0, /* P26 */ 0x0, /* P27 */ @@ -1971,8 +1958,7 @@ static const struct { struct rzg2l_dedicated_configs rzg2l_pins[7]; } rzg2l_dedicated_pins = { .common = { - { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, - (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) }, + { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) }, { "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0, (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) }, { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0, @@ -2053,8 +2039,7 @@ static const struct { }; static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = { - { "NMI", RZG2L_SINGLE_PIN_PACK(0x0, 0, (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | - PIN_CFG_FILCLKSEL)) }, + { "NMI", RZG2L_SINGLE_PIN_PACK(0x0, 0, PIN_CFG_NF) }, { "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_IOLH_A | PIN_CFG_IEN | PIN_CFG_SOFT_PS)) }, { "TDO", RZG2L_SINGLE_PIN_PACK(0x1, 1, (PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS)) }, @@ -2093,8 +2078,7 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = { }; static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = { - { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | - PIN_CFG_FILCLKSEL)) }, + { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) }, { "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_IEN)) }, { "TDO", 
RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, @@ -2596,16 +2580,13 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) return -EPROBE_DEFER; ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args); - if (ret) { - dev_err(pctrl->dev, "Unable to parse gpio-ranges\n"); - return ret; - } + if (ret) + return dev_err_probe(pctrl->dev, ret, "Unable to parse gpio-ranges\n"); if (of_args.args[0] != 0 || of_args.args[1] != 0 || - of_args.args[2] != pctrl->data->n_port_pins) { - dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n"); - return -EINVAL; - } + of_args.args[2] != pctrl->data->n_port_pins) + return dev_err_probe(pctrl->dev, -EINVAL, + "gpio-ranges does not match selected SOC\n"); chip->names = pctrl->data->port_pins; chip->request = rzg2l_gpio_request; @@ -2623,7 +2604,7 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) girq = &chip->irq; gpio_irq_chip_set_chip(girq, &rzg2l_gpio_irqchip); - girq->fwnode = of_node_to_fwnode(np); + girq->fwnode = dev_fwnode(pctrl->dev); girq->parent_domain = parent_domain; girq->child_to_parent_hwirq = rzg2l_gpio_child_to_parent_hwirq; girq->populate_parent_alloc_arg = rzg2l_gpio_populate_parent_fwspec; @@ -2637,10 +2618,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) pctrl->gpio_range.name = chip->label; pctrl->gpio_range.gc = chip; ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); - if (ret) { - dev_err(pctrl->dev, "failed to add GPIO controller\n"); - return ret; - } + if (ret) + return dev_err_probe(pctrl->dev, ret, "failed to add GPIO controller\n"); dev_dbg(pctrl->dev, "Registered gpio controller\n"); @@ -2726,22 +2705,16 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl) ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl, &pctrl->pctl); - if (ret) { - dev_err(pctrl->dev, "pinctrl registration failed\n"); - return ret; - } + if (ret) + return dev_err_probe(pctrl->dev, ret, "pinctrl registration failed\n"); ret = pinctrl_enable(pctrl->pctl); - if (ret) { - dev_err(pctrl->dev, "pinctrl enable failed\n"); - return ret; - } + if (ret) + return dev_err_probe(pctrl->dev, ret, "pinctrl enable failed\n"); ret = rzg2l_gpio_register(pctrl); - if (ret) { - dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret); - return ret; - } + if (ret) + return dev_err_probe(pctrl->dev, ret, "failed to add GPIO chip\n"); return 0; } diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c index 0cae5472ac678c..4062c56619f595 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c +++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c @@ -196,8 +196,7 @@ static int rzv2m_map_add_config(struct pinctrl_map *map, { unsigned long *cfgs; - cfgs = kmemdup(configs, num_configs * sizeof(*cfgs), - GFP_KERNEL); + cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL); if (!cfgs) return -ENOMEM; diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c index 03e9bdbc82b909..29d16c9c1bd194 100644 --- a/drivers/pinctrl/renesas/pinctrl.c +++ b/drivers/pinctrl/renesas/pinctrl.c @@ -83,8 +83,7 @@ static int sh_pfc_map_add_config(struct pinctrl_map *map, { unsigned long *cfgs; - cfgs = kmemdup(configs, num_configs * sizeof(*cfgs), - GFP_KERNEL); + cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL); if (cfgs == NULL) return -ENOMEM; diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c index 85ddf49a518859..d3d8672f74dc62 100644 ---
a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c @@ -40,6 +40,19 @@ static const struct samsung_pin_bank_type bank_type_alive = { #define S5P_OTHERS_RET_MMC (1 << 29) #define S5P_OTHERS_RET_UART (1 << 28) +#define S5P_PIN_PULL_DISABLE 0 +#define S5P_PIN_PULL_DOWN 1 +#define S5P_PIN_PULL_UP 2 + +static void s5pv210_pud_value_init(struct samsung_pinctrl_drv_data *drvdata) +{ + unsigned int *pud_val = drvdata->pud_val; + + pud_val[PUD_PULL_DISABLE] = S5P_PIN_PULL_DISABLE; + pud_val[PUD_PULL_DOWN] = S5P_PIN_PULL_DOWN; + pud_val[PUD_PULL_UP] = S5P_PIN_PULL_UP; +} + static void s5pv210_retention_disable(struct samsung_pinctrl_drv_data *drvdata) { void __iomem *clk_base = (void __iomem *)drvdata->retention_ctrl->priv; @@ -133,6 +146,7 @@ static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { .nr_banks = ARRAY_SIZE(s5pv210_pin_bank), .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, + .pud_value_init = s5pv210_pud_value_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, .retention_data = &s5pv210_retention_data, diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index ce5e6783b5b99f..b79c211c037496 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -662,7 +662,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc) __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) { struct device *dev = d->dev; - struct device_node *wkup_np = NULL; + struct device_node *wkup_np __free(device_node) = NULL; struct device_node *np; struct samsung_pin_bank *bank; struct exynos_weint_data *weint_data; @@ -692,17 +692,14 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) bank->irq_chip = devm_kmemdup(dev, irq_chip, sizeof(*irq_chip), GFP_KERNEL); - if (!bank->irq_chip) { - of_node_put(wkup_np); + if (!bank->irq_chip) return -ENOMEM; - } bank->irq_chip->chip.name = bank->name; bank->irq_domain = irq_domain_create_linear(bank->fwnode, bank->nr_pins, &exynos_eint_irqd_ops, bank); if (!bank->irq_domain) { dev_err(dev, "wkup irq domain add failed\n"); - of_node_put(wkup_np); return -ENXIO; } @@ -715,10 +712,8 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) weint_data = devm_kcalloc(dev, bank->nr_pins, sizeof(*weint_data), GFP_KERNEL); - if (!weint_data) { - of_node_put(wkup_np); + if (!weint_data) return -ENOMEM; - } for (idx = 0; idx < bank->nr_pins; ++idx) { irq = irq_of_parse_and_map(to_of_node(bank->fwnode), idx); @@ -735,13 +730,10 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) } } - if (!muxed_banks) { - of_node_put(wkup_np); + if (!muxed_banks) return 0; - } irq = irq_of_parse_and_map(wkup_np, 0); - of_node_put(wkup_np); if (!irq) { dev_err(dev, "irq number for muxed EINTs not found\n"); return 0; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index c5d92db4fdb1d3..68715c09baa9d0 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -63,6 +63,10 @@ #define EINT_CON_MASK 0xF #define EINT_CON_LEN 4 +#define S3C_PIN_PULL_DISABLE 0 +#define S3C_PIN_PULL_DOWN 1 +#define S3C_PIN_PULL_UP 2 + static const struct samsung_pin_bank_type bank_type_4bit_off = { .fld_width = { 4, 1, 2, 0, 2, 2, }, .reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, }, @@ -255,6 +259,15 @@ static int s3c64xx_irq_get_trigger(unsigned int 
type) return trigger; } +static void s3c64xx_pud_value_init(struct samsung_pinctrl_drv_data *drvdata) +{ + unsigned int *pud_val = drvdata->pud_val; + + pud_val[PUD_PULL_DISABLE] = S3C_PIN_PULL_DISABLE; + pud_val[PUD_PULL_DOWN] = S3C_PIN_PULL_DOWN; + pud_val[PUD_PULL_UP] = S3C_PIN_PULL_UP; +} + static void s3c64xx_irq_set_handler(struct irq_data *d, unsigned int type) { /* Edge- and level-triggered interrupts need different handlers */ @@ -797,6 +810,7 @@ static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { .nr_banks = ARRAY_SIZE(s3c64xx_pin_banks0), .eint_gpio_init = s3c64xx_eint_gpio_init, .eint_wkup_init = s3c64xx_eint_eint0_init, + .pud_value_init = s3c64xx_pud_value_init, }, }; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 623df65a5d6f9e..675efa5d86a9af 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -122,8 +122,8 @@ static int add_map_configs(struct device *dev, struct pinctrl_map **map, if (WARN_ON(*num_maps == *reserved_maps)) return -ENOSPC; - dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs), - GFP_KERNEL); + dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs), + GFP_KERNEL); if (!dup_configs) return -ENOMEM; @@ -251,7 +251,6 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev, { struct samsung_pinctrl_drv_data *drvdata; unsigned reserved_maps; - struct device_node *np; int ret; drvdata = pinctrl_dev_get_drvdata(pctldev); @@ -266,12 +265,11 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev, &reserved_maps, num_maps); - for_each_child_of_node(np_config, np) { + for_each_child_of_node_scoped(np_config, np) { ret = samsung_dt_subnode_to_map(drvdata, pctldev->dev, np, map, &reserved_maps, num_maps); if (ret < 0) { samsung_dt_free_map(pctldev, *map, *num_maps); - of_node_put(np); return ret; } } @@ -823,16 +821,16 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions( struct device_node *func_np; if (!of_get_child_count(cfg_np)) { - if (!of_find_property(cfg_np, - "samsung,pin-function", NULL)) + if (!of_property_present(cfg_np, + "samsung,pin-function")) continue; ++func_cnt; continue; } for_each_child_of_node(cfg_np, func_np) { - if (!of_find_property(func_np, - "samsung,pin-function", NULL)) + if (!of_property_present(func_np, + "samsung,pin-function")) continue; ++func_cnt; } @@ -849,16 +847,12 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions( * and create pin groups and pin function lists. 
*/ func_cnt = 0; - for_each_child_of_node(dev_np, cfg_np) { - struct device_node *func_np; - + for_each_child_of_node_scoped(dev_np, cfg_np) { if (!of_get_child_count(cfg_np)) { ret = samsung_pinctrl_create_function(dev, drvdata, cfg_np, func); - if (ret < 0) { - of_node_put(cfg_np); + if (ret < 0) return ERR_PTR(ret); - } if (ret > 0) { ++func; ++func_cnt; @@ -866,14 +860,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions( continue; } - for_each_child_of_node(cfg_np, func_np) { + for_each_child_of_node_scoped(cfg_np, func_np) { ret = samsung_pinctrl_create_function(dev, drvdata, func_np, func); - if (ret < 0) { - of_node_put(func_np); - of_node_put(cfg_np); + if (ret < 0) return ERR_PTR(ret); - } if (ret > 0) { ++func; ++func_cnt; @@ -997,6 +988,77 @@ static int samsung_pinctrl_unregister(struct platform_device *pdev, return 0; } +static void samsung_pud_value_init(struct samsung_pinctrl_drv_data *drvdata) +{ + unsigned int *pud_val = drvdata->pud_val; + + pud_val[PUD_PULL_DISABLE] = EXYNOS_PIN_PUD_PULL_DISABLE; + pud_val[PUD_PULL_DOWN] = EXYNOS_PIN_PID_PULL_DOWN; + pud_val[PUD_PULL_UP] = EXYNOS_PIN_PID_PULL_UP; +} + +/* + * Enable or Disable the pull-down and pull-up for the gpio pins in the + * PUD register. + */ +static void samsung_gpio_set_pud(struct gpio_chip *gc, unsigned int offset, + unsigned int value) +{ + struct samsung_pin_bank *bank = gpiochip_get_data(gc); + const struct samsung_pin_bank_type *type = bank->type; + void __iomem *reg; + unsigned int data, mask; + + reg = bank->pctl_base + bank->pctl_offset; + data = readl(reg + type->reg_offset[PINCFG_TYPE_PUD]); + mask = (1 << type->fld_width[PINCFG_TYPE_PUD]) - 1; + data &= ~(mask << (offset * type->fld_width[PINCFG_TYPE_PUD])); + data |= value << (offset * type->fld_width[PINCFG_TYPE_PUD]); + writel(data, reg + type->reg_offset[PINCFG_TYPE_PUD]); +} + +/* + * Identify the type of PUD config based on the gpiolib request to enable + * or disable the PUD config. 
+ */ +static int samsung_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config) +{ + struct samsung_pin_bank *bank = gpiochip_get_data(gc); + struct samsung_pinctrl_drv_data *drvdata = bank->drvdata; + unsigned int value; + int ret = 0; + unsigned long flags; + + switch (pinconf_to_config_param(config)) { + case PIN_CONFIG_BIAS_DISABLE: + value = drvdata->pud_val[PUD_PULL_DISABLE]; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + value = drvdata->pud_val[PUD_PULL_DOWN]; + break; + case PIN_CONFIG_BIAS_PULL_UP: + value = drvdata->pud_val[PUD_PULL_UP]; + break; + default: + return -ENOTSUPP; + } + + ret = clk_enable(drvdata->pclk); + if (ret) { + dev_err(drvdata->dev, "failed to enable clock\n"); + return ret; + } + + raw_spin_lock_irqsave(&bank->slock, flags); + samsung_gpio_set_pud(gc, offset, value); + raw_spin_unlock_irqrestore(&bank->slock, flags); + + clk_disable(drvdata->pclk); + + return ret; +} + static const struct gpio_chip samsung_gpiolib_chip = { .request = gpiochip_generic_request, .free = gpiochip_generic_free, @@ -1006,6 +1068,7 @@ static const struct gpio_chip samsung_gpiolib_chip = { .direction_output = samsung_gpio_direction_output, .to_irq = samsung_gpio_to_irq, .add_pin_ranges = samsung_add_pin_ranges, + .set_config = samsung_gpio_set_config, .owner = THIS_MODULE, }; @@ -1237,6 +1300,11 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) if (ctrl->eint_wkup_init) ctrl->eint_wkup_init(drvdata); + if (ctrl->pud_value_init) + ctrl->pud_value_init(drvdata); + else + samsung_pud_value_init(drvdata); + ret = samsung_gpiolib_register(pdev, drvdata); if (ret) goto err_unregister; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index d50ba6f07d5df3..a1e7377bd890b7 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -61,6 +61,25 @@ enum pincfg_type { #define PIN_CON_FUNC_INPUT 0x0 #define PIN_CON_FUNC_OUTPUT 0x1 +/* Values for the pin PUD register */ +#define EXYNOS_PIN_PUD_PULL_DISABLE 0x0 +#define EXYNOS_PIN_PID_PULL_DOWN 0x1 +#define EXYNOS_PIN_PID_PULL_UP 0x3 + +/* + * enum pud_index - Possible index values to access the pud_val array. + * @PUD_PULL_DISABLE: Index for the value of pud disable + * @PUD_PULL_DOWN: Index for the value of pull down enable + * @PUD_PULL_UP: Index for the value of pull up enable + * @PUD_MAX: Maximum value of the index + */ +enum pud_index { + PUD_PULL_DISABLE, + PUD_PULL_DOWN, + PUD_PULL_UP, + PUD_MAX, +}; + /** * enum eint_type - possible external interrupt types. 
* @EINT_TYPE_NONE: bank does not support external interrupts @@ -261,6 +280,7 @@ struct samsung_pin_ctrl { int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); + void (*pud_value_init)(struct samsung_pinctrl_drv_data *drvdata); void (*suspend)(struct samsung_pinctrl_drv_data *); void (*resume)(struct samsung_pinctrl_drv_data *); }; @@ -307,6 +327,7 @@ struct samsung_pinctrl_drv_data { struct samsung_pin_bank *pin_banks; unsigned int nr_banks; unsigned int nr_pins; + unsigned int pud_val[PUD_MAX]; struct samsung_retention_ctrl *retention_ctrl; diff --git a/drivers/pinctrl/sophgo/Kconfig b/drivers/pinctrl/sophgo/Kconfig new file mode 100644 index 00000000000000..b14792ee46fc3e --- /dev/null +++ b/drivers/pinctrl/sophgo/Kconfig @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Sophgo SoC PINCTRL drivers +# + +config PINCTRL_SOPHGO_CV18XX + bool + select GENERIC_PINCTRL_GROUPS + select GENERIC_PINMUX_FUNCTIONS + select GENERIC_PINCONF + +config PINCTRL_SOPHGO_CV1800B + tristate "Sophgo CV1800B SoC Pinctrl driver" + depends on ARCH_SOPHGO || COMPILE_TEST + depends on OF + select PINCTRL_SOPHGO_CV18XX + help + Say Y to select the pinctrl driver for CV1800B SoC. + This pin controller allows selecting the mux function for + each pin. This driver can also be built as a module called + pinctrl-cv1800b. + +config PINCTRL_SOPHGO_CV1812H + tristate "Sophgo CV1812H SoC Pinctrl driver" + depends on ARCH_SOPHGO || COMPILE_TEST + depends on OF + select PINCTRL_SOPHGO_CV18XX + help + Say Y to select the pinctrl driver for CV1812H SoC. + This pin controller allows selecting the mux function for + each pin. This driver can also be built as a module called + pinctrl-cv1812h. + +config PINCTRL_SOPHGO_SG2000 + tristate "Sophgo SG2000 SoC Pinctrl driver" + depends on ARCH_SOPHGO || COMPILE_TEST + depends on OF + select PINCTRL_SOPHGO_CV18XX + help + Say Y to select the pinctrl driver for SG2000 SoC. + This pin controller allows selecting the mux function for + each pin. This driver can also be built as a module called + pinctrl-sg2000. + +config PINCTRL_SOPHGO_SG2002 + tristate "Sophgo SG2002 SoC Pinctrl driver" + depends on ARCH_SOPHGO || COMPILE_TEST + depends on OF + select PINCTRL_SOPHGO_CV18XX + help + Say Y to select the pinctrl driver for SG2002 SoC. + This pin controller allows selecting the mux function for + each pin. This driver can also be built as a module called + pinctrl-sg2002. diff --git a/drivers/pinctrl/sophgo/Makefile b/drivers/pinctrl/sophgo/Makefile new file mode 100644 index 00000000000000..4113a5c9191b2f --- /dev/null +++ b/drivers/pinctrl/sophgo/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_PINCTRL_SOPHGO_CV18XX) += pinctrl-cv18xx.o +obj-$(CONFIG_PINCTRL_SOPHGO_CV1800B) += pinctrl-cv1800b.o +obj-$(CONFIG_PINCTRL_SOPHGO_CV1812H) += pinctrl-cv1812h.o +obj-$(CONFIG_PINCTRL_SOPHGO_SG2000) += pinctrl-sg2000.o +obj-$(CONFIG_PINCTRL_SOPHGO_SG2002) += pinctrl-sg2002.o diff --git a/drivers/pinctrl/sophgo/pinctrl-cv1800b.c b/drivers/pinctrl/sophgo/pinctrl-cv1800b.c new file mode 100644 index 00000000000000..3322906689e74c --- /dev/null +++ b/drivers/pinctrl/sophgo/pinctrl-cv1800b.c @@ -0,0 +1,462 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo CV1800B SoC pinctrl driver. + * + * Copyright (C) 2024 Inochi Amaoto + * + * This file is generated from vendor pinout definition.
+ */ + +#include +#include +#include + +#include +#include + +#include + +#include "pinctrl-cv18xx.h" + +enum CV1800B_POWER_DOMAIN { + VDD18A_AUD = 0, + VDD18A_USB_PLL_ETH_CSI = 1, + VDD33A_ETH_USB_SD1 = 2, + VDDIO_RTC = 3, + VDDIO_SD0_SPI = 4 +}; + +static const char *const cv1800b_power_domain_desc[] = { + [VDD18A_AUD] = "VDD18A_AUD", + [VDD18A_USB_PLL_ETH_CSI] = "VDD18A_USB_PLL_ETH_CSI", + [VDD33A_ETH_USB_SD1] = "VDD33A_ETH_USB_SD1", + [VDDIO_RTC] = "VDDIO_RTC", + [VDDIO_SD0_SPI] = "VDDIO_SD0_SPI", +}; + +static int cv1800b_get_pull_up(struct cv1800_pin *pin, const u32 *psmap) +{ + u32 pstate = psmap[pin->power_domain]; + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + + if (type == IO_TYPE_1V8_ONLY) + return 79000; + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) + return 60000; + if (pstate == PIN_POWER_STATE_3V3) + return 60000; + + return -EINVAL; + } + + return -ENOTSUPP; +} + +static int cv1800b_get_pull_down(struct cv1800_pin *pin, const u32 *psmap) +{ + u32 pstate = psmap[pin->power_domain]; + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + + if (type == IO_TYPE_1V8_ONLY) + return 87000; + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) + return 61000; + if (pstate == PIN_POWER_STATE_3V3) + return 62000; + + return -EINVAL; + } + + return -ENOTSUPP; +} + +static const u32 cv1800b_1v8_oc_map[] = { + 12800, + 25300, + 37400, + 49000 +}; + +static const u32 cv1800b_18od33_1v8_oc_map[] = { + 7800, + 11700, + 15500, + 19200, + 23000, + 26600, + 30200, + 33700 +}; + +static const u32 cv1800b_18od33_3v3_oc_map[] = { + 5500, + 8200, + 10800, + 13400, + 16100, + 18700, + 21200, + 23700 +}; + +static const u32 cv1800b_eth_oc_map[] = { + 15700, + 17800 +}; + +static int cv1800b_get_oc_map(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map) +{ + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 pstate = psmap[pin->power_domain]; + + if (type == IO_TYPE_1V8_ONLY) { + *map = cv1800b_1v8_oc_map; + return ARRAY_SIZE(cv1800b_1v8_oc_map); + } + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) { + *map = cv1800b_18od33_1v8_oc_map; + return ARRAY_SIZE(cv1800b_18od33_1v8_oc_map); + } else if (pstate == PIN_POWER_STATE_3V3) { + *map = cv1800b_18od33_3v3_oc_map; + return ARRAY_SIZE(cv1800b_18od33_3v3_oc_map); + } + } + + if (type == IO_TYPE_ETH) { + *map = cv1800b_eth_oc_map; + return ARRAY_SIZE(cv1800b_eth_oc_map); + } + + return -ENOTSUPP; +} + +static const u32 cv1800b_1v8_schmitt_map[] = { + 0, + 970000, + 1040000 +}; + +static const u32 cv1800b_18od33_1v8_schmitt_map[] = { + 0, + 1070000 +}; + +static const u32 cv1800b_18od33_3v3_schmitt_map[] = { + 0, + 1100000 +}; + +static int cv1800b_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map) +{ + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 pstate = psmap[pin->power_domain]; + + if (type == IO_TYPE_1V8_ONLY) { + *map = cv1800b_1v8_schmitt_map; + return ARRAY_SIZE(cv1800b_1v8_schmitt_map); + } + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) { + *map = cv1800b_18od33_1v8_schmitt_map; + return ARRAY_SIZE(cv1800b_18od33_1v8_schmitt_map); + } else if (pstate == PIN_POWER_STATE_3V3) { + *map = cv1800b_18od33_3v3_schmitt_map; + return ARRAY_SIZE(cv1800b_18od33_3v3_schmitt_map); + } + } + + return -ENOTSUPP; +} + +static const struct cv1800_vddio_cfg_ops cv1800b_vddio_cfg_ops = { + .get_pull_up = cv1800b_get_pull_up, + .get_pull_down = cv1800b_get_pull_down, + .get_oc_map = 
cv1800b_get_oc_map, + .get_schmitt_map = cv1800b_get_schmitt_map, +}; + +static const struct pinctrl_pin_desc cv1800b_pins[] = { + PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"), + PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"), + PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"), + PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"), + PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"), + PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"), + PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"), + PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"), + PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"), + PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"), + PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"), + PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"), + PINCTRL_PIN(PIN_SPINOR_HOLD_X, "SPINOR_HOLD_X"), + PINCTRL_PIN(PIN_SPINOR_SCK, "SPINOR_SCK"), + PINCTRL_PIN(PIN_SPINOR_MOSI, "SPINOR_MOSI"), + PINCTRL_PIN(PIN_SPINOR_WP_X, "SPINOR_WP_X"), + PINCTRL_PIN(PIN_SPINOR_MISO, "SPINOR_MISO"), + PINCTRL_PIN(PIN_SPINOR_CS_X, "SPINOR_CS_X"), + PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"), + PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"), + PINCTRL_PIN(PIN_AUX0, "AUX0"), + PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"), + PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"), + PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"), + PINCTRL_PIN(PIN_SD1_GPIO0, "SD1_GPIO0"), + PINCTRL_PIN(PIN_SD1_GPIO1, "SD1_GPIO1"), + PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"), + PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"), + PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"), + PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"), + PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"), + PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"), + PINCTRL_PIN(PIN_ADC1, "ADC1"), + PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"), + PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"), + PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"), + PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"), + PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"), + PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"), + PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"), + PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"), + PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"), + PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"), + PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"), + PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"), + PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"), + PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"), + PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"), + PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"), +}; + +static const struct cv1800_pin cv1800b_pin_data[ARRAY_SIZE(cv1800b_pins)] = { + CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_AUD, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x12c, 6), + CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x000, 7, + CV1800_PINCONF_AREA_SYS, 0xa00), + CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x004, 7, + CV1800_PINCONF_AREA_SYS, 0xa04), + CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x008, 7, + CV1800_PINCONF_AREA_SYS, 0xa08), + CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x00c, 7, + CV1800_PINCONF_AREA_SYS, 0xa0c), + CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x010, 7, + CV1800_PINCONF_AREA_SYS, 0xa10), + CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x014, 7, + CV1800_PINCONF_AREA_SYS, 0xa14), + CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x018, 3, + CV1800_PINCONF_AREA_SYS, 0x900), + CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x01c, 3, + CV1800_PINCONF_AREA_SYS, 0x904), + CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + 
CV1800_PINCONF_AREA_SYS, 0x020, 3, + CV1800_PINCONF_AREA_SYS, 0x908), + CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x024, 7, + CV1800_PINCONF_AREA_SYS, 0x90c), + CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x028, 7, + CV1800_PINCONF_AREA_SYS, 0x910), + CV1800_GENERAL_PIN(PIN_SPINOR_HOLD_X, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x02c, 3, + CV1800_PINCONF_AREA_SYS, 0x914), + CV1800_GENERAL_PIN(PIN_SPINOR_SCK, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x030, 3, + CV1800_PINCONF_AREA_SYS, 0x918), + CV1800_GENERAL_PIN(PIN_SPINOR_MOSI, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x034, 3, + CV1800_PINCONF_AREA_SYS, 0x91c), + CV1800_GENERAL_PIN(PIN_SPINOR_WP_X, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x038, 3, + CV1800_PINCONF_AREA_SYS, 0x920), + CV1800_GENERAL_PIN(PIN_SPINOR_MISO, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x03c, 3, + CV1800_PINCONF_AREA_SYS, 0x924), + CV1800_GENERAL_PIN(PIN_SPINOR_CS_X, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x040, 3, + CV1800_PINCONF_AREA_SYS, 0x928), + CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x04c, 7, + CV1800_PINCONF_AREA_SYS, 0x934), + CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x050, 7, + CV1800_PINCONF_AREA_SYS, 0x938), + CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_SD0_SPI, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x054, 7, + CV1800_PINCONF_AREA_SYS, 0x93c), + CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x05c, 0, + CV1800_PINCONF_AREA_RTC, 0x004), + CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x068, 3, + CV1800_PINCONF_AREA_RTC, 0x010), + CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x074, 0, + CV1800_PINCONF_AREA_RTC, 0x020), + CV1800_GENERAL_PIN(PIN_SD1_GPIO0, VDD33A_ETH_USB_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x088, 7, + CV1800_PINCONF_AREA_RTC, 0x034), + CV1800_GENERAL_PIN(PIN_SD1_GPIO1, VDD33A_ETH_USB_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x084, 7, + CV1800_PINCONF_AREA_RTC, 0x030), + CV1800_GENERAL_PIN(PIN_SD1_D3, VDD33A_ETH_USB_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x08c, 7, + CV1800_PINCONF_AREA_RTC, 0x038), + CV1800_GENERAL_PIN(PIN_SD1_D2, VDD33A_ETH_USB_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x090, 7, + CV1800_PINCONF_AREA_RTC, 0x03c), + CV1800_GENERAL_PIN(PIN_SD1_D1, VDD33A_ETH_USB_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x094, 7, + CV1800_PINCONF_AREA_RTC, 0x040), + CV1800_GENERAL_PIN(PIN_SD1_D0, VDD33A_ETH_USB_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x098, 7, + CV1800_PINCONF_AREA_RTC, 0x044), + CV1800_GENERAL_PIN(PIN_SD1_CMD, VDD33A_ETH_USB_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x09c, 7, + CV1800_PINCONF_AREA_RTC, 0x048), + CV1800_GENERAL_PIN(PIN_SD1_CLK, VDD33A_ETH_USB_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0a0, 7, + CV1800_PINCONF_AREA_RTC, 0x04c), + CV1800_GENERAL_PIN(PIN_ADC1, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a8, 6, + CV1800_PINCONF_AREA_SYS, 0x804), + CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0ac, 6, + 
CV1800_PINCONF_AREA_SYS, 0x808), + CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x0c0, 7), + CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x0c4, 7), + CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x0c8, 7), + CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x0cc, 7), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0d4, 7, + CV1800_PINCONF_AREA_SYS, 0x0bc, 7, + CV1800_PINCONF_AREA_SYS, 0xc04), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0d8, 7, + CV1800_PINCONF_AREA_SYS, 0x0b8, 7, + CV1800_PINCONF_AREA_SYS, 0xc08), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0dc, 7, + CV1800_PINCONF_AREA_SYS, 0x0b0, 7, + CV1800_PINCONF_AREA_SYS, 0xc0c), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0e0, 7, + CV1800_PINCONF_AREA_SYS, 0x0b4, 7, + CV1800_PINCONF_AREA_SYS, 0xc10), + CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0e4, 7, + CV1800_PINCONF_AREA_SYS, 0xc14), + CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0e8, 7, + CV1800_PINCONF_AREA_SYS, 0xc18), + CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0ec, 7, + CV1800_PINCONF_AREA_SYS, 0xc1c), + CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f0, 7, + CV1800_PINCONF_AREA_SYS, 0xc20), + CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f4, 7, + CV1800_PINCONF_AREA_SYS, 0xc24), + CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_USB_PLL_ETH_CSI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f8, 7, + CV1800_PINCONF_AREA_SYS, 0xc28), + CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_AUD, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x120, 5), +}; + +static const struct cv1800_pinctrl_data cv1800b_pindata = { + .pins = cv1800b_pins, + .pindata = cv1800b_pin_data, + .pdnames = cv1800b_power_domain_desc, + .vddio_ops = &cv1800b_vddio_cfg_ops, + .npins = ARRAY_SIZE(cv1800b_pins), + .npd = ARRAY_SIZE(cv1800b_power_domain_desc), +}; + +static const struct of_device_id cv1800b_pinctrl_ids[] = { + { .compatible = "sophgo,cv1800b-pinctrl", .data = &cv1800b_pindata }, + { } +}; +MODULE_DEVICE_TABLE(of, cv1800b_pinctrl_ids); + +static struct platform_driver cv1800b_pinctrl_driver = { + .probe = cv1800_pinctrl_probe, + .driver = { + .name = "cv1800b-pinctrl", + .suppress_bind_attrs = true, + .of_match_table = cv1800b_pinctrl_ids, + }, +}; +module_platform_driver(cv1800b_pinctrl_driver); + +MODULE_DESCRIPTION("Pinctrl driver for the CV1800B series SoC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/sophgo/pinctrl-cv1812h.c b/drivers/pinctrl/sophgo/pinctrl-cv1812h.c new file mode 100644 index 00000000000000..5632290b46fa67 --- /dev/null +++ b/drivers/pinctrl/sophgo/pinctrl-cv1812h.c @@ -0,0 +1,771 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo CV1812H SoC pinctrl driver. + * + * Copyright (C) 2024 Inochi Amaoto + * + * This file is generated from vendor pinout definition. 
+ */ + +#include +#include +#include + +#include +#include + +#include + +#include "pinctrl-cv18xx.h" + +enum CV1812H_POWER_DOMAIN { + VDD18A_EPHY = 0, + VDD18A_MIPI = 1, + VDDIO18_1 = 2, + VDDIO_EMMC = 3, + VDDIO_RTC = 4, + VDDIO_SD0 = 5, + VDDIO_SD1 = 6, + VDDIO_VIVO = 7 +}; + +static const char *const cv1812h_power_domain_desc[] = { + [VDD18A_EPHY] = "VDD18A_EPHY", + [VDD18A_MIPI] = "VDD18A_MIPI", + [VDDIO18_1] = "VDDIO18_1", + [VDDIO_EMMC] = "VDDIO_EMMC", + [VDDIO_RTC] = "VDDIO_RTC", + [VDDIO_SD0] = "VDDIO_SD0", + [VDDIO_SD1] = "VDDIO_SD1", + [VDDIO_VIVO] = "VDDIO_VIVO", +}; + +static int cv1812h_get_pull_up(struct cv1800_pin *pin, const u32 *psmap) +{ + u32 pstate = psmap[pin->power_domain]; + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + + if (type == IO_TYPE_1V8_ONLY) + return 79000; + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) + return 60000; + if (pstate == PIN_POWER_STATE_3V3) + return 60000; + + return -EINVAL; + } + + return -ENOTSUPP; +} + +static int cv1812h_get_pull_down(struct cv1800_pin *pin, const u32 *psmap) +{ + u32 pstate = psmap[pin->power_domain]; + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + + if (type == IO_TYPE_1V8_ONLY) + return 87000; + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) + return 61000; + if (pstate == PIN_POWER_STATE_3V3) + return 62000; + + return -EINVAL; + } + + return -ENOTSUPP; +} + +static const u32 cv1812h_1v8_oc_map[] = { + 12800, + 25300, + 37400, + 49000 +}; + +static const u32 cv1812h_18od33_1v8_oc_map[] = { + 7800, + 11700, + 15500, + 19200, + 23000, + 26600, + 30200, + 33700 +}; + +static const u32 cv1812h_18od33_3v3_oc_map[] = { + 5500, + 8200, + 10800, + 13400, + 16100, + 18700, + 21200, + 23700 +}; + +static const u32 cv1812h_eth_oc_map[] = { + 15700, + 17800 +}; + +static int cv1812h_get_oc_map(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map) +{ + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 pstate = psmap[pin->power_domain]; + + if (type == IO_TYPE_1V8_ONLY) { + *map = cv1812h_1v8_oc_map; + return ARRAY_SIZE(cv1812h_1v8_oc_map); + } + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) { + *map = cv1812h_18od33_1v8_oc_map; + return ARRAY_SIZE(cv1812h_18od33_1v8_oc_map); + } else if (pstate == PIN_POWER_STATE_3V3) { + *map = cv1812h_18od33_3v3_oc_map; + return ARRAY_SIZE(cv1812h_18od33_3v3_oc_map); + } + } + + if (type == IO_TYPE_ETH) { + *map = cv1812h_eth_oc_map; + return ARRAY_SIZE(cv1812h_eth_oc_map); + } + + return -ENOTSUPP; +} + +static const u32 cv1812h_1v8_schmitt_map[] = { + 0, + 970000, + 1040000 +}; + +static const u32 cv1812h_18od33_1v8_schmitt_map[] = { + 0, + 1070000 +}; + +static const u32 cv1812h_18od33_3v3_schmitt_map[] = { + 0, + 1100000 +}; + +static int cv1812h_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map) +{ + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 pstate = psmap[pin->power_domain]; + + if (type == IO_TYPE_1V8_ONLY) { + *map = cv1812h_1v8_schmitt_map; + return ARRAY_SIZE(cv1812h_1v8_schmitt_map); + } + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) { + *map = cv1812h_18od33_1v8_schmitt_map; + return ARRAY_SIZE(cv1812h_18od33_1v8_schmitt_map); + } else if (pstate == PIN_POWER_STATE_3V3) { + *map = cv1812h_18od33_3v3_schmitt_map; + return ARRAY_SIZE(cv1812h_18od33_3v3_schmitt_map); + } + } + + return -ENOTSUPP; +} + +static const struct cv1800_vddio_cfg_ops cv1812h_vddio_cfg_ops = { + .get_pull_up = 
cv1812h_get_pull_up, + .get_pull_down = cv1812h_get_pull_down, + .get_oc_map = cv1812h_get_oc_map, + .get_schmitt_map = cv1812h_get_schmitt_map, +}; + +static const struct pinctrl_pin_desc cv1812h_pins[] = { + PINCTRL_PIN(PIN_MIPI_TXM4, "MIPI_TXM4"), + PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"), + PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"), + PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"), + PINCTRL_PIN(PIN_VIVO_D2, "VIVO_D2"), + PINCTRL_PIN(PIN_VIVO_D3, "VIVO_D3"), + PINCTRL_PIN(PIN_VIVO_D10, "VIVO_D10"), + PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"), + PINCTRL_PIN(PIN_MIPI_TXP3, "MIPI_TXP3"), + PINCTRL_PIN(PIN_MIPI_TXM3, "MIPI_TXM3"), + PINCTRL_PIN(PIN_MIPI_TXP4, "MIPI_TXP4"), + PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"), + PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"), + PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"), + PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"), + PINCTRL_PIN(PIN_MIPIRX5N, "MIPIRX5N"), + PINCTRL_PIN(PIN_VIVO_D1, "VIVO_D1"), + PINCTRL_PIN(PIN_VIVO_D5, "VIVO_D5"), + PINCTRL_PIN(PIN_VIVO_D7, "VIVO_D7"), + PINCTRL_PIN(PIN_VIVO_D9, "VIVO_D9"), + PINCTRL_PIN(PIN_USB_ID, "USB_ID"), + PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"), + PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"), + PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"), + PINCTRL_PIN(PIN_CAM_PD0, "CAM_PD0"), + PINCTRL_PIN(PIN_CAM_MCLK0, "CAM_MCLK0"), + PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"), + PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"), + PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"), + PINCTRL_PIN(PIN_MIPIRX5P, "MIPIRX5P"), + PINCTRL_PIN(PIN_VIVO_CLK, "VIVO_CLK"), + PINCTRL_PIN(PIN_VIVO_D6, "VIVO_D6"), + PINCTRL_PIN(PIN_VIVO_D8, "VIVO_D8"), + PINCTRL_PIN(PIN_USB_VBUS_EN, "USB_VBUS_EN"), + PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"), + PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"), + PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"), + PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"), + PINCTRL_PIN(PIN_CAM_MCLK1, "CAM_MCLK1"), + PINCTRL_PIN(PIN_IIC3_SCL, "IIC3_SCL"), + PINCTRL_PIN(PIN_VIVO_D4, "VIVO_D4"), + PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"), + PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"), + PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"), + PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"), + PINCTRL_PIN(PIN_CAM_PD1, "CAM_PD1"), + PINCTRL_PIN(PIN_CAM_RST0, "CAM_RST0"), + PINCTRL_PIN(PIN_VIVO_D0, "VIVO_D0"), + PINCTRL_PIN(PIN_ADC1, "ADC1"), + PINCTRL_PIN(PIN_ADC2, "ADC2"), + PINCTRL_PIN(PIN_ADC3, "ADC3"), + PINCTRL_PIN(PIN_AUD_AOUTL, "AUD_AOUTL"), + PINCTRL_PIN(PIN_IIC3_SDA, "IIC3_SDA"), + PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"), + PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"), + PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"), + PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"), + PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"), + PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"), + PINCTRL_PIN(PIN_RSTN, "RSTN"), + PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"), + PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"), + PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"), + PINCTRL_PIN(PIN_AUD_AINR_MIC, "AUD_AINR_MIC"), + PINCTRL_PIN(PIN_IIC2_SCL, "IIC2_SCL"), + PINCTRL_PIN(PIN_IIC2_SDA, "IIC2_SDA"), + PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"), + PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"), + PINCTRL_PIN(PIN_UART2_RX, "UART2_RX"), + PINCTRL_PIN(PIN_UART2_CTS, "UART2_CTS"), + PINCTRL_PIN(PIN_UART2_TX, "UART2_TX"), + PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"), + PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"), + PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"), + PINCTRL_PIN(PIN_CLK32K, "CLK32K"), + PINCTRL_PIN(PIN_UART2_RTS, "UART2_RTS"), + PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"), + PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"), + PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"), + PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"), + PINCTRL_PIN(PIN_JTAG_CPU_TRST, "JTAG_CPU_TRST"), + PINCTRL_PIN(PIN_PWR_ON, "PWR_ON"), + PINCTRL_PIN(PIN_PWR_GPIO2, 
"PWR_GPIO2"), + PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"), + PINCTRL_PIN(PIN_CLK25M, "CLK25M"), + PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"), + PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"), + PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"), + PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"), + PINCTRL_PIN(PIN_PWR_WAKEUP1, "PWR_WAKEUP1"), + PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"), + PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"), + PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"), + PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"), + PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"), + PINCTRL_PIN(PIN_EMMC_RSTN, "EMMC_RSTN"), + PINCTRL_PIN(PIN_AUX0, "AUX0"), + PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"), + PINCTRL_PIN(PIN_PWR_SEQ3, "PWR_SEQ3"), + PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"), + PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"), + PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"), + PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"), + PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"), + PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"), + PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"), + PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"), + PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"), + PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"), + PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"), +}; + +static const struct cv1800_pin cv1812h_pin_data[ARRAY_SIZE(cv1812h_pins)] = { + CV1800_GENERAL_PIN(PIN_MIPI_TXM4, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x194, 7, + CV1800_PINCONF_AREA_SYS, 0xc60), + CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x18c, 7, + CV1800_PINCONF_AREA_SYS, 0xc58), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x178, 7, + CV1800_PINCONF_AREA_SYS, 0x118, 7, + CV1800_PINCONF_AREA_SYS, 0xc44), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x170, 7, + CV1800_PINCONF_AREA_SYS, 0x11c, 7, + CV1800_PINCONF_AREA_SYS, 0xc3c), + CV1800_GENERAL_PIN(PIN_VIVO_D2, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x154, 7, + CV1800_PINCONF_AREA_SYS, 0xc20), + CV1800_GENERAL_PIN(PIN_VIVO_D3, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x150, 7, + CV1800_PINCONF_AREA_SYS, 0xc1c), + CV1800_GENERAL_PIN(PIN_VIVO_D10, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x134, 7, + CV1800_PINCONF_AREA_SYS, 0xc00), + CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x108, 5, + CV1800_PINCONF_AREA_SYS, 0x820), + CV1800_GENERAL_PIN(PIN_MIPI_TXP3, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1a0, 7, + CV1800_PINCONF_AREA_SYS, 0xc6c), + CV1800_GENERAL_PIN(PIN_MIPI_TXM3, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x19c, 7, + CV1800_PINCONF_AREA_SYS, 0xc68), + CV1800_GENERAL_PIN(PIN_MIPI_TXP4, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x198, 7, + CV1800_PINCONF_AREA_SYS, 0xc64), + CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x190, 7, + CV1800_PINCONF_AREA_SYS, 0xc5c), + CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x184, 7, + CV1800_PINCONF_AREA_SYS, 0xc50), + CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x17c, 7, + CV1800_PINCONF_AREA_SYS, 0xc48), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x16c, 7, + CV1800_PINCONF_AREA_SYS, 0x120, 7, + CV1800_PINCONF_AREA_SYS, 0xc38), + CV1800_GENERAL_PIN(PIN_MIPIRX5N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + 
CV1800_PINCONF_AREA_SYS, 0x164, 7, + CV1800_PINCONF_AREA_SYS, 0xc30), + CV1800_GENERAL_PIN(PIN_VIVO_D1, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x158, 7, + CV1800_PINCONF_AREA_SYS, 0xc24), + CV1800_GENERAL_PIN(PIN_VIVO_D5, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x148, 7, + CV1800_PINCONF_AREA_SYS, 0xc14), + CV1800_GENERAL_PIN(PIN_VIVO_D7, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x140, 7, + CV1800_PINCONF_AREA_SYS, 0xc0c), + CV1800_GENERAL_PIN(PIN_VIVO_D9, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x138, 7, + CV1800_PINCONF_AREA_SYS, 0xc04), + CV1800_GENERAL_PIN(PIN_USB_ID, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0fc, 3, + CV1800_PINCONF_AREA_SYS, 0x814), + CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_EPHY, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x130, 7), + CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1a8, 7, + CV1800_PINCONF_AREA_SYS, 0xc74), + CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1a4, 7, + CV1800_PINCONF_AREA_SYS, 0xc70), + CV1800_GENERAL_PIN(PIN_CAM_PD0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x004, 4, + CV1800_PINCONF_AREA_SYS, 0xb04), + CV1800_GENERAL_PIN(PIN_CAM_MCLK0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x000, 3, + CV1800_PINCONF_AREA_SYS, 0xb00), + CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x188, 7, + CV1800_PINCONF_AREA_SYS, 0xc54), + CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x180, 7, + CV1800_PINCONF_AREA_SYS, 0xc4c), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x174, 7, + CV1800_PINCONF_AREA_SYS, 0x114, 7, + CV1800_PINCONF_AREA_SYS, 0xc40), + CV1800_GENERAL_PIN(PIN_MIPIRX5P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x168, 7, + CV1800_PINCONF_AREA_SYS, 0xc34), + CV1800_GENERAL_PIN(PIN_VIVO_CLK, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x160, 7, + CV1800_PINCONF_AREA_SYS, 0xc2c), + CV1800_GENERAL_PIN(PIN_VIVO_D6, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x144, 7, + CV1800_PINCONF_AREA_SYS, 0xc10), + CV1800_GENERAL_PIN(PIN_VIVO_D8, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x13c, 7, + CV1800_PINCONF_AREA_SYS, 0xc08), + CV1800_GENERAL_PIN(PIN_USB_VBUS_EN, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x100, 3, + CV1800_PINCONF_AREA_SYS, 0x818), + CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_EPHY, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x12c, 7), + CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1cc, 5, + CV1800_PINCONF_AREA_SYS, 0xc8c), + CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b0, 7, + CV1800_PINCONF_AREA_SYS, 0xc7c), + CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1ac, 7, + CV1800_PINCONF_AREA_SYS, 0xc78), + CV1800_GENERAL_PIN(PIN_CAM_MCLK1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x00c, 4, + CV1800_PINCONF_AREA_SYS, 0xb0c), + CV1800_GENERAL_PIN(PIN_IIC3_SCL, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x014, 3, + CV1800_PINCONF_AREA_SYS, 0xb14), + CV1800_GENERAL_PIN(PIN_VIVO_D4, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x14c, 7, + CV1800_PINCONF_AREA_SYS, 0xc18), + 
CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_EPHY, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x128, 7), + CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_EPHY, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x124, 7), + CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b8, 7, + CV1800_PINCONF_AREA_SYS, 0xc84), + CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b4, 7, + CV1800_PINCONF_AREA_SYS, 0xc80), + CV1800_GENERAL_PIN(PIN_CAM_PD1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x010, 6, + CV1800_PINCONF_AREA_SYS, 0xb10), + CV1800_GENERAL_PIN(PIN_CAM_RST0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x008, 6, + CV1800_PINCONF_AREA_SYS, 0xb08), + CV1800_GENERAL_PIN(PIN_VIVO_D0, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x15c, 7, + CV1800_PINCONF_AREA_SYS, 0xc28), + CV1800_GENERAL_PIN(PIN_ADC1, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f8, 4, + CV1800_PINCONF_AREA_SYS, 0x810), + CV1800_GENERAL_PIN(PIN_ADC2, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f4, 7, + CV1800_PINCONF_AREA_SYS, 0x80c), + CV1800_GENERAL_PIN(PIN_ADC3, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f0, 7, + CV1800_PINCONF_AREA_SYS, 0x808), + CV1800_FUNC_PIN(PIN_AUD_AOUTL, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1c4, 5), + CV1800_GENERAL_PIN(PIN_IIC3_SDA, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x018, 3, + CV1800_PINCONF_AREA_SYS, 0xb18), + CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d4, 7, + CV1800_PINCONF_AREA_RTC, 0x05c), + CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1c8, 6), + CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d0, 7, + CV1800_PINCONF_AREA_RTC, 0x058), + CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0e4, 7, + CV1800_PINCONF_AREA_RTC, 0x06c), + CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0e0, 7, + CV1800_PINCONF_AREA_RTC, 0x068), + CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1bc, 5), + CV1800_GENERAL_PIN(PIN_RSTN, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0e8, 0, + CV1800_PINCONF_AREA_SYS, 0x800), + CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0ec, 3, + CV1800_PINCONF_AREA_SYS, 0x804), + CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d8, 7, + CV1800_PINCONF_AREA_RTC, 0x060), + CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0dc, 7, + CV1800_PINCONF_AREA_RTC, 0x064), + CV1800_FUNC_PIN(PIN_AUD_AINR_MIC, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1c0, 6), + CV1800_GENERAL_PIN(PIN_IIC2_SCL, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0b8, 7, + CV1800_PINCONF_AREA_RTC, 0x040), + CV1800_GENERAL_PIN(PIN_IIC2_SDA, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0bc, 7, + CV1800_PINCONF_AREA_RTC, 0x044), + CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x034, 3, + CV1800_PINCONF_AREA_SYS, 0x900), + CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x028, 7, + CV1800_PINCONF_AREA_SYS, 0xa0c), + CV1800_GENERAL_PIN(PIN_UART2_RX, VDDIO_RTC, + 
IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0c8, 7, + CV1800_PINCONF_AREA_RTC, 0x050), + CV1800_GENERAL_PIN(PIN_UART2_CTS, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0cc, 7, + CV1800_PINCONF_AREA_RTC, 0x054), + CV1800_GENERAL_PIN(PIN_UART2_TX, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0c0, 7, + CV1800_PINCONF_AREA_RTC, 0x048), + CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x01c, 7, + CV1800_PINCONF_AREA_SYS, 0xa00), + CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x024, 7, + CV1800_PINCONF_AREA_SYS, 0xa08), + CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x020, 7, + CV1800_PINCONF_AREA_SYS, 0xa04), + CV1800_GENERAL_PIN(PIN_CLK32K, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0b0, 7, + CV1800_PINCONF_AREA_RTC, 0x038), + CV1800_GENERAL_PIN(PIN_UART2_RTS, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0c4, 7, + CV1800_PINCONF_AREA_RTC, 0x04c), + CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x030, 7, + CV1800_PINCONF_AREA_SYS, 0xa14), + CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x02c, 7, + CV1800_PINCONF_AREA_SYS, 0xa10), + CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x044, 7, + CV1800_PINCONF_AREA_SYS, 0x910), + CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x040, 7, + CV1800_PINCONF_AREA_SYS, 0x90c), + CV1800_GENERAL_PIN(PIN_JTAG_CPU_TRST, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x06c, 6, + CV1800_PINCONF_AREA_SYS, 0x938), + CV1800_GENERAL_PIN(PIN_PWR_ON, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x09c, 7, + CV1800_PINCONF_AREA_RTC, 0x024), + CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0ac, 7, + CV1800_PINCONF_AREA_RTC, 0x034), + CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a4, 4, + CV1800_PINCONF_AREA_RTC, 0x02c), + CV1800_GENERAL_PIN(PIN_CLK25M, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0b4, 7, + CV1800_PINCONF_AREA_RTC, 0x03c), + CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x038, 3, + CV1800_PINCONF_AREA_SYS, 0x904), + CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x03c, 3, + CV1800_PINCONF_AREA_SYS, 0x908), + CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x068, 7, + CV1800_PINCONF_AREA_SYS, 0x934), + CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x064, 7, + CV1800_PINCONF_AREA_SYS, 0x930), + CV1800_GENERAL_PIN(PIN_PWR_WAKEUP1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x094, 7, + CV1800_PINCONF_AREA_RTC, 0x01c), + CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x090, 7, + CV1800_PINCONF_AREA_RTC, 0x018), + CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a8, 7, + CV1800_PINCONF_AREA_RTC, 0x030), + CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x058, 3, + CV1800_PINCONF_AREA_SYS, 0x924), + CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x054, 3, 
+ CV1800_PINCONF_AREA_SYS, 0x920), + CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x04c, 3, + CV1800_PINCONF_AREA_SYS, 0x918), + CV1800_GENERAL_PIN(PIN_EMMC_RSTN, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x048, 4, + CV1800_PINCONF_AREA_SYS, 0x914), + CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x078, 7, + CV1800_PINCONF_AREA_SYS, 0x944), + CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x074, 7, + CV1800_PINCONF_AREA_SYS, 0x940), + CV1800_GENERAL_PIN(PIN_PWR_SEQ3, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x08c, 3, + CV1800_PINCONF_AREA_RTC, 0x010), + CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x07c, 0, + CV1800_PINCONF_AREA_RTC, 0x000), + CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x084, 3, + CV1800_PINCONF_AREA_RTC, 0x008), + CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x098, 7, + CV1800_PINCONF_AREA_RTC, 0x020), + CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x060, 3, + CV1800_PINCONF_AREA_SYS, 0x92c), + CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x05c, 3, + CV1800_PINCONF_AREA_SYS, 0x928), + CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x050, 3, + CV1800_PINCONF_AREA_SYS, 0x91c), + CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x070, 7, + CV1800_PINCONF_AREA_SYS, 0x93c), + CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1d0, 4, + CV1800_PINCONF_AREA_RTC, 0x0e0), + CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x080, 0, + CV1800_PINCONF_AREA_RTC, 0x004), + CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x088, 3, + CV1800_PINCONF_AREA_RTC, 0x00c), + CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a0, 0, + CV1800_PINCONF_AREA_RTC, 0x028), +}; + +static const struct cv1800_pinctrl_data cv1812h_pindata = { + .pins = cv1812h_pins, + .pindata = cv1812h_pin_data, + .pdnames = cv1812h_power_domain_desc, + .vddio_ops = &cv1812h_vddio_cfg_ops, + .npins = ARRAY_SIZE(cv1812h_pins), + .npd = ARRAY_SIZE(cv1812h_power_domain_desc), +}; + +static const struct of_device_id cv1812h_pinctrl_ids[] = { + { .compatible = "sophgo,cv1812h-pinctrl", .data = &cv1812h_pindata }, + { } +}; +MODULE_DEVICE_TABLE(of, cv1812h_pinctrl_ids); + +static struct platform_driver cv1812h_pinctrl_driver = { + .probe = cv1800_pinctrl_probe, + .driver = { + .name = "cv1812h-pinctrl", + .suppress_bind_attrs = true, + .of_match_table = cv1812h_pinctrl_ids, + }, +}; +module_platform_driver(cv1812h_pinctrl_driver); + +MODULE_DESCRIPTION("Pinctrl driver for the CV1812H series SoC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c new file mode 100644 index 00000000000000..d18fc5aa84f750 --- /dev/null +++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo CV18XX SoCs pinctrl driver. 
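+ *
+ * Shared core for the CV18XX family: it implements the generic pinctrl,
+ * pinmux and pinconf operations, while the per-SoC drivers (cv1812h,
+ * sg2000, sg2002) only supply their pin tables, power-domain names and
+ * vddio configuration ops.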
+ * + * Copyright (C) 2024 Inochi Amaoto + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "../core.h" +#include "../pinctrl-utils.h" +#include "../pinconf.h" +#include "../pinmux.h" +#include "pinctrl-cv18xx.h" + +struct cv1800_pinctrl { + struct device *dev; + struct pinctrl_dev *pctl_dev; + const struct cv1800_pinctrl_data *data; + struct pinctrl_desc pdesc; + u32 *power_cfg; + + struct mutex mutex; + raw_spinlock_t lock; + + void __iomem *regs[2]; +}; + +struct cv1800_pin_mux_config { + struct cv1800_pin *pin; + u32 config; +}; + +static unsigned int cv1800_dt_get_pin(u32 value) +{ + return value & GENMASK(15, 0); +} + +static unsigned int cv1800_dt_get_pin_mux(u32 value) +{ + return (value >> 16) & GENMASK(7, 0); +} + +static unsigned int cv1800_dt_get_pin_mux2(u32 value) +{ + return (value >> 24) & GENMASK(7, 0); +} + +#define cv1800_pinctrl_get_component_addr(pctrl, _comp) \ + ((pctrl)->regs[(_comp)->area] + (_comp)->offset) + +static int cv1800_cmp_pin(const void *key, const void *pivot) +{ + const struct cv1800_pin *pin = pivot; + int pin_id = (long)key; + int pivid = pin->pin; + + return pin_id - pivid; +} + +static int cv1800_set_power_cfg(struct cv1800_pinctrl *pctrl, + u8 domain, u32 cfg) +{ + if (domain >= pctrl->data->npd) + return -ENOTSUPP; + + if (pctrl->power_cfg[domain] && pctrl->power_cfg[domain] != cfg) + return -EINVAL; + + pctrl->power_cfg[domain] = cfg; + + return 0; +} + +static int cv1800_get_power_cfg(struct cv1800_pinctrl *pctrl, + u8 domain) +{ + return pctrl->power_cfg[domain]; +} + +static struct cv1800_pin *cv1800_get_pin(struct cv1800_pinctrl *pctrl, + unsigned long pin) +{ + return bsearch((void *)pin, pctrl->data->pindata, pctrl->data->npins, + sizeof(struct cv1800_pin), cv1800_cmp_pin); +} + +#define PIN_BGA_ID_OFFSET 8 +#define PIN_BGA_ID_MASK 0xff + +static const char *const io_type_desc[] = { + "1V8", + "18OD33", + "AUDIO", + "ETH" +}; + +static const char *cv1800_get_power_cfg_desc(struct cv1800_pinctrl *pctrl, + u8 domain) +{ + return pctrl->data->pdnames[domain]; +} + +static void cv1800_pctrl_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *seq, unsigned int pin_id) +{ + struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id); + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 value; + void __iomem *reg; + + if (pin->pin >> PIN_BGA_ID_OFFSET) + seq_printf(seq, "pos: %c%u ", + 'A' + (pin->pin >> PIN_BGA_ID_OFFSET) - 1, + pin->pin & PIN_BGA_ID_MASK); + else + seq_printf(seq, "pos: %u ", pin->pin); + + seq_printf(seq, "power-domain: %s ", + cv1800_get_power_cfg_desc(pctrl, pin->power_domain)); + seq_printf(seq, "type: %s ", io_type_desc[type]); + + reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux); + value = readl(reg); + seq_printf(seq, "mux: 0x%08x ", value); + + if (pin->flags & CV1800_PIN_HAVE_MUX2) { + reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux2); + value = readl(reg); + seq_printf(seq, "mux2: 0x%08x ", value); + } + + if (type == IO_TYPE_1V8_ONLY || type == IO_TYPE_1V8_OR_3V3) { + reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf); + value = readl(reg); + seq_printf(seq, "conf: 0x%08x ", value); + } +} + +static int cv1800_verify_pinmux_config(const struct cv1800_pin_mux_config *config) +{ + unsigned int mux = cv1800_dt_get_pin_mux(config->config); + unsigned int mux2 = 
cv1800_dt_get_pin_mux2(config->config); + + if (mux > config->pin->mux.max) + return -EINVAL; + + if (config->pin->flags & CV1800_PIN_HAVE_MUX2) { + if (mux != config->pin->mux2.pfunc) + return -EINVAL; + + if (mux2 > config->pin->mux2.max) + return -EINVAL; + } else { + if (mux2 != PIN_MUX_INVALD) + return -ENOTSUPP; + } + + return 0; +} + +static int cv1800_verify_pin_group(const struct cv1800_pin_mux_config *mux, + unsigned long npins) +{ + enum cv1800_pin_io_type type; + u8 power_domain; + int i; + + if (npins == 1) + return 0; + + type = cv1800_pin_io_type(mux[0].pin); + power_domain = mux[0].pin->power_domain; + + for (i = 0; i < npins; i++) { + if (type != cv1800_pin_io_type(mux[i].pin) || + power_domain != mux[i].pin->power_domain) + return -ENOTSUPP; + } + + return 0; +} + +static int cv1800_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np, + struct pinctrl_map **maps, + unsigned int *num_maps) +{ + struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = pctrl->dev; + struct device_node *child; + struct pinctrl_map *map; + const char **grpnames; + const char *grpname; + int ngroups = 0; + int nmaps = 0; + int ret; + + for_each_available_child_of_node(np, child) + ngroups += 1; + + grpnames = devm_kcalloc(dev, ngroups, sizeof(*grpnames), GFP_KERNEL); + if (!grpnames) + return -ENOMEM; + + map = devm_kcalloc(dev, ngroups * 2, sizeof(*map), GFP_KERNEL); + if (!map) + return -ENOMEM; + + ngroups = 0; + mutex_lock(&pctrl->mutex); + for_each_available_child_of_node(np, child) { + int npins = of_property_count_u32_elems(child, "pinmux"); + unsigned int *pins; + struct cv1800_pin_mux_config *pinmuxs; + u32 config, power; + int i; + + if (npins < 1) { + dev_err(dev, "invalid pinctrl group %pOFn.%pOFn\n", + np, child); + ret = -EINVAL; + goto dt_failed; + } + + grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn", + np, child); + if (!grpname) { + ret = -ENOMEM; + goto dt_failed; + } + + grpnames[ngroups++] = grpname; + + pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL); + if (!pins) { + ret = -ENOMEM; + goto dt_failed; + } + + pinmuxs = devm_kcalloc(dev, npins, sizeof(*pinmuxs), GFP_KERNEL); + if (!pinmuxs) { + ret = -ENOMEM; + goto dt_failed; + } + + for (i = 0; i < npins; i++) { + ret = of_property_read_u32_index(child, "pinmux", + i, &config); + if (ret) + goto dt_failed; + + pins[i] = cv1800_dt_get_pin(config); + pinmuxs[i].config = config; + pinmuxs[i].pin = cv1800_get_pin(pctrl, pins[i]); + + if (!pinmuxs[i].pin) { + dev_err(dev, "failed to get pin %d\n", pins[i]); + ret = -ENODEV; + goto dt_failed; + } + + ret = cv1800_verify_pinmux_config(&pinmuxs[i]); + if (ret) { + dev_err(dev, "group %s pin %d is invalid\n", + grpname, i); + goto dt_failed; + } + } + + ret = cv1800_verify_pin_group(pinmuxs, npins); + if (ret) { + dev_err(dev, "group %s is invalid\n", grpname); + goto dt_failed; + } + + ret = of_property_read_u32(child, "power-source", &power); + if (ret) + goto dt_failed; + + if (!(power == PIN_POWER_STATE_3V3 || power == PIN_POWER_STATE_1V8)) { + dev_err(dev, "group %s have unsupported power: %u\n", + grpname, power); + ret = -ENOTSUPP; + goto dt_failed; + } + + ret = cv1800_set_power_cfg(pctrl, pinmuxs[0].pin->power_domain, + power); + if (ret) + goto dt_failed; + + map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP; + map[nmaps].data.mux.function = np->name; + map[nmaps].data.mux.group = grpname; + nmaps += 1; + + ret = pinconf_generic_parse_dt_config(child, pctldev, + &map[nmaps].data.configs.configs, + 
&map[nmaps].data.configs.num_configs); + if (ret) { + dev_err(dev, "failed to parse pin config of group %s: %d\n", + grpname, ret); + goto dt_failed; + } + + ret = pinctrl_generic_add_group(pctldev, grpname, + pins, npins, pinmuxs); + if (ret < 0) { + dev_err(dev, "failed to add group %s: %d\n", grpname, ret); + goto dt_failed; + } + + /* don't create a map if there are no pinconf settings */ + if (map[nmaps].data.configs.num_configs == 0) + continue; + + map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP; + map[nmaps].data.configs.group_or_pin = grpname; + nmaps += 1; + } + + ret = pinmux_generic_add_function(pctldev, np->name, + grpnames, ngroups, NULL); + if (ret < 0) { + dev_err(dev, "error adding function %s: %d\n", np->name, ret); + goto function_failed; + } + + *maps = map; + *num_maps = nmaps; + mutex_unlock(&pctrl->mutex); + + return 0; + +dt_failed: + of_node_put(child); +function_failed: + pinctrl_utils_free_map(pctldev, map, nmaps); + mutex_unlock(&pctrl->mutex); + return ret; +} + +static const struct pinctrl_ops cv1800_pctrl_ops = { + .get_groups_count = pinctrl_generic_get_group_count, + .get_group_name = pinctrl_generic_get_group_name, + .get_group_pins = pinctrl_generic_get_group_pins, + .pin_dbg_show = cv1800_pctrl_dbg_show, + .dt_node_to_map = cv1800_pctrl_dt_node_to_map, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int cv1800_pmx_set_mux(struct pinctrl_dev *pctldev, + unsigned int fsel, unsigned int gsel) +{ + struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct group_desc *group; + const struct cv1800_pin_mux_config *configs; + unsigned int i; + + group = pinctrl_generic_get_group(pctldev, gsel); + if (!group) + return -EINVAL; + + configs = group->data; + + for (i = 0; i < group->grp.npins; i++) { + const struct cv1800_pin *pin = configs[i].pin; + u32 value = configs[i].config; + void __iomem *reg_mux; + void __iomem *reg_mux2; + unsigned long flags; + u32 mux; + u32 mux2; + + reg_mux = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux); + reg_mux2 = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux2); + mux = cv1800_dt_get_pin_mux(value); + mux2 = cv1800_dt_get_pin_mux2(value); + + raw_spin_lock_irqsave(&pctrl->lock, flags); + writel_relaxed(mux, reg_mux); + if (mux2 != PIN_MUX_INVALD) + writel_relaxed(mux2, reg_mux2); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + } + + return 0; +} + +static const struct pinmux_ops cv1800_pmx_ops = { + .get_functions_count = pinmux_generic_get_function_count, + .get_function_name = pinmux_generic_get_function_name, + .get_function_groups = pinmux_generic_get_function_groups, + .set_mux = cv1800_pmx_set_mux, + .strict = true, +}; + +#define PIN_IO_PULLUP BIT(2) +#define PIN_IO_PULLDOWN BIT(3) +#define PIN_IO_DRIVE GENMASK(7, 5) +#define PIN_IO_SCHMITT GENMASK(9, 8) +#define PIN_IO_BUS_HOLD BIT(10) +#define PIN_IO_OUT_FAST_SLEW BIT(11) + +static u32 cv1800_pull_down_typical_resistor(struct cv1800_pinctrl *pctrl, + struct cv1800_pin *pin) +{ + return pctrl->data->vddio_ops->get_pull_down(pin, pctrl->power_cfg); +} + +static u32 cv1800_pull_up_typical_resistor(struct cv1800_pinctrl *pctrl, + struct cv1800_pin *pin) +{ + return pctrl->data->vddio_ops->get_pull_up(pin, pctrl->power_cfg); +} + +static int cv1800_pinctrl_oc2reg(struct cv1800_pinctrl *pctrl, + struct cv1800_pin *pin, u32 target) +{ + const u32 *map; + int i, len; + + len = pctrl->data->vddio_ops->get_oc_map(pin, pctrl->power_cfg, &map); + if (len < 0) + return len; + + for (i = 0; i < len; i++) { + if (map[i] >= target) + return i; + } 
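+	/*
+	 * The table from get_oc_map() is sorted by ascending typical output
+	 * current, so the first entry that meets the requested value gives
+	 * the register setting; if even the largest entry falls short, the
+	 * requested drive strength cannot be provided.
+	 */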
+ + return -EINVAL; +} + +static int cv1800_pinctrl_reg2oc(struct cv1800_pinctrl *pctrl, + struct cv1800_pin *pin, u32 reg) +{ + const u32 *map; + int len; + + len = pctrl->data->vddio_ops->get_oc_map(pin, pctrl->power_cfg, &map); + if (len < 0) + return len; + + if (reg >= len) + return -EINVAL; + + return map[reg]; +} + +static int cv1800_pinctrl_schmitt2reg(struct cv1800_pinctrl *pctrl, + struct cv1800_pin *pin, u32 target) +{ + const u32 *map; + int i, len; + + len = pctrl->data->vddio_ops->get_schmitt_map(pin, pctrl->power_cfg, + &map); + if (len < 0) + return len; + + for (i = 0; i < len; i++) { + if (map[i] == target) + return i; + } + + return -EINVAL; +} + +static int cv1800_pinctrl_reg2schmitt(struct cv1800_pinctrl *pctrl, + struct cv1800_pin *pin, u32 reg) +{ + const u32 *map; + int len; + + len = pctrl->data->vddio_ops->get_schmitt_map(pin, pctrl->power_cfg, + &map); + if (len < 0) + return len; + + if (reg >= len) + return -EINVAL; + + return map[reg]; +} + +static int cv1800_pconf_get(struct pinctrl_dev *pctldev, + unsigned int pin_id, unsigned long *config) +{ + struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + int param = pinconf_to_config_param(*config); + struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id); + enum cv1800_pin_io_type type; + u32 value; + u32 arg; + bool enabled; + int ret; + + if (!pin) + return -EINVAL; + + type = cv1800_pin_io_type(pin); + if (type == IO_TYPE_ETH || type == IO_TYPE_AUDIO) + return -ENOTSUPP; + + value = readl(cv1800_pinctrl_get_component_addr(pctrl, &pin->conf)); + + switch (param) { + case PIN_CONFIG_BIAS_PULL_DOWN: + enabled = FIELD_GET(PIN_IO_PULLDOWN, value); + arg = cv1800_pull_down_typical_resistor(pctrl, pin); + break; + case PIN_CONFIG_BIAS_PULL_UP: + enabled = FIELD_GET(PIN_IO_PULLUP, value); + arg = cv1800_pull_up_typical_resistor(pctrl, pin); + break; + case PIN_CONFIG_DRIVE_STRENGTH_UA: + enabled = true; + arg = FIELD_GET(PIN_IO_DRIVE, value); + ret = cv1800_pinctrl_reg2oc(pctrl, pin, arg); + if (ret < 0) + return ret; + arg = ret; + break; + case PIN_CONFIG_INPUT_SCHMITT_UV: + arg = FIELD_GET(PIN_IO_SCHMITT, value); + ret = cv1800_pinctrl_reg2schmitt(pctrl, pin, arg); + if (ret < 0) + return ret; + arg = ret; + enabled = arg != 0; + break; + case PIN_CONFIG_POWER_SOURCE: + enabled = true; + arg = cv1800_get_power_cfg(pctrl, pin->power_domain); + break; + case PIN_CONFIG_SLEW_RATE: + enabled = true; + arg = FIELD_GET(PIN_IO_OUT_FAST_SLEW, value); + break; + case PIN_CONFIG_BIAS_BUS_HOLD: + arg = FIELD_GET(PIN_IO_BUS_HOLD, value); + enabled = arg != 0; + break; + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + + return enabled ? 
0 : -EINVAL; +} + +static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl, + struct cv1800_pin *pin, + unsigned long *configs, + unsigned int num_configs, + u32 *value) +{ + int i; + u32 v = 0; + enum cv1800_pin_io_type type; + int ret; + + if (!pin) + return -EINVAL; + + type = cv1800_pin_io_type(pin); + if (type == IO_TYPE_ETH || type == IO_TYPE_AUDIO) + return -ENOTSUPP; + + for (i = 0; i < num_configs; i++) { + int param = pinconf_to_config_param(configs[i]); + u32 arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_PULL_DOWN: + v &= ~PIN_IO_PULLDOWN; + v |= FIELD_PREP(PIN_IO_PULLDOWN, arg); + break; + case PIN_CONFIG_BIAS_PULL_UP: + v &= ~PIN_IO_PULLUP; + v |= FIELD_PREP(PIN_IO_PULLUP, arg); + break; + case PIN_CONFIG_DRIVE_STRENGTH_UA: + ret = cv1800_pinctrl_oc2reg(pctrl, pin, arg); + if (ret < 0) + return ret; + v &= ~PIN_IO_DRIVE; + v |= FIELD_PREP(PIN_IO_DRIVE, ret); + break; + case PIN_CONFIG_INPUT_SCHMITT_UV: + ret = cv1800_pinctrl_schmitt2reg(pctrl, pin, arg); + if (ret < 0) + return ret; + v &= ~PIN_IO_SCHMITT; + v |= FIELD_PREP(PIN_IO_SCHMITT, ret); + break; + case PIN_CONFIG_POWER_SOURCE: + /* Ignore power source as it is always fixed */ + break; + case PIN_CONFIG_SLEW_RATE: + v &= ~PIN_IO_OUT_FAST_SLEW; + v |= FIELD_PREP(PIN_IO_OUT_FAST_SLEW, arg); + break; + case PIN_CONFIG_BIAS_BUS_HOLD: + v &= ~PIN_IO_BUS_HOLD; + v |= FIELD_PREP(PIN_IO_BUS_HOLD, arg); + break; + default: + return -ENOTSUPP; + } + } + + *value = v; + + return 0; +} + +static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl, + unsigned int pin_id, + u32 value) +{ + struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id); + unsigned long flags; + void __iomem *addr; + + if (!pin) + return -EINVAL; + + addr = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf); + + raw_spin_lock_irqsave(&pctrl->lock, flags); + writel(value, addr); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; +} + +static int cv1800_pconf_set(struct pinctrl_dev *pctldev, + unsigned int pin_id, unsigned long *configs, + unsigned int num_configs) +{ + struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id); + u32 value; + + if (!pin) + return -ENODEV; + + if (cv1800_pinconf_compute_config(pctrl, pin, + configs, num_configs, &value)) + return -ENOTSUPP; + + return cv1800_pin_set_config(pctrl, pin_id, value); +} + +static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev, + unsigned int gsel, + unsigned long *configs, + unsigned int num_configs) +{ + struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + const struct group_desc *group; + const struct cv1800_pin_mux_config *pinmuxs; + u32 value; + int i; + + group = pinctrl_generic_get_group(pctldev, gsel); + if (!group) + return -EINVAL; + + pinmuxs = group->data; + + if (cv1800_pinconf_compute_config(pctrl, pinmuxs[0].pin, + configs, num_configs, &value)) + return -ENOTSUPP; + + for (i = 0; i < group->grp.npins; i++) + cv1800_pin_set_config(pctrl, group->grp.pins[i], value); + + return 0; +} + +static const struct pinconf_ops cv1800_pconf_ops = { + .pin_config_get = cv1800_pconf_get, + .pin_config_set = cv1800_pconf_set, + .pin_config_group_set = cv1800_pconf_group_set, + .is_generic = true, +}; + +int cv1800_pinctrl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cv1800_pinctrl *pctrl; + const struct cv1800_pinctrl_data *pctrl_data; + int ret; + + pctrl_data = device_get_match_data(dev); + if 
(!pctrl_data) + return -ENODEV; + + if (pctrl_data->npins == 0 || pctrl_data->npd == 0) + return dev_err_probe(dev, -EINVAL, "invalid pin data\n"); + + pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + + pctrl->power_cfg = devm_kcalloc(dev, pctrl_data->npd, + sizeof(u32), GFP_KERNEL); + if (!pctrl->power_cfg) + return -ENOMEM; + + pctrl->regs[0] = devm_platform_ioremap_resource_byname(pdev, "sys"); + if (IS_ERR(pctrl->regs[0])) + return PTR_ERR(pctrl->regs[0]); + + pctrl->regs[1] = devm_platform_ioremap_resource_byname(pdev, "rtc"); + if (IS_ERR(pctrl->regs[1])) + return PTR_ERR(pctrl->regs[1]); + + pctrl->pdesc.name = dev_name(dev); + pctrl->pdesc.pins = pctrl_data->pins; + pctrl->pdesc.npins = pctrl_data->npins; + pctrl->pdesc.pctlops = &cv1800_pctrl_ops; + pctrl->pdesc.pmxops = &cv1800_pmx_ops; + pctrl->pdesc.confops = &cv1800_pconf_ops; + pctrl->pdesc.owner = THIS_MODULE; + + pctrl->data = pctrl_data; + pctrl->dev = dev; + raw_spin_lock_init(&pctrl->lock); + mutex_init(&pctrl->mutex); + + platform_set_drvdata(pdev, pctrl); + + ret = devm_pinctrl_register_and_init(dev, &pctrl->pdesc, + pctrl, &pctrl->pctl_dev); + if (ret) + return dev_err_probe(dev, ret, + "fail to register pinctrl driver\n"); + + return pinctrl_enable(pctrl->pctl_dev); +} +EXPORT_SYMBOL_GPL(cv1800_pinctrl_probe); diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.h b/drivers/pinctrl/sophgo/pinctrl-cv18xx.h new file mode 100644 index 00000000000000..1a9998abb3b73b --- /dev/null +++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Inochi Amaoto + */ + +#ifndef _PINCTRL_SOPHGO_CV18XX_H +#define _PINCTRL_SOPHGO_CV18XX_H + +#include +#include +#include +#include +#include +#include +#include +#include + +enum cv1800_pin_io_type { + IO_TYPE_1V8_ONLY = 0, + IO_TYPE_1V8_OR_3V3 = 1, + IO_TYPE_AUDIO = 2, + IO_TYPE_ETH = 3 +}; + +#define CV1800_PINCONF_AREA_SYS 0 +#define CV1800_PINCONF_AREA_RTC 1 + +struct cv1800_pinmux { + u16 offset; + u8 area; + u8 max; +}; + +struct cv1800_pinmux2 { + u16 offset; + u8 area; + u8 max; + u8 pfunc; +}; + +struct cv1800_pinconf { + u16 offset; + u8 area; +}; + +#define CV1800_PIN_HAVE_MUX2 BIT(0) +#define CV1800_PIN_IO_TYPE GENMASK(2, 1) + +#define CV1800_PIN_FLAG_IO_TYPE(type) \ + FIELD_PREP_CONST(CV1800_PIN_IO_TYPE, type) +struct cv1800_pin { + u16 pin; + u16 flags; + u8 power_domain; + struct cv1800_pinmux mux; + struct cv1800_pinmux2 mux2; + struct cv1800_pinconf conf; +}; + +#define PIN_POWER_STATE_1V8 1800 +#define PIN_POWER_STATE_3V3 3300 + +/** + * struct cv1800_vddio_cfg_ops - pin vddio operations + * + * @get_pull_up: get resistor for pull up; + * @get_pull_down: get resistor for pull down. + * @get_oc_map: get mapping for typical low level output current value to + * register value map. + * @get_schmitt_map: get mapping for register value to typical schmitt + * threshold. 
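+ *
+ * @get_oc_map and @get_schmitt_map return the number of entries in the
+ * table they hand back (or a negative errno); the core uses that length
+ * to bound the register <-> value translation.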
+ */ +struct cv1800_vddio_cfg_ops { + int (*get_pull_up)(struct cv1800_pin *pin, const u32 *psmap); + int (*get_pull_down)(struct cv1800_pin *pin, const u32 *psmap); + int (*get_oc_map)(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map); + int (*get_schmitt_map)(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map); +}; + +struct cv1800_pinctrl_data { + const struct pinctrl_pin_desc *pins; + const struct cv1800_pin *pindata; + const char * const *pdnames; + const struct cv1800_vddio_cfg_ops *vddio_ops; + u16 npins; + u16 npd; +}; + +static inline enum cv1800_pin_io_type cv1800_pin_io_type(struct cv1800_pin *pin) +{ + return FIELD_GET(CV1800_PIN_IO_TYPE, pin->flags); +}; + +int cv1800_pinctrl_probe(struct platform_device *pdev); + +#define CV1800_FUNC_PIN(_id, _power_domain, _type, \ + _mux_area, _mux_offset, _mux_func_max) \ + { \ + .pin = (_id), \ + .power_domain = (_power_domain), \ + .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \ + .mux = { \ + .area = (_mux_area), \ + .offset = (_mux_offset), \ + .max = (_mux_func_max), \ + }, \ + } + +#define CV1800_GENERAL_PIN(_id, _power_domain, _type, \ + _mux_area, _mux_offset, _mux_func_max, \ + _conf_area, _conf_offset) \ + { \ + .pin = (_id), \ + .power_domain = (_power_domain), \ + .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \ + .mux = { \ + .area = (_mux_area), \ + .offset = (_mux_offset), \ + .max = (_mux_func_max), \ + }, \ + .conf = { \ + .area = (_conf_area), \ + .offset = (_conf_offset), \ + }, \ + } + +#define CV1800_GENERATE_PIN_MUX2(_id, _power_domain, _type, \ + _mux_area, _mux_offset, _mux_func_max, \ + _mux2_area, _mux2_offset, \ + _mux2_func_max, \ + _conf_area, _conf_offset) \ + { \ + .pin = (_id), \ + .power_domain = (_power_domain), \ + .flags = CV1800_PIN_FLAG_IO_TYPE(_type) | \ + CV1800_PIN_HAVE_MUX2, \ + .mux = { \ + .area = (_mux_area), \ + .offset = (_mux_offset), \ + .max = (_mux_func_max), \ + }, \ + .mux2 = { \ + .area = (_mux2_area), \ + .offset = (_mux2_offset), \ + .max = (_mux2_func_max), \ + }, \ + .conf = { \ + .area = (_conf_area), \ + .offset = (_conf_offset), \ + }, \ + } + +#endif diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2000.c b/drivers/pinctrl/sophgo/pinctrl-sg2000.c new file mode 100644 index 00000000000000..63c05b4dd68f5e --- /dev/null +++ b/drivers/pinctrl/sophgo/pinctrl-sg2000.c @@ -0,0 +1,771 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo SG2000 SoC pinctrl driver. + * + * Copyright (C) 2024 Inochi Amaoto + * + * This file is generated from vendor pinout definition. 
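+ *
+ * It only provides the SG2000 pin tables, power-domain names and vddio
+ * ops; the common logic lives in pinctrl-cv18xx.c.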
+ */ + +#include +#include +#include + +#include +#include + +#include + +#include "pinctrl-cv18xx.h" + +enum SG2000_POWER_DOMAIN { + VDD18A_EPHY = 0, + VDD18A_MIPI = 1, + VDDIO18_1 = 2, + VDDIO_EMMC = 3, + VDDIO_RTC = 4, + VDDIO_SD0 = 5, + VDDIO_SD1 = 6, + VDDIO_VIVO = 7 +}; + +static const char *const sg2000_power_domain_desc[] = { + [VDD18A_EPHY] = "VDD18A_EPHY", + [VDD18A_MIPI] = "VDD18A_MIPI", + [VDDIO18_1] = "VDDIO18_1", + [VDDIO_EMMC] = "VDDIO_EMMC", + [VDDIO_RTC] = "VDDIO_RTC", + [VDDIO_SD0] = "VDDIO_SD0", + [VDDIO_SD1] = "VDDIO_SD1", + [VDDIO_VIVO] = "VDDIO_VIVO", +}; + +static int sg2000_get_pull_up(struct cv1800_pin *pin, const u32 *psmap) +{ + u32 pstate = psmap[pin->power_domain]; + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + + if (type == IO_TYPE_1V8_ONLY) + return 79000; + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) + return 60000; + if (pstate == PIN_POWER_STATE_3V3) + return 60000; + + return -EINVAL; + } + + return -ENOTSUPP; +} + +static int sg2000_get_pull_down(struct cv1800_pin *pin, const u32 *psmap) +{ + u32 pstate = psmap[pin->power_domain]; + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + + if (type == IO_TYPE_1V8_ONLY) + return 87000; + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) + return 61000; + if (pstate == PIN_POWER_STATE_3V3) + return 62000; + + return -EINVAL; + } + + return -ENOTSUPP; +} + +static const u32 sg2000_1v8_oc_map[] = { + 12800, + 25300, + 37400, + 49000 +}; + +static const u32 sg2000_18od33_1v8_oc_map[] = { + 7800, + 11700, + 15500, + 19200, + 23000, + 26600, + 30200, + 33700 +}; + +static const u32 sg2000_18od33_3v3_oc_map[] = { + 5500, + 8200, + 10800, + 13400, + 16100, + 18700, + 21200, + 23700 +}; + +static const u32 sg2000_eth_oc_map[] = { + 15700, + 17800 +}; + +static int sg2000_get_oc_map(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map) +{ + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 pstate = psmap[pin->power_domain]; + + if (type == IO_TYPE_1V8_ONLY) { + *map = sg2000_1v8_oc_map; + return ARRAY_SIZE(sg2000_1v8_oc_map); + } + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) { + *map = sg2000_18od33_1v8_oc_map; + return ARRAY_SIZE(sg2000_18od33_1v8_oc_map); + } else if (pstate == PIN_POWER_STATE_3V3) { + *map = sg2000_18od33_3v3_oc_map; + return ARRAY_SIZE(sg2000_18od33_3v3_oc_map); + } + } + + if (type == IO_TYPE_ETH) { + *map = sg2000_eth_oc_map; + return ARRAY_SIZE(sg2000_eth_oc_map); + } + + return -ENOTSUPP; +} + +static const u32 sg2000_1v8_schmitt_map[] = { + 0, + 970000, + 1040000 +}; + +static const u32 sg2000_18od33_1v8_schmitt_map[] = { + 0, + 1070000 +}; + +static const u32 sg2000_18od33_3v3_schmitt_map[] = { + 0, + 1100000 +}; + +static int sg2000_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map) +{ + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 pstate = psmap[pin->power_domain]; + + if (type == IO_TYPE_1V8_ONLY) { + *map = sg2000_1v8_schmitt_map; + return ARRAY_SIZE(sg2000_1v8_schmitt_map); + } + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) { + *map = sg2000_18od33_1v8_schmitt_map; + return ARRAY_SIZE(sg2000_18od33_1v8_schmitt_map); + } else if (pstate == PIN_POWER_STATE_3V3) { + *map = sg2000_18od33_3v3_schmitt_map; + return ARRAY_SIZE(sg2000_18od33_3v3_schmitt_map); + } + } + + return -ENOTSUPP; +} + +static const struct cv1800_vddio_cfg_ops sg2000_vddio_cfg_ops = { + .get_pull_up = sg2000_get_pull_up, + 
.get_pull_down = sg2000_get_pull_down, + .get_oc_map = sg2000_get_oc_map, + .get_schmitt_map = sg2000_get_schmitt_map, +}; + +static const struct pinctrl_pin_desc sg2000_pins[] = { + PINCTRL_PIN(PIN_MIPI_TXM4, "MIPI_TXM4"), + PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"), + PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"), + PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"), + PINCTRL_PIN(PIN_VIVO_D2, "VIVO_D2"), + PINCTRL_PIN(PIN_VIVO_D3, "VIVO_D3"), + PINCTRL_PIN(PIN_VIVO_D10, "VIVO_D10"), + PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"), + PINCTRL_PIN(PIN_MIPI_TXP3, "MIPI_TXP3"), + PINCTRL_PIN(PIN_MIPI_TXM3, "MIPI_TXM3"), + PINCTRL_PIN(PIN_MIPI_TXP4, "MIPI_TXP4"), + PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"), + PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"), + PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"), + PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"), + PINCTRL_PIN(PIN_MIPIRX5N, "MIPIRX5N"), + PINCTRL_PIN(PIN_VIVO_D1, "VIVO_D1"), + PINCTRL_PIN(PIN_VIVO_D5, "VIVO_D5"), + PINCTRL_PIN(PIN_VIVO_D7, "VIVO_D7"), + PINCTRL_PIN(PIN_VIVO_D9, "VIVO_D9"), + PINCTRL_PIN(PIN_USB_ID, "USB_ID"), + PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"), + PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"), + PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"), + PINCTRL_PIN(PIN_CAM_PD0, "CAM_PD0"), + PINCTRL_PIN(PIN_CAM_MCLK0, "CAM_MCLK0"), + PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"), + PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"), + PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"), + PINCTRL_PIN(PIN_MIPIRX5P, "MIPIRX5P"), + PINCTRL_PIN(PIN_VIVO_CLK, "VIVO_CLK"), + PINCTRL_PIN(PIN_VIVO_D6, "VIVO_D6"), + PINCTRL_PIN(PIN_VIVO_D8, "VIVO_D8"), + PINCTRL_PIN(PIN_USB_VBUS_EN, "USB_VBUS_EN"), + PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"), + PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"), + PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"), + PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"), + PINCTRL_PIN(PIN_CAM_MCLK1, "CAM_MCLK1"), + PINCTRL_PIN(PIN_IIC3_SCL, "IIC3_SCL"), + PINCTRL_PIN(PIN_VIVO_D4, "VIVO_D4"), + PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"), + PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"), + PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"), + PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"), + PINCTRL_PIN(PIN_CAM_PD1, "CAM_PD1"), + PINCTRL_PIN(PIN_CAM_RST0, "CAM_RST0"), + PINCTRL_PIN(PIN_VIVO_D0, "VIVO_D0"), + PINCTRL_PIN(PIN_ADC1, "ADC1"), + PINCTRL_PIN(PIN_ADC2, "ADC2"), + PINCTRL_PIN(PIN_ADC3, "ADC3"), + PINCTRL_PIN(PIN_AUD_AOUTL, "AUD_AOUTL"), + PINCTRL_PIN(PIN_IIC3_SDA, "IIC3_SDA"), + PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"), + PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"), + PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"), + PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"), + PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"), + PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"), + PINCTRL_PIN(PIN_RSTN, "RSTN"), + PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"), + PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"), + PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"), + PINCTRL_PIN(PIN_AUD_AINR_MIC, "AUD_AINR_MIC"), + PINCTRL_PIN(PIN_IIC2_SCL, "IIC2_SCL"), + PINCTRL_PIN(PIN_IIC2_SDA, "IIC2_SDA"), + PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"), + PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"), + PINCTRL_PIN(PIN_UART2_RX, "UART2_RX"), + PINCTRL_PIN(PIN_UART2_CTS, "UART2_CTS"), + PINCTRL_PIN(PIN_UART2_TX, "UART2_TX"), + PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"), + PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"), + PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"), + PINCTRL_PIN(PIN_CLK32K, "CLK32K"), + PINCTRL_PIN(PIN_UART2_RTS, "UART2_RTS"), + PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"), + PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"), + PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"), + PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"), + PINCTRL_PIN(PIN_JTAG_CPU_TRST, "JTAG_CPU_TRST"), + PINCTRL_PIN(PIN_PWR_ON, "PWR_ON"), + PINCTRL_PIN(PIN_PWR_GPIO2, "PWR_GPIO2"), + 
PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"), + PINCTRL_PIN(PIN_CLK25M, "CLK25M"), + PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"), + PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"), + PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"), + PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"), + PINCTRL_PIN(PIN_PWR_WAKEUP1, "PWR_WAKEUP1"), + PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"), + PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"), + PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"), + PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"), + PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"), + PINCTRL_PIN(PIN_EMMC_RSTN, "EMMC_RSTN"), + PINCTRL_PIN(PIN_AUX0, "AUX0"), + PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"), + PINCTRL_PIN(PIN_PWR_SEQ3, "PWR_SEQ3"), + PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"), + PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"), + PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"), + PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"), + PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"), + PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"), + PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"), + PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"), + PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"), + PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"), + PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"), +}; + +static const struct cv1800_pin sg2000_pin_data[ARRAY_SIZE(sg2000_pins)] = { + CV1800_GENERAL_PIN(PIN_MIPI_TXM4, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x194, 7, + CV1800_PINCONF_AREA_SYS, 0xc60), + CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x18c, 7, + CV1800_PINCONF_AREA_SYS, 0xc58), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x178, 7, + CV1800_PINCONF_AREA_SYS, 0x118, 7, + CV1800_PINCONF_AREA_SYS, 0xc44), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x170, 7, + CV1800_PINCONF_AREA_SYS, 0x11c, 7, + CV1800_PINCONF_AREA_SYS, 0xc3c), + CV1800_GENERAL_PIN(PIN_VIVO_D2, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x154, 7, + CV1800_PINCONF_AREA_SYS, 0xc20), + CV1800_GENERAL_PIN(PIN_VIVO_D3, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x150, 7, + CV1800_PINCONF_AREA_SYS, 0xc1c), + CV1800_GENERAL_PIN(PIN_VIVO_D10, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x134, 7, + CV1800_PINCONF_AREA_SYS, 0xc00), + CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x108, 5, + CV1800_PINCONF_AREA_SYS, 0x820), + CV1800_GENERAL_PIN(PIN_MIPI_TXP3, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1a0, 7, + CV1800_PINCONF_AREA_SYS, 0xc6c), + CV1800_GENERAL_PIN(PIN_MIPI_TXM3, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x19c, 7, + CV1800_PINCONF_AREA_SYS, 0xc68), + CV1800_GENERAL_PIN(PIN_MIPI_TXP4, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x198, 7, + CV1800_PINCONF_AREA_SYS, 0xc64), + CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x190, 7, + CV1800_PINCONF_AREA_SYS, 0xc5c), + CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x184, 7, + CV1800_PINCONF_AREA_SYS, 0xc50), + CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x17c, 7, + CV1800_PINCONF_AREA_SYS, 0xc48), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x16c, 7, + CV1800_PINCONF_AREA_SYS, 0x120, 7, + CV1800_PINCONF_AREA_SYS, 0xc38), + CV1800_GENERAL_PIN(PIN_MIPIRX5N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + 
CV1800_PINCONF_AREA_SYS, 0x164, 7, + CV1800_PINCONF_AREA_SYS, 0xc30), + CV1800_GENERAL_PIN(PIN_VIVO_D1, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x158, 7, + CV1800_PINCONF_AREA_SYS, 0xc24), + CV1800_GENERAL_PIN(PIN_VIVO_D5, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x148, 7, + CV1800_PINCONF_AREA_SYS, 0xc14), + CV1800_GENERAL_PIN(PIN_VIVO_D7, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x140, 7, + CV1800_PINCONF_AREA_SYS, 0xc0c), + CV1800_GENERAL_PIN(PIN_VIVO_D9, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x138, 7, + CV1800_PINCONF_AREA_SYS, 0xc04), + CV1800_GENERAL_PIN(PIN_USB_ID, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0fc, 3, + CV1800_PINCONF_AREA_SYS, 0x814), + CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_EPHY, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x130, 7), + CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1a8, 7, + CV1800_PINCONF_AREA_SYS, 0xc74), + CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1a4, 7, + CV1800_PINCONF_AREA_SYS, 0xc70), + CV1800_GENERAL_PIN(PIN_CAM_PD0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x004, 4, + CV1800_PINCONF_AREA_SYS, 0xb04), + CV1800_GENERAL_PIN(PIN_CAM_MCLK0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x000, 3, + CV1800_PINCONF_AREA_SYS, 0xb00), + CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x188, 7, + CV1800_PINCONF_AREA_SYS, 0xc54), + CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x180, 7, + CV1800_PINCONF_AREA_SYS, 0xc4c), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x174, 7, + CV1800_PINCONF_AREA_SYS, 0x114, 7, + CV1800_PINCONF_AREA_SYS, 0xc40), + CV1800_GENERAL_PIN(PIN_MIPIRX5P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x168, 7, + CV1800_PINCONF_AREA_SYS, 0xc34), + CV1800_GENERAL_PIN(PIN_VIVO_CLK, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x160, 7, + CV1800_PINCONF_AREA_SYS, 0xc2c), + CV1800_GENERAL_PIN(PIN_VIVO_D6, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x144, 7, + CV1800_PINCONF_AREA_SYS, 0xc10), + CV1800_GENERAL_PIN(PIN_VIVO_D8, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x13c, 7, + CV1800_PINCONF_AREA_SYS, 0xc08), + CV1800_GENERAL_PIN(PIN_USB_VBUS_EN, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x100, 3, + CV1800_PINCONF_AREA_SYS, 0x818), + CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_EPHY, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x12c, 7), + CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1cc, 5, + CV1800_PINCONF_AREA_SYS, 0xc8c), + CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b0, 7, + CV1800_PINCONF_AREA_SYS, 0xc7c), + CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1ac, 7, + CV1800_PINCONF_AREA_SYS, 0xc78), + CV1800_GENERAL_PIN(PIN_CAM_MCLK1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x00c, 4, + CV1800_PINCONF_AREA_SYS, 0xb0c), + CV1800_GENERAL_PIN(PIN_IIC3_SCL, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x014, 3, + CV1800_PINCONF_AREA_SYS, 0xb14), + CV1800_GENERAL_PIN(PIN_VIVO_D4, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x14c, 7, + CV1800_PINCONF_AREA_SYS, 0xc18), + 
CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_EPHY, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x128, 7), + CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_EPHY, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x124, 7), + CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b8, 7, + CV1800_PINCONF_AREA_SYS, 0xc84), + CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b4, 7, + CV1800_PINCONF_AREA_SYS, 0xc80), + CV1800_GENERAL_PIN(PIN_CAM_PD1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x010, 6, + CV1800_PINCONF_AREA_SYS, 0xb10), + CV1800_GENERAL_PIN(PIN_CAM_RST0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x008, 6, + CV1800_PINCONF_AREA_SYS, 0xb08), + CV1800_GENERAL_PIN(PIN_VIVO_D0, VDDIO_VIVO, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x15c, 7, + CV1800_PINCONF_AREA_SYS, 0xc28), + CV1800_GENERAL_PIN(PIN_ADC1, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f8, 4, + CV1800_PINCONF_AREA_SYS, 0x810), + CV1800_GENERAL_PIN(PIN_ADC2, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f4, 7, + CV1800_PINCONF_AREA_SYS, 0x80c), + CV1800_GENERAL_PIN(PIN_ADC3, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f0, 7, + CV1800_PINCONF_AREA_SYS, 0x808), + CV1800_FUNC_PIN(PIN_AUD_AOUTL, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1c4, 5), + CV1800_GENERAL_PIN(PIN_IIC3_SDA, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x018, 3, + CV1800_PINCONF_AREA_SYS, 0xb18), + CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d4, 7, + CV1800_PINCONF_AREA_RTC, 0x05c), + CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1c8, 6), + CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d0, 7, + CV1800_PINCONF_AREA_RTC, 0x058), + CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0e4, 7, + CV1800_PINCONF_AREA_RTC, 0x06c), + CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0e0, 7, + CV1800_PINCONF_AREA_RTC, 0x068), + CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1bc, 5), + CV1800_GENERAL_PIN(PIN_RSTN, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0e8, 0, + CV1800_PINCONF_AREA_SYS, 0x800), + CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDDIO18_1, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0ec, 3, + CV1800_PINCONF_AREA_SYS, 0x804), + CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d8, 7, + CV1800_PINCONF_AREA_RTC, 0x060), + CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0dc, 7, + CV1800_PINCONF_AREA_RTC, 0x064), + CV1800_FUNC_PIN(PIN_AUD_AINR_MIC, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1c0, 6), + CV1800_GENERAL_PIN(PIN_IIC2_SCL, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0b8, 7, + CV1800_PINCONF_AREA_RTC, 0x040), + CV1800_GENERAL_PIN(PIN_IIC2_SDA, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0bc, 7, + CV1800_PINCONF_AREA_RTC, 0x044), + CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x034, 3, + CV1800_PINCONF_AREA_SYS, 0x900), + CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x028, 7, + CV1800_PINCONF_AREA_SYS, 0xa0c), + CV1800_GENERAL_PIN(PIN_UART2_RX, VDDIO_RTC, + 
IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0c8, 7, + CV1800_PINCONF_AREA_RTC, 0x050), + CV1800_GENERAL_PIN(PIN_UART2_CTS, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0cc, 7, + CV1800_PINCONF_AREA_RTC, 0x054), + CV1800_GENERAL_PIN(PIN_UART2_TX, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0c0, 7, + CV1800_PINCONF_AREA_RTC, 0x048), + CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x01c, 7, + CV1800_PINCONF_AREA_SYS, 0xa00), + CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x024, 7, + CV1800_PINCONF_AREA_SYS, 0xa08), + CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x020, 7, + CV1800_PINCONF_AREA_SYS, 0xa04), + CV1800_GENERAL_PIN(PIN_CLK32K, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0b0, 7, + CV1800_PINCONF_AREA_RTC, 0x038), + CV1800_GENERAL_PIN(PIN_UART2_RTS, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0c4, 7, + CV1800_PINCONF_AREA_RTC, 0x04c), + CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x030, 7, + CV1800_PINCONF_AREA_SYS, 0xa14), + CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x02c, 7, + CV1800_PINCONF_AREA_SYS, 0xa10), + CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x044, 7, + CV1800_PINCONF_AREA_SYS, 0x910), + CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x040, 7, + CV1800_PINCONF_AREA_SYS, 0x90c), + CV1800_GENERAL_PIN(PIN_JTAG_CPU_TRST, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x06c, 6, + CV1800_PINCONF_AREA_SYS, 0x938), + CV1800_GENERAL_PIN(PIN_PWR_ON, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x09c, 7, + CV1800_PINCONF_AREA_RTC, 0x024), + CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0ac, 7, + CV1800_PINCONF_AREA_RTC, 0x034), + CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a4, 4, + CV1800_PINCONF_AREA_RTC, 0x02c), + CV1800_GENERAL_PIN(PIN_CLK25M, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0b4, 7, + CV1800_PINCONF_AREA_RTC, 0x03c), + CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x038, 3, + CV1800_PINCONF_AREA_SYS, 0x904), + CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x03c, 3, + CV1800_PINCONF_AREA_SYS, 0x908), + CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x068, 7, + CV1800_PINCONF_AREA_SYS, 0x934), + CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x064, 7, + CV1800_PINCONF_AREA_SYS, 0x930), + CV1800_GENERAL_PIN(PIN_PWR_WAKEUP1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x094, 7, + CV1800_PINCONF_AREA_RTC, 0x01c), + CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x090, 7, + CV1800_PINCONF_AREA_RTC, 0x018), + CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a8, 7, + CV1800_PINCONF_AREA_RTC, 0x030), + CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x058, 3, + CV1800_PINCONF_AREA_SYS, 0x924), + CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x054, 3, 
+ CV1800_PINCONF_AREA_SYS, 0x920), + CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x04c, 3, + CV1800_PINCONF_AREA_SYS, 0x918), + CV1800_GENERAL_PIN(PIN_EMMC_RSTN, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x048, 4, + CV1800_PINCONF_AREA_SYS, 0x914), + CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x078, 7, + CV1800_PINCONF_AREA_SYS, 0x944), + CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x074, 7, + CV1800_PINCONF_AREA_SYS, 0x940), + CV1800_GENERAL_PIN(PIN_PWR_SEQ3, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x08c, 3, + CV1800_PINCONF_AREA_RTC, 0x010), + CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x07c, 0, + CV1800_PINCONF_AREA_RTC, 0x000), + CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x084, 3, + CV1800_PINCONF_AREA_RTC, 0x008), + CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x098, 7, + CV1800_PINCONF_AREA_RTC, 0x020), + CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x060, 3, + CV1800_PINCONF_AREA_SYS, 0x92c), + CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x05c, 3, + CV1800_PINCONF_AREA_SYS, 0x928), + CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x050, 3, + CV1800_PINCONF_AREA_SYS, 0x91c), + CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x070, 7, + CV1800_PINCONF_AREA_SYS, 0x93c), + CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1d0, 4, + CV1800_PINCONF_AREA_RTC, 0x0e0), + CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x080, 0, + CV1800_PINCONF_AREA_RTC, 0x004), + CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x088, 3, + CV1800_PINCONF_AREA_RTC, 0x00c), + CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a0, 0, + CV1800_PINCONF_AREA_RTC, 0x028), +}; + +static const struct cv1800_pinctrl_data sg2000_pindata = { + .pins = sg2000_pins, + .pindata = sg2000_pin_data, + .pdnames = sg2000_power_domain_desc, + .vddio_ops = &sg2000_vddio_cfg_ops, + .npins = ARRAY_SIZE(sg2000_pins), + .npd = ARRAY_SIZE(sg2000_power_domain_desc), +}; + +static const struct of_device_id sg2000_pinctrl_ids[] = { + { .compatible = "sophgo,sg2000-pinctrl", .data = &sg2000_pindata }, + { } +}; +MODULE_DEVICE_TABLE(of, sg2000_pinctrl_ids); + +static struct platform_driver sg2000_pinctrl_driver = { + .probe = cv1800_pinctrl_probe, + .driver = { + .name = "sg2000-pinctrl", + .suppress_bind_attrs = true, + .of_match_table = sg2000_pinctrl_ids, + }, +}; +module_platform_driver(sg2000_pinctrl_driver); + +MODULE_DESCRIPTION("Pinctrl driver for the SG2000 series SoC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2002.c b/drivers/pinctrl/sophgo/pinctrl-sg2002.c new file mode 100644 index 00000000000000..5c49208dcb59fc --- /dev/null +++ b/drivers/pinctrl/sophgo/pinctrl-sg2002.c @@ -0,0 +1,542 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo SG2002 SoC pinctrl driver. + * + * Copyright (C) 2024 Inochi Amaoto + * + * This file is generated from vendor pinout definition. 
+ */ + +#include +#include +#include + +#include +#include + +#include + +#include "pinctrl-cv18xx.h" + +enum SG2002_POWER_DOMAIN { + VDD18A_MIPI = 0, + VDD18A_USB_PLL_ETH = 1, + VDDIO_RTC = 2, + VDDIO_SD0_EMMC = 3, + VDDIO_SD1 = 4 +}; + +static const char *const sg2002_power_domain_desc[] = { + [VDD18A_MIPI] = "VDD18A_MIPI", + [VDD18A_USB_PLL_ETH] = "VDD18A_USB_PLL_ETH", + [VDDIO_RTC] = "VDDIO_RTC", + [VDDIO_SD0_EMMC] = "VDDIO_SD0_EMMC", + [VDDIO_SD1] = "VDDIO_SD1", +}; + +static int sg2002_get_pull_up(struct cv1800_pin *pin, const u32 *psmap) +{ + u32 pstate = psmap[pin->power_domain]; + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + + if (type == IO_TYPE_1V8_ONLY) + return 79000; + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) + return 60000; + if (pstate == PIN_POWER_STATE_3V3) + return 60000; + + return -EINVAL; + } + + return -ENOTSUPP; +} + +static int sg2002_get_pull_down(struct cv1800_pin *pin, const u32 *psmap) +{ + u32 pstate = psmap[pin->power_domain]; + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + + if (type == IO_TYPE_1V8_ONLY) + return 87000; + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) + return 61000; + if (pstate == PIN_POWER_STATE_3V3) + return 62000; + + return -EINVAL; + } + + return -ENOTSUPP; +} + +static const u32 sg2002_1v8_oc_map[] = { + 12800, + 25300, + 37400, + 49000 +}; + +static const u32 sg2002_18od33_1v8_oc_map[] = { + 7800, + 11700, + 15500, + 19200, + 23000, + 26600, + 30200, + 33700 +}; + +static const u32 sg2002_18od33_3v3_oc_map[] = { + 5500, + 8200, + 10800, + 13400, + 16100, + 18700, + 21200, + 23700 +}; + +static const u32 sg2002_eth_oc_map[] = { + 15700, + 17800 +}; + +static int sg2002_get_oc_map(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map) +{ + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 pstate = psmap[pin->power_domain]; + + if (type == IO_TYPE_1V8_ONLY) { + *map = sg2002_1v8_oc_map; + return ARRAY_SIZE(sg2002_1v8_oc_map); + } + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) { + *map = sg2002_18od33_1v8_oc_map; + return ARRAY_SIZE(sg2002_18od33_1v8_oc_map); + } else if (pstate == PIN_POWER_STATE_3V3) { + *map = sg2002_18od33_3v3_oc_map; + return ARRAY_SIZE(sg2002_18od33_3v3_oc_map); + } + } + + if (type == IO_TYPE_ETH) { + *map = sg2002_eth_oc_map; + return ARRAY_SIZE(sg2002_eth_oc_map); + } + + return -ENOTSUPP; +} + +static const u32 sg2002_1v8_schmitt_map[] = { + 0, + 970000, + 1040000 +}; + +static const u32 sg2002_18od33_1v8_schmitt_map[] = { + 0, + 1070000 +}; + +static const u32 sg2002_18od33_3v3_schmitt_map[] = { + 0, + 1100000 +}; + +static int sg2002_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap, + const u32 **map) +{ + enum cv1800_pin_io_type type = cv1800_pin_io_type(pin); + u32 pstate = psmap[pin->power_domain]; + + if (type == IO_TYPE_1V8_ONLY) { + *map = sg2002_1v8_schmitt_map; + return ARRAY_SIZE(sg2002_1v8_schmitt_map); + } + + if (type == IO_TYPE_1V8_OR_3V3) { + if (pstate == PIN_POWER_STATE_1V8) { + *map = sg2002_18od33_1v8_schmitt_map; + return ARRAY_SIZE(sg2002_18od33_1v8_schmitt_map); + } else if (pstate == PIN_POWER_STATE_3V3) { + *map = sg2002_18od33_3v3_schmitt_map; + return ARRAY_SIZE(sg2002_18od33_3v3_schmitt_map); + } + } + + return -ENOTSUPP; +} + +static const struct cv1800_vddio_cfg_ops sg2002_vddio_cfg_ops = { + .get_pull_up = sg2002_get_pull_up, + .get_pull_down = sg2002_get_pull_down, + .get_oc_map = sg2002_get_oc_map, + .get_schmitt_map = sg2002_get_schmitt_map, 
+}; + +static const struct pinctrl_pin_desc sg2002_pins[] = { + PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"), + PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"), + PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"), + PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"), + PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"), + PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"), + PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"), + PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"), + PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"), + PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"), + PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"), + PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"), + PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"), + PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"), + PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"), + PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"), + PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"), + PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"), + PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"), + PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"), + PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"), + PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"), + PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"), + PINCTRL_PIN(PIN_AUX0, "AUX0"), + PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"), + PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"), + PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"), + PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"), + PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"), + PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"), + PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"), + PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"), + PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"), + PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"), + PINCTRL_PIN(PIN_PWR_GPIO2, "PWR_GPIO2"), + PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"), + PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"), + PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"), + PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"), + PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"), + PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"), + PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"), + PINCTRL_PIN(PIN_ADC1, "ADC1"), + PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"), + PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"), + PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"), + PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"), + PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"), + PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"), + PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"), + PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"), + PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"), + PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"), + PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"), + PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"), + PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"), + PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"), + PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"), + PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"), + PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"), + PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"), + PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"), + PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"), + PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"), + PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"), +}; + +static const struct cv1800_pin sg2002_pin_data[ARRAY_SIZE(sg2002_pins)] = { + CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1bc, 5), + CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI, + IO_TYPE_AUDIO, + CV1800_PINCONF_AREA_SYS, 0x1c8, 6), + CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x01c, 7, + CV1800_PINCONF_AREA_SYS, 0xa00), + CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x020, 7, + CV1800_PINCONF_AREA_SYS, 0xa04), + CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x024, 7, + CV1800_PINCONF_AREA_SYS, 0xa08), + CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + 
CV1800_PINCONF_AREA_SYS, 0x028, 7, + CV1800_PINCONF_AREA_SYS, 0xa0c), + CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x02c, 7, + CV1800_PINCONF_AREA_SYS, 0xa10), + CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x030, 7, + CV1800_PINCONF_AREA_SYS, 0xa14), + CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x034, 3, + CV1800_PINCONF_AREA_SYS, 0x900), + CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x038, 3, + CV1800_PINCONF_AREA_SYS, 0x904), + CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x03c, 3, + CV1800_PINCONF_AREA_SYS, 0x908), + CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x040, 7, + CV1800_PINCONF_AREA_SYS, 0x90c), + CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x044, 7, + CV1800_PINCONF_AREA_SYS, 0x910), + CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x04c, 3, + CV1800_PINCONF_AREA_SYS, 0x918), + CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x050, 3, + CV1800_PINCONF_AREA_SYS, 0x91c), + CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x054, 3, + CV1800_PINCONF_AREA_SYS, 0x920), + CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x058, 3, + CV1800_PINCONF_AREA_SYS, 0x924), + CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x05c, 3, + CV1800_PINCONF_AREA_SYS, 0x928), + CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x060, 3, + CV1800_PINCONF_AREA_SYS, 0x92c), + CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x064, 7, + CV1800_PINCONF_AREA_SYS, 0x930), + CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x068, 7, + CV1800_PINCONF_AREA_SYS, 0x934), + CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x070, 7, + CV1800_PINCONF_AREA_SYS, 0x93c), + CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x074, 7, + CV1800_PINCONF_AREA_SYS, 0x940), + CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_SD0_EMMC, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x078, 7, + CV1800_PINCONF_AREA_SYS, 0x944), + CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1d0, 4, + CV1800_PINCONF_AREA_RTC, 0x0e0), + CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x07c, 0, + CV1800_PINCONF_AREA_RTC, 0x000), + CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x080, 0, + CV1800_PINCONF_AREA_RTC, 0x004), + CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x084, 3, + CV1800_PINCONF_AREA_RTC, 0x008), + CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x088, 3, + CV1800_PINCONF_AREA_RTC, 0x00c), + CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x090, 7, + CV1800_PINCONF_AREA_RTC, 0x018), + CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, 
VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x098, 7, + CV1800_PINCONF_AREA_RTC, 0x020), + CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a0, 0, + CV1800_PINCONF_AREA_RTC, 0x028), + CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a4, 4, + CV1800_PINCONF_AREA_RTC, 0x02c), + CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0a8, 7, + CV1800_PINCONF_AREA_RTC, 0x030), + CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0ac, 7, + CV1800_PINCONF_AREA_RTC, 0x034), + CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d0, 7, + CV1800_PINCONF_AREA_RTC, 0x058), + CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d4, 7, + CV1800_PINCONF_AREA_RTC, 0x05c), + CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0d8, 7, + CV1800_PINCONF_AREA_RTC, 0x060), + CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0dc, 7, + CV1800_PINCONF_AREA_RTC, 0x064), + CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0e0, 7, + CV1800_PINCONF_AREA_RTC, 0x068), + CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1, + IO_TYPE_1V8_OR_3V3, + CV1800_PINCONF_AREA_SYS, 0x0e4, 7, + CV1800_PINCONF_AREA_RTC, 0x06c), + CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDD18A_USB_PLL_ETH, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0ec, 3, + CV1800_PINCONF_AREA_SYS, 0x804), + CV1800_GENERAL_PIN(PIN_ADC1, VDD18A_USB_PLL_ETH, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x0f8, 4, + CV1800_PINCONF_AREA_SYS, 0x810), + CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDD18A_USB_PLL_ETH, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x108, 5, + CV1800_PINCONF_AREA_SYS, 0x820), + CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_USB_PLL_ETH, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x124, 7), + CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_USB_PLL_ETH, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x128, 7), + CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_USB_PLL_ETH, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x12c, 7), + CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_USB_PLL_ETH, + IO_TYPE_ETH, + CV1800_PINCONF_AREA_SYS, 0x130, 7), + CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDD18A_USB_PLL_ETH, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1cc, 5, + CV1800_PINCONF_AREA_SYS, 0xc8c), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x16c, 7, + CV1800_PINCONF_AREA_SYS, 0x120, 7, + CV1800_PINCONF_AREA_SYS, 0xc38), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x170, 7, + CV1800_PINCONF_AREA_SYS, 0x11c, 7, + CV1800_PINCONF_AREA_SYS, 0xc3c), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x174, 7, + CV1800_PINCONF_AREA_SYS, 0x114, 7, + CV1800_PINCONF_AREA_SYS, 0xc40), + CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x178, 7, + CV1800_PINCONF_AREA_SYS, 0x118, 7, + CV1800_PINCONF_AREA_SYS, 0xc44), + CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x17c, 7, + CV1800_PINCONF_AREA_SYS, 0xc48), + CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x180, 7, + CV1800_PINCONF_AREA_SYS, 0xc4c), + CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI, + 
IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x184, 7, + CV1800_PINCONF_AREA_SYS, 0xc50), + CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x188, 7, + CV1800_PINCONF_AREA_SYS, 0xc54), + CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x18c, 7, + CV1800_PINCONF_AREA_SYS, 0xc58), + CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x190, 7, + CV1800_PINCONF_AREA_SYS, 0xc5c), + CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1a4, 7, + CV1800_PINCONF_AREA_SYS, 0xc70), + CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1a8, 7, + CV1800_PINCONF_AREA_SYS, 0xc74), + CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1ac, 7, + CV1800_PINCONF_AREA_SYS, 0xc78), + CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b0, 7, + CV1800_PINCONF_AREA_SYS, 0xc7c), + CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b4, 7, + CV1800_PINCONF_AREA_SYS, 0xc80), + CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI, + IO_TYPE_1V8_ONLY, + CV1800_PINCONF_AREA_SYS, 0x1b8, 7, + CV1800_PINCONF_AREA_SYS, 0xc84), +}; + +static const struct cv1800_pinctrl_data sg2002_pindata = { + .pins = sg2002_pins, + .pindata = sg2002_pin_data, + .pdnames = sg2002_power_domain_desc, + .vddio_ops = &sg2002_vddio_cfg_ops, + .npins = ARRAY_SIZE(sg2002_pins), + .npd = ARRAY_SIZE(sg2002_power_domain_desc), +}; + +static const struct of_device_id sg2002_pinctrl_ids[] = { + { .compatible = "sophgo,sg2002-pinctrl", .data = &sg2002_pindata }, + { } +}; +MODULE_DEVICE_TABLE(of, sg2002_pinctrl_ids); + +static struct platform_driver sg2002_pinctrl_driver = { + .probe = cv1800_pinctrl_probe, + .driver = { + .name = "sg2002-pinctrl", + .suppress_bind_attrs = true, + .of_match_table = sg2002_pinctrl_ids, + }, +}; +module_platform_driver(sg2002_pinctrl_driver); + +MODULE_DESCRIPTION("Pinctrl driver for the SG2002 series SoC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 73bcf806af0ec2..bde67ee31417f0 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -1603,30 +1603,26 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, } ret = of_clk_get_parent_count(node); - clk = devm_clk_get(&pdev->dev, ret == 1 ? NULL : "apb"); + clk = devm_clk_get_enabled(&pdev->dev, ret == 1 ? 
NULL : "apb"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); goto gpiochip_error; } - ret = clk_prepare_enable(clk); - if (ret) - goto gpiochip_error; - pctl->irq = devm_kcalloc(&pdev->dev, pctl->desc->irq_banks, sizeof(*pctl->irq), GFP_KERNEL); if (!pctl->irq) { ret = -ENOMEM; - goto clk_error; + goto gpiochip_error; } for (i = 0; i < pctl->desc->irq_banks; i++) { pctl->irq[i] = platform_get_irq(pdev, i); if (pctl->irq[i] < 0) { ret = pctl->irq[i]; - goto clk_error; + goto gpiochip_error; } } @@ -1637,7 +1633,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, if (!pctl->domain) { dev_err(&pdev->dev, "Couldn't register IRQ domain\n"); ret = -ENOMEM; - goto clk_error; + goto gpiochip_error; } for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { @@ -1669,8 +1665,6 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, return 0; -clk_error: - clk_disable_unprepare(clk); gpiochip_error: gpiochip_remove(pctl->chip); return ret; diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index f5e5a23d222600..019b302db2b001 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -82,7 +82,7 @@ struct ti_iodelay_reg_data { u32 reg_start_offset; u32 reg_nr_per_pin; - struct regmap_config *regmap_config; + const struct regmap_config *regmap_config; }; /** @@ -273,6 +273,22 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod, return r; } +/** + * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device + * @data: IODelay device + * + * Deinitialize the IODelay device (basically just lock the region back up. + */ +static void ti_iodelay_pinconf_deinit_dev(void *data) +{ + struct ti_iodelay_device *iod = data; + const struct ti_iodelay_reg_data *reg = iod->reg_data; + + /* lock the iodelay region back again */ + regmap_update_bits(iod->regmap, reg->reg_global_lock_offset, + reg->global_lock_mask, reg->global_lock_val); +} + /** * ti_iodelay_pinconf_init_dev() - Initialize IODelay device * @iod: iodelay device @@ -295,6 +311,11 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) if (r) return r; + r = devm_add_action_or_reset(iod->dev, ti_iodelay_pinconf_deinit_dev, + iod); + if (r) + return r; + /* Read up Recalibration sequence done by bootloader */ r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val); if (r) @@ -353,21 +374,6 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) return 0; } -/** - * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device - * @iod: IODelay device - * - * Deinitialize the IODelay device (basically just lock the region back up. 
- */
-static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod)
-{
-	const struct ti_iodelay_reg_data *reg = iod->reg_data;
-
-	/* lock the iodelay region back again */
-	regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
-			   reg->global_lock_mask, reg->global_lock_val);
-}
-
 /**
  * ti_iodelay_get_pingroup() - Find the group mapped by a group selector
  * @iod: iodelay device
@@ -770,14 +776,14 @@ static int ti_iodelay_alloc_pins(struct device *dev,
 	return 0;
 }
 
-static struct regmap_config dra7_iodelay_regmap_config = {
+static const struct regmap_config dra7_iodelay_regmap_config = {
 	.reg_bits = 32,
 	.reg_stride = 4,
 	.val_bits = 32,
 	.max_register = 0xd1c,
 };
 
-static struct ti_iodelay_reg_data dra7_iodelay_data = {
+static const struct ti_iodelay_reg_data dra7_iodelay_data = {
 	.signature_mask = 0x0003f000,
 	.signature_value = 0x29,
 	.lock_mask = 0x00000400,
@@ -877,27 +883,11 @@ static int ti_iodelay_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, iod);
-
 	return pinctrl_enable(iod->pctl);
 }
 
-/**
- * ti_iodelay_remove() - standard remove
- * @pdev: platform device
- */
-static void ti_iodelay_remove(struct platform_device *pdev)
-{
-	struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
-
-	ti_iodelay_pinconf_deinit_dev(iod);
-
-	/* Expect other allocations to be freed by devm */
-}
-
 static struct platform_driver ti_iodelay_driver = {
 	.probe = ti_iodelay_probe,
-	.remove_new = ti_iodelay_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = ti_iodelay_of_match,
diff --git a/drivers/platform/arm64/acer-aspire1-ec.c b/drivers/platform/arm64/acer-aspire1-ec.c
index dbb1cce139654a..2df42406430db7 100644
--- a/drivers/platform/arm64/acer-aspire1-ec.c
+++ b/drivers/platform/arm64/acer-aspire1-ec.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2024, Nikita Travkin */
 
-#include
+#include
 #include
 #include
 #include
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index a2cdbfbaeae6b0..3ab668764383f5 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -749,10 +749,9 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
 	if (!src->num_i2c_peripherals)
 		return 0;
 
-	i2c_peripherals = kmemdup(src->i2c_peripherals,
-				  src->num_i2c_peripherals *
-				  sizeof(*src->i2c_peripherals),
-				  GFP_KERNEL);
+	i2c_peripherals = kmemdup_array(src->i2c_peripherals,
+					src->num_i2c_peripherals,
+					sizeof(*i2c_peripherals), GFP_KERNEL);
 	if (!i2c_peripherals)
 		return -ENOMEM;
 
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 4525ad1b59f448..839154c46e46cd 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -302,7 +302,6 @@ static const struct file_operations cros_ec_console_log_fops = {
 	.owner = THIS_MODULE,
 	.open = cros_ec_console_log_open,
 	.read = cros_ec_console_log_read,
-	.llseek = no_llseek,
 	.poll = cros_ec_console_log_poll,
 	.release = cros_ec_console_log_release,
 };
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index f0470248b10987..c784119ab5dc0c 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -631,12 +631,12 @@ static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
 
-static const struct lpc_driver_data framework_laptop_amd_lpc_driver_data
__initconst = { +static const struct lpc_driver_data framework_laptop_npcx_lpc_driver_data __initconst = { .quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY, .quirk_mmio_memory_base = 0xE00, }; -static const struct lpc_driver_data framework_laptop_11_lpc_driver_data __initconst = { +static const struct lpc_driver_data framework_laptop_mec_lpc_driver_data __initconst = { .quirks = CROS_EC_LPC_QUIRK_ACPI_ID|CROS_EC_LPC_QUIRK_AML_MUTEX, .quirk_acpi_id = "PNP0C09", .quirk_aml_mutex_name = "ECMT", @@ -696,21 +696,39 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { }, /* A small number of non-Chromebook/box machines also use the ChromeOS EC */ { - /* the Framework Laptop 13 (AMD Ryzen) and 16 (AMD Ryzen) */ + /* Framework Laptop (11th Gen Intel Core) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Framework"), - DMI_MATCH(DMI_PRODUCT_NAME, "AMD Ryzen"), - DMI_MATCH(DMI_PRODUCT_FAMILY, "Laptop"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Laptop"), + }, + .driver_data = (void *)&framework_laptop_mec_lpc_driver_data, + }, + { + /* Framework Laptop (12th Gen Intel Core) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Framework"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "12th Gen Intel Core"), + }, + .driver_data = (void *)&framework_laptop_mec_lpc_driver_data, + }, + { + /* Framework Laptop (13th Gen Intel Core) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Framework"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "13th Gen Intel Core"), }, - .driver_data = (void *)&framework_laptop_amd_lpc_driver_data, + .driver_data = (void *)&framework_laptop_mec_lpc_driver_data, }, { - /* the Framework Laptop (Intel 11th, 12th, 13th Generation) */ + /* + * All remaining Framework Laptop models (13 AMD Ryzen, 16 AMD + * Ryzen, Intel Core Ultra) + */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Framework"), - DMI_MATCH(DMI_PRODUCT_NAME, "Laptop"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Laptop"), }, - .driver_data = (void *)&framework_laptop_11_lpc_driver_data, + .driver_data = (void *)&framework_laptop_npcx_lpc_driver_data, }, { /* sentinel */ } }; diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 73f75958e15c10..5c9a53dffcf95a 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include "cros_ec_trace.h" diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c index 7ca9895a0065ee..3f281996a6863c 100644 --- a/drivers/platform/chrome/cros_ec_proto_test.c +++ b/drivers/platform/chrome/cros_ec_proto_test.c @@ -5,7 +5,7 @@ #include -#include +#include #include #include diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 4d305876ec08f6..c7781aea0b88b2 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -1285,6 +1285,15 @@ static int cros_typec_probe(struct platform_device *pdev) return ret; } +static void cros_typec_remove(struct platform_device *pdev) +{ + struct cros_typec_data *typec = platform_get_drvdata(pdev); + + cros_usbpd_unregister_notify(&typec->nb); + cancel_work_sync(&typec->port_work); + cros_unregister_ports(typec); +} + static int __maybe_unused cros_typec_suspend(struct device *dev) { struct cros_typec_data *typec = dev_get_drvdata(dev); @@ -1316,6 +1325,7 @@ static struct platform_driver cros_typec_driver = { .pm = &cros_typec_pm_ops, }, .probe = cros_typec_probe, + .remove_new = cros_typec_remove, }; 
module_platform_driver(cros_typec_driver); diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c index 983f2fa44ba574..99486086af6a16 100644 --- a/drivers/platform/chrome/wilco_ec/debugfs.c +++ b/drivers/platform/chrome/wilco_ec/debugfs.c @@ -156,7 +156,6 @@ static const struct file_operations fops_raw = { .owner = THIS_MODULE, .read = raw_read, .write = raw_write, - .llseek = no_llseek, }; #define CMD_KB_CHROME 0x88 diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c index bd1fb53ba02890..196e46a1d489dc 100644 --- a/drivers/platform/chrome/wilco_ec/event.c +++ b/drivers/platform/chrome/wilco_ec/event.c @@ -403,7 +403,6 @@ static const struct file_operations event_fops = { .poll = event_poll, .read = event_read, .release = event_release, - .llseek = no_llseek, .owner = THIS_MODULE, }; diff --git a/drivers/platform/chrome/wilco_ec/properties.c b/drivers/platform/chrome/wilco_ec/properties.c index c2bf4c95c5d2c9..9951c8db04da06 100644 --- a/drivers/platform/chrome/wilco_ec/properties.c +++ b/drivers/platform/chrome/wilco_ec/properties.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include /* Operation code; what the EC should do with the property */ enum ec_property_op { diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c index 21d4cbbb009a55..a87877e4300a5b 100644 --- a/drivers/platform/chrome/wilco_ec/telemetry.c +++ b/drivers/platform/chrome/wilco_ec/telemetry.c @@ -330,7 +330,6 @@ static const struct file_operations telem_fops = { .write = telem_write, .read = telem_read, .release = telem_release, - .llseek = no_llseek, .owner = THIS_MODULE, }; diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c index 91da56a704c7bc..88e208d458820a 100644 --- a/drivers/platform/cznic/turris-omnia-mcu-gpio.c +++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include "turris-omnia-mcu.h" diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c index ad953fb3c37afd..9a1d9292dc9ad3 100644 --- a/drivers/platform/cznic/turris-omnia-mcu-trng.c +++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c @@ -70,8 +70,8 @@ int omnia_mcu_register_trng(struct omnia_mcu *mcu) irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)]; irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx)); - if (!irq) - return dev_err_probe(dev, -ENXIO, "Cannot get TRNG IRQ\n"); + if (irq < 0) + return dev_err_probe(dev, irq, "Cannot get TRNG IRQ\n"); /* * If someone else cleared the TRNG interrupt but did not read the diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h index fed0d357fea39c..57ef5d3500436a 100644 --- a/drivers/platform/cznic/turris-omnia-mcu.h +++ b/drivers/platform/cznic/turris-omnia-mcu.h @@ -18,7 +18,7 @@ #include #include #include -#include +#include struct i2c_client; struct rtc_device; diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index 4ed9c7fd2b62af..9d18dfca6a673b 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -1774,6 +1774,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_ /* "event_list" sysfs to list events supported by the block */ attr = &pmc->block[blk_num].attr_event_list; + 
sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.mode = 0444; attr->dev_attr.show = mlxbf_pmc_event_list_show; attr->nr = blk_num; @@ -1787,6 +1788,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_ if (strstr(pmc->block_name[blk_num], "l3cache") || ((pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE))) { attr = &pmc->block[blk_num].attr_enable; + sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.mode = 0644; attr->dev_attr.show = mlxbf_pmc_enable_show; attr->dev_attr.store = mlxbf_pmc_enable_store; @@ -1814,6 +1816,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_ /* "eventX" and "counterX" sysfs to program and read counter values */ for (j = 0; j < pmc->block[blk_num].counters; ++j) { attr = &pmc->block[blk_num].attr_counter[j]; + sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.mode = 0644; attr->dev_attr.show = mlxbf_pmc_counter_show; attr->dev_attr.store = mlxbf_pmc_counter_store; @@ -1826,6 +1829,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_ attr = NULL; attr = &pmc->block[blk_num].attr_event[j]; + sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.mode = 0644; attr->dev_attr.show = mlxbf_pmc_event_show; attr->dev_attr.store = mlxbf_pmc_event_store; @@ -1861,6 +1865,7 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, unsigned int blk_num) while (count > 0) { --count; attr = &pmc->block[blk_num].attr_event[count]; + sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.mode = 0644; attr->dev_attr.show = mlxbf_pmc_counter_show; attr->dev_attr.store = mlxbf_pmc_counter_store; diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c index 921520475ff68f..48e9861bb571f1 100644 --- a/drivers/platform/olpc/olpc-ec.c +++ b/drivers/platform/olpc/olpc-ec.c @@ -332,9 +332,6 @@ static struct dentry *olpc_ec_setup_debugfs(void) struct dentry *dbgfs_dir; dbgfs_dir = debugfs_create_dir("olpc-ec", NULL); - if (IS_ERR_OR_NULL(dbgfs_dir)) - return NULL; - debugfs_create_file("cmd", 0600, dbgfs_dir, NULL, &ec_dbgfs_ops); return dbgfs_dir; diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 62ccbcb15c7446..fa7b3bda688a63 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c @@ -536,7 +536,7 @@ static int olpc_xo175_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *resp, dev_err(dev, "EC cmd error: timeout in STATE %d\n", priv->cmd_state); gpiod_set_value_cansleep(priv->gpio_cmd, 0); - spi_slave_abort(priv->spi); + spi_target_abort(priv->spi); olpc_xo175_ec_read_packet(priv); return -ETIMEDOUT; } @@ -653,7 +653,7 @@ static void olpc_xo175_ec_remove(struct spi_device *spi) if (pm_power_off == olpc_xo175_ec_power_off) pm_power_off = NULL; - spi_slave_abort(spi); + spi_target_abort(spi); platform_device_unregister(olpc_ec); olpc_ec = NULL; diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c index af8d573aae93a7..d68d231e716ee4 100644 --- a/drivers/platform/surface/aggregator/bus.c +++ b/drivers/platform/surface/aggregator/bus.c @@ -6,6 +6,7 @@ */ #include +#include #include #include @@ -441,6 +442,7 @@ static int ssam_add_client_device(struct device *parent, struct ssam_controller sdev->dev.parent = parent; sdev->dev.fwnode = fwnode_handle_get(node); + sdev->dev.of_node = to_of_node(node); status = ssam_device_add(sdev); if (status) diff --git a/drivers/platform/surface/aggregator/controller.c 
b/drivers/platform/surface/aggregator/controller.c index 7e89f547999b2a..a265e667538c1c 100644 --- a/drivers/platform/surface/aggregator/controller.c +++ b/drivers/platform/surface/aggregator/controller.c @@ -1104,13 +1104,6 @@ int ssam_controller_caps_load_from_acpi(acpi_handle handle, u64 funcs; int status; - /* Set defaults. */ - caps->ssh_power_profile = U32_MAX; - caps->screen_on_sleep_idle_timeout = U32_MAX; - caps->screen_off_sleep_idle_timeout = U32_MAX; - caps->d3_closes_handle = false; - caps->ssh_buffer_size = U32_MAX; - /* Pre-load supported DSM functions. */ status = ssam_dsm_get_functions(handle, &funcs); if (status) @@ -1149,6 +1142,52 @@ int ssam_controller_caps_load_from_acpi(acpi_handle handle, return 0; } +/** + * ssam_controller_caps_load_from_of() - Load controller capabilities from OF/DT. + * @dev: A pointer to the controller device + * @caps: Where to store the capabilities in. + * + * Return: Returns zero on success, a negative error code on failure. + */ +static int ssam_controller_caps_load_from_of(struct device *dev, struct ssam_controller_caps *caps) +{ + /* + * Every device starting with Surface Pro X through Laptop 7 uses these + * identical values, which makes them good defaults. + */ + caps->d3_closes_handle = true; + caps->screen_on_sleep_idle_timeout = 5000; + caps->screen_off_sleep_idle_timeout = 30; + caps->ssh_buffer_size = 48; + /* TODO: figure out power profile */ + + return 0; +} + +/** + * ssam_controller_caps_load() - Load controller capabilities + * @dev: A pointer to the controller device + * @caps: Where to store the capabilities in. + * + * Return: Returns zero on success, a negative error code on failure. + */ +static int ssam_controller_caps_load(struct device *dev, struct ssam_controller_caps *caps) +{ + acpi_handle handle = ACPI_HANDLE(dev); + + /* Set defaults. */ + caps->ssh_power_profile = U32_MAX; + caps->screen_on_sleep_idle_timeout = U32_MAX; + caps->screen_off_sleep_idle_timeout = U32_MAX; + caps->d3_closes_handle = false; + caps->ssh_buffer_size = U32_MAX; + + if (handle) + return ssam_controller_caps_load_from_acpi(handle, caps); + else + return ssam_controller_caps_load_from_of(dev, caps); +} + /** * ssam_controller_init() - Initialize SSAM controller. * @ctrl: The controller to initialize. 
@@ -1165,13 +1204,12 @@ int ssam_controller_caps_load_from_acpi(acpi_handle handle, int ssam_controller_init(struct ssam_controller *ctrl, struct serdev_device *serdev) { - acpi_handle handle = ACPI_HANDLE(&serdev->dev); int status; init_rwsem(&ctrl->lock); kref_init(&ctrl->kref); - status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps); + status = ssam_controller_caps_load(&serdev->dev, &ctrl->caps); if (status) return status; @@ -2716,11 +2754,12 @@ int ssam_irq_setup(struct ssam_controller *ctrl) const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN; gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS); - if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); - - irq = gpiod_to_irq(gpiod); - gpiod_put(gpiod); + if (IS_ERR(gpiod)) { + irq = fwnode_irq_get(dev_fwnode(dev), 0); + } else { + irq = gpiod_to_irq(gpiod); + gpiod_put(gpiod); + } if (irq < 0) return irq; diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c index 797d0645bd77fa..c58e1fdd1a5f75 100644 --- a/drivers/platform/surface/aggregator/core.c +++ b/drivers/platform/surface/aggregator/core.c @@ -17,9 +17,12 @@ #include #include #include +#include +#include #include #include #include +#include #include #include @@ -299,7 +302,7 @@ static const struct attribute_group ssam_sam_group = { }; -/* -- ACPI based device setup. ---------------------------------------------- */ +/* -- Serial device setup. -------------------------------------------------- */ static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc, void *ctx) @@ -352,13 +355,28 @@ static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc, return AE_CTRL_TERMINATE; } -static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle, - struct serdev_device *serdev) +static int ssam_serdev_setup_via_acpi(struct serdev_device *serdev, acpi_handle handle) { - return acpi_walk_resources(handle, METHOD_NAME__CRS, - ssam_serdev_setup_via_acpi_crs, serdev); + acpi_status status; + + status = acpi_walk_resources(handle, METHOD_NAME__CRS, + ssam_serdev_setup_via_acpi_crs, serdev); + + return status ? -ENXIO : 0; } +static int ssam_serdev_setup(struct acpi_device *ssh, struct serdev_device *serdev) +{ + if (ssh) + return ssam_serdev_setup_via_acpi(serdev, ssh->handle); + + /* TODO: these values may differ per board/implementation */ + serdev_device_set_baudrate(serdev, 4 * HZ_PER_MHZ); + serdev_device_set_flow_control(serdev, true); + serdev_device_set_parity(serdev, SERDEV_PARITY_NONE); + + return 0; +} /* -- Power management. ----------------------------------------------------- */ @@ -621,16 +639,17 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev) struct device *dev = &serdev->dev; struct acpi_device *ssh = ACPI_COMPANION(dev); struct ssam_controller *ctrl; - acpi_status astatus; int status; - status = gpiod_count(dev, NULL); - if (status < 0) - return dev_err_probe(dev, status, "no GPIO found\n"); + if (ssh) { + status = gpiod_count(dev, NULL); + if (status < 0) + return dev_err_probe(dev, status, "no GPIO found\n"); - status = devm_acpi_dev_add_driver_gpios(dev, ssam_acpi_gpios); - if (status) - return status; + status = devm_acpi_dev_add_driver_gpios(dev, ssam_acpi_gpios); + if (status) + return status; + } /* Allocate controller. 
*/ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); @@ -655,9 +674,9 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev) goto err_devopen; } - astatus = ssam_serdev_setup_via_acpi(ssh->handle, serdev); - if (ACPI_FAILURE(astatus)) { - status = dev_err_probe(dev, -ENXIO, "failed to setup serdev\n"); + status = ssam_serdev_setup(ssh, serdev); + if (status) { + status = dev_err_probe(dev, status, "failed to setup serdev\n"); goto err_devinit; } @@ -717,7 +736,23 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev) * For now let's thus default power/wakeup to false. */ device_set_wakeup_capable(dev, true); - acpi_dev_clear_dependencies(ssh); + + /* + * When using DT, we have to register the platform hub driver manually, + * as it can't be matched based on top-level board compatible (like it + * does the ACPI case). + */ + if (!ssh) { + struct platform_device *ph_pdev = + platform_device_register_simple("surface_aggregator_platform_hub", + 0, NULL, 0); + if (IS_ERR(ph_pdev)) + return dev_err_probe(dev, PTR_ERR(ph_pdev), + "Failed to register the platform hub driver\n"); + } + + if (ssh) + acpi_dev_clear_dependencies(ssh); return 0; @@ -782,18 +817,27 @@ static void ssam_serial_hub_remove(struct serdev_device *serdev) device_set_wakeup_capable(&serdev->dev, false); } -static const struct acpi_device_id ssam_serial_hub_match[] = { +static const struct acpi_device_id ssam_serial_hub_acpi_match[] = { { "MSHW0084", 0 }, { }, }; -MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match); +MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_acpi_match); + +#ifdef CONFIG_OF +static const struct of_device_id ssam_serial_hub_of_match[] = { + { .compatible = "microsoft,surface-sam", }, + { }, +}; +MODULE_DEVICE_TABLE(of, ssam_serial_hub_of_match); +#endif static struct serdev_device_driver ssam_serial_hub = { .probe = ssam_serial_hub_probe, .remove = ssam_serial_hub_remove, .driver = { .name = "surface_serial_hub", - .acpi_match_table = ssam_serial_hub_match, + .acpi_match_table = ACPI_PTR(ssam_serial_hub_acpi_match), + .of_match_table = of_match_ptr(ssam_serial_hub_of_match), .pm = &ssam_serial_hub_pm_ops, .shutdown = ssam_serial_hub_shutdown, .probe_type = PROBE_PREFER_ASYNCHRONOUS, diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h index 438873e060986d..80aa568a0759e8 100644 --- a/drivers/platform/surface/aggregator/ssh_msgb.h +++ b/drivers/platform/surface/aggregator/ssh_msgb.h @@ -8,7 +8,7 @@ #ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H #define _SURFACE_AGGREGATOR_SSH_MSGB_H -#include +#include #include #include diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c index d726b1a8631999..6081b0146d5f21 100644 --- a/drivers/platform/surface/aggregator/ssh_packet_layer.c +++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c @@ -5,7 +5,7 @@ * Copyright (C) 2019-2022 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c index a6f66869436527..6cfda85d3b33b1 100644 --- a/drivers/platform/surface/aggregator/ssh_parser.c +++ b/drivers/platform/surface/aggregator/ssh_parser.c @@ -5,7 +5,7 @@ * Copyright (C) 2019-2022 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c index 90634dcacabf28..879ca9ee7ff683 100644 --- 
a/drivers/platform/surface/aggregator/ssh_request_layer.c +++ b/drivers/platform/surface/aggregator/ssh_request_layer.c @@ -5,7 +5,7 @@ * Copyright (C) 2019-2022 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h index 55cc61bba1da6e..caf7d3cb5d8b79 100644 --- a/drivers/platform/surface/aggregator/trace.h +++ b/drivers/platform/surface/aggregator/trace.h @@ -13,7 +13,7 @@ #include -#include +#include #include TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ); diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c index 4c0f92562a794f..1ee5239269ae03 100644 --- a/drivers/platform/surface/surface3_power.c +++ b/drivers/platform/surface/surface3_power.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #define SURFACE_3_POLL_INTERVAL (2 * HZ) #define SURFACE_3_STRLEN 10 diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c index 20f3870915d2b7..14a9d8a267cbbe 100644 --- a/drivers/platform/surface/surface_acpi_notify.c +++ b/drivers/platform/surface/surface_acpi_notify.c @@ -11,7 +11,7 @@ * Copyright (C) 2019-2022 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c index 07e065b9159f70..165b1416230d71 100644 --- a/drivers/platform/surface/surface_aggregator_cdev.c +++ b/drivers/platform/surface/surface_aggregator_cdev.c @@ -670,7 +670,6 @@ static const struct file_operations ssam_controller_fops = { .fasync = ssam_cdev_fasync, .unlocked_ioctl = ssam_cdev_device_ioctl, .compat_ioctl = ssam_cdev_device_ioctl, - .llseek = no_llseek, }; diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c index a23dff35f8ca23..25c8aa2131d638 100644 --- a/drivers/platform/surface/surface_aggregator_registry.c +++ b/drivers/platform/surface/surface_aggregator_registry.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -291,6 +292,18 @@ static const struct software_node *ssam_node_group_sl6[] = { NULL, }; +/* Devices for Surface Laptop 7. */ +static const struct software_node *ssam_node_group_sl7[] = { + &ssam_node_root, + &ssam_node_bat_ac, + &ssam_node_bat_main, + &ssam_node_tmp_perf_profile_with_fan, + &ssam_node_fan_speed, + &ssam_node_hid_sam_keyboard, + /* TODO: evaluate thermal sensors devices when we get a driver for that */ + NULL, +}; + /* Devices for Surface Laptop Studio 1. */ static const struct software_node *ssam_node_group_sls1[] = { &ssam_node_root, @@ -380,7 +393,7 @@ static const struct software_node *ssam_node_group_sp9[] = { /* -- SSAM platform/meta-hub driver. 
---------------------------------------- */ -static const struct acpi_device_id ssam_platform_hub_match[] = { +static const struct acpi_device_id ssam_platform_hub_acpi_match[] = { /* Surface Pro 4, 5, and 6 (OMBR < 0x10) */ { "MSHW0081", (unsigned long)ssam_node_group_gen5 }, @@ -446,18 +459,39 @@ static const struct acpi_device_id ssam_platform_hub_match[] = { { }, }; -MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match); +MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_acpi_match); + +static const struct of_device_id ssam_platform_hub_of_match[] __maybe_unused = { + /* Surface Laptop 7 */ + { .compatible = "microsoft,romulus13", (void *)ssam_node_group_sl7 }, + { .compatible = "microsoft,romulus15", (void *)ssam_node_group_sl7 }, + { }, +}; static int ssam_platform_hub_probe(struct platform_device *pdev) { const struct software_node **nodes; + const struct of_device_id *match; + struct device_node *fdt_root; struct ssam_controller *ctrl; struct fwnode_handle *root; int status; nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev); - if (!nodes) - return -ENODEV; + if (!nodes) { + fdt_root = of_find_node_by_path("/"); + if (!fdt_root) + return -ENODEV; + + match = of_match_node(ssam_platform_hub_of_match, fdt_root); + of_node_put(fdt_root); + if (!match) + return -ENODEV; + + nodes = (const struct software_node **)match->data; + if (!nodes) + return -ENODEV; + } /* * As we're adding the SSAM client devices as children under this device @@ -506,12 +540,13 @@ static struct platform_driver ssam_platform_hub_driver = { .remove_new = ssam_platform_hub_remove, .driver = { .name = "surface_aggregator_platform_hub", - .acpi_match_table = ssam_platform_hub_match, + .acpi_match_table = ssam_platform_hub_acpi_match, .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; module_platform_driver(ssam_platform_hub_driver); +MODULE_ALIAS("platform:surface_aggregator_platform_hub"); MODULE_AUTHOR("Maximilian Luz "); MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module"); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c index c0a1a5869246ed..ffa36ed9289700 100644 --- a/drivers/platform/surface/surface_aggregator_tabletsw.c +++ b/drivers/platform/surface/surface_aggregator_tabletsw.c @@ -5,7 +5,7 @@ * Copyright (C) 2022 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c index 2de843b7ea707d..89ca6b50e8129b 100644 --- a/drivers/platform/surface/surface_dtx.c +++ b/drivers/platform/surface/surface_dtx.c @@ -555,7 +555,6 @@ static const struct file_operations surface_dtx_fops = { .fasync = surface_dtx_fasync, .unlocked_ioctl = surface_dtx_ioctl, .compat_ioctl = surface_dtx_ioctl, - .llseek = no_llseek, }; diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c index 3de864bc66108d..08db878f1d7d46 100644 --- a/drivers/platform/surface/surface_platform_profile.c +++ b/drivers/platform/surface/surface_platform_profile.c @@ -6,7 +6,7 @@ * Copyright (C) 2021-2022 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index ddfccc226751f4..3875abba5a7903 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1000,7 +1000,8 @@ config TOPSTAR_LAPTOP config SERIAL_MULTI_INSTANTIATE tristate "Serial bus multi 
instantiate pseudo device driver" - depends on I2C && SPI && ACPI + depends on ACPI + depends on (I2C && !SPI) || (!I2C && SPI) || (I2C && SPI) help Some ACPI-based systems list multiple devices in a single ACPI firmware-node. This driver will instantiate separate clients diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 38c932df6446ac..7169b84ccdb6e2 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -1685,7 +1684,7 @@ static int acer_backlight_init(struct device *dev) acer_backlight_device = bd; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; bd->props.brightness = read_brightness(bd); backlight_update_status(bd); return 0; @@ -2224,39 +2223,25 @@ static void acer_rfkill_exit(void) } } -static void acer_wmi_notify(u32 value, void *context) +static void acer_wmi_notify(union acpi_object *obj, void *context) { - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; struct event_return_value return_value; - acpi_status status; u16 device_state; const struct key_entry *key; u32 scancode; - status = wmi_get_event_data(value, &response); - if (status != AE_OK) { - pr_warn("bad event status 0x%x\n", status); - return; - } - - obj = (union acpi_object *)response.pointer; - if (!obj) return; if (obj->type != ACPI_TYPE_BUFFER) { pr_warn("Unknown response received %d\n", obj->type); - kfree(obj); return; } if (obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); - kfree(obj); return; } return_value = *((struct event_return_value *)obj->buffer.pointer); - kfree(obj); switch (return_value.function) { case WMID_HOTKEY_EVENT: diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 018c484296162b..4c3bb68e8fe4b1 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -378,33 +378,13 @@ static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal, int *t) return 0; } -static int acerhdf_bind(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) +static bool acerhdf_should_bind(struct thermal_zone_device *thermal, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) { /* if the cooling device is the one from acerhdf bind it */ - if (cdev != cl_dev) - return 0; - - if (thermal_zone_bind_cooling_device(thermal, 0, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, - THERMAL_WEIGHT_DEFAULT)) { - pr_err("error binding cooling dev\n"); - return -EINVAL; - } - return 0; -} - -static int acerhdf_unbind(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) -{ - if (cdev != cl_dev) - return 0; - - if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) { - pr_err("error unbinding cooling dev\n"); - return -EINVAL; - } - return 0; + return cdev == cl_dev && trip->type == THERMAL_TRIP_ACTIVE; } static inline void acerhdf_revert_to_bios_mode(void) @@ -447,8 +427,7 @@ static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal, /* bind callback functions to thermalzone */ static struct thermal_zone_device_ops acerhdf_dev_ops = { - .bind = acerhdf_bind, - .unbind = acerhdf_unbind, + .should_bind = acerhdf_should_bind, .get_temp = acerhdf_get_ec_temp, .change_mode = acerhdf_change_mode, .get_crit_temp = acerhdf_get_crit_temp, diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c index 
1157ec148880b5..d5b496433d6975 100644 --- a/drivers/platform/x86/amd/pmf/acpi.c +++ b/drivers/platform/x86/amd/pmf/acpi.c @@ -282,6 +282,29 @@ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx) return 0; } +static int apmf_notify_smart_pc_update(struct amd_pmf_dev *pdev, u32 val, u32 preq, u32 index) +{ + struct amd_pmf_notify_smart_pc_update args; + struct acpi_buffer params; + union acpi_object *info; + + args.size = sizeof(args); + args.pending_req = preq; + args.custom_bios[index] = val; + + params.length = sizeof(args); + params.pointer = &args; + + info = apmf_if_call(pdev, APMF_FUNC_NOTIFY_SMART_PC_UPDATES, ¶ms); + if (!info) + return -EIO; + + kfree(info); + dev_dbg(pdev->dev, "Notify smart pc update, val: %u\n", val); + + return 0; +} + int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data) { return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data)); @@ -447,6 +470,14 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev) return 0; } +int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq, u32 idx) +{ + if (!is_apmf_func_supported(dev, APMF_FUNC_NOTIFY_SMART_PC_UPDATES)) + return -EINVAL; + + return apmf_notify_smart_pc_update(dev, val, preq, idx); +} + void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev) { acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev); diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index 8f1f719befa3e7..d6af0ca036f171 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -37,12 +37,6 @@ #define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE #define AMD_PMF_RESULT_FAILED 0xFF -/* List of supported CPU ids */ -#define AMD_CPU_ID_RMB 0x14b5 -#define AMD_CPU_ID_PS 0x14e8 -#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 -#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122 - #define PMF_MSG_DELAY_MIN_US 50 #define RESPONSE_REGISTER_LOOP_MAX 20000 @@ -261,7 +255,19 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer) /* Get Metrics Table Address */ if (alloc_buffer) { - dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL); + switch (dev->cpu_id) { + case AMD_CPU_ID_PS: + case AMD_CPU_ID_RMB: + dev->mtable_size = sizeof(dev->m_table); + break; + case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + dev->mtable_size = sizeof(dev->m_table_v2); + break; + default: + dev_err(dev->dev, "Invalid CPU id: 0x%x", dev->cpu_id); + } + + dev->buf = kzalloc(dev->mtable_size, GFP_KERNEL); if (!dev->buf) return -ENOMEM; } diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c index 48870ca52b4139..7cde5733b9cacf 100644 --- a/drivers/platform/x86/amd/pmf/pmf-quirks.c +++ b/drivers/platform/x86/amd/pmf/pmf-quirks.c @@ -37,6 +37,14 @@ static const struct dmi_system_id fwbug_list[] = { }, .driver_data = &quirk_no_sps_bug, }, + { + .ident = "ASUS TUF Gaming A14", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "FA401W"), + }, + .driver_data = &quirk_no_sps_bug, + }, {} }; diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h index 753d5662c08019..8ce8816da9c168 100644 --- a/drivers/platform/x86/amd/pmf/pmf.h +++ b/drivers/platform/x86/amd/pmf/pmf.h @@ -19,6 +19,12 @@ #define POLICY_SIGN_COOKIE 0x31535024 #define POLICY_COOKIE_OFFSET 0x10 +/* List of supported CPU ids */ +#define AMD_CPU_ID_RMB 0x14b5 +#define AMD_CPU_ID_PS 0x14e8 +#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 +#define 
PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122 + struct cookie_header { u32 sign; u32 length; @@ -35,6 +41,7 @@ struct cookie_header { #define APMF_FUNC_STATIC_SLIDER_GRANULAR 9 #define APMF_FUNC_DYN_SLIDER_AC 11 #define APMF_FUNC_DYN_SLIDER_DC 12 +#define APMF_FUNC_NOTIFY_SMART_PC_UPDATES 14 #define APMF_FUNC_SBIOS_HEARTBEAT_V2 16 /* Message Definitions */ @@ -82,7 +89,17 @@ struct cookie_header { #define PMF_POLICY_STT_SKINTEMP_APU 7 #define PMF_POLICY_STT_SKINTEMP_HS2 8 #define PMF_POLICY_SYSTEM_STATE 9 +#define PMF_POLICY_BIOS_OUTPUT_1 10 +#define PMF_POLICY_BIOS_OUTPUT_2 11 #define PMF_POLICY_P3T 38 +#define PMF_POLICY_BIOS_OUTPUT_3 57 +#define PMF_POLICY_BIOS_OUTPUT_4 58 +#define PMF_POLICY_BIOS_OUTPUT_5 59 +#define PMF_POLICY_BIOS_OUTPUT_6 60 +#define PMF_POLICY_BIOS_OUTPUT_7 61 +#define PMF_POLICY_BIOS_OUTPUT_8 62 +#define PMF_POLICY_BIOS_OUTPUT_9 63 +#define PMF_POLICY_BIOS_OUTPUT_10 64 /* TA macros */ #define PMF_TA_IF_VERSION_MAJOR 1 @@ -181,6 +198,53 @@ struct apmf_fan_idx { u32 fan_ctl_idx; } __packed; +struct smu_pmf_metrics_v2 { + u16 core_frequency[16]; /* MHz */ + u16 core_power[16]; /* mW */ + u16 core_temp[16]; /* centi-C */ + u16 gfx_temp; /* centi-C */ + u16 soc_temp; /* centi-C */ + u16 stapm_opn_limit; /* mW */ + u16 stapm_cur_limit; /* mW */ + u16 infra_cpu_maxfreq; /* MHz */ + u16 infra_gfx_maxfreq; /* MHz */ + u16 skin_temp; /* centi-C */ + u16 gfxclk_freq; /* MHz */ + u16 fclk_freq; /* MHz */ + u16 gfx_activity; /* GFX busy % [0-100] */ + u16 socclk_freq; /* MHz */ + u16 vclk_freq; /* MHz */ + u16 vcn_activity; /* VCN busy % [0-100] */ + u16 vpeclk_freq; /* MHz */ + u16 ipuclk_freq; /* MHz */ + u16 ipu_busy[8]; /* NPU busy % [0-100] */ + u16 dram_reads; /* MB/sec */ + u16 dram_writes; /* MB/sec */ + u16 core_c0residency[16]; /* C0 residency % [0-100] */ + u16 ipu_power; /* mW */ + u32 apu_power; /* mW */ + u32 gfx_power; /* mW */ + u32 dgpu_power; /* mW */ + u32 socket_power; /* mW */ + u32 all_core_power; /* mW */ + u32 filter_alpha_value; /* time constant [us] */ + u32 metrics_counter; + u16 memclk_freq; /* MHz */ + u16 mpipuclk_freq; /* MHz */ + u16 ipu_reads; /* MB/sec */ + u16 ipu_writes; /* MB/sec */ + u32 throttle_residency_prochot; + u32 throttle_residency_spl; + u32 throttle_residency_fppt; + u32 throttle_residency_sppt; + u32 throttle_residency_thm_core; + u32 throttle_residency_thm_gfx; + u32 throttle_residency_thm_soc; + u16 psys; + u16 spare1; + u32 spare[6]; +} __packed; + struct smu_pmf_metrics { u16 gfxclk_freq; /* in MHz */ u16 socclk_freq; /* in MHz */ @@ -278,6 +342,7 @@ struct amd_pmf_dev { int hb_interval; /* SBIOS heartbeat interval */ struct delayed_work heart_beat; struct smu_pmf_metrics m_table; + struct smu_pmf_metrics_v2 m_table_v2; struct delayed_work work_buffer; ktime_t start_time; int socket_power_history[AVG_SAMPLE_SIZE]; @@ -302,6 +367,7 @@ struct amd_pmf_dev { bool smart_pc_enabled; u16 pmf_if_version; struct input_dev *pmf_idev; + size_t mtable_size; }; struct apmf_sps_prop_granular_v2 { @@ -344,6 +410,12 @@ struct os_power_slider { u8 slider_event; } __packed; +struct amd_pmf_notify_smart_pc_update { + u16 size; + u32 pending_req; + u32 custom_bios[10]; +} __packed; + struct fan_table_control { bool manual; unsigned long fan_id; @@ -717,6 +789,7 @@ extern const struct attribute_group cnqf_feature_attribute_group; int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev); void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev); int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev); +int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, 
u32 val, u32 preq, u32 idx); /* Smart PC - TA interfaces */ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in); diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c index 3c153fb1425e9f..b5183969f9bf9a 100644 --- a/drivers/platform/x86/amd/pmf/spc.c +++ b/drivers/platform/x86/amd/pmf/spc.c @@ -53,30 +53,49 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table * void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {} #endif -static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) +static void amd_pmf_get_c0_residency(u16 *core_res, size_t size, struct ta_pmf_enact_table *in) { u16 max, avg = 0; int i; - memset(dev->buf, 0, sizeof(dev->m_table)); - amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL); - memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table)); - - in->ev_info.socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power; - in->ev_info.skin_temperature = dev->m_table.skin_temp; - /* Get the avg and max C0 residency of all the cores */ - max = dev->m_table.avg_core_c0residency[0]; - for (i = 0; i < ARRAY_SIZE(dev->m_table.avg_core_c0residency); i++) { - avg += dev->m_table.avg_core_c0residency[i]; - if (dev->m_table.avg_core_c0residency[i] > max) - max = dev->m_table.avg_core_c0residency[i]; + max = *core_res; + for (i = 0; i < size; i++) { + avg += core_res[i]; + if (core_res[i] > max) + max = core_res[i]; } - - avg = DIV_ROUND_CLOSEST(avg, ARRAY_SIZE(dev->m_table.avg_core_c0residency)); + avg = DIV_ROUND_CLOSEST(avg, size); in->ev_info.avg_c0residency = avg; in->ev_info.max_c0residency = max; - in->ev_info.gfx_busy = dev->m_table.avg_gfx_activity; +} + +static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) +{ + /* Get the updated metrics table data */ + memset(dev->buf, 0, dev->mtable_size); + amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL); + + switch (dev->cpu_id) { + case AMD_CPU_ID_PS: + memcpy(&dev->m_table, dev->buf, dev->mtable_size); + in->ev_info.socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power; + in->ev_info.skin_temperature = dev->m_table.skin_temp; + in->ev_info.gfx_busy = dev->m_table.avg_gfx_activity; + amd_pmf_get_c0_residency(dev->m_table.avg_core_c0residency, + ARRAY_SIZE(dev->m_table.avg_core_c0residency), in); + break; + case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + memcpy(&dev->m_table_v2, dev->buf, dev->mtable_size); + in->ev_info.socket_power = dev->m_table_v2.apu_power + dev->m_table_v2.dgpu_power; + in->ev_info.skin_temperature = dev->m_table_v2.skin_temp; + in->ev_info.gfx_busy = dev->m_table_v2.gfx_activity; + amd_pmf_get_c0_residency(dev->m_table_v2.core_c0residency, + ARRAY_SIZE(dev->m_table_v2.core_c0residency), in); + break; + default: + dev_err(dev->dev, "Unsupported CPU id: 0x%x", dev->cpu_id); + } } static const char * const pmf_battery_supply_name[] = { diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c index e246367aacee21..19c27b6e46663c 100644 --- a/drivers/platform/x86/amd/pmf/tee-if.c +++ b/drivers/platform/x86/amd/pmf/tee-if.c @@ -160,6 +160,46 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ dev_dbg(dev->dev, "update SYSTEM_STATE: %s\n", amd_pmf_uevent_as_str(val)); break; + + case PMF_POLICY_BIOS_OUTPUT_1: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(0), 0); + break; + + case PMF_POLICY_BIOS_OUTPUT_2: + amd_pmf_smartpc_apply_bios_output(dev, val, 
BIT(1), 1); + break; + + case PMF_POLICY_BIOS_OUTPUT_3: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(2), 2); + break; + + case PMF_POLICY_BIOS_OUTPUT_4: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(3), 3); + break; + + case PMF_POLICY_BIOS_OUTPUT_5: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(4), 4); + break; + + case PMF_POLICY_BIOS_OUTPUT_6: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(5), 5); + break; + + case PMF_POLICY_BIOS_OUTPUT_7: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(6), 6); + break; + + case PMF_POLICY_BIOS_OUTPUT_8: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(7), 7); + break; + + case PMF_POLICY_BIOS_OUTPUT_9: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(8), 8); + break; + + case PMF_POLICY_BIOS_OUTPUT_10: + amd_pmf_smartpc_apply_bios_output(dev, val, BIT(9), 9); + break; } } } diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index ccb33d034e2ade..9d7e6b712abf11 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -818,7 +817,7 @@ static int asus_backlight_init(struct asus_laptop *asus) asus->backlight_device = bd; bd->props.brightness = asus_read_brightness(bd); - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); return 0; } diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index ed3633c5955d9f..ef04d396f61c77 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -7,12 +7,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include #include #include -#include #include #include @@ -538,7 +538,7 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver) dmi_check_system(asus_quirks); driver->quirks = quirks; - driver->panel_power = FB_BLANK_UNBLANK; + driver->panel_power = BACKLIGHT_POWER_ON; /* overwrite the wapf setting if the wapf paramater is specified */ if (wapf != -1) diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c index b441d8ca72d3f7..ca4670d0dc678c 100644 --- a/drivers/platform/x86/asus-tf103c-dock.c +++ b/drivers/platform/x86/asus-tf103c-dock.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include static bool fnlock; module_param(fnlock, bool, 0644); diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 321a658e455411..7a48220b4f5a66 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -97,6 +96,12 @@ module_param(fnlock_default, bool, 0444); #define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1 #define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2 +#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO 0 +#define ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO 1 +#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO 2 + +#define PLATFORM_PROFILE_MAX 2 + #define USB_INTEL_XUSB2PR 0xD0 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -300,8 +305,8 @@ struct asus_wmi { u32 kbd_rgb_dev; bool kbd_rgb_state_available; - bool throttle_thermal_policy_available; u8 throttle_thermal_policy_mode; + u32 throttle_thermal_policy_dev; bool cpu_fan_curve_available; bool gpu_fan_curve_available; @@ -349,20 +354,29 @@ static int asus_wmi_evaluate_method3(u32 method_id, status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id, &input, &output); - 
if (ACPI_FAILURE(status)) + pr_debug("%s called (0x%08x) with args: 0x%08x, 0x%08x, 0x%08x\n", + __func__, method_id, arg0, arg1, arg2); + if (ACPI_FAILURE(status)) { + pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n", + __func__, method_id, arg0, -EIO); return -EIO; + } obj = (union acpi_object *)output.pointer; if (obj && obj->type == ACPI_TYPE_INTEGER) tmp = (u32) obj->integer.value; + pr_debug("Result: 0x%08x\n", tmp); if (retval) *retval = tmp; kfree(obj); - if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) + if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) { + pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n", + __func__, method_id, arg0, -ENODEV); return -ENODEV; + } return 0; } @@ -392,20 +406,29 @@ static int asus_wmi_evaluate_method5(u32 method_id, status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id, &input, &output); - if (ACPI_FAILURE(status)) + pr_debug("%s called (0x%08x) with args: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + __func__, method_id, arg0, arg1, arg2, arg3, arg4); + if (ACPI_FAILURE(status)) { + pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n", + __func__, method_id, arg0, -EIO); return -EIO; + } obj = (union acpi_object *)output.pointer; if (obj && obj->type == ACPI_TYPE_INTEGER) tmp = (u32) obj->integer.value; + pr_debug("Result: %x\n", tmp); if (retval) *retval = tmp; kfree(obj); - if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) + if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) { + pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n", + __func__, method_id, arg0, -ENODEV); return -ENODEV; + } return 0; } @@ -431,8 +454,13 @@ static int asus_wmi_evaluate_method_buf(u32 method_id, status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id, &input, &output); - if (ACPI_FAILURE(status)) + pr_debug("%s called (0x%08x) with args: 0x%08x, 0x%08x\n", + __func__, method_id, arg0, arg1); + if (ACPI_FAILURE(status)) { + pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n", + __func__, method_id, arg0, -EIO); return -EIO; + } obj = (union acpi_object *)output.pointer; @@ -468,8 +496,11 @@ static int asus_wmi_evaluate_method_buf(u32 method_id, kfree(obj); - if (err) + if (err) { + pr_debug("%s, (0x%08x), arg 0x%08x failed: %d\n", + __func__, method_id, arg0, err); return err; + } return 0; } @@ -557,6 +588,7 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id) { u32 retval; int status = asus_wmi_get_devstate(asus, dev_id, &retval); + pr_debug("%s called (0x%08x), retval: 0x%08x\n", __func__, dev_id, retval); return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT); } @@ -1722,7 +1754,8 @@ static int asus_wmi_led_init(struct asus_wmi *asus) goto error; } - if (!kbd_led_read(asus, &led_val, NULL)) { + if (!kbd_led_read(asus, &led_val, NULL) && !dmi_check_system(asus_use_hid_led_dmi_ids)) { + pr_info("using asus-wmi for asus::kbd_backlight\n"); asus->kbd_led_wk = led_val; asus->kbd_led.name = "asus::kbd_backlight"; asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED; @@ -3186,7 +3219,7 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev) int err, fan_idx; u8 mode = 0; - if (asus->throttle_thermal_policy_available) + if (asus->throttle_thermal_policy_dev) mode = asus->throttle_thermal_policy_mode; /* DEVID_PU_FAN_CURVE is switched for OVERBOOST vs SILENT */ if (mode == 2) @@ -3393,7 +3426,7 @@ static ssize_t fan_curve_enable_store(struct device *dev, * For machines with throttle this is the only way to reset fans * to default mode of operation (does not erase curve data). 
*/ - if (asus->throttle_thermal_policy_available) { + if (asus->throttle_thermal_policy_dev) { err = throttle_thermal_policy_write(asus); if (err) return err; @@ -3610,8 +3643,8 @@ static const struct attribute_group asus_fan_curve_attr_group = { __ATTRIBUTE_GROUPS(asus_fan_curve_attr); /* - * Must be initialised after throttle_thermal_policy_check_present() as - * we check the status of throttle_thermal_policy_available during init. + * Must be initialised after throttle_thermal_policy_dev is set as + * we check the status of throttle_thermal_policy_dev during init. */ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus) { @@ -3621,18 +3654,27 @@ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus) err = fan_curve_check_present(asus, &asus->cpu_fan_curve_available, ASUS_WMI_DEVID_CPU_FAN_CURVE); - if (err) + if (err) { + pr_debug("%s, checked 0x%08x, failed: %d\n", + __func__, ASUS_WMI_DEVID_CPU_FAN_CURVE, err); return err; + } err = fan_curve_check_present(asus, &asus->gpu_fan_curve_available, ASUS_WMI_DEVID_GPU_FAN_CURVE); - if (err) + if (err) { + pr_debug("%s, checked 0x%08x, failed: %d\n", + __func__, ASUS_WMI_DEVID_GPU_FAN_CURVE, err); return err; + } err = fan_curve_check_present(asus, &asus->mid_fan_curve_available, ASUS_WMI_DEVID_MID_FAN_CURVE); - if (err) + if (err) { + pr_debug("%s, checked 0x%08x, failed: %d\n", + __func__, ASUS_WMI_DEVID_MID_FAN_CURVE, err); return err; + } if (!asus->cpu_fan_curve_available && !asus->gpu_fan_curve_available @@ -3652,38 +3694,13 @@ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus) } /* Throttle thermal policy ****************************************************/ - -static int throttle_thermal_policy_check_present(struct asus_wmi *asus) -{ - u32 result; - int err; - - asus->throttle_thermal_policy_available = false; - - err = asus_wmi_get_devstate(asus, - ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, - &result); - if (err) { - if (err == -ENODEV) - return 0; - return err; - } - - if (result & ASUS_WMI_DSTS_PRESENCE_BIT) - asus->throttle_thermal_policy_available = true; - - return 0; -} - static int throttle_thermal_policy_write(struct asus_wmi *asus) { - int err; - u8 value; + u8 value = asus->throttle_thermal_policy_mode; u32 retval; + int err; - value = asus->throttle_thermal_policy_mode; - - err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + err = asus_wmi_set_devstate(asus->throttle_thermal_policy_dev, value, &retval); sysfs_notify(&asus->platform_device->dev.kobj, NULL, @@ -3713,7 +3730,7 @@ static int throttle_thermal_policy_write(struct asus_wmi *asus) static int throttle_thermal_policy_set_default(struct asus_wmi *asus) { - if (!asus->throttle_thermal_policy_available) + if (!asus->throttle_thermal_policy_dev) return 0; asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; @@ -3725,7 +3742,7 @@ static int throttle_thermal_policy_switch_next(struct asus_wmi *asus) u8 new_mode = asus->throttle_thermal_policy_mode + 1; int err; - if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + if (new_mode > PLATFORM_PROFILE_MAX) new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; asus->throttle_thermal_policy_mode = new_mode; @@ -3764,7 +3781,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev, if (result < 0) return result; - if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + if (new_mode > PLATFORM_PROFILE_MAX) return -EINVAL; asus->throttle_thermal_policy_mode = new_mode; @@ -3781,10 +3798,52 @@ static ssize_t throttle_thermal_policy_store(struct device 
*dev, return count; } -// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent +/* + * Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent + */ static DEVICE_ATTR_RW(throttle_thermal_policy); /* Platform profile ***********************************************************/ +static int asus_wmi_platform_profile_to_vivo(struct asus_wmi *asus, int mode) +{ + bool vivo; + + vivo = asus->throttle_thermal_policy_dev == ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO; + + if (vivo) { + switch (mode) { + case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT: + return ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO; + case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST: + return ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO; + case ASUS_THROTTLE_THERMAL_POLICY_SILENT: + return ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO; + } + } + + return mode; +} + +static int asus_wmi_platform_profile_mode_from_vivo(struct asus_wmi *asus, int mode) +{ + bool vivo; + + vivo = asus->throttle_thermal_policy_dev == ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO; + + if (vivo) { + switch (mode) { + case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT_VIVO: + return ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST_VIVO: + return ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST; + case ASUS_THROTTLE_THERMAL_POLICY_SILENT_VIVO: + return ASUS_THROTTLE_THERMAL_POLICY_SILENT; + } + } + + return mode; +} + static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof, enum platform_profile_option *profile) { @@ -3792,10 +3851,9 @@ static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof, int tp; asus = container_of(pprof, struct asus_wmi, platform_profile_handler); - tp = asus->throttle_thermal_policy_mode; - switch (tp) { + switch (asus_wmi_platform_profile_mode_from_vivo(asus, tp)) { case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT: *profile = PLATFORM_PROFILE_BALANCED; break; @@ -3834,7 +3892,7 @@ static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof, return -EOPNOTSUPP; } - asus->throttle_thermal_policy_mode = tp; + asus->throttle_thermal_policy_mode = asus_wmi_platform_profile_to_vivo(asus, tp); return throttle_thermal_policy_write(asus); } @@ -3847,7 +3905,7 @@ static int platform_profile_setup(struct asus_wmi *asus) * Not an error if a component platform_profile relies on is unavailable * so early return, skipping the setup of platform_profile. */ - if (!asus->throttle_thermal_policy_available) + if (!asus->throttle_thermal_policy_dev) return 0; dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n"); @@ -3862,8 +3920,13 @@ static int platform_profile_setup(struct asus_wmi *asus) asus->platform_profile_handler.choices); err = platform_profile_register(&asus->platform_profile_handler); - if (err) + if (err == -EEXIST) { + pr_warn("%s, a platform_profile handler is already registered\n", __func__); + return 0; + } else if (err) { + pr_err("%s, failed at platform_profile_register: %d\n", __func__, err); return err; + } asus->platform_profile_support = true; return 0; @@ -3884,7 +3947,7 @@ static int read_backlight_power(struct asus_wmi *asus) if (ret < 0) return ret; - return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; + return ret ? 
BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF; } static int read_brightness_max(struct asus_wmi *asus) @@ -3943,7 +4006,7 @@ static int update_bl_status(struct backlight_device *bd) power = read_backlight_power(asus); if (power != -ENODEV && bd->props.power != power) { - ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK); + ctrl_param = !!(bd->props.power == BACKLIGHT_POWER_ON); err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, ctrl_param, NULL); if (asus->driver->quirks->store_backlight_power) @@ -4002,7 +4065,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus) power = read_backlight_power(asus); if (power == -ENODEV) - power = FB_BLANK_UNBLANK; + power = BACKLIGHT_POWER_ON; else if (power < 0) return power; @@ -4060,7 +4123,7 @@ static int read_screenpad_backlight_power(struct asus_wmi *asus) if (ret < 0) return ret; /* 1 == powered */ - return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; + return ret ? BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF; } static int read_screenpad_brightness(struct backlight_device *bd) @@ -4073,7 +4136,7 @@ static int read_screenpad_brightness(struct backlight_device *bd) if (err < 0) return err; /* The device brightness can only be read if powered, so return stored */ - if (err == FB_BLANK_POWERDOWN) + if (err == BACKLIGHT_POWER_OFF) return asus->driver->screenpad_brightness - ASUS_SCREENPAD_BRIGHT_MIN; err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_SCREENPAD_LIGHT, &retval); @@ -4094,7 +4157,7 @@ static int update_screenpad_bl_status(struct backlight_device *bd) return power; if (bd->props.power != power) { - if (power != FB_BLANK_UNBLANK) { + if (power != BACKLIGHT_POWER_ON) { /* Only brightness > 0 can power it back on */ ctrl_param = asus->driver->screenpad_brightness - ASUS_SCREENPAD_BRIGHT_MIN; err = asus_wmi_set_devstate(ASUS_WMI_DEVID_SCREENPAD_LIGHT, @@ -4102,7 +4165,7 @@ static int update_screenpad_bl_status(struct backlight_device *bd) } else { err = asus_wmi_set_devstate(ASUS_WMI_DEVID_SCREENPAD_POWER, 0, NULL); } - } else if (power == FB_BLANK_UNBLANK) { + } else if (power == BACKLIGHT_POWER_ON) { /* Only set brightness if powered on or we get invalid/unsync state */ ctrl_param = bd->props.brightness + ASUS_SCREENPAD_BRIGHT_MIN; err = asus_wmi_set_devstate(ASUS_WMI_DEVID_SCREENPAD_LIGHT, ctrl_param, NULL); @@ -4132,7 +4195,7 @@ static int asus_screenpad_init(struct asus_wmi *asus) if (power < 0) return power; - if (power != FB_BLANK_POWERDOWN) { + if (power != BACKLIGHT_POWER_OFF) { err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_SCREENPAD_LIGHT, &brightness); if (err < 0) return err; @@ -4189,28 +4252,15 @@ static void asus_wmi_fnlock_update(struct asus_wmi *asus) /* WMI events *****************************************************************/ -static int asus_wmi_get_event_code(u32 value) +static int asus_wmi_get_event_code(union acpi_object *obj) { - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - acpi_status status; int code; - status = wmi_get_event_data(value, &response); - if (ACPI_FAILURE(status)) { - pr_warn("Failed to get WMI notify code: %s\n", - acpi_format_exception(status)); - return -EIO; - } - - obj = (union acpi_object *)response.pointer; - if (obj && obj->type == ACPI_TYPE_INTEGER) code = (int)(obj->integer.value & WMI_EVENT_MASK); else code = -EIO; - kfree(obj); return code; } @@ -4262,7 +4312,7 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus) if (code == NOTIFY_KBD_FBM || code == NOTIFY_KBD_TTP) { if (asus->fan_boost_mode_available) 
fan_boost_mode_switch_next(asus); - if (asus->throttle_thermal_policy_available) + if (asus->throttle_thermal_policy_dev) throttle_thermal_policy_switch_next(asus); return; @@ -4276,10 +4326,10 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus) pr_info("Unknown key code 0x%x\n", code); } -static void asus_wmi_notify(u32 value, void *context) +static void asus_wmi_notify(union acpi_object *obj, void *context) { struct asus_wmi *asus = context; - int code = asus_wmi_get_event_code(value); + int code = asus_wmi_get_event_code(obj); if (code < 0) { pr_warn("Failed to get notify code: %d\n", code); @@ -4434,7 +4484,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, else if (attr == &dev_attr_fan_boost_mode.attr) ok = asus->fan_boost_mode_available; else if (attr == &dev_attr_throttle_thermal_policy.attr) - ok = asus->throttle_thermal_policy_available; + ok = asus->throttle_thermal_policy_dev != 0; else if (attr == &dev_attr_ppt_pl2_sppt.attr) devid = ASUS_WMI_DEVID_PPT_PL2_SPPT; else if (attr == &dev_attr_ppt_pl1_spl.attr) @@ -4460,8 +4510,10 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, else if (attr == &dev_attr_available_mini_led_mode.attr) ok = asus->mini_led_dev_id != 0; - if (devid != -1) + if (devid != -1) { ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); + pr_debug("%s called 0x%08x, ok: %x\n", __func__, devid, ok); + } return ok ? attr->mode : 0; } @@ -4726,16 +4778,15 @@ static int asus_wmi_add(struct platform_device *pdev) else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2; + if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY)) + asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY; + else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO)) + asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO; + err = fan_boost_mode_check_present(asus); if (err) goto fail_fan_boost_mode; - err = throttle_thermal_policy_check_present(asus); - if (err) - goto fail_throttle_thermal_policy; - else - throttle_thermal_policy_set_default(asus); - err = platform_profile_setup(asus); if (err) goto fail_platform_profile_setup; @@ -4830,7 +4881,6 @@ static int asus_wmi_add(struct platform_device *pdev) fail_input: asus_wmi_sysfs_exit(asus->platform_device); fail_sysfs: -fail_throttle_thermal_policy: fail_custom_fan_curve: fail_platform_profile_setup: if (asus->platform_profile_support) diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig index 309236cecd5a43..68a49788a39633 100644 --- a/drivers/platform/x86/dell/Kconfig +++ b/drivers/platform/x86/dell/Kconfig @@ -49,6 +49,7 @@ config DELL_LAPTOP default m depends on DMI depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_BATTERY depends on ACPI_VIDEO || ACPI_VIDEO = n depends on RFKILL || RFKILL = n depends on DELL_WMI || DELL_WMI = n diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c index 6552dfe491c6d1..5671bd0deee7d1 100644 --- a/drivers/platform/x86/dell/dell-laptop.c +++ b/drivers/platform/x86/dell/dell-laptop.c @@ -22,11 +22,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include "dell-rbtn.h" #include "dell-smbios.h" @@ -99,6 +101,20 @@ static bool force_rfkill; static bool micmute_led_registered; static bool mute_led_registered; +struct battery_mode_info { + int token; + const char *label; +}; + 
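
Aside: the dell-laptop battery hunks around this point introduce a token/label table plus a battery_supported_modes bitmap recording which SMBIOS battery-mode tokens the platform actually exposes. Below is a minimal standalone C sketch of that probe-into-bitmap pattern; it is not part of the patch, probe_token() is a hypothetical stand-in for dell_smbios_find_token(), and the token values are the BAT_*_MODE_TOKEN constants added to dell-smbios.h later in this series.

#include <stdio.h>
#include <stdbool.h>

#define BIT(n) (1u << (n))

struct mode_info {
	int token;
	const char *label;
};

static const struct mode_info modes[] = {
	{ 0x0341, "Trickle"  },	/* BAT_PRI_AC_MODE_TOKEN */
	{ 0x0347, "Fast"     },	/* BAT_EXPRESS_MODE_TOKEN */
	{ 0x0346, "Standard" },	/* BAT_STANDARD_MODE_TOKEN */
	{ 0x0342, "Adaptive" },	/* BAT_ADAPTIVE_MODE_TOKEN */
	{ 0x0343, "Custom"   },	/* BAT_CUSTOM_MODE_TOKEN */
};

/* Hypothetical probe; the driver asks dell_smbios_find_token() instead. */
static bool probe_token(int token)
{
	return token == 0x0346 || token == 0x0342 || token == 0x0343;
}

int main(void)
{
	unsigned int supported = 0;
	size_t i;

	/* Init-time pass: remember which table entries the platform has. */
	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (probe_token(modes[i].token))
			supported |= BIT(i);

	/* Show/store paths then skip anything not in the bitmap. */
	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (supported & BIT(i))
			printf("%s ", modes[i].label);
	printf("\n");
	return 0;
}

Indexing the bitmap by table position lets both the sysfs show and store paths skip unsupported entries with a single BIT(i) test instead of re-probing tokens on every access.
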
+static const struct battery_mode_info battery_modes[] = { + { BAT_PRI_AC_MODE_TOKEN, "Trickle" }, + { BAT_EXPRESS_MODE_TOKEN, "Fast" }, + { BAT_STANDARD_MODE_TOKEN, "Standard" }, + { BAT_ADAPTIVE_MODE_TOKEN, "Adaptive" }, + { BAT_CUSTOM_MODE_TOKEN, "Custom" }, +}; +static u32 battery_supported_modes; + module_param(force_rfkill, bool, 0444); MODULE_PARM_DESC(force_rfkill, "enable rfkill on non whitelisted models"); @@ -353,6 +369,32 @@ static const struct dmi_system_id dell_quirks[] __initconst = { { } }; +/* -1 is a sentinel value, telling us to use token->value */ +#define USE_TVAL ((u32) -1) +static int dell_send_request_for_tokenid(struct calling_interface_buffer *buffer, + u16 class, u16 select, u16 tokenid, + u32 val) +{ + struct calling_interface_token *token; + + token = dell_smbios_find_token(tokenid); + if (!token) + return -ENODEV; + + if (val == USE_TVAL) + val = token->value; + + dell_fill_request(buffer, token->location, val, 0, 0); + return dell_send_request(buffer, class, select); +} + +static inline int dell_set_std_token_value(struct calling_interface_buffer *buffer, + u16 tokenid, u32 value) +{ + return dell_send_request_for_tokenid(buffer, CLASS_TOKEN_WRITE, + SELECT_TOKEN_STD, tokenid, value); +} + /* * Derived from information in smbios-wireless-ctl: * @@ -895,43 +937,24 @@ static void dell_cleanup_rfkill(void) static int dell_send_intensity(struct backlight_device *bd) { struct calling_interface_buffer buffer; - struct calling_interface_token *token; - int ret; + u16 select; - token = dell_smbios_find_token(BRIGHTNESS_TOKEN); - if (!token) - return -ENODEV; - - dell_fill_request(&buffer, - token->location, bd->props.brightness, 0, 0); - if (power_supply_is_system_supplied() > 0) - ret = dell_send_request(&buffer, - CLASS_TOKEN_WRITE, SELECT_TOKEN_AC); - else - ret = dell_send_request(&buffer, - CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT); - - return ret; + select = power_supply_is_system_supplied() > 0 ? + SELECT_TOKEN_AC : SELECT_TOKEN_BAT; + return dell_send_request_for_tokenid(&buffer, CLASS_TOKEN_WRITE, + select, BRIGHTNESS_TOKEN, bd->props.brightness); } static int dell_get_intensity(struct backlight_device *bd) { struct calling_interface_buffer buffer; - struct calling_interface_token *token; int ret; + u16 select; - token = dell_smbios_find_token(BRIGHTNESS_TOKEN); - if (!token) - return -ENODEV; - - dell_fill_request(&buffer, token->location, 0, 0, 0); - if (power_supply_is_system_supplied() > 0) - ret = dell_send_request(&buffer, - CLASS_TOKEN_READ, SELECT_TOKEN_AC); - else - ret = dell_send_request(&buffer, - CLASS_TOKEN_READ, SELECT_TOKEN_BAT); - + select = power_supply_is_system_supplied() > 0 ? 
+ SELECT_TOKEN_AC : SELECT_TOKEN_BAT; + ret = dell_send_request_for_tokenid(&buffer, CLASS_TOKEN_READ, + select, BRIGHTNESS_TOKEN, 0); if (ret == 0) ret = buffer.output[1]; @@ -1355,20 +1378,11 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old) static int kbd_set_token_bit(u8 bit) { struct calling_interface_buffer buffer; - struct calling_interface_token *token; - int ret; if (bit >= ARRAY_SIZE(kbd_tokens)) return -EINVAL; - token = dell_smbios_find_token(kbd_tokens[bit]); - if (!token) - return -EINVAL; - - dell_fill_request(&buffer, token->location, token->value, 0, 0); - ret = dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD); - - return ret; + return dell_set_std_token_value(&buffer, kbd_tokens[bit], USE_TVAL); } static int kbd_get_token_bit(u8 bit) @@ -1387,11 +1401,10 @@ static int kbd_get_token_bit(u8 bit) dell_fill_request(&buffer, token->location, 0, 0, 0); ret = dell_send_request(&buffer, CLASS_TOKEN_READ, SELECT_TOKEN_STD); - val = buffer.output[1]; - if (ret) return ret; + val = buffer.output[1]; return (val == token->value); } @@ -1497,7 +1510,7 @@ static inline int kbd_init_info(void) } -static inline void kbd_init_tokens(void) +static inline void __init kbd_init_tokens(void) { int i; @@ -1506,7 +1519,7 @@ static inline void kbd_init_tokens(void) kbd_token_bits |= BIT(i); } -static void kbd_init(void) +static void __init kbd_init(void) { int ret; @@ -2131,21 +2144,11 @@ static int micmute_led_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct calling_interface_buffer buffer; - struct calling_interface_token *token; - int state = brightness != LED_OFF; - - if (state == 0) - token = dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE); - else - token = dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE); - - if (!token) - return -ENODEV; + u32 tokenid; - dell_fill_request(&buffer, token->location, token->value, 0, 0); - dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD); - - return 0; + tokenid = brightness == LED_OFF ? + GLOBAL_MIC_MUTE_DISABLE : GLOBAL_MIC_MUTE_ENABLE; + return dell_set_std_token_value(&buffer, tokenid, USE_TVAL); } static struct led_classdev micmute_led_cdev = { @@ -2159,21 +2162,11 @@ static int mute_led_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct calling_interface_buffer buffer; - struct calling_interface_token *token; - int state = brightness != LED_OFF; - - if (state == 0) - token = dell_smbios_find_token(GLOBAL_MUTE_DISABLE); - else - token = dell_smbios_find_token(GLOBAL_MUTE_ENABLE); - - if (!token) - return -ENODEV; - - dell_fill_request(&buffer, token->location, token->value, 0, 0); - dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD); + u32 tokenid; - return 0; + tokenid = brightness == LED_OFF ? 
+ GLOBAL_MUTE_DISABLE : GLOBAL_MUTE_ENABLE; + return dell_set_std_token_value(&buffer, tokenid, USE_TVAL); } static struct led_classdev mute_led_cdev = { @@ -2183,9 +2176,283 @@ static struct led_classdev mute_led_cdev = { .default_trigger = "audio-mute", }; -static int __init dell_init(void) +static int dell_battery_set_mode(const u16 tokenid) +{ + struct calling_interface_buffer buffer; + + return dell_set_std_token_value(&buffer, tokenid, USE_TVAL); +} + +static int dell_battery_read(const u16 tokenid) +{ + struct calling_interface_buffer buffer; + int err; + + err = dell_send_request_for_tokenid(&buffer, CLASS_TOKEN_READ, + SELECT_TOKEN_STD, tokenid, 0); + if (err) + return err; + + if (buffer.output[1] > INT_MAX) + return -EIO; + + return buffer.output[1]; +} + +static bool dell_battery_mode_is_active(const u16 tokenid) { struct calling_interface_token *token; + int ret; + + ret = dell_battery_read(tokenid); + if (ret < 0) + return false; + + token = dell_smbios_find_token(tokenid); + /* token's already verified by dell_battery_read() */ + + return token->value == (u16) ret; +} + +/* + * The rules: the minimum start charging value is 50%. The maximum + * start charging value is 95%. The minimum end charging value is + * 55%. The maximum end charging value is 100%. And finally, there + * has to be at least a 5% difference between start & end values. + */ +#define CHARGE_START_MIN 50 +#define CHARGE_START_MAX 95 +#define CHARGE_END_MIN 55 +#define CHARGE_END_MAX 100 +#define CHARGE_MIN_DIFF 5 + +static int dell_battery_set_custom_charge_start(int start) +{ + struct calling_interface_buffer buffer; + int end; + + start = clamp(start, CHARGE_START_MIN, CHARGE_START_MAX); + end = dell_battery_read(BAT_CUSTOM_CHARGE_END); + if (end < 0) + return end; + if ((end - start) < CHARGE_MIN_DIFF) + start = end - CHARGE_MIN_DIFF; + + return dell_set_std_token_value(&buffer, BAT_CUSTOM_CHARGE_START, + start); +} + +static int dell_battery_set_custom_charge_end(int end) +{ + struct calling_interface_buffer buffer; + int start; + + end = clamp(end, CHARGE_END_MIN, CHARGE_END_MAX); + start = dell_battery_read(BAT_CUSTOM_CHARGE_START); + if (start < 0) + return start; + if ((end - start) < CHARGE_MIN_DIFF) + end = start + CHARGE_MIN_DIFF; + + return dell_set_std_token_value(&buffer, BAT_CUSTOM_CHARGE_END, end); +} + +static ssize_t charge_types_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t count = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(battery_modes); i++) { + bool active; + + if (!(battery_supported_modes & BIT(i))) + continue; + + active = dell_battery_mode_is_active(battery_modes[i].token); + count += sysfs_emit_at(buf, count, active ? 
"[%s] " : "%s ", + battery_modes[i].label); + } + + /* convert the last space to a newline */ + if (count > 0) + count--; + count += sysfs_emit_at(buf, count, "\n"); + + return count; +} + +static ssize_t charge_types_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + bool matched = false; + int err, i; + + for (i = 0; i < ARRAY_SIZE(battery_modes); i++) { + if (!(battery_supported_modes & BIT(i))) + continue; + + if (sysfs_streq(battery_modes[i].label, buf)) { + matched = true; + break; + } + } + if (!matched) + return -EINVAL; + + err = dell_battery_set_mode(battery_modes[i].token); + if (err) + return err; + + return size; +} + +static ssize_t charge_control_start_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int start; + + start = dell_battery_read(BAT_CUSTOM_CHARGE_START); + if (start < 0) + return start; + + if (start > CHARGE_START_MAX) + return -EIO; + + return sysfs_emit(buf, "%d\n", start); +} + +static ssize_t charge_control_start_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret, start; + + ret = kstrtoint(buf, 10, &start); + if (ret) + return ret; + if (start < 0 || start > 100) + return -EINVAL; + + ret = dell_battery_set_custom_charge_start(start); + if (ret) + return ret; + + return size; +} + +static ssize_t charge_control_end_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int end; + + end = dell_battery_read(BAT_CUSTOM_CHARGE_END); + if (end < 0) + return end; + + if (end > CHARGE_END_MAX) + return -EIO; + + return sysfs_emit(buf, "%d\n", end); +} + +static ssize_t charge_control_end_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret, end; + + ret = kstrtouint(buf, 10, &end); + if (ret) + return ret; + if (end < 0 || end > 100) + return -EINVAL; + + ret = dell_battery_set_custom_charge_end(end); + if (ret) + return ret; + + return size; +} + +static DEVICE_ATTR_RW(charge_control_start_threshold); +static DEVICE_ATTR_RW(charge_control_end_threshold); +static DEVICE_ATTR_RW(charge_types); + +static struct attribute *dell_battery_attrs[] = { + &dev_attr_charge_control_start_threshold.attr, + &dev_attr_charge_control_end_threshold.attr, + &dev_attr_charge_types.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dell_battery); + +static bool dell_battery_supported(struct power_supply *battery) +{ + /* We currently only support the primary battery */ + return strcmp(battery->desc->name, "BAT0") == 0; +} + +static int dell_battery_add(struct power_supply *battery, + struct acpi_battery_hook *hook) +{ + /* Return 0 instead of an error to avoid being unloaded */ + if (!dell_battery_supported(battery)) + return 0; + + return device_add_groups(&battery->dev, dell_battery_groups); +} + +static int dell_battery_remove(struct power_supply *battery, + struct acpi_battery_hook *hook) +{ + if (!dell_battery_supported(battery)) + return 0; + + device_remove_groups(&battery->dev, dell_battery_groups); + return 0; +} + +static struct acpi_battery_hook dell_battery_hook = { + .add_battery = dell_battery_add, + .remove_battery = dell_battery_remove, + .name = "Dell Primary Battery Extension", +}; + +static u32 __init battery_get_supported_modes(void) +{ + u32 modes = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(battery_modes); i++) { + if (dell_smbios_find_token(battery_modes[i].token)) + modes |= BIT(i); + } + + return modes; +} + +static void __init 
dell_battery_init(struct device *dev) +{ + battery_supported_modes = battery_get_supported_modes(); + + if (battery_supported_modes != 0) + battery_hook_register(&dell_battery_hook); +} + +static void dell_battery_exit(void) +{ + if (battery_supported_modes != 0) + battery_hook_unregister(&dell_battery_hook); +} + +static int __init dell_init(void) +{ + struct calling_interface_buffer buffer; int max_intensity = 0; int ret; @@ -2219,6 +2486,7 @@ static int __init dell_init(void) touchpad_led_init(&platform_device->dev); kbd_led_init(&platform_device->dev); + dell_battery_init(&platform_device->dev); dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, @@ -2246,16 +2514,10 @@ static int __init dell_init(void) if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0; - token = dell_smbios_find_token(BRIGHTNESS_TOKEN); - if (token) { - struct calling_interface_buffer buffer; - - dell_fill_request(&buffer, token->location, 0, 0, 0); - ret = dell_send_request(&buffer, - CLASS_TOKEN_READ, SELECT_TOKEN_AC); - if (ret == 0) - max_intensity = buffer.output[3]; - } + ret = dell_send_request_for_tokenid(&buffer, CLASS_TOKEN_READ, + SELECT_TOKEN_AC, BRIGHTNESS_TOKEN, 0); + if (ret == 0) + max_intensity = buffer.output[3]; if (max_intensity) { struct backlight_properties props; @@ -2293,6 +2555,7 @@ static int __init dell_init(void) if (mute_led_registered) led_classdev_unregister(&mute_led_cdev); fail_led: + dell_battery_exit(); dell_cleanup_rfkill(); fail_rfkill: platform_device_del(platform_device); @@ -2311,6 +2574,7 @@ static void __exit dell_exit(void) if (quirks && quirks->touchpad_led) touchpad_led_exit(); kbd_led_exit(); + dell_battery_exit(); backlight_device_unregister(dell_backlight_device); if (micmute_led_registered) led_classdev_unregister(&micmute_led_cdev); diff --git a/drivers/platform/x86/dell/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h index ea0cc38642a24e..77baa15eb52365 100644 --- a/drivers/platform/x86/dell/dell-smbios.h +++ b/drivers/platform/x86/dell/dell-smbios.h @@ -33,6 +33,13 @@ #define KBD_LED_AUTO_50_TOKEN 0x02EB #define KBD_LED_AUTO_75_TOKEN 0x02EC #define KBD_LED_AUTO_100_TOKEN 0x02F6 +#define BAT_PRI_AC_MODE_TOKEN 0x0341 +#define BAT_ADAPTIVE_MODE_TOKEN 0x0342 +#define BAT_CUSTOM_MODE_TOKEN 0x0343 +#define BAT_STANDARD_MODE_TOKEN 0x0346 +#define BAT_EXPRESS_MODE_TOKEN 0x0347 +#define BAT_CUSTOM_CHARGE_START 0x0349 +#define BAT_CUSTOM_CHARGE_END 0x034A #define GLOBAL_MIC_MUTE_ENABLE 0x0364 #define GLOBAL_MIC_MUTE_DISABLE 0x0365 #define GLOBAL_MUTE_ENABLE 0x058C diff --git a/drivers/platform/x86/dell/dell-wmi-aio.c b/drivers/platform/x86/dell/dell-wmi-aio.c index c7b7f1e403fb9a..54096495719bfa 100644 --- a/drivers/platform/x86/dell/dell-wmi-aio.c +++ b/drivers/platform/x86/dell/dell-wmi-aio.c @@ -70,20 +70,10 @@ static bool dell_wmi_aio_event_check(u8 *buffer, int length) return false; } -static void dell_wmi_aio_notify(u32 value, void *context) +static void dell_wmi_aio_notify(union acpi_object *obj, void *context) { - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; struct dell_wmi_event *event; - acpi_status status; - status = wmi_get_event_data(value, &response); - if (status != AE_OK) { - pr_info("bad event status 0x%x\n", status); - return; - } - - obj = (union acpi_object *)response.pointer; if (obj) { unsigned int scancode = 0; @@ -114,7 +104,6 @@ static void dell_wmi_aio_notify(u32 value, void *context) break; } } - kfree(obj); } 
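
Aside: several drivers in this series (dell-wmi-aio just above, plus asus-wmi, hp-wmi and huawei-wmi) are converted so the WMI notify handler receives the already-decoded union acpi_object * instead of a u32 event id, which removes the wmi_get_event_data()/kfree() round trip and its error paths from every handler. A minimal userspace sketch of the resulting handler shape follows; it is not part of the patch, and the union below is a simplified stand-in for the kernel's union acpi_object.

#include <stdio.h>

enum obj_type { OBJ_INTEGER, OBJ_BUFFER };

union event_obj {
	enum obj_type type;
	struct {
		enum obj_type type;
		long long value;
	} integer;
};

/* New-style handler: the caller hands over the decoded object directly. */
static void notify(union event_obj *obj, void *context)
{
	(void)context;

	if (obj && obj->type == OBJ_INTEGER)
		printf("event code 0x%llx\n",
		       (unsigned long long)obj->integer.value);
	else
		printf("bad event object\n");
}

int main(void)
{
	union event_obj ev = { .integer = { OBJ_INTEGER, 0x57 } };

	notify(&ev, NULL);
	return 0;
}

The practical effect visible in the hunks: no per-handler ACPI status check, no buffer to free on early return, and no chance of leaking the event object.
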
static int __init dell_wmi_aio_input_setup(void) diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c index 0b2299f7a2de5a..e75cd6e1efe6ac 100644 --- a/drivers/platform/x86/dell/dell-wmi-ddv.c +++ b/drivers/platform/x86/dell/dell-wmi-ddv.c @@ -31,7 +31,7 @@ #include -#include +#include #define DRIVER_NAME "dell-wmi-ddv" diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c index 9def7983d7d66a..40ddc6eb75624e 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c @@ -521,6 +521,7 @@ static int __init sysman_init(void) int ret = 0; if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) && + !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Alienware", NULL) && !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) { pr_err("Unable to run on non-Dell system\n"); return -ENODEV; diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 447364bed249c3..03319a80e11408 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -1137,7 +1136,7 @@ static int eeepc_backlight_init(struct eeepc_laptop *eeepc) } eeepc->backlight_device = bd; bd->props.brightness = read_brightness(bd); - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); return 0; } diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 32d9f0ba6be3f9..37edb9ae67b852 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c @@ -13,13 +13,13 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include #include #include #include -#include #include #include "asus-wmi.h" @@ -192,7 +192,7 @@ static void eeepc_wmi_quirks(struct asus_wmi_driver *driver) driver->quirks = quirks; driver->quirks->wapf = -1; - driver->panel_power = FB_BLANK_UNBLANK; + driver->panel_power = BACKLIGHT_POWER_ON; } static struct asus_wmi_driver asus_wmi_driver = { diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 968fc91bd5e405..ae992ac1ab4ac0 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -356,7 +355,7 @@ static int bl_get_brightness(struct backlight_device *b) { struct acpi_device *device = bl_get_data(b); - return b->props.power == FB_BLANK_POWERDOWN ? 0 : get_lcd_level(device); + return b->props.power == BACKLIGHT_POWER_OFF ? 
0 : get_lcd_level(device); } static int bl_update_status(struct backlight_device *b) @@ -364,7 +363,7 @@ static int bl_update_status(struct backlight_device *b) struct acpi_device *device = bl_get_data(b); if (fext) { - if (b->props.power == FB_BLANK_POWERDOWN) + if (b->props.power == BACKLIGHT_POWER_OFF) call_fext_func(fext, FUNC_BACKLIGHT, 0x1, BACKLIGHT_PARAM_POWER, BACKLIGHT_OFF); else @@ -933,9 +932,9 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device) acpi_video_get_backlight_type() == acpi_backlight_vendor) { if (call_fext_func(fext, FUNC_BACKLIGHT, 0x2, BACKLIGHT_PARAM_POWER, 0x0) == BACKLIGHT_OFF) - fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN; + fujitsu_bl->bl_device->props.power = BACKLIGHT_POWER_OFF; else - fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK; + fujitsu_bl->bl_device->props.power = BACKLIGHT_POWER_ON; } ret = acpi_fujitsu_laptop_input_setup(device); diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c index 876e0a97cee1c0..8c05e0dd2a218e 100644 --- a/drivers/platform/x86/hp/hp-wmi.c +++ b/drivers/platform/x86/hp/hp-wmi.c @@ -834,28 +834,16 @@ static struct attribute *hp_wmi_attrs[] = { }; ATTRIBUTE_GROUPS(hp_wmi); -static void hp_wmi_notify(u32 value, void *context) +static void hp_wmi_notify(union acpi_object *obj, void *context) { - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; u32 event_id, event_data; - union acpi_object *obj; - acpi_status status; u32 *location; int key_code; - status = wmi_get_event_data(value, &response); - if (status != AE_OK) { - pr_info("bad event status 0x%x\n", status); - return; - } - - obj = (union acpi_object *)response.pointer; - if (!obj) return; if (obj->type != ACPI_TYPE_BUFFER) { pr_info("Unknown response received %d\n", obj->type); - kfree(obj); return; } @@ -872,10 +860,8 @@ static void hp_wmi_notify(u32 value, void *context) event_data = *(location + 2); } else { pr_info("Unknown buffer length %d\n", obj->buffer.length); - kfree(obj); return; } - kfree(obj); switch (event_id) { case HPWMI_DOCK_EVENT: diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c index 09d476dd832e8b..d81fd5df4a000b 100644 --- a/drivers/platform/x86/huawei-wmi.c +++ b/drivers/platform/x86/huawei-wmi.c @@ -734,26 +734,14 @@ static void huawei_wmi_process_key(struct input_dev *idev, int code) sparse_keymap_report_entry(idev, key, 1, true); } -static void huawei_wmi_input_notify(u32 value, void *context) +static void huawei_wmi_input_notify(union acpi_object *obj, void *context) { struct input_dev *idev = (struct input_dev *)context; - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - acpi_status status; - status = wmi_get_event_data(value, &response); - if (ACPI_FAILURE(status)) { - dev_err(&idev->dev, "Unable to get event data\n"); - return; - } - - obj = (union acpi_object *)response.pointer; if (obj && obj->type == ACPI_TYPE_INTEGER) huawei_wmi_process_key(idev, obj->integer.value); else dev_err(&idev->dev, "Bad response type\n"); - - kfree(response.pointer); } static int huawei_wmi_input_setup(struct device *dev, const char *guid) diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 98ec30fce9fdd3..c64dfc56651d3d 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -17,11 +17,11 @@ #include #include #include -#include #include #include #include #include +#include #include #include #include @@ -87,6 +87,34 @@ enum { 
SALS_FNLOCK_OFF = 0xf, }; +enum { + VPCCMD_R_VPC1 = 0x10, + VPCCMD_R_BL_MAX, + VPCCMD_R_BL, + VPCCMD_W_BL, + VPCCMD_R_WIFI, + VPCCMD_W_WIFI, + VPCCMD_R_BT, + VPCCMD_W_BT, + VPCCMD_R_BL_POWER, + VPCCMD_R_NOVO, + VPCCMD_R_VPC2, + VPCCMD_R_TOUCHPAD, + VPCCMD_W_TOUCHPAD, + VPCCMD_R_CAMERA, + VPCCMD_W_CAMERA, + VPCCMD_R_3G, + VPCCMD_W_3G, + VPCCMD_R_ODD, /* 0x21 */ + VPCCMD_W_FAN, + VPCCMD_R_RF, + VPCCMD_W_RF, + VPCCMD_W_YMC = 0x2A, + VPCCMD_R_FAN = 0x2B, + VPCCMD_R_SPECIAL_BUTTONS = 0x31, + VPCCMD_W_BL_POWER = 0x33, +}; + /* * These correspond to the number of supported states - 1 * Future keyboard types may need a new system, if there's a collision @@ -237,6 +265,7 @@ static void ideapad_shared_exit(struct ideapad_private *priv) /* * ACPI Helpers */ +#define IDEAPAD_EC_TIMEOUT 200 /* in ms */ static int eval_int(acpi_handle handle, const char *name, unsigned long *res) { @@ -252,6 +281,29 @@ static int eval_int(acpi_handle handle, const char *name, unsigned long *res) return 0; } +static int eval_int_with_arg(acpi_handle handle, const char *name, unsigned long arg, + unsigned long *res) +{ + struct acpi_object_list params; + unsigned long long result; + union acpi_object in_obj; + acpi_status status; + + params.count = 1; + params.pointer = &in_obj; + in_obj.type = ACPI_TYPE_INTEGER; + in_obj.integer.value = arg; + + status = acpi_evaluate_integer(handle, (char *)name, &params, &result); + if (ACPI_FAILURE(status)) + return -EIO; + + if (res) + *res = result; + + return 0; +} + static int exec_simple_method(acpi_handle handle, const char *name, unsigned long arg) { acpi_status status = acpi_execute_simple_method(handle, (char *)name, arg); @@ -294,6 +346,89 @@ static int eval_dytc(acpi_handle handle, unsigned long cmd, unsigned long *res) return eval_int_with_arg(handle, "DYTC", cmd, res); } +static int eval_vpcr(acpi_handle handle, unsigned long cmd, unsigned long *res) +{ + return eval_int_with_arg(handle, "VPCR", cmd, res); +} + +static int eval_vpcw(acpi_handle handle, unsigned long cmd, unsigned long data) +{ + struct acpi_object_list params; + union acpi_object in_obj[2]; + acpi_status status; + + params.count = 2; + params.pointer = in_obj; + in_obj[0].type = ACPI_TYPE_INTEGER; + in_obj[0].integer.value = cmd; + in_obj[1].type = ACPI_TYPE_INTEGER; + in_obj[1].integer.value = data; + + status = acpi_evaluate_object(handle, "VPCW", &params, NULL); + if (ACPI_FAILURE(status)) + return -EIO; + + return 0; +} + +static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *data) +{ + unsigned long end_jiffies, val; + int err; + + err = eval_vpcw(handle, 1, cmd); + if (err) + return err; + + end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; + + while (time_before(jiffies, end_jiffies)) { + schedule(); + + err = eval_vpcr(handle, 1, &val); + if (err) + return err; + + if (val == 0) + return eval_vpcr(handle, 0, data); + } + + acpi_handle_err(handle, "timeout in %s\n", __func__); + + return -ETIMEDOUT; +} + +static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long data) +{ + unsigned long end_jiffies, val; + int err; + + err = eval_vpcw(handle, 0, data); + if (err) + return err; + + err = eval_vpcw(handle, 1, cmd); + if (err) + return err; + + end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; + + while (time_before(jiffies, end_jiffies)) { + schedule(); + + err = eval_vpcr(handle, 1, &val); + if (err) + return err; + + if (val == 0) + return 0; + } + + acpi_handle_err(handle, "timeout in %s\n", __func__); + + return -ETIMEDOUT; +} + 
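
Aside: read_ec_data() and write_ec_cmd(), moved into ideapad-laptop.c by the hunk above, follow a write-command-then-poll protocol: issue the request via VPCW, poll VPCR until the EC reports idle, then read the result, giving up after IDEAPAD_EC_TIMEOUT (200 ms). Below is a minimal portable C sketch of that bounded-poll pattern; it is not part of the patch, ec_busy() and ec_read_result() are hypothetical stand-ins for the VPCR status/data reads, and clock_gettime()/nanosleep() replace the kernel's jiffies/schedule().

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>
#include <errno.h>

#define EC_TIMEOUT_MS 200	/* mirrors IDEAPAD_EC_TIMEOUT */

/* Hypothetical EC accessors: busy for the first three polls, then ready. */
static int ec_busy(void)
{
	static int polls;

	return polls++ < 3;
}

static int ec_read_result(long *result)
{
	*result = 0x42;
	return 0;
}

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static int read_ec(long *result)
{
	long long deadline = now_ms() + EC_TIMEOUT_MS;
	struct timespec delay = { 0, 1000000 };	/* 1 ms between polls */

	/* A real implementation would issue the VPCW "read" command here. */
	while (now_ms() < deadline) {
		if (!ec_busy())
			return ec_read_result(result);	/* command completed */
		nanosleep(&delay, NULL);
	}
	return -ETIMEDOUT;
}

int main(void)
{
	long value;

	if (read_ec(&value) == 0)
		printf("EC returned 0x%lx\n", value);
	else
		printf("EC timed out\n");
	return 0;
}
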
/* * debugfs */ @@ -419,13 +554,14 @@ static ssize_t camera_power_show(struct device *dev, char *buf) { struct ideapad_private *priv = dev_get_drvdata(dev); - unsigned long result; + unsigned long result = 0; int err; - scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result); - if (err) - return err; + if (err) + return err; + } return sysfs_emit(buf, "%d\n", !!result); } @@ -442,10 +578,11 @@ static ssize_t camera_power_store(struct device *dev, if (err) return err; - scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state); - if (err) - return err; + if (err) + return err; + } return count; } @@ -493,13 +630,14 @@ static ssize_t fan_mode_show(struct device *dev, char *buf) { struct ideapad_private *priv = dev_get_drvdata(dev); - unsigned long result; + unsigned long result = 0; int err; - scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result); - if (err) - return err; + if (err) + return err; + } return sysfs_emit(buf, "%lu\n", result); } @@ -519,10 +657,11 @@ static ssize_t fan_mode_store(struct device *dev, if (state > 4 || state == 3) return -EINVAL; - scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state); - if (err) - return err; + if (err) + return err; + } return count; } @@ -602,13 +741,14 @@ static ssize_t touchpad_show(struct device *dev, char *buf) { struct ideapad_private *priv = dev_get_drvdata(dev); - unsigned long result; + unsigned long result = 0; int err; - scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result); - if (err) - return err; + if (err) + return err; + } priv->r_touchpad_val = result; @@ -627,10 +767,11 @@ static ssize_t touchpad_store(struct device *dev, if (err) return err; - scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state); - if (err) - return err; + if (err) + return err; + } priv->r_touchpad_val = state; @@ -1282,7 +1423,7 @@ static int ideapad_backlight_update_status(struct backlight_device *blightdev) return err; err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER, - blightdev->props.power != FB_BLANK_POWERDOWN); + blightdev->props.power != BACKLIGHT_POWER_OFF); if (err) return err; @@ -1332,7 +1473,7 @@ static int ideapad_backlight_init(struct ideapad_private *priv) priv->blightdev = blightdev; blightdev->props.brightness = now; - blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; + blightdev->props.power = power ? BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF; backlight_update_status(blightdev); @@ -1358,7 +1499,7 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv) if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power)) return; - blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; + blightdev->props.power = power ? 
BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF; } static void ideapad_backlight_notify_brightness(struct ideapad_private *priv) diff --git a/drivers/platform/x86/ideapad-laptop.h b/drivers/platform/x86/ideapad-laptop.h index 948cc61800a950..1e52f2aa0aac38 100644 --- a/drivers/platform/x86/ideapad-laptop.h +++ b/drivers/platform/x86/ideapad-laptop.h @@ -9,9 +9,6 @@ #ifndef _IDEAPAD_LAPTOP_H_ #define _IDEAPAD_LAPTOP_H_ -#include -#include -#include #include enum ideapad_laptop_notifier_actions { @@ -22,140 +19,4 @@ int ideapad_laptop_register_notifier(struct notifier_block *nb); int ideapad_laptop_unregister_notifier(struct notifier_block *nb); void ideapad_laptop_call_notifier(unsigned long action, void *data); -enum { - VPCCMD_R_VPC1 = 0x10, - VPCCMD_R_BL_MAX, - VPCCMD_R_BL, - VPCCMD_W_BL, - VPCCMD_R_WIFI, - VPCCMD_W_WIFI, - VPCCMD_R_BT, - VPCCMD_W_BT, - VPCCMD_R_BL_POWER, - VPCCMD_R_NOVO, - VPCCMD_R_VPC2, - VPCCMD_R_TOUCHPAD, - VPCCMD_W_TOUCHPAD, - VPCCMD_R_CAMERA, - VPCCMD_W_CAMERA, - VPCCMD_R_3G, - VPCCMD_W_3G, - VPCCMD_R_ODD, /* 0x21 */ - VPCCMD_W_FAN, - VPCCMD_R_RF, - VPCCMD_W_RF, - VPCCMD_W_YMC = 0x2A, - VPCCMD_R_FAN = 0x2B, - VPCCMD_R_SPECIAL_BUTTONS = 0x31, - VPCCMD_W_BL_POWER = 0x33, -}; - -static inline int eval_int_with_arg(acpi_handle handle, const char *name, unsigned long arg, unsigned long *res) -{ - struct acpi_object_list params; - unsigned long long result; - union acpi_object in_obj; - acpi_status status; - - params.count = 1; - params.pointer = &in_obj; - in_obj.type = ACPI_TYPE_INTEGER; - in_obj.integer.value = arg; - - status = acpi_evaluate_integer(handle, (char *)name, &params, &result); - if (ACPI_FAILURE(status)) - return -EIO; - - if (res) - *res = result; - - return 0; -} - -static inline int eval_vpcr(acpi_handle handle, unsigned long cmd, unsigned long *res) -{ - return eval_int_with_arg(handle, "VPCR", cmd, res); -} - -static inline int eval_vpcw(acpi_handle handle, unsigned long cmd, unsigned long data) -{ - struct acpi_object_list params; - union acpi_object in_obj[2]; - acpi_status status; - - params.count = 2; - params.pointer = in_obj; - in_obj[0].type = ACPI_TYPE_INTEGER; - in_obj[0].integer.value = cmd; - in_obj[1].type = ACPI_TYPE_INTEGER; - in_obj[1].integer.value = data; - - status = acpi_evaluate_object(handle, "VPCW", &params, NULL); - if (ACPI_FAILURE(status)) - return -EIO; - - return 0; -} - -#define IDEAPAD_EC_TIMEOUT 200 /* in ms */ - -static inline int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *data) -{ - unsigned long end_jiffies, val; - int err; - - err = eval_vpcw(handle, 1, cmd); - if (err) - return err; - - end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; - - while (time_before(jiffies, end_jiffies)) { - schedule(); - - err = eval_vpcr(handle, 1, &val); - if (err) - return err; - - if (val == 0) - return eval_vpcr(handle, 0, data); - } - - acpi_handle_err(handle, "timeout in %s\n", __func__); - - return -ETIMEDOUT; -} - -static inline int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long data) -{ - unsigned long end_jiffies, val; - int err; - - err = eval_vpcw(handle, 0, data); - if (err) - return err; - - err = eval_vpcw(handle, 1, cmd); - if (err) - return err; - - end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; - - while (time_before(jiffies, end_jiffies)) { - schedule(); - - err = eval_vpcr(handle, 1, &val); - if (err) - return err; - - if (val == 0) - return 0; - } - - acpi_handle_err(handle, "timeout in %s\n", __func__); - - return -ETIMEDOUT; -} - -#undef 
IDEAPAD_EC_TIMEOUT #endif /* !_IDEAPAD_LAPTOP_H_ */ diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 10cd65497cc162..445e7a59beb414 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "../dual_accel_detect.h" @@ -331,10 +332,8 @@ static int intel_hid_set_enable(struct device *device, bool enable) acpi_handle handle = ACPI_HANDLE(device); /* Enable|disable features - power button is always enabled */ - if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN, - enable)) { - dev_warn(device, "failed to %sable hotkeys\n", - enable ? "en" : "dis"); + if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN, enable)) { + dev_warn(device, "failed to %s hotkeys\n", str_enable_disable(enable)); return -EIO; } diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 33412a584836d6..bc252b883210a5 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -32,6 +32,7 @@ bool *ifs_pkg_auth; static const struct ifs_test_caps scan_test = { .integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT, .test_num = IFS_TYPE_SAF, + .image_suffix = "scan", }; static const struct ifs_test_caps array_test = { @@ -39,9 +40,32 @@ static const struct ifs_test_caps array_test = { .test_num = IFS_TYPE_ARRAY_BIST, }; +static const struct ifs_test_msrs scan_msrs = { + .copy_hashes = MSR_COPY_SCAN_HASHES, + .copy_hashes_status = MSR_SCAN_HASHES_STATUS, + .copy_chunks = MSR_AUTHENTICATE_AND_COPY_CHUNK, + .copy_chunks_status = MSR_CHUNKS_AUTHENTICATION_STATUS, + .test_ctrl = MSR_SAF_CTRL, +}; + +static const struct ifs_test_msrs sbaf_msrs = { + .copy_hashes = MSR_COPY_SBAF_HASHES, + .copy_hashes_status = MSR_SBAF_HASHES_STATUS, + .copy_chunks = MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK, + .copy_chunks_status = MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS, + .test_ctrl = MSR_SBAF_CTRL, +}; + +static const struct ifs_test_caps sbaf_test = { + .integrity_cap_bit = MSR_INTEGRITY_CAPS_SBAF_BIT, + .test_num = IFS_TYPE_SBAF, + .image_suffix = "sbft", +}; + static struct ifs_device ifs_devices[] = { [IFS_TYPE_SAF] = { .test_caps = &scan_test, + .test_msrs = &scan_msrs, .misc = { .name = "intel_ifs_0", .minor = MISC_DYNAMIC_MINOR, @@ -56,6 +80,15 @@ static struct ifs_device ifs_devices[] = { .groups = plat_ifs_array_groups, }, }, + [IFS_TYPE_SBAF] = { + .test_caps = &sbaf_test, + .test_msrs = &sbaf_msrs, + .misc = { + .name = "intel_ifs_2", + .minor = MISC_DYNAMIC_MINOR, + .groups = plat_ifs_groups, + }, + }, }; #define IFS_NUMTESTS ARRAY_SIZE(ifs_devices) diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 56b9f3e3cf76a0..5c3c0dfa1bf833 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -126,11 +126,40 @@ * The driver does not make use of this, it only tests one core at a time. * * .. [#f1] https://github.com/intel/TBD + * + * + * Structural Based Functional Test at Field (SBAF): + * ------------------------------------------------- + * + * SBAF is a new type of testing that provides comprehensive core test + * coverage complementing Scan at Field (SAF) testing. SBAF mimics the + * manufacturing screening environment and leverages the same test suite. + * It makes use of Design For Test (DFT) observation sites and features + * to maximize coverage in minimum time. 
+ * + * Similar to the SAF test, SBAF isolates the core under test from the + * rest of the system during execution. Upon completion, the core + * seamlessly resets to its pre-test state and resumes normal operation. + * Any machine checks or hangs encountered during the test are confined to + * the isolated core, preventing disruption to the overall system. + * + * Like the SAF test, the SBAF test is also divided into multiple batches, + * and each batch test can take hundreds of milliseconds (100-200 ms) to + * complete. If such a lengthy interruption is undesirable, it is + * recommended to relocate the time-sensitive applications to other cores. */ #include #include #define MSR_ARRAY_BIST 0x00000105 + +#define MSR_COPY_SBAF_HASHES 0x000002b8 +#define MSR_SBAF_HASHES_STATUS 0x000002b9 +#define MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK 0x000002ba +#define MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS 0x000002bb +#define MSR_ACTIVATE_SBAF 0x000002bc +#define MSR_SBAF_STATUS 0x000002bd + #define MSR_COPY_SCAN_HASHES 0x000002c2 #define MSR_SCAN_HASHES_STATUS 0x000002c3 #define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4 @@ -140,6 +169,7 @@ #define MSR_ARRAY_TRIGGER 0x000002d6 #define MSR_ARRAY_STATUS 0x000002d7 #define MSR_SAF_CTRL 0x000004f0 +#define MSR_SBAF_CTRL 0x000004f8 #define SCAN_NOT_TESTED 0 #define SCAN_TEST_PASS 1 @@ -147,6 +177,7 @@ #define IFS_TYPE_SAF 0 #define IFS_TYPE_ARRAY_BIST 1 +#define IFS_TYPE_SBAF 2 #define ARRAY_GEN0 0 #define ARRAY_GEN1 1 @@ -196,7 +227,8 @@ union ifs_chunks_auth_status_gen2 { u16 valid_chunks; u16 total_chunks; u32 error_code :8; - u32 rsvd2 :24; + u32 rsvd2 :8; + u32 max_bundle :16; }; }; @@ -253,6 +285,34 @@ union ifs_array { }; }; +/* MSR_ACTIVATE_SBAF bit fields */ +union ifs_sbaf { + u64 data; + struct { + u32 bundle_idx :9; + u32 rsvd1 :5; + u32 pgm_idx :2; + u32 rsvd2 :16; + u32 delay :31; + u32 sigmce :1; + }; +}; + +/* MSR_SBAF_STATUS bit fields */ +union ifs_sbaf_status { + u64 data; + struct { + u32 bundle_idx :9; + u32 rsvd1 :5; + u32 pgm_idx :2; + u32 rsvd2 :16; + u32 error_code :8; + u32 rsvd3 :21; + u32 test_fail :1; + u32 sbaf_status :2; + }; +}; + /* * Driver populated error-codes * 0xFD: Test timed out before completing all the chunks. 
@@ -261,9 +321,28 @@ union ifs_array { #define IFS_SW_TIMEOUT 0xFD #define IFS_SW_PARTIAL_COMPLETION 0xFE +#define IFS_SUFFIX_SZ 5 + struct ifs_test_caps { int integrity_cap_bit; int test_num; + char image_suffix[IFS_SUFFIX_SZ]; +}; + +/** + * struct ifs_test_msrs - MSRs used in IFS tests + * @copy_hashes: Copy test hash data + * @copy_hashes_status: Status of copied test hash data + * @copy_chunks: Copy chunks of the test data + * @copy_chunks_status: Status of the copied test data chunks + * @test_ctrl: Control the test attributes + */ +struct ifs_test_msrs { + u32 copy_hashes; + u32 copy_hashes_status; + u32 copy_chunks; + u32 copy_chunks_status; + u32 test_ctrl; }; /** @@ -278,6 +357,7 @@ struct ifs_test_caps { * @generation: IFS test generation enumerated by hardware * @chunk_size: size of a test chunk * @array_gen: test generation of array test + * @max_bundle: maximum bundle index */ struct ifs_data { int loaded_version; @@ -290,6 +370,7 @@ struct ifs_data { u32 generation; u32 chunk_size; u32 array_gen; + u32 max_bundle; }; struct ifs_work { @@ -299,6 +380,7 @@ struct ifs_work { struct ifs_device { const struct ifs_test_caps *test_caps; + const struct ifs_test_msrs *test_msrs; struct ifs_data rw_data; struct miscdevice misc; }; @@ -319,6 +401,14 @@ static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev) return d->test_caps; } +static inline const struct ifs_test_msrs *ifs_get_test_msrs(struct device *dev) +{ + struct miscdevice *m = dev_get_drvdata(dev); + struct ifs_device *d = container_of(m, struct ifs_device, misc); + + return d->test_msrs; +} + extern bool *ifs_pkg_auth; int ifs_load_firmware(struct device *dev); int do_core_test(int cpu, struct device *dev); diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 39f19cb517498b..de54bd1a5970bf 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -118,15 +118,17 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) union ifs_scan_hashes_status hashes_status; union ifs_chunks_auth_status chunk_status; struct device *dev = local_work->dev; + const struct ifs_test_msrs *msrs; int i, num_chunks, chunk_size; struct ifs_data *ifsd; u64 linear_addr, base; u32 err_code; ifsd = ifs_get_data(dev); + msrs = ifs_get_test_msrs(dev); /* run scan hash copy */ - wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); - rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + wrmsrl(msrs->copy_hashes, ifs_hash_ptr); + rdmsrl(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ num_chunks = hashes_status.num_chunks; @@ -147,8 +149,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) linear_addr = base + i * chunk_size; linear_addr |= i; - wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + wrmsrl(msrs->copy_chunks, linear_addr); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); ifsd->valid_chunks = chunk_status.valid_chunks; err_code = chunk_status.error_code; @@ -180,6 +182,7 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) union ifs_scan_hashes_status_gen2 hashes_status; union ifs_chunks_auth_status_gen2 chunk_status; u32 err_code, valid_chunks, total_chunks; + const struct ifs_test_msrs *msrs; int i, num_chunks, chunk_size; union meta_data *ifs_meta; int starting_chunk_nr; @@ -189,10 +192,11 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) int retry_count; 
ifsd = ifs_get_data(dev); + msrs = ifs_get_test_msrs(dev); if (need_copy_scan_hashes(ifsd)) { - wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); - rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + wrmsrl(msrs->copy_hashes, ifs_hash_ptr); + rdmsrl(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ chunk_size = hashes_status.chunk_size * SZ_1K; @@ -212,8 +216,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) } if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { - wrmsrl(MSR_SAF_CTRL, INVALIDATE_STRIDE); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); if (chunk_status.valid_chunks != 0) { dev_err(dev, "Couldn't invalidate installed stride - %d\n", chunk_status.valid_chunks); @@ -234,9 +238,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) chunk_table[1] = linear_addr; do { local_irq_disable(); - wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table); + wrmsrl(msrs->copy_chunks, (u64)chunk_table); local_irq_enable(); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); err_code = chunk_status.error_code; } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); @@ -257,20 +261,22 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) return -EIO; } ifsd->valid_chunks = valid_chunks; + ifsd->max_bundle = chunk_status.max_bundle; return 0; } static int validate_ifs_metadata(struct device *dev) { + const struct ifs_test_caps *test = ifs_get_test_caps(dev); struct ifs_data *ifsd = ifs_get_data(dev); union meta_data *ifs_meta; char test_file[64]; int ret = -EINVAL; - snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.scan", + snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.%s", boot_cpu_data.x86, boot_cpu_data.x86_model, - boot_cpu_data.x86_stepping, ifsd->cur_batch); + boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix); ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS); if (!ifs_meta) { @@ -300,6 +306,12 @@ static int validate_ifs_metadata(struct device *dev) return ret; } + if (ifs_meta->test_type != test->test_num) { + dev_warn(dev, "Metadata test_type %d mismatches with device type\n", + ifs_meta->test_type); + return ret; + } + return 0; } @@ -387,9 +399,9 @@ int ifs_load_firmware(struct device *dev) char scan_path[64]; int ret; - snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan", + snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s", test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model, - boot_cpu_data.x86_stepping, ifsd->cur_batch); + boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix); ret = request_firmware_direct(&fw, scan_path, dev); if (ret) { diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index be3d51ed0e4740..f978dd05d4d8b2 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -29,6 +29,13 @@ struct run_params { union ifs_status status; }; +struct sbaf_run_params { + struct ifs_data *ifsd; + int *retry_cnt; + union ifs_sbaf *activate; + union ifs_sbaf_status status; +}; + /* * Number of TSC cycles that a logical CPU will wait for the other * logical CPU on the core in the WRMSR(ACTIVATE_SCAN). 
@@ -146,6 +153,7 @@ static bool can_restart(union ifs_status status) #define SPINUNIT 100 /* 100 nsec */ static atomic_t array_cpus_in; static atomic_t scan_cpus_in; +static atomic_t sbaf_cpus_in; /* * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() @@ -387,6 +395,225 @@ static void ifs_array_test_gen1(int cpu, struct device *dev) ifsd->status = SCAN_TEST_PASS; } +#define SBAF_STATUS_PASS 0 +#define SBAF_STATUS_SIGN_FAIL 1 +#define SBAF_STATUS_INTR 2 +#define SBAF_STATUS_TEST_FAIL 3 + +enum sbaf_status_err_code { + IFS_SBAF_NO_ERROR = 0, + IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN = 1, + IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS = 2, + IFS_SBAF_UNASSIGNED_ERROR_CODE3 = 3, + IFS_SBAF_INVALID_BUNDLE_INDEX = 4, + IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS = 5, + IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY = 6, + IFS_SBAF_UNASSIGNED_ERROR_CODE7 = 7, + IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, + IFS_SBAF_INTERRUPTED_DURING_EXECUTION = 9, + IFS_SBAF_INVALID_PROGRAM_INDEX = 0xA, + IFS_SBAF_CORRUPTED_CHUNK = 0xB, + IFS_SBAF_DID_NOT_START = 0xC, +}; + +static const char * const sbaf_test_status[] = { + [IFS_SBAF_NO_ERROR] = "SBAF no error", + [IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.", + [IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SBAF coordination.", + [IFS_SBAF_UNASSIGNED_ERROR_CODE3] = "Unassigned error code 0x3", + [IFS_SBAF_INVALID_BUNDLE_INDEX] = "Non-valid sbaf bundles. Reload test image", + [IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.", + [IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SBAF currently", + [IFS_SBAF_UNASSIGNED_ERROR_CODE7] = "Unassigned error code 0x7", + [IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", + [IFS_SBAF_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SBAF start", + [IFS_SBAF_INVALID_PROGRAM_INDEX] = "SBAF program index not valid", + [IFS_SBAF_CORRUPTED_CHUNK] = "SBAF operation aborted due to corrupted chunk", + [IFS_SBAF_DID_NOT_START] = "SBAF operation did not start", +}; + +static void sbaf_message_not_tested(struct device *dev, int cpu, u64 status_data) +{ + union ifs_sbaf_status status = (union ifs_sbaf_status)status_data; + + if (status.error_code < ARRAY_SIZE(sbaf_test_status)) { + dev_info(dev, "CPU(s) %*pbl: SBAF operation did not start. %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + sbaf_test_status[status.error_code]); + } else if (status.error_code == IFS_SW_TIMEOUT) { + dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) { + dev_info(dev, "CPU(s) %*pbl: %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + "Not all SBAF bundles executed. 
Maximum forward progress retries exceeded"); + } else { + dev_info(dev, "CPU(s) %*pbl: SBAF unknown status %llx\n", + cpumask_pr_args(cpu_smt_mask(cpu)), status.data); + } +} + +static void sbaf_message_fail(struct device *dev, int cpu, union ifs_sbaf_status status) +{ + /* Failed signature check is set when SBAF signature did not match the expected value */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed signature check\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } + + /* Failed to reach end of test */ + if (status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed to complete test\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } +} + +static bool sbaf_bundle_completed(union ifs_sbaf_status status) +{ + return !(status.sbaf_status || status.error_code); +} + +static bool sbaf_can_restart(union ifs_sbaf_status status) +{ + enum sbaf_status_err_code err_code = status.error_code; + + /* Signature for chunk is bad, or scan test failed */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) + return false; + + switch (err_code) { + case IFS_SBAF_NO_ERROR: + case IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN: + case IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS: + case IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT: + case IFS_SBAF_INTERRUPTED_DURING_EXECUTION: + return true; + case IFS_SBAF_UNASSIGNED_ERROR_CODE3: + case IFS_SBAF_INVALID_BUNDLE_INDEX: + case IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS: + case IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY: + case IFS_SBAF_UNASSIGNED_ERROR_CODE7: + case IFS_SBAF_INVALID_PROGRAM_INDEX: + case IFS_SBAF_CORRUPTED_CHUNK: + case IFS_SBAF_DID_NOT_START: + break; + } + return false; +} + +/* + * Execute the SBAF test. Called "simultaneously" on all threads of a core + * at high priority using the stop_cpus mechanism. + */ +static int dosbaf(void *data) +{ + struct sbaf_run_params *run_params = data; + int cpu = smp_processor_id(); + union ifs_sbaf_status status; + struct ifs_data *ifsd; + int first; + + ifsd = run_params->ifsd; + + /* Only the first logical CPU on a core reports result */ + first = cpumask_first(cpu_smt_mask(cpu)); + wait_for_sibling_cpu(&sbaf_cpus_in, NSEC_PER_SEC); + + /* + * This WRMSR will wait for other HT threads to also write + * to this MSR (at most for activate.delay cycles). Then it + * starts scan of each requested bundle. The core test happens + * during the "execution" of the WRMSR. 
+ */ + wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); + rdmsrl(MSR_SBAF_STATUS, status.data); + trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); + + /* Pass back the result of the test */ + if (cpu == first) + run_params->status = status; + + return 0; +} + +static void ifs_sbaf_test_core(int cpu, struct device *dev) +{ + struct sbaf_run_params run_params; + union ifs_sbaf_status status = {}; + union ifs_sbaf activate; + unsigned long timeout; + struct ifs_data *ifsd; + int stop_bundle; + int retries; + + ifsd = ifs_get_data(dev); + + activate.data = 0; + activate.delay = IFS_THREAD_WAIT; + + timeout = jiffies + 2 * HZ; + retries = MAX_IFS_RETRIES; + activate.bundle_idx = 0; + stop_bundle = ifsd->max_bundle; + + while (activate.bundle_idx <= stop_bundle) { + if (time_after(jiffies, timeout)) { + status.error_code = IFS_SW_TIMEOUT; + break; + } + + atomic_set(&sbaf_cpus_in, 0); + + run_params.ifsd = ifsd; + run_params.activate = &activate; + run_params.retry_cnt = &retries; + stop_core_cpuslocked(cpu, dosbaf, &run_params); + + status = run_params.status; + + if (sbaf_bundle_completed(status)) { + activate.bundle_idx = status.bundle_idx + 1; + activate.pgm_idx = 0; + retries = MAX_IFS_RETRIES; + continue; + } + + /* Some cases can be retried, give up for others */ + if (!sbaf_can_restart(status)) + break; + + if (status.pgm_idx == activate.pgm_idx) { + /* If no progress retry */ + if (--retries == 0) { + if (status.error_code == IFS_NO_ERROR) + status.error_code = IFS_SW_PARTIAL_COMPLETION; + break; + } + } else { + /* if some progress, more pgms remaining in bundle, reset retries */ + retries = MAX_IFS_RETRIES; + activate.bundle_idx = status.bundle_idx; + activate.pgm_idx = status.pgm_idx; + } + } + + /* Update status for this core */ + ifsd->scan_details = status.data; + + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + ifsd->status = SCAN_TEST_FAIL; + sbaf_message_fail(dev, cpu, status); + } else if (status.error_code || status.sbaf_status == SBAF_STATUS_INTR || + (activate.bundle_idx < stop_bundle)) { + ifsd->status = SCAN_NOT_TESTED; + sbaf_message_not_tested(dev, cpu, status.data); + } else { + ifsd->status = SCAN_TEST_PASS; + } +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. 
Once all sibling threads wake up, the scan test gets executed and @@ -420,6 +647,12 @@ int do_core_test(int cpu, struct device *dev) else ifs_array_test_gen1(cpu, dev); break; + case IFS_TYPE_SBAF: + if (!ifsd->loaded) + ret = -EPERM; + else + ifs_sbaf_test_core(cpu, dev); + break; default: ret = -EINVAL; } diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile index 9f16cb51439734..a8aba07bf1dc2a 100644 --- a/drivers/platform/x86/intel/int3472/Makefile +++ b/drivers/platform/x86/intel/int3472/Makefile @@ -1,4 +1,7 @@ obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \ - intel_skl_int3472_tps68470.o -intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o common.o -intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o + intel_skl_int3472_tps68470.o \ + intel_skl_int3472_common.o +intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o +intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o + +intel_skl_int3472_common-y += common.o diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c index 9db2bb0bbba4bd..b3a2578e06c192 100644 --- a/drivers/platform/x86/intel/int3472/common.c +++ b/drivers/platform/x86/intel/int3472/common.c @@ -29,6 +29,7 @@ union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *i return obj; } +EXPORT_SYMBOL_GPL(skl_int3472_get_acpi_buffer); int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb) { @@ -52,6 +53,7 @@ int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb) kfree(obj); return ret; } +EXPORT_SYMBOL_GPL(skl_int3472_fill_cldb); /* sensor_adev_ret may be NULL, name_ret must not be NULL */ int skl_int3472_get_sensor_adev_and_name(struct device *dev, @@ -80,3 +82,8 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev, return ret; } +EXPORT_SYMBOL_GPL(skl_int3472_get_sensor_adev_and_name); + +MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver library"); +MODULE_AUTHOR("Daniel Scally "); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 07b302e0934073..3de463c3d13b8e 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "common.h" @@ -69,11 +70,7 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry, if (!adev) return -ENODEV; - table_entry->key = acpi_dev_name(adev); - table_entry->chip_hwnum = agpio->pin_table[0]; - table_entry->con_id = func; - table_entry->idx = 0; - table_entry->flags = polarity; + *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, polarity); return 0; } @@ -234,7 +231,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func, agpio->resource_source.string_ptr, agpio->pin_table[0], - (polarity == GPIO_ACTIVE_HIGH) ? 
"high" : "low"); + str_high_low(polarity == GPIO_ACTIVE_HIGH)); switch (type) { case INT3472_GPIO_TYPE_RESET: diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c index 217630f40c3f8b..265cef327b4fea 100644 --- a/drivers/platform/x86/intel/oaktrail.c +++ b/drivers/platform/x86/intel/oaktrail.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -250,7 +249,7 @@ static int oaktrail_backlight_init(void) oaktrail_bl_device = bd; bd->props.brightness = get_backlight_brightness(bd); - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); return 0; diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c index e7878558fd9090..9d9c07f44ff61c 100644 --- a/drivers/platform/x86/intel/pmc/adl.c +++ b/drivers/platform/x86/intel/pmc/adl.c @@ -295,6 +295,8 @@ const struct pmc_reg_map adl_reg_map = { .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, .lpm_num_modes = ADL_LPM_NUM_MODES, .lpm_num_maps = ADL_LPM_NUM_MAPS, diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c index dd72974bf71e2a..513c02670c5aa9 100644 --- a/drivers/platform/x86/intel/pmc/cnp.c +++ b/drivers/platform/x86/intel/pmc/cnp.c @@ -200,6 +200,8 @@ const struct pmc_reg_map cnp_reg_map = { .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED, .etr3_offset = ETR3_OFFSET, }; diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 01ae71c6df59d3..ecb47f8b4f834d 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -714,6 +715,49 @@ static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker); +static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) { + struct pmc *pmc; + u32 ltr_ign; + + pmc = pmcdev->pmcs[i]; + if (!pmc) + continue; + + guard(mutex)(&pmcdev->lock); + pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset); + + /* ltr_ignore_max is the max index value for LTR ignore register */ + ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0); + pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign); + } + + /* + * Ignoring ME during suspend is blocking platforms with ADL PCH to get to + * deeper S0ix substate. 
+ */ + pmc_core_send_ltr_ignore(pmcdev, 6, 0); +} + +static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) { + struct pmc *pmc; + + pmc = pmcdev->pmcs[i]; + if (!pmc) + continue; + + guard(mutex)(&pmcdev->lock); + pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign); + } +} + static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset, const int lpm_adj_x2) { @@ -728,12 +772,11 @@ static int pmc_core_substate_res_show(struct seq_file *s, void *unused) struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2; u32 offset = pmc->map->lpm_residency_offset; - unsigned int i; int mode; seq_printf(s, "%-10s %-15s\n", "Substate", "Residency"); - pmc_for_each_mode(i, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode], adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2)); } @@ -787,20 +830,21 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs); static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index) { struct pmc_dev *pmcdev = s->private; - unsigned int i; int mode; seq_printf(s, "%30s |", "Element"); - pmc_for_each_mode(i, mode, pmcdev) + pmc_for_each_mode(mode, pmcdev) seq_printf(s, " %9s |", pmc_lpm_modes[mode]); - seq_printf(s, " %9s |\n", "Status"); + seq_printf(s, " %9s |", "Status"); + seq_printf(s, " %11s |\n", "Live Status"); } static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; u32 sts_offset; + u32 sts_offset_live; u32 *lpm_req_regs; unsigned int mp, pmc_index; int num_maps; @@ -815,6 +859,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) maps = pmc->map->lpm_sts; num_maps = pmc->map->lpm_num_maps; sts_offset = pmc->map->lpm_status_offset; + sts_offset_live = pmc->map->lpm_live_status_offset; lpm_req_regs = pmc->lpm_req_regs; /* @@ -832,20 +877,24 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) for (mp = 0; mp < num_maps; mp++) { u32 req_mask = 0; u32 lpm_status; + u32 lpm_status_live; const struct pmc_bit_map *map; - int mode, idx, i, len = 32; + int mode, i, len = 32; /* * Capture the requirements and create a mask so that we only * show an element if it's required for at least one of the * enabled low power modes */ - pmc_for_each_mode(idx, mode, pmcdev) + pmc_for_each_mode(mode, pmcdev) req_mask |= lpm_req_regs[mp + (mode * num_maps)]; /* Get the last latched status for this map */ lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4)); + /* Get the runtime status for this map */ + lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4)); + /* Loop over elements in this map */ map = maps[mp]; for (i = 0; map[i].name && i < len; i++) { @@ -863,7 +912,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name); /* Loop over the enabled states and display if required */ - pmc_for_each_mode(idx, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { bool required = lpm_req_regs[mp + (mode * num_maps)] & bit_mask; seq_printf(s, " %9s |", required ? "Required" : " "); @@ -872,6 +921,9 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) /* In Status column, show the last captured state of this agent */ seq_printf(s, " %9s |", lpm_status & bit_mask ? 
"Yes" : " "); + /* In Live status column, show the live state of this agent */ + seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " "); + seq_puts(s, "\n"); } } @@ -925,7 +977,6 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; - unsigned int idx; bool c10; u32 reg; int mode; @@ -939,7 +990,7 @@ static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused) c10 = true; } - pmc_for_each_mode(idx, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { if ((BIT(mode) & reg) && !c10) seq_printf(s, " [%s]", pmc_lpm_modes[mode]); else @@ -960,7 +1011,6 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; bool clear = false, c10 = false; unsigned char buf[8]; - unsigned int idx; int m, mode; u32 reg; @@ -979,7 +1029,7 @@ static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, mode = sysfs_match_string(pmc_lpm_modes, buf); /* Check string matches enabled mode */ - pmc_for_each_mode(idx, m, pmcdev) + pmc_for_each_mode(m, pmcdev) if (mode == m) break; @@ -1208,6 +1258,38 @@ static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev) return val == 1; } +/* + * Enable or disable ACPI PM Timer + * + * This function is intended to be a callback for ACPI PM suspend/resume event. + * The ACPI PM Timer is enabled on resume only if it was enabled during suspend. + */ +static void pmc_core_acpi_pm_timer_suspend_resume(void *data, bool suspend) +{ + struct pmc_dev *pmcdev = data; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_reg_map *map = pmc->map; + bool enabled; + u32 reg; + + if (!map->acpi_pm_tmr_ctl_offset) + return; + + guard(mutex)(&pmcdev->lock); + + if (!suspend && !pmcdev->enable_acpi_pm_timer_on_resume) + return; + + reg = pmc_core_reg_read(pmc, map->acpi_pm_tmr_ctl_offset); + enabled = !(reg & map->acpi_pm_tmr_disable_bit); + if (suspend) + reg |= map->acpi_pm_tmr_disable_bit; + else + reg &= ~map->acpi_pm_tmr_disable_bit; + pmc_core_reg_write(pmc, map->acpi_pm_tmr_ctl_offset, reg); + + pmcdev->enable_acpi_pm_timer_on_resume = suspend && enabled; +} static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) { @@ -1404,6 +1486,7 @@ static int pmc_core_probe(struct platform_device *pdev) struct pmc_dev *pmcdev; const struct x86_cpu_id *cpu_id; int (*core_init)(struct pmc_dev *pmcdev); + const struct pmc_reg_map *map; struct pmc *primary_pmc; int ret; @@ -1462,6 +1545,11 @@ static int pmc_core_probe(struct platform_device *pdev) pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) * pmc_core_adjust_slp_s0_step(primary_pmc, 1)); + map = primary_pmc->map; + if (map->acpi_pm_tmr_ctl_offset) + acpi_pmtmr_register_suspend_resume_callback(pmc_core_acpi_pm_timer_suspend_resume, + pmcdev); + device_initialized = true; dev_info(&pdev->dev, " initialized\n"); @@ -1471,6 +1559,12 @@ static int pmc_core_probe(struct platform_device *pdev) static void pmc_core_remove(struct platform_device *pdev) { struct pmc_dev *pmcdev = platform_get_drvdata(pdev); + const struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + const struct pmc_reg_map *map = pmc->map; + + if (map->acpi_pm_tmr_ctl_offset) + acpi_pmtmr_unregister_suspend_resume_callback(); + pmc_core_dbgfs_unregister(pmcdev); pmc_core_clean_structure(pdev); } @@ -1479,6 +1573,10 @@ static bool warn_on_s0ix_failures; module_param(warn_on_s0ix_failures, bool, 0644); MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix 
failures"); +static bool ltr_ignore_all_suspend = true; +module_param(ltr_ignore_all_suspend, bool, 0644); +MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend"); + static __maybe_unused int pmc_core_suspend(struct device *dev) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); @@ -1488,6 +1586,9 @@ static __maybe_unused int pmc_core_suspend(struct device *dev) if (pmcdev->suspend) pmcdev->suspend(pmcdev); + if (ltr_ignore_all_suspend) + pmc_core_ltr_ignore_all(pmcdev); + /* Check if the syspend will actually use S0ix */ if (pm_suspend_via_firmware()) return 0; @@ -1594,6 +1695,9 @@ static __maybe_unused int pmc_core_resume(struct device *dev) { struct pmc_dev *pmcdev = dev_get_drvdata(dev); + if (ltr_ignore_all_suspend) + pmc_core_ltr_restore_all(pmcdev); + if (pmcdev->resume) return pmcdev->resume(pmcdev); diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h index ea04de7eb9e846..75fd593a7b0ffd 100644 --- a/drivers/platform/x86/intel/pmc/core.h +++ b/drivers/platform/x86/intel/pmc/core.h @@ -68,6 +68,8 @@ struct telem_endpoint; #define SPT_PMC_LTR_SCC 0x3A0 #define SPT_PMC_LTR_ISH 0x3A4 +#define SPT_PMC_ACPI_PM_TMR_CTL_OFFSET 0x18FC + /* Sunrise Point: PGD PFET Enable Ack Status Registers */ enum ppfear_regs { SPT_PMC_XRAM_PPFEAR0A = 0x590, @@ -148,6 +150,8 @@ enum ppfear_regs { #define SPT_PMC_VRIC1_SLPS0LVEN BIT(13) #define SPT_PMC_VRIC1_XTALSDQDIS BIT(22) +#define SPT_PMC_BIT_ACPI_PM_TMR_DISABLE BIT(1) + /* Cannonlake Power Management Controller register offsets */ #define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4 #define CNP_PMC_PM_CFG_OFFSET 0x1818 @@ -351,6 +355,8 @@ struct pmc_reg_map { const u8 *lpm_reg_index; const u32 pson_residency_offset; const u32 pson_residency_counter_step; + const u32 acpi_pm_tmr_ctl_offset; + const u32 acpi_pm_tmr_disable_bit; }; /** @@ -372,6 +378,7 @@ struct pmc_info { * @map: pointer to pmc_reg_map struct that contains platform * specific attributes * @lpm_req_regs: List of substate requirements + * @ltr_ign: Holds LTR ignore data while suspended * * pmc contains info about one power management controller device. 
*/ @@ -380,6 +387,7 @@ struct pmc { void __iomem *regbase; const struct pmc_reg_map *map; u32 *lpm_req_regs; + u32 ltr_ign; }; /** @@ -424,6 +432,8 @@ struct pmc_dev { u32 die_c6_offset; struct telem_endpoint *punit_ep; struct pmc_info *regmap_list; + + bool enable_acpi_pm_timer_on_resume; }; enum pmc_index { @@ -604,10 +614,12 @@ int lnl_core_init(struct pmc_dev *pmcdev); void cnl_suspend(struct pmc_dev *pmcdev); int cnl_resume(struct pmc_dev *pmcdev); -#define pmc_for_each_mode(i, mode, pmcdev) \ - for (i = 0, mode = pmcdev->lpm_en_modes[i]; \ - i < pmcdev->num_lpm_modes; \ - i++, mode = pmcdev->lpm_en_modes[i]) +#define pmc_for_each_mode(mode, pmcdev) \ + for (unsigned int __i = 0, __cond; \ + __cond = __i < (pmcdev)->num_lpm_modes, \ + __cond && ((mode) = (pmcdev)->lpm_en_modes[__i]), \ + __cond; \ + __i++) #define DEFINE_PMC_CORE_ATTR_WRITE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c index 1bde86c54eb979..c259c96b7dfdb4 100644 --- a/drivers/platform/x86/intel/pmc/core_ssram.c +++ b/drivers/platform/x86/intel/pmc/core_ssram.c @@ -9,11 +9,11 @@ */ #include +#include #include #include #include "core.h" -#include "../vsec.h" #include "../pmt/telemetry.h" #define SSRAM_HDR_SIZE 0x100 @@ -45,7 +45,7 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc) struct telem_endpoint *ep; const u8 *lpm_indices; int num_maps, mode_offset = 0; - int ret, mode, i; + int ret, mode; int lpm_size; u32 guid; @@ -116,7 +116,7 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc) * */ mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET; - pmc_for_each_mode(i, mode, pmcdev) { + pmc_for_each_mode(mode, pmcdev) { u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps); int m; diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c index 71b0fd6cb7d840..cbbd4405446888 100644 --- a/drivers/platform/x86/intel/pmc/icl.c +++ b/drivers/platform/x86/intel/pmc/icl.c @@ -46,6 +46,8 @@ const struct pmc_reg_map icl_reg_map = { .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, .etr3_offset = ETR3_OFFSET, }; diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c index c7d15d864039d0..91f2fa728f5c88 100644 --- a/drivers/platform/x86/intel/pmc/mtl.c +++ b/drivers/platform/x86/intel/pmc/mtl.c @@ -462,6 +462,8 @@ const struct pmc_reg_map mtl_socm_reg_map = { .ppfear_buckets = MTL_SOCM_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .lpm_num_maps = ADL_LPM_NUM_MAPS, .ltr_ignore_max = MTL_SOCM_NUM_IP_IGN_ALLOWED, .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c index e0580de180773c..371b4e30f14263 100644 --- a/drivers/platform/x86/intel/pmc/tgl.c +++ b/drivers/platform/x86/intel/pmc/tgl.c @@ -197,6 +197,8 @@ const struct pmc_reg_map tgl_reg_map = { .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = 
CNP_PMC_READ_DISABLE_BIT, + .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, + .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, .lpm_num_maps = TGL_LPM_NUM_MAPS, .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 4b53940a64e288..c04bb7f97a4db1 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -9,12 +9,12 @@ */ #include +#include #include #include #include #include -#include "../vsec.h" #include "class.h" #define PMT_XA_START 1 @@ -58,6 +58,22 @@ pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count) return count; } +int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf, + void __iomem *addr, u32 count) +{ + if (cb && cb->read_telem) + return cb->read_telem(pdev, guid, buf, count); + + if (guid == GUID_SPR_PUNIT) + /* PUNIT on SPR only supports aligned 64-bit read */ + return pmt_memcpy64_fromio(buf, addr, count); + + memcpy_fromio(buf, addr, count); + + return count; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, INTEL_PMT); + /* * sysfs */ @@ -79,11 +95,8 @@ intel_pmt_read(struct file *filp, struct kobject *kobj, if (count > entry->size - off) count = entry->size - off; - if (entry->guid == GUID_SPR_PUNIT) - /* PUNIT on SPR only supports aligned 64-bit read */ - count = pmt_memcpy64_fromio(buf, entry->base + off, count); - else - memcpy_fromio(buf, entry->base + off, count); + count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf, + entry->base + off, count); return count; } @@ -239,6 +252,7 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, entry->guid = header->guid; entry->size = header->size; + entry->cb = ivdev->priv_data; return 0; } @@ -300,7 +314,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, goto fail_ioremap; if (ns->pmt_add_endpoint) { - ret = ns->pmt_add_endpoint(entry, ivdev->pcidev); + ret = ns->pmt_add_endpoint(ivdev, entry); if (ret) goto fail_add_endpoint; } diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h index d23c63b73ab7d0..a267ac96442301 100644 --- a/drivers/platform/x86/intel/pmt/class.h +++ b/drivers/platform/x86/intel/pmt/class.h @@ -2,13 +2,13 @@ #ifndef _INTEL_PMT_CLASS_H #define _INTEL_PMT_CLASS_H +#include #include #include #include #include #include -#include "../vsec.h" #include "telemetry.h" /* PMT access types */ @@ -24,6 +24,7 @@ struct pci_dev; struct telem_endpoint { struct pci_dev *pcidev; struct telem_header header; + struct pmt_callbacks *cb; void __iomem *base; bool present; struct kref kref; @@ -43,6 +44,7 @@ struct intel_pmt_entry { struct kobject *kobj; void __iomem *disc_table; void __iomem *base; + struct pmt_callbacks *cb; unsigned long base_addr; size_t size; u32 guid; @@ -55,10 +57,12 @@ struct intel_pmt_namespace { const struct attribute_group *attr_grp; int (*pmt_header_decode)(struct intel_pmt_entry *entry, struct device *dev); - int (*pmt_add_endpoint)(struct intel_pmt_entry *entry, - struct pci_dev *pdev); + int (*pmt_add_endpoint)(struct intel_vsec_device *ivdev, + struct intel_pmt_entry *entry); }; +int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf, + void __iomem *addr, u32 count); bool intel_pmt_is_early_client_hw(struct device *dev); int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct 
intel_pmt_namespace *ns, diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index 4014c02cafdb53..9079d5dffc031a 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -16,7 +17,6 @@ #include #include -#include "../vsec.h" #include "class.h" /* Crashlog discovery header types */ diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index 09258564dfc4df..c9feac859e574c 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -16,7 +17,6 @@ #include #include -#include "../vsec.h" #include "class.h" #define TELEM_SIZE_OFFSET 0x0 @@ -93,8 +93,8 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry, return 0; } -static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry, - struct pci_dev *pdev) +static int pmt_telem_add_endpoint(struct intel_vsec_device *ivdev, + struct intel_pmt_entry *entry) { struct telem_endpoint *ep; @@ -104,13 +104,14 @@ static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry, return -ENOMEM; ep = entry->ep; - ep->pcidev = pdev; + ep->pcidev = ivdev->pcidev; ep->header.access_type = entry->header.access_type; ep->header.guid = entry->header.guid; ep->header.base_offset = entry->header.base_offset; ep->header.size = entry->header.size; ep->base = entry->base; ep->present = true; + ep->cb = ivdev->priv_data; kref_init(&ep->kref); @@ -218,7 +219,8 @@ int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count) if (offset + NUM_BYTES_QWORD(count) > size) return -EINVAL; - memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count)); + pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base + offset, + NUM_BYTES_QWORD(count)); return ep->present ? 
0 : -EPIPE; } diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c index 277e4f4b20acfb..9d137621f0e6e7 100644 --- a/drivers/platform/x86/intel/sdsi.c +++ b/drivers/platform/x86/intel/sdsi.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -22,8 +23,6 @@ #include #include -#include "vsec.h" - #define ACCESS_TYPE_BARID 2 #define ACCESS_TYPE_LOCAL 3 diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 10e21563fa46ff..1e46e30dae9669 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -316,7 +316,9 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn cpu >= nr_cpu_ids || cpu >= num_possible_cpus()) return NULL; - pkg_id = topology_physical_package_id(cpu); + pkg_id = topology_logical_package_id(cpu); + if (pkg_id >= topology_max_packages()) + return NULL; bus_number = isst_cpu_info[cpu].bus_info[bus_no]; if (bus_number < 0) @@ -651,10 +653,6 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, /* Lock to prevent module registration when already opened by user space */ static DEFINE_MUTEX(punit_misc_dev_open_lock); -/* Lock to allow one shared misc device for all ISST interfaces */ -static DEFINE_MUTEX(punit_misc_dev_reg_lock); -static int misc_usage_count; -static int misc_device_ret; static int misc_device_open; static int isst_if_open(struct inode *inode, struct file *file) @@ -720,39 +718,23 @@ static struct miscdevice isst_if_char_driver = { static int isst_misc_reg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_device_ret) - goto unlock_exit; - - if (!misc_usage_count) { - misc_device_ret = isst_if_cpu_info_init(); - if (misc_device_ret) - goto unlock_exit; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) { - isst_if_cpu_info_exit(); - goto unlock_exit; - } - } - misc_usage_count++; + int ret; + + ret = isst_if_cpu_info_init(); + if (ret) + return ret; -unlock_exit: - mutex_unlock(&punit_misc_dev_reg_lock); + ret = misc_register(&isst_if_char_driver); + if (ret) + isst_if_cpu_info_exit(); - return misc_device_ret; + return ret; } static void isst_misc_unreg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_usage_count) - misc_usage_count--; - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_reg_lock); + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); } /** @@ -827,6 +809,7 @@ static const struct x86_cpu_id isst_cpu_ids[] = { X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED), X86_MATCH_VFM(INTEL_ICELAKE_D, 0), X86_MATCH_VFM(INTEL_ICELAKE_X, 0), + X86_MATCH_VFM(INTEL_PANTHERCOVE_X, SST_HPM_SUPPORTED), X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0), X86_MATCH_VFM(INTEL_SKYLAKE_X, SST_MBOX_SUPPORTED), {} diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 83e8b1fe53b355..486ddc9b359248 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -59,8 +60,6 @@ #include #include -#include "vsec.h" - /** * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry * @tpmi_id: TPMI feature identifier (what the feature is and its data format). 
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c index 4eb02553957cfc..0609a8320f7ec1 100644 --- a/drivers/platform/x86/intel/tpmi_power_domains.c +++ b/drivers/platform/x86/intel/tpmi_power_domains.c @@ -82,6 +82,7 @@ static const struct x86_cpu_id tpmi_cpu_ids[] = { X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, NULL), X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL), X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL), + X86_MATCH_VFM(INTEL_PANTHERCOVE_X, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, tpmi_cpu_ids); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index 4e880585cbe4d0..e22b683a7a434c 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -60,11 +60,16 @@ static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count, enum uncore_index index) { - unsigned int input; + unsigned int input = 0; int ret; - if (kstrtouint(buf, 10, &input)) - return -EINVAL; + if (index == UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE) { + if (kstrtobool(buf, (bool *)&input)) + return -EINVAL; + } else { + if (kstrtouint(buf, 10, &input)) + return -EINVAL; + } mutex_lock(&uncore_lock); ret = uncore_write(data, input, index); @@ -103,6 +108,18 @@ show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ); +store_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); +store_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD); +store_uncore_attr(elc_high_threshold_enable, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE); +store_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ); + +show_uncore_attr(elc_low_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); +show_uncore_attr(elc_high_threshold_percent, UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD); +show_uncore_attr(elc_high_threshold_enable, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE); +show_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ); + #define show_uncore_data(member_name) \ static ssize_t show_##member_name(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf)\ @@ -146,7 +163,8 @@ show_uncore_data(initial_max_freq_khz); static int create_attr_group(struct uncore_data *data, char *name) { - int ret, freq, index = 0; + int ret, index = 0; + unsigned int val; init_attribute_rw(max_freq_khz); init_attribute_rw(min_freq_khz); @@ -168,10 +186,24 @@ static int create_attr_group(struct uncore_data *data, char *name) data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr; data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr; - ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ); + ret = uncore_read(data, &val, UNCORE_INDEX_CURRENT_FREQ); if (!ret) data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr; + ret = uncore_read(data, &val, UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD); + if (!ret) { + init_attribute_rw(elc_low_threshold_percent); + init_attribute_rw(elc_high_threshold_percent); + init_attribute_rw(elc_high_threshold_enable); + init_attribute_rw(elc_floor_freq_khz); + + data->uncore_attrs[index++] = &data->elc_low_threshold_percent_kobj_attr.attr; + data->uncore_attrs[index++] = 
&data->elc_high_threshold_percent_kobj_attr.attr; + data->uncore_attrs[index++] = + &data->elc_high_threshold_enable_kobj_attr.attr; + data->uncore_attrs[index++] = &data->elc_floor_freq_khz_kobj_attr.attr; + } + data->uncore_attrs[index] = NULL; data->uncore_attr_group.name = name; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h index 4c245b945e4ea1..26c854cd5d97fd 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h @@ -34,6 +34,13 @@ * @domain_id_kobj_attr: Storage for kobject attribute domain_id * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id * @package_id_kobj_attr: Storage for kobject attribute package_id + * @elc_low_threshold_percent_kobj_attr: + Storage for kobject attribute elc_low_threshold_percent + * @elc_high_threshold_percent_kobj_attr: + Storage for kobject attribute elc_high_threshold_percent + * @elc_high_threshold_enable_kobj_attr: + Storage for kobject attribute elc_high_threshold_enable + * @elc_floor_freq_khz_kobj_attr: Storage for kobject attribute elc_floor_freq_khz * @uncore_attrs: Attribute storage for group creation * * This structure is used to encapsulate all data related to uncore sysfs @@ -61,7 +68,11 @@ struct uncore_data { struct kobj_attribute domain_id_kobj_attr; struct kobj_attribute fabric_cluster_id_kobj_attr; struct kobj_attribute package_id_kobj_attr; - struct attribute *uncore_attrs[9]; + struct kobj_attribute elc_low_threshold_percent_kobj_attr; + struct kobj_attribute elc_high_threshold_percent_kobj_attr; + struct kobj_attribute elc_high_threshold_enable_kobj_attr; + struct kobj_attribute elc_floor_freq_khz_kobj_attr; + struct attribute *uncore_attrs[13]; }; #define UNCORE_DOMAIN_ID_INVALID -1 @@ -70,6 +81,10 @@ enum uncore_index { UNCORE_INDEX_MIN_FREQ, UNCORE_INDEX_MAX_FREQ, UNCORE_INDEX_CURRENT_FREQ, + UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD, + UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, + UNCORE_INDEX_EFF_LAT_CTRL_FREQ, }; int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 9fa3037c03d159..0591053813a283 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -30,6 +30,7 @@ #define UNCORE_MAJOR_VERSION 0 #define UNCORE_MINOR_VERSION 2 +#define UNCORE_ELC_SUPPORTED_VERSION 2 #define UNCORE_HEADER_INDEX 0 #define UNCORE_FABRIC_CLUSTER_OFFSET 8 @@ -46,6 +47,7 @@ struct tpmi_uncore_struct; /* Information for each cluster */ struct tpmi_uncore_cluster_info { bool root_domain; + bool elc_supported; u8 __iomem *cluster_base; struct uncore_data uncore_data; struct tpmi_uncore_struct *uncore_root; @@ -75,6 +77,10 @@ struct tpmi_uncore_struct { /* Bit definitions for CONTROL register */ #define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8) #define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15) +#define UNCORE_EFF_LAT_CTRL_RATIO_MASK GENMASK_ULL(28, 22) +#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK GENMASK_ULL(38, 32) +#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE BIT(39) +#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK GENMASK_ULL(46, 40) /* Helper function to read MMIO offset for 
max/min control frequency */ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, @@ -89,6 +95,48 @@ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; } +/* Helper function to read efficiency latency control values over MMIO */ +static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + u64 ctrl; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + if (cluster_info->root_domain) + return -ENODATA; + + if (!cluster_info->elc_supported) + return -EOPNOTSUPP; + + ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl); + *val *= 100; + *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK)); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl); + *val *= 100; + *val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK)); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl); + break; + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + *val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER; + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + #define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK) /* Helper for sysfs read for max/min frequencies. Called under mutex locks */ @@ -137,6 +185,82 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu return 0; } +/* Helper function for writing efficiency latency control values over MMIO */ +static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index) +{ + struct tpmi_uncore_cluster_info *cluster_info; + u64 control; + + cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + + if (cluster_info->root_domain) + return -ENODATA; + + if (!cluster_info->elc_supported) + return -EOPNOTSUPP; + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + if (val > 100) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + if (val > 100) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + if (val > 1) + return -EINVAL; + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + val /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK)) + return -EINVAL; + break; + + default: + return -EOPNOTSUPP; + } + + control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK); + val /= 100; + control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK); + val /= 100; + control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE; + control |= 
FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val); + break; + + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK; + control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val); + break; + + default: + break; + } + + writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX); + + return 0; +} + /* Helper function to write MMIO offset for max/min control frequency */ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input, unsigned int index) @@ -156,7 +280,7 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX)); } -/* Callback for sysfs write for max/min frequencies. Called under mutex locks */ +/* Helper for sysfs write for max/min frequencies. Called under mutex locks */ static int uncore_write_control_freq(struct uncore_data *data, unsigned int input, enum uncore_index index) { @@ -234,6 +358,33 @@ static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncor case UNCORE_INDEX_CURRENT_FREQ: return uncore_read_freq(data, value); + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + return read_eff_lat_ctrl(data, value, index); + + default: + break; + } + + return -EOPNOTSUPP; +} + +/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */ +static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index) +{ + switch (index) { + case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD: + case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE: + case UNCORE_INDEX_EFF_LAT_CTRL_FREQ: + return write_eff_lat_ctrl(data, value, index); + + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_write_control_freq(data, value, index); + default: break; } @@ -291,7 +442,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ return -EINVAL; /* Register callbacks to uncore core */ - ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq); + ret = uncore_freq_common_init(uncore_read, uncore_write); if (ret) return ret; @@ -409,6 +560,9 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ cluster_info->uncore_root = tpmi_uncore; + if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION) + cluster_info->elc_supported = true; + ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0); if (ret) { cluster_info->cluster_base = NULL; @@ -427,6 +581,9 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ auxiliary_set_drvdata(auxdev, tpmi_uncore); + if (topology_max_dies_per_package() > 1) + return 0; + tpmi_uncore->root_cluster.root_domain = true; tpmi_uncore->root_cluster.uncore_root = tpmi_uncore; @@ -450,7 +607,9 @@ static void uncore_remove(struct auxiliary_device *auxdev) { struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev); - uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data); + if (tpmi_uncore->root_cluster.root_domain) + uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data); + remove_cluster_entries(tpmi_uncore); uncore_freq_common_exit(); diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 0fdfaf3a4f5cde..7b5cc9993974ef 100644 --- 
a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -17,14 +17,13 @@ #include #include #include -#include #include +#include +#include #include #include #include -#include "vsec.h" - #define PMT_XA_START 0 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) @@ -213,6 +212,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he intel_vsec_dev->num_resources = header->num_entries; intel_vsec_dev->quirks = info->quirks; intel_vsec_dev->base_addr = info->base_addr; + intel_vsec_dev->priv_data = info->priv_data; if (header->id == VSEC_ID_SDSI) intel_vsec_dev->ida = &intel_vsec_sdsi_ida; @@ -341,7 +341,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, void intel_vsec_register(struct pci_dev *pdev, struct intel_vsec_platform_info *info) { - if (!pdev || !info) + if (!pdev || !info || !info->headers) return; intel_vsec_walk_header(pdev, info); diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h deleted file mode 100644 index e23e76129691a5..00000000000000 --- a/drivers/platform/x86/intel/vsec.h +++ /dev/null @@ -1,108 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _VSEC_H -#define _VSEC_H - -#include -#include - -#define VSEC_CAP_TELEMETRY BIT(0) -#define VSEC_CAP_WATCHER BIT(1) -#define VSEC_CAP_CRASHLOG BIT(2) -#define VSEC_CAP_SDSI BIT(3) -#define VSEC_CAP_TPMI BIT(4) - -/* Intel DVSEC offsets */ -#define INTEL_DVSEC_ENTRIES 0xA -#define INTEL_DVSEC_SIZE 0xB -#define INTEL_DVSEC_TABLE 0xC -#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) -#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) -#define TABLE_OFFSET_SHIFT 3 - -struct pci_dev; -struct resource; - -enum intel_vsec_id { - VSEC_ID_TELEMETRY = 2, - VSEC_ID_WATCHER = 3, - VSEC_ID_CRASHLOG = 4, - VSEC_ID_SDSI = 65, - VSEC_ID_TPMI = 66, -}; - -/** - * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. 
- * @rev: Revision ID of the VSEC/DVSEC register space - * @length: Length of the VSEC/DVSEC register space - * @id: ID of the feature - * @num_entries: Number of instances of the feature - * @entry_size: Size of the discovery table for each feature - * @tbir: BAR containing the discovery tables - * @offset: BAR offset of start of the first discovery table - */ -struct intel_vsec_header { - u8 rev; - u16 length; - u16 id; - u8 num_entries; - u8 entry_size; - u8 tbir; - u32 offset; -}; - -enum intel_vsec_quirks { - /* Watcher feature not supported */ - VSEC_QUIRK_NO_WATCHER = BIT(0), - - /* Crashlog feature not supported */ - VSEC_QUIRK_NO_CRASHLOG = BIT(1), - - /* Use shift instead of mask to read discovery table offset */ - VSEC_QUIRK_TABLE_SHIFT = BIT(2), - - /* DVSEC not present (provided in driver data) */ - VSEC_QUIRK_NO_DVSEC = BIT(3), - - /* Platforms requiring quirk in the auxiliary driver */ - VSEC_QUIRK_EARLY_HW = BIT(4), -}; - -/* Platform specific data */ -struct intel_vsec_platform_info { - struct device *parent; - struct intel_vsec_header **headers; - unsigned long caps; - unsigned long quirks; - u64 base_addr; -}; - -struct intel_vsec_device { - struct auxiliary_device auxdev; - struct pci_dev *pcidev; - struct resource *resource; - struct ida *ida; - int num_resources; - int id; /* xa */ - void *priv_data; - size_t priv_data_size; - unsigned long quirks; - u64 base_addr; -}; - -int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, - struct intel_vsec_device *intel_vsec_dev, - const char *name); - -static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev) -{ - return container_of(dev, struct intel_vsec_device, auxdev.dev); -} - -static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev) -{ - return container_of(auxdev, struct intel_vsec_device, auxdev); -} - -void intel_vsec_register(struct pci_dev *pdev, - struct intel_vsec_platform_info *info); -#endif diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index a68df413340351..5b16d29c93d7d0 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -23,7 +23,7 @@ #include #include -#include +#include /* IPC defines the following message types */ #define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */ diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c index 7d87cbd4b9c63c..69b36ce41fa210 100644 --- a/drivers/platform/x86/intel_scu_ipcutil.c +++ b/drivers/platform/x86/intel_scu_ipcutil.c @@ -18,7 +18,7 @@ #include #include -#include +#include static int major; diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c index dbf0310448da9a..d7f72d6deb44b4 100644 --- a/drivers/platform/x86/intel_scu_pcidrv.c +++ b/drivers/platform/x86/intel_scu_pcidrv.c @@ -11,7 +11,7 @@ #include #include -#include +#include static int intel_scu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/platform/x86/intel_scu_pltdrv.c b/drivers/platform/x86/intel_scu_pltdrv.c index 56ec6ae4c824a2..0892362acd7b91 100644 --- a/drivers/platform/x86/intel_scu_pltdrv.c +++ b/drivers/platform/x86/intel_scu_pltdrv.c @@ -15,7 +15,7 @@ #include #include -#include +#include static int intel_scu_platform_probe(struct platform_device *pdev) { diff --git a/drivers/platform/x86/intel_scu_wdt.c b/drivers/platform/x86/intel_scu_wdt.c index d0b6637861d301..746d47d334061a 100644 --- 
a/drivers/platform/x86/intel_scu_wdt.c +++ b/drivers/platform/x86/intel_scu_wdt.c @@ -9,13 +9,14 @@ #include #include #include -#include #include #include #include #include +#include + #define TANGIER_EXT_TIMER0_MSI 12 static struct platform_device wdt_dev = { diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c index e0bbd6a14a89cb..bd9f95404c7cb0 100644 --- a/drivers/platform/x86/lenovo-ymc.c +++ b/drivers/platform/x86/lenovo-ymc.c @@ -43,6 +43,8 @@ struct lenovo_ymc_private { }; static const struct key_entry lenovo_ymc_keymap[] = { + /* Ignore the uninitialized state */ + { KE_IGNORE, 0x00 }, /* Laptop */ { KE_SW, 0x01, { .sw = { SW_TABLET_MODE, 0 } } }, /* Tablet */ diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c index 9c7857842caf4c..4b57102c7f6270 100644 --- a/drivers/platform/x86/lg-laptop.c +++ b/drivers/platform/x86/lg-laptop.c @@ -8,6 +8,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include +#include +#include #include #include #include @@ -31,6 +34,26 @@ MODULE_AUTHOR("Matan Ziv-Av"); MODULE_DESCRIPTION("LG WMI Hotkey Driver"); MODULE_LICENSE("GPL"); +static bool fw_debug; +module_param(fw_debug, bool, 0); +MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages"); + +#define LG_ADDRESS_SPACE_ID 0x8F + +#define LG_ADDRESS_SPACE_DEBUG_FLAG_ADR 0x00 +#define LG_ADDRESS_SPACE_FAN_MODE_ADR 0x03 + +#define LG_ADDRESS_SPACE_DTTM_FLAG_ADR 0x20 +#define LG_ADDRESS_SPACE_CPU_TEMP_ADR 0x21 +#define LG_ADDRESS_SPACE_CPU_TRIP_LOW_ADR 0x22 +#define LG_ADDRESS_SPACE_CPU_TRIP_HIGH_ADR 0x23 +#define LG_ADDRESS_SPACE_MB_TEMP_ADR 0x24 +#define LG_ADDRESS_SPACE_MB_TRIP_LOW_ADR 0x25 +#define LG_ADDRESS_SPACE_MB_TRIP_HIGH_ADR 0x26 + +#define LG_ADDRESS_SPACE_DEBUG_MSG_START_ADR 0x3E8 +#define LG_ADDRESS_SPACE_DEBUG_MSG_END_ADR 0x5E8 + #define WMI_EVENT_GUID0 "E4FB94F9-7F2B-4173-AD1A-CD1D95086248" #define WMI_EVENT_GUID1 "023B133E-49D1-4E10-B313-698220140DC2" #define WMI_EVENT_GUID2 "37BE1AC0-C3F2-4B1F-BFBE-8FDEAF2814D6" @@ -182,21 +205,11 @@ static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u return (union acpi_object *)buffer.pointer; } -static void wmi_notify(u32 value, void *context) +static void wmi_notify(union acpi_object *obj, void *context) { - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - acpi_status status; long data = (long)context; pr_debug("event guid %li\n", data); - status = wmi_get_event_data(value, &response); - if (ACPI_FAILURE(status)) { - pr_err("Bad event status 0x%x\n", status); - return; - } - - obj = (union acpi_object *)response.pointer; if (!obj) return; @@ -218,7 +231,6 @@ static void wmi_notify(u32 value, void *context) pr_debug("Type: %i Eventcode: 0x%llx\n", obj->type, obj->integer.value); - kfree(response.pointer); } static void wmi_input_setup(void) @@ -646,6 +658,107 @@ static struct platform_driver pf_driver = { } }; +static acpi_status lg_laptop_address_space_write(struct device *dev, acpi_physical_address address, + size_t size, u64 value) +{ + u8 byte; + + /* Ignore any debug messages */ + if (address >= LG_ADDRESS_SPACE_DEBUG_MSG_START_ADR && + address <= LG_ADDRESS_SPACE_DEBUG_MSG_END_ADR) + return AE_OK; + + if (size != sizeof(byte)) + return AE_BAD_PARAMETER; + + byte = value & 0xFF; + + switch (address) { + case LG_ADDRESS_SPACE_FAN_MODE_ADR: + /* + * The fan mode field is not affected by the DTTM flag, so we + * have to manually check fw_debug. 
+ */ + if (fw_debug) + dev_dbg(dev, "Fan mode set to mode %u\n", byte); + + return AE_OK; + case LG_ADDRESS_SPACE_CPU_TEMP_ADR: + dev_dbg(dev, "CPU temperature is %u °C\n", byte); + return AE_OK; + case LG_ADDRESS_SPACE_CPU_TRIP_LOW_ADR: + dev_dbg(dev, "CPU lower trip point set to %u °C\n", byte); + return AE_OK; + case LG_ADDRESS_SPACE_CPU_TRIP_HIGH_ADR: + dev_dbg(dev, "CPU higher trip point set to %u °C\n", byte); + return AE_OK; + case LG_ADDRESS_SPACE_MB_TEMP_ADR: + dev_dbg(dev, "Motherboard temperature is %u °C\n", byte); + return AE_OK; + case LG_ADDRESS_SPACE_MB_TRIP_LOW_ADR: + dev_dbg(dev, "Motherboard lower trip point set to %u °C\n", byte); + return AE_OK; + case LG_ADDRESS_SPACE_MB_TRIP_HIGH_ADR: + dev_dbg(dev, "Motherboard higher trip point set to %u °C\n", byte); + return AE_OK; + default: + dev_notice_ratelimited(dev, "Ignoring write to unknown opregion address %llu\n", + address); + return AE_OK; + } +} + +static acpi_status lg_laptop_address_space_read(struct device *dev, acpi_physical_address address, + size_t size, u64 *value) +{ + if (size != 1) + return AE_BAD_PARAMETER; + + switch (address) { + case LG_ADDRESS_SPACE_DEBUG_FLAG_ADR: + /* Debug messages are already printed using the standard ACPI Debug object */ + *value = 0x00; + return AE_OK; + case LG_ADDRESS_SPACE_DTTM_FLAG_ADR: + *value = fw_debug; + return AE_OK; + default: + dev_notice_ratelimited(dev, "Attempt to read unknown opregion address %llu\n", + address); + return AE_BAD_PARAMETER; + } +} + +static acpi_status lg_laptop_address_space_handler(u32 function, acpi_physical_address address, + u32 bits, u64 *value, void *handler_context, + void *region_context) +{ + struct device *dev = handler_context; + size_t size; + + if (bits % BITS_PER_BYTE) + return AE_BAD_PARAMETER; + + size = bits / BITS_PER_BYTE; + + switch (function) { + case ACPI_READ: + return lg_laptop_address_space_read(dev, address, size, value); + case ACPI_WRITE: + return lg_laptop_address_space_write(dev, address, size, *value); + default: + return AE_BAD_PARAMETER; + } +} + +static void lg_laptop_remove_address_space_handler(void *data) +{ + struct acpi_device *device = data; + + acpi_remove_address_space_handler(device->handle, LG_ADDRESS_SPACE_ID, + &lg_laptop_address_space_handler); +} + static int acpi_add(struct acpi_device *device) { struct platform_device_info pdev_info = { @@ -653,6 +766,7 @@ static int acpi_add(struct acpi_device *device) .name = PLATFORM_NAME, .id = PLATFORM_DEVID_NONE, }; + acpi_status status; int ret; const char *product; int year = 2017; @@ -660,6 +774,17 @@ static int acpi_add(struct acpi_device *device) if (pf_device) return 0; + status = acpi_install_address_space_handler(device->handle, LG_ADDRESS_SPACE_ID, + &lg_laptop_address_space_handler, + NULL, &device->dev); + if (ACPI_FAILURE(status)) + return -ENODEV; + + ret = devm_add_action_or_reset(&device->dev, lg_laptop_remove_address_space_handler, + device); + if (ret < 0) + return ret; + ret = platform_driver_register(&pf_driver); if (ret) return ret; diff --git a/drivers/platform/x86/msi-wmi-platform.c b/drivers/platform/x86/msi-wmi-platform.c index 436fb91a47db7f..9b5c7f8c79b0dd 100644 --- a/drivers/platform/x86/msi-wmi-platform.c +++ b/drivers/platform/x86/msi-wmi-platform.c @@ -22,7 +22,7 @@ #include #include -#include +#include #define DRIVER_NAME "msi-wmi-platform" diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index fd318cdfe313e8..4a7ac85c4db48f 100644 --- a/drivers/platform/x86/msi-wmi.c +++ 
b/drivers/platform/x86/msi-wmi.c @@ -170,20 +170,9 @@ static const struct backlight_ops msi_backlight_ops = { .update_status = bl_set_status, }; -static void msi_wmi_notify(u32 value, void *context) +static void msi_wmi_notify(union acpi_object *obj, void *context) { - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; struct key_entry *key; - union acpi_object *obj; - acpi_status status; - - status = wmi_get_event_data(value, &response); - if (status != AE_OK) { - pr_info("bad event status 0x%x\n", status); - return; - } - - obj = (union acpi_object *)response.pointer; if (obj && obj->type == ACPI_TYPE_INTEGER) { int eventcode = obj->integer.value; @@ -192,7 +181,7 @@ static void msi_wmi_notify(u32 value, void *context) eventcode); if (!key) { pr_info("Unknown key pressed - %x\n", eventcode); - goto msi_wmi_notify_exit; + return; } if (event_wmi->quirk_last_pressed) { @@ -204,7 +193,7 @@ static void msi_wmi_notify(u32 value, void *context) pr_debug("Suppressed key event 0x%X - " "Last press was %lld us ago\n", key->code, ktime_to_us(diff)); - goto msi_wmi_notify_exit; + return; } last_pressed = cur; } @@ -221,9 +210,6 @@ static void msi_wmi_notify(u32 value, void *context) } } else pr_info("Unknown event received\n"); - -msi_wmi_notify_exit: - kfree(response.pointer); } static int __init msi_wmi_backlight_setup(void) diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index ebd81846e2d564..2bf94d0ab32432 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -121,6 +121,7 @@ #include #include +#include #include #include #include @@ -224,6 +225,17 @@ static const struct key_entry panasonic_keymap[] = { { KE_KEY, 8, { KEY_PROG1 } }, /* Change CPU boost */ { KE_KEY, 9, { KEY_BATTERY } }, { KE_KEY, 10, { KEY_SUSPEND } }, + { KE_KEY, 21, { KEY_MACRO1 } }, + { KE_KEY, 22, { KEY_MACRO2 } }, + { KE_KEY, 24, { KEY_MACRO3 } }, + { KE_KEY, 25, { KEY_MACRO4 } }, + { KE_KEY, 34, { KEY_MACRO5 } }, + { KE_KEY, 35, { KEY_MACRO6 } }, + { KE_KEY, 36, { KEY_MACRO7 } }, + { KE_KEY, 37, { KEY_MACRO8 } }, + { KE_KEY, 41, { KEY_MACRO9 } }, + { KE_KEY, 42, { KEY_MACRO10 } }, + { KE_KEY, 43, { KEY_MACRO11 } }, { KE_END, 0 } }; @@ -830,8 +842,8 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) return; } - key = result & 0xf; - updown = result & 0x80; /* 0x80 == key down; 0x00 = key up */ + key = result & GENMASK(6, 0); + updown = result & BIT(7); /* 0x80 == key down; 0x00 = key up */ /* hack: some firmware sends no key down for sleep / hibernate */ if (key == 7 || key == 10) { diff --git a/drivers/platform/x86/quickstart.c b/drivers/platform/x86/quickstart.c index df496c7e717142..8d540a1c86024e 100644 --- a/drivers/platform/x86/quickstart.c +++ b/drivers/platform/x86/quickstart.c @@ -26,7 +26,7 @@ #include #include -#include +#include #define DRIVER_NAME "quickstart" diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 3d2f8e758369d8..0d3e3ca20b1bfe 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -554,7 +553,7 @@ static int update_status(struct backlight_device *bd) set_brightness(samsung, bd->props.brightness); - if (bd->props.power == FB_BLANK_UNBLANK) + if (bd->props.power == BACKLIGHT_POWER_ON) sabi_set_commandb(samsung, commands->set_backlight, 1); else sabi_set_commandb(samsung, commands->set_backlight, 0); @@ -1189,7 
+1188,7 @@ static int __init samsung_backlight_init(struct samsung_laptop *samsung) samsung->backlight_device = bd; samsung->backlight_device->props.brightness = read_brightness(samsung); - samsung->backlight_device->props.power = FB_BLANK_UNBLANK; + samsung->backlight_device->props.power = BACKLIGHT_POWER_ON; backlight_update_status(samsung->backlight_device); return 0; diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c index 3be016cfe6015a..7c04cc9e5891be 100644 --- a/drivers/platform/x86/serial-multi-instantiate.c +++ b/drivers/platform/x86/serial-multi-instantiate.c @@ -83,11 +83,15 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev, static void smi_devs_unregister(struct smi *smi) { +#if IS_REACHABLE(CONFIG_I2C) while (smi->i2c_num--) i2c_unregister_device(smi->i2c_devs[smi->i2c_num]); +#endif - while (smi->spi_num--) - spi_unregister_device(smi->spi_devs[smi->spi_num]); + if (IS_REACHABLE(CONFIG_SPI)) { + while (smi->spi_num--) + spi_unregister_device(smi->spi_devs[smi->spi_num]); + } } /** @@ -258,9 +262,15 @@ static int smi_probe(struct platform_device *pdev) switch (node->bus_type) { case SMI_I2C: - return smi_i2c_probe(pdev, smi, node->instances); + if (IS_REACHABLE(CONFIG_I2C)) + return smi_i2c_probe(pdev, smi, node->instances); + + return -ENODEV; case SMI_SPI: - return smi_spi_probe(pdev, smi, node->instances); + if (IS_REACHABLE(CONFIG_SPI)) + return smi_spi_probe(pdev, smi, node->instances); + + return -ENODEV; case SMI_AUTO_DETECT: /* * For backwards-compatibility with the existing nodes I2C @@ -270,10 +280,16 @@ static int smi_probe(struct platform_device *pdev) * SpiSerialBus nodes that were previously ignored, and this * preserves that behavior. */ - ret = smi_i2c_probe(pdev, smi, node->instances); - if (ret != -ENOENT) - return ret; - return smi_spi_probe(pdev, smi, node->instances); + if (IS_REACHABLE(CONFIG_I2C)) { + ret = smi_i2c_probe(pdev, smi, node->instances); + if (ret != -ENOENT) + return ret; + } + + if (IS_REACHABLE(CONFIG_SPI)) + return smi_spi_probe(pdev, smi, node->instances); + + return -ENODEV; default: return -EINVAL; } diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index f269ca1ff7718d..4c1b0553f87209 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -7749,6 +7749,28 @@ static struct ibm_struct volume_driver_data = { * EC 0x2f (HFSP) might be available *for reading*, but do not use * it for writing. * + * TPACPI_FAN_RD_ACPI_FANG: + * ACPI FANG method: returns fan control register + * + * Takes one parameter which is 0x8100 plus the offset to EC memory + * address 0xf500 and returns the byte at this address. + * + * 0xf500: + * When the value is less than 9 automatic mode is enabled + * 0xf502: + * Contains the current fan speed from 0-100% + * 0xf506: + * Bit 7 has to be set in order to enable manual control by + * writing a value >= 9 to 0xf500 + * + * TPACPI_FAN_WR_ACPI_FANW: + * ACPI FANW method: sets fan control registers + * + * Takes 0x8100 plus the offset to EC memory address 0xf500 and the + * value to be written there as parameters. 
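/*
 * Worked example of the FANG/FANW convention documented above, reusing the
 * acpi_evalf()/fang_handle/fanw_handle helpers this patch already relies on.
 * The method argument is 0x8100 plus the offset from EC address 0xf500, so
 * the current speed at EC 0xf502 is read with parameter 0x8102; the
 * requested level (5 here) is only an illustration.
 */
static int example_fang_fanw(void)
{
	int mode, percent;

	/* EC 0xf500: a value below 9 means automatic fan control */
	if (!acpi_evalf(fang_handle, &mode, NULL, "dd", 0x8100))
		return -EIO;

	/* EC 0xf502: current fan speed as a percentage */
	if (!acpi_evalf(fang_handle, &percent, NULL, "dd", 0x8102))
		return -EIO;

	/* Map 0-100% onto the driver's 0-7 level scale, e.g. 50% -> level 3 */
	pr_info("fan: %s, level %d\n",
		mode < 9 ? "auto" : "manual", percent * 7 / 100);

	/* Switch to manual control and request level 5 (5 * 100 / 7 = 71%) */
	if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x45) ||
	    !acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0xff) ||
	    !acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8102, 5 * 100 / 7))
		return -EIO;

	return 0;
}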
+ * + * see TPACPI_FAN_RD_ACPI_FANG + * * TPACPI_FAN_WR_TPEC: * ThinkPad EC register 0x2f (HFSP): fan control loop mode * Supported on almost all ThinkPads @@ -7882,6 +7904,7 @@ enum { /* Fan control constants */ enum fan_status_access_mode { TPACPI_FAN_NONE = 0, /* No fan status or control */ TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */ + TPACPI_FAN_RD_ACPI_FANG, /* Use ACPI FANG */ TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */ TPACPI_FAN_RD_TPEC_NS, /* Use non-standard ACPI EC regs (eg: L13 Yoga gen2 etc.) */ }; @@ -7889,6 +7912,7 @@ enum fan_status_access_mode { enum fan_control_access_mode { TPACPI_FAN_WR_NONE = 0, /* No fan control */ TPACPI_FAN_WR_ACPI_SFAN, /* Use ACPI SFAN */ + TPACPI_FAN_WR_ACPI_FANW, /* Use ACPI FANW */ TPACPI_FAN_WR_TPEC, /* Use ACPI EC reg 0x2f */ TPACPI_FAN_WR_ACPI_FANS, /* Use ACPI FANS and EC reg 0x2f */ }; @@ -7922,9 +7946,13 @@ TPACPI_HANDLE(fans, ec, "FANS"); /* X31, X40, X41 */ TPACPI_HANDLE(gfan, ec, "GFAN", /* 570 */ "\\FSPD", /* 600e/x, 770e, 770x */ ); /* all others */ +TPACPI_HANDLE(fang, ec, "FANG", /* E531 */ + ); /* all others */ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */ "JFNS", /* 770x-JL */ ); /* all others */ +TPACPI_HANDLE(fanw, ec, "FANW", /* E531 */ + ); /* all others */ /* * Unitialized HFSP quirk: ACPI DSDT and EC fail to initialize the @@ -8031,6 +8059,23 @@ static int fan_get_status(u8 *status) break; } + case TPACPI_FAN_RD_ACPI_FANG: { + /* E531 */ + int mode, speed; + + if (unlikely(!acpi_evalf(fang_handle, &mode, NULL, "dd", 0x8100))) + return -EIO; + if (unlikely(!acpi_evalf(fang_handle, &speed, NULL, "dd", 0x8102))) + return -EIO; + + if (likely(status)) { + *status = speed * 7 / 100; + if (mode < 9) + *status |= TP_EC_FAN_AUTO; + } + + break; + } case TPACPI_FAN_RD_TPEC: /* all except 570, 600e/x, 770e, 770x */ if (unlikely(!acpi_ec_read(fan_status_offset, &s))) @@ -8145,6 +8190,17 @@ static int fan2_get_speed(unsigned int *speed) if (speed) *speed = lo ? 
FAN_RPM_CAL_CONST / lo : 0; break; + case TPACPI_FAN_RD_ACPI_FANG: { + /* E531 */ + int speed_tmp; + + if (unlikely(!acpi_evalf(fang_handle, &speed_tmp, NULL, "dd", 0x8102))) + return -EIO; + + if (likely(speed)) + *speed = speed_tmp * 65535 / 100; + break; + } default: return -ENXIO; @@ -8204,6 +8260,32 @@ static int fan_set_level(int level) tp_features.fan_ctrl_status_undef = 0; break; + case TPACPI_FAN_WR_ACPI_FANW: + if (!(level & TP_EC_FAN_AUTO) && (level < 0 || level > 7)) + return -EINVAL; + if (level & TP_EC_FAN_FULLSPEED) + return -EINVAL; + + if (level & TP_EC_FAN_AUTO) { + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x05)) { + return -EIO; + } + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0x00)) { + return -EIO; + } + } else { + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x45)) { + return -EIO; + } + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0xff)) { + return -EIO; + } + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8102, level * 100 / 7)) { + return -EIO; + } + } + break; + default: return -ENXIO; } @@ -8236,7 +8318,7 @@ static int fan_set_level_safe(int level) static int fan_set_enable(void) { - u8 s; + u8 s = 0; int rc; if (!fan_control_allowed) @@ -8282,6 +8364,19 @@ static int fan_set_enable(void) rc = 0; break; + case TPACPI_FAN_WR_ACPI_FANW: + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x05)) { + rc = -EIO; + break; + } + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0x00)) { + rc = -EIO; + break; + } + + rc = 0; + break; + default: rc = -ENXIO; } @@ -8324,6 +8419,22 @@ static int fan_set_disable(void) fan_control_desired_level = 0; break; + case TPACPI_FAN_WR_ACPI_FANW: + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x45)) { + rc = -EIO; + break; + } + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0xff)) { + rc = -EIO; + break; + } + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8102, 0x00)) { + rc = -EIO; + break; + } + rc = 0; + break; + default: rc = -ENXIO; } @@ -8357,6 +8468,23 @@ static int fan_set_speed(int speed) rc = -EINVAL; break; + case TPACPI_FAN_WR_ACPI_FANW: + if (speed >= 0 && speed <= 65535) { + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8106, 0x45)) { + rc = -EIO; + break; + } + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", 0x8100, 0xff)) { + rc = -EIO; + break; + } + if (!acpi_evalf(fanw_handle, NULL, NULL, "vdd", + 0x8102, speed * 100 / 65535)) + rc = -EIO; + } else + rc = -EINVAL; + break; + default: rc = -ENXIO; } @@ -8699,6 +8827,10 @@ static int __init fan_init(struct ibm_init_struct *iibm) TPACPI_ACPIHANDLE_INIT(gfan); TPACPI_ACPIHANDLE_INIT(sfan); } + if (tpacpi_is_lenovo()) { + TPACPI_ACPIHANDLE_INIT(fang); + TPACPI_ACPIHANDLE_INIT(fanw); + } quirks = tpacpi_check_quirks(fan_quirk_table, ARRAY_SIZE(fan_quirk_table)); @@ -8718,6 +8850,9 @@ static int __init fan_init(struct ibm_init_struct *iibm) if (gfan_handle) { /* 570, 600e/x, 770e, 770x */ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; + } else if (fang_handle) { + /* E531 */ + fan_status_access_mode = TPACPI_FAN_RD_ACPI_FANG; } else { /* all other ThinkPads: note that even old-style * ThinkPad ECs supports the fan control register */ @@ -8764,6 +8899,11 @@ static int __init fan_init(struct ibm_init_struct *iibm) fan_control_access_mode = TPACPI_FAN_WR_ACPI_SFAN; fan_control_commands |= TPACPI_FAN_CMD_LEVEL | TPACPI_FAN_CMD_ENABLE; + } else if (fanw_handle) { + /* E531 */ + fan_control_access_mode = TPACPI_FAN_WR_ACPI_FANW; + fan_control_commands |= + TPACPI_FAN_CMD_LEVEL | 
TPACPI_FAN_CMD_SPEED | TPACPI_FAN_CMD_ENABLE; } else { if (!gfan_handle) { /* gfan without sfan means no fan control */ @@ -8915,6 +9055,7 @@ static int fan_read(struct seq_file *m) case TPACPI_FAN_RD_TPEC_NS: case TPACPI_FAN_RD_TPEC: + case TPACPI_FAN_RD_ACPI_FANG: /* all except 570, 600e/x, 770e, 770x */ rc = fan_get_status_safe(&status); if (rc) @@ -8935,7 +9076,7 @@ static int fan_read(struct seq_file *m) * No other levels settings available */ seq_printf(m, "level:\t\t%s\n", status & FAN_NS_CTRL ? "unknown" : "auto"); - } else { + } else if (fan_status_access_mode == TPACPI_FAN_RD_TPEC) { if (status & TP_EC_FAN_FULLSPEED) /* Disengaged mode takes precedence */ seq_printf(m, "level:\t\tdisengaged\n"); diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index 77c35529ab6f53..12c46455e8dcd5 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c @@ -32,26 +32,13 @@ static const struct key_entry toshiba_wmi_keymap[] __initconst = { { KE_END, 0 } }; -static void toshiba_wmi_notify(u32 value, void *context) +static void toshiba_wmi_notify(union acpi_object *obj, void *context) { - struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - acpi_status status; - - status = wmi_get_event_data(value, &response); - if (ACPI_FAILURE(status)) { - pr_err("Bad event status 0x%x\n", status); - return; - } - - obj = (union acpi_object *)response.pointer; if (!obj) return; /* TODO: Add proper checks once we have data */ pr_debug("Unknown event received, obj type %x\n", obj->type); - - kfree(response.pointer); } static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = { diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index f74af0a689f20c..0a39f68c641d15 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -840,6 +840,21 @@ static const struct ts_dmi_data rwc_nanote_p8_data = { .properties = rwc_nanote_p8_props, }; +static const struct property_entry rwc_nanote_next_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 5), + PROPERTY_ENTRY_U32("touchscreen-min-y", 5), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1785), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1145), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-next.fw"), + { } +}; + +static const struct ts_dmi_data rwc_nanote_next_data = { + .acpi_name = "MSSL1680:00", + .properties = rwc_nanote_next_props, +}; + static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1715), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -1589,6 +1604,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_SKU, "0001") }, }, + { + /* RWC NANOTE NEXT */ + .driver_data = (void *)&rwc_nanote_next_data, + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."), + DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."), + /* Above matches are too generic, add bios-version match */ + DMI_MATCH(DMI_BIOS_VERSION, "S8A70R100-V005"), + }, + }, { /* Schneider SCT101CTM */ .driver_data = (void *)&schneider_sct101ctm_data, diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 1d0b2d6040d1b4..3cbe180c3fc0a4 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -166,22 +166,6 @@ static inline acpi_object_type 
get_param_acpi_type(const struct wmi_block *wbloc return ACPI_TYPE_BUFFER; } -static acpi_status get_event_data(const struct wmi_block *wblock, struct acpi_buffer *out) -{ - union acpi_object param = { - .integer = { - .type = ACPI_TYPE_INTEGER, - .value = wblock->gblock.notify_id, - } - }; - struct acpi_object_list input = { - .count = 1, - .pointer = ¶m, - }; - - return acpi_evaluate_object(wblock->acpi_device->handle, "_WED", &input, out); -} - static int wmidev_match_guid(struct device *dev, const void *data) { struct wmi_block *wblock = dev_to_wblock(dev); @@ -199,23 +183,6 @@ static int wmidev_match_guid(struct device *dev, const void *data) return 0; } -static int wmidev_match_notify_id(struct device *dev, const void *data) -{ - struct wmi_block *wblock = dev_to_wblock(dev); - const u32 *notify_id = data; - - /* Legacy GUID-based functions are restricted to only see - * a single WMI device for each GUID. - */ - if (test_bit(WMI_GUID_DUPLICATED, &wblock->flags)) - return 0; - - if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id) - return 1; - - return 0; -} - static const struct bus_type wmi_bus_type; static struct wmi_device *wmi_find_device_by_guid(const char *guid_string) @@ -235,17 +202,6 @@ static struct wmi_device *wmi_find_device_by_guid(const char *guid_string) return dev_to_wdev(dev); } -static struct wmi_device *wmi_find_event_by_notify_id(const u32 notify_id) -{ - struct device *dev; - - dev = bus_find_device(&wmi_bus_type, NULL, ¬ify_id, wmidev_match_notify_id); - if (!dev) - return ERR_PTR(-ENODEV); - - return to_wmi_device(dev); -} - static void wmi_device_put(struct wmi_device *wdev) { put_device(&wdev->dev); @@ -649,35 +605,6 @@ acpi_status wmi_remove_notify_handler(const char *guid) } EXPORT_SYMBOL_GPL(wmi_remove_notify_handler); -/** - * wmi_get_event_data - Get WMI data associated with an event (deprecated) - * - * @event: Event to find - * @out: Buffer to hold event data - * - * Get extra data associated with an WMI event, the caller needs to free @out. - * - * Return: acpi_status signaling success or error. 
- */ -acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out) -{ - struct wmi_block *wblock; - struct wmi_device *wdev; - acpi_status status; - - wdev = wmi_find_event_by_notify_id(event); - if (IS_ERR(wdev)) - return AE_NOT_FOUND; - - wblock = container_of(wdev, struct wmi_block, dev); - status = get_event_data(wblock, out); - - wmi_device_put(wdev); - - return status; -} -EXPORT_SYMBOL_GPL(wmi_get_event_data); - /** * wmi_has_guid - Check if a GUID is available * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba @@ -1186,14 +1113,19 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev) static int wmi_get_notify_data(struct wmi_block *wblock, union acpi_object **obj) { struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object param = { + .integer = { + .type = ACPI_TYPE_INTEGER, + .value = wblock->gblock.notify_id, + } + }; + struct acpi_object_list input = { + .count = 1, + .pointer = ¶m, + }; acpi_status status; - if (test_bit(WMI_NO_EVENT_DATA, &wblock->flags)) { - *obj = NULL; - return 0; - } - - status = get_event_data(wblock, &data); + status = acpi_evaluate_object(wblock->acpi_device->handle, "_WED", &input, &data); if (ACPI_FAILURE(status)) { dev_warn(&wblock->dev.dev, "Failed to get event data\n"); return -EIO; @@ -1220,47 +1152,40 @@ static void wmi_notify_driver(struct wmi_block *wblock, union acpi_object *obj) static int wmi_notify_device(struct device *dev, void *data) { struct wmi_block *wblock = dev_to_wblock(dev); - union acpi_object *obj; + union acpi_object *obj = NULL; u32 *event = data; int ret; if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event)) return 0; - down_read(&wblock->notify_lock); - /* The WMI driver notify handler conflicts with the legacy WMI handler. - * Because of this the WMI driver notify handler takes precedence. + /* The ACPI WMI specification says that _WED should be + * evaluated every time an notification is received, even + * if no consumers are present. + * + * Some firmware implementations actually depend on this + * by using a queue for events which will fill up if the + * WMI driver core stops evaluating _WED due to missing + * WMI event consumers. */ - if (wblock->dev.dev.driver && wblock->driver_ready) { + if (!test_bit(WMI_NO_EVENT_DATA, &wblock->flags)) { ret = wmi_get_notify_data(wblock, &obj); - if (ret >= 0) { - wmi_notify_driver(wblock, obj); - kfree(obj); - } - } else { - if (wblock->handler) { - wblock->handler(*event, wblock->handler_data); - } else { - /* The ACPI WMI specification says that _WED should be - * evaluated every time an notification is received, even - * if no consumers are present. - * - * Some firmware implementations actually depend on this - * by using a queue for events which will fill up if the - * WMI driver core stops evaluating _WED due to missing - * WMI event consumers. - * - * Because of this we need this seemingly useless call to - * wmi_get_notify_data() which in turn evaluates _WED. 
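/*
 * Minimal sketch of a legacy notify handler under the new calling
 * convention: the WMI core now evaluates _WED itself and hands the decoded
 * object straight to the handler, so the wmi_get_event_data()/kfree()
 * sequence removed above is no longer needed. The GUID string is a
 * placeholder.
 */
#include <linux/acpi.h>
#include <linux/wmi.h>

#define EXAMPLE_EVENT_GUID "01234567-89AB-CDEF-0123-456789ABCDEF"

static void example_wmi_notify(union acpi_object *obj, void *context)
{
	if (!obj || obj->type != ACPI_TYPE_INTEGER)
		return;

	pr_debug("WMI event code 0x%llx\n", obj->integer.value);
}

static int __init example_wmi_init(void)
{
	/* Registration itself is unchanged, only the handler type differs */
	acpi_status status = wmi_install_notify_handler(EXAMPLE_EVENT_GUID,
							example_wmi_notify, NULL);

	return ACPI_FAILURE(status) ? -ENODEV : 0;
}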
- */ - ret = wmi_get_notify_data(wblock, &obj); - if (ret >= 0) - kfree(obj); - } - + if (ret < 0) + return -EIO; } + + down_read(&wblock->notify_lock); + + if (wblock->dev.dev.driver && wblock->driver_ready) + wmi_notify_driver(wblock, obj); + + if (wblock->handler) + wblock->handler(obj, wblock->handler_data); + up_read(&wblock->notify_lock); + kfree(obj); + acpi_bus_generate_netlink_event("wmi", acpi_dev_name(wblock->acpi_device), *event, 0); return -EBUSY; diff --git a/drivers/platform/x86/x86-android-tablets/Kconfig b/drivers/platform/x86/x86-android-tablets/Kconfig index b591419de80c30..88d9e8f2ff24ec 100644 --- a/drivers/platform/x86/x86-android-tablets/Kconfig +++ b/drivers/platform/x86/x86-android-tablets/Kconfig @@ -20,4 +20,4 @@ config X86_ANDROID_TABLETS are missing from the DSDT. If you have a x86 Android tablet say Y or M here, for a generic x86 - distro config say M here. + distro configuration say M here. diff --git a/drivers/platform/x86/x86-android-tablets/asus.c b/drivers/platform/x86/x86-android-tablets/asus.c index 227afbb5107862..07fbeab2319a4f 100644 --- a/drivers/platform/x86/x86-android-tablets/asus.c +++ b/drivers/platform/x86/x86-android-tablets/asus.c @@ -37,7 +37,7 @@ static const struct x86_gpio_button asus_me176c_tf103c_lid __initconst = { .pin = 12, }; -/* Asus ME176C tablets have an Android factory img with everything hardcoded */ +/* Asus ME176C tablets have an Android factory image with everything hardcoded */ static const char * const asus_me176c_accel_mount_matrix[] = { "-1", "0", "0", "0", "1", "0", @@ -112,7 +112,7 @@ static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst = }, .adapter_path = "\\_SB_.I2C5", }, { - /* kxtj21009 accel */ + /* kxtj21009 accelerometer */ .board_info = { .type = "kxtj21009", .addr = 0x0f, @@ -181,7 +181,7 @@ const struct x86_dev_info asus_me176c_info __initconst = { .modules = bq24190_modules, }; -/* Asus TF103C tablets have an Android factory img with everything hardcoded */ +/* Asus TF103C tablets have an Android factory image with everything hardcoded */ static const char * const asus_tf103c_accel_mount_matrix[] = { "0", "-1", "0", "-1", "0", "0", @@ -280,7 +280,7 @@ static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst = }, .adapter_path = "\\_SB_.I2C5", }, { - /* kxtj21009 accel */ + /* kxtj21009 accelerometer */ .board_info = { .type = "kxtj21009", .addr = 0x0f, diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c index 919ef447122958..ef572b90e06ba4 100644 --- a/drivers/platform/x86/x86-android-tablets/core.c +++ b/drivers/platform/x86/x86-android-tablets/core.c @@ -26,19 +26,19 @@ static struct platform_device *x86_android_tablet_device; /* - * This helper allows getting a gpio_desc *before* the actual device consuming - * the GPIO has been instantiated. This function _must_ only be used to handle - * this special case such as e.g. : + * This helper allows getting a GPIO descriptor *before* the actual device + * consuming it has been instantiated. This function MUST only be used to + * handle this special case such as, e.g.: * * 1. Getting an IRQ from a GPIO for i2c_board_info.irq which is passed to * i2c_client_new() to instantiate i2c_client-s; or - * 2. Calling desc_to_gpio() to get an old style GPIO number for gpio_keys + * 2. Calling desc_to_gpio() to get an old style GPIO number for gpio-keys * platform_data which still uses old style GPIO numbers. 
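/*
 * Sketch of case 1 above with made-up chip/pin/con_id values: fetch the GPIO
 * through the helper, translate it to an IRQ and feed that into the
 * i2c_board_info used later to instantiate the client. The trailing
 * descriptor out-parameter is assumed from how the helper is used elsewhere
 * in this driver.
 */
static int __init example_ts_irq_get(struct i2c_board_info *board_info)
{
	struct gpio_desc *gpiod;
	int ret;

	ret = x86_android_tablet_get_gpiod("INT33FC:02", 3, "example_ts_irq",
					   false, GPIOD_ASIS, &gpiod);
	if (ret < 0)
		return ret;

	ret = gpiod_to_irq(gpiod);
	if (ret < 0)
		return ret;

	board_info->irq = ret;
	return 0;
}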
* - * Since the consuming device has not been instatiated yet a dynamic lookup - * is generated using the special x86_android_tablet dev for dev_id. + * Since the consuming device has not been instantiated yet a dynamic lookup + * is generated using the special x86_android_tablet device for dev_id. * - * For normal GPIO lookups a standard static gpiod_lookup_table _must_ be used. + * For normal GPIO lookups a standard static struct gpiod_lookup_table MUST be used. */ int x86_android_tablet_get_gpiod(const char *chip, int pin, const char *con_id, bool active_low, enum gpiod_flags dflags, @@ -87,7 +87,7 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data) /* * The DSDT may already reference the GSI in a device skipped by * acpi_quirk_skip_i2c_client_enumeration(). Unregister the GSI - * to avoid EBUSY errors in this case. + * to avoid -EBUSY errors in this case. */ acpi_unregister_gsi(data->index); irq = acpi_register_gsi(NULL, data->index, data->trigger, data->polarity); @@ -379,7 +379,7 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev) } } - /* + 1 to make space for (optional) gpio_keys_button pdev */ + /* + 1 to make space for the (optional) gpio_keys_button platform device */ pdevs = kcalloc(dev_info->pdev_count + 1, sizeof(*pdevs), GFP_KERNEL); if (!pdevs) { x86_android_tablet_remove(pdev); @@ -390,8 +390,9 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev) for (i = 0; i < pdev_count; i++) { pdevs[i] = platform_device_register_full(&dev_info->pdev_info[i]); if (IS_ERR(pdevs[i])) { + ret = PTR_ERR(pdevs[i]); x86_android_tablet_remove(pdev); - return PTR_ERR(pdevs[i]); + return ret; } } @@ -432,7 +433,7 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev) buttons[i] = dev_info->gpio_button[i].button; buttons[i].gpio = desc_to_gpio(gpiod); - /* Release gpiod so that gpio-keys can request it */ + /* Release GPIO descriptor so that gpio-keys can request it */ devm_gpiod_put(&x86_android_tablet_device->dev, gpiod); } @@ -443,8 +444,9 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev) PLATFORM_DEVID_AUTO, &pdata, sizeof(pdata)); if (IS_ERR(pdevs[pdev_count])) { + ret = PTR_ERR(pdevs[pdev_count]); x86_android_tablet_remove(pdev); - return PTR_ERR(pdevs[pdev_count]); + return ret; } pdev_count++; } diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c index 387dd092c4dd01..17f6da96aa016e 100644 --- a/drivers/platform/x86/x86-android-tablets/dmi.c +++ b/drivers/platform/x86/x86-android-tablets/dmi.c @@ -99,17 +99,17 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = { { /* Lenovo Yoga Book X91F / X91L */ .matches = { - /* Non exact match to match F + L versions */ + /* Inexact match to match F + L versions */ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"), }, .driver_data = (void *)&lenovo_yogabook_x91_info, }, { /* - * Lenovo Yoga Tablet 2 Pro 1380F/L (13") This has more or less - * the same BIOS as the 830F/L or 1050F/L (8" and 10") below, - * but unlike the 8" / 10" models which share the same mainboard - * this model has a different mainboard. + * Lenovo Yoga Tablet 2 Pro 1380F/L (13") + * This has more or less the same BIOS as the 830F/L or 1050F/L + * (8" and 10") below, but unlike the 8"/10" models which share + * the same mainboard this model has a different mainboard. * This match for the 13" model MUST come before the 8" + 10" * match since that one will also match the 13" model! 
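/*
 * Why the ordering matters: the probe code takes the first entry whose DMI
 * fields all match, so an entry with an extra, more specific field has to
 * sit in front of the looser entry it overlaps with. Vendor/product strings
 * below are placeholders.
 */
static const struct dmi_system_id example_order_ids[] __initconst = {
	{
		/* More specific model, must come first */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Corp."),
			DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE TABLET"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "13-inch"),
		},
	},
	{
		/* Looser match, would also hit the model above */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Corp."),
			DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE TABLET"),
		},
	},
	{ }
};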
*/ @@ -124,8 +124,8 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = { }, { /* - * Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10" - * Lenovo Yoga Tablet 2 use the same mainboard) + * Lenovo Yoga Tablet 2 830F/L or 1050F/L + * The 8" and 10" Lenovo Yoga Tablet 2 use the same mainboard. */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), @@ -163,7 +163,7 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = { .driver_data = (void *)&nextbook_ares8_info, }, { - /* Nextbook Ares 8A (CHT version)*/ + /* Nextbook Ares 8A (CHT version) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c index 74f39b658d2ccd..ae087f1471c174 100644 --- a/drivers/platform/x86/x86-android-tablets/lenovo.c +++ b/drivers/platform/x86/x86-android-tablets/lenovo.c @@ -59,7 +59,7 @@ static struct lp855x_platform_data lenovo_lp8557_reg_only_pdata = { .initial_brightness = 128, }; -/* Lenovo Yoga Book X90F / X90L's Android factory img has everything hardcoded */ +/* Lenovo Yoga Book X90F / X90L's Android factory image has everything hardcoded */ static const struct property_entry lenovo_yb1_x90_wacom_props[] = { PROPERTY_ENTRY_U32("hid-descr-addr", 0x0001), @@ -262,7 +262,7 @@ const struct x86_dev_info lenovo_yogabook_x90_info __initconst = { .init = lenovo_yb1_x90_init, }; -/* Lenovo Yoga Book X91F/L Windows tablet needs manual instantiation of the fg client */ +/* Lenovo Yoga Book X91F/L Windows tablet needs manual instantiation of the fuel-gauge client */ static const struct x86_i2c_client_info lenovo_yogabook_x91_i2c_clients[] __initconst = { { /* BQ27542 fuel-gauge */ @@ -281,7 +281,7 @@ const struct x86_dev_info lenovo_yogabook_x91_info __initconst = { .i2c_client_count = ARRAY_SIZE(lenovo_yogabook_x91_i2c_clients), }; -/* Lenovo Yoga Tablet 2 1050F/L's Android factory img has everything hardcoded */ +/* Lenovo Yoga Tablet 2 1050F/L's Android factory image has everything hardcoded */ static const struct property_entry lenovo_yoga_tab2_830_1050_bq24190_props[] = { PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", tusb1211_chg_det_psy, 1), PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node), @@ -521,9 +521,9 @@ static int __init lenovo_yoga_tab2_830_1050_init_codec(void) } /* - * These tablet's DSDT does not set acpi_gbl_reduced_hardware, so acpi_power_off + * These tablet's DSDT does not set acpi_gbl_reduced_hardware, so acpi_power_off() * gets used as pm_power_off handler. This causes "poweroff" on these tablets - * to hang hard. Requiring pressing the powerbutton for 30 seconds *twice* + * to hang hard. Requiring pressing the power button for 30 seconds *twice* * followed by a normal 3 second press to recover. Avoid this by doing an EFI * poweroff instead. 
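/*
 * Rough shape of the EFI poweroff fallback described above, assuming the
 * callback simply asks the EFI runtime services to shut the machine down;
 * the function names are illustrative, not the driver's actual ones.
 */
#include <linux/efi.h>
#include <linux/reboot.h>

static int example_efi_power_off(struct sys_off_data *data)
{
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);

	return NOTIFY_DONE;
}

static int __init example_power_off_register(void)
{
	struct sys_off_handler *handler;

	/* SYS_OFF_PRIO_FIRMWARE + 1 so it runs before acpi_power_off() */
	handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					   SYS_OFF_PRIO_FIRMWARE + 1,
					   example_efi_power_off, NULL);
	return PTR_ERR_OR_ZERO(handler);
}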
*/ @@ -546,7 +546,7 @@ static int __init lenovo_yoga_tab2_830_1050_init(struct device *dev) if (ret) return ret; - /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */ + /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off() */ lenovo_yoga_tab2_830_1050_sys_off_handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1, lenovo_yoga_tab2_830_1050_power_off, NULL); @@ -742,7 +742,7 @@ static int __init lenovo_yoga_tab2_1380_init(struct device *dev) if (ret) return ret; - /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */ + /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off() */ lenovo_yoga_tab2_830_1050_sys_off_handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1, lenovo_yoga_tab2_830_1050_power_off, NULL); @@ -799,7 +799,7 @@ static const struct software_node fg_bq25890_1_supply_node = { .properties = fg_bq25890_1_supply_props, }; -/* bq25892 charger settings for the flat lipo battery behind the screen */ +/* bq25892 charger settings for the flat LiPo battery behind the screen */ static const struct property_entry lenovo_yt3_bq25892_0_props[] = { PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lenovo_yt3_bq25892_0_suppliers), PROPERTY_ENTRY_U32("linux,iinlim-percentage", 40), @@ -833,7 +833,7 @@ static const struct software_node lenovo_yt3_hideep_ts_node = { static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = { { - /* bq27500 fuel-gauge for the flat lipo battery behind the screen */ + /* bq27500 fuel-gauge for the flat LiPo battery behind the screen */ .board_info = { .type = "bq27500", .addr = 0x55, @@ -842,7 +842,7 @@ static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = { }, .adapter_path = "\\_SB_.PCI0.I2C1", }, { - /* bq25892 charger for the flat lipo battery behind the screen */ + /* bq25892 charger for the flat LiPo battery behind the screen */ .board_info = { .type = "bq25892", .addr = 0x6b, @@ -859,7 +859,7 @@ static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = { .con_id = "bq25892_0_irq", }, }, { - /* bq27500 fuel-gauge for the round li-ion cells in the hinge */ + /* bq27500 fuel-gauge for the round Li-ion cells in the hinge */ .board_info = { .type = "bq27500", .addr = 0x55, diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c index eb0e55c69dfedb..7db8aa58b90790 100644 --- a/drivers/platform/x86/x86-android-tablets/other.c +++ b/drivers/platform/x86/x86-android-tablets/other.c @@ -20,7 +20,7 @@ #include "shared-psy-info.h" #include "x86-android-tablets.h" -/* Acer Iconia One 7 B1-750 has an Android factory img with everything hardcoded */ +/* Acer Iconia One 7 B1-750 has an Android factory image with everything hardcoded */ static const char * const acer_b1_750_mount_matrix[] = { "-1", "0", "0", "0", "1", "0", @@ -98,7 +98,7 @@ const struct x86_dev_info acer_b1_750_info __initconst = { * Advantech MICA-071 * This is a standard Windows tablet, but it has an extra "quick launch" button * which is not described in the ACPI tables in anyway. - * Use the x86-android-tablets infra to create a gpio-button device for this. + * Use the x86-android-tablets infra to create a gpio-keys device for this. 
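/*
 * Shape of such a button description (key code, chip name, pin and timings
 * are placeholders): the core looks the pin up on the named GPIO controller
 * and registers a matching gpio-keys platform device for it.
 */
static const struct x86_gpio_button example_button __initconst = {
	.button = {
		.code = KEY_PROG1,
		.active_low = true,
		.desc = "example_key",
		.type = EV_KEY,
		.wakeup = true,
		.debounce_interval = 50,
	},
	.chip = "INT33FC:00",
	.pin = 5,
};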
*/ static const struct x86_gpio_button advantech_mica_071_button __initconst = { .button = { @@ -209,7 +209,7 @@ const struct x86_dev_info chuwi_hi8_info __initconst = { * This comes in both Windows and Android versions and even on Android * the DSDT is mostly sane. This tablet has 2 extra general purpose buttons * in the button row with the power + volume-buttons labeled P and F. - * Use the x86-android-tablets infra to create a gpio-button device for these. + * Use the x86-android-tablets infra to create a gpio-keys device for these. */ static const struct x86_gpio_button cyberbook_t116_buttons[] __initconst = { { @@ -276,7 +276,7 @@ const struct x86_dev_info czc_p10t __initconst = { .init = czc_p10t_init, }; -/* Medion Lifetab S10346 tablets have an Android factory img with everything hardcoded */ +/* Medion Lifetab S10346 tablets have an Android factory image with everything hardcoded */ static const char * const medion_lifetab_s10346_accel_mount_matrix[] = { "0", "1", "0", "1", "0", "0", @@ -305,7 +305,7 @@ static const struct software_node medion_lifetab_s10346_touchscreen_node = { static const struct x86_i2c_client_info medion_lifetab_s10346_i2c_clients[] __initconst = { { - /* kxtj21009 accel */ + /* kxtj21009 accelerometer */ .board_info = { .type = "kxtj21009", .addr = 0x0f, @@ -359,7 +359,7 @@ const struct x86_dev_info medion_lifetab_s10346_info __initconst = { .gpiod_lookup_tables = medion_lifetab_s10346_gpios, }; -/* Nextbook Ares 8 (BYT) tablets have an Android factory img with everything hardcoded */ +/* Nextbook Ares 8 (BYT) tablets have an Android factory image with everything hardcoded */ static const char * const nextbook_ares8_accel_mount_matrix[] = { "0", "-1", "0", "-1", "0", "0", @@ -387,7 +387,7 @@ static const struct software_node nextbook_ares8_touchscreen_node = { static const struct x86_i2c_client_info nextbook_ares8_i2c_clients[] __initconst = { { - /* Freescale MMA8653FC accel */ + /* Freescale MMA8653FC accelerometer */ .board_info = { .type = "mma8653", .addr = 0x1d, @@ -428,7 +428,7 @@ const struct x86_dev_info nextbook_ares8_info __initconst = { .gpiod_lookup_tables = nextbook_ares8_gpios, }; -/* Nextbook Ares 8A (CHT) tablets have an Android factory img with everything hardcoded */ +/* Nextbook Ares 8A (CHT) tablets have an Android factory image with everything hardcoded */ static const char * const nextbook_ares8a_accel_mount_matrix[] = { "1", "0", "0", "0", "-1", "0", @@ -446,7 +446,7 @@ static const struct software_node nextbook_ares8a_accel_node = { static const struct x86_i2c_client_info nextbook_ares8a_i2c_clients[] __initconst = { { - /* Freescale MMA8653FC accel */ + /* Freescale MMA8653FC accelerometer */ .board_info = { .type = "mma8653", .addr = 0x1d, @@ -497,7 +497,7 @@ const struct x86_dev_info nextbook_ares8a_info __initconst = { * Peaq C1010 * This is a standard Windows tablet, but it has a special Dolby button. * This button has a WMI interface, but that is broken. Instead of trying to - * use the broken WMI interface, instantiate a gpio_keys device for this. + * use the broken WMI interface, instantiate a gpio-keys device for this. */ static const struct x86_gpio_button peaq_c1010_button __initconst = { .button = { @@ -521,7 +521,7 @@ const struct x86_dev_info peaq_c1010_info __initconst = { * Whitelabel (sold as various brands) TM800A550L tablets. * These tablet's DSDT contains a whole bunch of bogus ACPI I2C devices * (removed through acpi_quirk_skip_i2c_client_enumeration()) and - * the touchscreen fwnode has the wrong GPIOs. 
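/*
 * How such a GPIO fix-up is usually expressed (controller names, pins and
 * con_ids below are placeholders): a static lookup table keyed by the
 * consumer device name, i.e. the normal-case mechanism the core.c comment
 * earlier in this patch prescribes.
 */
#include <linux/gpio/machine.h>

static struct gpiod_lookup_table example_ts_gpios = {
	.dev_id = "i2c-example_ts",
	.table = {
		GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("INT33FC:02", 3, "irq", GPIO_ACTIVE_HIGH),
		{ }
	},
};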
+ * the touchscreen firmware node has the wrong GPIOs. */ static const char * const whitelabel_tm800a550l_accel_mount_matrix[] = { "-1", "0", "0", @@ -566,7 +566,7 @@ static const struct x86_i2c_client_info whitelabel_tm800a550l_i2c_clients[] __in .polarity = ACPI_ACTIVE_HIGH, }, }, { - /* kxcj91008 accel */ + /* kxcj91008 accelerometer */ .board_info = { .type = "kxcj91008", .addr = 0x0f, @@ -598,12 +598,12 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = { }; /* - * The fwnode for ktd2026 on Xaomi pad2. It composed of a RGB LED node + * The firmware node for ktd2026 on Xaomi pad2. It composed of a RGB LED node * with three subnodes for each color (B/G/R). The RGB LED node is named * "multi-led" to align with the name in the device tree. */ -/* main fwnode for ktd2026 */ +/* Main firmware node for ktd2026 */ static const struct software_node ktd2026_node = { .name = "ktd2026", }; @@ -665,12 +665,12 @@ static const struct software_node *ktd2026_node_group[] = { }; /* - * For the LEDs which backlight the menu / home / back capacitive buttons on + * For the LEDs which backlight the Menu / Home / Back capacitive buttons on * the bottom bezel. These are attached to a TPS61158 LED controller which * is controlled by the "pwm_soc_lpss_2" PWM output. */ #define XIAOMI_MIPAD2_LED_PERIOD_NS 19200 -#define XIAOMI_MIPAD2_LED_DEFAULT_DUTY 6000 /* From Android kernel */ +#define XIAOMI_MIPAD2_LED_MAX_DUTY_NS 6000 /* From Android kernel */ static struct pwm_device *xiaomi_mipad2_led_pwm; @@ -679,7 +679,7 @@ static int xiaomi_mipad2_brightness_set(struct led_classdev *led_cdev, { struct pwm_state state = { .period = XIAOMI_MIPAD2_LED_PERIOD_NS, - .duty_cycle = val, + .duty_cycle = XIAOMI_MIPAD2_LED_MAX_DUTY_NS * val / LED_FULL, /* Always set PWM enabled to avoid the pin floating */ .enabled = true, }; @@ -701,11 +701,11 @@ static int __init xiaomi_mipad2_init(struct device *dev) return -ENOMEM; led_cdev->name = "mipad2:white:touch-buttons-backlight"; - led_cdev->max_brightness = XIAOMI_MIPAD2_LED_PERIOD_NS; - /* "input-events" trigger uses blink_brightness */ - led_cdev->blink_brightness = XIAOMI_MIPAD2_LED_DEFAULT_DUTY; + led_cdev->max_brightness = LED_FULL; led_cdev->default_trigger = "input-events"; led_cdev->brightness_set_blocking = xiaomi_mipad2_brightness_set; + /* Turn LED off during suspend */ + led_cdev->flags = LED_CORE_SUSPENDRESUME; ret = devm_led_classdev_register(dev, led_cdev); if (ret) diff --git a/drivers/platform/x86/x86-android-tablets/shared-psy-info.c b/drivers/platform/x86/x86-android-tablets/shared-psy-info.c index d2d0aa51bc3f53..a46fa15acfb14e 100644 --- a/drivers/platform/x86/x86-android-tablets/shared-psy-info.c +++ b/drivers/platform/x86/x86-android-tablets/shared-psy-info.c @@ -39,7 +39,7 @@ const struct software_node fg_bq25890_supply_node = { .properties = fg_bq25890_supply_props, }; -/* LiPo HighVoltage (max 4.35V) settings used by most devs with a HV bat. 
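/*
 * Worked example for the xiaomi_mipad2_brightness_set() change above:
 * brightness now runs 0..LED_FULL (255) and is scaled onto the fixed
 * 6000 ns maximum duty cycle of the 19200 ns PWM period, instead of being
 * written out as a raw duty cycle in nanoseconds.
 */
static inline u64 example_mipad2_duty_ns(unsigned int brightness)
{
	/* LED_FULL (255) -> 6000 ns (~31% duty), 128 -> 3011 ns */
	return (u64)XIAOMI_MIPAD2_LED_MAX_DUTY_NS * brightness / LED_FULL;
}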
*/ +/* LiPo HighVoltage (max 4.35V) settings used by most devs with a HV battery */ static const struct property_entry generic_lipo_hv_4v35_battery_props[] = { PROPERTY_ENTRY_STRING("compatible", "simple-battery"), PROPERTY_ENTRY_STRING("device-chemistry", "lithium-ion"), @@ -80,7 +80,7 @@ const char * const bq24190_modules[] __initconst = { NULL }; -/* Generic pdevs array and gpio-lookups for micro USB ID pin handling */ +/* Generic platform device array and GPIO lookup table for micro USB ID pin handling */ const struct platform_device_info int3496_pdevs[] __initconst = { { /* For micro USB ID pin handling */ diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h index 86402b9b46a338..5517e438c7b69a 100644 --- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h +++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h @@ -61,7 +61,7 @@ struct x86_serdev_info { const char *ctrl_uid; const char *ctrl_devname; /* - * ATM the serdev core only supports of or ACPI matching; and sofar all + * ATM the serdev core only supports of or ACPI matching; and so far all * Android x86 tablets DSDTs have usable serdev nodes, but sometimes * under the wrong controller. So we just tie the existing serdev ACPI * node to the right controller. diff --git a/drivers/pmdomain/amlogic/Kconfig b/drivers/pmdomain/amlogic/Kconfig index 2108729909b5f6..e72b664174af13 100644 --- a/drivers/pmdomain/amlogic/Kconfig +++ b/drivers/pmdomain/amlogic/Kconfig @@ -1,17 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only menu "Amlogic PM Domains" -config MESON_GX_PM_DOMAINS - tristate "Amlogic Meson GX Power Domains driver" - depends on ARCH_MESON || COMPILE_TEST - depends on PM && OF - default ARCH_MESON - select PM_GENERIC_DOMAINS - select PM_GENERIC_DOMAINS_OF - help - Say yes to expose Amlogic Meson GX Power Domains as - Generic Power Domains. 
- config MESON_EE_PM_DOMAINS tristate "Amlogic Meson Everything-Else Power Domains driver" depends on ARCH_MESON || COMPILE_TEST diff --git a/drivers/pmdomain/amlogic/Makefile b/drivers/pmdomain/amlogic/Makefile index 3d58abd574f9fd..99f195f09957b6 100644 --- a/drivers/pmdomain/amlogic/Makefile +++ b/drivers/pmdomain/amlogic/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o diff --git a/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c deleted file mode 100644 index 6028e91664a4b4..00000000000000 --- a/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c +++ /dev/null @@ -1,380 +0,0 @@ -/* - * Copyright (c) 2017 BayLibre, SAS - * Author: Neil Armstrong - * - * SPDX-License-Identifier: GPL-2.0+ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* AO Offsets */ - -#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2) - -#define GEN_PWR_VPU_HDMI BIT(8) -#define GEN_PWR_VPU_HDMI_ISO BIT(9) - -/* HHI Offsets */ - -#define HHI_MEM_PD_REG0 (0x40 << 2) -#define HHI_VPU_MEM_PD_REG0 (0x41 << 2) -#define HHI_VPU_MEM_PD_REG1 (0x42 << 2) -#define HHI_VPU_MEM_PD_REG2 (0x4d << 2) - -struct meson_gx_pwrc_vpu { - struct generic_pm_domain genpd; - struct regmap *regmap_ao; - struct regmap *regmap_hhi; - struct reset_control *rstc; - struct clk *vpu_clk; - struct clk *vapb_clk; -}; - -static inline -struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d) -{ - return container_of(d, struct meson_gx_pwrc_vpu, genpd); -} - -static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd) -{ - struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); - int i; - - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, - GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO); - udelay(20); - - /* Power Down Memories */ - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, - 0x3 << i, 0x3 << i); - udelay(5); - } - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, - 0x3 << i, 0x3 << i); - udelay(5); - } - for (i = 8; i < 16; i++) { - regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, - BIT(i), BIT(i)); - udelay(5); - } - udelay(20); - - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, - GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI); - - msleep(20); - - clk_disable_unprepare(pd->vpu_clk); - clk_disable_unprepare(pd->vapb_clk); - - return 0; -} - -static int meson_g12a_pwrc_vpu_power_off(struct generic_pm_domain *genpd) -{ - struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); - int i; - - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, - GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO); - udelay(20); - - /* Power Down Memories */ - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, - 0x3 << i, 0x3 << i); - udelay(5); - } - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, - 0x3 << i, 0x3 << i); - udelay(5); - } - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2, - 0x3 << i, 0x3 << i); - udelay(5); - } - for (i = 8; i < 16; i++) { - regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, - BIT(i), BIT(i)); - udelay(5); - } - udelay(20); - - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, - GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI); - - msleep(20); - - 
clk_disable_unprepare(pd->vpu_clk); - clk_disable_unprepare(pd->vapb_clk); - - return 0; -} - -static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd) -{ - int ret; - - ret = clk_prepare_enable(pd->vpu_clk); - if (ret) - return ret; - - ret = clk_prepare_enable(pd->vapb_clk); - if (ret) - clk_disable_unprepare(pd->vpu_clk); - - return ret; -} - -static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd) -{ - struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); - int ret; - int i; - - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, - GEN_PWR_VPU_HDMI, 0); - udelay(20); - - /* Power Up Memories */ - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, - 0x3 << i, 0); - udelay(5); - } - - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, - 0x3 << i, 0); - udelay(5); - } - - for (i = 8; i < 16; i++) { - regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, - BIT(i), 0); - udelay(5); - } - udelay(20); - - ret = reset_control_assert(pd->rstc); - if (ret) - return ret; - - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, - GEN_PWR_VPU_HDMI_ISO, 0); - - ret = reset_control_deassert(pd->rstc); - if (ret) - return ret; - - ret = meson_gx_pwrc_vpu_setup_clk(pd); - if (ret) - return ret; - - return 0; -} - -static int meson_g12a_pwrc_vpu_power_on(struct generic_pm_domain *genpd) -{ - struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd); - int ret; - int i; - - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, - GEN_PWR_VPU_HDMI, 0); - udelay(20); - - /* Power Up Memories */ - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0, - 0x3 << i, 0); - udelay(5); - } - - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1, - 0x3 << i, 0); - udelay(5); - } - - for (i = 0; i < 32; i += 2) { - regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2, - 0x3 << i, 0); - udelay(5); - } - - for (i = 8; i < 16; i++) { - regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0, - BIT(i), 0); - udelay(5); - } - udelay(20); - - ret = reset_control_assert(pd->rstc); - if (ret) - return ret; - - regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, - GEN_PWR_VPU_HDMI_ISO, 0); - - ret = reset_control_deassert(pd->rstc); - if (ret) - return ret; - - ret = meson_gx_pwrc_vpu_setup_clk(pd); - if (ret) - return ret; - - return 0; -} - -static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd) -{ - u32 reg; - - regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, ®); - - return (reg & GEN_PWR_VPU_HDMI); -} - -static struct meson_gx_pwrc_vpu vpu_hdmi_pd = { - .genpd = { - .name = "vpu_hdmi", - .power_off = meson_gx_pwrc_vpu_power_off, - .power_on = meson_gx_pwrc_vpu_power_on, - }, -}; - -static struct meson_gx_pwrc_vpu vpu_hdmi_pd_g12a = { - .genpd = { - .name = "vpu_hdmi", - .power_off = meson_g12a_pwrc_vpu_power_off, - .power_on = meson_g12a_pwrc_vpu_power_on, - }, -}; - -static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev) -{ - const struct meson_gx_pwrc_vpu *vpu_pd_match; - struct regmap *regmap_ao, *regmap_hhi; - struct meson_gx_pwrc_vpu *vpu_pd; - struct device_node *parent_np; - struct reset_control *rstc; - struct clk *vpu_clk; - struct clk *vapb_clk; - bool powered_off; - int ret; - - vpu_pd_match = of_device_get_match_data(&pdev->dev); - if (!vpu_pd_match) { - dev_err(&pdev->dev, "failed to get match data\n"); - return -ENODEV; - } - - vpu_pd = devm_kzalloc(&pdev->dev, sizeof(*vpu_pd), GFP_KERNEL); - if (!vpu_pd) - return 
-ENOMEM; - - memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd)); - - parent_np = of_get_parent(pdev->dev.of_node); - regmap_ao = syscon_node_to_regmap(parent_np); - of_node_put(parent_np); - if (IS_ERR(regmap_ao)) { - dev_err(&pdev->dev, "failed to get regmap\n"); - return PTR_ERR(regmap_ao); - } - - regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "amlogic,hhi-sysctrl"); - if (IS_ERR(regmap_hhi)) { - dev_err(&pdev->dev, "failed to get HHI regmap\n"); - return PTR_ERR(regmap_hhi); - } - - rstc = devm_reset_control_array_get_exclusive(&pdev->dev); - if (IS_ERR(rstc)) - return dev_err_probe(&pdev->dev, PTR_ERR(rstc), - "failed to get reset lines\n"); - - vpu_clk = devm_clk_get(&pdev->dev, "vpu"); - if (IS_ERR(vpu_clk)) { - dev_err(&pdev->dev, "vpu clock request failed\n"); - return PTR_ERR(vpu_clk); - } - - vapb_clk = devm_clk_get(&pdev->dev, "vapb"); - if (IS_ERR(vapb_clk)) { - dev_err(&pdev->dev, "vapb clock request failed\n"); - return PTR_ERR(vapb_clk); - } - - vpu_pd->regmap_ao = regmap_ao; - vpu_pd->regmap_hhi = regmap_hhi; - vpu_pd->rstc = rstc; - vpu_pd->vpu_clk = vpu_clk; - vpu_pd->vapb_clk = vapb_clk; - - platform_set_drvdata(pdev, vpu_pd); - - powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd); - - /* If already powered, sync the clock states */ - if (!powered_off) { - ret = meson_gx_pwrc_vpu_setup_clk(vpu_pd); - if (ret) - return ret; - } - - vpu_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON; - pm_genpd_init(&vpu_pd->genpd, NULL, powered_off); - - return of_genpd_add_provider_simple(pdev->dev.of_node, - &vpu_pd->genpd); -} - -static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev) -{ - struct meson_gx_pwrc_vpu *vpu_pd = platform_get_drvdata(pdev); - bool powered_off; - - powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd); - if (!powered_off) - vpu_pd->genpd.power_off(&vpu_pd->genpd); -} - -static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = { - { .compatible = "amlogic,meson-gx-pwrc-vpu", .data = &vpu_hdmi_pd }, - { - .compatible = "amlogic,meson-g12a-pwrc-vpu", - .data = &vpu_hdmi_pd_g12a - }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, meson_gx_pwrc_vpu_match_table); - -static struct platform_driver meson_gx_pwrc_vpu_driver = { - .probe = meson_gx_pwrc_vpu_probe, - .shutdown = meson_gx_pwrc_vpu_shutdown, - .driver = { - .name = "meson_gx_pwrc_vpu", - .of_match_table = meson_gx_pwrc_vpu_match_table, - }, -}; -module_platform_driver(meson_gx_pwrc_vpu_driver); -MODULE_DESCRIPTION("Amlogic Meson GX Power Domains driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/pmdomain/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c index d62a776c89a121..9467235110f465 100644 --- a/drivers/pmdomain/apple/pmgr-pwrstate.c +++ b/drivers/pmdomain/apple/pmgr-pwrstate.c @@ -177,7 +177,7 @@ static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned return !!(reg & APPLE_PMGR_RESET); } -const struct reset_control_ops apple_pmgr_reset_ops = { +static const struct reset_control_ops apple_pmgr_reset_ops = { .assert = apple_pmgr_reset_assert, .deassert = apple_pmgr_reset_deassert, .reset = apple_pmgr_reset_reset, diff --git a/drivers/pmdomain/bcm/raspberrypi-power.c b/drivers/pmdomain/bcm/raspberrypi-power.c index 06196ebfe03b03..b87ea7adb7beac 100644 --- a/drivers/pmdomain/bcm/raspberrypi-power.c +++ b/drivers/pmdomain/bcm/raspberrypi-power.c @@ -41,40 +41,46 @@ struct rpi_power_domains { */ struct rpi_power_domain_packet { u32 domain; - u32 on; + u32 state; }; /* * Asks the firmware to enable or disable power on a specific 
power * domain. */ -static int rpi_firmware_set_power(struct rpi_power_domain *rpi_domain, bool on) +static int rpi_firmware_set_power(struct generic_pm_domain *domain, bool on) { + struct rpi_power_domain *rpi_domain = + container_of(domain, struct rpi_power_domain, base); + bool old_interface = rpi_domain->old_interface; struct rpi_power_domain_packet packet; + int ret; packet.domain = rpi_domain->domain; - packet.on = on; - return rpi_firmware_property(rpi_domain->fw, - rpi_domain->old_interface ? - RPI_FIRMWARE_SET_POWER_STATE : - RPI_FIRMWARE_SET_DOMAIN_STATE, - &packet, sizeof(packet)); + packet.state = on; + + ret = rpi_firmware_property(rpi_domain->fw, old_interface ? + RPI_FIRMWARE_SET_POWER_STATE : + RPI_FIRMWARE_SET_DOMAIN_STATE, + &packet, sizeof(packet)); + if (ret) + dev_err(&domain->dev, "Failed to set %s to %u (%d)\n", + old_interface ? "power" : "domain", on, ret); + else + dev_dbg(&domain->dev, "Set %s to %u\n", + old_interface ? "power" : "domain", on); + + return ret; } static int rpi_domain_off(struct generic_pm_domain *domain) { - struct rpi_power_domain *rpi_domain = - container_of(domain, struct rpi_power_domain, base); - - return rpi_firmware_set_power(rpi_domain, false); + return rpi_firmware_set_power(domain, false); } static int rpi_domain_on(struct generic_pm_domain *domain) { - struct rpi_power_domain *rpi_domain = - container_of(domain, struct rpi_power_domain, base); - - return rpi_firmware_set_power(rpi_domain, true); + return rpi_firmware_set_power(domain, true); } static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains, @@ -85,6 +91,7 @@ static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains, dom->fw = rpi_domains->fw; dom->base.name = name; + dom->base.flags = GENPD_FLAG_ACTIVE_WAKEUP; dom->base.power_on = rpi_domain_on; dom->base.power_off = rpi_domain_off; @@ -142,13 +149,13 @@ rpi_has_new_domain_support(struct rpi_power_domains *rpi_domains) int ret; packet.domain = RPI_POWER_DOMAIN_ARM; - packet.on = ~0; + packet.state = ~0; ret = rpi_firmware_property(rpi_domains->fw, RPI_FIRMWARE_GET_DOMAIN_STATE, &packet, sizeof(packet)); - return ret == 0 && packet.on != ~0; + return ret == 0 && packet.state != ~0; } static int rpi_power_probe(struct platform_device *pdev) diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 7a61aa88c0614a..5ede0f7eda0917 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -117,6 +117,48 @@ static const struct genpd_lock_ops genpd_spin_ops = { .unlock = genpd_unlock_spin, }; +static void genpd_lock_raw_spin(struct generic_pm_domain *genpd) + __acquires(&genpd->raw_slock) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&genpd->raw_slock, flags); + genpd->raw_lock_flags = flags; +} + +static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd, + int depth) + __acquires(&genpd->raw_slock) +{ + unsigned long flags; + + raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth); + genpd->raw_lock_flags = flags; +} + +static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd) + __acquires(&genpd->raw_slock) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&genpd->raw_slock, flags); + genpd->raw_lock_flags = flags; + return 0; +} + +static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd) + __releases(&genpd->raw_slock) +{ + raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags); +} + +static const struct genpd_lock_ops genpd_raw_spin_ops = { + .lock = genpd_lock_raw_spin, + .lock_nested = 
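
Editor's note: the raspberrypi-power rework above passes the generic_pm_domain pointer down and recovers the driver structure with container_of(), which only works because the genpd is embedded in the wrapper struct rather than referenced through a pointer. A minimal sketch of that callback shape, using the hypothetical names foo_domain and foo_domain_power_on:

#include <linux/container_of.h>
#include <linux/pm_domain.h>

struct foo_domain {
        struct generic_pm_domain base;  /* must be embedded, not a pointer */
        bool enabled;
};

/* genpd hands us the embedded member; container_of() recovers the wrapper. */
static int foo_domain_power_on(struct generic_pm_domain *domain)
{
        struct foo_domain *fd = container_of(domain, struct foo_domain, base);

        fd->enabled = true;
        return 0;
}
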
genpd_lock_nested_raw_spin, + .lock_interruptible = genpd_lock_interruptible_raw_spin, + .unlock = genpd_unlock_raw_spin, +}; + #define genpd_lock(p) p->lock_ops->lock(p) #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d) #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p) @@ -1758,7 +1800,6 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, genpd_lock(genpd); genpd_set_cpumask(genpd, gpd_data->cpu); - dev_pm_domain_set(dev, &genpd->domain); genpd->device_count++; if (gd) @@ -1767,6 +1808,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); genpd_unlock(genpd); + dev_pm_domain_set(dev, &genpd->domain); out: if (ret) genpd_free_dev_data(dev, gpd_data); @@ -1823,12 +1865,13 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, genpd->gd->max_off_time_changed = true; genpd_clear_cpumask(genpd, gpd_data->cpu); - dev_pm_domain_set(dev, NULL); list_del_init(&pdd->list_node); genpd_unlock(genpd); + dev_pm_domain_set(dev, NULL); + if (genpd->detach_dev) genpd->detach_dev(genpd, dev); @@ -2143,7 +2186,10 @@ static void genpd_free_data(struct generic_pm_domain *genpd) static void genpd_lock_init(struct generic_pm_domain *genpd) { - if (genpd_is_irq_safe(genpd)) { + if (genpd_is_cpu_domain(genpd)) { + raw_spin_lock_init(&genpd->raw_slock); + genpd->lock_ops = &genpd_raw_spin_ops; + } else if (genpd_is_irq_safe(genpd)) { spin_lock_init(&genpd->slock); genpd->lock_ops = &genpd_spin_ops; } else { @@ -3181,24 +3227,25 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev) else WARN_ON(1); - seq_printf(s, "%-25s ", p); + seq_printf(s, "%-26s ", p); } -static void mode_status_str(struct seq_file *s, struct device *dev) +static void perf_status_str(struct seq_file *s, struct device *dev) { struct generic_pm_domain_data *gpd_data; gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - seq_printf(s, "%20s", gpd_data->hw_mode ? "HW" : "SW"); + seq_printf(s, "%-10u ", gpd_data->performance_state); } -static void perf_status_str(struct seq_file *s, struct device *dev) +static void mode_status_str(struct seq_file *s, struct device *dev) { struct generic_pm_domain_data *gpd_data; gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - seq_put_decimal_ull(s, "", gpd_data->performance_state); + + seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW"); } static int genpd_summary_one(struct seq_file *s, @@ -3209,7 +3256,6 @@ static int genpd_summary_one(struct seq_file *s, [GENPD_STATE_OFF] = "off" }; struct pm_domain_data *pm_data; - const char *kobj_path; struct gpd_link *link; char state[16]; int ret; @@ -3226,7 +3272,7 @@ static int genpd_summary_one(struct seq_file *s, else snprintf(state, sizeof(state), "%s", status_lookup[genpd->status]); - seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); + seq_printf(s, "%-30s %-30s %u", genpd->name, state, genpd->performance_state); /* * Modifications on the list require holding locks on both @@ -3242,17 +3288,10 @@ static int genpd_summary_one(struct seq_file *s, } list_for_each_entry(pm_data, &genpd->dev_list, list_node) { - kobj_path = kobject_get_path(&pm_data->dev->kobj, - genpd_is_irq_safe(genpd) ? 
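
Editor's note: the genpd_raw_spin_ops added above acquire the lock in one helper and release it in another, so the irqsave flags are stashed in the structure the lock protects; genpd_lock_init() then picks these ops for CPU domains. A minimal sketch of the flag-stashing idiom with a hypothetical foo_obj:

#include <linux/spinlock.h>

struct foo_obj {
        raw_spinlock_t lock;
        unsigned long lock_flags;       /* valid only while the lock is held */
};

static void foo_lock(struct foo_obj *o)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&o->lock, flags);
        o->lock_flags = flags;          /* safe to store: we now own the lock */
}

static void foo_unlock(struct foo_obj *o)
{
        raw_spin_unlock_irqrestore(&o->lock, o->lock_flags);
}

Storing the flags in the locked object is what lets lock and unlock live in different functions, as the genpd lock_ops interface requires.
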
- GFP_ATOMIC : GFP_KERNEL); - if (kobj_path == NULL) - continue; - - seq_printf(s, "\n %-50s ", kobj_path); + seq_printf(s, "\n %-30s ", dev_name(pm_data->dev)); rtpm_status_str(s, pm_data->dev); perf_status_str(s, pm_data->dev); mode_status_str(s, pm_data->dev); - kfree(kobj_path); } seq_puts(s, "\n"); @@ -3267,9 +3306,9 @@ static int summary_show(struct seq_file *s, void *data) struct generic_pm_domain *genpd; int ret = 0; - seq_puts(s, "domain status children performance\n"); - seq_puts(s, " /device runtime status managed by\n"); - seq_puts(s, "------------------------------------------------------------------------------------------------------------\n"); + seq_puts(s, "domain status children performance\n"); + seq_puts(s, " /device runtime status managed by\n"); + seq_puts(s, "------------------------------------------------------------------------------\n"); ret = mutex_lock_interruptible(&gpd_list_lock); if (ret) @@ -3421,23 +3460,14 @@ static int devices_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; struct pm_domain_data *pm_data; - const char *kobj_path; int ret = 0; ret = genpd_lock_interruptible(genpd); if (ret) return -ERESTARTSYS; - list_for_each_entry(pm_data, &genpd->dev_list, list_node) { - kobj_path = kobject_get_path(&pm_data->dev->kobj, - genpd_is_irq_safe(genpd) ? - GFP_ATOMIC : GFP_KERNEL); - if (kobj_path == NULL) - continue; - - seq_printf(s, "%s\n", kobj_path); - kfree(kobj_path); - } + list_for_each_entry(pm_data, &genpd->dev_list, list_node) + seq_printf(s, "%s\n", dev_name(pm_data->dev)); genpd_unlock(genpd); return ret; diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c index 9517cce93d8a39..80a4dcc77199cf 100644 --- a/drivers/pmdomain/imx/gpc.c +++ b/drivers/pmdomain/imx/gpc.c @@ -455,7 +455,6 @@ static int imx_gpc_probe(struct platform_device *pdev) } else { struct imx_pm_domain *domain; struct platform_device *pd_pdev; - struct device_node *np; struct clk *ipg_clk; unsigned int ipg_rate_mhz; int domain_index; @@ -465,28 +464,24 @@ static int imx_gpc_probe(struct platform_device *pdev) return PTR_ERR(ipg_clk); ipg_rate_mhz = clk_get_rate(ipg_clk) / 1000000; - for_each_child_of_node(pgc_node, np) { + for_each_child_of_node_scoped(pgc_node, np) { ret = of_property_read_u32(np, "reg", &domain_index); - if (ret) { - of_node_put(np); + if (ret) return ret; - } + if (domain_index >= of_id_data->num_domains) continue; pd_pdev = platform_device_alloc("imx-pgc-power-domain", domain_index); - if (!pd_pdev) { - of_node_put(np); + if (!pd_pdev) return -ENOMEM; - } ret = platform_device_add_data(pd_pdev, &imx_gpc_domains[domain_index], sizeof(imx_gpc_domains[domain_index])); if (ret) { platform_device_put(pd_pdev); - of_node_put(np); return ret; } domain = pd_pdev->dev.platform_data; @@ -500,7 +495,6 @@ static int imx_gpc_probe(struct platform_device *pdev) ret = platform_device_add(pd_pdev); if (ret) { platform_device_put(pd_pdev); - of_node_put(np); return ret; } } diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c index 856eaac0ec140d..963d61c5af6d5e 100644 --- a/drivers/pmdomain/imx/gpcv2.c +++ b/drivers/pmdomain/imx/gpcv2.c @@ -1458,7 +1458,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev) .max_register = SZ_4K, }; struct device *dev = &pdev->dev; - struct device_node *pgc_np, *np; + struct device_node *pgc_np; struct regmap *regmap; void __iomem *base; int ret; @@ -1480,7 +1480,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev) return ret; } - 
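
Editor's note: the imx GPC conversion above switches to for_each_child_of_node_scoped(), which drops the node reference automatically when the loop variable goes out of scope, so the manual of_node_put() calls on every early exit disappear. A minimal sketch of the pattern, with the hypothetical helper foo_count_children_with_reg:

#include <linux/of.h>

/* Early returns no longer leak a child-node reference. */
static int foo_count_children_with_reg(struct device_node *parent)
{
        int count = 0;
        u32 reg;

        for_each_child_of_node_scoped(parent, child) {
                if (of_property_read_u32(child, "reg", &reg))
                        return -EINVAL; /* child is put automatically */
                count++;
        }

        return count;
}
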
for_each_child_of_node(pgc_np, np) { + for_each_child_of_node_scoped(pgc_np, np) { struct platform_device *pd_pdev; struct imx_pgc_domain *domain; u32 domain_index; @@ -1491,7 +1491,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev) ret = of_property_read_u32(np, "reg", &domain_index); if (ret) { dev_err(dev, "Failed to read 'reg' property\n"); - of_node_put(np); return ret; } @@ -1506,7 +1505,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev) domain_index); if (!pd_pdev) { dev_err(dev, "Failed to allocate platform device\n"); - of_node_put(np); return -ENOMEM; } @@ -1515,7 +1513,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev) sizeof(domain_data->domains[domain_index])); if (ret) { platform_device_put(pd_pdev); - of_node_put(np); return ret; } @@ -1532,7 +1529,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev) ret = platform_device_add(pd_pdev); if (ret) { platform_device_put(pd_pdev); - of_node_put(np); return ret; } } diff --git a/drivers/pmdomain/imx/imx93-pd.c b/drivers/pmdomain/imx/imx93-pd.c index d750a7dc58d212..25ab592945bd7b 100644 --- a/drivers/pmdomain/imx/imx93-pd.c +++ b/drivers/pmdomain/imx/imx93-pd.c @@ -28,7 +28,6 @@ struct imx93_power_domain { void __iomem *addr; struct clk_bulk_data *clks; int num_clks; - bool init_off; }; #define to_imx93_pd(_genpd) container_of(_genpd, struct imx93_power_domain, genpd) @@ -90,9 +89,6 @@ static void imx93_pd_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - if (!domain->init_off) - clk_bulk_disable_unprepare(domain->num_clks, domain->clks); - of_genpd_del_provider(np); pm_genpd_remove(&domain->genpd); } @@ -102,6 +98,7 @@ static int imx93_pd_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct imx93_power_domain *domain; + bool init_off; int ret; domain = devm_kzalloc(dev, sizeof(*domain), GFP_KERNEL); @@ -121,18 +118,17 @@ static int imx93_pd_probe(struct platform_device *pdev) domain->genpd.power_on = imx93_pd_on; domain->dev = dev; - domain->init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK; + init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK; /* Just to sync the status of hardware */ - if (!domain->init_off) { + if (!init_off) { ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks); - if (ret) { - dev_err(domain->dev, "failed to enable clocks for domain: %s\n", - domain->genpd.name); - return ret; - } + if (ret) + return dev_err_probe(domain->dev, ret, + "failed to enable clocks for domain: %s\n", + domain->genpd.name); } - ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off); + ret = pm_genpd_init(&domain->genpd, NULL, init_off); if (ret) goto err_clk_unprepare; @@ -148,7 +144,7 @@ static int imx93_pd_probe(struct platform_device *pdev) pm_genpd_remove(&domain->genpd); err_clk_unprepare: - if (!domain->init_off) + if (!init_off) clk_bulk_disable_unprepare(domain->num_clks, domain->clks); return ret; diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c index e274e3315fe7a6..88406e9ac63c22 100644 --- a/drivers/pmdomain/mediatek/mtk-pm-domains.c +++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c @@ -398,12 +398,10 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no scpsys->dev->of_node = node; pd->supply = devm_regulator_get(scpsys->dev, "domain"); scpsys->dev->of_node = root_node; - if (IS_ERR(pd->supply)) { - 
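
Editor's note: the imx93-pd hunk above folds the error message and the return into one dev_err_probe() call, which also stays quiet for -EPROBE_DEFER and records the deferral reason. A minimal sketch of the idiom with a hypothetical foo_get_clock helper:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Log and propagate a probe error in a single statement. */
static int foo_get_clock(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get(dev, NULL);
        if (IS_ERR(*clk))
                return dev_err_probe(dev, PTR_ERR(*clk),
                                     "failed to get clock\n");

        return 0;
}
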
dev_err_probe(scpsys->dev, PTR_ERR(pd->supply), + if (IS_ERR(pd->supply)) + return dev_err_cast_probe(scpsys->dev, pd->supply, "%pOF: failed to get power supply.\n", node); - return ERR_CAST(pd->supply); - } } pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg"); diff --git a/drivers/pmdomain/qcom/cpr.c b/drivers/pmdomain/qcom/cpr.c index c64e84a27cc7f0..e1fca65b80becb 100644 --- a/drivers/pmdomain/qcom/cpr.c +++ b/drivers/pmdomain/qcom/cpr.c @@ -4,6 +4,7 @@ * Copyright (c) 2019, Linaro Limited */ +#include #include #include #include @@ -747,9 +748,9 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain, struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); struct corner *corner, *end; enum voltage_change_dir dir; - int ret = 0, new_uV; + int ret, new_uV; - mutex_lock(&drv->lock); + guard(mutex)(&drv->lock); dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n", __func__, state, cpr_get_cur_perf_state(drv)); @@ -760,10 +761,8 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain, */ corner = drv->corners + state - 1; end = &drv->corners[drv->num_corners - 1]; - if (corner > end || corner < drv->corners) { - ret = -EINVAL; - goto unlock; - } + if (corner > end || corner < drv->corners) + return -EINVAL; /* Determine direction */ if (drv->corner > corner) @@ -783,7 +782,7 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain, ret = cpr_scale_voltage(drv, corner, new_uV, dir); if (ret) - goto unlock; + return ret; if (cpr_is_allowed(drv)) { cpr_irq_clr(drv); @@ -794,10 +793,7 @@ static int cpr_set_performance_state(struct generic_pm_domain *domain, drv->corner = corner; -unlock: - mutex_unlock(&drv->lock); - - return ret; + return 0; } static int @@ -1040,36 +1036,30 @@ static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp) static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref, struct device *cpu_dev) { - u64 rate = 0; - struct device_node *ref_np; - struct device_node *desc_np; - struct device_node *child_np = NULL; - struct device_node *child_req_np = NULL; + struct device_node *ref_np __free(device_node) = NULL; + struct device_node *desc_np __free(device_node) = + dev_pm_opp_of_get_opp_desc_node(cpu_dev); - desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); if (!desc_np) return 0; ref_np = dev_pm_opp_get_of_node(ref); if (!ref_np) - goto out_ref; + return 0; - do { - of_node_put(child_req_np); - child_np = of_get_next_available_child(desc_np, child_np); - child_req_np = of_parse_phandle(child_np, "required-opps", 0); - } while (child_np && child_req_np != ref_np); + for_each_available_child_of_node_scoped(desc_np, child_np) { + struct device_node *child_req_np __free(device_node) = + of_parse_phandle(child_np, "required-opps", 0); - if (child_np && child_req_np == ref_np) - of_property_read_u64(child_np, "opp-hz", &rate); + if (child_req_np == ref_np) { + u64 rate; - of_node_put(child_req_np); - of_node_put(child_np); - of_node_put(ref_np); -out_ref: - of_node_put(desc_np); + of_property_read_u64(child_np, "opp-hz", &rate); + return (unsigned long) rate; + } + } - return (unsigned long) rate; + return 0; } static int cpr_corner_init(struct cpr_drv *drv) @@ -1443,9 +1433,9 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain, { struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); const struct acc_desc *acc_desc = drv->acc_desc; - int ret = 0; + int ret; - mutex_lock(&drv->lock); + guard(mutex)(&drv->lock); dev_dbg(drv->dev, "attach 
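
Editor's note: the reworked cpr_get_opp_hz_for_req() above relies on scope-based cleanup: a struct device_node * annotated with __free(device_node) is of_node_put() automatically when it leaves scope. A minimal sketch of that style, using the hypothetical helper foo_read_child_reg:

#include <linux/cleanup.h>
#include <linux/of.h>

static int foo_read_child_reg(struct device_node *parent, const char *name)
{
        struct device_node *child __free(device_node) =
                of_get_child_by_name(parent, name);
        u32 reg;

        if (!child)
                return -ENODEV;

        if (of_property_read_u32(child, "reg", &reg))
                return -EINVAL;         /* no explicit of_node_put() needed */

        return reg;
}
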
callback for: %s\n", dev_name(dev)); @@ -1457,7 +1447,7 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain, * additional initialization when further CPUs get attached. */ if (drv->attached_cpu_dev) - goto unlock; + return 0; /* * cpr_scale_voltage() requires the direction (if we are changing @@ -1469,12 +1459,10 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain, * the first time cpr_set_performance_state() is called. */ drv->cpu_clk = devm_clk_get(dev, NULL); - if (IS_ERR(drv->cpu_clk)) { - ret = PTR_ERR(drv->cpu_clk); - if (ret != -EPROBE_DEFER) - dev_err(drv->dev, "could not get cpu clk: %d\n", ret); - goto unlock; - } + if (IS_ERR(drv->cpu_clk)) + return dev_err_probe(drv->dev, PTR_ERR(drv->cpu_clk), + "could not get cpu clk\n"); + drv->attached_cpu_dev = dev; dev_dbg(drv->dev, "using cpu clk from: %s\n", @@ -1491,42 +1479,39 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain, ret = dev_pm_opp_get_opp_count(&drv->pd.dev); if (ret < 0) { dev_err(drv->dev, "could not get OPP count\n"); - goto unlock; + return ret; } drv->num_corners = ret; if (drv->num_corners < 2) { dev_err(drv->dev, "need at least 2 OPPs to use CPR\n"); - ret = -EINVAL; - goto unlock; + return -EINVAL; } drv->corners = devm_kcalloc(drv->dev, drv->num_corners, sizeof(*drv->corners), GFP_KERNEL); - if (!drv->corners) { - ret = -ENOMEM; - goto unlock; - } + if (!drv->corners) + return -ENOMEM; ret = cpr_corner_init(drv); if (ret) - goto unlock; + return ret; cpr_set_loop_allowed(drv); ret = cpr_init_parameters(drv); if (ret) - goto unlock; + return ret; /* Configure CPR HW but keep it disabled */ ret = cpr_config(drv); if (ret) - goto unlock; + return ret; ret = cpr_find_initial_corner(drv); if (ret) - goto unlock; + return ret; if (acc_desc->config) regmap_multi_reg_write(drv->tcsr, acc_desc->config, @@ -1541,10 +1526,7 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain, dev_info(drv->dev, "driver initialized with %u OPPs\n", drv->num_corners); -unlock: - mutex_unlock(&drv->lock); - - return ret; + return 0; } static int cpr_debug_info_show(struct seq_file *s, void *unused) diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c index d2cb4271a1cad0..65505e1e221986 100644 --- a/drivers/pmdomain/qcom/rpmhpd.c +++ b/drivers/pmdomain/qcom/rpmhpd.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/ +#include #include #include #include @@ -775,9 +776,9 @@ static int rpmhpd_set_performance_state(struct generic_pm_domain *domain, unsigned int level) { struct rpmhpd *pd = domain_to_rpmhpd(domain); - int ret = 0, i; + int ret, i; - mutex_lock(&rpmhpd_lock); + guard(mutex)(&rpmhpd_lock); for (i = 0; i < pd->level_count; i++) if (level <= pd->level[i]) @@ -797,14 +798,12 @@ static int rpmhpd_set_performance_state(struct generic_pm_domain *domain, ret = rpmhpd_aggregate_corner(pd, i); if (ret) - goto out; + return ret; } pd->corner = i; -out: - mutex_unlock(&rpmhpd_lock); - return ret; + return 0; } static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd) diff --git a/drivers/pmdomain/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c index 5e6280b4cf7018..0be6b3026e3aa0 100644 --- a/drivers/pmdomain/qcom/rpmpd.c +++ b/drivers/pmdomain/qcom/rpmpd.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
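
Editor's note: the cpr and rpmhpd conversions above replace mutex_lock()/mutex_unlock() pairs and "goto unlock" labels with guard(mutex)(), which releases the lock whenever the scope is left. A minimal sketch with hypothetical foo_lock/foo_set_state names:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);
static unsigned int foo_state;

static int foo_set_state(unsigned int state)
{
        guard(mutex)(&foo_lock);

        if (state > 16)
                return -EINVAL;         /* early return, mutex still released */

        foo_state = state;
        return 0;
}

Dropping the unlock label is what lets the error paths in these functions turn into plain returns.
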
*/ +#include #include #include #include @@ -1024,20 +1025,17 @@ static int rpmpd_power_on(struct generic_pm_domain *domain) int ret; struct rpmpd *pd = domain_to_rpmpd(domain); - mutex_lock(&rpmpd_lock); + guard(mutex)(&rpmpd_lock); ret = rpmpd_send_enable(pd, true); if (ret) - goto out; + return ret; pd->enabled = true; if (pd->corner) ret = rpmpd_aggregate_corner(pd); -out: - mutex_unlock(&rpmpd_lock); - return ret; } @@ -1060,27 +1058,21 @@ static int rpmpd_power_off(struct generic_pm_domain *domain) static int rpmpd_set_performance(struct generic_pm_domain *domain, unsigned int state) { - int ret = 0; struct rpmpd *pd = domain_to_rpmpd(domain); if (state > pd->max_state) state = pd->max_state; - mutex_lock(&rpmpd_lock); + guard(mutex)(&rpmpd_lock); pd->corner = state; /* Always send updates for vfc and vfl */ if (!pd->enabled && pd->key != cpu_to_le32(KEY_FLOOR_CORNER) && pd->key != cpu_to_le32(KEY_FLOOR_LEVEL)) - goto out; + return 0; - ret = rpmpd_aggregate_corner(pd); - -out: - mutex_unlock(&rpmpd_lock); - - return ret; + return rpmpd_aggregate_corner(pd); } static int rpmpd_probe(struct platform_device *pdev) diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c index 9b76b62869d0df..cb0f938001382f 100644 --- a/drivers/pmdomain/rockchip/pm-domains.c +++ b/drivers/pmdomain/rockchip/pm-domains.c @@ -33,6 +33,7 @@ #include #include #include +#include #include struct rockchip_domain_info { @@ -45,6 +46,7 @@ struct rockchip_domain_info { bool active_wakeup; int pwr_w_mask; int req_w_mask; + int clk_ungate_mask; int mem_status_mask; int repair_status_mask; u32 pwr_offset; @@ -62,6 +64,7 @@ struct rockchip_pmu_info { u32 chain_status_offset; u32 mem_status_offset; u32 repair_status_offset; + u32 clk_ungate_offset; u32 core_pwrcnt_offset; u32 gpu_pwrcnt_offset; @@ -144,6 +147,25 @@ struct rockchip_pmu { .active_wakeup = wakeup, \ } +#define DOMAIN_M_O_R_G(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, g_mask, wakeup) \ +{ \ + .name = _name, \ + .pwr_offset = p_offset, \ + .pwr_w_mask = (pwr) << 16, \ + .pwr_mask = (pwr), \ + .status_mask = (status), \ + .mem_offset = m_offset, \ + .mem_status_mask = (m_status), \ + .repair_status_mask = (r_status), \ + .req_offset = r_offset, \ + .req_w_mask = (req) << 16, \ + .req_mask = (req), \ + .idle_mask = (idle), \ + .clk_ungate_mask = (g_mask), \ + .ack_mask = (ack), \ + .active_wakeup = wakeup, \ +} + #define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \ { \ .name = _name, \ @@ -175,6 +197,9 @@ struct rockchip_pmu { #define DOMAIN_RK3568(name, pwr, req, wakeup) \ DOMAIN_M(name, pwr, pwr, req, req, req, wakeup) +#define DOMAIN_RK3576(name, p_offset, pwr, status, r_status, r_offset, req, idle, g_mask, wakeup) \ + DOMAIN_M_O_R_G(name, p_offset, pwr, status, 0, r_status, r_status, r_offset, req, idle, idle, g_mask, wakeup) + /* * Dynamic Memory Controller may need to coordinate with us -- see * rockchip_pmu_block(). @@ -299,6 +324,26 @@ static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu) return val; } +static int rockchip_pmu_ungate_clk(struct rockchip_pm_domain *pd, bool ungate) +{ + const struct rockchip_domain_info *pd_info = pd->info; + struct rockchip_pmu *pmu = pd->pmu; + unsigned int val; + int clk_ungate_w_mask = pd_info->clk_ungate_mask << 16; + + if (!pd_info->clk_ungate_mask) + return 0; + + if (!pmu->info->clk_ungate_offset) + return 0; + + val = ungate ? 
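
Editor's note: rockchip_pmu_ungate_clk() introduced here writes the mask twice: shifted into the upper 16 bits as a write-enable, and optionally in the lower bits as the value, so unrelated bits are untouched without a read-modify-write. A minimal sketch of that "hiword mask" register style, assuming a hypothetical foo_set_bits_hiword helper:

#include <linux/regmap.h>

/*
 * Bits [31:16] select which of bits [15:0] the write actually affects;
 * everything not write-enabled keeps its current value.
 */
static int foo_set_bits_hiword(struct regmap *map, unsigned int reg,
                               u16 mask, bool set)
{
        u32 val = (u32)mask << 16;      /* write-enable the selected bits */

        if (set)
                val |= mask;            /* set them; otherwise they clear */

        return regmap_write(map, reg, val);
}
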
(pd_info->clk_ungate_mask | clk_ungate_w_mask) : + clk_ungate_w_mask; + regmap_write(pmu->regmap, pmu->info->clk_ungate_offset, val); + + return 0; +} + static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd, bool idle) { @@ -539,6 +584,8 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on) return ret; } + rockchip_pmu_ungate_clk(pd, true); + if (!power_on) { rockchip_pmu_save_qos(pd); @@ -555,6 +602,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on) rockchip_pmu_restore_qos(pd); } + rockchip_pmu_ungate_clk(pd, false); clk_bulk_disable(pd->num_clks, pd->clks); } @@ -712,12 +760,11 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, goto err_unprepare_clocks; } pd->qos_regmap[j] = syscon_node_to_regmap(qos_node); + of_node_put(qos_node); if (IS_ERR(pd->qos_regmap[j])) { error = -ENODEV; - of_node_put(qos_node); goto err_unprepare_clocks; } - of_node_put(qos_node); } } @@ -800,11 +847,10 @@ static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu, static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu, struct device_node *parent) { - struct device_node *np; struct generic_pm_domain *child_domain, *parent_domain; int error; - for_each_child_of_node(parent, np) { + for_each_child_of_node_scoped(parent, np) { u32 idx; error = of_property_read_u32(parent, "reg", &idx); @@ -812,7 +858,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu, dev_err(pmu->dev, "%pOFn: failed to retrieve domain id (reg): %d\n", parent, error); - goto err_out; + return error; } parent_domain = pmu->genpd_data.domains[idx]; @@ -820,7 +866,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu, if (error) { dev_err(pmu->dev, "failed to handle node %pOFn: %d\n", np, error); - goto err_out; + return error; } error = of_property_read_u32(np, "reg", &idx); @@ -828,7 +874,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu, dev_err(pmu->dev, "%pOFn: failed to retrieve domain id (reg): %d\n", np, error); - goto err_out; + return error; } child_domain = pmu->genpd_data.domains[idx]; @@ -836,7 +882,7 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu, if (error) { dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n", parent_domain->name, child_domain->name, error); - goto err_out; + return error; } else { dev_dbg(pmu->dev, "%s add subdomain: %s\n", parent_domain->name, child_domain->name); @@ -846,17 +892,12 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu, } return 0; - -err_out: - of_node_put(np); - return error; } static int rockchip_pm_domain_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct device_node *node; struct device *parent; struct rockchip_pmu *pmu; const struct rockchip_pmu_info *pmu_info; @@ -912,14 +953,13 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev) * Prevent any rockchip_pmu_block() from racing with the remainder of * setup (clocks, register initialization). 
*/ - mutex_lock(&dmc_pmu_mutex); + guard(mutex)(&dmc_pmu_mutex); - for_each_available_child_of_node(np, node) { + for_each_available_child_of_node_scoped(np, node) { error = rockchip_pm_add_one_domain(pmu, node); if (error) { dev_err(dev, "failed to handle node %pOFn: %d\n", node, error); - of_node_put(node); goto err_out; } @@ -927,7 +967,6 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev) if (error < 0) { dev_err(dev, "failed to handle subdomain node %pOFn: %d\n", node, error); - of_node_put(node); goto err_out; } } @@ -947,13 +986,10 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev) if (!WARN_ON_ONCE(dmc_pmu)) dmc_pmu = pmu; - mutex_unlock(&dmc_pmu_mutex); - return 0; err_out: rockchip_pm_domain_cleanup(pmu); - mutex_unlock(&dmc_pmu_mutex); return error; } @@ -1106,6 +1142,28 @@ static const struct rockchip_domain_info rk3568_pm_domains[] = { [RK3568_PD_PIPE] = DOMAIN_RK3568("pipe", BIT(8), BIT(11), false), }; +static const struct rockchip_domain_info rk3576_pm_domains[] = { + [RK3576_PD_NPU] = DOMAIN_RK3576("npu", 0x0, BIT(0), BIT(0), 0, 0x0, 0, 0, 0, false), + [RK3576_PD_NVM] = DOMAIN_RK3576("nvm", 0x0, BIT(6), 0, BIT(6), 0x4, BIT(2), BIT(18), BIT(2), false), + [RK3576_PD_SDGMAC] = DOMAIN_RK3576("sdgmac", 0x0, BIT(7), 0, BIT(7), 0x4, BIT(1), BIT(17), 0x6, false), + [RK3576_PD_AUDIO] = DOMAIN_RK3576("audio", 0x0, BIT(8), 0, BIT(8), 0x4, BIT(0), BIT(16), BIT(0), false), + [RK3576_PD_PHP] = DOMAIN_RK3576("php", 0x0, BIT(9), 0, BIT(9), 0x0, BIT(15), BIT(15), BIT(15), false), + [RK3576_PD_SUBPHP] = DOMAIN_RK3576("subphp", 0x0, BIT(10), 0, BIT(10), 0x0, 0, 0, 0, false), + [RK3576_PD_VOP] = DOMAIN_RK3576("vop", 0x0, BIT(11), 0, BIT(11), 0x0, 0x6000, 0x6000, 0x6000, false), + [RK3576_PD_VO1] = DOMAIN_RK3576("vo1", 0x0, BIT(14), 0, BIT(14), 0x0, BIT(12), BIT(12), 0x7000, false), + [RK3576_PD_VO0] = DOMAIN_RK3576("vo0", 0x0, BIT(15), 0, BIT(15), 0x0, BIT(11), BIT(11), 0x6800, false), + [RK3576_PD_USB] = DOMAIN_RK3576("usb", 0x4, BIT(0), 0, BIT(16), 0x0, BIT(10), BIT(10), 0x6400, true), + [RK3576_PD_VI] = DOMAIN_RK3576("vi", 0x4, BIT(1), 0, BIT(17), 0x0, BIT(9), BIT(9), BIT(9), false), + [RK3576_PD_VEPU0] = DOMAIN_RK3576("vepu0", 0x4, BIT(2), 0, BIT(18), 0x0, BIT(7), BIT(7), 0x280, false), + [RK3576_PD_VEPU1] = DOMAIN_RK3576("vepu1", 0x4, BIT(3), 0, BIT(19), 0x0, BIT(8), BIT(8), BIT(8), false), + [RK3576_PD_VDEC] = DOMAIN_RK3576("vdec", 0x4, BIT(4), 0, BIT(20), 0x0, BIT(6), BIT(6), BIT(6), false), + [RK3576_PD_VPU] = DOMAIN_RK3576("vpu", 0x4, BIT(5), 0, BIT(21), 0x0, BIT(5), BIT(5), BIT(5), false), + [RK3576_PD_NPUTOP] = DOMAIN_RK3576("nputop", 0x4, BIT(6), 0, BIT(22), 0x0, 0x18, 0x18, 0x18, false), + [RK3576_PD_NPU0] = DOMAIN_RK3576("npu0", 0x4, BIT(7), 0, BIT(23), 0x0, BIT(1), BIT(1), 0x1a, false), + [RK3576_PD_NPU1] = DOMAIN_RK3576("npu1", 0x4, BIT(8), 0, BIT(24), 0x0, BIT(2), BIT(2), 0x1c, false), + [RK3576_PD_GPU] = DOMAIN_RK3576("gpu", 0x4, BIT(9), 0, BIT(25), 0x0, BIT(0), BIT(0), BIT(0), false), +}; + static const struct rockchip_domain_info rk3588_pm_domains[] = { [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false), [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false), @@ -1284,6 +1342,22 @@ static const struct rockchip_pmu_info rk3568_pmu = { .domain_info = rk3568_pm_domains, }; +static const struct rockchip_pmu_info rk3576_pmu = { + .pwr_offset = 0x210, + .status_offset = 0x230, + .chain_status_offset = 0x248, + .mem_status_offset = 0x250, + .mem_pwr_offset = 0x300, 
+ .req_offset = 0x110, + .idle_offset = 0x128, + .ack_offset = 0x120, + .repair_status_offset = 0x570, + .clk_ungate_offset = 0x140, + + .num_domains = ARRAY_SIZE(rk3576_pm_domains), + .domain_info = rk3576_pm_domains, +}; + static const struct rockchip_pmu_info rk3588_pmu = { .pwr_offset = 0x14c, .status_offset = 0x180, @@ -1359,6 +1433,10 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = { .compatible = "rockchip,rk3568-power-controller", .data = (void *)&rk3568_pmu, }, + { + .compatible = "rockchip,rk3576-power-controller", + .data = (void *)&rk3576_pmu, + }, { .compatible = "rockchip,rk3588-power-controller", .data = (void *)&rk3588_pmu, diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index fece990af4a75b..389d5a193e5dce 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -75,6 +75,16 @@ config POWER_RESET_BRCMSTB Say Y here if you have a Broadcom STB board and you wish to have restart support. +config POWER_RESET_EP93XX + bool "Cirrus EP93XX reset driver" if COMPILE_TEST + depends on MFD_SYSCON + default ARCH_EP93XX + help + This driver provides restart support for Cirrus EP93XX SoC. + + Say Y here if you have a Cirrus EP93XX SoC and you wish + to have restart support. + config POWER_RESET_GEMINI_POWEROFF bool "Cortina Gemini power-off driver" depends on ARCH_GEMINI || COMPILE_TEST diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index a95d1bd275d187..10782d32e1da39 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_POWER_RESET_ATC260X) += atc260x-poweroff.o obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o obj-$(CONFIG_POWER_RESET_BRCMKONA) += brcm-kona-reset.o obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o +obj-$(CONFIG_POWER_RESET_EP93XX) += ep93xx-restart.o obj-$(CONFIG_POWER_RESET_GEMINI_POWEROFF) += gemini-poweroff.o obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o diff --git a/drivers/power/reset/brcmstb-reboot.c b/drivers/power/reset/brcmstb-reboot.c index 0f2944dc935516..b9c093f6064ca4 100644 --- a/drivers/power/reset/brcmstb-reboot.c +++ b/drivers/power/reset/brcmstb-reboot.c @@ -18,9 +18,6 @@ #include #include -#define RESET_SOURCE_ENABLE_REG 1 -#define SW_MASTER_RESET_REG 2 - static struct regmap *regmap; static u32 rst_src_en; static u32 sw_mstr_rst; @@ -32,8 +29,7 @@ struct reset_reg_mask { static const struct reset_reg_mask *reset_masks; -static int brcmstb_restart_handler(struct notifier_block *this, - unsigned long mode, void *cmd) +static int brcmstb_restart_handler(struct sys_off_data *data) { int rc; u32 tmp; @@ -62,17 +58,9 @@ static int brcmstb_restart_handler(struct notifier_block *this, return NOTIFY_DONE; } - while (1) - ; - return NOTIFY_DONE; } -static struct notifier_block brcmstb_restart_nb = { - .notifier_call = brcmstb_restart_handler, - .priority = 128, -}; - static const struct reset_reg_mask reset_bits_40nm = { .rst_src_en_mask = BIT(0), .sw_mstr_rst_mask = BIT(0), @@ -83,46 +71,28 @@ static const struct reset_reg_mask reset_bits_65nm = { .sw_mstr_rst_mask = BIT(31), }; -static const struct of_device_id of_match[] = { - { .compatible = "brcm,brcmstb-reboot", .data = &reset_bits_40nm }, - { .compatible = "brcm,bcm7038-reboot", .data = &reset_bits_65nm }, - {}, -}; - static int brcmstb_reboot_probe(struct platform_device *pdev) { int rc; struct device_node *np = pdev->dev.of_node; - const struct of_device_id *of_id; + unsigned int args[2]; - of_id 
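
Editor's note: the brcmstb-reboot conversion above swaps the notifier_block for a sys_off handler: the callback takes struct sys_off_data and is registered below with devm_register_sys_off_handler(), so it is unregistered automatically on unbind. A minimal sketch with hypothetical foo_* names; priority 128 mirrors the old notifier priority:

#include <linux/platform_device.h>
#include <linux/reboot.h>

static int foo_restart_handler(struct sys_off_data *data)
{
        /* data->cb_data is whatever was passed at registration time */
        return NOTIFY_DONE;
}

static int foo_probe(struct platform_device *pdev)
{
        return devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
                                             128, foo_restart_handler, NULL);
}
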
= of_match_node(of_match, np); - if (!of_id) { - pr_err("failed to look up compatible string\n"); + reset_masks = device_get_match_data(&pdev->dev); + if (!reset_masks) { + pr_err("failed to get match data\n"); return -EINVAL; } - reset_masks = of_id->data; - regmap = syscon_regmap_lookup_by_phandle(np, "syscon"); + regmap = syscon_regmap_lookup_by_phandle_args(np, "syscon", ARRAY_SIZE(args), args); if (IS_ERR(regmap)) { pr_err("failed to get syscon phandle\n"); return -EINVAL; } + rst_src_en = args[0]; + sw_mstr_rst = args[1]; - rc = of_property_read_u32_index(np, "syscon", RESET_SOURCE_ENABLE_REG, - &rst_src_en); - if (rc) { - pr_err("can't get rst_src_en offset (%d)\n", rc); - return -EINVAL; - } - - rc = of_property_read_u32_index(np, "syscon", SW_MASTER_RESET_REG, - &sw_mstr_rst); - if (rc) { - pr_err("can't get sw_mstr_rst offset (%d)\n", rc); - return -EINVAL; - } - - rc = register_restart_handler(&brcmstb_restart_nb); + rc = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART, + 128, brcmstb_restart_handler, NULL); if (rc) dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", rc); @@ -130,6 +100,12 @@ static int brcmstb_reboot_probe(struct platform_device *pdev) return rc; } +static const struct of_device_id of_match[] = { + { .compatible = "brcm,brcmstb-reboot", .data = &reset_bits_40nm }, + { .compatible = "brcm,bcm7038-reboot", .data = &reset_bits_65nm }, + {}, +}; + static struct platform_driver brcmstb_reboot_driver = { .probe = brcmstb_reboot_probe, .driver = { @@ -140,7 +116,6 @@ static struct platform_driver brcmstb_reboot_driver = { static int __init brcmstb_reboot_init(void) { - return platform_driver_probe(&brcmstb_reboot_driver, - brcmstb_reboot_probe); + return platform_driver_register(&brcmstb_reboot_driver); } subsys_initcall(brcmstb_reboot_init); diff --git a/drivers/power/reset/ep93xx-restart.c b/drivers/power/reset/ep93xx-restart.c new file mode 100644 index 00000000000000..57cfb8620fafd0 --- /dev/null +++ b/drivers/power/reset/ep93xx-restart.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Cirrus EP93xx SoC reset driver + * + * Copyright (C) 2021 Nikita Shubin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define EP93XX_SYSCON_DEVCFG 0x80 +#define EP93XX_SYSCON_DEVCFG_SWRST BIT(31) + +struct ep93xx_restart { + struct ep93xx_regmap_adev *aux_dev; + struct notifier_block restart_handler; +}; + +static int ep93xx_restart_handle(struct notifier_block *this, + unsigned long mode, void *cmd) +{ + struct ep93xx_restart *priv = + container_of(this, struct ep93xx_restart, restart_handler); + struct ep93xx_regmap_adev *aux = priv->aux_dev; + + /* Issue the reboot */ + aux->update_bits(aux->map, aux->lock, EP93XX_SYSCON_DEVCFG, + EP93XX_SYSCON_DEVCFG_SWRST, EP93XX_SYSCON_DEVCFG_SWRST); + aux->update_bits(aux->map, aux->lock, EP93XX_SYSCON_DEVCFG, + EP93XX_SYSCON_DEVCFG_SWRST, 0); + + return NOTIFY_DONE; +} + +static int ep93xx_reboot_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev); + struct device *dev = &adev->dev; + struct ep93xx_restart *priv; + int err; + + if (!rdev->update_bits) + return -ENODEV; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->aux_dev = rdev; + + priv->restart_handler.notifier_call = ep93xx_restart_handle; + priv->restart_handler.priority = 128; + + err = 
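
Editor's note: the hunk above replaces two of_property_read_u32_index() calls with syscon_regmap_lookup_by_phandle_args(), which resolves the phandle and returns its argument cells in one step. A minimal sketch, assuming a hypothetical "syscon = <&ctrl OFFSET_A OFFSET_B>" binding and a foo_get_syscon helper:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int foo_get_syscon(struct device_node *np, struct regmap **map,
                          unsigned int *off_a, unsigned int *off_b)
{
        unsigned int args[2];

        *map = syscon_regmap_lookup_by_phandle_args(np, "syscon",
                                                    ARRAY_SIZE(args), args);
        if (IS_ERR(*map))
                return PTR_ERR(*map);

        *off_a = args[0];
        *off_b = args[1];
        return 0;
}
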
register_restart_handler(&priv->restart_handler); + if (err) + return dev_err_probe(dev, err, "can't register restart notifier\n"); + + return 0; +} + +static const struct auxiliary_device_id ep93xx_reboot_ids[] = { + { + .name = "soc_ep93xx.reset-ep93xx", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(auxiliary, ep93xx_reboot_ids); + +static struct auxiliary_driver ep93xx_reboot_driver = { + .probe = ep93xx_reboot_probe, + .id_table = ep93xx_reboot_ids, +}; +module_auxiliary_driver(ep93xx_reboot_driver); diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c index 1775b318d0ef41..4f1cd1c0018c67 100644 --- a/drivers/power/reset/pwr-mlxbf.c +++ b/drivers/power/reset/pwr-mlxbf.c @@ -18,7 +18,6 @@ struct pwr_mlxbf { struct work_struct reboot_work; - struct work_struct shutdown_work; const char *hid; }; @@ -27,22 +26,17 @@ static void pwr_mlxbf_reboot_work(struct work_struct *work) acpi_bus_generate_netlink_event("button/reboot.*", "Reboot Button", 0x80, 1); } -static void pwr_mlxbf_shutdown_work(struct work_struct *work) -{ - acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1); -} - static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr) { const char *rst_pwr_hid = "MLNXBF24"; - const char *low_pwr_hid = "MLNXBF29"; + const char *shutdown_hid = "MLNXBF29"; struct pwr_mlxbf *priv = ptr; if (!strncmp(priv->hid, rst_pwr_hid, 8)) schedule_work(&priv->reboot_work); - if (!strncmp(priv->hid, low_pwr_hid, 8)) - schedule_work(&priv->shutdown_work); + if (!strncmp(priv->hid, shutdown_hid, 8)) + orderly_poweroff(true); return IRQ_HANDLED; } @@ -70,10 +64,6 @@ static int pwr_mlxbf_probe(struct platform_device *pdev) if (irq < 0) return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid); - err = devm_work_autocancel(dev, &priv->shutdown_work, pwr_mlxbf_shutdown_work); - if (err) - return err; - err = devm_work_autocancel(dev, &priv->reboot_work, pwr_mlxbf_reboot_work); if (err) return err; diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c index 700879474abf25..4fa129877d7e4c 100644 --- a/drivers/power/sequencing/pwrseq-qcom-wcn.c +++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c @@ -198,6 +198,13 @@ static const struct pwrseq_qcom_wcn_pdata pwrseq_qca6390_of_data = { .gpio_enable_delay_ms = 100, }; +static const struct pwrseq_qcom_wcn_pdata pwrseq_wcn6855_of_data = { + .vregs = pwrseq_qca6390_vregs, + .num_vregs = ARRAY_SIZE(pwrseq_qca6390_vregs), + .pwup_delay_ms = 50, + .gpio_enable_delay_ms = 5, +}; + static const char *const pwrseq_wcn7850_vregs[] = { "vdd", "vddio", @@ -321,6 +328,10 @@ static const struct of_device_id pwrseq_qcom_wcn_of_match[] = { .compatible = "qcom,qca6390-pmu", .data = &pwrseq_qca6390_of_data, }, + { + .compatible = "qcom,wcn6855-pmu", + .data = &pwrseq_wcn6855_of_data, + }, { .compatible = "qcom,wcn7850-pmu", .data = &pwrseq_wcn7850_of_data, diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 270874eeb934ab..a71903b1bf781e 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2531,7 +2531,7 @@ static struct attribute *ab8500_fg_attrs[] = { }; ATTRIBUTE_GROUPS(ab8500_fg); -static struct kobj_type ab8500_fg_ktype = { +static const struct kobj_type ab8500_fg_ktype = { .sysfs_ops = &ab8500_fg_sysfs_ops, .default_groups = ab8500_fg_groups, }; diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index 6ac5c80cfda214..f71cc90fea1273 100644 --- 
a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -17,6 +17,7 @@ * GNU General Public License for more details. */ +#include #include #include #include @@ -32,9 +33,19 @@ #include #define AXP20X_PWR_STATUS_BAT_CHARGING BIT(2) +#define AXP717_PWR_STATUS_MASK GENMASK(6, 5) +#define AXP717_PWR_STATUS_BAT_STANDBY 0 +#define AXP717_PWR_STATUS_BAT_CHRG 1 +#define AXP717_PWR_STATUS_BAT_DISCHRG 2 #define AXP20X_PWR_OP_BATT_PRESENT BIT(5) #define AXP20X_PWR_OP_BATT_ACTIVATED BIT(3) +#define AXP717_PWR_OP_BATT_PRESENT BIT(3) + +#define AXP717_BATT_PMU_FAULT_MASK GENMASK(2, 0) +#define AXP717_BATT_UVLO_2_5V BIT(2) +#define AXP717_BATT_OVER_TEMP BIT(1) +#define AXP717_BATT_UNDER_TEMP BIT(0) #define AXP209_FG_PERCENT GENMASK(6, 0) #define AXP22X_FG_VALID BIT(7) @@ -49,20 +60,51 @@ #define AXP22X_CHRG_CTRL1_TGT_4_22V (1 << 5) #define AXP22X_CHRG_CTRL1_TGT_4_24V (3 << 5) +#define AXP717_CHRG_ENABLE BIT(1) +#define AXP717_CHRG_CV_VOLT_MASK GENMASK(2, 0) +#define AXP717_CHRG_CV_4_0V 0 +#define AXP717_CHRG_CV_4_1V 1 +#define AXP717_CHRG_CV_4_2V 2 +#define AXP717_CHRG_CV_4_35V 3 +#define AXP717_CHRG_CV_4_4V 4 +/* Values 5 and 6 reserved. */ +#define AXP717_CHRG_CV_5_0V 7 + #define AXP813_CHRG_CTRL1_TGT_4_35V (3 << 5) #define AXP20X_CHRG_CTRL1_TGT_CURR GENMASK(3, 0) +#define AXP717_ICC_CHARGER_LIM_MASK GENMASK(5, 0) + +#define AXP717_ITERM_CHG_LIM_MASK GENMASK(3, 0) +#define AXP717_ITERM_CC_STEP 64000 #define AXP20X_V_OFF_MASK GENMASK(2, 0) +#define AXP717_V_OFF_MASK GENMASK(6, 4) + +#define AXP717_BAT_VMIN_MIN_UV 2600000 +#define AXP717_BAT_VMIN_MAX_UV 3300000 +#define AXP717_BAT_VMIN_STEP 100000 +#define AXP717_BAT_CV_MIN_UV 4000000 +#define AXP717_BAT_CV_MAX_UV 5000000 +#define AXP717_BAT_CC_MIN_UA 0 +#define AXP717_BAT_CC_MAX_UA 3008000 struct axp20x_batt_ps; struct axp_data { - int ccc_scale; - int ccc_offset; - bool has_fg_valid; + int ccc_scale; + int ccc_offset; + unsigned int ccc_reg; + unsigned int ccc_mask; + bool has_fg_valid; + const struct power_supply_desc *bat_ps_desc; int (*get_max_voltage)(struct axp20x_batt_ps *batt, int *val); int (*set_max_voltage)(struct axp20x_batt_ps *batt, int val); + int (*cfg_iio_chan)(struct platform_device *pdev, + struct axp20x_batt_ps *axp_batt); + void (*set_bat_info)(struct platform_device *pdev, + struct axp20x_batt_ps *axp_batt, + struct power_supply_battery_info *info); }; struct axp20x_batt_ps { @@ -135,6 +177,39 @@ static int axp22x_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt, return 0; } +static int axp717_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt, + int *val) +{ + int ret, reg; + + ret = regmap_read(axp20x_batt->regmap, AXP717_CV_CHG_SET, ®); + if (ret) + return ret; + + switch (reg & AXP717_CHRG_CV_VOLT_MASK) { + case AXP717_CHRG_CV_4_0V: + *val = 4000000; + return 0; + case AXP717_CHRG_CV_4_1V: + *val = 4100000; + return 0; + case AXP717_CHRG_CV_4_2V: + *val = 4200000; + return 0; + case AXP717_CHRG_CV_4_35V: + *val = 4350000; + return 0; + case AXP717_CHRG_CV_4_4V: + *val = 4400000; + return 0; + case AXP717_CHRG_CV_5_0V: + *val = 5000000; + return 0; + default: + return -EINVAL; + } +} + static int axp813_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt, int *val) { @@ -180,6 +255,21 @@ static int axp20x_get_constant_charge_current(struct axp20x_batt_ps *axp, return 0; } +static int axp717_get_constant_charge_current(struct axp20x_batt_ps *axp, + int *val) +{ + int ret; + + ret = regmap_read(axp->regmap, AXP717_ICC_CHG_SET, val); + if (ret) + return ret; + + *val = 
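
Editor's note: the new AXP717 definitions above describe register fields with GENMASK() and pull them out with FIELD_GET() instead of open-coded shifts. A minimal sketch of the decode, with hypothetical FOO_* names and a worked example in the comment:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define FOO_STATUS_MASK         GENMASK(6, 5)   /* two-bit field at bits 6:5 */
#define FOO_STATUS_CHARGING     1

/* Worked example: reg = 0x2c -> (0x2c & 0x60) >> 5 = 1, i.e. charging. */
static bool foo_is_charging(unsigned int reg)
{
        return FIELD_GET(FOO_STATUS_MASK, reg) == FOO_STATUS_CHARGING;
}
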
FIELD_GET(AXP717_ICC_CHARGER_LIM_MASK, *val) * + axp->data->ccc_scale; + + return 0; +} + static int axp20x_battery_get_prop(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -303,11 +393,11 @@ static int axp20x_battery_get_prop(struct power_supply *psy, val->intval = reg & AXP209_FG_PERCENT; break; - case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: return axp20x_batt->data->get_max_voltage(axp20x_batt, &val->intval); - case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: ret = regmap_read(axp20x_batt->regmap, AXP20X_V_OFF, ®); if (ret) return ret; @@ -332,6 +422,171 @@ static int axp20x_battery_get_prop(struct power_supply *psy, return 0; } +static int axp717_battery_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy); + int ret = 0, reg; + + switch (psp) { + case POWER_SUPPLY_PROP_PRESENT: + case POWER_SUPPLY_PROP_ONLINE: + ret = regmap_read(axp20x_batt->regmap, AXP717_ON_INDICATE, + ®); + if (ret) + return ret; + + val->intval = FIELD_GET(AXP717_PWR_OP_BATT_PRESENT, reg); + return 0; + + case POWER_SUPPLY_PROP_STATUS: + ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_STATUS_2, + ®); + if (ret) + return ret; + + switch (FIELD_GET(AXP717_PWR_STATUS_MASK, reg)) { + case AXP717_PWR_STATUS_BAT_STANDBY: + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + return 0; + + case AXP717_PWR_STATUS_BAT_CHRG: + val->intval = POWER_SUPPLY_STATUS_CHARGING; + return 0; + + case AXP717_PWR_STATUS_BAT_DISCHRG: + val->intval = POWER_SUPPLY_STATUS_DISCHARGING; + return 0; + + default: + val->intval = POWER_SUPPLY_STATUS_UNKNOWN; + return 0; + } + + /* + * If a fault is detected it must also be cleared; if the + * condition persists it should reappear (This is an + * assumption, it's actually not documented). A restart was + * not sufficient to clear the bit in testing despite the + * register listed as POR. + */ + case POWER_SUPPLY_PROP_HEALTH: + ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT, + ®); + if (ret) + return ret; + + switch (reg & AXP717_BATT_PMU_FAULT_MASK) { + case AXP717_BATT_UVLO_2_5V: + val->intval = POWER_SUPPLY_HEALTH_DEAD; + regmap_update_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_UVLO_2_5V, + AXP717_BATT_UVLO_2_5V); + return 0; + + case AXP717_BATT_OVER_TEMP: + val->intval = POWER_SUPPLY_HEALTH_HOT; + regmap_update_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_OVER_TEMP, + AXP717_BATT_OVER_TEMP); + return 0; + + case AXP717_BATT_UNDER_TEMP: + val->intval = POWER_SUPPLY_HEALTH_COLD; + regmap_update_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_UNDER_TEMP, + AXP717_BATT_UNDER_TEMP); + return 0; + + default: + val->intval = POWER_SUPPLY_HEALTH_GOOD; + return 0; + } + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + ret = axp717_get_constant_charge_current(axp20x_batt, + &val->intval); + if (ret) + return ret; + return 0; + + case POWER_SUPPLY_PROP_CURRENT_NOW: + /* + * The offset of this value is currently unknown and is + * not documented in the datasheet. Based on + * observation it's assumed to be somewhere around + * 450ma. I will leave the value raw for now. 
+ */ + ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval); + if (ret) + return ret; + /* IIO framework gives mA but Power Supply framework gives uA */ + val->intval *= 1000; + return 0; + + case POWER_SUPPLY_PROP_CAPACITY: + ret = regmap_read(axp20x_batt->regmap, AXP717_ON_INDICATE, + ®); + if (ret) + return ret; + + if (!FIELD_GET(AXP717_PWR_OP_BATT_PRESENT, reg)) + return -ENODEV; + + ret = regmap_read(axp20x_batt->regmap, + AXP717_BATT_PERCENT_DATA, ®); + if (ret) + return ret; + + /* + * Fuel Gauge data takes 7 bits but the stored value seems to be + * directly the raw percentage without any scaling to 7 bits. + */ + val->intval = reg & AXP209_FG_PERCENT; + return 0; + + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + return axp20x_batt->data->get_max_voltage(axp20x_batt, + &val->intval); + + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + ret = regmap_read(axp20x_batt->regmap, + AXP717_VSYS_V_POWEROFF, ®); + if (ret) + return ret; + + val->intval = AXP717_BAT_VMIN_MIN_UV + AXP717_BAT_VMIN_STEP * + (reg & AXP717_V_OFF_MASK); + return 0; + + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = iio_read_channel_processed(axp20x_batt->batt_v, + &val->intval); + if (ret) + return ret; + + /* IIO framework gives mV but Power Supply framework gives uV */ + val->intval *= 1000; + return 0; + + case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: + ret = regmap_read(axp20x_batt->regmap, + AXP717_ITERM_CHG_SET, ®); + if (ret) + return ret; + + val->intval = (reg & AXP717_ITERM_CHG_LIM_MASK) * AXP717_ITERM_CC_STEP; + return 0; + + default: + return -EINVAL; + } +} + static int axp22x_battery_set_max_voltage(struct axp20x_batt_ps *axp20x_batt, int val) { @@ -388,6 +643,35 @@ static int axp20x_battery_set_max_voltage(struct axp20x_batt_ps *axp20x_batt, AXP20X_CHRG_CTRL1_TGT_VOLT, val); } +static int axp717_battery_set_max_voltage(struct axp20x_batt_ps *axp20x_batt, + int val) +{ + switch (val) { + case 4000000: + val = AXP717_CHRG_CV_4_0V; + break; + + case 4100000: + val = AXP717_CHRG_CV_4_1V; + break; + + case 4200000: + val = AXP717_CHRG_CV_4_2V; + break; + + default: + /* + * AXP717 can go up to 4.35, 4.4, and 5.0 volts which + * seem too high for lithium batteries, so do not allow. 
+ */ + return -EINVAL; + } + + return regmap_update_bits(axp20x_batt->regmap, + AXP717_CV_CHG_SET, + AXP717_CHRG_CV_VOLT_MASK, val); +} + static int axp20x_set_constant_charge_current(struct axp20x_batt_ps *axp_batt, int charge_current) { @@ -404,6 +688,24 @@ static int axp20x_set_constant_charge_current(struct axp20x_batt_ps *axp_batt, AXP20X_CHRG_CTRL1_TGT_CURR, charge_current); } +static int axp717_set_constant_charge_current(struct axp20x_batt_ps *axp, + int charge_current) +{ + int val; + + if (charge_current > axp->max_ccc) + return -EINVAL; + + if (charge_current > AXP717_BAT_CC_MAX_UA || charge_current < 0) + return -EINVAL; + + val = (charge_current - axp->data->ccc_offset) / + axp->data->ccc_scale; + + return regmap_update_bits(axp->regmap, AXP717_ICC_CHG_SET, + AXP717_ICC_CHARGER_LIM_MASK, val); +} + static int axp20x_set_max_constant_charge_current(struct axp20x_batt_ps *axp, int charge_current) { @@ -448,6 +750,19 @@ static int axp20x_set_voltage_min_design(struct axp20x_batt_ps *axp_batt, AXP20X_V_OFF_MASK, val1); } +static int axp717_set_voltage_min_design(struct axp20x_batt_ps *axp_batt, + int min_voltage) +{ + int val1 = (min_voltage - AXP717_BAT_VMIN_MIN_UV) / AXP717_BAT_VMIN_STEP; + + if (val1 < 0 || val1 > AXP717_V_OFF_MASK) + return -EINVAL; + + return regmap_update_bits(axp_batt->regmap, + AXP717_VSYS_V_POWEROFF, + AXP717_V_OFF_MASK, val1); +} + static int axp20x_battery_set_prop(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) @@ -455,10 +770,10 @@ static int axp20x_battery_set_prop(struct power_supply *psy, struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy); switch (psp) { - case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: return axp20x_set_voltage_min_design(axp20x_batt, val->intval); - case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval); case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: @@ -484,6 +799,42 @@ static int axp20x_battery_set_prop(struct power_supply *psy, } } +static int axp717_battery_set_prop(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + return axp717_set_voltage_min_design(axp20x_batt, val->intval); + + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval); + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + return axp717_set_constant_charge_current(axp20x_batt, + val->intval); + case POWER_SUPPLY_PROP_STATUS: + switch (val->intval) { + case POWER_SUPPLY_STATUS_CHARGING: + return regmap_update_bits(axp20x_batt->regmap, + AXP717_MODULE_EN_CONTROL_2, + AXP717_CHRG_ENABLE, + AXP717_CHRG_ENABLE); + + case POWER_SUPPLY_STATUS_DISCHARGING: + case POWER_SUPPLY_STATUS_NOT_CHARGING: + return regmap_update_bits(axp20x_batt->regmap, + AXP717_MODULE_EN_CONTROL_2, + AXP717_CHRG_ENABLE, 0); + } + return -EINVAL; + default: + return -EINVAL; + } +} + static enum power_supply_property axp20x_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, @@ -493,22 +844,45 @@ static enum power_supply_property axp20x_battery_props[] = { POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, - POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 
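
Editor's note: axp717_set_constant_charge_current() above converts microamps to a register code with (ua - ccc_offset) / ccc_scale; with the AXP717 values used elsewhere in this patch (offset 0, step 64000) the 3008000 uA ceiling maps to 3008000 / 64000 = 47, which fits the 6-bit current-limit field. A standalone sketch of the same conversion with a hypothetical foo_ua_to_code helper:

#include <linux/errno.h>

#define FOO_CCC_OFFSET_UA       0
#define FOO_CCC_SCALE_UA        64000
#define FOO_CCC_MAX_UA          3008000

/* 3008000 uA -> (3008000 - 0) / 64000 = 47, the largest code accepted. */
static int foo_ua_to_code(int ua)
{
        if (ua < FOO_CCC_OFFSET_UA || ua > FOO_CCC_MAX_UA)
                return -EINVAL;

        return (ua - FOO_CCC_OFFSET_UA) / FOO_CCC_SCALE_UA;
}
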
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_CAPACITY, }; +static enum power_supply_property axp717_battery_props[] = { + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, +}; + static int axp20x_battery_prop_writeable(struct power_supply *psy, enum power_supply_property psp) { return psp == POWER_SUPPLY_PROP_STATUS || - psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN || - psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN || + psp == POWER_SUPPLY_PROP_VOLTAGE_MIN || + psp == POWER_SUPPLY_PROP_VOLTAGE_MAX || psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT || psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX; } -static const struct power_supply_desc axp20x_batt_ps_desc = { +static int axp717_battery_prop_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + return psp == POWER_SUPPLY_PROP_STATUS || + psp == POWER_SUPPLY_PROP_VOLTAGE_MIN || + psp == POWER_SUPPLY_PROP_VOLTAGE_MAX || + psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX; +} + +static const struct power_supply_desc axp209_batt_ps_desc = { .name = "axp20x-battery", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = axp20x_battery_props, @@ -518,27 +892,163 @@ static const struct power_supply_desc axp20x_batt_ps_desc = { .set_property = axp20x_battery_set_prop, }; +static const struct power_supply_desc axp717_batt_ps_desc = { + .name = "axp20x-battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .properties = axp717_battery_props, + .num_properties = ARRAY_SIZE(axp717_battery_props), + .property_is_writeable = axp717_battery_prop_writeable, + .get_property = axp717_battery_get_prop, + .set_property = axp717_battery_set_prop, +}; + +static int axp209_bat_cfg_iio_channels(struct platform_device *pdev, + struct axp20x_batt_ps *axp_batt) +{ + axp_batt->batt_v = devm_iio_channel_get(&pdev->dev, "batt_v"); + if (IS_ERR(axp_batt->batt_v)) { + if (PTR_ERR(axp_batt->batt_v) == -ENODEV) + return -EPROBE_DEFER; + return PTR_ERR(axp_batt->batt_v); + } + + axp_batt->batt_chrg_i = devm_iio_channel_get(&pdev->dev, + "batt_chrg_i"); + if (IS_ERR(axp_batt->batt_chrg_i)) { + if (PTR_ERR(axp_batt->batt_chrg_i) == -ENODEV) + return -EPROBE_DEFER; + return PTR_ERR(axp_batt->batt_chrg_i); + } + + axp_batt->batt_dischrg_i = devm_iio_channel_get(&pdev->dev, + "batt_dischrg_i"); + if (IS_ERR(axp_batt->batt_dischrg_i)) { + if (PTR_ERR(axp_batt->batt_dischrg_i) == -ENODEV) + return -EPROBE_DEFER; + return PTR_ERR(axp_batt->batt_dischrg_i); + } + + return 0; +} + +static int axp717_bat_cfg_iio_channels(struct platform_device *pdev, + struct axp20x_batt_ps *axp_batt) +{ + axp_batt->batt_v = devm_iio_channel_get(&pdev->dev, "batt_v"); + if (IS_ERR(axp_batt->batt_v)) { + if (PTR_ERR(axp_batt->batt_v) == -ENODEV) + return -EPROBE_DEFER; + return PTR_ERR(axp_batt->batt_v); + } + + axp_batt->batt_chrg_i = devm_iio_channel_get(&pdev->dev, + "batt_chrg_i"); + if (IS_ERR(axp_batt->batt_chrg_i)) { + if (PTR_ERR(axp_batt->batt_chrg_i) == -ENODEV) + return -EPROBE_DEFER; + return PTR_ERR(axp_batt->batt_chrg_i); + } + + return 0; +} + +static void axp209_set_battery_info(struct platform_device *pdev, + struct axp20x_batt_ps *axp_batt, + struct power_supply_battery_info *info) +{ + int vmin = 
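
Editor's note: the cfg_iio_chan helpers above translate -ENODEV from devm_iio_channel_get() into -EPROBE_DEFER, because the ADC provider may simply not have probed yet. A minimal sketch of that pattern with a hypothetical foo_get_channel helper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

/* Fetch one named channel, deferring until the ADC appears. */
static int foo_get_channel(struct device *dev, const char *name,
                           struct iio_channel **chan)
{
        *chan = devm_iio_channel_get(dev, name);
        if (IS_ERR(*chan)) {
                if (PTR_ERR(*chan) == -ENODEV)
                        return -EPROBE_DEFER;
                return PTR_ERR(*chan);
        }

        return 0;
}
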
info->voltage_min_design_uv; + int ccc = info->constant_charge_current_max_ua; + + if (vmin > 0 && axp20x_set_voltage_min_design(axp_batt, vmin)) + dev_err(&pdev->dev, + "couldn't set voltage_min_design\n"); + + /* Set max to unverified value to be able to set CCC */ + axp_batt->max_ccc = ccc; + + if (ccc <= 0 || axp20x_set_constant_charge_current(axp_batt, ccc)) { + dev_err(&pdev->dev, + "couldn't set ccc from DT: fallback to min value\n"); + ccc = 300000; + axp_batt->max_ccc = ccc; + axp20x_set_constant_charge_current(axp_batt, ccc); + } +} + +static void axp717_set_battery_info(struct platform_device *pdev, + struct axp20x_batt_ps *axp_batt, + struct power_supply_battery_info *info) +{ + int vmin = info->voltage_min_design_uv; + int vmax = info->voltage_max_design_uv; + int ccc = info->constant_charge_current_max_ua; + int val; + + if (vmin > 0 && axp717_set_voltage_min_design(axp_batt, vmin)) + dev_err(&pdev->dev, + "couldn't set voltage_min_design\n"); + + if (vmax > 0 && axp717_battery_set_max_voltage(axp_batt, vmax)) + dev_err(&pdev->dev, + "couldn't set voltage_max_design\n"); + + axp717_get_constant_charge_current(axp_batt, &val); + axp_batt->max_ccc = ccc; + if (ccc <= 0 || axp717_set_constant_charge_current(axp_batt, ccc)) { + dev_err(&pdev->dev, + "couldn't set ccc from DT: current ccc is %d\n", + val); + } +} + static const struct axp_data axp209_data = { .ccc_scale = 100000, .ccc_offset = 300000, + .ccc_reg = AXP20X_CHRG_CTRL1, + .ccc_mask = AXP20X_CHRG_CTRL1_TGT_CURR, + .bat_ps_desc = &axp209_batt_ps_desc, .get_max_voltage = axp20x_battery_get_max_voltage, .set_max_voltage = axp20x_battery_set_max_voltage, + .cfg_iio_chan = axp209_bat_cfg_iio_channels, + .set_bat_info = axp209_set_battery_info, }; static const struct axp_data axp221_data = { .ccc_scale = 150000, .ccc_offset = 300000, + .ccc_reg = AXP20X_CHRG_CTRL1, + .ccc_mask = AXP20X_CHRG_CTRL1_TGT_CURR, .has_fg_valid = true, + .bat_ps_desc = &axp209_batt_ps_desc, .get_max_voltage = axp22x_battery_get_max_voltage, .set_max_voltage = axp22x_battery_set_max_voltage, + .cfg_iio_chan = axp209_bat_cfg_iio_channels, + .set_bat_info = axp209_set_battery_info, +}; + +static const struct axp_data axp717_data = { + .ccc_scale = 64000, + .ccc_offset = 0, + .ccc_reg = AXP717_ICC_CHG_SET, + .ccc_mask = AXP717_ICC_CHARGER_LIM_MASK, + .bat_ps_desc = &axp717_batt_ps_desc, + .get_max_voltage = axp717_battery_get_max_voltage, + .set_max_voltage = axp717_battery_set_max_voltage, + .cfg_iio_chan = axp717_bat_cfg_iio_channels, + .set_bat_info = axp717_set_battery_info, }; static const struct axp_data axp813_data = { .ccc_scale = 200000, .ccc_offset = 200000, + .ccc_reg = AXP20X_CHRG_CTRL1, + .ccc_mask = AXP20X_CHRG_CTRL1_TGT_CURR, .has_fg_valid = true, + .bat_ps_desc = &axp209_batt_ps_desc, .get_max_voltage = axp813_battery_get_max_voltage, .set_max_voltage = axp20x_battery_set_max_voltage, + .cfg_iio_chan = axp209_bat_cfg_iio_channels, + .set_bat_info = axp209_set_battery_info, }; static const struct of_device_id axp20x_battery_ps_id[] = { @@ -548,6 +1058,9 @@ static const struct of_device_id axp20x_battery_ps_id[] = { }, { .compatible = "x-powers,axp221-battery-power-supply", .data = (void *)&axp221_data, + }, { + .compatible = "x-powers,axp717-battery-power-supply", + .data = (void *)&axp717_data, }, { .compatible = "x-powers,axp813-battery-power-supply", .data = (void *)&axp813_data, @@ -561,6 +1074,7 @@ static int axp20x_power_probe(struct platform_device *pdev) struct power_supply_config psy_cfg = {}; struct power_supply_battery_info 
*info; struct device *dev = &pdev->dev; + int ret; if (!of_device_is_available(pdev->dev.of_node)) return -ENODEV; @@ -572,29 +1086,6 @@ static int axp20x_power_probe(struct platform_device *pdev) axp20x_batt->dev = &pdev->dev; - axp20x_batt->batt_v = devm_iio_channel_get(&pdev->dev, "batt_v"); - if (IS_ERR(axp20x_batt->batt_v)) { - if (PTR_ERR(axp20x_batt->batt_v) == -ENODEV) - return -EPROBE_DEFER; - return PTR_ERR(axp20x_batt->batt_v); - } - - axp20x_batt->batt_chrg_i = devm_iio_channel_get(&pdev->dev, - "batt_chrg_i"); - if (IS_ERR(axp20x_batt->batt_chrg_i)) { - if (PTR_ERR(axp20x_batt->batt_chrg_i) == -ENODEV) - return -EPROBE_DEFER; - return PTR_ERR(axp20x_batt->batt_chrg_i); - } - - axp20x_batt->batt_dischrg_i = devm_iio_channel_get(&pdev->dev, - "batt_dischrg_i"); - if (IS_ERR(axp20x_batt->batt_dischrg_i)) { - if (PTR_ERR(axp20x_batt->batt_dischrg_i) == -ENODEV) - return -EPROBE_DEFER; - return PTR_ERR(axp20x_batt->batt_dischrg_i); - } - axp20x_batt->regmap = dev_get_regmap(pdev->dev.parent, NULL); platform_set_drvdata(pdev, axp20x_batt); @@ -603,8 +1094,12 @@ static int axp20x_power_probe(struct platform_device *pdev) axp20x_batt->data = (struct axp_data *)of_device_get_match_data(dev); + ret = axp20x_batt->data->cfg_iio_chan(pdev, axp20x_batt); + if (ret) + return ret; + axp20x_batt->batt = devm_power_supply_register(&pdev->dev, - &axp20x_batt_ps_desc, + axp20x_batt->data->bat_ps_desc, &psy_cfg); if (IS_ERR(axp20x_batt->batt)) { dev_err(&pdev->dev, "failed to register power supply: %ld\n", @@ -613,33 +1108,15 @@ static int axp20x_power_probe(struct platform_device *pdev) } if (!power_supply_get_battery_info(axp20x_batt->batt, &info)) { - int vmin = info->voltage_min_design_uv; - int ccc = info->constant_charge_current_max_ua; - - if (vmin > 0 && axp20x_set_voltage_min_design(axp20x_batt, - vmin)) - dev_err(&pdev->dev, - "couldn't set voltage_min_design\n"); - - /* Set max to unverified value to be able to set CCC */ - axp20x_batt->max_ccc = ccc; - - if (ccc <= 0 || axp20x_set_constant_charge_current(axp20x_batt, - ccc)) { - dev_err(&pdev->dev, - "couldn't set constant charge current from DT: fallback to minimum value\n"); - ccc = 300000; - axp20x_batt->max_ccc = ccc; - axp20x_set_constant_charge_current(axp20x_batt, ccc); - } + axp20x_batt->data->set_bat_info(pdev, axp20x_batt, info); + power_supply_put_battery_info(axp20x_batt->batt, info); } /* * Update max CCC to a valid value if battery info is present or set it * to current register value by default. 
*/ - axp20x_get_constant_charge_current(axp20x_batt, - &axp20x_batt->max_ccc); + axp20x_get_constant_charge_current(axp20x_batt, &axp20x_batt->max_ccc); return 0; } diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c index dae7e5cfc54e1b..2766352ab737da 100644 --- a/drivers/power/supply/axp20x_usb_power.c +++ b/drivers/power/supply/axp20x_usb_power.c @@ -30,8 +30,13 @@ #define AXP20X_PWR_STATUS_VBUS_PRESENT BIT(5) #define AXP20X_PWR_STATUS_VBUS_USED BIT(4) +#define AXP717_PWR_STATUS_VBUS_GOOD BIT(5) + #define AXP20X_USB_STATUS_VBUS_VALID BIT(2) +#define AXP717_PMU_FAULT_VBUS BIT(5) +#define AXP717_PMU_FAULT_VSYS BIT(3) + #define AXP20X_VBUS_VHOLD_uV(b) (4000000 + (((b) >> 3) & 7) * 100000) #define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3) #define AXP20X_VBUS_VHOLD_OFFSET 3 @@ -39,12 +44,20 @@ #define AXP20X_ADC_EN1_VBUS_CURR BIT(2) #define AXP20X_ADC_EN1_VBUS_VOLT BIT(3) +#define AXP717_INPUT_VOL_LIMIT_MASK GENMASK(3, 0) +#define AXP717_INPUT_CUR_LIMIT_MASK GENMASK(5, 0) +#define AXP717_ADC_DATA_MASK GENMASK(14, 0) + +#define AXP717_ADC_EN_VBUS_VOLT BIT(2) + /* * Note do not raise the debounce time, we must report Vusb high within * 100ms otherwise we get Vbus errors in musb. */ #define DEBOUNCE_TIME msecs_to_jiffies(50) +struct axp20x_usb_power; + struct axp_data { const struct power_supply_desc *power_desc; const char * const *irq_names; @@ -58,6 +71,10 @@ struct axp_data { struct reg_field usb_bc_det_fld; struct reg_field vbus_disable_bit; bool vbus_needs_polling: 1; + void (*axp20x_read_vbus)(struct work_struct *work); + int (*axp20x_cfg_iio_chan)(struct platform_device *pdev, + struct axp20x_usb_power *power); + int (*axp20x_cfg_adc_reg)(struct axp20x_usb_power *power); }; struct axp20x_usb_power { @@ -74,6 +91,7 @@ struct axp20x_usb_power { struct iio_channel *vbus_v; struct iio_channel *vbus_i; struct delayed_work vbus_detect; + int max_input_cur; unsigned int old_status; unsigned int online; unsigned int num_irqs; @@ -136,6 +154,24 @@ static void axp20x_usb_power_poll_vbus(struct work_struct *work) mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME); } +static void axp717_usb_power_poll_vbus(struct work_struct *work) +{ + struct axp20x_usb_power *power = + container_of(work, struct axp20x_usb_power, vbus_detect.work); + unsigned int val; + int ret; + + ret = regmap_read(power->regmap, AXP717_ON_INDICATE, &val); + if (ret) + return; + + val &= AXP717_PWR_STATUS_VBUS_GOOD; + if (val != power->old_status) + power_supply_changed(power->supply); + + power->old_status = val; +} + static int axp20x_get_usb_type(struct axp20x_usb_power *power, union power_supply_propval *val) { @@ -281,6 +317,91 @@ static int axp20x_usb_power_get_property(struct power_supply *psy, return 0; } +static int axp717_usb_power_get_property(struct power_supply *psy, + enum power_supply_property psp, union power_supply_propval *val) +{ + struct axp20x_usb_power *power = power_supply_get_drvdata(psy); + unsigned int v; + int ret; + + switch (psp) { + case POWER_SUPPLY_PROP_HEALTH: + val->intval = POWER_SUPPLY_HEALTH_GOOD; + ret = regmap_read(power->regmap, AXP717_ON_INDICATE, &v); + if (ret) + return ret; + + if (!(v & AXP717_PWR_STATUS_VBUS_GOOD)) + val->intval = POWER_SUPPLY_HEALTH_UNKNOWN; + + ret = regmap_read(power->regmap, AXP717_PMU_FAULT_VBUS, &v); + if (ret) + return ret; + + v &= (AXP717_PMU_FAULT_VBUS | AXP717_PMU_FAULT_VSYS); + if (v) { + val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE; + regmap_write(power->regmap, AXP717_PMU_FAULT_VBUS, 
v); + } + + break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = regmap_read(power->regmap, AXP717_INPUT_CUR_LIMIT_CTRL, &v); + if (ret) + return ret; + + /* 50ma step size with 100ma offset. */ + v &= AXP717_INPUT_CUR_LIMIT_MASK; + val->intval = (v * 50000) + 100000; + break; + case POWER_SUPPLY_PROP_ONLINE: + case POWER_SUPPLY_PROP_PRESENT: + ret = regmap_read(power->regmap, AXP717_ON_INDICATE, &v); + if (ret) + return ret; + val->intval = !!(v & AXP717_PWR_STATUS_VBUS_GOOD); + break; + case POWER_SUPPLY_PROP_USB_TYPE: + return axp20x_get_usb_type(power, val); + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + ret = regmap_read(power->regmap, AXP717_INPUT_VOL_LIMIT_CTRL, &v); + if (ret) + return ret; + + /* 80mv step size with 3.88v offset. */ + v &= AXP717_INPUT_VOL_LIMIT_MASK; + val->intval = (v * 80000) + 3880000; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + if (IS_ENABLED(CONFIG_AXP20X_ADC)) { + ret = iio_read_channel_processed(power->vbus_v, + &val->intval); + if (ret) + return ret; + + /* + * IIO framework gives mV but Power Supply framework + * gives uV. + */ + val->intval *= 1000; + return 0; + } + + ret = axp20x_read_variable_width(power->regmap, + AXP717_VBUS_V_H, 16); + if (ret < 0) + return ret; + + val->intval = (ret % AXP717_ADC_DATA_MASK) * 1000; + break; + default: + return -EINVAL; + } + + return 0; + +} + static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power, int intval) { @@ -307,6 +428,22 @@ static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power, return -EINVAL; } +static int axp717_usb_power_set_voltage_min(struct axp20x_usb_power *power, + int intval) +{ + int val; + + /* Minimum value of 3.88v and maximum of 5.08v. */ + if (intval < 3880000 || intval > 5080000) + return -EINVAL; + + /* step size of 80ma with 3.88v offset. */ + val = (intval - 3880000) / 80000; + return regmap_update_bits(power->regmap, + AXP717_INPUT_VOL_LIMIT_CTRL, + AXP717_INPUT_VOL_LIMIT_MASK, val); +} + static int axp20x_usb_power_set_input_current_limit(struct axp20x_usb_power *power, int intval) { @@ -317,6 +454,13 @@ static int axp20x_usb_power_set_input_current_limit(struct axp20x_usb_power *pow if (intval == -1) return -EINVAL; + if (power->max_input_cur && (intval > power->max_input_cur)) { + dev_warn(power->dev, + "requested current %d clamped to max current %d\n", + intval, power->max_input_cur); + intval = power->max_input_cur; + } + /* * BC1.2 detection can cause a race condition if we try to set a current * limit while it's in progress. When it finishes it will overwrite the @@ -340,6 +484,29 @@ static int axp20x_usb_power_set_input_current_limit(struct axp20x_usb_power *pow return regmap_field_write(power->curr_lim_fld, reg); } +static int axp717_usb_power_set_input_current_limit(struct axp20x_usb_power *power, + int intval) +{ + int tmp; + + /* Minimum value of 100mA and maximum value of 3.25A*/ + if (intval < 100000 || intval > 3250000) + return -EINVAL; + + if (power->max_input_cur && (intval > power->max_input_cur)) { + dev_warn(power->dev, + "reqested current %d clamped to max current %d\n", + intval, power->max_input_cur); + intval = power->max_input_cur; + } + + /* Minimum value of 100mA with step size of 50mA. 
*/ + tmp = (intval - 100000) / 50000; + return regmap_update_bits(power->regmap, + AXP717_INPUT_CUR_LIMIT_CTRL, + AXP717_INPUT_CUR_LIMIT_MASK, tmp); +} + static int axp20x_usb_power_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) @@ -362,6 +529,24 @@ static int axp20x_usb_power_set_property(struct power_supply *psy, default: return -EINVAL; } +} + +static int axp717_usb_power_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct axp20x_usb_power *power = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + return axp717_usb_power_set_input_current_limit(power, val->intval); + + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + return axp717_usb_power_set_voltage_min(power, val->intval); + + default: + return -EINVAL; + } return -EINVAL; } @@ -385,6 +570,64 @@ static int axp20x_usb_power_prop_writeable(struct power_supply *psy, psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT; } +static int axp717_usb_power_prop_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN || + psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT; +} + +static int axp20x_configure_iio_channels(struct platform_device *pdev, + struct axp20x_usb_power *power) +{ + power->vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v"); + if (IS_ERR(power->vbus_v)) { + if (PTR_ERR(power->vbus_v) == -ENODEV) + return -EPROBE_DEFER; + return PTR_ERR(power->vbus_v); + } + + power->vbus_i = devm_iio_channel_get(&pdev->dev, "vbus_i"); + if (IS_ERR(power->vbus_i)) { + if (PTR_ERR(power->vbus_i) == -ENODEV) + return -EPROBE_DEFER; + return PTR_ERR(power->vbus_i); + } + + return 0; +} + +static int axp717_configure_iio_channels(struct platform_device *pdev, + struct axp20x_usb_power *power) +{ + power->vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v"); + if (IS_ERR(power->vbus_v)) { + if (PTR_ERR(power->vbus_v) == -ENODEV) + return -EPROBE_DEFER; + return PTR_ERR(power->vbus_v); + } + + return 0; +} + +static int axp20x_configure_adc_registers(struct axp20x_usb_power *power) +{ + /* Enable vbus voltage and current measurement */ + return regmap_update_bits(power->regmap, AXP20X_ADC_EN1, + AXP20X_ADC_EN1_VBUS_CURR | + AXP20X_ADC_EN1_VBUS_VOLT, + AXP20X_ADC_EN1_VBUS_CURR | + AXP20X_ADC_EN1_VBUS_VOLT); +} + +static int axp717_configure_adc_registers(struct axp20x_usb_power *power) +{ + /* Enable vbus voltage measurement */ + return regmap_update_bits(power->regmap, AXP717_ADC_CH_EN_CONTROL, + AXP717_ADC_EN_VBUS_VOLT, + AXP717_ADC_EN_VBUS_VOLT); +} + static enum power_supply_property axp20x_usb_power_properties[] = { POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, @@ -403,6 +646,16 @@ static enum power_supply_property axp22x_usb_power_properties[] = { POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, }; +static enum power_supply_property axp717_usb_power_properties[] = { + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_USB_TYPE, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, +}; + static enum power_supply_property axp813_usb_power_properties[] = { POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, @@ -412,13 +665,6 @@ static enum power_supply_property axp813_usb_power_properties[] = { POWER_SUPPLY_PROP_USB_TYPE, }; -static enum power_supply_usb_type axp813_usb_types[] = { - POWER_SUPPLY_USB_TYPE_SDP, - 
POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_UNKNOWN, -}; - static const struct power_supply_desc axp20x_usb_power_desc = { .name = "axp20x-usb", .type = POWER_SUPPLY_TYPE_USB, @@ -439,6 +685,20 @@ static const struct power_supply_desc axp22x_usb_power_desc = { .set_property = axp20x_usb_power_set_property, }; +static const struct power_supply_desc axp717_usb_power_desc = { + .name = "axp20x-usb", + .type = POWER_SUPPLY_TYPE_USB, + .properties = axp717_usb_power_properties, + .num_properties = ARRAY_SIZE(axp717_usb_power_properties), + .property_is_writeable = axp717_usb_power_prop_writeable, + .get_property = axp717_usb_power_get_property, + .set_property = axp717_usb_power_set_property, + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), +}; + static const struct power_supply_desc axp813_usb_power_desc = { .name = "axp20x-usb", .type = POWER_SUPPLY_TYPE_USB, @@ -447,8 +707,10 @@ static const struct power_supply_desc axp813_usb_power_desc = { .property_is_writeable = axp20x_usb_power_prop_writeable, .get_property = axp20x_usb_power_get_property, .set_property = axp20x_usb_power_set_property, - .usb_types = axp813_usb_types, - .num_usb_types = ARRAY_SIZE(axp813_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), }; static const char * const axp20x_irq_names[] = { @@ -463,6 +725,12 @@ static const char * const axp22x_irq_names[] = { "VBUS_REMOVAL", }; +static const char * const axp717_irq_names[] = { + "VBUS_PLUGIN", + "VBUS_REMOVAL", + "VBUS_OVER_V", +}; + static int axp192_usb_curr_lim_table[] = { -1, -1, @@ -505,6 +773,9 @@ static const struct axp_data axp192_data = { .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1), .vbus_valid_bit = REG_FIELD(AXP192_USB_OTG_STATUS, 2, 2), .vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3), + .axp20x_read_vbus = &axp20x_usb_power_poll_vbus, + .axp20x_cfg_iio_chan = axp20x_configure_iio_channels, + .axp20x_cfg_adc_reg = axp20x_configure_adc_registers, }; static const struct axp_data axp202_data = { @@ -516,6 +787,9 @@ static const struct axp_data axp202_data = { .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1), .vbus_valid_bit = REG_FIELD(AXP20X_USB_OTG_STATUS, 2, 2), .vbus_mon_bit = REG_FIELD(AXP20X_VBUS_MON, 3, 3), + .axp20x_read_vbus = &axp20x_usb_power_poll_vbus, + .axp20x_cfg_iio_chan = axp20x_configure_iio_channels, + .axp20x_cfg_adc_reg = axp20x_configure_adc_registers, }; static const struct axp_data axp221_data = { @@ -526,6 +800,9 @@ static const struct axp_data axp221_data = { .curr_lim_table_size = ARRAY_SIZE(axp221_usb_curr_lim_table), .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1), .vbus_needs_polling = true, + .axp20x_read_vbus = &axp20x_usb_power_poll_vbus, + .axp20x_cfg_iio_chan = axp20x_configure_iio_channels, + .axp20x_cfg_adc_reg = axp20x_configure_adc_registers, }; static const struct axp_data axp223_data = { @@ -536,6 +813,23 @@ static const struct axp_data axp223_data = { .curr_lim_table_size = ARRAY_SIZE(axp20x_usb_curr_lim_table), .curr_lim_fld = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 0, 1), .vbus_needs_polling = true, + .axp20x_read_vbus = &axp20x_usb_power_poll_vbus, + .axp20x_cfg_iio_chan = axp20x_configure_iio_channels, + .axp20x_cfg_adc_reg = axp20x_configure_adc_registers, +}; + +static const struct axp_data axp717_data = { + .power_desc = 
&axp717_usb_power_desc, + .irq_names = axp717_irq_names, + .num_irq_names = ARRAY_SIZE(axp717_irq_names), + .curr_lim_fld = REG_FIELD(AXP717_INPUT_CUR_LIMIT_CTRL, 0, 5), + .usb_bc_en_bit = REG_FIELD(AXP717_MODULE_EN_CONTROL_1, 4, 4), + .usb_bc_det_fld = REG_FIELD(AXP717_BC_DETECT, 5, 7), + .vbus_mon_bit = REG_FIELD(AXP717_ADC_CH_EN_CONTROL, 2, 2), + .vbus_needs_polling = false, + .axp20x_read_vbus = &axp717_usb_power_poll_vbus, + .axp20x_cfg_iio_chan = axp717_configure_iio_channels, + .axp20x_cfg_adc_reg = axp717_configure_adc_registers, }; static const struct axp_data axp813_data = { @@ -549,6 +843,9 @@ static const struct axp_data axp813_data = { .usb_bc_det_fld = REG_FIELD(AXP288_BC_DET_STAT, 5, 7), .vbus_disable_bit = REG_FIELD(AXP20X_VBUS_IPSOUT_MGMT, 7, 7), .vbus_needs_polling = true, + .axp20x_read_vbus = &axp20x_usb_power_poll_vbus, + .axp20x_cfg_iio_chan = axp20x_configure_iio_channels, + .axp20x_cfg_adc_reg = axp20x_configure_adc_registers, }; #ifdef CONFIG_PM_SLEEP @@ -590,36 +887,6 @@ static int axp20x_usb_power_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(axp20x_usb_power_pm_ops, axp20x_usb_power_suspend, axp20x_usb_power_resume); -static int configure_iio_channels(struct platform_device *pdev, - struct axp20x_usb_power *power) -{ - power->vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v"); - if (IS_ERR(power->vbus_v)) { - if (PTR_ERR(power->vbus_v) == -ENODEV) - return -EPROBE_DEFER; - return PTR_ERR(power->vbus_v); - } - - power->vbus_i = devm_iio_channel_get(&pdev->dev, "vbus_i"); - if (IS_ERR(power->vbus_i)) { - if (PTR_ERR(power->vbus_i) == -ENODEV) - return -EPROBE_DEFER; - return PTR_ERR(power->vbus_i); - } - - return 0; -} - -static int configure_adc_registers(struct axp20x_usb_power *power) -{ - /* Enable vbus voltage and current measurement */ - return regmap_update_bits(power->regmap, AXP20X_ADC_EN1, - AXP20X_ADC_EN1_VBUS_CURR | - AXP20X_ADC_EN1_VBUS_VOLT, - AXP20X_ADC_EN1_VBUS_CURR | - AXP20X_ADC_EN1_VBUS_VOLT); -} - static int axp20x_regmap_field_alloc_optional(struct device *dev, struct regmap *regmap, struct reg_field fdesc, @@ -640,6 +907,18 @@ static int axp20x_regmap_field_alloc_optional(struct device *dev, return 0; } +/* Optionally allow users to specify a maximum charging current. 
*/ +static void axp20x_usb_power_parse_dt(struct device *dev, + struct axp20x_usb_power *power) +{ + int ret; + + ret = device_property_read_u32(dev, "input-current-limit-microamp", + &power->max_input_cur); + if (ret) + dev_dbg(dev, "%s() no input-current-limit specified\n", __func__); +} + static int axp20x_usb_power_probe(struct platform_device *pdev) { struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); @@ -676,6 +955,8 @@ static int axp20x_usb_power_probe(struct platform_device *pdev) if (IS_ERR(power->curr_lim_fld)) return PTR_ERR(power->curr_lim_fld); + axp20x_usb_power_parse_dt(&pdev->dev, power); + ret = axp20x_regmap_field_alloc_optional(&pdev->dev, power->regmap, axp_data->vbus_valid_bit, &power->vbus_valid_bit); @@ -707,7 +988,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev) return ret; ret = devm_delayed_work_autocancel(&pdev->dev, &power->vbus_detect, - axp20x_usb_power_poll_vbus); + axp_data->axp20x_read_vbus); if (ret) return ret; @@ -718,9 +999,9 @@ static int axp20x_usb_power_probe(struct platform_device *pdev) return ret; if (IS_ENABLED(CONFIG_AXP20X_ADC)) - ret = configure_iio_channels(pdev, power); + ret = axp_data->axp20x_cfg_iio_chan(pdev, power); else - ret = configure_adc_registers(power); + ret = axp_data->axp20x_cfg_adc_reg(power); if (ret) return ret; @@ -778,6 +1059,9 @@ static const struct of_device_id axp20x_usb_power_match[] = { }, { .compatible = "x-powers,axp223-usb-power-supply", .data = &axp223_data, + }, { + .compatible = "x-powers,axp717-usb-power-supply", + .data = &axp717_data, }, { .compatible = "x-powers,axp813-usb-power-supply", .data = &axp813_data, diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 95d9a35243c2df..a3d71fc7206415 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #define PS_STAT_VBUS_TRIGGER (1 << 0) diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c index 1a935bc885108e..5514d1896bb847 100644 --- a/drivers/power/supply/bq256xx_charger.c +++ b/drivers/power/supply/bq256xx_charger.c @@ -334,14 +334,6 @@ static const int bq25618_619_ichg_values[] = { 1290000, 1360000, 1430000, 1500000 }; -static enum power_supply_usb_type bq256xx_usb_type[] = { - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_UNKNOWN, - POWER_SUPPLY_USB_TYPE_ACA, -}; - static int bq256xx_array_parse(int array_size, int val, const int array[]) { int i = 0; @@ -1252,8 +1244,11 @@ static int bq256xx_property_is_writeable(struct power_supply *psy, static const struct power_supply_desc bq256xx_power_supply_desc = { .name = "bq256xx-charger", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = bq256xx_usb_type, - .num_usb_types = ARRAY_SIZE(bq256xx_usb_type), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_ACA) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .properties = bq256xx_power_supply_props, .num_properties = ARRAY_SIZE(bq256xx_power_supply_props), .get_property = bq256xx_get_charger_property, diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index c1737f964840a1..ba0d22d9042950 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -9,7 +9,7 @@ #include #include #include 
-#include +#include #include diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c index cebca34ff872fe..91e7292d86bb77 100644 --- a/drivers/power/supply/cpcap-charger.c +++ b/drivers/power/supply/cpcap-charger.c @@ -904,7 +904,7 @@ static int cpcap_charger_probe(struct platform_device *pdev) psy_cfg.of_node = pdev->dev.of_node; psy_cfg.drv_data = ddata; psy_cfg.supplied_to = cpcap_charger_supplied_to; - psy_cfg.num_supplicants = ARRAY_SIZE(cpcap_charger_supplied_to), + psy_cfg.num_supplicants = ARRAY_SIZE(cpcap_charger_supplied_to); ddata->usb = devm_power_supply_register(ddata->dev, &cpcap_charger_usb_desc, diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c index d406f2a7844972..962a6fd298323e 100644 --- a/drivers/power/supply/cros_peripheral_charger.c +++ b/drivers/power/supply/cros_peripheral_charger.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #define DRV_NAME "cros-ec-pchg" #define PCHG_DIR_PREFIX "peripheral" diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c index 8008e31c0c0987..bed3e2e9bfea97 100644 --- a/drivers/power/supply/cros_usbpd-charger.c +++ b/drivers/power/supply/cros_usbpd-charger.c @@ -73,17 +73,6 @@ static enum power_supply_property cros_usbpd_dedicated_charger_props[] = { POWER_SUPPLY_PROP_VOLTAGE_NOW, }; -static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = { - POWER_SUPPLY_USB_TYPE_UNKNOWN, - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_C, - POWER_SUPPLY_USB_TYPE_PD, - POWER_SUPPLY_USB_TYPE_PD_DRP, - POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID -}; - /* Input voltage/current limit in mV/mA. Default to none. 
*/ static u16 input_voltage_limit = EC_POWER_LIMIT_NONE; static u16 input_current_limit = EC_POWER_LIMIT_NONE; @@ -643,9 +632,14 @@ static int cros_usbpd_charger_probe(struct platform_device *pd) psy_desc->properties = cros_usbpd_charger_props; psy_desc->num_properties = ARRAY_SIZE(cros_usbpd_charger_props); - psy_desc->usb_types = cros_usbpd_charger_usb_types; - psy_desc->num_usb_types = - ARRAY_SIZE(cros_usbpd_charger_usb_types); + psy_desc->usb_types = BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN) | + BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_C) | + BIT(POWER_SUPPLY_USB_TYPE_PD) | + BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) | + BIT(POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID); } psy_desc->name = port->name; diff --git a/drivers/power/supply/lenovo_yoga_c630_battery.c b/drivers/power/supply/lenovo_yoga_c630_battery.c index d4d422cc5353ec..f98f65e00831a5 100644 --- a/drivers/power/supply/lenovo_yoga_c630_battery.c +++ b/drivers/power/supply/lenovo_yoga_c630_battery.c @@ -353,15 +353,10 @@ static enum power_supply_property yoga_c630_psy_adpt_properties[] = { POWER_SUPPLY_PROP_USB_TYPE, }; -static const enum power_supply_usb_type yoga_c630_psy_adpt_usb_type[] = { - POWER_SUPPLY_USB_TYPE_C, -}; - static const struct power_supply_desc yoga_c630_psy_adpt_psy_desc = { .name = "yoga-c630-adapter", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = yoga_c630_psy_adpt_usb_type, - .num_usb_types = ARRAY_SIZE(yoga_c630_psy_adpt_usb_type), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_C), .properties = yoga_c630_psy_adpt_properties, .num_properties = ARRAY_SIZE(yoga_c630_psy_adpt_properties), .get_property = yoga_c630_psy_adpt_get_property, diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index e7d37e422c3f6e..496c3e1f2ee6d6 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -853,7 +853,10 @@ static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off) /* program interrupt thresholds such that we should * get interrupt for every 'off' perc change in the soc */ - regmap_read(map, MAX17042_RepSOC, &soc); + if (chip->pdata->enable_current_sense) + regmap_read(map, MAX17042_RepSOC, &soc); + else + regmap_read(map, MAX17042_VFSOC, &soc); soc >>= 8; soc_tr = (soc + off) << 8; if (off < soc) diff --git a/drivers/power/supply/max1720x_battery.c b/drivers/power/supply/max1720x_battery.c index edc262f0a62f2a..33105419e2427b 100644 --- a/drivers/power/supply/max1720x_battery.c +++ b/drivers/power/supply/max1720x_battery.c @@ -10,13 +10,16 @@ #include #include #include +#include #include #include -#include +#include /* Nonvolatile registers */ +#define MAX1720X_NXTABLE0 0x80 #define MAX1720X_NRSENSE 0xCF /* RSense in 10^-5 Ohm */ +#define MAX1720X_NDEVICE_NAME4 0xDF /* ModelGauge m5 */ #define MAX172XX_STATUS 0x00 /* Status */ @@ -46,6 +49,8 @@ static const char *const max17205_model = "MAX17205"; struct max1720x_device_info { struct regmap *regmap; + struct regmap *regmap_nv; + struct i2c_client *ancillary; int rsense; }; @@ -106,6 +111,134 @@ static const struct regmap_config max1720x_regmap_cfg = { .cache_type = REGCACHE_RBTREE, }; +static const struct regmap_range max1720x_nvmem_allow[] = { + regmap_reg_range(MAX1720X_NXTABLE0, MAX1720X_NDEVICE_NAME4), +}; + +static const struct regmap_range max1720x_nvmem_deny[] = { + regmap_reg_range(0x00, 0x7F), + regmap_reg_range(0xE0, 0xFF), +}; + +static const struct regmap_access_table max1720x_nvmem_regs 
= { + .yes_ranges = max1720x_nvmem_allow, + .n_yes_ranges = ARRAY_SIZE(max1720x_nvmem_allow), + .no_ranges = max1720x_nvmem_deny, + .n_no_ranges = ARRAY_SIZE(max1720x_nvmem_deny), +}; + +static const struct regmap_config max1720x_nvmem_regmap_cfg = { + .reg_bits = 8, + .val_bits = 16, + .max_register = MAX1720X_NDEVICE_NAME4, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .rd_table = &max1720x_nvmem_regs, +}; + +static const struct nvmem_cell_info max1720x_nvmem_cells[] = { + { .name = "nXTable0", .offset = 0, .bytes = 2, }, + { .name = "nXTable1", .offset = 2, .bytes = 2, }, + { .name = "nXTable2", .offset = 4, .bytes = 2, }, + { .name = "nXTable3", .offset = 6, .bytes = 2, }, + { .name = "nXTable4", .offset = 8, .bytes = 2, }, + { .name = "nXTable5", .offset = 10, .bytes = 2, }, + { .name = "nXTable6", .offset = 12, .bytes = 2, }, + { .name = "nXTable7", .offset = 14, .bytes = 2, }, + { .name = "nXTable8", .offset = 16, .bytes = 2, }, + { .name = "nXTable9", .offset = 18, .bytes = 2, }, + { .name = "nXTable10", .offset = 20, .bytes = 2, }, + { .name = "nXTable11", .offset = 22, .bytes = 2, }, + { .name = "nUser18C", .offset = 24, .bytes = 2, }, + { .name = "nUser18D", .offset = 26, .bytes = 2, }, + { .name = "nODSCTh", .offset = 28, .bytes = 2, }, + { .name = "nODSCCfg", .offset = 30, .bytes = 2, }, + + { .name = "nOCVTable0", .offset = 32, .bytes = 2, }, + { .name = "nOCVTable1", .offset = 34, .bytes = 2, }, + { .name = "nOCVTable2", .offset = 36, .bytes = 2, }, + { .name = "nOCVTable3", .offset = 38, .bytes = 2, }, + { .name = "nOCVTable4", .offset = 40, .bytes = 2, }, + { .name = "nOCVTable5", .offset = 42, .bytes = 2, }, + { .name = "nOCVTable6", .offset = 44, .bytes = 2, }, + { .name = "nOCVTable7", .offset = 46, .bytes = 2, }, + { .name = "nOCVTable8", .offset = 48, .bytes = 2, }, + { .name = "nOCVTable9", .offset = 50, .bytes = 2, }, + { .name = "nOCVTable10", .offset = 52, .bytes = 2, }, + { .name = "nOCVTable11", .offset = 54, .bytes = 2, }, + { .name = "nIChgTerm", .offset = 56, .bytes = 2, }, + { .name = "nFilterCfg", .offset = 58, .bytes = 2, }, + { .name = "nVEmpty", .offset = 60, .bytes = 2, }, + { .name = "nLearnCfg", .offset = 62, .bytes = 2, }, + + { .name = "nQRTable00", .offset = 64, .bytes = 2, }, + { .name = "nQRTable10", .offset = 66, .bytes = 2, }, + { .name = "nQRTable20", .offset = 68, .bytes = 2, }, + { .name = "nQRTable30", .offset = 70, .bytes = 2, }, + { .name = "nCycles", .offset = 72, .bytes = 2, }, + { .name = "nFullCapNom", .offset = 74, .bytes = 2, }, + { .name = "nRComp0", .offset = 76, .bytes = 2, }, + { .name = "nTempCo", .offset = 78, .bytes = 2, }, + { .name = "nIAvgEmpty", .offset = 80, .bytes = 2, }, + { .name = "nFullCapRep", .offset = 82, .bytes = 2, }, + { .name = "nVoltTemp", .offset = 84, .bytes = 2, }, + { .name = "nMaxMinCurr", .offset = 86, .bytes = 2, }, + { .name = "nMaxMinVolt", .offset = 88, .bytes = 2, }, + { .name = "nMaxMinTemp", .offset = 90, .bytes = 2, }, + { .name = "nSOC", .offset = 92, .bytes = 2, }, + { .name = "nTimerH", .offset = 94, .bytes = 2, }, + + { .name = "nConfig", .offset = 96, .bytes = 2, }, + { .name = "nRippleCfg", .offset = 98, .bytes = 2, }, + { .name = "nMiscCfg", .offset = 100, .bytes = 2, }, + { .name = "nDesignCap", .offset = 102, .bytes = 2, }, + { .name = "nHibCfg", .offset = 104, .bytes = 2, }, + { .name = "nPackCfg", .offset = 106, .bytes = 2, }, + { .name = "nRelaxCfg", .offset = 108, .bytes = 2, }, + { .name = "nConvgCfg", .offset = 110, .bytes = 2, }, + { .name = "nNVCfg0", .offset = 112, .bytes = 
2, }, + { .name = "nNVCfg1", .offset = 114, .bytes = 2, }, + { .name = "nNVCfg2", .offset = 116, .bytes = 2, }, + { .name = "nSBSCfg", .offset = 118, .bytes = 2, }, + { .name = "nROMID0", .offset = 120, .bytes = 2, }, + { .name = "nROMID1", .offset = 122, .bytes = 2, }, + { .name = "nROMID2", .offset = 124, .bytes = 2, }, + { .name = "nROMID3", .offset = 126, .bytes = 2, }, + + { .name = "nVAlrtTh", .offset = 128, .bytes = 2, }, + { .name = "nTAlrtTh", .offset = 130, .bytes = 2, }, + { .name = "nSAlrtTh", .offset = 132, .bytes = 2, }, + { .name = "nIAlrtTh", .offset = 134, .bytes = 2, }, + { .name = "nUser1C4", .offset = 136, .bytes = 2, }, + { .name = "nUser1C5", .offset = 138, .bytes = 2, }, + { .name = "nFullSOCThr", .offset = 140, .bytes = 2, }, + { .name = "nTTFCfg", .offset = 142, .bytes = 2, }, + { .name = "nCGain", .offset = 144, .bytes = 2, }, + { .name = "nTCurve", .offset = 146, .bytes = 2, }, + { .name = "nTGain", .offset = 148, .bytes = 2, }, + { .name = "nTOff", .offset = 150, .bytes = 2, }, + { .name = "nManfctrName0", .offset = 152, .bytes = 2, }, + { .name = "nManfctrName1", .offset = 154, .bytes = 2, }, + { .name = "nManfctrName2", .offset = 156, .bytes = 2, }, + { .name = "nRSense", .offset = 158, .bytes = 2, }, + + { .name = "nUser1D0", .offset = 160, .bytes = 2, }, + { .name = "nUser1D1", .offset = 162, .bytes = 2, }, + { .name = "nAgeFcCfg", .offset = 164, .bytes = 2, }, + { .name = "nDesignVoltage", .offset = 166, .bytes = 2, }, + { .name = "nUser1D4", .offset = 168, .bytes = 2, }, + { .name = "nRFastVShdn", .offset = 170, .bytes = 2, }, + { .name = "nManfctrDate", .offset = 172, .bytes = 2, }, + { .name = "nFirstUsed", .offset = 174, .bytes = 2, }, + { .name = "nSerialNumber0", .offset = 176, .bytes = 2, }, + { .name = "nSerialNumber1", .offset = 178, .bytes = 2, }, + { .name = "nSerialNumber2", .offset = 180, .bytes = 2, }, + { .name = "nDeviceName0", .offset = 182, .bytes = 2, }, + { .name = "nDeviceName1", .offset = 184, .bytes = 2, }, + { .name = "nDeviceName2", .offset = 186, .bytes = 2, }, + { .name = "nDeviceName3", .offset = 188, .bytes = 2, }, + { .name = "nDeviceName4", .offset = 190, .bytes = 2, }, +}; + static const enum power_supply_property max1720x_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CAPACITY, @@ -249,30 +382,80 @@ static int max1720x_battery_get_property(struct power_supply *psy, return ret; } -static int max1720x_probe_sense_resistor(struct i2c_client *client, - struct max1720x_device_info *info) +static +int max1720x_nvmem_reg_read(void *priv, unsigned int off, void *val, size_t len) +{ + struct max1720x_device_info *info = priv; + unsigned int reg = MAX1720X_NXTABLE0 + (off / 2); + + return regmap_bulk_read(info->regmap_nv, reg, val, len / 2); +} + +static void max1720x_unregister_ancillary(void *data) +{ + struct max1720x_device_info *info = data; + + i2c_unregister_device(info->ancillary); +} + +static int max1720x_probe_nvmem(struct i2c_client *client, + struct max1720x_device_info *info) { struct device *dev = &client->dev; - struct i2c_client *ancillary; + struct nvmem_config nvmem_config = { + .dev = dev, + .name = "max1720x_nvmem", + .cells = max1720x_nvmem_cells, + .ncells = ARRAY_SIZE(max1720x_nvmem_cells), + .read_only = true, + .root_only = true, + .reg_read = max1720x_nvmem_reg_read, + .size = ARRAY_SIZE(max1720x_nvmem_cells) * 2, + .word_size = 2, + .stride = 2, + .priv = info, + }; + struct nvmem_device *nvmem; + unsigned int val; int ret; - ancillary = i2c_new_ancillary_device(client, "nvmem", 0xb); - if 
(IS_ERR(ancillary)) { + info->ancillary = i2c_new_ancillary_device(client, "nvmem", 0xb); + if (IS_ERR(info->ancillary)) { dev_err(dev, "Failed to initialize ancillary i2c device\n"); - return PTR_ERR(ancillary); + return PTR_ERR(info->ancillary); } - ret = i2c_smbus_read_word_data(ancillary, MAX1720X_NRSENSE); - i2c_unregister_device(ancillary); - if (ret < 0) + ret = devm_add_action_or_reset(dev, max1720x_unregister_ancillary, info); + if (ret) { + dev_err(dev, "Failed to add unregister callback\n"); return ret; + } + + info->regmap_nv = devm_regmap_init_i2c(info->ancillary, + &max1720x_nvmem_regmap_cfg); + if (IS_ERR(info->regmap_nv)) { + dev_err(dev, "regmap initialization of nvmem failed\n"); + return PTR_ERR(info->regmap_nv); + } + + ret = regmap_read(info->regmap_nv, MAX1720X_NRSENSE, &val); + if (ret < 0) { + dev_err(dev, "Failed to read sense resistor value\n"); + return ret; + } - info->rsense = ret; + info->rsense = val; if (!info->rsense) { dev_warn(dev, "RSense not calibrated, set 10 mOhms!\n"); info->rsense = 1000; /* in regs in 10^-5 */ } + nvmem = devm_nvmem_register(dev, &nvmem_config); + if (IS_ERR(nvmem)) { + dev_err(dev, "Could not register nvmem!"); + return PTR_ERR(nvmem); + } + return 0; } @@ -299,15 +482,15 @@ static int max1720x_probe(struct i2c_client *client) psy_cfg.drv_data = info; psy_cfg.fwnode = dev_fwnode(dev); + i2c_set_clientdata(client, info); info->regmap = devm_regmap_init_i2c(client, &max1720x_regmap_cfg); if (IS_ERR(info->regmap)) return dev_err_probe(dev, PTR_ERR(info->regmap), "regmap initialization failed\n"); - ret = max1720x_probe_sense_resistor(client, info); + ret = max1720x_probe_nvmem(client, info); if (ret) - return dev_err_probe(dev, ret, - "Failed to read sense resistor value\n"); + return dev_err_probe(dev, ret, "Failed to probe nvmem\n"); bat = devm_power_supply_register(dev, &max1720x_bat_desc, &psy_cfg); if (IS_ERR(bat)) diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c index 2001e12c9f7de9..4caac142c4285a 100644 --- a/drivers/power/supply/max77693_charger.c +++ b/drivers/power/supply/max77693_charger.c @@ -197,12 +197,58 @@ static int max77693_get_online(struct regmap *regmap, int *val) return 0; } +/* + * There are *two* current limit registers: + * - CHGIN limit, which limits the input current from the external charger; + * - Fast charge current limit, which limits the current going to the battery. 
+ */ + +static int max77693_get_input_current_limit(struct regmap *regmap, int *val) +{ + unsigned int data; + int ret; + + ret = regmap_read(regmap, MAX77693_CHG_REG_CHG_CNFG_09, &data); + if (ret < 0) + return ret; + + data &= CHG_CNFG_09_CHGIN_ILIM_MASK; + data >>= CHG_CNFG_09_CHGIN_ILIM_SHIFT; + + if (data <= 0x03) + /* The first four values (0x00..0x03) are 60mA */ + *val = 60000; + else + *val = data * 20000; /* 20mA steps */ + + return 0; +} + +static int max77693_get_fast_charge_current(struct regmap *regmap, int *val) +{ + unsigned int data; + int ret; + + ret = regmap_read(regmap, MAX77693_CHG_REG_CHG_CNFG_02, &data); + if (ret < 0) + return ret; + + data &= CHG_CNFG_02_CC_MASK; + data >>= CHG_CNFG_02_CC_SHIFT; + + *val = data * 33300; /* 33.3mA steps */ + + return 0; +} + static enum power_supply_property max77693_charger_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, }; @@ -231,6 +277,12 @@ static int max77693_charger_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_ONLINE: ret = max77693_get_online(regmap, &val->intval); break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = max77693_get_input_current_limit(regmap, &val->intval); + break; + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + ret = max77693_get_fast_charge_current(regmap, &val->intval); + break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = max77693_charger_model; break; diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c index c26023b19f267f..418b882b163d18 100644 --- a/drivers/power/supply/max8998_charger.c +++ b/drivers/power/supply/max8998_charger.c @@ -191,6 +191,7 @@ static const struct platform_device_id max8998_battery_id[] = { { "max8998-battery", TYPE_MAX8998 }, { } }; +MODULE_DEVICE_TABLE(platform, max8998_battery_id); static struct platform_driver max8998_battery_driver = { .driver = { diff --git a/drivers/power/supply/mp2629_charger.c b/drivers/power/supply/mp2629_charger.c index 3a2a28fbba7396..d281c105962978 100644 --- a/drivers/power/supply/mp2629_charger.c +++ b/drivers/power/supply/mp2629_charger.c @@ -94,14 +94,6 @@ struct mp2629_prop { int shift; }; -static enum power_supply_usb_type mp2629_usb_types[] = { - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_PD_DRP, - POWER_SUPPLY_USB_TYPE_UNKNOWN -}; - static enum power_supply_property mp2629_charger_usb_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_USB_TYPE, @@ -487,8 +479,11 @@ static irqreturn_t mp2629_irq_handler(int irq, void *dev_id) static const struct power_supply_desc mp2629_usb_desc = { .name = "mp2629_usb", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = mp2629_usb_types, - .num_usb_types = ARRAY_SIZE(mp2629_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .properties = mp2629_charger_usb_props, .num_properties = ARRAY_SIZE(mp2629_charger_usb_props), .get_property = mp2629_charger_usb_get_prop, diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c index aca123783efccd..e99e551489761a 100644 --- a/drivers/power/supply/mt6360_charger.c +++ 
b/drivers/power/supply/mt6360_charger.c @@ -154,13 +154,6 @@ enum mt6360_pmu_chg_type { MT6360_CHG_TYPE_MAX, }; -static enum power_supply_usb_type mt6360_charger_usb_types[] = { - POWER_SUPPLY_USB_TYPE_UNKNOWN, - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, -}; - static int mt6360_get_chrdet_ext_stat(struct mt6360_chg_info *mci, bool *pwr_rdy) { @@ -574,8 +567,10 @@ static const struct power_supply_desc mt6360_charger_desc = { .get_property = mt6360_charger_get_property, .set_property = mt6360_charger_set_property, .property_is_writeable = mt6360_charger_property_is_writeable, - .usb_types = mt6360_charger_usb_types, - .num_usb_types = ARRAY_SIZE(mt6360_charger_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), }; static const struct regulator_ops mt6360_chg_otg_ops = { diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c index e24fce087d804e..ad8793bf997e15 100644 --- a/drivers/power/supply/mt6370-charger.c +++ b/drivers/power/supply/mt6370-charger.c @@ -624,13 +624,6 @@ static enum power_supply_property mt6370_chg_properties[] = { POWER_SUPPLY_PROP_USB_TYPE, }; -static enum power_supply_usb_type mt6370_chg_usb_types[] = { - POWER_SUPPLY_USB_TYPE_UNKNOWN, - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_DCP, -}; - static const struct power_supply_desc mt6370_chg_psy_desc = { .name = "mt6370-charger", .type = POWER_SUPPLY_TYPE_USB, @@ -639,8 +632,10 @@ static const struct power_supply_desc mt6370_chg_psy_desc = { .get_property = mt6370_chg_get_property, .set_property = mt6370_chg_set_property, .property_is_writeable = mt6370_chg_property_is_writeable, - .usb_types = mt6370_chg_usb_types, - .num_usb_types = ARRAY_SIZE(mt6370_chg_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), }; static const struct regulator_ops mt6370_chg_otg_ops = { diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 8f6025acd10a08..49534458a9f7d3 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -9,6 +9,7 @@ * Modified: 2004, Oct Szabolcs Gyurko */ +#include #include #include #include @@ -756,10 +757,10 @@ int power_supply_get_battery_info(struct power_supply *psy, for (index = 0; index < len; index++) { struct power_supply_battery_ocv_table *table; - char *propname; int i, tab_len, size; - propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index); + char *propname __free(kfree) = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", + index); if (!propname) { power_supply_put_battery_info(psy, info); err = -ENOMEM; @@ -768,13 +769,11 @@ int power_supply_get_battery_info(struct power_supply *psy, list = of_get_property(battery_np, propname, &size); if (!list || !size) { dev_err(&psy->dev, "failed to get %s\n", propname); - kfree(propname); power_supply_put_battery_info(psy, info); err = -EINVAL; goto out_put_node; } - kfree(propname); tab_len = size / (2 * sizeof(__be32)); info->ocv_table_size[index] = tab_len; @@ -1232,11 +1231,7 @@ EXPORT_SYMBOL_GPL(power_supply_set_property); int power_supply_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { - if (atomic_read(&psy->use_cnt) <= 0 || - !psy->desc->property_is_writeable) - return -ENODEV; - - 
return psy->desc->property_is_writeable(psy, psp); + return psy->desc->property_is_writeable && psy->desc->property_is_writeable(psy, psp); } EXPORT_SYMBOL_GPL(power_supply_property_is_writeable); @@ -1296,7 +1291,7 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd, return ret; } -static struct thermal_zone_device_ops psy_tzd_ops = { +static const struct thermal_zone_device_ops psy_tzd_ops = { .get_temp = power_supply_read_temp, }; @@ -1361,10 +1356,6 @@ __power_supply_register(struct device *parent, pr_warn("%s: Expected proper parent device for '%s'\n", __func__, desc->name); - if (psy_has_property(desc, POWER_SUPPLY_PROP_USB_TYPE) && - (!desc->usb_types || !desc->num_usb_types)) - return ERR_PTR(-EINVAL); - psy = kzalloc(sizeof(*psy), GFP_KERNEL); if (!psy) return ERR_PTR(-ENOMEM); diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index baacefbdf768ab..6fbbfb1c685e6c 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -318,7 +318,8 @@ static const struct hwmon_channel_info * const power_supply_hwmon_info[] = { HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | - HWMON_T_MIN_ALARM, + HWMON_T_MIN_ALARM | + HWMON_T_MAX_ALARM, HWMON_T_LABEL | HWMON_T_INPUT | diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 3e63d165b2f70c..16b3c5880cd8ce 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -209,7 +209,7 @@ static struct power_supply_attr power_supply_attrs[] = { POWER_SUPPLY_ATTR(TIME_TO_FULL_NOW), POWER_SUPPLY_ATTR(TIME_TO_FULL_AVG), POWER_SUPPLY_ENUM_ATTR(TYPE), - POWER_SUPPLY_ATTR(USB_TYPE), + POWER_SUPPLY_ENUM_ATTR(USB_TYPE), POWER_SUPPLY_ENUM_ATTR(SCOPE), POWER_SUPPLY_ATTR(PRECHARGE_CURRENT), POWER_SUPPLY_ATTR(CHARGE_TERM_CURRENT), @@ -237,31 +237,28 @@ static enum power_supply_property dev_attr_psp(struct device_attribute *attr) return to_ps_attr(attr) - power_supply_attrs; } -static ssize_t power_supply_show_usb_type(struct device *dev, - const struct power_supply_desc *desc, - union power_supply_propval *value, - char *buf) +static ssize_t power_supply_show_enum_with_available( + struct device *dev, const char * const labels[], int label_count, + unsigned int available_values, int value, char *buf) { - enum power_supply_usb_type usb_type; + bool match = false, available, active; ssize_t count = 0; - bool match = false; int i; - for (i = 0; i < desc->num_usb_types; ++i) { - usb_type = desc->usb_types[i]; + for (i = 0; i < label_count; i++) { + available = available_values & BIT(i); + active = i == value; - if (value->intval == usb_type) { - count += sysfs_emit_at(buf, count, "[%s] ", - POWER_SUPPLY_USB_TYPE_TEXT[usb_type]); + if (available && active) { + count += sysfs_emit_at(buf, count, "[%s] ", labels[i]); match = true; - } else { - count += sysfs_emit_at(buf, count, "%s ", - POWER_SUPPLY_USB_TYPE_TEXT[usb_type]); + } else if (available) { + count += sysfs_emit_at(buf, count, "%s ", labels[i]); } } if (!match) { - dev_warn(dev, "driver reporting unsupported connected type\n"); + dev_warn(dev, "driver reporting unavailable enum value %d\n", value); return -EINVAL; } @@ -300,8 +297,10 @@ static ssize_t power_supply_show_property(struct device *dev, switch (psp) { case POWER_SUPPLY_PROP_USB_TYPE: - ret = power_supply_show_usb_type(dev, psy->desc, - &value, buf); + ret = power_supply_show_enum_with_available( + dev, POWER_SUPPLY_USB_TYPE_TEXT, + 
ARRAY_SIZE(POWER_SUPPLY_USB_TYPE_TEXT), + psy->desc->usb_types, value.intval, buf); break; case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR: ret = power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours, @@ -523,33 +522,10 @@ ssize_t power_supply_charge_behaviour_show(struct device *dev, enum power_supply_charge_behaviour current_behaviour, char *buf) { - bool match = false, available, active; - ssize_t count = 0; - int i; - - for (i = 0; i < ARRAY_SIZE(POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT); i++) { - available = available_behaviours & BIT(i); - active = i == current_behaviour; - - if (available && active) { - count += sysfs_emit_at(buf, count, "[%s] ", - POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[i]); - match = true; - } else if (available) { - count += sysfs_emit_at(buf, count, "%s ", - POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[i]); - } - } - - if (!match) { - dev_warn(dev, "driver reporting unsupported charge behaviour\n"); - return -EINVAL; - } - - if (count) - buf[count - 1] = '\n'; - - return count; + return power_supply_show_enum_with_available( + dev, POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT, + ARRAY_SIZE(POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT), + available_behaviours, current_behaviour, buf); } EXPORT_SYMBOL_GPL(power_supply_charge_behaviour_show); diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c index 8b3df3ee59ba63..f0a64c00ddaae0 100644 --- a/drivers/power/supply/qcom_battmgr.c +++ b/drivers/power/supply/qcom_battmgr.c @@ -786,19 +786,6 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy, return 0; } -static const enum power_supply_usb_type usb_psy_supported_types[] = { - POWER_SUPPLY_USB_TYPE_UNKNOWN, - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_ACA, - POWER_SUPPLY_USB_TYPE_C, - POWER_SUPPLY_USB_TYPE_PD, - POWER_SUPPLY_USB_TYPE_PD_DRP, - POWER_SUPPLY_USB_TYPE_PD_PPS, - POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, -}; - static const enum power_supply_property sc8280xp_usb_props[] = { POWER_SUPPLY_PROP_ONLINE, }; @@ -809,8 +796,16 @@ static const struct power_supply_desc sc8280xp_usb_psy_desc = { .properties = sc8280xp_usb_props, .num_properties = ARRAY_SIZE(sc8280xp_usb_props), .get_property = qcom_battmgr_usb_get_property, - .usb_types = usb_psy_supported_types, - .num_usb_types = ARRAY_SIZE(usb_psy_supported_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN) | + BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_ACA) | + BIT(POWER_SUPPLY_USB_TYPE_C) | + BIT(POWER_SUPPLY_USB_TYPE_PD) | + BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) | + BIT(POWER_SUPPLY_USB_TYPE_PD_PPS) | + BIT(POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID), }; static const enum power_supply_property sm8350_usb_props[] = { @@ -829,8 +824,16 @@ static const struct power_supply_desc sm8350_usb_psy_desc = { .properties = sm8350_usb_props, .num_properties = ARRAY_SIZE(sm8350_usb_props), .get_property = qcom_battmgr_usb_get_property, - .usb_types = usb_psy_supported_types, - .num_usb_types = ARRAY_SIZE(usb_psy_supported_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN) | + BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_ACA) | + BIT(POWER_SUPPLY_USB_TYPE_C) | + BIT(POWER_SUPPLY_USB_TYPE_PD) | + BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) | + BIT(POWER_SUPPLY_USB_TYPE_PD_PPS) | + BIT(POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID), }; static const u8 sm8350_wls_prop_map[] = { diff --git 
a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c index 9bb7774060138e..81acbd8b2169ce 100644 --- a/drivers/power/supply/qcom_pmi8998_charger.c +++ b/drivers/power/supply/qcom_pmi8998_charger.c @@ -411,13 +411,6 @@ static enum power_supply_property smb2_properties[] = { POWER_SUPPLY_PROP_USB_TYPE, }; -static enum power_supply_usb_type smb2_usb_types[] = { - POWER_SUPPLY_USB_TYPE_UNKNOWN, - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, -}; - static int smb2_get_prop_usb_online(struct smb2_chip *chip, int *val) { unsigned int stat; @@ -775,8 +768,10 @@ static irqreturn_t smb2_handle_wdog_bark(int irq, void *data) static const struct power_supply_desc smb2_psy_desc = { .name = "pmi8998_charger", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = smb2_usb_types, - .num_usb_types = ARRAY_SIZE(smb2_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .properties = smb2_properties, .num_properties = ARRAY_SIZE(smb2_properties), .get_property = smb2_get_property, diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c index 7ca91739c6ccf8..57b6ddefad28db 100644 --- a/drivers/power/supply/rk817_charger.c +++ b/drivers/power/supply/rk817_charger.c @@ -8,7 +8,7 @@ * Chris Morgan */ -#include +#include #include #include #include @@ -673,11 +673,6 @@ static enum power_supply_property rk817_chg_props[] = { POWER_SUPPLY_PROP_VOLTAGE_AVG, }; -static enum power_supply_usb_type rk817_usb_type[] = { - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_UNKNOWN, -}; - static const struct power_supply_desc rk817_bat_desc = { .name = "rk817-battery", .type = POWER_SUPPLY_TYPE_BATTERY, @@ -689,8 +684,8 @@ static const struct power_supply_desc rk817_bat_desc = { static const struct power_supply_desc rk817_chg_desc = { .name = "rk817-charger", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = rk817_usb_type, - .num_usb_types = ARRAY_SIZE(rk817_usb_type), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .properties = rk817_chg_props, .num_properties = ARRAY_SIZE(rk817_chg_props), .get_property = rk817_chg_get_prop, diff --git a/drivers/power/supply/rn5t618_power.c b/drivers/power/supply/rn5t618_power.c index ebea3522a2ac39..40dec55a9f7360 100644 --- a/drivers/power/supply/rn5t618_power.c +++ b/drivers/power/supply/rn5t618_power.c @@ -70,13 +70,6 @@ struct rn5t618_power_info { int irq; }; -static enum power_supply_usb_type rn5t618_usb_types[] = { - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_UNKNOWN -}; - static enum power_supply_property rn5t618_usb_props[] = { /* input current limit is not very accurate */ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, @@ -681,8 +674,10 @@ static const struct power_supply_desc rn5t618_adp_desc = { static const struct power_supply_desc rn5t618_usb_desc = { .name = "rn5t618-usb", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = rn5t618_usb_types, - .num_usb_types = ARRAY_SIZE(rn5t618_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .properties = rn5t618_usb_props, .num_properties = ARRAY_SIZE(rn5t618_usb_props), .get_property = rn5t618_usb_get_property, diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c index 
fdfdc83ab04588..235169c85c5d8a 100644 --- a/drivers/power/supply/rt9467-charger.c +++ b/drivers/power/supply/rt9467-charger.c @@ -630,13 +630,6 @@ static int rt9467_psy_set_ieoc(struct rt9467_chg_data *data, int microamp) return ret; } -static const enum power_supply_usb_type rt9467_chg_usb_types[] = { - POWER_SUPPLY_USB_TYPE_UNKNOWN, - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, -}; - static const enum power_supply_property rt9467_chg_properties[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_ONLINE, @@ -745,8 +738,6 @@ static int rt9467_psy_set_property(struct power_supply *psy, RT9467_RANGE_IPREC, val->intval); case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: return rt9467_psy_set_ieoc(data, val->intval); - case POWER_SUPPLY_PROP_USB_TYPE: - return regmap_field_write(data->rm_field[F_USBCHGEN], val->intval); default: return -EINVAL; } @@ -764,7 +755,6 @@ static int rt9467_chg_prop_is_writeable(struct power_supply *psy, case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT: case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: - case POWER_SUPPLY_PROP_USB_TYPE: return 1; default: return 0; @@ -774,8 +764,10 @@ static int rt9467_chg_prop_is_writeable(struct power_supply *psy, static const struct power_supply_desc rt9467_chg_psy_desc = { .name = "rt9467-charger", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = rt9467_chg_usb_types, - .num_usb_types = ARRAY_SIZE(rt9467_chg_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .properties = rt9467_chg_properties, .num_properties = ARRAY_SIZE(rt9467_chg_properties), .property_is_writeable = rt9467_chg_prop_is_writeable, diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c index 868b0703d15c52..c04af1ee89c675 100644 --- a/drivers/power/supply/rt9471.c +++ b/drivers/power/supply/rt9471.c @@ -333,14 +333,6 @@ static enum power_supply_property rt9471_charger_properties[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; -static enum power_supply_usb_type rt9471_charger_usb_types[] = { - POWER_SUPPLY_USB_TYPE_UNKNOWN, - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, -}; - static int rt9471_charger_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { @@ -726,8 +718,11 @@ static int rt9471_register_psy(struct rt9471_chip *chip) desc->name = psy_name; desc->type = POWER_SUPPLY_TYPE_USB; - desc->usb_types = rt9471_charger_usb_types; - desc->num_usb_types = ARRAY_SIZE(rt9471_charger_usb_types); + desc->usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN); desc->properties = rt9471_charger_properties; desc->num_properties = ARRAY_SIZE(rt9471_charger_properties); desc->get_property = rt9471_charger_get_property; diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c index 196d290dc5966b..ebd1edde28f12a 100644 --- a/drivers/power/supply/surface_battery.c +++ b/drivers/power/supply/surface_battery.c @@ -6,7 +6,7 @@ * Copyright (C) 2019-2021 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/power/supply/surface_charger.c b/drivers/power/supply/surface_charger.c index 7a6c62d6f88364..90b823848c998a 100644 --- a/drivers/power/supply/surface_charger.c +++ 
b/drivers/power/supply/surface_charger.c @@ -6,7 +6,7 @@ * Copyright (C) 2019-2021 Maximilian Luz */ -#include +#include #include #include #include diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index 7b9b0b3e164e05..f3f1a0862e935c 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -363,7 +363,7 @@ static int twl4030_charger_update_current(struct twl4030_bci *bci) if (status < 0) return status; cur_reg |= oldreg << 8; - if (reg != oldreg) { + if (reg != cur_reg) { /* disable write protection for one write access for * BCIIREF */ status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xE7, diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c index 7970843a4f480d..7382bec6a43c77 100644 --- a/drivers/power/supply/ucs1002_power.c +++ b/drivers/power/supply/ucs1002_power.c @@ -296,22 +296,17 @@ static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val) return 0; } -static enum power_supply_usb_type ucs1002_usb_types[] = { - POWER_SUPPLY_USB_TYPE_PD, - POWER_SUPPLY_USB_TYPE_SDP, - POWER_SUPPLY_USB_TYPE_DCP, - POWER_SUPPLY_USB_TYPE_CDP, - POWER_SUPPLY_USB_TYPE_UNKNOWN, -}; - static int ucs1002_set_usb_type(struct ucs1002_info *info, int val) { unsigned int mode; - if (val < 0 || val >= ARRAY_SIZE(ucs1002_usb_types)) - return -EINVAL; - - switch (ucs1002_usb_types[val]) { + switch (val) { + /* + * POWER_SUPPLY_USB_TYPE_UNKNOWN == 0, map this to dedicated for + * userspace API compatibility with older versions of this driver + * which mapped 0 to dedicated. + */ + case POWER_SUPPLY_USB_TYPE_UNKNOWN: case POWER_SUPPLY_USB_TYPE_PD: mode = V_SET_ACTIVE_MODE_DEDICATED; break; @@ -428,8 +423,11 @@ static int ucs1002_property_is_writeable(struct power_supply *psy, static const struct power_supply_desc ucs1002_charger_desc = { .name = "ucs1002", .type = POWER_SUPPLY_TYPE_USB, - .usb_types = ucs1002_usb_types, - .num_usb_types = ARRAY_SIZE(ucs1002_usb_types), + .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) | + BIT(POWER_SUPPLY_USB_TYPE_CDP) | + BIT(POWER_SUPPLY_USB_TYPE_DCP) | + BIT(POWER_SUPPLY_USB_TYPE_PD) | + BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN), .get_property = ucs1002_get_property, .set_property = ucs1002_set_property, .property_is_writeable = ucs1002_property_is_writeable, diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 3cffa6c7953880..5e793b80fd6b6b 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -740,7 +740,7 @@ static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim) { struct rapl_primitive_info *rpi = rp->priv->rpi; - if (prim < 0 || prim > NR_RAPL_PRIMITIVES || !rpi) + if (prim < 0 || prim >= NR_RAPL_PRIMITIVES || !rpi) return NULL; return &rpi[prim]; @@ -1267,6 +1267,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { X86_MATCH_VFM(INTEL_LUNARLAKE_M, &rapl_defaults_core), X86_MATCH_VFM(INTEL_ARROWLAKE_H, &rapl_defaults_core), X86_MATCH_VFM(INTEL_ARROWLAKE, &rapl_defaults_core), + X86_MATCH_VFM(INTEL_ARROWLAKE_U, &rapl_defaults_core), X86_MATCH_VFM(INTEL_LAKEFIELD, &rapl_defaults_core), X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt), @@ -1285,6 +1286,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd), X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd), + X86_MATCH_VENDOR_FAM(AMD, 0x1A, &rapl_defaults_amd), X86_MATCH_VENDOR_FAM(HYGON, 0x18, &rapl_defaults_amd), {} }; 
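The power-supply conversions above all follow one pattern: the per-driver array of enum power_supply_usb_type values plus .num_usb_types is replaced by a single .usb_types bitmask built with BIT(), which the sysfs show path can then pass straight to the generic helper as psy->desc->usb_types. A minimal sketch of the resulting descriptor shape, using hypothetical foo_* names rather than any driver in this series:

#include <linux/bits.h>
#include <linux/power_supply.h>

static const enum power_supply_property foo_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_USB_TYPE,
};

static const struct power_supply_desc foo_psy_desc = {
	.name		= "foo-charger",
	.type		= POWER_SUPPLY_TYPE_USB,
	/* Formerly: .usb_types = foo_usb_types, .num_usb_types = ARRAY_SIZE(foo_usb_types) */
	.usb_types	= BIT(POWER_SUPPLY_USB_TYPE_SDP) |
			  BIT(POWER_SUPPLY_USB_TYPE_DCP) |
			  BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
	.properties	= foo_props,
	.num_properties	= ARRAY_SIZE(foo_props),
};

Because the supported set is now an ordinary integer mask, the core no longer needs a per-driver lookup table to report the available USB types.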
@@ -2128,6 +2130,21 @@ void rapl_remove_package(struct rapl_package *rp) } EXPORT_SYMBOL_GPL(rapl_remove_package); +/* + * RAPL Package energy counter scope: + * 1. AMD/HYGON platforms use per-PKG package energy counter + * 2. For Intel platforms + * 2.1 CLX-AP platform has per-DIE package energy counter + * 2.2 Other platforms that use MSR RAPL are single die systems so the + * package energy counter can be considered as per-PKG/per-DIE, + * here it is considered as per-DIE. + * 2.3 New platforms that use TPMI RAPL don't care about the + * scope because they are not MSR/CPU based. + */ +#define rapl_msrs_are_pkg_scope() \ + (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || \ + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + /* caller to ensure CPU hotplug lock is held */ struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, bool id_is_cpu) @@ -2135,8 +2152,14 @@ struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_ struct rapl_package *rp; int uid; - if (id_is_cpu) - uid = topology_logical_die_id(id); + if (id_is_cpu) { + uid = rapl_msrs_are_pkg_scope() ? + topology_physical_package_id(id) : topology_logical_die_id(id); + if (uid < 0) { + pr_err("topology_logical_(package/die)_id() returned a negative value"); + return NULL; + } + } + else uid = id; @@ -2168,9 +2191,14 @@ struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *pr return ERR_PTR(-ENOMEM); if (id_is_cpu) { - rp->id = topology_logical_die_id(id); + rp->id = rapl_msrs_are_pkg_scope() ? + topology_physical_package_id(id) : topology_logical_die_id(id); + if ((int)(rp->id) < 0) { + pr_err("topology_logical_(package/die)_id() returned a negative value"); + return ERR_PTR(-EINVAL); + } rp->lead_cpu = id; - if (topology_max_dies_per_package() > 1) + if (!rapl_msrs_are_pkg_scope() && topology_max_dies_per_package() > 1) snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d", topology_physical_package_id(id), topology_die_id(id)); else diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c index 63d03a0df5cce9..abaffb4e1c1ce9 100644 --- a/drivers/pps/clients/pps_parport.c +++ b/drivers/pps/clients/pps_parport.c @@ -149,6 +149,9 @@ static void parport_attach(struct parport *port) } index = ida_alloc(&pps_client_index, GFP_KERNEL); + if (index < 0) + goto err_free_device; + memset(&pps_client_cb, 0, sizeof(pps_client_cb)); pps_client_cb.private = device; pps_client_cb.irq_func = parport_irq; @@ -159,7 +162,7 @@ static void parport_attach(struct parport *port) index); if (!device->pardev) { pr_err("couldn't register with %s\n", port->name); - goto err_free; + goto err_free_ida; } if (parport_claim_or_block(device->pardev) < 0) { @@ -187,8 +190,9 @@ static void parport_attach(struct parport *port) parport_release(device->pardev); err_unregister_dev: parport_unregister_device(device->pardev); -err_free: +err_free_ida: ida_free(&pps_client_index, index); +err_free_device: kfree(device); } diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 5d19baae6a380a..25d47907db175e 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -319,7 +319,6 @@ static int pps_cdev_release(struct inode *inode, struct file *file) static const struct file_operations pps_cdev_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .poll = pps_cdev_poll, .fasync = pps_cdev_fasync, .compat_ioctl = pps_cdev_compat_ioctl, diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 2067b0120d083d..ea96a14d72d141 100644 --- 
a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -359,11 +359,15 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd, extoff = NULL; break; } - if (extoff->n_samples > PTP_MAX_SAMPLES - || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) { + if (extoff->n_samples > PTP_MAX_SAMPLES || + extoff->rsv[0] || extoff->rsv[1] || + (extoff->clockid != CLOCK_REALTIME && + extoff->clockid != CLOCK_MONOTONIC && + extoff->clockid != CLOCK_MONOTONIC_RAW)) { err = -EINVAL; break; } + sts.clockid = extoff->clockid; for (i = 0; i < extoff->n_samples; i++) { err = ptp->info->gettimex64(ptp->info, &ts, &sts); if (err) diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index 209a45a76e6bd8..b6f1941308b17a 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include "ptp_private.h" #include "ptp_clockmatrix.h" diff --git a/drivers/ptp/ptp_fc3.c b/drivers/ptp/ptp_fc3.c index 6ef982862e27d7..e14e149b746eb2 100644 --- a/drivers/ptp/ptp_fc3.c +++ b/drivers/ptp/ptp_fc3.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include "ptp_private.h" #include "ptp_fc3.h" diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c index 92bb42c43fb29d..d5732490ed9d16 100644 --- a/drivers/ptp/ptp_idt82p33.c +++ b/drivers/ptp/ptp_idt82p33.c @@ -1171,10 +1171,10 @@ static void idt82p33_caps_init(u32 index, struct ptp_clock_info *caps, caps->owner = THIS_MODULE; caps->max_adj = DCO_MAX_PPB; caps->n_per_out = MAX_PER_OUT; - caps->n_ext_ts = MAX_PHC_PLL, - caps->n_pins = max_pins, - caps->adjphase = idt82p33_adjwritephase, - caps->getmaxphase = idt82p33_getmaxphase, + caps->n_ext_ts = MAX_PHC_PLL; + caps->n_pins = max_pins; + caps->adjphase = idt82p33_adjwritephase; + caps->getmaxphase = idt82p33_getmaxphase; caps->adjfine = idt82p33_adjfine; caps->adjtime = idt82p33_adjtime; caps->gettime64 = idt82p33_gettime; diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c index e6f7d2bf8ddee4..14a23d3a27f2c9 100644 --- a/drivers/ptp/ptp_ines.c +++ b/drivers/ptp/ptp_ines.c @@ -562,12 +562,8 @@ static int ines_ts_info(struct mii_timestamper *mii_ts, SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = -1; - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON) | diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index e7479b9b90cb14..5feecaadde8e05 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -1558,22 +1558,24 @@ ptp_ocp_watchdog(struct timer_list *t) static void ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp) { - ktime_t start, end; - ktime_t delay; + ktime_t start, end, delay = U64_MAX; u32 ctrl; + int i; - ctrl = ioread32(&bp->reg->ctrl); - ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE; + for (i = 0; i < 3; i++) { + ctrl = ioread32(&bp->reg->ctrl); + ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE; - iowrite32(ctrl, &bp->reg->ctrl); + iowrite32(ctrl, &bp->reg->ctrl); - start = ktime_get_ns(); + start = ktime_get_raw_ns(); - ctrl = ioread32(&bp->reg->ctrl); + ctrl = ioread32(&bp->reg->ctrl); - end = ktime_get_ns(); + end = ktime_get_raw_ns(); - delay = end - start; + delay = min(delay, end - start); + } bp->ts_window_adjust = (delay >> 5) * 3; } diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 3e53838990f5b8..0915c1e7df16d4 100644 --- 
a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -47,6 +47,13 @@ config PWM_AB8500 To compile this driver as a module, choose M here: the module will be called pwm-ab8500. +config PWM_ADP5585 + tristate "ADP5585 PWM support" + depends on MFD_ADP5585 + help + This option enables support for the PWM function found in the Analog + Devices ADP5585. + config PWM_APPLE tristate "Apple SoC PWM support" depends on ARCH_APPLE || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 0be4f3e6dd432f..9081e0c0e9e097 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PWM) += core.o obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o +obj-$(CONFIG_PWM_ADP5585) += pwm-adp5585.o obj-$(CONFIG_PWM_APPLE) += pwm-apple.o obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 8acbcf5b66739f..6e752e148b98cc 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -325,20 +325,19 @@ EXPORT_SYMBOL_GPL(pwm_adjust_config); * * Returns: 0 on success or a negative error code on failure. */ -int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, - unsigned long timeout) +static int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, + unsigned long timeout) { - if (!pwm || !pwm->chip->ops) - return -EINVAL; + struct pwm_chip *chip = pwm->chip; + const struct pwm_ops *ops = chip->ops; - if (!pwm->chip->ops->capture) + if (!ops->capture) return -ENOSYS; guard(mutex)(&pwm_lock); - return pwm->chip->ops->capture(pwm->chip, pwm, result, timeout); + return ops->capture(chip, pwm, result, timeout); } -EXPORT_SYMBOL_GPL(pwm_capture); static struct pwm_chip *pwmchip_find_by_name(const char *name) { diff --git a/drivers/pwm/pwm-adp5585.c b/drivers/pwm/pwm-adp5585.c new file mode 100644 index 00000000000000..40472ac5db6410 --- /dev/null +++ b/drivers/pwm/pwm-adp5585.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Analog Devices ADP5585 PWM driver + * + * Copyright 2022 NXP + * Copyright 2024 Ideas on Board Oy + * + * Limitations: + * - The .apply() operation executes atomically, but may not wait for the + * period to complete (this is not documented and would need to be tested). + * - Disabling the PWM drives the output pin to a low level immediately. + * - The hardware can only generate normal polarity output. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ADP5585_PWM_CHAN_NUM 1 + +#define ADP5585_PWM_OSC_FREQ_HZ 1000000U +#define ADP5585_PWM_MIN_PERIOD_NS (2ULL * NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ) +#define ADP5585_PWM_MAX_PERIOD_NS (2ULL * 0xffff * NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ) + +static int pwm_adp5585_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct regmap *regmap = pwmchip_get_drvdata(chip); + + /* Configure the R3 pin as PWM output. 
*/ + return regmap_update_bits(regmap, ADP5585_PIN_CONFIG_C, + ADP5585_R3_EXTEND_CFG_MASK, + ADP5585_R3_EXTEND_CFG_PWM_OUT); +} + +static void pwm_adp5585_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct regmap *regmap = pwmchip_get_drvdata(chip); + + regmap_update_bits(regmap, ADP5585_PIN_CONFIG_C, + ADP5585_R3_EXTEND_CFG_MASK, + ADP5585_R3_EXTEND_CFG_GPIO4); +} + +static int pwm_adp5585_apply(struct pwm_chip *chip, + struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct regmap *regmap = pwmchip_get_drvdata(chip); + u64 period, duty_cycle; + u32 on, off; + __le16 val; + int ret; + + if (!state->enabled) { + regmap_clear_bits(regmap, ADP5585_GENERAL_CFG, ADP5585_OSC_EN); + regmap_clear_bits(regmap, ADP5585_PWM_CFG, ADP5585_PWM_EN); + return 0; + } + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (state->period < ADP5585_PWM_MIN_PERIOD_NS) + return -EINVAL; + + period = min(state->period, ADP5585_PWM_MAX_PERIOD_NS); + duty_cycle = min(state->duty_cycle, period); + + /* + * Compute the on and off time. As the internal oscillator frequency is + * 1MHz, the calculation can be simplified without loss of precision. + */ + on = div_u64(duty_cycle, NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ); + off = div_u64(period, NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ) - on; + + val = cpu_to_le16(off); + ret = regmap_bulk_write(regmap, ADP5585_PWM_OFFT_LOW, &val, 2); + if (ret) + return ret; + + val = cpu_to_le16(on); + ret = regmap_bulk_write(regmap, ADP5585_PWM_ONT_LOW, &val, 2); + if (ret) + return ret; + + /* Enable PWM in continuous mode and no external AND'ing. */ + ret = regmap_update_bits(regmap, ADP5585_PWM_CFG, + ADP5585_PWM_IN_AND | ADP5585_PWM_MODE | + ADP5585_PWM_EN, ADP5585_PWM_EN); + if (ret) + return ret; + + ret = regmap_set_bits(regmap, ADP5585_GENERAL_CFG, ADP5585_OSC_EN); + if (ret) + return ret; + + return regmap_set_bits(regmap, ADP5585_PWM_CFG, ADP5585_PWM_EN); +} + +static int pwm_adp5585_get_state(struct pwm_chip *chip, + struct pwm_device *pwm, + struct pwm_state *state) +{ + struct regmap *regmap = pwmchip_get_drvdata(chip); + unsigned int on, off; + unsigned int val; + __le16 on_off; + int ret; + + ret = regmap_bulk_read(regmap, ADP5585_PWM_OFFT_LOW, &on_off, 2); + if (ret) + return ret; + off = le16_to_cpu(on_off); + + ret = regmap_bulk_read(regmap, ADP5585_PWM_ONT_LOW, &on_off, 2); + if (ret) + return ret; + on = le16_to_cpu(on_off); + + state->duty_cycle = on * (NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ); + state->period = (on + off) * (NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ); + + state->polarity = PWM_POLARITY_NORMAL; + + regmap_read(regmap, ADP5585_PWM_CFG, &val); + state->enabled = !!(val & ADP5585_PWM_EN); + + return 0; +} + +static const struct pwm_ops adp5585_pwm_ops = { + .request = pwm_adp5585_request, + .free = pwm_adp5585_free, + .apply = pwm_adp5585_apply, + .get_state = pwm_adp5585_get_state, +}; + +static int adp5585_pwm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent); + struct pwm_chip *chip; + int ret; + + chip = devm_pwmchip_alloc(dev, ADP5585_PWM_CHAN_NUM, 0); + if (IS_ERR(chip)) + return PTR_ERR(chip); + + device_set_of_node_from_dev(dev, dev->parent); + + pwmchip_set_drvdata(chip, adp5585->regmap); + chip->ops = &adp5585_pwm_ops; + + ret = devm_pwmchip_add(dev, chip); + if (ret) + return dev_err_probe(dev, ret, "failed to add PWM chip\n"); + + return 0; +} + +static const struct platform_device_id adp5585_pwm_id_table[] = { + { 
"adp5585-pwm" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, adp5585_pwm_id_table); + +static struct platform_driver adp5585_pwm_driver = { + .driver = { + .name = "adp5585-pwm", + }, + .probe = adp5585_pwm_probe, + .id_table = adp5585_pwm_id_table, +}; +module_platform_driver(adp5585_pwm_driver); + +MODULE_AUTHOR("Xiaoning Wang "); +MODULE_DESCRIPTION("ADP5585 PWM Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index 2afb302be02c11..387a0d1fa4f2d4 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c @@ -234,7 +234,7 @@ static const struct of_device_id atmel_hlcdc_dt_ids[] = { .data = &atmel_hlcdc_pwm_sama5d3_errata, }, { .compatible = "microchip,sam9x60-hlcdc", }, - { /* sentinel */ }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_hlcdc_dt_ids); @@ -288,8 +288,9 @@ static void atmel_hlcdc_pwm_remove(struct platform_device *pdev) static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = { { .compatible = "atmel,hlcdc-pwm" }, - { /* sentinel */ }, + { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, atmel_hlcdc_pwm_dt_ids); static struct platform_driver atmel_hlcdc_pwm_driver = { .driver = { @@ -298,7 +299,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = { .pm = pm_ptr(&atmel_hlcdc_pwm_pm_ops), }, .probe = atmel_hlcdc_pwm_probe, - .remove_new = atmel_hlcdc_pwm_remove, + .remove = atmel_hlcdc_pwm_remove, }; module_platform_driver(atmel_hlcdc_pwm_driver); diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c index f9a9c12cbcdd62..5ee4254d1e4878 100644 --- a/drivers/pwm/pwm-atmel-tcb.c +++ b/drivers/pwm/pwm-atmel-tcb.c @@ -527,7 +527,7 @@ static struct platform_driver atmel_tcb_pwm_driver = { .pm = pm_ptr(&atmel_tcb_pwm_pm_ops), }, .probe = atmel_tcb_pwm_probe, - .remove_new = atmel_tcb_pwm_remove, + .remove = atmel_tcb_pwm_remove, }; module_platform_driver(atmel_tcb_pwm_driver); diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c index 3ad60edf20a565..b5477659ba186c 100644 --- a/drivers/pwm/pwm-axi-pwmgen.c +++ b/drivers/pwm/pwm-axi-pwmgen.c @@ -29,7 +29,6 @@ #include #include -#define AXI_PWMGEN_REG_CORE_VERSION 0x00 #define AXI_PWMGEN_REG_ID 0x04 #define AXI_PWMGEN_REG_SCRATCHPAD 0x08 #define AXI_PWMGEN_REG_CORE_MAGIC 0x0C @@ -145,7 +144,7 @@ static int axi_pwmgen_setup(struct regmap *regmap, struct device *dev) "failed to read expected value from register: got %08x, expected %08x\n", val, AXI_PWMGEN_REG_CORE_MAGIC_VAL); - ret = regmap_read(regmap, AXI_PWMGEN_REG_CORE_VERSION, &val); + ret = regmap_read(regmap, ADI_AXI_REG_VERSION, &val); if (ret) return ret; diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c index c19a482d7e28d9..f8f5af57acba2c 100644 --- a/drivers/pwm/pwm-clk.c +++ b/drivers/pwm/pwm-clk.c @@ -130,7 +130,7 @@ static struct platform_driver pwm_clk_driver = { .of_match_table = pwm_clk_dt_ids, }, .probe = pwm_clk_probe, - .remove_new = pwm_clk_remove, + .remove = pwm_clk_remove, }; module_platform_driver(pwm_clk_driver); diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c index 666f2954133cee..994f89ac43b4e3 100644 --- a/drivers/pwm/pwm-ep93xx.c +++ b/drivers/pwm/pwm-ep93xx.c @@ -17,6 +17,7 @@ */ #include +#include #include #include #include @@ -26,8 +27,6 @@ #include -#include /* for ep93xx_pwm_{acquire,release}_gpio() */ - #define EP93XX_PWMx_TERM_COUNT 0x00 #define EP93XX_PWMx_DUTY_CYCLE 0x04 #define EP93XX_PWMx_ENABLE 0x08 @@ -43,20 +42,6 @@ static inline struct ep93xx_pwm *to_ep93xx_pwm(struct 
pwm_chip *chip) return pwmchip_get_drvdata(chip); } -static int ep93xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct platform_device *pdev = to_platform_device(pwmchip_parent(chip)); - - return ep93xx_pwm_acquire_gpio(pdev); -} - -static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct platform_device *pdev = to_platform_device(pwmchip_parent(chip)); - - ep93xx_pwm_release_gpio(pdev); -} - static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { @@ -155,8 +140,6 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, } static const struct pwm_ops ep93xx_pwm_ops = { - .request = ep93xx_pwm_request, - .free = ep93xx_pwm_free, .apply = ep93xx_pwm_apply, }; @@ -188,9 +171,16 @@ static int ep93xx_pwm_probe(struct platform_device *pdev) return 0; } +static const struct of_device_id ep93xx_pwm_of_ids[] = { + { .compatible = "cirrus,ep9301-pwm" }, + { /* sentinel */} +}; +MODULE_DEVICE_TABLE(of, ep93xx_pwm_of_ids); + static struct platform_driver ep93xx_pwm_driver = { .driver = { .name = "ep93xx-pwm", + .of_match_table = ep93xx_pwm_of_ids, }, .probe = ep93xx_pwm_probe, }; diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c index 2eb0b13d4e1066..e02ee6383dbcea 100644 --- a/drivers/pwm/pwm-hibvt.c +++ b/drivers/pwm/pwm-hibvt.c @@ -276,7 +276,7 @@ static struct platform_driver hibvt_pwm_driver = { .of_match_table = hibvt_pwm_of_match, }, .probe = hibvt_pwm_probe, - .remove_new = hibvt_pwm_remove, + .remove = hibvt_pwm_remove, }; module_platform_driver(hibvt_pwm_driver); diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index d6596583ed4e78..71542956feca9c 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -416,7 +416,7 @@ static struct platform_driver img_pwm_driver = { .of_match_table = img_pwm_of_match, }, .probe = img_pwm_probe, - .remove_new = img_pwm_remove, + .remove = img_pwm_remove, }; module_platform_driver(img_pwm_driver); diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c index 61189cea104670..90b0733c00c1be 100644 --- a/drivers/pwm/pwm-lp3943.c +++ b/drivers/pwm/pwm-lp3943.c @@ -218,8 +218,7 @@ static int lp3943_pwm_parse_dt(struct device *dev, struct lp3943_platform_data *pdata; struct lp3943_pwm_map *pwm_map; enum lp3943_pwm_output *output; - int i, err, proplen, count = 0; - u32 num_outputs; + int i, err, num_outputs, count = 0; if (!node) return -EINVAL; @@ -234,11 +233,8 @@ static int lp3943_pwm_parse_dt(struct device *dev, */ for (i = 0; i < LP3943_NUM_PWMS; i++) { - if (!of_get_property(node, name[i], &proplen)) - continue; - - num_outputs = proplen / sizeof(u32); - if (num_outputs == 0) + num_outputs = of_property_count_u32_elems(node, name[i]); + if (num_outputs <= 0) continue; output = devm_kcalloc(dev, num_outputs, sizeof(*output), diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index 04b76d257fd87c..f351baa63453f5 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -446,7 +446,7 @@ static struct platform_driver lpc18xx_pwm_driver = { .of_match_table = lpc18xx_pwm_of_match, }, .probe = lpc18xx_pwm_probe, - .remove_new = lpc18xx_pwm_remove, + .remove = lpc18xx_pwm_remove, }; module_platform_driver(lpc18xx_pwm_driver); diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c index cd51c4a938f592..1858a77401f80f 100644 --- a/drivers/pwm/pwm-omap-dmtimer.c +++ b/drivers/pwm/pwm-omap-dmtimer.c @@ -355,7 +355,7 @@ static int 
pwm_omap_dmtimer_probe(struct platform_device *pdev) goto err_platdata; } - if (!of_get_property(timer, "ti,timer-pwm", NULL)) { + if (!of_property_read_bool(timer, "ti,timer-pwm")) { dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n"); ret = -ENODEV; goto err_timer_property; @@ -455,7 +455,7 @@ static struct platform_driver pwm_omap_dmtimer_driver = { .of_match_table = pwm_omap_dmtimer_of_match, }, .probe = pwm_omap_dmtimer_probe, - .remove_new = pwm_omap_dmtimer_remove, + .remove = pwm_omap_dmtimer_remove, }; module_platform_driver(pwm_omap_dmtimer_driver); diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 4cfecd88ede045..2261789cc27dae 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -253,7 +253,7 @@ MODULE_DEVICE_TABLE(of, rcar_pwm_of_table); static struct platform_driver rcar_pwm_driver = { .probe = rcar_pwm_probe, - .remove_new = rcar_pwm_remove, + .remove = rcar_pwm_remove, .driver = { .name = "pwm-rcar", .of_match_table = rcar_pwm_of_table, diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index 0fa7575dbb546c..c5f50e5eaf41ac 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -386,7 +386,7 @@ static struct platform_driver rockchip_pwm_driver = { .of_match_table = rockchip_pwm_dt_ids, }, .probe = rockchip_pwm_probe, - .remove_new = rockchip_pwm_remove, + .remove = rockchip_pwm_remove, }; module_platform_driver(rockchip_pwm_driver); diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index ed7957cc51fde7..d5b647e6be7812 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -336,7 +336,7 @@ MODULE_DEVICE_TABLE(of, pwm_sifive_of_match); static struct platform_driver pwm_sifive_driver = { .probe = pwm_sifive_probe, - .remove_new = pwm_sifive_remove, + .remove = pwm_sifive_remove, .driver = { .name = "pwm-sifive", .of_match_table = pwm_sifive_of_match, diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index f85eb41cb08482..eb24054f972973 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -222,7 +222,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm, scale = max_arr / min(max_arr, raw_prd); } else { - scale = priv->max_arr; /* bellow resolution, use max scale */ + scale = priv->max_arr; /* below resolution, use max scale */ } if (psc && scale > 1) { diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 5c29590d1821e2..e60dc7d6b5915f 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -493,7 +493,7 @@ static struct platform_driver sun4i_pwm_driver = { .of_match_table = sun4i_pwm_dt_ids, }, .probe = sun4i_pwm_probe, - .remove_new = sun4i_pwm_remove, + .remove = sun4i_pwm_remove, }; module_platform_driver(sun4i_pwm_driver); diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index a3d69976148f58..172063b51d4424 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -432,7 +432,7 @@ static struct platform_driver tegra_pwm_driver = { .pm = &tegra_pwm_pm_ops, }, .probe = tegra_pwm_probe, - .remove_new = tegra_pwm_remove, + .remove = tegra_pwm_remove, }; module_platform_driver(tegra_pwm_driver); diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index d6c2b1b1387e03..d91b2bdc88fce2 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -324,7 +324,7 @@ static struct platform_driver ecap_pwm_driver = { .pm = pm_ptr(&ecap_pwm_pm_ops), }, .probe = ecap_pwm_probe, - .remove_new = ecap_pwm_remove, + .remove = ecap_pwm_remove, }; 
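The repeated .remove_new to .remove switches in the PWM drivers above are mechanical: struct platform_driver::remove now has the void-returning signature that .remove_new was introduced to stage, so drivers can go back to the plain field name. A minimal sketch of the resulting driver shape, with hypothetical foo_pwm_* names (foo_pwm_probe is assumed to exist elsewhere):

static void foo_pwm_remove(struct platform_device *pdev)
{
	struct pwm_chip *chip = platform_get_drvdata(pdev);

	/* remove() returns void: failures must be handled here, not propagated. */
	pwmchip_remove(chip);
}

static struct platform_driver foo_pwm_driver = {
	.driver = {
		.name = "foo-pwm",
	},
	.probe	= foo_pwm_probe,
	.remove	= foo_pwm_remove,	/* was .remove_new before the rename */
};
module_platform_driver(foo_pwm_driver);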
module_platform_driver(ecap_pwm_driver); diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index e5104725d9b702..0125e73b98dfb4 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -603,7 +603,7 @@ static struct platform_driver ehrpwm_pwm_driver = { .pm = pm_ptr(&ehrpwm_pwm_pm_ops), }, .probe = ehrpwm_pwm_probe, - .remove_new = ehrpwm_pwm_remove, + .remove = ehrpwm_pwm_remove, }; module_platform_driver(ehrpwm_pwm_driver); diff --git a/drivers/ras/amd/atl/Kconfig b/drivers/ras/amd/atl/Kconfig index df49c23e7f6234..551680073e431a 100644 --- a/drivers/ras/amd/atl/Kconfig +++ b/drivers/ras/amd/atl/Kconfig @@ -19,3 +19,7 @@ config AMD_ATL Enable this option if using DRAM ECC on Zen-based systems and OS-based error handling. + +config AMD_ATL_PRM + depends on AMD_ATL && ACPI_PRMT + def_bool y diff --git a/drivers/ras/amd/atl/Makefile b/drivers/ras/amd/atl/Makefile index 4acd5f05bd9c27..b56892c0c0d9c8 100644 --- a/drivers/ras/amd/atl/Makefile +++ b/drivers/ras/amd/atl/Makefile @@ -15,4 +15,6 @@ amd_atl-y += map.o amd_atl-y += system.o amd_atl-y += umc.o +amd_atl-$(CONFIG_AMD_ATL_PRM) += prm.o + obj-$(CONFIG_AMD_ATL) += amd_atl.o diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h index 9de5d53d05688b..143d04c779a821 100644 --- a/drivers/ras/amd/atl/internal.h +++ b/drivers/ras/amd/atl/internal.h @@ -282,6 +282,16 @@ unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err); u64 add_base_and_hole(struct addr_ctx *ctx, u64 addr); u64 remove_base_and_hole(struct addr_ctx *ctx, u64 addr); +#ifdef CONFIG_AMD_ATL_PRM +unsigned long prm_umc_norm_to_sys_addr(u8 socket_id, u64 umc_bank_inst_id, unsigned long addr); +#else +static inline unsigned long prm_umc_norm_to_sys_addr(u8 socket_id, u64 umc_bank_inst_id, + unsigned long addr) +{ + return -ENODEV; +} +#endif + /* * Make a gap in @data that is @num_bits long starting at @bit_num. * e.g. data = 11111111'b diff --git a/drivers/ras/amd/atl/prm.c b/drivers/ras/amd/atl/prm.c new file mode 100644 index 00000000000000..0931a20d213b61 --- /dev/null +++ b/drivers/ras/amd/atl/prm.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Address Translation Library + * + * prm.c : Plumbing code for ACPI Platform Runtime Mechanism (PRM) + * + * Information on AMD PRM modules and handlers including the GUIDs and buffer + * structures used here are defined in the AMD ACPI Porting Guide in the + * chapter "Platform Runtime Mechanism Table (PRMT)" + * + * Copyright (c) 2024, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: John Allen + */ + +#include "internal.h" + +#include + +/* + * PRM parameter buffer - normalized to system physical address, as described + * in the "PRM Parameter Buffer" section of the AMD ACPI Porting Guide. 
+ */ +struct norm_to_sys_param_buf { + u64 norm_addr; + u8 socket; + u64 bank_id; + void *out_buf; +} __packed; + +static const guid_t norm_to_sys_guid = GUID_INIT(0xE7180659, 0xA65D, 0x451D, + 0x92, 0xCD, 0x2B, 0x56, 0xF1, + 0x2B, 0xEB, 0xA6); + +unsigned long prm_umc_norm_to_sys_addr(u8 socket_id, u64 bank_id, unsigned long addr) +{ + struct norm_to_sys_param_buf p_buf; + unsigned long ret_addr; + int ret; + + p_buf.norm_addr = addr; + p_buf.socket = socket_id; + p_buf.bank_id = bank_id; + p_buf.out_buf = &ret_addr; + + ret = acpi_call_prm_handler(norm_to_sys_guid, &p_buf); + if (!ret) + return ret_addr; + + if (ret == -ENODEV) + pr_debug("PRM module/handler not available\n"); + else + pr_notice_once("PRM address translation failed\n"); + + return ret; +} diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c index a1b4accf7b9659..dc8aa12f63c811 100644 --- a/drivers/ras/amd/atl/umc.c +++ b/drivers/ras/amd/atl/umc.c @@ -401,9 +401,14 @@ unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err) u8 coh_st_inst_id = get_coh_st_inst_id(err); unsigned long addr = get_addr(err->addr); u8 die_id = get_die_id(err); + unsigned long ret_addr; pr_debug("socket_id=0x%x die_id=0x%x coh_st_inst_id=0x%x addr=0x%016lx", socket_id, die_id, coh_st_inst_id, addr); + ret_addr = prm_umc_norm_to_sys_addr(socket_id, err->ipid, addr); + if (!IS_ERR_VALUE(ret_addr)) + return ret_addr; + return norm_to_sys_addr(socket_id, die_id, coh_st_inst_id, addr); } diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 4b411a09c1a670..39297f7d817719 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -1373,13 +1373,6 @@ config REGULATOR_SLG51000 The SLG51000 is seven compact and customizable low dropout regulators. -config REGULATOR_SM5703 - tristate "Silicon Mitus SM5703 regulators" - depends on MFD_SM5703 - help - This driver provides support for voltage regulators of SM5703 - multi-function device. 
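The PRM plumbing added in drivers/ras/amd/atl/ above uses an address-or-errno convention: prm_umc_norm_to_sys_addr() returns either a translated system physical address or a negative error number cast to unsigned long, and the caller in umc.c uses IS_ERR_VALUE() to tell the two apart before falling back to the native table walk. A minimal sketch of that calling pattern, with an illustrative foo_* wrapper that is not part of the series:

#include <linux/err.h>

static unsigned long foo_umc_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id,
					 u64 bank_id, unsigned long norm_addr)
{
	unsigned long sys_addr;

	/* Try the firmware (PRM) translation first. */
	sys_addr = prm_umc_norm_to_sys_addr(socket_id, bank_id, norm_addr);
	if (!IS_ERR_VALUE(sys_addr))
		return sys_addr;	/* a real system physical address */

	/*
	 * Otherwise sys_addr carries a negative errno packed into an
	 * unsigned long; fall back to the in-kernel translation, as
	 * convert_umc_mca_addr_to_sys_addr() does above.
	 */
	return norm_to_sys_addr(socket_id, die_id, coh_st_inst_id, norm_addr);
}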
- config REGULATOR_STM32_BOOSTER tristate "STMicroelectronics STM32 BOOSTER" depends on ARCH_STM32 || COMPILE_TEST diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index a61fa42b13c4d2..3d5a803dce8a05 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -160,7 +160,6 @@ obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o obj-$(CONFIG_REGULATOR_SLG51000) += slg51000-regulator.o -obj-$(CONFIG_REGULATOR_SM5703) += sm5703-regulator.o obj-$(CONFIG_REGULATOR_STM32_BOOSTER) += stm32-booster.o obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c index a504b01dd99c8f..0457af23c55acd 100644 --- a/drivers/regulator/act8865-regulator.c +++ b/drivers/regulator/act8865-regulator.c @@ -673,9 +673,7 @@ static int act8865_pmic_probe(struct i2c_client *client) type = (unsigned long) id->data; - voltage_select = !!of_get_property(dev->of_node, - "active-semi,vsel-high", - NULL); + voltage_select = of_property_read_bool(dev->of_node, "active-semi,vsel-high"); } else { type = i2c_id->driver_data; pdata = dev_get_platdata(dev); diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index f3c447ecdc3bfd..a8e91d9d028b89 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -143,6 +143,7 @@ #define AXP717_DCDC3_NUM_VOLTAGES 103 #define AXP717_DCDC_V_OUT_MASK GENMASK(6, 0) #define AXP717_LDO_V_OUT_MASK GENMASK(4, 0) +#define AXP717_BOOST_V_OUT_MASK GENMASK(7, 4) #define AXP803_PWR_OUT_DCDC1_MASK BIT_MASK(0) #define AXP803_PWR_OUT_DCDC2_MASK BIT_MASK(1) @@ -834,6 +835,9 @@ static const struct regulator_desc axp717_regulators[] = { AXP_DESC(AXP717, CPUSLDO, "cpusldo", "vin1", 500, 1400, 50, AXP717_CPUSLDO_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_LDO1_OUTPUT_CONTROL, BIT(4)), + AXP_DESC(AXP717, BOOST, "boost", "vin1", 4550, 5510, 64, + AXP717_BOOST_CONTROL, AXP717_BOOST_V_OUT_MASK, + AXP717_MODULE_EN_CONTROL_2, BIT(4)), }; /* DCDC ranges shared with AXP813 */ diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index c3fb05dce40c95..1bb048de3ecd5a 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -2,6 +2,7 @@ // Copyright (C) 2018 ROHM Semiconductors // bd71837-regulator.c ROHM BD71837MWV/BD71847MWV regulator driver +#include #include #include #include @@ -1635,18 +1636,17 @@ static int get_special_regulators(struct device *dev, unsigned int num_reg_data, int *info) { int ret; - struct device_node *np; - struct device_node *nproot = dev->of_node; int uv; *info = 0; - nproot = of_get_child_by_name(nproot, "regulators"); + struct device_node *nproot __free(device_node) = of_get_child_by_name(dev->of_node, + "regulators"); if (!nproot) { dev_err(dev, "failed to find regulators node\n"); return -ENODEV; } - for_each_child_of_node(nproot, np) { + for_each_child_of_node_scoped(nproot, np) { if (of_property_read_bool(np, "rohm,no-regulator-enable-control")) mark_hw_controlled(dev, np, reg_data, num_reg_data, info); @@ -1656,22 +1656,15 @@ static int get_special_regulators(struct device *dev, if (ret == -EINVAL) continue; else - goto err_out; + return ret; } ret = setup_feedback_loop(dev, np, reg_data, num_reg_data, uv); if (ret) - goto err_out; + return ret; } - of_node_put(nproot); 
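The bd718x7 rework here (and the bd96801 one just below) replaces manual of_node_put() bookkeeping with scope-based cleanup: __free(device_node) releases the parent node when the pointer leaves scope, and for_each_child_of_node_scoped() drops the child reference on every exit path, so the error paths no longer need goto/put pairs. A short sketch of the idiom with hypothetical foo_* names (__free() comes from <linux/cleanup.h>, the scoped iterator from <linux/of.h>):

static int foo_parse_regulators(struct device *dev)
{
	/* Put automatically when nproot leaves scope, even on early return. */
	struct device_node *nproot __free(device_node) =
		of_get_child_by_name(dev->of_node, "regulators");

	if (!nproot)
		return -ENODEV;

	for_each_child_of_node_scoped(nproot, np) {
		/* np is released by the iterator on break/return as well. */
		if (of_property_read_bool(np, "rohm,no-regulator-enable-control"))
			continue;
	}

	return 0;
}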
return 0; - -err_out: - of_node_put(np); - of_node_put(nproot); - - return ret; } static int bd718xx_probe(struct platform_device *pdev) diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c index d4ca7b3f40362a..bf5f9c3f2c9752 100644 --- a/drivers/regulator/bd9576-regulator.c +++ b/drivers/regulator/bd9576-regulator.c @@ -68,25 +68,25 @@ static const struct linear_range voutL1_xvd_ranges[] = { REGULATOR_LINEAR_RANGE(220000, 0x6e, 0x7f, 0), }; -static struct linear_range voutS1_ocw_ranges_internal[] = { +static const struct linear_range voutS1_ocw_ranges_internal[] = { REGULATOR_LINEAR_RANGE(200000, 0x01, 0x04, 0), REGULATOR_LINEAR_RANGE(250000, 0x05, 0x18, 50000), REGULATOR_LINEAR_RANGE(1200000, 0x19, 0x3f, 0), }; -static struct linear_range voutS1_ocw_ranges[] = { +static const struct linear_range voutS1_ocw_ranges[] = { REGULATOR_LINEAR_RANGE(50000, 0x01, 0x04, 0), REGULATOR_LINEAR_RANGE(60000, 0x05, 0x18, 10000), REGULATOR_LINEAR_RANGE(250000, 0x19, 0x3f, 0), }; -static struct linear_range voutS1_ocp_ranges_internal[] = { +static const struct linear_range voutS1_ocp_ranges_internal[] = { REGULATOR_LINEAR_RANGE(300000, 0x01, 0x06, 0), REGULATOR_LINEAR_RANGE(350000, 0x7, 0x1b, 50000), REGULATOR_LINEAR_RANGE(1350000, 0x1c, 0x3f, 0), }; -static struct linear_range voutS1_ocp_ranges[] = { +static const struct linear_range voutS1_ocp_ranges[] = { REGULATOR_LINEAR_RANGE(70000, 0x01, 0x06, 0), REGULATOR_LINEAR_RANGE(80000, 0x7, 0x1b, 10000), REGULATOR_LINEAR_RANGE(280000, 0x1c, 0x3f, 0), diff --git a/drivers/regulator/bd96801-regulator.c b/drivers/regulator/bd96801-regulator.c index 46ca81f187033e..9876cc05867eaa 100644 --- a/drivers/regulator/bd96801-regulator.c +++ b/drivers/regulator/bd96801-regulator.c @@ -34,6 +34,7 @@ * conflict in your downstream driver ;) */ +#include #include #include #include @@ -453,15 +454,14 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap, int num) { int i, ret; - struct device_node *np; - struct device_node *nproot = dev->parent->of_node; - nproot = of_get_child_by_name(nproot, "regulators"); + struct device_node *nproot __free(device_node) = + of_get_child_by_name(dev->parent->of_node, "regulators"); if (!nproot) { dev_err(dev, "failed to find regulators node\n"); return -ENODEV; } - for_each_child_of_node(nproot, np) + for_each_child_of_node_scoped(nproot, np) { for (i = 0; i < num; i++) { if (!of_node_name_eq(np, data[i].desc.of_match)) continue; @@ -476,11 +476,9 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap, dev_err(dev, "Initializing voltages for %s failed\n", data[i].desc.name); - of_node_put(np); - of_node_put(nproot); - return ret; } + if (of_property_read_bool(np, "rohm,keep-on-stby")) { ret = regmap_set_bits(regmap, BD96801_ALWAYS_ON_REG, @@ -489,14 +487,11 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap, dev_err(dev, "failed to set %s on-at-stby\n", data[i].desc.name); - of_node_put(np); - of_node_put(nproot); - return ret; } } } - of_node_put(nproot); + } return 0; } @@ -853,8 +848,6 @@ static int bd96801_probe(struct platform_device *pdev) ldo_errs_arr[temp_notif_ldos] = rdesc[i].ldo_errs; temp_notif_ldos++; } - if (!idesc) - continue; /* Register INTB handlers for configured protections */ for (j = 0; j < idesc->num_irqs; j++) { diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7674b7f2df147b..1179766811f583 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -139,6 
+139,8 @@ static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops) * once. If a task, which is calling this function is other * than the one, which initially locked the mutex, it will * wait on mutex. + * + * Return: 0 on success or a negative error number on failure. */ static inline int regulator_lock_nested(struct regulator_dev *rdev, struct ww_acquire_ctx *ww_ctx) @@ -419,72 +421,6 @@ static void regulator_lock_dependent(struct regulator_dev *rdev, mutex_unlock(®ulator_list_mutex); } -/** - * of_get_child_regulator - get a child regulator device node - * based on supply name - * @parent: Parent device node - * @prop_name: Combination regulator supply name and "-supply" - * - * Traverse all child nodes. - * Extract the child regulator device node corresponding to the supply name. - * returns the device node corresponding to the regulator if found, else - * returns NULL. - */ -static struct device_node *of_get_child_regulator(struct device_node *parent, - const char *prop_name) -{ - struct device_node *regnode = NULL; - struct device_node *child = NULL; - - for_each_child_of_node(parent, child) { - regnode = of_parse_phandle(child, prop_name, 0); - - if (!regnode) { - regnode = of_get_child_regulator(child, prop_name); - if (regnode) - goto err_node_put; - } else { - goto err_node_put; - } - } - return NULL; - -err_node_put: - of_node_put(child); - return regnode; -} - -/** - * of_get_regulator - get a regulator device node based on supply name - * @dev: Device pointer for the consumer (of regulator) device - * @supply: regulator supply name - * - * Extract the regulator device node corresponding to the supply name. - * returns the device node corresponding to the regulator if found, else - * returns NULL. - */ -static struct device_node *of_get_regulator(struct device *dev, const char *supply) -{ - struct device_node *regnode = NULL; - char prop_name[64]; /* 64 is max size of property name */ - - dev_dbg(dev, "Looking up %s-supply from device tree\n", supply); - - snprintf(prop_name, 64, "%s-supply", supply); - regnode = of_parse_phandle(dev->of_node, prop_name, 0); - - if (!regnode) { - regnode = of_get_child_regulator(dev->of_node, prop_name); - if (regnode) - return regnode; - - dev_dbg(dev, "Looking up %s property in node %pOF failed\n", - prop_name, dev->of_node); - return NULL; - } - return regnode; -} - /* Platform voltage constraint check */ int regulator_check_voltage(struct regulator_dev *rdev, int *min_uV, int *max_uV) @@ -1462,6 +1398,8 @@ static int handle_notify_limits(struct regulator_dev *rdev, * Constraints *must* be set by platform code in order for some * regulator operations to proceed i.e. set_voltage, set_current_limit, * set_mode. + * + * Return: 0 on success or a negative error number on failure. */ static int set_machine_constraints(struct regulator_dev *rdev) { @@ -1700,6 +1638,8 @@ static int set_machine_constraints(struct regulator_dev *rdev) * Called by platform initialisation code to set the supply regulator for this * regulator. This ensures that a regulators supply will also be enabled by the * core if it's child is enabled. + * + * Return: 0 on success or a negative error number on failure. */ static int set_supply(struct regulator_dev *rdev, struct regulator_dev *supply_rdev) @@ -1732,6 +1672,8 @@ static int set_supply(struct regulator_dev *rdev, * sources to symbolic names for supplies for use by devices. 
Devices * should use these symbolic names to request regulators, avoiding the * need to provide board-specific regulator names as platform data. + * + * Return: 0 on success or a negative error number on failure. */ static int set_consumer_device_supply(struct regulator_dev *rdev, const char *consumer_dev_name, @@ -1998,18 +1940,19 @@ static struct regulator_dev *regulator_lookup_by_name(const char *name) * @dev: device for regulator "consumer". * @supply: Supply name or regulator ID. * + * Return: pointer to &struct regulator_dev or ERR_PTR() encoded negative error number. + * * If successful, returns a struct regulator_dev that corresponds to the name * @supply and with the embedded struct device refcount incremented by one. * The refcount must be dropped by calling put_device(). - * On failure one of the following ERR-PTR-encoded values is returned: - * -ENODEV if lookup fails permanently, -EPROBE_DEFER if lookup could succeed + * On failure one of the following ERR_PTR() encoded values is returned: + * -%ENODEV if lookup fails permanently, -%EPROBE_DEFER if lookup could succeed * in the future. */ static struct regulator_dev *regulator_dev_lookup(struct device *dev, const char *supply) { struct regulator_dev *r = NULL; - struct device_node *node; struct regulator_map *map; const char *devname = NULL; @@ -2017,19 +1960,14 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev, /* first do a dt based lookup */ if (dev && dev->of_node) { - node = of_get_regulator(dev, supply); - if (node) { - r = of_find_regulator_by_node(node); - of_node_put(node); - if (r) - return r; + r = of_regulator_dev_lookup(dev, supply); + if (!IS_ERR(r)) + return r; + if (PTR_ERR(r) == -EPROBE_DEFER) + return r; - /* - * We have a node, but there is no device. - * assume it has not registered yet. - */ - return ERR_PTR(-EPROBE_DEFER); - } + if (PTR_ERR(r) == -ENODEV) + r = NULL; } /* if not found, try doing it non-dt way */ @@ -2168,26 +2106,43 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) return ret; } -/* Internal regulator request function */ -struct regulator *_regulator_get(struct device *dev, const char *id, - enum regulator_get_type get_type) +/* common pre-checks for regulator requests */ +int _regulator_get_common_check(struct device *dev, const char *id, + enum regulator_get_type get_type) { - struct regulator_dev *rdev; - struct regulator *regulator; - struct device_link *link; - int ret; - if (get_type >= MAX_GET_TYPE) { dev_err(dev, "invalid type %d in %s\n", get_type, __func__); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (id == NULL) { - pr_err("get() with no identifier\n"); - return ERR_PTR(-EINVAL); + dev_err(dev, "regulator request with no identifier\n"); + return -EINVAL; } - rdev = regulator_dev_lookup(dev, id); + return 0; +} + +/** + * _regulator_get_common - Common code for regulator requests + * @rdev: regulator device pointer as returned by *regulator_dev_lookup() + * Its reference count is expected to have been incremented. + * @dev: device used for dev_printk messages + * @id: Supply name or regulator ID + * @get_type: enum regulator_get_type value corresponding to type of request + * + * Returns: pointer to struct regulator corresponding to @rdev, or ERR_PTR() + * encoded error. + * + * This function should be chained with *regulator_dev_lookup() functions. 
+ */ +struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct device *dev, + const char *id, enum regulator_get_type get_type) +{ + struct regulator *regulator; + struct device_link *link; + int ret; + if (IS_ERR(rdev)) { ret = PTR_ERR(rdev); @@ -2303,18 +2258,33 @@ struct regulator *_regulator_get(struct device *dev, const char *id, return regulator; } +/* Internal regulator request function */ +struct regulator *_regulator_get(struct device *dev, const char *id, + enum regulator_get_type get_type) +{ + struct regulator_dev *rdev; + int ret; + + ret = _regulator_get_common_check(dev, id, get_type); + if (ret) + return ERR_PTR(ret); + + rdev = regulator_dev_lookup(dev, id); + return _regulator_get_common(rdev, dev, id, get_type); +} + /** * regulator_get - lookup and obtain a reference to a regulator. * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * - * Returns a struct regulator corresponding to the regulator producer, - * or IS_ERR() condition containing errno. - * * Use of supply names configured via set_consumer_device_supply() is * strongly encouraged. It is recommended that the supply name used * should match the name used for the supply and/or the relevant * device pins in the datasheet. + * + * Return: Pointer to a &struct regulator corresponding to the regulator + * producer, or an ERR_PTR() encoded negative error number. */ struct regulator *regulator_get(struct device *dev, const char *id) { @@ -2327,11 +2297,9 @@ EXPORT_SYMBOL_GPL(regulator_get); * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * - * Returns a struct regulator corresponding to the regulator producer, - * or IS_ERR() condition containing errno. Other consumers will be - * unable to obtain this regulator while this reference is held and the - * use count for the regulator will be initialised to reflect the current - * state of the regulator. + * Other consumers will be unable to obtain this regulator while this + * reference is held and the use count for the regulator will be + * initialised to reflect the current state of the regulator. * * This is intended for use by consumers which cannot tolerate shared * use of the regulator such as those which need to force the @@ -2342,6 +2310,9 @@ EXPORT_SYMBOL_GPL(regulator_get); * strongly encouraged. It is recommended that the supply name used * should match the name used for the supply and/or the relevant * device pins in the datasheet. + * + * Return: Pointer to a &struct regulator corresponding to the regulator + * producer, or an ERR_PTR() encoded negative error number. */ struct regulator *regulator_get_exclusive(struct device *dev, const char *id) { @@ -2354,9 +2325,6 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive); * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * - * Returns a struct regulator corresponding to the regulator producer, - * or IS_ERR() condition containing errno. - * * This is intended for use by consumers for devices which can have * some supplies unconnected in normal use, such as some MMC devices. * It can allow the regulator core to provide stub supplies for other @@ -2368,6 +2336,9 @@ EXPORT_SYMBOL_GPL(regulator_get_exclusive); * strongly encouraged. It is recommended that the supply name used * should match the name used for the supply and/or the relevant * device pins in the datasheet. + * + * Return: Pointer to a &struct regulator corresponding to the regulator + * producer, or an ERR_PTR() encoded negative error number. 
*/ struct regulator *regulator_get_optional(struct device *dev, const char *id) { @@ -2448,6 +2419,8 @@ EXPORT_SYMBOL_GPL(regulator_put); * * All lookups for id on dev will instead be conducted for alias_id on * alias_dev. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, @@ -2507,12 +2480,12 @@ EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias); * lookup the supply * @num_id: Number of aliases to register * - * @return 0 on success, an errno on failure. - * * This helper function allows drivers to register several supply * aliases in one operation. If any of the aliases cannot be * registered any aliases that were registered will be removed * before returning to the caller. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, @@ -2637,6 +2610,8 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev) * * GPIO is enabled in case of initial use. (enable_count is 0) * GPIO is disabled when it is not shared any more. (enable_count <= 1) + * + * Return: 0 on success or a negative error number on failure. */ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) { @@ -2707,10 +2682,8 @@ static void _regulator_delay_helper(unsigned int delay) } /** - * _regulator_check_status_enabled - * - * A helper function to check if the regulator status can be interpreted - * as 'regulator is enabled'. + * _regulator_check_status_enabled - check if regulator status can be + * interpreted as "regulator is enabled" * @rdev: the regulator device to check * * Return: @@ -2839,7 +2812,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev) * responsible for keeping track of the refcount for a given regulator consumer * and applying / unapplying these things. * - * Returns 0 upon no error; -error upon error. + * Return: 0 on success or negative error number on failure. */ static int _regulator_handle_consumer_enable(struct regulator *regulator) { @@ -2865,7 +2838,7 @@ static int _regulator_handle_consumer_enable(struct regulator *regulator) * * The opposite of _regulator_handle_consumer_enable(). * - * Returns 0 upon no error; -error upon error. + * Return: 0 on success or a negative error number on failure. */ static int _regulator_handle_consumer_disable(struct regulator *regulator) { @@ -2961,6 +2934,8 @@ static int _regulator_enable(struct regulator *regulator) * * NOTE: the output value can be set by other drivers, boot loader or may be * hardwired in the regulator. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_enable(struct regulator *regulator) { @@ -3071,6 +3046,8 @@ static int _regulator_disable(struct regulator *regulator) * NOTE: this will only disable the regulator output if no other consumer * devices have it enabled, the regulator device supports disabling and * machine constraints permit this operation. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_disable(struct regulator *regulator) { @@ -3120,6 +3097,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev) * NOTE: this *will* disable the regulator output even if other consumer * devices have it enabled. This should be used for situations when device * damage will likely occur if the regulator is not disabled (e.g. over temp). + * + * Return: 0 on success or a negative error number on failure. 
*/ int regulator_force_disable(struct regulator *regulator) { @@ -3202,6 +3181,8 @@ static void regulator_disable_work(struct work_struct *work) * NOTE: this will only disable the regulator output if no other consumer * devices have it enabled, the regulator device supports disabling and * machine constraints permit this operation. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_disable_deferred(struct regulator *regulator, int ms) { @@ -3273,13 +3254,13 @@ static int _regulator_list_voltage(struct regulator_dev *rdev, * regulator_is_enabled - is the regulator output enabled * @regulator: regulator source * - * Returns positive if the regulator driver backing the source/client - * has requested that the device be enabled, zero if it hasn't, else a - * negative errno code. - * * Note that the device backing this regulator handle can have multiple * users, so it might be enabled even if regulator_enable() was never * called for this particular source. + * + * Return: Positive if the regulator driver backing the source/client + * has requested that the device be enabled, zero if it hasn't, + * else a negative error number. */ int regulator_is_enabled(struct regulator *regulator) { @@ -3300,9 +3281,10 @@ EXPORT_SYMBOL_GPL(regulator_is_enabled); * regulator_count_voltages - count regulator_list_voltage() selectors * @regulator: regulator source * - * Returns number of selectors, or negative errno. Selectors are - * numbered starting at zero, and typically correspond to bitfields - * in hardware registers. + * Return: Number of selectors for @regulator, or negative error number. + * + * Selectors are numbered starting at zero, and typically correspond to + * bitfields in hardware registers. */ int regulator_count_voltages(struct regulator *regulator) { @@ -3324,9 +3306,9 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages); * @selector: identify voltage to list * Context: can sleep * - * Returns a voltage that can be passed to @regulator_set_voltage(), - * zero if this selector code can't be used on this system, or a - * negative errno. + * Return: Voltage for @selector that can be passed to regulator_set_voltage(), + * 0 if @selector can't be used on this system, or a negative error + * number on failure. */ int regulator_list_voltage(struct regulator *regulator, unsigned selector) { @@ -3338,8 +3320,8 @@ EXPORT_SYMBOL_GPL(regulator_list_voltage); * regulator_get_regmap - get the regulator's register map * @regulator: regulator source * - * Returns the register map for the given regulator, or an ERR_PTR value - * if the regulator doesn't use regmap. + * Return: Pointer to the &struct regmap for @regulator, or ERR_PTR() + * encoded -%EOPNOTSUPP if @regulator doesn't use regmap. */ struct regmap *regulator_get_regmap(struct regulator *regulator) { @@ -3360,8 +3342,11 @@ EXPORT_SYMBOL_GPL(regulator_get_regmap); * hardware or firmware that can make I2C requests behind the kernel's back, * for example. * + * Return: 0 on success, or -%EOPNOTSUPP if the regulator does not support + * voltage selectors. + * * On success, the output parameters @vsel_reg and @vsel_mask are filled in - * and 0 is returned, otherwise a negative errno is returned. + * and 0 is returned, otherwise a negative error number is returned. */ int regulator_get_hardware_vsel_register(struct regulator *regulator, unsigned *vsel_reg, @@ -3389,7 +3374,9 @@ EXPORT_SYMBOL_GPL(regulator_get_hardware_vsel_register); * directly written to the regulator registers. 
The address of the voltage * register can be determined by calling @regulator_get_hardware_vsel_register. * - * On error a negative errno is returned. + * Return: 0 on success, -%EINVAL if the selector is outside the supported + * range, or -%EOPNOTSUPP if the regulator does not support voltage + * selectors. */ int regulator_list_hardware_vsel(struct regulator *regulator, unsigned selector) @@ -3416,7 +3403,7 @@ EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel); * Request that the regulator be enabled/disabled with the regulator output at * the predefined voltage or current value. * - * On success 0 is returned, otherwise a negative errno is returned. + * Return: 0 on success or a negative error number on failure. */ int regulator_hardware_enable(struct regulator *regulator, bool enable) { @@ -3440,8 +3427,8 @@ EXPORT_SYMBOL_GPL(regulator_hardware_enable); * regulator_get_linear_step - return the voltage step size between VSEL values * @regulator: regulator source * - * Returns the voltage step size between VSEL values for linear - * regulators, or return 0 if the regulator isn't a linear regulator. + * Return: The voltage step size between VSEL values for linear regulators, + * or 0 if the regulator isn't a linear regulator. */ unsigned int regulator_get_linear_step(struct regulator *regulator) { @@ -3458,7 +3445,9 @@ EXPORT_SYMBOL_GPL(regulator_get_linear_step); * @min_uV: Minimum required voltage in uV. * @max_uV: Maximum required voltage in uV. * - * Returns a boolean. + * Return: 1 if the voltage range is supported, 0 if not, or a negative error + * number if @regulator's voltage can't be changed and voltage readback + * failed. */ int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV) @@ -4210,6 +4199,8 @@ static int regulator_balance_voltage(struct regulator_dev *rdev, * request voltage that meets the system constraints will be used. * Regulator system constraints must be set for this regulator before * calling this function otherwise this call will fail. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) { @@ -4320,6 +4311,8 @@ EXPORT_SYMBOL_GPL(regulator_set_suspend_voltage); * Provided with the starting and ending voltage, this function attempts to * calculate the time in microseconds required to rise or fall to this new * voltage. + * + * Return: ramp time in microseconds, or a negative error number if calculation failed. */ int regulator_set_voltage_time(struct regulator *regulator, int old_uV, int new_uV) @@ -4377,6 +4370,8 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_time); * * Drivers providing ramp_delay in regulation_constraints can use this as their * set_voltage_time_sel() operation. + * + * Return: ramp time in microseconds, or a negative error number if calculation failed. */ int regulator_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int old_selector, @@ -4429,6 +4424,8 @@ int regulator_sync_voltage_rdev(struct regulator_dev *rdev) * Re-apply the last configured voltage. This is intended to be used * where some external control source the consumer is cooperating with * has caused the configured voltage to change. + * + * Return: 0 on success or a negative error number on failure. 
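A short consumer sketch of the voltage enumeration and setting calls documented above; the 1.8 V target is an arbitrary example value and the handle is assumed to have been obtained with regulator_get():

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/regulator/consumer.h>

static int example_request_1v8(struct regulator *reg)
{
	int i, n, ret;

	/* Enumerate selectors; a 0 uV result marks an unusable selector. */
	n = regulator_count_voltages(reg);
	for (i = 0; i < n; i++)
		pr_debug("selector %d -> %d uV\n", i, regulator_list_voltage(reg, i));

	/* Check the target against machine constraints before applying it. */
	ret = regulator_is_supported_voltage(reg, 1800000, 1800000);
	if (ret < 0)
		return ret;
	if (!ret)
		return -EINVAL;

	return regulator_set_voltage(reg, 1800000, 1800000);
}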
*/ int regulator_sync_voltage(struct regulator *regulator) { @@ -4527,7 +4524,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev); * regulator_get_voltage - get regulator output voltage * @regulator: regulator source * - * This returns the current regulator voltage in uV. + * Return: Current regulator voltage in uV, or a negative error number on failure. * * NOTE: If the regulator is disabled it will return the voltage value. This * function should not be used to determine regulator state. @@ -4560,6 +4557,8 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage); * * NOTE: Regulator system constraints must be set for this regulator before * calling this function otherwise this call will fail. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_set_current_limit(struct regulator *regulator, int min_uA, int max_uA) @@ -4611,7 +4610,8 @@ static int _regulator_get_current_limit(struct regulator_dev *rdev) * regulator_get_current_limit - get regulator output current * @regulator: regulator source * - * This returns the current supplied by the specified current sink in uA. + * Return: Current supplied by the specified current sink in uA, + * or a negative error number on failure. * * NOTE: If the regulator is disabled it will return the current value. This * function should not be used to determine regulator state. @@ -4632,6 +4632,8 @@ EXPORT_SYMBOL_GPL(regulator_get_current_limit); * * NOTE: Regulator system constraints must be set for this regulator before * calling this function otherwise this call will fail. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_set_mode(struct regulator *regulator, unsigned int mode) { @@ -4693,6 +4695,9 @@ static unsigned int _regulator_get_mode(struct regulator_dev *rdev) * @regulator: regulator source * * Get the current regulator operating mode. + * + * Return: Current operating mode as %REGULATOR_MODE_* values, + * or a negative error number on failure. */ unsigned int regulator_get_mode(struct regulator *regulator) { @@ -4739,6 +4744,8 @@ static int _regulator_get_error_flags(struct regulator_dev *rdev, * @flags: pointer to store error flags * * Get the current regulator error information. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_get_error_flags(struct regulator *regulator, unsigned int *flags) @@ -4779,7 +4786,7 @@ EXPORT_SYMBOL_GPL(regulator_get_error_flags); * If a regulator is an always-on regulator then an individual consumer's * load will still be removed if that consumer is fully disabled. * - * On error a negative errno is returned. + * Return: 0 on success or a negative error number on failure. */ int regulator_set_load(struct regulator *regulator, int uA_load) { @@ -4811,6 +4818,9 @@ EXPORT_SYMBOL_GPL(regulator_set_load); * for the regulator also enable bypass mode and the machine * constraints allow this. Bypass mode means that the regulator is * simply passing the input directly to the output with no regulation. + * + * Return: 0 on success or if changing bypass is not possible, or + * a negative error number on failure. */ int regulator_allow_bypass(struct regulator *regulator, bool enable) { @@ -4868,6 +4878,8 @@ EXPORT_SYMBOL_GPL(regulator_allow_bypass); * @nb: notifier block * * Register notifier block to receive regulator events. + * + * Return: 0 on success or a negative error number on failure. 
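To illustrate the notifier interface documented above, a consumer could subscribe to regulator events roughly as follows; the callback body and names are hypothetical:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/regulator/consumer.h>

/* Called through the regulator notifier chain for events on this supply. */
static int example_reg_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	if (event & REGULATOR_EVENT_UNDER_VOLTAGE)
		pr_warn("example: supply reported an under-voltage event\n");

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_reg_event,
};

/* @reg is a handle previously obtained with regulator_get(). */
static int example_listen(struct regulator *reg)
{
	return regulator_register_notifier(reg, &example_nb);
}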
*/ int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb) @@ -4883,6 +4895,8 @@ EXPORT_SYMBOL_GPL(regulator_register_notifier); * @nb: notifier block * * Unregister regulator event notifier block. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_unregister_notifier(struct regulator *regulator, struct notifier_block *nb) @@ -4964,12 +4978,12 @@ int _regulator_bulk_get(struct device *dev, int num_consumers, * @num_consumers: Number of consumers to register * @consumers: Configuration of consumers; clients are stored here. * - * @return 0 on success, an errno on failure. - * * This helper function allows drivers to get several regulator * consumers in one operation. If any of the regulators cannot be * acquired then any regulators that were allocated will be freed * before returning to the caller. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers) @@ -4990,12 +5004,13 @@ static void regulator_bulk_enable_async(void *data, async_cookie_t cookie) * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. - * @return 0 on success, an errno on failure * * This convenience API allows consumers to enable multiple regulator * clients in a single API call. If any consumers cannot be enabled * then any others that were enabled will be disabled again prior to * return. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers) @@ -5039,12 +5054,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_enable); * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. - * @return 0 on success, an errno on failure * * This convenience API allows consumers to disable multiple regulator * clients in a single API call. If any consumers cannot be disabled * then any others that were disabled will be enabled again prior to * return. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_disable(int num_consumers, struct regulator_bulk_data *consumers) @@ -5078,7 +5094,6 @@ EXPORT_SYMBOL_GPL(regulator_bulk_disable); * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. - * @return 0 on success, an errno on failure * * This convenience API allows consumers to forcibly disable multiple regulator * clients in a single API call. @@ -5086,6 +5101,8 @@ EXPORT_SYMBOL_GPL(regulator_bulk_disable); * likely occur if the regulators are not disabled (e.g. over temp). * Although regulator_force_disable function call for some consumers can * return error numbers, the function is called for all consumers. + * + * Return: 0 on success or a negative error number on failure. */ int regulator_bulk_force_disable(int num_consumers, struct regulator_bulk_data *consumers) @@ -5170,6 +5187,8 @@ static void regulator_handle_critical(struct regulator_dev *rdev, * * Called by regulator drivers to notify clients a regulator event has * occurred. + * + * Return: %NOTIFY_DONE. */ int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) @@ -5188,6 +5207,8 @@ EXPORT_SYMBOL_GPL(regulator_notifier_call_chain); * @mode: Mode to convert * * Convert a regulator mode into a status. + * + * Return: %REGULATOR_STATUS_* value corresponding to given mode. 
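The bulk helpers documented above are typically used as in the following sketch; the supply names are hypothetical:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static int example_bulk_power_on_off(struct device *dev)
{
	/* regulator_bulk_get() fills in the consumer handles. */
	struct regulator_bulk_data supplies[] = {
		{ .supply = "vdda" },	/* hypothetical supply names */
		{ .supply = "vddio" },
	};
	int ret;

	ret = regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
	if (ret)
		return ret;

	/* Enables every supply, or rolls back the ones already enabled. */
	ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
	if (ret)
		goto free;

	/* ... device is powered here ... */

	regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
free:
	regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
	return ret;
}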
*/ int regulator_mode_to_status(unsigned int mode) { @@ -5582,8 +5603,9 @@ static struct regulator_coupler generic_regulator_coupler = { * @cfg: runtime configuration for regulator * * Called by regulator drivers to register a regulator. - * Returns a valid pointer to struct regulator_dev on success - * or an ERR_PTR() on error. + * + * Return: Pointer to a valid &struct regulator_dev on success or + * an ERR_PTR() encoded negative error number on failure. */ struct regulator_dev * regulator_register(struct device *dev, @@ -5877,6 +5899,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister); * @dev: ``&struct device`` pointer that is passed to _regulator_suspend() * * Configure each regulator with it's suspend operating parameters for state. + * + * Return: 0 on success or a negative error number on failure. */ static int regulator_suspend(struct device *dev) { @@ -5966,6 +5990,8 @@ EXPORT_SYMBOL_GPL(regulator_has_full_constraints); * * Get rdev regulator driver private data. This call can be used in the * regulator driver context. + * + * Return: Pointer to regulator driver private data. */ void *rdev_get_drvdata(struct regulator_dev *rdev) { @@ -5979,6 +6005,8 @@ EXPORT_SYMBOL_GPL(rdev_get_drvdata); * * Get regulator driver private data. This call can be used in the consumer * driver context when non API regulator specific functions need to be called. + * + * Return: Pointer to regulator driver private data. */ void *regulator_get_drvdata(struct regulator *regulator) { @@ -6000,6 +6028,8 @@ EXPORT_SYMBOL_GPL(regulator_set_drvdata); /** * rdev_get_id - get regulator ID * @rdev: regulator + * + * Return: Regulator ID for @rdev. */ int rdev_get_id(struct regulator_dev *rdev) { diff --git a/drivers/regulator/da903x-regulator.c b/drivers/regulator/da903x-regulator.c index f79337079a456c..2f85897183b3b0 100644 --- a/drivers/regulator/da903x-regulator.c +++ b/drivers/regulator/da903x-regulator.c @@ -61,7 +61,7 @@ #define DA9034_MDTV2 (0x33) #define DA9034_MVRC (0x34) -/* DA9035 Registers. DA9034 Registers are comptabile to DA9035. */ +/* DA9035 Registers. DA9034 Registers are compatible to DA9035. 
*/ #define DA9035_OVER3 (0x12) #define DA9035_VCC2 (0x1f) #define DA9035_3DTV1 (0x2c) diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c index ab6f5d61b17309..fbebe538a648ee 100644 --- a/drivers/regulator/da9052-regulator.c +++ b/drivers/regulator/da9052-regulator.c @@ -67,11 +67,11 @@ struct da9052_regulator_info { struct da9052_regulator { struct da9052 *da9052; - struct da9052_regulator_info *info; + const struct da9052_regulator_info *info; struct regulator_dev *rdev; }; -static int verify_range(struct da9052_regulator_info *info, +static int verify_range(const struct da9052_regulator_info *info, int min_uV, int max_uV) { if (min_uV > info->max_uV || max_uV < info->min_uV) @@ -151,7 +151,7 @@ static int da9052_list_voltage(struct regulator_dev *rdev, unsigned int selector) { struct da9052_regulator *regulator = rdev_get_drvdata(rdev); - struct da9052_regulator_info *info = regulator->info; + const struct da9052_regulator_info *info = regulator->info; int id = rdev_get_id(rdev); int volt_uV; @@ -175,7 +175,7 @@ static int da9052_map_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { struct da9052_regulator *regulator = rdev_get_drvdata(rdev); - struct da9052_regulator_info *info = regulator->info; + const struct da9052_regulator_info *info = regulator->info; int id = rdev_get_id(rdev); int ret, sel; @@ -206,7 +206,7 @@ static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector) { struct da9052_regulator *regulator = rdev_get_drvdata(rdev); - struct da9052_regulator_info *info = regulator->info; + const struct da9052_regulator_info *info = regulator->info; int id = rdev_get_id(rdev); int ret; @@ -237,7 +237,7 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int new_sel) { struct da9052_regulator *regulator = rdev_get_drvdata(rdev); - struct da9052_regulator_info *info = regulator->info; + const struct da9052_regulator_info *info = regulator->info; int id = rdev_get_id(rdev); int ret = 0; @@ -327,7 +327,7 @@ static const struct regulator_ops da9052_ldo_ops = { .activate_bit = (abits),\ } -static struct da9052_regulator_info da9052_regulator_info[] = { +static const struct da9052_regulator_info da9052_regulator_info[] = { DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO), DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), @@ -344,7 +344,7 @@ static struct da9052_regulator_info da9052_regulator_info[] = { DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0), }; -static struct da9052_regulator_info da9053_regulator_info[] = { +static const struct da9052_regulator_info da9053_regulator_info[] = { DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO), DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), @@ -361,10 +361,10 @@ static struct da9052_regulator_info da9053_regulator_info[] = { DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0), }; -static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id, - int id) +static inline const struct da9052_regulator_info *find_regulator_info(u8 chip_id, + int id) { - struct da9052_regulator_info *info; + const struct da9052_regulator_info *info; int i; switch (chip_id) { diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c index 352547c375bd78..a0d3414aa79e6b 100644 --- 
a/drivers/regulator/da9055-regulator.c +++ b/drivers/regulator/da9055-regulator.c @@ -73,7 +73,7 @@ struct da9055_regulator_info { struct da9055_regulator { struct da9055 *da9055; - struct da9055_regulator_info *info; + const struct da9055_regulator_info *info; struct regulator_dev *rdev; enum gpio_select reg_rselect; }; @@ -81,7 +81,7 @@ struct da9055_regulator { static unsigned int da9055_buck_get_mode(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; int ret, mode = 0; ret = da9055_reg_read(regulator->da9055, info->mode.reg); @@ -107,7 +107,7 @@ static int da9055_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; int val = 0; switch (mode) { @@ -129,7 +129,7 @@ static int da9055_buck_set_mode(struct regulator_dev *rdev, static unsigned int da9055_ldo_get_mode(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; int ret; ret = da9055_reg_read(regulator->da9055, info->volt.reg_b); @@ -145,7 +145,7 @@ static unsigned int da9055_ldo_get_mode(struct regulator_dev *rdev) static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; struct da9055_volt_reg volt = info->volt; int val = 0; @@ -167,7 +167,7 @@ static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) static int da9055_regulator_get_voltage_sel(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; struct da9055_volt_reg volt = info->volt; int ret, sel; @@ -199,7 +199,7 @@ static int da9055_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; int ret; /* @@ -242,7 +242,7 @@ static int da9055_regulator_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; int ret; /* Select register set B for suspend voltage ramping. */ @@ -264,7 +264,7 @@ static int da9055_regulator_set_suspend_voltage(struct regulator_dev *rdev, static int da9055_suspend_enable(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; /* Select register set B for voltage ramping. 
*/ if (regulator->reg_rselect == NO_GPIO) @@ -277,7 +277,7 @@ static int da9055_suspend_enable(struct regulator_dev *rdev) static int da9055_suspend_disable(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; /* Diselect register set B. */ if (regulator->reg_rselect == NO_GPIO) @@ -396,7 +396,7 @@ static const struct regulator_ops da9055_ldo_ops = { },\ } -static struct da9055_regulator_info da9055_regulator_info[] = { +static const struct da9055_regulator_info da9055_regulator_info[] = { DA9055_BUCK(BUCK1, 25, 725, 2075, 6, 9, 0xc, 2), DA9055_BUCK(BUCK2, 25, 925, 2500, 6, 0, 3, 0), DA9055_LDO(LDO1, 50, 900, 3300, 6, 2), @@ -417,7 +417,7 @@ static int da9055_gpio_init(struct device *dev, struct regulator_config *config, struct da9055_pdata *pdata, int id) { - struct da9055_regulator_info *info = regulator->info; + const struct da9055_regulator_info *info = regulator->info; struct gpio_desc *ren; struct gpio_desc *ena; struct gpio_desc *rsel; @@ -491,9 +491,9 @@ static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data) return IRQ_HANDLED; } -static inline struct da9055_regulator_info *find_regulator_info(int id) +static inline const struct da9055_regulator_info *find_regulator_info(int id) { - struct da9055_regulator_info *info; + const struct da9055_regulator_info *info; int i; for (i = 0; i < ARRAY_SIZE(da9055_regulator_info); i++) { diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index 82bf321ae06f0c..9d369cc45d4178 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -133,7 +133,7 @@ struct da9063_regulator_info { .suspend_vsel_reg = DA9063_REG_V##regl_name##_B, \ .mode = BFIELD(DA9063_REG_##regl_name##_CFG, DA9063_BUCK_MODE_MASK) -/* Defines asignment of regulators info table to chip model */ +/* Defines assignment of regulators info table to chip model */ struct da9063_dev_model { const struct da9063_regulator_info *regulator_info; unsigned int n_regulators; @@ -715,7 +715,7 @@ static const struct da9063_regulator_info da9063_regulator_info[] = { }; /* Link chip model with regulators info table */ -static struct da9063_dev_model regulators_models[] = { +static const struct da9063_dev_model regulators_models[] = { { .regulator_info = da9063_regulator_info, .n_regulators = ARRAY_SIZE(da9063_regulator_info), diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c index d97162f737939f..17527a3f53b430 100644 --- a/drivers/regulator/da9121-regulator.c +++ b/drivers/regulator/da9121-regulator.c @@ -53,7 +53,7 @@ struct da9121_range { int reg_max; }; -static struct da9121_range da9121_10A_2phase_current = { +static const struct da9121_range da9121_10A_2phase_current = { .val_min = 7000000, .val_max = 20000000, .val_stp = 1000000, @@ -61,7 +61,7 @@ static struct da9121_range da9121_10A_2phase_current = { .reg_max = 14, }; -static struct da9121_range da9121_6A_2phase_current = { +static const struct da9121_range da9121_6A_2phase_current = { .val_min = 7000000, .val_max = 12000000, .val_stp = 1000000, @@ -69,7 +69,7 @@ static struct da9121_range da9121_6A_2phase_current = { .reg_max = 6, }; -static struct da9121_range da9121_5A_1phase_current = { +static const struct da9121_range da9121_5A_1phase_current = { .val_min = 3500000, .val_max = 10000000, .val_stp = 500000, @@ -77,7 +77,7 @@ static struct da9121_range 
da9121_5A_1phase_current = { .reg_max = 14, }; -static struct da9121_range da9121_3A_1phase_current = { +static const struct da9121_range da9121_3A_1phase_current = { .val_min = 3500000, .val_max = 6000000, .val_stp = 500000, @@ -85,7 +85,7 @@ static struct da9121_range da9121_3A_1phase_current = { .reg_max = 6, }; -static struct da9121_range da914x_40A_4phase_current = { +static const struct da9121_range da914x_40A_4phase_current = { .val_min = 26000000, .val_max = 78000000, .val_stp = 4000000, @@ -93,7 +93,7 @@ static struct da9121_range da914x_40A_4phase_current = { .reg_max = 14, }; -static struct da9121_range da914x_20A_2phase_current = { +static const struct da9121_range da914x_20A_2phase_current = { .val_min = 13000000, .val_max = 39000000, .val_stp = 2000000, @@ -104,7 +104,7 @@ static struct da9121_range da914x_20A_2phase_current = { struct da9121_variant_info { int num_bucks; int num_phases; - struct da9121_range *current_range; + const struct da9121_range *current_range; }; static const struct da9121_variant_info variant_parameters[] = { @@ -188,7 +188,7 @@ static int da9121_get_current_limit(struct regulator_dev *rdev) { struct da9121 *chip = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); - struct da9121_range *range = + const struct da9121_range *range = variant_parameters[chip->variant_id].current_range; unsigned int val = 0; int ret = 0; @@ -219,7 +219,7 @@ static int da9121_ceiling_selector(struct regulator_dev *rdev, unsigned int *selector) { struct da9121 *chip = rdev_get_drvdata(rdev); - struct da9121_range *range = + const struct da9121_range *range = variant_parameters[chip->variant_id].current_range; unsigned int level; unsigned int i = 0; @@ -259,7 +259,7 @@ static int da9121_set_current_limit(struct regulator_dev *rdev, { struct da9121 *chip = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); - struct da9121_range *range = + const struct da9121_range *range = variant_parameters[chip->variant_id].current_range; unsigned int sel = 0; int ret = 0; diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index d8b39ea3de0e11..d4f14d7ea8cfae 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -264,7 +264,7 @@ static const struct regulator_ops da9211_buck_ops = { .of_map_mode = da9211_map_buck_mode,\ } -static struct regulator_desc da9211_regulators[] = { +static const struct regulator_desc da9211_regulators[] = { DA9211_BUCK(BUCKA), DA9211_BUCK(BUCKB), }; diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c index 7111c46e9de13f..1b893cdd1aad61 100644 --- a/drivers/regulator/devres.c +++ b/drivers/regulator/devres.c @@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_get_optional); * In cases where the supply is not strictly required, callers can check for * -ENODEV error and handle it accordingly. * - * Returns: voltage in microvolts on success, or an error code on failure. + * Returns: voltage in microvolts on success, or a negative error number on failure. */ int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id) { @@ -174,8 +174,8 @@ int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id) * Since we need a real voltage, we use devm_regulator_get_optional() * rather than getting a dummy regulator with devm_regulator_get() and * then letting regulator_get_voltage() fail with -EINVAL. This way, the - * caller can handle the -ENODEV error code if needed instead of the - * ambiguous -EINVAL.
+ * caller can handle the -ENODEV negative error number if needed instead + * of the ambiguous -EINVAL. */ r = devm_regulator_get_optional(dev, id); if (IS_ERR(r)) @@ -276,7 +276,7 @@ static int _devm_regulator_bulk_get(struct device *dev, int num_consumers, * @num_consumers: number of consumers to register * @consumers: configuration of consumers; clients are stored here. * - * @return 0 on success, an errno on failure. + * @return 0 on success, a negative error number on failure. * * This helper function allows drivers to get several regulator * consumers in one operation with management, the regulators will @@ -299,7 +299,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_bulk_get); * @num_consumers: number of consumers to register * @consumers: configuration of consumers; clients are stored here. * - * @return 0 on success, an errno on failure. + * @return 0 on success, a negative error number on failure. * * This helper function allows drivers to exclusively get several * regulator consumers in one operation with management, the regulators @@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_exclusive); * This is a convenience function to allow bulk regulator configuration * to be stored "static const" in files. * - * Return: 0 on success, an errno on failure. + * Return: 0 on success, a negative error number on failure. */ int devm_regulator_bulk_get_const(struct device *dev, int num_consumers, const struct regulator_bulk_data *in_consumers, @@ -393,7 +393,7 @@ static void devm_regulator_bulk_disable(void *res) * @num_consumers: number of consumers to register * @id: list of supply names or regulator IDs * - * @return 0 on success, an errno on failure. + * @return 0 on success, a negative error number on failure. * * This helper function allows drivers to get several regulator * consumers in one operation with management, the regulators will @@ -574,7 +574,7 @@ static void devm_regulator_unregister_supply_alias(struct device *dev, * lookup the supply * @num_id: number of aliases to register * - * @return 0 on success, an errno on failure. + * @return 0 on success, a negative error number on failure. * * This helper function allows drivers to register several supply * aliases in one operation, the aliases will be automatically @@ -726,7 +726,7 @@ static void regulator_irq_helper_drop(void *res) * IRQ. * @rdev_amount: Amount of regulators associated with this IRQ. * - * Return: handle to irq_helper or an ERR_PTR() encoded error code. + * Return: handle to irq_helper or an ERR_PTR() encoded negative error number. 
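A probe-time sketch of devm_regulator_get_enable_read_voltage() as documented in the devres.c hunk above; the "vref" supply name and the fallback value are hypothetical, and the regulator is disabled and released automatically on driver detach:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

static int example_probe_vref(struct device *dev)
{
	int uv;

	/* Get, enable and read the hypothetical "vref" supply in one call. */
	uv = devm_regulator_get_enable_read_voltage(dev, "vref");
	if (uv == -ENODEV)
		uv = 1800000;	/* supply not described: assume a default */
	else if (uv < 0)
		return uv;

	dev_dbg(dev, "reference voltage: %d uV\n", uv);
	return 0;
}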
*/ void *devm_regulator_irq_helper(struct device *dev, const struct regulator_irq_desc *d, int irq, diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index 17c9bf20438590..bd9447dac5967a 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c @@ -46,7 +46,7 @@ /* VSEL bit definitions */ #define VSEL_BUCK_EN BIT(7) #define VSEL_MODE BIT(6) -/* Chip ID and Verison */ +/* Chip ID and Version */ #define DIE_ID 0x0F /* ID1 */ #define DIE_REV 0x0F /* ID2 */ /* Control bit definitions */ diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c index 2d5a42b2b3d801..b6cb0aaac3b169 100644 --- a/drivers/regulator/fixed-helper.c +++ b/drivers/regulator/fixed-helper.c @@ -26,6 +26,8 @@ static void regulator_fixed_release(struct device *dev) * @supplies: consumers for this regulator * @num_supplies: number of consumers * @uv: voltage in microvolts + * + * Return: Pointer to registered platform device, or %NULL if memory allocation fails. */ struct platform_device *regulator_register_always_on(int id, const char *name, struct regulator_consumer_supply *supplies, int num_supplies, int uv) diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index cb93e5cdcfa94e..1cb647ed70c62d 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -129,7 +129,7 @@ static irqreturn_t reg_fixed_under_voltage_irq_handler(int irq, void *data) * If it's an optional IRQ and not found, it returns 0. * Otherwise, it attempts to request the threaded IRQ. * - * Return: 0 on success, or error code on failure. + * Return: 0 on success, or a negative error number on failure. */ static int reg_fixed_get_irqs(struct device *dev, struct fixed_voltage_data *priv) @@ -158,8 +158,10 @@ static int reg_fixed_get_irqs(struct device *dev, * @desc: regulator description * * Populates fixed_voltage_config structure by extracting data from device - * tree node, returns a pointer to the populated structure of NULL if memory - * alloc fails. + * tree node. + * + * Return: Pointer to a populated &struct fixed_voltage_config or %NULL if + * memory allocation fails. */ static struct fixed_voltage_config * of_get_fixed_voltage_config(struct device *dev, diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index 6e1ace660b8cf2..0def82eb8b46a1 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -125,7 +125,7 @@ static int regulator_range_selector_to_index(struct regulator_dev *rdev, * * Regulators that use regmap for their register I/O and use pickable * ranges can set the vsel_reg, vsel_mask, vsel_range_reg and vsel_range_mask - * fields in their descriptor and then use this as their get_voltage_vsel + * fields in their descriptor and then use this as their get_voltage_sel * operation, saving some code. */ int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev) @@ -195,7 +195,7 @@ static int write_separate_vsel_and_range(struct regulator_dev *rdev, * * Regulators that use regmap for their register I/O and use pickable * ranges can set the vsel_reg, vsel_mask, vsel_range_reg and vsel_range_mask - * fields in their descriptor and then use this as their set_voltage_vsel + * fields in their descriptor and then use this as their set_voltage_sel * operation, saving some code. 
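As an illustration of the regmap-backed selector helpers these kernel-doc comments describe, a driver for a simple linear-range LDO might fill in its descriptor as sketched below; the register addresses, masks and voltage range are invented for the example:

#include <linux/bits.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops example_ldo_ops = {
	.list_voltage		= regulator_list_voltage_linear,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
};

/* Hypothetical LDO: 0.8 V to 1.6 V in 50 mV steps, so 17 selectors. */
static const struct regulator_desc example_ldo_desc = {
	.name		= "example-ldo",
	.ops		= &example_ldo_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.n_voltages	= 17,
	.min_uV		= 800000,
	.uV_step	= 50000,
	.vsel_reg	= 0x10,		/* invented register layout */
	.vsel_mask	= GENMASK(4, 0),
	.enable_reg	= 0x11,
	.enable_mask	= BIT(0),
};

With vsel_reg/vsel_mask (and enable_reg/enable_mask) filled in, the core's regmap helpers take care of the selector-to-register translation, which is the code saving the comments refer to.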
*/ int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev, @@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_pickable_regmap); * * Regulators that use regmap for their register I/O can set the * vsel_reg and vsel_mask fields in their descriptor and then use this - * as their get_voltage_vsel operation, saving some code. + * as their get_voltage_sel operation, saving some code. */ int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev) { @@ -276,7 +276,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap); * * Regulators that use regmap for their register I/O can set the * vsel_reg and vsel_mask fields in their descriptor and then use this - * as their set_voltage_vsel operation, saving some code. + * as their set_voltage_sel operation, saving some code. */ int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel) { diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c index 82e9e364d4d411..69d24728d6a4c3 100644 --- a/drivers/regulator/hi6421-regulator.c +++ b/drivers/regulator/hi6421-regulator.c @@ -303,7 +303,7 @@ static const struct regulator_ops hi6421_buck345_ops; } /* HI6421 regulator information */ -static struct hi6421_regulator_info +static const struct hi6421_regulator_info hi6421_regulator_info[HI6421_NUM_REGULATORS] = { HI6421_LDO(LDO0, hi6421_vout0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10, 10000, 0x20, 8000), @@ -384,7 +384,7 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev) static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) { - struct hi6421_regulator_info *info; + const struct hi6421_regulator_info *info; unsigned int reg_val; info = container_of(rdev->desc, struct hi6421_regulator_info, desc); @@ -397,7 +397,7 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) { - struct hi6421_regulator_info *info; + const struct hi6421_regulator_info *info; unsigned int reg_val; info = container_of(rdev->desc, struct hi6421_regulator_info, desc); @@ -411,7 +411,7 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_regulator_info *info; + const struct hi6421_regulator_info *info; unsigned int new_mode; info = container_of(rdev->desc, struct hi6421_regulator_info, desc); @@ -436,7 +436,7 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_regulator_info *info; + const struct hi6421_regulator_info *info; unsigned int new_mode; info = container_of(rdev->desc, struct hi6421_regulator_info, desc); @@ -462,7 +462,7 @@ static unsigned int hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA) { - struct hi6421_regulator_info *info; + const struct hi6421_regulator_info *info; info = container_of(rdev->desc, struct hi6421_regulator_info, desc); @@ -539,7 +539,7 @@ static int hi6421_regulator_probe(struct platform_device *pdev) { struct hi6421_pmic *pmic = dev_get_drvdata(pdev->dev.parent); struct hi6421_regulator_pdata *pdata; - struct hi6421_regulator_info *info; + const struct hi6421_regulator_info *info; struct regulator_config config = { }; struct regulator_dev *rdev; int i; diff --git a/drivers/regulator/hi6421v530-regulator.c 
b/drivers/regulator/hi6421v530-regulator.c index 23924ff0c7b233..b3ebd162481435 100644 --- a/drivers/regulator/hi6421v530-regulator.c +++ b/drivers/regulator/hi6421v530-regulator.c @@ -21,12 +21,10 @@ * struct hi6421v530_regulator_info - hi6421v530 regulator information * @desc: regulator description * @mode_mask: ECO mode bitmask of LDOs; for BUCKs, this masks sleep - * @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only */ struct hi6421v530_regulator_info { struct regulator_desc rdesc; u8 mode_mask; - u32 eco_microamp; }; /* HI6421v530 regulators */ @@ -68,10 +66,9 @@ static const struct regulator_ops hi6421v530_ldo_ops; * emask - enable mask * odelay - off/on delay time in uS * ecomask - eco mode mask - * ecoamp - eco mode load uppler limit in uA */ #define HI6421V530_LDO(_ID, v_table, vreg, vmask, ereg, emask, \ - odelay, ecomask, ecoamp) { \ + odelay, ecomask) { \ .rdesc = { \ .name = #_ID, \ .of_match = of_match_ptr(#_ID), \ @@ -90,31 +87,30 @@ static const struct regulator_ops hi6421v530_ldo_ops; .off_on_delay = odelay, \ }, \ .mode_mask = ecomask, \ - .eco_microamp = ecoamp, \ } /* HI6421V530 regulator information */ -static struct hi6421v530_regulator_info hi6421v530_regulator_info[] = { +static const struct hi6421v530_regulator_info hi6421v530_regulator_info[] = { HI6421V530_LDO(LDO3, ldo_3_voltages, 0x061, 0xf, 0x060, 0x2, - 20000, 0x6, 8000), + 20000, 0x6), HI6421V530_LDO(LDO9, ldo_9_11_voltages, 0x06b, 0x7, 0x06a, 0x2, - 40000, 0x6, 8000), + 40000, 0x6), HI6421V530_LDO(LDO11, ldo_9_11_voltages, 0x06f, 0x7, 0x06e, 0x2, - 40000, 0x6, 8000), + 40000, 0x6), HI6421V530_LDO(LDO15, ldo_15_16_voltages, 0x077, 0x7, 0x076, 0x2, - 40000, 0x6, 8000), + 40000, 0x6), HI6421V530_LDO(LDO16, ldo_15_16_voltages, 0x079, 0x7, 0x078, 0x2, - 40000, 0x6, 8000), + 40000, 0x6), }; static unsigned int hi6421v530_regulator_ldo_get_mode( struct regulator_dev *rdev) { - struct hi6421v530_regulator_info *info; + const struct hi6421v530_regulator_info *info; unsigned int reg_val; - info = rdev_get_drvdata(rdev); + info = container_of(rdev->desc, struct hi6421v530_regulator_info, rdesc); regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val); if (reg_val & (info->mode_mask)) @@ -126,10 +122,10 @@ static unsigned int hi6421v530_regulator_ldo_get_mode( static int hi6421v530_regulator_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421v530_regulator_info *info; + const struct hi6421v530_regulator_info *info; unsigned int new_mode; - info = rdev_get_drvdata(rdev); + info = container_of(rdev->desc, struct hi6421v530_regulator_info, rdesc); switch (mode) { case REGULATOR_MODE_NORMAL: new_mode = 0; @@ -176,7 +172,6 @@ static int hi6421v530_regulator_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(hi6421v530_regulator_info); i++) { config.dev = pdev->dev.parent; config.regmap = pmic->regmap; - config.driver_data = &hi6421v530_regulator_info[i]; rdev = devm_regulator_register(&pdev->dev, &hi6421v530_regulator_info[i].rdesc, diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c index 4e10daa1e68975..e5f6fbfc901642 100644 --- a/drivers/regulator/hi6421v600-regulator.c +++ b/drivers/regulator/hi6421v600-regulator.c @@ -118,7 +118,7 @@ static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev) static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev) { - struct hi6421_spmi_reg_info *sreg; + const struct hi6421_spmi_reg_info *sreg; unsigned int reg_val; sreg = container_of(rdev->desc,
struct hi6421_spmi_reg_info, desc); @@ -133,7 +133,7 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev) static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_spmi_reg_info *sreg; + const struct hi6421_spmi_reg_info *sreg; unsigned int val; sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc); @@ -160,7 +160,7 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA) { - struct hi6421_spmi_reg_info *sreg; + const struct hi6421_spmi_reg_info *sreg; sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc); @@ -195,7 +195,7 @@ enum hi6421_spmi_regulator_id { hi6421v600_ldo34, }; -static struct hi6421_spmi_reg_info regulator_info[] = { +static const struct hi6421_spmi_reg_info regulator_info[] = { HI6421V600_LDO(ldo3, range_1v5_to_2v0, 0x16, 0x01, 0x51, 20000, 120, @@ -235,7 +235,7 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev) struct device *pmic_dev = pdev->dev.parent; struct regulator_config config = { }; struct hi6421_spmi_reg_priv *priv; - struct hi6421_spmi_reg_info *info; + const struct hi6421_spmi_reg_info *info; struct device *dev = &pdev->dev; struct regmap *regmap; struct regulator_dev *rdev; diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h index 77a50214108963..5b43f802468ddb 100644 --- a/drivers/regulator/internal.h +++ b/drivers/regulator/internal.h @@ -66,7 +66,8 @@ static inline struct regulator_dev *dev_to_rdev(struct device *dev) } #ifdef CONFIG_OF -struct regulator_dev *of_find_regulator_by_node(struct device_node *np); +struct regulator_dev *of_regulator_dev_lookup(struct device *dev, + const char *supply); struct regulator_init_data *regulator_of_get_init_data(struct device *dev, const struct regulator_desc *desc, struct regulator_config *config, @@ -80,10 +81,10 @@ int of_get_n_coupled(struct regulator_dev *rdev); bool of_check_coupling_data(struct regulator_dev *rdev); #else -static inline struct regulator_dev * -of_find_regulator_by_node(struct device_node *np) +static inline struct regulator_dev *of_regulator_dev_lookup(struct device *dev, + const char *supply) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct regulator_init_data * @@ -120,6 +121,10 @@ enum regulator_get_type { MAX_GET_TYPE }; +int _regulator_get_common_check(struct device *dev, const char *id, + enum regulator_get_type get_type); +struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct device *dev, + const char *id, enum regulator_get_type get_type); struct regulator *_regulator_get(struct device *dev, const char *id, enum regulator_get_type get_type); int _regulator_bulk_get(struct device *dev, int num_consumers, diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c index 5ab1a0befe12f7..0aa188b2bbb267 100644 --- a/drivers/regulator/irq_helpers.c +++ b/drivers/regulator/irq_helpers.c @@ -333,7 +333,7 @@ static void init_rdev_errors(struct regulator_irq *h) * IRQ. * @rdev_amount: Amount of regulators associated with this IRQ. * - * Return: handle to irq_helper or an ERR_PTR() encoded error code. + * Return: handle to irq_helper or an ERR_PTR() encoded negative error number. 
*/ void *regulator_irq_helper(struct device *dev, const struct regulator_irq_desc *d, int irq, @@ -404,16 +404,21 @@ EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel); /** * regulator_irq_map_event_simple - regulator IRQ notification for trivial IRQs * - * @irq: Number of IRQ that occurred - * @rid: Information about the event IRQ indicates - * @dev_mask: mask indicating the regulator originating the IRQ + * @irq: Number of IRQ that occurred. + * @rid: Information about the event IRQ indicates. + * The function fills in the &regulator_err_state->notifs + * and &regulator_err_state->errors fields of + * &regulator_irq_data->states as output. + * @dev_mask: mask indicating the regulator originating the IRQ. * * Regulators whose IRQ has single, well defined purpose (always indicate * exactly one event, and are relevant to exactly one regulator device) can - * use this function as their map_event callbac for their regulator IRQ - * notification helperk. Exactly one rdev and exactly one error (in + * use this function as their map_event callback for their regulator IRQ + * notification helper. Exactly one rdev and exactly one error (in * "common_errs"-field) can be given at IRQ helper registration for * regulator_irq_map_event_simple() to be viable. + * + * Return: 0. */ int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid, unsigned long *dev_mask) diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c index 8bbcd983a74aa8..4a568b1b010711 100644 --- a/drivers/regulator/max5970-regulator.c +++ b/drivers/regulator/max5970-regulator.c @@ -70,7 +70,7 @@ static int max5970_read(struct device *dev, enum hwmon_sensor_types type, * millivolts) and then divide it by the maximum value of the 10-bit ADC. */ *val = (*val * ddata->irng) >> 10; - /* Convert the voltage meansurement across shunt resistor to current */ + /* Convert the voltage measurement across shunt resistor to current */ *val = (*val * 1000) / ddata->shunt_micro_ohms; return 0; default: diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c index 94abfbb2bc1e5c..7368f54f046dc8 100644 --- a/drivers/regulator/max77650-regulator.c +++ b/drivers/regulator/max77650-regulator.c @@ -43,8 +43,6 @@ struct max77650_regulator_desc { unsigned int regB; }; -static struct max77650_regulator_desc max77651_SBB1_desc; - static const unsigned int max77651_sbb1_volt_range_sel[] = { 0x0, 0x1, 0x2, 0x3 }; @@ -66,11 +64,11 @@ static const unsigned int max77650_current_limit_table[] = { static int max77650_regulator_is_enabled(struct regulator_dev *rdev) { - struct max77650_regulator_desc *rdesc; + const struct max77650_regulator_desc *rdesc; struct regmap *map; int val, rv, en; - rdesc = rdev_get_drvdata(rdev); + rdesc = container_of(rdev->desc, struct max77650_regulator_desc, desc); map = rdev_get_regmap(rdev); rv = regmap_read(map, rdesc->regB, &val); @@ -84,10 +82,10 @@ static int max77650_regulator_is_enabled(struct regulator_dev *rdev) static int max77650_regulator_enable(struct regulator_dev *rdev) { - struct max77650_regulator_desc *rdesc; + const struct max77650_regulator_desc *rdesc; struct regmap *map; - rdesc = rdev_get_drvdata(rdev); + rdesc = container_of(rdev->desc, struct max77650_regulator_desc, desc); map = rdev_get_regmap(rdev); return regmap_update_bits(map, rdesc->regB, @@ -97,10 +95,10 @@ static int max77650_regulator_enable(struct regulator_dev *rdev) static int max77650_regulator_disable(struct regulator_dev *rdev) { - struct max77650_regulator_desc *rdesc;
+ const struct max77650_regulator_desc *rdesc; struct regmap *map; - rdesc = rdev_get_drvdata(rdev); + rdesc = container_of(rdev->desc, struct max77650_regulator_desc, desc); map = rdev_get_regmap(rdev); return regmap_update_bits(map, rdesc->regB, @@ -145,7 +143,7 @@ static const struct regulator_ops max77651_SBB1_regulator_ops = { .set_active_discharge = regulator_set_active_discharge_regmap, }; -static struct max77650_regulator_desc max77650_LDO_desc = { +static const struct max77650_regulator_desc max77650_LDO_desc = { .desc = { .name = "ldo", .of_match = of_match_ptr("ldo"), @@ -171,7 +169,7 @@ static struct max77650_regulator_desc max77650_LDO_desc = { .regB = MAX77650_REG_CNFG_LDO_B, }; -static struct max77650_regulator_desc max77650_SBB0_desc = { +static const struct max77650_regulator_desc max77650_SBB0_desc = { .desc = { .name = "sbb0", .of_match = of_match_ptr("sbb0"), @@ -201,7 +199,7 @@ static struct max77650_regulator_desc max77650_SBB0_desc = { .regB = MAX77650_REG_CNFG_SBB0_B, }; -static struct max77650_regulator_desc max77650_SBB1_desc = { +static const struct max77650_regulator_desc max77650_SBB1_desc = { .desc = { .name = "sbb1", .of_match = of_match_ptr("sbb1"), @@ -231,7 +229,7 @@ static struct max77650_regulator_desc max77650_SBB1_desc = { .regB = MAX77650_REG_CNFG_SBB1_B, }; -static struct max77650_regulator_desc max77651_SBB1_desc = { +static const struct max77650_regulator_desc max77651_SBB1_desc = { .desc = { .name = "sbb1", .of_match = of_match_ptr("sbb1"), @@ -264,7 +262,7 @@ static struct max77650_regulator_desc max77651_SBB1_desc = { .regB = MAX77650_REG_CNFG_SBB1_B, }; -static struct max77650_regulator_desc max77650_SBB2_desc = { +static const struct max77650_regulator_desc max77650_SBB2_desc = { .desc = { .name = "sbb2", .of_match = of_match_ptr("sbb2"), @@ -294,7 +292,7 @@ static struct max77650_regulator_desc max77650_SBB2_desc = { .regB = MAX77650_REG_CNFG_SBB2_B, }; -static struct max77650_regulator_desc max77651_SBB2_desc = { +static const struct max77650_regulator_desc max77651_SBB2_desc = { .desc = { .name = "sbb2", .of_match = of_match_ptr("sbb2"), @@ -326,8 +324,8 @@ static struct max77650_regulator_desc max77651_SBB2_desc = { static int max77650_regulator_probe(struct platform_device *pdev) { - struct max77650_regulator_desc **rdescs; - struct max77650_regulator_desc *rdesc; + const struct max77650_regulator_desc **rdescs; + const struct max77650_regulator_desc *rdesc; struct regulator_config config = { }; struct device *dev, *parent; struct regulator_dev *rdev; @@ -376,7 +374,6 @@ static int max77650_regulator_probe(struct platform_device *pdev) for (i = 0; i < MAX77650_REGULATOR_NUM_REGULATORS; i++) { rdesc = rdescs[i]; - config.driver_data = rdesc; rdev = devm_regulator_register(dev, &rdesc->desc, &config); if (IS_ERR(rdev)) diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c index 69eb6abd255148..b2e87642bec419 100644 --- a/drivers/regulator/max77802-regulator.c +++ b/drivers/regulator/max77802-regulator.c @@ -160,8 +160,8 @@ static unsigned max77802_get_mode(struct regulator_dev *rdev) * Enable Control Logic3 by PWRREQ (LDO 3) * * If setting the regulator mode fails, the function only warns but does - * not return an error code to avoid the regulator core to stop setting - * the operating mode for the remaining regulators. + * not return a negative error number to avoid the regulator core to stop + * setting the operating mode for the remaining regulators. 
*/ static int max77802_set_suspend_mode(struct regulator_dev *rdev, unsigned int mode) diff --git a/drivers/regulator/max77826-regulator.c b/drivers/regulator/max77826-regulator.c index 5590cdf615b701..310bc8ee7af842 100644 --- a/drivers/regulator/max77826-regulator.c +++ b/drivers/regulator/max77826-regulator.c @@ -153,7 +153,6 @@ enum max77826_regulators { struct max77826_regulator_info { struct regmap *regmap; - struct regulator_desc *rdesc; }; static const struct regmap_config max77826_regmap_config = { @@ -187,7 +186,7 @@ static const struct regulator_ops max77826_buck_ops = { .set_voltage_time_sel = max77826_set_voltage_time_sel, }; -static struct regulator_desc max77826_regulators_desc[] = { +static const struct regulator_desc max77826_regulators_desc[] = { MAX77826_LDO(1, NMOS), MAX77826_LDO(2, NMOS), MAX77826_LDO(3, NMOS), @@ -246,7 +245,6 @@ static int max77826_i2c_probe(struct i2c_client *client) if (!info) return -ENOMEM; - info->rdesc = max77826_regulators_desc; regmap = devm_regmap_init_i2c(client, &max77826_regmap_config); if (IS_ERR(regmap)) { dev_err(dev, "Failed to allocate regmap!\n"); diff --git a/drivers/regulator/max77857-regulator.c b/drivers/regulator/max77857-regulator.c index bc28dc8503a870..1216cc3a6f72bb 100644 --- a/drivers/regulator/max77857-regulator.c +++ b/drivers/regulator/max77857-regulator.c @@ -427,7 +427,7 @@ static int max77857_probe(struct i2c_client *client) return 0; } -const struct i2c_device_id max77857_id[] = { +static const struct i2c_device_id max77857_id[] = { { "max77831", ID_MAX77831 }, { "max77857", ID_MAX77857 }, { "max77859", ID_MAX77859 }, diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index 96ca146281d697..f68caa07f546f1 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c @@ -470,8 +470,7 @@ static const struct thermal_zone_device_ops max77621_tz_ops = { static int max8973_thermal_init(struct max8973_chip *mchip) { struct thermal_zone_device *tzd; - struct irq_data *irq_data; - unsigned long irq_flags = 0; + unsigned long irq_flags; int ret; if (mchip->id != MAX77621) @@ -489,9 +488,7 @@ static int max8973_thermal_init(struct max8973_chip *mchip) if (mchip->irq <= 0) return 0; - irq_data = irq_get_irq_data(mchip->irq); - if (irq_data) - irq_flags = irqd_get_trigger_type(irq_data); + irq_flags = irq_get_trigger_type(mchip->irq); ret = devm_request_threaded_irq(mchip->dev, mchip->irq, NULL, max8973_thermal_irq, diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c index 5f201ee9a5b803..e77621b6466c75 100644 --- a/drivers/regulator/max8997-regulator.c +++ b/drivers/regulator/max8997-regulator.c @@ -8,6 +8,7 @@ // This driver is based on max8998.c #include +#include #include #include #include @@ -876,7 +877,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, struct max8997_platform_data *pdata) { struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); - struct device_node *pmic_np, *regulators_np, *reg_np; + struct device_node *pmic_np, *reg_np; struct max8997_regulator_data *rdata; unsigned int i, dvs_voltage_nr = 1; @@ -886,7 +887,8 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, return -ENODEV; } - regulators_np = of_get_child_by_name(pmic_np, "regulators"); + struct device_node *regulators_np __free(device_node) = of_get_child_by_name(pmic_np, + "regulators"); if (!regulators_np) { dev_err(&pdev->dev, "could not find regulators sub-node\n"); return -EINVAL; @@ 
-898,10 +900,8 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, rdata = devm_kcalloc(&pdev->dev, pdata->num_regulators, sizeof(*rdata), GFP_KERNEL); - if (!rdata) { - of_node_put(regulators_np); + if (!rdata) return -ENOMEM; - } pdata->regulators = rdata; for_each_child_of_node(regulators_np, reg_np) { @@ -922,7 +922,6 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, rdata->reg_node = reg_np; rdata++; } - of_node_put(regulators_np); pdata->buck1_gpiodvs = of_property_read_bool(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs"); pdata->buck2_gpiodvs = of_property_read_bool(pmic_np, "max8997,pmic-buck2-uses-gpio-dvs"); @@ -941,9 +940,8 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, } } - if (of_get_property(pmic_np, - "max8997,pmic-ignore-gpiodvs-side-effect", NULL)) - pdata->ignore_gpiodvs_side_effect = true; + pdata->ignore_gpiodvs_side_effect = of_property_read_bool(pmic_np, + "max8997,pmic-ignore-gpiodvs-side-effect"); dvs_voltage_nr = 8; } diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c index 5de9d4fa5113f5..b34ae0bbba6f74 100644 --- a/drivers/regulator/mcp16502.c +++ b/drivers/regulator/mcp16502.c @@ -107,9 +107,10 @@ static unsigned int mcp16502_of_map_mode(unsigned int mode) return REGULATOR_MODE_INVALID; } -#define MCP16502_REGULATOR(_name, _id, _ranges, _ops, _ramp_table) \ +#define MCP16502_REGULATOR(_name, _id, _sn, _ranges, _ops, _ramp_table) \ [_id] = { \ .name = _name, \ + .supply_name = #_sn, \ .regulators_node = "regulators", \ .id = _id, \ .ops = &(_ops), \ @@ -467,18 +468,18 @@ static const struct linear_range b234_ranges[] = { }; static const struct regulator_desc mcp16502_desc[] = { - /* MCP16502_REGULATOR(_name, _id, ranges, regulator_ops, ramp_table) */ - MCP16502_REGULATOR("VDD_IO", BUCK1, b1l12_ranges, mcp16502_buck_ops, + /* MCP16502_REGULATOR(_name, _id, _sn, _ranges, _ops, _ramp_table) */ + MCP16502_REGULATOR("VDD_IO", BUCK1, pvin1, b1l12_ranges, mcp16502_buck_ops, mcp16502_ramp_b1l12), - MCP16502_REGULATOR("VDD_DDR", BUCK2, b234_ranges, mcp16502_buck_ops, + MCP16502_REGULATOR("VDD_DDR", BUCK2, pvin2, b234_ranges, mcp16502_buck_ops, mcp16502_ramp_b234), - MCP16502_REGULATOR("VDD_CORE", BUCK3, b234_ranges, mcp16502_buck_ops, + MCP16502_REGULATOR("VDD_CORE", BUCK3, pvin3, b234_ranges, mcp16502_buck_ops, mcp16502_ramp_b234), - MCP16502_REGULATOR("VDD_OTHER", BUCK4, b234_ranges, mcp16502_buck_ops, + MCP16502_REGULATOR("VDD_OTHER", BUCK4, pvin4, b234_ranges, mcp16502_buck_ops, mcp16502_ramp_b234), - MCP16502_REGULATOR("LDO1", LDO1, b1l12_ranges, mcp16502_ldo_ops, + MCP16502_REGULATOR("LDO1", LDO1, lvin, b1l12_ranges, mcp16502_ldo_ops, mcp16502_ramp_b1l12), - MCP16502_REGULATOR("LDO2", LDO2, b1l12_ranges, mcp16502_ldo_ops, + MCP16502_REGULATOR("LDO2", LDO2, lvin, b1l12_ranges, mcp16502_ldo_ops, mcp16502_ramp_b1l12) }; diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c index 3457e650a99459..e6794190cb68ec 100644 --- a/drivers/regulator/mp5416.c +++ b/drivers/regulator/mp5416.c @@ -163,7 +163,7 @@ static const struct regulator_ops mp5416_buck_ops = { .set_ramp_delay = regulator_set_ramp_delay_regmap, }; -static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = { +static const struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = { MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1), MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 2), MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, 
BIT(2), 1), @@ -174,7 +174,7 @@ static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = { MP5416LDO("ldo4", 4, BIT(1)), }; -static struct regulator_desc mp5496_regulators_desc[MP5416_MAX_REGULATORS] = { +static const struct regulator_desc mp5496_regulators_desc[MP5416_MAX_REGULATORS] = { MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1), MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 1), MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1), diff --git a/drivers/regulator/mt6357-regulator.c b/drivers/regulator/mt6357-regulator.c index c0439a4e0b5062..1eb69c7a6acb38 100644 --- a/drivers/regulator/mt6357-regulator.c +++ b/drivers/regulator/mt6357-regulator.c @@ -123,7 +123,7 @@ struct mt6357_regulator_info { * * Regulators that use regmap for their register I/O can set the * da_vsel_reg and da_vsel_mask fields in the info structure and - * then use this as their get_voltage_vsel operation. + * then use this as their get_voltage_sel operation. */ static int mt6357_get_buck_voltage_sel(struct regulator_dev *rdev) { diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c index 9bf4163221f1b8..f5662c56946427 100644 --- a/drivers/regulator/mtk-dvfsrc-regulator.c +++ b/drivers/regulator/mtk-dvfsrc-regulator.c @@ -19,7 +19,7 @@ enum dvfsrc_regulator_id { }; struct dvfsrc_regulator_pdata { - struct regulator_desc *descs; + const struct regulator_desc *descs; u32 size; }; @@ -107,7 +107,7 @@ static const unsigned int mt6873_voltages[] = { 725000, }; -static struct regulator_desc mt6873_regulators[] = { +static const struct regulator_desc mt6873_regulators[] = { MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt6873_voltages), MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt6873_voltages), }; @@ -122,7 +122,7 @@ static const unsigned int mt8183_voltages[] = { 800000, }; -static struct regulator_desc mt8183_regulators[] = { +static const struct regulator_desc mt8183_regulators[] = { MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8183_voltages), }; @@ -138,7 +138,7 @@ static const unsigned int mt8195_voltages[] = { 750000, }; -static struct regulator_desc mt8195_regulators[] = { +static const struct regulator_desc mt8195_regulators[] = { MTK_DVFSRC_VREG("dvfsrc-vcore", VCORE, mt8195_voltages), MTK_DVFSRC_VREG("dvfsrc-vscp", VSCP, mt8195_voltages), }; @@ -159,7 +159,7 @@ static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev) return -EINVAL; for (i = 0; i < pdata->size; i++) { - struct regulator_desc *vrdesc = &pdata->descs[i]; + const struct regulator_desc *vrdesc = &pdata->descs[i]; struct regulator_dev *rdev; rdev = devm_regulator_register(&pdev->dev, vrdesc, &config); diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 03afc160fc72ce..3f490d81abc28f 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -338,8 +338,10 @@ static int of_get_regulation_constraints(struct device *dev, * @desc: regulator description * * Populates regulator_init_data structure by extracting data from device - * tree node, returns a pointer to the populated structure or NULL if memory - * alloc fails. + * tree node. + * + * Return: Pointer to a populated &struct regulator_init_data or NULL if + * memory allocation fails. 
*/ struct regulator_init_data *of_get_regulator_init_data(struct device *dev, struct device_node *node, @@ -391,7 +393,7 @@ static void devm_of_regulator_put_matches(struct device *dev, void *res) * in place and an additional of_node reference is taken for each matched * regulator. * - * Returns the number of matches found or a negative error code on failure. + * Return: The number of matches found or a negative error number on failure. */ int of_regulator_match(struct device *dev, struct device_node *node, struct of_regulator_match *matches, @@ -550,7 +552,71 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev, return NULL; } -struct regulator_dev *of_find_regulator_by_node(struct device_node *np) +/** + * of_get_child_regulator - get a child regulator device node + * based on supply name + * @parent: Parent device node + * @prop_name: Combination regulator supply name and "-supply" + * + * Traverse all child nodes. + * Extract the child regulator device node corresponding to the supply name. + * + * Return: Pointer to the &struct device_node corresponding to the regulator + * if found, or %NULL if not found. + */ +static struct device_node *of_get_child_regulator(struct device_node *parent, + const char *prop_name) +{ + struct device_node *regnode = NULL; + struct device_node *child = NULL; + + for_each_child_of_node(parent, child) { + regnode = of_parse_phandle(child, prop_name, 0); + if (regnode) + goto err_node_put; + + regnode = of_get_child_regulator(child, prop_name); + if (regnode) + goto err_node_put; + } + return NULL; + +err_node_put: + of_node_put(child); + return regnode; +} + +/** + * of_get_regulator - get a regulator device node based on supply name + * @dev: Device pointer for the consumer (of regulator) device + * @supply: regulator supply name + * + * Extract the regulator device node corresponding to the supply name. + * + * Return: Pointer to the &struct device_node corresponding to the regulator + * if found, or %NULL if not found. + */ +static struct device_node *of_get_regulator(struct device *dev, const char *supply) +{ + struct device_node *regnode = NULL; + char prop_name[64]; /* 64 is max size of property name */ + + dev_dbg(dev, "Looking up %s-supply from device tree\n", supply); + + snprintf(prop_name, 64, "%s-supply", supply); + regnode = of_parse_phandle(dev->of_node, prop_name, 0); + if (regnode) + return regnode; + + regnode = of_get_child_regulator(dev->of_node, prop_name); + if (regnode) + return regnode; + + dev_dbg(dev, "Looking up %s property in node %pOF failed\n", prop_name, dev->of_node); + return NULL; +} + +static struct regulator_dev *of_find_regulator_by_node(struct device_node *np) { struct device *dev; @@ -559,6 +625,46 @@ struct regulator_dev *of_find_regulator_by_node(struct device_node *np) return dev ? dev_to_rdev(dev) : NULL; } +/** + * of_regulator_dev_lookup - lookup a regulator device with device tree only + * @dev: Device pointer for regulator supply lookup. + * @supply: Supply name or regulator ID. + * + * Return: Pointer to the &struct regulator_dev on success, or ERR_PTR() + * encoded value on error. + * + * If successful, returns a pointer to the &struct regulator_dev that + * corresponds to the name @supply and with the embedded &struct device + * refcount incremented by one. The refcount must be dropped by calling + * put_device(). + * + * On failure one of the following ERR_PTR() encoded values is returned: + * * -%ENODEV if lookup fails permanently. 
+ * * -%EPROBE_DEFER if lookup could succeed in the future. + */ +struct regulator_dev *of_regulator_dev_lookup(struct device *dev, + const char *supply) +{ + struct regulator_dev *r; + struct device_node *node; + + node = of_get_regulator(dev, supply); + if (node) { + r = of_find_regulator_by_node(node); + of_node_put(node); + if (r) + return r; + + /* + * We have a node, but there is no device. + * assume it has not registered yet. + */ + return ERR_PTR(-EPROBE_DEFER); + } + + return ERR_PTR(-ENODEV); +} + /* * Returns number of regulators coupled with rdev. */ @@ -619,7 +725,7 @@ static bool of_coupling_find_node(struct device_node *src, * - all coupled regulators have the same number of regulator_dev phandles * - all regulators are linked to each other * - * Returns true if all conditions are met. + * Return: True if all conditions are met; false otherwise. */ bool of_check_coupling_data(struct regulator_dev *rdev) { @@ -690,8 +796,8 @@ bool of_check_coupling_data(struct regulator_dev *rdev) * "regulator-coupled-with" property * @index: Index in phandles array * - * Returns the regulator_dev pointer parsed from DTS. If it has not been yet - * registered, returns NULL + * Return: Pointer to the &struct regulator_dev parsed from DTS, or %NULL if + * it has not yet been registered. */ struct regulator_dev *of_parse_coupled_regulator(struct regulator_dev *rdev, int index) @@ -735,31 +841,32 @@ static int is_supply_name(const char *name) return 0; } -/* +/** * of_regulator_bulk_get_all - get multiple regulator consumers * * @dev: Device to supply * @np: device node to search for consumers * @consumers: Configuration of consumers; clients are stored here. * - * @return number of regulators on success, an errno on failure. - * * This helper function allows drivers to get several regulator * consumers in one operation. If any of the regulators cannot be * acquired then any regulators that were allocated will be freed - * before returning to the caller. + * before returning to the caller, and @consumers will not be + * changed. + * + * Return: Number of regulators on success, or a negative error number + * on failure. 
*/ int of_regulator_bulk_get_all(struct device *dev, struct device_node *np, struct regulator_bulk_data **consumers) { int num_consumers = 0; struct regulator *tmp; + struct regulator_bulk_data *_consumers = NULL; struct property *prop; int i, n = 0, ret; char name[64]; - *consumers = NULL; - /* * first pass: get numbers of xxx-supply * second pass: fill consumers @@ -769,7 +876,7 @@ int of_regulator_bulk_get_all(struct device *dev, struct device_node *np, i = is_supply_name(prop->name); if (i == 0) continue; - if (!*consumers) { + if (!_consumers) { num_consumers++; continue; } else { @@ -777,28 +884,31 @@ int of_regulator_bulk_get_all(struct device *dev, struct device_node *np, name[i] = '\0'; tmp = regulator_get(dev, name); if (IS_ERR(tmp)) { - ret = -EINVAL; + ret = PTR_ERR(tmp); goto error; } - (*consumers)[n].consumer = tmp; + _consumers[n].consumer = tmp; n++; continue; } } - if (*consumers) + if (_consumers) { + *consumers = _consumers; return num_consumers; + } if (num_consumers == 0) return 0; - *consumers = kmalloc_array(num_consumers, + _consumers = kmalloc_array(num_consumers, sizeof(struct regulator_bulk_data), GFP_KERNEL); - if (!*consumers) + if (!_consumers) return -ENOMEM; goto restart; error: while (--n >= 0) - regulator_put(consumers[n]->consumer); + regulator_put(_consumers[n].consumer); + kfree(_consumers); return ret; } EXPORT_SYMBOL_GPL(of_regulator_bulk_get_all); diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c index 319a884121548c..441c9344aef7c3 100644 --- a/drivers/regulator/pcap-regulator.c +++ b/drivers/regulator/pcap-regulator.c @@ -105,7 +105,7 @@ struct pcap_regulator { .lowpwr = _lowpwr, \ } -static struct pcap_regulator vreg_table[] = { +static const struct pcap_regulator vreg_table[] = { VREG_INFO(V1, PCAP_REG_VREG1, 1, 2, 18, 0), VREG_INFO(V2, PCAP_REG_VREG1, 5, 6, 19, 22), VREG_INFO(V3, PCAP_REG_VREG1, 7, 8, 20, 23), @@ -141,7 +141,7 @@ static struct pcap_regulator vreg_table[] = { static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { - struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; + const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); /* the regulator doesn't support voltage switching */ @@ -155,7 +155,7 @@ static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev, static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev) { - struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; + const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); u32 tmp; @@ -169,7 +169,7 @@ static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev) static int pcap_regulator_enable(struct regulator_dev *rdev) { - struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; + const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); if (vreg->en == NA) @@ -180,7 +180,7 @@ static int pcap_regulator_enable(struct regulator_dev *rdev) static int pcap_regulator_disable(struct regulator_dev *rdev) { - struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; + const struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); if (vreg->en == NA) @@ -191,7 +191,7 @@ static int pcap_regulator_disable(struct regulator_dev *rdev) static int pcap_regulator_is_enabled(struct regulator_dev *rdev) { - struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; + const struct 
pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; void *pcap = rdev_get_drvdata(rdev); u32 tmp; diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 7c04870442d3a3..7d56c22b5e4007 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -76,7 +76,7 @@ struct pfuze_chip { struct device *dev; struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR]; struct regulator_dev *regulators[PFUZE100_MAX_REGULATOR]; - struct pfuze_regulator *pfuze_regulators; + const struct pfuze_regulator *pfuze_regulators; }; static const int pfuze100_swbst[] = { @@ -367,7 +367,7 @@ static const struct regulator_ops pfuze3000_sw_regulator_ops = { } /* PFUZE100 */ -static struct pfuze_regulator pfuze100_regulators[] = { +static const struct pfuze_regulator pfuze100_regulators[] = { PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), PFUZE100_SW_REG(PFUZE100, SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000), PFUZE100_SW_REG(PFUZE100, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000), @@ -386,7 +386,7 @@ static struct pfuze_regulator pfuze100_regulators[] = { PFUZE100_COIN_REG(PFUZE100, COIN, PFUZE100_COINVOL, 0x7, pfuze100_coin), }; -static struct pfuze_regulator pfuze200_regulators[] = { +static const struct pfuze_regulator pfuze200_regulators[] = { PFUZE100_SW_REG(PFUZE200, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), PFUZE100_SW_REG(PFUZE200, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000), PFUZE100_SW_REG(PFUZE200, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000), @@ -403,7 +403,7 @@ static struct pfuze_regulator pfuze200_regulators[] = { PFUZE100_COIN_REG(PFUZE200, COIN, PFUZE100_COINVOL, 0x7, pfuze100_coin), }; -static struct pfuze_regulator pfuze3000_regulators[] = { +static const struct pfuze_regulator pfuze3000_regulators[] = { PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000), PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), @@ -419,7 +419,7 @@ static struct pfuze_regulator pfuze3000_regulators[] = { PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), }; -static struct pfuze_regulator pfuze3001_regulators[] = { +static const struct pfuze_regulator pfuze3001_regulators[] = { PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c index 063e12c08e75f7..cfa72ce85bc898 100644 --- a/drivers/regulator/qcom-refgen-regulator.c +++ b/drivers/regulator/qcom-refgen-regulator.c @@ -62,7 +62,7 @@ static int qcom_sdm845_refgen_is_enabled(struct regulator_dev *rdev) return 1; } -static struct regulator_desc sdm845_refgen_desc = { +static const struct regulator_desc sdm845_refgen_desc = { .enable_time = 5, .name = "refgen", .owner = THIS_MODULE, @@ -74,7 +74,7 @@ static struct regulator_desc sdm845_refgen_desc = { }, }; -static struct regulator_desc sm8250_refgen_desc = { +static const struct regulator_desc sm8250_refgen_desc = { .enable_reg = REFGEN_REG_PWRDWN_CTRL5, .enable_mask = REFGEN_PWRDWN_CTRL5_MASK, .enable_val = REFGEN_PWRDWN_CTRL5_ENABLE, diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 80e304711345bb..6c343b4b9d15a8 100644 
--- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -158,7 +158,7 @@ struct rpmh_vreg_init_data { * @wait_for_ack: Boolean indicating if execution must wait until the * request has been acknowledged as complete * - * Return: 0 on success, errno on failure + * Return: 0 on success, or a negative error number on failure */ static int rpmh_regulator_send_request(struct rpmh_vreg *vreg, struct tcs_cmd *cmd, bool wait_for_ack) @@ -317,7 +317,7 @@ static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev) * This function is used in the regulator_ops for VRM type RPMh regulator * devices. * - * Return: 0 on success, errno on failure + * Return: 0 on success, or a negative error number on failure */ static unsigned int rpmh_regulator_vrm_get_optimum_mode( struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA) @@ -409,7 +409,7 @@ static const struct regulator_ops rpmh_regulator_xob_ops = { * @pmic_rpmh_data: Pointer to a null-terminated array of rpmh-regulator * resources defined for the top level PMIC device * - * Return: 0 on success, errno on failure + * Return: 0 on success, or a negative error number on failure */ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev, struct device_node *node, const char *pmic_id, @@ -1537,7 +1537,6 @@ static int rpmh_regulator_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct rpmh_vreg_init_data *vreg_data; - struct device_node *node; struct rpmh_vreg *vreg; const char *pmic_id; int ret; @@ -1552,19 +1551,15 @@ static int rpmh_regulator_probe(struct platform_device *pdev) return ret; } - for_each_available_child_of_node(dev->of_node, node) { + for_each_available_child_of_node_scoped(dev->of_node, node) { vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); - if (!vreg) { - of_node_put(node); + if (!vreg) return -ENOMEM; - } ret = rpmh_regulator_init_vreg(vreg, dev, node, pmic_id, vreg_data); - if (ret < 0) { - of_node_put(node); + if (ret < 0) return ret; - } } return 0; diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 3b7e06b9f5ce96..28e7ce60cb617c 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -1386,7 +1386,7 @@ MODULE_DEVICE_TABLE(of, rpm_of_match); * @pmic_rpm_data: Pointer to a null-terminated array of qcom_smd-regulator * resources defined for the top level PMIC device * - * Return: 0 on success, errno on failure + * Return: 0 on success, or a negative error number on failure */ static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev, struct device_node *node, @@ -1435,7 +1435,6 @@ static int rpm_reg_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct rpm_regulator_data *vreg_data; - struct device_node *node; struct qcom_rpm_reg *vreg; struct qcom_smd_rpm *rpm; int ret; @@ -1455,18 +1454,14 @@ static int rpm_reg_probe(struct platform_device *pdev) if (!vreg_data) return -ENODEV; - for_each_available_child_of_node(dev->of_node, node) { + for_each_available_child_of_node_scoped(dev->of_node, node) { vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL); - if (!vreg) { - of_node_put(node); + if (!vreg) return -ENOMEM; - } ret = rpm_regulator_init_vreg(vreg, dev, node, vreg_data); - if (ret < 0) { - of_node_put(node); + if (ret < 0) return ret; - } } return 0; diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c index 
9a9fa20dcd952c..d66a0f61637e17 100644 --- a/drivers/regulator/qcom_spmi-regulator.c +++ b/drivers/regulator/qcom_spmi-regulator.c @@ -245,7 +245,7 @@ enum spmi_saw3_registers { SAW3_VERSION = 0xFD0, }; -/* Used for indexing into ctrl_reg. These are offets from 0x40 */ +/* Used for indexing into ctrl_reg. These are offsets from 0x40 */ enum spmi_common_control_register_index { SPMI_COMMON_IDX_VOLTAGE_RANGE = 0, SPMI_COMMON_IDX_VOLTAGE_SET = 1, @@ -2528,8 +2528,8 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev) if (!reg) return -ENODEV; - if (of_find_property(node, "qcom,saw-reg", &lenp)) { - syscon = of_parse_phandle(node, "qcom,saw-reg", 0); + syscon = of_parse_phandle(node, "qcom,saw-reg", 0); + if (syscon) { saw_regmap = syscon_node_to_regmap(syscon); of_node_put(syscon); if (IS_ERR(saw_regmap)) @@ -2577,15 +2577,13 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev) if (saw_regmap) { reg_node = of_get_child_by_name(node, reg->name); - reg_prop = of_find_property(reg_node, "qcom,saw-leader", - &lenp); - of_node_put(reg_node); - if (reg_prop) { + if (of_property_read_bool(reg_node, "qcom,saw-leader")) { spmi_saw_ops = *(vreg->desc.ops); spmi_saw_ops.set_voltage_sel = spmi_regulator_saw_set_voltage; vreg->desc.ops = &spmi_saw_ops; } + of_node_put(reg_node); } if (vreg->set_points && vreg->set_points->count == 1) { diff --git a/drivers/regulator/rt5120-regulator.c b/drivers/regulator/rt5120-regulator.c index a388ac70865fd8..f0d3efd160d45f 100644 --- a/drivers/regulator/rt5120-regulator.c +++ b/drivers/regulator/rt5120-regulator.c @@ -245,8 +245,8 @@ static void rt5120_fillin_regulator_desc(struct regulator_desc *desc, int rid) desc->n_voltages = RT5120_BUCK1_NUM_VOLT; desc->min_uV = RT5120_BUCK1_MINUV; desc->uV_step = RT5120_BUCK1_STEPUV; - desc->vsel_reg = RT5120_REG_CH1VID, - desc->vsel_mask = RT5120_CH1VID_MASK, + desc->vsel_reg = RT5120_REG_CH1VID; + desc->vsel_mask = RT5120_CH1VID_MASK; desc->ops = &rt5120_buck1_ops; break; case RT5120_REGULATOR_BUCK2 ... 
RT5120_REGULATOR_BUCK4: diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 570b61420f3acb..7dcf92af8f15e6 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -4,6 +4,7 @@ // http://www.samsung.com #include +#include #include #include #include @@ -1120,7 +1121,6 @@ static const struct regulator_desc s2mpu02_regulators[] = { static int s2mps11_pmic_probe(struct platform_device *pdev) { struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); - struct of_regulator_match *rdata = NULL; struct regulator_config config = { }; struct s2mps11_info *s2mps11; unsigned int rdev_num = 0; @@ -1170,7 +1170,8 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) if (!s2mps11->ext_control_gpiod) return -ENOMEM; - rdata = kcalloc(rdev_num, sizeof(*rdata), GFP_KERNEL); + struct of_regulator_match *rdata __free(kfree) = + kcalloc(rdev_num, sizeof(*rdata), GFP_KERNEL); if (!rdata) return -ENOMEM; @@ -1179,7 +1180,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11, rdev_num); if (ret) - goto out; + return ret; platform_set_drvdata(pdev, s2mps11); @@ -1201,10 +1202,9 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) regulator = devm_regulator_register(&pdev->dev, ®ulators[i], &config); if (IS_ERR(regulator)) { - ret = PTR_ERR(regulator); dev_err(&pdev->dev, "regulator init failed for %d\n", i); - goto out; + return PTR_ERR(regulator); } if (config.ena_gpiod) { @@ -1214,15 +1214,12 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to enable GPIO control over %s: %d\n", regulator->desc->name, ret); - goto out; + return ret; } } } -out: - kfree(rdata); - - return ret; + return 0; } static const struct platform_device_id s2mps11_pmic_id[] = { diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index bfc0e143bf4023..d25cd81e3f3607 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -3,6 +3,7 @@ // Copyright (c) 2011 Samsung Electronics Co., Ltd // http://www.samsung.com +#include #include #include #include @@ -521,7 +522,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, struct sec_platform_data *pdata) { struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); - struct device_node *pmic_np, *regulators_np, *reg_np; + struct device_node *pmic_np, *reg_np; struct sec_regulator_data *rdata; struct sec_opmode_data *rmode; unsigned int i, dvs_voltage_nr = 8, ret; @@ -532,7 +533,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, return -ENODEV; } - regulators_np = of_get_child_by_name(pmic_np, "regulators"); + struct device_node *regulators_np __free(device_node) = of_get_child_by_name(pmic_np, + "regulators"); if (!regulators_np) { dev_err(iodev->dev, "could not find regulators sub-node\n"); return -EINVAL; @@ -544,18 +546,14 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, rdata = devm_kcalloc(&pdev->dev, pdata->num_regulators, sizeof(*rdata), GFP_KERNEL); - if (!rdata) { - of_node_put(regulators_np); + if (!rdata) return -ENOMEM; - } rmode = devm_kcalloc(&pdev->dev, pdata->num_regulators, sizeof(*rmode), GFP_KERNEL); - if (!rmode) { - of_node_put(regulators_np); + if (!rmode) return -ENOMEM; - } pdata->regulators = rdata; pdata->opmode = rmode; @@ -581,7 +579,6 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, rdata->ext_control_gpiod = NULL; } else if (IS_ERR(rdata->ext_control_gpiod)) { 
of_node_put(reg_np); - of_node_put(regulators_np); return PTR_ERR(rdata->ext_control_gpiod); } @@ -603,8 +600,6 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, rmode++; } - of_node_put(regulators_np); - if (of_property_read_bool(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs")) { pdata->buck2_gpiodvs = true; diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c index 29ab217297d6dd..9df726f10ad121 100644 --- a/drivers/regulator/scmi-regulator.c +++ b/drivers/regulator/scmi-regulator.c @@ -297,7 +297,7 @@ static int process_scmi_regulator_of_node(struct scmi_device *sdev, static int scmi_regulator_probe(struct scmi_device *sdev) { int d, ret, num_doms; - struct device_node *np, *child; + struct device_node *np; const struct scmi_handle *handle = sdev->handle; struct scmi_regulator_info *rinfo; struct scmi_protocol_handle *ph; @@ -341,13 +341,11 @@ static int scmi_regulator_probe(struct scmi_device *sdev) */ of_node_get(handle->dev->of_node); np = of_find_node_by_name(handle->dev->of_node, "regulators"); - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { ret = process_scmi_regulator_of_node(sdev, ph, child, rinfo); /* abort on any mem issue */ - if (ret == -ENOMEM) { - of_node_put(child); + if (ret == -ENOMEM) return ret; - } } of_node_put(np); /* diff --git a/drivers/regulator/sm5703-regulator.c b/drivers/regulator/sm5703-regulator.c deleted file mode 100644 index 702461cf075e46..00000000000000 --- a/drivers/regulator/sm5703-regulator.c +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - -#include -#include -#include -#include -#include -#include -#include - -enum sm5703_regulators { - SM5703_BUCK, - SM5703_LDO1, - SM5703_LDO2, - SM5703_LDO3, - SM5703_USBLDO1, - SM5703_USBLDO2, - SM5703_VBUS, - SM5703_MAX_REGULATORS, -}; - -static const int sm5703_ldo_voltagemap[] = { - 1500000, 1800000, 2600000, 2800000, 3000000, 3300000, -}; - -static const int sm5703_buck_voltagemap[] = { - 1000000, 1000000, 1000000, 1000000, - 1000000, 1000000, 1000000, 1000000, - 1000000, 1000000, 1000000, 1100000, - 1200000, 1300000, 1400000, 1500000, - 1600000, 1700000, 1800000, 1900000, - 2000000, 2100000, 2200000, 2300000, - 2400000, 2500000, 2600000, 2700000, - 2800000, 2900000, 3000000, 3000000, -}; - -#define SM5703USBLDO(_name, _id) \ - [SM5703_USBLDO ## _id] = { \ - .name = _name, \ - .of_match = _name, \ - .regulators_node = "regulators", \ - .type = REGULATOR_VOLTAGE, \ - .id = SM5703_USBLDO ## _id, \ - .ops = &sm5703_regulator_ops_fixed, \ - .n_voltages = 1, \ - .fixed_uV = SM5703_USBLDO_MICROVOLT, \ - .enable_reg = SM5703_REG_USBLDO12, \ - .enable_mask = SM5703_REG_EN_USBLDO ##_id, \ - .owner = THIS_MODULE, \ - } - -#define SM5703VBUS(_name) \ - [SM5703_VBUS] = { \ - .name = _name, \ - .of_match = _name, \ - .regulators_node = "regulators", \ - .type = REGULATOR_VOLTAGE, \ - .id = SM5703_VBUS, \ - .ops = &sm5703_regulator_ops_fixed, \ - .n_voltages = 1, \ - .fixed_uV = SM5703_VBUS_MICROVOLT, \ - .enable_reg = SM5703_REG_CNTL, \ - .enable_mask = SM5703_OPERATION_MODE_MASK, \ - .enable_val = SM5703_OPERATION_MODE_USB_OTG_MODE, \ - .disable_val = SM5703_OPERATION_MODE_CHARGING_ON, \ - .owner = THIS_MODULE, \ - } - -#define SM5703BUCK(_name) \ - [SM5703_BUCK] = { \ - .name = _name, \ - .of_match = _name, \ - .regulators_node = "regulators", \ - .type = REGULATOR_VOLTAGE, \ - .id = SM5703_BUCK, \ - .ops = &sm5703_regulator_ops, \ - .n_voltages = ARRAY_SIZE(sm5703_buck_voltagemap), \ - .volt_table = 
sm5703_buck_voltagemap, \ - .vsel_reg = SM5703_REG_BUCK, \ - .vsel_mask = SM5703_BUCK_VOLT_MASK, \ - .enable_reg = SM5703_REG_BUCK, \ - .enable_mask = SM5703_REG_EN_BUCK, \ - .owner = THIS_MODULE, \ - } - -#define SM5703LDO(_name, _id) \ - [SM5703_LDO ## _id] = { \ - .name = _name, \ - .of_match = _name, \ - .regulators_node = "regulators", \ - .type = REGULATOR_VOLTAGE, \ - .id = SM5703_LDO ## _id, \ - .ops = &sm5703_regulator_ops, \ - .n_voltages = ARRAY_SIZE(sm5703_ldo_voltagemap), \ - .volt_table = sm5703_ldo_voltagemap, \ - .vsel_reg = SM5703_REG_LDO ##_id, \ - .vsel_mask = SM5703_LDO_VOLT_MASK, \ - .enable_reg = SM5703_REG_LDO ##_id, \ - .enable_mask = SM5703_LDO_EN, \ - .owner = THIS_MODULE, \ - } - -static const struct regulator_ops sm5703_regulator_ops = { - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .list_voltage = regulator_list_voltage_table, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, -}; - -static const struct regulator_ops sm5703_regulator_ops_fixed = { - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, -}; - -static struct regulator_desc sm5703_regulators_desc[SM5703_MAX_REGULATORS] = { - SM5703BUCK("buck"), - SM5703LDO("ldo1", 1), - SM5703LDO("ldo2", 2), - SM5703LDO("ldo3", 3), - SM5703USBLDO("usbldo1", 1), - SM5703USBLDO("usbldo2", 2), - SM5703VBUS("vbus"), -}; - -static int sm5703_regulator_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct regulator_config config = { NULL, }; - struct regulator_dev *rdev; - struct sm5703_dev *sm5703 = dev_get_drvdata(pdev->dev.parent); - int i; - - config.dev = dev->parent; - config.regmap = sm5703->regmap; - - for (i = 0; i < SM5703_MAX_REGULATORS; i++) { - rdev = devm_regulator_register(dev, - &sm5703_regulators_desc[i], - &config); - if (IS_ERR(rdev)) - return dev_err_probe(dev, PTR_ERR(rdev), - "Failed to register a regulator\n"); - } - - return 0; -} - -static const struct platform_device_id sm5703_regulator_id[] = { - { "sm5703-regulator", 0 }, - {} -}; -MODULE_DEVICE_TABLE(platform, sm5703_regulator_id); - -static struct platform_driver sm5703_regulator_driver = { - .driver = { - .name = "sm5703-regulator", - .probe_type = PROBE_PREFER_ASYNCHRONOUS, - }, - .probe = sm5703_regulator_probe, - .id_table = sm5703_regulator_id, -}; - -module_platform_driver(sm5703_regulator_driver); - -MODULE_DESCRIPTION("Silicon Mitus SM5703 LDO/Buck/USB regulator driver"); -MODULE_AUTHOR("Markuss Broks "); -MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c index 7a0551f0c8c0aa..97f5ce138548ed 100644 --- a/drivers/regulator/tps6287x-regulator.c +++ b/drivers/regulator/tps6287x-regulator.c @@ -103,7 +103,7 @@ static const struct regulator_ops tps6287x_regulator_ops = { .set_ramp_delay = regulator_set_ramp_delay_regmap, }; -static struct regulator_desc tps6287x_reg = { +static const struct regulator_desc tps6287x_reg = { .name = "tps6287x", .owner = THIS_MODULE, .ops = &tps6287x_regulator_ops, diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c index d5757fd9a65bb1..3334b5b7d907f1 100644 --- a/drivers/regulator/tps65023-regulator.c +++ b/drivers/regulator/tps65023-regulator.c @@ -242,17 +242,17 @@ static const struct regulator_desc tps65023_regulators[] = { TPS65023_REGULATOR_LDO(2, 
TPS65023_LDO2_VSEL_table, 0x70), }; -static struct tps_driver_data tps65020_drv_data = { +static const struct tps_driver_data tps65020_drv_data = { .desc = tps65020_regulators, .core_regulator = TPS65023_DCDC_3, }; -static struct tps_driver_data tps65021_drv_data = { +static const struct tps_driver_data tps65021_drv_data = { .desc = tps65021_regulators, .core_regulator = TPS65023_DCDC_3, }; -static struct tps_driver_data tps65023_drv_data = { +static const struct tps_driver_data tps65023_drv_data = { .desc = tps65023_regulators, .core_regulator = TPS65023_DCDC_1, }; diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c index ed5e191e8896b0..43f220cea21c13 100644 --- a/drivers/regulator/wm831x-isink.c +++ b/drivers/regulator/wm831x-isink.c @@ -146,10 +146,10 @@ static int wm831x_isink_probe(struct platform_device *pdev) isink->desc.ops = &wm831x_isink_ops; isink->desc.type = REGULATOR_CURRENT; isink->desc.owner = THIS_MODULE; - isink->desc.curr_table = wm831x_isinkv_values, - isink->desc.n_current_limits = ARRAY_SIZE(wm831x_isinkv_values), - isink->desc.csel_reg = isink->reg, - isink->desc.csel_mask = WM831X_CS1_ISEL_MASK, + isink->desc.curr_table = wm831x_isinkv_values; + isink->desc.n_current_limits = ARRAY_SIZE(wm831x_isinkv_values); + isink->desc.csel_reg = isink->reg; + isink->desc.csel_mask = WM831X_CS1_ISEL_MASK; config.dev = pdev->dev.parent; config.init_data = pdata->isink[id]; diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c index c4a229f66dec90..fb3ca7956d00c6 100644 --- a/drivers/regulator/wm8400-regulator.c +++ b/drivers/regulator/wm8400-regulator.c @@ -112,7 +112,7 @@ static const struct regulator_ops wm8400_dcdc_ops = { .get_optimum_mode = wm8400_dcdc_get_optimum_mode, }; -static struct regulator_desc regulators[] = { +static const struct regulator_desc regulators[] = { { .name = "LDO1", .id = WM8400_LDO1, diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index dda2ada215b7ce..955e4e38477e6f 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -330,8 +330,7 @@ config STM32_RPROC config TI_K3_DSP_REMOTEPROC tristate "TI K3 DSP remoteproc support" depends on ARCH_K3 - select MAILBOX - select OMAP2PLUS_MBOX + depends on OMAP2PLUS_MBOX help Say m here to support TI's C66x and C71x DSP remote processor subsystems on various TI K3 family of SoCs through the remote @@ -340,11 +339,23 @@ config TI_K3_DSP_REMOTEPROC It's safe to say N here if you're not interested in utilizing the DSP slave processors. +config TI_K3_M4_REMOTEPROC + tristate "TI K3 M4 remoteproc support" + depends on ARCH_OMAP2PLUS || ARCH_K3 + select MAILBOX + select OMAP2PLUS_MBOX + help + Say m here to support TI's M4 remote processor subsystems + on various TI K3 family of SoCs through the remote processor + framework. + + It's safe to say N here if you're not interested in utilizing + a remote processor. 
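As an aside on the rt5120 and wm831x hunks above: the lines being corrected there end structure-member assignments with ',' instead of ';'. The code still compiles because the comma operator folds the assignments into a single expression statement, but the result is fragile and misleading, which is why the series rewrites them as separate statements. A minimal standalone sketch of the difference (illustrative only, not part of the patch; the register values are made up):

#include <linux/regulator/driver.h>

static void comma_vs_semicolon_sketch(struct regulator_desc *desc)
{
	/*
	 * Legal but fragile: the comma operator chains both assignments into
	 * one statement, so wrapping an 'if' around the first line would
	 * silently pull the second assignment into its body as well.
	 */
	desc->vsel_reg = 0x10,
	desc->vsel_mask = 0x7f;

	/* What the series converts to: two ordinary statements. */
	desc->vsel_reg = 0x10;
	desc->vsel_mask = 0x7f;
}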
+ config TI_K3_R5_REMOTEPROC tristate "TI K3 R5 remoteproc support" depends on ARCH_K3 - select MAILBOX - select OMAP2PLUS_MBOX + depends on OMAP2PLUS_MBOX help Say m here to support TI's R5F remote processor subsystems on various TI K3 family of SoCs through the remote processor diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 91314a9b43cefc..5ff4e2fee4abd3 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -37,5 +37,6 @@ obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o +obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c index 9041a0e07fb257..8770d0cf1255f8 100644 --- a/drivers/remoteproc/da8xx_remoteproc.c +++ b/drivers/remoteproc/da8xx_remoteproc.c @@ -239,8 +239,6 @@ static int da8xx_rproc_probe(struct platform_device *pdev) struct da8xx_rproc *drproc; struct rproc *rproc; struct irq_data *irq_data; - struct resource *bootreg_res; - struct resource *chipsig_res; struct clk *dsp_clk; struct reset_control *dsp_reset; void __iomem *chipsig; @@ -258,15 +256,11 @@ static int da8xx_rproc_probe(struct platform_device *pdev) return -EINVAL; } - bootreg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "host1cfg"); - bootreg = devm_ioremap_resource(dev, bootreg_res); + bootreg = devm_platform_ioremap_resource_byname(pdev, "host1cfg"); if (IS_ERR(bootreg)) return PTR_ERR(bootreg); - chipsig_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "chipsig"); - chipsig = devm_ioremap_resource(dev, chipsig_res); + chipsig = devm_platform_ioremap_resource_byname(pdev, "chipsig"); if (IS_ERR(chipsig)) return PTR_ERR(chipsig); diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c index 087506e2150805..376187ad5754cd 100644 --- a/drivers/remoteproc/imx_dsp_rproc.c +++ b/drivers/remoteproc/imx_dsp_rproc.c @@ -509,7 +509,7 @@ static int imx_dsp_rproc_mbox_alloc(struct imx_dsp_rproc *priv) struct mbox_client *cl; int ret; - if (!of_get_property(dev->of_node, "mbox-names", NULL)) + if (!of_property_present(dev->of_node, "mbox-names")) return 0; cl = &priv->cl; diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 144c8e9a642e8b..800015ff7ff923 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -90,7 +91,7 @@ struct imx_rproc_mem { #define ATT_CORE_MASK 0xffff #define ATT_CORE(I) BIT((I)) -static int imx_rproc_xtr_mbox_init(struct rproc *rproc); +static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block); static void imx_rproc_free_mbox(struct rproc *rproc); struct imx_rproc { @@ -119,20 +120,16 @@ struct imx_rproc { static const struct imx_rproc_att imx_rproc_att_imx93[] = { /* dev addr , sys addr , size , flags */ /* TCM CODE NON-SECURE */ - { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM }, - { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM }, + { 0x0FFC0000, 0x201C0000, 0x00040000, ATT_OWN | ATT_IOMEM }, /* TCM CODE SECURE */ - { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM }, - { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM }, + { 0x1FFC0000, 
0x201C0000, 0x00040000, ATT_OWN | ATT_IOMEM }, /* TCM SYS NON-SECURE*/ - { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM }, - { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM }, + { 0x20000000, 0x20200000, 0x00040000, ATT_OWN | ATT_IOMEM }, /* TCM SYS SECURE*/ - { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM }, - { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM }, + { 0x30000000, 0x20200000, 0x00040000, ATT_OWN | ATT_IOMEM }, /* DDR */ { 0x80000000, 0x80000000, 0x10000000, 0 }, @@ -210,11 +207,9 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = { /* QSPI Code - alias */ { 0x08000000, 0x08000000, 0x08000000, 0 }, /* DDR (Code) - alias */ - { 0x10000000, 0x80000000, 0x0FFE0000, 0 }, - /* TCML */ - { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM}, - /* TCMU */ - { 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM}, + { 0x10000000, 0x40000000, 0x0FFE0000, 0 }, + /* TCML/U */ + { 0x1FFE0000, 0x007E0000, 0x00040000, ATT_OWN | ATT_IOMEM}, /* OCRAM_S */ { 0x20180000, 0x00180000, 0x00008000, ATT_OWN }, /* OCRAM */ @@ -339,6 +334,7 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = { .att = imx_rproc_att_imx7ulp, .att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp), .method = IMX_RPROC_NONE, + .flags = IMX_RPROC_NEED_SYSTEM_OFF, }; static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = { @@ -375,7 +371,7 @@ static int imx_rproc_start(struct rproc *rproc) struct arm_smccc_res res; int ret; - ret = imx_rproc_xtr_mbox_init(rproc); + ret = imx_rproc_xtr_mbox_init(rproc, true); if (ret) return ret; @@ -635,7 +631,7 @@ static void imx_rproc_kick(struct rproc *rproc, int vqid) static int imx_rproc_attach(struct rproc *rproc) { - return imx_rproc_xtr_mbox_init(rproc); + return imx_rproc_xtr_mbox_init(rproc, true); } static int imx_rproc_detach(struct rproc *rproc) @@ -666,6 +662,17 @@ static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc return (struct resource_table *)priv->rsc_table; } +static struct resource_table * +imx_rproc_elf_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw) +{ + struct imx_rproc *priv = rproc->priv; + + if (priv->rsc_table) + return (struct resource_table *)priv->rsc_table; + + return rproc_elf_find_loaded_rsc_table(rproc, fw); +} + static const struct rproc_ops imx_rproc_ops = { .prepare = imx_rproc_prepare, .attach = imx_rproc_attach, @@ -676,7 +683,7 @@ static const struct rproc_ops imx_rproc_ops = { .da_to_va = imx_rproc_da_to_va, .load = rproc_elf_load_segments, .parse_fw = imx_rproc_parse_fw, - .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, + .find_loaded_rsc_table = imx_rproc_elf_find_loaded_rsc_table, .get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table, .sanity_check = rproc_elf_sanity_check, .get_boot_addr = rproc_elf_get_boot_addr, @@ -789,7 +796,7 @@ static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg) queue_work(priv->workqueue, &priv->rproc_work); } -static int imx_rproc_xtr_mbox_init(struct rproc *rproc) +static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block) { struct imx_rproc *priv = rproc->priv; struct device *dev = priv->dev; @@ -807,12 +814,12 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc) if (priv->tx_ch && priv->rx_ch) return 0; - if (!of_get_property(dev->of_node, "mbox-names", NULL)) + if (!of_property_present(dev->of_node, "mbox-names")) return 0; cl = &priv->cl; cl->dev = dev; - cl->tx_block = true; + cl->tx_block = tx_block; cl->tx_tout = 100; cl->knows_txdone = false; 
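To make the imx_rproc changes in this hunk easier to follow: the series replaces the of_get_property(np, "mbox-names", NULL) presence test with of_property_present() and threads a tx_block flag through imx_rproc_xtr_mbox_init(), so the power-off/restart prepare handlers added in the next hunk can re-request the channel in non-blocking mode. A condensed, hypothetical helper showing the resulting shape (not the driver's actual code; the function name and error handling are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>

static int example_optional_mbox_init(struct device *dev, struct mbox_client *cl,
				      struct mbox_chan **chan, bool tx_block)
{
	/* An absent "mbox-names" property simply means "no mailbox", not an error. */
	if (!of_property_present(dev->of_node, "mbox-names"))
		return 0;

	cl->dev = dev;
	cl->tx_block = tx_block;	/* false in the power-off/restart prepare path */
	cl->tx_tout = 100;
	cl->knows_txdone = false;
	/* cl->rx_callback is expected to be filled in by the caller beforehand. */

	*chan = mbox_request_channel(cl, 0);
	if (IS_ERR(*chan))
		return dev_err_probe(dev, PTR_ERR(*chan),
				     "mbox_request_channel failed\n");

	return 0;
}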
cl->rx_callback = imx_rproc_rx_callback; @@ -1045,6 +1052,22 @@ static int imx_rproc_clk_enable(struct imx_rproc *priv) return 0; } +static int imx_rproc_sys_off_handler(struct sys_off_data *data) +{ + struct rproc *rproc = data->cb_data; + int ret; + + imx_rproc_free_mbox(rproc); + + ret = imx_rproc_xtr_mbox_init(rproc, false); + if (ret) { + dev_err(&rproc->dev, "Failed to request non-blocking mbox\n"); + return NOTIFY_BAD; + } + + return NOTIFY_DONE; +} + static int imx_rproc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1076,7 +1099,9 @@ static int imx_rproc_probe(struct platform_device *pdev) return -ENOMEM; } - ret = imx_rproc_xtr_mbox_init(rproc); + INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); + + ret = imx_rproc_xtr_mbox_init(rproc, true); if (ret) goto err_put_wkq; @@ -1094,11 +1119,33 @@ static int imx_rproc_probe(struct platform_device *pdev) if (ret) goto err_put_scu; - INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); - if (rproc->state != RPROC_DETACHED) rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot"); + if (dcfg->flags & IMX_RPROC_NEED_SYSTEM_OFF) { + /* + * setup mailbox to non-blocking mode in + * [SYS_OFF_MODE_POWER_OFF_PREPARE, SYS_OFF_MODE_RESTART_PREPARE] + * phase before invoking [SYS_OFF_MODE_POWER_OFF, SYS_OFF_MODE_RESTART] + * atomic chain, see kernel/reboot.c. + */ + ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE, + SYS_OFF_PRIO_DEFAULT, + imx_rproc_sys_off_handler, rproc); + if (ret) { + dev_err(dev, "register power off handler failure\n"); + goto err_put_clk; + } + + ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART_PREPARE, + SYS_OFF_PRIO_DEFAULT, + imx_rproc_sys_off_handler, rproc); + if (ret) { + dev_err(dev, "register restart handler failure\n"); + goto err_put_clk; + } + } + ret = rproc_add(rproc); if (ret) { dev_err(dev, "rproc_add failed\n"); diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h index 79a1b8956d1421..17a7d051c53102 100644 --- a/drivers/remoteproc/imx_rproc.h +++ b/drivers/remoteproc/imx_rproc.h @@ -26,6 +26,9 @@ enum imx_rproc_method { IMX_RPROC_SCU_API, }; +/* dcfg flags */ +#define IMX_RPROC_NEED_SYSTEM_OFF BIT(0) + struct imx_rproc_dcfg { u32 src_reg; u32 src_mask; @@ -36,6 +39,7 @@ struct imx_rproc_dcfg { const struct imx_rproc_att *att; size_t att_size; enum imx_rproc_method method; + u32 flags; }; #endif /* _IMX_RPROC_H */ diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c index 9902cce28692c5..1b78d8ddeacfd5 100644 --- a/drivers/remoteproc/ingenic_rproc.c +++ b/drivers/remoteproc/ingenic_rproc.c @@ -183,8 +183,7 @@ static int ingenic_rproc_probe(struct platform_device *pdev) vpu->dev = &pdev->dev; platform_set_drvdata(pdev, vpu); - mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux"); - vpu->aux_base = devm_ioremap_resource(dev, mem); + vpu->aux_base = devm_platform_ioremap_resource_byname(pdev, "aux"); if (IS_ERR(vpu->aux_base)) { dev_err(dev, "Failed to ioremap\n"); return PTR_ERR(vpu->aux_base); diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c index 7e57b90bcaf85c..8f0f7a4cfef262 100644 --- a/drivers/remoteproc/keystone_remoteproc.c +++ b/drivers/remoteproc/keystone_remoteproc.c @@ -366,8 +366,6 @@ static int keystone_rproc_probe(struct platform_device *pdev) struct rproc *rproc; int dsp_id; char *fw_name = NULL; - char *template = "keystone-dsp%d-fw"; - int name_len = 0; int ret = 0; if (!np) { @@ -382,14 +380,12 @@ static int 
keystone_rproc_probe(struct platform_device *pdev) } /* construct a custom default fw name - subject to change in future */ - name_len = strlen(template); /* assuming a single digit alias */ - fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL); + fw_name = devm_kasprintf(dev, GFP_KERNEL, "keystone-dsp%d-fw", dsp_id); if (!fw_name) return -ENOMEM; - snprintf(fw_name, name_len, template, dsp_id); - rproc = rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, fw_name, - sizeof(*ksproc)); + rproc = devm_rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, + fw_name, sizeof(*ksproc)); if (!rproc) return -ENOMEM; @@ -400,13 +396,11 @@ static int keystone_rproc_probe(struct platform_device *pdev) ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc); if (ret) - goto free_rproc; + return ret; ksproc->reset = devm_reset_control_get_exclusive(dev, NULL); - if (IS_ERR(ksproc->reset)) { - ret = PTR_ERR(ksproc->reset); - goto free_rproc; - } + if (IS_ERR(ksproc->reset)) + return PTR_ERR(ksproc->reset); /* enable clock for accessing DSP internal memories */ pm_runtime_enable(dev); @@ -471,8 +465,6 @@ static int keystone_rproc_probe(struct platform_device *pdev) pm_runtime_put_sync(dev); disable_rpm: pm_runtime_disable(dev); -free_rproc: - rproc_free(rproc); return ret; } @@ -484,7 +476,6 @@ static void keystone_rproc_remove(struct platform_device *pdev) gpiod_put(ksproc->kick_gpio); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - rproc_free(ksproc->rproc); of_reserved_mem_device_release(&pdev->dev); } diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 88e7b84f223c02..ef82835e98a4ef 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -829,6 +829,23 @@ static const struct adsp_data adsp_resource_init = { .ssctl_id = 0x14, }; +static const struct adsp_data sa8775p_adsp_resource = { + .crash_reason_smem = 423, + .firmware_name = "adsp.mbn", + .pas_id = 1, + .minidump_id = 5, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "lcx", + "lmx", + NULL + }, + .load_state = "adsp", + .ssr_name = "lpass", + .sysmon_name = "adsp", + .ssctl_id = 0x14, +}; + static const struct adsp_data sdm845_adsp_resource_init = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", @@ -942,6 +959,42 @@ static const struct adsp_data cdsp_resource_init = { .ssctl_id = 0x17, }; +static const struct adsp_data sa8775p_cdsp0_resource = { + .crash_reason_smem = 601, + .firmware_name = "cdsp0.mbn", + .pas_id = 18, + .minidump_id = 7, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "cx", + "mxc", + "nsp", + NULL + }, + .load_state = "cdsp", + .ssr_name = "cdsp", + .sysmon_name = "cdsp", + .ssctl_id = 0x17, +}; + +static const struct adsp_data sa8775p_cdsp1_resource = { + .crash_reason_smem = 633, + .firmware_name = "cdsp1.mbn", + .pas_id = 30, + .minidump_id = 20, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "cx", + "mxc", + "nsp", + NULL + }, + .load_state = "nsp", + .ssr_name = "cdsp1", + .sysmon_name = "cdsp1", + .ssctl_id = 0x20, +}; + static const struct adsp_data sdm845_cdsp_resource_init = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", @@ -1083,6 +1136,40 @@ static const struct adsp_data sm8350_cdsp_resource = { .ssctl_id = 0x17, }; +static const struct adsp_data sa8775p_gpdsp0_resource = { + .crash_reason_smem = 640, + .firmware_name = "gpdsp0.mbn", + .pas_id = 39, + .minidump_id = 21, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "cx", + "mxc", + NULL + }, + .load_state = "gpdsp0", + .ssr_name = 
"gpdsp0", + .sysmon_name = "gpdsp0", + .ssctl_id = 0x21, +}; + +static const struct adsp_data sa8775p_gpdsp1_resource = { + .crash_reason_smem = 641, + .firmware_name = "gpdsp1.mbn", + .pas_id = 40, + .minidump_id = 22, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "cx", + "mxc", + NULL + }, + .load_state = "gpdsp1", + .ssr_name = "gpdsp1", + .sysmon_name = "gpdsp1", + .ssctl_id = 0x22, +}; + static const struct adsp_data mpss_resource_init = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", @@ -1329,6 +1416,11 @@ static const struct of_device_id adsp_of_match[] = { { .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init }, { .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init }, { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init }, + { .compatible = "qcom,sa8775p-adsp-pas", .data = &sa8775p_adsp_resource}, + { .compatible = "qcom,sa8775p-cdsp0-pas", .data = &sa8775p_cdsp0_resource}, + { .compatible = "qcom,sa8775p-cdsp1-pas", .data = &sa8775p_cdsp1_resource}, + { .compatible = "qcom,sa8775p-gpdsp0-pas", .data = &sa8775p_gpdsp0_resource}, + { .compatible = "qcom,sa8775p-gpdsp1-pas", .data = &sa8775p_gpdsp1_resource}, { .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource}, { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init}, { .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource}, @@ -1346,6 +1438,7 @@ static const struct of_device_id adsp_of_match[] = { { .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init}, { .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init}, { .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource}, + { .compatible = "qcom,sdx75-mpss-pas", .data = &sm8650_mpss_resource}, { .compatible = "qcom,sm6115-adsp-pas", .data = &adsp_resource_init}, { .compatible = "qcom,sm6115-cdsp-pas", .data = &cdsp_resource_init}, { .compatible = "qcom,sm6115-mpss-pas", .data = &sc8180x_mpss_resource}, diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c index d17719384c1653..5412beb0a69206 100644 --- a/drivers/remoteproc/st_slim_rproc.c +++ b/drivers/remoteproc/st_slim_rproc.c @@ -259,16 +259,14 @@ struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, slim_rproc->mem[i].size = resource_size(res); } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore"); - slim_rproc->slimcore = devm_ioremap_resource(dev, res); + slim_rproc->slimcore = devm_platform_ioremap_resource_byname(pdev, "slimcore"); if (IS_ERR(slim_rproc->slimcore)) { dev_err(&pdev->dev, "failed to ioremap slimcore IO\n"); err = PTR_ERR(slim_rproc->slimcore); goto err; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals"); - slim_rproc->peri = devm_ioremap_resource(dev, res); + slim_rproc->peri = devm_platform_ioremap_resource_byname(pdev, "peripherals"); if (IS_ERR(slim_rproc->peri)) { dev_err(&pdev->dev, "failed to ioremap peripherals IO\n"); err = PTR_ERR(slim_rproc->peri); diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c index a22d41689a7d23..8be3f631c19206 100644 --- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -115,6 +115,10 @@ static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data) const char *name = kproc->rproc->name; u32 msg = omap_mbox_message(data); + /* Do not forward messages from a detached core */ + if (kproc->rproc->state == RPROC_DETACHED) + return; 
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg); switch (msg) { @@ -155,6 +159,10 @@ static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid) mbox_msg_t msg = (mbox_msg_t)vqid; int ret; + /* Do not forward messages to a detached core */ + if (kproc->rproc->state == RPROC_DETACHED) + return; + /* send the index of the triggered virtqueue in the mailbox payload */ ret = mbox_send_message(kproc->mbox, (void *)msg); if (ret < 0) @@ -230,12 +238,9 @@ static int k3_dsp_rproc_request_mbox(struct rproc *rproc) client->knows_txdone = false; kproc->mbox = mbox_request_channel(client, 0); - if (IS_ERR(kproc->mbox)) { - ret = -EBUSY; - dev_err(dev, "mbox_request_channel failed: %ld\n", - PTR_ERR(kproc->mbox)); - return ret; - } + if (IS_ERR(kproc->mbox)) + return dev_err_probe(dev, PTR_ERR(kproc->mbox), + "mbox_request_channel failed\n"); /* * Ping the remote processor, this is only for sanity-sake for now; @@ -315,32 +320,23 @@ static int k3_dsp_rproc_start(struct rproc *rproc) u32 boot_addr; int ret; - ret = k3_dsp_rproc_request_mbox(rproc); - if (ret) - return ret; - boot_addr = rproc->bootaddr; if (boot_addr & (kproc->data->boot_align_addr - 1)) { dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n", boot_addr, kproc->data->boot_align_addr); - ret = -EINVAL; - goto put_mbox; + return -EINVAL; } dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr); ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0); if (ret) - goto put_mbox; + return ret; ret = k3_dsp_rproc_release(kproc); if (ret) - goto put_mbox; + return ret; return 0; - -put_mbox: - mbox_free_channel(kproc->mbox); - return ret; } /* @@ -353,8 +349,6 @@ static int k3_dsp_rproc_stop(struct rproc *rproc) { struct k3_dsp_rproc *kproc = rproc->priv; - mbox_free_channel(kproc->mbox); - k3_dsp_rproc_reset(kproc); return 0; @@ -363,42 +357,22 @@ static int k3_dsp_rproc_stop(struct rproc *rproc) /* * Attach to a running DSP remote processor (IPC-only mode) * - * This rproc attach callback only needs to request the mailbox, the remote - * processor is already booted, so there is no need to issue any TI-SCI - * commands to boot the DSP core. This callback is invoked only in IPC-only - * mode. + * This rproc attach callback is a NOP. The remote processor is already booted, + * and all required resources have been acquired during probe routine, so there + * is no need to issue any TI-SCI commands to boot the DSP core. This callback + * is invoked only in IPC-only mode and exists because rproc_validate() checks + * for its existence. */ -static int k3_dsp_rproc_attach(struct rproc *rproc) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - int ret; - - ret = k3_dsp_rproc_request_mbox(rproc); - if (ret) - return ret; - - dev_info(dev, "DSP initialized in IPC-only mode\n"); - return 0; -} +static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; } /* * Detach from a running DSP remote processor (IPC-only mode) * - * This rproc detach callback performs the opposite operation to attach callback - * and only needs to release the mailbox, the DSP core is not stopped and will - * be left to continue to run its booted firmware. This callback is invoked only - * in IPC-only mode. + * This rproc detach callback is a NOP. The DSP core is not stopped and will be + * left to continue to run its booted firmware. This callback is invoked only in + * IPC-only mode and exists for sanity sake. 
*/ -static int k3_dsp_rproc_detach(struct rproc *rproc) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - - mbox_free_channel(kproc->mbox); - dev_info(dev, "DSP deinitialized in IPC-only mode\n"); - return 0; -} +static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; } /* * This function implements the .get_loaded_rsc_table() callback and is used @@ -636,32 +610,6 @@ static void k3_dsp_release_tsp(void *data) ti_sci_proc_release(tsp); } -static -struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev, - const struct ti_sci_handle *sci) -{ - struct ti_sci_proc *tsp; - u32 temp[2]; - int ret; - - ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids", - temp, 2); - if (ret < 0) - return ERR_PTR(ret); - - tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL); - if (!tsp) - return ERR_PTR(-ENOMEM); - - tsp->dev = dev; - tsp->sci = sci; - tsp->ops = &sci->ops.proc_ops; - tsp->proc_id = temp[0]; - tsp->host_id = temp[1]; - - return tsp; -} - static int k3_dsp_rproc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -697,6 +645,10 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev) kproc->dev = dev; kproc->data = data; + ret = k3_dsp_rproc_request_mbox(rproc); + if (ret) + return ret; + kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); if (IS_ERR(kproc->ti_sci)) return dev_err_probe(dev, PTR_ERR(kproc->ti_sci), @@ -711,7 +663,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(kproc->reset), "failed to get reset\n"); - kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci); + kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci); if (IS_ERR(kproc->tsp)) return dev_err_probe(dev, PTR_ERR(kproc->tsp), "failed to construct ti-sci proc control\n"); @@ -789,6 +741,8 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev) if (ret) dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret)); } + + mbox_free_channel(kproc->mbox); } static const struct k3_dsp_mem_data c66_mems[] = { diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c new file mode 100644 index 00000000000000..09f0484a90e103 --- /dev/null +++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI K3 Cortex-M4 Remote Processor(s) driver + * + * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/ + * Hari Nagalla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "omap_remoteproc.h" +#include "remoteproc_internal.h" +#include "ti_sci_proc.h" + +#define K3_M4_IRAM_DEV_ADDR 0x00000 +#define K3_M4_DRAM_DEV_ADDR 0x30000 + +/** + * struct k3_m4_rproc_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @dev_addr: Device address of the memory region from remote processor view + * @size: Size of the memory region + */ +struct k3_m4_rproc_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +/** + * struct k3_m4_rproc_mem_data - memory definitions for a remote processor + * @name: name for this memory entry + * @dev_addr: device address for the memory entry + */ +struct k3_m4_rproc_mem_data { + const char *name; + const u32 dev_addr; +}; + +/** + * struct k3_m4_rproc - k3 remote processor driver structure + * @dev: cached device pointer + * @mem: internal memory regions 
data + * @num_mems: number of internal memory regions + * @rmem: reserved memory regions data + * @num_rmems: number of reserved memory regions + * @reset: reset control handle + * @tsp: TI-SCI processor control handle + * @ti_sci: TI-SCI handle + * @ti_sci_id: TI-SCI device identifier + * @mbox: mailbox channel handle + * @client: mailbox client to request the mailbox channel + */ +struct k3_m4_rproc { + struct device *dev; + struct k3_m4_rproc_mem *mem; + int num_mems; + struct k3_m4_rproc_mem *rmem; + int num_rmems; + struct reset_control *reset; + struct ti_sci_proc *tsp; + const struct ti_sci_handle *ti_sci; + u32 ti_sci_id; + struct mbox_chan *mbox; + struct mbox_client client; +}; + +/** + * k3_m4_rproc_mbox_callback() - inbound mailbox message handler + * @client: mailbox client pointer used for requesting the mailbox channel + * @data: mailbox payload + * + * This handler is invoked by the K3 mailbox driver whenever a mailbox + * message is received. Usually, the mailbox payload simply contains + * the index of the virtqueue that is kicked by the remote processor, + * and we let remoteproc core handle it. + * + * In addition to virtqueue indices, we also have some out-of-band values + * that indicate different events. Those values are deliberately very + * large so they don't coincide with virtqueue indices. + */ +static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data) +{ + struct device *dev = client->dev; + struct rproc *rproc = dev_get_drvdata(dev); + u32 msg = (u32)(uintptr_t)(data); + + dev_dbg(dev, "mbox msg: 0x%x\n", msg); + + switch (msg) { + case RP_MBOX_CRASH: + /* + * remoteproc detected an exception, but error recovery is not + * supported. So, just log this for now + */ + dev_err(dev, "K3 rproc %s crashed\n", rproc->name); + break; + case RP_MBOX_ECHO_REPLY: + dev_info(dev, "received echo reply from %s\n", rproc->name); + break; + default: + /* silently handle all other valid messages */ + if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) + return; + if (msg > rproc->max_notifyid) { + dev_dbg(dev, "dropping unknown message 0x%x", msg); + return; + } + /* msg contains the index of the triggered vring */ + if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE) + dev_dbg(dev, "no message was found in vqid %d\n", msg); + } +} + +/* + * Kick the remote processor to notify about pending unprocessed messages. + * The vqid usage is not used and is inconsequential, as the kick is performed + * through a simulated GPIO (a bit in an IPC interrupt-triggering register), + * the remote processor is expected to process both its Tx and Rx virtqueues. + */ +static void k3_m4_rproc_kick(struct rproc *rproc, int vqid) +{ + struct k3_m4_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + u32 msg = (u32)vqid; + int ret; + + /* + * Send the index of the triggered virtqueue in the mailbox payload. + * NOTE: msg is cast to uintptr_t to prevent compiler warnings when + * void* is 64bit. It is safely cast back to u32 in the mailbox driver. + */ + ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg); + if (ret < 0) + dev_err(dev, "failed to send mailbox message, status = %d\n", + ret); +} + +static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc) +{ + struct device *dev = kproc->dev; + int ret; + + /* + * Ping the remote processor, this is only for sanity-sake for now; + * there is no functional effect whatsoever. 
+ * + * Note that the reply will _not_ arrive immediately: this message + * will wait in the mailbox fifo until the remote processor is booted. + */ + ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST); + if (ret < 0) { + dev_err(dev, "mbox_send_message failed: %d\n", ret); + return ret; + } + + return 0; +} + +/* + * The M4 cores have a local reset that affects only the CPU, and a + * generic module reset that powers on the device and allows the internal + * memories to be accessed while the local reset is asserted. This function is + * used to release the global reset on remote cores to allow loading into the + * internal RAMs. The .prepare() ops is invoked by remoteproc core before any + * firmware loading, and is followed by the .start() ops after loading to + * actually let the remote cores to run. + */ +static int k3_m4_rproc_prepare(struct rproc *rproc) +{ + struct k3_m4_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + int ret; + + /* If the core is running already no need to deassert the module reset */ + if (rproc->state == RPROC_DETACHED) + return 0; + + /* + * Ensure the local reset is asserted so the core doesn't + * execute bogus code when the module reset is released. + */ + ret = reset_control_assert(kproc->reset); + if (ret) { + dev_err(dev, "could not assert local reset\n"); + return ret; + } + + ret = reset_control_status(kproc->reset); + if (ret <= 0) { + dev_err(dev, "local reset still not asserted\n"); + return ret; + } + + ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) { + dev_err(dev, "could not deassert module-reset for internal RAM loading\n"); + return ret; + } + + return 0; +} + +/* + * This function implements the .unprepare() ops and performs the complimentary + * operations to that of the .prepare() ops. The function is used to assert the + * global reset on applicable cores. This completes the second portion of + * powering down the remote core. The cores themselves are only halted in the + * .stop() callback through the local reset, and the .unprepare() ops is invoked + * by the remoteproc core after the remoteproc is stopped to balance the global + * reset. + */ +static int k3_m4_rproc_unprepare(struct rproc *rproc) +{ + struct k3_m4_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + int ret; + + /* If the core is going to be detached do not assert the module reset */ + if (rproc->state == RPROC_ATTACHED) + return 0; + + ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) { + dev_err(dev, "module-reset assert failed\n"); + return ret; + } + + return 0; +} + +/* + * This function implements the .get_loaded_rsc_table() callback and is used + * to provide the resource table for a booted remote processor in IPC-only + * mode. The remote processor firmwares follow a design-by-contract approach + * and are expected to have the resource table at the base of the DDR region + * reserved for firmware usage. This provides flexibility for the remote + * processor to be booted by different bootloaders that may or may not have the + * ability to publish the resource table address and size through a DT + * property. 
+ */ +static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc, + size_t *rsc_table_sz) +{ + struct k3_m4_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + + if (!kproc->rmem[0].cpu_addr) { + dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found"); + return ERR_PTR(-ENOMEM); + } + + /* + * NOTE: The resource table size is currently hard-coded to a maximum + * of 256 bytes. The most common resource table usage for K3 firmwares + * is to only have the vdev resource entry and an optional trace entry. + * The exact size could be computed based on resource table address, but + * the hard-coded value suffices to support the IPC-only mode. + */ + *rsc_table_sz = 256; + return (__force struct resource_table *)kproc->rmem[0].cpu_addr; +} + +/* + * Custom function to translate a remote processor device address (internal + * RAMs only) to a kernel virtual address. The remote processors can access + * their RAMs at either an internal address visible only from a remote + * processor, or at the SoC-level bus address. Both these addresses need to be + * looked through for translation. The translated addresses can be used either + * by the remoteproc core for loading (when using kernel remoteproc loader), or + * by any rpmsg bus drivers. + */ +static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) +{ + struct k3_m4_rproc *kproc = rproc->priv; + void __iomem *va = NULL; + phys_addr_t bus_addr; + u32 dev_addr, offset; + size_t size; + int i; + + if (len == 0) + return NULL; + + for (i = 0; i < kproc->num_mems; i++) { + bus_addr = kproc->mem[i].bus_addr; + dev_addr = kproc->mem[i].dev_addr; + size = kproc->mem[i].size; + + /* handle M4-view addresses */ + if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { + offset = da - dev_addr; + va = kproc->mem[i].cpu_addr + offset; + return (__force void *)va; + } + + /* handle SoC-view addresses */ + if (da >= bus_addr && ((da + len) <= (bus_addr + size))) { + offset = da - bus_addr; + va = kproc->mem[i].cpu_addr + offset; + return (__force void *)va; + } + } + + /* handle static DDR reserved memory regions */ + for (i = 0; i < kproc->num_rmems; i++) { + dev_addr = kproc->rmem[i].dev_addr; + size = kproc->rmem[i].size; + + if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { + offset = da - dev_addr; + va = kproc->rmem[i].cpu_addr + offset; + return (__force void *)va; + } + } + + return NULL; +} + +static int k3_m4_rproc_of_get_memories(struct platform_device *pdev, + struct k3_m4_rproc *kproc) +{ + static const char * const mem_names[] = { "iram", "dram" }; + static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR }; + struct device *dev = &pdev->dev; + struct resource *res; + int num_mems; + int i; + + num_mems = ARRAY_SIZE(mem_names); + kproc->mem = devm_kcalloc(kproc->dev, num_mems, + sizeof(*kproc->mem), GFP_KERNEL); + if (!kproc->mem) + return -ENOMEM; + + for (i = 0; i < num_mems; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mem_names[i]); + if (!res) { + dev_err(dev, "found no memory resource for %s\n", + mem_names[i]); + return -EINVAL; + } + if (!devm_request_mem_region(dev, res->start, + resource_size(res), + dev_name(dev))) { + dev_err(dev, "could not request %s region for resource\n", + mem_names[i]); + return -EBUSY; + } + + kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start, + resource_size(res)); + if (!kproc->mem[i].cpu_addr) { + dev_err(dev, "failed to map %s memory\n", + mem_names[i]); + return 
-ENOMEM; + } + kproc->mem[i].bus_addr = res->start; + kproc->mem[i].dev_addr = mem_addrs[i]; + kproc->mem[i].size = resource_size(res); + + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n", + mem_names[i], &kproc->mem[i].bus_addr, + kproc->mem[i].size, kproc->mem[i].cpu_addr, + kproc->mem[i].dev_addr); + } + kproc->num_mems = num_mems; + + return 0; +} + +static void k3_m4_rproc_dev_mem_release(void *data) +{ + struct device *dev = data; + + of_reserved_mem_device_release(dev); +} + +static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc) +{ + struct device *dev = kproc->dev; + struct device_node *np = dev->of_node; + struct device_node *rmem_np; + struct reserved_mem *rmem; + int num_rmems; + int ret, i; + + num_rmems = of_property_count_elems_of_size(np, "memory-region", + sizeof(phandle)); + if (num_rmems < 0) { + dev_err(dev, "device does not reserved memory regions (%d)\n", + num_rmems); + return -EINVAL; + } + if (num_rmems < 2) { + dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n", + num_rmems); + return -EINVAL; + } + + /* use reserved memory region 0 for vring DMA allocations */ + ret = of_reserved_mem_device_init_by_idx(dev, np, 0); + if (ret) { + dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret); + return ret; + } + ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev); + if (ret) + return ret; + + num_rmems--; + kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); + if (!kproc->rmem) + return -ENOMEM; + + /* use remaining reserved memory regions for static carveouts */ + for (i = 0; i < num_rmems; i++) { + rmem_np = of_parse_phandle(np, "memory-region", i + 1); + if (!rmem_np) + return -EINVAL; + + rmem = of_reserved_mem_lookup(rmem_np); + if (!rmem) { + of_node_put(rmem_np); + return -EINVAL; + } + of_node_put(rmem_np); + + kproc->rmem[i].bus_addr = rmem->base; + /* 64-bit address regions currently not supported */ + kproc->rmem[i].dev_addr = (u32)rmem->base; + kproc->rmem[i].size = rmem->size; + kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size); + if (!kproc->rmem[i].cpu_addr) { + dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", + i + 1, &rmem->base, &rmem->size); + return -ENOMEM; + } + + dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + i + 1, &kproc->rmem[i].bus_addr, + kproc->rmem[i].size, kproc->rmem[i].cpu_addr, + kproc->rmem[i].dev_addr); + } + kproc->num_rmems = num_rmems; + + return 0; +} + +static void k3_m4_release_tsp(void *data) +{ + struct ti_sci_proc *tsp = data; + + ti_sci_proc_release(tsp); +} + +/* + * Power up the M4 remote processor. + * + * This function will be invoked only after the firmware for this rproc + * was loaded, parsed successfully, and all of its resource requirements + * were met. This callback is invoked only in remoteproc mode. + */ +static int k3_m4_rproc_start(struct rproc *rproc) +{ + struct k3_m4_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + int ret; + + ret = k3_m4_rproc_ping_mbox(kproc); + if (ret) + return ret; + + ret = reset_control_deassert(kproc->reset); + if (ret) { + dev_err(dev, "local-reset deassert failed, ret = %d\n", ret); + return ret; + } + + return 0; +} + +/* + * Stop the M4 remote processor. + * + * This function puts the M4 processor into reset, and finishes processing + * of any pending messages. This callback is invoked only in remoteproc mode. 
+ */ +static int k3_m4_rproc_stop(struct rproc *rproc) +{ + struct k3_m4_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + int ret; + + ret = reset_control_assert(kproc->reset); + if (ret) { + dev_err(dev, "local-reset assert failed, ret = %d\n", ret); + return ret; + } + + return 0; +} + +/* + * Attach to a running M4 remote processor (IPC-only mode) + * + * The remote processor is already booted, so there is no need to issue any + * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only + * mode. + */ +static int k3_m4_rproc_attach(struct rproc *rproc) +{ + struct k3_m4_rproc *kproc = rproc->priv; + int ret; + + ret = k3_m4_rproc_ping_mbox(kproc); + if (ret) + return ret; + + return 0; +} + +/* + * Detach from a running M4 remote processor (IPC-only mode) + * + * This rproc detach callback performs the opposite operation to attach + * callback, the M4 core is not stopped and will be left to continue to + * run its booted firmware. This callback is invoked only in IPC-only mode. + */ +static int k3_m4_rproc_detach(struct rproc *rproc) +{ + return 0; +} + +static const struct rproc_ops k3_m4_rproc_ops = { + .prepare = k3_m4_rproc_prepare, + .unprepare = k3_m4_rproc_unprepare, + .start = k3_m4_rproc_start, + .stop = k3_m4_rproc_stop, + .attach = k3_m4_rproc_attach, + .detach = k3_m4_rproc_detach, + .kick = k3_m4_rproc_kick, + .da_to_va = k3_m4_rproc_da_to_va, + .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table, +}; + +static int k3_m4_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct k3_m4_rproc *kproc; + struct rproc *rproc; + const char *fw_name; + bool r_state = false; + bool p_state = false; + int ret; + + ret = rproc_of_parse_firmware(dev, 0, &fw_name); + if (ret) + return dev_err_probe(dev, ret, "failed to parse firmware-name property\n"); + + rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_m4_rproc_ops, fw_name, + sizeof(*kproc)); + if (!rproc) + return -ENOMEM; + + rproc->has_iommu = false; + rproc->recovery_disabled = true; + kproc = rproc->priv; + kproc->dev = dev; + platform_set_drvdata(pdev, rproc); + + kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); + if (IS_ERR(kproc->ti_sci)) + return dev_err_probe(dev, PTR_ERR(kproc->ti_sci), + "failed to get ti-sci handle\n"); + + ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &kproc->ti_sci_id); + if (ret) + return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n"); + + kproc->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(kproc->reset)) + return dev_err_probe(dev, PTR_ERR(kproc->reset), "failed to get reset\n"); + + kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci); + if (IS_ERR(kproc->tsp)) + return dev_err_probe(dev, PTR_ERR(kproc->tsp), + "failed to construct ti-sci proc control\n"); + + ret = ti_sci_proc_request(kproc->tsp); + if (ret < 0) + return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n"); + ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp); + if (ret) + return ret; + + ret = k3_m4_rproc_of_get_memories(pdev, kproc); + if (ret) + return ret; + + ret = k3_m4_reserved_mem_init(kproc); + if (ret) + return dev_err_probe(dev, ret, "reserved memory init failed\n"); + + ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id, + &r_state, &p_state); + if (ret) + return dev_err_probe(dev, ret, + "failed to get initial state, mode cannot be determined\n"); + + /* configure devices for either remoteproc or IPC-only mode */ + if (p_state) { + rproc->state = 
RPROC_DETACHED; + dev_info(dev, "configured M4F for IPC-only mode\n"); + } else { + dev_info(dev, "configured M4F for remoteproc mode\n"); + } + + kproc->client.dev = dev; + kproc->client.tx_done = NULL; + kproc->client.rx_callback = k3_m4_rproc_mbox_callback; + kproc->client.tx_block = false; + kproc->client.knows_txdone = false; + kproc->mbox = mbox_request_channel(&kproc->client, 0); + if (IS_ERR(kproc->mbox)) + return dev_err_probe(dev, PTR_ERR(kproc->mbox), + "mbox_request_channel failed\n"); + + ret = devm_rproc_add(dev, rproc); + if (ret) + return dev_err_probe(dev, ret, + "failed to register device with remoteproc core\n"); + + return 0; +} + +static const struct of_device_id k3_m4_of_match[] = { + { .compatible = "ti,am64-m4fss", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, k3_m4_of_match); + +static struct platform_driver k3_m4_rproc_driver = { + .probe = k3_m4_rproc_probe, + .driver = { + .name = "k3-m4-rproc", + .of_match_table = k3_m4_of_match, + }, +}; +module_platform_driver(k3_m4_rproc_driver); + +MODULE_AUTHOR("Hari Nagalla "); +MODULE_DESCRIPTION("TI K3 M4 Remoteproc driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index 39a47540c59006..747ee467da88c9 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -194,6 +194,10 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data) const char *name = kproc->rproc->name; u32 msg = omap_mbox_message(data); + /* Do not forward message from a detached core */ + if (kproc->rproc->state == RPROC_DETACHED) + return; + dev_dbg(dev, "mbox msg: 0x%x\n", msg); switch (msg) { @@ -229,6 +233,10 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid) mbox_msg_t msg = (mbox_msg_t)vqid; int ret; + /* Do not forward message to a detached core */ + if (kproc->rproc->state == RPROC_DETACHED) + return; + /* send the index of the triggered virtqueue in the mailbox payload */ ret = mbox_send_message(kproc->mbox, (void *)msg); if (ret < 0) @@ -399,12 +407,9 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc) client->knows_txdone = false; kproc->mbox = mbox_request_channel(client, 0); - if (IS_ERR(kproc->mbox)) { - ret = -EBUSY; - dev_err(dev, "mbox_request_channel failed: %ld\n", - PTR_ERR(kproc->mbox)); - return ret; - } + if (IS_ERR(kproc->mbox)) + return dev_err_probe(dev, PTR_ERR(kproc->mbox), + "mbox_request_channel failed\n"); /* * Ping the remote processor, this is only for sanity-sake for now; @@ -464,8 +469,6 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) ret); return ret; } - core->released_from_reset = true; - wake_up_interruptible(&cluster->core_transition); /* * Newer IP revisions like on J7200 SoCs support h/w auto-initialization @@ -552,10 +555,6 @@ static int k3_r5_rproc_start(struct rproc *rproc) u32 boot_addr; int ret; - ret = k3_r5_rproc_request_mbox(rproc); - if (ret) - return ret; - boot_addr = rproc->bootaddr; /* TODO: add boot_addr sanity checking */ dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr); @@ -564,7 +563,7 @@ static int k3_r5_rproc_start(struct rproc *rproc) core = kproc->core; ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0); if (ret) - goto put_mbox; + return ret; /* unhalt/run all applicable cores */ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { @@ -580,13 +579,15 @@ static int k3_r5_rproc_start(struct rproc *rproc) if (core != core0 && core0->rproc->state == RPROC_OFFLINE) { dev_err(dev, "%s: can not 
start core 1 before core 0\n", __func__); - ret = -EPERM; - goto put_mbox; + return -EPERM; } ret = k3_r5_core_run(core); if (ret) - goto put_mbox; + return ret; + + core->released_from_reset = true; + wake_up_interruptible(&cluster->core_transition); } return 0; @@ -596,8 +597,6 @@ static int k3_r5_rproc_start(struct rproc *rproc) if (k3_r5_core_halt(core)) dev_warn(core->dev, "core halt back failed\n"); } -put_mbox: - mbox_free_channel(kproc->mbox); return ret; } @@ -658,8 +657,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc) goto out; } - mbox_free_channel(kproc->mbox); - return 0; unroll_core_halt: @@ -674,42 +671,22 @@ static int k3_r5_rproc_stop(struct rproc *rproc) /* * Attach to a running R5F remote processor (IPC-only mode) * - * The R5F attach callback only needs to request the mailbox, the remote - * processor is already booted, so there is no need to issue any TI-SCI - * commands to boot the R5F cores in IPC-only mode. This callback is invoked - * only in IPC-only mode. + * The R5F attach callback is a NOP. The remote processor is already booted, and + * all required resources have been acquired during probe routine, so there is + * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode. + * This callback is invoked only in IPC-only mode and exists because + * rproc_validate() checks for its existence. */ -static int k3_r5_rproc_attach(struct rproc *rproc) -{ - struct k3_r5_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - int ret; - - ret = k3_r5_rproc_request_mbox(rproc); - if (ret) - return ret; - - dev_info(dev, "R5F core initialized in IPC-only mode\n"); - return 0; -} +static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; } /* * Detach from a running R5F remote processor (IPC-only mode) * - * The R5F detach callback performs the opposite operation to attach callback - * and only needs to release the mailbox, the R5F cores are not stopped and - * will be left in booted state in IPC-only mode. This callback is invoked - * only in IPC-only mode. + * The R5F detach callback is a NOP. The R5F cores are not stopped and will be + * left in booted state in IPC-only mode. This callback is invoked only in + * IPC-only mode and exists for sanity sake. 
*/ -static int k3_r5_rproc_detach(struct rproc *rproc) -{ - struct k3_r5_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - - mbox_free_channel(kproc->mbox); - dev_info(dev, "R5F core deinitialized in IPC-only mode\n"); - return 0; -} +static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; } /* * This function implements the .get_loaded_rsc_table() callback and is used @@ -1259,8 +1236,8 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) goto out; } - rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops, - fw_name, sizeof(*kproc)); + rproc = devm_rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops, + fw_name, sizeof(*kproc)); if (!rproc) { ret = -ENOMEM; goto out; @@ -1278,9 +1255,13 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) kproc->rproc = rproc; core->rproc = rproc; + ret = k3_r5_rproc_request_mbox(rproc); + if (ret) + return ret; + ret = k3_r5_rproc_configure_mode(kproc); if (ret < 0) - goto err_config; + goto out; if (ret) goto init_rmem; @@ -1288,7 +1269,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) if (ret) { dev_err(dev, "initial configure failed, ret = %d\n", ret); - goto err_config; + goto out; } init_rmem: @@ -1298,7 +1279,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) if (ret) { dev_err(dev, "reserved memory init failed, ret = %d\n", ret); - goto err_config; + goto out; } ret = rproc_add(rproc); @@ -1332,7 +1313,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) dev_err(dev, "Timed out waiting for %s core to power up!\n", rproc->name); - return ret; + goto err_powerup; } } @@ -1348,12 +1329,10 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) } } +err_powerup: rproc_del(rproc); err_add: k3_r5_reserved_mem_exit(kproc); -err_config: - rproc_free(rproc); - core->rproc = NULL; out: /* undo core0 upon any failures on core1 in split-mode */ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) { @@ -1395,12 +1374,11 @@ static void k3_r5_cluster_rproc_exit(void *data) } } + mbox_free_channel(kproc->mbox); + rproc_del(rproc); k3_r5_reserved_mem_exit(kproc); - - rproc_free(rproc); - core->rproc = NULL; } } @@ -1533,32 +1511,6 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev, return 0; } -static -struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev, - const struct ti_sci_handle *sci) -{ - struct ti_sci_proc *tsp; - u32 temp[2]; - int ret; - - ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids", - temp, 2); - if (ret < 0) - return ERR_PTR(ret); - - tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL); - if (!tsp) - return ERR_PTR(-ENOMEM); - - tsp->dev = dev; - tsp->sci = sci; - tsp->ops = &sci->ops.proc_ops; - tsp->proc_id = temp[0]; - tsp->host_id = temp[1]; - - return tsp; -} - static int k3_r5_core_of_init(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1633,7 +1585,7 @@ static int k3_r5_core_of_init(struct platform_device *pdev) goto err; } - core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci); + core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci); if (IS_ERR(core->tsp)) { ret = PTR_ERR(core->tsp); dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n", diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h index 778558abcdcc53..f3911ce75252ec 100644 --- a/drivers/remoteproc/ti_sci_proc.h +++ b/drivers/remoteproc/ti_sci_proc.h @@ -28,6 +28,32 @@ struct ti_sci_proc { u8 host_id; }; +static inline +struct ti_sci_proc 
*ti_sci_proc_of_get_tsp(struct device *dev, + const struct ti_sci_handle *sci) +{ + struct ti_sci_proc *tsp; + u32 temp[2]; + int ret; + + ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids", + temp, 2); + if (ret < 0) + return ERR_PTR(ret); + + tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL); + if (!tsp) + return ERR_PTR(-ENOMEM); + + tsp->dev = dev; + tsp->sci = sci; + tsp->ops = &sci->ops.proc_ops; + tsp->proc_id = temp[0]; + tsp->host_id = temp[1]; + + return tsp; +} + static inline int ti_sci_proc_request(struct ti_sci_proc *tsp) { int ret; diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c index 596f3ffb89353f..5aeedeaf3c415e 100644 --- a/drivers/remoteproc/xlnx_r5_remoteproc.c +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c @@ -56,6 +56,17 @@ struct mem_bank_data { char *bank_name; }; +/** + * struct zynqmp_sram_bank - sram bank description + * + * @sram_res: sram address region information + * @da: device address of sram + */ +struct zynqmp_sram_bank { + struct resource sram_res; + u32 da; +}; + /** * struct mbox_info * @@ -120,6 +131,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = { * struct zynqmp_r5_core * * @rsc_tbl_va: resource table virtual address + * @sram: Array of sram memories assigned to this core + * @num_sram: number of sram for this core * @dev: device of RPU instance * @np: device node of RPU instance * @tcm_bank_count: number TCM banks accessible to this RPU @@ -131,6 +144,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = { */ struct zynqmp_r5_core { void __iomem *rsc_tbl_va; + struct zynqmp_sram_bank *sram; + int num_sram; struct device *dev; struct device_node *np; int tcm_bank_count; @@ -494,6 +509,45 @@ static int add_mem_regions_carveout(struct rproc *rproc) return 0; } +static int add_sram_carveouts(struct rproc *rproc) +{ + struct zynqmp_r5_core *r5_core = rproc->priv; + struct rproc_mem_entry *rproc_mem; + struct zynqmp_sram_bank *sram; + dma_addr_t dma_addr; + size_t len; + int da, i; + + for (i = 0; i < r5_core->num_sram; i++) { + sram = &r5_core->sram[i]; + + dma_addr = (dma_addr_t)sram->sram_res.start; + + len = resource_size(&sram->sram_res); + da = sram->da; + + rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL, + dma_addr, + len, da, + zynqmp_r5_mem_region_map, + zynqmp_r5_mem_region_unmap, + sram->sram_res.name); + if (!rproc_mem) { + dev_err(&rproc->dev, "failed to add sram %s da=0x%x, size=0x%lx", + sram->sram_res.name, da, len); + return -ENOMEM; + } + + rproc_add_carveout(rproc, rproc_mem); + rproc_coredump_add_segment(rproc, da, len); + + dev_dbg(&rproc->dev, "sram carveout %s addr=%llx, da=0x%x, size=0x%lx", + sram->sram_res.name, dma_addr, da, len); + } + + return 0; +} + /* * tcm_mem_unmap() * @rproc: single R5 core's corresponding rproc instance @@ -669,6 +723,12 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc) return ret; } + ret = add_sram_carveouts(rproc); + if (ret) { + dev_err(&rproc->dev, "failed to get sram carveout %d\n", ret); + return ret; + } + return 0; } @@ -881,6 +941,77 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev) return ERR_PTR(ret); } +static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_core *r5_core) +{ + struct device_node *np = r5_core->np; + struct device *dev = r5_core->dev; + struct zynqmp_sram_bank *sram; + struct device_node *sram_np; + int num_sram, i, ret; + u64 abs_addr, size; + + /* "sram" is optional property. Do not fail, if unavailable. 
*/ + if (!of_property_present(r5_core->np, "sram")) + return 0; + + num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle)); + if (num_sram <= 0) { + dev_err(dev, "Invalid sram property, ret = %d\n", + num_sram); + return -EINVAL; + } + + sram = devm_kcalloc(dev, num_sram, + sizeof(struct zynqmp_sram_bank), GFP_KERNEL); + if (!sram) + return -ENOMEM; + + for (i = 0; i < num_sram; i++) { + sram_np = of_parse_phandle(np, "sram", i); + if (!sram_np) { + dev_err(dev, "failed to get sram %d phandle\n", i); + return -EINVAL; + } + + if (!of_device_is_available(sram_np)) { + dev_err(dev, "sram device not available\n"); + ret = -EINVAL; + goto fail_sram_get; + } + + ret = of_address_to_resource(sram_np, 0, &sram[i].sram_res); + if (ret) { + dev_err(dev, "addr to res failed\n"); + goto fail_sram_get; + } + + /* Get SRAM device address */ + ret = of_property_read_reg(sram_np, i, &abs_addr, &size); + if (ret) { + dev_err(dev, "failed to get reg property\n"); + goto fail_sram_get; + } + + sram[i].da = (u32)abs_addr; + + of_node_put(sram_np); + + dev_dbg(dev, "sram %d: name=%s, addr=0x%llx, da=0x%x, size=0x%llx\n", + i, sram[i].sram_res.name, sram[i].sram_res.start, + sram[i].da, resource_size(&sram[i].sram_res)); + } + + r5_core->sram = sram; + r5_core->num_sram = num_sram; + + return 0; + +fail_sram_get: + of_node_put(sram_np); + + return ret; +} + static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster) { int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count; @@ -1059,7 +1190,7 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster, r5_core = cluster->r5_cores[0]; /* Maintain backward compatibility for zynqmp by using hardcode TCM address. */ - if (of_find_property(r5_core->np, "reg", NULL)) + if (of_property_present(r5_core->np, "reg")) ret = zynqmp_r5_get_tcm_node_from_dt(cluster); else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss")) ret = zynqmp_r5_get_tcm_node(cluster); @@ -1086,7 +1217,7 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster, return ret; } - if (of_find_property(dev_of_node(dev), "xlnx,tcm-mode", NULL) || + if (of_property_present(dev_of_node(dev), "xlnx,tcm-mode") || device_is_compatible(dev, "xlnx,zynqmp-r5fss")) { ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode); @@ -1095,6 +1226,10 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster, return ret; } } + + ret = zynqmp_r5_get_sram_banks(r5_core); + if (ret) + return ret; } return 0; @@ -1147,7 +1282,7 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster) return -EINVAL; } - if (of_find_property(dev_node, "xlnx,tcm-mode", NULL)) { + if (of_property_present(dev_node, "xlnx,tcm-mode")) { ret = of_property_read_u32(dev_node, "xlnx,tcm-mode", (u32 *)&tcm_mode); if (ret) return ret; diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 67bce340a87eb7..5484a65f66b953 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -66,6 +66,19 @@ config RESET_BRCMSTB_RESCAL This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on BCM7216. +config RESET_EYEQ + bool "Mobileye EyeQ reset controller" + depends on MACH_EYEQ5 || MACH_EYEQ6H || COMPILE_TEST + select AUXILIARY_BUS + default MACH_EYEQ5 || MACH_EYEQ6H + help + This enables the Mobileye EyeQ reset controller, used in EyeQ5, EyeQ6L + and EyeQ6H SoCs. + + It has one or more domains, with a varying number of resets in each. + Registers are located in a shared register region called OLB. EyeQ6H + has multiple reset instances. 
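A minimal illustrative sketch (assumptions: the eqr_example_* helper names are hypothetical; only ID_DOMAIN_MASK/ID_OFFSET_MASK come from the reset-eyeq.c code introduced further down) of how a two-cell (domain, offset) reset specifier collapses into the single id that the reset ops receive:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define ID_DOMAIN_MASK	GENMASK(7, 0)
#define ID_OFFSET_MASK	GENMASK(31, 8)

/* Pack a (domain, offset) pair into one reset id, as eqr_of_xlate_internal() does. */
static inline unsigned long eqr_example_pack_id(u32 domain, u32 offset)
{
	return FIELD_PREP(ID_DOMAIN_MASK, domain) | FIELD_PREP(ID_OFFSET_MASK, offset);
}

/* Unpack the id again, as eqr_assert()/eqr_deassert()/eqr_status() do on entry. */
static inline void eqr_example_unpack_id(unsigned long id, u32 *domain, u32 *offset)
{
	*domain = FIELD_GET(ID_DOMAIN_MASK, id);
	*offset = FIELD_GET(ID_OFFSET_MASK, id);
}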
+ config RESET_GPIO tristate "GPIO reset controller" depends on GPIOLIB diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 27b0bbdfcc044b..4411a2a124d7de 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o +obj-$(CONFIG_RESET_EYEQ) += reset-eyeq.o obj-$(CONFIG_RESET_GPIO) += reset-gpio.o obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o obj-$(CONFIG_RESET_IMX7) += reset-imx7.o diff --git a/drivers/reset/core.c b/drivers/reset/core.c index dba74e857be621..4d509d41456ad8 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -812,6 +812,7 @@ __reset_control_get_internal(struct reset_controller_dev *rcdev, kref_init(&rstc->refcnt); rstc->acquired = acquired; rstc->shared = shared; + get_device(rcdev->dev); return rstc; } @@ -826,6 +827,7 @@ static void __reset_control_release(struct kref *kref) module_put(rstc->rcdev->owner); list_del(&rstc->list); + put_device(rstc->rcdev->dev); kfree(rstc); } @@ -916,20 +918,18 @@ static int __reset_add_reset_gpio_device(const struct of_phandle_args *args) */ lockdep_assert_not_held(&reset_list_mutex); - mutex_lock(&reset_gpio_lookup_mutex); + guard(mutex)(&reset_gpio_lookup_mutex); list_for_each_entry(rgpio_dev, &reset_gpio_lookup_list, list) { if (args->np == rgpio_dev->of_args.np) { if (of_phandle_args_equal(args, &rgpio_dev->of_args)) - goto out; /* Already on the list, done */ + return 0; /* Already on the list, done */ } } id = ida_alloc(&reset_gpio_ida, GFP_KERNEL); - if (id < 0) { - ret = id; - goto err_unlock; - } + if (id < 0) + return id; /* Not freed on success, because it is persisent subsystem data. */ rgpio_dev = kzalloc(sizeof(*rgpio_dev), GFP_KERNEL); @@ -959,9 +959,6 @@ static int __reset_add_reset_gpio_device(const struct of_phandle_args *args) list_add(&rgpio_dev->list, &reset_gpio_lookup_list); -out: - mutex_unlock(&reset_gpio_lookup_mutex); - return 0; err_put: @@ -970,8 +967,6 @@ static int __reset_add_reset_gpio_device(const struct of_phandle_args *args) kfree(rgpio_dev); err_ida_free: ida_free(&reset_gpio_ida, id); -err_unlock: - mutex_unlock(&reset_gpio_lookup_mutex); return ret; } diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c index 2537ec05eceefd..578fe867080ce0 100644 --- a/drivers/reset/reset-berlin.c +++ b/drivers/reset/reset-berlin.c @@ -68,13 +68,14 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev, static int berlin2_reset_probe(struct platform_device *pdev) { - struct device_node *parent_np = of_get_parent(pdev->dev.of_node); + struct device_node *parent_np; struct berlin_reset_priv *priv; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + parent_np = of_get_parent(pdev->dev.of_node); priv->regmap = syscon_node_to_regmap(parent_np); of_node_put(parent_np); if (IS_ERR(priv->regmap)) diff --git a/drivers/reset/reset-eyeq.c b/drivers/reset/reset-eyeq.c new file mode 100644 index 00000000000000..02d50041048b42 --- /dev/null +++ b/drivers/reset/reset-eyeq.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Reset driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms. + * + * Controllers live in a shared register region called OLB. EyeQ5 and EyeQ6L + * have a single OLB instance for a single reset controller. EyeQ6H has seven + * OLB instances; three host reset controllers. 
+ * + * Each reset controller has one or more domain. Domains are of a given type + * (see enum eqr_domain_type), with a valid offset mask (up to 32 resets per + * domain). + * + * Domain types define expected behavior: one-register-per-reset, + * one-bit-per-reset, status detection method, busywait duration, etc. + * + * We use eqr_ as prefix, as-in "EyeQ Reset", but way shorter. + * + * Known resets in EyeQ5 domain 0 (type EQR_EYEQ5_SARCR): + * 3. CAN0 4. CAN1 5. CAN2 6. SPI0 + * 7. SPI1 8. SPI2 9. SPI3 10. UART0 + * 11. UART1 12. UART2 13. I2C0 14. I2C1 + * 15. I2C2 16. I2C3 17. I2C4 18. TIMER0 + * 19. TIMER1 20. TIMER2 21. TIMER3 22. TIMER4 + * 23. WD0 24. EXT0 25. EXT1 26. GPIO + * 27. WD1 + * + * Known resets in EyeQ5 domain 1 (type EQR_EYEQ5_ACRP): + * 0. VMP0 1. VMP1 2. VMP2 3. VMP3 + * 4. PMA0 5. PMA1 6. PMAC0 7. PMAC1 + * 8. MPC0 9. MPC1 10. MPC2 11. MPC3 + * 12. MPC4 + * + * Known resets in EyeQ5 domain 2 (type EQR_EYEQ5_PCIE): + * 0. PCIE0_CORE 1. PCIE0_APB 2. PCIE0_LINK_AXI 3. PCIE0_LINK_MGMT + * 4. PCIE0_LINK_HOT 5. PCIE0_LINK_PIPE 6. PCIE1_CORE 7. PCIE1_APB + * 8. PCIE1_LINK_AXI 9. PCIE1_LINK_MGMT 10. PCIE1_LINK_HOT 11. PCIE1_LINK_PIPE + * 12. MULTIPHY 13. MULTIPHY_APB 15. PCIE0_LINK_MGMT 16. PCIE1_LINK_MGMT + * 17. PCIE0_LINK_PM 18. PCIE1_LINK_PM + * + * Known resets in EyeQ6L domain 0 (type EQR_EYEQ5_SARCR): + * 0. SPI0 1. SPI1 2. UART0 3. I2C0 + * 4. I2C1 5. TIMER0 6. TIMER1 7. TIMER2 + * 8. TIMER3 9. WD0 10. WD1 11. EXT0 + * 12. EXT1 13. GPIO + * + * Known resets in EyeQ6L domain 1 (type EQR_EYEQ5_ACRP): + * 0. VMP0 1. VMP1 2. VMP2 3. VMP3 + * 4. PMA0 5. PMA1 6. PMAC0 7. PMAC1 + * 8. MPC0 9. MPC1 10. MPC2 11. MPC3 + * 12. MPC4 + * + * Known resets in EyeQ6H west/east (type EQR_EYEQ6H_SARCR): + * 0. CAN 1. SPI0 2. SPI1 3. UART0 + * 4. UART1 5. I2C0 6. I2C1 7. -hole- + * 8. TIMER0 9. TIMER1 10. WD 11. EXT TIMER + * 12. GPIO + * + * Known resets in EyeQ6H acc (type EQR_EYEQ5_ACRP): + * 1. XNN0 2. XNN1 3. XNN2 4. XNN3 + * 5. VMP0 6. VMP1 7. VMP2 8. VMP3 + * 9. PMA0 10. PMA1 11. MPC0 12. MPC1 + * 13. MPC2 14. MPC3 15. PERIPH + * + * Abbreviations: + * - PMA: Programmable Macro Array + * - MPC: Multi-threaded Processing Clusters + * - VMP: Vector Microcode Processors + * + * Copyright (C) 2024 Mobileye Vision Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * A reset ID, as returned by eqr_of_xlate_*(), is a (domain, offset) pair. + * Low byte is domain, rest is offset. + */ +#define ID_DOMAIN_MASK GENMASK(7, 0) +#define ID_OFFSET_MASK GENMASK(31, 8) + +enum eqr_domain_type { + EQR_EYEQ5_SARCR, + EQR_EYEQ5_ACRP, + EQR_EYEQ5_PCIE, + EQR_EYEQ6H_SARCR, +}; + +/* + * Domain type EQR_EYEQ5_SARCR register offsets. + */ +#define EQR_EYEQ5_SARCR_REQUEST (0x000) +#define EQR_EYEQ5_SARCR_STATUS (0x004) + +/* + * Domain type EQR_EYEQ5_ACRP register masks. + * Registers are: base + 4 * offset. + */ +#define EQR_EYEQ5_ACRP_PD_REQ BIT(0) +#define EQR_EYEQ5_ACRP_ST_POWER_DOWN BIT(27) +#define EQR_EYEQ5_ACRP_ST_ACTIVE BIT(29) + +/* + * Domain type EQR_EYEQ6H_SARCR register offsets. 
+ */ +#define EQR_EYEQ6H_SARCR_RST_REQUEST (0x000) +#define EQR_EYEQ6H_SARCR_CLK_STATUS (0x004) +#define EQR_EYEQ6H_SARCR_RST_STATUS (0x008) +#define EQR_EYEQ6H_SARCR_CLK_REQUEST (0x00C) + +struct eqr_busy_wait_timings { + unsigned long sleep_us; + unsigned long timeout_us; +}; + +static const struct eqr_busy_wait_timings eqr_timings[] = { + [EQR_EYEQ5_SARCR] = {1, 10}, + [EQR_EYEQ5_ACRP] = {1, 40 * USEC_PER_MSEC}, /* LBIST implies long timeout. */ + /* EQR_EYEQ5_PCIE does no busy waiting. */ + [EQR_EYEQ6H_SARCR] = {1, 400}, +}; + +#define EQR_MAX_DOMAIN_COUNT 3 + +struct eqr_domain_descriptor { + enum eqr_domain_type type; + u32 valid_mask; + unsigned int offset; +}; + +struct eqr_match_data { + unsigned int domain_count; + const struct eqr_domain_descriptor *domains; +}; + +struct eqr_private { + /* + * One mutex per domain for read-modify-write operations on registers. + * Some domains can be involved in LBIST which implies long critical + * sections; we wouldn't want other domains to be impacted by that. + */ + struct mutex mutexes[EQR_MAX_DOMAIN_COUNT]; + void __iomem *base; + const struct eqr_match_data *data; + struct reset_controller_dev rcdev; +}; + +static inline struct eqr_private *eqr_rcdev_to_priv(struct reset_controller_dev *x) +{ + return container_of(x, struct eqr_private, rcdev); +} + +static u32 eqr_double_readl(void __iomem *addr_a, void __iomem *addr_b, + u32 *dest_a, u32 *dest_b) +{ + *dest_a = readl(addr_a); + *dest_b = readl(addr_b); + return 0; /* read_poll_timeout() op argument must return something. */ +} + +static int eqr_busy_wait_locked(struct eqr_private *priv, struct device *dev, + u32 domain, u32 offset, bool assert) +{ + void __iomem *base = priv->base + priv->data->domains[domain].offset; + enum eqr_domain_type domain_type = priv->data->domains[domain].type; + unsigned long timeout_us = eqr_timings[domain_type].timeout_us; + unsigned long sleep_us = eqr_timings[domain_type].sleep_us; + u32 val, mask, rst_status, clk_status; + void __iomem *reg; + int ret; + + lockdep_assert_held(&priv->mutexes[domain]); + + switch (domain_type) { + case EQR_EYEQ5_SARCR: + reg = base + EQR_EYEQ5_SARCR_STATUS; + mask = BIT(offset); + + ret = readl_poll_timeout(reg, val, !(val & mask) == assert, + sleep_us, timeout_us); + break; + + case EQR_EYEQ5_ACRP: + reg = base + 4 * offset; + if (assert) + mask = EQR_EYEQ5_ACRP_ST_POWER_DOWN; + else + mask = EQR_EYEQ5_ACRP_ST_ACTIVE; + + ret = readl_poll_timeout(reg, val, !!(val & mask), + sleep_us, timeout_us); + break; + + case EQR_EYEQ5_PCIE: + ret = 0; /* No busy waiting. 
*/ + break; + + case EQR_EYEQ6H_SARCR: + /* + * Wait until both bits change: + * readl(base + EQR_EYEQ6H_SARCR_RST_STATUS) & BIT(offset) + * readl(base + EQR_EYEQ6H_SARCR_CLK_STATUS) & BIT(offset) + */ + mask = BIT(offset); + ret = read_poll_timeout(eqr_double_readl, val, + (!(rst_status & mask) == assert) && + (!(clk_status & mask) == assert), + sleep_us, timeout_us, false, + base + EQR_EYEQ6H_SARCR_RST_STATUS, + base + EQR_EYEQ6H_SARCR_CLK_STATUS, + &rst_status, &clk_status); + break; + + default: + WARN_ON(1); + ret = -EINVAL; + break; + } + + if (ret == -ETIMEDOUT) + dev_dbg(dev, "%u-%u: timeout\n", domain, offset); + return ret; +} + +static void eqr_assert_locked(struct eqr_private *priv, u32 domain, u32 offset) +{ + enum eqr_domain_type domain_type = priv->data->domains[domain].type; + void __iomem *base, *reg; + u32 val; + + lockdep_assert_held(&priv->mutexes[domain]); + + base = priv->base + priv->data->domains[domain].offset; + + switch (domain_type) { + case EQR_EYEQ5_SARCR: + reg = base + EQR_EYEQ5_SARCR_REQUEST; + writel(readl(reg) & ~BIT(offset), reg); + break; + + case EQR_EYEQ5_ACRP: + reg = base + 4 * offset; + writel(readl(reg) | EQR_EYEQ5_ACRP_PD_REQ, reg); + break; + + case EQR_EYEQ5_PCIE: + writel(readl(base) & ~BIT(offset), base); + break; + + case EQR_EYEQ6H_SARCR: + /* RST_REQUEST and CLK_REQUEST must be kept in sync. */ + val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST); + val &= ~BIT(offset); + writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST); + writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST); + break; + + default: + WARN_ON(1); + break; + } +} + +static int eqr_assert(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct eqr_private *priv = eqr_rcdev_to_priv(rcdev); + u32 domain = FIELD_GET(ID_DOMAIN_MASK, id); + u32 offset = FIELD_GET(ID_OFFSET_MASK, id); + + dev_dbg(rcdev->dev, "%u-%u: assert request\n", domain, offset); + + guard(mutex)(&priv->mutexes[domain]); + + eqr_assert_locked(priv, domain, offset); + return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, true); +} + +static void eqr_deassert_locked(struct eqr_private *priv, u32 domain, + u32 offset) +{ + enum eqr_domain_type domain_type = priv->data->domains[domain].type; + void __iomem *base, *reg; + u32 val; + + lockdep_assert_held(&priv->mutexes[domain]); + + base = priv->base + priv->data->domains[domain].offset; + + switch (domain_type) { + case EQR_EYEQ5_SARCR: + reg = base + EQR_EYEQ5_SARCR_REQUEST; + writel(readl(reg) | BIT(offset), reg); + break; + + case EQR_EYEQ5_ACRP: + reg = base + 4 * offset; + writel(readl(reg) & ~EQR_EYEQ5_ACRP_PD_REQ, reg); + break; + + case EQR_EYEQ5_PCIE: + writel(readl(base) | BIT(offset), base); + break; + + case EQR_EYEQ6H_SARCR: + /* RST_REQUEST and CLK_REQUEST must be kept in sync. 
*/ + val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST); + val |= BIT(offset); + writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST); + writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST); + break; + + default: + WARN_ON(1); + break; + } +} + +static int eqr_deassert(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct eqr_private *priv = eqr_rcdev_to_priv(rcdev); + u32 domain = FIELD_GET(ID_DOMAIN_MASK, id); + u32 offset = FIELD_GET(ID_OFFSET_MASK, id); + + dev_dbg(rcdev->dev, "%u-%u: deassert request\n", domain, offset); + + guard(mutex)(&priv->mutexes[domain]); + + eqr_deassert_locked(priv, domain, offset); + return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, false); +} + +static int eqr_status(struct reset_controller_dev *rcdev, unsigned long id) +{ + u32 domain = FIELD_GET(ID_DOMAIN_MASK, id); + u32 offset = FIELD_GET(ID_OFFSET_MASK, id); + struct eqr_private *priv = eqr_rcdev_to_priv(rcdev); + enum eqr_domain_type domain_type = priv->data->domains[domain].type; + void __iomem *base, *reg; + + dev_dbg(rcdev->dev, "%u-%u: status request\n", domain, offset); + + guard(mutex)(&priv->mutexes[domain]); + + base = priv->base + priv->data->domains[domain].offset; + + switch (domain_type) { + case EQR_EYEQ5_SARCR: + reg = base + EQR_EYEQ5_SARCR_STATUS; + return !(readl(reg) & BIT(offset)); + case EQR_EYEQ5_ACRP: + reg = base + 4 * offset; + return !(readl(reg) & EQR_EYEQ5_ACRP_ST_ACTIVE); + case EQR_EYEQ5_PCIE: + return !(readl(base) & BIT(offset)); + case EQR_EYEQ6H_SARCR: + reg = base + EQR_EYEQ6H_SARCR_RST_STATUS; + return !(readl(reg) & BIT(offset)); + default: + return -EINVAL; + } +} + +static const struct reset_control_ops eqr_ops = { + .assert = eqr_assert, + .deassert = eqr_deassert, + .status = eqr_status, +}; + +static int eqr_of_xlate_internal(struct reset_controller_dev *rcdev, + u32 domain, u32 offset) +{ + struct eqr_private *priv = eqr_rcdev_to_priv(rcdev); + + if (domain >= priv->data->domain_count || offset > 31 || + !(priv->data->domains[domain].valid_mask & BIT(offset))) { + dev_err(rcdev->dev, "%u-%u: invalid reset\n", domain, offset); + return -EINVAL; + } + + return FIELD_PREP(ID_DOMAIN_MASK, domain) | FIELD_PREP(ID_OFFSET_MASK, offset); +} + +static int eqr_of_xlate_onecell(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + return eqr_of_xlate_internal(rcdev, 0, reset_spec->args[0]); +} + +static int eqr_of_xlate_twocells(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + return eqr_of_xlate_internal(rcdev, reset_spec->args[0], reset_spec->args[1]); +} + +static int eqr_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + const struct of_device_id *match; + struct device *dev = &adev->dev; + struct eqr_private *priv; + unsigned int i; + int ret; + + /* + * We are an auxiliary device of clk-eyeq. We do not have an OF node by + * default; let's reuse our parent's OF node. + */ + WARN_ON(dev->of_node); + device_set_of_node_from_dev(dev, dev->parent); + if (!dev->of_node) + return -ENODEV; + + /* + * Using our newfound OF node, we can get match data. We cannot use + * device_get_match_data() because it does not match reused OF nodes. 
+ */ + match = of_match_node(dev->driver->of_match_table, dev->of_node); + if (!match || !match->data) + return -ENODEV; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->data = match->data; + priv->base = (void __iomem *)dev_get_platdata(dev); + priv->rcdev.ops = &eqr_ops; + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.dev = dev; + priv->rcdev.of_node = dev->of_node; + + if (priv->data->domain_count == 1) { + priv->rcdev.of_reset_n_cells = 1; + priv->rcdev.of_xlate = eqr_of_xlate_onecell; + } else { + priv->rcdev.of_reset_n_cells = 2; + priv->rcdev.of_xlate = eqr_of_xlate_twocells; + } + + for (i = 0; i < priv->data->domain_count; i++) + mutex_init(&priv->mutexes[i]); + + priv->rcdev.nr_resets = 0; + for (i = 0; i < priv->data->domain_count; i++) + priv->rcdev.nr_resets += hweight32(priv->data->domains[i].valid_mask); + + ret = devm_reset_controller_register(dev, &priv->rcdev); + if (ret) + return dev_err_probe(dev, ret, "failed registering reset controller\n"); + + return 0; +} + +static const struct eqr_domain_descriptor eqr_eyeq5_domains[] = { + { + .type = EQR_EYEQ5_SARCR, + .valid_mask = 0xFFFFFF8, + .offset = 0x004, + }, + { + .type = EQR_EYEQ5_ACRP, + .valid_mask = 0x0001FFF, + .offset = 0x200, + }, + { + .type = EQR_EYEQ5_PCIE, + .valid_mask = 0x007BFFF, + .offset = 0x120, + }, +}; + +static const struct eqr_match_data eqr_eyeq5_data = { + .domain_count = ARRAY_SIZE(eqr_eyeq5_domains), + .domains = eqr_eyeq5_domains, +}; + +static const struct eqr_domain_descriptor eqr_eyeq6l_domains[] = { + { + .type = EQR_EYEQ5_SARCR, + .valid_mask = 0x3FFF, + .offset = 0x004, + }, + { + .type = EQR_EYEQ5_ACRP, + .valid_mask = 0x00FF, + .offset = 0x200, + }, +}; + +static const struct eqr_match_data eqr_eyeq6l_data = { + .domain_count = ARRAY_SIZE(eqr_eyeq6l_domains), + .domains = eqr_eyeq6l_domains, +}; + +/* West and east OLBs each have an instance. */ +static const struct eqr_domain_descriptor eqr_eyeq6h_we_domains[] = { + { + .type = EQR_EYEQ6H_SARCR, + .valid_mask = 0x1F7F, + .offset = 0x004, + }, +}; + +static const struct eqr_match_data eqr_eyeq6h_we_data = { + .domain_count = ARRAY_SIZE(eqr_eyeq6h_we_domains), + .domains = eqr_eyeq6h_we_domains, +}; + +static const struct eqr_domain_descriptor eqr_eyeq6h_acc_domains[] = { + { + .type = EQR_EYEQ5_ACRP, + .valid_mask = 0x7FFF, + .offset = 0x000, + }, +}; + +static const struct eqr_match_data eqr_eyeq6h_acc_data = { + .domain_count = ARRAY_SIZE(eqr_eyeq6h_acc_domains), + .domains = eqr_eyeq6h_acc_domains, +}; + +/* + * Table describes OLB system-controller compatibles. + * It does not get used to match against devicetree node. 
+ */ +static const struct of_device_id eqr_match_table[] = { + { .compatible = "mobileye,eyeq5-olb", .data = &eqr_eyeq5_data }, + { .compatible = "mobileye,eyeq6l-olb", .data = &eqr_eyeq6l_data }, + { .compatible = "mobileye,eyeq6h-west-olb", .data = &eqr_eyeq6h_we_data }, + { .compatible = "mobileye,eyeq6h-east-olb", .data = &eqr_eyeq6h_we_data }, + { .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqr_eyeq6h_acc_data }, + {} +}; +MODULE_DEVICE_TABLE(of, eqr_match_table); + +static const struct auxiliary_device_id eqr_id_table[] = { + { .name = "clk_eyeq.reset" }, + { .name = "clk_eyeq.reset_west" }, + { .name = "clk_eyeq.reset_east" }, + { .name = "clk_eyeq.reset_acc" }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, eqr_id_table); + +static struct auxiliary_driver eqr_driver = { + .probe = eqr_probe, + .id_table = eqr_id_table, + .driver = { + .of_match_table = eqr_match_table, + } +}; +module_auxiliary_driver(eqr_driver); diff --git a/drivers/reset/reset-k210.c b/drivers/reset/reset-k210.c index b62a2fd44e4e42..e77e4cca377dca 100644 --- a/drivers/reset/reset-k210.c +++ b/drivers/reset/reset-k210.c @@ -90,7 +90,7 @@ static const struct reset_control_ops k210_rst_ops = { static int k210_rst_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *parent_np = of_get_parent(dev->of_node); + struct device_node *parent_np; struct k210_rst *ksr; dev_info(dev, "K210 reset controller\n"); @@ -99,6 +99,7 @@ static int k210_rst_probe(struct platform_device *pdev) if (!ksr) return -ENOMEM; + parent_np = of_get_parent(dev->of_node); ksr->map = syscon_node_to_regmap(parent_np); of_node_put(parent_np); if (IS_ERR(ksr->map)) diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c index 28fb85772b3e25..e42b2f24a93daf 100644 --- a/drivers/reset/reset-lpc18xx.c +++ b/drivers/reset/reset-lpc18xx.c @@ -150,29 +150,15 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev) if (IS_ERR(rc->base)) return PTR_ERR(rc->base); - rc->clk_reg = devm_clk_get(&pdev->dev, "reg"); - if (IS_ERR(rc->clk_reg)) { - dev_err(&pdev->dev, "reg clock not found\n"); - return PTR_ERR(rc->clk_reg); - } - - rc->clk_delay = devm_clk_get(&pdev->dev, "delay"); - if (IS_ERR(rc->clk_delay)) { - dev_err(&pdev->dev, "delay clock not found\n"); - return PTR_ERR(rc->clk_delay); - } - - ret = clk_prepare_enable(rc->clk_reg); - if (ret) { - dev_err(&pdev->dev, "unable to enable reg clock\n"); - return ret; - } + rc->clk_reg = devm_clk_get_enabled(&pdev->dev, "reg"); + if (IS_ERR(rc->clk_reg)) + return dev_err_probe(&pdev->dev, PTR_ERR(rc->clk_reg), + "reg clock not found\n"); - ret = clk_prepare_enable(rc->clk_delay); - if (ret) { - dev_err(&pdev->dev, "unable to enable delay clock\n"); - goto dis_clk_reg; - } + rc->clk_delay = devm_clk_get_enabled(&pdev->dev, "delay"); + if (IS_ERR(rc->clk_delay)) + return dev_err_probe(&pdev->dev, PTR_ERR(rc->clk_delay), + "delay clock not found\n"); fcclk = clk_get_rate(rc->clk_reg) / USEC_PER_SEC; firc = clk_get_rate(rc->clk_delay) / USEC_PER_SEC; @@ -189,10 +175,8 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev) rc->rcdev.of_node = pdev->dev.of_node; ret = reset_controller_register(&rc->rcdev); - if (ret) { - dev_err(&pdev->dev, "unable to register device\n"); - goto dis_clks; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "unable to register device\n"); rc->restart_nb.priority = 192, rc->restart_nb.notifier_call = lpc18xx_rgu_restart, @@ -201,13 +185,6 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev) 
dev_warn(&pdev->dev, "failed to register restart handler\n"); return 0; - -dis_clks: - clk_disable_unprepare(rc->clk_delay); -dis_clk_reg: - clk_disable_unprepare(rc->clk_reg); - - return ret; } static const struct of_device_id lpc18xx_rgu_match[] = { diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c index f78be97898bc05..1e9fca3e30e8e3 100644 --- a/drivers/reset/reset-meson.c +++ b/drivers/reset/reset-meson.c @@ -102,6 +102,11 @@ static const struct meson_reset_param meson_s4_param = { .level_offset = 0x40, }; +static const struct meson_reset_param t7_param = { + .reg_count = 7, + .level_offset = 0x40, +}; + static const struct of_device_id meson_reset_dt_ids[] = { { .compatible = "amlogic,meson8b-reset", .data = &meson8b_param}, { .compatible = "amlogic,meson-gxbb-reset", .data = &meson8b_param}, @@ -109,6 +114,7 @@ static const struct of_device_id meson_reset_dt_ids[] = { { .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param}, { .compatible = "amlogic,meson-s4-reset", .data = &meson_s4_param}, { .compatible = "amlogic,c3-reset", .data = &meson_s4_param}, + { .compatible = "amlogic,t7-reset", .data = &t7_param}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, meson_reset_dt_ids); diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile index 58e3b382e316c5..1e02b58ff61fe1 100644 --- a/drivers/rpmsg/Makefile +++ b/drivers/rpmsg/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o obj-$(CONFIG_RPMSG_CTRL) += rpmsg_ctrl.o obj-$(CONFIG_RPMSG_NS) += rpmsg_ns.o obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o +CFLAGS_qcom_glink_native.o := -I$(src) qcom_glink-objs := qcom_glink_native.o qcom_glink_ssr.o obj-$(CONFIG_RPMSG_QCOM_GLINK) += qcom_glink.o obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 82d460ff477718..0b2f2900690806 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -23,6 +23,9 @@ #include "rpmsg_internal.h" #include "qcom_glink_native.h" +#define CREATE_TRACE_POINTS +#include "qcom_glink_trace.h" + #define GLINK_NAME_SIZE 32 #define GLINK_VERSION_1 1 @@ -30,11 +33,16 @@ #define RPM_GLINK_CID_MAX 65536 struct glink_msg { - __le16 cmd; - __le16 param1; - __le32 param2; + /* New members MUST be added within the __struct_group() macro below. 
*/ + __struct_group(glink_msg_hdr, hdr, __packed, + __le16 cmd; + __le16 param1; + __le32 param2; + ); u8 data[]; } __packed; +static_assert(offsetof(struct glink_msg, data) == sizeof(struct glink_msg_hdr), + "struct member likely outside of __struct_group()"); /** * struct glink_defer_cmd - deferred incoming control message @@ -48,7 +56,7 @@ struct glink_msg { struct glink_defer_cmd { struct list_head node; - struct glink_msg msg; + struct glink_msg_hdr msg; u8 data[]; }; @@ -78,6 +86,7 @@ struct glink_core_rx_intent { /** * struct qcom_glink - driver context, relates to one remote subsystem * @dev: reference to the associated struct device + * @label: identifier of the glink edge * @rx_pipe: pipe object for receive FIFO * @tx_pipe: pipe object for transmit FIFO * @rx_work: worker for handling received control messages @@ -96,6 +105,8 @@ struct glink_core_rx_intent { struct qcom_glink { struct device *dev; + const char *label; + struct qcom_glink_pipe *rx_pipe; struct qcom_glink_pipe *tx_pipe; @@ -392,6 +403,8 @@ static int qcom_glink_send_version(struct qcom_glink *glink) msg.param1 = cpu_to_le16(GLINK_VERSION_1); msg.param2 = cpu_to_le32(glink->features); + trace_qcom_glink_cmd_version_tx(glink->label, GLINK_VERSION_1, glink->features); + return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); } @@ -403,6 +416,8 @@ static void qcom_glink_send_version_ack(struct qcom_glink *glink) msg.param1 = cpu_to_le16(GLINK_VERSION_1); msg.param2 = cpu_to_le32(glink->features); + trace_qcom_glink_cmd_version_ack_tx(glink->label, msg.param1, msg.param2); + qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); } @@ -415,6 +430,9 @@ static void qcom_glink_send_open_ack(struct qcom_glink *glink, msg.param1 = cpu_to_le16(channel->rcid); msg.param2 = cpu_to_le32(0); + trace_qcom_glink_cmd_open_ack_tx(glink->label, channel->name, + channel->lcid, channel->rcid); + qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); } @@ -424,9 +442,16 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink, struct glink_channel *channel; unsigned long flags; + qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8)); + spin_lock_irqsave(&glink->idr_lock, flags); channel = idr_find(&glink->rcids, cid); spin_unlock_irqrestore(&glink->idr_lock, flags); + + trace_qcom_glink_cmd_rx_intent_req_ack_rx(glink->label, + channel ? channel->name : NULL, + channel ? 
channel->lcid : 0, + cid, granted); if (!channel) { dev_err(glink->dev, "unable to find channel\n"); return; @@ -455,12 +480,9 @@ static void qcom_glink_intent_req_abort(struct glink_channel *channel) static int qcom_glink_send_open_req(struct qcom_glink *glink, struct glink_channel *channel) { - struct { - struct glink_msg msg; - u8 name[GLINK_NAME_SIZE]; - } __packed req; + DEFINE_RAW_FLEX(struct glink_msg, req, data, GLINK_NAME_SIZE); int name_len = strlen(channel->name) + 1; - int req_len = ALIGN(sizeof(req.msg) + name_len, 8); + int req_len = ALIGN(sizeof(*req) + name_len, 8); int ret; unsigned long flags; @@ -476,12 +498,15 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink, channel->lcid = ret; - req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN); - req.msg.param1 = cpu_to_le16(channel->lcid); - req.msg.param2 = cpu_to_le32(name_len); - strcpy(req.name, channel->name); + req->cmd = cpu_to_le16(GLINK_CMD_OPEN); + req->param1 = cpu_to_le16(channel->lcid); + req->param2 = cpu_to_le32(name_len); + strcpy(req->data, channel->name); + + trace_qcom_glink_cmd_open_tx(glink->label, channel->name, + channel->lcid, channel->rcid); - ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true); + ret = qcom_glink_tx(glink, req, req_len, NULL, 0, true); if (ret) goto remove_idr; @@ -505,18 +530,24 @@ static void qcom_glink_send_close_req(struct qcom_glink *glink, req.param1 = cpu_to_le16(channel->lcid); req.param2 = 0; + trace_qcom_glink_cmd_close_tx(glink->label, channel->name, + channel->lcid, channel->rcid); + qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); } static void qcom_glink_send_close_ack(struct qcom_glink *glink, - unsigned int rcid) + struct glink_channel *channel) { struct glink_msg req; req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK); - req.param1 = cpu_to_le16(rcid); + req.param1 = cpu_to_le16(channel->rcid); req.param2 = 0; + trace_qcom_glink_cmd_close_ack_tx(glink->label, channel->name, + channel->lcid, channel->rcid); + qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true); } @@ -548,6 +579,9 @@ static void qcom_glink_rx_done_work(struct work_struct *work) cmd.lcid = cid; cmd.liid = iid; + trace_qcom_glink_cmd_rx_done_tx(glink->label, channel->name, + channel->lcid, channel->rcid, cmd.liid, reuse); + qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); if (!reuse) { kfree(intent->data); @@ -598,6 +632,8 @@ static void qcom_glink_receive_version(struct qcom_glink *glink, u32 version, u32 features) { + trace_qcom_glink_cmd_version_rx(glink->label, version, features); + switch (version) { case 0: break; @@ -625,6 +661,8 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink, u32 version, u32 features) { + trace_qcom_glink_cmd_version_ack_rx(glink->label, version, features); + switch (version) { case 0: /* Version negotiation failed */ @@ -656,6 +694,10 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink, { struct glink_msg msg; + trace_qcom_glink_cmd_rx_intent_req_ack_tx(glink->label, channel->name, + channel->lcid, channel->rcid, + granted); + msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK); msg.param1 = cpu_to_le16(channel->lcid); msg.param2 = cpu_to_le32(granted); @@ -693,6 +735,10 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink, cmd.size = cpu_to_le32(intent->size); cmd.liid = cpu_to_le32(intent->id); + trace_qcom_glink_cmd_intent_tx(glink->label, channel->name, + channel->lcid, channel->rcid, + cmd.count, cmd.size, cmd.liid); + qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); return 0; @@ -745,9 +791,14 @@ 
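The glink_msg rework earlier in this file uses two generic kernel helpers instead of hand-rolled wrapper structs: __struct_group() exposes the fixed header of a flexible struct as a separately usable type, and DEFINE_RAW_FLEX() builds an on-stack instance with a fixed amount of trailing payload. A rough sketch of the same pattern with a hypothetical message type (not the driver's own definitions):

#include <asm/byteorder.h>
#include <linux/overflow.h>	/* DEFINE_RAW_FLEX() */
#include <linux/stddef.h>	/* __struct_group(), offsetof() */
#include <linux/string.h>
#include <linux/types.h>

struct demo_msg {
	/* Fixed header, also addressable on its own as struct demo_msg_hdr. */
	__struct_group(demo_msg_hdr, hdr, __packed,
		__le16 cmd;
		__le16 param1;
		__le32 param2;
	);
	u8 data[];			/* flexible payload */
} __packed;

static_assert(offsetof(struct demo_msg, data) == sizeof(struct demo_msg_hdr),
	      "header members must stay inside __struct_group()");

static void demo_build(void)
{
	/* On-stack demo_msg with room for 32 bytes of data[]. */
	DEFINE_RAW_FLEX(struct demo_msg, req, data, 32);

	req->cmd = cpu_to_le16(1);
	req->param1 = cpu_to_le16(2);
	req->param2 = cpu_to_le32(6);
	memcpy(req->data, "hello", 6);
	/* ... pass req and sizeof(*req) plus payload length to the transport ... */
}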
static void qcom_glink_handle_rx_done(struct qcom_glink *glink, struct glink_channel *channel; unsigned long flags; + qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8)); + spin_lock_irqsave(&glink->idr_lock, flags); channel = idr_find(&glink->rcids, cid); spin_unlock_irqrestore(&glink->idr_lock, flags); + + trace_qcom_glink_cmd_rx_done_rx(glink->label, channel ? channel->name : NULL, + channel ? channel->lcid : 0, cid, iid, reuse); if (!channel) { dev_err(glink->dev, "invalid channel id received\n"); return; @@ -797,6 +848,10 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink, channel = idr_find(&glink->rcids, cid); spin_unlock_irqrestore(&glink->idr_lock, flags); + trace_qcom_glink_cmd_rx_intent_req_rx(glink->label, + channel ? channel->name : NULL, + channel ? channel->lcid : 0, + cid, size); if (!channel) { pr_err("%s channel not found for cid %d\n", __func__, cid); return; @@ -826,7 +881,9 @@ static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra) INIT_LIST_HEAD(&dcmd->node); - qcom_glink_rx_peek(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra); + qcom_glink_rx_peek(glink, + container_of(&dcmd->msg, struct glink_msg, hdr), 0, + sizeof(dcmd->msg) + extra); spin_lock(&glink->rx_lock); list_add_tail(&dcmd->node, &glink->rx_queue); @@ -843,7 +900,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) struct glink_core_rx_intent *intent; struct glink_channel *channel; struct { - struct glink_msg msg; + struct glink_msg_hdr msg; __le32 chunk_size; __le32 left_size; } __packed hdr; @@ -869,9 +926,15 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) } rcid = le16_to_cpu(hdr.msg.param1); + liid = le32_to_cpu(hdr.msg.param2); spin_lock_irqsave(&glink->idr_lock, flags); channel = idr_find(&glink->rcids, rcid); spin_unlock_irqrestore(&glink->idr_lock, flags); + + trace_qcom_glink_cmd_tx_data_rx(glink->label, channel ? channel->name : NULL, + channel ? 
channel->lcid : 0, rcid, + liid, chunk_size, left_size, + hdr.msg.cmd == GLINK_CMD_TX_DATA_CONT); if (!channel) { dev_dbg(glink->dev, "Data on non-existing channel\n"); @@ -902,8 +965,6 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) intent = channel->buf; } } else { - liid = le32_to_cpu(hdr.msg.param2); - spin_lock_irqsave(&channel->intent_lock, flags); intent = idr_find(&channel->liids, liid); spin_unlock_irqrestore(&channel->intent_lock, flags); @@ -952,6 +1013,14 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) return ret; } +static void qcom_glink_rx_read_notif(struct qcom_glink *glink) +{ + trace_qcom_glink_cmd_read_notif_rx(glink->label); + + qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8)); + qcom_glink_tx_kick(glink); +} + static void qcom_glink_handle_intent(struct qcom_glink *glink, unsigned int cid, unsigned int count, @@ -965,7 +1034,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink, }; struct { - struct glink_msg msg; + struct glink_msg_hdr msg; struct intent_pair intents[]; } __packed * msg; @@ -983,6 +1052,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink, channel = idr_find(&glink->rcids, cid); spin_unlock_irqrestore(&glink->idr_lock, flags); if (!channel) { + trace_qcom_glink_cmd_intent_rx(glink->label, NULL, 0, cid, count, 0, 0); dev_err(glink->dev, "intents for non-existing channel\n"); qcom_glink_rx_advance(glink, ALIGN(msglen, 8)); return; @@ -994,6 +1064,11 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink, qcom_glink_rx_peek(glink, msg, 0, msglen); + trace_qcom_glink_cmd_intent_rx(glink->label, channel->name, + channel->lcid, cid, count, + count > 0 ? msg->intents[0].size : 0, + count > 0 ? msg->intents[0].iid : 0); + for (i = 0; i < count; ++i) { intent = kzalloc(sizeof(*intent), GFP_ATOMIC); if (!intent) @@ -1022,9 +1097,14 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid) { struct glink_channel *channel; + qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8)); + spin_lock(&glink->idr_lock); channel = idr_find(&glink->lcids, lcid); spin_unlock(&glink->idr_lock); + + trace_qcom_glink_cmd_open_ack_rx(glink->label, channel ? channel->name : NULL, + lcid, channel ? channel->rcid : 0); if (!channel) { dev_err(glink->dev, "Invalid open ack packet\n"); return -EINVAL; @@ -1057,6 +1137,9 @@ static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u msg.param1 = cpu_to_le16(channel->lcid); msg.param2 = cpu_to_le32(sigs); + trace_qcom_glink_cmd_signal_tx(glink->label, channel->name, + channel->lcid, channel->rcid, sigs); + return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true); } @@ -1067,9 +1150,14 @@ static void qcom_glink_handle_signals(struct qcom_glink *glink, unsigned long flags; bool enable; + qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8)); + spin_lock_irqsave(&glink->idr_lock, flags); channel = idr_find(&glink->rcids, rcid); spin_unlock_irqrestore(&glink->idr_lock, flags); + + trace_qcom_glink_cmd_signal_rx(glink->label, channel ? channel->name : NULL, + channel ? 
channel->lcid : 0, rcid, sigs); if (!channel) { dev_err(glink->dev, "signal for non-existing channel\n"); return; @@ -1114,7 +1202,6 @@ void qcom_glink_native_rx(struct qcom_glink *glink) break; case GLINK_CMD_OPEN_ACK: ret = qcom_glink_rx_open_ack(glink, param1); - qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; case GLINK_CMD_OPEN: ret = qcom_glink_rx_defer(glink, param2); @@ -1124,27 +1211,22 @@ void qcom_glink_native_rx(struct qcom_glink *glink) ret = qcom_glink_rx_data(glink, avail); break; case GLINK_CMD_READ_NOTIF: - qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); - qcom_glink_tx_kick(glink); + qcom_glink_rx_read_notif(glink); break; case GLINK_CMD_INTENT: qcom_glink_handle_intent(glink, param1, param2, avail); break; case GLINK_CMD_RX_DONE: qcom_glink_handle_rx_done(glink, param1, param2, false); - qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; case GLINK_CMD_RX_DONE_W_REUSE: qcom_glink_handle_rx_done(glink, param1, param2, true); - qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; case GLINK_CMD_RX_INTENT_REQ_ACK: qcom_glink_handle_intent_req_ack(glink, param1, param2); - qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; case GLINK_CMD_SIGNALS: qcom_glink_handle_signals(glink, param1, param2); - qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; default: dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd); @@ -1349,6 +1431,10 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, cmd.cid = channel->lcid; cmd.size = size; + trace_qcom_glink_cmd_rx_intent_req_tx(glink->label, channel->name, + channel->lcid, channel->rcid, + cmd.size); + ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); if (ret) goto unlock; @@ -1377,7 +1463,7 @@ static int __qcom_glink_send(struct glink_channel *channel, struct glink_core_rx_intent *tmp; int iid = 0; struct { - struct glink_msg msg; + struct glink_msg_hdr msg; __le32 chunk_size; __le32 left_size; } __packed req; @@ -1429,6 +1515,12 @@ static int __qcom_glink_send(struct glink_channel *channel, req.chunk_size = cpu_to_le32(chunk_size); req.left_size = cpu_to_le32(len - offset - chunk_size); + trace_qcom_glink_cmd_tx_data_tx(glink->label, channel->name, + channel->lcid, channel->rcid, + iid, chunk_size, + len - offset - chunk_size, + offset > 0); + ret = qcom_glink_tx(glink, &req, sizeof(req), data + offset, chunk_size, wait); if (ret) { /* Mark intent available if we failed */ @@ -1544,6 +1636,8 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, create_device = true; } + trace_qcom_glink_cmd_open_rx(glink->label, name, channel->lcid, rcid); + spin_lock_irqsave(&glink->idr_lock, flags); ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC); if (ret < 0) { @@ -1605,6 +1699,9 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid) spin_lock_irqsave(&glink->idr_lock, flags); channel = idr_find(&glink->rcids, rcid); spin_unlock_irqrestore(&glink->idr_lock, flags); + + trace_qcom_glink_cmd_close_rx(glink->label, channel ? channel->name : NULL, + channel ? 
channel->lcid : 0, rcid); if (WARN(!channel, "close request on unknown channel\n")) return; @@ -1620,7 +1717,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid) } channel->rpdev = NULL; - qcom_glink_send_close_ack(glink, channel->rcid); + qcom_glink_send_close_ack(glink, channel); spin_lock_irqsave(&glink->idr_lock, flags); idr_remove(&glink->rcids, channel->rcid); @@ -1641,6 +1738,9 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid) spin_lock_irqsave(&glink->idr_lock, flags); channel = idr_find(&glink->lcids, lcid); + + trace_qcom_glink_cmd_close_ack_rx(glink->label, channel ? channel->name : NULL, + lcid, channel ? channel->rcid : 0); if (WARN(!channel, "close ack on unknown channel\n")) { spin_unlock_irqrestore(&glink->idr_lock, flags); return; @@ -1685,7 +1785,7 @@ static void qcom_glink_work(struct work_struct *work) list_del(&dcmd->node); spin_unlock_irqrestore(&glink->rx_lock, flags); - msg = &dcmd->msg; + msg = container_of(&dcmd->msg, struct glink_msg, hdr); cmd = le16_to_cpu(msg->cmd); param1 = le16_to_cpu(msg->param1); param2 = le32_to_cpu(msg->param2); @@ -1815,6 +1915,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, idr_init(&glink->lcids); idr_init(&glink->rcids); + ret = of_property_read_string(dev->of_node, "label", &glink->label); + if (ret < 0) + glink->label = dev->of_node->name; + glink->dev->groups = qcom_glink_groups; ret = device_add_groups(dev, qcom_glink_groups); diff --git a/drivers/rpmsg/qcom_glink_trace.h b/drivers/rpmsg/qcom_glink_trace.h new file mode 100644 index 00000000000000..668bdea1d78f1d --- /dev/null +++ b/drivers/rpmsg/qcom_glink_trace.h @@ -0,0 +1,406 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM qcom_glink + +#if !defined(__QCOM_GLINK_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __QCOM_GLINK_TRACE_H__ + +#include +#include "qcom_glink_native.h" + + +TRACE_EVENT(qcom_glink_cmd_version, + TP_PROTO(const char *remote, unsigned int version, unsigned int features, bool tx), + TP_ARGS(remote, version, features, tx), + TP_STRUCT__entry( + __string(remote, remote) + __field(u32, version) + __field(u32, features) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __entry->version = version; + __entry->features = features; + __entry->tx = tx; + ), + TP_printk("%s remote: %s version: %u features: %#x", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __entry->version, + __entry->features + ) +); +#define trace_qcom_glink_cmd_version_tx(...) trace_qcom_glink_cmd_version(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_version_rx(...) trace_qcom_glink_cmd_version(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_version_ack, + TP_PROTO(const char *remote, unsigned int version, unsigned int features, bool tx), + TP_ARGS(remote, version, features, tx), + TP_STRUCT__entry( + __string(remote, remote) + __field(u32, version) + __field(u32, features) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __entry->version = version; + __entry->features = features; + __entry->tx = tx; + ), + TP_printk("%s remote: %s version: %u features: %#x", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __entry->version, + __entry->features + ) +); +#define trace_qcom_glink_cmd_version_ack_tx(...) trace_qcom_glink_cmd_version_ack(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_version_ack_rx(...) 
trace_qcom_glink_cmd_version_ack(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_open, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx), + TP_ARGS(remote, channel, lcid, rcid, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u]", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid + ) +); +#define trace_qcom_glink_cmd_open_tx(...) trace_qcom_glink_cmd_open(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_open_rx(...) trace_qcom_glink_cmd_open(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_close, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx), + TP_ARGS(remote, channel, lcid, rcid, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u]", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid + ) +); +#define trace_qcom_glink_cmd_close_tx(...) trace_qcom_glink_cmd_close(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_close_rx(...) trace_qcom_glink_cmd_close(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_open_ack, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx), + TP_ARGS(remote, channel, lcid, rcid, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u]", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid + ) +); +#define trace_qcom_glink_cmd_open_ack_tx(...) trace_qcom_glink_cmd_open_ack(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_open_ack_rx(...) trace_qcom_glink_cmd_open_ack(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_intent, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, size_t count, size_t size, u32 liid, bool tx), + TP_ARGS(remote, channel, lcid, rcid, count, size, liid, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(u32, count) + __field(u32, size) + __field(u32, liid) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->count = count; + __entry->size = size; + __entry->liid = liid; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u] count: %d [size: %d liid: %d]", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid, + __entry->count, + __entry->size, + __entry->liid + ) +); +#define trace_qcom_glink_cmd_intent_tx(...) trace_qcom_glink_cmd_intent(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_intent_rx(...) 
trace_qcom_glink_cmd_intent(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_rx_done, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, u32 iid, bool reuse, bool tx), + TP_ARGS(remote, channel, lcid, rcid, iid, reuse, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(u32, iid) + __field(bool, reuse) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->iid = iid; + __entry->reuse = reuse; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u] iid: %d reuse: %d", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid, + __entry->iid, + __entry->reuse + ) +); +#define trace_qcom_glink_cmd_rx_done_tx(...) trace_qcom_glink_cmd_rx_done(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_rx_done_rx(...) trace_qcom_glink_cmd_rx_done(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_rx_intent_req, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, size_t size, bool tx), + TP_ARGS(remote, channel, lcid, rcid, size, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(u32, size) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->size = size; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u] size: %d", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid, + __entry->size + ) +); +#define trace_qcom_glink_cmd_rx_intent_req_tx(...) trace_qcom_glink_cmd_rx_intent_req(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_rx_intent_req_rx(...) trace_qcom_glink_cmd_rx_intent_req(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_rx_intent_req_ack, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool granted, bool tx), + TP_ARGS(remote, channel, lcid, rcid, granted, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(bool, granted) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->granted = granted; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u] granted: %d", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid, + __entry->granted + ) +); +#define trace_qcom_glink_cmd_rx_intent_req_ack_tx(...) trace_qcom_glink_cmd_rx_intent_req_ack(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_rx_intent_req_ack_rx(...) 
trace_qcom_glink_cmd_rx_intent_req_ack(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_tx_data, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, u32 iid, u32 chunk_size, u32 left_size, bool cont, bool tx), + TP_ARGS(remote, channel, lcid, rcid, iid, chunk_size, left_size, cont, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(u32, iid) + __field(u32, chunk_size) + __field(u32, left_size) + __field(bool, cont) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->iid = iid; + __entry->chunk_size = chunk_size; + __entry->left_size = left_size; + __entry->cont = cont; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u] iid: %d chunk_size: %d left_size: %d cont: %d", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid, + __entry->iid, + __entry->chunk_size, + __entry->left_size, + __entry->cont + ) +); +#define trace_qcom_glink_cmd_tx_data_tx(...) trace_qcom_glink_cmd_tx_data(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_tx_data_rx(...) trace_qcom_glink_cmd_tx_data(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_close_ack, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx), + TP_ARGS(remote, channel, lcid, rcid, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u]", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid + ) +); +#define trace_qcom_glink_cmd_close_ack_tx(...) trace_qcom_glink_cmd_close_ack(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_close_ack_rx(...) trace_qcom_glink_cmd_close_ack(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_read_notif, + TP_PROTO(const char *remote, bool tx), + TP_ARGS(remote, tx), + TP_STRUCT__entry( + __string(remote, remote) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __entry->tx = tx; + ), + TP_printk("%s remote: %s", + __entry->tx ? "tx" : "rx", + __get_str(remote) + ) +); +#define trace_qcom_glink_cmd_read_notif_tx(...) trace_qcom_glink_cmd_read_notif(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_read_notif_rx(...) trace_qcom_glink_cmd_read_notif(__VA_ARGS__, false) + +TRACE_EVENT(qcom_glink_cmd_signal, + TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, unsigned int signals, bool tx), + TP_ARGS(remote, channel, lcid, rcid, signals, tx), + TP_STRUCT__entry( + __string(remote, remote) + __string(channel, channel) + __field(u16, lcid) + __field(u16, rcid) + __field(u32, signals) + __field(bool, tx) + ), + TP_fast_assign( + __assign_str(remote); + __assign_str(channel); + __entry->lcid = lcid; + __entry->rcid = rcid; + __entry->signals = signals; + __entry->tx = tx; + ), + TP_printk("%s remote: %s channel: %s[%u/%u] signals: %#x", + __entry->tx ? "tx" : "rx", + __get_str(remote), + __get_str(channel), + __entry->lcid, + __entry->rcid, + __entry->signals + ) +); +#define trace_qcom_glink_cmd_signal_tx(...) trace_qcom_glink_cmd_signal(__VA_ARGS__, true) +#define trace_qcom_glink_cmd_signal_rx(...) 
trace_qcom_glink_cmd_signal(__VA_ARGS__, false) + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE qcom_glink_trace + +#include diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 2a95b05982ad25..66eb1122248b65 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -743,6 +743,16 @@ config RTC_DRV_S5M This driver can also be built as a module. If so, the module will be called rtc-s5m. +config RTC_DRV_SD2405AL + tristate "DFRobot SD2405AL" + select REGMAP_I2C + help + If you say yes here you will get support for the + DFRobot SD2405AL I2C RTC Module. + + This driver can also be built as a module. If so, the module + will be called rtc-sd2405al. + config RTC_DRV_SD3078 tristate "ZXW Shenzhen whwave SD3078" select REGMAP_I2C @@ -1827,6 +1837,17 @@ config RTC_DRV_BBNSM This driver can also be built as a module, if so, the module will be called "rtc-bbnsm". +config RTC_DRV_IMX_BBM_SCMI + depends on IMX_SCMI_BBM_EXT || COMPILE_TEST + default y if ARCH_MXC + tristate "NXP i.MX BBM SCMI RTC support" + help + If you say yes here you get support for the NXP i.MX BBSM SCMI + RTC module. + + To compile this driver as a module, choose M here: the + module will be called rtc-imx-sm-bbm. + config RTC_DRV_IMX_SC depends on IMX_SCU depends on HAVE_ARM_SMCCC @@ -1923,6 +1944,12 @@ config RTC_DRV_STM32 tristate "STM32 RTC" select REGMAP_MMIO depends on ARCH_STM32 || COMPILE_TEST + depends on OF + depends on PINCTRL + select PINMUX + select PINCONF + select GENERIC_PINCONF + depends on COMMON_CLK help If you say yes here you get support for the STM32 On-Chip Real Time Clock. diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 3004e372f25f0a..f62340ecc5348d 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -74,6 +74,7 @@ obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o obj-$(CONFIG_RTC_DRV_IMX_SC) += rtc-imx-sc.o +obj-$(CONFIG_RTC_DRV_IMX_BBM_SCMI) += rtc-imx-sm-bbm.o obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o obj-$(CONFIG_RTC_DRV_ISL12026) += rtc-isl12026.o obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o @@ -162,6 +163,7 @@ obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o +obj-$(CONFIG_RTC_DRV_SD2405AL) += rtc-sd2405al.o obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c index 4aad9bb998683b..c4a3ab53dcd4b7 100644 --- a/drivers/rtc/dev.c +++ b/drivers/rtc/dev.c @@ -523,7 +523,6 @@ static int rtc_dev_release(struct inode *inode, struct file *file) static const struct file_operations rtc_dev_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .read = rtc_dev_read, .poll = rtc_dev_poll, .unlocked_ioctl = rtc_dev_ioctl, diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index f93bee96e36233..993c0878fb6606 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c @@ -368,6 +368,7 @@ static int at91_rtc_probe(struct platform_device *pdev) return ret; rtc->gpbr = syscon_node_to_regmap(args.np); + of_node_put(args.np); rtc->gpbr_offset = args.args[0]; if (IS_ERR(rtc->gpbr)) { dev_err(&pdev->dev, "failed to retrieve gpbr regmap, aborting.\n"); diff --git a/drivers/rtc/rtc-imx-sm-bbm.c b/drivers/rtc/rtc-imx-sm-bbm.c new file 
mode 100644 index 00000000000000..daa472be7c8069 --- /dev/null +++ b/drivers/rtc/rtc-imx-sm-bbm.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2024 NXP. + */ + +#include +#include +#include +#include +#include +#include + +struct scmi_imx_bbm { + const struct scmi_imx_bbm_proto_ops *ops; + struct rtc_device *rtc_dev; + struct scmi_protocol_handle *ph; + struct notifier_block nb; +}; + +static int scmi_imx_bbm_read_time(struct device *dev, struct rtc_time *tm) +{ + struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev); + struct scmi_protocol_handle *ph = bbnsm->ph; + u64 val; + int ret; + + ret = bbnsm->ops->rtc_time_get(ph, 0, &val); + if (ret) + return ret; + + rtc_time64_to_tm(val, tm); + + return 0; +} + +static int scmi_imx_bbm_set_time(struct device *dev, struct rtc_time *tm) +{ + struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev); + struct scmi_protocol_handle *ph = bbnsm->ph; + u64 val; + + val = rtc_tm_to_time64(tm); + + return bbnsm->ops->rtc_time_set(ph, 0, val); +} + +static int scmi_imx_bbm_alarm_irq_enable(struct device *dev, unsigned int enable) +{ + struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev); + struct scmi_protocol_handle *ph = bbnsm->ph; + + /* scmi_imx_bbm_set_alarm enables the irq, just handle disable here */ + if (!enable) + return bbnsm->ops->rtc_alarm_set(ph, 0, false, 0); + + return 0; +} + +static int scmi_imx_bbm_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev); + struct scmi_protocol_handle *ph = bbnsm->ph; + struct rtc_time *alrm_tm = &alrm->time; + u64 val; + + val = rtc_tm_to_time64(alrm_tm); + + return bbnsm->ops->rtc_alarm_set(ph, 0, true, val); +} + +static const struct rtc_class_ops smci_imx_bbm_rtc_ops = { + .read_time = scmi_imx_bbm_read_time, + .set_time = scmi_imx_bbm_set_time, + .set_alarm = scmi_imx_bbm_set_alarm, + .alarm_irq_enable = scmi_imx_bbm_alarm_irq_enable, +}; + +static int scmi_imx_bbm_rtc_notifier(struct notifier_block *nb, unsigned long event, void *data) +{ + struct scmi_imx_bbm *bbnsm = container_of(nb, struct scmi_imx_bbm, nb); + struct scmi_imx_bbm_notif_report *r = data; + + if (r->is_rtc) + rtc_update_irq(bbnsm->rtc_dev, 1, RTC_AF | RTC_IRQF); + else + pr_err("Unexpected bbm event: %s\n", __func__); + + return 0; +} + +static int scmi_imx_bbm_rtc_init(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + struct device *dev = &sdev->dev; + struct scmi_imx_bbm *bbnsm = dev_get_drvdata(dev); + int ret; + + bbnsm->rtc_dev = devm_rtc_allocate_device(dev); + if (IS_ERR(bbnsm->rtc_dev)) + return PTR_ERR(bbnsm->rtc_dev); + + bbnsm->rtc_dev->ops = &smci_imx_bbm_rtc_ops; + bbnsm->rtc_dev->range_max = U32_MAX; + + bbnsm->nb.notifier_call = &scmi_imx_bbm_rtc_notifier; + ret = handle->notify_ops->devm_event_notifier_register(sdev, SCMI_PROTOCOL_IMX_BBM, + SCMI_EVENT_IMX_BBM_RTC, + NULL, &bbnsm->nb); + if (ret) + return ret; + + return devm_rtc_register_device(bbnsm->rtc_dev); +} + +static int scmi_imx_bbm_rtc_probe(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + struct device *dev = &sdev->dev; + struct scmi_protocol_handle *ph; + struct scmi_imx_bbm *bbnsm; + int ret; + + if (!handle) + return -ENODEV; + + bbnsm = devm_kzalloc(dev, sizeof(*bbnsm), GFP_KERNEL); + if (!bbnsm) + return -ENOMEM; + + bbnsm->ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_BBM, &ph); + if (IS_ERR(bbnsm->ops)) + return PTR_ERR(bbnsm->ops); + + bbnsm->ph = ph; + + device_init_wakeup(dev, true); + + 
dev_set_drvdata(dev, bbnsm); + + ret = scmi_imx_bbm_rtc_init(sdev); + if (ret) + device_init_wakeup(dev, false); + + return ret; +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_IMX_BBM, "imx-bbm-rtc" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_imx_bbm_rtc_driver = { + .name = "scmi-imx-bbm-rtc", + .probe = scmi_imx_bbm_rtc_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_imx_bbm_rtc_driver); + +MODULE_AUTHOR("Peng Fan "); +MODULE_DESCRIPTION("IMX SM BBM RTC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 0013bff0447d5b..1f58ae8b151e94 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -850,7 +850,6 @@ static const struct file_operations wdt_fops = { .write = wdt_write, .open = wdt_open, .release = wdt_release, - .llseek = no_llseek, }; static struct miscdevice wdt_dev = { diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c index f0f6b9b6daecbf..5d30ce8e13ca03 100644 --- a/drivers/rtc/rtc-m48t59.c +++ b/drivers/rtc/rtc-m48t59.c @@ -132,7 +132,7 @@ static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm) M48T59_WRITE((bin2bcd(tm->tm_mon + 1) & 0x1F), M48T59_MONTH); M48T59_WRITE(bin2bcd(year % 100), M48T59_YEAR); - if (pdata->type == M48T59RTC_TYPE_M48T59 && (year / 100)) + if (pdata->type == M48T59RTC_TYPE_M48T59 && (year >= 100)) val = (M48T59_WDAY_CEB | M48T59_WDAY_CB); val |= (bin2bcd(tm->tm_wday) & 0x07); M48T59_WRITE(val, M48T59_WDAY); @@ -458,6 +458,8 @@ static int m48t59_rtc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, m48t59); m48t59->rtc->ops = &m48t59_rtc_ops; + m48t59->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900; + m48t59->rtc->range_max = RTC_TIMESTAMP_END_2099; nvmem_cfg.size = pdata->offset; ret = devm_rtc_nvmem_register(m48t59->rtc, &nvmem_cfg); diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c index 9a456f537d3b9e..3fbcf5f6b92ffd 100644 --- a/drivers/rtc/rtc-max31335.c +++ b/drivers/rtc/rtc-max31335.c @@ -8,7 +8,7 @@ * */ -#include +#include #include #include #include diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index f6b779c12ca724..c32fba550c8e0a 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c @@ -17,7 +17,7 @@ #include #include -#include +#include /* RTC_CTRL register bit fields */ #define PM8xxx_RTC_ENABLE BIT(7) diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c index e73102a39f1b88..711f62eecd798b 100644 --- a/drivers/rtc/rtc-rc5t619.c +++ b/drivers/rtc/rtc-rc5t619.c @@ -429,14 +429,23 @@ static int rc5t619_rtc_probe(struct platform_device *pdev) return devm_rtc_register_device(rtc->rtc); } +static const struct platform_device_id rc5t619_rtc_id[] = { + { + .name = "rc5t619-rtc", + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, rc5t619_rtc_id); + static struct platform_driver rc5t619_rtc_driver = { .driver = { .name = "rc5t619-rtc", }, .probe = rc5t619_rtc_probe, + .id_table = rc5t619_rtc_id, }; - module_platform_driver(rc5t619_rtc_driver); -MODULE_ALIAS("platform:rc5t619-rtc"); + MODULE_DESCRIPTION("RICOH RC5T619 RTC driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index 2d6b655a4b25b1..e3dc18882f4144 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -56,7 +56,6 @@ static const struct i2c_device_id s35390a_id[] = { MODULE_DEVICE_TABLE(i2c, s35390a_id); static const __maybe_unused struct 
of_device_id s35390a_of_match[] = { - { .compatible = "s35390a" }, { .compatible = "sii,s35390a" }, { } }; diff --git a/drivers/rtc/rtc-sd2405al.c b/drivers/rtc/rtc-sd2405al.c new file mode 100644 index 00000000000000..d2568c3e387659 --- /dev/null +++ b/drivers/rtc/rtc-sd2405al.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * RTC driver for the SD2405AL Real-Time Clock + * + * Datasheet: + * https://image.dfrobot.com/image/data/TOY0021/SD2405AL%20datasheet%20(Angelo%20v0.1).pdf + * + * Copyright (C) 2024 Tóth János + */ + +#include +#include +#include +#include + +/* Real time clock registers */ +#define SD2405AL_REG_T_SEC 0x00 +#define SD2405AL_REG_T_MIN 0x01 +#define SD2405AL_REG_T_HOUR 0x02 +# define SD2405AL_BIT_12H_PM BIT(5) +# define SD2405AL_BIT_24H BIT(7) +#define SD2405AL_REG_T_WEEK 0x03 +#define SD2405AL_REG_T_DAY 0x04 +#define SD2405AL_REG_T_MON 0x05 +#define SD2405AL_REG_T_YEAR 0x06 + +#define SD2405AL_NUM_T_REGS (SD2405AL_REG_T_YEAR - SD2405AL_REG_T_SEC + 1) + +/* Control registers */ +#define SD2405AL_REG_CTR1 0x0F +# define SD2405AL_BIT_WRTC2 BIT(2) +# define SD2405AL_BIT_WRTC3 BIT(7) +#define SD2405AL_REG_CTR2 0x10 +# define SD2405AL_BIT_WRTC1 BIT(7) +#define SD2405AL_REG_CTR3 0x11 +#define SD2405AL_REG_TTF 0x12 +#define SD2405AL_REG_CNTDWN 0x13 + +/* General RAM */ +#define SD2405AL_REG_M_START 0x14 +#define SD2405AL_REG_M_END 0x1F + +struct sd2405al { + struct device *dev; + struct rtc_device *rtc; + struct regmap *regmap; +}; + +static int sd2405al_enable_reg_write(struct sd2405al *sd2405al) +{ + int ret; + + /* order of writes is important */ + ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR2, + SD2405AL_BIT_WRTC1, SD2405AL_BIT_WRTC1); + if (ret < 0) + return ret; + + ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR1, + SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3, + SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3); + if (ret < 0) + return ret; + + return 0; +} + +static int sd2405al_disable_reg_write(struct sd2405al *sd2405al) +{ + int ret; + + /* order of writes is important */ + ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR1, + SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3, 0x00); + if (ret < 0) + return ret; + + ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR2, + SD2405AL_BIT_WRTC1, 0x00); + if (ret < 0) + return ret; + + return 0; +} + +static int sd2405al_read_time(struct device *dev, struct rtc_time *time) +{ + u8 data[SD2405AL_NUM_T_REGS] = { 0 }; + struct sd2405al *sd2405al = dev_get_drvdata(dev); + int ret; + + ret = regmap_bulk_read(sd2405al->regmap, SD2405AL_REG_T_SEC, data, + SD2405AL_NUM_T_REGS); + if (ret < 0) + return ret; + + time->tm_sec = bcd2bin(data[SD2405AL_REG_T_SEC] & 0x7F); + time->tm_min = bcd2bin(data[SD2405AL_REG_T_MIN] & 0x7F); + + if (data[SD2405AL_REG_T_HOUR] & SD2405AL_BIT_24H) + time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] & 0x3F); + else + if (data[SD2405AL_REG_T_HOUR] & SD2405AL_BIT_12H_PM) + time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] + & 0x1F) + 12; + else /* 12 hour mode, AM */ + time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] + & 0x1F); + + time->tm_wday = bcd2bin(data[SD2405AL_REG_T_WEEK] & 0x07); + time->tm_mday = bcd2bin(data[SD2405AL_REG_T_DAY] & 0x3F); + time->tm_mon = bcd2bin(data[SD2405AL_REG_T_MON] & 0x1F) - 1; + time->tm_year = bcd2bin(data[SD2405AL_REG_T_YEAR]) + 100; + + dev_dbg(sd2405al->dev, "read time: %ptR (%d)\n", time, time->tm_wday); + + return 0; +} + +static int sd2405al_set_time(struct device *dev, struct rtc_time *time) +{ + u8 
data[SD2405AL_NUM_T_REGS]; + struct sd2405al *sd2405al = dev_get_drvdata(dev); + int ret; + + data[SD2405AL_REG_T_SEC] = bin2bcd(time->tm_sec); + data[SD2405AL_REG_T_MIN] = bin2bcd(time->tm_min); + data[SD2405AL_REG_T_HOUR] = bin2bcd(time->tm_hour) | SD2405AL_BIT_24H; + data[SD2405AL_REG_T_DAY] = bin2bcd(time->tm_mday); + data[SD2405AL_REG_T_WEEK] = bin2bcd(time->tm_wday); + data[SD2405AL_REG_T_MON] = bin2bcd(time->tm_mon) + 1; + data[SD2405AL_REG_T_YEAR] = bin2bcd(time->tm_year - 100); + + ret = sd2405al_enable_reg_write(sd2405al); + if (ret < 0) + return ret; + + ret = regmap_bulk_write(sd2405al->regmap, SD2405AL_REG_T_SEC, data, + SD2405AL_NUM_T_REGS); + if (ret < 0) + return ret; + + ret = regmap_write(sd2405al->regmap, SD2405AL_REG_TTF, 0x00); + if (ret < 0) + return ret; + + ret = sd2405al_disable_reg_write(sd2405al); + if (ret < 0) + return ret; + + dev_dbg(sd2405al->dev, "set time: %ptR (%d)\n", time, time->tm_wday); + + return 0; +} + +static const struct rtc_class_ops sd2405al_rtc_ops = { + .read_time = sd2405al_read_time, + .set_time = sd2405al_set_time, +}; + +static const struct regmap_config sd2405al_regmap_conf = { + .reg_bits = 8, + .val_bits = 8, + .max_register = SD2405AL_REG_M_END, +}; + +static int sd2405al_probe(struct i2c_client *client) +{ + struct sd2405al *sd2405al; + int ret; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + sd2405al = devm_kzalloc(&client->dev, sizeof(*sd2405al), GFP_KERNEL); + if (!sd2405al) + return -ENOMEM; + + sd2405al->dev = &client->dev; + + sd2405al->regmap = devm_regmap_init_i2c(client, &sd2405al_regmap_conf); + if (IS_ERR(sd2405al->regmap)) + return PTR_ERR(sd2405al->regmap); + + sd2405al->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(sd2405al->rtc)) + return PTR_ERR(sd2405al->rtc); + + sd2405al->rtc->ops = &sd2405al_rtc_ops; + sd2405al->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + sd2405al->rtc->range_max = RTC_TIMESTAMP_END_2099; + + dev_set_drvdata(&client->dev, sd2405al); + + ret = devm_rtc_register_device(sd2405al->rtc); + if (ret < 0) + return ret; + + return 0; +} + +static const struct i2c_device_id sd2405al_id[] = { + { "sd2405al" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, sd2405al_id); + +static const __maybe_unused struct of_device_id sd2405al_of_match[] = { + { .compatible = "dfrobot,sd2405al" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sd2405al_of_match); + +static struct i2c_driver sd2405al_driver = { + .driver = { + .name = "sd2405al", + .of_match_table = of_match_ptr(sd2405al_of_match), + }, + .probe = sd2405al_probe, + .id_table = sd2405al_id, +}; + +module_i2c_driver(sd2405al_driver); + +MODULE_AUTHOR("Tóth János "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SD2405AL RTC driver"); diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c index 98b07969609d29..3e4f2ee22b0bc8 100644 --- a/drivers/rtc/rtc-stm32.c +++ b/drivers/rtc/rtc-stm32.c @@ -7,12 +7,16 @@ #include #include #include +#include #include #include #include #include #include #include +#include +#include +#include #include #include #include @@ -42,6 +46,12 @@ #define STM32_RTC_CR_FMT BIT(6) #define STM32_RTC_CR_ALRAE BIT(8) #define STM32_RTC_CR_ALRAIE BIT(12) +#define STM32_RTC_CR_OSEL GENMASK(22, 21) +#define STM32_RTC_CR_OSEL_ALARM_A FIELD_PREP(STM32_RTC_CR_OSEL, 0x01) +#define STM32_RTC_CR_COE BIT(23) +#define STM32_RTC_CR_TAMPOE BIT(26) +#define STM32_RTC_CR_TAMPALRM_TYPE BIT(30) +#define STM32_RTC_CR_OUT2EN BIT(31) /* STM32_RTC_ISR/STM32_RTC_ICSR bit fields */ #define 
STM32_RTC_ISR_ALRAWF BIT(0) @@ -78,6 +88,12 @@ /* STM32_RTC_SR/_SCR bit fields */ #define STM32_RTC_SR_ALRA BIT(0) +/* STM32_RTC_CFGR bit fields */ +#define STM32_RTC_CFGR_OUT2_RMP BIT(0) +#define STM32_RTC_CFGR_LSCOEN GENMASK(2, 1) +#define STM32_RTC_CFGR_LSCOEN_OUT1 1 +#define STM32_RTC_CFGR_LSCOEN_OUT2_RMP 2 + /* STM32_RTC_VERR bit fields */ #define STM32_RTC_VERR_MINREV_SHIFT 0 #define STM32_RTC_VERR_MINREV GENMASK(3, 0) @@ -107,6 +123,14 @@ /* STM32 RTC driver time helpers */ #define SEC_PER_DAY (24 * 60 * 60) +/* STM32 RTC pinctrl helpers */ +#define STM32_RTC_PINMUX(_name, _action, ...) { \ + .name = (_name), \ + .action = (_action), \ + .groups = ((const char *[]){ __VA_ARGS__ }), \ + .num_groups = ARRAY_SIZE(((const char *[]){ __VA_ARGS__ })), \ +} + struct stm32_rtc; struct stm32_rtc_registers { @@ -119,6 +143,7 @@ struct stm32_rtc_registers { u16 wpr; u16 sr; u16 scr; + u16 cfgr; u16 verr; }; @@ -134,6 +159,8 @@ struct stm32_rtc_data { bool need_dbp; bool need_accuracy; bool rif_protected; + bool has_lsco; + bool has_alarm_out; }; struct stm32_rtc { @@ -146,6 +173,7 @@ struct stm32_rtc { struct clk *rtc_ck; const struct stm32_rtc_data *data; int irq_alarm; + struct clk *clk_lsco; }; struct stm32_rtc_rif_resource { @@ -171,6 +199,209 @@ static void stm32_rtc_wpr_lock(struct stm32_rtc *rtc) writel_relaxed(RTC_WPR_WRONG_KEY, rtc->base + regs->wpr); } +enum stm32_rtc_pin_name { + NONE, + OUT1, + OUT2, + OUT2_RMP +}; + +static const struct pinctrl_pin_desc stm32_rtc_pinctrl_pins[] = { + PINCTRL_PIN(OUT1, "out1"), + PINCTRL_PIN(OUT2, "out2"), + PINCTRL_PIN(OUT2_RMP, "out2_rmp"), +}; + +static int stm32_rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + return ARRAY_SIZE(stm32_rtc_pinctrl_pins); +} + +static const char *stm32_rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + return stm32_rtc_pinctrl_pins[selector].name; +} + +static int stm32_rtc_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int selector, + const unsigned int **pins, + unsigned int *num_pins) +{ + *pins = &stm32_rtc_pinctrl_pins[selector].number; + *num_pins = 1; + return 0; +} + +static const struct pinctrl_ops stm32_rtc_pinctrl_ops = { + .dt_node_to_map = pinconf_generic_dt_node_to_map_all, + .dt_free_map = pinconf_generic_dt_free_map, + .get_groups_count = stm32_rtc_pinctrl_get_groups_count, + .get_group_name = stm32_rtc_pinctrl_get_group_name, + .get_group_pins = stm32_rtc_pinctrl_get_group_pins, +}; + +struct stm32_rtc_pinmux_func { + const char *name; + const char * const *groups; + const unsigned int num_groups; + int (*action)(struct pinctrl_dev *pctl_dev, unsigned int pin); +}; + +static int stm32_rtc_pinmux_action_alarm(struct pinctrl_dev *pctldev, unsigned int pin) +{ + struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev); + struct stm32_rtc_registers regs = rtc->data->regs; + unsigned int cr = readl_relaxed(rtc->base + regs.cr); + unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr); + + if (!rtc->data->has_alarm_out) + return -EPERM; + + cr &= ~STM32_RTC_CR_OSEL; + cr |= STM32_RTC_CR_OSEL_ALARM_A; + cr &= ~STM32_RTC_CR_TAMPOE; + cr &= ~STM32_RTC_CR_COE; + cr &= ~STM32_RTC_CR_TAMPALRM_TYPE; + + switch (pin) { + case OUT1: + cr &= ~STM32_RTC_CR_OUT2EN; + cfgr &= ~STM32_RTC_CFGR_OUT2_RMP; + break; + case OUT2: + cr |= STM32_RTC_CR_OUT2EN; + cfgr &= ~STM32_RTC_CFGR_OUT2_RMP; + break; + case OUT2_RMP: + cr |= STM32_RTC_CR_OUT2EN; + cfgr |= STM32_RTC_CFGR_OUT2_RMP; + break; + default: + return -EINVAL; + } + + stm32_rtc_wpr_unlock(rtc); + 
writel_relaxed(cr, rtc->base + regs.cr); + writel_relaxed(cfgr, rtc->base + regs.cfgr); + stm32_rtc_wpr_lock(rtc); + + return 0; +} + +static int stm32_rtc_pinmux_lsco_available(struct pinctrl_dev *pctldev, unsigned int pin) +{ + struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev); + struct stm32_rtc_registers regs = rtc->data->regs; + unsigned int cr = readl_relaxed(rtc->base + regs.cr); + unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr); + unsigned int calib = STM32_RTC_CR_COE; + unsigned int tampalrm = STM32_RTC_CR_TAMPOE | STM32_RTC_CR_OSEL; + + switch (pin) { + case OUT1: + if ((!(cr & STM32_RTC_CR_OUT2EN) && + ((cr & calib) || cr & tampalrm)) || + ((cr & calib) && (cr & tampalrm))) + return -EBUSY; + break; + case OUT2_RMP: + if ((cr & STM32_RTC_CR_OUT2EN) && + (cfgr & STM32_RTC_CFGR_OUT2_RMP) && + ((cr & calib) || (cr & tampalrm))) + return -EBUSY; + break; + default: + return -EINVAL; + } + + if (clk_get_rate(rtc->rtc_ck) != 32768) + return -ERANGE; + + return 0; +} + +static int stm32_rtc_pinmux_action_lsco(struct pinctrl_dev *pctldev, unsigned int pin) +{ + struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev); + struct stm32_rtc_registers regs = rtc->data->regs; + struct device *dev = rtc->rtc_dev->dev.parent; + u8 lscoen; + int ret; + + if (!rtc->data->has_lsco) + return -EPERM; + + ret = stm32_rtc_pinmux_lsco_available(pctldev, pin); + if (ret) + return ret; + + lscoen = (pin == OUT1) ? STM32_RTC_CFGR_LSCOEN_OUT1 : STM32_RTC_CFGR_LSCOEN_OUT2_RMP; + + rtc->clk_lsco = clk_register_gate(dev, "rtc_lsco", __clk_get_name(rtc->rtc_ck), + CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, + rtc->base + regs.cfgr, lscoen, 0, NULL); + if (IS_ERR(rtc->clk_lsco)) + return PTR_ERR(rtc->clk_lsco); + + of_clk_add_provider(dev->of_node, of_clk_src_simple_get, rtc->clk_lsco); + + return 0; +} + +static const struct stm32_rtc_pinmux_func stm32_rtc_pinmux_functions[] = { + STM32_RTC_PINMUX("lsco", &stm32_rtc_pinmux_action_lsco, "out1", "out2_rmp"), + STM32_RTC_PINMUX("alarm-a", &stm32_rtc_pinmux_action_alarm, "out1", "out2", "out2_rmp"), +}; + +static int stm32_rtc_pinmux_get_functions_count(struct pinctrl_dev *pctldev) +{ + return ARRAY_SIZE(stm32_rtc_pinmux_functions); +} + +static const char *stm32_rtc_pinmux_get_fname(struct pinctrl_dev *pctldev, unsigned int selector) +{ + return stm32_rtc_pinmux_functions[selector].name; +} + +static int stm32_rtc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned int selector, + const char * const **groups, unsigned int * const num_groups) +{ + *groups = stm32_rtc_pinmux_functions[selector].groups; + *num_groups = stm32_rtc_pinmux_functions[selector].num_groups; + return 0; +} + +static int stm32_rtc_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int selector, + unsigned int group) +{ + struct stm32_rtc_pinmux_func selected_func = stm32_rtc_pinmux_functions[selector]; + struct pinctrl_pin_desc pin = stm32_rtc_pinctrl_pins[group]; + + /* Call action */ + if (selected_func.action) + return selected_func.action(pctldev, pin.number); + + return -EINVAL; +} + +static const struct pinmux_ops stm32_rtc_pinmux_ops = { + .get_functions_count = stm32_rtc_pinmux_get_functions_count, + .get_function_name = stm32_rtc_pinmux_get_fname, + .get_function_groups = stm32_rtc_pinmux_get_groups, + .set_mux = stm32_rtc_pinmux_set_mux, + .strict = true, +}; + +static struct pinctrl_desc stm32_rtc_pdesc = { + .name = DRIVER_NAME, + .pins = stm32_rtc_pinctrl_pins, + .npins = ARRAY_SIZE(stm32_rtc_pinctrl_pins), + .owner = THIS_MODULE, + .pctlops = 
&stm32_rtc_pinctrl_ops, + .pmxops = &stm32_rtc_pinmux_ops, +}; + static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc) { const struct stm32_rtc_registers *regs = &rtc->data->regs; @@ -576,6 +807,8 @@ static const struct stm32_rtc_data stm32_rtc_data = { .need_dbp = true, .need_accuracy = false, .rif_protected = false, + .has_lsco = false, + .has_alarm_out = false, .regs = { .tr = 0x00, .dr = 0x04, @@ -586,6 +819,7 @@ static const struct stm32_rtc_data stm32_rtc_data = { .wpr = 0x24, .sr = 0x0C, /* set to ISR offset to ease alarm management */ .scr = UNDEF_REG, + .cfgr = UNDEF_REG, .verr = UNDEF_REG, }, .events = { @@ -599,6 +833,8 @@ static const struct stm32_rtc_data stm32h7_rtc_data = { .need_dbp = true, .need_accuracy = false, .rif_protected = false, + .has_lsco = false, + .has_alarm_out = false, .regs = { .tr = 0x00, .dr = 0x04, @@ -609,6 +845,7 @@ static const struct stm32_rtc_data stm32h7_rtc_data = { .wpr = 0x24, .sr = 0x0C, /* set to ISR offset to ease alarm management */ .scr = UNDEF_REG, + .cfgr = UNDEF_REG, .verr = UNDEF_REG, }, .events = { @@ -631,6 +868,8 @@ static const struct stm32_rtc_data stm32mp1_data = { .need_dbp = false, .need_accuracy = true, .rif_protected = false, + .has_lsco = true, + .has_alarm_out = true, .regs = { .tr = 0x00, .dr = 0x04, @@ -641,6 +880,7 @@ static const struct stm32_rtc_data stm32mp1_data = { .wpr = 0x24, .sr = 0x50, .scr = 0x5C, + .cfgr = 0x60, .verr = 0x3F4, }, .events = { @@ -654,6 +894,8 @@ static const struct stm32_rtc_data stm32mp25_data = { .need_dbp = false, .need_accuracy = true, .rif_protected = true, + .has_lsco = true, + .has_alarm_out = true, .regs = { .tr = 0x00, .dr = 0x04, @@ -664,6 +906,7 @@ static const struct stm32_rtc_data stm32mp25_data = { .wpr = 0x24, .sr = 0x50, .scr = 0x5C, + .cfgr = 0x60, .verr = 0x3F4, }, .events = { @@ -681,6 +924,30 @@ static const struct of_device_id stm32_rtc_of_match[] = { }; MODULE_DEVICE_TABLE(of, stm32_rtc_of_match); +static void stm32_rtc_clean_outs(struct stm32_rtc *rtc) +{ + struct stm32_rtc_registers regs = rtc->data->regs; + unsigned int cr = readl_relaxed(rtc->base + regs.cr); + + cr &= ~STM32_RTC_CR_OSEL; + cr &= ~STM32_RTC_CR_TAMPOE; + cr &= ~STM32_RTC_CR_COE; + cr &= ~STM32_RTC_CR_TAMPALRM_TYPE; + cr &= ~STM32_RTC_CR_OUT2EN; + + stm32_rtc_wpr_unlock(rtc); + writel_relaxed(cr, rtc->base + regs.cr); + stm32_rtc_wpr_lock(rtc); + + if (regs.cfgr != UNDEF_REG) { + unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr); + + cfgr &= ~STM32_RTC_CFGR_LSCOEN; + cfgr &= ~STM32_RTC_CFGR_OUT2_RMP; + writel_relaxed(cfgr, rtc->base + regs.cfgr); + } +} + static int stm32_rtc_check_rif(struct stm32_rtc *stm32_rtc, struct stm32_rtc_rif_resource res) { @@ -791,6 +1058,7 @@ static int stm32_rtc_probe(struct platform_device *pdev) { struct stm32_rtc *rtc; const struct stm32_rtc_registers *regs; + struct pinctrl_dev *pctl; int ret; rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); @@ -912,6 +1180,16 @@ static int stm32_rtc_probe(struct platform_device *pdev) goto err; } + stm32_rtc_clean_outs(rtc); + + ret = devm_pinctrl_register_and_init(&pdev->dev, &stm32_rtc_pdesc, rtc, &pctl); + if (ret) + return dev_err_probe(&pdev->dev, ret, "pinctrl register failed"); + + ret = pinctrl_enable(pctl); + if (ret) + return dev_err_probe(&pdev->dev, ret, "pinctrl enable failed"); + /* * If INITS flag is reset (calendar year field set to 0x00), calendar * must be initialized @@ -950,6 +1228,9 @@ static void stm32_rtc_remove(struct platform_device *pdev) const struct stm32_rtc_registers *regs = 
&rtc->data->regs; unsigned int cr; + if (!IS_ERR_OR_NULL(rtc->clk_lsco)) + clk_unregister_gate(rtc->clk_lsco); + /* Disable interrupts */ stm32_rtc_wpr_unlock(rtc); cr = readl_relaxed(rtc->base + regs->cr); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 8e0c669061036c..e681c1745866e7 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -402,6 +402,7 @@ CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc", static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = { .rc_osc_rate = 32000, .has_out_clk = 1, + .has_auto_swt = 1, }; static void __init sun8i_v3_rtc_clk_init(struct device_node *node) diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 2cfacdd37e0949..4e24c12004f165 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -591,8 +591,8 @@ static int twl_rtc_probe(struct platform_device *pdev) memset(&nvmem_cfg, 0, sizeof(nvmem_cfg)); nvmem_cfg.name = "twl-secured-"; nvmem_cfg.type = NVMEM_TYPE_BATTERY_BACKED; - nvmem_cfg.reg_read = twl_nvram_read, - nvmem_cfg.reg_write = twl_nvram_write, + nvmem_cfg.reg_read = twl_nvram_read; + nvmem_cfg.reg_write = twl_nvram_write; nvmem_cfg.word_size = 1; nvmem_cfg.stride = 1; if (twl_class_is_4030()) { diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 61515781c5dd7f..cfe7efd5b5da97 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -515,7 +515,6 @@ static const struct file_operations fs3270_fops = { .compat_ioctl = fs3270_ioctl, /* ioctl */ .open = fs3270_open, /* open */ .release = fs3270_close, /* release */ - .llseek = no_llseek, }; static void fs3270_create_cb(int minor) diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c index 8d50c894711f8b..e069dd6858995e 100644 --- a/drivers/s390/char/hmcdrv_dev.c +++ b/drivers/s390/char/hmcdrv_dev.c @@ -186,9 +186,6 @@ static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence) if (pos < 0) return -EINVAL; - if (fp->f_pos != pos) - ++fp->f_version; - fp->f_pos = pos; return pos; } diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c index 248b5db3eaa8c0..dd6051602070b4 100644 --- a/drivers/s390/char/sclp_ctl.c +++ b/drivers/s390/char/sclp_ctl.c @@ -115,7 +115,6 @@ static const struct file_operations sclp_ctl_fops = { .open = nonseekable_open, .unlocked_ioctl = sclp_ctl_ioctl, .compat_ioctl = sclp_ctl_ioctl, - .llseek = no_llseek, }; /* diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 07df04af82f2ee..29156455970e99 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -44,6 +44,7 @@ static void __init sclp_early_facilities_detect(void) sclp.has_ibs = !!(sccb->fac117 & 0x20); sclp.has_gisaf = !!(sccb->fac118 & 0x08); sclp.has_hvs = !!(sccb->fac119 & 0x80); + sclp.has_wti = !!(sccb->fac119 & 0x40); sclp.has_kss = !!(sccb->fac98 & 0x01); sclp.has_aisii = !!(sccb->fac118 & 0x40); sclp.has_aeni = !!(sccb->fac118 & 0x20); diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index cc8237afeffa0d..89778d922d9f02 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -52,7 +52,6 @@ static const struct file_operations tape_fops = #endif .open = tapechar_open, .release = tapechar_release, - .llseek = no_llseek, }; static int tapechar_major = TAPECHAR_MAJOR; diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c index 42c9f77f8da0a0..f598edc5f25192 100644 --- a/drivers/s390/char/uvdevice.c +++ 
b/drivers/s390/char/uvdevice.c @@ -448,7 +448,6 @@ static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) static const struct file_operations uvio_dev_fops = { .owner = THIS_MODULE, .unlocked_ioctl = uvio_ioctl, - .llseek = no_llseek, }; static struct miscdevice uvio_dev_miscdev = { diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index eb0520a9d4af5a..c6d58335beb476 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -242,7 +242,6 @@ static const struct file_operations vmcp_fops = { .write = vmcp_write, .unlocked_ioctl = vmcp_ioctl, .compat_ioctl = vmcp_ioctl, - .llseek = no_llseek, }; static struct miscdevice vmcp_dev = { diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index c09e1e09fb66aa..bd5cecc44123ff 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -96,7 +96,6 @@ static const struct file_operations vmlogrdr_fops = { .open = vmlogrdr_open, .release = vmlogrdr_release, .read = vmlogrdr_read, - .llseek = no_llseek, }; diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 0969fa01df58b0..33cebb91b93382 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -165,7 +165,6 @@ static const struct file_operations zcore_reipl_fops = { .write = zcore_reipl_write, .open = zcore_reipl_open, .release = zcore_reipl_release, - .llseek = no_llseek, }; static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, @@ -200,7 +199,6 @@ static const struct file_operations zcore_hsa_fops = { .write = zcore_hsa_write, .read = zcore_hsa_read, .open = nonseekable_open, - .llseek = no_llseek, }; static int __init check_sdias(void) diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index e6c800653f9880..1e58ee3cc87db1 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -924,7 +924,6 @@ static const struct file_operations chsc_fops = { .release = chsc_release, .unlocked_ioctl = chsc_ioctl, .compat_ioctl = chsc_ioctl, - .llseek = no_llseek, }; static struct miscdevice chsc_misc_device = { diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 53b68f8c32f3bc..7b59d20bf7850e 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1332,7 +1332,6 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf, static const struct proc_ops cio_settle_proc_ops = { .proc_open = nonseekable_open, .proc_write = cio_settle_write, - .proc_lseek = no_llseek, }; static int __init cio_settle_init(void) diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index bd94811fd9f1f8..c88b6e07184708 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -13,10 +13,22 @@ obj-$(CONFIG_ZCRYPT) += zcrypt.o # adapter drivers depend on ap.o and zcrypt.o obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o -# pkey kernel module -pkey-objs := pkey_api.o +# pkey base and api module +pkey-objs := pkey_base.o pkey_api.o pkey_sysfs.o obj-$(CONFIG_PKEY) += pkey.o +# pkey cca handler module +pkey-cca-objs := pkey_cca.o +obj-$(CONFIG_PKEY_CCA) += pkey-cca.o + +# pkey ep11 handler module +pkey-ep11-objs := pkey_ep11.o +obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o + +# pkey pckmo handler module +pkey-pckmo-objs := pkey_pckmo.o +obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o + # adjunct processor matrix vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o obj-$(CONFIG_VFIO_AP) += vfio_ap.o diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index f9f682f194154d..60cea6c243497b 100644 
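The rtc-twl.c hunk earlier in this series is a good reminder of why the two replaced lines compiled in the first place: the trailing commas turned three member assignments into a single statement chained with the C comma operator. A minimal, self-contained sketch (the demo_nvmem_config type and demo_* handlers are hypothetical stand-ins, not the kernel structures) shows the behaviour the fix removes:

#include <stdio.h>
#include <stddef.h>

/* simplified stand-in for the nvmem_config members set up in twl_rtc_probe() */
struct demo_nvmem_config {
	int (*reg_read)(void *priv, unsigned int off, void *val, size_t len);
	int (*reg_write)(void *priv, unsigned int off, void *val, size_t len);
	int word_size;
};

static int demo_read(void *priv, unsigned int off, void *val, size_t len)  { return 0; }
static int demo_write(void *priv, unsigned int off, void *val, size_t len) { return 0; }

int main(void)
{
	struct demo_nvmem_config cfg = { 0 };

	/*
	 * One statement, not three: each trailing comma is the comma
	 * operator, which evaluates the left assignment, discards its
	 * value and moves on.  All three members are still assigned, so
	 * the code appears to work, but inserting an 'if' or reordering
	 * the lines can silently change what gets assigned - hence the
	 * switch to ';' in the patch.
	 */
	cfg.reg_read = demo_read,
	cfg.reg_write = demo_write,
	cfg.word_size = 1;

	printf("word_size=%d\n", cfg.word_size);
	return 0;
}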
--- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -107,6 +107,7 @@ debug_info_t *ap_dbf_info; static bool ap_scan_bus(void); static bool ap_scan_bus_result; /* result of last ap_scan_bus() */ static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */ +static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */ static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */ static int ap_scan_bus_time = AP_CONFIG_TIME; static struct timer_list ap_scan_bus_timer; @@ -733,7 +734,7 @@ static void ap_check_bindings_complete(void) if (!completion_done(&ap_apqn_bindings_complete)) { complete_all(&ap_apqn_bindings_complete); ap_send_bindings_complete_uevent(); - pr_debug("%s all apqn bindings complete\n", __func__); + pr_debug("all apqn bindings complete\n"); } } } @@ -768,7 +769,7 @@ int ap_wait_apqn_bindings_complete(unsigned long timeout) else if (l == 0 && timeout) rc = -ETIME; - pr_debug("%s rc=%d\n", __func__, rc); + pr_debug("rc=%d\n", rc); return rc; } EXPORT_SYMBOL(ap_wait_apqn_bindings_complete); @@ -795,8 +796,7 @@ static int __ap_revise_reserved(struct device *dev, void *dummy) drvres = to_ap_drv(dev->driver)->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) { - pr_debug("%s reprobing queue=%02x.%04x\n", - __func__, card, queue); + pr_debug("reprobing queue=%02x.%04x\n", card, queue); rc = device_reprobe(dev); if (rc) AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", @@ -1000,17 +1000,31 @@ bool ap_bus_force_rescan(void) unsigned long scan_counter = atomic64_read(&ap_scan_bus_count); bool rc = false; - pr_debug(">%s scan counter=%lu\n", __func__, scan_counter); + pr_debug("> scan counter=%lu\n", scan_counter); /* Only trigger AP bus scans after the initial scan is done */ if (scan_counter <= 0) goto out; + /* + * There is one unlikely but nevertheless valid scenario where the + * thread holding the mutex may try to send some crypto load but + * all cards are offline so a rescan is triggered which causes + * a recursive call of ap_bus_force_rescan(). A simple return if + * the mutex is already locked by this thread solves this. 
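The comment added in this hunk describes the re-entrancy case that the new ap_scan_bus_task pointer guards against. The pattern is small enough to show in isolation; the sketch below uses hypothetical names (scan_mutex, scan_task, demo_force_rescan) and only illustrates the owner check, under the same assumption the driver makes, namely that the pointer is written exclusively by the task holding the mutex:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/sched.h>

static DEFINE_MUTEX(scan_mutex);
static struct task_struct *scan_task;	/* set/cleared only while scan_mutex is held */

static bool demo_force_rescan(void)
{
	bool rc = false;

	/*
	 * If the current task already owns the mutex, this is a recursive
	 * call (a scan triggered work that wants another scan); return
	 * instead of blocking on the lock we already hold.
	 */
	if (mutex_is_locked(&scan_mutex) && scan_task == current)
		return false;

	if (mutex_trylock(&scan_mutex)) {
		scan_task = current;
		/* ... run the actual bus scan here ... */
		rc = true;
		scan_task = NULL;
		mutex_unlock(&scan_mutex);
	}

	return rc;
}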
+ */ + if (mutex_is_locked(&ap_scan_bus_mutex)) { + if (ap_scan_bus_task == current) + goto out; + } + /* Try to acquire the AP scan bus mutex */ if (mutex_trylock(&ap_scan_bus_mutex)) { /* mutex acquired, run the AP bus scan */ + ap_scan_bus_task = current; ap_scan_bus_result = ap_scan_bus(); rc = ap_scan_bus_result; + ap_scan_bus_task = NULL; mutex_unlock(&ap_scan_bus_mutex); goto out; } @@ -1029,7 +1043,7 @@ bool ap_bus_force_rescan(void) mutex_unlock(&ap_scan_bus_mutex); out: - pr_debug("%s rc=%d\n", __func__, rc); + pr_debug("rc=%d\n", rc); return rc; } EXPORT_SYMBOL(ap_bus_force_rescan); @@ -1043,7 +1057,7 @@ static int ap_bus_cfg_chg(struct notifier_block *nb, if (action != CHSC_NOTIFY_AP_CFG) return NOTIFY_DONE; - pr_debug("%s config change, forcing bus rescan\n", __func__); + pr_debug("config change, forcing bus rescan\n"); ap_bus_force_rescan(); @@ -1900,8 +1914,8 @@ static inline void ap_scan_domains(struct ap_card *ac) aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED; } spin_unlock_bh(&aq->lock); - pr_debug("%s(%d,%d) queue dev checkstop on\n", - __func__, ac->id, dom); + pr_debug("(%d,%d) queue dev checkstop on\n", + ac->id, dom); /* 'receive' pending messages with -EAGAIN */ ap_flush_queue(aq); goto put_dev_and_continue; @@ -1911,8 +1925,8 @@ static inline void ap_scan_domains(struct ap_card *ac) if (aq->dev_state > AP_DEV_STATE_UNINITIATED) _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); - pr_debug("%s(%d,%d) queue dev checkstop off\n", - __func__, ac->id, dom); + pr_debug("(%d,%d) queue dev checkstop off\n", + ac->id, dom); goto put_dev_and_continue; } /* config state change */ @@ -1924,8 +1938,8 @@ static inline void ap_scan_domains(struct ap_card *ac) aq->last_err_rc = AP_RESPONSE_DECONFIGURED; } spin_unlock_bh(&aq->lock); - pr_debug("%s(%d,%d) queue dev config off\n", - __func__, ac->id, dom); + pr_debug("(%d,%d) queue dev config off\n", + ac->id, dom); ap_send_config_uevent(&aq->ap_dev, aq->config); /* 'receive' pending messages with -EAGAIN */ ap_flush_queue(aq); @@ -1936,8 +1950,8 @@ static inline void ap_scan_domains(struct ap_card *ac) if (aq->dev_state > AP_DEV_STATE_UNINITIATED) _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); - pr_debug("%s(%d,%d) queue dev config on\n", - __func__, ac->id, dom); + pr_debug("(%d,%d) queue dev config on\n", + ac->id, dom); ap_send_config_uevent(&aq->ap_dev, aq->config); goto put_dev_and_continue; } @@ -2009,8 +2023,8 @@ static inline void ap_scan_adapter(int ap) ap_scan_rm_card_dev_and_queue_devs(ac); put_device(dev); } else { - pr_debug("%s(%d) no type info (no APQN found), ignored\n", - __func__, ap); + pr_debug("(%d) no type info (no APQN found), ignored\n", + ap); } return; } @@ -2022,8 +2036,7 @@ static inline void ap_scan_adapter(int ap) ap_scan_rm_card_dev_and_queue_devs(ac); put_device(dev); } else { - pr_debug("%s(%d) no valid type (0) info, ignored\n", - __func__, ap); + pr_debug("(%d) no valid type (0) info, ignored\n", ap); } return; } @@ -2202,7 +2215,7 @@ static bool ap_scan_bus(void) bool config_changed; int ap; - pr_debug(">%s\n", __func__); + pr_debug(">\n"); /* (re-)fetch configuration via QCI */ config_changed = ap_get_configuration(); @@ -2243,7 +2256,7 @@ static bool ap_scan_bus(void) } if (atomic64_inc_return(&ap_scan_bus_count) == 1) { - pr_debug("%s init scan complete\n", __func__); + pr_debug("init scan complete\n"); ap_send_init_scan_done_uevent(); } @@ -2251,7 +2264,7 @@ static bool ap_scan_bus(void) mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ); - pr_debug("<%s config_changed=%d\n", 
__func__, config_changed); + pr_debug("< config_changed=%d\n", config_changed); return config_changed; } @@ -2284,7 +2297,9 @@ static void ap_scan_bus_wq_callback(struct work_struct *unused) * system_long_wq which invokes this function here again. */ if (mutex_trylock(&ap_scan_bus_mutex)) { + ap_scan_bus_task = current; ap_scan_bus_result = ap_scan_bus(); + ap_scan_bus_task = NULL; mutex_unlock(&ap_scan_bus_mutex); } } diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 1f647ffd6f4db9..8c878c5aa31fd7 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -171,8 +171,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) aq->queue_count = 0; list_splice_init(&aq->pendingq, &aq->requestq); aq->requestq_count += aq->pendingq_count; - pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n", - __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), + pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n", + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), aq->pendingq_count, aq->requestq_count); aq->pendingq_count = 0; break; @@ -453,8 +453,8 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq) case AP_BS_Q_USABLE: /* association is through */ aq->sm_state = AP_SM_STATE_IDLE; - pr_debug("%s queue 0x%02x.%04x associated with %u\n", - __func__, AP_QID_CARD(aq->qid), + pr_debug("queue 0x%02x.%04x associated with %u\n", + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), aq->assoc_idx); return AP_SM_WAIT_NONE; case AP_BS_Q_USABLE_NO_SECURE_KEY: @@ -697,8 +697,8 @@ static ssize_t ap_functions_show(struct device *dev, status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", + status.response_code, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } @@ -853,8 +853,8 @@ static ssize_t se_bind_show(struct device *dev, status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", + status.response_code, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } @@ -981,8 +981,8 @@ static ssize_t se_associate_show(struct device *dev, status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", + status.response_code, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index ffc0b5db55c290..3a39e167bdbff8 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -10,1338 +10,698 @@ #define KMSG_COMPONENT "pkey" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include #include "zcrypt_api.h" #include "zcrypt_ccamisc.h" -#include "zcrypt_ep11misc.h" -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("s390 protected key interface"); - -#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ -#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header)) -#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ -#define MAXAPQNSINLIST 64 /* 
max 64 apqns within a apqn list */ -#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */ +#include "pkey_base.h" /* - * debug feature data and functions + * Helper functions */ - -static debug_info_t *pkey_dbf_info; - -#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__) -#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__) -#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__) - -static void __init pkey_debug_init(void) +static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, size_t keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { - /* 5 arguments per dbf entry (including the format string ptr) */ - pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); - debug_register_view(pkey_dbf_info, &debug_sprintf_view); - debug_set_level(pkey_dbf_info, 3); -} - -static void __exit pkey_debug_exit(void) -{ - debug_unregister(pkey_dbf_info); -} + int rc; -/* inside view of a protected key token (only type 0x00 version 0x01) */ -struct protaeskeytoken { - u8 type; /* 0x00 for PAES specific key tokens */ - u8 res0[3]; - u8 version; /* should be 0x01 for protected AES key token */ - u8 res1[3]; - u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ - u32 len; /* bytes actually stored in protkey[] */ - u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ -} __packed; - -/* inside view of a clear key token (type 0x00 version 0x02) */ -struct clearkeytoken { - u8 type; /* 0x00 for PAES specific key tokens */ - u8 res0[3]; - u8 version; /* 0x02 for clear key token */ - u8 res1[3]; - u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */ - u32 len; /* bytes actually stored in clearkey[] */ - u8 clearkey[]; /* clear key value */ -} __packed; - -/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */ -static inline u32 pkey_keytype_aes_to_size(u32 keytype) -{ - switch (keytype) { - case PKEY_KEYTYPE_AES_128: - return 16; - case PKEY_KEYTYPE_AES_192: - return 24; - case PKEY_KEYTYPE_AES_256: - return 32; - default: - return 0; + /* try the direct way */ + rc = pkey_handler_key_to_protkey(apqns, nr_apqns, + key, keylen, + protkey, protkeylen, + protkeytype); + + /* if this did not work, try the slowpath way */ + if (rc == -ENODEV) { + rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns, + key, keylen, + protkey, protkeylen, + protkeytype); + if (rc) + rc = -ENODEV; } + + pr_debug("rc=%d\n", rc); + return rc; } /* - * Create a protected key from a clear key value via PCKMO instruction. 
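The constants and the pkey_keytype_aes_to_size() helper removed around here encode the protected-key blob layout that the new handler modules keep relying on: the wrapped key material is followed by a 32-byte wrapping-key verification pattern (WKVP), so an AES-128/192/256 protected key occupies 48/56/64 bytes, exactly as the removed comments spell out. A small illustrative helper reproducing that arithmetic (the demo_* names are not part of the pkey API):

#include <linux/types.h>
#include <asm/pkey.h>			/* PKEY_KEYTYPE_AES_* */

#define DEMO_AES_WK_VP_SIZE	32	/* WKVP appended to every protected key */

/* clear-key size in bytes for the AES keytypes, as in the removed helper */
static u32 demo_keytype_to_size(u32 keytype)
{
	switch (keytype) {
	case PKEY_KEYTYPE_AES_128: return 16;
	case PKEY_KEYTYPE_AES_192: return 24;
	case PKEY_KEYTYPE_AES_256: return 32;
	default:		   return 0;
	}
}

/* total protected-key blob length: key material plus the 32-byte WKVP */
static u32 demo_protkey_blob_len(u32 keytype)
{
	u32 size = demo_keytype_to_size(keytype);

	return size ? size + DEMO_AES_WK_VP_SIZE : 0;	/* 48, 56 or 64 bytes */
}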
+ * In-Kernel function: Transform a key blob (of any type) into a protected key */ -static int pkey_clr2protkey(u32 keytype, const u8 *clrkey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +int pkey_key2protkey(const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) { - /* mask of available pckmo subfunctions */ - static cpacf_mask_t pckmo_functions; - - u8 paramblock[112]; - u32 pkeytype; - int keysize; - long fc; - - switch (keytype) { - case PKEY_KEYTYPE_AES_128: - /* 16 byte key, 32 byte aes wkvp, total 48 bytes */ - keysize = 16; - pkeytype = keytype; - fc = CPACF_PCKMO_ENC_AES_128_KEY; - break; - case PKEY_KEYTYPE_AES_192: - /* 24 byte key, 32 byte aes wkvp, total 56 bytes */ - keysize = 24; - pkeytype = keytype; - fc = CPACF_PCKMO_ENC_AES_192_KEY; - break; - case PKEY_KEYTYPE_AES_256: - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ - keysize = 32; - pkeytype = keytype; - fc = CPACF_PCKMO_ENC_AES_256_KEY; - break; - case PKEY_KEYTYPE_ECC_P256: - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ - keysize = 32; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_P256_KEY; - break; - case PKEY_KEYTYPE_ECC_P384: - /* 48 byte key, 32 byte aes wkvp, total 80 bytes */ - keysize = 48; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_P384_KEY; - break; - case PKEY_KEYTYPE_ECC_P521: - /* 80 byte key, 32 byte aes wkvp, total 112 bytes */ - keysize = 80; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_P521_KEY; - break; - case PKEY_KEYTYPE_ECC_ED25519: - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ - keysize = 32; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY; - break; - case PKEY_KEYTYPE_ECC_ED448: - /* 64 byte key, 32 byte aes wkvp, total 96 bytes */ - keysize = 64; - pkeytype = PKEY_KEYTYPE_ECC; - fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; - break; - default: - PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", - __func__, keytype); - return -EINVAL; - } - - if (*protkeylen < keysize + AES_WK_VP_SIZE) { - PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n", - __func__, *protkeylen, keysize + AES_WK_VP_SIZE); - return -EINVAL; - } + int rc; - /* Did we already check for PCKMO ? */ - if (!pckmo_functions.bytes[0]) { - /* no, so check now */ - if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) - return -ENODEV; + rc = key2protkey(NULL, 0, key, keylen, + protkey, protkeylen, protkeytype); + if (rc == -ENODEV) { + pkey_handler_request_modules(); + rc = key2protkey(NULL, 0, key, keylen, + protkey, protkeylen, protkeytype); } - /* check for the pckmo subfunction we need now */ - if (!cpacf_test_func(&pckmo_functions, fc)) { - PKEY_DBF_ERR("%s pckmo functions not available\n", __func__); - return -ENODEV; - } - - /* prepare param block */ - memset(paramblock, 0, sizeof(paramblock)); - memcpy(paramblock, clrkey, keysize); - /* call the pckmo instruction */ - cpacf_pckmo(fc, paramblock); - - /* copy created protected key to key buffer including the wkvp block */ - *protkeylen = keysize + AES_WK_VP_SIZE; - memcpy(protkey, paramblock, *protkeylen); - *protkeytype = pkeytype; - - return 0; + return rc; } +EXPORT_SYMBOL(pkey_key2protkey); /* - * Find card and transform secure key into protected key. 
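The new key2protkey()/pkey_key2protkey() pair introduced above establishes the dispatch order used throughout the rewritten ioctl code: ask a registered handler directly, fall back to the slowpath conversion, and only then request that the pkey-cca/pkey-ep11/pkey-pckmo handler modules be loaded and retry. A condensed sketch of that flow (the demo_* wrapper is illustrative; the pkey_handler_* calls and the pkey_base.h header are the ones used in this file):

#include <linux/types.h>
#include "pkey_base.h"		/* pkey_handler_* dispatch helpers */

static int demo_blob_to_protkey(const u8 *key, u32 keylen,
				u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
	int rc;

	/* fast path: a loaded handler that understands this blob type */
	rc = pkey_handler_key_to_protkey(NULL, 0, key, keylen,
					 protkey, protkeylen, protkeytype);

	/* slow path, e.g. conversion via an intermediate secure key */
	if (rc == -ENODEV) {
		rc = pkey_handler_slowpath_key_to_protkey(NULL, 0, key, keylen,
							  protkey, protkeylen,
							  protkeytype);
		if (rc)
			rc = -ENODEV;
	}

	/* still no handler: trigger module loading and retry once */
	if (rc == -ENODEV) {
		pkey_handler_request_modules();
		rc = pkey_handler_key_to_protkey(NULL, 0, key, keylen,
						 protkey, protkeylen,
						 protkeytype);
	}

	return rc;
}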
+ * Ioctl functions */ -static int pkey_skey2pkey(const u8 *key, u8 *protkey, - u32 *protkeylen, u32 *protkeytype) -{ - struct keytoken_header *hdr = (struct keytoken_header *)key; - u16 cardnr, domain; - int rc, verify; - - zcrypt_wait_api_operational(); - - /* - * The cca_xxx2protkey call may fail when a card has been - * addressed where the master key was changed after last fetch - * of the mkvp into the cache. Try 3 times: First without verify - * then with verify and last round with verify and old master - * key verification pattern match not ignored. - */ - for (verify = 0; verify < 3; verify++) { - rc = cca_findcard(key, &cardnr, &domain, verify); - if (rc < 0) - continue; - if (rc > 0 && verify < 2) - continue; - switch (hdr->version) { - case TOKVER_CCA_AES: - rc = cca_sec2protkey(cardnr, domain, key, - protkey, protkeylen, protkeytype); - break; - case TOKVER_CCA_VLSC: - rc = cca_cipher2protkey(cardnr, domain, key, - protkey, protkeylen, - protkeytype); - break; - default: - return -EINVAL; - } - if (rc == 0) - break; - } - if (rc) - pr_debug("%s failed rc=%d\n", __func__, rc); +static void *_copy_key_from_user(void __user *ukey, size_t keylen) +{ + if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE) + return ERR_PTR(-EINVAL); - return rc; + return memdup_user(ukey, keylen); } -/* - * Construct EP11 key with given clear key value. - */ -static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, - u8 *keybuf, size_t *keybuflen) +static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns) { - u32 nr_apqns, *apqns = NULL; - u16 card, dom; - int i, rc; - - zcrypt_wait_api_operational(); - - /* build a list of apqns suitable for ep11 keys with cpacf support */ - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, - ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, - NULL); - if (rc) - goto out; - - /* go through the list of apqns and try to bild an ep11 key */ - for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { - card = apqns[i] >> 16; - dom = apqns[i] & 0xFFFF; - rc = ep11_clr2keyblob(card, dom, clrkeylen * 8, - 0, clrkey, keybuf, keybuflen, - PKEY_TYPE_EP11); - if (rc == 0) - break; - } + if (!uapqns || nr_apqns == 0) + return NULL; -out: - kfree(apqns); - if (rc) - pr_debug("%s failed rc=%d\n", __func__, rc); - return rc; + return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn)); } -/* - * Find card and transform EP11 secure key into protected key. - */ -static int pkey_ep11key2pkey(const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs) { - u32 nr_apqns, *apqns = NULL; - int i, j, rc = -ENODEV; - u16 card, dom; + struct pkey_genseck kgs; + struct pkey_apqn apqn; + u32 keybuflen; + int rc; - zcrypt_wait_api_operational(); + if (copy_from_user(&kgs, ugs, sizeof(kgs))) + return -EFAULT; - /* try two times in case of failure */ - for (i = 0; i < 2 && rc; i++) { + apqn.card = kgs.cardnr; + apqn.domain = kgs.domain; + keybuflen = sizeof(kgs.seckey.seckey); + rc = pkey_handler_gen_key(&apqn, 1, + kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, + kgs.seckey.seckey, &keybuflen, NULL); + pr_debug("gen_key()=%d\n", rc); + if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) + rc = -EFAULT; + memzero_explicit(&kgs, sizeof(kgs)); - /* build a list of apqns suitable for this key */ - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, - ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4, - ep11_kb_wkvp(key, keylen)); - if (rc) - continue; /* retry findcard on failure */ - - /* go through the list of apqns and try to derive an pkey */ - for (rc = -ENODEV, j = 0; j < nr_apqns && rc; j++) { - card = apqns[j] >> 16; - dom = apqns[j] & 0xFFFF; - rc = ep11_kblob2protkey(card, dom, key, keylen, - protkey, protkeylen, protkeytype); - } + return rc; +} - kfree(apqns); - } +static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs) +{ + struct pkey_clr2seck kcs; + struct pkey_apqn apqn; + u32 keybuflen; + int rc; - if (rc) - pr_debug("%s failed rc=%d\n", __func__, rc); + if (copy_from_user(&kcs, ucs, sizeof(kcs))) + return -EFAULT; + + apqn.card = kcs.cardnr; + apqn.domain = kcs.domain; + keybuflen = sizeof(kcs.seckey.seckey); + rc = pkey_handler_clr_to_key(&apqn, 1, + kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, + kcs.clrkey.clrkey, + pkey_keytype_aes_to_size(kcs.keytype), + kcs.seckey.seckey, &keybuflen, NULL); + pr_debug("clr_to_key()=%d\n", rc); + if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs))) + rc = -EFAULT; + memzero_explicit(&kcs, sizeof(kcs)); return rc; } -/* - * Verify key and give back some info about the key. - */ -static int pkey_verifykey(const struct pkey_seckey *seckey, - u16 *pcardnr, u16 *pdomain, - u16 *pkeysize, u32 *pattributes) +static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp) { - struct secaeskeytoken *t = (struct secaeskeytoken *)seckey; - u16 cardnr, domain; + struct pkey_sec2protk ksp; + struct pkey_apqn apqn; int rc; - /* check the secure key for valid AES secure key */ - rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, (u8 *)seckey, 0); - if (rc) - goto out; - if (pattributes) - *pattributes = PKEY_VERIFY_ATTR_AES; - if (pkeysize) - *pkeysize = t->bitsize; - - /* try to find a card which can handle this key */ - rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1); - if (rc < 0) - goto out; - - if (rc > 0) { - /* key mkvp matches to old master key mkvp */ - pr_debug("%s secure key has old mkvp\n", __func__); - if (pattributes) - *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP; - rc = 0; - } - - if (pcardnr) - *pcardnr = cardnr; - if (pdomain) - *pdomain = domain; + if (copy_from_user(&ksp, usp, sizeof(ksp))) + return -EFAULT; + + apqn.card = ksp.cardnr; + apqn.domain = ksp.domain; + ksp.protkey.len = sizeof(ksp.protkey.protkey); + rc = pkey_handler_key_to_protkey(&apqn, 1, + ksp.seckey.seckey, + sizeof(ksp.seckey.seckey), + ksp.protkey.protkey, + &ksp.protkey.len, &ksp.protkey.type); + pr_debug("key_to_protkey()=%d\n", rc); + if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) + rc = -EFAULT; + memzero_explicit(&ksp, sizeof(ksp)); -out: - pr_debug("%s rc=%d\n", __func__, rc); return rc; } -/* - * Generate a random protected key - */ -static int pkey_genprotkey(u32 keytype, u8 *protkey, - u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp) { - u8 clrkey[32]; - int keysize; + struct pkey_clr2protk kcp; + struct clearkeytoken *t; + u32 keylen; + u8 *tmpbuf; int rc; - keysize = pkey_keytype_aes_to_size(keytype); - if (!keysize) { - PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", __func__, - keytype); + if (copy_from_user(&kcp, ucp, sizeof(kcp))) + return -EFAULT; + + /* build a 'clear key token' from the clear key value */ + keylen = pkey_keytype_aes_to_size(kcp.keytype); + if (!keylen) { + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", + __func__, kcp.keytype); + memzero_explicit(&kcp, sizeof(kcp)); return -EINVAL; } + tmpbuf = kzalloc(sizeof(*t) + keylen, 
GFP_KERNEL); + if (!tmpbuf) { + memzero_explicit(&kcp, sizeof(kcp)); + return -ENOMEM; + } + t = (struct clearkeytoken *)tmpbuf; + t->type = TOKTYPE_NON_CCA; + t->version = TOKVER_CLEAR_KEY; + t->keytype = (keylen - 8) >> 3; + t->len = keylen; + memcpy(t->clearkey, kcp.clrkey.clrkey, keylen); + kcp.protkey.len = sizeof(kcp.protkey.protkey); - /* generate a dummy random clear key */ - get_random_bytes(clrkey, keysize); + rc = key2protkey(NULL, 0, + tmpbuf, sizeof(*t) + keylen, + kcp.protkey.protkey, + &kcp.protkey.len, &kcp.protkey.type); + pr_debug("key2protkey()=%d\n", rc); - /* convert it to a dummy protected key */ - rc = pkey_clr2protkey(keytype, clrkey, - protkey, protkeylen, protkeytype); - if (rc) - return rc; + kfree_sensitive(tmpbuf); - /* replace the key part of the protected key with random bytes */ - get_random_bytes(protkey, keysize); + if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp))) + rc = -EFAULT; + memzero_explicit(&kcp, sizeof(kcp)); - return 0; + return rc; } -/* - * Verify if a protected key is still valid - */ -static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen, - u32 protkeytype) +static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc) { - struct { - u8 iv[AES_BLOCK_SIZE]; - u8 key[MAXPROTKEYSIZE]; - } param; - u8 null_msg[AES_BLOCK_SIZE]; - u8 dest_buf[AES_BLOCK_SIZE]; - unsigned int k, pkeylen; - unsigned long fc; - - switch (protkeytype) { - case PKEY_KEYTYPE_AES_128: - pkeylen = 16 + AES_WK_VP_SIZE; - fc = CPACF_KMC_PAES_128; - break; - case PKEY_KEYTYPE_AES_192: - pkeylen = 24 + AES_WK_VP_SIZE; - fc = CPACF_KMC_PAES_192; - break; - case PKEY_KEYTYPE_AES_256: - pkeylen = 32 + AES_WK_VP_SIZE; - fc = CPACF_KMC_PAES_256; - break; - default: - PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__, - protkeytype); - return -EINVAL; - } - if (protkeylen != pkeylen) { - PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n", - __func__, protkeylen, protkeytype); - return -EINVAL; - } + struct pkey_findcard kfc; + struct pkey_apqn *apqns; + size_t nr_apqns; + int rc; - memset(null_msg, 0, sizeof(null_msg)); + if (copy_from_user(&kfc, ufc, sizeof(kfc))) + return -EFAULT; - memset(param.iv, 0, sizeof(param.iv)); - memcpy(param.key, protkey, protkeylen); + nr_apqns = MAXAPQNSINLIST; + apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), GFP_KERNEL); + if (!apqns) + return -ENOMEM; - k = cpacf_kmc(fc | CPACF_ENCRYPT, ¶m, null_msg, dest_buf, - sizeof(null_msg)); - if (k != sizeof(null_msg)) { - PKEY_DBF_ERR("%s protected key is not valid\n", __func__); - return -EKEYREJECTED; + rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, + sizeof(kfc.seckey.seckey), + PKEY_FLAGS_MATCH_CUR_MKVP, + apqns, &nr_apqns); + if (rc == -ENODEV) + rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, + sizeof(kfc.seckey.seckey), + PKEY_FLAGS_MATCH_ALT_MKVP, + apqns, &nr_apqns); + pr_debug("apqns_for_key()=%d\n", rc); + if (rc) { + kfree(apqns); + return rc; } + kfc.cardnr = apqns[0].card; + kfc.domain = apqns[0].domain; + kfree(apqns); + if (copy_to_user(ufc, &kfc, sizeof(kfc))) + return -EFAULT; return 0; } -/* Helper for pkey_nonccatok2pkey, handles aes clear key token */ -static int nonccatokaes2pkey(const struct clearkeytoken *t, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp) { - size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE); - u8 *tmpbuf = NULL; - u32 keysize; + struct pkey_skey2pkey ksp; int rc; - keysize = pkey_keytype_aes_to_size(t->keytype); - if 
(!keysize) { - PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", - __func__, t->keytype); - return -EINVAL; - } - if (t->len != keysize) { - PKEY_DBF_ERR("%s non clear key aes token: invalid key len %u\n", - __func__, t->len); - return -EINVAL; - } - - /* try direct way with the PCKMO instruction */ - rc = pkey_clr2protkey(t->keytype, t->clearkey, - protkey, protkeylen, protkeytype); - if (!rc) - goto out; + if (copy_from_user(&ksp, usp, sizeof(ksp))) + return -EFAULT; + + ksp.protkey.len = sizeof(ksp.protkey.protkey); + rc = pkey_handler_key_to_protkey(NULL, 0, + ksp.seckey.seckey, + sizeof(ksp.seckey.seckey), + ksp.protkey.protkey, + &ksp.protkey.len, + &ksp.protkey.type); + pr_debug("key_to_protkey()=%d\n", rc); + if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) + rc = -EFAULT; + memzero_explicit(&ksp, sizeof(ksp)); - /* PCKMO failed, so try the CCA secure key way */ - tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC); - if (!tmpbuf) - return -ENOMEM; - zcrypt_wait_api_operational(); - rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf); - if (rc) - goto try_via_ep11; - rc = pkey_skey2pkey(tmpbuf, - protkey, protkeylen, protkeytype); - if (!rc) - goto out; - -try_via_ep11: - /* if the CCA way also failed, let's try via EP11 */ - rc = pkey_clr2ep11key(t->clearkey, t->len, - tmpbuf, &tmpbuflen); - if (rc) - goto failure; - rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen, - protkey, protkeylen, protkeytype); - if (!rc) - goto out; - -failure: - PKEY_DBF_ERR("%s unable to build protected key from clear", __func__); - -out: - kfree(tmpbuf); return rc; } -/* Helper for pkey_nonccatok2pkey, handles ecc clear key token */ -static int nonccatokecc2pkey(const struct clearkeytoken *t, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk) { - u32 keylen; + u32 keytype, keybitsize, flags; + struct pkey_verifykey kvk; int rc; - switch (t->keytype) { - case PKEY_KEYTYPE_ECC_P256: - keylen = 32; - break; - case PKEY_KEYTYPE_ECC_P384: - keylen = 48; - break; - case PKEY_KEYTYPE_ECC_P521: - keylen = 80; - break; - case PKEY_KEYTYPE_ECC_ED25519: - keylen = 32; - break; - case PKEY_KEYTYPE_ECC_ED448: - keylen = 64; - break; - default: - PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", - __func__, t->keytype); - return -EINVAL; - } - - if (t->len != keylen) { - PKEY_DBF_ERR("%s non clear key ecc token: invalid key len %u\n", - __func__, t->len); - return -EINVAL; - } + if (copy_from_user(&kvk, uvk, sizeof(kvk))) + return -EFAULT; - /* only one path possible: via PCKMO instruction */ - rc = pkey_clr2protkey(t->keytype, t->clearkey, - protkey, protkeylen, protkeytype); - if (rc) { - PKEY_DBF_ERR("%s unable to build protected key from clear", - __func__); - } + kvk.cardnr = 0xFFFF; + kvk.domain = 0xFFFF; + rc = pkey_handler_verify_key(kvk.seckey.seckey, + sizeof(kvk.seckey.seckey), + &kvk.cardnr, &kvk.domain, + &keytype, &keybitsize, &flags); + pr_debug("verify_key()=%d\n", rc); + if (!rc && keytype != PKEY_TYPE_CCA_DATA) + rc = -EINVAL; + kvk.attributes = PKEY_VERIFY_ATTR_AES; + kvk.keysize = (u16)keybitsize; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + kvk.attributes |= PKEY_VERIFY_ATTR_OLD_MKVP; + if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk))) + rc = -EFAULT; + memzero_explicit(&kvk, sizeof(kvk)); return rc; } -/* - * Transform a non-CCA key token into a protected key - */ -static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_genprotk(struct pkey_genprotk __user 
*ugp) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - int rc = -EINVAL; + struct pkey_genprotk kgp; + int rc; - switch (hdr->version) { - case TOKVER_PROTECTED_KEY: { - struct protaeskeytoken *t; + if (copy_from_user(&kgp, ugp, sizeof(kgp))) + return -EFAULT; - if (keylen != sizeof(struct protaeskeytoken)) - goto out; - t = (struct protaeskeytoken *)key; - rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype); - if (rc) - goto out; - memcpy(protkey, t->protkey, t->len); - *protkeylen = t->len; - *protkeytype = t->keytype; - break; - } - case TOKVER_CLEAR_KEY: { - struct clearkeytoken *t = (struct clearkeytoken *)key; - - if (keylen < sizeof(struct clearkeytoken) || - keylen != sizeof(*t) + t->len) - goto out; - switch (t->keytype) { - case PKEY_KEYTYPE_AES_128: - case PKEY_KEYTYPE_AES_192: - case PKEY_KEYTYPE_AES_256: - rc = nonccatokaes2pkey(t, protkey, - protkeylen, protkeytype); - break; - case PKEY_KEYTYPE_ECC_P256: - case PKEY_KEYTYPE_ECC_P384: - case PKEY_KEYTYPE_ECC_P521: - case PKEY_KEYTYPE_ECC_ED25519: - case PKEY_KEYTYPE_ECC_ED448: - rc = nonccatokecc2pkey(t, protkey, - protkeylen, protkeytype); - break; - default: - PKEY_DBF_ERR("%s unknown/unsupported non cca clear key type %u\n", - __func__, t->keytype); - return -EINVAL; - } - break; - } - case TOKVER_EP11_AES: { - /* check ep11 key for exportable as protected key */ - rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); - if (rc) - goto out; - rc = pkey_ep11key2pkey(key, keylen, - protkey, protkeylen, protkeytype); - break; - } - case TOKVER_EP11_AES_WITH_HEADER: - /* check ep11 key with header for exportable as protected key */ - rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, - 3, key, keylen, 1); - if (rc) - goto out; - rc = pkey_ep11key2pkey(key, keylen, - protkey, protkeylen, protkeytype); - break; - default: - PKEY_DBF_ERR("%s unknown/unsupported non-CCA token version %d\n", - __func__, hdr->version); - } + kgp.protkey.len = sizeof(kgp.protkey.protkey); + rc = pkey_handler_gen_key(NULL, 0, kgp.keytype, + PKEY_TYPE_PROTKEY, 0, 0, + kgp.protkey.protkey, &kgp.protkey.len, + &kgp.protkey.type); + pr_debug("gen_key()=%d\n", rc); + if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp))) + rc = -EFAULT; + memzero_explicit(&kgp, sizeof(kgp)); -out: return rc; } -/* - * Transform a CCA internal key token into a protected key - */ -static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp) { - struct keytoken_header *hdr = (struct keytoken_header *)key; + struct pkey_verifyprotk kvp; + struct protaeskeytoken *t; + u32 keytype; + u8 *tmpbuf; + int rc; - switch (hdr->version) { - case TOKVER_CCA_AES: - if (keylen != sizeof(struct secaeskeytoken)) - return -EINVAL; - break; - case TOKVER_CCA_VLSC: - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) - return -EINVAL; - break; - default: - PKEY_DBF_ERR("%s unknown/unsupported CCA internal token version %d\n", - __func__, hdr->version); + if (copy_from_user(&kvp, uvp, sizeof(kvp))) + return -EFAULT; + + keytype = pkey_aes_bitsize_to_keytype(8 * kvp.protkey.len); + if (!keytype) { + PKEY_DBF_ERR("%s unknown/unsupported protkey length %u\n", + __func__, kvp.protkey.len); + memzero_explicit(&kvp, sizeof(kvp)); return -EINVAL; } - return pkey_skey2pkey(key, protkey, protkeylen, protkeytype); + /* build a 'protected key token' from the raw protected key */ + tmpbuf = kzalloc(sizeof(*t), GFP_KERNEL); + if (!tmpbuf) { + memzero_explicit(&kvp, 
sizeof(kvp)); + return -ENOMEM; + } + t = (struct protaeskeytoken *)tmpbuf; + t->type = TOKTYPE_NON_CCA; + t->version = TOKVER_PROTECTED_KEY; + t->keytype = keytype; + t->len = kvp.protkey.len; + memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len); + + rc = pkey_handler_verify_key(tmpbuf, sizeof(*t), + NULL, NULL, NULL, NULL, NULL); + pr_debug("verify_key()=%d\n", rc); + + kfree_sensitive(tmpbuf); + memzero_explicit(&kvp, sizeof(kvp)); + + return rc; } -/* - * Transform a key blob (of any type) into a protected key - */ -int pkey_keyblob2pkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp) { - struct keytoken_header *hdr = (struct keytoken_header *)key; + struct pkey_kblob2pkey ktp; + u8 *kkey; int rc; - if (keylen < sizeof(struct keytoken_header)) { - PKEY_DBF_ERR("%s invalid keylen %d\n", __func__, keylen); - return -EINVAL; - } - - switch (hdr->type) { - case TOKTYPE_NON_CCA: - rc = pkey_nonccatok2pkey(key, keylen, - protkey, protkeylen, protkeytype); - break; - case TOKTYPE_CCA_INTERNAL: - rc = pkey_ccainttok2pkey(key, keylen, - protkey, protkeylen, protkeytype); - break; - default: - PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n", - __func__, hdr->type); - return -EINVAL; - } + if (copy_from_user(&ktp, utp, sizeof(ktp))) + return -EFAULT; + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) + return PTR_ERR(kkey); + ktp.protkey.len = sizeof(ktp.protkey.protkey); + rc = key2protkey(NULL, 0, kkey, ktp.keylen, + ktp.protkey.protkey, &ktp.protkey.len, + &ktp.protkey.type); + pr_debug("key2protkey()=%d\n", rc); + kfree_sensitive(kkey); + if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) + rc = -EFAULT; + memzero_explicit(&ktp, sizeof(ktp)); - pr_debug("%s rc=%d\n", __func__, rc); return rc; } -EXPORT_SYMBOL(pkey_keyblob2pkey); -static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns, - enum pkey_key_type ktype, enum pkey_key_size ksize, - u32 kflags, u8 *keybuf, size_t *keybufsize) +static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs) { - int i, card, dom, rc; - - /* check for at least one apqn given */ - if (!apqns || !nr_apqns) - return -EINVAL; + u32 klen = KEYBLOBBUFSIZE; + struct pkey_genseck2 kgs; + struct pkey_apqn *apqns; + u8 *kkey; + int rc; + u32 u; - /* check key type and size */ - switch (ktype) { - case PKEY_TYPE_CCA_DATA: - case PKEY_TYPE_CCA_CIPHER: - if (*keybufsize < SECKEYBLOBSIZE) - return -EINVAL; - break; - case PKEY_TYPE_EP11: - if (*keybufsize < MINEP11AESKEYBLOBSIZE) - return -EINVAL; - break; - case PKEY_TYPE_EP11_AES: - if (*keybufsize < (sizeof(struct ep11kblob_header) + - MINEP11AESKEYBLOBSIZE)) - return -EINVAL; - break; - default: + if (copy_from_user(&kgs, ugs, sizeof(kgs))) + return -EFAULT; + u = pkey_aes_bitsize_to_keytype(kgs.size); + if (!u) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, kgs.size); return -EINVAL; } - switch (ksize) { - case PKEY_SIZE_AES_128: - case PKEY_SIZE_AES_192: - case PKEY_SIZE_AES_256: - break; - default: - return -EINVAL; + apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = kzalloc(klen, GFP_KERNEL); + if (!kkey) { + kfree(apqns); + return -ENOMEM; } - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i].card; - dom = apqns[i].domain; - if (ktype == PKEY_TYPE_EP11 || - ktype == PKEY_TYPE_EP11_AES) { - rc = ep11_genaeskey(card, 
dom, ksize, kflags, - keybuf, keybufsize, ktype); - } else if (ktype == PKEY_TYPE_CCA_DATA) { - rc = cca_genseckey(card, dom, ksize, keybuf); - *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); - } else { - /* TOKVER_CCA_VLSC */ - rc = cca_gencipherkey(card, dom, ksize, kflags, - keybuf, keybufsize); + rc = pkey_handler_gen_key(apqns, kgs.apqn_entries, + u, kgs.type, kgs.size, kgs.keygenflags, + kkey, &klen, NULL); + pr_debug("gen_key()=%d\n", rc); + kfree(apqns); + if (rc) { + kfree_sensitive(kkey); + return rc; + } + if (kgs.key) { + if (kgs.keylen < klen) { + kfree_sensitive(kkey); + return -EINVAL; + } + if (copy_to_user(kgs.key, kkey, klen)) { + kfree_sensitive(kkey); + return -EFAULT; } - if (rc == 0) - break; } + kgs.keylen = klen; + if (copy_to_user(ugs, &kgs, sizeof(kgs))) + rc = -EFAULT; + kfree_sensitive(kkey); return rc; } -static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns, - enum pkey_key_type ktype, enum pkey_key_size ksize, - u32 kflags, const u8 *clrkey, - u8 *keybuf, size_t *keybufsize) +static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs) { - int i, card, dom, rc; - - /* check for at least one apqn given */ - if (!apqns || !nr_apqns) - return -EINVAL; - - /* check key type and size */ - switch (ktype) { - case PKEY_TYPE_CCA_DATA: - case PKEY_TYPE_CCA_CIPHER: - if (*keybufsize < SECKEYBLOBSIZE) - return -EINVAL; - break; - case PKEY_TYPE_EP11: - if (*keybufsize < MINEP11AESKEYBLOBSIZE) - return -EINVAL; - break; - case PKEY_TYPE_EP11_AES: - if (*keybufsize < (sizeof(struct ep11kblob_header) + - MINEP11AESKEYBLOBSIZE)) - return -EINVAL; - break; - default: + u32 klen = KEYBLOBBUFSIZE; + struct pkey_clr2seck2 kcs; + struct pkey_apqn *apqns; + u8 *kkey; + int rc; + u32 u; + + if (copy_from_user(&kcs, ucs, sizeof(kcs))) + return -EFAULT; + u = pkey_aes_bitsize_to_keytype(kcs.size); + if (!u) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, kcs.size); + memzero_explicit(&kcs, sizeof(kcs)); return -EINVAL; } - switch (ksize) { - case PKEY_SIZE_AES_128: - case PKEY_SIZE_AES_192: - case PKEY_SIZE_AES_256: - break; - default: - return -EINVAL; + apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries); + if (IS_ERR(apqns)) { + memzero_explicit(&kcs, sizeof(kcs)); + return PTR_ERR(apqns); } - - zcrypt_wait_api_operational(); - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i].card; - dom = apqns[i].domain; - if (ktype == PKEY_TYPE_EP11 || - ktype == PKEY_TYPE_EP11_AES) { - rc = ep11_clr2keyblob(card, dom, ksize, kflags, - clrkey, keybuf, keybufsize, - ktype); - } else if (ktype == PKEY_TYPE_CCA_DATA) { - rc = cca_clr2seckey(card, dom, ksize, - clrkey, keybuf); - *keybufsize = (rc ? 
0 : SECKEYBLOBSIZE); - } else { - /* TOKVER_CCA_VLSC */ - rc = cca_clr2cipherkey(card, dom, ksize, kflags, - clrkey, keybuf, keybufsize); + kkey = kzalloc(klen, GFP_KERNEL); + if (!kkey) { + kfree(apqns); + memzero_explicit(&kcs, sizeof(kcs)); + return -ENOMEM; + } + rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries, + u, kcs.type, kcs.size, kcs.keygenflags, + kcs.clrkey.clrkey, kcs.size / 8, + kkey, &klen, NULL); + pr_debug("clr_to_key()=%d\n", rc); + kfree(apqns); + if (rc) { + kfree_sensitive(kkey); + memzero_explicit(&kcs, sizeof(kcs)); + return rc; + } + if (kcs.key) { + if (kcs.keylen < klen) { + kfree_sensitive(kkey); + memzero_explicit(&kcs, sizeof(kcs)); + return -EINVAL; + } + if (copy_to_user(kcs.key, kkey, klen)) { + kfree_sensitive(kkey); + memzero_explicit(&kcs, sizeof(kcs)); + return -EFAULT; } - if (rc == 0) - break; } + kcs.keylen = klen; + if (copy_to_user(ucs, &kcs, sizeof(kcs))) + rc = -EFAULT; + memzero_explicit(&kcs, sizeof(kcs)); + kfree_sensitive(kkey); return rc; } -static int pkey_verifykey2(const u8 *key, size_t keylen, - u16 *cardnr, u16 *domain, - enum pkey_key_type *ktype, - enum pkey_key_size *ksize, u32 *flags) +static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + struct pkey_verifykey2 kvk; + u8 *kkey; int rc; - if (keylen < sizeof(struct keytoken_header)) - return -EINVAL; - - if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_AES) { - struct secaeskeytoken *t = (struct secaeskeytoken *)key; - - rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0); - if (rc) - goto out; - if (ktype) - *ktype = PKEY_TYPE_CCA_DATA; - if (ksize) - *ksize = (enum pkey_key_size)t->bitsize; - - rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1); - if (rc == 0 && flags) - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; - if (rc == -ENODEV) { - rc = cca_findcard2(&_apqns, &_nr_apqns, - *cardnr, *domain, - ZCRYPT_CEX3C, AES_MK_SET, - 0, t->mkvp, 1); - if (rc == 0 && flags) - *flags = PKEY_FLAGS_MATCH_ALT_MKVP; - } - if (rc) - goto out; - - *cardnr = ((struct pkey_apqn *)_apqns)->card; - *domain = ((struct pkey_apqn *)_apqns)->domain; - - } else if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_VLSC) { - struct cipherkeytoken *t = (struct cipherkeytoken *)key; - - rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1); - if (rc) - goto out; - if (ktype) - *ktype = PKEY_TYPE_CCA_CIPHER; - if (ksize) { - *ksize = PKEY_SIZE_UNKNOWN; - if (!t->plfver && t->wpllen == 512) - *ksize = PKEY_SIZE_AES_128; - else if (!t->plfver && t->wpllen == 576) - *ksize = PKEY_SIZE_AES_192; - else if (!t->plfver && t->wpllen == 640) - *ksize = PKEY_SIZE_AES_256; - } - - rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1); - if (rc == 0 && flags) - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; - if (rc == -ENODEV) { - rc = cca_findcard2(&_apqns, &_nr_apqns, - *cardnr, *domain, - ZCRYPT_CEX6, AES_MK_SET, - 0, t->mkvp0, 1); - if (rc == 0 && flags) - *flags = PKEY_FLAGS_MATCH_ALT_MKVP; - } - if (rc) - goto out; - - *cardnr = ((struct pkey_apqn *)_apqns)->card; - *domain = ((struct pkey_apqn *)_apqns)->domain; - - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES) { - struct ep11keyblob *kb = (struct ep11keyblob *)key; - int api; - - rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); - if (rc) - goto out; - if (ktype) - *ktype = 
PKEY_TYPE_EP11; - if (ksize) - *ksize = kb->head.bitlen; - - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); - if (rc) - goto out; - - if (flags) - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; - - *cardnr = ((struct pkey_apqn *)_apqns)->card; - *domain = ((struct pkey_apqn *)_apqns)->domain; - - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES_WITH_HEADER) { - struct ep11kblob_header *kh = (struct ep11kblob_header *)key; - int api; + if (copy_from_user(&kvk, uvk, sizeof(kvk))) + return -EFAULT; + kkey = _copy_key_from_user(kvk.key, kvk.keylen); + if (IS_ERR(kkey)) + return PTR_ERR(kkey); - rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, - 3, key, keylen, 1); - if (rc) - goto out; - if (ktype) - *ktype = PKEY_TYPE_EP11_AES; - if (ksize) - *ksize = kh->bitlen; - - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, - ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); - if (rc) - goto out; - - if (flags) - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + rc = pkey_handler_verify_key(kkey, kvk.keylen, + &kvk.cardnr, &kvk.domain, + &kvk.type, &kvk.size, &kvk.flags); + pr_debug("verify_key()=%d\n", rc); - *cardnr = ((struct pkey_apqn *)_apqns)->card; - *domain = ((struct pkey_apqn *)_apqns)->domain; - } else { - rc = -EINVAL; - } + kfree_sensitive(kkey); + if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk))) + return -EFAULT; -out: - kfree(_apqns); return rc; } -static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, - const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) +static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - int i, card, dom, rc; + struct pkey_apqn *apqns = NULL; + struct pkey_kblob2pkey2 ktp; + u8 *kkey; + int rc; - /* check for at least one apqn given */ - if (!apqns || !nr_apqns) - return -EINVAL; - - if (keylen < sizeof(struct keytoken_header)) - return -EINVAL; - - if (hdr->type == TOKTYPE_CCA_INTERNAL) { - if (hdr->version == TOKVER_CCA_AES) { - if (keylen != sizeof(struct secaeskeytoken)) - return -EINVAL; - if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) - return -EINVAL; - } else if (hdr->version == TOKVER_CCA_VLSC) { - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) - return -EINVAL; - if (cca_check_secaescipherkey(pkey_dbf_info, - 3, key, 0, 1)) - return -EINVAL; - } else { - PKEY_DBF_ERR("%s unknown CCA internal token version %d\n", - __func__, hdr->version); - return -EINVAL; - } - } else if (hdr->type == TOKTYPE_NON_CCA) { - if (hdr->version == TOKVER_EP11_AES) { - if (ep11_check_aes_key(pkey_dbf_info, - 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) { - if (ep11_check_aes_key_with_hdr(pkey_dbf_info, - 3, key, keylen, 1)) - return -EINVAL; - } else { - return pkey_nonccatok2pkey(key, keylen, - protkey, protkeylen, - protkeytype); - } - } else { - PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n", - __func__, hdr->type); - return -EINVAL; - } - - zcrypt_wait_api_operational(); - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i].card; - dom = apqns[i].domain; - if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_AES) { - rc = cca_sec2protkey(card, dom, key, - protkey, protkeylen, protkeytype); - } else if (hdr->type == 
TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_VLSC) { - rc = cca_cipher2protkey(card, dom, key, - protkey, protkeylen, - protkeytype); - } else { - rc = ep11_kblob2protkey(card, dom, key, keylen, - protkey, protkeylen, - protkeytype); - } - if (rc == 0) - break; + if (copy_from_user(&ktp, utp, sizeof(ktp))) + return -EFAULT; + apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); } + ktp.protkey.len = sizeof(ktp.protkey.protkey); + rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, + ktp.protkey.protkey, &ktp.protkey.len, + &ktp.protkey.type); + pr_debug("key2protkey()=%d\n", rc); + kfree(apqns); + kfree_sensitive(kkey); + if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) + rc = -EFAULT; + memzero_explicit(&ktp, sizeof(ktp)); return rc; } -static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) +static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak) { - struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + struct pkey_apqn *apqns = NULL; + struct pkey_apqns4key kak; + size_t nr_apqns, len; + u8 *kkey; int rc; - if (keylen < sizeof(struct keytoken_header) || flags == 0) - return -EINVAL; - - zcrypt_wait_api_operational(); - - if (hdr->type == TOKTYPE_NON_CCA && - (hdr->version == TOKVER_EP11_AES_WITH_HEADER || - hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { - struct ep11keyblob *kb = (struct ep11keyblob *) - (key + sizeof(struct ep11kblob_header)); - int minhwtype = 0, api = 0; - - if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) - return -EINVAL; - if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { - minhwtype = ZCRYPT_CEX7; - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); - if (rc) - goto out; - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES && - is_ep11_keyblob(key)) { - struct ep11keyblob *kb = (struct ep11keyblob *)key; - int minhwtype = 0, api = 0; - - if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) - return -EINVAL; - if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { - minhwtype = ZCRYPT_CEX7; - api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; - } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); - if (rc) - goto out; - } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { - u64 cur_mkvp = 0, old_mkvp = 0; - int minhwtype = ZCRYPT_CEX3C; - - if (hdr->version == TOKVER_CCA_AES) { - struct secaeskeytoken *t = (struct secaeskeytoken *)key; - - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = t->mkvp; - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = t->mkvp; - } else if (hdr->version == TOKVER_CCA_VLSC) { - struct cipherkeytoken *t = (struct cipherkeytoken *)key; - - minhwtype = ZCRYPT_CEX6; - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = t->mkvp0; - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = t->mkvp0; - } else { - /* unknown cca internal token type */ + if (copy_from_user(&kak, uak, sizeof(kak))) + return -EFAULT; + nr_apqns = kak.apqn_entries; + if (nr_apqns) { + apqns = kmalloc_array(nr_apqns, + sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!apqns) + return -ENOMEM; + } + kkey = _copy_key_from_user(kak.key, kak.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); + } + rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags, + apqns, &nr_apqns); + pr_debug("apqns_for_key()=%d\n", rc); + kfree_sensitive(kkey); + if (rc && rc != -ENOSPC) { + kfree(apqns); + return rc; + } + if (!rc && kak.apqns) { + if (nr_apqns > kak.apqn_entries) { + kfree(apqns); return -EINVAL; } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); - if (rc) - goto out; - } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { - struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; - u64 cur_mkvp = 0, old_mkvp = 0; - - if (t->secid == 0x20) { - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = t->mkvp; - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = t->mkvp; - } else { - /* unknown cca internal 2 token type */ - return -EINVAL; + len = nr_apqns * sizeof(struct pkey_apqn); + if (len) { + if (copy_to_user(kak.apqns, apqns, len)) { + kfree(apqns); + return -EFAULT; + } } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); - if (rc) - goto out; - } else { - return -EINVAL; - } - - if (apqns) { - if (*nr_apqns < _nr_apqns) - rc = -ENOSPC; - else - memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); } - *nr_apqns = _nr_apqns; + kak.apqn_entries = nr_apqns; + if (copy_to_user(uak, &kak, sizeof(kak))) + rc = -EFAULT; + kfree(apqns); -out: - kfree(_apqns); return rc; } -static int pkey_apqns4keytype(enum pkey_key_type ktype, - u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) +static int pkey_ioctl_apqns4kt(struct pkey_apqns4keytype __user *uat) { - u32 _nr_apqns, *_apqns = NULL; + struct pkey_apqn *apqns = NULL; + struct pkey_apqns4keytype kat; + size_t nr_apqns, len; int rc; - zcrypt_wait_api_operational(); - - if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { - u64 cur_mkvp = 0, old_mkvp = 0; - int minhwtype = ZCRYPT_CEX3C; - - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - cur_mkvp = *((u64 *)cur_mkvp); - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = *((u64 *)alt_mkvp); - if (ktype == PKEY_TYPE_CCA_CIPHER) - minhwtype = ZCRYPT_CEX6; - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); - if (rc) - goto out; - } else if (ktype == PKEY_TYPE_CCA_ECC) { - u64 cur_mkvp = 0, old_mkvp = 0; - - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - 
cur_mkvp = *((u64 *)cur_mkvp); - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) - old_mkvp = *((u64 *)alt_mkvp); - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); - if (rc) - goto out; - - } else if (ktype == PKEY_TYPE_EP11 || - ktype == PKEY_TYPE_EP11_AES || - ktype == PKEY_TYPE_EP11_ECC) { - u8 *wkvp = NULL; - int api; - - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) - wkvp = cur_mkvp; - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, api, wkvp); - if (rc) - goto out; - - } else { - return -EINVAL; + if (copy_from_user(&kat, uat, sizeof(kat))) + return -EFAULT; + nr_apqns = kat.apqn_entries; + if (nr_apqns) { + apqns = kmalloc_array(nr_apqns, + sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!apqns) + return -ENOMEM; } - - if (apqns) { - if (*nr_apqns < _nr_apqns) - rc = -ENOSPC; - else - memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + rc = pkey_handler_apqns_for_keytype(kat.type, + kat.cur_mkvp, kat.alt_mkvp, + kat.flags, apqns, &nr_apqns); + pr_debug("apqns_for_keytype()=%d\n", rc); + if (rc && rc != -ENOSPC) { + kfree(apqns); + return rc; } - *nr_apqns = _nr_apqns; - -out: - kfree(_apqns); - return rc; -} - -static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, - const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) -{ - struct keytoken_header *hdr = (struct keytoken_header *)key; - int i, card, dom, rc; - - /* check for at least one apqn given */ - if (!apqns || !nr_apqns) - return -EINVAL; - - if (keylen < sizeof(struct keytoken_header)) - return -EINVAL; - - if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES_WITH_HEADER && - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { - /* EP11 AES key blob with header */ - if (ep11_check_aes_key_with_hdr(pkey_dbf_info, - 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_ECC_WITH_HEADER && - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { - /* EP11 ECC key blob with header */ - if (ep11_check_ecc_key_with_hdr(pkey_dbf_info, - 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES && - is_ep11_keyblob(key)) { - /* EP11 AES key blob with header in session field */ - if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { - if (hdr->version == TOKVER_CCA_AES) { - /* CCA AES data key */ - if (keylen != sizeof(struct secaeskeytoken)) - return -EINVAL; - if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) - return -EINVAL; - } else if (hdr->version == TOKVER_CCA_VLSC) { - /* CCA AES cipher key */ - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) - return -EINVAL; - if (cca_check_secaescipherkey(pkey_dbf_info, - 3, key, 0, 1)) - return -EINVAL; - } else { - PKEY_DBF_ERR("%s unknown CCA internal token version %d\n", - __func__, hdr->version); + if (!rc && kat.apqns) { + if (nr_apqns > kat.apqn_entries) { + kfree(apqns); return -EINVAL; } - } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { - /* CCA ECC (private) key */ - if (keylen < sizeof(struct eccprivkeytoken)) - return -EINVAL; - if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1)) - return -EINVAL; - } else if (hdr->type == TOKTYPE_NON_CCA) { - return pkey_nonccatok2pkey(key, keylen, - protkey, protkeylen, protkeytype); - } else { - PKEY_DBF_ERR("%s unknown/unsupported 
blob type %d\n", - __func__, hdr->type); - return -EINVAL; - } - - /* simple try all apqns from the list */ - for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { - card = apqns[i].card; - dom = apqns[i].domain; - if (hdr->type == TOKTYPE_NON_CCA && - (hdr->version == TOKVER_EP11_AES_WITH_HEADER || - hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) - rc = ep11_kblob2protkey(card, dom, key, hdr->len, - protkey, protkeylen, - protkeytype); - else if (hdr->type == TOKTYPE_NON_CCA && - hdr->version == TOKVER_EP11_AES && - is_ep11_keyblob(key)) - rc = ep11_kblob2protkey(card, dom, key, hdr->len, - protkey, protkeylen, - protkeytype); - else if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_AES) - rc = cca_sec2protkey(card, dom, key, protkey, - protkeylen, protkeytype); - else if (hdr->type == TOKTYPE_CCA_INTERNAL && - hdr->version == TOKVER_CCA_VLSC) - rc = cca_cipher2protkey(card, dom, key, protkey, - protkeylen, protkeytype); - else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) - rc = cca_ecc2protkey(card, dom, key, protkey, - protkeylen, protkeytype); - else - return -EINVAL; + len = nr_apqns * sizeof(struct pkey_apqn); + if (len) { + if (copy_to_user(kat.apqns, apqns, len)) { + kfree(apqns); + return -EFAULT; + } + } } + kat.apqn_entries = nr_apqns; + if (copy_to_user(uat, &kat, sizeof(kat))) + rc = -EFAULT; + kfree(apqns); return rc; } -/* - * File io functions - */ - -static void *_copy_key_from_user(void __user *ukey, size_t keylen) +static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp) { - if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE) - return ERR_PTR(-EINVAL); - - return memdup_user(ukey, keylen); -} + u32 protkeylen = PROTKEYBLOBBUFSIZE; + struct pkey_apqn *apqns = NULL; + struct pkey_kblob2pkey3 ktp; + u8 *kkey, *protkey; + int rc; -static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns) -{ - if (!uapqns || nr_apqns == 0) - return NULL; + if (copy_from_user(&ktp, utp, sizeof(ktp))) + return -EFAULT; + apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); + if (IS_ERR(apqns)) + return PTR_ERR(apqns); + kkey = _copy_key_from_user(ktp.key, ktp.keylen); + if (IS_ERR(kkey)) { + kfree(apqns); + return PTR_ERR(kkey); + } + protkey = kmalloc(protkeylen, GFP_KERNEL); + if (!protkey) { + kfree(apqns); + kfree_sensitive(kkey); + return -ENOMEM; + } + rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, + protkey, &protkeylen, &ktp.pkeytype); + pr_debug("key2protkey()=%d\n", rc); + kfree(apqns); + kfree_sensitive(kkey); + if (rc) { + kfree_sensitive(protkey); + return rc; + } + if (ktp.pkey && ktp.pkeylen) { + if (protkeylen > ktp.pkeylen) { + kfree_sensitive(protkey); + return -EINVAL; + } + if (copy_to_user(ktp.pkey, protkey, protkeylen)) { + kfree_sensitive(protkey); + return -EFAULT; + } + } + kfree_sensitive(protkey); + ktp.pkeylen = protkeylen; + if (copy_to_user(utp, &ktp, sizeof(ktp))) + return -EFAULT; - return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn)); + return 0; } static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, @@ -1350,438 +710,57 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, int rc; switch (cmd) { - case PKEY_GENSECK: { - struct pkey_genseck __user *ugs = (void __user *)arg; - struct pkey_genseck kgs; - - if (copy_from_user(&kgs, ugs, sizeof(kgs))) - return -EFAULT; - rc = cca_genseckey(kgs.cardnr, kgs.domain, - kgs.keytype, kgs.seckey.seckey); - pr_debug("%s 
cca_genseckey()=%d\n", __func__, rc); - if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) - rc = -EFAULT; - memzero_explicit(&kgs, sizeof(kgs)); + case PKEY_GENSECK: + rc = pkey_ioctl_genseck((struct pkey_genseck __user *)arg); break; - } - case PKEY_CLR2SECK: { - struct pkey_clr2seck __user *ucs = (void __user *)arg; - struct pkey_clr2seck kcs; - - if (copy_from_user(&kcs, ucs, sizeof(kcs))) - return -EFAULT; - rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype, - kcs.clrkey.clrkey, kcs.seckey.seckey); - pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc); - if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs))) - rc = -EFAULT; - memzero_explicit(&kcs, sizeof(kcs)); + case PKEY_CLR2SECK: + rc = pkey_ioctl_clr2seck((struct pkey_clr2seck __user *)arg); break; - } - case PKEY_SEC2PROTK: { - struct pkey_sec2protk __user *usp = (void __user *)arg; - struct pkey_sec2protk ksp; - - if (copy_from_user(&ksp, usp, sizeof(ksp))) - return -EFAULT; - ksp.protkey.len = sizeof(ksp.protkey.protkey); - rc = cca_sec2protkey(ksp.cardnr, ksp.domain, - ksp.seckey.seckey, ksp.protkey.protkey, - &ksp.protkey.len, &ksp.protkey.type); - pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc); - if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) - rc = -EFAULT; - memzero_explicit(&ksp, sizeof(ksp)); + case PKEY_SEC2PROTK: + rc = pkey_ioctl_sec2protk((struct pkey_sec2protk __user *)arg); break; - } - case PKEY_CLR2PROTK: { - struct pkey_clr2protk __user *ucp = (void __user *)arg; - struct pkey_clr2protk kcp; - - if (copy_from_user(&kcp, ucp, sizeof(kcp))) - return -EFAULT; - kcp.protkey.len = sizeof(kcp.protkey.protkey); - rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey, - kcp.protkey.protkey, - &kcp.protkey.len, &kcp.protkey.type); - pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc); - if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp))) - rc = -EFAULT; - memzero_explicit(&kcp, sizeof(kcp)); + case PKEY_CLR2PROTK: + rc = pkey_ioctl_clr2protk((struct pkey_clr2protk __user *)arg); break; - } - case PKEY_FINDCARD: { - struct pkey_findcard __user *ufc = (void __user *)arg; - struct pkey_findcard kfc; - - if (copy_from_user(&kfc, ufc, sizeof(kfc))) - return -EFAULT; - rc = cca_findcard(kfc.seckey.seckey, - &kfc.cardnr, &kfc.domain, 1); - pr_debug("%s cca_findcard()=%d\n", __func__, rc); - if (rc < 0) - break; - if (copy_to_user(ufc, &kfc, sizeof(kfc))) - return -EFAULT; + case PKEY_FINDCARD: + rc = pkey_ioctl_findcard((struct pkey_findcard __user *)arg); break; - } - case PKEY_SKEY2PKEY: { - struct pkey_skey2pkey __user *usp = (void __user *)arg; - struct pkey_skey2pkey ksp; - - if (copy_from_user(&ksp, usp, sizeof(ksp))) - return -EFAULT; - ksp.protkey.len = sizeof(ksp.protkey.protkey); - rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey, - &ksp.protkey.len, &ksp.protkey.type); - pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc); - if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) - rc = -EFAULT; - memzero_explicit(&ksp, sizeof(ksp)); + case PKEY_SKEY2PKEY: + rc = pkey_ioctl_skey2pkey((struct pkey_skey2pkey __user *)arg); break; - } - case PKEY_VERIFYKEY: { - struct pkey_verifykey __user *uvk = (void __user *)arg; - struct pkey_verifykey kvk; - - if (copy_from_user(&kvk, uvk, sizeof(kvk))) - return -EFAULT; - rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain, - &kvk.keysize, &kvk.attributes); - pr_debug("%s pkey_verifykey()=%d\n", __func__, rc); - if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk))) - rc = -EFAULT; - memzero_explicit(&kvk, sizeof(kvk)); + case PKEY_VERIFYKEY: + rc = 
pkey_ioctl_verifykey((struct pkey_verifykey __user *)arg); break; - } - case PKEY_GENPROTK: { - struct pkey_genprotk __user *ugp = (void __user *)arg; - struct pkey_genprotk kgp; - - if (copy_from_user(&kgp, ugp, sizeof(kgp))) - return -EFAULT; - kgp.protkey.len = sizeof(kgp.protkey.protkey); - rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey, - &kgp.protkey.len, &kgp.protkey.type); - pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc); - if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp))) - rc = -EFAULT; - memzero_explicit(&kgp, sizeof(kgp)); + case PKEY_GENPROTK: + rc = pkey_ioctl_genprotk((struct pkey_genprotk __user *)arg); break; - } - case PKEY_VERIFYPROTK: { - struct pkey_verifyprotk __user *uvp = (void __user *)arg; - struct pkey_verifyprotk kvp; - - if (copy_from_user(&kvp, uvp, sizeof(kvp))) - return -EFAULT; - rc = pkey_verifyprotkey(kvp.protkey.protkey, - kvp.protkey.len, kvp.protkey.type); - pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc); - memzero_explicit(&kvp, sizeof(kvp)); + case PKEY_VERIFYPROTK: + rc = pkey_ioctl_verifyprotk((struct pkey_verifyprotk __user *)arg); break; - } - case PKEY_KBLOB2PROTK: { - struct pkey_kblob2pkey __user *utp = (void __user *)arg; - struct pkey_kblob2pkey ktp; - u8 *kkey; - - if (copy_from_user(&ktp, utp, sizeof(ktp))) - return -EFAULT; - kkey = _copy_key_from_user(ktp.key, ktp.keylen); - if (IS_ERR(kkey)) - return PTR_ERR(kkey); - ktp.protkey.len = sizeof(ktp.protkey.protkey); - rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey, - &ktp.protkey.len, &ktp.protkey.type); - pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc); - kfree_sensitive(kkey); - if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) - rc = -EFAULT; - memzero_explicit(&ktp, sizeof(ktp)); + case PKEY_KBLOB2PROTK: + rc = pkey_ioctl_kblob2protk((struct pkey_kblob2pkey __user *)arg); break; - } - case PKEY_GENSECK2: { - struct pkey_genseck2 __user *ugs = (void __user *)arg; - size_t klen = KEYBLOBBUFSIZE; - struct pkey_genseck2 kgs; - struct pkey_apqn *apqns; - u8 *kkey; - - if (copy_from_user(&kgs, ugs, sizeof(kgs))) - return -EFAULT; - apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries); - if (IS_ERR(apqns)) - return PTR_ERR(apqns); - kkey = kzalloc(klen, GFP_KERNEL); - if (!kkey) { - kfree(apqns); - return -ENOMEM; - } - rc = pkey_genseckey2(apqns, kgs.apqn_entries, - kgs.type, kgs.size, kgs.keygenflags, - kkey, &klen); - pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc); - kfree(apqns); - if (rc) { - kfree_sensitive(kkey); - break; - } - if (kgs.key) { - if (kgs.keylen < klen) { - kfree_sensitive(kkey); - return -EINVAL; - } - if (copy_to_user(kgs.key, kkey, klen)) { - kfree_sensitive(kkey); - return -EFAULT; - } - } - kgs.keylen = klen; - if (copy_to_user(ugs, &kgs, sizeof(kgs))) - rc = -EFAULT; - kfree_sensitive(kkey); + case PKEY_GENSECK2: + rc = pkey_ioctl_genseck2((struct pkey_genseck2 __user *)arg); break; - } - case PKEY_CLR2SECK2: { - struct pkey_clr2seck2 __user *ucs = (void __user *)arg; - size_t klen = KEYBLOBBUFSIZE; - struct pkey_clr2seck2 kcs; - struct pkey_apqn *apqns; - u8 *kkey; - - if (copy_from_user(&kcs, ucs, sizeof(kcs))) - return -EFAULT; - apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries); - if (IS_ERR(apqns)) { - memzero_explicit(&kcs, sizeof(kcs)); - return PTR_ERR(apqns); - } - kkey = kzalloc(klen, GFP_KERNEL); - if (!kkey) { - kfree(apqns); - memzero_explicit(&kcs, sizeof(kcs)); - return -ENOMEM; - } - rc = pkey_clr2seckey2(apqns, kcs.apqn_entries, - kcs.type, kcs.size, kcs.keygenflags, - kcs.clrkey.clrkey, 
kkey, &klen); - pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc); - kfree(apqns); - if (rc) { - kfree_sensitive(kkey); - memzero_explicit(&kcs, sizeof(kcs)); - break; - } - if (kcs.key) { - if (kcs.keylen < klen) { - kfree_sensitive(kkey); - memzero_explicit(&kcs, sizeof(kcs)); - return -EINVAL; - } - if (copy_to_user(kcs.key, kkey, klen)) { - kfree_sensitive(kkey); - memzero_explicit(&kcs, sizeof(kcs)); - return -EFAULT; - } - } - kcs.keylen = klen; - if (copy_to_user(ucs, &kcs, sizeof(kcs))) - rc = -EFAULT; - memzero_explicit(&kcs, sizeof(kcs)); - kfree_sensitive(kkey); + case PKEY_CLR2SECK2: + rc = pkey_ioctl_clr2seck2((struct pkey_clr2seck2 __user *)arg); break; - } - case PKEY_VERIFYKEY2: { - struct pkey_verifykey2 __user *uvk = (void __user *)arg; - struct pkey_verifykey2 kvk; - u8 *kkey; - - if (copy_from_user(&kvk, uvk, sizeof(kvk))) - return -EFAULT; - kkey = _copy_key_from_user(kvk.key, kvk.keylen); - if (IS_ERR(kkey)) - return PTR_ERR(kkey); - rc = pkey_verifykey2(kkey, kvk.keylen, - &kvk.cardnr, &kvk.domain, - &kvk.type, &kvk.size, &kvk.flags); - pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc); - kfree_sensitive(kkey); - if (rc) - break; - if (copy_to_user(uvk, &kvk, sizeof(kvk))) - return -EFAULT; + case PKEY_VERIFYKEY2: + rc = pkey_ioctl_verifykey2((struct pkey_verifykey2 __user *)arg); break; - } - case PKEY_KBLOB2PROTK2: { - struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; - struct pkey_apqn *apqns = NULL; - struct pkey_kblob2pkey2 ktp; - u8 *kkey; - - if (copy_from_user(&ktp, utp, sizeof(ktp))) - return -EFAULT; - apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); - if (IS_ERR(apqns)) - return PTR_ERR(apqns); - kkey = _copy_key_from_user(ktp.key, ktp.keylen); - if (IS_ERR(kkey)) { - kfree(apqns); - return PTR_ERR(kkey); - } - ktp.protkey.len = sizeof(ktp.protkey.protkey); - rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries, - kkey, ktp.keylen, - ktp.protkey.protkey, &ktp.protkey.len, - &ktp.protkey.type); - pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); - kfree(apqns); - kfree_sensitive(kkey); - if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) - rc = -EFAULT; - memzero_explicit(&ktp, sizeof(ktp)); + case PKEY_KBLOB2PROTK2: + rc = pkey_ioctl_kblob2protk2((struct pkey_kblob2pkey2 __user *)arg); break; - } - case PKEY_APQNS4K: { - struct pkey_apqns4key __user *uak = (void __user *)arg; - struct pkey_apqn *apqns = NULL; - struct pkey_apqns4key kak; - size_t nr_apqns, len; - u8 *kkey; - - if (copy_from_user(&kak, uak, sizeof(kak))) - return -EFAULT; - nr_apqns = kak.apqn_entries; - if (nr_apqns) { - apqns = kmalloc_array(nr_apqns, - sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!apqns) - return -ENOMEM; - } - kkey = _copy_key_from_user(kak.key, kak.keylen); - if (IS_ERR(kkey)) { - kfree(apqns); - return PTR_ERR(kkey); - } - rc = pkey_apqns4key(kkey, kak.keylen, kak.flags, - apqns, &nr_apqns); - pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc); - kfree_sensitive(kkey); - if (rc && rc != -ENOSPC) { - kfree(apqns); - break; - } - if (!rc && kak.apqns) { - if (nr_apqns > kak.apqn_entries) { - kfree(apqns); - return -EINVAL; - } - len = nr_apqns * sizeof(struct pkey_apqn); - if (len) { - if (copy_to_user(kak.apqns, apqns, len)) { - kfree(apqns); - return -EFAULT; - } - } - } - kak.apqn_entries = nr_apqns; - if (copy_to_user(uak, &kak, sizeof(kak))) - rc = -EFAULT; - kfree(apqns); + case PKEY_APQNS4K: + rc = pkey_ioctl_apqns4k((struct pkey_apqns4key __user *)arg); break; - } - case PKEY_APQNS4KT: { - struct pkey_apqns4keytype __user *uat = (void 
__user *)arg; - struct pkey_apqn *apqns = NULL; - struct pkey_apqns4keytype kat; - size_t nr_apqns, len; - - if (copy_from_user(&kat, uat, sizeof(kat))) - return -EFAULT; - nr_apqns = kat.apqn_entries; - if (nr_apqns) { - apqns = kmalloc_array(nr_apqns, - sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!apqns) - return -ENOMEM; - } - rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp, - kat.flags, apqns, &nr_apqns); - pr_debug("%s pkey_apqns4keytype()=%d\n", __func__, rc); - if (rc && rc != -ENOSPC) { - kfree(apqns); - break; - } - if (!rc && kat.apqns) { - if (nr_apqns > kat.apqn_entries) { - kfree(apqns); - return -EINVAL; - } - len = nr_apqns * sizeof(struct pkey_apqn); - if (len) { - if (copy_to_user(kat.apqns, apqns, len)) { - kfree(apqns); - return -EFAULT; - } - } - } - kat.apqn_entries = nr_apqns; - if (copy_to_user(uat, &kat, sizeof(kat))) - rc = -EFAULT; - kfree(apqns); + case PKEY_APQNS4KT: + rc = pkey_ioctl_apqns4kt((struct pkey_apqns4keytype __user *)arg); break; - } - case PKEY_KBLOB2PROTK3: { - struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; - u32 protkeylen = PROTKEYBLOBBUFSIZE; - struct pkey_apqn *apqns = NULL; - struct pkey_kblob2pkey3 ktp; - u8 *kkey, *protkey; - - if (copy_from_user(&ktp, utp, sizeof(ktp))) - return -EFAULT; - apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); - if (IS_ERR(apqns)) - return PTR_ERR(apqns); - kkey = _copy_key_from_user(ktp.key, ktp.keylen); - if (IS_ERR(kkey)) { - kfree(apqns); - return PTR_ERR(kkey); - } - protkey = kmalloc(protkeylen, GFP_KERNEL); - if (!protkey) { - kfree(apqns); - kfree_sensitive(kkey); - return -ENOMEM; - } - rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, - kkey, ktp.keylen, - protkey, &protkeylen, &ktp.pkeytype); - pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); - kfree(apqns); - kfree_sensitive(kkey); - if (rc) { - kfree_sensitive(protkey); - break; - } - if (ktp.pkey && ktp.pkeylen) { - if (protkeylen > ktp.pkeylen) { - kfree_sensitive(protkey); - return -EINVAL; - } - if (copy_to_user(ktp.pkey, protkey, protkeylen)) { - kfree_sensitive(protkey); - return -EFAULT; - } - } - kfree_sensitive(protkey); - ktp.pkeylen = protkeylen; - if (copy_to_user(utp, &ktp, sizeof(ktp))) - return -EFAULT; + case PKEY_KBLOB2PROTK3: + rc = pkey_ioctl_kblob2protk3((struct pkey_kblob2pkey3 __user *)arg); break; - } default: /* unknown/unsupported ioctl cmd */ return -ENOTTY; @@ -1791,499 +770,12 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } /* - * Sysfs and file io operations - */ - -/* - * Sysfs attribute read function for all protected key binary attributes. - * The implementation can not deal with partial reads, because a new random - * protected key blob is generated with each read. In case of partial reads - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 
- */ -static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, - loff_t off, size_t count) -{ - struct protaeskeytoken protkeytoken; - struct pkey_protkey protkey; - int rc; - - if (off != 0 || count < sizeof(protkeytoken)) - return -EINVAL; - if (is_xts) - if (count < 2 * sizeof(protkeytoken)) - return -EINVAL; - - memset(&protkeytoken, 0, sizeof(protkeytoken)); - protkeytoken.type = TOKTYPE_NON_CCA; - protkeytoken.version = TOKVER_PROTECTED_KEY; - protkeytoken.keytype = keytype; - - protkey.len = sizeof(protkey.protkey); - rc = pkey_genprotkey(protkeytoken.keytype, - protkey.protkey, &protkey.len, &protkey.type); - if (rc) - return rc; - - protkeytoken.len = protkey.len; - memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); - - memcpy(buf, &protkeytoken, sizeof(protkeytoken)); - - if (is_xts) { - /* xts needs a second protected key, reuse protkey struct */ - protkey.len = sizeof(protkey.protkey); - rc = pkey_genprotkey(protkeytoken.keytype, - protkey.protkey, &protkey.len, &protkey.type); - if (rc) - return rc; - - protkeytoken.len = protkey.len; - memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); - - memcpy(buf + sizeof(protkeytoken), &protkeytoken, - sizeof(protkeytoken)); - - return 2 * sizeof(protkeytoken); - } - - return sizeof(protkeytoken); -} - -static ssize_t protkey_aes_128_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, - off, count); -} - -static ssize_t protkey_aes_192_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, - off, count); -} - -static ssize_t protkey_aes_256_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, - off, count); -} - -static ssize_t protkey_aes_128_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, - off, count); -} - -static ssize_t protkey_aes_256_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, - off, count); -} - -static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken)); -static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken)); -static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken)); -static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken)); -static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken)); - -static struct bin_attribute *protkey_attrs[] = { - &bin_attr_protkey_aes_128, - &bin_attr_protkey_aes_192, - &bin_attr_protkey_aes_256, - &bin_attr_protkey_aes_128_xts, - &bin_attr_protkey_aes_256_xts, - NULL -}; - -static struct attribute_group protkey_attr_group = { - .name = "protkey", - .bin_attrs = protkey_attrs, -}; - -/* - * Sysfs attribute read function for all secure key ccadata binary attributes. - * The implementation can not deal with partial reads, because a new random - * protected key blob is generated with each read. In case of partial reads - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 
- */ -static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, - loff_t off, size_t count) -{ - struct pkey_seckey *seckey = (struct pkey_seckey *)buf; - int rc; - - if (off != 0 || count < sizeof(struct secaeskeytoken)) - return -EINVAL; - if (is_xts) - if (count < 2 * sizeof(struct secaeskeytoken)) - return -EINVAL; - - rc = cca_genseckey(-1, -1, keytype, seckey->seckey); - if (rc) - return rc; - - if (is_xts) { - seckey++; - rc = cca_genseckey(-1, -1, keytype, seckey->seckey); - if (rc) - return rc; - - return 2 * sizeof(struct secaeskeytoken); - } - - return sizeof(struct secaeskeytoken); -} - -static ssize_t ccadata_aes_128_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, - off, count); -} - -static ssize_t ccadata_aes_192_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, - off, count); -} - -static ssize_t ccadata_aes_256_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, - off, count); -} - -static ssize_t ccadata_aes_128_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, - off, count); -} - -static ssize_t ccadata_aes_256_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, - off, count); -} - -static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken)); -static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken)); -static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken)); -static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken)); -static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken)); - -static struct bin_attribute *ccadata_attrs[] = { - &bin_attr_ccadata_aes_128, - &bin_attr_ccadata_aes_192, - &bin_attr_ccadata_aes_256, - &bin_attr_ccadata_aes_128_xts, - &bin_attr_ccadata_aes_256_xts, - NULL -}; - -static struct attribute_group ccadata_attr_group = { - .name = "ccadata", - .bin_attrs = ccadata_attrs, -}; - -#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80) - -/* - * Sysfs attribute read function for all secure key ccacipher binary attributes. - * The implementation can not deal with partial reads, because a new random - * secure key blob is generated with each read. In case of partial reads - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + * File io operations */ -static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, - bool is_xts, char *buf, loff_t off, - size_t count) -{ - size_t keysize = CCACIPHERTOKENSIZE; - u32 nr_apqns, *apqns = NULL; - int i, rc, card, dom; - - if (off != 0 || count < CCACIPHERTOKENSIZE) - return -EINVAL; - if (is_xts) - if (count < 2 * CCACIPHERTOKENSIZE) - return -EINVAL; - - /* build a list of apqns able to generate an cipher key */ - rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX6, 0, 0, 0, 0); - if (rc) - return rc; - - memset(buf, 0, is_xts ? 
2 * keysize : keysize); - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i] >> 16; - dom = apqns[i] & 0xFFFF; - rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize); - if (rc == 0) - break; - } - if (rc) - return rc; - - if (is_xts) { - keysize = CCACIPHERTOKENSIZE; - buf += CCACIPHERTOKENSIZE; - rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize); - if (rc == 0) - return 2 * CCACIPHERTOKENSIZE; - } - - return CCACIPHERTOKENSIZE; -} - -static ssize_t ccacipher_aes_128_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf, - off, count); -} - -static ssize_t ccacipher_aes_192_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf, - off, count); -} - -static ssize_t ccacipher_aes_256_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf, - off, count); -} - -static ssize_t ccacipher_aes_128_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf, - off, count); -} - -static ssize_t ccacipher_aes_256_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf, - off, count); -} - -static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE); -static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE); -static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE); -static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE); -static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE); - -static struct bin_attribute *ccacipher_attrs[] = { - &bin_attr_ccacipher_aes_128, - &bin_attr_ccacipher_aes_192, - &bin_attr_ccacipher_aes_256, - &bin_attr_ccacipher_aes_128_xts, - &bin_attr_ccacipher_aes_256_xts, - NULL -}; - -static struct attribute_group ccacipher_attr_group = { - .name = "ccacipher", - .bin_attrs = ccacipher_attrs, -}; - -/* - * Sysfs attribute read function for all ep11 aes key binary attributes. - * The implementation can not deal with partial reads, because a new random - * secure key blob is generated with each read. In case of partial reads - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. - * This function and the sysfs attributes using it provide EP11 key blobs - * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently - * 336 bytes. - */ -static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, - bool is_xts, char *buf, loff_t off, - size_t count) -{ - size_t keysize = MAXEP11AESKEYBLOBSIZE; - u32 nr_apqns, *apqns = NULL; - int i, rc, card, dom; - - if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) - return -EINVAL; - if (is_xts) - if (count < 2 * MAXEP11AESKEYBLOBSIZE) - return -EINVAL; - - /* build a list of apqns able to generate an cipher key */ - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, - ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, - NULL); - if (rc) - return rc; - - memset(buf, 0, is_xts ? 
2 * keysize : keysize); - - /* simple try all apqns from the list */ - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { - card = apqns[i] >> 16; - dom = apqns[i] & 0xFFFF; - rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize, - PKEY_TYPE_EP11_AES); - if (rc == 0) - break; - } - if (rc) - return rc; - - if (is_xts) { - keysize = MAXEP11AESKEYBLOBSIZE; - buf += MAXEP11AESKEYBLOBSIZE; - rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize, - PKEY_TYPE_EP11_AES); - if (rc == 0) - return 2 * MAXEP11AESKEYBLOBSIZE; - } - - return MAXEP11AESKEYBLOBSIZE; -} - -static ssize_t ep11_aes_128_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf, - off, count); -} - -static ssize_t ep11_aes_192_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf, - off, count); -} - -static ssize_t ep11_aes_256_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf, - off, count); -} - -static ssize_t ep11_aes_128_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf, - off, count); -} - -static ssize_t ep11_aes_256_xts_read(struct file *filp, - struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, - size_t count) -{ - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf, - off, count); -} - -static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE); -static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE); -static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE); -static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE); -static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE); - -static struct bin_attribute *ep11_attrs[] = { - &bin_attr_ep11_aes_128, - &bin_attr_ep11_aes_192, - &bin_attr_ep11_aes_256, - &bin_attr_ep11_aes_128_xts, - &bin_attr_ep11_aes_256_xts, - NULL -}; - -static struct attribute_group ep11_attr_group = { - .name = "ep11", - .bin_attrs = ep11_attrs, -}; - -static const struct attribute_group *pkey_attr_groups[] = { - &protkey_attr_group, - &ccadata_attr_group, - &ccacipher_attr_group, - &ep11_attr_group, - NULL, -}; static const struct file_operations pkey_fops = { .owner = THIS_MODULE, .open = nonseekable_open, - .llseek = no_llseek, .unlocked_ioctl = pkey_unlocked_ioctl, }; @@ -2295,43 +787,13 @@ static struct miscdevice pkey_dev = { .groups = pkey_attr_groups, }; -/* - * Module init - */ -static int __init pkey_init(void) +int __init pkey_api_init(void) { - cpacf_mask_t func_mask; - - /* - * The pckmo instruction should be available - even if we don't - * actually invoke it. This instruction comes with MSA 3 which - * is also the minimum level for the kmc instructions which - * are able to work with protected keys. 
- */ - if (!cpacf_query(CPACF_PCKMO, &func_mask)) - return -ENODEV; - - /* check for kmc instructions available */ - if (!cpacf_query(CPACF_KMC, &func_mask)) - return -ENODEV; - if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) || - !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) || - !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256)) - return -ENODEV; - - pkey_debug_init(); - + /* register as a misc device */ return misc_register(&pkey_dev); } -/* - * Module exit - */ -static void __exit pkey_exit(void) +void __exit pkey_api_exit(void) { misc_deregister(&pkey_dev); - pkey_debug_exit(); } - -module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init); -module_exit(pkey_exit); diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c new file mode 100644 index 00000000000000..fea24332283835 --- /dev/null +++ b/drivers/s390/crypto/pkey_base.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey base: debug feature, pkey handler registry + * + * Copyright IBM Corp. 2024 + */ + +#define KMSG_COMPONENT "pkey" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include +#include + +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key base and api"); + +/* + * pkey debug feature + */ +debug_info_t *pkey_dbf_info; +EXPORT_SYMBOL(pkey_dbf_info); + +/* + * pkey handler registry + */ + +static DEFINE_SPINLOCK(handler_list_write_lock); +static LIST_HEAD(handler_list); + +int pkey_handler_register(struct pkey_handler *handler) +{ + const struct pkey_handler *h; + + if (!handler || + !handler->is_supported_key || + !handler->is_supported_keytype) + return -EINVAL; + + if (!try_module_get(handler->module)) + return -ENXIO; + + spin_lock(&handler_list_write_lock); + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (h == handler) { + rcu_read_unlock(); + spin_unlock(&handler_list_write_lock); + module_put(handler->module); + return -EEXIST; + } + } + rcu_read_unlock(); + + list_add_rcu(&handler->list, &handler_list); + spin_unlock(&handler_list_write_lock); + synchronize_rcu(); + + module_put(handler->module); + + PKEY_DBF_INFO("%s pkey handler '%s' registered\n", __func__, + handler->name ?: ""); + + return 0; +} +EXPORT_SYMBOL(pkey_handler_register); + +int pkey_handler_unregister(struct pkey_handler *handler) +{ + spin_lock(&handler_list_write_lock); + list_del_rcu(&handler->list); + INIT_LIST_HEAD_RCU(&handler->list); + spin_unlock(&handler_list_write_lock); + synchronize_rcu(); + + PKEY_DBF_INFO("%s pkey handler '%s' unregistered\n", __func__, + handler->name ?: ""); + + return 0; +} +EXPORT_SYMBOL(pkey_handler_unregister); + +/* + * Handler invocation functions. 
+ */ + +const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen) +{ + const struct pkey_handler *h; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (!try_module_get(h->module)) + continue; + if (h->is_supported_key(key, keylen)) { + rcu_read_unlock(); + return h; + } + module_put(h->module); + } + rcu_read_unlock(); + + return NULL; +} +EXPORT_SYMBOL(pkey_handler_get_keybased); + +const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt) +{ + const struct pkey_handler *h; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (!try_module_get(h->module)) + continue; + if (h->is_supported_keytype(kt)) { + rcu_read_unlock(); + return h; + } + module_put(h->module); + } + rcu_read_unlock(); + + return NULL; +} +EXPORT_SYMBOL(pkey_handler_get_keytypebased); + +void pkey_handler_put(const struct pkey_handler *handler) +{ + const struct pkey_handler *h; + + if (!handler) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (h == handler) { + module_put(h->module); + break; + } + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(pkey_handler_put); + +int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keybased(key, keylen); + if (h && h->key_to_protkey) { + rc = h->key_to_protkey(apqns, nr_apqns, key, keylen, + protkey, protkeylen, + protkeytype); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_key_to_protkey); + +/* + * This handler invocation is special as there may be more than + * one handler providing support for the very same key (type). + * And the handler may not respond true on is_supported_key(), + * so simple try and check return value here. 
+ */ +int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype) +{ + const struct pkey_handler *h, *htmp[10]; + int i, n = 0, rc = -ENODEV; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (!try_module_get(h->module)) + continue; + if (h->slowpath_key_to_protkey && n < ARRAY_SIZE(htmp)) + htmp[n++] = h; + else + module_put(h->module); + } + rcu_read_unlock(); + + for (i = 0; i < n; i++) { + h = htmp[i]; + if (rc) + rc = h->slowpath_key_to_protkey(apqns, nr_apqns, + key, keylen, + protkey, protkeylen, + protkeytype); + module_put(h->module); + } + + return rc; +} +EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey); + +int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keytypebased(keysubtype); + if (h && h->gen_key) { + rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype, + keybitsize, flags, + keybuf, keybuflen, keyinfo); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_gen_key); + +int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keytypebased(keysubtype); + if (h && h->clr_to_key) { + rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype, + keybitsize, flags, clrkey, clrkeylen, + keybuf, keybuflen, keyinfo); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_clr_to_key); + +int pkey_handler_verify_key(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keybased(key, keylen); + if (h && h->verify_key) { + rc = h->verify_key(key, keylen, card, dom, + keytype, keybitsize, flags); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_verify_key); + +int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keybased(key, keylen); + if (h && h->apqns_for_key) + rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns); + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_apqns_for_key); + +int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns) +{ + const struct pkey_handler *h; + int rc = -ENODEV; + + h = pkey_handler_get_keytypebased(keysubtype); + if (h && h->apqns_for_keytype) { + rc = h->apqns_for_keytype(keysubtype, + cur_mkvp, alt_mkvp, flags, + apqns, nr_apqns); + } + pkey_handler_put(h); + + return rc; +} +EXPORT_SYMBOL(pkey_handler_apqns_for_keytype); + +void pkey_handler_request_modules(void) +{ +#ifdef CONFIG_MODULES + static const char * const pkey_handler_modules[] = { + "pkey_cca", "pkey_ep11", "pkey_pckmo" }; + int i; + + for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) { + const struct pkey_handler *h; + bool found = false; + + rcu_read_lock(); + list_for_each_entry_rcu(h, &handler_list, list) { + if (h->module && + !strcmp(h->module->name, 
pkey_handler_modules[i])) { + found = true; + break; + } + } + rcu_read_unlock(); + if (!found) { + pr_debug("request_module(%s)\n", pkey_handler_modules[i]); + request_module(pkey_handler_modules[i]); + } + } +#endif +} +EXPORT_SYMBOL(pkey_handler_request_modules); + +/* + * Module init + */ +static int __init pkey_init(void) +{ + int rc; + + /* init debug feature */ + pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); + debug_register_view(pkey_dbf_info, &debug_sprintf_view); + debug_set_level(pkey_dbf_info, 4); + + /* the handler registry does not need any init */ + + rc = pkey_api_init(); + if (rc) + debug_unregister(pkey_dbf_info); + + return rc; +} + +/* + * Module exit + */ +static void __exit pkey_exit(void) +{ + pkey_api_exit(); +} + +module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init); +module_exit(pkey_exit); diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h new file mode 100644 index 00000000000000..7a1a5ce192d827 --- /dev/null +++ b/drivers/s390/crypto/pkey_base.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright IBM Corp. 2024 + * + * Pkey base: debug feature, defines and structs + * common to all pkey code. + */ + +#ifndef _PKEY_BASE_H_ +#define _PKEY_BASE_H_ + +#include +#include +#include + +/* + * pkey debug feature + */ + +extern debug_info_t *pkey_dbf_info; + +#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__) +#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__) +#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__) + +/* + * common defines and common structs + */ + +#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ +#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header)) +#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ +#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ +#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */ + +/* inside view of a generic protected key token */ +struct protkeytoken { + u8 type; /* 0x00 for PAES specific key tokens */ + u8 res0[3]; + u8 version; /* should be 0x01 for protected key token */ + u8 res1[3]; + u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ + u32 len; /* bytes actually stored in protkey[] */ + u8 protkey[]; /* the protected key blob */ +} __packed; + +/* inside view of a protected AES key token */ +struct protaeskeytoken { + u8 type; /* 0x00 for PAES specific key tokens */ + u8 res0[3]; + u8 version; /* should be 0x01 for protected key token */ + u8 res1[3]; + u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ + u32 len; /* bytes actually stored in protkey[] */ + u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ +} __packed; + +/* inside view of a clear key token (type 0x00 version 0x02) */ +struct clearkeytoken { + u8 type; /* 0x00 for PAES specific key tokens */ + u8 res0[3]; + u8 version; /* 0x02 for clear key token */ + u8 res1[3]; + u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */ + u32 len; /* bytes actually stored in clearkey[] */ + u8 clearkey[]; /* clear key value */ +} __packed; + +/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */ +static inline u32 pkey_keytype_aes_to_size(u32 keytype) +{ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + return 16; + case PKEY_KEYTYPE_AES_192: + return 24; + case PKEY_KEYTYPE_AES_256: + return 32; + default: + return 0; + } +} + +/* helper function which 
translates AES key bit size into PKEY_KEYTYPE_AES_* */ +static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize) +{ + switch (keybitsize) { + case 128: + return PKEY_KEYTYPE_AES_128; + case 192: + return PKEY_KEYTYPE_AES_192; + case 256: + return PKEY_KEYTYPE_AES_256; + default: + return 0; + } +} + +/* + * pkey_api.c: + */ +int __init pkey_api_init(void); +void __exit pkey_api_exit(void); + +/* + * pkey_sysfs.c: + */ + +extern const struct attribute_group *pkey_attr_groups[]; + +/* + * pkey handler registry + */ + +struct pkey_handler { + struct module *module; + const char *name; + /* + * is_supported_key() and is_supported_keytype() are called + * within an rcu_read_lock() scope and thus must not sleep! + */ + bool (*is_supported_key)(const u8 *key, u32 keylen); + bool (*is_supported_keytype)(enum pkey_key_type); + int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype); + int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype); + int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + int (*verify_key)(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags); + int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns); + int (*apqns_for_keytype)(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns); + /* used internal by pkey base */ + struct list_head list; +}; + +int pkey_handler_register(struct pkey_handler *handler); +int pkey_handler_unregister(struct pkey_handler *handler); + +/* + * invocation function for the registered pkey handlers + */ + +const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen); +const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt); +void pkey_handler_put(const struct pkey_handler *handler); + +int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype); +int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype); +int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo); +int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo); +int pkey_handler_verify_key(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags); +int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns); +int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns); + +/* + * Unconditional try to load all handler 
modules + */ +void pkey_handler_request_modules(void); + +#endif /* _PKEY_BASE_H_ */ diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c new file mode 100644 index 00000000000000..9370513817209e --- /dev/null +++ b/drivers/s390/crypto/pkey_cca.c @@ -0,0 +1,629 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey cca specific code + * + * Copyright IBM Corp. 2024 + */ + +#define KMSG_COMPONENT "pkey" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include + +#include "zcrypt_api.h" +#include "zcrypt_ccamisc.h" +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key CCA handler"); + +#if IS_MODULE(CONFIG_PKEY_CCA) +static struct ap_device_id pkey_cca_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4 }, + { .dev_type = AP_DEVICE_TYPE_CEX5 }, + { .dev_type = AP_DEVICE_TYPE_CEX6 }, + { .dev_type = AP_DEVICE_TYPE_CEX7 }, + { .dev_type = AP_DEVICE_TYPE_CEX8 }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(ap, pkey_cca_card_ids); +#endif + +/* + * Check key blob for known and supported CCA key. + */ +static bool is_cca_key(const u8 *key, u32 keylen) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + + if (keylen < sizeof(*hdr)) + return false; + + switch (hdr->type) { + case TOKTYPE_CCA_INTERNAL: + switch (hdr->version) { + case TOKVER_CCA_AES: + case TOKVER_CCA_VLSC: + return true; + default: + return false; + } + case TOKTYPE_CCA_INTERNAL_PKA: + return true; + default: + return false; + } +} + +static bool is_cca_keytype(enum pkey_key_type key_type) +{ + switch (key_type) { + case PKEY_TYPE_CCA_DATA: + case PKEY_TYPE_CCA_CIPHER: + case PKEY_TYPE_CCA_ECC: + return true; + default: + return false; + } +} + +static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 _nr_apqns, *_apqns = NULL; + int rc; + + if (!flags) + flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP; + + if (keylen < sizeof(struct keytoken_header)) + return -EINVAL; + + zcrypt_wait_api_operational(); + + if (hdr->type == TOKTYPE_CCA_INTERNAL) { + u64 cur_mkvp = 0, old_mkvp = 0; + int minhwtype = ZCRYPT_CEX3C; + + if (hdr->version == TOKVER_CCA_AES) { + struct secaeskeytoken *t = (struct secaeskeytoken *)key; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp; + } else if (hdr->version == TOKVER_CCA_VLSC) { + struct cipherkeytoken *t = (struct cipherkeytoken *)key; + + minhwtype = ZCRYPT_CEX6; + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp0; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp0; + } else { + /* unknown CCA internal token type */ + return -EINVAL; + } + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, AES_MK_SET, + cur_mkvp, old_mkvp, 1); + if (rc) + goto out; + + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { + struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; + u64 cur_mkvp = 0, old_mkvp = 0; + + if (t->secid == 0x20) { + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = t->mkvp; + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = t->mkvp; + } else { + /* unknown CCA internal 2 token type */ + return -EINVAL; + } + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, APKA_MK_SET, + cur_mkvp, old_mkvp, 1); + if (rc) + goto out; + + } else { + PKEY_DBF_ERR("%s unknown/unsupported blob type %d 
version %d\n", + __func__, hdr->type, hdr->version); + return -EINVAL; + } + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + kfree(_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int cca_apqns4type(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns) +{ + u32 _nr_apqns, *_apqns = NULL; + int rc; + + zcrypt_wait_api_operational(); + + if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { + u64 cur_mkvp = 0, old_mkvp = 0; + int minhwtype = ZCRYPT_CEX3C; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = *((u64 *)cur_mkvp); + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = *((u64 *)alt_mkvp); + if (ktype == PKEY_TYPE_CCA_CIPHER) + minhwtype = ZCRYPT_CEX6; + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, AES_MK_SET, + cur_mkvp, old_mkvp, 1); + if (rc) + goto out; + + } else if (ktype == PKEY_TYPE_CCA_ECC) { + u64 cur_mkvp = 0, old_mkvp = 0; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + cur_mkvp = *((u64 *)cur_mkvp); + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) + old_mkvp = *((u64 *)alt_mkvp); + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, APKA_MK_SET, + cur_mkvp, old_mkvp, 1); + if (rc) + goto out; + + } else { + PKEY_DBF_ERR("%s unknown/unsupported key type %d", + __func__, (int)ktype); + return -EINVAL; + } + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + kfree(_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + struct pkey_apqn *local_apqns = NULL; + int i, rc; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) { + /* CCA AES data key */ + if (keylen != sizeof(struct secaeskeytoken)) + return -EINVAL; + if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) { + /* CCA AES cipher key */ + if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) + return -EINVAL; + if (cca_check_secaescipherkey(pkey_dbf_info, + 3, key, 0, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { + /* CCA ECC (private) key */ + if (keylen < sizeof(struct eccprivkeytoken)) + return -EINVAL; + if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1)) + return -EINVAL; + } else { + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", + __func__, hdr->type, hdr->version); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!local_apqns) + return -ENOMEM; + rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); + if (rc) + goto out; + apqns = local_apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) { + rc = cca_sec2protkey(apqns[i].card, apqns[i].domain, + key, protkey, + protkeylen, protkeytype); + } else if (hdr->type 
== TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) { + rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain, + key, protkey, + protkeylen, protkeytype); + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { + rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain, + key, protkey, + protkeylen, protkeytype); + } else { + rc = -EINVAL; + break; + } + } + +out: + kfree(local_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate CCA secure key. + * As of now only CCA AES Data or Cipher secure keys are + * supported. + * keytype is one of the PKEY_KEYTYPE_* constants, + * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER, + * keybitsize is the bit size of the key (may be 0 for + * keytype PKEY_KEYTYPE_AES_*). + */ +static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 subtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) +{ + struct pkey_apqn *local_apqns = NULL; + int i, len, rc; + + /* check keytype, subtype, keybitsize */ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + len = pkey_keytype_aes_to_size(keytype); + if (keybitsize && keybitsize != 8 * len) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + keybitsize = 8 * len; + switch (subtype) { + case PKEY_TYPE_CCA_DATA: + case PKEY_TYPE_CCA_CIPHER: + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!local_apqns) + return -ENOMEM; + rc = cca_apqns4type(subtype, NULL, NULL, 0, + local_apqns, &nr_apqns); + if (rc) + goto out; + apqns = local_apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + if (subtype == PKEY_TYPE_CCA_CIPHER) { + rc = cca_gencipherkey(apqns[i].card, apqns[i].domain, + keybitsize, flags, + keybuf, keybuflen); + } else { + /* PKEY_TYPE_CCA_DATA */ + rc = cca_genseckey(apqns[i].card, apqns[i].domain, + keybitsize, keybuf); + *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); + } + } + +out: + kfree(local_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate CCA secure key with given clear key value. + * As of now only CCA AES Data or Cipher secure keys are + * supported. + * keytype is one of the PKEY_KEYTYPE_* constants, + * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER, + * keybitsize is the bit size of the key (may be 0 for + * keytype PKEY_KEYTYPE_AES_*). 
+ */ +static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 subtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) +{ + struct pkey_apqn *local_apqns = NULL; + int i, len, rc; + + /* check keytype, subtype, clrkeylen, keybitsize */ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + len = pkey_keytype_aes_to_size(keytype); + if (keybitsize && keybitsize != 8 * len) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + keybitsize = 8 * len; + if (clrkeylen != len) { + PKEY_DBF_ERR("%s invalid clear key len %d != %d\n", + __func__, clrkeylen, len); + return -EINVAL; + } + switch (subtype) { + case PKEY_TYPE_CCA_DATA: + case PKEY_TYPE_CCA_CIPHER: + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!local_apqns) + return -ENOMEM; + rc = cca_apqns4type(subtype, NULL, NULL, 0, + local_apqns, &nr_apqns); + if (rc) + goto out; + apqns = local_apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + if (subtype == PKEY_TYPE_CCA_CIPHER) { + rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain, + keybitsize, flags, clrkey, + keybuf, keybuflen); + } else { + /* PKEY_TYPE_CCA_DATA */ + rc = cca_clr2seckey(apqns[i].card, apqns[i].domain, + keybitsize, clrkey, keybuf); + *keybuflen = (rc ? 
0 : SECKEYBLOBSIZE); + } + } + +out: + kfree(local_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int cca_verifykey(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 nr_apqns, *apqns = NULL; + int rc; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + zcrypt_wait_api_operational(); + + if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_AES) { + struct secaeskeytoken *t = (struct secaeskeytoken *)key; + + rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0); + if (rc) + goto out; + *keytype = PKEY_TYPE_CCA_DATA; + *keybitsize = t->bitsize; + rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX3C, AES_MK_SET, + t->mkvp, 0, 1); + if (!rc) + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + if (rc == -ENODEV) { + rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX3C, AES_MK_SET, + 0, t->mkvp, 1); + if (!rc) + *flags = PKEY_FLAGS_MATCH_ALT_MKVP; + } + if (rc) + goto out; + + *card = ((struct pkey_apqn *)apqns)->card; + *dom = ((struct pkey_apqn *)apqns)->domain; + + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && + hdr->version == TOKVER_CCA_VLSC) { + struct cipherkeytoken *t = (struct cipherkeytoken *)key; + + rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1); + if (rc) + goto out; + *keytype = PKEY_TYPE_CCA_CIPHER; + *keybitsize = PKEY_SIZE_UNKNOWN; + if (!t->plfver && t->wpllen == 512) + *keybitsize = PKEY_SIZE_AES_128; + else if (!t->plfver && t->wpllen == 576) + *keybitsize = PKEY_SIZE_AES_192; + else if (!t->plfver && t->wpllen == 640) + *keybitsize = PKEY_SIZE_AES_256; + rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX6, AES_MK_SET, + t->mkvp0, 0, 1); + if (!rc) + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + if (rc == -ENODEV) { + rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX6, AES_MK_SET, + 0, t->mkvp0, 1); + if (!rc) + *flags = PKEY_FLAGS_MATCH_ALT_MKVP; + } + if (rc) + goto out; + + *card = ((struct pkey_apqn *)apqns)->card; + *dom = ((struct pkey_apqn *)apqns)->domain; + + } else { + /* unknown/unsupported key blob */ + rc = -EINVAL; + } + +out: + kfree(apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * This function provides an alternate but usually slow way + * to convert a 'clear key token' with AES key material into + * a protected key. This is done via an intermediate step + * which creates a CCA AES DATA secure key first and then + * derives the protected key from this secure key. 
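The cca_verifykey() function above also fixes the caller contract: card and domain act as a filter on input (0xFFFF meaning any) and are overwritten with a matching APQN on success, while the flags report whether the current or the alternate master key matched. A hypothetical usage sketch, where blob and blob_len stand for an existing CCA AES key token:

	u16 card = 0xFFFF, dom = 0xFFFF;
	u32 ktype, kbits, kflags;
	int rc;

	rc = cca_verifykey(blob, blob_len, &card, &dom, &ktype, &kbits, &kflags);
	if (!rc && (kflags & PKEY_FLAGS_MATCH_CUR_MKVP))
		pr_debug("key usable on %02x.%04x with the current master key\n",
			 card, dom);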
+ */ +static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype) +{ + const struct keytoken_header *hdr = (const struct keytoken_header *)key; + const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u32 tmplen, keysize = 0; + u8 *tmpbuf; + int i, rc; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_CLEAR_KEY) + keysize = pkey_keytype_aes_to_size(t->keytype); + if (!keysize || t->len != keysize) + return -EINVAL; + + /* alloc tmp key buffer */ + tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC); + if (!tmpbuf) + return -ENOMEM; + + /* try two times in case of failure */ + for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { + tmplen = SECKEYBLOBSIZE; + rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA, + 8 * keysize, 0, t->clearkey, t->len, + tmpbuf, &tmplen, NULL); + pr_debug("cca_clr2key()=%d\n", rc); + if (rc) + continue; + rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen, + protkey, protkeylen, protkeytype); + pr_debug("cca_key2protkey()=%d\n", rc); + } + + kfree(tmpbuf); + pr_debug("rc=%d\n", rc); + return rc; +} + +static struct pkey_handler cca_handler = { + .module = THIS_MODULE, + .name = "PKEY CCA handler", + .is_supported_key = is_cca_key, + .is_supported_keytype = is_cca_keytype, + .key_to_protkey = cca_key2protkey, + .slowpath_key_to_protkey = cca_slowpath_key2protkey, + .gen_key = cca_gen_key, + .clr_to_key = cca_clr2key, + .verify_key = cca_verifykey, + .apqns_for_key = cca_apqns4key, + .apqns_for_keytype = cca_apqns4type, +}; + +/* + * Module init + */ +static int __init pkey_cca_init(void) +{ + /* register this module as pkey handler for all the cca stuff */ + return pkey_handler_register(&cca_handler); +} + +/* + * Module exit + */ +static void __exit pkey_cca_exit(void) +{ + /* unregister this module as pkey handler */ + pkey_handler_unregister(&cca_handler); +} + +module_init(pkey_cca_init); +module_exit(pkey_cca_exit); diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c new file mode 100644 index 00000000000000..f42d397a9cb664 --- /dev/null +++ b/drivers/s390/crypto/pkey_ep11.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey ep11 specific code + * + * Copyright IBM Corp. 2024 + */ + +#define KMSG_COMPONENT "pkey" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include + +#include "zcrypt_api.h" +#include "zcrypt_ccamisc.h" +#include "zcrypt_ep11misc.h" +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key EP11 handler"); + +#if IS_MODULE(CONFIG_PKEY_EP11) +static struct ap_device_id pkey_ep11_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4 }, + { .dev_type = AP_DEVICE_TYPE_CEX5 }, + { .dev_type = AP_DEVICE_TYPE_CEX6 }, + { .dev_type = AP_DEVICE_TYPE_CEX7 }, + { .dev_type = AP_DEVICE_TYPE_CEX8 }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(ap, pkey_ep11_card_ids); +#endif + +/* + * Check key blob for known and supported EP11 key. 
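The cca_handler definition and module hooks above show the plug-in pattern used here: a handler fills in only the operations it supports and registers itself with the pkey base module. A stripped-down, hypothetical handler (callback names are placeholders) would look like this; operations that are not implemented simply stay NULL, as the PCKMO handler further below demonstrates:

static struct pkey_handler example_handler = {
	.module			= THIS_MODULE,
	.name			= "PKEY example handler",
	.is_supported_key	= example_is_key,
	.is_supported_keytype	= example_is_keytype,
	.key_to_protkey		= example_key2protkey,
};

static int __init example_init(void)
{
	return pkey_handler_register(&example_handler);
}

static void __exit example_exit(void)
{
	pkey_handler_unregister(&example_handler);
}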
+ */ +static bool is_ep11_key(const u8 *key, u32 keylen) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + + if (keylen < sizeof(*hdr)) + return false; + + switch (hdr->type) { + case TOKTYPE_NON_CCA: + switch (hdr->version) { + case TOKVER_EP11_AES: + case TOKVER_EP11_AES_WITH_HEADER: + case TOKVER_EP11_ECC_WITH_HEADER: + return true; + default: + return false; + } + default: + return false; + } +} + +static bool is_ep11_keytype(enum pkey_key_type key_type) +{ + switch (key_type) { + case PKEY_TYPE_EP11: + case PKEY_TYPE_EP11_AES: + case PKEY_TYPE_EP11_ECC: + return true; + default: + return false; + } +} + +static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 _nr_apqns, *_apqns = NULL; + int rc; + + if (!flags) + flags = PKEY_FLAGS_MATCH_CUR_MKVP; + + if (keylen < sizeof(struct keytoken_header) || flags == 0) + return -EINVAL; + + zcrypt_wait_api_operational(); + + if (hdr->type == TOKTYPE_NON_CCA && + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + struct ep11keyblob *kb = (struct ep11keyblob *) + (key + sizeof(struct ep11kblob_header)); + int minhwtype = 0, api = 0; + + if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) + return -EINVAL; + if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { + minhwtype = ZCRYPT_CEX7; + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; + } + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp); + if (rc) + goto out; + + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) { + struct ep11keyblob *kb = (struct ep11keyblob *)key; + int minhwtype = 0, api = 0; + + if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) + return -EINVAL; + if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { + minhwtype = ZCRYPT_CEX7; + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; + } + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp); + if (rc) + goto out; + + } else { + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", + __func__, hdr->type, hdr->version); + return -EINVAL; + } + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + kfree(_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ep11_apqns4type(enum pkey_key_type ktype, + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, + struct pkey_apqn *apqns, size_t *nr_apqns) +{ + u32 _nr_apqns, *_apqns = NULL; + int rc; + + zcrypt_wait_api_operational(); + + if (ktype == PKEY_TYPE_EP11 || + ktype == PKEY_TYPE_EP11_AES || + ktype == PKEY_TYPE_EP11_ECC) { + u8 *wkvp = NULL; + int api; + + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) + wkvp = cur_mkvp; + api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, api, wkvp); + if (rc) + goto out; + + } else { + PKEY_DBF_ERR("%s unknown/unsupported key type %d\n", + __func__, (int)ktype); + return -EINVAL; + } + + if (apqns) { + if (*nr_apqns < _nr_apqns) + rc = -ENOSPC; + else + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); + } + *nr_apqns = _nr_apqns; + +out: + kfree(_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + struct pkey_apqn *local_apqns = NULL; + int i, rc; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + /* EP11 AES key blob with header */ + if (ep11_check_aes_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + /* EP11 ECC key blob with header */ + if (ep11_check_ecc_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1)) + return -EINVAL; + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) { + /* EP11 AES key blob with header in session field */ + if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1)) + return -EINVAL; + } else { + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", + __func__, hdr->type, hdr->version); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!local_apqns) + return -ENOMEM; + rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); + if (rc) + goto out; + apqns = local_apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, + key, hdr->len, protkey, + protkeylen, protkeytype); + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, + key, hdr->len, protkey, + protkeylen, protkeytype); + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES && + is_ep11_keyblob(key)) { + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, + key, hdr->len, protkey, + protkeylen, protkeytype); + } else { + rc = -EINVAL; + break; + } + } + +out: + kfree(local_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate EP11 secure key. + * As of now only EP11 AES secure keys are supported. + * keytype is one of the PKEY_KEYTYPE_* constants, + * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES + * or 0 (results in subtype PKEY_TYPE_EP11_AES), + * keybitsize is the bit size of the key (may be 0 for + * keytype PKEY_KEYTYPE_AES_*). 
+ */ +static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 subtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) +{ + struct pkey_apqn *local_apqns = NULL; + int i, len, rc; + + /* check keytype, subtype, keybitsize */ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + len = pkey_keytype_aes_to_size(keytype); + if (keybitsize && keybitsize != 8 * len) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + keybitsize = 8 * len; + switch (subtype) { + case PKEY_TYPE_EP11: + case PKEY_TYPE_EP11_AES: + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!local_apqns) + return -ENOMEM; + rc = ep11_apqns4type(subtype, NULL, NULL, 0, + local_apqns, &nr_apqns); + if (rc) + goto out; + apqns = local_apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + rc = ep11_genaeskey(apqns[i].card, apqns[i].domain, + keybitsize, flags, + keybuf, keybuflen, subtype); + } + +out: + kfree(local_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate EP11 secure key with given clear key value. + * As of now only EP11 AES secure keys are supported. + * keytype is one of the PKEY_KEYTYPE_* constants, + * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES + * or 0 (assumes PKEY_TYPE_EP11_AES then). + * keybitsize is the bit size of the key (may be 0 for + * keytype PKEY_KEYTYPE_AES_*). 
+ */ +static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, + u32 keytype, u32 subtype, + u32 keybitsize, u32 flags, + const u8 *clrkey, u32 clrkeylen, + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) +{ + struct pkey_apqn *local_apqns = NULL; + int i, len, rc; + + /* check keytype, subtype, clrkeylen, keybitsize */ + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + len = pkey_keytype_aes_to_size(keytype); + if (keybitsize && keybitsize != 8 * len) { + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); + return -EINVAL; + } + keybitsize = 8 * len; + if (clrkeylen != len) { + PKEY_DBF_ERR("%s invalid clear key len %d != %d\n", + __func__, clrkeylen, len); + return -EINVAL; + } + switch (subtype) { + case PKEY_TYPE_EP11: + case PKEY_TYPE_EP11_AES: + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + zcrypt_wait_api_operational(); + + if (!apqns || (nr_apqns == 1 && + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { + nr_apqns = MAXAPQNSINLIST; + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), + GFP_KERNEL); + if (!local_apqns) + return -ENOMEM; + rc = ep11_apqns4type(subtype, NULL, NULL, 0, + local_apqns, &nr_apqns); + if (rc) + goto out; + apqns = local_apqns; + } + + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { + rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain, + keybitsize, flags, clrkey, + keybuf, keybuflen, subtype); + } + +out: + kfree(local_apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ep11_verifykey(const u8 *key, u32 keylen, + u16 *card, u16 *dom, + u32 *keytype, u32 *keybitsize, u32 *flags) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + u32 nr_apqns, *apqns = NULL; + int rc; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + zcrypt_wait_api_operational(); + + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES) { + struct ep11keyblob *kb = (struct ep11keyblob *)key; + int api; + + rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); + if (rc) + goto out; + *keytype = PKEY_TYPE_EP11; + *keybitsize = kb->head.bitlen; + + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; + rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX7, api, + ep11_kb_wkvp(key, keylen)); + if (rc) + goto out; + + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + + *card = ((struct pkey_apqn *)apqns)->card; + *dom = ((struct pkey_apqn *)apqns)->domain; + + } else if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_EP11_AES_WITH_HEADER) { + struct ep11kblob_header *kh = (struct ep11kblob_header *)key; + int api; + + rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1); + if (rc) + goto out; + *keytype = PKEY_TYPE_EP11_AES; + *keybitsize = kh->bitlen; + + api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; + rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, + ZCRYPT_CEX7, api, + ep11_kb_wkvp(key, keylen)); + if (rc) + goto out; + + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; + + *card = ((struct pkey_apqn *)apqns)->card; + *dom = ((struct pkey_apqn *)apqns)->domain; + + } else { + /* unknown/unsupported key blob */ + rc = -EINVAL; + } + +out: + kfree(apqns); + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * This function provides an alternate but usually slow way + * to convert a 'clear key token' with AES key material into + * a protected key. That is done via an intermediate step + * which creates an EP11 AES secure key first and then derives + * the protected key from this secure key. + */ +static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, + size_t nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, + u32 *protkeytype) +{ + const struct keytoken_header *hdr = (const struct keytoken_header *)key; + const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u32 tmplen, keysize = 0; + u8 *tmpbuf; + int i, rc; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + + if (hdr->type == TOKTYPE_NON_CCA && + hdr->version == TOKVER_CLEAR_KEY) + keysize = pkey_keytype_aes_to_size(t->keytype); + if (!keysize || t->len != keysize) + return -EINVAL; + + /* alloc tmp key buffer */ + tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC); + if (!tmpbuf) + return -ENOMEM; + + /* try two times in case of failure */ + for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { + tmplen = MAXEP11AESKEYBLOBSIZE; + rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11, + 8 * keysize, 0, t->clearkey, t->len, + tmpbuf, &tmplen, NULL); + pr_debug("ep11_clr2key()=%d\n", rc); + if (rc) + continue; + rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen, + protkey, protkeylen, protkeytype); + pr_debug("ep11_key2protkey()=%d\n", rc); + } + + kfree(tmpbuf); + pr_debug("rc=%d\n", rc); + return rc; +} + +static struct pkey_handler ep11_handler = { + .module = THIS_MODULE, + .name = "PKEY EP11 handler", + .is_supported_key = is_ep11_key, + .is_supported_keytype = is_ep11_keytype, + .key_to_protkey = ep11_key2protkey, + .slowpath_key_to_protkey = ep11_slowpath_key2protkey, + .gen_key = ep11_gen_key, + .clr_to_key = ep11_clr2key, + .verify_key = ep11_verifykey, + .apqns_for_key = ep11_apqns4key, + .apqns_for_keytype = ep11_apqns4type, +}; + +/* + * Module init + */ +static int __init pkey_ep11_init(void) +{ + /* register this module as pkey handler for all the ep11 stuff */ + return pkey_handler_register(&ep11_handler); +} + +/* + * Module exit + */ +static void __exit pkey_ep11_exit(void) +{ + /* unregister this module as pkey handler */ + pkey_handler_unregister(&ep11_handler); +} + +module_init(pkey_ep11_init); +module_exit(pkey_ep11_exit); diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c new file mode 100644 index 00000000000000..98079b1ed6db81 --- /dev/null +++ b/drivers/s390/crypto/pkey_pckmo.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey pckmo specific code + * + * Copyright IBM Corp. 2024 + */ + +#define KMSG_COMPONENT "pkey" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include +#include +#include + +#include "zcrypt_api.h" +#include "zcrypt_ccamisc.h" +#include "pkey_base.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key PCKMO handler"); + +/* + * Check key blob for known and supported here. 
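pckmo_clr2protkey() below always returns the key material immediately followed by the 32-byte AES wrapping key verification pattern; the per-keytype comments there give the resulting sizes. As a rough illustration for AES-256 (the layout is inferred from those comments, not a definitive structure definition):

struct example_aes256_protkey {
	u8 enckey[32];	/* AES-256 key, encrypted under the CPU wrapping key */
	u8 wkvp[32];	/* wrapping key verification pattern (AES_WK_VP_SIZE) */
};			/* 64 bytes, matching '32 byte key, 32 byte aes wkvp, total 64 bytes' below */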
+ */ +static bool is_pckmo_key(const u8 *key, u32 keylen) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + struct clearkeytoken *t = (struct clearkeytoken *)key; + + if (keylen < sizeof(*hdr)) + return false; + + switch (hdr->type) { + case TOKTYPE_NON_CCA: + switch (hdr->version) { + case TOKVER_CLEAR_KEY: + switch (t->keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + case PKEY_KEYTYPE_ECC_P256: + case PKEY_KEYTYPE_ECC_P384: + case PKEY_KEYTYPE_ECC_P521: + case PKEY_KEYTYPE_ECC_ED25519: + case PKEY_KEYTYPE_ECC_ED448: + case PKEY_KEYTYPE_AES_XTS_128: + case PKEY_KEYTYPE_AES_XTS_256: + case PKEY_KEYTYPE_HMAC_512: + case PKEY_KEYTYPE_HMAC_1024: + return true; + default: + return false; + } + case TOKVER_PROTECTED_KEY: + return true; + default: + return false; + } + default: + return false; + } +} + +static bool is_pckmo_keytype(enum pkey_key_type keytype) +{ + switch (keytype) { + case PKEY_TYPE_PROTKEY: + return true; + default: + return false; + } +} + +/* + * Create a protected key from a clear key value via PCKMO instruction. + */ +static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + /* mask of available pckmo subfunctions */ + static cpacf_mask_t pckmo_functions; + + int keysize, rc = -EINVAL; + u8 paramblock[160]; + u32 pkeytype; + long fc; + + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + /* 16 byte key, 32 byte aes wkvp, total 48 bytes */ + keysize = 16; + pkeytype = keytype; + fc = CPACF_PCKMO_ENC_AES_128_KEY; + break; + case PKEY_KEYTYPE_AES_192: + /* 24 byte key, 32 byte aes wkvp, total 56 bytes */ + keysize = 24; + pkeytype = keytype; + fc = CPACF_PCKMO_ENC_AES_192_KEY; + break; + case PKEY_KEYTYPE_AES_256: + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ + keysize = 32; + pkeytype = keytype; + fc = CPACF_PCKMO_ENC_AES_256_KEY; + break; + case PKEY_KEYTYPE_ECC_P256: + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ + keysize = 32; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P256_KEY; + break; + case PKEY_KEYTYPE_ECC_P384: + /* 48 byte key, 32 byte aes wkvp, total 80 bytes */ + keysize = 48; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P384_KEY; + break; + case PKEY_KEYTYPE_ECC_P521: + /* 80 byte key, 32 byte aes wkvp, total 112 bytes */ + keysize = 80; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_P521_KEY; + break; + case PKEY_KEYTYPE_ECC_ED25519: + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ + keysize = 32; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY; + break; + case PKEY_KEYTYPE_ECC_ED448: + /* 64 byte key, 32 byte aes wkvp, total 96 bytes */ + keysize = 64; + pkeytype = PKEY_KEYTYPE_ECC; + fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; + break; + case PKEY_KEYTYPE_AES_XTS_128: + /* 2x16 byte keys, 32 byte aes wkvp, total 64 bytes */ + keysize = 32; + pkeytype = PKEY_KEYTYPE_AES_XTS_128; + fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY; + break; + case PKEY_KEYTYPE_AES_XTS_256: + /* 2x32 byte keys, 32 byte aes wkvp, total 96 bytes */ + keysize = 64; + pkeytype = PKEY_KEYTYPE_AES_XTS_256; + fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY; + break; + case PKEY_KEYTYPE_HMAC_512: + /* 64 byte key, 32 byte aes wkvp, total 96 bytes */ + keysize = 64; + pkeytype = PKEY_KEYTYPE_HMAC_512; + fc = CPACF_PCKMO_ENC_HMAC_512_KEY; + break; + case PKEY_KEYTYPE_HMAC_1024: + /* 128 byte key, 32 byte aes wkvp, total 160 bytes */ + keysize = 128; + pkeytype = PKEY_KEYTYPE_HMAC_1024; + 
fc = CPACF_PCKMO_ENC_HMAC_1024_KEY; + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", + __func__, keytype); + goto out; + } + + if (clrkeylen && clrkeylen < keysize) { + PKEY_DBF_ERR("%s clear key size too small: %u < %d\n", + __func__, clrkeylen, keysize); + goto out; + } + if (*protkeylen < keysize + AES_WK_VP_SIZE) { + PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n", + __func__, *protkeylen, keysize + AES_WK_VP_SIZE); + goto out; + } + + /* Did we already check for PCKMO ? */ + if (!pckmo_functions.bytes[0]) { + /* no, so check now */ + if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) { + PKEY_DBF_ERR("%s cpacf_query() failed\n", __func__); + rc = -ENODEV; + goto out; + } + } + /* check for the pckmo subfunction we need now */ + if (!cpacf_test_func(&pckmo_functions, fc)) { + PKEY_DBF_ERR("%s pckmo functions not available\n", __func__); + rc = -ENODEV; + goto out; + } + + /* prepare param block */ + memset(paramblock, 0, sizeof(paramblock)); + memcpy(paramblock, clrkey, keysize); + + /* call the pckmo instruction */ + cpacf_pckmo(fc, paramblock); + + /* copy created protected key to key buffer including the wkvp block */ + *protkeylen = keysize + AES_WK_VP_SIZE; + memcpy(protkey, paramblock, *protkeylen); + *protkeytype = pkeytype; + + rc = 0; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Verify a raw protected key blob. + * Currently only AES protected keys are supported. + */ +static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen, + u32 protkeytype) +{ + struct { + u8 iv[AES_BLOCK_SIZE]; + u8 key[MAXPROTKEYSIZE]; + } param; + u8 null_msg[AES_BLOCK_SIZE]; + u8 dest_buf[AES_BLOCK_SIZE]; + unsigned int k, pkeylen; + unsigned long fc; + int rc = -EINVAL; + + switch (protkeytype) { + case PKEY_KEYTYPE_AES_128: + pkeylen = 16 + AES_WK_VP_SIZE; + fc = CPACF_KMC_PAES_128; + break; + case PKEY_KEYTYPE_AES_192: + pkeylen = 24 + AES_WK_VP_SIZE; + fc = CPACF_KMC_PAES_192; + break; + case PKEY_KEYTYPE_AES_256: + pkeylen = 32 + AES_WK_VP_SIZE; + fc = CPACF_KMC_PAES_256; + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__, + protkeytype); + goto out; + } + if (protkeylen != pkeylen) { + PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n", + __func__, protkeylen, protkeytype); + goto out; + } + + memset(null_msg, 0, sizeof(null_msg)); + + memset(param.iv, 0, sizeof(param.iv)); + memcpy(param.key, protkey, protkeylen); + + k = cpacf_kmc(fc | CPACF_ENCRYPT, ¶m, null_msg, dest_buf, + sizeof(null_msg)); + if (k != sizeof(null_msg)) { + PKEY_DBF_ERR("%s protected key is not valid\n", __func__); + rc = -EKEYREJECTED; + goto out; + } + + rc = 0; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int pckmo_key2protkey(const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + int rc = -EINVAL; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + if (hdr->type != TOKTYPE_NON_CCA) + return -EINVAL; + + switch (hdr->version) { + case TOKVER_PROTECTED_KEY: { + struct protkeytoken *t = (struct protkeytoken *)key; + + if (keylen < sizeof(*t)) + goto out; + switch (t->keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + if (keylen != sizeof(struct protaeskeytoken)) + goto out; + rc = pckmo_verify_protkey(t->protkey, t->len, + t->keytype); + if (rc) + goto out; + break; + case PKEY_KEYTYPE_AES_XTS_128: + if (t->len != 64 || keylen != sizeof(*t) + t->len) + goto out; + 
break; + case PKEY_KEYTYPE_AES_XTS_256: + case PKEY_KEYTYPE_HMAC_512: + if (t->len != 96 || keylen != sizeof(*t) + t->len) + goto out; + break; + case PKEY_KEYTYPE_HMAC_1024: + if (t->len != 160 || keylen != sizeof(*t) + t->len) + goto out; + break; + default: + PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n", + __func__, t->keytype); + goto out; + } + memcpy(protkey, t->protkey, t->len); + *protkeylen = t->len; + *protkeytype = t->keytype; + break; + } + case TOKVER_CLEAR_KEY: { + struct clearkeytoken *t = (struct clearkeytoken *)key; + u32 keysize = 0; + + if (keylen < sizeof(struct clearkeytoken) || + keylen != sizeof(*t) + t->len) + goto out; + switch (t->keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + keysize = pkey_keytype_aes_to_size(t->keytype); + break; + case PKEY_KEYTYPE_ECC_P256: + keysize = 32; + break; + case PKEY_KEYTYPE_ECC_P384: + keysize = 48; + break; + case PKEY_KEYTYPE_ECC_P521: + keysize = 80; + break; + case PKEY_KEYTYPE_ECC_ED25519: + keysize = 32; + break; + case PKEY_KEYTYPE_ECC_ED448: + keysize = 64; + break; + case PKEY_KEYTYPE_AES_XTS_128: + keysize = 32; + break; + case PKEY_KEYTYPE_AES_XTS_256: + keysize = 64; + break; + case PKEY_KEYTYPE_HMAC_512: + keysize = 64; + break; + case PKEY_KEYTYPE_HMAC_1024: + keysize = 128; + break; + default: + break; + } + if (!keysize) { + PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n", + __func__, t->keytype); + goto out; + } + if (t->len != keysize) { + PKEY_DBF_ERR("%s clear key token: invalid key len %u\n", + __func__, t->len); + goto out; + } + rc = pckmo_clr2protkey(t->keytype, t->clearkey, t->len, + protkey, protkeylen, protkeytype); + break; + } + default: + PKEY_DBF_ERR("%s unknown non-CCA token version %d\n", + __func__, hdr->version); + break; + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Generate a random protected key. + * Currently only the generation of AES protected keys + * is supported. + */ +static int pckmo_gen_protkey(u32 keytype, u32 subtype, + u8 *protkey, u32 *protkeylen, u32 *protkeytype) +{ + u8 clrkey[128]; + int keysize; + int rc; + + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_192: + case PKEY_KEYTYPE_AES_256: + keysize = pkey_keytype_aes_to_size(keytype); + break; + case PKEY_KEYTYPE_AES_XTS_128: + keysize = 32; + break; + case PKEY_KEYTYPE_AES_XTS_256: + case PKEY_KEYTYPE_HMAC_512: + keysize = 64; + break; + case PKEY_KEYTYPE_HMAC_1024: + keysize = 128; + break; + default: + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + if (subtype != PKEY_TYPE_PROTKEY) { + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", + __func__, subtype); + return -EINVAL; + } + + /* generate a dummy random clear key */ + get_random_bytes(clrkey, keysize); + + /* convert it to a dummy protected key */ + rc = pckmo_clr2protkey(keytype, clrkey, keysize, + protkey, protkeylen, protkeytype); + if (rc) + goto out; + + /* replace the key part of the protected key with random bytes */ + get_random_bytes(protkey, keysize); + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Verify a protected key token blob. + * Currently only AES protected keys are supported. 
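Among the tokens pckmo_key2protkey() above accepts is the clear key token: a keytoken_header with type TOKTYPE_NON_CCA and version TOKVER_CLEAR_KEY followed by the raw key material. A hypothetical sketch of building such a token for an AES-256 key (allocation failure handling omitted):

	struct clearkeytoken *t;
	struct keytoken_header *hdr;
	u32 toklen = sizeof(*t) + 32;

	t = kzalloc(toklen, GFP_KERNEL);
	hdr = (struct keytoken_header *)t;
	hdr->type = TOKTYPE_NON_CCA;
	hdr->version = TOKVER_CLEAR_KEY;
	t->keytype = PKEY_KEYTYPE_AES_256;
	t->len = 32;
	get_random_bytes(t->clearkey, t->len);
	/* pass (t, toklen) on to pckmo_key2protkey() */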
+ */ +static int pckmo_verify_key(const u8 *key, u32 keylen) +{ + struct keytoken_header *hdr = (struct keytoken_header *)key; + int rc = -EINVAL; + + if (keylen < sizeof(*hdr)) + return -EINVAL; + if (hdr->type != TOKTYPE_NON_CCA) + return -EINVAL; + + switch (hdr->version) { + case TOKVER_PROTECTED_KEY: { + struct protaeskeytoken *t; + + if (keylen != sizeof(struct protaeskeytoken)) + goto out; + t = (struct protaeskeytoken *)key; + rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype); + break; + } + default: + PKEY_DBF_ERR("%s unknown non-CCA token version %d\n", + __func__, hdr->version); + break; + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * Wrapper functions used for the pkey handler struct + */ + +static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, + size_t _nr_apqns, + const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *keyinfo) +{ + return pckmo_key2protkey(key, keylen, + protkey, protkeylen, keyinfo); +} + +static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, + u32 keytype, u32 keysubtype, + u32 _keybitsize, u32 _flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo) +{ + return pckmo_gen_protkey(keytype, keysubtype, + keybuf, keybuflen, keyinfo); +} + +static int pkey_pckmo_verifykey(const u8 *key, u32 keylen, + u16 *_card, u16 *_dom, + u32 *_keytype, u32 *_keybitsize, u32 *_flags) +{ + return pckmo_verify_key(key, keylen); +} + +static struct pkey_handler pckmo_handler = { + .module = THIS_MODULE, + .name = "PKEY PCKMO handler", + .is_supported_key = is_pckmo_key, + .is_supported_keytype = is_pckmo_keytype, + .key_to_protkey = pkey_pckmo_key2protkey, + .gen_key = pkey_pckmo_gen_key, + .verify_key = pkey_pckmo_verifykey, +}; + +/* + * Module init + */ +static int __init pkey_pckmo_init(void) +{ + cpacf_mask_t func_mask; + + /* + * The pckmo instruction should be available - even if we don't + * actually invoke it. This instruction comes with MSA 3 which + * is also the minimum level for the kmc instructions which + * are able to work with protected keys. + */ + if (!cpacf_query(CPACF_PCKMO, &func_mask)) + return -ENODEV; + + /* register this module as pkey handler for all the pckmo stuff */ + return pkey_handler_register(&pckmo_handler); +} + +/* + * Module exit + */ +static void __exit pkey_pckmo_exit(void) +{ + /* unregister this module as pkey handler */ + pkey_handler_unregister(&pckmo_handler); +} + +module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_pckmo_init); +module_exit(pkey_pckmo_exit); diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c new file mode 100644 index 00000000000000..cc0fc1e264bd0d --- /dev/null +++ b/drivers/s390/crypto/pkey_sysfs.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey module sysfs related functions + * + * Copyright IBM Corp. 2024 + */ + +#define KMSG_COMPONENT "pkey" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include + +#include "zcrypt_api.h" +#include "zcrypt_ccamisc.h" +#include "zcrypt_ep11misc.h" + +#include "pkey_base.h" + +/* + * Wrapper around pkey_handler_gen_key() which deals with the + * ENODEV return code and then tries to enforce a pkey handler + * module load. 
+ */ +static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype, + u32 keybitsize, u32 flags, + u8 *keybuf, u32 *keybuflen, u32 *keyinfo) +{ + int rc; + + rc = pkey_handler_gen_key(NULL, 0, + keytype, keysubtype, + keybitsize, flags, + keybuf, keybuflen, keyinfo); + if (rc == -ENODEV) { + pkey_handler_request_modules(); + rc = pkey_handler_gen_key(NULL, 0, + keytype, keysubtype, + keybitsize, flags, + keybuf, keybuflen, keyinfo); + } + + return rc; +} + +/* + * Sysfs attribute read function for all protected key binary attributes. + * The implementation can not deal with partial reads, because a new random + * protected key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, + loff_t off, size_t count) +{ + struct protaeskeytoken protkeytoken; + struct pkey_protkey protkey; + int rc; + + if (off != 0 || count < sizeof(protkeytoken)) + return -EINVAL; + if (is_xts) + if (count < 2 * sizeof(protkeytoken)) + return -EINVAL; + + memset(&protkeytoken, 0, sizeof(protkeytoken)); + protkeytoken.type = TOKTYPE_NON_CCA; + protkeytoken.version = TOKVER_PROTECTED_KEY; + protkeytoken.keytype = keytype; + + protkey.len = sizeof(protkey.protkey); + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0, + protkey.protkey, &protkey.len, + &protkey.type); + if (rc) + return rc; + + protkeytoken.len = protkey.len; + memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); + + memcpy(buf, &protkeytoken, sizeof(protkeytoken)); + + if (is_xts) { + /* xts needs a second protected key, reuse protkey struct */ + protkey.len = sizeof(protkey.protkey); + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0, + protkey.protkey, &protkey.len, + &protkey.type); + if (rc) + return rc; + + protkeytoken.len = protkey.len; + memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); + + memcpy(buf + sizeof(protkeytoken), &protkeytoken, + sizeof(protkeytoken)); + + return 2 * sizeof(protkeytoken); + } + + return sizeof(protkeytoken); +} + +/* + * Sysfs attribute read function for the AES XTS prot key binary attributes. + * The implementation can not deal with partial reads, because a new random + * protected key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_protkey_aes_xts_attr_read(u32 keytype, char *buf, + loff_t off, size_t count) +{ + struct protkeytoken *t = (struct protkeytoken *)buf; + u32 protlen, prottype; + int rc; + + switch (keytype) { + case PKEY_KEYTYPE_AES_XTS_128: + protlen = 64; + break; + case PKEY_KEYTYPE_AES_XTS_256: + protlen = 96; + break; + default: + return -EINVAL; + } + + if (off != 0 || count < sizeof(*t) + protlen) + return -EINVAL; + + memset(t, 0, sizeof(*t) + protlen); + t->type = TOKTYPE_NON_CCA; + t->version = TOKVER_PROTECTED_KEY; + t->keytype = keytype; + + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0, + t->protkey, &protlen, &prottype); + if (rc) + return rc; + + t->len = protlen; + + return sizeof(*t) + protlen; +} + +/* + * Sysfs attribute read function for the HMAC prot key binary attributes. + * The implementation can not deal with partial reads, because a new random + * protected key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 
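Seen from user space, each of these binary attributes returns one freshly generated key blob per full-size read; as stated above, partial reads are rejected with -EINVAL. A hypothetical sketch (the sysfs path assumes the attribute groups are attached to the pkey misc device and may differ on the target system):

#include <fcntl.h>
#include <unistd.h>

int example_read_protkey(void)
{
	unsigned char buf[4096];	/* comfortably larger than any protected key token */
	ssize_t n;
	int fd;

	fd = open("/sys/devices/virtual/misc/pkey/protkey/protkey_aes_256", O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf));	/* must cover the whole blob in one read */
	close(fd);
	return n > 0 ? 0 : -1;
}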
+ */ +static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf, + loff_t off, size_t count) +{ + struct protkeytoken *t = (struct protkeytoken *)buf; + u32 protlen, prottype; + int rc; + + switch (keytype) { + case PKEY_KEYTYPE_HMAC_512: + protlen = 96; + break; + case PKEY_KEYTYPE_HMAC_1024: + protlen = 160; + break; + default: + return -EINVAL; + } + + if (off != 0 || count < sizeof(*t) + protlen) + return -EINVAL; + + memset(t, 0, sizeof(*t) + protlen); + t->type = TOKTYPE_NON_CCA; + t->version = TOKVER_PROTECTED_KEY; + t->keytype = keytype; + + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0, + t->protkey, &protlen, &prottype); + if (rc) + return rc; + + t->len = protlen; + + return sizeof(*t) + protlen; +} + +static ssize_t protkey_aes_128_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, + off, count); +} + +static ssize_t protkey_aes_192_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, + off, count); +} + +static ssize_t protkey_aes_256_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, + off, count); +} + +static ssize_t protkey_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, + off, count); +} + +static ssize_t protkey_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, + off, count); +} + +static ssize_t protkey_aes_xts_128_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_128, + buf, off, count); +} + +static ssize_t protkey_aes_xts_256_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_256, + buf, off, count); +} + +static ssize_t protkey_hmac_512_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_512, + buf, off, count); +} + +static ssize_t protkey_hmac_1024_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_1024, + buf, off, count); +} + +static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken)); +static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken)); +static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken)); +static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken)); +static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken)); +static BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64); +static BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96); +static BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96); +static 
BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160); + +static struct bin_attribute *protkey_attrs[] = { + &bin_attr_protkey_aes_128, + &bin_attr_protkey_aes_192, + &bin_attr_protkey_aes_256, + &bin_attr_protkey_aes_128_xts, + &bin_attr_protkey_aes_256_xts, + &bin_attr_protkey_aes_xts_128, + &bin_attr_protkey_aes_xts_256, + &bin_attr_protkey_hmac_512, + &bin_attr_protkey_hmac_1024, + NULL +}; + +static struct attribute_group protkey_attr_group = { + .name = "protkey", + .bin_attrs = protkey_attrs, +}; + +/* + * Sysfs attribute read function for all secure key ccadata binary attributes. + * The implementation can not deal with partial reads, because a new random + * protected key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, + loff_t off, size_t count) +{ + struct pkey_seckey *seckey = (struct pkey_seckey *)buf; + u32 buflen; + int rc; + + if (off != 0 || count < sizeof(struct secaeskeytoken)) + return -EINVAL; + if (is_xts) + if (count < 2 * sizeof(struct secaeskeytoken)) + return -EINVAL; + + buflen = sizeof(seckey->seckey); + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0, + seckey->seckey, &buflen, NULL); + if (rc) + return rc; + + if (is_xts) { + seckey++; + buflen = sizeof(seckey->seckey); + rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0, + seckey->seckey, &buflen, NULL); + if (rc) + return rc; + + return 2 * sizeof(struct secaeskeytoken); + } + + return sizeof(struct secaeskeytoken); +} + +static ssize_t ccadata_aes_128_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, + off, count); +} + +static ssize_t ccadata_aes_192_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, + off, count); +} + +static ssize_t ccadata_aes_256_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, + off, count); +} + +static ssize_t ccadata_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, + off, count); +} + +static ssize_t ccadata_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, + off, count); +} + +static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken)); +static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken)); +static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken)); +static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken)); +static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken)); + +static struct bin_attribute *ccadata_attrs[] = { + &bin_attr_ccadata_aes_128, + &bin_attr_ccadata_aes_192, + &bin_attr_ccadata_aes_256, + &bin_attr_ccadata_aes_128_xts, + &bin_attr_ccadata_aes_256_xts, + NULL +}; + +static struct attribute_group ccadata_attr_group = { + .name = "ccadata", + .bin_attrs = ccadata_attrs, +}; + +#define 
CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80) + +/* + * Sysfs attribute read function for all secure key ccacipher binary attributes. + * The implementation can not deal with partial reads, because a new random + * secure key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + */ +static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, + bool is_xts, char *buf, loff_t off, + size_t count) +{ + u32 keysize = CCACIPHERTOKENSIZE; + int rc; + + if (off != 0 || count < CCACIPHERTOKENSIZE) + return -EINVAL; + if (is_xts) + if (count < 2 * CCACIPHERTOKENSIZE) + return -EINVAL; + + memset(buf, 0, is_xts ? 2 * keysize : keysize); + + rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits), + PKEY_TYPE_CCA_CIPHER, keybits, 0, + buf, &keysize, NULL); + if (rc) + return rc; + + if (is_xts) { + keysize = CCACIPHERTOKENSIZE; + buf += CCACIPHERTOKENSIZE; + rc = sys_pkey_handler_gen_key( + pkey_aes_bitsize_to_keytype(keybits), + PKEY_TYPE_CCA_CIPHER, keybits, 0, + buf, &keysize, NULL); + if (rc) + return rc; + return 2 * CCACIPHERTOKENSIZE; + } + + return CCACIPHERTOKENSIZE; +} + +static ssize_t ccacipher_aes_128_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_192_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_256_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf, + off, count); +} + +static ssize_t ccacipher_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf, + off, count); +} + +static ssize_t ccacipher_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf, + off, count); +} + +static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE); +static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE); +static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE); +static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE); +static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE); + +static struct bin_attribute *ccacipher_attrs[] = { + &bin_attr_ccacipher_aes_128, + &bin_attr_ccacipher_aes_192, + &bin_attr_ccacipher_aes_256, + &bin_attr_ccacipher_aes_128_xts, + &bin_attr_ccacipher_aes_256_xts, + NULL +}; + +static struct attribute_group ccacipher_attr_group = { + .name = "ccacipher", + .bin_attrs = ccacipher_attrs, +}; + +/* + * Sysfs attribute read function for all ep11 aes key binary attributes. + * The implementation can not deal with partial reads, because a new random + * secure key blob is generated with each read. In case of partial reads + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. + * This function and the sysfs attributes using it provide EP11 key blobs + * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently + * 336 bytes. 
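Note that the *_xts attributes in this file return two independently generated blobs back to back, one for each of the two keys required by AES-XTS. Splitting them in user space is therefore a fixed-offset operation; for the EP11 case (blobs padded to 336 bytes, as noted above) a hypothetical sketch:

	/* assumed: buf holds one full read of the ep11_aes_256_xts attribute */
	const unsigned char *xts_key1 = buf;
	const unsigned char *xts_key2 = buf + 336;	/* MAXEP11AESKEYBLOBSIZE */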
+ */ +static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, + bool is_xts, char *buf, loff_t off, + size_t count) +{ + u32 keysize = MAXEP11AESKEYBLOBSIZE; + int rc; + + if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) + return -EINVAL; + if (is_xts) + if (count < 2 * MAXEP11AESKEYBLOBSIZE) + return -EINVAL; + + memset(buf, 0, is_xts ? 2 * keysize : keysize); + + rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits), + PKEY_TYPE_EP11_AES, keybits, 0, + buf, &keysize, NULL); + if (rc) + return rc; + + if (is_xts) { + keysize = MAXEP11AESKEYBLOBSIZE; + buf += MAXEP11AESKEYBLOBSIZE; + rc = sys_pkey_handler_gen_key( + pkey_aes_bitsize_to_keytype(keybits), + PKEY_TYPE_EP11_AES, keybits, 0, + buf, &keysize, NULL); + if (rc) + return rc; + return 2 * MAXEP11AESKEYBLOBSIZE; + } + + return MAXEP11AESKEYBLOBSIZE; +} + +static ssize_t ep11_aes_128_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf, + off, count); +} + +static ssize_t ep11_aes_192_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf, + off, count); +} + +static ssize_t ep11_aes_256_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf, + off, count); +} + +static ssize_t ep11_aes_128_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf, + off, count); +} + +static ssize_t ep11_aes_256_xts_read(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf, + off, count); +} + +static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE); +static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE); +static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE); +static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE); +static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE); + +static struct bin_attribute *ep11_attrs[] = { + &bin_attr_ep11_aes_128, + &bin_attr_ep11_aes_192, + &bin_attr_ep11_aes_256, + &bin_attr_ep11_aes_128_xts, + &bin_attr_ep11_aes_256_xts, + NULL +}; + +static struct attribute_group ep11_attr_group = { + .name = "ep11", + .bin_attrs = ep11_attrs, +}; + +const struct attribute_group *pkey_attr_groups[] = { + &protkey_attr_group, + &ccadata_attr_group, + &ccacipher_attr_group, + &ep11_attr_group, + NULL, +}; diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index 4aeb3e1213c773..67a807e2e75b1b 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -26,6 +26,18 @@ MODULE_LICENSE("GPL v2"); struct ap_matrix_dev *matrix_dev; debug_info_t *vfio_ap_dbf_info; +static ssize_t features_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "guest_matrix hotplug ap_config\n"); +} +static DEVICE_ATTR_RO(features); + +static struct attribute *matrix_dev_attrs[] = { + &dev_attr_features.attr, + NULL, +}; +ATTRIBUTE_GROUPS(matrix_dev); + /* Only type 10 adapters (CEX4 and later) are supported * by the AP matrix device driver */ @@ -68,6 +80,7 @@ static struct 
device_driver matrix_driver = { .name = "vfio_ap", .bus = &matrix_bus, .suppress_bind_attrs = true, + .dev_groups = matrix_dev_groups, }; static int vfio_ap_matrix_dev_create(void) diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 74036886ca87dc..5020696f13797d 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -715,7 +715,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - pr_debug("%s no matching queue found => ENODEV\n", __func__); + pr_debug("no matching queue found => ENODEV\n"); rc = -ENODEV; goto out; } @@ -819,7 +819,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - pr_debug("%s no matching queue found => ENODEV\n", __func__); + pr_debug("no matching queue found => ENODEV\n"); rc = -ENODEV; goto out; } @@ -940,8 +940,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - pr_debug("%s no match for address %02x.%04x => ENODEV\n", - __func__, xcrb->user_defined, *domain); + pr_debug("no match for address %02x.%04x => ENODEV\n", + xcrb->user_defined, *domain); rc = -ENODEV; goto out; } @@ -991,7 +991,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcrb) if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) - pr_debug("%s rc=%d\n", __func__, rc); + pr_debug("rc=%d\n", rc); return rc; } @@ -1138,15 +1138,13 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (!pref_zq) { if (targets && target_num == 1) { - pr_debug("%s no match for address %02x.%04x => ENODEV\n", - __func__, (int)targets->ap_id, - (int)targets->dom_id); + pr_debug("no match for address %02x.%04x => ENODEV\n", + (int)targets->ap_id, (int)targets->dom_id); } else if (targets) { - pr_debug("%s no match for %d target addrs => ENODEV\n", - __func__, (int)target_num); + pr_debug("no match for %d target addrs => ENODEV\n", + (int)target_num); } else { - pr_debug("%s no match for address ff.ffff => ENODEV\n", - __func__); + pr_debug("no match for address ff.ffff => ENODEV\n"); } rc = -ENODEV; goto out_free; @@ -1195,7 +1193,7 @@ long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) - pr_debug("%s rc=%d\n", __func__, rc); + pr_debug("rc=%d\n", rc); return rc; } @@ -1247,7 +1245,7 @@ static long zcrypt_rng(char *buffer) spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - pr_debug("%s no matching queue found => ENODEV\n", __func__); + pr_debug("no matching queue found => ENODEV\n"); rc = -ENODEV; goto out; } @@ -1910,7 +1908,6 @@ static const struct file_operations zcrypt_fops = { #endif .open = zcrypt_open, .release = zcrypt_release, - .llseek = no_llseek, }; /* @@ -2037,8 +2034,7 @@ int zcrypt_wait_api_operational(void) break; default: /* other failure */ - pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n", - __func__, rc); + pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc); break; } break; diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 7bef2cc4e46195..43a27cb3db847e 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -172,7 +172,7 @@ EXPORT_SYMBOL(cca_check_secaescipherkey); * key token. Returns 0 on success or errno value on failure. 
*/ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, - const u8 *token, size_t keysize, + const u8 *token, u32 keysize, int checkcpacfexport) { struct eccprivkeytoken *t = (struct eccprivkeytoken *)token; @@ -187,7 +187,7 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, } if (t->len > keysize) { if (dbg) - DBF("%s token check failed, len %d > keysize %zu\n", + DBF("%s token check failed, len %d > keysize %u\n", __func__, (int)t->len, keysize); return -EINVAL; } @@ -737,7 +737,7 @@ static const u8 aes_cipher_key_skeleton[] = { * Generate (random) CCA AES CIPHER secure key. */ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, u32 *keybufsize) { int rc; u8 *mem, *ptr; @@ -1085,7 +1085,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize) + const u8 *clrkey, u8 *keybuf, u32 *keybufsize) { int rc; u8 *token; diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h index 5ddf02f965f97e..aed7e838454267 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.h +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -153,7 +153,7 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl, * key token. Returns 0 on success or errno value on failure. */ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, - const u8 *token, size_t keysize, + const u8 *token, u32 keysize, int checkcpacfexport); /* @@ -178,7 +178,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, * Generate (random) CCA AES CIPHER secure key. */ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize); + u8 *keybuf, u32 *keybufsize); /* * Derive proteced key from CCA AES cipher secure key. @@ -190,7 +190,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize); + const u8 *clrkey, u8 *keybuf, u32 *keybufsize); /* * Derive proteced key from CCA ECC secure private key. diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index b43db17a4e0e6a..cb7e6da43602d4 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -203,7 +203,7 @@ static int ep11_kb_decode(const u8 *kb, size_t kblen, * For valid ep11 keyblobs, returns a reference to the wrappingkey verification * pattern. Otherwise NULL. */ -const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen) +const u8 *ep11_kb_wkvp(const u8 *keyblob, u32 keybloblen) { struct ep11keyblob *kb; @@ -217,7 +217,7 @@ EXPORT_SYMBOL(ep11_kb_wkvp); * Simple check if the key blob is a valid EP11 AES key blob with header. */ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp) + const u8 *key, u32 keylen, int checkcpacfexp) { struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); @@ -225,7 +225,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) if (keylen < sizeof(*hdr) + sizeof(*kb)) { - DBF("%s key check failed, keylen %zu < %zu\n", + DBF("%s key check failed, keylen %u < %zu\n", __func__, keylen, sizeof(*hdr) + sizeof(*kb)); return -EINVAL; } @@ -250,7 +250,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, } if (hdr->len > keylen) { if (dbg) - DBF("%s key check failed, header len %d keylen %zu mismatch\n", + DBF("%s key check failed, header len %d keylen %u mismatch\n", __func__, (int)hdr->len, keylen); return -EINVAL; } @@ -284,7 +284,7 @@ EXPORT_SYMBOL(ep11_check_aes_key_with_hdr); * Simple check if the key blob is a valid EP11 ECC key blob with header. */ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp) + const u8 *key, u32 keylen, int checkcpacfexp) { struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); @@ -292,7 +292,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) if (keylen < sizeof(*hdr) + sizeof(*kb)) { - DBF("%s key check failed, keylen %zu < %zu\n", + DBF("%s key check failed, keylen %u < %zu\n", __func__, keylen, sizeof(*hdr) + sizeof(*kb)); return -EINVAL; } @@ -317,7 +317,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, } if (hdr->len > keylen) { if (dbg) - DBF("%s key check failed, header len %d keylen %zu mismatch\n", + DBF("%s key check failed, header len %d keylen %u mismatch\n", __func__, (int)hdr->len, keylen); return -EINVAL; } @@ -352,14 +352,14 @@ EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr); * the header in the session field (old style EP11 AES key). */ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp) + const u8 *key, u32 keylen, int checkcpacfexp) { struct ep11keyblob *kb = (struct ep11keyblob *)key; #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) if (keylen < sizeof(*kb)) { - DBF("%s key check failed, keylen %zu < %zu\n", + DBF("%s key check failed, keylen %u < %zu\n", __func__, keylen, sizeof(*kb)); return -EINVAL; } @@ -378,7 +378,7 @@ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, } if (kb->head.len > keylen) { if (dbg) - DBF("%s key check failed, header len %d keylen %zu mismatch\n", + DBF("%s key check failed, header len %d keylen %u mismatch\n", __func__, (int)kb->head.len, keylen); return -EINVAL; } @@ -932,7 +932,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, } int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize, u32 keybufver) + u8 *keybuf, u32 *keybufsize, u32 keybufver) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -1256,7 +1256,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, const u8 *enckey, size_t enckeysize, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize, + u8 *keybuf, u32 *keybufsize, u8 keybufver) { struct ep11kblob_header *hdr; @@ -1412,7 +1412,7 @@ static int _ep11_wrapkey(u16 card, u16 domain, } int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize, + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 keytype) { int rc; @@ -1471,7 +1471,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, EXPORT_SYMBOL(ep11_clr2keyblob); int ep11_kblob2protkey(u16 card, u16 dom, - const u8 *keyblob, size_t keybloblen, + const u8 *keyblob, u32 keybloblen, u8 *protkey, u32 *protkeylen, u32 *protkeytype) { struct ep11kblob_header *hdr; diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h index 9d17fd5228a74f..9f1bdffdec6898 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.h +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -54,7 +54,7 @@ static inline bool is_ep11_keyblob(const u8 *key) * For valid ep11 keyblobs, returns a reference to the wrappingkey verification * pattern. Otherwise NULL. */ -const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen); +const u8 *ep11_kb_wkvp(const u8 *kblob, u32 kbloblen); /* * Simple check if the key blob is a valid EP11 AES key blob with header. @@ -63,7 +63,7 @@ const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen); * Returns 0 on success or errno value on failure. */ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp); + const u8 *key, u32 keylen, int checkcpacfexp); /* * Simple check if the key blob is a valid EP11 ECC key blob with header. @@ -72,7 +72,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, * Returns 0 on success or errno value on failure. */ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp); + const u8 *key, u32 keylen, int checkcpacfexp); /* * Simple check if the key blob is a valid EP11 AES key blob with @@ -82,7 +82,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, * Returns 0 on success or errno value on failure. */ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, - const u8 *key, size_t keylen, int checkcpacfexp); + const u8 *key, u32 keylen, int checkcpacfexp); /* EP11 card info struct */ struct ep11_card_info { @@ -115,13 +115,13 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info); * Generate (random) EP11 AES secure key. 
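Narrowing the key blob length parameters from size_t to u32 also forces every affected format string to switch from %zu to %u, since printk specifiers must match the argument type exactly. A small compiler-checkable sketch of the pairing, with hypothetical names:

#include <linux/printk.h>
#include <linux/types.h>

/* %u pairs with the u32 length, %zu with the size_t minimum. */
static void example_len_check(u32 keylen, size_t minlen)
{
	if (keylen < minlen)
		pr_debug("key check failed, keylen %u < %zu\n",
			 keylen, minlen);
}
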
*/ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize, u32 keybufver); + u8 *keybuf, u32 *keybufsize, u32 keybufver); /* * Generate EP11 AES secure key with given clear key value. */ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, size_t *keybufsize, + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 keytype); /* @@ -149,7 +149,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, /* * Derive proteced key from EP11 key blob (AES and ECC keys). */ -int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen, +int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, u32 *protkeytype); void zcrypt_ep11misc_exit(void); diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 3b39cb8f926dbd..adc65eddaa1e38 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -427,7 +427,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq, len = t80h->len; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - pr_debug("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("len mismatch => EMSGSIZE\n"); msg->rc = -EMSGSIZE; goto out; } @@ -487,8 +487,8 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, out: ap_msg->private = NULL; if (rc) - pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), + pr_debug("send me cprb at dev=%02x.%04x rc=%d\n", + AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), rc); return rc; } @@ -537,8 +537,8 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, out: ap_msg->private = NULL; if (rc) - pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), + pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n", + AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), rc); return rc; } diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 215f257d2360d0..b64c9d9fc6137e 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -437,9 +437,8 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, ap_msg->flags |= AP_MSG_FLAG_ADMIN; break; default: - pr_debug("%s unknown CPRB minor version '%c%c'\n", - __func__, msg->cprbx.func_id[0], - msg->cprbx.func_id[1]); + pr_debug("unknown CPRB minor version '%c%c'\n", + msg->cprbx.func_id[0], msg->cprbx.func_id[1]); } /* copy data block */ @@ -629,9 +628,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, /* Copy CPRB to user */ if (xcrb->reply_control_blk_length < msg->fmt2.count1) { - pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n", - __func__, xcrb->reply_control_blk_length, - msg->fmt2.count1); + pr_debug("reply_control_blk_length %u < required %u => EMSGSIZE\n", + xcrb->reply_control_blk_length, msg->fmt2.count1); return -EMSGSIZE; } if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr, @@ -642,9 +640,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, /* Copy data buffer to user */ if (msg->fmt2.count2) { if (xcrb->reply_data_length < msg->fmt2.count2) { - pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n", - __func__, xcrb->reply_data_length, - msg->fmt2.count2); + pr_debug("reply_data_length %u < required %u => EMSGSIZE\n", + 
xcrb->reply_data_length, msg->fmt2.count2); return -EMSGSIZE; } if (z_copy_to_user(userspace, xcrb->reply_data_addr, @@ -673,9 +670,8 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, char *data = reply->msg; if (xcrb->resp_len < msg->fmt2.count1) { - pr_debug("%s resp_len %u < required %u => EMSGSIZE\n", - __func__, (unsigned int)xcrb->resp_len, - msg->fmt2.count1); + pr_debug("resp_len %u < required %u => EMSGSIZE\n", + (unsigned int)xcrb->resp_len, msg->fmt2.count1); return -EMSGSIZE; } @@ -875,8 +871,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, len = sizeof(struct type86x_reply) + t86r->length; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - pr_debug("%s len mismatch => EMSGSIZE\n", - __func__); + pr_debug("len mismatch => EMSGSIZE\n"); msg->rc = -EMSGSIZE; goto out; } @@ -890,8 +885,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, len = t86r->fmt2.offset1 + t86r->fmt2.count1; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - pr_debug("%s len mismatch => EMSGSIZE\n", - __func__); + pr_debug("len mismatch => EMSGSIZE\n"); msg->rc = -EMSGSIZE; goto out; } @@ -941,8 +935,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, len = t86r->fmt2.offset1 + t86r->fmt2.count1; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - pr_debug("%s len mismatch => EMSGSIZE\n", - __func__); + pr_debug("len mismatch => EMSGSIZE\n"); msg->rc = -EMSGSIZE; goto out; } @@ -1154,8 +1147,8 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, out: if (rc) - pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), + pr_debug("send cprb at dev=%02x.%04x rc=%d\n", + AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), rc); return rc; } @@ -1277,8 +1270,8 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * out: if (rc) - pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), + pr_debug("send cprb at dev=%02x.%04x rc=%d\n", + AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), rc); return rc; } diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index cc178874c4a662..8643947fee8ec4 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c @@ -687,7 +687,6 @@ static int openprom_release(struct inode * inode, struct file * file) static const struct file_operations openprom_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .unlocked_ioctl = openprom_ioctl, .compat_ioctl = openprom_compat_ioctl, .open = openprom_open, diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 3c88f29f4c477d..8bbed7a7afb77f 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c @@ -221,7 +221,6 @@ static irqreturn_t uctrl_interrupt(int irq, void *dev_id) static const struct file_operations uctrl_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .unlocked_ioctl = uctrl_ioctl, .open = uctrl_open, }; diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index cea3a79d538e4b..0e10502660def6 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -157,7 +157,6 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd) } ncmd->status = 0; - ncmd->message = 0; } static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd) @@ -199,7 +198,6 @@ static inline void set_resid_from_SCp(struct scsi_cmnd *cmd) * Polls the chip in a reasonably efficient manner waiting for an * 
event to occur. After a short quick poll we begin to yield the CPU * (if possible). In irq contexts the time-out is arbitrarily limited. - * Callers may hold locks as long as they are held in irq mode. * * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. */ @@ -1228,24 +1226,15 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) return ret; } -/* - * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) - * - * Purpose : transfers data in given phase using polled I/O - * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer, - * can_sleep - 1 or 0 when sleeping is permitted or not, respectively. - * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes are transferred or exit - * is in same phase. - * - * Also, *phase, *count, *data are modified in place. +/** + * NCR5380_transfer_pio() - transfers data in given phase using polled I/O + * @instance: instance of driver + * @phase: pointer to what phase is expected + * @count: pointer to number of bytes to transfer + * @data: pointer to data pointer + * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively * - * XXX Note : handling for bus free may be useful. + * Returns: void. *phase, *count, *data are modified in place. */ /* @@ -1254,9 +1243,9 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) * counts, we will always do a pseudo DMA or DMA transfer. */ -static int NCR5380_transfer_pio(struct Scsi_Host *instance, - unsigned char *phase, int *count, - unsigned char **data, unsigned int can_sleep) +static void NCR5380_transfer_pio(struct Scsi_Host *instance, + unsigned char *phase, int *count, + unsigned char **data, unsigned int can_sleep) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char p = *phase, tmp; @@ -1277,8 +1266,8 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, * valid */ - if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, - HZ * can_sleep) < 0) + if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ | SR_BSY, + SR_REQ | SR_BSY, HZ * can_sleep) < 0) break; dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); @@ -1329,17 +1318,19 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n"); -/* - * We have several special cases to consider during REQ/ACK handshaking : - * 1. We were in MSGOUT phase, and we are on the last byte of the - * message. ATN must be dropped as ACK is dropped. - * - * 2. We are in a MSGIN phase, and we are on the last byte of the - * message. We must exit with ACK asserted, so that the calling - * code may raise ATN before dropping ACK to reject the message. - * - * 3. ACK and ATN are clear and the target may proceed as normal. - */ + /* + * We have several special cases to consider during REQ/ACK + * handshaking: + * + * 1. We were in MSGOUT phase, and we are on the last byte of + * the message. ATN must be dropped as ACK is dropped. + * + * 2. We are in MSGIN phase, and we are on the last byte of the + * message. We must exit with ACK asserted, so that the calling + * code may raise ATN before dropping ACK to reject the message. + * + * 3. ACK and ATN are clear & the target may proceed as normal. 
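The old free-form "Function / Purpose / Inputs / Returns" banner is rewritten here as a kernel-doc comment. For reference, the general shape such a comment takes (the function below is hypothetical):

#include <scsi/scsi_host.h>

/**
 * example_transfer() - transfer data in the current bus phase
 * @instance: driver instance
 * @count: in/out pointer to the number of bytes left to transfer
 *
 * Return: 0 on success, -ETIMEDOUT if the target stopped responding.
 */
static int example_transfer(struct Scsi_Host *instance, int *count);
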
+ */ if (!(p == PHASE_MSGIN && c == 1)) { if (p == PHASE_MSGOUT && c > 1) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); @@ -1361,11 +1352,6 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, *phase = tmp & PHASE_MASK; else *phase = PHASE_UNKNOWN; - - if (!c || (*phase == p)) - return 0; - else - return -1; } /** @@ -1485,6 +1471,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char **data) { struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected); int c = *count; unsigned char p = *phase; unsigned char *d = *data; @@ -1496,7 +1483,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, return -1; } - NCR5380_to_ncmd(hostdata->connected)->phase = p; + ncmd->phase = p; if (p & SR_IO) { if (hostdata->read_overruns) @@ -1574,79 +1561,80 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, /* The result is zero iff pseudo DMA send/receive was completed. */ hostdata->dma_len = c; -/* - * A note regarding the DMA errata workarounds for early NMOS silicon. - * - * For DMA sends, we want to wait until the last byte has been - * transferred out over the bus before we turn off DMA mode. Alas, there - * seems to be no terribly good way of doing this on a 5380 under all - * conditions. For non-scatter-gather operations, we can wait until REQ - * and ACK both go false, or until a phase mismatch occurs. Gather-sends - * are nastier, since the device will be expecting more data than we - * are prepared to send it, and REQ will remain asserted. On a 53C8[01] we - * could test Last Byte Sent to assure transfer (I imagine this is precisely - * why this signal was added to the newer chips) but on the older 538[01] - * this signal does not exist. The workaround for this lack is a watchdog; - * we bail out of the wait-loop after a modest amount of wait-time if - * the usual exit conditions are not met. Not a terribly clean or - * correct solution :-% - * - * DMA receive is equally tricky due to a nasty characteristic of the NCR5380. - * If the chip is in DMA receive mode, it will respond to a target's - * REQ by latching the SCSI data into the INPUT DATA register and asserting - * ACK, even if it has _already_ been notified by the DMA controller that - * the current DMA transfer has completed! If the NCR5380 is then taken - * out of DMA mode, this already-acknowledged byte is lost. This is - * not a problem for "one DMA transfer per READ command", because - * the situation will never arise... either all of the data is DMA'ed - * properly, or the target switches to MESSAGE IN phase to signal a - * disconnection (either operation bringing the DMA to a clean halt). - * However, in order to handle scatter-receive, we must work around the - * problem. The chosen fix is to DMA fewer bytes, then check for the - * condition before taking the NCR5380 out of DMA mode. One or two extra - * bytes are transferred via PIO as necessary to fill out the original - * request. - */ - - if (hostdata->flags & FLAG_DMA_FIXUP) { - if (p & SR_IO) { - /* - * The workaround was to transfer fewer bytes than we - * intended to with the pseudo-DMA read function, wait for - * the chip to latch the last byte, read it, and then disable - * pseudo-DMA mode. - * - * After REQ is asserted, the NCR5380 asserts DRQ and ACK. - * REQ is deasserted when ACK is asserted, and not reasserted - * until ACK goes false. 
Since the NCR5380 won't lower ACK - * until DACK is asserted, which won't happen unless we twiddle - * the DMA port or we take the NCR5380 out of DMA mode, we - * can guarantee that we won't handshake another extra - * byte. - */ + /* + * A note regarding the DMA errata workarounds for early NMOS silicon. + * + * For DMA sends, we want to wait until the last byte has been + * transferred out over the bus before we turn off DMA mode. Alas, there + * seems to be no terribly good way of doing this on a 5380 under all + * conditions. For non-scatter-gather operations, we can wait until REQ + * and ACK both go false, or until a phase mismatch occurs. Gather-sends + * are nastier, since the device will be expecting more data than we + * are prepared to send it, and REQ will remain asserted. On a 53C8[01] + * we could test Last Byte Sent to assure transfer (I imagine this is + * precisely why this signal was added to the newer chips) but on the + * older 538[01] this signal does not exist. The workaround for this + * lack is a watchdog; we bail out of the wait-loop after a modest + * amount of wait-time if the usual exit conditions are not met. + * Not a terribly clean or correct solution :-% + * + * DMA receive is equally tricky due to a nasty characteristic of the + * NCR5380. If the chip is in DMA receive mode, it will respond to a + * target's REQ by latching the SCSI data into the INPUT DATA register + * and asserting ACK, even if it has _already_ been notified by the + * DMA controller that the current DMA transfer has completed! If the + * NCR5380 is then taken out of DMA mode, this already-acknowledged + * byte is lost. + * + * This is not a problem for "one DMA transfer per READ + * command", because the situation will never arise... either all of + * the data is DMA'ed properly, or the target switches to MESSAGE IN + * phase to signal a disconnection (either operation bringing the DMA + * to a clean halt). However, in order to handle scatter-receive, we + * must work around the problem. The chosen fix is to DMA fewer bytes, + * then check for the condition before taking the NCR5380 out of DMA + * mode. One or two extra bytes are transferred via PIO as necessary + * to fill out the original request. + */ - if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ, BASR_DRQ, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); - } - if (NCR5380_poll_politely(hostdata, STATUS_REG, - SR_REQ, 0, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); - } - d[*count - 1] = NCR5380_read(INPUT_DATA_REG); - } else { - /* - * Wait for the last byte to be sent. If REQ is being asserted for - * the byte we're interested, we'll ACK it and it will go false. - */ - if (NCR5380_poll_politely2(hostdata, - BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, - BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); + if ((hostdata->flags & FLAG_DMA_FIXUP) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + /* + * The workaround was to transfer fewer bytes than we + * intended to with the pseudo-DMA receive function, wait for + * the chip to latch the last byte, read it, and then disable + * DMA mode. + * + * After REQ is asserted, the NCR5380 asserts DRQ and ACK. + * REQ is deasserted when ACK is asserted, and not reasserted + * until ACK goes false. 
Since the NCR5380 won't lower ACK + * until DACK is asserted, which won't happen unless we twiddle + * the DMA port or we take the NCR5380 out of DMA mode, we + * can guarantee that we won't handshake another extra + * byte. + * + * If sending, wait for the last byte to be sent. If REQ is + * being asserted for the byte we're interested, we'll ACK it + * and it will go false. + */ + if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_DRQ, BASR_DRQ, 0)) { + if ((p & SR_IO) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + if (!NCR5380_poll_politely(hostdata, STATUS_REG, + SR_REQ, 0, 0)) { + d[c] = NCR5380_read(INPUT_DATA_REG); + --ncmd->this_residual; + } else { + result = -1; + scmd_printk(KERN_ERR, hostdata->connected, + "PDMA fixup: !REQ timeout\n"); + } } + } else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) { + result = -1; + scmd_printk(KERN_ERR, hostdata->connected, + "PDMA fixup: DRQ timeout\n"); } } @@ -1666,9 +1654,6 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * Side effects : SCSI things happen, the disconnected queue will be * modified if a command disconnects, *instance->connected will * change. - * - * XXX Note : we need to watch for bus free or a reset condition here - * to recover from an unexpected bus free condition. */ static void NCR5380_information_transfer(struct Scsi_Host *instance) @@ -1807,9 +1792,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) return; case PHASE_MSGIN: len = 1; + tmp = 0xff; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); - ncmd->message = tmp; + if (tmp == 0xff) + break; switch (tmp) { case ABORT: @@ -1996,6 +1983,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) break; case PHASE_STATIN: len = 1; + tmp = ncmd->status; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data, 0); ncmd->status = tmp; @@ -2005,9 +1993,20 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } else { + int err; + spin_unlock_irq(&hostdata->lock); - NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ); + err = NCR5380_poll_politely(hostdata, STATUS_REG, + SR_REQ, SR_REQ, HZ); spin_lock_irq(&hostdata->lock); + + if (err < 0 && hostdata->connected && + !(NCR5380_read(STATUS_REG) & SR_BSY)) { + scmd_printk(KERN_ERR, hostdata->connected, + "BSY signal lost\n"); + do_reset(instance); + bus_reset_cleanup(instance); + } } } } diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 8dc2be4212dce6..d402d4bffcb267 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -3,10 +3,10 @@ * NCR 5380 defines * * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix consulting and custom programming) - * drew@colorado.edu - * +1 (303) 666-5836 + * Visionary Computing + * (Unix consulting and custom programming) + * drew@colorado.edu + * +1 (303) 666-5836 * * For more information, please consult * @@ -78,7 +78,7 @@ #define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. 
drivers */ #define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */ #define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */ -#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */ +#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */ #define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */ #define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */ @@ -135,7 +135,7 @@ #define BASR_IRQ 0x10 /* ro mirror of IRQ pin */ #define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */ #define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */ -#define BASR_ATN 0x02 /* ro BUS status */ +#define BASR_ATN 0x02 /* ro BUS status */ #define BASR_ACK 0x01 /* ro BUS status */ /* Write any value to this register to start a DMA send */ @@ -170,7 +170,7 @@ #define CSR_BASE CSR_53C80_INTR /* Note : PHASE_* macros are based on the values of the STATUS register */ -#define PHASE_MASK (SR_MSG | SR_CD | SR_IO) +#define PHASE_MASK (SR_MSG | SR_CD | SR_IO) #define PHASE_DATAOUT 0 #define PHASE_DATAIN SR_IO @@ -231,7 +231,6 @@ struct NCR5380_cmd { int this_residual; struct scatterlist *buffer; int status; - int message; int phase; struct list_head list; }; @@ -286,8 +285,9 @@ static const char *NCR5380_info(struct Scsi_Host *instance); static void NCR5380_reselect(struct Scsi_Host *instance); static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); -static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data, - unsigned int can_sleep); +static void NCR5380_transfer_pio(struct Scsi_Host *instance, + unsigned char *phase, int *count, + unsigned char **data, unsigned int can_sleep); static int NCR5380_poll_politely2(struct NCR5380_hostdata *, unsigned int, u8, u8, unsigned int, u8, u8, unsigned long); diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index b22857c6f3f4f9..abf6a82b74af35 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include @@ -1267,7 +1267,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 return ret; command = ContainerRawIo; fibsize = sizeof(struct aac_raw_io) + - ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw)); + (le32_to_cpu(readcmd->sg.count) * sizeof(struct sgentryraw)); } BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1302,7 +1302,7 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u if (ret < 0) return ret; fibsize = sizeof(struct aac_read64) + - ((le32_to_cpu(readcmd->sg.count) - 1) * + (le32_to_cpu(readcmd->sg.count) * sizeof (struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1337,7 +1337,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 if (ret < 0) return ret; fibsize = sizeof(struct aac_read) + - ((le32_to_cpu(readcmd->sg.count) - 1) * + (le32_to_cpu(readcmd->sg.count) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1401,7 +1401,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u return ret; command = ContainerRawIo; fibsize = sizeof(struct aac_raw_io) + - ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw)); + (le32_to_cpu(writecmd->sg.count) * sizeof(struct 
sgentryraw)); } BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1436,7 +1436,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, if (ret < 0) return ret; fibsize = sizeof(struct aac_write64) + - ((le32_to_cpu(writecmd->sg.count) - 1) * + (le32_to_cpu(writecmd->sg.count) * sizeof (struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1473,7 +1473,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 if (ret < 0) return ret; fibsize = sizeof(struct aac_write) + - ((le32_to_cpu(writecmd->sg.count) - 1) * + (le32_to_cpu(writecmd->sg.count) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1592,9 +1592,9 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd) /* * Build Scatter/Gather list */ - fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) + + fibsize = sizeof(struct aac_srb) + ((le32_to_cpu(srbcmd->sg.count) & 0xff) * - sizeof (struct sgentry64)); + sizeof(struct sgentry64)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1624,7 +1624,7 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd) * Build Scatter/Gather list */ fibsize = sizeof (struct aac_srb) + - (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * + ((le32_to_cpu(srbcmd->sg.count) & 0xff) * sizeof (struct sgentry)); BUG_ON (fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr))); @@ -1693,8 +1693,7 @@ static int aac_send_safw_bmic_cmd(struct aac_dev *dev, fibptr->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable); - fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + - sizeof(struct sgentry64); + fibsize = sizeof(struct aac_srb) + sizeof(struct sgentry64); /* allocate DMA buffer for response */ addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len, @@ -1833,7 +1832,7 @@ static int aac_get_safw_ciss_luns(struct aac_dev *dev) struct aac_ciss_phys_luns_resp *phys_luns; datasize = sizeof(struct aac_ciss_phys_luns_resp) + - (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun); + AAC_MAX_TARGETS * sizeof(struct _ciss_lun); phys_luns = kmalloc(datasize, GFP_KERNEL); if (phys_luns == NULL) goto out; @@ -2267,7 +2266,7 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->a_ops.adapter_bounds = aac_bounds_32; dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write) + sizeof(struct sgentry)) / + sizeof(struct aac_write)) / sizeof(struct sgentry); if (dev->dac_support) { dev->a_ops.adapter_read = aac_read_block64; @@ -2278,8 +2277,7 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write64) + - sizeof(struct sgentry64)) / + sizeof(struct aac_write64)) / sizeof(struct sgentry64); } else { dev->a_ops.adapter_read = aac_read_block; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 7d5a155073c627..1d09d3ac6aa450 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -322,7 +322,7 @@ struct aac_ciss_phys_luns_resp { u8 level3[2]; u8 level2[2]; u8 node_ident[16]; /* phys. node identifier */ - } lun[1]; /* List of phys. devices */ + } lun[]; /* List of phys. 
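The aacraid size calculations lose their "- 1" adjustments because the trailing one-element arrays become C99 flexible array members, so the header size no longer includes a bogus first element. A sketch of the equivalent declaration and allocation pattern, using hypothetical structure names (the __counted_by_le() annotation mirrors the rrq[] annotation a few hunks below):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_sgmap {
	__le32 count;
	struct example_sge {
		__le32 addr;
		__le32 len;
	} sg[] __counted_by_le(count);		/* was: sg[1] */
};

static struct example_sgmap *example_alloc_sgmap(u32 nr, gfp_t gfp)
{
	struct example_sgmap *map;

	/* struct_size() == sizeof(*map) + nr * sizeof(map->sg[0]) */
	map = kzalloc(struct_size(map, sg, nr), gfp);
	if (map)
		map->count = cpu_to_le32(nr);
	return map;
}
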
devices */ }; /* @@ -507,32 +507,27 @@ struct sge_ieee1212 { struct sgmap { __le32 count; - struct sgentry sg[1]; + struct sgentry sg[]; }; struct user_sgmap { u32 count; - struct user_sgentry sg[1]; + struct user_sgentry sg[]; }; struct sgmap64 { __le32 count; - struct sgentry64 sg[1]; + struct sgentry64 sg[]; }; struct user_sgmap64 { u32 count; - struct user_sgentry64 sg[1]; + struct user_sgentry64 sg[]; }; struct sgmapraw { __le32 count; - struct sgentryraw sg[1]; -}; - -struct user_sgmapraw { - u32 count; - struct user_sgentryraw sg[1]; + struct sgentryraw sg[]; }; struct creation_info @@ -873,7 +868,7 @@ union aac_init __le16 element_count; __le16 comp_thresh; __le16 unused; - } rrq[1]; /* up to 64 RRQ addresses */ + } rrq[] __counted_by_le(rr_queue_count); /* up to 64 RRQ addresses */ } r8; }; @@ -2029,8 +2024,8 @@ struct aac_srb_reply }; struct aac_srb_unit { - struct aac_srb srb; struct aac_srb_reply srb_reply; + struct aac_srb srb; }; /* diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index e7cc927ed952dd..68240d6f27abe3 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -523,7 +523,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || + if ((fibsize < sizeof(struct user_aac_srb)) || (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; @@ -561,7 +561,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) rcode = -EINVAL; goto cleanup; } - actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + + actual_fibsize = sizeof(struct aac_srb) + ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry)); actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) * (sizeof(struct sgentry64) - sizeof(struct sgentry)); diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 0f64b024430376..28cf18955a088e 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -522,8 +522,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) spin_lock_init(&dev->iq_lock); dev->max_fib_size = sizeof(struct hw_fib); dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size - - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write) + sizeof(struct sgentry)) + - sizeof(struct aac_fibhdr) - sizeof(struct aac_write)) / sizeof(struct sgentry); dev->comm_interface = AAC_COMM_PRODUCER; dev->raw_io_interface = dev->raw_io_64 = 0; diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 25cee03d7f9737..47287559c768f3 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -2327,8 +2327,9 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); sg64->sg[0].count = cpu_to_le32(datasize); - ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb), - FsaNormal, 1, 1, NULL, NULL); + ret = aac_fib_send(ScsiPortCommand64, fibptr, + sizeof(struct aac_srb) + sizeof(struct sgentry), + FsaNormal, 1, 1, NULL, NULL); dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr); diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 11ef58204e96f1..28115ed637e81c 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -410,7 +410,7 @@ static void aac_src_start_adapter(struct aac_dev *dev) lower_32_bits(dev->init_pa), 
upper_32_bits(dev->init_pa), sizeof(struct _r8) + - (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), + AAC_MAX_HRRQ * sizeof(struct _rrq), 0, 0, 0, NULL, NULL, NULL, NULL, NULL); } else { init->r7.host_elapsed_seconds = diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 06acb5ff609ee7..76a1e373386efb 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5528,7 +5528,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, struct beiscsi_hba *phba = NULL; struct be_eq_obj *pbe_eq; unsigned int s_handle; - char wq_name[20]; int ret, i; ret = beiscsi_enable_pci(pcidev); @@ -5634,9 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; - snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq", - phba->shost->host_no); - phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name); + phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1, + phba->shost->host_no); if (!phba->wq) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_dev_probe-" diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 5023c0ab42777a..e52ce9b01f49a1 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -1431,7 +1431,7 @@ bfa_cb_lps_flogo_comp(void *bfad, void *uarg) * param[in] vf_id - VF_ID * * return - * If lookup succeeds, retuns fcs vf object, otherwise returns NULL + * If lookup succeeds, returns fcs vf object, otherwise returns NULL */ bfa_fcs_vf_t * bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index a9d3d8562d3c17..66fb701401de76 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -766,9 +766,8 @@ bfad_thread_workq(struct bfad_s *bfad) struct bfad_im_s *im = bfad->im; bfa_trc(bfad, 0); - snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d", - bfad->inst_no); - im->drv_workq = create_singlethread_workqueue(im->drv_workq_name); + im->drv_workq = alloc_ordered_workqueue("bfad_wq_%d", WQ_MEM_RECLAIM, + bfad->inst_no); if (!im->drv_workq) return BFA_STATUS_FAILED; diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index 4353feedf76ad2..0884af04bd1f6e 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -134,7 +134,6 @@ struct bfad_fcp_binding { struct bfad_im_s { struct bfad_s *bfad; struct workqueue_struct *drv_workq; - char drv_workq_name[KOBJ_NAME_LEN]; struct work_struct aen_im_notify_work; }; diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 7e74f77da14f2f..6d47a4d8eed6b8 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -358,18 +358,12 @@ struct bnx2fc_rport { dma_addr_t lcq_dma; u32 lcq_mem_size; - void *ofld_req[4]; - dma_addr_t ofld_req_dma[4]; - void *enbl_req; - dma_addr_t enbl_req_dma; - spinlock_t tgt_lock; spinlock_t cq_lock; atomic_t num_active_ios; u32 flush_in_prog; unsigned long timestamp; unsigned long retry_delay_timestamp; - struct list_head free_task_list; struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; struct list_head active_cmd_queue; struct list_head els_queue; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 1078c20c5ef670..f49783b89d04f4 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -2363,8 +2363,8 @@ static int _bnx2fc_create(struct net_device *netdev, interface->vlan_id = vlan_id; interface->tm_timeout = 
BNX2FC_TM_TIMEOUT; - interface->timer_work_queue = - create_singlethread_workqueue("bnx2fc_timer_wq"); + interface->timer_work_queue = alloc_ordered_workqueue( + "%s", WQ_MEM_RECLAIM, "bnx2fc_timer_wq"); if (!interface->timer_work_queue) { printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); rc = -EINVAL; diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index df7d04afce05af..7030efee5c46c5 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -815,11 +815,6 @@ extern struct bnx2i_hba *get_adapter_list_head(void); struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, u16 iscsi_cid); -int bnx2i_alloc_ep_pool(void); -void bnx2i_release_ep_pool(void); -struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba); -struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba); - struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic); struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic); @@ -869,12 +864,6 @@ extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep); -/* Debug related function prototypes */ -extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); - extern int bnx2i_percpu_io_thread(void *arg); extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index 5b3ffefae476d0..6cc1d53165a0df 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 05e1a63e00c3a1..8329f0cab4e7db 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index d92cf1dccc2f91..0909b03e24976b 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -485,7 +485,6 @@ struct cxgbi_device { unsigned char nmtus; unsigned char nports; struct pci_dev *pdev; - struct dentry *debugfs_root; struct iscsi_transport *itp; struct module *owner; @@ -499,7 +498,6 @@ struct cxgbi_device { unsigned int rxq_idx_cntr; struct cxgbi_ports_map pmap; - void (*dev_ddp_cleanup)(struct cxgbi_device *); struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *); int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *, struct cxgbi_task_tag_info *); @@ -512,7 +510,6 @@ struct cxgbi_device { unsigned int, int); void (*csk_release_offload_resources)(struct cxgbi_sock *); - int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32); int (*csk_push_tx_frames)(struct cxgbi_sock *, int); void (*csk_send_abort_req)(struct cxgbi_sock *); diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c index 52405c6462f809..962c797fda07d9 100644 --- a/drivers/scsi/cxlflash/lunmgt.c +++ b/drivers/scsi/cxlflash/lunmgt.c @@ -8,7 +8,7 @@ * Copyright (C) 2015 IBM Corporation */ -#include +#include #include #include diff --git 
a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index e4b45b7e327774..60d62b93d62442 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index 2d356fe2457a2a..b375509d14709e 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index 35326e31199131..32e8077033778f 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 4eb0837298d4d2..1bf5948d1188f6 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index f8a09e3eba582c..6e1b252cea0e21 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -822,7 +822,8 @@ static int __init rdac_init(void) /* * Create workqueue to handle mode selects for rdac */ - kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd"); + kmpath_rdacd = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "kmpath_rdacd"); if (!kmpath_rdacd) { scsi_unregister_device_handler(&rdac_dh); printk(KERN_ERR "kmpath_rdacd creation failed.\n"); diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c index 6a6ec32c46bdf3..9ac69356b13e08 100644 --- a/drivers/scsi/elx/efct/efct_lio.c +++ b/drivers/scsi/elx/efct/efct_lio.c @@ -1114,7 +1114,8 @@ int efct_scsi_tgt_new_device(struct efct *efct) atomic_set(&efct->tgt_efct.watermark_hit, 0); atomic_set(&efct->tgt_efct.initiator_count, 0); - lio_wq = create_singlethread_workqueue("efct_lio_worker"); + lio_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + "efct_lio_worker"); if (!lio_wq) { efc_log_err(efct, "workqueue create failed\n"); return -EIO; diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c index 2e83a667901fec..1a7437f4328e87 100644 --- a/drivers/scsi/elx/libefc/efc_nport.c +++ b/drivers/scsi/elx/libefc/efc_nport.c @@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain, spin_lock_irqsave(&efc->lock, flags); list_for_each_entry(nport, &domain->nport_list, list_entry) { if (nport->wwpn == wwpn && nport->wwnn == wwnn) { - kref_put(&nport->ref, nport->release); /* Shutdown this NPORT */ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); + kref_put(&nport->ref, nport->release); break; } } diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index ed63f7a9ea54f0..8a133254c4f657 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -929,7 +929,6 @@ struct esas2r_adapter { struct list_head fw_event_list; spinlock_t fw_event_lock; u8 fw_events_off; /* if '1', then ignore events */ - char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN]; /* * intr_mode stores the interrupt mode currently being used by this * adapter. 
it is based on the interrupt_mode module parameter, but diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index c1a5ab662dc888..0cea5f3d1a08e0 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -311,9 +311,8 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, sema_init(&a->nvram_semaphore, 1); esas2r_fw_event_off(a); - snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d", - a->index); - a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name); + a->fw_event_q = + alloc_ordered_workqueue("esas2r/%d", WQ_MEM_RECLAIM, a->index); init_waitqueue_head(&a->buffered_ioctl_waiter); init_waitqueue_head(&a->nvram_waiter); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index f1429f27017043..39aec710660cfa 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -722,7 +722,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) * will return 0, so do this first. */ mfs = netdev->mtu; - if (netdev->features & NETIF_F_FCOE_MTU) { + if (netdev->fcoe_mtu) { mfs = FCOE_MTU; FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs); } @@ -1863,7 +1863,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, case NETDEV_CHANGE: break; case NETDEV_CHANGEMTU: - if (netdev->features & NETIF_F_FCOE_MTU) + if (netdev->fcoe_mtu) break; mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)); diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 7d3b904af9e80f..0609ca6b935300 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -45,12 +45,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo, */ #define fcoe_ctlr_id(x) \ ((x)->id) -#define fcoe_ctlr_work_q_name(x) \ - ((x)->work_q_name) #define fcoe_ctlr_work_q(x) \ ((x)->work_q) -#define fcoe_ctlr_devloss_work_q_name(x) \ - ((x)->devloss_work_q_name) #define fcoe_ctlr_devloss_work_q(x) \ ((x)->devloss_work_q) #define fcoe_ctlr_mode(x) \ @@ -797,18 +793,14 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo; - snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name), - "ctlr_wq_%d", ctlr->id); - ctlr->work_q = create_singlethread_workqueue( - ctlr->work_q_name); + ctlr->work_q = alloc_ordered_workqueue("ctlr_wq_%d", WQ_MEM_RECLAIM, + ctlr->id); if (!ctlr->work_q) goto out_del; - snprintf(ctlr->devloss_work_q_name, - sizeof(ctlr->devloss_work_q_name), - "ctlr_dl_wq_%d", ctlr->id); - ctlr->devloss_work_q = create_singlethread_workqueue( - ctlr->devloss_work_q_name); + ctlr->devloss_work_q = alloc_ordered_workqueue("ctlr_dl_wq_%d", + WQ_MEM_RECLAIM, + ctlr->id); if (!ctlr->devloss_work_q) goto out_del_q; diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 29eead383eb9a4..0044717d4486c6 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -1161,14 +1161,16 @@ static int __init fnic_init_module(void) goto err_create_fnic_ioreq_slab; } - fnic_event_queue = create_singlethread_workqueue("fnic_event_wq"); + fnic_event_queue = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq"); if (!fnic_event_queue) { printk(KERN_ERR PFX "fnic work queue create failed\n"); err = -ENOMEM; goto err_create_fnic_workq; } - fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q"); + fnic_fip_queue = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_fip_q"); if (!fnic_fip_queue) { printk(KERN_ERR 
PFX "fnic FIP work queue create failed\n"); err = -ENOMEM; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index ec1a3e7ee94d30..6219807ce3b9e1 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -2302,7 +2302,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) hisi_hba->last_slot_index = 0; - hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); + hisi_hba->wq = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev)); if (!hisi_hba->wq) { dev_err(dev, "sas_alloc: failed to create workqueue\n"); goto err_out; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index feda9b54b4434f..4cd3a3eab6f1c4 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2421,7 +2421,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba, spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); - dev_info(dev, "slot complete: task(%pK) ignored\n ", + dev_info(dev, "slot complete: task(%pK) ignored\n", task); return; } diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 7f987335b44c1a..e021f1106beabf 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -292,11 +292,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, } if (shost->transportt->create_work_queue) { - snprintf(shost->work_q_name, sizeof(shost->work_q_name), - "scsi_wq_%d", shost->host_no); - shost->work_q = alloc_workqueue("%s", - WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, - 1, shost->work_q_name); + shost->work_q = alloc_workqueue( + "scsi_wq_%d", + WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1, + shost->host_no); if (!shost->work_q) { error = -EINVAL; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index e044ed09d7e0d4..0c49414c1f350b 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -51,7 +51,7 @@ #include #include #include -#include +#include #include #include "hpsa_cmd.h" #include "hpsa.h" diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index a3d1013c83075c..e66c3ef74267a5 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -37,6 +37,7 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT; static u64 max_lun = IBMVFC_MAX_LUN; static unsigned int max_targets = IBMVFC_MAX_TARGETS; static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; +static u16 max_sectors = IBMVFC_MAX_SECTORS; static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH; static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; static unsigned int ibmvfc_debug = IBMVFC_DEBUG; @@ -83,6 +84,9 @@ MODULE_PARM_DESC(default_timeout, module_param_named(max_requests, max_requests, uint, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. " "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); +module_param_named(max_sectors, max_sectors, ushort, S_IRUGO); +MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. " + "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]"); module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO); MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. 
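Much of the workqueue churn in this series swaps create_singlethread_workqueue() for alloc_ordered_workqueue(), which accepts a printf-style name and an explicit WQ_MEM_RECLAIM flag, so the drivers can drop their snprintf'd name buffers. A minimal sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_init_wq(int instance)
{
	/*
	 * Ordered (at most one work item running) and able to make
	 * forward progress under memory pressure, matching the old
	 * single-threaded workqueue behaviour.
	 */
	example_wq = alloc_ordered_workqueue("example_wq_%d",
					     WQ_MEM_RECLAIM, instance);
	return example_wq ? 0 : -ENOMEM;
}
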
" "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]"); @@ -1494,7 +1498,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) memset(login_info, 0, sizeof(*login_info)); login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX); - login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9); + login_info->max_dma_len = cpu_to_be64(max_sectors << 9); login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu)); login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp)); login_info->partition_num = cpu_to_be32(vhost->partition_number); @@ -5230,7 +5234,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) } vhost->logged_in = 1; - npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS); + npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors); dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", rsp->partition_name, rsp->device_name, rsp->port_loc_code, rsp->drc_name, npiv_max_sectors); @@ -6329,7 +6333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) shost->can_queue = scsi_qdepth; shost->max_lun = max_lun; shost->max_id = max_targets; - shost->max_sectors = IBMVFC_MAX_SECTORS; + shost->max_sectors = max_sectors; shost->max_cmd_len = IBMVFC_MAX_CDB_LEN; shost->unique_id = shost->host_no; shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1; @@ -6556,6 +6560,7 @@ static struct fc_function_template ibmvfc_transport_functions = { **/ static int __init ibmvfc_module_init(void) { + int min_max_sectors = PAGE_SIZE >> 9; int rc; if (!firmware_has_feature(FW_FEATURE_VIO)) @@ -6564,6 +6569,16 @@ static int __init ibmvfc_module_init(void) printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n", IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE); + /* + * Range check the max_sectors module parameter. The upper bounds is + * implicity checked since the parameter is a ushort. 
+ */ + if (max_sectors < min_max_sectors) { + printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n", + min_max_sectors); + max_sectors = min_max_sectors; + } + ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions); if (!ibmvfc_transport_template) return -ENOMEM; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 745ad5ac72517b..c73ed2314ad04b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -32,7 +32,7 @@ #define IBMVFC_DEBUG 0 #define IBMVFC_MAX_TARGETS 1024 #define IBMVFC_MAX_LUN 0xffffffff -#define IBMVFC_MAX_SECTORS 0xffffu +#define IBMVFC_MAX_SECTORS 2048 #define IBMVFC_MAX_DISC_THREADS 4 #define IBMVFC_TGT_MEMPOOL_SZ 64 #define IBMVFC_MAX_CMDS_PER_LUN 64 diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 2fca17cf8b5188..16d085d56e9d7d 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -3425,7 +3425,6 @@ static int ibmvscsis_probe(struct vio_dev *vdev, struct scsi_info *vscsi; int rc = 0; long hrc = 0; - char wq_name[24]; vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL); if (!vscsi) { @@ -3536,8 +3535,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev, init_completion(&vscsi->wait_idle); init_completion(&vscsi->unconfig); - snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); - vscsi->work_q = create_workqueue(wq_name); + vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1, + dev_name(&vdev->dev)); if (!vscsi->work_q) { rc = -ENOMEM; dev_err(&vscsi->dev, "create_workqueue failed\n"); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index c77d6ca1a2105d..fde7145835de46 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -13,7 +13,7 @@ #ifndef _IPR_H #define _IPR_H -#include +#include #include #include #include @@ -1030,7 +1030,7 @@ struct ipr_hostrcb_fabric_desc { #define IPR_PATH_FAILED 0x03 __be16 num_entries; - struct ipr_hostrcb_config_element elem[1]; + struct ipr_hostrcb_config_element elem[]; }__attribute__((packed, aligned (4))); struct ipr_hostrcb64_fabric_desc { @@ -1044,7 +1044,7 @@ struct ipr_hostrcb64_fabric_desc { u8 res_path[8]; u8 reserved3[6]; __be16 num_entries; - struct ipr_hostrcb64_config_element elem[1]; + struct ipr_hostrcb64_config_element elem[]; }__attribute__((packed, aligned (8))); #define for_each_hrrq(hrrq, ioa_cfg) \ diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 384f48ff64d721..60d621ad002487 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index 8d3006edbe1283..4fa18a317f77f3 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include #include #include diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h index 6b7e4ca6b7b5ee..02e31db31d68e6 100644 --- a/drivers/scsi/libfc/fc_encode.h +++ b/drivers/scsi/libfc/fc_encode.h @@ -7,7 +7,7 @@ #ifndef _FC_ENCODE_H_ #define _FC_ENCODE_H_ -#include +#include #include #include diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 1d91c457527f38..f84a7e6ae379d6 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2693,7 +2693,8 @@ int fc_setup_exch_mgr(void) fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); fc_cpu_mask = (1 << 
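The new ibmvfc max_sectors parameter only needs an explicit lower bound at module init, since declaring it as a ushort already caps the upper end. The clamp pattern, sketched with hypothetical names:

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>

static u16 example_max_sectors = 2048;
module_param_named(max_sectors, example_max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "Maximum sectors per request [Default=2048]");

static int __init example_init(void)
{
	unsigned int min_max_sectors = PAGE_SIZE >> 9;	/* one page minimum */

	if (example_max_sectors < min_max_sectors)
		example_max_sectors = min_max_sectors;
	return 0;
}
module_init(example_init);
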
fc_cpu_order) - 1; - fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); + fc_exch_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + "fc_exch_workqueue"); if (!fc_exch_workqueue) goto err; return 0; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index ab06e9aeb613e7..310fa5add5f087 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -79,7 +79,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 33da3c1085f06b..c25979d96808a9 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -55,7 +55,7 @@ #include #include -#include +#include #include @@ -2263,7 +2263,8 @@ struct fc4_prov fc_rport_t0_prov = { */ int fc_setup_rport(void) { - rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); + rport_event_queue = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fc_rport_eq"); if (!rport_event_queue) return -ENOMEM; return 0; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 0fda8905eabd82..2b1bf990a9dc07 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 88714b7b0dbad1..7b4e7a61965a2b 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -564,7 +564,6 @@ static struct ata_port_operations sas_sata_ops = { .error_handler = ata_std_error_handler, .post_internal_cmd = sas_ata_post_internal, .qc_defer = ata_std_qc_defer, - .qc_prep = ata_noop_qc_prep, .qc_issue = sas_ata_qc_issue, .qc_fill_rtf = sas_ata_qc_fill_rtf, .set_dmamode = sas_ata_set_dmamode, diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 4e6bb3d0f1636b..2b8004eb6f1b35 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include "sas_internal.h" diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 9c8cc723170d16..8566bb1208a057 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -122,12 +122,12 @@ int sas_register_ha(struct sas_ha_struct *sas_ha) error = -ENOMEM; snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev)); - sas_ha->event_q = create_singlethread_workqueue(name); + sas_ha->event_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name); if (!sas_ha->event_q) goto Undo_ports; snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev)); - sas_ha->disco_q = create_singlethread_workqueue(name); + sas_ha->disco_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name); if (!sas_ha->disco_q) goto Undo_event_q; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 7c147d6ea8a8ff..e5a9c5a323f8be 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -306,6 +306,14 @@ struct lpfc_stats { struct lpfc_hba; +/* Data structure to keep withheld FLOGI_ACC information */ +struct lpfc_defer_flogi_acc { + bool flag; + u16 rx_id; + u16 ox_id; + struct lpfc_nodelist *ndlp; + +}; #define LPFC_VMID_TIMER 300 /* timer interval in seconds */ @@ -1430,9 +1438,7 @@ struct lpfc_hba { uint16_t vlan_id; struct list_head fcf_conn_rec_list; - bool defer_flogi_acc_flag; - uint16_t defer_flogi_acc_rx_id; - uint16_t defer_flogi_acc_ox_id; + struct lpfc_defer_flogi_acc 
defer_flogi_acc; spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */ struct list_head ct_ev_waiters; diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 4756a3f825310c..85059b83ea6b4f 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -3208,6 +3208,9 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) cmdiocbq->num_bdes = num_bde; cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK; + if (phba->cfg_vmid_app_header) + cmdiocbq->cmd_flag |= LPFC_IO_VMID; + cmdiocbq->vport = phba->pport; cmdiocbq->cmd_cmpl = NULL; cmdiocbq->bpl_dmabuf = txbmp; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 2dedd1493e5bae..134bc96dd13400 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1572,8 +1572,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } } } else - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "3065 GFT_ID failed x%08x\n", ulp_status); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY, + "3065 GFT_ID status x%08x\n", ulp_status); out: lpfc_ct_free_iocb(phba, cmdiocb); @@ -1647,6 +1647,18 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } out: + /* If the caller wanted a synchronous DA_ID completion, signal the + * wait obj and clear flag to reset the vport. + */ + if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) { + if (ndlp->da_id_waitq) + wake_up(ndlp->da_id_waitq); + } + + spin_lock_irq(&ndlp->lock); + ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID; + spin_unlock_irq(&ndlp->lock); + lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; @@ -2246,7 +2258,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0229 FDMI cmd %04x failed, latt = %d " + "0229 FDMI cmd %04x latt = %d " "ulp_status: x%x, rid x%x\n", be16_to_cpu(fdmi_cmd), latt, ulp_status, ulp_word4); @@ -2263,9 +2275,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for a CT LS_RJT response */ cmd = be16_to_cpu(fdmi_cmd); if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) { - /* FDMI rsp failed */ + /* Log FDMI reject */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS, - "0220 FDMI cmd failed FS_RJT Data: x%x", cmd); + "0220 FDMI cmd FS_RJT Data: x%x", cmd); /* Should we fallback to FDMI-2 / FDMI-1 ? */ switch (cmd) { diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index f82615d87c4bbb..f5ae8cc158205c 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -90,6 +90,8 @@ enum lpfc_nlp_save_flags { NLP_IN_RECOV_POST_DEV_LOSS = 0x1, /* wait for outstanding LOGO to cmpl */ NLP_WAIT_FOR_LOGO = 0x2, + /* wait for outstanding DA_ID to finish */ + NLP_WAIT_FOR_DA_ID = 0x4 }; struct lpfc_nodelist { @@ -159,7 +161,12 @@ struct lpfc_nodelist { uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ #define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ uint32_t nlp_defer_did; + + /* These wait objects are NPIV specific. These IOs must complete + * synchronously. 
+ */ wait_queue_head_t *logo_waitq; + wait_queue_head_t *da_id_waitq; }; struct lpfc_node_rrq { diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 929cbfc95163bf..d737b897ddd821 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -979,7 +979,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, - "2611 FLOGI failed on FCF (x%x), " + "2611 FLOGI FCF (x%x), " "status:x%x/x%x, tmo:x%x, perform " "roundrobin FCF failover\n", phba->fcf.current_rec.fcf_indx, @@ -997,11 +997,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (!(ulp_status == IOSTAT_LOCAL_REJECT && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE))) - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2858 FLOGI failure Status:x%x/x%x TMO" - ":x%x Data x%lx x%x\n", - ulp_status, ulp_word4, tmo, - phba->hba_flag, phba->fcf.fcf_flag); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2858 FLOGI Status:x%x/x%x TMO" + ":x%x Data x%lx x%x\n", + ulp_status, ulp_word4, tmo, + phba->hba_flag, phba->fcf.fcf_flag); /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { @@ -1023,7 +1023,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_nlp_put(ndlp); lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, - "0150 FLOGI failure Status:x%x/x%x " + "0150 FLOGI Status:x%x/x%x " "xri x%x TMO:x%x refcnt %d\n", ulp_status, ulp_word4, cmdiocb->sli4_xritag, tmo, kref_read(&ndlp->kref)); @@ -1032,11 +1032,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (!(ulp_status == IOSTAT_LOCAL_REJECT && ((ulp_word4 & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE))) { - /* FLOGI failure */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "0100 FLOGI failure Status:x%x/x%x " - "TMO:x%x\n", - ulp_status, ulp_word4, tmo); + /* Warn FLOGI status */ + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0100 FLOGI Status:x%x/x%x " + "TMO:x%x\n", + ulp_status, ulp_word4, tmo); goto flogifail; } @@ -1099,8 +1099,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, sp->cmn.priority_tagging, kref_read(&ndlp->kref)); /* reinitialize the VMID datastructure before returning */ - if (lpfc_is_vmid_enabled(phba)) + if (lpfc_is_vmid_enabled(phba)) { lpfc_reinit_vmid(vport); + vport->vmid_flag = 0; + } if (sp->cmn.priority_tagging) vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | LPFC_VMID_TYPE_PRIO); @@ -1390,7 +1392,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; /* Check for a deferred FLOGI ACC condition */ - if (phba->defer_flogi_acc_flag) { + if (phba->defer_flogi_acc.flag) { /* lookup ndlp for received FLOGI */ ndlp = lpfc_findnode_did(vport, 0); if (!ndlp) @@ -1404,34 +1406,38 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (phba->sli_rev == LPFC_SLI_REV4) { bf_set(wqe_ctxt_tag, &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, - phba->defer_flogi_acc_rx_id); + phba->defer_flogi_acc.rx_id); bf_set(wqe_rcvoxid, &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, - phba->defer_flogi_acc_ox_id); + phba->defer_flogi_acc.ox_id); } else { icmd = &defer_flogi_acc.iocb; - icmd->ulpContext = phba->defer_flogi_acc_rx_id; + icmd->ulpContext = phba->defer_flogi_acc.rx_id; icmd->unsli3.rcvsli3.ox_id = - phba->defer_flogi_acc_ox_id; + phba->defer_flogi_acc.ox_id; } 
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3354 Xmit deferred FLOGI ACC: rx_id: x%x," " ox_id: x%x, hba_flag x%lx\n", - phba->defer_flogi_acc_rx_id, - phba->defer_flogi_acc_ox_id, phba->hba_flag); + phba->defer_flogi_acc.rx_id, + phba->defer_flogi_acc.ox_id, phba->hba_flag); /* Send deferred FLOGI ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, ndlp, NULL); - phba->defer_flogi_acc_flag = false; - vport->fc_myDID = did; + phba->defer_flogi_acc.flag = false; - /* Decrement ndlp reference count to indicate the node can be - * released when other references are removed. + /* Decrement the held ndlp that was incremented when the + * deferred flogi acc flag was set. */ - lpfc_nlp_put(ndlp); + if (phba->defer_flogi_acc.ndlp) { + lpfc_nlp_put(phba->defer_flogi_acc.ndlp); + phba->defer_flogi_acc.ndlp = NULL; + } + + vport->fc_myDID = did; } return 0; @@ -1958,16 +1964,16 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ulp_status) { /* Check for retry */ - /* RRQ failed Don't print the vport to vport rjts */ + /* Warn RRQ status Don't print the vport to vport rjts */ if (ulp_status != IOSTAT_LS_RJT || (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || (phba)->pport->cfg_log_verbose & LOG_ELS) - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2881 RRQ failure DID:%06X Status:" - "x%x/x%x\n", - ndlp->nlp_DID, ulp_status, - ulp_word4); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2881 RRQ DID:%06X Status:" + "x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); } lpfc_clr_rrq_active(phba, rrq->xritag, rrq); @@ -2071,16 +2077,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } goto out; } - /* PLOGI failed Don't print the vport to vport rjts */ + /* Warn PLOGI status Don't print the vport to vport rjts */ if (ulp_status != IOSTAT_LS_RJT || (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || (phba)->pport->cfg_log_verbose & LOG_ELS) - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2753 PLOGI failure DID:%06X " - "Status:x%x/x%x\n", - ndlp->nlp_DID, ulp_status, - ulp_word4); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2753 PLOGI DID:%06X " + "Status:x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) @@ -2317,7 +2323,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_nodelist *ndlp; char *mode; - u32 loglevel; u32 ulp_status; u32 ulp_word4; bool release_node = false; @@ -2366,17 +2371,14 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * could be expected. 
*/ if (test_bit(FC_FABRIC, &vport->fc_flag) || - vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) { - mode = KERN_ERR; - loglevel = LOG_TRACE_EVENT; - } else { + vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) + mode = KERN_WARNING; + else mode = KERN_INFO; - loglevel = LOG_ELS; - } - /* PRLI failed */ - lpfc_printf_vlog(vport, mode, loglevel, - "2754 PRLI failure DID:%06X Status:x%x/x%x, " + /* Warn PRLI status */ + lpfc_printf_vlog(vport, mode, LOG_ELS, + "2754 PRLI DID:%06X Status:x%x/x%x, " "data: x%x x%x x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, ndlp->nlp_state, @@ -2848,11 +2850,11 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } goto out; } - /* ADISC failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2755 ADISC failure DID:%06X Status:x%x/x%x\n", - ndlp->nlp_DID, ulp_status, - ulp_word4); + /* Warn ADISC status */ + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2755 ADISC DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC); @@ -3039,12 +3041,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * discovery. The PLOGI will retry. */ if (ulp_status) { - /* LOGO failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "2756 LOGO failure, No Retry DID:%06X " - "Status:x%x/x%x\n", - ndlp->nlp_DID, ulp_status, - ulp_word4); + /* Warn LOGO status */ + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "2756 LOGO, No Retry DID:%06X " + "Status:x%x/x%x\n", + ndlp->nlp_DID, ulp_status, + ulp_word4); if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) skip_recovery = 1; @@ -4831,11 +4833,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (cmd == ELS_CMD_FDISC) && (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ - lpfc_printf_vlog(vport, KERN_ERR, - LOG_TRACE_EVENT, - "0125 FDISC Failed (x%x). " - "Fabric out of resources\n", - stat.un.lsRjtError); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0125 FDISC (x%x). " + "Fabric out of resources\n", + stat.un.lsRjtError); lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_RSCS); } @@ -4871,11 +4872,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, LSEXP_NOTHING_MORE) { vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; retry = 1; - lpfc_printf_vlog(vport, KERN_ERR, - LOG_TRACE_EVENT, - "0820 FLOGI Failed (x%x). " - "BBCredit Not Supported\n", - stat.un.lsRjtError); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0820 FLOGI (x%x). " + "BBCredit Not Supported\n", + stat.un.lsRjtError); } break; @@ -4885,11 +4885,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) ) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_TRACE_EVENT, - "0122 FDISC Failed (x%x). " - "Fabric Detected Bad WWN\n", - stat.un.lsRjtError); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0122 FDISC (x%x). 
" + "Fabric Detected Bad WWN\n", + stat.un.lsRjtError); lpfc_vport_set_state(vport, FC_VPORT_FABRIC_REJ_WWN); } @@ -5240,9 +5239,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ACC to LOGO completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0109 ACC to LOGO completes to NPort x%x refcnt %d " - "Data: x%x x%x x%x\n", - ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, - ndlp->nlp_state, ndlp->nlp_rpi); + "last els x%x Data: x%x x%x x%x\n", + ndlp->nlp_DID, kref_read(&ndlp->kref), + ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); /* This clause allows the LOGO ACC to complete and free resources * for the Fabric Domain Controller. It does deliberately skip @@ -5254,18 +5254,22 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { - /* If PLOGI is being retried, PLOGI completion will cleanup the - * node. The NLP_NPR_2B_DISC flag needs to be retained to make - * progress on nodes discovered from last RSCN. - */ - if ((ndlp->nlp_flag & NLP_DELAY_TMO) && - (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) - goto out; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) lpfc_unreg_rpi(vport, ndlp); + /* If came from PRLO, then PRLO_ACC is done. + * Start rediscovery now. + */ + if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } } + out: /* * The driver received a LOGO from the rport and has ACK'd it. @@ -5344,8 +5348,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, u32 ulp_status, ulp_word4, tmo, did, iotag; if (!vport) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3177 ELS response failed\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "3177 null vport in ELS rsp\n"); goto out; } if (cmdiocb->context_un.mbox) @@ -8454,9 +8458,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Defer ACC response until AFTER we issue a FLOGI */ if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) { - phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, + phba->defer_flogi_acc.rx_id = bf_get(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com); - phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, + phba->defer_flogi_acc.ox_id = bf_get(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com); vport->fc_myDID = did; @@ -8464,11 +8468,17 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3344 Deferring FLOGI ACC: rx_id: x%x," " ox_id: x%x, hba_flag x%lx\n", - phba->defer_flogi_acc_rx_id, - phba->defer_flogi_acc_ox_id, phba->hba_flag); + phba->defer_flogi_acc.rx_id, + phba->defer_flogi_acc.ox_id, phba->hba_flag); - phba->defer_flogi_acc_flag = true; + phba->defer_flogi_acc.flag = true; + /* This nlp_get is paired with nlp_puts that reset the + * defer_flogi_acc.flag back to false. We need to retain + * a kref on the ndlp until the deferred FLOGI ACC is + * processed or cancelled. + */ + phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp); return 0; } @@ -9641,11 +9651,12 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) continue; - /* On the ELS ring we can have ELS_REQUESTs or - * GEN_REQUESTs waiting for a response. 
+ /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs, + * or GEN_REQUESTs waiting for a CQE response. */ ulp_command = get_job_cmnd(phba, piocb); - if (ulp_command == CMD_ELS_REQUEST64_CR) { + if (ulp_command == CMD_ELS_REQUEST64_WQE || + ulp_command == CMD_XMIT_ELS_RSP64_WQE) { list_add_tail(&piocb->dlist, &abort_list); /* If the link is down when flushing ELS commands @@ -10504,7 +10515,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_els_rcv_flogi(vport, elsiocb, ndlp); /* retain node if our response is deferred */ - if (phba->defer_flogi_acc_flag) + if (phba->defer_flogi_acc.flag) break; if (newnode) lpfc_disc_state_machine(vport, ndlp, NULL, @@ -10742,7 +10753,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; /* Unknown ELS command received from NPORT */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0115 Unknown ELS command x%x " "received from NPORT x%x\n", cmd, did); if (newnode) @@ -11310,10 +11321,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; - /* FDISC failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "0126 FDISC failed. (x%x/x%x)\n", - ulp_status, ulp_word4); + /* Warn FDISC status */ + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "0126 FDISC cmpl status: x%x/x%x)\n", + ulp_status, ulp_word4); goto fdisc_failed; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 6943f6c6395c41..9241075f72fa4b 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -175,7 +175,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) ndlp->nlp_state, ndlp->fc4_xpt_flags); /* Don't schedule a worker thread event if the vport is going down. */ - if (test_bit(FC_UNLOADING, &vport->load_flag)) { + if (test_bit(FC_UNLOADING, &vport->load_flag) || + !test_bit(HBA_SETUP, &phba->hba_flag)) { spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; @@ -526,6 +527,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) * the following lpfc_nlp_put is necessary after fabric node is * recovered. */ + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); if (recovering) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, @@ -538,6 +542,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) spin_lock_irqsave(&ndlp->lock, iflags); ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS; spin_unlock_irqrestore(&ndlp->lock, iflags); + return fcf_inuse; } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Fabric node fully recovered before this dev_loss_tmo * queue work is processed. 
Thus, ignore the @@ -551,15 +556,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); return fcf_inuse; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_put(ndlp); return fcf_inuse; } @@ -1254,7 +1253,14 @@ lpfc_linkdown(struct lpfc_hba *phba) lpfc_scsi_dev_block(phba); offline = pci_channel_offline(phba->pcidev); - phba->defer_flogi_acc_flag = false; + /* Decrement the held ndlp if there is a deferred flogi acc */ + if (phba->defer_flogi_acc.flag) { + if (phba->defer_flogi_acc.ndlp) { + lpfc_nlp_put(phba->defer_flogi_acc.ndlp); + phba->defer_flogi_acc.ndlp = NULL; + } + } + phba->defer_flogi_acc.flag = false; /* Clear external loopback plug detected flag */ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; @@ -1376,7 +1382,7 @@ lpfc_linkup_port(struct lpfc_vport *vport) (vport != phba->pport)) return; - if (phba->defer_flogi_acc_flag) { + if (phba->defer_flogi_acc.flag) { clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag); clear_bit(FC_RSCN_MODE, &vport->fc_flag); clear_bit(FC_NLP_MORE, &vport->fc_flag); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 2108b4cb781575..d5c15742f7f29c 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -561,6 +561,27 @@ struct fc_vft_header { #include +/* + * Application Header + */ +struct fc_app_header { + uint32_t dst_app_id; + uint32_t src_app_id; +#define LOOPBACK_SRC_APPID 0x4321 + uint32_t word2; + uint32_t word3; +}; + +/* + * dfctl optional header definition + */ +enum lpfc_fc_dfctl { + LPFC_FC_NO_DEVICE_HEADER, + LPFC_FC_16B_DEVICE_HEADER, + LPFC_FC_32B_DEVICE_HEADER, + LPFC_FC_64B_DEVICE_HEADER, +}; + /* * Extended Link Service LS_COMMAND codes (Payload Word 0) */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 500253007b1dc8..26e1313ebb21fc 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -4847,6 +4847,7 @@ struct fcp_iwrite64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 @@ -4863,6 +4864,7 @@ struct fcp_iread64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 @@ -4879,6 +4881,7 @@ struct fcp_icmnd64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e1dfa96c2a553a..0dd451009b0791 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4699,6 +4699,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) uint64_t wwn; bool use_no_reset_hba = false; int rc; + u8 if_type; if (lpfc_no_hba_reset_cnt) { if (phba->sli_rev < 
LPFC_SLI_REV4 && @@ -4773,10 +4774,24 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; - if (phba->sli_rev == LPFC_SLI_REV4) - shost->max_cmd_len = LPFC_FCP_CDB_LEN_32; - else + + /* Set max_cmd_len applicable to ASIC support */ + if (phba->sli_rev == LPFC_SLI_REV4) { + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_2: + fallthrough; + case LPFC_SLI_INTF_IF_TYPE_6: + shost->max_cmd_len = LPFC_FCP_CDB_LEN_32; + break; + default: + shost->max_cmd_len = LPFC_FCP_CDB_LEN; + break; + } + } else { shost->max_cmd_len = LPFC_FCP_CDB_LEN; + } if (phba->sli_rev == LPFC_SLI_REV4) { if (!phba->cfg_fcp_mq_threshold || @@ -10436,6 +10451,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) struct lpfc_vector_map_info *cpup; struct lpfc_vector_map_info *eqcpup; struct lpfc_eq_intr_info *eqi; + u32 wqesize; /* * Create HBA Record arrays. @@ -10655,9 +10671,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) * Create ELS Work Queues */ - /* Create slow-path ELS Work Queue */ + /* + * Create slow-path ELS Work Queue. + * Increase the ELS WQ size when WQEs contain an embedded cdb + */ + wqesize = (phba->fcp_embed_io) ? + LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, - phba->sli4_hba.wq_esize, + wqesize, phba->sli4_hba.wq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -13861,12 +13883,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; - rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "6400 Can't set dma maximum segment size\n"); - return rc; - } + dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len); /* * Check whether the adapter supports an embedded copy of the diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index f6a53446e57f9d..4574716c8764fb 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -2652,8 +2652,26 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* flush the target */ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); - /* Treat like rcv logo */ - lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); + /* Send PRLO_ACC */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_LOGO_ACC; + spin_unlock_irq(&ndlp->lock); + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); + + /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR. + * lpfc_cmpl_els_logo_acc is expected to restart discovery. 
+ */ + ndlp->nlp_last_elscmd = ELS_CMD_PRLO; + ndlp->nlp_prev_state = ndlp->nlp_state; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, + "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_last_elscmd, + kref_read(&ndlp->kref)); + + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + return ndlp->nlp_state; } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index d70da2736c9452..fec23c7237304b 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0cef5d089f34b5..55c3e2c2bf8f7e 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 98ce9d97a22570..11c974bffa7209 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include @@ -4760,7 +4760,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, /* Word 3 */ bf_set(payload_offset_len, &wqe->fcp_icmd, - sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp)); + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); /* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, @@ -5555,11 +5555,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) iocb = &lpfc_cmd->cur_iocbq; if (phba->sli_rev == LPFC_SLI_REV4) { - pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; - if (!pring_s4) { + /* if the io_wq & pring are gone, the port was reset. 
*/ + if (!phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq || + !phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "2877 SCSI Layer I/O Abort Request " + "IO CMPL Status x%x ID %d LUN %llu " + "HBA_SETUP %d\n", FAILED, + cmnd->device->id, + (u64)cmnd->device->lun, + test_bit(HBA_SETUP, &phba->hba_flag)); ret = FAILED; goto out_unlock_hba; } + pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; spin_lock(&pring_s4->ring_lock); } /* the command is in process of being cancelled */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 88debef2fb6db0..2ec6e55771b45a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1940,12 +1940,15 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0); + spin_lock_irqsave(&phba->hbalock, iflags); + /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */ if (phba->cmf_active_mode != LPFC_CFG_MANAGED || - phba->link_state == LPFC_LINK_DOWN) - return 0; + phba->link_state < LPFC_LINK_UP) { + ret_val = 0; + goto out_unlock; + } - spin_lock_irqsave(&phba->hbalock, iflags); sync_buf = __lpfc_sli_get_iocbq(phba); if (!sync_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, @@ -4687,6 +4690,17 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (!phba->sli4_hba.hdwq || + !phba->sli4_hba.hdwq[i].io_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "7777 hdwq's deleted %lx " + "%lx %x %x\n", + phba->pport->load_flag, + phba->hba_flag, + phba->link_state, + phba->sli.sli_flag); + return; + } pring = phba->sli4_hba.hdwq[i].io_wq->pring; spin_lock_irq(&pring->ring_lock); @@ -8807,7 +8821,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli4_queue_setup(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "0381 Error %d during queue setup.\n ", rc); + "0381 Error %d during queue setup.\n", rc); goto out_stop_timers; } /* Initialize the driver internal SLI layer lists. 
*/ @@ -11079,9 +11093,17 @@ __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq, /* Word 9 */ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); - /* Word 12 */ - if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) + if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) { + /* Word 10 */ + if (cmdiocbq->cmd_flag & LPFC_IO_VMID) { + bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1); + bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1); + wqe->words[31] = LOOPBACK_SRC_APPID; + } + + /* Word 12 */ wqe->xmit_sequence.xmit_len = full_size; + } else wqe->xmit_sequence.xmit_len = wqe->xmit_sequence.bde.tus.f.bdeSize; @@ -12473,8 +12495,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, cmdiocb->iocb.ulpClass, LPFC_WQE_CQ_ID_DEFAULT, ia, false); - abtsiocbp->vport = vport; - /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; if (cmdiocb->cmd_flag & LPFC_IO_FCP) @@ -18422,6 +18442,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) { /* make rctl_names static to save stack space */ struct fc_vft_header *fc_vft_hdr; + struct fc_app_header *fc_app_hdr; uint32_t *header = (uint32_t *) fc_hdr; #define FC_RCTL_MDS_DIAGS 0xF4 @@ -18477,6 +18498,32 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) goto drop; } + if (unlikely(phba->link_flag == LS_LOOPBACK_MODE && + phba->cfg_vmid_app_header)) { + /* Application header is 16B device header */ + if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) { + fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1); + if (be32_to_cpu(fc_app_hdr->src_app_id) != + LOOPBACK_SRC_APPID) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_ELS | LOG_LIBDFC, + "1932 Loopback src app id " + "not matched, app_id:x%x\n", + be32_to_cpu(fc_app_hdr->src_app_id)); + + goto drop; + } + } else { + lpfc_printf_log(phba, KERN_WARNING, + LOG_ELS | LOG_LIBDFC, + "1933 Loopback df_ctl bit not set, " + "df_ctl:x%x\n", + fc_hdr->fh_df_ctl); + + goto drop; + } + } + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "2538 Received frame rctl:x%x, type:x%x, " "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", @@ -21140,7 +21187,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) if (!piocbq) { spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "2823 txq empty and txq_cnt is %d\n ", + "2823 txq empty and txq_cnt is %d\n", txq_cnt); break; } diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 7ac9ef281881c1..e70f163fab9003 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.4.0.3" +#define LPFC_DRIVER_VERSION "14.4.0.5" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c index 773e02ae20c374..cc3e4736f2fe29 100644 --- a/drivers/scsi/lpfc/lpfc_vmid.c +++ b/drivers/scsi/lpfc/lpfc_vmid.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. 
All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -321,6 +321,5 @@ lpfc_reinit_vmid(struct lpfc_vport *vport) if (!hash_empty(vport->hash_table)) hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode) hash_del(&cur->hnode); - vport->vmid_flag = 0; write_unlock(&vport->vmid_lock); } diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 4439167a51882d..7a4d4d8e2ad55f 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -626,6 +626,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; int rc; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -679,21 +680,49 @@ lpfc_vport_delete(struct fc_vport *fc_vport) if (!ndlp) goto skip_logo; + /* Send the DA_ID and Fabric LOGO to cleanup the NPIV fabric entries. */ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && phba->link_state >= LPFC_LINK_UP && phba->fc_topology != LPFC_TOPOLOGY_LOOP) { if (vport->cfg_enable_da_id) { - /* Send DA_ID and wait for a completion. */ + /* Send DA_ID and wait for a completion. This is best + * effort. If the DA_ID fails, likely the fabric will + * "leak" NportIDs but at least the driver issued the + * command. + */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (!ndlp) + goto issue_logo; + + spin_lock_irq(&ndlp->lock); + ndlp->da_id_waitq = &waitq; + ndlp->save_flags |= NLP_WAIT_FOR_DA_ID; + spin_unlock_irq(&ndlp->lock); + rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0); - if (rc) { - lpfc_printf_log(vport->phba, KERN_WARNING, - LOG_VPORT, - "1829 CT command failed to " - "delete objects on fabric, " - "rc %d\n", rc); + if (!rc) { + wait_event_timeout(waitq, + !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID), + msecs_to_jiffies(phba->fc_ratov * 2000)); } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS, + "1829 DA_ID issue status %d. " + "SFlag x%x NState x%x, NFlag x%x " + "Rpi x%x\n", + rc, ndlp->save_flags, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); + + /* Remove the waitq and save_flags. It no + * longer matters if the wake happened. + */ + spin_lock_irq(&ndlp->lock); + ndlp->da_id_waitq = NULL; + ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID; + spin_unlock_irq(&ndlp->lock); } +issue_logo: /* * If the vpi is not registered, then a valid FDISC doesn't * exist and there is no need for a ELS LOGO. Just cleanup diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 53ee8f84d094cf..f225bb20aa2201 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup); * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets * so bus errors are unavoidable. * - * If a MOVE.B instruction faults, we assume that zero bytes were transferred - * and simply retry. That assumption probably depends on target behaviour but - * seems to hold up okay. The NOP provides synchronization: without it the - * fault can sometimes occur after the program counter has moved past the - * offending instruction. Post-increment addressing can't be used. + * If a MOVE.B instruction faults during a receive operation, we assume the + * target sent nothing and try again. That assumption probably depends on + * target firmware but it seems to hold up okay. If a fault happens during a + * send operation, the target may or may not have seen /ACK and got the byte. 
+ * It's uncertain so the whole SCSI command gets retried. + * + * The NOP is needed for synchronization because the fault address in the + * exception stack frame may or may not be the instruction that actually + * caused the bus error. Post-increment addressing can't be used. */ #define MOVE_BYTE(operands) \ @@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup); ".previous \n" \ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) -#define MAC_PDMA_DELAY 32 - static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) { unsigned char *addr = start; @@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) if (n >= 1) { MOVE_BYTE("%0@,%3@"); if (result) - goto out; + return -1; } if (n >= 1 && ((unsigned long)addr & 1)) { MOVE_BYTE("%0@,%3@"); if (result) - goto out; + return -2; } while (n >= 32) MOVE_16_WORDS("%0@+,%3@"); while (n >= 2) MOVE_WORD("%0@+,%3@"); if (result) - return start - addr; /* Negated to indicate uncertain length */ + return start - addr - 1; /* Negated to indicate uncertain length */ if (n == 1) MOVE_BYTE("%0@,%3@"); -out: return addr - start; } @@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value) out_be32(hostdata->io + (CTRL_REG << 4), value); } +static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata) +{ + unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */ + unsigned char basr; + +again: + basr = NCR5380_read(BUS_AND_STATUS_REG); + + if (!(basr & BASR_PHASE_MATCH)) + return 1; + + if (basr & BASR_IRQ) + return -1; + + if (basr & BASR_DRQ) + return 0; + + if (n-- == 0) { + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: DRQ timeout\n", __func__); + return -1; + } + + NCR5380_poll_politely2(hostdata, + BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, + BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0); + goto again; +} + static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); unsigned char *d = dst; - int result = 0; hostdata->pdma_residual = len; - while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ | BASR_PHASE_MATCH, - BASR_DRQ | BASR_PHASE_MATCH, 0)) { - int bytes; + while (macscsi_wait_for_drq(hostdata) == 0) { + int bytes, chunk_bytes; if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE); - bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512)); + chunk_bytes = min(hostdata->pdma_residual, 512); + bytes = mac_pdma_recv(s, d, chunk_bytes); + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); if (bytes > 0) { d += bytes; @@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, } if (hostdata->pdma_residual == 0) - goto out; + break; - if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, - BASR_ACK, 0) < 0) - scmd_printk(KERN_DEBUG, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - goto out; + if (bytes > 0) + continue; - if (bytes == 0) - udelay(MAC_PDMA_DELAY); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error [%d/%d] (%d/%d)\n", + __func__, d - dst, len, bytes, chunk_bytes); - if (bytes >= 0) + if (bytes == 0) continue; - 
dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, d - dst, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; - goto out; + if (macscsi_wait_for_drq(hostdata) <= 0) + set_host_byte(hostdata->connected, DID_ERROR); + break; } - scmd_printk(KERN_ERR, hostdata->connected, - "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; -out: - if (macintosh_config->ident == MAC_MODEL_IIFX) - write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); - return result; + return 0; } static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, @@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, { unsigned char *s = src; u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); - int result = 0; hostdata->pdma_residual = len; - while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ | BASR_PHASE_MATCH, - BASR_DRQ | BASR_PHASE_MATCH, 0)) { - int bytes; + while (macscsi_wait_for_drq(hostdata) == 0) { + int bytes, chunk_bytes; if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE); - bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512)); + chunk_bytes = min(hostdata->pdma_residual, 512); + bytes = mac_pdma_send(s, d, chunk_bytes); + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); if (bytes > 0) { s += bytes; hostdata->pdma_residual -= bytes; } - if (hostdata->pdma_residual == 0) { - if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, - TCR_LAST_BYTE_SENT, - TCR_LAST_BYTE_SENT, - 0) < 0) { - scmd_printk(KERN_ERR, hostdata->connected, - "%s: Last Byte Sent timeout\n", __func__); - result = -1; - } - goto out; - } + if (hostdata->pdma_residual == 0) + break; - if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, - BASR_ACK, 0) < 0) - scmd_printk(KERN_DEBUG, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - goto out; + if (bytes > 0) + continue; - if (bytes == 0) - udelay(MAC_PDMA_DELAY); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error [%d/%d] (%d/%d)\n", + __func__, s - src, len, bytes, chunk_bytes); - if (bytes >= 0) + if (bytes == 0) continue; - dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, s - src, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; - goto out; + if (macscsi_wait_for_drq(hostdata) <= 0) + set_host_byte(hostdata->connected, DID_ERROR); + break; } - scmd_printk(KERN_ERR, hostdata->connected, - "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; -out: - if (macintosh_config->ident == MAC_MODEL_IIFX) - write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); - return result; + return 0; } static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, @@ -432,7 +432,7 @@ static struct scsi_host_template mac_scsi_template = { .eh_host_reset_handler = macscsi_host_reset, .can_queue = 16, .this_id = 7, - .sg_tablesize = 1, + .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct NCR5380_cmd), @@ -470,6 +470,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev) if (setup_hostid >= 0) mac_scsi_template.this_id = setup_hostid & 7; + if (macintosh_config->ident == 
MAC_MODEL_IIFX) + mac_scsi_template.sg_tablesize = 1; + instance = scsi_host_alloc(&mac_scsi_template, sizeof(struct NCR5380_hostdata)); if (!instance) @@ -491,6 +494,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev) host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; + if (instance->sg_tablesize > 1) + host_flags |= FLAG_DMA_FIXUP; + error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP); if (error) goto fail_init; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 5680c6cdb22193..088cc40ae866a0 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -814,12 +814,12 @@ struct MR_HOST_DEVICE_LIST { __le32 size; __le32 count; __le32 reserved[2]; - struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1]; + struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[] __counted_by_le(count); } __packed; #define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \ (sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \ - (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1))) + (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))) /* @@ -2473,7 +2473,7 @@ struct MR_LD_VF_MAP { union MR_LD_REF ref; u8 ldVfCount; u8 reserved[6]; - u8 policy[1]; + u8 policy[]; }; struct MR_LD_VF_AFFILIATION { diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 6c79c350a4d5ba..8e75e2e279a40a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include @@ -6380,7 +6380,7 @@ static int megasas_init_fw(struct megasas_instance *instance) GFP_KERNEL); if (!fusion->stream_detect_by_ld[i]) { dev_err(&instance->pdev->dev, - "unable to allocate stream detect by LD\n "); + "unable to allocate stream detect by LD\n"); for (j = 0; j < i; ++j) kfree(fusion->stream_detect_by_ld[j]); kfree(fusion->stream_detect_by_ld); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 6c1fb8149553a8..1eec23da28e2d6 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1988,8 +1988,8 @@ megasas_fusion_start_watchdog(struct megasas_instance *instance) sizeof(instance->fault_handler_work_q_name), "poll_megasas%d_status", instance->host->host_no); - instance->fw_fault_work_q = - create_singlethread_workqueue(instance->fault_handler_work_q_name); + instance->fw_fault_work_q = alloc_ordered_workqueue( + "%s", WQ_MEM_RECLAIM, instance->fault_handler_work_q_name); if (!instance->fw_fault_work_q) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h index 6a19e17eb1a70c..00cd18edfad6d4 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h @@ -67,6 +67,7 @@ #define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00) #define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8) #define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff) +#define MPI3_INSTANCE_PGAD_INSTANCE_MASK (0x0000ffff) struct mpi3_config_request { __le16 host_tag; u8 ioc_use_only02; @@ -75,7 +76,8 @@ struct mpi3_config_request { u8 ioc_use_only06; u8 msg_flags; __le16 change_count; - __le16 reserved0a; + u8 proxy_ioc_number; + u8 reserved0b; u8 page_version; u8 page_number; u8 page_type; @@ -206,6 +208,9 @@ struct mpi3_config_page_header { #define 
MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT (0x00b5) #define MPI3_MFGPAGE_DEVID_SAS5116_NVME_MGMT (0x00b6) #define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00b8) +#define MPI3_MFGPAGE_DEVID_SAS5248_MPI (0x00f0) +#define MPI3_MFGPAGE_DEVID_SAS5248_MPI_NS (0x00f1) +#define MPI3_MFGPAGE_DEVID_SAS5248_PCIE_SWITCH (0x00f2) struct mpi3_man_page0 { struct mpi3_config_page_header header; u8 chip_revision[8]; @@ -1074,6 +1079,8 @@ struct mpi3_io_unit_page8 { #define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04) #define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02) #define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01) +#define MPI3_IOUNIT8_SBMODE_CURRENT_KEY_IOUNIT17 (0x10) +#define MPI3_IOUNIT8_SBMODE_HARD_SECURE_RECERTIFIED (0x08) struct mpi3_io_unit_page9 { struct mpi3_config_page_header header; __le32 flags; @@ -1089,6 +1096,8 @@ struct mpi3_io_unit_page9 { #define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004) #define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001) #define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff) +#define MPI3_IOUNIT9_FIRSTDEVICE_IN_DRIVER_PAGE_0 (0xfffe) + struct mpi3_io_unit_page10 { struct mpi3_config_page_header header; u8 flags; @@ -1224,6 +1233,19 @@ struct mpi3_io_unit_page15 { #define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01) #define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02) #define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00) + +struct mpi3_io_unit_page17 { + struct mpi3_config_page_header header; + u8 num_instances; + u8 instance; + __le16 reserved0a; + __le32 reserved0c[4]; + __le16 key_length; + u8 encryption_algorithm; + u8 reserved1f; + __le32 current_key[]; +}; +#define MPI3_IOUNIT17_PAGEVERSION (0x00) struct mpi3_ioc_page0 { struct mpi3_config_page_header header; __le32 reserved08; @@ -1311,7 +1333,7 @@ struct mpi3_driver_page0 { u8 tur_interval; u8 reserved10; u8 security_key_timeout; - __le16 reserved12; + __le16 first_device; __le32 reserved14; __le32 reserved18; }; @@ -1324,10 +1346,13 @@ struct mpi3_driver_page0 { #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000) #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001) #define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002) +#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE1 (0x0000) +#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE2 (0xffff) struct mpi3_driver_page1 { struct mpi3_config_page_header header; __le32 flags; - __le32 reserved0c; + u8 time_stamp_update; + u8 reserved0d[3]; __le16 host_diag_trace_max_size; __le16 host_diag_trace_min_size; __le16 host_diag_trace_decrement_size; @@ -1565,16 +1590,13 @@ struct mpi3_sas_io_unit0_phy_data { __le32 reserved10; }; -#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX -#define MPI3_SAS_IO_UNIT0_PHY_MAX (1) -#endif struct mpi3_sas_io_unit_page0 { struct mpi3_config_page_header header; __le32 reserved08; u8 num_phys; u8 init_status; __le16 reserved0e; - struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX]; + struct mpi3_sas_io_unit0_phy_data phy_data[]; }; #define MPI3_SASIOUNIT0_PAGEVERSION (0x00) @@ -1606,9 +1628,6 @@ struct mpi3_sas_io_unit1_phy_data { __le32 reserved08; }; -#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX -#define MPI3_SAS_IO_UNIT1_PHY_MAX (1) -#endif struct mpi3_sas_io_unit_page1 { struct mpi3_config_page_header header; __le16 control_flags; @@ -1618,7 +1637,7 @@ struct mpi3_sas_io_unit_page1 { u8 num_phys; u8 sata_max_q_depth; __le16 reserved12; - struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX]; + struct 
mpi3_sas_io_unit1_phy_data phy_data[]; }; #define MPI3_SASIOUNIT1_PAGEVERSION (0x00) @@ -2353,6 +2372,10 @@ struct mpi3_device0_vd_format { #define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001) #define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000) #define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12) +#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_MASK (0x0003) +#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_HDD (0x0000) +#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SSD (0x0001) +#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_NO_GUIDANCE (0x0002) union mpi3_device0_dev_spec_format { struct mpi3_device0_sas_sata_format sas_sata_format; struct mpi3_device0_pcie_format pcie_format; diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h index 7df2421901357b..2c6e548cbd0ffd 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h @@ -205,13 +205,14 @@ struct mpi3_encrypted_hash_entry { u8 hash_image_type; u8 hash_algorithm; u8 encryption_algorithm; - u8 reserved03; + u8 flags; __le16 public_key_size; __le16 signature_size; __le32 public_key[MPI3_PUBLIC_KEY_MAX]; }; - -#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03) +#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH (0x03) +#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04) +#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05) #define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0) #define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00) #define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20) @@ -230,6 +231,12 @@ struct mpi3_encrypted_hash_entry { #define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05) #define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06) +/* hierarchical signature system (hss) */ +#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_87 (0x0b) +#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c) +#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d) +#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f) + #ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX #define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1) #endif diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h index 02878494987311..c374867f9ba088 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h @@ -39,6 +39,12 @@ struct mpi3_ioc_init_request { #define MPI3_WHOINIT_HOST_DRIVER (0x03) #define MPI3_WHOINIT_MANUFACTURER (0x04) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002) +#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_SSD (0x00000003) + struct mpi3_ioc_facts_request { __le16 host_tag; u8 ioc_use_only02; @@ -140,6 +146,8 @@ struct mpi3_ioc_facts_data { #define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020) #define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010) #define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008) +#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004) +#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002) #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001) #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000) #define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001) @@ -453,9 +461,6 @@ struct mpi3_event_data_sas_notify_primitive { #define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02) #define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03) #define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04) -#ifndef 
MPI3_EVENT_SAS_TOPO_PHY_COUNT -#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1) -#endif struct mpi3_event_sas_topo_phy_entry { __le16 attached_dev_handle; u8 link_rate; @@ -496,7 +501,7 @@ struct mpi3_event_data_sas_topology_change_list { u8 start_phy_num; u8 exp_status; u8 io_unit_port; - struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT]; + struct mpi3_event_sas_topo_phy_entry phy_entry[] __counted_by(num_entries); }; #define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) @@ -545,9 +550,6 @@ struct mpi3_event_data_pcie_enumeration { #define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000) #define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000) #define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000) -#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT -#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1) -#endif struct mpi3_event_pcie_topo_port_entry { __le16 attached_dev_handle; u8 port_status; @@ -588,7 +590,7 @@ struct mpi3_event_data_pcie_topology_change_list { u8 switch_status; u8 io_unit_port; __le32 reserved0c; - struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT]; + struct mpi3_event_pcie_topo_port_entry port_entry[] __counted_by(num_entries); }; #define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00) diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h index fdc3d1968e4300..b2ab25a1cfebe2 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h @@ -18,7 +18,7 @@ union mpi3_version_union { #define MPI3_VERSION_MAJOR (3) #define MPI3_VERSION_MINOR (0) -#define MPI3_VERSION_UNIT (31) +#define MPI3_VERSION_UNIT (34) #define MPI3_VERSION_DEV (0) #define MPI3_DEVHANDLE_INVALID (0xffff) struct mpi3_sysif_oper_queue_indexes { @@ -158,6 +158,7 @@ struct mpi3_sysif_registers { #define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004) #define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005) #define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006) +#define MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER (0x0000f007) #define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14) #define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18) #define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c) @@ -410,6 +411,7 @@ struct mpi3_default_reply { #define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) #define MPI3_IOCSTATUS_INVALID_FIELD (0x0007) #define MPI3_IOCSTATUS_INVALID_STATE (0x0008) +#define MPI3_IOCSTATUS_SHUTDOWN_ACTIVE (0x0009) #define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a) #define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b) #define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c) diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index dc2cdd5f031114..14eeac08660f85 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -57,8 +57,8 @@ extern struct list_head mrioc_list; extern int prot_mask; extern atomic64_t event_counter; -#define MPI3MR_DRIVER_VERSION "8.9.1.0.51" -#define MPI3MR_DRIVER_RELDATE "29-May-2024" +#define MPI3MR_DRIVER_VERSION "8.12.0.0.50" +#define MPI3MR_DRIVER_RELDATE "05-Sept-2024" #define MPI3MR_DRIVER_NAME "mpi3mr" #define MPI3MR_DRIVER_LICENSE "GPL" @@ -178,7 +178,7 @@ extern atomic64_t event_counter; #define MPI3MR_DEFAULT_SDEV_QD 32 /* Definitions for Threaded IRQ poll*/ -#define MPI3MR_IRQ_POLL_SLEEP 2 +#define MPI3MR_IRQ_POLL_SLEEP 20 #define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8 /* Definitions for 
the controller security status*/ @@ -213,6 +213,7 @@ extern atomic64_t event_counter; #define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_INDEX 0 #define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA 1 +#define MPI3MR_THRESHOLD_REPLY_COUNT 100 /* SGE Flag definition */ #define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \ @@ -1059,7 +1060,6 @@ struct scmd_priv { * @sbq_lock: Sense buffer queue lock * @sbq_host_index: Sense buffer queuehost index * @event_masks: Event mask bitmap - * @fwevt_worker_name: Firmware event worker thread name * @fwevt_worker_thread: Firmware event worker thread * @fwevt_lock: Firmware event lock * @fwevt_list: Firmware event list @@ -1090,6 +1090,7 @@ struct scmd_priv { * @evtack_cmds_bitmap: Event Ack bitmap * @delayed_evtack_cmds_list: Delayed event acknowledgment list * @ts_update_counter: Timestamp update counter + * @ts_update_interval: Timestamp update interval * @reset_in_progress: Reset in progress flag * @unrecoverable: Controller unrecoverable flag * @prev_reset_result: Result of previous reset @@ -1240,7 +1241,6 @@ struct mpi3mr_ioc { u32 sbq_host_index; u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS]; - char fwevt_worker_name[MPI3MR_NAME_LENGTH]; struct workqueue_struct *fwevt_worker_thread; spinlock_t fwevt_lock; struct list_head fwevt_list; @@ -1278,7 +1278,8 @@ struct mpi3mr_ioc { unsigned long *evtack_cmds_bitmap; struct list_head delayed_evtack_cmds_list; - u32 ts_update_counter; + u16 ts_update_counter; + u16 ts_update_interval; u8 reset_in_progress; u8 unrecoverable; int prev_reset_result; diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index c196dc14ad2063..f1ab76351bd81e 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -345,6 +345,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, { u16 reply_desc_type, host_tag = 0; u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; + u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS; u32 ioc_loginfo = 0, sense_count = 0; struct mpi3_status_reply_descriptor *status_desc; struct mpi3_address_reply_descriptor *addr_desc; @@ -366,8 +367,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, if (ioc_status & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); - ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; - mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); + masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK; + mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo); break; case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; @@ -380,7 +381,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, if (ioc_status & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info); - ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK; if (def_reply->function == MPI3_FUNCTION_SCSI_IO) { scsi_reply = (struct mpi3_scsi_io_reply *)def_reply; sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, @@ -393,7 +394,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, sshdr.asc, sshdr.ascq); } } - mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); + mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo); break; case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; @@ -408,7 
+409,10 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, if (cmdptr->state & MPI3MR_CMD_PENDING) { cmdptr->state |= MPI3MR_CMD_COMPLETE; cmdptr->ioc_loginfo = ioc_loginfo; - cmdptr->ioc_status = ioc_status; + if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS) + cmdptr->ioc_status = ioc_status; + else + cmdptr->ioc_status = masked_ioc_status; cmdptr->state &= ~MPI3MR_CMD_PENDING; if (def_reply) { cmdptr->state |= MPI3MR_CMD_REPLY_VALID; @@ -439,6 +443,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) u32 admin_reply_ci = mrioc->admin_reply_ci; u32 num_admin_replies = 0; u64 reply_dma = 0; + u16 threshold_comps = 0; struct mpi3_default_reply_descriptor *reply_desc; if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) @@ -462,6 +467,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) if (reply_dma) mpi3mr_repost_reply_buf(mrioc, reply_dma); num_admin_replies++; + threshold_comps++; if (++admin_reply_ci == mrioc->num_admin_replies) { admin_reply_ci = 0; exp_phase ^= 1; @@ -472,6 +478,11 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) if ((le16_to_cpu(reply_desc->reply_flags) & MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) break; + if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) { + writel(admin_reply_ci, + &mrioc->sysif_regs->admin_reply_queue_ci); + threshold_comps = 0; + } } while (1); writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); @@ -525,7 +536,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, u32 num_op_reply = 0; u64 reply_dma = 0; struct mpi3_default_reply_descriptor *reply_desc; - u16 req_q_idx = 0, reply_qidx; + u16 req_q_idx = 0, reply_qidx, threshold_comps = 0; reply_qidx = op_reply_q->qid - 1; @@ -556,6 +567,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, if (reply_dma) mpi3mr_repost_reply_buf(mrioc, reply_dma); num_op_reply++; + threshold_comps++; if (++reply_ci == op_reply_q->num_replies) { reply_ci = 0; @@ -577,13 +589,19 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, break; } #endif + if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) { + writel(reply_ci, + &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index); + atomic_sub(threshold_comps, &op_reply_q->pend_ios); + threshold_comps = 0; + } } while (1); writel(reply_ci, &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index); op_reply_q->ci = reply_ci; op_reply_q->ephase = exp_phase; - + atomic_sub(threshold_comps, &op_reply_q->pend_ios); atomic_dec(&op_reply_q->in_use); return num_op_reply; } @@ -710,7 +728,7 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) mpi3mr_process_op_reply_q(mrioc, intr_info->op_reply_q); - usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP); + usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1); } while (atomic_read(&intr_info->op_reply_q->pend_ios) && (num_op_reply < mrioc->max_host_ios)); @@ -1344,6 +1362,10 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) int retval = 0; enum mpi3mr_iocstate ioc_state; u64 base_info; + u8 retry = 0; + u64 start_time, elapsed_time_sec; + +retry_bring_ioc_ready: ioc_status = readl(&mrioc->sysif_regs->ioc_status); ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); @@ -1362,26 +1384,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) ioc_info(mrioc, "controller is in %s state during detection\n", mpi3mr_iocstate_name(ioc_state)); - if (ioc_state == MRIOC_STATE_BECOMING_READY || - ioc_state == MRIOC_STATE_RESET_REQUESTED) { - timeout = 
mrioc->ready_timeout * 10; - do { - msleep(100); - } while (--timeout); + timeout = mrioc->ready_timeout * 10; + + do { + ioc_state = mpi3mr_get_iocstate(mrioc); + + if (ioc_state != MRIOC_STATE_BECOMING_READY && + ioc_state != MRIOC_STATE_RESET_REQUESTED) + break; if (!pci_device_is_present(mrioc->pdev)) { mrioc->unrecoverable = 1; - ioc_err(mrioc, - "controller is not present while waiting to reset\n"); - retval = -1; + ioc_err(mrioc, "controller is not present while waiting to reset\n"); goto out_device_not_present; } - ioc_state = mpi3mr_get_iocstate(mrioc); - ioc_info(mrioc, - "controller is in %s state after waiting to reset\n", - mpi3mr_iocstate_name(ioc_state)); - } + msleep(100); + } while (--timeout); if (ioc_state == MRIOC_STATE_READY) { ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); @@ -1442,6 +1461,9 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); + if (retry == 0) + start_time = jiffies; + timeout = mrioc->ready_timeout * 10; do { ioc_state = mpi3mr_get_iocstate(mrioc); @@ -1451,6 +1473,12 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) mpi3mr_iocstate_name(ioc_state)); return 0; } + ioc_status = readl(&mrioc->sysif_regs->ioc_status); + if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || + (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { + mpi3mr_print_fault_info(mrioc); + goto out_failed; + } if (!pci_device_is_present(mrioc->pdev)) { mrioc->unrecoverable = 1; ioc_err(mrioc, @@ -1459,9 +1487,19 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) goto out_device_not_present; } msleep(100); - } while (--timeout); + elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000; + } while (elapsed_time_sec < mrioc->ready_timeout); out_failed: + elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000; + if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) { + retry++; + + ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n" + " elapsed time =%llu\n", retry, elapsed_time_sec); + + goto retry_bring_ioc_ready; + } ioc_state = mpi3mr_get_iocstate(mrioc); ioc_err(mrioc, "failed to bring to ready state, current state: %s\n", @@ -2653,7 +2691,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work) return; } - if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { + if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) { mrioc->ts_update_counter = 0; mpi3mr_sync_timestamp(mrioc); } @@ -2742,8 +2780,8 @@ void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) snprintf(mrioc->watchdog_work_q_name, sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, mrioc->id); - mrioc->watchdog_work_q = - create_singlethread_workqueue(mrioc->watchdog_work_q_name); + mrioc->watchdog_work_q = alloc_ordered_workqueue( + "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name); if (!mrioc->watchdog_work_q) { ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); return; @@ -3826,6 +3864,29 @@ static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc) return retval; } +/** + * mpi3mr_read_tsu_interval - Update time stamp interval + * @mrioc: Adapter instance reference + * + * Update time stamp interval if its defined in driver page 1, + * otherwise use default value. 
+ * + * Return: Nothing + */ +static void +mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc) +{ + struct mpi3_driver_page1 driver_pg1; + u16 pg_sz = sizeof(driver_pg1); + int retval = 0; + + mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL; + + retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz); + if (!retval && driver_pg1.time_stamp_update) + mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60); +} + /** * mpi3mr_print_ioc_info - Display controller information * @mrioc: Adapter instance reference @@ -4122,6 +4183,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) goto out_failed_noretry; } + mpi3mr_read_tsu_interval(mrioc); mpi3mr_print_ioc_info(mrioc); if (!mrioc->cfg_page) { @@ -4303,6 +4365,7 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) goto out_failed_noretry; } + mpi3mr_read_tsu_interval(mrioc); mpi3mr_print_ioc_info(mrioc); if (is_resume) { diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index 616894571c6abe..5f2f67acf8bf31 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -5317,10 +5317,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) else scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); - snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name), - "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id); mrioc->fwevt_worker_thread = alloc_ordered_workqueue( - mrioc->fwevt_worker_name, 0); + "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id); if (!mrioc->fwevt_worker_thread) { ioc_err(mrioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index b785a7e88b498c..ed5046593fdab6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -846,8 +846,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) snprintf(ioc->fault_reset_work_q_name, sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", ioc->driver_name, ioc->id); - ioc->fault_reset_work_q = - create_singlethread_workqueue(ioc->fault_reset_work_q_name); + ioc->fault_reset_work_q = alloc_ordered_workqueue( + "%s", WQ_MEM_RECLAIM, ioc->fault_reset_work_q_name); if (!ioc->fault_reset_work_q) { ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__); return; @@ -8898,9 +8898,8 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); if (!device_remove_in_progress) { ioc_info(ioc, - "Unable to allocate the memory for " - "device_remove_in_progress of sz: %d\n " - , pd_handles_sz); + "Unable to allocate the memory for device_remove_in_progress of sz: %d\n", + pd_handles_sz); return -ENOMEM; } memset(device_remove_in_progress + diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index fe1e96fda284bc..eceb5eeb465132 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1162,8 +1162,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @fault_reset_work_q_name: fw fault work queue * @fault_reset_work_q: "" * @fault_reset_work: "" - * @firmware_event_name: fw event work queue - * @firmware_event_thread: "" + * @firmware_event_thread: fw event work queue * @fw_event_lock: * @fw_event_list: list of fw events * @current_evet: current processing firmware event @@ -1351,7 +1350,6 @@ struct MPT3SAS_ADAPTER { struct delayed_work fault_reset_work; /* fw event handler */ - char 
firmware_event_name[20]; struct workqueue_struct *firmware_event_thread; spinlock_t fw_event_lock; struct list_head fw_event_list; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 97c2472cd4343e..f2a55aa5fe6503 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -54,7 +54,7 @@ #include #include #include -#include +#include #include "mpt3sas_base.h" @@ -12301,10 +12301,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); /* event thread */ - snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), - "fw_event_%s%d", ioc->driver_name, ioc->id); ioc->firmware_event_thread = alloc_ordered_workqueue( - ioc->firmware_event_name, 0); + "fw_event_%s%d", 0, ioc->driver_name, ioc->id); if (!ioc->firmware_event_thread) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c index 1d64e5056a8ab1..2b04f0852decd3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c +++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include "mpt3sas_base.h" diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 68df771e29759d..19b01f7c476772 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index f684eb5e04898a..a7e64b867c8e28 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -112,9 +112,8 @@ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb) return false; } - snprintf(cb->work_q_name, sizeof(cb->work_q_name), - "myrb_wq_%d", cb->host->host_no); - cb->work_q = create_singlethread_workqueue(cb->work_q_name); + cb->work_q = alloc_ordered_workqueue("myrb_wq_%d", WQ_MEM_RECLAIM, + cb->host->host_no); if (!cb->work_q) { dma_pool_destroy(cb->dcdb_pool); cb->dcdb_pool = NULL; diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h index fb8eacfceee88d..78dc4136fb10c4 100644 --- a/drivers/scsi/myrb.h +++ b/drivers/scsi/myrb.h @@ -712,7 +712,6 @@ struct myrb_hba { struct Scsi_Host *host; struct workqueue_struct *work_q; - char work_q_name[20]; struct delayed_work monitor_work; unsigned long primary_monitor_time; unsigned long secondary_monitor_time; diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index e824be9d9bbb94..1469d0c54e4558 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -2206,9 +2206,8 @@ static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs) return false; } - snprintf(cs->work_q_name, sizeof(cs->work_q_name), - "myrs_wq_%d", shost->host_no); - cs->work_q = create_singlethread_workqueue(cs->work_q_name); + cs->work_q = alloc_ordered_workqueue("myrs_wq_%d", WQ_MEM_RECLAIM, + shost->host_no); if (!cs->work_q) { dma_pool_destroy(cs->dcdb_pool); cs->dcdb_pool = NULL; diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h index 9f6696d0ddd5f7..e1d6b123de7be5 100644 --- a/drivers/scsi/myrs.h +++ b/drivers/scsi/myrs.h @@ -904,7 +904,6 @@ struct myrs_hba { bool disable_enc_msg; struct workqueue_struct *work_q; - char work_q_name[20]; 
struct delayed_work monitor_work; unsigned long primary_monitor_time; unsigned long secondary_monitor_time; diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 1e63cb6cd8e327..33e1eba62ca12c 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -100,10 +100,12 @@ static void pm8001_map_queues(struct Scsi_Host *shost) struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; - if (pm8001_ha->number_of_intr > 1) + if (pm8001_ha->number_of_intr > 1) { blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1); + return; + } - return blk_mq_map_queues(qmap); + blk_mq_map_queues(qmap); } /* diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 8fe886dc5e4765..a9869cd8c4c075 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -2037,7 +2037,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) atomic_dec(&pm8001_dev->running_req); break; } - pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ", + pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n", psspPayload->ssp_resp_iu.status); spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index a2a084c8075e00..4c5881917d7632 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1946,7 +1946,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd) } iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg); - ioread32(pinstance->int_regs.host_ioa_interrupt_reg), + ioread32(pinstance->int_regs.host_ioa_interrupt_reg); int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg); pmcraid_info("Waiting for IOA to become operational %x:%x\n", @@ -4009,7 +4009,7 @@ static void pmcraid_tasklet_function(unsigned long instance) * This routine un-registers registered interrupt handler and * also frees irqs/vectors. 
* - * Retun Value + * Return Value * None */ static diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 054a51713d5563..fcfc3bed02c614 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -310,7 +310,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) if (!free_sqes) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, - "Returning NULL, free_sqes=%d.\n ", + "Returning NULL, free_sqes=%d.\n", free_sqes); goto out_failed; } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 4813087e58a157..cf13148ba281c1 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -3372,9 +3372,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", qedf->io_mempool); - sprintf(host_buf, "qedf_%u_link", - qedf->lport->host->host_no); - qedf->link_update_wq = create_workqueue(host_buf); + qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM, + 1, qedf->lport->host->host_no); INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); @@ -3584,9 +3583,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) ether_addr_copy(params.ll2_mac_address, qedf->mac); /* Start LL2 processing thread */ - snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); - qedf->ll2_recv_wq = - create_workqueue(host_buf); + qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1, + host->host_no); if (!qedf->ll2_recv_wq) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); rc = -ENOMEM; @@ -3627,9 +3625,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) } } - sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); - qedf->timer_work_queue = - create_workqueue(host_buf); + qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer", + WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no); if (!qedf->timer_work_queue) { QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " "workqueue.\n"); @@ -3641,7 +3638,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) if (mode != QEDF_MODE_RECOVERY) { sprintf(host_buf, "qedf_%u_dpc", qedf->lport->host->host_no); - qedf->dpc_wq = create_workqueue(host_buf); + qedf->dpc_wq = + alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf); } INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); @@ -4182,7 +4180,7 @@ static int __init qedf_init(void) goto err3; } - qedf_io_wq = create_workqueue("qedf_io_wq"); + qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq"); if (!qedf_io_wq) { QEDF_ERR(NULL, "Could not create qedf_io_wq.\n"); goto err4; diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cd0180b1f5b9da..c5aec26019d6ab 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -2767,7 +2767,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) } sprintf(host_buf, "host_%d", qedi->shost->host_no); - qedi->tmf_thread = create_singlethread_workqueue(host_buf); + qedi->tmf_thread = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, host_buf); if (!qedi->tmf_thread) { QEDI_ERR(&qedi->dbg_ctx, "Unable to start tmf thread!\n"); @@ -2775,8 +2776,9 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) goto free_cid_que; } - sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no); - qedi->offload_thread = create_workqueue(host_buf); + qedi->offload_thread = 
alloc_workqueue("qedi_ofld%d", + WQ_MEM_RECLAIM, + 1, qedi->shost->host_no); if (!qedi->offload_thread) { QEDI_ERR(&qedi->dbg_ctx, "Unable to start offload thread!\n"); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 7cf998e3cc681c..15066c112817a8 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2621,7 +2621,6 @@ typedef struct fc_port { struct kref sess_kref; struct qla_tgt *tgt; unsigned long expires; - struct list_head del_list_entry; struct work_struct free_work; struct work_struct reg_work; uint64_t jiffies_at_registration; diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h index 20788054b91bc1..52e060f83b37d9 100644 --- a/drivers/scsi/qla2xxx/qla_dsd.h +++ b/drivers/scsi/qla2xxx/qla_dsd.h @@ -1,7 +1,7 @@ #ifndef _QLA_DSD_H_ #define _QLA_DSD_H_ -#include +#include /* 32-bit data segment descriptor (8 bytes) */ struct dsd32 { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index bc3b2aea3f8bfb..7f980e6141c282 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3501,11 +3501,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); - ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); + ha->dpc_lp_wq = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name); INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen); sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no); - ha->dpc_hp_wq = create_singlethread_workqueue(wq_name); + ha->dpc_hp_wq = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name); INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work); INIT_WORK(&ha->idc_state_handler, qla83xx_idc_state_handler_work); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index d7551b1443e4a7..11eadb3bd36e5c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7e7460a747a447..ceaf1c7b1d17b8 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 17cccd14765f8d..d91f54a6e752f2 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -8806,7 +8806,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, DEBUG2(printk("scsi: %s: Starting kernel thread for " "qla4xxx_dpc\n", __func__)); sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no); - ha->dpc_thread = create_singlethread_workqueue(buf); + ha->dpc_thread = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, buf); if (!ha->dpc_thread) { ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); ret = -ENODEV; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index ee69bd35889d17..a77e0499b738a6 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -55,7 +55,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c index 04749fde1636d9..e1a2a62b6910fe 100644 --- a/drivers/scsi/scsi_common.c +++ b/drivers/scsi/scsi_common.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include 
MODULE_DESCRIPTION("SCSI functions used by both the initiator and the target code"); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index a9d8a9c62663e3..de15fc0df1045b 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -47,7 +47,7 @@ #include -#include +#include #include #include @@ -2760,7 +2760,6 @@ static int resp_mode_sense(struct scsi_cmnd *scp, else bd_len = 0; alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7); - memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); if (0x3 == pcontrol) { /* Saving values not supported */ mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); return check_condition_result; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 612489afe8d246..10154d78e3360b 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -48,7 +48,7 @@ #include -#include +#include /* * These should *probably* be handled by the host itself. diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 3958a6d14bf457..adee6f60c96655 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include @@ -1163,7 +1163,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) if (blk_integrity_rq(rq)) { struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; - int ivecs; if (WARN_ON_ONCE(!prot_sdb)) { /* @@ -1175,20 +1174,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) goto out_free_sgtables; } - ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); - - if (sg_alloc_table_chained(&prot_sdb->table, ivecs, + if (sg_alloc_table_chained(&prot_sdb->table, + rq->nr_integrity_segments, prot_sdb->table.sgl, SCSI_INLINE_PROT_SG_CNT)) { ret = BLK_STS_RESOURCE; goto out_free_sgtables; } - count = blk_rq_map_integrity_sg(rq->q, rq->bio, - prot_sdb->table.sgl); - BUG_ON(count > ivecs); - BUG_ON(count > queue_max_integrity_segments(rq->q)); - + count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl); cmd->prot_sdb = prot_sdb; cmd->prot_sdb->table.nents = count; } @@ -1988,8 +1982,15 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim) if (shost->no_highmem) lim->features |= BLK_FEAT_BOUNCE_HIGH; - dma_set_seg_boundary(dev, shost->dma_boundary); - dma_set_max_seg_size(dev, shost->max_segment_size); + /* + * Propagate the DMA formation properties to the dma-mapping layer as + * a courtesy service to the LLDDs. This needs to check that the buses + * actually support the DMA API first, though. 
+ */ + if (dev->dma_parms) { + dma_set_seg_boundary(dev, shost->dma_boundary); + dma_set_max_seg_size(dev, shost->max_segment_size); + } } EXPORT_SYMBOL_GPL(scsi_init_limits); diff --git a/drivers/scsi/scsi_proto_test.c b/drivers/scsi/scsi_proto_test.c index 7fa0a78a2ad168..c093389edabb52 100644 --- a/drivers/scsi/scsi_proto_test.c +++ b/drivers/scsi/scsi_proto_test.c @@ -3,7 +3,7 @@ * Copyright 2023 Google LLC */ #include -#include +#include #include static void test_scsi_proto(struct kunit *test) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index c0b72199b4faac..042329b74c6e68 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 3e47c4472a80e7..b3baae91e7a249 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -5,7 +5,7 @@ */ #include #include -#include +#include #include #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 7d088b8da07578..62ea7e44460e52 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -441,18 +441,13 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, fc_host->next_vport_number = 0; fc_host->npiv_vports_inuse = 0; - snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), - "fc_wq_%d", shost->host_no); - fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name); + fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no); if (!fc_host->work_q) return -ENOMEM; fc_host->dev_loss_tmo = fc_dev_loss_tmo; - snprintf(fc_host->devloss_work_q_name, - sizeof(fc_host->devloss_work_q_name), - "fc_dl_%d", shost->host_no); - fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0, - fc_host->devloss_work_q_name); + fc_host->devloss_work_q = alloc_workqueue("fc_dl_%d", 0, 0, + shost->host_no); if (!fc_host->devloss_work_q) { destroy_workqueue(fc_host->work_q); fc_host->work_q = NULL; diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c index dd69342bbe7815..19e6c3852d5048 100644 --- a/drivers/scsi/scsicam.c +++ b/drivers/scsi/scsicam.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 9db86943d04cf5..ca4bc0ac76adcf 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include @@ -58,7 +57,7 @@ #include #include #include -#include +#include #include #include @@ -1382,7 +1381,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); - } else if (rq->cmd_flags & REQ_ATOMIC && write) { + } else if (rq->cmd_flags & REQ_ATOMIC) { ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks, sdkp->use_atomic_write_boundary, protect | fua); @@ -3404,7 +3403,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp, rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb1); - if (!vpd || vpd->len < 8) { + if (!vpd || vpd->len <= 8) { rcu_read_unlock(); return; } @@ -4093,9 +4092,38 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) { unsigned char cmd[6] = { START_STOP }; /* START_VALID */ struct scsi_sense_hdr sshdr; + struct scsi_failure failure_defs[] = { + { + /* Power on, reset, 
or bus device reset occurred */ + .sense = UNIT_ATTENTION, + .asc = 0x29, + .ascq = 0, + .result = SAM_STAT_CHECK_CONDITION, + }, + { + /* Power on occurred */ + .sense = UNIT_ATTENTION, + .asc = 0x29, + .ascq = 1, + .result = SAM_STAT_CHECK_CONDITION, + }, + { + /* SCSI bus reset */ + .sense = UNIT_ATTENTION, + .asc = 0x29, + .ascq = 2, + .result = SAM_STAT_CHECK_CONDITION, + }, + {} + }; + struct scsi_failures failures = { + .total_allowed = 3, + .failure_definitions = failure_defs, + }; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, .req_flags = BLK_MQ_REQ_PM, + .failures = &failures, }; struct scsi_device *sdp = sdkp->device; int res; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index c8b9654d30f0c3..ee2b742387581b 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index e22c7f5e652bd9..2c61624cb4b038 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index baf870a03ecf6c..f86be197fedd04 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1424,7 +1424,6 @@ static const struct file_operations sg_fops = { .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, - .llseek = no_llseek, }; static const struct class sg_sysfs_class = { diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index cdedc271857aae..fae6db20a6e9e9 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -505,7 +505,7 @@ struct pqi_vendor_general_request { __le64 buffer_address; __le32 buffer_length; u8 reserved[40]; - } ofa_memory_allocation; + } host_memory_allocation; } data; }; @@ -517,21 +517,30 @@ struct pqi_vendor_general_response { u8 reserved[2]; }; -#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 -#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1 +#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 +#define PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE 1 +#define PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE 2 #define PQI_OFA_VERSION 1 #define PQI_OFA_SIGNATURE "OFA_QRM" -#define PQI_OFA_MAX_SG_DESCRIPTORS 64 +#define PQI_CTRL_LOG_VERSION 1 +#define PQI_CTRL_LOG_SIGNATURE "FW_DATA" +#define PQI_HOST_MAX_SG_DESCRIPTORS 64 -struct pqi_ofa_memory { - __le64 signature; /* "OFA_QRM" */ +struct pqi_host_memory { + __le64 signature; /* "OFA_QRM", "FW_DATA", etc. 
*/ __le16 version; /* version of this struct (1 = 1st version) */ u8 reserved[62]; __le32 bytes_allocated; /* total allocated memory in bytes */ __le16 num_memory_descriptors; u8 reserved1[2]; - struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS]; + struct pqi_sg_descriptor sg_descriptor[PQI_HOST_MAX_SG_DESCRIPTORS]; +}; + +struct pqi_host_memory_descriptor { + struct pqi_host_memory *host_memory; + dma_addr_t host_memory_dma_handle; + void **host_chunk_virt_address; }; struct pqi_aio_error_info { @@ -867,7 +876,8 @@ struct pqi_config_table_firmware_features { #define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17 #define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18 #define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21 -#define PQI_FIRMWARE_FEATURE_MAXIMUM 21 +#define PQI_FIRMWARE_FEATURE_CTRL_LOGGING 22 +#define PQI_FIRMWARE_FEATURE_MAXIMUM 22 struct pqi_config_table_debug { struct pqi_config_table_section_header header; @@ -1096,6 +1106,11 @@ struct pqi_tmf_work { u8 scsi_opcode; }; +struct pqi_raid_io_stats { + u64 raid_bypass_cnt; + u64 write_stream_cnt; +}; + struct pqi_scsi_dev { int devtype; /* as reported by INQUIRY command */ u8 device_type; /* as reported by */ @@ -1158,7 +1173,7 @@ struct pqi_scsi_dev { struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN]; atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE]; - unsigned int raid_bypass_cnt; + struct pqi_raid_io_stats __percpu *raid_io_stats; struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE]; }; @@ -1357,6 +1372,7 @@ struct pqi_ctrl_info { u8 firmware_triage_supported : 1; u8 rpl_extended_format_4_5_supported : 1; u8 multi_lun_device_supported : 1; + u8 ctrl_logging_supported : 1; u8 enable_r1_writes : 1; u8 enable_r5_writes : 1; u8 enable_r6_writes : 1; @@ -1398,13 +1414,12 @@ struct pqi_ctrl_info { wait_queue_head_t block_requests_wait; struct mutex ofa_mutex; - struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; - dma_addr_t pqi_ofa_mem_dma_handle; - void **pqi_ofa_chunk_virt_addr; struct work_struct ofa_memory_alloc_work; struct work_struct ofa_quiesce_work; u32 ofa_bytes_requested; u16 ofa_cancel_reason; + struct pqi_host_memory_descriptor ofa_memory; + struct pqi_host_memory_descriptor ctrl_log_memory; enum pqi_ctrl_removal_state ctrl_removal_state; }; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 24c7cb285dca0f..870f37b7054644 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include "smartpqi.h" #include "smartpqi_sis.h" @@ -33,11 +33,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.26-030" +#define DRIVER_VERSION "2.1.30-031" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 26 -#define DRIVER_REVISION 30 +#define DRIVER_RELEASE 30 +#define DRIVER_REVISION 31 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -92,9 +92,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); -static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info); -static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); -static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); +static void pqi_host_setup_buffer(struct 
pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size); +static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor); +static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code); static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs); static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info); @@ -1508,6 +1508,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, if (rc) goto error; + device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats); + if (!device->raid_io_stats) { + rc = -ENOMEM; + goto error; + } + device->raid_map = raid_map; return 0; @@ -2099,6 +2105,10 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, /* To prevent this from being freed later. */ new_device->raid_map = NULL; } + if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) { + existing_device->raid_io_stats = new_device->raid_io_stats; + new_device->raid_io_stats = NULL; + } existing_device->raid_bypass_configured = new_device->raid_bypass_configured; existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; } @@ -2121,6 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, static inline void pqi_free_device(struct pqi_scsi_dev *device) { if (device) { + free_percpu(device->raid_io_stats); kfree(device->raid_map); kfree(device); } @@ -2292,17 +2303,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, * queue depth, device size. */ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + /* + * Check for queue depth change. + */ if (device->sdev && device->queue_depth != device->advertised_queue_depth) { device->advertised_queue_depth = device->queue_depth; scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); - spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); - if (pqi_volume_rescan_needed(device)) { - device->rescan = false; - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - scsi_rescan_device(device->sdev); - } else { - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - } + } + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + /* + * Check for changes in the device, such as size. 
+ */ + if (pqi_volume_rescan_needed(device)) { + device->rescan = false; + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + scsi_rescan_device(device->sdev); + } else { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); } } @@ -2354,14 +2371,6 @@ static inline void pqi_mask_device(u8 *scsi3addr) scsi3addr[3] |= 0xc0; } -static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) -{ - if (pqi_is_logical_device(device)) - return false; - - return (device->path_map & (device->path_map - 1)) != 0; -} - static inline bool pqi_expose_device(struct pqi_scsi_dev *device) { return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); @@ -3244,6 +3253,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request) sense_data_length); } + if (pqi_cmd_priv(scmd)->this_residual && + !pqi_is_logical_device(scmd->device->hostdata) && + scsi_status == SAM_STAT_CHECK_CONDITION && + host_byte == DID_OK && + sense_data_length && + scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && + sshdr.sense_key == ILLEGAL_REQUEST && + sshdr.asc == 0x26 && + sshdr.ascq == 0x0) { + host_byte = DID_NO_CONNECT; + pqi_take_device_offline(scmd->device, "AIO"); + scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1); + } + scmd->result = scsi_status; set_host_byte(scmd, host_byte); } @@ -3258,14 +3281,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) int residual_count; int xfer_count; bool device_offline; - struct pqi_scsi_dev *device; scmd = io_request->scmd; error_info = io_request->error_info; host_byte = DID_OK; sense_data_length = 0; device_offline = false; - device = scmd->device->hostdata; switch (error_info->service_response) { case PQI_AIO_SERV_RESPONSE_COMPLETE: @@ -3290,14 +3311,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) break; case PQI_AIO_STATUS_AIO_PATH_DISABLED: pqi_aio_path_disabled(io_request); - if (pqi_is_multipath_device(device)) { - pqi_device_remove_start(device); - host_byte = DID_NO_CONNECT; - scsi_status = SAM_STAT_CHECK_CONDITION; - } else { - scsi_status = SAM_STAT_GOOD; - io_request->status = -EAGAIN; - } + scsi_status = SAM_STAT_GOOD; + io_request->status = -EAGAIN; break; case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: @@ -3625,7 +3640,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) ctrl_info->pqi_mode_enabled = false; pqi_save_ctrl_mode(ctrl_info, SIS_MODE); rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); dev_info(&ctrl_info->pci_dev->dev, "Online Firmware Activation: %s\n", @@ -3636,7 +3651,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) "Online Firmware Activation ABORTED\n"); if (ctrl_info->soft_reset_handshake_supported) pqi_clear_soft_reset_status(ctrl_info); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); pqi_ofa_ctrl_unquiesce(ctrl_info); break; @@ -3646,7 +3661,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) dev_err(&ctrl_info->pci_dev->dev, "unexpected Online Firmware Activation reset status: 0x%x\n", reset_status); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); pqi_ofa_ctrl_unquiesce(ctrl_info); pqi_take_ctrl_offline(ctrl_info, 
PQI_OFA_RESPONSE_TIMEOUT); @@ -3661,8 +3676,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work) ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); pqi_ctrl_ofa_start(ctrl_info); - pqi_ofa_setup_host_buffer(ctrl_info); - pqi_ofa_host_memory_update(ctrl_info); + pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested); + pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE); } static void pqi_ofa_quiesce_worker(struct work_struct *work) @@ -3702,7 +3717,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, dev_info(&ctrl_info->pci_dev->dev, "received Online Firmware Activation cancel request: reason: %u\n", ctrl_info->ofa_cancel_reason); - pqi_ofa_free_host_buffer(ctrl_info); + pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); pqi_ctrl_ofa_done(ctrl_info); break; default: @@ -5933,7 +5948,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, int rc; struct pqi_scsi_dev *device; struct pqi_stream_data *pqi_stream_data; - struct pqi_scsi_dev_raid_map_data rmd; + struct pqi_scsi_dev_raid_map_data rmd = { 0 }; if (!ctrl_info->enable_stream_detection) return false; @@ -5975,6 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; pqi_stream_data->last_accessed = jiffies; + per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++; return true; } @@ -6025,7 +6041,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm ctrl_info = shost_to_hba(shost); - if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { + if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) { set_host_byte(scmd, DID_NO_CONNECT); pqi_scsi_done(scmd); return 0; @@ -6053,7 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { raid_bypassed = true; - device->raid_bypass_cnt++; + per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++; } } if (!raid_bypassed) @@ -6190,14 +6206,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, continue; scsi_device = scmd->device->hostdata; - if (scsi_device != device) - continue; - - if ((u8)scmd->device->lun != lun) - continue; list_del(&io_request->request_list_entry); - set_host_byte(scmd, DID_RESET); + if (scsi_device == device && (u8)scmd->device->lun == lun) + set_host_byte(scmd, DID_RESET); + else + set_host_byte(scmd, DID_REQUEUE); pqi_free_io_request(io_request); scsi_dma_unmap(scmd); pqi_scsi_done(scmd); @@ -7350,7 +7364,8 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, struct scsi_device *sdev; struct pqi_scsi_dev *device; unsigned long flags; - unsigned int raid_bypass_cnt; + u64 raid_bypass_cnt; + int cpu; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -7366,11 +7381,17 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, return -ENODEV; } - raid_bypass_cnt = device->raid_bypass_cnt; + raid_bypass_cnt = 0; + + if (device->raid_io_stats) { + for_each_online_cpu(cpu) { + raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt; + } + } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - return scnprintf(buffer, 
PAGE_SIZE, "0x%x\n", raid_bypass_cnt); + return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt); } static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, @@ -7452,6 +7473,43 @@ static ssize_t pqi_numa_node_show(struct device *dev, return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); } +static ssize_t pqi_write_stream_cnt_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + u64 write_stream_cnt; + int cpu; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + if (pqi_ctrl_offline(ctrl_info)) + return -ENODEV; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + write_stream_cnt = 0; + + if (device->raid_io_stats) { + for_each_online_cpu(cpu) { + write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt; + } + } + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt); +} + static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); @@ -7462,6 +7520,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); static DEVICE_ATTR(sas_ncq_prio_enable, 0644, pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); +static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL); static struct attribute *pqi_sdev_attrs[] = { &dev_attr_lunid.attr, @@ -7473,6 +7532,7 @@ static struct attribute *pqi_sdev_attrs[] = { &dev_attr_raid_bypass_cnt.attr, &dev_attr_sas_ncq_prio_enable.attr, &dev_attr_numa_node.attr, + &dev_attr_write_stream_cnt.attr, NULL }; @@ -7863,6 +7923,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: ctrl_info->multi_lun_device_supported = firmware_feature->enabled; break; + case PQI_FIRMWARE_FEATURE_CTRL_LOGGING: + ctrl_info->ctrl_logging_supported = firmware_feature->enabled; + break; } pqi_firmware_feature_status(ctrl_info, firmware_feature); @@ -7968,6 +8031,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = { .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, .feature_status = pqi_ctrl_update_feature_flags, }, + { + .feature_name = "Controller Data Logging", + .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING, + .feature_status = pqi_ctrl_update_feature_flags, + }, }; static void pqi_process_firmware_features( @@ -8070,6 +8138,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) ctrl_info->firmware_triage_supported = false; ctrl_info->rpl_extended_format_4_5_supported = false; ctrl_info->multi_lun_device_supported = false; + ctrl_info->ctrl_logging_supported = false; } static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) @@ -8210,6 +8279,9 @@ static void pqi_perform_lockup_action(void) } } +#define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024) +#define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS) + static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) { int rc; @@ -8221,6 +8293,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) 
return rc; } + if (sis_is_ctrl_logging_supported(ctrl_info)) { + sis_notify_kdump(ctrl_info); + rc = sis_wait_for_ctrl_logging_completion(ctrl_info); + if (rc) + return rc; + } sis_soft_reset(ctrl_info); ssleep(PQI_POST_RESET_DELAY_SECS); } else { @@ -8402,6 +8480,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; + if (ctrl_info->ctrl_logging_supported && !reset_devices) { + pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); + pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); + } + rc = pqi_get_ctrl_product_details(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -8586,8 +8669,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) return rc; } - if (pqi_ofa_in_progress(ctrl_info)) + if (pqi_ofa_in_progress(ctrl_info)) { pqi_ctrl_unblock_scan(ctrl_info); + if (ctrl_info->ctrl_logging_supported) { + if (!ctrl_info->ctrl_log_memory.host_memory) + pqi_host_setup_buffer(ctrl_info, + &ctrl_info->ctrl_log_memory, + PQI_CTRL_LOG_TOTAL_SIZE, + PQI_CTRL_LOG_MIN_SIZE); + pqi_host_memory_update(ctrl_info, + &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); + } else { + if (ctrl_info->ctrl_log_memory.host_memory) + pqi_host_free_buffer(ctrl_info, + &ctrl_info->ctrl_log_memory); + } + } pqi_scan_scsi_devices(ctrl_info); @@ -8777,6 +8874,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) pqi_fail_all_outstanding_requests(ctrl_info); ctrl_info->pqi_mode_enabled = false; } + pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory); pqi_unregister_scsi(ctrl_info); if (ctrl_info->pqi_mode_enabled) pqi_revert_to_sis_mode(ctrl_info); @@ -8802,177 +8900,187 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) pqi_ctrl_unblock_scan(ctrl_info); } -static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) +static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) +{ + ssleep(delay_secs); + + return pqi_ctrl_init_resume(ctrl_info); +} + +static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u32 total_size, u32 chunk_size) { int i; u32 sg_count; struct device *dev; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; struct pqi_sg_descriptor *mem_descriptor; dma_addr_t dma_handle; - ofap = ctrl_info->pqi_ofa_mem_virt_addr; - sg_count = DIV_ROUND_UP(total_size, chunk_size); - if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS) + if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS) goto out; - ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); - if (!ctrl_info->pqi_ofa_chunk_virt_addr) + host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); + if (!host_memory_descriptor->host_chunk_virt_address) goto out; dev = &ctrl_info->pci_dev->dev; + host_memory = host_memory_descriptor->host_memory; for (i = 0; i < sg_count; i++) { - ctrl_info->pqi_ofa_chunk_virt_addr[i] = - dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); - if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) + host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); + if (!host_memory_descriptor->host_chunk_virt_address[i]) goto out_free_chunks; - mem_descriptor = &ofap->sg_descriptor[i]; + mem_descriptor = 
&host_memory->sg_descriptor[i]; put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); put_unaligned_le32(chunk_size, &mem_descriptor->length); } put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); - put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); - put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated); + put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors); + put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated); return 0; out_free_chunks: while (--i >= 0) { - mem_descriptor = &ofap->sg_descriptor[i]; + mem_descriptor = &host_memory->sg_descriptor[i]; dma_free_coherent(dev, chunk_size, - ctrl_info->pqi_ofa_chunk_virt_addr[i], + host_memory_descriptor->host_chunk_virt_address[i], get_unaligned_le64(&mem_descriptor->address)); } - kfree(ctrl_info->pqi_ofa_chunk_virt_addr); - + kfree(host_memory_descriptor->host_chunk_virt_address); out: return -ENOMEM; } -static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) +static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u32 total_required_size, u32 min_required_size) { - u32 total_size; u32 chunk_size; u32 min_chunk_size; - if (ctrl_info->ofa_bytes_requested == 0) + if (total_required_size == 0 || min_required_size == 0) return 0; - total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); - min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS); + total_required_size = PAGE_ALIGN(total_required_size); + min_required_size = PAGE_ALIGN(min_required_size); + min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS); min_chunk_size = PAGE_ALIGN(min_chunk_size); - for (chunk_size = total_size; chunk_size >= min_chunk_size;) { - if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) - return 0; - chunk_size /= 2; - chunk_size = PAGE_ALIGN(chunk_size); + while (total_required_size >= min_required_size) { + for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) { + if (pqi_host_alloc_mem(ctrl_info, + host_memory_descriptor, total_required_size, + chunk_size) == 0) + return 0; + chunk_size /= 2; + chunk_size = PAGE_ALIGN(chunk_size); + } + total_required_size /= 2; + total_required_size = PAGE_ALIGN(total_required_size); } return -ENOMEM; } -static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) +static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u32 total_size, u32 min_size) { struct device *dev; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; dev = &ctrl_info->pci_dev->dev; - ofap = dma_alloc_coherent(dev, sizeof(*ofap), - &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); - if (!ofap) + host_memory = dma_alloc_coherent(dev, sizeof(*host_memory), + &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL); + if (!host_memory) return; - ctrl_info->pqi_ofa_mem_virt_addr = ofap; + host_memory_descriptor->host_memory = host_memory; - if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { - dev_err(dev, - "failed to allocate host buffer for Online Firmware Activation\n"); - dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); - ctrl_info->pqi_ofa_mem_virt_addr = NULL; + if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor, + total_size, min_size) < 0) { + dev_err(dev, "failed to allocate firmware usable host buffer\n"); + dma_free_coherent(dev, sizeof(*host_memory), host_memory, + 
host_memory_descriptor->host_memory_dma_handle); + host_memory_descriptor->host_memory = NULL; return; } - - put_unaligned_le16(PQI_OFA_VERSION, &ofap->version); - memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature)); } -static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) +static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor) { unsigned int i; struct device *dev; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; struct pqi_sg_descriptor *mem_descriptor; unsigned int num_memory_descriptors; - ofap = ctrl_info->pqi_ofa_mem_virt_addr; - if (!ofap) + host_memory = host_memory_descriptor->host_memory; + if (!host_memory) return; dev = &ctrl_info->pci_dev->dev; - if (get_unaligned_le32(&ofap->bytes_allocated) == 0) + if (get_unaligned_le32(&host_memory->bytes_allocated) == 0) goto out; - mem_descriptor = ofap->sg_descriptor; - num_memory_descriptors = - get_unaligned_le16(&ofap->num_memory_descriptors); + mem_descriptor = host_memory->sg_descriptor; + num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors); for (i = 0; i < num_memory_descriptors; i++) { dma_free_coherent(dev, get_unaligned_le32(&mem_descriptor[i].length), - ctrl_info->pqi_ofa_chunk_virt_addr[i], + host_memory_descriptor->host_chunk_virt_address[i], get_unaligned_le64(&mem_descriptor[i].address)); } - kfree(ctrl_info->pqi_ofa_chunk_virt_addr); + kfree(host_memory_descriptor->host_chunk_virt_address); out: - dma_free_coherent(dev, sizeof(*ofap), ofap, - ctrl_info->pqi_ofa_mem_dma_handle); - ctrl_info->pqi_ofa_mem_virt_addr = NULL; + dma_free_coherent(dev, sizeof(*host_memory), host_memory, + host_memory_descriptor->host_memory_dma_handle); + host_memory_descriptor->host_memory = NULL; } -static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) +static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, + struct pqi_host_memory_descriptor *host_memory_descriptor, + u16 function_code) { u32 buffer_length; struct pqi_vendor_general_request request; - struct pqi_ofa_memory *ofap; + struct pqi_host_memory *host_memory; memset(&request, 0, sizeof(request)); request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; - put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, - &request.header.iu_length); - put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, - &request.function_code); - - ofap = ctrl_info->pqi_ofa_mem_virt_addr; - - if (ofap) { - buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) + - get_unaligned_le16(&ofap->num_memory_descriptors) * - sizeof(struct pqi_sg_descriptor); - - put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, - &request.data.ofa_memory_allocation.buffer_address); - put_unaligned_le32(buffer_length, - &request.data.ofa_memory_allocation.buffer_length); + put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); + put_unaligned_le16(function_code, &request.function_code); + + host_memory = host_memory_descriptor->host_memory; + + if (host_memory) { + buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor); + put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address); + put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length); + + if (function_code == 
PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) { + put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version); + memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature)); + } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) { + put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version); + memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature)); + } } return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); } -static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) -{ - ssleep(delay_secs); - - return pqi_ctrl_init_resume(ctrl_info); -} - static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, .status = SAM_STAT_CHECK_CONDITION, @@ -9444,6 +9552,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a37) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x0462) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x1104) @@ -9472,6 +9584,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x110b) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1110) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8460) @@ -9480,6 +9596,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8461) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x8462) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xc460) @@ -9588,6 +9708,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0089) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x00a1) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f3a, 0x0104) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd227) @@ -10180,6 +10308,110 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1137, 0x02fa) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1137, 0x02fe) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1137, 0x02ff) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1137, 0x0300) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0045) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0046) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0047) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0048) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x004a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x004b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x004c) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x004f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0051) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0052) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0053) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0054) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x006b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x006c) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 
0x1ff9, 0x006d) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x006f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0070) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0071) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0072) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0086) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0087) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0088) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x0089) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1e93, 0x1000) @@ -10264,6 +10496,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1f51, 0x1045) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ff9, 0x00a3) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index a981d037794809..93e96705754e8e 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "smartpqi.h" static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port) diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index 673437c7152b9d..ae5a264d062dea 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "smartpqi.h" #include "smartpqi_sis.h" @@ -29,6 +29,7 @@ #define SIS_ENABLE_INTX 0x80 #define SIS_SOFT_RESET 0x100 #define SIS_CMD_READY 0x200 +#define SIS_NOTIFY_KDUMP 0x400 #define SIS_TRIGGER_SHUTDOWN 0x800000 #define SIS_PQI_RESET_QUIESCE 0x1000000 @@ -52,6 +53,8 @@ #define SIS_BASE_STRUCT_ALIGNMENT 16 #define SIS_CTRL_KERNEL_FW_TRIAGE 0x3 +#define SIS_CTRL_KERNEL_CTRL_LOGGING 0x4 +#define SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS 0x18 #define SIS_CTRL_KERNEL_UP 0x80 #define SIS_CTRL_KERNEL_PANIC 0x100 #define SIS_CTRL_READY_TIMEOUT_SECS 180 @@ -65,6 +68,13 @@ enum sis_fw_triage_status { FW_TRIAGE_COMPLETED }; +enum sis_ctrl_logging_status { + CTRL_LOGGING_NOT_STARTED = 0, + CTRL_LOGGING_STARTED, + CTRL_LOGGING_COND_INVALID, + CTRL_LOGGING_COMPLETED +}; + #pragma pack(1) /* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */ @@ -442,6 +452,21 @@ static inline enum sis_fw_triage_status SIS_CTRL_KERNEL_FW_TRIAGE)); } +bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info) +{ + return readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING; +} + +void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info) +{ + sis_set_doorbell_bit(ctrl_info, SIS_NOTIFY_KDUMP); +} + +static inline enum sis_ctrl_logging_status sis_read_ctrl_logging_status(struct pqi_ctrl_info *ctrl_info) +{ + return ((enum sis_ctrl_logging_status)((readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS) >> 3)); +} + void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) { writel(SIS_SOFT_RESET, @@ -484,6 +509,41 @@ int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info) return rc; } +#define SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS 180 +#define SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS 1 + +int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info) +{ + int 
rc; + enum sis_ctrl_logging_status status; + unsigned long timeout; + + timeout = (SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS * HZ) + jiffies; + while (1) { + status = sis_read_ctrl_logging_status(ctrl_info); + if (status == CTRL_LOGGING_COND_INVALID) { + dev_err(&ctrl_info->pci_dev->dev, + "controller data logging condition invalid\n"); + rc = -EINVAL; + break; + } else if (status == CTRL_LOGGING_COMPLETED) { + rc = 0; + break; + } + + if (time_after(jiffies, timeout)) { + dev_err(&ctrl_info->pci_dev->dev, + "timed out waiting for controller data logging status\n"); + rc = -ETIMEDOUT; + break; + } + + ssleep(SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS); + } + + return rc; +} + void sis_verify_structures(void) { BUILD_BUG_ON(offsetof(struct sis_base_struct, diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 0c97626d87d4d4..7e0eac3d07deef 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -31,6 +31,9 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info); int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info); +bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info); +void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info); +int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info); extern unsigned int sis_ctrl_ready_timeout_secs; diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c index cc824dcfe7da42..9be3f0193145fd 100644 --- a/drivers/scsi/snic/snic_main.c +++ b/drivers/scsi/snic/snic_main.c @@ -300,9 +300,8 @@ snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev) } SNIC_BUG_ON(shost->work_q != NULL); - snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", - shost->host_no); - shost->work_q = create_singlethread_workqueue(shost->work_q_name); + shost->work_q = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM, + shost->host_no); if (!shost->work_q) { SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n"); @@ -873,7 +872,7 @@ snic_global_data_init(void) snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep; len = sizeof(struct snic_host_req); - cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, + cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN, SLAB_HWCACHE_ALIGN, NULL); if (!cachep) { SNIC_ERR("Failed to create snic tm req slab\n"); @@ -884,7 +883,8 @@ snic_global_data_init(void) snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep; /* snic_event queue */ - snic_glob->event_q = create_singlethread_workqueue("snic_event_wq"); + snic_glob->event_q = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "snic_event_wq"); if (!snic_glob->event_q) { SNIC_ERR("snic event queue create failed\n"); ret = -ENOMEM; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 3f491019103e0c..198bec87bb8e7c 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -52,7 +52,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 0d8ce1a92168c4..beb88f25dbb993 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -46,7 +46,7 @@ static const char *verstr = "20160209"; #include #include -#include +#include #include #include @@ -834,6 +834,9 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next) int backspace, result; struct st_partstat *STps; + if (STp->ready != ST_READY) + return 0; + /* * If there was a bus 
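sis_wait_for_ctrl_logging_completion() above is a bounded polling loop: read the firmware status, stop on completion or on an invalid condition, and give up after a fixed deadline. The same shape reduced to a standalone sketch (read_status() is a stand-in for the real register read):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define POLL_TIMEOUT_SECS	180
#define POLL_INTERVAL_SECS	1

static int wait_for_status(int (*read_status)(void), int done, int invalid)
{
	unsigned long deadline = jiffies + POLL_TIMEOUT_SECS * HZ;

	while (1) {
		int status = read_status();

		if (status == invalid)
			return -EINVAL;	/* hard failure, stop polling */
		if (status == done)
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;

		ssleep(POLL_INTERVAL_SECS);
	}
}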
reset, block further access * to this device. @@ -841,8 +844,6 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next) if (STp->pos_unknown) return (-EIO); - if (STp->ready != ST_READY) - return 0; STps = &(STp->ps[STp->partition]); if (STps->rw == ST_WRITING) /* Writing */ return st_flush_write_buffer(STp); diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 8ffb75be99bc8f..0e81125df8c72d 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -334,7 +334,6 @@ struct st_hba { struct st_ccb *wait_ccb; __le32 *scratch; - char work_q_name[20]; struct workqueue_struct *work_q; struct work_struct reset_work; wait_queue_head_t reset_waitq; @@ -1795,9 +1794,8 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) hba->pdev = pdev; init_waitqueue_head(&hba->reset_waitq); - snprintf(hba->work_q_name, sizeof(hba->work_q_name), - "stex_wq_%d", host->host_no); - hba->work_q = create_singlethread_workqueue(hba->work_q_name); + hba->work_q = alloc_ordered_workqueue("stex_wq_%d", WQ_MEM_RECLAIM, + host->host_no); if (!hba->work_q) { printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n", pci_name(pdev)); diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index f51702893306d7..fffc0fa525940c 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -304,7 +304,7 @@ static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata, sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); #endif - return count; + return count; } diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index c4fea077265ed4..32242d86cf5b63 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -1137,7 +1137,8 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter) snprintf(name, sizeof(name), "vmw_pvscsi_wq_%u", adapter->host->host_no); - adapter->workqueue = create_singlethread_workqueue(name); + adapter->workqueue = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name); if (!adapter->workqueue) { printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n"); return 0; diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c index 22d412cab91da8..15602ec862e30b 100644 --- a/drivers/scsi/zalon.c +++ b/drivers/scsi/zalon.c @@ -139,7 +139,7 @@ zalon_probe(struct parisc_device *dev) return -ENODEV; if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { - dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", + dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n", dev->irq); goto fail; } diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c index abe9091827cd68..a363f77881d1e1 100644 --- a/drivers/sh/intc/userimask.c +++ b/drivers/sh/intc/userimask.c @@ -32,8 +32,11 @@ store_intc_userimask(struct device *dev, const char *buf, size_t count) { unsigned long level; + int ret; - level = simple_strtoul(buf, NULL, 10); + ret = kstrtoul(buf, 10, &level); + if (ret != 0) + return ret; /* * Minimal acceptable IRQ levels are in the 2 - 16 range, but diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index 4ce0cb61e48135..242570a5e5654b 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -111,7 +111,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) { DECLARE_COMPLETION_ONSTACK(done); bool need_tid = false, clk_pause_msg = false; - int ret, timeout; + int ret; + unsigned long time_left; /* * do not vote for runtime-PM if the transactions are part of clock @@ -151,9 +152,9 @@ int 
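Several drivers in this range (snic, stex, vmw_pvscsi) replace create_singlethread_workqueue() with alloc_ordered_workqueue(), passing WQ_MEM_RECLAIM and a printf-style name, which also removes the separately stored name buffers. A sketch of the conversion on a hypothetical host structure:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct my_hba {
	unsigned int host_no;
	struct workqueue_struct *work_q;
};

static int my_hba_create_wq(struct my_hba *hba)
{
	/* ordered: at most one work item runs at a time, as before */
	hba->work_q = alloc_ordered_workqueue("my_hba_wq_%u", WQ_MEM_RECLAIM,
					      hba->host_no);
	if (!hba->work_q)
		return -ENOMEM;

	return 0;
}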
slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) if (!ret && need_tid && !txn->msg->comp) { unsigned long ms = txn->rl + HZ; - timeout = wait_for_completion_timeout(txn->comp, - msecs_to_jiffies(ms)); - if (!timeout) { + time_left = wait_for_completion_timeout(txn->comp, + msecs_to_jiffies(ms)); + if (!time_left) { ret = -ETIMEDOUT; slim_free_txn_tid(ctrl, txn); } diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c index 0274bc285b6018..e25f9bdd9b2337 100644 --- a/drivers/slimbus/qcom-ctrl.c +++ b/drivers/slimbus/qcom-ctrl.c @@ -330,7 +330,8 @@ static int qcom_xfer_msg(struct slim_controller *sctrl, void *pbuf = slim_alloc_txbuf(ctrl, txn, &done); unsigned long ms = txn->rl + HZ; u8 *puc; - int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES; + int ret = 0, retries = QCOM_BUF_ALLOC_RETRIES; + unsigned long time_left; u8 la = txn->la; u32 *head; /* HW expects length field to be excluded */ @@ -374,9 +375,9 @@ static int qcom_xfer_msg(struct slim_controller *sctrl, memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes); qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG); - timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms)); + time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(ms)); - if (!timeout) { + if (!time_left) { dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, txn->mt); ret = -ETIMEDOUT; diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index e0b21f0f79c14e..1ac8e2912fd1ce 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -788,7 +788,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl, struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev); DECLARE_COMPLETION_ONSTACK(tx_sent); DECLARE_COMPLETION_ONSTACK(done); - int ret, timeout, i; + int ret, i; + unsigned long time_left; u8 wbuf[SLIM_MSGQ_BUF_LEN]; u8 rbuf[SLIM_MSGQ_BUF_LEN]; u32 *pbuf; @@ -890,8 +891,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl, return ret; } - timeout = wait_for_completion_timeout(&tx_sent, HZ); - if (!timeout) { + time_left = wait_for_completion_timeout(&tx_sent, HZ); + if (!time_left) { dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, txn->mt); mutex_unlock(&ctrl->tx_lock); @@ -899,8 +900,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl, } if (usr_msg) { - timeout = wait_for_completion_timeout(&done, HZ); - if (!timeout) { + time_left = wait_for_completion_timeout(&done, HZ); + if (!time_left) { dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, txn->mt); mutex_unlock(&ctrl->tx_lock); @@ -916,7 +917,8 @@ static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl, struct slim_msg_txn *txn) { DECLARE_COMPLETION_ONSTACK(done); - int ret, timeout; + int ret; + unsigned long time_left; ret = pm_runtime_get_sync(ctrl->dev); if (ret < 0) @@ -928,8 +930,8 @@ static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl, if (ret) goto pm_put; - timeout = wait_for_completion_timeout(&done, HZ); - if (!timeout) { + time_left = wait_for_completion_timeout(&done, HZ); + if (!time_left) { dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, txn->mt); ret = -ETIMEDOUT; @@ -1168,11 +1170,12 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl) enum qcom_slim_ngd_state cur_state = ctrl->state; struct qcom_slim_ngd *ngd = ctrl->ngd; u32 laddr, rx_msgq; - int timeout, ret = 0; + int ret = 0; + unsigned long time_left; if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) 
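The slimbus hunks above change the variable that receives wait_for_completion_timeout() from int to unsigned long: the function returns the remaining jiffies (0 on timeout), which should not be stored in a signed int. A minimal usage sketch:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_for_tx_done(struct completion *done, unsigned int ms)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done, msecs_to_jiffies(ms));
	if (!time_left)
		return -ETIMEDOUT;	/* 0 remaining jiffies == timed out */

	return 0;
}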
{ - timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ); - if (!timeout) + time_left = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ); + if (!time_left) return -EREMOTEIO; } @@ -1217,8 +1220,8 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl) ngd->base + NGD_RX_MSGQ_CFG); qcom_slim_ngd_setup(ctrl); - timeout = wait_for_completion_timeout(&ctrl->reconf, HZ); - if (!timeout) { + time_left = wait_for_completion_timeout(&ctrl->reconf, HZ); + if (!time_left) { dev_err(ctrl->dev, "capability exchange timed-out\n"); return -ETIMEDOUT; } diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 5d924e946507b7..6a8daeb8c4b96c 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -7,6 +7,7 @@ source "drivers/soc/aspeed/Kconfig" source "drivers/soc/atmel/Kconfig" source "drivers/soc/bcm/Kconfig" source "drivers/soc/canaan/Kconfig" +source "drivers/soc/cirrus/Kconfig" source "drivers/soc/fsl/Kconfig" source "drivers/soc/fujitsu/Kconfig" source "drivers/soc/hisilicon/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index fb2bd31387d070..2037a8695cb289 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -8,6 +8,7 @@ obj-y += aspeed/ obj-$(CONFIG_ARCH_AT91) += atmel/ obj-y += bcm/ obj-$(CONFIG_ARCH_CANAAN) += canaan/ +obj-$(CONFIG_EP93XX_SOC) += cirrus/ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ @@ -32,5 +33,5 @@ obj-y += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ obj-$(CONFIG_ARCH_U8500) += ux500/ -obj-$(CONFIG_PLAT_VERSATILE) += versatile/ +obj-y += versatile/ obj-y += xilinx/ diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index 8809a948201aae..7549f1644e5ea0 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -41,6 +41,11 @@ static const struct meson_gx_soc_id { { "G12B", 0x29 }, { "SM1", 0x2b }, { "A1", 0x2c }, + { "T7", 0x36 }, + { "S4", 0x37 }, + { "A5", 0x3c }, + { "C3", 0x3d }, + { "A4", 0x40 }, }; static const struct meson_gx_package_id { @@ -76,6 +81,11 @@ static const struct meson_gx_package_id { { "S905X3", 0x2b, 0x10, 0x3f }, { "S905D3", 0x2b, 0x30, 0x3f }, { "A113L", 0x2c, 0x0, 0xf8 }, + { "S805X2", 0x37, 0x2, 0xf }, + { "C308L", 0x3d, 0x1, 0xf }, + { "A311D2", 0x36, 0x1, 0xf }, + { "A113X2", 0x3c, 0x1, 0xf }, + { "A113L2", 0x40, 0x1, 0xf }, }; static inline unsigned int socinfo_to_major(u32 socinfo) diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index cc9a3e107479ab..2a42b28931c96d 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c @@ -101,6 +101,29 @@ static const struct at91_soc socs[] __initconst = { AT91_CIDR_VERSION_MASK, SAM9X60_D6K_EXID_MATCH, "sam9x60 8MiB SDRAM SiP", "sam9x60"), #endif +#ifdef CONFIG_SOC_SAM9X7 + AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK, + AT91_CIDR_VERSION_MASK, SAM9X70_EXID_MATCH, + "sam9x70", "sam9x7"), + AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK, + AT91_CIDR_VERSION_MASK, SAM9X72_EXID_MATCH, + "sam9x72", "sam9x7"), + AT91_SOC(SAM9X7_CIDR_MATCH, AT91_CIDR_MATCH_MASK, + AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH, + "sam9x75", "sam9x7"), + AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D1M_EXID_MATCH, + AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH, + "sam9x75 16MB DDR2 SiP", "sam9x7"), + AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D5M_EXID_MATCH, + AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH, + "sam9x75 64MB DDR2 SiP", "sam9x7"), + AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D1G_EXID_MATCH, + AT91_CIDR_VERSION_MASK, 
SAM9X75_EXID_MATCH, + "sam9x75 125MB DDR3L SiP ", "sam9x7"), + AT91_SOC(SAM9X7_CIDR_MATCH, SAM9X75_D2G_EXID_MATCH, + AT91_CIDR_VERSION_MASK, SAM9X75_EXID_MATCH, + "sam9x75 250MB DDR3L SiP", "sam9x7"), +#endif #ifdef CONFIG_SOC_SAMA5 AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK, AT91_CIDR_VERSION_MASK, SAMA5D21CU_EXID_MATCH, diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h index 7a9f47ce85fb32..2c78e54255f7f8 100644 --- a/drivers/soc/atmel/soc.h +++ b/drivers/soc/atmel/soc.h @@ -44,6 +44,7 @@ at91_soc_init(const struct at91_soc *socs); #define AT91SAM9X5_CIDR_MATCH 0x019a05a0 #define AT91SAM9N12_CIDR_MATCH 0x019a07a0 #define SAM9X60_CIDR_MATCH 0x019b35a0 +#define SAM9X7_CIDR_MATCH 0x09750020 #define SAMA7G5_CIDR_MATCH 0x00162100 #define AT91SAM9M11_EXID_MATCH 0x00000001 @@ -66,6 +67,14 @@ at91_soc_init(const struct at91_soc *socs); #define SAM9X60_D1G_EXID_MATCH 0x00000010 #define SAM9X60_D6K_EXID_MATCH 0x00000011 +#define SAM9X70_EXID_MATCH 0x00000005 +#define SAM9X72_EXID_MATCH 0x00000004 +#define SAM9X75_D1G_EXID_MATCH 0x00000018 +#define SAM9X75_D2G_EXID_MATCH 0x00000020 +#define SAM9X75_D1M_EXID_MATCH 0x00000003 +#define SAM9X75_D5M_EXID_MATCH 0x00000010 +#define SAM9X75_EXID_MATCH 0x00000000 + #define SAMA7G51_EXID_MATCH 0x3 #define SAMA7G52_EXID_MATCH 0x2 #define SAMA7G53_EXID_MATCH 0x1 diff --git a/drivers/soc/cirrus/Kconfig b/drivers/soc/cirrus/Kconfig new file mode 100644 index 00000000000000..d8b3b1e68998c3 --- /dev/null +++ b/drivers/soc/cirrus/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only + +if ARCH_EP93XX + +config EP93XX_SOC + bool "Cirrus EP93xx chips SoC" + select SOC_BUS + select AUXILIARY_BUS + default y + help + Enable support SoC for Cirrus EP93xx chips. + + Cirrus EP93xx chips have several swlocked registers, + this driver provides locked access for reset, pinctrl + and clk devices implemented as auxiliary devices. + +endif diff --git a/drivers/soc/cirrus/Makefile b/drivers/soc/cirrus/Makefile new file mode 100644 index 00000000000000..9e6608b67f7694 --- /dev/null +++ b/drivers/soc/cirrus/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += soc-ep93xx.o diff --git a/drivers/soc/cirrus/soc-ep93xx.c b/drivers/soc/cirrus/soc-ep93xx.c new file mode 100644 index 00000000000000..3e79b3b13aefba --- /dev/null +++ b/drivers/soc/cirrus/soc-ep93xx.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * SoC driver for Cirrus EP93xx chips. + * Copyright (C) 2022 Nikita Shubin + * + * Based on a rewrite of arch/arm/mach-ep93xx/core.c + * Copyright (C) 2006 Lennert Buytenhek + * Copyright (C) 2007 Herbert Valerio Riedel + * + * Thanks go to Michael Burian and Ray Lehtiniemi for their key + * role in the ep93xx Linux community. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define EP93XX_SYSCON_DEVCFG 0x80 + +#define EP93XX_SWLOCK_MAGICK 0xaa +#define EP93XX_SYSCON_SWLOCK 0xc0 +#define EP93XX_SYSCON_SYSCFG 0x9c +#define EP93XX_SYSCON_SYSCFG_REV_MASK GENMASK(31, 28) +#define EP93XX_SYSCON_SYSCFG_REV_SHIFT 28 + +struct ep93xx_map_info { + spinlock_t lock; + void __iomem *base; + struct regmap *map; +}; + +/* + * EP93xx System Controller software locked register write + * + * Logic safeguards are included to condition the control signals for + * power connection to the matrix to prevent part damage. 
In addition, a + * software lock register is included that must be written with 0xAA + * before each register write to change the values of the four switch + * matrix control registers. + */ +static void ep93xx_regmap_write(struct regmap *map, spinlock_t *lock, + unsigned int reg, unsigned int val) +{ + guard(spinlock_irqsave)(lock); + + regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK); + regmap_write(map, reg, val); +} + +static void ep93xx_regmap_update_bits(struct regmap *map, spinlock_t *lock, + unsigned int reg, unsigned int mask, + unsigned int val) +{ + guard(spinlock_irqsave)(lock); + + regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK); + /* force write is required to clear swlock if no changes are made */ + regmap_update_bits_base(map, reg, mask, val, NULL, false, true); +} + +static void ep93xx_unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +static void ep93xx_adev_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev); + + kfree(rdev); +} + +static struct auxiliary_device __init *ep93xx_adev_alloc(struct device *parent, + const char *name, + struct ep93xx_map_info *info) +{ + struct ep93xx_regmap_adev *rdev __free(kfree) = NULL; + struct auxiliary_device *adev; + int ret; + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + + rdev->map = info->map; + rdev->base = info->base; + rdev->lock = &info->lock; + rdev->write = ep93xx_regmap_write; + rdev->update_bits = ep93xx_regmap_update_bits; + + adev = &rdev->adev; + adev->name = name; + adev->dev.parent = parent; + adev->dev.release = ep93xx_adev_release; + + ret = auxiliary_device_init(adev); + if (ret) + return ERR_PTR(ret); + + return &no_free_ptr(rdev)->adev; +} + +static int __init ep93xx_controller_register(struct device *parent, const char *name, + struct ep93xx_map_info *info) +{ + struct auxiliary_device *adev; + int ret; + + adev = ep93xx_adev_alloc(parent, name, info); + if (IS_ERR(adev)) + return PTR_ERR(adev); + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ret; + } + + return devm_add_action_or_reset(parent, ep93xx_unregister_adev, adev); +} + +static unsigned int __init ep93xx_soc_revision(struct regmap *map) +{ + unsigned int val; + + regmap_read(map, EP93XX_SYSCON_SYSCFG, &val); + val &= EP93XX_SYSCON_SYSCFG_REV_MASK; + val >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT; + return val; +} + +static const char __init *ep93xx_get_soc_rev(unsigned int rev) +{ + switch (rev) { + case EP93XX_CHIP_REV_D0: + return "D0"; + case EP93XX_CHIP_REV_D1: + return "D1"; + case EP93XX_CHIP_REV_E0: + return "E0"; + case EP93XX_CHIP_REV_E1: + return "E1"; + case EP93XX_CHIP_REV_E2: + return "E2"; + default: + return "unknown"; + } +} + +static const char *pinctrl_names[] __initconst = { + "pinctrl-ep9301", /* EP93XX_9301_SOC */ + "pinctrl-ep9307", /* EP93XX_9307_SOC */ + "pinctrl-ep9312", /* EP93XX_9312_SOC */ +}; + +static int __init ep93xx_syscon_probe(struct platform_device *pdev) +{ + enum ep93xx_soc_model model; + struct ep93xx_map_info *map_info; + struct soc_device_attribute *attrs; + struct soc_device *soc_dev; + struct device *dev = &pdev->dev; + struct regmap *map; + void __iomem *base; + unsigned int rev; + int ret; + + model = (enum ep93xx_soc_model)(uintptr_t)device_get_match_data(dev); + + map = device_node_to_regmap(dev->of_node); + if 
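ep93xx_regmap_write() above serializes software-locked syscon writes: under a spinlock it first writes the 0xaa magic to the SWLOCK register and then the target register, since the hardware relocks after every access. A condensed sketch of the pattern using explicit spin_lock_irqsave() (register offsets as in the driver, other names hypothetical):

#include <linux/regmap.h>
#include <linux/spinlock.h>

#define SYSCON_SWLOCK	0xc0
#define SWLOCK_MAGICK	0xaa

static void swlocked_regmap_write(struct regmap *map, spinlock_t *lock,
				  unsigned int reg, unsigned int val)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* the unlock write must immediately precede the protected write */
	regmap_write(map, SYSCON_SWLOCK, SWLOCK_MAGICK);
	regmap_write(map, reg, val);
	spin_unlock_irqrestore(lock, flags);
}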
(IS_ERR(map)) + return PTR_ERR(map); + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + attrs = devm_kzalloc(dev, sizeof(*attrs), GFP_KERNEL); + if (!attrs) + return -ENOMEM; + + rev = ep93xx_soc_revision(map); + + attrs->machine = of_flat_dt_get_machine_name(); + attrs->family = "Cirrus Logic EP93xx"; + attrs->revision = ep93xx_get_soc_rev(rev); + + soc_dev = soc_device_register(attrs); + if (IS_ERR(soc_dev)) + return PTR_ERR(soc_dev); + + map_info = devm_kzalloc(dev, sizeof(*map_info), GFP_KERNEL); + if (!map_info) + return -ENOMEM; + + spin_lock_init(&map_info->lock); + map_info->map = map; + map_info->base = base; + + ret = ep93xx_controller_register(dev, pinctrl_names[model], map_info); + if (ret) + dev_err(dev, "registering pinctrl controller failed\n"); + + /* + * EP93xx SSP clock rate was doubled in version E2. For more information + * see section 6 "2x SSP (Synchronous Serial Port) Clock – Revision E2 only": + * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf + */ + if (rev == EP93XX_CHIP_REV_E2) + ret = ep93xx_controller_register(dev, "clk-ep93xx.e2", map_info); + else + ret = ep93xx_controller_register(dev, "clk-ep93xx", map_info); + if (ret) + dev_err(dev, "registering clock controller failed\n"); + + ret = ep93xx_controller_register(dev, "reset-ep93xx", map_info); + if (ret) + dev_err(dev, "registering reset controller failed\n"); + + return 0; +} + +static const struct of_device_id ep9301_syscon_of_device_ids[] = { + { .compatible = "cirrus,ep9301-syscon", .data = (void *)EP93XX_9301_SOC }, + { .compatible = "cirrus,ep9302-syscon", .data = (void *)EP93XX_9301_SOC }, + { .compatible = "cirrus,ep9307-syscon", .data = (void *)EP93XX_9307_SOC }, + { .compatible = "cirrus,ep9312-syscon", .data = (void *)EP93XX_9312_SOC }, + { .compatible = "cirrus,ep9315-syscon", .data = (void *)EP93XX_9312_SOC }, + { /* sentinel */ } +}; + +static struct platform_driver ep9301_syscon_driver = { + .driver = { + .name = "ep9301-syscon", + .of_match_table = ep9301_syscon_of_device_ids, + }, +}; +builtin_platform_driver_probe(ep9301_syscon_driver, ep93xx_syscon_probe); diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index 392e54f14dbe92..aa5348f4902fe9 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c @@ -791,8 +791,6 @@ static int fsl_qman_probe(struct platform_device *pdev) * FQD memory MUST be zero'd by software */ zero_priv_mem(fqd_a, fqd_sz); -#else - WARN(1, "Unexpected architecture using non shared-dma-mem reservations"); #endif dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz); diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index e23b60618c1a15..456ef5d5c1996b 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -48,9 +48,10 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) struct device *dev = pcfg->dev; int ret; - pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); - if (!pcfg->iommu_domain) { + pcfg->iommu_domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(pcfg->iommu_domain)) { dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); + pcfg->iommu_domain = NULL; goto no_iommu; } ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu); diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig index fa9ffbed0e929b..eb03f42ab9781d 100644 --- a/drivers/soc/fsl/qe/Kconfig +++ b/drivers/soc/fsl/qe/Kconfig @@ -17,7 +17,7 @@ config 
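The qman_portal change above switches from iommu_domain_alloc(&platform_bus_type), which returned NULL on failure, to iommu_paging_domain_alloc(dev), which returns an ERR_PTR; the caller's check changes accordingly and the pointer is reset to NULL so the rest of the code can keep treating NULL as "no IOMMU". A minimal sketch of that error-handling shape:

#include <linux/iommu.h>
#include <linux/err.h>

static struct iommu_domain *setup_paging_domain(struct device *dev)
{
	struct iommu_domain *domain;

	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return NULL;	/* caller falls back to running without IOMMU */

	return domain;
}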
QUICC_ENGINE config UCC_SLOW bool - default y if SERIAL_QE + default y if SERIAL_QE || (CPM_QMC && QUICC_ENGINE) help This option provides qe_lib support to UCC slow protocols: UART, BISYNC, QMC @@ -31,26 +31,27 @@ config UCC_FAST config UCC bool - default y if UCC_FAST || UCC_SLOW + default y if UCC_FAST || UCC_SLOW || (CPM_TSA && QUICC_ENGINE) config CPM_TSA - tristate "CPM TSA support" + tristate "CPM/QE TSA support" depends on OF && HAS_IOMEM - depends on CPM1 || (CPM && COMPILE_TEST) + depends on CPM1 || QUICC_ENGINE || \ + ((CPM || QUICC_ENGINE) && COMPILE_TEST) help - Freescale CPM Time Slot Assigner (TSA) + Freescale CPM/QE Time Slot Assigner (TSA) controller. This option enables support for this controller config CPM_QMC - tristate "CPM QMC support" + tristate "CPM/QE QMC support" depends on OF && HAS_IOMEM - depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST) + depends on FSL_SOC depends on CPM_TSA help - Freescale CPM QUICC Multichannel Controller + Freescale CPM/QE QUICC Multichannel Controller (QMC) This option enables support for this diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c index a877347d37d3e2..02c29f5f86d35e 100644 --- a/drivers/soc/fsl/qe/qe_common.c +++ b/drivers/soc/fsl/qe/qe_common.c @@ -13,6 +13,7 @@ * 2006 (c) MontaVista Software, Inc. * Vitaly Bordug */ +#include #include #include #include @@ -187,6 +188,49 @@ void cpm_muram_free(s32 offset) } EXPORT_SYMBOL(cpm_muram_free); +static void devm_cpm_muram_release(struct device *dev, void *res) +{ + s32 *info = res; + + cpm_muram_free(*info); +} + +/** + * devm_cpm_muram_alloc - Resource-managed cpm_muram_alloc + * @dev: Device to allocate memory for + * @size: number of bytes to allocate + * @align: requested alignment, in bytes + * + * This function returns a non-negative offset into the muram area, or + * a negative errno on failure as cpm_muram_alloc() does. + * Use cpm_muram_addr() to get the virtual address of the area. + * + * Compare against cpm_muram_alloc(), the memory allocated by this + * resource-managed version is automatically freed on driver detach and so, + * cpm_muram_free() must not be called to release the allocated memory. + */ +s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size, + unsigned long align) +{ + s32 info; + s32 *dr; + + dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + info = cpm_muram_alloc(size, align); + if (info >= 0) { + *dr = info; + devres_add(dev, dr); + } else { + devres_free(dr); + } + + return info; +} +EXPORT_SYMBOL(devm_cpm_muram_alloc); + /* * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram * @offset: offset of allocation start address @@ -211,6 +255,42 @@ s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) } EXPORT_SYMBOL(cpm_muram_alloc_fixed); +/** + * devm_cpm_muram_alloc_fixed - Resource-managed cpm_muram_alloc_fixed + * @dev: Device to allocate memory for + * @offset: offset of allocation start address + * @size: number of bytes to allocate + * + * This function returns a non-negative offset into the muram area, or + * a negative errno on failure as cpm_muram_alloc_fixed() does. + * Use cpm_muram_addr() to get the virtual address of the area. + * + * Compare against cpm_muram_alloc_fixed(), the memory allocated by this + * resource-managed version is automatically freed on driver detach and so, + * cpm_muram_free() must not be called to release the allocated memory. 
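devm_cpm_muram_alloc() and devm_cpm_muram_alloc_fixed() above follow the standard devres recipe: allocate a small devres record, perform the underlying allocation, and register a release callback that undoes it on driver detach. The generic shape of that recipe, wrapping a hypothetical raw_alloc()/raw_free() pair:

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/errno.h>

s32 raw_alloc(unsigned long size);	/* hypothetical allocator */
void raw_free(s32 handle);		/* hypothetical release */

static void devm_raw_release(struct device *dev, void *res)
{
	raw_free(*(s32 *)res);
}

static s32 devm_raw_alloc(struct device *dev, unsigned long size)
{
	s32 handle;
	s32 *dr;

	dr = devres_alloc(devm_raw_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	handle = raw_alloc(size);
	if (handle >= 0) {
		*dr = handle;
		devres_add(dev, dr);	/* raw_free() runs on driver detach */
	} else {
		devres_free(dr);
	}

	return handle;
}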
+ */ +s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset, + unsigned long size) +{ + s32 info; + s32 *dr; + + dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + info = cpm_muram_alloc_fixed(offset, size); + if (info >= 0) { + *dr = info; + devres_add(dev, dr); + } else { + devres_free(dr); + } + + return info; +} +EXPORT_SYMBOL(devm_cpm_muram_alloc_fixed); + /** * cpm_muram_addr - turn a muram offset into a virtual address * @offset: muram offset to convert diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c index 76bb496305a0e0..3dffebb48b0dac 100644 --- a/drivers/soc/fsl/qe/qmc.c +++ b/drivers/soc/fsl/qe/qmc.c @@ -8,7 +8,9 @@ */ #include +#include #include +#include #include #include #include @@ -18,31 +20,41 @@ #include #include #include +#include +#include #include #include "tsa.h" -/* SCC general mode register high (32 bits) */ +/* SCC general mode register low (32 bits) (GUMR_L in QE) */ #define SCC_GSMRL 0x00 -#define SCC_GSMRL_ENR (1 << 5) -#define SCC_GSMRL_ENT (1 << 4) -#define SCC_GSMRL_MODE_QMC (0x0A << 0) +#define SCC_GSMRL_ENR BIT(5) +#define SCC_GSMRL_ENT BIT(4) +#define SCC_GSMRL_MODE_MASK GENMASK(3, 0) +#define SCC_CPM1_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x0A) +#define SCC_QE_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x02) -/* SCC general mode register low (32 bits) */ +/* SCC general mode register high (32 bits) (identical to GUMR_H in QE) */ #define SCC_GSMRH 0x04 -#define SCC_GSMRH_CTSS (1 << 7) -#define SCC_GSMRH_CDS (1 << 8) -#define SCC_GSMRH_CTSP (1 << 9) -#define SCC_GSMRH_CDP (1 << 10) - -/* SCC event register (16 bits) */ +#define SCC_GSMRH_CTSS BIT(7) +#define SCC_GSMRH_CDS BIT(8) +#define SCC_GSMRH_CTSP BIT(9) +#define SCC_GSMRH_CDP BIT(10) +#define SCC_GSMRH_TTX BIT(11) +#define SCC_GSMRH_TRX BIT(12) + +/* SCC event register (16 bits) (identical to UCCE in QE) */ #define SCC_SCCE 0x10 -#define SCC_SCCE_IQOV (1 << 3) -#define SCC_SCCE_GINT (1 << 2) -#define SCC_SCCE_GUN (1 << 1) -#define SCC_SCCE_GOV (1 << 0) +#define SCC_SCCE_IQOV BIT(3) +#define SCC_SCCE_GINT BIT(2) +#define SCC_SCCE_GUN BIT(1) +#define SCC_SCCE_GOV BIT(0) /* SCC mask register (16 bits) */ #define SCC_SCCM 0x14 + +/* UCC Extended Mode Register (8 bits, QE only) */ +#define SCC_QE_UCC_GUEMR 0x90 + /* Multichannel base pointer (32 bits) */ #define QMC_GBL_MCBASE 0x00 /* Multichannel controller state (16 bits) */ @@ -73,27 +85,42 @@ #define QMC_GBL_TSATTX 0x60 /* CRC constant (16 bits) */ #define QMC_GBL_C_MASK16 0xA0 +/* Rx framer base pointer (16 bits, QE only) */ +#define QMC_QE_GBL_RX_FRM_BASE 0xAC +/* Tx framer base pointer (16 bits, QE only) */ +#define QMC_QE_GBL_TX_FRM_BASE 0xAE +/* A reserved area (0xB0 -> 0xC3) that must be initialized to 0 (QE only) */ +#define QMC_QE_GBL_RSV_B0_START 0xB0 +#define QMC_QE_GBL_RSV_B0_SIZE 0x14 +/* QMC Global Channel specific base (32 bits, QE only) */ +#define QMC_QE_GBL_GCSBASE 0xC4 /* TSA entry (16bit entry in TSATRX and TSATTX) */ -#define QMC_TSA_VALID (1 << 15) -#define QMC_TSA_WRAP (1 << 14) -#define QMC_TSA_MASK (0x303F) -#define QMC_TSA_CHANNEL(x) ((x) << 6) +#define QMC_TSA_VALID BIT(15) +#define QMC_TSA_WRAP BIT(14) +#define QMC_TSA_MASK_MASKH GENMASK(13, 12) +#define QMC_TSA_MASK_MASKL GENMASK(5, 0) +#define QMC_TSA_MASK_8BIT (FIELD_PREP_CONST(QMC_TSA_MASK_MASKH, 0x3) | \ + FIELD_PREP_CONST(QMC_TSA_MASK_MASKL, 0x3F)) +#define QMC_TSA_CHANNEL_MASK GENMASK(11, 6) +#define QMC_TSA_CHANNEL(x) FIELD_PREP(QMC_TSA_CHANNEL_MASK, x) /* Tx 
buffer descriptor base address (16 bits, offset from MCBASE) */ #define QMC_SPE_TBASE 0x00 /* Channel mode register (16 bits) */ #define QMC_SPE_CHAMR 0x02 -#define QMC_SPE_CHAMR_MODE_HDLC (1 << 15) -#define QMC_SPE_CHAMR_MODE_TRANSP ((0 << 15) | (1 << 13)) -#define QMC_SPE_CHAMR_ENT (1 << 12) -#define QMC_SPE_CHAMR_POL (1 << 8) -#define QMC_SPE_CHAMR_HDLC_IDLM (1 << 13) -#define QMC_SPE_CHAMR_HDLC_CRC (1 << 7) -#define QMC_SPE_CHAMR_HDLC_NOF (0x0f << 0) -#define QMC_SPE_CHAMR_TRANSP_RD (1 << 14) -#define QMC_SPE_CHAMR_TRANSP_SYNC (1 << 10) +#define QMC_SPE_CHAMR_MODE_MASK GENMASK(15, 15) +#define QMC_SPE_CHAMR_MODE_HDLC FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 1) +#define QMC_SPE_CHAMR_MODE_TRANSP (FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 0) | BIT(13)) +#define QMC_SPE_CHAMR_ENT BIT(12) +#define QMC_SPE_CHAMR_POL BIT(8) +#define QMC_SPE_CHAMR_HDLC_IDLM BIT(13) +#define QMC_SPE_CHAMR_HDLC_CRC BIT(7) +#define QMC_SPE_CHAMR_HDLC_NOF_MASK GENMASK(3, 0) +#define QMC_SPE_CHAMR_HDLC_NOF(x) FIELD_PREP(QMC_SPE_CHAMR_HDLC_NOF_MASK, x) +#define QMC_SPE_CHAMR_TRANSP_RD BIT(14) +#define QMC_SPE_CHAMR_TRANSP_SYNC BIT(10) /* Tx internal state (32 bits) */ #define QMC_SPE_TSTATE 0x04 @@ -120,43 +147,47 @@ /* Transparent synchronization (16 bits) */ #define QMC_SPE_TRNSYNC 0x3C -#define QMC_SPE_TRNSYNC_RX(x) ((x) << 8) -#define QMC_SPE_TRNSYNC_TX(x) ((x) << 0) +#define QMC_SPE_TRNSYNC_RX_MASK GENMASK(15, 8) +#define QMC_SPE_TRNSYNC_RX(x) FIELD_PREP(QMC_SPE_TRNSYNC_RX_MASK, x) +#define QMC_SPE_TRNSYNC_TX_MASK GENMASK(7, 0) +#define QMC_SPE_TRNSYNC_TX(x) FIELD_PREP(QMC_SPE_TRNSYNC_TX_MASK, x) /* Interrupt related registers bits */ -#define QMC_INT_V (1 << 15) -#define QMC_INT_W (1 << 14) -#define QMC_INT_NID (1 << 13) -#define QMC_INT_IDL (1 << 12) -#define QMC_INT_GET_CHANNEL(x) (((x) & 0x0FC0) >> 6) -#define QMC_INT_MRF (1 << 5) -#define QMC_INT_UN (1 << 4) -#define QMC_INT_RXF (1 << 3) -#define QMC_INT_BSY (1 << 2) -#define QMC_INT_TXB (1 << 1) -#define QMC_INT_RXB (1 << 0) +#define QMC_INT_V BIT(15) +#define QMC_INT_W BIT(14) +#define QMC_INT_NID BIT(13) +#define QMC_INT_IDL BIT(12) +#define QMC_INT_CHANNEL_MASK GENMASK(11, 6) +#define QMC_INT_GET_CHANNEL(x) FIELD_GET(QMC_INT_CHANNEL_MASK, x) +#define QMC_INT_MRF BIT(5) +#define QMC_INT_UN BIT(4) +#define QMC_INT_RXF BIT(3) +#define QMC_INT_BSY BIT(2) +#define QMC_INT_TXB BIT(1) +#define QMC_INT_RXB BIT(0) /* BD related registers bits */ -#define QMC_BD_RX_E (1 << 15) -#define QMC_BD_RX_W (1 << 13) -#define QMC_BD_RX_I (1 << 12) -#define QMC_BD_RX_L (1 << 11) -#define QMC_BD_RX_F (1 << 10) -#define QMC_BD_RX_CM (1 << 9) -#define QMC_BD_RX_UB (1 << 7) -#define QMC_BD_RX_LG (1 << 5) -#define QMC_BD_RX_NO (1 << 4) -#define QMC_BD_RX_AB (1 << 3) -#define QMC_BD_RX_CR (1 << 2) - -#define QMC_BD_TX_R (1 << 15) -#define QMC_BD_TX_W (1 << 13) -#define QMC_BD_TX_I (1 << 12) -#define QMC_BD_TX_L (1 << 11) -#define QMC_BD_TX_TC (1 << 10) -#define QMC_BD_TX_CM (1 << 9) -#define QMC_BD_TX_UB (1 << 7) -#define QMC_BD_TX_PAD (0x0f << 0) +#define QMC_BD_RX_E BIT(15) +#define QMC_BD_RX_W BIT(13) +#define QMC_BD_RX_I BIT(12) +#define QMC_BD_RX_L BIT(11) +#define QMC_BD_RX_F BIT(10) +#define QMC_BD_RX_CM BIT(9) +#define QMC_BD_RX_UB BIT(7) +#define QMC_BD_RX_LG BIT(5) +#define QMC_BD_RX_NO BIT(4) +#define QMC_BD_RX_AB BIT(3) +#define QMC_BD_RX_CR BIT(2) + +#define QMC_BD_TX_R BIT(15) +#define QMC_BD_TX_W BIT(13) +#define QMC_BD_TX_I BIT(12) +#define QMC_BD_TX_L BIT(11) +#define QMC_BD_TX_TC BIT(10) +#define QMC_BD_TX_CM BIT(9) +#define QMC_BD_TX_UB BIT(7) +#define 
QMC_BD_TX_PAD_MASK GENMASK(3, 0) +#define QMC_BD_TX_PAD(x) FIELD_PREP(QMC_BD_TX_PAD_MASK, x) /* Numbers of BDs and interrupt items */ #define QMC_NB_TXBDS 8 @@ -184,7 +215,7 @@ struct qmc_chan { u64 rx_ts_mask; bool is_reverse_data; - spinlock_t tx_lock; + spinlock_t tx_lock; /* Protect Tx related data */ cbd_t __iomem *txbds; cbd_t __iomem *txbd_free; cbd_t __iomem *txbd_done; @@ -192,7 +223,7 @@ struct qmc_chan { u64 nb_tx_underrun; bool is_tx_stopped; - spinlock_t rx_lock; + spinlock_t rx_lock; /* Protect Rx related data */ cbd_t __iomem *rxbds; cbd_t __iomem *rxbd_free; cbd_t __iomem *rxbd_done; @@ -203,13 +234,31 @@ struct qmc_chan { bool is_rx_stopped; }; +enum qmc_version { + QMC_CPM1, + QMC_QE, +}; + +struct qmc_data { + enum qmc_version version; + u32 tstate; /* Initial TSTATE value */ + u32 rstate; /* Initial RSTATE value */ + u32 zistate; /* Initial ZISTATE value */ + u32 zdstate_hdlc; /* Initial ZDSTATE value (HDLC mode) */ + u32 zdstate_transp; /* Initial ZDSTATE value (Transparent mode) */ + u32 rpack; /* Initial RPACK value */ +}; + struct qmc { struct device *dev; + const struct qmc_data *data; struct tsa_serial *tsa_serial; void __iomem *scc_regs; void __iomem *scc_pram; void __iomem *dpram; u16 scc_pram_offset; + u32 dpram_offset; + u32 qe_subblock; cbd_t __iomem *bd_table; dma_addr_t bd_dma_addr; size_t bd_size; @@ -222,6 +271,11 @@ struct qmc { struct qmc_chan *chans[64]; }; +static void qmc_write8(void __iomem *addr, u8 val) +{ + iowrite8(val, addr); +} + static void qmc_write16(void __iomem *addr, u16 val) { iowrite16be(val, addr); @@ -262,6 +316,13 @@ static void qmc_setbits32(void __iomem *addr, u32 set) qmc_write32(addr, qmc_read32(addr) | set); } +static bool qmc_is_qe(const struct qmc *qmc) +{ + if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM)) + return qmc->data->version == QMC_QE; + + return IS_ENABLED(CONFIG_QUICC_ENGINE); +} int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info) { @@ -348,8 +409,8 @@ int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param switch (param->mode) { case QMC_HDLC: - if ((param->hdlc.max_rx_buf_size % 4) || - (param->hdlc.max_rx_buf_size < 8)) + if (param->hdlc.max_rx_buf_size % 4 || + param->hdlc.max_rx_buf_size < 8) return -EINVAL; qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR, @@ -532,11 +593,12 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length, /* Restart receiver if needed */ if (chan->is_rx_halted && !chan->is_rx_stopped) { /* Restart receiver */ - if (chan->mode == QMC_TRANSPARENT) - qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080); - else - qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080); - qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000); + qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack); + qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, + chan->mode == QMC_TRANSPARENT ? 
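The QMC register definitions above are converted from open-coded shifts to BIT()/GENMASK()/FIELD_PREP()/FIELD_GET(), which keeps a field's position and width in a single mask definition. An equivalence sketch for one of the converted fields (the 6-bit channel number held in bits 11:6; names shortened here):

#include <linux/bits.h>
#include <linux/bitfield.h>

#define TSA_CHANNEL_MASK	GENMASK(11, 6)

/* was: ((x) << 6) */
#define TSA_CHANNEL(x)		FIELD_PREP(TSA_CHANNEL_MASK, x)
/* was: (((x) & 0x0FC0) >> 6) */
#define TSA_GET_CHANNEL(x)	FIELD_GET(TSA_CHANNEL_MASK, x)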
+ chan->qmc->data->zdstate_transp : + chan->qmc->data->zdstate_hdlc); + qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate); chan->is_rx_halted = false; } chan->rx_pending++; @@ -641,7 +703,7 @@ static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_ser return -EINVAL; } - val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id); + val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id); /* Check entries based on Rx stuff*/ for (i = 0; i < info->nb_rx_ts; i++) { @@ -662,7 +724,7 @@ static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_ser continue; qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), - ~QMC_TSA_WRAP, enable ? val : 0x0000); + (u16)~QMC_TSA_WRAP, enable ? val : 0x0000); } return 0; @@ -677,7 +739,7 @@ static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_seria /* Use a Rx 32 entries table */ - val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id); + val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id); /* Check entries based on Rx stuff */ for (i = 0; i < info->nb_rx_ts; i++) { @@ -698,7 +760,7 @@ static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_seria continue; qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), - ~QMC_TSA_WRAP, enable ? val : 0x0000); + (u16)~QMC_TSA_WRAP, enable ? val : 0x0000); } return 0; @@ -713,7 +775,7 @@ static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_seria /* Use a Tx 32 entries table */ - val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id); + val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id); /* Check entries based on Tx stuff */ for (i = 0; i < info->nb_tx_ts; i++) { @@ -734,7 +796,7 @@ static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_seria continue; qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), - ~QMC_TSA_WRAP, enable ? val : 0x0000); + (u16)~QMC_TSA_WRAP, enable ? val : 0x0000); } return 0; @@ -774,11 +836,18 @@ static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable) return qmc_chan_setup_tsa_32rx(chan, &info, enable); } -static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode) +static int qmc_chan_cpm1_command(struct qmc_chan *chan, u8 qmc_opcode) { return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E); } +static int qmc_chan_qe_command(struct qmc_chan *chan, u32 cmd) +{ + if (!qe_issue_cmd(cmd, chan->qmc->qe_subblock, chan->id, 0)) + return -EIO; + return 0; +} + static int qmc_chan_stop_rx(struct qmc_chan *chan) { unsigned long flags; @@ -793,7 +862,9 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan) } /* Send STOP RECEIVE command */ - ret = qmc_chan_command(chan, 0x0); + ret = qmc_is_qe(chan->qmc) ? + qmc_chan_qe_command(chan, QE_QMC_STOP_RX) : + qmc_chan_cpm1_command(chan, 0x0); if (ret) { dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n", chan->id, ret); @@ -830,7 +901,9 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan) } /* Send STOP TRANSMIT command */ - ret = qmc_chan_command(chan, 0x1); + ret = qmc_is_qe(chan->qmc) ? 
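Above, the hard-coded initial RSTATE/ZDSTATE values are replaced with fields read from a per-variant data structure (struct qmc_data), so the CPM1 and QE flavours can share the same code paths. The usual shape of that pattern, shown with hypothetical names and the CPM1 values that were previously open-coded:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>
#include <linux/errno.h>

struct variant_data {
	u32 rstate;		/* initial receiver state */
	u32 zdstate_hdlc;	/* initial ZDSTATE, HDLC mode */
	u32 zdstate_transp;	/* initial ZDSTATE, transparent mode */
};

static const struct variant_data cpm1_data = {
	.rstate		= 0x31000000,
	.zdstate_hdlc	= 0x00000080,
	.zdstate_transp	= 0x18000080,
};

static const struct of_device_id my_qmc_match[] = {
	{ .compatible = "vendor,qmc-cpm1", .data = &cpm1_data },
	{ /* sentinel */ }
};

static int my_qmc_probe(struct platform_device *pdev)
{
	const struct variant_data *data = device_get_match_data(&pdev->dev);

	if (!data)
		return -EINVAL;
	/* use data->rstate etc. instead of magic numbers */
	return 0;
}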
+ qmc_chan_qe_command(chan, QE_QMC_STOP_TX) : + qmc_chan_cpm1_command(chan, 0x1); if (ret) { dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n", chan->id, ret); @@ -889,6 +962,7 @@ EXPORT_SYMBOL(qmc_chan_stop); static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan) { struct tsa_serial_info info; + unsigned int w_rx, w_tx; u16 first_rx, last_tx; u16 trnsync; int ret; @@ -898,6 +972,14 @@ static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan) if (ret) return ret; + w_rx = hweight64(chan->rx_ts_mask); + w_tx = hweight64(chan->tx_ts_mask); + if (w_rx <= 1 && w_tx <= 1) { + dev_dbg(qmc->dev, "only one or zero ts -> disable trnsync\n"); + qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC); + return 0; + } + /* Find the first Rx TS allocated to the channel */ first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0; @@ -911,6 +993,7 @@ static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan) trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2); qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync); + qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC); dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n", chan->id, trnsync, @@ -940,19 +1023,22 @@ static int qmc_chan_start_rx(struct qmc_chan *chan) goto end; } - ret = qmc_setup_chan_trnsync(chan->qmc, chan); - if (ret) { - dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", - chan->id, ret); - goto end; + if (chan->mode == QMC_TRANSPARENT) { + ret = qmc_setup_chan_trnsync(chan->qmc, chan); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", + chan->id, ret); + goto end; + } } /* Restart the receiver */ - if (chan->mode == QMC_TRANSPARENT) - qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080); - else - qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080); - qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000); + qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack); + qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, + chan->mode == QMC_TRANSPARENT ? 
+ chan->qmc->data->zdstate_transp : + chan->qmc->data->zdstate_hdlc); + qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate); chan->is_rx_halted = false; chan->is_rx_stopped = false; @@ -982,11 +1068,13 @@ static int qmc_chan_start_tx(struct qmc_chan *chan) goto end; } - ret = qmc_setup_chan_trnsync(chan->qmc, chan); - if (ret) { - dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", - chan->id, ret); - goto end; + if (chan->mode == QMC_TRANSPARENT) { + ret = qmc_setup_chan_trnsync(chan->qmc, chan); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", + chan->id, ret); + goto end; + } } /* @@ -1096,8 +1184,8 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan) qmc_read16(chan->s_param + QMC_SPE_TBASE)); /* Reset TSTATE and ZISTATE to their initial value */ - qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000); - qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100); + qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate); + qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate); spin_unlock_irqrestore(&chan->tx_lock, flags); } @@ -1127,7 +1215,7 @@ static int qmc_check_chans(struct qmc *qmc) if (ret) return ret; - if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) { + if (info.nb_tx_ts > 64 || info.nb_rx_ts > 64) { dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n"); return -EINVAL; } @@ -1136,7 +1224,7 @@ static int qmc_check_chans(struct qmc *qmc) * If more than 32 TS are assigned to this serial, one common table is * used for Tx and Rx and so masks must be equal for all channels. */ - if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) { + if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32) { if (info.nb_tx_ts != info.nb_rx_ts) { dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n"); return -EINVAL; @@ -1368,13 +1456,14 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan) val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t); qmc_write16(chan->s_param + QMC_SPE_RBASE, val); qmc_write16(chan->s_param + QMC_SPE_RBPTR, val); - qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000); - qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000); - qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100); + qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate); + qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate); + qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate); + qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack); if (chan->mode == QMC_TRANSPARENT) { - qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080); + qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_transp); qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60); - val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC; + val = QMC_SPE_CHAMR_MODE_TRANSP; if (chan->is_reverse_data) val |= QMC_SPE_CHAMR_TRANSP_RD; qmc_write16(chan->s_param + QMC_SPE_CHAMR, val); @@ -1382,10 +1471,10 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan) if (ret) return ret; } else { - qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080); + qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_hdlc); qmc_write16(chan->s_param + QMC_SPE_MFLR, 60); qmc_write16(chan->s_param + QMC_SPE_CHAMR, - QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM); + QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM); } /* Do not enable interrupts now. 
They will be enabled later */ @@ -1510,11 +1599,14 @@ static void qmc_irq_gint(struct qmc *qmc) /* Restart the receiver if needed */ spin_lock_irqsave(&chan->rx_lock, flags); if (chan->rx_pending && !chan->is_rx_stopped) { - if (chan->mode == QMC_TRANSPARENT) - qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080); - else - qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080); - qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000); + qmc_write32(chan->s_param + QMC_SPE_RPACK, + chan->qmc->data->rpack); + qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, + chan->mode == QMC_TRANSPARENT ? + chan->qmc->data->zdstate_transp : + chan->qmc->data->zdstate_hdlc); + qmc_write32(chan->s_param + QMC_SPE_RSTATE, + chan->qmc->data->rstate); chan->is_rx_halted = false; } else { chan->is_rx_halted = true; @@ -1558,27 +1650,74 @@ static irqreturn_t qmc_irq_handler(int irq, void *priv) return IRQ_HANDLED; } -static int qmc_probe(struct platform_device *pdev) +static int qmc_qe_soft_qmc_init(struct qmc *qmc, struct device_node *np) { - struct device_node *np = pdev->dev.of_node; - unsigned int nb_chans; - struct resource *res; - struct qmc *qmc; - int irq; + struct qe_firmware_info *qe_fw_info; + const struct qe_firmware *qe_fw; + const struct firmware *fw; + const char *filename; int ret; - qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL); - if (!qmc) - return -ENOMEM; + ret = of_property_read_string(np, "fsl,soft-qmc", &filename); + switch (ret) { + case 0: + break; + case -EINVAL: + /* fsl,soft-qmc property not set -> Simply do nothing */ + return 0; + default: + dev_err(qmc->dev, "%pOF: failed to read fsl,soft-qmc\n", + np); + return ret; + } - qmc->dev = &pdev->dev; - INIT_LIST_HEAD(&qmc->chan_head); + qe_fw_info = qe_get_firmware_info(); + if (qe_fw_info) { + if (!strstr(qe_fw_info->id, "Soft-QMC")) { + dev_err(qmc->dev, "Another Firmware is already loaded\n"); + return -EALREADY; + } + dev_info(qmc->dev, "Firmware already loaded\n"); + return 0; + } + + dev_info(qmc->dev, "Using firmware %s\n", filename); + + ret = request_firmware(&fw, filename, qmc->dev); + if (ret) { + dev_err(qmc->dev, "Failed to request firmware %s\n", filename); + return ret; + } + + qe_fw = (const struct qe_firmware *)fw->data; + + if (fw->size < sizeof(qe_fw->header) || + be32_to_cpu(qe_fw->header.length) != fw->size) { + dev_err(qmc->dev, "Invalid firmware %s\n", filename); + ret = -EINVAL; + goto end; + } + + ret = qe_upload_firmware(qe_fw); + if (ret) { + dev_err(qmc->dev, "Failed to load firmware %s\n", filename); + goto end; + } + + ret = 0; +end: + release_firmware(fw); + return ret; +} + +static int qmc_cpm1_init_resources(struct qmc *qmc, struct platform_device *pdev) +{ + struct resource *res; qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs"); if (IS_ERR(qmc->scc_regs)) return PTR_ERR(qmc->scc_regs); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram"); if (!res) return -EINVAL; @@ -1591,44 +1730,215 @@ static int qmc_probe(struct platform_device *pdev) if (IS_ERR(qmc->dpram)) return PTR_ERR(qmc->dpram); + return 0; +} + +static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev) +{ + struct resource *res; + int ucc_num; + s32 info; + + qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "ucc_regs"); + if (IS_ERR(qmc->scc_regs)) + return PTR_ERR(qmc->scc_regs); + + ucc_num = tsa_serial_get_num(qmc->tsa_serial); + if (ucc_num < 0) + return dev_err_probe(qmc->dev, ucc_num, "Failed to get UCC num\n"); + + qmc->qe_subblock = 
ucc_slow_get_qe_cr_subblock(ucc_num); + if (qmc->qe_subblock == QE_CR_SUBBLOCK_INVALID) { + dev_err(qmc->dev, "Unsupported ucc num %u\n", ucc_num); + return -EINVAL; + } + /* Allocate the 'Global Multichannel Parameters' and the + * 'Framer parameters' areas. The 'Framer parameters' area + * is located right after the 'Global Multichannel Parameters'. + * The 'Framer parameters' need 1 byte per receive and transmit + * channel. The maximum number of receive or transmit channel + * is 64. So reserve 2 * 64 bytes for the 'Framer parameters'. + */ + info = devm_qe_muram_alloc(qmc->dev, UCC_SLOW_PRAM_SIZE + 2 * 64, + ALIGNMENT_OF_UCC_SLOW_PRAM); + if (IS_ERR_VALUE(info)) { + dev_err(qmc->dev, "cannot allocate MURAM for PRAM"); + return -ENOMEM; + } + if (!qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, qmc->qe_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, info)) { + dev_err(qmc->dev, "QE_ASSIGN_PAGE_TO_DEVICE cmd failed"); + return -EIO; + } + qmc->scc_pram = qe_muram_addr(info); + qmc->scc_pram_offset = info; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpram"); + if (!res) + return -EINVAL; + qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0)); + qmc->dpram = devm_ioremap_resource(qmc->dev, res); + if (IS_ERR(qmc->scc_pram)) + return PTR_ERR(qmc->scc_pram); + + return 0; +} + +static int qmc_init_resources(struct qmc *qmc, struct platform_device *pdev) +{ + return qmc_is_qe(qmc) ? + qmc_qe_init_resources(qmc, pdev) : + qmc_cpm1_init_resources(qmc, pdev); +} + +static int qmc_cpm1_init_scc(struct qmc *qmc) +{ + u32 val; + int ret; + + /* Connect the serial (SCC) to TSA */ + ret = tsa_serial_connect(qmc->tsa_serial); + if (ret) + return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n"); + + /* Init GMSR_H and GMSR_L registers */ + val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP; + qmc_write32(qmc->scc_regs + SCC_GSMRH, val); + + /* enable QMC mode */ + qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_CPM1_GSMRL_MODE_QMC); + + /* Disable and clear interrupts */ + qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000); + qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F); + + return 0; +} + +static int qmc_qe_init_ucc(struct qmc *qmc) +{ + u32 val; + int ret; + + /* Set the UCC in slow mode */ + qmc_write8(qmc->scc_regs + SCC_QE_UCC_GUEMR, + UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX); + + /* Connect the serial (UCC) to TSA */ + ret = tsa_serial_connect(qmc->tsa_serial); + if (ret) + return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n"); + + /* Initialize the QMC tx startup addresses */ + if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, 0x80)) { + dev_err(qmc->dev, "QE_CMD_PUSH_SCHED tx cmd failed"); + ret = -EIO; + goto err_tsa_serial_disconnect; + } + + /* Initialize the QMC rx startup addresses */ + if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock | 0x00020000, + QE_CR_PROTOCOL_UNSPECIFIED, 0x82)) { + dev_err(qmc->dev, "QE_CMD_PUSH_SCHED rx cmd failed"); + ret = -EIO; + goto err_tsa_serial_disconnect; + } + + /* Re-init RXPTR and TXPTR with the content of RX_S_PTR and + * TX_S_PTR (RX_S_PTR and TX_S_PTR are initialized during + * qmc_setup_tsa() call + */ + val = qmc_read16(qmc->scc_pram + QMC_GBL_RX_S_PTR); + qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val); + val = qmc_read16(qmc->scc_pram + QMC_GBL_TX_S_PTR); + qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val); + + /* Init GUMR_H and GUMR_L registers (SCC GSMR_H and GSMR_L) */ + val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | 
SCC_GSMRH_CTSP | + SCC_GSMRH_TRX | SCC_GSMRH_TTX; + qmc_write32(qmc->scc_regs + SCC_GSMRH, val); + + /* enable QMC mode */ + qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_QE_GSMRL_MODE_QMC); + + /* Disable and clear interrupts */ + qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000); + qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F); + + return 0; + +err_tsa_serial_disconnect: + tsa_serial_disconnect(qmc->tsa_serial); + return ret; +} + +static int qmc_init_xcc(struct qmc *qmc) +{ + return qmc_is_qe(qmc) ? + qmc_qe_init_ucc(qmc) : + qmc_cpm1_init_scc(qmc); +} + +static void qmc_exit_xcc(struct qmc *qmc) +{ + /* Disconnect the serial from TSA */ + tsa_serial_disconnect(qmc->tsa_serial); +} + +static int qmc_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + unsigned int nb_chans; + struct qmc *qmc; + int irq; + int ret; + + qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL); + if (!qmc) + return -ENOMEM; + + qmc->dev = &pdev->dev; + qmc->data = of_device_get_match_data(&pdev->dev); + if (!qmc->data) { + dev_err(qmc->dev, "Missing match data\n"); + return -EINVAL; + } + INIT_LIST_HEAD(&qmc->chan_head); + qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial"); if (IS_ERR(qmc->tsa_serial)) { return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial), "Failed to get TSA serial\n"); } - /* Connect the serial (SCC) to TSA */ - ret = tsa_serial_connect(qmc->tsa_serial); - if (ret) { - dev_err(qmc->dev, "Failed to connect TSA serial\n"); + ret = qmc_init_resources(qmc, pdev); + if (ret) return ret; + + if (qmc_is_qe(qmc)) { + ret = qmc_qe_soft_qmc_init(qmc, np); + if (ret) + return ret; } /* Parse channels informationss */ ret = qmc_of_parse_chans(qmc, np); if (ret) - goto err_tsa_serial_disconnect; + return ret; nb_chans = qmc_nb_chans(qmc); - /* Init GMSR_H and GMSR_L registers */ - qmc_write32(qmc->scc_regs + SCC_GSMRH, - SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP); - - /* enable QMC mode */ - qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC); - /* * Allocate the buffer descriptor table * 8 rx and 8 tx descriptors per channel */ qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t); qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size, - &qmc->bd_dma_addr, GFP_KERNEL); + &qmc->bd_dma_addr, GFP_KERNEL); if (!qmc->bd_table) { dev_err(qmc->dev, "Failed to allocate bd table\n"); - ret = -ENOMEM; - goto err_tsa_serial_disconnect; + return -ENOMEM; } memset(qmc->bd_table, 0, qmc->bd_size); @@ -1637,11 +1947,10 @@ static int qmc_probe(struct platform_device *pdev) /* Allocate the interrupt table */ qmc->int_size = QMC_NB_INTS * sizeof(u16); qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size, - &qmc->int_dma_addr, GFP_KERNEL); + &qmc->int_dma_addr, GFP_KERNEL); if (!qmc->int_table) { dev_err(qmc->dev, "Failed to allocate interrupt table\n"); - ret = -ENOMEM; - goto err_tsa_serial_disconnect; + return -ENOMEM; } memset(qmc->int_table, 0, qmc->int_size); @@ -1658,40 +1967,59 @@ static int qmc_probe(struct platform_device *pdev) qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3); qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8); + if (qmc_is_qe(qmc)) { + /* Zeroed the reserved area */ + memset_io(qmc->scc_pram + QMC_QE_GBL_RSV_B0_START, 0, + QMC_QE_GBL_RSV_B0_SIZE); + + qmc_write32(qmc->scc_pram + QMC_QE_GBL_GCSBASE, qmc->dpram_offset); + + /* Init 'framer parameters' area and set the base addresses */ + memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE, 0x01, 64); + 
memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE + 64, 0x01, 64); + qmc_write16(qmc->scc_pram + QMC_QE_GBL_RX_FRM_BASE, + qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE); + qmc_write16(qmc->scc_pram + QMC_QE_GBL_TX_FRM_BASE, + qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE + 64); + } + ret = qmc_init_tsa(qmc); if (ret) - goto err_tsa_serial_disconnect; + return ret; qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000); ret = qmc_setup_chans(qmc); if (ret) - goto err_tsa_serial_disconnect; + return ret; /* Init interrupts table */ ret = qmc_setup_ints(qmc); if (ret) - goto err_tsa_serial_disconnect; + return ret; - /* Disable and clear interrupts, set the irq handler */ - qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000); - qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F); + /* Init SCC (CPM1) or UCC (QE) */ + ret = qmc_init_xcc(qmc); + if (ret) + return ret; + + /* Set the irq handler */ irq = platform_get_irq(pdev, 0); if (irq < 0) - goto err_tsa_serial_disconnect; + goto err_exit_xcc; ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc); if (ret < 0) - goto err_tsa_serial_disconnect; + goto err_exit_xcc; /* Enable interrupts */ qmc_write16(qmc->scc_regs + SCC_SCCM, - SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV); + SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV); ret = qmc_finalize_chans(qmc); if (ret < 0) goto err_disable_intr; - /* Enable transmiter and receiver */ + /* Enable transmitter and receiver */ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT); platform_set_drvdata(pdev, qmc); @@ -1709,8 +2037,8 @@ static int qmc_probe(struct platform_device *pdev) err_disable_intr: qmc_write16(qmc->scc_regs + SCC_SCCM, 0); -err_tsa_serial_disconnect: - tsa_serial_disconnect(qmc->tsa_serial); +err_exit_xcc: + qmc_exit_xcc(qmc); return ret; } @@ -1718,18 +2046,43 @@ static void qmc_remove(struct platform_device *pdev) { struct qmc *qmc = platform_get_drvdata(pdev); - /* Disable transmiter and receiver */ + /* Disable transmitter and receiver */ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0); /* Disable interrupts */ qmc_write16(qmc->scc_regs + SCC_SCCM, 0); - /* Disconnect the serial from TSA */ - tsa_serial_disconnect(qmc->tsa_serial); + /* Exit SCC (CPM1) or UCC (QE) */ + qmc_exit_xcc(qmc); } +static const struct qmc_data qmc_data_cpm1 = { + .version = QMC_CPM1, + .tstate = 0x30000000, + .rstate = 0x31000000, + .zistate = 0x00000100, + .zdstate_hdlc = 0x00000080, + .zdstate_transp = 0x18000080, + .rpack = 0x00000000, +}; + +static const struct qmc_data qmc_data_qe = { + .version = QMC_QE, + .tstate = 0x30000000, + .rstate = 0x30000000, + .zistate = 0x00000200, + .zdstate_hdlc = 0x80FFFFE0, + .zdstate_transp = 0x003FFFE2, + .rpack = 0x80000000, +}; + static const struct of_device_id qmc_id_table[] = { - { .compatible = "fsl,cpm1-scc-qmc" }, +#if IS_ENABLED(CONFIG_CPM1) + { .compatible = "fsl,cpm1-scc-qmc", .data = &qmc_data_cpm1 }, +#endif +#if IS_ENABLED(CONFIG_QUICC_ENGINE) + { .compatible = "fsl,qe-ucc-qmc", .data = &qmc_data_qe }, +#endif {} /* sentinel */ }; MODULE_DEVICE_TABLE(of, qmc_id_table); @@ -1889,5 +2242,5 @@ struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev, EXPORT_SYMBOL(devm_qmc_chan_get_bychild); MODULE_AUTHOR("Herve Codina "); -MODULE_DESCRIPTION("CPM QMC driver"); +MODULE_DESCRIPTION("CPM/QE QMC driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c index 6c5741cf5e9d2a..f0889b3fcaf2c2 100644 --- a/drivers/soc/fsl/qe/tsa.c +++ b/drivers/soc/fsl/qe/tsa.c @@ -9,6 +9,8 @@ 
#include "tsa.h" #include +#include +#include #include #include #include @@ -16,86 +18,116 @@ #include #include #include +#include + +/* TSA SI RAM routing tables entry (CPM1) */ +#define TSA_CPM1_SIRAM_ENTRY_LAST BIT(16) +#define TSA_CPM1_SIRAM_ENTRY_BYTE BIT(17) +#define TSA_CPM1_SIRAM_ENTRY_CNT_MASK GENMASK(21, 18) +#define TSA_CPM1_SIRAM_ENTRY_CNT(x) FIELD_PREP(TSA_CPM1_SIRAM_ENTRY_CNT_MASK, x) +#define TSA_CPM1_SIRAM_ENTRY_CSEL_MASK GENMASK(24, 22) +#define TSA_CPM1_SIRAM_ENTRY_CSEL_NU FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x0) +#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x2) +#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x3) +#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x4) +#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x5) +#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x6) + +/* TSA SI RAM routing tables entry (QE) */ +#define TSA_QE_SIRAM_ENTRY_LAST BIT(0) +#define TSA_QE_SIRAM_ENTRY_BYTE BIT(1) +#define TSA_QE_SIRAM_ENTRY_CNT_MASK GENMASK(4, 2) +#define TSA_QE_SIRAM_ENTRY_CNT(x) FIELD_PREP(TSA_QE_SIRAM_ENTRY_CNT_MASK, x) +#define TSA_QE_SIRAM_ENTRY_CSEL_MASK GENMASK(8, 5) +#define TSA_QE_SIRAM_ENTRY_CSEL_NU FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x0) +#define TSA_QE_SIRAM_ENTRY_CSEL_UCC5 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x1) +#define TSA_QE_SIRAM_ENTRY_CSEL_UCC1 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x9) +#define TSA_QE_SIRAM_ENTRY_CSEL_UCC2 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xa) +#define TSA_QE_SIRAM_ENTRY_CSEL_UCC3 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xb) +#define TSA_QE_SIRAM_ENTRY_CSEL_UCC4 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xc) - -/* TSA SI RAM routing tables entry */ -#define TSA_SIRAM_ENTRY_LAST (1 << 16) -#define TSA_SIRAM_ENTRY_BYTE (1 << 17) -#define TSA_SIRAM_ENTRY_CNT(x) (((x) & 0x0f) << 18) -#define TSA_SIRAM_ENTRY_CSEL_MASK (0x7 << 22) -#define TSA_SIRAM_ENTRY_CSEL_NU (0x0 << 22) -#define TSA_SIRAM_ENTRY_CSEL_SCC2 (0x2 << 22) -#define TSA_SIRAM_ENTRY_CSEL_SCC3 (0x3 << 22) -#define TSA_SIRAM_ENTRY_CSEL_SCC4 (0x4 << 22) -#define TSA_SIRAM_ENTRY_CSEL_SMC1 (0x5 << 22) -#define TSA_SIRAM_ENTRY_CSEL_SMC2 (0x6 << 22) - -/* SI mode register (32 bits) */ -#define TSA_SIMODE 0x00 -#define TSA_SIMODE_SMC2 0x80000000 -#define TSA_SIMODE_SMC1 0x00008000 -#define TSA_SIMODE_TDMA(x) ((x) << 0) -#define TSA_SIMODE_TDMB(x) ((x) << 16) -#define TSA_SIMODE_TDM_MASK 0x0fff -#define TSA_SIMODE_TDM_SDM_MASK 0x0c00 -#define TSA_SIMODE_TDM_SDM_NORM 0x0000 -#define TSA_SIMODE_TDM_SDM_ECHO 0x0400 -#define TSA_SIMODE_TDM_SDM_INTL_LOOP 0x0800 -#define TSA_SIMODE_TDM_SDM_LOOP_CTRL 0x0c00 -#define TSA_SIMODE_TDM_RFSD(x) ((x) << 8) -#define TSA_SIMODE_TDM_DSC 0x0080 -#define TSA_SIMODE_TDM_CRT 0x0040 -#define TSA_SIMODE_TDM_STZ 0x0020 -#define TSA_SIMODE_TDM_CE 0x0010 -#define TSA_SIMODE_TDM_FE 0x0008 -#define TSA_SIMODE_TDM_GM 0x0004 -#define TSA_SIMODE_TDM_TFSD(x) ((x) << 0) - -/* SI global mode register (8 bits) */ -#define TSA_SIGMR 0x04 -#define TSA_SIGMR_ENB (1<<3) -#define TSA_SIGMR_ENA (1<<2) -#define TSA_SIGMR_RDM_MASK 0x03 -#define TSA_SIGMR_RDM_STATIC_TDMA 0x00 -#define TSA_SIGMR_RDM_DYN_TDMA 0x01 -#define TSA_SIGMR_RDM_STATIC_TDMAB 0x02 -#define TSA_SIGMR_RDM_DYN_TDMAB 0x03 - -/* SI status register (8 bits) */ -#define TSA_SISTR 0x06 - -/* SI command register (8 bits) */ -#define TSA_SICMR 0x07 
+/* + * SI mode register : + * - CPM1: 32bit register split in 2*16bit (16bit TDM) + * - QE: 4x16bit registers, one per TDM + */ +#define TSA_CPM1_SIMODE 0x00 +#define TSA_QE_SIAMR 0x00 +#define TSA_QE_SIBMR 0x02 +#define TSA_QE_SICMR 0x04 +#define TSA_QE_SIDMR 0x06 +#define TSA_CPM1_SIMODE_SMC2 BIT(31) +#define TSA_CPM1_SIMODE_SMC1 BIT(15) +#define TSA_CPM1_SIMODE_TDMA_MASK GENMASK(11, 0) +#define TSA_CPM1_SIMODE_TDMA(x) FIELD_PREP(TSA_CPM1_SIMODE_TDMA_MASK, x) +#define TSA_CPM1_SIMODE_TDMB_MASK GENMASK(27, 16) +#define TSA_CPM1_SIMODE_TDMB(x) FIELD_PREP(TSA_CPM1_SIMODE_TDMB_MASK, x) +#define TSA_QE_SIMODE_TDM_SAD_MASK GENMASK(15, 12) +#define TSA_QE_SIMODE_TDM_SAD(x) FIELD_PREP(TSA_QE_SIMODE_TDM_SAD_MASK, x) +#define TSA_CPM1_SIMODE_TDM_MASK GENMASK(11, 0) +#define TSA_SIMODE_TDM_SDM_MASK GENMASK(11, 10) +#define TSA_SIMODE_TDM_SDM_NORM FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0) +#define TSA_SIMODE_TDM_SDM_ECHO FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1) +#define TSA_SIMODE_TDM_SDM_INTL_LOOP FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2) +#define TSA_SIMODE_TDM_SDM_LOOP_CTRL FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3) +#define TSA_SIMODE_TDM_RFSD_MASK GENMASK(9, 8) +#define TSA_SIMODE_TDM_RFSD(x) FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x) +#define TSA_SIMODE_TDM_DSC BIT(7) +#define TSA_SIMODE_TDM_CRT BIT(6) +#define TSA_CPM1_SIMODE_TDM_STZ BIT(5) /* bit 5: STZ in CPM1 */ +#define TSA_QE_SIMODE_TDM_SL BIT(5) /* bit 5: SL in QE */ +#define TSA_SIMODE_TDM_CE BIT(4) +#define TSA_SIMODE_TDM_FE BIT(3) +#define TSA_SIMODE_TDM_GM BIT(2) +#define TSA_SIMODE_TDM_TFSD_MASK GENMASK(1, 0) +#define TSA_SIMODE_TDM_TFSD(x) FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x) + +/* CPM SI global mode register (8 bits) */ +#define TSA_CPM1_SIGMR 0x04 +#define TSA_CPM1_SIGMR_ENB BIT(3) +#define TSA_CPM1_SIGMR_ENA BIT(2) +#define TSA_CPM1_SIGMR_RDM_MASK GENMASK(1, 0) +#define TSA_CPM1_SIGMR_RDM_STATIC_TDMA FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x0) +#define TSA_CPM1_SIGMR_RDM_DYN_TDMA FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x1) +#define TSA_CPM1_SIGMR_RDM_STATIC_TDMAB FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x2) +#define TSA_CPM1_SIGMR_RDM_DYN_TDMAB FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x3) + +/* QE SI global mode register high (8 bits) */ +#define TSA_QE_SIGLMRH 0x08 +#define TSA_QE_SIGLMRH_END BIT(3) +#define TSA_QE_SIGLMRH_ENC BIT(2) +#define TSA_QE_SIGLMRH_ENB BIT(1) +#define TSA_QE_SIGLMRH_ENA BIT(0) /* SI clock route register (32 bits) */ -#define TSA_SICR 0x0C -#define TSA_SICR_SCC2(x) ((x) << 8) -#define TSA_SICR_SCC3(x) ((x) << 16) -#define TSA_SICR_SCC4(x) ((x) << 24) -#define TSA_SICR_SCC_MASK 0x0ff -#define TSA_SICR_SCC_GRX (1 << 7) -#define TSA_SICR_SCC_SCX_TSA (1 << 6) -#define TSA_SICR_SCC_RXCS_MASK (0x7 << 3) -#define TSA_SICR_SCC_RXCS_BRG1 (0x0 << 3) -#define TSA_SICR_SCC_RXCS_BRG2 (0x1 << 3) -#define TSA_SICR_SCC_RXCS_BRG3 (0x2 << 3) -#define TSA_SICR_SCC_RXCS_BRG4 (0x3 << 3) -#define TSA_SICR_SCC_RXCS_CLK15 (0x4 << 3) -#define TSA_SICR_SCC_RXCS_CLK26 (0x5 << 3) -#define TSA_SICR_SCC_RXCS_CLK37 (0x6 << 3) -#define TSA_SICR_SCC_RXCS_CLK48 (0x7 << 3) -#define TSA_SICR_SCC_TXCS_MASK (0x7 << 0) -#define TSA_SICR_SCC_TXCS_BRG1 (0x0 << 0) -#define TSA_SICR_SCC_TXCS_BRG2 (0x1 << 0) -#define TSA_SICR_SCC_TXCS_BRG3 (0x2 << 0) -#define TSA_SICR_SCC_TXCS_BRG4 (0x3 << 0) -#define TSA_SICR_SCC_TXCS_CLK15 (0x4 << 0) -#define TSA_SICR_SCC_TXCS_CLK26 (0x5 << 0) -#define TSA_SICR_SCC_TXCS_CLK37 (0x6 << 0) -#define TSA_SICR_SCC_TXCS_CLK48 (0x7 << 0) - -/* Serial interface RAM pointer register (32 
bits) */ -#define TSA_SIRP 0x10 +#define TSA_CPM1_SICR 0x0C +#define TSA_CPM1_SICR_SCC2_MASK GENMASK(15, 8) +#define TSA_CPM1_SICR_SCC2(x) FIELD_PREP(TSA_CPM1_SICR_SCC2_MASK, x) +#define TSA_CPM1_SICR_SCC3_MASK GENMASK(23, 16) +#define TSA_CPM1_SICR_SCC3(x) FIELD_PREP(TSA_CPM1_SICR_SCC3_MASK, x) +#define TSA_CPM1_SICR_SCC4_MASK GENMASK(31, 24) +#define TSA_CPM1_SICR_SCC4(x) FIELD_PREP(TSA_CPM1_SICR_SCC4_MASK, x) +#define TSA_CPM1_SICR_SCC_MASK GENMASK(7, 0) +#define TSA_CPM1_SICR_SCC_GRX BIT(7) +#define TSA_CPM1_SICR_SCC_SCX_TSA BIT(6) +#define TSA_CPM1_SICR_SCC_RXCS_MASK GENMASK(5, 3) +#define TSA_CPM1_SICR_SCC_RXCS_BRG1 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x0) +#define TSA_CPM1_SICR_SCC_RXCS_BRG2 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x1) +#define TSA_CPM1_SICR_SCC_RXCS_BRG3 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x2) +#define TSA_CPM1_SICR_SCC_RXCS_BRG4 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x3) +#define TSA_CPM1_SICR_SCC_RXCS_CLK15 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x4) +#define TSA_CPM1_SICR_SCC_RXCS_CLK26 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x5) +#define TSA_CPM1_SICR_SCC_RXCS_CLK37 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x6) +#define TSA_CPM1_SICR_SCC_RXCS_CLK48 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x7) +#define TSA_CPM1_SICR_SCC_TXCS_MASK GENMASK(2, 0) +#define TSA_CPM1_SICR_SCC_TXCS_BRG1 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x0) +#define TSA_CPM1_SICR_SCC_TXCS_BRG2 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x1) +#define TSA_CPM1_SICR_SCC_TXCS_BRG3 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x2) +#define TSA_CPM1_SICR_SCC_TXCS_BRG4 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x3) +#define TSA_CPM1_SICR_SCC_TXCS_CLK15 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x4) +#define TSA_CPM1_SICR_SCC_TXCS_CLK26 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x5) +#define TSA_CPM1_SICR_SCC_TXCS_CLK37 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x6) +#define TSA_CPM1_SICR_SCC_TXCS_CLK48 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x7) struct tsa_entries_area { void __iomem *entries_start; @@ -114,15 +146,31 @@ struct tsa_tdm { #define TSA_TDMA 0 #define TSA_TDMB 1 +#define TSA_TDMC 2 /* QE implementation only */ +#define TSA_TDMD 3 /* QE implementation only */ + +enum tsa_version { + TSA_CPM1 = 1, /* Avoid 0 value */ + TSA_QE, +}; struct tsa { struct device *dev; void __iomem *si_regs; void __iomem *si_ram; resource_size_t si_ram_sz; - spinlock_t lock; + spinlock_t lock; /* Lock for read/modify/write sequence */ + enum tsa_version version; int tdms; /* TSA_TDMx ORed */ +#if IS_ENABLED(CONFIG_QUICC_ENGINE) + struct tsa_tdm tdm[4]; /* TDMa, TDMb, TDMc and TDMd */ +#else struct tsa_tdm tdm[2]; /* TDMa and TDMb */ +#endif + /* Same number of serials for CPM1 and QE: + * CPM1: NU, 3 SCCs and 2 SMCs + * QE: NU and 5 UCCs + */ struct tsa_serial { unsigned int id; struct tsa_serial_info info; @@ -140,7 +188,12 @@ static inline void tsa_write32(void __iomem *addr, u32 val) iowrite32be(val, addr); } -static inline void tsa_write8(void __iomem *addr, u32 val) +static inline void tsa_write16(void __iomem *addr, u16 val) +{ + iowrite16be(val, addr); +} + +static inline void tsa_write8(void __iomem *addr, u8 val) { iowrite8(val, addr); } @@ -150,17 +203,68 @@ static inline u32 tsa_read32(void __iomem *addr) return ioread32be(addr); } +static inline u16 tsa_read16(void __iomem *addr) +{ + return ioread16be(addr); +} + static inline void tsa_clrbits32(void __iomem *addr, u32 clr) { tsa_write32(addr, tsa_read32(addr) 
& ~clr); } +static inline void tsa_clrbits16(void __iomem *addr, u16 clr) +{ + tsa_write16(addr, tsa_read16(addr) & ~clr); +} + static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set) { tsa_write32(addr, (tsa_read32(addr) & ~clr) | set); } -int tsa_serial_connect(struct tsa_serial *tsa_serial) +static bool tsa_is_qe(const struct tsa *tsa) +{ + if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM)) + return tsa->version == TSA_QE; + + return IS_ENABLED(CONFIG_QUICC_ENGINE); +} + +static int tsa_qe_serial_get_num(struct tsa_serial *tsa_serial) +{ + struct tsa *tsa = tsa_serial_get_tsa(tsa_serial); + + switch (tsa_serial->id) { + case FSL_QE_TSA_UCC1: return 0; + case FSL_QE_TSA_UCC2: return 1; + case FSL_QE_TSA_UCC3: return 2; + case FSL_QE_TSA_UCC4: return 3; + case FSL_QE_TSA_UCC5: return 4; + default: + break; + } + + dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id); + return -EINVAL; +} + +int tsa_serial_get_num(struct tsa_serial *tsa_serial) +{ + struct tsa *tsa = tsa_serial_get_tsa(tsa_serial); + + /* + * There is no need to get the serial num out of the TSA driver in the + * CPM case. + * Further more, in CPM, we can have 2 types of serial SCCs and FCCs. + * What kind of numbering to use that can be global to both SCCs and + * FCCs ? + */ + return tsa_is_qe(tsa) ? tsa_qe_serial_get_num(tsa_serial) : -EOPNOTSUPP; +} +EXPORT_SYMBOL(tsa_serial_get_num); + +static int tsa_cpm1_serial_connect(struct tsa_serial *tsa_serial, bool connect) { struct tsa *tsa = tsa_serial_get_tsa(tsa_serial); unsigned long flags; @@ -169,16 +273,16 @@ int tsa_serial_connect(struct tsa_serial *tsa_serial) switch (tsa_serial->id) { case FSL_CPM_TSA_SCC2: - clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK); - set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA); + clear = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_MASK); + set = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_SCX_TSA); break; case FSL_CPM_TSA_SCC3: - clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK); - set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA); + clear = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_MASK); + set = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_SCX_TSA); break; case FSL_CPM_TSA_SCC4: - clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK); - set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA); + clear = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_MASK); + set = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_SCX_TSA); break; default: dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id); @@ -186,40 +290,53 @@ int tsa_serial_connect(struct tsa_serial *tsa_serial) } spin_lock_irqsave(&tsa->lock, flags); - tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set); + tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SICR, clear, + connect ? 
set : 0); spin_unlock_irqrestore(&tsa->lock, flags); return 0; } -EXPORT_SYMBOL(tsa_serial_connect); -int tsa_serial_disconnect(struct tsa_serial *tsa_serial) +static int tsa_qe_serial_connect(struct tsa_serial *tsa_serial, bool connect) { struct tsa *tsa = tsa_serial_get_tsa(tsa_serial); unsigned long flags; - u32 clear; + int ucc_num; + int ret; - switch (tsa_serial->id) { - case FSL_CPM_TSA_SCC2: - clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK); - break; - case FSL_CPM_TSA_SCC3: - clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK); - break; - case FSL_CPM_TSA_SCC4: - clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK); - break; - default: - dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id); - return -EINVAL; - } + ucc_num = tsa_qe_serial_get_num(tsa_serial); + if (ucc_num < 0) + return ucc_num; spin_lock_irqsave(&tsa->lock, flags); - tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0); + ret = ucc_set_qe_mux_tsa(ucc_num, connect); spin_unlock_irqrestore(&tsa->lock, flags); - + if (ret) { + dev_err(tsa->dev, "Connect serial id %u to TSA failed (%d)\n", + tsa_serial->id, ret); + return ret; + } return 0; } + +int tsa_serial_connect(struct tsa_serial *tsa_serial) +{ + struct tsa *tsa = tsa_serial_get_tsa(tsa_serial); + + return tsa_is_qe(tsa) ? + tsa_qe_serial_connect(tsa_serial, true) : + tsa_cpm1_serial_connect(tsa_serial, true); +} +EXPORT_SYMBOL(tsa_serial_connect); + +int tsa_serial_disconnect(struct tsa_serial *tsa_serial) +{ + struct tsa *tsa = tsa_serial_get_tsa(tsa_serial); + + return tsa_is_qe(tsa) ? + tsa_qe_serial_connect(tsa_serial, false) : + tsa_cpm1_serial_connect(tsa_serial, false); +} EXPORT_SYMBOL(tsa_serial_disconnect); int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info) @@ -229,14 +346,14 @@ int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *i } EXPORT_SYMBOL(tsa_serial_get_info); -static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area, - u32 tdms, u32 tdm_id, bool is_rx) +static void tsa_cpm1_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area, + u32 tdms, u32 tdm_id, bool is_rx) { resource_size_t quarter; resource_size_t half; - quarter = tsa->si_ram_sz/4; - half = tsa->si_ram_sz/2; + quarter = tsa->si_ram_sz / 4; + half = tsa->si_ram_sz / 2; if (tdms == BIT(TSA_TDMA)) { /* Only TDMA */ @@ -281,7 +398,42 @@ static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area } } -static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id) +static void tsa_qe_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area, + u32 tdms, u32 tdm_id, bool is_rx) +{ + resource_size_t eighth; + resource_size_t half; + + eighth = tsa->si_ram_sz / 8; + half = tsa->si_ram_sz / 2; + + /* + * One half of the SI RAM used for Tx, the other one for Rx. + * In each half, 1/4 of the area is assigned to each TDM. 
+ */ + if (is_rx) { + /* Rx: Second half of si_ram */ + area->entries_start = tsa->si_ram + half + (eighth * tdm_id); + area->entries_next = area->entries_start + eighth; + area->last_entry = NULL; + } else { + /* Tx: First half of si_ram */ + area->entries_start = tsa->si_ram + (eighth * tdm_id); + area->entries_next = area->entries_start + eighth; + area->last_entry = NULL; + } +} + +static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area, + u32 tdms, u32 tdm_id, bool is_rx) +{ + if (tsa_is_qe(tsa)) + tsa_qe_init_entries_area(tsa, area, tdms, tdm_id, is_rx); + else + tsa_cpm1_init_entries_area(tsa, area, tdms, tdm_id, is_rx); +} + +static const char *tsa_cpm1_serial_id2name(struct tsa *tsa, u32 serial_id) { switch (serial_id) { case FSL_CPM_TSA_NU: return "Not used"; @@ -296,22 +448,44 @@ static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id) return NULL; } -static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id) +static const char *tsa_qe_serial_id2name(struct tsa *tsa, u32 serial_id) { switch (serial_id) { - case FSL_CPM_TSA_SCC2: return TSA_SIRAM_ENTRY_CSEL_SCC2; - case FSL_CPM_TSA_SCC3: return TSA_SIRAM_ENTRY_CSEL_SCC3; - case FSL_CPM_TSA_SCC4: return TSA_SIRAM_ENTRY_CSEL_SCC4; - case FSL_CPM_TSA_SMC1: return TSA_SIRAM_ENTRY_CSEL_SMC1; - case FSL_CPM_TSA_SMC2: return TSA_SIRAM_ENTRY_CSEL_SMC2; + case FSL_QE_TSA_NU: return "Not used"; + case FSL_QE_TSA_UCC1: return "UCC1"; + case FSL_QE_TSA_UCC2: return "UCC2"; + case FSL_QE_TSA_UCC3: return "UCC3"; + case FSL_QE_TSA_UCC4: return "UCC4"; + case FSL_QE_TSA_UCC5: return "UCC5"; default: break; } - return TSA_SIRAM_ENTRY_CSEL_NU; + return NULL; } -static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area, - u32 count, u32 serial_id) +static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id) +{ + return tsa_is_qe(tsa) ? 
+ tsa_qe_serial_id2name(tsa, serial_id) : + tsa_cpm1_serial_id2name(tsa, serial_id); +} + +static u32 tsa_cpm1_serial_id2csel(struct tsa *tsa, u32 serial_id) +{ + switch (serial_id) { + case FSL_CPM_TSA_SCC2: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2; + case FSL_CPM_TSA_SCC3: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3; + case FSL_CPM_TSA_SCC4: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4; + case FSL_CPM_TSA_SMC1: return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1; + case FSL_CPM_TSA_SMC2: return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2; + default: + break; + } + return TSA_CPM1_SIRAM_ENTRY_CSEL_NU; +} + +static int tsa_cpm1_add_entry(struct tsa *tsa, struct tsa_entries_area *area, + u32 count, u32 serial_id) { void __iomem *addr; u32 left; @@ -329,21 +503,21 @@ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area, if (area->last_entry) { /* Clear last flag */ - tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST); + tsa_clrbits32(area->last_entry, TSA_CPM1_SIRAM_ENTRY_LAST); } left = count; while (left) { - val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id); + val = TSA_CPM1_SIRAM_ENTRY_BYTE | tsa_cpm1_serial_id2csel(tsa, serial_id); if (left > 16) { cnt = 16; } else { cnt = left; - val |= TSA_SIRAM_ENTRY_LAST; + val |= TSA_CPM1_SIRAM_ENTRY_LAST; area->last_entry = addr; } - val |= TSA_SIRAM_ENTRY_CNT(cnt - 1); + val |= TSA_CPM1_SIRAM_ENTRY_CNT(cnt - 1); tsa_write32(addr, val); addr += 4; @@ -353,6 +527,71 @@ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area, return 0; } +static u32 tsa_qe_serial_id2csel(struct tsa *tsa, u32 serial_id) +{ + switch (serial_id) { + case FSL_QE_TSA_UCC1: return TSA_QE_SIRAM_ENTRY_CSEL_UCC1; + case FSL_QE_TSA_UCC2: return TSA_QE_SIRAM_ENTRY_CSEL_UCC2; + case FSL_QE_TSA_UCC3: return TSA_QE_SIRAM_ENTRY_CSEL_UCC3; + case FSL_QE_TSA_UCC4: return TSA_QE_SIRAM_ENTRY_CSEL_UCC4; + case FSL_QE_TSA_UCC5: return TSA_QE_SIRAM_ENTRY_CSEL_UCC5; + default: + break; + } + return TSA_QE_SIRAM_ENTRY_CSEL_NU; +} + +static int tsa_qe_add_entry(struct tsa *tsa, struct tsa_entries_area *area, + u32 count, u32 serial_id) +{ + void __iomem *addr; + u32 left; + u32 val; + u32 cnt; + u32 nb; + + addr = area->last_entry ? area->last_entry + 2 : area->entries_start; + + nb = DIV_ROUND_UP(count, 8); + if ((addr + (nb * 2)) > area->entries_next) { + dev_err(tsa->dev, "si ram area full\n"); + return -ENOSPC; + } + + if (area->last_entry) { + /* Clear last flag */ + tsa_clrbits16(area->last_entry, TSA_QE_SIRAM_ENTRY_LAST); + } + + left = count; + while (left) { + val = TSA_QE_SIRAM_ENTRY_BYTE | tsa_qe_serial_id2csel(tsa, serial_id); + + if (left > 8) { + cnt = 8; + } else { + cnt = left; + val |= TSA_QE_SIRAM_ENTRY_LAST; + area->last_entry = addr; + } + val |= TSA_QE_SIRAM_ENTRY_CNT(cnt - 1); + + tsa_write16(addr, val); + addr += 2; + left -= cnt; + } + + return 0; +} + +static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area, + u32 count, u32 serial_id) +{ + return tsa_is_qe(tsa) ? 
+ tsa_qe_add_entry(tsa, area, count, serial_id) : + tsa_cpm1_add_entry(tsa, area, count, serial_id); +} + static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np, u32 tdms, u32 tdm_id, bool is_rx) { @@ -399,7 +638,7 @@ static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np, } dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n", - tdm_id, route_name, ts, ts+count-1, serial_name); + tdm_id, route_name, ts, ts + count - 1, serial_name); ts += count; ret = tsa_add_entry(tsa, &area, count, serial_id); @@ -449,8 +688,8 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np) int i; tsa->tdms = 0; - tsa->tdm[0].is_enable = false; - tsa->tdm[1].is_enable = false; + for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) + tsa->tdm[i].is_enable = false; for_each_available_child_of_node(np, tdm_np) { ret = of_property_read_u32(tdm_np, "reg", &tdm_id); @@ -466,7 +705,18 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np) case 1: tsa->tdms |= BIT(TSA_TDMB); break; + case 2: + if (!tsa_is_qe(tsa)) + goto invalid_tdm; /* Not available on CPM1 */ + tsa->tdms |= BIT(TSA_TDMC); + break; + case 3: + if (!tsa_is_qe(tsa)) + goto invalid_tdm; /* Not available on CPM1 */ + tsa->tdms |= BIT(TSA_TDMD); + break; default: +invalid_tdm: dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np, tdm_id); of_node_put(tdm_np); @@ -532,10 +782,14 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np) if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge")) tdm->simode_tdm |= TSA_SIMODE_TDM_FE; + if (tsa_is_qe(tsa) && + of_property_read_bool(tdm_np, "fsl,fsync-active-low")) + tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SL; + if (of_property_read_bool(tdm_np, "fsl,double-speed-clock")) tdm->simode_tdm |= TSA_SIMODE_TDM_DSC; - clk = of_clk_get_by_name(tdm_np, "l1rsync"); + clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rsync" : "l1rsync"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); of_node_put(tdm_np); @@ -549,7 +803,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np) } tdm->l1rsync_clk = clk; - clk = of_clk_get_by_name(tdm_np, "l1rclk"); + clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rclk" : "l1rclk"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); of_node_put(tdm_np); @@ -564,7 +818,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np) tdm->l1rclk_clk = clk; if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) { - clk = of_clk_get_by_name(tdm_np, "l1tsync"); + clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tsync" : "l1tsync"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); of_node_put(tdm_np); @@ -578,7 +832,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np) } tdm->l1tsync_clk = clk; - clk = of_clk_get_by_name(tdm_np, "l1tclk"); + clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tclk" : "l1tclk"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); of_node_put(tdm_np); @@ -593,6 +847,17 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np) tdm->l1tclk_clk = clk; } + if (tsa_is_qe(tsa)) { + /* + * The starting address for TSA table must be set. + * 512 entries for Tx and 512 entries for Rx are + * available for 4 TDMs. + * We assign entries equally -> 128 Rx/Tx entries per + * TDM. In other words, 4 blocks of 32 entries per TDM. 
+ */ + tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SAD(4 * tdm_id); + } + ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id); if (ret) { of_node_put(tdm_np); @@ -610,7 +875,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np) return 0; err: - for (i = 0; i < 2; i++) { + for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) { if (tsa->tdm[i].l1rsync_clk) { clk_disable_unprepare(tsa->tdm[i].l1rsync_clk); clk_put(tsa->tdm[i].l1rsync_clk); @@ -636,8 +901,87 @@ static void tsa_init_si_ram(struct tsa *tsa) resource_size_t i; /* Fill all entries as the last one */ - for (i = 0; i < tsa->si_ram_sz; i += 4) - tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST); + if (tsa_is_qe(tsa)) { + for (i = 0; i < tsa->si_ram_sz; i += 2) + tsa_write16(tsa->si_ram + i, TSA_QE_SIRAM_ENTRY_LAST); + } else { + for (i = 0; i < tsa->si_ram_sz; i += 4) + tsa_write32(tsa->si_ram + i, TSA_CPM1_SIRAM_ENTRY_LAST); + } +} + +static int tsa_cpm1_setup(struct tsa *tsa) +{ + u32 val; + + /* Set SIMODE */ + val = 0; + if (tsa->tdm[0].is_enable) + val |= TSA_CPM1_SIMODE_TDMA(tsa->tdm[0].simode_tdm); + if (tsa->tdm[1].is_enable) + val |= TSA_CPM1_SIMODE_TDMB(tsa->tdm[1].simode_tdm); + + tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SIMODE, + TSA_CPM1_SIMODE_TDMA(TSA_CPM1_SIMODE_TDM_MASK) | + TSA_CPM1_SIMODE_TDMB(TSA_CPM1_SIMODE_TDM_MASK), + val); + + /* Set SIGMR */ + val = (tsa->tdms == BIT(TSA_TDMA)) ? + TSA_CPM1_SIGMR_RDM_STATIC_TDMA : TSA_CPM1_SIGMR_RDM_STATIC_TDMAB; + if (tsa->tdms & BIT(TSA_TDMA)) + val |= TSA_CPM1_SIGMR_ENA; + if (tsa->tdms & BIT(TSA_TDMB)) + val |= TSA_CPM1_SIGMR_ENB; + tsa_write8(tsa->si_regs + TSA_CPM1_SIGMR, val); + + return 0; +} + +static int tsa_qe_setup(struct tsa *tsa) +{ + unsigned int sixmr; + u8 siglmrh = 0; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) { + if (!tsa->tdm[i].is_enable) + continue; + + switch (i) { + case 0: + sixmr = TSA_QE_SIAMR; + siglmrh |= TSA_QE_SIGLMRH_ENA; + break; + case 1: + sixmr = TSA_QE_SIBMR; + siglmrh |= TSA_QE_SIGLMRH_ENB; + break; + case 2: + sixmr = TSA_QE_SICMR; + siglmrh |= TSA_QE_SIGLMRH_ENC; + break; + case 3: + sixmr = TSA_QE_SIDMR; + siglmrh |= TSA_QE_SIGLMRH_END; + break; + default: + return -EINVAL; + } + + /* Set SI mode register */ + tsa_write16(tsa->si_regs + sixmr, tsa->tdm[i].simode_tdm); + } + + /* Enable TDMs */ + tsa_write8(tsa->si_regs + TSA_QE_SIGLMRH, siglmrh); + + return 0; +} + +static int tsa_setup(struct tsa *tsa) +{ + return tsa_is_qe(tsa) ? 
tsa_qe_setup(tsa) : tsa_cpm1_setup(tsa); } static int tsa_probe(struct platform_device *pdev) @@ -646,7 +990,6 @@ static int tsa_probe(struct platform_device *pdev) struct resource *res; struct tsa *tsa; unsigned int i; - u32 val; int ret; tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL); @@ -654,6 +997,18 @@ static int tsa_probe(struct platform_device *pdev) return -ENOMEM; tsa->dev = &pdev->dev; + tsa->version = (enum tsa_version)(uintptr_t)of_device_get_match_data(&pdev->dev); + switch (tsa->version) { + case TSA_CPM1: + dev_info(tsa->dev, "CPM1 version\n"); + break; + case TSA_QE: + dev_info(tsa->dev, "QE version\n"); + break; + default: + dev_err(tsa->dev, "Unknown version (%d)\n", tsa->version); + return -EINVAL; + } for (i = 0; i < ARRAY_SIZE(tsa->serials); i++) tsa->serials[i].id = i; @@ -680,26 +1035,9 @@ static int tsa_probe(struct platform_device *pdev) if (ret) return ret; - /* Set SIMODE */ - val = 0; - if (tsa->tdm[0].is_enable) - val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm); - if (tsa->tdm[1].is_enable) - val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm); - - tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE, - TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) | - TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK), - val); - - /* Set SIGMR */ - val = (tsa->tdms == BIT(TSA_TDMA)) ? - TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB; - if (tsa->tdms & BIT(TSA_TDMA)) - val |= TSA_SIGMR_ENA; - if (tsa->tdms & BIT(TSA_TDMB)) - val |= TSA_SIGMR_ENB; - tsa_write8(tsa->si_regs + TSA_SIGMR, val); + ret = tsa_setup(tsa); + if (ret) + return ret; platform_set_drvdata(pdev, tsa); @@ -711,7 +1049,7 @@ static void tsa_remove(struct platform_device *pdev) struct tsa *tsa = platform_get_drvdata(pdev); int i; - for (i = 0; i < 2; i++) { + for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) { if (tsa->tdm[i].l1rsync_clk) { clk_disable_unprepare(tsa->tdm[i].l1rsync_clk); clk_put(tsa->tdm[i].l1rsync_clk); @@ -732,7 +1070,12 @@ static void tsa_remove(struct platform_device *pdev) } static const struct of_device_id tsa_id_table[] = { - { .compatible = "fsl,cpm1-tsa" }, +#if IS_ENABLED(CONFIG_CPM1) + { .compatible = "fsl,cpm1-tsa", .data = (void *)TSA_CPM1 }, +#endif +#if IS_ENABLED(CONFIG_QUICC_ENGINE) + { .compatible = "fsl,qe-tsa", .data = (void *)TSA_QE }, +#endif {} /* sentinel */ }; MODULE_DEVICE_TABLE(of, tsa_id_table); @@ -841,5 +1184,5 @@ struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev, EXPORT_SYMBOL(devm_tsa_serial_get_byphandle); MODULE_AUTHOR("Herve Codina "); -MODULE_DESCRIPTION("CPM TSA driver"); +MODULE_DESCRIPTION("CPM/QE TSA driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/soc/fsl/qe/tsa.h b/drivers/soc/fsl/qe/tsa.h index d9df89b6da3e1e..da137bc0f49bd9 100644 --- a/drivers/soc/fsl/qe/tsa.h +++ b/drivers/soc/fsl/qe/tsa.h @@ -39,4 +39,7 @@ struct tsa_serial_info { /* Get information */ int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info); +/* Get serial number */ +int tsa_serial_get_num(struct tsa_serial *tsa_serial); + #endif /* __SOC_FSL_TSA_H__ */ diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c index 21dbcd787cd505..892aa5931d5bd6 100644 --- a/drivers/soc/fsl/qe/ucc.c +++ b/drivers/soc/fsl/qe/ucc.c @@ -114,6 +114,7 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask) return 0; } +EXPORT_SYMBOL(ucc_mux_set_grant_tsa_bkpt); int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, enum comm_dir mode) diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index 
01b129caf1eb25..5250c1d702eb9b 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -327,11 +327,11 @@ enum mtk_mutex_sof_id { }; struct mtk_mutex_data { - const unsigned int *mutex_mod; - const unsigned int *mutex_sof; - const unsigned int mutex_mod_reg; - const unsigned int mutex_sof_reg; - const unsigned int *mutex_table_mod; + const u8 *mutex_mod; + const u8 *mutex_table_mod; + const u16 *mutex_sof; + const u16 mutex_mod_reg; + const u16 mutex_sof_reg; const bool no_clk; }; @@ -345,7 +345,7 @@ struct mtk_mutex_ctx { struct cmdq_client_reg cmdq_reg; }; -static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS, [DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR, [DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL, @@ -354,7 +354,7 @@ static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA, }; -static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1, [DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0, @@ -374,7 +374,7 @@ static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1, }; -static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8167_MUTEX_MOD_DISP_AAL, [DDP_COMPONENT_CCORR] = MT8167_MUTEX_MOD_DISP_CCORR, [DDP_COMPONENT_COLOR0] = MT8167_MUTEX_MOD_DISP_COLOR, @@ -389,7 +389,7 @@ static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT8167_MUTEX_MOD_DISP_WDMA0, }; -static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL, [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0, [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1, @@ -407,7 +407,7 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1, }; -static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8183_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_CCORR] = MT8183_MUTEX_MOD_DISP_CCORR0, [DDP_COMPONENT_COLOR0] = MT8183_MUTEX_MOD_DISP_COLOR0, @@ -421,7 +421,7 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0, }; -static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { +static const u8 mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0, [MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0, [MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1, @@ -432,7 +432,7 @@ static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0, }; -static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0, [DDP_COMPONENT_COLOR0] = MT8186_MUTEX_MOD_DISP_COLOR0, @@ -445,7 +445,7 @@ static const unsigned int 
mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_RDMA1] = MT8186_MUTEX_MOD_DISP_RDMA1, }; -static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { +static const u8 mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_RDMA0] = MT8186_MUTEX_MOD_MDP_RDMA0, [MUTEX_MOD_IDX_MDP_RSZ0] = MT8186_MUTEX_MOD_MDP_RSZ0, [MUTEX_MOD_IDX_MDP_RSZ1] = MT8186_MUTEX_MOD_MDP_RSZ1, @@ -456,7 +456,7 @@ static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_COLOR0] = MT8186_MUTEX_MOD_MDP_COLOR0, }; -static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_OVL0] = MT8188_MUTEX_MOD_DISP_OVL0, [DDP_COMPONENT_WDMA0] = MT8188_MUTEX_MOD_DISP_WDMA0, [DDP_COMPONENT_RDMA0] = MT8188_MUTEX_MOD_DISP_RDMA0, @@ -496,7 +496,7 @@ static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_MERGE5] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE4, }; -static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { +static const u8 mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_RDMA0] = MT8195_MUTEX_MOD_MDP_RDMA0, [MUTEX_MOD_IDX_MDP_RDMA2] = MT8195_MUTEX_MOD_MDP_RDMA2, [MUTEX_MOD_IDX_MDP_RDMA3] = MT8195_MUTEX_MOD_MDP_RDMA3, @@ -530,7 +530,7 @@ static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3, }; -static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0, [DDP_COMPONENT_COLOR0] = MT8192_MUTEX_MOD_DISP_COLOR0, @@ -544,7 +544,7 @@ static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_RDMA4] = MT8192_MUTEX_MOD_DISP_RDMA4, }; -static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_OVL0] = MT8195_MUTEX_MOD_DISP_OVL0, [DDP_COMPONENT_WDMA0] = MT8195_MUTEX_MOD_DISP_WDMA0, [DDP_COMPONENT_RDMA0] = MT8195_MUTEX_MOD_DISP_RDMA0, @@ -575,7 +575,7 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_DP_INTF1] = MT8195_MUTEX_MOD_DISP1_DP_INTF0, }; -static const unsigned int mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { +static const u8 mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_RDMA0] = MT8195_MUTEX_MOD_MDP_RDMA0, [MUTEX_MOD_IDX_MDP_RDMA1] = MT8195_MUTEX_MOD_MDP_RDMA1, [MUTEX_MOD_IDX_MDP_RDMA2] = MT8195_MUTEX_MOD_MDP_RDMA2, @@ -621,7 +621,7 @@ static const unsigned int mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3, }; -static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = { +static const u8 mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL, [DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR, [DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0, @@ -637,7 +637,7 @@ static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0, }; -static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = { +static const u16 mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, [MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1, @@ -647,14 +647,14 @@ static const unsigned int 
mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3, }; -static const unsigned int mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = { +static const u16 mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, [MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1, [MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0, }; -static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = { +static const u16 mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, [MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0, @@ -662,13 +662,13 @@ static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = { }; /* Add EOF setting so overlay hardware can receive frame done irq */ -static const unsigned int mt8183_mutex_sof[DDP_MUTEX_SOF_MAX] = { +static const u16 mt8183_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0 | MT8183_MUTEX_EOF_DSI0, [MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0, }; -static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = { +static const u16 mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MT8186_MUTEX_SOF_DSI0 | MT8186_MUTEX_EOF_DSI0, [MUTEX_SOF_DPI0] = MT8186_MUTEX_SOF_DPI0 | MT8186_MUTEX_EOF_DPI0, @@ -682,7 +682,7 @@ static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = { * but also detect the error at end of frame(EAEOF) when EOF signal * arrives. */ -static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = { +static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0, @@ -692,7 +692,7 @@ static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = { MT8188_MUTEX_SOF_DP_INTF1 | MT8188_MUTEX_EOF_DP_INTF1, }; -static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = { +static const u16 mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MT8195_MUTEX_SOF_DSI0 | MT8195_MUTEX_EOF_DSI0, [MUTEX_SOF_DSI1] = MT8195_MUTEX_SOF_DSI1 | MT8195_MUTEX_EOF_DSI1, diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index efd9cae212dc64..9fdc0ef7920261 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -483,7 +483,7 @@ enum pwrap_regs { PWRAP_MSB_FIRST, }; -static int mt2701_regs[] = { +static const int mt2701_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -569,7 +569,7 @@ static int mt2701_regs[] = { [PWRAP_ADC_RDATA_ADDR2] = 0x154, }; -static int mt6765_regs[] = { +static const int mt6765_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -601,7 +601,7 @@ static int mt6765_regs[] = { [PWRAP_DCM_DBC_PRD] = 0x1E0, }; -static int mt6779_regs[] = { +static const int mt6779_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -640,7 +640,7 @@ static int mt6779_regs[] = { [PWRAP_WACS2_VLDCLR] = 0xC28, }; -static int mt6795_regs[] = { +static const int mt6795_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -725,7 +725,7 @@ static int mt6795_regs[] = { [PWRAP_EXT_CK] = 0x14c, }; -static int mt6797_regs[] = { +static const int mt6797_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -758,7 +758,7 @@ static int mt6797_regs[] 
= { [PWRAP_DCM_DBC_PRD] = 0x1D4, }; -static int mt6873_regs[] = { +static const int mt6873_regs[] = { [PWRAP_INIT_DONE2] = 0x0, [PWRAP_TIMER_EN] = 0x3E0, [PWRAP_INT_EN] = 0x448, @@ -769,7 +769,7 @@ static int mt6873_regs[] = { [PWRAP_WACS2_RDATA] = 0xCA8, }; -static int mt7622_regs[] = { +static const int mt7622_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -881,7 +881,7 @@ static int mt7622_regs[] = { [PWRAP_SPI2_CTRL] = 0x244, }; -static int mt8135_regs[] = { +static const int mt8135_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -954,7 +954,7 @@ static int mt8135_regs[] = { [PWRAP_DCM_DBC_PRD] = 0x160, }; -static int mt8173_regs[] = { +static const int mt8173_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -1036,7 +1036,7 @@ static int mt8173_regs[] = { [PWRAP_DCM_DBC_PRD] = 0x148, }; -static int mt8183_regs[] = { +static const int mt8183_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -1087,7 +1087,7 @@ static int mt8183_regs[] = { [PWRAP_WACS2_VLDCLR] = 0xC28, }; -static int mt8195_regs[] = { +static const int mt8195_regs[] = { [PWRAP_INIT_DONE2] = 0x0, [PWRAP_STAUPD_CTRL] = 0x4C, [PWRAP_TIMER_EN] = 0x3E4, @@ -1104,7 +1104,7 @@ static int mt8195_regs[] = { [PWRAP_WACS2_RDATA] = 0x8A8, }; -static int mt8365_regs[] = { +static const int mt8365_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -1166,7 +1166,7 @@ static int mt8365_regs[] = { [PWRAP_WDT_SRC_EN_1] = 0xf8, }; -static int mt8516_regs[] = { +static const int mt8516_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -1251,7 +1251,7 @@ static int mt8516_regs[] = { [PWRAP_MSB_FIRST] = 0x170, }; -static int mt8186_regs[] = { +static const int mt8186_regs[] = { [PWRAP_MUX_SEL] = 0x0, [PWRAP_WRAP_EN] = 0x4, [PWRAP_DIO_EN] = 0x8, @@ -1366,10 +1366,6 @@ struct pmic_wrapper { struct regmap *regmap; const struct pmic_wrapper_type *master; const struct pwrap_slv_type *slave; - struct clk *clk_spi; - struct clk *clk_wrap; - struct clk *clk_sys; - struct clk *clk_tmr; struct reset_control *rstc; struct reset_control *rstc_bridge; @@ -1377,7 +1373,7 @@ struct pmic_wrapper { }; struct pmic_wrapper_type { - int *regs; + const int *regs; enum pwrap_type type; u32 arb_en_all; u32 int_en_all; @@ -2397,7 +2393,7 @@ static const struct pmic_wrapper_type pwrap_mt8183 = { .init_soc_specific = pwrap_mt8183_init_soc_specific, }; -static struct pmic_wrapper_type pwrap_mt8195 = { +static const struct pmic_wrapper_type pwrap_mt8195 = { .regs = mt8195_regs, .type = PWRAP_MT8195, .arb_en_all = 0x777f, /* NEED CONFIRM */ @@ -2423,7 +2419,7 @@ static const struct pmic_wrapper_type pwrap_mt8365 = { .init_soc_specific = NULL, }; -static struct pmic_wrapper_type pwrap_mt8516 = { +static const struct pmic_wrapper_type pwrap_mt8516 = { .regs = mt8516_regs, .type = PWRAP_MT8516, .arb_en_all = 0xff, @@ -2435,7 +2431,7 @@ static struct pmic_wrapper_type pwrap_mt8516 = { .init_soc_specific = NULL, }; -static struct pmic_wrapper_type pwrap_mt8186 = { +static const struct pmic_wrapper_type pwrap_mt8186 = { .regs = mt8186_regs, .type = PWRAP_MT8186, .arb_en_all = 0xfb27f, @@ -2472,6 +2468,7 @@ static int pwrap_probe(struct platform_device *pdev) int ret, irq; u32 mask_done; struct pmic_wrapper *wrp; + struct clk_bulk_data *clk; struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_slave_id = NULL; @@ -2521,49 +2518,10 @@ static int pwrap_probe(struct 
platform_device *pdev) } } - wrp->clk_spi = devm_clk_get(wrp->dev, "spi"); - if (IS_ERR(wrp->clk_spi)) { - dev_dbg(wrp->dev, "failed to get clock: %ld\n", - PTR_ERR(wrp->clk_spi)); - return PTR_ERR(wrp->clk_spi); - } - - wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap"); - if (IS_ERR(wrp->clk_wrap)) { - dev_dbg(wrp->dev, "failed to get clock: %ld\n", - PTR_ERR(wrp->clk_wrap)); - return PTR_ERR(wrp->clk_wrap); - } - - wrp->clk_sys = devm_clk_get_optional(wrp->dev, "sys"); - if (IS_ERR(wrp->clk_sys)) { - return dev_err_probe(wrp->dev, PTR_ERR(wrp->clk_sys), - "failed to get clock: %pe\n", - wrp->clk_sys); - } - - wrp->clk_tmr = devm_clk_get_optional(wrp->dev, "tmr"); - if (IS_ERR(wrp->clk_tmr)) { - return dev_err_probe(wrp->dev, PTR_ERR(wrp->clk_tmr), - "failed to get clock: %pe\n", - wrp->clk_tmr); - } - - ret = clk_prepare_enable(wrp->clk_spi); - if (ret) - return ret; - - ret = clk_prepare_enable(wrp->clk_wrap); + ret = devm_clk_bulk_get_all_enable(wrp->dev, &clk); if (ret) - goto err_out1; - - ret = clk_prepare_enable(wrp->clk_sys); - if (ret) - goto err_out2; - - ret = clk_prepare_enable(wrp->clk_tmr); - if (ret) - goto err_out3; + return dev_err_probe(wrp->dev, ret, + "failed to get clocks\n"); /* Enable internal dynamic clock */ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_DCM)) { @@ -2579,7 +2537,7 @@ static int pwrap_probe(struct platform_device *pdev) ret = pwrap_init(wrp); if (ret) { dev_dbg(wrp->dev, "init failed with %d\n", ret); - goto err_out4; + return ret; } } @@ -2592,8 +2550,7 @@ static int pwrap_probe(struct platform_device *pdev) if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & mask_done)) { dev_dbg(wrp->dev, "initialization isn't finished\n"); - ret = -ENODEV; - goto err_out4; + return -ENODEV; } /* Initialize watchdog, may not be done by the bootloader */ @@ -2622,42 +2579,27 @@ static int pwrap_probe(struct platform_device *pdev) pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - ret = irq; - goto err_out2; - } + if (irq < 0) + return irq; ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH, "mt-pmic-pwrap", wrp); if (ret) - goto err_out4; + return ret; wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap); - if (IS_ERR(wrp->regmap)) { - ret = PTR_ERR(wrp->regmap); - goto err_out2; - } + if (IS_ERR(wrp->regmap)) + return PTR_ERR(wrp->regmap); ret = of_platform_populate(np, NULL, NULL, wrp->dev); if (ret) { dev_dbg(wrp->dev, "failed to create child devices at %pOF\n", np); - goto err_out4; + return ret; } return 0; - -err_out4: - clk_disable_unprepare(wrp->clk_tmr); -err_out3: - clk_disable_unprepare(wrp->clk_sys); -err_out2: - clk_disable_unprepare(wrp->clk_wrap); -err_out1: - clk_disable_unprepare(wrp->clk_spi); - - return ret; } static struct platform_driver pwrap_drv = { diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index d3560f861085ed..acbca2ab5cc2a9 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -25,6 +25,7 @@ qcom_rpmh-y += rpmh.o obj-$(CONFIG_QCOM_SMD_RPM) += rpm-proc.o smd-rpm.o obj-$(CONFIG_QCOM_SMEM) += smem.o obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o +CFLAGS_smp2p.o := -I$(src) obj-$(CONFIG_QCOM_SMP2P) += smp2p.o obj-$(CONFIG_QCOM_SMSM) += smsm.o obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 4fbff3a890e23f..a956c407ce0302 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -485,11 +485,10 @@ static int of_apr_add_pd_lookups(struct 
device *dev) { const char *service_name, *service_path; struct packet_router *apr = dev_get_drvdata(dev); - struct device_node *node; struct pdr_service *pds; int ret; - for_each_child_of_node(dev->of_node, node) { + for_each_child_of_node_scoped(dev->of_node, node) { ret = of_property_read_string_index(node, "qcom,protection-domain", 0, &service_name); if (ret < 0) @@ -499,14 +498,12 @@ static int of_apr_add_pd_lookups(struct device *dev) 1, &service_path); if (ret < 0) { dev_err(dev, "pdr service path missing: %d\n", ret); - of_node_put(node); return ret; } pds = pdr_add_lookup(apr->pdr, service_name, service_path); if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) { dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds)); - of_node_put(node); return PTR_ERR(pds); } } diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c index e7851974084b65..f9235bc3aa3bbb 100644 --- a/drivers/soc/qcom/icc-bwmon.c +++ b/drivers/soc/qcom/icc-bwmon.c @@ -17,6 +17,8 @@ #include #include #include +#define CREATE_TRACE_POINTS +#include "trace_icc-bwmon.h" /* * The BWMON samples data throughput within 'sample_ms' time. With three @@ -645,9 +647,10 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id) struct icc_bwmon *bwmon = dev_id; unsigned int irq_enable = 0; struct dev_pm_opp *opp, *target_opp; - unsigned int bw_kbps, up_kbps, down_kbps; + unsigned int bw_kbps, up_kbps, down_kbps, meas_kbps; bw_kbps = bwmon->target_kbps; + meas_kbps = bwmon->target_kbps; target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0); if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE) @@ -679,6 +682,7 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id) bwmon_clear_irq(bwmon); bwmon_enable(bwmon, irq_enable); + trace_qcom_bwmon_update(dev_name(bwmon->dev), meas_kbps, up_kbps, down_kbps); if (bwmon->target_kbps == bwmon->current_kbps) goto out; diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c index fbab7fe5c652b9..50be7a9274a14e 100644 --- a/drivers/soc/qcom/ice.c +++ b/drivers/soc/qcom/ice.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -265,7 +266,6 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct qcom_ice *ice; - struct device_node *node; struct resource *res; void __iomem *base; @@ -292,15 +292,15 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev) * (legacy DT binding), then it must at least provide a phandle * to the ICE devicetree node, otherwise ICE is not supported. 
*/ - node = of_parse_phandle(dev->of_node, "qcom,ice", 0); + struct device_node *node __free(device_node) = of_parse_phandle(dev->of_node, + "qcom,ice", 0); if (!node) return NULL; pdev = of_find_device_by_node(node); if (!pdev) { dev_err(dev, "Cannot find device node %s\n", node->name); - ice = ERR_PTR(-EPROBE_DEFER); - goto out; + return ERR_PTR(-EPROBE_DEFER); } ice = platform_get_drvdata(pdev); @@ -308,8 +308,7 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev) dev_err(dev, "Cannot get ice instance from %s\n", dev_name(&pdev->dev)); platform_device_put(pdev); - ice = ERR_PTR(-EPROBE_DEFER); - goto out; + return ERR_PTR(-EPROBE_DEFER); } ice->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); @@ -321,9 +320,6 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev) ice = ERR_PTR(-EINVAL); } -out: - of_node_put(node); - return ice; } EXPORT_SYMBOL_GPL(of_qcom_ice_get); diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 37e11e50172859..8fa4ffd3a9b592 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -453,26 +453,24 @@ static const struct llcc_slice_config qdu1000_data_8ch[] = { }; static const struct llcc_slice_config x1e80100_data[] = { - {LLCC_CPUSS, 1, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_AUDIO, 6, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CPUSS, 1, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_VIDSC0, 2, 512, 4, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {LLCC_CMPT, 10, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_GPUHTW, 11, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_GPU, 9, 4096, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_MMUHWT, 18, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_GPU, 9, 4608, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_MMUHWT, 18, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_CVP, 8, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_CAMEXP1, 7, 3072, 2, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_LCPDARE, 30, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CVP, 8, 512, 4, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_WRCACHE, 31, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP0, 4, 256, 4, 1, 0x3, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP1, 7, 3072, 3, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_LCPDARE, 30, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}, {LLCC_AENPU, 3, 3072, 1, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_ISLAND1, 12, 512, 7, 1, 0x1, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_ISLAND2, 13, 512, 7, 1, 0x2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_ISLAND3, 14, 512, 
7, 1, 0x3, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_ISLAND4, 15, 512, 7, 1, 0x4, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_CAMEXP2, 19, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_CAMEXP3, 20, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {LLCC_CAMEXP4, 21, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_ISLAND1, 12, 2048, 7, 1, 0x0, 0xF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP2, 19, 3072, 3, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP3, 20, 3072, 2, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP4, 21, 3072, 2, 1, 0xFFC, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }; static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = { diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index 6b6dd80cbc0f6a..ff8df7d75d6b26 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -186,23 +186,20 @@ static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf, struct ocmem *of_get_ocmem(struct device *dev) { struct platform_device *pdev; - struct device_node *devnode; struct ocmem *ocmem; - devnode = of_parse_phandle(dev->of_node, "sram", 0); + struct device_node *devnode __free(device_node) = of_parse_phandle(dev->of_node, + "sram", 0); if (!devnode || !devnode->parent) { dev_err(dev, "Cannot look up sram phandle\n"); - of_node_put(devnode); return ERR_PTR(-ENODEV); } pdev = of_find_device_by_node(devnode->parent); if (!pdev) { dev_err(dev, "Cannot find device node %s\n", devnode->name); - of_node_put(devnode); return ERR_PTR(-EPROBE_DEFER); } - of_node_put(devnode); ocmem = platform_get_drvdata(pdev); if (!ocmem) { diff --git a/drivers/soc/qcom/qcom-pbs.c b/drivers/soc/qcom/qcom-pbs.c index 6af49b5060e58d..77a70d3d0d0b51 100644 --- a/drivers/soc/qcom/qcom-pbs.c +++ b/drivers/soc/qcom/qcom-pbs.c @@ -3,6 +3,7 @@ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ +#include #include #include #include @@ -148,11 +149,11 @@ EXPORT_SYMBOL_GPL(qcom_pbs_trigger_event); */ struct pbs_dev *get_pbs_client_device(struct device *dev) { - struct device_node *pbs_dev_node; struct platform_device *pdev; struct pbs_dev *pbs; - pbs_dev_node = of_parse_phandle(dev->of_node, "qcom,pbs", 0); + struct device_node *pbs_dev_node __free(device_node) = of_parse_phandle(dev->of_node, + "qcom,pbs", 0); if (!pbs_dev_node) { dev_err(dev, "Missing qcom,pbs property\n"); return ERR_PTR(-ENODEV); @@ -161,28 +162,23 @@ struct pbs_dev *get_pbs_client_device(struct device *dev) pdev = of_find_device_by_node(pbs_dev_node); if (!pdev) { dev_err(dev, "Unable to find PBS dev_node\n"); - pbs = ERR_PTR(-EPROBE_DEFER); - goto out; + return ERR_PTR(-EPROBE_DEFER); } pbs = platform_get_drvdata(pdev); if (!pbs) { dev_err(dev, "Cannot get pbs instance from %s\n", dev_name(&pdev->dev)); platform_device_put(pdev); - pbs = ERR_PTR(-EPROBE_DEFER); - goto out; + return ERR_PTR(-EPROBE_DEFER); } pbs->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); if (!pbs->link) { dev_err(&pdev->dev, "Failed to create device link to consumer %s\n", dev_name(dev)); platform_device_put(pdev); - pbs = ERR_PTR(-EINVAL); - goto out; + return ERR_PTR(-EINVAL); } -out: - of_node_put(pbs_dev_node); return pbs; } EXPORT_SYMBOL_GPL(get_pbs_client_device); diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index ca2f6b7629ce03..60af26667bce45 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -394,7 +394,7 @@ static int qmp_cooling_device_add(struct qmp *qmp, static int qmp_cooling_devices_register(struct qmp *qmp) { - struct device_node *np, *child; + struct device_node *np; int count = 0; int ret; @@ -407,15 +407,13 @@ static int qmp_cooling_devices_register(struct qmp *qmp) if (!qmp->cooling_devs) return -ENOMEM; - for_each_available_child_of_node(np, child) { + for_each_available_child_of_node_scoped(np, child) { if (!of_property_present(child, "#cooling-cells")) continue; ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], child); - if (ret) { - of_node_put(child); + if (ret) goto unroll; - } } if (!count) diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c index 2228595a3dc5a4..c940f4da28ed5c 100644 --- a/drivers/soc/qcom/qcom_pd_mapper.c +++ b/drivers/soc/qcom/qcom_pd_mapper.c @@ -517,12 +517,25 @@ static const struct qcom_pdm_domain_data *sm8550_domains[] = { NULL, }; +static const struct qcom_pdm_domain_data *x1e80100_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_charger_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + NULL, +}; + static const struct of_device_id qcom_pdm_domains[] __maybe_unused = { + { .compatible = "qcom,apq8016", .data = NULL, }, { .compatible = "qcom,apq8064", .data = NULL, }, { .compatible = "qcom,apq8074", .data = NULL, }, { .compatible = "qcom,apq8084", .data = NULL, }, { .compatible = "qcom,apq8096", .data = msm8996_domains, }, { .compatible = "qcom,msm8226", .data = NULL, }, + { .compatible = "qcom,msm8909", .data = NULL, }, + { .compatible = "qcom,msm8916", .data = NULL, }, + { .compatible = "qcom,msm8939", .data = NULL, }, { .compatible = "qcom,msm8974", .data = NULL, }, { .compatible = "qcom,msm8996", .data = msm8996_domains, }, { .compatible = "qcom,msm8998", .data = msm8998_domains, }, @@ -539,12 +552,14 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = { { .compatible = "qcom,sm4250", .data = sm6115_domains, }, { .compatible = "qcom,sm6115", 
.data = sm6115_domains, }, { .compatible = "qcom,sm6350", .data = sm6350_domains, }, + { .compatible = "qcom,sm7325", .data = sc7280_domains, }, { .compatible = "qcom,sm8150", .data = sm8150_domains, }, { .compatible = "qcom,sm8250", .data = sm8250_domains, }, { .compatible = "qcom,sm8350", .data = sm8350_domains, }, { .compatible = "qcom,sm8450", .data = sm8350_domains, }, { .compatible = "qcom,sm8550", .data = sm8550_domains, }, { .compatible = "qcom,sm8650", .data = sm8550_domains, }, + { .compatible = "qcom,x1e80100", .data = x1e80100_domains, }, {}, }; diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index b7056aed4c7d10..f2b3e02abdf138 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -196,9 +196,6 @@ static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) { struct qcom_smd_rpm *rpm; - if (!rpdev->dev.of_node) - return -EINVAL; - rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); if (!rpm) return -ENOMEM; @@ -218,18 +215,44 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) of_platform_depopulate(&rpdev->dev); } -static const struct rpmsg_device_id qcom_smd_rpm_id_table[] = { - { .name = "rpm_requests", }, - { /* sentinel */ } +static const struct of_device_id qcom_smd_rpm_of_match[] = { + { .compatible = "qcom,glink-smd-rpm" }, + { .compatible = "qcom,smd-rpm" }, + /* + * Don't add any more compatibles to the list, two previous entries + * should match all defined devices. + */ + { .compatible = "qcom,rpm-apq8084" }, + { .compatible = "qcom,rpm-ipq6018" }, + { .compatible = "qcom,rpm-ipq9574" }, + { .compatible = "qcom,rpm-msm8226" }, + { .compatible = "qcom,rpm-msm8909" }, + { .compatible = "qcom,rpm-msm8916" }, + { .compatible = "qcom,rpm-msm8936" }, + { .compatible = "qcom,rpm-msm8953" }, + { .compatible = "qcom,rpm-msm8974" }, + { .compatible = "qcom,rpm-msm8976" }, + { .compatible = "qcom,rpm-msm8994" }, + { .compatible = "qcom,rpm-msm8996" }, + { .compatible = "qcom,rpm-msm8998" }, + { .compatible = "qcom,rpm-sdm660" }, + { .compatible = "qcom,rpm-sm6115" }, + { .compatible = "qcom,rpm-sm6125" }, + { .compatible = "qcom,rpm-sm6375" }, + { .compatible = "qcom,rpm-qcm2290" }, + { .compatible = "qcom,rpm-qcs404" }, + {} }; -MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table); +MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match); static struct rpmsg_driver qcom_smd_rpm_driver = { .probe = qcom_smd_rpm_probe, .remove = qcom_smd_rpm_remove, .callback = qcom_smd_rpm_callback, - .id_table = qcom_smd_rpm_id_table, - .drv.name = "qcom_smd_rpm", + .drv = { + .name = "qcom_smd_rpm", + .of_match_table = qcom_smd_rpm_of_match, + }, }; static int __init qcom_smd_rpm_init(void) diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index 696c2a8387d073..cefcbd61c62815 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -161,6 +161,9 @@ struct qcom_smp2p { struct list_head outbound; }; +#define CREATE_TRACE_POINTS +#include "trace-smp2p.h" + static void qcom_smp2p_kick(struct qcom_smp2p *smp2p) { /* Make sure any updated data is written before the kick */ @@ -192,6 +195,7 @@ static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p) { struct smp2p_smem_item *out = smp2p->out; u32 val; + trace_smp2p_ssr_ack(smp2p->dev); smp2p->ssr_ack = !smp2p->ssr_ack; val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT); @@ -214,6 +218,7 @@ static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p) smp2p->ssr_ack_enabled = true; smp2p->negotiation_done = true; + trace_smp2p_negotiate(smp2p->dev, out->features); } } @@
-252,6 +257,8 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p) status = val ^ entry->last_value; entry->last_value = val; + trace_smp2p_notify_in(entry, status, val); + /* No changes of this entry? */ if (!status) continue; @@ -415,6 +422,8 @@ static int smp2p_update_bits(void *data, u32 mask, u32 value) writel(val, entry->value); spin_unlock_irqrestore(&entry->lock, flags); + trace_smp2p_update_bits(entry, orig, val); + if (val != orig) qcom_smp2p_kick(entry->smp2p); @@ -530,7 +539,6 @@ static int smp2p_parse_ipc(struct qcom_smp2p *smp2p) static int qcom_smp2p_probe(struct platform_device *pdev) { struct smp2p_entry *entry; - struct device_node *node; struct qcom_smp2p *smp2p; const char *key; int irq; @@ -584,11 +592,10 @@ static int qcom_smp2p_probe(struct platform_device *pdev) if (ret < 0) goto release_mbox; - for_each_available_child_of_node(pdev->dev.of_node, node) { + for_each_available_child_of_node_scoped(pdev->dev.of_node, node) { entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL); if (!entry) { ret = -ENOMEM; - of_node_put(node); goto unwind_interfaces; } @@ -596,25 +603,19 @@ static int qcom_smp2p_probe(struct platform_device *pdev) spin_lock_init(&entry->lock); ret = of_property_read_string(node, "qcom,entry-name", &entry->name); - if (ret < 0) { - of_node_put(node); + if (ret < 0) goto unwind_interfaces; - } if (of_property_read_bool(node, "interrupt-controller")) { ret = qcom_smp2p_inbound_entry(smp2p, entry, node); - if (ret < 0) { - of_node_put(node); + if (ret < 0) goto unwind_interfaces; - } list_add(&entry->node, &smp2p->inbound); } else { ret = qcom_smp2p_outbound_entry(smp2p, entry, node); - if (ret < 0) { - of_node_put(node); + if (ret < 0) goto unwind_interfaces; - } list_add(&entry->node, &smp2p->outbound); } diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index d7359a235e3cff..64fc4f41da7715 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include @@ -401,11 +401,13 @@ static const struct soc_id soc_id[] = { { qcom_board_id(SA8540P) }, { qcom_board_id(QCM4290) }, { qcom_board_id(QCS4290) }, + { qcom_board_id(SM7325) }, { qcom_board_id_named(SM8450_2, "SM8450") }, { qcom_board_id_named(SM8450_3, "SM8450") }, { qcom_board_id(SC7280) }, { qcom_board_id(SC7180P) }, { qcom_board_id(QCM6490) }, + { qcom_board_id(SM7325P) }, { qcom_board_id(IPQ5000) }, { qcom_board_id(IPQ0509) }, { qcom_board_id(IPQ0518) }, @@ -441,6 +443,8 @@ static const struct soc_id soc_id[] = { { qcom_board_id(QCM8550) }, { qcom_board_id(IPQ5300) }, { qcom_board_id(IPQ5321) }, + { qcom_board_id(QCS8300) }, + { qcom_board_id(QCS8275) }, }; static const char *socinfo_machine(struct device *dev, unsigned int id) diff --git a/drivers/soc/qcom/trace-smp2p.h b/drivers/soc/qcom/trace-smp2p.h new file mode 100644 index 00000000000000..9a6392043f1096 --- /dev/null +++ b/drivers/soc/qcom/trace-smp2p.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM qcom_smp2p + +#if !defined(__QCOM_SMP2P_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __QCOM_SMP2P_TRACE_H__ + +#include +#include + +TRACE_EVENT(smp2p_ssr_ack, + TP_PROTO(const struct device *dev), + TP_ARGS(dev), + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + ), + TP_fast_assign( + __assign_str(dev_name); + ), + TP_printk("%s: SSR detected", __get_str(dev_name)) +); + +TRACE_EVENT(smp2p_negotiate, + TP_PROTO(const struct device *dev, unsigned int features), + TP_ARGS(dev, features), + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + __field(u32, out_features) + ), + TP_fast_assign( + __assign_str(dev_name); + __entry->out_features = features; + ), + TP_printk("%s: state=open out_features=%s", __get_str(dev_name), + __print_flags(__entry->out_features, "|", + {SMP2P_FEATURE_SSR_ACK, "SMP2P_FEATURE_SSR_ACK"}) + ) +); + +TRACE_EVENT(smp2p_notify_in, + TP_PROTO(struct smp2p_entry *smp2p_entry, unsigned long status, u32 val), + TP_ARGS(smp2p_entry, status, val), + TP_STRUCT__entry( + __string(dev_name, dev_name(smp2p_entry->smp2p->dev)) + __string(client_name, smp2p_entry->name) + __field(unsigned long, status) + __field(u32, val) + ), + TP_fast_assign( + __assign_str(dev_name); + __assign_str(client_name); + __entry->status = status; + __entry->val = val; + ), + TP_printk("%s: %s: status:0x%0lx val:0x%0x", + __get_str(dev_name), + __get_str(client_name), + __entry->status, + __entry->val + ) +); + +TRACE_EVENT(smp2p_update_bits, + TP_PROTO(struct smp2p_entry *smp2p_entry, u32 orig, u32 val), + TP_ARGS(smp2p_entry, orig, val), + TP_STRUCT__entry( + __string(dev_name, dev_name(smp2p_entry->smp2p->dev)) + __string(client_name, smp2p_entry->name) + __field(u32, orig) + __field(u32, val) + ), + TP_fast_assign( + __assign_str(dev_name); + __assign_str(client_name); + __entry->orig = orig; + __entry->val = val; + ), + TP_printk("%s: %s: orig:0x%0x new:0x%0x", + __get_str(dev_name), + __get_str(client_name), + __entry->orig, + __entry->val + ) +); + +#endif /* __QCOM_SMP2P_TRACE_H__ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace-smp2p + +#include diff --git a/drivers/soc/qcom/trace_icc-bwmon.h b/drivers/soc/qcom/trace_icc-bwmon.h new file mode 100644 index 00000000000000..beb8e6b485a9f8 --- /dev/null +++ b/drivers/soc/qcom/trace_icc-bwmon.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM icc_bwmon + +#if !defined(_TRACE_ICC_BWMON_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_ICC_BWMON_H +#include + +TRACE_EVENT(qcom_bwmon_update, + TP_PROTO(const char *name, + unsigned int meas_kbps, unsigned int up_kbps, unsigned int down_kbps), + + TP_ARGS(name, meas_kbps, up_kbps, down_kbps), + + TP_STRUCT__entry( + __string(name, name) + __field(unsigned int, meas_kbps) + __field(unsigned int, up_kbps) + __field(unsigned int, down_kbps) + ), + + TP_fast_assign( + __assign_str(name); + __entry->meas_kbps = meas_kbps; + __entry->up_kbps = up_kbps; + __entry->down_kbps = down_kbps; + ), + + TP_printk("name=%s meas_kbps=%u up_kbps=%u down_kbps=%u", + __get_str(name), + __entry->meas_kbps, + __entry->up_kbps, + __entry->down_kbps) +); + +#endif /* _TRACE_ICC_BWMON_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/soc/qcom/ + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_icc-bwmon + +#include diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c index 5fd62046b28aae..1eab4bb0eacffe 100644 --- a/drivers/soc/rockchip/grf.c +++ b/drivers/soc/rockchip/grf.c @@ -41,9 +41,11 @@ static const struct rockchip_grf_info rk3036_grf __initconst = { }; #define RK3128_GRF_SOC_CON0 0x140 +#define RK3128_GRF_SOC_CON1 0x144 static const struct rockchip_grf_value rk3128_defaults[] __initconst = { { "jtag switching", RK3128_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 8) }, + { "vpu main clock", RK3128_GRF_SOC_CON1, HIWORD_UPDATE(0, 1, 10) }, }; static const struct rockchip_grf_info rk3128_grf __initconst = { @@ -121,6 +123,29 @@ static const struct rockchip_grf_info rk3566_pipegrf __initconst = { .num_values = ARRAY_SIZE(rk3566_defaults), }; +#define RK3576_SYSGRF_SOC_CON1 0x0004 + +static const struct rockchip_grf_value rk3576_defaults_sys_grf[] __initconst = { + { "i3c0 weakpull", RK3576_SYSGRF_SOC_CON1, HIWORD_UPDATE(3, 3, 6) }, + { "i3c1 weakpull", RK3576_SYSGRF_SOC_CON1, HIWORD_UPDATE(3, 3, 8) }, +}; + +static const struct rockchip_grf_info rk3576_sysgrf __initconst = { + .values = rk3576_defaults_sys_grf, + .num_values = ARRAY_SIZE(rk3576_defaults_sys_grf), +}; + +#define RK3576_IOCGRF_MISC_CON 0x04F0 + +static const struct rockchip_grf_value rk3576_defaults_ioc_grf[] __initconst = { + { "jtag switching", RK3576_IOCGRF_MISC_CON, HIWORD_UPDATE(0, 1, 1) }, +}; + +static const struct rockchip_grf_info rk3576_iocgrf __initconst = { + .values = rk3576_defaults_ioc_grf, + .num_values = ARRAY_SIZE(rk3576_defaults_ioc_grf), +}; + #define RK3588_GRF_SOC_CON6 0x0318 static const struct rockchip_grf_value rk3588_defaults[] __initconst = { @@ -132,7 +157,6 @@ static const struct rockchip_grf_info rk3588_sysgrf __initconst = { .num_values = ARRAY_SIZE(rk3588_defaults), }; - static const struct of_device_id rockchip_grf_dt_match[] __initconst = { { .compatible = "rockchip,rk3036-grf", @@ -158,6 +182,12 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = { }, { .compatible = "rockchip,rk3566-pipe-grf", .data = (void *)&rk3566_pipegrf, + }, { + .compatible = "rockchip,rk3576-sys-grf", + .data = (void *)&rk3576_sysgrf, + }, { + .compatible = "rockchip,rk3576-ioc-grf", + .data = (void *)&rk3576_iocgrf, }, { .compatible = "rockchip,rk3588-sys-grf", .data = (void *)&rk3588_sysgrf, diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c index 18f809c160a720..fd9fd31f71c253 100644 --- a/drivers/soc/rockchip/io-domain.c +++ b/drivers/soc/rockchip/io-domain.c @@ -39,6 +39,10 @@ 
#define RK3288_SOC_CON2_FLASH0 BIT(7) #define RK3288_SOC_FLASH_SUPPLY_NUM 2 +#define RK3308_SOC_CON0 0x300 +#define RK3308_SOC_CON0_VCCIO3 BIT(8) +#define RK3308_SOC_VCCIO3_SUPPLY_NUM 3 + #define RK3328_SOC_CON4 0x410 #define RK3328_SOC_CON4_VCCIO2 BIT(7) #define RK3328_SOC_VCCIO2_SUPPLY_NUM 1 @@ -229,6 +233,25 @@ static void rk3288_iodomain_init(struct rockchip_iodomain *iod) dev_warn(iod->dev, "couldn't update flash0 ctrl\n"); } +static void rk3308_iodomain_init(struct rockchip_iodomain *iod) +{ + int ret; + u32 val; + + /* if no vccio3 supply we should leave things alone */ + if (!iod->supplies[RK3308_SOC_VCCIO3_SUPPLY_NUM].reg) + return; + + /* + * set vccio3 iodomain to also use this framework + * instead of a special gpio. + */ + val = RK3308_SOC_CON0_VCCIO3 | (RK3308_SOC_CON0_VCCIO3 << 16); + ret = regmap_write(iod->grf, RK3308_SOC_CON0, val); + if (ret < 0) + dev_warn(iod->dev, "couldn't update vccio3 vsel ctrl\n"); +} + static void rk3328_iodomain_init(struct rockchip_iodomain *iod) { int ret; @@ -376,6 +399,19 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3288 = { .init = rk3288_iodomain_init, }; +static const struct rockchip_iodomain_soc_data soc_data_rk3308 = { + .grf_offset = 0x300, + .supply_names = { + "vccio0", + "vccio1", + "vccio2", + "vccio3", + "vccio4", + "vccio5", + }, + .init = rk3308_iodomain_init, +}; + static const struct rockchip_iodomain_soc_data soc_data_rk3328 = { .grf_offset = 0x410, .supply_names = { @@ -528,6 +564,10 @@ static const struct of_device_id rockchip_iodomain_match[] = { .compatible = "rockchip,rk3288-io-voltage-domain", .data = &soc_data_rk3288 }, + { + .compatible = "rockchip,rk3308-io-voltage-domain", + .data = &soc_data_rk3308 + }, { .compatible = "rockchip,rk3328-io-voltage-domain", .data = &soc_data_rk3328 diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 6c37d6eb8b495c..a08c377933c505 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1438,7 +1438,7 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, struct device_node *parent) { struct of_phandle_args child_args, parent_args; - struct device_node *np, *child; + struct device_node *np; int err = 0; /* @@ -1457,12 +1457,10 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, if (!np) return 0; - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { err = tegra_powergate_add(pmc, child); - if (err < 0) { - of_node_put(child); + if (err < 0) break; - } if (of_parse_phandle_with_args(child, "power-domains", "#power-domain-cells", @@ -1474,10 +1472,8 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, err = of_genpd_add_subdomain(&parent_args, &child_args); of_node_put(parent_args.np); - if (err) { - of_node_put(child); + if (err) break; - } } of_node_put(np); diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c index fd4251d759352b..8c01029683515c 100644 --- a/drivers/soc/ti/k3-ringacc.c +++ b/drivers/soc/ti/k3-ringacc.c @@ -161,7 +161,7 @@ struct k3_ring { struct k3_ringacc_proxy_target_regs __iomem *proxy; dma_addr_t ring_mem_dma; void *ring_mem_virt; - struct k3_ring_ops *ops; + const struct k3_ring_ops *ops; u32 size; enum k3_ring_size elm_size; enum k3_ring_mode mode; @@ -268,17 +268,17 @@ static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem); static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem); static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem); -static struct k3_ring_ops k3_ring_mode_ring_ops = { +static const struct k3_ring_ops 
k3_ring_mode_ring_ops = { .push_tail = k3_ringacc_ring_push_mem, .pop_head = k3_ringacc_ring_pop_mem, }; -static struct k3_ring_ops k3_dmaring_fwd_ops = { +static const struct k3_ring_ops k3_dmaring_fwd_ops = { .push_tail = k3_ringacc_ring_push_mem, .pop_head = k3_dmaring_fwd_pop, }; -static struct k3_ring_ops k3_dmaring_reverse_ops = { +static const struct k3_ring_ops k3_dmaring_reverse_ops = { /* Reverse side of the DMA ring can only be popped by SW */ .pop_head = k3_dmaring_reverse_pop, }; @@ -288,7 +288,7 @@ static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem); static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem); static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem); -static struct k3_ring_ops k3_ring_mode_msg_ops = { +static const struct k3_ring_ops k3_ring_mode_msg_ops = { .push_tail = k3_ringacc_ring_push_io, .push_head = k3_ringacc_ring_push_head_io, .pop_tail = k3_ringacc_ring_pop_tail_io, @@ -300,7 +300,7 @@ static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem); static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem); static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem); -static struct k3_ring_ops k3_ring_mode_proxy_ops = { +static const struct k3_ring_ops k3_ring_mode_proxy_ops = { .push_tail = k3_ringacc_ring_push_tail_proxy, .push_head = k3_ringacc_ring_push_head_proxy, .pop_tail = k3_ringacc_ring_pop_tail_proxy, diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c index 6023006685fc4b..fb0746d8caad4a 100644 --- a/drivers/soc/ti/knav_dma.c +++ b/drivers/soc/ti/knav_dma.c @@ -602,7 +602,7 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node) unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched; struct device_node *node = dma_node; struct knav_dma_device *dma; - int ret, len, num_chan = 0; + int ret, num_chan = 0; resource_size_t size; u32 timeout; u32 i; @@ -615,25 +615,13 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node) INIT_LIST_HEAD(&dma->list); INIT_LIST_HEAD(&dma->chan_list); - if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) { - dev_err(kdev->dev, "unspecified navigator cloud addresses\n"); - return -ENODEV; - } - - dma->logical_queue_managers = len / sizeof(u32); - if (dma->logical_queue_managers > DMA_MAX_QMS) { - dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n", - dma->logical_queue_managers); - dma->logical_queue_managers = DMA_MAX_QMS; - } - - ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address", - dma->qm_base_address, - dma->logical_queue_managers); - if (ret) { + ret = of_property_read_variable_u32_array(cloud, "ti,navigator-cloud-address", + dma->qm_base_address, 1, DMA_MAX_QMS); + if (ret < 0) { dev_err(kdev->dev, "invalid navigator cloud addresses\n"); return -ENODEV; } + dma->logical_queue_managers = ret; dma->reg_global = pktdma_get_regs(dma, node, 0, &size); if (IS_ERR(dma->reg_global)) diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index f2055a76f84c2b..6c98738e548a80 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -1076,14 +1076,20 @@ static const char *knav_queue_find_name(struct device_node *node) } static int knav_queue_setup_regions(struct knav_device *kdev, - struct device_node *regions) + struct device_node *node) { struct device *dev = kdev->dev; + struct device_node *regions __free(device_node) = + of_get_child_by_name(node, 
"descriptor-regions"); struct knav_region *region; struct device_node *child; u32 temp[2]; int ret; + if (!regions) + return dev_err_probe(dev, -ENODEV, + "descriptor-regions not specified\n"); + for_each_child_of_node(regions, child) { region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL); if (!region) { @@ -1104,11 +1110,6 @@ static int knav_queue_setup_regions(struct knav_device *kdev, continue; } - if (!of_get_property(child, "link-index", NULL)) { - dev_err(dev, "No link info for %s\n", region->name); - devm_kfree(dev, region); - continue; - } ret = of_property_read_u32(child, "link-index", ®ion->link_index); if (ret) { @@ -1121,10 +1122,9 @@ static int knav_queue_setup_regions(struct knav_device *kdev, INIT_LIST_HEAD(®ion->pools); list_add_tail(®ion->list, &kdev->regions); } - if (list_empty(&kdev->regions)) { - dev_err(dev, "no valid region information found\n"); - return -ENODEV; - } + if (list_empty(&kdev->regions)) + return dev_err_probe(dev, -ENODEV, + "no valid region information found\n"); /* Next, we run through the regions and set things up */ for_each_region(kdev, region) @@ -1306,10 +1306,16 @@ static int knav_setup_queue_range(struct knav_device *kdev, } static int knav_setup_queue_pools(struct knav_device *kdev, - struct device_node *queue_pools) + struct device_node *node) { + struct device_node *queue_pools __free(device_node) = + of_get_child_by_name(node, "queue-pools"); struct device_node *type, *range; + if (!queue_pools) + return dev_err_probe(kdev->dev, -ENODEV, + "queue-pools not specified\n"); + for_each_child_of_node(queue_pools, type) { for_each_child_of_node(type, range) { /* return value ignored, we init the rest... */ @@ -1318,10 +1324,9 @@ static int knav_setup_queue_pools(struct knav_device *kdev, } /* ... and barf if they all failed! 
*/ - if (list_empty(&kdev->queue_ranges)) { - dev_err(kdev->dev, "no valid queue range found\n"); - return -ENODEV; - } + if (list_empty(&kdev->queue_ranges)) + return dev_err_probe(kdev->dev, -ENODEV, + "no valid queue range found\n"); return 0; } @@ -1389,14 +1394,20 @@ static void __iomem *knav_queue_map_reg(struct knav_device *kdev, } static int knav_queue_init_qmgrs(struct knav_device *kdev, - struct device_node *qmgrs) + struct device_node *node) { struct device *dev = kdev->dev; + struct device_node *qmgrs __free(device_node) = + of_get_child_by_name(node, "qmgrs"); struct knav_qmgr_info *qmgr; struct device_node *child; u32 temp[2]; int ret; + if (!qmgrs) + return dev_err_probe(dev, -ENODEV, + "queue manager info not specified\n"); + for_each_child_of_node(qmgrs, child) { qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL); if (!qmgr) { @@ -1668,6 +1679,26 @@ static int knav_queue_start_pdsps(struct knav_device *kdev) return 0; } +static int knav_queue_setup_pdsps(struct knav_device *kdev, + struct device_node *node) +{ + struct device_node *pdsps __free(device_node) = + of_get_child_by_name(node, "pdsps"); + + if (pdsps) { + int ret; + + ret = knav_queue_init_pdsps(kdev, pdsps); + if (ret) + return ret; + + ret = knav_queue_start_pdsps(kdev); + if (ret) + return ret; + } + return 0; +} + static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id) { struct knav_qmgr_info *qmgr; @@ -1755,7 +1786,6 @@ MODULE_DEVICE_TABLE(of, keystone_qmss_of_match); static int knav_queue_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; - struct device_node *qmgrs, *queue_pools, *regions, *pdsps; struct device *dev = &pdev->dev; u32 temp[2]; int ret; @@ -1799,39 +1829,17 @@ static int knav_queue_probe(struct platform_device *pdev) kdev->num_queues = temp[1]; /* Initialize queue managers using device tree configuration */ - qmgrs = of_get_child_by_name(node, "qmgrs"); - if (!qmgrs) { - dev_err(dev, "queue manager info not specified\n"); - ret = -ENODEV; - goto err; - } - ret = knav_queue_init_qmgrs(kdev, qmgrs); - of_node_put(qmgrs); + ret = knav_queue_init_qmgrs(kdev, node); if (ret) goto err; /* get pdsp configuration values from device tree */ - pdsps = of_get_child_by_name(node, "pdsps"); - if (pdsps) { - ret = knav_queue_init_pdsps(kdev, pdsps); - if (ret) - goto err; - - ret = knav_queue_start_pdsps(kdev); - if (ret) - goto err; - } - of_node_put(pdsps); + ret = knav_queue_setup_pdsps(kdev, node); + if (ret) + goto err; /* get usable queue range values from device tree */ - queue_pools = of_get_child_by_name(node, "queue-pools"); - if (!queue_pools) { - dev_err(dev, "queue-pools not specified\n"); - ret = -ENODEV; - goto err; - } - ret = knav_setup_queue_pools(kdev, queue_pools); - of_node_put(queue_pools); + ret = knav_setup_queue_pools(kdev, node); if (ret) goto err; @@ -1853,14 +1861,7 @@ static int knav_queue_probe(struct platform_device *pdev) if (ret) goto err; - regions = of_get_child_by_name(node, "descriptor-regions"); - if (!regions) { - dev_err(dev, "descriptor-regions not specified\n"); - ret = -ENODEV; - goto err; - } - ret = knav_queue_setup_regions(kdev, regions); - of_node_put(regions); + ret = knav_queue_setup_regions(kdev, node); if (ret) goto err; diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c index 3a56bbf3268ab3..8169885ab1e056 100644 --- a/drivers/soc/ti/pm33xx.c +++ b/drivers/soc/ti/pm33xx.c @@ -383,54 +383,44 @@ static void am33xx_pm_free_sram(void) */ static int am33xx_pm_alloc_sram(void) { - struct device_node *np; 
- int ret = 0; + struct device_node *np __free(device_node) = + of_find_compatible_node(NULL, NULL, "ti,omap3-mpu"); - np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu"); if (!np) { np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); - if (!np) { - dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n", - __func__); - return -ENODEV; - } + if (!np) + return dev_err_probe(pm33xx_dev, -ENODEV, + "PM: %s: Unable to find device node for mpu\n", + __func__); } sram_pool = of_gen_pool_get(np, "pm-sram", 0); - if (!sram_pool) { - dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n", - __func__); - ret = -ENODEV; - goto mpu_put_node; - } + if (!sram_pool) + return dev_err_probe(pm33xx_dev, -ENODEV, + "PM: %s: Unable to get sram pool for ocmcram\n", + __func__); sram_pool_data = of_gen_pool_get(np, "pm-sram", 1); - if (!sram_pool_data) { - dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n", - __func__); - ret = -ENODEV; - goto mpu_put_node; - } + if (!sram_pool_data) + return dev_err_probe(pm33xx_dev, -ENODEV, + "PM: %s: Unable to get sram data pool for ocmcram\n", + __func__); ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz); - if (!ocmcram_location) { - dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n", - __func__); - ret = -ENOMEM; - goto mpu_put_node; - } + if (!ocmcram_location) + return dev_err_probe(pm33xx_dev, -ENOMEM, + "PM: %s: Unable to allocate memory from ocmcram\n", + __func__); ocmcram_location_data = gen_pool_alloc(sram_pool_data, sizeof(struct emif_regs_amx3)); if (!ocmcram_location_data) { - dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n"); gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz); - ret = -ENOMEM; + return dev_err_probe(pm33xx_dev, -ENOMEM, + "PM: Unable to allocate memory from ocmcram\n"); } -mpu_put_node: - of_node_put(np); - return ret; + return 0; } static int am33xx_pm_rtc_setup(void) diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c index 24a42e0b645c13..3ec758f50e2481 100644 --- a/drivers/soc/ti/pruss.c +++ b/drivers/soc/ti/pruss.c @@ -380,39 +380,81 @@ static int pruss_clk_mux_setup(struct pruss *pruss, struct clk *clk_mux, static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node) { - const struct pruss_private_data *data; - struct device_node *clks_np; struct device *dev = pruss->dev; - int ret = 0; - - data = of_device_get_match_data(dev); + struct device_node *clks_np __free(device_node) = + of_get_child_by_name(cfg_node, "clocks"); + const struct pruss_private_data *data = of_device_get_match_data(dev); + int ret; - clks_np = of_get_child_by_name(cfg_node, "clocks"); - if (!clks_np) { - dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node); - return -ENODEV; - } + if (!clks_np) + return dev_err_probe(dev, -ENODEV, + "%pOF is missing its 'clocks' node\n", + cfg_node); if (data && data->has_core_mux_clock) { ret = pruss_clk_mux_setup(pruss, pruss->core_clk_mux, "coreclk-mux", clks_np); - if (ret) { - dev_err(dev, "failed to setup coreclk-mux\n"); - goto put_clks_node; - } + if (ret) + return dev_err_probe(dev, ret, + "failed to setup coreclk-mux\n"); } ret = pruss_clk_mux_setup(pruss, pruss->iep_clk_mux, "iepclk-mux", clks_np); - if (ret) { - dev_err(dev, "failed to setup iepclk-mux\n"); - goto put_clks_node; - } + if (ret) + return dev_err_probe(dev, ret, "failed to setup iepclk-mux\n"); -put_clks_node: - of_node_put(clks_np); + return 0; +} - return ret; +static int 
pruss_of_setup_memories(struct device *dev, struct pruss *pruss) +{ + struct device_node *np = dev_of_node(dev); + struct device_node *child __free(device_node) = + of_get_child_by_name(np, "memories"); + const struct pruss_private_data *data = of_device_get_match_data(dev); + const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" }; + int i; + + if (!child) + return dev_err_probe(dev, -ENODEV, + "%pOF is missing its 'memories' node\n", + child); + + for (i = 0; i < PRUSS_MEM_MAX; i++) { + struct resource res; + int index; + + /* + * On AM437x one of two PRUSS units don't contain Shared RAM, + * skip it + */ + if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2) + continue; + + index = of_property_match_string(child, "reg-names", + mem_names[i]); + if (index < 0) + return index; + + if (of_address_to_resource(child, index, &res)) + return -EINVAL; + + pruss->mem_regions[i].va = devm_ioremap(dev, res.start, + resource_size(&res)); + if (!pruss->mem_regions[i].va) + return dev_err_probe(dev, -ENOMEM, + "failed to parse and map memory resource %d %s\n", + i, mem_names[i]); + pruss->mem_regions[i].pa = res.start; + pruss->mem_regions[i].size = resource_size(&res); + + dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n", + mem_names[i], &pruss->mem_regions[i].pa, + pruss->mem_regions[i].size, pruss->mem_regions[i].va); + } + + return 0; } static struct regmap_config regmap_conf = { @@ -424,26 +466,21 @@ static struct regmap_config regmap_conf = { static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss) { struct device_node *np = dev_of_node(dev); - struct device_node *child; + struct device_node *child __free(device_node) = + of_get_child_by_name(np, "cfg"); struct resource res; int ret; - child = of_get_child_by_name(np, "cfg"); - if (!child) { - dev_err(dev, "%pOF is missing its 'cfg' node\n", child); - return -ENODEV; - } + if (!child) + return dev_err_probe(dev, -ENODEV, + "%pOF is missing its 'cfg' node\n", child); - if (of_address_to_resource(child, 0, &res)) { - ret = -ENOMEM; - goto node_put; - } + if (of_address_to_resource(child, 0, &res)) + return -ENOMEM; pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res)); - if (!pruss->cfg_base) { - ret = -ENOMEM; - goto node_put; - } + if (!pruss->cfg_base) + return -ENOMEM; regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child, (u64)res.start); @@ -452,34 +489,22 @@ static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss) pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base, ®map_conf); kfree(regmap_conf.name); - if (IS_ERR(pruss->cfg_regmap)) { - dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n", - PTR_ERR(pruss->cfg_regmap)); - ret = PTR_ERR(pruss->cfg_regmap); - goto node_put; - } + if (IS_ERR(pruss->cfg_regmap)) + return dev_err_probe(dev, PTR_ERR(pruss->cfg_regmap), + "regmap_init_mmio failed for cfg\n"); ret = pruss_clk_init(pruss, child); if (ret) - dev_err(dev, "pruss_clk_init failed, ret = %d\n", ret); + return dev_err_probe(dev, ret, "pruss_clk_init failed\n"); -node_put: - of_node_put(child); - return ret; + return 0; } static int pruss_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np = dev_of_node(dev); - struct device_node *child; struct pruss *pruss; - struct resource res; - int ret, i, index; - const struct pruss_private_data *data; - const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" }; - - data = of_device_get_match_data(&pdev->dev); + int ret; ret = 
dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); if (ret) { @@ -494,48 +519,9 @@ static int pruss_probe(struct platform_device *pdev) pruss->dev = dev; mutex_init(&pruss->lock); - child = of_get_child_by_name(np, "memories"); - if (!child) { - dev_err(dev, "%pOF is missing its 'memories' node\n", child); - return -ENODEV; - } - - for (i = 0; i < PRUSS_MEM_MAX; i++) { - /* - * On AM437x one of two PRUSS units don't contain Shared RAM, - * skip it - */ - if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2) - continue; - - index = of_property_match_string(child, "reg-names", - mem_names[i]); - if (index < 0) { - of_node_put(child); - return index; - } - - if (of_address_to_resource(child, index, &res)) { - of_node_put(child); - return -EINVAL; - } - - pruss->mem_regions[i].va = devm_ioremap(dev, res.start, - resource_size(&res)); - if (!pruss->mem_regions[i].va) { - dev_err(dev, "failed to parse and map memory resource %d %s\n", - i, mem_names[i]); - of_node_put(child); - return -ENOMEM; - } - pruss->mem_regions[i].pa = res.start; - pruss->mem_regions[i].size = resource_size(&res); - - dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n", - mem_names[i], &pruss->mem_regions[i].pa, - pruss->mem_regions[i].size, pruss->mem_regions[i].va); - } - of_node_put(child); + ret = pruss_of_setup_memories(dev, pruss); + if (ret < 0) + return ret; platform_set_drvdata(pdev, pruss); diff --git a/drivers/soc/versatile/Kconfig b/drivers/soc/versatile/Kconfig index c3792c0a84acde..7bbf54a8d879e3 100644 --- a/drivers/soc/versatile/Kconfig +++ b/drivers/soc/versatile/Kconfig @@ -4,7 +4,7 @@ # config SOC_INTEGRATOR_CM bool "SoC bus device for the ARM Integrator platform core modules" - depends on ARCH_INTEGRATOR + depends on ARCH_INTEGRATOR || COMPILE_TEST select SOC_BUS help Include support for the SoC bus on the ARM Integrator platform @@ -13,7 +13,7 @@ config SOC_INTEGRATOR_CM config SOC_REALVIEW bool "SoC bus device for the ARM RealView platforms" - depends on ARCH_REALVIEW + depends on ARCH_REALVIEW || COMPILE_TEST select SOC_BUS help Include support for the SoC bus on the ARM RealView platforms diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c index bab4ad87aa7500..d5099a3386b4fc 100644 --- a/drivers/soc/versatile/soc-integrator.c +++ b/drivers/soc/versatile/soc-integrator.c @@ -113,6 +113,7 @@ static int __init integrator_soc_init(void) return -ENODEV; syscon_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(syscon_regmap)) return PTR_ERR(syscon_regmap); diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c index c6876d232d8fd6..cf91abe07d38d0 100644 --- a/drivers/soc/versatile/soc-realview.c +++ b/drivers/soc/versatile/soc-realview.c @@ -4,6 +4,7 @@ * * Author: Linus Walleij */ +#include #include #include #include @@ -81,6 +82,13 @@ static struct attribute *realview_attrs[] = { ATTRIBUTE_GROUPS(realview); +static void realview_soc_socdev_release(void *data) +{ + struct soc_device *soc_dev = data; + + soc_device_unregister(soc_dev); +} + static int realview_soc_probe(struct platform_device *pdev) { struct regmap *syscon_regmap; @@ -93,7 +101,7 @@ static int realview_soc_probe(struct platform_device *pdev) if (IS_ERR(syscon_regmap)) return PTR_ERR(syscon_regmap); - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM; @@ -106,10 +114,14 @@ static int realview_soc_probe(struct 
platform_device *pdev) soc_dev_attr->family = "Versatile"; soc_dev_attr->custom_attr_group = realview_groups[0]; soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) { - kfree(soc_dev_attr); + if (IS_ERR(soc_dev)) return -ENODEV; - } + + ret = devm_add_action_or_reset(&pdev->dev, realview_soc_socdev_release, + soc_dev); + if (ret) + return ret; + ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET, &realview_coreid); if (ret) diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c index d928258c676155..77dc094075e131 100644 --- a/drivers/soundwire/bus_type.c +++ b/drivers/soundwire/bus_type.c @@ -83,7 +83,6 @@ static int sdw_drv_probe(struct device *dev) struct sdw_slave *slave = dev_to_sdw_dev(dev); struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); const struct sdw_device_id *id; - const char *name; int ret; /* @@ -108,11 +107,6 @@ static int sdw_drv_probe(struct device *dev) ret = drv->probe(slave, id); if (ret) { - name = drv->name; - if (!name) - name = drv->driver.name; - - dev_err(dev, "Probe of %s failed: %d\n", name, ret); dev_pm_domain_detach(dev, false); return ret; } @@ -129,7 +123,7 @@ static int sdw_drv_probe(struct device *dev) /* init the dynamic sysfs attributes we need */ ret = sdw_slave_sysfs_dpn_init(slave); if (ret < 0) - dev_warn(dev, "Slave sysfs init failed:%d\n", ret); + dev_warn(dev, "failed to initialise sysfs: %d\n", ret); /* * Check for valid clk_stop_timeout, use DisCo worst case value of @@ -153,7 +147,7 @@ static int sdw_drv_probe(struct device *dev) if (drv->ops && drv->ops->update_status) { ret = drv->ops->update_status(slave, slave->status); if (ret < 0) - dev_warn(dev, "%s: update_status failed with status %d\n", __func__, ret); + dev_warn(dev, "failed to update status at probe: %d\n", ret); } mutex_unlock(&slave->sdw_dev_lock); @@ -204,16 +198,11 @@ static void sdw_drv_shutdown(struct device *dev) */ int __sdw_register_driver(struct sdw_driver *drv, struct module *owner) { - const char *name; - drv->driver.bus = &sdw_bus_type; if (!drv->probe) { - name = drv->name; - if (!name) - name = drv->driver.name; - - pr_err("driver %s didn't provide SDW probe routine\n", name); + pr_err("driver %s didn't provide SDW probe routine\n", + drv->driver.name); return -EINVAL; } diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index e0683a5975d101..05652e983539b4 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -890,8 +890,14 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns, } } - if (is_slave) - return sdw_handle_slave_status(&cdns->bus, status); + if (is_slave) { + int ret; + + mutex_lock(&cdns->status_update_lock); + ret = sdw_handle_slave_status(&cdns->bus, status); + mutex_unlock(&cdns->status_update_lock); + return ret; + } return 0; } @@ -988,6 +994,31 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id) } EXPORT_SYMBOL(sdw_cdns_irq); +static void cdns_check_attached_status_dwork(struct work_struct *work) +{ + struct sdw_cdns *cdns = + container_of(work, struct sdw_cdns, attach_dwork.work); + enum sdw_slave_status status[SDW_MAX_DEVICES + 1]; + u32 val; + int ret; + int i; + + val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT); + + for (i = 0; i <= SDW_MAX_DEVICES; i++) { + status[i] = val & 0x3; + if (status[i]) + dev_dbg(cdns->dev, "Peripheral %d status: %d\n", i, status[i]); + val >>= 2; + } + + mutex_lock(&cdns->status_update_lock); + ret = sdw_handle_slave_status(&cdns->bus, status); + mutex_unlock(&cdns->status_update_lock); + if (ret 
< 0) + dev_err(cdns->dev, "%s: sdw_handle_slave_status failed: %d\n", __func__, ret); +} + /** * cdns_update_slave_status_work - update slave status in a work since we will need to handle * other interrupts eg. CDNS_MCP_INT_RX_WL during the update slave @@ -1740,7 +1771,11 @@ int sdw_cdns_probe(struct sdw_cdns *cdns) init_completion(&cdns->tx_complete); cdns->bus.port_ops = &cdns_port_ops; + mutex_init(&cdns->status_update_lock); + INIT_WORK(&cdns->work, cdns_update_slave_status_work); + INIT_DELAYED_WORK(&cdns->attach_dwork, cdns_check_attached_status_dwork); + return 0; } EXPORT_SYMBOL(sdw_cdns_probe); diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index bc84435e420f5b..e1d7969ba48ae8 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -117,6 +117,8 @@ struct sdw_cdns_dai_runtime { * @link_up: Link status * @msg_count: Messages sent on bus * @dai_runtime_array: runtime context for each allocated DAI. + * @status_update_lock: protect concurrency between interrupt-based and delayed work + * status update */ struct sdw_cdns { struct device *dev; @@ -148,10 +150,13 @@ struct sdw_cdns { bool interrupt_enabled; struct work_struct work; + struct delayed_work attach_dwork; struct list_head list; struct sdw_cdns_dai_runtime **dai_runtime_array; + + struct mutex status_update_lock; /* add mutual exclusion to sdw_handle_slave_status() */ }; #define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus) diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h index 68838e843b543e..dddd293814418b 100644 --- a/drivers/soundwire/intel.h +++ b/drivers/soundwire/intel.h @@ -103,6 +103,8 @@ static inline void intel_writew(void __iomem *base, int offset, u16 value) #define INTEL_MASTER_RESET_ITERATIONS 10 +#define SDW_INTEL_DELAYED_ENUMERATION_MS 100 + #define SDW_INTEL_CHECK_OPS(sdw, cb) ((sdw) && (sdw)->link_res && (sdw)->link_res->hw_ops && \ (sdw)->link_res->hw_ops->cb) #define SDW_INTEL_OPS(sdw, cb) ((sdw)->link_res->hw_ops->cb) @@ -222,6 +224,13 @@ static inline bool sdw_intel_sync_check_cmdsync_unlocked(struct sdw_intel *sdw) return false; } +static inline int sdw_intel_get_link_count(struct sdw_intel *sdw) +{ + if (SDW_INTEL_CHECK_OPS(sdw, get_link_count)) + return SDW_INTEL_OPS(sdw, get_link_count)(sdw); + return 4; /* default on older generations */ +} + /* common bus management */ int intel_start_bus(struct sdw_intel *sdw); int intel_start_bus_after_reset(struct sdw_intel *sdw); diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c index 781fe0aefa68ff..fff312c6968dd3 100644 --- a/drivers/soundwire/intel_ace2x.c +++ b/drivers/soundwire/intel_ace2x.c @@ -706,10 +706,30 @@ static void intel_program_sdi(struct sdw_intel *sdw, int dev_num) __func__, sdw->instance, dev_num); } +static int intel_get_link_count(struct sdw_intel *sdw) +{ + int ret; + + ret = hdac_bus_eml_get_count(sdw->link_res->hbus, true, AZX_REG_ML_LEPTR_ID_SDW); + if (!ret) { + dev_err(sdw->cdns.dev, "%s: could not retrieve link count\n", __func__); + return -ENODEV; + } + + if (ret > SDW_INTEL_MAX_LINKS) { + dev_err(sdw->cdns.dev, "%s: link count %d exceed max %d\n", __func__, ret, SDW_INTEL_MAX_LINKS); + return -EINVAL; + } + + return ret; +} + const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops = { .debugfs_init = intel_ace2x_debugfs_init, .debugfs_exit = intel_ace2x_debugfs_exit, + .get_link_count = intel_get_link_count, + .register_dai = intel_register_dai, .check_clock_stop = intel_check_clock_stop, diff --git 
a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c index 8807e01cbf7c7e..ae689d5d1ab9eb 100644 --- a/drivers/soundwire/intel_auxdevice.c +++ b/drivers/soundwire/intel_auxdevice.c @@ -317,6 +317,20 @@ static int intel_link_probe(struct auxiliary_device *auxdev, bus->link_id = auxdev->id; bus->clk_stop_timeout = 1; + /* + * paranoia check: make sure ACPI-reported number of links is aligned with + * hardware capabilities. + */ + ret = sdw_intel_get_link_count(sdw); + if (ret < 0) { + dev_err(dev, "%s: sdw_intel_get_link_count failed: %d\n", __func__, ret); + return ret; + } + if (ret <= sdw->instance) { + dev_err(dev, "%s: invalid link id %d, link count %d\n", __func__, auxdev->id, ret); + return -EINVAL; + } + sdw_cdns_probe(cdns); /* Set ops */ @@ -475,6 +489,7 @@ static void intel_link_remove(struct auxiliary_device *auxdev) */ if (!bus->prop.hw_disabled) { sdw_intel_debugfs_exit(sdw); + cancel_delayed_work_sync(&cdns->attach_dwork); sdw_cdns_enable_interrupt(cdns, false); } sdw_bus_master_delete(bus); diff --git a/drivers/soundwire/intel_bus_common.c b/drivers/soundwire/intel_bus_common.c index df944e11b9caa5..d3ff6c65b64c33 100644 --- a/drivers/soundwire/intel_bus_common.c +++ b/drivers/soundwire/intel_bus_common.c @@ -45,21 +45,24 @@ int intel_start_bus(struct sdw_intel *sdw) return ret; } - ret = sdw_cdns_exit_reset(cdns); + ret = sdw_cdns_enable_interrupt(cdns, true); if (ret < 0) { - dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret); + dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret); return ret; } - ret = sdw_cdns_enable_interrupt(cdns, true); + ret = sdw_cdns_exit_reset(cdns); if (ret < 0) { - dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret); + dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret); return ret; } sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS); + schedule_delayed_work(&cdns->attach_dwork, + msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS)); + return 0; } @@ -136,21 +139,24 @@ int intel_start_bus_after_reset(struct sdw_intel *sdw) return ret; } - ret = sdw_cdns_exit_reset(cdns); + ret = sdw_cdns_enable_interrupt(cdns, true); if (ret < 0) { - dev_err(dev, "unable to exit bus reset sequence during resume\n"); + dev_err(dev, "cannot enable interrupts during resume\n"); return ret; } - ret = sdw_cdns_enable_interrupt(cdns, true); + ret = sdw_cdns_exit_reset(cdns); if (ret < 0) { - dev_err(dev, "cannot enable interrupts during resume\n"); + dev_err(dev, "unable to exit bus reset sequence during resume\n"); return ret; } } sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS); + schedule_delayed_work(&cdns->attach_dwork, + msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS)); + return 0; } @@ -184,6 +190,9 @@ int intel_start_bus_after_clock_stop(struct sdw_intel *sdw) sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS); + schedule_delayed_work(&cdns->attach_dwork, + msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS)); + return 0; } @@ -194,6 +203,8 @@ int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop) bool wake_enable = false; int ret; + cancel_delayed_work_sync(&cdns->attach_dwork); + if (clock_stop) { ret = sdw_cdns_clock_stop(cdns, true); if (ret < 0) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ec1550c698d5f3..82379721740480 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -267,7 +267,7 @@ config SPI_CADENCE_QUADSPI config 
SPI_CADENCE_XSPI tristate "Cadence XSPI controller" - depends on OF && HAS_IOMEM + depends on OF && HAS_IOMEM && 64BIT depends on SPI_MEM help Enable support for the Cadence XSPI Flash controller. diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 5aaff3bee1b78f..95cdfc28361ef7 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -375,9 +375,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, * If the QSPI controller is set in regular SPI mode, set it in * Serial Memory Mode (SMM). */ - if (aq->mr != QSPI_MR_SMM) { - atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); - aq->mr = QSPI_MR_SMM; + if (!(aq->mr & QSPI_MR_SMM)) { + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR); } /* Clear pending interrupts */ @@ -501,7 +501,8 @@ static int atmel_qspi_setup(struct spi_device *spi) if (ret < 0) return ret; - aq->scr = QSPI_SCR_SCBR(scbr); + aq->scr &= ~QSPI_SCR_SCBR_MASK; + aq->scr |= QSPI_SCR_SCBR(scbr); atmel_qspi_write(aq->scr, aq, QSPI_SCR); pm_runtime_mark_last_busy(ctrl->dev.parent); @@ -534,6 +535,7 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi) if (ret < 0) return ret; + aq->scr &= ~QSPI_SCR_DLYBS_MASK; aq->scr |= QSPI_SCR_DLYBS(cs_setup); atmel_qspi_write(aq->scr, aq, QSPI_SCR); @@ -549,8 +551,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq) atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR); /* Set the QSPI controller by default in Serial Memory Mode */ - atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); - aq->mr = QSPI_MR_SMM; + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR); /* Enable the QSPI controller */ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); @@ -601,20 +603,17 @@ static int atmel_qspi_probe(struct platform_device *pdev) aq->pdev = pdev; /* Map the registers */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base"); - aq->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(aq->regs)) { - dev_err(&pdev->dev, "missing registers\n"); - return PTR_ERR(aq->regs); - } + aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base"); + if (IS_ERR(aq->regs)) + return dev_err_probe(&pdev->dev, PTR_ERR(aq->regs), + "missing registers\n"); /* Map the AHB memory */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap"); aq->mem = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(aq->mem)) { - dev_err(&pdev->dev, "missing AHB memory\n"); - return PTR_ERR(aq->mem); - } + if (IS_ERR(aq->mem)) + return dev_err_probe(&pdev->dev, PTR_ERR(aq->mem), + "missing AHB memory\n"); aq->mmap_size = resource_size(res); @@ -623,17 +622,15 @@ static int atmel_qspi_probe(struct platform_device *pdev) if (IS_ERR(aq->pclk)) aq->pclk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(aq->pclk)) { - dev_err(&pdev->dev, "missing peripheral clock\n"); - return PTR_ERR(aq->pclk); - } + if (IS_ERR(aq->pclk)) + return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk), + "missing peripheral clock\n"); /* Enable the peripheral clock */ err = clk_prepare_enable(aq->pclk); - if (err) { - dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, + "failed to enable the peripheral clock\n"); aq->caps = of_device_get_match_data(&pdev->dev); if (!aq->caps) { @@ -726,6 +723,7 @@ static void atmel_qspi_remove(struct platform_device *pdev) clk_unprepare(aq->pclk); pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); } diff --git a/drivers/spi/spi-airoha-snfi.c 
b/drivers/spi/spi-airoha-snfi.c index 9d97ec98881cc9..1369691a997bfc 100644 --- a/drivers/spi/spi-airoha-snfi.c +++ b/drivers/spi/spi-airoha-snfi.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include /* SPI */ #define REG_SPI_CTRL_BASE 0x1FA10000 @@ -211,9 +211,6 @@ struct airoha_snand_dev { u8 *txrx_buf; dma_addr_t dma_addr; - - u64 cur_page_num; - bool data_need_update; }; struct airoha_snand_ctrl { @@ -405,7 +402,7 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd, for (i = 0; i < len; i += data_len) { int err; - data_len = min(len, SPI_MAX_TRANSFER_SIZE); + data_len = min(len - i, SPI_MAX_TRANSFER_SIZE); err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len); if (err) return err; @@ -427,7 +424,7 @@ static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data, for (i = 0; i < len; i += data_len) { int err; - data_len = min(len, SPI_MAX_TRANSFER_SIZE); + data_len = min(len - i, SPI_MAX_TRANSFER_SIZE); err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len); if (err) return err; @@ -644,11 +641,6 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, u32 val, rd_mode; int err; - if (!as_dev->data_need_update) - return len; - - as_dev->data_need_update = false; - switch (op->cmd.opcode) { case SPI_NAND_OP_READ_FROM_CACHE_DUAL: rd_mode = 1; @@ -739,8 +731,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, if (err) return err; - err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, - SPI_NFI_READ_FROM_CACHE_DONE); + /* + * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end + * of dirmap_read operation even if it is already set. + */ + err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, + SPI_NFI_READ_FROM_CACHE_DONE, + SPI_NFI_READ_FROM_CACHE_DONE); if (err) return err; @@ -870,8 +867,13 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, if (err) return err; - err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, - SPI_NFI_LOAD_TO_CACHE_DONE); + /* + * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end + * of dirmap_write operation even if it is already set. 
+ */ + err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, + SPI_NFI_LOAD_TO_CACHE_DONE, + SPI_NFI_LOAD_TO_CACHE_DONE); if (err) return err; @@ -885,23 +887,11 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, static int airoha_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { - struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi); u8 data[8], cmd, opcode = op->cmd.opcode; struct airoha_snand_ctrl *as_ctrl; int i, err; as_ctrl = spi_controller_get_devdata(mem->spi->controller); - if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE && - op->addr.val == as_dev->cur_page_num) { - as_dev->data_need_update = true; - } else if (opcode == SPI_NAND_OP_PAGE_READ) { - if (!as_dev->data_need_update && - op->addr.val == as_dev->cur_page_num) - return 0; - - as_dev->data_need_update = true; - as_dev->cur_page_num = op->addr.val; - } /* switch to manual mode */ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); @@ -986,7 +976,6 @@ static int airoha_snand_setup(struct spi_device *spi) if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr)) return -ENOMEM; - as_dev->data_need_update = true; spi_set_ctldata(spi, as_dev); return 0; diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 447e5a962dee86..2dff95d2b3f5d6 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -41,6 +41,7 @@ #define SPI_ENGINE_CONFIG_CPHA BIT(0) #define SPI_ENGINE_CONFIG_CPOL BIT(1) #define SPI_ENGINE_CONFIG_3WIRE BIT(2) +#define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH BIT(3) #define SPI_ENGINE_INST_TRANSFER 0x0 #define SPI_ENGINE_INST_ASSERT 0x1 @@ -137,6 +138,10 @@ static unsigned int spi_engine_get_config(struct spi_device *spi) config |= SPI_ENGINE_CONFIG_CPHA; if (spi->mode & SPI_3WIRE) config |= SPI_ENGINE_CONFIG_3WIRE; + if (spi->mode & SPI_MOSI_IDLE_HIGH) + config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH; + if (spi->mode & SPI_MOSI_IDLE_LOW) + config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH; return config; } @@ -258,7 +263,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, clk_div - 1)); } - if (bits_per_word != xfer->bits_per_word) { + if (bits_per_word != xfer->bits_per_word && xfer->len) { bits_per_word = xfer->bits_per_word; spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS, @@ -692,9 +697,13 @@ static int spi_engine_probe(struct platform_device *pdev) host->num_chipselect = 8; /* Some features depend of the IP core version. 
*/ - if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) { - host->mode_bits |= SPI_CS_HIGH; - host->setup = spi_engine_setup; + if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) { + if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) { + host->mode_bits |= SPI_CS_HIGH; + host->setup = spi_engine_setup; + } + if (ADI_AXI_PCORE_VER_MINOR(version) >= 3) + host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH; } if (host->max_speed_hz == 0) diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 2fb8d4e55c7773..ef3a7226db125c 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -466,6 +466,7 @@ static const struct platform_device_id bcm63xx_spi_dev_match[] = { { }, }; +MODULE_DEVICE_TABLE(platform, bcm63xx_spi_dev_match); static const struct of_device_id bcm63xx_spi_of_match[] = { { .compatible = "brcm,bcm6348-spi", .data = &bcm6348_spi_reg_offsets }, @@ -583,13 +584,15 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); - pm_runtime_enable(&pdev->dev); + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + goto out_clk_disable; /* register and we are done */ ret = devm_spi_register_controller(dev, host); if (ret) { dev_err(dev, "spi register failed\n"); - goto out_pm_disable; + goto out_clk_disable; } dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n", @@ -597,8 +600,6 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) return 0; -out_pm_disable: - pm_runtime_disable(&pdev->dev); out_clk_disable: clk_disable_unprepare(clk); out_err: diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c index 9f64afd8164ea9..d936104a41ece7 100644 --- a/drivers/spi/spi-bcmbca-hsspi.c +++ b/drivers/spi/spi-bcmbca-hsspi.c @@ -433,7 +433,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) { struct spi_controller *host; struct bcmbca_hsspi *bs; - struct resource *res_mem; void __iomem *spim_ctrl; void __iomem *regs; struct device *dev = &pdev->dev; @@ -445,17 +444,11 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) if (irq < 0) return irq; - res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsspi"); - if (!res_mem) - return -EINVAL; - regs = devm_ioremap_resource(dev, res_mem); + regs = devm_platform_ioremap_resource_byname(pdev, "hsspi"); if (IS_ERR(regs)) return PTR_ERR(regs); - res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spim-ctrl"); - if (!res_mem) - return -EINVAL; - spim_ctrl = devm_ioremap_resource(dev, res_mem); + spim_ctrl = devm_platform_ioremap_resource_byname(pdev, "spim-ctrl"); if (IS_ERR(spim_ctrl)) return PTR_ERR(spim_ctrl); @@ -487,7 +480,7 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) } } - host = spi_alloc_host(&pdev->dev, sizeof(*bs)); + host = devm_spi_alloc_host(&pdev->dev, sizeof(*bs)); if (!host) { ret = -ENOMEM; goto out_disable_pll_clk; @@ -543,15 +536,17 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) ret = devm_request_irq(dev, irq, bcmbca_hsspi_interrupt, IRQF_SHARED, pdev->name, bs); if (ret) - goto out_put_host; + goto out_disable_pll_clk; } - pm_runtime_enable(&pdev->dev); + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + goto out_disable_pll_clk; ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group); if (ret) { dev_err(&pdev->dev, "couldn't register sysfs group\n"); - goto out_pm_disable; + goto out_disable_pll_clk; } /* register and we are done */ @@ -565,10 +560,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) out_sysgroup_disable: 
sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group); -out_pm_disable: - pm_runtime_disable(&pdev->dev); -out_put_host: - spi_controller_put(host); out_disable_pll_clk: clk_disable_unprepare(pll_clk); out_disable_clk: diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index afb1b1105ec2b0..ebe18f0b5d23e4 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -54,21 +54,28 @@ static unsigned int bitbang_txrx_8(struct spi_device *spi, struct spi_transfer *t, unsigned int flags) { + struct spi_bitbang *bitbang; unsigned int bits = t->bits_per_word; unsigned int count = t->len; const u8 *tx = t->tx_buf; u8 *rx = t->rx_buf; + bitbang = spi_controller_get_devdata(spi->controller); while (likely(count > 0)) { u8 word = 0; if (tx) word = *tx++; + else + word = spi->mode & SPI_MOSI_IDLE_HIGH ? 0xFF : 0; word = txrx_word(spi, ns, word, bits, flags); if (rx) *rx++ = word; count -= 1; } + if (bitbang->set_mosi_idle) + bitbang->set_mosi_idle(spi); + return t->len - count; } @@ -78,21 +85,28 @@ static unsigned int bitbang_txrx_16(struct spi_device *spi, struct spi_transfer *t, unsigned int flags) { + struct spi_bitbang *bitbang; unsigned int bits = t->bits_per_word; unsigned int count = t->len; const u16 *tx = t->tx_buf; u16 *rx = t->rx_buf; + bitbang = spi_controller_get_devdata(spi->controller); while (likely(count > 1)) { u16 word = 0; if (tx) word = *tx++; + else + word = spi->mode & SPI_MOSI_IDLE_HIGH ? 0xFFFF : 0; word = txrx_word(spi, ns, word, bits, flags); if (rx) *rx++ = word; count -= 2; } + if (bitbang->set_mosi_idle) + bitbang->set_mosi_idle(spi); + return t->len - count; } @@ -102,21 +116,28 @@ static unsigned int bitbang_txrx_32(struct spi_device *spi, struct spi_transfer *t, unsigned int flags) { + struct spi_bitbang *bitbang; unsigned int bits = t->bits_per_word; unsigned int count = t->len; const u32 *tx = t->tx_buf; u32 *rx = t->rx_buf; + bitbang = spi_controller_get_devdata(spi->controller); while (likely(count > 3)) { u32 word = 0; if (tx) word = *tx++; + else + word = spi->mode & SPI_MOSI_IDLE_HIGH ? 
0xFFFFFFFF : 0; word = txrx_word(spi, ns, word, bits, flags); if (rx) *rx++ = word; count -= 4; } + if (bitbang->set_mosi_idle) + bitbang->set_mosi_idle(spi); + return t->len - count; } @@ -192,6 +213,9 @@ int spi_bitbang_setup(struct spi_device *spi) goto err_free; } + if (bitbang->set_mosi_idle) + bitbang->set_mosi_idle(spi); + dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); return 0; diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index d4607cb89c4840..1755ca026f08ff 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1662,23 +1662,20 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi) unsigned int max_cs = cqspi->num_chipselect - 1; struct platform_device *pdev = cqspi->pdev; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; struct cqspi_flash_pdata *f_pdata; unsigned int cs; int ret; /* Get flash device data */ - for_each_available_child_of_node(dev->of_node, np) { + for_each_available_child_of_node_scoped(dev->of_node, np) { ret = of_property_read_u32(np, "reg", &cs); if (ret) { dev_err(dev, "Couldn't determine chip select.\n"); - of_node_put(np); return ret; } if (cs >= cqspi->num_chipselect) { dev_err(dev, "Chip select %d out of range.\n", cs); - of_node_put(np); return -EINVAL; } else if (cs < max_cs) { max_cs = cs; @@ -1689,10 +1686,8 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi) f_pdata->cs = cs; ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); - if (ret) { - of_node_put(np); + if (ret) return ret; - } } cqspi->num_chipselect = max_cs + 1; diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c index 2e3eacd46b7232..aed98ab1433467 100644 --- a/drivers/spi/spi-cadence-xspi.c +++ b/drivers/spi/spi-cadence-xspi.c @@ -2,6 +2,7 @@ // Cadence XSPI flash controller driver // Copyright (C) 2020-21 Cadence +#include #include #include #include @@ -19,6 +20,7 @@ #include #include #include +#include #define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522 #define CDNS_XSPI_MAX_BANKS 8 @@ -193,6 +195,98 @@ ((op)->data.dir == SPI_MEM_DATA_IN) ? 
\ CDNS_XSPI_STIG_CMD_DIR_READ : CDNS_XSPI_STIG_CMD_DIR_WRITE)) +/* Helper macros for GENERIC and GENERIC-DSEQ instruction type */ +#define CMD_REG_LEN (6*4) +#define INSTRUCTION_TYPE_GENERIC 96 +#define CDNS_XSPI_CMD_FLD_P1_GENERIC_CMD (\ + FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, INSTRUCTION_TYPE_GENERIC)) + +#define GENERIC_NUM_OF_BYTES GENMASK(27, 24) +#define CDNS_XSPI_CMD_FLD_P3_GENERIC_CMD(len) (\ + FIELD_PREP(GENERIC_NUM_OF_BYTES, len)) + +#define GENERIC_BANK_NUM GENMASK(14, 12) +#define GENERIC_GLUE_CMD BIT(28) +#define CDNS_XSPI_CMD_FLD_P4_GENERIC_CMD(cs, glue) (\ + FIELD_PREP(GENERIC_BANK_NUM, cs) | FIELD_PREP(GENERIC_GLUE_CMD, glue)) + +#define CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_1 (\ + FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ)) + +#define CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_2(nbytes) (\ + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, nbytes & 0xffff)) + +#define CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_3(nbytes) ( \ + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, (nbytes >> 16) & 0xffff)) + +#define CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_4(dir, chipsel) ( \ + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \ + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DIR, dir)) + +/* Marvell PHY default values */ +#define MARVELL_REGS_DLL_PHY_CTRL 0x00000707 +#define MARVELL_CTB_RFILE_PHY_CTRL 0x00004000 +#define MARVELL_RFILE_PHY_TSEL 0x00000000 +#define MARVELL_RFILE_PHY_DQ_TIMING 0x00000101 +#define MARVELL_RFILE_PHY_DQS_TIMING 0x00700404 +#define MARVELL_RFILE_PHY_GATE_LPBK_CTRL 0x00200030 +#define MARVELL_RFILE_PHY_DLL_MASTER_CTRL 0x00800000 +#define MARVELL_RFILE_PHY_DLL_SLAVE_CTRL 0x0000ff01 + +/* PHY config registers */ +#define CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL 0x1034 +#define CDNS_XSPI_PHY_CTB_RFILE_PHY_CTRL 0x0080 +#define CDNS_XSPI_PHY_CTB_RFILE_PHY_TSEL 0x0084 +#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQ_TIMING 0x0000 +#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQS_TIMING 0x0004 +#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_GATE_LPBK_CTRL 0x0008 +#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_MASTER_CTRL 0x000c +#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_SLAVE_CTRL 0x0010 +#define CDNS_XSPI_DATASLICE_RFILE_PHY_DLL_OBS_REG_0 0x001c + +#define CDNS_XSPI_DLL_RST_N BIT(24) +#define CDNS_XSPI_DLL_LOCK BIT(0) + +/* Marvell overlay registers - clock */ +#define MRVL_XSPI_CLK_CTRL_AUX_REG 0x2020 +#define MRVL_XSPI_CLK_ENABLE BIT(0) +#define MRVL_XSPI_CLK_DIV GENMASK(4, 1) +#define MRVL_XSPI_IRQ_ENABLE BIT(6) +#define MRVL_XSPI_CLOCK_IO_HZ 800000000 +#define MRVL_XSPI_CLOCK_DIVIDED(div) ((MRVL_XSPI_CLOCK_IO_HZ) / (div)) +#define MRVL_DEFAULT_CLK 25000000 + +/* Marvell overlay registers - xfer */ +#define MRVL_XFER_FUNC_CTRL 0x210 +#define MRVL_XFER_FUNC_CTRL_READ_DATA(i) (0x000 + 8 * (i)) +#define MRVL_XFER_SOFT_RESET BIT(11) +#define MRVL_XFER_CS_N_HOLD GENMASK(9, 6) +#define MRVL_XFER_RECEIVE_ENABLE BIT(4) +#define MRVL_XFER_FUNC_ENABLE BIT(3) +#define MRVL_XFER_CLK_CAPTURE_POL BIT(2) +#define MRVL_XFER_CLK_DRIVE_POL BIT(1) +#define MRVL_XFER_FUNC_START BIT(0) +#define MRVL_XFER_QWORD_COUNT 32 +#define MRVL_XFER_QWORD_BYTECOUNT 8 + +#define MRVL_XSPI_POLL_TIMEOUT_US 1000 +#define MRVL_XSPI_POLL_DELAY_US 10 + +/* Macros for calculating data bits in generic command + * Up to 10 bytes can be fit into cmd_registers + * least significant is placed in cmd_reg[1] + * Other bits are inserted after it in cmd_reg[1,2,3] register + */ +#define GENERIC_CMD_DATA_REG_3_COUNT(len) (len >= 10 ? 2 : len - 8) +#define GENERIC_CMD_DATA_REG_2_COUNT(len) (len >= 7 ? 
3 : len - 4) +#define GENERIC_CMD_DATA_REG_1_COUNT(len) (len >= 3 ? 2 : len - 1) +#define GENERIC_CMD_DATA_3_OFFSET(position) (8*(position)) +#define GENERIC_CMD_DATA_2_OFFSET(position) (8*(position)) +#define GENERIC_CMD_DATA_1_OFFSET(position) (8 + 8*(position)) +#define GENERIC_CMD_DATA_INSERT(data, pos) ((data) << (pos)) +#define GENERIC_CMD_REG_3_NEEDED(len) (len > 7) +#define GENERIC_CMD_REG_2_NEEDED(len) (len > 3) + enum cdns_xspi_stig_instr_type { CDNS_XSPI_STIG_INSTR_TYPE_0, CDNS_XSPI_STIG_INSTR_TYPE_1, @@ -209,6 +303,51 @@ enum cdns_xspi_stig_cmd_dir { CDNS_XSPI_STIG_CMD_DIR_WRITE, }; +struct cdns_xspi_driver_data { + bool mrvl_hw_overlay; + u32 dll_phy_ctrl; + u32 ctb_rfile_phy_ctrl; + u32 rfile_phy_tsel; + u32 rfile_phy_dq_timing; + u32 rfile_phy_dqs_timing; + u32 rfile_phy_gate_lpbk_ctrl; + u32 rfile_phy_dll_master_ctrl; + u32 rfile_phy_dll_slave_ctrl; +}; + +static struct cdns_xspi_driver_data marvell_driver_data = { + .mrvl_hw_overlay = true, + .dll_phy_ctrl = MARVELL_REGS_DLL_PHY_CTRL, + .ctb_rfile_phy_ctrl = MARVELL_CTB_RFILE_PHY_CTRL, + .rfile_phy_tsel = MARVELL_RFILE_PHY_TSEL, + .rfile_phy_dq_timing = MARVELL_RFILE_PHY_DQ_TIMING, + .rfile_phy_dqs_timing = MARVELL_RFILE_PHY_DQS_TIMING, + .rfile_phy_gate_lpbk_ctrl = MARVELL_RFILE_PHY_GATE_LPBK_CTRL, + .rfile_phy_dll_master_ctrl = MARVELL_RFILE_PHY_DLL_MASTER_CTRL, + .rfile_phy_dll_slave_ctrl = MARVELL_RFILE_PHY_DLL_SLAVE_CTRL, +}; + +static struct cdns_xspi_driver_data cdns_driver_data = { + .mrvl_hw_overlay = false, +}; + +static const int cdns_mrvl_xspi_clk_div_list[] = { + 4, //0x0 = Divide by 4. SPI clock is 200 MHz. + 6, //0x1 = Divide by 6. SPI clock is 133.33 MHz. + 8, //0x2 = Divide by 8. SPI clock is 100 MHz. + 10, //0x3 = Divide by 10. SPI clock is 80 MHz. + 12, //0x4 = Divide by 12. SPI clock is 66.666 MHz. + 16, //0x5 = Divide by 16. SPI clock is 50 MHz. + 18, //0x6 = Divide by 18. SPI clock is 44.44 MHz. + 20, //0x7 = Divide by 20. SPI clock is 40 MHz. + 24, //0x8 = Divide by 24. SPI clock is 33.33 MHz. + 32, //0x9 = Divide by 32. SPI clock is 25 MHz. + 40, //0xA = Divide by 40. SPI clock is 20 MHz. + 50, //0xB = Divide by 50. SPI clock is 16 MHz. + 64, //0xC = Divide by 64. SPI clock is 12.5 MHz. + 128 //0xD = Divide by 128. SPI clock is 6.25 MHz. 
+}; + struct cdns_xspi_dev { struct platform_device *pdev; struct device *dev; @@ -216,6 +355,7 @@ struct cdns_xspi_dev { void __iomem *iobase; void __iomem *auxbase; void __iomem *sdmabase; + void __iomem *xferbase; int irq; int cur_cs; @@ -230,8 +370,102 @@ struct cdns_xspi_dev { const void *out_buffer; u8 hw_num_banks; + + const struct cdns_xspi_driver_data *driver_data; + void (*sdma_handler)(struct cdns_xspi_dev *cdns_xspi); + void (*set_interrupts_handler)(struct cdns_xspi_dev *cdns_xspi, bool enabled); + + bool xfer_in_progress; + int current_xfer_qword; }; +static void cdns_xspi_reset_dll(struct cdns_xspi_dev *cdns_xspi) +{ + u32 dll_cntrl = readl(cdns_xspi->iobase + + CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL); + + /* Reset DLL */ + dll_cntrl |= CDNS_XSPI_DLL_RST_N; + writel(dll_cntrl, cdns_xspi->iobase + + CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL); +} + +static bool cdns_xspi_is_dll_locked(struct cdns_xspi_dev *cdns_xspi) +{ + u32 dll_lock; + + return !readl_relaxed_poll_timeout(cdns_xspi->iobase + + CDNS_XSPI_INTR_STATUS_REG, + dll_lock, ((dll_lock & CDNS_XSPI_DLL_LOCK) == 1), 10, 10000); +} + +/* Static configuration of PHY */ +static bool cdns_xspi_configure_phy(struct cdns_xspi_dev *cdns_xspi) +{ + writel(cdns_xspi->driver_data->dll_phy_ctrl, + cdns_xspi->iobase + CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL); + writel(cdns_xspi->driver_data->ctb_rfile_phy_ctrl, + cdns_xspi->auxbase + CDNS_XSPI_PHY_CTB_RFILE_PHY_CTRL); + writel(cdns_xspi->driver_data->rfile_phy_tsel, + cdns_xspi->auxbase + CDNS_XSPI_PHY_CTB_RFILE_PHY_TSEL); + writel(cdns_xspi->driver_data->rfile_phy_dq_timing, + cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQ_TIMING); + writel(cdns_xspi->driver_data->rfile_phy_dqs_timing, + cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQS_TIMING); + writel(cdns_xspi->driver_data->rfile_phy_gate_lpbk_ctrl, + cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_GATE_LPBK_CTRL); + writel(cdns_xspi->driver_data->rfile_phy_dll_master_ctrl, + cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_MASTER_CTRL); + writel(cdns_xspi->driver_data->rfile_phy_dll_slave_ctrl, + cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_SLAVE_CTRL); + + cdns_xspi_reset_dll(cdns_xspi); + + return cdns_xspi_is_dll_locked(cdns_xspi); +} + +static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi, + int requested_clk) +{ + int i = 0; + int clk_val; + u32 clk_reg; + bool update_clk = false; + + while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list)) { + clk_val = MRVL_XSPI_CLOCK_DIVIDED( + cdns_mrvl_xspi_clk_div_list[i]); + if (clk_val <= requested_clk) + break; + i++; + } + + dev_dbg(cdns_xspi->dev, "Found clk div: %d, clk val: %d\n", + cdns_mrvl_xspi_clk_div_list[i], + MRVL_XSPI_CLOCK_DIVIDED( + cdns_mrvl_xspi_clk_div_list[i])); + + clk_reg = readl(cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG); + + if (FIELD_GET(MRVL_XSPI_CLK_DIV, clk_reg) != i) { + clk_reg &= ~MRVL_XSPI_CLK_ENABLE; + writel(clk_reg, + cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG); + clk_reg = FIELD_PREP(MRVL_XSPI_CLK_DIV, i); + clk_reg &= ~MRVL_XSPI_CLK_DIV; + clk_reg |= FIELD_PREP(MRVL_XSPI_CLK_DIV, i); + clk_reg |= MRVL_XSPI_CLK_ENABLE; + clk_reg |= MRVL_XSPI_IRQ_ENABLE; + update_clk = true; + } + + if (update_clk) + writel(clk_reg, + cdns_xspi->auxbase + MRVL_XSPI_CLK_CTRL_AUX_REG); + + return update_clk; +} + static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi) { u32 ctrl_stat; @@ -304,6 +538,23 @@ static void cdns_xspi_set_interrupts(struct cdns_xspi_dev 
*cdns_xspi, writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG); } +static void marvell_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi, + bool enabled) +{ + u32 intr_enable; + u32 irq_status; + + irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG); + writel(irq_status, cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG); + + intr_enable = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG); + if (enabled) + intr_enable |= CDNS_XSPI_INTR_MASK; + else + intr_enable &= ~CDNS_XSPI_INTR_MASK; + writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG); +} + static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi) { u32 ctrl_ver; @@ -321,7 +572,7 @@ static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi) ctrl_features = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_FEATURES_REG); cdns_xspi->hw_num_banks = FIELD_GET(CDNS_XSPI_NUM_BANKS, ctrl_features); - cdns_xspi_set_interrupts(cdns_xspi, false); + cdns_xspi->set_interrupts_handler(cdns_xspi, false); return 0; } @@ -348,6 +599,78 @@ static void cdns_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi) } } +static void m_ioreadq(void __iomem *addr, void *buf, int len) +{ + if (IS_ALIGNED((long)buf, 8) && len >= 8) { + u64 full_ops = len / 8; + u64 *buffer = buf; + + len -= full_ops * 8; + buf += full_ops * 8; + + do { + u64 b = readq(addr); + *buffer++ = b; + } while (--full_ops); + } + + + while (len) { + u64 tmp_buf; + + tmp_buf = readq(addr); + memcpy(buf, &tmp_buf, min(len, 8)); + len = len > 8 ? len - 8 : 0; + buf += 8; + } +} + +static void m_iowriteq(void __iomem *addr, const void *buf, int len) +{ + if (IS_ALIGNED((long)buf, 8) && len >= 8) { + u64 full_ops = len / 8; + const u64 *buffer = buf; + + len -= full_ops * 8; + buf += full_ops * 8; + + do { + writeq(*buffer++, addr); + } while (--full_ops); + } + + while (len) { + u64 tmp_buf; + + memcpy(&tmp_buf, buf, min(len, 8)); + writeq(tmp_buf, addr); + len = len > 8 ? 
len - 8 : 0; + buf += 8; + } +} + +static void marvell_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi) +{ + u32 sdma_size, sdma_trd_info; + u8 sdma_dir; + + sdma_size = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_SIZE_REG); + sdma_trd_info = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_TRD_INFO_REG); + sdma_dir = FIELD_GET(CDNS_XSPI_SDMA_DIR, sdma_trd_info); + + switch (sdma_dir) { + case CDNS_XSPI_SDMA_DIR_READ: + m_ioreadq(cdns_xspi->sdmabase, + cdns_xspi->in_buffer, sdma_size); + break; + + case CDNS_XSPI_SDMA_DIR_WRITE: + m_iowriteq(cdns_xspi->sdmabase, + cdns_xspi->out_buffer, sdma_size); + break; + } +} + static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi, const struct spi_mem_op *op, bool data_phase) @@ -364,7 +687,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi, writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG), cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG); - cdns_xspi_set_interrupts(cdns_xspi, true); + cdns_xspi->set_interrupts_handler(cdns_xspi, true); cdns_xspi->sdma_error = false; memset(cmd_regs, 0, sizeof(cmd_regs)); @@ -396,14 +719,14 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi, wait_for_completion(&cdns_xspi->sdma_complete); if (cdns_xspi->sdma_error) { - cdns_xspi_set_interrupts(cdns_xspi, false); + cdns_xspi->set_interrupts_handler(cdns_xspi, false); return -EIO; } - cdns_xspi_sdma_handle(cdns_xspi); + cdns_xspi->sdma_handler(cdns_xspi); } wait_for_completion(&cdns_xspi->cmd_complete); - cdns_xspi_set_interrupts(cdns_xspi, false); + cdns_xspi->set_interrupts_handler(cdns_xspi, false); cmd_status = cdns_xspi_check_command_status(cdns_xspi); if (cmd_status) @@ -437,6 +760,81 @@ static int cdns_xspi_mem_op_execute(struct spi_mem *mem, return ret; } +static int marvell_xspi_mem_op_execute(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct cdns_xspi_dev *cdns_xspi = + spi_controller_get_devdata(mem->spi->controller); + int ret = 0; + + cdns_mrvl_xspi_setup_clock(cdns_xspi, mem->spi->max_speed_hz); + + ret = cdns_xspi_mem_op(cdns_xspi, mem, op); + + return ret; +} + +#ifdef CONFIG_ACPI +static bool cdns_xspi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct spi_device *spi = mem->spi; + const union acpi_object *obj; + struct acpi_device *adev; + + adev = ACPI_COMPANION(&spi->dev); + + if (!acpi_dev_get_property(adev, "spi-tx-bus-width", ACPI_TYPE_INTEGER, + &obj)) { + switch (obj->integer.value) { + case 1: + break; + case 2: + spi->mode |= SPI_TX_DUAL; + break; + case 4: + spi->mode |= SPI_TX_QUAD; + break; + case 8: + spi->mode |= SPI_TX_OCTAL; + break; + default: + dev_warn(&spi->dev, + "spi-tx-bus-width %lld not supported\n", + obj->integer.value); + break; + } + } + + if (!acpi_dev_get_property(adev, "spi-rx-bus-width", ACPI_TYPE_INTEGER, + &obj)) { + switch (obj->integer.value) { + case 1: + break; + case 2: + spi->mode |= SPI_RX_DUAL; + break; + case 4: + spi->mode |= SPI_RX_QUAD; + break; + case 8: + spi->mode |= SPI_RX_OCTAL; + break; + default: + dev_warn(&spi->dev, + "spi-rx-bus-width %lld not supported\n", + obj->integer.value); + break; + } + } + + if (!spi_mem_default_supports_op(mem, op)) + return false; + + return true; +} +#endif + static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op) { struct cdns_xspi_dev *cdns_xspi = @@ -448,10 +846,21 @@ static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op * } static const struct spi_controller_mem_ops cadence_xspi_mem_ops = { +#ifdef 
CONFIG_ACPI + .supports_op = cdns_xspi_supports_op, +#endif .exec_op = cdns_xspi_mem_op_execute, .adjust_op_size = cdns_xspi_adjust_mem_op_size, }; +static const struct spi_controller_mem_ops marvell_xspi_mem_ops = { +#ifdef CONFIG_ACPI + .supports_op = cdns_xspi_supports_op, +#endif + .exec_op = marvell_xspi_mem_op_execute, + .adjust_op_size = cdns_xspi_adjust_mem_op_size, +}; + static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev) { struct cdns_xspi_dev *cdns_xspi = dev; @@ -495,15 +904,20 @@ static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev) static int cdns_xspi_of_get_plat_data(struct platform_device *pdev) { - struct device_node *node_prop = pdev->dev.of_node; + struct fwnode_handle *fwnode_child; unsigned int cs; - for_each_available_child_of_node_scoped(node_prop, node_child) { - if (of_property_read_u32(node_child, "reg", &cs)) { + device_for_each_child_node(&pdev->dev, fwnode_child) { + if (!fwnode_device_is_available(fwnode_child)) + continue; + + if (fwnode_property_read_u32(fwnode_child, "reg", &cs)) { dev_err(&pdev->dev, "Couldn't get memory chip select\n"); + fwnode_handle_put(fwnode_child); return -ENXIO; } else if (cs >= CDNS_XSPI_MAX_BANKS) { dev_err(&pdev->dev, "reg (cs) parameter value too large\n"); + fwnode_handle_put(fwnode_child); return -ENXIO; } } @@ -528,6 +942,204 @@ static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi) readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL)); } +static int cdns_xspi_prepare_generic(int cs, const void *dout, int len, int glue, u32 *cmd_regs) +{ + u8 *data = (u8 *)dout; + int i; + int data_counter = 0; + + memset(cmd_regs, 0x00, CMD_REG_LEN); + + if (GENERIC_CMD_REG_3_NEEDED(len)) { + for (i = GENERIC_CMD_DATA_REG_3_COUNT(len); i >= 0 ; i--) + cmd_regs[3] |= GENERIC_CMD_DATA_INSERT(data[data_counter++], + GENERIC_CMD_DATA_3_OFFSET(i)); + } + if (GENERIC_CMD_REG_2_NEEDED(len)) { + for (i = GENERIC_CMD_DATA_REG_2_COUNT(len); i >= 0; i--) + cmd_regs[2] |= GENERIC_CMD_DATA_INSERT(data[data_counter++], + GENERIC_CMD_DATA_2_OFFSET(i)); + } + for (i = GENERIC_CMD_DATA_REG_1_COUNT(len); i >= 0 ; i--) + cmd_regs[1] |= GENERIC_CMD_DATA_INSERT(data[data_counter++], + GENERIC_CMD_DATA_1_OFFSET(i)); + + cmd_regs[1] |= CDNS_XSPI_CMD_FLD_P1_GENERIC_CMD; + cmd_regs[3] |= CDNS_XSPI_CMD_FLD_P3_GENERIC_CMD(len); + cmd_regs[4] |= CDNS_XSPI_CMD_FLD_P4_GENERIC_CMD(cs, glue); + + return 0; +} + +static void marvell_xspi_read_single_qword(struct cdns_xspi_dev *cdns_xspi, u8 **buffer) +{ + u64 d = readq(cdns_xspi->xferbase + + MRVL_XFER_FUNC_CTRL_READ_DATA(cdns_xspi->current_xfer_qword)); + u8 *ptr = (u8 *)&d; + int k; + + for (k = 0; k < 8; k++) { + u8 val = bitrev8((ptr[k])); + **buffer = val; + *buffer = *buffer + 1; + } + + cdns_xspi->current_xfer_qword++; + cdns_xspi->current_xfer_qword %= MRVL_XFER_QWORD_COUNT; +} + +static void cdns_xspi_finish_read(struct cdns_xspi_dev *cdns_xspi, u8 **buffer, u32 data_count) +{ + u64 d = readq(cdns_xspi->xferbase + + MRVL_XFER_FUNC_CTRL_READ_DATA(cdns_xspi->current_xfer_qword)); + u8 *ptr = (u8 *)&d; + int k; + + for (k = 0; k < data_count % MRVL_XFER_QWORD_BYTECOUNT; k++) { + u8 val = bitrev8((ptr[k])); + **buffer = val; + *buffer = *buffer + 1; + } + + cdns_xspi->current_xfer_qword++; + cdns_xspi->current_xfer_qword %= MRVL_XFER_QWORD_COUNT; +} + +static int cdns_xspi_prepare_transfer(int cs, int dir, int len, u32 *cmd_regs) +{ + memset(cmd_regs, 0x00, CMD_REG_LEN); + + cmd_regs[1] |= CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_1; + cmd_regs[2] |= 
CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_2(len); + cmd_regs[4] |= CDNS_XSPI_CMD_FLD_GENERIC_DSEQ_CMD_4(dir, cs); + + return 0; +} + +static bool cdns_xspi_is_stig_ready(struct cdns_xspi_dev *cdns_xspi, bool sleep) +{ + u32 ctrl_stat; + + return !readl_relaxed_poll_timeout + (cdns_xspi->iobase + CDNS_XSPI_CTRL_STATUS_REG, + ctrl_stat, + ((ctrl_stat & BIT(3)) == 0), + sleep ? MRVL_XSPI_POLL_DELAY_US : 0, + sleep ? MRVL_XSPI_POLL_TIMEOUT_US : 0); +} + +static bool cdns_xspi_is_sdma_ready(struct cdns_xspi_dev *cdns_xspi, bool sleep) +{ + u32 ctrl_stat; + + return !readl_relaxed_poll_timeout + (cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG, + ctrl_stat, + (ctrl_stat & CDNS_XSPI_SDMA_TRIGGER), + sleep ? MRVL_XSPI_POLL_DELAY_US : 0, + sleep ? MRVL_XSPI_POLL_TIMEOUT_US : 0); +} + +static int cdns_xspi_transfer_one_message_b0(struct spi_controller *controller, + struct spi_message *m) +{ + struct cdns_xspi_dev *cdns_xspi = spi_controller_get_devdata(controller); + struct spi_device *spi = m->spi; + struct spi_transfer *t = NULL; + + const unsigned int max_len = MRVL_XFER_QWORD_BYTECOUNT * MRVL_XFER_QWORD_COUNT; + int current_transfer_len; + int cs = spi_get_chipselect(spi, 0); + int cs_change = 0; + + /* Enable xfer state machine */ + if (!cdns_xspi->xfer_in_progress) { + u32 xfer_control = readl(cdns_xspi->xferbase + MRVL_XFER_FUNC_CTRL); + + cdns_xspi->current_xfer_qword = 0; + cdns_xspi->xfer_in_progress = true; + xfer_control |= (MRVL_XFER_RECEIVE_ENABLE | + MRVL_XFER_CLK_CAPTURE_POL | + MRVL_XFER_FUNC_START | + MRVL_XFER_SOFT_RESET | + FIELD_PREP(MRVL_XFER_CS_N_HOLD, (1 << cs))); + xfer_control &= ~(MRVL_XFER_FUNC_ENABLE | MRVL_XFER_CLK_DRIVE_POL); + writel(xfer_control, cdns_xspi->xferbase + MRVL_XFER_FUNC_CTRL); + } + + list_for_each_entry(t, &m->transfers, transfer_list) { + u8 *txd = (u8 *) t->tx_buf; + u8 *rxd = (u8 *) t->rx_buf; + u8 data[10]; + u32 cmd_regs[6]; + + if (!txd) + txd = data; + + cdns_xspi->in_buffer = txd + 1; + cdns_xspi->out_buffer = txd + 1; + + while (t->len) { + + current_transfer_len = min(max_len, t->len); + + if (current_transfer_len < 10) { + cdns_xspi_prepare_generic(cs, txd, current_transfer_len, + false, cmd_regs); + cdns_xspi_trigger_command(cdns_xspi, cmd_regs); + if (!cdns_xspi_is_stig_ready(cdns_xspi, true)) + return -EIO; + } else { + cdns_xspi_prepare_generic(cs, txd, 1, true, cmd_regs); + cdns_xspi_trigger_command(cdns_xspi, cmd_regs); + cdns_xspi_prepare_transfer(cs, 1, current_transfer_len - 1, + cmd_regs); + cdns_xspi_trigger_command(cdns_xspi, cmd_regs); + if (!cdns_xspi_is_sdma_ready(cdns_xspi, true)) + return -EIO; + cdns_xspi->sdma_handler(cdns_xspi); + if (!cdns_xspi_is_stig_ready(cdns_xspi, true)) + return -EIO; + + cdns_xspi->in_buffer += current_transfer_len; + cdns_xspi->out_buffer += current_transfer_len; + } + + if (rxd) { + int j; + + for (j = 0; j < current_transfer_len / 8; j++) + marvell_xspi_read_single_qword(cdns_xspi, &rxd); + cdns_xspi_finish_read(cdns_xspi, &rxd, current_transfer_len); + } else { + cdns_xspi->current_xfer_qword += current_transfer_len / + MRVL_XFER_QWORD_BYTECOUNT; + if (current_transfer_len % MRVL_XFER_QWORD_BYTECOUNT) + cdns_xspi->current_xfer_qword++; + + cdns_xspi->current_xfer_qword %= MRVL_XFER_QWORD_COUNT; + } + cs_change = t->cs_change; + t->len -= current_transfer_len; + } + spi_transfer_delay_exec(t); + } + + if (!cs_change) { + u32 xfer_control = readl(cdns_xspi->xferbase + MRVL_XFER_FUNC_CTRL); + + xfer_control &= ~(MRVL_XFER_RECEIVE_ENABLE | + MRVL_XFER_SOFT_RESET); + writel(xfer_control, cdns_xspi->xferbase 
+ MRVL_XFER_FUNC_CTRL); + cdns_xspi->xfer_in_progress = false; + } + + m->status = 0; + spi_finalize_current_message(controller); + + return 0; +} + static int cdns_xspi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -544,13 +1156,29 @@ static int cdns_xspi_probe(struct platform_device *pdev) SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL | SPI_MODE_0 | SPI_MODE_3; - host->mem_ops = &cadence_xspi_mem_ops; + cdns_xspi = spi_controller_get_devdata(host); + cdns_xspi->driver_data = of_device_get_match_data(dev); + if (!cdns_xspi->driver_data) { + cdns_xspi->driver_data = acpi_device_get_match_data(dev); + if (!cdns_xspi->driver_data) + return -ENODEV; + } + + if (cdns_xspi->driver_data->mrvl_hw_overlay) { + host->mem_ops = &marvell_xspi_mem_ops; + host->transfer_one_message = cdns_xspi_transfer_one_message_b0; + cdns_xspi->sdma_handler = &marvell_xspi_sdma_handle; + cdns_xspi->set_interrupts_handler = &marvell_xspi_set_interrupts; + } else { + host->mem_ops = &cadence_xspi_mem_ops; + cdns_xspi->sdma_handler = &cdns_xspi_sdma_handle; + cdns_xspi->set_interrupts_handler = &cdns_xspi_set_interrupts; + } host->dev.of_node = pdev->dev.of_node; host->bus_num = -1; platform_set_drvdata(pdev, host); - cdns_xspi = spi_controller_get_devdata(host); cdns_xspi->pdev = pdev; cdns_xspi->dev = &pdev->dev; cdns_xspi->cur_cs = 0; @@ -565,20 +1193,42 @@ static int cdns_xspi_probe(struct platform_device *pdev) cdns_xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "io"); if (IS_ERR(cdns_xspi->iobase)) { - dev_err(dev, "Failed to remap controller base address\n"); - return PTR_ERR(cdns_xspi->iobase); + cdns_xspi->iobase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cdns_xspi->iobase)) { + dev_err(dev, "Failed to remap controller base address\n"); + return PTR_ERR(cdns_xspi->iobase); + } } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma"); cdns_xspi->sdmabase = devm_ioremap_resource(dev, res); - if (IS_ERR(cdns_xspi->sdmabase)) - return PTR_ERR(cdns_xspi->sdmabase); + if (IS_ERR(cdns_xspi->sdmabase)) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + cdns_xspi->sdmabase = devm_ioremap_resource(dev, res); + if (IS_ERR(cdns_xspi->sdmabase)) + return PTR_ERR(cdns_xspi->sdmabase); + } cdns_xspi->sdmasize = resource_size(res); cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux"); if (IS_ERR(cdns_xspi->auxbase)) { - dev_err(dev, "Failed to remap AUX address\n"); - return PTR_ERR(cdns_xspi->auxbase); + cdns_xspi->auxbase = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(cdns_xspi->auxbase)) { + dev_err(dev, "Failed to remap AUX address\n"); + return PTR_ERR(cdns_xspi->auxbase); + } + } + + if (cdns_xspi->driver_data->mrvl_hw_overlay) { + cdns_xspi->xferbase = devm_platform_ioremap_resource_byname(pdev, "xfer"); + if (IS_ERR(cdns_xspi->xferbase)) { + cdns_xspi->xferbase = devm_platform_ioremap_resource(pdev, 3); + if (IS_ERR(cdns_xspi->xferbase)) { + dev_info(dev, "XFER register base not found, set it\n"); + // For compatibility with older firmware + cdns_xspi->xferbase = cdns_xspi->iobase + 0x8000; + } + } } cdns_xspi->irq = platform_get_irq(pdev, 0); @@ -592,6 +1242,11 @@ static int cdns_xspi_probe(struct platform_device *pdev) return ret; } + if (cdns_xspi->driver_data->mrvl_hw_overlay) { + cdns_mrvl_xspi_setup_clock(cdns_xspi, MRVL_DEFAULT_CLK); + cdns_xspi_configure_phy(cdns_xspi); + } + cdns_xspi_print_phy_config(cdns_xspi); ret = cdns_xspi_controller_init(cdns_xspi); @@ -616,6 +1271,11 @@ static int 
cdns_xspi_probe(struct platform_device *pdev) static const struct of_device_id cdns_xspi_of_match[] = { { .compatible = "cdns,xspi-nor", + .data = &cdns_driver_data, + }, + { + .compatible = "marvell,cn10-xspi-nor", + .data = &marvell_driver_data, }, { /* end of table */} }; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index e07e081de5ea41..3c87d2bf786a9e 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -678,8 +678,8 @@ static int cdns_spi_probe(struct platform_device *pdev) clk_dis_all: if (!spi_controller_is_target(ctlr)) { - pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); } remove_ctlr: spi_controller_put(ctlr); @@ -701,8 +701,10 @@ static void cdns_spi_remove(struct platform_device *pdev) cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); - pm_runtime_set_suspended(&pdev->dev); - pm_runtime_disable(&pdev->dev); + if (!spi_controller_is_target(ctlr)) { + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + } spi_unregister_controller(ctlr); } diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index f7e8b5efa50e56..ad26c8409733ca 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -570,6 +570,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) u32 errors = 0; struct davinci_spi_config *spicfg; struct davinci_spi_platform_data *pdata; + unsigned long timeout; dspi = spi_controller_get_devdata(spi->controller); pdata = &dspi->pdata; @@ -661,7 +662,12 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) /* Wait for the transfer to complete */ if (spicfg->io_type != SPI_IO_TYPE_POLL) { - if (wait_for_completion_timeout(&dspi->done, HZ) == 0) + timeout = DIV_ROUND_UP(t->speed_hz, MSEC_PER_SEC); + timeout = DIV_ROUND_UP(t->len * 8, timeout); + /* Assume we are at most 2x slower than the nominal bus speed */ + timeout = 2 * msecs_to_jiffies(timeout); + + if (wait_for_completion_timeout(&dspi->done, timeout) == 0) errors = SPIFLG_TIMEOUT_MASK; } else { while (dspi->rcount > 0 || dspi->wcount > 0) { diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c index d319dc357fef07..4ba1d9245c9fd1 100644 --- a/drivers/spi/spi-dln2.c +++ b/drivers/spi/spi-dln2.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #define DLN2_SPI_MODULE_ID 0x02 #define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID) diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index a1d60e51c053e4..dc6bdc74643d3a 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -18,18 +18,18 @@ #include #include #include +#include +#include #include #include #include #include +#include #include #include #include #include -#include -#include - #define SSPCR0 0x0000 #define SSPCR0_SPO BIT(6) #define SSPCR0_SPH BIT(7) @@ -76,8 +76,6 @@ * frame decreases this level and sending one frame increases it. 
* @dma_rx: RX DMA channel * @dma_tx: TX DMA channel - * @dma_rx_data: RX parameters passed to the DMA engine - * @dma_tx_data: TX parameters passed to the DMA engine * @rx_sgt: sg table for RX transfers * @tx_sgt: sg table for TX transfers * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by @@ -92,8 +90,6 @@ struct ep93xx_spi { size_t fifo_level; struct dma_chan *dma_rx; struct dma_chan *dma_tx; - struct ep93xx_dma_data dma_rx_data; - struct ep93xx_dma_data dma_tx_data; struct sg_table rx_sgt; struct sg_table tx_sgt; void *zeropage; @@ -575,46 +571,23 @@ static int ep93xx_spi_unprepare_hardware(struct spi_controller *host) return 0; } -static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param) +static int ep93xx_spi_setup_dma(struct device *dev, struct ep93xx_spi *espi) { - if (ep93xx_dma_chan_is_m2p(chan)) - return false; - - chan->private = filter_param; - return true; -} - -static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) -{ - dma_cap_mask_t mask; int ret; espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL); if (!espi->zeropage) return -ENOMEM; - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - espi->dma_rx_data.port = EP93XX_DMA_SSP; - espi->dma_rx_data.direction = DMA_DEV_TO_MEM; - espi->dma_rx_data.name = "ep93xx-spi-rx"; - - espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, - &espi->dma_rx_data); - if (!espi->dma_rx) { - ret = -ENODEV; + espi->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(espi->dma_rx)) { + ret = dev_err_probe(dev, PTR_ERR(espi->dma_rx), "rx DMA setup failed"); goto fail_free_page; } - espi->dma_tx_data.port = EP93XX_DMA_SSP; - espi->dma_tx_data.direction = DMA_MEM_TO_DEV; - espi->dma_tx_data.name = "ep93xx-spi-tx"; - - espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, - &espi->dma_tx_data); - if (!espi->dma_tx) { - ret = -ENODEV; + espi->dma_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(espi->dma_tx)) { + ret = dev_err_probe(dev, PTR_ERR(espi->dma_tx), "tx DMA setup failed"); goto fail_release_rx; } @@ -647,18 +620,11 @@ static void ep93xx_spi_release_dma(struct ep93xx_spi *espi) static int ep93xx_spi_probe(struct platform_device *pdev) { struct spi_controller *host; - struct ep93xx_spi_info *info; struct ep93xx_spi *espi; struct resource *res; int irq; int error; - info = dev_get_platdata(&pdev->dev); - if (!info) { - dev_err(&pdev->dev, "missing platform data\n"); - return -EINVAL; - } - irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; @@ -713,12 +679,17 @@ static int ep93xx_spi_probe(struct platform_device *pdev) goto fail_release_host; } - if (info->use_dma && ep93xx_spi_setup_dma(espi)) + error = ep93xx_spi_setup_dma(&pdev->dev, espi); + if (error == -EPROBE_DEFER) + goto fail_release_host; + + if (error) dev_warn(&pdev->dev, "DMA setup failed. 
Falling back to PIO\n"); /* make sure that the hardware is disabled */ writel(0, espi->mmio + SSPCR1); + device_set_node(&host->dev, dev_fwnode(&pdev->dev)); error = devm_spi_register_controller(&pdev->dev, host); if (error) { dev_err(&pdev->dev, "failed to register SPI host\n"); @@ -746,9 +717,16 @@ static void ep93xx_spi_remove(struct platform_device *pdev) ep93xx_spi_release_dma(espi); } +static const struct of_device_id ep93xx_spi_of_ids[] = { + { .compatible = "cirrus,ep9301-spi" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ep93xx_spi_of_ids); + static struct platform_driver ep93xx_spi_driver = { .driver = { .name = "ep93xx-spi", + .of_match_table = ep93xx_spi_of_ids, }, .probe = ep93xx_spi_probe, .remove_new = ep93xx_spi_remove, diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 8ecb426be45c75..977e8b55c82b7d 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -986,6 +986,7 @@ static void fsl_lpspi_remove(struct platform_device *pdev) fsl_lpspi_dma_exit(controller); + pm_runtime_dont_use_autosuspend(fsl_lpspi->dev); pm_runtime_disable(fsl_lpspi->dev); } diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 6f4057330444d5..f6e40f90418f42 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -604,6 +604,21 @@ static int spi_geni_prepare_message(struct spi_controller *spi, return -EINVAL; } +static void spi_geni_release_dma_chan(void *data) +{ + struct spi_geni_master *mas = data; + + if (mas->rx) { + dma_release_channel(mas->rx); + mas->rx = NULL; + } + + if (mas->tx) { + dma_release_channel(mas->tx); + mas->tx = NULL; + } +} + static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas) { int ret; @@ -622,6 +637,12 @@ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas) goto err_rx; } + ret = devm_add_action_or_reset(mas->dev, spi_geni_release_dma_chan, mas); + if (ret) { + dev_err(mas->dev, "Unable to add action.\n"); + return ret; + } + return 0; err_rx: @@ -632,19 +653,6 @@ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas) return ret; } -static void spi_geni_release_dma_chan(struct spi_geni_master *mas) -{ - if (mas->rx) { - dma_release_channel(mas->rx); - mas->rx = NULL; - } - - if (mas->tx) { - dma_release_channel(mas->tx); - mas->tx = NULL; - } -} - static int spi_geni_init(struct spi_geni_master *mas) { struct spi_controller *spi = dev_get_drvdata(mas->dev); @@ -1146,33 +1154,11 @@ static int spi_geni_probe(struct platform_device *pdev) if (mas->cur_xfer_mode == GENI_GPI_DMA) spi->flags = SPI_CONTROLLER_MUST_TX; - ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi); - if (ret) - goto spi_geni_release_dma; - - ret = spi_register_controller(spi); + ret = devm_request_irq(dev, mas->irq, geni_spi_isr, 0, dev_name(dev), spi); if (ret) - goto spi_geni_probe_free_irq; - - return 0; -spi_geni_probe_free_irq: - free_irq(mas->irq, spi); -spi_geni_release_dma: - spi_geni_release_dma_chan(mas); - return ret; -} - -static void spi_geni_remove(struct platform_device *pdev) -{ - struct spi_controller *spi = platform_get_drvdata(pdev); - struct spi_geni_master *mas = spi_controller_get_devdata(spi); - - /* Unregister _before_ disabling pm_runtime() so we stop transfers */ - spi_unregister_controller(spi); - - free_irq(mas->irq, spi); + return ret; - spi_geni_release_dma_chan(mas); + return devm_spi_register_controller(dev, spi); } static int __maybe_unused spi_geni_runtime_suspend(struct device *dev) @@ -1254,7 +1240,6 @@ MODULE_DEVICE_TABLE(of, 
spi_geni_dt_match); static struct platform_driver spi_geni_driver = { .probe = spi_geni_probe, - .remove_new = spi_geni_remove, .driver = { .name = "geni_spi", .pm = &spi_geni_pm_ops, diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 36c587be9e2880..4f192e013cd6f2 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -236,6 +236,14 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active) } } +static void spi_gpio_set_mosi_idle(struct spi_device *spi) +{ + struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); + + gpiod_set_value_cansleep(spi_gpio->mosi, + !!(spi->mode & SPI_MOSI_IDLE_HIGH)); +} + static int spi_gpio_setup(struct spi_device *spi) { struct gpio_desc *cs; @@ -389,7 +397,8 @@ static int spi_gpio_probe(struct platform_device *pdev) host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); host->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL | - SPI_CS_HIGH | SPI_LSB_FIRST; + SPI_CS_HIGH | SPI_LSB_FIRST | SPI_MOSI_IDLE_LOW | + SPI_MOSI_IDLE_HIGH; if (!spi_gpio->mosi) { /* HW configuration without MOSI pin * @@ -414,6 +423,7 @@ static int spi_gpio_probe(struct platform_device *pdev) host->flags |= SPI_CONTROLLER_GPIO_SS; bb->chipselect = spi_gpio_chipselect; bb->set_line_direction = spi_gpio_set_direction; + bb->set_mosi_idle = spi_gpio_set_mosi_idle; if (host->flags & SPI_CONTROLLER_NO_TX) { bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0; diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 85bd1a82a34eb4..4c31d36f3130a9 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1865,8 +1865,8 @@ static int spi_imx_probe(struct platform_device *pdev) spi_imx_sdma_exit(spi_imx); out_runtime_pm_put: pm_runtime_dont_use_autosuspend(spi_imx->dev); - pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(spi_imx->dev); + pm_runtime_set_suspended(&pdev->dev); clk_disable_unprepare(spi_imx->clk_ipg); out_put_per: diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index 8838a98b04c234..1d05590a743469 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -655,8 +655,8 @@ static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc) } init.num_parents = 1; - pow2_fixed_div->mult = 1, - pow2_fixed_div->div = 4, + pow2_fixed_div->mult = 1; + pow2_fixed_div->div = 4; pow2_fixed_div->hw.init = &init; clk = devm_clk_register(dev, &pow2_fixed_div->hw); @@ -674,9 +674,9 @@ static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc) parent_data[0].hw = &pow2_fixed_div->hw; init.num_parents = 1; - spicc->pow2_div.shift = 16, - spicc->pow2_div.width = 3, - spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO, + spicc->pow2_div.shift = 16; + spicc->pow2_div.width = 3; + spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO; spicc->pow2_div.reg = spicc->base + SPICC_CONREG; spicc->pow2_div.hw.init = &init; @@ -721,8 +721,8 @@ static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc) } init.num_parents = 1; - enh_fixed_div->mult = 1, - enh_fixed_div->div = 2, + enh_fixed_div->mult = 1; + enh_fixed_div->div = 2; enh_fixed_div->hw.init = &init; clk = devm_clk_register(dev, &enh_fixed_div->hw); @@ -740,8 +740,8 @@ static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc) parent_data[0].hw = &enh_fixed_div->hw; init.num_parents = 1; - enh_div->shift = 16, - enh_div->width = 8, + enh_div->shift = 16; + enh_div->width = 8; enh_div->reg = spicc->base + SPICC_ENH_CTL0; enh_div->hw.init = &init; @@ -761,8 +761,8 @@ static int 
meson_spicc_enh_clk_init(struct meson_spicc_device *spicc) init.num_parents = 2; init.flags = CLK_SET_RATE_PARENT; - mux->mask = 0x1, - mux->shift = 24, + mux->mask = 0x1; + mux->shift = 24; mux->reg = spicc->base + SPICC_ENH_CTL0; mux->hw.init = &init; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 36c2f52cd6b846..dfee244fc3173c 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -743,25 +743,13 @@ static int mtk_spi_setup(struct spi_device *spi) return 0; } -static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) +static irqreturn_t mtk_spi_interrupt_thread(int irq, void *dev_id) { u32 cmd, reg_val, cnt, remainder, len; struct spi_controller *host = dev_id; struct mtk_spi *mdata = spi_controller_get_devdata(host); struct spi_transfer *xfer = mdata->cur_transfer; - reg_val = readl(mdata->base + SPI_STATUS0_REG); - if (reg_val & MTK_SPI_PAUSE_INT_STATUS) - mdata->state = MTK_SPI_PAUSED; - else - mdata->state = MTK_SPI_IDLE; - - /* SPI-MEM ops */ - if (mdata->use_spimem) { - complete(&mdata->spimem_done); - return IRQ_HANDLED; - } - if (!host->can_dma(host, NULL, xfer)) { if (xfer->rx_buf) { cnt = mdata->xfer_len / 4; @@ -845,6 +833,27 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) +{ + struct spi_controller *host = dev_id; + struct mtk_spi *mdata = spi_controller_get_devdata(host); + u32 reg_val; + + reg_val = readl(mdata->base + SPI_STATUS0_REG); + if (reg_val & MTK_SPI_PAUSE_INT_STATUS) + mdata->state = MTK_SPI_PAUSED; + else + mdata->state = MTK_SPI_IDLE; + + /* SPI-MEM ops */ + if (mdata->use_spimem) { + complete(&mdata->spimem_done); + return IRQ_HANDLED; + } + + return IRQ_WAKE_THREAD; +} + static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) { @@ -1255,8 +1264,9 @@ static int mtk_spi_probe(struct platform_device *pdev) dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n", addr_bits, ret); - ret = devm_request_irq(dev, irq, mtk_spi_interrupt, - IRQF_TRIGGER_NONE, dev_name(dev), host); + ret = devm_request_threaded_irq(dev, irq, mtk_spi_interrupt, + mtk_spi_interrupt_thread, + IRQF_TRIGGER_NONE, dev_name(dev), host); if (ret) return dev_err_probe(dev, ret, "failed to register irq\n"); diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 88cbe4f00cc3b1..3e341d1ff3b63b 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -477,7 +477,7 @@ static int mxs_spi_runtime_resume(struct device *dev) return ret; } -static int __maybe_unused mxs_spi_suspend(struct device *dev) +static int mxs_spi_suspend(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); int ret; @@ -492,7 +492,7 @@ static int __maybe_unused mxs_spi_suspend(struct device *dev) return 0; } -static int __maybe_unused mxs_spi_resume(struct device *dev) +static int mxs_spi_resume(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); int ret; @@ -512,9 +512,8 @@ static int __maybe_unused mxs_spi_resume(struct device *dev) } static const struct dev_pm_ops mxs_spi_pm = { - SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend, - mxs_spi_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume) + RUNTIME_PM_OPS(mxs_spi_runtime_suspend, mxs_spi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume) }; static const struct of_device_id mxs_spi_dt_ids[] = { @@ -662,7 +661,7 @@ static struct platform_driver mxs_spi_driver = { .driver = { .name = DRIVER_NAME, 
.of_match_table = mxs_spi_dt_ids, - .pm = &mxs_spi_pm, + .pm = pm_ptr(&mxs_spi_pm), }, }; diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c index a7feb20b06eee1..30aa37b0c3b82f 100644 --- a/drivers/spi/spi-npcm-pspi.c +++ b/drivers/spi/spi-npcm-pspi.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 6585b19a48662d..5a1e55a01c5210 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -57,13 +57,6 @@ #include #include -/* - * The driver only uses one single LUT entry, that is updated on - * each call of exec_op(). Index 0 is preset at boot with a basic - * read operation, so let's use the last entry (31). - */ -#define SEQID_LUT 31 - /* Registers used by the driver */ #define FSPI_MCR0 0x00 #define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) @@ -263,9 +256,6 @@ #define FSPI_TFDR 0x180 #define FSPI_LUT_BASE 0x200 -#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) -#define FSPI_LUT_REG(idx) \ - (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4) /* register map end */ @@ -341,6 +331,7 @@ struct nxp_fspi_devtype_data { unsigned int txfifo; unsigned int ahb_buf_size; unsigned int quirks; + unsigned int lut_num; bool little_endian; }; @@ -349,6 +340,7 @@ static struct nxp_fspi_devtype_data lx2160a_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ }; @@ -357,6 +349,7 @@ static struct nxp_fspi_devtype_data imx8mm_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ }; @@ -365,6 +358,7 @@ static struct nxp_fspi_devtype_data imx8qxp_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ }; @@ -373,6 +367,16 @@ static struct nxp_fspi_devtype_data imx8dxl_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = FSPI_QUIRK_USE_IP_ONLY, + .lut_num = 32, + .little_endian = true, /* little-endian */ +}; + +static struct nxp_fspi_devtype_data imx8ulp_data = { + .rxfifo = SZ_512, /* (64 * 64 bits) */ + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, + .lut_num = 16, .little_endian = true, /* little-endian */ }; @@ -544,6 +548,8 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, void __iomem *base = f->iobase; u32 lutval[4] = {}; int lutidx = 1, i; + u32 lut_offset = (f->devtype_data->lut_num - 1) * 4 * 4; + u32 target_lut_reg; /* cmd */ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), @@ -588,8 +594,10 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR); /* fill LUT */ - for (i = 0; i < ARRAY_SIZE(lutval); i++) - fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i)); + for (i = 0; i < ARRAY_SIZE(lutval); i++) { + target_lut_reg = FSPI_LUT_BASE + lut_offset + i * 4; + fspi_writel(f, lutval[i], base + target_lut_reg); + } dev_dbg(f->dev, "CMD[%02x] lutval[0:%08x 1:%08x 2:%08x 3:%08x], size: 0x%08x\n", op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes); @@ -756,8 +764,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) iounmap(f->ahb_addr); f->memmap_start = start; - f->memmap_len = len > NXP_FSPI_MIN_IOMAP ? 
- len : NXP_FSPI_MIN_IOMAP; + f->memmap_len = max_t(u32, len, NXP_FSPI_MIN_IOMAP); f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start, f->memmap_len); @@ -876,7 +883,7 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) void __iomem *base = f->iobase; int seqnum = 0; int err = 0; - u32 reg; + u32 reg, seqid_lut; reg = fspi_readl(f, base + FSPI_IPRXFCR); /* invalid RXFIFO first */ @@ -892,8 +899,9 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) * the LUT at each exec_op() call. And also specify the DATA * length, since it's has not been specified in the LUT. */ + seqid_lut = f->devtype_data->lut_num - 1; fspi_writel(f, op->data.nbytes | - (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) | + (seqid_lut << FSPI_IPCR1_SEQID_SHIFT) | (seqnum << FSPI_IPCR1_SEQNUM_SHIFT), base + FSPI_IPCR1); @@ -1017,7 +1025,7 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) { void __iomem *base = f->iobase; int ret, i; - u32 reg; + u32 reg, seqid_lut; /* disable and unprepare clock to avoid glitch pass to controller */ nxp_fspi_clk_disable_unprep(f); @@ -1092,11 +1100,17 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) fspi_writel(f, reg, base + FSPI_FLSHB1CR1); fspi_writel(f, reg, base + FSPI_FLSHB2CR1); + /* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry. + */ + seqid_lut = f->devtype_data->lut_num - 1; /* AHB Read - Set lut sequence ID for all CS. */ - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHA1CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHA2CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHB1CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHB2CR2); f->selected = -1; @@ -1291,6 +1305,7 @@ static const struct of_device_id nxp_fspi_dt_ids[] = { { .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, }, { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, }, { .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, }, + { .compatible = "nxp,imx8ulp-fspi", .data = (void *)&imx8ulp_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index eee9ff4bfa5b57..4730e4ba890101 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #define DRIVER_NAME "orion_spi" diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c index 942c3117ab3a90..4a64ea0f596f9a 100644 --- a/drivers/spi/spi-ppc4xx.c +++ b/drivers/spi/spi-ppc4xx.c @@ -20,23 +20,21 @@ * during SPI transfers by setting max_speed_hz via the device tree. 
*/ -#include -#include -#include +#include #include -#include -#include +#include +#include +#include #include -#include #include -#include -#include #include +#include +#include +#include #include #include -#include #include #include @@ -412,7 +410,11 @@ static int spi_ppc4xx_of_probe(struct platform_device *op) } /* Request IRQ */ - hw->irqnum = irq_of_parse_and_map(np, 0); + ret = platform_get_irq(op, 0); + if (ret < 0) + goto free_host; + hw->irqnum = ret; + ret = request_irq(hw->irqnum, spi_ppc4xx_int, 0, "spi_ppc4xx_of", (void *)hw); if (ret) { diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c index d3f07fd719bdb7..c24dad51a0e966 100644 --- a/drivers/spi/spi-rpc-if.c +++ b/drivers/spi/spi-rpc-if.c @@ -14,7 +14,7 @@ #include -#include +#include static void rpcif_spi_mem_prepare(struct spi_device *spi_dev, const struct spi_mem_op *spi_op, @@ -198,9 +198,16 @@ static int __maybe_unused rpcif_spi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume); +static const struct platform_device_id rpc_if_spi_id_table[] = { + { .name = "rpc-if-spi" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, rpc_if_spi_id_table); + static struct platform_driver rpcif_spi_driver = { .probe = rpcif_spi_probe, .remove_new = rpcif_spi_remove, + .id_table = rpc_if_spi_id_table, .driver = { .name = "rpc-if-spi", #ifdef CONFIG_PM_SLEEP diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 833c58c88e4086..8c9e5e97041f9c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -245,7 +245,7 @@ static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd) loops = msecs_to_loops(1); do { val = readl(regs + S3C64XX_SPI_STATUS); - } while (TX_FIFO_LVL(val, sdd) && loops--); + } while (TX_FIFO_LVL(val, sdd) && --loops); if (loops == 0) dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); @@ -258,7 +258,7 @@ static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd) readl(regs + S3C64XX_SPI_RX_DATA); else break; - } while (loops--); + } while (--loops); if (loops == 0) dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); @@ -1637,6 +1637,7 @@ static const struct platform_device_id s3c64xx_spi_driver_ids[] = { }, { }, }; +MODULE_DEVICE_TABLE(platform, s3c64xx_spi_driver_ids); static const struct of_device_id s3c64xx_spi_dt_match[] = { { .compatible = "google,gs101-spi", diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 6f12e4fb2e2e18..3519656515ea12 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -27,7 +27,7 @@ #include #include -#include +#include #define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0) diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c index f1ddf4c099a341..4a91b7bae3c68a 100644 --- a/drivers/spi/spi-slave-mt27xx.c +++ b/drivers/spi/spi-slave-mt27xx.c @@ -69,7 +69,7 @@ struct mtk_spi_slave { struct clk *spi_clk; struct completion xfer_done; struct spi_transfer *cur_transfer; - bool slave_aborted; + bool target_aborted; const struct mtk_spi_compatible *dev_comp; }; @@ -118,7 +118,7 @@ static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata) static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata) { if (wait_for_completion_interruptible(&mdata->xfer_done) || - mdata->slave_aborted) { + mdata->target_aborted) { dev_err(mdata->dev, "interrupted\n"); return -EINTR; } @@ -286,7 +286,7 @@ static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr, struct mtk_spi_slave *mdata 
= spi_controller_get_devdata(ctlr); reinit_completion(&mdata->xfer_done); - mdata->slave_aborted = false; + mdata->target_aborted = false; mdata->cur_transfer = xfer; if (xfer->len > mdata->dev_comp->max_fifo_size) @@ -314,11 +314,11 @@ static int mtk_spi_slave_setup(struct spi_device *spi) return 0; } -static int mtk_slave_abort(struct spi_controller *ctlr) +static int mtk_target_abort(struct spi_controller *ctlr) { struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr); - mdata->slave_aborted = true; + mdata->target_aborted = true; complete(&mdata->xfer_done); return 0; @@ -402,7 +402,7 @@ static int mtk_spi_slave_probe(struct platform_device *pdev) ctlr->prepare_message = mtk_spi_slave_prepare_message; ctlr->transfer_one = mtk_spi_slave_transfer_one; ctlr->setup = mtk_spi_slave_setup; - ctlr->slave_abort = mtk_slave_abort; + ctlr->target_abort = mtk_target_abort; of_id = of_match_node(mtk_spi_slave_of_match, pdev->dev.of_node); if (!of_id) { diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c index d37cfe995a6324..8f5c32b61a5b4a 100644 --- a/drivers/spi/spi-slave-system-control.c +++ b/drivers/spi/spi-slave-system-control.c @@ -136,7 +136,7 @@ static void spi_slave_system_control_remove(struct spi_device *spi) { struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi); - spi_slave_abort(spi); + spi_target_abort(spi); wait_for_completion(&priv->finished); } diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c index f56c1afb853400..8bb3070e4b8006 100644 --- a/drivers/spi/spi-slave-time.c +++ b/drivers/spi/spi-slave-time.c @@ -110,7 +110,7 @@ static void spi_slave_time_remove(struct spi_device *spi) { struct spi_slave_time_priv *priv = spi_get_drvdata(spi); - spi_slave_abort(spi); + spi_target_abort(spi); wait_for_completion(&priv->finished); } diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 4a18cf89619473..07b155980e712c 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -15,7 +15,7 @@ #include #include -#include +#include #define SSI_TIMEOUT_MS 2000 #define SSI_POLL_TIMEOUT_US 200 diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c index 886d6d7771d4e7..a9aee2a6c7dcbc 100644 --- a/drivers/spi/spi-wpcm-fiu.c +++ b/drivers/spi/spi-wpcm-fiu.c @@ -448,12 +448,10 @@ static int wpcm_fiu_probe(struct platform_device *pdev) fiu = spi_controller_get_devdata(ctrl); fiu->dev = dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); - fiu->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(fiu->regs)) { - dev_err(dev, "Failed to map registers\n"); - return PTR_ERR(fiu->regs); - } + fiu->regs = devm_platform_ioremap_resource_byname(pdev, "control"); + if (IS_ERR(fiu->regs)) + return dev_err_probe(dev, PTR_ERR(fiu->regs), + "Failed to map registers\n"); fiu->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(fiu->clk)) @@ -462,10 +460,9 @@ static int wpcm_fiu_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory"); fiu->memory = devm_ioremap_resource(dev, res); fiu->memory_size = min_t(size_t, resource_size(res), MAX_MEMORY_SIZE_TOTAL); - if (IS_ERR(fiu->memory)) { - dev_err(dev, "Failed to map flash memory window\n"); - return PTR_ERR(fiu->memory); - } + if (IS_ERR(fiu->memory)) + return dev_err_probe(dev, PTR_ERR(fiu->memory), + "Failed to map flash memory window\n"); fiu->shm_regmap = syscon_regmap_lookup_by_phandle_optional(dev->of_node, "nuvoton,shm"); diff --git 
a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c index 846f00e23b7176..3bd0149d8f4eae 100644 --- a/drivers/spi/spi-xcomm.c +++ b/drivers/spi/spi-xcomm.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #define SPI_XCOMM_SETTINGS_LEN_OFFSET 10 #define SPI_XCOMM_SETTINGS_3WIRE BIT(6) diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index d6325c6be3d41a..b67455bda972b2 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -569,7 +569,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, } if (op->dummy.nbytes) { - tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL); + tmpbuf = kmalloc(op->dummy.nbytes, GFP_KERNEL); if (!tmpbuf) return -ENOMEM; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 558c466135a51b..fcd0ca99668419 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -1242,7 +1242,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) u32 num_cs; const struct qspi_platform_data *p_data; - ctlr = spi_alloc_host(&pdev->dev, sizeof(*xqspi)); + ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*xqspi)); if (!ctlr) return -ENOMEM; @@ -1256,30 +1256,22 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) xqspi->has_tapdelay = true; xqspi->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(xqspi->regs)) { - ret = PTR_ERR(xqspi->regs); - goto remove_ctlr; - } + if (IS_ERR(xqspi->regs)) + return PTR_ERR(xqspi->regs); xqspi->pclk = devm_clk_get(&pdev->dev, "pclk"); - if (IS_ERR(xqspi->pclk)) { - dev_err(dev, "pclk clock not found.\n"); - ret = PTR_ERR(xqspi->pclk); - goto remove_ctlr; - } + if (IS_ERR(xqspi->pclk)) + return dev_err_probe(dev, PTR_ERR(xqspi->pclk), + "pclk clock not found.\n"); xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk"); - if (IS_ERR(xqspi->refclk)) { - dev_err(dev, "ref_clk clock not found.\n"); - ret = PTR_ERR(xqspi->refclk); - goto remove_ctlr; - } + if (IS_ERR(xqspi->refclk)) + return dev_err_probe(dev, PTR_ERR(xqspi->refclk), + "ref_clk clock not found.\n"); ret = clk_prepare_enable(xqspi->pclk); - if (ret) { - dev_err(dev, "Unable to enable APB clock.\n"); - goto remove_ctlr; - } + if (ret) + return dev_err_probe(dev, ret, "Unable to enable APB clock.\n"); ret = clk_prepare_enable(xqspi->refclk); if (ret) { @@ -1364,8 +1356,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) clk_disable_unprepare(xqspi->refclk); clk_dis_pclk: clk_disable_unprepare(xqspi->pclk); -remove_ctlr: - spi_controller_put(ctlr); return ret; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 6ebe5dd9bbb185..c1dad30a4528b7 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1440,7 +1440,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr, u32 speed_hz = xfer->speed_hz; unsigned long long ms; - if (spi_controller_is_slave(ctlr)) { + if (spi_controller_is_target(ctlr)) { if (wait_for_completion_interruptible(&ctlr->xfer_completion)) { dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n"); return -EINTR; @@ -2425,7 +2425,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, } } - if (spi_controller_is_slave(ctlr)) { + if (spi_controller_is_target(ctlr)) { if (!of_node_name_eq(nc, "slave")) { dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", nc); @@ -2934,21 +2934,10 @@ static struct class spi_master_class = { #ifdef CONFIG_SPI_SLAVE /** - * spi_slave_abort - abort the ongoing transfer request on an SPI slave + * spi_target_abort - abort the ongoing transfer request on an SPI 
slave * controller * @spi: device used for the current transfer */ -int spi_slave_abort(struct spi_device *spi) -{ - struct spi_controller *ctlr = spi->controller; - - if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) - return ctlr->slave_abort(ctlr); - - return -ENOTSUPP; -} -EXPORT_SYMBOL_GPL(spi_slave_abort); - int spi_target_abort(struct spi_device *spi) { struct spi_controller *ctlr = spi->controller; @@ -3321,7 +3310,7 @@ int spi_register_controller(struct spi_controller *ctlr) */ dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); - if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) { + if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) { status = spi_get_gpio_descs(ctlr); if (status) goto free_bus_id; @@ -3349,7 +3338,7 @@ int spi_register_controller(struct spi_controller *ctlr) if (status < 0) goto free_bus_id; dev_dbg(dev, "registered %s %s\n", - spi_controller_is_slave(ctlr) ? "slave" : "master", + spi_controller_is_target(ctlr) ? "target" : "host", dev_name(&ctlr->dev)); /* @@ -3921,6 +3910,12 @@ int spi_setup(struct spi_device *spi) (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) return -EINVAL; + /* Check against conflicting MOSI idle configuration */ + if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) { + dev_err(&spi->dev, + "setup: MOSI configured to idle low and high at the same time.\n"); + return -EINVAL; + } /* * Help drivers fail *cleanly* when they need options * that aren't supported with their current controller. diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index face93a9cf2035..653f82984216c3 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -666,7 +666,7 @@ static int spidev_release(struct inode *inode, struct file *filp) } #ifdef CONFIG_SPI_SLAVE if (!dofree) - spi_slave_abort(spidev->spi); + spi_target_abort(spidev->spi); #endif mutex_unlock(&device_list_lock); @@ -685,7 +685,6 @@ static const struct file_operations spidev_fops = { .compat_ioctl = spidev_compat_ioctl, .open = spidev_open, .release = spidev_release, - .llseek = no_llseek, }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index db4a392841b163..3fb68d60dfc1b5 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -54,8 +54,6 @@ source "drivers/staging/fbtft/Kconfig" source "drivers/staging/most/Kconfig" -source "drivers/staging/ks7010/Kconfig" - source "drivers/staging/greybus/Kconfig" source "drivers/staging/vc04_services/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 5390879b5d1b75..c977aa13fad40c 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -17,7 +17,6 @@ obj-$(CONFIG_MFD_NVEC) += nvec/ obj-$(CONFIG_LTE_GDM724X) += gdm724x/ obj-$(CONFIG_FB_TFT) += fbtft/ obj-$(CONFIG_MOST) += most/ -obj-$(CONFIG_KS7010) += ks7010/ obj-$(CONFIG_GREYBUS) += greybus/ obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c index 0be7c2d51548ad..050fc2367c122f 100644 --- a/drivers/staging/fbtft/fb_ili9320.c +++ b/drivers/staging/fbtft/fb_ili9320.c @@ -35,8 +35,6 @@ static int init_display(struct fbtft_par *par) par->fbtftops.reset(par); devcode = read_devicecode(par); - fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n", - devcode); if ((devcode != 0x0000) && (devcode != 0x9320)) 
dev_warn(par->info->device, "Unrecognized Device code: 0x%04X (expected 0x9320)\n", diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c index 398bdbf53c9a59..0ab1de6647d074 100644 --- a/drivers/staging/fbtft/fb_ra8875.c +++ b/drivers/staging/fbtft/fb_ra8875.c @@ -41,13 +41,6 @@ static int init_display(struct fbtft_par *par) { gpiod_set_value(par->gpio.dc, 1); - fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, - "%s()\n", __func__); - fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, - "display size %dx%d\n", - par->info->var.xres, - par->info->var.yres); - par->fbtftops.reset(par); if ((par->info->var.xres == 320) && (par->info->var.yres == 240)) { diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c index 9685ca516a0e3c..e4c50c1ffed059 100644 --- a/drivers/staging/fbtft/fb_sh1106.c +++ b/drivers/staging/fbtft/fb_sh1106.c @@ -88,9 +88,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) static int blank(struct fbtft_par *par, bool on) { - fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n", - __func__, on ? "true" : "false"); - write_reg(par, on ? 0xAE : 0xAF); return 0; diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c index f27bab38b3ec4c..255a6d21ca8e5d 100644 --- a/drivers/staging/fbtft/fb_ssd1289.c +++ b/drivers/staging/fbtft/fb_ssd1289.c @@ -93,9 +93,6 @@ static int set_var(struct fbtft_par *par) { if (par->fbtftops.init_display != init_display) { /* don't risk messing up register 11h */ - fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, - "%s: skipping since custom init_display() is used\n", - __func__); return 0; } diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c index 6cf9df579e88b6..478d710469b91e 100644 --- a/drivers/staging/fbtft/fb_ssd1306.c +++ b/drivers/staging/fbtft/fb_ssd1306.c @@ -148,9 +148,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) static int blank(struct fbtft_par *par, bool on) { - fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n", - __func__, on ? "true" : "false"); - if (on) write_reg(par, 0xAE); else diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c index 796a2ac3e19486..256b0b87a9305e 100644 --- a/drivers/staging/fbtft/fb_ssd1325.c +++ b/drivers/staging/fbtft/fb_ssd1325.c @@ -72,10 +72,6 @@ static uint8_t rgb565_to_g16(u16 pixel) static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, - "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, - ye); - write_reg(par, 0x75); write_reg(par, 0x00); write_reg(par, 0x3f); @@ -86,9 +82,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) static int blank(struct fbtft_par *par, bool on) { - fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n", - __func__, on ? 
"true" : "false"); - if (on) write_reg(par, 0xAE); else @@ -109,8 +102,6 @@ static int set_gamma(struct fbtft_par *par, u32 *curves) { int i; - fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__); - for (i = 0; i < GAMMA_LEN; i++) { if (i > 0 && curves[i] < 1) { dev_err(par->info->device, diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c index ec5eced7f8cbd6..06b7056d6c71c0 100644 --- a/drivers/staging/fbtft/fb_ssd1331.c +++ b/drivers/staging/fbtft/fb_ssd1331.c @@ -167,8 +167,6 @@ static int set_gamma(struct fbtft_par *par, u32 *curves) static int blank(struct fbtft_par *par, bool on) { - fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n", - __func__, on ? "true" : "false"); if (on) write_reg(par, 0xAE); else diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c index ca2cba2185ae3e..f6db2933ebba2a 100644 --- a/drivers/staging/fbtft/fb_ssd1351.c +++ b/drivers/staging/fbtft/fb_ssd1351.c @@ -72,9 +72,6 @@ static int set_var(struct fbtft_par *par) if (par->fbtftops.init_display != init_display) { /* don't risk messing up register A0h */ - fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, - "%s: skipping since custom init_display() is used\n", - __func__); return 0; } @@ -213,7 +210,7 @@ static void register_onboard_backlight(struct fbtft_par *par) struct backlight_properties bl_props = { 0, }; bl_props.type = BACKLIGHT_RAW; - bl_props.power = FB_BLANK_POWERDOWN; + bl_props.power = BACKLIGHT_POWER_OFF; bd = backlight_device_register(dev_driver_string(par->info->device), par->info->device, par, &bl_ops, diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c index f61e373c75e966..ca35b386a12d53 100644 --- a/drivers/staging/fbtft/fb_uc1611.c +++ b/drivers/staging/fbtft/fb_uc1611.c @@ -135,9 +135,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) static int blank(struct fbtft_par *par, bool on) { - fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n", - __func__, on ? 
"true" : "false"); - if (on) write_reg(par, 0xA8 | 0x00); else diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c index 3d422bc1164116..30e436ff19e4c4 100644 --- a/drivers/staging/fbtft/fbtft-bus.c +++ b/drivers/staging/fbtft/fbtft-bus.c @@ -129,9 +129,6 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len) int ret = 0; size_t startbyte_size = 0; - fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n", - __func__, offset, len); - remain = len / 2; vmem16 = (u16 *)(par->info->screen_buffer + offset); @@ -182,9 +179,6 @@ int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len) int i; int ret = 0; - fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n", - __func__, offset, len); - if (!par->txbuf.buf) { dev_err(par->info->device, "%s: txbuf.buf is NULL\n", __func__); return -1; @@ -232,9 +226,6 @@ int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len) { u16 *vmem16; - fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n", - __func__, offset, len); - vmem16 = (u16 *)(par->info->screen_buffer + offset); /* no need for buffered write with 16-bit bus */ diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 8e2fd0c0fee21b..4cfa494243b983 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -152,7 +152,7 @@ static int fbtft_backlight_get_brightness(struct backlight_device *bd) void fbtft_unregister_backlight(struct fbtft_par *par) { if (par->info->bl_dev) { - par->info->bl_dev->props.power = FB_BLANK_POWERDOWN; + par->info->bl_dev->props.power = BACKLIGHT_POWER_OFF; backlight_update_status(par->info->bl_dev); backlight_device_unregister(par->info->bl_dev); par->info->bl_dev = NULL; @@ -178,7 +178,7 @@ void fbtft_register_backlight(struct fbtft_par *par) bl_props.type = BACKLIGHT_RAW; /* Assume backlight is off, get polarity from current state of pin */ - bl_props.power = FB_BLANK_POWERDOWN; + bl_props.power = BACKLIGHT_POWER_OFF; if (!gpiod_get_value(par->gpio.led[0])) par->polarity = true; @@ -215,8 +215,6 @@ static void fbtft_reset(struct fbtft_par *par) if (!par->gpio.reset) return; - fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__); - gpiod_set_value_cansleep(par->gpio.reset, 1); usleep_range(20, 40); gpiod_set_value_cansleep(par->gpio.reset, 0); @@ -801,7 +799,7 @@ int fbtft_register_framebuffer(struct fb_info *fb_info) /* Turn on backlight if available */ if (fb_info->bl_dev) { - fb_info->bl_dev->props.power = FB_BLANK_UNBLANK; + fb_info->bl_dev->props.power = BACKLIGHT_POWER_ON; fb_info->bl_dev->ops->update_status(fb_info->bl_dev); } @@ -1052,8 +1050,6 @@ static int fbtft_verify_gpios(struct fbtft_par *par) struct fbtft_platform_data *pdata = par->pdata; int i; - fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__); - if (pdata->display.buswidth != 9 && par->startbyte == 0 && !par->gpio.dc) { dev_err(par->info->device, @@ -1157,9 +1153,6 @@ int fbtft_probe_common(struct fbtft_display *display, else dev = &pdev->dev; - if (unlikely(display->debug & DEBUG_DRIVER_INIT_FUNCTIONS)) - dev_info(dev, "%s()\n", __func__); - pdata = dev->platform_data; if (!pdata) { pdata = fbtft_properties_read(dev); diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c index 39e8d28066cbe1..e45c90a03a903e 100644 --- a/drivers/staging/fbtft/fbtft-sysfs.c +++ b/drivers/staging/fbtft/fbtft-sysfs.c @@ -27,13 +27,9 @@ int fbtft_gamma_parse_str(struct fbtft_par *par, u32 
*curves, int curve_counter, value_counter; int _count; - fbtft_par_dbg(DEBUG_SYSFS, par, "%s() str=\n", __func__); - if (!str || !curves) return -EINVAL; - fbtft_par_dbg(DEBUG_SYSFS, par, "%s\n", str); - tmp = kmemdup(str, size + 1, GFP_KERNEL); if (!tmp) return -ENOMEM; diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index f86ed9d470b8c5..3e00a26a29d5c3 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -202,6 +202,7 @@ struct fbtft_par { u8 *buf; u8 startbyte; struct fbtft_ops fbtftops; + /* Spinlock to ensure thread-safe access to dirty_lines_start and dirty_lines_end */ spinlock_t dirty_lock; unsigned int dirty_lines_start; unsigned int dirty_lines_end; @@ -218,6 +219,7 @@ struct fbtft_par { } gpio; const s16 *init_sequence; struct { + /* Mutex to synchronize access to gamma curve locking */ struct mutex lock; u32 *curves; int num_values; diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h index 5fc469101fc1cb..3e09147435a58c 100644 --- a/drivers/staging/greybus/gb-camera.h +++ b/drivers/staging/greybus/gb-camera.h @@ -92,8 +92,8 @@ struct gb_camera_ops { unsigned int *flags, struct gb_camera_stream *streams, struct gb_camera_csi_params *csi_params); int (*capture)(void *priv, u32 request_id, - unsigned int streams, unsigned int num_frames, - size_t settings_size, const void *settings); + unsigned int streams, unsigned int num_frames, + size_t settings_size, const void *settings); int (*flush)(void *priv, u32 *request_id); }; diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c index 0e4ae01eb00ff2..24e9c909fa0211 100644 --- a/drivers/staging/greybus/spilib.c +++ b/drivers/staging/greybus/spilib.c @@ -490,10 +490,10 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev, int ret; u8 i; - /* Allocate master with space for data */ - ctlr = spi_alloc_master(dev, sizeof(*spi)); + /* Allocate host with space for data */ + ctlr = spi_alloc_host(dev, sizeof(*spi)); if (!ctlr) { - dev_err(dev, "cannot alloc SPI master\n"); + dev_err(dev, "cannot alloc SPI host\n"); return -ENOMEM; } diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index cd00d960756593..4ae1a7039418b1 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -547,7 +547,8 @@ static int ad5933_ring_preenable(struct iio_dev *indio_dev) struct ad5933_state *st = iio_priv(indio_dev); int ret; - if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) + if (bitmap_empty(indio_dev->active_scan_mask, + iio_get_masklength(indio_dev))) return -EINVAL; ret = ad5933_reset(st); @@ -625,7 +626,7 @@ static void ad5933_work(struct work_struct *work) if (status & AD5933_STAT_DATA_VALID) { int scan_count = bitmap_weight(indio_dev->active_scan_mask, - indio_dev->masklength); + iio_get_masklength(indio_dev)); ret = ad5933_i2c_read(st->client, test_bit(1, indio_dev->active_scan_mask) ? 
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, diff --git a/drivers/staging/ks7010/Kconfig b/drivers/staging/ks7010/Kconfig deleted file mode 100644 index 8ea6c092867980..00000000000000 --- a/drivers/staging/ks7010/Kconfig +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config KS7010 - tristate "KeyStream KS7010 SDIO support" - depends on MMC && WIRELESS - select WIRELESS_EXT - select WEXT_PRIV - select FW_LOADER - select CRYPTO - select CRYPTO_HASH - select CRYPTO_MICHAEL_MIC - help - This is a driver for KeyStream KS7010 based SDIO WIFI cards. It is - found on at least later Spectec SDW-821 (FCC-ID "S2Y-WLAN-11G-K" only, - sadly not FCC-ID "S2Y-WLAN-11B-G") and Spectec SDW-823 microSD cards. diff --git a/drivers/staging/ks7010/Makefile b/drivers/staging/ks7010/Makefile deleted file mode 100644 index 009851a32310c2..00000000000000 --- a/drivers/staging/ks7010/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_KS7010) += ks7010.o - -ks7010-y := ks_hostif.o ks_wlan_net.o ks7010_sdio.o diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO deleted file mode 100644 index 80c97543b97728..00000000000000 --- a/drivers/staging/ks7010/TODO +++ /dev/null @@ -1,36 +0,0 @@ -KS7010 Linux driver -=================== - -This driver is based on source code from the Ben Nanonote extra repository [1] -which is based on the original v007 release from Renesas [2]. Some more -background info about the chipset can be found here [3] and here [4]. Thank -you to all which already participated in cleaning up the driver so far! - -[1] http://projects.qi-hardware.com/index.php/p/openwrt-packages/source/tree/master/ks7010/src -[2] http://downloads.qi-hardware.com/software/ks7010_sdio_v007.tar.bz2 -[3] http://en.qi-hardware.com/wiki/Ben_NanoNote_Wi-Fi -[4] https://wikidevi.com/wiki/Renesas - -TODO ----- - -First a few words what not to do (at least not blindly): - -- don't be overly strict with the 80 char limit. Only if it REALLY makes the - code more readable - -Now the TODOs: - -- fix codechecker warnings (checkpatch, sparse, smatch). But PLEASE make sure - that you are not only silencing the warning but really fixing code. You - should understand the change you submit. -- fix the 'card removal' event when card is inserted when booting -- check what other upstream wireless mechanisms can be used instead of the - custom ones here -- Switch to use LIB80211. -- Switch to use MAC80211. -- Switch to use CFG80211. 
- -Please send any patches to: -Greg Kroah-Hartman -Linux Driver Project Developer List diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h deleted file mode 100644 index 1eee774319ad3f..00000000000000 --- a/drivers/staging/ks7010/eap_packet.h +++ /dev/null @@ -1,70 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef EAP_PACKET_H -#define EAP_PACKET_H - -#include -#include -#include - -struct ether_hdr { - unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ - unsigned char h_source[ETH_ALEN]; /* source ether addr */ - unsigned char h_dest_snap; - unsigned char h_source_snap; - unsigned char h_command; - unsigned char h_vendor_id[3]; - __be16 h_proto; /* packet type ID field */ - /* followed by length octets of data */ -} __packed; - -#define ETHER_HDR_SIZE sizeof(struct ether_hdr) - -struct ieee802_1x_hdr { - unsigned char version; - unsigned char type; - unsigned short length; - /* followed by length octets of data */ -} __packed; - -enum { - IEEE802_1X_TYPE_EAP_PACKET = 0, - IEEE802_1X_TYPE_EAPOL_START = 1, - IEEE802_1X_TYPE_EAPOL_LOGOFF = 2, - IEEE802_1X_TYPE_EAPOL_KEY = 3, - IEEE802_1X_TYPE_EAPOL_ENCAPSULATED_ASF_ALERT = 4 -}; - -#define WPA_NONCE_LEN 32 -#define WPA_REPLAY_COUNTER_LEN 8 - -struct wpa_eapol_key { - unsigned char type; - __be16 key_info; - unsigned short key_length; - unsigned char replay_counter[WPA_REPLAY_COUNTER_LEN]; - unsigned char key_nonce[WPA_NONCE_LEN]; - unsigned char key_iv[16]; - unsigned char key_rsc[8]; - unsigned char key_id[8]; /* Reserved in IEEE 802.11i/RSN */ - unsigned char key_mic[16]; - unsigned short key_data_length; - /* followed by key_data_length bytes of key_data */ -} __packed; - -#define WPA_KEY_INFO_TYPE_MASK GENMASK(2, 0) -#define WPA_KEY_INFO_TYPE_HMAC_MD5_RC4 BIT(0) -#define WPA_KEY_INFO_TYPE_HMAC_SHA1_AES BIT(1) -#define WPA_KEY_INFO_KEY_TYPE BIT(3) /* 1 = Pairwise, 0 = Group key */ -/* bit4..5 is used in WPA, but is reserved in IEEE 802.11i/RSN */ -#define WPA_KEY_INFO_KEY_INDEX_MASK GENMASK(5, 4) -#define WPA_KEY_INFO_KEY_INDEX_SHIFT 4 -#define WPA_KEY_INFO_INSTALL BIT(6) /* pairwise */ -#define WPA_KEY_INFO_TXRX BIT(6) /* group */ -#define WPA_KEY_INFO_ACK BIT(7) -#define WPA_KEY_INFO_MIC BIT(8) -#define WPA_KEY_INFO_SECURE BIT(9) -#define WPA_KEY_INFO_ERROR BIT(10) -#define WPA_KEY_INFO_REQUEST BIT(11) -#define WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */ - -#endif /* EAP_PACKET_H */ diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c deleted file mode 100644 index 8df0e77b57f6ca..00000000000000 --- a/drivers/staging/ks7010/ks7010_sdio.c +++ /dev/null @@ -1,1143 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Driver for KeyStream, KS7010 based SDIO cards. - * - * Copyright (C) 2006-2008 KeyStream Corp. - * Copyright (C) 2009 Renesas Technology Corp. 
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang - */ - -#include -#include -#include -#include -#include -#include -#include -#include "ks_wlan.h" -#include "ks_hostif.h" - -#define ROM_FILE "ks7010sd.rom" - -/* SDIO KeyStream vendor and device */ -#define SDIO_VENDOR_ID_KS_CODE_A 0x005b -#define SDIO_VENDOR_ID_KS_CODE_B 0x0023 - -/* Older sources suggest earlier versions were named 7910 or 79xx */ -#define SDIO_DEVICE_ID_KS_7010 0x7910 - -/* Read/Write Status Register */ -#define READ_STATUS_REG 0x000000 -#define WRITE_STATUS_REG 0x00000C -enum reg_status_type { - REG_STATUS_BUSY, - REG_STATUS_IDLE -}; - -/* Read Index Register */ -#define READ_INDEX_REG 0x000004 - -/* Read Data Size Register */ -#define READ_DATA_SIZE_REG 0x000008 - -/* Write Index Register */ -#define WRITE_INDEX_REG 0x000010 - -/* - * Write Status/Read Data Size Register - * for network packet (less than 2048 bytes data) - */ -#define WSTATUS_RSIZE_REG 0x000014 - -/* Write Status Register value */ -#define WSTATUS_MASK 0x80 - -/* Read Data Size Register value [10:4] */ -#define RSIZE_MASK 0x7F - -/* ARM to SD interrupt Enable */ -#define INT_ENABLE_REG 0x000020 -/* ARM to SD interrupt Pending */ -#define INT_PENDING_REG 0x000024 - -#define INT_GCR_B BIT(7) -#define INT_GCR_A BIT(6) -#define INT_WRITE_STATUS BIT(5) -#define INT_WRITE_INDEX BIT(4) -#define INT_WRITE_SIZE BIT(3) -#define INT_READ_STATUS BIT(2) -#define INT_READ_INDEX BIT(1) -#define INT_READ_SIZE BIT(0) - -/* General Communication Register A */ -#define GCR_A_REG 0x000028 -enum gen_com_reg_a { - GCR_A_INIT, - GCR_A_REMAP, - GCR_A_RUN -}; - -/* General Communication Register B */ -#define GCR_B_REG 0x00002C -enum gen_com_reg_b { - GCR_B_ACTIVE, - GCR_B_DOZE -}; - -/* Wakeup Register */ -#define WAKEUP_REG 0x008018 -#define WAKEUP_REQ 0x5a - -/* AHB Data Window 0x010000-0x01FFFF */ -#define DATA_WINDOW 0x010000 -#define WINDOW_SIZE (64 * 1024) - -#define KS7010_IRAM_ADDRESS 0x06000000 - -#define KS7010_IO_BLOCK_SIZE 512 - -/** - * struct ks_sdio_card - SDIO device data. - * - * Structure is used as the &struct sdio_func private data. - * - * @func: Pointer to the SDIO function device. - * @priv: Pointer to the &struct net_device private data. 
- */ -struct ks_sdio_card { - struct sdio_func *func; - struct ks_wlan_private *priv; -}; - -static struct sdio_func *ks7010_to_func(struct ks_wlan_private *priv) -{ - struct ks_sdio_card *ks_sdio = priv->if_hw; - - return ks_sdio->func; -} - -/* Read single byte from device address into byte (CMD52) */ -static int ks7010_sdio_readb(struct ks_wlan_private *priv, - u32 address, u8 *byte) -{ - struct sdio_func *func = ks7010_to_func(priv); - int ret; - - *byte = sdio_readb(func, address, &ret); - - return ret; -} - -/* Read length bytes from device address into buffer (CMD53) */ -static int ks7010_sdio_read(struct ks_wlan_private *priv, u32 address, - u8 *buffer, unsigned int length) -{ - struct sdio_func *func = ks7010_to_func(priv); - - return sdio_memcpy_fromio(func, buffer, address, length); -} - -/* Write single byte to device address (CMD52) */ -static int ks7010_sdio_writeb(struct ks_wlan_private *priv, - u32 address, u8 byte) -{ - struct sdio_func *func = ks7010_to_func(priv); - int ret; - - sdio_writeb(func, byte, address, &ret); - - return ret; -} - -/* Write length bytes to device address from buffer (CMD53) */ -static int ks7010_sdio_write(struct ks_wlan_private *priv, u32 address, - u8 *buffer, unsigned int length) -{ - struct sdio_func *func = ks7010_to_func(priv); - - return sdio_memcpy_toio(func, address, buffer, length); -} - -static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv) -{ - int ret; - - /* clear request */ - atomic_set(&priv->sleepstatus.doze_request, 0); - - if (atomic_read(&priv->sleepstatus.status) == 0) { - ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE); - if (ret) { - netdev_err(priv->net_dev, "write GCR_B_REG\n"); - goto set_sleep_mode; - } - atomic_set(&priv->sleepstatus.status, 1); - priv->last_doze = jiffies; - } - -set_sleep_mode: - priv->sleep_mode = atomic_read(&priv->sleepstatus.status); -} - -static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv) -{ - int ret; - - /* clear request */ - atomic_set(&priv->sleepstatus.wakeup_request, 0); - - if (atomic_read(&priv->sleepstatus.status) == 1) { - ret = ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ); - if (ret) { - netdev_err(priv->net_dev, "write WAKEUP_REG\n"); - goto set_sleep_mode; - } - atomic_set(&priv->sleepstatus.status, 0); - priv->last_wakeup = jiffies; - ++priv->wakeup_count; - } - -set_sleep_mode: - priv->sleep_mode = atomic_read(&priv->sleepstatus.status); -} - -void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv) -{ - int ret; - - if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) { - ret = ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ); - if (ret) - netdev_err(priv->net_dev, "write WAKEUP_REG\n"); - - priv->last_wakeup = jiffies; - ++priv->wakeup_count; - } -} - -static void _ks_wlan_hw_power_save(struct ks_wlan_private *priv) -{ - u8 byte; - int ret; - - if (priv->reg.power_mgmt == POWER_MGMT_ACTIVE) - return; - - if (priv->reg.operation_mode != MODE_INFRASTRUCTURE) - return; - - if (!is_connect_status(priv->connect_status)) - return; - - if (priv->dev_state != DEVICE_STATE_SLEEP) - return; - - if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) - return; - - netdev_dbg(priv->net_dev, - "STATUS:\n" - "- psstatus.status = %d\n" - "- psstatus.confirm_wait = %d\n" - "- psstatus.snooze_guard = %d\n" - "- txq_count = %d\n", - atomic_read(&priv->psstatus.status), - atomic_read(&priv->psstatus.confirm_wait), - atomic_read(&priv->psstatus.snooze_guard), - txq_count(priv)); - - if (atomic_read(&priv->psstatus.confirm_wait) || - 
atomic_read(&priv->psstatus.snooze_guard) || - txq_has_space(priv)) { - queue_delayed_work(priv->wq, &priv->rw_dwork, 0); - return; - } - - ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &byte); - if (ret) { - netdev_err(priv->net_dev, "read INT_PENDING_REG\n"); - goto queue_delayed_work; - } - if (byte) - goto queue_delayed_work; - - ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE); - if (ret) { - netdev_err(priv->net_dev, "write GCR_B_REG\n"); - goto queue_delayed_work; - } - atomic_set(&priv->psstatus.status, PS_SNOOZE); - - return; - -queue_delayed_work: - queue_delayed_work(priv->wq, &priv->rw_dwork, 1); -} - -int ks_wlan_hw_power_save(struct ks_wlan_private *priv) -{ - queue_delayed_work(priv->wq, &priv->rw_dwork, 1); - return 0; -} - -static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p, - unsigned long size, - void (*complete_handler)(struct ks_wlan_private *priv, - struct sk_buff *skb), - struct sk_buff *skb) -{ - struct tx_device_buffer *sp; - int ret; - - if (priv->dev_state < DEVICE_STATE_BOOT) { - ret = -EPERM; - goto err_complete; - } - - if ((TX_DEVICE_BUFF_SIZE - 1) <= txq_count(priv)) { - netdev_err(priv->net_dev, "tx buffer overflow\n"); - ret = -EOVERFLOW; - goto err_complete; - } - - sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qtail]; - sp->sendp = p; - sp->size = size; - sp->complete_handler = complete_handler; - sp->skb = skb; - inc_txqtail(priv); - - return 0; - -err_complete: - kfree(p); - if (complete_handler) - (*complete_handler)(priv, skb); - - return ret; -} - -/* write data */ -static int write_to_device(struct ks_wlan_private *priv, u8 *buffer, - unsigned long size) -{ - struct hostif_hdr *hdr; - int ret; - - hdr = (struct hostif_hdr *)buffer; - - if (le16_to_cpu(hdr->event) < HIF_DATA_REQ || - le16_to_cpu(hdr->event) > HIF_REQ_MAX) { - netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event); - return 0; - } - - ret = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size); - if (ret) { - netdev_err(priv->net_dev, "write DATA_WINDOW\n"); - return ret; - } - - ret = ks7010_sdio_writeb(priv, WRITE_STATUS_REG, REG_STATUS_BUSY); - if (ret) { - netdev_err(priv->net_dev, "write WRITE_STATUS_REG\n"); - return ret; - } - - return 0; -} - -static void tx_device_task(struct ks_wlan_private *priv) -{ - struct tx_device_buffer *sp; - int ret; - - if (!txq_has_space(priv) || - atomic_read(&priv->psstatus.status) == PS_SNOOZE) - return; - - sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead]; - if (priv->dev_state >= DEVICE_STATE_BOOT) { - ret = write_to_device(priv, sp->sendp, sp->size); - if (ret) { - netdev_err(priv->net_dev, - "write_to_device error !!(%d)\n", ret); - queue_delayed_work(priv->wq, &priv->rw_dwork, 1); - return; - } - } - kfree(sp->sendp); - if (sp->complete_handler) /* TX Complete */ - (*sp->complete_handler)(priv, sp->skb); - inc_txqhead(priv); - - if (txq_has_space(priv)) - queue_delayed_work(priv->wq, &priv->rw_dwork, 0); -} - -int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size, - void (*complete_handler)(struct ks_wlan_private *priv, - struct sk_buff *skb), - struct sk_buff *skb) -{ - int result; - struct hostif_hdr *hdr; - - hdr = (struct hostif_hdr *)p; - - if (le16_to_cpu(hdr->event) < HIF_DATA_REQ || - le16_to_cpu(hdr->event) > HIF_REQ_MAX) { - netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event); - return 0; - } - - /* add event to hostt buffer */ - priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event); - priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE; - - 
spin_lock_bh(&priv->tx_dev.tx_dev_lock); - result = enqueue_txdev(priv, p, size, complete_handler, skb); - spin_unlock_bh(&priv->tx_dev.tx_dev_lock); - - if (txq_has_space(priv)) - queue_delayed_work(priv->wq, &priv->rw_dwork, 0); - - return result; -} - -static void rx_event_task(struct tasklet_struct *t) -{ - struct ks_wlan_private *priv = from_tasklet(priv, t, rx_bh_task); - struct rx_device_buffer *rp; - - if (rxq_has_space(priv) && priv->dev_state >= DEVICE_STATE_BOOT) { - rp = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead]; - hostif_receive(priv, rp->data, rp->size); - inc_rxqhead(priv); - - if (rxq_has_space(priv)) - tasklet_schedule(&priv->rx_bh_task); - } -} - -static void ks_wlan_hw_rx(struct ks_wlan_private *priv, size_t size) -{ - int ret; - struct rx_device_buffer *rx_buffer; - struct hostif_hdr *hdr; - u16 event = 0; - - /* receive data */ - if (rxq_count(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) { - netdev_err(priv->net_dev, "rx buffer overflow\n"); - return; - } - rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail]; - - ret = ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0], - hif_align_size(size)); - if (ret) - return; - - /* length check */ - if (size > 2046 || size == 0) { -#ifdef DEBUG - print_hex_dump_bytes("INVALID DATA dump: ", - DUMP_PREFIX_OFFSET, - rx_buffer->data, 32); -#endif - ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, - REG_STATUS_IDLE); - if (ret) - netdev_err(priv->net_dev, "write READ_STATUS_REG\n"); - - /* length check fail */ - return; - } - - hdr = (struct hostif_hdr *)&rx_buffer->data[0]; - rx_buffer->size = le16_to_cpu(hdr->size) + sizeof(hdr->size); - event = le16_to_cpu(hdr->event); - inc_rxqtail(priv); - - ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, REG_STATUS_IDLE); - if (ret) - netdev_err(priv->net_dev, "write READ_STATUS_REG\n"); - - if (atomic_read(&priv->psstatus.confirm_wait) && is_hif_conf(event)) { - netdev_dbg(priv->net_dev, "IS_HIF_CONF true !!\n"); - atomic_dec(&priv->psstatus.confirm_wait); - } - - tasklet_schedule(&priv->rx_bh_task); -} - -static void ks7010_rw_function(struct work_struct *work) -{ - struct ks_wlan_private *priv = container_of(work, - struct ks_wlan_private, - rw_dwork.work); - struct sdio_func *func = ks7010_to_func(priv); - u8 byte; - int ret; - - /* wait after DOZE */ - if (time_after(priv->last_doze + msecs_to_jiffies(30), jiffies)) { - netdev_dbg(priv->net_dev, "wait after DOZE\n"); - queue_delayed_work(priv->wq, &priv->rw_dwork, 1); - return; - } - - /* wait after WAKEUP */ - while (time_after(priv->last_wakeup + msecs_to_jiffies(30), jiffies)) { - netdev_dbg(priv->net_dev, "wait after WAKEUP\n"); - dev_info(&func->dev, "wake: %lu %lu\n", - priv->last_wakeup + msecs_to_jiffies(30), jiffies); - msleep(30); - } - - sdio_claim_host(func); - - /* power save wakeup */ - if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) { - if (txq_has_space(priv)) { - ks_wlan_hw_wakeup_request(priv); - queue_delayed_work(priv->wq, &priv->rw_dwork, 1); - } - goto release_host; - } - - /* sleep mode doze */ - if (atomic_read(&priv->sleepstatus.doze_request) == 1) { - ks_wlan_hw_sleep_doze_request(priv); - goto release_host; - } - /* sleep mode wakeup */ - if (atomic_read(&priv->sleepstatus.wakeup_request) == 1) { - ks_wlan_hw_sleep_wakeup_request(priv); - goto release_host; - } - - /* read (WriteStatus/ReadDataSize FN1:00_0014) */ - ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte); - if (ret) { - netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG psstatus=%d\n", - atomic_read(&priv->psstatus.status)); - goto 
release_host; - } - - if (byte & RSIZE_MASK) { /* Read schedule */ - ks_wlan_hw_rx(priv, (size_t)((byte & RSIZE_MASK) << 4)); - } - if ((byte & WSTATUS_MASK)) - tx_device_task(priv); - - _ks_wlan_hw_power_save(priv); - -release_host: - sdio_release_host(func); -} - -static void ks_sdio_interrupt(struct sdio_func *func) -{ - int ret; - struct ks_sdio_card *card; - struct ks_wlan_private *priv; - u8 status, rsize, byte; - - card = sdio_get_drvdata(func); - priv = card->priv; - - if (priv->dev_state < DEVICE_STATE_BOOT) - goto queue_delayed_work; - - ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &status); - if (ret) { - netdev_err(priv->net_dev, "read INT_PENDING_REG\n"); - goto queue_delayed_work; - } - - /* schedule task for interrupt status */ - /* bit7 -> Write General Communication B register */ - /* read (General Communication B register) */ - /* bit5 -> Write Status Idle */ - /* bit2 -> Read Status Busy */ - if (status & INT_GCR_B || - atomic_read(&priv->psstatus.status) == PS_SNOOZE) { - ret = ks7010_sdio_readb(priv, GCR_B_REG, &byte); - if (ret) { - netdev_err(priv->net_dev, "read GCR_B_REG\n"); - goto queue_delayed_work; - } - if (byte == GCR_B_ACTIVE) { - if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) { - atomic_set(&priv->psstatus.status, PS_WAKEUP); - priv->wakeup_count = 0; - } - complete(&priv->psstatus.wakeup_wait); - } - } - - do { - /* read (WriteStatus/ReadDataSize FN1:00_0014) */ - ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte); - if (ret) { - netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG\n"); - goto queue_delayed_work; - } - rsize = byte & RSIZE_MASK; - if (rsize != 0) /* Read schedule */ - ks_wlan_hw_rx(priv, (size_t)(rsize << 4)); - - if (byte & WSTATUS_MASK) { - if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) { - if (txq_has_space(priv)) { - ks_wlan_hw_wakeup_request(priv); - queue_delayed_work(priv->wq, - &priv->rw_dwork, 1); - return; - } - } else { - tx_device_task(priv); - } - } - } while (rsize); - -queue_delayed_work: - queue_delayed_work(priv->wq, &priv->rw_dwork, 0); -} - -static int trx_device_init(struct ks_wlan_private *priv) -{ - priv->tx_dev.qhead = 0; - priv->tx_dev.qtail = 0; - - priv->rx_dev.qhead = 0; - priv->rx_dev.qtail = 0; - - spin_lock_init(&priv->tx_dev.tx_dev_lock); - spin_lock_init(&priv->rx_dev.rx_dev_lock); - - tasklet_setup(&priv->rx_bh_task, rx_event_task); - - return 0; -} - -static void trx_device_exit(struct ks_wlan_private *priv) -{ - struct tx_device_buffer *sp; - - /* tx buffer clear */ - while (txq_has_space(priv)) { - sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead]; - kfree(sp->sendp); - if (sp->complete_handler) /* TX Complete */ - (*sp->complete_handler)(priv, sp->skb); - inc_txqhead(priv); - } - - tasklet_kill(&priv->rx_bh_task); -} - -static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index) -{ - int ret; - unsigned char *data_buf; - - data_buf = kmemdup(&index, sizeof(u32), GFP_KERNEL); - if (!data_buf) - return -ENOMEM; - - ret = ks7010_sdio_write(priv, WRITE_INDEX_REG, data_buf, sizeof(index)); - if (ret) - goto err_free_data_buf; - - ret = ks7010_sdio_write(priv, READ_INDEX_REG, data_buf, sizeof(index)); - if (ret) - goto err_free_data_buf; - - return 0; - -err_free_data_buf: - kfree(data_buf); - - return ret; -} - -#define ROM_BUFF_SIZE (64 * 1024) -static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address, - u8 *data, unsigned int size) -{ - int ret; - u8 *read_buf; - - read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL); - if (!read_buf) - return -ENOMEM; - - 
ret = ks7010_sdio_read(priv, address, read_buf, size); - if (ret) - goto err_free_read_buf; - - if (memcmp(data, read_buf, size) != 0) { - ret = -EIO; - netdev_err(priv->net_dev, "data compare error (%d)\n", ret); - goto err_free_read_buf; - } - - return 0; - -err_free_read_buf: - kfree(read_buf); - - return ret; -} - -static int ks7010_copy_firmware(struct ks_wlan_private *priv, - const struct firmware *fw_entry) -{ - unsigned int length; - unsigned int size; - unsigned int offset; - unsigned int n = 0; - u8 *rom_buf; - int ret; - - rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL); - if (!rom_buf) - return -ENOMEM; - - length = fw_entry->size; - - do { - if (length >= ROM_BUFF_SIZE) { - size = ROM_BUFF_SIZE; - length = length - ROM_BUFF_SIZE; - } else { - size = length; - length = 0; - } - if (size == 0) - break; - - memcpy(rom_buf, fw_entry->data + n, size); - - offset = n; - ret = ks7010_sdio_update_index(priv, - KS7010_IRAM_ADDRESS + offset); - if (ret) - goto free_rom_buf; - - ret = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size); - if (ret) - goto free_rom_buf; - - ret = ks7010_sdio_data_compare(priv, - DATA_WINDOW, rom_buf, size); - if (ret) - goto free_rom_buf; - - n += size; - - } while (size); - - ret = ks7010_sdio_writeb(priv, GCR_A_REG, GCR_A_REMAP); - -free_rom_buf: - kfree(rom_buf); - return ret; -} - -static int ks7010_upload_firmware(struct ks_sdio_card *card) -{ - struct ks_wlan_private *priv = card->priv; - struct sdio_func *func = ks7010_to_func(priv); - unsigned int n; - u8 byte = 0; - int ret; - const struct firmware *fw_entry = NULL; - - sdio_claim_host(func); - - /* Firmware running ? */ - ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte); - if (ret) - goto release_host; - if (byte == GCR_A_RUN) { - netdev_dbg(priv->net_dev, "MAC firmware running ...\n"); - ret = -EBUSY; - goto release_host; - } - - ret = request_firmware(&fw_entry, ROM_FILE, - &func->dev); - if (ret) - goto release_host; - - ret = ks7010_copy_firmware(priv, fw_entry); - if (ret) - goto release_firmware; - - /* Firmware running check */ - for (n = 0; n < 50; ++n) { - usleep_range(10000, 11000); /* wait_ms(10); */ - ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte); - if (ret) - goto release_firmware; - - if (byte == GCR_A_RUN) - break; - } - if ((50) <= n) { - netdev_err(priv->net_dev, "firmware can't start\n"); - ret = -EIO; - goto release_firmware; - } - - ret = 0; - - release_firmware: - release_firmware(fw_entry); - release_host: - sdio_release_host(func); - - return ret; -} - -static void ks7010_sme_enqueue_events(struct ks_wlan_private *priv) -{ - static const u16 init_events[] = { - SME_GET_EEPROM_CKSUM, SME_STOP_REQUEST, - SME_RTS_THRESHOLD_REQUEST, SME_FRAGMENTATION_THRESHOLD_REQUEST, - SME_WEP_INDEX_REQUEST, SME_WEP_KEY1_REQUEST, - SME_WEP_KEY2_REQUEST, SME_WEP_KEY3_REQUEST, - SME_WEP_KEY4_REQUEST, SME_WEP_FLAG_REQUEST, - SME_RSN_ENABLED_REQUEST, SME_MODE_SET_REQUEST, - SME_START_REQUEST - }; - int ev; - - for (ev = 0; ev < ARRAY_SIZE(init_events); ev++) - hostif_sme_enqueue(priv, init_events[ev]); -} - -static void ks7010_card_init(struct ks_wlan_private *priv) -{ - init_completion(&priv->confirm_wait); - - /* get mac address & firmware version */ - hostif_sme_enqueue(priv, SME_START); - - if (!wait_for_completion_interruptible_timeout - (&priv->confirm_wait, 5 * HZ)) { - netdev_dbg(priv->net_dev, "wait time out!! 
SME_START\n"); - } - - if (priv->mac_address_valid && priv->version_size != 0) - priv->dev_state = DEVICE_STATE_PREINIT; - - ks7010_sme_enqueue_events(priv); - - if (!wait_for_completion_interruptible_timeout - (&priv->confirm_wait, 5 * HZ)) { - netdev_dbg(priv->net_dev, "wait time out!! wireless parameter set\n"); - } - - if (priv->dev_state >= DEVICE_STATE_PREINIT) { - netdev_dbg(priv->net_dev, "DEVICE READY!!\n"); - priv->dev_state = DEVICE_STATE_READY; - } -} - -static void ks7010_init_defaults(struct ks_wlan_private *priv) -{ - priv->reg.tx_rate = TX_RATE_AUTO; - priv->reg.preamble = LONG_PREAMBLE; - priv->reg.power_mgmt = POWER_MGMT_ACTIVE; - priv->reg.scan_type = ACTIVE_SCAN; - priv->reg.beacon_lost_count = 20; - priv->reg.rts = 2347UL; - priv->reg.fragment = 2346UL; - priv->reg.phy_type = D_11BG_COMPATIBLE_MODE; - priv->reg.cts_mode = CTS_MODE_FALSE; - priv->reg.rate_set.body[11] = TX_RATE_54M; - priv->reg.rate_set.body[10] = TX_RATE_48M; - priv->reg.rate_set.body[9] = TX_RATE_36M; - priv->reg.rate_set.body[8] = TX_RATE_18M; - priv->reg.rate_set.body[7] = TX_RATE_9M; - priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE; - priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE; - priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE; - priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE; - priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE; - priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE; - priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE; - priv->reg.tx_rate = TX_RATE_FULL_AUTO; - priv->reg.rate_set.size = 12; -} - -static int ks7010_sdio_setup_irqs(struct sdio_func *func) -{ - int ret; - - /* interrupt disable */ - sdio_writeb(func, 0, INT_ENABLE_REG, &ret); - if (ret) - goto irq_error; - - sdio_writeb(func, 0xff, INT_PENDING_REG, &ret); - if (ret) - goto irq_error; - - /* setup interrupt handler */ - ret = sdio_claim_irq(func, ks_sdio_interrupt); - -irq_error: - return ret; -} - -static void ks7010_sdio_init_irqs(struct sdio_func *func, - struct ks_wlan_private *priv) -{ - u8 byte; - int ret; - - /* - * interrupt setting - * clear Interrupt status write - * (ARMtoSD_InterruptPending FN1:00_0024) - */ - sdio_claim_host(func); - ret = ks7010_sdio_writeb(priv, INT_PENDING_REG, 0xff); - sdio_release_host(func); - if (ret) - netdev_err(priv->net_dev, "write INT_PENDING_REG\n"); - - /* enable ks7010sdio interrupt */ - byte = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS); - sdio_claim_host(func); - ret = ks7010_sdio_writeb(priv, INT_ENABLE_REG, byte); - sdio_release_host(func); - if (ret) - netdev_err(priv->net_dev, "write INT_ENABLE_REG\n"); -} - -static void ks7010_private_init(struct ks_wlan_private *priv, - struct ks_sdio_card *card, - struct net_device *netdev) -{ - /* private memory initialize */ - priv->if_hw = card; - - priv->dev_state = DEVICE_STATE_PREBOOT; - priv->net_dev = netdev; - priv->firmware_version[0] = '\0'; - priv->version_size = 0; - priv->last_doze = jiffies; - priv->last_wakeup = jiffies; - memset(&priv->nstats, 0, sizeof(priv->nstats)); - memset(&priv->wstats, 0, sizeof(priv->wstats)); - - /* sleep mode */ - atomic_set(&priv->sleepstatus.status, 0); - atomic_set(&priv->sleepstatus.doze_request, 0); - atomic_set(&priv->sleepstatus.wakeup_request, 0); - - trx_device_init(priv); - hostif_init(priv); - ks_wlan_net_start(netdev); - ks7010_init_defaults(priv); -} - -static int ks7010_sdio_probe(struct sdio_func *func, - const struct sdio_device_id *device) -{ - struct ks_wlan_private *priv = NULL; - struct net_device *netdev = NULL; - struct 
ks_sdio_card *card; - int ret; - - card = kzalloc(sizeof(*card), GFP_KERNEL); - if (!card) - return -ENOMEM; - - card->func = func; - - sdio_claim_host(func); - - ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE); - if (ret) - goto err_free_card; - - dev_dbg(&card->func->dev, "multi_block=%d sdio_set_block_size()=%d %d\n", - func->card->cccr.multi_block, func->cur_blksize, ret); - - ret = sdio_enable_func(func); - if (ret) - goto err_free_card; - - ret = ks7010_sdio_setup_irqs(func); - if (ret) - goto err_disable_func; - - sdio_release_host(func); - - sdio_set_drvdata(func, card); - - dev_dbg(&card->func->dev, "class = 0x%X, vendor = 0x%X, device = 0x%X\n", - func->class, func->vendor, func->device); - - /* private memory allocate */ - netdev = alloc_etherdev(sizeof(*priv)); - if (!netdev) { - dev_err(&card->func->dev, "Unable to alloc new net device\n"); - goto err_release_irq; - } - - ret = dev_alloc_name(netdev, "wlan%d"); - if (ret < 0) { - dev_err(&card->func->dev, "Couldn't get name!\n"); - goto err_free_netdev; - } - - priv = netdev_priv(netdev); - - card->priv = priv; - SET_NETDEV_DEV(netdev, &card->func->dev); - - ks7010_private_init(priv, card, netdev); - - ret = ks7010_upload_firmware(card); - if (ret) { - netdev_err(priv->net_dev, - "firmware load failed !! ret = %d\n", ret); - goto err_free_netdev; - } - - ks7010_sdio_init_irqs(func, priv); - - priv->dev_state = DEVICE_STATE_BOOT; - - priv->wq = alloc_workqueue("wq", WQ_MEM_RECLAIM, 1); - if (!priv->wq) { - netdev_err(priv->net_dev, "create_workqueue failed !!\n"); - goto err_free_netdev; - } - - INIT_DELAYED_WORK(&priv->rw_dwork, ks7010_rw_function); - ks7010_card_init(priv); - - ret = register_netdev(priv->net_dev); - if (ret) - goto err_destroy_wq; - - return 0; - - err_destroy_wq: - destroy_workqueue(priv->wq); - err_free_netdev: - free_netdev(netdev); - err_release_irq: - sdio_claim_host(func); - sdio_release_irq(func); - err_disable_func: - sdio_disable_func(func); - err_free_card: - sdio_release_host(func); - sdio_set_drvdata(func, NULL); - kfree(card); - - return -ENODEV; -} - -/* send stop request to MAC */ -static int send_stop_request(struct sdio_func *func) -{ - struct hostif_stop_request *pp; - struct ks_sdio_card *card; - size_t size; - - card = sdio_get_drvdata(func); - - pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL); - if (!pp) - return -ENOMEM; - - size = sizeof(*pp) - sizeof(pp->header.size); - pp->header.size = cpu_to_le16(size); - pp->header.event = cpu_to_le16(HIF_STOP_REQ); - - sdio_claim_host(func); - write_to_device(card->priv, (u8 *)pp, hif_align_size(sizeof(*pp))); - sdio_release_host(func); - - kfree(pp); - return 0; -} - -static void ks7010_sdio_remove(struct sdio_func *func) -{ - int ret; - struct ks_sdio_card *card; - struct ks_wlan_private *priv; - - card = sdio_get_drvdata(func); - - if (!card) - return; - - priv = card->priv; - if (!priv) - goto err_free_card; - - ks_wlan_net_stop(priv->net_dev); - - /* interrupt disable */ - sdio_claim_host(func); - sdio_writeb(func, 0, INT_ENABLE_REG, &ret); - sdio_writeb(func, 0xff, INT_PENDING_REG, &ret); - sdio_release_host(func); - - ret = send_stop_request(func); - if (ret) /* memory allocation failure */ - goto err_free_card; - - if (priv->wq) - destroy_workqueue(priv->wq); - - hostif_exit(priv); - - unregister_netdev(priv->net_dev); - - trx_device_exit(priv); - free_netdev(priv->net_dev); - card->priv = NULL; - - sdio_claim_host(func); - sdio_release_irq(func); - sdio_disable_func(func); - sdio_release_host(func); -err_free_card: - 
sdio_set_drvdata(func, NULL); - kfree(card); -} - -static const struct sdio_device_id ks7010_sdio_ids[] = { - {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)}, - {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)}, - { /* all zero */ } -}; -MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids); - -static struct sdio_driver ks7010_sdio_driver = { - .name = "ks7010_sdio", - .id_table = ks7010_sdio_ids, - .probe = ks7010_sdio_probe, - .remove = ks7010_sdio_remove, -}; - -module_sdio_driver(ks7010_sdio_driver); -MODULE_AUTHOR("Sang Engineering, Qi-Hardware, KeyStream"); -MODULE_DESCRIPTION("Driver for KeyStream KS7010 based SDIO cards"); -MODULE_LICENSE("GPL v2"); -MODULE_FIRMWARE(ROM_FILE); diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c deleted file mode 100644 index af3825578d85f8..00000000000000 --- a/drivers/staging/ks7010/ks_hostif.c +++ /dev/null @@ -1,2312 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Driver for KeyStream wireless LAN cards. - * - * Copyright (C) 2005-2008 KeyStream Corp. - * Copyright (C) 2009 Renesas Technology Corp. - */ - -#include -#include -#include -#include -#include -#include "eap_packet.h" -#include "ks_wlan.h" -#include "ks_hostif.h" - -#define MICHAEL_MIC_KEY_LEN 8 -#define MICHAEL_MIC_LEN 8 - -static inline void inc_smeqhead(struct ks_wlan_private *priv) -{ - priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE; -} - -static inline void inc_smeqtail(struct ks_wlan_private *priv) -{ - priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE; -} - -static inline unsigned int cnt_smeqbody(struct ks_wlan_private *priv) -{ - return CIRC_CNT_TO_END(priv->sme_i.qhead, priv->sme_i.qtail, - SME_EVENT_BUFF_SIZE); -} - -static inline u8 get_byte(struct ks_wlan_private *priv) -{ - u8 data; - - data = *priv->rxp++; - /* length check in advance ! 
*/ - --(priv->rx_size); - return data; -} - -static inline u16 get_word(struct ks_wlan_private *priv) -{ - u16 data; - - data = (get_byte(priv) & 0xff); - data |= ((get_byte(priv) << 8) & 0xff00); - return data; -} - -static inline u32 get_dword(struct ks_wlan_private *priv) -{ - u32 data; - - data = (get_byte(priv) & 0xff); - data |= ((get_byte(priv) << 8) & 0x0000ff00); - data |= ((get_byte(priv) << 16) & 0x00ff0000); - data |= ((get_byte(priv) << 24) & 0xff000000); - return data; -} - -static void ks_wlan_hw_wakeup_task(struct work_struct *work) -{ - struct ks_wlan_private *priv; - int ps_status; - long time_left; - - priv = container_of(work, struct ks_wlan_private, wakeup_work); - ps_status = atomic_read(&priv->psstatus.status); - - if (ps_status == PS_SNOOZE) { - ks_wlan_hw_wakeup_request(priv); - time_left = wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait, - msecs_to_jiffies(20)); - if (time_left <= 0) { - netdev_dbg(priv->net_dev, "wake up timeout or interrupted !!!\n"); - schedule_work(&priv->wakeup_work); - return; - } - } -} - -static void ks_wlan_do_power_save(struct ks_wlan_private *priv) -{ - if (is_connect_status(priv->connect_status)) - hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST); - else - priv->dev_state = DEVICE_STATE_READY; -} - -static -int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info *ap_info) -{ - struct local_ap *ap; - union iwreq_data wrqu; - struct net_device *netdev = priv->net_dev; - u8 size; - - ap = &priv->current_ap; - - if (is_disconnect_status(priv->connect_status)) { - memset(ap, 0, sizeof(struct local_ap)); - return -EPERM; - } - - ether_addr_copy(ap->bssid, ap_info->bssid); - memcpy(ap->ssid.body, priv->reg.ssid.body, - priv->reg.ssid.size); - ap->ssid.size = priv->reg.ssid.size; - memcpy(ap->rate_set.body, ap_info->rate_set.body, - ap_info->rate_set.size); - ap->rate_set.size = ap_info->rate_set.size; - if (ap_info->ext_rate_set.size != 0) { - memcpy(&ap->rate_set.body[ap->rate_set.size], - ap_info->ext_rate_set.body, - ap_info->ext_rate_set.size); - ap->rate_set.size += ap_info->ext_rate_set.size; - } - ap->channel = ap_info->ds_parameter.channel; - ap->rssi = ap_info->rssi; - ap->sq = ap_info->sq; - ap->noise = ap_info->noise; - ap->capability = le16_to_cpu(ap_info->capability); - size = (ap_info->rsn.size <= RSN_IE_BODY_MAX) ? 
- ap_info->rsn.size : RSN_IE_BODY_MAX; - if ((ap_info->rsn_mode & RSN_MODE_WPA2) && - (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) { - ap->rsn_ie.id = RSN_INFO_ELEM_ID; - ap->rsn_ie.size = size; - memcpy(ap->rsn_ie.body, ap_info->rsn.body, size); - } else if ((ap_info->rsn_mode & RSN_MODE_WPA) && - (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) { - ap->wpa_ie.id = WPA_INFO_ELEM_ID; - ap->wpa_ie.size = size; - memcpy(ap->wpa_ie.body, ap_info->rsn.body, size); - } else { - ap->rsn_ie.id = 0; - ap->rsn_ie.size = 0; - ap->wpa_ie.id = 0; - ap->wpa_ie.size = 0; - } - - wrqu.data.length = 0; - wrqu.data.flags = 0; - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - if (is_connect_status(priv->connect_status)) { - ether_addr_copy(wrqu.ap_addr.sa_data, priv->current_ap.bssid); - netdev_dbg(priv->net_dev, - "IWEVENT: connect bssid=%pM\n", - wrqu.ap_addr.sa_data); - wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL); - } - netdev_dbg(priv->net_dev, "Link AP\n" - "- bssid=%pM\n" - "- essid=%s\n" - "- rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n" - "- channel=%d\n" - "- rssi=%d\n" - "- sq=%d\n" - "- capability=%04X\n" - "- rsn.mode=%d\n" - "- rsn.size=%d\n" - "- ext_rate_set_size=%d\n" - "- rate_set_size=%d\n", - ap->bssid, - &ap->ssid.body[0], - ap->rate_set.body[0], ap->rate_set.body[1], - ap->rate_set.body[2], ap->rate_set.body[3], - ap->rate_set.body[4], ap->rate_set.body[5], - ap->rate_set.body[6], ap->rate_set.body[7], - ap->channel, ap->rssi, ap->sq, ap->capability, - ap_info->rsn_mode, ap_info->rsn.size, - ap_info->ext_rate_set.size, ap_info->rate_set.size); - - return 0; -} - -static u8 read_ie(unsigned char *bp, u8 max, u8 *body) -{ - u8 size = (*(bp + 1) <= max) ? *(bp + 1) : max; - - memcpy(body, bp + 2, size); - return size; -} - -static int -michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result) -{ - u8 pad_data[4] = { priority, 0, 0, 0 }; - struct crypto_shash *tfm = NULL; - struct shash_desc *desc = NULL; - int ret; - - tfm = crypto_alloc_shash("michael_mic", 0, 0); - if (IS_ERR(tfm)) { - ret = PTR_ERR(tfm); - goto err; - } - - ret = crypto_shash_setkey(tfm, key, MICHAEL_MIC_KEY_LEN); - if (ret < 0) - goto err_free_tfm; - - desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); - if (!desc) { - ret = -ENOMEM; - goto err_free_tfm; - } - - desc->tfm = tfm; - - ret = crypto_shash_init(desc); - if (ret < 0) - goto err_free_desc; - - // Compute the MIC value - /* - * IEEE802.11i page 47 - * Figure 43g TKIP MIC processing format - * +--+--+--------+--+----+--+--+--+--+--+--+--+--+ - * |6 |6 |1 |3 |M |1 |1 |1 |1 |1 |1 |1 |1 | Octet - * +--+--+--------+--+----+--+--+--+--+--+--+--+--+ - * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7| - * +--+--+--------+--+----+--+--+--+--+--+--+--+--+ - */ - - ret = crypto_shash_update(desc, data, 12); - if (ret < 0) - goto err_free_desc; - - ret = crypto_shash_update(desc, pad_data, 4); - if (ret < 0) - goto err_free_desc; - - ret = crypto_shash_finup(desc, data + 12, len - 12, result); - -err_free_desc: - kfree_sensitive(desc); - -err_free_tfm: - crypto_free_shash(tfm); - -err: - return ret; -} - -static -int get_ap_information(struct ks_wlan_private *priv, struct ap_info *ap_info, - struct local_ap *ap) -{ - unsigned char *bp; - int bsize, offset; - - memset(ap, 0, sizeof(struct local_ap)); - - ether_addr_copy(ap->bssid, ap_info->bssid); - ap->rssi = ap_info->rssi; - ap->sq = ap_info->sq; - ap->noise = ap_info->noise; - ap->capability = le16_to_cpu(ap_info->capability); - ap->channel = ap_info->ch_info; - - bp = 
ap_info->body; - bsize = le16_to_cpu(ap_info->body_size); - offset = 0; - - while (bsize > offset) { - switch (*bp) { /* Information Element ID */ - case WLAN_EID_SSID: - ap->ssid.size = read_ie(bp, IEEE80211_MAX_SSID_LEN, - ap->ssid.body); - break; - case WLAN_EID_SUPP_RATES: - case WLAN_EID_EXT_SUPP_RATES: - if ((*(bp + 1) + ap->rate_set.size) <= - RATE_SET_MAX_SIZE) { - memcpy(&ap->rate_set.body[ap->rate_set.size], - bp + 2, *(bp + 1)); - ap->rate_set.size += *(bp + 1); - } else { - memcpy(&ap->rate_set.body[ap->rate_set.size], - bp + 2, - RATE_SET_MAX_SIZE - ap->rate_set.size); - ap->rate_set.size += - (RATE_SET_MAX_SIZE - ap->rate_set.size); - } - break; - case WLAN_EID_RSN: - ap->rsn_ie.id = *bp; - ap->rsn_ie.size = read_ie(bp, RSN_IE_BODY_MAX, - ap->rsn_ie.body); - break; - case WLAN_EID_VENDOR_SPECIFIC: /* WPA */ - /* WPA OUI check */ - if (memcmp(bp + 2, CIPHER_ID_WPA_WEP40, 4) == 0) { - ap->wpa_ie.id = *bp; - ap->wpa_ie.size = read_ie(bp, RSN_IE_BODY_MAX, - ap->wpa_ie.body); - } - break; - case WLAN_EID_DS_PARAMS: - case WLAN_EID_FH_PARAMS: - case WLAN_EID_CF_PARAMS: - case WLAN_EID_TIM: - case WLAN_EID_IBSS_PARAMS: - case WLAN_EID_COUNTRY: - case WLAN_EID_ERP_INFO: - break; - default: - netdev_err(priv->net_dev, - "unknown Element ID=%d\n", *bp); - break; - } - - offset += 2; /* id & size field */ - offset += *(bp + 1); /* +size offset */ - bp += (*(bp + 1) + 2); /* pointer update */ - } - - return 0; -} - -static -int hostif_data_indication_wpa(struct ks_wlan_private *priv, - unsigned short auth_type) -{ - struct ether_hdr *eth_hdr; - unsigned short eth_proto; - unsigned char recv_mic[MICHAEL_MIC_LEN]; - char buf[128]; - unsigned long now; - struct mic_failure *mic_failure; - u8 mic[MICHAEL_MIC_LEN]; - union iwreq_data wrqu; - unsigned int key_index = auth_type - 1; - struct wpa_key *key = &priv->wpa.key[key_index]; - - eth_hdr = (struct ether_hdr *)(priv->rxp); - eth_proto = ntohs(eth_hdr->h_proto); - - if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) { - netdev_err(priv->net_dev, "invalid data format\n"); - priv->nstats.rx_errors++; - return -EINVAL; - } - if (((auth_type == TYPE_PMK1 && - priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) || - (auth_type == TYPE_GMK1 && - priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP) || - (auth_type == TYPE_GMK2 && - priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP)) && - key->key_len) { - int ret; - - netdev_dbg(priv->net_dev, "TKIP: protocol=%04X: size=%u\n", - eth_proto, priv->rx_size); - /* MIC save */ - memcpy(&recv_mic[0], - (priv->rxp) + ((priv->rx_size) - sizeof(recv_mic)), - sizeof(recv_mic)); - priv->rx_size = priv->rx_size - sizeof(recv_mic); - - ret = michael_mic(key->rx_mic_key, priv->rxp, priv->rx_size, - 0, mic); - if (ret < 0) - return ret; - if (memcmp(mic, recv_mic, sizeof(mic)) != 0) { - now = jiffies; - mic_failure = &priv->wpa.mic_failure; - /* MIC FAILURE */ - if (mic_failure->last_failure_time && - (now - mic_failure->last_failure_time) / HZ >= 60) { - mic_failure->failure = 0; - } - netdev_err(priv->net_dev, "MIC FAILURE\n"); - if (mic_failure->failure == 0) { - mic_failure->failure = 1; - mic_failure->counter = 0; - } else if (mic_failure->failure == 1) { - mic_failure->failure = 2; - mic_failure->counter = - (u16)((now - mic_failure->last_failure_time) / HZ); - /* range 1-60 */ - if (!mic_failure->counter) - mic_failure->counter = 1; - } - priv->wpa.mic_failure.last_failure_time = now; - - /* needed parameters: count, keyid, key type, TSC */ - sprintf(buf, - "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast 
addr=%pM)", - key_index, - eth_hdr->h_dest[0] & 0x01 ? "broad" : "uni", - eth_hdr->h_source); - memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = strlen(buf); - wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu, - buf); - return -EINVAL; - } - } - return 0; -} - -static -void hostif_data_indication(struct ks_wlan_private *priv) -{ - unsigned int rx_ind_size; /* indicate data size */ - struct sk_buff *skb; - u16 auth_type; - unsigned char temp[256]; - struct ether_hdr *eth_hdr; - struct ieee802_1x_hdr *aa1x_hdr; - size_t size; - int ret; - - /* min length check */ - if (priv->rx_size <= ETH_HLEN) { - priv->nstats.rx_errors++; - return; - } - - auth_type = get_word(priv); /* AuthType */ - get_word(priv); /* Reserve Area */ - - eth_hdr = (struct ether_hdr *)(priv->rxp); - - /* source address check */ - if (ether_addr_equal(&priv->eth_addr[0], eth_hdr->h_source)) { - netdev_err(priv->net_dev, "invalid : source is own mac address !!\n"); - netdev_err(priv->net_dev, "eth_hdrernet->h_dest=%pM\n", eth_hdr->h_source); - priv->nstats.rx_errors++; - return; - } - - /* for WPA */ - if (auth_type != TYPE_DATA && priv->wpa.rsn_enabled) { - ret = hostif_data_indication_wpa(priv, auth_type); - if (ret) - return; - } - - if ((priv->connect_status & FORCE_DISCONNECT) || - priv->wpa.mic_failure.failure == 2) { - return; - } - - /* check 13th byte at rx data */ - switch (*(priv->rxp + 12)) { - case LLC_SAP_SNAP: - rx_ind_size = priv->rx_size - 6; - skb = dev_alloc_skb(rx_ind_size); - if (!skb) { - priv->nstats.rx_dropped++; - return; - } - netdev_dbg(priv->net_dev, "SNAP, rx_ind_size = %d\n", - rx_ind_size); - - size = ETH_ALEN * 2; - skb_put_data(skb, priv->rxp, size); - - /* (SNAP+UI..) skip */ - - size = rx_ind_size - (ETH_ALEN * 2); - skb_put_data(skb, ð_hdr->h_proto, size); - - aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + ETHER_HDR_SIZE); - break; - case LLC_SAP_NETBEUI: - rx_ind_size = (priv->rx_size + 2); - skb = dev_alloc_skb(rx_ind_size); - if (!skb) { - priv->nstats.rx_dropped++; - return; - } - netdev_dbg(priv->net_dev, "NETBEUI/NetBIOS rx_ind_size=%d\n", - rx_ind_size); - - /* 8802/FDDI MAC copy */ - skb_put_data(skb, priv->rxp, 12); - - /* NETBEUI size add */ - temp[0] = (((rx_ind_size - 12) >> 8) & 0xff); - temp[1] = ((rx_ind_size - 12) & 0xff); - skb_put_data(skb, temp, 2); - - /* copy after Type */ - skb_put_data(skb, priv->rxp + 12, rx_ind_size - 14); - - aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14); - break; - default: /* other rx data */ - netdev_err(priv->net_dev, "invalid data format\n"); - priv->nstats.rx_errors++; - return; - } - - if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY && - priv->wpa.rsn_enabled) - atomic_set(&priv->psstatus.snooze_guard, 1); - - /* rx indication */ - skb->dev = priv->net_dev; - skb->protocol = eth_type_trans(skb, skb->dev); - priv->nstats.rx_packets++; - priv->nstats.rx_bytes += rx_ind_size; - netif_rx(skb); -} - -static -void hostif_mib_get_confirm(struct ks_wlan_private *priv) -{ - struct net_device *dev = priv->net_dev; - u32 mib_status; - u32 mib_attribute; - - mib_status = get_dword(priv); - mib_attribute = get_dword(priv); - get_word(priv); /* mib_val_size */ - get_word(priv); /* mib_val_type */ - - if (mib_status) { - netdev_err(priv->net_dev, "attribute=%08X, status=%08X\n", - mib_attribute, mib_status); - return; - } - - switch (mib_attribute) { - case DOT11_MAC_ADDRESS: - hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS); - ether_addr_copy(priv->eth_addr, priv->rxp); - priv->mac_address_valid = true; - eth_hw_addr_set(dev, 
priv->eth_addr); - netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr); - break; - case DOT11_PRODUCT_VERSION: - priv->version_size = priv->rx_size; - memcpy(priv->firmware_version, priv->rxp, priv->rx_size); - priv->firmware_version[priv->rx_size] = '\0'; - netdev_info(dev, "firmware ver. = %s\n", - priv->firmware_version); - hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION); - /* wake_up_interruptible_all(&priv->confirm_wait); */ - complete(&priv->confirm_wait); - break; - case LOCAL_GAIN: - memcpy(&priv->gain, priv->rxp, sizeof(priv->gain)); - netdev_dbg(priv->net_dev, "tx_mode=%d, rx_mode=%d, tx_gain=%d, rx_gain=%d\n", - priv->gain.tx_mode, priv->gain.rx_mode, - priv->gain.tx_gain, priv->gain.rx_gain); - break; - case LOCAL_EEPROM_SUM: - memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum)); - if (priv->eeprom_sum.type != 0 && - priv->eeprom_sum.type != 1) { - netdev_err(dev, "LOCAL_EEPROM_SUM error!\n"); - return; - } - priv->eeprom_checksum = (priv->eeprom_sum.type == 0) ? - EEPROM_CHECKSUM_NONE : - (priv->eeprom_sum.result == 0) ? - EEPROM_NG : EEPROM_OK; - break; - default: - netdev_err(priv->net_dev, "mib_attribute=%08x\n", - (unsigned int)mib_attribute); - break; - } -} - -static -void hostif_mib_set_confirm(struct ks_wlan_private *priv) -{ - u32 mib_status; - u32 mib_attribute; - - mib_status = get_dword(priv); - mib_attribute = get_dword(priv); - - if (mib_status) { - /* in case of error */ - netdev_err(priv->net_dev, "error :: attribute=%08X, status=%08X\n", - mib_attribute, mib_status); - } - - switch (mib_attribute) { - case DOT11_RTS_THRESHOLD: - hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_CONFIRM); - break; - case DOT11_FRAGMENTATION_THRESHOLD: - hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_CONFIRM); - break; - case DOT11_WEP_DEFAULT_KEY_ID: - if (!priv->wpa.wpa_enabled) - hostif_sme_enqueue(priv, SME_WEP_INDEX_CONFIRM); - break; - case DOT11_WEP_DEFAULT_KEY_VALUE1: - if (priv->wpa.rsn_enabled) - hostif_sme_enqueue(priv, SME_SET_PMK_TSC); - else - hostif_sme_enqueue(priv, SME_WEP_KEY1_CONFIRM); - break; - case DOT11_WEP_DEFAULT_KEY_VALUE2: - if (priv->wpa.rsn_enabled) - hostif_sme_enqueue(priv, SME_SET_GMK1_TSC); - else - hostif_sme_enqueue(priv, SME_WEP_KEY2_CONFIRM); - break; - case DOT11_WEP_DEFAULT_KEY_VALUE3: - if (priv->wpa.rsn_enabled) - hostif_sme_enqueue(priv, SME_SET_GMK2_TSC); - else - hostif_sme_enqueue(priv, SME_WEP_KEY3_CONFIRM); - break; - case DOT11_WEP_DEFAULT_KEY_VALUE4: - if (!priv->wpa.rsn_enabled) - hostif_sme_enqueue(priv, SME_WEP_KEY4_CONFIRM); - break; - case DOT11_PRIVACY_INVOKED: - if (!priv->wpa.rsn_enabled) - hostif_sme_enqueue(priv, SME_WEP_FLAG_CONFIRM); - break; - case DOT11_RSN_ENABLED: - hostif_sme_enqueue(priv, SME_RSN_ENABLED_CONFIRM); - break; - case LOCAL_RSN_MODE: - hostif_sme_enqueue(priv, SME_RSN_MODE_CONFIRM); - break; - case LOCAL_MULTICAST_ADDRESS: - hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST); - break; - case LOCAL_MULTICAST_FILTER: - hostif_sme_enqueue(priv, SME_MULTICAST_CONFIRM); - break; - case LOCAL_CURRENTADDRESS: - priv->mac_address_valid = true; - break; - case DOT11_RSN_CONFIG_MULTICAST_CIPHER: - hostif_sme_enqueue(priv, SME_RSN_MCAST_CONFIRM); - break; - case DOT11_RSN_CONFIG_UNICAST_CIPHER: - hostif_sme_enqueue(priv, SME_RSN_UCAST_CONFIRM); - break; - case DOT11_RSN_CONFIG_AUTH_SUITE: - hostif_sme_enqueue(priv, SME_RSN_AUTH_CONFIRM); - break; - case DOT11_GMK1_TSC: - if (atomic_read(&priv->psstatus.snooze_guard)) - atomic_set(&priv->psstatus.snooze_guard, 0); - break; - case DOT11_GMK2_TSC: - if 
(atomic_read(&priv->psstatus.snooze_guard)) - atomic_set(&priv->psstatus.snooze_guard, 0); - break; - case DOT11_PMK_TSC: - case LOCAL_PMK: - case LOCAL_GAIN: - case LOCAL_WPS_ENABLE: - case LOCAL_WPS_PROBE_REQ: - case LOCAL_REGION: - default: - break; - } -} - -static -void hostif_power_mgmt_confirm(struct ks_wlan_private *priv) -{ - if (priv->reg.power_mgmt > POWER_MGMT_ACTIVE && - priv->reg.operation_mode == MODE_INFRASTRUCTURE) { - atomic_set(&priv->psstatus.confirm_wait, 0); - priv->dev_state = DEVICE_STATE_SLEEP; - ks_wlan_hw_power_save(priv); - } else { - priv->dev_state = DEVICE_STATE_READY; - } -} - -static -void hostif_sleep_confirm(struct ks_wlan_private *priv) -{ - atomic_set(&priv->sleepstatus.doze_request, 1); - queue_delayed_work(priv->wq, &priv->rw_dwork, 1); -} - -static -void hostif_start_confirm(struct ks_wlan_private *priv) -{ - union iwreq_data wrqu; - - wrqu.data.length = 0; - wrqu.data.flags = 0; - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - if (is_connect_status(priv->connect_status)) { - eth_zero_addr(wrqu.ap_addr.sa_data); - wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); - } - netdev_dbg(priv->net_dev, " scan_ind_count=%d\n", priv->scan_ind_count); - hostif_sme_enqueue(priv, SME_START_CONFIRM); -} - -static -void hostif_connect_indication(struct ks_wlan_private *priv) -{ - u16 connect_code; - unsigned int tmp = 0; - unsigned int old_status = priv->connect_status; - struct net_device *netdev = priv->net_dev; - union iwreq_data wrqu0; - - connect_code = get_word(priv); - - switch (connect_code) { - case RESULT_CONNECT: - if (!(priv->connect_status & FORCE_DISCONNECT)) - netif_carrier_on(netdev); - tmp = FORCE_DISCONNECT & priv->connect_status; - priv->connect_status = tmp + CONNECT_STATUS; - break; - case RESULT_DISCONNECT: - netif_carrier_off(netdev); - tmp = FORCE_DISCONNECT & priv->connect_status; - priv->connect_status = tmp + DISCONNECT_STATUS; - break; - default: - netdev_dbg(priv->net_dev, "unknown connect_code=%d :: scan_ind_count=%d\n", - connect_code, priv->scan_ind_count); - netif_carrier_off(netdev); - tmp = FORCE_DISCONNECT & priv->connect_status; - priv->connect_status = tmp + DISCONNECT_STATUS; - break; - } - - get_current_ap(priv, (struct link_ap_info *)priv->rxp); - if (is_connect_status(priv->connect_status) && - is_disconnect_status(old_status)) { - /* for power save */ - atomic_set(&priv->psstatus.snooze_guard, 0); - atomic_set(&priv->psstatus.confirm_wait, 0); - } - ks_wlan_do_power_save(priv); - - wrqu0.data.length = 0; - wrqu0.data.flags = 0; - wrqu0.ap_addr.sa_family = ARPHRD_ETHER; - if (is_disconnect_status(priv->connect_status) && - is_connect_status(old_status)) { - eth_zero_addr(wrqu0.ap_addr.sa_data); - netdev_dbg(priv->net_dev, "disconnect :: scan_ind_count=%d\n", - priv->scan_ind_count); - wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL); - } - priv->scan_ind_count = 0; -} - -static -void hostif_scan_indication(struct ks_wlan_private *priv) -{ - int i; - struct ap_info *ap_info; - - netdev_dbg(priv->net_dev, - "scan_ind_count = %d\n", priv->scan_ind_count); - ap_info = (struct ap_info *)(priv->rxp); - - if (priv->scan_ind_count) { - /* bssid check */ - for (i = 0; i < priv->aplist.size; i++) { - u8 *bssid = priv->aplist.ap[i].bssid; - - if (ether_addr_equal(ap_info->bssid, bssid)) - continue; - - if (ap_info->frame_type == IEEE80211_STYPE_PROBE_RESP) - get_ap_information(priv, ap_info, - &priv->aplist.ap[i]); - return; - } - } - priv->scan_ind_count++; - if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) { - 
netdev_dbg(priv->net_dev, " scan_ind_count=%d :: aplist.size=%d\n", - priv->scan_ind_count, priv->aplist.size); - get_ap_information(priv, (struct ap_info *)(priv->rxp), - &priv->aplist.ap[priv->scan_ind_count - 1]); - priv->aplist.size = priv->scan_ind_count; - } else { - netdev_dbg(priv->net_dev, " count over :: scan_ind_count=%d\n", - priv->scan_ind_count); - } -} - -static -void hostif_stop_confirm(struct ks_wlan_private *priv) -{ - unsigned int tmp = 0; - unsigned int old_status = priv->connect_status; - struct net_device *netdev = priv->net_dev; - union iwreq_data wrqu0; - - if (priv->dev_state == DEVICE_STATE_SLEEP) - priv->dev_state = DEVICE_STATE_READY; - - /* disconnect indication */ - if (is_connect_status(priv->connect_status)) { - netif_carrier_off(netdev); - tmp = FORCE_DISCONNECT & priv->connect_status; - priv->connect_status = tmp | DISCONNECT_STATUS; - netdev_info(netdev, "IWEVENT: disconnect\n"); - - wrqu0.data.length = 0; - wrqu0.data.flags = 0; - wrqu0.ap_addr.sa_family = ARPHRD_ETHER; - if (is_disconnect_status(priv->connect_status) && - is_connect_status(old_status)) { - eth_zero_addr(wrqu0.ap_addr.sa_data); - netdev_info(netdev, "IWEVENT: disconnect\n"); - wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL); - } - priv->scan_ind_count = 0; - } - - hostif_sme_enqueue(priv, SME_STOP_CONFIRM); -} - -static -void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv) -{ - priv->infra_status = 0; /* infrastructure mode cancel */ - hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM); -} - -static -void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv) -{ - get_word(priv); /* result_code */ - priv->infra_status = 1; /* infrastructure mode set */ - hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM); -} - -static -void hostif_adhoc_set_confirm(struct ks_wlan_private *priv) -{ - priv->infra_status = 1; /* infrastructure mode set */ - hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM); -} - -static -void hostif_associate_indication(struct ks_wlan_private *priv) -{ - struct association_request *assoc_req; - struct association_response *assoc_resp; - unsigned char *pb; - union iwreq_data wrqu; - char buf[IW_CUSTOM_MAX]; - char *pbuf = &buf[0]; - int i; - - static const char associnfo_leader0[] = "ASSOCINFO(ReqIEs="; - static const char associnfo_leader1[] = " RespIEs="; - - assoc_req = (struct association_request *)(priv->rxp); - assoc_resp = (struct association_response *)(assoc_req + 1); - pb = (unsigned char *)(assoc_resp + 1); - - memset(&wrqu, 0, sizeof(wrqu)); - memcpy(pbuf, associnfo_leader0, sizeof(associnfo_leader0) - 1); - wrqu.data.length += sizeof(associnfo_leader0) - 1; - pbuf += sizeof(associnfo_leader0) - 1; - - for (i = 0; i < le16_to_cpu(assoc_req->req_ies_size); i++) - pbuf += sprintf(pbuf, "%02x", *(pb + i)); - wrqu.data.length += (le16_to_cpu(assoc_req->req_ies_size)) * 2; - - memcpy(pbuf, associnfo_leader1, sizeof(associnfo_leader1) - 1); - wrqu.data.length += sizeof(associnfo_leader1) - 1; - pbuf += sizeof(associnfo_leader1) - 1; - - pb += le16_to_cpu(assoc_req->req_ies_size); - for (i = 0; i < le16_to_cpu(assoc_resp->resp_ies_size); i++) - pbuf += sprintf(pbuf, "%02x", *(pb + i)); - wrqu.data.length += (le16_to_cpu(assoc_resp->resp_ies_size)) * 2; - - pbuf += sprintf(pbuf, ")"); - wrqu.data.length += 1; - - wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu, buf); -} - -static -void hostif_bss_scan_confirm(struct ks_wlan_private *priv) -{ - u32 result_code; - struct net_device *dev = priv->net_dev; - union iwreq_data wrqu; - - result_code = 
get_dword(priv); - netdev_dbg(priv->net_dev, "result=%d :: scan_ind_count=%d\n", - result_code, priv->scan_ind_count); - - priv->sme_i.sme_flag &= ~SME_AP_SCAN; - hostif_sme_enqueue(priv, SME_BSS_SCAN_CONFIRM); - - wrqu.data.length = 0; - wrqu.data.flags = 0; - wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); - priv->scan_ind_count = 0; -} - -static -void hostif_phy_information_confirm(struct ks_wlan_private *priv) -{ - struct iw_statistics *wstats = &priv->wstats; - u8 rssi, signal; - u8 link_speed; - u32 transmitted_frame_count, received_fragment_count; - u32 failed_count, fcs_error_count; - - rssi = get_byte(priv); - signal = get_byte(priv); - get_byte(priv); /* noise */ - link_speed = get_byte(priv); - transmitted_frame_count = get_dword(priv); - received_fragment_count = get_dword(priv); - failed_count = get_dword(priv); - fcs_error_count = get_dword(priv); - - netdev_dbg(priv->net_dev, "phyinfo confirm rssi=%d signal=%d\n", - rssi, signal); - priv->current_rate = (link_speed & RATE_MASK); - wstats->qual.qual = signal; - wstats->qual.level = 256 - rssi; - wstats->qual.noise = 0; /* invalid noise value */ - wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; - - netdev_dbg(priv->net_dev, "\n rssi=%u\n" - " signal=%u\n" - " link_speed=%ux500Kbps\n" - " transmitted_frame_count=%u\n" - " received_fragment_count=%u\n" - " failed_count=%u\n" - " fcs_error_count=%u\n", - rssi, signal, link_speed, transmitted_frame_count, - received_fragment_count, failed_count, fcs_error_count); - /* wake_up_interruptible_all(&priv->confirm_wait); */ - complete(&priv->confirm_wait); -} - -static -void hostif_mic_failure_confirm(struct ks_wlan_private *priv) -{ - netdev_dbg(priv->net_dev, "mic_failure=%u\n", - priv->wpa.mic_failure.failure); - hostif_sme_enqueue(priv, SME_MIC_FAILURE_CONFIRM); -} - -static -void hostif_event_check(struct ks_wlan_private *priv) -{ - u16 event; - - event = get_word(priv); - switch (event) { - case HIF_DATA_IND: - hostif_data_indication(priv); - break; - case HIF_MIB_GET_CONF: - hostif_mib_get_confirm(priv); - break; - case HIF_MIB_SET_CONF: - hostif_mib_set_confirm(priv); - break; - case HIF_POWER_MGMT_CONF: - hostif_power_mgmt_confirm(priv); - break; - case HIF_SLEEP_CONF: - hostif_sleep_confirm(priv); - break; - case HIF_START_CONF: - hostif_start_confirm(priv); - break; - case HIF_CONNECT_IND: - hostif_connect_indication(priv); - break; - case HIF_STOP_CONF: - hostif_stop_confirm(priv); - break; - case HIF_PS_ADH_SET_CONF: - hostif_ps_adhoc_set_confirm(priv); - break; - case HIF_INFRA_SET_CONF: - case HIF_INFRA_SET2_CONF: - hostif_infrastructure_set_confirm(priv); - break; - case HIF_ADH_SET_CONF: - case HIF_ADH_SET2_CONF: - hostif_adhoc_set_confirm(priv); - break; - case HIF_ASSOC_INFO_IND: - hostif_associate_indication(priv); - break; - case HIF_MIC_FAILURE_CONF: - hostif_mic_failure_confirm(priv); - break; - case HIF_SCAN_CONF: - hostif_bss_scan_confirm(priv); - break; - case HIF_PHY_INFO_CONF: - case HIF_PHY_INFO_IND: - hostif_phy_information_confirm(priv); - break; - case HIF_SCAN_IND: - hostif_scan_indication(priv); - break; - case HIF_AP_SET_CONF: - default: - netdev_err(priv->net_dev, "undefined event[%04X]\n", event); - /* wake_up_all(&priv->confirm_wait); */ - complete(&priv->confirm_wait); - break; - } - - /* add event to hostt buffer */ - priv->hostt.buff[priv->hostt.qtail] = event; - priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE; -} - -/* allocate size bytes, set header size and event */ -static void *hostif_generic_request(size_t 
size, int event) -{ - struct hostif_hdr *p; - - p = kzalloc(hif_align_size(size), GFP_ATOMIC); - if (!p) - return NULL; - - p->size = cpu_to_le16(size - sizeof(p->size)); - p->event = cpu_to_le16(event); - - return p; -} - -int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb) -{ - unsigned int skb_len = 0; - unsigned char *buffer = NULL; - unsigned int length = 0; - struct hostif_data_request *pp; - unsigned char *p; - unsigned short eth_proto; - struct ether_hdr *eth_hdr; - unsigned short keyinfo = 0; - struct ieee802_1x_hdr *aa1x_hdr; - struct wpa_eapol_key *eap_key; - struct ethhdr *eth; - size_t size; - int ret; - - skb_len = skb->len; - if (skb_len > ETH_FRAME_LEN) { - netdev_err(priv->net_dev, "bad length skb_len=%d\n", skb_len); - ret = -EOVERFLOW; - goto err_kfree_skb; - } - - if (is_disconnect_status(priv->connect_status) || - (priv->connect_status & FORCE_DISCONNECT) || - priv->wpa.mic_failure.stop) { - if (netif_queue_stopped(priv->net_dev)) - netif_wake_queue(priv->net_dev); - - dev_kfree_skb(skb); - - return 0; - } - - /* power save wakeup */ - if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) { - if (!netif_queue_stopped(priv->net_dev)) - netif_stop_queue(priv->net_dev); - } - - size = sizeof(*pp) + 6 + skb_len + 8; - pp = kmalloc(hif_align_size(size), GFP_ATOMIC); - if (!pp) { - ret = -ENOMEM; - goto err_kfree_skb; - } - - p = (unsigned char *)pp->data; - - buffer = skb->data; - length = skb->len; - - /* skb check */ - eth = (struct ethhdr *)skb->data; - if (!ether_addr_equal(&priv->eth_addr[0], eth->h_source)) { - netdev_err(priv->net_dev, - "Invalid mac address: ethernet->h_source=%pM\n", - eth->h_source); - ret = -ENXIO; - goto err_kfree; - } - - /* dest and src MAC address copy */ - size = ETH_ALEN * 2; - memcpy(p, buffer, size); - p += size; - buffer += size; - length -= size; - - /* EtherType/Length check */ - if (*(buffer + 1) + (*buffer << 8) > 1500) { - /* ProtocolEAP = *(buffer+1) + (*buffer << 8); */ - /* SAP/CTL/OUI(6 byte) add */ - *p++ = 0xAA; /* DSAP */ - *p++ = 0xAA; /* SSAP */ - *p++ = 0x03; /* CTL */ - *p++ = 0x00; /* OUI ("000000") */ - *p++ = 0x00; /* OUI ("000000") */ - *p++ = 0x00; /* OUI ("000000") */ - skb_len += 6; - } else { - /* Length(2 byte) delete */ - buffer += 2; - length -= 2; - skb_len -= 2; - } - - /* pp->data copy */ - memcpy(p, buffer, length); - - p += length; - - /* for WPA */ - eth_hdr = (struct ether_hdr *)&pp->data[0]; - eth_proto = ntohs(eth_hdr->h_proto); - - /* for MIC FAILURE REPORT check */ - if (eth_proto == ETH_P_PAE && - priv->wpa.mic_failure.failure > 0) { - aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1); - if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY) { - eap_key = (struct wpa_eapol_key *)(aa1x_hdr + 1); - keyinfo = ntohs(eap_key->key_info); - } - } - - if (priv->wpa.rsn_enabled && priv->wpa.key[0].key_len) { - /* no encryption */ - if (eth_proto == ETH_P_PAE && - priv->wpa.key[1].key_len == 0 && - priv->wpa.key[2].key_len == 0 && - priv->wpa.key[3].key_len == 0) { - pp->auth_type = cpu_to_le16(TYPE_AUTH); - } else { - if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) { - u8 mic[MICHAEL_MIC_LEN]; - - ret = michael_mic(priv->wpa.key[0].tx_mic_key, - &pp->data[0], skb_len, - 0, mic); - if (ret < 0) - goto err_kfree; - - memcpy(p, mic, sizeof(mic)); - length += sizeof(mic); - skb_len += sizeof(mic); - p += sizeof(mic); - pp->auth_type = - cpu_to_le16(TYPE_DATA); - } else if (priv->wpa.pairwise_suite == - IW_AUTH_CIPHER_CCMP) { - pp->auth_type = - cpu_to_le16(TYPE_DATA); - } - } - } else { - 
if (eth_proto == ETH_P_PAE) - pp->auth_type = cpu_to_le16(TYPE_AUTH); - else - pp->auth_type = cpu_to_le16(TYPE_DATA); - } - - /* header value set */ - pp->header.size = - cpu_to_le16((sizeof(*pp) - sizeof(pp->header.size) + skb_len)); - pp->header.event = cpu_to_le16(HIF_DATA_REQ); - - /* tx request */ - ret = ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + skb_len), - send_packet_complete, skb); - - /* MIC FAILURE REPORT check */ - if (eth_proto == ETH_P_PAE && - priv->wpa.mic_failure.failure > 0) { - if (keyinfo & WPA_KEY_INFO_ERROR && - keyinfo & WPA_KEY_INFO_REQUEST) { - netdev_err(priv->net_dev, - "MIC ERROR Report SET : %04X\n", keyinfo); - hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST); - } - if (priv->wpa.mic_failure.failure == 2) - priv->wpa.mic_failure.stop = 1; - } - - return ret; - -err_kfree: - kfree(pp); -err_kfree_skb: - dev_kfree_skb(skb); - - return ret; -} - -static inline void ps_confirm_wait_inc(struct ks_wlan_private *priv) -{ - if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET) - atomic_inc(&priv->psstatus.confirm_wait); -} - -static inline void send_request_to_device(struct ks_wlan_private *priv, - void *data, size_t size) -{ - ps_confirm_wait_inc(priv); - ks_wlan_hw_tx(priv, data, size, NULL, NULL); -} - -static void hostif_mib_get_request(struct ks_wlan_private *priv, - u32 mib_attribute) -{ - struct hostif_mib_get_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_MIB_GET_REQ); - if (!pp) - return; - - pp->mib_attribute = cpu_to_le32(mib_attribute); - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -static void hostif_mib_set_request(struct ks_wlan_private *priv, - enum mib_attribute attr, - enum mib_data_type type, - void *data, size_t size) -{ - struct hostif_mib_set_request_t *pp; - - if (priv->dev_state < DEVICE_STATE_BOOT) - return; - - pp = hostif_generic_request(sizeof(*pp), HIF_MIB_SET_REQ); - if (!pp) - return; - - pp->mib_attribute = cpu_to_le32(attr); - pp->mib_value.size = cpu_to_le16(size); - pp->mib_value.type = cpu_to_le16(type); - memcpy(&pp->mib_value.body, data, size); - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp) + size)); -} - -static inline void hostif_mib_set_request_int(struct ks_wlan_private *priv, - enum mib_attribute attr, int val) -{ - __le32 v = cpu_to_le32(val); - size_t size = sizeof(v); - - hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_INT, &v, size); -} - -static inline void hostif_mib_set_request_bool(struct ks_wlan_private *priv, - enum mib_attribute attr, - bool val) -{ - __le32 v = cpu_to_le32(val); - size_t size = sizeof(v); - - hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_BOOL, &v, size); -} - -static inline void hostif_mib_set_request_ostring(struct ks_wlan_private *priv, - enum mib_attribute attr, - void *data, size_t size) -{ - hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_OSTRING, data, size); -} - -static -void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode) -{ - struct hostif_start_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_START_REQ); - if (!pp) - return; - - pp->mode = cpu_to_le16(mode); - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); - - priv->aplist.size = 0; - priv->scan_ind_count = 0; -} - -static __le16 ks_wlan_cap(struct ks_wlan_private *priv) -{ - u16 capability = 0x0000; - - if (priv->reg.preamble == SHORT_PREAMBLE) - capability |= WLAN_CAPABILITY_SHORT_PREAMBLE; - - capability &= ~(WLAN_CAPABILITY_PBCC); /* pbcc not support */ - - if (priv->reg.phy_type != D_11B_ONLY_MODE) 
{ - capability |= WLAN_CAPABILITY_SHORT_SLOT_TIME; - capability &= ~(WLAN_CAPABILITY_DSSS_OFDM); - } - - return cpu_to_le16(capability); -} - -static void init_request(struct ks_wlan_private *priv, - struct hostif_request *req) -{ - req->phy_type = cpu_to_le16(priv->reg.phy_type); - req->cts_mode = cpu_to_le16(priv->reg.cts_mode); - req->scan_type = cpu_to_le16(priv->reg.scan_type); - req->rate_set.size = priv->reg.rate_set.size; - req->capability = ks_wlan_cap(priv); - memcpy(&req->rate_set.body[0], &priv->reg.rate_set.body[0], - priv->reg.rate_set.size); -} - -static -void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv) -{ - struct hostif_ps_adhoc_set_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_PS_ADH_SET_REQ); - if (!pp) - return; - - init_request(priv, &pp->request); - pp->channel = cpu_to_le16(priv->reg.channel); - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -static -void hostif_infrastructure_set_request(struct ks_wlan_private *priv, int event) -{ - struct hostif_infrastructure_set_request *pp; - - pp = hostif_generic_request(sizeof(*pp), event); - if (!pp) - return; - - init_request(priv, &pp->request); - pp->ssid.size = priv->reg.ssid.size; - memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size); - pp->beacon_lost_count = - cpu_to_le16(priv->reg.beacon_lost_count); - pp->auth_type = cpu_to_le16(priv->reg.authenticate_type); - - pp->channel_list.body[0] = 1; - pp->channel_list.body[1] = 8; - pp->channel_list.body[2] = 2; - pp->channel_list.body[3] = 9; - pp->channel_list.body[4] = 3; - pp->channel_list.body[5] = 10; - pp->channel_list.body[6] = 4; - pp->channel_list.body[7] = 11; - pp->channel_list.body[8] = 5; - pp->channel_list.body[9] = 12; - pp->channel_list.body[10] = 6; - pp->channel_list.body[11] = 13; - pp->channel_list.body[12] = 7; - if (priv->reg.phy_type == D_11G_ONLY_MODE) { - pp->channel_list.size = 13; - } else { - pp->channel_list.body[13] = 14; - pp->channel_list.size = 14; - } - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -static -void hostif_adhoc_set_request(struct ks_wlan_private *priv) -{ - struct hostif_adhoc_set_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ); - if (!pp) - return; - - init_request(priv, &pp->request); - pp->channel = cpu_to_le16(priv->reg.channel); - pp->ssid.size = priv->reg.ssid.size; - memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size); - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -static -void hostif_adhoc_set2_request(struct ks_wlan_private *priv) -{ - struct hostif_adhoc_set2_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ); - if (!pp) - return; - - init_request(priv, &pp->request); - pp->ssid.size = priv->reg.ssid.size; - memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size); - - pp->channel_list.body[0] = priv->reg.channel; - pp->channel_list.size = 1; - memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN); - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -static -void hostif_stop_request(struct ks_wlan_private *priv) -{ - struct hostif_stop_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_STOP_REQ); - if (!pp) - return; - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -static -void hostif_phy_information_request(struct ks_wlan_private *priv) -{ - struct hostif_phy_information_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_PHY_INFO_REQ); - if (!pp) - 
return; - - if (priv->reg.phy_info_timer) { - pp->type = cpu_to_le16(TIME_TYPE); - pp->time = cpu_to_le16(priv->reg.phy_info_timer); - } else { - pp->type = cpu_to_le16(NORMAL_TYPE); - pp->time = cpu_to_le16(0); - } - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -static -void hostif_power_mgmt_request(struct ks_wlan_private *priv, - u32 mode, u32 wake_up, u32 receive_dtims) -{ - struct hostif_power_mgmt_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_POWER_MGMT_REQ); - if (!pp) - return; - - pp->mode = cpu_to_le32(mode); - pp->wake_up = cpu_to_le32(wake_up); - pp->receive_dtims = cpu_to_le32(receive_dtims); - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -static -void hostif_sleep_request(struct ks_wlan_private *priv, - enum sleep_mode_type mode) -{ - struct hostif_sleep_request *pp; - - if (mode == SLP_SLEEP) { - pp = hostif_generic_request(sizeof(*pp), HIF_SLEEP_REQ); - if (!pp) - return; - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); - } else if (mode == SLP_ACTIVE) { - atomic_set(&priv->sleepstatus.wakeup_request, 1); - queue_delayed_work(priv->wq, &priv->rw_dwork, 1); - } else { - netdev_err(priv->net_dev, "invalid mode %ld\n", (long)mode); - return; - } -} - -static -void hostif_bss_scan_request(struct ks_wlan_private *priv, - unsigned long scan_type, u8 *scan_ssid, - u8 scan_ssid_len) -{ - struct hostif_bss_scan_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_SCAN_REQ); - if (!pp) - return; - - pp->scan_type = scan_type; - - pp->ch_time_min = cpu_to_le32(110); /* default value */ - pp->ch_time_max = cpu_to_le32(130); /* default value */ - pp->channel_list.body[0] = 1; - pp->channel_list.body[1] = 8; - pp->channel_list.body[2] = 2; - pp->channel_list.body[3] = 9; - pp->channel_list.body[4] = 3; - pp->channel_list.body[5] = 10; - pp->channel_list.body[6] = 4; - pp->channel_list.body[7] = 11; - pp->channel_list.body[8] = 5; - pp->channel_list.body[9] = 12; - pp->channel_list.body[10] = 6; - pp->channel_list.body[11] = 13; - pp->channel_list.body[12] = 7; - if (priv->reg.phy_type == D_11G_ONLY_MODE) { - pp->channel_list.size = 13; - } else { - pp->channel_list.body[13] = 14; - pp->channel_list.size = 14; - } - pp->ssid.size = 0; - - /* specified SSID SCAN */ - if (scan_ssid_len > 0 && scan_ssid_len <= 32) { - pp->ssid.size = scan_ssid_len; - memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len); - } - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); - - priv->aplist.size = 0; - priv->scan_ind_count = 0; -} - -static -void hostif_mic_failure_request(struct ks_wlan_private *priv, - u16 failure_count, u16 timer) -{ - struct hostif_mic_failure_request *pp; - - pp = hostif_generic_request(sizeof(*pp), HIF_MIC_FAILURE_REQ); - if (!pp) - return; - - pp->failure_count = cpu_to_le16(failure_count); - pp->timer = cpu_to_le16(timer); - - send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); -} - -/* Device I/O Receive indicate */ -static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p, - unsigned int size) -{ - if (!priv->is_device_open) - return; - - spin_lock(&priv->dev_read_lock); - priv->dev_data[atomic_read(&priv->rec_count)] = p; - priv->dev_size[atomic_read(&priv->rec_count)] = size; - - if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) { - /* rx event count inc */ - atomic_inc(&priv->event_count); - } - atomic_inc(&priv->rec_count); - if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT) - atomic_set(&priv->rec_count, 0); - - 
wake_up_interruptible_all(&priv->devread_wait); - - spin_unlock(&priv->dev_read_lock); -} - -void hostif_receive(struct ks_wlan_private *priv, unsigned char *p, - unsigned int size) -{ - devio_rec_ind(priv, p, size); - - priv->rxp = p; - priv->rx_size = size; - - if (get_word(priv) == priv->rx_size) - hostif_event_check(priv); -} - -static void hostif_sme_set_wep(struct ks_wlan_private *priv, int type) -{ - switch (type) { - case SME_WEP_INDEX_REQUEST: - hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID, - priv->reg.wep_index); - break; - case SME_WEP_KEY1_REQUEST: - if (priv->wpa.wpa_enabled) - return; - hostif_mib_set_request_ostring(priv, - DOT11_WEP_DEFAULT_KEY_VALUE1, - &priv->reg.wep_key[0].val[0], - priv->reg.wep_key[0].size); - break; - case SME_WEP_KEY2_REQUEST: - if (priv->wpa.wpa_enabled) - return; - hostif_mib_set_request_ostring(priv, - DOT11_WEP_DEFAULT_KEY_VALUE2, - &priv->reg.wep_key[1].val[0], - priv->reg.wep_key[1].size); - break; - case SME_WEP_KEY3_REQUEST: - if (priv->wpa.wpa_enabled) - return; - hostif_mib_set_request_ostring(priv, - DOT11_WEP_DEFAULT_KEY_VALUE3, - &priv->reg.wep_key[2].val[0], - priv->reg.wep_key[2].size); - break; - case SME_WEP_KEY4_REQUEST: - if (priv->wpa.wpa_enabled) - return; - hostif_mib_set_request_ostring(priv, - DOT11_WEP_DEFAULT_KEY_VALUE4, - &priv->reg.wep_key[3].val[0], - priv->reg.wep_key[3].size); - break; - case SME_WEP_FLAG_REQUEST: - hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED, - priv->reg.privacy_invoked); - break; - } -} - -struct wpa_suite { - __le16 size; - unsigned char suite[4][CIPHER_ID_LEN]; -} __packed; - -struct rsn_mode { - __le32 rsn_mode; - __le16 rsn_capability; -} __packed; - -static void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type) -{ - struct wpa_suite wpa_suite; - struct rsn_mode rsn_mode; - size_t size; - u32 mode; - const u8 *buf = NULL; - - memset(&wpa_suite, 0, sizeof(wpa_suite)); - - switch (type) { - case SME_RSN_UCAST_REQUEST: - wpa_suite.size = cpu_to_le16(1); - switch (priv->wpa.pairwise_suite) { - case IW_AUTH_CIPHER_NONE: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE; - break; - case IW_AUTH_CIPHER_WEP40: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40; - break; - case IW_AUTH_CIPHER_TKIP: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP; - break; - case IW_AUTH_CIPHER_CCMP: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP; - break; - case IW_AUTH_CIPHER_WEP104: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104; - break; - } - - if (buf) - memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN); - size = sizeof(wpa_suite.size) + - (CIPHER_ID_LEN * le16_to_cpu(wpa_suite.size)); - hostif_mib_set_request_ostring(priv, - DOT11_RSN_CONFIG_UNICAST_CIPHER, - &wpa_suite, size); - break; - case SME_RSN_MCAST_REQUEST: - switch (priv->wpa.group_suite) { - case IW_AUTH_CIPHER_NONE: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE; - break; - case IW_AUTH_CIPHER_WEP40: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40; - break; - case IW_AUTH_CIPHER_TKIP: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? 
- CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP; - break; - case IW_AUTH_CIPHER_CCMP: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP; - break; - case IW_AUTH_CIPHER_WEP104: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104; - break; - } - if (buf) - memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN); - hostif_mib_set_request_ostring(priv, - DOT11_RSN_CONFIG_MULTICAST_CIPHER, - &wpa_suite.suite[0][0], - CIPHER_ID_LEN); - break; - case SME_RSN_AUTH_REQUEST: - wpa_suite.size = cpu_to_le16(1); - switch (priv->wpa.key_mgmt_suite) { - case IW_AUTH_KEY_MGMT_802_1X: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - KEY_MGMT_ID_WPA2_1X : KEY_MGMT_ID_WPA_1X; - break; - case IW_AUTH_KEY_MGMT_PSK: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - KEY_MGMT_ID_WPA2_PSK : KEY_MGMT_ID_WPA_PSK; - break; - case 0: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - KEY_MGMT_ID_WPA2_NONE : KEY_MGMT_ID_WPA_NONE; - break; - case 4: - buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - KEY_MGMT_ID_WPA2_WPANONE : - KEY_MGMT_ID_WPA_WPANONE; - break; - } - - if (buf) - memcpy(&wpa_suite.suite[0][0], buf, KEY_MGMT_ID_LEN); - size = sizeof(wpa_suite.size) + - (KEY_MGMT_ID_LEN * le16_to_cpu(wpa_suite.size)); - hostif_mib_set_request_ostring(priv, - DOT11_RSN_CONFIG_AUTH_SUITE, - &wpa_suite, size); - break; - case SME_RSN_ENABLED_REQUEST: - hostif_mib_set_request_bool(priv, DOT11_RSN_ENABLED, - priv->wpa.rsn_enabled); - break; - case SME_RSN_MODE_REQUEST: - mode = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ? - RSN_MODE_WPA2 : - (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) ? - RSN_MODE_WPA : RSN_MODE_NONE; - rsn_mode.rsn_mode = cpu_to_le32(mode); - rsn_mode.rsn_capability = cpu_to_le16(0); - hostif_mib_set_request_ostring(priv, LOCAL_RSN_MODE, - &rsn_mode, sizeof(rsn_mode)); - break; - } -} - -static -void hostif_sme_mode_setup(struct ks_wlan_private *priv) -{ - unsigned char rate_size; - unsigned char rate_octet[RATE_SET_MAX_SIZE]; - int i = 0; - - /* rate setting if rate segging is auto for changing phy_type (#94) */ - if (priv->reg.tx_rate == TX_RATE_FULL_AUTO) { - if (priv->reg.phy_type == D_11B_ONLY_MODE) { - priv->reg.rate_set.body[3] = TX_RATE_11M; - priv->reg.rate_set.body[2] = TX_RATE_5M; - priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE; - priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE; - priv->reg.rate_set.size = 4; - } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */ - priv->reg.rate_set.body[11] = TX_RATE_54M; - priv->reg.rate_set.body[10] = TX_RATE_48M; - priv->reg.rate_set.body[9] = TX_RATE_36M; - priv->reg.rate_set.body[8] = TX_RATE_18M; - priv->reg.rate_set.body[7] = TX_RATE_9M; - priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE; - priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE; - priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE; - priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE; - priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE; - priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE; - priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE; - priv->reg.rate_set.size = 12; - } - } - - /* rate mask by phy setting */ - if (priv->reg.phy_type == D_11B_ONLY_MODE) { - for (i = 0; i < priv->reg.rate_set.size; i++) { - if (!is_11b_rate(priv->reg.rate_set.body[i])) - break; - - if ((priv->reg.rate_set.body[i] & RATE_MASK) >= TX_RATE_5M) { - rate_octet[i] = priv->reg.rate_set.body[i] & - RATE_MASK; - } else { - rate_octet[i] = 
priv->reg.rate_set.body[i]; - } - } - - } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */ - for (i = 0; i < priv->reg.rate_set.size; i++) { - if (!is_11bg_rate(priv->reg.rate_set.body[i])) - break; - - if (is_ofdm_ext_rate(priv->reg.rate_set.body[i])) { - rate_octet[i] = priv->reg.rate_set.body[i] & - RATE_MASK; - } else { - rate_octet[i] = priv->reg.rate_set.body[i]; - } - } - } - rate_size = i; - if (rate_size == 0) { - if (priv->reg.phy_type == D_11G_ONLY_MODE) - rate_octet[0] = TX_RATE_6M | BASIC_RATE; - else - rate_octet[0] = TX_RATE_2M | BASIC_RATE; - rate_size = 1; - } - - /* rate set update */ - priv->reg.rate_set.size = rate_size; - memcpy(&priv->reg.rate_set.body[0], &rate_octet[0], rate_size); - - switch (priv->reg.operation_mode) { - case MODE_PSEUDO_ADHOC: - hostif_ps_adhoc_set_request(priv); - break; - case MODE_INFRASTRUCTURE: - if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) { - hostif_infrastructure_set_request(priv, - HIF_INFRA_SET_REQ); - } else { - hostif_infrastructure_set_request(priv, - HIF_INFRA_SET2_REQ); - netdev_dbg(priv->net_dev, - "Infra bssid = %pM\n", priv->reg.bssid); - } - break; - case MODE_ADHOC: - if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) { - hostif_adhoc_set_request(priv); - } else { - hostif_adhoc_set2_request(priv); - netdev_dbg(priv->net_dev, - "Adhoc bssid = %pM\n", priv->reg.bssid); - } - break; - default: - break; - } -} - -static -void hostif_sme_multicast_set(struct ks_wlan_private *priv) -{ - struct net_device *dev = priv->net_dev; - int mc_count; - struct netdev_hw_addr *ha; - char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN]; - int i = 0; - - spin_lock(&priv->multicast_spin); - - memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN); - - if (dev->flags & IFF_PROMISC) { - hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER, - MCAST_FILTER_PROMISC); - goto spin_unlock; - } - - if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) || - (dev->flags & IFF_ALLMULTI)) { - hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER, - MCAST_FILTER_MCASTALL); - goto spin_unlock; - } - - if (priv->sme_i.sme_flag & SME_MULTICAST) { - mc_count = netdev_mc_count(dev); - netdev_for_each_mc_addr(ha, dev) { - ether_addr_copy(&set_address[i * ETH_ALEN], ha->addr); - i++; - } - priv->sme_i.sme_flag &= ~SME_MULTICAST; - hostif_mib_set_request_ostring(priv, LOCAL_MULTICAST_ADDRESS, - &set_address[0], - ETH_ALEN * mc_count); - } else { - priv->sme_i.sme_flag |= SME_MULTICAST; - hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER, - MCAST_FILTER_MCAST); - } - -spin_unlock: - spin_unlock(&priv->multicast_spin); -} - -static void hostif_sme_power_mgmt_set(struct ks_wlan_private *priv) -{ - u32 mode, wake_up, receive_dtims; - - if (priv->reg.power_mgmt != POWER_MGMT_SAVE1 && - priv->reg.power_mgmt != POWER_MGMT_SAVE2) { - mode = POWER_ACTIVE; - wake_up = 0; - receive_dtims = 0; - } else { - mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ? 
- POWER_SAVE : POWER_ACTIVE; - wake_up = 0; - receive_dtims = (priv->reg.operation_mode == MODE_INFRASTRUCTURE && - priv->reg.power_mgmt == POWER_MGMT_SAVE2); - } - - hostif_power_mgmt_request(priv, mode, wake_up, receive_dtims); -} - -static void hostif_sme_sleep_set(struct ks_wlan_private *priv) -{ - if (priv->sleep_mode != SLP_SLEEP && - priv->sleep_mode != SLP_ACTIVE) - return; - - hostif_sleep_request(priv, priv->sleep_mode); -} - -static -void hostif_sme_set_key(struct ks_wlan_private *priv, int type) -{ - switch (type) { - case SME_SET_FLAG: - hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED, - priv->reg.privacy_invoked); - break; - case SME_SET_TXKEY: - hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID, - priv->wpa.txkey); - break; - case SME_SET_KEY1: - hostif_mib_set_request_ostring(priv, - DOT11_WEP_DEFAULT_KEY_VALUE1, - &priv->wpa.key[0].key_val[0], - priv->wpa.key[0].key_len); - break; - case SME_SET_KEY2: - hostif_mib_set_request_ostring(priv, - DOT11_WEP_DEFAULT_KEY_VALUE2, - &priv->wpa.key[1].key_val[0], - priv->wpa.key[1].key_len); - break; - case SME_SET_KEY3: - hostif_mib_set_request_ostring(priv, - DOT11_WEP_DEFAULT_KEY_VALUE3, - &priv->wpa.key[2].key_val[0], - priv->wpa.key[2].key_len); - break; - case SME_SET_KEY4: - hostif_mib_set_request_ostring(priv, - DOT11_WEP_DEFAULT_KEY_VALUE4, - &priv->wpa.key[3].key_val[0], - priv->wpa.key[3].key_len); - break; - case SME_SET_PMK_TSC: - hostif_mib_set_request_ostring(priv, DOT11_PMK_TSC, - &priv->wpa.key[0].rx_seq[0], - WPA_RX_SEQ_LEN); - break; - case SME_SET_GMK1_TSC: - hostif_mib_set_request_ostring(priv, DOT11_GMK1_TSC, - &priv->wpa.key[1].rx_seq[0], - WPA_RX_SEQ_LEN); - break; - case SME_SET_GMK2_TSC: - hostif_mib_set_request_ostring(priv, DOT11_GMK2_TSC, - &priv->wpa.key[2].rx_seq[0], - WPA_RX_SEQ_LEN); - break; - } -} - -static -void hostif_sme_set_pmksa(struct ks_wlan_private *priv) -{ - struct pmk_cache { - __le16 size; - struct { - u8 bssid[ETH_ALEN]; - u8 pmkid[IW_PMKID_LEN]; - } __packed list[PMK_LIST_MAX]; - } __packed pmkcache; - struct pmk *pmk; - size_t size; - int i = 0; - - list_for_each_entry(pmk, &priv->pmklist.head, list) { - if (i >= PMK_LIST_MAX) - break; - ether_addr_copy(pmkcache.list[i].bssid, pmk->bssid); - memcpy(pmkcache.list[i].pmkid, pmk->pmkid, IW_PMKID_LEN); - i++; - } - pmkcache.size = cpu_to_le16(priv->pmklist.size); - size = sizeof(priv->pmklist.size) + - ((ETH_ALEN + IW_PMKID_LEN) * priv->pmklist.size); - hostif_mib_set_request_ostring(priv, LOCAL_PMK, &pmkcache, size); -} - -/* execute sme */ -static void hostif_sme_execute(struct ks_wlan_private *priv, int event) -{ - u16 failure; - - switch (event) { - case SME_START: - if (priv->dev_state == DEVICE_STATE_BOOT) - hostif_mib_get_request(priv, DOT11_MAC_ADDRESS); - break; - case SME_MULTICAST_REQUEST: - hostif_sme_multicast_set(priv); - break; - case SME_MACADDRESS_SET_REQUEST: - hostif_mib_set_request_ostring(priv, LOCAL_CURRENTADDRESS, - &priv->eth_addr[0], ETH_ALEN); - break; - case SME_BSS_SCAN_REQUEST: - hostif_bss_scan_request(priv, priv->reg.scan_type, - priv->scan_ssid, priv->scan_ssid_len); - break; - case SME_POW_MNGMT_REQUEST: - hostif_sme_power_mgmt_set(priv); - break; - case SME_PHY_INFO_REQUEST: - hostif_phy_information_request(priv); - break; - case SME_MIC_FAILURE_REQUEST: - failure = priv->wpa.mic_failure.failure; - if (failure != 1 && failure != 2) { - netdev_err(priv->net_dev, - "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n", - failure); - return; - } - hostif_mic_failure_request(priv, failure - 1, 
(failure == 1) ? - 0 : priv->wpa.mic_failure.counter); - break; - case SME_MIC_FAILURE_CONFIRM: - if (priv->wpa.mic_failure.failure == 2) { - if (priv->wpa.mic_failure.stop) - priv->wpa.mic_failure.stop = 0; - priv->wpa.mic_failure.failure = 0; - hostif_start_request(priv, priv->reg.operation_mode); - } - break; - case SME_GET_MAC_ADDRESS: - if (priv->dev_state == DEVICE_STATE_BOOT) - hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION); - break; - case SME_GET_PRODUCT_VERSION: - if (priv->dev_state == DEVICE_STATE_BOOT) - priv->dev_state = DEVICE_STATE_PREINIT; - break; - case SME_STOP_REQUEST: - hostif_stop_request(priv); - break; - case SME_RTS_THRESHOLD_REQUEST: - hostif_mib_set_request_int(priv, DOT11_RTS_THRESHOLD, - priv->reg.rts); - break; - case SME_FRAGMENTATION_THRESHOLD_REQUEST: - hostif_mib_set_request_int(priv, DOT11_FRAGMENTATION_THRESHOLD, - priv->reg.fragment); - break; - case SME_WEP_INDEX_REQUEST: - case SME_WEP_KEY1_REQUEST: - case SME_WEP_KEY2_REQUEST: - case SME_WEP_KEY3_REQUEST: - case SME_WEP_KEY4_REQUEST: - case SME_WEP_FLAG_REQUEST: - hostif_sme_set_wep(priv, event); - break; - case SME_RSN_UCAST_REQUEST: - case SME_RSN_MCAST_REQUEST: - case SME_RSN_AUTH_REQUEST: - case SME_RSN_ENABLED_REQUEST: - case SME_RSN_MODE_REQUEST: - hostif_sme_set_rsn(priv, event); - break; - case SME_SET_FLAG: - case SME_SET_TXKEY: - case SME_SET_KEY1: - case SME_SET_KEY2: - case SME_SET_KEY3: - case SME_SET_KEY4: - case SME_SET_PMK_TSC: - case SME_SET_GMK1_TSC: - case SME_SET_GMK2_TSC: - hostif_sme_set_key(priv, event); - break; - case SME_SET_PMKSA: - hostif_sme_set_pmksa(priv); - break; - case SME_WPS_ENABLE_REQUEST: - hostif_mib_set_request_int(priv, LOCAL_WPS_ENABLE, - priv->wps.wps_enabled); - break; - case SME_WPS_PROBE_REQUEST: - hostif_mib_set_request_ostring(priv, LOCAL_WPS_PROBE_REQ, - priv->wps.ie, priv->wps.ielen); - break; - case SME_MODE_SET_REQUEST: - hostif_sme_mode_setup(priv); - break; - case SME_SET_GAIN: - hostif_mib_set_request_ostring(priv, LOCAL_GAIN, - &priv->gain, sizeof(priv->gain)); - break; - case SME_GET_GAIN: - hostif_mib_get_request(priv, LOCAL_GAIN); - break; - case SME_GET_EEPROM_CKSUM: - priv->eeprom_checksum = EEPROM_FW_NOT_SUPPORT; /* initialize */ - hostif_mib_get_request(priv, LOCAL_EEPROM_SUM); - break; - case SME_START_REQUEST: - hostif_start_request(priv, priv->reg.operation_mode); - break; - case SME_START_CONFIRM: - /* for power save */ - atomic_set(&priv->psstatus.snooze_guard, 0); - atomic_set(&priv->psstatus.confirm_wait, 0); - if (priv->dev_state == DEVICE_STATE_PREINIT) - priv->dev_state = DEVICE_STATE_INIT; - /* wake_up_interruptible_all(&priv->confirm_wait); */ - complete(&priv->confirm_wait); - break; - case SME_SLEEP_REQUEST: - hostif_sme_sleep_set(priv); - break; - case SME_SET_REGION: - hostif_mib_set_request_int(priv, LOCAL_REGION, priv->region); - break; - case SME_MULTICAST_CONFIRM: - case SME_BSS_SCAN_CONFIRM: - case SME_POW_MNGMT_CONFIRM: - case SME_PHY_INFO_CONFIRM: - case SME_STOP_CONFIRM: - case SME_RTS_THRESHOLD_CONFIRM: - case SME_FRAGMENTATION_THRESHOLD_CONFIRM: - case SME_WEP_INDEX_CONFIRM: - case SME_WEP_KEY1_CONFIRM: - case SME_WEP_KEY2_CONFIRM: - case SME_WEP_KEY3_CONFIRM: - case SME_WEP_KEY4_CONFIRM: - case SME_WEP_FLAG_CONFIRM: - case SME_RSN_UCAST_CONFIRM: - case SME_RSN_MCAST_CONFIRM: - case SME_RSN_AUTH_CONFIRM: - case SME_RSN_ENABLED_CONFIRM: - case SME_RSN_MODE_CONFIRM: - case SME_MODE_SET_CONFIRM: - case SME_TERMINATE: - default: - break; - } -} - -static void hostif_sme_work(struct work_struct *work) -{ - 
struct ks_wlan_private *priv; - - priv = container_of(work, struct ks_wlan_private, sme_work); - - if (priv->dev_state < DEVICE_STATE_BOOT) - return; - - if (cnt_smeqbody(priv) <= 0) - return; - - hostif_sme_execute(priv, priv->sme_i.event_buff[priv->sme_i.qhead]); - inc_smeqhead(priv); - if (cnt_smeqbody(priv) > 0) - schedule_work(&priv->sme_work); -} - -/* send to Station Management Entity module */ -void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event) -{ - /* enqueue sme event */ - if (cnt_smeqbody(priv) < (SME_EVENT_BUFF_SIZE - 1)) { - priv->sme_i.event_buff[priv->sme_i.qtail] = event; - inc_smeqtail(priv); - } else { - /* in case of buffer overflow */ - netdev_err(priv->net_dev, "sme queue buffer overflow\n"); - } - - schedule_work(&priv->sme_work); -} - -static inline void hostif_aplist_init(struct ks_wlan_private *priv) -{ - size_t size = LOCAL_APLIST_MAX * sizeof(struct local_ap); - - priv->aplist.size = 0; - memset(&priv->aplist.ap[0], 0, size); -} - -static inline void hostif_status_init(struct ks_wlan_private *priv) -{ - priv->infra_status = 0; - priv->current_rate = 4; - priv->connect_status = DISCONNECT_STATUS; -} - -static inline void hostif_sme_init(struct ks_wlan_private *priv) -{ - priv->sme_i.sme_status = SME_IDLE; - priv->sme_i.qhead = 0; - priv->sme_i.qtail = 0; - spin_lock_init(&priv->sme_i.sme_spin); - priv->sme_i.sme_flag = 0; - INIT_WORK(&priv->sme_work, hostif_sme_work); -} - -static inline void hostif_wpa_init(struct ks_wlan_private *priv) -{ - memset(&priv->wpa, 0, sizeof(priv->wpa)); - priv->wpa.rsn_enabled = false; - priv->wpa.mic_failure.failure = 0; - priv->wpa.mic_failure.last_failure_time = 0; - priv->wpa.mic_failure.stop = 0; -} - -static inline void hostif_power_save_init(struct ks_wlan_private *priv) -{ - atomic_set(&priv->psstatus.status, PS_NONE); - atomic_set(&priv->psstatus.confirm_wait, 0); - atomic_set(&priv->psstatus.snooze_guard, 0); - init_completion(&priv->psstatus.wakeup_wait); - INIT_WORK(&priv->wakeup_work, ks_wlan_hw_wakeup_task); -} - -static inline void hostif_pmklist_init(struct ks_wlan_private *priv) -{ - int i; - - memset(&priv->pmklist, 0, sizeof(priv->pmklist)); - INIT_LIST_HEAD(&priv->pmklist.head); - for (i = 0; i < PMK_LIST_MAX; i++) - INIT_LIST_HEAD(&priv->pmklist.pmk[i].list); -} - -static inline void hostif_counters_init(struct ks_wlan_private *priv) -{ - priv->dev_count = 0; - atomic_set(&priv->event_count, 0); - atomic_set(&priv->rec_count, 0); -} - -int hostif_init(struct ks_wlan_private *priv) -{ - hostif_aplist_init(priv); - hostif_status_init(priv); - - spin_lock_init(&priv->multicast_spin); - spin_lock_init(&priv->dev_read_lock); - init_waitqueue_head(&priv->devread_wait); - - hostif_counters_init(priv); - hostif_power_save_init(priv); - hostif_wpa_init(priv); - hostif_pmklist_init(priv); - hostif_sme_init(priv); - - return 0; -} - -void hostif_exit(struct ks_wlan_private *priv) -{ - cancel_work_sync(&priv->sme_work); -} diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h deleted file mode 100644 index c62a494ed6bb4f..00000000000000 --- a/drivers/staging/ks7010/ks_hostif.h +++ /dev/null @@ -1,617 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Driver for KeyStream wireless LAN - * - * Copyright (c) 2005-2008 KeyStream Corp. - * Copyright (C) 2009 Renesas Technology Corp. 
- */ - -#ifndef _KS_HOSTIF_H_ -#define _KS_HOSTIF_H_ - -#include -#include - -/* - * HOST-MAC I/F events - */ -#define HIF_DATA_REQ 0xE001 -#define HIF_DATA_IND 0xE801 -#define HIF_MIB_GET_REQ 0xE002 -#define HIF_MIB_GET_CONF 0xE802 -#define HIF_MIB_SET_REQ 0xE003 -#define HIF_MIB_SET_CONF 0xE803 -#define HIF_POWER_MGMT_REQ 0xE004 -#define HIF_POWER_MGMT_CONF 0xE804 -#define HIF_START_REQ 0xE005 -#define HIF_START_CONF 0xE805 -#define HIF_CONNECT_IND 0xE806 -#define HIF_STOP_REQ 0xE006 -#define HIF_STOP_CONF 0xE807 -#define HIF_PS_ADH_SET_REQ 0xE007 -#define HIF_PS_ADH_SET_CONF 0xE808 -#define HIF_INFRA_SET_REQ 0xE008 -#define HIF_INFRA_SET_CONF 0xE809 -#define HIF_ADH_SET_REQ 0xE009 -#define HIF_ADH_SET_CONF 0xE80A -#define HIF_AP_SET_REQ 0xE00A -#define HIF_AP_SET_CONF 0xE80B -#define HIF_ASSOC_INFO_IND 0xE80C -#define HIF_MIC_FAILURE_REQ 0xE00B -#define HIF_MIC_FAILURE_CONF 0xE80D -#define HIF_SCAN_REQ 0xE00C -#define HIF_SCAN_CONF 0xE80E -#define HIF_PHY_INFO_REQ 0xE00D -#define HIF_PHY_INFO_CONF 0xE80F -#define HIF_SLEEP_REQ 0xE00E -#define HIF_SLEEP_CONF 0xE810 -#define HIF_PHY_INFO_IND 0xE811 -#define HIF_SCAN_IND 0xE812 -#define HIF_INFRA_SET2_REQ 0xE00F -#define HIF_INFRA_SET2_CONF 0xE813 -#define HIF_ADH_SET2_REQ 0xE010 -#define HIF_ADH_SET2_CONF 0xE814 - -#define HIF_REQ_MAX 0xE010 - -/* - * HOST-MAC I/F data structure - * Byte alignment Little Endian - */ - -struct hostif_hdr { - __le16 size; - __le16 event; -} __packed; - -struct hostif_data_request { - struct hostif_hdr header; - __le16 auth_type; -#define TYPE_DATA 0x0000 -#define TYPE_AUTH 0x0001 - __le16 reserved; - u8 data[]; -} __packed; - -#define TYPE_PMK1 0x0001 -#define TYPE_GMK1 0x0002 -#define TYPE_GMK2 0x0003 - -#define CHANNEL_LIST_MAX_SIZE 14 -struct channel_list { - u8 size; - u8 body[CHANNEL_LIST_MAX_SIZE]; - u8 pad; -} __packed; - -/** - * enum mib_attribute - Management Information Base attribute - * Attribute value used for accessing and updating MIB - * - * @DOT11_MAC_ADDRESS: MAC Address (R) - * @DOT11_PRODUCT_VERSION: FirmWare Version (R) - * @DOT11_RTS_THRESHOLD: RTS Threshold (R/W) - * @DOT11_FRAGMENTATION_THRESHOLD: Fragment Threshold (R/W) - * @DOT11_PRIVACY_INVOKED: WEP ON/OFF (W) - * @DOT11_WEP_DEFAULT_KEY_ID: WEP Index (W) - * @DOT11_WEP_DEFAULT_KEY_VALUE1: WEP Key#1(TKIP AES: PairwiseTemporalKey) (W) - * @DOT11_WEP_DEFAULT_KEY_VALUE2: WEP Key#2(TKIP AES: GroupKey1) (W) - * @DOT11_WEP_DEFAULT_KEY_VALUE3: WEP Key#3(TKIP AES: GroupKey2) (W) - * @DOT11_WEP_DEFAULT_KEY_VALUE4: WEP Key#4 (W) - * @DOT11_WEP_LIST: WEP LIST - * @DOT11_DESIRED_SSID: SSID - * @DOT11_CURRENT_CHANNEL: channel set - * @DOT11_OPERATION_RATE_SET: rate set - * @LOCAL_AP_SEARCH_INTERVAL: AP search interval (R/W) - * @LOCAL_CURRENTADDRESS: MAC Address change (W) - * @LOCAL_MULTICAST_ADDRESS: Multicast Address (W) - * @LOCAL_MULTICAST_FILTER: Multicast Address Filter enable/disable (W) - * @LOCAL_SEARCHED_AP_LIST: AP list (R) - * @LOCAL_LINK_AP_STATUS: Link AP status (R) - * @LOCAL_PACKET_STATISTICS: tx,rx packets statistics - * @LOCAL_AP_SCAN_LIST_TYPE_SET: AP_SCAN_LIST_TYPE - * @DOT11_RSN_ENABLED: WPA enable/disable (W) - * @LOCAL_RSN_MODE: RSN mode WPA/WPA2 (W) - * @DOT11_RSN_CONFIG_MULTICAST_CIPHER: GroupKeyCipherSuite (W) - * @DOT11_RSN_CONFIG_UNICAST_CIPHER: PairwiseKeyCipherSuite (W) - * @DOT11_RSN_CONFIG_AUTH_SUITE: AuthenticationKeyManagementSuite (W) - * @DOT11_RSN_CONFIG_VERSION: RSN version (W) - * @LOCAL_RSN_CONFIG_ALL: RSN CONFIG ALL (W) - * @DOT11_PMK_TSC: PMK_TSC (W) - * @DOT11_GMK1_TSC: GMK1_TSC (W) - * 
@DOT11_GMK2_TSC: GMK2_TSC (W) - * @DOT11_GMK3_TSC: GMK3_TSC - * @LOCAL_PMK: Pairwise Master Key cache (W) - * @LOCAL_REGION: Region setting - * @LOCAL_WPS_ENABLE: WiFi Protected Setup - * @LOCAL_WPS_PROBE_REQ: WPS Probe Request - * @LOCAL_GAIN: Carrer sense threshold for demo ato show - * @LOCAL_EEPROM_SUM: EEPROM checksum information - */ -enum mib_attribute { - DOT11_MAC_ADDRESS = 0x21010100, - DOT11_PRODUCT_VERSION = 0x31024100, - DOT11_RTS_THRESHOLD = 0x21020100, - DOT11_FRAGMENTATION_THRESHOLD = 0x21050100, - DOT11_PRIVACY_INVOKED = 0x15010100, - DOT11_WEP_DEFAULT_KEY_ID = 0x15020100, - DOT11_WEP_DEFAULT_KEY_VALUE1 = 0x13020101, - DOT11_WEP_DEFAULT_KEY_VALUE2 = 0x13020102, - DOT11_WEP_DEFAULT_KEY_VALUE3 = 0x13020103, - DOT11_WEP_DEFAULT_KEY_VALUE4 = 0x13020104, - DOT11_WEP_LIST = 0x13020100, - DOT11_DESIRED_SSID = 0x11090100, - DOT11_CURRENT_CHANNEL = 0x45010100, - DOT11_OPERATION_RATE_SET = 0x11110100, - LOCAL_AP_SEARCH_INTERVAL = 0xF1010100, - LOCAL_CURRENTADDRESS = 0xF1050100, - LOCAL_MULTICAST_ADDRESS = 0xF1060100, - LOCAL_MULTICAST_FILTER = 0xF1060200, - LOCAL_SEARCHED_AP_LIST = 0xF1030100, - LOCAL_LINK_AP_STATUS = 0xF1040100, - LOCAL_PACKET_STATISTICS = 0xF1020100, - LOCAL_AP_SCAN_LIST_TYPE_SET = 0xF1030200, - DOT11_RSN_ENABLED = 0x15070100, - LOCAL_RSN_MODE = 0x56010100, - DOT11_RSN_CONFIG_MULTICAST_CIPHER = 0x51040100, - DOT11_RSN_CONFIG_UNICAST_CIPHER = 0x52020100, - DOT11_RSN_CONFIG_AUTH_SUITE = 0x53020100, - DOT11_RSN_CONFIG_VERSION = 0x51020100, - LOCAL_RSN_CONFIG_ALL = 0x5F010100, - DOT11_PMK_TSC = 0x55010100, - DOT11_GMK1_TSC = 0x55010101, - DOT11_GMK2_TSC = 0x55010102, - DOT11_GMK3_TSC = 0x55010103, - LOCAL_PMK = 0x58010100, - LOCAL_REGION = 0xF10A0100, - LOCAL_WPS_ENABLE = 0xF10B0100, - LOCAL_WPS_PROBE_REQ = 0xF10C0100, - LOCAL_GAIN = 0xF10D0100, - LOCAL_EEPROM_SUM = 0xF10E0100 -}; - -struct hostif_mib_get_request { - struct hostif_hdr header; - __le32 mib_attribute; -} __packed; - -/** - * enum mib_data_type - Message Information Base data type. 
- * @MIB_VALUE_TYPE_NULL: NULL type - * @MIB_VALUE_TYPE_INT: INTEGER type - * @MIB_VALUE_TYPE_BOOL: BOOL type - * @MIB_VALUE_TYPE_COUNT32: unused - * @MIB_VALUE_TYPE_OSTRING: Chunk of memory - */ -enum mib_data_type { - MIB_VALUE_TYPE_NULL = 0, - MIB_VALUE_TYPE_INT, - MIB_VALUE_TYPE_BOOL, - MIB_VALUE_TYPE_COUNT32, - MIB_VALUE_TYPE_OSTRING -}; - -struct hostif_mib_value { - __le16 size; - __le16 type; - u8 body[]; -} __packed; - -struct hostif_mib_get_confirm_t { - struct hostif_hdr header; - __le32 mib_status; -#define MIB_SUCCESS 0 -#define MIB_INVALID 1 -#define MIB_READ_ONLY 2 -#define MIB_WRITE_ONLY 3 - __le32 mib_attribute; - struct hostif_mib_value mib_value; -} __packed; - -struct hostif_mib_set_request_t { - struct hostif_hdr header; - __le32 mib_attribute; - struct hostif_mib_value mib_value; -} __packed; - -struct hostif_power_mgmt_request { - struct hostif_hdr header; - __le32 mode; -#define POWER_ACTIVE 1 -#define POWER_SAVE 2 - __le32 wake_up; -#define SLEEP_FALSE 0 -#define SLEEP_TRUE 1 /* not used */ - __le32 receive_dtims; -#define DTIM_FALSE 0 -#define DTIM_TRUE 1 -} __packed; - -enum power_mgmt_mode_type { - POWER_MGMT_ACTIVE, - POWER_MGMT_SAVE1, - POWER_MGMT_SAVE2 -}; - -#define RESULT_SUCCESS 0 -#define RESULT_INVALID_PARAMETERS 1 -#define RESULT_NOT_SUPPORTED 2 -/* #define RESULT_ALREADY_RUNNING 3 */ -#define RESULT_ALREADY_RUNNING 7 - -struct hostif_start_request { - struct hostif_hdr header; - __le16 mode; -#define MODE_PSEUDO_ADHOC 0 -#define MODE_INFRASTRUCTURE 1 -#define MODE_AP 2 /* not used */ -#define MODE_ADHOC 3 -} __packed; - -struct ssid { - u8 size; - u8 body[IEEE80211_MAX_SSID_LEN]; - u8 ssid_pad; -} __packed; - -#define RATE_SET_MAX_SIZE 16 -struct rate_set8 { - u8 size; - u8 body[8]; - u8 rate_pad; -} __packed; - -struct fh_parms { - __le16 dwell_time; - u8 hop_set; - u8 hop_pattern; - u8 hop_index; -} __packed; - -struct ds_parms { - u8 channel; -} __packed; - -struct cf_parms { - u8 count; - u8 period; - __le16 max_duration; - __le16 dur_remaining; -} __packed; - -struct ibss_parms { - __le16 atim_window; -} __packed; - -struct rsn_t { - u8 size; -#define RSN_BODY_SIZE 64 - u8 body[RSN_BODY_SIZE]; -} __packed; - -struct erp_params_t { - u8 erp_info; -} __packed; - -struct rate_set16 { - u8 size; - u8 body[16]; - u8 rate_pad; -} __packed; - -struct ap_info { - u8 bssid[6]; /* +00 */ - u8 rssi; /* +06 */ - u8 sq; /* +07 */ - u8 noise; /* +08 */ - u8 pad0; /* +09 */ - __le16 beacon_period; /* +10 */ - __le16 capability; /* +12 */ - u8 frame_type; /* +14 */ - u8 ch_info; /* +15 */ - __le16 body_size; /* +16 */ - u8 body[1024]; /* +18 */ - /* +1032 */ -} __packed; - -struct link_ap_info { - u8 bssid[6]; /* +00 */ - u8 rssi; /* +06 */ - u8 sq; /* +07 */ - u8 noise; /* +08 */ - u8 pad0; /* +09 */ - __le16 beacon_period; /* +10 */ - __le16 capability; /* +12 */ - struct rate_set8 rate_set; /* +14 */ - struct fh_parms fh_parameter; /* +24 */ - struct ds_parms ds_parameter; /* +29 */ - struct cf_parms cf_parameter; /* +30 */ - struct ibss_parms ibss_parameter; /* +36 */ - struct erp_params_t erp_parameter; /* +38 */ - u8 pad1; /* +39 */ - struct rate_set8 ext_rate_set; /* +40 */ - u8 DTIM_period; /* +50 */ - u8 rsn_mode; /* +51 */ -#define RSN_MODE_NONE 0 -#define RSN_MODE_WPA 1 -#define RSN_MODE_WPA2 2 - struct { - u8 size; /* +52 */ - u8 body[128]; /* +53 */ - } __packed rsn; -} __packed; - -#define RESULT_CONNECT 0 -#define RESULT_DISCONNECT 1 - -struct hostif_stop_request { - struct hostif_hdr header; -} __packed; - -#define D_11B_ONLY_MODE 0 -#define 
D_11G_ONLY_MODE 1 -#define D_11BG_COMPATIBLE_MODE 2 -#define D_11A_ONLY_MODE 3 - -#define CTS_MODE_FALSE 0 -#define CTS_MODE_TRUE 1 - -struct hostif_request { - __le16 phy_type; - __le16 cts_mode; - __le16 scan_type; - __le16 capability; - struct rate_set16 rate_set; -} __packed; - -/** - * struct hostif_ps_adhoc_set_request - pseudo adhoc mode - * @capability: bit5 : preamble - * bit6 : pbcc - Not supported always 0 - * bit10 : ShortSlotTime - * bit13 : DSSS-OFDM - Not supported always 0 - */ -struct hostif_ps_adhoc_set_request { - struct hostif_hdr header; - struct hostif_request request; - __le16 channel; -} __packed; - -#define AUTH_TYPE_OPEN_SYSTEM 0 -#define AUTH_TYPE_SHARED_KEY 1 - -/** - * struct hostif_infrastructure_set_request - * @capability: bit5 : preamble - * bit6 : pbcc - Not supported always 0 - * bit10 : ShortSlotTime - * bit13 : DSSS-OFDM - Not supported always 0 - */ -struct hostif_infrastructure_set_request { - struct hostif_hdr header; - struct hostif_request request; - struct ssid ssid; - __le16 beacon_lost_count; - __le16 auth_type; - struct channel_list channel_list; - u8 bssid[ETH_ALEN]; -} __packed; - -/** - * struct hostif_adhoc_set_request - * @capability: bit5 : preamble - * bit6 : pbcc - Not supported always 0 - * bit10 : ShortSlotTime - * bit13 : DSSS-OFDM - Not supported always 0 - */ -struct hostif_adhoc_set_request { - struct hostif_hdr header; - struct hostif_request request; - struct ssid ssid; - __le16 channel; -} __packed; - -/** - * struct hostif_adhoc_set2_request - * @capability: bit5 : preamble - * bit6 : pbcc - Not supported always 0 - * bit10 : ShortSlotTime - * bit13 : DSSS-OFDM - Not supported always 0 - */ -struct hostif_adhoc_set2_request { - struct hostif_hdr header; - struct hostif_request request; - __le16 reserved; - struct ssid ssid; - struct channel_list channel_list; - u8 bssid[ETH_ALEN]; -} __packed; - -struct association_request { - u8 type; - u8 pad; - __le16 capability; - __le16 listen_interval; - u8 ap_address[6]; - __le16 req_ies_size; -} __packed; - -struct association_response { - u8 type; - u8 pad; - __le16 capability; - __le16 status; - __le16 association_id; - __le16 resp_ies_size; -} __packed; - -struct hostif_bss_scan_request { - struct hostif_hdr header; - u8 scan_type; -#define ACTIVE_SCAN 0 -#define PASSIVE_SCAN 1 - u8 pad[3]; - __le32 ch_time_min; - __le32 ch_time_max; - struct channel_list channel_list; - struct ssid ssid; -} __packed; - -struct hostif_phy_information_request { - struct hostif_hdr header; - __le16 type; -#define NORMAL_TYPE 0 -#define TIME_TYPE 1 - __le16 time; /* unit 100ms */ -} __packed; - -enum sleep_mode_type { - SLP_ACTIVE, - SLP_SLEEP -}; - -struct hostif_sleep_request { - struct hostif_hdr header; -} __packed; - -struct hostif_mic_failure_request { - struct hostif_hdr header; - __le16 failure_count; - __le16 timer; -} __packed; - -#define BASIC_RATE 0x80 -#define RATE_MASK 0x7F - -#define TX_RATE_AUTO 0xff -#define TX_RATE_1M_FIXED 0 -#define TX_RATE_2M_FIXED 1 -#define TX_RATE_1_2M_AUTO 2 -#define TX_RATE_5M_FIXED 3 -#define TX_RATE_11M_FIXED 4 - -#define TX_RATE_FULL_AUTO 0 -#define TX_RATE_11_AUTO 1 -#define TX_RATE_11B_AUTO 2 -#define TX_RATE_11BG_AUTO 3 -#define TX_RATE_MANUAL_AUTO 4 -#define TX_RATE_FIXED 5 - -/* 11b rate */ -#define TX_RATE_1M ((u8)(10 / 5)) /* 11b 11g basic rate */ -#define TX_RATE_2M ((u8)(20 / 5)) /* 11b 11g basic rate */ -#define TX_RATE_5M ((u8)(55 / 5)) /* 11g basic rate */ -#define TX_RATE_11M ((u8)(110 / 5)) /* 11g basic rate */ - -/* 11g rate */ -#define 
TX_RATE_6M ((u8)(60 / 5)) /* 11g basic rate */ -#define TX_RATE_12M ((u8)(120 / 5)) /* 11g basic rate */ -#define TX_RATE_24M ((u8)(240 / 5)) /* 11g basic rate */ -#define TX_RATE_9M ((u8)(90 / 5)) -#define TX_RATE_18M ((u8)(180 / 5)) -#define TX_RATE_36M ((u8)(360 / 5)) -#define TX_RATE_48M ((u8)(480 / 5)) -#define TX_RATE_54M ((u8)(540 / 5)) - -static inline bool is_11b_rate(u8 rate) -{ - return (((rate & RATE_MASK) == TX_RATE_1M) || - ((rate & RATE_MASK) == TX_RATE_2M) || - ((rate & RATE_MASK) == TX_RATE_5M) || - ((rate & RATE_MASK) == TX_RATE_11M)); -} - -static inline bool is_ofdm_rate(u8 rate) -{ - return (((rate & RATE_MASK) == TX_RATE_6M) || - ((rate & RATE_MASK) == TX_RATE_12M) || - ((rate & RATE_MASK) == TX_RATE_24M) || - ((rate & RATE_MASK) == TX_RATE_9M) || - ((rate & RATE_MASK) == TX_RATE_18M) || - ((rate & RATE_MASK) == TX_RATE_36M) || - ((rate & RATE_MASK) == TX_RATE_48M) || - ((rate & RATE_MASK) == TX_RATE_54M)); -} - -static inline bool is_11bg_rate(u8 rate) -{ - return (is_11b_rate(rate) || is_ofdm_rate(rate)); -} - -static inline bool is_ofdm_ext_rate(u8 rate) -{ - return (((rate & RATE_MASK) == TX_RATE_9M) || - ((rate & RATE_MASK) == TX_RATE_18M) || - ((rate & RATE_MASK) == TX_RATE_36M) || - ((rate & RATE_MASK) == TX_RATE_48M) || - ((rate & RATE_MASK) == TX_RATE_54M)); -} - -enum connect_status_type { - CONNECT_STATUS, - DISCONNECT_STATUS -}; - -enum preamble_type { - LONG_PREAMBLE, - SHORT_PREAMBLE -}; - -enum multicast_filter_type { - MCAST_FILTER_MCAST, - MCAST_FILTER_MCASTALL, - MCAST_FILTER_PROMISC, -}; - -#define NIC_MAX_MCAST_LIST 32 - -#define HIF_EVENT_MASK 0xE800 - -static inline bool is_hif_ind(unsigned short event) -{ - return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) && - (((event & ~HIF_EVENT_MASK) == 0x0001) || - ((event & ~HIF_EVENT_MASK) == 0x0006) || - ((event & ~HIF_EVENT_MASK) == 0x000C) || - ((event & ~HIF_EVENT_MASK) == 0x0011) || - ((event & ~HIF_EVENT_MASK) == 0x0012))); -} - -static inline bool is_hif_conf(unsigned short event) -{ - return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) && - ((event & ~HIF_EVENT_MASK) > 0x0000) && - ((event & ~HIF_EVENT_MASK) < 0x0012) && - !is_hif_ind(event)); -} - -#ifdef __KERNEL__ - -#include "ks_wlan.h" - -/* function prototype */ -int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb); -void hostif_receive(struct ks_wlan_private *priv, unsigned char *p, - unsigned int size); -void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event); -int hostif_init(struct ks_wlan_private *priv); -void hostif_exit(struct ks_wlan_private *priv); -int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size, - void (*complete_handler)(struct ks_wlan_private *priv, - struct sk_buff *skb), - struct sk_buff *skb); -void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb); - -void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv); -int ks_wlan_hw_power_save(struct ks_wlan_private *priv); - -#define KS7010_SIZE_ALIGNMENT 32 - -static inline size_t hif_align_size(size_t size) -{ - return ALIGN(size, KS7010_SIZE_ALIGNMENT); -} - -#endif /* __KERNEL__ */ - -#endif /* _KS_HOSTIF_H_ */ diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h deleted file mode 100644 index 3e9a91b5131ce8..00000000000000 --- a/drivers/staging/ks7010/ks_wlan.h +++ /dev/null @@ -1,567 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Driver for KeyStream IEEE802.11 b/g wireless LAN cards. - * - * Copyright (C) 2006-2008 KeyStream Corp. 
- * Copyright (C) 2009 Renesas Technology Corp. - */ - -#ifndef _KS_WLAN_H -#define _KS_WLAN_H - -#include -#include -#include -#include -#include -#include -#include - -struct ks_wlan_parameter { - u8 operation_mode; - u8 channel; - u8 tx_rate; - struct { - u8 size; - u8 body[16]; - } rate_set; - u8 bssid[ETH_ALEN]; - struct { - u8 size; - u8 body[32 + 1]; - } ssid; - u8 preamble; - u8 power_mgmt; - u32 scan_type; -#define BEACON_LOST_COUNT_MAX 65535 - u32 beacon_lost_count; - u32 rts; - u32 fragment; - u32 privacy_invoked; - u32 wep_index; - struct { - u8 size; - u8 val[13 * 2 + 1]; - } wep_key[4]; - u16 authenticate_type; - u16 phy_type; - u16 cts_mode; - u16 phy_info_timer; -}; - -enum { - DEVICE_STATE_OFF = 0, /* this means hw_unavailable is != 0 */ - DEVICE_STATE_PREBOOT, /* we are in a pre-boot state (empty RAM) */ - DEVICE_STATE_BOOT, /* boot state (fw upload, run fw) */ - DEVICE_STATE_PREINIT, /* pre-init state */ - DEVICE_STATE_INIT, /* init state (restore MIB backup to device) */ - DEVICE_STATE_READY, /* driver&device are in operational state */ - DEVICE_STATE_SLEEP /* device in sleep mode */ -}; - -/* SME flag */ -#define SME_MODE_SET BIT(0) -#define SME_RTS BIT(1) -#define SME_FRAG BIT(2) -#define SME_WEP_FLAG BIT(3) -#define SME_WEP_INDEX BIT(4) -#define SME_WEP_VAL1 BIT(5) -#define SME_WEP_VAL2 BIT(6) -#define SME_WEP_VAL3 BIT(7) -#define SME_WEP_VAL4 BIT(8) -#define SME_WEP_VAL_MASK GENMASK(8, 5) -#define SME_RSN BIT(9) -#define SME_RSN_MULTICAST BIT(10) -#define SME_RSN_UNICAST BIT(11) -#define SME_RSN_AUTH BIT(12) - -#define SME_AP_SCAN BIT(13) -#define SME_MULTICAST BIT(14) - -/* SME Event */ -enum { - SME_START, - - SME_MULTICAST_REQUEST, - SME_MACADDRESS_SET_REQUEST, - SME_BSS_SCAN_REQUEST, - SME_SET_FLAG, - SME_SET_TXKEY, - SME_SET_KEY1, - SME_SET_KEY2, - SME_SET_KEY3, - SME_SET_KEY4, - SME_SET_PMK_TSC, - SME_SET_GMK1_TSC, - SME_SET_GMK2_TSC, - SME_SET_GMK3_TSC, - SME_SET_PMKSA, - SME_POW_MNGMT_REQUEST, - SME_PHY_INFO_REQUEST, - SME_MIC_FAILURE_REQUEST, - SME_GET_MAC_ADDRESS, - SME_GET_PRODUCT_VERSION, - SME_STOP_REQUEST, - SME_RTS_THRESHOLD_REQUEST, - SME_FRAGMENTATION_THRESHOLD_REQUEST, - SME_WEP_INDEX_REQUEST, - SME_WEP_KEY1_REQUEST, - SME_WEP_KEY2_REQUEST, - SME_WEP_KEY3_REQUEST, - SME_WEP_KEY4_REQUEST, - SME_WEP_FLAG_REQUEST, - SME_RSN_UCAST_REQUEST, - SME_RSN_MCAST_REQUEST, - SME_RSN_AUTH_REQUEST, - SME_RSN_ENABLED_REQUEST, - SME_RSN_MODE_REQUEST, - SME_WPS_ENABLE_REQUEST, - SME_WPS_PROBE_REQUEST, - SME_SET_GAIN, - SME_GET_GAIN, - SME_SLEEP_REQUEST, - SME_SET_REGION, - SME_MODE_SET_REQUEST, - SME_START_REQUEST, - SME_GET_EEPROM_CKSUM, - - SME_MIC_FAILURE_CONFIRM, - SME_START_CONFIRM, - - SME_MULTICAST_CONFIRM, - SME_BSS_SCAN_CONFIRM, - SME_GET_CURRENT_AP, - SME_POW_MNGMT_CONFIRM, - SME_PHY_INFO_CONFIRM, - SME_STOP_CONFIRM, - SME_RTS_THRESHOLD_CONFIRM, - SME_FRAGMENTATION_THRESHOLD_CONFIRM, - SME_WEP_INDEX_CONFIRM, - SME_WEP_KEY1_CONFIRM, - SME_WEP_KEY2_CONFIRM, - SME_WEP_KEY3_CONFIRM, - SME_WEP_KEY4_CONFIRM, - SME_WEP_FLAG_CONFIRM, - SME_RSN_UCAST_CONFIRM, - SME_RSN_MCAST_CONFIRM, - SME_RSN_AUTH_CONFIRM, - SME_RSN_ENABLED_CONFIRM, - SME_RSN_MODE_CONFIRM, - SME_MODE_SET_CONFIRM, - SME_SLEEP_CONFIRM, - - SME_RSN_SET_CONFIRM, - SME_WEP_SET_CONFIRM, - SME_TERMINATE, - - SME_EVENT_SIZE -}; - -/* SME Status */ -enum { - SME_IDLE, - SME_SETUP, - SME_DISCONNECT, - SME_CONNECT -}; - -#define SME_EVENT_BUFF_SIZE 128 - -struct sme_info { - int sme_status; - int event_buff[SME_EVENT_BUFF_SIZE]; - unsigned int qhead; - unsigned int qtail; - spinlock_t sme_spin; - unsigned 
long sme_flag; -}; - -struct hostt { - int buff[SME_EVENT_BUFF_SIZE]; - unsigned int qhead; - unsigned int qtail; -}; - -#define RSN_IE_BODY_MAX 64 -struct rsn_ie { - u8 id; /* 0xdd = WPA or 0x30 = RSN */ - u8 size; /* max ? 255 ? */ - u8 body[RSN_IE_BODY_MAX]; -} __packed; - -#define WPA_INFO_ELEM_ID 0xdd -#define RSN_INFO_ELEM_ID 0x30 - -#define WPS_IE_BODY_MAX 255 -struct wps_ie { - u8 id; /* 221 'dd 00 50 F2 04' */ - u8 size; /* max ? 255 ? */ - u8 body[WPS_IE_BODY_MAX]; -} __packed; - -struct local_ap { - u8 bssid[6]; - u8 rssi; - u8 sq; - struct { - u8 size; - u8 body[32]; - u8 ssid_pad; - } ssid; - struct { - u8 size; - u8 body[16]; - u8 rate_pad; - } rate_set; - u16 capability; - u8 channel; - u8 noise; - struct rsn_ie wpa_ie; - struct rsn_ie rsn_ie; - struct wps_ie wps_ie; -}; - -#define LOCAL_APLIST_MAX 31 -#define LOCAL_CURRENT_AP LOCAL_APLIST_MAX -struct local_aplist { - int size; - struct local_ap ap[LOCAL_APLIST_MAX + 1]; -}; - -struct local_gain { - u8 tx_mode; - u8 rx_mode; - u8 tx_gain; - u8 rx_gain; -}; - -struct local_eeprom_sum { - u8 type; - u8 result; -}; - -enum { - EEPROM_OK, - EEPROM_CHECKSUM_NONE, - EEPROM_FW_NOT_SUPPORT, - EEPROM_NG, -}; - -/* Power Save Status */ -enum { - PS_NONE, - PS_ACTIVE_SET, - PS_SAVE_SET, - PS_CONF_WAIT, - PS_SNOOZE, - PS_WAKEUP -}; - -struct power_save_status { - atomic_t status; /* initialvalue 0 */ - struct completion wakeup_wait; - atomic_t confirm_wait; - atomic_t snooze_guard; -}; - -struct sleep_status { - atomic_t status; /* initialvalue 0 */ - atomic_t doze_request; - atomic_t wakeup_request; -}; - -/* WPA */ -struct scan_ext { - unsigned int flag; - char ssid[IW_ESSID_MAX_SIZE + 1]; -}; - -#define CIPHER_ID_WPA_NONE "\x00\x50\xf2\x00" -#define CIPHER_ID_WPA_WEP40 "\x00\x50\xf2\x01" -#define CIPHER_ID_WPA_TKIP "\x00\x50\xf2\x02" -#define CIPHER_ID_WPA_CCMP "\x00\x50\xf2\x04" -#define CIPHER_ID_WPA_WEP104 "\x00\x50\xf2\x05" - -#define CIPHER_ID_WPA2_NONE "\x00\x0f\xac\x00" -#define CIPHER_ID_WPA2_WEP40 "\x00\x0f\xac\x01" -#define CIPHER_ID_WPA2_TKIP "\x00\x0f\xac\x02" -#define CIPHER_ID_WPA2_CCMP "\x00\x0f\xac\x04" -#define CIPHER_ID_WPA2_WEP104 "\x00\x0f\xac\x05" - -#define CIPHER_ID_LEN 4 - -enum { - KEY_MGMT_802_1X, - KEY_MGMT_PSK, - KEY_MGMT_WPANONE, -}; - -#define KEY_MGMT_ID_WPA_NONE "\x00\x50\xf2\x00" -#define KEY_MGMT_ID_WPA_1X "\x00\x50\xf2\x01" -#define KEY_MGMT_ID_WPA_PSK "\x00\x50\xf2\x02" -#define KEY_MGMT_ID_WPA_WPANONE "\x00\x50\xf2\xff" - -#define KEY_MGMT_ID_WPA2_NONE "\x00\x0f\xac\x00" -#define KEY_MGMT_ID_WPA2_1X "\x00\x0f\xac\x01" -#define KEY_MGMT_ID_WPA2_PSK "\x00\x0f\xac\x02" -#define KEY_MGMT_ID_WPA2_WPANONE "\x00\x0f\xac\xff" - -#define KEY_MGMT_ID_LEN 4 - -#define MIC_KEY_SIZE 8 - -struct wpa_key { - u32 ext_flags; /* IW_ENCODE_EXT_xxx */ - u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ - u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ - struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast - * (group) keys or unicast address for - * individual keys - */ - u16 alg; - u16 key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */ - u8 key_val[IW_ENCODING_TOKEN_MAX]; - u8 tx_mic_key[MIC_KEY_SIZE]; - u8 rx_mic_key[MIC_KEY_SIZE]; -}; - -#define WPA_KEY_INDEX_MAX 4 -#define WPA_RX_SEQ_LEN 6 - -struct mic_failure { - u16 failure; /* MIC Failure counter 0 or 1 or 2 */ - u16 counter; /* 1sec counter 0-60 */ - u32 last_failure_time; - int stop; -}; - -struct wpa_status { - int wpa_enabled; - bool rsn_enabled; - int version; - int pairwise_suite; /* unicast cipher */ - int group_suite; /* multicast cipher 
*/ - int key_mgmt_suite; - int auth_alg; - int txkey; - struct wpa_key key[WPA_KEY_INDEX_MAX]; - struct scan_ext scan_ext; - struct mic_failure mic_failure; -}; - -#include -#define PMK_LIST_MAX 8 -struct pmk_list { - u16 size; - struct list_head head; - struct pmk { - struct list_head list; - u8 bssid[ETH_ALEN]; - u8 pmkid[IW_PMKID_LEN]; - } pmk[PMK_LIST_MAX]; -}; - -struct wps_status { - int wps_enabled; - int ielen; - u8 ie[255]; -}; - -/* Tx Device struct */ -#define TX_DEVICE_BUFF_SIZE 1024 - -struct ks_wlan_private; - -/** - * struct tx_device_buffer - Queue item for the tx queue. - * @sendp: Pointer to the send request data. - * @size: Size of @sendp data. - * @complete_handler: Function called once data write to device is complete. - * @arg1: First argument to @complete_handler. - * @arg2: Second argument to @complete_handler. - */ -struct tx_device_buffer { - unsigned char *sendp; - unsigned int size; - void (*complete_handler)(struct ks_wlan_private *priv, - struct sk_buff *skb); - struct sk_buff *skb; -}; - -/** - * struct tx_device - Tx buffer queue. - * @tx_device_buffer: Queue buffer. - * @qhead: Head of tx queue. - * @qtail: Tail of tx queue. - * @tx_dev_lock: Queue lock. - */ -struct tx_device { - struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE]; - unsigned int qhead; - unsigned int qtail; - spinlock_t tx_dev_lock; /* protect access to the queue */ -}; - -/* Rx Device struct */ -#define RX_DATA_SIZE (2 + 2 + 2347 + 1) -#define RX_DEVICE_BUFF_SIZE 32 - -/** - * struct rx_device_buffer - Queue item for the rx queue. - * @data: rx data. - * @size: Size of @data. - */ -struct rx_device_buffer { - unsigned char data[RX_DATA_SIZE]; - unsigned int size; -}; - -/** - * struct rx_device - Rx buffer queue. - * @rx_device_buffer: Queue buffer. - * @qhead: Head of rx queue. - * @qtail: Tail of rx queue. - * @rx_dev_lock: Queue lock. - */ -struct rx_device { - struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE]; - unsigned int qhead; - unsigned int qtail; - spinlock_t rx_dev_lock; /* protect access to the queue */ -}; - -struct ks_wlan_private { - /* hardware information */ - void *if_hw; - struct workqueue_struct *wq; - struct delayed_work rw_dwork; - struct tasklet_struct rx_bh_task; - - struct net_device *net_dev; - struct net_device_stats nstats; - struct iw_statistics wstats; - - struct completion confirm_wait; - - /* trx device & sme */ - struct tx_device tx_dev; - struct rx_device rx_dev; - struct sme_info sme_i; - u8 *rxp; - unsigned int rx_size; - struct work_struct sme_work; - struct work_struct wakeup_work; - int scan_ind_count; - - unsigned char eth_addr[ETH_ALEN]; - - struct local_aplist aplist; - struct local_ap current_ap; - struct power_save_status psstatus; - struct sleep_status sleepstatus; - struct wpa_status wpa; - struct pmk_list pmklist; - /* wireless parameter */ - struct ks_wlan_parameter reg; - u8 current_rate; - - char nick[IW_ESSID_MAX_SIZE + 1]; - - spinlock_t multicast_spin; - - spinlock_t dev_read_lock; - wait_queue_head_t devread_wait; - - unsigned int need_commit; /* for ioctl */ - - /* DeviceIoControl */ - bool is_device_open; - atomic_t event_count; - atomic_t rec_count; - int dev_count; -#define DEVICE_STOCK_COUNT 20 - unsigned char *dev_data[DEVICE_STOCK_COUNT]; - int dev_size[DEVICE_STOCK_COUNT]; - - /* ioctl : IOCTL_FIRMWARE_VERSION */ - unsigned char firmware_version[128 + 1]; - int version_size; - - bool mac_address_valid; - - int dev_state; - - struct sk_buff *skb; - unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. 
*/ -#define FORCE_DISCONNECT 0x80000000 -#define CONNECT_STATUS_MASK 0x7FFFFFFF - u32 connect_status; - int infra_status; - u8 scan_ssid_len; - u8 scan_ssid[IW_ESSID_MAX_SIZE + 1]; - struct local_gain gain; - struct wps_status wps; - u8 sleep_mode; - - u8 region; - struct local_eeprom_sum eeprom_sum; - u8 eeprom_checksum; - - struct hostt hostt; - - unsigned long last_doze; - unsigned long last_wakeup; - - unsigned int wakeup_count; /* for detect wakeup loop */ -}; - -static inline void inc_txqhead(struct ks_wlan_private *priv) -{ - priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE; -} - -static inline void inc_txqtail(struct ks_wlan_private *priv) -{ - priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE; -} - -static inline bool txq_has_space(struct ks_wlan_private *priv) -{ - return (CIRC_SPACE(priv->tx_dev.qhead, priv->tx_dev.qtail, - TX_DEVICE_BUFF_SIZE) > 0); -} - -static inline void inc_rxqhead(struct ks_wlan_private *priv) -{ - priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE; -} - -static inline void inc_rxqtail(struct ks_wlan_private *priv) -{ - priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE; -} - -static inline bool rxq_has_space(struct ks_wlan_private *priv) -{ - return (CIRC_SPACE(priv->rx_dev.qhead, priv->rx_dev.qtail, - RX_DEVICE_BUFF_SIZE) > 0); -} - -static inline unsigned int txq_count(struct ks_wlan_private *priv) -{ - return CIRC_CNT_TO_END(priv->tx_dev.qhead, priv->tx_dev.qtail, - TX_DEVICE_BUFF_SIZE); -} - -static inline unsigned int rxq_count(struct ks_wlan_private *priv) -{ - return CIRC_CNT_TO_END(priv->rx_dev.qhead, priv->rx_dev.qtail, - RX_DEVICE_BUFF_SIZE); -} - -int ks_wlan_net_start(struct net_device *dev); -int ks_wlan_net_stop(struct net_device *dev); -bool is_connect_status(u32 status); -bool is_disconnect_status(u32 status); - -#endif /* _KS_WLAN_H */ diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h deleted file mode 100644 index 97c7d95de41130..00000000000000 --- a/drivers/staging/ks7010/ks_wlan_ioctl.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Driver for KeyStream 11b/g wireless LAN - * - * Copyright (c) 2005-2008 KeyStream Corp. - * Copyright (C) 2009 Renesas Technology Corp. - */ - -#ifndef _KS_WLAN_IOCTL_H -#define _KS_WLAN_IOCTL_H - -#include -/* The low order bit identify a SET (0) or a GET (1) ioctl. 
*/ - -/* (SIOCIWFIRSTPRIV + 0) */ -/* former KS_WLAN_GET_DRIVER_VERSION (SIOCIWFIRSTPRIV + 1) */ -/* (SIOCIWFIRSTPRIV + 2) */ -#define KS_WLAN_GET_FIRM_VERSION (SIOCIWFIRSTPRIV + 3) -#define KS_WLAN_SET_WPS_ENABLE (SIOCIWFIRSTPRIV + 4) -#define KS_WLAN_GET_WPS_ENABLE (SIOCIWFIRSTPRIV + 5) -#define KS_WLAN_SET_WPS_PROBE_REQ (SIOCIWFIRSTPRIV + 6) -#define KS_WLAN_GET_EEPROM_CKSUM (SIOCIWFIRSTPRIV + 7) -#define KS_WLAN_SET_PREAMBLE (SIOCIWFIRSTPRIV + 8) -#define KS_WLAN_GET_PREAMBLE (SIOCIWFIRSTPRIV + 9) -#define KS_WLAN_SET_POWER_SAVE (SIOCIWFIRSTPRIV + 10) -#define KS_WLAN_GET_POWER_SAVE (SIOCIWFIRSTPRIV + 11) -#define KS_WLAN_SET_SCAN_TYPE (SIOCIWFIRSTPRIV + 12) -#define KS_WLAN_GET_SCAN_TYPE (SIOCIWFIRSTPRIV + 13) -#define KS_WLAN_SET_RX_GAIN (SIOCIWFIRSTPRIV + 14) -#define KS_WLAN_GET_RX_GAIN (SIOCIWFIRSTPRIV + 15) -#define KS_WLAN_HOSTT (SIOCIWFIRSTPRIV + 16) /* unused */ -//#define KS_WLAN_SET_REGION (SIOCIWFIRSTPRIV + 17) -#define KS_WLAN_SET_BEACON_LOST (SIOCIWFIRSTPRIV + 18) -#define KS_WLAN_GET_BEACON_LOST (SIOCIWFIRSTPRIV + 19) - -#define KS_WLAN_SET_TX_GAIN (SIOCIWFIRSTPRIV + 20) -#define KS_WLAN_GET_TX_GAIN (SIOCIWFIRSTPRIV + 21) - -/* for KS7010 */ -#define KS_WLAN_SET_PHY_TYPE (SIOCIWFIRSTPRIV + 22) -#define KS_WLAN_GET_PHY_TYPE (SIOCIWFIRSTPRIV + 23) -#define KS_WLAN_SET_CTS_MODE (SIOCIWFIRSTPRIV + 24) -#define KS_WLAN_GET_CTS_MODE (SIOCIWFIRSTPRIV + 25) -/* (SIOCIWFIRSTPRIV + 26) */ -/* (SIOCIWFIRSTPRIV + 27) */ -#define KS_WLAN_SET_SLEEP_MODE (SIOCIWFIRSTPRIV + 28) /* sleep mode */ -#define KS_WLAN_GET_SLEEP_MODE (SIOCIWFIRSTPRIV + 29) /* sleep mode */ -/* (SIOCIWFIRSTPRIV + 30) */ -/* (SIOCIWFIRSTPRIV + 31) */ - -#ifdef __KERNEL__ - -#include "ks_wlan.h" -#include - -int ks_wlan_setup_parameter(struct ks_wlan_private *priv, - unsigned int commit_flag); - -#endif /* __KERNEL__ */ - -#endif /* _KS_WLAN_IOCTL_H */ diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c deleted file mode 100644 index 0fb97a79ad0b32..00000000000000 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ /dev/null @@ -1,2676 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Driver for KeyStream 11b/g wireless LAN - * - * Copyright (C) 2005-2008 KeyStream Corp. - * Copyright (C) 2009 Renesas Technology Corp. - */ - -#include -#include -#include -#include -#include -#include - -static int wep_on_off; -#define WEP_OFF 0 -#define WEP_ON_64BIT 1 -#define WEP_ON_128BIT 2 - -#include "ks_wlan.h" -#include "ks_hostif.h" -#include "ks_wlan_ioctl.h" - -/* Include Wireless Extension definition and check version */ -#include -#define WIRELESS_SPY /* enable iwspy support */ -#include /* New driver API */ - -/* Frequency list (map channels to frequencies) */ -static const long frequency_list[] = { - 2412, 2417, 2422, 2427, 2432, 2437, 2442, - 2447, 2452, 2457, 2462, 2467, 2472, 2484 -}; - -/* A few details needed for WEP (Wireless Equivalent Privacy) */ -#define MAX_KEY_SIZE 13 /* 128 (?) 
bits */ -#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */ -struct wep_key { - u16 len; - u8 key[16]; /* 40-bit and 104-bit keys */ -}; - -/* - * function prototypes - */ -static int ks_wlan_open(struct net_device *dev); -static void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue); -static netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev); -static int ks_wlan_close(struct net_device *dev); -static void ks_wlan_set_rx_mode(struct net_device *dev); -static struct net_device_stats *ks_wlan_get_stats(struct net_device *dev); -static int ks_wlan_set_mac_address(struct net_device *dev, void *addr); -static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq, - int cmd); - -static atomic_t update_phyinfo; -static struct timer_list update_phyinfo_timer; -static -int ks_wlan_update_phy_information(struct ks_wlan_private *priv) -{ - struct iw_statistics *wstats = &priv->wstats; - - netdev_dbg(priv->net_dev, "in_interrupt = %ld\n", in_interrupt()); - - if (priv->dev_state < DEVICE_STATE_READY) - return -EBUSY; /* not finished initialize */ - - if (atomic_read(&update_phyinfo)) - return -EPERM; - - /* The status */ - wstats->status = priv->reg.operation_mode; /* Operation mode */ - - /* Signal quality and co. But where is the noise level ??? */ - hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST); - - /* interruptible_sleep_on_timeout(&priv->confirm_wait, HZ/2); */ - if (!wait_for_completion_interruptible_timeout - (&priv->confirm_wait, HZ / 2)) { - netdev_dbg(priv->net_dev, "wait time out!!\n"); - } - - atomic_inc(&update_phyinfo); - update_phyinfo_timer.expires = jiffies + HZ; /* 1sec */ - add_timer(&update_phyinfo_timer); - - return 0; -} - -static -void ks_wlan_update_phyinfo_timeout(struct timer_list *unused) -{ - pr_debug("in_interrupt = %ld\n", in_interrupt()); - atomic_set(&update_phyinfo, 0); -} - -int ks_wlan_setup_parameter(struct ks_wlan_private *priv, - unsigned int commit_flag) -{ - hostif_sme_enqueue(priv, SME_STOP_REQUEST); - - if (commit_flag & SME_RTS) - hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST); - if (commit_flag & SME_FRAG) - hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST); - - if (commit_flag & SME_WEP_INDEX) - hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST); - if (commit_flag & SME_WEP_VAL1) - hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST); - if (commit_flag & SME_WEP_VAL2) - hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST); - if (commit_flag & SME_WEP_VAL3) - hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST); - if (commit_flag & SME_WEP_VAL4) - hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST); - if (commit_flag & SME_WEP_FLAG) - hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST); - - if (commit_flag & SME_RSN) { - hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST); - hostif_sme_enqueue(priv, SME_RSN_MODE_REQUEST); - } - if (commit_flag & SME_RSN_MULTICAST) - hostif_sme_enqueue(priv, SME_RSN_MCAST_REQUEST); - if (commit_flag & SME_RSN_UNICAST) - hostif_sme_enqueue(priv, SME_RSN_UCAST_REQUEST); - if (commit_flag & SME_RSN_AUTH) - hostif_sme_enqueue(priv, SME_RSN_AUTH_REQUEST); - - hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST); - - hostif_sme_enqueue(priv, SME_START_REQUEST); - - return 0; -} - -/* - * Initial Wireless Extension code for Ks_Wlannet driver by : - * Jean Tourrilhes - HPL - 17 November 00 - * Conversion to new driver API by : - * Jean Tourrilhes - HPL - 26 March 02 - * Javier also did a good amount of work here, adding some new extensions - * and fixing my code. 
Let's just say that without him this code just - * would not work at all... - Jean II - */ - -static int ks_wlan_get_name(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *cwrq, - char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (priv->dev_state < DEVICE_STATE_READY) - strscpy(cwrq->name, "NOT READY!", sizeof(cwrq->name)); - else if (priv->reg.phy_type == D_11B_ONLY_MODE) - strscpy(cwrq->name, "IEEE 802.11b", sizeof(cwrq->name)); - else if (priv->reg.phy_type == D_11G_ONLY_MODE) - strscpy(cwrq->name, "IEEE 802.11g", sizeof(cwrq->name)); - else - strscpy(cwrq->name, "IEEE 802.11b/g", sizeof(cwrq->name)); - - return 0; -} - -static int ks_wlan_set_freq(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *fwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - int channel; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - /* If setting by frequency, convert to a channel */ - if ((fwrq->freq.e == 1) && - (fwrq->freq.m >= 241200000) && (fwrq->freq.m <= 248700000)) { - int f = fwrq->freq.m / 100000; - int c = 0; - - while ((c < 14) && (f != frequency_list[c])) - c++; - /* Hack to fall through... */ - fwrq->freq.e = 0; - fwrq->freq.m = c + 1; - } - /* Setting by channel number */ - if ((fwrq->freq.m > 1000) || (fwrq->freq.e > 0)) - return -EOPNOTSUPP; - - channel = fwrq->freq.m; - /* We should do a better check than that, - * based on the card capability !!! - */ - if ((channel < 1) || (channel > 14)) { - netdev_dbg(dev, "%s: New channel value of %d is invalid!\n", - dev->name, fwrq->freq.m); - return -EINVAL; - } - - /* Yes ! We can set it !!! */ - priv->reg.channel = (u8)(channel); - priv->need_commit |= SME_MODE_SET; - - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_freq(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *fwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - int f; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (is_connect_status(priv->connect_status)) - f = (int)priv->current_ap.channel; - else - f = (int)priv->reg.channel; - - fwrq->freq.m = frequency_list[f - 1] * 100000; - fwrq->freq.e = 1; - - return 0; -} - -static int ks_wlan_set_essid(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - size_t len; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - /* Check if we asked for `any' */ - if (!dwrq->essid.flags) { - /* Just send an empty SSID list */ - memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body)); - priv->reg.ssid.size = 0; - } else { - len = dwrq->essid.length; - /* iwconfig uses nul termination in SSID.. 
*/ - if (len > 0 && extra[len - 1] == '\0') - len--; - - /* Check the size of the string */ - if (len > IW_ESSID_MAX_SIZE) - return -EINVAL; - - /* Set the SSID */ - memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body)); - memcpy(priv->reg.ssid.body, extra, len); - priv->reg.ssid.size = len; - } - /* Write it to the card */ - priv->need_commit |= SME_MODE_SET; - - ks_wlan_setup_parameter(priv, priv->need_commit); - priv->need_commit = 0; - return 0; -} - -static int ks_wlan_get_essid(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - /* Note : if dwrq->flags != 0, we should - * get the relevant SSID from the SSID list... - */ - if (priv->reg.ssid.size != 0) { - /* Get the current SSID */ - memcpy(extra, priv->reg.ssid.body, priv->reg.ssid.size); - - /* If none, we may want to get the one that was set */ - - /* Push it out ! */ - dwrq->essid.length = priv->reg.ssid.size; - dwrq->essid.flags = 1; /* active */ - } else { - dwrq->essid.length = 0; - dwrq->essid.flags = 0; /* ANY */ - } - - return 0; -} - -static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *awrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (priv->reg.operation_mode != MODE_ADHOC && - priv->reg.operation_mode != MODE_INFRASTRUCTURE) { - eth_zero_addr(priv->reg.bssid); - return -EOPNOTSUPP; - } - - ether_addr_copy(priv->reg.bssid, awrq->ap_addr.sa_data); - if (is_valid_ether_addr((u8 *)priv->reg.bssid)) - priv->need_commit |= SME_MODE_SET; - - netdev_dbg(dev, "bssid = %pM\n", priv->reg.bssid); - - /* Write it to the card */ - if (priv->need_commit) { - priv->need_commit |= SME_MODE_SET; - return -EINPROGRESS; /* Call commit handler */ - } - return 0; -} - -static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *awrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (is_connect_status(priv->connect_status)) - ether_addr_copy(awrq->ap_addr.sa_data, priv->current_ap.bssid); - else - eth_zero_addr(awrq->ap_addr.sa_data); - - awrq->ap_addr.sa_family = ARPHRD_ETHER; - - return 0; -} - -static int ks_wlan_set_nick(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - /* Check the size of the string */ - if (dwrq->data.length > 16 + 1) - return -E2BIG; - - memset(priv->nick, 0, sizeof(priv->nick)); - memcpy(priv->nick, extra, dwrq->data.length); - - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_nick(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - strscpy(extra, priv->nick, 17); - dwrq->data.length = strlen(extra) + 1; - - return 0; -} - -static int ks_wlan_set_rate(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - int i = 0; - - if (priv->sleep_mode == SLP_SLEEP) - 
return -EPERM; - - /* for SLEEP MODE */ - if (priv->reg.phy_type == D_11B_ONLY_MODE) { - if (vwrq->bitrate.fixed == 1) { - switch (vwrq->bitrate.value) { - case 11000000: - case 5500000: - priv->reg.rate_set.body[0] = - (u8)(vwrq->bitrate.value / 500000); - break; - case 2000000: - case 1000000: - priv->reg.rate_set.body[0] = - ((u8)(vwrq->bitrate.value / 500000)) | - BASIC_RATE; - break; - default: - return -EINVAL; - } - priv->reg.tx_rate = TX_RATE_FIXED; - priv->reg.rate_set.size = 1; - } else { /* vwrq->fixed == 0 */ - if (vwrq->bitrate.value > 0) { - switch (vwrq->bitrate.value) { - case 11000000: - priv->reg.rate_set.body[3] = - TX_RATE_11M; - i++; - fallthrough; - case 5500000: - priv->reg.rate_set.body[2] = TX_RATE_5M; - i++; - fallthrough; - case 2000000: - priv->reg.rate_set.body[1] = - TX_RATE_2M | BASIC_RATE; - i++; - fallthrough; - case 1000000: - priv->reg.rate_set.body[0] = - TX_RATE_1M | BASIC_RATE; - i++; - break; - default: - return -EINVAL; - } - priv->reg.tx_rate = TX_RATE_MANUAL_AUTO; - priv->reg.rate_set.size = i; - } else { - priv->reg.rate_set.body[3] = TX_RATE_11M; - priv->reg.rate_set.body[2] = TX_RATE_5M; - priv->reg.rate_set.body[1] = - TX_RATE_2M | BASIC_RATE; - priv->reg.rate_set.body[0] = - TX_RATE_1M | BASIC_RATE; - priv->reg.tx_rate = TX_RATE_FULL_AUTO; - priv->reg.rate_set.size = 4; - } - } - } else { /* D_11B_ONLY_MODE or D_11BG_COMPATIBLE_MODE */ - if (vwrq->bitrate.fixed == 1) { - switch (vwrq->bitrate.value) { - case 54000000: - case 48000000: - case 36000000: - case 18000000: - case 9000000: - priv->reg.rate_set.body[0] = - (u8)(vwrq->bitrate.value / 500000); - break; - case 24000000: - case 12000000: - case 11000000: - case 6000000: - case 5500000: - case 2000000: - case 1000000: - priv->reg.rate_set.body[0] = - ((u8)(vwrq->bitrate.value / 500000)) | - BASIC_RATE; - break; - default: - return -EINVAL; - } - priv->reg.tx_rate = TX_RATE_FIXED; - priv->reg.rate_set.size = 1; - } else { /* vwrq->fixed == 0 */ - if (vwrq->bitrate.value > 0) { - switch (vwrq->bitrate.value) { - case 54000000: - priv->reg.rate_set.body[11] = - TX_RATE_54M; - i++; - fallthrough; - case 48000000: - priv->reg.rate_set.body[10] = - TX_RATE_48M; - i++; - fallthrough; - case 36000000: - priv->reg.rate_set.body[9] = - TX_RATE_36M; - i++; - fallthrough; - case 24000000: - case 18000000: - case 12000000: - case 11000000: - case 9000000: - case 6000000: - if (vwrq->bitrate.value == 24000000) { - priv->reg.rate_set.body[8] = - TX_RATE_18M; - i++; - priv->reg.rate_set.body[7] = - TX_RATE_9M; - i++; - priv->reg.rate_set.body[6] = - TX_RATE_24M | BASIC_RATE; - i++; - priv->reg.rate_set.body[5] = - TX_RATE_12M | BASIC_RATE; - i++; - priv->reg.rate_set.body[4] = - TX_RATE_6M | BASIC_RATE; - i++; - priv->reg.rate_set.body[3] = - TX_RATE_11M | BASIC_RATE; - i++; - } else if (vwrq->bitrate.value == 18000000) { - priv->reg.rate_set.body[7] = - TX_RATE_18M; - i++; - priv->reg.rate_set.body[6] = - TX_RATE_9M; - i++; - priv->reg.rate_set.body[5] = - TX_RATE_12M | BASIC_RATE; - i++; - priv->reg.rate_set.body[4] = - TX_RATE_6M | BASIC_RATE; - i++; - priv->reg.rate_set.body[3] = - TX_RATE_11M | BASIC_RATE; - i++; - } else if (vwrq->bitrate.value == 12000000) { - priv->reg.rate_set.body[6] = - TX_RATE_9M; - i++; - priv->reg.rate_set.body[5] = - TX_RATE_12M | BASIC_RATE; - i++; - priv->reg.rate_set.body[4] = - TX_RATE_6M | BASIC_RATE; - i++; - priv->reg.rate_set.body[3] = - TX_RATE_11M | BASIC_RATE; - i++; - } else if (vwrq->bitrate.value == 11000000) { - priv->reg.rate_set.body[5] = - TX_RATE_9M; - 
i++; - priv->reg.rate_set.body[4] = - TX_RATE_6M | BASIC_RATE; - i++; - priv->reg.rate_set.body[3] = - TX_RATE_11M | BASIC_RATE; - i++; - } else if (vwrq->bitrate.value == 9000000) { - priv->reg.rate_set.body[4] = - TX_RATE_9M; - i++; - priv->reg.rate_set.body[3] = - TX_RATE_6M | BASIC_RATE; - i++; - } else { /* vwrq->value == 6000000 */ - priv->reg.rate_set.body[3] = - TX_RATE_6M | BASIC_RATE; - i++; - } - fallthrough; - case 5500000: - priv->reg.rate_set.body[2] = - TX_RATE_5M | BASIC_RATE; - i++; - fallthrough; - case 2000000: - priv->reg.rate_set.body[1] = - TX_RATE_2M | BASIC_RATE; - i++; - fallthrough; - case 1000000: - priv->reg.rate_set.body[0] = - TX_RATE_1M | BASIC_RATE; - i++; - break; - default: - return -EINVAL; - } - priv->reg.tx_rate = TX_RATE_MANUAL_AUTO; - priv->reg.rate_set.size = i; - } else { - priv->reg.rate_set.body[11] = TX_RATE_54M; - priv->reg.rate_set.body[10] = TX_RATE_48M; - priv->reg.rate_set.body[9] = TX_RATE_36M; - priv->reg.rate_set.body[8] = TX_RATE_18M; - priv->reg.rate_set.body[7] = TX_RATE_9M; - priv->reg.rate_set.body[6] = - TX_RATE_24M | BASIC_RATE; - priv->reg.rate_set.body[5] = - TX_RATE_12M | BASIC_RATE; - priv->reg.rate_set.body[4] = - TX_RATE_6M | BASIC_RATE; - priv->reg.rate_set.body[3] = - TX_RATE_11M | BASIC_RATE; - priv->reg.rate_set.body[2] = - TX_RATE_5M | BASIC_RATE; - priv->reg.rate_set.body[1] = - TX_RATE_2M | BASIC_RATE; - priv->reg.rate_set.body[0] = - TX_RATE_1M | BASIC_RATE; - priv->reg.tx_rate = TX_RATE_FULL_AUTO; - priv->reg.rate_set.size = 12; - } - } - } - - priv->need_commit |= SME_MODE_SET; - - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_rate(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - netdev_dbg(dev, "in_interrupt = %ld update_phyinfo = %d\n", - in_interrupt(), atomic_read(&update_phyinfo)); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (!atomic_read(&update_phyinfo)) - ks_wlan_update_phy_information(priv); - - vwrq->bitrate.value = ((priv->current_rate) & RATE_MASK) * 500000; - vwrq->bitrate.fixed = (priv->reg.tx_rate == TX_RATE_FIXED) ? 
1 : 0; - - return 0; -} - -static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - int rthr = vwrq->rts.value; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (vwrq->rts.disabled) - rthr = 2347; - if ((rthr < 0) || (rthr > 2347)) - return -EINVAL; - - priv->reg.rts = rthr; - priv->need_commit |= SME_RTS; - - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - vwrq->rts.value = priv->reg.rts; - vwrq->rts.disabled = (vwrq->rts.value >= 2347); - vwrq->rts.fixed = 1; - - return 0; -} - -static int ks_wlan_set_frag(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - int fthr = vwrq->frag.value; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (vwrq->frag.disabled) - fthr = 2346; - if ((fthr < 256) || (fthr > 2346)) - return -EINVAL; - - fthr &= ~0x1; /* Get an even value - is it really needed ??? */ - priv->reg.fragment = fthr; - priv->need_commit |= SME_FRAG; - - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_frag(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - vwrq->frag.value = priv->reg.fragment; - vwrq->frag.disabled = (vwrq->frag.value >= 2346); - vwrq->frag.fixed = 1; - - return 0; -} - -static int ks_wlan_set_mode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - if (uwrq->mode != IW_MODE_ADHOC && - uwrq->mode != IW_MODE_INFRA) - return -EINVAL; - - priv->reg.operation_mode = (uwrq->mode == IW_MODE_ADHOC) ? - MODE_ADHOC : MODE_INFRASTRUCTURE; - priv->need_commit |= SME_MODE_SET; - - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_mode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* If not managed, assume it's ad-hoc */ - uwrq->mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ? - IW_MODE_INFRA : IW_MODE_ADHOC; - - return 0; -} - -static int ks_wlan_set_encode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_point *enc = &dwrq->encoding; - struct wep_key key; - int index = (enc->flags & IW_ENCODE_INDEX); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - if (enc->length > MAX_KEY_SIZE) - return -EINVAL; - - /* for SLEEP MODE */ - if ((index < 0) || (index > 4)) - return -EINVAL; - - index = (index == 0) ? priv->reg.wep_index : (index - 1); - - /* Is WEP supported ? */ - /* Basic checking: do we have a key to set ? */ - if (enc->length > 0) { - key.len = (enc->length > MIN_KEY_SIZE) ? 
- MAX_KEY_SIZE : MIN_KEY_SIZE; - priv->reg.privacy_invoked = 0x01; - priv->need_commit |= SME_WEP_FLAG; - wep_on_off = (enc->length > MIN_KEY_SIZE) ? - WEP_ON_128BIT : WEP_ON_64BIT; - /* Check if the key is not marked as invalid */ - if (enc->flags & IW_ENCODE_NOKEY) - return 0; - - /* Cleanup */ - memset(key.key, 0, MAX_KEY_SIZE); - /* Copy the key in the driver */ - if (copy_from_user(key.key, enc->pointer, enc->length)) { - key.len = 0; - return -EFAULT; - } - /* Send the key to the card */ - priv->reg.wep_key[index].size = key.len; - memcpy(&priv->reg.wep_key[index].val[0], &key.key[0], - priv->reg.wep_key[index].size); - priv->need_commit |= (SME_WEP_VAL1 << index); - priv->reg.wep_index = index; - priv->need_commit |= SME_WEP_INDEX; - } else { - if (enc->flags & IW_ENCODE_DISABLED) { - priv->reg.wep_key[0].size = 0; - priv->reg.wep_key[1].size = 0; - priv->reg.wep_key[2].size = 0; - priv->reg.wep_key[3].size = 0; - priv->reg.privacy_invoked = 0x00; - if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) - priv->need_commit |= SME_MODE_SET; - - priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM; - wep_on_off = WEP_OFF; - priv->need_commit |= SME_WEP_FLAG; - } else { - /* set_wep_key(priv, index, 0, 0, 1); xxx */ - if (priv->reg.wep_key[index].size == 0) - return -EINVAL; - priv->reg.wep_index = index; - priv->need_commit |= SME_WEP_INDEX; - } - } - - /* Commit the changes if needed */ - if (enc->flags & IW_ENCODE_MODE) - priv->need_commit |= SME_WEP_FLAG; - - if (enc->flags & IW_ENCODE_OPEN) { - if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) - priv->need_commit |= SME_MODE_SET; - - priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM; - } else if (enc->flags & IW_ENCODE_RESTRICTED) { - if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) - priv->need_commit |= SME_MODE_SET; - - priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY; - } - if (priv->need_commit) { - ks_wlan_setup_parameter(priv, priv->need_commit); - priv->need_commit = 0; - } - return 0; -} - -static int ks_wlan_get_encode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_point *enc = &dwrq->encoding; - int index = (enc->flags & IW_ENCODE_INDEX) - 1; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - enc->flags = IW_ENCODE_DISABLED; - - /* Check encryption mode */ - switch (priv->reg.authenticate_type) { - case AUTH_TYPE_OPEN_SYSTEM: - enc->flags = IW_ENCODE_OPEN; - break; - case AUTH_TYPE_SHARED_KEY: - enc->flags = IW_ENCODE_RESTRICTED; - break; - } - - /* Which key do we want ? -1 -> tx index */ - if ((index < 0) || (index >= 4)) - index = priv->reg.wep_index; - if (priv->reg.privacy_invoked) { - enc->flags &= ~IW_ENCODE_DISABLED; - /* dwrq->flags |= IW_ENCODE_NOKEY; */ - } - enc->flags |= index + 1; - /* Copy the key to the user buffer */ - if (index >= 0 && index < 4) { - enc->length = (priv->reg.wep_key[index].size <= 16) ? 
- priv->reg.wep_key[index].size : 0; - memcpy(extra, priv->reg.wep_key[index].val, enc->length); - } - - return 0; -} - -static int ks_wlan_get_range(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_range *range = (struct iw_range *)extra; - int i, k; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - dwrq->data.length = sizeof(struct iw_range); - memset(range, 0, sizeof(*range)); - range->min_nwid = 0x0000; - range->max_nwid = 0x0000; - range->num_channels = 14; - /* Should be based on cap_rid.country to give only - * what the current card support - */ - k = 0; - for (i = 0; i < 13; i++) { /* channel 1 -- 13 */ - range->freq[k].i = i + 1; /* List index */ - range->freq[k].m = frequency_list[i] * 100000; - range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */ - } - range->num_frequency = k; - if (priv->reg.phy_type == D_11B_ONLY_MODE || - priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) { /* channel 14 */ - range->freq[13].i = 14; /* List index */ - range->freq[13].m = frequency_list[13] * 100000; - range->freq[13].e = 1; /* Values in table in MHz -> * 10^5 * 10 */ - range->num_frequency = 14; - } - - /* Hum... Should put the right values there */ - range->max_qual.qual = 100; - range->max_qual.level = 256 - 128; /* 0 dBm? */ - range->max_qual.noise = 256 - 128; - range->sensitivity = 1; - - if (priv->reg.phy_type == D_11B_ONLY_MODE) { - range->bitrate[0] = 1e6; - range->bitrate[1] = 2e6; - range->bitrate[2] = 5.5e6; - range->bitrate[3] = 11e6; - range->num_bitrates = 4; - } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */ - range->bitrate[0] = 1e6; - range->bitrate[1] = 2e6; - range->bitrate[2] = 5.5e6; - range->bitrate[3] = 11e6; - - range->bitrate[4] = 6e6; - range->bitrate[5] = 9e6; - range->bitrate[6] = 12e6; - if (IW_MAX_BITRATES < 9) { - range->bitrate[7] = 54e6; - range->num_bitrates = 8; - } else { - range->bitrate[7] = 18e6; - range->bitrate[8] = 24e6; - range->bitrate[9] = 36e6; - range->bitrate[10] = 48e6; - range->bitrate[11] = 54e6; - - range->num_bitrates = 12; - } - } - - /* Set an indication of the max TCP throughput - * in bit/s that we can expect using this interface. - * May be use for QoS stuff... Jean II - */ - if (i > 2) - range->throughput = 5000 * 1000; - else - range->throughput = 1500 * 1000; - - range->min_rts = 0; - range->max_rts = 2347; - range->min_frag = 256; - range->max_frag = 2346; - - range->encoding_size[0] = 5; /* WEP: RC4 40 bits */ - range->encoding_size[1] = 13; /* WEP: RC4 ~128 bits */ - range->num_encoding_sizes = 2; - range->max_encoding_tokens = 4; - - /* power management not support */ - range->pmp_flags = IW_POWER_ON; - range->pmt_flags = IW_POWER_ON; - range->pm_capa = 0; - - /* Transmit Power - values are in dBm( or mW) */ - range->txpower[0] = -256; - range->num_txpower = 1; - range->txpower_capa = IW_TXPOW_DBM; - /* range->txpower_capa = IW_TXPOW_MWATT; */ - - range->we_version_source = 21; - range->we_version_compiled = WIRELESS_EXT; - - range->retry_capa = IW_RETRY_ON; - range->retry_flags = IW_RETRY_ON; - range->r_time_flags = IW_RETRY_ON; - - /* Experimental measurements - boundary 11/5.5 Mb/s - * - * Note : with or without the (local->rssi), results - * are somewhat different. 
- Jean II - */ - range->avg_qual.qual = 50; - range->avg_qual.level = 186; /* -70 dBm */ - range->avg_qual.noise = 0; - - /* Event capability (kernel + driver) */ - range->event_capa[0] = (IW_EVENT_CAPA_K_0 | - IW_EVENT_CAPA_MASK(SIOCGIWAP) | - IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); - range->event_capa[1] = IW_EVENT_CAPA_K_1; - range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVCUSTOM) | - IW_EVENT_CAPA_MASK(IWEVMICHAELMICFAILURE)); - - /* encode extension (WPA) capability */ - range->enc_capa = (IW_ENC_CAPA_WPA | - IW_ENC_CAPA_WPA2 | - IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP); - return 0; -} - -static int ks_wlan_set_power(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - if (vwrq->power.disabled) { - priv->reg.power_mgmt = POWER_MGMT_ACTIVE; - } else { - if (priv->reg.operation_mode != MODE_INFRASTRUCTURE) - return -EINVAL; - priv->reg.power_mgmt = POWER_MGMT_SAVE1; - } - - hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST); - - return 0; -} - -static int ks_wlan_get_power(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - vwrq->power.disabled = (priv->reg.power_mgmt <= 0); - - return 0; -} - -static int ks_wlan_get_iwstats(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - vwrq->qual.qual = 0; /* not supported */ - vwrq->qual.level = priv->wstats.qual.level; - vwrq->qual.noise = 0; /* not supported */ - vwrq->qual.updated = 0; - - return 0; -} - -/* Note : this is deprecated in favor of IWSCAN */ -static int ks_wlan_get_aplist(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct sockaddr *address = (struct sockaddr *)extra; - struct iw_quality qual[LOCAL_APLIST_MAX]; - int i; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - for (i = 0; i < priv->aplist.size; i++) { - ether_addr_copy(address[i].sa_data, priv->aplist.ap[i].bssid); - address[i].sa_family = ARPHRD_ETHER; - qual[i].level = 256 - priv->aplist.ap[i].rssi; - qual[i].qual = priv->aplist.ap[i].sq; - qual[i].noise = 0; /* invalid noise value */ - qual[i].updated = 7; - } - if (i) { - dwrq->data.flags = 1; /* Should be define'd */ - memcpy(extra + sizeof(struct sockaddr) * i, - &qual, sizeof(struct iw_quality) * i); - } - dwrq->data.length = i; - - return 0; -} - -static int ks_wlan_set_scan(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *wrqu, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_scan_req *req = NULL; - int len; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - /* specified SSID SCAN */ - if (wrqu->data.length == sizeof(struct iw_scan_req) && - wrqu->data.flags & IW_SCAN_THIS_ESSID) { - req = (struct iw_scan_req *)extra; - len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); - priv->scan_ssid_len = len; - memcpy(priv->scan_ssid, req->essid, len); - } else { - priv->scan_ssid_len = 0; - } - - priv->sme_i.sme_flag |= SME_AP_SCAN; - hostif_sme_enqueue(priv, SME_BSS_SCAN_REQUEST); - - /* At 
this point, just return to the user. */ - - return 0; -} - -static char *ks_wlan_add_leader_event(const char *rsn_leader, char *end_buf, - char *current_ev, struct rsn_ie *rsn, - struct iw_event *iwe, - struct iw_request_info *info) -{ - char buffer[RSN_IE_BODY_MAX * 2 + 30]; - char *pbuf; - int i; - - pbuf = &buffer[0]; - memset(iwe, 0, sizeof(*iwe)); - iwe->cmd = IWEVCUSTOM; - memcpy(buffer, rsn_leader, sizeof(rsn_leader) - 1); - iwe->u.data.length += sizeof(rsn_leader) - 1; - pbuf += sizeof(rsn_leader) - 1; - pbuf += sprintf(pbuf, "%02x", rsn->id); - pbuf += sprintf(pbuf, "%02x", rsn->size); - iwe->u.data.length += 4; - - for (i = 0; i < rsn->size; i++) - pbuf += sprintf(pbuf, "%02x", rsn->body[i]); - - iwe->u.data.length += rsn->size * 2; - - return iwe_stream_add_point(info, current_ev, end_buf, iwe, &buffer[0]); -} - -/* - * Translate scan data returned from the card to a card independent - * format that the Wireless Tools will understand - Jean II - */ -static inline char *ks_wlan_translate_scan(struct net_device *dev, - struct iw_request_info *info, - char *current_ev, char *end_buf, - struct local_ap *ap) -{ - /* struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; */ - static const char rsn_leader[] = "rsn_ie="; - static const char wpa_leader[] = "wpa_ie="; - struct iw_event iwe; /* Temporary buffer */ - u16 capabilities; - char *current_val; /* For rates */ - int i; - - /* First entry *MUST* be the AP MAC address */ - iwe.cmd = SIOCGIWAP; - iwe.u.ap_addr.sa_family = ARPHRD_ETHER; - ether_addr_copy(iwe.u.ap_addr.sa_data, ap->bssid); - current_ev = iwe_stream_add_event(info, current_ev, - end_buf, &iwe, IW_EV_ADDR_LEN); - - /* Other entries will be displayed in the order we give them */ - - /* Add the ESSID */ - iwe.u.data.length = ap->ssid.size; - if (iwe.u.data.length > 32) - iwe.u.data.length = 32; - iwe.cmd = SIOCGIWESSID; - iwe.u.data.flags = 1; - current_ev = iwe_stream_add_point(info, current_ev, - end_buf, &iwe, ap->ssid.body); - - /* Add mode */ - iwe.cmd = SIOCGIWMODE; - capabilities = ap->capability; - if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) { - iwe.u.mode = (capabilities & WLAN_CAPABILITY_ESS) ? - IW_MODE_INFRA : IW_MODE_ADHOC; - current_ev = iwe_stream_add_event(info, current_ev, - end_buf, &iwe, IW_EV_UINT_LEN); - } - - /* Add frequency */ - iwe.cmd = SIOCGIWFREQ; - iwe.u.freq.m = ap->channel; - iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000; - iwe.u.freq.e = 1; - current_ev = iwe_stream_add_event(info, current_ev, - end_buf, &iwe, IW_EV_FREQ_LEN); - - /* Add quality statistics */ - iwe.cmd = IWEVQUAL; - iwe.u.qual.level = 256 - ap->rssi; - iwe.u.qual.qual = ap->sq; - iwe.u.qual.noise = 0; /* invalid noise value */ - current_ev = iwe_stream_add_event(info, current_ev, end_buf, - &iwe, IW_EV_QUAL_LEN); - - /* Add encryption capability */ - iwe.cmd = SIOCGIWENCODE; - iwe.u.data.flags = (capabilities & WLAN_CAPABILITY_PRIVACY) ? - (IW_ENCODE_ENABLED | IW_ENCODE_NOKEY) : - IW_ENCODE_DISABLED; - iwe.u.data.length = 0; - current_ev = iwe_stream_add_point(info, current_ev, end_buf, - &iwe, ap->ssid.body); - - /* - * Rate : stuffing multiple values in a single event - * require a bit more of magic - Jean II - */ - current_val = current_ev + IW_EV_LCP_LEN; - - iwe.cmd = SIOCGIWRATE; - - /* These two flags are ignored... 
*/ - iwe.u.bitrate.fixed = 0; - iwe.u.bitrate.disabled = 0; - - /* Max 16 values */ - for (i = 0; i < 16; i++) { - /* NULL terminated */ - if (i >= ap->rate_set.size) - break; - /* Bit rate given in 500 kb/s units (+ 0x80) */ - iwe.u.bitrate.value = ((ap->rate_set.body[i] & 0x7f) * 500000); - /* Add new value to event */ - current_val = iwe_stream_add_value(info, current_ev, - current_val, end_buf, &iwe, - IW_EV_PARAM_LEN); - } - /* Check if we added any event */ - if ((current_val - current_ev) > IW_EV_LCP_LEN) - current_ev = current_val; - - if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0) - current_ev = ks_wlan_add_leader_event(rsn_leader, end_buf, - current_ev, &ap->rsn_ie, - &iwe, info); - - if (ap->wpa_ie.id == WPA_INFO_ELEM_ID && ap->wpa_ie.size != 0) - current_ev = ks_wlan_add_leader_event(wpa_leader, end_buf, - current_ev, &ap->wpa_ie, - &iwe, info); - - /* - * The other data in the scan result are not really - * interesting, so for now drop it - Jean II - */ - return current_ev; -} - -static int ks_wlan_get_scan(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - int i; - char *current_ev = extra; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - if (priv->sme_i.sme_flag & SME_AP_SCAN) - return -EAGAIN; - - if (priv->aplist.size == 0) { - /* Client error, no scan results... - * The caller need to restart the scan. - */ - return -ENODATA; - } - - /* Read and parse all entries */ - for (i = 0; i < priv->aplist.size; i++) { - if ((extra + dwrq->data.length) - current_ev <= IW_EV_ADDR_LEN) { - dwrq->data.length = 0; - return -E2BIG; - } - /* Translate to WE format this entry */ - current_ev = ks_wlan_translate_scan(dev, info, current_ev, - extra + dwrq->data.length, - &priv->aplist.ap[i]); - } - /* Length of data */ - dwrq->data.length = (current_ev - extra); - dwrq->data.flags = 0; - - return 0; -} - -/* called after a bunch of SET operations */ -static int ks_wlan_config_commit(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *zwrq, - char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (!priv->need_commit) - return 0; - - ks_wlan_setup_parameter(priv, priv->need_commit); - priv->need_commit = 0; - return 0; -} - -/* set association ie params */ -static int ks_wlan_set_genie(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - return 0; -// return -EOPNOTSUPP; -} - -static int ks_wlan_set_auth_mode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_param *param = &vwrq->param; - int index = (param->flags & IW_AUTH_INDEX); - int value = param->value; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - switch (index) { - case IW_AUTH_WPA_VERSION: /* 0 */ - switch (value) { - case IW_AUTH_WPA_VERSION_DISABLED: - priv->wpa.version = value; - if (priv->wpa.rsn_enabled) - priv->wpa.rsn_enabled = false; - priv->need_commit |= SME_RSN; - break; - case IW_AUTH_WPA_VERSION_WPA: - case IW_AUTH_WPA_VERSION_WPA2: - priv->wpa.version = value; - if (!(priv->wpa.rsn_enabled)) - priv->wpa.rsn_enabled = true; - priv->need_commit |= SME_RSN; - break; - default: - return -EOPNOTSUPP; - } - break; - case 
IW_AUTH_CIPHER_PAIRWISE: /* 1 */ - switch (value) { - case IW_AUTH_CIPHER_NONE: - if (priv->reg.privacy_invoked) { - priv->reg.privacy_invoked = 0x00; - priv->need_commit |= SME_WEP_FLAG; - } - break; - case IW_AUTH_CIPHER_WEP40: - case IW_AUTH_CIPHER_TKIP: - case IW_AUTH_CIPHER_CCMP: - case IW_AUTH_CIPHER_WEP104: - if (!priv->reg.privacy_invoked) { - priv->reg.privacy_invoked = 0x01; - priv->need_commit |= SME_WEP_FLAG; - } - priv->wpa.pairwise_suite = value; - priv->need_commit |= SME_RSN_UNICAST; - break; - default: - return -EOPNOTSUPP; - } - break; - case IW_AUTH_CIPHER_GROUP: /* 2 */ - switch (value) { - case IW_AUTH_CIPHER_NONE: - if (priv->reg.privacy_invoked) { - priv->reg.privacy_invoked = 0x00; - priv->need_commit |= SME_WEP_FLAG; - } - break; - case IW_AUTH_CIPHER_WEP40: - case IW_AUTH_CIPHER_TKIP: - case IW_AUTH_CIPHER_CCMP: - case IW_AUTH_CIPHER_WEP104: - if (!priv->reg.privacy_invoked) { - priv->reg.privacy_invoked = 0x01; - priv->need_commit |= SME_WEP_FLAG; - } - priv->wpa.group_suite = value; - priv->need_commit |= SME_RSN_MULTICAST; - break; - default: - return -EOPNOTSUPP; - } - break; - case IW_AUTH_KEY_MGMT: /* 3 */ - switch (value) { - case IW_AUTH_KEY_MGMT_802_1X: - case IW_AUTH_KEY_MGMT_PSK: - case 0: /* NONE or 802_1X_NO_WPA */ - case 4: /* WPA_NONE */ - priv->wpa.key_mgmt_suite = value; - priv->need_commit |= SME_RSN_AUTH; - break; - default: - return -EOPNOTSUPP; - } - break; - case IW_AUTH_80211_AUTH_ALG: /* 6 */ - switch (value) { - case IW_AUTH_ALG_OPEN_SYSTEM: - priv->wpa.auth_alg = value; - priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM; - break; - case IW_AUTH_ALG_SHARED_KEY: - priv->wpa.auth_alg = value; - priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY; - break; - case IW_AUTH_ALG_LEAP: - default: - return -EOPNOTSUPP; - } - priv->need_commit |= SME_MODE_SET; - break; - case IW_AUTH_WPA_ENABLED: /* 7 */ - priv->wpa.wpa_enabled = value; - break; - case IW_AUTH_PRIVACY_INVOKED: /* 10 */ - if ((value && !priv->reg.privacy_invoked) || - (!value && priv->reg.privacy_invoked)) { - priv->reg.privacy_invoked = value ? 0x01 : 0x00; - priv->need_commit |= SME_WEP_FLAG; - } - break; - case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* 4 */ - case IW_AUTH_TKIP_COUNTERMEASURES: /* 5 */ - case IW_AUTH_DROP_UNENCRYPTED: /* 8 */ - case IW_AUTH_ROAMING_CONTROL: /* 9 */ - default: - break; - } - - /* return -EINPROGRESS; */ - if (priv->need_commit) { - ks_wlan_setup_parameter(priv, priv->need_commit); - priv->need_commit = 0; - } - return 0; -} - -static int ks_wlan_get_auth_mode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *vwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_param *param = &vwrq->param; - int index = (param->flags & IW_AUTH_INDEX); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - /* WPA (not used ?? wpa_supplicant) */ - switch (index) { - case IW_AUTH_WPA_VERSION: - param->value = priv->wpa.version; - break; - case IW_AUTH_CIPHER_PAIRWISE: - param->value = priv->wpa.pairwise_suite; - break; - case IW_AUTH_CIPHER_GROUP: - param->value = priv->wpa.group_suite; - break; - case IW_AUTH_KEY_MGMT: - param->value = priv->wpa.key_mgmt_suite; - break; - case IW_AUTH_80211_AUTH_ALG: - param->value = priv->wpa.auth_alg; - break; - case IW_AUTH_WPA_ENABLED: - param->value = priv->wpa.rsn_enabled; - break; - case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* OK??? 
*/ - case IW_AUTH_TKIP_COUNTERMEASURES: - case IW_AUTH_DROP_UNENCRYPTED: - default: - /* return -EOPNOTSUPP; */ - break; - } - return 0; -} - -/* set encoding token & mode (WPA)*/ -static int ks_wlan_set_encode_ext(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_encode_ext *enc; - int index = dwrq->encoding.flags & IW_ENCODE_INDEX; - unsigned int commit = 0; - struct wpa_key *key; - - enc = (struct iw_encode_ext *)extra; - if (!enc) - return -EINVAL; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (index < 1 || index > 4) - return -EINVAL; - index--; - key = &priv->wpa.key[index]; - - if (dwrq->encoding.flags & IW_ENCODE_DISABLED) - key->key_len = 0; - - key->ext_flags = enc->ext_flags; - if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { - priv->wpa.txkey = index; - commit |= SME_WEP_INDEX; - } else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { - memcpy(&key->rx_seq[0], &enc->rx_seq[0], IW_ENCODE_SEQ_MAX_SIZE); - } - - ether_addr_copy(&key->addr.sa_data[0], &enc->addr.sa_data[0]); - - switch (enc->alg) { - case IW_ENCODE_ALG_NONE: - if (priv->reg.privacy_invoked) { - priv->reg.privacy_invoked = 0x00; - commit |= SME_WEP_FLAG; - } - key->key_len = 0; - - break; - case IW_ENCODE_ALG_WEP: - case IW_ENCODE_ALG_CCMP: - if (!priv->reg.privacy_invoked) { - priv->reg.privacy_invoked = 0x01; - commit |= SME_WEP_FLAG; - } - if (enc->key_len) { - int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX); - - memcpy(&key->key_val[0], &enc->key[0], key_len); - key->key_len = key_len; - commit |= (SME_WEP_VAL1 << index); - } - break; - case IW_ENCODE_ALG_TKIP: - if (!priv->reg.privacy_invoked) { - priv->reg.privacy_invoked = 0x01; - commit |= SME_WEP_FLAG; - } - if (enc->key_len == 32) { - memcpy(&key->key_val[0], &enc->key[0], enc->key_len - 16); - key->key_len = enc->key_len - 16; - if (priv->wpa.key_mgmt_suite == 4) { /* WPA_NONE */ - memcpy(&key->tx_mic_key[0], &enc->key[16], 8); - memcpy(&key->rx_mic_key[0], &enc->key[16], 8); - } else { - memcpy(&key->tx_mic_key[0], &enc->key[16], 8); - memcpy(&key->rx_mic_key[0], &enc->key[24], 8); - } - commit |= (SME_WEP_VAL1 << index); - } - break; - default: - return -EINVAL; - } - key->alg = enc->alg; - - if (commit) { - if (commit & SME_WEP_INDEX) - hostif_sme_enqueue(priv, SME_SET_TXKEY); - if (commit & SME_WEP_VAL_MASK) - hostif_sme_enqueue(priv, SME_SET_KEY1 + index); - if (commit & SME_WEP_FLAG) - hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST); - } - - return 0; -} - -/* get encoding token & mode (WPA)*/ -static int ks_wlan_get_encode_ext(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - /* WPA (not used ?? wpa_supplicant) - * struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; - * struct iw_encode_ext *enc; - * enc = (struct iw_encode_ext *)extra; - * int index = dwrq->flags & IW_ENCODE_INDEX; - * WPA (not used ?? 
wpa_supplicant) - */ - return 0; -} - -static int ks_wlan_set_pmksa(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_pmksa *pmksa; - int i; - struct pmk *pmk; - struct list_head *ptr; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (!extra) - return -EINVAL; - - pmksa = (struct iw_pmksa *)extra; - - switch (pmksa->cmd) { - case IW_PMKSA_ADD: - if (list_empty(&priv->pmklist.head)) { - for (i = 0; i < PMK_LIST_MAX; i++) { - pmk = &priv->pmklist.pmk[i]; - if (is_zero_ether_addr(pmk->bssid)) - break; - } - ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data); - memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN); - list_add(&pmk->list, &priv->pmklist.head); - priv->pmklist.size++; - break; - } - /* search cache data */ - list_for_each(ptr, &priv->pmklist.head) { - pmk = list_entry(ptr, struct pmk, list); - if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) { - memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN); - list_move(&pmk->list, &priv->pmklist.head); - break; - } - } - /* not find address. */ - if (ptr != &priv->pmklist.head) - break; - /* new cache data */ - if (priv->pmklist.size < PMK_LIST_MAX) { - for (i = 0; i < PMK_LIST_MAX; i++) { - pmk = &priv->pmklist.pmk[i]; - if (is_zero_ether_addr(pmk->bssid)) - break; - } - ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data); - memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN); - list_add(&pmk->list, &priv->pmklist.head); - priv->pmklist.size++; - } else { /* overwrite old cache data */ - pmk = list_entry(priv->pmklist.head.prev, struct pmk, - list); - ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data); - memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN); - list_move(&pmk->list, &priv->pmklist.head); - } - break; - case IW_PMKSA_REMOVE: - if (list_empty(&priv->pmklist.head)) - return -EINVAL; - /* search cache data */ - list_for_each(ptr, &priv->pmklist.head) { - pmk = list_entry(ptr, struct pmk, list); - if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) { - eth_zero_addr(pmk->bssid); - memset(pmk->pmkid, 0, IW_PMKID_LEN); - list_del_init(&pmk->list); - break; - } - } - /* not find address. */ - if (ptr == &priv->pmklist.head) - return 0; - break; - case IW_PMKSA_FLUSH: - memset(&priv->pmklist, 0, sizeof(priv->pmklist)); - INIT_LIST_HEAD(&priv->pmklist.head); - for (i = 0; i < PMK_LIST_MAX; i++) - INIT_LIST_HEAD(&priv->pmklist.pmk[i].list); - break; - default: - return -EINVAL; - } - - hostif_sme_enqueue(priv, SME_SET_PMKSA); - return 0; -} - -static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_statistics *wstats = &priv->wstats; - - if (!atomic_read(&update_phyinfo)) - return (priv->dev_state < DEVICE_STATE_READY) ? 
NULL : wstats; - - /* - * Packets discarded in the wireless adapter due to wireless - * specific problems - */ - wstats->discard.nwid = 0; /* Rx invalid nwid */ - wstats->discard.code = 0; /* Rx invalid crypt */ - wstats->discard.fragment = 0; /* Rx invalid frag */ - wstats->discard.retries = 0; /* Tx excessive retries */ - wstats->discard.misc = 0; /* Invalid misc */ - wstats->miss.beacon = 0; /* Missed beacon */ - - return wstats; -} - -static int ks_wlan_set_stop_request(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (!(uwrq->mode)) - return -EINVAL; - - hostif_sme_enqueue(priv, SME_STOP_REQUEST); - return 0; -} - -#include -static int ks_wlan_set_mlme(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *dwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct iw_mlme *mlme = (struct iw_mlme *)extra; - union iwreq_data uwrq; - - uwrq.mode = 1; - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - if (mlme->cmd != IW_MLME_DEAUTH && - mlme->cmd != IW_MLME_DISASSOC) - return -EOPNOTSUPP; - - if (mlme->cmd == IW_MLME_DEAUTH && - mlme->reason_code == WLAN_REASON_MIC_FAILURE) - return 0; - - return ks_wlan_set_stop_request(dev, NULL, &uwrq, NULL); -} - -static int ks_wlan_get_firmware_version(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct iw_point *dwrq = &uwrq->data; - struct ks_wlan_private *priv = netdev_priv(dev); - - dwrq->length = priv->version_size + 1; - strscpy(extra, priv->firmware_version, dwrq->length); - return 0; -} - -static int ks_wlan_set_preamble(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - if (uwrq->mode != LONG_PREAMBLE && uwrq->mode != SHORT_PREAMBLE) - return -EINVAL; - - priv->reg.preamble = uwrq->mode; - priv->need_commit |= SME_MODE_SET; - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_preamble(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - uwrq->mode = priv->reg.preamble; - return 0; -} - -static int ks_wlan_set_power_mgmt(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - if (uwrq->mode != POWER_MGMT_ACTIVE && - uwrq->mode != POWER_MGMT_SAVE1 && - uwrq->mode != POWER_MGMT_SAVE2) - return -EINVAL; - - if ((uwrq->mode == POWER_MGMT_SAVE1 || uwrq->mode == POWER_MGMT_SAVE2) && - (priv->reg.operation_mode != MODE_INFRASTRUCTURE)) - return -EINVAL; - - priv->reg.power_mgmt = uwrq->mode; - hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST); - - return 0; -} - -static int ks_wlan_get_power_mgmt(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* for SLEEP MODE */ - uwrq->mode = priv->reg.power_mgmt; - return 0; -} - -static int ks_wlan_set_scan_type(struct net_device *dev, - 
struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - - if (uwrq->mode != ACTIVE_SCAN && uwrq->mode != PASSIVE_SCAN) - return -EINVAL; - - priv->reg.scan_type = uwrq->mode; - return 0; -} - -static int ks_wlan_get_scan_type(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - uwrq->mode = priv->reg.scan_type; - return 0; -} - -static int ks_wlan_set_beacon_lost(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - if (uwrq->mode > BEACON_LOST_COUNT_MAX) - return -EINVAL; - - priv->reg.beacon_lost_count = uwrq->mode; - - if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) { - priv->need_commit |= SME_MODE_SET; - return -EINPROGRESS; /* Call commit handler */ - } - - return 0; -} - -static int ks_wlan_get_beacon_lost(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - uwrq->mode = priv->reg.beacon_lost_count; - return 0; -} - -static int ks_wlan_set_phy_type(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - if (uwrq->mode != D_11B_ONLY_MODE && - uwrq->mode != D_11G_ONLY_MODE && - uwrq->mode != D_11BG_COMPATIBLE_MODE) - return -EINVAL; - - /* for SLEEP MODE */ - priv->reg.phy_type = uwrq->mode; - priv->need_commit |= SME_MODE_SET; - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_phy_type(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - uwrq->mode = priv->reg.phy_type; - return 0; -} - -static int ks_wlan_set_cts_mode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - if (uwrq->mode != CTS_MODE_FALSE && uwrq->mode != CTS_MODE_TRUE) - return -EINVAL; - - priv->reg.cts_mode = (uwrq->mode == CTS_MODE_FALSE) ? uwrq->mode : - (priv->reg.phy_type == D_11G_ONLY_MODE || - priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) ? 
- uwrq->mode : !uwrq->mode; - - priv->need_commit |= SME_MODE_SET; - return -EINPROGRESS; /* Call commit handler */ -} - -static int ks_wlan_get_cts_mode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - uwrq->mode = priv->reg.cts_mode; - return 0; -} - -static int ks_wlan_set_sleep_mode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (uwrq->mode != SLP_SLEEP && - uwrq->mode != SLP_ACTIVE) { - netdev_err(dev, "SET_SLEEP_MODE %d error\n", uwrq->mode); - return -EINVAL; - } - - priv->sleep_mode = uwrq->mode; - netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode); - - if (uwrq->mode == SLP_SLEEP) - hostif_sme_enqueue(priv, SME_STOP_REQUEST); - - hostif_sme_enqueue(priv, SME_SLEEP_REQUEST); - - return 0; -} - -static int ks_wlan_get_sleep_mode(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - uwrq->mode = priv->sleep_mode; - - return 0; -} - -static int ks_wlan_set_wps_enable(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - if (uwrq->mode != 0 && uwrq->mode != 1) - return -EINVAL; - - priv->wps.wps_enabled = uwrq->mode; - hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST); - - return 0; -} - -static int ks_wlan_get_wps_enable(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - uwrq->mode = priv->wps.wps_enabled; - netdev_info(dev, "return=%d\n", uwrq->mode); - - return 0; -} - -static int ks_wlan_set_wps_probe_req(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct iw_point *dwrq = &uwrq->data; - u8 *p = extra; - unsigned char len; - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - - /* length check */ - if (p[1] + 2 != dwrq->length || dwrq->length > 256) - return -EINVAL; - - priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */ - len = p[1] + 2; /* IE header + IE */ - - memcpy(priv->wps.ie, &len, sizeof(len)); - p = memcpy(priv->wps.ie + 1, p, len); - - netdev_dbg(dev, "%d(%#x): %02X %02X %02X %02X ... %02X %02X %02X\n", - priv->wps.ielen, priv->wps.ielen, p[0], p[1], p[2], p[3], - p[priv->wps.ielen - 3], p[priv->wps.ielen - 2], - p[priv->wps.ielen - 1]); - - hostif_sme_enqueue(priv, SME_WPS_PROBE_REQUEST); - - return 0; -} - -static int ks_wlan_set_tx_gain(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - if (uwrq->mode > 0xFF) - return -EINVAL; - - priv->gain.tx_gain = (u8)uwrq->mode; - priv->gain.tx_mode = (priv->gain.tx_gain < 0xFF) ? 
1 : 0; - hostif_sme_enqueue(priv, SME_SET_GAIN); - return 0; -} - -static int ks_wlan_get_tx_gain(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - uwrq->mode = priv->gain.tx_gain; - hostif_sme_enqueue(priv, SME_GET_GAIN); - return 0; -} - -static int ks_wlan_set_rx_gain(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - if (uwrq->mode > 0xFF) - return -EINVAL; - - priv->gain.rx_gain = (u8)uwrq->mode; - priv->gain.rx_mode = (priv->gain.rx_gain < 0xFF) ? 1 : 0; - hostif_sme_enqueue(priv, SME_SET_GAIN); - return 0; -} - -static int ks_wlan_get_rx_gain(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->sleep_mode == SLP_SLEEP) - return -EPERM; - /* for SLEEP MODE */ - uwrq->mode = priv->gain.rx_gain; - hostif_sme_enqueue(priv, SME_GET_GAIN); - return 0; -} - -static int ks_wlan_get_eeprom_cksum(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - uwrq->mode = priv->eeprom_checksum; - return 0; -} - -static void print_hif_event(struct net_device *dev, int event) -{ - switch (event) { - case HIF_DATA_REQ: - netdev_info(dev, "HIF_DATA_REQ\n"); - break; - case HIF_DATA_IND: - netdev_info(dev, "HIF_DATA_IND\n"); - break; - case HIF_MIB_GET_REQ: - netdev_info(dev, "HIF_MIB_GET_REQ\n"); - break; - case HIF_MIB_GET_CONF: - netdev_info(dev, "HIF_MIB_GET_CONF\n"); - break; - case HIF_MIB_SET_REQ: - netdev_info(dev, "HIF_MIB_SET_REQ\n"); - break; - case HIF_MIB_SET_CONF: - netdev_info(dev, "HIF_MIB_SET_CONF\n"); - break; - case HIF_POWER_MGMT_REQ: - netdev_info(dev, "HIF_POWER_MGMT_REQ\n"); - break; - case HIF_POWER_MGMT_CONF: - netdev_info(dev, "HIF_POWER_MGMT_CONF\n"); - break; - case HIF_START_REQ: - netdev_info(dev, "HIF_START_REQ\n"); - break; - case HIF_START_CONF: - netdev_info(dev, "HIF_START_CONF\n"); - break; - case HIF_CONNECT_IND: - netdev_info(dev, "HIF_CONNECT_IND\n"); - break; - case HIF_STOP_REQ: - netdev_info(dev, "HIF_STOP_REQ\n"); - break; - case HIF_STOP_CONF: - netdev_info(dev, "HIF_STOP_CONF\n"); - break; - case HIF_PS_ADH_SET_REQ: - netdev_info(dev, "HIF_PS_ADH_SET_REQ\n"); - break; - case HIF_PS_ADH_SET_CONF: - netdev_info(dev, "HIF_PS_ADH_SET_CONF\n"); - break; - case HIF_INFRA_SET_REQ: - netdev_info(dev, "HIF_INFRA_SET_REQ\n"); - break; - case HIF_INFRA_SET_CONF: - netdev_info(dev, "HIF_INFRA_SET_CONF\n"); - break; - case HIF_ADH_SET_REQ: - netdev_info(dev, "HIF_ADH_SET_REQ\n"); - break; - case HIF_ADH_SET_CONF: - netdev_info(dev, "HIF_ADH_SET_CONF\n"); - break; - case HIF_AP_SET_REQ: - netdev_info(dev, "HIF_AP_SET_REQ\n"); - break; - case HIF_AP_SET_CONF: - netdev_info(dev, "HIF_AP_SET_CONF\n"); - break; - case HIF_ASSOC_INFO_IND: - netdev_info(dev, "HIF_ASSOC_INFO_IND\n"); - break; - case HIF_MIC_FAILURE_REQ: - netdev_info(dev, "HIF_MIC_FAILURE_REQ\n"); - break; - case HIF_MIC_FAILURE_CONF: - netdev_info(dev, "HIF_MIC_FAILURE_CONF\n"); - break; - case HIF_SCAN_REQ: - netdev_info(dev, "HIF_SCAN_REQ\n"); - break; - case HIF_SCAN_CONF: - netdev_info(dev, "HIF_SCAN_CONF\n"); - break; - case HIF_PHY_INFO_REQ: - 
netdev_info(dev, "HIF_PHY_INFO_REQ\n"); - break; - case HIF_PHY_INFO_CONF: - netdev_info(dev, "HIF_PHY_INFO_CONF\n"); - break; - case HIF_SLEEP_REQ: - netdev_info(dev, "HIF_SLEEP_REQ\n"); - break; - case HIF_SLEEP_CONF: - netdev_info(dev, "HIF_SLEEP_CONF\n"); - break; - case HIF_PHY_INFO_IND: - netdev_info(dev, "HIF_PHY_INFO_IND\n"); - break; - case HIF_SCAN_IND: - netdev_info(dev, "HIF_SCAN_IND\n"); - break; - case HIF_INFRA_SET2_REQ: - netdev_info(dev, "HIF_INFRA_SET2_REQ\n"); - break; - case HIF_INFRA_SET2_CONF: - netdev_info(dev, "HIF_INFRA_SET2_CONF\n"); - break; - case HIF_ADH_SET2_REQ: - netdev_info(dev, "HIF_ADH_SET2_REQ\n"); - break; - case HIF_ADH_SET2_CONF: - netdev_info(dev, "HIF_ADH_SET2_CONF\n"); - } -} - -/* get host command history */ -static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - int i, event; - struct ks_wlan_private *priv = netdev_priv(dev); - - for (i = 63; i >= 0; i--) { - event = - priv->hostt.buff[(priv->hostt.qtail - 1 - i) % - SME_EVENT_BUFF_SIZE]; - print_hif_event(dev, event); - } - return 0; -} - -/* Structures to export the Wireless Handlers */ - -static const struct iw_priv_args ks_wlan_private_args[] = { -/*{ cmd, set_args, get_args, name[16] } */ - {KS_WLAN_GET_FIRM_VERSION, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_CHAR | (128 + 1), "GetFirmwareVer"}, - {KS_WLAN_SET_WPS_ENABLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetWPSEnable"}, - {KS_WLAN_GET_WPS_ENABLE, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetW"}, - {KS_WLAN_SET_WPS_PROBE_REQ, IW_PRIV_TYPE_BYTE | 2047, IW_PRIV_TYPE_NONE, - "SetWPSProbeReq"}, - {KS_WLAN_SET_PREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetPreamble"}, - {KS_WLAN_GET_PREAMBLE, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPreamble"}, - {KS_WLAN_SET_POWER_SAVE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetPowerSave"}, - {KS_WLAN_GET_POWER_SAVE, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPowerSave"}, - {KS_WLAN_SET_SCAN_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetScanType"}, - {KS_WLAN_GET_SCAN_TYPE, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetScanType"}, - {KS_WLAN_SET_RX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetRxGain"}, - {KS_WLAN_GET_RX_GAIN, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetRxGain"}, - {KS_WLAN_HOSTT, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_CHAR | (128 + 1), - "hostt"}, - {KS_WLAN_SET_BEACON_LOST, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetBeaconLost"}, - {KS_WLAN_GET_BEACON_LOST, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetBeaconLost"}, - {KS_WLAN_SET_SLEEP_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetSleepMode"}, - {KS_WLAN_GET_SLEEP_MODE, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetSleepMode"}, - {KS_WLAN_SET_TX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetTxGain"}, - {KS_WLAN_GET_TX_GAIN, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetTxGain"}, - {KS_WLAN_SET_PHY_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetPhyType"}, - {KS_WLAN_GET_PHY_TYPE, IW_PRIV_TYPE_NONE, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPhyType"}, - {KS_WLAN_SET_CTS_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_NONE, "SetCtsMode"}, - 
	{KS_WLAN_GET_CTS_MODE, IW_PRIV_TYPE_NONE,
-	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetCtsMode"},
-	{KS_WLAN_GET_EEPROM_CKSUM, IW_PRIV_TYPE_NONE,
-	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetChecksum"},
-};
-
-static const iw_handler ks_wlan_handler[] = {
-	IW_HANDLER(SIOCSIWCOMMIT, ks_wlan_config_commit),
-	IW_HANDLER(SIOCGIWNAME, ks_wlan_get_name),
-	IW_HANDLER(SIOCSIWFREQ, ks_wlan_set_freq),
-	IW_HANDLER(SIOCGIWFREQ, ks_wlan_get_freq),
-	IW_HANDLER(SIOCSIWMODE, ks_wlan_set_mode),
-	IW_HANDLER(SIOCGIWMODE, ks_wlan_get_mode),
-	IW_HANDLER(SIOCGIWRANGE, ks_wlan_get_range),
-	IW_HANDLER(SIOCGIWSTATS, ks_wlan_get_iwstats),
-	IW_HANDLER(SIOCSIWAP, ks_wlan_set_wap),
-	IW_HANDLER(SIOCGIWAP, ks_wlan_get_wap),
-	IW_HANDLER(SIOCSIWMLME, ks_wlan_set_mlme),
-	IW_HANDLER(SIOCGIWAPLIST, ks_wlan_get_aplist),
-	IW_HANDLER(SIOCSIWSCAN, ks_wlan_set_scan),
-	IW_HANDLER(SIOCGIWSCAN, ks_wlan_get_scan),
-	IW_HANDLER(SIOCSIWESSID, ks_wlan_set_essid),
-	IW_HANDLER(SIOCGIWESSID, ks_wlan_get_essid),
-	IW_HANDLER(SIOCSIWNICKN, ks_wlan_set_nick),
-	IW_HANDLER(SIOCGIWNICKN, ks_wlan_get_nick),
-	IW_HANDLER(SIOCSIWRATE, ks_wlan_set_rate),
-	IW_HANDLER(SIOCGIWRATE, ks_wlan_get_rate),
-	IW_HANDLER(SIOCSIWRTS, ks_wlan_set_rts),
-	IW_HANDLER(SIOCGIWRTS, ks_wlan_get_rts),
-	IW_HANDLER(SIOCSIWFRAG, ks_wlan_set_frag),
-	IW_HANDLER(SIOCGIWFRAG, ks_wlan_get_frag),
-	IW_HANDLER(SIOCSIWENCODE, ks_wlan_set_encode),
-	IW_HANDLER(SIOCGIWENCODE, ks_wlan_get_encode),
-	IW_HANDLER(SIOCSIWPOWER, ks_wlan_set_power),
-	IW_HANDLER(SIOCGIWPOWER, ks_wlan_get_power),
-	IW_HANDLER(SIOCSIWGENIE, ks_wlan_set_genie),
-	IW_HANDLER(SIOCSIWAUTH, ks_wlan_set_auth_mode),
-	IW_HANDLER(SIOCGIWAUTH, ks_wlan_get_auth_mode),
-	IW_HANDLER(SIOCSIWENCODEEXT, ks_wlan_set_encode_ext),
-	IW_HANDLER(SIOCGIWENCODEEXT, ks_wlan_get_encode_ext),
-	IW_HANDLER(SIOCSIWPMKSA, ks_wlan_set_pmksa),
-};
-
-/* private_handler */
-static const iw_handler ks_wlan_private_handler[] = {
-	NULL,				/* 0 */
-	NULL,				/* 1, KS_WLAN_GET_DRIVER_VERSION */
-	NULL,				/* 2 */
-	ks_wlan_get_firmware_version,	/* 3 KS_WLAN_GET_FIRM_VERSION */
-	ks_wlan_set_wps_enable,		/* 4 KS_WLAN_SET_WPS_ENABLE */
-	ks_wlan_get_wps_enable,		/* 5 KS_WLAN_GET_WPS_ENABLE */
-	ks_wlan_set_wps_probe_req,	/* 6 KS_WLAN_SET_WPS_PROBE_REQ */
-	ks_wlan_get_eeprom_cksum,	/* 7 KS_WLAN_GET_CONNECT */
-	ks_wlan_set_preamble,		/* 8 KS_WLAN_SET_PREAMBLE */
-	ks_wlan_get_preamble,		/* 9 KS_WLAN_GET_PREAMBLE */
-	ks_wlan_set_power_mgmt,		/* 10 KS_WLAN_SET_POWER_SAVE */
-	ks_wlan_get_power_mgmt,		/* 11 KS_WLAN_GET_POWER_SAVE */
-	ks_wlan_set_scan_type,		/* 12 KS_WLAN_SET_SCAN_TYPE */
-	ks_wlan_get_scan_type,		/* 13 KS_WLAN_GET_SCAN_TYPE */
-	ks_wlan_set_rx_gain,		/* 14 KS_WLAN_SET_RX_GAIN */
-	ks_wlan_get_rx_gain,		/* 15 KS_WLAN_GET_RX_GAIN */
-	ks_wlan_hostt,			/* 16 KS_WLAN_HOSTT */
-	NULL,				/* 17 */
-	ks_wlan_set_beacon_lost,	/* 18 KS_WLAN_SET_BECAN_LOST */
-	ks_wlan_get_beacon_lost,	/* 19 KS_WLAN_GET_BECAN_LOST */
-	ks_wlan_set_tx_gain,		/* 20 KS_WLAN_SET_TX_GAIN */
-	ks_wlan_get_tx_gain,		/* 21 KS_WLAN_GET_TX_GAIN */
-	ks_wlan_set_phy_type,		/* 22 KS_WLAN_SET_PHY_TYPE */
-	ks_wlan_get_phy_type,		/* 23 KS_WLAN_GET_PHY_TYPE */
-	ks_wlan_set_cts_mode,		/* 24 KS_WLAN_SET_CTS_MODE */
-	ks_wlan_get_cts_mode,		/* 25 KS_WLAN_GET_CTS_MODE */
-	NULL,				/* 26 */
-	NULL,				/* 27 */
-	ks_wlan_set_sleep_mode,		/* 28 KS_WLAN_SET_SLEEP_MODE */
-	ks_wlan_get_sleep_mode,		/* 29 KS_WLAN_GET_SLEEP_MODE */
-	NULL,				/* 30 */
-	NULL,				/* 31 */
-};
-
-static const struct iw_handler_def ks_wlan_handler_def = {
-	.num_standard = ARRAY_SIZE(ks_wlan_handler),
.num_private = ARRAY_SIZE(ks_wlan_private_handler), - .num_private_args = ARRAY_SIZE(ks_wlan_private_args), - .standard = ks_wlan_handler, - .private = ks_wlan_private_handler, - .private_args = ks_wlan_private_args, - .get_wireless_stats = ks_get_wireless_stats, -}; - -static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq, - int cmd) -{ - int ret; - struct iwreq *wrq = (struct iwreq *)rq; - - switch (cmd) { - case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */ - ret = ks_wlan_set_stop_request(dev, NULL, &wrq->u, NULL); - break; - // All other calls are currently unsupported - default: - ret = -EOPNOTSUPP; - } - - return ret; -} - -static -struct net_device_stats *ks_wlan_get_stats(struct net_device *dev) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->dev_state < DEVICE_STATE_READY) - return NULL; /* not finished initialize */ - - return &priv->nstats; -} - -static -int ks_wlan_set_mac_address(struct net_device *dev, void *addr) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - struct sockaddr *mac_addr = (struct sockaddr *)addr; - - if (netif_running(dev)) - return -EBUSY; - eth_hw_addr_set(dev, mac_addr->sa_data); - ether_addr_copy(priv->eth_addr, mac_addr->sa_data); - - priv->mac_address_valid = false; - hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST); - netdev_info(dev, "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr); - return 0; -} - -static -void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - netdev_dbg(dev, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead, - priv->tx_dev.qtail); - if (!netif_queue_stopped(dev)) - netif_stop_queue(dev); - priv->nstats.tx_errors++; - netif_wake_queue(dev); -} - -static -netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - int ret; - - netdev_dbg(dev, "in_interrupt()=%ld\n", in_interrupt()); - - if (!skb) { - netdev_err(dev, "ks_wlan: skb == NULL!!!\n"); - return 0; - } - if (priv->dev_state < DEVICE_STATE_READY) { - dev_kfree_skb(skb); - return 0; /* not finished initialize */ - } - - if (netif_running(dev)) - netif_stop_queue(dev); - - ret = hostif_data_request(priv, skb); - netif_trans_update(dev); - - if (ret) - netdev_err(dev, "hostif_data_request error: =%d\n", ret); - - return 0; -} - -void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb) -{ - priv->nstats.tx_packets++; - - if (netif_queue_stopped(priv->net_dev)) - netif_wake_queue(priv->net_dev); - - if (skb) { - priv->nstats.tx_bytes += skb->len; - dev_kfree_skb(skb); - } -} - -/* - * Set or clear the multicast filter for this adaptor. - * This routine is not state sensitive and need not be SMP locked. - */ -static -void ks_wlan_set_rx_mode(struct net_device *dev) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - if (priv->dev_state < DEVICE_STATE_READY) - return; /* not finished initialize */ - hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST); -} - -static -int ks_wlan_open(struct net_device *dev) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - priv->cur_rx = 0; - - if (!priv->mac_address_valid) { - netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name); - return -EBUSY; - } - netif_start_queue(dev); - - return 0; -} - -static -int ks_wlan_close(struct net_device *dev) -{ - netif_stop_queue(dev); - - return 0; -} - -/* Operational parameters that usually are not changed. */ -/* Time in jiffies before concluding the transmitter is hung. 
*/ -#define TX_TIMEOUT (3 * HZ) -static const unsigned char dummy_addr[] = { - 0x00, 0x0b, 0xe3, 0x00, 0x00, 0x00 -}; - -static const struct net_device_ops ks_wlan_netdev_ops = { - .ndo_start_xmit = ks_wlan_start_xmit, - .ndo_open = ks_wlan_open, - .ndo_stop = ks_wlan_close, - .ndo_do_ioctl = ks_wlan_netdev_ioctl, - .ndo_set_mac_address = ks_wlan_set_mac_address, - .ndo_get_stats = ks_wlan_get_stats, - .ndo_tx_timeout = ks_wlan_tx_timeout, - .ndo_set_rx_mode = ks_wlan_set_rx_mode, -}; - -int ks_wlan_net_start(struct net_device *dev) -{ - struct ks_wlan_private *priv; - /* int rc; */ - - priv = netdev_priv(dev); - priv->mac_address_valid = false; - priv->is_device_open = true; - priv->need_commit = 0; - /* phy information update timer */ - atomic_set(&update_phyinfo, 0); - timer_setup(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout, 0); - - /* dummy address set */ - ether_addr_copy(priv->eth_addr, dummy_addr); - eth_hw_addr_set(dev, priv->eth_addr); - - /* The ks_wlan-specific entries in the device structure. */ - dev->netdev_ops = &ks_wlan_netdev_ops; - dev->wireless_handlers = &ks_wlan_handler_def; - dev->watchdog_timeo = TX_TIMEOUT; - - netif_carrier_off(dev); - - return 0; -} - -int ks_wlan_net_stop(struct net_device *dev) -{ - struct ks_wlan_private *priv = netdev_priv(dev); - - priv->is_device_open = false; - del_timer_sync(&update_phyinfo_timer); - - if (netif_running(dev)) - netif_stop_queue(dev); - - return 0; -} - -/** - * is_connect_status() - return true if status is 'connected' - * @status: high bit is used as FORCE_DISCONNECT, low bits used for - * connect status. - */ -bool is_connect_status(u32 status) -{ - return (status & CONNECT_STATUS_MASK) == CONNECT_STATUS; -} - -/** - * is_disconnect_status() - return true if status is 'disconnected' - * @status: high bit is used as FORCE_DISCONNECT, low bits used for - * disconnect status. - */ -bool is_disconnect_status(u32 status) -{ - return (status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS; -} diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h index fefbe3cd08f33d..4cfe9a0e0d56fa 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp.h @@ -200,7 +200,7 @@ struct atomisp_dis_vector { }; /* DVS 2.0 Coefficient types. This structure contains 4 pointers to - * arrays that contain the coeffients for each type. + * arrays that contain the coefficients for each type. */ struct atomisp_dvs2_coef_types { short __user *odd_real; /** real part of the odd coefficients*/ @@ -698,7 +698,7 @@ enum atomisp_burst_capture_options { /* Digital Image Stabilization: * 1. get dis statistics: reads DIS statistics from ISP (every frame) * 2. set dis coefficients: set DIS filter coefficients (one time) - * 3. set dis motion vecotr: set motion vector (result of DIS, every frame) + * 3. 
set dis motion vector: set motion vector (result of DIS, every frame) */ #define ATOMISP_IOC_G_DIS_STAT \ _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics) diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h index fdeb247036b06f..064449fd51afc1 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h @@ -116,7 +116,7 @@ struct intel_v4l2_subdev_table { }; /* - * Sensor of external ISP can send multiple steams with different mipi data + * Sensor of external ISP can send multiple streams with different mipi data * type in the same virtual channel. This information needs to come from the * sensor or external ISP */ @@ -138,7 +138,7 @@ struct atomisp_input_stream_info { /* * if more isys_configs is more than 0, sensor needs to configure the * input format differently. width and height can be 0. If width and - * height is not zero, then the corresponsing data needs to be set + * height is not zero, then the corresponding data needs to be set */ struct atomisp_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL]; }; @@ -175,8 +175,6 @@ int atomisp_register_sensor_no_gmin(struct v4l2_subdev *subdev, u32 lanes, enum atomisp_bayer_order bayer_order); void atomisp_unregister_subdev(struct v4l2_subdev *subdev); -int v4l2_get_acpi_sensor_info(struct device *dev, char **module_id_str); - /* API from old platform_camera.h, new CPUID implementation */ #define __IS_SOC(x) (boot_cpu_data.x86_vfm == x) #define __IS_SOCS(x, y) (boot_cpu_data.x86_vfm == x || boot_cpu_data.x86_vfm == y) diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c index d789d38ef68993..6abda358a72f76 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c +++ b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c @@ -109,6 +109,8 @@ static struct gmin_cfg_var lenovo_ideapad_miix_310_vars[] = { static struct gmin_cfg_var xiaomi_mipad2_vars[] = { /* _DSM contains the wrong CsiPort for the front facing OV5693 sensor */ { "INT33BE:00", "CsiPort", "0" }, + /* _DSM contains the wrong CsiLanes for the back facing T4KA3 sensor */ + { "XMCC0003:00", "CsiLanes", "4" }, {} }; diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c index 50c4123ba00664..b180fcbea9b1e6 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c @@ -441,6 +441,8 @@ const struct vb2_ops atomisp_vb2_ops = { .buf_queue = atomisp_buf_queue, .start_streaming = atomisp_start_streaming, .stop_streaming = atomisp_stop_streaming, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, }; static void atomisp_dev_init_struct(struct atomisp_device *isp) diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c index 3a3e84a035e262..202497695e46fb 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c @@ -797,12 +797,12 @@ static int atomisp_init_subdev_pipe(struct atomisp_sub_device *asd, pipe->vb_queue.ops = &atomisp_vb2_ops; pipe->vb_queue.mem_ops = &vb2_vmalloc_memops; pipe->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + pipe->vb_queue.lock = &pipe->vb_queue_mutex; ret = vb2_queue_init(&pipe->vb_queue); if (ret) return ret; 
pipe->vdev.queue = &pipe->vb_queue; - pipe->vdev.queue->lock = &pipe->vb_queue_mutex; INIT_LIST_HEAD(&pipe->buffers_in_css); INIT_LIST_HEAD(&pipe->activeq); diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h index d0ba59cedc921f..6f0a8fe868bd79 100644 --- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h +++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h @@ -20,8 +20,10 @@ #include "vmem_global.h" typedef u16 t_vmem_elem; +typedef s16 t_svmem_elem; -#define VMEM_ARRAY(x, s) t_vmem_elem x[s / ISP_NWAY][ISP_NWAY] +#define VMEM_ARRAY(x, s) t_vmem_elem x[(s) / ISP_NWAY][ISP_NWAY] +#define SVMEM_ARRAY(x, s) t_svmem_elem x[(s) / ISP_NWAY][ISP_NWAY] void isp_vmem_load( const isp_ID_t ID, diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h index d294ac402de86b..c5ab13511db83a 100644 --- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h +++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h @@ -27,7 +27,8 @@ * #define assert(cnd) BUG_ON(cnd) * but that causes many compiler warnings (==errors) under Android * because it seems that the BUG_ON() macro is not seen as a check by - * gcc like the BUG() macro is. */ + * gcc like the BUG() macro is. + */ #define assert(cnd) \ do { \ if (!(cnd)) \ @@ -37,7 +38,8 @@ #ifndef PIPE_GENERATION /* Deprecated OP___assert, this is still used in ~1000 places * in the code. This will be removed over time. - * The implementation for the pipe generation tool is in see support.isp.h */ + * The implementation for the pipe generation tool is in see support.isp.h + */ #define OP___assert(cnd) assert(cnd) static inline void compile_time_assert(unsigned int cond) diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h index 693154e8ec2f91..7e37f08090342a 100644 --- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h +++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h @@ -94,7 +94,7 @@ hrt_data csi_rx_fe_ctrl_reg_load( const hrt_address reg); /** * @brief Store a value to the register. - * Store a value to the registe of the csi rx fe. + * Store a value to the register of the csi rx fe. * * @param[in] ID The global unique ID for the ibuf-controller instance. * @param[in] reg The offset address of the register. @@ -119,7 +119,7 @@ hrt_data csi_rx_be_ctrl_reg_load( const hrt_address reg); /** * @brief Store a value to the register. - * Store a value to the registe of the csi rx be. + * Store a value to the register of the csi rx be. * * @param[in] ID The global unique ID for the ibuf-controller instance. * @param[in] reg The offset address of the register. 
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h index 160c496784b7fc..907f9ebcc60db4 100644 --- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h +++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h @@ -28,12 +28,6 @@ #define CEIL_SHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b)) #define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b)) -#if !defined(PIPE_GENERATION) - -#define ceil_div(a, b) (CEIL_DIV(a, b)) - -#endif /* !defined(PIPE_GENERATION) */ - /* * For SP and ISP, SDK provides the definition of OP_std_modadd. * We need it only for host diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c index 3e2899ad85174f..e8c5d728fd55be 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c @@ -204,9 +204,6 @@ static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type, goto bind_err; } - dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n", - bo->start, bytes, type, vmalloc_noprof); - return bo->start; bind_err: @@ -231,8 +228,6 @@ void hmm_free(ia_css_ptr virt) { struct hmm_buffer_object *bo; - dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt); - if (WARN_ON(virt == mmgr_EXCEPTION)) return; diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c index 457a004e194d0f..b75cfd3096d8c3 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c @@ -45,7 +45,8 @@ ia_css_bnr_dump( const struct sh_css_isp_bnr_params *bnr, unsigned int level) { - if (!bnr) return; + if (!bnr) + return; ia_css_debug_dtrace(level, "Bayer Noise Reduction:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "bnr_gain_all", bnr->gain_all); diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c index 25e3f0822fb874..e66faeda361347 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c @@ -47,7 +47,8 @@ ia_css_de_dump( const struct sh_css_isp_de_params *de, unsigned int level) { - if (!de) return; + if (!de) + return; ia_css_debug_dtrace(level, "Demosaic:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "de_pixelnoise", de->pixelnoise); diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c index e4fc90f88e243d..b79d78e5b77f5b 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c @@ -172,25 +172,21 @@ ia_css_eed1_8_vmem_encode( base = shuffle_block * i; for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) { - to->e_dew_enh_x[0][base + j] = min_t(int, max_t(int, - from->dew_enhance_seg_x[j], 0), - 8191); - to->e_dew_enh_y[0][base + j] = min_t(int, max_t(int, - from->dew_enhance_seg_y[j], -8192), - 8191); + to->e_dew_enh_x[0][base + j] = clamp(from->dew_enhance_seg_x[j], + 0, 8191); + to->e_dew_enh_y[0][base + j] = clamp(from->dew_enhance_seg_y[j], + -8192, 8191); } for (j = 0; j < 
(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); j++) { - to->e_dew_enh_a[0][base + j] = min_t(int, max_t(int, - from->dew_enhance_seg_slope[j], - -8192), 8191); + to->e_dew_enh_a[0][base + j] = clamp(from->dew_enhance_seg_slope[j], + -8192, 8191); /* Convert dew_enhance_seg_exp to flag: * 0 -> 0 * 1...13 -> 1 */ - to->e_dew_enh_f[0][base + j] = (min_t(int, max_t(int, - from->dew_enhance_seg_exp[j], - 0), 13) > 0); + to->e_dew_enh_f[0][base + j] = clamp(from->dew_enhance_seg_exp[j], + 0, 13) > 0; } /* Hard-coded to 0, in order to be able to handle out of @@ -276,7 +272,7 @@ ia_css_eed1_8_encode( for (i = 0; i < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); i++) { min_exp = max(min_exp, from->dew_enhance_seg_exp[i]); } - to->e_dew_enh_asr = 13 - min(max(min_exp, 0), 13); + to->e_dew_enh_asr = 13 - clamp(min_exp, 0, 13); to->dedgew_max = from->dedgew_max; } diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h index 6fb3b38f49e762..b9eeeb592ec818 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h +++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h @@ -94,8 +94,8 @@ struct eed1_8_vmem_params { VMEM_ARRAY(e_dew_enh_x, ISP_VEC_NELEMS); - VMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS); - VMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS); + SVMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS); + SVMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS); VMEM_ARRAY(e_dew_enh_f, ISP_VEC_NELEMS); VMEM_ARRAY(chgrinv_x, ISP_VEC_NELEMS); VMEM_ARRAY(chgrinv_a, ISP_VEC_NELEMS); diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c index 57b5e11e1cfe5e..8ccfa99c61efa2 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c @@ -43,7 +43,8 @@ ia_css_fpn_dump( const struct sh_css_isp_fpn_params *fpn, unsigned int level) { - if (!fpn) return; + if (!fpn) + return; ia_css_debug_dtrace(level, "Fixed Pattern Noise Reduction:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fpn_shift", fpn->shift); diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c index 0091e2a3da5260..c32659894c29d5 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c @@ -13,9 +13,11 @@ * more details. 
*/ +#include +#include + #include "ia_css_bayer_io.host.h" #include "dma.h" -#include "math_support.h" #ifndef IA_CSS_NO_DEBUG #include "ia_css_debug.h" #endif @@ -29,9 +31,8 @@ int ia_css_bayer_io_config(const struct ia_css_binary *binary, const struct ia_css_frame **out_frames = (const struct ia_css_frame **) &args->out_frame; const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame); - const unsigned int ddr_bits_per_element = sizeof(short) * 8; - const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, - ddr_bits_per_element); + const unsigned int ddr_elems_per_word = + DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short)); unsigned int size_get = 0, size_put = 0; unsigned int offset = 0; int ret; diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c index 32c504a950cef1..5b2d5023b5eeed 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c @@ -13,9 +13,11 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ +#include +#include + #include "ia_css_yuv444_io.host.h" #include "dma.h" -#include "math_support.h" #ifndef IA_CSS_NO_DEBUG #include "ia_css_debug.h" #endif @@ -29,9 +31,8 @@ int ia_css_yuv444_io_config(const struct ia_css_binary *binary, const struct ia_css_frame **out_frames = (const struct ia_css_frame **) &args->out_frame; const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame); - const unsigned int ddr_bits_per_element = sizeof(short) * 8; - const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, - ddr_bits_per_element); + const unsigned int ddr_elems_per_word = + DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short)); unsigned int size_get = 0, size_put = 0; unsigned int offset = 0; int ret; diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c index 5f186fb036426f..15386a773dc510 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c @@ -65,8 +65,7 @@ int ia_css_iterator_configure(const struct ia_css_binary *binary, * the original out res. for video pipe, it has two output pins --- out and * vf_out, so it can keep these two resolutions already. */ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW && - binary->vf_downscale_log2 > 0) - { + binary->vf_downscale_log2 > 0) { /* TODO: Remove this after preview output decimation is fixed * by configuring out&vf info files properly */ my_info.padded_width <<= binary->vf_downscale_log2; diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c index 70132d955e9bce..def2c8fb4b389d 100644 --- a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c +++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c @@ -108,7 +108,7 @@ compute_coring(int coring) * factor. Clip to [0, isp_scale-1). 
*/ isp_coring = ((coring * isp_scale) + offset) / host_scale; - return min(max(isp_coring, 0), isp_scale - 1); + return clamp(isp_coring, 0, isp_scale - 1); } /* @@ -168,15 +168,15 @@ ia_css_xnr3_encode( to->alpha.y0 = alpha_y0; to->alpha.u0 = alpha_u0; to->alpha.v0 = alpha_v0; - to->alpha.ydiff = min(max(alpha_ydiff, min_diff), max_diff); - to->alpha.udiff = min(max(alpha_udiff, min_diff), max_diff); - to->alpha.vdiff = min(max(alpha_vdiff, min_diff), max_diff); + to->alpha.ydiff = clamp(alpha_ydiff, min_diff, max_diff); + to->alpha.udiff = clamp(alpha_udiff, min_diff, max_diff); + to->alpha.vdiff = clamp(alpha_vdiff, min_diff, max_diff); /* coring parameters are expressed in q1.NN format */ to->coring.u0 = coring_u0; to->coring.v0 = coring_v0; - to->coring.udiff = min(max(coring_udiff, min_diff), max_diff); - to->coring.vdiff = min(max(coring_vdiff, min_diff), max_diff); + to->coring.udiff = clamp(coring_udiff, min_diff, max_diff); + to->coring.vdiff = clamp(coring_vdiff, min_diff, max_diff); /* blending strength is expressed in q1.NN format */ to->blending.strength = blending; diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c index b0f904a5e442ee..7ce2b2d6da11ae 100644 --- a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c +++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c @@ -328,7 +328,8 @@ ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary, dvs_info = &info->dvs_grid.dvs_grid_info; - /* for DIS, we use a division instead of a ceil_div. If this is smaller + /* + * For DIS, we use a division instead of a DIV_ROUND_UP(). If this is smaller * than the 3a grid size, it indicates that the outer values are not * valid for DIS. 
*/ @@ -923,8 +924,8 @@ ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo, return 0; } -static int __ia_css_binary_find(struct ia_css_binary_descr *descr, - struct ia_css_binary *binary) { +int ia_css_binary_find(struct ia_css_binary_descr *descr, struct ia_css_binary *binary) +{ int mode; bool online; bool two_ppc; @@ -953,10 +954,8 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, /* MW: used after an error check, may accept NULL, but doubtfull */ assert(binary); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n", - descr, descr->mode, - binary); + dev_dbg(atomisp_dev, "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n", + descr, descr->mode, binary); mode = descr->mode; online = descr->online; @@ -1001,15 +1000,15 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, } /* print a map of the binary file */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "BINARY INFO:\n"); + dev_dbg(atomisp_dev, "BINARY INFO:\n"); for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) { xcandidate = binary_infos[i]; if (xcandidate) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%d:\n", i); + dev_dbg(atomisp_dev, "%d:\n", i); while (xcandidate) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " Name:%s Type:%d Cont:%d\n", - xcandidate->blob->name, xcandidate->type, - xcandidate->sp.enable.continuous); + dev_dbg(atomisp_dev, " Name:%s Type:%d Cont:%d\n", + xcandidate->blob->name, xcandidate->type, + xcandidate->sp.enable.continuous); xcandidate = xcandidate->next; } } @@ -1021,9 +1020,9 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, struct ia_css_binary_info *candidate = &xcandidate->sp; /* printf("sh_css_binary_find: evaluating candidate: * %d\n",candidate->id); */ - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n", - candidate, candidate->pipeline.mode, candidate->id); + dev_dbg(atomisp_dev, + "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n", + candidate, candidate->pipeline.mode, candidate->id); /* * MW: Only a limited set of jointly configured binaries can @@ -1032,17 +1031,16 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, */ if (!candidate->enable.continuous && continuous && (mode != IA_CSS_BINARY_MODE_COPY)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n", - __LINE__, candidate->enable.continuous, - continuous, mode, - IA_CSS_BINARY_MODE_COPY); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n", + __LINE__, candidate->enable.continuous, + continuous, mode, IA_CSS_BINARY_MODE_COPY); continue; } if (striped && candidate->iterator.num_stripes == 1) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: binary is not striped\n", - __LINE__); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: binary is not striped\n", + __LINE__); continue; } @@ -1050,58 +1048,38 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, (mode != IA_CSS_BINARY_MODE_COPY) && (mode != IA_CSS_BINARY_MODE_CAPTURE_PP) && (mode != IA_CSS_BINARY_MODE_VF_PP)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d != %d)\n", - __LINE__, - candidate->pipeline.isp_pipe_version, isp_pipe_version); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d != %d)\n", + __LINE__, candidate->pipeline.isp_pipe_version, isp_pipe_version); continue; } if 
(!candidate->enable.reduced_pipe && enable_reduced_pipe) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, - candidate->enable.reduced_pipe, - enable_reduced_pipe); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n", + __LINE__, candidate->enable.reduced_pipe, enable_reduced_pipe); continue; } if (!candidate->enable.dvs_6axis && enable_dvs_6axis) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, - candidate->enable.dvs_6axis, - enable_dvs_6axis); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n", + __LINE__, candidate->enable.dvs_6axis, enable_dvs_6axis); continue; } if (candidate->enable.high_speed && !enable_high_speed) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d && !%d\n", - __LINE__, - candidate->enable.high_speed, - enable_high_speed); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n", + __LINE__, candidate->enable.high_speed, enable_high_speed); continue; } if (!candidate->enable.xnr && need_xnr) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d && !%d\n", - __LINE__, - candidate->enable.xnr, - need_xnr); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n", + __LINE__, candidate->enable.xnr, need_xnr); continue; } if (!(candidate->enable.ds & 2) && enable_yuv_ds) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, - ((candidate->enable.ds & 2) != 0), - enable_yuv_ds); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n", + __LINE__, ((candidate->enable.ds & 2) != 0), enable_yuv_ds); continue; } if ((candidate->enable.ds & 2) && !enable_yuv_ds) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d && !%d\n", - __LINE__, - ((candidate->enable.ds & 2) != 0), - enable_yuv_ds); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n", + __LINE__, ((candidate->enable.ds & 2) != 0), enable_yuv_ds); continue; } @@ -1115,100 +1093,85 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, candidate->vf_dec.is_variable || /* or more than one output pin. 
*/ xcandidate->num_output_pins > 1)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n", - __LINE__, req_vf_info, - candidate->enable.vf_veceven, - candidate->vf_dec.is_variable, - xcandidate->num_output_pins, 1); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n", + __LINE__, req_vf_info, candidate->enable.vf_veceven, + candidate->vf_dec.is_variable, xcandidate->num_output_pins, 1); continue; } if (!candidate->enable.dvs_envelope && need_dvs) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, - candidate->enable.dvs_envelope, (int)need_dvs); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n", + __LINE__, candidate->enable.dvs_envelope, (int)need_dvs); continue; } /* internal_res check considers input, output, and dvs envelope sizes */ ia_css_binary_internal_res(req_in_info, req_bds_out_info, req_bin_out_info, &dvs_env, candidate, &internal_res); if (internal_res.width > candidate->internal.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d)\n", - __LINE__, internal_res.width, - candidate->internal.max_width); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n", + __LINE__, internal_res.width, candidate->internal.max_width); continue; } if (internal_res.height > candidate->internal.max_height) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d)\n", - __LINE__, internal_res.height, - candidate->internal.max_height); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n", + __LINE__, internal_res.height, candidate->internal.max_height); continue; } if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && %d\n", - __LINE__, candidate->enable.ds, (int)need_ds); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n", + __LINE__, candidate->enable.ds, (int)need_ds); continue; } if (!candidate->enable.uds && !candidate->enable.dvs_6axis && need_dz) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n", - __LINE__, candidate->enable.uds, - candidate->enable.dvs_6axis, (int)need_dz); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n", + __LINE__, candidate->enable.uds, candidate->enable.dvs_6axis, + (int)need_dz); continue; } if (online && candidate->input.source == IA_CSS_BINARY_INPUT_MEMORY) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n", - __LINE__, online, candidate->input.source, - IA_CSS_BINARY_INPUT_MEMORY); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n", + __LINE__, online, candidate->input.source, + IA_CSS_BINARY_INPUT_MEMORY); continue; } if (!online && candidate->input.source == IA_CSS_BINARY_INPUT_SENSOR) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n", - __LINE__, online, candidate->input.source, - IA_CSS_BINARY_INPUT_SENSOR); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n", + __LINE__, online, candidate->input.source, + IA_CSS_BINARY_INPUT_SENSOR); continue; } if (req_bin_out_info->res.width < candidate->output.min_width || req_bin_out_info->res.width > 
candidate->output.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n", - __LINE__, - req_bin_out_info->padded_width, - candidate->output.min_width, - req_bin_out_info->padded_width, - candidate->output.max_width); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n", + __LINE__, req_bin_out_info->padded_width, + candidate->output.min_width, req_bin_out_info->padded_width, + candidate->output.max_width); continue; } if (xcandidate->num_output_pins > 1 && /* in case we have a second output pin, */ req_vf_info) { /* and we need vf output. */ if (req_vf_info->res.width > candidate->output.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d < %d)\n", - __LINE__, - req_vf_info->res.width, - candidate->output.max_width); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: (%d < %d)\n", + __LINE__, req_vf_info->res.width, + candidate->output.max_width); continue; } } if (req_in_info->padded_width > candidate->input.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d)\n", - __LINE__, req_in_info->padded_width, - candidate->input.max_width); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n", + __LINE__, req_in_info->padded_width, candidate->input.max_width); continue; } if (!binary_supports_output_format(xcandidate, req_bin_out_info->format)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: !%d\n", - __LINE__, - binary_supports_output_format(xcandidate, req_bin_out_info->format)); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d\n", + __LINE__, binary_supports_output_format(xcandidate, + req_bin_out_info->format)); continue; } if (xcandidate->num_output_pins > 1 && @@ -1217,11 +1180,10 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, /* check if the required vf format is supported. */ !binary_supports_output_format(xcandidate, req_vf_info->format)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n", - __LINE__, xcandidate->num_output_pins, 1, - req_vf_info, - binary_supports_output_format(xcandidate, req_vf_info->format)); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n", + __LINE__, xcandidate->num_output_pins, 1, req_vf_info, + binary_supports_output_format(xcandidate, req_vf_info->format)); continue; } @@ -1229,11 +1191,11 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, if (xcandidate->num_output_pins == 1 && req_vf_info && candidate->enable.vf_veceven && !binary_supports_vf_format(xcandidate, req_vf_info->format)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n", - __LINE__, xcandidate->num_output_pins, 1, - req_vf_info, candidate->enable.vf_veceven, - binary_supports_vf_format(xcandidate, req_vf_info->format)); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n", + __LINE__, xcandidate->num_output_pins, 1, + req_vf_info, candidate->enable.vf_veceven, + binary_supports_vf_format(xcandidate, req_vf_info->format)); continue; } @@ -1241,37 +1203,31 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, if (xcandidate->num_output_pins == 1 && req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. 
*/ if (req_vf_info->res.width > candidate->output.max_width) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: (%d < %d)\n", - __LINE__, - req_vf_info->res.width, - candidate->output.max_width); + dev_dbg(atomisp_dev, + "ia_css_binary_find() [%d] continue: (%d < %d)\n", + __LINE__, req_vf_info->res.width, + candidate->output.max_width); continue; } } if (!supports_bds_factor(candidate->bds.supported_bds_factors, descr->required_bds_factor)) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", - __LINE__, candidate->bds.supported_bds_factors, - descr->required_bds_factor); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", + __LINE__, candidate->bds.supported_bds_factors, + descr->required_bds_factor); continue; } if (!candidate->enable.dpc && need_dpc) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", - __LINE__, candidate->enable.dpc, - descr->enable_dpc); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", + __LINE__, candidate->enable.dpc, descr->enable_dpc); continue; } if (candidate->uds.use_bci && enable_capture_pp_bli) { - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", - __LINE__, candidate->uds.use_bci, - descr->enable_capture_pp_bli); + dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n", + __LINE__, candidate->uds.use_bci, descr->enable_capture_pp_bli); continue; } @@ -1290,39 +1246,18 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr, break; } - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() selected = %p, mode = %d ID = %d\n", - xcandidate, xcandidate ? xcandidate->sp.pipeline.mode : 0, xcandidate ? xcandidate->sp.id : 0); - - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_binary_find() leave: return_err=%d\n", err); - if (!err && xcandidate) - dev_dbg(atomisp_dev, - "Using binary %s (id %d), type %d, mode %d, continuous %s\n", - xcandidate->blob->name, - xcandidate->sp.id, - xcandidate->type, + dev_dbg(atomisp_dev, "Using binary %s (id %d), type %d, mode %d, continuous %s\n", + xcandidate->blob->name, xcandidate->sp.id, xcandidate->type, xcandidate->sp.pipeline.mode, xcandidate->sp.enable.continuous ? "true" : "false"); + if (err) + dev_err(atomisp_dev, "Failed to find a firmware binary matching the pipeline parameters\n"); return err; } -int ia_css_binary_find(struct ia_css_binary_descr *descr, - struct ia_css_binary *binary) -{ - int ret = __ia_css_binary_find(descr, binary); - - if (unlikely(ret)) { - dev_dbg(atomisp_dev, "Seeking for binary failed at:"); - dump_stack(); - } - - return ret; -} - unsigned ia_css_binary_max_vf_width(void) { diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c index 52483498239d2d..2e0193671f4b0d 100644 --- a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c +++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c @@ -13,6 +13,8 @@ * more details. 
*/ +#include +#include #include /* for memcpy() */ #include "system_global.h" @@ -20,7 +22,6 @@ #include "ia_css_isys.h" #include "ia_css_debug.h" -#include "math_support.h" #include "virtual_isys.h" #include "isp.h" #include "sh_css_defs.h" @@ -558,7 +559,7 @@ static int32_t calculate_stride( bits_per_pixel = CEIL_MUL(bits_per_pixel, 8); pixels_per_word = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel; - words_per_line = ceil_div(pixels_per_line_padded, pixels_per_word); + words_per_line = DIV_ROUND_UP(pixels_per_line_padded, pixels_per_word); bytes_per_line = HIVE_ISP_DDR_WORD_BYTES * words_per_line; return bytes_per_line; @@ -690,7 +691,6 @@ static bool calculate_ibuf_ctrl_cfg( const isp2401_input_system_cfg_t *isys_cfg, ibuf_ctrl_cfg_t *cfg) { - const s32 bits_per_byte = 8; s32 bits_per_pixel; s32 bytes_per_pixel; s32 left_padding; @@ -698,7 +698,7 @@ static bool calculate_ibuf_ctrl_cfg( (void)input_port; bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel; - bytes_per_pixel = ceil_div(bits_per_pixel, bits_per_byte); + bytes_per_pixel = BITS_TO_BYTES(bits_per_pixel); left_padding = CEIL_MUL(isys_cfg->output_port_attr.left_padding, ISP_VEC_NELEMS) * bytes_per_pixel; diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c index 01f0b8a33c999c..ca97ea082cf4d6 100644 --- a/drivers/staging/media/atomisp/pci/sh_css.c +++ b/drivers/staging/media/atomisp/pci/sh_css.c @@ -5826,20 +5826,19 @@ need_yuv_scaler_stage(const struct ia_css_pipe *pipe) * Later, merge this with ia_css_pipe_create_cas_scaler_desc */ static int ia_css_pipe_create_cas_scaler_desc_single_output( - struct ia_css_frame_info *cas_scaler_in_info, - struct ia_css_frame_info *cas_scaler_out_info, - struct ia_css_frame_info *cas_scaler_vf_info, + struct ia_css_frame_info *in_info, + struct ia_css_frame_info *out_info, + struct ia_css_frame_info *vf_info, struct ia_css_cas_binary_descr *descr) { unsigned int i; unsigned int hor_ds_factor = 0, ver_ds_factor = 0; int err = 0; struct ia_css_frame_info tmp_in_info; - unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP; - assert(cas_scaler_in_info); - assert(cas_scaler_out_info); + assert(in_info); + assert(out_info); ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() enter:\n"); @@ -5847,10 +5846,8 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output( /* We assume that this function is used only for single output port case. 
*/ descr->num_output_stage = 1; - hor_ds_factor = CEIL_DIV(cas_scaler_in_info->res.width, - cas_scaler_out_info->res.width); - ver_ds_factor = CEIL_DIV(cas_scaler_in_info->res.height, - cas_scaler_out_info->res.height); + hor_ds_factor = CEIL_DIV(in_info->res.width, out_info->res.width); + ver_ds_factor = CEIL_DIV(in_info->res.height, out_info->res.height); /* use the same horizontal and vertical downscaling factor for simplicity */ assert(hor_ds_factor == ver_ds_factor); @@ -5895,30 +5892,29 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output( goto ERR; } - tmp_in_info = *cas_scaler_in_info; + tmp_in_info = *in_info; for (i = 0; i < descr->num_stage; i++) { descr->in_info[i] = tmp_in_info; - if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= - cas_scaler_out_info->res.width) { + if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= out_info->res.width) { descr->is_output_stage[i] = true; if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) { - descr->internal_out_info[i].res.width = cas_scaler_out_info->res.width; - descr->internal_out_info[i].res.height = cas_scaler_out_info->res.height; - descr->internal_out_info[i].padded_width = cas_scaler_out_info->padded_width; + descr->internal_out_info[i].res.width = out_info->res.width; + descr->internal_out_info[i].res.height = out_info->res.height; + descr->internal_out_info[i].padded_width = out_info->padded_width; descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420; } else { assert(i == (descr->num_stage - 1)); descr->internal_out_info[i].res.width = 0; descr->internal_out_info[i].res.height = 0; } - descr->out_info[i].res.width = cas_scaler_out_info->res.width; - descr->out_info[i].res.height = cas_scaler_out_info->res.height; - descr->out_info[i].padded_width = cas_scaler_out_info->padded_width; - descr->out_info[i].format = cas_scaler_out_info->format; - if (cas_scaler_vf_info) { - descr->vf_info[i].res.width = cas_scaler_vf_info->res.width; - descr->vf_info[i].res.height = cas_scaler_vf_info->res.height; - descr->vf_info[i].padded_width = cas_scaler_vf_info->padded_width; + descr->out_info[i].res.width = out_info->res.width; + descr->out_info[i].res.height = out_info->res.height; + descr->out_info[i].padded_width = out_info->padded_width; + descr->out_info[i].format = out_info->format; + if (vf_info) { + descr->vf_info[i].res.width = vf_info->res.width; + descr->vf_info[i].res.height = vf_info->res.height; + descr->vf_info[i].padded_width = vf_info->padded_width; ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE); } else { descr->vf_info[i].res.width = 0; diff --git a/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h b/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h deleted file mode 100644 index 6f058f13230041..00000000000000 --- a/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/** -Support for Intel Camera Imaging ISP subsystem. -Copyright (c) 2010 - 2015, Intel Corporation. - -This program is free software; you can redistribute it and/or modify it -under the terms and conditions of the GNU General Public License, -version 2, as published by the Free Software Foundation. - -This program is distributed in the hope it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -more details. 
-*/ - -#ifndef __SH_CSS_DVS_INFO_H__ -#define __SH_CSS_DVS_INFO_H__ - -#include - -/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */ -#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2)) - -/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */ -#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA)) - -/* Bilinear interpolation (HRT_GDC_BLI_MODE) is the supported method currently. - * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet */ -#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE - -#define DVS_INPUT_BYTES_PER_PIXEL (1) - -#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X)) - -#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA)) - -#endif /* __SH_CSS_DVS_INFO_H__ */ diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h index 7782f76b9f9776..25e5b4570f7da1 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h +++ b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h @@ -18,7 +18,6 @@ #include #include -#include #include "gdc_global.h" /* gdc_warp_param_mem_t */ #define DVS_ENV_MIN_X (12) diff --git a/drivers/staging/media/av7110/av7110.c b/drivers/staging/media/av7110/av7110.c index 728b3892a20c45..bc9a2a40afcb54 100644 --- a/drivers/staging/media/av7110/av7110.c +++ b/drivers/staging/media/av7110/av7110.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c index 31b2b48085c59a..712f916f0935f5 100644 --- a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c +++ b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c @@ -333,20 +333,16 @@ static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = { static int isc_parse_dt(struct device *dev, struct isc_device *isc) { struct device_node *np = dev->of_node; - struct device_node *epn = NULL; + struct device_node *epn; struct isc_subdev_entity *subdev_entity; unsigned int flags; - int ret; + int ret = -EINVAL; INIT_LIST_HEAD(&isc->subdev_entities); - while (1) { + for_each_endpoint_of_node(np, epn) { struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 }; - epn = of_graph_get_next_endpoint(np, epn); - if (!epn) - return 0; - ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn), &v4l2_epn); if (ret) { diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c index 020034f631f57c..9485167d5b7d73 100644 --- a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c +++ b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c @@ -316,23 +316,19 @@ static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = { static int xisc_parse_dt(struct device *dev, struct isc_device *isc) { struct device_node *np = dev->of_node; - struct device_node *epn = NULL; + struct device_node *epn; struct isc_subdev_entity *subdev_entity; unsigned int flags; - int ret; + int ret = -EINVAL; bool mipi_mode; INIT_LIST_HEAD(&isc->subdev_entities); mipi_mode = of_property_read_bool(np, "microchip,mipi-mode"); - while (1) { + for_each_endpoint_of_node(np, epn) { struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 }; - epn = of_graph_get_next_endpoint(np, epn); - if (!epn) - return 0; - ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn), &v4l2_epn); if (ret) { diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c 
b/drivers/staging/media/ipu3/ipu3-v4l2.c index 3df58eb3e8822b..e7aee7e3db5bbd 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -535,31 +535,53 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) container_of(vq, struct imgu_video_device, vbq); int r; unsigned int pipe; + bool stop_streaming = false; + /* Verify that the node had been setup with imgu_v4l2_node_setup() */ WARN_ON(!node->enabled); pipe = node->pipe; dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id); - imgu_pipe = &imgu->imgu_pipe[pipe]; - r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, video, s_stream, 0); - if (r) - dev_err(&imgu->pci_dev->dev, - "failed to stop subdev streaming\n"); + /* + * When the first node of a streaming setup is stopped, the entire + * pipeline needs to stop before individual nodes are disabled. + * Perform the inverse of the initial setup. + * + * Part 1 - s_stream on the entire pipeline + */ mutex_lock(&imgu->streaming_lock); - /* Was this the first node with streaming disabled? */ - if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) { + if (imgu->streaming) { /* Yes, really stop streaming now */ dev_dbg(dev, "IMGU streaming is ready to stop"); r = imgu_s_stream(imgu, false); if (!r) imgu->streaming = false; + stop_streaming = true; } - - imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR); mutex_unlock(&imgu->streaming_lock); + /* Part 2 - s_stream on subdevs + * + * If we call s_stream multiple times, Linux v6.7's call_s_stream() + * WARNs and aborts. Thus, disable all pipes at once, and only once. + */ + if (stop_streaming) { + for_each_set_bit(pipe, imgu->css.enabled_pipes, + IMGU_MAX_PIPE_NUM) { + imgu_pipe = &imgu->imgu_pipe[pipe]; + + r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, + video, s_stream, 0); + if (r) + dev_err(&imgu->pci_dev->dev, + "failed to stop subdev streaming\n"); + } + } + + /* Part 3 - individual node teardown */ video_device_pipeline_stop(&node->vdev); + imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR); } /******************** v4l2_ioctl_ops ********************/ diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c index de3e0345ab7c60..5e5b296f93bab4 100644 --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c @@ -982,6 +982,8 @@ static const struct of_device_id vdec_dt_match[] = { .data = &vdec_platform_gxm }, { .compatible = "amlogic,gxl-vdec", .data = &vdec_platform_gxl }, + { .compatible = "amlogic,gxlx-vdec", + .data = &vdec_platform_gxlx }, { .compatible = "amlogic,g12a-vdec", .data = &vdec_platform_g12a }, { .compatible = "amlogic,sm1-vdec", diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c index 3fe2de0c9331f2..a65cb49594465a 100644 --- a/drivers/staging/media/meson/vdec/vdec_1.c +++ b/drivers/staging/media/meson/vdec/vdec_1.c @@ -129,7 +129,7 @@ static u32 vdec_1_vififo_level(struct amvdec_session *sess) return amvdec_read_dos(core, VLD_MEM_VIFIFO_LEVEL); } -static int vdec_1_stop(struct amvdec_session *sess) +static void __vdec_1_stop(struct amvdec_session *sess) { struct amvdec_core *core = sess->core; struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops; @@ -158,10 +158,17 @@ static int vdec_1_stop(struct amvdec_session *sess) regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, GEN_PWR_VDEC_1, GEN_PWR_VDEC_1); - clk_disable_unprepare(core->vdec_1_clk); - if (sess->priv) codec_ops->stop(sess); +} + +static int 
vdec_1_stop(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + + __vdec_1_stop(sess); + + clk_disable_unprepare(core->vdec_1_clk); return 0; } @@ -235,7 +242,8 @@ static int vdec_1_start(struct amvdec_session *sess) return 0; stop: - vdec_1_stop(sess); + __vdec_1_stop(sess); + clk_disable_unprepare(core->vdec_1_clk); return ret; } diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c index afced435c90702..1939c47def58d9 100644 --- a/drivers/staging/media/meson/vdec/vdec_hevc.c +++ b/drivers/staging/media/meson/vdec/vdec_hevc.c @@ -110,7 +110,7 @@ static u32 vdec_hevc_vififo_level(struct amvdec_session *sess) return readl_relaxed(sess->core->dos_base + HEVC_STREAM_LEVEL); } -static int vdec_hevc_stop(struct amvdec_session *sess) +static void __vdec_hevc_stop(struct amvdec_session *sess) { struct amvdec_core *core = sess->core; struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops; @@ -142,6 +142,13 @@ static int vdec_hevc_stop(struct amvdec_session *sess) else regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, GEN_PWR_VDEC_HEVC, GEN_PWR_VDEC_HEVC); +} + +static int vdec_hevc_stop(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + + __vdec_hevc_stop(sess); clk_disable_unprepare(core->vdec_hevc_clk); if (core->platform->revision == VDEC_REVISION_G12A || @@ -151,20 +158,12 @@ static int vdec_hevc_stop(struct amvdec_session *sess) return 0; } -static int vdec_hevc_start(struct amvdec_session *sess) +static int __vdec_hevc_start(struct amvdec_session *sess) { int ret; struct amvdec_core *core = sess->core; struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops; - if (core->platform->revision == VDEC_REVISION_G12A || - core->platform->revision == VDEC_REVISION_SM1) { - clk_set_rate(core->vdec_hevcf_clk, 666666666); - ret = clk_prepare_enable(core->vdec_hevcf_clk); - if (ret) - return ret; - } - clk_set_rate(core->vdec_hevc_clk, 666666666); ret = clk_prepare_enable(core->vdec_hevc_clk); if (ret) { @@ -223,10 +222,32 @@ static int vdec_hevc_start(struct amvdec_session *sess) return 0; stop: - vdec_hevc_stop(sess); + __vdec_hevc_stop(sess); + clk_disable_unprepare(core->vdec_hevc_clk); return ret; } +static int vdec_hevc_start(struct amvdec_session *sess) +{ + struct amvdec_core *core = sess->core; + int ret; + + if (core->platform->revision == VDEC_REVISION_G12A || + core->platform->revision == VDEC_REVISION_SM1) { + clk_set_rate(core->vdec_hevcf_clk, 666666666); + ret = clk_prepare_enable(core->vdec_hevcf_clk); + if (ret) + return ret; + + ret = __vdec_hevc_start(sess); + if (ret) + clk_disable_unprepare(core->vdec_hevcf_clk); + return ret; + } + + return __vdec_hevc_start(sess); +} + struct amvdec_ops vdec_hevc_ops = { .start = vdec_hevc_start, .stop = vdec_hevc_stop, diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c index 70c9fd7c8bc505..66bb307db85a46 100644 --- a/drivers/staging/media/meson/vdec/vdec_platform.c +++ b/drivers/staging/media/meson/vdec/vdec_platform.c @@ -101,6 +101,44 @@ static const struct amvdec_format vdec_formats_gxl[] = { }, }; +static const struct amvdec_format vdec_formats_gxlx[] = { + { + .pixfmt = V4L2_PIX_FMT_H264, + .min_buffers = 2, + .max_buffers = 24, + .max_width = 3840, + .max_height = 2160, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_h264_ops, + .firmware_path = "meson/vdec/gxl_h264.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED | + 
V4L2_FMT_FLAG_DYN_RESOLUTION, + }, { + .pixfmt = V4L2_PIX_FMT_MPEG1, + .min_buffers = 8, + .max_buffers = 8, + .max_width = 1920, + .max_height = 1080, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_mpeg12_ops, + .firmware_path = "meson/vdec/gxl_mpeg12.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, + }, { + .pixfmt = V4L2_PIX_FMT_MPEG2, + .min_buffers = 8, + .max_buffers = 8, + .max_width = 1920, + .max_height = 1080, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_mpeg12_ops, + .firmware_path = "meson/vdec/gxl_mpeg12.bin", + .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 }, + .flags = V4L2_FMT_FLAG_COMPRESSED, + }, +}; + static const struct amvdec_format vdec_formats_gxm[] = { { .pixfmt = V4L2_PIX_FMT_VP9, @@ -263,6 +301,12 @@ const struct vdec_platform vdec_platform_gxl = { .revision = VDEC_REVISION_GXL, }; +const struct vdec_platform vdec_platform_gxlx = { + .formats = vdec_formats_gxlx, + .num_formats = ARRAY_SIZE(vdec_formats_gxlx), + .revision = VDEC_REVISION_GXLX, +}; + const struct vdec_platform vdec_platform_gxm = { .formats = vdec_formats_gxm, .num_formats = ARRAY_SIZE(vdec_formats_gxm), diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h index 731877a771f47d..88ca4a9db8a884 100644 --- a/drivers/staging/media/meson/vdec/vdec_platform.h +++ b/drivers/staging/media/meson/vdec/vdec_platform.h @@ -14,6 +14,7 @@ struct amvdec_format; enum vdec_revision { VDEC_REVISION_GXBB, VDEC_REVISION_GXL, + VDEC_REVISION_GXLX, VDEC_REVISION_GXM, VDEC_REVISION_G12A, VDEC_REVISION_SM1, @@ -28,6 +29,7 @@ struct vdec_platform { extern const struct vdec_platform vdec_platform_gxbb; extern const struct vdec_platform vdec_platform_gxm; extern const struct vdec_platform vdec_platform_gxl; +extern const struct vdec_platform vdec_platform_gxlx; extern const struct vdec_platform vdec_platform_g12a; extern const struct vdec_platform vdec_platform_sm1; diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c index fecd3e67c7a1d5..b6d34145bc191e 100644 --- a/drivers/staging/media/starfive/camss/stf-camss.c +++ b/drivers/staging/media/starfive/camss/stf-camss.c @@ -358,8 +358,6 @@ static int stfcamss_probe(struct platform_device *pdev) /* * stfcamss_remove - Remove STFCAMSS platform device * @pdev: Pointer to STFCAMSS platform device - * - * Always returns 0. 
*/ static void stfcamss_remove(struct platform_device *pdev) { diff --git a/drivers/staging/media/starfive/camss/stf-capture.c b/drivers/staging/media/starfive/camss/stf-capture.c index ec5169e7b3918b..e15d2e97eb0b6c 100644 --- a/drivers/staging/media/starfive/camss/stf-capture.c +++ b/drivers/staging/media/starfive/camss/stf-capture.c @@ -180,6 +180,8 @@ static void stf_channel_set(struct stfcamss_video *video) u32 val; if (cap->type == STF_CAPTURE_RAW) { + const struct v4l2_pix_format *pix = &video->active_fmt.fmt.pix; + val = stf_syscon_reg_read(stfcamss, VIN_CHANNEL_SEL_EN); val &= ~U0_VIN_CHANNEL_SEL_MASK; val |= CHANNEL(0); @@ -193,7 +195,7 @@ static void stf_channel_set(struct stfcamss_video *video) val |= PIXEL_HEIGH_BIT_SEL(0); val &= ~U0_VIN_PIX_CNT_END_MASK; - val |= PIX_CNT_END(IMAGE_MAX_WIDTH / 4 - 1); + val |= PIX_CNT_END(pix->width / 4 - 1); stf_syscon_reg_write(stfcamss, VIN_INRT_PIX_CFG, val); } else if (cap->type == STF_CAPTURE_YUV) { diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c index 6254a5df2502fa..2b3cdb1ce1404c 100644 --- a/drivers/staging/most/video/video.c +++ b/drivers/staging/most/video/video.c @@ -454,18 +454,18 @@ static int comp_probe_channel(struct most_interface *iface, int channel_idx, struct most_video_dev *mdev = get_comp_dev(iface, channel_idx); if (mdev) { - pr_err("channel already linked\n"); + pr_err("Channel already linked\n"); return -EEXIST; } if (ccfg->direction != MOST_CH_RX) { - pr_err("wrong direction, expect rx\n"); + pr_err("Wrong direction, expected rx\n"); return -EINVAL; } if (ccfg->data_type != MOST_CH_SYNC && ccfg->data_type != MOST_CH_ISOC) { - pr_err("wrong channel type, expect sync or isoc\n"); + pr_err("Wrong channel type, expected sync or isoc\n"); return -EINVAL; } diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index d09211589d1cf1..977f8fc29e631f 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c @@ -175,7 +175,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec, } } - dev_err(nvec->dev, "could not allocate %s buffer\n", + dev_err(nvec->dev, "Could not allocate %s buffer\n", (category == NVEC_MSG_TX) ? 
"TX" : "RX"); return NULL; @@ -315,7 +315,7 @@ int nvec_write_sync(struct nvec_chip *nvec, if (!(wait_for_completion_timeout(&nvec->sync_write, msecs_to_jiffies(2000)))) { dev_warn(nvec->dev, - "timeout waiting for sync write to complete\n"); + "Timeout waiting for sync write to complete\n"); mutex_unlock(&nvec->sync_write_mutex); return -ETIMEDOUT; } @@ -392,7 +392,7 @@ static void nvec_request_master(struct work_struct *work) msecs_to_jiffies(5000)); if (err == 0) { - dev_warn(nvec->dev, "timeout waiting for ec transfer\n"); + dev_warn(nvec->dev, "Timeout waiting for ec transfer\n"); nvec_gpio_set_value(nvec, 1); msg->pos = 0; } @@ -454,7 +454,7 @@ static void nvec_dispatch(struct work_struct *work) if (nvec->sync_write_pending == (msg->data[2] << 8) + msg->data[0]) { - dev_dbg(nvec->dev, "sync write completed!\n"); + dev_dbg(nvec->dev, "Sync write completed!\n"); nvec->sync_write_pending = 0; nvec->last_sync_msg = msg; complete(&nvec->sync_write); @@ -477,7 +477,7 @@ static void nvec_tx_completed(struct nvec_chip *nvec) { /* We got an END_TRANS, let's skip this, maybe there's an event */ if (nvec->tx->pos != nvec->tx->size) { - dev_err(nvec->dev, "premature END_TRANS, resending\n"); + dev_err(nvec->dev, "Premature END_TRANS, resending\n"); nvec->tx->pos = 0; nvec_gpio_set_value(nvec, 0); } else { @@ -608,7 +608,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev) /* Filter out some errors */ if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) { - dev_err(nvec->dev, "unexpected irq mask %lx\n", status); + dev_err(nvec->dev, "Unexpected irq mask %lx\n", status); return IRQ_HANDLED; } if ((status & I2C_SL_IRQ) == 0) { @@ -631,7 +631,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev) if (status != (I2C_SL_IRQ | RCVD)) nvec_invalid_flags(nvec, status, false); break; - case 1: /* command byte */ + case 1: /* Command byte */ if (status != I2C_SL_IRQ) { nvec_invalid_flags(nvec, status, true); } else { @@ -845,13 +845,12 @@ static int tegra_nvec_probe(struct platform_device *pdev) return PTR_ERR(nvec->gpiod); } - err = devm_request_irq(dev, nvec->irq, nvec_interrupt, 0, + err = devm_request_irq(dev, nvec->irq, nvec_interrupt, IRQF_NO_AUTOEN, "nvec", nvec); if (err) { dev_err(dev, "couldn't request irq\n"); return -ENODEV; } - disable_irq(nvec->irq); tegra_init_i2c_slave(nvec); diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 9eee28f2940cf1..a5e99cc78a454f 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -425,7 +425,7 @@ int cvm_oct_common_init(struct net_device *dev) dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; /* We do our own locking, Linux doesn't need to */ - dev->features |= NETIF_F_LLTX; + dev->lltx = true; dev->ethtool_ops = &cvm_oct_ethtool_ops; cvm_oct_set_mac_filter(dev); diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c index 08ec3aae90eac2..4cb904a5f8f402 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/drivers/staging/olpc_dcon/olpc_dcon.c @@ -544,7 +544,7 @@ static const struct backlight_ops dcon_bl_ops = { static struct backlight_properties dcon_bl_props = { .max_brightness = 15, .type = BACKLIGHT_RAW, - .power = FB_BLANK_UNBLANK, + .power = BACKLIGHT_POWER_ON, }; static int dcon_reboot_notify(struct notifier_block *nb, diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h index d87bace0a19b27..552fd9b6e3e514 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h +++ 
b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h @@ -8,6 +8,7 @@ #define R8190P_DEF_H #include +#include "r8192E_phy.h" #define MAX_SILENT_RESET_RX_SLOT_NUM 10 @@ -137,7 +138,7 @@ struct tx_fwinfo_8190pci { }; struct phy_sts_ofdm_819xpci { - u8 trsw_gain_X[4]; + u8 trsw_gain_X[RF90_PATH_MAX]; u8 pwdb_all; u8 cfosho_X[4]; u8 cfotail_X[4]; @@ -226,7 +227,7 @@ struct rx_desc { u16 Length:14; u16 CRC32:1; u16 ICV:1; - u8 RxDrvInfoSize; + u8 rx_drv_info_size; u8 Shift:2; u8 PHYStatus:1; u8 SWDec:1; diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c index e470b49b0ff784..533cc4e723f684 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c @@ -16,18 +16,18 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data, struct sk_buff *skb; unsigned char *seg_ptr; struct cb_desc *tcb_desc; - u8 bLastIniPkt; + u8 last_ini_pkt; struct tx_fwinfo_8190pci *pTxFwInfo = NULL; do { if ((len - frag_offset) > CMDPACKET_FRAG_SIZE) { frag_length = CMDPACKET_FRAG_SIZE; - bLastIniPkt = 0; + last_ini_pkt = 0; } else { frag_length = (u16)(len - frag_offset); - bLastIniPkt = 1; + last_ini_pkt = 1; } if (type == DESC_PACKET_TYPE_NORMAL) @@ -42,8 +42,8 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data, memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; - tcb_desc->bCmdOrInit = type; - tcb_desc->bLastIniPkt = bLastIniPkt; + tcb_desc->cmd_or_init = type; + tcb_desc->last_ini_pkt = last_ini_pkt; if (type == DESC_PACKET_TYPE_NORMAL) { tcb_desc->pkt_size = frag_length; diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c index b3d4b33942844f..2672b1ddf88e39 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c @@ -289,7 +289,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev) for (i = 0; i < 6; i += 2) { usValue = rtl92e_eeprom_read(dev, - (EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1); + (EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1); *(u16 *)(&addr[i]) = usValue; } eth_hw_addr_set(dev, addr); @@ -987,10 +987,10 @@ void rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry, if (dma_mapping_error(&priv->pdev->dev, mapping)) netdev_err(dev, "%s(): DMA Mapping error\n", __func__); memset(entry, 0, 12); - entry->LINIP = cb_desc->bLastIniPkt; + entry->LINIP = cb_desc->last_ini_pkt; entry->FirstSeg = 1; entry->LastSeg = 1; - if (cb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) { + if (cb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) { entry->CmdInit = DESC_PACKET_TYPE_INIT; } else { struct tx_desc *entry_tmp = (struct tx_desc *)entry; @@ -1145,23 +1145,21 @@ static long _rtl92e_signal_scale_mapping(struct r8192_priv *priv, long currsig) _pdrvinfo->RxRate == DESC90_RATE11M) &&\ !_pdrvinfo->RxHT) -static void _rtl92e_query_rxphystatus( - struct r8192_priv *priv, - struct rtllib_rx_stats *pstats, - struct rx_desc *pdesc, - struct rx_fwinfo *pdrvinfo, - struct rtllib_rx_stats *precord_stats, - bool bpacket_match_bssid, - bool bpacket_toself, - bool bPacketBeacon, - bool bToSelfBA - ) +static void _rtl92e_query_rxphystatus(struct r8192_priv *priv, + struct rtllib_rx_stats *pstats, + struct rx_desc *pdesc, + struct rx_fwinfo *pdrvinfo, + struct rtllib_rx_stats *precord_stats, + bool bpacket_match_bssid, + bool bpacket_toself, + bool 
bPacketBeacon, + bool bToSelfBA) { struct phy_sts_ofdm_819xpci *pofdm_buf; struct phy_sts_cck_819xpci *pcck_buf; u8 *prxpkt; u8 i, max_spatial_stream, tmp_rxevm; - s8 rx_pwr[4], rx_pwr_all = 0; + s8 rx_pwr[RF90_PATH_MAX], rx_pwr_all = 0; s8 rx_evmX; u8 evm, pwdb_all; u32 RSSI, total_rssi = 0; @@ -1174,7 +1172,7 @@ static void _rtl92e_query_rxphystatus( memset(precord_stats, 0, sizeof(struct rtllib_rx_stats)); pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID = bpacket_match_bssid; - pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself; + pstats->packet_to_self = precord_stats->packet_to_self = bpacket_toself; pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate; pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon; pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA; @@ -1266,8 +1264,8 @@ static void _rtl92e_query_rxphystatus( else sq = ((64 - sq) * 100) / 44; } - pstats->SignalQuality = sq; - precord_stats->SignalQuality = sq; + pstats->signal_quality = sq; + precord_stats->signal_quality = sq; pstats->RxMIMOSignalQuality[0] = sq; precord_stats->RxMIMOSignalQuality[0] = sq; pstats->RxMIMOSignalQuality[1] = -1; @@ -1311,8 +1309,8 @@ static void _rtl92e_query_rxphystatus( evm = rtl92e_evm_db_to_percent(rx_evmX); if (bpacket_match_bssid) { if (i == 0) { - pstats->SignalQuality = evm & 0xff; - precord_stats->SignalQuality = evm & 0xff; + pstats->signal_quality = evm & 0xff; + precord_stats->signal_quality = evm & 0xff; } pstats->RxMIMOSignalQuality[i] = evm & 0xff; precord_stats->RxMIMOSignalQuality[i] = evm & 0xff; @@ -1321,13 +1319,12 @@ static void _rtl92e_query_rxphystatus( } if (is_cck_rate) { - pstats->SignalStrength = precord_stats->SignalStrength = - _rtl92e_signal_scale_mapping(priv, - (long)pwdb_all); + pstats->signal_strength = precord_stats->signal_strength = + _rtl92e_signal_scale_mapping(priv, (long)pwdb_all); } else { if (rf_rx_num != 0) - pstats->SignalStrength = precord_stats->SignalStrength = + pstats->signal_strength = precord_stats->signal_strength = _rtl92e_signal_scale_mapping(priv, (long)(total_rssi /= rf_rx_num)); } @@ -1355,10 +1352,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer, last_rssi = priv->stats.slide_signal_strength[slide_rssi_index]; priv->stats.slide_rssi_total -= last_rssi; } - priv->stats.slide_rssi_total += prev_st->SignalStrength; + priv->stats.slide_rssi_total += prev_st->signal_strength; priv->stats.slide_signal_strength[slide_rssi_index++] = - prev_st->SignalStrength; + prev_st->signal_strength; if (slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX) slide_rssi_index = 0; @@ -1373,7 +1370,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer, if (!bcheck) return; - if (!prev_st->bIsCCK && prev_st->bPacketToSelf) { + if (!prev_st->bIsCCK && prev_st->packet_to_self) { for (rfpath = RF90_PATH_A; rfpath < priv->num_total_rf_path; rfpath++) { if (priv->stats.rx_rssi_percentage[rfpath] == 0) { priv->stats.rx_rssi_percentage[rfpath] = @@ -1419,7 +1416,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer, if (prev_st->RxPWDBAll >= 3) prev_st->RxPWDBAll -= 3; } - if (prev_st->bPacketToSelf || prev_st->bPacketBeacon || + if (prev_st->packet_to_self || prev_st->bPacketBeacon || prev_st->bToSelfBA) { if (priv->undecorated_smoothed_pwdb < 0) priv->undecorated_smoothed_pwdb = prev_st->RxPWDBAll; @@ -1439,8 +1436,8 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer, rtl92e_update_rx_statistics(priv, prev_st); } - if 
(prev_st->SignalQuality != 0) { - if (prev_st->bPacketToSelf || prev_st->bPacketBeacon || + if (prev_st->signal_quality != 0) { + if (prev_st->packet_to_self || prev_st->bPacketBeacon || prev_st->bToSelfBA) { if (slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX) { slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX; @@ -1449,10 +1446,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer, priv->stats.slide_evm_total -= last_evm; } - priv->stats.slide_evm_total += prev_st->SignalQuality; + priv->stats.slide_evm_total += prev_st->signal_quality; priv->stats.slide_evm[slide_evm_index++] = - prev_st->SignalQuality; + prev_st->signal_quality; if (slide_evm_index >= PHY_RSSI_SLID_WIN_MAX) slide_evm_index = 0; @@ -1461,7 +1458,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer, priv->stats.last_signal_strength_inpercent = tmp_val; } - if (prev_st->bPacketToSelf || + if (prev_st->packet_to_self || prev_st->bPacketBeacon || prev_st->bToSelfBA) { for (ij = 0; ij < 2; ij++) { @@ -1496,7 +1493,7 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev, u8 *tmp_buf; u8 *praddr; - tmp_buf = skb->data + pstats->RxDrvInfoSize + pstats->RxBufShift; + tmp_buf = skb->data + pstats->rx_drv_info_size + pstats->rx_buf_shift; hdr = (struct ieee80211_hdr_3addr *)tmp_buf; fc = le16_to_cpu(hdr->frame_control); @@ -1509,7 +1506,7 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3) && - (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV)); + (!pstats->hw_error) && (!pstats->bCRC) && (!pstats->bICV)); bpacket_toself = bpacket_match_bssid && /* check this */ ether_addr_equal(praddr, priv->rtllib->dev->dev_addr); if (ieee80211_is_beacon(hdr->frame_control)) @@ -1521,9 +1518,8 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev, rtl92e_copy_mpdu_stats(pstats, &previous_stats); } -static void _rtl92e_update_received_rate_histogram_stats( - struct net_device *dev, - struct rtllib_rx_stats *pstats) +static void _rtl92e_update_received_rate_histogram_stats(struct net_device *dev, + struct rtllib_rx_stats *pstats) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); u32 rcvType = 1; @@ -1634,20 +1630,20 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats, stats->bICV = pdesc->ICV; stats->bCRC = pdesc->CRC32; - stats->bHwError = pdesc->CRC32 | pdesc->ICV; + stats->hw_error = pdesc->CRC32 | pdesc->ICV; stats->Length = pdesc->Length; if (stats->Length < 24) - stats->bHwError |= 1; + stats->hw_error |= 1; - if (stats->bHwError) + if (stats->hw_error) return false; - stats->RxDrvInfoSize = pdesc->RxDrvInfoSize; - stats->RxBufShift = (pdesc->Shift) & 0x03; + stats->rx_drv_info_size = pdesc->rx_drv_info_size; + stats->rx_buf_shift = (pdesc->Shift) & 0x03; stats->decrypted = !pdesc->SWDec; - pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift); + pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->rx_buf_shift); stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT, pDrvInfo->RxRate); @@ -1837,8 +1833,8 @@ bool rtl92e_is_rx_stuck(struct net_device *dev) rx_chk_cnt++; if (priv->undecorated_smoothed_pwdb >= (RATE_ADAPTIVE_TH_HIGH + 5)) { rx_chk_cnt = 0; - } else if ((priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5)) - && (((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) && + } else if ((priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5)) && + 
(((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) && (priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_40M)) || ((priv->current_chnl_bw == HT_CHANNEL_WIDTH_20) && (priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_20M)))) { @@ -1859,7 +1855,6 @@ bool rtl92e_is_rx_stuck(struct net_device *dev) rx_chk_cnt = 0; } - slot_index = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum; if (priv->rx_ctr == RegRxCounter) { diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h index 1b444529b59c87..e507593b939cd6 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h @@ -229,7 +229,7 @@ enum _RTL8192PCI_HW { RATR_MCS6 | RATR_MCS7) #define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \ RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \ - RATR_MCS14|RATR_MCS15) + RATR_MCS14 | RATR_MCS15) DRIVER_RSSI = 0x32c, MCS_TXAGC = 0x340, diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c index 18b948d4d86d39..fbe624e235df0d 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c @@ -416,6 +416,7 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev) return rtStatus; } + bool rtl92e_config_bb(struct net_device *dev) { _rtl92e_init_bb_rf_reg_def(dev); @@ -508,8 +509,8 @@ static void _rtl92e_set_tx_power_level(struct net_device *dev, u8 channel) static u8 _rtl92e_phy_set_sw_chnl_cmd_array(struct net_device *dev, struct sw_chnl_cmd *CmdTable, u32 CmdTableIdx, u32 CmdTableSz, - enum sw_chnl_cmd_id CmdID, - u32 Para1, u32 Para2, u32 msDelay) + enum sw_chnl_cmd_id cmd_id, + u32 para1, u32 para2, u32 ms_delay) { struct sw_chnl_cmd *pCmd; @@ -523,10 +524,10 @@ static u8 _rtl92e_phy_set_sw_chnl_cmd_array(struct net_device *dev, } pCmd = CmdTable + CmdTableIdx; - pCmd->CmdID = CmdID; - pCmd->Para1 = Para1; - pCmd->Para2 = Para2; - pCmd->msDelay = msDelay; + pCmd->cmd_id = cmd_id; + pCmd->para1 = para1; + pCmd->para2 = para2; + pCmd->ms_delay = ms_delay; return true; } @@ -552,18 +553,18 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel, _rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, - CmdID_SetTxPowerLevel, + cmd_id_set_tx_power_level, 0, 0, 0); _rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PreCommonCmd, PreCommonCmdCnt++, - MAX_PRECMD_CNT, CmdID_End, + MAX_PRECMD_CNT, cmd_id_end, 0, 0, 0); PostCommonCmdCnt = 0; _rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PostCommonCmd, PostCommonCmdCnt++, - MAX_POSTCMD_CNT, CmdID_End, + MAX_POSTCMD_CNT, cmd_id_end, 0, 0, 0); RfDependCmdCnt = 0; @@ -578,14 +579,14 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel, ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, - CmdID_RF_WriteReg, + cmd_id_rf_write_reg, rZebra1_Channel, channel, 10); _rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, - CmdID_End, 0, 0, 0); + cmd_id_end, 0, 0, 0); do { switch (*stage) { @@ -600,7 +601,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel, break; } - if (CurrentCmd && CurrentCmd->CmdID == CmdID_End) { + if (CurrentCmd && CurrentCmd->cmd_id == cmd_id_end) { if ((*stage) == 2) return true; (*stage)++; @@ -610,31 +611,31 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel, if (!CurrentCmd) continue; - switch (CurrentCmd->CmdID) { - case 
CmdID_SetTxPowerLevel: + switch (CurrentCmd->cmd_id) { + case cmd_id_set_tx_power_level: if (priv->ic_cut > VERSION_8190_BD) _rtl92e_set_tx_power_level(dev, channel); break; - case CmdID_WritePortUlong: - rtl92e_writel(dev, CurrentCmd->Para1, - CurrentCmd->Para2); + case cmd_id_write_port_ulong: + rtl92e_writel(dev, CurrentCmd->para1, + CurrentCmd->para2); break; - case CmdID_WritePortUshort: - rtl92e_writew(dev, CurrentCmd->Para1, - CurrentCmd->Para2); + case cmd_id_write_port_ushort: + rtl92e_writew(dev, CurrentCmd->para1, + CurrentCmd->para2); break; - case CmdID_WritePortUchar: - rtl92e_writeb(dev, CurrentCmd->Para1, - CurrentCmd->Para2); + case cmd_id_write_port_uchar: + rtl92e_writeb(dev, CurrentCmd->para1, + CurrentCmd->para2); break; - case CmdID_RF_WriteReg: + case cmd_id_rf_write_reg: for (eRFPath = 0; eRFPath < priv->num_total_rf_path; eRFPath++) rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath, - CurrentCmd->Para1, bMask12Bits, - CurrentCmd->Para2 << 7); + CurrentCmd->para1, bMask12Bits, + CurrentCmd->para2 << 7); break; default: break; @@ -644,7 +645,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel, } while (true); } /*for (Number of RF paths)*/ - (*delay) = CurrentCmd->msDelay; + (*delay) = CurrentCmd->ms_delay; (*step)++; return false; } @@ -944,19 +945,19 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation) case IG_Restore: BitMask = 0x7f; rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, BitMask, - (u32)priv->initgain_backup.xaagccore1); + (u32)priv->initgain_backup.xaagccore1); rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, BitMask, - (u32)priv->initgain_backup.xbagccore1); + (u32)priv->initgain_backup.xbagccore1); rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, BitMask, - (u32)priv->initgain_backup.xcagccore1); + (u32)priv->initgain_backup.xcagccore1); rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, BitMask, - (u32)priv->initgain_backup.xdagccore1); + (u32)priv->initgain_backup.xdagccore1); BitMask = bMaskByte2; rtl92e_set_bb_reg(dev, rCCK0_CCA, BitMask, - (u32)priv->initgain_backup.cca); + (u32)priv->initgain_backup.cca); rtl92e_set_tx_power(dev, - priv->rtllib->current_network.channel); + priv->rtllib->current_network.channel); break; } } diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h index ff4b4004b0d005..956dfbdd5b6838 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h @@ -20,8 +20,6 @@ enum hw90_block { enum rf90_radio_path { RF90_PATH_A = 0, RF90_PATH_B = 1, - RF90_PATH_C = 2, - RF90_PATH_D = 3, RF90_PATH_MAX }; @@ -45,13 +43,13 @@ void rtl92e_set_channel(struct net_device *dev, u8 channel); void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width bandwidth, enum ht_extchnl_offset Offset); -void rtl92e_init_gain(struct net_device *dev, u8 Operation); +void rtl92e_init_gain(struct net_device *dev, u8 operation); void rtl92e_set_rf_off(struct net_device *dev); bool rtl92e_set_rf_power_state(struct net_device *dev, enum rt_rf_power_state rf_power_state); -void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation); +void rtl92e_scan_op_backup(struct net_device *dev, u8 operation); #endif diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 9eeae01dc98df8..dc1301f1a0c16b 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -25,7 +25,7 @@ int hwwep = 1; static char *ifname = "wlan%d"; -static 
struct pci_device_id rtl8192_pci_id_tbl[] = { +static const struct pci_device_id rtl8192_pci_id_tbl[] = { {PCI_DEVICE(0x10ec, 0x8192)}, {PCI_DEVICE(0x07aa, 0x0044)}, {PCI_DEVICE(0x07aa, 0x0047)}, @@ -173,7 +173,7 @@ bool rtl92e_set_rf_state(struct net_device *dev, else priv->blinked_ingpio = false; rtllib_mgnt_disconnect(priv->rtllib, - WLAN_REASON_DISASSOC_STA_HAS_LEFT); + WLAN_REASON_DISASSOC_STA_HAS_LEFT); } } if ((change_source == RF_CHANGE_BY_HW) && !priv->hw_radio_off) @@ -322,7 +322,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv, if (network->flags & NETWORK_HAS_QOS_MASK) { if (active_network && - (network->flags & NETWORK_HAS_QOS_PARAMETERS)) + (network->flags & NETWORK_HAS_QOS_PARAMETERS)) network->qos_data.active = network->qos_data.supported; if ((network->qos_data.active == 1) && (active_network == 1) && @@ -665,7 +665,7 @@ static void _rtl92e_init_priv_handler(struct net_device *dev) priv->rtllib->init_gain_handler = rtl92e_init_gain; priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq; priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave; - priv->rtllib->ScanOperationBackupHandler = rtl92e_scan_op_backup; + priv->rtllib->scan_operation_backup_handler = rtl92e_scan_op_backup; } static void _rtl92e_init_priv_variable(struct net_device *dev) @@ -860,13 +860,13 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev) skb = __skb_peek(&ring->queue); tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); - tcb_desc->nStuckCount++; + tcb_desc->stuck_count++; bCheckFwTxCnt = true; - if (tcb_desc->nStuckCount > 1) + if (tcb_desc->stuck_count > 1) netdev_info(dev, - "%s: QueueID=%d tcb_desc->nStuckCount=%d\n", + "%s: QueueID=%d tcb_desc->stuck_count=%d\n", __func__, QueueID, - tcb_desc->nStuckCount); + tcb_desc->stuck_count); } } spin_unlock_irqrestore(&priv->irq_th_lock, flags); @@ -1522,8 +1522,8 @@ static void _rtl92e_rx_normal(struct net_device *dev) priv->rxbuffersize, DMA_FROM_DEVICE); skb_put(skb, pdesc->Length); - skb_reserve(skb, stats.RxDrvInfoSize + - stats.RxBufShift); + skb_reserve(skb, stats.rx_drv_info_size + + stats.rx_buf_shift); skb_trim(skb, skb->len - S_CRC_LEN); rtllib_hdr = (struct ieee80211_hdr *)skb->data; if (!is_multicast_ether_addr(rtllib_hdr->addr1)) { diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h index 1d6d31292f4116..8297d7e5941596 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h @@ -300,7 +300,7 @@ struct r8192_priv { u32 rf_reg_0value[4]; u8 num_total_rf_path; - bool brfpath_rxenable[4]; + bool brfpath_rxenable[RF90_PATH_MAX]; bool tx_pwr_data_read_from_eeprom; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c index 0c7f38a4a7db8b..e9ca5a8768ad13 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c @@ -1484,8 +1484,7 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev) rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable, 0x1 << i, 0x1); - dm_rx_path_sel_table.rf_enable_rssi_th[i] - = 100; + dm_rx_path_sel_table.rf_enable_rssi_th[i] = 100; disabled_rf_cnt--; } } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c index 5aac9110bff686..7b6247acf6f4a2 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c @@ -204,12 +204,11 @@ void rtl92e_leisure_ps_enter(struct net_device 
*dev) &priv->rtllib->pwr_save_ctrl; if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) && - (priv->rtllib->link_state == MAC80211_LINKED))) + (priv->rtllib->link_state == MAC80211_LINKED))) return; if (psc->bLeisurePs) { if (psc->lps_idle_count >= RT_CHECK_FOR_HANG_PERIOD) { - if (priv->rtllib->ps == RTLLIB_PS_DISABLED) _rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST); } else { diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c index c21a0560410a88..fe3a42a4fcd5a9 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c @@ -288,11 +288,11 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, if (priv->rtllib->rf_power_state != rf_off) { priv->rtllib->actscanning = true; - ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP); + ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_BACKUP); rtllib_start_scan_syncro(priv->rtllib); - ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE); + ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_RESTORE); } ret = 0; } else { @@ -526,7 +526,8 @@ static int _rtl92e_wx_set_enc(struct net_device *dev, mutex_unlock(&priv->wx_mutex); if (wrqu->encoding.flags & IW_ENCODE_DISABLED) { - ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA; + ieee->pairwise_key_type = KEY_TYPE_NA; + ieee->group_key_type = KEY_TYPE_NA; rtl92e_cam_reset(dev); memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32); @@ -675,9 +676,9 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev, u8 idx = 0, alg = 0, group = 0; if ((encoding->flags & IW_ENCODE_DISABLED) || - ext->alg == IW_ENCODE_ALG_NONE) { - ieee->pairwise_key_type = ieee->group_key_type - = KEY_TYPE_NA; + ext->alg == IW_ENCODE_ALG_NONE) { + ieee->pairwise_key_type = KEY_TYPE_NA; + ieee->group_key_type = KEY_TYPE_NA; rtl92e_cam_reset(dev); memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32); @@ -710,7 +711,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev, rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, key); } else { if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) && - ieee->ht_info->current_ht_support) + ieee->ht_info->current_ht_support) rtl92e_writeb(dev, 0x173, 1); rtl92e_set_key(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr, 0, key); diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c index 834329886ea2ef..c400d4f8ff9a24 100644 --- a/drivers/staging/rtl8192e/rtl819x_BAProc.c +++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c @@ -5,7 +5,7 @@ * Contact Information: wlanfae */ #include -#include +#include #include #include "rtllib.h" #include "rtl819x_BA.h" diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c index e38cd0c9c013d9..9c9c0bc0cfde31 100644 --- a/drivers/staging/rtl8192e/rtl819x_HTProc.c +++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c @@ -188,7 +188,7 @@ static void ht_iot_peer_determine(struct rtllib_device *ieee) } static u8 ht_iot_act_is_mgnt_use_cck_6m(struct rtllib_device *ieee, - struct rtllib_network *network) + struct rtllib_network *network) { u8 retValue = 0; @@ -559,7 +559,7 @@ void ht_initialize_bss_desc(struct bss_ht *bss_ht) } void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee, - struct rtllib_network *pNetwork) + struct rtllib_network *network) { struct rt_hi_throughput *ht_info = ieee->ht_info; u8 bIOTAction = 0; @@ -567,32 +567,32 @@ void ht_reset_self_and_save_peer_setting(struct 
rtllib_device *ieee, /* unmark enable_ht flag here is the same reason why unmarked in * function rtllib_softmac_new_net. WB 2008.09.10 */ - if (pNetwork->bssht.bd_support_ht) { + if (network->bssht.bd_support_ht) { ht_info->current_ht_support = true; - ht_info->peer_ht_spec_ver = pNetwork->bssht.bd_ht_spec_ver; + ht_info->peer_ht_spec_ver = network->bssht.bd_ht_spec_ver; - if (pNetwork->bssht.bd_ht_cap_len > 0 && - pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->peer_ht_cap_buf)) + if (network->bssht.bd_ht_cap_len > 0 && + network->bssht.bd_ht_cap_len <= sizeof(ht_info->peer_ht_cap_buf)) memcpy(ht_info->peer_ht_cap_buf, - pNetwork->bssht.bd_ht_cap_buf, - pNetwork->bssht.bd_ht_cap_len); + network->bssht.bd_ht_cap_buf, + network->bssht.bd_ht_cap_len); - if (pNetwork->bssht.bd_ht_info_len > 0 && - pNetwork->bssht.bd_ht_info_len <= + if (network->bssht.bd_ht_info_len > 0 && + network->bssht.bd_ht_info_len <= sizeof(ht_info->peer_ht_info_buf)) memcpy(ht_info->peer_ht_info_buf, - pNetwork->bssht.bd_ht_info_buf, - pNetwork->bssht.bd_ht_info_len); + network->bssht.bd_ht_info_buf, + network->bssht.bd_ht_info_len); ht_info->current_rt2rt_aggregation = - pNetwork->bssht.bd_rt2rt_aggregation; + network->bssht.bd_rt2rt_aggregation; ht_info->current_rt2rt_long_slot_time = - pNetwork->bssht.bd_rt2rt_long_slot_time; + network->bssht.bd_rt2rt_long_slot_time; ht_iot_peer_determine(ieee); ht_info->iot_action = 0; - bIOTAction = ht_iot_act_is_mgnt_use_cck_6m(ieee, pNetwork); + bIOTAction = ht_iot_act_is_mgnt_use_cck_6m(ieee, network); if (bIOTAction) ht_info->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M; bIOTAction = ht_iot_act_is_ccd_fsync(ieee); @@ -609,23 +609,23 @@ void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee, } void HT_update_self_and_peer_setting(struct rtllib_device *ieee, - struct rtllib_network *pNetwork) + struct rtllib_network *network) { struct rt_hi_throughput *ht_info = ieee->ht_info; struct ht_info_ele *pPeerHTInfo = - (struct ht_info_ele *)pNetwork->bssht.bd_ht_info_buf; + (struct ht_info_ele *)network->bssht.bd_ht_info_buf; if (ht_info->current_ht_support) { - if (pNetwork->bssht.bd_ht_info_len != 0) + if (network->bssht.bd_ht_info_len != 0) ht_info->current_op_mode = pPeerHTInfo->opt_mode; } } EXPORT_SYMBOL(HT_update_self_and_peer_setting); -u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame) +u8 ht_c_check(struct rtllib_device *ieee, u8 *frame) { if (ieee->ht_info->current_ht_support) { - if ((is_qos_data_frame(pFrame) && frame_order(pFrame)) == 1) { + if ((is_qos_data_frame(frame) && frame_order(frame)) == 1) { netdev_dbg(ieee->dev, "HT CONTROL FILED EXIST!!\n"); return true; } diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c index ed6a488bc7acee..89092cd434de1f 100644 --- a/drivers/staging/rtl8192e/rtl819x_TSProc.c +++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c @@ -14,7 +14,7 @@ static void RxPktPendingTimeout(struct timer_list *t) struct rtllib_device *ieee = container_of(ts, struct rtllib_device, rx_ts_records[ts->num]); - struct rx_reorder_entry *pReorderEntry = NULL; + struct rx_reorder_entry *reorder_entry = NULL; unsigned long flags = 0; u8 index = 0; @@ -23,31 +23,31 @@ static void RxPktPendingTimeout(struct timer_list *t) spin_lock_irqsave(&(ieee->reorder_spinlock), flags); if (ts->rx_timeout_indicate_seq != 0xffff) { while (!list_empty(&ts->rx_pending_pkt_list)) { - pReorderEntry = (struct rx_reorder_entry *) + reorder_entry = (struct rx_reorder_entry *) list_entry(ts->rx_pending_pkt_list.prev, struct 
rx_reorder_entry, list); if (index == 0) - ts->rx_indicate_seq = pReorderEntry->SeqNum; + ts->rx_indicate_seq = reorder_entry->seq_num; - if (SN_LESS(pReorderEntry->SeqNum, + if (SN_LESS(reorder_entry->seq_num, ts->rx_indicate_seq) || - SN_EQUAL(pReorderEntry->SeqNum, + SN_EQUAL(reorder_entry->seq_num, ts->rx_indicate_seq)) { - list_del_init(&pReorderEntry->list); + list_del_init(&reorder_entry->list); - if (SN_EQUAL(pReorderEntry->SeqNum, + if (SN_EQUAL(reorder_entry->seq_num, ts->rx_indicate_seq)) ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) % 4096; netdev_dbg(ieee->dev, - "%s(): Indicate SeqNum: %d\n", - __func__, pReorderEntry->SeqNum); + "%s(): Indicate seq_num: %d\n", + __func__, reorder_entry->seq_num); ieee->stats_IndicateArray[index] = - pReorderEntry->prxb; + reorder_entry->prxb; index++; - list_add_tail(&pReorderEntry->list, + list_add_tail(&reorder_entry->list, &ieee->RxReorder_Unused_List); } else { pkt_in_buf = true; @@ -225,7 +225,7 @@ static void MakeTSEntry(struct ts_common_info *ts_common_info, u8 *addr, } bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS, - u8 *addr, u8 TID, enum tr_select tx_rx_select, bool bAddNewTs) + u8 *addr, u8 TID, enum tr_select tx_rx_select, bool add_new_ts) { u8 UP = 0; struct qos_tsinfo tspec; @@ -269,7 +269,7 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS, if (*ppTS) return true; - if (!bAddNewTs) { + if (!add_new_ts) { netdev_dbg(ieee->dev, "add new TS failed(tid:%d)\n", UP); return false; } @@ -336,8 +336,8 @@ static void RemoveTsEntry(struct rtllib_device *ieee, pRxReorderEntry = (struct rx_reorder_entry *) list_entry(ts->rx_pending_pkt_list.prev, struct rx_reorder_entry, list); - netdev_dbg(ieee->dev, "%s(): Delete SeqNum %d!\n", - __func__, pRxReorderEntry->SeqNum); + netdev_dbg(ieee->dev, "%s(): Delete seq_num %d!\n", + __func__, pRxReorderEntry->seq_num); list_del_init(&pRxReorderEntry->list); { int i = 0; diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index 022851b7f1a9ca..d6615f787d5302 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -93,21 +93,21 @@ static inline void *netdev_priv_rsl(struct net_device *dev) #define SUPPORT_CKIP_PK 0x10 #define RT_RF_OFF_LEVL_HALT_NIC BIT(3) #define RT_IN_PS_LEVEL(psc, _PS_FLAG) \ - ((psc->CurPsLevel & _PS_FLAG) ? true : false) + ((psc->cur_ps_level & _PS_FLAG) ? 
true : false) #define RT_CLEAR_PS_LEVEL(psc, _PS_FLAG) \ - (psc->CurPsLevel &= (~(_PS_FLAG))) + (psc->cur_ps_level &= (~(_PS_FLAG))) /* defined for skb cb field */ /* At most 28 byte */ struct cb_desc { /* Tx Desc Related flags (8-9) */ - u8 bLastIniPkt:1; - u8 bCmdOrInit:1; + u8 last_ini_pkt:1; + u8 cmd_or_init:1; u8 tx_dis_rate_fallback:1; u8 tx_use_drv_assinged_rate:1; u8 hw_sec:1; - u8 nStuckCount; + u8 stuck_count; /* Tx Firmware Related flags (10-11)*/ u8 cts_enable:1; @@ -153,20 +153,20 @@ struct cb_desc { }; enum sw_chnl_cmd_id { - CmdID_End, - CmdID_SetTxPowerLevel, - CmdID_BBRegWrite10, - CmdID_WritePortUlong, - CmdID_WritePortUshort, - CmdID_WritePortUchar, - CmdID_RF_WriteReg, + cmd_id_end, + cmd_id_set_tx_power_level, + cmd_id_bbreg_write10, + cmd_id_write_port_ulong, + cmd_id_write_port_ushort, + cmd_id_write_port_uchar, + cmd_id_rf_write_reg, }; struct sw_chnl_cmd { - enum sw_chnl_cmd_id CmdID; - u32 Para1; - u32 Para2; - u32 msDelay; + enum sw_chnl_cmd_id cmd_id; + u32 para1; + u32 para2; + u32 ms_delay; }; /*--------------------------Define -------------------------------------------*/ @@ -339,12 +339,12 @@ enum rt_op_mode { #define FC_QOS_BIT BIT(7) #define is_data_frame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false) -#define is_legacy_data_frame(pdu) (is_data_frame(pdu) && (!(pdu[0]&FC_QOS_BIT))) +#define is_legacy_data_frame(pdu) (is_data_frame(pdu) && (!(pdu[0] & FC_QOS_BIT))) #define is_qos_data_frame(pframe) \ - ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA)) == \ - (IEEE80211_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA)) -#define frame_order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER) -#define SN_LESS(a, b) (((a-b)&0x800) != 0) + ((*(u16 *)pframe & (IEEE80211_STYPE_QOS_DATA | RTLLIB_FTYPE_DATA)) == \ + (IEEE80211_STYPE_QOS_DATA | RTLLIB_FTYPE_DATA)) +#define frame_order(pframe) (*(u16 *)pframe & IEEE80211_FCTL_ORDER) +#define SN_LESS(a, b) (((a - b) & 0x800) != 0) #define SN_EQUAL(a, b) (a == b) #define MAX_DEV_ADDR_SIZE 8 @@ -414,24 +414,13 @@ enum _REG_PREAMBLE_MODE { #define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTLLIB_SCTL_FRAG) #define WLAN_GET_SEQ_SEQ(seq) (((seq) & RTLLIB_SCTL_SEQ) >> 4) -/* Authentication algorithms */ -#define WLAN_AUTH_OPEN 0 -#define WLAN_AUTH_SHARED_KEY 1 -#define WLAN_AUTH_LEAP 128 - -#define WLAN_CAPABILITY_ESS (1<<0) -#define WLAN_CAPABILITY_IBSS (1<<1) -#define WLAN_CAPABILITY_PRIVACY (1<<4) -#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) -#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) - -#define RTLLIB_STATMASK_SIGNAL (1<<0) -#define RTLLIB_STATMASK_RSSI (1<<1) -#define RTLLIB_STATMASK_NOISE (1<<2) +#define RTLLIB_STATMASK_SIGNAL (1 << 0) +#define RTLLIB_STATMASK_RSSI (1 << 1) +#define RTLLIB_STATMASK_NOISE (1 << 2) #define RTLLIB_STATMASK_WEMASK 0x7 -#define RTLLIB_CCK_MODULATION (1<<0) -#define RTLLIB_OFDM_MODULATION (1<<1) +#define RTLLIB_CCK_MODULATION (1 << 0) +#define RTLLIB_OFDM_MODULATION (1 << 1) #define RTLLIB_CCK_RATE_LEN 4 #define RTLLIB_CCK_RATE_1MB 0x02 @@ -475,27 +464,27 @@ struct rtllib_rx_stats { u8 mask; u16 len; u16 Length; - u8 SignalQuality; + u8 signal_quality; s32 RecvSignalPower; - u8 SignalStrength; - u16 bHwError:1; + u8 signal_strength; + u16 hw_error:1; u16 bCRC:1; u16 bICV:1; u16 decrypted:1; u32 time_stamp_low; u32 time_stamp_high; - u8 RxDrvInfoSize; - u8 RxBufShift; + u8 rx_drv_info_size; + u8 rx_buf_shift; bool bIsAMPDU; bool bFirstMPDU; bool contain_htc; u32 RxPWDBAll; - u8 RxMIMOSignalStrength[4]; + u8 RxMIMOSignalStrength[2]; s8 RxMIMOSignalQuality[2]; bool bPacketMatchBSSID; bool bIsCCK; - bool 
bPacketToSelf; + bool packet_to_self; bool bPacketBeacon; bool bToSelfBA; }; @@ -518,11 +507,11 @@ struct rtllib_frag_entry { struct rtllib_device; -#define SEC_ACTIVE_KEY (1<<4) -#define SEC_AUTH_MODE (1<<5) -#define SEC_UNICAST_GROUP (1<<6) -#define SEC_LEVEL (1<<7) -#define SEC_ENABLED (1<<8) +#define SEC_ACTIVE_KEY (1 << 4) +#define SEC_AUTH_MODE (1 << 5) +#define SEC_UNICAST_GROUP (1 << 6) +#define SEC_LEVEL (1 << 7) +#define SEC_ENABLED (1 << 8) #define SEC_LEVEL_0 0 /* None */ #define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */ @@ -707,17 +696,17 @@ union frameqos { #define MAX_WPA_IE_LEN 64 #define MAX_WZC_IE_LEN 256 -#define NETWORK_EMPTY_ESSID (1<<0) -#define NETWORK_HAS_OFDM (1<<1) -#define NETWORK_HAS_CCK (1<<2) +#define NETWORK_EMPTY_ESSID (1 << 0) +#define NETWORK_HAS_OFDM (1 << 1) +#define NETWORK_HAS_CCK (1 << 2) /* QoS structure */ -#define NETWORK_HAS_QOS_PARAMETERS (1<<3) -#define NETWORK_HAS_QOS_INFORMATION (1<<4) +#define NETWORK_HAS_QOS_PARAMETERS (1 << 3) +#define NETWORK_HAS_QOS_INFORMATION (1 << 4) #define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \ NETWORK_HAS_QOS_INFORMATION) /* 802.11h */ -#define NETWORK_HAS_ERP_VALUE (1<<10) +#define NETWORK_HAS_ERP_VALUE (1 << 10) #define QOS_QUEUE_NUM 4 #define QOS_OUI_LEN 3 @@ -962,7 +951,7 @@ struct rtllib_network { bool unknown_cap_exist; bool berp_info_valid; bool buseprotection; - u8 SignalStrength; + u8 signal_strength; u8 RSSI; struct list_head list; }; @@ -1007,8 +996,8 @@ enum rtl_link_state { #define DEFAULT_MAX_SCAN_AGE (15 * HZ) #define DEFAULT_FTS 2346 -#define CFG_RTLLIB_RESERVE_FCS (1<<0) -#define CFG_RTLLIB_COMPUTE_FCS (1<<1) +#define CFG_RTLLIB_RESERVE_FCS (1 << 0) +#define CFG_RTLLIB_COMPUTE_FCS (1 << 1) struct tx_pending { int frag; @@ -1026,7 +1015,7 @@ struct bandwidth_autoswitch { #define REORDER_ENTRY_NUM 128 struct rx_reorder_entry { struct list_head list; - u16 SeqNum; + u16 seq_num; struct rtllib_rxb *prxb; }; @@ -1057,7 +1046,7 @@ struct rt_pwr_save_ctrl { u8 lps_idle_count; u8 lps_awake_intvl; - u32 CurPsLevel; + u32 cur_ps_level; }; #define RT_RF_CHANGE_SOURCE u32 @@ -1299,7 +1288,7 @@ struct rtllib_device { u16 scan_watch_dog; /* map of allowed channels. 
0 is dummy */ - u8 active_channel_map[MAX_CHANNEL_NUMBER+1]; + u8 active_channel_map[MAX_CHANNEL_NUMBER + 1]; int rate; /* current rate */ int basic_rate; @@ -1471,9 +1460,9 @@ struct rtllib_device { void (*set_wireless_mode)(struct net_device *dev, u8 wireless_mode); bool (*get_half_nmode_support_by_aps_handler)(struct net_device *dev); u8 (*rtllib_ap_sec_type)(struct rtllib_device *ieee); - void (*init_gain_handler)(struct net_device *dev, u8 Operation); - void (*ScanOperationBackupHandler)(struct net_device *dev, - u8 Operation); + void (*init_gain_handler)(struct net_device *dev, u8 operation); + void (*scan_operation_backup_handler)(struct net_device *dev, + u8 operation); void (*set_hw_reg_handler)(struct net_device *dev, u8 variable, u8 *val); void (*allow_all_dest_addr_handler)(struct net_device *dev, @@ -1497,32 +1486,32 @@ struct rtllib_device { /* Uses the channel change callback directly * instead of [start/stop] scan callbacks */ -#define IEEE_SOFTMAC_SCAN (1<<2) +#define IEEE_SOFTMAC_SCAN (1 << 2) /* Perform authentication and association handshake */ -#define IEEE_SOFTMAC_ASSOCIATE (1<<3) +#define IEEE_SOFTMAC_ASSOCIATE (1 << 3) /* Generate probe requests */ -#define IEEE_SOFTMAC_PROBERQ (1<<4) +#define IEEE_SOFTMAC_PROBERQ (1 << 4) /* Generate response to probe requests */ -#define IEEE_SOFTMAC_PROBERS (1<<5) +#define IEEE_SOFTMAC_PROBERS (1 << 5) /* The ieee802.11 stack will manage the netif queue * wake/stop for the driver, taking care of 802.11 * fragmentation. See softmac.c for details. */ -#define IEEE_SOFTMAC_TX_QUEUE (1<<7) +#define IEEE_SOFTMAC_TX_QUEUE (1 << 7) /* Uses only the softmac_data_hard_start_xmit * even for TX management frames. */ -#define IEEE_SOFTMAC_SINGLE_QUEUE (1<<8) +#define IEEE_SOFTMAC_SINGLE_QUEUE (1 << 8) /* Generate beacons. 
The stack will enqueue beacons * to the card */ -#define IEEE_SOFTMAC_BEACONS (1<<6) +#define IEEE_SOFTMAC_BEACONS (1 << 6) static inline void *rtllib_priv(struct net_device *dev) { @@ -1737,21 +1726,21 @@ void ht_set_connect_bw_mode(struct rtllib_device *ieee, void ht_update_default_setting(struct rtllib_device *ieee); void ht_construct_capability_element(struct rtllib_device *ieee, u8 *pos_ht_cap, u8 *len, - u8 isEncrypt, bool bAssoc); + u8 is_encrypt, bool assoc); void ht_construct_rt2rt_agg_element(struct rtllib_device *ieee, u8 *posRT2RTAgg, u8 *len); void ht_on_assoc_rsp(struct rtllib_device *ieee); void ht_initialize_ht_info(struct rtllib_device *ieee); void ht_initialize_bss_desc(struct bss_ht *bss_ht); void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee, - struct rtllib_network *pNetwork); + struct rtllib_network *network); void HT_update_self_and_peer_setting(struct rtllib_device *ieee, - struct rtllib_network *pNetwork); + struct rtllib_network *network); u8 ht_get_highest_mcs_rate(struct rtllib_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter); extern u8 MCS_FILTER_ALL[]; extern u16 MCS_DATA_RATE[2][2][77]; -u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame); +u8 ht_c_check(struct rtllib_device *ieee, u8 *frame); void ht_reset_iot_setting(struct rt_hi_throughput *ht_info); bool is_ht_half_nmode_aps(struct rtllib_device *ieee); u16 tx_count_to_data_rate(struct rtllib_device *ieee, u8 nDataRate); @@ -1768,7 +1757,7 @@ void rtllib_tx_ba_inact_timeout(struct timer_list *t); void rtllib_rx_ba_inact_timeout(struct timer_list *t); void rtllib_reset_ba_entry(struct ba_record *ba); bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *addr, - u8 TID, enum tr_select tx_rx_select, bool bAddNewTs); + u8 TID, enum tr_select tx_rx_select, bool add_new_ts); void rtllib_ts_init(struct rtllib_device *ieee); void rtllib_ts_start_add_ba_process(struct rtllib_device *ieee, struct tx_ts_record *pTxTS); diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c index 639877069fad25..138733cb00e272 100644 --- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c @@ -378,7 +378,7 @@ static void rtllib_ccmp_print_stats(struct seq_file *m, void *priv) ccmp->dot11rsna_stats_ccmp_decrypt_errors); } -static struct lib80211_crypto_ops rtllib_crypt_ccmp = { +static const struct lib80211_crypto_ops rtllib_crypt_ccmp = { .name = "R-CCMP", .init = rtllib_ccmp_init, .deinit = rtllib_ccmp_deinit, diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c index dc0917b0351174..e544379bfa91aa 100644 --- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c @@ -637,12 +637,6 @@ static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv) if (seq) { /* Return the sequence number of the last transmitted frame. 
*/ - u16 iv16 = tkey->tx_iv16; - u32 iv32 = tkey->tx_iv32; - - if (iv16 == 0) - iv32--; - iv16--; seq[0] = tkey->tx_iv16; seq[1] = tkey->tx_iv16 >> 8; seq[2] = tkey->tx_iv32; @@ -678,7 +672,7 @@ static void rtllib_tkip_print_stats(struct seq_file *m, void *priv) tkip->dot11RSNAStatsTKIPLocalMICFailures); } -static struct lib80211_crypto_ops rtllib_crypt_tkip = { +static const struct lib80211_crypto_ops rtllib_crypt_tkip = { .name = "R-TKIP", .init = rtllib_tkip_init, .deinit = rtllib_tkip_deinit, diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c index 10092f6884ff18..aa18c060d72706 100644 --- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c @@ -209,7 +209,7 @@ static void prism2_wep_print_stats(struct seq_file *m, void *priv) seq_printf(m, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len); } -static struct lib80211_crypto_ops rtllib_crypt_wep = { +static const struct lib80211_crypto_ops rtllib_crypt_wep = { .name = "R-WEP", .init = prism2_wep_init, .deinit = prism2_wep_deinit, diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 84ca5d769b7e47..8fe224a83dd691 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -403,26 +403,26 @@ static int is_duplicate_packet(struct rtllib_device *ieee, } static bool add_reorder_entry(struct rx_ts_record *ts, - struct rx_reorder_entry *pReorderEntry) + struct rx_reorder_entry *reorder_entry) { struct list_head *list = &ts->rx_pending_pkt_list; while (list->next != &ts->rx_pending_pkt_list) { - if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *) + if (SN_LESS(reorder_entry->seq_num, ((struct rx_reorder_entry *) list_entry(list->next, struct rx_reorder_entry, - list))->SeqNum)) + list))->seq_num)) list = list->next; - else if (SN_EQUAL(pReorderEntry->SeqNum, + else if (SN_EQUAL(reorder_entry->seq_num, ((struct rx_reorder_entry *)list_entry(list->next, - struct rx_reorder_entry, list))->SeqNum)) + struct rx_reorder_entry, list))->seq_num)) return false; else break; } - pReorderEntry->list.next = list->next; - pReorderEntry->list.next->prev = &pReorderEntry->list; - pReorderEntry->list.prev = list; - list->next = &pReorderEntry->list; + reorder_entry->list.next = list->next; + reorder_entry->list.next->prev = &reorder_entry->list; + reorder_entry->list.prev = list; + list->next = &reorder_entry->list; return true; } @@ -504,8 +504,8 @@ void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee, pRxReorderEntry = (struct rx_reorder_entry *) list_entry(ts->rx_pending_pkt_list.prev, struct rx_reorder_entry, list); - netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n", __func__, - pRxReorderEntry->SeqNum); + netdev_dbg(ieee->dev, "%s(): Indicate seq_num %d!\n", __func__, + pRxReorderEntry->seq_num); list_del_init(&pRxReorderEntry->list); ieee->rfd_array[rfd_cnt] = pRxReorderEntry->prxb; @@ -521,10 +521,10 @@ void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee, static void rx_reorder_indicate_packet(struct rtllib_device *ieee, struct rtllib_rxb *prxb, - struct rx_ts_record *ts, u16 SeqNum) + struct rx_ts_record *ts, u16 seq_num) { struct rt_hi_throughput *ht_info = ieee->ht_info; - struct rx_reorder_entry *pReorderEntry = NULL; + struct rx_reorder_entry *reorder_entry = NULL; u8 win_size = ht_info->rx_reorder_win_size; u16 win_end = 0; u8 index = 0; @@ -533,20 +533,20 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee, 
netdev_dbg(ieee->dev, "%s(): Seq is %d, ts->rx_indicate_seq is %d, win_size is %d\n", - __func__, SeqNum, ts->rx_indicate_seq, win_size); + __func__, seq_num, ts->rx_indicate_seq, win_size); spin_lock_irqsave(&(ieee->reorder_spinlock), flags); win_end = (ts->rx_indicate_seq + win_size - 1) % 4096; /* Rx Reorder initialize condition.*/ if (ts->rx_indicate_seq == 0xffff) - ts->rx_indicate_seq = SeqNum; + ts->rx_indicate_seq = seq_num; - /* Drop out the packet which SeqNum is smaller than WinStart */ - if (SN_LESS(SeqNum, ts->rx_indicate_seq)) { + /* Drop out the packet which seq_num is smaller than WinStart */ + if (SN_LESS(seq_num, ts->rx_indicate_seq)) { netdev_dbg(ieee->dev, "Packet Drop! IndicateSeq: %d, NewSeq: %d\n", - ts->rx_indicate_seq, SeqNum); + ts->rx_indicate_seq, seq_num); ht_info->rx_reorder_drop_counter++; { int i; @@ -561,62 +561,62 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee, } /* Sliding window manipulation. Conditions includes: - * 1. Incoming SeqNum is equal to WinStart =>Window shift 1 - * 2. Incoming SeqNum is larger than the win_end => Window shift N + * 1. Incoming seq_num is equal to WinStart =>Window shift 1 + * 2. Incoming seq_num is larger than the win_end => Window shift N */ - if (SN_EQUAL(SeqNum, ts->rx_indicate_seq)) { + if (SN_EQUAL(seq_num, ts->rx_indicate_seq)) { ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) % 4096; match_win_start = true; - } else if (SN_LESS(win_end, SeqNum)) { - if (SeqNum >= (win_size - 1)) - ts->rx_indicate_seq = SeqNum + 1 - win_size; + } else if (SN_LESS(win_end, seq_num)) { + if (seq_num >= (win_size - 1)) + ts->rx_indicate_seq = seq_num + 1 - win_size; else ts->rx_indicate_seq = 4095 - - (win_size - (SeqNum + 1)) + 1; + (win_size - (seq_num + 1)) + 1; netdev_dbg(ieee->dev, "Window Shift! IndicateSeq: %d, NewSeq: %d\n", - ts->rx_indicate_seq, SeqNum); + ts->rx_indicate_seq, seq_num); } /* Indication process. * After Packet dropping and Sliding Window shifting as above, we can - * now just indicate the packets with the SeqNum smaller than latest + * now just indicate the packets with the seq_num smaller than latest * WinStart and struct buffer other packets. * * For Rx Reorder condition: - * 1. All packets with SeqNum smaller than WinStart => Indicate - * 2. All packets with SeqNum larger than or equal to + * 1. All packets with seq_num smaller than WinStart => Indicate + * 2. All packets with seq_num larger than or equal to * WinStart => Buffer it. */ if (match_win_start) { /* Current packet is going to be indicated.*/ netdev_dbg(ieee->dev, "Packets indication! IndicateSeq: %d, NewSeq: %d\n", - ts->rx_indicate_seq, SeqNum); + ts->rx_indicate_seq, seq_num); ieee->prxb_indicate_array[0] = prxb; index = 1; } else { /* Current packet is going to be inserted into pending list.*/ if (!list_empty(&ieee->RxReorder_Unused_List)) { - pReorderEntry = (struct rx_reorder_entry *) + reorder_entry = (struct rx_reorder_entry *) list_entry(ieee->RxReorder_Unused_List.next, struct rx_reorder_entry, list); - list_del_init(&pReorderEntry->list); + list_del_init(&reorder_entry->list); /* Make a reorder entry and insert * into a the packet list. */ - pReorderEntry->SeqNum = SeqNum; - pReorderEntry->prxb = prxb; + reorder_entry->seq_num = seq_num; + reorder_entry->prxb = prxb; - if (!add_reorder_entry(ts, pReorderEntry)) { + if (!add_reorder_entry(ts, reorder_entry)) { int i; netdev_dbg(ieee->dev, "%s(): Duplicate packet is dropped. 
IndicateSeq: %d, NewSeq: %d\n", __func__, ts->rx_indicate_seq, - SeqNum); - list_add_tail(&pReorderEntry->list, + seq_num); + list_add_tail(&reorder_entry->list, &ieee->RxReorder_Unused_List); for (i = 0; i < prxb->nr_subframes; i++) @@ -626,7 +626,7 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee, } else { netdev_dbg(ieee->dev, "Pkt insert into struct buffer. IndicateSeq: %d, NewSeq: %d\n", - ts->rx_indicate_seq, SeqNum); + ts->rx_indicate_seq, seq_num); } } else { /* Packets are dropped if there are not enough reorder @@ -653,12 +653,12 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee, netdev_dbg(ieee->dev, "%s(): start RREORDER indicate\n", __func__); - pReorderEntry = (struct rx_reorder_entry *) + reorder_entry = (struct rx_reorder_entry *) list_entry(ts->rx_pending_pkt_list.prev, struct rx_reorder_entry, list); - if (SN_LESS(pReorderEntry->SeqNum, ts->rx_indicate_seq) || - SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq)) { + if (SN_LESS(reorder_entry->seq_num, ts->rx_indicate_seq) || + SN_EQUAL(reorder_entry->seq_num, ts->rx_indicate_seq)) { /* This protect struct buffer from overflow. */ if (index >= REORDER_WIN_SIZE) { netdev_err(ieee->dev, @@ -668,18 +668,18 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee, break; } - list_del_init(&pReorderEntry->list); + list_del_init(&reorder_entry->list); - if (SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq)) + if (SN_EQUAL(reorder_entry->seq_num, ts->rx_indicate_seq)) ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) % 4096; - ieee->prxb_indicate_array[index] = pReorderEntry->prxb; - netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n", - __func__, pReorderEntry->SeqNum); + ieee->prxb_indicate_array[index] = reorder_entry->prxb; + netdev_dbg(ieee->dev, "%s(): Indicate seq_num %d!\n", + __func__, reorder_entry->seq_num); index++; - list_add_tail(&pReorderEntry->list, + list_add_tail(&reorder_entry->list, &ieee->RxReorder_Unused_List); } else { pkt_in_buf = true; @@ -729,12 +729,12 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb, u16 llc_offset = sizeof(struct ieee80211_hdr_3addr); bool is_aggregate_frame = false; - u16 nSubframe_Length; + u16 subframe_len; u8 pad_len = 0; - u16 SeqNum = 0; + u16 seq_num = 0; struct sk_buff *sub_skb; /* just for debug purpose */ - SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl)); + seq_num = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl)); if ((RTLLIB_QOS_HAS_SEQ(fc)) && (((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved)) is_aggregate_frame = true; @@ -781,23 +781,23 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb, memcpy(rxb->dst, dst, ETH_ALEN); while (skb->len > ETHERNET_HEADER_SIZE) { /* Offset 12 denote 2 mac address */ - nSubframe_Length = *((u16 *)(skb->data + 12)); - nSubframe_Length = (nSubframe_Length >> 8) + - (nSubframe_Length << 8); + subframe_len = *((u16 *)(skb->data + 12)); + subframe_len = (subframe_len >> 8) + + (subframe_len << 8); - if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) { + if (skb->len < (ETHERNET_HEADER_SIZE + subframe_len)) { netdev_info(ieee->dev, "%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n", __func__, rxb->nr_subframes); netdev_info(ieee->dev, "%s: A-MSDU parse error!! 
Subframe Length: %d\n", - __func__, nSubframe_Length); + __func__, subframe_len); netdev_info(ieee->dev, - "nRemain_Length is %d and nSubframe_Length is : %d\n", - skb->len, nSubframe_Length); + "nRemain_Length is %d and subframe_len is : %d\n", + skb->len, subframe_len); netdev_info(ieee->dev, - "The Packet SeqNum is %d\n", - SeqNum); + "The Packet seq_num is %d\n", + seq_num); return 0; } @@ -813,11 +813,11 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb, */ /* Allocate new skb for releasing to upper layer */ - sub_skb = dev_alloc_skb(nSubframe_Length + 12); + sub_skb = dev_alloc_skb(subframe_len + 12); if (!sub_skb) return 0; skb_reserve(sub_skb, 12); - skb_put_data(sub_skb, skb->data, nSubframe_Length); + skb_put_data(sub_skb, skb->data, subframe_len); sub_skb->dev = ieee->dev; rxb->subframes[rxb->nr_subframes++] = sub_skb; @@ -826,10 +826,10 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb, "ParseSubframe(): Too many Subframes! Packets dropped!\n"); break; } - skb_pull(skb, nSubframe_Length); + skb_pull(skb, subframe_len); if (skb->len != 0) { - pad_len = 4 - ((nSubframe_Length + + pad_len = 4 - ((subframe_len + ETHERNET_HEADER_SIZE) % 4); if (pad_len == 4) pad_len = 0; @@ -1227,7 +1227,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb struct lib80211_crypt_data *crypt = NULL; struct rtllib_rxb *rxb = NULL; struct rx_ts_record *ts = NULL; - u16 fc, sc, SeqNum = 0; + u16 fc, sc, seq_num = 0; u8 type, stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0; u8 dst[ETH_ALEN]; u8 src[ETH_ALEN]; @@ -1321,7 +1321,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb if (ieee->current_network.qos_data.active && is_qos_data_frame(skb->data) && !is_multicast_ether_addr(hdr->addr1)) { TID = frame_qos_tid(skb->data); - SeqNum = WLAN_GET_SEQ_SEQ(sc); + seq_num = WLAN_GET_SEQ_SEQ(sc); rtllib_get_ts(ieee, (struct ts_common_info **)&ts, hdr->addr2, TID, RX_DIR, true); if (TID != 0 && TID != 3) @@ -1362,7 +1362,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb if (!ieee->ht_info->cur_rx_reorder_enable || !ts) rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src); else - rx_reorder_indicate_packet(ieee, rxb, ts, SeqNum); + rx_reorder_indicate_packet(ieee, rxb, ts, seq_num); dev_kfree_skb(skb); @@ -2177,8 +2177,8 @@ static inline int rtllib_network_init( network->marvell_cap_exist = false; network->airgo_cap_exist = false; network->turbo_enable = 0; - network->SignalStrength = stats->SignalStrength; - network->RSSI = stats->SignalStrength; + network->signal_strength = stats->signal_strength; + network->RSSI = stats->signal_strength; network->country_ie_len = 0; memset(network->country_ie_buf, 0, MAX_IE_LEN); ht_initialize_bss_desc(&network->bssht); @@ -2215,7 +2215,7 @@ static inline int rtllib_network_init( } if (rtllib_is_empty_essid(network->ssid, network->ssid_len)) network->flags |= NETWORK_EMPTY_ESSID; - stats->signal = 30 + (stats->SignalStrength * 70) / 100; + stats->signal = 30 + (stats->signal_strength * 70) / 100; stats->noise = rtllib_translate_todbm((u8)(100 - stats->signal)) - 25; memcpy(&network->stats, stats, sizeof(network->stats)); @@ -2334,7 +2334,7 @@ static inline void update_network(struct rtllib_device *ieee, src->wmm_param[3].ac_aci_acm_aifsn) memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN); - dst->SignalStrength = src->SignalStrength; + dst->signal_strength = src->signal_strength; dst->RSSI = 
src->RSSI; dst->turbo_enable = src->turbo_enable; diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c index 11542aea4a209c..c59686d68a33b9 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c +++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c @@ -314,7 +314,7 @@ void rtllib_wx_sync_scan_wq(void *data) /* wait for ps packet to be kicked out successfully */ msleep(50); - ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP); + ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_BACKUP); if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht && ieee->ht_info->cur_bw_40mhz) { @@ -339,7 +339,7 @@ void rtllib_wx_sync_scan_wq(void *data) ieee->set_chan(ieee->dev, chan); } - ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE); + ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_RESTORE); ieee->link_state = MAC80211_LINKED; ieee->link_change(ieee->dev); diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c index fbd4ec824084dc..c730d921463dd4 100644 --- a/drivers/staging/rtl8192e/rtllib_wx.c +++ b/drivers/staging/rtl8192e/rtllib_wx.c @@ -474,7 +474,7 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee, int i, idx; int group_key = 0; const char *alg, *module; - struct lib80211_crypto_ops *ops; + const struct lib80211_crypto_ops *ops; struct lib80211_crypt_data **crypt; struct rtllib_security sec = { diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c index 1fabc5137a4cb0..ab344d676bb946 100644 --- a/drivers/staging/rtl8712/rtl8712_recv.c +++ b/drivers/staging/rtl8712/rtl8712_recv.c @@ -136,10 +136,6 @@ void r8712_free_recvframe(union recv_frame *precvframe, static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib, struct recv_stat *prxstat) { - u16 drvinfo_sz; - - drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16; - drvinfo_sz <<= 3; /*TODO: * Offset 0 */ diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index bbd4a13c7bb9d1..ffeb91dd28c438 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -528,8 +528,9 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key) if (unicast_key) memcpy(&psetstakey_para->key, &sta->x_UncstKey, 16); else - memcpy(&psetstakey_para->key, &psecuritypriv->XGrpKey[psecuritypriv->XGrpKeyid - 1]. 
- skey, 16); + memcpy(&psetstakey_para->key, + &psecuritypriv->XGrpKey[psecuritypriv->XGrpKeyid - 1].skey, + 16); r8712_enqueue_cmd(pcmdpriv, ph2c); } diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h index 2613b3c2acfcbe..268844af57f006 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.h +++ b/drivers/staging/rtl8712/rtl871x_cmd.h @@ -736,7 +736,7 @@ void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd); void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd); void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd); -void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt, +void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt, u32 tryPktInterval, u32 firstStageTO); struct _cmd_callback { diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c index 6789a4c985649e..20e080e284dd43 100644 --- a/drivers/staging/rtl8712/rtl871x_io.c +++ b/drivers/staging/rtl8712/rtl871x_io.c @@ -48,8 +48,8 @@ static uint _init_intf_hdl(struct _adapter *padapter, set_intf_funs = &(r8712_usb_set_intf_funs); set_intf_ops = &r8712_usb_set_intf_ops; init_intf_priv = &r8712_usb_init_intf_priv; - pintf_priv = pintf_hdl->pintfpriv = kmalloc(sizeof(struct intf_priv), - GFP_ATOMIC); + pintf_priv = kmalloc(sizeof(*pintf_priv), GFP_ATOMIC); + pintf_hdl->pintfpriv = pintf_priv; if (!pintf_priv) goto _init_intf_hdl_fail; pintf_hdl->adapter = (u8 *)padapter; diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c index 0a3451cdc8a11e..4a34824830e39d 100644 --- a/drivers/staging/rtl8712/usb_ops_linux.c +++ b/drivers/staging/rtl8712/usb_ops_linux.c @@ -221,7 +221,7 @@ static void r8712_usb_read_port_complete(struct urb *purb) fallthrough; case -EPROTO: r8712_read_port(padapter, precvpriv->ff_hwaddr, 0, - (unsigned char *)precvbuf); + (unsigned char *)precvbuf); break; case -EINPROGRESS: netdev_err(padapter->pnetdev, "ERROR: URB IS IN PROGRESS!\n"); diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig index f23e29b679fb5e..8d48c61961a6b7 100644 --- a/drivers/staging/rtl8723bs/Kconfig +++ b/drivers/staging/rtl8723bs/Kconfig @@ -3,7 +3,6 @@ config RTL8723BS tristate "Realtek RTL8723BS SDIO Wireless LAN NIC driver" depends on WLAN && MMC && CFG80211 depends on m - select CFG80211_WEXT select CRYPTO select CRYPTO_LIB_ARC4 help diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile index 7f5067e89295e4..ba200ee669f3af 100644 --- a/drivers/staging/rtl8723bs/Makefile +++ b/drivers/staging/rtl8723bs/Makefile @@ -3,7 +3,6 @@ r8723bs-y = \ core/rtw_ap.o \ core/rtw_btcoex.o \ core/rtw_cmd.o \ - core/rtw_debug.o \ core/rtw_efuse.o \ core/rtw_io.o \ core/rtw_ioctl_set.o \ @@ -12,7 +11,6 @@ r8723bs-y = \ core/rtw_mlme_ext.o \ core/rtw_pwrctrl.o \ core/rtw_recv.o \ - core/rtw_rf.o \ core/rtw_security.o \ core/rtw_sta_mgt.o \ core/rtw_wlan_util.o \ diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c index e4063713fecc02..a6dc88dd4ba158 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ap.c +++ b/drivers/staging/rtl8723bs/core/rtw_ap.c @@ -6,8 +6,7 @@ ******************************************************************************/ #include -#include -#include +#include void 
init_mlme_ap_info(struct adapter *padapter) { @@ -277,7 +276,7 @@ void expire_timeout_chk(struct adapter *padapter) /* switch to correct channel of current network before issue keep-alive frames */ if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) { backup_oper_channel = rtw_get_oper_ch(padapter); - SelectChannel(padapter, pmlmeext->cur_channel); + r8723bs_select_channel(padapter, pmlmeext->cur_channel); } /* issue null data to check sta alive*/ @@ -315,7 +314,7 @@ void expire_timeout_chk(struct adapter *padapter) } if (backup_oper_channel > 0) /* back to the original operation channel */ - SelectChannel(padapter, backup_oper_channel); + r8723bs_select_channel(padapter, backup_oper_channel); } associated_clients_update(padapter, updated); diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c index 62cbf84b079a4c..d54095f5011320 100644 --- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c +++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include #include #include diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c index d3f10a3cf972a3..84ce7307d8f3df 100644 --- a/drivers/staging/rtl8723bs/core/rtw_cmd.c +++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include #include #include @@ -1884,9 +1883,6 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd) /* copy pdev_network information to pmlmepriv->cur_network */ memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork))); - /* reset ds_config */ - /* tgt_network->network.configuration.ds_config = (u32)rtw_ch2freq(pnetwork->configuration.ds_config); */ - _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c deleted file mode 100644 index 5354fdd11c9b58..00000000000000 --- a/drivers/staging/rtl8723bs/core/rtw_debug.c +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/****************************************************************************** - * - * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. 
- * - ******************************************************************************/ - -#include -#include -#include - -#include - -static void dump_4_regs(struct adapter *adapter, int offset) -{ - u32 reg[4]; - int i; - - for (i = 0; i < 4; i++) - reg[i] = rtw_read32(adapter, offset + i); - - netdev_dbg(adapter->pnetdev, "0x%03x 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, reg[0], reg[1], reg[2], reg[3]); -} - -void mac_reg_dump(struct adapter *adapter) -{ - int i; - - netdev_dbg(adapter->pnetdev, "======= MAC REG =======\n"); - - for (i = 0x0; i < 0x800; i += 4) - dump_4_regs(adapter, i); -} - -void bb_reg_dump(struct adapter *adapter) -{ - int i; - - netdev_dbg(adapter->pnetdev, "======= BB REG =======\n"); - - for (i = 0x800; i < 0x1000 ; i += 4) - dump_4_regs(adapter, i); -} - -static void dump_4_rf_regs(struct adapter *adapter, int path, int offset) -{ - u8 reg[4]; - int i; - - for (i = 0; i < 4; i++) - reg[i] = rtw_hal_read_rfreg(adapter, path, offset + i, - 0xffffffff); - - netdev_dbg(adapter->pnetdev, "0x%02x 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, reg[0], reg[1], reg[2], reg[3]); -} - -void rf_reg_dump(struct adapter *adapter) -{ - int i, path = 0; - - netdev_dbg(adapter->pnetdev, "======= RF REG =======\n"); - - netdev_dbg(adapter->pnetdev, "RF_Path(%x)\n", path); - for (i = 0; i < 0x100; i++) - dump_4_rf_regs(adapter, path, i); -} diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c index eb848f9bbf2c36..8b671f8a79659f 100644 --- a/drivers/staging/rtl8723bs/core/rtw_efuse.c +++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include #include #include @@ -38,7 +37,7 @@ Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value) if (fakeEfuseBank == 0) *Value = fakeEfuseContent[Offset]; else - *Value = fakeBTEfuseContent[fakeEfuseBank-1][Offset]; + *Value = fakeBTEfuseContent[fakeEfuseBank - 1][Offset]; return true; } @@ -50,7 +49,7 @@ Efuse_Write1ByteToFakeContent(u16 Offset, u8 Value) if (fakeEfuseBank == 0) fakeEfuseContent[Offset] = Value; else - fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value; + fakeBTEfuseContent[fakeEfuseBank - 1][Offset] = Value; return true; } @@ -206,21 +205,21 @@ u16 Address) if (Address < contentLen) {/* E-fuse 512Byte */ /* Write E-fuse Register address bit0~7 */ temp = Address & 0xFF; - rtw_write8(Adapter, EFUSE_CTRL+1, temp); - Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2); + rtw_write8(Adapter, EFUSE_CTRL + 1, temp); + Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 2); /* Write E-fuse Register address bit8~9 */ temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC); - rtw_write8(Adapter, EFUSE_CTRL+2, temp); + rtw_write8(Adapter, EFUSE_CTRL + 2, temp); /* Write 0x30[31]= 0 */ - Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3); + Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3); temp = Bytetemp & 0x7F; - rtw_write8(Adapter, EFUSE_CTRL+3, temp); + rtw_write8(Adapter, EFUSE_CTRL + 3, temp); /* Wait Write-ready (0x30[31]= 1) */ - Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3); + Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3); while (!(Bytetemp & 0x80)) { - Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3); + Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3); k++; if (k == 1000) break; @@ -253,16 +252,16 @@ bool bPseudoTest) /* -----------------e-fuse reg ctrl --------------------------------- */ /* address */ - rtw_write8(padapter, EFUSE_CTRL+1, (u8)(addr&0xff)); - rtw_write8(padapter, EFUSE_CTRL+2, ((u8)((addr>>8) & 0x03)) | - 
(rtw_read8(padapter, EFUSE_CTRL+2)&0xFC)); + rtw_write8(padapter, EFUSE_CTRL + 1, (u8)(addr & 0xff)); + rtw_write8(padapter, EFUSE_CTRL + 2, ((u8)((addr >> 8) & 0x03)) | + (rtw_read8(padapter, EFUSE_CTRL + 2) & 0xFC)); /* rtw_write8(padapter, EFUSE_CTRL+3, 0x72); read cmd */ /* Write bit 32 0 */ - readbyte = rtw_read8(padapter, EFUSE_CTRL+3); - rtw_write8(padapter, EFUSE_CTRL+3, (readbyte & 0x7f)); + readbyte = rtw_read8(padapter, EFUSE_CTRL + 3); + rtw_write8(padapter, EFUSE_CTRL + 3, (readbyte & 0x7f)); - while (!(0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 1000)) { + while (!(0x80 & rtw_read8(padapter, EFUSE_CTRL + 3)) && (tmpidx < 1000)) { mdelay(1); tmpidx++; } @@ -282,31 +281,22 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT { u8 tmpidx = 0; u8 bResult = false; - u32 efuseValue; if (bPseudoTest) return Efuse_Write1ByteToFakeContent(addr, data); - /* -----------------e-fuse reg ctrl --------------------------------- */ /* address */ - - efuseValue = rtw_read32(padapter, EFUSE_CTRL); - efuseValue |= (BIT21|BIT31); - efuseValue &= ~(0x3FFFF); - efuseValue |= ((addr<<8 | data) & 0x3FFFF); - - /* <20130227, Kordan> 8192E MP chip A-cut had better not set 0x34[11] until B-Cut. */ /* <20130121, Kordan> For SMIC EFUSE specificatoin. */ /* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */ /* PHY_SetMacReg(padapter, 0x34, BIT11, 1); */ rtw_write16(padapter, 0x34, rtw_read16(padapter, 0x34) | (BIT11)); - rtw_write32(padapter, EFUSE_CTRL, 0x90600000|((addr<<8 | data))); + rtw_write32(padapter, EFUSE_CTRL, 0x90600000 | ((addr << 8 | data))); - while ((0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 100)) { + while ((0x80 & rtw_read8(padapter, EFUSE_CTRL + 3)) && (tmpidx < 100)) { mdelay(1); tmpidx++; } @@ -365,19 +355,19 @@ efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata) { - if (!(word_en&BIT(0))) { + if (!(word_en & BIT(0))) { targetdata[0] = sourdata[0]; targetdata[1] = sourdata[1]; } - if (!(word_en&BIT(1))) { + if (!(word_en & BIT(1))) { targetdata[2] = sourdata[2]; targetdata[3] = sourdata[3]; } - if (!(word_en&BIT(2))) { + if (!(word_en & BIT(2))) { targetdata[4] = sourdata[4]; targetdata[5] = sourdata[5]; } - if (!(word_en&BIT(3))) { + if (!(word_en & BIT(3))) { targetdata[6] = sourdata[6]; targetdata[7] = sourdata[7]; } @@ -463,7 +453,7 @@ static void efuse_ShadowRead2Byte(struct adapter *padapter, u16 Offset, u16 *Val struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter); *Value = pEEPROM->efuse_eeprom_data[Offset]; - *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8; + *Value |= pEEPROM->efuse_eeprom_data[Offset + 1] << 8; } /* EFUSE_ShadowRead2Byte */ @@ -473,9 +463,9 @@ static void efuse_ShadowRead4Byte(struct adapter *padapter, u16 Offset, u32 *Val struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter); *Value = pEEPROM->efuse_eeprom_data[Offset]; - *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8; - *Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16; - *Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24; + *Value |= pEEPROM->efuse_eeprom_data[Offset + 1] << 8; + *Value |= pEEPROM->efuse_eeprom_data[Offset + 2] << 16; + *Value |= pEEPROM->efuse_eeprom_data[Offset + 3] << 24; } /* efuse_ShadowRead4Byte */ diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c index b89e88d6a82d52..0ed420f3d09628 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c +++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c 
@@ -6,9 +6,8 @@ ******************************************************************************/ #include -#include #include -#include +#include u8 RTW_WPA_OUI_TYPE[] = { 0x00, 0x50, 0xf2, 1 }; u16 RTW_WPA_VERSION = 1; @@ -55,7 +54,9 @@ static u8 WIFI_OFDMRATES[] = { int rtw_get_bit_value_from_ieee_value(u8 val) { - unsigned char dot11_rate_table[] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0}; /* last element must be zero!! */ + static const unsigned char dot11_rate_table[] = { + 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0 + }; /* last element must be zero!! */ int i = 0; while (dot11_rate_table[i] != 0) { diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c index 4d3c30ec93b571..fcda9db6ebb55a 100644 --- a/drivers/staging/rtl8723bs/core/rtw_io.c +++ b/drivers/staging/rtl8723bs/core/rtw_io.c @@ -26,7 +26,6 @@ jackson@realtek.com.tw */ #include -#include u8 rtw_read8(struct adapter *adapter, u32 addr) { diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c index 3b44f0dd5b0aee..587a87fbffeb4e 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c +++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include u8 rtw_validate_bssid(u8 *bssid) { diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c index 8c487b7b7a40eb..cbdb134278d37b 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include #include -#include #include #include diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 9ebf25a0ef9b7b..4d4bec47d1874c 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -5,11 +5,10 @@ * ******************************************************************************/ #include -#include #include #include #include -#include +#include static struct mlme_handler mlme_sta_tbl[] = { {WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq}, @@ -628,7 +627,7 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame) ret = rtw_check_bcn_info(padapter, pframe, len); if (!ret) { netdev_dbg(padapter->pnetdev, - "ap has changed, disconnect now\n "); + "ap has changed, disconnect now\n"); receive_disconnect(padapter, pmlmeinfo->network.mac_address, 0); return _SUCCESS; @@ -3831,10 +3830,10 @@ void site_survey(struct adapter *padapter) } else { #ifdef DBG_FIXED_CHAN if (pmlmeext->fixed_chan != 0xff) - SelectChannel(padapter, pmlmeext->fixed_chan); + r8723bs_select_channel(padapter, pmlmeext->fixed_chan); else #endif - SelectChannel(padapter, survey_channel); + r8723bs_select_channel(padapter, survey_channel); } if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... 
*/ diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c index e9763eab16f660..dbfcbac3d85554 100644 --- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c +++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include #include #include @@ -285,14 +284,12 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv) if (rpwm & PS_ACK) { unsigned long start_time; u8 cpwm_now; - u8 poll_cnt = 0; start_time = jiffies; /* polling cpwm */ do { mdelay(1); - poll_cnt++; rtw_hal_get_hwreg(padapter, HW_VAR_CPWM, &cpwm_now); if ((cpwm_orig ^ cpwm_now) & 0x80) { pwrpriv->cpwm = PS_STATE_S4; diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 0eadc23a7d5433..a389ba5ecc6f6d 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -5,11 +5,10 @@ * ******************************************************************************/ #include -#include #include #include #include -#include +#include static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37}; static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3}; @@ -2027,12 +2026,9 @@ static int recv_func(struct adapter *padapter, union recv_frame *rframe) /* check if need to handle uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) { union recv_frame *pending_frame; - int cnt = 0; - while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) { - cnt++; + while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) recv_func_posthandle(padapter, pending_frame); - } } ret = recv_func_prehandle(padapter, rframe); diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c deleted file mode 100644 index 4f120c894998d2..00000000000000 --- a/drivers/staging/rtl8723bs/core/rtw_rf.c +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/****************************************************************************** - * - * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. 
- * - ******************************************************************************/ - -#include -#include - -static const u32 ch_freq_map[] = { - 2412, - 2417, - 2422, - 2427, - 2432, - 2437, - 2442, - 2447, - 2452, - 2457, - 2462, - 2467, - 2472, - 2484 -}; - -u32 rtw_ch2freq(u32 channel) -{ - if (channel == 0 || channel > ARRAY_SIZE(ch_freq_map)) - return 2412; - - return ch_freq_map[channel - 1]; -} diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c index 7ecdaa2eeaf33e..1e9eff01b1aa52 100644 --- a/drivers/staging/rtl8723bs/core/rtw_security.c +++ b/drivers/staging/rtl8723bs/core/rtw_security.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include #include -#include #include static const char * const _security_type_str[] = { diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c index 0145c4da5ac069..1b72f2196a1cfc 100644 --- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include void _rtw_init_stainfo(struct sta_info *psta); void _rtw_init_stainfo(struct sta_info *psta) diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index 7fac9ca3e9a01b..f37fec1efaf91c 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f}; @@ -333,7 +332,7 @@ inline unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter) return 0; } -void SelectChannel(struct adapter *padapter, unsigned char channel) +void r8723bs_select_channel(struct adapter *padapter, unsigned char channel) { if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->setch_mutex))) return; diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index b1965ec0181fdd..3e88f14e3bf78a 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; @@ -45,7 +44,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) init_completion(&pxmitpriv->terminate_xmitthread_comp); /* - * Please insert all the queue initializaiton using _rtw_init_queue below + * Please insert all the queue initialization using _rtw_init_queue below */ pxmitpriv->adapter = padapter; diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c index 22e33b97800d1f..81149ab81904c1 100644 --- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c +++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include "odm_precomp.h" /* MACRO definition for pRFCalibrateInfo->TxIQC_8723B[0] */ diff --git a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c index 5f9e94a7e6ad6c..86404b5e6c5244 100644 --- 
a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c +++ b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c @@ -21,7 +21,6 @@ Major Change History: --*/ #include -#include #include diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c index e26b789b9cdd01..b72cf520d5768b 100644 --- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c +++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include #include diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c index 85223210243313..719dd116d8076a 100644 --- a/drivers/staging/rtl8723bs/hal/hal_com.c +++ b/drivers/staging/rtl8723bs/hal/hal_com.c @@ -7,7 +7,6 @@ #include #include -#include #include "hal_com_h2c.h" #include "odm_precomp.h" diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c index 3e814a15e893fc..d5649e7d8f9985 100644 --- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c +++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include #include diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c index 7e3db8d3c910e8..0a3900548fd2ca 100644 --- a/drivers/staging/rtl8723bs/hal/hal_intf.c +++ b/drivers/staging/rtl8723bs/hal/hal_intf.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include #include void rtw_hal_chip_configure(struct adapter *padapter) @@ -160,12 +159,6 @@ void rtw_hal_set_odm_var(struct adapter *padapter, enum hal_odm_variable eVariab padapter->HalFunc.SetHalODMVarHandler(padapter, eVariable, pValue1, bSet); } -void rtw_hal_get_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, void *pValue2) -{ - if (padapter->HalFunc.GetHalODMVarHandler) - padapter->HalFunc.GetHalODMVarHandler(padapter, eVariable, pValue1, pValue2); -} - void rtw_hal_enable_interrupt(struct adapter *padapter) { if (padapter->HalFunc.enable_interrupt) diff --git a/drivers/staging/rtl8723bs/hal/hal_sdio.c b/drivers/staging/rtl8723bs/hal/hal_sdio.c index 9de62a0f5d35bc..665c85eccbdf33 100644 --- a/drivers/staging/rtl8723bs/hal/hal_sdio.c +++ b/drivers/staging/rtl8723bs/hal/hal_sdio.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include u8 rtw_hal_sdio_max_txoqt_free_space(struct adapter *padapter) diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c index d1ac2f44939ce3..56526056dd1d2e 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include #include "hal_com_h2c.h" diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c index 2028791988e711..d1c875cf8e6d23 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c @@ -8,7 +8,6 @@ /* This file is for 92CE/92CU dynamic mechanism only */ #include -#include #include /* Global var */ diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c index 7a5c3a98183b49..37ebbbf408ecfd 100644 --- 
a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include "hal_com_h2c.h" diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c index 7764896a04ea25..4ff092b7c9c99a 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include /** diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c index 74e75dc970f727..28c914ec260492 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include static void initrecvbuf(struct recv_buf *precvbuf, struct adapter *padapter) diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index 15810438a472f0..78298e63edce6e 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@ -6,7 +6,6 @@ ******************************************************************************/ #include -#include #include static u8 rtw_sdio_wait_enough_TxOQT_space(struct adapter *padapter, u8 agg_num) diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c index c9cd6578f7f829..d3aae413fc0f92 100644 --- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c +++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include #include #include "hal_com_h2c.h" @@ -380,8 +379,8 @@ static void _InitWMACSetting(struct adapter *padapter) rtw_write32(padapter, REG_RCR, pHalData->ReceiveConfig); /* Accept all multicast address */ - rtw_write32(padapter, REG_MAR, 0xFFFFFFFF); - rtw_write32(padapter, REG_MAR + 4, 0xFFFFFFFF); + rtw_write32(padapter, REG_MAR, 0xFFFFFFFF); /* Offset 0x0620-0x0623 */ + rtw_write32(padapter, REG_MAR + 4, 0xFFFFFFFF); /* Offset 0x0624-0x0627 */ /* Accept all data frames */ value16 = 0xFFFF; diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c index 107f427ee4aa51..21e9f185874523 100644 --- a/drivers/staging/rtl8723bs/hal/sdio_ops.c +++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c @@ -5,7 +5,6 @@ * *******************************************************************************/ #include -#include #include /* */ diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h index 9e6ca1dec52527..0b35c97843cc7b 100644 --- a/drivers/staging/rtl8723bs/include/drv_types.h +++ b/drivers/staging/rtl8723bs/include/drv_types.h @@ -452,14 +452,7 @@ struct adapter { #define DF_RX_BIT BIT1 #define DF_IO_BIT BIT2 -/* define RTW_DISABLE_FUNC(padapter, func) (atomic_add(&adapter_to_dvobj(padapter)->disable_func, (func))) */ /* define RTW_ENABLE_FUNC(padapter, func) (atomic_sub(&adapter_to_dvobj(padapter)->disable_func, (func))) */ -static inline void RTW_DISABLE_FUNC(struct adapter *padapter, int func_bit) -{ - int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func); - df |= func_bit; - atomic_set(&adapter_to_dvobj(padapter)->disable_func, df); -} static inline void 
RTW_ENABLE_FUNC(struct adapter *padapter, int func_bit) { diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h index 5cffab2d06ffc1..efdd1f912b5d9e 100644 --- a/drivers/staging/rtl8723bs/include/hal_intf.h +++ b/drivers/staging/rtl8723bs/include/hal_intf.h @@ -301,7 +301,6 @@ u8 rtw_hal_set_def_var(struct adapter *padapter, enum hal_def_variable eVariable u8 rtw_hal_get_def_var(struct adapter *padapter, enum hal_def_variable eVariable, void *pValue); void rtw_hal_set_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet); -void rtw_hal_get_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, void *pValue2); void rtw_hal_enable_interrupt(struct adapter *padapter); void rtw_hal_disable_interrupt(struct adapter *padapter); diff --git a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h index 5e43cc89f5353e..b93d74a5b9a5da 100644 --- a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h +++ b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h @@ -101,7 +101,7 @@ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07 = 0x20 , SOP option to disable BG/MB*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/ \ - {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \ + {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/ diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h index cf96b5f7a77629..b21267d7ef7209 100644 --- a/drivers/staging/rtl8723bs/include/osdep_service.h +++ b/drivers/staging/rtl8723bs/include/osdep_service.h @@ -81,9 +81,7 @@ static inline void thread_enter(char *name) static inline void flush_signals_thread(void) { if (signal_pending(current)) - { flush_signals(current); - } } #define rtw_warn_on(condition) WARN_ON(condition) @@ -102,7 +100,7 @@ static inline int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *par #define MAC_ARG(x) (x) #endif -extern void rtw_free_netdev(struct net_device * netdev); +extern void rtw_free_netdev(struct net_device *netdev); /* Macros for handling unaligned memory accesses */ diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h index 188ed7e26550d7..2ec54f9e180c20 100644 --- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h +++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h @@ -7,43 +7,41 @@ #ifndef __OSDEP_LINUX_SERVICE_H_ #define __OSDEP_LINUX_SERVICE_H_ - #include - #include - 
#include - #include - #include - #include - #include - #include - /* include */ - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include /* for struct tasklet_struct */ - #include - #include - #include - #include - -/* #include */ - #include - #include - - struct __queue { - struct list_head queue; - spinlock_t lock; - }; +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for struct tasklet_struct */ +#include +#include +#include +#include + +#include +#include + +struct __queue { + struct list_head queue; + spinlock_t lock; +}; static inline struct list_head *get_next(struct list_head *list) { diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h index f9ecd9047d5234..e6d6e9de547475 100644 --- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h +++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h @@ -38,7 +38,7 @@ struct rt_firmware { /* This structure must be carefully byte-ordered. */ struct rt_firmware_hdr { - /* 8-byte alinment required */ + /* 8-byte alignment required */ /* LONG WORD 0 ---- */ __le16 signature; /* 92C0: test chip; 92C, 88C0: test chip; diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h index fe1b0310120352..cb44119ce9a913 100644 --- a/drivers/staging/rtl8723bs/include/rtw_cmd.h +++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h @@ -516,10 +516,6 @@ struct drvextra_cmd_parm { /*------------------- Below are used for RF/BB tuning ---------------------*/ -struct getcountjudge_rsp { - u8 count_judge[MAX_RATES_LENGTH]; -}; - struct addBaReq_parm { unsigned int tid; u8 addr[ETH_ALEN]; diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h deleted file mode 100644 index 7f96ff66915f7e..00000000000000 --- a/drivers/staging/rtl8723bs/include/rtw_debug.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/****************************************************************************** - * - * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. - * - ******************************************************************************/ -#ifndef __RTW_DEBUG_H__ -#define __RTW_DEBUG_H__ - -void mac_reg_dump(struct adapter *adapter); -void bb_reg_dump(struct adapter *adapter); -void rf_reg_dump(struct adapter *adapter); - -#endif /* __RTW_DEBUG_H__ */ diff --git a/drivers/staging/rtl8723bs/include/rtw_event.h b/drivers/staging/rtl8723bs/include/rtw_event.h index d48bae5416fe62..62e0dec249adb9 100644 --- a/drivers/staging/rtl8723bs/include/rtw_event.h +++ b/drivers/staging/rtl8723bs/include/rtw_event.h @@ -28,7 +28,7 @@ struct surveydone_event { }; /* -Used to report the link result of joinning the given bss +Used to report the link result of joining the given bss join_res: diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h index be9741a056e558..0ee87be6dc4f82 100644 --- a/drivers/staging/rtl8723bs/include/rtw_io.h +++ b/drivers/staging/rtl8723bs/include/rtw_io.h @@ -13,7 +13,7 @@ Otherwise, io_handler will free io_req */ -/* below is for the intf_option bit defition... */ +/* below is for the intf_option bit definition... 
*/ struct intf_priv; struct intf_hdl; diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h index e103c4a15d1a1a..e665479babc254 100644 --- a/drivers/staging/rtl8723bs/include/rtw_mlme.h +++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h @@ -131,7 +131,7 @@ struct mlme_priv { u8 roam_rssi_diff_th; /* rssi difference threshold for active scan candidate selection */ u32 roam_scan_int_ms; /* scan interval for active roam */ u32 roam_scanr_exp_ms; /* scan result expire time in ms for roam */ - u8 roam_tgt_addr[ETH_ALEN]; /* request to roam to speicific target without other consideration */ + u8 roam_tgt_addr[ETH_ALEN]; /* request to roam to specific target without other consideration */ u8 *nic_hdl; diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h index 5b8574f5a251fb..8315399b64fdb5 100644 --- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h +++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h @@ -384,8 +384,8 @@ struct mlme_ext_priv { unsigned char default_supported_mcs_set[16]; struct ss_res sitesurvey_res; - struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including current scanning/connecting/connected related info. */ - /* for ap mode, network includes ap's cap_info */ + struct mlme_ext_info mlmext_info; /* for sta/adhoc mode, including current scanning/connecting/connected related info. */ + /* for ap mode, network includes ap's cap_info */ struct timer_list survey_timer; struct timer_list link_timer; struct timer_list sa_query_timer; @@ -455,7 +455,7 @@ u8 rtw_get_center_ch(u8 channel, u8 chnl_bw, u8 chnl_offset); unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter); void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode); -void SelectChannel(struct adapter *padapter, unsigned char channel); +void r8723bs_select_channel(struct adapter *padapter, unsigned char channel); unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval); diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h index c93594f75436a1..18dd1464e0c2d9 100644 --- a/drivers/staging/rtl8723bs/include/rtw_recv.h +++ b/drivers/staging/rtl8723bs/include/rtw_recv.h @@ -444,16 +444,6 @@ static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, signed int s } -static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem) -{ - /* due to the design of 2048 bytes alignment of recv_frame, we can reference the union recv_frame */ - /* from any given member of recv_frame. 
*/ - /* rxmem indicates the any member/address in recv_frame */ - - return (union recv_frame *)(((SIZE_PTR)rxmem >> RXFRAME_ALIGN) << RXFRAME_ALIGN); - -} - static inline signed int get_recvframe_len(union recv_frame *precvframe) { return precvframe->u.hdr.len; diff --git a/drivers/staging/rtl8723bs/include/rtw_rf.h b/drivers/staging/rtl8723bs/include/rtw_rf.h index 718275ee4500af..9f98b3f5a2e357 100644 --- a/drivers/staging/rtl8723bs/include/rtw_rf.h +++ b/drivers/staging/rtl8723bs/include/rtw_rf.h @@ -97,6 +97,4 @@ enum { HT_DATA_SC_20_LOWER_OF_40MHZ = 2, }; -u32 rtw_ch2freq(u32 ch); - #endif /* _RTL8711_RF_H_ */ diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h index 98afbd3054a4b6..32f6d3a5e30926 100644 --- a/drivers/staging/rtl8723bs/include/rtw_security.h +++ b/drivers/staging/rtl8723bs/include/rtw_security.h @@ -50,43 +50,43 @@ union pn48 { #ifdef __LITTLE_ENDIAN struct { - u8 TSC0; - u8 TSC1; - u8 TSC2; - u8 TSC3; - u8 TSC4; - u8 TSC5; - u8 TSC6; - u8 TSC7; + u8 TSC0; + u8 TSC1; + u8 TSC2; + u8 TSC3; + u8 TSC4; + u8 TSC5; + u8 TSC6; + u8 TSC7; } _byte_; #else struct { - u8 TSC7; - u8 TSC6; - u8 TSC5; - u8 TSC4; - u8 TSC3; - u8 TSC2; - u8 TSC1; - u8 TSC0; + u8 TSC7; + u8 TSC6; + u8 TSC5; + u8 TSC4; + u8 TSC3; + u8 TSC2; + u8 TSC1; + u8 TSC0; } _byte_; #endif }; union Keytype { - u8 skey[16]; - u32 lkey[4]; + u8 skey[16]; + u32 lkey[4]; }; struct rt_pmkid_list { - u8 bUsed; - u8 Bssid[6]; - u8 PMKID[16]; - u8 SsidBuf[33]; + u8 bUsed; + u8 Bssid[6]; + u8 PMKID[16]; + u8 SsidBuf[33]; u8 *ssid_octet; - u16 ssid_length; + u16 ssid_length; }; @@ -162,7 +162,7 @@ struct security_priv { /* For WPA2 Pre-Authentication. */ struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE]; /* Renamed from PreAuthKey[NUM_PRE_AUTH_KEY]. Annie, 2006-10-13. 
*/ - u8 PMKIDIndex; + u8 PMKIDIndex; u8 bWepDefaultKeyIdxSet; @@ -170,50 +170,48 @@ struct security_priv { #define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst)\ do {\ - switch (psecuritypriv->dot11AuthAlgrthm)\ - {\ - case dot11AuthAlgrthm_Open:\ - case dot11AuthAlgrthm_Shared:\ - case dot11AuthAlgrthm_Auto:\ - encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\ - break;\ - case dot11AuthAlgrthm_8021X:\ - if (bmcst)\ - encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\ - else\ - encry_algo = (u8)psta->dot118021XPrivacy;\ - break;\ - case dot11AuthAlgrthm_WAPI:\ - encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\ - break;\ + switch (psecuritypriv->dot11AuthAlgrthm) {\ + case dot11AuthAlgrthm_Open:\ + case dot11AuthAlgrthm_Shared:\ + case dot11AuthAlgrthm_Auto:\ + encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\ + break;\ + case dot11AuthAlgrthm_8021X:\ + if (bmcst)\ + encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\ + else\ + encry_algo = (u8)psta->dot118021XPrivacy;\ + break;\ + case dot11AuthAlgrthm_WAPI:\ + encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\ + break;\ } \ } while (0) #define SET_ICE_IV_LEN(iv_len, icv_len, encrypt)\ do {\ - switch (encrypt)\ - {\ - case _WEP40_:\ - case _WEP104_:\ - iv_len = 4;\ - icv_len = 4;\ - break;\ - case _TKIP_:\ - iv_len = 8;\ - icv_len = 4;\ - break;\ - case _AES_:\ - iv_len = 8;\ - icv_len = 8;\ - break;\ - case _SMS4_:\ - iv_len = 18;\ - icv_len = 16;\ - break;\ - default:\ - iv_len = 0;\ - icv_len = 0;\ - break;\ + switch (encrypt) {\ + case _WEP40_:\ + case _WEP104_:\ + iv_len = 4;\ + icv_len = 4;\ + break;\ + case _TKIP_:\ + iv_len = 8;\ + icv_len = 4;\ + break;\ + case _AES_:\ + iv_len = 8;\ + icv_len = 8;\ + break;\ + case _SMS4_:\ + iv_len = 18;\ + icv_len = 16;\ + break;\ + default:\ + iv_len = 0;\ + icv_len = 0;\ + break;\ } \ } while (0) @@ -242,7 +240,8 @@ struct mic_data { /* ===== start - public domain SHA256 implementation ===== */ /* This is based on SHA256 implementation in LibTomCrypt that was released into - * public domain by Tom St Denis. */ + * public domain by Tom St Denis. 
+ */ int omac1_aes_128(u8 *key, u8 *data, size_t data_len, u8 *mac); void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key); diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h index a3b4310caddf77..544468f5769249 100644 --- a/drivers/staging/rtl8723bs/include/rtw_xmit.h +++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h @@ -15,7 +15,7 @@ #define XMITBUF_ALIGN_SZ 512 -/* xmit extension buff defination */ +/* xmit extension buff definition */ #define MAX_XMIT_EXTBUF_SZ (1536) #define NR_XMIT_EXTBUFF (32) diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index eb3c73cc26622a..b63a74e669bcf2 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -7,7 +7,6 @@ #include #include -#include #include #include diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index c81b30f1f1b05b..a9e481e182ad6c 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -7,7 +7,6 @@ #include #include -#include #include #include #include diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c index 1341801e5c213a..1904e82a24b5c2 100644 --- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include static void _dynamic_check_timer_handler(struct timer_list *t) { diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c index 55d0140cd54372..fc9b9c5efb50e1 100644 --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include #include MODULE_LICENSE("GPL"); diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c index f09c1324c39c6e..a00f9f0c85c5ba 100644 --- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c +++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include /* * Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c index 4d28b300b2358e..ca808ded61ac0f 100644 --- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c @@ -5,10 +5,9 @@ * ******************************************************************************/ #include -#include #include #include -#include +#include void rtw_os_free_recvframe(union recv_frame *precvframe) { diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c index 49043148452429..d18fde4e5d6cea 100644 --- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include #include #include diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c index 0a0b04088e668b..4a7c0c9cc7ef41 100644 --- 
a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c @@ -6,7 +6,6 @@ *******************************************************************************/ #include -#include static bool rtw_sdio_claim_host_needed(struct sdio_func *func) { diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c index 5eef1d68c6f0db..dbd4bf53133903 100644 --- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c +++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c @@ -6,7 +6,6 @@ *****************************************************************************/ #include -#include #include diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c index 1eeabfffd6d244..944b9c724b32e7 100644 --- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c @@ -5,7 +5,6 @@ * ******************************************************************************/ #include -#include uint rtw_remainder_len(struct pkt_file *pfile) @@ -144,9 +143,8 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb) psta = list_entry(plist, struct sta_info, asoc_list); stainfo_offset = rtw_stainfo_offset(pstapriv, psta); - if (stainfo_offset_valid(stainfo_offset)) { + if (stainfo_offset_valid(stainfo_offset)) chk_alive_list[chk_alive_num++] = stainfo_offset; - } } spin_unlock_bh(&pstapriv->asoc_list_lock); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index c4d97dbf6ba836..3dbeffc650d339 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -857,10 +857,10 @@ vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const switch (mode) { case VCHIQ_BULK_MODE_NOCALLBACK: case VCHIQ_BULK_MODE_CALLBACK: - ret = vchiq_bulk_transfer(instance, handle, - (void *)data, NULL, - size, userdata, mode, - VCHIQ_BULK_TRANSMIT); + ret = vchiq_bulk_xfer_callback_interruptible(instance, handle, + (void *)data, NULL, + size, mode, userdata, + VCHIQ_BULK_TRANSMIT); break; case VCHIQ_BULK_MODE_BLOCKING: ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size, @@ -895,9 +895,10 @@ int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle, switch (mode) { case VCHIQ_BULK_MODE_NOCALLBACK: case VCHIQ_BULK_MODE_CALLBACK: - ret = vchiq_bulk_transfer(instance, handle, data, NULL, - size, userdata, - mode, VCHIQ_BULK_RECEIVE); + ret = vchiq_bulk_xfer_callback_interruptible(instance, handle, + (void *)data, NULL, + size, mode, userdata, + VCHIQ_BULK_RECEIVE); break; case VCHIQ_BULK_MODE_BLOCKING: ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size, @@ -968,9 +969,8 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl return -ENOMEM; } - ret = vchiq_bulk_transfer(instance, handle, data, NULL, size, - &waiter->bulk_waiter, - VCHIQ_BULK_MODE_BLOCKING, dir); + ret = vchiq_bulk_xfer_blocking_interruptible(instance, handle, data, NULL, size, + &waiter->bulk_waiter, dir); if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) { struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 50af04b217f4b8..1f94db6e0cd984 100644 --- 
a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -1139,7 +1139,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service, int msgid, ssize_t (*copy_callback)(void *context, void *dest, size_t offset, size_t maxsize), - void *context, int size, int is_blocking) + void *context, int size) { struct vchiq_shared_state *local; struct vchiq_header *header; @@ -1517,7 +1517,7 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header) /* Acknowledge the OPEN */ if (service->sync) { if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback, - &ack_payload, sizeof(ack_payload), 0) == -EAGAIN) + &ack_payload, sizeof(ack_payload)) == -EAGAIN) goto bail_not_ready; /* The service is now open */ @@ -2655,6 +2655,132 @@ close_service_complete(struct vchiq_service *service, int failstate) return status; } +/* + * Prepares a bulk transfer to be queued. The function is interruptible and is + * intended to be called from user threads. It may return -EAGAIN to indicate + * that a signal has been received and the call should be retried after being + * returned to user context. + */ +static int +vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service, + void *offset, void __user *uoffset, + int size, void *userdata, + enum vchiq_bulk_mode mode, + enum vchiq_bulk_dir dir) +{ + struct vchiq_bulk_queue *queue; + struct bulk_waiter *bulk_waiter = NULL; + struct vchiq_bulk *bulk; + struct vchiq_state *state = service->state; + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r'; + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ? + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX; + int status = -EINVAL; + int payload[2]; + + if (mode == VCHIQ_BULK_MODE_BLOCKING) { + bulk_waiter = userdata; + init_completion(&bulk_waiter->event); + bulk_waiter->actual = 0; + bulk_waiter->bulk = NULL; + } + + queue = (dir == VCHIQ_BULK_TRANSMIT) ? + &service->bulk_tx : &service->bulk_rx; + + if (mutex_lock_killable(&service->bulk_mutex)) + return -EAGAIN; + + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) { + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls); + do { + mutex_unlock(&service->bulk_mutex); + if (wait_for_completion_interruptible(&service->bulk_remove_event)) + return -EAGAIN; + if (mutex_lock_killable(&service->bulk_mutex)) + return -EAGAIN; + } while (queue->local_insert == queue->remove + + VCHIQ_NUM_SERVICE_BULKS); + } + + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)]; + + bulk->mode = mode; + bulk->dir = dir; + bulk->userdata = userdata; + bulk->size = size; + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED; + + if (vchiq_prepare_bulk_data(service->instance, bulk, offset, uoffset, size, dir)) + goto unlock_error_exit; + + /* + * Ensure that the bulk data record is visible to the peer + * before proceeding. 
+ */ + wmb(); + + dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n", + state->id, service->localport, service->remoteport, + dir_char, size, &bulk->data, userdata); + + /* + * The slot mutex must be held when the service is being closed, so + * claim it here to ensure that isn't happening + */ + if (mutex_lock_killable(&state->slot_mutex)) { + status = -EAGAIN; + goto cancel_bulk_error_exit; + } + + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto unlock_both_error_exit; + + payload[0] = lower_32_bits(bulk->data); + payload[1] = bulk->size; + status = queue_message(state, + NULL, + VCHIQ_MAKE_MSG(dir_msgtype, + service->localport, + service->remoteport), + memcpy_copy_callback, + &payload, + sizeof(payload), + QMFLAGS_IS_BLOCKING | + QMFLAGS_NO_MUTEX_LOCK | + QMFLAGS_NO_MUTEX_UNLOCK); + if (status) + goto unlock_both_error_exit; + + queue->local_insert++; + + mutex_unlock(&state->slot_mutex); + mutex_unlock(&service->bulk_mutex); + + dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n", + state->id, service->localport, dir_char, queue->local_insert, + queue->remote_insert, queue->process); + + if (bulk_waiter) { + bulk_waiter->bulk = bulk; + if (wait_for_completion_interruptible(&bulk_waiter->event)) + status = -EAGAIN; + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED) + status = -EINVAL; + } + + return status; + +unlock_both_error_exit: + mutex_unlock(&state->slot_mutex); +cancel_bulk_error_exit: + vchiq_complete_bulk(service->instance, bulk); +unlock_error_exit: + mutex_unlock(&service->bulk_mutex); + + return status; +} + /* Called by the slot handler */ int vchiq_close_service_internal(struct vchiq_service *service, int close_recvd) @@ -2978,31 +3104,17 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle) return status; } -/* - * This function may be called by kernel threads or user threads. - * User threads may receive -EAGAIN to indicate that a signal has been - * received and the call should be retried after being returned to user - * context. - * When called in blocking mode, the userdata field points to a bulk_waiter - * structure. - */ -int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, - void *offset, void __user *uoffset, int size, void *userdata, - enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir) +int +vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle, + void *offset, void __user *uoffset, int size, + void __user *userdata, enum vchiq_bulk_dir dir) { struct vchiq_service *service = find_service_by_handle(instance, handle); - struct vchiq_bulk_queue *queue; - struct vchiq_bulk *bulk; - struct vchiq_state *state; - struct bulk_waiter *bulk_waiter = NULL; - const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r'; - const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ? 
- VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX; + enum vchiq_bulk_mode mode = VCHIQ_BULK_MODE_BLOCKING; int status = -EINVAL; - int payload[2]; if (!service) - goto error_exit; + return -EINVAL; if (service->srvstate != VCHIQ_SRVSTATE_OPEN) goto error_exit; @@ -3013,133 +3125,91 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, if (vchiq_check_service(service)) goto error_exit; - switch (mode) { - case VCHIQ_BULK_MODE_NOCALLBACK: - case VCHIQ_BULK_MODE_CALLBACK: - break; - case VCHIQ_BULK_MODE_BLOCKING: - bulk_waiter = userdata; - init_completion(&bulk_waiter->event); - bulk_waiter->actual = 0; - bulk_waiter->bulk = NULL; - break; - case VCHIQ_BULK_MODE_WAITING: - bulk_waiter = userdata; - bulk = bulk_waiter->bulk; - goto waiting; - default: - goto error_exit; - } - state = service->state; + status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset, size, + userdata, mode, dir); - queue = (dir == VCHIQ_BULK_TRANSMIT) ? - &service->bulk_tx : &service->bulk_rx; +error_exit: + vchiq_service_put(service); - if (mutex_lock_killable(&service->bulk_mutex)) { - status = -EAGAIN; + return status; +} + +int +vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle, + void *offset, void __user *uoffset, int size, + enum vchiq_bulk_mode mode, void *userdata, + enum vchiq_bulk_dir dir) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = -EINVAL; + + if (!service) + return -EINVAL; + + if (mode != VCHIQ_BULK_MODE_CALLBACK && + mode != VCHIQ_BULK_MODE_NOCALLBACK) goto error_exit; - } - if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) { - VCHIQ_SERVICE_STATS_INC(service, bulk_stalls); - do { - mutex_unlock(&service->bulk_mutex); - if (wait_for_completion_interruptible(&service->bulk_remove_event)) { - status = -EAGAIN; - goto error_exit; - } - if (mutex_lock_killable(&service->bulk_mutex)) { - status = -EAGAIN; - goto error_exit; - } - } while (queue->local_insert == queue->remove + - VCHIQ_NUM_SERVICE_BULKS); - } + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto error_exit; - bulk = &queue->bulks[BULK_INDEX(queue->local_insert)]; + if (!offset && !uoffset) + goto error_exit; - bulk->mode = mode; - bulk->dir = dir; - bulk->userdata = userdata; - bulk->size = size; - bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED; + if (vchiq_check_service(service)) + goto error_exit; - if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir)) - goto unlock_error_exit; + status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset, + size, userdata, mode, dir); - /* - * Ensure that the bulk data record is visible to the peer - * before proceeding. - */ - wmb(); +error_exit: + vchiq_service_put(service); - dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n", - state->id, service->localport, service->remoteport, - dir_char, size, &bulk->data, userdata); + return status; +} - /* - * The slot mutex must be held when the service is being closed, so - * claim it here to ensure that isn't happening - */ - if (mutex_lock_killable(&state->slot_mutex)) { - status = -EAGAIN; - goto cancel_bulk_error_exit; - } +/* + * This function is called by VCHIQ ioctl interface and is interruptible. + * It may receive -EAGAIN to indicate that a signal has been received + * and the call should be retried after being returned to user context. 
+ */ +int +vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance, + unsigned int handle, struct bulk_waiter *userdata) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + struct bulk_waiter *bulk_waiter; + int status = -EINVAL; - if (service->srvstate != VCHIQ_SRVSTATE_OPEN) - goto unlock_both_error_exit; + if (!service) + return -EINVAL; - payload[0] = lower_32_bits(bulk->data); - payload[1] = bulk->size; - status = queue_message(state, - NULL, - VCHIQ_MAKE_MSG(dir_msgtype, - service->localport, - service->remoteport), - memcpy_copy_callback, - &payload, - sizeof(payload), - QMFLAGS_IS_BLOCKING | - QMFLAGS_NO_MUTEX_LOCK | - QMFLAGS_NO_MUTEX_UNLOCK); - if (status) - goto unlock_both_error_exit; + if (!userdata) + goto error_exit; - queue->local_insert++; + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto error_exit; - mutex_unlock(&state->slot_mutex); - mutex_unlock(&service->bulk_mutex); + if (vchiq_check_service(service)) + goto error_exit; - dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n", - state->id, service->localport, dir_char, queue->local_insert, - queue->remote_insert, queue->process); + bulk_waiter = userdata; -waiting: vchiq_service_put(service); status = 0; - if (bulk_waiter) { - bulk_waiter->bulk = bulk; - if (wait_for_completion_interruptible(&bulk_waiter->event)) - status = -EAGAIN; - else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED) - status = -EINVAL; - } + if (wait_for_completion_interruptible(&bulk_waiter->event)) + return -EAGAIN; + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED) + return -EINVAL; return status; -unlock_both_error_exit: - mutex_unlock(&state->slot_mutex); -cancel_bulk_error_exit: - vchiq_complete_bulk(service->instance, bulk); -unlock_error_exit: - mutex_unlock(&service->bulk_mutex); - error_exit: - if (service) - vchiq_service_put(service); + vchiq_service_put(service); + return status; } @@ -3175,11 +3245,12 @@ vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle, switch (service->srvstate) { case VCHIQ_SRVSTATE_OPEN: status = queue_message(service->state, service, data_id, - copy_callback, context, size, 1); + copy_callback, context, size, + QMFLAGS_IS_BLOCKING); break; case VCHIQ_SRVSTATE_OPENSYNC: status = queue_message_sync(service->state, service, data_id, - copy_callback, context, size, 1); + copy_callback, context, size); break; default: status = -EINVAL; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h index 77cc4d7ac07768..468463f318018c 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h @@ -471,9 +471,19 @@ extern void remote_event_pollall(struct vchiq_state *state); extern int -vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *offset, - void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode, - enum vchiq_bulk_dir dir); +vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance, + unsigned int handle, struct bulk_waiter *userdata); + +extern int +vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle, + void *offset, void __user *uoffset, int size, + void __user *userdata, enum vchiq_bulk_dir dir); + +extern int +vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle, + void *offset, void __user *uoffset, int size, + enum 
vchiq_bulk_mode mode, void *userdata, + enum vchiq_bulk_dir dir); extern void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c index 9cd2a64dce5e68..d41a4624cc92c3 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c @@ -304,6 +304,11 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance, } userdata = &waiter->bulk_waiter; + + status = vchiq_bulk_xfer_blocking_interruptible(instance, args->handle, + NULL, args->data, args->size, + userdata, dir); + } else if (args->mode == VCHIQ_BULK_MODE_WAITING) { mutex_lock(&instance->bulk_waiter_list_mutex); list_for_each_entry(iter, &instance->bulk_waiter_list, @@ -324,12 +329,16 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance, dev_dbg(service->state->dev, "arm: found bulk_waiter %pK for pid %d\n", waiter, current->pid); userdata = &waiter->bulk_waiter; + + status = vchiq_bulk_xfer_waiting_interruptible(instance, args->handle, userdata); } else { userdata = args->userdata; - } - status = vchiq_bulk_transfer(instance, args->handle, NULL, args->data, args->size, - userdata, args->mode, dir); + status = vchiq_bulk_xfer_callback_interruptible(instance, args->handle, NULL, + args->data, args->size, + args->mode, userdata, dir); + + } if (!waiter) { ret = 0; diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c index 9a091463656d08..42304c9f83a2dc 100644 --- a/drivers/staging/vme_user/vme.c +++ b/drivers/staging/vme_user/vme.c @@ -416,10 +416,6 @@ void vme_slave_free(struct vme_resource *resource) slave_image = list_entry(resource->entry, struct vme_slave_resource, list); - if (!slave_image) { - dev_err(bridge->parent, "Can't find slave resource\n"); - return; - } /* Unlock image */ mutex_lock(&slave_image->mtx); @@ -794,10 +790,6 @@ void vme_master_free(struct vme_resource *resource) master_image = list_entry(resource->entry, struct vme_master_resource, list); - if (!master_image) { - dev_err(bridge->parent, "Can't find master resource\n"); - return; - } /* Unlock image */ spin_lock(&master_image->lock); @@ -1265,7 +1257,7 @@ EXPORT_SYMBOL(vme_unregister_error_handler); void vme_irq_handler(struct vme_bridge *bridge, int level, int statid) { - void (*call)(int, int, void *); + void (*call)(int level, int statid, void *priv_data); void *priv_data; call = bridge->irq[level - 1].callback[statid].func; diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h index 26aa40f78a74f7..7753e736f9fd86 100644 --- a/drivers/staging/vme_user/vme.h +++ b/drivers/staging/vme_user/vme.h @@ -129,8 +129,7 @@ struct vme_driver { }; void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *); -void vme_free_consistent(struct vme_resource *, size_t, void *, - dma_addr_t); +void vme_free_consistent(struct vme_resource *, size_t, void *, dma_addr_t); size_t vme_get_size(struct vme_resource *); int vme_check_window(struct vme_bridge *bridge, u32 aspace, @@ -138,20 +137,20 @@ int vme_check_window(struct vme_bridge *bridge, u32 aspace, struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32); int vme_slave_set(struct vme_resource *, int, unsigned long long, - unsigned long long, dma_addr_t, u32, u32); + unsigned long long, dma_addr_t, u32, u32); int vme_slave_get(struct vme_resource *, int *, unsigned long long *, - unsigned long long 
*, dma_addr_t *, u32 *, u32 *); + unsigned long long *, dma_addr_t *, u32 *, u32 *); void vme_slave_free(struct vme_resource *); struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32); int vme_master_set(struct vme_resource *, int, unsigned long long, - unsigned long long, u32, u32, u32); + unsigned long long, u32, u32, u32); int vme_master_get(struct vme_resource *, int *, unsigned long long *, - unsigned long long *, u32 *, u32 *, u32 *); + unsigned long long *, u32 *, u32 *, u32 *); ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t); ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t); unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int, - unsigned int, loff_t); + unsigned int, loff_t); int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma); void vme_master_free(struct vme_resource *); @@ -162,13 +161,13 @@ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t); struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32); void vme_dma_free_attribute(struct vme_dma_attr *); int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *, - struct vme_dma_attr *, size_t); + struct vme_dma_attr *, size_t); int vme_dma_list_exec(struct vme_dma_list *); int vme_dma_list_free(struct vme_dma_list *); int vme_dma_free(struct vme_resource *); int vme_irq_request(struct vme_dev *, int, int, - void (*callback)(int, int, void *), void *); + void (*callback)(int, int, void *), void *); void vme_irq_free(struct vme_dev *, int, int); int vme_irq_generate(struct vme_dev *, int, int); diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c index 7f84d1c86f2917..4a59c9069605fe 100644 --- a/drivers/staging/vme_user/vme_fake.c +++ b/drivers/staging/vme_user/vme_fake.c @@ -79,7 +79,7 @@ struct fake_driver { }; /* Module parameter */ -static int geoid; +static u32 geoid; static const char driver_name[] = "vme_fake"; @@ -1059,6 +1059,12 @@ static int __init fake_init(void) struct vme_slave_resource *slave_image; struct vme_lm_resource *lm; + if (geoid >= VME_MAX_SLOTS) { + pr_err("VME geographical address must be between 0 and %d (exclusive), but got %d\n", + VME_MAX_SLOTS, geoid); + return -EINVAL; + } + /* We need a fake parent device */ vme_root = root_device_register("vme"); if (IS_ERR(vme_root)) @@ -1283,7 +1289,7 @@ static void __exit fake_exit(void) } MODULE_PARM_DESC(geoid, "Set geographical addressing"); -module_param(geoid, int, 0); +module_param(geoid, uint, 0); MODULE_DESCRIPTION("Fake VME bridge driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c index 2ec9c290440414..31a44025e08f50 100644 --- a/drivers/staging/vme_user/vme_tsi148.c +++ b/drivers/staging/vme_user/vme_tsi148.c @@ -36,7 +36,7 @@ static void tsi148_remove(struct pci_dev *); /* Module parameter */ static bool err_chk; -static int geoid; +static u32 geoid; static const char driver_name[] = "vme_tsi148"; @@ -55,14 +55,14 @@ static struct pci_driver tsi148_driver = { }; static void reg_join(unsigned int high, unsigned int low, - unsigned long long *variable) + unsigned long long *variable) { *variable = (unsigned long long)high << 32; *variable |= (unsigned long long)low; } static void reg_split(unsigned long long variable, unsigned int *high, - unsigned int *low) + unsigned int *low) { *low = (unsigned int)variable & 0xFFFFFFFF; *high = (unsigned int)(variable >> 32); @@ -72,7 +72,7 @@ static void 
reg_split(unsigned long long variable, unsigned int *high, * Wakes up DMA queue. */ static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge, - int channel_mask) + int channel_mask) { u32 serviced = 0; @@ -207,7 +207,7 @@ static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge) * Calling VME bus interrupt callback if provided. */ static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge, - u32 stat) + u32 stat) { int vec, i, serviced = 0; struct tsi148_driver *bridge; @@ -358,7 +358,7 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge) } static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge, - struct pci_dev *pdev) + struct pci_dev *pdev) { struct tsi148_driver *bridge = tsi148_bridge->driver_priv; @@ -392,7 +392,7 @@ static int tsi148_iack_received(struct tsi148_driver *bridge) * Configure VME interrupt */ static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level, - int state, int sync) + int state, int sync) { struct pci_dev *pdev; u32 tmp; @@ -430,7 +430,7 @@ static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level, * interrupt to be acked. */ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, - int statid) + int statid) { u32 tmp; struct tsi148_driver *bridge; @@ -453,7 +453,7 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, /* XXX Consider implementing a timeout? */ wait_event_interruptible(bridge->iack_queue, - tsi148_iack_received(bridge)); + tsi148_iack_received(bridge)); mutex_unlock(&bridge->vme_int); @@ -464,8 +464,8 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, * Initialize a slave window with the requested attributes. */ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled, - unsigned long long vme_base, unsigned long long size, - dma_addr_t pci_base, u32 aspace, u32 cycle) + unsigned long long vme_base, unsigned long long size, + dma_addr_t pci_base, u32 aspace, u32 cycle) { unsigned int i, addr = 0, granularity = 0; unsigned int temp_ctl = 0; @@ -607,8 +607,8 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled, * Get slave window configuration. 
*/ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled, - unsigned long long *vme_base, unsigned long long *size, - dma_addr_t *pci_base, u32 *aspace, u32 *cycle) + unsigned long long *vme_base, unsigned long long *size, + dma_addr_t *pci_base, u32 *aspace, u32 *cycle) { unsigned int i, granularity = 0, ctl = 0; unsigned int vme_base_low, vme_base_high; @@ -706,7 +706,7 @@ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled, * Allocate and map PCI Resource */ static int tsi148_alloc_resource(struct vme_master_resource *image, - unsigned long long size) + unsigned long long size) { unsigned long long existing_size; int retval = 0; @@ -751,9 +751,9 @@ static int tsi148_alloc_resource(struct vme_master_resource *image, image->bus_resource.end = (unsigned long)size; image->bus_resource.flags = IORESOURCE_MEM; - retval = pci_bus_alloc_resource(pdev->bus, - &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM, - 0, NULL, NULL); + retval = pci_bus_alloc_resource(pdev->bus, &image->bus_resource, + size, 0x10000, PCIBIOS_MIN_MEM, + 0, NULL, NULL); if (retval) { dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n", image->number, (unsigned long)size, @@ -796,8 +796,8 @@ static void tsi148_free_resource(struct vme_master_resource *image) * Set the attributes of an outbound window. */ static int tsi148_master_set(struct vme_master_resource *image, int enabled, - unsigned long long vme_base, unsigned long long size, u32 aspace, - u32 cycle, u32 dwidth) + unsigned long long vme_base, unsigned long long size, + u32 aspace, u32 cycle, u32 dwidth) { int retval = 0; unsigned int i; @@ -1031,8 +1031,8 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled, * XXX Not parsing prefetch information. */ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled, - unsigned long long *vme_base, unsigned long long *size, u32 *aspace, - u32 *cycle, u32 *dwidth) + unsigned long long *vme_base, unsigned long long *size, + u32 *aspace, u32 *cycle, u32 *dwidth) { unsigned int i, ctl; unsigned int pci_base_low, pci_base_high; @@ -1140,15 +1140,15 @@ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled, } static int tsi148_master_get(struct vme_master_resource *image, int *enabled, - unsigned long long *vme_base, unsigned long long *size, u32 *aspace, - u32 *cycle, u32 *dwidth) + unsigned long long *vme_base, unsigned long long *size, + u32 *aspace, u32 *cycle, u32 *dwidth) { int retval; spin_lock(&image->lock); retval = __tsi148_master_get(image, enabled, vme_base, size, aspace, - cycle, dwidth); + cycle, dwidth); spin_unlock(&image->lock); @@ -1156,7 +1156,7 @@ static int tsi148_master_get(struct vme_master_resource *image, int *enabled, } static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, - size_t count, loff_t offset) + size_t count, loff_t offset) { int retval, enabled; unsigned long long vme_base, size; @@ -1241,7 +1241,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, } static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, - size_t count, loff_t offset) + size_t count, loff_t offset) { int retval = 0, enabled; unsigned long long vme_base, size; @@ -1342,9 +1342,8 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, * * Requires a previously configured master window, returns final value. 
*/ -static unsigned int tsi148_master_rmw(struct vme_master_resource *image, - unsigned int mask, unsigned int compare, unsigned int swap, - loff_t offset) +static unsigned int tsi148_master_rmw(struct vme_master_resource *image, unsigned int mask, + unsigned int compare, unsigned int swap, loff_t offset) { unsigned long long pci_addr; unsigned int pci_addr_high, pci_addr_low; @@ -1399,7 +1398,7 @@ static unsigned int tsi148_master_rmw(struct vme_master_resource *image, } static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr, - u32 aspace, u32 cycle, u32 dwidth) + u32 aspace, u32 cycle, u32 dwidth) { u32 val; @@ -1497,7 +1496,7 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr, } static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr, - u32 aspace, u32 cycle, u32 dwidth) + u32 aspace, u32 cycle, u32 dwidth) { u32 val; @@ -1599,8 +1598,8 @@ static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr, * * Note: DMA engine expects the DMA descriptor to be big endian. */ -static int tsi148_dma_list_add(struct vme_dma_list *list, - struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count) +static int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src, + struct vme_dma_attr *dest, size_t count) { struct tsi148_dma_entry *entry, *prev; u32 address_high, address_low, val; @@ -1653,8 +1652,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, case VME_DMA_PCI: pci_attr = src->private; - reg_split((unsigned long long)pci_attr->address, &address_high, - &address_low); + reg_split((unsigned long long)pci_attr->address, &address_high, &address_low); entry->descriptor.dsau = cpu_to_be32(address_high); entry->descriptor.dsal = cpu_to_be32(address_low); entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI); @@ -1662,15 +1660,16 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, case VME_DMA_VME: vme_attr = src->private; - reg_split((unsigned long long)vme_attr->address, &address_high, - &address_low); + reg_split((unsigned long long)vme_attr->address, &address_high, &address_low); entry->descriptor.dsau = cpu_to_be32(address_high); entry->descriptor.dsal = cpu_to_be32(address_low); entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME); - retval = tsi148_dma_set_vme_src_attributes( - tsi148_bridge->parent, &entry->descriptor.dsat, - vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth); + retval = tsi148_dma_set_vme_src_attributes(tsi148_bridge->parent, + &entry->descriptor.dsat, + vme_attr->aspace, + vme_attr->cycle, + vme_attr->dwidth); if (retval < 0) goto err_source; break; @@ -1690,7 +1689,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, pci_attr = dest->private; reg_split((unsigned long long)pci_attr->address, &address_high, - &address_low); + &address_low); entry->descriptor.ddau = cpu_to_be32(address_high); entry->descriptor.ddal = cpu_to_be32(address_low); entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI); @@ -1699,14 +1698,16 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, vme_attr = dest->private; reg_split((unsigned long long)vme_attr->address, &address_high, - &address_low); + &address_low); entry->descriptor.ddau = cpu_to_be32(address_high); entry->descriptor.ddal = cpu_to_be32(address_low); entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME); - retval = tsi148_dma_set_vme_dest_attributes( - tsi148_bridge->parent, &entry->descriptor.ddat, - vme_attr->aspace, vme_attr->cycle, 
vme_attr->dwidth); + retval = tsi148_dma_set_vme_dest_attributes(tsi148_bridge->parent, + &entry->descriptor.ddat, + vme_attr->aspace, + vme_attr->cycle, + vme_attr->dwidth); if (retval < 0) goto err_dest; break; @@ -1735,7 +1736,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, /* Fill out previous descriptors "Next Address" */ if (entry->list.prev != &list->entries) { reg_split((unsigned long long)entry->dma_handle, &address_high, - &address_low); + &address_low); prev = list_entry(entry->list.prev, struct tsi148_dma_entry, list); prev->descriptor.dnlau = cpu_to_be32(address_high); @@ -1813,7 +1814,7 @@ static int tsi148_dma_list_exec(struct vme_dma_list *list) /* Get first bus address and write into registers */ entry = list_first_entry(&list->entries, struct tsi148_dma_entry, - list); + list); mutex_unlock(&ctrlr->mtx); @@ -1832,7 +1833,7 @@ static int tsi148_dma_list_exec(struct vme_dma_list *list) TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); retval = wait_event_interruptible(bridge->dma_queue[channel], - tsi148_dma_busy(ctrlr->parent, channel)); + tsi148_dma_busy(ctrlr->parent, channel)); if (retval) { iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base + @@ -1883,7 +1884,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list) entry = list_entry(pos, struct tsi148_dma_entry, list); dma_unmap_single(tsi148_bridge->parent, entry->dma_handle, - sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE); + sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE); kfree(entry); } @@ -1898,7 +1899,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list) * callback is attached and disabled when the last callback is removed. */ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base, - u32 aspace, u32 cycle) + u32 aspace, u32 cycle) { u32 lm_base_high, lm_base_low, lm_ctl = 0; int i; @@ -1963,7 +1964,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base, * or disabled. */ static int tsi148_lm_get(struct vme_lm_resource *lm, - unsigned long long *lm_base, u32 *aspace, u32 *cycle) + unsigned long long *lm_base, u32 *aspace, u32 *cycle) { u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0; struct tsi148_driver *bridge; @@ -2013,7 +2014,7 @@ static int tsi148_lm_get(struct vme_lm_resource *lm, * Callback will be passed the monitor triggered. */ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor, - void (*callback)(void *), void *data) + void (*callback)(void *), void *data) { u32 lm_ctl, tmp; struct vme_bridge *tsi148_bridge; @@ -2086,7 +2087,7 @@ static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor) iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); iowrite32be(TSI148_LCSR_INTC_LMC[monitor], - bridge->base + TSI148_LCSR_INTC); + bridge->base + TSI148_LCSR_INTC); /* Detach callback */ bridge->lm_callback[monitor] = NULL; @@ -2126,7 +2127,7 @@ static int tsi148_slot_get(struct vme_bridge *tsi148_bridge) } static void *tsi148_alloc_consistent(struct device *parent, size_t size, - dma_addr_t *dma) + dma_addr_t *dma) { struct pci_dev *pdev; @@ -2137,7 +2138,7 @@ static void *tsi148_alloc_consistent(struct device *parent, size_t size, } static void tsi148_free_consistent(struct device *parent, size_t size, - void *vaddr, dma_addr_t dma) + void *vaddr, dma_addr_t dma) { struct pci_dev *pdev; @@ -2160,7 +2161,7 @@ static void tsi148_free_consistent(struct device *parent, size_t size, * be mapped onto PCI memory. 
*/ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, - struct pci_dev *pdev) + struct pci_dev *pdev) { u32 cbar, crat, vstat; u32 crcsr_bus_high, crcsr_bus_low; @@ -2201,8 +2202,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n"); } else { dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n"); - iowrite32be(crat | TSI148_LCSR_CRAT_EN, - bridge->base + TSI148_LCSR_CRAT); + iowrite32be(crat | TSI148_LCSR_CRAT_EN, bridge->base + TSI148_LCSR_CRAT); } /* If we want flushed, error-checked writes, set up a window @@ -2210,9 +2210,8 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, * through VME writes. */ if (err_chk) { - retval = tsi148_master_set(bridge->flush_image, 1, - (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT, - VME_D16); + retval = tsi148_master_set(bridge->flush_image, 1, (vstat * 0x80000), + 0x80000, VME_CRCSR, VME_SCT, VME_D16); if (retval) dev_err(tsi148_bridge->parent, "Configuring flush image failed\n"); } @@ -2221,7 +2220,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, } static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge, - struct pci_dev *pdev) + struct pci_dev *pdev) { u32 crat; struct tsi148_driver *bridge; @@ -2231,7 +2230,7 @@ static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge, /* Turn off CR/CSR space */ crat = ioread32be(bridge->base + TSI148_LCSR_CRAT); iowrite32be(crat & ~TSI148_LCSR_CRAT_EN, - bridge->base + TSI148_LCSR_CRAT); + bridge->base + TSI148_LCSR_CRAT); /* Free image */ iowrite32be(0, bridge->base + TSI148_LCSR_CROU); @@ -2253,6 +2252,12 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct vme_dma_resource *dma_ctrlr; struct vme_lm_resource *lm; + if (geoid >= VME_MAX_SLOTS) { + dev_err(&pdev->dev, "VME geographical address must be between 0 and %d (exclusive), but got %d\n", + VME_MAX_SLOTS, geoid); + return -EINVAL; + } + /* If we want to support more than one of each bridge, we need to * dynamically generate this so we get one per device */ @@ -2287,7 +2292,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* map registers in BAR 0 */ tsi148_device->base = ioremap(pci_resource_start(pdev, 0), - 4096); + 4096); if (!tsi148_device->base) { dev_err(&pdev->dev, "Unable to remap CRG region\n"); retval = -EIO; @@ -2367,7 +2372,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) sizeof(master_image->bus_resource)); master_image->kern_base = NULL; list_add_tail(&master_image->list, - &tsi148_bridge->master_resources); + &tsi148_bridge->master_resources); } /* Add slave windows to list */ @@ -2388,7 +2393,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | VME_PROG | VME_DATA; list_add_tail(&slave_image->list, - &tsi148_bridge->slave_resources); + &tsi148_bridge->slave_resources); } /* Add dma engines to list */ @@ -2409,7 +2414,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&dma_ctrlr->pending); INIT_LIST_HEAD(&dma_ctrlr->running); list_add_tail(&dma_ctrlr->list, - &tsi148_bridge->dma_resources); + &tsi148_bridge->dma_resources); } /* Add location monitor to list */ @@ -2447,16 +2452,16 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT); dev_info(&pdev->dev, "Board is%s the VME system 
controller\n", - (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not"); + (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not"); if (!geoid) dev_info(&pdev->dev, "VME geographical address is %d\n", - data & TSI148_LCSR_VSTAT_GA_M); + data & TSI148_LCSR_VSTAT_GA_M); else dev_info(&pdev->dev, "VME geographical address is set to %d\n", - geoid); + geoid); dev_info(&pdev->dev, "VME Write and flush and error check is %s\n", - err_chk ? "enabled" : "disabled"); + err_chk ? "enabled" : "disabled"); retval = tsi148_crcsr_init(tsi148_bridge, pdev); if (retval) { @@ -2507,8 +2512,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_master: /* resources are stored in link list */ list_for_each_safe(pos, n, &tsi148_bridge->master_resources) { - master_image = list_entry(pos, struct vme_master_resource, - list); + master_image = list_entry(pos, struct vme_master_resource, list); list_del(pos); kfree(master_image); } @@ -2605,8 +2609,7 @@ static void tsi148_remove(struct pci_dev *pdev) /* resources are stored in link list */ list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) { - master_image = list_entry(pos, struct vme_master_resource, - list); + master_image = list_entry(pos, struct vme_master_resource, list); list_del(pos); kfree(master_image); } @@ -2628,7 +2631,7 @@ MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes"); module_param(err_chk, bool, 0); MODULE_PARM_DESC(geoid, "Override geographical addressing"); -module_param(geoid, int, 0); +module_param(geoid, uint, 0); MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge"); MODULE_LICENSE("GPL"); diff --git a/drivers/staging/vt6655/TODO b/drivers/staging/vt6655/TODO index 63607ef9c97e55..529bc22cd6081e 100644 --- a/drivers/staging/vt6655/TODO +++ b/drivers/staging/vt6655/TODO @@ -18,4 +18,4 @@ TODO: - integrate with drivers/net/wireless Please send any patches to Greg Kroah-Hartman -and Forest Bond . +and Philipp Hortmann . 
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c index 688c870d89bc06..6a2e390e94939a 100644 --- a/drivers/staging/vt6655/card.c +++ b/drivers/staging/vt6655/card.c @@ -388,22 +388,22 @@ void card_safe_reset_tx(struct vnt_private *priv) struct vnt_tx_desc *curr_td; /* initialize TD index */ - priv->tail_td[0] = &priv->apTD0Rings[0]; - priv->apCurrTD[0] = &priv->apTD0Rings[0]; + priv->tail_td[0] = &priv->ap_td0_rings[0]; + priv->apCurrTD[0] = &priv->ap_td0_rings[0]; - priv->tail_td[1] = &priv->apTD1Rings[0]; - priv->apCurrTD[1] = &priv->apTD1Rings[0]; + priv->tail_td[1] = &priv->ap_td1_rings[0]; + priv->apCurrTD[1] = &priv->ap_td1_rings[0]; for (uu = 0; uu < TYPE_MAXTD; uu++) priv->iTDUsed[uu] = 0; for (uu = 0; uu < priv->opts.tx_descs[0]; uu++) { - curr_td = &priv->apTD0Rings[uu]; + curr_td = &priv->ap_td0_rings[uu]; curr_td->td0.owner = OWNED_BY_HOST; /* init all Tx Packet pointer to NULL */ } for (uu = 0; uu < priv->opts.tx_descs[1]; uu++) { - curr_td = &priv->apTD1Rings[uu]; + curr_td = &priv->ap_td1_rings[uu]; curr_td->td0.owner = OWNED_BY_HOST; /* init all Tx Packet pointer to NULL */ } diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h index f52e42564e81af..f6b462ebca51cd 100644 --- a/drivers/staging/vt6655/card.h +++ b/drivers/staging/vt6655/card.h @@ -55,8 +55,8 @@ void CARDvSafeResetRx(struct vnt_private *priv); void card_radio_power_off(struct vnt_private *priv); bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type); bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate, - u64 bss_timestamp); + u64 bss_timestamp); bool card_set_beacon_period(struct vnt_private *priv, - unsigned short beacon_interval); + unsigned short beacon_interval); #endif /* __CARD_H__ */ diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h index 0212240ba23f91..5eaab6b172d370 100644 --- a/drivers/staging/vt6655/device.h +++ b/drivers/staging/vt6655/device.h @@ -135,8 +135,8 @@ struct vnt_private { struct vnt_tx_desc *apCurrTD[TYPE_MAXTD]; struct vnt_tx_desc *tail_td[TYPE_MAXTD]; - struct vnt_tx_desc *apTD0Rings; - struct vnt_tx_desc *apTD1Rings; + struct vnt_tx_desc *ap_td0_rings; + struct vnt_tx_desc *ap_td1_rings; struct vnt_rx_desc *aRD0Ring; struct vnt_rx_desc *aRD1Ring; @@ -189,10 +189,10 @@ struct vnt_private { u8 byBBType; /* 0:11A, 1:11B, 2:11G */ u8 packet_type; /* - * 0:11a,1:11b,2:11gb (only CCK - * in BasicRate), 3:11ga (OFDM in - * Basic Rate) - */ + * 0:11a,1:11b,2:11gb (only CCK + * in BasicRate), 3:11ga (OFDM in + * Basic Rate) + */ unsigned short wBasicRate; unsigned char byACKRate; unsigned char byTopOFDMBasicRate; diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 3ff8103366c142..bf3ecf72020666 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -550,11 +550,11 @@ static bool device_init_rings(struct vnt_private *priv) priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc); /* vir_pool: pvoid type */ - priv->apTD0Rings = vir_pool + priv->ap_td0_rings = vir_pool + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc); - priv->apTD1Rings = vir_pool + priv->ap_td1_rings = vir_pool + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc); @@ -720,7 +720,7 @@ static int device_init_td0_ring(struct vnt_private *priv) curr = priv->td0_pool_dma; for (i = 0; i < 
priv->opts.tx_descs[0]; i++, curr += sizeof(struct vnt_tx_desc)) { - desc = &priv->apTD0Rings[i]; + desc = &priv->ap_td0_rings[i]; desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL); if (!desc->td_info) { ret = -ENOMEM; @@ -730,20 +730,20 @@ static int device_init_td0_ring(struct vnt_private *priv) desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ; desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ; - desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]); + desc->next = &(priv->ap_td0_rings[(i + 1) % priv->opts.tx_descs[0]]); desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc)); } if (i > 0) - priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma); - priv->tail_td[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0]; + priv->ap_td0_rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma); + priv->tail_td[0] = priv->apCurrTD[0] = &priv->ap_td0_rings[0]; return 0; err_free_desc: while (i--) { - desc = &priv->apTD0Rings[i]; + desc = &priv->ap_td0_rings[i]; kfree(desc->td_info); } @@ -761,7 +761,7 @@ static int device_init_td1_ring(struct vnt_private *priv) curr = priv->td1_pool_dma; for (i = 0; i < priv->opts.tx_descs[1]; i++, curr += sizeof(struct vnt_tx_desc)) { - desc = &priv->apTD1Rings[i]; + desc = &priv->ap_td1_rings[i]; desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL); if (!desc->td_info) { ret = -ENOMEM; @@ -771,19 +771,19 @@ static int device_init_td1_ring(struct vnt_private *priv) desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ; desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ; - desc->next = &(priv->apTD1Rings[(i + 1) % priv->opts.tx_descs[1]]); + desc->next = &(priv->ap_td1_rings[(i + 1) % priv->opts.tx_descs[1]]); desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc)); } if (i > 0) - priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma); - priv->tail_td[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0]; + priv->ap_td1_rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma); + priv->tail_td[1] = priv->apCurrTD[1] = &priv->ap_td1_rings[0]; return 0; err_free_desc: while (i--) { - desc = &priv->apTD1Rings[i]; + desc = &priv->ap_td1_rings[i]; kfree(desc->td_info); } @@ -795,7 +795,7 @@ static void device_free_td0_ring(struct vnt_private *priv) int i; for (i = 0; i < priv->opts.tx_descs[0]; i++) { - struct vnt_tx_desc *desc = &priv->apTD0Rings[i]; + struct vnt_tx_desc *desc = &priv->ap_td0_rings[i]; struct vnt_td_info *td_info = desc->td_info; dev_kfree_skb(td_info->skb); @@ -808,7 +808,7 @@ static void device_free_td1_ring(struct vnt_private *priv) int i; for (i = 0; i < priv->opts.tx_descs[1]; i++) { - struct vnt_tx_desc *desc = &priv->apTD1Rings[i]; + struct vnt_tx_desc *desc = &priv->ap_td1_rings[i]; struct vnt_td_info *td_info = desc->td_info; dev_kfree_skb(td_info->skb); @@ -1140,7 +1140,7 @@ static void vnt_interrupt_process(struct vnt_private *priv) PSbIsNextTBTTWakeUp((void *)priv); if ((priv->op_mode == NL80211_IFTYPE_AP || - priv->op_mode == NL80211_IFTYPE_ADHOC) && + priv->op_mode == NL80211_IFTYPE_ADHOC) && priv->vif->bss_conf.enable_beacon) MACvOneShotTimer1MicroSec(priv, (priv->vif->bss_conf.beacon_int - @@ -1535,7 +1535,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->op_mode != NL80211_IFTYPE_AP) { if (vif->cfg.assoc && conf->beacon_rate) { card_update_tsf(priv, conf->beacon_rate->hw_value, - conf->sync_tsf); + conf->sync_tsf); card_set_beacon_period(priv, conf->beacon_int); @@ -1763,7 +1763,7 @@ vt6655_probe(struct pci_dev *pcid, const struct 
pci_device_id *ent) priv->memaddr = pci_resource_start(pcid, 0); priv->ioaddr = pci_resource_start(pcid, 1); priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK, - 256); + 256); if (!priv->port_offset) { dev_err(&pcid->dev, ": Failed to IO remapping ..\n"); device_free_info(priv); diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h index acf931c3f5fd9b..a33af285222763 100644 --- a/drivers/staging/vt6655/mac.h +++ b/drivers/staging/vt6655/mac.h @@ -537,9 +537,9 @@ /*--------------------- Export Macros ------------------------------*/ -#define VT6655_MAC_SELECT_PAGE0(iobase) iowrite8(0, iobase + MAC_REG_PAGE1SEL) +#define VT6655_MAC_SELECT_PAGE0(iobase) iowrite8(0, (iobase) + MAC_REG_PAGE1SEL) -#define VT6655_MAC_SELECT_PAGE1(iobase) iowrite8(1, iobase + MAC_REG_PAGE1SEL) +#define VT6655_MAC_SELECT_PAGE1(iobase) iowrite8(1, (iobase) + MAC_REG_PAGE1SEL) #define MAKEWORD(lb, hb) \ ((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8))) diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c index 5e5ed582c35e39..3705cb1e87b635 100644 --- a/drivers/staging/vt6655/rxtx.c +++ b/drivers/staging/vt6655/rxtx.c @@ -493,9 +493,9 @@ s_uFillDataHead( buf->duration_a = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); buf->duration_b = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B, - pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); + pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); buf->duration_a_f0 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType, - wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); + wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); buf->duration_a_f1 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); @@ -520,7 +520,7 @@ s_uFillDataHead( buf->duration_f0 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); buf->duration_f1 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType, - wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); + wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate); return buf->duration; } @@ -1375,8 +1375,8 @@ static int vnt_beacon_xmit(struct vnt_private *priv, /* Get Duration and TimeStampOff */ short_head->duration = cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B, - frame_size, PK_TYPE_11A, current_rate, - false, 0, 0, 1, AUTO_FB_NONE)); + frame_size, PK_TYPE_11A, current_rate, + false, 0, 0, 1, AUTO_FB_NONE)); short_head->time_stamp_off = vnt_time_stamp_off(priv, current_rate); @@ -1391,8 +1391,8 @@ static int vnt_beacon_xmit(struct vnt_private *priv, /* Get Duration and TimeStampOff */ short_head->duration = cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B, - frame_size, PK_TYPE_11B, current_rate, - false, 0, 0, 1, AUTO_FB_NONE)); + frame_size, PK_TYPE_11B, current_rate, + false, 0, 0, 1, AUTO_FB_NONE)); short_head->time_stamp_off = 
vnt_time_stamp_off(priv, current_rate); diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index acfc39683c87f0..3698f2eb097e37 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 1d25e64b068a02..6002283cbebabc 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index 0c997a08adecc0..873411e95ed251 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -15,7 +15,6 @@ struct kref; struct sockaddr_storage; extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *); -extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int); extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *); extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *); extern void iscsit_del_tiqn(struct iscsi_tiqn *); @@ -35,7 +34,6 @@ extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *); extern int iscsit_logout_closesession(struct iscsit_cmd *, struct iscsit_conn *); extern int iscsit_logout_closeconnection(struct iscsit_cmd *, struct iscsit_conn *); extern int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *, struct iscsit_conn *); -extern int iscsit_send_async_msg(struct iscsit_conn *, u16, u8, u8); extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *, bool recovery); extern void iscsit_thread_get_cpumask(struct iscsit_conn *); extern int iscsi_target_tx_thread(void *); diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 3ca2f232b38719..e8760735486bc9 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -24,6 +24,5 @@ extern int iscsit_start_kthreads(struct iscsit_conn *); extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsit_conn *, u8); extern void iscsi_target_login_sess_out(struct iscsit_conn *, bool, bool); extern int iscsi_target_login_thread(void *); -extern void iscsi_handle_login_thread_timeout(struct timer_list *t); #endif /*** ISCSI_TARGET_LOGIN_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h index 41c3db3ddeaaf0..e60a46d348352f 100644 --- a/drivers/target/iscsi/iscsi_target_nego.h +++ b/drivers/target/iscsi/iscsi_target_nego.h @@ -15,8 +15,6 @@ extern int extract_param(const char *, const char *, unsigned int, char *, unsigned char *); extern int iscsi_target_check_login_request(struct iscsit_conn *, struct iscsi_login *); -extern int iscsi_target_get_initial_payload(struct iscsit_conn *, - struct iscsi_login *); extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *, struct iscsi_login *); extern int iscsi_target_start_negotiation( diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index 9c4aa01b6351b5..f60b156ede12eb 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -8,7 +8,7 @@ * ******************************************************************************/ -#include +#include #include #include #include diff --git 
a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index 71d067f6217784..d44d09f2dde96d 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -24,12 +24,7 @@ extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_ int); extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *); extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int); -extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl( - struct iscsi_portal_group *, const char *, u32); -extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *, - struct se_node_acl *); extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsit_session *); -extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *); extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int); extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *, struct sockaddr_storage *, struct iscsi_tpg_np *, diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 24b8e577575a6b..336da4fb0a7709 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -17,7 +17,6 @@ extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsit_cmd *, u32, u32); extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *); extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsit_cmd *); extern void iscsit_free_r2ts_from_list(struct iscsit_cmd *); -extern struct iscsit_cmd *iscsit_alloc_cmd(struct iscsit_conn *, gfp_t); extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int); extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsit_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *); @@ -34,7 +33,6 @@ extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *, struct iscsit extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *); extern int iscsit_add_cmd_to_response_queue(struct iscsit_cmd *, struct iscsit_conn *, u8); extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *); -extern void iscsit_remove_cmd_from_tx_queues(struct iscsit_cmd *, struct iscsit_conn *); extern bool iscsit_conn_all_queues_empty(struct iscsit_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *); extern void iscsit_release_cmd(struct iscsit_cmd *); @@ -64,9 +62,6 @@ extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int); extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *); extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8); extern void iscsit_print_session_params(struct iscsit_session *); -extern int iscsit_print_dev_to_proc(char *, char **, off_t, int); -extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int); -extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int); extern int rx_data(struct iscsit_conn *, struct kvec *, int, int); extern int tx_data(struct iscsit_conn *, struct kvec *, int, int); extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8); diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index b604fcae21e112..3b89b5a70331f6 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include 
"sbp_target.h" diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 01751faad38663..10250aca5a816a 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index bf4892544cfdb4..7d43d92c44d479 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 6600ae44f29d9e..43f47e3aa4482c 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 94e6cd4e7e43df..2d78ef74633c8b 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index a3e09adc4e767c..c8dc92a7d63e64 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 80b7d85030d006..4f4ad6af416c8f 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index f98ebb18666bf0..440e07b1d5cdb1 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 6a02561cc20ce9..fe8beb7dbab12a 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 50290abc07bc23..ea14a38356814d 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 73d0d6133ac8f2..05d29201b730f0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 4128631c9dfdd3..877ce58c0a708d 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 21783cd71c15d2..34ab628809e82c 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git 
a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 5ee03d1cba2bee..639fc358ed0fdb 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index bbe2e29612fa85..45329284f52f59 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 593540da93465b..d6afaba52ea561 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index 976928641aa64e..7bb7990d0b074e 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -4,6 +4,7 @@ config OPTEE tristate "OP-TEE" depends on HAVE_ARM_SMCCC depends on MMU + depends on RPMB || !RPMB help This implements the OP-TEE Trusted Execution Environment (TEE) driver. diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 39e688d4e974bc..c75fddc83576a5 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -10,17 +10,85 @@ #include #include #include +#include #include #include #include #include #include "optee_private.h" +struct blocking_notifier_head optee_rpmb_intf_added = + BLOCKING_NOTIFIER_INIT(optee_rpmb_intf_added); + +static int rpmb_add_dev(struct device *dev) +{ + blocking_notifier_call_chain(&optee_rpmb_intf_added, 0, + to_rpmb_dev(dev)); + + return 0; +} + +static struct class_interface rpmb_class_intf = { + .add_dev = rpmb_add_dev, +}; + +void optee_bus_scan_rpmb(struct work_struct *work) +{ + struct optee *optee = container_of(work, struct optee, + rpmb_scan_bus_work); + int ret; + + if (!optee->rpmb_scan_bus_done) { + ret = optee_enumerate_devices(PTA_CMD_GET_DEVICES_RPMB); + optee->rpmb_scan_bus_done = !ret; + if (ret && ret != -ENODEV) + pr_info("Scanning for RPMB device: ret %d\n", ret); + } +} + +int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action, + void *data) +{ + struct optee *optee = container_of(intf, struct optee, rpmb_intf); + + schedule_work(&optee->rpmb_scan_bus_work); + + return 0; +} + static void optee_bus_scan(struct work_struct *work) { WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP)); } +static ssize_t rpmb_routing_model_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct optee *optee = dev_get_drvdata(dev); + const char *s; + + if (optee->in_kernel_rpmb_routing) + s = "kernel"; + else + s = "user"; + + return scnprintf(buf, PAGE_SIZE, "%s\n", s); +} +static DEVICE_ATTR_RO(rpmb_routing_model); + +static struct attribute *optee_dev_attrs[] = { + &dev_attr_rpmb_routing_model.attr, + NULL +}; + +ATTRIBUTE_GROUPS(optee_dev); + +void optee_set_dev_group(struct optee *optee) +{ + tee_device_set_dev_groups(optee->teedev, optee_dev_groups); + tee_device_set_dev_groups(optee->supp_teedev, optee_dev_groups); +} + int optee_open(struct tee_context *ctx, bool cap_memref_null) { struct optee_context_data *ctxdata; @@ -97,6 +165,9 @@ void optee_release_supp(struct tee_context *ctx) void optee_remove_common(struct optee *optee) { + blocking_notifier_chain_unregister(&optee_rpmb_intf_added, + &optee->rpmb_intf); + cancel_work_sync(&optee->rpmb_scan_bus_work); 
/* Unregister OP-TEE specific client devices on TEE bus */ optee_unregister_devices(); @@ -113,13 +184,18 @@ void optee_remove_common(struct optee *optee) tee_shm_pool_free(optee->pool); optee_supp_uninit(&optee->supp); mutex_destroy(&optee->call_queue.mutex); + rpmb_dev_put(optee->rpmb_dev); + mutex_destroy(&optee->rpmb_dev_mutex); } static int smc_abi_rc; static int ffa_abi_rc; +static bool intf_is_regged; static int __init optee_core_init(void) { + int rc; + /* * The kernel may have crashed at the same time that all available * secure world threads were suspended and we cannot reschedule the @@ -130,18 +206,36 @@ static int __init optee_core_init(void) if (is_kdump_kernel()) return -ENODEV; + if (IS_REACHABLE(CONFIG_RPMB)) { + rc = rpmb_interface_register(&rpmb_class_intf); + if (rc) + return rc; + intf_is_regged = true; + } + smc_abi_rc = optee_smc_abi_register(); ffa_abi_rc = optee_ffa_abi_register(); /* If both failed there's no point with this module */ - if (smc_abi_rc && ffa_abi_rc) + if (smc_abi_rc && ffa_abi_rc) { + if (IS_REACHABLE(CONFIG_RPMB)) { + rpmb_interface_unregister(&rpmb_class_intf); + intf_is_regged = false; + } return smc_abi_rc; + } + return 0; } module_init(optee_core_init); static void __exit optee_core_exit(void) { + if (IS_REACHABLE(CONFIG_RPMB) && intf_is_regged) { + rpmb_interface_unregister(&rpmb_class_intf); + intf_is_regged = false; + } + if (!smc_abi_rc) optee_smc_abi_unregister(); if (!ffa_abi_rc) diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index d296c70ddfdcc8..950b4661d5dfe5 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -43,6 +43,13 @@ static int get_devices(struct tee_context *ctx, u32 session, ret = tee_client_invoke_func(ctx, &inv_arg, param); if ((ret < 0) || ((inv_arg.ret != TEEC_SUCCESS) && (inv_arg.ret != TEEC_ERROR_SHORT_BUFFER))) { + /* + * TEE_ERROR_STORAGE_NOT_AVAILABLE is returned when getting + * the list of device TAs that depends on RPMB but a usable + * RPMB device isn't found. 
+ */ + if (inv_arg.ret == TEE_ERROR_STORAGE_NOT_AVAILABLE) + return -ENODEV; pr_err("PTA_CMD_GET_DEVICES invoke function err: %x\n", inv_arg.ret); return -EINVAL; diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 3e73efa51bba0b..f3af5666bb1182 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -909,6 +910,10 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) optee->ffa.bottom_half_value = U32_MAX; optee->rpc_param_count = rpc_param_count; + if (IS_REACHABLE(CONFIG_RPMB) && + (sec_caps & OPTEE_FFA_SEC_CAP_RPMB_PROBE)) + optee->in_kernel_rpmb_routing = true; + teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool, optee); if (IS_ERR(teedev)) { @@ -925,6 +930,8 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) } optee->supp_teedev = teedev; + optee_set_dev_group(optee); + rc = tee_device_register(optee->teedev); if (rc) goto err_unreg_supp_teedev; @@ -940,6 +947,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) optee_cq_init(&optee->call_queue, 0); optee_supp_init(&optee->supp); optee_shm_arg_cache_init(optee, arg_cache_flags); + mutex_init(&optee->rpmb_dev_mutex); ffa_dev_set_drvdata(ffa_dev, optee); ctx = teedev_open(optee->teedev); if (IS_ERR(ctx)) { @@ -961,6 +969,10 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) if (rc) goto err_unregister_devices; + INIT_WORK(&optee->rpmb_scan_bus_work, optee_bus_scan_rpmb); + optee->rpmb_intf.notifier_call = optee_rpmb_intf_rdev; + blocking_notifier_chain_register(&optee_rpmb_intf_added, + &optee->rpmb_intf); pr_info("initialized driver\n"); return 0; @@ -974,6 +986,8 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) teedev_close_context(ctx); err_rhashtable_free: rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL); + rpmb_dev_put(optee->rpmb_dev); + mutex_destroy(&optee->rpmb_dev_mutex); optee_supp_uninit(&optee->supp); mutex_destroy(&optee->call_queue.mutex); mutex_destroy(&optee->ffa.mutex); diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h index 5db779dc00de05..257735ae5b5630 100644 --- a/drivers/tee/optee/optee_ffa.h +++ b/drivers/tee/optee/optee_ffa.h @@ -92,6 +92,8 @@ #define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0) /* OP-TEE supports asynchronous notification via FF-A */ #define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1) +/* OP-TEE supports probing for RPMB device if needed */ +#define OPTEE_FFA_SEC_CAP_RPMB_PROBE BIT(2) #define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2) diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 424898cdc4e97d..dc0f355ef72aae 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -7,7 +7,9 @@ #define OPTEE_PRIVATE_H #include +#include #include +#include #include #include #include @@ -20,6 +22,7 @@ /* Some Global Platform error codes used in this driver */ #define TEEC_SUCCESS 0x00000000 #define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define TEEC_ERROR_ITEM_NOT_FOUND 0xFFFF0008 #define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A #define TEEC_ERROR_COMMUNICATION 0xFFFF000E #define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C @@ -28,6 +31,7 @@ /* API Return Codes are from the GP TEE Internal Core API Specification */ #define TEE_ERROR_TIMEOUT 0xFFFF3001 +#define TEE_ERROR_STORAGE_NOT_AVAILABLE 0xF0100003 #define TEEC_ORIGIN_COMMS 0x00000002 @@ -200,6 +204,12 @@ struct optee_ops { * @notif: notification synchronization struct * @supp: 
supplicant synchronization struct for RPC to supplicant * @pool: shared memory pool + * @mutex: mutex protecting @rpmb_dev + * @rpmb_dev: current RPMB device or NULL + * @rpmb_scan_bus_done flag if device registation of RPMB dependent devices + * was already done + * @rpmb_scan_bus_work workq to for an RPMB device and to scan optee bus + * and register RPMB dependent optee drivers * @rpc_param_count: If > 0 number of RPC parameters to make room for * @scan_bus_done flag if device registation was already done. * @scan_bus_work workq to scan optee bus and register optee drivers @@ -218,9 +228,16 @@ struct optee { struct optee_notif notif; struct optee_supp supp; struct tee_shm_pool *pool; + /* Protects rpmb_dev pointer */ + struct mutex rpmb_dev_mutex; + struct rpmb_dev *rpmb_dev; + struct notifier_block rpmb_intf; unsigned int rpc_param_count; - bool scan_bus_done; + bool scan_bus_done; + bool rpmb_scan_bus_done; + bool in_kernel_rpmb_routing; struct work_struct scan_bus_work; + struct work_struct rpmb_scan_bus_work; }; struct optee_session { @@ -253,6 +270,8 @@ struct optee_call_ctx { size_t num_entries; }; +extern struct blocking_notifier_head optee_rpmb_intf_added; + int optee_notif_init(struct optee *optee, u_int max_key); void optee_notif_uninit(struct optee *optee); int optee_notif_wait(struct optee *optee, u_int key, u32 timeout); @@ -283,9 +302,14 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); #define PTA_CMD_GET_DEVICES 0x0 #define PTA_CMD_GET_DEVICES_SUPP 0x1 +#define PTA_CMD_GET_DEVICES_RPMB 0x2 int optee_enumerate_devices(u32 func); void optee_unregister_devices(void); +void optee_bus_scan_rpmb(struct work_struct *work); +int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action, + void *data); +void optee_set_dev_group(struct optee *optee); void optee_remove_common(struct optee *optee); int optee_open(struct tee_context *ctx, bool cap_memref_null); void optee_release(struct tee_context *ctx); diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h index 4576751b490c34..87a59cc0348049 100644 --- a/drivers/tee/optee/optee_rpc_cmd.h +++ b/drivers/tee/optee/optee_rpc_cmd.h @@ -104,4 +104,39 @@ /* I2C master control flags */ #define OPTEE_RPC_I2C_FLAGS_TEN_BIT BIT(0) +/* + * Reset RPMB probing + * + * Releases an eventually already used RPMB devices and starts over searching + * for RPMB devices. Returns the kind of shared memory to use in subsequent + * OPTEE_RPC_CMD_RPMB_PROBE_NEXT and OPTEE_RPC_CMD_RPMB calls. 
+ * + * [out] value[0].a OPTEE_RPC_SHM_TYPE_*, the parameter for + * OPTEE_RPC_CMD_SHM_ALLOC + */ +#define OPTEE_RPC_CMD_RPMB_PROBE_RESET 22 + +/* + * Probe next RPMB device + * + * [out] value[0].a Type of RPMB device, OPTEE_RPC_RPMB_* + * [out] value[0].b EXT CSD-slice 168 "RPMB Size" + * [out] value[0].c EXT CSD-slice 222 "Reliable Write Sector Count" + * [out] memref[1] Buffer with the raw CID + */ +#define OPTEE_RPC_CMD_RPMB_PROBE_NEXT 23 + +/* Type of RPMB device */ +#define OPTEE_RPC_RPMB_EMMC 0 +#define OPTEE_RPC_RPMB_UFS 1 +#define OPTEE_RPC_RPMB_NVME 2 + +/* + * Replay Protected Memory Block access + * + * [in] memref[0] Frames to device + * [out] memref[1] Frames from device + */ +#define OPTEE_RPC_CMD_RPMB_FRAMES 24 + #endif /*__OPTEE_RPC_CMD_H*/ diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h index 7d9fa426505ba5..87942630082124 100644 --- a/drivers/tee/optee/optee_smc.h +++ b/drivers/tee/optee/optee_smc.h @@ -278,6 +278,8 @@ struct optee_smc_get_shm_config_result { #define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5) /* Secure world supports pre-allocating RPC arg struct */ #define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6) +/* Secure world supports probing for RPMB device if needed */ +#define OPTEE_SMC_SEC_CAP_RPMB_PROBE BIT(7) #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \ diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index 5de4504665be91..ebbbd42b0e3e72 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include "optee_private.h" @@ -261,6 +262,154 @@ void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, ¶m); } +static void handle_rpc_func_rpmb_probe_reset(struct tee_context *ctx, + struct optee *optee, + struct optee_msg_arg *arg) +{ + struct tee_param params[1]; + + if (arg->num_params != ARRAY_SIZE(params) || + optee->ops->from_msg_param(optee, params, arg->num_params, + arg->params) || + params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + params[0].u.value.a = OPTEE_RPC_SHM_TYPE_KERNEL; + params[0].u.value.b = 0; + params[0].u.value.c = 0; + if (optee->ops->to_msg_param(optee, arg->params, + arg->num_params, params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + mutex_lock(&optee->rpmb_dev_mutex); + rpmb_dev_put(optee->rpmb_dev); + optee->rpmb_dev = NULL; + mutex_unlock(&optee->rpmb_dev_mutex); + + arg->ret = TEEC_SUCCESS; +} + +static int rpmb_type_to_rpc_type(enum rpmb_type rtype) +{ + switch (rtype) { + case RPMB_TYPE_EMMC: + return OPTEE_RPC_RPMB_EMMC; + case RPMB_TYPE_UFS: + return OPTEE_RPC_RPMB_UFS; + case RPMB_TYPE_NVME: + return OPTEE_RPC_RPMB_NVME; + default: + return -1; + } +} + +static int rpc_rpmb_match(struct device *dev, const void *data) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + return rpmb_type_to_rpc_type(rdev->descr.type) >= 0; +} + +static void handle_rpc_func_rpmb_probe_next(struct tee_context *ctx, + struct optee *optee, + struct optee_msg_arg *arg) +{ + struct rpmb_dev *rdev; + struct tee_param params[2]; + void *buf; + + if (arg->num_params != ARRAY_SIZE(params) || + optee->ops->from_msg_param(optee, params, arg->num_params, + arg->params) || + params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT || + params[1].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + buf = 
tee_shm_get_va(params[1].u.memref.shm, + params[1].u.memref.shm_offs); + if (IS_ERR(buf)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + mutex_lock(&optee->rpmb_dev_mutex); + rdev = rpmb_dev_find_device(NULL, optee->rpmb_dev, rpc_rpmb_match); + rpmb_dev_put(optee->rpmb_dev); + optee->rpmb_dev = rdev; + mutex_unlock(&optee->rpmb_dev_mutex); + + if (!rdev) { + arg->ret = TEEC_ERROR_ITEM_NOT_FOUND; + return; + } + + if (params[1].u.memref.size < rdev->descr.dev_id_len) { + arg->ret = TEEC_ERROR_SHORT_BUFFER; + return; + } + memcpy(buf, rdev->descr.dev_id, rdev->descr.dev_id_len); + params[1].u.memref.size = rdev->descr.dev_id_len; + params[0].u.value.a = rpmb_type_to_rpc_type(rdev->descr.type); + params[0].u.value.b = rdev->descr.capacity; + params[0].u.value.c = rdev->descr.reliable_wr_count; + if (optee->ops->to_msg_param(optee, arg->params, + arg->num_params, params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + arg->ret = TEEC_SUCCESS; +} + +static void handle_rpc_func_rpmb_frames(struct tee_context *ctx, + struct optee *optee, + struct optee_msg_arg *arg) +{ + struct tee_param params[2]; + struct rpmb_dev *rdev; + void *p0, *p1; + + mutex_lock(&optee->rpmb_dev_mutex); + rdev = rpmb_dev_get(optee->rpmb_dev); + mutex_unlock(&optee->rpmb_dev_mutex); + if (!rdev) { + arg->ret = TEEC_ERROR_ITEM_NOT_FOUND; + return; + } + + if (arg->num_params != ARRAY_SIZE(params) || + optee->ops->from_msg_param(optee, params, arg->num_params, + arg->params) || + params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT || + params[1].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + + p0 = tee_shm_get_va(params[0].u.memref.shm, + params[0].u.memref.shm_offs); + p1 = tee_shm_get_va(params[1].u.memref.shm, + params[1].u.memref.shm_offs); + if (rpmb_route_frames(rdev, p0, params[0].u.memref.size, p1, + params[1].u.memref.size)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + if (optee->ops->to_msg_param(optee, arg->params, + arg->num_params, params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + arg->ret = TEEC_SUCCESS; +out: + rpmb_dev_put(rdev); +} + void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee, struct optee_msg_arg *arg) { @@ -277,6 +426,34 @@ void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee, case OPTEE_RPC_CMD_I2C_TRANSFER: handle_rpc_func_cmd_i2c_transfer(ctx, arg); break; + /* + * optee->in_kernel_rpmb_routing true means that OP-TEE supports + * in-kernel RPMB routing _and_ that the RPMB subsystem is + * reachable. This is reported to user space with + * rpmb_routing_model=kernel in sysfs. + * + * rpmb_routing_model=kernel is also a promise to user space that + * RPMB access will not require supplicant support, hence the + * checks below. 
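+ *
+ * For illustration only (the sysfs path depends on which tee device
+ * instance the attribute is read from, so the name below is an example),
+ * user space can check the routing model with:
+ *
+ *   # cat /sys/class/tee/tee0/rpmb_routing_model
+ *   kernel
+ *
+ * in which case tee-supplicant is not needed for RPMB access on that
+ * device.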
+ */ + case OPTEE_RPC_CMD_RPMB_PROBE_RESET: + if (optee->in_kernel_rpmb_routing) + handle_rpc_func_rpmb_probe_reset(ctx, optee, arg); + else + handle_rpc_supp_cmd(ctx, optee, arg); + break; + case OPTEE_RPC_CMD_RPMB_PROBE_NEXT: + if (optee->in_kernel_rpmb_routing) + handle_rpc_func_rpmb_probe_next(ctx, optee, arg); + else + handle_rpc_supp_cmd(ctx, optee, arg); + break; + case OPTEE_RPC_CMD_RPMB_FRAMES: + if (optee->in_kernel_rpmb_routing) + handle_rpc_func_rpmb_frames(ctx, optee, arg); + else + handle_rpc_supp_cmd(ctx, optee, arg); + break; default: handle_rpc_supp_cmd(ctx, optee, arg); } diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 844285d4f03c18..e9456e3e74ccaa 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -1685,6 +1686,10 @@ static int optee_probe(struct platform_device *pdev) optee->smc.sec_caps = sec_caps; optee->rpc_param_count = rpc_param_count; + if (IS_REACHABLE(CONFIG_RPMB) && + (sec_caps & OPTEE_SMC_SEC_CAP_RPMB_PROBE)) + optee->in_kernel_rpmb_routing = true; + teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee); if (IS_ERR(teedev)) { rc = PTR_ERR(teedev); @@ -1699,6 +1704,8 @@ static int optee_probe(struct platform_device *pdev) } optee->supp_teedev = teedev; + optee_set_dev_group(optee); + rc = tee_device_register(optee->teedev); if (rc) goto err_unreg_supp_teedev; @@ -1712,6 +1719,7 @@ static int optee_probe(struct platform_device *pdev) optee->smc.memremaped_shm = memremaped_shm; optee->pool = pool; optee_shm_arg_cache_init(optee, arg_cache_flags); + mutex_init(&optee->rpmb_dev_mutex); platform_set_drvdata(pdev, optee); ctx = teedev_open(optee->teedev); @@ -1766,6 +1774,10 @@ static int optee_probe(struct platform_device *pdev) if (rc) goto err_disable_shm_cache; + INIT_WORK(&optee->rpmb_scan_bus_work, optee_bus_scan_rpmb); + optee->rpmb_intf.notifier_call = optee_rpmb_intf_rdev; + blocking_notifier_chain_register(&optee_rpmb_intf_added, + &optee->rpmb_intf); pr_info("initialized driver\n"); return 0; @@ -1779,6 +1791,8 @@ static int optee_probe(struct platform_device *pdev) err_close_ctx: teedev_close_context(ctx); err_supp_uninit: + rpmb_dev_put(optee->rpmb_dev); + mutex_destroy(&optee->rpmb_dev_mutex); optee_shm_arg_cache_uninit(optee); optee_supp_uninit(&optee->supp); mutex_destroy(&optee->call_queue.mutex); diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index d52e879b204e3d..d113679b1e2d7a 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -40,10 +40,7 @@ static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683, static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES); static DEFINE_SPINLOCK(driver_lock); -static const struct class tee_class = { - .name = "tee", -}; - +static const struct class tee_class; static dev_t tee_devt; struct tee_context *teedev_open(struct tee_device *teedev) @@ -965,6 +962,13 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, } EXPORT_SYMBOL_GPL(tee_device_alloc); +void tee_device_set_dev_groups(struct tee_device *teedev, + const struct attribute_group **dev_groups) +{ + teedev->dev.groups = dev_groups; +} +EXPORT_SYMBOL_GPL(tee_device_set_dev_groups); + static ssize_t implementation_id_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -983,6 +987,11 @@ static struct attribute *tee_dev_attrs[] = { ATTRIBUTE_GROUPS(tee_dev); +static const struct class tee_class = { + .name = "tee", + .dev_groups = 
tee_dev_groups, +}; + /** * tee_device_register() - Registers a TEE device * @teedev: Device to register @@ -1001,8 +1010,6 @@ int tee_device_register(struct tee_device *teedev) return -EINVAL; } - teedev->dev.groups = tee_dev_groups; - rc = cdev_device_add(&teedev->cdev, &teedev->dev); if (rc) { dev_err(&teedev->dev, diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index ed16897584b489..61e7ae524b1f85 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -40,6 +40,15 @@ config THERMAL_DEBUGFS Say Y to allow the thermal subsystem to collect diagnostic information that can be accessed via debugfs. +config THERMAL_CORE_TESTING + tristate "Thermal core testing facility" + depends on DEBUG_FS + help + Say Y to add a debugfs-based thermal core testing facility. + It allows test thermal zones to be created and populated + with trip points in order to exercise the thermal core + functionality in a controlled way. + config THERMAL_EMERGENCY_POWEROFF_DELAY_MS int "Emergency poweroff delay in milli-seconds" default 0 @@ -429,7 +438,7 @@ source "drivers/thermal/samsung/Kconfig" endmenu menu "STMicroelectronics thermal drivers" -depends on (ARCH_STI || ARCH_STM32) && OF +depends on (ARCH_STI || ARCH_STM32) && THERMAL_OF source "drivers/thermal/st/Kconfig" endmenu diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index ce7a4752ef52fe..41c4d56beb40d0 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -63,3 +63,4 @@ obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o obj-$(CONFIG_KHADAS_MCU_FAN_THERMAL) += khadas_mcu_fan.o obj-$(CONFIG_LOONGSON2_THERMAL) += loongson2_thermal.o +obj-$(CONFIG_THERMAL_CORE_TESTING) += testing/ diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 5ad87eb3f57817..7d61493082b5a0 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -208,8 +208,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) */ val = readl(data->regs + BCM2835_TS_TSENSCTL); if (!(val & BCM2835_TS_TSENSCTL_RSTB)) { - struct thermal_trip trip; - int offset, slope; + int offset, slope, crit_temp; slope = thermal_zone_get_slope(tz); offset = thermal_zone_get_offset(tz); @@ -217,7 +216,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) * For now we deal only with critical, otherwise * would need to iterate */ - err = thermal_zone_get_trip(tz, 0, &trip); + err = thermal_zone_get_crit_temp(tz, &crit_temp); if (err < 0) { dev_err(dev, "Not able to read trip_temp: %d\n", err); return err; @@ -232,7 +231,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) val |= (0xFE << BCM2835_TS_TSENSCTL_RSTDELAY_SHIFT); /* trip_adc value from info */ - val |= bcm2835_thermal_temp2adc(trip.temperature, + val |= bcm2835_thermal_temp2adc(crit_temp, offset, slope) << BCM2835_TS_TSENSCTL_THOLD_SHIFT; diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c index 9674e5ffcfa274..270982740fded3 100644 --- a/drivers/thermal/broadcom/brcmstb_thermal.c +++ b/drivers/thermal/broadcom/brcmstb_thermal.c @@ -338,11 +338,9 @@ static int brcmstb_thermal_probe(struct platform_device *pdev) thermal = devm_thermal_of_zone_register(&pdev->dev, 0, priv, of_ops); - if (IS_ERR(thermal)) { - ret = PTR_ERR(thermal); - dev_err(&pdev->dev, "could not register sensor: %d\n", ret); - return ret; - } + if (IS_ERR(thermal)) + return 
dev_err_probe(&pdev->dev, PTR_ERR(thermal), + "could not register sensor\n"); priv->thermal = thermal; @@ -352,10 +350,9 @@ static int brcmstb_thermal_probe(struct platform_device *pdev) brcmstb_tmon_irq_thread, IRQF_ONESHOT, DRV_NAME, priv); - if (ret < 0) { - dev_err(&pdev->dev, "could not request IRQ: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "could not request IRQ\n"); } dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n"); diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index daed67d19efb81..863e7a4272e66f 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -92,23 +92,21 @@ static void bang_bang_manage(struct thermal_zone_device *tz) for_each_trip_desc(tz, td) { const struct thermal_trip *trip = &td->trip; + bool turn_on; - if (tz->temperature >= td->threshold || - trip->temperature == THERMAL_TEMP_INVALID || + if (trip->temperature == THERMAL_TEMP_INVALID || trip->type == THERMAL_TRIP_CRITICAL || trip->type == THERMAL_TRIP_HOT) continue; /* - * If the initial cooling device state is "on", but the zone - * temperature is not above the trip point, the core will not - * call bang_bang_control() until the zone temperature reaches - * the trip point temperature which may be never. In those - * cases, set the initial state of the cooling device to 0. + * Adjust the target states for uninitialized thermal instances + * to the thermal zone temperature and the trip point threshold. */ + turn_on = tz->temperature >= td->threshold; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (!instance->initialized && instance->trip == trip) - bang_bang_set_instance_target(instance, 0); + bang_bang_set_instance_target(instance, turn_on); } } diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 0eb657db62e42b..f1fe0f8ab04f17 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -465,11 +465,22 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev) return IRQ_HANDLED; } +static int hisi_trip_walk_cb(struct thermal_trip *trip, void *arg) +{ + struct hisi_thermal_sensor *sensor = arg; + + if (trip->type != THERMAL_TRIP_PASSIVE) + return 0; + + sensor->thres_temp = trip->temperature; + /* Return nonzero to terminate the search. 
*/ + return 1; +} + static int hisi_thermal_register_sensor(struct platform_device *pdev, struct hisi_thermal_sensor *sensor) { - int ret, i; - struct thermal_trip trip; + int ret; sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, sensor->id, sensor, @@ -482,15 +493,7 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev, return ret; } - for (i = 0; i < thermal_zone_get_num_trips(sensor->tzd); i++) { - - thermal_zone_get_trip(sensor->tzd, i, &trip); - - if (trip.type == THERMAL_TRIP_PASSIVE) { - sensor->thres_temp = trip.temperature; - break; - } - } + thermal_zone_for_each_trip(sensor->tzd, hisi_trip_walk_cb, sensor); return 0; } diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c index 7224f8d21db97e..88558ce588807a 100644 --- a/drivers/thermal/imx_sc_thermal.c +++ b/drivers/thermal/imx_sc_thermal.c @@ -111,8 +111,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev) if (ret == -ENODEV) continue; - dev_err(&pdev->dev, "failed to register thermal zone\n"); - return ret; + return dev_err_probe(&pdev->dev, ret, "failed to register thermal zone\n"); } devm_thermal_add_hwmon_sysfs(&pdev->dev, sensor->tzd); diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 091fb30dedf3bc..b8e85a405351ba 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -353,24 +353,16 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, return 0; } -static int imx_bind(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev) +static bool imx_should_bind(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) { - return thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev, - THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT, - THERMAL_WEIGHT_DEFAULT); -} - -static int imx_unbind(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev) -{ - return thermal_zone_unbind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev); + return trip->type == THERMAL_TRIP_PASSIVE; } static struct thermal_zone_device_ops imx_tz_ops = { - .bind = imx_bind, - .unbind = imx_unbind, + .should_bind = imx_should_bind, .get_temp = imx_get_temp, .change_mode = imx_change_mode, .set_trip_temp = imx_set_trip_temp, @@ -773,7 +765,7 @@ static void imx_thermal_remove(struct platform_device *pdev) imx_thermal_unregister_legacy_cooling(data); } -static int __maybe_unused imx_thermal_suspend(struct device *dev) +static int imx_thermal_suspend(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); int ret; @@ -792,7 +784,7 @@ static int __maybe_unused imx_thermal_suspend(struct device *dev) return pm_runtime_force_suspend(data->dev); } -static int __maybe_unused imx_thermal_resume(struct device *dev) +static int imx_thermal_resume(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); int ret; @@ -804,7 +796,7 @@ static int __maybe_unused imx_thermal_resume(struct device *dev) return thermal_zone_device_enable(data->tz); } -static int __maybe_unused imx_thermal_runtime_suspend(struct device *dev) +static int imx_thermal_runtime_suspend(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); const struct thermal_soc_data *socdata = data->socdata; @@ -826,7 +818,7 @@ static int __maybe_unused imx_thermal_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused imx_thermal_runtime_resume(struct device *dev) +static int imx_thermal_runtime_resume(struct device 
*dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); const struct thermal_soc_data *socdata = data->socdata; @@ -857,15 +849,15 @@ static int __maybe_unused imx_thermal_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx_thermal_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(imx_thermal_suspend, imx_thermal_resume) - SET_RUNTIME_PM_OPS(imx_thermal_runtime_suspend, - imx_thermal_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(imx_thermal_suspend, imx_thermal_resume) + RUNTIME_PM_OPS(imx_thermal_runtime_suspend, + imx_thermal_runtime_resume, NULL) }; static struct platform_driver imx_thermal = { .driver = { .name = "imx_thermal", - .pm = &imx_thermal_pm_ops, + .pm = pm_ptr(&imx_thermal_pm_ops), .of_match_table = of_imx_thermal_match, }, .probe = imx_thermal_probe, diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c index 4b4a4d63e61fe5..cb149bcdd7d5d3 100644 --- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c @@ -564,7 +564,6 @@ static const struct file_operations acpi_thermal_rel_fops = { .open = acpi_thermal_rel_open, .release = acpi_thermal_rel_release, .unlocked_ioctl = acpi_thermal_rel_ioctl, - .llseek = no_llseek, }; static struct miscdevice acpi_thermal_rel_misc_device = { diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index 7c9f4023babcfc..5e94a45eba3eef 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include "../thermal_hwmon.h" diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index 96daad28b0c04c..c2d59cbfaea912 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -291,24 +291,6 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data) return IRQ_HANDLED; } -static int qpnp_tm_get_critical_trip_temp(struct qpnp_tm_chip *chip) -{ - struct thermal_trip trip; - int i, ret; - - for (i = 0; i < thermal_zone_get_num_trips(chip->tz_dev); i++) { - - ret = thermal_zone_get_trip(chip->tz_dev, i, &trip); - if (ret) - continue; - - if (trip.type == THERMAL_TRIP_CRITICAL) - return trip.temperature; - } - - return THERMAL_TEMP_INVALID; -} - /* * This function initializes the internal temp value based on only the * current thermal stage and threshold. 
Setup threshold control and @@ -343,7 +325,9 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip) mutex_unlock(&chip->lock); - crit_temp = qpnp_tm_get_critical_trip_temp(chip); + ret = thermal_zone_get_crit_temp(chip->tz_dev, &crit_temp); + if (ret) + crit_temp = THERMAL_TEMP_INVALID; mutex_lock(&chip->lock); diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 404f01cca4dab5..52e26be8c53df6 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -347,7 +347,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) return 0; } -static int __maybe_unused qoriq_tmu_suspend(struct device *dev) +static int qoriq_tmu_suspend(struct device *dev) { struct qoriq_tmu_data *data = dev_get_drvdata(dev); int ret; @@ -361,7 +361,7 @@ static int __maybe_unused qoriq_tmu_suspend(struct device *dev) return 0; } -static int __maybe_unused qoriq_tmu_resume(struct device *dev) +static int qoriq_tmu_resume(struct device *dev) { int ret; struct qoriq_tmu_data *data = dev_get_drvdata(dev); @@ -374,8 +374,8 @@ static int __maybe_unused qoriq_tmu_resume(struct device *dev) return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME); } -static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops, - qoriq_tmu_suspend, qoriq_tmu_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops, + qoriq_tmu_suspend, qoriq_tmu_resume); static const struct of_device_id qoriq_tmu_match[] = { { .compatible = "fsl,qoriq-tmu", }, @@ -387,7 +387,7 @@ MODULE_DEVICE_TABLE(of, qoriq_tmu_match); static struct platform_driver qoriq_tmu = { .driver = { .name = "qoriq_thermal", - .pm = &qoriq_tmu_pm_ops, + .pm = pm_sleep_ptr(&qoriq_tmu_pm_ops), .of_match_table = qoriq_tmu_match, }, .probe = qoriq_tmu_probe, diff --git a/drivers/thermal/renesas/rcar_gen3_thermal.c b/drivers/thermal/renesas/rcar_gen3_thermal.c index 5c769871753add..810f866774612b 100644 --- a/drivers/thermal/renesas/rcar_gen3_thermal.c +++ b/drivers/thermal/renesas/rcar_gen3_thermal.c @@ -563,11 +563,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev) if (ret) goto error_unregister; - ret = thermal_zone_get_num_trips(tsc->zone); - if (ret < 0) - goto error_unregister; - - dev_info(dev, "Sensor %u: Loaded %d trip points\n", i, ret); + dev_info(dev, "Sensor %u: Loaded\n", i); } if (!priv->num_tscs) { diff --git a/drivers/thermal/renesas/rcar_thermal.c b/drivers/thermal/renesas/rcar_thermal.c index 1e93f60b6d7445..ddc8341e5c3fa0 100644 --- a/drivers/thermal/renesas/rcar_thermal.c +++ b/drivers/thermal/renesas/rcar_thermal.c @@ -447,7 +447,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) ret = devm_request_irq(dev, irq, rcar_thermal_irq, IRQF_SHARED, dev_name(dev), common); if (ret) { - dev_err(dev, "irq request failed\n "); + dev_err(dev, "irq request failed\n"); goto error_unregister; } diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c index 874192546548c5..dfd1d529c4101d 100644 --- a/drivers/thermal/sprd_thermal.c +++ b/drivers/thermal/sprd_thermal.c @@ -359,21 +359,17 @@ static int sprd_thm_probe(struct platform_device *pdev) return -EINVAL; } - thm->clk = devm_clk_get(&pdev->dev, "enable"); + thm->clk = devm_clk_get_enabled(&pdev->dev, "enable"); if (IS_ERR(thm->clk)) { dev_err(&pdev->dev, "failed to get enable clock\n"); return PTR_ERR(thm->clk); } - ret = clk_prepare_enable(thm->clk); - if (ret) - return ret; - sprd_thm_para_config(thm); ret = sprd_thm_cal_read(np, "thm_sign_cal", &val); if (ret) - goto disable_clk; + return ret; if (val > 0) 
thm->ratio_sign = -1; @@ -382,7 +378,7 @@ static int sprd_thm_probe(struct platform_device *pdev) ret = sprd_thm_cal_read(np, "thm_ratio_cal", &thm->ratio_off); if (ret) - goto disable_clk; + return ret; for_each_child_of_node(np, sen_child) { sen = devm_kzalloc(&pdev->dev, sizeof(*sen), GFP_KERNEL); @@ -439,8 +435,6 @@ static int sprd_thm_probe(struct platform_device *pdev) of_put: of_node_put(sen_child); -disable_clk: - clk_disable_unprepare(thm->clk); return ret; } @@ -526,8 +520,6 @@ static void sprd_thm_remove(struct platform_device *pdev) devm_thermal_of_zone_unregister(&pdev->dev, thm->sensor[i]->tzd); } - - clk_disable_unprepare(thm->clk); } static const struct of_device_id sprd_thermal_of_match[] = { diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c index 2a105409864ead..a14a37d546982e 100644 --- a/drivers/thermal/st/st_thermal.c +++ b/drivers/thermal/st/st_thermal.c @@ -12,6 +12,7 @@ #include #include "st_thermal.h" +#include "../thermal_hwmon.h" /* The Thermal Framework expects millidegrees */ #define mcelsius(temp) ((temp) * 1000) @@ -135,8 +136,6 @@ static struct thermal_zone_device_ops st_tz_ops = { .get_temp = st_thermal_get_temp, }; -static struct thermal_trip trip; - int st_thermal_register(struct platform_device *pdev, const struct of_device_id *st_thermal_of_match) { @@ -145,7 +144,6 @@ int st_thermal_register(struct platform_device *pdev, struct device_node *np = dev->of_node; const struct of_device_id *match; - int polling_delay; int ret; if (!np) { @@ -197,29 +195,24 @@ int st_thermal_register(struct platform_device *pdev, if (ret) goto sensor_off; - polling_delay = sensor->ops->register_enable_irq ? 0 : 1000; - - trip.temperature = sensor->cdata->crit_temp; - trip.type = THERMAL_TRIP_CRITICAL; - sensor->thermal_dev = - thermal_zone_device_register_with_trips(dev_name(dev), &trip, 1, sensor, - &st_tz_ops, NULL, 0, polling_delay); + devm_thermal_of_zone_register(dev, 0, sensor, &st_tz_ops); if (IS_ERR(sensor->thermal_dev)) { - dev_err(dev, "failed to register thermal zone device\n"); + dev_err(dev, "failed to register thermal of zone\n"); ret = PTR_ERR(sensor->thermal_dev); goto sensor_off; } - ret = thermal_zone_device_enable(sensor->thermal_dev); - if (ret) - goto tzd_unregister; platform_set_drvdata(pdev, sensor); + /* + * devm_thermal_of_zone_register() doesn't enable hwmon by default + * Enable it here + */ + devm_thermal_add_hwmon_sysfs(dev, sensor->thermal_dev); + return 0; -tzd_unregister: - thermal_zone_device_unregister(sensor->thermal_dev); sensor_off: st_thermal_sensor_off(sensor); @@ -232,11 +225,11 @@ void st_thermal_unregister(struct platform_device *pdev) struct st_thermal_sensor *sensor = platform_get_drvdata(pdev); st_thermal_sensor_off(sensor); - thermal_zone_device_unregister(sensor->thermal_dev); + thermal_remove_hwmon_sysfs(sensor->thermal_dev); + devm_thermal_of_zone_unregister(sensor->dev, sensor->thermal_dev); } EXPORT_SYMBOL_GPL(st_thermal_unregister); -#ifdef CONFIG_PM_SLEEP static int st_thermal_suspend(struct device *dev) { struct st_thermal_sensor *sensor = dev_get_drvdata(dev); @@ -265,9 +258,8 @@ static int st_thermal_resume(struct device *dev) return 0; } -#endif -SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); +DEFINE_SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); EXPORT_SYMBOL_GPL(st_thermal_pm_ops); MODULE_AUTHOR("STMicroelectronics (R&D) Limited "); diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c index 
e427117381a410..97493d2b2f4999 100644 --- a/drivers/thermal/st/st_thermal_memmap.c +++ b/drivers/thermal/st/st_thermal_memmap.c @@ -170,7 +170,7 @@ static void st_mmap_remove(struct platform_device *pdev) static struct platform_driver st_mmap_thermal_driver = { .driver = { .name = "st_thermal_mmap", - .pm = &st_thermal_pm_ops, + .pm = pm_sleep_ptr(&st_thermal_pm_ops), .of_match_table = st_mmap_thermal_of_match, }, .probe = st_mmap_probe, diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c index 34785b9276fc74..ffd988600ed6a2 100644 --- a/drivers/thermal/st/stm_thermal.c +++ b/drivers/thermal/st/stm_thermal.c @@ -440,7 +440,6 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor) return ret; } -#ifdef CONFIG_PM_SLEEP static int stm_thermal_suspend(struct device *dev) { struct stm_thermal_sensor *sensor = dev_get_drvdata(dev); @@ -466,10 +465,9 @@ static int stm_thermal_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM_SLEEP */ -static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, - stm_thermal_suspend, stm_thermal_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, + stm_thermal_suspend, stm_thermal_resume); static const struct thermal_zone_device_ops stm_tz_ops = { .get_temp = stm_thermal_get_temp, @@ -580,7 +578,7 @@ static void stm_thermal_remove(struct platform_device *pdev) static struct platform_driver stm_thermal_driver = { .driver = { .name = "stm_thermal", - .pm = &stm_thermal_pm_ops, + .pm = pm_sleep_ptr(&stm_thermal_pm_ops), .of_match_table = stm_thermal_of_match, }, .probe = stm_thermal_probe, diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index d3dfc34c62c684..a023c948afbddd 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -682,24 +682,25 @@ static const struct thermal_zone_device_ops tegra_of_thermal_ops = { .set_trips = tegra_thermctl_set_trips, }; -static int get_hot_temp(struct thermal_zone_device *tz, int *trip_id, int *temp) +static int get_hot_trip_cb(struct thermal_trip *trip, void *arg) { - int i, ret; - struct thermal_trip trip; + const struct thermal_trip **trip_ret = arg; - for (i = 0; i < thermal_zone_get_num_trips(tz); i++) { + if (trip->type != THERMAL_TRIP_HOT) + return 0; - ret = thermal_zone_get_trip(tz, i, &trip); - if (ret) - return -EINVAL; + *trip_ret = trip; + /* Return nonzero to terminate the search. */ + return 1; +} - if (trip.type == THERMAL_TRIP_HOT) { - *trip_id = i; - return 0; - } - } +static const struct thermal_trip *get_hot_trip(struct thermal_zone_device *tz) +{ + const struct thermal_trip *trip = NULL; - return -EINVAL; + thermal_zone_for_each_trip(tz, get_hot_trip_cb, &trip); + + return trip; } /** @@ -731,8 +732,9 @@ static int tegra_soctherm_set_hwtrips(struct device *dev, struct thermal_zone_device *tz) { struct tegra_soctherm *ts = dev_get_drvdata(dev); + const struct thermal_trip *hot_trip; struct soctherm_throt_cfg *stc; - int i, trip, temperature, ret; + int i, temperature, ret; /* Get thermtrips. If missing, try to get critical trips. 
*/ temperature = tsensor_group_thermtrip_get(ts, sg->id); @@ -749,8 +751,8 @@ static int tegra_soctherm_set_hwtrips(struct device *dev, dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n", sg->name, temperature); - ret = get_hot_temp(tz, &trip, &temperature); - if (ret) { + hot_trip = get_hot_trip(tz); + if (!hot_trip) { dev_info(dev, "throttrip: %s: missing hot temperature\n", sg->name); return 0; @@ -763,7 +765,7 @@ static int tegra_soctherm_set_hwtrips(struct device *dev, continue; cdev = ts->throt_cfgs[i].cdev; - if (get_thermal_instance(tz, cdev, trip)) + if (thermal_trip_is_bound_to_cdev(tz, hot_trip, cdev)) stc = find_throttle_cfg_by_name(ts, cdev->type); else continue; diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c index d911fa60f10045..6245f6b97f43ec 100644 --- a/drivers/thermal/tegra/tegra30-tsensor.c +++ b/drivers/thermal/tegra/tegra30-tsensor.c @@ -303,33 +303,37 @@ static int tegra_tsensor_disable_hw_channel(const struct tegra_tsensor *ts, return 0; } -static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd, - int *hot_trip, int *crit_trip) +struct trip_temps { + int hot_trip; + int crit_trip; +}; + +static int tegra_tsensor_get_trips_cb(struct thermal_trip *trip, void *arg) { - unsigned int i; + struct trip_temps *temps = arg; + + if (trip->type == THERMAL_TRIP_HOT) + temps->hot_trip = trip->temperature; + else if (trip->type == THERMAL_TRIP_CRITICAL) + temps->crit_trip = trip->temperature; + + return 0; +} +static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd, + struct trip_temps *temps) +{ /* * 90C is the maximal critical temperature of all Tegra30 SoC variants, * use it for the default trip if unspecified in a device-tree. */ - *hot_trip = 85000; - *crit_trip = 90000; - - for (i = 0; i < thermal_zone_get_num_trips(tzd); i++) { - - struct thermal_trip trip; + temps->hot_trip = 85000; + temps->crit_trip = 90000; - thermal_zone_get_trip(tzd, i, &trip); - - if (trip.type == THERMAL_TRIP_HOT) - *hot_trip = trip.temperature; - - if (trip.type == THERMAL_TRIP_CRITICAL) - *crit_trip = trip.temperature; - } + thermal_zone_for_each_trip(tzd, tegra_tsensor_get_trips_cb, temps); /* clamp hardware trips to the calibration limits */ - *hot_trip = clamp(*hot_trip, 25000, 90000); + temps->hot_trip = clamp(temps->hot_trip, 25000, 90000); /* * Kernel will perform a normal system shut down if it will @@ -338,7 +342,7 @@ static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd, * shut down gracefully before sending signal to the Power * Management controller. 
*/ - *crit_trip = clamp(*crit_trip + 5000, 25000, 90000); + temps->crit_trip = clamp(temps->crit_trip + 5000, 25000, 90000); } static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts, @@ -346,7 +350,8 @@ static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts, { const struct tegra_tsensor_channel *tsc = &ts->ch[id]; struct thermal_zone_device *tzd = tsc->tzd; - int err, hot_trip = 0, crit_trip = 0; + struct trip_temps temps = { 0 }; + int err; u32 val; if (!tzd) { @@ -357,24 +362,24 @@ static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts, return 0; } - tegra_tsensor_get_hw_channel_trips(tzd, &hot_trip, &crit_trip); + tegra_tsensor_get_hw_channel_trips(tzd, &temps); dev_info_once(ts->dev, "ch%u: PMC emergency shutdown trip set to %dC\n", - id, DIV_ROUND_CLOSEST(crit_trip, 1000)); + id, DIV_ROUND_CLOSEST(temps.crit_trip, 1000)); - hot_trip = tegra_tsensor_temp_to_counter(ts, hot_trip); - crit_trip = tegra_tsensor_temp_to_counter(ts, crit_trip); + temps.hot_trip = tegra_tsensor_temp_to_counter(ts, temps.hot_trip); + temps.crit_trip = tegra_tsensor_temp_to_counter(ts, temps.crit_trip); /* program LEVEL2 counter threshold */ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1); val &= ~TSENSOR_SENSOR0_CONFIG1_TH2; - val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH2, hot_trip); + val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH2, temps.hot_trip); writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1); /* program LEVEL3 counter threshold */ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG2); val &= ~TSENSOR_SENSOR0_CONFIG2_TH3; - val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG2_TH3, crit_trip); + val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG2_TH3, temps.crit_trip); writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG2); /* diff --git a/drivers/thermal/testing/Makefile b/drivers/thermal/testing/Makefile new file mode 100644 index 00000000000000..ede9678efbcee6 --- /dev/null +++ b/drivers/thermal/testing/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Thermal core testing facility. + +obj-$(CONFIG_THERMAL_CORE_TESTING) += thermal-testing.o + +thermal-testing-y := command.o zone.o diff --git a/drivers/thermal/testing/command.c b/drivers/thermal/testing/command.c new file mode 100644 index 00000000000000..ba11d70e80219b --- /dev/null +++ b/drivers/thermal/testing/command.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2024, Intel Corporation + * + * Author: Rafael J. Wysocki + * + * Thermal subsystem testing facility. + * + * This facility allows the thermal core functionality to be exercised in a + * controlled way in order to verify its behavior. + * + * It resides in the "thermal-testing" directory under the debugfs root and + * starts with a single file called "command" which can be written a string + * representing a thermal testing facility command. + * + * The currently supported commands are listed in the tt_commands enum below. 
+ * + * The "addtz" command causes a new test thermal zone template to be created, + * for example: + * + * # echo addtz > /sys/kernel/debug/thermal-testing/command + * + * That template will be represented as a subdirectory in the "thermal-testing" + * directory, for example + * + * # ls /sys/kernel/debug/thermal-testing/ + * command tz0 + * + * The thermal zone template can be populated with trip points with the help of + * the "tzaddtrip" command, for example: + * + * # echo tzaddtrip:0 > /sys/kernel/debug/thermal-testing/command + * + * which causes a trip point template to be added to the test thermal zone + * template 0 (represented by the tz0 subdirectory in "thermal-testing"). + * + * # ls /sys/kernel/debug/thermal-testing/tz0 + * init_temp temp trip_0_temp trip_0_hyst + * + * The temperature of a trip point template is initially THERMAL_TEMP_INVALID + * and its hysteresis is initially 0. They can be adjusted by writing to the + * "trip_x_temp" and "trip_x_hyst" files correspoinding to that trip point + * template, respectively. + * + * The initial temperature of a thermal zone based on a template can be set by + * writing to the "init_temp" file in its directory under "thermal-testing", for + * example: + * + * echo 50000 > /sys/kernel/debug/thermal-testing/tz0/init_temp + * + * When ready, "tzreg" command can be used for registering and enabling a + * thermal zone based on a given template with the thermal core, for example + * + * # echo tzreg:0 > /sys/kernel/debug/thermal-testing/command + * + * In this case, test thermal zone template 0 is used for registering a new + * thermal zone and the set of trip point templates associated with it is used + * for populating the new thermal zone's trip points table. The type of the new + * thermal zone is "test_tz". + * + * The temperature and hysteresis of all of the trip points in that new thermal + * zone are adjustable via sysfs, so they can be updated at any time. + * + * The current temperature of the new thermal zone can be set by writing to the + * "temp" file in the corresponding thermal zone template's directory under + * "thermal-testing", for example + * + * echo 10000 > /sys/kernel/debug/thermal-testing/tz0/temp + * + * which will also trigger a temperature update for this zone in the thermal + * core, including checking its trip points, sending notifications to user space + * if any of them have been crossed and so on. + * + * When it is not needed any more, a test thermal zone template can be deleted + * with the help of the "deltz" command, for example + * + * # echo deltz:0 > /sys/kernel/debug/thermal-testing/command + * + * which will also unregister the thermal zone based on it, if present. 
+ */ + +#define pr_fmt(fmt) "thermal-testing: " fmt + +#include +#include + +#include "thermal_testing.h" + +struct dentry *d_testing; + +#define TT_COMMAND_SIZE 16 + +enum tt_commands { + TT_CMD_ADDTZ, + TT_CMD_DELTZ, + TT_CMD_TZADDTRIP, + TT_CMD_TZREG, + TT_CMD_TZUNREG, +}; + +static const char *tt_command_strings[] = { + [TT_CMD_ADDTZ] = "addtz", + [TT_CMD_DELTZ] = "deltz", + [TT_CMD_TZADDTRIP] = "tzaddtrip", + [TT_CMD_TZREG] = "tzreg", + [TT_CMD_TZUNREG] = "tzunreg", +}; + +static int tt_command_exec(int index, const char *arg) +{ + int ret; + + switch (index) { + case TT_CMD_ADDTZ: + ret = tt_add_tz(); + break; + + case TT_CMD_DELTZ: + ret = tt_del_tz(arg); + break; + + case TT_CMD_TZADDTRIP: + ret = tt_zone_add_trip(arg); + break; + + case TT_CMD_TZREG: + ret = tt_zone_reg(arg); + break; + + case TT_CMD_TZUNREG: + ret = tt_zone_unreg(arg); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static ssize_t tt_command_process(struct dentry *dentry, const char __user *user_buf, + size_t count) +{ + char *buf __free(kfree); + char *arg; + int i; + + buf = kmalloc(count + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = '\0'; + strim(buf); + + arg = strstr(buf, ":"); + if (arg) { + *arg = '\0'; + arg++; + } + + for (i = 0; i < ARRAY_SIZE(tt_command_strings); i++) { + if (!strcmp(buf, tt_command_strings[i])) + return tt_command_exec(i, arg); + } + + return -EINVAL; +} + +static ssize_t tt_command_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + ssize_t ret; + + if (*ppos) + return -EINVAL; + + if (count + 1 > TT_COMMAND_SIZE) + return -E2BIG; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = tt_command_process(dentry, user_buf, count); + if (ret) + return ret; + + return count; +} + +static const struct file_operations tt_command_fops = { + .write = tt_command_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static int __init thermal_testing_init(void) +{ + d_testing = debugfs_create_dir("thermal-testing", NULL); + if (!IS_ERR(d_testing)) + debugfs_create_file("command", 0200, d_testing, NULL, + &tt_command_fops); + + return 0; +} +module_init(thermal_testing_init); + +static void __exit thermal_testing_exit(void) +{ + debugfs_remove(d_testing); + tt_zone_cleanup(); +} +module_exit(thermal_testing_exit); + +MODULE_DESCRIPTION("Thermal core testing facility"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/testing/thermal_testing.h b/drivers/thermal/testing/thermal_testing.h new file mode 100644 index 00000000000000..c790a32aae4ed7 --- /dev/null +++ b/drivers/thermal/testing/thermal_testing.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +extern struct dentry *d_testing; + +int tt_add_tz(void); +int tt_del_tz(const char *arg); +int tt_zone_add_trip(const char *arg); +int tt_zone_reg(const char *arg); +int tt_zone_unreg(const char *arg); + +void tt_zone_cleanup(void); diff --git a/drivers/thermal/testing/zone.c b/drivers/thermal/testing/zone.c new file mode 100644 index 00000000000000..c6d8c66f40f980 --- /dev/null +++ b/drivers/thermal/testing/zone.c @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2024, Intel Corporation + * + * Author: Rafael J. Wysocki + * + * Thermal zone tempalates handling for thermal core testing. 
+ */ + +#define pr_fmt(fmt) "thermal-testing: " fmt + +#include +#include +#include +#include +#include + +#include "thermal_testing.h" + +#define TT_MAX_FILE_NAME_LENGTH 16 + +/** + * struct tt_thermal_zone - Testing thermal zone template + * + * Represents a template of a thermal zone that can be used for registering + * a test thermal zone with the thermal core. + * + * @list_node: Node in the list of all testing thermal zone templates. + * @trips: List of trip point templates for this thermal zone template. + * @d_tt_zone: Directory in debugfs representing this template. + * @tz: Test thermal zone based on this template, if present. + * @lock: Mutex for synchronizing changes of this template. + * @ida: IDA for trip point IDs. + * @id: The ID of this template for the debugfs interface. + * @temp: Temperature value. + * @tz_temp: Current thermal zone temperature (after registration). + * @num_trips: Number of trip points in the @trips list. + * @refcount: Reference counter for usage and removal synchronization. + */ +struct tt_thermal_zone { + struct list_head list_node; + struct list_head trips; + struct dentry *d_tt_zone; + struct thermal_zone_device *tz; + struct mutex lock; + struct ida ida; + int id; + int temp; + int tz_temp; + unsigned int num_trips; + unsigned int refcount; +}; + +DEFINE_GUARD(tt_zone, struct tt_thermal_zone *, mutex_lock(&_T->lock), mutex_unlock(&_T->lock)) + +/** + * struct tt_trip - Testing trip point template + * + * Represents a template of a trip point to be used for populating a trip point + * during the registration of a thermal zone based on a given zone template. + * + * @list_node: Node in the list of all trip templates in the zone template. + * @trip: Trip point data to use for thermal zone registration. + * @id: The ID of this trip template for the debugfs interface. + */ +struct tt_trip { + struct list_head list_node; + struct thermal_trip trip; + int id; +}; + +/* + * It is both questionable and potentially problematic from the synchronization + * perspective to attempt to manipulate debugfs from within a debugfs file + * "write" operation, so auxiliary work items are used for that. The majority + * of zone-related command functions have a part that runs from a workqueue and + * makes changes in debugfs, among other things.
+ */ +struct tt_work { + struct work_struct work; + struct tt_thermal_zone *tt_zone; + struct tt_trip *tt_trip; +}; + +static inline struct tt_work *tt_work_of_work(struct work_struct *work) +{ + return container_of(work, struct tt_work, work); +} + +static LIST_HEAD(tt_thermal_zones); +static DEFINE_IDA(tt_thermal_zones_ida); +static DEFINE_MUTEX(tt_thermal_zones_lock); + +static int tt_int_get(void *data, u64 *val) +{ + *val = *(int *)data; + return 0; +} +static int tt_int_set(void *data, u64 val) +{ + if ((int)val < THERMAL_TEMP_INVALID) + return -EINVAL; + + *(int *)data = val; + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(tt_int_attr, tt_int_get, tt_int_set, "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(tt_unsigned_int_attr, tt_int_get, tt_int_set, "%llu\n"); + +static int tt_zone_tz_temp_get(void *data, u64 *val) +{ + struct tt_thermal_zone *tt_zone = data; + + guard(tt_zone)(tt_zone); + + if (!tt_zone->tz) + return -EBUSY; + + *val = tt_zone->tz_temp; + + return 0; +} +static int tt_zone_tz_temp_set(void *data, u64 val) +{ + struct tt_thermal_zone *tt_zone = data; + + guard(tt_zone)(tt_zone); + + if (!tt_zone->tz) + return -EBUSY; + + WRITE_ONCE(tt_zone->tz_temp, val); + thermal_zone_device_update(tt_zone->tz, THERMAL_EVENT_TEMP_SAMPLE); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(tt_zone_tz_temp_attr, tt_zone_tz_temp_get, + tt_zone_tz_temp_set, "%lld\n"); + +static void tt_zone_free_trips(struct tt_thermal_zone *tt_zone) +{ + struct tt_trip *tt_trip, *aux; + + list_for_each_entry_safe(tt_trip, aux, &tt_zone->trips, list_node) { + list_del(&tt_trip->list_node); + ida_free(&tt_zone->ida, tt_trip->id); + kfree(tt_trip); + } +} + +static void tt_zone_free(struct tt_thermal_zone *tt_zone) +{ + tt_zone_free_trips(tt_zone); + ida_free(&tt_thermal_zones_ida, tt_zone->id); + ida_destroy(&tt_zone->ida); + kfree(tt_zone); +} + +static void tt_add_tz_work_fn(struct work_struct *work) +{ + struct tt_work *tt_work = tt_work_of_work(work); + struct tt_thermal_zone *tt_zone = tt_work->tt_zone; + char f_name[TT_MAX_FILE_NAME_LENGTH]; + + kfree(tt_work); + + snprintf(f_name, TT_MAX_FILE_NAME_LENGTH, "tz%d", tt_zone->id); + tt_zone->d_tt_zone = debugfs_create_dir(f_name, d_testing); + if (IS_ERR(tt_zone->d_tt_zone)) { + tt_zone_free(tt_zone); + return; + } + + debugfs_create_file_unsafe("temp", 0600, tt_zone->d_tt_zone, tt_zone, + &tt_zone_tz_temp_attr); + + debugfs_create_file_unsafe("init_temp", 0600, tt_zone->d_tt_zone, + &tt_zone->temp, &tt_int_attr); + + guard(mutex)(&tt_thermal_zones_lock); + + list_add_tail(&tt_zone->list_node, &tt_thermal_zones); +} + +int tt_add_tz(void) +{ + struct tt_thermal_zone *tt_zone __free(kfree); + struct tt_work *tt_work __free(kfree); + int ret; + + tt_zone = kzalloc(sizeof(*tt_zone), GFP_KERNEL); + if (!tt_zone) + return -ENOMEM; + + tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL); + if (!tt_work) + return -ENOMEM; + + INIT_LIST_HEAD(&tt_zone->trips); + mutex_init(&tt_zone->lock); + ida_init(&tt_zone->ida); + tt_zone->temp = THERMAL_TEMP_INVALID; + + ret = ida_alloc(&tt_thermal_zones_ida, GFP_KERNEL); + if (ret < 0) + return ret; + + tt_zone->id = ret; + + INIT_WORK(&tt_work->work, tt_add_tz_work_fn); + tt_work->tt_zone = no_free_ptr(tt_zone); + schedule_work(&(no_free_ptr(tt_work)->work)); + + return 0; +} + +static void tt_del_tz_work_fn(struct work_struct *work) +{ + struct tt_work *tt_work = tt_work_of_work(work); + struct tt_thermal_zone *tt_zone = tt_work->tt_zone; + + kfree(tt_work); + + debugfs_remove(tt_zone->d_tt_zone); + tt_zone_free(tt_zone); +} + 
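+/*
+ * Locking convention used in this file: the guard(mutex)() and guard(tt_zone)()
+ * statements come from DEFINE_GUARD() (linux/cleanup.h). The lock is acquired
+ * when the guard goes into scope and released automatically when it goes out
+ * of scope, including on early returns. Roughly, as an illustration only,
+ *
+ *	guard(mutex)(&tt_thermal_zones_lock);
+ *	tt_zone->refcount--;
+ *
+ * behaves like
+ *
+ *	mutex_lock(&tt_thermal_zones_lock);
+ *	tt_zone->refcount--;
+ *	mutex_unlock(&tt_thermal_zones_lock);
+ *
+ * with the unlock guaranteed to run at the end of the enclosing scope.
+ */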
+static void tt_zone_unregister_tz(struct tt_thermal_zone *tt_zone) +{ + guard(tt_zone)(tt_zone); + + if (tt_zone->tz) { + thermal_zone_device_unregister(tt_zone->tz); + tt_zone->tz = NULL; + } +} + +int tt_del_tz(const char *arg) +{ + struct tt_work *tt_work __free(kfree); + struct tt_thermal_zone *tt_zone, *aux; + int ret; + int id; + + ret = sscanf(arg, "%d", &id); + if (ret != 1) + return -EINVAL; + + tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL); + if (!tt_work) + return -ENOMEM; + + guard(mutex)(&tt_thermal_zones_lock); + + ret = -EINVAL; + list_for_each_entry_safe(tt_zone, aux, &tt_thermal_zones, list_node) { + if (tt_zone->id == id) { + if (tt_zone->refcount) { + ret = -EBUSY; + } else { + list_del(&tt_zone->list_node); + ret = 0; + } + break; + } + } + + if (ret) + return ret; + + tt_zone_unregister_tz(tt_zone); + + INIT_WORK(&tt_work->work, tt_del_tz_work_fn); + tt_work->tt_zone = tt_zone; + schedule_work(&(no_free_ptr(tt_work)->work)); + + return 0; +} + +static struct tt_thermal_zone *tt_get_tt_zone(const char *arg) +{ + struct tt_thermal_zone *tt_zone; + int ret, id; + + ret = sscanf(arg, "%d", &id); + if (ret != 1) + return ERR_PTR(-EINVAL); + + guard(mutex)(&tt_thermal_zones_lock); + + ret = -EINVAL; + list_for_each_entry(tt_zone, &tt_thermal_zones, list_node) { + if (tt_zone->id == id) { + tt_zone->refcount++; + ret = 0; + break; + } + } + + if (ret) + return ERR_PTR(ret); + + return tt_zone; +} + +static void tt_put_tt_zone(struct tt_thermal_zone *tt_zone) +{ + guard(mutex)(&tt_thermal_zones_lock); + + tt_zone->refcount--; +} + +static void tt_zone_add_trip_work_fn(struct work_struct *work) +{ + struct tt_work *tt_work = tt_work_of_work(work); + struct tt_thermal_zone *tt_zone = tt_work->tt_zone; + struct tt_trip *tt_trip = tt_work->tt_trip; + char d_name[TT_MAX_FILE_NAME_LENGTH]; + + kfree(tt_work); + + snprintf(d_name, TT_MAX_FILE_NAME_LENGTH, "trip_%d_temp", tt_trip->id); + debugfs_create_file_unsafe(d_name, 0600, tt_zone->d_tt_zone, + &tt_trip->trip.temperature, &tt_int_attr); + + snprintf(d_name, TT_MAX_FILE_NAME_LENGTH, "trip_%d_hyst", tt_trip->id); + debugfs_create_file_unsafe(d_name, 0600, tt_zone->d_tt_zone, + &tt_trip->trip.hysteresis, &tt_unsigned_int_attr); + + tt_put_tt_zone(tt_zone); +} + +int tt_zone_add_trip(const char *arg) +{ + struct tt_work *tt_work __free(kfree); + struct tt_trip *tt_trip __free(kfree); + struct tt_thermal_zone *tt_zone; + int id; + + tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL); + if (!tt_work) + return -ENOMEM; + + tt_trip = kzalloc(sizeof(*tt_trip), GFP_KERNEL); + if (!tt_trip) + return -ENOMEM; + + tt_zone = tt_get_tt_zone(arg); + if (IS_ERR(tt_zone)) + return PTR_ERR(tt_zone); + + id = ida_alloc(&tt_zone->ida, GFP_KERNEL); + if (id < 0) { + tt_put_tt_zone(tt_zone); + return id; + } + + tt_trip->trip.type = THERMAL_TRIP_ACTIVE; + tt_trip->trip.temperature = THERMAL_TEMP_INVALID; + tt_trip->trip.flags = THERMAL_TRIP_FLAG_RW; + tt_trip->id = id; + + guard(tt_zone)(tt_zone); + + list_add_tail(&tt_trip->list_node, &tt_zone->trips); + tt_zone->num_trips++; + + INIT_WORK(&tt_work->work, tt_zone_add_trip_work_fn); + tt_work->tt_zone = tt_zone; + tt_work->tt_trip = no_free_ptr(tt_trip); + schedule_work(&(no_free_ptr(tt_work)->work)); + + return 0; +} + +static int tt_zone_get_temp(struct thermal_zone_device *tz, int *temp) +{ + struct tt_thermal_zone *tt_zone = thermal_zone_device_priv(tz); + + *temp = READ_ONCE(tt_zone->tz_temp); + + if (*temp < THERMAL_TEMP_INVALID) + return -ENODATA; + + return 0; +} + +static struct 
thermal_zone_device_ops tt_zone_ops = { + .get_temp = tt_zone_get_temp, +}; + +static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone) +{ + struct thermal_trip *trips __free(kfree); + struct thermal_zone_device *tz; + struct tt_trip *tt_trip; + int i; + + guard(tt_zone)(tt_zone); + + if (tt_zone->tz) + return -EINVAL; + + trips = kcalloc(tt_zone->num_trips, sizeof(*trips), GFP_KERNEL); + if (!trips) + return -ENOMEM; + + i = 0; + list_for_each_entry(tt_trip, &tt_zone->trips, list_node) + trips[i++] = tt_trip->trip; + + tt_zone->tz_temp = tt_zone->temp; + + tz = thermal_zone_device_register_with_trips("test_tz", trips, i, tt_zone, + &tt_zone_ops, NULL, 0, 0); + if (IS_ERR(tz)) + return PTR_ERR(tz); + + tt_zone->tz = tz; + + thermal_zone_device_enable(tz); + + return 0; +} + +int tt_zone_reg(const char *arg) +{ + struct tt_thermal_zone *tt_zone; + int ret; + + tt_zone = tt_get_tt_zone(arg); + if (IS_ERR(tt_zone)) + return PTR_ERR(tt_zone); + + ret = tt_zone_register_tz(tt_zone); + + tt_put_tt_zone(tt_zone); + + return ret; +} + +int tt_zone_unreg(const char *arg) +{ + struct tt_thermal_zone *tt_zone; + + tt_zone = tt_get_tt_zone(arg); + if (IS_ERR(tt_zone)) + return PTR_ERR(tt_zone); + + tt_zone_unregister_tz(tt_zone); + + tt_put_tt_zone(tt_zone); + + return 0; +} + +void tt_zone_cleanup(void) +{ + struct tt_thermal_zone *tt_zone, *aux; + + list_for_each_entry_safe(tt_zone, aux, &tt_thermal_zones, list_node) { + tt_zone_unregister_tz(tt_zone); + + list_del(&tt_zone->list_node); + + tt_zone_free(tt_zone); + } +} diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index e6669aeda1fff0..073d02e21352e6 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -323,11 +323,10 @@ static void thermal_zone_broken_disable(struct thermal_zone_device *tz) static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, unsigned long delay) { - if (delay) - mod_delayed_work(system_freezable_power_efficient_wq, - &tz->poll_queue, delay); - else - cancel_delayed_work(&tz->poll_queue); + if (delay > HZ) + delay = round_jiffies_relative(delay); + + mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, delay); } static void thermal_zone_recheck(struct thermal_zone_device *tz, int error) @@ -360,9 +359,7 @@ static void thermal_zone_recheck(struct thermal_zone_device *tz, int error) static void monitor_thermal_zone(struct thermal_zone_device *tz) { - if (tz->mode != THERMAL_DEVICE_ENABLED) - thermal_zone_device_set_polling(tz, 0); - else if (tz->passive > 0) + if (tz->passive > 0 && tz->passive_delay_jiffies) thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies); else if (tz->polling_delay_jiffies) thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies); @@ -547,12 +544,10 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, struct thermal_trip_desc *td; LIST_HEAD(way_down_list); LIST_HEAD(way_up_list); + int low = -INT_MAX, high = INT_MAX; int temp, ret; - if (tz->suspended) - return; - - if (!thermal_zone_device_is_enabled(tz)) + if (tz->suspended || tz->mode != THERMAL_DEVICE_ENABLED) return; ret = __thermal_zone_get_temp(tz, &temp); @@ -580,10 +575,17 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, tz->notify_event = event; - for_each_trip_desc(tz, td) + for_each_trip_desc(tz, td) { handle_thermal_trip(tz, td, &way_up_list, &way_down_list); - thermal_zone_set_trips(tz); + if (td->threshold <= tz->temperature && td->threshold > low) + low = td->threshold; + + if 
(td->threshold >= tz->temperature && td->threshold < high) + high = td->threshold; + } + + thermal_zone_set_trips(tz, low, high); list_sort(NULL, &way_up_list, thermal_trip_notify_cmp); list_for_each_entry(td, &way_up_list, notify_list_node) @@ -647,13 +649,6 @@ int thermal_zone_device_disable(struct thermal_zone_device *tz) } EXPORT_SYMBOL_GPL(thermal_zone_device_disable); -int thermal_zone_device_is_enabled(struct thermal_zone_device *tz) -{ - lockdep_assert_held(&tz->lock); - - return tz->mode == THERMAL_DEVICE_ENABLED; -} - static bool thermal_zone_is_present(struct thermal_zone_device *tz) { return !list_empty(&tz->node); @@ -757,15 +752,7 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id) * @tz: pointer to struct thermal_zone_device * @trip: trip point the cooling devices is associated with in this zone. * @cdev: pointer to struct thermal_cooling_device - * @upper: the Maximum cooling state for this trip point. - * THERMAL_NO_LIMIT means no upper limit, - * and the cooling device can be in max_state. - * @lower: the Minimum cooling state can be used for this trip point. - * THERMAL_NO_LIMIT means no lower limit, - * and the cooling device can be in cooling state 0. - * @weight: The weight of the cooling device to be bound to the - * thermal zone. Use THERMAL_WEIGHT_DEFAULT for the - * default value + * @cool_spec: cooling specification for @trip and @cdev * * This interface function bind a thermal cooling device to the certain trip * point of a thermal zone device. @@ -773,55 +760,41 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id) * * Return: 0 on success, the proper error value otherwise. */ -int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, +static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, const struct thermal_trip *trip, struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower, - unsigned int weight) + struct cooling_spec *cool_spec) { struct thermal_instance *dev; struct thermal_instance *pos; - struct thermal_zone_device *pos1; - struct thermal_cooling_device *pos2; bool upper_no_limit; int result; - list_for_each_entry(pos1, &thermal_tz_list, node) { - if (pos1 == tz) - break; - } - list_for_each_entry(pos2, &thermal_cdev_list, node) { - if (pos2 == cdev) - break; - } - - if (tz != pos1 || cdev != pos2) - return -EINVAL; - /* lower default 0, upper default max_state */ - lower = lower == THERMAL_NO_LIMIT ? 
0 : lower; + if (cool_spec->lower == THERMAL_NO_LIMIT) + cool_spec->lower = 0; - if (upper == THERMAL_NO_LIMIT) { - upper = cdev->max_state; + if (cool_spec->upper == THERMAL_NO_LIMIT) { + cool_spec->upper = cdev->max_state; upper_no_limit = true; } else { upper_no_limit = false; } - if (lower > upper || upper > cdev->max_state) + if (cool_spec->lower > cool_spec->upper || cool_spec->upper > cdev->max_state) return -EINVAL; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; - dev->tz = tz; + dev->cdev = cdev; dev->trip = trip; - dev->upper = upper; + dev->upper = cool_spec->upper; dev->upper_no_limit = upper_no_limit; - dev->lower = lower; + dev->lower = cool_spec->lower; dev->target = THERMAL_NO_TARGET; - dev->weight = weight; + dev->weight = cool_spec->weight; result = ida_alloc(&tz->ida, GFP_KERNEL); if (result < 0) @@ -855,10 +828,9 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, if (result) goto remove_trip_file; - mutex_lock(&tz->lock); mutex_lock(&cdev->lock); list_for_each_entry(pos, &tz->thermal_instances, tz_node) - if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { + if (pos->trip == trip && pos->cdev == cdev) { result = -EEXIST; break; } @@ -870,7 +842,6 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, thermal_governor_update_tz(tz, THERMAL_TZ_BIND_CDEV); } mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); if (!result) return 0; @@ -886,21 +857,6 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, kfree(dev); return result; } -EXPORT_SYMBOL_GPL(thermal_bind_cdev_to_trip); - -int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, - int trip_index, - struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower, - unsigned int weight) -{ - if (trip_index < 0 || trip_index >= tz->num_trips) - return -EINVAL; - - return thermal_bind_cdev_to_trip(tz, &tz->trips[trip_index].trip, cdev, - upper, lower, weight); -} -EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device); /** * thermal_unbind_cdev_from_trip - unbind a cooling device from a thermal zone. @@ -911,33 +867,28 @@ EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device); * This interface function unbind a thermal cooling device from the certain * trip point of a thermal zone device. * This function is usually called in the thermal zone device .unbind callback. - * - * Return: 0 on success, the proper error value otherwise. 
*/ -int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz, - const struct thermal_trip *trip, - struct thermal_cooling_device *cdev) +static void thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev) { struct thermal_instance *pos, *next; - mutex_lock(&tz->lock); mutex_lock(&cdev->lock); list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) { - if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { + if (pos->trip == trip && pos->cdev == cdev) { list_del(&pos->tz_node); list_del(&pos->cdev_node); thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV); mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); goto unbind; } } mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); - return -ENODEV; + return; unbind: device_remove_file(&tz->device, &pos->weight_attr); @@ -945,20 +896,7 @@ int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz, sysfs_remove_link(&tz->device.kobj, pos->name); ida_free(&tz->ida, pos->id); kfree(pos); - return 0; } -EXPORT_SYMBOL_GPL(thermal_unbind_cdev_from_trip); - -int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, - int trip_index, - struct thermal_cooling_device *cdev) -{ - if (trip_index < 0 || trip_index >= tz->num_trips) - return -EINVAL; - - return thermal_unbind_cdev_from_trip(tz, &tz->trips[trip_index].trip, cdev); -} -EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device); static void thermal_release(struct device *dev) { @@ -985,24 +923,41 @@ static struct class *thermal_class; static inline void print_bind_err_msg(struct thermal_zone_device *tz, + const struct thermal_trip *trip, struct thermal_cooling_device *cdev, int ret) { - dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n", - tz->type, cdev->type, ret); + dev_err(&tz->device, "binding cdev %s to trip %d failed: %d\n", + cdev->type, thermal_zone_trip_id(tz, trip), ret); } -static void bind_cdev(struct thermal_cooling_device *cdev) +static void thermal_zone_cdev_bind(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev) { - int ret; - struct thermal_zone_device *pos = NULL; + struct thermal_trip_desc *td; - list_for_each_entry(pos, &thermal_tz_list, node) { - if (pos->ops.bind) { - ret = pos->ops.bind(pos, cdev); - if (ret) - print_bind_err_msg(pos, cdev, ret); - } + if (!tz->ops.should_bind) + return; + + mutex_lock(&tz->lock); + + for_each_trip_desc(tz, td) { + struct thermal_trip *trip = &td->trip; + struct cooling_spec c = { + .upper = THERMAL_NO_LIMIT, + .lower = THERMAL_NO_LIMIT, + .weight = THERMAL_WEIGHT_DEFAULT + }; + int ret; + + if (!tz->ops.should_bind(tz, trip, cdev, &c)) + continue; + + ret = thermal_bind_cdev_to_trip(tz, trip, cdev, &c); + if (ret) + print_bind_err_msg(tz, trip, cdev, ret); } + + mutex_unlock(&tz->lock); } /** @@ -1100,7 +1055,8 @@ __thermal_cooling_device_register(struct device_node *np, list_add(&cdev->node, &thermal_cdev_list); /* Update binding information for 'this' new cdev */ - bind_cdev(cdev); + list_for_each_entry(pos, &thermal_tz_list, node) + thermal_zone_cdev_bind(pos, cdev); list_for_each_entry(pos, &thermal_tz_list, node) if (atomic_cmpxchg(&pos->need_update, 1, 0)) @@ -1301,6 +1257,19 @@ void thermal_cooling_device_update(struct thermal_cooling_device *cdev) } EXPORT_SYMBOL_GPL(thermal_cooling_device_update); +static void thermal_zone_cdev_unbind(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev) +{ + struct thermal_trip_desc *td; + + mutex_lock(&tz->lock); + + 
for_each_trip_desc(tz, td) + thermal_unbind_cdev_from_trip(tz, &td->trip, cdev); + + mutex_unlock(&tz->lock); +} + /** * thermal_cooling_device_unregister - removes a thermal cooling device * @cdev: the thermal cooling device to remove. @@ -1327,10 +1296,8 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) list_del(&cdev->node); /* Unbind all thermal zones associated with 'this' cdev */ - list_for_each_entry(tz, &thermal_tz_list, node) { - if (tz->ops.unbind) - tz->ops.unbind(tz, cdev); - } + list_for_each_entry(tz, &thermal_tz_list, node) + thermal_zone_cdev_unbind(tz, cdev); mutex_unlock(&thermal_list_lock); @@ -1338,32 +1305,6 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); -static void bind_tz(struct thermal_zone_device *tz) -{ - int ret; - struct thermal_cooling_device *pos = NULL; - - if (!tz->ops.bind) - return; - - mutex_lock(&thermal_list_lock); - - list_for_each_entry(pos, &thermal_cdev_list, node) { - ret = tz->ops.bind(tz, pos); - if (ret) - print_bind_err_msg(tz, pos, ret); - } - - mutex_unlock(&thermal_list_lock); -} - -static void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms) -{ - *delay_jiffies = msecs_to_jiffies(delay_ms); - if (delay_ms > 1000) - *delay_jiffies = round_jiffies(*delay_jiffies); -} - int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp) { const struct thermal_trip_desc *td; @@ -1424,6 +1365,7 @@ thermal_zone_device_register_with_trips(const char *type, unsigned int polling_delay) { const struct thermal_trip *trip = trips; + struct thermal_cooling_device *cdev; struct thermal_zone_device *tz; struct thermal_trip_desc *td; int id; @@ -1447,20 +1389,15 @@ thermal_zone_device_register_with_trips(const char *type, } if (!ops || !ops->get_temp) { - pr_err("Thermal zone device ops not defined\n"); + pr_err("Thermal zone device ops not defined or invalid\n"); return ERR_PTR(-EINVAL); } if (num_trips > 0 && !trips) return ERR_PTR(-EINVAL); - if (polling_delay) { - if (passive_delay > polling_delay) - return ERR_PTR(-EINVAL); - - if (!passive_delay) - passive_delay = polling_delay; - } + if (polling_delay && passive_delay > polling_delay) + return ERR_PTR(-EINVAL); if (!thermal_class) return ERR_PTR(-ENODEV); @@ -1509,8 +1446,8 @@ thermal_zone_device_register_with_trips(const char *type, td->threshold = INT_MAX; } - thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay); - thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay); + tz->polling_delay_jiffies = msecs_to_jiffies(polling_delay); + tz->passive_delay_jiffies = msecs_to_jiffies(passive_delay); tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY; /* sys I/F */ @@ -1554,13 +1491,16 @@ thermal_zone_device_register_with_trips(const char *type, } mutex_lock(&thermal_list_lock); + mutex_lock(&tz->lock); list_add_tail(&tz->node, &thermal_tz_list); mutex_unlock(&tz->lock); - mutex_unlock(&thermal_list_lock); /* Bind cooling devices for this zone */ - bind_tz(tz); + list_for_each_entry(cdev, &thermal_cdev_list, node) + thermal_zone_cdev_bind(tz, cdev); + + mutex_unlock(&thermal_list_lock); thermal_zone_device_init(tz); /* Update the new thermal zone and mark it as already updated. 
*/ @@ -1652,8 +1592,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) /* Unbind all cdevs associated with 'this' thermal zone */ list_for_each_entry(cdev, &thermal_cdev_list, node) - if (tz->ops.unbind) - tz->ops.unbind(tz, cdev); + thermal_zone_cdev_unbind(tz, cdev); mutex_unlock(&thermal_list_lock); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 4cf2b7230d04bb..50b858aa173a07 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -15,8 +15,20 @@ #include "thermal_netlink.h" #include "thermal_debugfs.h" +struct thermal_attr { + struct device_attribute attr; + char name[THERMAL_NAME_LENGTH]; +}; + +struct thermal_trip_attrs { + struct thermal_attr type; + struct thermal_attr temp; + struct thermal_attr hyst; +}; + struct thermal_trip_desc { struct thermal_trip trip; + struct thermal_trip_attrs trip_attrs; struct list_head notify_list_node; int notify_temp; int threshold; @@ -56,9 +68,6 @@ struct thermal_governor { * @device: &struct device for this thermal zone * @removal: removal completion * @resume: resume completion - * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature - * @trip_type_attrs: attributes for trip points for sysfs: trip type - * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis * @mode: current mode of this thermal zone * @devdata: private pointer for device private data * @num_trips: number of trip points the thermal zone supports @@ -102,9 +111,6 @@ struct thermal_zone_device { struct completion removal; struct completion resume; struct attribute_group trips_attribute_group; - struct thermal_attr *trip_temp_attrs; - struct thermal_attr *trip_type_attrs; - struct thermal_attr *trip_hyst_attrs; enum thermal_device_mode mode; void *devdata; int num_trips; @@ -188,11 +194,6 @@ int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *), struct thermal_zone_device *thermal_zone_get_by_id(int id); -struct thermal_attr { - struct device_attribute attr; - char name[THERMAL_NAME_LENGTH]; -}; - static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) { return cdev->ops->get_requested_power && cdev->ops->state2power && @@ -204,11 +205,6 @@ void __thermal_cdev_update(struct thermal_cooling_device *cdev); int get_tz_trend(struct thermal_zone_device *tz, const struct thermal_trip *trip); -struct thermal_instance * -get_thermal_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, - int trip); - /* * This structure is used to describe the behavior of * a certain cooling device on a certain trip point @@ -217,7 +213,6 @@ get_thermal_instance(struct thermal_zone_device *tz, struct thermal_instance { int id; char name[THERMAL_NAME_LENGTH]; - struct thermal_zone_device *tz; struct thermal_cooling_device *cdev; const struct thermal_trip *trip; bool initialized; @@ -259,14 +254,14 @@ void thermal_governor_update_tz(struct thermal_zone_device *tz, const char *thermal_trip_type_name(enum thermal_trip_type trip_type); -void thermal_zone_set_trips(struct thermal_zone_device *tz); +void thermal_zone_set_trips(struct thermal_zone_device *tz, int low, int high); int thermal_zone_trip_id(const struct thermal_zone_device *tz, const struct thermal_trip *trip); -void thermal_zone_trip_updated(struct thermal_zone_device *tz, - const struct thermal_trip *trip); int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); void thermal_zone_trip_down(struct thermal_zone_device *tz, const struct 
thermal_trip *trip); +void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz, + struct thermal_trip *trip, int hyst); /* sysfs I/F */ int thermal_zone_create_device_groups(struct thermal_zone_device *tz); @@ -289,7 +284,4 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, unsigned long new_state) {} #endif /* CONFIG_THERMAL_STATISTICS */ -/* device tree support */ -int thermal_zone_device_is_enabled(struct thermal_zone_device *tz); - #endif /* __THERMAL_CORE_H__ */ diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index aedb8369e2aa2d..dc374a7a1a659f 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -39,18 +39,18 @@ int get_tz_trend(struct thermal_zone_device *tz, const struct thermal_trip *trip return trend; } -static struct thermal_instance *get_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, - const struct thermal_trip *trip) +static bool thermal_instance_present(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, + const struct thermal_trip *trip) { struct thermal_instance *ti; list_for_each_entry(ti, &tz->thermal_instances, tz_node) { if (ti->trip == trip && ti->cdev == cdev) - return ti; + return true; } - return NULL; + return false; } bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz, @@ -62,7 +62,7 @@ bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz, mutex_lock(&tz->lock); mutex_lock(&cdev->lock); - ret = !!get_instance(tz, cdev, trip); + ret = thermal_instance_present(tz, cdev, trip); mutex_unlock(&cdev->lock); mutex_unlock(&tz->lock); @@ -71,24 +71,6 @@ bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz, } EXPORT_SYMBOL_GPL(thermal_trip_is_bound_to_cdev); -struct thermal_instance * -get_thermal_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int trip_index) -{ - struct thermal_instance *ti; - - mutex_lock(&tz->lock); - mutex_lock(&cdev->lock); - - ti = get_instance(tz, cdev, &tz->trips[trip_index].trip); - - mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); - - return ti; -} -EXPORT_SYMBOL(get_thermal_instance); - /** * __thermal_zone_get_temp() - returns the temperature of a thermal zone * @tz: a valid pointer to a struct thermal_zone_device @@ -199,8 +181,6 @@ void __thermal_cdev_update(struct thermal_cooling_device *cdev) /* Make sure cdev enters the deepest cooling state */ list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { - dev_dbg(&cdev->device, "zone%d->target=%lu\n", - instance->tz->id, instance->target); if (instance->target == THERMAL_NO_TARGET) continue; if (instance->target > target) diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 1f252692815a18..a4caf7899f8e4a 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -20,37 +20,6 @@ /*** functions parsing device tree nodes ***/ -static int of_find_trip_id(struct device_node *np, struct device_node *trip) -{ - struct device_node *trips; - struct device_node *t; - int i = 0; - - trips = of_get_child_by_name(np, "trips"); - if (!trips) { - pr_err("Failed to find 'trips' node\n"); - return -EINVAL; - } - - /* - * Find the trip id point associated with the cooling device map - */ - for_each_child_of_node(trips, t) { - - if (t == trip) { - of_node_put(t); - goto out; - } - i++; - } - - i = -ENXIO; -out: - of_node_put(trips); - - return i; -} - /* * It maps 'enum thermal_trip_type' found in include/linux/thermal.h * 
into the device tree binding of 'trip', property type. @@ -119,6 +88,8 @@ static int thermal_of_populate_trip(struct device_node *np, trip->flags = THERMAL_TRIP_FLAG_RW_TEMP; + trip->priv = np; + return 0; } @@ -291,39 +262,9 @@ static struct device_node *thermal_of_zone_get_by_name(struct thermal_zone_devic return tz_np; } -static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_id, - struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) -{ - struct of_phandle_args cooling_spec; - int ret; - - ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells", - index, &cooling_spec); - - if (ret < 0) { - pr_err("Invalid cooling-device entry\n"); - return ret; - } - - of_node_put(cooling_spec.np); - - if (cooling_spec.args_count < 2) { - pr_err("wrong reference to cooling device, missing limits\n"); - return -EINVAL; - } - - if (cooling_spec.np != cdev->np) - return 0; - - ret = thermal_zone_unbind_cooling_device(tz, trip_id, cdev); - if (ret) - pr_err("Failed to unbind '%s' with '%s': %d\n", tz->type, cdev->type, ret); - - return ret; -} - -static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id, - struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) +static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) { struct of_phandle_args cooling_spec; int ret, weight = THERMAL_WEIGHT_DEFAULT; @@ -335,104 +276,73 @@ static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id, if (ret < 0) { pr_err("Invalid cooling-device entry\n"); - return ret; + return false; } of_node_put(cooling_spec.np); if (cooling_spec.args_count < 2) { pr_err("wrong reference to cooling device, missing limits\n"); - return -EINVAL; + return false; } if (cooling_spec.np != cdev->np) - return 0; + return false; - ret = thermal_zone_bind_cooling_device(tz, trip_id, cdev, cooling_spec.args[1], - cooling_spec.args[0], - weight); - if (ret) - pr_err("Failed to bind '%s' with '%s': %d\n", tz->type, cdev->type, ret); + c->lower = cooling_spec.args[0]; + c->upper = cooling_spec.args[1]; + c->weight = weight; - return ret; + return true; } -static int thermal_of_for_each_cooling_device(struct device_node *tz_np, struct device_node *map_np, - struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, - int (*action)(struct device_node *, int, int, - struct thermal_zone_device *, struct thermal_cooling_device *)) -{ - struct device_node *tr_np; - int count, i, trip_id; - - tr_np = of_parse_phandle(map_np, "trip", 0); - if (!tr_np) - return -ENODEV; - - trip_id = of_find_trip_id(tz_np, tr_np); - if (trip_id < 0) - return trip_id; - - count = of_count_phandle_with_args(map_np, "cooling-device", "#cooling-cells"); - if (count <= 0) { - pr_err("Add a cooling_device property with at least one device\n"); - return -ENOENT; - } - - /* - * At this point, we don't want to bail out when there is an - * error, we will try to bind/unbind as many as possible - * cooling devices - */ - for (i = 0; i < count; i++) - action(map_np, i, trip_id, tz, cdev); - - return 0; -} - -static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, - int (*action)(struct device_node *, int, int, - struct thermal_zone_device *, struct thermal_cooling_device *)) +static bool thermal_of_should_bind(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct 
cooling_spec *c) { struct device_node *tz_np, *cm_np, *child; - int ret = 0; + bool result = false; tz_np = thermal_of_zone_get_by_name(tz); if (IS_ERR(tz_np)) { pr_err("Failed to get node tz by name\n"); - return PTR_ERR(tz_np); + return false; } cm_np = of_get_child_by_name(tz_np, "cooling-maps"); if (!cm_np) goto out; + /* Look up the trip and the cdev in the cooling maps. */ for_each_child_of_node(cm_np, child) { - ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action); - if (ret) { - of_node_put(child); - break; + struct device_node *tr_np; + int count, i; + + tr_np = of_parse_phandle(child, "trip", 0); + if (tr_np != trip->priv) + continue; + + /* The trip has been found, look up the cdev. */ + count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells"); + if (count <= 0) + pr_err("Add a cooling_device property with at least one device\n"); + + for (i = 0; i < count; i++) { + result = thermal_of_get_cooling_spec(child, i, cdev, c); + if (result) + break; } + + of_node_put(child); + break; } of_node_put(cm_np); out: of_node_put(tz_np); - return ret; -} - -static int thermal_of_bind(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev) -{ - return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_bind); -} - -static int thermal_of_unbind(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev) -{ - return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_unbind); + return result; } /** @@ -504,8 +414,7 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node * thermal_of_parameters_init(np, &tzp); - of_ops.bind = thermal_of_bind; - of_ops.unbind = thermal_of_unbind; + of_ops.should_bind = thermal_of_should_bind; ret = of_property_read_string(np, "critical-action", &action); if (!ret) diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 72b302bf914e31..1838aa729bb50a 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -52,7 +53,7 @@ mode_show(struct device *dev, struct device_attribute *attr, char *buf) int enabled; mutex_lock(&tz->lock); - enabled = thermal_zone_device_is_enabled(tz); + enabled = tz->mode == THERMAL_DEVICE_ENABLED; mutex_unlock(&tz->lock); return sprintf(buf, "%s\n", enabled ? 
"enabled" : "disabled"); @@ -78,51 +79,58 @@ mode_store(struct device *dev, struct device_attribute *attr, return count; } +#define thermal_trip_of_attr(_ptr_, _attr_) \ + ({ \ + struct thermal_trip_desc *td; \ + \ + td = container_of(_ptr_, struct thermal_trip_desc, \ + trip_attrs._attr_.attr); \ + &td->trip; \ + }) + static ssize_t trip_point_type_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip_id; + struct thermal_trip *trip = thermal_trip_of_attr(attr, type); - if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1) - return -EINVAL; - - return sprintf(buf, "%s\n", thermal_trip_type_name(tz->trips[trip_id].trip.type)); + return sprintf(buf, "%s\n", thermal_trip_type_name(trip->type)); } static ssize_t trip_point_temp_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct thermal_trip *trip = thermal_trip_of_attr(attr, temp); struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip *trip; - int trip_id, ret; - int temp; + int ret, temp; ret = kstrtoint(buf, 10, &temp); if (ret) return -EINVAL; - if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1) - return -EINVAL; - mutex_lock(&tz->lock); - trip = &tz->trips[trip_id].trip; - - if (temp != trip->temperature) { - if (tz->ops.set_trip_temp) { - ret = tz->ops.set_trip_temp(tz, trip, temp); - if (ret) - goto unlock; - } + if (temp == trip->temperature) + goto unlock; - thermal_zone_set_trip_temp(tz, trip, temp); + /* Arrange the condition to avoid integer overflows. */ + if (temp != THERMAL_TEMP_INVALID && + temp <= trip->hysteresis + THERMAL_TEMP_INVALID) { + ret = -EINVAL; + goto unlock; + } - __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); + if (tz->ops.set_trip_temp) { + ret = tz->ops.set_trip_temp(tz, trip, temp); + if (ret) + goto unlock; } + thermal_zone_set_trip_temp(tz, trip, temp); + + __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); + unlock: mutex_unlock(&tz->lock); @@ -133,57 +141,61 @@ static ssize_t trip_point_temp_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip_id; - - if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1) - return -EINVAL; + struct thermal_trip *trip = thermal_trip_of_attr(attr, temp); - return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.temperature)); + return sprintf(buf, "%d\n", READ_ONCE(trip->temperature)); } static ssize_t trip_point_hyst_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct thermal_trip *trip = thermal_trip_of_attr(attr, hyst); struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip *trip; - int trip_id, ret; - int hyst; + int ret, hyst; ret = kstrtoint(buf, 10, &hyst); if (ret || hyst < 0) return -EINVAL; - if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1) - return -EINVAL; - mutex_lock(&tz->lock); - trip = &tz->trips[trip_id].trip; + if (hyst == trip->hysteresis) + goto unlock; - if (hyst != trip->hysteresis) { + /* + * Allow the hysteresis to be updated when the temperature is invalid + * to allow user space to avoid having to adjust hysteresis after a + * valid temperature has been set, but in that case just change the + * value and do nothing else. 
+ */ + if (trip->temperature == THERMAL_TEMP_INVALID) { WRITE_ONCE(trip->hysteresis, hyst); + goto unlock; + } - thermal_zone_trip_updated(tz, trip); + if (trip->temperature - hyst <= THERMAL_TEMP_INVALID) { + ret = -EINVAL; + goto unlock; } + thermal_zone_set_trip_hyst(tz, trip, hyst); + + __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); + +unlock: mutex_unlock(&tz->lock); - return count; + return ret ? ret : count; } static ssize_t trip_point_hyst_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip_id; + struct thermal_trip *trip = thermal_trip_of_attr(attr, hyst); - if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1) - return -EINVAL; - - return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.hysteresis)); + return sprintf(buf, "%d\n", READ_ONCE(trip->hysteresis)); } static ssize_t @@ -382,87 +394,55 @@ static const struct attribute_group *thermal_zone_attribute_groups[] = { */ static int create_trip_attrs(struct thermal_zone_device *tz) { - const struct thermal_trip_desc *td; + struct thermal_trip_desc *td; struct attribute **attrs; - - /* This function works only for zones with at least one trip */ - if (tz->num_trips <= 0) - return -EINVAL; - - tz->trip_type_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_type_attrs), - GFP_KERNEL); - if (!tz->trip_type_attrs) - return -ENOMEM; - - tz->trip_temp_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_temp_attrs), - GFP_KERNEL); - if (!tz->trip_temp_attrs) { - kfree(tz->trip_type_attrs); - return -ENOMEM; - } - - tz->trip_hyst_attrs = kcalloc(tz->num_trips, - sizeof(*tz->trip_hyst_attrs), - GFP_KERNEL); - if (!tz->trip_hyst_attrs) { - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - return -ENOMEM; - } + int i; attrs = kcalloc(tz->num_trips * 3 + 1, sizeof(*attrs), GFP_KERNEL); - if (!attrs) { - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); + if (!attrs) return -ENOMEM; - } + i = 0; for_each_trip_desc(tz, td) { - int indx = thermal_zone_trip_id(tz, &td->trip); + struct thermal_trip_attrs *trip_attrs = &td->trip_attrs; /* create trip type attribute */ - snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_type", indx); + snprintf(trip_attrs->type.name, THERMAL_NAME_LENGTH, + "trip_point_%d_type", i); - sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr); - tz->trip_type_attrs[indx].attr.attr.name = - tz->trip_type_attrs[indx].name; - tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_type_attrs[indx].attr.show = trip_point_type_show; - attrs[indx] = &tz->trip_type_attrs[indx].attr.attr; + sysfs_attr_init(&trip_attrs->type.attr.attr); + trip_attrs->type.attr.attr.name = trip_attrs->type.name; + trip_attrs->type.attr.attr.mode = S_IRUGO; + trip_attrs->type.attr.show = trip_point_type_show; + attrs[i] = &trip_attrs->type.attr.attr; /* create trip temp attribute */ - snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_temp", indx); - - sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr); - tz->trip_temp_attrs[indx].attr.attr.name = - tz->trip_temp_attrs[indx].name; - tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; + snprintf(trip_attrs->temp.name, THERMAL_NAME_LENGTH, + "trip_point_%d_temp", i); + + sysfs_attr_init(&trip_attrs->temp.attr.attr); + trip_attrs->temp.attr.attr.name = trip_attrs->temp.name; + trip_attrs->temp.attr.attr.mode = S_IRUGO; + 
trip_attrs->temp.attr.show = trip_point_temp_show; if (td->trip.flags & THERMAL_TRIP_FLAG_RW_TEMP) { - tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; - tz->trip_temp_attrs[indx].attr.store = - trip_point_temp_store; + trip_attrs->temp.attr.attr.mode |= S_IWUSR; + trip_attrs->temp.attr.store = trip_point_temp_store; } - attrs[indx + tz->num_trips] = &tz->trip_temp_attrs[indx].attr.attr; + attrs[i + tz->num_trips] = &trip_attrs->temp.attr.attr; - snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_hyst", indx); + snprintf(trip_attrs->hyst.name, THERMAL_NAME_LENGTH, + "trip_point_%d_hyst", i); - sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr); - tz->trip_hyst_attrs[indx].attr.attr.name = - tz->trip_hyst_attrs[indx].name; - tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show; + sysfs_attr_init(&trip_attrs->hyst.attr.attr); + trip_attrs->hyst.attr.attr.name = trip_attrs->hyst.name; + trip_attrs->hyst.attr.attr.mode = S_IRUGO; + trip_attrs->hyst.attr.show = trip_point_hyst_show; if (td->trip.flags & THERMAL_TRIP_FLAG_RW_HYST) { - tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR; - tz->trip_hyst_attrs[indx].attr.store = - trip_point_hyst_store; + trip_attrs->hyst.attr.attr.mode |= S_IWUSR; + trip_attrs->hyst.attr.store = trip_point_hyst_store; } - attrs[indx + tz->num_trips * 2] = - &tz->trip_hyst_attrs[indx].attr.attr; + attrs[i + 2 * tz->num_trips] = &trip_attrs->hyst.attr.attr; + i++; } attrs[tz->num_trips * 3] = NULL; @@ -479,13 +459,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz) */ static void destroy_trip_attrs(struct thermal_zone_device *tz) { - if (!tz) - return; - - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); - kfree(tz->trips_attribute_group.attrs); + if (tz) + kfree(tz->trips_attribute_group.attrs); } int thermal_zone_create_device_groups(struct thermal_zone_device *tz) @@ -887,13 +862,12 @@ void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev) ssize_t trip_point_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct thermal_zone_device *tz = to_thermal_zone(dev); struct thermal_instance *instance; - instance = - container_of(attr, struct thermal_instance, attr); + instance = container_of(attr, struct thermal_instance, attr); - return sprintf(buf, "%d\n", - thermal_zone_trip_id(instance->tz, instance->trip)); + return sprintf(buf, "%d\n", thermal_zone_trip_id(tz, instance->trip)); } ssize_t @@ -909,6 +883,7 @@ weight_show(struct device *dev, struct device_attribute *attr, char *buf) ssize_t weight_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct thermal_zone_device *tz = to_thermal_zone(dev); struct thermal_instance *instance; int ret, weight; @@ -919,14 +894,13 @@ ssize_t weight_store(struct device *dev, struct device_attribute *attr, instance = container_of(attr, struct thermal_instance, weight_attr); /* Don't race with governors using the 'weight' value */ - mutex_lock(&instance->tz->lock); + mutex_lock(&tz->lock); instance->weight = weight; - thermal_governor_update_tz(instance->tz, - THERMAL_INSTANCE_WEIGHT_CHANGED); + thermal_governor_update_tz(tz, THERMAL_INSTANCE_WEIGHT_CHANGED); - mutex_unlock(&instance->tz->lock); + mutex_unlock(&tz->lock); return count; } diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c index 06a0554ddc3891..b53fac333ec5de 100644 --- a/drivers/thermal/thermal_trip.c +++ 
b/drivers/thermal/thermal_trip.c @@ -55,31 +55,8 @@ int thermal_zone_for_each_trip(struct thermal_zone_device *tz, } EXPORT_SYMBOL_GPL(thermal_zone_for_each_trip); -int thermal_zone_get_num_trips(struct thermal_zone_device *tz) +void thermal_zone_set_trips(struct thermal_zone_device *tz, int low, int high) { - return tz->num_trips; -} -EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips); - -/** - * thermal_zone_set_trips - Computes the next trip points for the driver - * @tz: a pointer to a thermal zone device structure - * - * The function computes the next temperature boundaries by browsing - * the trip points. The result is the closer low and high trip points - * to the current temperature. These values are passed to the backend - * driver to let it set its own notification mechanism (usually an - * interrupt). - * - * This function must be called with tz->lock held. Both tz and tz->ops - * must be valid pointers. - * - * It does not return a value - */ -void thermal_zone_set_trips(struct thermal_zone_device *tz) -{ - const struct thermal_trip_desc *td; - int low = -INT_MAX, high = INT_MAX; int ret; lockdep_assert_held(&tz->lock); @@ -87,14 +64,6 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz) if (!tz->ops.set_trips) return; - for_each_trip_desc(tz, td) { - if (td->threshold <= tz->temperature && td->threshold > low) - low = td->threshold; - - if (td->threshold >= tz->temperature && td->threshold < high) - high = td->threshold; - } - /* No need to change trip points */ if (tz->prev_low_trip == low && tz->prev_high_trip == high) return; @@ -114,20 +83,6 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz) dev_err(&tz->device, "Failed to set trips: %d\n", ret); } -int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, - struct thermal_trip *trip) -{ - if (!tz || !trip || trip_id < 0 || trip_id >= tz->num_trips) - return -EINVAL; - - mutex_lock(&tz->lock); - *trip = tz->trips[trip_id].trip; - mutex_unlock(&tz->lock); - - return 0; -} -EXPORT_SYMBOL_GPL(thermal_zone_get_trip); - int thermal_zone_trip_id(const struct thermal_zone_device *tz, const struct thermal_trip *trip) { @@ -138,11 +93,11 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz, return trip_to_trip_desc(trip) - tz->trips; } -void thermal_zone_trip_updated(struct thermal_zone_device *tz, - const struct thermal_trip *trip) +void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz, + struct thermal_trip *trip, int hyst) { + WRITE_ONCE(trip->hysteresis, hyst); thermal_notify_tz_trip_change(tz, trip); - __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); } void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h index 1f4bbaf31675ca..46263c1da8b6b4 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h @@ -336,10 +336,6 @@ struct ti_bandgap_data { struct ti_temp_sensor sensors[]; }; -int ti_bandgap_read_thot(struct ti_bandgap *bgp, int id, int *thot); -int ti_bandgap_write_thot(struct ti_bandgap *bgp, int id, int val); -int ti_bandgap_read_tcold(struct ti_bandgap *bgp, int id, int *tcold); -int ti_bandgap_write_tcold(struct ti_bandgap *bgp, int id, int val); int ti_bandgap_read_update_interval(struct ti_bandgap *bgp, int id, int *interval); int ti_bandgap_write_update_interval(struct ti_bandgap *bgp, int id, diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c index c9b6bb46111c4b..d2a0054217dacc 
100644 --- a/drivers/thunderbolt/acpi.c +++ b/drivers/thunderbolt/acpi.c @@ -32,40 +32,20 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, goto out_put; /* - * Try to find physical device walking upwards to the hierarcy. - * We need to do this because the xHCI driver might not yet be - * bound so the USB3 SuperSpeed ports are not yet created. + * Ignore USB3 ports here as USB core will set up device links between + * tunneled USB3 devices and NHI host during USB device creation. + * USB3 ports might not even have a physical device yet if xHCI driver + * isn't bound yet. */ - do { - dev = acpi_get_first_physical_node(adev); - if (dev) - break; - - adev = acpi_dev_parent(adev); - } while (adev); - - /* - * Check that the device is PCIe. This is because USB3 - * SuperSpeed ports have this property and they are not power - * managed with the xHCI and the SuperSpeed hub so we create the - * link from xHCI instead. - */ - while (dev && !dev_is_pci(dev)) - dev = dev->parent; - - if (!dev) + dev = acpi_get_first_physical_node(adev); + if (!dev || !dev_is_pci(dev)) goto out_put; - /* - * Check that this actually matches the type of device we - * expect. It should either be xHCI or PCIe root/downstream - * port. - */ + /* Check that this matches a PCIe root/downstream port. */ pdev = to_pci_dev(dev); - if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI || - (pci_is_pcie(pdev) && - (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) { + if (pci_is_pcie(pdev) && + (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)) { const struct device_link *link; /* diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index 9ed4bb2e8d05fd..350310bd0feea7 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -9,6 +9,7 @@ #include #include +#include #include #include @@ -34,6 +35,14 @@ #define COUNTER_SET_LEN 3 +/* + * USB4 spec doesn't specify dwell range, the range of 100 ms to 500 ms + * probed to give good results. + */ +#define MIN_DWELL_TIME 100 /* ms */ +#define MAX_DWELL_TIME 500 /* ms */ +#define DWELL_SAMPLE_INTERVAL 10 + /* Sideband registers and their sizes as defined in the USB4 spec */ struct sb_reg { unsigned int reg; @@ -394,8 +403,15 @@ static ssize_t retimer_sb_regs_write(struct file *file, * @ber_level: Current BER level contour value * @voltage_steps: Number of mandatory voltage steps * @max_voltage_offset: Maximum mandatory voltage offset (in mV) + * @voltage_steps_optional_range: Number of voltage steps for optional range + * @max_voltage_offset_optional_range: Maximum voltage offset for the optional + * range (in mV). 
* @time_steps: Number of time margin steps * @max_time_offset: Maximum time margin offset (in mUI) + * @voltage_time_offset: Offset for voltage / time for software margining + * @dwell_time: Dwell time for software margining (in ms) + * @error_counter: Error counter operation for software margining + * @optional_voltage_offset_range: Enable optional extended voltage range * @software: %true if software margining is used instead of hardware * @time: %true if time margining is used instead of voltage * @right_high: %false if left/low margin test is performed, %true if @@ -414,13 +430,37 @@ struct tb_margining { unsigned int ber_level; unsigned int voltage_steps; unsigned int max_voltage_offset; + unsigned int voltage_steps_optional_range; + unsigned int max_voltage_offset_optional_range; unsigned int time_steps; unsigned int max_time_offset; + unsigned int voltage_time_offset; + unsigned int dwell_time; + enum usb4_margin_sw_error_counter error_counter; + bool optional_voltage_offset_range; bool software; bool time; bool right_high; }; +static int margining_modify_error_counter(struct tb_margining *margining, + u32 lanes, enum usb4_margin_sw_error_counter error_counter) +{ + struct usb4_port_margining_params params = { 0 }; + struct tb_port *port = margining->port; + u32 result; + + if (error_counter != USB4_MARGIN_SW_ERROR_COUNTER_CLEAR && + error_counter != USB4_MARGIN_SW_ERROR_COUNTER_STOP) + return -EOPNOTSUPP; + + params.error_counter = error_counter; + params.lanes = lanes; + + return usb4_port_sw_margin(port, margining->target, margining->index, + ¶ms, &result); +} + static bool supports_software(const struct tb_margining *margining) { return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW; @@ -454,6 +494,12 @@ independent_time_margins(const struct tb_margining *margining) return FIELD_GET(USB4_MARGIN_CAP_1_TIME_INDP_MASK, margining->caps[1]); } +static bool +supports_optional_voltage_offset_range(const struct tb_margining *margining) +{ + return margining->caps[0] & USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT; +} + static ssize_t margining_ber_level_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) @@ -553,6 +599,14 @@ static int margining_caps_show(struct seq_file *s, void *not_used) margining->voltage_steps); seq_printf(s, "# maximum voltage offset: %u mV\n", margining->max_voltage_offset); + seq_printf(s, "# optional voltage offset range support: %s\n", + str_yes_no(supports_optional_voltage_offset_range(margining))); + if (supports_optional_voltage_offset_range(margining)) { + seq_printf(s, "# voltage margin steps, optional range: %u\n", + margining->voltage_steps_optional_range); + seq_printf(s, "# maximum voltage offset, optional range: %u mV\n", + margining->max_voltage_offset_optional_range); + } switch (independent_voltage_margins(margining)) { case USB4_MARGIN_CAP_0_VOLTAGE_MIN: @@ -667,6 +721,198 @@ static int margining_lanes_show(struct seq_file *s, void *not_used) } DEBUGFS_ATTR_RW(margining_lanes); +static ssize_t +margining_voltage_time_offset_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; + unsigned int max_margin; + unsigned int val; + int ret; + + ret = kstrtouint_from_user(user_buf, count, 10, &val); + if (ret) + return ret; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) { + if (!margining->software) + return -EOPNOTSUPP; + + if (margining->time) + 
max_margin = margining->time_steps; + else + if (margining->optional_voltage_offset_range) + max_margin = margining->voltage_steps_optional_range; + else + max_margin = margining->voltage_steps; + + margining->voltage_time_offset = clamp(val, 0, max_margin); + } + + return count; +} + +static int margining_voltage_time_offset_show(struct seq_file *s, + void *not_used) +{ + const struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) { + if (!margining->software) + return -EOPNOTSUPP; + + seq_printf(s, "%d\n", margining->voltage_time_offset); + } + + return 0; +} +DEBUGFS_ATTR_RW(margining_voltage_time_offset); + +static ssize_t +margining_error_counter_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + enum usb4_margin_sw_error_counter error_counter; + struct seq_file *s = file->private_data; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (!strcmp(buf, "nop")) + error_counter = USB4_MARGIN_SW_ERROR_COUNTER_NOP; + else if (!strcmp(buf, "clear")) + error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR; + else if (!strcmp(buf, "start")) + error_counter = USB4_MARGIN_SW_ERROR_COUNTER_START; + else if (!strcmp(buf, "stop")) + error_counter = USB4_MARGIN_SW_ERROR_COUNTER_STOP; + else + return -EINVAL; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) { + if (!margining->software) + return -EOPNOTSUPP; + + margining->error_counter = error_counter; + } + + return count; +} + +static int margining_error_counter_show(struct seq_file *s, void *not_used) +{ + const struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) { + if (!margining->software) + return -EOPNOTSUPP; + + switch (margining->error_counter) { + case USB4_MARGIN_SW_ERROR_COUNTER_NOP: + seq_puts(s, "[nop] clear start stop\n"); + break; + case USB4_MARGIN_SW_ERROR_COUNTER_CLEAR: + seq_puts(s, "nop [clear] start stop\n"); + break; + case USB4_MARGIN_SW_ERROR_COUNTER_START: + seq_puts(s, "nop clear [start] stop\n"); + break; + case USB4_MARGIN_SW_ERROR_COUNTER_STOP: + seq_puts(s, "nop clear start [stop]\n"); + break; + } + } + + return 0; +} +DEBUGFS_ATTR_RW(margining_error_counter); + +static ssize_t +margining_dwell_time_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; + unsigned int val; + int ret; + + ret = kstrtouint_from_user(user_buf, count, 10, &val); + if (ret) + return ret; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) { + if (!margining->software) + return -EOPNOTSUPP; + + margining->dwell_time = clamp(val, MIN_DWELL_TIME, MAX_DWELL_TIME); + } + + return count; +} + +static int margining_dwell_time_show(struct seq_file *s, void *not_used) +{ + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) { + if (!margining->software) + return -EOPNOTSUPP; + + seq_printf(s, "%d\n", margining->dwell_time); + } + + return 0; +} +DEBUGFS_ATTR_RW(margining_dwell_time); + +static ssize_t 
+margining_optional_voltage_offset_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; + bool val; + int ret; + + ret = kstrtobool_from_user(user_buf, count, &val); + if (ret) + return ret; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) { + margining->optional_voltage_offset_range = val; + } + + return count; +} + +static int margining_optional_voltage_offset_show(struct seq_file *s, + void *not_used) +{ + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; + + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) { + seq_printf(s, "%u\n", margining->optional_voltage_offset_range); + } + + return 0; +} +DEBUGFS_ATTR_RW(margining_optional_voltage_offset); + static ssize_t margining_mode_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) @@ -739,6 +985,51 @@ static int margining_mode_show(struct seq_file *s, void *not_used) } DEBUGFS_ATTR_RW(margining_mode); +static int margining_run_sw(struct tb_margining *margining, + struct usb4_port_margining_params *params) +{ + u32 nsamples = margining->dwell_time / DWELL_SAMPLE_INTERVAL; + int ret, i; + + ret = usb4_port_sw_margin(margining->port, margining->target, margining->index, + params, margining->results); + if (ret) + goto out_stop; + + for (i = 0; i <= nsamples; i++) { + u32 errors = 0; + + ret = usb4_port_sw_margin_errors(margining->port, margining->target, + margining->index, &margining->results[1]); + if (ret) + break; + + if (margining->lanes == USB4_MARGIN_SW_LANE_0) + errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK, + margining->results[1]); + else if (margining->lanes == USB4_MARGIN_SW_LANE_1) + errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK, + margining->results[1]); + else if (margining->lanes == USB4_MARGIN_SW_ALL_LANES) + errors = margining->results[1]; + + /* Any errors stop the test */ + if (errors) + break; + + fsleep(DWELL_SAMPLE_INTERVAL * USEC_PER_MSEC); + } + +out_stop: + /* + * Stop the counters but don't clear them to allow the + * different error counter configurations. + */ + margining_modify_error_counter(margining, margining->lanes, + USB4_MARGIN_SW_ERROR_COUNTER_STOP); + return ret; +} + static int margining_run_write(void *data, u64 val) { struct tb_margining *margining = data; @@ -779,36 +1070,43 @@ static int margining_run_write(void *data, u64 val) clx = ret; } + /* Clear the results */ + memset(margining->results, 0, sizeof(margining->results)); + if (margining->software) { + struct usb4_port_margining_params params = { + .error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR, + .lanes = margining->lanes, + .time = margining->time, + .voltage_time_offset = margining->voltage_time_offset, + .right_high = margining->right_high, + .optional_voltage_offset_range = margining->optional_voltage_offset_range, + }; + tb_port_dbg(port, "running software %s lane margining for %s lanes %u\n", margining->time ? 
"time" : "voltage", dev_name(dev), margining->lanes); - ret = usb4_port_sw_margin(port, margining->target, margining->index, - margining->lanes, margining->time, - margining->right_high, - USB4_MARGIN_SW_COUNTER_CLEAR); - if (ret) - goto out_clx; - ret = usb4_port_sw_margin_errors(port, margining->target, - margining->index, - &margining->results[0]); + ret = margining_run_sw(margining, ¶ms); } else { + struct usb4_port_margining_params params = { + .ber_level = margining->ber_level, + .lanes = margining->lanes, + .time = margining->time, + .right_high = margining->right_high, + .optional_voltage_offset_range = margining->optional_voltage_offset_range, + }; + tb_port_dbg(port, "running hardware %s lane margining for %s lanes %u\n", margining->time ? "time" : "voltage", dev_name(dev), margining->lanes); - /* Clear the results */ - margining->results[0] = 0; - margining->results[1] = 0; - ret = usb4_port_hw_margin(port, margining->target, margining->index, - margining->lanes, margining->ber_level, - margining->time, margining->right_high, + + ret = usb4_port_hw_margin(port, margining->target, margining->index, ¶ms, margining->results); } -out_clx: if (down_sw) tb_switch_clx_enable(down_sw, clx); out_unlock: @@ -837,6 +1135,13 @@ static ssize_t margining_results_write(struct file *file, margining->results[0] = 0; margining->results[1] = 0; + if (margining->software) { + /* Clear the error counters */ + margining_modify_error_counter(margining, + USB4_MARGIN_SW_ALL_LANES, + USB4_MARGIN_SW_ERROR_COUNTER_CLEAR); + } + mutex_unlock(&tb->lock); return count; } @@ -852,6 +1157,8 @@ static void voltage_margin_show(struct seq_file *s, if (val & USB4_MARGIN_HW_RES_1_EXCEEDS) seq_puts(s, " exceeds maximum"); seq_puts(s, "\n"); + if (margining->optional_voltage_offset_range) + seq_puts(s, " optional voltage offset range enabled\n"); } static void time_margin_show(struct seq_file *s, @@ -924,6 +1231,24 @@ static int margining_results_show(struct seq_file *s, void *not_used) voltage_margin_show(s, margining, val); } } + } else { + u32 lane_errors, result; + + seq_printf(s, "0x%08x\n", margining->results[1]); + result = FIELD_GET(USB4_MARGIN_SW_LANES_MASK, margining->results[0]); + + if (result == USB4_MARGIN_SW_LANE_0 || + result == USB4_MARGIN_SW_ALL_LANES) { + lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK, + margining->results[1]); + seq_printf(s, "# lane 0 errors: %u\n", lane_errors); + } + if (result == USB4_MARGIN_SW_LANE_1 || + result == USB4_MARGIN_SW_ALL_LANES) { + lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK, + margining->results[1]); + seq_printf(s, "# lane 1 errors: %u\n", lane_errors); + } } mutex_unlock(&tb->lock); @@ -1091,6 +1416,15 @@ static struct tb_margining *margining_alloc(struct tb_port *port, val = FIELD_GET(USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK, margining->caps[0]); margining->max_voltage_offset = 74 + val * 2; + if (supports_optional_voltage_offset_range(margining)) { + val = FIELD_GET(USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK, + margining->caps[0]); + margining->voltage_steps_optional_range = val; + val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK, + margining->caps[1]); + margining->max_voltage_offset_optional_range = 74 + val * 2; + } + if (supports_time(margining)) { val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_STEPS_MASK, margining->caps[1]); margining->time_steps = val; @@ -1127,6 +1461,22 @@ static struct tb_margining *margining_alloc(struct tb_port *port, independent_time_margins(margining) == USB4_MARGIN_CAP_1_TIME_LR)) 
debugfs_create_file("margin", 0600, dir, margining, &margining_margin_fops); + + margining->error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR; + margining->dwell_time = MIN_DWELL_TIME; + + if (supports_optional_voltage_offset_range(margining)) + debugfs_create_file("optional_voltage_offset", DEBUGFS_MODE, dir, margining, + &margining_optional_voltage_offset_fops); + + if (supports_software(margining)) { + debugfs_create_file("voltage_time_offset", DEBUGFS_MODE, dir, margining, + &margining_voltage_time_offset_fops); + debugfs_create_file("error_counter", DEBUGFS_MODE, dir, margining, + &margining_error_counter_fops); + debugfs_create_file("dwell_time", DEBUGFS_MODE, dir, margining, + &margining_dwell_time_fops); + } return margining; } diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h index 2a88edfc97b2ba..dbcad25ead50f1 100644 --- a/drivers/thunderbolt/sb_regs.h +++ b/drivers/thunderbolt/sb_regs.h @@ -57,6 +57,9 @@ enum usb4_sb_opcode { #define USB4_MARGIN_CAP_0_TIME BIT(5) #define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6) #define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13) +#define USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT BIT(19) +#define USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK GENMASK(26, 20) +#define USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK GENMASK(7, 0) #define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8) #define USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9) #define USB4_MARGIN_CAP_1_TIME_MIN 0x0 @@ -72,6 +75,7 @@ enum usb4_sb_opcode { #define USB4_MARGIN_HW_RH BIT(4) #define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5) #define USB4_MARGIN_HW_BER_SHIFT 5 +#define USB4_MARGIN_HW_OPT_VOLTAGE BIT(10) /* Applicable to all margin values */ #define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0) @@ -82,13 +86,17 @@ enum usb4_sb_opcode { #define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24 /* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */ +#define USB4_MARGIN_SW_LANES_MASK GENMASK(2, 0) +#define USB4_MARGIN_SW_LANE_0 0x0 +#define USB4_MARGIN_SW_LANE_1 0x1 +#define USB4_MARGIN_SW_ALL_LANES 0x7 #define USB4_MARGIN_SW_TIME BIT(3) #define USB4_MARGIN_SW_RH BIT(4) +#define USB4_MARGIN_SW_OPT_VOLTAGE BIT(5) +#define USB4_MARGIN_SW_VT_MASK GENMASK(12, 6) #define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13) -#define USB4_MARGIN_SW_COUNTER_SHIFT 13 -#define USB4_MARGIN_SW_COUNTER_NOP 0x0 -#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1 -#define USB4_MARGIN_SW_COUNTER_START 0x2 -#define USB4_MARGIN_SW_COUNTER_STOP 0x3 + +#define USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK GENMASK(3, 0) +#define USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK GENMASK(7, 4) #endif diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index b47f7873c847fe..6737188f258157 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -1353,14 +1353,48 @@ int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, u8 index, u8 reg, const void *buf, u8 size); +/** + * enum usb4_margin_sw_error_counter - Software margining error counter operation + * @USB4_MARGIN_SW_ERROR_COUNTER_NOP: No change in counter setup + * @USB4_MARGIN_SW_ERROR_COUNTER_CLEAR: Set the error counter to 0, enable counter + * @USB4_MARGIN_SW_ERROR_COUNTER_START: Start counter, count from last value + * @USB4_MARGIN_SW_ERROR_COUNTER_STOP: Stop counter, do not clear value + */ +enum usb4_margin_sw_error_counter { + USB4_MARGIN_SW_ERROR_COUNTER_NOP, + USB4_MARGIN_SW_ERROR_COUNTER_CLEAR, + USB4_MARGIN_SW_ERROR_COUNTER_START, + 
USB4_MARGIN_SW_ERROR_COUNTER_STOP, +}; + +/** + * struct usb4_port_margining_params - USB4 margining parameters + * @error_counter: Error counter operation for software margining + * @ber_level: Current BER level contour value + * @lanes: %0, %1 or %7 (all) + * @voltage_time_offset: Offset for voltage / time for software margining + * @optional_voltage_offset_range: Enable optional extended voltage range + * @right_high: %false if left/low margin test is performed, %true if right/high + * @time: %true if time margining is used instead of voltage + */ +struct usb4_port_margining_params { + enum usb4_margin_sw_error_counter error_counter; + u32 ber_level; + u32 lanes; + u32 voltage_time_offset; + bool optional_voltage_offset_range; + bool right_high; + bool time; +}; + int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target, u8 index, u32 *caps); int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target, - u8 index, unsigned int lanes, unsigned int ber_level, - bool timing, bool right_high, u32 *results); + u8 index, const struct usb4_port_margining_params *params, + u32 *results); int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target, - u8 index, unsigned int lanes, bool timing, - bool right_high, u32 counter); + u8 index, const struct usb4_port_margining_params *params, + u32 *results); int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target, u8 index, u32 *errors); diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 4d83b65afb5b69..0a9b4aeb3fa140 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -1653,31 +1653,31 @@ int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target, * @port: USB4 port * @target: Sideband target * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER - * @lanes: Which lanes to run (must match the port capabilities). Can be - * %0, %1 or %7. - * @ber_level: BER level contour value - * @timing: Perform timing margining instead of voltage - * @right_high: Use Right/high margin instead of left/low + * @params: Parameters for USB4 hardware margining * @results: Array with at least two elements to hold the results * * Runs hardware lane margining on USB4 port and returns the result in * @results. */ int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target, - u8 index, unsigned int lanes, unsigned int ber_level, - bool timing, bool right_high, u32 *results) + u8 index, const struct usb4_port_margining_params *params, + u32 *results) { u32 val; int ret; - val = lanes; - if (timing) + if (WARN_ON_ONCE(!params)) + return -EINVAL; + + val = params->lanes; + if (params->time) val |= USB4_MARGIN_HW_TIME; - if (right_high) + if (params->right_high) val |= USB4_MARGIN_HW_RH; - if (ber_level) - val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) & - USB4_MARGIN_HW_BER_MASK; + if (params->ber_level) + val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level); + if (params->optional_voltage_offset_range) + val |= USB4_MARGIN_HW_OPT_VOLTAGE; ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val, sizeof(val)); @@ -1698,38 +1698,46 @@ int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target, * @port: USB4 port * @target: Sideband target * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER - * @lanes: Which lanes to run (must match the port capabilities). Can be - * %0, %1 or %7. 
- * @timing: Perform timing margining instead of voltage - * @right_high: Use Right/high margin instead of left/low - * @counter: What to do with the error counter + * @params: Parameters for USB4 software margining + * @results: Data word for the operation completion data * * Runs software lane margining on USB4 port. Read back the error * counters by calling usb4_port_sw_margin_errors(). Returns %0 in * success and negative errno otherwise. */ int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target, - u8 index, unsigned int lanes, bool timing, - bool right_high, u32 counter) + u8 index, const struct usb4_port_margining_params *params, + u32 *results) { u32 val; int ret; - val = lanes; - if (timing) + if (WARN_ON_ONCE(!params)) + return -EINVAL; + + val = params->lanes; + if (params->time) val |= USB4_MARGIN_SW_TIME; - if (right_high) + if (params->optional_voltage_offset_range) + val |= USB4_MARGIN_SW_OPT_VOLTAGE; + if (params->right_high) val |= USB4_MARGIN_SW_RH; - val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) & - USB4_MARGIN_SW_COUNTER_MASK; + val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter); + val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset); ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val, sizeof(val)); if (ret) return ret; - return usb4_port_sb_op(port, target, index, - USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500); + ret = usb4_port_sb_op(port, target, index, + USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500); + if (ret) + return ret; + + return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results, + sizeof(*results)); + } /** diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c index 22e1bc4d8a6683..b35c44caf3d792 100644 --- a/drivers/tty/hvc/hvsi_lib.c +++ b/drivers/tty/hvc/hvsi_lib.c @@ -303,7 +303,7 @@ int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr) pr_devel("HVSI@%x: %s DTR...\n", pv->termno, dtr ? 
"Setting" : "Clearing"); - ctrl.hdr.type = VS_CONTROL_PACKET_HEADER, + ctrl.hdr.type = VS_CONTROL_PACKET_HEADER; ctrl.hdr.len = sizeof(struct hvsi_control); ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL); ctrl.mask = cpu_to_be32(HVSI_TSDTR); diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 5b97e420a95f84..4d45eca4929abd 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -208,9 +208,6 @@ static const struct { }; #define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info) - -/* driver_data correspond to the lines in the structure above - see also ISA probe function before you change something */ static const struct pci_device_id mxser_pcibrds[] = { { PCI_DEVICE_DATA(MOXA, C168, 8) }, { PCI_DEVICE_DATA(MOXA, C104, 4) }, @@ -986,7 +983,7 @@ static int mxser_get_serial_info(struct tty_struct *tty, ss->baud_base = MXSER_BAUD_BASE; ss->close_delay = close_delay; ss->closing_wait = closing_wait; - ss->custom_divisor = MXSER_CUSTOM_DIVISOR, + ss->custom_divisor = MXSER_CUSTOM_DIVISOR; mutex_unlock(&port->mutex); return 0; } @@ -1773,8 +1770,6 @@ static void mxser_initbrd(struct mxser_board *brd, bool high_baud) mxser_process_txrx_fifo(info); - info->port.close_delay = 5 * HZ / 10; - info->port.closing_wait = 30 * HZ; spin_lock_init(&info->slock); /* before set INT ISR, disable all int */ diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 8913cdd675f6bd..ebf0bbc2cff2ed 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -529,7 +529,7 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl) bool found = false; for_each_available_child_of_node(ctrl->dev.of_node, node) { - if (!of_get_property(node, "compatible", NULL)) + if (!of_property_present(node, "compatible")) continue; dev_dbg(&ctrl->dev, "adding child %pOF\n", node); diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c index 53d8eee9b1c810..25c201cfb91e17 100644 --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c @@ -561,6 +561,7 @@ static const struct of_device_id aspeed_vuart_table[] = { { .compatible = "aspeed,ast2500-vuart" }, { }, }; +MODULE_DEVICE_TABLE(of, aspeed_vuart_table); static struct platform_driver aspeed_vuart_driver = { .driver = { diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c index 121a5ce8605089..d7a0f271263a93 100644 --- a/drivers/tty/serial/8250/8250_bcm2835aux.c +++ b/drivers/tty/serial/8250/8250_bcm2835aux.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -213,11 +214,57 @@ static const struct acpi_device_id bcm2835aux_serial_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, bcm2835aux_serial_acpi_match); +static bool bcm2835aux_can_disable_clock(struct device *dev) +{ + struct bcm2835aux_data *data = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(data->line); + + if (device_may_wakeup(dev)) + return false; + + if (uart_console(&up->port) && !console_suspend_enabled) + return false; + + return true; +} + +static int bcm2835aux_suspend(struct device *dev) +{ + struct bcm2835aux_data *data = dev_get_drvdata(dev); + + serial8250_suspend_port(data->line); + + if (!bcm2835aux_can_disable_clock(dev)) + return 0; + + clk_disable_unprepare(data->clk); + return 0; +} + +static int bcm2835aux_resume(struct device *dev) +{ + struct bcm2835aux_data *data = dev_get_drvdata(dev); + int ret; + + if (bcm2835aux_can_disable_clock(dev)) { + ret = clk_prepare_enable(data->clk); + if 
(ret) + return ret; + } + + serial8250_resume_port(data->line); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(bcm2835aux_dev_pm_ops, bcm2835aux_suspend, bcm2835aux_resume); + static struct platform_driver bcm2835aux_serial_driver = { .driver = { .name = "bcm2835-aux-uart", .of_match_table = bcm2835aux_serial_match, .acpi_match_table = bcm2835aux_serial_acpi_match, + .pm = pm_ptr(&bcm2835aux_dev_pm_ops), }, .probe = bcm2835aux_serial_probe, .remove_new = bcm2835aux_serial_remove, diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 29e4b83e037651..5f9f06911795cc 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -423,11 +423,11 @@ static int univ8250_console_setup(struct console *co, char *options) port = &serial8250_ports[co->index].port; /* link port to console */ - port->cons = co; + uart_port_set_cons(port, co); retval = serial8250_console_setup(port, options, false); if (retval != 0) - port->cons = NULL; + uart_port_set_cons(port, NULL); return retval; } @@ -485,7 +485,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx, continue; co->index = i; - port->cons = co; + uart_port_set_cons(port, co); return serial8250_console_setup(port, options, true); } diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 8a353e3cc3dd42..d215c494ee24c1 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -89,7 +89,9 @@ int serial8250_tx_dma(struct uart_8250_port *p) struct tty_port *tport = &p->port.state->port; struct dma_async_tx_descriptor *desc; struct uart_port *up = &p->port; - struct scatterlist sg; + struct scatterlist *sg; + struct scatterlist sgl[2]; + int i; int ret; if (dma->tx_running) { @@ -110,18 +112,17 @@ int serial8250_tx_dma(struct uart_8250_port *p) serial8250_do_prepare_tx_dma(p); - sg_init_table(&sg, 1); - /* kfifo can do more than one sg, we don't (quite yet) */ - ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, + sg_init_table(sgl, ARRAY_SIZE(sgl)); + + ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, sgl, ARRAY_SIZE(sgl), UART_XMIT_SIZE, dma->tx_addr); - /* we already checked empty fifo above, so there should be something */ - if (WARN_ON_ONCE(ret != 1)) - return 0; + dma->tx_size = 0; - dma->tx_size = sg_dma_len(&sg); + for_each_sg(sgl, sg, ret, i) + dma->tx_size += sg_dma_len(sg); - desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1, + desc = dmaengine_prep_slave_sg(dma->txchan, sgl, ret, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c index 5a2520943dfd53..b055d89cfb3937 100644 --- a/drivers/tty/serial/8250/8250_dwlib.c +++ b/drivers/tty/serial/8250/8250_dwlib.c @@ -89,7 +89,7 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud, unsigned int quot, unsigned int quot_frac) { dw8250_writel_ext(p, DW_UART_DLF, quot_frac); - serial8250_do_set_divisor(p, baud, quot, quot_frac); + serial8250_do_set_divisor(p, baud, quot); } void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c index e3f482fd3de481..6176083d0341ca 100644 --- a/drivers/tty/serial/8250/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c @@ -171,6 +171,17 @@ OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup); OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", 
early_serial8250_setup); OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup); +static int __init early_serial8250_rs2_setup(struct earlycon_device *device, + const char *options) +{ + device->port.regshift = 2; + + return early_serial8250_setup(device, options); +} +OF_EARLYCON_DECLARE(uart, "intel,xscale-uart", early_serial8250_rs2_setup); +OF_EARLYCON_DECLARE(uart, "mrvl,mmp-uart", early_serial8250_rs2_setup); +OF_EARLYCON_DECLARE(uart, "mrvl,pxa-uart", early_serial8250_rs2_setup); + #ifdef CONFIG_SERIAL_8250_OMAP static int __init early_omap8250_setup(struct earlycon_device *device, diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 616128254bbdc4..b7a75db15249a7 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -500,7 +500,7 @@ static unsigned int xr17v35x_get_divisor(struct uart_port *p, unsigned int baud, static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud, unsigned int quot, unsigned int quot_frac) { - serial8250_do_set_divisor(p, baud, quot, quot_frac); + serial8250_do_set_divisor(p, baud, quot); /* Preserve bits not related to baudrate; DLD[7:4]. */ quot_frac |= serial_port_in(p, 0x2) & 0xf0; diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index afef1dd4ddf49c..88b58f44e4e976 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -137,7 +137,6 @@ struct omap8250_priv { atomic_t active; bool is_suspending; int wakeirq; - int wakeups_enabled; u32 latency; u32 calc_latency; struct pm_qos_request pm_qos_request; @@ -1523,7 +1522,10 @@ static int omap8250_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - device_init_wakeup(&pdev->dev, true); + device_set_wakeup_capable(&pdev->dev, true); + if (of_property_read_bool(np, "wakeup-source")) + device_set_wakeup_enable(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev); @@ -1581,7 +1583,7 @@ static int omap8250_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, up.port.irq, omap8250_irq, 0, dev_name(&pdev->dev), priv); if (ret < 0) - return ret; + goto err; priv->wakeirq = irq_of_parse_and_map(np, 1); @@ -1622,7 +1624,7 @@ static void omap8250_remove(struct platform_device *pdev) flush_work(&priv->qos_work); pm_runtime_disable(&pdev->dev); cpu_latency_qos_remove_request(&priv->pm_qos_request); - device_init_wakeup(&pdev->dev, false); + device_set_wakeup_capable(&pdev->dev, false); } static int omap8250_prepare(struct device *dev) diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index e1d7aa2fa3474d..6709b6a5f3011d 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1277,7 +1277,7 @@ static void pci_oxsemi_tornado_set_divisor(struct uart_port *port, serial_icr_write(up, UART_TCR, tcr); serial_icr_write(up, UART_CPR, cpr); serial_icr_write(up, UART_CKS, cpr2); - serial8250_do_set_divisor(port, baud, quot, 0); + serial8250_do_set_divisor(port, baud, quot); } /* diff --git a/drivers/tty/serial/8250/8250_platform.c b/drivers/tty/serial/8250/8250_platform.c index d5c8d851348d11..be7ff07cbdd009 100644 --- a/drivers/tty/serial/8250/8250_platform.c +++ b/drivers/tty/serial/8250/8250_platform.c @@ -2,11 +2,15 @@ /* * Universal/legacy platform driver for 8250/16550-type serial ports * - * Supports: ISA-compatible 8250/16550 ports + * Supports: + * ISA-compatible 8250/16550 ports + * ACPI 8250/16550 
ports * PNP 8250/16550 ports * "serial8250" platform devices */ +#include #include +#include #include #include #include @@ -22,9 +26,9 @@ /* * Configuration: - * share_irqs Whether we pass IRQF_SHARED to request_irq(). + * share_irqs: Whether we pass IRQF_SHARED to request_irq(). * This option is unsafe when used on edge-triggered interrupts. - * skip_txen_test Force skip of txen test at init time. + * skip_txen_test: Force skip of txen test at init time. */ unsigned int share_irqs = SERIAL8250_SHARE_IRQS; unsigned int skip_txen_test; @@ -61,9 +65,9 @@ static void __init __serial8250_isa_init_ports(void) nr_uarts = UART_NR; /* - * Set up initial isa ports based on nr_uart module param, or else + * Set up initial ISA ports based on nr_uart module param, or else * default to CONFIG_SERIAL_8250_RUNTIME_UARTS. Note that we do not - * need to increase nr_uarts when setting up the initial isa ports. + * need to increase nr_uarts when setting up the initial ISA ports. */ for (i = 0; i < nr_uarts; i++) serial8250_setup_port(i); @@ -101,13 +105,63 @@ void __init serial8250_isa_init_ports(void) } /* - * Register a set of serial devices attached to a platform device. The - * list is terminated with a zero flags entry, which means we expect - * all entries to have at least UPF_BOOT_AUTOCONF set. + * Generic 16550A platform devices */ -static int serial8250_probe(struct platform_device *dev) +static int serial8250_probe_acpi(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct uart_8250_port uart = { }; + struct resource *regs; + unsigned char iotype; + int ret, line; + + regs = platform_get_mem_or_io(pdev, 0); + if (!regs) + return dev_err_probe(dev, -EINVAL, "no registers defined\n"); + + switch (resource_type(regs)) { + case IORESOURCE_IO: + uart.port.iobase = regs->start; + iotype = UPIO_PORT; + break; + case IORESOURCE_MEM: + uart.port.mapbase = regs->start; + uart.port.mapsize = resource_size(regs); + uart.port.flags = UPF_IOREMAP; + iotype = UPIO_MEM; + break; + default: + return -EINVAL; + } + + /* default clock frequency */ + uart.port.uartclk = 1843200; + uart.port.type = PORT_16550A; + uart.port.dev = &pdev->dev; + uart.port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; + + ret = uart_read_and_validate_port_properties(&uart.port); + /* no interrupt -> fall back to polling */ + if (ret == -ENXIO) + ret = 0; + if (ret) + return ret; + + /* + * The previous call may not set iotype correctly when reg-io-width + * property is absent and it doesn't support IO port resource. + */ + uart.port.iotype = iotype; + + line = serial8250_register_8250_port(&uart); + if (line < 0) + return line; + + return 0; +} + +static int serial8250_probe_platform(struct platform_device *dev, struct plat_serial8250_port *p) { - struct plat_serial8250_port *p = dev_get_platdata(&dev->dev); struct uart_8250_port uart; int ret, i, irqflag = 0; @@ -155,6 +209,31 @@ static int serial8250_probe(struct platform_device *dev) return 0; } +/* + * Register a set of serial devices attached to a platform device. + * The list is terminated with a zero flags entry, which means we expect + * all entries to have at least UPF_BOOT_AUTOCONF set. + */ +static int serial8250_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct plat_serial8250_port *p; + + p = dev_get_platdata(dev); + if (p) + return serial8250_probe_platform(pdev, p); + + /* + * Probe platform UART devices defined using standard hardware + * discovery mechanism like ACPI or DT. 
Support only ACPI based + * serial device for now. + */ + if (has_acpi_companion(dev)) + return serial8250_probe_acpi(pdev); + + return 0; +} + /* * Remove serial ports registered against a platform device. */ @@ -198,6 +277,12 @@ static int serial8250_resume(struct platform_device *dev) return 0; } +static const struct acpi_device_id acpi_platform_serial_table[] = { + { "RSCV0003" }, /* RISC-V Generic 16550A UART */ + { } +}; +MODULE_DEVICE_TABLE(acpi, acpi_platform_serial_table); + static struct platform_driver serial8250_isa_driver = { .probe = serial8250_probe, .remove_new = serial8250_remove, @@ -205,12 +290,13 @@ static struct platform_driver serial8250_isa_driver = { .resume = serial8250_resume, .driver = { .name = "serial8250", + .acpi_match_table = acpi_platform_serial_table, }, }; /* * This "device" covers _all_ ISA 8250-compatible serial devices listed - * in the table in include/asm/serial.h + * in the table in include/asm/serial.h. */ struct platform_device *serial8250_isa_devs; @@ -239,8 +325,7 @@ static int __init serial8250_init(void) if (ret) goto unreg_uart_drv; - serial8250_isa_devs = platform_device_alloc("serial8250", - PLAT8250_DEV_LEGACY); + serial8250_isa_devs = platform_device_alloc("serial8250", PLAT8250_DEV_LEGACY); if (!serial8250_isa_devs) { ret = -ENOMEM; goto unreg_pnp; @@ -279,7 +364,7 @@ static void __exit serial8250_exit(void) /* * This tells serial8250_unregister_port() not to re-register * the ports (thereby making serial8250_isa_driver permanently - * in use.) + * in use). */ serial8250_isa_devs = NULL; @@ -312,12 +397,13 @@ MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); #ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS #ifndef MODULE -/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name - * working as well for the module options so we don't break people. We +/* + * This module was renamed to 8250_core in 3.7. Keep the old "8250" name + * working as well for the module options so we don't break people. We * need to keep the names identical and the convenient macros will happily * refuse to let us do that by failing the build with redefinition errors - * of global variables. So we stick them inside a dummy function to avoid - * those conflicts. The options still get parsed, and the redefined + * of global variables. So we stick them inside a dummy function to avoid + * those conflicts. The options still get parsed, and the redefined * MODULE_PARAM_PREFIX lets us keep the "8250." syntax alive. * * This is hacky. I'm sorry. 
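Editor's note (not part of the patch): the 8250_platform.c rework above keeps the legacy platform-data path alongside the new ACPI probe, and its comment explains that the plat_serial8250_port list is terminated by a zero-flags entry, so every real entry carries at least UPF_BOOT_AUTOCONF. A minimal sketch of how board code has traditionally fed such a list to the "serial8250" driver follows; the base address, IRQ number and all example_* identifiers are made up for illustration and do not come from this series.

#include <linux/platform_device.h>
#include <linux/serial_8250.h>

static struct plat_serial8250_port example_ports[] = {
	{
		.mapbase  = 0x10000000,	/* hypothetical MMIO base */
		.irq      = 42,		/* hypothetical IRQ line */
		.uartclk  = 1843200,
		.iotype   = UPIO_MEM,
		.regshift = 2,
		.flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
	},
	{ },	/* zero-flags entry terminates the list */
};

static struct platform_device example_uart_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = example_ports,
	},
};

/* Board code would then call platform_device_register(&example_uart_device). */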
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 2786918aea98e9..3509af7dc52b88 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2609,7 +2609,7 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up, } void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud, - unsigned int quot, unsigned int quot_frac) + unsigned int quot) { struct uart_8250_port *up = up_to_u8250p(port); @@ -2641,7 +2641,7 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud, if (port->set_divisor) port->set_divisor(port, baud, quot, quot_frac); else - serial8250_do_set_divisor(port, baud, quot, quot_frac); + serial8250_do_set_divisor(port, baud, quot); } static unsigned int serial8250_get_baud_rate(struct uart_port *port, diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c index 1ac86b56537498..96dd6126296c6d 100644 --- a/drivers/tty/serial/8250/8250_pxa.c +++ b/drivers/tty/serial/8250/8250_pxa.c @@ -165,22 +165,6 @@ static struct platform_driver serial_pxa_driver = { module_platform_driver(serial_pxa_driver); -#ifdef CONFIG_SERIAL_8250_CONSOLE -static int __init early_serial_pxa_setup(struct earlycon_device *device, - const char *options) -{ - struct uart_port *port = &device->port; - - if (!(device->port.membase || device->port.iobase)) - return -ENODEV; - - port->regshift = 2; - return early_serial8250_setup(device, NULL); -} -OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup); -OF_EARLYCON_DECLARE(mmp, "mrvl,mmp-uart", early_serial_pxa_setup); -#endif - MODULE_AUTHOR("Sergei Ianovich"); MODULE_DESCRIPTION("driver for PXA on-board UARTS"); MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 8b1644f5411ecf..7d0134ecd82fa5 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2480,7 +2480,7 @@ static int pl011_console_match(struct console *co, char *name, int idx, continue; co->index = i; - port->cons = co; + uart_port_set_cons(port, co); return pl011_console_setup(co, options); } diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index fda63918d1eb85..cde5f1c86353e3 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -32,7 +32,7 @@ #include #include -#include +#include #define MAX3100_C (1<<14) #define MAX3100_D (0<<14) diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 69a632fefc41f9..6f0db310cf69e9 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -124,13 +124,14 @@ struct qcom_geni_serial_port { dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; bool setup; - unsigned int baud; + unsigned long poll_timeout_us; unsigned long clk_rate; void *rx_buf; u32 loopback; bool brk; unsigned int tx_remaining; + unsigned int tx_queued; int wakeup_irq; bool rx_tx_swap; bool cts_rts_swap; @@ -144,6 +145,9 @@ static const struct uart_ops qcom_geni_uart_pops; static struct uart_driver qcom_geni_console_driver; static struct uart_driver qcom_geni_uart_driver; +static void __qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport); +static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport); + static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport) { return container_of(uport, struct qcom_geni_serial_port, uport); @@ -265,27 +269,18 @@ static bool 
qcom_geni_serial_secondary_active(struct uart_port *uport) return readl(uport->membase + SE_GENI_STATUS) & S_GENI_CMD_ACTIVE; } -static bool qcom_geni_serial_poll_bit(struct uart_port *uport, - int offset, int field, bool set) +static bool qcom_geni_serial_poll_bitfield(struct uart_port *uport, + unsigned int offset, u32 field, u32 val) { u32 reg; struct qcom_geni_serial_port *port; - unsigned int baud; - unsigned int fifo_bits; unsigned long timeout_us = 20000; struct qcom_geni_private_data *private_data = uport->private_data; if (private_data->drv) { port = to_dev_port(uport); - baud = port->baud; - if (!baud) - baud = 115200; - fifo_bits = port->tx_fifo_depth * port->tx_fifo_width; - /* - * Total polling iterations based on FIFO worth of bytes to be - * sent at current baud. Add a little fluff to the wait. - */ - timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500; + if (port->poll_timeout_us) + timeout_us = port->poll_timeout_us; } /* @@ -295,7 +290,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport, timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10; while (timeout_us) { reg = readl(uport->membase + offset); - if ((bool)(reg & field) == set) + if ((reg & field) == val) return true; udelay(10); timeout_us -= 10; @@ -303,6 +298,12 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport, return false; } +static bool qcom_geni_serial_poll_bit(struct uart_port *uport, + unsigned int offset, u32 field, bool set) +{ + return qcom_geni_serial_poll_bitfield(uport, offset, field, set ? field : 0); +} + static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size) { u32 m_cmd; @@ -315,18 +316,16 @@ static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size) static void qcom_geni_serial_poll_tx_done(struct uart_port *uport) { int done; - u32 irq_clear = M_CMD_DONE_EN; done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, M_CMD_DONE_EN, true); if (!done) { writel(M_GENI_CMD_ABORT, uport->membase + SE_GENI_M_CMD_CTRL_REG); - irq_clear |= M_CMD_ABORT_EN; qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, M_CMD_ABORT_EN, true); + writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); } - writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR); } static void qcom_geni_serial_abort_rx(struct uart_port *uport) @@ -386,17 +385,27 @@ static int qcom_geni_serial_get_char(struct uart_port *uport) static void qcom_geni_serial_poll_put_char(struct uart_port *uport, unsigned char c) { - writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); + if (qcom_geni_serial_main_active(uport)) { + qcom_geni_serial_poll_tx_done(uport); + __qcom_geni_serial_cancel_tx_cmd(uport); + } + + writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); qcom_geni_serial_setup_tx(uport, 1); - WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_TX_FIFO_WATERMARK_EN, true)); writel(c, uport->membase + SE_GENI_TX_FIFOn); - writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); qcom_geni_serial_poll_tx_done(uport); } #endif #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE +static void qcom_geni_serial_drain_fifo(struct uart_port *uport) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport); + + qcom_geni_serial_poll_bitfield(uport, SE_GENI_M_GP_LENGTH, GP_LENGTH, + port->tx_queued); +} + static void qcom_geni_serial_wr_char(struct uart_port *uport, unsigned char ch) { struct qcom_geni_private_data *private_data = uport->private_data; @@ -431,6 +440,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s, 
} writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); + writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); qcom_geni_serial_setup_tx(uport, bytes_to_send); for (i = 0; i < count; ) { size_t chars_to_write = 0; @@ -469,10 +479,9 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s, { struct uart_port *uport; struct qcom_geni_serial_port *port; + u32 m_irq_en, s_irq_en; bool locked = true; unsigned long flags; - u32 geni_status; - u32 irq_en; WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS); @@ -486,40 +495,28 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s, else uart_port_lock_irqsave(uport, &flags); - geni_status = readl(uport->membase + SE_GENI_STATUS); - - if (!locked) { - /* - * We can only get here if an oops is in progress then we were - * unable to get the lock. This means we can't safely access - * our state variables like tx_remaining. About the best we - * can do is wait for the FIFO to be empty before we start our - * transfer, so we'll do that. - */ - qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_TX_FIFO_NOT_EMPTY_EN, false); - } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) { - /* - * It seems we can't interrupt existing transfers if all data - * has been sent, in which case we need to look for done first. - */ - qcom_geni_serial_poll_tx_done(uport); - - if (!kfifo_is_empty(&uport->state->port.xmit_fifo)) { - irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); - writel(irq_en | M_TX_FIFO_WATERMARK_EN, - uport->membase + SE_GENI_M_IRQ_EN); - } + m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); + s_irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN); + writel(0, uport->membase + SE_GENI_M_IRQ_EN); + writel(0, uport->membase + SE_GENI_S_IRQ_EN); + + if (qcom_geni_serial_main_active(uport)) { + /* Wait for completion or drain FIFO */ + if (!locked || port->tx_remaining == 0) + qcom_geni_serial_poll_tx_done(uport); + else + qcom_geni_serial_drain_fifo(uport); + + qcom_geni_serial_cancel_tx_cmd(uport); } __qcom_geni_serial_console_write(uport, s, count); + writel(m_irq_en, uport->membase + SE_GENI_M_IRQ_EN); + writel(s_irq_en, uport->membase + SE_GENI_S_IRQ_EN); - if (locked) { - if (port->tx_remaining) - qcom_geni_serial_setup_tx(uport, port->tx_remaining); + if (locked) uart_port_unlock_irqrestore(uport, flags); - } } static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop) @@ -682,13 +679,10 @@ static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport) writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN); } -static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport) +static void __qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport) { struct qcom_geni_serial_port *port = to_dev_port(uport); - if (!qcom_geni_serial_main_active(uport)) - return; - geni_se_cancel_m_cmd(&port->se); if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, M_CMD_CANCEL_EN, true)) { @@ -698,8 +692,19 @@ static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport) writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); } writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); +} + +static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport); + + if (!qcom_geni_serial_main_active(uport)) + return; + + __qcom_geni_serial_cancel_tx_cmd(uport); port->tx_remaining = 0; + port->tx_queued = 0; } static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, 
bool drop) @@ -923,9 +928,10 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport, if (!chunk) goto out_write_wakeup; - if (!port->tx_remaining) { + if (!active) { qcom_geni_serial_setup_tx(uport, pending); port->tx_remaining = pending; + port->tx_queued = 0; irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); if (!(irq_en & M_TX_FIFO_WATERMARK_EN)) @@ -934,6 +940,7 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport, } qcom_geni_serial_send_chunk_fifo(uport, chunk); + port->tx_queued += chunk; /* * The tx fifo watermark is level triggered and latched. Though we had @@ -1244,11 +1251,11 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, unsigned long clk_rate; u32 ver, sampling_rate; unsigned int avg_bw_core; + unsigned long timeout; qcom_geni_serial_stop_rx(uport); /* baud rate */ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000); - port->baud = baud; sampling_rate = UART_OVERSAMPLING; /* Sampling rate is halved for IP versions >= 2.5 */ @@ -1326,9 +1333,21 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, else tx_trans_cfg |= UART_CTS_MASK; - if (baud) + if (baud) { uart_update_timeout(uport, termios->c_cflag, baud); + /* + * Make sure that qcom_geni_serial_poll_bitfield() waits for + * the FIFO, two-word intermediate transfer register and shift + * register to clear. + * + * Note that uart_fifo_timeout() also adds a 20 ms margin. + */ + timeout = jiffies_to_usecs(uart_fifo_timeout(uport)); + timeout += 3 * timeout / port->tx_fifo_depth; + WRITE_ONCE(port->poll_timeout_us, timeout); + } + if (!uart_console(uport)) writel(port->loopback, uport->membase + SE_UART_LOOPBACK_CFG); diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index 4132fcff7d4e29..8bab2aedc4991f 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -577,8 +577,8 @@ static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id) u32 clk_cfg; writew(1, base + RP2_GLOBAL_CMD); - readw(base + RP2_GLOBAL_CMD); msleep(100); + readw(base + RP2_GLOBAL_CMD); writel(0, base + RP2_CLK_PRESCALER); /* TDM clock configuration */ diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index dc35eb77d2ef34..0d184ee2f9cec0 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -550,6 +550,7 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port) case TYPE_APPLE_S5L: s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON); s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON); + s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTO_LEGACY_ENA, S3C2410_UCON); break; default: disable_irq_nosync(ourport->rx_irq); @@ -707,9 +708,8 @@ static void enable_rx_pio(struct s3c24xx_uart_port *ourport) static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport); -static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id) +static irqreturn_t s3c24xx_serial_rx_chars_dma(struct s3c24xx_uart_port *ourport) { - struct s3c24xx_uart_port *ourport = dev_id; struct uart_port *port = &ourport->port; struct s3c24xx_uart_dma *dma = ourport->dma; struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port); @@ -843,9 +843,8 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport) tty_flip_buffer_push(&port->state->port); } -static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id) +static irqreturn_t s3c24xx_serial_rx_chars_pio(struct s3c24xx_uart_port *ourport) { - struct s3c24xx_uart_port *ourport = dev_id; struct 
uart_port *port = &ourport->port; uart_port_lock(port); @@ -855,13 +854,11 @@ static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id) return IRQ_HANDLED; } -static irqreturn_t s3c24xx_serial_rx_irq(int irq, void *dev_id) +static irqreturn_t s3c24xx_serial_rx_irq(struct s3c24xx_uart_port *ourport) { - struct s3c24xx_uart_port *ourport = dev_id; - if (ourport->dma && ourport->dma->rx_chan) - return s3c24xx_serial_rx_chars_dma(dev_id); - return s3c24xx_serial_rx_chars_pio(dev_id); + return s3c24xx_serial_rx_chars_dma(ourport); + return s3c24xx_serial_rx_chars_pio(ourport); } static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport) @@ -928,9 +925,8 @@ static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport) s3c24xx_serial_stop_tx(port); } -static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id) +static irqreturn_t s3c24xx_serial_tx_irq(struct s3c24xx_uart_port *ourport) { - struct s3c24xx_uart_port *ourport = id; struct uart_port *port = &ourport->port; uart_port_lock(port); @@ -944,17 +940,17 @@ static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id) /* interrupt handler for s3c64xx and later SoC's.*/ static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id) { - const struct s3c24xx_uart_port *ourport = id; + struct s3c24xx_uart_port *ourport = id; const struct uart_port *port = &ourport->port; u32 pend = rd_regl(port, S3C64XX_UINTP); irqreturn_t ret = IRQ_HANDLED; if (pend & S3C64XX_UINTM_RXD_MSK) { - ret = s3c24xx_serial_rx_irq(irq, id); + ret = s3c24xx_serial_rx_irq(ourport); wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_RXD_MSK); } if (pend & S3C64XX_UINTM_TXD_MSK) { - ret = s3c24xx_serial_tx_irq(irq, id); + ret = s3c24xx_serial_tx_irq(ourport); wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_TXD_MSK); } return ret; @@ -963,19 +959,21 @@ static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id) /* interrupt handler for Apple SoC's.*/ static irqreturn_t apple_serial_handle_irq(int irq, void *id) { - const struct s3c24xx_uart_port *ourport = id; + struct s3c24xx_uart_port *ourport = id; const struct uart_port *port = &ourport->port; u32 pend = rd_regl(port, S3C2410_UTRSTAT); irqreturn_t ret = IRQ_NONE; - if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO)) { + if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO | + APPLE_S5L_UTRSTAT_RXTO_LEGACY)) { wr_regl(port, S3C2410_UTRSTAT, - APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO); - ret = s3c24xx_serial_rx_irq(irq, id); + APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO | + APPLE_S5L_UTRSTAT_RXTO_LEGACY); + ret = s3c24xx_serial_rx_irq(ourport); } if (pend & APPLE_S5L_UTRSTAT_TXTHRESH) { wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_TXTHRESH); - ret = s3c24xx_serial_tx_irq(irq, id); + ret = s3c24xx_serial_tx_irq(ourport); } return ret; @@ -1195,7 +1193,8 @@ static void apple_s5l_serial_shutdown(struct uart_port *port) ucon = rd_regl(port, S3C2410_UCON); ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK | APPLE_S5L_UCON_RXTHRESH_ENA_MSK | - APPLE_S5L_UCON_RXTO_ENA_MSK); + APPLE_S5L_UCON_RXTO_ENA_MSK | + APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK); wr_regl(port, S3C2410_UCON, ucon); wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS); @@ -1292,6 +1291,7 @@ static int apple_s5l_serial_startup(struct uart_port *port) /* Enable Rx Interrupt */ s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON); s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON); + s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTO_LEGACY_ENA, S3C2410_UCON); return ret; } @@ -2148,13 +2148,15 @@ 
static int s3c24xx_serial_resume_noirq(struct device *dev) ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK | APPLE_S5L_UCON_RXTHRESH_ENA_MSK | - APPLE_S5L_UCON_RXTO_ENA_MSK); + APPLE_S5L_UCON_RXTO_ENA_MSK | + APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK); if (ourport->tx_enabled) ucon |= APPLE_S5L_UCON_TXTHRESH_ENA_MSK; if (ourport->rx_enabled) ucon |= APPLE_S5L_UCON_RXTHRESH_ENA_MSK | - APPLE_S5L_UCON_RXTO_ENA_MSK; + APPLE_S5L_UCON_RXTO_ENA_MSK | + APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK; wr_regl(port, S3C2410_UCON, ucon); @@ -2541,7 +2543,7 @@ static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = { .name = "Apple S5L UART", .type = TYPE_APPLE_S5L, .port_type = PORT_8250, - .iotype = UPIO_MEM, + .iotype = UPIO_MEM32, .fifosize = 16, .rx_fifomask = S3C2410_UFSTAT_RXMASK, .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT, @@ -2827,6 +2829,9 @@ OF_EARLYCON_DECLARE(gs101, "google,gs101-uart", gs101_early_console_setup); static int __init apple_s5l_early_console_setup(struct earlycon_device *device, const char *opt) { + /* Apple A7-A11 requires MMIO32 register accesses. */ + device->port.iotype = UPIO_MEM32; + /* Close enough to S3C2410 for earlycon... */ device->port.private_data = &s3c2410_early_console_data; diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index b4c1798a1df2a0..ad88a33a504f53 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -10,6 +10,7 @@ #undef DEFAULT_SYMBOL_NAMESPACE #define DEFAULT_SYMBOL_NAMESPACE SERIAL_NXP_SC16IS7XX +#include #include #include #include @@ -78,52 +79,52 @@ #define SC16IS7XX_XOFF2_REG (0x07) /* Xoff2 word */ /* IER register bits */ -#define SC16IS7XX_IER_RDI_BIT (1 << 0) /* Enable RX data interrupt */ -#define SC16IS7XX_IER_THRI_BIT (1 << 1) /* Enable TX holding register +#define SC16IS7XX_IER_RDI_BIT BIT(0) /* Enable RX data interrupt */ +#define SC16IS7XX_IER_THRI_BIT BIT(1) /* Enable TX holding register * interrupt */ -#define SC16IS7XX_IER_RLSI_BIT (1 << 2) /* Enable RX line status +#define SC16IS7XX_IER_RLSI_BIT BIT(2) /* Enable RX line status * interrupt */ -#define SC16IS7XX_IER_MSI_BIT (1 << 3) /* Enable Modem status +#define SC16IS7XX_IER_MSI_BIT BIT(3) /* Enable Modem status * interrupt */ /* IER register bits - write only if (EFR[4] == 1) */ -#define SC16IS7XX_IER_SLEEP_BIT (1 << 4) /* Enable Sleep mode */ -#define SC16IS7XX_IER_XOFFI_BIT (1 << 5) /* Enable Xoff interrupt */ -#define SC16IS7XX_IER_RTSI_BIT (1 << 6) /* Enable nRTS interrupt */ -#define SC16IS7XX_IER_CTSI_BIT (1 << 7) /* Enable nCTS interrupt */ +#define SC16IS7XX_IER_SLEEP_BIT BIT(4) /* Enable Sleep mode */ +#define SC16IS7XX_IER_XOFFI_BIT BIT(5) /* Enable Xoff interrupt */ +#define SC16IS7XX_IER_RTSI_BIT BIT(6) /* Enable nRTS interrupt */ +#define SC16IS7XX_IER_CTSI_BIT BIT(7) /* Enable nCTS interrupt */ /* FCR register bits */ -#define SC16IS7XX_FCR_FIFO_BIT (1 << 0) /* Enable FIFO */ -#define SC16IS7XX_FCR_RXRESET_BIT (1 << 1) /* Reset RX FIFO */ -#define SC16IS7XX_FCR_TXRESET_BIT (1 << 2) /* Reset TX FIFO */ -#define SC16IS7XX_FCR_RXLVLL_BIT (1 << 6) /* RX Trigger level LSB */ -#define SC16IS7XX_FCR_RXLVLH_BIT (1 << 7) /* RX Trigger level MSB */ +#define SC16IS7XX_FCR_FIFO_BIT BIT(0) /* Enable FIFO */ +#define SC16IS7XX_FCR_RXRESET_BIT BIT(1) /* Reset RX FIFO */ +#define SC16IS7XX_FCR_TXRESET_BIT BIT(2) /* Reset TX FIFO */ +#define SC16IS7XX_FCR_RXLVLL_BIT BIT(6) /* RX Trigger level LSB */ +#define SC16IS7XX_FCR_RXLVLH_BIT BIT(7) /* RX Trigger level MSB */ /* FCR register bits - write only if (EFR[4] == 1) */ -#define 
SC16IS7XX_FCR_TXLVLL_BIT (1 << 4) /* TX Trigger level LSB */ -#define SC16IS7XX_FCR_TXLVLH_BIT (1 << 5) /* TX Trigger level MSB */ +#define SC16IS7XX_FCR_TXLVLL_BIT BIT(4) /* TX Trigger level LSB */ +#define SC16IS7XX_FCR_TXLVLH_BIT BIT(5) /* TX Trigger level MSB */ /* IIR register bits */ -#define SC16IS7XX_IIR_NO_INT_BIT (1 << 0) /* No interrupts pending */ -#define SC16IS7XX_IIR_ID_MASK 0x3e /* Mask for the interrupt ID */ -#define SC16IS7XX_IIR_THRI_SRC 0x02 /* TX holding register empty */ -#define SC16IS7XX_IIR_RDI_SRC 0x04 /* RX data interrupt */ -#define SC16IS7XX_IIR_RLSE_SRC 0x06 /* RX line status error */ -#define SC16IS7XX_IIR_RTOI_SRC 0x0c /* RX time-out interrupt */ -#define SC16IS7XX_IIR_MSI_SRC 0x00 /* Modem status interrupt - * - only on 75x/76x - */ -#define SC16IS7XX_IIR_INPIN_SRC 0x30 /* Input pin change of state - * - only on 75x/76x - */ -#define SC16IS7XX_IIR_XOFFI_SRC 0x10 /* Received Xoff */ -#define SC16IS7XX_IIR_CTSRTS_SRC 0x20 /* nCTS,nRTS change of state - * from active (LOW) - * to inactive (HIGH) - */ +#define SC16IS7XX_IIR_NO_INT_BIT 0x01 /* No interrupts pending */ +#define SC16IS7XX_IIR_ID_MASK GENMASK(5, 1) /* Mask for the interrupt ID */ +#define SC16IS7XX_IIR_THRI_SRC 0x02 /* TX holding register empty */ +#define SC16IS7XX_IIR_RDI_SRC 0x04 /* RX data interrupt */ +#define SC16IS7XX_IIR_RLSE_SRC 0x06 /* RX line status error */ +#define SC16IS7XX_IIR_RTOI_SRC 0x0c /* RX time-out interrupt */ +#define SC16IS7XX_IIR_MSI_SRC 0x00 /* Modem status interrupt + * - only on 75x/76x + */ +#define SC16IS7XX_IIR_INPIN_SRC 0x30 /* Input pin change of state + * - only on 75x/76x + */ +#define SC16IS7XX_IIR_XOFFI_SRC 0x10 /* Received Xoff */ +#define SC16IS7XX_IIR_CTSRTS_SRC 0x20 /* nCTS,nRTS change of state + * from active (LOW) + * to inactive (HIGH) + */ /* LCR register bits */ -#define SC16IS7XX_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */ -#define SC16IS7XX_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1 +#define SC16IS7XX_LCR_LENGTH0_BIT BIT(0) /* Word length bit 0 */ +#define SC16IS7XX_LCR_LENGTH1_BIT BIT(1) /* Word length bit 1 * * Word length bits table: * 00 -> 5 bit words @@ -131,7 +132,7 @@ * 10 -> 7 bit words * 11 -> 8 bit words */ -#define SC16IS7XX_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit +#define SC16IS7XX_LCR_STOPLEN_BIT BIT(2) /* STOP length bit * * STOP length bit table: * 0 -> 1 stop bit @@ -139,11 +140,11 @@ * word length is 5, * 2 stop bits otherwise */ -#define SC16IS7XX_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */ -#define SC16IS7XX_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */ -#define SC16IS7XX_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */ -#define SC16IS7XX_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */ -#define SC16IS7XX_LCR_DLAB_BIT (1 << 7) /* Divisor Latch enable */ +#define SC16IS7XX_LCR_PARITY_BIT BIT(3) /* Parity bit enable */ +#define SC16IS7XX_LCR_EVENPARITY_BIT BIT(4) /* Even parity bit enable */ +#define SC16IS7XX_LCR_FORCEPARITY_BIT BIT(5) /* 9-bit multidrop parity */ +#define SC16IS7XX_LCR_TXBREAK_BIT BIT(6) /* TX break enable */ +#define SC16IS7XX_LCR_DLAB_BIT BIT(7) /* Divisor Latch enable */ #define SC16IS7XX_LCR_WORD_LEN_5 (0x00) #define SC16IS7XX_LCR_WORD_LEN_6 (0x01) #define SC16IS7XX_LCR_WORD_LEN_7 (0x02) @@ -154,61 +155,65 @@ * reg set */ /* MCR register bits */ -#define SC16IS7XX_MCR_DTR_BIT (1 << 0) /* DTR complement +#define SC16IS7XX_MCR_DTR_BIT BIT(0) /* DTR complement * - only on 75x/76x */ -#define SC16IS7XX_MCR_RTS_BIT (1 << 1) /* RTS complement */ -#define SC16IS7XX_MCR_TCRTLR_BIT (1 
<< 2) /* TCR/TLR register enable */ -#define SC16IS7XX_MCR_LOOP_BIT (1 << 4) /* Enable loopback test mode */ -#define SC16IS7XX_MCR_XONANY_BIT (1 << 5) /* Enable Xon Any +#define SC16IS7XX_MCR_RTS_BIT BIT(1) /* RTS complement */ +#define SC16IS7XX_MCR_TCRTLR_BIT BIT(2) /* TCR/TLR register enable */ +#define SC16IS7XX_MCR_LOOP_BIT BIT(4) /* Enable loopback test mode */ +#define SC16IS7XX_MCR_XONANY_BIT BIT(5) /* Enable Xon Any * - write enabled * if (EFR[4] == 1) */ -#define SC16IS7XX_MCR_IRDA_BIT (1 << 6) /* Enable IrDA mode +#define SC16IS7XX_MCR_IRDA_BIT BIT(6) /* Enable IrDA mode * - write enabled * if (EFR[4] == 1) */ -#define SC16IS7XX_MCR_CLKSEL_BIT (1 << 7) /* Divide clock by 4 +#define SC16IS7XX_MCR_CLKSEL_BIT BIT(7) /* Divide clock by 4 * - write enabled * if (EFR[4] == 1) */ /* LSR register bits */ -#define SC16IS7XX_LSR_DR_BIT (1 << 0) /* Receiver data ready */ -#define SC16IS7XX_LSR_OE_BIT (1 << 1) /* Overrun Error */ -#define SC16IS7XX_LSR_PE_BIT (1 << 2) /* Parity Error */ -#define SC16IS7XX_LSR_FE_BIT (1 << 3) /* Frame Error */ -#define SC16IS7XX_LSR_BI_BIT (1 << 4) /* Break Interrupt */ -#define SC16IS7XX_LSR_BRK_ERROR_MASK 0x1E /* BI, FE, PE, OE bits */ -#define SC16IS7XX_LSR_THRE_BIT (1 << 5) /* TX holding register empty */ -#define SC16IS7XX_LSR_TEMT_BIT (1 << 6) /* Transmitter empty */ -#define SC16IS7XX_LSR_FIFOE_BIT (1 << 7) /* Fifo Error */ +#define SC16IS7XX_LSR_DR_BIT BIT(0) /* Receiver data ready */ +#define SC16IS7XX_LSR_OE_BIT BIT(1) /* Overrun Error */ +#define SC16IS7XX_LSR_PE_BIT BIT(2) /* Parity Error */ +#define SC16IS7XX_LSR_FE_BIT BIT(3) /* Frame Error */ +#define SC16IS7XX_LSR_BI_BIT BIT(4) /* Break Interrupt */ +#define SC16IS7XX_LSR_BRK_ERROR_MASK \ + (SC16IS7XX_LSR_OE_BIT | \ + SC16IS7XX_LSR_PE_BIT | \ + SC16IS7XX_LSR_FE_BIT | \ + SC16IS7XX_LSR_BI_BIT) + +#define SC16IS7XX_LSR_THRE_BIT BIT(5) /* TX holding register empty */ +#define SC16IS7XX_LSR_TEMT_BIT BIT(6) /* Transmitter empty */ +#define SC16IS7XX_LSR_FIFOE_BIT BIT(7) /* Fifo Error */ /* MSR register bits */ -#define SC16IS7XX_MSR_DCTS_BIT (1 << 0) /* Delta CTS Clear To Send */ -#define SC16IS7XX_MSR_DDSR_BIT (1 << 1) /* Delta DSR Data Set Ready +#define SC16IS7XX_MSR_DCTS_BIT BIT(0) /* Delta CTS Clear To Send */ +#define SC16IS7XX_MSR_DDSR_BIT BIT(1) /* Delta DSR Data Set Ready * or (IO4) * - only on 75x/76x */ -#define SC16IS7XX_MSR_DRI_BIT (1 << 2) /* Delta RI Ring Indicator +#define SC16IS7XX_MSR_DRI_BIT BIT(2) /* Delta RI Ring Indicator * or (IO7) * - only on 75x/76x */ -#define SC16IS7XX_MSR_DCD_BIT (1 << 3) /* Delta CD Carrier Detect +#define SC16IS7XX_MSR_DCD_BIT BIT(3) /* Delta CD Carrier Detect * or (IO6) * - only on 75x/76x */ -#define SC16IS7XX_MSR_CTS_BIT (1 << 4) /* CTS */ -#define SC16IS7XX_MSR_DSR_BIT (1 << 5) /* DSR (IO4) +#define SC16IS7XX_MSR_CTS_BIT BIT(4) /* CTS */ +#define SC16IS7XX_MSR_DSR_BIT BIT(5) /* DSR (IO4) * - only on 75x/76x */ -#define SC16IS7XX_MSR_RI_BIT (1 << 6) /* RI (IO7) +#define SC16IS7XX_MSR_RI_BIT BIT(6) /* RI (IO7) * - only on 75x/76x */ -#define SC16IS7XX_MSR_CD_BIT (1 << 7) /* CD (IO6) +#define SC16IS7XX_MSR_CD_BIT BIT(7) /* CD (IO6) * - only on 75x/76x */ -#define SC16IS7XX_MSR_DELTA_MASK 0x0F /* Any of the delta bits! 
*/ /* * TCR register bits @@ -241,19 +246,19 @@ #define SC16IS7XX_TLR_RX_TRIGGER(words) ((((words) / 4) & 0x0f) << 4) /* IOControl register bits (Only 75x/76x) */ -#define SC16IS7XX_IOCONTROL_LATCH_BIT (1 << 0) /* Enable input latching */ -#define SC16IS7XX_IOCONTROL_MODEM_A_BIT (1 << 1) /* Enable GPIO[7:4] as modem A pins */ -#define SC16IS7XX_IOCONTROL_MODEM_B_BIT (1 << 2) /* Enable GPIO[3:0] as modem B pins */ -#define SC16IS7XX_IOCONTROL_SRESET_BIT (1 << 3) /* Software Reset */ +#define SC16IS7XX_IOCONTROL_LATCH_BIT BIT(0) /* Enable input latching */ +#define SC16IS7XX_IOCONTROL_MODEM_A_BIT BIT(1) /* Enable GPIO[7:4] as modem A pins */ +#define SC16IS7XX_IOCONTROL_MODEM_B_BIT BIT(2) /* Enable GPIO[3:0] as modem B pins */ +#define SC16IS7XX_IOCONTROL_SRESET_BIT BIT(3) /* Software Reset */ /* EFCR register bits */ -#define SC16IS7XX_EFCR_9BIT_MODE_BIT (1 << 0) /* Enable 9-bit or Multidrop +#define SC16IS7XX_EFCR_9BIT_MODE_BIT BIT(0) /* Enable 9-bit or Multidrop * mode (RS485) */ -#define SC16IS7XX_EFCR_RXDISABLE_BIT (1 << 1) /* Disable receiver */ -#define SC16IS7XX_EFCR_TXDISABLE_BIT (1 << 2) /* Disable transmitter */ -#define SC16IS7XX_EFCR_AUTO_RS485_BIT (1 << 4) /* Auto RS485 RTS direction */ -#define SC16IS7XX_EFCR_RTS_INVERT_BIT (1 << 5) /* RTS output inversion */ -#define SC16IS7XX_EFCR_IRDA_MODE_BIT (1 << 7) /* IrDA mode +#define SC16IS7XX_EFCR_RXDISABLE_BIT BIT(1) /* Disable receiver */ +#define SC16IS7XX_EFCR_TXDISABLE_BIT BIT(2) /* Disable transmitter */ +#define SC16IS7XX_EFCR_AUTO_RS485_BIT BIT(4) /* Auto RS485 RTS direction */ +#define SC16IS7XX_EFCR_RTS_INVERT_BIT BIT(5) /* RTS output inversion */ +#define SC16IS7XX_EFCR_IRDA_MODE_BIT BIT(7) /* IrDA mode * 0 = rate upto 115.2 kbit/s * - Only 75x/76x * 1 = rate upto 1.152 Mbit/s @@ -261,16 +266,16 @@ */ /* EFR register bits */ -#define SC16IS7XX_EFR_AUTORTS_BIT (1 << 6) /* Auto RTS flow ctrl enable */ -#define SC16IS7XX_EFR_AUTOCTS_BIT (1 << 7) /* Auto CTS flow ctrl enable */ -#define SC16IS7XX_EFR_XOFF2_DETECT_BIT (1 << 5) /* Enable Xoff2 detection */ -#define SC16IS7XX_EFR_ENABLE_BIT (1 << 4) /* Enable enhanced functions +#define SC16IS7XX_EFR_AUTORTS_BIT BIT(6) /* Auto RTS flow ctrl enable */ +#define SC16IS7XX_EFR_AUTOCTS_BIT BIT(7) /* Auto CTS flow ctrl enable */ +#define SC16IS7XX_EFR_XOFF2_DETECT_BIT BIT(5) /* Enable Xoff2 detection */ +#define SC16IS7XX_EFR_ENABLE_BIT BIT(4) /* Enable enhanced functions * and writing to IER[7:4], * FCR[5:4], MCR[7:5] */ -#define SC16IS7XX_EFR_SWFLOW3_BIT (1 << 3) /* SWFLOW bit 3 */ -#define SC16IS7XX_EFR_SWFLOW2_BIT (1 << 2) /* SWFLOW bit 2 - * +#define SC16IS7XX_EFR_SWFLOW3_BIT BIT(3) +#define SC16IS7XX_EFR_SWFLOW2_BIT BIT(2) + /* * SWFLOW bits 3 & 2 table: * 00 -> no transmitter flow * control @@ -282,10 +287,10 @@ * XON1, XON2, XOFF1 and * XOFF2 */ -#define SC16IS7XX_EFR_SWFLOW1_BIT (1 << 1) /* SWFLOW bit 2 */ -#define SC16IS7XX_EFR_SWFLOW0_BIT (1 << 0) /* SWFLOW bit 3 - * - * SWFLOW bits 3 & 2 table: +#define SC16IS7XX_EFR_SWFLOW1_BIT BIT(1) +#define SC16IS7XX_EFR_SWFLOW0_BIT BIT(0) + /* + * SWFLOW bits 1 & 0 table: * 00 -> no received flow * control * 01 -> receiver compares @@ -309,9 +314,9 @@ #define SC16IS7XX_FIFO_SIZE (64) #define SC16IS7XX_GPIOS_PER_BANK 4 -#define SC16IS7XX_RECONF_MD (1 << 0) -#define SC16IS7XX_RECONF_IER (1 << 1) -#define SC16IS7XX_RECONF_RS485 (1 << 2) +#define SC16IS7XX_RECONF_MD BIT(0) +#define SC16IS7XX_RECONF_IER BIT(1) +#define SC16IS7XX_RECONF_RS485 BIT(2) struct sc16is7xx_one_config { unsigned int flags; diff --git 
a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 5bea3af46abcef..d94d73e45fb6de 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -407,14 +407,16 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) /* * Turn off DTR and RTS early. */ - if (uport && uart_console(uport) && tty) { - uport->cons->cflag = tty->termios.c_cflag; - uport->cons->ispeed = tty->termios.c_ispeed; - uport->cons->ospeed = tty->termios.c_ospeed; - } + if (uport) { + if (uart_console(uport) && tty) { + uport->cons->cflag = tty->termios.c_cflag; + uport->cons->ispeed = tty->termios.c_ispeed; + uport->cons->ospeed = tty->termios.c_ospeed; + } - if (!tty || C_HUPCL(tty)) - uart_port_dtr_rts(uport, false); + if (!tty || C_HUPCL(tty)) + uart_port_dtr_rts(uport, false); + } uart_port_shutdown(port); } @@ -1102,21 +1104,19 @@ static int uart_tiocmget(struct tty_struct *tty) struct uart_state *state = tty->driver_data; struct tty_port *port = &state->port; struct uart_port *uport; - int result = -EIO; + int result; + + guard(mutex)(&port->mutex); - mutex_lock(&port->mutex); uport = uart_port_check(state); - if (!uport) - goto out; + if (!uport || tty_io_error(tty)) + return -EIO; + + uart_port_lock_irq(uport); + result = uport->mctrl; + result |= uport->ops->get_mctrl(uport); + uart_port_unlock_irq(uport); - if (!tty_io_error(tty)) { - uart_port_lock_irq(uport); - result = uport->mctrl; - result |= uport->ops->get_mctrl(uport); - uart_port_unlock_irq(uport); - } -out: - mutex_unlock(&port->mutex); return result; } @@ -1126,20 +1126,16 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) struct uart_state *state = tty->driver_data; struct tty_port *port = &state->port; struct uart_port *uport; - int ret = -EIO; - mutex_lock(&port->mutex); + guard(mutex)(&port->mutex); + uport = uart_port_check(state); - if (!uport) - goto out; + if (!uport || tty_io_error(tty)) + return -EIO; - if (!tty_io_error(tty)) { - uart_update_mctrl(uport, set, clear); - ret = 0; - } -out: - mutex_unlock(&port->mutex); - return ret; + uart_update_mctrl(uport, set, clear); + + return 0; } static int uart_break_ctl(struct tty_struct *tty, int break_state) @@ -1147,19 +1143,17 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state) struct uart_state *state = tty->driver_data; struct tty_port *port = &state->port; struct uart_port *uport; - int ret = -EIO; - mutex_lock(&port->mutex); + guard(mutex)(&port->mutex); + uport = uart_port_check(state); if (!uport) - goto out; + return -EIO; if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl) uport->ops->break_ctl(uport, break_state); - ret = 0; -out: - mutex_unlock(&port->mutex); - return ret; + + return 0; } static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state) @@ -1176,17 +1170,14 @@ static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state) * changing, and hence any extra opens of the port while * we're auto-configuring. 
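The serial_core.c conversion above leans on the scope-based cleanup helpers from <linux/cleanup.h>: guard(mutex)(...) takes the mutex and releases it automatically on every return path, which is what lets the goto-out labels disappear. A minimal sketch of the pattern (function and variable names here are made up, not taken from the driver):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_value;

static int demo_read_value(bool ready)
{
        /* demo_lock is dropped automatically when the scope is left. */
        guard(mutex)(&demo_lock);

        if (!ready)
                return -EIO;    /* unlocked on this path ... */

        return demo_value;      /* ... and on this one */
}

scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &port->mutex) in uart_do_autoconfig() is the interruptible variant; its second argument runs when the lock cannot be taken.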
*/ - if (mutex_lock_interruptible(&port->mutex)) - return -ERESTARTSYS; + scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &port->mutex) { + uport = uart_port_check(state); + if (!uport) + return -EIO; - uport = uart_port_check(state); - if (!uport) { - ret = -EIO; - goto out; - } + if (tty_port_users(port) != 1) + return -EBUSY; - ret = -EBUSY; - if (tty_port_users(port) == 1) { uart_shutdown(tty, state); /* @@ -1207,14 +1198,15 @@ static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state) uport->ops->config_port(uport, flags); ret = uart_startup(tty, state, true); - if (ret == 0) - tty_port_set_initialized(port, true); + if (ret < 0) + return ret; if (ret > 0) - ret = 0; + return 0; + + tty_port_set_initialized(port, true); } -out: - mutex_unlock(&port->mutex); - return ret; + + return 0; } static void uart_enable_ms(struct uart_port *uport) @@ -1709,10 +1701,11 @@ static void uart_set_termios(struct tty_struct *tty, unsigned int iflag_mask = IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK; bool sw_changed = false; - mutex_lock(&state->port.mutex); + guard(mutex)(&state->port.mutex); + uport = uart_port_check(state); if (!uport) - goto out; + return; /* * Drivers doing software flow control also need to know @@ -1735,9 +1728,8 @@ static void uart_set_termios(struct tty_struct *tty, tty->termios.c_ospeed == old_termios->c_ospeed && tty->termios.c_ispeed == old_termios->c_ispeed && ((tty->termios.c_iflag ^ old_termios->c_iflag) & iflag_mask) == 0 && - !sw_changed) { - goto out; - } + !sw_changed) + return; uart_change_line_settings(tty, state, old_termios); /* reload cflag from termios; port driver may have overridden flags */ @@ -1754,8 +1746,6 @@ static void uart_set_termios(struct tty_struct *tty, mask |= TIOCM_RTS; uart_set_mctrl(uport, mask); } -out: - mutex_unlock(&state->port.mutex); } /* @@ -2049,10 +2039,11 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) unsigned int status; int mmio; - mutex_lock(&port->mutex); + guard(mutex)(&port->mutex); + uport = uart_port_check(state); if (!uport) - goto out; + return; mmio = uport->iotype >= UPIO_MEM; seq_printf(m, "%d: uart:%s %s%08llX irq:%d", @@ -2064,7 +2055,7 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) if (uport->type == PORT_UNKNOWN) { seq_putc(m, '\n'); - goto out; + return; } if (capable(CAP_SYS_ADMIN)) { @@ -2115,8 +2106,6 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) seq_putc(m, '\n'); #undef STATBIT #undef INFOBIT -out: - mutex_unlock(&port->mutex); } static int uart_proc_show(struct seq_file *m, void *v) @@ -2393,13 +2382,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) struct device *tty_dev; struct uart_match match = {uport, drv}; - mutex_lock(&port->mutex); + guard(mutex)(&port->mutex); tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port); if (tty_dev && device_may_wakeup(tty_dev)) { enable_irq_wake(uport->irq); put_device(tty_dev); - mutex_unlock(&port->mutex); return 0; } put_device(tty_dev); @@ -2417,7 +2405,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) uart_port_unlock_irq(uport); } device_set_awake_path(uport->dev); - goto unlock; + return 0; } uport->suspended = 1; @@ -2460,8 +2448,6 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) console_stop(uport->cons); uart_change_pm(state, UART_PM_STATE_OFF); -unlock: - mutex_unlock(&port->mutex); return 0; } @@ -2475,14 +2461,13 @@ int 
uart_resume_port(struct uart_driver *drv, struct uart_port *uport) struct uart_match match = {uport, drv}; struct ktermios termios; - mutex_lock(&port->mutex); + guard(mutex)(&port->mutex); tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port); if (!uport->suspended && device_may_wakeup(tty_dev)) { if (irqd_is_wakeup_set(irq_get_irq_data((uport->irq)))) disable_irq_wake(uport->irq); put_device(tty_dev); - mutex_unlock(&port->mutex); return 0; } put_device(tty_dev); @@ -2555,8 +2540,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) tty_port_set_suspended(port, false); } - mutex_unlock(&port->mutex); - return 0; } EXPORT_SYMBOL(uart_resume_port); @@ -2696,14 +2679,13 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) int ret = 0; tport = &state->port; - mutex_lock(&tport->mutex); + + guard(mutex)(&tport->mutex); port = uart_port_check(state); if (!port || port->type == PORT_UNKNOWN || - !(port->ops->poll_get_char && port->ops->poll_put_char)) { - ret = -1; - goto out; - } + !(port->ops->poll_get_char && port->ops->poll_put_char)) + return -1; pm_state = state->pm_state; uart_change_pm(state, UART_PM_STATE_ON); @@ -2723,10 +2705,10 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) ret = uart_set_options(port, NULL, baud, parity, bits, flow); console_list_unlock(); } -out: + if (ret) uart_change_pm(state, pm_state); - mutex_unlock(&tport->mutex); + return ret; } @@ -3176,8 +3158,15 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u state->uart_port = uport; uport->state = state; + /* + * If this port is in use as a console then the spinlock is already + * initialised. + */ + if (!uart_console_registered(uport)) + uart_port_spin_lock_init(uport); + state->pm_state = UART_PM_STATE_UNDEFINED; - uport->cons = drv->cons; + uart_port_set_cons(uport, drv->cons); uport->minor = drv->tty_driver->minor_start + uport->line; uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name, drv->tty_driver->name_base + uport->line); @@ -3186,13 +3175,6 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u goto out; } - /* - * If this port is in use as a console then the spinlock is already - * initialised. 
- */ - if (!uart_console_registered(uport)) - uart_port_spin_lock_init(uport); - if (uport->cons && uport->dev) of_console_check(uport->dev->of_node, uport->cons->name, uport->line); diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index f91753a40a69b5..8aea59f8ca13da 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -808,7 +808,6 @@ static void asc_serial_remove(struct platform_device *pdev) uart_remove_one_port(&asc_uart_driver, port); } -#ifdef CONFIG_PM_SLEEP static int asc_serial_suspend(struct device *dev) { struct uart_port *port = dev_get_drvdata(dev); @@ -823,8 +822,6 @@ static int asc_serial_resume(struct device *dev) return uart_resume_port(&asc_uart_driver, port); } -#endif /* CONFIG_PM_SLEEP */ - /*----------------------------------------------------------------------*/ #ifdef CONFIG_SERIAL_ST_ASC_CONSOLE @@ -932,16 +929,15 @@ static struct uart_driver asc_uart_driver = { .cons = ASC_SERIAL_CONSOLE, }; -static const struct dev_pm_ops asc_serial_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(asc_serial_suspend, asc_serial_resume) -}; +static DEFINE_SIMPLE_DEV_PM_OPS(asc_serial_pm_ops, asc_serial_suspend, + asc_serial_resume); static struct platform_driver asc_serial_driver = { .probe = asc_serial_probe, .remove_new = asc_serial_remove, .driver = { .name = DRIVER_NAME, - .pm = &asc_serial_pm_ops, + .pm = pm_sleep_ptr(&asc_serial_pm_ops), .of_match_table = of_match_ptr(asc_match), }, }; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 2acfcea403cef2..777392914819d7 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -219,7 +219,7 @@ struct cdns_platform_data { u32 quirks; }; -struct serial_rs485 cdns_rs485_supported = { +static struct serial_rs485 cdns_rs485_supported = { .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND, .delay_rts_before_send = 1, diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 14f8f00fdcf9ab..930b04e3d148f5 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -531,6 +531,7 @@ static const struct sysrq_key_op *sysrq_key_table[62] = { NULL, /* P */ NULL, /* Q */ &sysrq_replay_logs_op, /* R */ + /* S: May be registered by sched_ext for resetting */ NULL, /* S */ NULL, /* T */ NULL, /* U */ diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 407b0d87b7c108..9771072da177cb 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -350,22 +350,19 @@ int tty_dev_name_to_number(const char *name, dev_t *number) return ret; prefix_length = str - name; - mutex_lock(&tty_mutex); + + guard(mutex)(&tty_mutex); list_for_each_entry(p, &tty_drivers, tty_drivers) if (prefix_length == strlen(p->name) && strncmp(name, p->name, prefix_length) == 0) { if (index < p->num) { *number = MKDEV(p->major, p->minor_start + index); - goto out; + return 0; } } - /* if here then driver wasn't found */ - ret = -ENODEV; -out: - mutex_unlock(&tty_mutex); - return ret; + return -ENODEV; } EXPORT_SYMBOL_GPL(tty_dev_name_to_number); @@ -462,7 +459,6 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file) } static const struct file_operations tty_fops = { - .llseek = no_llseek, .read_iter = tty_read, .write_iter = tty_write, .splice_read = copy_splice_read, @@ -477,7 +473,6 @@ static const struct file_operations tty_fops = { }; static const struct file_operations console_fops = { - .llseek = no_llseek, .read_iter = tty_read, .write_iter = redirected_tty_write, .splice_read = copy_splice_read, @@ 
-491,7 +486,6 @@ static const struct file_operations console_fops = { }; static const struct file_operations hung_up_tty_fops = { - .llseek = no_llseek, .read_iter = hung_up_tty_read, .write_iter = hung_up_tty_write, .poll = hung_up_tty_poll, @@ -2225,6 +2219,12 @@ static int __tty_fasync(int fd, struct file *filp, int on) if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync")) goto out; + if (on) { + retval = file_f_owner_allocate(filp); + if (retval) + goto out; + } + retval = fasync_helper(fd, filp, on, &tty->fasync); if (retval <= 0) goto out; @@ -3567,7 +3567,7 @@ static ssize_t show_cons_active(struct device *dev, for_each_console(c) { if (!c->device) continue; - if (!c->write) + if (!(c->flags & CON_NBCON) && !c->write) continue; if ((c->flags & CON_ENABLED) == 0) continue; diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index da33c6c4691c06..79b33d998d4359 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -48,7 +48,7 @@ #include #include -#include +#include #define HEADER_SIZE 4u #define CON_BUF_SIZE (IS_ENABLED(CONFIG_BASE_SMALL) ? 256 : PAGE_SIZE) diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c index 1695404170790d..55db38e75cc47a 100644 --- a/drivers/ufs/core/ufs-fault-injection.c +++ b/drivers/ufs/core/ufs-fault-injection.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include "ufs-fault-injection.h" diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 5891cdacd0b3c5..8402151330fef8 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -7,7 +7,7 @@ * Can Guo */ -#include +#include #include #include #include diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index e80a32421a8c4c..265f21133b633e 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include @@ -198,6 +198,24 @@ static u32 ufshcd_us_to_ahit(unsigned int timer) FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale); } +static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg) +{ + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + ufshcd_hold(hba); + *val = ufshcd_readl(hba, reg); + ufshcd_release(hba); + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + return 0; +} + static ssize_t auto_hibern8_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -208,23 +226,11 @@ static ssize_t auto_hibern8_show(struct device *dev, if (!ufshcd_is_auto_hibern8_supported(hba)) return -EOPNOTSUPP; - down(&hba->host_sem); - if (!ufshcd_is_user_access_allowed(hba)) { - ret = -EBUSY; - goto out; - } - - pm_runtime_get_sync(hba->dev); - ufshcd_hold(hba); - ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); - ufshcd_release(hba); - pm_runtime_put_sync(hba->dev); - - ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit)); + ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); + if (ret) + return ret; -out: - up(&hba->host_sem); - return ret; + return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit)); } static ssize_t auto_hibern8_store(struct device *dev, @@ -519,6 +525,58 @@ static const struct attribute_group ufs_sysfs_capabilities_group = { .attrs = ufs_sysfs_capabilities_attrs, }; +static ssize_t version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = 
dev_get_drvdata(dev); + + return sysfs_emit(buf, "0x%x\n", hba->ufs_version); +} + +static ssize_t product_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + u32 val; + struct ufs_hba *hba = dev_get_drvdata(dev); + + ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID); + if (ret) + return ret; + + return sysfs_emit(buf, "0x%x\n", val); +} + +static ssize_t man_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + u32 val; + struct ufs_hba *hba = dev_get_drvdata(dev); + + ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID); + if (ret) + return ret; + + return sysfs_emit(buf, "0x%x\n", val); +} + +static DEVICE_ATTR_RO(version); +static DEVICE_ATTR_RO(product_id); +static DEVICE_ATTR_RO(man_id); + +static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = { + &dev_attr_version.attr, + &dev_attr_product_id.attr, + &dev_attr_man_id.attr, + NULL +}; + +static const struct attribute_group ufs_sysfs_ufshci_group = { + .name = "ufshci_capabilities", + .attrs = ufs_sysfs_ufshci_cap_attrs, +}; + static ssize_t monitor_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1502,6 +1560,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = { static const struct attribute_group *ufs_sysfs_groups[] = { &ufs_sysfs_default_group, &ufs_sysfs_capabilities_group, + &ufs_sysfs_ufshci_group, &ufs_sysfs_monitor_group, &ufs_sysfs_power_info_group, &ufs_sysfs_device_descriptor_group, diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h new file mode 100644 index 00000000000000..84deca2b841d9c --- /dev/null +++ b/drivers/ufs/core/ufs_trace.h @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ufs + +#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_UFS_H + +#include +#include + +#define str_opcode(opcode) \ + __print_symbolic(opcode, \ + { WRITE_16, "WRITE_16" }, \ + { WRITE_10, "WRITE_10" }, \ + { READ_16, "READ_16" }, \ + { READ_10, "READ_10" }, \ + { SYNCHRONIZE_CACHE, "SYNC" }, \ + { UNMAP, "UNMAP" }) + +#define UFS_LINK_STATES \ + EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \ + EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \ + EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE") + +#define UFS_PWR_MODES \ + EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \ + EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \ + EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \ + EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE") + +#define UFSCHD_CLK_GATING_STATES \ + EM(CLKS_OFF, "CLKS_OFF") \ + EM(CLKS_ON, "CLKS_ON") \ + EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \ + EMe(REQ_CLKS_ON, "REQ_CLKS_ON") + +#define UFS_CMD_TRACE_STRINGS \ + EM(UFS_CMD_SEND, "send_req") \ + EM(UFS_CMD_COMP, "complete_rsp") \ + EM(UFS_DEV_COMP, "dev_complete") \ + EM(UFS_QUERY_SEND, "query_send") \ + EM(UFS_QUERY_COMP, "query_complete") \ + EM(UFS_QUERY_ERR, "query_complete_err") \ + EM(UFS_TM_SEND, "tm_send") \ + EM(UFS_TM_COMP, "tm_complete") \ + EMe(UFS_TM_ERR, "tm_complete_err") + +#define UFS_CMD_TRACE_TSF_TYPES \ + EM(UFS_TSF_CDB, "CDB") \ + EM(UFS_TSF_OSF, "OSF") \ + EM(UFS_TSF_TM_INPUT, "TM_INPUT") \ + EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT") + +/* Enums require being exported to userspace, for user tool parsing */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +UFS_LINK_STATES; +UFS_PWR_MODES; +UFSCHD_CLK_GATING_STATES; +UFS_CMD_TRACE_STRINGS +UFS_CMD_TRACE_TSF_TYPES + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. 
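The EM()/EMe() dance above is the usual trace-header X-macro pattern: one list of entries is expanded twice, first as TRACE_DEFINE_ENUM() statements so userspace tools learn the numeric values, then as { value, "string" } pairs for __print_symbolic(). Outside of tracing the same idea looks like this (purely illustrative, not part of the new file):

/* One authoritative list of states. */
#define DEMO_STATES             \
        EM(DEMO_OFF, "off")     \
        EMe(DEMO_ON, "on")

/* First expansion: enumerators. */
#define EM(a, b)        a,
#define EMe(a, b)       a
enum demo_state { DEMO_STATES };
#undef EM
#undef EMe

/* Second expansion: a value-to-name table built from the same list. */
#define EM(a, b)        { a, b },
#define EMe(a, b)       { a, b }
static const struct { int value; const char *name; } demo_state_names[] = {
        DEMO_STATES
};

Keeping the list in one place is what prevents the enum and the printed names from drifting apart.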
+ */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +#define show_ufs_cmd_trace_str(str_t) \ + __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS) +#define show_ufs_cmd_trace_tsf(tsf) \ + __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES) + +TRACE_EVENT(ufshcd_clk_gating, + + TP_PROTO(const char *dev_name, int state), + + TP_ARGS(dev_name, state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(int, state) + ), + + TP_fast_assign( + __assign_str(dev_name); + __entry->state = state; + ), + + TP_printk("%s: gating state changed to %s", + __get_str(dev_name), + __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES)) +); + +TRACE_EVENT(ufshcd_clk_scaling, + + TP_PROTO(const char *dev_name, const char *state, const char *clk, + u32 prev_state, u32 curr_state), + + TP_ARGS(dev_name, state, clk, prev_state, curr_state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(state, state) + __string(clk, clk) + __field(u32, prev_state) + __field(u32, curr_state) + ), + + TP_fast_assign( + __assign_str(dev_name); + __assign_str(state); + __assign_str(clk); + __entry->prev_state = prev_state; + __entry->curr_state = curr_state; + ), + + TP_printk("%s: %s %s from %u to %u Hz", + __get_str(dev_name), __get_str(state), __get_str(clk), + __entry->prev_state, __entry->curr_state) +); + +TRACE_EVENT(ufshcd_auto_bkops_state, + + TP_PROTO(const char *dev_name, const char *state), + + TP_ARGS(dev_name, state), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(state, state) + ), + + TP_fast_assign( + __assign_str(dev_name); + __assign_str(state); + ), + + TP_printk("%s: auto bkops - %s", + __get_str(dev_name), __get_str(state)) +); + +DECLARE_EVENT_CLASS(ufshcd_profiling_template, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + + TP_ARGS(dev_name, profile_info, time_us, err), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(profile_info, profile_info) + __field(s64, time_us) + __field(int, err) + ), + + TP_fast_assign( + __assign_str(dev_name); + __assign_str(profile_info); + __entry->time_us = time_us; + __entry->err = err; + ), + + TP_printk("%s: %s: took %lld usecs, err %d", + __get_str(dev_name), __get_str(profile_info), + __entry->time_us, __entry->err) +); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + +DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling, + TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us, + int err), + TP_ARGS(dev_name, profile_info, time_us, err)); + +DECLARE_EVENT_CLASS(ufshcd_template, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + + TP_ARGS(dev_name, err, usecs, dev_state, link_state), + + TP_STRUCT__entry( + __field(s64, usecs) + __field(int, err) + __string(dev_name, dev_name) + __field(int, dev_state) + __field(int, link_state) + ), + + TP_fast_assign( + __entry->usecs = usecs; + __entry->err = err; + __assign_str(dev_name); + __entry->dev_state = dev_state; + __entry->link_state = link_state; + ), + + TP_printk( + "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d", + __get_str(dev_name), + __entry->usecs, + 
__print_symbolic(__entry->dev_state, UFS_PWR_MODES), + __print_symbolic(__entry->link_state, UFS_LINK_STATES), + __entry->err + ) +); + +DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_system_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_init, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume, + TP_PROTO(const char *dev_name, int err, s64 usecs, + int dev_state, int link_state), + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); + +TRACE_EVENT(ufshcd_command, + TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t, + unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len, + u32 intr, u64 lba, u8 opcode, u8 group_id), + + TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba, + opcode, group_id), + + TP_STRUCT__entry( + __field(struct scsi_device *, sdev) + __field(enum ufs_trace_str_t, str_t) + __field(unsigned int, tag) + __field(u32, doorbell) + __field(u32, hwq_id) + __field(u32, intr) + __field(u64, lba) + __field(int, transfer_len) + __field(u8, opcode) + __field(u8, group_id) + ), + + TP_fast_assign( + __entry->sdev = sdev; + __entry->str_t = str_t; + __entry->tag = tag; + __entry->doorbell = doorbell; + __entry->hwq_id = hwq_id; + __entry->intr = intr; + __entry->lba = lba; + __entry->transfer_len = transfer_len; + __entry->opcode = opcode; + __entry->group_id = group_id; + ), + + TP_printk( + "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d", + show_ufs_cmd_trace_str(__entry->str_t), + dev_name(&__entry->sdev->sdev_dev), __entry->tag, + __entry->doorbell, __entry->transfer_len, __entry->intr, + __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode), + (u32)__entry->group_id, __entry->hwq_id + ) +); + +TRACE_EVENT(ufshcd_uic_command, + TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd, + u32 arg1, u32 arg2, u32 arg3), + + TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(enum ufs_trace_str_t, str_t) + __field(u32, cmd) + __field(u32, arg1) + __field(u32, 
arg2) + __field(u32, arg3) + ), + + TP_fast_assign( + __assign_str(dev_name); + __entry->str_t = str_t; + __entry->cmd = cmd; + __entry->arg1 = arg1; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + ), + + TP_printk( + "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x", + show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), + __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3 + ) +); + +TRACE_EVENT(ufshcd_upiu, + TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr, + void *tsf, enum ufs_trace_tsf_t tsf_t), + + TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(enum ufs_trace_str_t, str_t) + __array(unsigned char, hdr, 12) + __array(unsigned char, tsf, 16) + __field(enum ufs_trace_tsf_t, tsf_t) + ), + + TP_fast_assign( + __assign_str(dev_name); + __entry->str_t = str_t; + memcpy(__entry->hdr, hdr, sizeof(__entry->hdr)); + memcpy(__entry->tsf, tsf, sizeof(__entry->tsf)); + __entry->tsf_t = tsf_t; + ), + + TP_printk( + "%s: %s: HDR:%s, %s:%s", + show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), + __print_hex(__entry->hdr, sizeof(__entry->hdr)), + show_ufs_cmd_trace_tsf(__entry->tsf_t), + __print_hex(__entry->tsf, sizeof(__entry->tsf)) + ) +); + +TRACE_EVENT(ufshcd_exception_event, + + TP_PROTO(const char *dev_name, u16 status), + + TP_ARGS(dev_name, status), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(u16, status) + ), + + TP_fast_assign( + __assign_str(dev_name); + __entry->status = status; + ), + + TP_printk("%s: status 0x%x", + __get_str(dev_name), __entry->status + ) +); + +#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/ufs/core +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE ufs_trace + +/* This part must be outside protection */ +#include diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index a6f818cdef0e77..fc55fdab526b4a 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -36,10 +36,10 @@ #include "ufs-fault-injection.h" #include "ufs_bsg.h" #include "ufshcd-crypto.h" -#include +#include #define CREATE_TRACE_POINTS -#include +#include "ufs_trace.h" #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ UTP_TASK_REQ_COMPL |\ @@ -51,8 +51,10 @@ /* UIC command timeout, unit: ms */ -#define UIC_CMD_TIMEOUT 500 - +enum { + UIC_CMD_TIMEOUT_DEFAULT = 500, + UIC_CMD_TIMEOUT_MAX = 2000, +}; /* NOP OUT retries waiting for NOP IN response */ #define NOP_OUT_RETRIES 10 /* Timeout after 50 msecs if NOP OUT hangs without response */ @@ -116,6 +118,23 @@ static bool is_mcq_supported(struct ufs_hba *hba) module_param(use_mcq_mode, bool, 0644); MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default"); +static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT; + +static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT, + UIC_CMD_TIMEOUT_MAX); +} + +static const struct kernel_param_ops uic_cmd_timeout_ops = { + .set = uic_cmd_timeout_set, + .get = param_get_uint, +}; + +module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644); +MODULE_PARM_DESC(uic_cmd_timeout, + "UFS UIC command timeout in milliseconds. Defaults to 500ms. 
Supported values range from 500ms to 2 seconds inclusively"); + #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ ({ \ int _ret; \ @@ -1785,8 +1804,6 @@ static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) static void ufshcd_init_clk_scaling(struct ufs_hba *hba) { - char wq_name[sizeof("ufs_clkscaling_00")]; - if (!ufshcd_is_clkscaling_supported(hba)) return; @@ -1798,9 +1815,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba) INIT_WORK(&hba->clk_scaling.resume_work, ufshcd_clk_scaling_resume_work); - snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", - hba->host->host_no); - hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); + hba->clk_scaling.workq = alloc_ordered_workqueue( + "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no); hba->clk_scaling.is_initialized = true; } @@ -2124,8 +2140,6 @@ static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) static void ufshcd_init_clk_gating(struct ufs_hba *hba) { - char wq_name[sizeof("ufs_clk_gating_00")]; - if (!ufshcd_is_clkgating_allowed(hba)) return; @@ -2135,10 +2149,9 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba) INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); - snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d", - hba->host->host_no); - hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, - WQ_MEM_RECLAIM | WQ_HIGHPRI); + hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue( + "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI, + hba->host->host_no); ufshcd_init_clk_gating_sysfs(hba); @@ -2452,7 +2465,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) { u32 val; int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY, - 500, UIC_CMD_TIMEOUT * 1000, false, hba, + 500, uic_cmd_timeout * 1000, false, hba, REG_CONTROLLER_STATUS); return ret == 0; } @@ -2512,7 +2525,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) lockdep_assert_held(&hba->uic_cmd_mutex); if (wait_for_completion_timeout(&uic_cmd->done, - msecs_to_jiffies(UIC_CMD_TIMEOUT))) { + msecs_to_jiffies(uic_cmd_timeout))) { ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; } else { ret = -ETIMEDOUT; @@ -4285,7 +4298,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) } if (!wait_for_completion_timeout(hba->uic_async_done, - msecs_to_jiffies(UIC_CMD_TIMEOUT))) { + msecs_to_jiffies(uic_cmd_timeout))) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", cmd->command, cmd->argument3); @@ -5876,12 +5889,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) /** * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status * @hba: per-adapter instance - * @status: bkops_status value * * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn * flag in the device to permit background operations if the device - * bkops_status is greater than or equal to "status" argument passed to - * this function, disable otherwise. + * bkops_status is greater than or equal to the "hba->urgent_bkops_lvl", + * disable otherwise. * * Return: 0 for success, non-zero in case of failure. * @@ -5889,11 +5901,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) * to know whether auto bkops is enabled or disabled after this function * returns control to it. 
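The uic_cmd_timeout parameter added above shows the stock recipe for a writable module parameter with range checking: a kernel_param_ops whose .set callback clamps through param_set_uint_minmax(). Transplanted to a made-up driver (all names below are illustrative only):

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int demo_timeout_ms = 50;

static int demo_timeout_set(const char *val, const struct kernel_param *kp)
{
        /* Reject writes outside 10..100 ms. */
        return param_set_uint_minmax(val, kp, 10, 100);
}

static const struct kernel_param_ops demo_timeout_ops = {
        .set = demo_timeout_set,
        .get = param_get_uint,
};

module_param_cb(demo_timeout_ms, &demo_timeout_ops, &demo_timeout_ms, 0644);
MODULE_PARM_DESC(demo_timeout_ms, "Demo timeout in milliseconds (10-100)");

Because the permission is 0644, later writes through /sys/module/.../parameters/ go through the same bounds check.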
*/ -static int ufshcd_bkops_ctrl(struct ufs_hba *hba, - enum bkops_status status) +static int ufshcd_bkops_ctrl(struct ufs_hba *hba) { - int err; + enum bkops_status status = hba->urgent_bkops_lvl; u32 curr_status = 0; + int err; err = ufshcd_get_bkops_status(hba, &curr_status); if (err) { @@ -5915,23 +5927,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, return err; } -/** - * ufshcd_urgent_bkops - handle urgent bkops exception event - * @hba: per-adapter instance - * - * Enable fBackgroundOpsEn flag in the device to permit background - * operations. - * - * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled - * and negative error value for any other failure. - * - * Return: 0 upon success; < 0 upon failure. - */ -static int ufshcd_urgent_bkops(struct ufs_hba *hba) -{ - return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); -} - static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) { return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, @@ -9692,7 +9687,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) * allow background operations if bkops status shows * that performance might be impacted. */ - ret = ufshcd_urgent_bkops(hba); + ret = ufshcd_bkops_ctrl(hba); if (ret) { /* * If return err in suspend flow, IO will hang. @@ -9881,7 +9876,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) * If BKOPs operations are urgently needed at this moment then * keep auto-bkops enabled or else disable it. */ - ufshcd_urgent_bkops(hba); + ufshcd_bkops_ctrl(hba); if (hba->ee_usr_mask) ufshcd_write_ee_control(hba); @@ -10395,7 +10390,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) int err; struct Scsi_Host *host = hba->host; struct device *dev = hba->dev; - char eh_wq_name[sizeof("ufs_eh_wq_00")]; /* * dev_set_drvdata() must be called before any callbacks are registered @@ -10462,9 +10456,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->max_pwr_info.is_valid = false; /* Initialize work queues */ - snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", - hba->host->host_no); - hba->eh_wq = create_singlethread_workqueue(eh_wq_name); + hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM, + hba->host->host_no); if (!hba->eh_wq) { dev_err(hba->dev, "%s: failed to create eh workqueue\n", __func__); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 9ec318ef52bff2..5867e633856233 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -8,7 +8,7 @@ * */ -#include +#include #include #include #include diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index c87fdc849c627e..ecdfff2456e31d 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -93,7 +93,7 @@ static const struct __ufs_qcom_bw_table { [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 }, - [MODE_MAX][0][0] = { 7643136, 307200 }, + [MODE_MAX][0][0] = { 7643136, 819200 }, }; static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c index a3e69ecafd2750..1f4f30d6cb4234 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.c +++ b/drivers/ufs/host/ufshcd-pltfrm.c @@ -31,8 +31,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) const char 
*name; u32 *clkfreq = NULL; struct ufs_clk_info *clki; - int len = 0; - size_t sz = 0; + ssize_t sz = 0; if (!np) goto out; @@ -50,15 +49,12 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) if (cnt <= 0) goto out; - if (!of_get_property(np, "freq-table-hz", &len)) { + sz = of_property_count_u32_elems(np, "freq-table-hz"); + if (sz <= 0) { dev_info(dev, "freq-table-hz property not specified\n"); goto out; } - if (len <= 0) - goto out; - - sz = len / sizeof(*clkfreq); if (sz != 2 * cnt) { dev_err(dev, "%s len mismatch\n", "freq-table-hz"); ret = -EINVAL; @@ -272,10 +268,10 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba) const char **clk_names; int cnt, i, ret; - if (!of_find_property(np, "operating-points-v2", NULL)) + if (!of_property_present(np, "operating-points-v2")) return 0; - if (of_find_property(np, "freq-table-hz", NULL)) { + if (of_property_present(np, "freq-table-hz")) { dev_err(dev, "%s: operating-points and freq-table-hz are incompatible\n", __func__); return -EINVAL; diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 20d2a55cb40b87..004a549c6c7d42 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -118,7 +118,7 @@ static const struct sysfs_ops map_sysfs_ops = { .show = map_type_show, }; -static struct kobj_type map_attr_type = { +static const struct kobj_type map_attr_type = { .release = map_release, .sysfs_ops = &map_sysfs_ops, .default_groups = map_groups, @@ -207,7 +207,7 @@ static const struct sysfs_ops portio_sysfs_ops = { .show = portio_type_show, }; -static struct kobj_type portio_attr_type = { +static const struct kobj_type portio_attr_type = { .release = portio_release, .sysfs_ops = &portio_sysfs_ops, .default_groups = portio_groups, diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 8f3b9a0a38e1dd..0dd85d2635b998 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include "usbatm.h" diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index 16703815be0c48..e8e43c38aa1b43 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include "usbatm.h" diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c index 1f6320d98a76ba..591d149de8f3d9 100644 --- a/drivers/usb/cdns3/cdns3-pci-wrap.c +++ b/drivers/usb/cdns3/cdns3-pci-wrap.c @@ -37,8 +37,7 @@ struct cdns3_wrap { #define PCI_DRIVER_NAME "cdns3-pci-usbss" #define PLAT_DRIVER_NAME "cdns-usb3" -#define CDNS_VENDOR_ID 0x17cd -#define CDNS_DEVICE_ID 0x0100 +#define PCI_DEVICE_ID_CDNS_USB3 0x0100 static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev) { @@ -190,7 +189,7 @@ static void cdns3_pci_remove(struct pci_dev *pdev) } static const struct pci_device_id cdns3_pci_ids[] = { - { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), }, + { PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_USB3) }, { 0, } }; diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index 225540fc81ba11..2d05368a6745a1 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -28,10 +28,11 @@ #define PCI_DRIVER_NAME "cdns-pci-usbssp" #define PLAT_DRIVER_NAME "cdns-usbssp" -#define CDNS_VENDOR_ID 0x17cd -#define CDNS_DEVICE_ID 0x0200 -#define CDNS_DRD_ID 0x0100 -#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80) +#define PCI_DEVICE_ID_CDNS_USB3 0x0100 +#define PCI_DEVICE_ID_CDNS_UDC 0x0200 + +#define PCI_CLASS_SERIAL_USB_CDNS_USB3 
(PCI_CLASS_SERIAL_USB << 8 | 0x80) +#define PCI_CLASS_SERIAL_USB_CDNS_UDC PCI_CLASS_SERIAL_USB_DEVICE static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev) { @@ -40,10 +41,10 @@ static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev) * Platform has two function. The fist keeps resources for * Host/Device while the secon keeps resources for DRD/OTG. */ - if (pdev->device == CDNS_DEVICE_ID) - return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL); - else if (pdev->device == CDNS_DRD_ID) - return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL); + if (pdev->device == PCI_DEVICE_ID_CDNS_UDC) + return pci_get_device(pdev->vendor, PCI_DEVICE_ID_CDNS_USB3, NULL); + if (pdev->device == PCI_DEVICE_ID_CDNS_USB3) + return pci_get_device(pdev->vendor, PCI_DEVICE_ID_CDNS_UDC, NULL); return NULL; } @@ -220,12 +221,12 @@ static const struct dev_pm_ops cdnsp_pci_pm_ops = { }; static const struct pci_device_id cdnsp_pci_ids[] = { - { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID }, - { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, - CDNS_DRD_IF, PCI_ANY_ID }, - { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID, - CDNS_DRD_IF, PCI_ANY_ID }, + { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_UDC), + .class = PCI_CLASS_SERIAL_USB_CDNS_UDC }, + { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_UDC), + .class = PCI_CLASS_SERIAL_USB_CDNS_USB3 }, + { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_USB3), + .class = PCI_CLASS_SERIAL_USB_CDNS_USB3 }, { 0, } }; diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index dbd83d321bca01..46852529499d16 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -718,7 +718,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev, seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb, cur_td->last_trb, hw_deq); - if (seg && (pep->ep_state & EP_ENABLED)) + if (seg && (pep->ep_state & EP_ENABLED) && + !(pep->ep_state & EP_DIS_IN_RROGRESS)) cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id, cur_td, &deq_state); else @@ -736,7 +737,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev, * During disconnecting all endpoint will be disabled so we don't * have to worry about updating dequeue pointer. */ - if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) { + if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING || + pep->ep_state & EP_DIS_IN_RROGRESS) { status = -ESHUTDOWN; ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state); } diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index ceca4d839dfd42..7ba760ee62e331 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -62,7 +62,9 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = { .resume_quirk = xhci_cdns3_resume_quirk, }; -static const struct xhci_plat_priv xhci_plat_cdnsp_xhci; +static const struct xhci_plat_priv xhci_plat_cdnsp_xhci = { + .quirks = XHCI_CDNS_SCTX_QUIRK, +}; static int __cdns_host_init(struct cdns *cdns) { diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index bdc04ce919f7a5..c64ab0e07ea030 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -128,7 +128,7 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) * In case the fsl,usbmisc property is not present this device doesn't * need usbmisc. 
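Several hunks in this patch (ufshcd-pltfrm.c above, ci_hdrc_imx.c here) replace of_get_property()/of_find_property() with of_property_present() when only the presence of a property matters. A one-liner sketch of that idiom (the helper name is invented):

#include <linux/of.h>
#include <linux/types.h>

/* Illustrative helper: true if the node carries an "fsl,usbmisc" phandle. */
static bool demo_needs_usbmisc(const struct device_node *np)
{
        return of_property_present(np, "fsl,usbmisc");
}

of_property_present() returns bool, so the call reads as the presence test it is, with no dummy length argument.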
Return NULL (which is no error here) */ - if (!of_get_property(np, "fsl,usbmisc", NULL)) + if (!of_property_present(np, "fsl,usbmisc")) return NULL; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); diff --git a/drivers/usb/chipidea/ci_hdrc_npcm.c b/drivers/usb/chipidea/ci_hdrc_npcm.c index b14127873c559e..3e5e05dbda8904 100644 --- a/drivers/usb/chipidea/ci_hdrc_npcm.c +++ b/drivers/usb/chipidea/ci_hdrc_npcm.c @@ -18,7 +18,7 @@ struct npcm_udc_data { struct ci_hdrc_platform_data pdata; }; -static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned event) +static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned int event) { struct device *dev = ci->dev->parent; @@ -28,7 +28,7 @@ static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned event) hw_write(ci, OP_USBMODE, 0xffffffff, 0x0); break; default: - dev_dbg(dev, "unknown ci_hdrc event (%d)\n",event); + dev_dbg(dev, "unknown ci_hdrc event (%d)\n", event); break; } diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 2d7f616270c17b..69ef3cd8d4f836 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -86,7 +86,7 @@ static int hw_device_state(struct ci_hdrc *ci, u32 dma) hw_write(ci, OP_ENDPTLISTADDR, ~0, dma); /* interrupt, error, port change, reset, sleep/suspend */ hw_write(ci, OP_USBINTR, ~0, - USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI); + USBi_UI|USBi_UEI|USBi_PCI|USBi_URI); } else { hw_write(ci, OP_USBINTR, ~0, 0); } @@ -877,6 +877,7 @@ __releases(ci->lock) __acquires(ci->lock) { int retval; + u32 intr; spin_unlock(&ci->lock); if (ci->gadget.speed != USB_SPEED_UNKNOWN) @@ -890,6 +891,11 @@ __acquires(ci->lock) if (retval) goto done; + /* clear SLI */ + hw_write(ci, OP_USBSTS, USBi_SLI, USBi_SLI); + intr = hw_read(ci, OP_USBINTR, ~0); + hw_write(ci, OP_USBINTR, ~0, intr | USBi_SLI); + ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC); if (ci->status == NULL) retval = -ENOMEM; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 0c1b69d944ca45..6b37d1c47fce13 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include @@ -962,10 +962,12 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss) struct acm *acm = tty->driver_data; ss->line = acm->minor; + mutex_lock(&acm->port.mutex); ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? 
ASYNC_CLOSING_WAIT_NONE : jiffies_to_msecs(acm->port.closing_wait) / 10; + mutex_unlock(&acm->port.mutex); return 0; } diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 6830be4419e20a..86ee39db013f39 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #define DRIVER_AUTHOR "Oliver Neukum" diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 6bd9fe56538589..34e46ef308abfd 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -754,7 +754,7 @@ static struct urb *usbtmc_create_urb(void) if (!urb) return NULL; - dmabuf = kmalloc(bufsize, GFP_KERNEL); + dmabuf = kzalloc(bufsize, GFP_KERNEL); if (!dmabuf) { usb_free_urb(urb); return NULL; diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 59b55d6cf49053..b7bea1015d7c1d 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -107,19 +107,18 @@ EXPORT_SYMBOL_GPL(usb_speed_string); */ enum usb_device_speed usb_get_maximum_speed(struct device *dev) { - const char *maximum_speed; + const char *p = "maximum-speed"; int ret; - ret = device_property_read_string(dev, "maximum-speed", &maximum_speed); - if (ret < 0) - return USB_SPEED_UNKNOWN; - - ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed); + ret = device_property_match_property_string(dev, p, ssp_rate, ARRAY_SIZE(ssp_rate)); if (ret > 0) return USB_SPEED_SUPER_PLUS; - ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed); - return (ret < 0) ? USB_SPEED_UNKNOWN : ret; + ret = device_property_match_property_string(dev, p, speed_names, ARRAY_SIZE(speed_names)); + if (ret > 0) + return ret; + + return USB_SPEED_UNKNOWN; } EXPORT_SYMBOL_GPL(usb_get_maximum_speed); @@ -276,14 +275,13 @@ EXPORT_SYMBOL_GPL(usb_decode_interval); */ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) { - struct device_node *controller = NULL; + struct device_node *controller; struct of_phandle_args args; const char *dr_mode; int index; int err; - do { - controller = of_find_node_with_property(controller, "phys"); + for_each_node_with_property(controller, "phys") { if (!of_device_is_available(controller)) continue; index = 0; @@ -306,7 +304,7 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) goto finish; index++; } while (args.np); - } while (controller); + } finish: err = of_property_read_string(controller, "dr_mode", &dr_mode); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 1ff7d901fedead..500dc35e64774d 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 7f8a912d4fe2a2..21585ed89ef849 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -142,6 +142,53 @@ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable) } EXPORT_SYMBOL_GPL(usb_acpi_set_power_state); +/** + * usb_acpi_add_usb4_devlink - add device link to USB4 Host Interface for tunneled USB3 devices + * + * @udev: Tunneled USB3 device connected to a roothub. + * + * Adds a device link between a tunneled USB3 device and the USB4 Host Interface + * device to ensure correct runtime PM suspend and resume order. This function + * should only be called for tunneled USB3 devices. 
+ * The USB4 Host Interface this tunneled device depends on is found from the roothub + * port ACPI device specific data _DSD entry. + * + * Return: negative error code on failure, 0 otherwise + */ +static int usb_acpi_add_usb4_devlink(struct usb_device *udev) +{ + const struct device_link *link; + struct usb_port *port_dev; + struct usb_hub *hub; + + if (!udev->parent || udev->parent->parent) + return 0; + + hub = usb_hub_to_struct_hub(udev->parent); + port_dev = hub->ports[udev->portnum - 1]; + + struct fwnode_handle *nhi_fwnode __free(fwnode_handle) = + fwnode_find_reference(dev_fwnode(&port_dev->dev), "usb4-host-interface", 0); + + if (IS_ERR(nhi_fwnode)) + return 0; + + link = device_link_add(&port_dev->child->dev, nhi_fwnode->dev, + DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_RPM_ACTIVE | + DL_FLAG_PM_RUNTIME); + if (!link) { + dev_err(&port_dev->dev, "Failed to created device link from %s to %s\n", + dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev)); + return -EINVAL; + } + + dev_dbg(&port_dev->dev, "Created device link from %s to %s\n", + dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev)); + + return 0; +} + /* * Private to usb-acpi, all the core needs to know is that * port_dev->location is non-zero when it has been set by the firmware. @@ -262,6 +309,12 @@ usb_acpi_find_companion_for_device(struct usb_device *udev) if (!hub) return NULL; + + /* Tunneled USB3 devices depend on USB4 Host Interface, set device link to it */ + if (udev->speed >= USB_SPEED_SUPER && + udev->tunnel_mode != USB_LINK_NATIVE) + usb_acpi_add_usb4_devlink(udev); + /* * This is an embedded USB device connected to a port and such * devices share port's ACPI companion. diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c index 7c82ab590401a8..3116ac72747f78 100644 --- a/drivers/usb/dwc2/debugfs.c +++ b/drivers/usb/dwc2/debugfs.c @@ -702,6 +702,7 @@ static int params_show(struct seq_file *seq, void *v) print_param(seq, p, uframe_sched); print_param(seq, p, external_id_pin_ctl); print_param(seq, p, power_down); + print_param(seq, p, no_clock_gating); print_param(seq, p, lpm); print_param(seq, p, lpm_clock_gating); print_param(seq, p, besl); diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index a8605b02115b1c..1ad8fa3f862a15 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -127,6 +127,15 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) role = USB_ROLE_DEVICE; } + if ((IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)) && + dwc2_is_device_mode(hsotg) && + hsotg->lx_state == DWC2_L2 && + hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && + hsotg->bus_suspended && + !hsotg->params.no_clock_gating) + dwc2_gadget_exit_clock_gating(hsotg, 0); + if (role == USB_ROLE_HOST) { already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index a937eadbc9b3e9..68226defdc60af 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -23,6 +23,7 @@ static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg) p->max_transfer_size = 65535; p->max_packet_count = 511; p->ahbcfg = 0x10; + p->no_clock_gating = true; } static void dwc2_set_his_params(struct dwc2_hsotg *hsotg) @@ -352,6 +353,7 @@ const struct of_device_id dwc2_of_match_table[] = { MODULE_DEVICE_TABLE(of, dwc2_of_match_table); const struct acpi_device_id dwc2_acpi_match[] = { + /* This ID refers to the same USB IP as of_device_id 
brcm,bcm2835-usb */ { "BCM2848", (kernel_ulong_t)dwc2_set_bcm_params }, { }, }; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 7b84416dfc2b11..c1b7209b94836c 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -469,18 +469,6 @@ static int dwc2_driver_probe(struct platform_device *dev) spin_lock_init(&hsotg->lock); - hsotg->irq = platform_get_irq(dev, 0); - if (hsotg->irq < 0) - return hsotg->irq; - - dev_dbg(hsotg->dev, "registering common handler for irq%d\n", - hsotg->irq); - retval = devm_request_irq(hsotg->dev, hsotg->irq, - dwc2_handle_common_intr, IRQF_SHARED, - dev_name(hsotg->dev), hsotg); - if (retval) - return retval; - hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus"); if (IS_ERR(hsotg->vbus_supply)) { retval = PTR_ERR(hsotg->vbus_supply); @@ -524,6 +512,20 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) goto error; + hsotg->irq = platform_get_irq(dev, 0); + if (hsotg->irq < 0) { + retval = hsotg->irq; + goto error; + } + + dev_dbg(hsotg->dev, "registering common handler for irq%d\n", + hsotg->irq); + retval = devm_request_irq(hsotg->dev, hsotg->irq, + dwc2_handle_common_intr, IRQF_SHARED, + dev_name(hsotg->dev), hsotg); + if (retval) + goto error; + /* * For OTG cores, set the force mode bits to reflect the value * of dr_mode. Force mode bits should not be touched at any diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c index 8ee44806850314..64c0cd1995aa06 100644 --- a/drivers/usb/dwc3/dwc3-imx8mp.c +++ b/drivers/usb/dwc3/dwc3-imx8mp.c @@ -5,6 +5,7 @@ * Copyright (c) 2020 NXP. */ +#include #include #include #include @@ -96,7 +97,8 @@ static void imx8mp_configure_glue(struct dwc3_imx8mp *dwc3_imx) writel(value, dwc3_imx->glue_base + USB_CTRL1); } -static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx) +static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx, + pm_message_t msg) { struct dwc3 *dwc3 = platform_get_drvdata(dwc3_imx->dwc3); u32 val; @@ -106,12 +108,14 @@ static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx) val = readl(dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL); - if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci) - val |= USB_WAKEUP_EN | USB_WAKEUP_SS_CONN | - USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN; - else if (dwc3->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) + if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci) { + val |= USB_WAKEUP_EN | USB_WAKEUP_DPDM_EN; + if (PMSG_IS_AUTO(msg)) + val |= USB_WAKEUP_SS_CONN | USB_WAKEUP_U3_EN; + } else { val |= USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN | USB_WAKEUP_VBUS_SRC_SESS_VAL; + } writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL); } @@ -144,10 +148,21 @@ static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx) return IRQ_HANDLED; } +static int dwc3_imx8mp_set_software_node(struct device *dev) +{ + struct property_entry props[3] = { 0 }; + int prop_idx = 0; + + props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk"); + props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk"); + + return device_create_managed_software_node(dev, props, NULL); +} + static int dwc3_imx8mp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *dwc3_np, *node = dev->of_node; + struct device_node *node = dev->of_node; struct dwc3_imx8mp *dwc3_imx; struct resource *res; int err, irq; @@ -178,39 +193,26 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) return 
PTR_ERR(dwc3_imx->glue_base); } - dwc3_imx->hsio_clk = devm_clk_get(dev, "hsio"); - if (IS_ERR(dwc3_imx->hsio_clk)) { - err = PTR_ERR(dwc3_imx->hsio_clk); - dev_err(dev, "Failed to get hsio clk, err=%d\n", err); - return err; - } - - err = clk_prepare_enable(dwc3_imx->hsio_clk); - if (err) { - dev_err(dev, "Failed to enable hsio clk, err=%d\n", err); - return err; - } - - dwc3_imx->suspend_clk = devm_clk_get(dev, "suspend"); - if (IS_ERR(dwc3_imx->suspend_clk)) { - err = PTR_ERR(dwc3_imx->suspend_clk); - dev_err(dev, "Failed to get suspend clk, err=%d\n", err); - goto disable_hsio_clk; - } + dwc3_imx->hsio_clk = devm_clk_get_enabled(dev, "hsio"); + if (IS_ERR(dwc3_imx->hsio_clk)) + return dev_err_probe(dev, PTR_ERR(dwc3_imx->hsio_clk), + "Failed to get hsio clk\n"); - err = clk_prepare_enable(dwc3_imx->suspend_clk); - if (err) { - dev_err(dev, "Failed to enable suspend clk, err=%d\n", err); - goto disable_hsio_clk; - } + dwc3_imx->suspend_clk = devm_clk_get_enabled(dev, "suspend"); + if (IS_ERR(dwc3_imx->suspend_clk)) + return dev_err_probe(dev, PTR_ERR(dwc3_imx->suspend_clk), + "Failed to get suspend clk\n"); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - err = irq; - goto disable_clks; - } + if (irq < 0) + return irq; dwc3_imx->irq = irq; + struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(node, + "snps,dwc3"); + if (!dwc3_np) + return dev_err_probe(dev, -ENODEV, "failed to find dwc3 core child\n"); + imx8mp_configure_glue(dwc3_imx); pm_runtime_set_active(dev); @@ -219,17 +221,17 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) if (err < 0) goto disable_rpm; - dwc3_np = of_get_compatible_child(node, "snps,dwc3"); - if (!dwc3_np) { + err = dwc3_imx8mp_set_software_node(dev); + if (err) { err = -ENODEV; - dev_err(dev, "failed to find dwc3 core child\n"); + dev_err(dev, "failed to create software node\n"); goto disable_rpm; } err = of_platform_populate(node, NULL, NULL, dev); if (err) { dev_err(&pdev->dev, "failed to create dwc3 core\n"); - goto err_node_put; + goto disable_rpm; } dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np); @@ -238,7 +240,6 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) err = -ENODEV; goto depopulate; } - of_node_put(dwc3_np); err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt, IRQF_ONESHOT, dev_name(dev), dwc3_imx); @@ -254,51 +255,39 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev) depopulate: of_platform_depopulate(dev); -err_node_put: - of_node_put(dwc3_np); disable_rpm: pm_runtime_disable(dev); pm_runtime_put_noidle(dev); -disable_clks: - clk_disable_unprepare(dwc3_imx->suspend_clk); -disable_hsio_clk: - clk_disable_unprepare(dwc3_imx->hsio_clk); return err; } static void dwc3_imx8mp_remove(struct platform_device *pdev) { - struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; pm_runtime_get_sync(dev); of_platform_depopulate(dev); - clk_disable_unprepare(dwc3_imx->suspend_clk); - clk_disable_unprepare(dwc3_imx->hsio_clk); - pm_runtime_disable(dev); pm_runtime_put_noidle(dev); } -static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx, - pm_message_t msg) +static int dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx, pm_message_t msg) { if (dwc3_imx->pm_suspended) return 0; /* Wakeup enable */ if (PMSG_IS_AUTO(msg) || device_may_wakeup(dwc3_imx->dev)) - dwc3_imx8mp_wakeup_enable(dwc3_imx); + dwc3_imx8mp_wakeup_enable(dwc3_imx, msg); dwc3_imx->pm_suspended = true; return 0; } -static int __maybe_unused 
dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx, - pm_message_t msg) +static int dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx, pm_message_t msg) { struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3); int ret = 0; @@ -331,7 +320,7 @@ static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx, return ret; } -static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev) +static int dwc3_imx8mp_pm_suspend(struct device *dev) { struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev); int ret; @@ -349,7 +338,7 @@ static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev) return ret; } -static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev) +static int dwc3_imx8mp_pm_resume(struct device *dev) { struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev); int ret; @@ -379,7 +368,7 @@ static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev) return ret; } -static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev) +static int dwc3_imx8mp_runtime_suspend(struct device *dev) { struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev); @@ -388,7 +377,7 @@ static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev) return dwc3_imx8mp_suspend(dwc3_imx, PMSG_AUTO_SUSPEND); } -static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev) +static int dwc3_imx8mp_runtime_resume(struct device *dev) { struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev); @@ -398,9 +387,9 @@ static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev) } static const struct dev_pm_ops dwc3_imx8mp_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume) - SET_RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend, - dwc3_imx8mp_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume) + RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend, dwc3_imx8mp_runtime_resume, + NULL) }; static const struct of_device_id dwc3_imx8mp_of_match[] = { @@ -414,7 +403,7 @@ static struct platform_driver dwc3_imx8mp_driver = { .remove_new = dwc3_imx8mp_remove, .driver = { .name = "imx8mp-dwc3", - .pm = &dwc3_imx8mp_dev_pm_ops, + .pm = pm_ptr(&dwc3_imx8mp_dev_pm_ops), .of_match_table = dwc3_imx8mp_of_match, }, }; diff --git a/drivers/usb/dwc3/dwc3-octeon.c b/drivers/usb/dwc3/dwc3-octeon.c index 6010135e1acce6..1a3b205367fd55 100644 --- a/drivers/usb/dwc3/dwc3-octeon.c +++ b/drivers/usb/dwc3/dwc3-octeon.c @@ -419,7 +419,7 @@ static int dwc3_octeon_probe(struct platform_device *pdev) int ref_clk_sel, ref_clk_fsel, mpll_mul; int power_active_low, power_gpio; int err, len; - u32 clock_rate; + u32 clock_rate, gpio_pwr[3]; if (of_property_read_u32(node, "refclk-frequency", &clock_rate)) { dev_err(dev, "No UCTL \"refclk-frequency\"\n"); @@ -476,21 +476,10 @@ static int dwc3_octeon_probe(struct platform_device *pdev) power_gpio = DWC3_GPIO_POWER_NONE; power_active_low = 0; - if (of_find_property(node, "power", &len)) { - u32 gpio_pwr[3]; - - switch (len) { - case 8: - of_property_read_u32_array(node, "power", gpio_pwr, 2); - break; - case 12: - of_property_read_u32_array(node, "power", gpio_pwr, 3); + len = of_property_read_variable_u32_array(node, "power", gpio_pwr, 2, 3); + if (len > 0) { + if (len == 3) power_active_low = gpio_pwr[2] & 0x01; - break; - default: - dev_err(dev, "invalid power configuration\n"); - return -EINVAL; - } power_gpio = gpio_pwr[1]; } diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 88fb6706a18d38..c1d4b52f25b063 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ 
b/drivers/usb/dwc3/dwc3-qcom.c @@ -4,6 +4,7 @@ * Inspired by dwc3-of-simple.c */ +#include #include #include #include @@ -702,11 +703,12 @@ static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count) static int dwc3_qcom_of_register_core(struct platform_device *pdev) { struct dwc3_qcom *qcom = platform_get_drvdata(pdev); - struct device_node *np = pdev->dev.of_node, *dwc3_np; + struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; int ret; - dwc3_np = of_get_compatible_child(np, "snps,dwc3"); + struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np, + "snps,dwc3"); if (!dwc3_np) { dev_err(dev, "failed to find dwc3 core child\n"); return -ENODEV; @@ -715,7 +717,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) ret = of_platform_populate(np, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to register dwc3 core - %d\n", ret); - goto node_put; + return ret; } qcom->dwc3 = of_find_device_by_node(dwc3_np); @@ -725,9 +727,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) of_platform_depopulate(dev); } -node_put: - of_node_put(dwc3_np); - return ret; } @@ -736,7 +735,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct dwc3_qcom *qcom; - struct resource *res; int ret, i; bool ignore_pipe_clk; bool wakeup_source; @@ -774,9 +772,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev) goto reset_assert; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - qcom->qscratch_base = devm_ioremap_resource(dev, res); + qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(qcom->qscratch_base)) { ret = PTR_ERR(qcom->qscratch_base); goto clk_disable; diff --git a/drivers/usb/dwc3/dwc3-rtk.c b/drivers/usb/dwc3/dwc3-rtk.c index 3cd6b184551ce2..e9c8b032c72cb6 100644 --- a/drivers/usb/dwc3/dwc3-rtk.c +++ b/drivers/usb/dwc3/dwc3-rtk.c @@ -6,6 +6,7 @@ * */ +#include #include #include #include @@ -173,23 +174,20 @@ static const char *const speed_names[] = { static enum usb_device_speed __get_dwc3_maximum_speed(struct device_node *np) { - struct device_node *dwc3_np; const char *maximum_speed; int ret; - dwc3_np = of_get_compatible_child(np, "snps,dwc3"); + struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np, + "snps,dwc3"); if (!dwc3_np) return USB_SPEED_UNKNOWN; ret = of_property_read_string(dwc3_np, "maximum-speed", &maximum_speed); if (ret < 0) - goto out; + return USB_SPEED_UNKNOWN; ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed); -out: - of_node_put(dwc3_np); - return (ret < 0) ? 
USB_SPEED_UNKNOWN : ret; } @@ -276,7 +274,6 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk) struct device_node *node = dev->of_node; struct platform_device *dwc3_pdev; struct device *dwc3_dev; - struct device_node *dwc3_node; enum usb_dr_mode dr_mode; int ret = 0; @@ -290,7 +287,8 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk) return ret; } - dwc3_node = of_get_compatible_child(node, "snps,dwc3"); + struct device_node *dwc3_node __free(device_node) = of_get_compatible_child(node, + "snps,dwc3"); if (!dwc3_node) { dev_err(dev, "failed to find dwc3 core node\n"); ret = -ENODEV; @@ -301,7 +299,7 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk) if (!dwc3_pdev) { dev_err(dev, "failed to find dwc3 core platform_device\n"); ret = -ENODEV; - goto err_node_put; + goto depopulate; } dwc3_dev = &dwc3_pdev->dev; @@ -343,14 +341,11 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk) switch_usb2_role(rtk, rtk->cur_role); platform_device_put(dwc3_pdev); - of_node_put(dwc3_node); return 0; err_pdev_put: platform_device_put(dwc3_pdev); -err_node_put: - of_node_put(dwc3_node); depopulate: of_platform_depopulate(dev); @@ -363,30 +358,18 @@ static int dwc3_rtk_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; void __iomem *regs; - int ret = 0; rtk = devm_kzalloc(dev, sizeof(*rtk), GFP_KERNEL); - if (!rtk) { - ret = -ENOMEM; - goto out; - } + if (!rtk) + return -ENOMEM; platform_set_drvdata(pdev, rtk); rtk->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing memory resource\n"); - ret = -ENODEV; - goto out; - } - - regs = devm_ioremap_resource(dev, res); - if (IS_ERR(regs)) { - ret = PTR_ERR(regs); - goto out; - } + regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(regs)) + return PTR_ERR(regs); rtk->regs = regs; rtk->regs_size = resource_size(res); @@ -394,16 +377,11 @@ static int dwc3_rtk_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { rtk->pm_base = devm_ioremap_resource(dev, res); - if (IS_ERR(rtk->pm_base)) { - ret = PTR_ERR(rtk->pm_base); - goto out; - } + if (IS_ERR(rtk->pm_base)) + return PTR_ERR(rtk->pm_base); } - ret = dwc3_rtk_probe_dwc3_core(rtk); - -out: - return ret; + return dwc3_rtk_probe_dwc3_core(rtk); } static void dwc3_rtk_remove(struct platform_device *pdev) diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index c8c7cd0c179693..2841021f3557f8 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -14,6 +14,7 @@ * Inspired by dwc3-omap.c and dwc3-exynos.c. 
*/ +#include #include #include #include @@ -197,7 +198,7 @@ static int st_dwc3_probe(struct platform_device *pdev) struct st_dwc3 *dwc3_data; struct resource *res; struct device *dev = &pdev->dev; - struct device_node *node = dev->of_node, *child; + struct device_node *node = dev->of_node; struct platform_device *child_pdev; struct regmap *regmap; int ret; @@ -224,15 +225,21 @@ static int st_dwc3_probe(struct platform_device *pdev) dwc3_data->syscfg_reg_off = res->start; - dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n", + dev_vdbg(dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n", dwc3_data->glue_base, dwc3_data->syscfg_reg_off); + struct device_node *child __free(device_node) = of_get_compatible_child(node, + "snps,dwc3"); + if (!child) { + dev_err(dev, "failed to find dwc3 core node\n"); + return -ENODEV; + } + dwc3_data->rstc_pwrdn = devm_reset_control_get_exclusive(dev, "powerdown"); - if (IS_ERR(dwc3_data->rstc_pwrdn)) { - dev_err(&pdev->dev, "could not get power controller\n"); - return PTR_ERR(dwc3_data->rstc_pwrdn); - } + if (IS_ERR(dwc3_data->rstc_pwrdn)) + return dev_err_probe(dev, PTR_ERR(dwc3_data->rstc_pwrdn), + "could not get power controller\n"); /* Manage PowerDown */ reset_control_deassert(dwc3_data->rstc_pwrdn); @@ -240,26 +247,19 @@ static int st_dwc3_probe(struct platform_device *pdev) dwc3_data->rstc_rst = devm_reset_control_get_shared(dev, "softreset"); if (IS_ERR(dwc3_data->rstc_rst)) { - dev_err(&pdev->dev, "could not get reset controller\n"); - ret = PTR_ERR(dwc3_data->rstc_rst); + ret = dev_err_probe(dev, PTR_ERR(dwc3_data->rstc_rst), + "could not get reset controller\n"); goto undo_powerdown; } /* Manage SoftReset */ reset_control_deassert(dwc3_data->rstc_rst); - child = of_get_compatible_child(node, "snps,dwc3"); - if (!child) { - dev_err(&pdev->dev, "failed to find dwc3 core node\n"); - ret = -ENODEV; - goto err_node_put; - } - /* Allocate and initialize the core */ ret = of_platform_populate(node, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to add dwc3 core\n"); - goto err_node_put; + goto undo_softreset; } child_pdev = of_find_device_by_node(child); @@ -270,7 +270,6 @@ static int st_dwc3_probe(struct platform_device *pdev) } dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev); - of_node_put(child); platform_device_put(child_pdev); /* @@ -282,8 +281,7 @@ static int st_dwc3_probe(struct platform_device *pdev) ret = st_dwc3_drd_init(dwc3_data); if (ret) { dev_err(dev, "drd initialisation failed\n"); - of_platform_depopulate(dev); - goto undo_softreset; + goto depopulate; } /* ST glue logic init */ @@ -294,8 +292,6 @@ static int st_dwc3_probe(struct platform_device *pdev) depopulate: of_platform_depopulate(dev); -err_node_put: - of_node_put(child); undo_softreset: reset_control_assert(dwc3_data->rstc_rst); undo_powerdown: diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c index f1298b1b4f8491..b5e5be424ce997 100644 --- a/drivers/usb/dwc3/dwc3-xilinx.c +++ b/drivers/usb/dwc3/dwc3-xilinx.c @@ -285,11 +285,8 @@ static int dwc3_xlnx_probe(struct platform_device *pdev) return -ENOMEM; regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(regs)) { - ret = PTR_ERR(regs); - dev_err_probe(dev, ret, "failed to map registers\n"); - return ret; - } + if (IS_ERR(regs)) + return dev_err_probe(dev, PTR_ERR(regs), "failed to map registers\n"); match = of_match_node(dwc3_xlnx_of_match, pdev->dev.of_node); diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c index 
8c5aaf8606357d..3d404d19a205e3 100644 --- a/drivers/usb/fotg210/fotg210-hcd.c +++ b/drivers/usb/fotg210/fotg210-hcd.c @@ -36,7 +36,7 @@ #include #include -#include +#include #include "fotg210.h" diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index f45d5bedda689e..f25dd2cb5d03b1 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "u_os_desc.h" diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 0e7c1e947c0a0e..c82a6a0fba93dd 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -6,13 +6,13 @@ #include #include #include +#include #include #include #include "configfs.h" -#include "u_f.h" #include "u_os_desc.h" -int check_user_usb_string(const char *name, +static int check_user_usb_string(const char *name, struct usb_gadget_strings *stringtab_dev) { u16 num; @@ -902,7 +902,7 @@ static struct configfs_group_operations gadget_language_langid_group_ops = { .drop_item = gadget_language_string_drop, }; -static struct config_item_type gadget_language_type = { +static const struct config_item_type gadget_language_type = { .ct_item_ops = &gadget_language_langid_item_ops, .ct_group_ops = &gadget_language_langid_group_ops, .ct_attrs = gadget_language_langid_attrs, @@ -961,7 +961,7 @@ static struct configfs_group_operations gadget_language_group_ops = { .drop_item = &gadget_language_drop, }; -static struct config_item_type gadget_language_strings_type = { +static const struct config_item_type gadget_language_strings_type = { .ct_group_ops = &gadget_language_group_ops, .ct_owner = THIS_MODULE, }; @@ -1106,7 +1106,7 @@ static struct configfs_attribute *webusb_attrs[] = { NULL, }; -static struct config_item_type webusb_type = { +static const struct config_item_type webusb_type = { .ct_attrs = webusb_attrs, .ct_owner = THIS_MODULE, }; @@ -1263,7 +1263,7 @@ static struct configfs_item_operations os_desc_ops = { .drop_link = os_desc_unlink, }; -static struct config_item_type os_desc_type = { +static const struct config_item_type os_desc_type = { .ct_item_ops = &os_desc_ops, .ct_attrs = os_desc_attrs, .ct_owner = THIS_MODULE, diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 724b2631f249b1..7061720b9732e4 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -41,6 +41,7 @@ struct f_acm { struct gserial port; u8 ctrl_id, data_id; u8 port_num; + u8 bInterfaceProtocol; u8 pending; @@ -89,7 +90,7 @@ acm_iad_descriptor = { .bInterfaceCount = 2, // control + data .bFunctionClass = USB_CLASS_COMM, .bFunctionSubClass = USB_CDC_SUBCLASS_ACM, - .bFunctionProtocol = USB_CDC_ACM_PROTO_AT_V25TER, + /* .bFunctionProtocol = DYNAMIC */ /* .iFunction = DYNAMIC */ }; @@ -101,7 +102,7 @@ static struct usb_interface_descriptor acm_control_interface_desc = { .bNumEndpoints = 1, .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM, - .bInterfaceProtocol = USB_CDC_ACM_PROTO_AT_V25TER, + /* .bInterfaceProtocol = DYNAMIC */ /* .iInterface = DYNAMIC */ }; @@ -663,6 +664,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) goto fail; acm->notify = ep; + acm_iad_descriptor.bFunctionProtocol = acm->bInterfaceProtocol; + acm_control_interface_desc.bInterfaceProtocol = acm->bInterfaceProtocol; + /* allocate notification */ acm->notify_req = gs_alloc_req(ep, sizeof(struct usb_cdc_notification) + 2, @@ -719,8 +723,14 @@ static void 
acm_unbind(struct usb_configuration *c, struct usb_function *f) static void acm_free_func(struct usb_function *f) { struct f_acm *acm = func_to_acm(f); + struct f_serial_opts *opts; + + opts = container_of(f->fi, struct f_serial_opts, func_inst); kfree(acm); + mutex_lock(&opts->lock); + opts->instances--; + mutex_unlock(&opts->lock); } static void acm_resume(struct usb_function *f) @@ -761,7 +771,11 @@ static struct usb_function *acm_alloc_func(struct usb_function_instance *fi) acm->port.func.disable = acm_disable; opts = container_of(fi, struct f_serial_opts, func_inst); + mutex_lock(&opts->lock); acm->port_num = opts->port_num; + acm->bInterfaceProtocol = opts->protocol; + opts->instances++; + mutex_unlock(&opts->lock); acm->port.func.unbind = acm_unbind; acm->port.func.free_func = acm_free_func; acm->port.func.resume = acm_resume; @@ -812,11 +826,42 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page) CONFIGFS_ATTR_RO(f_acm_, port_num); +static ssize_t f_acm_protocol_show(struct config_item *item, char *page) +{ + return sprintf(page, "%u\n", to_f_serial_opts(item)->protocol); +} + +static ssize_t f_acm_protocol_store(struct config_item *item, + const char *page, size_t count) +{ + struct f_serial_opts *opts = to_f_serial_opts(item); + int ret; + + mutex_lock(&opts->lock); + + if (opts->instances) { + ret = -EBUSY; + goto out; + } + + ret = kstrtou8(page, 0, &opts->protocol); + if (ret) + goto out; + ret = count; + +out: + mutex_unlock(&opts->lock); + return ret; +} + +CONFIGFS_ATTR(f_acm_, protocol); + static struct configfs_attribute *acm_attrs[] = { #ifdef CONFIG_U_SERIAL_CONSOLE &f_acm_attr_console, #endif &f_acm_attr_port_num, + &f_acm_attr_protocol, NULL, }; @@ -832,6 +877,7 @@ static void acm_free_instance(struct usb_function_instance *fi) opts = container_of(fi, struct f_serial_opts, func_inst); gserial_free_line(opts->port_num); + mutex_destroy(&opts->lock); kfree(opts); } @@ -843,7 +889,9 @@ static struct usb_function_instance *acm_alloc_instance(void) opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return ERR_PTR(-ENOMEM); + opts->protocol = USB_CDC_ACM_PROTO_AT_V25TER; opts->func_inst.free_func_inst = acm_free_instance; + mutex_init(&opts->lock); ret = gserial_alloc_line(&opts->port_num); if (ret) { kfree(opts); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index e0ceaa72194937..2920f8000bbd83 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -28,11 +28,12 @@ #include #include #include -#include +#include #include #include #include +#include #include #include @@ -40,7 +41,6 @@ #include #include "u_fs.h" -#include "u_f.h" #include "u_os_desc.h" #include "configfs.h" @@ -722,7 +722,6 @@ static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait) } static const struct file_operations ffs_ep0_operations = { - .llseek = no_llseek, .open = ffs_ep0_open, .write = ffs_ep0_write, @@ -1830,7 +1829,6 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, } static const struct file_operations ffs_epfile_operations = { - .llseek = no_llseek, .open = ffs_epfile_open, .write_iter = ffs_epfile_write_iter, @@ -2478,7 +2476,7 @@ typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity, static int __must_check ffs_do_single_desc(char *data, unsigned len, ffs_entity_callback entity, - void *priv, int *current_class) + void *priv, int *current_class, int *current_subclass) { struct usb_descriptor_header *_ds = (void *)data; u8 length; @@ -2535,6 +2533,7 @@ static 
int __must_check ffs_do_single_desc(char *data, unsigned len, if (ds->iInterface) __entity(STRING, ds->iInterface); *current_class = ds->bInterfaceClass; + *current_subclass = ds->bInterfaceSubClass; } break; @@ -2559,6 +2558,12 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len, if (length != sizeof(struct ccid_descriptor)) goto inv_length; break; + } else if (*current_class == USB_CLASS_APP_SPEC && + *current_subclass == USB_SUBCLASS_DFU) { + pr_vdebug("dfu functional descriptor\n"); + if (length != sizeof(struct usb_dfu_functional_descriptor)) + goto inv_length; + break; + } else { pr_vdebug("unknown descriptor: %d for class %d\n", _ds->bDescriptorType, *current_class); @@ -2621,6 +2626,7 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, const unsigned _len = len; unsigned long num = 0; int current_class = -1; + int current_subclass = -1; for (;;) { int ret; @@ -2640,7 +2646,7 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, return _len - len; ret = ffs_do_single_desc(data, len, entity, priv, - &current_class); + &current_class, &current_subclass); if (ret < 0) { pr_debug("%s returns %d\n", __func__, ret); return ret; } diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 93dae017ae4568..740311c4fa2496 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -15,13 +15,21 @@ #include #include #include +#include +#include #include +#include -#include "u_f.h" #include "u_hid.h" #define HIDG_MINORS 4 +/* + * Most operating systems seem to allow for 5000ms timeout, we will allow + * userspace half that time to respond before we return an empty report. + */ +#define GET_REPORT_TIMEOUT_MS 2500 + static int major, minors; static const struct class hidg_class = { @@ -31,6 +39,11 @@ static const struct class hidg_class = { static DEFINE_IDA(hidg_ida); static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */ +struct report_entry { + struct usb_hidg_report report_data; + struct list_head node; +}; + /*-------------------------------------------------------------------------*/ /* HID gadget struct */ @@ -75,6 +88,19 @@ struct f_hidg { wait_queue_head_t write_queue; struct usb_request *req; + /* get report */ + struct usb_request *get_req; + struct usb_hidg_report get_report; + bool get_report_returned; + int get_report_req_report_id; + int get_report_req_report_length; + spinlock_t get_report_spinlock; + wait_queue_head_t get_queue; /* Waiting for userspace response */ + wait_queue_head_t get_id_queue; /* Get ID came in */ + struct work_struct work; + struct workqueue_struct *workqueue; + struct list_head report_list; + struct device dev; struct cdev cdev; struct usb_function func; @@ -524,6 +550,174 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, return status; } +static struct report_entry *f_hidg_search_for_report(struct f_hidg *hidg, u8 report_id) +{ + struct list_head *ptr; + struct report_entry *entry; + + list_for_each(ptr, &hidg->report_list) { + entry = list_entry(ptr, struct report_entry, node); + if (entry->report_data.report_id == report_id) + return entry; + } + + return NULL; +} + +static void get_report_workqueue_handler(struct work_struct *work) +{ + struct f_hidg *hidg = container_of(work, struct f_hidg, work); + struct usb_composite_dev *cdev = hidg->func.config->cdev; + struct usb_request *req; + struct report_entry *ptr; + unsigned long flags; + + int status = 0; +
spin_lock_irqsave(&hidg->get_report_spinlock, flags); + req = hidg->get_req; + if (!req) { + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + return; + } + + req->zero = 0; + req->length = min_t(unsigned int, min_t(unsigned int, hidg->get_report_req_report_length, + hidg->report_length), + MAX_REPORT_LENGTH); + + /* Check if there is a response available for immediate response */ + ptr = f_hidg_search_for_report(hidg, hidg->get_report_req_report_id); + if (ptr && !ptr->report_data.userspace_req) { + /* Report exists in list and it is to be used for immediate response */ + req->buf = ptr->report_data.data; + status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + hidg->get_report_returned = true; + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + } else { + /* + * Report does not exist in list or should not be immediately sent + * i.e. give userspace time to respond + */ + hidg->get_report_returned = false; + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + wake_up(&hidg->get_id_queue); +#define GET_REPORT_COND (!hidg->get_report_returned) + /* Wait until userspace has responded or timeout */ + status = wait_event_interruptible_timeout(hidg->get_queue, !GET_REPORT_COND, + msecs_to_jiffies(GET_REPORT_TIMEOUT_MS)); + spin_lock_irqsave(&hidg->get_report_spinlock, flags); + req = hidg->get_req; + if (!req) { + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + return; + } + if (status == 0 && !hidg->get_report_returned) { + /* GET_REPORT request was not serviced by userspace within timeout period */ + VDBG(cdev, "get_report : userspace timeout.\n"); + hidg->get_report_returned = true; + } + + /* Search again for report ID in list and respond to GET_REPORT request */ + ptr = f_hidg_search_for_report(hidg, hidg->get_report_req_report_id); + if (ptr) { + /* + * Either get an updated response just serviced by userspace + * or send the latest response in the list + */ + req->buf = ptr->report_data.data; + } else { + /* If there are no previously sent reports send empty report */ + req->buf = hidg->get_report.data; + memset(req->buf, 0x0, req->length); + } + + status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + } + + if (status < 0) + VDBG(cdev, "usb_ep_queue error on ep0 responding to GET_REPORT\n"); +} + +static int f_hidg_get_report_id(struct file *file, __u8 __user *buffer) +{ + struct f_hidg *hidg = file->private_data; + int ret = 0; + + ret = put_user(hidg->get_report_req_report_id, buffer); + + return ret; +} + +static int f_hidg_get_report(struct file *file, struct usb_hidg_report __user *buffer) +{ + struct f_hidg *hidg = file->private_data; + struct usb_composite_dev *cdev = hidg->func.config->cdev; + unsigned long flags; + struct report_entry *entry; + struct report_entry *ptr; + __u8 report_id; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + if (copy_from_user(&entry->report_data, buffer, + sizeof(struct usb_hidg_report))) { + ERROR(cdev, "copy_from_user error\n"); + kfree(entry); + return -EINVAL; + } + + report_id = entry->report_data.report_id; + + spin_lock_irqsave(&hidg->get_report_spinlock, flags); + ptr = f_hidg_search_for_report(hidg, report_id); + + if (ptr) { + /* Report already exists in list - update it */ + if (copy_from_user(&ptr->report_data, buffer, + sizeof(struct usb_hidg_report))) { + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + ERROR(cdev, "copy_from_user error\n"); + kfree(entry); + return
-EINVAL; + } + kfree(entry); + } else { + /* Report does not exist in list - add it */ + list_add_tail(&entry->node, &hidg->report_list); + } + + /* If there is no response pending then do nothing further */ + if (hidg->get_report_returned) { + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + return 0; + } + + /* If this userspace response serves the current pending report */ + if (hidg->get_report_req_report_id == report_id) { + hidg->get_report_returned = true; + wake_up(&hidg->get_queue); + } + + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + return 0; +} + +static long f_hidg_ioctl(struct file *file, unsigned int code, unsigned long arg) +{ + switch (code) { + case GADGET_HID_READ_GET_REPORT_ID: + return f_hidg_get_report_id(file, (__u8 __user *)arg); + case GADGET_HID_WRITE_GET_REPORT: + return f_hidg_get_report(file, (struct usb_hidg_report __user *)arg); + default: + return -ENOTTY; + } +} + static __poll_t f_hidg_poll(struct file *file, poll_table *wait) { struct f_hidg *hidg = file->private_data; @@ -531,6 +725,8 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait) poll_wait(file, &hidg->read_queue, wait); poll_wait(file, &hidg->write_queue, wait); + poll_wait(file, &hidg->get_queue, wait); + poll_wait(file, &hidg->get_id_queue, wait); if (WRITE_COND) ret |= EPOLLOUT | EPOLLWRNORM; @@ -543,12 +739,16 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait) ret |= EPOLLIN | EPOLLRDNORM; } + if (GET_REPORT_COND) + ret |= EPOLLPRI; + return ret; } #undef WRITE_COND #undef READ_COND_SSREPORT #undef READ_COND_INTOUT +#undef GET_REPORT_COND static int f_hidg_release(struct inode *inode, struct file *fd) { @@ -641,6 +841,10 @@ static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req) wake_up(&hidg->read_queue); } +static void hidg_get_report_complete(struct usb_ep *ep, struct usb_request *req) +{ +} + static int hidg_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { @@ -649,6 +853,7 @@ static int hidg_setup(struct usb_function *f, struct usb_request *req = cdev->req; int status = 0; __u16 value, length; + unsigned long flags; value = __le16_to_cpu(ctrl->wValue); length = __le16_to_cpu(ctrl->wLength); @@ -660,14 +865,20 @@ static int hidg_setup(struct usb_function *f, switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_GET_REPORT): - VDBG(cdev, "get_report\n"); + VDBG(cdev, "get_report | wLength=%d\n", ctrl->wLength); - /* send an empty report */ - length = min_t(unsigned, length, hidg->report_length); - memset(req->buf, 0x0, length); + /* + * Update GET_REPORT ID so that an ioctl can be used to determine what + * GET_REPORT the request was actually for. 
+ */ + spin_lock_irqsave(&hidg->get_report_spinlock, flags); + hidg->get_report_req_report_id = value & 0xff; + hidg->get_report_req_report_length = length; + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); - goto respond; - break; + queue_work(hidg->workqueue, &hidg->work); + + return status; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 | HID_REQ_GET_PROTOCOL): @@ -793,6 +1004,14 @@ static void hidg_disable(struct usb_function *f) spin_unlock_irqrestore(&hidg->read_spinlock, flags); } + spin_lock_irqsave(&hidg->get_report_spinlock, flags); + if (!hidg->get_report_returned) { + usb_ep_free_request(f->config->cdev->gadget->ep0, hidg->get_req); + hidg->get_req = NULL; + hidg->get_report_returned = true; + } + spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + spin_lock_irqsave(&hidg->write_spinlock, flags); if (!hidg->write_pending) { free_ep_req(hidg->in_ep, hidg->req); @@ -902,6 +1121,14 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) return status; } +#ifdef CONFIG_COMPAT +static long f_hidg_compat_ioctl(struct file *file, unsigned int code, + unsigned long value) +{ + return f_hidg_ioctl(file, code, value); +} +#endif + static const struct file_operations f_hidg_fops = { .owner = THIS_MODULE, .open = f_hidg_open, @@ -909,6 +1136,10 @@ static const struct file_operations f_hidg_fops = { .write = f_hidg_write, .read = f_hidg_read, .poll = f_hidg_poll, + .unlocked_ioctl = f_hidg_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = f_hidg_compat_ioctl, +#endif .llseek = noop_llseek, }; @@ -919,6 +1150,15 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) struct usb_string *us; int status; + hidg->get_req = usb_ep_alloc_request(c->cdev->gadget->ep0, GFP_ATOMIC); + if (!hidg->get_req) + return -ENOMEM; + + hidg->get_req->zero = 0; + hidg->get_req->complete = hidg_get_report_complete; + hidg->get_req->context = hidg; + hidg->get_report_returned = true; + /* maybe allocate device-global string IDs, and patch descriptors */ us = usb_gstrings_attach(c->cdev, ct_func_strings, ARRAY_SIZE(ct_func_string_defs)); @@ -1004,9 +1244,24 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg->write_pending = 1; hidg->req = NULL; spin_lock_init(&hidg->read_spinlock); + spin_lock_init(&hidg->get_report_spinlock); init_waitqueue_head(&hidg->write_queue); init_waitqueue_head(&hidg->read_queue); + init_waitqueue_head(&hidg->get_queue); + init_waitqueue_head(&hidg->get_id_queue); INIT_LIST_HEAD(&hidg->completed_out_req); + INIT_LIST_HEAD(&hidg->report_list); + + INIT_WORK(&hidg->work, get_report_workqueue_handler); + hidg->workqueue = alloc_workqueue("report_work", + WQ_FREEZABLE | + WQ_MEM_RECLAIM, + 1); + + if (!hidg->workqueue) { + status = -ENOMEM; + goto fail; + } /* create char device */ cdev_init(&hidg->cdev, &f_hidg_fops); @@ -1016,12 +1271,16 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail_free_descs: + destroy_workqueue(hidg->workqueue); usb_free_all_descriptors(f); fail: ERROR(f->config->cdev, "hidg_bind FAILED\n"); if (hidg->req != NULL) free_ep_req(hidg->in_ep, hidg->req); + usb_ep_free_request(c->cdev->gadget->ep0, hidg->get_req); + hidg->get_req = NULL; + return status; } @@ -1256,7 +1515,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) struct f_hidg *hidg = func_to_hidg(f); cdev_device_del(&hidg->cdev, &hidg->dev); - + destroy_workqueue(hidg->workqueue); usb_free_all_descriptors(f); } diff --git 
a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index 979b028edb99ef..49b009a7d5d792 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c @@ -14,9 +14,9 @@ #include #include #include +#include #include "g_zero.h" -#include "u_f.h" /* * LOOPBACK FUNCTION ... a testing vehicle for USB peripherals, diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index cfd712fd745237..08e0d1c511e8d9 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -188,7 +188,7 @@ #include #include #include -#include +#include #include #include @@ -3050,7 +3050,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) if (!common->thread_task) { common->state = FSG_STATE_NORMAL; common->thread_task = - kthread_create(fsg_main_thread, common, "file-storage"); + kthread_run(fsg_main_thread, common, "file-storage"); if (IS_ERR(common->thread_task)) { ret = PTR_ERR(common->thread_task); common->thread_task = NULL; @@ -3059,7 +3059,6 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) } DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task)); - wake_up_process(common->thread_task); } fsg->gadget = gadget; diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 67052a664e746d..1067847cc07995 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -30,11 +30,11 @@ #include #include +#include #include #include #include -#include "u_f.h" #include "u_midi.h" MODULE_AUTHOR("Ben Williamson"); diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c index 3f63253ad3e074..8285df9ed6fd78 100644 --- a/drivers/usb/gadget/function/f_midi2.c +++ b/drivers/usb/gadget/function/f_midi2.c @@ -15,11 +15,11 @@ #include #include +#include #include #include #include -#include "u_f.h" #include "u_midi2.h" struct f_midi2; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index ef2ffde625c328..d295ade8fa6797 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 6f3702210450ff..ec5fd25020fdbf 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -13,10 +13,10 @@ #include #include #include +#include #include #include "g_zero.h" -#include "u_f.h" /* * SOURCE/SINK FUNCTION ... 
a primary testing vehicle for USB peripheral diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 90906d71473658..15bb3aa12aa8b4 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "tcm.h" #include "u_tcm.h" diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 2b9fb4daa80689..c87e74afc88159 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -377,24 +377,10 @@ enum { STR_AS_OUT_IF_ALT1, STR_AS_IN_IF_ALT0, STR_AS_IN_IF_ALT1, + NUM_STR_DESCRIPTORS, }; -static struct usb_string strings_uac1[] = { - /* [STR_AC_IF].s = DYNAMIC, */ - [STR_USB_OUT_IT].s = "Playback Input terminal", - [STR_USB_OUT_IT_CH_NAMES].s = "Playback Channels", - [STR_IO_OUT_OT].s = "Playback Output terminal", - [STR_IO_IN_IT].s = "Capture Input terminal", - [STR_IO_IN_IT_CH_NAMES].s = "Capture Channels", - [STR_USB_IN_OT].s = "Capture Output terminal", - [STR_FU_IN].s = "Capture Volume", - [STR_FU_OUT].s = "Playback Volume", - [STR_AS_OUT_IF_ALT0].s = "Playback Inactive", - [STR_AS_OUT_IF_ALT1].s = "Playback Active", - [STR_AS_IN_IF_ALT0].s = "Capture Inactive", - [STR_AS_IN_IF_ALT1].s = "Capture Active", - { }, -}; +static struct usb_string strings_uac1[NUM_STR_DESCRIPTORS + 1] = {}; static struct usb_gadget_strings str_uac1 = { .language = 0x0409, /* en-us */ @@ -1265,6 +1251,20 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f) strings_uac1[STR_AC_IF].s = audio_opts->function_name; + strings_uac1[STR_USB_OUT_IT].s = audio_opts->c_it_name; + strings_uac1[STR_USB_OUT_IT_CH_NAMES].s = audio_opts->c_it_ch_name; + strings_uac1[STR_IO_OUT_OT].s = audio_opts->c_ot_name; + strings_uac1[STR_FU_OUT].s = audio_opts->c_fu_vol_name; + strings_uac1[STR_AS_OUT_IF_ALT0].s = "Playback Inactive"; + strings_uac1[STR_AS_OUT_IF_ALT1].s = "Playback Active"; + + strings_uac1[STR_IO_IN_IT].s = audio_opts->p_it_name; + strings_uac1[STR_IO_IN_IT_CH_NAMES].s = audio_opts->p_it_ch_name; + strings_uac1[STR_USB_IN_OT].s = audio_opts->p_ot_name; + strings_uac1[STR_FU_IN].s = audio_opts->p_fu_vol_name; + strings_uac1[STR_AS_IN_IF_ALT0].s = "Capture Inactive"; + strings_uac1[STR_AS_IN_IF_ALT1].s = "Capture Active"; + us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1)); if (IS_ERR(us)) return PTR_ERR(us); @@ -1681,8 +1681,19 @@ UAC1_ATTRIBUTE(bool, c_volume_present); UAC1_ATTRIBUTE(s16, c_volume_min); UAC1_ATTRIBUTE(s16, c_volume_max); UAC1_ATTRIBUTE(s16, c_volume_res); + UAC1_ATTRIBUTE_STRING(function_name); +UAC1_ATTRIBUTE_STRING(p_it_name); +UAC1_ATTRIBUTE_STRING(p_it_ch_name); +UAC1_ATTRIBUTE_STRING(p_ot_name); +UAC1_ATTRIBUTE_STRING(p_fu_vol_name); + +UAC1_ATTRIBUTE_STRING(c_it_name); +UAC1_ATTRIBUTE_STRING(c_it_ch_name); +UAC1_ATTRIBUTE_STRING(c_ot_name); +UAC1_ATTRIBUTE_STRING(c_fu_vol_name); + static struct configfs_attribute *f_uac1_attrs[] = { &f_uac1_opts_attr_c_chmask, &f_uac1_opts_attr_c_srate, @@ -1706,6 +1717,16 @@ static struct configfs_attribute *f_uac1_attrs[] = { &f_uac1_opts_attr_function_name, + &f_uac1_opts_attr_p_it_name, + &f_uac1_opts_attr_p_it_ch_name, + &f_uac1_opts_attr_p_ot_name, + &f_uac1_opts_attr_p_fu_vol_name, + + &f_uac1_opts_attr_c_it_name, + &f_uac1_opts_attr_c_it_ch_name, + &f_uac1_opts_attr_c_ot_name, + &f_uac1_opts_attr_c_fu_vol_name, + NULL, }; @@ -1760,6 +1781,16 @@ static struct usb_function_instance *f_audio_alloc_inst(void) 
scnprintf(opts->function_name, sizeof(opts->function_name), "AC Interface"); + scnprintf(opts->p_it_name, sizeof(opts->p_it_name), "Capture Input terminal"); + scnprintf(opts->p_it_ch_name, sizeof(opts->p_it_ch_name), "Capture Channels"); + scnprintf(opts->p_ot_name, sizeof(opts->p_ot_name), "Capture Output terminal"); + scnprintf(opts->p_fu_vol_name, sizeof(opts->p_fu_vol_name), "Capture Volume"); + + scnprintf(opts->c_it_name, sizeof(opts->c_it_name), "Playback Input terminal"); + scnprintf(opts->c_it_ch_name, sizeof(opts->c_it_ch_name), "Playback Channels"); + scnprintf(opts->c_ot_name, sizeof(opts->c_ot_name), "Playback Output terminal"); + scnprintf(opts->c_fu_vol_name, sizeof(opts->c_fu_vol_name), "Playback Volume"); + return &opts->func_inst; } diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 2d6d3286ffde2c..1cdda44455b343 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -95,7 +95,9 @@ enum { STR_CLKSRC_IN, STR_CLKSRC_OUT, STR_USB_IT, + STR_USB_IT_CH, STR_IO_IT, + STR_IO_IT_CH, STR_USB_OT, STR_IO_OT, STR_FU_IN, @@ -104,25 +106,10 @@ enum { STR_AS_OUT_ALT1, STR_AS_IN_ALT0, STR_AS_IN_ALT1, + NUM_STR_DESCRIPTORS, }; -static struct usb_string strings_fn[] = { - /* [STR_ASSOC].s = DYNAMIC, */ - [STR_IF_CTRL].s = "Topology Control", - [STR_CLKSRC_IN].s = "Input Clock", - [STR_CLKSRC_OUT].s = "Output Clock", - [STR_USB_IT].s = "USBH Out", - [STR_IO_IT].s = "USBD Out", - [STR_USB_OT].s = "USBH In", - [STR_IO_OT].s = "USBD In", - [STR_FU_IN].s = "Capture Volume", - [STR_FU_OUT].s = "Playback Volume", - [STR_AS_OUT_ALT0].s = "Playback Inactive", - [STR_AS_OUT_ALT1].s = "Playback Active", - [STR_AS_IN_ALT0].s = "Capture Inactive", - [STR_AS_IN_ALT1].s = "Capture Active", - { }, -}; +static struct usb_string strings_fn[NUM_STR_DESCRIPTORS + 1] = {}; static const char *const speed_names[] = { [USB_SPEED_UNKNOWN] = "UNKNOWN", @@ -1049,6 +1036,23 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) return ret; strings_fn[STR_ASSOC].s = uac2_opts->function_name; + strings_fn[STR_IF_CTRL].s = uac2_opts->if_ctrl_name; + strings_fn[STR_CLKSRC_IN].s = uac2_opts->clksrc_in_name; + strings_fn[STR_CLKSRC_OUT].s = uac2_opts->clksrc_out_name; + + strings_fn[STR_USB_IT].s = uac2_opts->c_it_name; + strings_fn[STR_USB_IT_CH].s = uac2_opts->c_it_ch_name; + strings_fn[STR_IO_OT].s = uac2_opts->c_ot_name; + strings_fn[STR_FU_OUT].s = uac2_opts->c_fu_vol_name; + strings_fn[STR_AS_OUT_ALT0].s = "Playback Inactive"; + strings_fn[STR_AS_OUT_ALT1].s = "Playback Active"; + + strings_fn[STR_IO_IT].s = uac2_opts->p_it_name; + strings_fn[STR_IO_IT_CH].s = uac2_opts->p_it_ch_name; + strings_fn[STR_USB_OT].s = uac2_opts->p_ot_name; + strings_fn[STR_FU_IN].s = uac2_opts->p_fu_vol_name; + strings_fn[STR_AS_IN_ALT0].s = "Capture Inactive"; + strings_fn[STR_AS_IN_ALT1].s = "Capture Active"; us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn)); if (IS_ERR(us)) @@ -1072,7 +1076,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) in_clk_src_desc.iClockSource = us[STR_CLKSRC_IN].id; out_clk_src_desc.iClockSource = us[STR_CLKSRC_OUT].id; usb_out_it_desc.iTerminal = us[STR_USB_IT].id; + usb_out_it_desc.iChannelNames = us[STR_USB_IT_CH].id; io_in_it_desc.iTerminal = us[STR_IO_IT].id; + io_in_it_desc.iChannelNames = us[STR_IO_IT_CH].id; usb_in_ot_desc.iTerminal = us[STR_USB_OT].id; io_out_ot_desc.iTerminal = us[STR_IO_OT].id; std_as_out_if0_desc.iInterface = us[STR_AS_OUT_ALT0].id; @@ -2100,10 
+2106,24 @@ UAC2_ATTRIBUTE(s16, c_volume_max); UAC2_ATTRIBUTE(s16, c_volume_res); UAC2_ATTRIBUTE(u32, fb_max); UAC2_ATTRIBUTE_STRING(function_name); +UAC2_ATTRIBUTE_STRING(if_ctrl_name); +UAC2_ATTRIBUTE_STRING(clksrc_in_name); +UAC2_ATTRIBUTE_STRING(clksrc_out_name); + +UAC2_ATTRIBUTE_STRING(p_it_name); +UAC2_ATTRIBUTE_STRING(p_it_ch_name); +UAC2_ATTRIBUTE_STRING(p_ot_name); +UAC2_ATTRIBUTE_STRING(p_fu_vol_name); + +UAC2_ATTRIBUTE_STRING(c_it_name); +UAC2_ATTRIBUTE_STRING(c_it_ch_name); +UAC2_ATTRIBUTE_STRING(c_ot_name); +UAC2_ATTRIBUTE_STRING(c_fu_vol_name); UAC2_ATTRIBUTE(s16, p_terminal_type); UAC2_ATTRIBUTE(s16, c_terminal_type); + static struct configfs_attribute *f_uac2_attrs[] = { &f_uac2_opts_attr_p_chmask, &f_uac2_opts_attr_p_srate, @@ -2130,6 +2150,19 @@ static struct configfs_attribute *f_uac2_attrs[] = { &f_uac2_opts_attr_c_volume_res, &f_uac2_opts_attr_function_name, + &f_uac2_opts_attr_if_ctrl_name, + &f_uac2_opts_attr_clksrc_in_name, + &f_uac2_opts_attr_clksrc_out_name, + + &f_uac2_opts_attr_p_it_name, + &f_uac2_opts_attr_p_it_ch_name, + &f_uac2_opts_attr_p_ot_name, + &f_uac2_opts_attr_p_fu_vol_name, + + &f_uac2_opts_attr_c_it_name, + &f_uac2_opts_attr_c_it_ch_name, + &f_uac2_opts_attr_c_ot_name, + &f_uac2_opts_attr_c_fu_vol_name, &f_uac2_opts_attr_p_terminal_type, &f_uac2_opts_attr_c_terminal_type, @@ -2191,6 +2224,19 @@ static struct usb_function_instance *afunc_alloc_inst(void) opts->fb_max = FBACK_FAST_MAX; scnprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink"); + scnprintf(opts->if_ctrl_name, sizeof(opts->if_ctrl_name), "Topology Control"); + scnprintf(opts->clksrc_in_name, sizeof(opts->clksrc_in_name), "Input Clock"); + scnprintf(opts->clksrc_out_name, sizeof(opts->clksrc_out_name), "Output Clock"); + + scnprintf(opts->p_it_name, sizeof(opts->p_it_name), "USBD Out"); + scnprintf(opts->p_it_ch_name, sizeof(opts->p_it_ch_name), "Capture Channels"); + scnprintf(opts->p_ot_name, sizeof(opts->p_ot_name), "USBH In"); + scnprintf(opts->p_fu_vol_name, sizeof(opts->p_fu_vol_name), "Capture Volume"); + + scnprintf(opts->c_it_name, sizeof(opts->c_it_name), "USBH Out"); + scnprintf(opts->c_it_ch_name, sizeof(opts->c_it_ch_name), "Playback Channels"); + scnprintf(opts->c_ot_name, sizeof(opts->c_ot_name), "USBD In"); + scnprintf(opts->c_fu_vol_name, sizeof(opts->c_fu_vol_name), "Playback Volume"); opts->p_terminal_type = UAC2_DEF_P_TERM_TYPE; opts->c_terminal_type = UAC2_DEF_C_TERM_TYPE; diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 12c5d9cf450c10..afd75d72412c9f 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -31,7 +31,7 @@ #include #include -#include +#include #include "u_rndis.h" diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h index 0a544a82cbf850..ced5d2b09234dc 100644 --- a/drivers/usb/gadget/function/storage_common.h +++ b/drivers/usb/gadget/function/storage_common.h @@ -5,7 +5,7 @@ #include #include #include -#include +#include #ifndef DEBUG #undef VERBOSE_DEBUG diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index 24299576972fe1..ca8dbec65f736c 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -1140,35 +1140,35 @@ static int u_audio_rate_get(struct snd_kcontrol *kcontrol, } static struct snd_kcontrol_new u_audio_controls[] = { - [UAC_FBACK_CTRL] { + [UAC_FBACK_CTRL] = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = 
"Capture Pitch 1000000", .info = u_audio_pitch_info, .get = u_audio_pitch_get, .put = u_audio_pitch_put, }, - [UAC_P_PITCH_CTRL] { + [UAC_P_PITCH_CTRL] = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "Playback Pitch 1000000", .info = u_audio_pitch_info, .get = u_audio_pitch_get, .put = u_audio_pitch_put, }, - [UAC_MUTE_CTRL] { + [UAC_MUTE_CTRL] = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = u_audio_mute_info, .get = u_audio_mute_get, .put = u_audio_mute_put, }, - [UAC_VOLUME_CTRL] { + [UAC_VOLUME_CTRL] = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = u_audio_volume_info, .get = u_audio_volume_get, .put = u_audio_volume_put, }, - [UAC_RATE_CTRL] { + [UAC_RATE_CTRL] = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "", /* will be filled later */ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index b394105e55d6cc..0a8c05b2746b4e 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "u_serial.h" @@ -126,6 +127,7 @@ struct gs_port { wait_queue_head_t close_wait; bool suspended; /* port suspended */ bool start_delayed; /* delay start when suspended */ + struct async_icount icount; /* REVISIT this state ... */ struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ @@ -257,6 +259,7 @@ __acquires(&port->port_lock) break; } do_tty_wake = true; + port->icount.tx += len; req->length = len; list_del(&req->list); @@ -408,6 +411,7 @@ static void gs_rx_push(struct work_struct *work) size -= n; } + port->icount.rx += size; count = tty_insert_flip_string(&port->port, packet, size); if (count) @@ -851,6 +855,23 @@ static int gs_break_ctl(struct tty_struct *tty, int duration) return status; } +static int gs_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount) +{ + struct gs_port *port = tty->driver_data; + struct async_icount cnow; + unsigned long flags; + + spin_lock_irqsave(&port->port_lock, flags); + cnow = port->icount; + spin_unlock_irqrestore(&port->port_lock, flags); + + icount->rx = cnow.rx; + icount->tx = cnow.tx; + + return 0; +} + static const struct tty_operations gs_tty_ops = { .open = gs_open, .close = gs_close, @@ -861,6 +882,7 @@ static const struct tty_operations gs_tty_ops = { .chars_in_buffer = gs_chars_in_buffer, .unthrottle = gs_unthrottle, .break_ctl = gs_break_ctl, + .get_icount = gs_get_icount, }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h index 901d99310bc48d..e1274338ea6191 100644 --- a/drivers/usb/gadget/function/u_serial.h +++ b/drivers/usb/gadget/function/u_serial.h @@ -17,6 +17,10 @@ struct f_serial_opts { struct usb_function_instance func_inst; u8 port_num; + u8 protocol; + + struct mutex lock; /* protect instances */ + int instances; }; /* diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h index f7a616760e314b..feb6eb76462f3b 100644 --- a/drivers/usb/gadget/function/u_uac1.h +++ b/drivers/usb/gadget/function/u_uac1.h @@ -52,7 +52,17 @@ struct f_uac1_opts { int req_number; unsigned bound:1; - char function_name[32]; + char function_name[USB_MAX_STRING_LEN]; + + char p_it_name[USB_MAX_STRING_LEN]; + char p_it_ch_name[USB_MAX_STRING_LEN]; + char p_ot_name[USB_MAX_STRING_LEN]; + char 
p_fu_vol_name[USB_MAX_STRING_LEN]; + + char c_it_name[USB_MAX_STRING_LEN]; + char c_it_ch_name[USB_MAX_STRING_LEN]; + char c_ot_name[USB_MAX_STRING_LEN]; + char c_fu_vol_name[USB_MAX_STRING_LEN]; struct mutex lock; int refcnt; diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h index 5e81bdd6c5fbab..0df808289ded0d 100644 --- a/drivers/usb/gadget/function/u_uac2.h +++ b/drivers/usb/gadget/function/u_uac2.h @@ -68,7 +68,20 @@ struct f_uac2_opts { int fb_max; bool bound; - char function_name[32]; + char function_name[USB_MAX_STRING_LEN]; + char if_ctrl_name[USB_MAX_STRING_LEN]; + char clksrc_in_name[USB_MAX_STRING_LEN]; + char clksrc_out_name[USB_MAX_STRING_LEN]; + + char p_it_name[USB_MAX_STRING_LEN]; + char p_it_ch_name[USB_MAX_STRING_LEN]; + char p_ot_name[USB_MAX_STRING_LEN]; + char p_fu_vol_name[USB_MAX_STRING_LEN]; + + char c_it_name[USB_MAX_STRING_LEN]; + char c_it_ch_name[USB_MAX_STRING_LEN]; + char c_ot_name[USB_MAX_STRING_LEN]; + char c_fu_vol_name[USB_MAX_STRING_LEN]; s16 p_terminal_type; s16 c_terminal_type; diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c index a024aecb76dc37..de1736f834e6b8 100644 --- a/drivers/usb/gadget/function/uvc_v4l2.c +++ b/drivers/usb/gadget/function/uvc_v4l2.c @@ -121,6 +121,9 @@ static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc, list_for_each_entry(format, &uvc->header->formats, entry) { const struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt); + if (IS_ERR(fmtdesc)) + continue; + if (fmtdesc->fcc == pixelformat) { uformat = format->fmt; break; @@ -240,6 +243,7 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt) struct uvc_video *video = &uvc->video; struct uvcg_format *uformat; struct uvcg_frame *uframe; + const struct uvc_format_desc *fmtdesc; u8 *fcc; if (fmt->type != video->queue.queue.type) @@ -277,7 +281,10 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt) fmt->fmt.pix.height = uframe->frame.w_height; fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe); fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe); - fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc; + fmtdesc = to_uvc_format(uformat); + if (IS_ERR(fmtdesc)) + return PTR_ERR(fmtdesc); + fmt->fmt.pix.pixelformat = fmtdesc->fcc; } fmt->fmt.pix.field = V4L2_FIELD_NONE; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; @@ -389,6 +396,9 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f) return -EINVAL; fmtdesc = to_uvc_format(uformat); + if (IS_ERR(fmtdesc)) + return PTR_ERR(fmtdesc); + f->pixelformat = fmtdesc->fcc; return 0; diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index a9edd60fbbf779..57a851151225de 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 03179b1880fd3e..9c7381661016da 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -705,7 +705,6 @@ static const struct file_operations ep_io_operations = { .open = ep_open, .release = ep_release, - .llseek = no_llseek, .unlocked_ioctl = ep_ioctl, .read_iter = ep_read_iter, .write_iter = ep_write_iter, @@ -1939,7 +1938,6 @@ gadget_dev_open (struct inode *inode, struct file *fd) } static const struct file_operations ep0_operations = { 
- .llseek = no_llseek, .open = gadget_dev_open, .read = ep0_read, diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 399fca32a8ace7..112fd18d8c99dc 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -1364,7 +1364,6 @@ static const struct file_operations raw_fops = { .unlocked_ioctl = raw_ioctl, .compat_ioctl = raw_ioctl, .release = raw_release, - .llseek = no_llseek, }; static struct miscdevice raw_misc_device = { diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 40870227999ab9..fc1e06246d9df1 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "u_tcm.h" diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c index 6aea1ecb399998..115d219c9c00cf 100644 --- a/drivers/usb/gadget/u_f.c +++ b/drivers/usb/gadget/u_f.c @@ -8,8 +8,8 @@ * Author: Andrzej Pietrasiewicz */ -#include "u_f.h" #include +#include struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len) { diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h deleted file mode 100644 index e313c3b8dcb19a..00000000000000 --- a/drivers/usb/gadget/u_f.h +++ /dev/null @@ -1,86 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * u_f.h - * - * Utility definitions for USB functions - * - * Copyright (c) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Andrzej Pietrasiewicz - */ - -#ifndef __U_F_H__ -#define __U_F_H__ - -#include -#include - -/* Variable Length Array Macros **********************************************/ -#define vla_group(groupname) size_t groupname##__next = 0 -#define vla_group_size(groupname) groupname##__next - -#define vla_item(groupname, type, name, n) \ - size_t groupname##_##name##__offset = ({ \ - size_t offset = 0; \ - if (groupname##__next != SIZE_MAX) { \ - size_t align_mask = __alignof__(type) - 1; \ - size_t size = array_size(n, sizeof(type)); \ - offset = (groupname##__next + align_mask) & \ - ~align_mask; \ - if (check_add_overflow(offset, size, \ - &groupname##__next)) { \ - groupname##__next = SIZE_MAX; \ - offset = 0; \ - } \ - } \ - offset; \ - }) - -#define vla_item_with_sz(groupname, type, name, n) \ - size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \ - size_t groupname##_##name##__offset = ({ \ - size_t offset = 0; \ - if (groupname##__next != SIZE_MAX) { \ - size_t align_mask = __alignof__(type) - 1; \ - offset = (groupname##__next + align_mask) & \ - ~align_mask; \ - if (check_add_overflow(offset, groupname##_##name##__sz,\ - &groupname##__next)) { \ - groupname##__next = SIZE_MAX; \ - offset = 0; \ - } \ - } \ - offset; \ - }) - -#define vla_ptr(ptr, groupname, name) \ - ((void *) ((char *)ptr + groupname##_##name##__offset)) - -struct usb_ep; -struct usb_request; - -/** - * alloc_ep_req - returns a usb_request allocated by the gadget driver and - * allocates the request's buffer. - * - * @ep: the endpoint to allocate a usb_request - * @len: usb_requests's buffer suggested size - * - * In case @ep direction is OUT, the @len will be aligned to ep's - * wMaxPacketSize. In order to avoid memory leaks or drops, *always* use - * usb_requests's length (req->length) to refer to the allocated buffer size. - * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req(). 
- */ -struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len); - -/* Frees a usb_request previously allocated by alloc_ep_req() */ -static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - WARN_ON(req->buf == NULL); - kfree(req->buf); - req->buf = NULL; - usb_ep_free_request(ep, req); -} - -#endif /* __U_F_H__ */ diff --git a/drivers/usb/gadget/u_os_desc.h b/drivers/usb/gadget/u_os_desc.h index 5d7d35c8cc314e..f8b9f0faa9b16d 100644 --- a/drivers/usb/gadget/u_os_desc.h +++ b/drivers/usb/gadget/u_os_desc.h @@ -13,7 +13,7 @@ #ifndef __U_OS_DESC_H__ #define __U_OS_DESC_H__ -#include +#include #include #define USB_EXT_PROP_DW_SIZE 0 diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index b76885d78e8a78..4928eba1932721 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -187,7 +187,6 @@ static int regs_dbg_release(struct inode *inode, struct file *file) static const struct file_operations queue_dbg_fops = { .owner = THIS_MODULE, .open = queue_dbg_open, - .llseek = no_llseek, .read = queue_dbg_read, .release = queue_dbg_release, }; diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h index 8d00b1239f2198..2f4abf6f8f7736 100644 --- a/drivers/usb/gadget/udc/bdc/bdc.h +++ b/drivers/usb/gadget/udc/bdc/bdc.h @@ -20,7 +20,7 @@ #include #include #include -#include +#include #define BRCM_BDC_NAME "bdc" #define BRCM_BDC_DESC "Broadcom USB Device Controller driver" diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 35a652807fca87..5149e2b7f05087 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -639,6 +639,7 @@ static const struct of_device_id bdc_of_match[] = { { .compatible = "brcm,bdc" }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, bdc_of_match); static struct platform_driver bdc_driver = { .driver = { diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index fa88f210ecd57d..f995cfa9b99e14 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c index 53ffaf4e2e3762..23826fd7a8e693 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_udc.c +++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include "bdc.h" diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-ep0.c b/drivers/usb/gadget/udc/cdns2/cdns2-ep0.c index fa12a5d46f2e9a..a5a9d395fd0d81 100644 --- a/drivers/usb/gadget/udc/cdns2/cdns2-ep0.c +++ b/drivers/usb/gadget/udc/cdns2/cdns2-ep0.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include "cdns2-gadget.h" #include "cdns2-trace.h" diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c index d394affb707236..62fce42ef2dabd 100644 --- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c +++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c @@ -2033,8 +2033,8 @@ static void cdns2_quiesce(struct cdns2_device *pdev) set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON); /* Disable interrupt. */ - writeb(0, &pdev->interrupt_regs->extien), - writeb(0, &pdev->interrupt_regs->usbien), + writeb(0, &pdev->interrupt_regs->extien); + writeb(0, &pdev->interrupt_regs->usbien); writew(0, &pdev->adma_regs->ep_ien); /* Clear interrupt line. 
*/ diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c index 50c3d0974d9b53..b1a8f772467c0d 100644 --- a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c +++ b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c @@ -15,8 +15,7 @@ #include "cdns2-gadget.h" #define PCI_DRIVER_NAME "cdns-pci-usbhs" -#define CDNS_VENDOR_ID 0x17cd -#define CDNS_DEVICE_ID 0x0120 +#define PCI_DEVICE_ID_CDNS_USB2 0x0120 #define PCI_BAR_DEV 0 #define PCI_DEV_FN_DEVICE 0 @@ -114,8 +113,8 @@ static const struct dev_pm_ops cdns2_pci_pm_ops = { }; static const struct pci_device_id cdns2_pci_ids[] = { - { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID }, + { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_USB2), + .class = PCI_CLASS_SERIAL_USB_DEVICE }, { 0, } }; diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index f37b0d8386c1a9..8820d992444882 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #define DRIVER_DESC "USB Host+Gadget Emulator" #define DRIVER_VERSION "02 May 2005" @@ -1304,7 +1304,8 @@ static int dummy_urb_enqueue( /* kick the scheduler, it'll do the rest */ if (!hrtimer_active(&dum_hcd->timer)) - hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL); + hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), + HRTIMER_MODE_REL_SOFT); done: spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); @@ -1325,7 +1326,7 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) rc = usb_hcd_check_unlink_urb(hcd, urb, status); if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING && !list_empty(&dum_hcd->urbp_list)) - hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL); + hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT); spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); return rc; @@ -1995,7 +1996,8 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t) dum_hcd->udev = NULL; } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) { /* want a 1 msec delay here */ - hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL); + hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), + HRTIMER_MODE_REL_SOFT); } spin_unlock_irqrestore(&dum->lock, flags); @@ -2389,7 +2391,7 @@ static int dummy_bus_resume(struct usb_hcd *hcd) dum_hcd->rh_state = DUMMY_RH_RUNNING; set_link_state(dum_hcd); if (!list_empty(&dum_hcd->urbp_list)) - hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL); + hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT); hcd->state = HC_STATE_RUNNING; } spin_unlock_irq(&dum_hcd->dum->lock); @@ -2467,7 +2469,7 @@ static DEVICE_ATTR_RO(urbs); static int dummy_start_ss(struct dummy_hcd *dum_hcd) { - hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); dum_hcd->timer.function = dummy_timer; dum_hcd->rh_state = DUMMY_RH_RUNNING; dum_hcd->stream_en_ep = 0; @@ -2497,7 +2499,7 @@ static int dummy_start(struct usb_hcd *hcd) return dummy_start_ss(dum_hcd); spin_lock_init(&dum_hcd->dum->lock); - hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); dum_hcd->timer.function = dummy_timer; dum_hcd->rh_state = DUMMY_RH_RUNNING; diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c 
b/drivers/usb/gadget/udc/fsl_udc_core.c index 3432ebfae97879..0cabd4eee6acb5 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -39,7 +39,7 @@ #include #include -#include +#include #include #include "fsl_usb2_udc.h" diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c index 5ffb3d5c635bec..b860c2e7644940 100644 --- a/drivers/usb/gadget/udc/goku_udc.c +++ b/drivers/usb/gadget/udc/goku_udc.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include "goku_udc.h" diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index d5f29f8fe48131..3bfd889ed56a47 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -1487,31 +1487,29 @@ static int udc_ep0_out_req(struct lpc32xx_udc *udc) req = list_entry(ep0->queue.next, struct lpc32xx_request, queue); - if (req) { - if (req->req.length == 0) { - /* Just dequeue request */ - done(ep0, req, 0); - udc->ep0state = WAIT_FOR_SETUP; - return 1; - } + if (req->req.length == 0) { + /* Just dequeue request */ + done(ep0, req, 0); + udc->ep0state = WAIT_FOR_SETUP; + return 1; + } - /* Get data from FIFO */ - bufferspace = req->req.length - req->req.actual; - if (bufferspace > ep0->ep.maxpacket) - bufferspace = ep0->ep.maxpacket; + /* Get data from FIFO */ + bufferspace = req->req.length - req->req.actual; + if (bufferspace > ep0->ep.maxpacket) + bufferspace = ep0->ep.maxpacket; - /* Copy data to buffer */ - prefetchw(req->req.buf + req->req.actual); - tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual, - bufferspace); - req->req.actual += bufferspace; + /* Copy data to buffer */ + prefetchw(req->req.buf + req->req.actual); + tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual, + bufferspace); + req->req.actual += bufferspace; - if (tr < ep0->ep.maxpacket) { - /* This is the last packet */ - done(ep0, req, 0); - udc->ep0state = WAIT_FOR_SETUP; - return 1; - } + if (tr < ep0->ep.maxpacket) { + /* This is the last packet */ + done(ep0, req, 0); + udc->ep0state = WAIT_FOR_SETUP; + return 1; } return 0; @@ -1962,18 +1960,17 @@ static void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) /* If there isn't a request waiting, something went wrong */ req = list_entry(ep->queue.next, struct lpc32xx_request, queue); - if (req) { - done(ep, req, 0); - /* Start another request if ready */ - if (!list_empty(&ep->queue)) { - if (ep->is_in) - udc_ep_in_req_dma(udc, ep); - else - udc_ep_out_req_dma(udc, ep); - } else - ep->req_pending = 0; - } + done(ep, req, 0); + + /* Start another request if ready */ + if (!list_empty(&ep->queue)) { + if (ep->is_in) + udc_ep_in_req_dma(udc, ep); + else + udc_ep_out_req_dma(udc, ep); + } else + ep->req_pending = 0; } @@ -1989,10 +1986,6 @@ static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) #endif req = list_entry(ep->queue.next, struct lpc32xx_request, queue); - if (!req) { - ep_err(ep, "DMA interrupt on no req!\n"); - return; - } dd = req->dd_desc_ptr; /* DMA descriptor should always be retired for this call */ diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index 78308b64955dd1..71012b28289105 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include "mv_udc.h" diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 
19bbc38f3d35dc..9230db57dab7a6 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include "net2272.h" diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 1b929c519cd712..b2903e4bbf54d0 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -56,7 +56,7 @@ #include #include -#include +#include #define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller" #define DRIVER_VERSION "2005 Sept 27/v3.0" diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c index e13b8ec8ef8ad0..61a45e4657d5d4 100644 --- a/drivers/usb/gadget/udc/omap_udc.c +++ b/drivers/usb/gadget/udc/omap_udc.c @@ -36,7 +36,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c index 1ac26cb49ecf98..7c96fc9f680f1b 100644 --- a/drivers/usb/gadget/udc/pxa25x_udc.c +++ b/drivers/usb/gadget/udc/pxa25x_udc.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c index 2fc5d4d277bc4a..cd89532adec262 100644 --- a/drivers/usb/gadget/udc/snps_udc_core.c +++ b/drivers/usb/gadget/udc/snps_udc_core.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include "amd5536udc.h" static void udc_setup_endpoints(struct udc *dev); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 74590f93ea617d..ebc45565c33e54 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -947,7 +947,7 @@ static int xudc_ep_disable(struct usb_ep *_ep) ep->desc = NULL; ep->ep_usb.desc = NULL; - dev_dbg(udc->dev, "USB Ep %d disable\n ", ep->epnumber); + dev_dbg(udc->dev, "USB Ep %d disable\n", ep->epnumber); /* Disable the endpoint.*/ epcfg = udc->read_fn(udc->addr + ep->offset); epcfg &= ~XUSB_EP_CFG_VALID_MASK; diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 4448d0ab06f0d4..d011d6c753edfc 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -40,11 +40,11 @@ config USB_XHCI_DBGCAP config USB_XHCI_PCI tristate depends on USB_PCI - depends on USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS default y config USB_XHCI_PCI_RENESAS tristate "Support for additional Renesas xHCI controller with firmware" + depends on USB_XHCI_PCI help Say 'Y' to enable the support for the Renesas xHCI controller with firmware. 
Make sure you have the firmware for the device and diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c index 77e42c739c5806..68cad0620f1a93 100644 --- a/drivers/usb/host/ehci-brcm.c +++ b/drivers/usb/host/ehci-brcm.c @@ -246,6 +246,7 @@ static const struct of_device_id brcm_ehci_of_match[] = { { .compatible = "brcm,bcm7445-ehci", }, {} }; +MODULE_DEVICE_TABLE(of, brcm_ehci_of_match); static struct platform_driver ehci_brcm_driver = { .probe = ehci_brcm_probe, diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index f40bc2a7a1247e..e3a961d3f5fc96 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -48,7 +48,6 @@ struct exynos_ehci_hcd { static int exynos_ehci_get_phy(struct device *dev, struct exynos_ehci_hcd *exynos_ehci) { - struct device_node *child; struct phy *phy; int phy_number, num_phys; int ret; @@ -66,26 +65,22 @@ static int exynos_ehci_get_phy(struct device *dev, return 0; /* Get PHYs using legacy bindings */ - for_each_available_child_of_node(dev->of_node, child) { + for_each_available_child_of_node_scoped(dev->of_node, child) { ret = of_property_read_u32(child, "reg", &phy_number); if (ret) { dev_err(dev, "Failed to parse device tree\n"); - of_node_put(child); return ret; } if (phy_number >= PHY_NUMBER) { dev_err(dev, "Invalid number of PHYs\n"); - of_node_put(child); return -EINVAL; } phy = devm_of_phy_optional_get(dev, child, NULL); exynos_ehci->phy[phy_number] = phy; - if (IS_ERR(phy)) { - of_node_put(child); + if (IS_ERR(phy)) return PTR_ERR(phy); - } } exynos_ehci->legacy_phy = true; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 802bfafb1012bb..cbc0b86fcc365e 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #if defined(CONFIG_PPC_PS3) #include diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index a52c3d858f3ee5..31059c8f94e63f 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -83,7 +83,7 @@ #include #include -#include +#include static int dbg_level; #ifdef ISP1362_DEBUG diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index d9adae53466b7a..d2b67da76762c6 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include "ohci.h" diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index bfa2eba4e3a757..1379e03644b2a3 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -37,7 +37,6 @@ struct exynos_ohci_hcd { static int exynos_ohci_get_phy(struct device *dev, struct exynos_ohci_hcd *exynos_ohci) { - struct device_node *child; struct phy *phy; int phy_number, num_phys; int ret; @@ -55,26 +54,22 @@ static int exynos_ohci_get_phy(struct device *dev, return 0; /* Get PHYs using legacy bindings */ - for_each_available_child_of_node(dev->of_node, child) { + for_each_available_child_of_node_scoped(dev->of_node, child) { ret = of_property_read_u32(child, "reg", &phy_number); if (ret) { dev_err(dev, "Failed to parse device tree\n"); - of_node_put(child); return ret; } if (phy_number >= PHY_NUMBER) { dev_err(dev, "Invalid number of PHYs\n"); - of_node_put(child); return -EINVAL; } phy = devm_of_phy_optional_get(dev, child, NULL); exynos_ohci->phy[phy_number] = phy; - if (IS_ERR(phy)) { - of_node_put(child); + if (IS_ERR(phy)) return PTR_ERR(phy); - } } 
exynos_ohci->legacy_phy = true; diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 5cec7640e913c8..9b24181fee6017 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -44,7 +44,7 @@ #include #include -#include +#include #include diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c index 8264c454f6bddf..5b775e1ea52707 100644 --- a/drivers/usb/host/ohci-nxp.c +++ b/drivers/usb/host/ohci-nxp.c @@ -51,8 +51,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver; static struct i2c_client *isp1301_i2c_client; -static struct clk *usb_host_clk; - static void isp1301_configure_lpc32xx(void) { /* LPC32XX only supports DAT_SE0 USB mode */ @@ -155,6 +153,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) struct resource *res; int ret = 0, irq; struct device_node *isp1301_node; + struct clk *usb_host_clk; if (pdev->dev.of_node) { isp1301_node = of_parse_phandle(pdev->dev.of_node, @@ -180,26 +179,20 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) } /* Enable USB host clock */ - usb_host_clk = devm_clk_get(&pdev->dev, NULL); + usb_host_clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(usb_host_clk)) { - dev_err(&pdev->dev, "failed to acquire USB OHCI clock\n"); + dev_err(&pdev->dev, "failed to acquire and start USB OHCI clock\n"); ret = PTR_ERR(usb_host_clk); goto fail_disable; } - ret = clk_prepare_enable(usb_host_clk); - if (ret < 0) { - dev_err(&pdev->dev, "failed to start USB OHCI clock\n"); - goto fail_disable; - } - isp1301_configure(); hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Failed to allocate HC buffer\n"); ret = -ENOMEM; - goto fail_hcd; + goto fail_disable; } hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); @@ -229,8 +222,6 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) ohci_nxp_stop_hc(); fail_resource: usb_put_hcd(hcd); -fail_hcd: - clk_disable_unprepare(usb_host_clk); fail_disable: isp1301_i2c_client = NULL; return ret; @@ -243,7 +234,6 @@ static void ohci_hcd_nxp_remove(struct platform_device *pdev) usb_remove_hcd(hcd); ohci_nxp_stop_hc(); usb_put_hcd(hcd); - clk_disable_unprepare(usb_host_clk); isp1301_i2c_client = NULL; } diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index f64bfe5f4d4d9d..a6be061f86532b 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -202,10 +202,6 @@ static const struct of_device_id ohci_hcd_ppc_of_match[] = { }, #endif #ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE - { - .name = "usb", - .compatible = "ohci-littledian", - }, { .name = "usb", .compatible = "ohci-le", diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 3f871fe62b90f2..ca3859463ba141 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 9f4bf8c5f8a51a..6576515a29cd37 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -297,9 +297,9 @@ static void put_child_connect_map(struct r8a66597 *r8a66597, int address) static void set_pipe_reg_addr(struct r8a66597_pipe *pipe, u8 dma_ch) { u16 pipenum = pipe->info.pipenum; - const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO}; - const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL}; - const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, 
CFIFOCTR}; + static const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO}; + static const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL}; + static const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR}; if (dma_ch > R8A66597_PIPE_NO_DMA) /* dma fifo not use? */ dma_ch = R8A66597_PIPE_NO_DMA; diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 2b871540bb5002..92f2d12384488f 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -54,7 +54,7 @@ #include #include #include -#include +#include #include "sl811.h" diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 161c09953c4e08..241d7aa1fbc20f 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -173,16 +173,18 @@ static void xhci_dbc_giveback(struct dbc_request *req, int status) spin_lock(&dbc->lock); } -static void xhci_dbc_flush_single_request(struct dbc_request *req) +static void trb_to_noop(union xhci_trb *trb) { - union xhci_trb *trb = req->trb; - trb->generic.field[0] = 0; trb->generic.field[1] = 0; trb->generic.field[2] = 0; trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)); +} +static void xhci_dbc_flush_single_request(struct dbc_request *req) +{ + trb_to_noop(req->trb); xhci_dbc_giveback(req, -ESHUTDOWN); } @@ -649,7 +651,6 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) case DS_DISABLED: return; case DS_CONFIGURED: - case DS_STALLED: if (dbc->driver->disconnect) dbc->driver->disconnect(dbc); break; @@ -669,6 +670,23 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) pm_runtime_put_sync(dbc->dev); /* note, was self.controller */ } +static void +handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted) +{ + if (halted) { + dev_info(dbc->dev, "DbC Endpoint halted\n"); + dep->halted = 1; + + } else if (dep->halted) { + dev_info(dbc->dev, "DbC Endpoint halt cleared\n"); + dep->halted = 0; + + if (!list_empty(&dep->list_pending)) + writel(DBC_DOOR_BELL_TARGET(dep->direction), + &dbc->regs->doorbell); + } +} + static void dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event) { @@ -697,6 +715,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) struct xhci_ring *ring; int ep_id; int status; + struct xhci_ep_ctx *ep_ctx; u32 comp_code; size_t remain_length; struct dbc_request *req = NULL, *r; @@ -706,8 +725,30 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3])); dep = (ep_id == EPID_OUT) ? get_out_ep(dbc) : get_in_ep(dbc); + ep_ctx = (ep_id == EPID_OUT) ? 
+ dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc); ring = dep->ring; + /* Match the pending request: */ + list_for_each_entry(r, &dep->list_pending, list_pending) { + if (r->trb_dma == event->trans_event.buffer) { + req = r; + break; + } + if (r->status == -COMP_STALL_ERROR) { + dev_warn(dbc->dev, "Give back stale stalled req\n"); + ring->num_trbs_free++; + xhci_dbc_giveback(r, 0); + } + } + + if (!req) { + dev_warn(dbc->dev, "no matched request\n"); + return; + } + + trace_xhci_dbc_handle_transfer(ring, &req->trb->generic); + switch (comp_code) { case COMP_SUCCESS: remain_length = 0; @@ -718,31 +759,49 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) case COMP_TRB_ERROR: case COMP_BABBLE_DETECTED_ERROR: case COMP_USB_TRANSACTION_ERROR: - case COMP_STALL_ERROR: dev_warn(dbc->dev, "tx error %d detected\n", comp_code); status = -comp_code; break; + case COMP_STALL_ERROR: + dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n", + event->trans_event.buffer, remain_length, ep_ctx->deq); + status = 0; + dep->halted = 1; + + /* + * xHC DbC may trigger a STALL bulk xfer event when host sends a + * ClearFeature(ENDPOINT_HALT) request even if there wasn't an + * active bulk transfer. + * + * Don't give back this transfer request as hardware will later + * start processing TRBs starting from this 'STALLED' TRB, + * causing TRBs and requests to be out of sync. + * + * If STALL event shows some bytes were transferred then assume + * it's an actual transfer issue and give back the request. + * In this case mark the TRB as No-Op to avoid hw from using the + * TRB again. + */ + + if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) { + dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n"); + if (remain_length == req->length) { + dev_dbg(dbc->dev, "Spurious stall event, keep req\n"); + req->status = -COMP_STALL_ERROR; + req->actual = 0; + return; + } + dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n"); + trb_to_noop(req->trb); + } + break; + default: dev_err(dbc->dev, "unknown tx error %d\n", comp_code); status = -comp_code; break; } - /* Match the pending request: */ - list_for_each_entry(r, &dep->list_pending, list_pending) { - if (r->trb_dma == event->trans_event.buffer) { - req = r; - break; - } - } - - if (!req) { - dev_warn(dbc->dev, "no matched request\n"); - return; - } - - trace_xhci_dbc_handle_transfer(ring, &req->trb->generic); - ring->num_trbs_free++; req->actual = req->length - remain_length; xhci_dbc_giveback(req, status); @@ -762,7 +821,6 @@ static void inc_evt_deq(struct xhci_ring *ring) static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) { dma_addr_t deq; - struct dbc_ep *dep; union xhci_trb *evt; u32 ctrl, portsc; bool update_erdp = false; @@ -814,43 +872,17 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) return EVT_DISC; } - /* Handle endpoint stall event: */ + /* Check and handle changes in endpoint halt status */ ctrl = readl(&dbc->regs->control); - if ((ctrl & DBC_CTRL_HALT_IN_TR) || - (ctrl & DBC_CTRL_HALT_OUT_TR)) { - dev_info(dbc->dev, "DbC Endpoint stall\n"); - dbc->state = DS_STALLED; - - if (ctrl & DBC_CTRL_HALT_IN_TR) { - dep = get_in_ep(dbc); - xhci_dbc_flush_endpoint_requests(dep); - } - - if (ctrl & DBC_CTRL_HALT_OUT_TR) { - dep = get_out_ep(dbc); - xhci_dbc_flush_endpoint_requests(dep); - } - - return EVT_DONE; - } + handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR); + handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & 
DBC_CTRL_HALT_OUT_TR); /* Clear DbC run change bit: */ if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) { writel(ctrl, &dbc->regs->control); ctrl = readl(&dbc->regs->control); } - break; - case DS_STALLED: - ctrl = readl(&dbc->regs->control); - if (!(ctrl & DBC_CTRL_HALT_IN_TR) && - !(ctrl & DBC_CTRL_HALT_OUT_TR) && - (ctrl & DBC_CTRL_DBC_RUN)) { - dbc->state = DS_CONFIGURED; - break; - } - - return EVT_DONE; default: dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state); break; @@ -939,7 +971,6 @@ static const char * const dbc_state_strings[DS_MAX] = { [DS_ENABLED] = "enabled", [DS_CONNECTED] = "connected", [DS_CONFIGURED] = "configured", - [DS_STALLED] = "stalled", }; static ssize_t dbc_show(struct device *dev, diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h index 0118c6288a3cce..8ec813b6e9fda6 100644 --- a/drivers/usb/host/xhci-dbgcap.h +++ b/drivers/usb/host/xhci-dbgcap.h @@ -81,7 +81,6 @@ enum dbc_state { DS_ENABLED, DS_CONNECTED, DS_CONFIGURED, - DS_STALLED, DS_MAX }; @@ -90,6 +89,7 @@ struct dbc_ep { struct list_head list_pending; struct xhci_ring *ring; unsigned int direction:1; + unsigned int halted:1; }; #define DBC_QUEUE_SIZE 16 @@ -110,7 +110,6 @@ struct dbc_port { struct tasklet_struct push; struct list_head write_pool; - struct kfifo write_fifo; bool registered; }; diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index b74e98e9439326..b8e78867e25a54 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -24,19 +24,6 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc) return dbc->priv; } -static unsigned int -dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size) -{ - unsigned int len; - - len = kfifo_len(&port->write_fifo); - if (len < size) - size = len; - if (size != 0) - size = kfifo_out(&port->write_fifo, packet, size); - return size; -} - static int dbc_start_tx(struct dbc_port *port) __releases(&port->port_lock) __acquires(&port->port_lock) @@ -49,7 +36,7 @@ static int dbc_start_tx(struct dbc_port *port) while (!list_empty(pool)) { req = list_entry(pool->next, struct dbc_request, list_pool); - len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET); + len = kfifo_out(&port->port.xmit_fifo, req->buf, DBC_MAX_PACKET); if (len == 0) break; do_tty_wake = true; @@ -216,7 +203,7 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf, spin_lock_irqsave(&port->port_lock, flags); if (count) - count = kfifo_in(&port->write_fifo, buf, count); + count = kfifo_in(&port->port.xmit_fifo, buf, count); dbc_start_tx(port); spin_unlock_irqrestore(&port->port_lock, flags); @@ -230,7 +217,7 @@ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch) int status; spin_lock_irqsave(&port->port_lock, flags); - status = kfifo_put(&port->write_fifo, ch); + status = kfifo_put(&port->port.xmit_fifo, ch); spin_unlock_irqrestore(&port->port_lock, flags); return status; @@ -253,7 +240,7 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty) unsigned int room; spin_lock_irqsave(&port->port_lock, flags); - room = kfifo_avail(&port->write_fifo); + room = kfifo_avail(&port->port.xmit_fifo); spin_unlock_irqrestore(&port->port_lock, flags); return room; @@ -266,7 +253,7 @@ static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty) unsigned int chars; spin_lock_irqsave(&port->port_lock, flags); - chars = kfifo_len(&port->write_fifo); + chars = kfifo_len(&port->port.xmit_fifo); spin_unlock_irqrestore(&port->port_lock, flags); return chars; @@ -346,7 +333,7 @@ static 
void dbc_rx_push(struct tasklet_struct *t) port->n_read = 0; } - list_move(&req->list_pool, &port->read_pool); + list_move_tail(&req->list_pool, &port->read_pool); } if (do_push) @@ -424,7 +411,8 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc) goto err_idr; } - ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL); + ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE, + GFP_KERNEL); if (ret) goto err_exit_port; @@ -453,7 +441,7 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc) xhci_dbc_free_requests(&port->read_pool); xhci_dbc_free_requests(&port->write_pool); err_free_fifo: - kfifo_free(&port->write_fifo); + kfifo_free(&port->port.xmit_fifo); err_exit_port: idr_remove(&dbc_tty_minors, port->minor); err_idr: @@ -478,7 +466,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc) idr_remove(&dbc_tty_minors, port->minor); mutex_unlock(&dbc_tty_minors_lock); - kfifo_free(&port->write_fifo); + kfifo_free(&port->port.xmit_fifo); xhci_dbc_free_requests(&port->read_pool); xhci_dbc_free_requests(&port->read_queue); xhci_dbc_free_requests(&port->write_pool); diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index 96eb36a5873826..67ecf7320c6239 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -42,6 +42,7 @@ #define XHCI_EXT_CAPS_DEBUG 10 /* Vendor caps */ #define XHCI_EXT_CAPS_VENDOR_INTEL 192 +#define XHCI_EXT_CAPS_INTEL_SPR_SHADOW 206 /* USB Legacy Support Capability - section 7.1.1 */ #define XHCI_HC_BIOS_OWNED (1 << 16) #define XHCI_HC_OS_OWNED (1 << 24) @@ -64,6 +65,10 @@ #define XHCI_HLC (1 << 19) #define XHCI_BLC (1 << 20) +/* Intel SPR shadow capability */ +#define XHCI_INTEL_SPR_ESS_PORT_OFFSET 0x8ac4 /* SuperSpeed port control */ +#define XHCI_INTEL_SPR_TUNEN BIT(4) /* Tunnel mode enabled */ + /* command register values to disable interrupts and halt the HC */ /* start/stop HC execution - do not write unless HC is halted*/ #define XHCI_CMD_RUN (1 << 0) diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 61f083de6e1967..8d774f19271e6a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -10,7 +10,7 @@ #include -#include +#include #include #include "xhci.h" @@ -752,6 +752,42 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) return xhci_reset(xhci, XHCI_RESET_SHORT_USEC); } +/** + * xhci_port_is_tunneled() - Check if USB3 connection is tunneled over USB4 + * @xhci: xhci host controller + * @port: USB3 port to be checked. + * + * Some hosts can detect if a USB3 connection is native USB3 or tunneled over + * USB4. Intel hosts expose this via vendor specific extended capability 206 + * eSS PORT registers TUNEN (tunnel enabled) bit. + * + * A USB3 device must be connected to the port to detect the tunnel. + * + * Return: link tunnel mode enum, USB_LINK_UNKNOWN if host is incapable of + * detecting USB3 over USB4 tunnels. USB_LINK_NATIVE or USB_LINK_TUNNELED + * otherwise. 
+ */ +enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci, + struct xhci_port *port) +{ + void __iomem *base; + u32 offset; + + base = &xhci->cap_regs->hc_capbase; + offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW); + + if (offset && offset <= XHCI_INTEL_SPR_ESS_PORT_OFFSET) { + offset = XHCI_INTEL_SPR_ESS_PORT_OFFSET + port->hcd_portnum * 0x20; + + if (readl(base + offset) & XHCI_INTEL_SPR_TUNEN) + return USB_LINK_TUNNELED; + else + return USB_LINK_NATIVE; + } + + return USB_LINK_UNKNOWN; +} + void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, u32 link_state) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 937ce5fd58095b..d2900197a49e74 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2332,7 +2332,8 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, } struct xhci_interrupter * -xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs) +xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, + u32 imod_interval) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_interrupter *ir; @@ -2365,6 +2366,11 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs) return NULL; } + err = xhci_set_interrupter_moderation(ir, imod_interval); + if (err) + xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n", + i, imod_interval); + xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n", i, xhci->max_interrupters); diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c index 247cc7c2ce7009..65fc9319d5e70f 100644 --- a/drivers/usb/host/xhci-pci-renesas.c +++ b/drivers/usb/host/xhci-pci-renesas.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include "xhci.h" #include "xhci-trace.h" @@ -50,6 +50,8 @@ #define RENESAS_RETRY 10000 #define RENESAS_DELAY 10 +#define RENESAS_FW_NAME "renesas_usb_fw.mem" + static int renesas_fw_download_image(struct pci_dev *dev, const u32 *fw, size_t step, bool rom) { @@ -573,12 +575,10 @@ static int renesas_load_fw(struct pci_dev *pdev, const struct firmware *fw) return err; } -int renesas_xhci_check_request_fw(struct pci_dev *pdev, - const struct pci_device_id *id) +static int renesas_xhci_check_request_fw(struct pci_dev *pdev, + const struct pci_device_id *id) { - struct xhci_driver_data *driver_data = - (struct xhci_driver_data *)id->driver_data; - const char *fw_name = driver_data->firmware; + const char fw_name[] = RENESAS_FW_NAME; const struct firmware *fw; bool has_rom; int err; @@ -625,7 +625,41 @@ int renesas_xhci_check_request_fw(struct pci_dev *pdev, release_firmware(fw); return err; } -EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw); -MODULE_DESCRIPTION("Support for Renesas xHCI controller with firmware"); +static int +xhci_pci_renesas_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int retval; + + retval = renesas_xhci_check_request_fw(dev, id); + if (retval) + return retval; + + return xhci_pci_common_probe(dev, id); +} + +static const struct pci_device_id pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) }, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) }, + { /* end: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +static struct pci_driver xhci_renesas_pci_driver = { + .name = "xhci-pci-renesas", + .id_table = pci_ids, + + .probe = xhci_pci_renesas_probe, + .remove = xhci_pci_remove, + + .shutdown = usb_hcd_pci_shutdown, + .driver = { + .pm = 
pm_ptr(&usb_hcd_pci_pm_ops), + }, +}; +module_pci_driver(xhci_renesas_pci_driver); + +MODULE_DESCRIPTION("Renesas xHCI PCI Host Controller Driver"); +MODULE_FIRMWARE(RENESAS_FW_NAME); +MODULE_IMPORT_NS(xhci); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index dc1e345ab67ea8..91dccd25a55142 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -55,6 +55,9 @@ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed +#define PCI_VENDOR_ID_PHYTIUM 0x1db7 +#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27 + /* Thunderbolt */ #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5 @@ -78,6 +81,9 @@ #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 #define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242 +#define PCI_DEVICE_ID_CADENCE 0x17CD +#define PCI_DEVICE_ID_CADENCE_SSP 0x0200 + static const char hcd_name[] = "xhci_hcd"; static struct hc_driver __read_mostly xhci_pci_hc_driver; @@ -93,6 +99,10 @@ static const struct xhci_driver_overrides xhci_pci_overrides __initconst = { .update_hub_device = xhci_pci_update_hub_device, }; +/* + * Primary Legacy and MSI IRQ are synced in suspend_common(). + * All MSI-X IRQs and secondary MSI IRQs should be synced here. + */ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) { struct usb_hcd *hcd = xhci_to_hcd(xhci); @@ -105,13 +115,12 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) } } -/* Free any IRQs and disable MSI-X */ +/* Legacy IRQ is freed by usb_remove_hcd() or usb_hcd_pci_shutdown() */ static void xhci_cleanup_msix(struct xhci_hcd *xhci) { struct usb_hcd *hcd = xhci_to_hcd(xhci); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - /* return if using legacy interrupt */ if (hcd->irq > 0) return; @@ -235,15 +244,6 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev) static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(dev); - struct xhci_driver_data *driver_data; - const struct pci_device_id *id; - - id = pci_match_id(to_pci_driver(pdev->dev.driver)->id_table, pdev); - - if (id && id->driver_data) { - driver_data = (struct xhci_driver_data *)id->driver_data; - xhci->quirks |= driver_data->quirks; - } /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && @@ -416,6 +416,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; + if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM && + pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI) + xhci->quirks |= XHCI_RESET_ON_RESUME; + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3432) @@ -473,6 +477,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; } + if (pdev->vendor == PCI_DEVICE_ID_CADENCE && + pdev->device == PCI_DEVICE_ID_CADENCE_SSP) + xhci->quirks |= XHCI_CDNS_SCTX_QUIRK; + /* xHC spec requires PCI devices to support D3hot and D3cold */ if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; @@ -534,10 +542,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd) struct xhci_hcd *xhci; struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval; + u8 sbrn; xhci = hcd_to_xhci(hcd); - if (!xhci->sbrn) - pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn); /* 
imod_interval is the interrupt moderation value in nanoseconds. */ xhci->imod_interval = 40000; @@ -552,7 +559,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd) if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_acpi_rtd3_enable(pdev); - xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn); + pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &sbrn); + xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int)sbrn); /* Find any debug ports */ return xhci_pci_reinit(xhci, pdev); @@ -572,21 +580,13 @@ static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hd * We need to register our own PCI probe function (instead of the USB core's * function) in order to create a second roothub under xHCI. */ -static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id) { int retval; struct xhci_hcd *xhci; struct usb_hcd *hcd; - struct xhci_driver_data *driver_data; struct reset_control *reset; - driver_data = (struct xhci_driver_data *)id->driver_data; - if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) { - retval = renesas_xhci_check_request_fw(dev, id); - if (retval) - return retval; - } - reset = devm_reset_control_get_optional_exclusive(&dev->dev, NULL); if (IS_ERR(reset)) return PTR_ERR(reset); @@ -651,12 +651,30 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) pm_runtime_put_noidle(&dev->dev); return retval; } +EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, xhci); -static void xhci_pci_remove(struct pci_dev *dev) +static const struct pci_device_id pci_ids_reject[] = { + /* handled by xhci-pci-renesas */ + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) }, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) }, + { /* end: all zeroes */ } +}; + +static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + if (pci_match_id(pci_ids_reject, dev)) + return -ENODEV; + + return xhci_pci_common_probe(dev, id); +} + +void xhci_pci_remove(struct pci_dev *dev) { struct xhci_hcd *xhci; + bool set_power_d3; xhci = hcd_to_xhci(pci_get_drvdata(dev)); + set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP; xhci->xhc_state |= XHCI_STATE_REMOVING; @@ -669,12 +687,13 @@ static void xhci_pci_remove(struct pci_dev *dev) xhci->shared_hcd = NULL; } + usb_hcd_pci_remove(dev); + /* Workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + if (set_power_d3) pci_set_power_state(dev, PCI_D3hot); - - usb_hcd_pci_remove(dev); } +EXPORT_SYMBOL_NS_GPL(xhci_pci_remove, xhci); /* * In some Intel xHCI controllers, in order to get D3 working, @@ -783,7 +802,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - int retval = 0; reset_control_reset(xhci->reset); @@ -814,8 +832,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg) if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_quirk(hcd); - retval = xhci_resume(xhci, msg); - return retval; + return xhci_resume(xhci, msg); } static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup) @@ -882,19 +899,8 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd) /*-------------------------------------------------------------------------*/ -static const struct xhci_driver_data reneses_data = { - .quirks = XHCI_RENESAS_FW_QUIRK, - .firmware = "renesas_usb_fw.mem", -}; - /* PCI driver selection metadata; PCI hotplugging uses this */ static const 
struct pci_device_id pci_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014), - .driver_data = (unsigned long)&reneses_data, - }, - { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015), - .driver_data = (unsigned long)&reneses_data, - }, /* handle any USB 3.0 xHCI controller */ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0), }, @@ -902,14 +908,6 @@ static const struct pci_device_id pci_ids[] = { }; MODULE_DEVICE_TABLE(pci, pci_ids); -/* - * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't - * load firmware, so don't encumber the xhci-pci driver with it. - */ -#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) -MODULE_FIRMWARE("renesas_usb_fw.mem"); -#endif - /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver xhci_pci_driver = { .name = hcd_name, diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h index cb9a8f331a4463..e87c7d9d76b8e2 100644 --- a/drivers/usb/host/xhci-pci.h +++ b/drivers/usb/host/xhci-pci.h @@ -4,22 +4,7 @@ #ifndef XHCI_PCI_H #define XHCI_PCI_H -#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) -int renesas_xhci_check_request_fw(struct pci_dev *dev, - const struct pci_device_id *id); - -#else -static int renesas_xhci_check_request_fw(struct pci_dev *dev, - const struct pci_device_id *id) -{ - return 0; -} - -#endif - -struct xhci_driver_data { - u64 quirks; - const char *firmware; -}; +int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id); +void xhci_pci_remove(struct pci_dev *dev); #endif diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 31bdfa52eeb252..ecaa75718e5926 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -259,6 +259,12 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s if (device_property_read_bool(tmpdev, "write-64-hi-lo-quirk")) xhci->quirks |= XHCI_WRITE_64_HI_LO; + if (device_property_read_bool(tmpdev, "xhci-missing-cas-quirk")) + xhci->quirks |= XHCI_MISSING_CAS; + + if (device_property_read_bool(tmpdev, "xhci-skip-phy-init-quirk")) + xhci->quirks |= XHCI_SKIP_PHY_INIT; + device_property_read_u32(tmpdev, "imod-interval-ns", &xhci->imod_interval); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 4ea2c3e072a9e3..4d664ba53fe9a4 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1399,6 +1399,20 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_stream_ctx *ctx = &ep->stream_info->stream_ctx_array[stream_id]; deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; + + /* + * Cadence xHCI controllers store some endpoint state + * information within Rsvd0 fields of Stream Endpoint + * context. This field is not cleared during Set TR + * Dequeue Pointer command which causes XDMA to skip + * over transfer ring and leads to data loss on stream + * pipe. + * To fix this issue driver must clear Rsvd0 field. 
+ */ + if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) { + ctx->reserved[0] = 0; + ctx->reserved[1] = 0; + } } else { deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; } @@ -2521,9 +2535,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, td->status = 0; break; case COMP_SHORT_PACKET: - xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", - td->urb->ep->desc.bEndpointAddress, - requested, remaining); td->status = 0; break; case COMP_STOPPED_SHORT_PACKET: @@ -2764,35 +2775,25 @@ static int handle_tx_event(struct xhci_hcd *xhci, return 0; } - do { - /* This TRB should be in the TD at the head of this ring's - * TD list. + if (list_empty(&ep_ring->td_list)) { + /* + * Don't print wanings if ring is empty due to a stopped endpoint generating an + * extra completion event if the device was suspended. Or, a event for the last TRB + * of a short TD we already got a short event for. The short TD is already removed + * from the TD list. */ - if (list_empty(&ep_ring->td_list)) { - /* - * Don't print wanings if it's due to a stopped endpoint - * generating an extra completion event if the device - * was suspended. Or, a event for the last TRB of a - * short TD we already got a short event for. - * The short TD is already removed from the TD list. - */ - - if (!(trb_comp_code == COMP_STOPPED || - trb_comp_code == COMP_STOPPED_LENGTH_INVALID || - ep_ring->last_td_was_short)) { - xhci_warn(xhci, "WARN Event TRB for slot %u ep %d with no TDs queued?\n", - slot_id, ep_index); - } - if (ep->skip) { - ep->skip = false; - xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n", - slot_id, ep_index); - } - - td = NULL; - goto check_endpoint_halted; + if (trb_comp_code != COMP_STOPPED && + trb_comp_code != COMP_STOPPED_LENGTH_INVALID && + !ep_ring->last_td_was_short) { + xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n", + slot_id, ep_index); } + ep->skip = false; + goto check_endpoint_halted; + } + + do { td = list_first_entry(&ep_ring->td_list, struct xhci_td, td_list); @@ -2803,7 +2804,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { skip_isoc_td(xhci, td, ep, status); - continue; + if (!list_empty(&ep_ring->td_list)) + continue; + + xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. 
Clear skip flag.\n", + slot_id, ep_index); + ep->skip = false; + td = NULL; + goto check_endpoint_halted; } /* @@ -3941,10 +3949,6 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, start_frame_id = (start_frame_id >> 3) & 0x7ff; end_frame_id = (end_frame_id >> 3) & 0x7ff; - xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", - __func__, index, readl(&xhci->run_regs->microframe_index), - start_frame_id, end_frame_id, start_frame); - if (start_frame_id < end_frame_id) { if (start_frame > end_frame_id || start_frame < start_frame_id) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index efdf4c228b8c0a..899c0effb5d3c1 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -347,8 +347,8 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir) } /* interrupt moderation interval imod_interval in nanoseconds */ -static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, - u32 imod_interval) +int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, + u32 imod_interval) { u32 imod; @@ -4525,6 +4525,20 @@ static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_port *port; u32 capability; + /* Check if USB3 device at root port is tunneled over USB4 */ + if (hcd->speed >= HCD_USB3 && !udev->parent->parent) { + port = xhci->usb3_rhub.ports[udev->portnum - 1]; + + udev->tunnel_mode = xhci_port_is_tunneled(xhci, port); + if (udev->tunnel_mode == USB_LINK_UNKNOWN) + dev_dbg(&udev->dev, "link tunnel state unknown\n"); + else if (udev->tunnel_mode == USB_LINK_TUNNELED) + dev_dbg(&udev->dev, "tunneled over USB4 link\n"); + else if (udev->tunnel_mode == USB_LINK_NATIVE) + dev_dbg(&udev->dev, "native USB 3.x link\n"); + return 0; + } + if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support) return 0; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index ebd0afd59a60be..620502de971a43 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1498,15 +1498,10 @@ struct xhci_hcd { spinlock_t lock; /* packed release number */ - u8 sbrn; u16 hci_version; - u8 max_slots; u16 max_interrupters; - u8 max_ports; - u8 isoc_threshold; /* imod_interval in ns (I * 250ns) */ u32 imod_interval; - int event_ring_max; /* 4KB min, 128MB max */ int page_size; /* Valid values are 12 to 20, inclusive */ @@ -1616,7 +1611,7 @@ struct xhci_hcd { #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) -#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36) +/* Reserved. 
It was XHCI_RENESAS_FW_QUIRK */ #define XHCI_SKIP_PHY_INIT BIT_ULL(37) #define XHCI_DISABLE_SPARSE BIT_ULL(38) #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) @@ -1628,6 +1623,7 @@ struct xhci_hcd { #define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) #define XHCI_ZHAOXIN_HOST BIT_ULL(46) #define XHCI_WRITE_64_HI_LO BIT_ULL(47) +#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -1831,7 +1827,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); struct xhci_interrupter * -xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs); +xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, + u32 imod_interval); void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir); @@ -1871,6 +1868,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); +int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, + u32 imod_interval); /* xHCI ring, segment, TRB, and TD functions */ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); @@ -1904,10 +1903,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, enum xhci_ep_reset_type reset_type); int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, u32 slot_id); -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, - unsigned int ep_index, unsigned int stream_id, - struct xhci_td *td); -void xhci_stop_endpoint_command_watchdog(struct timer_list *t); void xhci_handle_command_timeout(struct work_struct *work); void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, @@ -1929,7 +1924,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd); - +enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci, + struct xhci_port *port); void xhci_hc_died(struct xhci_hcd *xhci); #ifdef CONFIG_PM diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c index 0e5e4cb74c876f..add2d2e3b61bfd 100644 --- a/drivers/usb/isp1760/isp1760-hcd.c +++ b/drivers/usb/isp1760/isp1760-hcd.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include "isp1760-core.h" diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index c8098e9b432e13..62b5a30edc4267 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -107,7 +107,12 @@ static void appledisplay_complete(struct urb *urb) case ACD_BTN_BRIGHT_UP: case ACD_BTN_BRIGHT_DOWN: pdata->button_pressed = 1; - schedule_delayed_work(&pdata->work, 0); + /* + * there is a window during which no device + * is registered + */ + if (pdata->bd ) + schedule_delayed_work(&pdata->work, 0); break; case ACD_BTN_NONE: default: @@ -202,6 +207,7 @@ static int appledisplay_probe(struct usb_interface *iface, const struct usb_device_id *id) { struct backlight_properties props; + struct backlight_device *backlight; struct appledisplay *pdata; struct usb_device *udev = interface_to_usbdev(iface); struct usb_endpoint_descriptor *endpoint; @@ -272,13 +278,14 @@ static int appledisplay_probe(struct usb_interface *iface, memset(&props, 0, 
sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = 0xff; - pdata->bd = backlight_device_register(bl_name, NULL, pdata, + backlight = backlight_device_register(bl_name, NULL, pdata, &appledisplay_bl_data, &props); - if (IS_ERR(pdata->bd)) { + if (IS_ERR(backlight)) { dev_err(&iface->dev, "Backlight registration failed\n"); - retval = PTR_ERR(pdata->bd); + retval = PTR_ERR(backlight); goto error; } + pdata->bd = backlight; /* Try to get brightness */ brightness = appledisplay_bl_get_brightness(pdata->bd); diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c index 2b2019c19cdeb1..1ce885e4184c2e 100644 --- a/drivers/usb/misc/brcmstb-usb-pinmap.c +++ b/drivers/usb/misc/brcmstb-usb-pinmap.c @@ -335,6 +335,7 @@ static const struct of_device_id brcmstb_usb_pinmap_of_match[] = { { .compatible = "brcm,usb-pinmap" }, { }, }; +MODULE_DEVICE_TABLE(of, brcmstb_usb_pinmap_of_match); static struct platform_driver brcmstb_usb_pinmap_driver = { .driver = { diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index cecd7693b7413c..75f5a740cba397 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -88,6 +88,9 @@ static int vendor_command(struct cypress *dev, unsigned char request, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, address, data, iobuf, CYPRESS_MAX_REQSIZE, USB_CTRL_GET_TIMEOUT); + /* we must not process garbage */ + if (retval < 2) + goto err_buf; /* store returned data (more READs to be added) */ switch (request) { @@ -107,6 +110,7 @@ static int vendor_command(struct cypress *dev, unsigned char request, break; } +err_buf: kfree(iobuf); error: return retval; diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 7cbef74dfc9a95..f392d6f84df9c3 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -627,7 +627,6 @@ static const struct file_operations ld_usb_fops = { .open = ld_usb_open, .release = ld_usb_release, .poll = ld_usb_poll, - .llseek = no_llseek, }; /* diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c index 56710e6b165315..560591e02d6a4e 100644 --- a/drivers/usb/misc/onboard_usb_dev.c +++ b/drivers/usb/misc/onboard_usb_dev.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,17 @@ #include "onboard_usb_dev.h" +/* USB5744 register offset and mask */ +#define USB5744_CMD_ATTACH 0xAA +#define USB5744_CMD_ATTACH_LSB 0x56 +#define USB5744_CMD_CREG_ACCESS 0x99 +#define USB5744_CMD_CREG_ACCESS_LSB 0x37 +#define USB5744_CREG_MEM_ADDR 0x00 +#define USB5744_CREG_WRITE 0x00 +#define USB5744_CREG_RUNTIMEFLAGS2 0x41 +#define USB5744_CREG_RUNTIMEFLAGS2_LSB 0x1D +#define USB5744_CREG_BYPASS_UDC_SUSPEND BIT(3) + static void onboard_dev_attach_usb_driver(struct work_struct *work); static struct usb_device_driver onboard_dev_usbdev_driver; @@ -98,6 +110,7 @@ static int onboard_dev_power_on(struct onboard_dev *onboard_dev) fsleep(onboard_dev->pdata->reset_us); gpiod_set_value_cansleep(onboard_dev->reset_gpio, 0); + fsleep(onboard_dev->pdata->power_on_delay_us); onboard_dev->is_powered_on = true; @@ -296,10 +309,50 @@ static void onboard_dev_attach_usb_driver(struct work_struct *work) pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err)); } +static int onboard_dev_5744_i2c_init(struct i2c_client *client) +{ +#if IS_ENABLED(CONFIG_I2C) + struct device *dev = &client->dev; + int ret; + + /* + * Set BYPASS_UDC_SUSPEND bit to ensure MCU is always enabled 
+ * and ready to respond to SMBus runtime commands. + * The command writes 5 bytes to memory and single data byte in + * configuration register. + */ + char wr_buf[7] = {USB5744_CREG_MEM_ADDR, 5, + USB5744_CREG_WRITE, 1, + USB5744_CREG_RUNTIMEFLAGS2, + USB5744_CREG_RUNTIMEFLAGS2_LSB, + USB5744_CREG_BYPASS_UDC_SUSPEND}; + + ret = i2c_smbus_write_block_data(client, 0, sizeof(wr_buf), wr_buf); + if (ret) + return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n"); + + ret = i2c_smbus_write_word_data(client, USB5744_CMD_CREG_ACCESS, + USB5744_CMD_CREG_ACCESS_LSB); + if (ret) + return dev_err_probe(dev, ret, "Configuration Register Access Command failed\n"); + + /* Send SMBus command to boot hub. */ + ret = i2c_smbus_write_word_data(client, USB5744_CMD_ATTACH, + USB5744_CMD_ATTACH_LSB); + if (ret < 0) + return dev_err_probe(dev, ret, "USB Attach with SMBus command failed\n"); + + return ret; +#else + return -ENODEV; +#endif +} + static int onboard_dev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct onboard_dev *onboard_dev; + struct device_node *i2c_node; int err; onboard_dev = devm_kzalloc(dev, sizeof(*onboard_dev), GFP_KERNEL); @@ -339,6 +392,27 @@ static int onboard_dev_probe(struct platform_device *pdev) if (err) return err; + i2c_node = of_parse_phandle(pdev->dev.of_node, "i2c-bus", 0); + if (i2c_node) { + struct i2c_client *client; + + client = of_find_i2c_device_by_node(i2c_node); + of_node_put(i2c_node); + + if (!client) { + err = -EPROBE_DEFER; + goto err_power_off; + } + + if (of_device_is_compatible(pdev->dev.of_node, "usb424,2744") || + of_device_is_compatible(pdev->dev.of_node, "usb424,5744")) + err = onboard_dev_5744_i2c_init(client); + + put_device(&client->dev); + if (err < 0) + goto err_power_off; + } + /* * The USB driver might have been detached from the USB devices by * onboard_dev_remove() (e.g. 
through an 'unbind' by userspace), @@ -350,6 +424,10 @@ static int onboard_dev_probe(struct platform_device *pdev) schedule_work(&attach_usb_driver_work); return 0; + +err_power_off: + onboard_dev_power_off(onboard_dev); + return err; } static void onboard_dev_remove(struct platform_device *pdev) diff --git a/drivers/usb/misc/onboard_usb_dev.h b/drivers/usb/misc/onboard_usb_dev.h index fbba549c0f47bf..317b3eb99c0255 100644 --- a/drivers/usb/misc/onboard_usb_dev.h +++ b/drivers/usb/misc/onboard_usb_dev.h @@ -10,6 +10,7 @@ struct onboard_dev_pdata { unsigned long reset_us; /* reset pulse width in us */ + unsigned long power_on_delay_us; /* power on delay in us */ unsigned int num_supplies; /* number of supplies */ const char * const supply_names[MAX_SUPPLIES]; bool is_hub; @@ -24,6 +25,7 @@ static const struct onboard_dev_pdata microchip_usb424_data = { static const struct onboard_dev_pdata microchip_usb5744_data = { .reset_us = 0, + .power_on_delay_us = 10000, .num_supplies = 2, .supply_names = { "vdd", "vdd2" }, .is_hub = true, diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c index 26e9b8749d8ab1..19906301a4eb8a 100644 --- a/drivers/usb/misc/qcom_eud.c +++ b/drivers/usb/misc/qcom_eud.c @@ -232,7 +232,7 @@ static void eud_remove(struct platform_device *pdev) } static const struct of_device_id eud_dt_match[] = { - { .compatible = "qcom,sc7280-eud" }, + { .compatible = "qcom,eud" }, { } }; MODULE_DEVICE_TABLE(of, eud_dt_match); diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c index 1a8d5e80b9aec2..01ceafc4ab78ce 100644 --- a/drivers/usb/misc/usb-ljca.c +++ b/drivers/usb/misc/usb-ljca.c @@ -18,7 +18,7 @@ #include #include -#include +#include /* command flags */ #define LJCA_ACK_FLAG BIT(0) diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 4745a320eae453..4a9859e03f6b4c 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -404,7 +404,6 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, struct usb_yurex *dev; int len = 0; char in_buffer[MAX_S64_STRLEN]; - unsigned long flags; dev = file->private_data; @@ -419,9 +418,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, return -EIO; } - spin_lock_irqsave(&dev->lock, flags); + spin_lock_irq(&dev->lock); scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu); - spin_unlock_irqrestore(&dev->lock, flags); + spin_unlock_irq(&dev->lock); mutex_unlock(&dev->io_mutex); return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); @@ -511,8 +510,11 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, __func__, retval); goto error; } - if (set && timeout) + if (set && timeout) { + spin_lock_irq(&dev->lock); dev->bbu = c2; + spin_unlock_irq(&dev->lock); + } return timeout ? 
count : -EIO; error: diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 4e30de4db1c0a8..afb71c18415dd1 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1289,7 +1289,6 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) static const struct file_operations mon_fops_binary = { .owner = THIS_MODULE, .open = mon_bin_open, - .llseek = no_llseek, .read = mon_bin_read, /* .write = mon_text_write, */ .poll = mon_bin_poll, diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c index 3c23805ab1a444..398e02af6a2bd8 100644 --- a/drivers/usb/mon/mon_stat.c +++ b/drivers/usb/mon/mon_stat.c @@ -62,7 +62,6 @@ static int mon_stat_release(struct inode *inode, struct file *file) const struct file_operations mon_fops_stat = { .owner = THIS_MODULE, .open = mon_stat_open, - .llseek = no_llseek, .read = mon_stat_read, /* .write = mon_stat_write, */ /* .poll = mon_stat_poll, */ diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index 2fe9b95bac1d57..68b9b2b411899f 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -685,7 +685,6 @@ static int mon_text_release(struct inode *inode, struct file *file) static const struct file_operations mon_fops_text_t = { .owner = THIS_MODULE, .open = mon_text_open, - .llseek = no_llseek, .read = mon_text_read_t, .release = mon_text_release, }; @@ -693,7 +692,6 @@ static const struct file_operations mon_fops_text_t = { static const struct file_operations mon_fops_text_u = { .owner = THIS_MODULE, .open = mon_text_open, - .llseek = no_llseek, .read = mon_text_read_u, .release = mon_text_release, }; diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c index 0a35aab3ab815f..63c86c046b9865 100644 --- a/drivers/usb/musb/mediatek.c +++ b/drivers/usb/musb/mediatek.c @@ -416,10 +416,9 @@ static int mtk_musb_probe(struct platform_device *pdev) return -ENOMEM; ret = of_platform_populate(np, NULL, NULL, dev); - if (ret) { - dev_err(dev, "failed to create child devices at %p\n", np); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, + "failed to create child devices at %p\n", np); ret = mtk_musb_clks_get(glue); if (ret) @@ -448,23 +447,19 @@ static int mtk_musb_probe(struct platform_device *pdev) glue->role = USB_ROLE_NONE; break; default: - dev_err(&pdev->dev, "Error 'dr_mode' property\n"); - return -EINVAL; + return dev_err_probe(&pdev->dev, -EINVAL, + "Error 'dr_mode' property\n"); } glue->phy = devm_of_phy_get_by_index(dev, np, 0); - if (IS_ERR(glue->phy)) { - dev_err(dev, "fail to getting phy %ld\n", - PTR_ERR(glue->phy)); - return PTR_ERR(glue->phy); - } + if (IS_ERR(glue->phy)) + return dev_err_probe(dev, PTR_ERR(glue->phy), + "fail to getting phy\n"); glue->usb_phy = usb_phy_generic_register(); - if (IS_ERR(glue->usb_phy)) { - dev_err(dev, "fail to registering usb-phy %ld\n", - PTR_ERR(glue->usb_phy)); - return PTR_ERR(glue->usb_phy); - } + if (IS_ERR(glue->usb_phy)) + return dev_err_probe(dev, PTR_ERR(glue->usb_phy), + "fail to registering usb-phy\n"); glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); if (IS_ERR(glue->xceiv)) { diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c index 29c7e5cdb230e0..00e13214aa766c 100644 --- a/drivers/usb/musb/mpfs.c +++ b/drivers/usb/musb/mpfs.c @@ -49,30 +49,6 @@ static const struct musb_hdrc_config mpfs_musb_hdrc_config = { .ram_bits = MPFS_MUSB_RAM_BITS, }; -static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci) -{ - unsigned long flags; - irqreturn_t ret = IRQ_NONE; - 
struct musb *musb = __hci; - - spin_lock_irqsave(&musb->lock, flags); - - musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); - musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); - musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); - - if (musb->int_usb || musb->int_tx || musb->int_rx) { - musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb); - musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx); - musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx); - ret = musb_interrupt(musb); - } - - spin_unlock_irqrestore(&musb->lock, flags); - - return ret; -} - static void mpfs_musb_set_vbus(struct musb *musb, int is_on) { u8 devctl; @@ -111,6 +87,129 @@ static void mpfs_musb_set_vbus(struct musb *musb, int is_on) musb_readb(musb->mregs, MUSB_DEVCTL)); } +#define POLL_SECONDS 2 + +static void otg_timer(struct timer_list *t) +{ + struct musb *musb = from_timer(musb, t, dev_timer); + void __iomem *mregs = musb->mregs; + u8 devctl; + unsigned long flags; + + /* + * We poll because PolarFire SoC won't expose several OTG-critical + * status change events (from the transceiver) otherwise. + */ + devctl = musb_readb(mregs, MUSB_DEVCTL); + dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, + usb_otg_state_string(musb->xceiv->otg->state)); + + spin_lock_irqsave(&musb->lock, flags); + switch (musb->xceiv->otg->state) { + case OTG_STATE_A_WAIT_BCON: + devctl &= ~MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) { + musb->xceiv->otg->state = OTG_STATE_B_IDLE; + MUSB_DEV_MODE(musb); + mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); + } else { + musb->xceiv->otg->state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + } + break; + case OTG_STATE_A_WAIT_VFALL: + if (devctl & MUSB_DEVCTL_VBUS) { + mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); + break; + } + musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; + break; + case OTG_STATE_B_IDLE: + /* + * There's no ID-changed IRQ, so we have no good way to tell + * when to switch to the A-Default state machine (by setting + * the DEVCTL.Session bit). + * + * Workaround: whenever we're in B_IDLE, try setting the + * session flag every few seconds. If it works, ID was + * grounded and we're now in the A-Default state machine. + * + * NOTE: setting the session flag is _supposed_ to trigger + * SRP but clearly it doesn't. 
+ */ + musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION); + devctl = musb_readb(mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) + mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); + else + musb->xceiv->otg->state = OTG_STATE_A_IDLE; + break; + default: + break; + } + spin_unlock_irqrestore(&musb->lock, flags); +} + +static void __maybe_unused mpfs_musb_try_idle(struct musb *musb, unsigned long timeout) +{ + static unsigned long last_timer; + + if (timeout == 0) + timeout = jiffies + msecs_to_jiffies(3); + + /* Never idle if active, or when VBUS timeout is not set as host */ + if (musb->is_active || (musb->a_wait_bcon == 0 && + musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) { + dev_dbg(musb->controller, "%s active, deleting timer\n", + usb_otg_state_string(musb->xceiv->otg->state)); + del_timer(&musb->dev_timer); + last_timer = jiffies; + return; + } + + if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) { + dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); + return; + } + last_timer = timeout; + + dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", + usb_otg_state_string(musb->xceiv->otg->state), + jiffies_to_msecs(timeout - jiffies)); + mod_timer(&musb->dev_timer, timeout); +} + +static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci) +{ + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + struct musb *musb = __hci; + + spin_lock_irqsave(&musb->lock, flags); + + musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); + musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); + musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); + + if (musb->int_usb || musb->int_tx || musb->int_rx) { + musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb); + musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx); + musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx); + ret = musb_interrupt(musb); + } + + /* Poll for ID change */ + if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) + mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ); + + spin_unlock_irqrestore(&musb->lock, flags); + + return ret; +} + static int mpfs_musb_init(struct musb *musb) { struct device *dev = musb->controller; @@ -121,6 +220,8 @@ static int mpfs_musb_init(struct musb *musb) return PTR_ERR(musb->xceiv); } + timer_setup(&musb->dev_timer, otg_timer, 0); + musb->dyn_fifo = true; musb->isr = mpfs_musb_interrupt; @@ -129,13 +230,24 @@ static int mpfs_musb_init(struct musb *musb) return 0; } +static int mpfs_musb_exit(struct musb *musb) +{ + del_timer_sync(&musb->dev_timer); + + return 0; +} + static const struct musb_platform_ops mpfs_ops = { .quirks = MUSB_DMA_INVENTRA, .init = mpfs_musb_init, + .exit = mpfs_musb_exit, .fifo_mode = 2, #ifdef CONFIG_USB_INVENTRA_DMA .dma_init = musbhs_dma_controller_create, .dma_exit = musbhs_dma_controller_destroy, +#endif +#ifndef CONFIG_USB_MUSB_HOST + .try_idle = mpfs_musb_try_idle, #endif .set_vbus = mpfs_musb_set_vbus }; diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 2b2164e028b318..ce6f25a9650b3b 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include "musb_core.h" diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c index 1ebbf189a53509..c5c6b818998eea 100644 --- a/drivers/usb/phy/phy-fsl-usb.c +++ b/drivers/usb/phy/phy-fsl-usb.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include "phy-fsl-usb.h" diff --git 
a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c index 817c242a76ca06..5428b2b67de1fc 100644 --- a/drivers/usb/phy/phy-gpio-vbus-usb.c +++ b/drivers/usb/phy/phy-gpio-vbus-usb.c @@ -374,6 +374,7 @@ static const struct of_device_id gpio_vbus_of_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, gpio_vbus_of_match); static struct platform_driver gpio_vbus_driver = { .driver = { diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 920a32cd094d6f..cc4156c1b148ee 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -18,6 +18,7 @@ #include #include #include +#include #define DRIVER_NAME "mxs_phy" @@ -70,6 +71,9 @@ #define BM_USBPHY_PLL_EN_USB_CLKS BIT(6) /* Anatop Registers */ +#define ANADIG_REG_1P1_SET 0x114 +#define ANADIG_REG_1P1_CLR 0x118 + #define ANADIG_ANA_MISC0 0x150 #define ANADIG_ANA_MISC0_SET 0x154 #define ANADIG_ANA_MISC0_CLR 0x158 @@ -117,6 +121,14 @@ #define BM_ANADIG_USB2_MISC_RX_VPIN_FS BIT(29) #define BM_ANADIG_USB2_MISC_RX_VMIN_FS BIT(28) +/* System Integration Module (SIM) Registers */ +#define SIM_GPR1 0x30 + +#define USB_PHY_VLLS_WAKEUP_EN BIT(0) + +#define BM_ANADIG_REG_1P1_ENABLE_WEAK_LINREG BIT(18) +#define BM_ANADIG_REG_1P1_TRACK_VDD_SOC_CAP BIT(19) + #define to_mxs_phy(p) container_of((p), struct mxs_phy, phy) /* Do disconnection between PHY and controller without vbus */ @@ -149,6 +161,15 @@ #define MXS_PHY_TX_D_CAL_MIN 79 #define MXS_PHY_TX_D_CAL_MAX 119 +/* + * At imx6q/6sl/6sx, the PHY2's clock is controlled by hardware directly, + * e.g. according to the PHY's suspend status. In these PHYs, we only need to + * enable the clock at initialization and disable it in its shutdown routine. + * These PHYs can send a resume signal without software intervention if the + * clock is not gated. 
+ */ +#define MXS_PHY_HARDWARE_CONTROL_PHY2_CLK BIT(4) + struct mxs_phy_data { unsigned int flags; }; @@ -160,12 +181,14 @@ static const struct mxs_phy_data imx23_phy_data = { static const struct mxs_phy_data imx6q_phy_data = { .flags = MXS_PHY_SENDING_SOF_TOO_FAST | MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | - MXS_PHY_NEED_IP_FIX, + MXS_PHY_NEED_IP_FIX | + MXS_PHY_HARDWARE_CONTROL_PHY2_CLK, }; static const struct mxs_phy_data imx6sl_phy_data = { .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | - MXS_PHY_NEED_IP_FIX, + MXS_PHY_NEED_IP_FIX | + MXS_PHY_HARDWARE_CONTROL_PHY2_CLK, }; static const struct mxs_phy_data vf610_phy_data = { @@ -174,11 +197,13 @@ static const struct mxs_phy_data vf610_phy_data = { }; static const struct mxs_phy_data imx6sx_phy_data = { - .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS, + .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | + MXS_PHY_HARDWARE_CONTROL_PHY2_CLK, }; static const struct mxs_phy_data imx6ul_phy_data = { - .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS, + .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | + MXS_PHY_HARDWARE_CONTROL_PHY2_CLK, }; static const struct mxs_phy_data imx7ulp_phy_data = { @@ -201,9 +226,11 @@ struct mxs_phy { struct clk *clk; const struct mxs_phy_data *data; struct regmap *regmap_anatop; + struct regmap *regmap_sim; int port_id; u32 tx_reg_set; u32 tx_reg_mask; + struct regulator *phy_3p0; }; static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy) @@ -221,6 +248,11 @@ static inline bool is_imx7ulp_phy(struct mxs_phy *mxs_phy) return mxs_phy->data == &imx7ulp_phy_data; } +static inline bool is_imx6ul_phy(struct mxs_phy *mxs_phy) +{ + return mxs_phy->data == &imx6ul_phy_data; +} + /* * PHY needs some 32K cycles to switch from 32K clock to * bus (such as AHB/AXI, etc) clock. @@ -288,6 +320,16 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy) if (ret) goto disable_pll; + if (mxs_phy->phy_3p0) { + ret = regulator_enable(mxs_phy->phy_3p0); + if (ret) { + dev_err(mxs_phy->phy.dev, + "Failed to enable 3p0 regulator, ret=%d\n", + ret); + return ret; + } + } + /* Power up the PHY */ writel(0, base + HW_USBPHY_PWD); @@ -448,6 +490,9 @@ static void mxs_phy_shutdown(struct usb_phy *phy) if (is_imx7ulp_phy(mxs_phy)) mxs_phy_pll_enable(phy->io_priv, false); + if (mxs_phy->phy_3p0) + regulator_disable(mxs_phy->phy_3p0); + clk_disable_unprepare(mxs_phy->clk); } @@ -503,12 +548,19 @@ static int mxs_phy_suspend(struct usb_phy *x, int suspend) } writel(BM_USBPHY_CTRL_CLKGATE, x->io_priv + HW_USBPHY_CTRL_SET); - clk_disable_unprepare(mxs_phy->clk); + if (!(mxs_phy->port_id == 1 && + (mxs_phy->data->flags & + MXS_PHY_HARDWARE_CONTROL_PHY2_CLK))) + clk_disable_unprepare(mxs_phy->clk); } else { mxs_phy_clock_switch_delay(); - ret = clk_prepare_enable(mxs_phy->clk); - if (ret) - return ret; + if (!(mxs_phy->port_id == 1 && + (mxs_phy->data->flags & + MXS_PHY_HARDWARE_CONTROL_PHY2_CLK))) { + ret = clk_prepare_enable(mxs_phy->clk); + if (ret) + return ret; + } writel(BM_USBPHY_CTRL_CLKGATE, x->io_priv + HW_USBPHY_CTRL_CLR); writel(0, x->io_priv + HW_USBPHY_PWD); @@ -738,6 +790,17 @@ static int mxs_phy_probe(struct platform_device *pdev) } } + /* Currently, only imx7ulp has SIM module */ + if (of_get_property(np, "nxp,sim", NULL)) { + mxs_phy->regmap_sim = syscon_regmap_lookup_by_phandle + (np, "nxp,sim"); + if (IS_ERR(mxs_phy->regmap_sim)) { + dev_dbg(&pdev->dev, + "failed to find regmap for sim\n"); + return PTR_ERR(mxs_phy->regmap_sim); + } + } + /* Precompute which bits of the TX register are to be updated, if any */ if (!of_property_read_u32(np, 
"fsl,tx-cal-45-dn-ohms", &val) && val >= MXS_PHY_TX_CAL45_MIN && val <= MXS_PHY_TX_CAL45_MAX) { @@ -789,6 +852,17 @@ static int mxs_phy_probe(struct platform_device *pdev) mxs_phy->clk = clk; mxs_phy->data = of_device_get_match_data(&pdev->dev); + mxs_phy->phy_3p0 = devm_regulator_get(&pdev->dev, "phy-3p0"); + if (PTR_ERR(mxs_phy->phy_3p0) == -ENODEV) + /* not exist */ + mxs_phy->phy_3p0 = NULL; + else if (IS_ERR(mxs_phy->phy_3p0)) + return dev_err_probe(&pdev->dev, PTR_ERR(mxs_phy->phy_3p0), + "Getting regulator error\n"); + + if (mxs_phy->phy_3p0) + regulator_set_voltage(mxs_phy->phy_3p0, 3200000, 3200000); + platform_set_drvdata(pdev, mxs_phy); device_set_wakeup_capable(&pdev->dev, true); @@ -804,28 +878,58 @@ static void mxs_phy_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP +static void mxs_phy_wakeup_enable(struct mxs_phy *mxs_phy, bool on) +{ + u32 mask = USB_PHY_VLLS_WAKEUP_EN; + + /* If the SoCs don't have SIM, quit */ + if (!mxs_phy->regmap_sim) + return; + + if (on) { + regmap_update_bits(mxs_phy->regmap_sim, SIM_GPR1, mask, mask); + udelay(500); + } else { + regmap_update_bits(mxs_phy->regmap_sim, SIM_GPR1, mask, 0); + } +} + static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on) { - unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR; + unsigned int reg; + u32 value; /* If the SoCs don't have anatop, quit */ if (!mxs_phy->regmap_anatop) return; - if (is_imx6q_phy(mxs_phy)) + if (is_imx6q_phy(mxs_phy)) { + reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR; regmap_write(mxs_phy->regmap_anatop, reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG); - else if (is_imx6sl_phy(mxs_phy)) + } else if (is_imx6sl_phy(mxs_phy)) { + reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR; regmap_write(mxs_phy->regmap_anatop, reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL); + } else if (is_imx6ul_phy(mxs_phy)) { + reg = on ? 
ANADIG_REG_1P1_SET : ANADIG_REG_1P1_CLR; + value = BM_ANADIG_REG_1P1_ENABLE_WEAK_LINREG | + BM_ANADIG_REG_1P1_TRACK_VDD_SOC_CAP; + if (mxs_phy_get_vbus_status(mxs_phy) && on) + regmap_write(mxs_phy->regmap_anatop, reg, value); + else if (!on) + regmap_write(mxs_phy->regmap_anatop, reg, value); + } } static int mxs_phy_system_suspend(struct device *dev) { struct mxs_phy *mxs_phy = dev_get_drvdata(dev); - if (device_may_wakeup(dev)) + if (device_may_wakeup(dev)) { mxs_phy_enable_ldo_in_suspend(mxs_phy, true); + mxs_phy_wakeup_enable(mxs_phy, true); + } return 0; } @@ -834,8 +938,10 @@ static int mxs_phy_system_resume(struct device *dev) { struct mxs_phy *mxs_phy = dev_get_drvdata(dev); - if (device_may_wakeup(dev)) + if (device_may_wakeup(dev)) { mxs_phy_enable_ldo_in_suspend(mxs_phy, false); + mxs_phy_wakeup_enable(mxs_phy, false); + } return 0; } diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index d7aa913ceb8a0b..c58a12c147f451 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -21,6 +22,7 @@ static const struct class role_class = { struct usb_role_switch { struct device dev; + struct lock_class_key key; struct mutex lock; /* device lock*/ struct module *module; /* the module this device depends on */ enum usb_role role; @@ -326,6 +328,8 @@ static void usb_role_switch_release(struct device *dev) { struct usb_role_switch *sw = to_role_switch(dev); + mutex_destroy(&sw->lock); + lockdep_unregister_key(&sw->key); kfree(sw); } @@ -364,7 +368,8 @@ usb_role_switch_register(struct device *parent, if (!sw) return ERR_PTR(-ENOMEM); - mutex_init(&sw->lock); + lockdep_register_key(&sw->key); + mutex_init_with_key(&sw->lock, &sw->key); sw->allow_userspace_control = desc->allow_userspace_control; sw->usb2_port = desc->usb2_port; diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c index a1df686c306648..2a76f1f0ee4f83 100644 --- a/drivers/usb/serial/aircable.c +++ b/drivers/usb/serial/aircable.c @@ -35,7 +35,7 @@ * */ -#include +#include #include #include #include @@ -138,7 +138,6 @@ static void aircable_process_read_urb(struct urb *urb) static struct usb_serial_driver aircable_device = { .driver = { - .owner = THIS_MODULE, .name = "aircable", }, .id_table = id_table, diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 67a07cc007f09a..800b04fe37fa47 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -599,7 +599,6 @@ static void ark3116_process_read_urb(struct urb *urb) static struct usb_serial_driver ark3116_device = { .driver = { - .owner = THIS_MODULE, .name = "ark3116", }, .id_table = id_table, diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c index cf47ee4ae5d3bd..44f5b58beec922 100644 --- a/drivers/usb/serial/belkin_sa.c +++ b/drivers/usb/serial/belkin_sa.c @@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(usb, id_table); /* All of the device info needed for the serial converters */ static struct usb_serial_driver belkin_device = { .driver = { - .owner = THIS_MODULE, .name = "belkin", }, .description = "Belkin / Peracom / GoHubs USB Serial Adapter", diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 0870c6533f8012..d10e4c4848a0ab 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #define DEFAULT_BAUD_RATE 9600 #define DEFAULT_TIMEOUT 1000 @@ -837,7 +837,6 @@ static int 
ch341_reset_resume(struct usb_serial *serial) static struct usb_serial_driver ch341_device = { .driver = { - .owner = THIS_MODULE, .name = "ch341-uart", }, .id_table = id_table, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 21fd26609252be..c24101f0a07ad1 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -299,7 +299,6 @@ struct cp210x_port_private { static struct usb_serial_driver cp210x_device = { .driver = { - .owner = THIS_MODULE, .name = "cp210x", }, .id_table = id_table, diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c index 51e5aac3bf4c5c..76dd8e7453b5db 100644 --- a/drivers/usb/serial/cyberjack.c +++ b/drivers/usb/serial/cyberjack.c @@ -67,7 +67,6 @@ MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver cyberjack_device = { .driver = { - .owner = THIS_MODULE, .name = "cyberjack", }, .description = "Reiner SCT Cyberjack USB card reader", diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 1e0c028c5ec95f..e29569d65991b3 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include "cypress_m8.h" @@ -139,7 +139,6 @@ static void cypress_write_int_callback(struct urb *urb); static struct usb_serial_driver cypress_earthmate_device = { .driver = { - .owner = THIS_MODULE, .name = "earthmate", }, .description = "DeLorme Earthmate USB", @@ -166,7 +165,6 @@ static struct usb_serial_driver cypress_earthmate_device = { static struct usb_serial_driver cypress_hidcom_device = { .driver = { - .owner = THIS_MODULE, .name = "cyphidcom", }, .description = "HID->COM RS232 Adapter", @@ -192,7 +190,6 @@ static struct usb_serial_driver cypress_hidcom_device = { static struct usb_serial_driver cypress_ca42v2_device = { .driver = { - .owner = THIS_MODULE, .name = "nokiaca42v2", }, .description = "Nokia CA-42 V2 Adapter", diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index d1dea38505762c..a064859654121d 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -262,7 +262,6 @@ MODULE_DEVICE_TABLE(usb, id_table_combined); static struct usb_serial_driver digi_acceleport_2_device = { .driver = { - .owner = THIS_MODULE, .name = "digi_2", }, .description = "Digi 2 port USB adapter", @@ -293,7 +292,6 @@ static struct usb_serial_driver digi_acceleport_2_device = { static struct usb_serial_driver digi_acceleport_4_device = { .driver = { - .owner = THIS_MODULE, .name = "digi_4", }, .description = "Digi 4 port USB adapter", diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c index 405e835e93dd93..aedcf7ebd26938 100644 --- a/drivers/usb/serial/empeg.c +++ b/drivers/usb/serial/empeg.c @@ -43,7 +43,6 @@ MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver empeg_device = { .driver = { - .owner = THIS_MODULE, .name = "empeg", }, .id_table = id_table, diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index 5f7a46bcace65c..530b77fc2f78d8 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -967,7 +967,6 @@ static int f81232_resume(struct usb_serial *serial) static struct usb_serial_driver f81232_device = { .driver = { - .owner = THIS_MODULE, .name = "f81232", }, .id_table = f81232_id_table, @@ -994,7 +993,6 @@ static struct usb_serial_driver f81232_device = { static struct usb_serial_driver f81534a_device = { .driver = { - .owner = THIS_MODULE, 
.name = "f81534a", }, .id_table = f81534a_id_table, diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c index ef126cd3d94fba..685930ac83833c 100644 --- a/drivers/usb/serial/f81534.c +++ b/drivers/usb/serial/f81534.c @@ -1538,7 +1538,6 @@ static int f81534_resume(struct usb_serial *serial) static struct usb_serial_driver f81534_device = { .driver = { - .owner = THIS_MODULE, .name = "f81534", }, .description = DRIVER_DESC, diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 76a04ab411006d..c6f17d732b9581 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -2871,7 +2871,6 @@ static int ftdi_ioctl(struct tty_struct *tty, static struct usb_serial_driver ftdi_device = { .driver = { - .owner = THIS_MODULE, .name = "ftdi_sio", .dev_groups = ftdi_groups, }, diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 6d6ec7eed87c92..b97ba8ed6801dc 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1412,7 +1412,6 @@ static void garmin_port_remove(struct usb_serial_port *port) /* All of the device info needed */ static struct usb_serial_driver garmin_device = { .driver = { - .owner = THIS_MODULE, .name = "garmin_gps", }, .description = "Garmin GPS usb/tty", diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 15b6dee3a8e503..757f0a586ddb1c 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -63,7 +63,6 @@ static int usb_serial_generic_calc_num_ports(struct usb_serial *serial, static struct usb_serial_driver usb_serial_generic_device = { .driver = { - .owner = THIS_MODULE, .name = "generic", }, .id_table = generic_device_ids, diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index abe4bbb0ac654f..c7d6b5e3f8982b 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2978,7 +2978,6 @@ static void edge_port_remove(struct usb_serial_port *port) static struct usb_serial_driver edgeport_2port_device = { .driver = { - .owner = THIS_MODULE, .name = "edgeport_2", }, .description = "Edgeport 2 port adapter", @@ -3013,7 +3012,6 @@ static struct usb_serial_driver edgeport_2port_device = { static struct usb_serial_driver edgeport_4port_device = { .driver = { - .owner = THIS_MODULE, .name = "edgeport_4", }, .description = "Edgeport 4 port adapter", @@ -3048,7 +3046,6 @@ static struct usb_serial_driver edgeport_4port_device = { static struct usb_serial_driver edgeport_8port_device = { .driver = { - .owner = THIS_MODULE, .name = "edgeport_8", }, .description = "Edgeport 8 port adapter", @@ -3083,7 +3080,6 @@ static struct usb_serial_driver edgeport_8port_device = { static struct usb_serial_driver epic_device = { .driver = { - .owner = THIS_MODULE, .name = "epic", }, .description = "EPiC device", diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 7a3a6e53945652..7d0584b2a23473 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -2670,7 +2670,6 @@ static int edge_resume(struct usb_serial *serial) static struct usb_serial_driver edgeport_1port_device = { .driver = { - .owner = THIS_MODULE, .name = "edgeport_ti_1", }, .description = "Edgeport TI 1 port adapter", @@ -2708,7 +2707,6 @@ static struct usb_serial_driver edgeport_1port_device = { static struct usb_serial_driver edgeport_2port_device = { .driver = { - .owner = THIS_MODULE, .name = "edgeport_ti_2", }, .description = "Edgeport TI 2 port adapter", diff --git 
a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c index e11441bac44f82..3c6a9b9b9c2b93 100644 --- a/drivers/usb/serial/ipaq.c +++ b/drivers/usb/serial/ipaq.c @@ -496,7 +496,6 @@ MODULE_DEVICE_TABLE(usb, ipaq_id_table); /* All of the device info needed for the Compaq iPAQ */ static struct usb_serial_driver ipaq_device = { .driver = { - .owner = THIS_MODULE, .name = "ipaq", }, .description = "PocketPC PDA", diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c index d04c7cc5c1c289..b1e672c2f42379 100644 --- a/drivers/usb/serial/ipw.c +++ b/drivers/usb/serial/ipw.c @@ -285,7 +285,6 @@ static void ipw_close(struct usb_serial_port *port) static struct usb_serial_driver ipw_device = { .driver = { - .owner = THIS_MODULE, .name = "ipw", }, .description = "IPWireless converter", diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c index 82f108134e6fad..a106b71e698f4e 100644 --- a/drivers/usb/serial/ir-usb.c +++ b/drivers/usb/serial/ir-usb.c @@ -71,7 +71,6 @@ MODULE_DEVICE_TABLE(usb, ir_id_table); static struct usb_serial_driver ir_device = { .driver = { - .owner = THIS_MODULE, .name = "ir-usb", }, .description = "IR Dongle", diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index 77cba71bcccb5d..c21dcc9b6f0593 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -1157,7 +1157,6 @@ static int iuu_remove_sysfs_attrs(struct usb_serial_port *port) static struct usb_serial_driver iuu_device = { .driver = { - .owner = THIS_MODULE, .name = "iuu_phoenix", }, .id_table = id_table, diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 0a783985197c3d..9129e0282c246e 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -3001,7 +3001,6 @@ static void keyspan_port_remove(struct usb_serial_port *port) /* Structs for the devices, pre and post renumeration. 
*/ static struct usb_serial_driver keyspan_pre_device = { .driver = { - .owner = THIS_MODULE, .name = "keyspan_no_firm", }, .description = "Keyspan - (without firmware)", @@ -3012,7 +3011,6 @@ static struct usb_serial_driver keyspan_pre_device = { static struct usb_serial_driver keyspan_1port_device = { .driver = { - .owner = THIS_MODULE, .name = "keyspan_1", }, .description = "Keyspan 1 port adapter", @@ -3036,7 +3034,6 @@ static struct usb_serial_driver keyspan_1port_device = { static struct usb_serial_driver keyspan_2port_device = { .driver = { - .owner = THIS_MODULE, .name = "keyspan_2", }, .description = "Keyspan 2 port adapter", @@ -3060,7 +3057,6 @@ static struct usb_serial_driver keyspan_2port_device = { static struct usb_serial_driver keyspan_4port_device = { .driver = { - .owner = THIS_MODULE, .name = "keyspan_4", }, .description = "Keyspan 4 port adapter", diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 0eef358b314a31..e98b479593d371 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -676,7 +676,6 @@ static void keyspan_pda_port_remove(struct usb_serial_port *port) static struct usb_serial_driver keyspan_pda_fake_device = { .driver = { - .owner = THIS_MODULE, .name = "keyspan_pda_pre", }, .description = "Keyspan PDA - (prerenumeration)", @@ -687,7 +686,6 @@ static struct usb_serial_driver keyspan_pda_fake_device = { static struct usb_serial_driver keyspan_pda_device = { .driver = { - .owner = THIS_MODULE, .name = "keyspan_pda", }, .description = "Keyspan PDA", diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 394b3189e003a1..d36155b6d2bfaa 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include #include #include "kl5kusb105.h" @@ -75,7 +75,6 @@ MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver kl5kusb105d_device = { .driver = { - .owner = THIS_MODULE, .name = "kl5kusb105d", }, .description = "KL5KUSB105D / PalmConnect", diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 5e775f68fcb857..464433be203443 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -77,7 +77,6 @@ MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver kobil_device = { .driver = { - .owner = THIS_MODULE, .name = "kobil", }, .description = "KOBIL USB smart card terminal", @@ -156,8 +155,7 @@ static void kobil_init_termios(struct tty_struct *tty) { /* Default to echo off and other sane device settings */ tty->termios.c_lflag = 0; - tty->termios.c_iflag &= ~(ISIG | ICANON | ECHO | IEXTEN | XCASE); - tty->termios.c_iflag |= IGNBRK | IGNPAR | IXOFF; + tty->termios.c_iflag = IGNBRK | IGNPAR | IXOFF; /* do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D) */ tty->termios.c_oflag &= ~ONLCR; } diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index 6570c8817a8045..2bce8cc03aca2e 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include @@ -69,7 +69,6 @@ MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver mct_u232_device = { .driver = { - .owner = THIS_MODULE, .name = "mct_u232", }, .description = "MCT U232", diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c index 30ab565e073885..02887829290192 100644 --- a/drivers/usb/serial/metro-usb.c +++ 
b/drivers/usb/serial/metro-usb.c @@ -341,7 +341,6 @@ static void metrousb_unthrottle(struct tty_struct *tty) static struct usb_serial_driver metrousb_device = { .driver = { - .owner = THIS_MODULE, .name = "metro-usb", }, .description = "Metrologic USB to Serial", diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 23544074eb1c3d..e59bfa7c803007 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -1724,7 +1724,6 @@ static void mos7720_port_remove(struct usb_serial_port *port) static struct usb_serial_driver moschip7720_2port_driver = { .driver = { - .owner = THIS_MODULE, .name = "moschip7720", }, .description = "Moschip 2 port adapter", diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 85697466b14768..ca3da79afd23f9 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1782,7 +1782,6 @@ static int mos7840_resume(struct usb_serial *serial) static struct usb_serial_driver moschip7840_4port_device = { .driver = { - .owner = THIS_MODULE, .name = "mos7840", }, .description = DRIVER_DESC, diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c index 942cb01534232e..ad5fdf55a02e18 100644 --- a/drivers/usb/serial/mxuport.c +++ b/drivers/usb/serial/mxuport.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include /* Definitions for the vendor ID and device ID */ #define MX_USBSERIAL_VID 0x110A @@ -1278,7 +1278,6 @@ static int mxuport_resume(struct usb_serial *serial) static struct usb_serial_driver mxuport_device = { .driver = { - .owner = THIS_MODULE, .name = "mxuport", }, .description = "MOXA UPort", diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c index 82791fd67c468e..4d57a59022920c 100644 --- a/drivers/usb/serial/navman.c +++ b/drivers/usb/serial/navman.c @@ -95,7 +95,6 @@ static int navman_write(struct tty_struct *tty, struct usb_serial_port *port, static struct usb_serial_driver navman_device = { .driver = { - .owner = THIS_MODULE, .name = "navman", }, .id_table = id_table, diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index 41f1b872d277b4..397ebd5a3e7478 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -49,7 +49,6 @@ MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver zyxel_omninet_device = { .driver = { - .owner = THIS_MODULE, .name = "omninet", }, .description = "ZyXEL - omni.net usb", diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index e31a6d77da3a22..1ee84ccc4bbdc1 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -375,7 +375,6 @@ static void opticon_port_remove(struct usb_serial_port *port) static struct usb_serial_driver opticon_device = { .driver = { - .owner = THIS_MODULE, .name = "opticon", }, .id_table = id_table, diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 176f38750ad589..eb0731992ca909 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -2381,7 +2381,6 @@ MODULE_DEVICE_TABLE(usb, option_ids); static struct usb_serial_driver option_1port_device = { .driver = { - .owner = THIS_MODULE, .name = "option1", }, .description = "GSM modem (1-port)", diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index fa07f6ff9ecc84..24068368892c9e 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c @@ -138,7 +138,6 @@ static void oti6858_port_remove(struct usb_serial_port *port); /* device info */ static struct 
usb_serial_driver oti6858_device = { .driver = { - .owner = THIS_MODULE, .name = "oti6858", }, .id_table = id_table, diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index d93f5d58455782..ad41363e3cea5a 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include "pl2303.h" @@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) }, + { USB_DEVICE(MACROSILICON_VENDOR_ID, MACROSILICON_MS3020_PRODUCT_ID) }, { } /* Terminating entry */ }; @@ -1234,7 +1235,6 @@ static void pl2303_process_read_urb(struct urb *urb) static struct usb_serial_driver pl2303_device = { .driver = { - .owner = THIS_MODULE, .name = "pl2303", }, .id_table = id_table, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 732f9b13ad5d59..d60eda7f6edaf8 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -171,3 +171,7 @@ /* Allied Telesis VT-Kit3 */ #define AT_VENDOR_ID 0x0caa #define AT_VTKIT3_PRODUCT_ID 0x3001 + +/* Macrosilicon MS3020 */ +#define MACROSILICON_VENDOR_ID 0x345f +#define MACROSILICON_MS3020_PRODUCT_ID 0x3020 diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 015bb7c5d19d3b..485ec5b0712265 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c @@ -72,7 +72,6 @@ MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver qcaux_device = { .driver = { - .owner = THIS_MODULE, .name = "qcaux", }, .id_table = id_table, diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 703a9c56355731..c7de9585feb238 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -454,7 +454,6 @@ static void qc_release(struct usb_serial *serial) static struct usb_serial_driver qcdevice = { .driver = { - .owner = THIS_MODULE, .name = "qcserial", }, .description = "Qualcomm USB modem", diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index 821f25e52ec24c..a317bdbd00ad5c 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -9,7 +9,7 @@ * */ -#include +#include #include #include #include @@ -924,7 +924,6 @@ static int qt2_write(struct tty_struct *tty, static struct usb_serial_driver qt2_device = { .driver = { - .owner = THIS_MODULE, .name = "quatech-serial", }, .description = DRIVER_DESC, diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c index 6accbecb6318bb..238b54993446cf 100644 --- a/drivers/usb/serial/safe_serial.c +++ b/drivers/usb/serial/safe_serial.c @@ -284,7 +284,6 @@ static int safe_startup(struct usb_serial *serial) static struct usb_serial_driver safe_device = { .driver = { - .owner = THIS_MODULE, .name = "safe_serial", }, .id_table = id_table, diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 353b2549eaa8a7..64a2e0bb5723ed 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -1021,7 +1021,6 @@ static int sierra_resume(struct usb_serial *serial) static struct usb_serial_driver sierra_device = { .driver = { - .owner = THIS_MODULE, .name = "sierra", }, .description = "Sierra USB modem", diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 6b294bf8bc435f..11077beb7232a7 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -452,7 
+452,6 @@ static int spcp8x5_tiocmget(struct tty_struct *tty) static struct usb_serial_driver spcp8x5_device = { .driver = { - .owner = THIS_MODULE, .name = "SPCP8x5", }, .id_table = id_table, diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 1e1888b66305d8..df21009bdf42f2 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -500,7 +500,6 @@ static void ssu100_process_read_urb(struct urb *urb) static struct usb_serial_driver ssu100_device = { .driver = { - .owner = THIS_MODULE, .name = "ssu100", }, .description = DRIVER_DESC, diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c index 9aabb087f733cb..58962bcbf9babf 100644 --- a/drivers/usb/serial/symbolserial.c +++ b/drivers/usb/serial/symbolserial.c @@ -169,7 +169,6 @@ static void symbol_port_remove(struct usb_serial_port *port) static struct usb_serial_driver symbol_device = { .driver = { - .owner = THIS_MODULE, .name = "symbol", }, .id_table = id_table, diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 0fba25abf671ca..a0c244bc77c094 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -417,7 +417,6 @@ static const struct usb_device_id ti_id_table_combined[] = { static struct usb_serial_driver ti_1port_device = { .driver = { - .owner = THIS_MODULE, .name = "ti_usb_3410_5052_1", }, .description = "TI USB 3410 1 port adapter", @@ -450,7 +449,6 @@ static struct usb_serial_driver ti_1port_device = { static struct usb_serial_driver ti_2port_device = { .driver = { - .owner = THIS_MODULE, .name = "ti_usb_3410_5052_2", }, .description = "TI USB 5052 2 port adapter", diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c index 46952182e04f79..15a17bf111f1fc 100644 --- a/drivers/usb/serial/upd78f0730.c +++ b/drivers/usb/serial/upd78f0730.c @@ -407,7 +407,6 @@ static void upd78f0730_close(struct usb_serial_port *port) static struct usb_serial_driver upd78f0730_device = { .driver = { - .owner = THIS_MODULE, .name = "upd78f0730", }, .id_table = id_table, diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 82f4f0b992aaa7..2c12449ff60c51 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -24,7 +24,6 @@ static const struct usb_device_id vendor##_id_table[] = { \ }; \ static struct usb_serial_driver vendor##_device = { \ .driver = { \ - .owner = THIS_MODULE, \ .name = #vendor, \ }, \ .id_table = vendor##_id_table, \ diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index f1e91eb7f8a413..df6a2ae0bf4246 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1459,17 +1459,18 @@ static void usb_serial_deregister(struct usb_serial_driver *device) } /** - * usb_serial_register_drivers - register drivers for a usb-serial module + * __usb_serial_register_drivers - register drivers for a usb-serial module * @serial_drivers: NULL-terminated array of pointers to drivers to be registered + * @owner: owning module * @name: name of the usb_driver for this set of @serial_drivers * @id_table: list of all devices this @serial_drivers set binds to * * Registers all the drivers in the @serial_drivers array, and dynamically * creates a struct usb_driver with the name @name and id_table of @id_table. 
*/ -int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], - const char *name, - const struct usb_device_id *id_table) +int __usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], + struct module *owner, const char *name, + const struct usb_device_id *id_table) { int rc; struct usb_driver *udriver; @@ -1514,6 +1515,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] for (sd = serial_drivers; *sd; ++sd) { (*sd)->usb_driver = udriver; + (*sd)->driver.owner = owner; rc = usb_serial_register(*sd); if (rc) goto err_deregister_drivers; @@ -1532,7 +1534,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] kfree(udriver); return rc; } -EXPORT_SYMBOL_GPL(usb_serial_register_drivers); +EXPORT_SYMBOL_GPL(__usb_serial_register_drivers); /** * usb_serial_deregister_drivers - deregister drivers for a usb-serial module diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 61a8425b7762a4..ec9fff794b36cf 100644 --- a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -83,7 +83,6 @@ static void usb_debug_init_termios(struct tty_struct *tty) static struct usb_serial_driver debug_device = { .driver = { - .owner = THIS_MODULE, .name = "debug", }, .id_table = id_table, @@ -96,7 +95,6 @@ static struct usb_serial_driver debug_device = { static struct usb_serial_driver dbc_device = { .driver = { - .owner = THIS_MODULE, .name = "xhci_dbc", }, .id_table = dbc_id_table, diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 4412834db21c5a..062a38fe0c1cc6 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -161,7 +161,6 @@ MODULE_DEVICE_TABLE(usb, id_table_combined); and Palm 4.0 devices */ static struct usb_serial_driver handspring_device = { .driver = { - .owner = THIS_MODULE, .name = "visor", }, .description = "Handspring Visor / Palm OS", @@ -180,7 +179,6 @@ static struct usb_serial_driver handspring_device = { /* All of the device info needed for the Clie UX50, TH55 Palm 5.0 devices */ static struct usb_serial_driver clie_5_device = { .driver = { - .owner = THIS_MODULE, .name = "clie_5", }, .description = "Sony Clie 5.0", @@ -200,7 +198,6 @@ static struct usb_serial_driver clie_5_device = { /* device info for the Sony Clie OS version 3.5 */ static struct usb_serial_driver clie_3_5_device = { .driver = { - .owner = THIS_MODULE, .name = "clie_3.5", }, .description = "Sony Clie 3.5", diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index ca48e90a8e81fa..009faeb2ef556b 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -91,7 +91,6 @@ static int whiteheat_break_ctl(struct tty_struct *tty, int break_state); static struct usb_serial_driver whiteheat_fake_device = { .driver = { - .owner = THIS_MODULE, .name = "whiteheatnofirm", }, .description = "Connect Tech - WhiteHEAT - (prerenumeration)", @@ -103,7 +102,6 @@ static struct usb_serial_driver whiteheat_fake_device = { static struct usb_serial_driver whiteheat_device = { .driver = { - .owner = THIS_MODULE, .name = "whiteheat", }, .description = "Connect Tech - WhiteHEAT", diff --git a/drivers/usb/serial/wishbone-serial.c b/drivers/usb/serial/wishbone-serial.c index ff4092f9b33c7e..670d573f6b6346 100644 --- a/drivers/usb/serial/wishbone-serial.c +++ b/drivers/usb/serial/wishbone-serial.c @@ -70,7 +70,6 @@ static void wishbone_serial_close(struct usb_serial_port *port) static struct usb_serial_driver 
wishbone_serial_device = { .driver = { - .owner = THIS_MODULE, .name = "wishbone_serial", }, .id_table = id_table, diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c index 1d9a12628f8112..4186e791b38416 100644 --- a/drivers/usb/serial/xr_serial.c +++ b/drivers/usb/serial/xr_serial.c @@ -1082,7 +1082,6 @@ MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver xr_device = { .driver = { - .owner = THIS_MODULE, .name = "xr_serial", }, .id_table = id_table, diff --git a/drivers/usb/serial/xsens_mt.c b/drivers/usb/serial/xsens_mt.c index cf262c9a963823..382b3698c1d5fd 100644 --- a/drivers/usb/serial/xsens_mt.c +++ b/drivers/usb/serial/xsens_mt.c @@ -49,7 +49,6 @@ static int xsens_mt_probe(struct usb_serial *serial, static struct usb_serial_driver xsens_mt_device = { .driver = { - .owner = THIS_MODULE, .name = "xsens_mt", }, .id_table = id_table, diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 40d34cc28344a4..a9d3c58ce7d935 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -132,7 +132,7 @@ static int init_alauda(struct us_data *us); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id alauda_usb_ids[] = { +static const struct usb_device_id alauda_usb_ids[] = { # include "unusual_alauda.h" { } /* Terminating entry */ }; @@ -154,7 +154,7 @@ MODULE_DEVICE_TABLE(usb, alauda_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev alauda_unusual_dev_list[] = { +static const struct us_unusual_dev alauda_unusual_dev_list[] = { # include "unusual_alauda.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c index 98b3ec352a1386..30dfd0082474c3 100644 --- a/drivers/usb/storage/cypress_atacb.c +++ b/drivers/usb/storage/cypress_atacb.c @@ -33,7 +33,7 @@ MODULE_IMPORT_NS(USB_STORAGE); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id cypress_usb_ids[] = { +static const struct usb_device_id cypress_usb_ids[] = { # include "unusual_cypress.h" { } /* Terminating entry */ }; @@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(usb, cypress_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev cypress_unusual_dev_list[] = { +static const struct us_unusual_dev cypress_unusual_dev_list[] = { # include "unusual_cypress.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c index bcc4a2fad863be..3ea5601d16b8b7 100644 --- a/drivers/usb/storage/datafab.c +++ b/drivers/usb/storage/datafab.c @@ -80,7 +80,7 @@ static int datafab_determine_lun(struct us_data *us, { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id datafab_usb_ids[] = { +static const struct usb_device_id datafab_usb_ids[] = { # include "unusual_datafab.h" { } /* Terminating entry */ }; @@ -102,7 +102,7 @@ MODULE_DEVICE_TABLE(usb, datafab_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev datafab_unusual_dev_list[] = { +static const struct us_unusual_dev datafab_unusual_dev_list[] = { # include "unusual_datafab.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 97c66c0d91f4d6..a4bfbecbf16c33 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -43,7 +43,7 @@ 
MODULE_FIRMWARE(MS_RW_FIRMWARE); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags)} -static struct usb_device_id ene_ub6250_usb_ids[] = { +static const struct usb_device_id ene_ub6250_usb_ids[] = { # include "unusual_ene_ub6250.h" { } /* Terminating entry */ }; @@ -65,7 +65,7 @@ MODULE_DEVICE_TABLE(usb, ene_ub6250_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = { +static const struct us_unusual_dev ene_ub6250_unusual_dev_list[] = { # include "unusual_ene_ub6250.h" { } /* Terminating entry */ }; @@ -1484,7 +1484,7 @@ static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) { u32 bl_num; - u16 bl_len; + u32 bl_len; unsigned int offset = 0; unsigned char buf[8]; struct scatterlist *sg = NULL; diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c index c3ce51c2dabde3..cab27ba7a32a65 100644 --- a/drivers/usb/storage/freecom.c +++ b/drivers/usb/storage/freecom.c @@ -119,7 +119,7 @@ static int init_freecom(struct us_data *us); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id freecom_usb_ids[] = { +static const struct usb_device_id freecom_usb_ids[] = { # include "unusual_freecom.h" { } /* Terminating entry */ }; @@ -141,7 +141,7 @@ MODULE_DEVICE_TABLE(usb, freecom_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev freecom_unusual_dev_list[] = { +static const struct us_unusual_dev freecom_unusual_dev_list[] = { # include "unusual_freecom.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index 300aeef160e75c..f2254eb3c0d73f 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c @@ -67,7 +67,7 @@ static int isd200_Initialization(struct us_data *us); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id isd200_usb_ids[] = { +static const struct usb_device_id isd200_usb_ids[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; @@ -89,7 +89,7 @@ MODULE_DEVICE_TABLE(usb, isd200_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev isd200_unusual_dev_list[] = { +static const struct us_unusual_dev isd200_unusual_dev_list[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c index 229bf0c1afc91f..0e71a8f33c2b63 100644 --- a/drivers/usb/storage/jumpshot.c +++ b/drivers/usb/storage/jumpshot.c @@ -62,7 +62,7 @@ MODULE_IMPORT_NS(USB_STORAGE); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id jumpshot_usb_ids[] = { +static const struct usb_device_id jumpshot_usb_ids[] = { # include "unusual_jumpshot.h" { } /* Terminating entry */ }; @@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(usb, jumpshot_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev jumpshot_unusual_dev_list[] = { +static const struct us_unusual_dev jumpshot_unusual_dev_list[] = { # include "unusual_jumpshot.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c index 38ddfedef629ca..d6a5e54f2ca823 100644 --- a/drivers/usb/storage/karma.c +++ b/drivers/usb/storage/karma.c @@ -51,7 +51,7 @@ static int rio_karma_init(struct us_data *us); { 
USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id karma_usb_ids[] = { +static const struct usb_device_id karma_usb_ids[] = { # include "unusual_karma.h" { } /* Terminating entry */ }; @@ -73,7 +73,7 @@ MODULE_DEVICE_TABLE(usb, karma_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev karma_unusual_dev_list[] = { +static const struct us_unusual_dev karma_unusual_dev_list[] = { # include "unusual_karma.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c index 01f3c2779ccf3e..f97cf6cadb8eb7 100644 --- a/drivers/usb/storage/onetouch.c +++ b/drivers/usb/storage/onetouch.c @@ -55,7 +55,7 @@ struct usb_onetouch { { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id onetouch_usb_ids[] = { +static const struct usb_device_id onetouch_usb_ids[] = { # include "unusual_onetouch.h" { } /* Terminating entry */ }; @@ -77,7 +77,7 @@ MODULE_DEVICE_TABLE(usb, onetouch_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev onetouch_unusual_dev_list[] = { +static const struct us_unusual_dev onetouch_unusual_dev_list[] = { # include "unusual_onetouch.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c index 51bcd4a4369058..03d1b9c69ea18f 100644 --- a/drivers/usb/storage/sddr09.c +++ b/drivers/usb/storage/sddr09.c @@ -63,7 +63,7 @@ static int usb_stor_sddr09_init(struct us_data *us); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id sddr09_usb_ids[] = { +static const struct usb_device_id sddr09_usb_ids[] = { # include "unusual_sddr09.h" { } /* Terminating entry */ }; @@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(usb, sddr09_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev sddr09_unusual_dev_list[] = { +static const struct us_unusual_dev sddr09_unusual_dev_list[] = { # include "unusual_sddr09.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c index 0aa079405d23c2..b8227478a7add2 100644 --- a/drivers/usb/storage/sddr55.c +++ b/drivers/usb/storage/sddr55.c @@ -40,7 +40,7 @@ MODULE_IMPORT_NS(USB_STORAGE); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id sddr55_usb_ids[] = { +static const struct usb_device_id sddr55_usb_ids[] = { # include "unusual_sddr55.h" { } /* Terminating entry */ }; @@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(usb, sddr55_usb_ids); .initFunction = init_function, \ } -static struct us_unusual_dev sddr55_unusual_dev_list[] = { +static const struct us_unusual_dev sddr55_unusual_dev_list[] = { # include "unusual_sddr55.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c index f0d0ca37163da9..e7c224b7c46463 100644 --- a/drivers/usb/storage/shuttle_usbat.c +++ b/drivers/usb/storage/shuttle_usbat.c @@ -162,7 +162,7 @@ static int init_usbat_flash(struct us_data *us); { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id usbat_usb_ids[] = { +static const struct usb_device_id usbat_usb_ids[] = { # include "unusual_usbat.h" { } /* Terminating entry */ }; @@ -184,7 +184,7 @@ MODULE_DEVICE_TABLE(usb, usbat_usb_ids); .initFunction = init_function, \ } -static 
struct us_unusual_dev usbat_unusual_dev_list[] = { +static const struct us_unusual_dev usbat_unusual_dev_list[] = { # include "unusual_usbat.h" { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index c223b4dc1b199f..03043d567fa1be 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -927,7 +927,7 @@ static const struct scsi_host_template uas_host_template = { { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } -static struct usb_device_id uas_usb_ids[] = { +static const struct usb_device_id uas_usb_ids[] = { # include "unusual_uas.h" { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) }, { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) }, diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c index 5a5bf3532ad7ad..d1e7c487ddfbb5 100644 --- a/drivers/usb/typec/anx7411.c +++ b/drivers/usb/typec/anx7411.c @@ -6,6 +6,7 @@ * Copyright(c) 2022, Analogix Semiconductor. All rights reserved. * */ +#include #include #include #include @@ -884,8 +885,8 @@ static void anx7411_chip_standby(struct anx7411_data *ctx) OCM_RESET); ret |= anx7411_reg_write(ctx->tcpc_client, ANALOG_CTRL_10, 0x80); /* Set TCPC to RD and DRP enable */ - cc1 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT; - cc2 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT; + cc1 = FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD); + cc2 = FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD); ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP | cc1 | cc2); @@ -1339,12 +1340,6 @@ static void anx7411_get_gpio_irq(struct anx7411_data *ctx) dev_err(dev, "failed to get GPIO IRQ\n"); } -static enum power_supply_usb_type anx7411_psy_usb_types[] = { - POWER_SUPPLY_USB_TYPE_C, - POWER_SUPPLY_USB_TYPE_PD, - POWER_SUPPLY_USB_TYPE_PD_PPS, -}; - static enum power_supply_property anx7411_psy_props[] = { POWER_SUPPLY_PROP_USB_TYPE, POWER_SUPPLY_PROP_ONLINE, @@ -1422,8 +1417,9 @@ static int anx7411_psy_register(struct anx7411_data *ctx) psy_desc->name = psy_name; psy_desc->type = POWER_SUPPLY_TYPE_USB; - psy_desc->usb_types = anx7411_psy_usb_types; - psy_desc->num_usb_types = ARRAY_SIZE(anx7411_psy_usb_types); + psy_desc->usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) | + BIT(POWER_SUPPLY_USB_TYPE_PD) | + BIT(POWER_SUPPLY_USB_TYPE_PD_PPS); psy_desc->properties = anx7411_psy_props; psy_desc->num_properties = ARRAY_SIZE(anx7411_psy_props); @@ -1576,6 +1572,7 @@ static const struct of_device_id anx_match_table[] = { {.compatible = "analogix,anx7411",}, {}, }; +MODULE_DEVICE_TABLE(of, anx_match_table); static struct i2c_driver anx7411_driver = { .driver = { diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index 56989a0d0f43e0..46b4c5c3a6beb3 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -18,8 +19,6 @@ #include #include -#include - #define PMC_USBC_CMD 0xa7 /* Response status bits */ diff --git a/drivers/usb/typec/rt1719.c b/drivers/usb/typec/rt1719.c index be02d420920e57..0b0c23a0b014bd 100644 --- a/drivers/usb/typec/rt1719.c +++ b/drivers/usb/typec/rt1719.c @@ -109,12 +109,6 @@ struct rt1719_data { u16 conn_stat; }; -static const enum power_supply_usb_type rt1719_psy_usb_types[] = { - POWER_SUPPLY_USB_TYPE_C, - POWER_SUPPLY_USB_TYPE_PD, - POWER_SUPPLY_USB_TYPE_PD_PPS -}; - static const enum 
power_supply_property rt1719_psy_properties[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_USB_TYPE, @@ -572,8 +566,9 @@ static int devm_rt1719_psy_register(struct rt1719_data *data) data->psy_desc.name = psy_name; data->psy_desc.type = POWER_SUPPLY_TYPE_USB; - data->psy_desc.usb_types = rt1719_psy_usb_types; - data->psy_desc.num_usb_types = ARRAY_SIZE(rt1719_psy_usb_types); + data->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) | + BIT(POWER_SUPPLY_USB_TYPE_PD) | + BIT(POWER_SUPPLY_USB_TYPE_PD_PPS); data->psy_desc.properties = rt1719_psy_properties; data->psy_desc.num_properties = ARRAY_SIZE(rt1719_psy_properties); data->psy_desc.get_property = rt1719_psy_get_property; diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c index f8504a90da2675..22163d8f9eb07e 100644 --- a/drivers/usb/typec/tcpm/maxim_contaminant.c +++ b/drivers/usb/typec/tcpm/maxim_contaminant.c @@ -5,6 +5,7 @@ * USB-C module to reduce wakeups due to contaminants. */ +#include #include #include #include @@ -45,14 +46,9 @@ enum fladc_select { #define READ1_SLEEP_MS 10 #define READ2_SLEEP_MS 5 -#define STATUS_CHECK(reg, mask, val) (((reg) & (mask)) == (val)) - #define IS_CC_OPEN(cc_status) \ - (STATUS_CHECK((cc_status), TCPC_CC_STATUS_CC1_MASK << TCPC_CC_STATUS_CC1_SHIFT, \ - TCPC_CC_STATE_SRC_OPEN) && STATUS_CHECK((cc_status), \ - TCPC_CC_STATUS_CC2_MASK << \ - TCPC_CC_STATUS_CC2_SHIFT, \ - TCPC_CC_STATE_SRC_OPEN)) + (FIELD_GET(TCPC_CC_STATUS_CC1, cc_status) == TCPC_CC_STATE_SRC_OPEN \ + && FIELD_GET(TCPC_CC_STATUS_CC2, cc_status) == TCPC_CC_STATE_SRC_OPEN) static int max_contaminant_adc_to_mv(struct max_tcpci_chip *chip, enum fladc_select channel, bool ua_src, u8 fladc) @@ -80,8 +76,8 @@ static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_s int ret; /* Channel & scale select */ - ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK, - channel << ADC_CHANNEL_OFFSET); + ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL, + FIELD_PREP(ADCINSEL, channel)); if (ret < 0) return ret; @@ -100,7 +96,8 @@ static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_s if (ret < 0) return ret; - ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK, 0); + ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL, + FIELD_PREP(ADCINSEL, 0)); if (ret < 0) return ret; @@ -120,13 +117,14 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip, if (channel == CC1_SCALE1 || channel == CC2_SCALE1 || channel == CC1_SCALE2 || channel == CC2_SCALE2) { /* Enable 1uA current source */ - ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK, - ULTRA_LOW_POWER_MODE); + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL, + FIELD_PREP(CCLPMODESEL, ULTRA_LOW_POWER_MODE)); if (ret < 0) return ret; /* Enable 1uA current source */ - ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_1_SRC); + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL, + FIELD_PREP(CCRPCTRL, UA_1_SRC)); if (ret < 0) return ret; @@ -180,7 +178,8 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven int ret; /* Enable 80uA source */ - ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_80_SRC); + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL, + FIELD_PREP(CCRPCTRL, UA_80_SRC)); if (ret < 0) return ret; @@ -213,7 +212,8 @@ static int max_contaminant_read_comparators(struct 
max_tcpci_chip *chip, u8 *ven if (ret < 0) return ret; - ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, 0); + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL, + FIELD_PREP(CCRPCTRL, 0)); if (ret < 0) return ret; @@ -284,10 +284,11 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip) u8 temp; int ret; - ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3, CCWTRDEB_MASK | CCWTRSEL_MASK - | WTRCYCLE_MASK, CCWTRDEB_1MS << CCWTRDEB_SHIFT | - CCWTRSEL_1V << CCWTRSEL_SHIFT | WTRCYCLE_4_8_S << - WTRCYCLE_SHIFT); + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3, + CCWTRDEB | CCWTRSEL | WTRCYCLE, + FIELD_PREP(CCWTRDEB, CCWTRDEB_1MS) + | FIELD_PREP(CCWTRSEL, CCWTRSEL_1V) + | FIELD_PREP(WTRCYCLE, WTRCYCLE_4_8_S)); if (ret < 0) return ret; @@ -302,8 +303,9 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip) if (ret < 0) return ret; - ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK, - ULTRA_LOW_POWER_MODE); + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL, + FIELD_PREP(CCLPMODESEL, + ULTRA_LOW_POWER_MODE)); if (ret < 0) return ret; ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL2, &temp); @@ -322,11 +324,14 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip) return 0; } -bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce) +bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce, + bool *cc_handled) { u8 cc_status, pwr_cntl; int ret; + *cc_handled = true; + ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status); if (ret < 0) return false; @@ -368,9 +373,8 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect return true; } } - return false; } else if (chip->contaminant_state == DETECTED) { - if (STATUS_CHECK(cc_status, TCPC_CC_STATUS_TOGGLING, 0)) { + if (!(cc_status & TCPC_CC_STATUS_TOGGLING)) { chip->contaminant_state = max_contaminant_detect_contaminant(chip); if (chip->contaminant_state == DETECTED) { max_contaminant_enable_dry_detection(chip); @@ -379,6 +383,7 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect } } + *cc_handled = false; return false; } diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index 3e3dcb983ddeab..ed32583829bec2 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -5,6 +5,7 @@ * USB Type-C Port Controller Interface. 
*/ +#include #include #include #include @@ -103,45 +104,45 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc) switch (cc) { case TYPEC_CC_RA: - reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC2_SHIFT); + reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RA) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RA)); break; case TYPEC_CC_RD: - reg = (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT); + reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD)); break; case TYPEC_CC_RP_DEF: - reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) | - (TCPC_ROLE_CTRL_RP_VAL_DEF << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP) + | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_DEF)); break; case TYPEC_CC_RP_1_5: - reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) | - (TCPC_ROLE_CTRL_RP_VAL_1_5 << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP) + | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_1_5)); break; case TYPEC_CC_RP_3_0: - reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) | - (TCPC_ROLE_CTRL_RP_VAL_3_0 << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP) + | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_3_0)); break; case TYPEC_CC_OPEN: default: - reg = (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT); + reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN)); break; } if (vconn_pres) { if (polarity == TYPEC_POLARITY_CC2) { - reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT); - reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT); + reg &= ~TCPC_ROLE_CTRL_CC1; + reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN); } else { - reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT); - reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT); + reg &= ~TCPC_ROLE_CTRL_CC2; + reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN); } } @@ -167,15 +168,11 @@ static int tcpci_apply_rc(struct tcpc_dev *tcpc, enum typec_cc_status cc, * APPLY_RC state is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 and vbus autodischarge on * disconnect is disabled. Bail out when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2. */ - if (((reg & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) >> - TCPC_ROLE_CTRL_CC2_SHIFT) != - ((reg & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) >> - TCPC_ROLE_CTRL_CC1_SHIFT)) + if (FIELD_GET(TCPC_ROLE_CTRL_CC2, reg) != FIELD_GET(TCPC_ROLE_CTRL_CC1, reg)) return 0; return regmap_update_bits(tcpci->regmap, TCPC_ROLE_CTRL, polarity == TYPEC_POLARITY_CC1 ? 
- TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT : - TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT, + TCPC_ROLE_CTRL_CC2 : TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN); } @@ -200,25 +197,25 @@ static int tcpci_start_toggling(struct tcpc_dev *tcpc, switch (cc) { default: case TYPEC_CC_RP_DEF: - reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_DEF); break; case TYPEC_CC_RP_1_5: - reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_1_5); break; case TYPEC_CC_RP_3_0: - reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_3_0); break; } if (cc == TYPEC_CC_RD) - reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT); + reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD)); else - reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT); + reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)); ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg); if (ret < 0) return ret; @@ -241,12 +238,10 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc, if (ret < 0) return ret; - *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) & - TCPC_CC_STATUS_CC1_MASK, + *cc1 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC1, reg), reg & TCPC_CC_STATUS_TERM || tcpc_presenting_rd(role_control, CC1)); - *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) & - TCPC_CC_STATUS_CC2_MASK, + *cc2 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC2, reg), reg & TCPC_CC_STATUS_TERM || tcpc_presenting_rd(role_control, CC2)); @@ -282,28 +277,28 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc, reg = reg & ~TCPC_ROLE_CTRL_DRP; if (polarity == TYPEC_POLARITY_CC2) { - reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT); + reg &= ~TCPC_ROLE_CTRL_CC2; /* Local port is source */ if (cc2 == TYPEC_CC_RD) /* Role control would have the Rp setting when DRP was enabled */ - reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT; + reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP); else - reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT; + reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD); } else { - reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT); + reg &= ~TCPC_ROLE_CTRL_CC1; /* Local port is source */ if (cc1 == TYPEC_CC_RD) /* Role control would have the Rp setting when DRP was enabled */ - reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT; + reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP); else - reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT; + reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD); } } if (polarity == TYPEC_POLARITY_CC2) - reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT; + reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN); else - reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT; + reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN); ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg); if (ret < 0) return ret; @@ -461,7 +456,7 @@ static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached, unsigned int reg; int ret; - reg = PD_REV20 << TCPC_MSG_HDR_INFO_REV_SHIFT; + reg = 
FIELD_PREP(TCPC_MSG_HDR_INFO_REV, PD_REV20); if (role == TYPEC_SOURCE) reg |= TCPC_MSG_HDR_INFO_PWR_ROLE; if (data == TYPEC_HOST) @@ -612,8 +607,11 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type } /* nRetryCount is 3 in PD2.0 spec where 2 in PD3.0 spec */ - reg = ((negotiated_rev > PD_REV20 ? PD_RETRY_COUNT_3_0_OR_HIGHER : PD_RETRY_COUNT_DEFAULT) - << TCPC_TRANSMIT_RETRY_SHIFT) | (type << TCPC_TRANSMIT_TYPE_SHIFT); + reg = FIELD_PREP(TCPC_TRANSMIT_RETRY, + (negotiated_rev > PD_REV20 + ? PD_RETRY_COUNT_3_0_OR_HIGHER + : PD_RETRY_COUNT_DEFAULT)); + reg |= FIELD_PREP(TCPC_TRANSMIT_TYPE, type); ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg); if (ret < 0) return ret; @@ -709,10 +707,13 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci) { u16 status; int ret; + int irq_ret; unsigned int raw; tcpci_read16(tcpci, TCPC_ALERT, &status); + irq_ret = status & tcpci->alert_mask; +process_status: /* * Clear alert status for everything except RX_STATUS, which shouldn't * be cleared until we have successfully retrieved message. @@ -785,7 +786,12 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci) else if (status & TCPC_ALERT_TX_FAILED) tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED); - return IRQ_RETVAL(status & tcpci->alert_mask); + tcpci_read16(tcpci, TCPC_ALERT, &status); + + if (status & tcpci->alert_mask) + goto process_status; + + return IRQ_RETVAL(irq_ret); } EXPORT_SYMBOL_GPL(tcpci_irq); @@ -917,20 +923,22 @@ static int tcpci_probe(struct i2c_client *client) chip->data.set_orientation = err; - chip->tcpci = tcpci_register_port(&client->dev, &chip->data); - if (IS_ERR(chip->tcpci)) - return PTR_ERR(chip->tcpci); - err = devm_request_threaded_irq(&client->dev, client->irq, NULL, _tcpci_irq, - IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW, + IRQF_SHARED | IRQF_ONESHOT, dev_name(&client->dev), chip); - if (err < 0) { - tcpci_unregister_port(chip->tcpci); + if (err < 0) return err; - } - return 0; + /* + * Disable irq while registering port. If irq is configured as an edge + * irq this allow to keep track and process the irq as soon as it is enabled. 
+ */ + disable_irq(client->irq); + chip->tcpci = tcpci_register_port(&client->dev, &chip->data); + enable_irq(client->irq); + + return PTR_ERR_OR_ZERO(chip->tcpci); } static void tcpci_remove(struct i2c_client *client) diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h index 78ff3b73ee7e3c..76270d5c283880 100644 --- a/drivers/usb/typec/tcpm/tcpci_maxim.h +++ b/drivers/usb/typec/tcpm/tcpci_maxim.h @@ -20,28 +20,24 @@ #define SBUOVPDIS BIT(7) #define CCOVPDIS BIT(6) #define SBURPCTRL BIT(5) -#define CCLPMODESEL_MASK GENMASK(4, 3) -#define ULTRA_LOW_POWER_MODE BIT(3) -#define CCRPCTRL_MASK GENMASK(2, 0) +#define CCLPMODESEL GENMASK(4, 3) +#define ULTRA_LOW_POWER_MODE 1 +#define CCRPCTRL GENMASK(2, 0) #define UA_1_SRC 1 #define UA_80_SRC 3 #define TCPC_VENDOR_CC_CTRL3 0x8e -#define CCWTRDEB_MASK GENMASK(7, 6) -#define CCWTRDEB_SHIFT 6 +#define CCWTRDEB GENMASK(7, 6) #define CCWTRDEB_1MS 1 -#define CCWTRSEL_MASK GENMASK(5, 3) -#define CCWTRSEL_SHIFT 3 +#define CCWTRSEL GENMASK(5, 3) #define CCWTRSEL_1V 0x4 #define CCLADDERDIS BIT(2) -#define WTRCYCLE_MASK BIT(0) -#define WTRCYCLE_SHIFT 0 +#define WTRCYCLE GENMASK(0, 0) #define WTRCYCLE_2_4_S 0 #define WTRCYCLE_4_8_S 1 #define TCPC_VENDOR_ADC_CTRL1 0x91 -#define ADCINSEL_MASK GENMASK(7, 5) -#define ADC_CHANNEL_OFFSET 5 +#define ADCINSEL GENMASK(7, 5) #define ADCEN BIT(0) enum contamiant_state { @@ -85,6 +81,20 @@ static inline int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8)); } -bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce); +/** + * max_contaminant_is_contaminant - Test if CC was toggled due to contaminant + * + * @chip: Handle to a struct max_tcpci_chip + * @disconnect_while_debounce: Whether the disconnect was detected when CC + * pins were debouncing + * @cc_handled: Returns whether or not update to CC status was handled here + * + * Determine if a contaminant was detected. + * + * Returns: true if a contaminant was detected, false otherwise. cc_handled + * is updated to reflect whether or not further CC handling is required. 
+ */ +bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce, + bool *cc_handled); #endif // TCPCI_MAXIM_H_ diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c index 760e2f92b958dc..fd1b8059336764 100644 --- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c +++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c @@ -97,11 +97,13 @@ static void max_tcpci_init_regs(struct max_tcpci_chip *chip) if (ret < 0) return; - alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_TX_FAILED | - TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_RX_STATUS | TCPC_ALERT_CC_STATUS | - TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | TCPC_ALERT_POWER_STATUS | - /* Enable Extended alert for detecting Fast Role Swap Signal */ - TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS | TCPC_ALERT_FAULT; + alert_mask = (TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | + TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST | + TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS | + TCPC_ALERT_CC_STATUS | + TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS | + TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | + TCPC_ALERT_FAULT); ret = max_tcpci_write16(chip, TCPC_ALERT_MASK, alert_mask); if (ret < 0) { @@ -191,9 +193,8 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status) * Read complete, clear RX status alert bit. * Clear overflow as well if set. */ - ret = max_tcpci_write16(chip, TCPC_ALERT, status & TCPC_ALERT_RX_BUF_OVF ? - TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF : - TCPC_ALERT_RX_STATUS); + ret = max_tcpci_write16(chip, TCPC_ALERT, + TCPC_ALERT_RX_STATUS | (status & TCPC_ALERT_RX_BUF_OVF)); if (ret < 0) return; @@ -295,9 +296,8 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status) * be cleared until we have successfully retrieved message. */ if (status & ~TCPC_ALERT_RX_STATUS) { - mask = status & TCPC_ALERT_RX_BUF_OVF ? - status & ~(TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF) : - status & ~TCPC_ALERT_RX_STATUS; + mask = status & ~(TCPC_ALERT_RX_STATUS + | (status & TCPC_ALERT_RX_BUF_OVF)); ret = max_tcpci_write16(chip, TCPC_ALERT, mask); if (ret < 0) { dev_err(chip->dev, "ALERT clear failed\n"); @@ -357,12 +357,14 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status) tcpm_vbus_change(chip->port); if (status & TCPC_ALERT_CC_STATUS) { + bool cc_handled = false; + if (chip->contaminant_state == DETECTED || tcpm_port_is_toggling(chip->port)) { - if (!max_contaminant_is_contaminant(chip, false)) + if (!max_contaminant_is_contaminant(chip, false, &cc_handled)) tcpm_port_clean(chip->port); - } else { - tcpm_cc_change(chip->port); } + if (!cc_handled) + tcpm_cc_change(chip->port); } if (status & TCPC_ALERT_POWER_STATUS) @@ -397,7 +399,7 @@ static irqreturn_t max_tcpci_irq(int irq, void *dev_id) } while (status) { irq_return = _max_tcpci_irq(chip, status); - /* Do not return if the ALERT is already set. */ + /* Do not return if a (new) ALERT is set (again). 
*/ ret = max_tcpci_read16(chip, TCPC_ALERT, &status); if (ret < 0) break; @@ -455,8 +457,9 @@ static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data) static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *tdata) { struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata); + bool cc_handled; - if (!max_contaminant_is_contaminant(chip, true)) + if (!max_contaminant_is_contaminant(chip, true, &cc_handled)) tcpm_port_clean(chip->port); } @@ -472,6 +475,11 @@ static bool max_tcpci_attempt_vconn_swap_discovery(struct tcpci *tcpci, struct t return true; } +static void max_tcpci_unregister_tcpci_port(void *tcpci) +{ + tcpci_unregister_port(tcpci); +} + static int max_tcpci_probe(struct i2c_client *client) { int ret; @@ -484,17 +492,17 @@ static int max_tcpci_probe(struct i2c_client *client) chip->client = client; chip->data.regmap = devm_regmap_init_i2c(client, &max_tcpci_regmap_config); - if (IS_ERR(chip->data.regmap)) { - dev_err(&client->dev, "Regmap init failed\n"); - return PTR_ERR(chip->data.regmap); - } + if (IS_ERR(chip->data.regmap)) + return dev_err_probe(&client->dev, PTR_ERR(chip->data.regmap), + "Regmap init failed\n"); chip->dev = &client->dev; i2c_set_clientdata(client, chip); ret = max_tcpci_read8(chip, TCPC_POWER_STATUS, &power_status); if (ret < 0) - return ret; + return dev_err_probe(&client->dev, ret, + "Failed to read TCPC_POWER_STATUS\n"); /* Chip level tcpci callbacks */ chip->data.set_vbus = max_tcpci_set_vbus; @@ -511,30 +519,25 @@ static int max_tcpci_probe(struct i2c_client *client) max_tcpci_init_regs(chip); chip->tcpci = tcpci_register_port(chip->dev, &chip->data); - if (IS_ERR(chip->tcpci)) { - dev_err(&client->dev, "TCPCI port registration failed\n"); - return PTR_ERR(chip->tcpci); - } + if (IS_ERR(chip->tcpci)) + return dev_err_probe(&client->dev, PTR_ERR(chip->tcpci), + "TCPCI port registration failed\n"); + + ret = devm_add_action_or_reset(&client->dev, + max_tcpci_unregister_tcpci_port, + chip->tcpci); + if (ret) + return ret; + chip->port = tcpci_get_tcpm_port(chip->tcpci); + ret = max_tcpci_init_alert(chip, client); if (ret < 0) - goto unreg_port; + return dev_err_probe(&client->dev, ret, + "IRQ initialization failed\n"); device_init_wakeup(chip->dev, true); return 0; - -unreg_port: - tcpci_unregister_port(chip->tcpci); - - return ret; -} - -static void max_tcpci_remove(struct i2c_client *client) -{ - struct max_tcpci_chip *chip = i2c_get_clientdata(client); - - if (!IS_ERR_OR_NULL(chip->tcpci)) - tcpci_unregister_port(chip->tcpci); } static const struct i2c_device_id max_tcpci_id[] = { @@ -557,7 +560,6 @@ static struct i2c_driver max_tcpci_i2c_driver = { .of_match_table = of_match_ptr(max_tcpci_of_match), }, .probe = max_tcpci_probe, - .remove = max_tcpci_remove, .id_table = max_tcpci_id, }; module_i2c_driver(max_tcpci_i2c_driver); diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c index 67422d45eb5486..64f6dd0dc66096 100644 --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -5,6 +5,7 @@ * Richtek RT1711H Type-C Chip Driver */ +#include #include #include #include @@ -195,12 +196,10 @@ static inline int rt1711h_init_cc_params(struct rt1711h_chip *chip, u8 status) if (ret < 0) return ret; - cc1 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC1_SHIFT) & - TCPC_CC_STATUS_CC1_MASK, + cc1 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC1, status), status & TCPC_CC_STATUS_TERM || tcpc_presenting_rd(role, CC1)); - cc2 = tcpci_to_typec_cc((status >> 
TCPC_CC_STATUS_CC2_SHIFT) & - TCPC_CC_STATUS_CC2_MASK, + cc2 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC2, status), status & TCPC_CC_STATUS_TERM || tcpc_presenting_rd(role, CC2)); @@ -233,25 +232,25 @@ static int rt1711h_start_drp_toggling(struct tcpci *tcpci, switch (cc) { default: case TYPEC_CC_RP_DEF: - reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_DEF); break; case TYPEC_CC_RP_1_5: - reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_1_5); break; case TYPEC_CC_RP_3_0: - reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 << - TCPC_ROLE_CTRL_RP_VAL_SHIFT); + reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL, + TCPC_ROLE_CTRL_RP_VAL_3_0); break; } if (cc == TYPEC_CC_RD) - reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT); + reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD)); else - reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) | - (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT); + reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP) + | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)); ret = rt1711h_write8(chip, TCPC_ROLE_CTRL, reg); if (ret < 0) diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 4b02d647425910..fc619478200f5d 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -7483,12 +7483,6 @@ static int tcpm_psy_prop_writeable(struct power_supply *psy, } } -static enum power_supply_usb_type tcpm_psy_usb_types[] = { - POWER_SUPPLY_USB_TYPE_C, - POWER_SUPPLY_USB_TYPE_PD, - POWER_SUPPLY_USB_TYPE_PD_PPS, -}; - static const char *tcpm_psy_name_prefix = "tcpm-source-psy-"; static int devm_tcpm_psy_register(struct tcpm_port *port) @@ -7509,8 +7503,9 @@ static int devm_tcpm_psy_register(struct tcpm_port *port) port_dev_name); port->psy_desc.name = psy_name; port->psy_desc.type = POWER_SUPPLY_TYPE_USB; - port->psy_desc.usb_types = tcpm_psy_usb_types; - port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types); + port->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) | + BIT(POWER_SUPPLY_USB_TYPE_PD) | + BIT(POWER_SUPPLY_USB_TYPE_PD_PPS); port->psy_desc.properties = tcpm_psy_props; port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props); port->psy_desc.get_property = tcpm_psy_get_prop; diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index dd51a25480bfb9..7ee721a877c12d 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -150,11 +150,6 @@ static enum power_supply_property tps6598x_psy_props[] = { POWER_SUPPLY_PROP_ONLINE, }; -static enum power_supply_usb_type tps6598x_psy_usb_types[] = { - POWER_SUPPLY_USB_TYPE_C, - POWER_SUPPLY_USB_TYPE_PD, -}; - static const char *tps6598x_psy_name_prefix = "tps6598x-source-psy-"; /* @@ -827,8 +822,8 @@ static int devm_tps6598_psy_register(struct tps6598x *tps) tps->psy_desc.name = psy_name; tps->psy_desc.type = POWER_SUPPLY_TYPE_USB; - tps->psy_desc.usb_types = tps6598x_psy_usb_types; - tps->psy_desc.num_usb_types = ARRAY_SIZE(tps6598x_psy_usb_types); + tps->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) | + BIT(POWER_SUPPLY_USB_TYPE_PD); tps->psy_desc.properties = tps6598x_psy_props; tps->psy_desc.num_properties = ARRAY_SIZE(tps6598x_psy_props); tps->psy_desc.get_property = tps6598x_psy_get_prop; @@ -1465,8 +1460,9 @@ static 
void tps6598x_remove(struct i2c_client *client) if (!client->irq) cancel_delayed_work_sync(&tps->wq_poll); + else + devm_free_irq(tps->dev, client->irq, tps); - devm_free_irq(tps->dev, client->irq, tps); tps6598x_disconnect(tps, 0); typec_unregister_port(tps->port); usb_role_switch_put(tps->role_sw); diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c index e623d80e177c07..1c631c7855a960 100644 --- a/drivers/usb/typec/ucsi/psy.c +++ b/drivers/usb/typec/ucsi/psy.c @@ -254,12 +254,6 @@ static int ucsi_psy_get_prop(struct power_supply *psy, } } -static enum power_supply_usb_type ucsi_psy_usb_types[] = { - POWER_SUPPLY_USB_TYPE_C, - POWER_SUPPLY_USB_TYPE_PD, - POWER_SUPPLY_USB_TYPE_PD_PPS, -}; - int ucsi_register_port_psy(struct ucsi_connector *con) { struct power_supply_config psy_cfg = {}; @@ -276,8 +270,9 @@ int ucsi_register_port_psy(struct ucsi_connector *con) con->psy_desc.name = psy_name; con->psy_desc.type = POWER_SUPPLY_TYPE_USB; - con->psy_desc.usb_types = ucsi_psy_usb_types; - con->psy_desc.num_usb_types = ARRAY_SIZE(ucsi_psy_usb_types); + con->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) | + BIT(POWER_SUPPLY_USB_TYPE_PD) | + BIT(POWER_SUPPLY_USB_TYPE_PD_PPS); con->psy_desc.properties = ucsi_psy_props; con->psy_desc.num_properties = ARRAY_SIZE(ucsi_psy_props); con->psy_desc.get_property = ucsi_psy_get_prop; diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 17155ed17fdf84..e0f3925e401b3d 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -38,6 +38,10 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci) { + /* Ignore bogus data in CCI if busy indicator is set. */ + if (cci & UCSI_CCI_BUSY) + return; + if (UCSI_CCI_CONNECTOR(cci)) ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci)); @@ -99,24 +103,18 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci, *cci = 0; - /* - * Below UCSI 2.0, MESSAGE_IN was limited to 16 bytes. Truncate the - * reads here. - */ - if (ucsi->version <= UCSI_VERSION_1_2) - size = clamp(size, 0, 16); + if (size > UCSI_MAX_DATA_LENGTH(ucsi)) + return -EINVAL; ret = ucsi->ops->sync_control(ucsi, command); - if (ret) - return ret; + if (ucsi->ops->read_cci(ucsi, cci)) + return -EIO; - ret = ucsi->ops->read_cci(ucsi, cci); + if (*cci & UCSI_CCI_BUSY) + return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY; if (ret) return ret; - if (*cci & UCSI_CCI_BUSY) - return -EBUSY; - if (!(*cci & UCSI_CCI_COMMAND_COMPLETE)) return -EIO; @@ -148,21 +146,10 @@ static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num) int ret; command = UCSI_GET_ERROR_STATUS | UCSI_CONNECTOR_NUMBER(connector_num); - ret = ucsi_run_command(ucsi, command, &cci, - &error, sizeof(error), false); - - if (cci & UCSI_CCI_BUSY) { - ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false); - - return ret ? 
ret : -EBUSY; - } - + ret = ucsi_run_command(ucsi, command, &cci, &error, sizeof(error), false); if (ret < 0) return ret; - if (cci & UCSI_CCI_ERROR) - return -EIO; - switch (error) { case UCSI_ERROR_INCOMPATIBLE_PARTNER: return -EOPNOTSUPP; @@ -238,9 +225,8 @@ static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd, mutex_lock(&ucsi->ppm_lock); ret = ucsi_run_command(ucsi, cmd, &cci, data, size, conn_ack); - if (cci & UCSI_CCI_BUSY) - ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false) ?: -EBUSY; - else if (cci & UCSI_CCI_ERROR) + + if (cci & UCSI_CCI_ERROR) ret = ucsi_read_error(ucsi, connector_num); mutex_unlock(&ucsi->ppm_lock); @@ -752,104 +738,66 @@ static struct usb_power_delivery_capabilities *ucsi_get_pd_caps(struct ucsi_conn &pd_caps); } -static int ucsi_read_identity(struct ucsi_connector *con, u8 recipient, - u8 offset, u8 bytes, void *resp) +static int ucsi_get_pd_message(struct ucsi_connector *con, u8 recipient, + size_t bytes, void *data, u8 type) { - struct ucsi *ucsi = con->ucsi; + size_t len = min(bytes, UCSI_MAX_DATA_LENGTH(con->ucsi)); u64 command; + u8 offset; int ret; - command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) | - UCSI_CONNECTOR_NUMBER(con->num); - command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient); - command |= UCSI_GET_PD_MESSAGE_OFFSET(offset); - command |= UCSI_GET_PD_MESSAGE_BYTES(bytes); - command |= UCSI_GET_PD_MESSAGE_TYPE(UCSI_GET_PD_MESSAGE_TYPE_IDENTITY); - - ret = ucsi_send_command(ucsi, command, resp, bytes); - if (ret < 0) - dev_err(ucsi->dev, "UCSI_GET_PD_MESSAGE failed (%d)\n", ret); - - return ret; -} - -static int ucsi_get_identity(struct ucsi_connector *con, u8 recipient, - struct usb_pd_identity *id) -{ - struct ucsi *ucsi = con->ucsi; - struct ucsi_pd_message_disc_id resp = {}; - int ret; - - if (ucsi->version < UCSI_VERSION_2_0) { - /* - * Before UCSI v2.0, MESSAGE_IN is 16 bytes which cannot fit - * the 28 byte identity response including the VDM header. - * First request the VDM header, ID Header VDO, Cert Stat VDO - * and Product VDO. - */ - ret = ucsi_read_identity(con, recipient, 0, 0x10, &resp); - if (ret < 0) - return ret; - + for (offset = 0; offset < bytes; offset += len) { + len = min(len, bytes - offset); - /* Then request Product Type VDO1 through Product Type VDO3. */ - ret = ucsi_read_identity(con, recipient, 0x10, 0xc, - &resp.vdo[0]); - if (ret < 0) - return ret; + command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) | UCSI_CONNECTOR_NUMBER(con->num); + command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient); + command |= UCSI_GET_PD_MESSAGE_OFFSET(offset); + command |= UCSI_GET_PD_MESSAGE_BYTES(len); + command |= UCSI_GET_PD_MESSAGE_TYPE(type); - } else { - /* - * In UCSI v2.0 and after, MESSAGE_IN is large enough to request - * the large enough to request the full Discover Identity - * response at once. 
- */ - ret = ucsi_read_identity(con, recipient, 0x0, 0x1c, &resp); + ret = ucsi_send_command(con->ucsi, command, data + offset, len); if (ret < 0) return ret; } - id->id_header = resp.id_header; - id->cert_stat = resp.cert_stat; - id->product = resp.product; - id->vdo[0] = resp.vdo[0]; - id->vdo[1] = resp.vdo[1]; - id->vdo[2] = resp.vdo[2]; return 0; } static int ucsi_get_partner_identity(struct ucsi_connector *con) { + u32 vdo[7] = {}; int ret; - ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP, - &con->partner_identity); + ret = ucsi_get_pd_message(con, UCSI_RECIPIENT_SOP, sizeof(vdo), vdo, + UCSI_GET_PD_MESSAGE_TYPE_IDENTITY); if (ret < 0) return ret; + /* VDM Header is not part of struct usb_pd_identity, so dropping it. */ + con->partner_identity = *(struct usb_pd_identity *)&vdo[1]; + ret = typec_partner_set_identity(con->partner); - if (ret < 0) { - dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n", - ret); - } + if (ret < 0) + dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n", ret); return ret; } static int ucsi_get_cable_identity(struct ucsi_connector *con) { + u32 vdo[7] = {}; int ret; - ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP_P, - &con->cable_identity); + ret = ucsi_get_pd_message(con, UCSI_RECIPIENT_SOP_P, sizeof(vdo), vdo, + UCSI_GET_PD_MESSAGE_TYPE_IDENTITY); if (ret < 0) return ret; + con->cable_identity = *(struct usb_pd_identity *)&vdo[1]; + ret = typec_cable_set_identity(con->cable); - if (ret < 0) { - dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n", - ret); - } + if (ret < 0) + dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n", ret); return ret; } @@ -993,7 +941,8 @@ static int ucsi_register_cable(struct ucsi_connector *con) break; } - desc.identity = &con->cable_identity; + if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE) + desc.identity = &con->cable_identity; desc.active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE & cable_prop.flags); if (con->ucsi->version >= UCSI_VERSION_2_1) @@ -1094,7 +1043,8 @@ static int ucsi_register_partner(struct ucsi_connector *con) if (pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD) ucsi_register_device_pdos(con); - desc.identity = &con->partner_identity; + if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE) + desc.identity = &con->partner_identity; desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD; partner = typec_register_partner(con->port, &desc); @@ -1249,6 +1199,10 @@ static void ucsi_handle_connector_change(struct work_struct *work) mutex_lock(&con->lock); + if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags)) + dev_err_once(ucsi->dev, "%s entered without EVENT_PENDING\n", + __func__); + command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num); ret = ucsi_send_command_common(ucsi, command, &con->status, @@ -1341,12 +1295,26 @@ EXPORT_SYMBOL_GPL(ucsi_connector_change); /* -------------------------------------------------------------------------- */ +/* + * Hard Reset bit field was defined with value 1 in UCSI spec version 1.0. + * Starting with spec version 1.1, Hard Reset bit field was removed from the + * CONNECTOR_RESET command, until spec 2.0 reintroduced it with value 0, so, in effect, + * the value to pass in to the command for a Hard Reset is different depending + * on the supported UCSI version by the LPM. + * + * For performing a Data Reset on LPMs supporting version 2.0 and greater, + * this function needs to be called with the second argument set to 0. 
+ */ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard) { u64 command; command = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num); - command |= hard ? UCSI_CONNECTOR_RESET_HARD : 0; + + if (con->ucsi->version < UCSI_VERSION_1_1) + command |= hard ? UCSI_CONNECTOR_RESET_HARD_VER_1_0 : 0; + else if (con->ucsi->version >= UCSI_VERSION_2_0) + command |= hard ? 0 : UCSI_CONNECTOR_RESET_DATA_VER_2_0; return ucsi_send_command(con->ucsi, command, NULL, 0); } diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 5a3481d36d7abc..1cf5aad4c23a9e 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -11,7 +11,7 @@ #include #include #include -#include +#include /* -------------------------------------------------------------------------- */ @@ -29,6 +29,7 @@ struct dentry; #define UCSIv2_MESSAGE_OUT 272 /* UCSI versions */ +#define UCSI_VERSION_1_1 0x0110 #define UCSI_VERSION_1_2 0x0120 #define UCSI_VERSION_2_0 0x0200 #define UCSI_VERSION_2_1 0x0210 @@ -122,7 +123,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num); #define UCSI_DEFAULT_GET_CONNECTOR_NUMBER(_cmd_) (((_cmd_) >> 16) & GENMASK(6, 0)) /* CONNECTOR_RESET command bits */ -#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */ +#define UCSI_CONNECTOR_RESET_HARD_VER_1_0 BIT(23) /* Deprecated in v1.1 */ +#define UCSI_CONNECTOR_RESET_DATA_VER_2_0 BIT(23) /* Redefined in v2.0 */ + /* ACK_CC_CI bits */ #define UCSI_ACK_CONNECTOR_CHANGE BIT(16) @@ -344,47 +347,12 @@ struct ucsi_connector_status { #define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6 u32 request_data_obj; - u8 pwr_status[3]; -#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_[0]) & GENMASK(1, 0)) + u8 pwr_status; +#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(1, 0)) #define UCSI_CONSTAT_BC_NOT_CHARGING 0 #define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1 #define UCSI_CONSTAT_BC_SLOW_CHARGING 2 #define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3 -#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_[0]) & GENMASK(5, 2)) >> 2) -#define UCSI_CONSTAT_CAP_PWR_LOWERED 0 -#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1 -#define UCSI_CONSTAT_PROVIDER_PD_VERSION_OPER_MODE(_p_) \ - ((get_unaligned_le32(_p_) & GENMASK(21, 6)) >> 6) -#define UCSI_CONSTAT_ORIENTATION(_p_) (((_p_[2]) & GENMASK(6, 6)) >> 6) -#define UCSI_CONSTAT_ORIENTATION_DIRECT 0 -#define UCSI_CONSTAT_ORIENTATION_FLIPPED 1 -#define UCSI_CONSTAT_SINK_PATH_STATUS(_p_) (((_p_[2]) & GENMASK(7, 7)) >> 7) -#define UCSI_CONSTAT_SINK_PATH_DISABLED 0 -#define UCSI_CONSTAT_SINK_PATH_ENABLED 1 - u8 pwr_readings[9]; -#define UCSI_CONSTAT_REV_CURR_PROT_STATUS(_p_) ((_p_[0]) & 0x1) -#define UCSI_CONSTAT_PWR_READING_VALID(_p_) (((_p_[0]) & GENMASK(1, 1)) >> 1) -#define UCSI_CONSTAT_CURRENT_SCALE(_p_) (((_p_[0]) & GENMASK(4, 2)) >> 2) -#define UCSI_CONSTAT_PEAK_CURRENT(_p_) \ - ((get_unaligned_le32(_p_) & GENMASK(20, 5)) >> 5) -#define UCSI_CONSTAT_AVG_CURRENT(_p_) \ - ((get_unaligned_le32(&(_p_)[2]) & GENMASK(20, 5)) >> 5) -#define UCSI_CONSTAT_VOLTAGE_SCALE(_p_) \ - ((get_unaligned_le16(&(_p_)[4]) & GENMASK(8, 5)) >> 5) -#define UCSI_CONSTAT_VOLTAGE_READING(_p_) \ - ((get_unaligned_le32(&(_p_)[5]) & GENMASK(16, 1)) >> 1) -} __packed; - -/* - * Data structure filled by PPM in response to GET_PD_MESSAGE command with the - * Response Message Type set to Discover Identity Response. 
- */ -struct ucsi_pd_message_disc_id { - u32 vdm_header; - u32 id_header; - u32 cert_stat; - u32 product; - u32 vdo[3]; } __packed; /* -------------------------------------------------------------------------- */ @@ -435,6 +403,8 @@ struct ucsi { #define UCSI_DELAY_DEVICE_PDOS BIT(1) /* Reading PDOs fails until the parter is in PD mode */ }; +#define UCSI_MAX_DATA_LENGTH(u) (((u)->version < UCSI_VERSION_2_0) ? 0x10 : 0xff) + #define UCSI_MAX_SVID 5 #define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6) diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index b3ec799fc87337..ba58d11907bc58 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include "ucsi.h" enum enum_fw_mode { diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index 6aace19d595bcd..03c0fa8edc8db5 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -278,7 +278,7 @@ static void pmic_glink_ucsi_callback(const void *data, size_t len, void *priv) case UC_UCSI_USBC_NOTIFY_IND: schedule_work(&ucsi->notify_work); break; - }; + } } static void pmic_glink_ucsi_pdr_notify(void *priv, int state) diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c index ddbec2b78c8e18..6923fad31d7951 100644 --- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c +++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "ucsi.h" diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 302a89aeb258a5..8dac1edc74d4ee 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -372,7 +372,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } switch (wValue) { case USB_PORT_FEAT_SUSPEND: - if (hcd->speed == HCD_USB3) { + if (hcd->speed >= HCD_USB3) { pr_err(" ClearPortFeature: USB_PORT_FEAT_SUSPEND req not " "supported for USB 3.0 roothub\n"); goto error; @@ -388,7 +388,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_POWER: usbip_dbg_vhci_rh( " ClearPortFeature: USB_PORT_FEAT_POWER\n"); - if (hcd->speed == HCD_USB3) + if (hcd->speed >= HCD_USB3) vhci_hcd->port_status[rhport] &= ~USB_SS_PORT_STAT_POWER; else vhci_hcd->port_status[rhport] &= ~USB_PORT_STAT_POWER; @@ -404,19 +404,19 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case GetHubDescriptor: usbip_dbg_vhci_rh(" GetHubDescriptor\n"); - if (hcd->speed == HCD_USB3 && + if (hcd->speed >= HCD_USB3 && (wLength < USB_DT_SS_HUB_SIZE || wValue != (USB_DT_SS_HUB << 8))) { pr_err("Wrong hub descriptor type for USB 3.0 roothub.\n"); goto error; } - if (hcd->speed == HCD_USB3) + if (hcd->speed >= HCD_USB3) ss_hub_descriptor((struct usb_hub_descriptor *) buf); else hub_descriptor((struct usb_hub_descriptor *) buf); break; case DeviceRequest | USB_REQ_GET_DESCRIPTOR: - if (hcd->speed != HCD_USB3) + if (hcd->speed < HCD_USB3) goto error; if ((wValue >> 8) != USB_DT_BOS) @@ -503,7 +503,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_LINK_STATE: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_LINK_STATE\n"); - if (hcd->speed != HCD_USB3) { + if (hcd->speed < HCD_USB3) { pr_err("USB_PORT_FEAT_LINK_STATE req not " "supported for USB 2.0 roothub\n"); goto error; @@ -521,7 +521,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, 
u16 wValue, usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_U2_TIMEOUT\n"); /* TODO: add suspend/resume support! */ - if (hcd->speed != HCD_USB3) { + if (hcd->speed < HCD_USB3) { pr_err("USB_PORT_FEAT_U1/2_TIMEOUT req not " "supported for USB 2.0 roothub\n"); goto error; @@ -531,7 +531,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_SUSPEND\n"); /* Applicable only for USB2.0 hub */ - if (hcd->speed == HCD_USB3) { + if (hcd->speed >= HCD_USB3) { pr_err("USB_PORT_FEAT_SUSPEND req not " "supported for USB 3.0 roothub\n"); goto error; @@ -551,7 +551,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, pr_err("invalid port number %d\n", wIndex); goto error; } - if (hcd->speed == HCD_USB3) + if (hcd->speed >= HCD_USB3) vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER; else vhci_hcd->port_status[rhport] |= USB_PORT_STAT_POWER; @@ -564,7 +564,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, goto error; } /* Applicable only for USB3.0 hub */ - if (hcd->speed != HCD_USB3) { + if (hcd->speed < HCD_USB3) { pr_err("USB_PORT_FEAT_BH_PORT_RESET req not " "supported for USB 2.0 roothub\n"); goto error; @@ -578,7 +578,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, goto error; } /* if it's already enabled, disable */ - if (hcd->speed == HCD_USB3) { + if (hcd->speed >= HCD_USB3) { vhci_hcd->port_status[rhport] = 0; vhci_hcd->port_status[rhport] = (USB_SS_PORT_STAT_POWER | @@ -602,7 +602,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } if (wValue >= 32) goto error; - if (hcd->speed == HCD_USB3) { + if (hcd->speed >= HCD_USB3) { if ((vhci_hcd->port_status[rhport] & USB_SS_PORT_STAT_POWER) != 0) { vhci_hcd->port_status[rhport] |= (1 << wValue); @@ -616,7 +616,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case GetPortErrorCount: usbip_dbg_vhci_rh(" GetPortErrorCount\n"); - if (hcd->speed != HCD_USB3) { + if (hcd->speed < HCD_USB3) { pr_err("GetPortErrorCount req not " "supported for USB 2.0 roothub\n"); goto error; @@ -626,7 +626,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case SetHubDepth: usbip_dbg_vhci_rh(" SetHubDepth\n"); - if (hcd->speed != HCD_USB3) { + if (hcd->speed < HCD_USB3) { pr_err("SetHubDepth req not supported for " "USB 2.0 roothub\n"); goto error; @@ -646,7 +646,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, if (!invalid_rhport) { dump_port_status_diff(prev_port_status[rhport], vhci_hcd->port_status[rhport], - hcd->speed == HCD_USB3); + hcd->speed >= HCD_USB3); } } usbip_dbg_vhci_rh(" bye\n"); @@ -1157,8 +1157,8 @@ static int vhci_setup(struct usb_hcd *hcd) } else { vhci->vhci_hcd_ss = hcd_to_vhci_hcd(hcd); vhci->vhci_hcd_ss->vhci = vhci; - hcd->speed = HCD_USB3; - hcd->self.root_hub->speed = USB_SPEED_SUPER; + hcd->speed = HCD_USB31; + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; } /* @@ -1319,7 +1319,7 @@ static const struct hc_driver vhci_hc_driver = { .product_desc = driver_desc, .hcd_priv_size = sizeof(struct vhci_hcd), - .flags = HCD_USB3 | HCD_SHARED, + .flags = HCD_USB31 | HCD_SHARED, .reset = vhci_setup, .start = vhci_start, diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index e2847cd3e6e363..d5865460e82d54 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -283,6 +283,7 @@ static int 
valid_args(__u32 *pdev_nr, __u32 *rhport, case USB_SPEED_HIGH: case USB_SPEED_WIRELESS: case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: break; default: pr_err("Failed attach request for unsupported USB speed: %s\n", @@ -349,7 +350,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, vhci_hcd = hcd_to_vhci_hcd(hcd); vhci = vhci_hcd->vhci; - if (speed == USB_SPEED_SUPER) + if (speed >= USB_SPEED_SUPER) vdev = &vhci->vhci_hcd_ss->vdev[rhport]; else vdev = &vhci->vhci_hcd_hs->vdev[rhport]; diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig index 5265d09fc1c409..559fb9d3271fcc 100644 --- a/drivers/vdpa/Kconfig +++ b/drivers/vdpa/Kconfig @@ -11,8 +11,7 @@ if VDPA config VDPA_SIM tristate "vDPA device simulator core" - depends on RUNTIME_TESTING_MENU && HAS_DMA - select DMA_OPS + depends on RUNTIME_TESTING_MENU select VHOST_RING select IOMMU_IOVA help @@ -36,7 +35,12 @@ config VDPA_SIM_BLOCK config VDPA_USER tristate "VDUSE (vDPA Device in Userspace) support" depends on EVENTFD && MMU && HAS_DMA - select DMA_OPS + # + # This driver incorrectly tries to override the dma_ops. It should + # never have done that, but for now keep it working on architectures + # that use dma ops + # + depends on ARCH_HAS_DMA_OPS select VHOST_IOTLB select IOMMU_IOVA help diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index 0f347717021a2e..aa36de361c10ec 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -112,15 +112,12 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset, const void *src, int length); u8 ifcvf_get_status(struct ifcvf_hw *hw); void ifcvf_set_status(struct ifcvf_hw *hw, u8 status); -void io_write64_twopart(u64 val, u32 *lo, u32 *hi); void ifcvf_reset(struct ifcvf_hw *hw); u64 ifcvf_get_dev_features(struct ifcvf_hw *hw); u64 ifcvf_get_hw_features(struct ifcvf_hw *hw); int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features); u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num); -struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw); -int ifcvf_probed_virtio_net(struct ifcvf_hw *hw); u32 ifcvf_get_config_size(struct ifcvf_hw *hw); u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector); u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector); diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 50aac8fe57ef57..2cedf7e2dbc495 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -83,10 +83,28 @@ enum { MLX5_VDPA_NUM_AS = 2 }; +struct mlx5_vdpa_mr_resources { + struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS]; + unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS]; + + /* Pre-deletion mr list */ + struct list_head mr_list_head; + + /* Deferred mr list */ + struct list_head mr_gc_list_head; + struct workqueue_struct *wq_gc; + struct delayed_work gc_dwork_ent; + + struct mutex lock; + + atomic_t shutdown; +}; + struct mlx5_vdpa_dev { struct vdpa_device vdev; struct mlx5_core_dev *mdev; struct mlx5_vdpa_resources res; + struct mlx5_vdpa_mr_resources mres; u64 mlx_features; u64 actual_features; @@ -95,14 +113,23 @@ struct mlx5_vdpa_dev { u16 max_idx; u32 generation; - struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS]; - struct list_head mr_list_head; - /* serialize mr access */ - struct mutex mr_mtx; struct mlx5_control_vq cvq; struct workqueue_struct *wq; - unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS]; bool suspended; + + struct mlx5_async_ctx 
async_ctx; +}; + +struct mlx5_vdpa_async_cmd { + int err; + struct mlx5_async_work cb_work; + struct completion cmd_done; + + void *in; + size_t inlen; + + void *out; + size_t outlen; }; int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn); @@ -121,7 +148,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in, int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey); struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb); +int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev); void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev); +void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev); void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr); void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev, @@ -134,6 +163,14 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev, unsigned int asid); int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev); int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid); +int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_async_cmd *cmds, + int num_cmds); + +#define mlx5_vdpa_err(__dev, format, ...) \ + dev_err((__dev)->mdev->device, "%s:%d:(pid %d) error: " format, __func__, __LINE__, \ + current->pid, ##__VA_ARGS__) + #define mlx5_vdpa_warn(__dev, format, ...) \ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \ diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 4758914ccf8608..2dd21e0b399e78 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -49,17 +49,23 @@ static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt) } } -static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) -{ - int inlen; +struct mlx5_create_mkey_mem { + u8 out[MLX5_ST_SZ_BYTES(create_mkey_out)]; + u8 in[MLX5_ST_SZ_BYTES(create_mkey_in)]; + __be64 mtt[]; +}; + +struct mlx5_destroy_mkey_mem { + u8 out[MLX5_ST_SZ_BYTES(destroy_mkey_out)]; + u8 in[MLX5_ST_SZ_BYTES(destroy_mkey_in)]; +}; + +static void fill_create_direct_mr(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_direct_mr *mr, + struct mlx5_create_mkey_mem *mem) +{ + void *in = &mem->in; void *mkc; - void *in; - int err; - - inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid); mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); @@ -76,18 +82,36 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct MLX5_SET(create_mkey_in, in, translations_octword_actual_size, get_octo_len(mr->end - mr->start, mr->log_size)); populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt)); - err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen); - kvfree(in); - if (err) { - mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n"); - return err; - } - return 0; + MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY); + MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid); +} + +static void create_direct_mr_end(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_direct_mr *mr, + struct mlx5_create_mkey_mem *mem) +{ + u32 mkey_index = MLX5_GET(create_mkey_out, mem->out, mkey_index); + + mr->mr = mlx5_idx_to_mkey(mkey_index); +} + +static void fill_destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_direct_mr *mr, + struct 
mlx5_destroy_mkey_mem *mem) +{ + void *in = &mem->in; + + MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid); + MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); + MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->mr)); } static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) { + if (!mr->mr) + return; + mlx5_vdpa_destroy_mkey(mvdev, mr->mr); } @@ -179,6 +203,123 @@ static int klm_byte_size(int nklms) return 16 * ALIGN(nklms, 4); } +#define MLX5_VDPA_MTT_ALIGN 16 + +static int create_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) +{ + struct mlx5_vdpa_async_cmd *cmds; + struct mlx5_vdpa_direct_mr *dmr; + int err = 0; + int i = 0; + + cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL); + if (!cmds) + return -ENOMEM; + + list_for_each_entry(dmr, &mr->head, list) { + struct mlx5_create_mkey_mem *cmd_mem; + int mttlen, mttcount; + + mttlen = roundup(MLX5_ST_SZ_BYTES(mtt) * dmr->nsg, MLX5_VDPA_MTT_ALIGN); + mttcount = mttlen / sizeof(cmd_mem->mtt[0]); + cmd_mem = kvcalloc(1, struct_size(cmd_mem, mtt, mttcount), GFP_KERNEL); + if (!cmd_mem) { + err = -ENOMEM; + goto done; + } + + cmds[i].out = cmd_mem->out; + cmds[i].outlen = sizeof(cmd_mem->out); + cmds[i].in = cmd_mem->in; + cmds[i].inlen = struct_size(cmd_mem, mtt, mttcount); + + fill_create_direct_mr(mvdev, dmr, cmd_mem); + + i++; + } + + err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs); + if (err) { + + mlx5_vdpa_err(mvdev, "error issuing MTT mkey creation for direct mrs: %d\n", err); + goto done; + } + + i = 0; + list_for_each_entry(dmr, &mr->head, list) { + struct mlx5_vdpa_async_cmd *cmd = &cmds[i++]; + struct mlx5_create_mkey_mem *cmd_mem; + + cmd_mem = container_of(cmd->out, struct mlx5_create_mkey_mem, out); + + if (!cmd->err) { + create_direct_mr_end(mvdev, dmr, cmd_mem); + } else { + err = err ? err : cmd->err; + mlx5_vdpa_err(mvdev, "error creating MTT mkey [0x%llx, 0x%llx]: %d\n", + dmr->start, dmr->end, cmd->err); + } + } + +done: + for (i = i-1; i >= 0; i--) { + struct mlx5_create_mkey_mem *cmd_mem; + + cmd_mem = container_of(cmds[i].out, struct mlx5_create_mkey_mem, out); + kvfree(cmd_mem); + } + + kvfree(cmds); + return err; +} + +DEFINE_FREE(free_cmds, struct mlx5_vdpa_async_cmd *, kvfree(_T)) +DEFINE_FREE(free_cmd_mem, struct mlx5_destroy_mkey_mem *, kvfree(_T)) + +static int destroy_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) +{ + struct mlx5_destroy_mkey_mem *cmd_mem __free(free_cmd_mem) = NULL; + struct mlx5_vdpa_async_cmd *cmds __free(free_cmds) = NULL; + struct mlx5_vdpa_direct_mr *dmr; + int err = 0; + int i = 0; + + cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL); + cmd_mem = kvcalloc(mr->num_directs, sizeof(*cmd_mem), GFP_KERNEL); + if (!cmds || !cmd_mem) + return -ENOMEM; + + list_for_each_entry(dmr, &mr->head, list) { + cmds[i].out = cmd_mem[i].out; + cmds[i].outlen = sizeof(cmd_mem[i].out); + cmds[i].in = cmd_mem[i].in; + cmds[i].inlen = sizeof(cmd_mem[i].in); + fill_destroy_direct_mr(mvdev, dmr, &cmd_mem[i]); + i++; + } + + err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs); + if (err) { + + mlx5_vdpa_err(mvdev, "error issuing MTT mkey deletion for direct mrs: %d\n", err); + return err; + } + + i = 0; + list_for_each_entry(dmr, &mr->head, list) { + struct mlx5_vdpa_async_cmd *cmd = &cmds[i++]; + + dmr->mr = 0; + if (cmd->err) { + err = err ? 
err : cmd->err; + mlx5_vdpa_err(mvdev, "error deleting MTT mkey [0x%llx, 0x%llx]: %d\n", + dmr->start, dmr->end, cmd->err); + } + } + + return err; +} + static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) { int inlen; @@ -279,14 +420,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr goto err_map; } - err = create_direct_mr(mvdev, mr); - if (err) - goto err_direct; - return 0; -err_direct: - dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); err_map: sg_free_table(&mr->sg_head); return err; @@ -401,6 +536,10 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, if (err) goto err_chain; + err = create_direct_keys(mvdev, mr); + if (err) + goto err_chain; + /* Create the memory key that defines the guests's address space. This * memory key refers to the direct keys that contain the MTT * translations @@ -489,6 +628,7 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr struct mlx5_vdpa_direct_mr *n; destroy_indirect_key(mvdev, mr); + destroy_direct_keys(mvdev, mr); list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) { list_del_init(&dmr->list); unmap_direct_mr(mvdev, dmr); @@ -513,22 +653,58 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_ kfree(mr); } +/* There can be multiple .set_map() operations in quick succession. + * This large delay is a simple way to prevent the MR cleanup from blocking + * .set_map() MR creation in this scenario. + */ +#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000 + +static void mlx5_vdpa_mr_gc_handler(struct work_struct *work) +{ + struct mlx5_vdpa_mr_resources *mres; + struct mlx5_vdpa_mr *mr, *tmp; + struct mlx5_vdpa_dev *mvdev; + + mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work); + + if (atomic_read(&mres->shutdown)) { + mutex_lock(&mres->lock); + } else if (!mutex_trylock(&mres->lock)) { + queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent, + msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS)); + return; + } + + mvdev = container_of(mres, struct mlx5_vdpa_dev, mres); + + list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) { + _mlx5_vdpa_destroy_mr(mvdev, mr); + } + + mutex_unlock(&mres->lock); +} + static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) { + struct mlx5_vdpa_mr_resources *mres = &mvdev->mres; + if (!mr) return; - if (refcount_dec_and_test(&mr->refcount)) - _mlx5_vdpa_destroy_mr(mvdev, mr); + if (refcount_dec_and_test(&mr->refcount)) { + list_move_tail(&mr->mr_list, &mres->mr_gc_list_head); + queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent, + msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS)); + } } void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) { - mutex_lock(&mvdev->mr_mtx); + mutex_lock(&mvdev->mres.lock); _mlx5_vdpa_put_mr(mvdev, mr); - mutex_unlock(&mvdev->mr_mtx); + mutex_unlock(&mvdev->mres.lock); } static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, @@ -543,44 +719,47 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) { - mutex_lock(&mvdev->mr_mtx); + mutex_lock(&mvdev->mres.lock); _mlx5_vdpa_get_mr(mvdev, mr); - mutex_unlock(&mvdev->mr_mtx); + mutex_unlock(&mvdev->mres.lock); } void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *new_mr, unsigned int asid) { - struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid]; + struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid]; - 
mutex_lock(&mvdev->mr_mtx); + mutex_lock(&mvdev->mres.lock); _mlx5_vdpa_put_mr(mvdev, old_mr); - mvdev->mr[asid] = new_mr; + mvdev->mres.mr[asid] = new_mr; - mutex_unlock(&mvdev->mr_mtx); + mutex_unlock(&mvdev->mres.lock); } static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev) { struct mlx5_vdpa_mr *mr; - mutex_lock(&mvdev->mr_mtx); + mutex_lock(&mvdev->mres.lock); - list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) { + list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) { mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: " "mr: %p, mkey: 0x%x, refcount: %u\n", mr, mr->mkey, refcount_read(&mr->refcount)); } - mutex_unlock(&mvdev->mr_mtx); + mutex_unlock(&mvdev->mres.lock); } -void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev) +void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev) { + if (!mvdev->res.valid) + return; + for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) mlx5_vdpa_update_mr(mvdev, NULL, i); @@ -613,7 +792,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, if (err) goto err_iotlb; - list_add_tail(&mr->mr_list, &mvdev->mr_list_head); + list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head); return 0; @@ -639,9 +818,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, if (!mr) return ERR_PTR(-ENOMEM); - mutex_lock(&mvdev->mr_mtx); + mutex_lock(&mvdev->mres.lock); err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb); - mutex_unlock(&mvdev->mr_mtx); + mutex_unlock(&mvdev->mres.lock); if (err) goto out_err; @@ -661,7 +840,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev, { int err; - if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid) + if (mvdev->mres.group2asid[MLX5_VDPA_CVQ_GROUP] != asid) return 0; spin_lock(&mvdev->cvq.iommu_lock); @@ -703,3 +882,33 @@ int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid) return 0; } + +int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev) +{ + struct mlx5_vdpa_mr_resources *mres = &mvdev->mres; + + mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc"); + if (!mres->wq_gc) + return -ENOMEM; + + INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler); + + mutex_init(&mres->lock); + + INIT_LIST_HEAD(&mres->mr_list_head); + INIT_LIST_HEAD(&mres->mr_gc_list_head); + + return 0; +} + +void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev) +{ + struct mlx5_vdpa_mr_resources *mres = &mvdev->mres; + + atomic_set(&mres->shutdown, 1); + + flush_delayed_work(&mres->gc_dwork_ent); + destroy_workqueue(mres->wq_gc); + mres->wq_gc = NULL; + mutex_destroy(&mres->lock); +} diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c index 5c5a41b64bfcd6..aeae31d0cefaee 100644 --- a/drivers/vdpa/mlx5/core/resources.c +++ b/drivers/vdpa/mlx5/core/resources.c @@ -256,7 +256,6 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev) mlx5_vdpa_warn(mvdev, "resources already allocated\n"); return -EINVAL; } - mutex_init(&mvdev->mr_mtx); res->uar = mlx5_get_uars_page(mdev); if (IS_ERR(res->uar)) { err = PTR_ERR(res->uar); @@ -301,7 +300,6 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev) err_uctx: mlx5_put_uars_page(mdev, res->uar); err_uars: - mutex_destroy(&mvdev->mr_mtx); return err; } @@ -318,6 +316,78 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev) dealloc_pd(mvdev, res->pdn, res->uid); destroy_uctx(mvdev, res->uid); mlx5_put_uars_page(mvdev->mdev, res->uar); - mutex_destroy(&mvdev->mr_mtx); res->valid = false; } + +static void virtqueue_cmd_callback(int status, struct 
mlx5_async_work *context) +{ + struct mlx5_vdpa_async_cmd *cmd = + container_of(context, struct mlx5_vdpa_async_cmd, cb_work); + + cmd->err = mlx5_cmd_check(context->ctx->dev, status, cmd->in, cmd->out); + complete(&cmd->cmd_done); +} + +static int issue_async_cmd(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_async_cmd *cmds, + int issued, + int *completed) + +{ + struct mlx5_vdpa_async_cmd *cmd = &cmds[issued]; + int err; + +retry: + err = mlx5_cmd_exec_cb(&mvdev->async_ctx, + cmd->in, cmd->inlen, + cmd->out, cmd->outlen, + virtqueue_cmd_callback, + &cmd->cb_work); + if (err == -EBUSY) { + if (*completed < issued) { + /* Throttled by own commands: wait for oldest completion. */ + wait_for_completion(&cmds[*completed].cmd_done); + (*completed)++; + + goto retry; + } else { + /* Throttled by external commands: switch to sync api. */ + err = mlx5_cmd_exec(mvdev->mdev, + cmd->in, cmd->inlen, + cmd->out, cmd->outlen); + if (!err) + (*completed)++; + } + } + + return err; +} + +int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev, + struct mlx5_vdpa_async_cmd *cmds, + int num_cmds) +{ + int completed = 0; + int issued = 0; + int err = 0; + + for (int i = 0; i < num_cmds; i++) + init_completion(&cmds[i].cmd_done); + + while (issued < num_cmds) { + + err = issue_async_cmd(mvdev, cmds, issued, &completed); + if (err) { + mlx5_vdpa_err(mvdev, "error issuing command %d of %d: %d\n", + issued, num_cmds, err); + break; + } + + issued++; + } + + while (completed < issued) + wait_for_completion(&cmds[completed++].cmd_done); + + return err; +} diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index fa78e8288ebbf8..dee019977716b4 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -941,11 +941,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); - vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; + vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; if (vq_mr) MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); - vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; + vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey); @@ -953,11 +953,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, /* If there is no mr update, make sure that the existing ones are set * modify to ready. 
*/ - vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; + vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; if (vq_mr) mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; - vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; + vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; if (vq_desc_mr) mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; } @@ -1184,40 +1184,92 @@ struct mlx5_virtq_attr { u16 used_index; }; -static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, - struct mlx5_virtq_attr *attr) -{ - int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out); - u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {}; - void *out; - void *obj_context; - void *cmd_hdr; - int err; +struct mlx5_virtqueue_query_mem { + u8 in[MLX5_ST_SZ_BYTES(query_virtio_net_q_in)]; + u8 out[MLX5_ST_SZ_BYTES(query_virtio_net_q_out)]; +}; - out = kzalloc(outlen, GFP_KERNEL); - if (!out) - return -ENOMEM; +struct mlx5_virtqueue_modify_mem { + u8 in[MLX5_ST_SZ_BYTES(modify_virtio_net_q_in)]; + u8 out[MLX5_ST_SZ_BYTES(modify_virtio_net_q_out)]; +}; - cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr); +static void fill_query_virtqueue_cmd(struct mlx5_vdpa_net *ndev, + struct mlx5_vdpa_virtqueue *mvq, + struct mlx5_virtqueue_query_mem *cmd) +{ + void *cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr); MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT); MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q); MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); - err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); - if (err) - goto err_cmd; +} + +static void query_virtqueue_end(struct mlx5_vdpa_net *ndev, + struct mlx5_virtqueue_query_mem *cmd, + struct mlx5_virtq_attr *attr) +{ + void *obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, cmd->out, obj_context); - obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context); memset(attr, 0, sizeof(*attr)); attr->state = MLX5_GET(virtio_net_q_object, obj_context, state); attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index); attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index); - kfree(out); - return 0; +} -err_cmd: - kfree(out); +static int query_virtqueues(struct mlx5_vdpa_net *ndev, + int start_vq, + int num_vqs, + struct mlx5_virtq_attr *attrs) +{ + struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; + struct mlx5_virtqueue_query_mem *cmd_mem; + struct mlx5_vdpa_async_cmd *cmds; + int err = 0; + + WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n", + start_vq, start_vq + num_vqs, mvdev->max_vqs); + + cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL); + cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL); + if (!cmds || !cmd_mem) { + err = -ENOMEM; + goto done; + } + + for (int i = 0; i < num_vqs; i++) { + cmds[i].in = &cmd_mem[i].in; + cmds[i].inlen = sizeof(cmd_mem[i].in); + cmds[i].out = &cmd_mem[i].out; + cmds[i].outlen = sizeof(cmd_mem[i].out); + fill_query_virtqueue_cmd(ndev, &ndev->vqs[start_vq + i], &cmd_mem[i]); + } + + err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs); + if (err) { + mlx5_vdpa_err(mvdev, "error issuing query cmd for vq range [%d, %d): %d\n", + start_vq, start_vq + num_vqs, err); + goto done; + } 
+ + for (int i = 0; i < num_vqs; i++) { + struct mlx5_vdpa_async_cmd *cmd = &cmds[i]; + int vq_idx = start_vq + i; + + if (cmd->err) { + mlx5_vdpa_err(mvdev, "query vq %d failed, err: %d\n", vq_idx, err); + if (!err) + err = cmd->err; + continue; + } + + query_virtqueue_end(ndev, &cmd_mem[i], &attrs[i]); + } + +done: + kvfree(cmd_mem); + kvfree(cmds); return err; } @@ -1251,51 +1303,30 @@ static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq) return true; } -static int modify_virtqueue(struct mlx5_vdpa_net *ndev, - struct mlx5_vdpa_virtqueue *mvq, - int state) +static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev, + struct mlx5_vdpa_virtqueue *mvq, + int state, + struct mlx5_virtqueue_modify_mem *cmd) { - int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in); - u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {}; struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; struct mlx5_vdpa_mr *desc_mr = NULL; struct mlx5_vdpa_mr *vq_mr = NULL; - bool state_change = false; void *obj_context; void *cmd_hdr; void *vq_ctx; - void *in; - int err; - if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE) - return 0; - - if (!modifiable_virtqueue_fields(mvq)) - return -EINVAL; - - in = kzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr); + cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr); MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q); MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); - obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context); + obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, obj_context); vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context); - if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) { - if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { - err = -EINVAL; - goto done; - } - + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) MLX5_SET(virtio_net_q_object, obj_context, state, state); - state_change = true; - } if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) { MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); @@ -1323,7 +1354,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, } if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { - vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; + vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]]; if (vq_mr) MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); @@ -1332,7 +1363,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, } if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { - desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; + desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey); @@ -1341,38 +1372,36 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, } MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); - err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); - if (err) - goto done; +} - if (state_change) - mvq->fw_state = state; +static void modify_virtqueue_end(struct 
mlx5_vdpa_net *ndev, + struct mlx5_vdpa_virtqueue *mvq, + int state) +{ + struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { + unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]; + struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid]; + mlx5_vdpa_put_mr(mvdev, mvq->vq_mr); mlx5_vdpa_get_mr(mvdev, vq_mr); mvq->vq_mr = vq_mr; } if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { + unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]; + struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid]; + mlx5_vdpa_put_mr(mvdev, mvq->desc_mr); mlx5_vdpa_get_mr(mvdev, desc_mr); mvq->desc_mr = desc_mr; } - mvq->modified_fields = 0; - -done: - kfree(in); - return err; -} + if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) + mvq->fw_state = state; -static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev, - struct mlx5_vdpa_virtqueue *mvq, - unsigned int state) -{ - mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE; - return modify_virtqueue(ndev, mvq, state); + mvq->modified_fields = 0; } static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) @@ -1525,53 +1554,136 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, return err; } -static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +static int modify_virtqueues(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs, int state) { - struct mlx5_virtq_attr attr; - int err; + struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; + struct mlx5_virtqueue_modify_mem *cmd_mem; + struct mlx5_vdpa_async_cmd *cmds; + int err = 0; - if (!mvq->initialized) - return 0; + WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n", + start_vq, start_vq + num_vqs, mvdev->max_vqs); - if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) - return 0; + cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL); + cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL); + if (!cmds || !cmd_mem) { + err = -ENOMEM; + goto done; + } - err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND); - if (err) { - mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed, err: %d\n", err); - return err; + for (int i = 0; i < num_vqs; i++) { + struct mlx5_vdpa_async_cmd *cmd = &cmds[i]; + struct mlx5_vdpa_virtqueue *mvq; + int vq_idx = start_vq + i; + + mvq = &ndev->vqs[vq_idx]; + + if (!modifiable_virtqueue_fields(mvq)) { + err = -EINVAL; + goto done; + } + + if (mvq->fw_state != state) { + if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { + err = -EINVAL; + goto done; + } + + mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE; + } + + cmd->in = &cmd_mem[i].in; + cmd->inlen = sizeof(cmd_mem[i].in); + cmd->out = &cmd_mem[i].out; + cmd->outlen = sizeof(cmd_mem[i].out); + fill_modify_virtqueue_cmd(ndev, mvq, state, &cmd_mem[i]); } - err = query_virtqueue(ndev, mvq, &attr); + err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs); if (err) { - mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue, err: %d\n", err); - return err; + mlx5_vdpa_err(mvdev, "error issuing modify cmd for vq range [%d, %d)\n", + start_vq, start_vq + num_vqs); + goto done; } - mvq->avail_idx = attr.available_index; - mvq->used_idx = attr.used_index; + for (int i = 0; i < num_vqs; i++) { + struct mlx5_vdpa_async_cmd *cmd = &cmds[i]; + struct mlx5_vdpa_virtqueue *mvq; + int vq_idx = start_vq + i; - return 0; + mvq = &ndev->vqs[vq_idx]; + + if (cmd->err) { + 
mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n", + vq_idx, mvq->fw_state, state, err); + if (!err) + err = cmd->err; + continue; + } + + modify_virtqueue_end(ndev, mvq, state); + } + +done: + kvfree(cmd_mem); + kvfree(cmds); + return err; } -static int suspend_vqs(struct mlx5_vdpa_net *ndev) +static int suspend_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs) { - int err = 0; - int i; + struct mlx5_vdpa_virtqueue *mvq; + struct mlx5_virtq_attr *attrs; + int vq_idx, i; + int err; + + if (start_vq >= ndev->cur_num_vqs) + return -EINVAL; + + mvq = &ndev->vqs[start_vq]; + if (!mvq->initialized) + return 0; + + if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) + return 0; + + err = modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND); + if (err) + return err; + + attrs = kcalloc(num_vqs, sizeof(struct mlx5_virtq_attr), GFP_KERNEL); + if (!attrs) + return -ENOMEM; - for (i = 0; i < ndev->cur_num_vqs; i++) { - int local_err = suspend_vq(ndev, &ndev->vqs[i]); + err = query_virtqueues(ndev, start_vq, num_vqs, attrs); + if (err) + goto done; - err = local_err ? local_err : err; + for (i = 0, vq_idx = start_vq; i < num_vqs; i++, vq_idx++) { + mvq = &ndev->vqs[vq_idx]; + mvq->avail_idx = attrs[i].available_index; + mvq->used_idx = attrs[i].used_index; } +done: + kfree(attrs); return err; } -static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + return suspend_vqs(ndev, mvq->index, 1); +} + +static int resume_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs) { + struct mlx5_vdpa_virtqueue *mvq; int err; + if (start_vq >= ndev->mvdev.max_vqs) + return -EINVAL; + + mvq = &ndev->vqs[start_vq]; if (!mvq->initialized) return 0; @@ -1583,13 +1695,9 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq /* Due to a FW quirk we need to modify the VQ fields first then change state. * This should be fixed soon. After that, a single command can be used. */ - err = modify_virtqueue(ndev, mvq, 0); - if (err) { - mlx5_vdpa_warn(&ndev->mvdev, - "modify vq properties failed for vq %u, err: %d\n", - mvq->index, err); + err = modify_virtqueues(ndev, start_vq, num_vqs, mvq->fw_state); + if (err) return err; - } break; case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND: if (!is_resumable(ndev)) { @@ -1600,30 +1708,17 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY: return 0; default: - mlx5_vdpa_warn(&ndev->mvdev, "resume vq %u called from bad state %d\n", + mlx5_vdpa_err(&ndev->mvdev, "resume vq %u called from bad state %d\n", mvq->index, mvq->fw_state); return -EINVAL; } - err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); - if (err) - mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u, err: %d\n", - mvq->index, err); - - return err; + return modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); } -static int resume_vqs(struct mlx5_vdpa_net *ndev) +static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) { - int err = 0; - - for (int i = 0; i < ndev->cur_num_vqs; i++) { - int local_err = resume_vq(ndev, &ndev->vqs[i]); - - err = local_err ? 
local_err : err; - } - - return err; + return resume_vqs(ndev, mvq->index, 1); } static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) @@ -2002,13 +2097,13 @@ static int setup_steering(struct mlx5_vdpa_net *ndev) ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); if (!ns) { - mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n"); + mlx5_vdpa_err(&ndev->mvdev, "failed to get flow namespace\n"); return -EOPNOTSUPP; } ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); if (IS_ERR(ndev->rxft)) { - mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n"); + mlx5_vdpa_err(&ndev->mvdev, "failed to create flow table\n"); return PTR_ERR(ndev->rxft); } mlx5_vdpa_add_rx_flow_table(ndev); @@ -2124,45 +2219,48 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd) static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps) { struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); - int cur_qps = ndev->cur_num_vqs / 2; + int cur_vqs = ndev->cur_num_vqs; + int new_vqs = newqps * 2; int err; int i; - if (cur_qps > newqps) { - err = modify_rqt(ndev, 2 * newqps); + if (cur_vqs > new_vqs) { + err = modify_rqt(ndev, new_vqs); if (err) return err; - for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) { - struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i]; - - if (is_resumable(ndev)) - suspend_vq(ndev, mvq); - else - teardown_vq(ndev, mvq); + if (is_resumable(ndev)) { + suspend_vqs(ndev, new_vqs, cur_vqs - new_vqs); + } else { + for (i = new_vqs; i < cur_vqs; i++) + teardown_vq(ndev, &ndev->vqs[i]); } - ndev->cur_num_vqs = 2 * newqps; + ndev->cur_num_vqs = new_vqs; } else { - ndev->cur_num_vqs = 2 * newqps; - for (i = cur_qps * 2; i < 2 * newqps; i++) { - struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i]; + ndev->cur_num_vqs = new_vqs; - err = mvq->initialized ? 
resume_vq(ndev, mvq) : setup_vq(ndev, mvq, true); + for (i = cur_vqs; i < new_vqs; i++) { + err = setup_vq(ndev, &ndev->vqs[i], false); if (err) goto clean_added; } - err = modify_rqt(ndev, 2 * newqps); + + err = resume_vqs(ndev, cur_vqs, new_vqs - cur_vqs); + if (err) + goto clean_added; + + err = modify_rqt(ndev, new_vqs); if (err) goto clean_added; } return 0; clean_added: - for (--i; i >= 2 * cur_qps; --i) + for (--i; i >= cur_vqs; --i) teardown_vq(ndev, &ndev->vqs[i]); - ndev->cur_num_vqs = 2 * cur_qps; + ndev->cur_num_vqs = cur_vqs; return err; } @@ -2528,9 +2626,9 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa return 0; } - err = query_virtqueue(ndev, mvq, &attr); + err = query_virtqueues(ndev, mvq->index, 1, &attr); if (err) { - mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); + mlx5_vdpa_err(mvdev, "failed to query virtqueue\n"); return err; } state->split.avail_index = attr.used_index; @@ -2755,6 +2853,9 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p struct mlx5_eqe *eqe = param; int ret = NOTIFY_DONE; + if (ndev->mvdev.suspended) + return NOTIFY_DONE; + if (event == MLX5_EVENT_TYPE_PORT_CHANGE) { switch (eqe->sub_type) { case MLX5_PORT_CHANGE_SUBTYPE_DOWN: @@ -2879,7 +2980,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu int err; if (mvq->initialized) { - err = query_virtqueue(ndev, mvq, &attr); + err = query_virtqueues(ndev, mvq->index, 1, &attr); if (err) return err; } @@ -2948,7 +3049,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, bool teardown = !is_resumable(ndev); int err; - suspend_vqs(ndev); + suspend_vqs(ndev, 0, ndev->cur_num_vqs); if (teardown) { err = save_channels_info(ndev); if (err) @@ -2973,7 +3074,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, return err; } - resume_vqs(ndev); + resume_vqs(ndev, 0, ndev->cur_num_vqs); return 0; } @@ -3097,7 +3198,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) teardown_vq_resources(ndev); if (ndev->setup) { - err = resume_vqs(ndev); + err = resume_vqs(ndev, 0, ndev->cur_num_vqs); if (err) { mlx5_vdpa_warn(mvdev, "failed to resume VQs\n"); goto err_driver; @@ -3122,7 +3223,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) err_driver: unregister_link_notifier(ndev); err_setup: - mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); + mlx5_vdpa_clean_mrs(&ndev->mvdev); ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; err_clear: up_write(&ndev->reslock); @@ -3134,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev) /* default mapping all groups are mapped to asid 0 */ for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++) - mvdev->group2asid[i] = 0; + mvdev->mres.group2asid[i] = 0; } static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev) @@ -3174,7 +3275,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags) } if (flags & VDPA_RESET_F_CLEAN_MAP) - mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); + mlx5_vdpa_clean_mrs(&ndev->mvdev); ndev->mvdev.status = 0; ndev->mvdev.suspended = false; ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT; @@ -3189,7 +3290,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags) if ((flags & VDPA_RESET_F_CLEAN_MAP) && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { if (mlx5_vdpa_create_dma_mr(mvdev)) - mlx5_vdpa_warn(mvdev, "create MR failed\n"); + mlx5_vdpa_err(mvdev, "create MR failed\n"); } if (vq_reset) setup_vq_resources(ndev, false); @@ -3244,7 +3345,7 @@ static 
int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, new_mr = mlx5_vdpa_create_mr(mvdev, iotlb); if (IS_ERR(new_mr)) { err = PTR_ERR(new_mr); - mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err); + mlx5_vdpa_err(mvdev, "create map failed(%d)\n", err); return err; } } else { @@ -3252,12 +3353,12 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, new_mr = NULL; } - if (!mvdev->mr[asid]) { + if (!mvdev->mres.mr[asid]) { mlx5_vdpa_update_mr(mvdev, new_mr, asid); } else { err = mlx5_vdpa_change_map(mvdev, new_mr, asid); if (err) { - mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err); + mlx5_vdpa_err(mvdev, "change map failed(%d)\n", err); goto out_err; } } @@ -3332,7 +3433,10 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev) ndev = to_mlx5_vdpa_ndev(mvdev); free_fixed_resources(ndev); - mlx5_vdpa_destroy_mr_resources(mvdev); + mlx5_vdpa_clean_mrs(mvdev); + mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); + mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx); + if (!is_zero_ether_addr(ndev->config.mac)) { pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); @@ -3500,8 +3604,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev) mlx5_vdpa_info(mvdev, "suspending device\n"); down_write(&ndev->reslock); - unregister_link_notifier(ndev); - err = suspend_vqs(ndev); + err = suspend_vqs(ndev, 0, ndev->cur_num_vqs); mlx5_vdpa_cvq_suspend(mvdev); mvdev->suspended = true; up_write(&ndev->reslock); @@ -3521,8 +3624,8 @@ static int mlx5_vdpa_resume(struct vdpa_device *vdev) down_write(&ndev->reslock); mvdev->suspended = false; - err = resume_vqs(ndev); - register_link_notifier(ndev); + err = resume_vqs(ndev, 0, ndev->cur_num_vqs); + queue_link_work(ndev); up_write(&ndev->reslock); return err; @@ -3537,12 +3640,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group, if (group >= MLX5_VDPA_NUMVQ_GROUPS) return -EINVAL; - mvdev->group2asid[group] = asid; + mvdev->mres.group2asid[group] = asid; - mutex_lock(&mvdev->mr_mtx); - if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid]) - err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid); - mutex_unlock(&mvdev->mr_mtx); + mutex_lock(&mvdev->mres.lock); + if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid]) + err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid); + mutex_unlock(&mvdev->mres.lock); return err; } @@ -3854,18 +3957,22 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, ndev->rqt_size = 1; } + mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx); + ndev->mvdev.mlx_features = device_features; mvdev->vdev.dma_dev = &mdev->pdev->dev; err = mlx5_vdpa_alloc_resources(&ndev->mvdev); if (err) goto err_mpfs; - INIT_LIST_HEAD(&mvdev->mr_list_head); + err = mlx5_vdpa_init_mr_resources(mvdev); + if (err) + goto err_res; if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { err = mlx5_vdpa_create_dma_mr(mvdev); if (err) - goto err_res; + goto err_mr_res; } err = alloc_fixed_resources(ndev); @@ -3906,6 +4013,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, err_res2: free_fixed_resources(ndev); err_mr: + mlx5_vdpa_clean_mrs(mvdev); +err_mr_res: mlx5_vdpa_destroy_mr_resources(mvdev); err_res: mlx5_vdpa_free_resources(&ndev->mvdev); @@ -3937,9 +4046,37 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device * mgtdev->ndev = NULL; } +static int mlx5_vdpa_set_attr(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev, + const struct 
vdpa_dev_set_config *add_config) +{ + struct virtio_net_config *config; + struct mlx5_core_dev *pfmdev; + struct mlx5_vdpa_dev *mvdev; + struct mlx5_vdpa_net *ndev; + struct mlx5_core_dev *mdev; + int err = -EOPNOTSUPP; + + mvdev = to_mvdev(dev); + ndev = to_mlx5_vdpa_ndev(mvdev); + mdev = mvdev->mdev; + config = &ndev->config; + + down_write(&ndev->reslock); + if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) { + pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev)); + err = mlx5_mpfs_add_mac(pfmdev, config->mac); + if (!err) + ether_addr_copy(config->mac, add_config->net.mac); + } + + up_write(&ndev->reslock); + return err; +} + static const struct vdpa_mgmtdev_ops mdev_ops = { .dev_add = mlx5_vdpa_dev_add, .dev_del = mlx5_vdpa_dev_del, + .dev_set_attr = mlx5_vdpa_set_attr, }; static struct virtio_device_id id_table[] = { diff --git a/drivers/vdpa/pds/cmds.h b/drivers/vdpa/pds/cmds.h index e24d85cb8f1ced..6b1bc33356b090 100644 --- a/drivers/vdpa/pds/cmds.h +++ b/drivers/vdpa/pds/cmds.h @@ -14,5 +14,4 @@ int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx, struct pds_vdpa_vq_info *vq_info); int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx, struct pds_vdpa_vq_info *vq_info); -int pds_vdpa_cmd_set_features(struct pds_vdpa_device *pdsv, u64 features); #endif /* _VDPA_CMDS_H_ */ diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 4dbd2e55a288bb..8a372b51c21ad2 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -1361,6 +1361,80 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info return err; } +static int vdpa_dev_net_device_attr_set(struct vdpa_device *vdev, + struct genl_info *info) +{ + struct vdpa_dev_set_config set_config = {}; + struct vdpa_mgmt_dev *mdev = vdev->mdev; + struct nlattr **nl_attrs = info->attrs; + const u8 *macaddr; + int err = -EOPNOTSUPP; + + down_write(&vdev->cf_lock); + if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) { + set_config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR); + macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]); + + if (is_valid_ether_addr(macaddr)) { + ether_addr_copy(set_config.net.mac, macaddr); + if (mdev->ops->dev_set_attr) { + err = mdev->ops->dev_set_attr(mdev, vdev, + &set_config); + } else { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "Operation not supported by the device."); + } + } else { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "Invalid MAC address"); + } + } + up_write(&vdev->cf_lock); + return err; +} + +static int vdpa_nl_cmd_dev_attr_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct vdpa_device *vdev; + struct device *dev; + const char *name; + u64 classes; + int err = 0; + + if (!info->attrs[VDPA_ATTR_DEV_NAME]) + return -EINVAL; + + name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); + + down_write(&vdpa_dev_lock); + dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match); + if (!dev) { + NL_SET_ERR_MSG_MOD(info->extack, "device not found"); + err = -ENODEV; + goto dev_err; + } + vdev = container_of(dev, struct vdpa_device, dev); + if (!vdev->mdev) { + NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device"); + err = -EINVAL; + goto mdev_err; + } + classes = vdpa_mgmtdev_get_classes(vdev->mdev, NULL); + if (classes & BIT_ULL(VIRTIO_ID_NET)) { + err = vdpa_dev_net_device_attr_set(vdev, info); + } else { + NL_SET_ERR_MSG_FMT_MOD(info->extack, "%s device not supported", + name); + } + +mdev_err: + put_device(dev); +dev_err: + up_write(&vdpa_dev_lock); + return err; +} + static int 
vdpa_dev_config_dump(struct device *dev, void *data) { struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); @@ -1497,6 +1571,11 @@ static const struct genl_ops vdpa_nl_ops[] = { .doit = vdpa_nl_cmd_dev_stats_get_doit, .flags = GENL_ADMIN_PERM, }, + { + .cmd = VDPA_CMD_DEV_ATTR_SET, + .doit = vdpa_nl_cmd_dev_attr_set_doit, + .flags = GENL_ADMIN_PERM, + }, }; static struct genl_family vdpa_nl_family __ro_after_init = { diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c index cfe9629118045c..6caf09a1907b59 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c @@ -414,6 +414,24 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config) net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP); } +static int vdpasim_net_set_attr(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev, + const struct vdpa_dev_set_config *config) +{ + struct vdpasim *vdpasim = container_of(dev, struct vdpasim, vdpa); + struct virtio_net_config *vio_config = vdpasim->config; + + mutex_lock(&vdpasim->mutex); + + if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) { + ether_addr_copy(vio_config->mac, config->net.mac); + mutex_unlock(&vdpasim->mutex); + return 0; + } + + mutex_unlock(&vdpasim->mutex); + return -EOPNOTSUPP; +} + static void vdpasim_net_setup_config(struct vdpasim *vdpasim, const struct vdpa_dev_set_config *config) { @@ -510,7 +528,8 @@ static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev, static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = { .dev_add = vdpasim_net_dev_add, - .dev_del = vdpasim_net_dev_del + .dev_del = vdpasim_net_dev_del, + .dev_set_attr = vdpasim_net_set_attr }; static struct virtio_device_id id_table[] = { diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c index 791d38d6284c56..58116f89d8dae9 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.c +++ b/drivers/vdpa/vdpa_user/iova_domain.c @@ -162,6 +162,7 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain, enum dma_data_direction dir) { struct vduse_bounce_map *map; + struct page *page; unsigned int offset; void *addr; size_t sz; @@ -178,7 +179,10 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain, map->orig_phys == INVALID_PHYS_ADDR)) return; - addr = kmap_local_page(map->bounce_page); + page = domain->user_bounce_pages ? 
+ map->user_bounce_page : map->bounce_page; + + addr = kmap_local_page(page); do_bounce(map->orig_phys + offset, addr + offset, sz, dir); kunmap_local(addr); size -= sz; @@ -270,9 +274,8 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain, memcpy_to_page(pages[i], 0, page_address(map->bounce_page), PAGE_SIZE); - __free_page(map->bounce_page); } - map->bounce_page = pages[i]; + map->user_bounce_page = pages[i]; get_page(pages[i]); } domain->user_bounce_pages = true; @@ -297,17 +300,17 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain) struct page *page = NULL; map = &domain->bounce_maps[i]; - if (WARN_ON(!map->bounce_page)) + if (WARN_ON(!map->user_bounce_page)) continue; /* Copy user page to kernel page if it's in use */ if (map->orig_phys != INVALID_PHYS_ADDR) { - page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL); + page = map->bounce_page; memcpy_from_page(page_address(page), - map->bounce_page, 0, PAGE_SIZE); + map->user_bounce_page, 0, PAGE_SIZE); } - put_page(map->bounce_page); - map->bounce_page = page; + put_page(map->user_bounce_page); + map->user_bounce_page = NULL; } domain->user_bounce_pages = false; out: diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h index f92f22a7267d70..7f3f0928ec7814 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.h +++ b/drivers/vdpa/vdpa_user/iova_domain.h @@ -21,6 +21,7 @@ struct vduse_bounce_map { struct page *bounce_page; + struct page *user_bounce_page; u64 orig_phys; }; diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c index 82b2afa9b7e313..7e7988c4258fa5 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c @@ -108,10 +108,10 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev, void *data) { struct fsl_mc_device *mc_dev = vdev->mc_dev; - int ret, hwirq; struct vfio_fsl_mc_irq *irq; struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev); struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev); + int ret; if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) return vfio_set_trigger(vdev, index, -1); @@ -136,8 +136,6 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev, return vfio_set_trigger(vdev, index, fd); } - hwirq = vdev->mc_dev->irqs[index]->virq; - irq = &vdev->mc_irqs[index]; if (flags & VFIO_IRQ_SET_DATA_NONE) { diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c index ded364588d2973..95b336de8a1732 100644 --- a/drivers/vfio/group.c +++ b/drivers/vfio/group.c @@ -112,7 +112,7 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group, return -EFAULT; f = fdget(fd); - if (!f.file) + if (!fd_file(f)) return -EBADF; mutex_lock(&group->group_lock); @@ -125,13 +125,13 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group, goto out_unlock; } - container = vfio_container_from_file(f.file); + container = vfio_container_from_file(fd_file(f)); if (container) { ret = vfio_container_attach_group(container, group); goto out_unlock; } - iommufd = iommufd_ctx_from_file(f.file); + iommufd = iommufd_ctx_from_file(fd_file(f)); if (!IS_ERR(iommufd)) { if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) && group->type == VFIO_NO_IOMMU) diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index 63a1316b08b72a..5f61acd0fe42b9 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -10,9 +10,6 @@ #ifndef MDEV_PRIVATE_H #define MDEV_PRIVATE_H -int mdev_bus_register(void); -void 
mdev_bus_unregister(void); - extern const struct bus_type mdev_bus_type; extern const struct attribute_group *mdev_device_groups[]; diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 9d2738e10c0b9d..e44bb44c581e2f 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -160,7 +160,7 @@ static void mdev_type_release(struct kobject *kobj) put_device(type->parent->dev); } -static struct kobj_type mdev_type_ktype = { +static const struct kobj_type mdev_type_ktype = { .sysfs_ops = &mdev_type_sysfs_ops, .release = mdev_type_release, .default_groups = mdev_type_groups, diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 9a3e97108ace81..0d632ba5d2a3c9 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -723,7 +723,6 @@ static const struct file_operations hisi_acc_vf_resume_fops = { .owner = THIS_MODULE, .write = hisi_acc_vf_resume_write, .release = hisi_acc_vf_release_file, - .llseek = no_llseek, }; static struct hisi_acc_vf_migration_file * @@ -845,7 +844,6 @@ static const struct file_operations hisi_acc_vf_save_fops = { .unlocked_ioctl = hisi_acc_vf_precopy_ioctl, .compat_ioctl = compat_ptr_ioctl, .release = hisi_acc_vf_release_file, - .llseek = no_llseek, }; static struct hisi_acc_vf_migration_file * diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index 61d9b0f9146d1b..242c23eef452e8 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -587,7 +587,6 @@ static const struct file_operations mlx5vf_save_fops = { .unlocked_ioctl = mlx5vf_precopy_ioctl, .compat_ioctl = compat_ptr_ioctl, .release = mlx5vf_release_file, - .llseek = no_llseek, }; static int mlx5vf_pci_save_device_inc_data(struct mlx5vf_pci_core_device *mvdev) @@ -1000,7 +999,6 @@ static const struct file_operations mlx5vf_resume_fops = { .owner = THIS_MODULE, .write = mlx5vf_resume_write, .release = mlx5vf_release_file, - .llseek = no_llseek, }; static struct mlx5_vf_migration_file * diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c index 6b94cc0bf45b44..f2673d395236a2 100644 --- a/drivers/vfio/pci/pds/lm.c +++ b/drivers/vfio/pci/pds/lm.c @@ -235,7 +235,6 @@ static const struct file_operations pds_vfio_save_fops = { .owner = THIS_MODULE, .read = pds_vfio_save_read, .release = pds_vfio_release_file, - .llseek = no_llseek, }; static int pds_vfio_get_save_file(struct pds_vfio_pci_device *pds_vfio) @@ -334,7 +333,6 @@ static const struct file_operations pds_vfio_restore_fops = { .owner = THIS_MODULE, .write = pds_vfio_restore_write, .release = pds_vfio_release_file, - .llseek = no_llseek, }; static int pds_vfio_get_restore_file(struct pds_vfio_pci_device *pds_vfio) diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c index e36740a282e7bc..be3644ced17be4 100644 --- a/drivers/vfio/pci/qat/main.c +++ b/drivers/vfio/pci/qat/main.c @@ -220,7 +220,6 @@ static const struct file_operations qat_vf_save_fops = { .unlocked_ioctl = qat_vf_precopy_ioctl, .compat_ioctl = compat_ptr_ioctl, .release = qat_vf_release_file, - .llseek = no_llseek, }; static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev, @@ -345,7 +344,6 @@ static const struct file_operations qat_vf_resume_fops = { .owner = THIS_MODULE, .write = qat_vf_resume_write, .release = qat_vf_release_file, - .llseek = no_llseek, }; static struct qat_vf_migration_file * diff --git a/drivers/vfio/pci/vfio_pci_core.c 
index ba0ce0075b2fb1..1ab58da9f38a6e 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/pci.h>
+#include <linux/pfn_t.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -57,11 +58,6 @@ struct vfio_pci_vf_token {
 	int			users;
 };
 
-struct vfio_pci_mmap_vma {
-	struct vm_area_struct	*vma;
-	struct list_head	vma_next;
-};
-
 static inline bool vfio_vga_disabled(void)
 {
 #ifdef CONFIG_VFIO_PCI_VGA
@@ -1328,7 +1324,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
 
 static int
 vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
-				    int array_count, bool slot,
+				    u32 array_count, bool slot,
 				    struct vfio_pci_hot_reset __user *arg)
 {
 	int32_t *group_fds;
@@ -1657,14 +1653,20 @@ static unsigned long vma_to_pfn(struct vm_area_struct *vma)
 	return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
 }
 
-static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+					   unsigned int order)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
 	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
-	unsigned long addr = vma->vm_start;
 	vm_fault_t ret = VM_FAULT_SIGBUS;
 
+	if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+		      vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+		ret = VM_FAULT_FALLBACK;
+		goto out;
+	}
+
 	pfn = vma_to_pfn(vma);
 
 	down_read(&vdev->memory_lock);
@@ -1672,30 +1674,49 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
 	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
 		goto out_unlock;
 
-	ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
-	if (ret & VM_FAULT_ERROR)
-		goto out_unlock;
-
-	/*
-	 * Pre-fault the remainder of the vma, abort further insertions and
-	 * supress error if fault is encountered during pre-fault.
-	 */
-	for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
-		if (addr == vmf->address)
-			continue;
-
-		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
-			break;
+	switch (order) {
+	case 0:
+		ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+		break;
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+	case PMD_ORDER:
+		ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
+							     PFN_DEV), false);
+		break;
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+	case PUD_ORDER:
+		ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
+							     PFN_DEV), false);
+		break;
+#endif
+	default:
+		ret = VM_FAULT_FALLBACK;
 	}
 
 out_unlock:
 	up_read(&vdev->memory_lock);
+out:
+	dev_dbg_ratelimited(&vdev->pdev->dev,
+			   "%s(,order = %d) BAR %ld page offset 0x%lx: 0x%x\n",
+			    __func__, order,
+			    vma->vm_pgoff >>
+				(VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT),
+			    pgoff, (unsigned int)ret);
 
 	return ret;
 }
 
+static vm_fault_t vfio_pci_mmap_page_fault(struct vm_fault *vmf)
+{
+	return vfio_pci_mmap_huge_fault(vmf, 0);
+}
+
 static const struct vm_operations_struct vfio_pci_mmap_ops = {
-	.fault = vfio_pci_mmap_fault,
+	.fault = vfio_pci_mmap_page_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+	.huge_fault = vfio_pci_mmap_huge_fault,
+#endif
 };
 
 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0960699e75543e..bf391b40e576fc 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -513,12 +513,10 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 			    unsigned long vaddr, unsigned long *pfn,
 			    bool write_fault)
 {
-	pte_t *ptep;
-	pte_t pte;
-	spinlock_t *ptl;
+	struct follow_pfnmap_args args = { .vma = vma, .address = vaddr };
 	int ret;
 
-	ret = follow_pte(vma, vaddr, &ptep, &ptl);
+	ret = follow_pfnmap_start(&args);
 	if (ret) {
 		bool unlocked = false;
 
@@ -532,19 +530,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 		if (ret)
 			return ret;
 
-		ret = follow_pte(vma, vaddr, &ptep, &ptl);
+		ret = follow_pfnmap_start(&args);
 		if (ret)
 			return ret;
 	}
 
-	pte = ptep_get(ptep);
-
-	if (write_fault && !pte_write(pte))
+	if (write_fault && !args.writable)
 		ret = -EFAULT;
 	else
-		*pfn = pte_pfn(pte);
+		*pfn = args.pfn;
 
-	pte_unmap_unlock(ptep, ptl);
+	follow_pfnmap_end(&args);
 	return ret;
 }
 
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 53226913380197..d22881245e8914 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -134,12 +134,12 @@ int vfio_virqfd_enable(void *opaque,
 	INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
 
 	irqfd = fdget(fd);
-	if (!irqfd.file) {
+	if (!fd_file(irqfd)) {
 		ret = -EBADF;
 		goto err_fd;
 	}
 
-	ctx = eventfd_ctx_fileget(irqfd.file);
+	ctx = eventfd_ctx_fileget(fd_file(irqfd));
 	if (IS_ERR(ctx)) {
 		ret = PTR_ERR(ctx);
 		goto err_ctx;
@@ -171,7 +171,7 @@ int vfio_virqfd_enable(void *opaque,
 	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
 	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
 
-	events = vfs_poll(irqfd.file, &virqfd->pt);
+	events = vfs_poll(fd_file(irqfd), &virqfd->pt);
 
 	/*
 	 * Check if there was an event already pending on the eventfd
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 006ffacf1c56cb..7db9bbdfb0381a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -27,7 +27,7 @@
 #include
 #include
 #include
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include
 #include
 #include
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 478cd46a49ede2..5a49b5a6d49641 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -209,11 +209,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
 	if (irq < 0)
 		return;
 
-	irq_bypass_unregister_producer(&vq->call_ctx.producer);
 	if (!vq->call_ctx.ctx)
 		return;
 
-	vq->call_ctx.producer.token = vq->call_ctx.ctx;
 	vq->call_ctx.producer.irq = irq;
 	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
 	if (unlikely(ret))
@@ -709,6 +707,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 			vq->last_avail_idx = vq_state.split.avail_index;
 		}
 		break;
+	case VHOST_SET_VRING_CALL:
+		if (vq->call_ctx.ctx) {
+			if (ops->get_status(vdpa) &
+			    VIRTIO_CONFIG_S_DRIVER_OK)
+				vhost_vdpa_unsetup_vq_irq(v, idx);
+			vq->call_ctx.producer.token = NULL;
+		}
+		break;
 	}
 
 	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
@@ -747,13 +753,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 			cb.callback = vhost_vdpa_virtqueue_cb;
 			cb.private = vq;
 			cb.trigger = vq->call_ctx.ctx;
+			vq->call_ctx.producer.token = vq->call_ctx.ctx;
+			if (ops->get_status(vdpa) &
+			    VIRTIO_CONFIG_S_DRIVER_OK)
+				vhost_vdpa_setup_vq_irq(v, idx);
 		} else {
 			cb.callback = NULL;
 			cb.private = NULL;
 			cb.trigger = NULL;
 		}
 		ops->set_vq_cb(vdpa, idx, &cb);
-		vhost_vdpa_setup_vq_irq(v, idx);
 		break;
 
 	case VHOST_SET_VRING_NUM:
@@ -1419,6 +1428,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 	for (i = 0; i < nvqs; i++) {
 		vqs[i] = &v->vqs[i];
 		vqs[i]->handle_kick = handle_vq_kick;
+		vqs[i]->call_ctx.ctx = NULL;
 	}
 	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
 		       vhost_vdpa_process_iotlb_msg);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bf664ec9341b35..802153e230730b 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -244,7 +244,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 				restart_tx = true;
 			}
-			consume_skb(skb);
+			virtio_transport_consume_skb_sent(skb, true);
 		}
 	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 
 	if (added)
@@ -451,6 +451,8 @@ static struct virtio_transport vhost_transport = {
 		.notify_buffer_size = virtio_transport_notify_buffer_size,
 		.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
 
+		.unsent_bytes = virtio_transport_unsent_bytes,
+
 		.read_skb = virtio_transport_read_skb,
 	},
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index dd0874f8c7ffc3..4175a460307112 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -166,6 +166,7 @@ static const struct lcd_ops l4f_ops = {
 static int l4f00242t03_probe(struct spi_device *spi)
 {
 	struct l4f00242t03_priv *priv;
+	int ret;
 
 	priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
 			GFP_KERNEL);
@@ -174,7 +175,9 @@ static int l4f00242t03_probe(struct spi_device *spi)
 
 	spi_set_drvdata(spi, priv);
 	spi->bits_per_word = 9;
-	spi_setup(spi);
+	ret = spi_setup(spi);
+	if (ret < 0)
+		return dev_err_probe(&spi->dev, ret, "Unable to setup spi.\n");
 
 	priv->spi = spi;
diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
index e4b2c89baee2d4..826fb04de64c26 100644
--- a/drivers/video/fbdev/aty/mach64_accel.c
+++ b/drivers/video/fbdev/aty/mach64_accel.c
@@ -5,7 +5,7 @@
  */
 
 #include
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include
 #include